diff --git a/ast/explain.go b/ast/explain.go
deleted file mode 100644
index 709f25e1b..000000000
--- a/ast/explain.go
+++ /dev/null
@@ -1,1735 +0,0 @@
-package ast
-
-import (
-	"fmt"
-	"strings"
-)
-
-// Explain returns a string representation of the AST in the same format
-// as ClickHouse's EXPLAIN AST output.
-func Explain(stmt Statement) string {
-	var b strings.Builder
-	explainNode(&b, stmt, 0)
-	return b.String()
-}
-
-// explainNode recursively writes the AST node to the builder.
-func explainNode(b *strings.Builder, node interface{}, depth int) {
-	indent := strings.Repeat(" ", depth)
-
-	switch n := node.(type) {
-	case *SelectWithUnionQuery:
-		// Check if first select has Format or IntoOutfile clause
-		var format *Identifier
-		var intoOutfile *IntoOutfileClause
-		if len(n.Selects) > 0 {
-			if sq, ok := n.Selects[0].(*SelectQuery); ok {
-				if sq.Format != nil {
-					format = sq.Format
-				}
-				if sq.IntoOutfile != nil {
-					intoOutfile = sq.IntoOutfile
-				}
-			}
-		}
-		unionChildren := 1 // ExpressionList
-		if format != nil {
-			unionChildren++
-		}
-		if intoOutfile != nil {
-			unionChildren++
-		}
-		fmt.Fprintf(b, "%sSelectWithUnionQuery (children %d)\n", indent, unionChildren)
-		fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(n.Selects))
-		for _, sel := range n.Selects {
-			explainNode(b, sel, depth+2)
-		}
-		if intoOutfile != nil {
-			fmt.Fprintf(b, "%s Literal \\'%s\\'\n", indent, intoOutfile.Filename)
-		}
-		if format != nil {
-			fmt.Fprintf(b, "%s Identifier %s\n", indent, format.Name())
-		}
-
-	case *SelectQuery:
-		children := countSelectQueryChildren(n)
-		fmt.Fprintf(b, "%sSelectQuery (children %d)\n", indent, children)
-		// WITH clause (comes first)
-		if len(n.With) > 0 {
-			fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(n.With))
-			for _, w := range n.With {
-				explainNode(b, w, depth+2)
-			}
-		}
-		// Columns
-		if len(n.Columns) > 0 {
-			fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(n.Columns))
-			for _, col := range n.Columns {
-				explainNode(b, col, depth+2)
-			}
-		}
-		// From (with ArrayJoin integrated)
-		if n.From != nil || n.ArrayJoin != nil {
-			explainTablesWithArrayJoin(b, n.From, n.ArrayJoin, depth+1)
-		}
-		// PreWhere
-		if n.PreWhere != nil {
-			explainNode(b, n.PreWhere, depth+1)
-		}
-		// Where
-		if n.Where != nil {
-			explainNode(b, n.Where, depth+1)
-		}
-		// GroupBy
-		if len(n.GroupBy) > 0 {
-			fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(n.GroupBy))
-			for _, expr := range n.GroupBy {
-				explainNode(b, expr, depth+2)
-			}
-		}
-		// Having
-		if n.Having != nil {
-			explainNode(b, n.Having, depth+1)
-		}
-		// OrderBy
-		if len(n.OrderBy) > 0 {
-			fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(n.OrderBy))
-			for _, elem := range n.OrderBy {
-				explainOrderByElement(b, elem, depth+2)
-			}
-		}
-		// Offset (comes before Limit in ClickHouse output)
-		if n.Offset != nil {
-			explainNode(b, n.Offset, depth+1)
-		}
-		// Limit
-		if n.Limit != nil {
-			explainNode(b, n.Limit, depth+1)
-		}
-		// Settings
-		if len(n.Settings) > 0 {
-			fmt.Fprintf(b, "%s Set\n", indent)
-		}
-		// Window clause (WINDOW w AS ...)
-		if len(n.Window) > 0 {
-			fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(n.Window))
-			for range n.Window {
-				fmt.Fprintf(b, "%s WindowListElement\n", indent)
-			}
-		}
-
-	case *TablesInSelectQuery:
-		fmt.Fprintf(b, "%sTablesInSelectQuery (children %d)\n", indent, len(n.Tables))
-		for i, table := range n.Tables {
-			explainTablesInSelectQueryElement(b, table, i > 0, depth+1)
-		}
-
-	case *TablesInSelectQueryElement:
-		// This case is kept for direct calls, but TablesInSelectQuery uses the specialized function
-		children := 0
-		if n.Table != nil {
-			children++
-		}
-		if n.Join != nil {
-			children++
-		}
-		fmt.Fprintf(b, "%sTablesInSelectQueryElement (children %d)\n", indent, children)
-		if n.Table != nil {
-			explainNode(b, n.Table, depth+1)
-		}
-		if n.Join != nil {
-			explainTableJoin(b, n.Join, depth+1)
-		}
-
-	case *TableExpression:
-		children := 1
-		if n.Sample != nil {
-			children++ // SampleRatio for ratio
-			if n.Sample.Offset != nil {
-				children++ // SampleRatio for offset
-			}
-		}
-		fmt.Fprintf(b, "%sTableExpression (children %d)\n", indent, children)
-		// Pass alias to the inner Table
-		explainTableWithAlias(b, n.Table, n.Alias, depth+1)
-		if n.Sample != nil {
-			explainSampleClause(b, n.Sample, depth+1)
-		}
-
-	case *TableIdentifier:
-		name := n.Table
-		if n.Database != "" {
-			name = n.Database + "." + name
-		}
-		if n.Alias != "" {
-			fmt.Fprintf(b, "%sTableIdentifier %s (alias %s)\n", indent, name, n.Alias)
-		} else {
-			fmt.Fprintf(b, "%sTableIdentifier %s\n", indent, name)
-		}
-
-	case *Identifier:
-		name := n.Name()
-		if n.Alias != "" {
-			fmt.Fprintf(b, "%sIdentifier %s (alias %s)\n", indent, name, n.Alias)
-		} else {
-			fmt.Fprintf(b, "%sIdentifier %s\n", indent, name)
-		}
-
-	case *Literal:
-		// Empty array literal is represented as a function call
-		if n.Type == LiteralArray {
-			if arr, ok := n.Value.([]Expression); ok && len(arr) == 0 {
-				fmt.Fprintf(b, "%sFunction array (children 1)\n", indent)
-				fmt.Fprintf(b, "%s ExpressionList\n", indent)
-				return
-			}
-			if arr, ok := n.Value.([]interface{}); ok && len(arr) == 0 {
-				fmt.Fprintf(b, "%sFunction array (children 1)\n", indent)
-				fmt.Fprintf(b, "%s ExpressionList\n", indent)
-				return
-			}
-		}
-		// Tuple containing expressions should be output as Function tuple
-		if n.Type == LiteralTuple {
-			if exprs, ok := n.Value.([]Expression); ok && len(exprs) > 0 {
-				// Check if any element is not a simple literal
-				hasNonLiteral := false
-				for _, e := range exprs {
-					if _, isLit := e.(*Literal); !isLit {
-						hasNonLiteral = true
-						break
-					}
-				}
-				if hasNonLiteral {
-					fmt.Fprintf(b, "%sFunction tuple (children 1)\n", indent)
-					fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(exprs))
-					for _, e := range exprs {
-						explainNode(b, e, depth+2)
-					}
-					return
-				}
-			}
-		}
-		explainLiteral(b, n, "", depth)
-
-	case *FunctionCall:
-		explainFunctionCall(b, n, depth)
-
-	case *BinaryExpr:
-		funcName := binaryOpToFunction(n.Op)
-		args := []Expression{n.Left, n.Right}
-		fmt.Fprintf(b, "%sFunction %s (children 1)\n", indent, funcName)
-		fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(args))
-		for _, arg := range args {
-			explainNode(b, arg, depth+2)
-		}
-
-	case *UnaryExpr:
-		// Special case: unary minus on a literal integer becomes a negative literal
-		if n.Op == "-" {
-			if lit, ok := n.Operand.(*Literal); ok && lit.Type == LiteralInteger {
-				fmt.Fprintf(b, "%sLiteral Int64_-%v\n", indent, lit.Value)
-				return
-			}
-		}
-		funcName := unaryOpToFunction(n.Op)
-		fmt.Fprintf(b, "%sFunction %s (children 1)\n", indent, funcName)
-		fmt.Fprintf(b, "%s ExpressionList (children 1)\n", indent)
-		explainNode(b, n.Operand, depth+2)
-
-	case *Asterisk:
-		if len(n.Except) > 0 || len(n.Replace) > 0 {
-			children := 0
-			if len(n.Except) > 0 || len(n.Replace) > 0 {
-				children = 1
-			}
-			if n.Table != "" {
-				fmt.Fprintf(b, "%sQualifiedAsterisk (children %d)\n", indent, children+1)
-				fmt.Fprintf(b, "%s Identifier %s\n", indent, n.Table)
-			} else {
-				fmt.Fprintf(b, "%sAsterisk (children %d)\n", indent, children)
-			}
-			if len(n.Except) > 0 {
-				fmt.Fprintf(b, "%s ColumnsTransformerList (children 1)\n", indent)
-				fmt.Fprintf(b, "%s ColumnsExceptTransformer (children %d)\n", indent, len(n.Except))
-				for _, col := range n.Except {
-					fmt.Fprintf(b, "%s Identifier %s\n", indent, col)
-				}
-			}
-			if len(n.Replace) > 0 {
-				fmt.Fprintf(b, "%s ColumnsTransformerList (children 1)\n", indent)
-				fmt.Fprintf(b, "%s ColumnsReplaceTransformer (children %d)\n", indent, len(n.Replace))
-				for _, r := range n.Replace {
-					fmt.Fprintf(b, "%s ColumnsReplaceTransformer::Replacement (children 1)\n", indent)
-					// Unwrap AliasedExpr if present - REPLACE doesn't output alias on expression
-					expr := r.Expr
-					if ae, ok := expr.(*AliasedExpr); ok {
-						expr = ae.Expr
-					}
-					explainNode(b, expr, depth+4)
-				}
-			}
-		} else if n.Table != "" {
-			fmt.Fprintf(b, "%sQualifiedAsterisk (children 1)\n", indent)
-			fmt.Fprintf(b, "%s Identifier %s\n", indent, n.Table)
-		} else {
-			fmt.Fprintf(b, "%sAsterisk\n", indent)
-		}
-
-	case *ColumnsMatcher:
-		fmt.Fprintf(b, "%sColumnsRegexpMatcher\n", indent)
-
-	case *Subquery:
-		if n.Alias != "" {
-			fmt.Fprintf(b, "%sSubquery (alias %s) (children 1)\n", indent, n.Alias)
-		} else {
-			fmt.Fprintf(b, "%sSubquery (children 1)\n", indent)
-		}
-		explainNode(b, n.Query, depth+1)
-
-	case *CaseExpr:
-		explainCaseExpr(b, n, depth)
-
-	case *CastExpr:
-		explainCastExpr(b, n, depth)
-
-	case *Lambda:
-		explainLambda(b, n, depth)
-
-	case *TernaryExpr:
-		// Ternary is represented as if(cond, then, else)
-		fmt.Fprintf(b, "%sFunction if (children 1)\n", indent)
-		fmt.Fprintf(b, "%s ExpressionList (children 3)\n", indent)
-		explainNode(b, n.Condition, depth+2)
-		explainNode(b, n.Then, depth+2)
-		explainNode(b, n.Else, depth+2)
-
-	case *InExpr:
-		funcName := "in"
-		if n.Not {
-			funcName = "notIn"
-		}
-		if n.Global {
-			funcName = "global" + strings.Title(funcName)
-		}
-		fmt.Fprintf(b, "%sFunction %s (children 1)\n", indent, funcName)
-		fmt.Fprintf(b, "%s ExpressionList (children 2)\n", indent)
-		explainNode(b, n.Expr, depth+2)
-		if n.Query != nil {
-			// Wrap query in Subquery node
-			fmt.Fprintf(b, "%s Subquery (children 1)\n", indent)
-			explainNode(b, n.Query, depth+3)
-		} else {
-			// List is shown as a Tuple literal
-			explainInListAsTuple(b, n.List, depth+2)
-		}
-
-	case *BetweenExpr:
-		// BETWEEN is expanded to and(greaterOrEquals(expr, low), lessOrEquals(expr, high))
-		// NOT BETWEEN is expanded to or(less(expr, low), greater(expr, high))
-		if n.Not {
-			fmt.Fprintf(b, "%sFunction or (children 1)\n", indent)
-			fmt.Fprintf(b, "%s ExpressionList (children 2)\n", indent)
-			fmt.Fprintf(b, "%s Function less (children 1)\n", indent)
-			fmt.Fprintf(b, "%s ExpressionList (children 2)\n", indent)
-			explainNode(b, n.Expr, depth+4)
-			explainNode(b, n.Low, depth+4)
-			fmt.Fprintf(b, "%s Function greater (children 1)\n", indent)
-			fmt.Fprintf(b, "%s ExpressionList (children 2)\n", indent)
-			explainNode(b, n.Expr, depth+4)
-			explainNode(b, n.High, depth+4)
-		} else {
-			fmt.Fprintf(b, "%sFunction and (children 1)\n", indent)
-			fmt.Fprintf(b, "%s ExpressionList (children 2)\n", indent)
-			fmt.Fprintf(b, "%s Function greaterOrEquals (children 1)\n", indent)
-			fmt.Fprintf(b, "%s ExpressionList (children 2)\n", indent)
-			explainNode(b, n.Expr, depth+4)
-			explainNode(b, n.Low, depth+4)
-			fmt.Fprintf(b, "%s Function lessOrEquals (children 1)\n", indent)
-			fmt.Fprintf(b, "%s ExpressionList (children 2)\n", indent)
-			explainNode(b, n.Expr, depth+4)
-			explainNode(b, n.High, depth+4)
-		}
-
-	case *IsNullExpr:
-		funcName := "isNull"
-		if n.Not {
-			funcName = "isNotNull"
-		}
-		fmt.Fprintf(b, "%sFunction %s (children 1)\n", indent, funcName)
-		fmt.Fprintf(b, "%s ExpressionList (children 1)\n", indent)
-		explainNode(b, n.Expr, depth+2)
-
-	case *LikeExpr:
-		funcName := "like"
-		if n.Not && n.CaseInsensitive {
-			funcName = "notILike"
-		} else if n.CaseInsensitive {
-			funcName = "ilike"
-		} else if n.Not {
-			funcName = "notLike"
-		}
-		fmt.Fprintf(b, "%sFunction %s (children 1)\n", indent, funcName)
-		fmt.Fprintf(b, "%s ExpressionList (children 2)\n", indent)
-		explainNode(b, n.Expr, depth+2)
-		explainNode(b, n.Pattern, depth+2)
-
-	case *ArrayAccess:
-		fmt.Fprintf(b, "%sFunction arrayElement (children 1)\n", indent)
-		fmt.Fprintf(b, "%s ExpressionList (children 2)\n", indent)
-		explainNode(b, n.Array, depth+2)
-		explainNode(b, n.Index, depth+2)
-
-	case *TupleAccess:
-		fmt.Fprintf(b, "%sFunction tupleElement (children 1)\n", indent)
-		fmt.Fprintf(b, "%s ExpressionList (children 2)\n", indent)
-		explainNode(b, n.Tuple, depth+2)
-		explainNode(b, n.Index, depth+2)
-
-	case *IntervalExpr:
-		fmt.Fprintf(b, "%sFunction toInterval%s (children 1)\n", indent, strings.Title(strings.ToLower(n.Unit)))
-		fmt.Fprintf(b, "%s ExpressionList (children 1)\n", indent)
-		explainNode(b, n.Value, depth+2)
-
-	case *ExtractExpr:
-		// EXTRACT(YEAR FROM date) becomes toYear(date)
-		funcName := extractFieldToFunction(n.Field)
-		if n.Alias != "" {
-			fmt.Fprintf(b, "%sFunction %s (alias %s) (children 1)\n", indent, funcName, n.Alias)
-		} else {
-			fmt.Fprintf(b, "%sFunction %s (children 1)\n", indent, funcName)
-		}
-		fmt.Fprintf(b, "%s ExpressionList (children 1)\n", indent)
-		explainNode(b, n.From, depth+2)
-
-	case *AliasedExpr:
-		// For aliased expressions, we need to print the inner expression with the alias
-		explainNodeWithAlias(b, n.Expr, n.Alias, depth)
-
-	case *WithElement:
-		// For scalar WITH (WITH 1 AS x), output the expression with alias
-		// For subquery WITH (WITH x AS (SELECT 1)), output as WithElement
-		if _, isSubquery := n.Query.(*Subquery); isSubquery {
-			fmt.Fprintf(b, "%sWithElement (children 1)\n", indent)
-			explainNode(b, n.Query, depth+1)
-		} else {
-			// Scalar expression - output with alias
-			explainNodeWithAlias(b, n.Query, n.Name, depth)
-		}
-
-	case *ExistsExpr:
-		fmt.Fprintf(b, "%sFunction exists (children 1)\n", indent)
-		fmt.Fprintf(b, "%s ExpressionList (children 1)\n", indent)
-		// Wrap query in Subquery node
-		fmt.Fprintf(b, "%s Subquery (children 1)\n", indent)
-		explainNode(b, n.Query, depth+3)
-
-	case *DataType:
-		// Data types in expressions (like in CAST)
-		if len(n.Parameters) > 0 {
-			fmt.Fprintf(b, "%sFunction %s (children 1)\n", indent, n.Name)
-			fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(n.Parameters))
-			for _, p := range n.Parameters {
-				explainNode(b, p, depth+2)
-			}
-		} else {
-			fmt.Fprintf(b, "%sIdentifier %s\n", indent, n.Name)
-		}
-
-	// Non-SELECT statements
-	case *UseQuery:
-		fmt.Fprintf(b, "%sUseQuery %s (children 1)\n", indent, n.Database)
-		fmt.Fprintf(b, "%s Identifier %s\n", indent, n.Database)
-
-	case *TruncateQuery:
-		tableName := n.Table
-		if n.Database != "" {
-			tableName = n.Database + "." + tableName
-		}
-		fmt.Fprintf(b, "%sTruncateQuery %s (children 1)\n", indent, tableName)
-		fmt.Fprintf(b, "%s Identifier %s\n", indent, tableName)
-
-	case *AlterQuery:
-		tableName := n.Table
-		if n.Database != "" {
-			tableName = n.Database + "." + tableName
-		}
-		fmt.Fprintf(b, "%sAlterQuery %s (children 2)\n", indent, tableName)
-		fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(n.Commands))
-		for _, cmd := range n.Commands {
-			explainAlterCommand(b, cmd, depth+2)
-		}
-		fmt.Fprintf(b, "%s Identifier %s\n", indent, tableName)
-
-	case *DropQuery:
-		var name string
-		if n.DropDatabase {
-			name = n.Database
-		} else if n.View != "" {
-			name = n.View
-		} else {
-			name = n.Table
-		}
-		if n.Database != "" && !n.DropDatabase {
-			name = n.Database + "." + name
-		}
-		// Different spacing for DROP DATABASE vs DROP TABLE
-		if n.DropDatabase {
-			fmt.Fprintf(b, "%sDropQuery %s (children 1)\n", indent, name)
-		} else {
-			fmt.Fprintf(b, "%sDropQuery %s (children 1)\n", indent, name)
-		}
-		fmt.Fprintf(b, "%s Identifier %s\n", indent, name)
-
-	case *CreateQuery:
-		explainCreateQuery(b, n, depth)
-
-	case *InsertQuery:
-		tableName := n.Table
-		if n.Database != "" {
-			tableName = n.Database + "." + tableName
-		}
-		children := 1 // Always have table identifier
-		if len(n.Columns) > 0 {
-			children++ // column list
-		}
-		if n.Select != nil {
-			children++
-		}
-		fmt.Fprintf(b, "%sInsertQuery (children %d)\n", indent, children)
-		fmt.Fprintf(b, "%s Identifier %s\n", indent, tableName)
-		if len(n.Columns) > 0 {
-			fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(n.Columns))
-			for _, col := range n.Columns {
-				fmt.Fprintf(b, "%s Identifier %s\n", indent, col.Name())
-			}
-		}
-		if n.Select != nil {
-			explainNode(b, n.Select, depth+1)
-		}
-
-	case *SystemQuery:
-		children := 0
-		if n.Database != "" {
-			children++
-		}
-		if n.Table != "" {
-			children++
-		}
-		if children > 0 {
-			fmt.Fprintf(b, "%sSYSTEM query (children %d)\n", indent, children)
-			if n.Database != "" {
-				fmt.Fprintf(b, "%s Identifier %s\n", indent, n.Database)
-			}
-			if n.Table != "" {
-				fmt.Fprintf(b, "%s Identifier %s\n", indent, n.Table)
-			}
-		} else {
-			fmt.Fprintf(b, "%sSYSTEM query\n", indent)
-		}
-
-	case *OptimizeQuery:
-		tableName := n.Table
-		if n.Database != "" {
-			tableName = n.Database + "." + tableName
-		}
-		// Add suffix based on flags
-		displayName := tableName
-		if n.Final {
-			displayName = tableName + "_final"
-		} else if n.Dedupe {
-			displayName = tableName + "_deduplicate"
-		}
-		children := 1 // identifier
-		if n.Partition != nil {
-			children++
-		}
-		fmt.Fprintf(b, "%sOptimizeQuery %s (children %d)\n", indent, displayName, children)
-		if n.Partition != nil {
-			fmt.Fprintf(b, "%s Partition (children 1)\n", indent)
-			explainNode(b, n.Partition, depth+2)
-		}
-		fmt.Fprintf(b, "%s Identifier %s\n", indent, tableName)
-
-	case *DescribeQuery:
-		tableName := n.Table
-		if n.Database != "" {
-			tableName = n.Database + "." + tableName
-		}
-		fmt.Fprintf(b, "%sDescribeQuery (children 1)\n", indent)
-		fmt.Fprintf(b, "%s TableExpression (children 1)\n", indent)
-		fmt.Fprintf(b, "%s TableIdentifier %s\n", indent, tableName)
-
-	case *ShowQuery:
-		// Handle SHOW CREATE specially
-		if n.ShowType == ShowCreate || n.ShowType == ShowCreateDB {
-			if n.ShowType == ShowCreate {
-				// SHOW CREATE TABLE
-				tableName := n.From
-				if n.Database != "" && tableName != "" {
-					fmt.Fprintf(b, "%sShowCreateTableQuery %s %s (children 2)\n", indent, n.Database, tableName)
-					fmt.Fprintf(b, "%s Identifier %s\n", indent, n.Database)
-					fmt.Fprintf(b, "%s Identifier %s\n", indent, tableName)
-				} else if tableName != "" {
-					fmt.Fprintf(b, "%sShowCreateTableQuery %s (children 1)\n", indent, tableName)
-					fmt.Fprintf(b, "%s Identifier %s\n", indent, tableName)
-				} else {
-					fmt.Fprintf(b, "%sShowCreate\n", indent)
-				}
-			} else {
-				// SHOW CREATE DATABASE - database name is in From field
-				dbName := n.From
-				fmt.Fprintf(b, "%sShowCreateDatabaseQuery %s (children 1)\n", indent, dbName)
-				fmt.Fprintf(b, "%s Identifier %s\n", indent, dbName)
-			}
-		} else if n.ShowType == ShowProcesses {
-			fmt.Fprintf(b, "%sShowProcesslistQuery\n", indent)
-		} else if n.ShowType == ShowColumns {
-			// SHOW COLUMNS doesn't output table name in children
-			fmt.Fprintf(b, "%sShowColumns\n", indent)
-		} else if n.ShowType == ShowTables && (n.From != "" || n.Database != "") {
-			// SHOW TABLES FROM database
-			dbName := n.From
-			if dbName == "" {
-				dbName = n.Database
-			}
-			fmt.Fprintf(b, "%sShowTables (children 1)\n", indent)
-			fmt.Fprintf(b, "%s Identifier %s\n", indent, dbName)
-		} else {
-			showName := showTypeToName(n.ShowType)
-			fmt.Fprintf(b, "%s%s\n", indent, showName)
-		}
-
-	case *SetQuery:
-		fmt.Fprintf(b, "%sSet\n", indent)
-
-	case *RenameQuery:
-		fmt.Fprintf(b, "%sRename (children 2)\n", indent)
-		fmt.Fprintf(b, "%s Identifier %s\n", indent, n.From)
-		fmt.Fprintf(b, "%s Identifier %s\n", indent, n.To)
-
-	case *ExchangeQuery:
-		fmt.Fprintf(b, "%sRename (children 2)\n", indent)
-		fmt.Fprintf(b, "%s Identifier %s\n", indent, n.Table1)
-		fmt.Fprintf(b, "%s Identifier %s\n", indent, n.Table2)
-
-	case *ExplainQuery:
-		explainType := normalizeExplainType(string(n.ExplainType))
-		fmt.Fprintf(b, "%sExplain %s (children 1)\n", indent, explainType)
-		explainNode(b, n.Statement, depth+1)
-
-	default:
-		// For unknown types, just print the type name
-		fmt.Fprintf(b, "%s%T\n", indent, n)
-	}
-}
-
-// explainTableWithAlias prints a table expression (TableIdentifier, Subquery, Function) with an alias.
-func explainTableWithAlias(b *strings.Builder, table interface{}, alias string, depth int) {
-	indent := strings.Repeat(" ", depth)
-
-	switch t := table.(type) {
-	case *TableIdentifier:
-		name := t.Table
-		if t.Database != "" {
-			name = t.Database + "." + name
-		}
-		if alias != "" {
-			fmt.Fprintf(b, "%sTableIdentifier %s (alias %s)\n", indent, name, alias)
-		} else if t.Alias != "" {
-			fmt.Fprintf(b, "%sTableIdentifier %s (alias %s)\n", indent, name, t.Alias)
-		} else {
-			fmt.Fprintf(b, "%sTableIdentifier %s\n", indent, name)
-		}
-
-	case *Subquery:
-		if alias != "" {
-			fmt.Fprintf(b, "%sSubquery (alias %s) (children 1)\n", indent, alias)
-		} else if t.Alias != "" {
-			fmt.Fprintf(b, "%sSubquery (alias %s) (children 1)\n", indent, t.Alias)
-		} else {
-			fmt.Fprintf(b, "%sSubquery (children 1)\n", indent)
-		}
-		explainNode(b, t.Query, depth+1)
-
-	case *FunctionCall:
-		// For table functions like numbers(), pass alias
-		if alias != "" {
-			explainFunctionCallWithAlias(b, t, alias, depth)
-		} else {
-			explainFunctionCall(b, t, depth)
-		}
-
-	default:
-		explainNode(b, table, depth)
-	}
-}
-
-// explainNodeWithAlias prints a node with an alias suffix.
-func explainNodeWithAlias(b *strings.Builder, node interface{}, alias string, depth int) {
-	indent := strings.Repeat(" ", depth)
-
-	switch n := node.(type) {
-	case *Literal:
-		explainLiteral(b, n, alias, depth)
-
-	case *Identifier:
-		name := n.Name()
-		if alias != "" {
-			fmt.Fprintf(b, "%sIdentifier %s (alias %s)\n", indent, name, alias)
-		} else if n.Alias != "" {
-			fmt.Fprintf(b, "%sIdentifier %s (alias %s)\n", indent, name, n.Alias)
-		} else {
-			fmt.Fprintf(b, "%sIdentifier %s\n", indent, name)
-		}
-
-	case *FunctionCall:
-		explainFunctionCallWithAlias(b, n, alias, depth)
-
-	case *BinaryExpr:
-		funcName := binaryOpToFunction(n.Op)
-		args := []Expression{n.Left, n.Right}
-		if alias != "" {
-			fmt.Fprintf(b, "%sFunction %s (alias %s) (children 1)\n", indent, funcName, alias)
-		} else {
-			fmt.Fprintf(b, "%sFunction %s (children 1)\n", indent, funcName)
-		}
-		fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(args))
-		for _, arg := range args {
-			explainNode(b, arg, depth+2)
-		}
-		return
-
-	default:
-		// Fall back to regular node printing
-		explainNode(b, node, depth)
-	}
-}
-
-// explainLiteral formats a literal value.
-func explainLiteral(b *strings.Builder, lit *Literal, alias string, depth int) {
-	indent := strings.Repeat(" ", depth)
-	var valueStr string
-
-	switch lit.Type {
-	case LiteralString:
-		// Escape backslashes in string literals (ClickHouse doubles them)
-		strVal := strings.ReplaceAll(fmt.Sprintf("%v", lit.Value), "\\", "\\\\")
-		valueStr = fmt.Sprintf("\\'%s\\'", strVal)
-	case LiteralInteger:
-		valueStr = fmt.Sprintf("UInt64_%v", lit.Value)
-	case LiteralFloat:
-		valueStr = fmt.Sprintf("Float64_%v", lit.Value)
-	case LiteralBoolean:
-		if lit.Value.(bool) {
-			valueStr = "Bool_1"
-		} else {
-			valueStr = "Bool_0"
-		}
-	case LiteralNull:
-		valueStr = "NULL"
-	case LiteralArray:
-		valueStr = formatArrayLiteral(lit.Value)
-	case LiteralTuple:
-		valueStr = formatTupleLiteral(lit.Value)
-	default:
-		valueStr = fmt.Sprintf("%v", lit.Value)
-	}
-
-	if alias != "" {
-		fmt.Fprintf(b, "%sLiteral %s (alias %s)\n", indent, valueStr, alias)
-	} else {
-		fmt.Fprintf(b, "%sLiteral %s\n", indent, valueStr)
-	}
-}
-
-// formatArrayLiteral formats an array literal.
-func formatArrayLiteral(value interface{}) string {
-	switch v := value.(type) {
-	case []interface{}:
-		parts := make([]string, len(v))
-		for i, elem := range v {
-			parts[i] = formatLiteralElement(elem)
-		}
-		return fmt.Sprintf("Array_[%s]", strings.Join(parts, ", "))
-	case []Expression:
-		parts := make([]string, len(v))
-		for i, elem := range v {
-			if lit, ok := elem.(*Literal); ok {
-				switch lit.Type {
-				case LiteralString:
-					escaped := strings.ReplaceAll(fmt.Sprintf("%v", lit.Value), "\\", "\\\\")
-					parts[i] = fmt.Sprintf("\\'%s\\'", escaped)
-				case LiteralInteger:
-					parts[i] = fmt.Sprintf("UInt64_%v", lit.Value)
-				case LiteralFloat:
-					parts[i] = fmt.Sprintf("Float64_%v", lit.Value)
-				case LiteralArray:
-					parts[i] = formatArrayLiteral(lit.Value)
-				case LiteralTuple:
-					parts[i] = formatTupleLiteral(lit.Value)
-				default:
-					parts[i] = fmt.Sprintf("%v", lit.Value)
-				}
-			} else {
-				parts[i] = fmt.Sprintf("%v", elem)
-			}
-		}
-		return fmt.Sprintf("Array_[%s]", strings.Join(parts, ", "))
-	default:
-		return fmt.Sprintf("Array_%v", value)
-	}
-}
-
-// formatTupleLiteral formats a tuple literal.
-func formatTupleLiteral(value interface{}) string {
-	switch v := value.(type) {
-	case []interface{}:
-		parts := make([]string, len(v))
-		for i, elem := range v {
-			parts[i] = formatLiteralElement(elem)
-		}
-		return fmt.Sprintf("Tuple_(%s)", strings.Join(parts, ", "))
-	case []Expression:
-		parts := make([]string, len(v))
-		for i, elem := range v {
-			if lit, ok := elem.(*Literal); ok {
-				switch lit.Type {
-				case LiteralString:
-					escaped := strings.ReplaceAll(fmt.Sprintf("%v", lit.Value), "\\", "\\\\")
-					parts[i] = fmt.Sprintf("\\'%s\\'", escaped)
-				case LiteralInteger:
-					parts[i] = fmt.Sprintf("UInt64_%v", lit.Value)
-				case LiteralFloat:
-					parts[i] = fmt.Sprintf("Float64_%v", lit.Value)
-				default:
-					parts[i] = fmt.Sprintf("%v", lit.Value)
-				}
-			} else {
-				parts[i] = fmt.Sprintf("%v", elem)
-			}
-		}
-		return fmt.Sprintf("Tuple_(%s)", strings.Join(parts, ", "))
-	default:
-		return fmt.Sprintf("Tuple_%v", value)
-	}
-}
-
-// explainInListAsTuple formats an IN list as a Tuple literal.
-func explainInListAsTuple(b *strings.Builder, list []Expression, depth int) {
-	indent := strings.Repeat(" ", depth)
-
-	// Build the tuple elements
-	parts := make([]string, len(list))
-	for i, elem := range list {
-		if lit, ok := elem.(*Literal); ok {
-			switch lit.Type {
-			case LiteralString:
-				parts[i] = fmt.Sprintf("'%v'", lit.Value)
-			case LiteralInteger:
-				parts[i] = fmt.Sprintf("UInt64_%v", lit.Value)
-			case LiteralFloat:
-				parts[i] = fmt.Sprintf("Float64_%v", lit.Value)
-			default:
-				parts[i] = fmt.Sprintf("%v", lit.Value)
-			}
-		} else {
-			parts[i] = fmt.Sprintf("%v", elem)
-		}
-	}
-
-	fmt.Fprintf(b, "%sLiteral Tuple_(%s)\n", indent, strings.Join(parts, ", "))
-}
-
-// formatLiteralElement formats a single literal element.
-func formatLiteralElement(elem interface{}) string {
-	switch e := elem.(type) {
-	case string:
-		escaped := strings.ReplaceAll(e, "\\", "\\\\")
-		return fmt.Sprintf("\\'%s\\'", escaped)
-	case int, int64, uint64:
-		return fmt.Sprintf("UInt64_%v", e)
-	case float64:
-		return fmt.Sprintf("Float64_%v", e)
-	case bool:
-		if e {
-			return "Bool_1"
-		}
-		return "Bool_0"
-	default:
-		return fmt.Sprintf("%v", e)
-	}
-}
-
-// explainFunctionCall formats a function call.
-func explainFunctionCall(b *strings.Builder, fn *FunctionCall, depth int) {
-	explainFunctionCallWithAlias(b, fn, fn.Alias, depth)
-}
-
-// normalizeFunctionName normalizes function names to match ClickHouse EXPLAIN AST output.
-func normalizeFunctionName(name string) string {
-	switch strings.ToLower(name) {
-	case "trim":
-		return "trimBoth"
-	case "ltrim":
-		return "trimLeft"
-	case "rtrim":
-		return "trimRight"
-	default:
-		return name
-	}
-}
-
-// explainFunctionCallWithAlias formats a function call with an optional alias.
-func explainFunctionCallWithAlias(b *strings.Builder, fn *FunctionCall, alias string, depth int) {
-	indent := strings.Repeat(" ", depth)
-	name := normalizeFunctionName(fn.Name)
-	// DISTINCT in aggregate functions gets appended to function name
-	if fn.Distinct {
-		name = name + "Distinct"
-	}
-
-	// Count children:
-	// - 1 for arguments ExpressionList
-	// - 1 for parameters ExpressionList if present (parametric aggregate functions)
-	// - 1 for window spec if present (but not for named windows, which are output separately)
-	children := 1 // Always have arguments ExpressionList
-	if len(fn.Parameters) > 0 {
-		children++
-	}
-	// Only count window spec for inline windows, not named windows (OVER w)
-	hasInlineWindow := fn.Over != nil && fn.Over.Name == ""
-	if hasInlineWindow {
-		children++
-	}
-
-	aliasSuffix := ""
-	if alias != "" {
-		aliasSuffix = fmt.Sprintf(" (alias %s)", alias)
-	}
-
-	fmt.Fprintf(b, "%sFunction %s%s (children %d)\n", indent, name, aliasSuffix, children)
-
-	// Arguments (first ExpressionList)
-	if len(fn.Arguments) > 0 {
-		fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(fn.Arguments))
-		for _, arg := range fn.Arguments {
-			explainNode(b, arg, depth+2)
-		}
-	} else {
-		// Empty argument list
-		fmt.Fprintf(b, "%s ExpressionList\n", indent)
-	}
-
-	// Parameters (second ExpressionList, for parametric aggregate functions like quantile(0.9))
-	if len(fn.Parameters) > 0 {
-		fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(fn.Parameters))
-		for _, param := range fn.Parameters {
-			explainNode(b, param, depth+2)
-		}
-	}
-
-	// Window specification (only for inline windows, not named windows)
-	if hasInlineWindow {
-		explainWindowSpec(b, fn.Over, depth+1)
-	}
-}
-
-// explainWindowSpec formats a window specification.
-func explainWindowSpec(b *strings.Builder, spec *WindowSpec, depth int) {
-	indent := strings.Repeat(" ", depth)
-
-	// Count children: partition by + order by + frame bounds
-	children := 0
-	if len(spec.PartitionBy) > 0 {
-		children++
-	}
-	if len(spec.OrderBy) > 0 {
-		children++
-	}
-	// Count frame bound children
-	if spec.Frame != nil {
-		if spec.Frame.StartBound != nil && spec.Frame.StartBound.Offset != nil {
-			children++
-		}
-		if spec.Frame.EndBound != nil && spec.Frame.EndBound.Offset != nil {
-			children++
-		}
-	}
-
-	if children > 0 {
-		fmt.Fprintf(b, "%sWindowDefinition (children %d)\n", indent, children)
-	} else {
-		fmt.Fprintf(b, "%sWindowDefinition\n", indent)
-	}
-
-	// Partition by
-	if len(spec.PartitionBy) > 0 {
-		fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(spec.PartitionBy))
-		for _, expr := range spec.PartitionBy {
-			explainNode(b, expr, depth+2)
-		}
-	}
-
-	// Order by
-	if len(spec.OrderBy) > 0 {
-		fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(spec.OrderBy))
-		for _, elem := range spec.OrderBy {
-			explainOrderByElement(b, elem, depth+2)
-		}
-	}
-
-	// Frame bounds
-	if spec.Frame != nil {
-		if spec.Frame.StartBound != nil && spec.Frame.StartBound.Offset != nil {
-			explainNode(b, spec.Frame.StartBound.Offset, depth+1)
-		}
-		if spec.Frame.EndBound != nil && spec.Frame.EndBound.Offset != nil {
-			explainNode(b, spec.Frame.EndBound.Offset, depth+1)
-		}
-	}
-}
-
-// explainTableJoin formats a table join.
-func explainTableJoin(b *strings.Builder, join *TableJoin, depth int) {
-	indent := strings.Repeat(" ", depth)
-	children := 0
-	if join.On != nil {
-		children++
-	}
-	if len(join.Using) > 0 {
-		children++
-	}
-	if children > 0 {
-		fmt.Fprintf(b, "%sTableJoin (children %d)\n", indent, children)
-	} else {
-		fmt.Fprintf(b, "%sTableJoin\n", indent)
-	}
-	if join.On != nil {
-		explainNode(b, join.On, depth+1)
-	}
-	if len(join.Using) > 0 {
-		fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(join.Using))
-		for _, col := range join.Using {
-			explainNode(b, col, depth+2)
-		}
-	}
-}
-
-// explainArrayJoinClause formats an array join as a table element.
-func explainArrayJoinClause(b *strings.Builder, aj *ArrayJoinClause, depth int) {
-	// Array join is already represented in TablesInSelectQuery
-	// This is just for when it's encountered directly
-	indent := strings.Repeat(" ", depth)
-	fmt.Fprintf(b, "%sArrayJoin (children 1)\n", indent)
-	fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(aj.Columns))
-	for _, col := range aj.Columns {
-		explainNode(b, col, depth+2)
-	}
-}
-
-// explainOrderByElement formats an order by element.
-func explainOrderByElement(b *strings.Builder, elem *OrderByElement, depth int) {
-	indent := strings.Repeat(" ", depth)
-
-	// Count children: expression + optional FillFrom, FillTo, FillStep
-	children := 1
-	if elem.FillFrom != nil {
-		children++
-	}
-	if elem.FillTo != nil {
-		children++
-	}
-	if elem.FillStep != nil {
-		children++
-	}
-
-	fmt.Fprintf(b, "%sOrderByElement (children %d)\n", indent, children)
-	explainNode(b, elem.Expression, depth+1)
-
-	if elem.FillFrom != nil {
-		explainNode(b, elem.FillFrom, depth+1)
-	}
-	if elem.FillTo != nil {
-		explainNode(b, elem.FillTo, depth+1)
-	}
-	if elem.FillStep != nil {
-		explainNode(b, elem.FillStep, depth+1)
-	}
-}
-
-// explainCaseExpr formats a CASE expression.
-func explainCaseExpr(b *strings.Builder, c *CaseExpr, depth int) {
-	indent := strings.Repeat(" ", depth)
-	// CASE is represented as multiIf or caseWithExpression
-	aliasSuffix := ""
-	if c.Alias != "" {
-		aliasSuffix = fmt.Sprintf(" (alias %s)", c.Alias)
-	}
-
-	if c.Operand != nil {
-		// CASE x WHEN ... -> caseWithExpression
-		children := 1 + len(c.Whens)*2
-		if c.Else != nil {
-			children++
-		}
-		fmt.Fprintf(b, "%sFunction caseWithExpression%s (children 1)\n", indent, aliasSuffix)
-		fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, children)
-		explainNode(b, c.Operand, depth+2)
-		for _, when := range c.Whens {
-			explainNode(b, when.Condition, depth+2)
-			explainNode(b, when.Result, depth+2)
-		}
-		if c.Else != nil {
-			explainNode(b, c.Else, depth+2)
-		}
-	} else {
-		// CASE WHEN ... -> multiIf
-		children := len(c.Whens) * 2
-		if c.Else != nil {
-			children++
-		}
-		fmt.Fprintf(b, "%sFunction multiIf%s (children 1)\n", indent, aliasSuffix)
-		fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, children)
-		for _, when := range c.Whens {
-			explainNode(b, when.Condition, depth+2)
-			explainNode(b, when.Result, depth+2)
-		}
-		if c.Else != nil {
-			explainNode(b, c.Else, depth+2)
-		}
-	}
-}
-
-// explainCastExpr formats a CAST expression.
-func explainCastExpr(b *strings.Builder, c *CastExpr, depth int) {
-	indent := strings.Repeat(" ", depth)
-	aliasSuffix := ""
-	if c.Alias != "" {
-		aliasSuffix = fmt.Sprintf(" (alias %s)", c.Alias)
-	}
-	fmt.Fprintf(b, "%sFunction CAST%s (children 1)\n", indent, aliasSuffix)
-	fmt.Fprintf(b, "%s ExpressionList (children 2)\n", indent)
-	// For :: operator syntax, the expression is output as a string literal
-	if c.OperatorSyntax {
-		if lit, ok := c.Expr.(*Literal); ok {
-			fmt.Fprintf(b, "%s Literal \\'%v\\'\n", indent, lit.Value)
-		} else {
-			explainNode(b, c.Expr, depth+2)
-		}
-	} else {
-		explainNode(b, c.Expr, depth+2)
-	}
-	// Type is represented as a Literal string
-	fmt.Fprintf(b, "%s Literal \\'%s\\'\n", indent, c.Type.Name)
-}
-
-// explainLambda formats a lambda expression.
-func explainLambda(b *strings.Builder, l *Lambda, depth int) {
-	indent := strings.Repeat(" ", depth)
-	fmt.Fprintf(b, "%sFunction lambda (children 1)\n", indent)
-	fmt.Fprintf(b, "%s ExpressionList (children 2)\n", indent)
-	// Parameters as tuple
-	fmt.Fprintf(b, "%s Function tuple (children 1)\n", indent)
-	fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(l.Parameters))
-	for _, param := range l.Parameters {
-		fmt.Fprintf(b, "%s Identifier %s\n", indent, param)
-	}
-	// Body
-	explainNode(b, l.Body, depth+2)
-}
-
-// countSelectQueryChildren counts the non-nil children of a SelectQuery.
-func countSelectQueryChildren(s *SelectQuery) int {
-	count := 0
-	if len(s.With) > 0 {
-		count++
-	}
-	if len(s.Columns) > 0 {
-		count++
-	}
-	// From and ArrayJoin are combined into one TablesInSelectQuery
-	if s.From != nil || s.ArrayJoin != nil {
-		count++
-	}
-	if s.PreWhere != nil {
-		count++
-	}
-	if s.Where != nil {
-		count++
-	}
-	if len(s.GroupBy) > 0 {
-		count++
-	}
-	if s.Having != nil {
-		count++
-	}
-	if len(s.OrderBy) > 0 {
-		count++
-	}
-	if s.Limit != nil {
-		count++
-	}
-	if s.Offset != nil {
-		count++
-	}
-	if len(s.Settings) > 0 {
-		count++
-	}
-	if len(s.Window) > 0 {
-		count++
-	}
-	return count
-}
-
-// explainTablesWithArrayJoin outputs TablesInSelectQuery with ArrayJoin integrated.
-func explainTablesWithArrayJoin(b *strings.Builder, from *TablesInSelectQuery, arrayJoin *ArrayJoinClause, depth int) {
-	indent := strings.Repeat(" ", depth)
-
-	tableCount := 0
-	if from != nil {
-		tableCount = len(from.Tables)
-	}
-	if arrayJoin != nil {
-		tableCount++
-	}
-
-	fmt.Fprintf(b, "%sTablesInSelectQuery (children %d)\n", indent, tableCount)
-
-	if from != nil {
-		for i, table := range from.Tables {
-			explainTablesInSelectQueryElement(b, table, i > 0, depth+1)
-		}
-	}
-
-	if arrayJoin != nil {
-		// ArrayJoin is output as a TablesInSelectQueryElement
-		fmt.Fprintf(b, "%s TablesInSelectQueryElement (children 1)\n", indent)
-		explainArrayJoinClause(b, arrayJoin, depth+2)
-	}
-}
-
-// binaryOpToFunction maps binary operators to their function names.
-func binaryOpToFunction(op string) string {
-	switch op {
-	case "+":
-		return "plus"
-	case "-":
-		return "minus"
-	case "*":
-		return "multiply"
-	case "/":
-		return "divide"
-	case "%":
-		return "modulo"
-	case "=", "==":
-		return "equals"
-	case "!=", "<>":
-		return "notEquals"
-	case "<":
-		return "less"
-	case "<=":
-		return "lessOrEquals"
-	case ">":
-		return "greater"
-	case ">=":
-		return "greaterOrEquals"
-	case "<=>":
-		return "isNotDistinctFrom"
-	case "AND":
-		return "and"
-	case "OR":
-		return "or"
-	case "LIKE":
-		return "like"
-	case "ILIKE":
-		return "ilike"
-	case "NOT LIKE":
-		return "notLike"
-	case "NOT ILIKE":
-		return "notILike"
-	case "IN":
-		return "in"
-	case "NOT IN":
-		return "notIn"
-	case "GLOBAL IN":
-		return "globalIn"
-	case "GLOBAL NOT IN":
-		return "globalNotIn"
-	default:
-		return op
-	}
-}
-
-// unaryOpToFunction maps unary operators to their function names.
-func unaryOpToFunction(op string) string {
-	switch op {
-	case "-":
-		return "negate"
-	case "NOT":
-		return "not"
-	case "~":
-		return "bitNot"
-	default:
-		return op
-	}
-}
-
-// extractFieldToFunction maps EXTRACT fields to function names.
-func extractFieldToFunction(field string) string {
-	switch strings.ToUpper(field) {
-	case "YEAR":
-		return "toYear"
-	case "MONTH":
-		return "toMonth"
-	case "DAY":
-		return "toDayOfMonth"
-	case "HOUR":
-		return "toHour"
-	case "MINUTE":
-		return "toMinute"
-	case "SECOND":
-		return "toSecond"
-	default:
-		return "to" + strings.Title(strings.ToLower(field))
-	}
-}
-
-// normalizeAlterCommandType normalizes ALTER command types to match ClickHouse output.
-func normalizeAlterCommandType(t AlterCommandType) string {
-	switch t {
-	case AlterFreeze:
-		return "FREEZE_ALL"
-	case AlterDetachPartition:
-		return "DROP_PARTITION"
-	case AlterClearIndex:
-		return "DROP_INDEX"
-	default:
-		return string(t)
-	}
-}
-
-// explainAlterCommand formats an ALTER command.
-func explainAlterCommand(b *strings.Builder, cmd *AlterCommand, depth int) {
-	indent := strings.Repeat(" ", depth)
-
-	children := 0
-	if cmd.Column != nil {
-		children++
-	}
-	if cmd.ColumnName != "" && cmd.Type != AlterAddColumn && cmd.Type != AlterModifyColumn {
-		children++
-	}
-	if cmd.NewName != "" {
-		children++
-	}
-	if cmd.AfterColumn != "" {
-		children++
-	}
-	if cmd.Constraint != nil {
-		children++
-	}
-	if cmd.IndexExpr != nil {
-		children++
-	}
-	if cmd.Partition != nil {
-		children++
-	}
-	if cmd.Index != "" && cmd.IndexExpr == nil {
-		children++
-	}
-	// Don't count ConstraintName for ADD_CONSTRAINT as it's part of the Constraint structure
-	if cmd.ConstraintName != "" && cmd.Type != AlterAddConstraint {
-		children++
-	}
-	if cmd.TTL != nil {
-		children++
-	}
-
-	cmdType := normalizeAlterCommandType(cmd.Type)
-	if children > 0 {
-		fmt.Fprintf(b, "%sAlterCommand %s (children %d)\n", indent, cmdType, children)
-	} else {
-		fmt.Fprintf(b, "%sAlterCommand %s\n", indent, cmdType)
-	}
-
-	if cmd.Column != nil {
-		explainColumnDeclaration(b, cmd.Column, depth+1)
-	}
-	if cmd.ColumnName != "" && cmd.Type != AlterAddColumn && cmd.Type != AlterModifyColumn {
-		fmt.Fprintf(b, "%s Identifier %s\n", indent, cmd.ColumnName)
-	}
-	if cmd.NewName != "" {
-		fmt.Fprintf(b, "%s Identifier %s\n", indent, cmd.NewName)
-	}
-	if cmd.AfterColumn != "" {
-		fmt.Fprintf(b, "%s Identifier %s\n", indent, cmd.AfterColumn)
-	}
-	if cmd.Constraint != nil {
-		explainConstraint(b, cmd.Constraint, depth+1)
-	}
-	if cmd.IndexExpr != nil {
-		fmt.Fprintf(b, "%s Index (children 2)\n", indent)
-		explainNode(b, cmd.IndexExpr, depth+2)
-		if cmd.IndexType != "" {
-			fmt.Fprintf(b, "%s Function %s (children 1)\n", indent, cmd.IndexType)
-			fmt.Fprintf(b, "%s ExpressionList\n", indent)
-		}
-	}
-	if cmd.Partition != nil {
-		fmt.Fprintf(b, "%s Partition (children 1)\n", indent)
-		explainNode(b, cmd.Partition, depth+2)
-	}
-	if cmd.Index != "" && cmd.IndexExpr == nil {
-		fmt.Fprintf(b, "%s Identifier %s\n", indent, cmd.Index)
-	}
-	// Don't output ConstraintName for ADD_CONSTRAINT
-	if cmd.ConstraintName != "" && cmd.Type != AlterAddConstraint {
-		fmt.Fprintf(b, "%s Identifier %s\n", indent, cmd.ConstraintName)
-	}
-	if cmd.TTL != nil {
-		fmt.Fprintf(b, "%s ExpressionList (children 1)\n", indent)
-		fmt.Fprintf(b, "%s TTLElement (children 1)\n", indent)
-		explainNode(b, cmd.TTL.Expression, depth+3)
-	}
-}
-
-// explainColumnDeclaration formats a column declaration.
-func explainColumnDeclaration(b *strings.Builder, col *ColumnDeclaration, depth int) {
-	indent := strings.Repeat(" ", depth)
-
-	children := 0
-	if col.Type != nil {
-		children++
-	}
-	if col.Default != nil {
-		children++
-	}
-	if col.Codec != nil {
-		children++
-	}
-	if col.Comment != "" {
-		children++
-	}
-
-	fmt.Fprintf(b, "%sColumnDeclaration %s (children %d)\n", indent, col.Name, children)
-	if col.Type != nil {
-		fmt.Fprintf(b, "%s DataType %s\n", indent, col.Type.Name)
-	}
-	if col.Comment != "" {
-		fmt.Fprintf(b, "%s Literal \\'%s\\'\n", indent, col.Comment)
-	}
-	if col.Default != nil {
-		explainNode(b, col.Default, depth+1)
-	}
-	if col.Codec != nil {
-		explainCodec(b, col.Codec, depth+1)
-	}
-}
-
-// explainCodec formats a CODEC expression.
-func explainCodec(b *strings.Builder, codec *CodecExpr, depth int) {
-	indent := strings.Repeat(" ", depth)
-	fmt.Fprintf(b, "%sFunction CODEC (children 1)\n", indent)
-	fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(codec.Codecs))
-	for _, c := range codec.Codecs {
-		if len(c.Arguments) == 0 {
-			fmt.Fprintf(b, "%s Function %s\n", indent, c.Name)
-		} else {
-			fmt.Fprintf(b, "%s Function %s (children 1)\n", indent, c.Name)
-			fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(c.Arguments))
-			for _, arg := range c.Arguments {
-				explainNode(b, arg, depth+4)
-			}
-		}
-	}
-}
-
-// explainConstraint formats a constraint.
-func explainConstraint(b *strings.Builder, c *Constraint, depth int) {
-	indent := strings.Repeat(" ", depth)
-	fmt.Fprintf(b, "%sConstraint (children 1)\n", indent)
-	explainNode(b, c.Expression, depth+1)
-}
-
-// explainCreateQuery formats a CREATE query.
-func explainCreateQuery(b *strings.Builder, n *CreateQuery, depth int) {
-	indent := strings.Repeat(" ", depth)
-
-	if n.CreateDatabase {
-		children := 1
-		if n.Engine != nil {
-			children++
-		}
-		fmt.Fprintf(b, "%sCreateQuery %s (children %d)\n", indent, n.Database, children)
-		fmt.Fprintf(b, "%s Identifier %s\n", indent, n.Database)
-		if n.Engine != nil {
-			fmt.Fprintf(b, "%s Storage definition (children 1)\n", indent)
-			fmt.Fprintf(b, "%s Function %s\n", indent, n.Engine.Name)
-		}
-		return
-	}
-
-	var name string
-	if n.View != "" {
-		name = n.View
-	} else {
-		name = n.Table
-	}
-	if n.Database != "" {
-		name = n.Database + "." + name
-	}
-
-	// For materialized views, handle specially
-	if n.View != "" {
-		explainCreateView(b, n, name, depth)
-		return
-	}
-
-	children := 1 // identifier
-	if len(n.Columns) > 0 {
-		children++
-	}
-	if n.Engine != nil || len(n.OrderBy) > 0 || n.PartitionBy != nil || len(n.PrimaryKey) > 0 || len(n.Settings) > 0 {
-		children++
-	}
-	if n.AsSelect != nil {
-		children++
-	}
-
-	fmt.Fprintf(b, "%sCreateQuery %s (children %d)\n", indent, name, children)
-	fmt.Fprintf(b, "%s Identifier %s\n", indent, name)
-
-	if len(n.Columns) > 0 {
-		fmt.Fprintf(b, "%s Columns definition (children 1)\n", indent)
-		fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(n.Columns))
-		for _, col := range n.Columns {
-			explainColumnDeclaration(b, col, depth+3)
-		}
-	}
-
-	if n.Engine != nil || len(n.OrderBy) > 0 || n.PartitionBy != nil || len(n.PrimaryKey) > 0 || len(n.Settings) > 0 {
-		storageChildren := 0
-		if n.Engine != nil {
-			storageChildren++
-		}
-		if n.PartitionBy != nil {
-			storageChildren++
-		}
-		if len(n.PrimaryKey) > 0 {
-			storageChildren++
-		}
-		if len(n.OrderBy) > 0 {
-			storageChildren++
-		}
-		if len(n.Settings) > 0 {
-			storageChildren++
-		}
-		fmt.Fprintf(b, "%s Storage definition (children %d)\n", indent, storageChildren)
-		if n.Engine != nil {
-			if len(n.Engine.Parameters) == 0 && !n.Engine.HasParentheses {
-				fmt.Fprintf(b, "%s Function %s\n", indent, n.Engine.Name)
-			} else if len(n.Engine.Parameters) == 0 && n.Engine.HasParentheses {
-				fmt.Fprintf(b, "%s Function %s (children 1)\n", indent, n.Engine.Name)
-				fmt.Fprintf(b, "%s ExpressionList\n", indent)
-			} else {
-				fmt.Fprintf(b, "%s Function %s (children 1)\n", indent, n.Engine.Name)
-				fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(n.Engine.Parameters))
-				for _, p := range n.Engine.Parameters {
-					explainNode(b, p, depth+4)
-				}
-			}
-		}
-		if n.PartitionBy != nil {
-			explainNode(b, n.PartitionBy, depth+2)
-		}
-		if len(n.PrimaryKey) > 0 {
-			// For simple PRIMARY KEY, just output the identifier
-			if len(n.PrimaryKey) == 1 {
-				if id, ok := n.PrimaryKey[0].(*Identifier); ok {
-					fmt.Fprintf(b, "%s Identifier %s\n", indent, id.Name())
-				} else {
-					explainNode(b, n.PrimaryKey[0], depth+2)
-				}
-			} else {
-				fmt.Fprintf(b, "%s Function tuple (children 1)\n", indent)
-				fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(n.PrimaryKey))
-				for _, expr := range n.PrimaryKey {
-					explainNode(b, expr, depth+4)
-				}
-			}
-		}
-		if len(n.OrderBy) > 0 {
-			// For simple ORDER BY, just output the identifier
-			if len(n.OrderBy) == 1 {
-				if id, ok := n.OrderBy[0].(*Identifier); ok {
-					fmt.Fprintf(b, "%s Identifier %s\n", indent, id.Name())
-				} else {
-					explainNode(b, n.OrderBy[0], depth+2)
-				}
-			} else {
-				fmt.Fprintf(b, "%s Function tuple (children 1)\n", indent)
-				fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(n.OrderBy))
-				for _, expr := range n.OrderBy {
-					explainNode(b, expr, depth+4)
-				}
-			}
-		}
-		if len(n.Settings) > 0 {
-			fmt.Fprintf(b, "%s Set\n", indent)
-		}
-	}
-
-	if n.AsSelect != nil {
-		explainNode(b, n.AsSelect, depth+1)
-	}
-}
-
-// explainTablesInSelectQueryElement formats a table element with optional implicit join.
-func explainTablesInSelectQueryElement(b *strings.Builder, elem *TablesInSelectQueryElement, isImplicitJoin bool, depth int) {
-	indent := strings.Repeat(" ", depth)
-
-	children := 0
-	if elem.Table != nil {
-		children++
-	}
-	if elem.Join != nil {
-		children++
-	} else if isImplicitJoin {
-		// For implicit cross joins (comma-separated tables), add an empty TableJoin
-		children++
-	}
-
-	fmt.Fprintf(b, "%sTablesInSelectQueryElement (children %d)\n", indent, children)
-	if elem.Table != nil {
-		explainNode(b, elem.Table, depth+1)
-	}
-	if elem.Join != nil {
-		explainTableJoin(b, elem.Join, depth+1)
-	} else if isImplicitJoin {
-		// Output empty TableJoin for implicit cross join
-		fmt.Fprintf(b, "%s TableJoin\n", indent)
-	}
-}
-
-// explainSampleClause formats a SAMPLE clause.
-func explainSampleClause(b *strings.Builder, sample *SampleClause, depth int) {
-	indent := strings.Repeat(" ", depth)
-	fmt.Fprintf(b, "%sSampleRatio %s\n", indent, formatSampleRatio(sample.Ratio))
-	if sample.Offset != nil {
-		fmt.Fprintf(b, "%sSampleRatio %s\n", indent, formatSampleRatio(sample.Offset))
-	}
-}
-
-// formatSampleRatio formats a sample ratio expression.
-func formatSampleRatio(expr Expression) string {
-	switch e := expr.(type) {
-	case *Literal:
-		if e.Type == LiteralInteger {
-			return fmt.Sprintf("%v", e.Value)
-		}
-		if e.Type == LiteralFloat {
-			// Convert float to fraction
-			return floatToFraction(e.Value.(float64))
-		}
-		return fmt.Sprintf("%v", e.Value)
-	case *BinaryExpr:
-		// For division, format as "numerator / denominator"
-		if e.Op == "/" {
-			left := formatSampleRatio(e.Left)
-			right := formatSampleRatio(e.Right)
-			return fmt.Sprintf("%s / %s", left, right)
-		}
-	}
-	return fmt.Sprintf("%v", expr)
-}
-
-// normalizeExplainType normalizes EXPLAIN type for output.
-func normalizeExplainType(t string) string {
-	switch strings.ToUpper(t) {
-	case "", "PLAN":
-		return "EXPLAIN"
-	case "AST":
-		return "EXPLAIN AST"
-	case "SYNTAX":
-		return "EXPLAIN SYNTAX"
-	case "PIPELINE":
-		return "EXPLAIN PIPELINE"
-	default:
-		return "EXPLAIN " + t
-	}
-}
-
-// showTypeToName maps ShowType to EXPLAIN AST output name.
-func showTypeToName(t ShowType) string {
-	switch t {
-	case ShowTables:
-		return "ShowTables"
-	case ShowDatabases:
-		return "ShowTables"
-	case ShowProcesses:
-		return "ShowProcessList"
-	case ShowCreate:
-		return "ShowCreate"
-	case ShowCreateDB:
-		return "ShowCreate"
-	case ShowColumns:
-		return "ShowColumns"
-	case ShowDictionaries:
-		return "ShowTables"
-	default:
-		return "ShowTables"
-	}
-}
-
-// floatToFraction converts a float to a fraction string.
-func floatToFraction(f float64) string {
-	// Handle common fractions
-	if f == 0.1 {
-		return "1 / 10"
-	}
-	if f == 0.5 {
-		return "5 / 10"
-	}
-	if f == 0.25 {
-		return "25 / 100"
-	}
-	// For other floats, just return as is for now
-	return fmt.Sprintf("%v", f)
-}
-
-// explainCreateView formats a CREATE VIEW or MATERIALIZED VIEW query.
-func explainCreateView(b *strings.Builder, n *CreateQuery, name string, depth int) {
-	indent := strings.Repeat(" ", depth)
-
-	children := 1 // identifier
-	if n.AsSelect != nil {
-		children++
-	}
-	if n.Engine != nil {
-		children++ // ViewTargets
-	}
-
-	fmt.Fprintf(b, "%sCreateQuery %s (children %d)\n", indent, name, children)
-	fmt.Fprintf(b, "%s Identifier %s\n", indent, name)
-
-	// For views, the AS SELECT comes before storage/ViewTargets
-	if n.AsSelect != nil {
-		explainNode(b, n.AsSelect, depth+1)
-	}
-
-	// Storage is wrapped in ViewTargets for views
-	if n.Engine != nil {
-		fmt.Fprintf(b, "%s ViewTargets (children 1)\n", indent)
-		fmt.Fprintf(b, "%s Storage definition (children 1)\n", indent)
-		if len(n.Engine.Parameters) == 0 {
-			fmt.Fprintf(b, "%s Function %s\n", indent, n.Engine.Name)
-		} else {
-			fmt.Fprintf(b, "%s Function %s (children 1)\n", indent, n.Engine.Name)
-			fmt.Fprintf(b, "%s ExpressionList (children %d)\n", indent, len(n.Engine.Parameters))
-			for _, p := range n.Engine.Parameters {
-				explainNode(b, p, depth+5)
-			}
-		}
-	}
-}
diff --git a/ast/explain_test.go b/ast/explain_test.go
deleted file mode 100644
index 7ea997a23..000000000
--- a/ast/explain_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package ast_test
-
-import (
-	"context"
-	"encoding/json"
-	"os"
-	"path/filepath"
-	"strings"
-	"testing"
-
-	"github.com/kyleconroy/doubleclick/ast"
-	"github.com/kyleconroy/doubleclick/parser"
-)
-
-// testMetadata holds optional metadata for a test case
-type testMetadata struct {
-	Todo   bool   `json:"todo,omitempty"`
-	Source string `json:"source,omitempty"`
-}
-
-func TestExplain(t *testing.T) {
-	testdataDir := "../parser/testdata"
-
-	entries, err := os.ReadDir(testdataDir)
-	if err != nil {
-		t.Fatalf("Failed to read testdata directory: %v", err)
-	}
-
-	for _, entry := range entries {
-		if !entry.IsDir() {
-			continue
-		}
-
-		testName := entry.Name()
-		testDir := filepath.Join(testdataDir, testName)
-
-		// Check if explain.txt exists
-		explainPath := filepath.Join(testDir, "explain.txt")
-		explainBytes, err := os.ReadFile(explainPath)
-		if err != nil {
-			continue // Skip test cases without explain.txt
-		}
-		expected := string(explainBytes)
-
-		t.Run(testName, func(t *testing.T) {
-			// Read optional metadata
-			var metadata testMetadata
-			metadataPath := filepath.Join(testDir, "metadata.json")
-			if metadataBytes, err := os.ReadFile(metadataPath); err == nil {
-				if err := json.Unmarshal(metadataBytes, &metadata); err != nil {
-					t.Fatalf("Failed to parse metadata.json: %v", err)
-				}
-			}
-
-			// Read the query
-			queryPath := filepath.Join(testDir, "query.sql")
-			queryBytes, err := os.ReadFile(queryPath)
-			if err != nil {
-				t.Fatalf("Failed to read query.sql: %v", err)
-			}
-			query := strings.TrimSpace(string(queryBytes))
-
-			// Parse the query
-			stmts, err := parser.Parse(context.Background(), strings.NewReader(query))
-			if err != nil {
-				t.Skipf("Parse error (skipping): %v", err)
-				return
-			}
-
-			if len(stmts) == 0 {
-				t.Fatalf("Expected at least 1 statement, got 0")
-			}
-
-			// Generate explain output
-			got := ast.Explain(stmts[0])
-
-			// Compare
-			if got != expected {
-				if metadata.Todo {
-					t.Skipf("TODO: Explain output mismatch (skipping)")
-				}
-				t.Errorf("Explain output mismatch\nQuery: %s\n\nExpected:\n%s\nGot:\n%s", query, expected, got)
-			}
-		})
-	}
-}
diff --git a/parser/parser_test.go b/parser/parser_test.go
index 9f5f8ac08..203ae70db 100644
--- a/parser/parser_test.go
+++ b/parser/parser_test.go
@@ -18,12 +18,30 @@ type testMetadata struct {
 	Source string `json:"source,omitempty"`
 }
 
+// astJSON represents the structure of ast.json from ClickHouse EXPLAIN AST
+type astJSON struct {
+	Meta []struct {
+		Name string `json:"name"`
+		Type string `json:"type"`
+	} `json:"meta"`
+	Data []struct {
+		Explain string `json:"explain"`
+	} `json:"data"`
+	Rows       int `json:"rows"`
+	Statistics struct {
+		Elapsed   float64 `json:"elapsed"`
+		RowsRead  int     `json:"rows_read"`
+		BytesRead int     `json:"bytes_read"`
+	} `json:"statistics"`
+	Error bool `json:"error,omitempty"`
+}
+
 // TestParser tests the parser using test cases from the testdata directory.
 // Each subdirectory in testdata represents a test case with:
 //   - query.sql: The SQL query to parse
+//   - ast.json: Expected AST from ClickHouse EXPLAIN AST
 //   - metadata.json (optional): Metadata including:
 //   - todo: true if the test is not yet expected to pass
-//   - source: URL to the source file in ClickHouse repository
 func TestParser(t *testing.T) {
 	testdataDir := "testdata"
 
@@ -37,21 +55,24 @@
 			continue
 		}
 
-		testName := entry.Name()
-		testDir := filepath.Join(testdataDir, testName)
+		testDir := filepath.Join(testdataDir, entry.Name())
+
+		t.Run(entry.Name(), func(t *testing.T) {
+			t.Parallel()
 
-		t.Run(testName, func(t *testing.T) {
 			// Create context with 1 second timeout
 			ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
 			defer cancel()
 
-			// Read the query
+			// Read the query (only first line, as ast.json was generated from first statement)
 			queryPath := filepath.Join(testDir, "query.sql")
 			queryBytes, err := os.ReadFile(queryPath)
 			if err != nil {
 				t.Fatalf("Failed to read query.sql: %v", err)
 			}
-			query := strings.TrimSpace(string(queryBytes))
+			// Get first line only (ast.json contains AST for first statement)
+			lines := strings.SplitN(string(queryBytes), "\n", 2)
+			query := strings.TrimSpace(lines[0])
 
 			// Read optional metadata
 			var metadata testMetadata
@@ -62,9 +83,19 @@
 				}
 			}
 
-			// Log source if available
-			if metadata.Source != "" {
-				t.Logf("Source: %s", metadata.Source)
+			// Read expected AST from ClickHouse
+			var expectedAST astJSON
+			astPath := filepath.Join(testDir, "ast.json")
+			if astBytes, err := os.ReadFile(astPath); err == nil {
+				if err := json.Unmarshal(astBytes, &expectedAST); err != nil {
+					t.Fatalf("Failed to parse ast.json: %v", err)
+				}
+			}
+
+			// Skip tests where ClickHouse also couldn't parse the query
+			if expectedAST.Error {
+				t.Skipf("ClickHouse also failed to parse this query")
+				return
 			}
 
 			// Parse the query
@@ -94,6 +125,8 @@
 				}
 				t.Fatalf("JSON marshal error: %v\nQuery: %s", jsonErr, query)
 			}
+
+			// TODO: Compare parsed AST against expectedAST.Data
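+			// A possible comparison sketch (hypothetical, not part of this
+			// change): collect the "explain" rows from ast.json into the
+			// expected text, then diff it against a rendering of stmts[0]
+			// in the same one-space-indented format, e.g.:
+			//
+			//	var want []string
+			//	for _, row := range expectedAST.Data {
+			//		want = append(want, row.Explain)
+			//	}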
 		})
 	}
 }
diff --git a/parser/testdata/00001_count_hits/ast.json b/parser/testdata/00001_count_hits/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00001_count_hits/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/add_column_if_not_exists/metadata.json b/parser/testdata/00001_count_hits/metadata.json
similarity index 100%
rename from parser/testdata/add_column_if_not_exists/metadata.json
rename to parser/testdata/00001_count_hits/metadata.json
diff --git a/parser/testdata/00001_count_hits/query.sql b/parser/testdata/00001_count_hits/query.sql
new file mode 100644
index 000000000..16a52624f
--- /dev/null
+++ b/parser/testdata/00001_count_hits/query.sql
@@ -0,0 +1,2 @@
+-- Tags: stateful
+SELECT count() FROM test.hits
diff --git a/parser/testdata/00001_select_1/ast.json b/parser/testdata/00001_select_1/ast.json
new file mode 100644
index 000000000..71091f417
--- /dev/null
+++ b/parser/testdata/00001_select_1/ast.json
@@ -0,0 +1,37 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Literal UInt64_1"
+		}
+	],
+
+	"rows": 5,
+
+	"statistics":
+	{
+		"elapsed": 0.001936758,
+		"rows_read": 5,
+		"bytes_read": 177
+	}
+}
diff --git a/parser/testdata/add_constraint/metadata.json b/parser/testdata/00001_select_1/metadata.json
similarity index 100%
rename from parser/testdata/add_constraint/metadata.json
rename to parser/testdata/00001_select_1/metadata.json
diff --git a/parser/testdata/simple_select/query.sql b/parser/testdata/00001_select_1/query.sql
similarity index 100%
rename from parser/testdata/simple_select/query.sql
rename to parser/testdata/00001_select_1/query.sql
diff --git a/parser/testdata/00002_count_visits/ast.json b/parser/testdata/00002_count_visits/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00002_count_visits/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/add_index/metadata.json b/parser/testdata/00002_count_visits/metadata.json
similarity index 100%
rename from parser/testdata/add_index/metadata.json
rename to parser/testdata/00002_count_visits/metadata.json
diff --git a/parser/testdata/00002_count_visits/query.sql b/parser/testdata/00002_count_visits/query.sql
new file mode 100644
index 000000000..fc8dd4f4e
--- /dev/null
+++ b/parser/testdata/00002_count_visits/query.sql
@@ -0,0 +1,2 @@
+-- Tags: stateful
+SELECT sum(Sign) FROM test.visits
diff --git a/parser/testdata/00002_system_numbers/ast.json b/parser/testdata/00002_system_numbers/ast.json
new file mode 100644
index 000000000..df3bd2cb2
--- /dev/null
+++ b/parser/testdata/00002_system_numbers/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001602544,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/array_constructor/metadata.json b/parser/testdata/00002_system_numbers/metadata.json
similarity index 100%
rename from parser/testdata/array_constructor/metadata.json
rename to parser/testdata/00002_system_numbers/metadata.json
diff --git a/parser/testdata/00002_system_numbers/query.sql b/parser/testdata/00002_system_numbers/query.sql
new file mode 100644
index 000000000..1710a0d6a
--- /dev/null +++ b/parser/testdata/00002_system_numbers/query.sql @@ -0,0 +1,14 @@ +SET send_logs_level = 'fatal'; + +SELECT * FROM system.numbers LIMIT 3; +SELECT sys_num.number FROM system.numbers AS sys_num WHERE number > 2 LIMIT 2; +SELECT number FROM system.numbers WHERE number >= 5 LIMIT 2; +SELECT * FROM system.numbers WHERE number == 7 LIMIT 1; +SELECT number AS n FROM system.numbers WHERE number IN(8, 9) LIMIT 2; +select number from system.numbers limit 0; +select x from system.numbers limit 1; -- { serverError UNKNOWN_IDENTIFIER } +SELECT x, number FROM system.numbers LIMIT 1; -- { serverError UNKNOWN_IDENTIFIER } +SELECT * FROM system.number LIMIT 1; -- { serverError UNKNOWN_TABLE } +SELECT * FROM system LIMIT 1; -- { serverError UNKNOWN_TABLE } +SELECT * FROM numbers LIMIT 1; -- { serverError UNKNOWN_TABLE } +SELECT sys.number FROM system.numbers AS sys_num LIMIT 1; -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/00003_reinterpret_as_string/ast.json b/parser/testdata/00003_reinterpret_as_string/ast.json new file mode 100644 index 000000000..852f67e0a --- /dev/null +++ b/parser/testdata/00003_reinterpret_as_string/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function reinterpretAsString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal 'Ё'" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001337951, + "rows_read": 16, + "bytes_read": 614 + } +} diff --git a/parser/testdata/arrayall_lambda/metadata.json b/parser/testdata/00003_reinterpret_as_string/metadata.json similarity index 100% rename from parser/testdata/arrayall_lambda/metadata.json rename to parser/testdata/00003_reinterpret_as_string/metadata.json diff --git a/parser/testdata/00003_reinterpret_as_string/query.sql b/parser/testdata/00003_reinterpret_as_string/query.sql new file mode 100644 index 000000000..1204f6280 --- /dev/null +++ b/parser/testdata/00003_reinterpret_as_string/query.sql @@ -0,0 +1 @@ +SELECT number FROM system.numbers WHERE reinterpretAsString(number) = 'Ё' LIMIT 1 diff --git a/parser/testdata/00004_shard_format_ast_and_remote_table/ast.json b/parser/testdata/00004_shard_format_ast_and_remote_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00004_shard_format_ast_and_remote_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/arrayexists_lambda/metadata.json b/parser/testdata/00004_shard_format_ast_and_remote_table/metadata.json similarity index 100% rename from parser/testdata/arrayexists_lambda/metadata.json rename to parser/testdata/00004_shard_format_ast_and_remote_table/metadata.json diff --git a/parser/testdata/00004_shard_format_ast_and_remote_table/query.sql 
b/parser/testdata/00004_shard_format_ast_and_remote_table/query.sql new file mode 100644 index 000000000..8bc1c39b5 --- /dev/null +++ b/parser/testdata/00004_shard_format_ast_and_remote_table/query.sql @@ -0,0 +1,3 @@ +-- Tags: shard + +SELECT (dummy AS x) - 1 FROM remote('127.0.0.{2,3}', system, one) diff --git a/parser/testdata/00004_top_counters/ast.json b/parser/testdata/00004_top_counters/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00004_top_counters/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/arrayfilter_lambda/metadata.json b/parser/testdata/00004_top_counters/metadata.json similarity index 100% rename from parser/testdata/arrayfilter_lambda/metadata.json rename to parser/testdata/00004_top_counters/metadata.json diff --git a/parser/testdata/00004_top_counters/query.sql b/parser/testdata/00004_top_counters/query.sql new file mode 100644 index 000000000..9ed7ae258 --- /dev/null +++ b/parser/testdata/00004_top_counters/query.sql @@ -0,0 +1,3 @@ +-- Tags: stateful +SELECT CounterID, count() AS c FROM test.hits GROUP BY CounterID ORDER BY c DESC LIMIT 10; +SELECT CounterID, count() AS c FROM test.hits GROUP BY CounterID ORDER BY c DESC LIMIT 10 SETTINGS optimize_aggregation_in_order = 1 diff --git a/parser/testdata/00005_filtering/ast.json b/parser/testdata/00005_filtering/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00005_filtering/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/arrayfirst_lambda/metadata.json b/parser/testdata/00005_filtering/metadata.json similarity index 100% rename from parser/testdata/arrayfirst_lambda/metadata.json rename to parser/testdata/00005_filtering/metadata.json diff --git a/parser/testdata/00005_filtering/query.sql b/parser/testdata/00005_filtering/query.sql new file mode 100644 index 000000000..22425ab5c --- /dev/null +++ b/parser/testdata/00005_filtering/query.sql @@ -0,0 +1,3 @@ +-- Tags: stateful +SELECT count() FROM test.hits WHERE AdvEngineID != 0 + diff --git a/parser/testdata/00005_shard_format_ast_and_remote_table_lambda/ast.json b/parser/testdata/00005_shard_format_ast_and_remote_table_lambda/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00005_shard_format_ast_and_remote_table_lambda/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/arraymap_lambda/metadata.json b/parser/testdata/00005_shard_format_ast_and_remote_table_lambda/metadata.json similarity index 100% rename from parser/testdata/arraymap_lambda/metadata.json rename to parser/testdata/00005_shard_format_ast_and_remote_table_lambda/metadata.json diff --git a/parser/testdata/00005_shard_format_ast_and_remote_table_lambda/query.sql b/parser/testdata/00005_shard_format_ast_and_remote_table_lambda/query.sql new file mode 100644 index 000000000..7f236212a --- /dev/null +++ b/parser/testdata/00005_shard_format_ast_and_remote_table_lambda/query.sql @@ -0,0 +1,3 @@ +-- Tags: shard + +SELECT count() FROM remote('127.0.0.{2,3}', system, one) WHERE arrayExists((x) -> x = 1, [1, 2, 3]) diff --git a/parser/testdata/00006_agregates/ast.json b/parser/testdata/00006_agregates/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00006_agregates/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/arraysplit_lambda/metadata.json b/parser/testdata/00006_agregates/metadata.json similarity index 100% rename from 
parser/testdata/arraysplit_lambda/metadata.json rename to parser/testdata/00006_agregates/metadata.json diff --git a/parser/testdata/00006_agregates/query.sql b/parser/testdata/00006_agregates/query.sql new file mode 100644 index 000000000..9d56f09cd --- /dev/null +++ b/parser/testdata/00006_agregates/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT sum(AdvEngineID), count(), avg(ResolutionWidth) FROM test.hits diff --git a/parser/testdata/00006_extremes_and_subquery_from/ast.json b/parser/testdata/00006_extremes_and_subquery_from/ast.json new file mode 100644 index 000000000..d3b66b692 --- /dev/null +++ b/parser/testdata/00006_extremes_and_subquery_from/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001759471, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/asterisk_with_except/metadata.json b/parser/testdata/00006_extremes_and_subquery_from/metadata.json similarity index 100% rename from parser/testdata/asterisk_with_except/metadata.json rename to parser/testdata/00006_extremes_and_subquery_from/metadata.json diff --git a/parser/testdata/00006_extremes_and_subquery_from/query.sql b/parser/testdata/00006_extremes_and_subquery_from/query.sql new file mode 100644 index 000000000..21f26a5f5 --- /dev/null +++ b/parser/testdata/00006_extremes_and_subquery_from/query.sql @@ -0,0 +1,4 @@ +SET output_format_write_statistics = 0; +SET extremes = 1; +SELECT 'Hello, world' FROM (SELECT number FROM system.numbers LIMIT 10) WHERE number < 0 +FORMAT JSONCompact; diff --git a/parser/testdata/00007_array/ast.json b/parser/testdata/00007_array/ast.json new file mode 100644 index 000000000..e6a9fc2e4 --- /dev/null +++ b/parser/testdata/00007_array/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['Hello', 'Goodbye']" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001963539, + "rows_read": 5, + "bytes_read": 195 + } +} diff --git a/parser/testdata/asterisk_with_replace/metadata.json b/parser/testdata/00007_array/metadata.json similarity index 100% rename from parser/testdata/asterisk_with_replace/metadata.json rename to parser/testdata/00007_array/metadata.json diff --git a/parser/testdata/00007_array/query.sql b/parser/testdata/00007_array/query.sql new file mode 100644 index 000000000..cf53e8f78 --- /dev/null +++ b/parser/testdata/00007_array/query.sql @@ -0,0 +1,3 @@ +SELECT ['Hello', 'Goodbye']; +SELECT ['Hello'], ['Goodbye']; +SELECT []; diff --git a/parser/testdata/00007_uniq/ast.json b/parser/testdata/00007_uniq/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00007_uniq/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/cast_function/metadata.json b/parser/testdata/00007_uniq/metadata.json similarity index 100% rename from parser/testdata/cast_function/metadata.json rename to parser/testdata/00007_uniq/metadata.json diff --git a/parser/testdata/00007_uniq/query.sql b/parser/testdata/00007_uniq/query.sql new file mode 100644 index 000000000..c47a33275 --- /dev/null +++ b/parser/testdata/00007_uniq/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful 
+SELECT RegionID, uniq(UserID) AS u FROM test.hits WHERE CounterID = 800784 GROUP BY RegionID ORDER BY u DESC, RegionID LIMIT 10 -- nothing diff --git a/parser/testdata/00008_array_join/ast.json b/parser/testdata/00008_array_join/ast.json new file mode 100644 index 000000000..300a78586 --- /dev/null +++ b/parser/testdata/00008_array_join/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['Hello', 'Goodbye']" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001753708, + "rows_read": 7, + "bytes_read": 280 + } +} diff --git a/parser/testdata/cast_operator/metadata.json b/parser/testdata/00008_array_join/metadata.json similarity index 100% rename from parser/testdata/cast_operator/metadata.json rename to parser/testdata/00008_array_join/metadata.json diff --git a/parser/testdata/array_join_basic/query.sql b/parser/testdata/00008_array_join/query.sql similarity index 100% rename from parser/testdata/array_join_basic/query.sql rename to parser/testdata/00008_array_join/query.sql diff --git a/parser/testdata/00008_uniq/ast.json b/parser/testdata/00008_uniq/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00008_uniq/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/clear_index/metadata.json b/parser/testdata/00008_uniq/metadata.json similarity index 100% rename from parser/testdata/clear_index/metadata.json rename to parser/testdata/00008_uniq/metadata.json diff --git a/parser/testdata/00008_uniq/query.sql b/parser/testdata/00008_uniq/query.sql new file mode 100644 index 000000000..25d50ed34 --- /dev/null +++ b/parser/testdata/00008_uniq/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT uniq(UserID), uniqIf(UserID, CounterID = 800784), uniqIf(FUniqID, RegionID = 213) FROM test.hits diff --git a/parser/testdata/00009_array_join_subquery/ast.json b/parser/testdata/00009_array_join_subquery/ast.json new file mode 100644 index 000000000..597094874 --- /dev/null +++ b/parser/testdata/00009_array_join_subquery/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['Hello', 'Goodbye']" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001987539, + "rows_read": 16, + "bytes_read": 682 + } +} diff --git 
a/parser/testdata/columns_matcher/metadata.json b/parser/testdata/00009_array_join_subquery/metadata.json similarity index 100% rename from parser/testdata/columns_matcher/metadata.json rename to parser/testdata/00009_array_join_subquery/metadata.json diff --git a/parser/testdata/00009_array_join_subquery/query.sql b/parser/testdata/00009_array_join_subquery/query.sql new file mode 100644 index 000000000..378baadd0 --- /dev/null +++ b/parser/testdata/00009_array_join_subquery/query.sql @@ -0,0 +1 @@ +SELECT x FROM (SELECT arrayJoin(['Hello', 'Goodbye']) AS x) diff --git a/parser/testdata/00009_uniq_distributed/ast.json b/parser/testdata/00009_uniq_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00009_uniq_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/conditional_ternary/metadata.json b/parser/testdata/00009_uniq_distributed/metadata.json similarity index 100% rename from parser/testdata/conditional_ternary/metadata.json rename to parser/testdata/00009_uniq_distributed/metadata.json diff --git a/parser/testdata/00009_uniq_distributed/query.sql b/parser/testdata/00009_uniq_distributed/query.sql new file mode 100644 index 000000000..a7b0f8a1b --- /dev/null +++ b/parser/testdata/00009_uniq_distributed/query.sql @@ -0,0 +1,4 @@ +-- Tags: stateful, distributed + + +SELECT uniq(UserID), uniqIf(UserID, CounterID = 800784), uniqIf(FUniqID, RegionID = 213) FROM remote('127.0.0.{1,2}', test, hits) diff --git a/parser/testdata/00010_big_array_join/ast.json b/parser/testdata/00010_big_array_join/ast.json new file mode 100644 index 000000000..10ef35ae4 --- /dev/null +++ b/parser/testdata/00010_big_array_join/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayJoin (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['Hello', 'Goodbye']" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3] (alias arr)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " ArrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier arr" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001767396, + "rows_read": 21, + "bytes_read": 913 + } +} diff --git a/parser/testdata/create_materialized_view/metadata.json b/parser/testdata/00010_big_array_join/metadata.json similarity index 100% rename from parser/testdata/create_materialized_view/metadata.json rename to parser/testdata/00010_big_array_join/metadata.json diff --git a/parser/testdata/00010_big_array_join/query.sql b/parser/testdata/00010_big_array_join/query.sql new file mode 100644 index 000000000..f7b9160b5 --- /dev/null +++ 
b/parser/testdata/00010_big_array_join/query.sql @@ -0,0 +1 @@ +SELECT x FROM (SELECT arrayJoin(['Hello', 'Goodbye']) AS x, [1, 2, 3] AS arr) ARRAY JOIN arr diff --git a/parser/testdata/00010_quantiles_segfault/ast.json b/parser/testdata/00010_quantiles_segfault/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00010_quantiles_segfault/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/create_table_with_comment/metadata.json b/parser/testdata/00010_quantiles_segfault/metadata.json similarity index 100% rename from parser/testdata/create_table_with_comment/metadata.json rename to parser/testdata/00010_quantiles_segfault/metadata.json diff --git a/parser/testdata/00010_quantiles_segfault/query.sql b/parser/testdata/00010_quantiles_segfault/query.sql new file mode 100644 index 000000000..9a5e7da3f --- /dev/null +++ b/parser/testdata/00010_quantiles_segfault/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT URL AS `ym:ah:URL`, sum((NOT DontCountHits AND NOT Refresh)), quantilesTimingIf(0.1, 0.5, 0.9)((DOMCompleteTiming + LoadEventEndTiming), DOMCompleteTiming != -1 AND LoadEventEndTiming != -1) as t FROM remote('127.0.0.{1,2}', test, hits) WHERE (CounterID = 800784) AND (((DontCountHits = 0) OR (IsNotBounce = 1)) AND (URL != '')) GROUP BY `ym:ah:URL` WITH TOTALS HAVING (sum((NOT DontCountHits AND NOT Refresh)) > 0) AND (count() > 0) ORDER BY sum((NOT DontCountHits AND NOT Refresh)) DESC, URL LIMIT 0, 1 diff --git a/parser/testdata/00011_array_join_alias/ast.json b/parser/testdata/00011_array_join_alias/ast.json new file mode 100644 index 000000000..103156bae --- /dev/null +++ b/parser/testdata/00011_array_join_alias/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier a" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayJoin (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['Hello', 'Goodbye']" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3] (alias arr)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " ArrayJoin (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001752304, + "rows_read": 21, + "bytes_read": 895 + } +} diff --git a/parser/testdata/create_table_with_partition/metadata.json b/parser/testdata/00011_array_join_alias/metadata.json similarity index 100% rename from parser/testdata/create_table_with_partition/metadata.json rename to parser/testdata/00011_array_join_alias/metadata.json diff --git a/parser/testdata/00011_array_join_alias/query.sql b/parser/testdata/00011_array_join_alias/query.sql new file mode 100644 index 000000000..8e04d48a7 --- /dev/null +++ 
b/parser/testdata/00011_array_join_alias/query.sql @@ -0,0 +1,2 @@ +SELECT x, a FROM (SELECT arrayJoin(['Hello', 'Goodbye']) AS x, [1, 2, 3] AS arr) ARRAY JOIN; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT x, a FROM (SELECT arrayJoin(['Hello', 'Goodbye']) AS x, [1, 2, 3] AS arr) ARRAY JOIN arr AS a; diff --git a/parser/testdata/00011_sorting/ast.json b/parser/testdata/00011_sorting/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00011_sorting/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/dateadd/metadata.json b/parser/testdata/00011_sorting/metadata.json similarity index 100% rename from parser/testdata/dateadd/metadata.json rename to parser/testdata/00011_sorting/metadata.json diff --git a/parser/testdata/00011_sorting/query.sql b/parser/testdata/00011_sorting/query.sql new file mode 100644 index 000000000..fe5cb3a9d --- /dev/null +++ b/parser/testdata/00011_sorting/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT EventTime::DateTime('Asia/Dubai') FROM test.hits ORDER BY EventTime DESC LIMIT 10 diff --git a/parser/testdata/00012_array_join_alias_2/ast.json b/parser/testdata/00012_array_join_alias_2/ast.json new file mode 100644 index 000000000..8f620bc23 --- /dev/null +++ b/parser/testdata/00012_array_join_alias_2/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Identifier arr" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayJoin (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['Hello', 'Goodbye']" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3] (alias arr)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " ArrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier arr (alias a)" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001439494, + "rows_read": 23, + "bytes_read": 973 + } +} diff --git a/parser/testdata/datesub/metadata.json b/parser/testdata/00012_array_join_alias_2/metadata.json similarity index 100% rename from parser/testdata/datesub/metadata.json rename to parser/testdata/00012_array_join_alias_2/metadata.json diff --git a/parser/testdata/00012_array_join_alias_2/query.sql b/parser/testdata/00012_array_join_alias_2/query.sql new file mode 100644 index 000000000..a45cf2d87 --- /dev/null +++ b/parser/testdata/00012_array_join_alias_2/query.sql @@ -0,0 +1 @@ +SELECT x, a, arr FROM (SELECT arrayJoin(['Hello', 'Goodbye']) AS x, [1, 2, 3] AS arr) ARRAY JOIN arr AS a diff --git a/parser/testdata/00012_sorting_distributed/ast.json b/parser/testdata/00012_sorting_distributed/ast.json new file mode 100644 index 
000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00012_sorting_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/desc_table/metadata.json b/parser/testdata/00012_sorting_distributed/metadata.json similarity index 100% rename from parser/testdata/desc_table/metadata.json rename to parser/testdata/00012_sorting_distributed/metadata.json diff --git a/parser/testdata/00012_sorting_distributed/query.sql b/parser/testdata/00012_sorting_distributed/query.sql new file mode 100644 index 000000000..bf07fdba7 --- /dev/null +++ b/parser/testdata/00012_sorting_distributed/query.sql @@ -0,0 +1,4 @@ +-- Tags: stateful, distributed + + +SELECT EventTime::DateTime('Asia/Dubai') FROM remote('127.0.0.{1,2}', test, hits) ORDER BY EventTime DESC LIMIT 10 diff --git a/parser/testdata/00013_create_table_with_arrays/ast.json b/parser/testdata/00013_create_table_with_arrays/ast.json new file mode 100644 index 000000000..e6f7a6ebd --- /dev/null +++ b/parser/testdata/00013_create_table_with_arrays/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery arrays_test (children 1)" + }, + { + "explain": " Identifier arrays_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001414106, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/describe_short/metadata.json b/parser/testdata/00013_create_table_with_arrays/metadata.json similarity index 100% rename from parser/testdata/describe_short/metadata.json rename to parser/testdata/00013_create_table_with_arrays/metadata.json diff --git a/parser/testdata/00013_create_table_with_arrays/query.sql b/parser/testdata/00013_create_table_with_arrays/query.sql new file mode 100644 index 000000000..10db807c9 --- /dev/null +++ b/parser/testdata/00013_create_table_with_arrays/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS arrays_test; +CREATE TABLE arrays_test (s String, arr Array(UInt8)) ENGINE = Memory; +INSERT INTO arrays_test VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []); +SELECT * FROM arrays_test; +SELECT s, arr FROM arrays_test ARRAY JOIN arr; +SELECT s, arr, a FROM arrays_test ARRAY JOIN arr AS a; +SELECT s, arr, a, num FROM arrays_test ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num; +SELECT s, arr, a, num, arrayEnumerate(arr) FROM arrays_test ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num; +SELECT s, arr, a, mapped FROM arrays_test ARRAY JOIN arr AS a, arrayMap(x -> x + 1, arr) AS mapped; +SELECT s, arr, a, num, mapped FROM arrays_test ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num, arrayMap(x -> x + 1, arr) AS mapped; +SELECT sumArray(arr), sumArrayIf(arr, s LIKE '%l%'), sumArrayIf(arr, s LIKE '%e%') FROM arrays_test; +DROP TABLE arrays_test; diff --git a/parser/testdata/00013_sorting_of_nested/ast.json b/parser/testdata/00013_sorting_of_nested/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00013_sorting_of_nested/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/describe_table_full/metadata.json b/parser/testdata/00013_sorting_of_nested/metadata.json similarity index 100% rename from parser/testdata/describe_table_full/metadata.json rename to parser/testdata/00013_sorting_of_nested/metadata.json diff --git a/parser/testdata/00013_sorting_of_nested/query.sql b/parser/testdata/00013_sorting_of_nested/query.sql new file mode 100644 index 000000000..595b14786 --- /dev/null +++ b/parser/testdata/00013_sorting_of_nested/query.sql @@ -0,0 
+1,2 @@ +-- Tags: stateful +SELECT ParsedParams.Key1 FROM test.visits FINAL WHERE VisitID != 0 AND notEmpty(ParsedParams.Key1) ORDER BY VisitID LIMIT 10 diff --git a/parser/testdata/00014_filtering_arrays/ast.json b/parser/testdata/00014_filtering_arrays/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00014_filtering_arrays/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/drop_compiled_expression_cache/metadata.json b/parser/testdata/00014_filtering_arrays/metadata.json similarity index 100% rename from parser/testdata/drop_compiled_expression_cache/metadata.json rename to parser/testdata/00014_filtering_arrays/metadata.json diff --git a/parser/testdata/00014_filtering_arrays/query.sql b/parser/testdata/00014_filtering_arrays/query.sql new file mode 100644 index 000000000..78e8d1a5a --- /dev/null +++ b/parser/testdata/00014_filtering_arrays/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT GeneralInterests FROM test.hits WHERE AdvEngineID != 0 ORDER BY GeneralInterests DESC LIMIT 10 diff --git a/parser/testdata/00014_select_from_table_with_nested/ast.json b/parser/testdata/00014_select_from_table_with_nested/ast.json new file mode 100644 index 000000000..0c58bc365 --- /dev/null +++ b/parser/testdata/00014_select_from_table_with_nested/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nested_test (children 1)" + }, + { + "explain": " Identifier nested_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00136686, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/drop_constraint/metadata.json b/parser/testdata/00014_select_from_table_with_nested/metadata.json similarity index 100% rename from parser/testdata/drop_constraint/metadata.json rename to parser/testdata/00014_select_from_table_with_nested/metadata.json diff --git a/parser/testdata/00014_select_from_table_with_nested/query.sql b/parser/testdata/00014_select_from_table_with_nested/query.sql new file mode 100644 index 000000000..fa425374e --- /dev/null +++ b/parser/testdata/00014_select_from_table_with_nested/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS nested_test; +CREATE TABLE nested_test (s String, nest Nested(x UInt8, y UInt32)) ENGINE = Memory; +INSERT INTO nested_test VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', [], []); +SELECT * FROM nested_test; +SELECT s, nest.x, nest.y FROM nested_test ARRAY JOIN nest; +SELECT s, nest.x, nest.y FROM nested_test ARRAY JOIN nest.x; +SELECT s, nest.x, nest.y FROM nested_test ARRAY JOIN nest.x, nest.y; +SELECT s, n.x, n.y FROM nested_test ARRAY JOIN nest AS n; +SELECT s, n.x, n.y, nest.x FROM nested_test ARRAY JOIN nest AS n; +SELECT s, n.x, n.y, nest.x, nest.y FROM nested_test ARRAY JOIN nest AS n; +SELECT s, n.x, n.y, nest.x, nest.y, num FROM nested_test ARRAY JOIN nest AS n, arrayEnumerate(nest.x) AS num; +DROP TABLE nested_test; diff --git a/parser/testdata/00015_totals_and_no_aggregate_functions/ast.json b/parser/testdata/00015_totals_and_no_aggregate_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00015_totals_and_no_aggregate_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/drop_dns_cache/metadata.json b/parser/testdata/00015_totals_and_no_aggregate_functions/metadata.json similarity index 100% rename from parser/testdata/drop_dns_cache/metadata.json rename to 
parser/testdata/00015_totals_and_no_aggregate_functions/metadata.json diff --git a/parser/testdata/00015_totals_and_no_aggregate_functions/query.sql b/parser/testdata/00015_totals_and_no_aggregate_functions/query.sql new file mode 100644 index 000000000..11d0b7c0d --- /dev/null +++ b/parser/testdata/00015_totals_and_no_aggregate_functions/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT AdvEngineID FROM test.hits GROUP BY AdvEngineID WITH TOTALS ORDER BY AdvEngineID diff --git a/parser/testdata/00015_totals_having_constants/ast.json b/parser/testdata/00015_totals_having_constants/ast.json new file mode 100644 index 000000000..e263b8d1c --- /dev/null +++ b/parser/testdata/00015_totals_having_constants/ast.json @@ -0,0 +1,124 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 5)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function divide (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal Float64_0.1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal Float64_0.1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier number" + } + ], + + "rows": 34, + + "statistics": + { + "elapsed": 0.00179472, + "rows_read": 34, + "bytes_read": 1339 + } +} diff --git a/parser/testdata/drop_index/metadata.json b/parser/testdata/00015_totals_having_constants/metadata.json similarity index 100% rename from parser/testdata/drop_index/metadata.json rename to parser/testdata/00015_totals_having_constants/metadata.json diff --git a/parser/testdata/00015_totals_having_constants/query.sql b/parser/testdata/00015_totals_having_constants/query.sql new file mode 100644 index 000000000..b7fb28729 --- /dev/null +++ b/parser/testdata/00015_totals_having_constants/query.sql @@ -0,0 +1 @@ +SELECT number, count() / 0.1 FROM (SELECT number FROM system.numbers LIMIT 10) GROUP BY number WITH TOTALS HAVING count() > 0.1 ORDER BY number diff --git a/parser/testdata/00016_any_if_distributed_cond_always_false/ast.json b/parser/testdata/00016_any_if_distributed_cond_always_false/ast.json new file mode 100644 index 
000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00016_any_if_distributed_cond_always_false/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/drop_mark_cache/metadata.json b/parser/testdata/00016_any_if_distributed_cond_always_false/metadata.json similarity index 100% rename from parser/testdata/drop_mark_cache/metadata.json rename to parser/testdata/00016_any_if_distributed_cond_always_false/metadata.json diff --git a/parser/testdata/00016_any_if_distributed_cond_always_false/query.sql b/parser/testdata/00016_any_if_distributed_cond_always_false/query.sql new file mode 100644 index 000000000..e7949589d --- /dev/null +++ b/parser/testdata/00016_any_if_distributed_cond_always_false/query.sql @@ -0,0 +1,4 @@ +-- Tags: stateful, distributed + + +SELECT anyIf(SearchPhrase, CounterID = -1) FROM remote('127.0.0.{1,2}:9000', test, hits) diff --git a/parser/testdata/00016_totals_having_constants/ast.json b/parser/testdata/00016_totals_having_constants/ast.json new file mode 100644 index 000000000..07596a8e9 --- /dev/null +++ b/parser/testdata/00016_totals_having_constants/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier dummy" + }, + { + "explain": " Function divide (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal Float64_0.1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier dummy" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal Float64_0.1" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.00149892, + "rows_read": 17, + "bytes_read": 605 + } +} diff --git a/parser/testdata/drop_table_sync/metadata.json b/parser/testdata/00016_totals_having_constants/metadata.json similarity index 100% rename from parser/testdata/drop_table_sync/metadata.json rename to parser/testdata/00016_totals_having_constants/metadata.json diff --git a/parser/testdata/00016_totals_having_constants/query.sql b/parser/testdata/00016_totals_having_constants/query.sql new file mode 100644 index 000000000..c50659b81 --- /dev/null +++ b/parser/testdata/00016_totals_having_constants/query.sql @@ -0,0 +1 @@ +SELECT dummy, count() / 0.1 GROUP BY dummy WITH TOTALS HAVING count() > 0.1 diff --git a/parser/testdata/00017_aggregation_uninitialized_memory/ast.json b/parser/testdata/00017_aggregation_uninitialized_memory/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00017_aggregation_uninitialized_memory/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/drop_uncompressed_cache/metadata.json b/parser/testdata/00017_aggregation_uninitialized_memory/metadata.json similarity index 100% rename from parser/testdata/drop_uncompressed_cache/metadata.json rename to parser/testdata/00017_aggregation_uninitialized_memory/metadata.json diff --git a/parser/testdata/00017_aggregation_uninitialized_memory/query.sql b/parser/testdata/00017_aggregation_uninitialized_memory/query.sql new file 
mode 100644 index 000000000..c757440da --- /dev/null +++ b/parser/testdata/00017_aggregation_uninitialized_memory/query.sql @@ -0,0 +1,3 @@ +-- Tags: stateful +SELECT DISTINCT (URLHierarchy(URL)[1]) AS q, 'x' AS w FROM test.hits WHERE CounterID = 14917930 ORDER BY URL + diff --git a/parser/testdata/00017_in_subquery_with_empty_result/ast.json b/parser/testdata/00017_in_subquery_with_empty_result/ast.json new file mode 100644 index 000000000..1e6edc5f9 --- /dev/null +++ b/parser/testdata/00017_in_subquery_with_empty_result/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001128, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/exchange_tables/metadata.json b/parser/testdata/00017_in_subquery_with_empty_result/metadata.json similarity index 100% rename from parser/testdata/exchange_tables/metadata.json rename to parser/testdata/00017_in_subquery_with_empty_result/metadata.json diff --git a/parser/testdata/00017_in_subquery_with_empty_result/query.sql b/parser/testdata/00017_in_subquery_with_empty_result/query.sql new file mode 100644 index 000000000..3f0bb10f6 --- /dev/null +++ b/parser/testdata/00017_in_subquery_with_empty_result/query.sql @@ -0,0 +1,4 @@ +SET output_format_write_statistics = 0; + +SELECT count() FROM (SELECT * FROM system.numbers LIMIT 1000) WHERE 1 IN (SELECT 0 WHERE 0) +FORMAT JSON; diff --git a/parser/testdata/00018_distinct_in_subquery/ast.json b/parser/testdata/00018_distinct_in_subquery/ast.json new file mode 100644 index 000000000..d85df6e17 --- /dev/null +++ b/parser/testdata/00018_distinct_in_subquery/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias x)" + }, + { + "explain": " Function arrayJoin (alias y) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2]" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001301782, + "rows_read": 17, + "bytes_read": 727 + } +} diff --git a/parser/testdata/extract_regex/metadata.json b/parser/testdata/00018_distinct_in_subquery/metadata.json similarity index 100% rename from parser/testdata/extract_regex/metadata.json rename to parser/testdata/00018_distinct_in_subquery/metadata.json diff --git a/parser/testdata/distinct_subquery/query.sql b/parser/testdata/00018_distinct_in_subquery/query.sql similarity index 100% rename from parser/testdata/distinct_subquery/query.sql rename to parser/testdata/00018_distinct_in_subquery/query.sql diff --git a/parser/testdata/00019_shard_quantiles_totals_distributed/ast.json b/parser/testdata/00019_shard_quantiles_totals_distributed/ast.json new file mode 100644 
index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00019_shard_quantiles_totals_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/format_function/metadata.json b/parser/testdata/00019_shard_quantiles_totals_distributed/metadata.json similarity index 100% rename from parser/testdata/format_function/metadata.json rename to parser/testdata/00019_shard_quantiles_totals_distributed/metadata.json diff --git a/parser/testdata/00019_shard_quantiles_totals_distributed/query.sql b/parser/testdata/00019_shard_quantiles_totals_distributed/query.sql new file mode 100644 index 000000000..e712b028a --- /dev/null +++ b/parser/testdata/00019_shard_quantiles_totals_distributed/query.sql @@ -0,0 +1,4 @@ +-- Tags: distributed + +SET enable_positional_arguments = 0; +SELECT quantilesTiming(0.1, 0.5, 0.9)(dummy) FROM remote('127.0.0.{2,3}', system, one) GROUP BY 1 WITH TOTALS; diff --git a/parser/testdata/00020_distinct_order_by_distributed/ast.json b/parser/testdata/00020_distinct_order_by_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00020_distinct_order_by_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/freeze_partition/metadata.json b/parser/testdata/00020_distinct_order_by_distributed/metadata.json similarity index 100% rename from parser/testdata/freeze_partition/metadata.json rename to parser/testdata/00020_distinct_order_by_distributed/metadata.json diff --git a/parser/testdata/00020_distinct_order_by_distributed/query.sql b/parser/testdata/00020_distinct_order_by_distributed/query.sql new file mode 100644 index 000000000..6aec76f30 --- /dev/null +++ b/parser/testdata/00020_distinct_order_by_distributed/query.sql @@ -0,0 +1,4 @@ +-- Tags: stateful, distributed + +SET max_rows_to_sort = 10000; +SELECT count() FROM (SELECT DISTINCT PredLastVisit AS x FROM remote('127.0.0.{1,2}', test, visits) ORDER BY VisitID); diff --git a/parser/testdata/00020_sorting_arrays/ast.json b/parser/testdata/00020_sorting_arrays/ast.json new file mode 100644 index 000000000..b0c06c387 --- /dev/null +++ b/parser/testdata/00020_sorting_arrays/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[Array_[UInt64_3, UInt64_4, UInt64_5], Array_[UInt64_6, UInt64_7], Array_[UInt64_2], Array_[UInt64_1, UInt64_1]]" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001441168, + "rows_read": 10, + "bytes_read": 484 + } +} diff --git a/parser/testdata/freeze_table/metadata.json b/parser/testdata/00020_sorting_arrays/metadata.json similarity index 100% rename from parser/testdata/freeze_table/metadata.json rename to parser/testdata/00020_sorting_arrays/metadata.json diff --git a/parser/testdata/array_join_nested/query.sql b/parser/testdata/00020_sorting_arrays/query.sql similarity index 100% rename from parser/testdata/array_join_nested/query.sql rename to parser/testdata/00020_sorting_arrays/query.sql diff --git 
a/parser/testdata/00021_1_select_with_in/ast.json b/parser/testdata/00021_1_select_with_in/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00021_1_select_with_in/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/global_in/metadata.json b/parser/testdata/00021_1_select_with_in/metadata.json similarity index 100% rename from parser/testdata/global_in/metadata.json rename to parser/testdata/00021_1_select_with_in/metadata.json diff --git a/parser/testdata/00021_1_select_with_in/query.sql b/parser/testdata/00021_1_select_with_in/query.sql new file mode 100644 index 000000000..a45f37d79 --- /dev/null +++ b/parser/testdata/00021_1_select_with_in/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +select sum(Sign) from test.visits where CounterID in (942285); diff --git a/parser/testdata/00021_2_select_with_in/ast.json b/parser/testdata/00021_2_select_with_in/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00021_2_select_with_in/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/global_not_in/metadata.json b/parser/testdata/00021_2_select_with_in/metadata.json similarity index 100% rename from parser/testdata/global_not_in/metadata.json rename to parser/testdata/00021_2_select_with_in/metadata.json diff --git a/parser/testdata/00021_2_select_with_in/query.sql b/parser/testdata/00021_2_select_with_in/query.sql new file mode 100644 index 000000000..60e307461 --- /dev/null +++ b/parser/testdata/00021_2_select_with_in/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +select sum(Sign) from test.visits where CounterID in (942285, 577322); diff --git a/parser/testdata/00021_3_select_with_in/ast.json b/parser/testdata/00021_3_select_with_in/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00021_3_select_with_in/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/if_function/metadata.json b/parser/testdata/00021_3_select_with_in/metadata.json similarity index 100% rename from parser/testdata/if_function/metadata.json rename to parser/testdata/00021_3_select_with_in/metadata.json diff --git a/parser/testdata/00021_3_select_with_in/query.sql b/parser/testdata/00021_3_select_with_in/query.sql new file mode 100644 index 000000000..1a6db3a06 --- /dev/null +++ b/parser/testdata/00021_3_select_with_in/query.sql @@ -0,0 +1,4 @@ +-- Tags: stateful +select 1 IN (1, 2, 3); + +SELECT count() FROM remote('localhost', test, hits) WHERE CounterID IN (598875); diff --git a/parser/testdata/00021_sorting_arrays/ast.json b/parser/testdata/00021_sorting_arrays/ast.json new file mode 100644 index 000000000..e75c01408 --- /dev/null +++ b/parser/testdata/00021_sorting_arrays/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[Array_[UInt64_3, UInt64_4, UInt64_5], Array_[UInt64_6, UInt64_7], Array_[UInt64_2], Array_[UInt64_1, UInt64_1]]" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 10, + + "statistics": + { + 
"elapsed": 0.001447617, + "rows_read": 10, + "bytes_read": 484 + } +} diff --git a/parser/testdata/into_outfile/metadata.json b/parser/testdata/00021_sorting_arrays/metadata.json similarity index 100% rename from parser/testdata/into_outfile/metadata.json rename to parser/testdata/00021_sorting_arrays/metadata.json diff --git a/parser/testdata/00021_sorting_arrays/query.sql b/parser/testdata/00021_sorting_arrays/query.sql new file mode 100644 index 000000000..e2034d94f --- /dev/null +++ b/parser/testdata/00021_sorting_arrays/query.sql @@ -0,0 +1 @@ +SELECT arrayJoin([[3,4,5], [6,7], [2], [1,1]]) AS x ORDER BY x DESC diff --git a/parser/testdata/00022_func_higher_order_and_constants/ast.json b/parser/testdata/00022_func_higher_order_and_constants/ast.json new file mode 100644 index 000000000..738505635 --- /dev/null +++ b/parser/testdata/00022_func_higher_order_and_constants/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayExists (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function position (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal Array_['a']" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001343031, + "rows_read": 19, + "bytes_read": 748 + } +} diff --git a/parser/testdata/into_outfile_format/metadata.json b/parser/testdata/00022_func_higher_order_and_constants/metadata.json similarity index 100% rename from parser/testdata/into_outfile_format/metadata.json rename to parser/testdata/00022_func_higher_order_and_constants/metadata.json diff --git a/parser/testdata/00022_func_higher_order_and_constants/query.sql b/parser/testdata/00022_func_higher_order_and_constants/query.sql new file mode 100644 index 000000000..c2831e527 --- /dev/null +++ b/parser/testdata/00022_func_higher_order_and_constants/query.sql @@ -0,0 +1 @@ +select arrayExists(x -> position(x, 'a') > 0, ['a']) diff --git a/parser/testdata/00022_merge_prewhere/ast.json b/parser/testdata/00022_merge_prewhere/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00022_merge_prewhere/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/left_array_join/metadata.json b/parser/testdata/00022_merge_prewhere/metadata.json similarity index 100% rename from parser/testdata/left_array_join/metadata.json rename to parser/testdata/00022_merge_prewhere/metadata.json diff --git a/parser/testdata/00022_merge_prewhere/query.sql b/parser/testdata/00022_merge_prewhere/query.sql new file mode 100644 index 000000000..ecb13643f --- /dev/null +++ b/parser/testdata/00022_merge_prewhere/query.sql @@ -0,0 +1,6 @@ +-- Tags: stateful +DROP TABLE IF EXISTS merge_hits; +CREATE TABLE IF NOT EXISTS merge_hits AS 
test.hits ENGINE = Merge(test, '^hits$'); +SELECT count() FROM merge_hits WHERE AdvEngineID = 2; +SELECT count() FROM merge_hits PREWHERE AdvEngineID = 2; +DROP TABLE merge_hits; diff --git a/parser/testdata/00023_agg_select_agg_subquery/ast.json b/parser/testdata/00023_agg_select_agg_subquery/ast.json new file mode 100644 index 000000000..d415d6c74 --- /dev/null +++ b/parser/testdata/00023_agg_select_agg_subquery/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001391716, + "rows_read": 24, + "bytes_read": 1028 + } +} diff --git a/parser/testdata/materialize_index/metadata.json b/parser/testdata/00023_agg_select_agg_subquery/metadata.json similarity index 100% rename from parser/testdata/materialize_index/metadata.json rename to parser/testdata/00023_agg_select_agg_subquery/metadata.json diff --git a/parser/testdata/00023_agg_select_agg_subquery/query.sql b/parser/testdata/00023_agg_select_agg_subquery/query.sql new file mode 100644 index 000000000..bb63f3b9d --- /dev/null +++ b/parser/testdata/00023_agg_select_agg_subquery/query.sql @@ -0,0 +1 @@ +SELECT count() FROM (SELECT sum(materialize(1)), sum(materialize(2))) diff --git a/parser/testdata/00023_totals_limit/ast.json b/parser/testdata/00023_totals_limit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00023_totals_limit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/modify_ttl/metadata.json b/parser/testdata/00023_totals_limit/metadata.json similarity index 100% rename from parser/testdata/modify_ttl/metadata.json rename to parser/testdata/00023_totals_limit/metadata.json diff --git a/parser/testdata/00023_totals_limit/query.sql b/parser/testdata/00023_totals_limit/query.sql new file mode 100644 index 000000000..2002f56ea --- /dev/null +++ b/parser/testdata/00023_totals_limit/query.sql @@ -0,0 +1,3 @@ +-- Tags: stateful +SET output_format_write_statistics = 0; +SELECT goals_alias.ID AS `ym:s:goalDimension`, uniqIf(UserID, (UserID != 0) AND (`_uniq_Goals` = 1)) FROM test.visits ARRAY JOIN Goals AS goals_alias, arrayEnumerateUniq(Goals.ID) AS `_uniq_Goals` WHERE (CounterID = 842440) GROUP BY `ym:s:goalDimension` 
WITH TOTALS ORDER BY `ym:s:goalDimension` LIMIT 0, 1 FORMAT JSONCompact; diff --git a/parser/testdata/00024_random_counters/ast.json b/parser/testdata/00024_random_counters/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00024_random_counters/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/named_tuple_access/metadata.json b/parser/testdata/00024_random_counters/metadata.json similarity index 100% rename from parser/testdata/named_tuple_access/metadata.json rename to parser/testdata/00024_random_counters/metadata.json diff --git a/parser/testdata/00024_random_counters/query.sql b/parser/testdata/00024_random_counters/query.sql new file mode 100644 index 000000000..8d7903a5c --- /dev/null +++ b/parser/testdata/00024_random_counters/query.sql @@ -0,0 +1,1011 @@ +-- Tags: stateful, no-parallel +-- no-parallel: Heavy +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32152608; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9627212; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25152951; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22202319; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13848191; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27855803; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27944638; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16513894; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 4314057; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11878090; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23005927; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17205778; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21296650; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12068702; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8446208; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8439835; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30344780; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 2881921; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1828473; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27620040; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14960013; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 103918; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9626742; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18370244; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 813903; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22176733; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17175454; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31608140; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11802602; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12577104; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 153437; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32240558; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27444870; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 79306; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE 
CounterID = 15222279; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11782937; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1677; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9527330; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23580782; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33027895; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 199609; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29139484; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1700065; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30212873; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6773723; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21842879; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9460479; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16451704; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 51267; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30489182; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11947625; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18776987; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25762358; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 74905; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 877422; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3465045; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 2084559; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13828281; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30299683; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 132115; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10919775; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12329250; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11525543; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32395537; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24537202; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 2270964; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8518291; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11897183; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23805647; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22652078; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19363661; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32339088; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11394550; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1988179; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 2135273; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14500371; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10463153; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18838936; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24492652; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26848923; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12495799; +SELECT 
uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12028938; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8934725; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18602951; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32404741; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19171705; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9831187; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20047182; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26690858; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 126413; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31244775; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15690176; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28374997; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12717244; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9152092; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 5397339; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12452068; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13626118; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 46783; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11484344; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21453219; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 7692388; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30879805; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27784549; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 665663; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30535786; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11685143; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13652647; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9880318; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30148588; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32745436; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27390924; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17470663; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 196859; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22123478; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 87021; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25264218; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24125574; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26099981; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1141558; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 220829; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15651875; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 182483; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28430678; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31384642; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1008241; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10462834; +SELECT uniq(UserID), sum(Sign) FROM 
test.visits WHERE CounterID = 26829659; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29130002; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17891770; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26531140; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15014338; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15375411; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 7952204; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 41859; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21651593; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9527676; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 107394; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23409492; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31407407; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29312961; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9705505; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29848510; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10187274; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 112606; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15639744; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 4375349; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1423039; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13933371; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20430236; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30679961; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 37094; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23197674; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 994587; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 437496; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3904733; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19200606; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 84668; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28581029; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11074306; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 2470089; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12251899; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16996077; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12426411; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1034934; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 4721601; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22026000; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21031300; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 559124; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15492463; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21419604; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25632271; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14446476; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12684903; 
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23292922; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26976782; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20269131; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18309978; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 5305320; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30926629; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14816057; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19523905; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18775058; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32507411; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25535479; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24858652; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32420158; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 4805894; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8157258; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 5759745; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12626987; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 5342591; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10951832; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9729032; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27999107; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 7302193; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30447727; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15764416; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15727130; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15116605; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 527313; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16687935; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28304381; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17699739; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17339596; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29348067; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20861945; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12922065; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27019489; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18299445; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 108465; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 233447; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13042904; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31481509; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 2267268; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26140306; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19094364; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25000943; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6860549; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30714288; +SELECT uniq(UserID), 
sum(Sign) FROM test.visits WHERE CounterID = 16289139; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1419182; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33436573; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30062358; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18167743; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27846382; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30148240; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32332238; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25129158; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14066924; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19832770; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29018190; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 852275; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11328399; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28179212; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20155907; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30685297; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32783957; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1552720; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28110991; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 4814424; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20171153; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14920591; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 65690; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14357916; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26533001; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17014738; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11977336; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30142464; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14082365; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18851419; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27638649; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8798932; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 717825; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6912378; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26898048; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 5992218; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13422462; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21204372; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17845298; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6933004; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21627605; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3395439; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22315068; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24973444; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27751340; +SELECT uniq(UserID), sum(Sign) FROM test.visits 
WHERE CounterID = 6022884; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32417601; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18087198; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21940806; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23809389; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9510424; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30651933; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17818815; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9038457; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9153497; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29938964; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10471118; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12913162; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14933629; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 7173707; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28680585; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1279785; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33276693; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 573557; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27753414; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22968595; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25211823; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32687774; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6062762; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18866703; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 5164840; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6462629; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25039797; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10789598; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33076990; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28960547; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32723171; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17888313; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29810654; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21760643; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16678170; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 368520; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12506284; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9802670; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18488016; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 227003; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15254606; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32580177; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30313645; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20879524; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27222776; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11266528; 
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17018146; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19902143; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19469853; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22823497; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 56768; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 136798; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16554922; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20627728; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6551053; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 124145; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10881152; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17271030; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28213281; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15665842; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28264219; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29277533; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22926441; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31057728; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8027311; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14229492; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14782220; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29099258; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 99953; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9334015; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16156945; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 124031; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1670442; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21036594; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22954047; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16054043; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 121765; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1482385; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25977258; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24596247; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 550092; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1579438; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1205; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 126296; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 177248; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27523607; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15873699; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11971473; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18965085; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19035683; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29640643; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11929806; +SELECT uniq(UserID), sum(Sign) FROM 
test.visits WHERE CounterID = 9352219; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18492653; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 7967264; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11391453; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 4289; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3567; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13575826; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 2566437; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21042675; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26498330; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23764459; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32664413; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10116935; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24572551; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26788657; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12830859; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 530033; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6764575; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25219472; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10721285; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26254035; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15486693; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10323514; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23578364; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25449880; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13428298; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17679279; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27610140; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15346859; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 535736; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 513828; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20411888; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13595045; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33221835; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 97601; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12819274; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18047205; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19900235; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27830172; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20839743; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29980468; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27417156; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17908689; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24471592; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32147490; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22966030; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 
21060870; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 238185; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10152551; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11255139; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 982334; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15199978; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15678357; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18206303; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10902608; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22494906; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22204221; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13097211; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30998656; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26656294; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 922545; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9428510; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15137339; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15578624; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31695129; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15791360; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29571338; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 5371768; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15163979; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16312681; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6126176; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16061128; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8528634; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 136544; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3093873; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3994698; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8302978; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16115563; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21804036; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9785708; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10847072; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30692218; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15582824; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19802155; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20835290; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 204284; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25636491; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30446517; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16761451; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 456303; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17301839; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27472581; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24078399; +SELECT uniq(UserID), 
sum(Sign) FROM test.visits WHERE CounterID = 26345482; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 451381; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8576994; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19418898; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10068353; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3767138; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 758020; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13521375; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25968099; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26805240; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13051011; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 901894; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33097016; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12545080; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29944288; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8250825; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12499373; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22535728; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11929724; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3615273; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24172869; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 116132; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12002817; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23681158; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3938; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8468701; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1295067; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27469232; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32708119; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 122578; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12139400; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12219626; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9262336; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9269892; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 122701; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19589931; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29539889; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31115640; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6283044; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30642040; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18065262; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26714391; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15351586; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13090710; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16201652; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31960256; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE 
CounterID = 2658509; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 467277; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1274110; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23640128; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16197014; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28228612; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11659509; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24981440; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 52285; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30583892; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31467341; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25512316; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 2908472; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 422752; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32718035; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14213540; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14951444; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6819113; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9532880; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 4102488; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19537427; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 7078160; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29521616; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 5045377; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23131467; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22383622; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22079706; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29466380; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12045654; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30178011; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20821588; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21966434; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29390311; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19370159; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24857158; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31982180; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11990254; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3841725; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13993951; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31252290; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26398773; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 891512; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27087947; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 2097095; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26252354; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13928858; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 4331960; +SELECT 
uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30552074; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27905732; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30049284; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 2118697; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20849218; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11338538; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3348692; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17693905; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23502543; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8905975; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18343399; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15235863; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20356153; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10552704; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28875831; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1488561; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15012941; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25726446; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 2601050; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27426912; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11269650; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14880200; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 362337; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23533327; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26381021; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17522450; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31868526; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18276314; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1841289; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22234319; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11463222; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15251006; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24841412; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28755796; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9087442; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13734462; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9285105; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13289061; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29890926; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30509694; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17698850; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 46229; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16541087; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11305551; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 429238; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 7583796; +SELECT uniq(UserID), sum(Sign) FROM 
test.visits WHERE CounterID = 8604476; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29759280; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1388922; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10884907; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18220244; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 122157; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18069840; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6707469; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26818794; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14770800; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16652737; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25497243; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14747538; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21371935; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1681601; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 5343898; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22040058; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 752596; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9377867; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1848946; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1449313; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31332002; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10829982; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22431161; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29172033; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 7631750; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 898844; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21460344; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25387068; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30980374; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13021547; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27715925; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30292547; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18666245; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18954194; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29070192; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 914290; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14807517; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23062682; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 5132969; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15094854; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 622095; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1244323; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14804701; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11656845; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17167258; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 
8959523; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23121135; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 4339624; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22679035; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13127067; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18362622; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 4189114; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18776826; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26792263; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13409810; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22183039; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16132723; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3925258; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14248840; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18135589; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11234961; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11179577; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 178965; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10138078; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21048048; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8001235; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32833016; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32275374; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1430786; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12969140; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25529912; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18395861; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27380554; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16653574; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16372034; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28050494; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6886254; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 7472729; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12646802; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6589761; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19556032; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10261903; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 4389; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 2415202; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20007939; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17957094; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9920354; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24840314; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 5077718; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11650674; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19766470; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 7854638; +SELECT uniq(UserID), 
sum(Sign) FROM test.visits WHERE CounterID = 9169290; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22873394; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30838169; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 79894; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25792494; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25326672; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33123311; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33237554; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15130284; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18811870; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25418177; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17202302; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31836505; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28671820; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25643858; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16338596; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27288074; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9458517; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25163573; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15680967; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20413991; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19332304; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23159444; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24708786; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 250297; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29944728; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14582542; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 512441; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31273184; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30255145; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 89813; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14959234; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26621829; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 279206; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13041403; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33392742; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10895948; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20804625; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10129067; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13855355; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31007051; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 4109301; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29492024; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28963180; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11530154; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31889101; +SELECT uniq(UserID), sum(Sign) FROM test.visits 
WHERE CounterID = 1713672; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16069992; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9075873; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14512529; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8632591; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33056094; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28349520; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26806792; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11496875; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11797321; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25795940; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33196708; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13243216; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25096876; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26974949; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27061789; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29686454; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 5045092; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 2893170; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21528033; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16980819; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30854698; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1041468; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 215125; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 91347; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22706469; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33038294; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1446406; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 183702; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10246325; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13754526; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6854006; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26686232; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29345198; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15956574; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8558022; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14066782; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31710428; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6750831; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14832055; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29613113; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15159107; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6309003; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 4311581; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28180829; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15131841; +SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20458889; 
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26250664;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31737265;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 802571;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25064649;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21183784;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3218637;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3375471;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1690000;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18602620;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29918973;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8555235;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32152623;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19670163;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25856874;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6142197;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27822106;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8944163;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 7596672;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 129436;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33541084;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 5199217;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10337246;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12718765;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10729131;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28049397;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1410155;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24924437;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16706889;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 54647;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29407271;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1575071;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6861225;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30114382;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 129970;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21103497;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3433579;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14174715;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8450741;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30033987;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11474175;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9601520;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 7377941;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15646334;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18305797;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 2057218;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17121933;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6870927;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19743903;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9019159;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21251610;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 239704;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16170940;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31857931;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25174672;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31546315;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 811438;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33135020;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28325470;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1196502;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 117339;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19198214;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28046111;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27663162;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3651;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8443242;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6773651;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28957858;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15586212;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 155469;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 731800;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13198917;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 2080118;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17987407;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1832110;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32960999;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13858070;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 2800568;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 381151;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26724412;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 238149;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20458616;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16847984;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14870120;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 4729620;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12886810;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 109350;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17512881;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 5250020;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 184094;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3071553;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18940958;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16166873;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13648378;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32750584;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31167464;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21597707;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21992900;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16695153;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12272303;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18958518;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11827733;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12495926;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21022681;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32262727;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12082756;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15636497;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20081370;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26349655;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32832383;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18190567;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 61749;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28596915;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28835938;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32924951;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15835912;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22905942;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12295903;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12461093;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27568271;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33525856;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10351138;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16804486;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24506501;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1336365;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19178381;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17921720;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25396786;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22031463;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19624501;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 28665905;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14851585;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27554706;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14188052;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33301471;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32896955;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1134828;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27050219;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23641604;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22935857;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29805516;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25890338;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20710225;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3925036;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31404180;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25888177;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11074293;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30922753;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11403908;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25615656;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17652214;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 16155802;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 5565120;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 5508217;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33281735;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 11619273;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 67148;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22687534;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17887682;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18506413;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1443226;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13761576;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30941622;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17681363;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 187532;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 95405;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31073741;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 9706801;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12504322;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31779591;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18781661;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 18284607;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 10633383;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3249127;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17567300;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8789986;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 30073024;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26477401;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32222832;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 23098807;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 50708;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25067039;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29132588;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22947337;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27778601;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25325678;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12822401;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 8876685;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31096269;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17466070;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26058342;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 1468384;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 22665021;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24895973;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15423066;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26091197;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 12103346;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15917190;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 31527060;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 3944;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 24572480;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 229185;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 17038391;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 27368675;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 26899897;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 13257515;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19531252;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 21048946;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33104049;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20824535;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15014380;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 25235392;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29560548;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 2599836;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32842358;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 5795232;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29588193;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 19019850;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 29580949;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15335748;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 15094099;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 6308405;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 20762370;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 14121177;
+
+SYSTEM DROP UNCOMPRESSED CACHE;
+
+SET local_filesystem_read_method = 'pread_threadpool';
+SET min_bytes_to_use_direct_io = 1;
+SET use_uncompressed_cache = 1;
+
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 32745436;
+SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 33436573;
diff --git a/parser/testdata/00024_unused_array_join_in_subquery/ast.json b/parser/testdata/00024_unused_array_join_in_subquery/ast.json
new file mode 100644
index 000000000..d68386a53
--- /dev/null
+++ b/parser/testdata/00024_unused_array_join_in_subquery/ast.json
@@ -0,0 +1,76 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
"data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3]" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001299614, + "rows_read": 18, + "bytes_read": 759 + } +} diff --git a/parser/testdata/named_window/metadata.json b/parser/testdata/00024_unused_array_join_in_subquery/metadata.json similarity index 100% rename from parser/testdata/named_window/metadata.json rename to parser/testdata/00024_unused_array_join_in_subquery/metadata.json diff --git a/parser/testdata/00024_unused_array_join_in_subquery/query.sql b/parser/testdata/00024_unused_array_join_in_subquery/query.sql new file mode 100644 index 000000000..05ed9bdd3 --- /dev/null +++ b/parser/testdata/00024_unused_array_join_in_subquery/query.sql @@ -0,0 +1 @@ +SELECT count() FROM (SELECT 1, arrayJoin([1,2,3])) diff --git a/parser/testdata/00025_implicitly_used_subquery_column/ast.json b/parser/testdata/00025_implicitly_used_subquery_column/ast.json new file mode 100644 index 000000000..0d04dbcc7 --- /dev/null +++ b/parser/testdata/00025_implicitly_used_subquery_column/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Identifier x (alias y)" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.00160112, + "rows_read": 17, + "bytes_read": 707 + } +} diff --git a/parser/testdata/null_safe_equal/metadata.json b/parser/testdata/00025_implicitly_used_subquery_column/metadata.json similarity index 100% rename from parser/testdata/null_safe_equal/metadata.json rename to parser/testdata/00025_implicitly_used_subquery_column/metadata.json diff --git a/parser/testdata/00025_implicitly_used_subquery_column/query.sql b/parser/testdata/00025_implicitly_used_subquery_column/query.sql new file mode 100644 index 
index 000000000..1954590ec
--- /dev/null
+++ b/parser/testdata/00025_implicitly_used_subquery_column/query.sql
@@ -0,0 +1 @@
+SELECT y FROM (SELECT materialize(1) AS x, x AS y)
diff --git a/parser/testdata/00026_shard_something_distributed/ast.json b/parser/testdata/00026_shard_something_distributed/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00026_shard_something_distributed/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/order_by_with_fill/metadata.json b/parser/testdata/00026_shard_something_distributed/metadata.json
similarity index 100%
rename from parser/testdata/order_by_with_fill/metadata.json
rename to parser/testdata/00026_shard_something_distributed/metadata.json
diff --git a/parser/testdata/00026_shard_something_distributed/query.sql b/parser/testdata/00026_shard_something_distributed/query.sql
new file mode 100644
index 000000000..23598edb5
--- /dev/null
+++ b/parser/testdata/00026_shard_something_distributed/query.sql
@@ -0,0 +1,3 @@
+-- Tags: distributed
+
+SELECT NOT dummy FROM remote('127.0.0.{2,3}', system, one) WHERE NOT dummy
diff --git a/parser/testdata/00027_argMinMax/ast.json b/parser/testdata/00027_argMinMax/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00027_argMinMax/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/order_by_with_fill_from_to/metadata.json b/parser/testdata/00027_argMinMax/metadata.json
similarity index 100%
rename from parser/testdata/order_by_with_fill_from_to/metadata.json
rename to parser/testdata/00027_argMinMax/metadata.json
diff --git a/parser/testdata/00027_argMinMax/query.sql b/parser/testdata/00027_argMinMax/query.sql
new file mode 100644
index 000000000..57f815add
--- /dev/null
+++ b/parser/testdata/00027_argMinMax/query.sql
@@ -0,0 +1,16 @@
+-- types
+select argMin(x.1, x.2), argMax(x.1, x.2) from (select (number, number + 1) as x from numbers(10));
+select argMin(x.1, x.2), argMax(x.1, x.2) from (select (toString(number), toInt32(number) + 1) as x from numbers(10));
+select argMin(x.1, x.2), argMax(x.1, x.2) from (select (toDate(number, 'UTC'), toDateTime(number, 'UTC') + 1) as x from numbers(10));
+select argMin(x.1, x.2), argMax(x.1, x.2) from (select (toDecimal32(number, 2), toDecimal64(number, 2) + 1) as x from numbers(10));
+
+-- array
+SELECT
+    argMinArray(id, num),
+    argMaxArray(id, num)
+FROM
+(
+    SELECT
+        arrayJoin([[10, 4, 3], [7, 5, 6], [8, 8, 2]]) AS num,
+        arrayJoin([[1, 2, 4]]) AS id
+);
diff --git a/parser/testdata/00027_distinct_and_order_by/ast.json b/parser/testdata/00027_distinct_and_order_by/ast.json
new file mode 100644
index 000000000..37731dc0f
--- /dev/null
+++ b/parser/testdata/00027_distinct_and_order_by/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001337932,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/order_by_with_fill_step/metadata.json b/parser/testdata/00027_distinct_and_order_by/metadata.json
similarity index 100%
rename from parser/testdata/order_by_with_fill_step/metadata.json
rename to parser/testdata/00027_distinct_and_order_by/metadata.json
diff --git a/parser/testdata/00027_distinct_and_order_by/query.sql b/parser/testdata/00027_distinct_and_order_by/query.sql
new file mode 100644
index 000000000..e794605fe
--- /dev/null
+++ b/parser/testdata/00027_distinct_and_order_by/query.sql
@@ -0,0 +1,2 @@
+SET max_rows_to_sort = 100;
+SELECT DISTINCT x FROM (SELECT number % 10 AS x FROM system.numbers LIMIT 100000) ORDER BY x;
diff --git a/parser/testdata/00027_simple_argMinArray/ast.json b/parser/testdata/00027_simple_argMinArray/ast.json
new file mode 100644
index 000000000..0195d96dc
--- /dev/null
+++ b/parser/testdata/00027_simple_argMinArray/ast.json
@@ -0,0 +1,100 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 2)"
+		},
+		{
+			"explain": "   ExpressionList (children 2)"
+		},
+		{
+			"explain": "    Function argMinArray (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 2)"
+		},
+		{
+			"explain": "      Identifier id"
+		},
+		{
+			"explain": "      Identifier num"
+		},
+		{
+			"explain": "    Function argMaxArray (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 2)"
+		},
+		{
+			"explain": "      Identifier id"
+		},
+		{
+			"explain": "      Identifier num"
+		},
+		{
+			"explain": "   TablesInSelectQuery (children 1)"
+		},
+		{
+			"explain": "    TablesInSelectQueryElement (children 1)"
+		},
+		{
+			"explain": "     TableExpression (children 1)"
+		},
+		{
+			"explain": "      Subquery (children 1)"
+		},
+		{
+			"explain": "       SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": "        ExpressionList (children 1)"
+		},
+		{
+			"explain": "         SelectQuery (children 1)"
+		},
+		{
+			"explain": "          ExpressionList (children 2)"
+		},
+		{
+			"explain": "           Function arrayJoin (alias num) (children 1)"
+		},
+		{
+			"explain": "            ExpressionList (children 1)"
+		},
+		{
+			"explain": "             Literal Array_[Array_[UInt64_10, UInt64_4, UInt64_3], Array_[UInt64_7, UInt64_5, UInt64_6], Array_[UInt64_8, UInt64_8, UInt64_2]]"
+		},
+		{
+			"explain": "           Function arrayJoin (alias id) (children 1)"
+		},
+		{
+			"explain": "            ExpressionList (children 1)"
+		},
+		{
+			"explain": "             Literal Array_[Array_[UInt64_1, UInt64_2, UInt64_4]]"
+		}
+	],
+
+	"rows": 26,
+
+	"statistics":
+	{
+		"elapsed": 0.001531981,
+		"rows_read": 26,
+		"bytes_read": 1216
+	}
+}
diff --git a/parser/testdata/prewhere/metadata.json b/parser/testdata/00027_simple_argMinArray/metadata.json
similarity index 100%
rename from parser/testdata/prewhere/metadata.json
rename to parser/testdata/00027_simple_argMinArray/metadata.json
diff --git a/parser/testdata/00027_simple_argMinArray/query.sql b/parser/testdata/00027_simple_argMinArray/query.sql
new file mode 100644
index 000000000..bdee2b058
--- /dev/null
+++ b/parser/testdata/00027_simple_argMinArray/query.sql
@@ -0,0 +1 @@
+SELECT argMinArray(id, num), argMaxArray(id, num) FROM (SELECT arrayJoin([[10, 4, 3], [7, 5, 6], [8, 8, 2]]) AS num, arrayJoin([[1, 2, 4]]) AS id)
diff --git a/parser/testdata/00028_shard_big_agg_aj_distributed/ast.json b/parser/testdata/00028_shard_big_agg_aj_distributed/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00028_shard_big_agg_aj_distributed/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/prewhere_and_where/metadata.json b/parser/testdata/00028_shard_big_agg_aj_distributed/metadata.json
similarity index 100%
rename from parser/testdata/prewhere_and_where/metadata.json
rename to parser/testdata/00028_shard_big_agg_aj_distributed/metadata.json
diff --git a/parser/testdata/00028_shard_big_agg_aj_distributed/query.sql b/parser/testdata/00028_shard_big_agg_aj_distributed/query.sql
new file mode 100644
index 000000000..f16679d6d
--- /dev/null
+++ b/parser/testdata/00028_shard_big_agg_aj_distributed/query.sql
@@ -0,0 +1,9 @@
+-- Tags: distributed
+
+DROP TABLE IF EXISTS big_array;
+CREATE TABLE big_array (x Array(UInt8)) ENGINE=TinyLog;
+SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0;
+INSERT INTO big_array SELECT groupArray(number % 255) AS x FROM (SELECT * FROM system.numbers LIMIT 1000000);
+SELECT sum(y) AS s FROM remote('127.0.0.{2,3}', currentDatabase(), big_array) ARRAY JOIN x AS y;
+SELECT sum(s) FROM (SELECT y AS s FROM remote('127.0.0.{2,3}', currentDatabase(), big_array) ARRAY JOIN x AS y);
+DROP TABLE big_array;
diff --git a/parser/testdata/00030_alter_table/ast.json b/parser/testdata/00030_alter_table/ast.json
new file mode 100644
index 000000000..56382fa53
--- /dev/null
+++ b/parser/testdata/00030_alter_table/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery alter_test (children 1)"
+		},
+		{
+			"explain": " Identifier alter_test"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.00145738,
+		"rows_read": 2,
+		"bytes_read": 72
+	}
+}
diff --git a/parser/testdata/quantile_parametric/metadata.json b/parser/testdata/00030_alter_table/metadata.json
similarity index 100%
rename from parser/testdata/quantile_parametric/metadata.json
rename to parser/testdata/00030_alter_table/metadata.json
diff --git a/parser/testdata/00030_alter_table/query.sql b/parser/testdata/00030_alter_table/query.sql
new file mode 100644
index 000000000..fb9b3de40
--- /dev/null
+++ b/parser/testdata/00030_alter_table/query.sql
@@ -0,0 +1,38 @@
+DROP TABLE IF EXISTS alter_test;
+
+set allow_deprecated_syntax_for_merge_tree=1;
+CREATE TABLE alter_test (CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) ENGINE = MergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192);
+
+INSERT INTO alter_test VALUES (1, '2014-01-01', 2, 3, [1,2,3], ['a','b','c'], 4);
+
+ALTER TABLE alter_test ADD COLUMN Added0 UInt32;
+ALTER TABLE alter_test ADD COLUMN Added2 UInt32;
+ALTER TABLE alter_test ADD COLUMN Added1 UInt32 AFTER Added0;
+
+ALTER TABLE alter_test ADD COLUMN AddedNested1 Nested(A UInt32, B UInt64) AFTER Added2;
+ALTER TABLE alter_test ADD COLUMN AddedNested1.C Array(String) AFTER AddedNested1.B;
+ALTER TABLE alter_test ADD COLUMN AddedNested2 Nested(A UInt32, B UInt64) AFTER AddedNested1;
+
+DESC TABLE alter_test;
+
+ALTER TABLE alter_test DROP COLUMN ToDrop;
+
+ALTER TABLE alter_test MODIFY COLUMN Added0 String;
+
+ALTER TABLE alter_test DROP COLUMN NestedColumn.A;
+ALTER TABLE alter_test DROP COLUMN NestedColumn.S;
+
+ALTER TABLE alter_test DROP COLUMN AddedNested1.B;
+
+ALTER TABLE alter_test ADD COLUMN IF NOT EXISTS Added0 UInt32;
+ALTER TABLE alter_test ADD COLUMN IF NOT EXISTS AddedNested1 Nested(A UInt32, B UInt64);
+ALTER TABLE alter_test ADD COLUMN IF NOT EXISTS AddedNested1.C Array(String);
+ALTER TABLE alter_test MODIFY COLUMN IF EXISTS ToDrop UInt64;
+ALTER TABLE alter_test DROP COLUMN IF EXISTS ToDrop;
+ALTER TABLE alter_test COMMENT COLUMN IF EXISTS ToDrop 'new comment';
+
+DESC TABLE alter_test;
+
+SELECT * FROM alter_test;
+
+DROP TABLE alter_test;
diff --git a/parser/testdata/00030_array_enumerate_uniq/ast.json b/parser/testdata/00030_array_enumerate_uniq/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00030_array_enumerate_uniq/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/quantiles_parametric/metadata.json b/parser/testdata/00030_array_enumerate_uniq/metadata.json
similarity index 100%
rename from parser/testdata/quantiles_parametric/metadata.json
rename to parser/testdata/00030_array_enumerate_uniq/metadata.json
diff --git a/parser/testdata/00030_array_enumerate_uniq/query.sql b/parser/testdata/00030_array_enumerate_uniq/query.sql
new file mode 100644
index 000000000..22f6f7e07
--- /dev/null
+++ b/parser/testdata/00030_array_enumerate_uniq/query.sql
@@ -0,0 +1,2 @@
+-- Tags: stateful
+SELECT max(arrayJoin(arrayEnumerateUniq(arrayMap(x -> intDiv(x, 10), URLCategories)))) FROM test.hits
diff --git a/parser/testdata/00031_array_enumerate_uniq/ast.json b/parser/testdata/00031_array_enumerate_uniq/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00031_array_enumerate_uniq/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/reload_config/metadata.json b/parser/testdata/00031_array_enumerate_uniq/metadata.json
similarity index 100%
rename from parser/testdata/reload_config/metadata.json
rename to parser/testdata/00031_array_enumerate_uniq/metadata.json
diff --git a/parser/testdata/00031_array_enumerate_uniq/query.sql b/parser/testdata/00031_array_enumerate_uniq/query.sql
new file mode 100644
index 000000000..44b6b9b70
--- /dev/null
+++ b/parser/testdata/00031_array_enumerate_uniq/query.sql
@@ -0,0 +1,21 @@
+-- Tags: stateful
+SELECT UserID, arrayEnumerateUniq(groupArray(SearchPhrase)) AS arr
+FROM
+(
+    SELECT UserID, SearchPhrase
+    FROM test.hits
+    WHERE CounterID = 1704509 AND UserID IN
+    (
+        SELECT UserID
+        FROM test.hits
+        WHERE notEmpty(SearchPhrase) AND CounterID = 1704509
+        GROUP BY UserID
+        HAVING count() > 1
+    )
+    ORDER BY UserID, WatchID
+)
+WHERE notEmpty(SearchPhrase)
+GROUP BY UserID
+HAVING length(arr) > 1
+ORDER BY UserID
+LIMIT 20
diff --git a/parser/testdata/00031_parser_number/ast.json b/parser/testdata/00031_parser_number/ast.json
new file mode 100644
index 000000000..7bf1dc91a
--- /dev/null
+++ b/parser/testdata/00031_parser_number/ast.json
@@ -0,0 +1,379 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 56)"
+		},
+		{
+			"explain": "    Literal UInt64_0 (alias x1)"
+		},
+		{
+			"explain": "    Literal UInt64_1 (alias x2)"
+		},
+		{
+			"explain": "    Literal Int64_-1 (alias x3)"
+		},
+		{
+			"explain": "    Literal UInt64_128 (alias x4)"
+		},
+		{
+			"explain": "    Literal Int64_-127 (alias x5)"
+		},
+		{
+			"explain": "    Literal Int64_-128 (alias x6)"
+		},
+		{
+			"explain": "    Literal UInt64_255 (alias x7)"
+		},
+		{
+			"explain": "    Literal Int64_-128 (alias x8)"
+		},
+		{
+			"explain": "    Literal UInt64_377 (alias x9)"
+		},
+		{
+			"explain": "    Literal Int64_-177 (alias x10)"
+		},
+		{
+			"explain": "    Literal UInt64_65535 (alias x11)"
+		},
+		{
+			"explain": "    Literal UInt64_4294967295 (alias x12)"
+		},
+		{
+			"explain": "    Literal Float64_12300 (alias x13)"
+		},
+		{
+			"explain": "    Literal Float64_4656 (alias x14)"
+		},
+		{
+			"explain": "    Literal Float64_-0 (alias x15)"
+		},
+		{
+			"explain": "    Literal Float64_-0 (alias x16)"
+		},
+		{
+			"explain": "    Literal Float64_0 (alias x17)"
+		},
+		{
+			"explain": "    Literal UInt64_18446744073709551615 (alias x18)"
+		},
+		{
+			"explain": "    Literal Float64_20988295479420645000 (alias x19)"
+		},
+		{
+			"explain": "    Literal Float64_-18446744073709552000 (alias x20)"
+		},
+		{
+			"explain": "    Literal Int64_-9223372036854775807 (alias x21)"
+		},
+		{
+			"explain": "    Literal Float64_-8.98846567431158e307 (alias x22)"
+		},
+		{
+			"explain": "    Literal Float64_-2.2250738585072014e-308 (alias x23)"
+		},
+		{
+			"explain": "    Literal Float64_inf (alias x24)"
+		},
+		{
+			"explain": "    Literal Float64_-inf (alias x25)"
+		},
+		{
+			"explain": "    Literal Float64_nan (alias x26)"
+		},
+		{
+			"explain": "    Function divide (alias x27) (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 2)"
+		},
+		{
+			"explain": "      Literal UInt64_0"
+		},
+		{
+			"explain": "      Literal UInt64_0"
+		},
+		{
+			"explain": "    Literal Float64_1e-302 (alias x28)"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x1"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x2"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x3"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x4"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x5"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x6"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x7"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x8"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x9"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x10"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x11"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x12"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x13"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x14"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x15"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x16"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x17"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x18"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x19"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x20"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x21"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x22"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x23"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x24"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x25"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x26"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x27"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier x28"
+		}
+	],
+
+	"rows": 119,
+
+	"statistics":
+	{
+		"elapsed": 0.002362934,
+		"rows_read": 119,
+		"bytes_read": 4680
+	}
+}
diff --git a/parser/testdata/reload_dictionaries/metadata.json b/parser/testdata/00031_parser_number/metadata.json
similarity index 100%
rename from parser/testdata/reload_dictionaries/metadata.json
rename to parser/testdata/00031_parser_number/metadata.json
diff --git a/parser/testdata/00031_parser_number/query.sql b/parser/testdata/00031_parser_number/query.sql
new file mode 100644
index 000000000..6e5a11802
--- /dev/null
+++ b/parser/testdata/00031_parser_number/query.sql
@@ -0,0 +1,3 @@
+SELECT 0 AS x1, 1 AS x2, -1 AS x3, 128 AS x4, -127 AS x5, -128 AS x6, 0xFF AS x7, -0x80 AS x8, 0377 AS x9, -0177 AS x10, 0xFFFF AS x11, 0xFFFFFFFF AS x12, 123e2 AS x13, 0x123p4 AS x14, -0. AS x15, -.0 AS x16, 0. AS x17, 0xFFFFFFFFFFFFFFFF AS x18, 0x123456789ABCDEF01 AS x19, -0xFFFFFFFFFFFFFFFF AS x20, -0x7FFFFFFFFFFFFFFF AS x21, -0x1P1023 AS x22, -0x1p-1022 AS x23, inf AS x24, -INF AS x25, nan AS x26, 0 / 0 AS x27, 0.01e-300 AS x28, toTypeName(x1), toTypeName(x2), toTypeName(x3), toTypeName(x4), toTypeName(x5), toTypeName(x6), toTypeName(x7), toTypeName(x8), toTypeName(x9), toTypeName(x10), toTypeName(x11), toTypeName(x12), toTypeName(x13), toTypeName(x14), toTypeName(x15), toTypeName(x16), toTypeName(x17), toTypeName(x18), toTypeName(x19), toTypeName(x20), toTypeName(x21), toTypeName(x22), toTypeName(x23), toTypeName(x24), toTypeName(x25), toTypeName(x26), toTypeName(x27), toTypeName(x28);
+SELECT 0100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000;
+SELECT -0.0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001;
diff --git a/parser/testdata/00032_aggregate_key64/ast.json b/parser/testdata/00032_aggregate_key64/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00032_aggregate_key64/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/rename_table/metadata.json b/parser/testdata/00032_aggregate_key64/metadata.json
similarity index 100%
rename from parser/testdata/rename_table/metadata.json
rename to parser/testdata/00032_aggregate_key64/metadata.json
diff --git a/parser/testdata/00032_aggregate_key64/query.sql b/parser/testdata/00032_aggregate_key64/query.sql
new file mode 100644
index 000000000..57ef13979
--- /dev/null
+++ b/parser/testdata/00032_aggregate_key64/query.sql
@@ -0,0 +1,2 @@
+-- Tags: stateful
+SELECT SearchEngineID AS k1, count() AS c FROM test.hits GROUP BY k1 ORDER BY c DESC, k1 LIMIT 10
diff --git a/parser/testdata/00032_fixed_string_to_string/ast.json b/parser/testdata/00032_fixed_string_to_string/ast.json
new file mode 100644
index 000000000..81a63a497
--- /dev/null
+++ b/parser/testdata/00032_fixed_string_to_string/ast.json
@@ -0,0 +1,73 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 3)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function toString (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Function toFixedString (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 2)"
+		},
+		{
+			"explain": "        Function toString (children 1)"
+		},
+		{
+			"explain": "         ExpressionList (children 1)"
+		},
+		{
+			"explain": "          Identifier number"
+		},
+		{
+			"explain": "        Literal UInt64_3"
+		},
+		{
+			"explain": "   TablesInSelectQuery (children 1)"
+		},
+		{
+			"explain": "    TablesInSelectQueryElement (children 1)"
+		},
+		{
+			"explain": "     TableExpression (children 1)"
+		},
+		{
+			"explain": "      TableIdentifier system.numbers"
+		},
+		{
+			"explain": "   Literal UInt64_111"
+		}
+	],
+
+	"rows": 17,
+
+	"statistics":
+	{
+		"elapsed": 0.001436885,
+		"rows_read": 17,
"bytes_read": 687 + } +} diff --git a/parser/testdata/replace_partition/metadata.json b/parser/testdata/00032_fixed_string_to_string/metadata.json similarity index 100% rename from parser/testdata/replace_partition/metadata.json rename to parser/testdata/00032_fixed_string_to_string/metadata.json diff --git a/parser/testdata/00032_fixed_string_to_string/query.sql b/parser/testdata/00032_fixed_string_to_string/query.sql new file mode 100644 index 000000000..10a0f08e6 --- /dev/null +++ b/parser/testdata/00032_fixed_string_to_string/query.sql @@ -0,0 +1 @@ +SELECT toString(toFixedString(toString(number), 3)) FROM system.numbers LIMIT 111 diff --git a/parser/testdata/00033_aggregate_key_string/ast.json b/parser/testdata/00033_aggregate_key_string/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00033_aggregate_key_string/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/restart_replica/metadata.json b/parser/testdata/00033_aggregate_key_string/metadata.json similarity index 100% rename from parser/testdata/restart_replica/metadata.json rename to parser/testdata/00033_aggregate_key_string/metadata.json diff --git a/parser/testdata/00033_aggregate_key_string/query.sql b/parser/testdata/00033_aggregate_key_string/query.sql new file mode 100644 index 000000000..ab6546875 --- /dev/null +++ b/parser/testdata/00033_aggregate_key_string/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT SearchPhrase AS k1, count() AS c FROM test.hits GROUP BY k1 ORDER BY c DESC, k1 LIMIT 10 diff --git a/parser/testdata/00033_fixed_string_to_string/ast.json b/parser/testdata/00033_fixed_string_to_string/ast.json new file mode 100644 index 000000000..a8c1a03ad --- /dev/null +++ b/parser/testdata/00033_fixed_string_to_string/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toFixedString (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.002180112, + "rows_read": 10, + "bytes_read": 381 + } +} diff --git a/parser/testdata/sample/metadata.json b/parser/testdata/00033_fixed_string_to_string/metadata.json similarity index 100% rename from parser/testdata/sample/metadata.json rename to parser/testdata/00033_fixed_string_to_string/metadata.json diff --git a/parser/testdata/00033_fixed_string_to_string/query.sql b/parser/testdata/00033_fixed_string_to_string/query.sql new file mode 100644 index 000000000..5bf1f3d8c --- /dev/null +++ b/parser/testdata/00033_fixed_string_to_string/query.sql @@ -0,0 +1 @@ +SELECT toString(toFixedString('', 10)) diff --git a/parser/testdata/00034_aggregate_key_fixed_string/ast.json b/parser/testdata/00034_aggregate_key_fixed_string/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00034_aggregate_key_fixed_string/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/sample_n/metadata.json b/parser/testdata/00034_aggregate_key_fixed_string/metadata.json similarity index 100% rename from 
rename to parser/testdata/00034_aggregate_key_fixed_string/metadata.json
diff --git a/parser/testdata/00034_aggregate_key_fixed_string/query.sql b/parser/testdata/00034_aggregate_key_fixed_string/query.sql
new file mode 100644
index 000000000..23a895f4e
--- /dev/null
+++ b/parser/testdata/00034_aggregate_key_fixed_string/query.sql
@@ -0,0 +1,2 @@
+-- Tags: stateful
+SELECT toFixedString(substring(SearchPhrase, 1, 17), 17) AS k1, count() AS c FROM test.hits GROUP BY k1 ORDER BY c DESC, k1 LIMIT 10
diff --git a/parser/testdata/00034_fixed_string_to_number/ast.json b/parser/testdata/00034_fixed_string_to_number/ast.json
new file mode 100644
index 000000000..49d7fab1d
--- /dev/null
+++ b/parser/testdata/00034_fixed_string_to_number/ast.json
@@ -0,0 +1,73 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 3)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function toUInt16 (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Function toFixedString (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 2)"
+		},
+		{
+			"explain": "        Function toString (children 1)"
+		},
+		{
+			"explain": "         ExpressionList (children 1)"
+		},
+		{
+			"explain": "          Identifier number"
+		},
+		{
+			"explain": "        Literal UInt64_3"
+		},
+		{
+			"explain": "   TablesInSelectQuery (children 1)"
+		},
+		{
+			"explain": "    TablesInSelectQueryElement (children 1)"
+		},
+		{
+			"explain": "     TableExpression (children 1)"
+		},
+		{
+			"explain": "      TableIdentifier system.numbers"
+		},
+		{
+			"explain": "   Literal UInt64_111"
+		}
+	],
+
+	"rows": 17,
+
+	"statistics":
+	{
+		"elapsed": 0.001630931,
+		"rows_read": 17,
+		"bytes_read": 687
+	}
+}
diff --git a/parser/testdata/sample_offset/metadata.json b/parser/testdata/00034_fixed_string_to_number/metadata.json
similarity index 100%
rename from parser/testdata/sample_offset/metadata.json
rename to parser/testdata/00034_fixed_string_to_number/metadata.json
diff --git a/parser/testdata/00034_fixed_string_to_number/query.sql b/parser/testdata/00034_fixed_string_to_number/query.sql
new file mode 100644
index 000000000..b7f45cf76
--- /dev/null
+++ b/parser/testdata/00034_fixed_string_to_number/query.sql
@@ -0,0 +1 @@
+SELECT toUInt16(toFixedString(toString(number), 3)) FROM system.numbers LIMIT 111
diff --git a/parser/testdata/00035_aggregate_keys128/ast.json b/parser/testdata/00035_aggregate_keys128/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00035_aggregate_keys128/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/select_final/metadata.json b/parser/testdata/00035_aggregate_keys128/metadata.json
similarity index 100%
rename from parser/testdata/select_final/metadata.json
rename to parser/testdata/00035_aggregate_keys128/metadata.json
diff --git a/parser/testdata/00035_aggregate_keys128/query.sql b/parser/testdata/00035_aggregate_keys128/query.sql
new file mode 100644
index 000000000..3f79c12f5
--- /dev/null
+++ b/parser/testdata/00035_aggregate_keys128/query.sql
@@ -0,0 +1,2 @@
+-- Tags: stateful
+SELECT SearchEngineID AS k1, AdvEngineID AS k2, count() AS c FROM test.hits GROUP BY k1, k2 ORDER BY c DESC, k1, k2 LIMIT 10
diff --git a/parser/testdata/00035_function_array_return_type/ast.json b/parser/testdata/00035_function_array_return_type/ast.json
new file mode 100644
index 000000000..2488532ef
--- /dev/null
+++ b/parser/testdata/00035_function_array_return_type/ast.json
@@ -0,0 +1,37 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Literal Array_[UInt64_1]"
+		}
+	],
+
+	"rows": 5,
+
+	"statistics":
+	{
+		"elapsed": 0.001267856,
+		"rows_read": 5,
+		"bytes_read": 185
+	}
+}
diff --git a/parser/testdata/select_from_system_table/metadata.json b/parser/testdata/00035_function_array_return_type/metadata.json
similarity index 100%
rename from parser/testdata/select_from_system_table/metadata.json
rename to parser/testdata/00035_function_array_return_type/metadata.json
diff --git a/parser/testdata/00035_function_array_return_type/query.sql b/parser/testdata/00035_function_array_return_type/query.sql
new file mode 100644
index 000000000..f698f6570
--- /dev/null
+++ b/parser/testdata/00035_function_array_return_type/query.sql
@@ -0,0 +1,5 @@
+SELECT [1];
+SELECT [1, 255];
+SELECT [1, 256];
+SELECT [-1, -2.5, 15, 699];
+SELECT ['q', 'w', 'ert', 'y'];
diff --git a/parser/testdata/00036_aggregate_hashed/ast.json b/parser/testdata/00036_aggregate_hashed/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00036_aggregate_hashed/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/show_columns/metadata.json b/parser/testdata/00036_aggregate_hashed/metadata.json
similarity index 100%
rename from parser/testdata/show_columns/metadata.json
rename to parser/testdata/00036_aggregate_hashed/metadata.json
diff --git a/parser/testdata/00036_aggregate_hashed/query.sql b/parser/testdata/00036_aggregate_hashed/query.sql
new file mode 100644
index 000000000..21945dd05
--- /dev/null
+++ b/parser/testdata/00036_aggregate_hashed/query.sql
@@ -0,0 +1,2 @@
+-- Tags: stateful
+SELECT SearchEngineID AS k1, SearchPhrase AS k2, count() AS c FROM test.hits GROUP BY k1, k2 ORDER BY c DESC, k1, k2 LIMIT 10
diff --git a/parser/testdata/00036_array_element/ast.json b/parser/testdata/00036_array_element/ast.json
new file mode 100644
index 000000000..4af7d3b33
--- /dev/null
+++ b/parser/testdata/00036_array_element/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery array_element_test (children 1)"
+		},
+		{
+			"explain": " Identifier array_element_test"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001372226,
+		"rows_read": 2,
+		"bytes_read": 88
+	}
+}
diff --git a/parser/testdata/show_create_database/metadata.json b/parser/testdata/00036_array_element/metadata.json
similarity index 100%
rename from parser/testdata/show_create_database/metadata.json
rename to parser/testdata/00036_array_element/metadata.json
diff --git a/parser/testdata/00036_array_element/query.sql b/parser/testdata/00036_array_element/query.sql
new file mode 100644
index 000000000..eba85d29c
--- /dev/null
+++ b/parser/testdata/00036_array_element/query.sql
@@ -0,0 +1,31 @@
+DROP TABLE IF EXISTS array_element_test;
+CREATE TABLE array_element_test (arr Array(Int32), id Int32) ENGINE = Memory;
+insert into array_element_test VALUES ([11,12,13], 2), ([11,12], 3), ([11,12,13], -1), ([11,12], -2), ([11,12], -3), ([11], 0);
+select arr[id] from array_element_test;
+
+DROP TABLE IF EXISTS array_element_test;
+CREATE TABLE array_element_test (arr Array(Int32), id UInt32) ENGINE = Memory;
+insert into array_element_test VALUES ([11,12,13], 2), ([11,12], 3), ([11,12,13], 1), ([11,12], 4), ([11], 0);
+select arr[id] from array_element_test;
+
+DROP TABLE IF EXISTS array_element_test;
+CREATE TABLE array_element_test (arr Array(String), id Int32) ENGINE = Memory;
+insert into array_element_test VALUES (['Abc','Df','Q'], 2), (['Abc','DEFQ'], 3), (['ABC','Q','ERT'], -1), (['Ab','ber'], -2), (['AB','asd'], -3), (['A'], 0);
+select arr[id] from array_element_test;
+
+DROP TABLE IF EXISTS array_element_test;
+CREATE TABLE array_element_test (arr Array(String), id UInt32) ENGINE = Memory;
+insert into array_element_test VALUES (['Abc','Df','Q'], 2), (['Abc','DEFQ'], 3), (['ABC','Q','ERT'], 1), (['Ab','ber'], 4), (['A'], 0);
+select arr[id] from array_element_test;
+
+DROP TABLE IF EXISTS array_element_test;
+CREATE TABLE array_element_test (id UInt32) ENGINE = Memory;
+insert into array_element_test VALUES (2), (1), (4), (3), (0);
+select [1, 2, 3] as arr, arr[id] from array_element_test;
+
+DROP TABLE IF EXISTS array_element_test;
+CREATE TABLE array_element_test (id Int32) ENGINE = Memory;
+insert into array_element_test VALUES (-2), (1), (-4), (3), (2), (-1), (4), (-3), (0);
+select [1, 2, 3] as arr, arr[id] from array_element_test;
+
+DROP TABLE array_element_test;
diff --git a/parser/testdata/00037_totals_limit/ast.json b/parser/testdata/00037_totals_limit/ast.json
new file mode 100644
index 000000000..1face667d
--- /dev/null
+++ b/parser/testdata/00037_totals_limit/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001616272,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/show_create_table/metadata.json b/parser/testdata/00037_totals_limit/metadata.json
similarity index 100%
rename from parser/testdata/show_create_table/metadata.json
rename to parser/testdata/00037_totals_limit/metadata.json
diff --git a/parser/testdata/00037_totals_limit/query.sql b/parser/testdata/00037_totals_limit/query.sql
new file mode 100644
index 000000000..e6ddd3fa9
--- /dev/null
+++ b/parser/testdata/00037_totals_limit/query.sql
@@ -0,0 +1,3 @@
+SET output_format_write_statistics = 0;
+
+SELECT count(), arrayJoin([1, 2, 3]) AS n GROUP BY n WITH TOTALS ORDER BY n LIMIT 1 FORMAT JSON;
diff --git a/parser/testdata/00037_uniq_state_merge1/ast.json b/parser/testdata/00037_uniq_state_merge1/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00037_uniq_state_merge1/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/show_dictionaries/metadata.json b/parser/testdata/00037_uniq_state_merge1/metadata.json
similarity index 100%
rename from parser/testdata/show_dictionaries/metadata.json
rename to parser/testdata/00037_uniq_state_merge1/metadata.json
diff --git a/parser/testdata/00037_uniq_state_merge1/query.sql b/parser/testdata/00037_uniq_state_merge1/query.sql
new file mode 100644
index 000000000..3d79df057
--- /dev/null
+++ b/parser/testdata/00037_uniq_state_merge1/query.sql
@@ -0,0 +1,4 @@
+-- Tags: stateful
+SET max_bytes_before_external_group_by = '1G';
+SET max_bytes_ratio_before_external_group_by = 0;
+SELECT k, any(u) AS u, uniqMerge(us) AS us FROM (SELECT domain(URL) AS k, uniq(UserID) AS u, uniqState(UserID) AS us FROM test.hits GROUP BY k) GROUP BY k ORDER BY u DESC, k ASC LIMIT 100
diff --git a/parser/testdata/00038_totals_limit/ast.json b/parser/testdata/00038_totals_limit/ast.json
new file mode 100644
index 000000000..7193c883c
--- /dev/null
+++ b/parser/testdata/00038_totals_limit/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001239512,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/show_processlist/metadata.json b/parser/testdata/00038_totals_limit/metadata.json
similarity index 100%
rename from parser/testdata/show_processlist/metadata.json
rename to parser/testdata/00038_totals_limit/metadata.json
diff --git a/parser/testdata/00038_totals_limit/query.sql b/parser/testdata/00038_totals_limit/query.sql
new file mode 100644
index 000000000..804378068
--- /dev/null
+++ b/parser/testdata/00038_totals_limit/query.sql
@@ -0,0 +1,2 @@
+SET enable_positional_arguments = 0;
+SELECT count() GROUP BY 1 WITH TOTALS LIMIT 1;
diff --git a/parser/testdata/00038_uniq_state_merge2/ast.json b/parser/testdata/00038_uniq_state_merge2/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00038_uniq_state_merge2/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/start_fetches/metadata.json b/parser/testdata/00038_uniq_state_merge2/metadata.json
similarity index 100%
rename from parser/testdata/start_fetches/metadata.json
rename to parser/testdata/00038_uniq_state_merge2/metadata.json
diff --git a/parser/testdata/00038_uniq_state_merge2/query.sql b/parser/testdata/00038_uniq_state_merge2/query.sql
new file mode 100644
index 000000000..fb9bf2ace
--- /dev/null
+++ b/parser/testdata/00038_uniq_state_merge2/query.sql
@@ -0,0 +1,4 @@
+-- Tags: stateful
+SET max_bytes_before_external_group_by = '1G';
+SET max_bytes_ratio_before_external_group_by = 0;
+SELECT topLevelDomain(concat('http://', k)) AS tld, sum(u) AS u, uniqMerge(us) AS us FROM (SELECT domain(URL) AS k, uniq(UserID) AS u, uniqState(UserID) AS us FROM test.hits GROUP BY k) GROUP BY tld ORDER BY u DESC, tld ASC LIMIT 100
diff --git a/parser/testdata/00039_primary_key/ast.json b/parser/testdata/00039_primary_key/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00039_primary_key/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/start_merges/metadata.json b/parser/testdata/00039_primary_key/metadata.json
similarity index 100%
rename from parser/testdata/start_merges/metadata.json
rename to parser/testdata/00039_primary_key/metadata.json
diff --git a/parser/testdata/00039_primary_key/query.sql b/parser/testdata/00039_primary_key/query.sql
new file mode 100644
index 000000000..e03b9e87a
--- /dev/null
+++ b/parser/testdata/00039_primary_key/query.sql
@@ -0,0 +1,3 @@
+-- Tags: stateful
+SELECT count() FROM test.hits WHERE CounterID < 10000;
+SELECT count() FROM test.hits WHERE 10000 > CounterID;
diff --git a/parser/testdata/00040_aggregating_materialized_view/ast.json b/parser/testdata/00040_aggregating_materialized_view/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00040_aggregating_materialized_view/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/start_moves/metadata.json b/parser/testdata/00040_aggregating_materialized_view/metadata.json
similarity index 100%
rename from parser/testdata/start_moves/metadata.json
parser/testdata/start_moves/metadata.json rename to parser/testdata/00040_aggregating_materialized_view/metadata.json diff --git a/parser/testdata/00040_aggregating_materialized_view/query.sql b/parser/testdata/00040_aggregating_materialized_view/query.sql new file mode 100644 index 000000000..e5257cfc8 --- /dev/null +++ b/parser/testdata/00040_aggregating_materialized_view/query.sql @@ -0,0 +1,46 @@ +-- Tags: stateful +DROP TABLE IF EXISTS basic_00040; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE MATERIALIZED VIEW basic_00040 +ENGINE = AggregatingMergeTree(StartDate, (CounterID, StartDate), 8192) +POPULATE AS +SELECT + CounterID, + StartDate, + sumState(Sign) AS Visits, + uniqState(UserID) AS Users +FROM test.visits +GROUP BY CounterID, StartDate; + + +SELECT + StartDate, + sumMerge(Visits) AS Visits, + uniqMerge(Users) AS Users +FROM basic_00040 +GROUP BY StartDate +ORDER BY StartDate; + + +SELECT + StartDate, + sumMerge(Visits) AS Visits, + uniqMerge(Users) AS Users +FROM basic_00040 +WHERE CounterID = 942285 +GROUP BY StartDate +ORDER BY StartDate; + + +SELECT + StartDate, + sum(Sign) AS Visits, + uniq(UserID) AS Users +FROM test.visits +WHERE CounterID = 942285 +GROUP BY StartDate +ORDER BY StartDate; + + +DROP TABLE basic_00040; diff --git a/parser/testdata/00040_array_enumerate_uniq/ast.json b/parser/testdata/00040_array_enumerate_uniq/ast.json new file mode 100644 index 000000000..26b72f4d8 --- /dev/null +++ b/parser/testdata/00040_array_enumerate_uniq/ast.json @@ -0,0 +1,184 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function max (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier arr" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayEnumerateUniq (alias arr) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function groupArray (alias nums) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function intDiv (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_54321" + }, + { + "explain": " Function groupArray (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function intDiv (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_98765" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 
1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1000000" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function intHash32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_100000" + } + ], + + "rows": 54, + + "statistics": + { + "elapsed": 0.001671356, + "rows_read": 54, + "bytes_read": 2565 + } +} diff --git a/parser/testdata/start_replication_queues/metadata.json b/parser/testdata/00040_array_enumerate_uniq/metadata.json similarity index 100% rename from parser/testdata/start_replication_queues/metadata.json rename to parser/testdata/00040_array_enumerate_uniq/metadata.json diff --git a/parser/testdata/00040_array_enumerate_uniq/query.sql b/parser/testdata/00040_array_enumerate_uniq/query.sql new file mode 100644 index 000000000..6a67d8480 --- /dev/null +++ b/parser/testdata/00040_array_enumerate_uniq/query.sql @@ -0,0 +1,5 @@ +SELECT max(arrayJoin(arr)) FROM (SELECT arrayEnumerateUniq(groupArray(intDiv(number, 54321)) AS nums, groupArray(toString(intDiv(number, 98765)))) AS arr FROM (SELECT number FROM system.numbers LIMIT 1000000) GROUP BY intHash32(number) % 100000); + +SELECT arrayEnumerateUniq([[1], [2], [34], [1]]); +SELECT arrayEnumerateUniq([(1, 2), (3, 4), (1, 2)]); + diff --git a/parser/testdata/00041_aggregating_materialized_view/ast.json b/parser/testdata/00041_aggregating_materialized_view/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00041_aggregating_materialized_view/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/start_ttl_merges/metadata.json b/parser/testdata/00041_aggregating_materialized_view/metadata.json similarity index 100% rename from parser/testdata/start_ttl_merges/metadata.json rename to parser/testdata/00041_aggregating_materialized_view/metadata.json diff --git a/parser/testdata/00041_aggregating_materialized_view/query.sql b/parser/testdata/00041_aggregating_materialized_view/query.sql new file mode 100644 index 000000000..f0f838260 --- /dev/null +++ b/parser/testdata/00041_aggregating_materialized_view/query.sql @@ -0,0 +1,75 @@ +-- Tags: stateful +DROP TABLE IF EXISTS basic; +DROP TABLE IF EXISTS visits_null; + +CREATE TABLE visits_null +( + CounterID UInt32, + StartDate Date, + Sign Int8, + UserID UInt64 +) ENGINE = Null; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE MATERIALIZED VIEW basic +ENGINE = AggregatingMergeTree(StartDate, (CounterID, StartDate), 8192) +AS SELECT + CounterID, + StartDate, + sumState(Sign) AS Visits, + uniqState(UserID) AS Users +FROM visits_null +GROUP BY CounterID, StartDate; + +INSERT INTO visits_null +SELECT + CounterID, + StartDate, + Sign, + UserID +FROM test.visits; + + +SELECT + StartDate, + sumMerge(Visits) AS Visits, + uniqMerge(Users) AS Users +FROM basic +GROUP BY StartDate +ORDER BY 
StartDate; + + +SELECT + StartDate, + sumMerge(Visits) AS Visits, + uniqMerge(Users) AS Users +FROM basic +WHERE CounterID = 942285 +GROUP BY StartDate +ORDER BY StartDate; + + +SELECT + StartDate, + sum(Sign) AS Visits, + uniq(UserID) AS Users +FROM test.visits +WHERE CounterID = 942285 +GROUP BY StartDate +ORDER BY StartDate; + + +OPTIMIZE TABLE basic; +OPTIMIZE TABLE basic; +OPTIMIZE TABLE basic; +OPTIMIZE TABLE basic; +OPTIMIZE TABLE basic; +OPTIMIZE TABLE basic; +OPTIMIZE TABLE basic; +OPTIMIZE TABLE basic; +OPTIMIZE TABLE basic; +OPTIMIZE TABLE basic; + + +DROP TABLE visits_null; +DROP TABLE basic; diff --git a/parser/testdata/00041_aggregation_remap/ast.json b/parser/testdata/00041_aggregation_remap/ast.json new file mode 100644 index 000000000..1d6261d63 --- /dev/null +++ b/parser/testdata/00041_aggregation_remap/ast.json @@ -0,0 +1,112 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 5)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_200000" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 30, + + "statistics": + { + "elapsed": 0.001577251, + "rows_read": 30, + "bytes_read": 1182 + } +} diff --git a/parser/testdata/stop_fetches/metadata.json b/parser/testdata/00041_aggregation_remap/metadata.json similarity index 100% rename from parser/testdata/stop_fetches/metadata.json rename to parser/testdata/00041_aggregation_remap/metadata.json diff --git a/parser/testdata/00041_aggregation_remap/query.sql b/parser/testdata/00041_aggregation_remap/query.sql new file mode 100644 index 000000000..0627f9a7b --- /dev/null +++ b/parser/testdata/00041_aggregation_remap/query.sql @@ -0,0 +1 @@ +SELECT number, count() FROM (SELECT number FROM system.numbers LIMIT 200000) GROUP BY number ORDER BY count(), number LIMIT 10 diff --git a/parser/testdata/00041_big_array_join/ast.json b/parser/testdata/00041_big_array_join/ast.json new file mode 100644 index 000000000..64f3b8c6c --- /dev/null +++ b/parser/testdata/00041_big_array_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": 
+ [ + { + "explain": "DropQuery big_array (children 1)" + }, + { + "explain": " Identifier big_array" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001352707, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/stop_merges/metadata.json b/parser/testdata/00041_big_array_join/metadata.json similarity index 100% rename from parser/testdata/stop_merges/metadata.json rename to parser/testdata/00041_big_array_join/metadata.json diff --git a/parser/testdata/00041_big_array_join/query.sql b/parser/testdata/00041_big_array_join/query.sql new file mode 100644 index 000000000..6486152da --- /dev/null +++ b/parser/testdata/00041_big_array_join/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS big_array; +CREATE TABLE big_array (x Array(UInt8)) ENGINE=TinyLog; +SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +INSERT INTO big_array SELECT groupArray(number % 255) AS x FROM (SELECT * FROM system.numbers LIMIT 1000000); + +SELECT count() FROM big_array ARRAY JOIN x; +SELECT count() FROM big_array ARRAY JOIN x AS y; +SELECT countIf(has(x, 10)), sum(y) FROM big_array ARRAY JOIN x AS y; +SELECT countIf(has(x, 10)) FROM big_array ARRAY JOIN x AS y; +SELECT countIf(has(x, 10)), sum(y) FROM big_array ARRAY JOIN x AS y WHERE 1; +SELECT countIf(has(x, 10)) FROM big_array ARRAY JOIN x AS y WHERE 1; +SELECT countIf(has(x, 10)), sum(y) FROM big_array ARRAY JOIN x AS y WHERE has(x,15); + +DROP TABLE big_array; diff --git a/parser/testdata/00042_any_left_join/ast.json b/parser/testdata/00042_any_left_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00042_any_left_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/stop_moves/metadata.json b/parser/testdata/00042_any_left_join/metadata.json similarity index 100% rename from parser/testdata/stop_moves/metadata.json rename to parser/testdata/00042_any_left_join/metadata.json diff --git a/parser/testdata/00042_any_left_join/query.sql b/parser/testdata/00042_any_left_join/query.sql new file mode 100644 index 000000000..c67ab5575 --- /dev/null +++ b/parser/testdata/00042_any_left_join/query.sql @@ -0,0 +1,23 @@ +-- Tags: stateful +SELECT + EventDate, + hits, + visits +FROM +( + SELECT + EventDate, + count() AS hits + FROM test.hits + GROUP BY EventDate +) ANY LEFT JOIN +( + SELECT + StartDate AS EventDate, + sum(Sign) AS visits + FROM test.visits + GROUP BY EventDate +) USING EventDate +ORDER BY hits DESC +LIMIT 10 +SETTINGS joined_subquery_requires_alias = 0; diff --git a/parser/testdata/00042_set/ast.json b/parser/testdata/00042_set/ast.json new file mode 100644 index 000000000..8c5cee93d --- /dev/null +++ b/parser/testdata/00042_set/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toUInt64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList 
(children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1100000" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001546592, + "rows_read": 20, + "bytes_read": 829 + } +} diff --git a/parser/testdata/stop_replication_queues/metadata.json b/parser/testdata/00042_set/metadata.json similarity index 100% rename from parser/testdata/stop_replication_queues/metadata.json rename to parser/testdata/00042_set/metadata.json diff --git a/parser/testdata/00042_set/query.sql b/parser/testdata/00042_set/query.sql new file mode 100644 index 000000000..d81a53ed9 --- /dev/null +++ b/parser/testdata/00042_set/query.sql @@ -0,0 +1 @@ +SELECT toUInt64(1) IN (SELECT * FROM system.numbers LIMIT 1100000) diff --git a/parser/testdata/00043_any_left_join/ast.json b/parser/testdata/00043_any_left_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00043_any_left_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/stop_ttl_merges/metadata.json b/parser/testdata/00043_any_left_join/metadata.json similarity index 100% rename from parser/testdata/stop_ttl_merges/metadata.json rename to parser/testdata/00043_any_left_join/metadata.json diff --git a/parser/testdata/00043_any_left_join/query.sql b/parser/testdata/00043_any_left_join/query.sql new file mode 100644 index 000000000..8a3af206c --- /dev/null +++ b/parser/testdata/00043_any_left_join/query.sql @@ -0,0 +1,17 @@ +-- Tags: stateful +SELECT + EventDate, + count() AS hits, + any(visits) +FROM test.hits ANY LEFT JOIN +( + SELECT + StartDate AS EventDate, + sum(Sign) AS visits + FROM test.visits + GROUP BY EventDate +) USING EventDate +GROUP BY EventDate +ORDER BY hits DESC +LIMIT 10 +SETTINGS joined_subquery_requires_alias = 0; diff --git a/parser/testdata/00043_summing_empty_part/ast.json b/parser/testdata/00043_summing_empty_part/ast.json new file mode 100644 index 000000000..3b372e03f --- /dev/null +++ b/parser/testdata/00043_summing_empty_part/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery empty_summing (children 1)" + }, + { + "explain": " Identifier empty_summing" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001276161, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/sync_replica/metadata.json b/parser/testdata/00043_summing_empty_part/metadata.json similarity index 100% rename from parser/testdata/sync_replica/metadata.json rename to parser/testdata/00043_summing_empty_part/metadata.json diff --git a/parser/testdata/00043_summing_empty_part/query.sql b/parser/testdata/00043_summing_empty_part/query.sql new file mode 100644 index 000000000..40cecabf3 --- /dev/null +++ b/parser/testdata/00043_summing_empty_part/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS empty_summing; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE empty_summing (d Date, k UInt64, v Int8) ENGINE=SummingMergeTree(d, k, 8192); + +INSERT INTO empty_summing VALUES ('2015-01-01', 1, 10); +INSERT INTO empty_summing VALUES ('2015-01-01', 1, -10); + +OPTIMIZE TABLE empty_summing; +SELECT * FROM empty_summing; + +INSERT INTO empty_summing VALUES ('2015-01-01', 1, 4),('2015-01-01', 2, -9),('2015-01-01', 3, -14); 
+INSERT INTO empty_summing VALUES ('2015-01-01', 1, -2),('2015-01-01', 1, -2),('2015-01-01', 3, 14); +INSERT INTO empty_summing VALUES ('2015-01-01', 1, 0),('2015-01-01', 3, 0); + +OPTIMIZE TABLE empty_summing; +SELECT * FROM empty_summing; + +DROP TABLE empty_summing; diff --git a/parser/testdata/00044_any_left_join_string/ast.json b/parser/testdata/00044_any_left_join_string/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00044_any_left_join_string/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/topk_parametric/metadata.json b/parser/testdata/00044_any_left_join_string/metadata.json similarity index 100% rename from parser/testdata/topk_parametric/metadata.json rename to parser/testdata/00044_any_left_join_string/metadata.json diff --git a/parser/testdata/00044_any_left_join_string/query.sql b/parser/testdata/00044_any_left_join_string/query.sql new file mode 100644 index 000000000..3852ce4b0 --- /dev/null +++ b/parser/testdata/00044_any_left_join_string/query.sql @@ -0,0 +1,23 @@ +-- Tags: stateful +SELECT + domain, + hits, + visits +FROM +( + SELECT + domain(URL) AS domain, + count() AS hits + FROM test.hits + GROUP BY domain +) ANY LEFT JOIN +( + SELECT + domain(StartURL) AS domain, + sum(Sign) AS visits + FROM test.visits + GROUP BY domain +) USING domain +ORDER BY hits DESC +LIMIT 10 +SETTINGS joined_subquery_requires_alias = 0; diff --git a/parser/testdata/00044_sorting_by_string_descending/ast.json b/parser/testdata/00044_sorting_by_string_descending/ast.json new file mode 100644 index 000000000..64723168e --- /dev/null +++ b/parser/testdata/00044_sorting_by_string_descending/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier s" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function materialize (alias s) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'abc'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier s" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001295107, + "rows_read": 24, + "bytes_read": 1008 + } +} diff --git a/parser/testdata/tuple_element_dot/metadata.json b/parser/testdata/00044_sorting_by_string_descending/metadata.json similarity index 100% rename from parser/testdata/tuple_element_dot/metadata.json rename to parser/testdata/00044_sorting_by_string_descending/metadata.json diff --git a/parser/testdata/00044_sorting_by_string_descending/query.sql 
b/parser/testdata/00044_sorting_by_string_descending/query.sql new file mode 100644 index 000000000..328bfd5b3 --- /dev/null +++ b/parser/testdata/00044_sorting_by_string_descending/query.sql @@ -0,0 +1 @@ +SELECT s FROM (SELECT materialize('abc') AS s FROM system.numbers LIMIT 100) ORDER BY s DESC diff --git a/parser/testdata/00045_sorting_by_fixed_string_descending/ast.json b/parser/testdata/00045_sorting_by_fixed_string_descending/ast.json new file mode 100644 index 000000000..b0ca7f506 --- /dev/null +++ b/parser/testdata/00045_sorting_by_fixed_string_descending/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier s" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toFixedString (alias s) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'abc'" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier s" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.001802083, + "rows_read": 27, + "bytes_read": 1152 + } +} diff --git a/parser/testdata/union_distinct/metadata.json b/parser/testdata/00045_sorting_by_fixed_string_descending/metadata.json similarity index 100% rename from parser/testdata/union_distinct/metadata.json rename to parser/testdata/00045_sorting_by_fixed_string_descending/metadata.json diff --git a/parser/testdata/00045_sorting_by_fixed_string_descending/query.sql b/parser/testdata/00045_sorting_by_fixed_string_descending/query.sql new file mode 100644 index 000000000..8f0f331b6 --- /dev/null +++ b/parser/testdata/00045_sorting_by_fixed_string_descending/query.sql @@ -0,0 +1 @@ +SELECT s FROM (SELECT toFixedString(materialize('abc'), 3) AS s FROM system.numbers LIMIT 100) ORDER BY s DESC diff --git a/parser/testdata/00045_uniq_upto/ast.json b/parser/testdata/00045_uniq_upto/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00045_uniq_upto/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/use_default/metadata.json b/parser/testdata/00045_uniq_upto/metadata.json similarity index 100% rename from parser/testdata/use_default/metadata.json rename to parser/testdata/00045_uniq_upto/metadata.json diff --git a/parser/testdata/00045_uniq_upto/query.sql b/parser/testdata/00045_uniq_upto/query.sql new file mode 100644 index 000000000..a5b5aac2a --- 
/dev/null +++ b/parser/testdata/00045_uniq_upto/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT RegionID, uniqExact(UserID) AS u1, uniqUpTo(10)(UserID) AS u2 FROM test.visits GROUP BY RegionID HAVING u1 <= 11 AND u1 != u2 diff --git a/parser/testdata/00046_stored_aggregates_simple/ast.json b/parser/testdata/00046_stored_aggregates_simple/ast.json new file mode 100644 index 000000000..63eb3f5b8 --- /dev/null +++ b/parser/testdata/00046_stored_aggregates_simple/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery stored_aggregates (children 1)" + }, + { + "explain": " Identifier stored_aggregates" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00119504, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/with_scalar/metadata.json b/parser/testdata/00046_stored_aggregates_simple/metadata.json similarity index 100% rename from parser/testdata/with_scalar/metadata.json rename to parser/testdata/00046_stored_aggregates_simple/metadata.json diff --git a/parser/testdata/00046_stored_aggregates_simple/query.sql b/parser/testdata/00046_stored_aggregates_simple/query.sql new file mode 100644 index 000000000..2a4ee9fa5 --- /dev/null +++ b/parser/testdata/00046_stored_aggregates_simple/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS stored_aggregates; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE stored_aggregates +( + d Date, + Uniq AggregateFunction(uniq, UInt64) +) +ENGINE = AggregatingMergeTree(d, d, 8192); + +INSERT INTO stored_aggregates +SELECT + toDate('2014-06-01') AS d, + uniqState(number) AS Uniq +FROM +( + SELECT * FROM system.numbers LIMIT 1000 +); + +SELECT uniqMerge(Uniq) FROM stored_aggregates; + +DROP TABLE stored_aggregates; diff --git a/parser/testdata/00046_uniq_upto_distributed/ast.json b/parser/testdata/00046_uniq_upto_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00046_uniq_upto_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/with_subquery_cte/metadata.json b/parser/testdata/00046_uniq_upto_distributed/metadata.json similarity index 100% rename from parser/testdata/with_subquery_cte/metadata.json rename to parser/testdata/00046_uniq_upto_distributed/metadata.json diff --git a/parser/testdata/00046_uniq_upto_distributed/query.sql b/parser/testdata/00046_uniq_upto_distributed/query.sql new file mode 100644 index 000000000..d7da72f21 --- /dev/null +++ b/parser/testdata/00046_uniq_upto_distributed/query.sql @@ -0,0 +1,3 @@ +-- Tags: stateful, distributed + +SELECT RegionID, uniqExact(UserID) AS u1, uniqUpTo(10)(UserID) AS u2 FROM remote('127.0.0.{1,2}', test, visits) GROUP BY RegionID HAVING u1 <= 11 AND u1 != u2 diff --git a/parser/testdata/00047_bar/ast.json b/parser/testdata/00047_bar/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00047_bar/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00047_bar/metadata.json b/parser/testdata/00047_bar/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00047_bar/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00047_bar/query.sql b/parser/testdata/00047_bar/query.sql new file mode 100644 index 000000000..d0e7c3401 --- /dev/null +++ b/parser/testdata/00047_bar/query.sql @@ -0,0 +1,3 @@ +-- Tags: stateful +SELECT CounterID, count() AS c, bar(c, 0, 523264) FROM test.hits GROUP BY 
CounterID ORDER BY c DESC, CounterID ASC LIMIT 100; +SELECT CounterID, count() AS c, bar(c, 0, 523264) FROM test.hits GROUP BY CounterID ORDER BY c DESC, CounterID ASC LIMIT 100 SETTINGS optimize_aggregation_in_order = 1 diff --git a/parser/testdata/00047_stored_aggregates_complex/ast.json b/parser/testdata/00047_stored_aggregates_complex/ast.json new file mode 100644 index 000000000..8928ed5a7 --- /dev/null +++ b/parser/testdata/00047_stored_aggregates_complex/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery stored_aggregates (children 1)" + }, + { + "explain": " Identifier stored_aggregates" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001374841, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/00047_stored_aggregates_complex/metadata.json b/parser/testdata/00047_stored_aggregates_complex/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00047_stored_aggregates_complex/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00047_stored_aggregates_complex/query.sql b/parser/testdata/00047_stored_aggregates_complex/query.sql new file mode 100644 index 000000000..df5305c97 --- /dev/null +++ b/parser/testdata/00047_stored_aggregates_complex/query.sql @@ -0,0 +1,67 @@ +DROP TABLE IF EXISTS stored_aggregates; + +set max_insert_threads = 1; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE stored_aggregates +( + d Date, + k1 UInt64, + k2 String, + Sum AggregateFunction(sum, UInt64), + Avg AggregateFunction(avg, UInt64), + Uniq AggregateFunction(uniq, UInt64), + Any AggregateFunction(any, String), + AnyIf AggregateFunction(anyIf, String, UInt8), + Quantiles AggregateFunction(quantiles(0.5, 0.9), UInt64), + GroupArray AggregateFunction(groupArray, String) +) +ENGINE = AggregatingMergeTree(d, (d, k1, k2), 8192); + +INSERT INTO stored_aggregates +SELECT + toDate('2014-06-01') AS d, + intDiv(number, 100) AS k1, + toString(intDiv(number, 10)) AS k2, + sumState(number) AS Sum, + avgState(number) AS Avg, + uniqState(toUInt64(number % 7)) AS Uniq, + anyState(toString(number)) AS Any, + anyIfState(toString(number), number % 7 = 0) AS AnyIf, + quantilesState(0.5, 0.9)(number) AS Quantiles, + groupArrayState(toString(number)) AS GroupArray +FROM +( + SELECT * FROM system.numbers LIMIT 1000 +) +GROUP BY d, k1, k2 +ORDER BY d, k1, k2; + +SELECT d, k1, k2, + sumMerge(Sum), avgMerge(Avg), uniqMerge(Uniq), + anyMerge(Any), anyIfMerge(AnyIf), + arrayMap(x -> round(x, 6), quantilesMerge(0.5, 0.9)(Quantiles)), + groupArrayMerge(GroupArray) +FROM stored_aggregates +GROUP BY d, k1, k2 +ORDER BY d, k1, k2; + +SELECT d, k1, + sumMerge(Sum), avgMerge(Avg), uniqMerge(Uniq), + anyMerge(Any), anyIfMerge(AnyIf), + arrayMap(x -> round(x, 6), quantilesMerge(0.5, 0.9)(Quantiles)), + groupArrayMerge(GroupArray) +FROM stored_aggregates +GROUP BY d, k1 +ORDER BY d, k1; + +SELECT d, + sumMerge(Sum), avgMerge(Avg), uniqMerge(Uniq), + anyMerge(Any), anyIfMerge(AnyIf), + arrayMap(x -> round(x, 6), quantilesMerge(0.5, 0.9)(Quantiles)), + groupArrayMerge(GroupArray) +FROM stored_aggregates +GROUP BY d +ORDER BY d; + +DROP TABLE stored_aggregates; diff --git a/parser/testdata/00048_a_stored_aggregates_merge/ast.json b/parser/testdata/00048_a_stored_aggregates_merge/ast.json new file mode 100644 index 000000000..969b5920c --- /dev/null +++ b/parser/testdata/00048_a_stored_aggregates_merge/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ 
+ { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery stored_aggregates (children 1)" + }, + { + "explain": " Identifier stored_aggregates" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001177114, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/00048_a_stored_aggregates_merge/metadata.json b/parser/testdata/00048_a_stored_aggregates_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00048_a_stored_aggregates_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00048_a_stored_aggregates_merge/query.sql b/parser/testdata/00048_a_stored_aggregates_merge/query.sql new file mode 100644 index 000000000..0213ebf46 --- /dev/null +++ b/parser/testdata/00048_a_stored_aggregates_merge/query.sql @@ -0,0 +1,46 @@ +DROP TABLE IF EXISTS stored_aggregates; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE stored_aggregates +( + d Date, + Uniq AggregateFunction(uniq, UInt64) +) +ENGINE = AggregatingMergeTree(d, d, 8192); + +INSERT INTO stored_aggregates +SELECT + toDate(toUInt16(toDate('2014-06-01')) + intDiv(number, 100)) AS d, + uniqState(intDiv(number, 10)) AS Uniq +FROM +( + SELECT * FROM system.numbers LIMIT 1000 +) +GROUP BY d; + +SELECT uniqMerge(Uniq) FROM stored_aggregates; + +SELECT d, uniqMerge(Uniq) FROM stored_aggregates GROUP BY d ORDER BY d; + +INSERT INTO stored_aggregates +SELECT + toDate(toUInt16(toDate('2014-06-01')) + intDiv(number, 100)) AS d, + uniqState(intDiv(number + 50, 10)) AS Uniq +FROM +( + SELECT * FROM system.numbers LIMIT 500, 1000 +) +GROUP BY d; + +SELECT uniqMerge(Uniq) FROM stored_aggregates; + +SELECT d, uniqMerge(Uniq) FROM stored_aggregates GROUP BY d ORDER BY d; + +OPTIMIZE TABLE stored_aggregates; + +SELECT uniqMerge(Uniq) FROM stored_aggregates; + +SELECT d, uniqMerge(Uniq) FROM stored_aggregates GROUP BY d ORDER BY d; + +DROP TABLE stored_aggregates; + diff --git a/parser/testdata/00048_b_stored_aggregates_merge/ast.json b/parser/testdata/00048_b_stored_aggregates_merge/ast.json new file mode 100644 index 000000000..8b53981e9 --- /dev/null +++ b/parser/testdata/00048_b_stored_aggregates_merge/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery stored_aggregates (children 1)" + }, + { + "explain": " Identifier stored_aggregates" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001096626, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/00048_b_stored_aggregates_merge/metadata.json b/parser/testdata/00048_b_stored_aggregates_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00048_b_stored_aggregates_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00048_b_stored_aggregates_merge/query.sql b/parser/testdata/00048_b_stored_aggregates_merge/query.sql new file mode 100644 index 000000000..708794eab --- /dev/null +++ b/parser/testdata/00048_b_stored_aggregates_merge/query.sql @@ -0,0 +1,46 @@ +DROP TABLE IF EXISTS stored_aggregates; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE stored_aggregates +( + d Date, + Uniq AggregateFunction(uniq, UInt64) +) +ENGINE = AggregatingMergeTree(d, d, 8192); + +INSERT INTO stored_aggregates +SELECT + toDate(toUInt16(toDate('2014-06-01')) + intDiv(number, 100)) AS d, + uniqState(intDiv(number, 10)) AS Uniq +FROM +( + SELECT * FROM system.numbers 
LIMIT 1000 +) +GROUP BY d; + +SELECT uniqMerge(Uniq) FROM stored_aggregates; + +SELECT d, uniqMerge(Uniq) FROM stored_aggregates GROUP BY d ORDER BY d; + +INSERT INTO stored_aggregates +SELECT + toDate(toUInt16(toDate('2014-06-01')) + intDiv(number, 100)) AS d, + uniqState(intDiv(number + 50, 10)) AS Uniq +FROM +( + SELECT * FROM system.numbers LIMIT 500, 1000 +) +GROUP BY d; + +SELECT uniqMerge(Uniq) FROM stored_aggregates; + +SELECT d, uniqMerge(Uniq) FROM stored_aggregates GROUP BY d ORDER BY d; + +OPTIMIZE TABLE stored_aggregates; + +SELECT uniqMerge(Uniq) FROM stored_aggregates; + +SELECT d, uniqMerge(Uniq) FROM stored_aggregates GROUP BY d ORDER BY d; + +DROP TABLE stored_aggregates; + diff --git a/parser/testdata/00048_min_max/ast.json b/parser/testdata/00048_min_max/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00048_min_max/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00048_min_max/metadata.json b/parser/testdata/00048_min_max/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00048_min_max/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00048_min_max/query.sql b/parser/testdata/00048_min_max/query.sql new file mode 100644 index 000000000..ca816077b --- /dev/null +++ b/parser/testdata/00048_min_max/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT min(EventDate), max(EventDate) FROM test.hits diff --git a/parser/testdata/00049_any_left_join/ast.json b/parser/testdata/00049_any_left_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00049_any_left_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00049_any_left_join/metadata.json b/parser/testdata/00049_any_left_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00049_any_left_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00049_any_left_join/query.sql b/parser/testdata/00049_any_left_join/query.sql new file mode 100644 index 000000000..de726a77b --- /dev/null +++ b/parser/testdata/00049_any_left_join/query.sql @@ -0,0 +1,3 @@ +SELECT * FROM ( +SELECT number, joined FROM system.numbers ANY LEFT JOIN (SELECT number * 2 AS number, number * 10 + 1 AS joined FROM system.numbers LIMIT 10) js2 USING number LIMIT 10 +) ORDER BY ALL; diff --git a/parser/testdata/00049_max_string_if/ast.json b/parser/testdata/00049_max_string_if/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00049_max_string_if/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00049_max_string_if/metadata.json b/parser/testdata/00049_max_string_if/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00049_max_string_if/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00049_max_string_if/query.sql b/parser/testdata/00049_max_string_if/query.sql new file mode 100644 index 000000000..79f4b036c --- /dev/null +++ b/parser/testdata/00049_max_string_if/query.sql @@ -0,0 +1,3 @@ +-- Tags: stateful +SELECT CounterID, count(), maxIf(SearchPhrase, notEmpty(SearchPhrase)) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; +SELECT CounterID, count(), maxIf(SearchPhrase, notEmpty(SearchPhrase)) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20 SETTINGS optimize_aggregation_in_order = 1 diff --git 
a/parser/testdata/00050_any_left_join/ast.json b/parser/testdata/00050_any_left_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00050_any_left_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00050_any_left_join/metadata.json b/parser/testdata/00050_any_left_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00050_any_left_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00050_any_left_join/query.sql b/parser/testdata/00050_any_left_join/query.sql new file mode 100644 index 000000000..5019de9fc --- /dev/null +++ b/parser/testdata/00050_any_left_join/query.sql @@ -0,0 +1,10 @@ +SELECT a.*, b.* FROM +( + SELECT number AS k FROM system.numbers LIMIT 10 +) AS a +ANY LEFT JOIN +( + SELECT number * 2 AS k, number AS joined FROM system.numbers LIMIT 10 +) AS b +USING k +ORDER BY k; diff --git a/parser/testdata/00050_min_max/ast.json b/parser/testdata/00050_min_max/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00050_min_max/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00050_min_max/metadata.json b/parser/testdata/00050_min_max/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00050_min_max/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00050_min_max/query.sql b/parser/testdata/00050_min_max/query.sql new file mode 100644 index 000000000..2484b2773 --- /dev/null +++ b/parser/testdata/00050_min_max/query.sql @@ -0,0 +1,3 @@ +-- Tags: stateful +SELECT CounterID, min(WatchID), max(WatchID) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; +SELECT CounterID, min(WatchID), max(WatchID) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20 SETTINGS optimize_aggregation_in_order = 1 diff --git a/parser/testdata/00051_any_inner_join/ast.json b/parser/testdata/00051_any_inner_join/ast.json new file mode 100644 index 000000000..1d35045f2 --- /dev/null +++ b/parser/testdata/00051_any_inner_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001376026, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00051_any_inner_join/metadata.json b/parser/testdata/00051_any_inner_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00051_any_inner_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00051_any_inner_join/query.sql b/parser/testdata/00051_any_inner_join/query.sql new file mode 100644 index 000000000..6de35ef46 --- /dev/null +++ b/parser/testdata/00051_any_inner_join/query.sql @@ -0,0 +1,12 @@ +SET any_join_distinct_right_table_keys = 1; + +SELECT a.*, b.* FROM +( + SELECT number AS k FROM system.numbers LIMIT 10 +) AS a +ANY INNER JOIN +( + SELECT number * 2 AS k, number AS joined FROM system.numbers LIMIT 10 +) AS b +USING k +ORDER BY ALL; diff --git a/parser/testdata/00051_min_max_array/ast.json b/parser/testdata/00051_min_max_array/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00051_min_max_array/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00051_min_max_array/metadata.json b/parser/testdata/00051_min_max_array/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00051_min_max_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00051_min_max_array/query.sql b/parser/testdata/00051_min_max_array/query.sql new file mode 100644 index 000000000..a7771f35a --- /dev/null +++ b/parser/testdata/00051_min_max_array/query.sql @@ -0,0 +1,3 @@ +-- Tags: stateful +SELECT CounterID, count(), max(GoalsReached), min(GoalsReached), minIf(GoalsReached, notEmpty(GoalsReached)) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; +SELECT CounterID, count(), max(GoalsReached), min(GoalsReached), minIf(GoalsReached, notEmpty(GoalsReached)) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20 SETTINGS optimize_aggregation_in_order = 1 diff --git a/parser/testdata/00052_all_left_join/ast.json b/parser/testdata/00052_all_left_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00052_all_left_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00052_all_left_join/metadata.json b/parser/testdata/00052_all_left_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00052_all_left_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00052_all_left_join/query.sql b/parser/testdata/00052_all_left_join/query.sql new file mode 100644 index 000000000..0315b7934 --- /dev/null +++ b/parser/testdata/00052_all_left_join/query.sql @@ -0,0 +1,10 @@ +SELECT * FROM +( + SELECT number AS k FROM system.numbers LIMIT 10 +) js1 +ALL LEFT JOIN +( + SELECT intDiv(number, 2) AS k, number AS joined FROM system.numbers LIMIT 10 +) js2 +USING k +ORDER BY ALL; diff --git a/parser/testdata/00052_group_by_in/ast.json b/parser/testdata/00052_group_by_in/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00052_group_by_in/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00052_group_by_in/metadata.json b/parser/testdata/00052_group_by_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00052_group_by_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00052_group_by_in/query.sql b/parser/testdata/00052_group_by_in/query.sql new file mode 100644 index 000000000..8590769b7 --- /dev/null +++ b/parser/testdata/00052_group_by_in/query.sql @@ -0,0 +1,7 @@ +-- Tags: stateful, no-parallel-replicas +-- https://github.com/ClickHouse/ClickHouse/issues/74716 + +select StartDate, TraficSourceID in (0) ? 
'type_in' : 'other' as traf_type, sum(Sign) +from test.visits +where CounterID = 842440 +group by StartDate, traf_type ORDER BY StartDate, traf_type diff --git a/parser/testdata/00053_all_inner_join/ast.json b/parser/testdata/00053_all_inner_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00053_all_inner_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00053_all_inner_join/metadata.json b/parser/testdata/00053_all_inner_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00053_all_inner_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00053_all_inner_join/query.sql b/parser/testdata/00053_all_inner_join/query.sql new file mode 100644 index 000000000..f4f84069d --- /dev/null +++ b/parser/testdata/00053_all_inner_join/query.sql @@ -0,0 +1,10 @@ +SELECT a.*, b.* FROM +( + SELECT number AS k FROM system.numbers LIMIT 10 +) AS a +ALL INNER JOIN +( + SELECT intDiv(number, 2) AS k, number AS joined FROM system.numbers LIMIT 10 +) AS b +USING k +ORDER BY k, joined; diff --git a/parser/testdata/00053_replicate_segfault/ast.json b/parser/testdata/00053_replicate_segfault/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00053_replicate_segfault/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00053_replicate_segfault/metadata.json b/parser/testdata/00053_replicate_segfault/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00053_replicate_segfault/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00053_replicate_segfault/query.sql b/parser/testdata/00053_replicate_segfault/query.sql new file mode 100644 index 000000000..bb703c98e --- /dev/null +++ b/parser/testdata/00053_replicate_segfault/query.sql @@ -0,0 +1,3 @@ +-- Tags: stateful, replica + +SELECT count() > 0 FROM (SELECT ParsedParams.Key1 AS p FROM test.visits WHERE arrayAll(y -> arrayExists(x -> y != x, p), p)) diff --git a/parser/testdata/00054_join_string/ast.json b/parser/testdata/00054_join_string/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00054_join_string/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00054_join_string/metadata.json b/parser/testdata/00054_join_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00054_join_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00054_join_string/query.sql b/parser/testdata/00054_join_string/query.sql new file mode 100644 index 000000000..0e7a5520e --- /dev/null +++ b/parser/testdata/00054_join_string/query.sql @@ -0,0 +1,11 @@ +SELECT * FROM +( + SELECT reinterpretAsString(number + reinterpretAsUInt8('A')) AS k FROM system.numbers LIMIT 10 +) js1 +ALL LEFT JOIN +( + SELECT reinterpretAsString(intDiv(number, 2) + reinterpretAsUInt8('A')) AS k, number AS joined FROM system.numbers LIMIT 10 +) js2 +USING k +ORDER BY k, joined +; diff --git a/parser/testdata/00054_merge_tree_partitions/ast.json b/parser/testdata/00054_merge_tree_partitions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00054_merge_tree_partitions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00054_merge_tree_partitions/metadata.json b/parser/testdata/00054_merge_tree_partitions/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00054_merge_tree_partitions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00054_merge_tree_partitions/query.sql b/parser/testdata/00054_merge_tree_partitions/query.sql new file mode 100644 index 000000000..f6afa126d --- /dev/null +++ b/parser/testdata/00054_merge_tree_partitions/query.sql @@ -0,0 +1,38 @@ +-- Tags: stateful +DROP TABLE IF EXISTS partitions; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE partitions (EventDate Date, CounterID UInt32) ENGINE = MergeTree(EventDate, CounterID, 8192); +INSERT INTO partitions SELECT EventDate + UserID % 365 AS EventDate, CounterID FROM test.hits WHERE CounterID = 1704509; + + +SELECT count() FROM partitions; +SELECT count() FROM partitions WHERE EventDate >= toDate('2015-01-01') AND EventDate < toDate('2015-02-01'); +SELECT count() FROM partitions WHERE EventDate < toDate('2015-01-01') OR EventDate >= toDate('2015-02-01'); + +ALTER TABLE partitions DETACH PARTITION 201501; + +SELECT count() FROM partitions; +SELECT count() FROM partitions WHERE EventDate >= toDate('2015-01-01') AND EventDate < toDate('2015-02-01'); +SELECT count() FROM partitions WHERE EventDate < toDate('2015-01-01') OR EventDate >= toDate('2015-02-01'); + +ALTER TABLE partitions ATTACH PARTITION 201501; + +SELECT count() FROM partitions; +SELECT count() FROM partitions WHERE EventDate >= toDate('2015-01-01') AND EventDate < toDate('2015-02-01'); +SELECT count() FROM partitions WHERE EventDate < toDate('2015-01-01') OR EventDate >= toDate('2015-02-01'); + + +ALTER TABLE partitions DETACH PARTITION 201403; + +SELECT count() FROM partitions; + +INSERT INTO partitions SELECT EventDate + UserID % 365 AS EventDate, CounterID FROM test.hits WHERE CounterID = 1704509 AND toStartOfMonth(EventDate) = toDate('2014-03-01'); + +SELECT count() FROM partitions; + +ALTER TABLE partitions ATTACH PARTITION 201403; + +SELECT count() FROM partitions; + + +DROP TABLE partitions; diff --git a/parser/testdata/00055_index_and_not/ast.json b/parser/testdata/00055_index_and_not/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00055_index_and_not/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00055_index_and_not/metadata.json b/parser/testdata/00055_index_and_not/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00055_index_and_not/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00055_index_and_not/query.sql b/parser/testdata/00055_index_and_not/query.sql new file mode 100644 index 000000000..f0046c77b --- /dev/null +++ b/parser/testdata/00055_index_and_not/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT count() FROM test.hits WHERE NOT (EventDate >= toDate('2015-01-01') AND EventDate < toDate('2015-02-01')) diff --git a/parser/testdata/00055_join_two_numbers/ast.json b/parser/testdata/00055_join_two_numbers/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00055_join_two_numbers/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00055_join_two_numbers/metadata.json b/parser/testdata/00055_join_two_numbers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00055_join_two_numbers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00055_join_two_numbers/query.sql b/parser/testdata/00055_join_two_numbers/query.sql new file mode 100644 index 
000000000..572062d45 --- /dev/null +++ b/parser/testdata/00055_join_two_numbers/query.sql @@ -0,0 +1,10 @@ +SELECT left, right FROM +( + SELECT number % 4 AS k1, number % 3 AS k2, number AS left FROM system.numbers LIMIT 10 +) js1 +ALL LEFT JOIN +( + SELECT number % 2 AS k1, number % 6 AS k2, number AS right FROM system.numbers LIMIT 10 +) js2 +USING k1, k2 +ORDER BY left, right; diff --git a/parser/testdata/00056_join_number_string/ast.json b/parser/testdata/00056_join_number_string/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00056_join_number_string/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00056_join_number_string/metadata.json b/parser/testdata/00056_join_number_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00056_join_number_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00056_join_number_string/query.sql b/parser/testdata/00056_join_number_string/query.sql new file mode 100644 index 000000000..6b8e54ce5 --- /dev/null +++ b/parser/testdata/00056_join_number_string/query.sql @@ -0,0 +1,10 @@ +SELECT left, right FROM +( + SELECT number % 4 AS k1, toString(number % 3) AS k2, number AS left FROM system.numbers LIMIT 10 +) js1 +ALL LEFT JOIN +( + SELECT number % 2 AS k1, toString(number % 6) AS k2, number AS right FROM system.numbers LIMIT 10 +) js2 +USING k1, k2 +ORDER BY left, right; diff --git a/parser/testdata/00056_view/ast.json b/parser/testdata/00056_view/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00056_view/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00056_view/metadata.json b/parser/testdata/00056_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00056_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00056_view/query.sql b/parser/testdata/00056_view/query.sql new file mode 100644 index 000000000..ec5ea2529 --- /dev/null +++ b/parser/testdata/00056_view/query.sql @@ -0,0 +1,8 @@ +-- Tags: stateful +DROP TABLE IF EXISTS view; +CREATE VIEW view AS SELECT CounterID, count() AS c FROM test.hits GROUP BY CounterID; +SELECT count() FROM view; +SELECT c, count() FROM view GROUP BY c ORDER BY count() DESC LIMIT 10; +SELECT * FROM view ORDER BY c DESC LIMIT 10; +SELECT * FROM view SAMPLE 0.1 ORDER BY c DESC LIMIT 10; +DROP TABLE view; diff --git a/parser/testdata/00057_join_aliases/ast.json b/parser/testdata/00057_join_aliases/ast.json new file mode 100644 index 000000000..056b05a9a --- /dev/null +++ b/parser/testdata/00057_join_aliases/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.0011279, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00057_join_aliases/metadata.json b/parser/testdata/00057_join_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00057_join_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00057_join_aliases/query.sql b/parser/testdata/00057_join_aliases/query.sql new file mode 100644 index 000000000..52ef0d883 --- /dev/null +++ b/parser/testdata/00057_join_aliases/query.sql @@ -0,0 +1,9 @@ +SET query_plan_join_swap_table = 0; + +SELECT * FROM ( + SELECT number, n, j1, j2 + FROM (SELECT number, 
number / 2 AS n FROM system.numbers) js1 + ANY LEFT JOIN (SELECT number / 3 AS n, number AS j1, 'Hello' AS j2 FROM system.numbers LIMIT 10) js2 + USING n LIMIT 10 +) ORDER BY n +SETTINGS join_algorithm = 'hash'; -- the query does not finish with merge join diff --git a/parser/testdata/00059_merge_sorting_empty_array_joined/ast.json b/parser/testdata/00059_merge_sorting_empty_array_joined/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00059_merge_sorting_empty_array_joined/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00059_merge_sorting_empty_array_joined/metadata.json b/parser/testdata/00059_merge_sorting_empty_array_joined/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00059_merge_sorting_empty_array_joined/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00059_merge_sorting_empty_array_joined/query.sql b/parser/testdata/00059_merge_sorting_empty_array_joined/query.sql new file mode 100644 index 000000000..9d3326633 --- /dev/null +++ b/parser/testdata/00059_merge_sorting_empty_array_joined/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT CounterID FROM test.visits ARRAY JOIN Goals.ID WHERE CounterID = 942285 ORDER BY CounterID diff --git a/parser/testdata/00059_shard_global_in/ast.json b/parser/testdata/00059_shard_global_in/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00059_shard_global_in/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00059_shard_global_in/metadata.json b/parser/testdata/00059_shard_global_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00059_shard_global_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00059_shard_global_in/query.sql b/parser/testdata/00059_shard_global_in/query.sql new file mode 100644 index 000000000..e9ab21490 --- /dev/null +++ b/parser/testdata/00059_shard_global_in/query.sql @@ -0,0 +1,3 @@ +-- Tags: shard + +SELECT number FROM remote('127.0.0.{2,3}', system, numbers) WHERE number GLOBAL IN (SELECT number FROM remote('127.0.0.{2,3}', system, numbers) WHERE number % 2 = 1 LIMIT 10) LIMIT 10; diff --git a/parser/testdata/00059_shard_global_in_mergetree/ast.json b/parser/testdata/00059_shard_global_in_mergetree/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00059_shard_global_in_mergetree/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00059_shard_global_in_mergetree/metadata.json b/parser/testdata/00059_shard_global_in_mergetree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00059_shard_global_in_mergetree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00059_shard_global_in_mergetree/query.sql b/parser/testdata/00059_shard_global_in_mergetree/query.sql new file mode 100644 index 000000000..62eec6f32 --- /dev/null +++ b/parser/testdata/00059_shard_global_in_mergetree/query.sql @@ -0,0 +1,25 @@ +-- Tags: shard + +-- test for #56790 + +DROP TABLE IF EXISTS test_local; + +CREATE TABLE test_local (x Int64) ENGINE = MergeTree order by x as select * from numbers(10); + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local); + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local) where 'XXX' global in (select 'XXX'); + +select count() from remote('127.0.0.1,127.0.0.2', 
currentDatabase(), test_local) where * global in (select * from numbers(10)); + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local) where * in (select * from numbers(10)); + +set prefer_localhost_replica=0; + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local) where 'XXX' global in (select 'XXX'); + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local) where * global in (select * from numbers(10)); + +select count() from remote('127.0.0.1,127.0.0.2', currentDatabase(), test_local) where * in (select * from numbers(10)); + +DROP TABLE test_local; diff --git a/parser/testdata/00060_date_lut/ast.json b/parser/testdata/00060_date_lut/ast.json new file mode 100644 index 000000000..87fd3bc82 --- /dev/null +++ b/parser/testdata/00060_date_lut/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '1970-01-01 14:25:36'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001146519, + "rows_read": 9, + "bytes_read": 364 + } +} diff --git a/parser/testdata/00060_date_lut/metadata.json b/parser/testdata/00060_date_lut/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00060_date_lut/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00060_date_lut/query.sql b/parser/testdata/00060_date_lut/query.sql new file mode 100644 index 000000000..1209be85f --- /dev/null +++ b/parser/testdata/00060_date_lut/query.sql @@ -0,0 +1 @@ +SELECT toString(toDateTime('1970-01-01 14:25:36')) diff --git a/parser/testdata/00060_move_to_prewhere_and_sets/ast.json b/parser/testdata/00060_move_to_prewhere_and_sets/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00060_move_to_prewhere_and_sets/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00060_move_to_prewhere_and_sets/metadata.json b/parser/testdata/00060_move_to_prewhere_and_sets/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00060_move_to_prewhere_and_sets/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00060_move_to_prewhere_and_sets/query.sql b/parser/testdata/00060_move_to_prewhere_and_sets/query.sql new file mode 100644 index 000000000..a8c228abf --- /dev/null +++ b/parser/testdata/00060_move_to_prewhere_and_sets/query.sql @@ -0,0 +1,3 @@ +-- Tags: stateful +SET optimize_move_to_prewhere = 1; +SELECT uniq(URL) FROM test.hits WHERE TraficSourceID IN (7); diff --git a/parser/testdata/00061_merge_tree_alter/ast.json b/parser/testdata/00061_merge_tree_alter/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00061_merge_tree_alter/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00061_merge_tree_alter/metadata.json b/parser/testdata/00061_merge_tree_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00061_merge_tree_alter/metadata.json @@ -0,0 +1 @@ 
+{"todo": true} diff --git a/parser/testdata/00061_merge_tree_alter/query.sql b/parser/testdata/00061_merge_tree_alter/query.sql new file mode 100644 index 000000000..f2a36d6e5 --- /dev/null +++ b/parser/testdata/00061_merge_tree_alter/query.sql @@ -0,0 +1,73 @@ + +DROP TABLE IF EXISTS alter_00061; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE alter_00061 (d Date, k UInt64, i32 Int32) ENGINE=MergeTree(d, k, 8192); + +INSERT INTO alter_00061 VALUES ('2015-01-01', 10, 42); + +DESC TABLE alter_00061; +SHOW CREATE TABLE alter_00061; +SELECT * FROM alter_00061 ORDER BY k; + +ALTER TABLE alter_00061 ADD COLUMN n Nested(ui8 UInt8, s String); +INSERT INTO alter_00061 VALUES ('2015-01-01', 8, 40, [1,2,3], ['12','13','14']); + +DESC TABLE alter_00061; +SHOW CREATE TABLE alter_00061; +SELECT * FROM alter_00061 ORDER BY k; + +ALTER TABLE alter_00061 ADD COLUMN `n.d` Array(Date); +INSERT INTO alter_00061 VALUES ('2015-01-01', 7, 39, [10,20,30], ['120','130','140'],['2000-01-01','2000-01-01','2000-01-03']); + +DESC TABLE alter_00061; +SHOW CREATE TABLE alter_00061; +SELECT * FROM alter_00061 ORDER BY k; + +ALTER TABLE alter_00061 ADD COLUMN s String DEFAULT '0'; +INSERT INTO alter_00061 VALUES ('2015-01-01', 6,38,[10,20,30],['asd','qwe','qwe'],['2000-01-01','2000-01-01','2000-01-03'],'100500'); + +DESC TABLE alter_00061; +SHOW CREATE TABLE alter_00061; +SELECT * FROM alter_00061 ORDER BY k; + +ALTER TABLE alter_00061 DROP COLUMN `n.d`, MODIFY COLUMN s Int64; + +DESC TABLE alter_00061; +SHOW CREATE TABLE alter_00061; +SELECT * FROM alter_00061 ORDER BY k; + +ALTER TABLE alter_00061 ADD COLUMN `n.d` Array(Date), MODIFY COLUMN s UInt32; + +DESC TABLE alter_00061; +SHOW CREATE TABLE alter_00061; +SELECT * FROM alter_00061 ORDER BY k; + +OPTIMIZE TABLE alter_00061; + +SELECT * FROM alter_00061 ORDER BY k; + +ALTER TABLE alter_00061 DROP COLUMN n.ui8, DROP COLUMN n.d; + +DESC TABLE alter_00061; +SHOW CREATE TABLE alter_00061; +SELECT * FROM alter_00061 ORDER BY k; + +ALTER TABLE alter_00061 DROP COLUMN n.s; + +DESC TABLE alter_00061; +SHOW CREATE TABLE alter_00061; +SELECT * FROM alter_00061 ORDER BY k; + +ALTER TABLE alter_00061 ADD COLUMN n.s Array(String), ADD COLUMN n.d Array(Date); + +DESC TABLE alter_00061; +SHOW CREATE TABLE alter_00061; +SELECT * FROM alter_00061 ORDER BY k; + +ALTER TABLE alter_00061 DROP COLUMN n; + +DESC TABLE alter_00061; +SHOW CREATE TABLE alter_00061; +SELECT * FROM alter_00061 ORDER BY k; + +DROP TABLE alter_00061; diff --git a/parser/testdata/00061_storage_buffer/ast.json b/parser/testdata/00061_storage_buffer/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00061_storage_buffer/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00061_storage_buffer/metadata.json b/parser/testdata/00061_storage_buffer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00061_storage_buffer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00061_storage_buffer/query.sql b/parser/testdata/00061_storage_buffer/query.sql new file mode 100644 index 000000000..d03caa0a2 --- /dev/null +++ b/parser/testdata/00061_storage_buffer/query.sql @@ -0,0 +1,23 @@ +-- Tags: stateful +DROP TABLE IF EXISTS hits_dst; +DROP TABLE IF EXISTS hits_buffer; + +CREATE TABLE hits_dst AS test.hits +ENGINE = MergeTree +PARTITION BY toYYYYMM(EventDate) +ORDER BY (CounterID, EventDate, intHash32(UserID)) +SAMPLE BY intHash32(UserID) +SETTINGS storage_policy = 'default'; + +CREATE 
TABLE hits_buffer AS hits_dst ENGINE = Buffer(current_database(), hits_dst, 8, 600, 600, 1000000, 1000000, 100000000, 1000000000); + +INSERT INTO hits_buffer SELECT * FROM test.hits WHERE CounterID = 800784; +SELECT count() FROM hits_buffer; +SELECT count() FROM hits_dst; + +OPTIMIZE TABLE hits_buffer; +SELECT count() FROM hits_buffer; +SELECT count() FROM hits_dst; + +DROP TABLE hits_dst; +DROP TABLE hits_buffer; diff --git a/parser/testdata/00062_loyalty/ast.json b/parser/testdata/00062_loyalty/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00062_loyalty/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00062_loyalty/metadata.json b/parser/testdata/00062_loyalty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00062_loyalty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00062_loyalty/query.sql b/parser/testdata/00062_loyalty/query.sql new file mode 100644 index 000000000..aec8e253f --- /dev/null +++ b/parser/testdata/00062_loyalty/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT loyalty, count() AS c, bar(log(c + 1) * 1000, 0, log(6000) * 1000, 80) FROM (SELECT UserID, toInt8((yandex > google ? yandex / (yandex + google) : -google / (yandex + google)) * 10) AS loyalty FROM (SELECT UserID, sum(SearchEngineID = 2) AS yandex, sum(SearchEngineID = 3) AS google FROM test.hits WHERE SearchEngineID = 2 OR SearchEngineID = 3 GROUP BY UserID HAVING yandex + google > 10)) GROUP BY loyalty ORDER BY loyalty diff --git a/parser/testdata/00062_replicated_merge_tree_alter_zookeeper_long/ast.json b/parser/testdata/00062_replicated_merge_tree_alter_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00062_replicated_merge_tree_alter_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00062_replicated_merge_tree_alter_zookeeper_long/metadata.json b/parser/testdata/00062_replicated_merge_tree_alter_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00062_replicated_merge_tree_alter_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00062_replicated_merge_tree_alter_zookeeper_long/query.sql b/parser/testdata/00062_replicated_merge_tree_alter_zookeeper_long/query.sql new file mode 100644 index 000000000..5be766ee8 --- /dev/null +++ b/parser/testdata/00062_replicated_merge_tree_alter_zookeeper_long/query.sql @@ -0,0 +1,115 @@ +-- Tags: long, replica, no-replicated-database, no-shared-merge-tree +-- Tag no-replicated-database: Old syntax is not allowed +-- no-shared-merge-tree: implemented another test + +DROP TABLE IF EXISTS replicated_alter1; +DROP TABLE IF EXISTS replicated_alter2; + +SET replication_alter_partitions_sync = 2; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE replicated_alter1 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_00062/alter', 'r1', d, k, 8192); +CREATE TABLE replicated_alter2 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_00062/alter', 'r2', d, k, 8192); + +INSERT INTO replicated_alter1 VALUES ('2015-01-01', 10, 42); + +DESC TABLE replicated_alter1; +SHOW CREATE TABLE replicated_alter1; +DESC TABLE replicated_alter2; +SHOW CREATE TABLE replicated_alter2; +SELECT * FROM replicated_alter1 ORDER BY k; + +ALTER TABLE replicated_alter1 ADD COLUMN dt 
DateTime('UTC'); +INSERT INTO replicated_alter1 VALUES ('2015-01-01', 9, 41, '1992-01-01 08:00:00'); + +DESC TABLE replicated_alter1; +SHOW CREATE TABLE replicated_alter1; +DESC TABLE replicated_alter2; +SHOW CREATE TABLE replicated_alter2; +SELECT * FROM replicated_alter1 ORDER BY k; + +ALTER TABLE replicated_alter1 ADD COLUMN n Nested(ui8 UInt8, s String); +INSERT INTO replicated_alter1 VALUES ('2015-01-01', 8, 40, '2012-12-12 12:12:12', [1,2,3], ['12','13','14']); + +DESC TABLE replicated_alter1; +SHOW CREATE TABLE replicated_alter1; +DESC TABLE replicated_alter2; +SHOW CREATE TABLE replicated_alter2; +SELECT * FROM replicated_alter1 ORDER BY k; + +ALTER TABLE replicated_alter1 ADD COLUMN `n.d` Array(Date); +INSERT INTO replicated_alter1 VALUES ('2015-01-01', 7, 39, '2014-07-14 13:26:50', [10,20,30], ['120','130','140'],['2000-01-01','2000-01-01','2000-01-03']); + +DESC TABLE replicated_alter1; +SHOW CREATE TABLE replicated_alter1; +DESC TABLE replicated_alter2; +SHOW CREATE TABLE replicated_alter2; +SELECT * FROM replicated_alter1 ORDER BY k; + +ALTER TABLE replicated_alter1 ADD COLUMN s String DEFAULT '0'; +INSERT INTO replicated_alter1 VALUES ('2015-01-01', 6,38,'2014-07-15 13:26:50',[10,20,30],['asd','qwe','qwe'],['2000-01-01','2000-01-01','2000-01-03'],'100500'); + +DESC TABLE replicated_alter1; +SHOW CREATE TABLE replicated_alter1; +DESC TABLE replicated_alter2; +SHOW CREATE TABLE replicated_alter2; +SELECT * FROM replicated_alter1 ORDER BY k; + +ALTER TABLE replicated_alter1 DROP COLUMN `n.d`, MODIFY COLUMN s Int64; + +DESC TABLE replicated_alter1; +SHOW CREATE TABLE replicated_alter1; +DESC TABLE replicated_alter2; +SHOW CREATE TABLE replicated_alter2; +SELECT * FROM replicated_alter1 ORDER BY k; + +ALTER TABLE replicated_alter1 ADD COLUMN `n.d` Array(Date), MODIFY COLUMN s UInt32; + +DESC TABLE replicated_alter1; +SHOW CREATE TABLE replicated_alter1; +DESC TABLE replicated_alter2; +SHOW CREATE TABLE replicated_alter2; +SELECT * FROM replicated_alter1 ORDER BY k; + +ALTER TABLE replicated_alter1 DROP COLUMN n.ui8, DROP COLUMN n.d; + +DESC TABLE replicated_alter1; +SHOW CREATE TABLE replicated_alter1; +DESC TABLE replicated_alter2; +SHOW CREATE TABLE replicated_alter2; +SELECT * FROM replicated_alter1 ORDER BY k; + +ALTER TABLE replicated_alter1 DROP COLUMN n.s; + +DESC TABLE replicated_alter1; +SHOW CREATE TABLE replicated_alter1; +DESC TABLE replicated_alter2; +SHOW CREATE TABLE replicated_alter2; +SELECT * FROM replicated_alter1 ORDER BY k; + +ALTER TABLE replicated_alter1 ADD COLUMN n.s Array(String), ADD COLUMN n.d Array(Date); + +DESC TABLE replicated_alter1; +SHOW CREATE TABLE replicated_alter1; +DESC TABLE replicated_alter2; +SHOW CREATE TABLE replicated_alter2; +SELECT * FROM replicated_alter1 ORDER BY k; + +ALTER TABLE replicated_alter1 DROP COLUMN n; + +DESC TABLE replicated_alter1; +SHOW CREATE TABLE replicated_alter1; +DESC TABLE replicated_alter2; +SHOW CREATE TABLE replicated_alter2; +SELECT * FROM replicated_alter1 ORDER BY k; + +ALTER TABLE replicated_alter1 MODIFY COLUMN dt Date, MODIFY COLUMN s DateTime('UTC') DEFAULT '1970-01-01 00:00:00'; + +DESC TABLE replicated_alter1; +SHOW CREATE TABLE replicated_alter1; +DESC TABLE replicated_alter2; +SHOW CREATE TABLE replicated_alter2; +SELECT * FROM replicated_alter1 ORDER BY k; + +DROP TABLE replicated_alter1; +DROP TABLE replicated_alter2; diff --git a/parser/testdata/00063_check_query/ast.json b/parser/testdata/00063_check_query/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/00063_check_query/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00063_check_query/metadata.json b/parser/testdata/00063_check_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00063_check_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00063_check_query/query.sql b/parser/testdata/00063_check_query/query.sql new file mode 100644 index 000000000..60af498b1 --- /dev/null +++ b/parser/testdata/00063_check_query/query.sql @@ -0,0 +1,29 @@ +-- Tags: log-engine + +SET check_query_single_value_result = 1; + +DROP TABLE IF EXISTS check_query_tiny_log; + +CREATE TABLE check_query_tiny_log (N UInt32, S String) Engine = TinyLog; + +INSERT INTO check_query_tiny_log VALUES (1, 'A'), (2, 'B'), (3, 'C'); + +CHECK TABLE check_query_tiny_log; + +CHECK TABLE check_query_tiny_log PARTITION tuple(); -- { serverError NOT_IMPLEMENTED } +CHECK TABLE check_query_tiny_log PART 'all_0_0_0'; -- { serverError NOT_IMPLEMENTED } + +-- Settings and FORMAT are supported +CHECK TABLE check_query_tiny_log SETTINGS max_threads = 16; +CHECK TABLE check_query_tiny_log FORMAT Null SETTINGS max_threads = 8, check_query_single_value_result = 0; + +DROP TABLE IF EXISTS check_query_log; + +CREATE TABLE check_query_log (N UInt32,S String) Engine = Log; + +INSERT INTO check_query_log VALUES (1, 'A'), (2, 'B'), (3, 'C'); + +CHECK TABLE check_query_log; + +DROP TABLE check_query_log; +DROP TABLE check_query_tiny_log; diff --git a/parser/testdata/00063_loyalty_joins/ast.json b/parser/testdata/00063_loyalty_joins/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00063_loyalty_joins/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00063_loyalty_joins/metadata.json b/parser/testdata/00063_loyalty_joins/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00063_loyalty_joins/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00063_loyalty_joins/query.sql b/parser/testdata/00063_loyalty_joins/query.sql new file mode 100644 index 000000000..42aae8a8c --- /dev/null +++ b/parser/testdata/00063_loyalty_joins/query.sql @@ -0,0 +1,98 @@ +-- Tags: stateful +SET any_join_distinct_right_table_keys = 1; +SET joined_subquery_requires_alias = 0; + +SELECT + loyalty, + count() +FROM test.hits ANY LEFT JOIN +( + SELECT + UserID, + sum(SearchEngineID = 2) AS yandex, + sum(SearchEngineID = 3) AS google, + toInt8(if(yandex > google, yandex / (yandex + google), -google / (yandex + google)) * 10) AS loyalty + FROM test.hits + WHERE (SearchEngineID = 2) OR (SearchEngineID = 3) + GROUP BY UserID + HAVING (yandex + google) > 10 +) USING UserID +GROUP BY loyalty +ORDER BY loyalty ASC; + + +SELECT + loyalty, + count() +FROM +( + SELECT UserID + FROM test.hits +) ANY LEFT JOIN +( + SELECT + UserID, + sum(SearchEngineID = 2) AS yandex, + sum(SearchEngineID = 3) AS google, + toInt8(if(yandex > google, yandex / (yandex + google), -google / (yandex + google)) * 10) AS loyalty + FROM test.hits + WHERE (SearchEngineID = 2) OR (SearchEngineID = 3) + GROUP BY UserID + HAVING (yandex + google) > 10 +) USING UserID +GROUP BY loyalty +ORDER BY loyalty ASC; + + +SELECT + loyalty, + count() +FROM +( + SELECT + loyalty, + UserID + FROM + ( + SELECT UserID + FROM test.hits + ) ANY LEFT JOIN + ( + SELECT + UserID, + sum(SearchEngineID = 2) AS yandex, + sum(SearchEngineID = 3) AS google, + toInt8(if(yandex > google, yandex / 
(yandex + google), -google / (yandex + google)) * 10) AS loyalty + FROM test.hits + WHERE (SearchEngineID = 2) OR (SearchEngineID = 3) + GROUP BY UserID + HAVING (yandex + google) > 10 + ) USING UserID +) +GROUP BY loyalty +ORDER BY loyalty ASC; + + +SELECT + loyalty, + count() AS c, + bar(log(c + 1) * 1000, 0, log(3000000) * 1000, 80) +FROM test.hits ANY INNER JOIN +( + SELECT + UserID, + toInt8(if(yandex > google, yandex / (yandex + google), -google / (yandex + google)) * 10) AS loyalty + FROM + ( + SELECT + UserID, + sum(SearchEngineID = 2) AS yandex, + sum(SearchEngineID = 3) AS google + FROM test.hits + WHERE (SearchEngineID = 2) OR (SearchEngineID = 3) + GROUP BY UserID + HAVING (yandex + google) > 10 + ) +) USING UserID +GROUP BY loyalty +ORDER BY loyalty ASC; diff --git a/parser/testdata/00064_negate_bug/ast.json b/parser/testdata/00064_negate_bug/ast.json new file mode 100644 index 000000000..323c42660 --- /dev/null +++ b/parser/testdata/00064_negate_bug/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function negate (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUInt32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function toTypeName (alias t) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001328882, + "rows_read": 12, + "bytes_read": 477 + } +} diff --git a/parser/testdata/00064_negate_bug/metadata.json b/parser/testdata/00064_negate_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00064_negate_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00064_negate_bug/query.sql b/parser/testdata/00064_negate_bug/query.sql new file mode 100644 index 000000000..ba0767701 --- /dev/null +++ b/parser/testdata/00064_negate_bug/query.sql @@ -0,0 +1 @@ +SELECT -toUInt32(1) AS x, toTypeName(x) AS t diff --git a/parser/testdata/00065_loyalty_with_storage_join/ast.json b/parser/testdata/00065_loyalty_with_storage_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00065_loyalty_with_storage_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00065_loyalty_with_storage_join/metadata.json b/parser/testdata/00065_loyalty_with_storage_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00065_loyalty_with_storage_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00065_loyalty_with_storage_join/query.sql b/parser/testdata/00065_loyalty_with_storage_join/query.sql new file mode 100644 index 000000000..6614de2df --- /dev/null +++ b/parser/testdata/00065_loyalty_with_storage_join/query.sql @@ -0,0 +1,34 @@ +-- Tags: stateful + +DROP TABLE IF EXISTS join; +CREATE TABLE join (UserID UInt64, loyalty Int8) ENGINE = Join(SEMI, LEFT, UserID); + +INSERT INTO join +SELECT + UserID, + toInt8(if((sum(SearchEngineID = 2) AS yandex) > (sum(SearchEngineID = 3) AS google), + yandex / (yandex + google), + -google / (yandex + google)) * 10) 
AS loyalty +FROM test.hits +WHERE (SearchEngineID = 2) OR (SearchEngineID = 3) +GROUP BY UserID +HAVING (yandex + google) > 10; + +SELECT + loyalty, + count() +FROM test.hits SEMI LEFT JOIN join USING UserID +GROUP BY loyalty +ORDER BY loyalty ASC; + +DETACH TABLE join; +ATTACH TABLE join; + +SELECT + loyalty, + count() +FROM test.hits SEMI LEFT JOIN join USING UserID +GROUP BY loyalty +ORDER BY loyalty ASC; + +DROP TABLE join; diff --git a/parser/testdata/00065_shard_float_literals_formatting/ast.json b/parser/testdata/00065_shard_float_literals_formatting/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00065_shard_float_literals_formatting/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00065_shard_float_literals_formatting/metadata.json b/parser/testdata/00065_shard_float_literals_formatting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00065_shard_float_literals_formatting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00065_shard_float_literals_formatting/query.sql b/parser/testdata/00065_shard_float_literals_formatting/query.sql new file mode 100644 index 000000000..9ee5c4d89 --- /dev/null +++ b/parser/testdata/00065_shard_float_literals_formatting/query.sql @@ -0,0 +1,3 @@ +-- Tags: shard + +SELECT toTypeName(1.0) FROM remote('127.0.0.{2,3}', system, one) diff --git a/parser/testdata/00066_group_by_in/ast.json b/parser/testdata/00066_group_by_in/ast.json new file mode 100644 index 000000000..8784d1bba --- /dev/null +++ b/parser/testdata/00066_group_by_in/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier dummy" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001130123, + "rows_read": 10, + "bytes_read": 357 + } +} diff --git a/parser/testdata/00066_group_by_in/metadata.json b/parser/testdata/00066_group_by_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00066_group_by_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00066_group_by_in/query.sql b/parser/testdata/00066_group_by_in/query.sql new file mode 100644 index 000000000..457a5297b --- /dev/null +++ b/parser/testdata/00066_group_by_in/query.sql @@ -0,0 +1,2 @@ +SELECT (dummy IN (1)) AS x GROUP BY x; +SELECT (1 IN (0,2)) AS x GROUP BY x; diff --git a/parser/testdata/00066_sorting_distributed_many_replicas/ast.json b/parser/testdata/00066_sorting_distributed_many_replicas/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00066_sorting_distributed_many_replicas/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00066_sorting_distributed_many_replicas/metadata.json b/parser/testdata/00066_sorting_distributed_many_replicas/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00066_sorting_distributed_many_replicas/metadata.json @@ -0,0 +1 @@ +{"todo": 
true} diff --git a/parser/testdata/00066_sorting_distributed_many_replicas/query.sql b/parser/testdata/00066_sorting_distributed_many_replicas/query.sql new file mode 100644 index 000000000..a4de53a10 --- /dev/null +++ b/parser/testdata/00066_sorting_distributed_many_replicas/query.sql @@ -0,0 +1,5 @@ +-- Tags: stateful, replica, distributed, no-random-settings + + +SET max_parallel_replicas = 2; +SELECT EventTime::DateTime('Asia/Dubai') FROM remote('127.0.0.{1|2}', test, hits) ORDER BY EventTime DESC LIMIT 10 diff --git a/parser/testdata/00067_replicate_segfault/ast.json b/parser/testdata/00067_replicate_segfault/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00067_replicate_segfault/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00067_replicate_segfault/metadata.json b/parser/testdata/00067_replicate_segfault/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00067_replicate_segfault/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00067_replicate_segfault/query.sql b/parser/testdata/00067_replicate_segfault/query.sql new file mode 100644 index 000000000..bf4c90d5a --- /dev/null +++ b/parser/testdata/00067_replicate_segfault/query.sql @@ -0,0 +1,4 @@ +-- Tags: replica + +SELECT arrayFilter(x -> materialize(0), materialize([0])) AS p, arrayAll(y -> arrayExists(x -> y != x, p), p) AS test; +SELECT arrayFilter(x -> materialize(0), materialize([''])) AS p, arrayAll(y -> arrayExists(x -> y != x, p), p) AS test; diff --git a/parser/testdata/00067_union_all/ast.json b/parser/testdata/00067_union_all/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00067_union_all/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00067_union_all/metadata.json b/parser/testdata/00067_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00067_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00067_union_all/query.sql b/parser/testdata/00067_union_all/query.sql new file mode 100644 index 000000000..7848ce4f9 --- /dev/null +++ b/parser/testdata/00067_union_all/query.sql @@ -0,0 +1,15 @@ +-- Tags: stateful +SELECT * FROM +( + SELECT UserID AS id, 1 AS event + FROM remote('127.0.0.{1,2}', test, hits) + ORDER BY id DESC + LIMIT 10 +UNION ALL + SELECT FUniqID AS id, 2 AS event + FROM remote('127.0.0.{1,2}', test, hits) + ORDER BY id DESC + LIMIT 10 +) +ORDER BY id, event +SETTINGS max_rows_to_read = 40_000_000; diff --git a/parser/testdata/00068_empty_tiny_log/ast.json b/parser/testdata/00068_empty_tiny_log/ast.json new file mode 100644 index 000000000..f6b352d34 --- /dev/null +++ b/parser/testdata/00068_empty_tiny_log/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery empty_tiny_log (children 3)" + }, + { + "explain": " Identifier empty_tiny_log" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration A (children 1)" + }, + { + "explain": " DataType UInt8" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function TinyLog" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.00117892, + "rows_read": 8, + "bytes_read": 293 + } +} diff --git a/parser/testdata/00068_empty_tiny_log/metadata.json 
b/parser/testdata/00068_empty_tiny_log/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00068_empty_tiny_log/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00068_empty_tiny_log/query.sql b/parser/testdata/00068_empty_tiny_log/query.sql new file mode 100644 index 000000000..22b0f8bf1 --- /dev/null +++ b/parser/testdata/00068_empty_tiny_log/query.sql @@ -0,0 +1,5 @@ +CREATE TABLE IF NOT EXISTS empty_tiny_log(A UInt8) Engine = TinyLog; + +SELECT A FROM empty_tiny_log; + +DROP TABLE empty_tiny_log; diff --git a/parser/testdata/00068_subquery_in_prewhere/ast.json b/parser/testdata/00068_subquery_in_prewhere/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00068_subquery_in_prewhere/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00068_subquery_in_prewhere/metadata.json b/parser/testdata/00068_subquery_in_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00068_subquery_in_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00068_subquery_in_prewhere/query.sql b/parser/testdata/00068_subquery_in_prewhere/query.sql new file mode 100644 index 000000000..a7c07db2e --- /dev/null +++ b/parser/testdata/00068_subquery_in_prewhere/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT count() FROM test.hits PREWHERE UserID IN (SELECT UserID FROM test.hits WHERE CounterID = 800784); diff --git a/parser/testdata/00069_date_arithmetic/ast.json b/parser/testdata/00069_date_arithmetic/ast.json new file mode 100644 index 000000000..e0af30e7c --- /dev/null +++ b/parser/testdata/00069_date_arithmetic/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function minus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function now (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function now (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal 'Int32'" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001505101, + "rows_read": 15, + "bytes_read": 585 + } +} diff --git a/parser/testdata/00069_date_arithmetic/metadata.json b/parser/testdata/00069_date_arithmetic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00069_date_arithmetic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00069_date_arithmetic/query.sql b/parser/testdata/00069_date_arithmetic/query.sql new file mode 100644 index 000000000..1268da5ff --- /dev/null +++ b/parser/testdata/00069_date_arithmetic/query.sql @@ -0,0 +1,20 @@ +SELECT toTypeName(now() - now()) = 'Int32'; +SELECT toTypeName(now() + 1) LIKE 'DateTime%'; +SELECT toTypeName(1 + now()) LIKE 'DateTime%'; +SELECT toTypeName(now() - 1) LIKE 'DateTime%'; +SELECT toDateTime(1) + 1 = toDateTime(2); +SELECT 1 + toDateTime(1) = toDateTime(2); +SELECT toDateTime(1) - 1 = toDateTime(0); + 
+SELECT toTypeName(today()) = 'Date'; +SELECT today() = toDate(now()); + +SELECT toTypeName(yesterday()) = 'Date'; + +SELECT toTypeName(today() - today()) = 'Int32'; +SELECT toTypeName(today() + 1) = 'Date'; +SELECT toTypeName(1 + today()) = 'Date'; +SELECT toTypeName(today() - 1) = 'Date'; +SELECT yesterday() + 1 = today(); +SELECT 1 + yesterday() = today(); +SELECT today() - 1 = yesterday(); diff --git a/parser/testdata/00069_duplicate_aggregation_keys/ast.json b/parser/testdata/00069_duplicate_aggregation_keys/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00069_duplicate_aggregation_keys/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00069_duplicate_aggregation_keys/metadata.json b/parser/testdata/00069_duplicate_aggregation_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00069_duplicate_aggregation_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00069_duplicate_aggregation_keys/query.sql b/parser/testdata/00069_duplicate_aggregation_keys/query.sql new file mode 100644 index 000000000..cddca5e5f --- /dev/null +++ b/parser/testdata/00069_duplicate_aggregation_keys/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT URL, EventDate, max(URL) FROM test.hits WHERE CounterID = 1704509 AND UserID = 4322253409885123546 GROUP BY URL, EventDate, EventDate ORDER BY URL, EventDate; diff --git a/parser/testdata/00071_insert_fewer_columns/ast.json b/parser/testdata/00071_insert_fewer_columns/ast.json new file mode 100644 index 000000000..d9ae26a77 --- /dev/null +++ b/parser/testdata/00071_insert_fewer_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery insert_fewer_columns (children 1)" + }, + { + "explain": " Identifier insert_fewer_columns" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001299904, + "rows_read": 2, + "bytes_read": 92 + } +} diff --git a/parser/testdata/00071_insert_fewer_columns/metadata.json b/parser/testdata/00071_insert_fewer_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00071_insert_fewer_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00071_insert_fewer_columns/query.sql b/parser/testdata/00071_insert_fewer_columns/query.sql new file mode 100644 index 000000000..bca63395d --- /dev/null +++ b/parser/testdata/00071_insert_fewer_columns/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS insert_fewer_columns; +CREATE TABLE insert_fewer_columns (a UInt8, b UInt8) ENGINE = Memory; +INSERT INTO insert_fewer_columns (a) VALUES (1), (2); +SELECT * FROM insert_fewer_columns; + +-- Test positional arguments in INSERT.
+DROP TABLE IF EXISTS insert_fewer_columns_2; +CREATE TABLE insert_fewer_columns_2 (b UInt8, a UInt8) ENGINE = Memory; +INSERT INTO insert_fewer_columns_2 SELECT * FROM insert_fewer_columns; +SELECT a, b FROM insert_fewer_columns; +SELECT a, b FROM insert_fewer_columns_2; + +DROP TABLE IF EXISTS insert_fewer_columns_2; +DROP TABLE insert_fewer_columns; diff --git a/parser/testdata/00071_merge_tree_optimize_aio/ast.json b/parser/testdata/00071_merge_tree_optimize_aio/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00071_merge_tree_optimize_aio/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00071_merge_tree_optimize_aio/metadata.json b/parser/testdata/00071_merge_tree_optimize_aio/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00071_merge_tree_optimize_aio/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00071_merge_tree_optimize_aio/query.sql b/parser/testdata/00071_merge_tree_optimize_aio/query.sql new file mode 100644 index 000000000..36b943b48 --- /dev/null +++ b/parser/testdata/00071_merge_tree_optimize_aio/query.sql @@ -0,0 +1,19 @@ +-- Tags: stateful +DROP TABLE IF EXISTS hits_snippet; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE hits_snippet(EventTime DateTime('Asia/Dubai'), EventDate Date, CounterID UInt32, UserID UInt64, URL String, Referer String) ENGINE = MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID), EventTime), 8192); + +SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +SET max_block_size = 4096; + +INSERT INTO hits_snippet(EventTime, EventDate, CounterID, UserID, URL, Referer) SELECT EventTime, EventDate, CounterID, UserID, URL, Referer FROM test.hits WHERE EventDate = toDate('2014-03-18') ORDER BY EventTime, EventDate, CounterID, UserID, URL, Referer ASC LIMIT 50; +INSERT INTO hits_snippet(EventTime, EventDate, CounterID, UserID, URL, Referer) SELECT EventTime, EventDate, CounterID, UserID, URL, Referer FROM test.hits WHERE EventDate = toDate('2014-03-19') ORDER BY EventTime, EventDate, CounterID, UserID, URL, Referer ASC LIMIT 50; + +SET min_bytes_to_use_direct_io = 8192; + +OPTIMIZE TABLE hits_snippet; + +SELECT EventTime, EventDate, CounterID, UserID, URL, Referer FROM hits_snippet ORDER BY EventTime, EventDate, CounterID, UserID, URL, Referer ASC; + +DROP TABLE hits_snippet; diff --git a/parser/testdata/00072_compare_date_and_string_index/ast.json b/parser/testdata/00072_compare_date_and_string_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00072_compare_date_and_string_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00072_compare_date_and_string_index/metadata.json b/parser/testdata/00072_compare_date_and_string_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00072_compare_date_and_string_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00072_compare_date_and_string_index/query.sql b/parser/testdata/00072_compare_date_and_string_index/query.sql new file mode 100644 index 000000000..b96109b95 --- /dev/null +++ b/parser/testdata/00072_compare_date_and_string_index/query.sql @@ -0,0 +1,38 @@ +-- Tags: stateful +SELECT count() FROM test.hits WHERE EventDate = '2014-03-18'; +SELECT count() FROM test.hits WHERE EventDate < '2014-03-18'; +SELECT count() FROM test.hits WHERE EventDate > '2014-03-18'; 
+SELECT count() FROM test.hits WHERE EventDate <= '2014-03-18'; +SELECT count() FROM test.hits WHERE EventDate >= '2014-03-18'; +SELECT count() FROM test.hits WHERE EventDate IN ('2014-03-18', '2014-03-19'); + +SELECT count() FROM test.hits WHERE EventDate = toDate('2014-03-18'); +SELECT count() FROM test.hits WHERE EventDate < toDate('2014-03-18'); +SELECT count() FROM test.hits WHERE EventDate > toDate('2014-03-18'); +SELECT count() FROM test.hits WHERE EventDate <= toDate('2014-03-18'); +SELECT count() FROM test.hits WHERE EventDate >= toDate('2014-03-18'); +SELECT count() FROM test.hits WHERE EventDate IN (toDate('2014-03-18'), toDate('2014-03-19')); + +SELECT count() FROM test.hits WHERE EventDate = concat('2014-0', '3-18'); + +DROP TABLE IF EXISTS hits_indexed_by_time; +CREATE TABLE hits_indexed_by_time (EventDate Date, EventTime DateTime('Asia/Dubai')) ENGINE = MergeTree ORDER BY (EventDate, EventTime) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +INSERT INTO hits_indexed_by_time SELECT EventDate, EventTime FROM test.hits SETTINGS max_block_size = 65000; + +SELECT count() FROM hits_indexed_by_time WHERE EventTime = '2014-03-18 01:02:03'; +SELECT count() FROM hits_indexed_by_time WHERE EventTime < '2014-03-18 01:02:03'; +SELECT count() FROM hits_indexed_by_time WHERE EventTime > '2014-03-18 01:02:03'; +SELECT count() FROM hits_indexed_by_time WHERE EventTime <= '2014-03-18 01:02:03'; +SELECT count() FROM hits_indexed_by_time WHERE EventTime >= '2014-03-18 01:02:03'; +SELECT count() FROM hits_indexed_by_time WHERE EventTime IN ('2014-03-18 01:02:03', '2014-03-19 04:05:06'); + +SELECT count() FROM hits_indexed_by_time WHERE EventTime = toDateTime('2014-03-18 01:02:03', 'Asia/Dubai'); +SELECT count() FROM hits_indexed_by_time WHERE EventTime < toDateTime('2014-03-18 01:02:03', 'Asia/Dubai'); +SELECT count() FROM hits_indexed_by_time WHERE EventTime > toDateTime('2014-03-18 01:02:03', 'Asia/Dubai'); +SELECT count() FROM hits_indexed_by_time WHERE EventTime <= toDateTime('2014-03-18 01:02:03', 'Asia/Dubai'); +SELECT count() FROM hits_indexed_by_time WHERE EventTime >= toDateTime('2014-03-18 01:02:03', 'Asia/Dubai'); +SELECT count() FROM hits_indexed_by_time WHERE EventTime IN (toDateTime('2014-03-18 01:02:03', 'Asia/Dubai'), toDateTime('2014-03-19 04:05:06', 'Asia/Dubai')); + +SELECT count() FROM hits_indexed_by_time WHERE EventTime = concat('2014-03-18 ', '01:02:03'); + +DROP TABLE hits_indexed_by_time; diff --git a/parser/testdata/00072_in_types/ast.json b/parser/testdata/00072_in_types/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00072_in_types/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00072_in_types/metadata.json b/parser/testdata/00072_in_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00072_in_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00072_in_types/query.sql b/parser/testdata/00072_in_types/query.sql new file mode 100644 index 000000000..1705911ee --- /dev/null +++ b/parser/testdata/00072_in_types/query.sql @@ -0,0 +1,15 @@ +SELECT + -1 IN (-1), + -1 IN (1, -1, 2), + 1.0 IN (1), + 1.1 IN (1, -1), + 1.0 IN (3, 1., -1), + 1 IN (3, 2, 1), + toInt16(-1) IN (255), + materialize(-1) IN (-1), + materialize(-1) IN (1, -1, 2), + materialize(1.0) IN (1), + materialize(1.1) IN (1, -1), + materialize(1.0) IN (3, 1., -1), + materialize(1) IN (3, 2, 1), + materialize(toInt16(-1)) IN (255); diff --git 
a/parser/testdata/00073_merge_sorting_empty_array_joined/ast.json b/parser/testdata/00073_merge_sorting_empty_array_joined/ast.json new file mode 100644 index 000000000..4d92a2ecb --- /dev/null +++ b/parser/testdata/00073_merge_sorting_empty_array_joined/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001187544, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00073_merge_sorting_empty_array_joined/metadata.json b/parser/testdata/00073_merge_sorting_empty_array_joined/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00073_merge_sorting_empty_array_joined/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00073_merge_sorting_empty_array_joined/query.sql b/parser/testdata/00073_merge_sorting_empty_array_joined/query.sql new file mode 100644 index 000000000..50fa8b82e --- /dev/null +++ b/parser/testdata/00073_merge_sorting_empty_array_joined/query.sql @@ -0,0 +1,2 @@ +SET max_block_size = 1; +SELECT number, arr FROM (SELECT number, arrayFilter(x -> x = 0, [1]) AS arr FROM system.numbers LIMIT 10) ARRAY JOIN arr ORDER BY number; diff --git a/parser/testdata/00073_uniq_array/ast.json b/parser/testdata/00073_uniq_array/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00073_uniq_array/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00073_uniq_array/metadata.json b/parser/testdata/00073_uniq_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00073_uniq_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00073_uniq_array/query.sql b/parser/testdata/00073_uniq_array/query.sql new file mode 100644 index 000000000..29eefc34b --- /dev/null +++ b/parser/testdata/00073_uniq_array/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT EventDate, uniqExact(UserID), length(groupUniqArray(UserID)), arrayUniq(groupArray(UserID)) FROM test.hits WHERE CounterID = 1704509 GROUP BY EventDate ORDER BY EventDate; diff --git a/parser/testdata/00074_full_join/ast.json b/parser/testdata/00074_full_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00074_full_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00074_full_join/metadata.json b/parser/testdata/00074_full_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00074_full_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00074_full_join/query.sql b/parser/testdata/00074_full_join/query.sql new file mode 100644 index 000000000..7a7318d8f --- /dev/null +++ b/parser/testdata/00074_full_join/query.sql @@ -0,0 +1,110 @@ +-- Tags: stateful +set any_join_distinct_right_table_keys = 1; +set joined_subquery_requires_alias = 0; + +SELECT + CounterID, + hits, + visits +FROM +( + SELECT + (CounterID % 100000) AS CounterID, + count() AS hits + FROM test.hits + GROUP BY CounterID +) ANY FULL OUTER JOIN +( + SELECT + (CounterID % 100000) AS CounterID, + sum(Sign) AS visits + FROM test.visits + GROUP BY CounterID + HAVING visits > 0 +) USING CounterID +WHERE hits = 0 OR visits = 0 +ORDER BY + hits + visits * 10 DESC, + CounterID ASC +LIMIT 20; + + +SELECT + CounterID, + hits, + visits +FROM +( + SELECT + (CounterID % 100000) AS CounterID, + count() AS 
hits + FROM test.hits + GROUP BY CounterID +) ANY LEFT JOIN +( + SELECT + (CounterID % 100000) AS CounterID, + sum(Sign) AS visits + FROM test.visits + GROUP BY CounterID + HAVING visits > 0 +) USING CounterID +WHERE hits = 0 OR visits = 0 +ORDER BY + hits + visits * 10 DESC, + CounterID ASC +LIMIT 20; + + +SELECT + CounterID, + hits, + visits +FROM +( + SELECT + (CounterID % 100000) AS CounterID, + count() AS hits + FROM test.hits + GROUP BY CounterID +) ANY RIGHT JOIN +( + SELECT + (CounterID % 100000) AS CounterID, + sum(Sign) AS visits + FROM test.visits + GROUP BY CounterID + HAVING visits > 0 +) USING CounterID +WHERE hits = 0 OR visits = 0 +ORDER BY + hits + visits * 10 DESC, + CounterID ASC +LIMIT 20; + + +SELECT + CounterID, + hits, + visits +FROM +( + SELECT + (CounterID % 100000) AS CounterID, + count() AS hits + FROM test.hits + GROUP BY CounterID +) ANY INNER JOIN +( + SELECT + (CounterID % 100000) AS CounterID, + sum(Sign) AS visits + FROM test.visits + GROUP BY CounterID + HAVING visits > 0 +) USING CounterID +WHERE hits = 0 OR visits = 0 +ORDER BY + hits + visits * 10 DESC, + CounterID ASC +LIMIT 20; diff --git a/parser/testdata/00075_left_array_join/ast.json b/parser/testdata/00075_left_array_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00075_left_array_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00075_left_array_join/metadata.json b/parser/testdata/00075_left_array_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00075_left_array_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00075_left_array_join/query.sql b/parser/testdata/00075_left_array_join/query.sql new file mode 100644 index 000000000..e40b34b99 --- /dev/null +++ b/parser/testdata/00075_left_array_join/query.sql @@ -0,0 +1,3 @@ +-- Tags: stateful +SELECT UserID, EventTime::DateTime('Asia/Dubai'), pp.Key1, pp.Key2, ParsedParams.Key1 FROM test.hits ARRAY JOIN ParsedParams AS pp WHERE CounterID = 1704509 ORDER BY UserID, EventTime, pp.Key1, pp.Key2 LIMIT 100; +SELECT UserID, EventTime::DateTime('Asia/Dubai'), pp.Key1, pp.Key2, ParsedParams.Key1 FROM test.hits LEFT ARRAY JOIN ParsedParams AS pp WHERE CounterID = 1704509 ORDER BY UserID, EventTime, pp.Key1, pp.Key2 LIMIT 100; diff --git a/parser/testdata/00075_shard_formatting_negate_of_negative_literal/ast.json b/parser/testdata/00075_shard_formatting_negate_of_negative_literal/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00075_shard_formatting_negate_of_negative_literal/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00075_shard_formatting_negate_of_negative_literal/metadata.json b/parser/testdata/00075_shard_formatting_negate_of_negative_literal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00075_shard_formatting_negate_of_negative_literal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00075_shard_formatting_negate_of_negative_literal/query.sql b/parser/testdata/00075_shard_formatting_negate_of_negative_literal/query.sql new file mode 100644 index 000000000..5305c0d05 --- /dev/null +++ b/parser/testdata/00075_shard_formatting_negate_of_negative_literal/query.sql @@ -0,0 +1,3 @@ +-- Tags: shard + +SELECT -(-1) FROM remote('127.0.0.{2,3}', system, one) diff --git a/parser/testdata/00076_ip_coding_functions/ast.json 
b/parser/testdata/00076_ip_coding_functions/ast.json new file mode 100644 index 000000000..d5377796f --- /dev/null +++ b/parser/testdata/00076_ip_coding_functions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001215817, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00076_ip_coding_functions/metadata.json b/parser/testdata/00076_ip_coding_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00076_ip_coding_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00076_ip_coding_functions/query.sql b/parser/testdata/00076_ip_coding_functions/query.sql new file mode 100644 index 000000000..f693b336e --- /dev/null +++ b/parser/testdata/00076_ip_coding_functions/query.sql @@ -0,0 +1,130 @@ +SET cast_ipv4_ipv6_default_on_conversion_error = 1; + +select IPv4StringToNum('') == 0; +select IPv4StringToNum(materialize('')) == 0; +select IPv4StringToNum('not an ip string') == 0; +select IPv4StringToNum(materialize('not an ip string')) == 0; +select IPv4StringToNum('127.0.0.1' as p) == (0x7f000001 as n), IPv4NumToString(n) == p; +select IPv4StringToNum(materialize('127.0.0.1') as p) == (materialize(0x7f000001) as n), IPv4NumToString(n) == p; +select IPv4NumToString(toUInt32(0)) == '0.0.0.0'; +select IPv4NumToString(materialize(toUInt32(0))) == materialize('0.0.0.0'); +select IPv4NumToString(toUInt32(0x7f000001)) == '127.0.0.1'; +select IPv4NumToString(materialize(toUInt32(0x7f000001))) == materialize('127.0.0.1'); + +select IPv6NumToString(toFixedString('', 16)) == '::'; +select IPv6NumToString(toFixedString(materialize(''), 16)) == materialize('::'); +select IPv6NumToString(IPv6StringToNum('::ffff:127.0.0.1' as p) as n) == p; +select IPv6NumToString(IPv6StringToNum(materialize('::ffff:127.0.0.1') as p) as n) == p; +select IPv6NumToString(toFixedString(unhex('20010DB800000003000001FF0000002E'), 16)) == '2001:db8:0:3:0:1ff:0:2e'; +select IPv6NumToString(toFixedString(unhex(materialize('20010DB800000003000001FF0000002E')), 16)) == materialize('2001:db8:0:3:0:1ff:0:2e'); +select IPv6StringToNum('') == toFixedString(materialize(''), 16); +select IPv6StringToNum(materialize('')) == toFixedString(materialize(''), 16); +select IPv6StringToNum('not an ip string') == toFixedString(materialize(''), 16); +select IPv6StringToNum(materialize('not an ip string')) == toFixedString(materialize(''), 16); + +/* IPv4ToIPv6 */ + +SELECT hex(IPv4ToIPv6(1297626935)); + +/* Test with a table */ + +DROP TABLE IF EXISTS addresses; +CREATE TABLE addresses(addr UInt32) ENGINE = Memory; +INSERT INTO addresses(addr) VALUES (1297626935), (2130706433), (3254522122); +SELECT hex(IPv4ToIPv6(addr)) FROM addresses ORDER BY addr ASC; + +/* cutIPv6 */ + +/* A real IPv6 address */ + +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 0, 0); + +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 1, 0); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 2, 0); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 3, 0); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 4, 0); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 5, 0); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 6, 0); +SELECT
cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 7, 0); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 8, 0); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 9, 0); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 10, 0); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 11, 0); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 12, 0); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 13, 0); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 14, 0); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 15, 0); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 16, 0); + +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 0, 1); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 0, 2); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 0, 3); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 0, 4); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 0, 5); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 0, 6); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 0, 7); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 0, 8); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 0, 9); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 0, 10); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 0, 11); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 0, 12); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 0, 13); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 0, 14); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 0, 15); +SELECT cutIPv6(IPv6StringToNum('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), 0, 16); + +/* An IPv4-mapped IPv6 address */ + +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 0, 0); + +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 1, 0); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 2, 0); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 3, 0); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 4, 0); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 5, 0); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 6, 0); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 7, 0); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 8, 0); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 9, 0); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 10, 0); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 11, 0); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 12, 0); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 13, 0); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 14, 0); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'),
16), 15, 0); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 16, 0); + +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 0, 1); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 0, 2); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 0, 3); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 0, 4); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 0, 5); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 0, 6); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 0, 7); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 0, 8); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 0, 9); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 0, 10); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 0, 11); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 0, 12); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 0, 13); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 0, 14); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 0, 15); +SELECT cutIPv6(toFixedString(unhex('00000000000000000000FFFFC1FC110A'), 16), 0, 16); + +/* Tests with tables */ + +/* Real IPv6 addresses */ + +DROP TABLE IF EXISTS addresses; +CREATE TABLE addresses(addr String) ENGINE = Memory; +INSERT INTO addresses(addr) VALUES ('20010DB8AC10FE01FEEDBABECAFEF00D'), ('20010DB8AC10FE01DEADC0DECAFED00D'), ('20010DB8AC10FE01ABADBABEFACEB00C'); +SELECT cutIPv6(toFixedString(unhex(addr), 16), 3, 0) FROM addresses ORDER BY addr ASC; + +/* IPv4-mapped IPv6 addresses */ + +DROP TABLE IF EXISTS addresses; +CREATE TABLE addresses(addr String) ENGINE = Memory; +INSERT INTO addresses(addr) VALUES ('00000000000000000000FFFFC1FC110A'), ('00000000000000000000FFFF4D583737'), ('00000000000000000000FFFF7F000001'); +SELECT cutIPv6(toFixedString(unhex(addr), 16), 0, 3) FROM addresses ORDER BY addr ASC; + +DROP TABLE addresses; diff --git a/parser/testdata/00076_system_columns_bytes/ast.json b/parser/testdata/00076_system_columns_bytes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00076_system_columns_bytes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00076_system_columns_bytes/metadata.json b/parser/testdata/00076_system_columns_bytes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00076_system_columns_bytes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00076_system_columns_bytes/query.sql b/parser/testdata/00076_system_columns_bytes/query.sql new file mode 100644 index 000000000..1748973f4 --- /dev/null +++ b/parser/testdata/00076_system_columns_bytes/query.sql @@ -0,0 +1,17 @@ +-- Tags: stateful +-- NOTE: +-- - database = currentDatabase() is not mandatory +-- - Merge tables may cause UNKNOWN_DATABASE/CANNOT_EXTRACT_TABLE_STRUCTURE from StorageMerge::getColumnSizes() since the table/database can be removed +-- - StorageProxy can wrap any table function, so they also have to be excluded if their nested table function is merge +SELECT + sum(data_compressed_bytes) > 0, + sum(data_uncompressed_bytes) > 0, + sum(marks_bytes) > 0 +FROM system.columns +WHERE (database, `table`)
IN ( + SELECT + database, + `table` + FROM system.tables + WHERE engine != 'Merge' AND (engine != 'StorageProxy' OR create_table_query NOT ILIKE '%merge%') +) \ No newline at end of file diff --git a/parser/testdata/00077_log_tinylog_stripelog/ast.json b/parser/testdata/00077_log_tinylog_stripelog/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00077_log_tinylog_stripelog/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00077_log_tinylog_stripelog/metadata.json b/parser/testdata/00077_log_tinylog_stripelog/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00077_log_tinylog_stripelog/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00077_log_tinylog_stripelog/query.sql b/parser/testdata/00077_log_tinylog_stripelog/query.sql new file mode 100644 index 000000000..5de6053cf --- /dev/null +++ b/parser/testdata/00077_log_tinylog_stripelog/query.sql @@ -0,0 +1,32 @@ +-- Tags: stateful, no-parallel +-- no-parallel: Heavy usage +SET check_query_single_value_result = 1; + +DROP TABLE IF EXISTS test.hits_log; +DROP TABLE IF EXISTS test.hits_tinylog; +DROP TABLE IF EXISTS test.hits_stripelog; + +CREATE TABLE test.hits_log (CounterID UInt32, AdvEngineID UInt8, RegionID UInt32, SearchPhrase String, UserID UInt64) ENGINE = Log; +CREATE TABLE test.hits_tinylog (CounterID UInt32, AdvEngineID UInt8, RegionID UInt32, SearchPhrase String, UserID UInt64) ENGINE = TinyLog; +CREATE TABLE test.hits_stripelog (CounterID UInt32, AdvEngineID UInt8, RegionID UInt32, SearchPhrase String, UserID UInt64) ENGINE = StripeLog; + +CHECK TABLE test.hits_log; +CHECK TABLE test.hits_tinylog; +CHECK TABLE test.hits_stripelog; + +INSERT INTO test.hits_log SELECT CounterID, AdvEngineID, RegionID, SearchPhrase, UserID FROM test.hits; +INSERT INTO test.hits_tinylog SELECT CounterID, AdvEngineID, RegionID, SearchPhrase, UserID FROM test.hits; +INSERT INTO test.hits_stripelog SELECT CounterID, AdvEngineID, RegionID, SearchPhrase, UserID FROM test.hits; + +SELECT count(), sum(cityHash64(CounterID, AdvEngineID, RegionID, SearchPhrase, UserID)) FROM test.hits; +SELECT count(), sum(cityHash64(*)) FROM test.hits_log; +SELECT count(), sum(cityHash64(*)) FROM test.hits_tinylog; +SELECT count(), sum(cityHash64(*)) FROM test.hits_stripelog; + +CHECK TABLE test.hits_log; +CHECK TABLE test.hits_tinylog; +CHECK TABLE test.hits_stripelog; + +DROP TABLE test.hits_log; +DROP TABLE test.hits_tinylog; +DROP TABLE test.hits_stripelog; diff --git a/parser/testdata/00077_set_keys_fit_128_bits_many_blocks/ast.json b/parser/testdata/00077_set_keys_fit_128_bits_many_blocks/ast.json new file mode 100644 index 000000000..85ab1e9e3 --- /dev/null +++ b/parser/testdata/00077_set_keys_fit_128_bits_many_blocks/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001086493, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00077_set_keys_fit_128_bits_many_blocks/metadata.json b/parser/testdata/00077_set_keys_fit_128_bits_many_blocks/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00077_set_keys_fit_128_bits_many_blocks/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00077_set_keys_fit_128_bits_many_blocks/query.sql b/parser/testdata/00077_set_keys_fit_128_bits_many_blocks/query.sql new file 
mode 100644 index 000000000..fe6a0cefd --- /dev/null +++ b/parser/testdata/00077_set_keys_fit_128_bits_many_blocks/query.sql @@ -0,0 +1,14 @@ +SET max_block_size = 1000; + +SELECT number FROM +( + SELECT * FROM system.numbers LIMIT 10000 +) +WHERE (number, number * 2) IN +( + SELECT number, number * 2 + FROM system.numbers + WHERE number % 1000 = 1 + LIMIT 2 +) +LIMIT 2; diff --git a/parser/testdata/00078_group_by_arrays/ast.json b/parser/testdata/00078_group_by_arrays/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00078_group_by_arrays/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00078_group_by_arrays/metadata.json b/parser/testdata/00078_group_by_arrays/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00078_group_by_arrays/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00078_group_by_arrays/query.sql b/parser/testdata/00078_group_by_arrays/query.sql new file mode 100644 index 000000000..0268a28ab --- /dev/null +++ b/parser/testdata/00078_group_by_arrays/query.sql @@ -0,0 +1,5 @@ +-- Tags: stateful +SELECT GoalsReached AS k, count() AS c FROM test.hits GROUP BY k ORDER BY c DESC LIMIT 10; +SELECT GeneralInterests AS k1, GoalsReached AS k2, count() AS c FROM test.hits GROUP BY k1, k2 ORDER BY c DESC LIMIT 10; +SELECT ParsedParams.Key1 AS k1, GeneralInterests AS k2, count() AS c FROM test.hits GROUP BY k1, k2 ORDER BY c DESC LIMIT 10; +SELECT ParsedParams.Key1 AS k1, GeneralInterests AS k2, count() AS c FROM test.hits WHERE notEmpty(k1) AND notEmpty(k2) GROUP BY k1, k2 ORDER BY c DESC LIMIT 10; diff --git a/parser/testdata/00078_string_concat/ast.json b/parser/testdata/00078_string_concat/ast.json new file mode 100644 index 000000000..c1a774446 --- /dev/null +++ b/parser/testdata/00078_string_concat/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '{ key: fn, value: concat }'" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal '{ key: '" + }, + { + "explain": " Function toFixedString (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'fn'" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal ', value: '" + }, + { + "explain": " Literal 'concat'" + }, + { + "explain": " Literal ' }'" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001051844, + "rows_read": 17, + "bytes_read": 650 + } +} diff --git a/parser/testdata/00078_string_concat/metadata.json b/parser/testdata/00078_string_concat/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00078_string_concat/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00078_string_concat/query.sql b/parser/testdata/00078_string_concat/query.sql new file mode 100644 index 000000000..c825c852a --- /dev/null +++ b/parser/testdata/00078_string_concat/query.sql @@ -0,0 +1,185 @@ +select '{ key: fn, value: concat }' == concat('{ key: ', toFixedString('fn', 2), ', value: ', 
'concat', ' }'); + +select concat('a', 'b') == 'ab'; +select concat('a', materialize('b')) == 'ab'; +select concat(materialize('a'), 'b') == 'ab'; +select concat(materialize('a'), materialize('b')) == 'ab'; + +select concat('a', toFixedString('b', 1)) == 'ab'; +select concat('a', materialize(toFixedString('b', 1))) == 'ab'; +select concat(materialize('a'), toFixedString('b', 1)) == 'ab'; +select concat(materialize('a'), materialize(toFixedString('b', 1))) == 'ab'; + +select concat(toFixedString('a', 1), 'b') == 'ab'; +select concat(toFixedString('a', 1), materialize('b')) == 'ab'; +select concat(materialize(toFixedString('a', 1)), 'b') == 'ab'; +select concat(materialize(toFixedString('a', 1)), materialize('b')) == 'ab'; + +select concat(toFixedString('a', 1), toFixedString('b', 1)) == 'ab'; +select concat(toFixedString('a', 1), materialize(toFixedString('b', 1))) == 'ab'; +select concat(materialize(toFixedString('a', 1)), toFixedString('b', 1)) == 'ab'; +select concat(materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1))) == 'ab'; + +select concat('a', 'b') == 'ab' from system.numbers limit 5; +select concat('a', materialize('b')) == 'ab' from system.numbers limit 5; +select concat(materialize('a'), 'b') == 'ab' from system.numbers limit 5; +select concat(materialize('a'), materialize('b')) == 'ab' from system.numbers limit 5; + +select concat('a', toFixedString('b', 1)) == 'ab' from system.numbers limit 5; +select concat('a', materialize(toFixedString('b', 1))) == 'ab' from system.numbers limit 5; +select concat(materialize('a'), toFixedString('b', 1)) == 'ab' from system.numbers limit 5; +select concat(materialize('a'), materialize(toFixedString('b', 1))) == 'ab' from system.numbers limit 5; + +select concat(toFixedString('a', 1), 'b') == 'ab' from system.numbers limit 5; +select concat(toFixedString('a', 1), materialize('b')) == 'ab' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), 'b') == 'ab' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), materialize('b')) == 'ab' from system.numbers limit 5; + +select concat(toFixedString('a', 1), toFixedString('b', 1)) == 'ab' from system.numbers limit 5; +select concat(toFixedString('a', 1), materialize(toFixedString('b', 1))) == 'ab' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), toFixedString('b', 1)) == 'ab' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1))) == 'ab' from system.numbers limit 5; + +select concat('a', 'b', 'c') == 'abc'; +select concat('a', 'b', materialize('c')) == 'abc'; +select concat('a', materialize('b'), 'c') == 'abc'; +select concat('a', materialize('b'), materialize('c')) == 'abc'; +select concat(materialize('a'), 'b', 'c') == 'abc'; +select concat(materialize('a'), 'b', materialize('c')) == 'abc'; +select concat(materialize('a'), materialize('b'), 'c') == 'abc'; +select concat(materialize('a'), materialize('b'), materialize('c')) == 'abc'; + +select concat('a', 'b', toFixedString('c', 1)) == 'abc'; +select concat('a', 'b', materialize(toFixedString('c', 1))) == 'abc'; +select concat('a', materialize('b'), toFixedString('c', 1)) == 'abc'; +select concat('a', materialize('b'), materialize(toFixedString('c', 1))) == 'abc'; +select concat(materialize('a'), 'b', toFixedString('c', 1)) == 'abc'; +select concat(materialize('a'), 'b', materialize(toFixedString('c', 1))) == 'abc'; +select concat(materialize('a'), materialize('b'), toFixedString('c', 1)) 
== 'abc'; +select concat(materialize('a'), materialize('b'), materialize(toFixedString('c', 1))) == 'abc'; + +select concat('a', toFixedString('b', 1), 'c') == 'abc'; +select concat('a', toFixedString('b', 1), materialize('c')) == 'abc'; +select concat('a', materialize(toFixedString('b', 1)), 'c') == 'abc'; +select concat('a', materialize(toFixedString('b', 1)), materialize('c')) == 'abc'; +select concat(materialize('a'), toFixedString('b', 1), 'c') == 'abc'; +select concat(materialize('a'), toFixedString('b', 1), materialize('c')) == 'abc'; +select concat(materialize('a'), materialize(toFixedString('b', 1)), 'c') == 'abc'; +select concat(materialize('a'), materialize(toFixedString('b', 1)), materialize('c')) == 'abc'; + +select concat('a', toFixedString('b', 1), toFixedString('c', 1)) == 'abc'; +select concat('a', toFixedString('b', 1), materialize(toFixedString('c', 1))) == 'abc'; +select concat('a', materialize(toFixedString('b', 1)), toFixedString('c', 1)) == 'abc'; +select concat('a', materialize(toFixedString('b', 1)), materialize(toFixedString('c', 1))) == 'abc'; +select concat(materialize('a'), toFixedString('b', 1), toFixedString('c', 1)) == 'abc'; +select concat(materialize('a'), toFixedString('b', 1), materialize(toFixedString('c', 1))) == 'abc'; +select concat(materialize('a'), materialize(toFixedString('b', 1)), toFixedString('c', 1)) == 'abc'; +select concat(materialize('a'), materialize(toFixedString('b', 1)), materialize(toFixedString('c', 1))) == 'abc'; + +select concat(toFixedString('a', 1), 'b', 'c') == 'abc'; +select concat(toFixedString('a', 1), 'b', materialize('c')) == 'abc'; +select concat(toFixedString('a', 1), materialize('b'), 'c') == 'abc'; +select concat(toFixedString('a', 1), materialize('b'), materialize('c')) == 'abc'; +select concat(materialize(toFixedString('a', 1)), 'b', 'c') == 'abc'; +select concat(materialize(toFixedString('a', 1)), 'b', materialize('c')) == 'abc'; +select concat(materialize(toFixedString('a', 1)), materialize('b'), 'c') == 'abc'; +select concat(materialize(toFixedString('a', 1)), materialize('b'), materialize('c')) == 'abc'; + +select concat(toFixedString('a', 1), 'b', toFixedString('c', 1)) == 'abc'; +select concat(toFixedString('a', 1), 'b', materialize(toFixedString('c', 1))) == 'abc'; +select concat(toFixedString('a', 1), materialize('b'), toFixedString('c', 1)) == 'abc'; +select concat(toFixedString('a', 1), materialize('b'), materialize(toFixedString('c', 1))) == 'abc'; +select concat(materialize(toFixedString('a', 1)), 'b', toFixedString('c', 1)) == 'abc'; +select concat(materialize(toFixedString('a', 1)), 'b', materialize(toFixedString('c', 1))) == 'abc'; +select concat(materialize(toFixedString('a', 1)), materialize('b'), toFixedString('c', 1)) == 'abc'; +select concat(materialize(toFixedString('a', 1)), materialize('b'), materialize(toFixedString('c', 1))) == 'abc'; + +select concat(toFixedString('a', 1), toFixedString('b', 1), 'c') == 'abc'; +select concat(toFixedString('a', 1), toFixedString('b', 1), materialize('c')) == 'abc'; +select concat(toFixedString('a', 1), materialize(toFixedString('b', 1)), 'c') == 'abc'; +select concat(toFixedString('a', 1), materialize(toFixedString('b', 1)), materialize('c')) == 'abc'; +select concat(materialize(toFixedString('a', 1)), toFixedString('b', 1), 'c') == 'abc'; +select concat(materialize(toFixedString('a', 1)), toFixedString('b', 1), materialize('c')) == 'abc'; +select concat(materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1)), 'c') == 'abc'; +select 
concat(materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1)), materialize('c')) == 'abc'; + +select concat(toFixedString('a', 1), toFixedString('b', 1), toFixedString('c', 1)) == 'abc'; +select concat(toFixedString('a', 1), toFixedString('b', 1), materialize(toFixedString('c', 1))) == 'abc'; +select concat(toFixedString('a', 1), materialize(toFixedString('b', 1)), toFixedString('c', 1)) == 'abc'; +select concat(toFixedString('a', 1), materialize(toFixedString('b', 1)), materialize(toFixedString('c', 1))) == 'abc'; +select concat(materialize(toFixedString('a', 1)), toFixedString('b', 1), toFixedString('c', 1)) == 'abc'; +select concat(materialize(toFixedString('a', 1)), toFixedString('b', 1), materialize(toFixedString('c', 1))) == 'abc'; +select concat(materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1)), toFixedString('c', 1)) == 'abc'; +select concat(materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1)), materialize(toFixedString('c', 1))) == 'abc'; + +select concat('a', 'b', 'c') == 'abc' from system.numbers limit 5; +select concat('a', 'b', materialize('c')) == 'abc' from system.numbers limit 5; +select concat('a', materialize('b'), 'c') == 'abc' from system.numbers limit 5; +select concat('a', materialize('b'), materialize('c')) == 'abc' from system.numbers limit 5; +select concat(materialize('a'), 'b', 'c') == 'abc' from system.numbers limit 5; +select concat(materialize('a'), 'b', materialize('c')) == 'abc' from system.numbers limit 5; +select concat(materialize('a'), materialize('b'), 'c') == 'abc' from system.numbers limit 5; +select concat(materialize('a'), materialize('b'), materialize('c')) == 'abc' from system.numbers limit 5; + +select concat('a', 'b', toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select concat('a', 'b', materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select concat('a', materialize('b'), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select concat('a', materialize('b'), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select concat(materialize('a'), 'b', toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select concat(materialize('a'), 'b', materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select concat(materialize('a'), materialize('b'), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select concat(materialize('a'), materialize('b'), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; + +select concat('a', toFixedString('b', 1), 'c') == 'abc' from system.numbers limit 5; +select concat('a', toFixedString('b', 1), materialize('c')) == 'abc' from system.numbers limit 5; +select concat('a', materialize(toFixedString('b', 1)), 'c') == 'abc' from system.numbers limit 5; +select concat('a', materialize(toFixedString('b', 1)), materialize('c')) == 'abc' from system.numbers limit 5; +select concat(materialize('a'), toFixedString('b', 1), 'c') == 'abc' from system.numbers limit 5; +select concat(materialize('a'), toFixedString('b', 1), materialize('c')) == 'abc' from system.numbers limit 5; +select concat(materialize('a'), materialize(toFixedString('b', 1)), 'c') == 'abc' from system.numbers limit 5; +select concat(materialize('a'), materialize(toFixedString('b', 1)), materialize('c')) == 'abc' from system.numbers limit 5; + +select concat('a', toFixedString('b', 1), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select concat('a', 
toFixedString('b', 1), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select concat('a', materialize(toFixedString('b', 1)), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select concat('a', materialize(toFixedString('b', 1)), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select concat(materialize('a'), toFixedString('b', 1), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select concat(materialize('a'), toFixedString('b', 1), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select concat(materialize('a'), materialize(toFixedString('b', 1)), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select concat(materialize('a'), materialize(toFixedString('b', 1)), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; + +select concat(toFixedString('a', 1), 'b', 'c') == 'abc' from system.numbers limit 5; +select concat(toFixedString('a', 1), 'b', materialize('c')) == 'abc' from system.numbers limit 5; +select concat(toFixedString('a', 1), materialize('b'), 'c') == 'abc' from system.numbers limit 5; +select concat(toFixedString('a', 1), materialize('b'), materialize('c')) == 'abc' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), 'b', 'c') == 'abc' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), 'b', materialize('c')) == 'abc' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), materialize('b'), 'c') == 'abc' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), materialize('b'), materialize('c')) == 'abc' from system.numbers limit 5; + +select concat(toFixedString('a', 1), 'b', toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select concat(toFixedString('a', 1), 'b', materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select concat(toFixedString('a', 1), materialize('b'), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select concat(toFixedString('a', 1), materialize('b'), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), 'b', toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), 'b', materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), materialize('b'), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), materialize('b'), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; + +select concat(toFixedString('a', 1), toFixedString('b', 1), 'c') == 'abc' from system.numbers limit 5; +select concat(toFixedString('a', 1), toFixedString('b', 1), materialize('c')) == 'abc' from system.numbers limit 5; +select concat(toFixedString('a', 1), materialize(toFixedString('b', 1)), 'c') == 'abc' from system.numbers limit 5; +select concat(toFixedString('a', 1), materialize(toFixedString('b', 1)), materialize('c')) == 'abc' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), toFixedString('b', 1), 'c') == 'abc' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), toFixedString('b', 1), materialize('c')) == 'abc' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1)), 'c') == 'abc' from system.numbers limit 5; +select 
concat(materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1)), materialize('c')) == 'abc' from system.numbers limit 5; + +select concat(toFixedString('a', 1), toFixedString('b', 1), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select concat(toFixedString('a', 1), toFixedString('b', 1), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select concat(toFixedString('a', 1), materialize(toFixedString('b', 1)), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select concat(toFixedString('a', 1), materialize(toFixedString('b', 1)), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), toFixedString('b', 1), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), toFixedString('b', 1), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1)), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select concat(materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1)), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; diff --git a/parser/testdata/00079_array_join_not_used_joined_column/ast.json b/parser/testdata/00079_array_join_not_used_joined_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00079_array_join_not_used_joined_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00079_array_join_not_used_joined_column/metadata.json b/parser/testdata/00079_array_join_not_used_joined_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00079_array_join_not_used_joined_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00079_array_join_not_used_joined_column/query.sql b/parser/testdata/00079_array_join_not_used_joined_column/query.sql new file mode 100644 index 000000000..4ddfef066 --- /dev/null +++ b/parser/testdata/00079_array_join_not_used_joined_column/query.sql @@ -0,0 +1,4 @@ +-- Tags: stateful +SELECT PP.Key1 AS `ym:s:paramsLevel1`, sum(arrayAll(`x_1` -> `x_1`= '', ParsedParams.Key2)) AS `ym:s:visits` FROM test.hits ARRAY JOIN ParsedParams AS `PP` WHERE CounterID = 1704509 GROUP BY `ym:s:paramsLevel1` ORDER BY PP.Key1, `ym:s:visits` LIMIT 0, 100; +SELECT PP.Key1 AS x1, ParsedParams.Key2 AS x2 FROM test.hits ARRAY JOIN ParsedParams AS PP WHERE CounterID = 1704509 ORDER BY x1, x2 LIMIT 10; +SELECT ParsedParams.Key2 AS x FROM test.hits ARRAY JOIN ParsedParams AS PP ORDER BY x DESC LIMIT 10; diff --git a/parser/testdata/00079_defaulted_columns/ast.json b/parser/testdata/00079_defaulted_columns/ast.json new file mode 100644 index 000000000..b7f1451c0 --- /dev/null +++ b/parser/testdata/00079_defaulted_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery defaulted (children 1)" + }, + { + "explain": " Identifier defaulted" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001405023, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00079_defaulted_columns/metadata.json b/parser/testdata/00079_defaulted_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00079_defaulted_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/00079_defaulted_columns/query.sql b/parser/testdata/00079_defaulted_columns/query.sql new file mode 100644 index 000000000..28e6ec056 --- /dev/null +++ b/parser/testdata/00079_defaulted_columns/query.sql @@ -0,0 +1,38 @@ +drop table if exists defaulted; + +create table defaulted (col1 default 0) engine=Memory; +desc table defaulted; +drop table defaulted; + +create table defaulted (col1 UInt32, col2 default col1 + 1, col3 materialized col1 + 2, col4 alias col1 + 3) engine=Memory; +desc table defaulted; +insert into defaulted (col1) values (10); +select * from defaulted; +select col3, col4 from defaulted; +drop table defaulted; + +create table defaulted (col1 Int8, col2 UInt64 default (SELECT dummy+99 from system.one)) engine=Memory; --{serverError THERE_IS_NO_DEFAULT_VALUE} + +set allow_deprecated_syntax_for_merge_tree=1; +create table defaulted (payload String, date materialized today(), key materialized 0 * rand()) engine=MergeTree(date, key, 8192); +desc table defaulted; +insert into defaulted (payload) values ('hello clickhouse'); +select * from defaulted; +alter table defaulted add column payload_length UInt64 materialized length(payload); +desc table defaulted; +select *, payload_length from defaulted; +insert into defaulted (payload) values ('some string'); +select *, payload_length from defaulted order by payload; +select *, payload_length from defaulted order by payload; +alter table defaulted modify column payload_length default length(payload); +desc table defaulted; +select * from defaulted order by payload; +alter table defaulted modify column payload_length default length(payload) % 65535; +desc table defaulted; +select * from defaulted order by payload; +alter table defaulted modify column payload_length UInt16 default length(payload); +desc table defaulted; +alter table defaulted drop column payload_length; +desc table defaulted; +select * from defaulted order by payload; +drop table defaulted; diff --git a/parser/testdata/00080_array_join_and_union/ast.json b/parser/testdata/00080_array_join_and_union/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00080_array_join_and_union/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00080_array_join_and_union/metadata.json b/parser/testdata/00080_array_join_and_union/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00080_array_join_and_union/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00080_array_join_and_union/query.sql b/parser/testdata/00080_array_join_and_union/query.sql new file mode 100644 index 000000000..9c96c23da --- /dev/null +++ b/parser/testdata/00080_array_join_and_union/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT count() FROM (SELECT Goals.ID FROM test.visits ARRAY JOIN Goals WHERE CounterID = 842440 LIMIT 10 UNION ALL SELECT Goals.ID FROM test.visits ARRAY JOIN Goals WHERE CounterID = 842440 LIMIT 10); diff --git a/parser/testdata/00080_show_tables_and_system_tables/ast.json b/parser/testdata/00080_show_tables_and_system_tables/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00080_show_tables_and_system_tables/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00080_show_tables_and_system_tables/metadata.json b/parser/testdata/00080_show_tables_and_system_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/00080_show_tables_and_system_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00080_show_tables_and_system_tables/query.sql b/parser/testdata/00080_show_tables_and_system_tables/query.sql new file mode 100644 index 000000000..4a5c2d196 --- /dev/null +++ b/parser/testdata/00080_show_tables_and_system_tables/query.sql @@ -0,0 +1,28 @@ +-- Tags: log-engine + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; + +CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier}; + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.A (A UInt8) ENGINE = TinyLog; +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.B (A UInt8) ENGINE = TinyLog; + +SHOW TABLES FROM {CLICKHOUSE_DATABASE:Identifier}; +SHOW TABLES IN system WHERE engine LIKE '%System%' AND name IN ('numbers', 'one') AND database = 'system'; + +SELECT name, toUInt32(metadata_modification_time) > 0, engine_full, create_table_query FROM system.tables WHERE database = currentDatabase() ORDER BY name FORMAT TSVRaw; + +CREATE TEMPORARY TABLE test_temporary_table (id UInt64); +SELECT name FROM system.tables WHERE is_temporary = 1 AND name = 'test_temporary_table'; + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.test_log(id UInt64) ENGINE = Log; +CREATE MATERIALIZED VIEW {CLICKHOUSE_DATABASE:Identifier}.test_materialized ENGINE = Log AS SELECT * FROM {CLICKHOUSE_DATABASE:Identifier}.test_log; +SELECT dependencies_database, dependencies_table FROM system.tables WHERE name = 'test_log' AND database=currentDatabase(); + +DROP DATABASE {CLICKHOUSE_DATABASE:Identifier}; + +-- Check that create_table_query works for system tables and unusual Databases +CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier} ENGINE = Memory; +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.A (A UInt8) ENGINE = Null; + +SELECT sum(ignore(*, metadata_modification_time, engine_full, create_table_query)) FROM system.tables WHERE database = '{CLICKHOUSE_DATABASE:String}'; diff --git a/parser/testdata/00081_group_by_without_key_and_totals/ast.json b/parser/testdata/00081_group_by_without_key_and_totals/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00081_group_by_without_key_and_totals/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00081_group_by_without_key_and_totals/metadata.json b/parser/testdata/00081_group_by_without_key_and_totals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00081_group_by_without_key_and_totals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00081_group_by_without_key_and_totals/query.sql b/parser/testdata/00081_group_by_without_key_and_totals/query.sql new file mode 100644 index 000000000..b4ad732fe --- /dev/null +++ b/parser/testdata/00081_group_by_without_key_and_totals/query.sql @@ -0,0 +1,17 @@ +-- Tags: stateful +SET enable_analyzer = 1; + +SELECT count() AS c FROM test.hits WHERE CounterID = 1704509 WITH TOTALS SETTINGS totals_mode = 'before_having', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; +SELECT count() AS c FROM test.hits WHERE CounterID = 1704509 WITH TOTALS SETTINGS totals_mode = 'after_having_inclusive', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; +SELECT count() AS c FROM test.hits WHERE CounterID = 1704509 WITH TOTALS SETTINGS totals_mode = 'after_having_exclusive', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; +SELECT count() AS c FROM test.hits WHERE CounterID = 1704509 WITH TOTALS SETTINGS totals_mode = 
'after_having_auto', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; + +SELECT 1 AS k, count() AS c FROM test.hits WHERE CounterID = 1704509 GROUP BY k WITH TOTALS SETTINGS totals_mode = 'before_having', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; +SELECT 1 AS k, count() AS c FROM test.hits WHERE CounterID = 1704509 GROUP BY k WITH TOTALS SETTINGS totals_mode = 'after_having_inclusive', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; +SELECT 1 AS k, count() AS c FROM test.hits WHERE CounterID = 1704509 GROUP BY k WITH TOTALS SETTINGS totals_mode = 'after_having_exclusive', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; +SELECT 1 AS k, count() AS c FROM test.hits WHERE CounterID = 1704509 GROUP BY k WITH TOTALS SETTINGS totals_mode = 'after_having_auto', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; + +SELECT TraficSourceID AS k, count() AS c FROM test.hits WHERE CounterID = 1704509 GROUP BY k WITH TOTALS ORDER BY k SETTINGS totals_mode = 'before_having', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; +SELECT TraficSourceID AS k, count() AS c FROM test.hits WHERE CounterID = 1704509 GROUP BY k WITH TOTALS ORDER BY k SETTINGS totals_mode = 'after_having_inclusive', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; +SELECT TraficSourceID AS k, count() AS c FROM test.hits WHERE CounterID = 1704509 GROUP BY k WITH TOTALS ORDER BY k SETTINGS totals_mode = 'after_having_exclusive', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; +SELECT TraficSourceID AS k, count() AS c FROM test.hits WHERE CounterID = 1704509 GROUP BY k WITH TOTALS ORDER BY k SETTINGS totals_mode = 'after_having_auto', max_rows_to_group_by = 100000, group_by_overflow_mode = 'any'; diff --git a/parser/testdata/00081_int_div_or_zero/ast.json b/parser/testdata/00081_int_div_or_zero/ast.json new file mode 100644 index 000000000..0420394a8 --- /dev/null +++ b/parser/testdata/00081_int_div_or_zero/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function intDivOrZero (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.00133993, + "rows_read": 11, + "bytes_read": 413 + } +} diff --git a/parser/testdata/00081_int_div_or_zero/metadata.json b/parser/testdata/00081_int_div_or_zero/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00081_int_div_or_zero/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00081_int_div_or_zero/query.sql b/parser/testdata/00081_int_div_or_zero/query.sql new file mode 100644 index 000000000..204cac83b --- /dev/null +++ b/parser/testdata/00081_int_div_or_zero/query.sql @@ -0,0 +1,5 @@ +select intDivOrZero(0, 0) = 0; +select intDivOrZero(-128, -1) = 0; +select intDivOrZero(-127, -1) = 127; +select intDivOrZero(1, 1) = 1; +select intDivOrZero(4, 2) = 2; diff --git a/parser/testdata/00082_append_trailing_char_if_absent/ast.json 
b/parser/testdata/00082_append_trailing_char_if_absent/ast.json new file mode 100644 index 000000000..0c50e82c2 --- /dev/null +++ b/parser/testdata/00082_append_trailing_char_if_absent/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function appendTrailingCharIfAbsent (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001437679, + "rows_read": 11, + "bytes_read": 410 + } +} diff --git a/parser/testdata/00082_append_trailing_char_if_absent/metadata.json b/parser/testdata/00082_append_trailing_char_if_absent/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00082_append_trailing_char_if_absent/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00082_append_trailing_char_if_absent/query.sql b/parser/testdata/00082_append_trailing_char_if_absent/query.sql new file mode 100644 index 000000000..158ad40d6 --- /dev/null +++ b/parser/testdata/00082_append_trailing_char_if_absent/query.sql @@ -0,0 +1,6 @@ +select appendTrailingCharIfAbsent('', 'a') = ''; +select appendTrailingCharIfAbsent('a', 'a') = 'a'; +select appendTrailingCharIfAbsent('a', 'b') = 'ab'; +select appendTrailingCharIfAbsent(materialize(''), 'a') = materialize(''); +select appendTrailingCharIfAbsent(materialize('a'), 'a') = materialize('a'); +select appendTrailingCharIfAbsent(materialize('a'), 'b') = materialize('ab'); diff --git a/parser/testdata/00082_quantiles/ast.json b/parser/testdata/00082_quantiles/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00082_quantiles/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00082_quantiles/metadata.json b/parser/testdata/00082_quantiles/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00082_quantiles/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00082_quantiles/query.sql b/parser/testdata/00082_quantiles/query.sql new file mode 100644 index 000000000..756523b09 --- /dev/null +++ b/parser/testdata/00082_quantiles/query.sql @@ -0,0 +1,17 @@ +-- Tags: stateful +-- The test uses quite a bit of memory. 
A low max_bytes_before_external_group_by value will lead to high disk usage +-- which in CI leads to timeouts +SET max_bytes_before_external_group_by=0; +SET max_bytes_ratio_before_external_group_by=0; +SELECT CounterID AS k, quantileExact(0.5)(ResolutionWidth) FROM test.hits GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; +SELECT CounterID AS k, quantilesExact(0.1, 0.5, 0.9, 0.99, 0.999)(ResolutionWidth) FROM test.hits GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; + +SELECT CounterID AS k, quantileTiming(0.5)(ResolutionWidth) FROM test.hits GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; +SELECT CounterID AS k, quantilesTiming(0.1, 0.5, 0.9, 0.99, 0.999)(ResolutionWidth) FROM test.hits GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; + + +SELECT CounterID AS k, quantileExact(0.5)(ResolutionWidth) FROM remote('127.0.0.{1,2}', test.hits) GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; +SELECT CounterID AS k, quantilesExact(0.1, 0.5, 0.9, 0.99, 0.999)(ResolutionWidth) FROM remote('127.0.0.{1,2}', test.hits) GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; + +SELECT CounterID AS k, quantileTiming(0.5)(ResolutionWidth) FROM remote('127.0.0.{1,2}', test.hits) GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; +SELECT CounterID AS k, quantilesTiming(0.1, 0.5, 0.9, 0.99, 0.999)(ResolutionWidth) FROM remote('127.0.0.{1,2}', test.hits) GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; diff --git a/parser/testdata/00083_array_filter/ast.json b/parser/testdata/00083_array_filter/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00083_array_filter/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00083_array_filter/metadata.json b/parser/testdata/00083_array_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00083_array_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00083_array_filter/query.sql b/parser/testdata/00083_array_filter/query.sql new file mode 100644 index 000000000..0c572dee5 --- /dev/null +++ b/parser/testdata/00083_array_filter/query.sql @@ -0,0 +1,3 @@ +-- Tags: stateful +SELECT sum(length(ParsedParams.Key1)) FROM test.hits WHERE notEmpty(ParsedParams.Key1); +SELECT sum(length(ParsedParams.ValueDouble)) FROM test.hits WHERE notEmpty(ParsedParams.ValueDouble); diff --git a/parser/testdata/00083_create_merge_tree_zookeeper_long/ast.json b/parser/testdata/00083_create_merge_tree_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00083_create_merge_tree_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00083_create_merge_tree_zookeeper_long/metadata.json b/parser/testdata/00083_create_merge_tree_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00083_create_merge_tree_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00083_create_merge_tree_zookeeper_long/query.sql b/parser/testdata/00083_create_merge_tree_zookeeper_long/query.sql new file mode 100644 index 000000000..e033a7e2b --- /dev/null +++ b/parser/testdata/00083_create_merge_tree_zookeeper_long/query.sql @@ -0,0 +1,145 @@ +-- Tags: long, zookeeper, no-replicated-database, no-shared-merge-tree +-- Tag no-replicated-database: Old syntax is not allowed +-- no-shared-merge-tree implemented another test + +SET optimize_on_insert = 0; + +DROP TABLE IF EXISTS 
merge_tree; +DROP TABLE IF EXISTS collapsing_merge_tree; +DROP TABLE IF EXISTS versioned_collapsing_merge_tree; +DROP TABLE IF EXISTS summing_merge_tree; +DROP TABLE IF EXISTS summing_merge_tree_with_list_of_columns_to_sum; +DROP TABLE IF EXISTS aggregating_merge_tree; + +DROP TABLE IF EXISTS merge_tree_with_sampling; +DROP TABLE IF EXISTS collapsing_merge_tree_with_sampling; +DROP TABLE IF EXISTS versioned_collapsing_merge_tree_with_sampling; +DROP TABLE IF EXISTS summing_merge_tree_with_sampling; +DROP TABLE IF EXISTS summing_merge_tree_with_sampling_with_list_of_columns_to_sum; +DROP TABLE IF EXISTS aggregating_merge_tree_with_sampling; + +DROP TABLE IF EXISTS replicated_merge_tree; +DROP TABLE IF EXISTS replicated_collapsing_merge_tree; +DROP TABLE IF EXISTS replicated_versioned_collapsing_merge_tree; +DROP TABLE IF EXISTS replicated_summing_merge_tree; +DROP TABLE IF EXISTS replicated_summing_merge_tree_with_list_of_columns_to_sum; +DROP TABLE IF EXISTS replicated_aggregating_merge_tree; + +DROP TABLE IF EXISTS replicated_merge_tree_with_sampling; +DROP TABLE IF EXISTS replicated_collapsing_merge_tree_with_sampling; +DROP TABLE IF EXISTS replicated_versioned_collapsing_merge_tree_with_sampling; +DROP TABLE IF EXISTS replicated_summing_merge_tree_with_sampling; +DROP TABLE IF EXISTS replicated_summing_merge_tree_with_sampling_with_list_of_columns_to_sum; +DROP TABLE IF EXISTS replicated_aggregating_merge_tree_with_sampling; + + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE merge_tree + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = MergeTree(d, (a, b), 111); +CREATE TABLE collapsing_merge_tree + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = CollapsingMergeTree(d, (a, b), 111, y); +CREATE TABLE versioned_collapsing_merge_tree + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = VersionedCollapsingMergeTree(d, (a, b), 111, y, b); +CREATE TABLE summing_merge_tree + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = SummingMergeTree(d, (a, b), 111); +CREATE TABLE summing_merge_tree_with_list_of_columns_to_sum + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = SummingMergeTree(d, (a, b), 111, (y, z)); +CREATE TABLE aggregating_merge_tree + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = AggregatingMergeTree(d, (a, b), 111); + +CREATE TABLE merge_tree_with_sampling + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = MergeTree(d, sipHash64(a) + b, (a, sipHash64(a) + b), 111); +CREATE TABLE collapsing_merge_tree_with_sampling + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = CollapsingMergeTree(d, sipHash64(a) + b, (a, sipHash64(a) + b), 111, y); +CREATE TABLE versioned_collapsing_merge_tree_with_sampling + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = VersionedCollapsingMergeTree(d, sipHash64(a) + b, (a, sipHash64(a) + b, b), 111, y, b); +CREATE TABLE summing_merge_tree_with_sampling + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = SummingMergeTree(d, sipHash64(a) + b, (a, sipHash64(a) + b), 111); +CREATE TABLE summing_merge_tree_with_sampling_with_list_of_columns_to_sum + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = SummingMergeTree(d, sipHash64(a) + b, (a, sipHash64(a) + b), 111, (y, z)); +CREATE TABLE aggregating_merge_tree_with_sampling + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = AggregatingMergeTree(d, sipHash64(a) + b, (a, sipHash64(a) + b), 111); + +CREATE TABLE 
replicated_merge_tree + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00083/01/replicated_merge_tree/', 'r1', d, (a, b), 111); +CREATE TABLE replicated_collapsing_merge_tree + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/{database}/test_00083/01/replicated_collapsing_merge_tree/', 'r1', d, (a, b), 111, y); +CREATE TABLE replicated_versioned_collapsing_merge_tree + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedVersionedCollapsingMergeTree('/clickhouse/tables/{database}/test_00083/01/replicated_versioned_collapsing_merge_tree/', 'r1', d, (a, b), 111, y, b); +CREATE TABLE replicated_summing_merge_tree + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/{database}/test_00083/01/replicated_summing_merge_tree/', 'r1', d, (a, b), 111); +CREATE TABLE replicated_summing_merge_tree_with_list_of_columns_to_sum + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/{database}/test_00083/01/replicated_summing_merge_tree_with_list_of_columns_to_sum/', 'r1', d, (a, b), 111, (y, z)); +CREATE TABLE replicated_aggregating_merge_tree + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/tables/{database}/test_00083/01/replicated_aggregating_merge_tree/', 'r1', d, (a, b), 111); + +CREATE TABLE replicated_merge_tree_with_sampling + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00083/01/replicated_merge_tree_with_sampling/', 'r1', d, sipHash64(a) + b, (a, sipHash64(a) + b), 111); +CREATE TABLE replicated_collapsing_merge_tree_with_sampling + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/{database}/test_00083/01/replicated_collapsing_merge_tree_with_sampling/', 'r1', d, sipHash64(a) + b, (a, sipHash64(a) + b), 111, y); +CREATE TABLE replicated_versioned_collapsing_merge_tree_with_sampling + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedVersionedCollapsingMergeTree('/clickhouse/tables/{database}/test_00083/01/replicated_versioned_collapsing_merge_tree_with_sampling/', 'r1', d, sipHash64(a) + b, (a, sipHash64(a) + b, b), 111, y, b); +CREATE TABLE replicated_summing_merge_tree_with_sampling + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/{database}/test_00083/01/replicated_summing_merge_tree_with_sampling/', 'r1', d, sipHash64(a) + b, (a, sipHash64(a) + b), 111); +CREATE TABLE replicated_summing_merge_tree_with_sampling_with_list_of_columns_to_sum + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/{database}/test_00083/01/replicated_summing_merge_tree_with_sampling_with_list_of_columns_to_sum/', 'r1', d, sipHash64(a) + b, (a, sipHash64(a) + b), 111, (y, z)); +CREATE TABLE replicated_aggregating_merge_tree_with_sampling + (d Date, a String, b UInt8, x String, y Int8, z UInt32) ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/tables/{database}/test_00083/01/replicated_aggregating_merge_tree_with_sampling/', 'r1', d, sipHash64(a) + b, (a, sipHash64(a) + b), 111); + + +INSERT INTO merge_tree VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT 
INTO collapsing_merge_tree VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO versioned_collapsing_merge_tree VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO summing_merge_tree VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO summing_merge_tree_with_list_of_columns_to_sum VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO aggregating_merge_tree VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); + +INSERT INTO merge_tree_with_sampling VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO collapsing_merge_tree_with_sampling VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO versioned_collapsing_merge_tree_with_sampling VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO summing_merge_tree_with_sampling VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO summing_merge_tree_with_sampling_with_list_of_columns_to_sum VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO aggregating_merge_tree_with_sampling VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); + +INSERT INTO replicated_merge_tree VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO replicated_collapsing_merge_tree VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO replicated_versioned_collapsing_merge_tree VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO replicated_summing_merge_tree VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO replicated_summing_merge_tree_with_list_of_columns_to_sum VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO replicated_aggregating_merge_tree VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); + +INSERT INTO replicated_merge_tree_with_sampling VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO replicated_collapsing_merge_tree_with_sampling VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO replicated_versioned_collapsing_merge_tree_with_sampling VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO replicated_summing_merge_tree_with_sampling VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO replicated_summing_merge_tree_with_sampling_with_list_of_columns_to_sum VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); +INSERT INTO replicated_aggregating_merge_tree_with_sampling VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); + + +DROP TABLE merge_tree; +DROP TABLE collapsing_merge_tree; +DROP TABLE versioned_collapsing_merge_tree; +DROP TABLE summing_merge_tree; +DROP TABLE summing_merge_tree_with_list_of_columns_to_sum; +DROP TABLE aggregating_merge_tree; + +DROP TABLE merge_tree_with_sampling; +DROP TABLE collapsing_merge_tree_with_sampling; +DROP TABLE versioned_collapsing_merge_tree_with_sampling; +DROP TABLE summing_merge_tree_with_sampling; +DROP TABLE summing_merge_tree_with_sampling_with_list_of_columns_to_sum; +DROP TABLE aggregating_merge_tree_with_sampling; + +DROP TABLE replicated_merge_tree; +DROP TABLE replicated_collapsing_merge_tree; +DROP TABLE 
replicated_versioned_collapsing_merge_tree; +DROP TABLE replicated_summing_merge_tree; +DROP TABLE replicated_summing_merge_tree_with_list_of_columns_to_sum; +DROP TABLE replicated_aggregating_merge_tree; + +DROP TABLE replicated_merge_tree_with_sampling; +DROP TABLE replicated_collapsing_merge_tree_with_sampling; +DROP TABLE replicated_versioned_collapsing_merge_tree_with_sampling; +DROP TABLE replicated_summing_merge_tree_with_sampling; +DROP TABLE replicated_summing_merge_tree_with_sampling_with_list_of_columns_to_sum; +DROP TABLE replicated_aggregating_merge_tree_with_sampling; diff --git a/parser/testdata/00084_external_aggregation/ast.json b/parser/testdata/00084_external_aggregation/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00084_external_aggregation/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00084_external_aggregation/metadata.json b/parser/testdata/00084_external_aggregation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00084_external_aggregation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00084_external_aggregation/query.sql b/parser/testdata/00084_external_aggregation/query.sql new file mode 100644 index 000000000..2f6321567 --- /dev/null +++ b/parser/testdata/00084_external_aggregation/query.sql @@ -0,0 +1,12 @@ +-- Tags: stateful +SET max_bytes_before_external_group_by = 200000000; +SET max_bytes_ratio_before_external_group_by = 0; + +SET max_memory_usage = 1500000000; +SET max_threads = 12; +SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u DESC, URL LIMIT 10; + +SET max_memory_usage = 300000000; +SET max_threads = 2; +SET aggregation_memory_efficient_merge_threads = 1; +SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u DESC, URL LIMIT 10; diff --git a/parser/testdata/00084_summing_merge_tree/ast.json b/parser/testdata/00084_summing_merge_tree/ast.json new file mode 100644 index 000000000..9edfedafc --- /dev/null +++ b/parser/testdata/00084_summing_merge_tree/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery summing_merge_tree (children 1)" + }, + { + "explain": " Identifier summing_merge_tree" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001707273, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/00084_summing_merge_tree/metadata.json b/parser/testdata/00084_summing_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00084_summing_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00084_summing_merge_tree/query.sql b/parser/testdata/00084_summing_merge_tree/query.sql new file mode 100644 index 000000000..429fde5c2 --- /dev/null +++ b/parser/testdata/00084_summing_merge_tree/query.sql @@ -0,0 +1,44 @@ +DROP TABLE IF EXISTS summing_merge_tree; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE summing_merge_tree (d Date, a String, x UInt32, y UInt64, z Float64) ENGINE = SummingMergeTree(d, a, 8192); + +INSERT INTO summing_merge_tree VALUES ('2000-01-01', 'Hello', 1, 2, 3); +INSERT INTO summing_merge_tree VALUES ('2000-01-01', 'Hello', 4, 5, 6); +INSERT INTO summing_merge_tree VALUES ('2000-01-01', 'Goodbye', 1, 2, 3); + +OPTIMIZE TABLE summing_merge_tree; +OPTIMIZE TABLE summing_merge_tree; +OPTIMIZE TABLE summing_merge_tree; + +SELECT * FROM 
summing_merge_tree ORDER BY d, a, x, y, z; + + +DROP TABLE summing_merge_tree; + +CREATE TABLE summing_merge_tree (d Date, a String, x UInt32, y UInt64, z Float64) ENGINE = SummingMergeTree(d, a, 8192, (y, z)); + +INSERT INTO summing_merge_tree VALUES ('2000-01-01', 'Hello', 1, 2, 3); +INSERT INTO summing_merge_tree VALUES ('2000-01-01', 'Hello', 4, 5, 6); +INSERT INTO summing_merge_tree VALUES ('2000-01-01', 'Goodbye', 1, 2, 3); + +OPTIMIZE TABLE summing_merge_tree; +OPTIMIZE TABLE summing_merge_tree; +OPTIMIZE TABLE summing_merge_tree; + +SELECT * FROM summing_merge_tree ORDER BY d, a, x, y, z; + + +DROP TABLE summing_merge_tree; + +-- +DROP TABLE IF EXISTS summing; +CREATE TABLE summing (p Date, k UInt64, s UInt64) ENGINE = SummingMergeTree(p, k, 1); + +INSERT INTO summing (k, s) VALUES (0, 1); +INSERT INTO summing (k, s) VALUES (0, 1), (666, 1), (666, 0); +OPTIMIZE TABLE summing PARTITION 197001; + +SELECT k, s FROM summing ORDER BY k; + +DROP TABLE summing; diff --git a/parser/testdata/00085_monotonic_evaluation_segfault/ast.json b/parser/testdata/00085_monotonic_evaluation_segfault/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00085_monotonic_evaluation_segfault/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00085_monotonic_evaluation_segfault/metadata.json b/parser/testdata/00085_monotonic_evaluation_segfault/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00085_monotonic_evaluation_segfault/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00085_monotonic_evaluation_segfault/query.sql b/parser/testdata/00085_monotonic_evaluation_segfault/query.sql new file mode 100644 index 000000000..e496cc411 --- /dev/null +++ b/parser/testdata/00085_monotonic_evaluation_segfault/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT any(0) FROM test.visits WHERE (toInt32(toDateTime(StartDate))) > 1000000000; diff --git a/parser/testdata/00085_visible_width_of_tuple_of_dates/ast.json b/parser/testdata/00085_visible_width_of_tuple_of_dates/ast.json new file mode 100644 index 000000000..3af39a45c --- /dev/null +++ b/parser/testdata/00085_visible_width_of_tuple_of_dates/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001378178, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00085_visible_width_of_tuple_of_dates/metadata.json b/parser/testdata/00085_visible_width_of_tuple_of_dates/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00085_visible_width_of_tuple_of_dates/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00085_visible_width_of_tuple_of_dates/query.sql b/parser/testdata/00085_visible_width_of_tuple_of_dates/query.sql new file mode 100644 index 000000000..09208b915 --- /dev/null +++ b/parser/testdata/00085_visible_width_of_tuple_of_dates/query.sql @@ -0,0 +1,2 @@ +SET output_format_pretty_color=1; +SELECT (toDate('2000-01-01'), toDate('2000-01-01')) AS x FORMAT PrettyCompact; diff --git a/parser/testdata/00086_array_reduce/ast.json b/parser/testdata/00086_array_reduce/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00086_array_reduce/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00086_array_reduce/metadata.json 
b/parser/testdata/00086_array_reduce/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00086_array_reduce/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00086_array_reduce/query.sql b/parser/testdata/00086_array_reduce/query.sql new file mode 100644 index 000000000..83b062397 --- /dev/null +++ b/parser/testdata/00086_array_reduce/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT arrayFilter(x -> x != 1, arrayMap((a, b) -> a = b, GeneralInterests, arrayReduce('groupArray', GeneralInterests))) AS res FROM test.hits WHERE length(res) != 0; diff --git a/parser/testdata/00086_concat_nary_const_with_nonconst_segfault/ast.json b/parser/testdata/00086_concat_nary_const_with_nonconst_segfault/ast.json new file mode 100644 index 000000000..5afd0a44c --- /dev/null +++ b/parser/testdata/00086_concat_nary_const_with_nonconst_segfault/ast.json @@ -0,0 +1,106 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 5)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function extract (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal '10000000'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers_mt" + }, + { + "explain": " Function like (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '1'" + }, + { + "explain": " Literal '...'" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal '%10000000%'" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Set" + } + ], + + "rows": 28, + + "statistics": + { + "elapsed": 0.00151657, + "rows_read": 28, + "bytes_read": 1064 + } +} diff --git a/parser/testdata/00086_concat_nary_const_with_nonconst_segfault/metadata.json b/parser/testdata/00086_concat_nary_const_with_nonconst_segfault/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00086_concat_nary_const_with_nonconst_segfault/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00086_concat_nary_const_with_nonconst_segfault/query.sql b/parser/testdata/00086_concat_nary_const_with_nonconst_segfault/query.sql new file mode 100644 index 000000000..4b87b2af2 --- /dev/null +++ b/parser/testdata/00086_concat_nary_const_with_nonconst_segfault/query.sql @@ -0,0 +1 @@ +SELECT extract(toString(number), '10000000') FROM system.numbers_mt WHERE concat(materialize('1'), '...', toString(number)) LIKE '%10000000%' LIMIT 1 SETTINGS max_rows_to_read = 0; diff --git a/parser/testdata/00087_distinct_of_empty_arrays/ast.json b/parser/testdata/00087_distinct_of_empty_arrays/ast.json new file mode 100644 
index 000000000..9eaffa9c0 --- /dev/null +++ b/parser/testdata/00087_distinct_of_empty_arrays/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function emptyArrayString (alias k) (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_100000" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001132819, + "rows_read": 20, + "bytes_read": 856 + } +} diff --git a/parser/testdata/00087_distinct_of_empty_arrays/metadata.json b/parser/testdata/00087_distinct_of_empty_arrays/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00087_distinct_of_empty_arrays/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00087_distinct_of_empty_arrays/query.sql b/parser/testdata/00087_distinct_of_empty_arrays/query.sql new file mode 100644 index 000000000..2b0851b7d --- /dev/null +++ b/parser/testdata/00087_distinct_of_empty_arrays/query.sql @@ -0,0 +1 @@ +SELECT DISTINCT emptyArrayString() AS k FROM (SELECT * FROM system.numbers LIMIT 100000); diff --git a/parser/testdata/00087_math_functions/ast.json b/parser/testdata/00087_math_functions/ast.json new file mode 100644 index 000000000..562facca0 --- /dev/null +++ b/parser/testdata/00087_math_functions/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function abs (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001423657, + "rows_read": 10, + "bytes_read": 372 + } +} diff --git a/parser/testdata/00087_math_functions/metadata.json b/parser/testdata/00087_math_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00087_math_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00087_math_functions/query.sql b/parser/testdata/00087_math_functions/query.sql new file mode 100644 index 000000000..b3a9c3ab4 --- /dev/null +++ b/parser/testdata/00087_math_functions/query.sql @@ -0,0 +1,147 @@ +select abs(0) = 0; +select abs(1) = 1; 
+select abs(-1) = 1; +select abs(0.0) = 0; +select abs(1.0) = 1.0; +select abs(-1.0) = 1.0; +select abs(-128) = 128; +select abs(127) = 127; +select sum(abs(number - 10 as x) = (x < 0 ? -x : x)) / count() from system.one array join range(1000000) as number; + +select sqrt(0) = 0; +select sqrt(1) = 1; +select sqrt(4) = 2; +select sum(sqrt(x * x) = x) / count() from system.one array join range(1000000) as x; + +select cbrt(0) = 0; +select cbrt(1) = 1; +select cbrt(8) = 2; +select sum(abs(cbrt(x * x * x) - x) < 1.0e-9) / count() from system.one array join range(1000000) as x; + +select pow(1, 0) = 1; +select pow(2, 0) = 1; +select sum(pow(x, 0) = 1) / count() from system.one array join range(1000000) as x; +select pow(1, 1) = 1; +select pow(2, 1) = 2; +select sum(abs(pow(x, 1) - x) < 1.0e-9) / count() from system.one array join range(1000000) as x; +select sum(pow(x, 2) = x * x) / count() from system.one array join range(10000) as x; + +select isNaN(lgamma(-2)); +select isNaN(lgamma(-1)); +select lgamma(0) = inf; +select lgamma(1) = 0; +select lgamma(2) = 0; +select abs(lgamma(3) - 0.693147181) < 1.0e-8; +select abs(lgamma(4) - 1.791759469) < 1.0e-8; + +select tgamma(0) = inf; +select tgamma(1) = 1; +select tgamma(2) = 1; +select tgamma(3) = 2; +select tgamma(4) = 6; + +select sum(abs(lgamma(x + 1) - log(tgamma(x + 1))) < 1.0e-8) / count() from system.one array join range(10) as x; + +select abs(e() - arraySum(arrayMap(x -> 1 / tgamma(x + 1), range(13)))) < 1.0e-9; + +select log(0) = -inf; +select log(1) = 0; +select abs(log(e()) - 1) < 1e-8; +select abs(log(exp(1)) - 1) < 1e-8; +select abs(log(exp(2)) - 2) < 1e-8; +select sum(abs(log(exp(x)) - x) < 1e-8) / count() from system.one array join range(100) as x; + +select exp2(-1) = 1/2; +select exp2(0) = 1; +select exp2(1) = 2; +select exp2(2) = 4; +select exp2(3) = 8; +select sum(exp2(x) = pow(2, x)) / count() from system.one array join range(1000) as x; + +select log2(0) = -inf; +select log2(1) = 0; +select log2(2) = 1; +select log2(4) = 2; +select sum(abs(log2(exp2(x)) - x) < 1.0e-9) / count() from system.one array join range(1000) as x; + +select log1p(-1) = -inf; +select log1p(0) = 0; +select abs(log1p(exp(2) - 1) - 2) < 1e8; +select abs(log1p(exp(3) - 1) - 3) < 1e8; +select sum(abs(log1p(exp(x) - 1) - x) < 1e-8) / count() from system.one array join range(100) as x; + +select sin(0) = 0; +select sin(pi() / 4) = 1 / sqrt(2); +select sin(pi() / 2) = 1; +select sin(3 * pi() / 2) = -1; +select sum(sin(pi() / 2 + 2 * pi() * x) = 1) / count() from system.one array join range(1000000) as x; + +select cos(0) = 1; +select abs(cos(pi() / 4) - 1 / sqrt(2)) < 1.0e-9; +select cos(pi() / 2) < 1.0e-9; +select sum(abs(cos(2 * pi() * x)) - 1 < 1.0e-9) / count() from system.one array join range(1000000) as x; + +select tan(0) = 0; +select abs(tan(pi() / 4) - 1) < 1.0e-9; +select sum(abs(tan(pi() / 4 + 2 * pi() * x) - 1) < 1.0e-8) / count() from system.one array join range(1000000) as x; + +select asin(0) = 0; +select asin(1) = pi() / 2; +select asin(-1) = -pi() / 2; + +select acos(0) = pi() / 2; +select acos(1) = 0; +select acos(-1) = pi(); + +select atan(0) = 0; +select atan(1) = pi() / 4; + +select atan2(0, 1) = 0; +select atan2(0, 2) = 0; +select atan2(1, 0) = pi() / 2; +select atan2(1, 1) = pi() / 4; +select atan2(-1, -1) = -3 * pi() / 4; + +select hypot(0, 1) = 1; +select hypot(1, 0) = 1; +select hypot(1, 1) = sqrt(2); +select hypot(-1, 1) = sqrt(2); +select hypot(3, 4) = 5; + +select sinh(0) = 0; +select sinh(1) = -sinh(-1); +select abs(sinh(1) - 0.5 *
(e() - exp(-1))) < 1e-6; +select abs(sinh(2) - 0.5 * (exp(2) - exp(-2))) < 1e-6; +select sum(abs(sinh(x) - 0.5 * (exp(x) - exp(-x))) < 1e-6) / count() from system.one array join range(10) as x; + +select cosh(0) = 1; +select cosh(1) = cosh(-1); +select abs(cosh(1) - 0.5 * (e() + exp(-1))) < 1e-6; +select abs(pow(cosh(1), 2) - pow(sinh(1), 2) - 1) < 1e-6; +select sum(abs(cosh(x) * cosh(x) - sinh(x) * sinh(x) - 1) < 1e-6) / count() from system.one array join range(10) as x; + +select asinh(0) = 0; +select asinh(1) = -asinh(-1); +select abs(asinh(1) - ln(1 + sqrt(2))) < 1e-9; +select abs(asinh(sinh(1)) - 1) < 1e-9; +select sum(abs(asinh(sinh(x)) - x) < 1e-9) / count() from system.one array join range(100) as x; + +select acosh(1) = 0; +select abs(acosh(2) - ln(2 + sqrt(3))) < 1e-9; +select abs(acosh(cosh(2)) - 2) < 1e-9; +select abs(acosh(cosh(3)) - 3) < 1e-9; +select sum(abs(acosh(cosh(x)) - x) < 1e-9) / count() from system.one array join range(1, 101) as x; + +select atanh(0) = 0; +select atanh(0.5) = -atanh(-0.5); +select abs(atanh(0.9) - 0.5 * ln(19)) < 1e-5; +select abs(atanh(tanh(1)) - 1) < 1e-5; +select sum(abs(atanh(tanh(x)) - x) < 1e-5) / count() from system.one array join range(10) as x; + +select erf(0) = 0; +select erf(-10) = -1; +select erf(10) = 1; + +select erfc(0) = 1; +select erfc(-10) = 2; +select erfc(28) = 0; diff --git a/parser/testdata/00087_where_0/ast.json b/parser/testdata/00087_where_0/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00087_where_0/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00087_where_0/metadata.json b/parser/testdata/00087_where_0/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00087_where_0/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00087_where_0/query.sql b/parser/testdata/00087_where_0/query.sql new file mode 100644 index 000000000..8717c819e --- /dev/null +++ b/parser/testdata/00087_where_0/query.sql @@ -0,0 +1,6 @@ +-- Tags: stateful +SET max_rows_to_read = 1000; +SELECT CounterID, uniq(UserID) FROM test.hits WHERE 0 != 0 GROUP BY CounterID; +SELECT CounterID, uniq(UserID) FROM test.hits WHERE 0 != 0 GROUP BY CounterID SETTINGS optimize_aggregation_in_order = 1; +SELECT CounterID, uniq(UserID) FROM test.hits WHERE 0 AND CounterID = 1704509 GROUP BY CounterID; +SELECT CounterID, uniq(UserID) FROM test.hits WHERE 0 AND CounterID = 1704509 GROUP BY CounterID SETTINGS optimize_aggregation_in_order = 1; diff --git a/parser/testdata/00088_distinct_of_arrays_of_strings/ast.json b/parser/testdata/00088_distinct_of_arrays_of_strings/ast.json new file mode 100644 index 000000000..c55b90323 --- /dev/null +++ b/parser/testdata/00088_distinct_of_arrays_of_strings/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayFilter (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function notEmpty (children 1)" + }, + { + "explain": 
" ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[Array_[''], Array_['is_registred'], Array_['registration_month', 'user_login', 'is_registred'], Array_['is_registred'], Array_['is_registred'], Array_['']]" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001710018, + "rows_read": 17, + "bytes_read": 828 + } +} diff --git a/parser/testdata/00088_distinct_of_arrays_of_strings/metadata.json b/parser/testdata/00088_distinct_of_arrays_of_strings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00088_distinct_of_arrays_of_strings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00088_distinct_of_arrays_of_strings/query.sql b/parser/testdata/00088_distinct_of_arrays_of_strings/query.sql new file mode 100644 index 000000000..296c7de29 --- /dev/null +++ b/parser/testdata/00088_distinct_of_arrays_of_strings/query.sql @@ -0,0 +1 @@ +SELECT DISTINCT arrayFilter(x -> notEmpty(x), arrayJoin([[''], ['is_registred'], ['registration_month','user_login','is_registred'], ['is_registred'], ['is_registred'], ['']])); diff --git a/parser/testdata/00088_global_in_one_shard_and_rows_before_limit/ast.json b/parser/testdata/00088_global_in_one_shard_and_rows_before_limit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00088_global_in_one_shard_and_rows_before_limit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00088_global_in_one_shard_and_rows_before_limit/metadata.json b/parser/testdata/00088_global_in_one_shard_and_rows_before_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00088_global_in_one_shard_and_rows_before_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00088_global_in_one_shard_and_rows_before_limit/query.sql b/parser/testdata/00088_global_in_one_shard_and_rows_before_limit/query.sql new file mode 100644 index 000000000..5d221393e --- /dev/null +++ b/parser/testdata/00088_global_in_one_shard_and_rows_before_limit/query.sql @@ -0,0 +1,4 @@ +-- Tags: stateful, shard + +SET output_format_write_statistics = 0, max_rows_to_read = 50_000_000; +SELECT EventDate, count() FROM remote('127.0.0.1', test.hits) WHERE UserID GLOBAL IN (SELECT UserID FROM test.hits) GROUP BY EventDate ORDER BY EventDate LIMIT 5 FORMAT JSONCompact; diff --git a/parser/testdata/00089_group_by_arrays_of_fixed/ast.json b/parser/testdata/00089_group_by_arrays_of_fixed/ast.json new file mode 100644 index 000000000..de2d4f5f6 --- /dev/null +++ b/parser/testdata/00089_group_by_arrays_of_fixed/ast.json @@ -0,0 +1,220 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier arr" + }, + { + "explain": " Function count (alias c) (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + 
"explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayMap (alias arr) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function arraySort (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function groupArray (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10000" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_12379813738877118345" + }, + { + "explain": " Literal UInt64_1234" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier arr" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier c" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier arr" + } + ], + + "rows": 66, + + "statistics": + { + "elapsed": 0.001915992, + "rows_read": 66, + "bytes_read": 3001 + } +} diff --git a/parser/testdata/00089_group_by_arrays_of_fixed/metadata.json b/parser/testdata/00089_group_by_arrays_of_fixed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00089_group_by_arrays_of_fixed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00089_group_by_arrays_of_fixed/query.sql b/parser/testdata/00089_group_by_arrays_of_fixed/query.sql new file mode 100644 index 000000000..a068671b9 --- /dev/null +++ b/parser/testdata/00089_group_by_arrays_of_fixed/query.sql @@ -0,0 +1 @@ +SELECT arr, count() AS c FROM (SELECT arrayMap(x -> x % 2, arraySort(groupArray(number))) AS 
arr FROM (SELECT number FROM system.numbers LIMIT 10000) GROUP BY number % ((number * 0xABCDEF0123456789 % 1234) + 1)) GROUP BY arr ORDER BY c DESC, arr ASC; \ No newline at end of file diff --git a/parser/testdata/00089_position_functions_with_non_constant_arg/ast.json b/parser/testdata/00089_position_functions_with_non_constant_arg/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00089_position_functions_with_non_constant_arg/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00089_position_functions_with_non_constant_arg/metadata.json b/parser/testdata/00089_position_functions_with_non_constant_arg/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00089_position_functions_with_non_constant_arg/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00089_position_functions_with_non_constant_arg/query.sql b/parser/testdata/00089_position_functions_with_non_constant_arg/query.sql new file mode 100644 index 000000000..33523a1cf --- /dev/null +++ b/parser/testdata/00089_position_functions_with_non_constant_arg/query.sql @@ -0,0 +1,11 @@ +-- Tags: stateful +SET max_threads = 0; -- let's reset to automatic detection of the number of threads, otherwise test can be slow. + +SELECT count() FROM test.hits WHERE position(URL, 'metrika') != position(URL, materialize('metrika')); +SELECT count() FROM test.hits WHERE positionCaseInsensitive(URL, 'metrika') != positionCaseInsensitive(URL, materialize('metrika')); +SELECT count() FROM test.hits WHERE positionUTF8(Title, 'новости') != positionUTF8(Title, materialize('новости')); +SELECT count() FROM test.hits WHERE positionCaseInsensitiveUTF8(Title, 'новости') != positionCaseInsensitiveUTF8(Title, materialize('новости')); + +SELECT position(URL, domain(URL)) AS x FROM test.hits WHERE x = 0 AND URL NOT LIKE '%yandex.ru%' LIMIT 100; +SELECT URL FROM test.hits WHERE x > 10 ORDER BY position(URL, domain(URL)) AS x DESC, URL LIMIT 2; +SELECT DISTINCT URL, URLDomain, position('http://yandex.ru/', domain(URL)) AS x FROM test.hits WHERE x > 8 ORDER BY position('http://yandex.ru/', domain(URL)) DESC, URL LIMIT 3; diff --git a/parser/testdata/00091_prewhere_two_conditions/ast.json b/parser/testdata/00091_prewhere_two_conditions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00091_prewhere_two_conditions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00091_prewhere_two_conditions/metadata.json b/parser/testdata/00091_prewhere_two_conditions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00091_prewhere_two_conditions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00091_prewhere_two_conditions/query.sql b/parser/testdata/00091_prewhere_two_conditions/query.sql new file mode 100644 index 000000000..f0c2eb0fe --- /dev/null +++ b/parser/testdata/00091_prewhere_two_conditions/query.sql @@ -0,0 +1,19 @@ +-- Tags: stateful, no-parallel-replicas +-- Requires investigation (max_bytes_to_read is not respected) + +SET max_bytes_to_read = 600000000; + +SET optimize_move_to_prewhere = 1; +SET enable_multiple_prewhere_read_steps = 1; + +SELECT uniq(URL) FROM test.hits WHERE toTimeZone(EventTime, 'Asia/Dubai') >= '2014-03-20 00:00:00' AND toTimeZone(EventTime, 'Asia/Dubai') < '2014-03-21 00:00:00'; +SELECT uniq(URL) FROM test.hits WHERE toTimeZone(EventTime, 'Asia/Dubai') >= '2014-03-20 00:00:00' AND 
toTimeZone(EventTime, 'Asia/Dubai') < '2014-03-21 00:00:00' AND URL != ''; +SELECT uniq(*) FROM test.hits WHERE toTimeZone(EventTime, 'Asia/Dubai') >= '2014-03-20 00:00:00' AND toTimeZone(EventTime, 'Asia/Dubai') < '2014-03-21 00:00:00' AND EventDate = '2014-03-21'; +WITH toTimeZone(EventTime, 'Asia/Dubai') AS xyz SELECT uniq(*) FROM test.hits WHERE xyz >= '2014-03-20 00:00:00' AND xyz < '2014-03-21 00:00:00' AND EventDate = '2014-03-21'; + +SET optimize_move_to_prewhere = 0; +SET enable_multiple_prewhere_read_steps = 0; + +SELECT uniq(URL) FROM test.hits WHERE toTimeZone(EventTime, 'Asia/Dubai') >= '2014-03-20 00:00:00' AND toTimeZone(EventTime, 'Asia/Dubai') < '2014-03-21 00:00:00'; -- { serverError TOO_MANY_BYTES } +SELECT uniq(URL) FROM test.hits WHERE toTimeZone(EventTime, 'Asia/Dubai') >= '2014-03-20 00:00:00' AND URL != '' AND toTimeZone(EventTime, 'Asia/Dubai') < '2014-03-21 00:00:00'; -- { serverError TOO_MANY_BYTES } +SELECT uniq(URL) FROM test.hits PREWHERE toTimeZone(EventTime, 'Asia/Dubai') >= '2014-03-20 00:00:00' AND URL != '' AND toTimeZone(EventTime, 'Asia/Dubai') < '2014-03-21 00:00:00'; -- { serverError TOO_MANY_BYTES } diff --git a/parser/testdata/00093_prewhere_array_join/ast.json b/parser/testdata/00093_prewhere_array_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00093_prewhere_array_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00093_prewhere_array_join/metadata.json b/parser/testdata/00093_prewhere_array_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00093_prewhere_array_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00093_prewhere_array_join/query.sql b/parser/testdata/00093_prewhere_array_join/query.sql new file mode 100644 index 000000000..466219c1a --- /dev/null +++ b/parser/testdata/00093_prewhere_array_join/query.sql @@ -0,0 +1,10 @@ +-- Tags: stateful +SELECT arrayJoin([SearchEngineID]) AS search_engine, URL FROM test.hits WHERE SearchEngineID != 0 AND search_engine != 0 FORMAT Null; + +SELECT + arrayJoin([0]) AS browser, + arrayJoin([SearchEngineID]) AS search_engine, + URL +FROM test.hits +WHERE 1 AND (SearchEngineID != 0) AND (browser != 0) AND (search_engine != 0) +FORMAT Null; diff --git a/parser/testdata/00094_order_by_array_join_limit/ast.json b/parser/testdata/00094_order_by_array_join_limit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00094_order_by_array_join_limit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00094_order_by_array_join_limit/metadata.json b/parser/testdata/00094_order_by_array_join_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00094_order_by_array_join_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00094_order_by_array_join_limit/query.sql b/parser/testdata/00094_order_by_array_join_limit/query.sql new file mode 100644 index 000000000..9acf5e876 --- /dev/null +++ b/parser/testdata/00094_order_by_array_join_limit/query.sql @@ -0,0 +1,10 @@ +-- Tags: stateful +SELECT `ParsedParams.Key2` AS x +FROM test.hits +ARRAY JOIN ParsedParams AS PP +ORDER BY x ASC +LIMIT 2; + +SELECT arrayJoin(`ParsedParams.Key2`) AS x FROM test.hits ORDER BY x ASC LIMIT 2; +WITH arrayJoin(`ParsedParams.Key2`) AS pp SELECT ParsedParams.Key2 AS x FROM test.hits ORDER BY x ASC LIMIT 2; +WITH arrayJoin(`ParsedParams.Key2`) AS pp SELECT 
ParsedParams.Key2 AS x FROM test.hits WHERE NOT ignore(pp) ORDER BY x ASC LIMIT 2; diff --git a/parser/testdata/00095_hyperscan_profiler/ast.json b/parser/testdata/00095_hyperscan_profiler/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00095_hyperscan_profiler/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00095_hyperscan_profiler/metadata.json b/parser/testdata/00095_hyperscan_profiler/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00095_hyperscan_profiler/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00095_hyperscan_profiler/query.sql b/parser/testdata/00095_hyperscan_profiler/query.sql new file mode 100644 index 000000000..acacb462a --- /dev/null +++ b/parser/testdata/00095_hyperscan_profiler/query.sql @@ -0,0 +1,8 @@ +-- Tags: stateful, no-debug, use-vectorscan + +-- Check that server does not get segfault due to bad stack unwinding from Hyperscan + +SET query_profiler_cpu_time_period_ns = 1000000; +SET query_profiler_real_time_period_ns = 1000000; + +SELECT count() FROM test.hits WHERE multiFuzzyMatchAny(URL, 2, ['about/address', 'for_woman', '^https?://lm-company.ruy/$', 'ultimateguitar.com']); diff --git a/parser/testdata/00096_aggregation_min_if/ast.json b/parser/testdata/00096_aggregation_min_if/ast.json new file mode 100644 index 000000000..7037d0595 --- /dev/null +++ b/parser/testdata/00096_aggregation_min_if/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery min_if (children 1)" + }, + { + "explain": " Identifier min_if" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001414911, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/00096_aggregation_min_if/metadata.json b/parser/testdata/00096_aggregation_min_if/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00096_aggregation_min_if/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00096_aggregation_min_if/query.sql b/parser/testdata/00096_aggregation_min_if/query.sql new file mode 100644 index 000000000..52e6cd94d --- /dev/null +++ b/parser/testdata/00096_aggregation_min_if/query.sql @@ -0,0 +1,250 @@ +DROP TABLE IF EXISTS min_if; +CREATE TABLE min_if (arr Array(UInt8), str String, int Int32) ENGINE = Memory; +INSERT INTO min_if SELECT emptyArrayUInt8() AS arr, '' AS str, toInt32(0) AS int FROM system.numbers LIMIT 100000; +INSERT INTO min_if SELECT [1] AS arr, '2' AS str, toInt32(3) AS int; +INSERT INTO min_if SELECT emptyArrayUInt8() AS arr, '' AS str, toInt32(0) AS int FROM system.numbers LIMIT 100000; + +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; 
+SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; +SELECT minIf(arr, notEmpty(arr)) FROM min_if; + +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, 
notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; +SELECT minIf(str, notEmpty(str)) FROM min_if; + +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM 
min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; +SELECT minIf(int, int != 0) FROM min_if; + +DROP TABLE min_if; diff --git a/parser/testdata/00097_constexpr_in_index/ast.json b/parser/testdata/00097_constexpr_in_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00097_constexpr_in_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00097_constexpr_in_index/metadata.json b/parser/testdata/00097_constexpr_in_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00097_constexpr_in_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} 
diff --git a/parser/testdata/00097_constexpr_in_index/query.sql b/parser/testdata/00097_constexpr_in_index/query.sql new file mode 100644 index 000000000..d2a28f66c --- /dev/null +++ b/parser/testdata/00097_constexpr_in_index/query.sql @@ -0,0 +1,7 @@ +-- Tags: stateful +-- Even in the presence of OR, we evaluate "0 IN (1, 2, 3)" as a constant expression, so it does not prevent index analysis. + +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. +SET parallel_replicas_index_analysis_only_on_coordinator = 0; + +SELECT count() FROM test.hits WHERE CounterID IN (14917930, 33034174) OR 0 IN (1, 2, 3) SETTINGS max_rows_to_read = 1000000, force_primary_key = 1; diff --git a/parser/testdata/00098_1_union_all/ast.json b/parser/testdata/00098_1_union_all/ast.json new file mode 100644 index 000000000..98529a6d5 --- /dev/null +++ b/parser/testdata/00098_1_union_all/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data2013 (children 1)" + }, + { + "explain": " Identifier data2013" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001737411, + "rows_read": 2, + "bytes_read": 68 + } +}
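The ast.json fixtures in this change all share one shape: the JSON that clickhouse-client emits for an EXPLAIN AST query, with a "meta" header declaring a single String column named "explain", one "explain" string per "data" row (each row holds one indented line of the AST tree), a "rows" count, and run "statistics". Directories whose query is not yet handled carry {"error": true} instead. A minimal Go sketch of reading that shape follows; the type and field names are illustrative assumptions, not code from this diff.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// astFixture models the ast.json layout used by these fixtures. Keys the
// sketch does not need, such as "meta" and "statistics", are simply
// ignored by encoding/json during unmarshalling.
type astFixture struct {
	Error bool `json:"error"` // {"error": true}: no expected AST recorded yet
	Data  []struct {
		Explain string `json:"explain"` // one indented line of the EXPLAIN AST tree
	} `json:"data"`
	Rows int `json:"rows"`
}

func main() {
	raw, err := os.ReadFile("parser/testdata/00098_1_union_all/ast.json")
	if err != nil {
		panic(err)
	}
	var fx astFixture
	if err := json.Unmarshal(raw, &fx); err != nil {
		panic(err)
	}
	for _, row := range fx.Data {
		fmt.Println(row.Explain) // prints the EXPLAIN AST tree line by line
	}
}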
diff --git a/parser/testdata/00098_2_union_all/metadata.json b/parser/testdata/00098_2_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_2_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_2_union_all/query.sql b/parser/testdata/00098_2_union_all/query.sql new file mode 100644 index 000000000..d77eca70d --- /dev/null +++ b/parser/testdata/00098_2_union_all/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS data2013; +DROP TABLE IF EXISTS data2014; + +CREATE TABLE data2013 (name String, value UInt32) ENGINE = Memory; +CREATE TABLE data2014 (name String, value UInt32) ENGINE = Memory; + +INSERT INTO data2013(name,value) VALUES('Alice', 1000); +INSERT INTO data2013(name,value) VALUES('Bob', 2000); +INSERT INTO data2013(name,value) VALUES('Carol', 5000); + +INSERT INTO data2014(name,value) VALUES('Alice', 2000); +INSERT INTO data2014(name,value) VALUES('Bob', 2000); +INSERT INTO data2014(name,value) VALUES('Dennis', 35000); + +SELECT val FROM +(SELECT value AS val FROM data2013 WHERE name = 'Alice' +UNION ALL +SELECT value AS val FROM data2014 WHERE name = 'Alice') +ORDER BY val ASC; + +DROP TABLE data2013; +DROP TABLE data2014; diff --git a/parser/testdata/00098_3_union_all/ast.json b/parser/testdata/00098_3_union_all/ast.json new file mode 100644 index 000000000..d23b00f60 --- /dev/null +++ b/parser/testdata/00098_3_union_all/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data2013 (children 1)" + }, + { + "explain": " Identifier data2013" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00165358, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00098_3_union_all/metadata.json b/parser/testdata/00098_3_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_3_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_3_union_all/query.sql b/parser/testdata/00098_3_union_all/query.sql new file mode 100644 index 000000000..555d1cea3 --- /dev/null +++ b/parser/testdata/00098_3_union_all/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS data2013; +DROP TABLE IF EXISTS data2014; + +CREATE TABLE data2013 (name String, value UInt32) ENGINE = Memory; +CREATE TABLE data2014 (name String, value UInt32) ENGINE = Memory; + +INSERT INTO data2013(name,value) VALUES('Alice', 1000); +INSERT INTO data2013(name,value) VALUES('Bob', 2000); +INSERT INTO data2013(name,value) VALUES('Carol', 5000); + +INSERT INTO data2014(name,value) VALUES('Alice', 2000); +INSERT INTO data2014(name,value) VALUES('Bob', 2000); +INSERT INTO data2014(name,value) VALUES('Dennis', 35000); + +SELECT val FROM +(SELECT value AS val FROM data2013 WHERE name = 'Alice' +UNION /*comment*/ ALL +SELECT value AS val FROM data2014 WHERE name = 'Alice') +ORDER BY val ASC; + +DROP TABLE data2013; +DROP TABLE data2014; diff --git a/parser/testdata/00098_4_union_all/ast.json b/parser/testdata/00098_4_union_all/ast.json new file mode 100644 index 000000000..99a69ee5f --- /dev/null +++ b/parser/testdata/00098_4_union_all/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data2013 (children 1)" + }, + { + "explain": " Identifier data2013" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001297648, + "rows_read": 2, + "bytes_read": 68 + } +} 
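Each fixture directory in this diff is a triple: query.sql (the parser input), ast.json (the expected EXPLAIN AST output, or {"error": true}), and metadata.json, where {"todo": true} appears to mark cases not yet expected to pass. Below is a hedged sketch of a table-driven harness over those triples; the test name, skip policy, and comparison step are assumptions, not part of this change.

package parser_test

import (
	"encoding/json"
	"os"
	"path/filepath"
	"testing"
)

// TestParserTestdata walks every fixture directory and skips the ones whose
// metadata.json is marked {"todo": true}. The parse-and-compare step is left
// as a comment because this diff does not show the parser API.
func TestParserTestdata(t *testing.T) {
	dirs, err := filepath.Glob(filepath.Join("testdata", "*"))
	if err != nil {
		t.Fatal(err)
	}
	for _, dir := range dirs {
		t.Run(filepath.Base(dir), func(t *testing.T) {
			raw, err := os.ReadFile(filepath.Join(dir, "metadata.json"))
			if err != nil {
				t.Fatal(err)
			}
			var meta struct {
				Todo bool `json:"todo"`
			}
			if err := json.Unmarshal(raw, &meta); err != nil {
				t.Fatal(err)
			}
			if meta.Todo {
				t.Skip(`metadata.json is marked {"todo": true}`)
			}
			// A real harness would parse query.sql here and diff the
			// regenerated EXPLAIN AST text against ast.json.
		})
	}
}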
diff --git a/parser/testdata/00098_4_union_all/metadata.json b/parser/testdata/00098_4_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_4_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_4_union_all/query.sql b/parser/testdata/00098_4_union_all/query.sql new file mode 100644 index 000000000..09086ce7a --- /dev/null +++ b/parser/testdata/00098_4_union_all/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS data2013; +DROP TABLE IF EXISTS data2014; + +CREATE TABLE data2013 (name String, value UInt32) ENGINE = Memory; +CREATE TABLE data2014 (name String, value UInt32) ENGINE = Memory; + +INSERT INTO data2013(name,value) VALUES('Alice', 1000); +INSERT INTO data2013(name,value) VALUES('Bob', 2000); +INSERT INTO data2013(name,value) VALUES('Carol', 5000); + +INSERT INTO data2014(name,value) VALUES('Alice', 2000); +INSERT INTO data2014(name,value) VALUES('Bob', 2000); +INSERT INTO data2014(name,value) VALUES('Dennis', 35000); + +SELECT val FROM +(SELECT value AS val FROM data2013 WHERE name = 'Alice' +UNION ALL +SELECT value AS val FROM data2014 WHERE name = 'Alice' +UNION ALL +SELECT value AS val FROM data2014 WHERE name = 'Dennis') +ORDER BY val ASC; + +DROP TABLE data2013; +DROP TABLE data2014; diff --git a/parser/testdata/00098_5_union_all/ast.json b/parser/testdata/00098_5_union_all/ast.json new file mode 100644 index 000000000..72eb6461b --- /dev/null +++ b/parser/testdata/00098_5_union_all/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data2013 (children 1)" + }, + { + "explain": " Identifier data2013" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001675555, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00098_5_union_all/metadata.json b/parser/testdata/00098_5_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_5_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_5_union_all/query.sql b/parser/testdata/00098_5_union_all/query.sql new file mode 100644 index 000000000..c4d1a8dc3 --- /dev/null +++ b/parser/testdata/00098_5_union_all/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS data2013; +DROP TABLE IF EXISTS data2014; + +CREATE TABLE data2013 (name String, value UInt32) ENGINE = Memory; +CREATE TABLE data2014 (name String, value UInt32) ENGINE = Memory; + +INSERT INTO data2013(name,value) VALUES('Alice', 1000); +INSERT INTO data2013(name,value) VALUES('Bob', 2000); +INSERT INTO data2013(name,value) VALUES('Carol', 5000); + +INSERT INTO data2014(name,value) VALUES('Alice', 2000); +INSERT INTO data2014(name,value) VALUES('Bob', 2000); +INSERT INTO data2014(name,value) VALUES('Dennis', 35000); + +SELECT nn,vv FROM (SELECT name AS nn, value AS vv FROM data2013 UNION ALL SELECT name AS nn, value AS vv FROM data2014) ORDER BY nn,vv ASC; + +DROP TABLE data2013; +DROP TABLE data2014; diff --git a/parser/testdata/00098_6_union_all/ast.json b/parser/testdata/00098_6_union_all/ast.json new file mode 100644 index 000000000..e11128a17 --- /dev/null +++ b/parser/testdata/00098_6_union_all/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + 
"explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier X" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3 (alias X)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2 (alias X)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias X)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier X" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001663114, + "rows_read": 23, + "bytes_read": 929 + } +} diff --git a/parser/testdata/00098_6_union_all/metadata.json b/parser/testdata/00098_6_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_6_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_6_union_all/query.sql b/parser/testdata/00098_6_union_all/query.sql new file mode 100644 index 000000000..57e9b3aac --- /dev/null +++ b/parser/testdata/00098_6_union_all/query.sql @@ -0,0 +1,2 @@ +SELECT X FROM (SELECT 3 AS X UNION ALL SELECT 2 AS X UNION ALL SELECT 1 AS X) ORDER BY X ASC; + diff --git a/parser/testdata/00098_7_union_all/ast.json b/parser/testdata/00098_7_union_all/ast.json new file mode 100644 index 000000000..cc8c0a3dd --- /dev/null +++ b/parser/testdata/00098_7_union_all/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier DomainID" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias DomainID)" + }, + { + "explain": " Literal 'abc' (alias Domain)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2 (alias DomainID)" + }, + { + "explain": " Literal 'def' (alias Domain)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier DomainID" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.00138631, + "rows_read": 22, + "bytes_read": 920 + } +} diff --git a/parser/testdata/00098_7_union_all/metadata.json b/parser/testdata/00098_7_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/00098_7_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_7_union_all/query.sql b/parser/testdata/00098_7_union_all/query.sql new file mode 100644 index 000000000..2798b1a28 --- /dev/null +++ b/parser/testdata/00098_7_union_all/query.sql @@ -0,0 +1 @@ +SELECT DomainID FROM (SELECT 1 AS DomainID, 'abc' AS Domain UNION ALL SELECT 2 AS DomainID, 'def' AS Domain) ORDER BY DomainID ASC diff --git a/parser/testdata/00098_8_union_all/ast.json b/parser/testdata/00098_8_union_all/ast.json new file mode 100644 index 000000000..6895ad03a --- /dev/null +++ b/parser/testdata/00098_8_union_all/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier DomainID" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias DomainID)" + }, + { + "explain": " Literal 'abc' (alias Domain)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2 (alias DomainID)" + }, + { + "explain": " Literal 'def' (alias Domain)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier DomainID" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001707809, + "rows_read": 22, + "bytes_read": 920 + } +} diff --git a/parser/testdata/00098_8_union_all/metadata.json b/parser/testdata/00098_8_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_8_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_8_union_all/query.sql b/parser/testdata/00098_8_union_all/query.sql new file mode 100644 index 000000000..86e88516f --- /dev/null +++ b/parser/testdata/00098_8_union_all/query.sql @@ -0,0 +1 @@ +SELECT DomainID FROM (SELECT DISTINCT 1 AS DomainID, 'abc' AS Domain UNION ALL SELECT 2 AS DomainID, 'def' AS Domain) ORDER BY DomainID ASC diff --git a/parser/testdata/00098_9_union_all/ast.json b/parser/testdata/00098_9_union_all/ast.json new file mode 100644 index 000000000..30f2f7047 --- /dev/null +++ b/parser/testdata/00098_9_union_all/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList 
(children 2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001275369, + "rows_read": 20, + "bytes_read": 778 + } +} diff --git a/parser/testdata/00098_9_union_all/metadata.json b/parser/testdata/00098_9_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_9_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_9_union_all/query.sql b/parser/testdata/00098_9_union_all/query.sql new file mode 100644 index 000000000..781556ffa --- /dev/null +++ b/parser/testdata/00098_9_union_all/query.sql @@ -0,0 +1 @@ +SELECT * FROM (SELECT 1 UNION ALL SELECT 2) ORDER BY 1 ASC; diff --git a/parser/testdata/00098_a_union_all/ast.json b/parser/testdata/00098_a_union_all/ast.json new file mode 100644 index 000000000..04217343b --- /dev/null +++ b/parser/testdata/00098_a_union_all/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias X)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier X" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001347603, + "rows_read": 20, + "bytes_read": 784 + } +} diff --git a/parser/testdata/00098_a_union_all/metadata.json b/parser/testdata/00098_a_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_a_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_a_union_all/query.sql b/parser/testdata/00098_a_union_all/query.sql new file mode 100644 index 000000000..23affc04c --- /dev/null +++ b/parser/testdata/00098_a_union_all/query.sql @@ -0,0 +1 @@ +SELECT * FROM (SELECT 1 AS X UNION ALL SELECT 2) ORDER BY X ASC; diff --git a/parser/testdata/00098_b_union_all/ast.json b/parser/testdata/00098_b_union_all/ast.json new file mode 100644 index 000000000..34b20cd12 --- /dev/null +++ b/parser/testdata/00098_b_union_all/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + 
{ + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias X)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3 (alias X)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier X" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001355924, + "rows_read": 23, + "bytes_read": 915 + } +} diff --git a/parser/testdata/00098_b_union_all/metadata.json b/parser/testdata/00098_b_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_b_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_b_union_all/query.sql b/parser/testdata/00098_b_union_all/query.sql new file mode 100644 index 000000000..837f0b50d --- /dev/null +++ b/parser/testdata/00098_b_union_all/query.sql @@ -0,0 +1 @@ +SELECT * FROM (SELECT 1 AS X UNION ALL SELECT 2 UNION ALL SELECT 3 AS X) ORDER BY X ASC; diff --git a/parser/testdata/00098_c_union_all/ast.json b/parser/testdata/00098_c_union_all/ast.json new file mode 100644 index 000000000..0369aaa26 --- /dev/null +++ b/parser/testdata/00098_c_union_all/ast.json @@ -0,0 +1,106 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier X" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_12345678901 (alias X)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier X" + } + ], + + 
"rows": 28, + + "statistics": + { + "elapsed": 0.001903549, + "rows_read": 28, + "bytes_read": 1151 + } +} diff --git a/parser/testdata/00098_c_union_all/metadata.json b/parser/testdata/00098_c_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_c_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_c_union_all/query.sql b/parser/testdata/00098_c_union_all/query.sql new file mode 100644 index 000000000..1211be558 --- /dev/null +++ b/parser/testdata/00098_c_union_all/query.sql @@ -0,0 +1 @@ +SELECT X + 1 FROM (SELECT 12345678901 AS X UNION ALL SELECT number FROM system.numbers LIMIT 10) ORDER BY X ASC; diff --git a/parser/testdata/00098_d_union_all/ast.json b/parser/testdata/00098_d_union_all/ast.json new file mode 100644 index 000000000..33ac780a7 --- /dev/null +++ b/parser/testdata/00098_d_union_all/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data2013 (children 1)" + }, + { + "explain": " Identifier data2013" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001605407, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00098_d_union_all/metadata.json b/parser/testdata/00098_d_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_d_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_d_union_all/query.sql b/parser/testdata/00098_d_union_all/query.sql new file mode 100644 index 000000000..1f6741888 --- /dev/null +++ b/parser/testdata/00098_d_union_all/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS data2013; +DROP TABLE IF EXISTS data2015; + +CREATE TABLE data2013 (name String, value UInt32) ENGINE = Memory; +CREATE TABLE data2015 (data_name String, data_value UInt32) ENGINE = Memory; + +INSERT INTO data2013(name,value) VALUES('Alice', 1000); +INSERT INTO data2013(name,value) VALUES('Bob', 2000); +INSERT INTO data2013(name,value) VALUES('Carol', 5000); + +INSERT INTO data2015(data_name, data_value) VALUES('Foo', 42); +INSERT INTO data2015(data_name, data_value) VALUES('Bar', 1); + +SELECT name FROM (SELECT name FROM data2013 UNION ALL SELECT data_name FROM data2015) ORDER BY name ASC; + +DROP TABLE data2013; +DROP TABLE data2015; diff --git a/parser/testdata/00098_e_union_all/ast.json b/parser/testdata/00098_e_union_all/ast.json new file mode 100644 index 000000000..84329ccd9 --- /dev/null +++ b/parser/testdata/00098_e_union_all/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data2013 (children 1)" + }, + { + "explain": " Identifier data2013" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001314075, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00098_e_union_all/metadata.json b/parser/testdata/00098_e_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_e_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_e_union_all/query.sql b/parser/testdata/00098_e_union_all/query.sql new file mode 100644 index 000000000..c0b74122e --- /dev/null +++ b/parser/testdata/00098_e_union_all/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS data2013; +DROP TABLE IF EXISTS data2015; + +CREATE TABLE data2013 (name String, value UInt32) ENGINE = Memory; +CREATE TABLE 
data2015 (data_name String, data_value UInt32) ENGINE = Memory; + +INSERT INTO data2013(name,value) VALUES('Alice', 1000); +INSERT INTO data2013(name,value) VALUES('Bob', 2000); +INSERT INTO data2013(name,value) VALUES('Carol', 5000); + +INSERT INTO data2015(data_name, data_value) VALUES('Foo', 42); +INSERT INTO data2015(data_name, data_value) VALUES('Bar', 1); + +SELECT X FROM (SELECT name AS X FROM data2013 UNION ALL SELECT data_name FROM data2015) ORDER BY X ASC; + +DROP TABLE data2013; +DROP TABLE data2015; diff --git a/parser/testdata/00098_f_union_all/ast.json b/parser/testdata/00098_f_union_all/ast.json new file mode 100644 index 000000000..8df414a1a --- /dev/null +++ b/parser/testdata/00098_f_union_all/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data2013 (children 1)" + }, + { + "explain": " Identifier data2013" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001331943, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00098_f_union_all/metadata.json b/parser/testdata/00098_f_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_f_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_f_union_all/query.sql b/parser/testdata/00098_f_union_all/query.sql new file mode 100644 index 000000000..849079742 --- /dev/null +++ b/parser/testdata/00098_f_union_all/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS data2013; +DROP TABLE IF EXISTS data2015; + +CREATE TABLE data2013 (name String, value UInt32) ENGINE = Memory; +CREATE TABLE data2015 (data_name String, data_value UInt32) ENGINE = Memory; + +INSERT INTO data2013(name,value) VALUES('Alice', 1000); +INSERT INTO data2013(name,value) VALUES('Bob', 2000); +INSERT INTO data2013(name,value) VALUES('Carol', 5000); + +INSERT INTO data2015(data_name, data_value) VALUES('Foo', 42); +INSERT INTO data2015(data_name, data_value) VALUES('Bar', 1); + +SELECT name FROM (SELECT name FROM data2013 UNION ALL SELECT data_name AS name FROM data2015) ORDER BY name ASC; + +DROP TABLE data2013; +DROP TABLE data2015; diff --git a/parser/testdata/00098_g_union_all/ast.json b/parser/testdata/00098_g_union_all/ast.json new file mode 100644 index 000000000..9648bd958 --- /dev/null +++ b/parser/testdata/00098_g_union_all/ast.json @@ -0,0 +1,115 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier X" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " 
ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias X)" + }, + { + "explain": " Literal UInt64_2 (alias Y)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier X" + } + ], + + "rows": 31, + + "statistics": + { + "elapsed": 0.001497911, + "rows_read": 31, + "bytes_read": 1312 + } +} diff --git a/parser/testdata/00098_g_union_all/metadata.json b/parser/testdata/00098_g_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_g_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_g_union_all/query.sql b/parser/testdata/00098_g_union_all/query.sql new file mode 100644 index 000000000..8f86d81bb --- /dev/null +++ b/parser/testdata/00098_g_union_all/query.sql @@ -0,0 +1 @@ +SELECT X FROM (SELECT * FROM (SELECT 1 AS X, 2 AS Y) UNION ALL SELECT 3, 4) ORDER BY X ASC; diff --git a/parser/testdata/00098_h_union_all/ast.json b/parser/testdata/00098_h_union_all/ast.json new file mode 100644 index 000000000..d7dece926 --- /dev/null +++ b/parser/testdata/00098_h_union_all/ast.json @@ -0,0 +1,115 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier X" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias X)" + }, + { + "explain": " Literal UInt64_2 (alias Y)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier X" + } + ], + + "rows": 31, + + "statistics": + { + "elapsed": 0.001494377, + "rows_read": 31, + "bytes_read": 1312 + } +} diff --git a/parser/testdata/00098_h_union_all/metadata.json b/parser/testdata/00098_h_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_h_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} 
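(Note on the fixture layout, since the same three files repeat for every case above and below: query.sql is the input, ast.json is either ClickHouse's EXPLAIN AST output — data rows carrying one "explain" string per AST line — or {"error": true} when ClickHouse itself rejects the query, and metadata.json carries flags such as {"todo": true}, presumably marking cases the parser does not handle yet. Judging by cases like 00098_d above, ast.json covers only the first statement of a multi-statement query.sql. The following is a minimal sketch of a Go harness that could consume these fixtures; the names TestExplainFixtures and renderExplainAST are hypothetical, and the parse-and-print step is stubbed out because this diff only contains the fixtures.)

package parser_test

import (
	"encoding/json"
	"os"
	"path/filepath"
	"strings"
	"testing"
)

// astFixture mirrors ast.json: either {"error": true} or a ClickHouse JSON
// result whose data rows each carry one line of EXPLAIN AST output.
type astFixture struct {
	Error bool `json:"error"`
	Data  []struct {
		Explain string `json:"explain"`
	} `json:"data"`
}

// metaFixture mirrors metadata.json, e.g. {"todo": true}.
type metaFixture struct {
	Todo bool `json:"todo"`
}

func TestExplainFixtures(t *testing.T) {
	dirs, err := filepath.Glob("testdata/*")
	if err != nil {
		t.Fatal(err)
	}
	for _, dir := range dirs {
		t.Run(filepath.Base(dir), func(t *testing.T) {
			var meta metaFixture
			if raw, err := os.ReadFile(filepath.Join(dir, "metadata.json")); err == nil {
				_ = json.Unmarshal(raw, &meta)
			}
			if meta.Todo {
				t.Skip(`marked {"todo": true}`)
			}
			raw, err := os.ReadFile(filepath.Join(dir, "ast.json"))
			if err != nil {
				t.Fatal(err)
			}
			var want astFixture
			if err := json.Unmarshal(raw, &want); err != nil {
				t.Fatal(err)
			}
			if want.Error {
				return // ClickHouse could not EXPLAIN this query either
			}
			sql, err := os.ReadFile(filepath.Join(dir, "query.sql"))
			if err != nil {
				t.Fatal(err)
			}
			lines := make([]string, 0, len(want.Data))
			for _, row := range want.Data {
				lines = append(lines, row.Explain)
			}
			if got := renderExplainAST(t, string(sql)); got != strings.Join(lines, "\n") {
				t.Errorf("EXPLAIN AST mismatch in %s", dir)
			}
		})
	}
}

// renderExplainAST stands in for the real parse-then-print pipeline, which
// this diff does not include; it should parse the first statement of sql and
// render it in ClickHouse's EXPLAIN AST text format.
func renderExplainAST(t *testing.T, sql string) string {
	t.Helper()
	t.Skip("parser not wired up in this sketch")
	return ""
}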
diff --git a/parser/testdata/00098_h_union_all/query.sql b/parser/testdata/00098_h_union_all/query.sql new file mode 100644 index 000000000..e85aaf2b7 --- /dev/null +++ b/parser/testdata/00098_h_union_all/query.sql @@ -0,0 +1 @@ +SELECT X FROM (SELECT 1 AS X, 2 AS Y UNION ALL SELECT * FROM (SELECT 3, 4)) ORDER BY X ASC; diff --git a/parser/testdata/00098_j_union_all/ast.json b/parser/testdata/00098_j_union_all/ast.json new file mode 100644 index 000000000..779409d5a --- /dev/null +++ b/parser/testdata/00098_j_union_all/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier dummy" + }, + { + "explain": " Literal Int64_-1 (alias x)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier dummy" + }, + { + "explain": " Function arrayJoin (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[Int64_-1]" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001441956, + "rows_read": 21, + "bytes_read": 869 + } +} diff --git a/parser/testdata/00098_j_union_all/metadata.json b/parser/testdata/00098_j_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_j_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_j_union_all/query.sql b/parser/testdata/00098_j_union_all/query.sql new file mode 100644 index 000000000..2e450c0a5 --- /dev/null +++ b/parser/testdata/00098_j_union_all/query.sql @@ -0,0 +1,2 @@ +SELECT * FROM (SELECT dummy, -1 as x UNION ALL SELECT dummy, arrayJoin([-1]) as x); +SELECT * FROM (SELECT -1 as x, dummy UNION ALL SELECT arrayJoin([-1]) as x, dummy); diff --git a/parser/testdata/00098_k_union_all/ast.json b/parser/testdata/00098_k_union_all/ast.json new file mode 100644 index 000000000..d15653ba9 --- /dev/null +++ b/parser/testdata/00098_k_union_all/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001286558, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00098_k_union_all/metadata.json b/parser/testdata/00098_k_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_k_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_k_union_all/query.sql b/parser/testdata/00098_k_union_all/query.sql new file mode 100644 index 000000000..a3da234d4 --- /dev/null +++ b/parser/testdata/00098_k_union_all/query.sql @@ -0,0 +1,6 @@ +SET output_format_pretty_color=1; +SET output_format_pretty_display_footer_column_names=0; +SET 
output_format_pretty_squash_consecutive_ms = 0; +SELECT 1 FORMAT PrettySpace; +SELECT 1 UNION ALL SELECT 1 FORMAT PrettySpace; +SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 FORMAT PrettySpace; diff --git a/parser/testdata/00098_l_union_all/ast.json b/parser/testdata/00098_l_union_all/ast.json new file mode 100644 index 000000000..a2844c280 --- /dev/null +++ b/parser/testdata/00098_l_union_all/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001406797, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00098_l_union_all/metadata.json b/parser/testdata/00098_l_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_l_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_l_union_all/query.sql b/parser/testdata/00098_l_union_all/query.sql new file mode 100644 index 000000000..59c3711eb --- /dev/null +++ b/parser/testdata/00098_l_union_all/query.sql @@ -0,0 +1,5 @@ +SET any_join_distinct_right_table_keys = 1; + +SELECT a,b,c,d FROM (SELECT 1 AS a,2 AS b, 3 AS c UNION ALL SELECT 2,3,4 ) js1 ANY INNER JOIN (SELECT 1 AS a,2 AS b,4 AS d UNION ALL SELECT 2,3,5) js2 USING (a) ORDER BY a,b,c,d ASC; +SELECT a,b,c,d FROM (SELECT 1 AS a,2 AS b, 3 AS c UNION ALL SELECT 2,3,4 ) js1 ALL LEFT JOIN (SELECT 1 AS a,2 AS b,4 AS d UNION ALL SELECT 2,3,5) js2 USING (a) ORDER BY a,b,c,d ASC; +SELECT a,b,c,d FROM (SELECT 1 AS a,2 AS b, 3 AS c UNION ALL SELECT 2,3,4 ) js1 ALL LEFT JOIN (SELECT 1 AS a,2 AS b,4 AS d UNION ALL SELECT 2,3,5) js2 USING a,b ORDER BY a,b,c,d ASC; diff --git a/parser/testdata/00098_primary_key_memory_allocated/ast.json b/parser/testdata/00098_primary_key_memory_allocated/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00098_primary_key_memory_allocated/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00098_primary_key_memory_allocated/metadata.json b/parser/testdata/00098_primary_key_memory_allocated/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_primary_key_memory_allocated/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00098_primary_key_memory_allocated/query.sql b/parser/testdata/00098_primary_key_memory_allocated/query.sql new file mode 100644 index 000000000..e3c6965a6 --- /dev/null +++ b/parser/testdata/00098_primary_key_memory_allocated/query.sql @@ -0,0 +1,5 @@ +-- Tags: stateful, no-object-storage +-- Force PK load +SELECT CounterID FROM test.hits WHERE CounterID > 0 LIMIT 1 FORMAT Null; +-- Check PK size +SELECT primary_key_bytes_in_memory > 0, primary_key_bytes_in_memory < 16000, primary_key_bytes_in_memory_allocated < 16000, primary_key_bytes_in_memory_allocated / primary_key_bytes_in_memory < 1.1 FROM system.parts WHERE database = 'test' AND table = 'hits'; diff --git a/parser/testdata/00098_shard_i_union_all/ast.json b/parser/testdata/00098_shard_i_union_all/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00098_shard_i_union_all/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00098_shard_i_union_all/metadata.json b/parser/testdata/00098_shard_i_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00098_shard_i_union_all/metadata.json @@ -0,0 +1 @@ 
+{"todo": true} diff --git a/parser/testdata/00098_shard_i_union_all/query.sql b/parser/testdata/00098_shard_i_union_all/query.sql new file mode 100644 index 000000000..58db30a8f --- /dev/null +++ b/parser/testdata/00098_shard_i_union_all/query.sql @@ -0,0 +1,16 @@ +-- Tags: shard + +DROP TABLE IF EXISTS report1; +DROP TABLE IF EXISTS report2; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE report1(id UInt32, event_date Date, priority UInt32, description String) ENGINE = MergeTree(event_date, intHash32(id), (id, event_date, intHash32(id)), 8192); +CREATE TABLE report2(id UInt32, event_date Date, priority UInt32, description String) ENGINE = MergeTree(event_date, intHash32(id), (id, event_date, intHash32(id)), 8192); + +INSERT INTO report1(id,event_date,priority,description) VALUES (1, '2015-01-01', 1, 'foo')(2, '2015-02-01', 2, 'bar')(3, '2015-03-01', 3, 'foo')(4, '2015-04-01', 4, 'bar')(5, '2015-05-01', 5, 'foo'); +INSERT INTO report2(id,event_date,priority,description) VALUES (1, '2016-01-01', 6, 'bar')(2, '2016-02-01', 7, 'foo')(3, '2016-03-01', 8, 'bar')(4, '2016-04-01', 9, 'foo')(5, '2016-05-01', 10, 'bar'); + +SELECT * FROM (SELECT id, event_date, priority, description FROM remote('127.0.0.{2,3}', currentDatabase(), report1) UNION ALL SELECT id, event_date, priority, description FROM remote('127.0.0.{2,3}', currentDatabase(), report2)) ORDER BY id, event_date ASC; + +DROP TABLE report1; +DROP TABLE report2; diff --git a/parser/testdata/00099_join_many_blocks_segfault/ast.json b/parser/testdata/00099_join_many_blocks_segfault/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00099_join_many_blocks_segfault/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00099_join_many_blocks_segfault/metadata.json b/parser/testdata/00099_join_many_blocks_segfault/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00099_join_many_blocks_segfault/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00099_join_many_blocks_segfault/query.sql b/parser/testdata/00099_join_many_blocks_segfault/query.sql new file mode 100644 index 000000000..31dec093e --- /dev/null +++ b/parser/testdata/00099_join_many_blocks_segfault/query.sql @@ -0,0 +1,17 @@ +SELECT + DomainID, + Domain +FROM +( + SELECT 1 AS DomainID FROM system.one +) js1 +ANY LEFT JOIN +( + SELECT + 1 AS DomainID, + 'abc' AS Domain + UNION ALL + SELECT + 2 AS DomainID, + 'def' AS Domain +) js2 USING DomainID; diff --git a/parser/testdata/00101_materialized_views_and_insert_without_explicit_database/ast.json b/parser/testdata/00101_materialized_views_and_insert_without_explicit_database/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00101_materialized_views_and_insert_without_explicit_database/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00101_materialized_views_and_insert_without_explicit_database/metadata.json b/parser/testdata/00101_materialized_views_and_insert_without_explicit_database/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00101_materialized_views_and_insert_without_explicit_database/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00101_materialized_views_and_insert_without_explicit_database/query.sql b/parser/testdata/00101_materialized_views_and_insert_without_explicit_database/query.sql new file mode 100644 index 000000000..7d925bc4f --- /dev/null 
+++ b/parser/testdata/00101_materialized_views_and_insert_without_explicit_database/query.sql @@ -0,0 +1,56 @@ + +DROP TABLE IF EXISTS test_table; +DROP TABLE IF EXISTS test_view; +DROP TABLE IF EXISTS test_view_filtered; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE test_table (EventDate Date, CounterID UInt32, UserID UInt64, EventTime DateTime('America/Los_Angeles'), UTCEventTime DateTime('UTC')) ENGINE = MergeTree(EventDate, CounterID, 8192); +CREATE MATERIALIZED VIEW test_view (Rows UInt64, MaxHitTime DateTime('America/Los_Angeles')) ENGINE = Memory AS SELECT count() AS Rows, max(UTCEventTime) AS MaxHitTime FROM test_table; +CREATE MATERIALIZED VIEW test_view_filtered (EventDate Date, CounterID UInt32) ENGINE = Memory POPULATE AS SELECT CounterID, EventDate FROM test_table WHERE EventDate < '2013-01-01'; + +INSERT INTO test_table (EventDate, UTCEventTime) VALUES ('2014-01-02', '2014-01-02 03:04:06'); + +SELECT * FROM test_table; +SELECT * FROM test_view; +SELECT * FROM test_view_filtered; + +DROP TABLE test_table; +DROP TABLE test_view; +DROP TABLE test_view_filtered; + +-- Check only sophisticated constructors and destructors: + +CREATE DATABASE IF NOT EXISTS {CLICKHOUSE_DATABASE_1:Identifier}; + +USE {CLICKHOUSE_DATABASE_1:Identifier}; + +DROP TABLE IF EXISTS tmp; +DROP TABLE IF EXISTS tmp_mv; +DROP TABLE IF EXISTS tmp_mv2; +DROP TABLE IF EXISTS tmp_mv3; +DROP TABLE IF EXISTS tmp_mv4; +DROP TABLE IF EXISTS `.inner.tmp_mv`; +DROP TABLE IF EXISTS `.inner.tmp_mv2`; +DROP TABLE IF EXISTS `.inner.tmp_mv3`; +DROP TABLE IF EXISTS `.inner.tmp_mv4`; + +CREATE TABLE tmp (date Date, name String) ENGINE = Memory; +CREATE MATERIALIZED VIEW tmp_mv ENGINE = AggregatingMergeTree(date, (date, name), 8192) AS SELECT date, name, countState() AS cc FROM tmp GROUP BY date, name; +CREATE TABLE tmp_mv2 AS tmp_mv; +CREATE TABLE tmp_mv3 AS tmp_mv ENGINE = Memory; +CREATE MATERIALIZED VIEW tmp_mv4 ENGINE = AggregatingMergeTree(date, date, 8192) POPULATE AS SELECT DISTINCT * FROM tmp_mv; + +DROP TABLE tmp_mv; +DROP TABLE tmp_mv2; +DROP TABLE tmp_mv3; +DROP TABLE tmp_mv4; + +EXISTS TABLE `.inner.tmp_mv`; +EXISTS TABLE `.inner.tmp_mv2`; +EXISTS TABLE `.inner.tmp_mv3`; +EXISTS TABLE `.inner.tmp_mv4`; + +DROP TABLE tmp; + +DROP DATABASE {CLICKHOUSE_DATABASE:Identifier}; +DROP DATABASE {CLICKHOUSE_DATABASE_1:Identifier}; diff --git a/parser/testdata/00102_insert_into_temporary_table/ast.json b/parser/testdata/00102_insert_into_temporary_table/ast.json new file mode 100644 index 000000000..9b277cb9c --- /dev/null +++ b/parser/testdata/00102_insert_into_temporary_table/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": "  ExpressionList (children 1)" + }, + { + "explain": "   ColumnDeclaration a (children 1)" + }, + { + "explain": "    DataType UInt8" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001499569, + "rows_read": 6, + "bytes_read": 201 + } +} diff --git a/parser/testdata/00102_insert_into_temporary_table/metadata.json b/parser/testdata/00102_insert_into_temporary_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00102_insert_into_temporary_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00102_insert_into_temporary_table/query.sql 
b/parser/testdata/00102_insert_into_temporary_table/query.sql new file mode 100644 index 000000000..4ef44cdf9 --- /dev/null +++ b/parser/testdata/00102_insert_into_temporary_table/query.sql @@ -0,0 +1,3 @@ +CREATE TEMPORARY TABLE t (a UInt8); +INSERT INTO t VALUES (1); +SELECT * FROM t; diff --git a/parser/testdata/00103_ipv4_num_to_string_class_c/ast.json b/parser/testdata/00103_ipv4_num_to_string_class_c/ast.json new file mode 100644 index 000000000..78e0d5aa6 --- /dev/null +++ b/parser/testdata/00103_ipv4_num_to_string_class_c/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function IPv4NumToStringClassC (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUInt32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal '0.0.0.xxx'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001519302, + "rows_read": 12, + "bytes_read": 485 + } +} diff --git a/parser/testdata/00103_ipv4_num_to_string_class_c/metadata.json b/parser/testdata/00103_ipv4_num_to_string_class_c/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00103_ipv4_num_to_string_class_c/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00103_ipv4_num_to_string_class_c/query.sql b/parser/testdata/00103_ipv4_num_to_string_class_c/query.sql new file mode 100644 index 000000000..d72955f04 --- /dev/null +++ b/parser/testdata/00103_ipv4_num_to_string_class_c/query.sql @@ -0,0 +1,4 @@ +select IPv4NumToStringClassC(toUInt32(0)) = '0.0.0.xxx'; +select IPv4NumToStringClassC(0x7f000001) = '127.0.0.xxx'; +select sum(IPv4NumToStringClassC(materialize(toUInt32(0))) = '0.0.0.xxx') = count() from system.one array join range(1024) as n; +select sum(IPv4NumToStringClassC(materialize(0x7f000001)) = '127.0.0.xxx') = count() from system.one array join range(1024) as n; diff --git a/parser/testdata/00104_totals_having_mode/ast.json b/parser/testdata/00104_totals_having_mode/ast.json new file mode 100644 index 000000000..302f3f5dc --- /dev/null +++ b/parser/testdata/00104_totals_having_mode/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001202442, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00104_totals_having_mode/metadata.json b/parser/testdata/00104_totals_having_mode/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00104_totals_having_mode/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00104_totals_having_mode/query.sql b/parser/testdata/00104_totals_having_mode/query.sql new file mode 100644 index 000000000..96e83b263 --- /dev/null +++ b/parser/testdata/00104_totals_having_mode/query.sql @@ -0,0 +1,17 @@ +SET max_threads = 1; +SET max_block_size = 65536; +SET max_rows_to_group_by = 65535; +SET group_by_overflow_mode = 'any'; + +SET totals_mode = 'before_having'; +SELECT number, count() FROM 
(SELECT * FROM system.numbers LIMIT 100000) GROUP BY number WITH TOTALS HAVING number % 3 = 0 ORDER BY number LIMIT 1; + +SET totals_mode = 'after_having_inclusive'; +SELECT number, count() FROM (SELECT * FROM system.numbers LIMIT 100000) GROUP BY number WITH TOTALS HAVING number % 3 = 0 ORDER BY number LIMIT 1; + +SET totals_mode = 'after_having_exclusive'; +SELECT number, count() FROM (SELECT * FROM system.numbers LIMIT 100000) GROUP BY number WITH TOTALS HAVING number % 3 = 0 ORDER BY number LIMIT 1; + +SET totals_mode = 'after_having_auto'; +SET totals_auto_threshold = 0.5; +SELECT number, count() FROM (SELECT * FROM system.numbers LIMIT 100000) GROUP BY number WITH TOTALS HAVING number % 3 = 0 ORDER BY number LIMIT 1; diff --git a/parser/testdata/00105_shard_collations/ast.json b/parser/testdata/00105_shard_collations/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00105_shard_collations/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00105_shard_collations/metadata.json b/parser/testdata/00105_shard_collations/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00105_shard_collations/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00105_shard_collations/query.sql b/parser/testdata/00105_shard_collations/query.sql new file mode 100644 index 000000000..28c547270 --- /dev/null +++ b/parser/testdata/00105_shard_collations/query.sql @@ -0,0 +1,51 @@ +-- Tags: shard, no-fasttest + +SELECT 'Русский (default)'; +SELECT arrayJoin(['а', 'я', 'ё', 'А', 'Я', 'Ё']) AS x ORDER BY x; + +SELECT 'Русский (ru)'; +SELECT arrayJoin(['а', 'я', 'ё', 'А', 'Я', 'Ё']) AS x ORDER BY x COLLATE 'ru'; + +SELECT 'Русский (ru distributed)'; +SELECT arrayJoin(['а', 'я', 'ё', 'А', 'Я', 'Ё']) AS x FROM remote('127.0.0.{2,3}', system, one) ORDER BY x COLLATE 'ru'; + +SELECT 'Türk (default)'; +SELECT arrayJoin(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'ç', 'd', 'e', 'f', 'g', 'ğ', 'h', 'ı', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'ö', 'p', 'r', 's', 'ş', 't', 'u', 'ü', 'v', 'y', 'z', 'A', 'B', 'C', 'Ç', 'D', 'E', 'F', 'G', 'Ğ', 'H', 'I', 'İ', 'J', 'K', 'L', 'M', 'N', 'O', 'Ö', 'P', 'R', 'S', 'Ş', 'T', 'U', 'Ü', 'V', 'Y', 'Z']) AS x ORDER BY x; + +SELECT 'Türk (tr)'; +SELECT arrayJoin(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'ç', 'd', 'e', 'f', 'g', 'ğ', 'h', 'ı', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'ö', 'p', 'r', 's', 'ş', 't', 'u', 'ü', 'v', 'y', 'z', 'A', 'B', 'C', 'Ç', 'D', 'E', 'F', 'G', 'Ğ', 'H', 'I', 'İ', 'J', 'K', 'L', 'M', 'N', 'O', 'Ö', 'P', 'R', 'S', 'Ş', 'T', 'U', 'Ü', 'V', 'Y', 'Z']) AS x ORDER BY x COLLATE 'tr'; + +SELECT 'english (default)'; +SELECT arrayJoin(['A', 'c', 'Z', 'Q', 'e']) AS x ORDER BY x; +SELECT 'english (en_US)'; +SELECT arrayJoin(['A', 'c', 'Z', 'Q', 'e']) AS x ORDER BY x COLLATE 'en_US'; +SELECT 'english (en)'; +SELECT arrayJoin(['A', 'c', 'Z', 'Q', 'e']) AS x ORDER BY x COLLATE 'en'; + +SELECT 'español (default)'; +SELECT arrayJoin(['F', 'z', 'J', 'Ñ']) as x ORDER BY x; +SELECT 'español (es)'; 
+SELECT arrayJoin(['F', 'z', 'J', 'Ñ']) as x ORDER BY x COLLATE 'es'; + +SELECT 'Український (default)'; +SELECT arrayJoin(['ґ', 'ї', 'І', 'Б']) as x ORDER BY x; +SELECT 'Український (uk)'; +SELECT arrayJoin(['ґ', 'ї', 'І', 'Б']) as x ORDER BY x COLLATE 'uk'; + +SELECT 'Русский (ru group by)'; +SELECT x, n FROM (SELECT ['а', 'я', 'ё', 'А', 'Я', 'Ё'] AS arr) ARRAY JOIN arr AS x, arrayEnumerate(arr) AS n ORDER BY x COLLATE 'ru', n; + +--- Const expression +SELECT 'ζ' as x ORDER BY x COLLATE 'el'; + +-- check order by const with collation +SELECT number FROM numbers(2) ORDER BY 'x' COLLATE 'el'; + +-- check const and non const columns in order +SELECT number FROM numbers(11) ORDER BY 'x', toString(number), 'y' COLLATE 'el'; + +--- Trash locales +SELECT '' as x ORDER BY x COLLATE 'qq'; --{serverError UNSUPPORTED_COLLATION_LOCALE} +SELECT '' as x ORDER BY x COLLATE 'qwe'; --{serverError UNSUPPORTED_COLLATION_LOCALE} +SELECT '' as x ORDER BY x COLLATE 'some_non_existing_locale'; --{serverError UNSUPPORTED_COLLATION_LOCALE} +SELECT '' as x ORDER BY x COLLATE 'ру'; --{serverError UNSUPPORTED_COLLATION_LOCALE} diff --git a/parser/testdata/00106_totals_after_having/ast.json b/parser/testdata/00106_totals_after_having/ast.json new file mode 100644 index 000000000..7c5b5b83a --- /dev/null +++ b/parser/testdata/00106_totals_after_having/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001554128, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00106_totals_after_having/metadata.json b/parser/testdata/00106_totals_after_having/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00106_totals_after_having/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00106_totals_after_having/query.sql b/parser/testdata/00106_totals_after_having/query.sql new file mode 100644 index 000000000..b9e5b7933 --- /dev/null +++ b/parser/testdata/00106_totals_after_having/query.sql @@ -0,0 +1,21 @@ +SET max_rows_to_group_by = 100000; +SET group_by_overflow_mode = 'any'; + +-- 'any' overflow mode might select different values for two-level and +-- single-level GROUP BY, so we set a big enough threshold here to ensure that +-- the switch doesn't happen, we only use single-level GROUP BY and get a +-- predictable result. 
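+-- Concretely: with group_by_overflow_mode = 'any', once max_rows_to_group_by distinct +-- keys have been collected, rows with new keys are discarded while rows for already-seen +-- keys keep aggregating; which keys get in first can differ between the single-level and +-- two-level hash tables, hence the two pinning settings below.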
+SET group_by_two_level_threshold_bytes = 100000000; +SET group_by_two_level_threshold = 1000000; + +SET totals_mode = 'after_having_auto'; +SELECT dummy, count() GROUP BY dummy WITH TOTALS; + +SET totals_mode = 'after_having_inclusive'; +SELECT dummy, count() GROUP BY dummy WITH TOTALS; + +SET totals_mode = 'after_having_exclusive'; +SELECT dummy, count() GROUP BY dummy WITH TOTALS; + +SET totals_mode = 'before_having'; +SELECT dummy, count() GROUP BY dummy WITH TOTALS; diff --git a/parser/testdata/00107_totals_after_having/ast.json b/parser/testdata/00107_totals_after_having/ast.json new file mode 100644 index 000000000..f3d407d7b --- /dev/null +++ b/parser/testdata/00107_totals_after_having/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '*** In-memory aggregation.'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.00134825, + "rows_read": 5, + "bytes_read": 197 + } +} diff --git a/parser/testdata/00107_totals_after_having/metadata.json b/parser/testdata/00107_totals_after_having/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00107_totals_after_having/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00107_totals_after_having/query.sql b/parser/testdata/00107_totals_after_having/query.sql new file mode 100644 index 000000000..1c201a3ad --- /dev/null +++ b/parser/testdata/00107_totals_after_having/query.sql @@ -0,0 +1,51 @@ +SELECT '*** In-memory aggregation.'; + +SET max_rows_to_group_by = 100000; +SET max_block_size = 100001; +SET group_by_overflow_mode = 'any'; + +-- 'any' overflow mode might select different values for two-level and +-- single-level GROUP BY, so we set a big enough threshold here to ensure that +-- the switch doesn't happen, we only use single-level GROUP BY and get a +-- predictable result. 
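+-- For reference, the four totals_mode runs below differ in which rows feed the WITH TOTALS +-- row: 'before_having' aggregates every row read; the 'after_having_*' modes count only rows +-- that survive HAVING, where 'exclusive' also drops groups cut off by max_rows_to_group_by, +-- 'inclusive' keeps them, and 'auto' chooses between the two using totals_auto_threshold.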
+SET group_by_two_level_threshold_bytes = 100000000; +SET group_by_two_level_threshold = 1000000; + +SELECT '**** totals_mode = after_having_auto'; +SET totals_mode = 'after_having_auto'; +SELECT intDiv(number, 2) AS k, count(), argMax(toString(number), number) FROM (SELECT number FROM system.numbers LIMIT 500000) GROUP BY k WITH TOTALS ORDER BY k LIMIT 10; + +SELECT '**** totals_mode = after_having_inclusive'; +SET totals_mode = 'after_having_inclusive'; +SELECT intDiv(number, 2) AS k, count(), argMax(toString(number), number) FROM (SELECT number FROM system.numbers LIMIT 500000) GROUP BY k WITH TOTALS ORDER BY k LIMIT 10; + +SELECT '**** totals_mode = after_having_exclusive'; +SET totals_mode = 'after_having_exclusive'; +SELECT intDiv(number, 2) AS k, count(), argMax(toString(number), number) FROM (SELECT number FROM system.numbers LIMIT 500000) GROUP BY k WITH TOTALS ORDER BY k LIMIT 10; + +SELECT '**** totals_mode = before_having'; +SET totals_mode = 'before_having'; +SELECT intDiv(number, 2) AS k, count(), argMax(toString(number), number) FROM (SELECT number FROM system.numbers LIMIT 500000) GROUP BY k WITH TOTALS ORDER BY k LIMIT 10; + + +SELECT '*** External aggregation.'; + +SET max_bytes_ratio_before_external_group_by = 0; +SET max_bytes_before_external_group_by = 1000000; +SET group_by_two_level_threshold = 100000; + +SELECT '**** totals_mode = after_having_auto'; +SET totals_mode = 'after_having_auto'; +SELECT intDiv(number, 2) AS k, count(), argMax(toString(number), number) FROM (SELECT number FROM system.numbers LIMIT 500000) GROUP BY k WITH TOTALS ORDER BY k LIMIT 10; + +SELECT '**** totals_mode = after_having_inclusive'; +SET totals_mode = 'after_having_inclusive'; +SELECT intDiv(number, 2) AS k, count(), argMax(toString(number), number) FROM (SELECT number FROM system.numbers LIMIT 500000) GROUP BY k WITH TOTALS ORDER BY k LIMIT 10; + +SELECT '**** totals_mode = after_having_exclusive'; +SET totals_mode = 'after_having_exclusive'; +SELECT intDiv(number, 2) AS k, count(), argMax(toString(number), number) FROM (SELECT number FROM system.numbers LIMIT 500000) GROUP BY k WITH TOTALS ORDER BY k LIMIT 10; + +SELECT '**** totals_mode = before_having'; +SET totals_mode = 'before_having'; +SELECT intDiv(number, 2) AS k, count(), argMax(toString(number), number) FROM (SELECT number FROM system.numbers LIMIT 500000) GROUP BY k WITH TOTALS ORDER BY k LIMIT 10; diff --git a/parser/testdata/00108_shard_totals_after_having/ast.json b/parser/testdata/00108_shard_totals_after_having/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00108_shard_totals_after_having/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00108_shard_totals_after_having/metadata.json b/parser/testdata/00108_shard_totals_after_having/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00108_shard_totals_after_having/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00108_shard_totals_after_having/query.sql b/parser/testdata/00108_shard_totals_after_having/query.sql new file mode 100644 index 000000000..db0527c27 --- /dev/null +++ b/parser/testdata/00108_shard_totals_after_having/query.sql @@ -0,0 +1,16 @@ +-- Tags: shard + +SET max_rows_to_group_by = 100000; +SET group_by_overflow_mode = 'any'; + +SET totals_mode = 'after_having_auto'; +SELECT dummy, count() FROM remote('127.0.0.{2,3}', system, one) GROUP BY dummy WITH TOTALS; + +SET totals_mode = 'after_having_inclusive'; +SELECT dummy, 
count() FROM remote('127.0.0.{2,3}', system, one) GROUP BY dummy WITH TOTALS; + +SET totals_mode = 'after_having_exclusive'; +SELECT dummy, count() FROM remote('127.0.0.{2,3}', system, one) GROUP BY dummy WITH TOTALS; + +SET totals_mode = 'before_having'; +SELECT dummy, count() FROM remote('127.0.0.{2,3}', system, one) GROUP BY dummy WITH TOTALS; diff --git a/parser/testdata/00109_shard_totals_after_having/ast.json b/parser/testdata/00109_shard_totals_after_having/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00109_shard_totals_after_having/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00109_shard_totals_after_having/metadata.json b/parser/testdata/00109_shard_totals_after_having/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00109_shard_totals_after_having/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00109_shard_totals_after_having/query.sql b/parser/testdata/00109_shard_totals_after_having/query.sql new file mode 100644 index 000000000..86f1dd1ba --- /dev/null +++ b/parser/testdata/00109_shard_totals_after_having/query.sql @@ -0,0 +1,28 @@ +-- Tags: shard, no-fasttest + +SET max_rows_to_group_by = 100000; +SET max_block_size = 100001; +SET group_by_overflow_mode = 'any'; + +-- Settings 'max_rows_to_group_by', 'max_bytes_before_external_group_by' and 'max_bytes_ratio_before_external_group_by' are mutually exclusive. +SET max_bytes_ratio_before_external_group_by = 0; +SET max_bytes_before_external_group_by = 0; + +DROP TABLE IF EXISTS numbers500k; +CREATE TABLE numbers500k (number UInt32) ENGINE = TinyLog; + +INSERT INTO numbers500k SELECT number FROM system.numbers LIMIT 500000; + +SET totals_mode = 'after_having_auto'; +SELECT intDiv(number, 2) AS k, count(), argMax(toString(number), number) FROM (SELECT * FROM remote('127.0.0.{2,3}', currentDatabase(), numbers500k) ORDER BY number) GROUP BY k WITH TOTALS ORDER BY k LIMIT 10; + +SET totals_mode = 'after_having_inclusive'; +SELECT intDiv(number, 2) AS k, count(), argMax(toString(number), number) FROM (SELECT * FROM remote('127.0.0.{2,3}', currentDatabase(), numbers500k) ORDER BY number) GROUP BY k WITH TOTALS ORDER BY k LIMIT 10; + +SET totals_mode = 'after_having_exclusive'; +SELECT intDiv(number, 2) AS k, count(), argMax(toString(number), number) FROM (SELECT * FROM remote('127.0.0.{2,3}', currentDatabase(), numbers500k) ORDER BY number) GROUP BY k WITH TOTALS ORDER BY k LIMIT 10; + +SET totals_mode = 'before_having'; +SELECT intDiv(number, 2) AS k, count(), argMax(toString(number), number) FROM (SELECT * FROM remote('127.0.0.{2,3}', currentDatabase(), numbers500k) ORDER BY number) GROUP BY k WITH TOTALS ORDER BY k LIMIT 10; + +DROP TABLE numbers500k; diff --git a/parser/testdata/00110_external_sort/ast.json b/parser/testdata/00110_external_sort/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00110_external_sort/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00110_external_sort/metadata.json b/parser/testdata/00110_external_sort/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00110_external_sort/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00110_external_sort/query.sql b/parser/testdata/00110_external_sort/query.sql new file mode 100644 index 000000000..c46b61614 --- /dev/null +++ b/parser/testdata/00110_external_sort/query.sql @@ -0,0 +1,14 @@ +-- 
Tags: no-parallel, no-fasttest, no-flaky-check, no-asan + +SET max_bytes_ratio_before_external_sort = 0; + +-- { echoOn } +SELECT number FROM (SELECT number FROM system.numbers LIMIT 10000000) ORDER BY number * 1234567890123456789 LIMIT 9999990, 10 SETTINGS max_memory_usage='300Mi', max_bytes_before_external_sort='70M'; +SELECT number FROM (SELECT number FROM system.numbers LIMIT 10000000) ORDER BY number * 1234567890123456789 LIMIT 9999990, 10 SETTINGS max_memory_usage='300Mi', max_bytes_before_external_sort='10M'; +SELECT number FROM (SELECT number FROM numbers(2097152)) ORDER BY number * 1234567890123456789 LIMIT 2097142, 10 SETTINGS max_memory_usage='300Mi', max_bytes_before_external_sort='32M', max_block_size=1048576; + +-- This query is heavy, let's do it only once +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['ExternalSortWritePart'] FROM system.query_log WHERE type != 'QueryStart' AND current_database = currentDatabase() AND Settings['max_bytes_before_external_sort']='70000000'; +SELECT if((ProfileEvents['ExternalSortWritePart'] as x) > 10, 10, x) FROM system.query_log WHERE type != 'QueryStart' AND current_database = currentDatabase() AND Settings['max_bytes_before_external_sort']='10000000'; +SELECT ProfileEvents['ExternalSortWritePart'] FROM system.query_log WHERE type != 'QueryStart' AND current_database = currentDatabase() AND Settings['max_bytes_before_external_sort']='32000000'; diff --git a/parser/testdata/00111_shard_external_sort_distributed/ast.json b/parser/testdata/00111_shard_external_sort_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00111_shard_external_sort_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00111_shard_external_sort_distributed/metadata.json b/parser/testdata/00111_shard_external_sort_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00111_shard_external_sort_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00111_shard_external_sort_distributed/query.sql b/parser/testdata/00111_shard_external_sort_distributed/query.sql new file mode 100644 index 000000000..442a17c39 --- /dev/null +++ b/parser/testdata/00111_shard_external_sort_distributed/query.sql @@ -0,0 +1,18 @@ +-- Tags: distributed, long, no-msan, no-tsan, no-asan, no-ubsan +-- ^ slow + +SET max_memory_usage = 150000000; +SET max_bytes_before_external_sort = 10000000; +SET max_bytes_ratio_before_external_sort = 0; +SET max_threads = 8; +SET max_execution_time = 300; +SET max_execution_time_leaf = 300; + +DROP TABLE IF EXISTS numbers10m; +CREATE VIEW numbers10m AS SELECT number FROM system.numbers LIMIT 5000000; + +SELECT number FROM numbers10m ORDER BY number * 1234567890123456789 LIMIT 4999980, 20; +SELECT '-'; +SELECT number FROM remote('127.0.0.{2,3}', currentDatabase(), numbers10m) ORDER BY number * 1234567890123456789 LIMIT 4999980, 20; + +DROP TABLE numbers10m; diff --git a/parser/testdata/00112_shard_totals_after_having/ast.json b/parser/testdata/00112_shard_totals_after_having/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00112_shard_totals_after_having/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00112_shard_totals_after_having/metadata.json b/parser/testdata/00112_shard_totals_after_having/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/00112_shard_totals_after_having/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00112_shard_totals_after_having/query.sql b/parser/testdata/00112_shard_totals_after_having/query.sql new file mode 100644 index 000000000..27952ae28 --- /dev/null +++ b/parser/testdata/00112_shard_totals_after_having/query.sql @@ -0,0 +1,6 @@ +-- Tags: shard + +SET totals_mode = 'after_having_auto'; +SET max_rows_to_group_by = 100000; +SET group_by_overflow_mode = 'any'; +SELECT dummy + 1 AS k, count() FROM remote('127.0.0.{2,3}', system, one) GROUP BY k WITH TOTALS ORDER BY k; diff --git a/parser/testdata/00113_shard_group_array/ast.json b/parser/testdata/00113_shard_group_array/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00113_shard_group_array/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00113_shard_group_array/metadata.json b/parser/testdata/00113_shard_group_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00113_shard_group_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00113_shard_group_array/query.sql b/parser/testdata/00113_shard_group_array/query.sql new file mode 100644 index 000000000..b589f9ce0 --- /dev/null +++ b/parser/testdata/00113_shard_group_array/query.sql @@ -0,0 +1,39 @@ +-- Tags: shard + +SELECT intDiv(number, 100) AS k, length(groupArray(number)) FROM (SELECT * FROM system.numbers LIMIT 1000000) GROUP BY k WITH TOTALS ORDER BY k LIMIT 10; + +SELECT ''; +SELECT length(toString(groupArrayState(toDate(number)))) FROM (SELECT * FROM system.numbers LIMIT 10); +SELECT length(toString(groupArrayState(toDateTime(number)))) FROM (SELECT * FROM system.numbers LIMIT 10); + +DROP TABLE IF EXISTS numbers_mt; +CREATE TABLE numbers_mt (number UInt64) ENGINE = Log; +INSERT INTO numbers_mt SELECT * FROM system.numbers LIMIT 1, 1000000; + +SELECT count(), sum(ns), max(ns) FROM (SELECT intDiv(number, 100) AS k, groupArray(number) AS ns FROM numbers_mt GROUP BY k) ARRAY JOIN ns; +SELECT count(), sum(toUInt64(ns)), max(toUInt64(ns)) FROM (SELECT intDiv(number, 100) AS k, groupArray(toString(number)) AS ns FROM numbers_mt GROUP BY k) ARRAY JOIN ns; +SELECT count(), sum(toUInt64(ns[1])), max(toUInt64(ns[1])), sum(toUInt64(ns[2]))/10 FROM (SELECT intDiv(number, 100) AS k, groupArray([toString(number), toString(number*10)]) AS ns FROM numbers_mt GROUP BY k) ARRAY JOIN ns; +SELECT count(), sum(ns[1]), max(ns[1]), sum(ns[2])/10 FROM (SELECT intDiv(number, 100) AS k, groupArray([number, number*10]) AS ns FROM numbers_mt GROUP BY k) ARRAY JOIN ns; + +SELECT count(), sum(ns), max(ns) FROM (SELECT intDiv(number, 100) AS k, groupArray(number) AS ns FROM remote('127.0.0.{2,3}', currentDatabase(), 'numbers_mt') GROUP BY k) ARRAY JOIN ns; +SELECT count(), sum(toUInt64(ns)), max(toUInt64(ns)) FROM (SELECT intDiv(number, 100) AS k, groupArray(toString(number)) AS ns FROM remote('127.0.0.{2,3}', currentDatabase(), 'numbers_mt') GROUP BY k) ARRAY JOIN ns; +SELECT count(), sum(toUInt64(ns[1])), max(toUInt64(ns[1])), sum(toUInt64(ns[2]))/10 FROM (SELECT intDiv(number, 100) AS k, groupArray([toString(number), toString(number*10)]) AS ns FROM remote('127.0.0.{2,3}', currentDatabase(), 'numbers_mt') GROUP BY k) ARRAY JOIN ns; + +DROP TABLE numbers_mt; +CREATE TABLE numbers_mt (number UInt64) ENGINE = Log; +INSERT INTO numbers_mt SELECT * FROM system.numbers LIMIT 1, 1048575; + +SELECT ''; +SELECT roundToExp2(number) AS k, 
length(groupArray(1)(number AS i)), length(groupArray(1024)(i)), length(groupArray(65536)(i)) AS s FROM numbers_mt GROUP BY k ORDER BY k LIMIT 9, 11; +SELECT roundToExp2(number) AS k, length(groupArray(1)(hex(number) AS i)), length(groupArray(1024)(i)), length(groupArray(65536)(i)) AS s FROM numbers_mt GROUP BY k ORDER BY k LIMIT 9, 11; +SELECT roundToExp2(number) AS k, length(groupArray(1)([hex(number)] AS i)), length(groupArray(1024)(i)), length(groupArray(65536)(i)) AS s FROM numbers_mt GROUP BY k ORDER BY k LIMIT 9, 11; + +SELECT ''; +SELECT roundToExp2(number) AS k, length(groupArray(1)(number AS i)), length(groupArray(1500)(i)), length(groupArray(70000)(i)) AS s FROM remote('127.0.0.{2,3}', currentDatabase(), 'numbers_mt') GROUP BY k ORDER BY k LIMIT 9, 11; +SELECT roundToExp2(number) AS k, length(groupArray(1)(hex(number) AS i)), length(groupArray(1500)(i)), length(groupArray(70000)(i)) AS s FROM remote('127.0.0.{2,3}', currentDatabase(), 'numbers_mt') GROUP BY k ORDER BY k LIMIT 9, 11; +SELECT roundToExp2(number) AS k, length(groupArray(1)([hex(number)] AS i)), length(groupArray(1500)(i)), length(groupArray(70000)(i)) AS s FROM remote('127.0.0.{2,3}', currentDatabase(), 'numbers_mt') GROUP BY k ORDER BY k LIMIT 9, 11; + +DROP TABLE numbers_mt; + +-- Check binary compatibility: +-- clickhouse-client -h old -q "SELECT arrayReduce('groupArrayState', [['1'], ['22'], ['333']]) FORMAT RowBinary" | clickhouse-local -s --input-format RowBinary --structure "d AggregateFunction(groupArray2, Array(String))" -q "SELECT groupArray2Merge(d) FROM table" diff --git a/parser/testdata/00114_float_type_result_of_division/ast.json b/parser/testdata/00114_float_type_result_of_division/ast.json new file mode 100644 index 000000000..bcf94d0c7 --- /dev/null +++ b/parser/testdata/00114_float_type_result_of_division/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function divide (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001324412, + "rows_read": 8, + "bytes_read": 290 + } +} diff --git a/parser/testdata/00114_float_type_result_of_division/metadata.json b/parser/testdata/00114_float_type_result_of_division/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00114_float_type_result_of_division/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00114_float_type_result_of_division/query.sql b/parser/testdata/00114_float_type_result_of_division/query.sql new file mode 100644 index 000000000..56c7de754 --- /dev/null +++ b/parser/testdata/00114_float_type_result_of_division/query.sql @@ -0,0 +1 @@ +SELECT 1 / 10; diff --git a/parser/testdata/00116_storage_set/ast.json b/parser/testdata/00116_storage_set/ast.json new file mode 100644 index 000000000..b0c08336b --- /dev/null +++ b/parser/testdata/00116_storage_set/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery set (children 1)" + }, + { + "explain": " Identifier set" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001411852, + 
"rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/00116_storage_set/metadata.json b/parser/testdata/00116_storage_set/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00116_storage_set/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00116_storage_set/query.sql b/parser/testdata/00116_storage_set/query.sql new file mode 100644 index 000000000..36ad015c6 --- /dev/null +++ b/parser/testdata/00116_storage_set/query.sql @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS set; +DROP TABLE IF EXISTS set2; +DROP TABLE IF EXISTS tab; + +CREATE TABLE set (x String) ENGINE = Set; + +SELECT arrayJoin(['Hello', 'test', 'World', 'world', 'abc', 'xyz']) AS s WHERE s IN set; +SELECT arrayJoin(['Hello', 'test', 'World', 'world', 'abc', 'xyz']) AS s WHERE s NOT IN set; + +INSERT INTO set VALUES ('Hello'), ('World'); +SELECT arrayJoin(['Hello', 'test', 'World', 'world', 'abc', 'xyz']) AS s WHERE s IN set; + +RENAME TABLE set TO set2; +SELECT arrayJoin(['Hello', 'test', 'World', 'world', 'abc', 'xyz']) AS s WHERE s IN set2; + +INSERT INTO set2 VALUES ('Hello'), ('World'); +SELECT arrayJoin(['Hello', 'test', 'World', 'world', 'abc', 'xyz']) AS s WHERE s IN set2; + +INSERT INTO set2 VALUES ('abc'), ('World'); +SELECT arrayJoin(['Hello', 'test', 'World', 'world', 'abc', 'xyz']) AS s WHERE s IN set2; + +DETACH TABLE set2; +ATTACH TABLE set2; + +SELECT arrayJoin(['Hello', 'test', 'World', 'world', 'abc', 'xyz']) AS s WHERE s IN set2; + +RENAME TABLE set2 TO set; +SELECT arrayJoin(['Hello', 'test', 'World', 'world', 'abc', 'xyz']) AS s WHERE s IN set; + +create table tab (x String) engine = MergeTree order by x as select 'Hello'; +SELECT * FROM tab PREWHERE x IN (set) WHERE x IN (set) LIMIT 1 settings enable_analyzer=0; +SELECT * FROM tab PREWHERE x IN (set) WHERE x IN (set) LIMIT 1 settings enable_analyzer=1; +DROP TABLE tab; + +DROP TABLE set; diff --git a/parser/testdata/00117_parsing_arrays/ast.json b/parser/testdata/00117_parsing_arrays/ast.json new file mode 100644 index 000000000..b88dc5093 --- /dev/null +++ b/parser/testdata/00117_parsing_arrays/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery null_00117 (children 1)" + }, + { + "explain": " Identifier null_00117" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001315241, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00117_parsing_arrays/metadata.json b/parser/testdata/00117_parsing_arrays/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00117_parsing_arrays/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00117_parsing_arrays/query.sql b/parser/testdata/00117_parsing_arrays/query.sql new file mode 100644 index 000000000..e5406d190 --- /dev/null +++ b/parser/testdata/00117_parsing_arrays/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS null_00117; +CREATE TABLE null_00117 (a Array(UInt64), b Array(String), c Array(Array(Date))) ENGINE = Memory; + +INSERT INTO null_00117 (a) VALUES ([1,2]), ([3, 4]), ([ 5 ,6]), ([ 7 , 8 ]), ([]), ([ ]); +INSERT INTO null_00117 (b) VALUES ([ 'Hello' , 'World' ]); +INSERT INTO null_00117 (c) VALUES ([ ]), ([ [ ] ]), ([[],[]]), ([['2015-01-01', '2015-01-02'], ['2015-01-03', '2015-01-04']]); + +SELECT a, b, c FROM null_00117 ORDER BY a, b, c; + +DROP TABLE null_00117; \ No newline at end of file diff --git 
a/parser/testdata/00118_storage_join/ast.json b/parser/testdata/00118_storage_join/ast.json new file mode 100644 index 000000000..748c9b02f --- /dev/null +++ b/parser/testdata/00118_storage_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t2 (children 1)" + }, + { + "explain": " Identifier t2" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001098526, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/00118_storage_join/metadata.json b/parser/testdata/00118_storage_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00118_storage_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00118_storage_join/query.sql b/parser/testdata/00118_storage_join/query.sql new file mode 100644 index 000000000..c0bc28171 --- /dev/null +++ b/parser/testdata/00118_storage_join/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS t2; + +CREATE TABLE t2 (k UInt64, s String) ENGINE = Join(ANY, LEFT, k); + +INSERT INTO t2 VALUES (1, 'abc'), (2, 'def'); +SELECT k, s FROM (SELECT number AS k FROM system.numbers LIMIT 10) js1 ANY LEFT JOIN t2 USING k ORDER BY k; + +INSERT INTO t2 VALUES (6, 'ghi'); +SELECT k, s FROM (SELECT number AS k FROM system.numbers LIMIT 10) js1 ANY LEFT JOIN t2 USING k ORDER BY k; + +SELECT k, js1.s, t2.s FROM (SELECT number AS k, number as s FROM system.numbers LIMIT 10) js1 ANY LEFT JOIN t2 USING k ORDER BY k; +SELECT k, t2.k, js1.s, t2.s FROM (SELECT number AS k, number as s FROM system.numbers LIMIT 10) js1 ANY LEFT JOIN t2 USING k ORDER BY k; + +SELECT k, js1.s, t2.s FROM (SELECT toUInt64(number / 3) AS k, sum(number) as s FROM numbers(10) GROUP BY toUInt64(number / 3) WITH TOTALS) js1 ANY LEFT JOIN t2 USING k ORDER BY k; + +SELECT k, js1.s, t2.s FROM (SELECT number AS k, number AS s FROM system.numbers LIMIT 10) js1 ANY LEFT JOIN t2 ON js1.k == t2.k ORDER BY k; +SELECT k, t2.k, js1.s, t2.s FROM (SELECT number AS k, number AS s FROM system.numbers LIMIT 10) js1 ANY LEFT JOIN t2 ON js1.k == t2.k ORDER BY k; + +SELECT k, js1.s, t2.s FROM (SELECT number AS k, number AS s FROM system.numbers LIMIT 10) js1 ANY LEFT JOIN t2 ON js1.k == t2.k OR js1.s == t2.k ORDER BY k; -- { serverError NOT_IMPLEMENTED, INCOMPATIBLE_TYPE_OF_JOIN } + +DROP TABLE t2; diff --git a/parser/testdata/00119_storage_join/ast.json b/parser/testdata/00119_storage_join/ast.json new file mode 100644 index 000000000..83a35c033 --- /dev/null +++ b/parser/testdata/00119_storage_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t2 (children 1)" + }, + { + "explain": " Identifier t2" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001292329, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/00119_storage_join/metadata.json b/parser/testdata/00119_storage_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00119_storage_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00119_storage_join/query.sql b/parser/testdata/00119_storage_join/query.sql new file mode 100644 index 000000000..cd255cdfe --- /dev/null +++ b/parser/testdata/00119_storage_join/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t2; + +CREATE TABLE t2 (s String, x Array(UInt8), k UInt64) ENGINE = Join(ANY, LEFT, k); + +INSERT INTO t2 VALUES ('abc', [0], 1), 
('def', [1, 2], 2); +INSERT INTO t2 (k, s) VALUES (3, 'ghi'); +INSERT INTO t2 (x, k) VALUES ([3, 4, 5], 4); + +SELECT k, s FROM (SELECT number AS k FROM system.numbers LIMIT 10) js1 ANY LEFT JOIN t2 USING k; +SELECT s, x FROM (SELECT number AS k FROM system.numbers LIMIT 10) js1 ANY LEFT JOIN t2 USING k; +SELECT x, s, k FROM (SELECT number AS k FROM system.numbers LIMIT 10) js1 ANY LEFT JOIN t2 USING k; +SELECT 1, x, 2, s, 3, k, 4 FROM (SELECT number AS k FROM system.numbers LIMIT 10) js1 ANY LEFT JOIN t2 USING k; + +SELECT t1.k, t1.s, t2.x +FROM ( SELECT number AS k, 'a' AS s FROM numbers(2) GROUP BY number WITH TOTALS ORDER BY number) AS t1 +ANY LEFT JOIN t2 AS t2 USING(k); + +DROP TABLE t2; diff --git a/parser/testdata/00120_join_and_group_by/ast.json b/parser/testdata/00120_join_and_group_by/ast.json new file mode 100644 index 000000000..11ae71db1 --- /dev/null +++ b/parser/testdata/00120_join_and_group_by/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier value" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias js2) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier dummy" + }, + { + "explain": " Identifier dummy (alias value)" + }, + { + "explain": " TableJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier dummy" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier value" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001038523, + "rows_read": 23, + "bytes_read": 925 + } +} diff --git a/parser/testdata/00120_join_and_group_by/metadata.json b/parser/testdata/00120_join_and_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00120_join_and_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00120_join_and_group_by/query.sql b/parser/testdata/00120_join_and_group_by/query.sql new file mode 100644 index 000000000..005e25a71 --- /dev/null +++ b/parser/testdata/00120_join_and_group_by/query.sql @@ -0,0 +1,6 @@ +SELECT value FROM system.one ANY LEFT JOIN (SELECT dummy, dummy AS value) js2 USING dummy GROUP BY value; + +SELECT value1, value2, sum(number) +FROM (SELECT number, intHash64(number) AS value1 FROM system.numbers LIMIT 10) js1 +ANY LEFT JOIN (SELECT number, intHash32(number) AS value2 FROM system.numbers LIMIT 10) js2 +USING number GROUP BY value1, value2 ORDER BY value1, value2; diff --git a/parser/testdata/00121_drop_column_zookeeper/ast.json b/parser/testdata/00121_drop_column_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00121_drop_column_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff 
--git a/parser/testdata/00121_drop_column_zookeeper/metadata.json b/parser/testdata/00121_drop_column_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00121_drop_column_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00121_drop_column_zookeeper/query.sql b/parser/testdata/00121_drop_column_zookeeper/query.sql new file mode 100644 index 000000000..915551aa8 --- /dev/null +++ b/parser/testdata/00121_drop_column_zookeeper/query.sql @@ -0,0 +1,27 @@ +-- Tags: zookeeper, no-replicated-database, no-shared-merge-tree +-- Tag no-replicated-database: Old syntax is not allowed +-- no-shared-merge-tree: implemented replacement + +DROP TABLE IF EXISTS alter_00121 SYNC; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE alter_00121 (d Date, x UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/alter_00121/t1', 'r1', d, (d), 8192); + +INSERT INTO alter_00121 VALUES ('2014-01-01', 1); +ALTER TABLE alter_00121 DROP COLUMN x; + +DROP TABLE alter_00121 SYNC; + +CREATE TABLE alter_00121 (d Date) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/alter_00121/t2', 'r1', d, (d), 8192); + +INSERT INTO alter_00121 VALUES ('2014-01-01'); +SELECT * FROM alter_00121 ORDER BY d; + +ALTER TABLE alter_00121 ADD COLUMN x UInt8; + +INSERT INTO alter_00121 VALUES ('2014-02-01', 1); +SELECT * FROM alter_00121 ORDER BY d; + +ALTER TABLE alter_00121 DROP COLUMN x; +SELECT * FROM alter_00121 ORDER BY d; + +DROP TABLE alter_00121 SYNC; diff --git a/parser/testdata/00122_join_with_subquery_with_subquery/ast.json b/parser/testdata/00122_join_with_subquery_with_subquery/ast.json new file mode 100644 index 000000000..f4fcd73b3 --- /dev/null +++ b/parser/testdata/00122_join_with_subquery_with_subquery/ast.json @@ -0,0 +1,139 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias js1) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias k)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias js2) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " 
Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias k)" + }, + { + "explain": " Literal UInt64_2 (alias x)" + }, + { + "explain": " TableJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier k" + } + ], + + "rows": 39, + + "statistics": + { + "elapsed": 0.001389967, + "rows_read": 39, + "bytes_read": 1737 + } +} diff --git a/parser/testdata/00122_join_with_subquery_with_subquery/metadata.json b/parser/testdata/00122_join_with_subquery_with_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00122_join_with_subquery_with_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00122_join_with_subquery_with_subquery/query.sql b/parser/testdata/00122_join_with_subquery_with_subquery/query.sql new file mode 100644 index 000000000..add311125 --- /dev/null +++ b/parser/testdata/00122_join_with_subquery_with_subquery/query.sql @@ -0,0 +1 @@ +SELECT k FROM (SELECT 1 AS k FROM system.one) js1 ANY LEFT JOIN (SELECT k FROM (SELECT 1 AS k, 2 AS x)) js2 USING k; diff --git a/parser/testdata/00123_shard_unmerged_result_when_max_distributed_connections_is_one/ast.json b/parser/testdata/00123_shard_unmerged_result_when_max_distributed_connections_is_one/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00123_shard_unmerged_result_when_max_distributed_connections_is_one/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00123_shard_unmerged_result_when_max_distributed_connections_is_one/metadata.json b/parser/testdata/00123_shard_unmerged_result_when_max_distributed_connections_is_one/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00123_shard_unmerged_result_when_max_distributed_connections_is_one/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00123_shard_unmerged_result_when_max_distributed_connections_is_one/query.sql b/parser/testdata/00123_shard_unmerged_result_when_max_distributed_connections_is_one/query.sql new file mode 100644 index 000000000..788f16c22 --- /dev/null +++ b/parser/testdata/00123_shard_unmerged_result_when_max_distributed_connections_is_one/query.sql @@ -0,0 +1,4 @@ +-- Tags: distributed + +SET max_distributed_connections = 1; +SELECT count() + 1 FROM remote('127.0.0.{2,3}', system, one); diff --git a/parser/testdata/00124_shard_distributed_with_many_replicas/ast.json b/parser/testdata/00124_shard_distributed_with_many_replicas/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00124_shard_distributed_with_many_replicas/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00124_shard_distributed_with_many_replicas/metadata.json b/parser/testdata/00124_shard_distributed_with_many_replicas/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00124_shard_distributed_with_many_replicas/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00124_shard_distributed_with_many_replicas/query.sql b/parser/testdata/00124_shard_distributed_with_many_replicas/query.sql new file mode 100644 index 000000000..fa713fc8d --- /dev/null +++ 
b/parser/testdata/00124_shard_distributed_with_many_replicas/query.sql @@ -0,0 +1,15 @@ +-- Tags: replica, distributed + +SET enable_parallel_replicas = 1; +SET parallel_replicas_mode = 'sampling_key'; +SET max_parallel_replicas = 2; +SET parallel_replicas_for_non_replicated_merge_tree = 1; +DROP TABLE IF EXISTS report; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE report(id UInt32, event_date Date, priority UInt32, description String) ENGINE = MergeTree(event_date, intHash32(id), (id, event_date, intHash32(id)), 8192); + +INSERT INTO report(id,event_date,priority,description) VALUES (1, '2015-01-01', 1, 'foo')(2, '2015-02-01', 2, 'bar')(3, '2015-03-01', 3, 'foo')(4, '2015-04-01', 4, 'bar')(5, '2015-05-01', 5, 'foo'); +SELECT * FROM (SELECT id, event_date, priority, description FROM remote('127.0.0.{2|3}', currentDatabase(), report)) ORDER BY id ASC; + +DROP TABLE report; diff --git a/parser/testdata/00125_array_element_of_array_of_tuple/ast.json b/parser/testdata/00125_array_element_of_array_of_tuple/ast.json new file mode 100644 index 000000000..b46ad6ea9 --- /dev/null +++ b/parser/testdata/00125_array_element_of_array_of_tuple/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function groupArray (alias b) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Function arrayElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier b" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_2) (alias a)" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001353601, + "rows_read": 20, + "bytes_read": 821 + } +} diff --git a/parser/testdata/00125_array_element_of_array_of_tuple/metadata.json b/parser/testdata/00125_array_element_of_array_of_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00125_array_element_of_array_of_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00125_array_element_of_array_of_tuple/query.sql b/parser/testdata/00125_array_element_of_array_of_tuple/query.sql new file mode 100644 index 000000000..a9757013a --- /dev/null +++ b/parser/testdata/00125_array_element_of_array_of_tuple/query.sql @@ -0,0 +1 @@ +select groupArray(a) as b, b[1] from (select (1, 2) as a); diff --git a/parser/testdata/00126_buffer/ast.json b/parser/testdata/00126_buffer/ast.json new file mode 100644 index 000000000..e0e884081 --- /dev/null +++ b/parser/testdata/00126_buffer/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery buffer_00126 (children 1)" + }, + { + "explain": " Identifier buffer_00126" + } + ], + + 
"rows": 2, + + "statistics": + { + "elapsed": 0.001616746, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/00126_buffer/metadata.json b/parser/testdata/00126_buffer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00126_buffer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00126_buffer/query.sql b/parser/testdata/00126_buffer/query.sql new file mode 100644 index 000000000..5d62fcb3c --- /dev/null +++ b/parser/testdata/00126_buffer/query.sql @@ -0,0 +1,62 @@ +DROP TABLE IF EXISTS buffer_00126; +DROP TABLE IF EXISTS null_sink_00126; + +CREATE TABLE null_sink_00126 (a UInt8, b String, c Array(UInt32)) ENGINE = Null; +CREATE TABLE buffer_00126 (a UInt8, b String, c Array(UInt32)) ENGINE = Buffer(currentDatabase(), null_sink_00126, 1, 1000, 1000, 1000, 1000, 1000000, 1000000); + +INSERT INTO buffer_00126 VALUES (1, '2', [3]); + +SELECT a, b, c FROM buffer_00126 ORDER BY a, b, c; +SELECT b, c, a FROM buffer_00126 ORDER BY a, b, c; +SELECT c, a, b FROM buffer_00126 ORDER BY a, b, c; +SELECT a, c, b FROM buffer_00126 ORDER BY a, b, c; +SELECT b, a, c FROM buffer_00126 ORDER BY a, b, c; +SELECT c, b, a FROM buffer_00126 ORDER BY a, b, c; +SELECT a, b FROM buffer_00126 ORDER BY a, b, c; +SELECT b, c FROM buffer_00126 ORDER BY a, b, c; +SELECT c, a FROM buffer_00126 ORDER BY a, b, c; +SELECT a, c FROM buffer_00126 ORDER BY a, b, c; +SELECT b, a FROM buffer_00126 ORDER BY a, b, c; +SELECT c, b FROM buffer_00126 ORDER BY a, b, c; +SELECT a FROM buffer_00126 ORDER BY a, b, c; +SELECT b FROM buffer_00126 ORDER BY a, b, c; +SELECT c FROM buffer_00126 ORDER BY a, b, c; + +INSERT INTO buffer_00126 (c, b, a) VALUES ([7], '8', 9); + +SELECT a, b, c FROM buffer_00126 ORDER BY a, b, c; +SELECT b, c, a FROM buffer_00126 ORDER BY a, b, c; +SELECT c, a, b FROM buffer_00126 ORDER BY a, b, c; +SELECT a, c, b FROM buffer_00126 ORDER BY a, b, c; +SELECT b, a, c FROM buffer_00126 ORDER BY a, b, c; +SELECT c, b, a FROM buffer_00126 ORDER BY a, b, c; +SELECT a, b FROM buffer_00126 ORDER BY a, b, c; +SELECT b, c FROM buffer_00126 ORDER BY a, b, c; +SELECT c, a FROM buffer_00126 ORDER BY a, b, c; +SELECT a, c FROM buffer_00126 ORDER BY a, b, c; +SELECT b, a FROM buffer_00126 ORDER BY a, b, c; +SELECT c, b FROM buffer_00126 ORDER BY a, b, c; +SELECT a FROM buffer_00126 ORDER BY a, b, c; +SELECT b FROM buffer_00126 ORDER BY a, b, c; +SELECT c FROM buffer_00126 ORDER BY a, b, c; + +INSERT INTO buffer_00126 (a, c) VALUES (11, [33]); + +SELECT a, b, c FROM buffer_00126 ORDER BY a, b, c; +SELECT b, c, a FROM buffer_00126 ORDER BY a, b, c; +SELECT c, a, b FROM buffer_00126 ORDER BY a, b, c; +SELECT a, c, b FROM buffer_00126 ORDER BY a, b, c; +SELECT b, a, c FROM buffer_00126 ORDER BY a, b, c; +SELECT c, b, a FROM buffer_00126 ORDER BY a, b, c; +SELECT a, b FROM buffer_00126 ORDER BY a, b, c; +SELECT b, c FROM buffer_00126 ORDER BY a, b, c; +SELECT c, a FROM buffer_00126 ORDER BY a, b, c; +SELECT a, c FROM buffer_00126 ORDER BY a, b, c; +SELECT b, a FROM buffer_00126 ORDER BY a, b, c; +SELECT c, b FROM buffer_00126 ORDER BY a, b, c; +SELECT a FROM buffer_00126 ORDER BY a, b, c; +SELECT b FROM buffer_00126 ORDER BY a, b, c; +SELECT c FROM buffer_00126 ORDER BY a, b, c; + +DROP TABLE buffer_00126; +DROP TABLE null_sink_00126; diff --git a/parser/testdata/00127_group_by_concat/ast.json b/parser/testdata/00127_group_by_concat/ast.json new file mode 100644 index 000000000..7714669e6 --- /dev/null +++ 
b/parser/testdata/00127_group_by_concat/ast.json @@ -0,0 +1,127 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function materialize (alias k1) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Function modulo (alias k2) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_123" + }, + { + "explain": " Function count (alias c) (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1000" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier k1" + }, + { + "explain": " Identifier k2" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier k1" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier k2" + } + ], + + "rows": 35, + + "statistics": + { + "elapsed": 0.001605652, + "rows_read": 35, + "bytes_read": 1373 + } +} diff --git a/parser/testdata/00127_group_by_concat/metadata.json b/parser/testdata/00127_group_by_concat/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00127_group_by_concat/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00127_group_by_concat/query.sql b/parser/testdata/00127_group_by_concat/query.sql new file mode 100644 index 000000000..4f343f695 --- /dev/null +++ b/parser/testdata/00127_group_by_concat/query.sql @@ -0,0 +1 @@ +SELECT materialize('') AS k1, number % 123 AS k2, count() AS c FROM (SELECT * FROM system.numbers LIMIT 1000) GROUP BY k1, k2 ORDER BY k1, k2; diff --git a/parser/testdata/00128_group_by_number_and_fixed_string/ast.json b/parser/testdata/00128_group_by_number_and_fixed_string/ast.json new file mode 100644 index 000000000..4140c1796 --- /dev/null +++ b/parser/testdata/00128_group_by_number_and_fixed_string/ast.json @@ -0,0 +1,127 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 5)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier n" + }, + { + "explain": " Identifier k" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression 
(children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number (alias n)" + }, + { + "explain": " Function toFixedString (alias k) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal ' '" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_100000" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier n" + }, + { + "explain": " Identifier k" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier n" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 35, + + "statistics": + { + "elapsed": 0.00133295, + "rows_read": 35, + "bytes_read": 1403 + } +} diff --git a/parser/testdata/00128_group_by_number_and_fixed_string/metadata.json b/parser/testdata/00128_group_by_number_and_fixed_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00128_group_by_number_and_fixed_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00128_group_by_number_and_fixed_string/query.sql b/parser/testdata/00128_group_by_number_and_fixed_string/query.sql new file mode 100644 index 000000000..25d25c2b7 --- /dev/null +++ b/parser/testdata/00128_group_by_number_and_fixed_string/query.sql @@ -0,0 +1 @@ +SELECT n, k FROM (SELECT number AS n, toFixedString(materialize(' '), 3) AS k FROM system.numbers LIMIT 100000) GROUP BY n, k ORDER BY n DESC, k LIMIT 10; diff --git a/parser/testdata/00129_quantile_timing_weighted/ast.json b/parser/testdata/00129_quantile_timing_weighted/ast.json new file mode 100644 index 000000000..0090ce6bd --- /dev/null +++ b/parser/testdata/00129_quantile_timing_weighted/ast.json @@ -0,0 +1,121 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function medianTiming (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Function medianTimingWeighted (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Identifier w" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList 
(children 2)" + }, + { + "explain": " Identifier number (alias t)" + }, + { + "explain": " Function if (alias w) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_77" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_100" + } + ], + + "rows": 33, + + "statistics": + { + "elapsed": 0.001560945, + "rows_read": 33, + "bytes_read": 1396 + } +} diff --git a/parser/testdata/00129_quantile_timing_weighted/metadata.json b/parser/testdata/00129_quantile_timing_weighted/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00129_quantile_timing_weighted/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00129_quantile_timing_weighted/query.sql b/parser/testdata/00129_quantile_timing_weighted/query.sql new file mode 100644 index 000000000..d5056c0e4 --- /dev/null +++ b/parser/testdata/00129_quantile_timing_weighted/query.sql @@ -0,0 +1,4 @@ +SELECT medianTiming(t), medianTimingWeighted(t, w) FROM (SELECT number AS t, number = 77 ? 10 : 1 AS w FROM system.numbers LIMIT 100); +SELECT quantileTiming(0.5)(t), quantileTimingWeighted(0.5)(t, w) FROM (SELECT number AS t, number = 77 ? 10 : 0 AS w FROM system.numbers LIMIT 100); +SELECT medianTiming(t), medianTimingWeighted(t, w) FROM (SELECT number AS t, number = 77 ? 0 : 0 AS w FROM system.numbers LIMIT 100); +SELECT quantilesTiming(0.5, 0.9)(t), quantilesTimingWeighted(0.5, 0.9)(t, w) FROM (SELECT number AS t, number = 77 ? 
10 : 1 AS w FROM system.numbers LIMIT 100); diff --git a/parser/testdata/00131_set_hashed/ast.json b/parser/testdata/00131_set_hashed/ast.json new file mode 100644 index 000000000..20b94b43b --- /dev/null +++ b/parser/testdata/00131_set_hashed/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(UInt64_1, '')" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Tuple_(UInt64_1, '')" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001410162, + "rows_read": 10, + "bytes_read": 394 + } +} diff --git a/parser/testdata/00131_set_hashed/metadata.json b/parser/testdata/00131_set_hashed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00131_set_hashed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00131_set_hashed/query.sql b/parser/testdata/00131_set_hashed/query.sql new file mode 100644 index 000000000..53f867319 --- /dev/null +++ b/parser/testdata/00131_set_hashed/query.sql @@ -0,0 +1 @@ +SELECT (1, '') IN ((1, '')); diff --git a/parser/testdata/00132_sets/ast.json b/parser/testdata/00132_sets/ast.json new file mode 100644 index 000000000..12a35006e --- /dev/null +++ b/parser/testdata/00132_sets/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_2, UInt64_3)" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001250125, + "rows_read": 8, + "bytes_read": 313 + } +} diff --git a/parser/testdata/00132_sets/metadata.json b/parser/testdata/00132_sets/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00132_sets/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00132_sets/query.sql b/parser/testdata/00132_sets/query.sql new file mode 100644 index 000000000..1b0a5fd0f --- /dev/null +++ b/parser/testdata/00132_sets/query.sql @@ -0,0 +1,14 @@ +SELECT 1 IN (1, 2, 3); +SELECT toUInt16(1) IN (1, 1000, 3); +SELECT 'Hello' IN ('Hello', 'world'); +SELECT (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17) IN ((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)); +SELECT (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, '') IN ((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, '')); +SELECT (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, '') IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 'a'); +SELECT (number AS n, n + 1, n + 2, n + 3) IN (1, 2, 3, 4) FROM system.numbers LIMIT 3; +SELECT (number AS n, n + 1, n + 2, n + 3, n - 1) IN (1, 2, 3, 4, 0) FROM system.numbers LIMIT 3; +SELECT (number AS n, n + 1, toString(n + 2), n + 3, n - 1) IN (1, 2, '3', 
4, 0) FROM system.numbers LIMIT 3; +SELECT number, tuple FROM (SELECT 1 AS number, (2, 3) AS tuple) WHERE (number, tuple) IN ( (/*number*/1, /*tuple*/(2, 3)), (/*number*/4, /*tuple*/(5, 6)) ); +SELECT number, tuple FROM (SELECT 2 AS number, (2, 3) AS tuple) WHERE (number, tuple) IN ((2, (2, 3))); +SELECT number, tuple FROM (SELECT 3 AS number, (2, 3) AS tuple) WHERE (number, tuple) IN (3, (2, 3)); +SELECT number, tuple FROM (SELECT 4 AS number, (2, 3) AS tuple) WHERE (number, tuple) IN (SELECT 4, (2, 3)); +SELECT number, tuple FROM (SELECT 5 AS number, (2, 3) AS tuple) WHERE (number, tuple) IN (SELECT 5, (2, 3)); diff --git a/parser/testdata/00134_aggregation_by_fixed_string_of_size_1_2_4_8/ast.json b/parser/testdata/00134_aggregation_by_fixed_string_of_size_1_2_4_8/ast.json new file mode 100644 index 000000000..92542d149 --- /dev/null +++ b/parser/testdata/00134_aggregation_by_fixed_string_of_size_1_2_4_8/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function materialize (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toFixedString (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001277864, + "rows_read": 16, + "bytes_read": 630 + } +} diff --git a/parser/testdata/00134_aggregation_by_fixed_string_of_size_1_2_4_8/metadata.json b/parser/testdata/00134_aggregation_by_fixed_string_of_size_1_2_4_8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00134_aggregation_by_fixed_string_of_size_1_2_4_8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00134_aggregation_by_fixed_string_of_size_1_2_4_8/query.sql b/parser/testdata/00134_aggregation_by_fixed_string_of_size_1_2_4_8/query.sql new file mode 100644 index 000000000..4514b2597 --- /dev/null +++ b/parser/testdata/00134_aggregation_by_fixed_string_of_size_1_2_4_8/query.sql @@ -0,0 +1,9 @@ +SELECT materialize(toFixedString('', 1)) AS x FROM system.one GROUP BY x; +SELECT materialize(toFixedString('', 2)) AS x FROM system.one GROUP BY x; +SELECT materialize(toFixedString('', 3)) AS x FROM system.one GROUP BY x; +SELECT materialize(toFixedString('', 4)) AS x FROM system.one GROUP BY x; +SELECT materialize(toFixedString('', 5)) AS x FROM system.one GROUP BY x; +SELECT materialize(toFixedString('', 6)) AS x FROM system.one GROUP BY x; +SELECT materialize(toFixedString('', 7)) AS x FROM system.one GROUP BY x; +SELECT materialize(toFixedString('', 8)) AS x FROM system.one GROUP BY x; +SELECT materialize(toFixedString('', 9)) AS x FROM system.one GROUP BY x; diff --git a/parser/testdata/00135_duplicate_group_by_keys_segfault/ast.json b/parser/testdata/00135_duplicate_group_by_keys_segfault/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null 
+++ b/parser/testdata/00135_duplicate_group_by_keys_segfault/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00135_duplicate_group_by_keys_segfault/metadata.json b/parser/testdata/00135_duplicate_group_by_keys_segfault/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00135_duplicate_group_by_keys_segfault/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00135_duplicate_group_by_keys_segfault/query.sql b/parser/testdata/00135_duplicate_group_by_keys_segfault/query.sql new file mode 100644 index 000000000..c54593056 --- /dev/null +++ b/parser/testdata/00135_duplicate_group_by_keys_segfault/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-random-settings + +SET max_rows_to_read = 1000000; +SET read_overflow_mode = 'break'; +SELECT concat(toString(number % 256 AS n), '') AS s, n, max(s) FROM system.numbers_mt GROUP BY s, n, n, n, n, n, n, n, n, n ORDER BY s, n; diff --git a/parser/testdata/00136_duplicate_order_by_elems/ast.json b/parser/testdata/00136_duplicate_order_by_elems/ast.json new file mode 100644 index 000000000..98d10f568 --- /dev/null +++ b/parser/testdata/00136_duplicate_order_by_elems/ast.json @@ -0,0 +1,148 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 5)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier n" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number (alias n)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1000000" + }, + { + "explain": " ExpressionList (children 10)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier n" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier n" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier n" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier n" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier n" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier n" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier n" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier n" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier n" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier n" + }, + { + "explain": " Literal UInt64_1000000" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 42, + + "statistics": + { + "elapsed": 0.001460118, + "rows_read": 42, + "bytes_read": 1552 + } +} diff --git 
a/parser/testdata/00136_duplicate_order_by_elems/metadata.json b/parser/testdata/00136_duplicate_order_by_elems/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00136_duplicate_order_by_elems/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00136_duplicate_order_by_elems/query.sql b/parser/testdata/00136_duplicate_order_by_elems/query.sql new file mode 100644 index 000000000..66a0d1a11 --- /dev/null +++ b/parser/testdata/00136_duplicate_order_by_elems/query.sql @@ -0,0 +1 @@ +SELECT n FROM (SELECT number AS n FROM system.numbers LIMIT 1000000) ORDER BY n, n, n, n, n, n, n, n, n, n LIMIT 1000000, 1; diff --git a/parser/testdata/00137_in_constants/ast.json b/parser/testdata/00137_in_constants/ast.json new file mode 100644 index 000000000..3daaca986 --- /dev/null +++ b/parser/testdata/00137_in_constants/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001117574, + "rows_read": 13, + "bytes_read": 502 + } +} diff --git a/parser/testdata/00137_in_constants/metadata.json b/parser/testdata/00137_in_constants/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00137_in_constants/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00137_in_constants/query.sql b/parser/testdata/00137_in_constants/query.sql new file mode 100644 index 000000000..bc365523b --- /dev/null +++ b/parser/testdata/00137_in_constants/query.sql @@ -0,0 +1,32 @@ +SELECT 1 IN (SELECT 1); +SELECT materialize(1) IN (SELECT 1); +SELECT 1 IN (SELECT materialize(1)); +SELECT materialize(1) IN (SELECT materialize(1)); +SELECT (1, 2) IN (SELECT 1, 2); +SELECT (1, materialize(2)) IN (SELECT 1, 2); +SELECT (1, materialize(2)) IN (SELECT materialize(1), 2); +SELECT (1, materialize(2), 'Hello') IN (SELECT materialize(1), 2, 'Hello'); +SELECT (1, materialize(2), materialize('Hello')) IN (SELECT materialize(1), 2, 'Hello'); +SELECT (1, materialize(2), materialize('Hello')) IN (SELECT materialize(1), 2, materialize('Hello')); +SELECT (1, materialize(2), 'Hello') IN (SELECT materialize(1), 2, materialize('Hello')); +SELECT 'Hello' IN (SELECT 'Hello'); +SELECT materialize('Hello') IN (SELECT 'Hello'); +SELECT 'Hello' IN (SELECT materialize('Hello')); +SELECT materialize('Hello') IN (SELECT materialize('Hello')); +SELECT toDate('2020-01-01') IN (toDateTime('2020-01-01', 'UTC')); + +SELECT 2 IN (SELECT 1); +SELECT materialize(2) IN (SELECT 1); +SELECT 2 IN (SELECT materialize(1)); +SELECT materialize(2) IN (SELECT materialize(1)); +SELECT (1, 3) IN (SELECT 1, 2); +SELECT (1, materialize(3)) IN (SELECT 1, 2); +SELECT (1, materialize(3)) IN (SELECT materialize(1), 2); +SELECT (1, materialize(2), 'World') IN (SELECT materialize(1), 2, 'Hello'); 
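+-- Presumably the materialize() variants here force constants into real
+-- (non-constant) columns, so the same mismatched IN comparison is checked
+-- with both constant and non-constant arguments; each query in this block
+-- is expected to return 0.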
+SELECT (1, materialize(2), materialize('World')) IN (SELECT materialize(1), 2, 'Hello'); +SELECT (1, materialize(2), materialize('World')) IN (SELECT materialize(1), 2, materialize('Hello')); +SELECT (1, materialize(2), 'World') IN (SELECT materialize(1), 2, materialize('Hello')); +SELECT 'World' IN (SELECT 'Hello'); +SELECT materialize('World') IN (SELECT 'Hello'); +SELECT 'World' IN (SELECT materialize('Hello')); +SELECT materialize('World') IN (SELECT materialize('Hello')); diff --git a/parser/testdata/00138_table_aliases/ast.json b/parser/testdata/00138_table_aliases/ast.json new file mode 100644 index 000000000..b5208927c --- /dev/null +++ b/parser/testdata/00138_table_aliases/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one (alias xxx)" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001545894, + "rows_read": 9, + "bytes_read": 356 + } +} diff --git a/parser/testdata/00138_table_aliases/metadata.json b/parser/testdata/00138_table_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00138_table_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00138_table_aliases/query.sql b/parser/testdata/00138_table_aliases/query.sql new file mode 100644 index 000000000..1d1682502 --- /dev/null +++ b/parser/testdata/00138_table_aliases/query.sql @@ -0,0 +1,2 @@ +SELECT * FROM `system`.`one` AS `xxx`; +SELECT k, s FROM (SELECT 1 AS k FROM `system`.`one`) AS `xxx` ANY LEFT JOIN (SELECT 1 AS k, 'Hello' AS s) AS `yyy` USING k; diff --git a/parser/testdata/00139_like/ast.json b/parser/testdata/00139_like/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00139_like/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00139_like/metadata.json b/parser/testdata/00139_like/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00139_like/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00139_like/query.sql b/parser/testdata/00139_like/query.sql new file mode 100644 index 000000000..e56ccb377 --- /dev/null +++ b/parser/testdata/00139_like/query.sql @@ -0,0 +1,6 @@ +-- Tags: stateful +/* Note that the queries are written as if the user doesn't realize that the symbol _ has a special meaning in LIKE patterns. 
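+   For illustration: in a pattern like '%/avtomobili_s_probegom/_%', the
+   underscore matches exactly one arbitrary character rather than a literal
+   '_'; to match a literal underscore it would have to be escaped as '\_'.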
*/ +SELECT count() FROM test.hits WHERE URL LIKE '%/avtomobili_s_probegom/_%__%__%__%'; +SELECT count() FROM test.hits WHERE URL LIKE '/avtomobili_s_probegom/_%__%__%__%'; +SELECT count() FROM test.hits WHERE URL LIKE '%_/avtomobili_s_probegom/_%__%__%__%'; +SELECT count() FROM test.hits WHERE URL LIKE '%avtomobili%'; diff --git a/parser/testdata/00140_parse_unix_timestamp_as_datetime/ast.json b/parser/testdata/00140_parse_unix_timestamp_as_datetime/ast.json new file mode 100644 index 000000000..0994994e6 --- /dev/null +++ b/parser/testdata/00140_parse_unix_timestamp_as_datetime/ast.json @@ -0,0 +1,130 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function min (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier ts" + }, + { + "explain": " Function toUInt32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier ts" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (alias ts) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1000000000" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1234" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1000000" + } + ], + + "rows": 36, + + "statistics": + { + "elapsed": 0.001993259, + "rows_read": 36, + "bytes_read": 1583 + } +} diff --git a/parser/testdata/00140_parse_unix_timestamp_as_datetime/metadata.json b/parser/testdata/00140_parse_unix_timestamp_as_datetime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00140_parse_unix_timestamp_as_datetime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00140_parse_unix_timestamp_as_datetime/query.sql b/parser/testdata/00140_parse_unix_timestamp_as_datetime/query.sql new file mode 100644 index 000000000..822bff898 --- /dev/null +++ b/parser/testdata/00140_parse_unix_timestamp_as_datetime/query.sql @@ -0,0 +1 @@ +SELECT min(ts = toUInt32(toDateTime(toString(ts)))) FROM (SELECT 1000000000 + 1234 * number AS ts FROM system.numbers LIMIT 1000000); diff --git a/parser/testdata/00140_prewhere_column_order/ast.json 
b/parser/testdata/00140_prewhere_column_order/ast.json new file mode 100644 index 000000000..37de32c2d --- /dev/null +++ b/parser/testdata/00140_prewhere_column_order/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery prewhere (children 1)" + }, + { + "explain": " Identifier prewhere" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001746558, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00140_prewhere_column_order/metadata.json b/parser/testdata/00140_prewhere_column_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00140_prewhere_column_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00140_prewhere_column_order/query.sql b/parser/testdata/00140_prewhere_column_order/query.sql new file mode 100644 index 000000000..61c2fbcf3 --- /dev/null +++ b/parser/testdata/00140_prewhere_column_order/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS prewhere; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE prewhere (d Date, a String, b String) ENGINE = MergeTree(d, d, 8192); +INSERT INTO prewhere VALUES ('2015-01-01', 'hello', 'world'); + +ALTER TABLE prewhere ADD COLUMN a1 String AFTER a; +INSERT INTO prewhere VALUES ('2015-01-01', 'hello1', 'xxx', 'world1'); + +SELECT d, a, a1, b FROM prewhere PREWHERE a LIKE 'hello%' ORDER BY a1; + +DROP TABLE prewhere; diff --git a/parser/testdata/00140_rename/ast.json b/parser/testdata/00140_rename/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00140_rename/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00140_rename/metadata.json b/parser/testdata/00140_rename/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00140_rename/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00140_rename/query.sql b/parser/testdata/00140_rename/query.sql new file mode 100644 index 000000000..7604fa9d4 --- /dev/null +++ b/parser/testdata/00140_rename/query.sql @@ -0,0 +1,36 @@ +-- Tags: stateful, no-replicated-database, no-parallel +-- Tag no-replicated-database: Does not support renaming of multiple tables in single query +-- Tag no-parallel: Changes stateful tables + +RENAME TABLE test.hits TO test.visits_tmp, test.visits TO test.hits, test.visits_tmp TO test.visits; + +SELECT sum(Sign) FROM test.hits WHERE CounterID = 912887; +SELECT count() FROM test.visits WHERE CounterID = 732797; + +RENAME TABLE test.hits TO test.hits_tmp, test.hits_tmp TO test.hits; + +SELECT sum(Sign) FROM test.hits WHERE CounterID = 912887; +SELECT count() FROM test.visits WHERE CounterID = 732797; + +RENAME TABLE test.hits TO test.visits_tmp, test.visits TO test.hits, test.visits_tmp TO test.visits; + +SELECT count() FROM test.hits WHERE CounterID = 732797; +SELECT sum(Sign) FROM test.visits WHERE CounterID = 912887; + +RENAME TABLE test.hits TO test.hits2, test.hits2 TO test.hits3, test.hits3 TO test.hits4, test.hits4 TO test.hits5, test.hits5 TO test.hits6, test.hits6 TO test.hits7, test.hits7 TO test.hits8, test.hits8 TO test.hits9, test.hits9 TO test.hits10; + +SELECT count() FROM test.hits10 WHERE CounterID = 732797; + +RENAME TABLE test.hits10 TO test.hits; + +SELECT count() FROM test.hits WHERE CounterID = 732797; + +RENAME TABLE test.hits TO default.hits, test.visits TO test.hits; + +SELECT sum(Sign) FROM test.hits WHERE CounterID = 
912887; +SELECT count() FROM default.hits WHERE CounterID = 732797; + +RENAME TABLE test.hits TO test.visits, default.hits TO test.hits; + +SELECT count() FROM test.hits WHERE CounterID = 732797; +SELECT sum(Sign) FROM test.visits WHERE CounterID = 912887; diff --git a/parser/testdata/00141_parse_timestamp_as_datetime/ast.json b/parser/testdata/00141_parse_timestamp_as_datetime/ast.json new file mode 100644 index 000000000..beda012eb --- /dev/null +++ b/parser/testdata/00141_parse_timestamp_as_datetime/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery default (children 1)" + }, + { + "explain": " Identifier default" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001266263, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00141_parse_timestamp_as_datetime/metadata.json b/parser/testdata/00141_parse_timestamp_as_datetime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00141_parse_timestamp_as_datetime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00141_parse_timestamp_as_datetime/query.sql b/parser/testdata/00141_parse_timestamp_as_datetime/query.sql new file mode 100644 index 000000000..dbd251f87 --- /dev/null +++ b/parser/testdata/00141_parse_timestamp_as_datetime/query.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS default; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE default (d Date DEFAULT toDate(t), t DateTime) ENGINE = MergeTree(d, t, 8192); +INSERT INTO default (t) VALUES ('1234567890'); +SELECT toStartOfMonth(d), toUInt32(t) FROM default; + +DROP TABLE default; diff --git a/parser/testdata/00141_transform/ast.json b/parser/testdata/00141_transform/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00141_transform/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00141_transform/metadata.json b/parser/testdata/00141_transform/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00141_transform/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00141_transform/query.sql b/parser/testdata/00141_transform/query.sql new file mode 100644 index 000000000..e2d0494d0 --- /dev/null +++ b/parser/testdata/00141_transform/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT transform(SearchEngineID, [2, 3], ['Яндекс', 'Google'], 'Остальные') AS title, count() AS c FROM test.hits WHERE SearchEngineID != 0 GROUP BY title HAVING c > 0 ORDER BY c DESC LIMIT 10; diff --git a/parser/testdata/00142_parse_timestamp_as_datetime/ast.json b/parser/testdata/00142_parse_timestamp_as_datetime/ast.json new file mode 100644 index 000000000..60bebd64f --- /dev/null +++ b/parser/testdata/00142_parse_timestamp_as_datetime/ast.json @@ -0,0 +1,130 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function min (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier ts" + }, + { + "explain": " Function toUInt32 (children 1)" + }, + { + "explain": " 
ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier ts" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (alias ts) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1000000000" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1234" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1000000" + } + ], + + "rows": 36, + + "statistics": + { + "elapsed": 0.001489795, + "rows_read": 36, + "bytes_read": 1583 + } +} diff --git a/parser/testdata/00142_parse_timestamp_as_datetime/metadata.json b/parser/testdata/00142_parse_timestamp_as_datetime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00142_parse_timestamp_as_datetime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00142_parse_timestamp_as_datetime/query.sql b/parser/testdata/00142_parse_timestamp_as_datetime/query.sql new file mode 100644 index 000000000..fd9e65f9c --- /dev/null +++ b/parser/testdata/00142_parse_timestamp_as_datetime/query.sql @@ -0,0 +1,2 @@ +SELECT min(ts = toUInt32(toDateTime(toString(ts)))) FROM (SELECT 1000000000 + 1234 * number AS ts FROM system.numbers LIMIT 1000000); +SELECT min(ts = toUInt32(toDateTime(toString(ts)))) FROM (SELECT 10000 + 1234 * number AS ts FROM system.numbers LIMIT 1000000); diff --git a/parser/testdata/00142_system_columns/ast.json b/parser/testdata/00142_system_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00142_system_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00142_system_columns/metadata.json b/parser/testdata/00142_system_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00142_system_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00142_system_columns/query.sql b/parser/testdata/00142_system_columns/query.sql new file mode 100644 index 000000000..0c3e1098a --- /dev/null +++ b/parser/testdata/00142_system_columns/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT table, name, type, default_kind, default_expression FROM system.columns WHERE database = 'test' AND table = 'hits' diff --git a/parser/testdata/00143_number_classification_functions/ast.json b/parser/testdata/00143_number_classification_functions/ast.json new file mode 100644 index 000000000..b98eb93cd --- /dev/null +++ b/parser/testdata/00143_number_classification_functions/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + 
"name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function isFinite (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001493776, + "rows_read": 10, + "bytes_read": 377 + } +} diff --git a/parser/testdata/00143_number_classification_functions/metadata.json b/parser/testdata/00143_number_classification_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00143_number_classification_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00143_number_classification_functions/query.sql b/parser/testdata/00143_number_classification_functions/query.sql new file mode 100644 index 000000000..feb59c5f2 --- /dev/null +++ b/parser/testdata/00143_number_classification_functions/query.sql @@ -0,0 +1,33 @@ +select isFinite(0) = 1; +select isFinite(1) = 1; +select isFinite(materialize(0)) = 1; +select isFinite(materialize(1)) = 1; +select isFinite(1/0) = 0; +select isFinite(-1/0) = 0; +select isFinite(0/0) = 0; +select isFinite(inf) = 0; +select isFinite(-inf) = 0; +select isFinite(nan) = 0; + +select isInfinite(0) = 0; +select isInfinite(1) = 0; +select isInfinite(materialize(0)) = 0; +select isInfinite(materialize(1)) = 0; +select isInfinite(1/0) = 1; +select isInfinite(-1/0) = 1; +select isInfinite(0/0) = 0; +select isInfinite(inf) = 1; +select isInfinite(-inf) = 1; +select isInfinite(nan) = 0; + + +select isNaN(0) = 0; +select isNaN(1) = 0; +select isNaN(materialize(0)) = 0; +select isNaN(materialize(1)) = 0; +select isNaN(1/0) = 0; +select isNaN(-1/0) = 0; +select isNaN(0/0) = 1; +select isNaN(inf) = 0; +select isNaN(-inf) = 0; +select isNaN(nan) = 1; diff --git a/parser/testdata/00143_transform_non_const_default/ast.json b/parser/testdata/00143_transform_non_const_default/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00143_transform_non_const_default/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00143_transform_non_const_default/metadata.json b/parser/testdata/00143_transform_non_const_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00143_transform_non_const_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00143_transform_non_const_default/query.sql b/parser/testdata/00143_transform_non_const_default/query.sql new file mode 100644 index 000000000..d94074187 --- /dev/null +++ b/parser/testdata/00143_transform_non_const_default/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT transform(SearchEngineID, [2, 3], ['Яндекс', 'Google'], PageCharset) AS title, count() AS c FROM test.hits WHERE SearchEngineID != 0 GROUP BY title HAVING c > 0 ORDER BY c DESC LIMIT 10; diff --git a/parser/testdata/00144_empty_regexp/ast.json b/parser/testdata/00144_empty_regexp/ast.json new file mode 100644 index 000000000..2bb2e4269 --- /dev/null +++ b/parser/testdata/00144_empty_regexp/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + 
], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function match (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Hello'" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001679196, + "rows_read": 10, + "bytes_read": 372 + } +} diff --git a/parser/testdata/00144_empty_regexp/metadata.json b/parser/testdata/00144_empty_regexp/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00144_empty_regexp/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00144_empty_regexp/query.sql b/parser/testdata/00144_empty_regexp/query.sql new file mode 100644 index 000000000..004b1c8ef --- /dev/null +++ b/parser/testdata/00144_empty_regexp/query.sql @@ -0,0 +1,2 @@ +SELECT match(materialize('Hello'), ''); +SELECT match('Hello', ''); diff --git a/parser/testdata/00144_functions_of_aggregation_states/ast.json b/parser/testdata/00144_functions_of_aggregation_states/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00144_functions_of_aggregation_states/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00144_functions_of_aggregation_states/metadata.json b/parser/testdata/00144_functions_of_aggregation_states/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00144_functions_of_aggregation_states/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00144_functions_of_aggregation_states/query.sql b/parser/testdata/00144_functions_of_aggregation_states/query.sql new file mode 100644 index 000000000..d56543790 --- /dev/null +++ b/parser/testdata/00144_functions_of_aggregation_states/query.sql @@ -0,0 +1,4 @@ +-- Tags: stateful +SET allow_deprecated_error_prone_window_functions = 1; + +SELECT EventDate, finalizeAggregation(state), runningAccumulate(state) FROM (SELECT EventDate, uniqState(UserID) AS state FROM test.hits GROUP BY EventDate ORDER BY EventDate); diff --git a/parser/testdata/00145_aggregate_functions_statistics/ast.json b/parser/testdata/00145_aggregate_functions_statistics/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00145_aggregate_functions_statistics/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00145_aggregate_functions_statistics/metadata.json b/parser/testdata/00145_aggregate_functions_statistics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00145_aggregate_functions_statistics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00145_aggregate_functions_statistics/query.sql b/parser/testdata/00145_aggregate_functions_statistics/query.sql new file mode 100644 index 000000000..87a9e1486 --- /dev/null +++ b/parser/testdata/00145_aggregate_functions_statistics/query.sql @@ -0,0 +1,29 @@ +-- Tags: stateful +SELECT varSamp(ResolutionWidth) FROM (SELECT ResolutionWidth FROM test.hits LIMIT 0); +SELECT varSamp(ResolutionWidth) FROM (SELECT ResolutionWidth FROM test.hits LIMIT 1); +SELECT round(varSamp(ResolutionWidth), 6) FROM test.hits; + +SELECT 
stddevSamp(ResolutionWidth) FROM (SELECT ResolutionWidth FROM test.hits LIMIT 0); +SELECT stddevSamp(ResolutionWidth) FROM (SELECT ResolutionWidth FROM test.hits LIMIT 1); +SELECT round(stddevSamp(ResolutionWidth), 6) FROM test.hits; + +SELECT varPop(ResolutionWidth) FROM (SELECT ResolutionWidth FROM test.hits LIMIT 0); +SELECT varPop(ResolutionWidth) FROM (SELECT ResolutionWidth FROM test.hits LIMIT 1); +SELECT round(varPop(ResolutionWidth), 6) FROM test.hits; + +SELECT stddevPop(ResolutionWidth) FROM (SELECT ResolutionWidth FROM test.hits LIMIT 0); +SELECT stddevPop(ResolutionWidth) FROM (SELECT ResolutionWidth FROM test.hits LIMIT 1); +SELECT round(stddevPop(ResolutionWidth), 6) FROM test.hits; + +SELECT covarSamp(ResolutionWidth, ResolutionHeight) FROM (SELECT ResolutionWidth, ResolutionHeight FROM test.hits LIMIT 0); +SELECT covarSamp(ResolutionWidth, ResolutionHeight) FROM (SELECT ResolutionWidth, ResolutionHeight FROM test.hits LIMIT 1); +SELECT round(covarSamp(ResolutionWidth, ResolutionHeight), 6) FROM test.hits; + +SELECT covarPop(ResolutionWidth, ResolutionHeight) FROM (SELECT ResolutionWidth, ResolutionHeight FROM test.hits LIMIT 0); +SELECT covarPop(ResolutionWidth, ResolutionHeight) FROM (SELECT ResolutionWidth, ResolutionHeight FROM test.hits LIMIT 1); +SELECT round(covarPop(ResolutionWidth, ResolutionHeight), 6) FROM test.hits; + +SELECT corr(ResolutionWidth, ResolutionHeight) FROM (SELECT ResolutionWidth, ResolutionHeight FROM test.hits LIMIT 0); +SELECT corr(ResolutionWidth, ResolutionHeight) FROM (SELECT ResolutionWidth, ResolutionHeight FROM test.hits LIMIT 1); +SELECT round(corr(ResolutionWidth, ResolutionHeight), 6) FROM test.hits; + diff --git a/parser/testdata/00145_empty_likes/ast.json b/parser/testdata/00145_empty_likes/ast.json new file mode 100644 index 000000000..658e9c082 --- /dev/null +++ b/parser/testdata/00145_empty_likes/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function like (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Hello'" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001571979, + "rows_read": 10, + "bytes_read": 371 + } +} diff --git a/parser/testdata/00145_empty_likes/metadata.json b/parser/testdata/00145_empty_likes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00145_empty_likes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00145_empty_likes/query.sql b/parser/testdata/00145_empty_likes/query.sql new file mode 100644 index 000000000..e1b4adf01 --- /dev/null +++ b/parser/testdata/00145_empty_likes/query.sql @@ -0,0 +1,18 @@ +SELECT materialize('Hello') LIKE ''; +SELECT materialize('Hello') LIKE '%'; +SELECT materialize('Hello') LIKE '%%'; +SELECT materialize('Hello') LIKE '%%%'; +SELECT materialize('Hello') LIKE '%_%'; +SELECT materialize('Hello') LIKE '_'; +SELECT materialize('Hello') LIKE '_%'; +SELECT materialize('Hello') LIKE '%_'; + +SELECT 'Hello' LIKE ''; +SELECT 'Hello' LIKE '%'; +SELECT 'Hello' LIKE '%%'; +SELECT 'Hello' LIKE '%%%'; +SELECT 'Hello' 
LIKE '%_%'; +SELECT 'Hello' LIKE '_'; +SELECT 'Hello' LIKE '_%'; +SELECT 'Hello' LIKE '%_'; + diff --git a/parser/testdata/00146_aggregate_function_uniq/ast.json b/parser/testdata/00146_aggregate_function_uniq/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00146_aggregate_function_uniq/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00146_aggregate_function_uniq/metadata.json b/parser/testdata/00146_aggregate_function_uniq/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00146_aggregate_function_uniq/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00146_aggregate_function_uniq/query.sql b/parser/testdata/00146_aggregate_function_uniq/query.sql new file mode 100644 index 000000000..1506ba06c --- /dev/null +++ b/parser/testdata/00146_aggregate_function_uniq/query.sql @@ -0,0 +1,4 @@ +-- Tags: stateful +SELECT RegionID, uniqHLL12(WatchID) AS X FROM remote('127.0.0.{1,2}', test, hits) GROUP BY RegionID HAVING X > 100000 ORDER BY RegionID ASC; +SELECT RegionID, uniqCombined(WatchID) AS X FROM remote('127.0.0.{1,2}', test, hits) GROUP BY RegionID HAVING X > 100000 ORDER BY RegionID ASC; +SELECT abs(uniq(WatchID) - uniqExact(WatchID)) FROM test.hits; diff --git a/parser/testdata/00146_summing_merge_tree_nested_map/ast.json b/parser/testdata/00146_summing_merge_tree_nested_map/ast.json new file mode 100644 index 000000000..ec6a2d942 --- /dev/null +++ b/parser/testdata/00146_summing_merge_tree_nested_map/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nested_map (children 1)" + }, + { + "explain": " Identifier nested_map" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001640452, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00146_summing_merge_tree_nested_map/metadata.json b/parser/testdata/00146_summing_merge_tree_nested_map/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00146_summing_merge_tree_nested_map/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00146_summing_merge_tree_nested_map/query.sql b/parser/testdata/00146_summing_merge_tree_nested_map/query.sql new file mode 100644 index 000000000..e759fbd85 --- /dev/null +++ b/parser/testdata/00146_summing_merge_tree_nested_map/query.sql @@ -0,0 +1,31 @@ +drop table if exists nested_map; + +set allow_deprecated_syntax_for_merge_tree=1; +create table nested_map (d default today(), k UInt64, payload default rand(), SomeMap Nested(ID UInt32, Num Int64)) engine=SummingMergeTree(d, k, 8192); + +insert into nested_map (k, `SomeMap.ID`, `SomeMap.Num`) values (0,[1],[100]),(1,[1],[100]),(2,[1],[100]),(3,[1,2],[100,150]); +insert into nested_map (k, `SomeMap.ID`, `SomeMap.Num`) values (0,[2],[150]),(1,[1],[150]),(2,[1,2],[150,150]),(3,[1],[-100]); +optimize table nested_map; +select `SomeMap.ID`, `SomeMap.Num` from nested_map; + +drop table nested_map; + +create table nested_map (d default today(), k UInt64, payload default rand(), SomeMap Nested(ID String, Num Int64)) engine=SummingMergeTree(d, k, 8192); + +insert into nested_map (k, `SomeMap.ID`, `SomeMap.Num`) values (0,['1'],[100]),(1,['1'],[100]),(2,['1'],[100]),(3,['1','2'],[100,150]); +insert into nested_map (k, `SomeMap.ID`, `SomeMap.Num`) values (0,['2'],[150]),(1,['1'],[150]),(2,['1','2'],[150,150]),(3,['1'],[-100]); +optimize table nested_map; +select 
`SomeMap.ID`, `SomeMap.Num` from nested_map; + +drop table nested_map; + +drop table if exists nested_map_explicit; + +create table nested_map_explicit (d default today(), k UInt64, SomeIntExcluded UInt32, SomeMap Nested(ID UInt32, Num Int64)) engine=SummingMergeTree(d, k, 8192, (SomeMap)); + +insert into nested_map_explicit (k, `SomeIntExcluded`, `SomeMap.ID`, `SomeMap.Num`) values (0, 20, [1],[100]),(1, 20, [1],[100]),(2, 20, [1],[100]),(3, 20, [1,2],[100,150]); +insert into nested_map_explicit (k, `SomeIntExcluded`, `SomeMap.ID`, `SomeMap.Num`) values (0, 20, [2],[150]),(1, 20, [1],[150]),(2, 20, [1,2],[150,150]),(3, 20, [1],[-100]); +optimize table nested_map_explicit; +select `SomeIntExcluded`, `SomeMap.ID`, `SomeMap.Num` from nested_map_explicit; + +drop table nested_map_explicit; diff --git a/parser/testdata/00147_alter_nested_default/ast.json b/parser/testdata/00147_alter_nested_default/ast.json new file mode 100644 index 000000000..e71da578c --- /dev/null +++ b/parser/testdata/00147_alter_nested_default/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alter_00147 (children 1)" + }, + { + "explain": " Identifier alter_00147" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001528869, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/00147_alter_nested_default/metadata.json b/parser/testdata/00147_alter_nested_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00147_alter_nested_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00147_alter_nested_default/query.sql b/parser/testdata/00147_alter_nested_default/query.sql new file mode 100644 index 000000000..070204aef --- /dev/null +++ b/parser/testdata/00147_alter_nested_default/query.sql @@ -0,0 +1,30 @@ +DROP TABLE IF EXISTS alter_00147; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE alter_00147 (d Date DEFAULT toDate('2015-01-01'), n Nested(x String)) ENGINE = MergeTree(d, d, 8192); + +INSERT INTO alter_00147 (`n.x`) VALUES (['Hello', 'World']); + +SELECT * FROM alter_00147; +SELECT * FROM alter_00147 ARRAY JOIN n; +SELECT * FROM alter_00147 ARRAY JOIN n WHERE n.x LIKE '%Hello%'; + +ALTER TABLE alter_00147 ADD COLUMN n.y Array(UInt64); + +SELECT * FROM alter_00147; +SELECT * FROM alter_00147 ARRAY JOIN n; +SELECT * FROM alter_00147 ARRAY JOIN n WHERE n.x LIKE '%Hello%'; + +INSERT INTO alter_00147 (`n.x`) VALUES (['Hello2', 'World2']); + +SELECT * FROM alter_00147 ORDER BY n.x; +SELECT * FROM alter_00147 ARRAY JOIN n ORDER BY n.x; +SELECT * FROM alter_00147 ARRAY JOIN n WHERE n.x LIKE '%Hello%' ORDER BY n.x; + +OPTIMIZE TABLE alter_00147; + +SELECT * FROM alter_00147 ORDER BY n.x; +SELECT * FROM alter_00147 ARRAY JOIN n ORDER BY n.x; +SELECT * FROM alter_00147 ARRAY JOIN n WHERE n.x LIKE '%Hello%' ORDER BY n.x; + +DROP TABLE alter_00147; diff --git a/parser/testdata/00147_global_in_aggregate_function/ast.json b/parser/testdata/00147_global_in_aggregate_function/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00147_global_in_aggregate_function/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00147_global_in_aggregate_function/metadata.json b/parser/testdata/00147_global_in_aggregate_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00147_global_in_aggregate_function/metadata.json @@ -0,0 +1 
@@ +{"todo": true} diff --git a/parser/testdata/00147_global_in_aggregate_function/query.sql b/parser/testdata/00147_global_in_aggregate_function/query.sql new file mode 100644 index 000000000..f9e07d55d --- /dev/null +++ b/parser/testdata/00147_global_in_aggregate_function/query.sql @@ -0,0 +1,5 @@ +-- Tags: stateful, global + +SET max_rows_to_read = 100_000_000; +SELECT sum(UserID GLOBAL IN (SELECT UserID FROM remote('127.0.0.{1,2}', test.hits))) FROM remote('127.0.0.{1,2}', test.hits); +SELECT sum(UserID GLOBAL IN (SELECT UserID FROM test.hits)) FROM remote('127.0.0.{1,2}', test.hits); diff --git a/parser/testdata/00148_monotonic_functions_and_index/ast.json b/parser/testdata/00148_monotonic_functions_and_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00148_monotonic_functions_and_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00148_monotonic_functions_and_index/metadata.json b/parser/testdata/00148_monotonic_functions_and_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00148_monotonic_functions_and_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00148_monotonic_functions_and_index/query.sql b/parser/testdata/00148_monotonic_functions_and_index/query.sql new file mode 100644 index 000000000..71785ecba --- /dev/null +++ b/parser/testdata/00148_monotonic_functions_and_index/query.sql @@ -0,0 +1,60 @@ +-- Tags: stateful +SET max_rows_to_read = 60000; + +SELECT count() FROM test.hits WHERE -CounterID = -1731; +SELECT count() FROM test.hits WHERE abs(-CounterID) = 1731; +SELECT count() FROM test.hits WHERE -abs(CounterID) = -1731; +SELECT count() FROM test.hits WHERE toUInt32(CounterID) = 1731; +SELECT count() FROM test.hits WHERE toInt32(CounterID) = 1731; +SELECT count() FROM test.hits WHERE toFloat32(CounterID) = 1731; + +SET max_rows_to_read = 0; + +SELECT count() FROM test.hits WHERE toInt16(CounterID) = 1731; +SELECT count() FROM test.hits WHERE toInt8(CounterID) = toInt8(1731); + +SELECT count() FROM test.hits WHERE toDate(toUInt16(CounterID)) = toDate(1731); + +SELECT uniq(CounterID), uniqUpTo(5)(toInt8(CounterID)), count() FROM test.hits WHERE toInt8(CounterID + 1 - 1) = toInt8(1731); +SELECT uniq(CounterID), uniqUpTo(5)(toInt8(CounterID)), count() FROM test.hits WHERE toInt8(CounterID) = toInt8(1731); + +SELECT uniq(CounterID), uniqUpTo(5)(toInt16(CounterID)), count() FROM test.hits WHERE toInt16(CounterID + 1 - 1) = 1731; +SELECT uniq(CounterID), uniqUpTo(5)(toInt16(CounterID)), count() FROM test.hits WHERE toInt16(CounterID) = 1731; + +SET max_rows_to_read = 500000; + +SELECT uniq(CounterID), count() FROM test.hits WHERE toString(CounterID) = '1731'; + +SET max_rows_to_read = 2200000; + +SELECT count() FROM test.hits WHERE CounterID < 732797; +SELECT count() FROM test.hits WHERE CounterID <= 732797; +SELECT count() FROM test.hits WHERE CounterID < 732797 AND CounterID > 107931; +SELECT count() FROM test.hits WHERE CounterID < 732797 AND CounterID >= 107931; +SELECT count() FROM test.hits WHERE CounterID <= 732797 AND CounterID > 107931; +SELECT count() FROM test.hits WHERE CounterID <= 732797 AND CounterID >= 107931; +SELECT count() FROM test.hits WHERE -CounterID > -732797; +SELECT count() FROM test.hits WHERE -CounterID >= -732797; +SELECT count() FROM test.hits WHERE -CounterID > -732797 AND CounterID > 107931; +SELECT count() FROM test.hits WHERE -CounterID > -732797 AND CounterID >= 107931; +SELECT count() FROM 
test.hits WHERE -CounterID >= -732797 AND CounterID > 107931; +SELECT count() FROM test.hits WHERE -CounterID >= -732797 AND CounterID >= 107931; +SELECT count() FROM test.hits WHERE CounterID < 732797 AND -CounterID < -107931; +SELECT count() FROM test.hits WHERE CounterID < 732797 AND -CounterID <= -107931; +SELECT count() FROM test.hits WHERE CounterID <= 732797 AND -CounterID < -107931; +SELECT count() FROM test.hits WHERE CounterID <= 732797 AND -CounterID <= -107931; + +SET max_rows_to_read = 0; + +SELECT count() FROM test.hits WHERE EventDate = '2014-03-20'; +SELECT count() FROM test.hits WHERE toDayOfMonth(EventDate) = 20; +SELECT count() FROM test.hits WHERE toDayOfWeek(EventDate) = 4; +SELECT count() FROM test.hits WHERE toUInt16(EventDate) = toUInt16(toDate('2014-03-20')); +SELECT count() FROM test.hits WHERE toInt64(EventDate) = toInt64(toDate('2014-03-20')); +SELECT count() FROM test.hits WHERE toDateTime(EventDate) = '2014-03-20 00:00:00'; + +SET max_rows_to_read = 50000; + +SELECT count() FROM test.hits WHERE toMonth(EventDate) != 3; +SELECT count() FROM test.hits WHERE toYear(EventDate) != 2014; +SELECT count() FROM test.hits WHERE toDayOfMonth(EventDate) > 23 OR toDayOfMonth(EventDate) < 17; diff --git a/parser/testdata/00148_summing_merge_tree_aggregate_function/ast.json b/parser/testdata/00148_summing_merge_tree_aggregate_function/ast.json new file mode 100644 index 000000000..6caa502b2 --- /dev/null +++ b/parser/testdata/00148_summing_merge_tree_aggregate_function/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery summing_merge_tree_aggregate_function (children 1)" + }, + { + "explain": " Identifier summing_merge_tree_aggregate_function" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001631913, + "rows_read": 2, + "bytes_read": 126 + } +} diff --git a/parser/testdata/00148_summing_merge_tree_aggregate_function/metadata.json b/parser/testdata/00148_summing_merge_tree_aggregate_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00148_summing_merge_tree_aggregate_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00148_summing_merge_tree_aggregate_function/query.sql b/parser/testdata/00148_summing_merge_tree_aggregate_function/query.sql new file mode 100644 index 000000000..649c09dbb --- /dev/null +++ b/parser/testdata/00148_summing_merge_tree_aggregate_function/query.sql @@ -0,0 +1,159 @@ +drop table if exists summing_merge_tree_aggregate_function; +drop table if exists summing_merge_tree_null; + +---- partition merge +set allow_deprecated_syntax_for_merge_tree=1; +create table summing_merge_tree_aggregate_function ( + d Date, + k UInt64, + u AggregateFunction(uniq, UInt64) +) engine=SummingMergeTree(d, k, 1); + +insert into summing_merge_tree_aggregate_function +select today() as d, + number as k, + uniqState(toUInt64(number % 500)) +from numbers(5000) +group by d, k; + +insert into summing_merge_tree_aggregate_function +select today() as d, + number + 5000 as k, + uniqState(toUInt64(number % 500)) +from numbers(5000) +group by d, k; + +select count() from summing_merge_tree_aggregate_function; +optimize table summing_merge_tree_aggregate_function; +select count() from summing_merge_tree_aggregate_function; + +drop table summing_merge_tree_aggregate_function; + +---- sum + uniq + uniqExact +set allow_deprecated_syntax_for_merge_tree=1; +create table 
summing_merge_tree_aggregate_function ( + d materialized today(), + k UInt64, + c UInt64, + u AggregateFunction(uniq, UInt8), + ue AggregateFunction(uniqExact, UInt8) +) engine=SummingMergeTree(d, k, 8192); + +insert into summing_merge_tree_aggregate_function select 1, 1, uniqState(1), uniqExactState(1); +insert into summing_merge_tree_aggregate_function select 1, 1, uniqState(2), uniqExactState(2); +insert into summing_merge_tree_aggregate_function select 1, 1, uniqState(3), uniqExactState(2); +insert into summing_merge_tree_aggregate_function select 1, 1, uniqState(1), uniqExactState(1); +insert into summing_merge_tree_aggregate_function select 1, 1, uniqState(2), uniqExactState(2); +insert into summing_merge_tree_aggregate_function select 1, 1, uniqState(3), uniqExactState(3); + +select + k, sum(c), + uniqMerge(u), uniqExactMerge(ue) +from summing_merge_tree_aggregate_function group by k; + +optimize table summing_merge_tree_aggregate_function; + +select + k, sum(c), + uniqMerge(u), uniqExactMerge(ue) +from summing_merge_tree_aggregate_function group by k; + +drop table summing_merge_tree_aggregate_function; + +---- sum + topK +create table summing_merge_tree_aggregate_function (d materialized today(), k UInt64, c UInt64, x AggregateFunction(topK(2), UInt8)) engine=SummingMergeTree(d, k, 8192); + +insert into summing_merge_tree_aggregate_function select 1, 1, topKState(2)(1); +insert into summing_merge_tree_aggregate_function select 1, 1, topKState(2)(2); +insert into summing_merge_tree_aggregate_function select 1, 1, topKState(2)(2); +insert into summing_merge_tree_aggregate_function select 1, 1, topKState(2)(3); +insert into summing_merge_tree_aggregate_function select 1, 1, topKState(2)(3); +insert into summing_merge_tree_aggregate_function select 1, 1, topKState(2)(3); +select k, sum(c), topKMerge(2)(x) from summing_merge_tree_aggregate_function group by k; +optimize table summing_merge_tree_aggregate_function; +select k, sum(c), topKMerge(2)(x) from summing_merge_tree_aggregate_function group by k; + +drop table summing_merge_tree_aggregate_function; + +---- sum + topKWeighted +create table summing_merge_tree_aggregate_function (d materialized today(), k UInt64, c UInt64, x AggregateFunction(topKWeighted(2), UInt8, UInt8)) engine=SummingMergeTree(d, k, 8192); + +insert into summing_merge_tree_aggregate_function select 1, 1, topKWeightedState(2)(1, 1); +insert into summing_merge_tree_aggregate_function select 1, 1, topKWeightedState(2)(1, 1); +insert into summing_merge_tree_aggregate_function select 1, 1, topKWeightedState(2)(1, 1); +insert into summing_merge_tree_aggregate_function select 1, 1, topKWeightedState(2)(2, 2); +insert into summing_merge_tree_aggregate_function select 1, 1, topKWeightedState(2)(2, 2); +insert into summing_merge_tree_aggregate_function select 1, 1, topKWeightedState(2)(3, 5); +select k, sum(c), topKWeightedMerge(2)(x) from summing_merge_tree_aggregate_function group by k; +optimize table summing_merge_tree_aggregate_function; +select k, sum(c), topKWeightedMerge(2)(x) from summing_merge_tree_aggregate_function group by k; + +drop table summing_merge_tree_aggregate_function; + +---- avg +create table summing_merge_tree_aggregate_function (d materialized today(), k UInt64, x AggregateFunction(avg, Float64)) engine=SummingMergeTree(d, k, 8192); + +insert into summing_merge_tree_aggregate_function select 1, avgState(0.0); +insert into summing_merge_tree_aggregate_function select 1, avgState(0.125); +insert into summing_merge_tree_aggregate_function select 1, 
avgState(0.25); +insert into summing_merge_tree_aggregate_function select 1, avgState(0.375); +insert into summing_merge_tree_aggregate_function select 1, avgState(0.4375); +insert into summing_merge_tree_aggregate_function select 1, avgState(0.5); +insert into summing_merge_tree_aggregate_function select 1, avgState(0.5625); +insert into summing_merge_tree_aggregate_function select 1, avgState(0.625); +insert into summing_merge_tree_aggregate_function select 1, avgState(0.75); +insert into summing_merge_tree_aggregate_function select 1, avgState(0.875); +insert into summing_merge_tree_aggregate_function select 1, avgState(1.0); +select k, avgMerge(x) from summing_merge_tree_aggregate_function group by k; +optimize table summing_merge_tree_aggregate_function; +select k, avgMerge(x) from summing_merge_tree_aggregate_function group by k; + +drop table summing_merge_tree_aggregate_function; + +---- quantile +create table summing_merge_tree_aggregate_function (d materialized today(), k UInt64, x AggregateFunction(quantile(0.1), Float64)) engine=SummingMergeTree(d, k, 8192); + +insert into summing_merge_tree_aggregate_function select 1, quantileState(0.1)(0.0); +insert into summing_merge_tree_aggregate_function select 1, quantileState(0.1)(0.1); +insert into summing_merge_tree_aggregate_function select 1, quantileState(0.1)(0.2); +insert into summing_merge_tree_aggregate_function select 1, quantileState(0.1)(0.3); +insert into summing_merge_tree_aggregate_function select 1, quantileState(0.1)(0.4); +insert into summing_merge_tree_aggregate_function select 1, quantileState(0.1)(0.5); +insert into summing_merge_tree_aggregate_function select 1, quantileState(0.1)(0.6); +insert into summing_merge_tree_aggregate_function select 1, quantileState(0.1)(0.7); +insert into summing_merge_tree_aggregate_function select 1, quantileState(0.1)(0.8); +insert into summing_merge_tree_aggregate_function select 1, quantileState(0.1)(0.9); +insert into summing_merge_tree_aggregate_function select 1, quantileState(0.1)(1.0); +select k, round(quantileMerge(0.1)(x), 1) from summing_merge_tree_aggregate_function group by k; +optimize table summing_merge_tree_aggregate_function; +select k, round(quantileMerge(0.1)(x), 1) from summing_merge_tree_aggregate_function group by k; + +drop table summing_merge_tree_aggregate_function; + +---- sum + uniq with more data +create table summing_merge_tree_null ( + d materialized today(), + k UInt64, + c UInt64, + u UInt64 +) engine=Null; + +create materialized view summing_merge_tree_aggregate_function ( + d Date, + k UInt64, + c UInt64, + u AggregateFunction(uniq, UInt64) +) engine=SummingMergeTree(d, k, 8192) +as select d, k, sum(c) as c, uniqState(u) as u +from summing_merge_tree_null +group by d, k; + +-- prime number 53 to avoid resonance between %3 and %53 +insert into summing_merge_tree_null select number % 3, 1, number % 53 from numbers(999999); + +select k, sum(c), uniqMerge(u) from summing_merge_tree_aggregate_function group by k order by k; +optimize table summing_merge_tree_aggregate_function; +select k, sum(c), uniqMerge(u) from summing_merge_tree_aggregate_function group by k order by k; + +drop table summing_merge_tree_aggregate_function; +drop table summing_merge_tree_null; diff --git a/parser/testdata/00148_summing_merge_tree_nested_map_multiple_values/ast.json b/parser/testdata/00148_summing_merge_tree_nested_map_multiple_values/ast.json new file mode 100644 index 000000000..ef1ea2520 --- /dev/null +++ 
b/parser/testdata/00148_summing_merge_tree_nested_map_multiple_values/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nested_map_multiple_values (children 1)" + }, + { + "explain": " Identifier nested_map_multiple_values" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.0011313, + "rows_read": 2, + "bytes_read": 104 + } +} diff --git a/parser/testdata/00148_summing_merge_tree_nested_map_multiple_values/metadata.json b/parser/testdata/00148_summing_merge_tree_nested_map_multiple_values/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00148_summing_merge_tree_nested_map_multiple_values/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00148_summing_merge_tree_nested_map_multiple_values/query.sql b/parser/testdata/00148_summing_merge_tree_nested_map_multiple_values/query.sql new file mode 100644 index 000000000..7c5757cd5 --- /dev/null +++ b/parser/testdata/00148_summing_merge_tree_nested_map_multiple_values/query.sql @@ -0,0 +1,21 @@ +drop table if exists nested_map_multiple_values; + +set allow_deprecated_syntax_for_merge_tree=1; +create table nested_map_multiple_values (d materialized today(), k UInt64, payload materialized rand(), SomeMap Nested(ID UInt32, Num1 Int64, Num2 Float64)) engine=SummingMergeTree(d, k, 8192); + +insert into nested_map_multiple_values values (0,[1],[100],[1.0]),(1,[1],[100],[1.0]),(2,[1],[100],[1.0]),(3,[1,2],[100,150],[1.0,1.5]); +insert into nested_map_multiple_values values (0,[2],[150],[-2.5]),(1,[1],[150],[-1.0]),(2,[1,2],[150,150],[2.5,3.5]),(3,[1],[-100],[-1]); +optimize table nested_map_multiple_values; +select * from nested_map_multiple_values; + +drop table nested_map_multiple_values; + +drop table if exists nested_not_a_map; +create table nested_not_a_map (d materialized today(), k UInt64, payload materialized rand(), OnlyOneColumnMap Nested(ID UInt32), NonArithmeticValueMap Nested(ID UInt32, Date Date), Nested_ Nested(ID UInt32, Num Int64)) engine=SummingMergeTree(d, k, 8192); + +insert into nested_not_a_map values (0,[1],[1],['2015-04-09'],[1],[100]); +insert into nested_not_a_map values (0,[1],[1],['2015-04-08'],[1],[200]); +optimize table nested_not_a_map; +select * from nested_not_a_map; + +drop table nested_not_a_map; diff --git a/parser/testdata/00149_function_url_hash/ast.json b/parser/testdata/00149_function_url_hash/ast.json new file mode 100644 index 000000000..922b841ca --- /dev/null +++ b/parser/testdata/00149_function_url_hash/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function URLHash (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '' (alias url)" + }, + { + "explain": " Function URLHash (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function appendTrailingCharIfAbsent (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier url" + }, + { + "explain": " Literal '\/'" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 
0.001484711, + "rows_read": 15, + "bytes_read": 606 + } +} diff --git a/parser/testdata/00149_function_url_hash/metadata.json b/parser/testdata/00149_function_url_hash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00149_function_url_hash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00149_function_url_hash/query.sql b/parser/testdata/00149_function_url_hash/query.sql new file mode 100644 index 000000000..18e63978e --- /dev/null +++ b/parser/testdata/00149_function_url_hash/query.sql @@ -0,0 +1,17 @@ +select URLHash('' as url) = URLHash(appendTrailingCharIfAbsent(url, '/')); +select URLHash('http://ya.ru' as url) = URLHash(appendTrailingCharIfAbsent(url, '/')); +select URLHash('http://ya.ru' as url) = URLHash(appendTrailingCharIfAbsent(url, '?')); +select URLHash('http://ya.ru' as url) = URLHash(appendTrailingCharIfAbsent(url, '#')); + +select URLHash('' as url, 0) = URLHash(url); +select URLHash('' as url, 1) = URLHash(url); +select URLHash('' as url, 1000) = URLHash(url); + +select URLHash('http://ya.ru/a' as url, 0 as level) = URLHash(URLHierarchy(url)[level + 1]); +select URLHash('http://ya.ru/a' as url, 1 as level) = URLHash(URLHierarchy(url)[level + 1]); + +select URLHash(url, 0 as level) = URLHash(URLHierarchy(url)[level + 1]) from system.one array join ['', 'http://ya.ru', 'http://ya.ru/', 'http://ya.ru/a', 'http://ya.ru/a/', 'http://ya.ru/a/b', 'http://ya.ru/a/b?'] as url; +select URLHash(url, 1 as level) = URLHash(URLHierarchy(url)[level + 1]) from system.one array join ['', 'http://ya.ru', 'http://ya.ru/', 'http://ya.ru/a', 'http://ya.ru/a/', 'http://ya.ru/a/b', 'http://ya.ru/a/b?'] as url; +select URLHash(url, 2 as level) = URLHash(URLHierarchy(url)[level + 1]) from system.one array join ['', 'http://ya.ru', 'http://ya.ru/', 'http://ya.ru/a', 'http://ya.ru/a/', 'http://ya.ru/a/b', 'http://ya.ru/a/b?'] as url; +select URLHash(url, 3 as level) = URLHash(URLHierarchy(url)[level + 1]) from system.one array join ['', 'http://ya.ru', 'http://ya.ru/', 'http://ya.ru/a', 'http://ya.ru/a/', 'http://ya.ru/a/b', 'http://ya.ru/a/b?'] as url; +select URLHash(url, 4 as level) = URLHash(URLHierarchy(url)[level + 1]) from system.one array join ['', 'http://ya.ru', 'http://ya.ru/', 'http://ya.ru/a', 'http://ya.ru/a/', 'http://ya.ru/a/b', 'http://ya.ru/a/b?'] as url; diff --git a/parser/testdata/00149_quantiles_timing_distributed/ast.json b/parser/testdata/00149_quantiles_timing_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00149_quantiles_timing_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00149_quantiles_timing_distributed/metadata.json b/parser/testdata/00149_quantiles_timing_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00149_quantiles_timing_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00149_quantiles_timing_distributed/query.sql b/parser/testdata/00149_quantiles_timing_distributed/query.sql new file mode 100644 index 000000000..52ad42f7f --- /dev/null +++ b/parser/testdata/00149_quantiles_timing_distributed/query.sql @@ -0,0 +1,5 @@ +-- Tags: stateful, distributed + +SET max_rows_to_read = 100_000_000; +SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID); +SELECT sum(cityHash64(*)) 
FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS optimize_aggregation_in_order = 1; diff --git a/parser/testdata/00150_quantiles_timing_precision/ast.json b/parser/testdata/00150_quantiles_timing_precision/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00150_quantiles_timing_precision/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00150_quantiles_timing_precision/metadata.json b/parser/testdata/00150_quantiles_timing_precision/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00150_quantiles_timing_precision/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00150_quantiles_timing_precision/query.sql b/parser/testdata/00150_quantiles_timing_precision/query.sql new file mode 100644 index 000000000..547c79393 --- /dev/null +++ b/parser/testdata/00150_quantiles_timing_precision/query.sql @@ -0,0 +1,3 @@ +-- Tags: stateful +SELECT CounterID, quantileTiming(0.5)(SendTiming) AS qt, least(30000, quantileExact(0.5)(SendTiming)) AS qe, count() AS c, round(abs(qt - qe) / greatest(qt, qe) AS diff, 3) AS rounded_diff FROM test.hits WHERE SendTiming != -1 GROUP BY CounterID HAVING diff != 0 ORDER BY diff DESC; +SELECT CounterID, quantileTiming(0.5)(SendTiming) AS qt, least(30000, quantileExact(0.5)(SendTiming)) AS qe, count() AS c, round(abs(qt - qe) / greatest(qt, qe) AS diff, 3) AS rounded_diff FROM test.hits WHERE SendTiming != -1 GROUP BY CounterID HAVING diff != 0 ORDER BY diff DESC SETTINGS optimize_aggregation_in_order = 1; diff --git a/parser/testdata/00150_with_totals_and_join/ast.json b/parser/testdata/00150_with_totals_and_join/ast.json new file mode 100644 index 000000000..79f1f5935 --- /dev/null +++ b/parser/testdata/00150_with_totals_and_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001104156, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00150_with_totals_and_join/metadata.json b/parser/testdata/00150_with_totals_and_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00150_with_totals_and_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00150_with_totals_and_join/query.sql b/parser/testdata/00150_with_totals_and_join/query.sql new file mode 100644 index 000000000..eb0eefb74 --- /dev/null +++ b/parser/testdata/00150_with_totals_and_join/query.sql @@ -0,0 +1,6 @@ +SET joined_subquery_requires_alias = 0; + +SELECT k, s1, s2 FROM (SELECT intDiv(number, 3) AS k, sum(number) AS s1 FROM (SELECT * FROM system.numbers LIMIT 10) GROUP BY k) ANY LEFT JOIN (SELECT intDiv(number, 4) AS k, sum(number) AS s2 FROM (SELECT * FROM system.numbers LIMIT 10) GROUP BY k) USING k ORDER BY k; +SELECT k, s1, s2 FROM (SELECT intDiv(number, 3) AS k, sum(number) AS s1 FROM (SELECT * FROM system.numbers LIMIT 10) GROUP BY k WITH TOTALS) ANY LEFT JOIN (SELECT intDiv(number, 4) AS k, sum(number) AS s2 FROM (SELECT * FROM system.numbers LIMIT 10) GROUP BY k) USING k ORDER BY k; +SELECT k, s1, s2 FROM (SELECT intDiv(number, 3) AS k, sum(number) AS s1 FROM (SELECT * FROM system.numbers LIMIT 10) GROUP BY k) ANY LEFT JOIN (SELECT intDiv(number, 4) AS k, sum(number) AS s2 FROM (SELECT * FROM system.numbers 
LIMIT 10) GROUP BY k WITH TOTALS) USING k ORDER BY k; +SELECT k, s1, s2 FROM (SELECT intDiv(number, 3) AS k, sum(number) AS s1 FROM (SELECT * FROM system.numbers LIMIT 10) GROUP BY k WITH TOTALS) ANY LEFT JOIN (SELECT intDiv(number, 4) AS k, sum(number) AS s2 FROM (SELECT * FROM system.numbers LIMIT 10) GROUP BY k WITH TOTALS) USING k ORDER BY k; diff --git a/parser/testdata/00151_order_by_read_in_order/ast.json b/parser/testdata/00151_order_by_read_in_order/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00151_order_by_read_in_order/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00151_order_by_read_in_order/metadata.json b/parser/testdata/00151_order_by_read_in_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00151_order_by_read_in_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00151_order_by_read_in_order/query.sql b/parser/testdata/00151_order_by_read_in_order/query.sql new file mode 100644 index 000000000..60d2fd108 --- /dev/null +++ b/parser/testdata/00151_order_by_read_in_order/query.sql @@ -0,0 +1,15 @@ +-- Tags: stateful +SET optimize_read_in_order = 1; +SELECT CounterID FROM test.hits ORDER BY CounterID DESC LIMIT 50; +SELECT CounterID FROM test.hits ORDER BY CounterID LIMIT 50; +SELECT CounterID FROM test.hits ORDER BY CounterID, EventDate LIMIT 50; +SELECT EventDate FROM test.hits ORDER BY CounterID, EventDate LIMIT 50; +SELECT EventDate FROM test.hits ORDER BY CounterID, EventDate DESC LIMIT 50; +SELECT CounterID FROM test.hits ORDER BY CounterID, EventDate DESC LIMIT 50; +SELECT CounterID FROM test.hits ORDER BY CounterID DESC, EventDate DESC LIMIT 50; +SELECT EventDate FROM test.hits ORDER BY CounterID DESC, EventDate DESC LIMIT 50; + +SELECT CounterID, EventDate FROM test.hits ORDER BY CounterID, EventDate LIMIT 50; +SELECT CounterID, EventDate FROM test.hits ORDER BY CounterID, EventDate DESC LIMIT 50; +SELECT CounterID, EventDate FROM test.hits ORDER BY CounterID DESC, EventDate LIMIT 50; +SELECT CounterID, EventDate FROM test.hits ORDER BY CounterID DESC, EventDate DESC LIMIT 50; diff --git a/parser/testdata/00151_tuple_with_array/ast.json b/parser/testdata/00151_tuple_with_array/ast.json new file mode 100644 index 000000000..174b94c2c --- /dev/null +++ b/parser/testdata/00151_tuple_with_array/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal Array_[UInt64_1]" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001263563, + "rows_read": 8, + "bytes_read": 296 + } +} diff --git a/parser/testdata/00151_tuple_with_array/metadata.json b/parser/testdata/00151_tuple_with_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00151_tuple_with_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00151_tuple_with_array/query.sql b/parser/testdata/00151_tuple_with_array/query.sql new file mode 100644 index 000000000..cadae546b --- /dev/null +++ b/parser/testdata/00151_tuple_with_array/query.sql @@ -0,0 +1 
@@ +SELECT (1, [1]); diff --git a/parser/testdata/00152_insert_different_granularity/ast.json b/parser/testdata/00152_insert_different_granularity/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00152_insert_different_granularity/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00152_insert_different_granularity/metadata.json b/parser/testdata/00152_insert_different_granularity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00152_insert_different_granularity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00152_insert_different_granularity/query.sql b/parser/testdata/00152_insert_different_granularity/query.sql new file mode 100644 index 000000000..9a3ae36b5 --- /dev/null +++ b/parser/testdata/00152_insert_different_granularity/query.sql @@ -0,0 +1,22 @@ +-- Tags: stateful, no-replicated-database +-- Tag no-replicated-database: Fails due to additional replicas or shards + +DROP TABLE IF EXISTS fixed_granularity_table; + +CREATE TABLE fixed_granularity_table (`WatchID` UInt64, `JavaEnable` UInt8, `Title` String, `GoodEvent` Int16, `EventTime` DateTime, `EventDate` Date, `CounterID` UInt32, `ClientIP` UInt32, `ClientIP6` FixedString(16), `RegionID` UInt32, `UserID` UInt64, `CounterClass` Int8, `OS` UInt8, `UserAgent` UInt8, `URL` String, `Referer` String, `URLDomain` String, `RefererDomain` String, `Refresh` UInt8, `IsRobot` UInt8, `RefererCategories` Array(UInt16), `URLCategories` Array(UInt16), `URLRegions` Array(UInt32), `RefererRegions` Array(UInt32), `ResolutionWidth` UInt16, `ResolutionHeight` UInt16, `ResolutionDepth` UInt8, `FlashMajor` UInt8, `FlashMinor` UInt8, `FlashMinor2` String, `NetMajor` UInt8, `NetMinor` UInt8, `UserAgentMajor` UInt16, `UserAgentMinor` FixedString(2), `CookieEnable` UInt8, `JavascriptEnable` UInt8, `IsMobile` UInt8, `MobilePhone` UInt8, `MobilePhoneModel` String, `Params` String, `IPNetworkID` UInt32, `TraficSourceID` Int8, `SearchEngineID` UInt16, `SearchPhrase` String, `AdvEngineID` UInt8, `IsArtifical` UInt8, `WindowClientWidth` UInt16, `WindowClientHeight` UInt16, `ClientTimeZone` Int16, `ClientEventTime` DateTime, `SilverlightVersion1` UInt8, `SilverlightVersion2` UInt8, `SilverlightVersion3` UInt32, `SilverlightVersion4` UInt16, `PageCharset` String, `CodeVersion` UInt32, `IsLink` UInt8, `IsDownload` UInt8, `IsNotBounce` UInt8, `FUniqID` UInt64, `HID` UInt32, `IsOldCounter` UInt8, `IsEvent` UInt8, `IsParameter` UInt8, `DontCountHits` UInt8, `WithHash` UInt8, `HitColor` FixedString(1), `UTCEventTime` DateTime, `Age` UInt8, `Sex` UInt8, `Income` UInt8, `Interests` UInt16, `Robotness` UInt8, `GeneralInterests` Array(UInt16), `RemoteIP` UInt32, `RemoteIP6` FixedString(16), `WindowName` Int32, `OpenerName` Int32, `HistoryLength` Int16, `BrowserLanguage` FixedString(2), `BrowserCountry` FixedString(2), `SocialNetwork` String, `SocialAction` String, `HTTPError` UInt16, `SendTiming` Int32, `DNSTiming` Int32, `ConnectTiming` Int32, `ResponseStartTiming` Int32, `ResponseEndTiming` Int32, `FetchTiming` Int32, `RedirectTiming` Int32, `DOMInteractiveTiming` Int32, `DOMContentLoadedTiming` Int32, `DOMCompleteTiming` Int32, `LoadEventStartTiming` Int32, `LoadEventEndTiming` Int32, `NSToDOMContentLoadedTiming` Int32, `FirstPaintTiming` Int32, `RedirectCount` Int8, `SocialSourceNetworkID` UInt8, `SocialSourcePage` String, `ParamPrice` Int64, `ParamOrderID` String, `ParamCurrency` FixedString(3), `ParamCurrencyID` UInt16, 
`GoalsReached` Array(UInt32), `OpenstatServiceName` String, `OpenstatCampaignID` String, `OpenstatAdID` String, `OpenstatSourceID` String, `UTMSource` String, `UTMMedium` String, `UTMCampaign` String, `UTMContent` String, `UTMTerm` String, `FromTag` String, `HasGCLID` UInt8, `RefererHash` UInt64, `URLHash` UInt64, `CLID` UInt32, `YCLID` UInt64, `ShareService` String, `ShareURL` String, `ShareTitle` String, `ParsedParams.Key1` Array(String), `ParsedParams.Key2` Array(String), `ParsedParams.Key3` Array(String), `ParsedParams.Key4` Array(String), `ParsedParams.Key5` Array(String), `ParsedParams.ValueDouble` Array(Float64), `IslandID` FixedString(16), `RequestNum` UInt32, `RequestTry` UInt8) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity=8192, index_granularity_bytes=0, min_bytes_for_wide_part = 0; -- looks like default table before update + +INSERT INTO fixed_granularity_table SELECT * FROM test.hits LIMIT 10; -- should still have non adaptive granularity +INSERT INTO fixed_granularity_table SELECT * FROM test.hits LIMIT 10; + +-- We have removed testing of OPTIMIZE because it's too heavy on very slow builds (debug + coverage + thread fuzzer with sleeps) +-- OPTIMIZE TABLE fixed_granularity_table FINAL; -- and even after optimize + +DETACH TABLE fixed_granularity_table; +ATTACH TABLE fixed_granularity_table; + +ALTER TABLE fixed_granularity_table DETACH PARTITION 201403; +ALTER TABLE fixed_granularity_table ATTACH PARTITION 201403; + +SELECT count() from fixed_granularity_table; + +DROP TABLE IF EXISTS fixed_granularity_table; diff --git a/parser/testdata/00152_totals_in_subquery/ast.json b/parser/testdata/00152_totals_in_subquery/ast.json new file mode 100644 index 000000000..f84c5f171 --- /dev/null +++ b/parser/testdata/00152_totals_in_subquery/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier dummy" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier dummy" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier dummy" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001543843, + "rows_read": 20, + "bytes_read": 805 + } +} diff --git a/parser/testdata/00152_totals_in_subquery/metadata.json b/parser/testdata/00152_totals_in_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00152_totals_in_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00152_totals_in_subquery/query.sql b/parser/testdata/00152_totals_in_subquery/query.sql new file mode 
100644 index 000000000..0dbb589d0 --- /dev/null +++ b/parser/testdata/00152_totals_in_subquery/query.sql @@ -0,0 +1,2 @@ +SELECT count() FROM (SELECT dummy, sum(dummy) GROUP BY dummy WITH TOTALS); +SELECT * FROM (SELECT dummy, sum(dummy) GROUP BY dummy WITH TOTALS); diff --git a/parser/testdata/00153_aggregate_arena_race/ast.json b/parser/testdata/00153_aggregate_arena_race/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00153_aggregate_arena_race/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00153_aggregate_arena_race/metadata.json b/parser/testdata/00153_aggregate_arena_race/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00153_aggregate_arena_race/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00153_aggregate_arena_race/query.sql b/parser/testdata/00153_aggregate_arena_race/query.sql new file mode 100644 index 000000000..7d7e11e03 --- /dev/null +++ b/parser/testdata/00153_aggregate_arena_race/query.sql @@ -0,0 +1,6 @@ +-- Tags: stateful, race + +drop table if exists dest00153; +create temporary table dest00153 (`s` AggregateFunction(groupUniqArray, String)) engine Memory; +insert into dest00153 select groupUniqArrayState(RefererDomain) from test.hits group by URLDomain; +drop table if exists dest00153; diff --git a/parser/testdata/00153_transform/ast.json b/parser/testdata/00153_transform/ast.json new file mode 100644 index 000000000..3d1232ccf --- /dev/null +++ b/parser/testdata/00153_transform/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function transform (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal Array_[UInt64_3, UInt64_5, UInt64_7]" + }, + { + "explain": " Literal Array_[UInt64_111, UInt64_222, UInt64_333]" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001119986, + "rows_read": 14, + "bytes_read": 592 + } +} diff --git a/parser/testdata/00153_transform/metadata.json b/parser/testdata/00153_transform/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00153_transform/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00153_transform/query.sql b/parser/testdata/00153_transform/query.sql new file mode 100644 index 000000000..d69a18cb0 --- /dev/null +++ b/parser/testdata/00153_transform/query.sql @@ -0,0 +1,18 @@ +SELECT transform(number, [3, 5, 7], [111, 222, 333]) FROM system.numbers LIMIT 10; +SELECT transform(number, [3, 5, 7], [111, 222, 333], 9999) FROM system.numbers LIMIT 10; +SELECT transform(number, [3, 5, 7], ['hello', 'world', 'abc'], '') FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], ['hello', 'world', 'abc']) FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], ['hello', 'world', 'abc'], '') FROM system.numbers LIMIT 10; 
+SELECT transform(toString(number), ['3', '5', '7'], ['hello', 'world', 'abc'], '-') FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], [111, 222, 333], 0) FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], [111, 222, 333], -1) FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], [111, 222, 333], -1.1) FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], [111, 222.2, 333], 1) FROM system.numbers LIMIT 10; +SELECT transform(1, [2, 3], ['Bigmir)net', 'Google'], 'Остальные') AS title; +SELECT transform(2, [2, 3], ['Bigmir)net', 'Google'], 'Остальные') AS title; +SELECT transform(3, [2, 3], ['Bigmir)net', 'Google'], 'Остальные') AS title; +SELECT transform(4, [2, 3], ['Bigmir)net', 'Google'], 'Остальные') AS title; +SELECT transform('hello', 'wrong', 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT transform('hello', ['wrong'], 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT transform('hello', ['wrong'], [1]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT transform(tuple(1), ['sdf'], [1]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/00154_avro/ast.json b/parser/testdata/00154_avro/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00154_avro/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00154_avro/metadata.json b/parser/testdata/00154_avro/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00154_avro/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00154_avro/query.sql b/parser/testdata/00154_avro/query.sql new file mode 100644 index 000000000..35e446604 --- /dev/null +++ b/parser/testdata/00154_avro/query.sql @@ -0,0 +1,13 @@ +-- Tags: stateful, no-fasttest + +DROP TABLE IF EXISTS avro; + +SET max_threads = 1, max_insert_threads = 0, max_block_size = 8192, min_insert_block_size_rows = 8192, min_insert_block_size_bytes = 1048576; -- lower memory usage + +CREATE TABLE avro AS test.hits ENGINE = File(Avro); +INSERT INTO avro SELECT * FROM test.hits LIMIT 10000; + +SELECT sum(cityHash64(*)) FROM (SELECT * FROM test.hits LIMIT 10000); +SELECT sum(cityHash64(*)) FROM avro; + +DROP TABLE avro; diff --git a/parser/testdata/00154_shard_distributed_with_distinct/ast.json b/parser/testdata/00154_shard_distributed_with_distinct/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00154_shard_distributed_with_distinct/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00154_shard_distributed_with_distinct/metadata.json b/parser/testdata/00154_shard_distributed_with_distinct/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00154_shard_distributed_with_distinct/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00154_shard_distributed_with_distinct/query.sql b/parser/testdata/00154_shard_distributed_with_distinct/query.sql new file mode 100644 index 000000000..a9493418e --- /dev/null +++ b/parser/testdata/00154_shard_distributed_with_distinct/query.sql @@ -0,0 +1,3 @@ +-- Tags: distributed + +SELECT DISTINCT number FROM remote('127.0.0.{2,3}', system.numbers) LIMIT 10 diff --git a/parser/testdata/00156_array_map_to_constant/ast.json b/parser/testdata/00156_array_map_to_constant/ast.json new file mode 100644 index 000000000..fb8f5a118 --- /dev/null +++ 
b/parser/testdata/00156_array_map_to_constant/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal Array_[UInt64_2]" + }, + { + "explain": " Literal UInt64_123 (alias y)" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001584423, + "rows_read": 14, + "bytes_read": 542 + } +} diff --git a/parser/testdata/00156_array_map_to_constant/metadata.json b/parser/testdata/00156_array_map_to_constant/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00156_array_map_to_constant/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00156_array_map_to_constant/query.sql b/parser/testdata/00156_array_map_to_constant/query.sql new file mode 100644 index 000000000..b6b12888e --- /dev/null +++ b/parser/testdata/00156_array_map_to_constant/query.sql @@ -0,0 +1,4 @@ +SELECT arrayMap(x -> 1, [2]), 123 AS y; +SELECT arrayMap(x -> x + 1, [2]), 123 AS y; +SELECT arrayMap(x -> 1, [2, 3]), 123 AS y; +SELECT arrayMap(x -> x + 1, [2, 3]), 123 AS y; diff --git a/parser/testdata/00156_max_execution_speed_sample_merge/ast.json b/parser/testdata/00156_max_execution_speed_sample_merge/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00156_max_execution_speed_sample_merge/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00156_max_execution_speed_sample_merge/metadata.json b/parser/testdata/00156_max_execution_speed_sample_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00156_max_execution_speed_sample_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00156_max_execution_speed_sample_merge/query.sql b/parser/testdata/00156_max_execution_speed_sample_merge/query.sql new file mode 100644 index 000000000..95e2acf68 --- /dev/null +++ b/parser/testdata/00156_max_execution_speed_sample_merge/query.sql @@ -0,0 +1,18 @@ +-- Tags: stateful +SET max_execution_speed = 8000000; +SET timeout_before_checking_execution_speed = 0; + +CREATE TEMPORARY TABLE times (t DateTime); + +INSERT INTO times SELECT now(); +SELECT count() FROM test.hits_s3 SAMPLE 1 / 2; +INSERT INTO times SELECT now(); + +SELECT max(t) - min(t) >= 1 FROM times; +TRUNCATE TABLE times; + +INSERT INTO times SELECT now(); +SELECT count() FROM merge(test, '^hits_s3$') SAMPLE 1 / 2; +INSERT INTO times SELECT now(); + +SELECT max(t) - min(t) >= 1 FROM times; diff --git a/parser/testdata/00157_aliases_and_lambda_formal_parameters/ast.json b/parser/testdata/00157_aliases_and_lambda_formal_parameters/ast.json new file mode 100644 index 000000000..1b7703a78 --- /dev/null +++ b/parser/testdata/00157_aliases_and_lambda_formal_parameters/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal Array_[UInt64_2]" + }, + { + "explain": " Literal UInt64_123 (alias x)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001315224, + "rows_read": 18, + "bytes_read": 676 + } +} diff --git a/parser/testdata/00157_aliases_and_lambda_formal_parameters/metadata.json b/parser/testdata/00157_aliases_and_lambda_formal_parameters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00157_aliases_and_lambda_formal_parameters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00157_aliases_and_lambda_formal_parameters/query.sql b/parser/testdata/00157_aliases_and_lambda_formal_parameters/query.sql new file mode 100644 index 000000000..73942fc93 --- /dev/null +++ b/parser/testdata/00157_aliases_and_lambda_formal_parameters/query.sql @@ -0,0 +1 @@ +SELECT arrayMap(x -> 1, [2]), 123 AS x, x + 1; diff --git a/parser/testdata/00157_cache_dictionary/ast.json b/parser/testdata/00157_cache_dictionary/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00157_cache_dictionary/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00157_cache_dictionary/metadata.json b/parser/testdata/00157_cache_dictionary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00157_cache_dictionary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00157_cache_dictionary/query.sql b/parser/testdata/00157_cache_dictionary/query.sql new file mode 100644 index 000000000..4dbf2cb02 --- /dev/null +++ b/parser/testdata/00157_cache_dictionary/query.sql @@ -0,0 +1,41 @@ +-- Tags: stateful, no-tsan, no-msan, no-asan, no-parallel +-- no-parallel: Heavy + +DROP TABLE IF EXISTS test.hits_1m; + +CREATE TABLE test.hits_1m AS test.hits +ENGINE = MergeTree +PARTITION BY toYYYYMM(EventDate) +ORDER BY (CounterID, EventDate, intHash32(UserID)) +SAMPLE BY intHash32(UserID) +SETTINGS storage_policy = 'default', +-- set index_granularity correctly to avoid time out +index_granularity = 8192, +index_granularity_bytes = 10485760; + +SET max_execution_time = 300; +INSERT INTO test.hits_1m SELECT * FROM test.hits LIMIT 1000000 +SETTINGS min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0, max_block_size = 8192, max_insert_threads = 1, max_threads = 1, max_parallel_replicas=1; + +CREATE DATABASE IF NOT EXISTS db_dict; +DROP DICTIONARY IF EXISTS db_dict.cache_hits; + +CREATE DICTIONARY db_dict.cache_hits +(WatchID UInt64, UserID UInt64, SearchPhrase String) +PRIMARY KEY WatchID +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'hits_1m' PASSWORD '' DB 'test')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(CACHE(SIZE_IN_CELLS 1 
QUERY_WAIT_TIMEOUT_MILLISECONDS 60000)); + +SELECT count() FROM (SELECT WatchID, arrayDistinct(groupArray(dictGetUInt64( 'db_dict.cache_hits', 'UserID', toUInt64(WatchID)))) as arr +FROM test.hits_1m PREWHERE WatchID % 5 == 0 GROUP BY WatchID order by length(arr) desc) WHERE arr = [0]; + +SELECT count() FROM (SELECT WatchID, arrayDistinct(groupArray(dictGetUInt64( 'db_dict.cache_hits', 'UserID', toUInt64(WatchID)))) as arr +FROM test.hits_1m PREWHERE WatchID % 7 == 0 GROUP BY WatchID order by length(arr) desc) WHERE arr = [0]; + +SELECT count() FROM (SELECT WatchID, arrayDistinct(groupArray(dictGetUInt64( 'db_dict.cache_hits', 'UserID', toUInt64(WatchID)))) as arr +FROM test.hits_1m PREWHERE WatchID % 13 == 0 GROUP BY WatchID order by length(arr) desc) WHERE arr = [0]; + +DROP DICTIONARY IF EXISTS db_dict.cache_hits; +DROP DATABASE IF EXISTS db_dict; +DROP TABLE IF EXISTS hits_1m; diff --git a/parser/testdata/00158_buffer_and_nonexistent_table/ast.json b/parser/testdata/00158_buffer_and_nonexistent_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00158_buffer_and_nonexistent_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00158_buffer_and_nonexistent_table/metadata.json b/parser/testdata/00158_buffer_and_nonexistent_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00158_buffer_and_nonexistent_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00158_buffer_and_nonexistent_table/query.sql b/parser/testdata/00158_buffer_and_nonexistent_table/query.sql new file mode 100644 index 000000000..1d988b38b --- /dev/null +++ b/parser/testdata/00158_buffer_and_nonexistent_table/query.sql @@ -0,0 +1,10 @@ + +CREATE DATABASE IF NOT EXISTS {CLICKHOUSE_DATABASE:Identifier}; +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.mt_buffer_00158; +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.mt_00158; +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.mt_buffer_00158 (d Date DEFAULT today(), x UInt64) ENGINE = Buffer({CLICKHOUSE_DATABASE:Identifier}, mt_00158, 16, 100, 100, 1000000, 1000000, 1000000000, 1000000000); +SET send_logs_level = 'fatal'; -- Suppress "Destination table test2.mt doesn't exist. Block of data is discarded."
+INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.mt_buffer_00158 (x) SELECT number AS x FROM system.numbers LIMIT 100000; +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.mt_buffer_00158 (x) SELECT number AS x FROM system.numbers LIMIT 1000000; +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.mt_buffer_00158; +DROP DATABASE {CLICKHOUSE_DATABASE:Identifier}; diff --git a/parser/testdata/00158_cache_dictionary_has/ast.json b/parser/testdata/00158_cache_dictionary_has/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00158_cache_dictionary_has/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00158_cache_dictionary_has/metadata.json b/parser/testdata/00158_cache_dictionary_has/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00158_cache_dictionary_has/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00158_cache_dictionary_has/query.sql b/parser/testdata/00158_cache_dictionary_has/query.sql new file mode 100644 index 000000000..ca1ece60a --- /dev/null +++ b/parser/testdata/00158_cache_dictionary_has/query.sql @@ -0,0 +1,23 @@ +-- Tags: stateful + +DROP DICTIONARY IF EXISTS cache_hits; + +CREATE DICTIONARY cache_hits +(WatchID UInt64, UserID UInt64, SearchPhrase String) +PRIMARY KEY WatchID +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'hits' PASSWORD '' DB 'test')) +LIFETIME(MIN 300 MAX 600) +LAYOUT(CACHE(SIZE_IN_CELLS 100 QUERY_WAIT_TIMEOUT_MILLISECONDS 600000)); + +SET timeout_before_checking_execution_speed = 300; + +SELECT sum(flag) FROM (SELECT dictHas(current_database() || '.cache_hits', toUInt64(WatchID)) as flag FROM test.hits PREWHERE WatchID % 1400 == 0 LIMIT 100); +SELECT count() from test.hits PREWHERE WatchID % 1400 == 0; + +SELECT sum(flag) FROM (SELECT dictHas(current_database() || '.cache_hits', toUInt64(WatchID)) as flag FROM test.hits PREWHERE WatchID % 350 == 0 LIMIT 100); +SELECT count() from test.hits PREWHERE WatchID % 350 == 0; + +SELECT sum(flag) FROM (SELECT dictHas(current_database() || '.cache_hits', toUInt64(WatchID)) as flag FROM test.hits PREWHERE WatchID % 5 == 0 LIMIT 100); +SELECT count() from test.hits PREWHERE WatchID % 5 == 0; + +DROP DICTIONARY IF EXISTS cache_hits; diff --git a/parser/testdata/00159_whitespace_in_columns_list/ast.json b/parser/testdata/00159_whitespace_in_columns_list/ast.json new file mode 100644 index 000000000..354f06961 --- /dev/null +++ b/parser/testdata/00159_whitespace_in_columns_list/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery memory (children 1)" + }, + { + "explain": " Identifier memory" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001406108, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/00159_whitespace_in_columns_list/metadata.json b/parser/testdata/00159_whitespace_in_columns_list/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00159_whitespace_in_columns_list/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00159_whitespace_in_columns_list/query.sql b/parser/testdata/00159_whitespace_in_columns_list/query.sql new file mode 100644 index 000000000..57c057443 --- /dev/null +++ b/parser/testdata/00159_whitespace_in_columns_list/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS memory; +CREATE TABLE memory (x UInt8) ENGINE = Memory; + +INSERT INTO memory VALUES 
(1); +INSERT INTO memory (x) VALUES (2); +INSERT INTO memory ( x) VALUES (3); +INSERT INTO memory (x ) VALUES (4); +INSERT INTO memory ( x ) VALUES (5); +INSERT INTO memory(x)VALUES(6); + +SELECT * FROM memory ORDER BY x; + +DROP TABLE memory; diff --git a/parser/testdata/00160_decode_xml_component/ast.json b/parser/testdata/00160_decode_xml_component/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00160_decode_xml_component/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00160_decode_xml_component/metadata.json b/parser/testdata/00160_decode_xml_component/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00160_decode_xml_component/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00160_decode_xml_component/query.sql b/parser/testdata/00160_decode_xml_component/query.sql new file mode 100644 index 000000000..371881dfd --- /dev/null +++ b/parser/testdata/00160_decode_xml_component/query.sql @@ -0,0 +1,2 @@ +-- Tags: stateful +SELECT sum(DISTINCT sipHash64(decodeXMLComponent(Title) AS decoded)) FROM test.hits WHERE Title != decoded; diff --git a/parser/testdata/00160_merge_and_index_in_in/ast.json b/parser/testdata/00160_merge_and_index_in_in/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00160_merge_and_index_in_in/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00160_merge_and_index_in_in/metadata.json b/parser/testdata/00160_merge_and_index_in_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00160_merge_and_index_in_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00160_merge_and_index_in_in/query.sql b/parser/testdata/00160_merge_and_index_in_in/query.sql new file mode 100644 index 000000000..882026dd7 --- /dev/null +++ b/parser/testdata/00160_merge_and_index_in_in/query.sql @@ -0,0 +1,26 @@ +-- Tags: no-msan +-- ^ slow + +DROP TABLE IF EXISTS mt_00160; +DROP TABLE IF EXISTS merge_00160; + +CREATE TABLE mt_00160 (d Date DEFAULT toDate('2015-05-01'), x UInt64) ENGINE = MergeTree PARTITION BY d ORDER BY x SETTINGS index_granularity = 1, min_bytes_for_wide_part = 0; +CREATE TABLE merge_00160 (d Date, x UInt64) ENGINE = Merge(currentDatabase(), '^mt_00160$'); + +SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +SET max_block_size = 1000000; +INSERT INTO mt_00160 (x) SELECT number AS x FROM system.numbers LIMIT 100000; + +SELECT *, b FROM mt_00160 WHERE x IN (12345, 67890) AND NOT ignore(blockSize() < 10 AS b) ORDER BY x; +SELECT *, b FROM merge_00160 WHERE x IN (12345, 67890) AND NOT ignore(blockSize() < 10 AS b) ORDER BY x; + +DROP TABLE merge_00160; +DROP TABLE mt_00160; + +CREATE TABLE mt_00160 (d Date DEFAULT toDate('2015-05-01'), x UInt64, y UInt64, z UInt64) ENGINE = MergeTree PARTITION BY d ORDER BY (x, z) SETTINGS index_granularity = 1, min_bytes_for_wide_part = 0; + +INSERT INTO mt_00160 (x, y, z) SELECT number AS x, number + 10 AS y, number / 2 AS z FROM system.numbers LIMIT 100000; + +SELECT *, b FROM mt_00160 WHERE (z, y, x) IN ((617, 1244, 1234), (2839, 5688, 5678), (1,1,1)) AND NOT ignore(blockSize() < 10 AS b) ORDER BY (x, y, z); + +DROP TABLE mt_00160; diff --git a/parser/testdata/00161_rounding_functions/ast.json b/parser/testdata/00161_rounding_functions/ast.json new file mode 100644 index 000000000..bfe69ccc7 --- /dev/null +++ 
b/parser/testdata/00161_rounding_functions/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 6)" + }, + { + "explain": " Function toUInt8 (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function roundBankers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function floor (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function ceil (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function trunc (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_20" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.001376699, + "rows_read": 27, + "bytes_read": 1009 + } +} diff --git a/parser/testdata/00161_rounding_functions/metadata.json b/parser/testdata/00161_rounding_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00161_rounding_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00161_rounding_functions/query.sql b/parser/testdata/00161_rounding_functions/query.sql new file mode 100644 index 000000000..9dc117c4f --- /dev/null +++ b/parser/testdata/00161_rounding_functions/query.sql @@ -0,0 +1,50 @@ +SELECT toUInt8(number) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toUInt16(number) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toUInt32(number) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toUInt64(number) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toInt8(number - 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toInt16(number - 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toInt32(number - 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toInt64(number - 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toFloat32(number - 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toFloat64(number - 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; + +SELECT toFloat32((number - 10) / 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toFloat64((number - 10) / 10) AS x, round(x), 
roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; + +SELECT toFloat32((number - 10) / 10) AS x, round(x, 1), roundBankers(x, 1), floor(x, 1), ceil(x, 1), trunc(x, 1) FROM system.numbers LIMIT 20; +SELECT toFloat64((number - 10) / 10) AS x, round(x, 1), roundBankers(x, 1), floor(x, 1), ceil(x, 1), trunc(x, 1) FROM system.numbers LIMIT 20; + +SELECT toUInt8(number) AS x, round(x, -1), roundBankers(x, -1), floor(x, -1), ceil(x, -1), trunc(x, -1) FROM system.numbers LIMIT 20; +SELECT toUInt16(number) AS x, round(x, -1), roundBankers(x, -1), floor(x, -1), ceil(x, -1), trunc(x, -1) FROM system.numbers LIMIT 20; +SELECT toUInt32(number) AS x, round(x, -1), roundBankers(x, -1), floor(x, -1), ceil(x, -1), trunc(x, -1) FROM system.numbers LIMIT 20; +SELECT toUInt64(number) AS x, round(x, -1), roundBankers(x, -1), floor(x, -1), ceil(x, -1), trunc(x, -1) FROM system.numbers LIMIT 20; +SELECT toInt8(number - 10) AS x, round(x, -1), roundBankers(x, -1), floor(x, -1), ceil(x, -1), trunc(x, -1) FROM system.numbers LIMIT 20; +SELECT toInt16(number - 10) AS x, round(x, -1), roundBankers(x, -1), floor(x, -1), ceil(x, -1), trunc(x, -1) FROM system.numbers LIMIT 20; +SELECT toInt32(number - 10) AS x, round(x, -1), roundBankers(x, -1), floor(x, -1), ceil(x, -1), trunc(x, -1) FROM system.numbers LIMIT 20; +SELECT toInt64(number - 10) AS x, round(x, -1), roundBankers(x, -1), floor(x, -1), ceil(x, -1), trunc(x, -1) FROM system.numbers LIMIT 20; +SELECT toFloat32(number - 10) AS x, round(x, -1), roundBankers(x, -1), floor(x, -1), ceil(x, -1), trunc(x, -1) FROM system.numbers LIMIT 20; +SELECT toFloat64(number - 10) AS x, round(x, -1), roundBankers(x, -1), floor(x, -1), ceil(x, -1), trunc(x, -1) FROM system.numbers LIMIT 20; + +SELECT toUInt8(number) AS x, round(x, -2), roundBankers(x, -2), floor(x, -2), ceil(x, -2), trunc(x, -2) FROM system.numbers LIMIT 20; +SELECT toUInt16(number) AS x, round(x, -2), roundBankers(x, -2), floor(x, -2), ceil(x, -2), trunc(x, -2) FROM system.numbers LIMIT 20; +SELECT toUInt32(number) AS x, round(x, -2), roundBankers(x, -2), floor(x, -2), ceil(x, -2), trunc(x, -2) FROM system.numbers LIMIT 20; +SELECT toUInt64(number) AS x, round(x, -2), roundBankers(x, -2), floor(x, -2), ceil(x, -2), trunc(x, -2) FROM system.numbers LIMIT 20; +SELECT toInt8(number - 10) AS x, round(x, -2), roundBankers(x, -2), floor(x, -2), ceil(x, -2), trunc(x, -2) FROM system.numbers LIMIT 20; +SELECT toInt16(number - 10) AS x, round(x, -2), roundBankers(x, -2), floor(x, -2), ceil(x, -2), trunc(x, -2) FROM system.numbers LIMIT 20; +SELECT toInt32(number - 10) AS x, round(x, -2), roundBankers(x, -2), floor(x, -2), ceil(x, -2), trunc(x, -2) FROM system.numbers LIMIT 20; +SELECT toInt64(number - 10) AS x, round(x, -2), roundBankers(x, -2), floor(x, -2), ceil(x, -2), trunc(x, -2) FROM system.numbers LIMIT 20; +SELECT toFloat32(number - 10) AS x, round(x, -2), roundBankers(x, -2), floor(x, -2), ceil(x, -2), trunc(x, -2) FROM system.numbers LIMIT 20; +SELECT toFloat64(number - 10) AS x, round(x, -2), roundBankers(x, -2), floor(x, -2), ceil(x, -2), trunc(x, -2) FROM system.numbers LIMIT 20; + +SELECT 123456789 AS x, floor(x, -1), floor(x, -2), floor(x, -3), floor(x, -4), floor(x, -5), floor(x, -6), floor(x, -7), floor(x, -8), floor(x, -9), floor(x, -10); +SELECT 12345.6789 AS x, floor(x, -1), floor(x, -2), floor(x, -3), floor(x, -4), floor(x, -5), floor(x, 1), floor(x, 2), floor(x, 3), floor(x, 4), floor(x, 5); + + +SELECT roundToExp2(100), roundToExp2(64), roundToExp2(3), roundToExp2(0), 
roundToExp2(-1); +SELECT roundToExp2(0.9), roundToExp2(0), roundToExp2(-0.5), roundToExp2(-0.6), roundToExp2(-0.2); + +select round(2, 4) round2, round(20, 4) round20, round(200, 4) round200, round(5, 4) round5, round(50, 4) round50, round(500, 4) round500, round(toInt32(5), 4) roundInt5, round(toInt32(50), 4) roundInt50, round(toInt32(500), 4) roundInt500; +select roundBankers(2, 4) round2, roundBankers(20, 4) round20, roundBankers(200, 4) round200, roundBankers(5, 4) round5, roundBankers(50, 4) round50, roundBankers(500, 4) round500, roundBankers(toInt32(5), 4) roundInt5, roundBankers(toInt32(50), 4) roundInt50, roundBankers(toInt32(500), 4) roundInt500; + +SELECT ceil(29375422, -54212) --{serverError ARGUMENT_OUT_OF_BOUND} diff --git a/parser/testdata/00162_mmap_compression_none/ast.json b/parser/testdata/00162_mmap_compression_none/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00162_mmap_compression_none/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00162_mmap_compression_none/metadata.json b/parser/testdata/00162_mmap_compression_none/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00162_mmap_compression_none/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00162_mmap_compression_none/query.sql b/parser/testdata/00162_mmap_compression_none/query.sql new file mode 100644 index 000000000..7173d662e --- /dev/null +++ b/parser/testdata/00162_mmap_compression_none/query.sql @@ -0,0 +1,9 @@ +-- Tags: stateful +DROP TABLE IF EXISTS hits_none; +CREATE TABLE hits_none (Title String CODEC(NONE)) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +INSERT INTO hits_none SELECT Title FROM test.hits SETTINGS enable_filesystem_cache_on_write_operations=0, max_insert_threads=16; + +SET min_bytes_to_use_mmap_io = 1; +SELECT sum(length(Title)) FROM hits_none; + +DROP TABLE hits_none; diff --git a/parser/testdata/00162_shard_global_join/ast.json b/parser/testdata/00162_shard_global_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00162_shard_global_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00162_shard_global_join/metadata.json b/parser/testdata/00162_shard_global_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00162_shard_global_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00162_shard_global_join/query.sql b/parser/testdata/00162_shard_global_join/query.sql new file mode 100644 index 000000000..0bf80e70d --- /dev/null +++ b/parser/testdata/00162_shard_global_join/query.sql @@ -0,0 +1,4 @@ +-- Tags: shard + +SELECT n, j1, j2 FROM (SELECT toFloat64(dummy + 2) AS n FROM remote('127.0.0.{2,3}', system.one)) jr1 +GLOBAL ANY LEFT JOIN (SELECT number / 3 AS n, number AS j1, 'Hello' AS j2 FROM system.numbers LIMIT 10) jr2 USING n LIMIT 10; diff --git a/parser/testdata/00163_shard_join_with_empty_table/ast.json b/parser/testdata/00163_shard_join_with_empty_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00163_shard_join_with_empty_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00163_shard_join_with_empty_table/metadata.json b/parser/testdata/00163_shard_join_with_empty_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/00163_shard_join_with_empty_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00163_shard_join_with_empty_table/query.sql b/parser/testdata/00163_shard_join_with_empty_table/query.sql new file mode 100644 index 000000000..7cd653b66 --- /dev/null +++ b/parser/testdata/00163_shard_join_with_empty_table/query.sql @@ -0,0 +1,33 @@ +-- Tags: shard + +SET any_join_distinct_right_table_keys = 1; +SET joined_subquery_requires_alias = 0; +SET join_algorithm = 'hash'; + +SELECT * FROM ( + SELECT number, n, j1, j2 + FROM (SELECT number, number / 2 AS n FROM remote('127.0.0.{2,3}', system.numbers)) + ANY LEFT JOIN (SELECT number / 3 AS n, number AS j1, 'Hello' AS j2 FROM system.numbers LIMIT 0) + USING n LIMIT 10 +) ORDER BY number; + +SELECT * FROM ( + SELECT number, n, j1, j2 + FROM (SELECT dummy + 2 AS number, number / 2 AS n FROM remote('127.0.0.{2,3}', system.one)) + ANY INNER JOIN (SELECT number / 3 AS n, number AS j1, 'Hello' AS j2 FROM system.numbers LIMIT 0) + USING n LIMIT 10 +) ORDER BY number; + +SELECT * FROM ( + SELECT number, n, j1, j2 + FROM (SELECT number, number / 2 AS n FROM remote('127.0.0.{2,3}', system.numbers)) + GLOBAL ANY LEFT JOIN (SELECT number / 3 AS n, number AS j1, 'Hello' AS j2 FROM system.numbers LIMIT 0) + USING n LIMIT 10 +) ORDER BY number; + +SELECT * FROM ( + SELECT number, n, j1, j2 + FROM (SELECT dummy + 2 AS number, number / 2 AS n FROM remote('127.0.0.{2,3}', system.one)) + GLOBAL ANY INNER JOIN (SELECT number / 3 AS n, number AS j1, 'Hello' AS j2 FROM system.numbers LIMIT 0) + USING n LIMIT 10 +) ORDER BY number; diff --git a/parser/testdata/00164_not_chain/ast.json b/parser/testdata/00164_not_chain/ast.json new file mode 100644 index 000000000..87a351d67 --- /dev/null +++ b/parser/testdata/00164_not_chain/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function not (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001779737, + "rows_read": 7, + "bytes_read": 256 + } +} diff --git a/parser/testdata/00164_not_chain/metadata.json b/parser/testdata/00164_not_chain/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00164_not_chain/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00164_not_chain/query.sql b/parser/testdata/00164_not_chain/query.sql new file mode 100644 index 000000000..39fe8724e --- /dev/null +++ b/parser/testdata/00164_not_chain/query.sql @@ -0,0 +1,5 @@ +SELECT NOT 1; +SELECT NOT NOT 1; +SELECT NOT NOT NOT 1; +SELECT NOT NOT NOT NOT 1 = 1; +SELECT NOT NOT not NoT NOT 1 = 1; diff --git a/parser/testdata/00164_quantileBfloat16/ast.json b/parser/testdata/00164_quantileBfloat16/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00164_quantileBfloat16/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00164_quantileBfloat16/metadata.json b/parser/testdata/00164_quantileBfloat16/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00164_quantileBfloat16/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/00164_quantileBfloat16/query.sql b/parser/testdata/00164_quantileBfloat16/query.sql new file mode 100644 index 000000000..cbfee2602 --- /dev/null +++ b/parser/testdata/00164_quantileBfloat16/query.sql @@ -0,0 +1,7 @@ +-- Tags: stateful +SELECT CounterID AS k, quantileBFloat16(0.5)(ResolutionWidth) FROM test.hits GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; +SELECT CounterID AS k, quantilesBFloat16(0.1, 0.5, 0.9, 0.99, 0.999)(ResolutionWidth) FROM test.hits GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; + + +SELECT CounterID AS k, quantileBFloat16(0.5)(ResolutionWidth) FROM remote('127.0.0.{1,2}', test.hits) GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; +SELECT CounterID AS k, quantilesBFloat16(0.1, 0.5, 0.9, 0.99, 0.999)(ResolutionWidth) FROM remote('127.0.0.{1,2}', test.hits) GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; diff --git a/parser/testdata/00165_jit_aggregate_functions/ast.json b/parser/testdata/00165_jit_aggregate_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00165_jit_aggregate_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00165_jit_aggregate_functions/metadata.json b/parser/testdata/00165_jit_aggregate_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00165_jit_aggregate_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00165_jit_aggregate_functions/query.sql b/parser/testdata/00165_jit_aggregate_functions/query.sql new file mode 100644 index 000000000..6efee311f --- /dev/null +++ b/parser/testdata/00165_jit_aggregate_functions/query.sql @@ -0,0 +1,106 @@ +-- Tags: stateful +SET min_count_to_compile_aggregate_expression = 0; +-- The test uses many aggregations. 
A low max_bytes_before_external_group_by value will lead to high disk usage +-- which in CI leads to timeouts +SET max_bytes_before_external_group_by=0; +SET max_bytes_ratio_before_external_group_by=0; + +SELECT 'Aggregation using JIT compilation'; + +SELECT 'Simple functions'; + +SELECT + CounterID, + min(WatchID), + max(WatchID), + sum(WatchID), + avg(WatchID), + avgWeighted(WatchID, CounterID), + count(WatchID), + groupBitOr(WatchID), + groupBitAnd(WatchID), + groupBitXor(WatchID) +FROM test.hits +GROUP BY CounterID ORDER BY count() DESC LIMIT 20; + +SELECT 'Simple functions with non compilable function'; + +SELECT + CounterID, + min(WatchID), + max(WatchID), + sum(WatchID), + sum(toUInt128(WatchID)), + avg(WatchID), + avgWeighted(WatchID, CounterID), + count(WatchID), + groupBitOr(WatchID), + groupBitAnd(WatchID), + groupBitXor(WatchID) +FROM test.hits +GROUP BY CounterID ORDER BY count() DESC LIMIT 20; + +SELECT 'Simple functions if combinator'; + +WITH (WatchID % 2 == 0) AS predicate +SELECT + CounterID, + minIf(WatchID,predicate), + maxIf(WatchID, predicate), + sumIf(WatchID, predicate), + avgIf(WatchID, predicate), + avgWeightedIf(WatchID, CounterID, predicate), + countIf(WatchID, predicate), + groupBitOrIf(WatchID, predicate), + groupBitAndIf(WatchID, predicate), + groupBitXorIf(WatchID, predicate) +FROM test.hits +GROUP BY CounterID ORDER BY count() DESC LIMIT 20; + +SELECT 'Simple functions without key'; + +SELECT + min(WatchID) AS min_watch_id, + max(WatchID), + sum(WatchID), + avg(WatchID), + avgWeighted(WatchID, CounterID), + count(WatchID), + groupBitOr(WatchID), + groupBitAnd(WatchID), + groupBitXor(WatchID) +FROM test.hits +ORDER BY min_watch_id DESC LIMIT 20; + +SELECT 'Simple functions with non compilable function without key'; + +SELECT + min(WatchID) AS min_watch_id, + max(WatchID), + sum(WatchID), + sum(toUInt128(WatchID)), + avg(WatchID), + avgWeighted(WatchID, CounterID), + count(WatchID), + groupBitOr(WatchID), + groupBitAnd(WatchID), + groupBitXor(WatchID) +FROM test.hits +ORDER BY min_watch_id DESC LIMIT 20; + +SELECT 'Simple functions if combinator without key'; + +WITH (WatchID % 2 == 0) AS predicate +SELECT + minIf(WatchID, predicate) as min_watch_id, + maxIf(WatchID, predicate), + sumIf(WatchID, predicate), + avgIf(WatchID, predicate), + avgWeightedIf(WatchID, CounterID, predicate), + countIf(WatchID, predicate), + groupBitOrIf(WatchID, predicate), + groupBitAndIf(WatchID, predicate), + groupBitXorIf(WatchID, predicate) +FROM test.hits +ORDER BY min_watch_id +DESC LIMIT 20; diff --git a/parser/testdata/00165_transform_non_const_default/ast.json b/parser/testdata/00165_transform_non_const_default/ast.json new file mode 100644 index 000000000..313ca88c0 --- /dev/null +++ b/parser/testdata/00165_transform_non_const_default/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function transform (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal Array_[UInt64_3, UInt64_5, UInt64_7]" + }, + { + "explain": " Literal Array_[UInt64_111, UInt64_222, UInt64_333]" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": 
" Literal UInt64_9999" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001509685, + "rows_read": 17, + "bytes_read": 716 + } +} diff --git a/parser/testdata/00165_transform_non_const_default/metadata.json b/parser/testdata/00165_transform_non_const_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00165_transform_non_const_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00165_transform_non_const_default/query.sql b/parser/testdata/00165_transform_non_const_default/query.sql new file mode 100644 index 000000000..ef3b7c1f1 --- /dev/null +++ b/parser/testdata/00165_transform_non_const_default/query.sql @@ -0,0 +1,12 @@ +SELECT transform(number, [3, 5, 7], [111, 222, 333], materialize(9999)) FROM system.numbers LIMIT 10; +SELECT transform(number, [3, 5, 7], ['hello', 'world', 'abc'], materialize('')) FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], ['hello', 'world', 'abc'], materialize('')) FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], ['hello', 'world', 'abc'], materialize('-')) FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], [111, 222, 333], materialize(0)) FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], [111, 222, 333], materialize(-1)) FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], [111, 222, 333], materialize(-1.1)) FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], [111, 222.2, 333], materialize(1)) FROM system.numbers LIMIT 10; +SELECT transform(1, [2, 3], ['Meta.ua', 'Google'], materialize('Остальные')) AS title; +SELECT transform(2, [2, 3], ['Meta.ua', 'Google'], materialize('Остальные')) AS title; +SELECT transform(3, [2, 3], ['Meta.ua', 'Google'], materialize('Остальные')) AS title; +SELECT transform(4, [2, 3], ['Meta.ua', 'Google'], materialize('Остальные')) AS title; diff --git a/parser/testdata/00166_explain_estimate/ast.json b/parser/testdata/00166_explain_estimate/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00166_explain_estimate/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00166_explain_estimate/metadata.json b/parser/testdata/00166_explain_estimate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00166_explain_estimate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00166_explain_estimate/query.sql b/parser/testdata/00166_explain_estimate/query.sql new file mode 100644 index 000000000..d51917aa8 --- /dev/null +++ b/parser/testdata/00166_explain_estimate/query.sql @@ -0,0 +1,10 @@ +-- Tags: stateful, no-replicated-database, no-object-storage +-- Tag no-replicated-database: Requires investigation + +SET optimize_use_implicit_projections = 0; + +EXPLAIN ESTIMATE SELECT count() FROM test.hits WHERE CounterID = 29103473; +EXPLAIN ESTIMATE SELECT count() FROM test.hits WHERE CounterID != 29103473; +EXPLAIN ESTIMATE SELECT count() FROM test.hits WHERE CounterID > 29103473; +EXPLAIN ESTIMATE SELECT count() FROM test.hits WHERE CounterID < 29103473; +EXPLAIN 
ESTIMATE SELECT count() FROM test.hits WHERE CounterID = 29103473 UNION ALL SELECT count() FROM test.hits WHERE CounterID = 1704509; diff --git a/parser/testdata/00166_functions_of_aggregation_states/ast.json b/parser/testdata/00166_functions_of_aggregation_states/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00166_functions_of_aggregation_states/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00166_functions_of_aggregation_states/metadata.json b/parser/testdata/00166_functions_of_aggregation_states/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00166_functions_of_aggregation_states/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00166_functions_of_aggregation_states/query.sql b/parser/testdata/00166_functions_of_aggregation_states/query.sql new file mode 100644 index 000000000..e4df25464 --- /dev/null +++ b/parser/testdata/00166_functions_of_aggregation_states/query.sql @@ -0,0 +1,6 @@ +-- Disable external aggregation because the state is reset for each new block of data in the 'runningAccumulate' function. +SET max_bytes_before_external_group_by = 0; +SET max_bytes_ratio_before_external_group_by = 0; +SET allow_deprecated_error_prone_window_functions = 1; + +SELECT k, finalizeAggregation(sum_state), runningAccumulate(sum_state) FROM (SELECT intDiv(number, 50000) AS k, sumState(number) AS sum_state FROM (SELECT number FROM system.numbers LIMIT 1000000) GROUP BY k ORDER BY k); diff --git a/parser/testdata/00167_read_bytes_from_fs/ast.json b/parser/testdata/00167_read_bytes_from_fs/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00167_read_bytes_from_fs/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00167_read_bytes_from_fs/metadata.json b/parser/testdata/00167_read_bytes_from_fs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00167_read_bytes_from_fs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00167_read_bytes_from_fs/query.sql b/parser/testdata/00167_read_bytes_from_fs/query.sql new file mode 100644 index 000000000..082fb5282 --- /dev/null +++ b/parser/testdata/00167_read_bytes_from_fs/query.sql @@ -0,0 +1,11 @@ +-- Tags: stateful, no-random-settings, no-parallel +-- no-parallel: Heavy query + +SET max_memory_usage = '10G'; +SELECT sum(cityHash64(*)) FROM test.hits SETTINGS max_threads=40; + +-- We had a bug which led to additional compressed data reads. test.hits compressed size is about 1.2Gb, but we read more than 3Gb. +-- Small additional reads are still possible, so we compare with about 1.5Gb.
+SYSTEM FLUSH LOGS query_log; + +SELECT ProfileEvents['ReadBufferFromFileDescriptorReadBytes'] < 1500000000 from system.query_log where query = 'SELECT sum(cityHash64(*)) FROM test.hits SETTINGS max_threads=40;' and current_database = currentDatabase() and type = 'QueryFinish'; diff --git a/parser/testdata/00167_settings_inside_query/ast.json b/parser/testdata/00167_settings_inside_query/ast.json new file mode 100644 index 000000000..d7c5160ea --- /dev/null +++ b/parser/testdata/00167_settings_inside_query/ast.json @@ -0,0 +1,118 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function min (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toUInt64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_1000" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function blockSize (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Set" + }, + { + "explain": " Set" + } + ], + + "rows": 32, + + "statistics": + { + "elapsed": 0.001880074, + "rows_read": 32, + "bytes_read": 1276 + } +} diff --git a/parser/testdata/00167_settings_inside_query/metadata.json b/parser/testdata/00167_settings_inside_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00167_settings_inside_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00167_settings_inside_query/query.sql b/parser/testdata/00167_settings_inside_query/query.sql new file mode 100644 index 000000000..987a9475b --- /dev/null +++ b/parser/testdata/00167_settings_inside_query/query.sql @@ -0,0 +1,2 @@ +SELECT min(number) FROM system.numbers WHERE toUInt64(number % 1000) IN (SELECT DISTINCT blockSize() FROM system.numbers SETTINGS max_block_size = 123, max_rows_to_read = 1000, read_overflow_mode = 'break') SETTINGS max_rows_to_read = 1000000, read_overflow_mode = 'break'; +SELECT * FROM (SELECT DISTINCT blockSize() AS x FROM system.numbers SETTINGS max_block_size = 123, max_rows_to_read = 1000, read_overflow_mode = 'break'); diff --git a/parser/testdata/00168_buffer_defaults/ast.json b/parser/testdata/00168_buffer_defaults/ast.json new file mode 100644 index 000000000..21a8a076d --- /dev/null +++ 
b/parser/testdata/00168_buffer_defaults/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mt_00168 (children 1)" + }, + { + "explain": " Identifier mt_00168" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001194829, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00168_buffer_defaults/metadata.json b/parser/testdata/00168_buffer_defaults/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00168_buffer_defaults/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00168_buffer_defaults/query.sql b/parser/testdata/00168_buffer_defaults/query.sql new file mode 100644 index 000000000..ce1dea8aa --- /dev/null +++ b/parser/testdata/00168_buffer_defaults/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS mt_00168; +DROP TABLE IF EXISTS mt_00168_buffer; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE mt_00168 (EventDate Date, UTCEventTime DateTime, MoscowEventDate Date DEFAULT toDate(UTCEventTime)) ENGINE = MergeTree(EventDate, UTCEventTime, 8192); +CREATE TABLE mt_00168_buffer AS mt_00168 ENGINE = Buffer(currentDatabase(), mt_00168, 16, 10, 100, 10000, 1000000, 10000000, 100000000); +DESC TABLE mt_00168; +DESC TABLE mt_00168_buffer; +INSERT INTO mt_00168 (EventDate, UTCEventTime) VALUES ('2015-06-09', '2015-06-09 01:02:03'); +SELECT * FROM mt_00168_buffer; +INSERT INTO mt_00168_buffer (EventDate, UTCEventTime) VALUES ('2015-06-09', '2015-06-09 01:02:03'); +SELECT * FROM mt_00168_buffer; +DROP TABLE mt_00168_buffer; +DROP TABLE mt_00168; diff --git a/parser/testdata/00169_contingency/ast.json b/parser/testdata/00169_contingency/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00169_contingency/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00169_contingency/metadata.json b/parser/testdata/00169_contingency/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00169_contingency/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00169_contingency/query.sql b/parser/testdata/00169_contingency/query.sql new file mode 100644 index 000000000..4cb4df919 --- /dev/null +++ b/parser/testdata/00169_contingency/query.sql @@ -0,0 +1,15 @@ +-- Tags: stateful +WITH URLDomain AS a, URLDomain AS b +SELECT round(cramersV(a, b), 2), round(cramersVBiasCorrected(a, b), 2), round(theilsU(a, b), 2), round(theilsU(b, a), 2), round(contingency(a, b), 2) FROM test.hits; + +WITH URLDomain AS a, RefererDomain AS b +SELECT round(cramersV(a, b), 2), round(cramersVBiasCorrected(a, b), 2), round(theilsU(a, b), 2), round(theilsU(b, a), 2), round(contingency(a, b), 2) FROM test.hits; + +WITH URLDomain AS a, CounterID AS b +SELECT round(cramersV(a, b), 2), round(cramersVBiasCorrected(a, b), 2), round(theilsU(a, b), 2), round(theilsU(b, a), 2), round(contingency(a, b), 2) FROM test.hits; + +WITH ClientIP AS a, RemoteIP AS b +SELECT round(cramersV(a, b), 2), round(cramersVBiasCorrected(a, b), 2), round(theilsU(a, b), 2), round(theilsU(b, a), 2), round(contingency(a, b), 2) FROM test.hits; + +WITH ResolutionWidth AS a, ResolutionHeight AS b +SELECT round(cramersV(a, b), 2), round(cramersVBiasCorrected(a, b), 2), round(theilsU(a, b), 2), round(theilsU(b, a), 2), round(contingency(a, b), 2) FROM test.hits; diff --git a/parser/testdata/00169_join_constant_keys/ast.json 
diff --git a/parser/testdata/00169_join_constant_keys/ast.json b/parser/testdata/00169_join_constant_keys/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00169_join_constant_keys/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00169_join_constant_keys/metadata.json b/parser/testdata/00169_join_constant_keys/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00169_join_constant_keys/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00169_join_constant_keys/query.sql b/parser/testdata/00169_join_constant_keys/query.sql
new file mode 100644
index 000000000..03c01c074
--- /dev/null
+++ b/parser/testdata/00169_join_constant_keys/query.sql
@@ -0,0 +1,18 @@
+SELECT
+    key1,
+    key2,
+    table_1
+FROM
+(
+    SELECT
+        arrayJoin([1, 2, 3]) AS key1,
+        0 AS key2,
+        999 AS table_1
+) js1 ALL INNER JOIN
+(
+    SELECT
+        arrayJoin([1, 3, 2]) AS key1,
+        0 AS key2,
+        999 AS table_1
+) js2 USING key2, key1
+ORDER BY key1;
diff --git a/parser/testdata/00170_lower_upper_utf8/ast.json b/parser/testdata/00170_lower_upper_utf8/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00170_lower_upper_utf8/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00170_lower_upper_utf8/metadata.json b/parser/testdata/00170_lower_upper_utf8/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00170_lower_upper_utf8/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00170_lower_upper_utf8/query.sql b/parser/testdata/00170_lower_upper_utf8/query.sql
new file mode 100644
index 000000000..7c7bbac0d
--- /dev/null
+++ b/parser/testdata/00170_lower_upper_utf8/query.sql
@@ -0,0 +1,43 @@
+-- Tags: no-fasttest
+-- no-fasttest: upper/lowerUTF8 use ICU
+
+select lower('aaaaaaaaaaaaaaa012345789,.!aaaa' as str) = str;
+select lowerUTF8('aaaaaaaaaaaaaaa012345789,.!aaaa' as str) = str;
+select lower('AaAaAaAaAaAaAaA012345789,.!aAaA') = 'aaaaaaaaaaaaaaa012345789,.!aaaa';
+select lowerUTF8('AaAaAaAaAaAaAaA012345789,.!aAaA') = 'aaaaaaaaaaaaaaa012345789,.!aaaa';
+
+select upper('AAAAAAAAAAAAAAA012345789,.!AAAA' as str) = str;
+select upperUTF8('AAAAAAAAAAAAAAA012345789,.!AAAA' as str) = str;
+select upper('AaAaAaAaAaAaAaA012345789,.!aAaA') = 'AAAAAAAAAAAAAAA012345789,.!AAAA';
+select upperUTF8('AaAaAaAaAaAaAaA012345789,.!aAaA') = 'AAAAAAAAAAAAAAA012345789,.!AAAA';
+
+select sum(lower(materialize('aaaaaaaaaaaaaaa012345789,.!aaaa') as str) = str) = count() from system.one array join range(16384) as n;
+select sum(lowerUTF8(materialize('aaaaaaaaaaaaaaa012345789,.!aaaa') as str) = str) = count() from system.one array join range(16384) as n;
+select sum(lower(materialize('AaAaAaAaAaAaAaA012345789,.!aAaA')) = materialize('aaaaaaaaaaaaaaa012345789,.!aaaa')) = count() from system.one array join range(16384) as n;
+select sum(lowerUTF8(materialize('AaAaAaAaAaAaAaA012345789,.!aAaA')) = materialize('aaaaaaaaaaaaaaa012345789,.!aaaa')) = count() from system.one array join range(16384) as n;
+
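+-- materialize() turns a constant into a full column, so each pair of checks
+-- in this file covers both the constant-folded and the vectorized code path.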
+select sum(upper(materialize('AAAAAAAAAAAAAAA012345789,.!AAAA') as str) = str) = count() from system.one array join range(16384) as n;
+select sum(upperUTF8(materialize('AAAAAAAAAAAAAAA012345789,.!AAAA') as str) = str) = count() from system.one array join range(16384) as n;
+select sum(upper(materialize('AaAaAaAaAaAaAaA012345789,.!aAaA')) = materialize('AAAAAAAAAAAAAAA012345789,.!AAAA')) = count() from system.one array join range(16384) as n;
+select sum(upperUTF8(materialize('AaAaAaAaAaAaAaA012345789,.!aAaA')) = materialize('AAAAAAAAAAAAAAA012345789,.!AAAA')) = count() from system.one array join range(16384) as n;
+
+select lower('aaaaАБВГAAAAaaAA') = 'aaaaАБВГaaaaaaaa';
+select upper('aaaaАБВГAAAAaaAA') = 'AAAAАБВГAAAAAAAA';
+select lowerUTF8('aaaaАБВГAAAAaaAA') = 'aaaaабвгaaaaaaaa';
+select upperUTF8('aaaaАБВГAAAAaaAA') = 'AAAAАБВГAAAAAAAA';
+
+select sum(lower(materialize('aaaaАБВГAAAAaaAA')) = materialize('aaaaАБВГaaaaaaaa')) = count() from system.one array join range(16384) as n;
+select sum(upper(materialize('aaaaАБВГAAAAaaAA')) = materialize('AAAAАБВГAAAAAAAA')) = count() from system.one array join range(16384) as n;
+select sum(lowerUTF8(materialize('aaaaАБВГAAAAaaAA')) = materialize('aaaaабвгaaaaaaaa')) = count() from system.one array join range(16384) as n;
+select sum(upperUTF8(materialize('aaaaАБВГAAAAaaAA')) = materialize('AAAAАБВГAAAAAAAA')) = count() from system.one array join range(16384) as n;
+
+-- Turkish language
+select upperUTF8('ır') = 'IR';
+select lowerUTF8('ır') = 'ır';
+
+-- German language
+select upper('öäüß') = 'öäüß';
+select lower('ÖÄÜẞ') = 'ÖÄÜẞ';
+
+-- Bug 68680
+SELECT lengthUTF8(lowerUTF8('Ä\0'));
diff --git a/parser/testdata/00170_s3_cache/ast.json b/parser/testdata/00170_s3_cache/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00170_s3_cache/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00170_s3_cache/metadata.json b/parser/testdata/00170_s3_cache/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00170_s3_cache/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00170_s3_cache/query.sql b/parser/testdata/00170_s3_cache/query.sql
new file mode 100644
index 000000000..4619be88a
--- /dev/null
+++ b/parser/testdata/00170_s3_cache/query.sql
@@ -0,0 +1,53 @@
+-- Tags: stateful, no-parallel, no-random-settings
+-- no-parallel: Heavy and it drops filesystem cache
+
+-- { echo }
+
+SET allow_prefetched_read_pool_for_remote_filesystem=0;
+SET enable_filesystem_cache_on_write_operations=0;
+SET max_memory_usage='20G';
+SET read_through_distributed_cache = 1;
+SYSTEM DROP FILESYSTEM CACHE;
+SELECT count() FROM test.hits_s3;
+SELECT count() FROM test.hits_s3 WHERE AdvEngineID != 0;
+SELECT sum(AdvEngineID), count(), avg(ResolutionWidth) FROM test.hits_s3 ;
+SELECT sum(UserID) FROM test.hits_s3 ;
+SELECT uniq(UserID) FROM test.hits_s3 ;
+SELECT uniq(SearchPhrase) FROM test.hits_s3 ;
+SELECT min(EventDate), max(EventDate) FROM test.hits_s3 ;
+SELECT AdvEngineID, count() FROM test.hits_s3 WHERE AdvEngineID != 0 GROUP BY AdvEngineID ORDER BY AdvEngineID DESC;
+SELECT RegionID, uniq(UserID) AS u FROM test.hits_s3 GROUP BY RegionID ORDER BY u DESC LIMIT 10;
+SELECT RegionID, sum(AdvEngineID), count() AS c, avg(ResolutionWidth), uniq(UserID) FROM test.hits_s3 GROUP BY RegionID ORDER BY c DESC LIMIT 10;
+SELECT MobilePhoneModel, uniq(UserID) AS u FROM test.hits_s3 WHERE MobilePhoneModel != '' GROUP BY MobilePhoneModel ORDER BY u DESC LIMIT 10;
+SELECT MobilePhone, MobilePhoneModel, uniq(UserID) AS u FROM test.hits_s3 WHERE MobilePhoneModel != '' GROUP BY MobilePhone, MobilePhoneModel ORDER BY u DESC LIMIT 10;
+SELECT uniq(SearchPhrase), count() AS c FROM test.hits_s3 WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
+SELECT uniq(SearchPhrase), uniq(UserID) AS u FROM test.hits_s3 WHERE SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u DESC LIMIT 10;
+SELECT SearchEngineID, uniq(SearchPhrase), count() AS c FROM test.hits_s3 WHERE SearchPhrase != '' GROUP BY SearchEngineID, SearchPhrase ORDER BY c DESC LIMIT 10;
+SELECT UserID, count() FROM test.hits_s3 GROUP BY UserID ORDER BY count() DESC LIMIT 10;
+SELECT UserID, uniq(SearchPhrase) as m, count() as c FROM test.hits_s3 GROUP BY UserID, SearchPhrase ORDER BY UserID, m, c DESC LIMIT 10;
+SELECT UserID, uniq(SearchPhrase) as m, count() as c FROM test.hits_s3 GROUP BY UserID, SearchPhrase ORDER BY UserID, m, c LIMIT 10;
+SELECT UserID, toMinute(EventTime) AS m, uniq(SearchPhrase) as u, count() as c FROM test.hits_s3 GROUP BY UserID, m, SearchPhrase ORDER BY UserID DESC LIMIT 10 FORMAT Null;
+SELECT UserID FROM test.hits_s3 WHERE UserID = 12345678901234567890;
+SELECT count() FROM test.hits_s3 WHERE URL LIKE '%metrika%';
+SELECT uniq(SearchPhrase) as u, max(URL) as m, count() AS c FROM test.hits_s3 WHERE URL LIKE '%metrika%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY u, m, c DESC LIMIT 10;
+SELECT uniq(SearchPhrase), max(URL), max(Title), count() AS c, uniq(UserID) FROM test.hits_s3 WHERE Title LIKE '%Яндекс%' AND URL NOT LIKE '%.yandex.%' AND SearchPhrase != '' GROUP BY SearchPhrase ORDER BY c DESC LIMIT 10;
+SELECT * FROM test.hits_s3 WHERE URL LIKE '%metrika%' ORDER BY EventTime LIMIT 10 format Null;
+SELECT SearchPhrase FROM test.hits_s3 WHERE SearchPhrase != '' ORDER BY EventTime LIMIT 10 FORMAT Null;
+SELECT SearchPhrase FROM test.hits_s3 WHERE SearchPhrase != '' ORDER BY SearchPhrase LIMIT 10 FORMAT Null;
+SELECT SearchPhrase FROM test.hits_s3 WHERE SearchPhrase != '' ORDER BY EventTime, SearchPhrase LIMIT 10 FORMAT Null;
+SELECT CounterID, avg(length(URL)) AS l, count() AS c FROM test.hits_s3 WHERE URL != '' GROUP BY CounterID HAVING c > 100000 ORDER BY l DESC LIMIT 25;
+SELECT domainWithoutWWW(Referer) AS key, avg(length(Referer)) AS l, count() AS c, max(Referer) FROM test.hits_s3 WHERE Referer != '' GROUP BY key HAVING c > 100000 ORDER BY l DESC LIMIT 25;
+SELECT sum(ResolutionWidth), sum(ResolutionWidth + 1), sum(ResolutionWidth + 2), sum(ResolutionWidth + 3), sum(ResolutionWidth + 4), sum(ResolutionWidth + 5), sum(ResolutionWidth + 6), sum(ResolutionWidth + 7), sum(ResolutionWidth + 8), sum(ResolutionWidth + 9), sum(ResolutionWidth + 10), sum(ResolutionWidth + 11), sum(ResolutionWidth + 12), sum(ResolutionWidth + 13), sum(ResolutionWidth + 14), sum(ResolutionWidth + 15), sum(ResolutionWidth + 16), sum(ResolutionWidth + 17), sum(ResolutionWidth + 18), sum(ResolutionWidth + 19), sum(ResolutionWidth + 20), sum(ResolutionWidth + 21), sum(ResolutionWidth + 22), sum(ResolutionWidth + 23), sum(ResolutionWidth + 24), sum(ResolutionWidth + 25), sum(ResolutionWidth + 26), sum(ResolutionWidth + 27), sum(ResolutionWidth + 28), sum(ResolutionWidth + 29), sum(ResolutionWidth + 30), sum(ResolutionWidth + 31), sum(ResolutionWidth + 32), sum(ResolutionWidth + 33), sum(ResolutionWidth + 34), sum(ResolutionWidth + 35), sum(ResolutionWidth + 36), sum(ResolutionWidth + 37), sum(ResolutionWidth + 38), sum(ResolutionWidth + 39), sum(ResolutionWidth + 40), sum(ResolutionWidth + 41), sum(ResolutionWidth + 42), sum(ResolutionWidth + 43), sum(ResolutionWidth + 44), sum(ResolutionWidth + 45), sum(ResolutionWidth + 46), sum(ResolutionWidth + 47), sum(ResolutionWidth + 48), sum(ResolutionWidth + 49), sum(ResolutionWidth + 50), sum(ResolutionWidth + 51), sum(ResolutionWidth + 52), sum(ResolutionWidth + 53), sum(ResolutionWidth + 54), sum(ResolutionWidth + 55), sum(ResolutionWidth + 56), sum(ResolutionWidth + 57), sum(ResolutionWidth + 58), sum(ResolutionWidth + 59), sum(ResolutionWidth + 60), sum(ResolutionWidth + 61), sum(ResolutionWidth + 62), sum(ResolutionWidth + 63), sum(ResolutionWidth + 64), sum(ResolutionWidth + 65), sum(ResolutionWidth + 66), sum(ResolutionWidth + 67), sum(ResolutionWidth + 68), sum(ResolutionWidth + 69), sum(ResolutionWidth + 70), sum(ResolutionWidth + 71), sum(ResolutionWidth + 72), sum(ResolutionWidth + 73), sum(ResolutionWidth + 74), sum(ResolutionWidth + 75), sum(ResolutionWidth + 76), sum(ResolutionWidth + 77), sum(ResolutionWidth + 78), sum(ResolutionWidth + 79), sum(ResolutionWidth + 80), sum(ResolutionWidth + 81), sum(ResolutionWidth + 82), sum(ResolutionWidth + 83), sum(ResolutionWidth + 84), sum(ResolutionWidth + 85), sum(ResolutionWidth + 86), sum(ResolutionWidth + 87), sum(ResolutionWidth + 88), sum(ResolutionWidth + 89) FROM test.hits_s3;
+SELECT SearchEngineID, ClientIP, count() AS c, sum(Refresh), avg(ResolutionWidth) FROM test.hits_s3 WHERE SearchPhrase != '' GROUP BY SearchEngineID, ClientIP ORDER BY c DESC LIMIT 10;
+SELECT WatchID, ClientIP, count() AS c, sum(Refresh), avg(ResolutionWidth) FROM test.hits_s3 WHERE SearchPhrase != '' GROUP BY WatchID, ClientIP ORDER BY c, WatchID DESC LIMIT 10;
+SELECT WatchID, ClientIP, count() AS c, sum(Refresh), avg(ResolutionWidth) FROM test.hits_s3 GROUP BY WatchID, ClientIP ORDER BY c, WatchID DESC LIMIT 10;
+SELECT URL, count() AS c FROM test.hits_s3 GROUP BY URL ORDER BY c DESC LIMIT 10;
+SELECT 1, URL, count() AS c FROM test.hits_s3 GROUP BY 1, URL ORDER BY c DESC LIMIT 10;
+SELECT ClientIP AS x, x - 1, x - 2, x - 3, count() AS c FROM test.hits_s3 GROUP BY x, x - 1, x - 2, x - 3 ORDER BY c DESC LIMIT 10;
+SELECT URL, count() AS PageViews FROM test.hits_s3 WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT DontCountHits AND NOT Refresh AND notEmpty(URL) GROUP BY URL ORDER BY PageViews DESC LIMIT 10;
+SELECT Title, count() AS PageViews FROM test.hits_s3 WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT DontCountHits AND NOT Refresh AND notEmpty(Title) GROUP BY Title ORDER BY PageViews, Title DESC LIMIT 10;
+SELECT URL, count() AS PageViews FROM test.hits_s3 WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh AND IsLink AND NOT IsDownload GROUP BY URL ORDER BY PageViews DESC LIMIT 1000;
+SELECT TraficSourceID, SearchEngineID, AdvEngineID, ((SearchEngineID = 0 AND AdvEngineID = 0) ? Referer : '') AS Src, URL AS Dst, count() AS PageViews FROM test.hits_s3 WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh GROUP BY TraficSourceID, SearchEngineID, AdvEngineID, Src, Dst ORDER BY PageViews, TraficSourceID DESC LIMIT 1000;
+SELECT URLHash, EventDate, count() AS PageViews FROM test.hits_s3 WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh AND TraficSourceID IN (-1, 6) AND RefererHash = halfMD5('http://example.ru/') GROUP BY URLHash, EventDate ORDER BY PageViews DESC LIMIT 100;
+SELECT WindowClientWidth, WindowClientHeight, count() AS PageViews FROM test.hits_s3 WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-31' AND NOT Refresh AND NOT DontCountHits AND URLHash = halfMD5('http://example.ru/') GROUP BY WindowClientWidth, WindowClientHeight ORDER BY PageViews DESC LIMIT 10000;
+SELECT toStartOfMinute(EventTime) AS Minute, count() AS PageViews FROM test.hits_s3 WHERE CounterID = 62 AND EventDate >= '2013-07-01' AND EventDate <= '2013-07-02' AND NOT Refresh AND NOT DontCountHits GROUP BY Minute ORDER BY Minute;
diff --git a/parser/testdata/00171_grouping_aggregated_transform_bug/ast.json b/parser/testdata/00171_grouping_aggregated_transform_bug/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00171_grouping_aggregated_transform_bug/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00171_grouping_aggregated_transform_bug/metadata.json b/parser/testdata/00171_grouping_aggregated_transform_bug/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00171_grouping_aggregated_transform_bug/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00171_grouping_aggregated_transform_bug/query.sql b/parser/testdata/00171_grouping_aggregated_transform_bug/query.sql
new file mode 100644
index 000000000..712634c27
--- /dev/null
+++ b/parser/testdata/00171_grouping_aggregated_transform_bug/query.sql
@@ -0,0 +1,6 @@
+-- Tags: stateful, distributed, no-object-storage
+-- no-object-storage: https://github.com/ClickHouse/ClickHouse/issues/74943
+
+SET max_rows_to_read = '100M';
+SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS max_block_size = 63169;
+SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS optimize_aggregation_in_order = 1, max_block_size = 63169;
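The remote() address pattern '127.0.0.{1,2,...,10}' above expands to ten addresses of the same local server, so the query fans out as if over ten shards. The expansion is easy to see in isolation; a minimal sketch:

SELECT count() FROM remote('127.0.0.{1,2}', system.one); -- one row per expanded address, so 2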
diff --git a/parser/testdata/00171_shard_array_of_tuple_remote/ast.json b/parser/testdata/00171_shard_array_of_tuple_remote/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00171_shard_array_of_tuple_remote/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00171_shard_array_of_tuple_remote/metadata.json b/parser/testdata/00171_shard_array_of_tuple_remote/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00171_shard_array_of_tuple_remote/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00171_shard_array_of_tuple_remote/query.sql b/parser/testdata/00171_shard_array_of_tuple_remote/query.sql
new file mode 100644
index 000000000..0189d3a63
--- /dev/null
+++ b/parser/testdata/00171_shard_array_of_tuple_remote/query.sql
@@ -0,0 +1,4 @@
+-- Tags: shard
+
+SELECT arrayMap((x, y) -> (x, y), [1, 2, 3], [4, 5, 6]) FROM remote('127.0.0.{2,3}', system.one) ORDER BY rand();
+SELECT arrayMap((x, y) -> (x, y), [1, 2, 3], [4, 5, 6]) FROM remote('127.0.0.{2,3}') ORDER BY rand();
diff --git a/parser/testdata/00172_constexprs_in_set/ast.json b/parser/testdata/00172_constexprs_in_set/ast.json
new file mode 100644
index 000000000..6e078a071
--- /dev/null
+++ b/parser/testdata/00172_constexprs_in_set/ast.json
@@ -0,0 +1,154 @@
+{
+  "meta":
+  [
+    {
+      "name": "explain",
+      "type": "String"
+    }
+  ],
+
+  "data":
+  [
+    {
+      "explain": "SelectWithUnionQuery (children 1)"
+    },
+    {
+      "explain": " ExpressionList (children 1)"
+    },
+    {
+      "explain": "  SelectQuery (children 2)"
+    },
+    {
+      "explain": "   ExpressionList (children 2)"
+    },
+    {
+      "explain": "    Function sumIf (children 1)"
+    },
+    {
+      "explain": "     ExpressionList (children 2)"
+    },
+    {
+      "explain": "      Identifier number"
+    },
+    {
+      "explain": "      Identifier x"
+    },
+    {
+      "explain": "    Function sum (children 1)"
+    },
+    {
+      "explain": "     ExpressionList (children 1)"
+    },
+    {
+      "explain": "      Identifier x"
+    },
+    {
+      "explain": "   TablesInSelectQuery (children 1)"
+    },
+    {
+      "explain": "    TablesInSelectQueryElement (children 1)"
+    },
+    {
+      "explain": "     TableExpression (children 1)"
+    },
+    {
+      "explain": "      Subquery (children 1)"
+    },
+    {
+      "explain": "       SelectWithUnionQuery (children 1)"
+    },
+    {
+      "explain": "        ExpressionList (children 1)"
+    },
+    {
+      "explain": "         SelectQuery (children 3)"
+    },
+    {
+      "explain": "          ExpressionList (children 2)"
+    },
+    {
+      "explain": "           Identifier number"
+    },
+    {
+      "explain": "           Function in (alias x) (children 1)"
+    },
+    {
+      "explain": "            ExpressionList (children 2)"
+    },
+    {
+      "explain": "             Identifier number"
+    },
+    {
+      "explain": "             Function tuple (children 1)"
+    },
+    {
+      "explain": "              ExpressionList (children 3)"
+    },
+    {
+      "explain": "               Function plus (children 1)"
+    },
+    {
+      "explain": "                ExpressionList (children 2)"
+    },
+    {
+      "explain": "                 Literal UInt64_0"
+    },
+    {
+      "explain": "                 Literal UInt64_1"
+    },
+    {
+      "explain": "               Function plus (children 1)"
+    },
+    {
+      "explain": "                ExpressionList (children 2)"
+    },
+    {
+      "explain": "                 Literal UInt64_2"
+    },
+    {
+      "explain": "                 Literal UInt64_3"
+    },
+    {
+      "explain": "               Function toUInt64 (children 1)"
+    },
+    {
+      "explain": "                ExpressionList (children 1)"
+    },
+    {
+      "explain": "                 Function concat (children 1)"
+    },
+    {
+      "explain": "                  ExpressionList (children 2)"
+    },
+    {
+      "explain": "                   Literal '8'"
+    },
+    {
+      "explain": "                   Literal ''"
+    },
+    {
+      "explain": "          TablesInSelectQuery (children 1)"
+    },
+    {
+      "explain": "           TablesInSelectQueryElement (children 1)"
+    },
+    {
+      "explain": "            TableExpression (children 1)"
+    },
+    {
+      "explain": "             TableIdentifier system.numbers"
+    },
+    {
+      "explain": "          Literal UInt64_10"
+    }
+  ],

+  "rows": 44,
+
+  "statistics":
+  {
+    "elapsed": 0.002591436,
+    "rows_read": 44,
+    "bytes_read": 1897
+  }
+}
diff --git a/parser/testdata/00172_constexprs_in_set/metadata.json b/parser/testdata/00172_constexprs_in_set/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00172_constexprs_in_set/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
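As the fixture above shows, constant expressions inside an IN list stay unfolded in the AST (e.g. Function plus over Literal UInt64_0 and UInt64_1); collapsing them into a set happens later, during expression analysis. For example:

SELECT 1 IN (0 + 1, 2 + 3); -- the sums are evaluated before the set is built, so this returns 1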
diff --git a/parser/testdata/00172_constexprs_in_set/query.sql b/parser/testdata/00172_constexprs_in_set/query.sql
new file mode 100644
index 000000000..5d79185ee
--- /dev/null
+++ b/parser/testdata/00172_constexprs_in_set/query.sql
@@ -0,0 +1,7 @@
+SELECT sumIf(number, x), sum(x) FROM (SELECT number, number IN (0 + 1, 2 + 3, toUInt64(concat('8', ''))) AS x FROM system.numbers LIMIT 10);
+SELECT toDate('2015-06-12') IN toDate('2015-06-12');
+SELECT toDate('2015-06-12') IN (toDate('2015-06-12'));
+SELECT today() IN (toDate('2014-01-01'), toDate(now()));
+SELECT - -1 IN (2 - 1);
+SELECT - -1 IN (2 - 1, 3);
+WITH (1, 2) AS a SELECT 1 IN a, 3 IN a;
diff --git a/parser/testdata/00172_early_constant_folding/ast.json b/parser/testdata/00172_early_constant_folding/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00172_early_constant_folding/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00172_early_constant_folding/metadata.json b/parser/testdata/00172_early_constant_folding/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00172_early_constant_folding/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00172_early_constant_folding/query.sql b/parser/testdata/00172_early_constant_folding/query.sql
new file mode 100644
index 000000000..d8294bb1f
--- /dev/null
+++ b/parser/testdata/00172_early_constant_folding/query.sql
@@ -0,0 +1,6 @@
+-- Tags: stateful, no-parallel-replicas
+
+set max_threads=10;
+set optimize_use_implicit_projections=1;
+EXPLAIN PIPELINE SELECT count(JavaEnable) FROM test.hits WHERE WatchID = 1 OR Title = 'next' OR URL = 'prev' OR URL = '???' OR 1 SETTINGS enable_analyzer = 0;
+EXPLAIN PIPELINE SELECT count(JavaEnable) FROM test.hits WHERE WatchID = 1 OR Title = 'next' OR URL = 'prev' OR URL = '???' OR 1 SETTINGS enable_analyzer = 1;
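Both EXPLAIN PIPELINE statements above rely on the trailing OR 1 making the whole WHERE clause constant-true, so the filter can be dropped early. The folding itself is easy to check directly:

SELECT count() FROM numbers(10) WHERE number = 1 OR 1; -- the predicate is always true, so all 10 rows are counted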
diff --git a/parser/testdata/00173_compare_date_time_with_constant_string/ast.json b/parser/testdata/00173_compare_date_time_with_constant_string/ast.json
new file mode 100644
index 000000000..1d6110dc6
--- /dev/null
+++ b/parser/testdata/00173_compare_date_time_with_constant_string/ast.json
@@ -0,0 +1,52 @@
+{
+  "meta":
+  [
+    {
+      "name": "explain",
+      "type": "String"
+    }
+  ],
+
+  "data":
+  [
+    {
+      "explain": "SelectWithUnionQuery (children 1)"
+    },
+    {
+      "explain": " ExpressionList (children 1)"
+    },
+    {
+      "explain": "  SelectQuery (children 1)"
+    },
+    {
+      "explain": "   ExpressionList (children 1)"
+    },
+    {
+      "explain": "    Function equals (children 1)"
+    },
+    {
+      "explain": "     ExpressionList (children 2)"
+    },
+    {
+      "explain": "      Function toDate (children 1)"
+    },
+    {
+      "explain": "       ExpressionList (children 1)"
+    },
+    {
+      "explain": "        Literal '2015-02-03'"
+    },
+    {
+      "explain": "      Literal '2015-02-03'"
+    }
+  ],
+
+  "rows": 10,
+
+  "statistics":
+  {
+    "elapsed": 0.001551051,
+    "rows_read": 10,
+    "bytes_read": 383
+  }
+}
diff --git a/parser/testdata/00173_compare_date_time_with_constant_string/metadata.json b/parser/testdata/00173_compare_date_time_with_constant_string/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00173_compare_date_time_with_constant_string/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00173_compare_date_time_with_constant_string/query.sql b/parser/testdata/00173_compare_date_time_with_constant_string/query.sql
new file mode 100644
index 000000000..c89292a25
--- /dev/null
+++ b/parser/testdata/00173_compare_date_time_with_constant_string/query.sql
@@ -0,0 +1,83 @@
+SELECT toDate('2015-02-03') = '2015-02-03';
+SELECT '2015-02-03' = toDate('2015-02-03');
+SELECT toDate('2015-02-03') = '2015-02-04';
+SELECT '2015-02-03' = toDate('2015-02-04');
+SELECT toDate('2015-02-03') < '2015-02-04';
+SELECT '2015-02-03' < toDate('2015-02-04');
+SELECT toDate('2015-02-03') > '2015-02-04';
+SELECT '2015-02-03' > toDate('2015-02-04');
+SELECT toDate('2015-02-03') <= '2015-02-04';
+SELECT '2015-02-03' <= toDate('2015-02-04');
+SELECT toDate('2015-02-03') >= '2015-02-04';
+SELECT '2015-02-03' >= toDate('2015-02-04');
+SELECT toDate('2015-02-05') < '2015-02-04';
+SELECT '2015-02-05' < toDate('2015-02-04');
+SELECT toDate('2015-02-05') > '2015-02-04';
+SELECT '2015-02-05' > toDate('2015-02-04');
+SELECT toDate('2015-02-05') <= '2015-02-04';
+SELECT '2015-02-05' <= toDate('2015-02-04');
+SELECT toDate('2015-02-05') >= '2015-02-04';
+SELECT '2015-02-05' >= toDate('2015-02-04');
+
+SELECT materialize(toDate('2015-02-03')) = '2015-02-03';
+SELECT '2015-02-03' = materialize(toDate('2015-02-03'));
+SELECT materialize(toDate('2015-02-03')) = '2015-02-04';
+SELECT '2015-02-03' = materialize(toDate('2015-02-04'));
+SELECT materialize(toDate('2015-02-03')) < '2015-02-04';
+SELECT '2015-02-03' < materialize(toDate('2015-02-04'));
+SELECT materialize(toDate('2015-02-03')) > '2015-02-04';
+SELECT '2015-02-03' > materialize(toDate('2015-02-04'));
+SELECT materialize(toDate('2015-02-03')) <= '2015-02-04';
+SELECT '2015-02-03' <= materialize(toDate('2015-02-04'));
+SELECT materialize(toDate('2015-02-03')) >= '2015-02-04';
+SELECT '2015-02-03' >= materialize(toDate('2015-02-04'));
+SELECT materialize(toDate('2015-02-05')) < '2015-02-04';
+SELECT '2015-02-05' < materialize(toDate('2015-02-04'));
+SELECT materialize(toDate('2015-02-05')) > '2015-02-04';
+SELECT '2015-02-05' > materialize(toDate('2015-02-04'));
+SELECT materialize(toDate('2015-02-05')) <= '2015-02-04';
+SELECT '2015-02-05' <= materialize(toDate('2015-02-04'));
+SELECT materialize(toDate('2015-02-05')) >= '2015-02-04';
+SELECT '2015-02-05' >= materialize(toDate('2015-02-04'));
+
+SELECT toDateTime('2015-02-03 04:05:06') = '2015-02-03 04:05:06';
+SELECT '2015-02-03 04:05:06' = toDateTime('2015-02-03 04:05:06');
+SELECT toDateTime('2015-02-03 04:05:06') = '2015-02-03 05:06:07';
+SELECT '2015-02-03 04:05:06' = toDateTime('2015-02-03 05:06:07');
+SELECT toDateTime('2015-02-03 04:05:06') < '2015-02-03 05:06:07';
+SELECT '2015-02-03 04:05:06' < toDateTime('2015-02-03 05:06:07');
+SELECT toDateTime('2015-02-03 04:05:06') > '2015-02-03 05:06:07';
+SELECT '2015-02-03 04:05:06' > toDateTime('2015-02-03 05:06:07');
+SELECT toDateTime('2015-02-03 04:05:06') <= '2015-02-03 05:06:07';
+SELECT '2015-02-03 04:05:06' <= toDateTime('2015-02-03 05:06:07');
+SELECT toDateTime('2015-02-03 04:05:06') >= '2015-02-03 05:06:07';
+SELECT '2015-02-03 04:05:06' >= toDateTime('2015-02-03 05:06:07');
+SELECT toDateTime('2015-02-03 06:07:08') < '2015-02-03 05:06:07';
+SELECT '2015-02-03 06:07:08' < toDateTime('2015-02-03 05:06:07');
+SELECT toDateTime('2015-02-03 06:07:08') > '2015-02-03 05:06:07';
+SELECT '2015-02-03 06:07:08' > toDateTime('2015-02-03 05:06:07');
+SELECT toDateTime('2015-02-03 06:07:08') <= '2015-02-03 05:06:07';
+SELECT '2015-02-03 06:07:08' <= toDateTime('2015-02-03 05:06:07');
+SELECT toDateTime('2015-02-03 06:07:08') >= '2015-02-03 05:06:07';
+SELECT '2015-02-03 06:07:08' >= toDateTime('2015-02-03 05:06:07');
+
+SELECT materialize(toDateTime('2015-02-03 04:05:06')) = '2015-02-03 04:05:06';
+SELECT '2015-02-03 04:05:06' = materialize(toDateTime('2015-02-03 04:05:06'));
+SELECT materialize(toDateTime('2015-02-03 04:05:06')) = '2015-02-03 05:06:07';
+SELECT '2015-02-03 04:05:06' = materialize(toDateTime('2015-02-03 05:06:07'));
+SELECT materialize(toDateTime('2015-02-03 04:05:06')) < '2015-02-03 05:06:07';
+SELECT '2015-02-03 04:05:06' < materialize(toDateTime('2015-02-03 05:06:07'));
+SELECT materialize(toDateTime('2015-02-03 04:05:06')) > '2015-02-03 05:06:07';
+SELECT '2015-02-03 04:05:06' > materialize(toDateTime('2015-02-03 05:06:07'));
+SELECT materialize(toDateTime('2015-02-03 04:05:06')) <= '2015-02-03 05:06:07';
+SELECT '2015-02-03 04:05:06' <= materialize(toDateTime('2015-02-03 05:06:07'));
+SELECT materialize(toDateTime('2015-02-03 04:05:06')) >= '2015-02-03 05:06:07';
+SELECT '2015-02-03 04:05:06' >= materialize(toDateTime('2015-02-03 05:06:07'));
+SELECT materialize(toDateTime('2015-02-03 06:07:08')) < '2015-02-03 05:06:07';
+SELECT '2015-02-03 06:07:08' < materialize(toDateTime('2015-02-03 05:06:07'));
+SELECT materialize(toDateTime('2015-02-03 06:07:08')) > '2015-02-03 05:06:07';
+SELECT '2015-02-03 06:07:08' > materialize(toDateTime('2015-02-03 05:06:07'));
+SELECT materialize(toDateTime('2015-02-03 06:07:08')) <= '2015-02-03 05:06:07';
+SELECT '2015-02-03 06:07:08' <= materialize(toDateTime('2015-02-03 05:06:07'));
+SELECT materialize(toDateTime('2015-02-03 06:07:08')) >= '2015-02-03 05:06:07';
+SELECT '2015-02-03 06:07:08' >= materialize(toDateTime('2015-02-03 05:06:07'));
diff --git a/parser/testdata/00173_group_by_use_nulls/ast.json b/parser/testdata/00173_group_by_use_nulls/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00173_group_by_use_nulls/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00173_group_by_use_nulls/metadata.json b/parser/testdata/00173_group_by_use_nulls/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00173_group_by_use_nulls/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00173_group_by_use_nulls/query.sql b/parser/testdata/00173_group_by_use_nulls/query.sql
new file mode 100644
index 000000000..7b70e0fb9
--- /dev/null
+++ b/parser/testdata/00173_group_by_use_nulls/query.sql
@@ -0,0 +1,36 @@
+-- Tags: stateful
+SELECT
+    CounterID AS k,
+    quantileBFloat16(0.5)(ResolutionWidth)
+FROM remote('127.0.0.{1,2}', test, hits)
+GROUP BY k
+ORDER BY
+    count() DESC,
+    CounterID ASC
+LIMIT 10
+SETTINGS group_by_use_nulls = 1;
+
+SELECT
+    CounterID AS k,
+    quantileBFloat16(0.5)(ResolutionWidth)
+FROM test.hits
+GROUP BY k
+ORDER BY
+    count() DESC,
+    CounterID ASC
+LIMIT 10
+SETTINGS group_by_use_nulls = 1 FORMAT Null;
+
+-- { echoOn }
+set enable_analyzer = 1;
+
+SELECT
+    CounterID AS k,
+    quantileBFloat16(0.5)(ResolutionWidth)
+FROM remote('127.0.0.{1,2}', test, hits)
+GROUP BY k
+ORDER BY
+    count() DESC,
+    CounterID ASC
+LIMIT 10
+SETTINGS group_by_use_nulls = 1;
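The -- { echoOn } marker above is a ClickHouse test-runner directive: from that point on each statement is echoed into the reference output ahead of its result, pinning down the queries as well as their answers.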
+ "explain": " Literal '2015-02-05'" + }, + { + "explain": " Literal Tuple_('2015-02-04', '2015-02-05')" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001501417, + "rows_read": 10, + "bytes_read": 401 + } +} diff --git a/parser/testdata/00174_compare_date_time_with_constant_string_in_in/metadata.json b/parser/testdata/00174_compare_date_time_with_constant_string_in_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00174_compare_date_time_with_constant_string_in_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00174_compare_date_time_with_constant_string_in_in/query.sql b/parser/testdata/00174_compare_date_time_with_constant_string_in_in/query.sql new file mode 100644 index 000000000..565163cfc --- /dev/null +++ b/parser/testdata/00174_compare_date_time_with_constant_string_in_in/query.sql @@ -0,0 +1,8 @@ +SELECT toDate('2015-02-05') IN ('2015-02-04', '2015-02-05'); +SELECT toDate('2015-02-05') IN ('2015-02-04', '2015-02-06'); +SELECT toDateTime('2015-02-03 04:05:06') IN ('2015-02-03 04:05:06', '2015-02-03 05:06:07'); +SELECT toDateTime('2015-02-03 04:05:06') IN ('2015-02-04 04:05:06', '2015-02-03 05:06:07'); +SELECT toDate('2015-02-05') NOT IN ('2015-02-04', '2015-02-05'); +SELECT toDate('2015-02-05') NOT IN ('2015-02-04', '2015-02-06'); +SELECT toDateTime('2015-02-03 04:05:06') NOT IN ('2015-02-03 04:05:06', '2015-02-03 05:06:07'); +SELECT toDateTime('2015-02-03 04:05:06') NOT IN ('2015-02-04 04:05:06', '2015-02-03 05:06:07'); diff --git a/parser/testdata/00174_distinct_in_order/ast.json b/parser/testdata/00174_distinct_in_order/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00174_distinct_in_order/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00174_distinct_in_order/metadata.json b/parser/testdata/00174_distinct_in_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00174_distinct_in_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00174_distinct_in_order/query.sql b/parser/testdata/00174_distinct_in_order/query.sql new file mode 100644 index 000000000..eedf8bb50 --- /dev/null +++ b/parser/testdata/00174_distinct_in_order/query.sql @@ -0,0 +1,25 @@ +-- Tags: stateful +select '-- check that distinct with and w/o optimization produce the same result'; + +drop table if exists distinct_in_order sync; +drop table if exists ordinary_distinct sync; + +select '-- DISTINCT columns are the same as in ORDER BY'; +create table distinct_in_order (CounterID UInt32, EventDate Date) engine=MergeTree() order by (CounterID, EventDate) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into distinct_in_order select distinct CounterID, EventDate from test.hits order by CounterID, EventDate settings optimize_distinct_in_order=1; +create table ordinary_distinct (CounterID UInt32, EventDate Date) engine=MergeTree() order by (CounterID, EventDate) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into ordinary_distinct select distinct CounterID, EventDate from test.hits order by CounterID, EventDate settings optimize_distinct_in_order=0; +select distinct * from distinct_in_order except select * from ordinary_distinct; + +drop table if exists distinct_in_order sync; +drop table if exists ordinary_distinct sync; + +select '-- DISTINCT columns has prefix in ORDER BY columns'; +create table distinct_in_order (CounterID 
+create table distinct_in_order (CounterID UInt32, EventDate Date) engine=MergeTree() order by (CounterID, EventDate) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';
+insert into distinct_in_order select distinct CounterID, EventDate from test.hits order by CounterID settings optimize_distinct_in_order=1;
+create table ordinary_distinct (CounterID UInt32, EventDate Date) engine=MergeTree() order by (CounterID, EventDate) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';
+insert into ordinary_distinct select distinct CounterID, EventDate from test.hits order by CounterID settings optimize_distinct_in_order=0;
+select distinct * from distinct_in_order except select * from ordinary_distinct;
+
+drop table if exists distinct_in_order sync;
+drop table if exists ordinary_distinct sync;
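The paired tables above differ only in the optimize_distinct_in_order setting used while inserting, and the EXCEPT probes assert that both variants yield the same rows. The optimization applies when the DISTINCT columns form a prefix of the table's sorting key; a minimal sketch, assuming a scratch table t:

CREATE TABLE t (a UInt32, b UInt32) ENGINE = MergeTree ORDER BY (a, b);
SELECT DISTINCT a FROM t SETTINGS optimize_distinct_in_order = 1; -- 'a' is a prefix of the sorting key (a, b)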
diff --git a/parser/testdata/00175_counting_resources_in_subqueries/ast.json b/parser/testdata/00175_counting_resources_in_subqueries/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00175_counting_resources_in_subqueries/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00175_counting_resources_in_subqueries/metadata.json b/parser/testdata/00175_counting_resources_in_subqueries/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00175_counting_resources_in_subqueries/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00175_counting_resources_in_subqueries/query.sql b/parser/testdata/00175_counting_resources_in_subqueries/query.sql
new file mode 100644
index 000000000..dfcad7e92
--- /dev/null
+++ b/parser/testdata/00175_counting_resources_in_subqueries/query.sql
@@ -0,0 +1,23 @@
+-- Tags: stateful
+SET optimize_use_implicit_projections = 0;
+
+-- the work for a scalar subquery is properly accounted for:
+SET max_rows_to_read = 1000000;
+SELECT 1 = (SELECT count() FROM test.hits WHERE NOT ignore(AdvEngineID)); -- { serverError TOO_MANY_ROWS }
+
+-- the work for a subquery in IN is properly accounted for:
+SET max_rows_to_read = 1000000;
+SELECT 1 IN (SELECT count() FROM test.hits WHERE NOT ignore(AdvEngineID)); -- { serverError TOO_MANY_ROWS }
+
+-- this query reads from the table twice:
+SET max_rows_to_read = 15000000;
+SELECT count() IN (SELECT count() FROM test.hits WHERE NOT ignore(AdvEngineID)) FROM test.hits WHERE NOT ignore(AdvEngineID); -- { serverError TOO_MANY_ROWS }
+
+-- the resources are properly accounted for even if the subquery is evaluated in advance to facilitate the index analysis.
+-- this query uses the index and filters out the second reading pass.
+SET max_rows_to_read = 1000000;
+SELECT count() FROM test.hits WHERE CounterID > (SELECT count() FROM test.hits WHERE NOT ignore(AdvEngineID)); -- { serverError TOO_MANY_ROWS }
+
+-- this query uses the index but has to read all the data twice.
+SET max_rows_to_read = 10000000;
+SELECT count() FROM test.hits WHERE CounterID < (SELECT count() FROM test.hits WHERE NOT ignore(AdvEngineID)); -- { serverError TOO_MANY_ROWS }
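The trailing -- { serverError TOO_MANY_ROWS } annotations above are test-runner directives: each statement is expected to fail with exactly that error, and the test fails if it succeeds instead. The same mechanism works for any limit, e.g.:

SELECT count() FROM numbers(100) SETTINGS max_rows_to_read = 10; -- { serverError TOO_MANY_ROWS }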
diff --git a/parser/testdata/00175_if_num_arrays/ast.json b/parser/testdata/00175_if_num_arrays/ast.json
new file mode 100644
index 000000000..c254dd560
--- /dev/null
+++ b/parser/testdata/00175_if_num_arrays/ast.json
@@ -0,0 +1,76 @@
+{
+  "meta":
+  [
+    {
+      "name": "explain",
+      "type": "String"
+    }
+  ],
+
+  "data":
+  [
+    {
+      "explain": "SelectWithUnionQuery (children 2)"
+    },
+    {
+      "explain": " ExpressionList (children 1)"
+    },
+    {
+      "explain": "  SelectQuery (children 3)"
+    },
+    {
+      "explain": "   ExpressionList (children 1)"
+    },
+    {
+      "explain": "    Function if (alias res) (children 1)"
+    },
+    {
+      "explain": "     ExpressionList (children 3)"
+    },
+    {
+      "explain": "      Function modulo (children 1)"
+    },
+    {
+      "explain": "       ExpressionList (children 2)"
+    },
+    {
+      "explain": "        Identifier number"
+    },
+    {
+      "explain": "        Literal UInt64_2"
+    },
+    {
+      "explain": "      Literal Array_[UInt64_1, UInt64_2]"
+    },
+    {
+      "explain": "      Literal Array_[UInt64_3, UInt64_4, UInt64_5]"
+    },
+    {
+      "explain": "   TablesInSelectQuery (children 1)"
+    },
+    {
+      "explain": "    TablesInSelectQueryElement (children 1)"
+    },
+    {
+      "explain": "     TableExpression (children 1)"
+    },
+    {
+      "explain": "      TableIdentifier system.numbers"
+    },
+    {
+      "explain": "   Literal UInt64_10"
+    },
+    {
+      "explain": " Identifier TabSeparatedWithNamesAndTypes"
+    }
+  ],
+
+  "rows": 18,
+
+  "statistics":
+  {
+    "elapsed": 0.001467315,
+    "rows_read": 18,
+    "bytes_read": 748
+  }
+}
diff --git a/parser/testdata/00175_if_num_arrays/metadata.json b/parser/testdata/00175_if_num_arrays/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00175_if_num_arrays/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00175_if_num_arrays/query.sql b/parser/testdata/00175_if_num_arrays/query.sql
new file mode 100644
index 000000000..11cae872c
--- /dev/null
+++ b/parser/testdata/00175_if_num_arrays/query.sql
@@ -0,0 +1,30 @@
+SELECT number % 2 ? [1, 2] : [3, 4, 5] AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+SELECT number % 2 ? materialize([1, 2]) : [3, 4, 5] AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+SELECT number % 2 ? [1, 2] : materialize([3, 4, 5]) AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+SELECT number % 2 ? materialize([1, 2]) : materialize([3, 4, 5]) AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+
+SELECT number % 2 ? [1, 2] : emptyArrayInt64() AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+SELECT number % 2 ? [1, 2] : range(number) AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+SELECT number % 2 ? range(number) : range(toUInt64(10 - number)) AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+
+SELECT number % 2 ? [256, 257] : [300, -500000, 500] AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+SELECT number % 2 ? [1, 2] : [3, 4, -5] AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+SELECT number % 2 ? [256] : [3, 4, -5] AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+SELECT number % 2 ? [0xFFFFFFFF] : [-1] AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+
+SELECT number % 2 ? materialize([256, 257]) : [300, -500000, 500] AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+SELECT number % 2 ? materialize([1, 2]) : [3, 4, -5] AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+SELECT number % 2 ? materialize([256]) : [3, 4, -5] AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+SELECT number % 2 ? materialize([0xFFFFFFFF]) : [-1] AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+
+SELECT number % 2 ? [256, 257] : materialize([300, -500000, 500]) AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+SELECT number % 2 ? [1, 2] : materialize([3, 4, -5]) AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+SELECT number % 2 ? [256] : materialize([3, 4, -5]) AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+SELECT number % 2 ? [0xFFFFFFFF] : materialize([-1]) AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+
+SELECT number % 2 ? materialize([256, 257]) : materialize([300, -500000, 500]) AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+SELECT number % 2 ? materialize([1, 2]) : materialize([3, 4, -5]) AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+SELECT number % 2 ? materialize([256]) : materialize([3, 4, -5]) AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+SELECT number % 2 ? materialize([0xFFFFFFFF]) : materialize([-1]) AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
+
+SELECT number % 2 ? [1.1, 2] : emptyArrayInt32() AS res FROM system.numbers LIMIT 10 FORMAT TabSeparatedWithNamesAndTypes;
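Each branch pair above has to resolve to a common array supertype, and mixing signed, unsigned, and float element types widens the result accordingly. For instance:

SELECT toTypeName(number % 2 ? [0xFFFFFFFF] : [-1]) FROM numbers(1); -- Array(Int64), the narrowest signed type holding both UInt32 and Int8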
diff --git a/parser/testdata/00175_partition_by_ignore/ast.json b/parser/testdata/00175_partition_by_ignore/ast.json
new file mode 100644
index 000000000..891a6598b
--- /dev/null
+++ b/parser/testdata/00175_partition_by_ignore/ast.json
@@ -0,0 +1,37 @@
+{
+  "meta":
+  [
+    {
+      "name": "explain",
+      "type": "String"
+    }
+  ],
+
+  "data":
+  [
+    {
+      "explain": "SelectWithUnionQuery (children 1)"
+    },
+    {
+      "explain": " ExpressionList (children 1)"
+    },
+    {
+      "explain": "  SelectQuery (children 1)"
+    },
+    {
+      "explain": "   ExpressionList (children 1)"
+    },
+    {
+      "explain": "    Literal '-- check that partition key with ignore works correctly'"
+    }
+  ],
+
+  "rows": 5,
+
+  "statistics":
+  {
+    "elapsed": 0.00131956,
+    "rows_read": 5,
+    "bytes_read": 226
+  }
+}
diff --git a/parser/testdata/00175_partition_by_ignore/metadata.json b/parser/testdata/00175_partition_by_ignore/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00175_partition_by_ignore/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00175_partition_by_ignore/query.sql b/parser/testdata/00175_partition_by_ignore/query.sql
new file mode 100644
index 000000000..19d63c82a
--- /dev/null
+++ b/parser/testdata/00175_partition_by_ignore/query.sql
@@ -0,0 +1,11 @@
+SELECT '-- check that partition key with ignore works correctly';
+
+DROP TABLE IF EXISTS partition_by_ignore SYNC;
+
+CREATE TABLE partition_by_ignore (ts DateTime, ts_2 DateTime) ENGINE=MergeTree PARTITION BY (toYYYYMM(ts), ignore(ts_2)) ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';
+INSERT INTO partition_by_ignore SELECT toDateTime('2022-08-03 00:00:00') + toIntervalDay(number), toDateTime('2022-08-04 00:00:00') + toIntervalDay(number) FROM numbers(60);
+
+EXPLAIN ESTIMATE SELECT count() FROM partition_by_ignore WHERE ts BETWEEN toDateTime('2022-08-07 00:00:00') AND toDateTime('2022-08-10 00:00:00') FORMAT CSV;
+EXPLAIN ESTIMATE SELECT count() FROM partition_by_ignore WHERE ts_2 BETWEEN toDateTime('2022-08-07 00:00:00') AND toDateTime('2022-08-10 00:00:00') FORMAT CSV;
+
+DROP TABLE IF EXISTS partition_by_ignore SYNC;
diff --git a/parser/testdata/00176_distinct_limit_by_limit_bug_43377/ast.json b/parser/testdata/00176_distinct_limit_by_limit_bug_43377/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00176_distinct_limit_by_limit_bug_43377/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00176_distinct_limit_by_limit_bug_43377/metadata.json b/parser/testdata/00176_distinct_limit_by_limit_bug_43377/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00176_distinct_limit_by_limit_bug_43377/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00176_distinct_limit_by_limit_bug_43377/query.sql b/parser/testdata/00176_distinct_limit_by_limit_bug_43377/query.sql
new file mode 100644
index 000000000..d78398ffb
--- /dev/null
+++ b/parser/testdata/00176_distinct_limit_by_limit_bug_43377/query.sql
@@ -0,0 +1,12 @@
+-- Tags: stateful
+SELECT count()
+FROM
+(
+    SELECT DISTINCT
+        Title,
+        SearchPhrase
+    FROM test.hits
+    WHERE (SearchPhrase != '') AND (NOT match(Title, '[а-яА-ЯёЁ]')) AND (NOT match(SearchPhrase, '[а-яА-ЯёЁ]'))
+    LIMIT 1 BY Title
+    LIMIT 10
+);
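LIMIT n BY expr keeps at most n rows per distinct value of expr and is applied before the outer LIMIT; issue 43377, which this test pins down, concerned that combination under DISTINCT. A minimal illustration:

SELECT number % 3 AS g, number FROM numbers(10) ORDER BY number LIMIT 1 BY g; -- one row for each value of g: 0, 1, 2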
diff --git a/parser/testdata/00176_if_string_arrays/ast.json b/parser/testdata/00176_if_string_arrays/ast.json
new file mode 100644
index 000000000..e1d090f7b
--- /dev/null
+++ b/parser/testdata/00176_if_string_arrays/ast.json
@@ -0,0 +1,73 @@
+{
+  "meta":
+  [
+    {
+      "name": "explain",
+      "type": "String"
+    }
+  ],
+
+  "data":
+  [
+    {
+      "explain": "SelectWithUnionQuery (children 1)"
+    },
+    {
+      "explain": " ExpressionList (children 1)"
+    },
+    {
+      "explain": "  SelectQuery (children 3)"
+    },
+    {
+      "explain": "   ExpressionList (children 1)"
+    },
+    {
+      "explain": "    Function if (children 1)"
+    },
+    {
+      "explain": "     ExpressionList (children 3)"
+    },
+    {
+      "explain": "      Function modulo (children 1)"
+    },
+    {
+      "explain": "       ExpressionList (children 2)"
+    },
+    {
+      "explain": "        Identifier number"
+    },
+    {
+      "explain": "        Literal UInt64_2"
+    },
+    {
+      "explain": "      Literal Array_['Hello', 'World']"
+    },
+    {
+      "explain": "      Literal Array_['abc']"
+    },
+    {
+      "explain": "   TablesInSelectQuery (children 1)"
+    },
+    {
+      "explain": "    TablesInSelectQueryElement (children 1)"
+    },
+    {
+      "explain": "     TableExpression (children 1)"
+    },
+    {
+      "explain": "      TableIdentifier system.numbers"
+    },
+    {
+      "explain": "   Literal UInt64_10"
+    }
+  ],
+
+  "rows": 17,
+
+  "statistics":
+  {
+    "elapsed": 0.001575281,
+    "rows_read": 17,
+    "bytes_read": 662
+  }
+}
diff --git a/parser/testdata/00176_if_string_arrays/metadata.json b/parser/testdata/00176_if_string_arrays/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00176_if_string_arrays/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00176_if_string_arrays/query.sql b/parser/testdata/00176_if_string_arrays/query.sql
new file mode 100644
index 000000000..4f752b47c
--- /dev/null
+++ b/parser/testdata/00176_if_string_arrays/query.sql
@@ -0,0 +1,12 @@
+SELECT number % 2 ? ['Hello', 'World'] : ['abc'] FROM system.numbers LIMIT 10;
+SELECT number % 2 ? materialize(['Hello', 'World']) : ['abc'] FROM system.numbers LIMIT 10;
+SELECT number % 2 ? ['Hello', 'World'] : materialize(['abc']) FROM system.numbers LIMIT 10;
+SELECT number % 2 ? materialize(['Hello', 'World']) : materialize(['abc']) FROM system.numbers LIMIT 10;
+
+SELECT number % 2 ? ['Hello', '', 'World!'] : emptyArrayString() FROM system.numbers LIMIT 10;
+SELECT number % 2 ? materialize(['Hello', '', 'World!']) : emptyArrayString() FROM system.numbers LIMIT 10;
+
+SELECT number % 2 ? [''] : ['', ''] FROM system.numbers LIMIT 10;
+SELECT number % 2 ? materialize(['']) : ['', ''] FROM system.numbers LIMIT 10;
+SELECT number % 2 ? [''] : materialize(['', '']) FROM system.numbers LIMIT 10;
+SELECT number % 2 ? materialize(['']) : materialize(['', '']) FROM system.numbers LIMIT 10;
diff --git a/parser/testdata/00178_function_replicate/ast.json b/parser/testdata/00178_function_replicate/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00178_function_replicate/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00178_function_replicate/metadata.json b/parser/testdata/00178_function_replicate/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00178_function_replicate/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00178_function_replicate/query.sql b/parser/testdata/00178_function_replicate/query.sql
new file mode 100644
index 000000000..832334774
--- /dev/null
+++ b/parser/testdata/00178_function_replicate/query.sql
@@ -0,0 +1,11 @@
+-- Tags: replica
+
+SELECT
+    number,
+    range(number) AS arr,
+    replicate(number, arr),
+    replicate(toString(number), arr),
+    replicate(range(number), arr),
+    replicate(arrayMap(x -> toString(x), range(number)), arr)
+FROM system.numbers
+LIMIT 10;
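replicate(x, arr), exercised above, is an internal helper that for every row builds an array of the same length as arr with each element equal to x:

SELECT replicate(1, [10, 20, 30]); -- [1, 1, 1]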
diff --git a/parser/testdata/00178_quantile_ddsketch/query.sql b/parser/testdata/00178_quantile_ddsketch/query.sql
new file mode 100644
index 000000000..23b9ef332
--- /dev/null
+++ b/parser/testdata/00178_quantile_ddsketch/query.sql
@@ -0,0 +1,6 @@
+-- Tags: stateful
+SELECT CounterID AS k, round(quantileDD(0.01, 0.5)(ResolutionWidth), 2) FROM test.hits GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10;
+SELECT CounterID AS k, arrayMap(a -> round(a, 2), quantilesDD(0.01, 0.1, 0.5, 0.9, 0.99, 0.999)(ResolutionWidth)) FROM test.hits GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10;
+
+SELECT CounterID AS k, round(quantileDD(0.01, 0.5)(ResolutionWidth), 2) FROM remote('127.0.0.{1,2}', test.hits) GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10;
+SELECT CounterID AS k, arrayMap(a -> round(a, 2), quantilesDD(0.01, 0.1, 0.5, 0.9, 0.99, 0.999)(ResolutionWidth)) FROM remote('127.0.0.{1,2}', test.hits) GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10;
diff --git a/parser/testdata/00178_query_datetime64_index/ast.json b/parser/testdata/00178_query_datetime64_index/ast.json
new file mode 100644
index 000000000..6ca71a104
--- /dev/null
+++ b/parser/testdata/00178_query_datetime64_index/ast.json
@@ -0,0 +1,28 @@
+{
+  "meta":
+  [
+    {
+      "name": "explain",
+      "type": "String"
+    }
+  ],
+
+  "data":
+  [
+    {
+      "explain": "DropQuery datetime64_index_tbl (children 1)"
+    },
+    {
+      "explain": " Identifier datetime64_index_tbl"
+    }
+  ],
+
+  "rows": 2,
+
+  "statistics":
+  {
+    "elapsed": 0.001230579,
+    "rows_read": 2,
+    "bytes_read": 92
+  }
+}
diff --git a/parser/testdata/00178_query_datetime64_index/metadata.json b/parser/testdata/00178_query_datetime64_index/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00178_query_datetime64_index/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00178_query_datetime64_index/query.sql b/parser/testdata/00178_query_datetime64_index/query.sql
new file mode 100644
index 000000000..a3fb594db
--- /dev/null
+++ b/parser/testdata/00178_query_datetime64_index/query.sql
@@ -0,0 +1,9 @@
+DROP TABLE IF EXISTS datetime64_index_tbl;
+
+CREATE TABLE datetime64_index_tbl(ts DateTime64(3, 'UTC')) ENGINE=MergeTree ORDER BY ts;
+INSERT INTO datetime64_index_tbl(ts) VALUES(toDateTime64('2023-05-27 00:00:00', 3, 'UTC'));
+
+SELECT ts FROM datetime64_index_tbl WHERE ts < toDate('2023-05-28');
+SELECT ts FROM datetime64_index_tbl WHERE ts < toDate32('2023-05-28');
+
+DROP TABLE datetime64_index_tbl;
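DateTime64(3, 'UTC') stores millisecond precision; the point of the test above is that comparing such a column against a plain Date or Date32 constant must still work with the primary-key index. The precision itself is easy to confirm:

SELECT toDateTime64('2023-05-27 00:00:00.123', 3, 'UTC'); -- keeps the .123 milliseconds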
diff --git a/parser/testdata/00179_lambdas_with_common_expressions_and_filter/ast.json b/parser/testdata/00179_lambdas_with_common_expressions_and_filter/ast.json
new file mode 100644
index 000000000..963e12f40
--- /dev/null
+++ b/parser/testdata/00179_lambdas_with_common_expressions_and_filter/ast.json
@@ -0,0 +1,82 @@
+{
+  "meta":
+  [
+    {
+      "name": "explain",
+      "type": "String"
+    }
+  ],
+
+  "data":
+  [
+    {
+      "explain": "SelectWithUnionQuery (children 1)"
+    },
+    {
+      "explain": " ExpressionList (children 1)"
+    },
+    {
+      "explain": "  SelectQuery (children 1)"
+    },
+    {
+      "explain": "   ExpressionList (children 1)"
+    },
+    {
+      "explain": "    Function arrayMap (children 1)"
+    },
+    {
+      "explain": "     ExpressionList (children 2)"
+    },
+    {
+      "explain": "      Function lambda (children 1)"
+    },
+    {
+      "explain": "       ExpressionList (children 2)"
+    },
+    {
+      "explain": "        Function tuple (children 1)"
+    },
+    {
+      "explain": "         ExpressionList (children 1)"
+    },
+    {
+      "explain": "          Identifier x"
+    },
+    {
+      "explain": "        Function if (children 1)"
+    },
+    {
+      "explain": "         ExpressionList (children 3)"
+    },
+    {
+      "explain": "          Function notEquals (children 1)"
+    },
+    {
+      "explain": "           ExpressionList (children 2)"
+    },
+    {
+      "explain": "            Identifier number"
+    },
+    {
+      "explain": "            Literal Int64_-1"
+    },
+    {
+      "explain": "          Identifier x"
+    },
+    {
+      "explain": "          Literal UInt64_0"
+    },
+    {
+      "explain": "      Identifier arr"
+    }
+  ],
+
+  "rows": 20,
+
+  "statistics":
+  {
+    "elapsed": 0.001980497,
+    "rows_read": 20,
+    "bytes_read": 776
+  }
+}
diff --git a/parser/testdata/00179_lambdas_with_common_expressions_and_filter/metadata.json b/parser/testdata/00179_lambdas_with_common_expressions_and_filter/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00179_lambdas_with_common_expressions_and_filter/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00179_lambdas_with_common_expressions_and_filter/query.sql b/parser/testdata/00179_lambdas_with_common_expressions_and_filter/query.sql
new file mode 100644
index 000000000..b5eefa57a
--- /dev/null
+++ b/parser/testdata/00179_lambdas_with_common_expressions_and_filter/query.sql
@@ -0,0 +1,3 @@
+SELECT arrayMap(x -> number != -1 ? x : 0, arr)
+FROM (SELECT number, range(number) AS arr FROM system.numbers LIMIT 10)
+WHERE number % 2 = 1 AND arrayExists(x -> number != -1, arr);
diff --git a/parser/testdata/00180_attach_materialized_view/ast.json b/parser/testdata/00180_attach_materialized_view/ast.json
new file mode 100644
index 000000000..24a5cb9b4
--- /dev/null
+++ b/parser/testdata/00180_attach_materialized_view/ast.json
@@ -0,0 +1,28 @@
+{
+  "meta":
+  [
+    {
+      "name": "explain",
+      "type": "String"
+    }
+  ],
+
+  "data":
+  [
+    {
+      "explain": "DropQuery t_00180 (children 1)"
+    },
+    {
+      "explain": " Identifier t_00180"
+    }
+  ],
+
+  "rows": 2,
+
+  "statistics":
+  {
+    "elapsed": 0.001243763,
+    "rows_read": 2,
+    "bytes_read": 66
+  }
+}
diff --git a/parser/testdata/00180_attach_materialized_view/metadata.json b/parser/testdata/00180_attach_materialized_view/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00180_attach_materialized_view/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00180_attach_materialized_view/query.sql b/parser/testdata/00180_attach_materialized_view/query.sql
new file mode 100644
index 000000000..d674c0bd2
--- /dev/null
+++ b/parser/testdata/00180_attach_materialized_view/query.sql
@@ -0,0 +1,12 @@
+DROP TABLE IF EXISTS t_00180;
+DROP TABLE IF EXISTS mv_00180;
+DROP TABLE IF EXISTS `.inner.mv_00180`;
+
+CREATE TABLE t_00180 (x UInt8) ENGINE = Null;
+CREATE MATERIALIZED VIEW mv_00180 ENGINE = Null AS SELECT * FROM t_00180;
+
+DETACH TABLE mv_00180;
+ATTACH TABLE mv_00180;
+
+DROP TABLE t_00180;
+DROP TABLE mv_00180;
diff --git a/parser/testdata/00181_aggregate_functions_statistics/ast.json b/parser/testdata/00181_aggregate_functions_statistics/ast.json
new file mode 100644
index 000000000..9aecbd999
--- /dev/null
+++ b/parser/testdata/00181_aggregate_functions_statistics/ast.json
@@ -0,0 +1,25 @@
+{
+  "meta":
+  [
+    {
+      "name": "explain",
+      "type": "String"
+    }
+  ],
+
+  "data":
+  [
+    {
+      "explain": "Set"
+    }
+  ],
+
+  "rows": 1,
+
+  "statistics":
+  {
+    "elapsed": 0.001283953,
+    "rows_read": 1,
+    "bytes_read": 11
+  }
+}
diff --git a/parser/testdata/00181_aggregate_functions_statistics/metadata.json b/parser/testdata/00181_aggregate_functions_statistics/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00181_aggregate_functions_statistics/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00181_aggregate_functions_statistics/query.sql b/parser/testdata/00181_aggregate_functions_statistics/query.sql
new file mode 100644
index 000000000..348e9ece4
--- /dev/null
+++ b/parser/testdata/00181_aggregate_functions_statistics/query.sql
@@ -0,0 +1,228 @@
+SET any_join_distinct_right_table_keys = 1;
+SET joined_subquery_requires_alias = 0;
+
+DROP TABLE IF EXISTS series;
+
+CREATE TABLE series(i UInt32, x_value Float64, y_value Float64) ENGINE = Memory;
+
+INSERT INTO series(i, x_value, y_value) VALUES (1, 5.6,-4.4),(2, -9.6,3),(3, -1.3,-4),(4, 5.3,9.7),(5, 4.4,0.037),(6, -8.6,-7.8),(7, 5.1,9.3),(8, 7.9,-3.6),(9, -8.2,0.62),(10, -3,7.3);
+
+/* varSamp */
+
+SELECT varSamp(x_value) FROM (SELECT x_value FROM series LIMIT 0);
+SELECT varSamp(x_value) FROM (SELECT x_value FROM series LIMIT 1);
+
+SELECT round(abs(res1 - res2), 6) FROM
+(
+SELECT
+    varSamp(x_value) AS res1,
+    (sum(x_value * x_value) - ((sum(x_value) * sum(x_value)) / count())) / (count() - 1) AS res2
+FROM series
+);
+
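+-- varSamp should agree, up to rounding, with the textbook one-pass formula
+-- s^2 = (sum(x^2) - (sum(x))^2 / n) / (n - 1); the blocks that follow repeat
+-- the same cross-check for the other sample and population moments.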
FROM series LIMIT 0); +SELECT stddevSamp(x_value) FROM (SELECT x_value FROM series LIMIT 1); + +SELECT round(abs(res1 - res2), 6) FROM +( +SELECT + stddevSamp(x_value) AS res1, + sqrt((sum(x_value * x_value) - ((sum(x_value) * sum(x_value)) / count())) / (count() - 1)) AS res2 +FROM series +); + +/* skewSamp */ + +SELECT skewSamp(x_value) FROM (SELECT x_value FROM series LIMIT 0); +SELECT skewSamp(x_value) FROM (SELECT x_value FROM series LIMIT 1); + +SELECT round(abs(res1 - res2), 6) FROM +( +SELECT + skewSamp(x_value) AS res1, + ( + sum(x_value * x_value * x_value) / count() + - 3 * sum(x_value * x_value) / count() * sum(x_value) / count() + + 2 * sum(x_value) / count() * sum(x_value) / count() * sum(x_value) / count() + ) / pow((sum(x_value * x_value) - ((sum(x_value) * sum(x_value)) / count())) / (count() - 1), 1.5) AS res2 +FROM series +); + +/* kurtSamp */ + +SELECT kurtSamp(x_value) FROM (SELECT x_value FROM series LIMIT 0); +SELECT kurtSamp(x_value) FROM (SELECT x_value FROM series LIMIT 1); + +SELECT round(abs(res1 - res2), 6) FROM +( +SELECT + kurtSamp(x_value) AS res1, + ( + sum(x_value * x_value * x_value * x_value) / count() + - 4 * sum(x_value * x_value * x_value) / count() * sum(x_value) / count() + + 6 * sum(x_value * x_value) / count() * sum(x_value) / count() * sum(x_value) / count() + - 3 * sum(x_value) / count() * sum(x_value) / count() * sum(x_value) / count() * sum(x_value) / count() + ) / pow((sum(x_value * x_value) - ((sum(x_value) * sum(x_value)) / count())) / (count() - 1), 2) AS res2 +FROM series +); + +/* varPop */ + +SELECT varPop(x_value) FROM (SELECT x_value FROM series LIMIT 0); +SELECT varPop(x_value) FROM (SELECT x_value FROM series LIMIT 1); + +SELECT round(abs(res1 - res2), 6) FROM +( +SELECT + varPop(x_value) AS res1, + (sum(x_value * x_value) - ((sum(x_value) * sum(x_value)) / count())) / count() AS res2 +FROM series +); + +/* stddevPop */ + +SELECT stddevPop(x_value) FROM (SELECT x_value FROM series LIMIT 0); +SELECT stddevPop(x_value) FROM (SELECT x_value FROM series LIMIT 1); + +SELECT round(abs(res1 - res2), 6) FROM +( +SELECT + stddevPop(x_value) AS res1, + sqrt((sum(x_value * x_value) - ((sum(x_value) * sum(x_value)) / count())) / count()) AS res2 +FROM series +); + +/* skewPop */ + +SELECT skewPop(x_value) FROM (SELECT x_value FROM series LIMIT 0); +SELECT skewPop(x_value) FROM (SELECT x_value FROM series LIMIT 1); + +SELECT round(abs(res1 - res2), 6) FROM +( +SELECT + skewPop(x_value) AS res1, + ( + sum(x_value * x_value * x_value) / count() + - 3 * sum(x_value * x_value) / count() * sum(x_value) / count() + + 2 * sum(x_value) / count() * sum(x_value) / count() * sum(x_value) / count() + ) / pow((sum(x_value * x_value) - ((sum(x_value) * sum(x_value)) / count())) / count(), 1.5) AS res2 +FROM series +); + +/* kurtPop */ + +SELECT kurtPop(x_value) FROM (SELECT x_value FROM series LIMIT 0); +SELECT kurtPop(x_value) FROM (SELECT x_value FROM series LIMIT 1); + +SELECT round(abs(res1 - res2), 6) FROM +( +SELECT + kurtPop(x_value) AS res1, + ( + sum(x_value * x_value * x_value * x_value) / count() + - 4 * sum(x_value * x_value * x_value) / count() * sum(x_value) / count() + + 6 * sum(x_value * x_value) / count() * sum(x_value) / count() * sum(x_value) / count() + - 3 * sum(x_value) / count() * sum(x_value) / count() * sum(x_value) / count() * sum(x_value) / count() + ) / pow((sum(x_value * x_value) - ((sum(x_value) * sum(x_value)) / count())) / count(), 2) AS res2 +FROM series +); + +/* covarSamp */ + +SELECT covarSamp(x_value, y_value) FROM (SELECT 
x_value, y_value FROM series LIMIT 0); +SELECT covarSamp(x_value, y_value) FROM (SELECT x_value, y_value FROM series LIMIT 1); + +SELECT round(abs(COVAR1 - COVAR2), 6) +FROM +( + SELECT + arrayJoin([1]) AS ID2, + covarSamp(x_value, y_value) AS COVAR1 + FROM series +) ANY INNER JOIN +( + SELECT + arrayJoin([1]) AS ID2, + sum(VAL) / (count() - 1) AS COVAR2 + FROM + ( + SELECT (X - AVG_X) * (Y - AVG_Y) AS VAL + FROM + ( + SELECT + toUInt32(arrayJoin([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])) AS ID, + avg(x_value) AS AVG_X, + avg(y_value) AS AVG_Y + FROM series + ) ANY INNER JOIN + ( + SELECT + i AS ID, + x_value AS X, + y_value AS Y + FROM series + ) USING ID + ) +) USING ID2; + +/* covarPop */ + +SELECT covarPop(x_value, y_value) FROM (SELECT x_value, y_value FROM series LIMIT 0); +SELECT covarPop(x_value, y_value) FROM (SELECT x_value, y_value FROM series LIMIT 1); + +SELECT round(abs(COVAR1 - COVAR2), 6) +FROM +( + SELECT + arrayJoin([1]) AS ID2, + covarPop(x_value, y_value) AS COVAR1 + FROM series +) ANY INNER JOIN +( + SELECT + arrayJoin([1]) AS ID2, + sum(VAL) / count() AS COVAR2 + FROM + ( + SELECT (X - AVG_X) * (Y - AVG_Y) AS VAL + FROM + ( + SELECT + toUInt32(arrayJoin([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])) AS ID, + avg(x_value) AS AVG_X, + avg(y_value) AS AVG_Y + FROM series + ) ANY INNER JOIN + ( + SELECT + i AS ID, + x_value AS X, + y_value AS Y + FROM series + ) USING ID + ) +) USING ID2; + +/* corr */ + +SELECT corr(x_value, y_value) FROM (SELECT x_value, y_value FROM series LIMIT 0); +SELECT corr(x_value, y_value) FROM (SELECT x_value, y_value FROM series LIMIT 1); + +SELECT round(abs(corr(x_value, y_value) - covarPop(x_value, y_value) / (stddevPop(x_value) * stddevPop(y_value))), 6) FROM series; + +/* quantile AND quantileExact */ +SELECT '----quantile----'; + +SELECT quantileExactIf(number, number > 0) FROM numbers(90); + +SELECT quantileExactIf(number, number > 100) FROM numbers(90); +SELECT quantileExactIf(toFloat32(number) , number > 100) FROM numbers(90); +SELECT quantileExactIf(toFloat64(number) , number > 100) FROM numbers(90); + +SELECT quantileIf(number, number > 100) FROM numbers(90); +SELECT quantileIf(toFloat32(number) , number > 100) FROM numbers(90); +SELECT quantileIf(toFloat64(number) , number > 100) FROM numbers(90); + +DROP TABLE series; diff --git a/parser/testdata/00181_aggregate_functions_statistics_stable/ast.json b/parser/testdata/00181_aggregate_functions_statistics_stable/ast.json new file mode 100644 index 000000000..49eefe386 --- /dev/null +++ b/parser/testdata/00181_aggregate_functions_statistics_stable/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00119492, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00181_aggregate_functions_statistics_stable/metadata.json b/parser/testdata/00181_aggregate_functions_statistics_stable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00181_aggregate_functions_statistics_stable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00181_aggregate_functions_statistics_stable/query.sql b/parser/testdata/00181_aggregate_functions_statistics_stable/query.sql new file mode 100644 index 000000000..e94e6f9a7 --- /dev/null +++ b/parser/testdata/00181_aggregate_functions_statistics_stable/query.sql @@ -0,0 +1,145 @@ +SET any_join_distinct_right_table_keys = 1; +SET joined_subquery_requires_alias = 
0; + +DROP TABLE IF EXISTS series; + +CREATE TABLE series(i UInt32, x_value Float64, y_value Float64) ENGINE = Memory; + +INSERT INTO series(i, x_value, y_value) VALUES (1, 5.6,-4.4),(2, -9.6,3),(3, -1.3,-4),(4, 5.3,9.7),(5, 4.4,0.037),(6, -8.6,-7.8),(7, 5.1,9.3),(8, 7.9,-3.6),(9, -8.2,0.62),(10, -3,7.3); + +/* varSampStable */ + +SELECT varSampStable(x_value) FROM (SELECT x_value FROM series LIMIT 0); +SELECT varSampStable(x_value) FROM (SELECT x_value FROM series LIMIT 1); + +SELECT round(abs(res1 - res2), 6) FROM +( +SELECT + varSampStable(x_value) AS res1, + (sum(x_value * x_value) - ((sum(x_value) * sum(x_value)) / count())) / (count() - 1) AS res2 +FROM series +); + +/* stddevSampStable */ + +SELECT stddevSampStable(x_value) FROM (SELECT x_value FROM series LIMIT 0); +SELECT stddevSampStable(x_value) FROM (SELECT x_value FROM series LIMIT 1); + +SELECT round(abs(res1 - res2), 6) FROM +( +SELECT + stddevSampStable(x_value) AS res1, + sqrt((sum(x_value * x_value) - ((sum(x_value) * sum(x_value)) / count())) / (count() - 1)) AS res2 +FROM series +); + +/* varPopStable */ + +SELECT varPopStable(x_value) FROM (SELECT x_value FROM series LIMIT 0); +SELECT varPopStable(x_value) FROM (SELECT x_value FROM series LIMIT 1); + +SELECT round(abs(res1 - res2), 6) FROM +( +SELECT + varPopStable(x_value) AS res1, + (sum(x_value * x_value) - ((sum(x_value) * sum(x_value)) / count())) / count() AS res2 +FROM series +); + +/* stddevPopStable */ + +SELECT stddevPopStable(x_value) FROM (SELECT x_value FROM series LIMIT 0); +SELECT stddevPopStable(x_value) FROM (SELECT x_value FROM series LIMIT 1); + +SELECT round(abs(res1 - res2), 6) FROM +( +SELECT + stddevPopStable(x_value) AS res1, + sqrt((sum(x_value * x_value) - ((sum(x_value) * sum(x_value)) / count())) / count()) AS res2 +FROM series +); + +/* covarSampStable */ + +SELECT covarSampStable(x_value, y_value) FROM (SELECT x_value, y_value FROM series LIMIT 0); +SELECT covarSampStable(x_value, y_value) FROM (SELECT x_value, y_value FROM series LIMIT 1); + +SELECT round(abs(COVAR1 - COVAR2), 6) +FROM +( + SELECT + arrayJoin([1]) AS ID2, + covarSampStable(x_value, y_value) AS COVAR1 + FROM series +) ANY INNER JOIN +( + SELECT + arrayJoin([1]) AS ID2, + sum(VAL) / (count() - 1) AS COVAR2 + FROM + ( + SELECT (X - AVG_X) * (Y - AVG_Y) AS VAL + FROM + ( + SELECT + toUInt32(arrayJoin([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])) AS ID, + avg(x_value) AS AVG_X, + avg(y_value) AS AVG_Y + FROM series + ) ANY INNER JOIN + ( + SELECT + i AS ID, + x_value AS X, + y_value AS Y + FROM series + ) USING ID + ) +) USING ID2; + +/* covarPopStable */ + +SELECT covarPopStable(x_value, y_value) FROM (SELECT x_value, y_value FROM series LIMIT 0); +SELECT covarPopStable(x_value, y_value) FROM (SELECT x_value, y_value FROM series LIMIT 1); + +SELECT round(abs(COVAR1 - COVAR2), 6) +FROM +( + SELECT + arrayJoin([1]) AS ID2, + covarPopStable(x_value, y_value) AS COVAR1 + FROM series +) ANY INNER JOIN +( + SELECT + arrayJoin([1]) AS ID2, + sum(VAL) / count() AS COVAR2 + FROM + ( + SELECT (X - AVG_X) * (Y - AVG_Y) AS VAL + FROM + ( + SELECT + toUInt32(arrayJoin([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])) AS ID, + avg(x_value) AS AVG_X, + avg(y_value) AS AVG_Y + FROM series + ) ANY INNER JOIN + ( + SELECT + i AS ID, + x_value AS X, + y_value AS Y + FROM series + ) USING ID + ) +) USING ID2; + +/* corr */ + +SELECT corrStable(x_value, y_value) FROM (SELECT x_value, y_value FROM series LIMIT 0); +SELECT corrStable(x_value, y_value) FROM (SELECT x_value, y_value FROM series LIMIT 1); + +SELECT 
round(abs(corrStable(x_value, y_value) - covarPopStable(x_value, y_value) / (stddevPopStable(x_value) * stddevPopStable(y_value))), 6) FROM series; + +DROP TABLE series; diff --git a/parser/testdata/00181_cross_join_compression/ast.json b/parser/testdata/00181_cross_join_compression/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00181_cross_join_compression/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00181_cross_join_compression/metadata.json b/parser/testdata/00181_cross_join_compression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00181_cross_join_compression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00181_cross_join_compression/query.sql b/parser/testdata/00181_cross_join_compression/query.sql new file mode 100644 index 000000000..23d65b2f6 --- /dev/null +++ b/parser/testdata/00181_cross_join_compression/query.sql @@ -0,0 +1,4 @@ +-- Tags: stateful +CREATE VIEW unit AS (SELECT 1); + +SELECT CounterID, StartURL FROM unit, test.visits ORDER BY (CounterID, StartURL) DESC LIMIT 1000; diff --git a/parser/testdata/00182_functions_higher_order_and_consts/ast.json b/parser/testdata/00182_functions_higher_order_and_consts/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00182_functions_higher_order_and_consts/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00182_functions_higher_order_and_consts/metadata.json b/parser/testdata/00182_functions_higher_order_and_consts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00182_functions_higher_order_and_consts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00182_functions_higher_order_and_consts/query.sql b/parser/testdata/00182_functions_higher_order_and_consts/query.sql new file mode 100644 index 000000000..3d065520e --- /dev/null +++ b/parser/testdata/00182_functions_higher_order_and_consts/query.sql @@ -0,0 +1,316 @@ +--{echoOn} +SELECT '---map--'; +SELECT arrayMap(x -> 123, emptyArrayUInt8()); +SELECT arrayMap(x -> 123, [1, 2, 3]); +SELECT arrayMap(x -> 123, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayMap(x -> x, range(number)) FROM system.numbers LIMIT 3; +SELECT '---filter--'; +SELECT arrayFilter(x -> 0, emptyArrayUInt8()); +SELECT arrayFilter(x -> 0, [1, 2, 3]); +SELECT arrayFilter(x -> 0, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayFilter(x -> 1, emptyArrayUInt8()); +SELECT arrayFilter(x -> 1, [1, 2, 3]); +SELECT arrayFilter(x -> 1, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayFilter(x -> x > 1, [1, 2, 3]); +SELECT arrayFilter(x -> x > 2, [1, 2, 3]); +SELECT arrayFilter(x -> NULL, [1, 2, 3]); +SELECT arrayFilter(x -> 1.1, [1, 2, 3]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayFilter(x -> 'string', [1, 2, 3]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT '---count---'; +SELECT arrayCount(x -> 0, emptyArrayUInt8()); +SELECT arrayCount(x -> 0, [1, 2, 3]); +SELECT arrayCount(x -> 0, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayCount(x -> 1, emptyArrayUInt8()); +SELECT arrayCount(x -> 1, [1, 2, 3]); +SELECT arrayCount(x -> 1, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayCount(x -> x > 0, [1, 2, 3]); +SELECT arrayCount(x -> x > 1, [1, 2, 3]); +SELECT arrayCount(x -> NULL, [1, 2, 3]); +SELECT arrayCount(x -> 'string', [1, 2, 3]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } 
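+-- A note on the `-- { serverError ILLEGAL_TYPE_OF_ARGUMENT }` annotations above: this is the
+-- convention ClickHouse's functional test runner uses to mark a statement that is expected to
+-- fail with that named error code at execution time. Because the annotation is an ordinary SQL
+-- line comment, the annotated statement itself still parses normally, so it is kept verbatim
+-- in these parser fixtures.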
+SELECT '---sum---'; +SELECT arraySum(x -> 0, emptyArrayUInt8()); +SELECT arraySum(x -> 0, [1, 2, 3]); +SELECT arraySum(x -> 0, range(number)) FROM system.numbers LIMIT 10; +SELECT arraySum(x -> 10, emptyArrayUInt8()); +SELECT arraySum(x -> 10, [1, 2, 3]); +SELECT arraySum(x -> 10, range(number)) FROM system.numbers LIMIT 10; +SELECT '---all---'; +SELECT arrayAll(x -> 0, emptyArrayUInt8()); +SELECT arrayAll(x -> 0, [1, 2, 3]); +SELECT arrayAll(x -> 0, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayAll(x -> 1, emptyArrayUInt8()); +SELECT arrayAll(x -> 1, [1, 2, 3]); +SELECT arrayAll(x -> 1, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayAll(x -> x > 0, [1, 2, 3]); +SELECT arrayAll(x -> x > 1, [1, 2, 3]); +SELECT arrayAll(x -> x, [1, 2, 3]); +SELECT arrayAll(x -> NULL, [1, 2, 3]); +SELECT arrayAll(x -> 'string', [1, 2, 3]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT '---exists---'; +SELECT arrayExists(x -> 0, emptyArrayUInt8()); +SELECT arrayExists(x -> 0, [1, 2, 3]); +SELECT arrayExists(x -> 0, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayExists(x -> 1, emptyArrayUInt8()); +SELECT arrayExists(x -> 1, [1, 2, 3]); +SELECT arrayExists(x -> 1, range(number)) FROM system.numbers LIMIT 10; +SELECT '---first---'; +SELECT arrayFirst(x -> 0, emptyArrayUInt8()); +SELECT arrayFirst(x -> 0, [1, 2, 3]); +SELECT arrayFirst(x -> 0, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayFirst(x -> 1, emptyArrayUInt8()); +SELECT arrayFirst(x -> 1, [1, 2, 3]); +SELECT arrayFirst(x -> 1, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayFirst(x -> x > 1, [1, 2, 3]); +SELECT arrayFirst(x -> x > 3, [1, 2, 3]); +SELECT arrayFirst(x -> x, [1, 2, 3]); +SELECT arrayFirst(x -> NULL, [1, 2, 3]); +SELECT arrayFirst(x -> 'string', [1, 2, 3]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT '---last---'; +SELECT arrayLast(x -> 0, emptyArrayUInt8()); +SELECT arrayLast(x -> 0, [1, 2, 3]); +SELECT arrayLast(x -> 0, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayLast(x -> 1, emptyArrayUInt8()); +SELECT arrayLast(x -> 1, [1, 2, 3]); +SELECT arrayLast(x -> 1, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayLast(x -> x > 1, [1, 2, 3]); +SELECT arrayLast(x -> x > 3, [1, 2, 3]); +SELECT arrayLast(x -> x, [1, 2, 3]); +SELECT arrayLast(x -> NULL, [1, 2, 3]); +SELECT arrayLast(x -> 'string', [1, 2, 3]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT '---first index---'; +SELECT arrayFirstIndex(x -> 0, emptyArrayUInt8()); +SELECT arrayFirstIndex(x -> 0, [1, 2, 3]); +SELECT arrayFirstIndex(x -> 0, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayFirstIndex(x -> 1, emptyArrayUInt8()); +SELECT arrayFirstIndex(x -> 1, [1, 2, 3]); +SELECT arrayFirstIndex(x -> 1, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayFirstIndex(x -> x > 1, [1, 2, 3]); +SELECT arrayFirstIndex(x -> x > 3, [1, 2, 3]); +SELECT arrayFirstIndex(x -> x, [1, 2, 3]); +SELECT arrayFirstIndex(x -> NULL, [1, 2, 3]); +SELECT arrayFirstIndex(x -> 'string', [1, 2, 3]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT '---lastindex---'; +SELECT arrayLastIndex(x -> 0, emptyArrayUInt8()); +SELECT arrayLastIndex(x -> 0, [1, 2, 3]); +SELECT arrayLastIndex(x -> 0, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayLastIndex(x -> 1, emptyArrayUInt8()); +SELECT arrayLastIndex(x -> 1, [1, 2, 3]); +SELECT arrayLastIndex(x -> 1, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayLastIndex(x -> x > 1, [1, 2, 3]); +SELECT arrayLastIndex(x -> x > 3, [1, 2, 3]); +SELECT 
arrayLastIndex(x -> x, [1, 2, 3]); +SELECT arrayLastIndex(x -> NULL, [1, 2, 3]); +SELECT arrayLastIndex(x -> 'string', [1, 2, 3]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT '---cumsum---'; +SELECT arrayCumSum(x -> 0, emptyArrayUInt8()); +SELECT arrayCumSum(x -> 0, [1, 2, 3]); +SELECT arrayCumSum(x -> 0, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayCumSum(x -> 10, emptyArrayUInt8()); +SELECT arrayCumSum(x -> 10, [1, 2, 3]); +SELECT arrayCumSum(x -> 10, range(number)) FROM system.numbers LIMIT 10; + +SELECT '---map--'; +SELECT arrayMap(x -> materialize(123), emptyArrayUInt8()); +SELECT arrayMap(x -> materialize(123), [1, 2, 3]); +SELECT arrayMap(x -> materialize(123), range(number)) FROM system.numbers LIMIT 10; +SELECT '---filter--'; +SELECT arrayFilter(x -> materialize(0), emptyArrayUInt8()); +SELECT arrayFilter(x -> materialize(0), [1, 2, 3]); +SELECT arrayFilter(x -> materialize(0), range(number)) FROM system.numbers LIMIT 10; +SELECT arrayFilter(x -> materialize(1), emptyArrayUInt8()); +SELECT arrayFilter(x -> materialize(1), [1, 2, 3]); +SELECT arrayFilter(x -> materialize(1), range(number)) FROM system.numbers LIMIT 10; +SELECT '---count---'; +SELECT arrayCount(x -> materialize(0), emptyArrayUInt8()); +SELECT arrayCount(x -> materialize(0), [1, 2, 3]); +SELECT arrayCount(x -> materialize(0), range(number)) FROM system.numbers LIMIT 10; +SELECT arrayCount(x -> materialize(1), emptyArrayUInt8()); +SELECT arrayCount(x -> materialize(1), [1, 2, 3]); +SELECT arrayCount(x -> materialize(1), range(number)) FROM system.numbers LIMIT 10; +SELECT '---sum---'; +SELECT arraySum(x -> materialize(0), emptyArrayUInt8()); +SELECT arraySum(x -> materialize(0), [1, 2, 3]); +SELECT arraySum(x -> materialize(0), range(number)) FROM system.numbers LIMIT 10; +SELECT arraySum(x -> materialize(10), emptyArrayUInt8()); +SELECT arraySum(x -> materialize(10), [1, 2, 3]); +SELECT arraySum(x -> materialize(10), range(number)) FROM system.numbers LIMIT 10; +SELECT '---all---'; +SELECT arrayAll(x -> materialize(0), emptyArrayUInt8()); +SELECT arrayAll(x -> materialize(0), [1, 2, 3]); +SELECT arrayAll(x -> materialize(0), range(number)) FROM system.numbers LIMIT 10; +SELECT arrayAll(x -> materialize(1), emptyArrayUInt8()); +SELECT arrayAll(x -> materialize(1), [1, 2, 3]); +SELECT arrayAll(x -> materialize(1), range(number)) FROM system.numbers LIMIT 10; +SELECT '---exists---'; +SELECT arrayExists(x -> materialize(0), emptyArrayUInt8()); +SELECT arrayExists(x -> materialize(0), [1, 2, 3]); +SELECT arrayExists(x -> materialize(0), range(number)) FROM system.numbers LIMIT 10; +SELECT arrayExists(x -> materialize(1), emptyArrayUInt8()); +SELECT arrayExists(x -> materialize(1), [1, 2, 3]); +SELECT arrayExists(x -> materialize(1), range(number)) FROM system.numbers LIMIT 10; +SELECT arrayExists(x -> x, [1, 2, 3]); +SELECT arrayExists(x -> 'string', [1, 2, 3]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT '---first---'; +SELECT arrayFirst(x -> materialize(0), emptyArrayUInt8()); +SELECT arrayFirst(x -> materialize(0), [1, 2, 3]); +SELECT arrayFirst(x -> materialize(0), range(number)) FROM system.numbers LIMIT 10; +SELECT arrayFirst(x -> materialize(1), emptyArrayUInt8()); +SELECT arrayFirst(x -> materialize(1), [1, 2, 3]); +SELECT arrayFirst(x -> materialize(1), range(number)) FROM system.numbers LIMIT 10; +SELECT '---first index---'; +SELECT arrayFirstIndex(x -> materialize(0), emptyArrayUInt8()); +SELECT arrayFirstIndex(x -> materialize(0), [1, 2, 3]); +SELECT arrayFirstIndex(x -> 
materialize(0), range(number)) FROM system.numbers LIMIT 10; +SELECT arrayFirstIndex(x -> materialize(1), emptyArrayUInt8()); +SELECT arrayFirstIndex(x -> materialize(1), [1, 2, 3]); +SELECT arrayFirstIndex(x -> materialize(1), range(number)) FROM system.numbers LIMIT 10; +SELECT '--cumsum---'; +SELECT arrayCumSum(x -> materialize(0), emptyArrayUInt8()); +SELECT arrayCumSum(x -> materialize(0), [1, 2, 3]); +SELECT arrayCumSum(x -> materialize(0), range(number)) FROM system.numbers LIMIT 10; +SELECT arrayCumSum(x -> materialize(10), emptyArrayUInt8()); +SELECT arrayCumSum(x -> materialize(10), [1, 2, 3]); +SELECT arrayCumSum(x -> materialize(10), range(number)) FROM system.numbers LIMIT 10; + +SELECT '---map--'; +SELECT arrayMap(x -> 123, emptyArrayString()); +SELECT arrayMap(x -> 123, arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayMap(x -> 123, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT '---filter--'; +SELECT arrayFilter(x -> 0, emptyArrayString()); +SELECT arrayFilter(x -> 0, arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayFilter(x -> 0, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayFilter(x -> 1, emptyArrayString()); +SELECT arrayFilter(x -> 1, arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayFilter(x -> 1, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT '---count---'; +SELECT arrayCount(x -> 0, emptyArrayString()); +SELECT arrayCount(x -> 0, arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayCount(x -> 0, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayCount(x -> 1, emptyArrayString()); +SELECT arrayCount(x -> 1, arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayCount(x -> 1, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT '---sum---'; +SELECT arraySum(x -> 0, emptyArrayString()); +SELECT arraySum(x -> 0, arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arraySum(x -> 0, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arraySum(x -> 10, emptyArrayString()); +SELECT arraySum(x -> 10, arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arraySum(x -> 10, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT '---all---'; +SELECT arrayAll(x -> 0, emptyArrayString()); +SELECT arrayAll(x -> 0, arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayAll(x -> 0, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayAll(x -> 1, emptyArrayString()); +SELECT arrayAll(x -> 1, arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayAll(x -> 1, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT '---exists---'; +SELECT arrayExists(x -> 0, emptyArrayString()); +SELECT arrayExists(x -> 0, arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayExists(x -> 0, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayExists(x -> 1, emptyArrayString()); +SELECT arrayExists(x -> 1, arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayExists(x -> 1, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT '---first---'; +SELECT arrayFirst(x -> 0, emptyArrayString()); +SELECT arrayFirst(x -> 0, arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayFirst(x -> 0, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayFirst(x -> 1, emptyArrayString()); +SELECT arrayFirst(x -> 1, arrayMap(x -> toString(x), [1, 2, 3])); +SELECT 
arrayFirst(x -> 1, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT '---first index---'; +SELECT arrayFirstIndex(x -> 0, emptyArrayString()); +SELECT arrayFirstIndex(x -> 0, arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayFirstIndex(x -> 0, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayFirstIndex(x -> 1, emptyArrayString()); +SELECT arrayFirstIndex(x -> 1, arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayFirstIndex(x -> 1, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT '---cumsum---'; +SELECT arrayCumSum(x -> 0, emptyArrayString()); +SELECT arrayCumSum(x -> 0, arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayCumSum(x -> 0, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayCumSum(x -> 10, emptyArrayString()); +SELECT arrayCumSum(x -> 10, arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayCumSum(x -> 10, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; + +SELECT '---map--'; +SELECT arrayMap(x -> materialize(123), emptyArrayString()); +SELECT arrayMap(x -> materialize(123), arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayMap(x -> materialize(123), arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT '---filter--'; +SELECT arrayFilter(x -> materialize(0), emptyArrayString()); +SELECT arrayFilter(x -> materialize(0), arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayFilter(x -> materialize(0), arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayFilter(x -> materialize(1), emptyArrayString()); +SELECT arrayFilter(x -> materialize(1), arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayFilter(x -> materialize(1), arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT '---count---'; +SELECT arrayCount(x -> materialize(0), emptyArrayString()); +SELECT arrayCount(x -> materialize(0), arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayCount(x -> materialize(0), arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayCount(x -> materialize(1), emptyArrayString()); +SELECT arrayCount(x -> materialize(1), arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayCount(x -> materialize(1), arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT '---sum---'; +SELECT arraySum(x -> materialize(0), emptyArrayString()); +SELECT arraySum(x -> materialize(0), arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arraySum(x -> materialize(0), arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arraySum(x -> materialize(10), emptyArrayString()); +SELECT arraySum(x -> materialize(10), arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arraySum(x -> materialize(10), arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT '---all---'; +SELECT arrayAll(x -> materialize(0), emptyArrayString()); +SELECT arrayAll(x -> materialize(0), arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayAll(x -> materialize(0), arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayAll(x -> materialize(1), emptyArrayString()); +SELECT arrayAll(x -> materialize(1), arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayAll(x -> materialize(1), arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT '---exists---'; +SELECT arrayExists(x -> materialize(0), emptyArrayString()); +SELECT arrayExists(x -> materialize(0), 
arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayExists(x -> materialize(0), arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayExists(x -> materialize(1), emptyArrayString()); +SELECT arrayExists(x -> materialize(1), arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayExists(x -> materialize(1), arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT '---first---'; +SELECT arrayFirst(x -> materialize(0), emptyArrayString()); +SELECT arrayFirst(x -> materialize(0), arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayFirst(x -> materialize(0), arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayFirst(x -> materialize(1), emptyArrayString()); +SELECT arrayFirst(x -> materialize(1), arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayFirst(x -> materialize(1), arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT '---first index---'; +SELECT arrayFirstIndex(x -> materialize(0), emptyArrayString()); +SELECT arrayFirstIndex(x -> materialize(0), arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayFirstIndex(x -> materialize(0), arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayFirstIndex(x -> materialize(1), emptyArrayString()); +SELECT arrayFirstIndex(x -> materialize(1), arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayFirstIndex(x -> materialize(1), arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT '---cumsum---'; +SELECT arrayCumSum(x -> materialize(0), emptyArrayString()); +SELECT arrayCumSum(x -> materialize(0), arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayCumSum(x -> materialize(0), arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayCumSum(x -> materialize(10), emptyArrayString()); +SELECT arrayCumSum(x -> materialize(10), arrayMap(x -> toString(x), [1, 2, 3])); +SELECT arrayCumSum(x -> materialize(10), arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; + +SELECT '--- ---'; +SELECT arrayMap(x -> number % 2, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayFilter(x -> number % 2, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayCount(x -> number % 2, range(number)) FROM system.numbers LIMIT 10; +SELECT arraySum(x -> number % 2, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayAll(x -> number % 2, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayExists(x -> number % 2, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayFirst(x -> number % 2, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayFirstIndex(x -> number % 2, range(number)) FROM system.numbers LIMIT 10; +SELECT arrayCumSum(x -> number % 2, range(number)) FROM system.numbers LIMIT 10; +SELECT '--- ---'; +SELECT arrayMap(x -> number % 2, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayFilter(x -> number % 2, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayCount(x -> number % 2, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arraySum(x -> number % 2, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayAll(x -> number % 2, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayExists(x -> number % 2, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayFirst(x -> number % 2, arrayMap(x -> toString(x), range(number))) FROM 
system.numbers LIMIT 10; +SELECT arrayFirstIndex(x -> number % 2, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayCumSum(x -> number % 2, arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; diff --git a/parser/testdata/00182_simple_squashing_transform_bug/ast.json b/parser/testdata/00182_simple_squashing_transform_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00182_simple_squashing_transform_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00182_simple_squashing_transform_bug/metadata.json b/parser/testdata/00182_simple_squashing_transform_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00182_simple_squashing_transform_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00182_simple_squashing_transform_bug/query.sql b/parser/testdata/00182_simple_squashing_transform_bug/query.sql new file mode 100644 index 000000000..2cc195937 --- /dev/null +++ b/parser/testdata/00182_simple_squashing_transform_bug/query.sql @@ -0,0 +1,7 @@ +-- Tags: stateful, global + +set allow_prefetched_read_pool_for_remote_filesystem=0, merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0, max_threads=2, max_block_size=65387; +set max_rows_to_read = '100M'; + +SELECT sum(UserID GLOBAL IN (SELECT UserID FROM remote('127.0.0.{1,2}', test.hits))) FROM remote('127.0.0.{1,2}', test.hits); +SELECT sum(UserID GLOBAL IN (SELECT UserID FROM test.hits)) FROM remote('127.0.0.{1,2}', test.hits); diff --git a/parser/testdata/00183_prewhere_conditions_order/ast.json b/parser/testdata/00183_prewhere_conditions_order/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00183_prewhere_conditions_order/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00183_prewhere_conditions_order/metadata.json b/parser/testdata/00183_prewhere_conditions_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00183_prewhere_conditions_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00183_prewhere_conditions_order/query.sql b/parser/testdata/00183_prewhere_conditions_order/query.sql new file mode 100644 index 000000000..77bdca2d2 --- /dev/null +++ b/parser/testdata/00183_prewhere_conditions_order/query.sql @@ -0,0 +1,31 @@ +-- Tags: stateful +SET optimize_move_to_prewhere = 1; +SET enable_multiple_prewhere_read_steps = 1; +SET parallel_replicas_local_plan = 1; +SET optimize_empty_string_comparisons = 0; + +SELECT trimBoth(explain) +FROM ( +EXPLAIN actions=1 +SELECT SearchPhrase, MIN(URL), MIN(Title), COUNT(*) AS c, COUNT(DISTINCT UserID) +FROM test.hits +WHERE Title LIKE '%Google%' AND URL NOT LIKE '%.google.%' AND SearchPhrase <> '' +GROUP BY SearchPhrase +ORDER BY c DESC +LIMIT 10 +SETTINGS allow_reorder_prewhere_conditions = 0 +) +WHERE explain like '%Prewhere filter column%'; + +SELECT trimBoth(explain) +FROM ( +EXPLAIN actions=1 +SELECT SearchPhrase, MIN(URL), MIN(Title), COUNT(*) AS c, COUNT(DISTINCT UserID) +FROM test.hits +WHERE Title LIKE '%Google%' AND URL NOT LIKE '%.google.%' AND SearchPhrase <> '' +GROUP BY SearchPhrase +ORDER BY c DESC +LIMIT 10 +SETTINGS allow_reorder_prewhere_conditions = 1 +) +WHERE explain like '%Prewhere filter column%'; diff --git a/parser/testdata/00183_skip_unavailable_shards/ast.json 
b/parser/testdata/00183_skip_unavailable_shards/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00183_skip_unavailable_shards/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00183_skip_unavailable_shards/metadata.json b/parser/testdata/00183_skip_unavailable_shards/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00183_skip_unavailable_shards/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00183_skip_unavailable_shards/query.sql b/parser/testdata/00183_skip_unavailable_shards/query.sql new file mode 100644 index 000000000..71187d75f --- /dev/null +++ b/parser/testdata/00183_skip_unavailable_shards/query.sql @@ -0,0 +1,5 @@ +-- Tags: shard, no-fasttest + +SET send_logs_level = 'fatal'; +SELECT count() FROM remote('{127,1}.0.0.{2,3}', system.one) SETTINGS skip_unavailable_shards = 1; +SELECT count() FROM remote('{1,127}.0.0.{2,3}', system.one) SETTINGS skip_unavailable_shards = 1; diff --git a/parser/testdata/00184_shard_distributed_group_by_no_merge/ast.json b/parser/testdata/00184_shard_distributed_group_by_no_merge/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00184_shard_distributed_group_by_no_merge/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00184_shard_distributed_group_by_no_merge/metadata.json b/parser/testdata/00184_shard_distributed_group_by_no_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00184_shard_distributed_group_by_no_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00184_shard_distributed_group_by_no_merge/query.sql b/parser/testdata/00184_shard_distributed_group_by_no_merge/query.sql new file mode 100644 index 000000000..1bd6cbe89 --- /dev/null +++ b/parser/testdata/00184_shard_distributed_group_by_no_merge/query.sql @@ -0,0 +1,45 @@ +-- Tags: distributed + +SELECT 'distributed_group_by_no_merge=1'; +SELECT count(), uniq(dummy) FROM remote('127.0.0.{2,3}', system.one) SETTINGS distributed_group_by_no_merge=1; +SELECT count(), uniq(dummy) FROM remote('127.0.0.{2,3,4,5}', system.one) SETTINGS distributed_group_by_no_merge=1; +SELECT count(), uniq(dummy) FROM remote('127.0.0.{2,3}', system.one) LIMIT 1 SETTINGS distributed_group_by_no_merge=1; + +SELECT 'distributed_group_by_no_merge=2'; +SET max_distributed_connections=1; +SET max_threads=1; + +SELECT 'LIMIT'; +SELECT * FROM (SELECT any(_shard_num) shard_num, count(), uniq(dummy) FROM remote('127.0.0.{2,3}', system.one)) ORDER BY shard_num LIMIT 1 SETTINGS distributed_group_by_no_merge=2; +SELECT 'OFFSET'; +SELECT * FROM (SELECT any(_shard_num) shard_num, count(), uniq(dummy) FROM remote('127.0.0.{2,3}', system.one)) ORDER BY shard_num LIMIT 1, 1 SETTINGS distributed_group_by_no_merge=2; + +SELECT 'ALIAS'; +SELECT dummy AS d FROM remote('127.0.0.{2,3}', system.one) ORDER BY d SETTINGS distributed_group_by_no_merge=2; + +DROP TABLE IF EXISTS data_00184; +CREATE TABLE data_00184 Engine=Memory() AS SELECT * FROM numbers(2); +SELECT 'ORDER BY'; +SELECT number FROM remote('127.0.0.{2,3}', currentDatabase(), data_00184) ORDER BY number DESC SETTINGS distributed_group_by_no_merge=2; +SELECT 'ORDER BY LIMIT'; +SELECT number FROM remote('127.0.0.{2,3}', currentDatabase(), data_00184) ORDER BY number DESC LIMIT 1 SETTINGS distributed_group_by_no_merge=2; + +SELECT 'LIMIT BY'; +SELECT number FROM remote('127.0.0.{2,3}', currentDatabase(), 
data_00184) LIMIT 1 BY number SETTINGS distributed_group_by_no_merge=2; +SELECT 'LIMIT BY LIMIT'; +SELECT number FROM remote('127.0.0.{2,3}', currentDatabase(), data_00184) LIMIT 1 BY number LIMIT 1 SETTINGS distributed_group_by_no_merge=2; + +SELECT 'GROUP BY ORDER BY'; +SELECT uniq(number) u FROM remote('127.0.0.{2,3}', currentDatabase(), data_00184) GROUP BY number ORDER BY u DESC SETTINGS distributed_group_by_no_merge=2; + +-- cover possible tricky issues +SELECT 'GROUP BY w/ ALIAS'; +SELECT n FROM remote('127.0.0.{2,3}', currentDatabase(), data_00184) GROUP BY number AS n ORDER BY n SETTINGS distributed_group_by_no_merge=2; + +SELECT 'ORDER BY w/ ALIAS'; +SELECT n FROM remote('127.0.0.{2,3}', currentDatabase(), data_00184) ORDER BY number AS n LIMIT 1 SETTINGS distributed_group_by_no_merge=2; + +SELECT 'func(aggregate function) GROUP BY'; +SELECT assumeNotNull(argMax(dummy, 1)) FROM remote('127.1', system.one) SETTINGS distributed_group_by_no_merge=2; + +drop table data_00184; diff --git a/parser/testdata/00185_array_literals/ast.json b/parser/testdata/00185_array_literals/ast.json new file mode 100644 index 000000000..f72cdcb02 --- /dev/null +++ b/parser/testdata/00185_array_literals/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2]" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001388935, + "rows_read": 5, + "bytes_read": 195 + } +} diff --git a/parser/testdata/00185_array_literals/metadata.json b/parser/testdata/00185_array_literals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00185_array_literals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00185_array_literals/query.sql b/parser/testdata/00185_array_literals/query.sql new file mode 100644 index 000000000..5d6234c8b --- /dev/null +++ b/parser/testdata/00185_array_literals/query.sql @@ -0,0 +1,30 @@ +SELECT [1, 2]; +SELECT [1.0, 2]; +SELECT [-1, 2]; +SELECT [-1, 1000]; +SELECT [-1, 1000000]; +SELECT [-1000, 2]; +SELECT [-1000000, 2]; +SELECT ['Hello', 'world']; + +SELECT [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256]; + +SELECT [1, 1 + 1]; +SELECT [1.0, 1 + 1]; +SELECT [-1, 1 + 1]; +SELECT [-1, toUInt16(1000)]; +SELECT [-1, toUInt32(1000000)]; +SELECT [-1000, 1 + 1]; +SELECT [-1000000, 1 + 1]; +SELECT ['Hello', concat('wor', 'ld')]; + +SELECT 
[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256 + 0]; + +SELECT [0]; +SELECT [0 ]; +SELECT [ 0]; +SELECT [ 0 ]; +SELECT +[ + 0 +]; diff --git a/parser/testdata/00187_like_regexp_prefix/ast.json b/parser/testdata/00187_like_regexp_prefix/ast.json new file mode 100644 index 000000000..ba122b416 --- /dev/null +++ b/parser/testdata/00187_like_regexp_prefix/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function like (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'prepre_f'" + }, + { + "explain": " Literal '%pre_f%'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001062605, + "rows_read": 10, + "bytes_read": 381 + } +} diff --git a/parser/testdata/00187_like_regexp_prefix/metadata.json b/parser/testdata/00187_like_regexp_prefix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00187_like_regexp_prefix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00187_like_regexp_prefix/query.sql b/parser/testdata/00187_like_regexp_prefix/query.sql new file mode 100644 index 000000000..430b86e8b --- /dev/null +++ b/parser/testdata/00187_like_regexp_prefix/query.sql @@ -0,0 +1,3 @@ +SELECT materialize('prepre_f') LIKE '%pre_f%'; + +SELECT materialize('prepre_f') LIKE '%%%pre_f%'; diff --git a/parser/testdata/00188_constants_as_arguments_of_aggregate_functions/ast.json b/parser/testdata/00188_constants_as_arguments_of_aggregate_functions/ast.json new file mode 100644 index 000000000..a3c68e281 --- /dev/null +++ b/parser/testdata/00188_constants_as_arguments_of_aggregate_functions/ast.json @@ -0,0 +1,100 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function uniq (children 1)" + }, + { + "explain": " 
ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_123" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 26, + + "statistics": + { + "elapsed": 0.00143747, + "rows_read": 26, + "bytes_read": 1048 + } +} diff --git a/parser/testdata/00188_constants_as_arguments_of_aggregate_functions/metadata.json b/parser/testdata/00188_constants_as_arguments_of_aggregate_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00188_constants_as_arguments_of_aggregate_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00188_constants_as_arguments_of_aggregate_functions/query.sql b/parser/testdata/00188_constants_as_arguments_of_aggregate_functions/query.sql new file mode 100644 index 000000000..fd36cf6cc --- /dev/null +++ b/parser/testdata/00188_constants_as_arguments_of_aggregate_functions/query.sql @@ -0,0 +1 @@ +SELECT count(), sum(1), uniq(123) FROM (SELECT * FROM system.numbers LIMIT 10); diff --git a/parser/testdata/00189_time_zones_long/ast.json b/parser/testdata/00189_time_zones_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00189_time_zones_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00189_time_zones_long/metadata.json b/parser/testdata/00189_time_zones_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00189_time_zones_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00189_time_zones_long/query.sql b/parser/testdata/00189_time_zones_long/query.sql new file mode 100644 index 000000000..7a61e858a --- /dev/null +++ b/parser/testdata/00189_time_zones_long/query.sql @@ -0,0 +1,442 @@ +-- Tags: long + +/* timestamp 1419800400 == 2014-12-29 00:00:00 (Asia/Istanbul) */ +/* timestamp 1412106600 == 2014-09-30 23:50:00 (Asia/Istanbul) */ +/* timestamp 1420102800 == 2015-01-01 12:00:00 (Asia/Istanbul) */ +/* timestamp 1428310800 == 2015-04-06 12:00:00 (Asia/Istanbul) */ +/* timestamp 1436956200 == 2015-07-15 13:30:00 (Asia/Istanbul) */ +/* timestamp 1426415400 == 2015-03-15 13:30:00 (Asia/Istanbul) */ +/* timestamp 1549483055 == 2019-02-06 22:57:35 (Asia/Istanbul) */ +/* date 16343 == 2014-09-30 */ +/* date 16433 == 2014-12-29 */ +/* date 17933 == 2019-02-06 */ + +/* toStartOfDay */ + +SELECT 'toStartOfDay'; +SELECT toStartOfDay(toDateTime(1412106600), 'Asia/Istanbul'); +SELECT toStartOfDay(toDateTime(1412106600), 'Europe/Paris'); +SELECT toStartOfDay(toDateTime(1412106600), 'Europe/London'); +SELECT toStartOfDay(toDateTime(1412106600), 'Asia/Tokyo'); +SELECT toStartOfDay(toDateTime(1412106600), 'Pacific/Pitcairn'); +SELECT toStartOfDay(toDate(16343), 'Asia/Istanbul'); +SELECT toStartOfDay(toDate(16343), 'Europe/Paris'); +SELECT 
toStartOfDay(toDate(16343), 'Europe/London'); +SELECT toStartOfDay(toDate(16343), 'Asia/Tokyo'); +SELECT toStartOfDay(toDate(16343), 'Pacific/Pitcairn'); + +/* toMonday */ + +SELECT 'toMonday'; +SELECT toMonday(toDateTime(1419800400), 'Asia/Istanbul'); +SELECT toMonday(toDateTime(1419800400), 'Europe/Paris'); +SELECT toMonday(toDateTime(1419800400), 'Europe/London'); +SELECT toMonday(toDateTime(1419800400), 'Asia/Tokyo'); +SELECT toMonday(toDateTime(1419800400), 'Pacific/Pitcairn'); +SELECT toMonday(toDate(16433), 'Asia/Istanbul'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toMonday(toDate(16433)); + +/* toStartOfWeek (Sunday) */ + +SELECT 'toStartOfWeek (Sunday)'; +SELECT toStartOfWeek(toDateTime(1419800400), 0, 'Asia/Istanbul'); +SELECT toStartOfWeek(toDateTime(1419800400), 0, 'Europe/Paris'); +SELECT toStartOfWeek(toDateTime(1419800400), 0, 'Europe/London'); +SELECT toStartOfWeek(toDateTime(1419800400), 0, 'Asia/Tokyo'); +SELECT toStartOfWeek(toDateTime(1419800400), 0, 'Pacific/Pitcairn'); +SELECT toStartOfWeek(toDate(16433), 0, 'Asia/Istanbul'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfWeek(toDate(16433), 0); + +/* toStartOfWeek (Monday) */ + +SELECT 'toStartOfWeek (Monday)'; +SELECT toStartOfWeek(toDateTime(1419800400), 1, 'Asia/Istanbul'); +SELECT toStartOfWeek(toDateTime(1419800400), 1, 'Europe/Paris'); +SELECT toStartOfWeek(toDateTime(1419800400), 1, 'Europe/London'); +SELECT toStartOfWeek(toDateTime(1419800400), 1, 'Asia/Tokyo'); +SELECT toStartOfWeek(toDateTime(1419800400), 1, 'Pacific/Pitcairn'); +SELECT toStartOfWeek(toDate(16433), 1, 'Asia/Istanbul'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfWeek(toDate(16433), 1); + +/* toLastDayOfWeek (Sunday) */ + +SELECT 'toLastDayOfWeek (Sunday)'; +SELECT toLastDayOfWeek(toDateTime(1419800400), 0, 'Asia/Istanbul'); +SELECT toLastDayOfWeek(toDateTime(1419800400), 0, 'Europe/Paris'); +SELECT toLastDayOfWeek(toDateTime(1419800400), 0, 'Europe/London'); +SELECT toLastDayOfWeek(toDateTime(1419800400), 0, 'Asia/Tokyo'); +SELECT toLastDayOfWeek(toDateTime(1419800400), 0, 'Pacific/Pitcairn'); +SELECT toLastDayOfWeek(toDate(16433), 0, 'Asia/Istanbul'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toLastDayOfWeek(toDate(16433), 0); + +/* toLastDayOfWeek (Monday) */ + +SELECT 'toLastDayOfWeek (Monday)'; +SELECT toLastDayOfWeek(toDateTime(1419800400), 1, 'Asia/Istanbul'); +SELECT toLastDayOfWeek(toDateTime(1419800400), 1, 'Europe/Paris'); +SELECT toLastDayOfWeek(toDateTime(1419800400), 1, 'Europe/London'); +SELECT toLastDayOfWeek(toDateTime(1419800400), 1, 'Asia/Tokyo'); +SELECT toLastDayOfWeek(toDateTime(1419800400), 1, 'Pacific/Pitcairn'); +SELECT toLastDayOfWeek(toDate(16433), 1, 'Asia/Istanbul'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toLastDayOfWeek(toDate(16433), 1); + +/* toStartOfMonth */ + +SELECT 'toStartOfMonth'; +SELECT toStartOfMonth(toDateTime(1419800400), 'Asia/Istanbul'); +SELECT toStartOfMonth(toDateTime(1419800400), 'Europe/Paris'); +SELECT toStartOfMonth(toDateTime(1419800400), 'Europe/London'); +SELECT toStartOfMonth(toDateTime(1419800400), 'Asia/Tokyo'); +SELECT toStartOfMonth(toDateTime(1419800400), 'Pacific/Pitcairn'); +SELECT toStartOfMonth(toDate(16433), 'Asia/Istanbul'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfMonth(toDate(16433)); + +/* toStartOfQuarter */ + +SELECT 'toStartOfQuarter'; +SELECT toStartOfQuarter(toDateTime(1412106600), 'Asia/Istanbul'); +SELECT toStartOfQuarter(toDateTime(1412106600), 'Europe/Paris'); +SELECT 
toStartOfQuarter(toDateTime(1412106600), 'Europe/London'); +SELECT toStartOfQuarter(toDateTime(1412106600), 'Asia/Tokyo'); +SELECT toStartOfQuarter(toDateTime(1412106600), 'Pacific/Pitcairn'); +SELECT toStartOfQuarter(toDate(16343), 'Asia/Istanbul'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfQuarter(toDate(16343)); + +/* toStartOfYear */ + +SELECT 'toStartOfYear'; +SELECT toStartOfYear(toDateTime(1419800400), 'Asia/Istanbul'); +SELECT toStartOfYear(toDateTime(1419800400), 'Europe/Paris'); +SELECT toStartOfYear(toDateTime(1419800400), 'Europe/London'); +SELECT toStartOfYear(toDateTime(1419800400), 'Asia/Tokyo'); +SELECT toStartOfYear(toDateTime(1419800400), 'Pacific/Pitcairn'); +SELECT toStartOfYear(toDate(16433), 'Asia/Istanbul'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfYear(toDate(16433)); + +/* toTimeWithFixedDate */ + +SELECT 'toTimeWithFixedDate'; +SELECT toString(toTimeWithFixedDate(toDateTime(1420102800), 'Asia/Istanbul'), 'Asia/Istanbul'), toString(toTimeWithFixedDate(toDateTime(1428310800), 'Asia/Istanbul'), 'Asia/Istanbul'); +SELECT toString(toTimeWithFixedDate(toDateTime(1420102800), 'Europe/Paris'), 'Europe/Paris'), toString(toTimeWithFixedDate(toDateTime(1428310800), 'Europe/Paris'), 'Europe/Paris'); +SELECT toString(toTimeWithFixedDate(toDateTime(1420102800), 'Europe/London'), 'Europe/London'), toString(toTimeWithFixedDate(toDateTime(1428310800), 'Europe/London'), 'Europe/London'); +SELECT toString(toTimeWithFixedDate(toDateTime(1420102800), 'Asia/Tokyo'), 'Asia/Tokyo'), toString(toTimeWithFixedDate(toDateTime(1428310800), 'Asia/Tokyo'), 'Asia/Tokyo'); +SELECT toString(toTimeWithFixedDate(toDateTime(1420102800), 'Pacific/Pitcairn'), 'Pacific/Pitcairn'), toString(toTimeWithFixedDate(toDateTime(1428310800), 'Pacific/Pitcairn'), 'Pacific/Pitcairn'); + +/* toYear */ + +SELECT 'toYear'; +SELECT toYear(toDateTime(1412106600), 'Asia/Istanbul'); +SELECT toYear(toDateTime(1412106600), 'Europe/Paris'); +SELECT toYear(toDateTime(1412106600), 'Europe/London'); +SELECT toYear(toDateTime(1412106600), 'Asia/Tokyo'); +SELECT toYear(toDateTime(1412106600), 'Pacific/Pitcairn'); + +/* toMonth */ + +SELECT 'toMonth'; +SELECT toMonth(toDateTime(1412106600), 'Asia/Istanbul'); +SELECT toMonth(toDateTime(1412106600), 'Europe/Paris'); +SELECT toMonth(toDateTime(1412106600), 'Europe/London'); +SELECT toMonth(toDateTime(1412106600), 'Asia/Tokyo'); +SELECT toMonth(toDateTime(1412106600), 'Pacific/Pitcairn'); + +/* toDayOfMonth */ + +SELECT 'toDayOfMonth'; +SELECT toDayOfMonth(toDateTime(1412106600), 'Asia/Istanbul'); +SELECT toDayOfMonth(toDateTime(1412106600), 'Europe/Paris'); +SELECT toDayOfMonth(toDateTime(1412106600), 'Europe/London'); +SELECT toDayOfMonth(toDateTime(1412106600), 'Asia/Tokyo'); +SELECT toDayOfMonth(toDateTime(1412106600), 'Pacific/Pitcairn'); + +/* toDayOfWeek */ + +SELECT 'toDayOfWeek'; +SELECT toDayOfWeek(toDateTime(1412106600), 0, 'Asia/Istanbul'); +SELECT toDayOfWeek(toDateTime(1412106600), 0, 'Europe/Paris'); +SELECT toDayOfWeek(toDateTime(1412106600), 0, 'Europe/London'); +SELECT toDayOfWeek(toDateTime(1412106600), 0, 'Asia/Tokyo'); +SELECT toDayOfWeek(toDateTime(1412106600), 0, 'Pacific/Pitcairn'); + +/* toHour */ + +SELECT 'toHour'; +SELECT toHour(toDateTime(1412106600), 'Asia/Istanbul'); +SELECT toHour(toDateTime(1412106600), 'Europe/Paris'); +SELECT toHour(toDateTime(1412106600), 'Europe/London'); +SELECT toHour(toDateTime(1412106600), 'Asia/Tokyo'); +SELECT toHour(toDateTime(1412106600), 'Pacific/Pitcairn'); + +/* toMinute */ + 
+SELECT 'toMinute'; +SELECT toMinute(toDateTime(1412106600), 'Asia/Istanbul'); +SELECT toMinute(toDateTime(1412106600), 'Europe/Paris'); +SELECT toMinute(toDateTime(1412106600), 'Europe/London'); +SELECT toMinute(toDateTime(1412106600), 'Asia/Tokyo'); +SELECT toMinute(toDateTime(1412106600), 'Pacific/Pitcairn'); + +/* toSecond */ + +SELECT 'toSecond'; +SELECT toSecond(toDateTime(1412106600), 'Asia/Istanbul'); +SELECT toSecond(toDateTime(1412106600), 'Europe/Paris'); +SELECT toSecond(toDateTime(1412106600), 'Europe/London'); +SELECT toSecond(toDateTime(1412106600), 'Asia/Tokyo'); +SELECT toSecond(toDateTime(1412106600), 'Pacific/Pitcairn'); + +/* toStartOfMinute */ + +SELECT 'toStartOfMinute'; +SELECT toString(toStartOfMinute(toDateTime(1549483055), 'Asia/Istanbul'), 'Asia/Istanbul'); +SELECT toString(toStartOfMinute(toDateTime(1549483055), 'Europe/Paris'), 'Europe/Paris'); +SELECT toString(toStartOfMinute(toDateTime(1549483055), 'Europe/London'), 'Europe/London'); +SELECT toString(toStartOfMinute(toDateTime(1549483055), 'Asia/Tokyo'), 'Asia/Tokyo'); +SELECT toString(toStartOfMinute(toDateTime(1549483055), 'Pacific/Pitcairn'), 'Pacific/Pitcairn'); + +/* toStartOfFiveMinutes */ + +SELECT 'toStartOfFiveMinutes'; +SELECT toString(toStartOfFiveMinutes(toDateTime(1549483055), 'Asia/Istanbul'), 'Asia/Istanbul'); +SELECT toString(toStartOfFiveMinutes(toDateTime(1549483055), 'Europe/Paris'), 'Europe/Paris'); +SELECT toString(toStartOfFiveMinutes(toDateTime(1549483055), 'Europe/London'), 'Europe/London'); +SELECT toString(toStartOfFiveMinutes(toDateTime(1549483055), 'Asia/Tokyo'), 'Asia/Tokyo'); +SELECT toString(toStartOfFiveMinutes(toDateTime(1549483055), 'Pacific/Pitcairn'), 'Pacific/Pitcairn'); + +/* toStartOfTenMinutes */ + +SELECT 'toStartOfTenMinutes'; +SELECT toString(toStartOfTenMinutes(toDateTime(1549483055), 'Asia/Istanbul'), 'Asia/Istanbul'); +SELECT toString(toStartOfTenMinutes(toDateTime(1549483055), 'Europe/Paris'), 'Europe/Paris'); +SELECT toString(toStartOfTenMinutes(toDateTime(1549483055), 'Europe/London'), 'Europe/London'); +SELECT toString(toStartOfTenMinutes(toDateTime(1549483055), 'Asia/Tokyo'), 'Asia/Tokyo'); +SELECT toString(toStartOfTenMinutes(toDateTime(1549483055), 'Pacific/Pitcairn'), 'Pacific/Pitcairn'); + +/* toStartOfFifteenMinutes */ + +SELECT 'toStartOfFifteenMinutes'; +SELECT toString(toStartOfFifteenMinutes(toDateTime(1549483055), 'Asia/Istanbul'), 'Asia/Istanbul'); +SELECT toString(toStartOfFifteenMinutes(toDateTime(1549483055), 'Europe/Paris'), 'Europe/Paris'); +SELECT toString(toStartOfFifteenMinutes(toDateTime(1549483055), 'Europe/London'), 'Europe/London'); +SELECT toString(toStartOfFifteenMinutes(toDateTime(1549483055), 'Asia/Tokyo'), 'Asia/Tokyo'); +SELECT toString(toStartOfFifteenMinutes(toDateTime(1549483055), 'Pacific/Pitcairn'), 'Pacific/Pitcairn'); + +/* toStartOfHour */ + +SELECT 'toStartOfHour'; +SELECT toString(toStartOfHour(toDateTime(1549483055), 'Asia/Istanbul'), 'Asia/Istanbul'); +SELECT toString(toStartOfHour(toDateTime(1549483055), 'Europe/Paris'), 'Europe/Paris'); +SELECT toString(toStartOfHour(toDateTime(1549483055), 'Europe/London'), 'Europe/London'); +SELECT toString(toStartOfHour(toDateTime(1549483055), 'Asia/Tokyo'), 'Asia/Tokyo'); +SELECT toString(toStartOfHour(toDateTime(1549483055), 'Pacific/Pitcairn'), 'Pacific/Pitcairn'); + +/* toStartOfInterval */ + +SELECT 'toStartOfInterval'; +SELECT toStartOfInterval(toDateTime(1549483055), INTERVAL 1 year, 'Asia/Istanbul'); +SELECT toStartOfInterval(toDateTime(1549483055), INTERVAL 2 year, 
+SELECT toStartOfInterval(toDateTime(1549483055), INTERVAL 1 year, 'Asia/Istanbul');
+SELECT toStartOfInterval(toDateTime(1549483055), INTERVAL 2 year, 'Asia/Istanbul');
+SELECT toStartOfInterval(toDateTime(1549483055), INTERVAL 5 year, 'Asia/Istanbul');
+SELECT toStartOfInterval(toDateTime(1549483055), INTERVAL 1 quarter, 'Asia/Istanbul');
+SELECT toStartOfInterval(toDateTime(1549483055), INTERVAL 2 quarter, 'Asia/Istanbul');
+SELECT toStartOfInterval(toDateTime(1549483055), INTERVAL 3 quarter, 'Asia/Istanbul');
+SELECT toStartOfInterval(toDateTime(1549483055), INTERVAL 1 month, 'Asia/Istanbul');
+SELECT toStartOfInterval(toDateTime(1549483055), INTERVAL 2 month, 'Asia/Istanbul');
+SELECT toStartOfInterval(toDateTime(1549483055), INTERVAL 5 month, 'Asia/Istanbul');
+SELECT toStartOfInterval(toDateTime(1549483055), INTERVAL 1 week, 'Asia/Istanbul');
+SELECT toStartOfInterval(toDateTime(1549483055), INTERVAL 2 week, 'Asia/Istanbul');
+SELECT toStartOfInterval(toDateTime(1549483055), INTERVAL 6 week, 'Asia/Istanbul');
+SELECT toString(toStartOfInterval(toDateTime(1549483055), INTERVAL 1 day, 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toString(toStartOfInterval(toDateTime(1549483055), INTERVAL 2 day, 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toString(toStartOfInterval(toDateTime(1549483055), INTERVAL 5 day, 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toString(toStartOfInterval(toDateTime(1549483055), INTERVAL 1 hour, 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toString(toStartOfInterval(toDateTime(1549483055), INTERVAL 2 hour, 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toString(toStartOfInterval(toDateTime(1549483055), INTERVAL 6 hour, 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toString(toStartOfInterval(toDateTime(1549483055), INTERVAL 24 hour, 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toString(toStartOfInterval(toDateTime(1549483055), INTERVAL 1 minute, 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toString(toStartOfInterval(toDateTime(1549483055), INTERVAL 2 minute, 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toString(toStartOfInterval(toDateTime(1549483055), INTERVAL 5 minute, 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toString(toStartOfInterval(toDateTime(1549483055), INTERVAL 20 minute, 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toString(toStartOfInterval(toDateTime(1549483055), INTERVAL 90 minute, 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toString(toStartOfInterval(toDateTime(1549483055), INTERVAL 1 second, 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toString(toStartOfInterval(toDateTime(1549483055), INTERVAL 2 second, 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toString(toStartOfInterval(toDateTime(1549483055), INTERVAL 5 second, 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toStartOfInterval(toDate(17933), INTERVAL 1 year);
+SELECT toStartOfInterval(toDate(17933), INTERVAL 2 year);
+SELECT toStartOfInterval(toDate(17933), INTERVAL 5 year);
+SELECT toStartOfInterval(toDate(17933), INTERVAL 1 quarter);
+SELECT toStartOfInterval(toDate(17933), INTERVAL 2 quarter);
+SELECT toStartOfInterval(toDate(17933), INTERVAL 3 quarter);
+SELECT toStartOfInterval(toDate(17933), INTERVAL 1 month);
+SELECT toStartOfInterval(toDate(17933), INTERVAL 2 month);
+SELECT toStartOfInterval(toDate(17933), INTERVAL 5 month);
+SELECT toStartOfInterval(toDate(17933), INTERVAL 1 week);
+SELECT toStartOfInterval(toDate(17933), INTERVAL 2 week);
+SELECT toStartOfInterval(toDate(17933), INTERVAL 6 week);
+SELECT toString(toStartOfInterval(toDate(17933), INTERVAL 1 day, 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toString(toStartOfInterval(toDate(17933), INTERVAL 2 day, 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toString(toStartOfInterval(toDate(17933), INTERVAL 5 day, 'Asia/Istanbul'), 'Asia/Istanbul');
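+
+-- The toRelative*Num functions count units elapsed since an implementation-defined
+-- starting point, so the tests below subtract the value at toDateTime(0) to obtain a
+-- stable count between the Unix epoch and the probed timestamp in each zone.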
+
+/* toRelativeYearNum */
+
+SELECT 'toRelativeYearNum';
+SELECT toRelativeYearNum(toDateTime(1412106600), 'Asia/Istanbul') - toRelativeYearNum(toDateTime(0), 'Asia/Istanbul');
+SELECT toRelativeYearNum(toDateTime(1412106600), 'Europe/Paris') - toRelativeYearNum(toDateTime(0), 'Europe/Paris');
+SELECT toRelativeYearNum(toDateTime(1412106600), 'Europe/London') - toRelativeYearNum(toDateTime(0), 'Europe/London');
+SELECT toRelativeYearNum(toDateTime(1412106600), 'Asia/Tokyo') - toRelativeYearNum(toDateTime(0), 'Asia/Tokyo');
+SELECT toRelativeYearNum(toDateTime(1412106600), 'Pacific/Pitcairn') - toRelativeYearNum(toDateTime(0), 'Pacific/Pitcairn');
+
+/* toRelativeMonthNum */
+
+SELECT 'toRelativeMonthNum';
+SELECT toRelativeMonthNum(toDateTime(1412106600), 'Asia/Istanbul') - toRelativeMonthNum(toDateTime(0), 'Asia/Istanbul');
+SELECT toRelativeMonthNum(toDateTime(1412106600), 'Europe/Paris') - toRelativeMonthNum(toDateTime(0), 'Europe/Paris');
+SELECT toRelativeMonthNum(toDateTime(1412106600), 'Europe/London') - toRelativeMonthNum(toDateTime(0), 'Europe/London');
+SELECT toRelativeMonthNum(toDateTime(1412106600), 'Asia/Tokyo') - toRelativeMonthNum(toDateTime(0), 'Asia/Tokyo');
+SELECT toRelativeMonthNum(toDateTime(1412106600), 'Pacific/Pitcairn') - toRelativeMonthNum(toDateTime(0), 'Pacific/Pitcairn');
+
+/* toRelativeWeekNum */
+
+SELECT 'toRelativeWeekNum';
+SELECT toRelativeWeekNum(toDateTime(1412106600), 'Asia/Istanbul') - toRelativeWeekNum(toDateTime(0), 'Asia/Istanbul');
+SELECT toRelativeWeekNum(toDateTime(1412106600), 'Europe/Paris') - toRelativeWeekNum(toDateTime(0), 'Europe/Paris');
+SELECT toRelativeWeekNum(toDateTime(1412106600), 'Europe/London') - toRelativeWeekNum(toDateTime(0), 'Europe/London');
+SELECT toRelativeWeekNum(toDateTime(1412106600), 'Asia/Tokyo') - toRelativeWeekNum(toDateTime(0), 'Asia/Tokyo');
+SELECT toRelativeWeekNum(toDateTime(1412106600), 'Pacific/Pitcairn') - toRelativeWeekNum(toDateTime(0), 'Pacific/Pitcairn');
+
+/* toRelativeDayNum */
+
+SELECT 'toRelativeDayNum';
+SELECT toRelativeDayNum(toDateTime(1412106600), 'Asia/Istanbul') - toRelativeDayNum(toDateTime(0), 'Asia/Istanbul');
+SELECT toRelativeDayNum(toDateTime(1412106600), 'Europe/Paris') - toRelativeDayNum(toDateTime(0), 'Europe/Paris');
+SELECT toRelativeDayNum(toDateTime(1412106600), 'Europe/London') - toRelativeDayNum(toDateTime(0), 'Europe/London');
+SELECT toRelativeDayNum(toDateTime(1412106600), 'Asia/Tokyo') - toRelativeDayNum(toDateTime(0), 'Asia/Tokyo');
+-- NOTE: toRelativeDayNum(toDateTime(0), 'Pacific/Pitcairn') overflows from -1 to 65535
+SELECT toUInt16(toRelativeDayNum(toDateTime(1412106600), 'Pacific/Pitcairn') - toRelativeDayNum(toDateTime(0), 'Pacific/Pitcairn'));
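+-- (Pacific/Pitcairn was at UTC-8:30 in 1970, so toDateTime(0) falls on 1969-12-31 in
+-- local time, one day before the epoch date; the resulting -1 wraps around in the
+-- UInt16 return type, which is why the difference above is folded back with toUInt16.)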
+
+/* toRelativeHourNum */
+
+SELECT 'toRelativeHourNum';
+SELECT toRelativeHourNum(toDateTime(1412106600), 'Asia/Istanbul') - toRelativeHourNum(toDateTime(0), 'Asia/Istanbul');
+SELECT toRelativeHourNum(toDateTime(1412106600), 'Europe/Paris') - toRelativeHourNum(toDateTime(0), 'Europe/Paris');
+SELECT toRelativeHourNum(toDateTime(1412106600), 'Europe/London') - toRelativeHourNum(toDateTime(0), 'Europe/London');
+SELECT toRelativeHourNum(toDateTime(1412106600), 'Asia/Tokyo') - toRelativeHourNum(toDateTime(0), 'Asia/Tokyo');
+SELECT toRelativeHourNum(toDateTime(1412106600), 'Pacific/Pitcairn') - toRelativeHourNum(toDateTime(0), 'Pacific/Pitcairn');
+
+/* toRelativeMinuteNum */
+
+SELECT 'toRelativeMinuteNum';
+SELECT toRelativeMinuteNum(toDateTime(1412106600), 'Asia/Istanbul') - toRelativeMinuteNum(toDateTime(0), 'Asia/Istanbul');
+SELECT toRelativeMinuteNum(toDateTime(1412106600), 'Europe/Paris') - toRelativeMinuteNum(toDateTime(0), 'Europe/Paris');
+SELECT toRelativeMinuteNum(toDateTime(1412106600), 'Europe/London') - toRelativeMinuteNum(toDateTime(0), 'Europe/London');
+SELECT toRelativeMinuteNum(toDateTime(1412106600), 'Asia/Tokyo') - toRelativeMinuteNum(toDateTime(0), 'Asia/Tokyo');
+SELECT toRelativeMinuteNum(toDateTime(1412106600), 'Pacific/Pitcairn') - toRelativeMinuteNum(toDateTime(0), 'Pacific/Pitcairn');
+
+/* toRelativeSecondNum */
+
+SELECT 'toRelativeSecondNum';
+SELECT toRelativeSecondNum(toDateTime(1412106600), 'Asia/Istanbul') - toRelativeSecondNum(toDateTime(0), 'Asia/Istanbul');
+SELECT toRelativeSecondNum(toDateTime(1412106600), 'Europe/Paris') - toRelativeSecondNum(toDateTime(0), 'Europe/Paris');
+SELECT toRelativeSecondNum(toDateTime(1412106600), 'Europe/London') - toRelativeSecondNum(toDateTime(0), 'Europe/London');
+SELECT toRelativeSecondNum(toDateTime(1412106600), 'Asia/Tokyo') - toRelativeSecondNum(toDateTime(0), 'Asia/Tokyo');
+SELECT toRelativeSecondNum(toDateTime(1412106600), 'Pacific/Pitcairn') - toRelativeSecondNum(toDateTime(0), 'Pacific/Pitcairn');
+
+/* toDate */
+
+SELECT 'toDate';
+SELECT toDate(toDateTime(1412106600), 'Asia/Istanbul');
+SELECT toDate(toDateTime(1412106600), 'Europe/Paris');
+SELECT toDate(toDateTime(1412106600), 'Europe/London');
+SELECT toDate(toDateTime(1412106600), 'Asia/Tokyo');
+SELECT toDate(toDateTime(1412106600), 'Pacific/Pitcairn');
+
+SELECT toDate(1412106600, 'Asia/Istanbul');
+SELECT toDate(1412106600, 'Europe/Paris');
+SELECT toDate(1412106600, 'Europe/London');
+SELECT toDate(1412106600, 'Asia/Tokyo');
+SELECT toDate(1412106600, 'Pacific/Pitcairn');
+
+SELECT toDate(16343);
+
+/* toString */
+
+SELECT 'toString';
+SELECT toString(toDateTime(1436956200), 'Asia/Istanbul');
+SELECT toString(toDateTime(1436956200), 'Europe/Paris');
+SELECT toString(toDateTime(1436956200), 'Europe/London');
+SELECT toString(toDateTime(1436956200), 'Asia/Tokyo');
+SELECT toString(toDateTime(1436956200), 'Pacific/Pitcairn');
+
+/* toUnixTimestamp */
+
+SELECT 'toUnixTimestamp';
+SELECT toUnixTimestamp(toString(toDateTime(1426415400), 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toUnixTimestamp(toString(toDateTime(1426415400), 'Asia/Istanbul'), 'Europe/Paris');
+SELECT toUnixTimestamp(toString(toDateTime(1426415400), 'Asia/Istanbul'), 'Europe/London');
+SELECT toUnixTimestamp(toString(toDateTime(1426415400), 'Asia/Istanbul'), 'Asia/Tokyo');
+SELECT toUnixTimestamp(toString(toDateTime(1426415400), 'Asia/Istanbul'), 'Pacific/Pitcairn');
+
+SELECT toUnixTimestamp(toString(toDateTime(1426415400), 'Asia/Istanbul'), 'Asia/Istanbul');
+SELECT toUnixTimestamp(toString(toDateTime(1426415400), 'Europe/Paris'), 'Europe/Paris');
+SELECT toUnixTimestamp(toString(toDateTime(1426415400), 'Europe/London'), 'Europe/London');
+SELECT toUnixTimestamp(toString(toDateTime(1426415400), 'Asia/Tokyo'), 'Asia/Tokyo');
+SELECT toUnixTimestamp(toString(toDateTime(1426415400), 'Pacific/Pitcairn'), 'Pacific/Pitcairn');
+
+/* date_trunc */
+
+SELECT 'date_trunc';
+
+SELECT date_trunc('year', toDateTime('2020-01-01 04:11:22', 'Europe/London'), 'America/Vancouver');
+SELECT date_trunc('year', toDateTime('2020-01-01 12:11:22', 'Europe/London'), 'Europe/London');
+SELECT date_trunc('year', toDateTime('2020-01-01 20:11:22', 'Europe/London'), 'Asia/Tokyo');
+SELECT date_trunc('quarter', toDateTime('2020-01-01 04:11:22', 'Europe/London'),
'America/Vancouver'); +SELECT date_trunc('quarter', toDateTime('2020-01-01 12:11:22', 'Europe/London'), 'Europe/London'); +SELECT date_trunc('quarter', toDateTime('2020-01-01 20:11:22', 'Europe/London'), 'Asia/Tokyo'); +SELECT date_trunc('month', toDateTime('2020-01-01 04:11:22', 'Europe/London'), 'America/Vancouver'); +SELECT date_trunc('month', toDateTime('2020-01-01 12:11:22', 'Europe/London'), 'Europe/London'); +SELECT date_trunc('month', toDateTime('2020-01-01 20:11:22', 'Europe/London'), 'Asia/Tokyo'); +SELECT date_trunc('week', toDateTime('2020-01-01 04:11:22', 'Europe/London'), 'America/Vancouver'); +SELECT date_trunc('week', toDateTime('2020-01-01 12:11:22', 'Europe/London'), 'Europe/London'); +SELECT date_trunc('week', toDateTime('2020-01-01 20:11:22', 'Europe/London'), 'Asia/Tokyo'); +SELECT date_trunc('day', toDateTime('2020-01-01 04:11:22', 'Europe/London'), 'America/Vancouver'); +SELECT date_trunc('day', toDateTime('2020-01-01 12:11:22', 'Europe/London'), 'Europe/London'); +SELECT date_trunc('day', toDateTime('2020-01-01 20:11:22', 'Europe/London'), 'Asia/Tokyo'); +SELECT date_trunc('hour', toDateTime('2020-01-01 04:11:22', 'Europe/London'), 'America/Vancouver'); +SELECT date_trunc('hour', toDateTime('2020-01-01 12:11:22', 'Europe/London'), 'Europe/London'); +SELECT date_trunc('hour', toDateTime('2020-01-01 20:11:22', 'Europe/London'), 'Asia/Tokyo'); +SELECT date_trunc('minute', toDateTime('2020-01-01 04:11:22', 'Europe/London'), 'America/Vancouver'); +SELECT date_trunc('minute', toDateTime('2020-01-01 12:11:22', 'Europe/London'), 'Europe/London'); +SELECT date_trunc('minute', toDateTime('2020-01-01 20:11:22', 'Europe/London'), 'Asia/Tokyo'); +SELECT date_trunc('second', toDateTime('2020-01-01 04:11:22', 'Europe/London'), 'America/Vancouver'); +SELECT date_trunc('second', toDateTime('2020-01-01 12:11:22', 'Europe/London'), 'Europe/London'); +SELECT date_trunc('second', toDateTime('2020-01-01 20:11:22', 'Europe/London'), 'Asia/Tokyo'); + +SELECT date_trunc('year', toDateTime64('2020-01-01 04:11:22.123', 3, 'Europe/London'), 'America/Vancouver'); +SELECT date_trunc('year', toDateTime64('2020-01-01 12:11:22.123', 3, 'Europe/London'), 'Europe/London'); +SELECT date_trunc('year', toDateTime64('2020-01-01 20:11:22.123', 3, 'Europe/London'), 'Asia/Tokyo'); +SELECT date_trunc('quarter', toDateTime64('2020-01-01 04:11:22.123', 3, 'Europe/London'), 'America/Vancouver'); +SELECT date_trunc('quarter', toDateTime64('2020-01-01 12:11:22.123', 3, 'Europe/London'), 'Europe/London'); +SELECT date_trunc('quarter', toDateTime64('2020-01-01 20:11:22.123', 3, 'Europe/London'), 'Asia/Tokyo'); +SELECT date_trunc('month', toDateTime64('2020-01-01 04:11:22.123', 3, 'Europe/London'), 'America/Vancouver'); +SELECT date_trunc('month', toDateTime64('2020-01-01 12:11:22.123', 3, 'Europe/London'), 'Europe/London'); +SELECT date_trunc('month', toDateTime64('2020-01-01 20:11:22.123', 3, 'Europe/London'), 'Asia/Tokyo'); +SELECT date_trunc('week', toDateTime64('2020-01-01 04:11:22.123', 3, 'Europe/London'), 'America/Vancouver'); +SELECT date_trunc('week', toDateTime64('2020-01-01 12:11:22.123', 3, 'Europe/London'), 'Europe/London'); +SELECT date_trunc('week', toDateTime64('2020-01-01 20:11:22.123', 3, 'Europe/London'), 'Asia/Tokyo'); +SELECT date_trunc('day', toDateTime64('2020-01-01 04:11:22.123', 3, 'Europe/London'), 'America/Vancouver'); +SELECT date_trunc('day', toDateTime64('2020-01-01 12:11:22.123', 3, 'Europe/London'), 'Europe/London'); +SELECT date_trunc('day', toDateTime64('2020-01-01 20:11:22.123', 
3, 'Europe/London'), 'Asia/Tokyo'); +SELECT date_trunc('hour', toDateTime64('2020-01-01 04:11:22.123', 3, 'Europe/London'), 'America/Vancouver'); +SELECT date_trunc('hour', toDateTime64('2020-01-01 12:11:22.123', 3, 'Europe/London'), 'Europe/London'); +SELECT date_trunc('hour', toDateTime64('2020-01-01 20:11:22.123', 3, 'Europe/London'), 'Asia/Tokyo'); +SELECT date_trunc('minute', toDateTime64('2020-01-01 04:11:22.123', 3, 'Europe/London'), 'America/Vancouver'); +SELECT date_trunc('minute', toDateTime64('2020-01-01 12:11:22.123', 3, 'Europe/London'), 'Europe/London'); +SELECT date_trunc('minute', toDateTime64('2020-01-01 20:11:22.123', 3, 'Europe/London'), 'Asia/Tokyo'); +SELECT date_trunc('second', toDateTime64('2020-01-01 04:11:22.123', 3, 'Europe/London'), 'America/Vancouver'); +SELECT date_trunc('second', toDateTime64('2020-01-01 12:11:22.123', 3, 'Europe/London'), 'Europe/London'); +SELECT date_trunc('second', toDateTime64('2020-01-01 20:11:22.123', 3, 'Europe/London'), 'Asia/Tokyo'); + +SELECT date_trunc('year', toDate('2020-01-01', 'Europe/London')); +SELECT date_trunc('quarter', toDate('2020-01-01', 'Europe/London')); +SELECT date_trunc('month', toDate('2020-01-01', 'Europe/London')); +SELECT date_trunc('week', toDate('2020-01-01', 'Europe/London')); +SELECT date_trunc('day', toDate('2020-01-01', 'Europe/London'), 'America/Vancouver'); diff --git a/parser/testdata/00190_non_constant_array_of_constant_data/ast.json b/parser/testdata/00190_non_constant_array_of_constant_data/ast.json new file mode 100644 index 000000000..5e503d66b --- /dev/null +++ b/parser/testdata/00190_non_constant_array_of_constant_data/ast.json @@ -0,0 +1,151 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayFilter (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function notEmpty (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal 'hello'" + }, + { + "explain": " Literal Array_['']" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " ArrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_0] (alias elem)" + }, + { + "explain": " Function arrayMap (alias unused) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function concat (children 1)" + 
}, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal 'hello'" + }, + { + "explain": " Literal Array_['']" + }, + { + "explain": " Function not (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function ignore (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier elem" + } + ], + + "rows": 43, + + "statistics": + { + "elapsed": 0.001767069, + "rows_read": 43, + "bytes_read": 1774 + } +} diff --git a/parser/testdata/00190_non_constant_array_of_constant_data/metadata.json b/parser/testdata/00190_non_constant_array_of_constant_data/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00190_non_constant_array_of_constant_data/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00190_non_constant_array_of_constant_data/query.sql b/parser/testdata/00190_non_constant_array_of_constant_data/query.sql new file mode 100644 index 000000000..53f9fe3f5 --- /dev/null +++ b/parser/testdata/00190_non_constant_array_of_constant_data/query.sql @@ -0,0 +1,11 @@ +SELECT arrayFilter(x -> notEmpty(concat(x, 'hello')), ['']) FROM system.one ARRAY JOIN [0] AS elem, arrayMap(x -> concat(x, 'hello'), ['']) AS unused WHERE NOT ignore(elem); +SELECT '---'; +SELECT arrayFilter(x -> x = 'hello', ['']) FROM system.one ARRAY JOIN [0] AS elem WHERE NOT ignore(elem) AND arrayExists(x -> x = 'hello', ['']); +SELECT '---'; +SELECT arrayJoin([0]), replicate('hello', [1]) WHERE NOT ignore(replicate('hello', [1])); +SELECT '---'; +SELECT arrayJoin([0]), replicate('hello', emptyArrayString()) FROM system.one ARRAY JOIN emptyArrayString() AS unused WHERE NOT ignore(replicate('hello', emptyArrayString())); +SELECT '---'; +SELECT arrayJoin([0]), replicate('hello', emptyArrayString()) WHERE NOT ignore(replicate('hello', emptyArrayString())); +SELECT '---'; +SELECT replicate('hello', emptyArrayString()) FROM system.one ARRAY JOIN emptyArrayString() AS unused WHERE NOT ignore(replicate('hello', emptyArrayString())); diff --git a/parser/testdata/00191_aggregating_merge_tree_and_final/ast.json b/parser/testdata/00191_aggregating_merge_tree_and_final/ast.json new file mode 100644 index 000000000..9390aa77e --- /dev/null +++ b/parser/testdata/00191_aggregating_merge_tree_and_final/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery aggregating_00191 (children 1)" + }, + { + "explain": " Identifier aggregating_00191" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001421653, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/00191_aggregating_merge_tree_and_final/metadata.json b/parser/testdata/00191_aggregating_merge_tree_and_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00191_aggregating_merge_tree_and_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00191_aggregating_merge_tree_and_final/query.sql b/parser/testdata/00191_aggregating_merge_tree_and_final/query.sql new file mode 100644 index 000000000..4f73a9e9a --- /dev/null +++ b/parser/testdata/00191_aggregating_merge_tree_and_final/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS aggregating_00191; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE aggregating_00191 (d Date DEFAULT '2000-01-01', k UInt64, u AggregateFunction(uniq, UInt64)) ENGINE = 
AggregatingMergeTree(d, k, 8192); + +INSERT INTO aggregating_00191 (k, u) SELECT intDiv(number, 100) AS k, uniqState(toUInt64(number % 100)) AS u FROM (SELECT * FROM system.numbers LIMIT 1000) GROUP BY k; +INSERT INTO aggregating_00191 (k, u) SELECT intDiv(number, 100) AS k, uniqState(toUInt64(number % 100) + 50) AS u FROM (SELECT * FROM system.numbers LIMIT 500, 1000) GROUP BY k; + +SELECT k, finalizeAggregation(u) FROM aggregating_00191 FINAL order by k; + +OPTIMIZE TABLE aggregating_00191 FINAL; + +SELECT k, finalizeAggregation(u) FROM aggregating_00191 order by k; +SELECT k, finalizeAggregation(u) FROM aggregating_00191 FINAL order by k; + +DROP TABLE aggregating_00191; diff --git a/parser/testdata/00192_least_greatest/ast.json b/parser/testdata/00192_least_greatest/ast.json new file mode 100644 index 000000000..4e326b238 --- /dev/null +++ b/parser/testdata/00192_least_greatest/ast.json @@ -0,0 +1,142 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 8)" + }, + { + "explain": " Literal UInt64_1 (alias x)" + }, + { + "explain": " Literal UInt64_2 (alias y)" + }, + { + "explain": " Function least (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Function greatest (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Function least (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Function greatest (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Function greatest (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function least (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + } + ], + + "rows": 40, + + "statistics": + { + "elapsed": 0.001427949, + "rows_read": 40, + "bytes_read": 1476 + } +} diff --git a/parser/testdata/00192_least_greatest/metadata.json b/parser/testdata/00192_least_greatest/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00192_least_greatest/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00192_least_greatest/query.sql b/parser/testdata/00192_least_greatest/query.sql new file mode 100644 index 000000000..efc75a5ae --- /dev/null +++ 
b/parser/testdata/00192_least_greatest/query.sql @@ -0,0 +1,12 @@ +SELECT 1 AS x, 2 AS y, least(x, y), greatest(x, y), least(x, materialize(y)), greatest(materialize(x), y), greatest(materialize(x), materialize(y)), toTypeName(least(x, y)); +SELECT 1.1 AS x, 2 AS y, least(x, y), greatest(x, y), least(x, materialize(y)), greatest(materialize(x), y), greatest(materialize(x), materialize(y)), toTypeName(least(x, y)); +SELECT -1 AS x, 2 AS y, least(x, y), greatest(x, y), least(x, materialize(y)), greatest(materialize(x), y), greatest(materialize(x), materialize(y)), toTypeName(least(x, y)); +SELECT 1.0 AS x, 2.0 AS y, least(x, y), greatest(x, y), least(x, materialize(y)), greatest(materialize(x), y), greatest(materialize(x), materialize(y)), toTypeName(least(x, y)); +SELECT 1 AS x, 2000 AS y, least(x, y), greatest(x, y), least(x, materialize(y)), greatest(materialize(x), y), greatest(materialize(x), materialize(y)), toTypeName(least(x, y)); +SELECT 1 AS x, 200000 AS y, least(x, y), greatest(x, y), least(x, materialize(y)), greatest(materialize(x), y), greatest(materialize(x), materialize(y)), toTypeName(least(x, y)); +SELECT 1 AS x, 20000000000 AS y, least(x, y), greatest(x, y), least(x, materialize(y)), greatest(materialize(x), y), greatest(materialize(x), materialize(y)), toTypeName(least(x, y)); +SELECT 123 AS x, 123 AS y, least(x, y), greatest(x, y), least(x, materialize(y)), greatest(materialize(x), y), greatest(materialize(x), materialize(y)), toTypeName(least(x, y)); +SELECT toDate('2010-01-02') AS x, toDate('2011-02-03') AS y, least(x, y), greatest(x, y), least(x, materialize(y)), greatest(materialize(x), y), greatest(materialize(x), materialize(y)), toTypeName(least(x, y)); +SELECT toDateTime('2010-01-02 03:04:05') AS x, toDateTime('2011-02-03 04:05:06') AS y, least(x, y), greatest(x, y), least(x, materialize(y)), greatest(materialize(x), y), greatest(materialize(x), materialize(y)), toTypeName(least(x, y)); +SELECT greatest(now(), now() + 10) - now(); +SELECT greatest(today(), yesterday() + 10) - today(); diff --git a/parser/testdata/00193_parallel_replicas/ast.json b/parser/testdata/00193_parallel_replicas/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00193_parallel_replicas/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00193_parallel_replicas/metadata.json b/parser/testdata/00193_parallel_replicas/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00193_parallel_replicas/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00193_parallel_replicas/query.sql b/parser/testdata/00193_parallel_replicas/query.sql new file mode 100644 index 000000000..b19d9cfde --- /dev/null +++ b/parser/testdata/00193_parallel_replicas/query.sql @@ -0,0 +1,63 @@ +-- Tags: replica + +DROP TABLE IF EXISTS parallel_replicas; +DROP TABLE IF EXISTS parallel_replicas_backup; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE parallel_replicas (d Date DEFAULT today(), x UInt32, u UInt64, s String) ENGINE = MergeTree(d, cityHash64(u, s), (x, d, cityHash64(u, s)), 8192); +INSERT INTO parallel_replicas (x, u, s) VALUES (1, 2, 'A'),(3, 4, 'B'),(5, 6, 'C'),(7, 8, 'D'),(9,10,'E'); +INSERT INTO parallel_replicas (x, u, s) VALUES (11, 12, 'F'),(13, 14, 'G'),(15, 16, 'H'),(17, 18, 'I'),(19,20,'J'); +INSERT INTO parallel_replicas (x, u, s) VALUES (21, 22, 'K'),(23, 24, 'L'),(25, 26, 'M'),(27, 28, 'N'),(29,30,'O'); +INSERT INTO parallel_replicas (x, u, s) VALUES (31, 32, 
'P'),(33, 34, 'Q'),(35, 36, 'R'),(37, 38, 'S'),(39,40,'T'); +INSERT INTO parallel_replicas (x, u, s) VALUES (41, 42, 'U'),(43, 44, 'V'),(45, 46, 'W'),(47, 48, 'X'),(49,50,'Y'); +INSERT INTO parallel_replicas (x, u, s) VALUES (51, 52, 'Z'); + +/* +* Check that: +* - the table is not empty on each replica; +* - the data of all replicas taken together coincides with the contents of the parallel_replicas table. +*/ + +/* Two replicas */ + +SET enable_parallel_replicas=1, parallel_replicas_mode='sampling_key', max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree = 1; + +CREATE TABLE parallel_replicas_backup(d Date DEFAULT today(), x UInt32, u UInt64, s String) ENGINE = Memory; + +SET parallel_replicas_count = 2; + +SET parallel_replica_offset = 0; +INSERT INTO parallel_replicas_backup(d, x, u, s) SELECT d, x, u, s FROM parallel_replicas; +SELECT count() > 0 FROM parallel_replicas; + +SET parallel_replica_offset = 1; +INSERT INTO parallel_replicas_backup(d, x, u, s) SELECT d, x, u, s FROM parallel_replicas; +SELECT count() > 0 FROM parallel_replicas; + +SET parallel_replicas_count = 0; +SELECT x, u, s FROM parallel_replicas_backup ORDER BY x, u, s ASC; + +DROP TABLE parallel_replicas_backup; +CREATE TABLE parallel_replicas_backup(d Date DEFAULT today(), x UInt32, u UInt64, s String) ENGINE = Memory; + +/* Three replicas */ + +SET parallel_replicas_count = 3; + +SET parallel_replica_offset = 0; +INSERT INTO parallel_replicas_backup(d, x, u, s) SELECT d, x, u, s FROM parallel_replicas; +SELECT count() > 0 FROM parallel_replicas; + +SET parallel_replica_offset = 1; +INSERT INTO parallel_replicas_backup(d, x, u, s) SELECT d, x, u, s FROM parallel_replicas; +SELECT count() > 0 FROM parallel_replicas; + +SET parallel_replica_offset = 2; +INSERT INTO parallel_replicas_backup(d, x, u, s) SELECT d, x, u, s FROM parallel_replicas; +SELECT count() > 0 FROM parallel_replicas; + +SET parallel_replicas_count = 0; +SELECT x, u, s FROM parallel_replicas_backup ORDER BY x, u, s ASC; + +DROP TABLE parallel_replicas; +DROP TABLE parallel_replicas_backup; diff --git a/parser/testdata/00194_identity/ast.json b/parser/testdata/00194_identity/ast.json new file mode 100644 index 000000000..95b0b4e85 --- /dev/null +++ b/parser/testdata/00194_identity/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function identity (alias b) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias a)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Identifier b" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001419961, + "rows_read": 9, + "bytes_read": 329 + } +} diff --git a/parser/testdata/00194_identity/metadata.json b/parser/testdata/00194_identity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00194_identity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00194_identity/query.sql b/parser/testdata/00194_identity/query.sql new file mode 100644 index 000000000..cb2e7264b --- /dev/null +++ b/parser/testdata/00194_identity/query.sql @@ -0,0 +1 @@ +SELECT identity(1 AS a) AS b, a, b; diff --git a/parser/testdata/00195_shard_union_all_and_global_in/ast.json 
b/parser/testdata/00195_shard_union_all_and_global_in/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00195_shard_union_all_and_global_in/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00195_shard_union_all_and_global_in/metadata.json b/parser/testdata/00195_shard_union_all_and_global_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00195_shard_union_all_and_global_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00195_shard_union_all_and_global_in/query.sql b/parser/testdata/00195_shard_union_all_and_global_in/query.sql new file mode 100644 index 000000000..da21c5605 --- /dev/null +++ b/parser/testdata/00195_shard_union_all_and_global_in/query.sql @@ -0,0 +1,6 @@ +-- Tags: shard + +SELECT * FROM (SELECT * WHERE dummy GLOBAL IN (SELECT 0)); +SELECT * FROM (SELECT * WHERE dummy GLOBAL IN (SELECT toUInt8(number) FROM system.numbers LIMIT 10)); +SELECT * FROM (SELECT * FROM (SELECT * FROM system.numbers LIMIT 20) WHERE number GLOBAL IN (SELECT number FROM system.numbers LIMIT 10)); +SELECT * FROM (SELECT * FROM remote('127.0.0.{2,3,4}', system.one) WHERE dummy GLOBAL IN (SELECT * FROM remote('127.0.0.{2,3}', system.one))); diff --git a/parser/testdata/00196_float32_formatting/ast.json b/parser/testdata/00196_float32_formatting/ast.json new file mode 100644 index 000000000..f4000e0c3 --- /dev/null +++ b/parser/testdata/00196_float32_formatting/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_21.99" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001077417, + "rows_read": 5, + "bytes_read": 182 + } +} diff --git a/parser/testdata/00196_float32_formatting/metadata.json b/parser/testdata/00196_float32_formatting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00196_float32_formatting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00196_float32_formatting/query.sql b/parser/testdata/00196_float32_formatting/query.sql new file mode 100644 index 000000000..ae55298c2 --- /dev/null +++ b/parser/testdata/00196_float32_formatting/query.sql @@ -0,0 +1,8 @@ +SELECT 21.99; +SELECT toFloat32(21.99); +SELECT visibleWidth(21.99); +SELECT visibleWidth(toFloat32(21.99)); +SELECT materialize(21.99); +SELECT toFloat32(materialize(21.99)); +SELECT visibleWidth(materialize(21.99)); +SELECT visibleWidth(toFloat32(materialize(21.99))); diff --git a/parser/testdata/00197_if_fixed_string/ast.json b/parser/testdata/00197_if_fixed_string/ast.json new file mode 100644 index 000000000..8e5aade34 --- /dev/null +++ b/parser/testdata/00197_if_fixed_string/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + 
"explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function negate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001037841, + "rows_read": 23, + "bytes_read": 909 + } +} diff --git a/parser/testdata/00197_if_fixed_string/metadata.json b/parser/testdata/00197_if_fixed_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00197_if_fixed_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00197_if_fixed_string/query.sql b/parser/testdata/00197_if_fixed_string/query.sql new file mode 100644 index 000000000..c01232028 --- /dev/null +++ b/parser/testdata/00197_if_fixed_string/query.sql @@ -0,0 +1,9 @@ +SELECT number % 2 ? toString(number) : toString(-number) FROM system.numbers LIMIT 10; +SELECT number % 2 ? toFixedString(toString(number), 2) : toFixedString(toString(-number), 2) FROM system.numbers LIMIT 10; +SELECT number % 2 ? toFixedString(toString(number), 2) : toString(-number) FROM system.numbers LIMIT 10; +SELECT number % 2 ? toString(number) : toFixedString(toString(-number), 2) FROM system.numbers LIMIT 10; +SELECT number % 2 ? toString(number) : 'Hello' FROM system.numbers LIMIT 10; +SELECT number % 2 ? 'Hello' : toString(-number) FROM system.numbers LIMIT 10; +SELECT number % 2 ? 'Hello' : 'Goodbye' FROM system.numbers LIMIT 10; +SELECT number % 2 ? toFixedString(toString(number), 2) : 'Hello' FROM system.numbers LIMIT 10; +SELECT number % 2 ? 
'Hello' : toFixedString(toString(-number), 2) FROM system.numbers LIMIT 10; diff --git a/parser/testdata/00198_group_by_empty_arrays/ast.json b/parser/testdata/00198_group_by_empty_arrays/ast.json new file mode 100644 index 000000000..658d4fb86 --- /dev/null +++ b/parser/testdata/00198_group_by_empty_arrays/ast.json @@ -0,0 +1,127 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function range (alias k) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier k" + } + ], + + "rows": 35, + + "statistics": + { + "elapsed": 0.001298021, + "rows_read": 35, + "bytes_read": 1435 + } +} diff --git a/parser/testdata/00198_group_by_empty_arrays/metadata.json b/parser/testdata/00198_group_by_empty_arrays/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00198_group_by_empty_arrays/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00198_group_by_empty_arrays/query.sql b/parser/testdata/00198_group_by_empty_arrays/query.sql new file mode 100644 index 000000000..690fe7b43 --- /dev/null +++ b/parser/testdata/00198_group_by_empty_arrays/query.sql @@ -0,0 +1,2 @@ +SELECT range(x) AS k, count() FROM (SELECT number % 2 ? number : 0 AS x FROM system.numbers LIMIT 10) GROUP BY k ORDER BY k; +SELECT range(x) AS k1, range(y) AS k2, count() FROM (SELECT number % 2 ? number : 0 AS x, number % 3 ? 
toUInt64(20 - number) : 0 AS y FROM system.numbers LIMIT 20) GROUP BY k1, k2 ORDER BY k1, k2; diff --git a/parser/testdata/00199_ternary_operator_type_check/ast.json b/parser/testdata/00199_ternary_operator_type_check/ast.json new file mode 100644 index 000000000..9d7bf023e --- /dev/null +++ b/parser/testdata/00199_ternary_operator_type_check/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'abc' (alias s)" + }, + { + "explain": " Literal 'def'" + }, + { + "explain": " Identifier s" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001293719, + "rows_read": 12, + "bytes_read": 435 + } +} diff --git a/parser/testdata/00199_ternary_operator_type_check/metadata.json b/parser/testdata/00199_ternary_operator_type_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00199_ternary_operator_type_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00199_ternary_operator_type_check/query.sql b/parser/testdata/00199_ternary_operator_type_check/query.sql new file mode 100644 index 000000000..78fcb688c --- /dev/null +++ b/parser/testdata/00199_ternary_operator_type_check/query.sql @@ -0,0 +1,80 @@ +select (1 ? ('abc' as s) : 'def') = s; +select (1 ? toFixedString('abc' as s, 3) : 'def') = s; +select (1 ? toFixedString('abc' as s, 3) : toFixedString('def', 3)) = s; +select (1 ? ('abc' as s) : toFixedString('def', 3)) = s; + +select (1 ? (today() as t) : yesterday()) = t; + +select (1 ? (now() as n) : now() - 1) = n; + +select (1 ? (toUInt8(0) as i) : toUInt8(1)) = i; +select (1 ? (toUInt16(0) as i) : toUInt8(1)) = i; +select (1 ? (toUInt32(0) as i) : toUInt8(1)) = i; +select (1 ? (toUInt64(0) as i) : toUInt8(1)) = i; +select (1 ? (toInt8(0) as i) : toUInt8(1)) = i; +select (1 ? (toInt16(0) as i) : toUInt8(1)) = i; +select (1 ? (toInt32(0) as i) : toUInt8(1)) = i; +select (1 ? (toInt64(0) as i) : toUInt8(1)) = i; + +select (1 ? (toUInt8(0) as i) : toUInt16(1)) = i; +select (1 ? (toUInt16(0) as i) : toUInt16(1)) = i; +select (1 ? (toUInt32(0) as i) : toUInt16(1)) = i; +select (1 ? (toUInt64(0) as i) : toUInt16(1)) = i; +select (1 ? (toInt8(0) as i) : toUInt16(1)) = i; +select (1 ? (toInt16(0) as i) : toUInt16(1)) = i; +select (1 ? (toInt32(0) as i) : toUInt16(1)) = i; +select (1 ? (toInt64(0) as i) : toUInt16(1)) = i; + +select (1 ? (toUInt8(0) as i) : toUInt32(1)) = i; +select (1 ? (toUInt16(0) as i) : toUInt32(1)) = i; +select (1 ? (toUInt32(0) as i) : toUInt32(1)) = i; +select (1 ? (toUInt64(0) as i) : toUInt32(1)) = i; +select (1 ? (toInt8(0) as i) : toUInt32(1)) = i; +select (1 ? (toInt16(0) as i) : toUInt32(1)) = i; +select (1 ? (toInt32(0) as i) : toUInt32(1)) = i; +select (1 ? (toInt64(0) as i) : toUInt32(1)) = i; + +select (1 ? (toUInt8(0) as i) : toUInt64(1)) = i; +select (1 ? (toUInt16(0) as i) : toUInt64(1)) = i; +select (1 ? (toUInt32(0) as i) : toUInt64(1)) = i; +select (1 ? (toUInt64(0) as i) : toUInt64(1)) = i; +--select (1 ? 
(toInt8(0) as i) : toUInt64(1)) = i; +--select (1 ? (toInt16(0) as i) : toUInt64(1)) = i; +--select (1 ? (toInt32(0) as i) : toUInt64(1)) = i; +--select (1 ? (toInt64(0) as i) : toUInt64(1)) = i; + +select (1 ? (toUInt8(0) as i) : toInt8(1)) = i; +select (1 ? (toUInt16(0) as i) : toInt8(1)) = i; +select (1 ? (toUInt32(0) as i) : toInt8(1)) = i; +--select (1 ? (toUInt64(0) as i) : toInt8(1)) = i; +select (1 ? (toInt8(0) as i) : toInt8(1)) = i; +select (1 ? (toInt16(0) as i) : toInt8(1)) = i; +select (1 ? (toInt32(0) as i) : toInt8(1)) = i; +select (1 ? (toInt64(0) as i) : toInt8(1)) = i; + +select (1 ? (toUInt8(0) as i) : toInt16(1)) = i; +select (1 ? (toUInt16(0) as i) : toInt16(1)) = i; +select (1 ? (toUInt32(0) as i) : toInt16(1)) = i; +--select (1 ? (toUInt64(0) as i) : toInt16(1)) = i; +select (1 ? (toInt8(0) as i) : toInt16(1)) = i; +select (1 ? (toInt16(0) as i) : toInt16(1)) = i; +select (1 ? (toInt32(0) as i) : toInt16(1)) = i; +select (1 ? (toInt64(0) as i) : toInt16(1)) = i; + +select (1 ? (toUInt8(0) as i) : toInt32(1)) = i; +select (1 ? (toUInt16(0) as i) : toInt32(1)) = i; +select (1 ? (toUInt32(0) as i) : toInt32(1)) = i; +--select (1 ? (toUInt64(0) as i) : toInt32(1)) = i; +select (1 ? (toInt8(0) as i) : toInt32(1)) = i; +select (1 ? (toInt16(0) as i) : toInt32(1)) = i; +select (1 ? (toInt32(0) as i) : toInt32(1)) = i; +select (1 ? (toInt64(0) as i) : toInt32(1)) = i; + +select (1 ? (toUInt8(0) as i) : toInt64(1)) = i; +select (1 ? (toUInt16(0) as i) : toInt64(1)) = i; +select (1 ? (toUInt32(0) as i) : toInt64(1)) = i; +--select (1 ? (toUInt64(0) as i) : toInt64(1)) = i; +select (1 ? (toInt8(0) as i) : toInt64(1)) = i; +select (1 ? (toInt16(0) as i) : toInt64(1)) = i; +select (1 ? (toInt32(0) as i) : toInt64(1)) = i; +select (1 ? (toInt64(0) as i) : toInt64(1)) = i; diff --git a/parser/testdata/00200_shard_distinct_order_by_limit_distributed/ast.json b/parser/testdata/00200_shard_distinct_order_by_limit_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00200_shard_distinct_order_by_limit_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00200_shard_distinct_order_by_limit_distributed/metadata.json b/parser/testdata/00200_shard_distinct_order_by_limit_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00200_shard_distinct_order_by_limit_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00200_shard_distinct_order_by_limit_distributed/query.sql b/parser/testdata/00200_shard_distinct_order_by_limit_distributed/query.sql new file mode 100644 index 000000000..6c3b8b516 --- /dev/null +++ b/parser/testdata/00200_shard_distinct_order_by_limit_distributed/query.sql @@ -0,0 +1,7 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS numbers_memory; +CREATE TABLE numbers_memory AS system.numbers ENGINE = Memory; +INSERT INTO numbers_memory SELECT number FROM system.numbers LIMIT 100; +SELECT DISTINCT number FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_memory) ORDER BY number LIMIT 10; +DROP TABLE numbers_memory; diff --git a/parser/testdata/00201_array_uniq/ast.json b/parser/testdata/00201_array_uniq/ast.json new file mode 100644 index 000000000..4402c451e --- /dev/null +++ b/parser/testdata/00201_array_uniq/ast.json @@ -0,0 +1,235 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " 
ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 7)" + }, + { + "explain": " Function uniqExact (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function length (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function groupUniqArray (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function arrayUniq (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function groupArray (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function uniqExact (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Function arrayUniq (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function groupArray (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Function uniqExact (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal '_'" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Function arrayUniq (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function groupArray (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function groupArray (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function round (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function log (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function intHash32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function toString (alias y) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function cbrt (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function intHash32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " 
TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10000" + } + ], + + "rows": 71, + + "statistics": + { + "elapsed": 0.001437678, + "rows_read": 71, + "bytes_read": 3015 + } +} diff --git a/parser/testdata/00201_array_uniq/metadata.json b/parser/testdata/00201_array_uniq/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00201_array_uniq/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00201_array_uniq/query.sql b/parser/testdata/00201_array_uniq/query.sql new file mode 100644 index 000000000..4afaa3d98 --- /dev/null +++ b/parser/testdata/00201_array_uniq/query.sql @@ -0,0 +1 @@ +SELECT uniqExact(x), length(groupUniqArray(x)), arrayUniq(groupArray(x)), uniqExact(y), arrayUniq(groupArray(y)), uniqExact(concat(toString(x), '_', y)), arrayUniq(groupArray(x), groupArray(y)) FROM (SELECT round(log(intHash32(number))) AS x, toString(round(cbrt(intHash32(number)))) AS y FROM system.numbers LIMIT 10000); diff --git a/parser/testdata/00202_cross_join/ast.json b/parser/testdata/00202_cross_join/ast.json new file mode 100644 index 000000000..88e42ba75 --- /dev/null +++ b/parser/testdata/00202_cross_join/ast.json @@ -0,0 +1,139 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias js1) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number (alias x)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias js2) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number (alias y)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " TableJoin" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier y" + } + ], + + "rows": 39, + + "statistics": + { + "elapsed": 0.001393001, + "rows_read": 39, + "bytes_read": 1634 + } +} diff --git a/parser/testdata/00202_cross_join/metadata.json 
b/parser/testdata/00202_cross_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00202_cross_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00202_cross_join/query.sql b/parser/testdata/00202_cross_join/query.sql new file mode 100644 index 000000000..de4351942 --- /dev/null +++ b/parser/testdata/00202_cross_join/query.sql @@ -0,0 +1,8 @@ +SELECT x, y FROM (SELECT number AS x FROM system.numbers LIMIT 3) js1 CROSS JOIN (SELECT number AS y FROM system.numbers LIMIT 5) js2 ORDER BY x, y; + +SET join_algorithm = 'auto'; +SELECT x, y FROM (SELECT number AS x FROM system.numbers LIMIT 3) js1 CROSS JOIN (SELECT number AS y FROM system.numbers LIMIT 5) js2 ORDER BY x, y; + +-- Just to test that the setting rename is handled, we use the new name `enable_analyzer` instead of the old `allow_experimental_analyzer` here. +SET enable_analyzer = 1; +SELECT x, y FROM (SELECT number AS x FROM system.numbers LIMIT 3) js1 CROSS JOIN (SELECT number AS y FROM system.numbers LIMIT 5) js2 ORDER BY x, y; diff --git a/parser/testdata/00203_full_join/ast.json b/parser/testdata/00203_full_join/ast.json new file mode 100644 index 000000000..99adec2e2 --- /dev/null +++ b/parser/testdata/00203_full_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001406852, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00203_full_join/metadata.json b/parser/testdata/00203_full_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00203_full_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00203_full_join/query.sql b/parser/testdata/00203_full_join/query.sql new file mode 100644 index 000000000..c1b717517 --- /dev/null +++ b/parser/testdata/00203_full_join/query.sql @@ -0,0 +1,33 @@ +SET any_join_distinct_right_table_keys = 1; +SET joined_subquery_requires_alias = 0; + +SELECT k, x, y FROM (SELECT arrayJoin([1, 2, 3]) AS k, 'Hello' AS x) ANY FULL JOIN (SELECT range(k) AS y, arrayJoin([3, 4, 5]) AS k) USING k WHERE k < 10 ORDER BY k; +SELECT k, x FROM (SELECT arrayJoin([1, 2, 3]) AS k, 'Hello' AS x) ANY FULL JOIN (SELECT range(k) AS y, arrayJoin([3, 4, 5]) AS k) USING k WHERE k < 10 ORDER BY k; +SELECT k, y FROM (SELECT arrayJoin([1, 2, 3]) AS k, 'Hello' AS x) ANY FULL JOIN (SELECT range(k) AS y, arrayJoin([3, 4, 5]) AS k) USING k WHERE k < 10 ORDER BY k; +SELECT x, y FROM (SELECT arrayJoin([1, 2, 3]) AS k, 'Hello' AS x) ANY FULL JOIN (SELECT range(k) AS y, arrayJoin([3, 4, 5]) AS k) USING k WHERE k < 10 ORDER BY k; +SELECT k FROM (SELECT arrayJoin([1, 2, 3]) AS k, 'Hello' AS x) ANY FULL JOIN (SELECT range(k) AS y, arrayJoin([3, 4, 5]) AS k) USING k WHERE k < 10 ORDER BY k; + +SELECT k, x, y FROM (SELECT arrayJoin([1, 2, 3]) AS k, 'Hello' AS x) ANY RIGHT JOIN (SELECT range(k) AS y, arrayJoin([3, 4, 5]) AS k) USING k WHERE k < 10 ORDER BY k; +SELECT k, x FROM (SELECT arrayJoin([1, 2, 3]) AS k, 'Hello' AS x) ANY RIGHT JOIN (SELECT range(k) AS y, arrayJoin([3, 4, 5]) AS k) USING k WHERE k < 10 ORDER BY k; +SELECT k, y FROM (SELECT arrayJoin([1, 2, 3]) AS k, 'Hello' AS x) ANY RIGHT JOIN (SELECT range(k) AS y, arrayJoin([3, 4, 5]) AS k) USING k WHERE k < 10 ORDER BY k; +SELECT x, y FROM (SELECT arrayJoin([1, 2, 3]) AS k, 'Hello' AS x) ANY RIGHT JOIN (SELECT range(k) AS y, arrayJoin([3, 4, 5]) AS k) USING k WHERE k < 10 ORDER BY k; +SELECT 
k FROM (SELECT arrayJoin([1, 2, 3]) AS k, 'Hello' AS x) ANY RIGHT JOIN (SELECT range(k) AS y, arrayJoin([3, 4, 5]) AS k) USING k WHERE k < 10 ORDER BY k; + +DROP TABLE IF EXISTS t1_00203; +DROP TABLE IF EXISTS t2_00203; + +CREATE TABLE t1_00203 (k1 UInt32, k2 UInt32, k3 UInt32, val_t1 String) ENGINE=TinyLog; +CREATE TABLE t2_00203 (val_t2 String, k3 UInt32, k2 UInt32, k1 UInt32) ENGINE=TinyLog; + +INSERT INTO t1_00203 VALUES (1, 2, 3, 'aaa'), (2, 3, 4, 'bbb'); +INSERT INTO t2_00203 VALUES ('ccc', 4, 3, 2), ('ddd', 7, 6, 5); + +SELECT k1, k2, k3, val_t1, val_t2 FROM t1_00203 ANY FULL JOIN t2_00203 USING (k3, k1, k2) ORDER BY k1, k2, k3; +SELECT k1, k2, k3, val_t1, val_t2 FROM t1_00203 ANY RIGHT JOIN t2_00203 USING (k3, k1, k2) ORDER BY k1, k2, k3; + +SET any_join_distinct_right_table_keys = 0; +SELECT k1, k2, k3, val_t1, val_t2 FROM t1_00203 ANY FULL JOIN t2_00203 USING (k3, k1, k2) ORDER BY k1, k2, k3; -- { serverError NOT_IMPLEMENTED } +SELECT k1, k2, k3, val_t1, val_t2 FROM t1_00203 ANY RIGHT JOIN t2_00203 USING (k3, k1, k2) ORDER BY k1, k2, k3; + +DROP TABLE t1_00203; +DROP TABLE t2_00203; diff --git a/parser/testdata/00204_extract_url_parameter/ast.json b/parser/testdata/00204_extract_url_parameter/ast.json new file mode 100644 index 000000000..8ab8b2612 --- /dev/null +++ b/parser/testdata/00204_extract_url_parameter/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function extractURLParameter (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'http:\/\/com\/?testq=aaa&q=111'" + }, + { + "explain": " Literal 'q'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001097699, + "rows_read": 8, + "bytes_read": 318 + } +} diff --git a/parser/testdata/00204_extract_url_parameter/metadata.json b/parser/testdata/00204_extract_url_parameter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00204_extract_url_parameter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00204_extract_url_parameter/query.sql b/parser/testdata/00204_extract_url_parameter/query.sql new file mode 100644 index 000000000..f3395a702 --- /dev/null +++ b/parser/testdata/00204_extract_url_parameter/query.sql @@ -0,0 +1 @@ +SELECT extractURLParameter('http://com/?testq=aaa&q=111', 'q'); diff --git a/parser/testdata/00205_emptyscalar_subquery_type_mismatch_bug/ast.json b/parser/testdata/00205_emptyscalar_subquery_type_mismatch_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00205_emptyscalar_subquery_type_mismatch_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00205_emptyscalar_subquery_type_mismatch_bug/metadata.json b/parser/testdata/00205_emptyscalar_subquery_type_mismatch_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00205_emptyscalar_subquery_type_mismatch_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00205_emptyscalar_subquery_type_mismatch_bug/query.sql b/parser/testdata/00205_emptyscalar_subquery_type_mismatch_bug/query.sql new file mode 100644 index 000000000..aeac6be7d --- /dev/null +++ 
b/parser/testdata/00205_emptyscalar_subquery_type_mismatch_bug/query.sql @@ -0,0 +1,38 @@ + +-- Bug reproduction from #25411 +WITH a AS (select (select 1 WHERE 0) as b) +select 1 +from system.one +cross join a +where a.b = 0; + +-- Reported query +drop table if exists t_q1ht4gq_5; +create table t_q1ht4gq_5 (c_zeij INTEGER NOT NULL, c_fehk75l TEXT, c_jz TEXT, c_wynzuek TEXT, c_nkt INTEGER NOT NULL, c_g TEXT, c_mc2 TEXT, primary key(c_nkt)) engine = MergeTree(); +WITH +cte_0 AS (select + subq_0.c6 as c2, + case when 0<>0 then ((select c_zeij from t_q1ht4gq_5 order by c_zeij limit 1 offset 1) + + subq_0.c4) else ((select c_zeij from t_q1ht4gq_5 order by c_zeij limit 1 offset 1) + + subq_0.c4) end as c4 + from + (select + ref_0.c_nkt as c4, + ref_0.c_nkt as c6 + from + t_q1ht4gq_5 as ref_0 + ) as subq_0 + ) +select + ref_12.c_zeij as c3 + from + t_q1ht4gq_5 as ref_12 + where (ref_12.c_jz not in ( + select + ref_14.c_mc2 as c0 + from + t_q1ht4gq_5 as ref_14 + cross join cte_0 as ref_15 + where ref_15.c4 > ref_15.c2)); + +drop table if exists t_q1ht4gq_5; diff --git a/parser/testdata/00205_scalar_subqueries/ast.json b/parser/testdata/00205_scalar_subqueries/ast.json new file mode 100644 index 000000000..d8d71a48c --- /dev/null +++ b/parser/testdata/00205_scalar_subqueries/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001247685, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00205_scalar_subqueries/metadata.json b/parser/testdata/00205_scalar_subqueries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00205_scalar_subqueries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00205_scalar_subqueries/query.sql b/parser/testdata/00205_scalar_subqueries/query.sql new file mode 100644 index 000000000..5c35de10f --- /dev/null +++ b/parser/testdata/00205_scalar_subqueries/query.sql @@ -0,0 +1,21 @@ +SET send_logs_level = 'fatal'; + +SELECT (SELECT (SELECT (SELECT (SELECT (SELECT count() FROM (SELECT * FROM system.numbers LIMIT 10)))))) = (SELECT 10), ((SELECT 1, 'Hello', [1, 2]).3)[1]; +SELECT toUInt64((SELECT 9)) IN (SELECT number FROM system.numbers LIMIT 10); +SELECT (SELECT toDate('2015-01-02')) = toDate('2015-01-02'), 'Hello' = (SELECT 'Hello'); +SELECT (SELECT toDate('2015-01-02'), 'Hello'); +SELECT (SELECT toDate('2015-01-02'), 'Hello') AS x, x, identity((SELECT 1)), identity((SELECT 1) AS y); +-- SELECT (SELECT uniqState('')); + + SELECT ( SELECT throwIf(1 + dummy) ); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } + +-- Scalar subquery with 0 rows must return Null +SELECT (SELECT 1 WHERE 0); +-- But tuple and array can't be inside nullable +SELECT (SELECT 1, 2 WHERE 0); -- { serverError INCORRECT_RESULT_OF_SCALAR_SUBQUERY } +SELECT (SELECT [1] WHERE 0); -- { serverError INCORRECT_RESULT_OF_SCALAR_SUBQUERY } +-- Works for non-empty case +SELECT (SELECT 1, 2); +SELECT (SELECT [1]); +-- Several rows +SELECT (SELECT number FROM numbers(2)); -- { serverError INCORRECT_RESULT_OF_SCALAR_SUBQUERY } diff --git a/parser/testdata/00206_empty_array_to_single/ast.json b/parser/testdata/00206_empty_array_to_single/ast.json new file mode 100644 index 000000000..d83e28a68 --- /dev/null +++ b/parser/testdata/00206_empty_array_to_single/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + 
"explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function emptyArrayToSingle (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayFilter (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function notEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_99" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[Array_[UInt64_1, UInt64_2], Array_[UInt64_99], Array_[UInt64_4, UInt64_5, UInt64_6]]" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001253839, + "rows_read": 20, + "bytes_read": 913 + } +} diff --git a/parser/testdata/00206_empty_array_to_single/metadata.json b/parser/testdata/00206_empty_array_to_single/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00206_empty_array_to_single/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00206_empty_array_to_single/query.sql b/parser/testdata/00206_empty_array_to_single/query.sql new file mode 100644 index 000000000..85e8f8243 --- /dev/null +++ b/parser/testdata/00206_empty_array_to_single/query.sql @@ -0,0 +1,8 @@ +SELECT emptyArrayToSingle(arrayFilter(x -> x != 99, arrayJoin([[1, 2], [99], [4, 5, 6]]))); +SELECT emptyArrayToSingle(emptyArrayString()), emptyArrayToSingle(emptyArrayDate()), emptyArrayToSingle(arrayFilter(x -> 0, [now('Asia/Istanbul')])); + +SELECT + emptyArrayToSingle(range(number % 3)), + emptyArrayToSingle(arrayMap(x -> toString(x), range(number % 2))), + emptyArrayToSingle(arrayMap(x -> toDateTime('2015-01-01 00:00:00', 'UTC') + x, range(number % 5))), + emptyArrayToSingle(arrayMap(x -> toDate('2015-01-01') + x, range(number % 4))) FROM system.numbers LIMIT 10; diff --git a/parser/testdata/00207_left_array_join/ast.json b/parser/testdata/00207_left_array_join/ast.json new file mode 100644 index 000000000..fae2d0a8b --- /dev/null +++ b/parser/testdata/00207_left_array_join/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " ArrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function range (alias arr) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 
2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001090347, + "rows_read": 19, + "bytes_read": 770 + } +} diff --git a/parser/testdata/00207_left_array_join/metadata.json b/parser/testdata/00207_left_array_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00207_left_array_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00207_left_array_join/query.sql b/parser/testdata/00207_left_array_join/query.sql new file mode 100644 index 000000000..8186054c2 --- /dev/null +++ b/parser/testdata/00207_left_array_join/query.sql @@ -0,0 +1,2 @@ +SELECT number FROM system.numbers LEFT ARRAY JOIN range(number % 3) AS arr LIMIT 10; +SELECT number, arr, x FROM (SELECT number, range(number % 3) AS arr FROM system.numbers LIMIT 10) LEFT ARRAY JOIN arr AS x; diff --git a/parser/testdata/00208_agg_state_merge/ast.json b/parser/testdata/00208_agg_state_merge/ast.json new file mode 100644 index 000000000..e320451be --- /dev/null +++ b/parser/testdata/00208_agg_state_merge/ast.json @@ -0,0 +1,190 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function modulo (alias k2) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " Literal UInt64_7" + }, + { + "explain": " Function finalizeAggregation (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function uniqMergeState (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier state" + }, + { + "explain": " Function uniqMerge (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier state" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " Function uniqState (alias state) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (alias k) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_11" + }, + { + "explain": " Function intDiv (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + 
"explain": " Identifier number" + }, + { + "explain": " Literal UInt64_7" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier k2" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier k2" + } + ], + + "rows": 56, + + "statistics": + { + "elapsed": 0.001552654, + "rows_read": 56, + "bytes_read": 2486 + } +} diff --git a/parser/testdata/00208_agg_state_merge/metadata.json b/parser/testdata/00208_agg_state_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00208_agg_state_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00208_agg_state_merge/query.sql b/parser/testdata/00208_agg_state_merge/query.sql new file mode 100644 index 000000000..3f30f66dd --- /dev/null +++ b/parser/testdata/00208_agg_state_merge/query.sql @@ -0,0 +1 @@ +SELECT k % 7 AS k2, finalizeAggregation(uniqMergeState(state)), uniqMerge(state) FROM (SELECT k, uniqState(x) AS state FROM (SELECT number % 11 AS k, intDiv(number, 7) AS x FROM system.numbers LIMIT 100) GROUP BY k) GROUP BY k2 ORDER BY k2; diff --git a/parser/testdata/00209_insert_select_extremes/ast.json b/parser/testdata/00209_insert_select_extremes/ast.json new file mode 100644 index 000000000..39b4acadc --- /dev/null +++ b/parser/testdata/00209_insert_select_extremes/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_00209 (children 1)" + }, + { + "explain": " Identifier test_00209" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001170807, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00209_insert_select_extremes/metadata.json b/parser/testdata/00209_insert_select_extremes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00209_insert_select_extremes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00209_insert_select_extremes/query.sql b/parser/testdata/00209_insert_select_extremes/query.sql new file mode 100644 index 000000000..98dfe8e26 --- /dev/null +++ b/parser/testdata/00209_insert_select_extremes/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS test_00209; +CREATE TABLE test_00209 (x UInt8) ENGINE = Log; + +SET enable_positional_arguments = 0; + +INSERT INTO test_00209 SELECT 1 AS x; +INSERT INTO test_00209 SELECT 1 AS x SETTINGS extremes = 1; +INSERT INTO test_00209 SELECT 1 AS x GROUP BY 1 WITH TOTALS; +INSERT INTO test_00209 SELECT 1 AS x GROUP BY 1 WITH TOTALS SETTINGS extremes = 1; + +SELECT count(), min(x), max(x) FROM test_00209; + +DROP TABLE test_00209; diff --git a/parser/testdata/00211_shard_query_formatting_aliases/ast.json b/parser/testdata/00211_shard_query_formatting_aliases/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00211_shard_query_formatting_aliases/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00211_shard_query_formatting_aliases/metadata.json 
b/parser/testdata/00211_shard_query_formatting_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00211_shard_query_formatting_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00211_shard_query_formatting_aliases/query.sql b/parser/testdata/00211_shard_query_formatting_aliases/query.sql new file mode 100644 index 000000000..7c44ac309 --- /dev/null +++ b/parser/testdata/00211_shard_query_formatting_aliases/query.sql @@ -0,0 +1,10 @@ +-- Tags: shard + +SELECT toUInt64(1) IN (1234567890, 2345678901, 3456789012, 4567890123, 5678901234, 6789012345, 7890123456, 8901234567, 9012345678, 123456789) AS x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, + x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x +FROM remote('127.0.0.2', system, one) SETTINGS max_query_size = 10000; + +SELECT 1 AS x, x, (SELECT 2 AS x, x) FROM remote('127.0.0.{2,3}', system.one) WHERE (3, 4) IN (SELECT 3 AS x, toUInt8(x + 1)); diff --git a/parser/testdata/00212_long_shard_aggregate_function_uniq/ast.json b/parser/testdata/00212_long_shard_aggregate_function_uniq/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00212_long_shard_aggregate_function_uniq/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00212_long_shard_aggregate_function_uniq/metadata.json b/parser/testdata/00212_long_shard_aggregate_function_uniq/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00212_long_shard_aggregate_function_uniq/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00212_long_shard_aggregate_function_uniq/query.sql b/parser/testdata/00212_long_shard_aggregate_function_uniq/query.sql new file mode 100644 index 000000000..2e273cd80 --- /dev/null +++ b/parser/testdata/00212_long_shard_aggregate_function_uniq/query.sql @@ -0,0 +1,136 @@ +-- Tags: long, shard + +-- uniqHLL12 + +SELECT 'uniqHLL12'; + +SELECT Y, uniqHLL12(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqHLL12(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqHLL12(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqHLL12 round(float)'; + +SELECT Y, uniqHLL12(X) FROM (SELECT number AS X, round(1/(1 + (3*X*X - 7*X + 11) % 37), 3) AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqHLL12(X) FROM (SELECT number AS X, round(1/(1 + (3*X*X - 7*X + 11) % 37), 3) AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqHLL12(X) FROM (SELECT number AS X, round(1/(1 + (3*X*X - 7*X + 11) % 37), 3) AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqHLL12 round(toFloat32())'; + +SELECT Y, uniqHLL12(X) FROM (SELECT number AS X, round(toFloat32(1/(1 + (3*X*X - 7*X + 11) % 37)), 3) AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER 
BY Y; +SELECT Y, uniqHLL12(X) FROM (SELECT number AS X, round(toFloat32(1/(1 + (3*X*X - 7*X + 11) % 37)), 3) AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqHLL12(X) FROM (SELECT number AS X, round(toFloat32(1/(1 + (3*X*X - 7*X + 11) % 37)), 3) AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqHLL12 IPv4NumToString'; + +SELECT Y, uniqHLL12(Z) FROM (SELECT number AS X, IPv4NumToString(toUInt32(X)) AS Z, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqHLL12(Z) FROM (SELECT number AS X, IPv4NumToString(toUInt32(X)) AS Z, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqHLL12(Z) FROM (SELECT number AS X, IPv4NumToString(toUInt32(X)) AS Z, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqHLL12 remote()'; + +SELECT uniqHLL12(dummy) FROM remote('127.0.0.{2,3}', system.one); + +-- uniqCombined + +SELECT 'uniqCombined'; + +SELECT Y, uniqCombined(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqCombined(12)'; + +SELECT Y, uniqCombined(12)(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(12)(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(12)(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqCombined(17)'; + +SELECT Y, uniqCombined(17)(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(17)(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(17)(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqCombined(20)'; + +SELECT Y, uniqCombined(20)(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(20)(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(20)(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqCombined(round(float))'; + +SELECT Y, uniqCombined(X) FROM (SELECT number AS X, round(1/(1 + (3*X*X - 7*X + 11) % 37), 3) AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(X) FROM (SELECT number AS X, round(1/(1 + (3*X*X - 7*X + 11) % 37), 3) AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(X) FROM (SELECT number AS X, round(1/(1 + (3*X*X - 7*X + 11) % 37), 3) AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqCombined(12)(round(float))'; + +SELECT Y, uniqCombined(12)(X) FROM (SELECT number AS X, round(1/(1 + (3*X*X - 7*X + 11) % 37), 3) AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(12)(X) 
FROM (SELECT number AS X, round(1/(1 + (3*X*X - 7*X + 11) % 37), 3) AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(12)(X) FROM (SELECT number AS X, round(1/(1 + (3*X*X - 7*X + 11) % 37), 3) AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqCombined(17)(round(float))'; + +SELECT Y, uniqCombined(17)(X) FROM (SELECT number AS X, round(1/(1 + (3*X*X - 7*X + 11) % 37), 3) AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(17)(X) FROM (SELECT number AS X, round(1/(1 + (3*X*X - 7*X + 11) % 37), 3) AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(17)(X) FROM (SELECT number AS X, round(1/(1 + (3*X*X - 7*X + 11) % 37), 3) AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqCombined(20)(round(float))'; + +SELECT Y, uniqCombined(20)(X) FROM (SELECT number AS X, round(1/(1 + (3*X*X - 7*X + 11) % 37), 3) AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(20)(X) FROM (SELECT number AS X, round(1/(1 + (3*X*X - 7*X + 11) % 37), 3) AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(20)(X) FROM (SELECT number AS X, round(1/(1 + (3*X*X - 7*X + 11) % 37), 3) AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqCombined(X)(round(toFloat32()))'; + +SELECT Y, uniqCombined(X) FROM (SELECT number AS X, round(toFloat32(1/(1 + (3*X*X - 7*X + 11) % 37)), 3) AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(X) FROM (SELECT number AS X, round(toFloat32(1/(1 + (3*X*X - 7*X + 11) % 37)), 3) AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(X) FROM (SELECT number AS X, round(toFloat32(1/(1 + (3*X*X - 7*X + 11) % 37)), 3) AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqCombined(12)(round(toFloat32()))'; + +SELECT Y, uniqCombined(12)(X) FROM (SELECT number AS X, round(toFloat32(1/(1 + (3*X*X - 7*X + 11) % 37)), 3) AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(12)(X) FROM (SELECT number AS X, round(toFloat32(1/(1 + (3*X*X - 7*X + 11) % 37)), 3) AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(12)(X) FROM (SELECT number AS X, round(toFloat32(1/(1 + (3*X*X - 7*X + 11) % 37)), 3) AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqCombined(17)(round(toFloat32()))'; + +SELECT Y, uniqCombined(17)(X) FROM (SELECT number AS X, round(toFloat32(1/(1 + (3*X*X - 7*X + 11) % 37)), 3) AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(17)(X) FROM (SELECT number AS X, round(toFloat32(1/(1 + (3*X*X - 7*X + 11) % 37)), 3) AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(17)(X) FROM (SELECT number AS X, round(toFloat32(1/(1 + (3*X*X - 7*X + 11) % 37)), 3) AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqCombined(20)(round(toFloat32()))'; + +SELECT Y, uniqCombined(20)(X) FROM (SELECT number AS X, round(toFloat32(1/(1 + (3*X*X - 7*X + 11) % 37)), 3) AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(20)(X) FROM (SELECT number AS X, round(toFloat32(1/(1 + (3*X*X - 7*X + 11) % 37)), 3) AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(20)(X) FROM (SELECT number AS X, round(toFloat32(1/(1 + (3*X*X - 7*X + 11) % 37)), 3) AS Y FROM system.numbers LIMIT 1000000) 
GROUP BY Y ORDER BY Y; + +SELECT 'uniqCombined(Z)(IPv4NumToString)'; + +SELECT Y, uniqCombined(Z) FROM (SELECT number AS X, IPv4NumToString(toUInt32(X)) AS Z, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(Z) FROM (SELECT number AS X, IPv4NumToString(toUInt32(X)) AS Z, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(Z) FROM (SELECT number AS X, IPv4NumToString(toUInt32(X)) AS Z, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqCombined(12)(IPv4NumToString)'; + +SELECT Y, uniqCombined(12)(Z) FROM (SELECT number AS X, IPv4NumToString(toUInt32(X)) AS Z, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(12)(Z) FROM (SELECT number AS X, IPv4NumToString(toUInt32(X)) AS Z, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(12)(Z) FROM (SELECT number AS X, IPv4NumToString(toUInt32(X)) AS Z, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqCombined(17)(IPv4NumToString)'; + +SELECT Y, uniqCombined(17)(Z) FROM (SELECT number AS X, IPv4NumToString(toUInt32(X)) AS Z, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(17)(Z) FROM (SELECT number AS X, IPv4NumToString(toUInt32(X)) AS Z, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(17)(Z) FROM (SELECT number AS X, IPv4NumToString(toUInt32(X)) AS Z, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqCombined(20)(IPv4NumToString)'; + +SELECT Y, uniqCombined(20)(Z) FROM (SELECT number AS X, IPv4NumToString(toUInt32(X)) AS Z, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(20)(Z) FROM (SELECT number AS X, IPv4NumToString(toUInt32(X)) AS Z, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqCombined(20)(Z) FROM (SELECT number AS X, IPv4NumToString(toUInt32(X)) AS Z, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqCombined remote()'; + +SELECT uniqCombined(dummy) FROM remote('127.0.0.{2,3}', system.one); +SELECT uniqCombined(12)(dummy) FROM remote('127.0.0.{2,3}', system.one); +SELECT uniqCombined(17)(dummy) FROM remote('127.0.0.{2,3}', system.one); +SELECT uniqCombined(20)(dummy) FROM remote('127.0.0.{2,3}', system.one); diff --git a/parser/testdata/00213_multiple_global_in/ast.json b/parser/testdata/00213_multiple_global_in/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00213_multiple_global_in/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00213_multiple_global_in/metadata.json b/parser/testdata/00213_multiple_global_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00213_multiple_global_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00213_multiple_global_in/query.sql b/parser/testdata/00213_multiple_global_in/query.sql new file mode 100644 index 000000000..0992f666a --- /dev/null +++ b/parser/testdata/00213_multiple_global_in/query.sql @@ -0,0 +1,3 @@ +-- Tags: global + +SELECT 1 GLOBAL IN (SELECT 1), 2 GLOBAL IN (SELECT 2) FROM remote('localhost', 
system.one); diff --git a/parser/testdata/00214_primary_key_order/ast.json b/parser/testdata/00214_primary_key_order/ast.json new file mode 100644 index 000000000..52205bff4 --- /dev/null +++ b/parser/testdata/00214_primary_key_order/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery primary_key (children 1)" + }, + { + "explain": " Identifier primary_key" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001383695, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/00214_primary_key_order/metadata.json b/parser/testdata/00214_primary_key_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00214_primary_key_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00214_primary_key_order/query.sql b/parser/testdata/00214_primary_key_order/query.sql new file mode 100644 index 000000000..e8a3be5f8 --- /dev/null +++ b/parser/testdata/00214_primary_key_order/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS primary_key; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE primary_key (d Date DEFAULT today(), x Int8) ENGINE = MergeTree(d, -x, 1); + +INSERT INTO primary_key (x) VALUES (1), (2), (3); + +SELECT x FROM primary_key ORDER BY x; + +SELECT 'a', -x FROM primary_key WHERE -x < -3; +SELECT 'b', -x FROM primary_key WHERE -x < -2; +SELECT 'c', -x FROM primary_key WHERE -x < -1; +SELECT 'd', -x FROM primary_key WHERE -x < toInt8(0); + +DROP TABLE primary_key; diff --git a/parser/testdata/00215_primary_key_order_zookeeper_long/ast.json b/parser/testdata/00215_primary_key_order_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00215_primary_key_order_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00215_primary_key_order_zookeeper_long/metadata.json b/parser/testdata/00215_primary_key_order_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00215_primary_key_order_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00215_primary_key_order_zookeeper_long/query.sql b/parser/testdata/00215_primary_key_order_zookeeper_long/query.sql new file mode 100644 index 000000000..218f8de91 --- /dev/null +++ b/parser/testdata/00215_primary_key_order_zookeeper_long/query.sql @@ -0,0 +1,19 @@ +-- Tags: long, zookeeper, no-replicated-database, no-shared-merge-tree +-- Tag no-replicated-database: Old syntax is not allowed +-- no-shared-merge-tree: implemented replacement + +DROP TABLE IF EXISTS primary_key; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE primary_key (d Date DEFAULT today(), x Int8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00215/primary_key', 'r1', d, -x, 1); + +INSERT INTO primary_key (x) VALUES (1), (2), (3); +INSERT INTO primary_key (x) VALUES (1), (3), (2); +INSERT INTO primary_key (x) VALUES (2), (1), (3); +INSERT INTO primary_key (x) VALUES (2), (3), (1); +INSERT INTO primary_key (x) VALUES (3), (1), (2); +INSERT INTO primary_key (x) VALUES (3), (2), (1); + +SELECT x FROM primary_key ORDER BY x; +SELECT x FROM primary_key WHERE -x < -1 ORDER BY x; + +DROP TABLE primary_key; diff --git a/parser/testdata/00216_bit_test_function_family/ast.json b/parser/testdata/00216_bit_test_function_family/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/00216_bit_test_function_family/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00216_bit_test_function_family/metadata.json b/parser/testdata/00216_bit_test_function_family/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00216_bit_test_function_family/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00216_bit_test_function_family/query.sql b/parser/testdata/00216_bit_test_function_family/query.sql new file mode 100644 index 000000000..a8bbc3790 --- /dev/null +++ b/parser/testdata/00216_bit_test_function_family/query.sql @@ -0,0 +1,115 @@ +select + bitTest(0, 0) = 0, + bitTest(1, 0) = 1, + bitTest(1, 1) = 0, + bitTest(0xff, 7) = 1; + +select + bitTestAll(0, 0) = 0, + bitTestAll(1, 0) = 1, + bitTestAll(1, 1) = 0, + bitTestAll(0xff, 0) = 1, + bitTestAll(0xff, 1) = 1, + bitTestAll(0xff, 2) = 1, + bitTestAll(0xff, 3) = 1, + bitTestAll(0xff, 4) = 1, + bitTestAll(0xff, 5) = 1, + bitTestAll(0xff, 6) = 1, + bitTestAll(0xff, 7) = 1, + bitTestAll(0xff, 0, 1) = 1, + bitTestAll(0xff, 2, 3) = 1, + bitTestAll(0xff, 4, 5) = 1, + bitTestAll(0xff, 6, 7) = 1, + bitTestAll(0xff, 0, 1, 2, 3) = 1, + bitTestAll(0xff, 4, 5, 6, 7) = 1, + bitTestAll(0xff, 0, 1, 2, 3, 4, 5, 6, 7) = 1, + bitTestAll(0x81, 0) = 1, + bitTestAll(0x81, 1) = 0, + bitTestAll(0x81, 2) = 0, + bitTestAll(0x81, 3) = 0, + bitTestAll(0x81, 4) = 0, + bitTestAll(0x81, 5) = 0, + bitTestAll(0x81, 6) = 0, + bitTestAll(0x81, 7) = 1, + bitTestAll(0x81, 0, 1) = 0, + bitTestAll(0x81, 2, 3) = 0, + bitTestAll(0x81, 4, 5) = 0, + bitTestAll(0x81, 6, 7) = 0, + bitTestAll(0x81, 0, 1, 2, 3) = 0, + bitTestAll(0x81, 4, 5, 6, 7) = 0, + bitTestAll(0x81, 0, 1, 2, 3, 4, 5, 6, 7) = 0, + bitTestAll(0x81, 0, 7) = 1; + +select + bitTestAny(0, 0) = 0, + bitTestAny(1, 0) = 1, + bitTestAny(1, 1) = 0, + bitTestAny(0xff, 0) = 1, + bitTestAny(0xff, 1) = 1, + bitTestAny(0xff, 2) = 1, + bitTestAny(0xff, 3) = 1, + bitTestAny(0xff, 4) = 1, + bitTestAny(0xff, 5) = 1, + bitTestAny(0xff, 6) = 1, + bitTestAny(0xff, 7) = 1, + bitTestAny(0xff, 0, 1) = 1, + bitTestAny(0xff, 2, 3) = 1, + bitTestAny(0xff, 4, 5) = 1, + bitTestAny(0xff, 6, 7) = 1, + bitTestAny(0xff, 0, 1, 2, 3) = 1, + bitTestAny(0xff, 4, 5, 6, 7) = 1, + bitTestAny(0xff, 0, 1, 2, 3, 4, 5, 6, 7) = 1, + bitTestAny(0x81, 0) = 1, + bitTestAny(0x81, 1) = 0, + bitTestAny(0x81, 2) = 0, + bitTestAny(0x81, 3) = 0, + bitTestAny(0x81, 4) = 0, + bitTestAny(0x81, 5) = 0, + bitTestAny(0x81, 6) = 0, + bitTestAny(0x81, 7) = 1, + bitTestAny(0x81, 0, 1) = 1, + bitTestAny(0x81, 2, 3) = 0, + bitTestAny(0x81, 4, 5) = 0, + bitTestAny(0x81, 6, 7) = 1, + bitTestAny(0x81, 0, 1, 2, 3) = 1, + bitTestAny(0x81, 4, 5, 6, 7) = 1, + bitTestAny(0x81, 0, 1, 2, 3, 4, 5, 6, 7) = 1; + +select n = n_, + number as n, + bitOr(bitShiftLeft(bitOr(bitShiftLeft(bitOr(bitShiftLeft(bitOr(bitShiftLeft(bitOr(bitShiftLeft(bitOr(bitShiftLeft(bitOr(bitShiftLeft(b7, 1), b6), 1), b5), 1), b4), 1), b3), 1), b2), 1), b1), 1), b0) as n_, + bitTest(n, 7) as b7, + bitTest(n, 6) as b6, + bitTest(n, 5) as b5, + bitTest(n, 4) as b4, + bitTest(n, 3) as b3, + bitTest(n, 2) as b2, + bitTest(n, 1) as b1, + bitTest(n, 0) as b0 +from system.numbers limit 256; + +select n = n_, + number as n, + bitOr(bitShiftLeft(bitOr(bitShiftLeft(bitOr(bitShiftLeft(bitOr(bitShiftLeft(bitOr(bitShiftLeft(bitOr(bitShiftLeft(bitOr(bitShiftLeft(b7, 1), b6), 1), b5), 1), b4), 1), b3), 1), b2), 1), b1), 1), b0) as n_, + bitTestAll(n, 7) as b7, + bitTestAll(n, 6) as b6, + bitTestAll(n, 5) as b5, + 
bitTestAll(n, 4) as b4, + bitTestAll(n, 3) as b3, + bitTestAll(n, 2) as b2, + bitTestAll(n, 1) as b1, + bitTestAll(n, 0) as b0 +from system.numbers limit 256; + +select n = n_, + number as n, + bitOr(bitShiftLeft(bitOr(bitShiftLeft(bitOr(bitShiftLeft(bitOr(bitShiftLeft(bitOr(bitShiftLeft(bitOr(bitShiftLeft(bitOr(bitShiftLeft(b7, 1), b6), 1), b5), 1), b4), 1), b3), 1), b2), 1), b1), 1), b0) as n_, + bitTestAny(n, 7) as b7, + bitTestAny(n, 6) as b6, + bitTestAny(n, 5) as b5, + bitTestAny(n, 4) as b4, + bitTestAny(n, 3) as b3, + bitTestAny(n, 2) as b2, + bitTestAny(n, 1) as b1, + bitTestAny(n, 0) as b0 +from system.numbers limit 256; diff --git a/parser/testdata/00217_shard_global_subquery_columns_with_same_name/ast.json b/parser/testdata/00217_shard_global_subquery_columns_with_same_name/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00217_shard_global_subquery_columns_with_same_name/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00217_shard_global_subquery_columns_with_same_name/metadata.json b/parser/testdata/00217_shard_global_subquery_columns_with_same_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00217_shard_global_subquery_columns_with_same_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00217_shard_global_subquery_columns_with_same_name/query.sql b/parser/testdata/00217_shard_global_subquery_columns_with_same_name/query.sql new file mode 100644 index 000000000..0ebd60a7f --- /dev/null +++ b/parser/testdata/00217_shard_global_subquery_columns_with_same_name/query.sql @@ -0,0 +1,6 @@ +-- Tags: shard + +SET joined_subquery_requires_alias = 0; + +SELECT k, a FROM (SELECT 42 AS k FROM remote('127.0.0.2', system.one)) GLOBAL ALL FULL OUTER JOIN (SELECT 42 AS k, 1 AS a, a) USING k; +SELECT 1 FROM remote('127.0.0.2', system.one) WHERE (1, 1) GLOBAL IN (SELECT 1 AS a, a); diff --git a/parser/testdata/00218_like_regexp_newline/ast.json b/parser/testdata/00218_like_regexp_newline/ast.json new file mode 100644 index 000000000..19f7a38b8 --- /dev/null +++ b/parser/testdata/00218_like_regexp_newline/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function like (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'abcdef'" + }, + { + "explain": " Literal '%abc%def%'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001330811, + "rows_read": 8, + "bytes_read": 290 + } +} diff --git a/parser/testdata/00218_like_regexp_newline/metadata.json b/parser/testdata/00218_like_regexp_newline/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00218_like_regexp_newline/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00218_like_regexp_newline/query.sql b/parser/testdata/00218_like_regexp_newline/query.sql new file mode 100644 index 000000000..824bbb66a --- /dev/null +++ b/parser/testdata/00218_like_regexp_newline/query.sql @@ -0,0 +1,11 @@ +SELECT 'abcdef' LIKE '%abc%def%'; +SELECT 'abctdef' LIKE '%abc%def%'; +SELECT 'abc\ndef' LIKE '%abc%def%'; +SELECT 'abc\ntdef' LIKE '%abc%def%'; +SELECT 'abct\ndef' LIKE '%abc%def%'; +SELECT 'abc\n\ndef' 
LIKE '%abc%def%'; +SELECT 'abc\n\ntdef' LIKE '%abc%def%'; +SELECT 'abc\nt\ndef' LIKE '%abc%def%'; +SELECT 'abct\n\ndef' LIKE '%abc%def%'; +SELECT 'ab\ndef' LIKE '%abc%def%'; +SELECT 'abc\nef' LIKE '%abc%def%'; diff --git a/parser/testdata/00219_full_right_join_column_order/ast.json b/parser/testdata/00219_full_right_join_column_order/ast.json new file mode 100644 index 000000000..ed42f7c76 --- /dev/null +++ b/parser/testdata/00219_full_right_join_column_order/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001313263, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00219_full_right_join_column_order/metadata.json b/parser/testdata/00219_full_right_join_column_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00219_full_right_join_column_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00219_full_right_join_column_order/query.sql b/parser/testdata/00219_full_right_join_column_order/query.sql new file mode 100644 index 000000000..78dfe2369 --- /dev/null +++ b/parser/testdata/00219_full_right_join_column_order/query.sql @@ -0,0 +1,8 @@ +SET any_join_distinct_right_table_keys = 1; + +SELECT a, b FROM (SELECT 1 AS a, 2000 AS b) js1 ANY RIGHT JOIN (SELECT 2 AS a, 3000 AS b) js2 USING a, b ORDER BY a, b; +SELECT a, b FROM (SELECT 1 AS a, 2000 AS b) js1 ANY RIGHT JOIN (SELECT 2 AS a, 3000 AS b) js2 USING b, a ORDER BY a, b; + +SELECT a, b FROM (SELECT 1 AS a, 2000 AS b) js1 ANY RIGHT JOIN (SELECT 2 AS a, 3000 AS b UNION ALL SELECT 1 AS a, 2000 AS b) js2 USING a, b ORDER BY a, b; +SELECT a, b FROM (SELECT 1 AS a, 2000 AS b) js1 ANY RIGHT JOIN (SELECT 2 AS a, 3000 AS b UNION ALL SELECT 1 AS a, 2000 AS b) js2 USING b, a ORDER BY a, b; + diff --git a/parser/testdata/00220_shard_with_totals_in_subquery_remote_and_limit/ast.json b/parser/testdata/00220_shard_with_totals_in_subquery_remote_and_limit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00220_shard_with_totals_in_subquery_remote_and_limit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00220_shard_with_totals_in_subquery_remote_and_limit/metadata.json b/parser/testdata/00220_shard_with_totals_in_subquery_remote_and_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00220_shard_with_totals_in_subquery_remote_and_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00220_shard_with_totals_in_subquery_remote_and_limit/query.sql b/parser/testdata/00220_shard_with_totals_in_subquery_remote_and_limit/query.sql new file mode 100644 index 000000000..a1e8d907a --- /dev/null +++ b/parser/testdata/00220_shard_with_totals_in_subquery_remote_and_limit/query.sql @@ -0,0 +1,4 @@ +-- Tags: shard + +SELECT x FROM (SELECT count() AS x FROM remote('127.0.0.2', system.one) WITH TOTALS) LIMIT 1; +SELECT x FROM (SELECT count() AS x FROM remote('127.0.0.2') WITH TOTALS) LIMIT 1; diff --git a/parser/testdata/00222_sequence_aggregate_function_family/ast.json b/parser/testdata/00222_sequence_aggregate_function_family/ast.json new file mode 100644 index 000000000..aeafc7d94 --- /dev/null +++ b/parser/testdata/00222_sequence_aggregate_function_family/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"DropQuery sequence_test (children 1)" + }, + { + "explain": " Identifier sequence_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001320344, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/00222_sequence_aggregate_function_family/metadata.json b/parser/testdata/00222_sequence_aggregate_function_family/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00222_sequence_aggregate_function_family/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00222_sequence_aggregate_function_family/query.sql b/parser/testdata/00222_sequence_aggregate_function_family/query.sql new file mode 100644 index 000000000..dd6a3ef2a --- /dev/null +++ b/parser/testdata/00222_sequence_aggregate_function_family/query.sql @@ -0,0 +1,78 @@ +drop table if exists sequence_test; + +create table sequence_test (time UInt32, data UInt8) engine=MergeTree ORDER BY tuple(); +insert into sequence_test values (0,0),(1,0),(2,0),(3,0),(4,1),(5,2),(6,0),(7,0),(8,0),(9,0),(10,1),(11,1); + +select 1 = sequenceMatch('')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceMatch('.')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceMatch('.*')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceMatch('(?1)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceMatch('(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceMatch('(?3)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 0 = sequenceMatch('(?4)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceMatch('(?1)(?1)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceMatch('(?1)(?1)(?1)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceMatch('(?1)(?1)(?1)(?1)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 0 = sequenceMatch('(?1)(?1)(?1)(?1)(?1)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceMatch('(?1)(?1)(?1)(?1)(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceMatch('(?1)(?t>10)(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 0 = sequenceMatch('(?1)(?t>11)(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceMatch('(?1)(?t<11)(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceMatch('(?1)(?t<3)(?3)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceMatch('(?1)(?t<=2)(?3)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 0 = sequenceMatch('(?1)(?t<2)(?3)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceMatch('(?2)(?t>=7)(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 0 = sequenceMatch('(?2)(?t>7)(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceMatch('(?2)(?3)(?1)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 0 = sequenceMatch('(?1)(?t==2)(?2)')(time, data = 1, data = 2) from sequence_test; +select 1 = sequenceMatch('(?1)(?t==1)(?2)')(time, data = 1, data = 2) from sequence_test; + +select count() = sequenceCount('')(time, data = 0, data = 
1, data = 2, data = 3) from sequence_test; +select count() = sequenceCount('.')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select count() = sequenceCount('.*')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 8 = sequenceCount('(?1)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 3 = sequenceCount('(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceCount('(?3)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 0 = sequenceCount('(?4)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 4 = sequenceCount('(?1)(?1)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 2 = sequenceCount('(?1)(?1)(?1)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 2 = sequenceCount('(?1)(?1)(?1)(?1)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 0 = sequenceCount('(?1)(?1)(?1)(?1)(?1)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 2 = sequenceCount('(?1)(?1)(?1)(?1)(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceCount('(?1)(?t>10)(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 0 = sequenceCount('(?1)(?t>11)(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 2 = sequenceCount('(?1)(?t<11)(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceCount('(?1)(?t<3)(?3)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceCount('(?1)(?t<=2)(?3)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 0 = sequenceCount('(?1)(?t<2)(?3)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceCount('(?2)(?t>=7)(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 0 = sequenceCount('(?2)(?t>7)(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 1 = sequenceCount('(?2)(?3)(?1)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select 0 = sequenceCount('(?1)(?t==2)(?2)')(time, data = 1, data = 2) from sequence_test; +select 1 = sequenceCount('(?1)(?t==1)(?2)')(time, data = 1, data = 2) from sequence_test; + +select [] = sequenceMatchEvents('')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [] = sequenceMatchEvents('.')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [] = sequenceMatchEvents('.*')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [0] = sequenceMatchEvents('(?1)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [4] = sequenceMatchEvents('(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [5] = sequenceMatchEvents('(?3)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [] = sequenceMatchEvents('(?4)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [0,1] = sequenceMatchEvents('(?1)(?1)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [0,1,2] = sequenceMatchEvents('(?1)(?1)(?1)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [0,1,2,3] = sequenceMatchEvents('(?1)(?1)(?1)(?1)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [0,1,2,3] = 
sequenceMatchEvents('(?1)(?1)(?1)(?1)(?1)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [0,1,2,3,4] = sequenceMatchEvents('(?1)(?1)(?1)(?1)(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [0,11] = sequenceMatchEvents('(?1)(?t>10)(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [0] = sequenceMatchEvents('(?1)(?t>11)(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [0,4] = sequenceMatchEvents('(?1)(?t<11)(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [3,5] = sequenceMatchEvents('(?1)(?t<3)(?3)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [3,5] = sequenceMatchEvents('(?1)(?t<=2)(?3)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [0] = sequenceMatchEvents('(?1)(?t<2)(?3)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [4,11] = sequenceMatchEvents('(?2)(?t>=7)(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [4] = sequenceMatchEvents('(?2)(?t>7)(?2)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [4,5,6] = sequenceMatchEvents('(?2)(?3)(?1)')(time, data = 0, data = 1, data = 2, data = 3) from sequence_test; +select [4] = sequenceMatchEvents('(?1)(?t==2)(?2)')(time, data = 1, data = 2) from sequence_test; +select [4,5] = sequenceMatchEvents('(?1)(?t==1)(?2)')(time, data = 1, data = 2) from sequence_test; + +drop table sequence_test; diff --git a/parser/testdata/00223_shard_distributed_aggregation_memory_efficient/ast.json b/parser/testdata/00223_shard_distributed_aggregation_memory_efficient/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00223_shard_distributed_aggregation_memory_efficient/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00223_shard_distributed_aggregation_memory_efficient/metadata.json b/parser/testdata/00223_shard_distributed_aggregation_memory_efficient/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00223_shard_distributed_aggregation_memory_efficient/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00223_shard_distributed_aggregation_memory_efficient/query.sql b/parser/testdata/00223_shard_distributed_aggregation_memory_efficient/query.sql new file mode 100644 index 000000000..94a70ec1f --- /dev/null +++ b/parser/testdata/00223_shard_distributed_aggregation_memory_efficient/query.sql @@ -0,0 +1,133 @@ +-- Tags: distributed + +SET max_block_size = 1000; + +DROP TABLE IF EXISTS numbers_10_00223; +CREATE TABLE numbers_10_00223 ENGINE = Log AS SELECT * FROM system.numbers LIMIT 10000; + +SET distributed_aggregation_memory_efficient = 0; +SET group_by_two_level_threshold = 1000; + +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 
5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); + +SET distributed_aggregation_memory_efficient = 0; +SET group_by_two_level_threshold = 7; + +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 
5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); + +SET distributed_aggregation_memory_efficient = 1; +SET group_by_two_level_threshold = 1000; + +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); + +SET distributed_aggregation_memory_efficient = 1; +SET group_by_two_level_threshold = 7; + +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 
5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); + +SET distributed_aggregation_memory_efficient = 1; +SET group_by_two_level_threshold = 1; + +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY number); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10) FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 
5 : 10) GROUP BY number); + +SET distributed_aggregation_memory_efficient = 1; +SET group_by_two_level_threshold = 1000; + +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); + +SET distributed_aggregation_memory_efficient = 1; +SET group_by_two_level_threshold = 1; + +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 
0 : 5) GROUP BY number); +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); +SELECT sum(c = 1) IN (0, 10), sum(c = 2) IN (0, 5), sum(c) = 10 FROM (SELECT number, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) AND number >= (randConstant() % 2 ? 0 : 5) GROUP BY number); + +SET distributed_aggregation_memory_efficient = 1; +SET group_by_two_level_threshold = 7; + +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 
5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); +SELECT sum(c = 1) IN (0, 5), sum(c = 2) IN (5, 10), sum(c) IN (10, 15, 20) FROM (SELECT number AS k1, number + 1 AS k2, count() AS c FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 5 : 10) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); + +SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); +SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); +SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); +SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); +SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 
50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); +SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); +SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); +SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); +SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); +SELECT sum(c = 20) IN (5, 10), sum(c = 10) IN (0, 5), sum(u != 10) = 0 FROM (SELECT intDiv(number, 10) AS k1, k1 + 1 AS k2, count() AS c, uniq(number) AS u FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) WHERE number < (randConstant() % 2 ? 50 : 100) GROUP BY k1, k2 HAVING count() > 0 ORDER BY k1, k2); + +DROP TABLE numbers_10_00223; + +SELECT count() FROM remote('127.0.0.{2,3}', system.one); diff --git a/parser/testdata/00224_shard_distributed_aggregation_memory_efficient_and_overflows/ast.json b/parser/testdata/00224_shard_distributed_aggregation_memory_efficient_and_overflows/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00224_shard_distributed_aggregation_memory_efficient_and_overflows/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00224_shard_distributed_aggregation_memory_efficient_and_overflows/metadata.json b/parser/testdata/00224_shard_distributed_aggregation_memory_efficient_and_overflows/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00224_shard_distributed_aggregation_memory_efficient_and_overflows/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00224_shard_distributed_aggregation_memory_efficient_and_overflows/query.sql b/parser/testdata/00224_shard_distributed_aggregation_memory_efficient_and_overflows/query.sql new file mode 100644 index 000000000..f99f712b3 --- /dev/null +++ b/parser/testdata/00224_shard_distributed_aggregation_memory_efficient_and_overflows/query.sql @@ -0,0 +1,16 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS numbers_100k_log; +CREATE TABLE numbers_100k_log ENGINE = Log AS SELECT * FROM system.numbers LIMIT 100000; + +SELECT count() = 200000 FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_100k_log) GROUP BY number WITH TOTALS ORDER BY number LIMIT 10; + +SET distributed_aggregation_memory_efficient = 1, + group_by_two_level_threshold = 1000, + group_by_overflow_mode = 'any', + max_rows_to_group_by = 1000, + totals_mode = 'after_having_auto'; + +SELECT count() = 200000 FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_100k_log) GROUP BY number WITH 
TOTALS ORDER BY number LIMIT 10; + +DROP TABLE numbers_100k_log; diff --git a/parser/testdata/00225_join_duplicate_columns/ast.json b/parser/testdata/00225_join_duplicate_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00225_join_duplicate_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00225_join_duplicate_columns/metadata.json b/parser/testdata/00225_join_duplicate_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00225_join_duplicate_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00225_join_duplicate_columns/query.sql b/parser/testdata/00225_join_duplicate_columns/query.sql new file mode 100644 index 000000000..712fae7a6 --- /dev/null +++ b/parser/testdata/00225_join_duplicate_columns/query.sql @@ -0,0 +1,18 @@ +-- The following queries use a very unusual block structure: +-- __table3.b UInt8 UInt8(size = 1), __table3.b UInt8 Const(size = 1, UInt8(size = 1)), __table3.c UInt8 Const(size = 1, UInt8(size = 1)) +-- That leads to a legitimate error in ConcurrentHashJoin within a call to Block::cloneEmpty(): +-- Code: 352. DB::Exception: Block structure mismatch in (columns with identical name must have identical structure) stream: different columns: +-- __table3.b UInt8 UInt8(size = 0) +-- __table3.b UInt8 Const(size = 0, UInt8(size = 1)) +-- So let's disable parallel_hash. +SET join_algorithm = 'hash,grace_hash,partial_merge,full_sorting_merge'; + +select b from (select 1 as a, 42 as c) js1 any left join (select 2 as b, 2 as b, 41 as c) js2 using c; +select b from (select 1 as a, 42 as c) js1 any left join (select 2 as b, 2 as b, 42 as c) js2 using c; + +select c,a,a,b,b from + (select 1 as a, 1 as a, 42 as c group by c order by a,c) js1 + any left join + (select 2 as b, 2 as b, 41 as c group by c order by b,c) js2 + using c + order by b; diff --git a/parser/testdata/00226_zookeeper_deduplication_and_unexpected_parts_long/ast.json b/parser/testdata/00226_zookeeper_deduplication_and_unexpected_parts_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00226_zookeeper_deduplication_and_unexpected_parts_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00226_zookeeper_deduplication_and_unexpected_parts_long/metadata.json b/parser/testdata/00226_zookeeper_deduplication_and_unexpected_parts_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00226_zookeeper_deduplication_and_unexpected_parts_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00226_zookeeper_deduplication_and_unexpected_parts_long/query.sql b/parser/testdata/00226_zookeeper_deduplication_and_unexpected_parts_long/query.sql new file mode 100644 index 000000000..8968f83de --- /dev/null +++ b/parser/testdata/00226_zookeeper_deduplication_and_unexpected_parts_long/query.sql @@ -0,0 +1,33 @@ +-- Tags: long, zookeeper, no-replicated-database, no-shared-merge-tree +-- Tag no-replicated-database: Old syntax is not allowed +-- no-shared-merge-tree: implemented replacement + +DROP TABLE IF EXISTS deduplication; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE deduplication (d Date DEFAULT '2015-01-01', x Int8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00226/deduplication', 'r1', d, x, 1); + +INSERT INTO deduplication (x) VALUES (1); +INSERT INTO deduplication (x) VALUES (1); +INSERT INTO
deduplication (x) VALUES (1); +INSERT INTO deduplication (x) VALUES (1); +INSERT INTO deduplication (x) VALUES (1); +INSERT INTO deduplication (x) VALUES (1); +INSERT INTO deduplication (x) VALUES (1); +INSERT INTO deduplication (x) VALUES (1); +INSERT INTO deduplication (x) VALUES (1); +INSERT INTO deduplication (x) VALUES (1); +INSERT INTO deduplication (x) VALUES (1); +INSERT INTO deduplication (x) VALUES (1); +INSERT INTO deduplication (x) VALUES (1); +INSERT INTO deduplication (x) VALUES (1); +INSERT INTO deduplication (x) VALUES (1); +INSERT INTO deduplication (x) VALUES (1); + +SELECT * FROM deduplication; + +DETACH TABLE deduplication; +ATTACH TABLE deduplication; + +SELECT * FROM deduplication; + +DROP TABLE deduplication; diff --git a/parser/testdata/00227_quantiles_timing_arbitrary_order/ast.json b/parser/testdata/00227_quantiles_timing_arbitrary_order/ast.json new file mode 100644 index 000000000..78a8f3576 --- /dev/null +++ b/parser/testdata/00227_quantiles_timing_arbitrary_order/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantilesTiming (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_0.5" + }, + { + "explain": " Literal Float64_0.9" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_100" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001732674, + "rows_read": 24, + "bytes_read": 1001 + } +} diff --git a/parser/testdata/00227_quantiles_timing_arbitrary_order/metadata.json b/parser/testdata/00227_quantiles_timing_arbitrary_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00227_quantiles_timing_arbitrary_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00227_quantiles_timing_arbitrary_order/query.sql b/parser/testdata/00227_quantiles_timing_arbitrary_order/query.sql new file mode 100644 index 000000000..6469217e5 --- /dev/null +++ b/parser/testdata/00227_quantiles_timing_arbitrary_order/query.sql @@ -0,0 +1,4 @@ +SELECT quantilesTiming(0.5, 0.9)(number) FROM (SELECT number FROM system.numbers LIMIT 100); +SELECT quantilesTiming(0.9, 0.5)(number) FROM (SELECT number FROM system.numbers LIMIT 100); +SELECT quantilesTiming(0.01, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99)(number) FROM (SELECT number FROM system.numbers LIMIT 100); +SELECT quantilesTiming(0.99, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.01)(number) FROM (SELECT 
number FROM system.numbers LIMIT 100); diff --git a/parser/testdata/00228_shard_quantiles_deterministic_merge_overflow/ast.json b/parser/testdata/00228_shard_quantiles_deterministic_merge_overflow/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00228_shard_quantiles_deterministic_merge_overflow/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00228_shard_quantiles_deterministic_merge_overflow/metadata.json b/parser/testdata/00228_shard_quantiles_deterministic_merge_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00228_shard_quantiles_deterministic_merge_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00228_shard_quantiles_deterministic_merge_overflow/query.sql b/parser/testdata/00228_shard_quantiles_deterministic_merge_overflow/query.sql new file mode 100644 index 000000000..dc7365574 --- /dev/null +++ b/parser/testdata/00228_shard_quantiles_deterministic_merge_overflow/query.sql @@ -0,0 +1,5 @@ +-- Tags: shard + +select quantilesDeterministic(0.5, 0.9)(number, number) from (select number from system.numbers limit 101); +-- test merge does not cause overflow +select ignore(quantilesDeterministic(0.5, 0.9)(number, number)) from (select number from remote('127.0.0.{2,3}', system, numbers) limit 1000000); diff --git a/parser/testdata/00229_prewhere_column_missing/ast.json b/parser/testdata/00229_prewhere_column_missing/ast.json new file mode 100644 index 000000000..a73dac3c9 --- /dev/null +++ b/parser/testdata/00229_prewhere_column_missing/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery prewhere_column_missing (children 1)" + }, + { + "explain": " Identifier prewhere_column_missing" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001278963, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/00229_prewhere_column_missing/metadata.json b/parser/testdata/00229_prewhere_column_missing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00229_prewhere_column_missing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00229_prewhere_column_missing/query.sql b/parser/testdata/00229_prewhere_column_missing/query.sql new file mode 100644 index 000000000..1fb74b04a --- /dev/null +++ b/parser/testdata/00229_prewhere_column_missing/query.sql @@ -0,0 +1,30 @@ +drop table if exists prewhere_column_missing; + +set allow_deprecated_syntax_for_merge_tree=1; +create table prewhere_column_missing (d Date default '2015-01-01', x UInt64) engine=MergeTree(d, x, 1); + +insert into prewhere_column_missing (x) values (0); +select * from prewhere_column_missing; + +alter table prewhere_column_missing add column arr Array(UInt64); +select * from prewhere_column_missing; + +select *, arraySum(arr) as s from prewhere_column_missing; +select *, arraySum(arr) as s from prewhere_column_missing where s = 0; +select *, arraySum(arr) as s from prewhere_column_missing prewhere s = 0; + +select *, length(arr) as l from prewhere_column_missing; +select *, length(arr) as l from prewhere_column_missing where l = 0; +select *, length(arr) as l from prewhere_column_missing prewhere l = 0; + +alter table prewhere_column_missing add column hash_x UInt64 default intHash64(x); + +select * from prewhere_column_missing; +select * from prewhere_column_missing where hash_x = intHash64(x); +select 
* from prewhere_column_missing prewhere hash_x = intHash64(x); +select * from prewhere_column_missing where hash_x = intHash64(x) and length(arr) = 0; +select * from prewhere_column_missing prewhere hash_x = intHash64(x) and length(arr) = 0; +select * from prewhere_column_missing where hash_x = intHash64(x) and length(arr) = 0 and arraySum(arr) = 0; +select * from prewhere_column_missing prewhere hash_x = intHash64(x) and length(arr) = 0 and arraySum(arr) = 0; + +drop table prewhere_column_missing; diff --git a/parser/testdata/00230_array_functions_has_count_equal_index_of_non_const_second_arg/ast.json b/parser/testdata/00230_array_functions_has_count_equal_index_of_non_const_second_arg/ast.json new file mode 100644 index 000000000..9090585e3 --- /dev/null +++ b/parser/testdata/00230_array_functions_has_count_equal_index_of_non_const_second_arg/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function has (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0 (alias x)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001408373, + "rows_read": 10, + "bytes_read": 377 + } +} diff --git a/parser/testdata/00230_array_functions_has_count_equal_index_of_non_const_second_arg/metadata.json b/parser/testdata/00230_array_functions_has_count_equal_index_of_non_const_second_arg/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00230_array_functions_has_count_equal_index_of_non_const_second_arg/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00230_array_functions_has_count_equal_index_of_non_const_second_arg/query.sql b/parser/testdata/00230_array_functions_has_count_equal_index_of_non_const_second_arg/query.sql new file mode 100644 index 000000000..38db4ca96 --- /dev/null +++ b/parser/testdata/00230_array_functions_has_count_equal_index_of_non_const_second_arg/query.sql @@ -0,0 +1,39 @@ +select has([0 as x], x); +select has([0 as x], materialize(x)); +select has(materialize([0 as x]), x); +select has(materialize([0 as x]), materialize(x)); + +select has([toString(0) as x], x); +select has([toString(0) as x], materialize(x)); +select has(materialize([toString(0) as x]), x); +select has(materialize([toString(0) as x]), materialize(x)); + +select has([toUInt64(0)], number) from system.numbers limit 10; +select has([toUInt64(0)], toUInt64(number % 3)) from system.numbers limit 10; +select has(materialize([toUInt64(0)]), number) from system.numbers limit 10; +select has(materialize([toUInt64(0)]), toUInt64(number % 3)) from system.numbers limit 10; + +select has([toString(0)], toString(number)) from system.numbers limit 10; +select has([toString(0)], toString(number % 3)) from system.numbers limit 10; +select has(materialize([toString(0)]), toString(number)) from system.numbers limit 10; +select has(materialize([toString(0)]), toString(number % 3)) from system.numbers limit 10; + +select 3 = countEqual([0 as x, 1, x, x], x); +select 3 = countEqual([0 as x, 1, x, x], materialize(x)); +select 3 = countEqual(materialize([0 as x, 1, x, x]), 
x); +select 3 = countEqual(materialize([0 as x, 1, x, x]), materialize(x)); + +select 3 = countEqual([0 as x, 1, x, x], x) from system.numbers limit 10; +select 3 = countEqual([0 as x, 1, x, x], materialize(x)) from system.numbers limit 10; +select 3 = countEqual(materialize([0 as x, 1, x, x]), x) from system.numbers limit 10; +select 3 = countEqual(materialize([0 as x, 1, x, x]), materialize(x)) from system.numbers limit 10; + +select 4 = indexOf([0, 1, 2, 3 as x], x); +select 4 = indexOf([0, 1, 2, 3 as x], materialize(x)); +select 4 = indexOf(materialize([0, 1, 2, 3 as x]), x); +select 4 = indexOf(materialize([0, 1, 2, 3 as x]), materialize(x)); + +select 4 = indexOf([0, 1, 2, 3 as x], x) from system.numbers limit 10; +select 4 = indexOf([0, 1, 2, 3 as x], materialize(x)) from system.numbers limit 10; +select 4 = indexOf(materialize([0, 1, 2, 3 as x]), x) from system.numbers limit 10; +select 4 = indexOf(materialize([0, 1, 2, 3 as x]), materialize(x)) from system.numbers limit 10; diff --git a/parser/testdata/00231_format_vertical_raw/ast.json b/parser/testdata/00231_format_vertical_raw/ast.json new file mode 100644 index 000000000..c29009e49 --- /dev/null +++ b/parser/testdata/00231_format_vertical_raw/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'a\\tb\\nc\\td' (alias x)" + }, + { + "explain": " Identifier Vertical" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001220327, + "rows_read": 6, + "bytes_read": 219 + } +} diff --git a/parser/testdata/00231_format_vertical_raw/metadata.json b/parser/testdata/00231_format_vertical_raw/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00231_format_vertical_raw/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00231_format_vertical_raw/query.sql b/parser/testdata/00231_format_vertical_raw/query.sql new file mode 100644 index 000000000..48f8c4477 --- /dev/null +++ b/parser/testdata/00231_format_vertical_raw/query.sql @@ -0,0 +1 @@ +SELECT 'a\tb\nc\td' AS x FORMAT Vertical; diff --git a/parser/testdata/00232_format_readable_decimal_size/ast.json b/parser/testdata/00232_format_readable_decimal_size/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00232_format_readable_decimal_size/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00232_format_readable_decimal_size/metadata.json b/parser/testdata/00232_format_readable_decimal_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00232_format_readable_decimal_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00232_format_readable_decimal_size/query.sql b/parser/testdata/00232_format_readable_decimal_size/query.sql new file mode 100644 index 000000000..f8e1409ae --- /dev/null +++ b/parser/testdata/00232_format_readable_decimal_size/query.sql @@ -0,0 +1,4 @@ +WITH round(exp(number), 6) AS x, x > 0xFFFFFFFFFFFFFFFF ? 0xFFFFFFFFFFFFFFFF : toUInt64(x) AS y, x > 0x7FFFFFFF ? 
0x7FFFFFFF : toInt32(x) AS z +SELECT formatReadableDecimalSize(x), formatReadableDecimalSize(y), formatReadableDecimalSize(z) +FROM system.numbers +LIMIT 70; diff --git a/parser/testdata/00232_format_readable_size/ast.json b/parser/testdata/00232_format_readable_size/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00232_format_readable_size/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00232_format_readable_size/metadata.json b/parser/testdata/00232_format_readable_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00232_format_readable_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00232_format_readable_size/query.sql b/parser/testdata/00232_format_readable_size/query.sql new file mode 100644 index 000000000..0fa6f1a8b --- /dev/null +++ b/parser/testdata/00232_format_readable_size/query.sql @@ -0,0 +1,4 @@ +WITH round(exp(number), 6) AS x, x > 0xFFFFFFFFFFFFFFFF ? 0xFFFFFFFFFFFFFFFF : toUInt64(x) AS y, x > 0x7FFFFFFF ? 0x7FFFFFFF : toInt32(x) AS z +SELECT FORMAT_BYTES(x), format_bytes(y), formatReadableSize(z) +FROM system.numbers +LIMIT 70; diff --git a/parser/testdata/00233_position_function_family/ast.json b/parser/testdata/00233_position_function_family/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00233_position_function_family/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00233_position_function_family/metadata.json b/parser/testdata/00233_position_function_family/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00233_position_function_family/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00233_position_function_family/query.sql b/parser/testdata/00233_position_function_family/query.sql new file mode 100644 index 000000000..78308efc5 --- /dev/null +++ b/parser/testdata/00233_position_function_family/query.sql @@ -0,0 +1,490 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + +SET send_logs_level = 'fatal'; + +select 1 = position('', ''); +select 1 = position('abc', ''); +select 0 = position('', 'abc'); +select 1 = position('abc', 'abc'); +select 2 = position('abc', 'bc'); +select 3 = position('abc', 'c'); + +select 1 = position('', '', 0); +select 1 = position('', '', 1); +select 0 = position('', '', 2); +select 1 = position('a', '', 1); +select 2 = position('a', '', 2); +select 0 = position('a', '', 3); + +select [1, 1, 2, 3, 4, 5, 0, 0, 0, 0] = groupArray(position('aaaa', '', number)) from numbers(10); +select [1, 1, 2, 3, 4, 5, 0, 0, 0, 0] = groupArray(position(materialize('aaaa'), '', number)) from numbers(10); +select [1, 1, 2, 3, 4, 5, 0, 0, 0, 0] = groupArray(position('aaaa', materialize(''), number)) from numbers(10); +select [1, 1, 2, 3, 4, 5, 0, 0, 0, 0] = groupArray(position(materialize('aaaa'), materialize(''), number)) from numbers(10); + +select [1, 1, 2, 3, 4, 0, 0, 0, 0, 0] = groupArray(position('aaaa', 'a', number)) from numbers(10); +select [1, 1, 2, 3, 4, 0, 0, 0, 0, 0] = groupArray(position(materialize('aaaa'), 'a', number)) from numbers(10); +select [1, 1, 2, 3, 4, 0, 0, 0, 0, 0] = groupArray(position('aaaa', materialize('a'), number)) from numbers(10); +select [1, 1, 2, 3, 4, 0, 0, 0, 0, 0] = groupArray(position(materialize('aaaa'), materialize('a'), number)) from numbers(10); + +select 1 = position(materialize(''), ''); +select 1 = 
position(materialize('abc'), ''); +select 0 = position(materialize(''), 'abc'); +select 1 = position(materialize('abc'), 'abc'); +select 2 = position(materialize('abc'), 'bc'); +select 3 = position(materialize('abc'), 'c'); + +select 1 = position(materialize(''), '') from system.numbers limit 10; +select 1 = position(materialize('abc'), '') from system.numbers limit 10; +select 0 = position(materialize(''), 'abc') from system.numbers limit 10; +select 1 = position(materialize('abc'), 'abc') from system.numbers limit 10; +select 2 = position(materialize('abc'), 'bc') from system.numbers limit 10; +select 3 = position(materialize('abc'), 'c') from system.numbers limit 10; + +select 1 = position('', ''); +select 1 = position('абв', ''); +select 0 = position('', 'абв'); +select 1 = position('абв', 'абв'); +select 3 = position('абв', 'бв'); +select 5 = position('абв', 'в'); + +select 2 = position('abcabc', 'b', 0); +select 2 = position('abcabc', 'b', 1); +select 2 = position('abcabc', 'b', 2); +select 5 = position('abcabc', 'b', 3); +select 5 = position('abcabc', 'b', 4); +select 5 = position('abcabc', 'b', 5); +select 0 = position('abcabc', 'b', 6); +select 2 = position('abcabc', 'bca', 0); +select 0 = position('abcabc', 'bca', 3); + +select 1 = position(materialize(''), ''); +select 1 = position(materialize('абв'), ''); +select 0 = position(materialize(''), 'абв'); +select 1 = position(materialize('абв'), 'абв'); +select 3 = position(materialize('абв'), 'бв'); +select 5 = position(materialize('абв'), 'в'); + +select 1 = position(materialize(''), '') from system.numbers limit 10; +select 1 = position(materialize('абв'), '') from system.numbers limit 10; +select 0 = position(materialize(''), 'абв') from system.numbers limit 10; +select 1 = position(materialize('абв'), 'абв') from system.numbers limit 10; +select 3 = position(materialize('абв'), 'бв') from system.numbers limit 10; +select 5 = position(materialize('абв'), 'в') from system.numbers limit 10; + +select 1 = positionUTF8('', ''); +select 1 = positionUTF8('абв', ''); +select 0 = positionUTF8('', 'абв'); +select 1 = positionUTF8('абв', 'абв'); +select 2 = positionUTF8('абв', 'бв'); +select 3 = positionUTF8('абв', 'в'); + +select 3 = position('абвабв', 'б', 2); +select 3 = position('абвабв', 'б', 3); +select 3 = position('абвабв', 'бва', 2); +select 9 = position('абвабв', 'б', 4); +select 0 = position('абвабв', 'бва', 4); +select 5 = position('абвабв', 'в', 0); +select 11 = position('абвабв', 'в', 6); + +select 1 = positionUTF8(materialize(''), ''); +select 1 = positionUTF8(materialize('абв'), ''); +select 0 = positionUTF8(materialize(''), 'абв'); +select 1 = positionUTF8(materialize('абв'), 'абв'); +select 2 = positionUTF8(materialize('абв'), 'бв'); +select 3 = positionUTF8(materialize('абв'), 'в'); + +select 1 = positionUTF8(materialize(''), '') from system.numbers limit 10; +select 1 = positionUTF8(materialize('абв'), '') from system.numbers limit 10; +select 0 = positionUTF8(materialize(''), 'абв') from system.numbers limit 10; +select 1 = positionUTF8(materialize('абв'), 'абв') from system.numbers limit 10; +select 2 = positionUTF8(materialize('абв'), 'бв') from system.numbers limit 10; +select 3 = positionUTF8(materialize('абв'), 'в') from system.numbers limit 10; + +select 2 = positionUTF8('абвабв', 'б', 0); +select 2 = positionUTF8('абвабв', 'б', 1); +select 2 = positionUTF8('абвабв', 'б', 2); +select 5 = positionUTF8('абвабв', 'б', 3); +select 5 = positionUTF8('абвабв', 'б', 4); +select 5 = positionUTF8('абвабв', 'б', 5); 
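+-- Note (orientation comment, not part of the upstream ClickHouse test): position() operates on bytes
+-- while positionUTF8() operates on Unicode code points, which is why the assertions above expect
+-- position('абв', 'в') = 5 (each Cyrillic letter takes two bytes in UTF-8) but positionUTF8('абв', 'в') = 3;
+-- the start_pos argument follows the same byte vs. code-point convention in each variant.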
+select 0 = positionUTF8('абвабв', 'б', 6); +select 2 = positionUTF8('абвабв', 'бва', 0); +select 0 = positionUTF8('абвабв', 'бва', 3); + +select 2 = positionUTF8(materialize('абвабв'), 'б', 0) from system.numbers limit 10; +select 2 = positionUTF8(materialize('абвабв'), 'б', 1) from system.numbers limit 10; +select 2 = positionUTF8(materialize('абвабв'), 'б', 2) from system.numbers limit 10; +select 5 = positionUTF8(materialize('абвабв'), 'б', 3) from system.numbers limit 10; +select 5 = positionUTF8(materialize('абвабв'), 'б', 4) from system.numbers limit 10; +select 5 = positionUTF8(materialize('абвабв'), 'б', 5) from system.numbers limit 10; +select 0 = positionUTF8(materialize('абвабв'), 'б', 6) from system.numbers limit 10; +select 2 = positionUTF8(materialize('абвабв'), 'бва', 0) from system.numbers limit 10; +select 0 = positionUTF8(materialize('абвабв'), 'бва', 3) from system.numbers limit 10; + +select 2 = positionUTF8('абвабв', materialize('б'), 0) from system.numbers limit 10; +select 2 = positionUTF8('абвабв', materialize('б'), 1) from system.numbers limit 10; +select 2 = positionUTF8('абвабв', materialize('б'), 2) from system.numbers limit 10; +select 5 = positionUTF8('абвабв', materialize('б'), 3) from system.numbers limit 10; +select 5 = positionUTF8('абвабв', materialize('б'), 4) from system.numbers limit 10; +select 5 = positionUTF8('абвабв', materialize('б'), 5) from system.numbers limit 10; +select 0 = positionUTF8('абвабв', materialize('б'), 6) from system.numbers limit 10; +select 2 = positionUTF8('абвабв', materialize('бва'), 0) from system.numbers limit 10; +select 0 = positionUTF8('абвабв', materialize('бва'), 3) from system.numbers limit 10; + +select 2 = positionUTF8(materialize('абвабв'), materialize('б'), 0) from system.numbers limit 10; +select 2 = positionUTF8(materialize('абвабв'), materialize('б'), 1) from system.numbers limit 10; +select 2 = positionUTF8(materialize('абвабв'), materialize('б'), 2) from system.numbers limit 10; +select 5 = positionUTF8(materialize('абвабв'), materialize('б'), 3) from system.numbers limit 10; +select 5 = positionUTF8(materialize('абвабв'), materialize('б'), 4) from system.numbers limit 10; +select 5 = positionUTF8(materialize('абвабв'), materialize('б'), 5) from system.numbers limit 10; +select 0 = positionUTF8(materialize('абвабв'), materialize('б'), 6) from system.numbers limit 10; +select 2 = positionUTF8(materialize('абвабв'), materialize('бва'), 0) from system.numbers limit 10; +select 0 = positionUTF8(materialize('абвабв'), materialize('бва'), 3) from system.numbers limit 10; + +select [2, 2, 2, 5, 5, 5, 0, 0, 0, 0] = groupArray(positionUTF8(materialize('абвабв'), materialize('б'), number)) from numbers(10); +select [2, 2, 2, 5, 5, 5, 0, 0, 0, 0] = groupArray(positionUTF8('абвабв', materialize('б'), number)) from numbers(10); +select [2, 2, 2, 5, 5, 5, 0, 0, 0, 0] = groupArray(positionUTF8('абвабв', 'б', number)) from numbers(10); +select [2, 2, 2, 5, 5, 5, 0, 0, 0, 0] = groupArray(positionUTF8(materialize('абвабв'), 'б', number)) from numbers(10); + +select 1 = positionCaseInsensitive('', ''); +select 1 = positionCaseInsensitive('abc', ''); +select 0 = positionCaseInsensitive('', 'aBc'); +select 1 = positionCaseInsensitive('abc', 'aBc'); +select 2 = positionCaseInsensitive('abc', 'Bc'); +select 3 = positionCaseInsensitive('abc', 'C'); + +select 1 = positionCaseInsensitive(materialize(''), ''); +select 1 = positionCaseInsensitive(materialize('abc'), ''); +select 0 = positionCaseInsensitive(materialize(''), 'aBc'); 
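+-- Note (orientation comment, not part of the upstream ClickHouse test): positionCaseInsensitive()
+-- folds case only for ASCII characters, so a Cyrillic needle differing only in case is not found,
+-- which is consistent with the 'абв'/'аБв' assertions below expecting 0; positionCaseInsensitiveUTF8()
+-- performs the same search with Unicode-aware case folding and finds the match.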
+select 1 = positionCaseInsensitive(materialize('abc'), 'aBc'); +select 2 = positionCaseInsensitive(materialize('abc'), 'Bc'); +select 3 = positionCaseInsensitive(materialize('abc'), 'C'); + +select 1 = positionCaseInsensitive(materialize(''), '') from system.numbers limit 10; +select 1 = positionCaseInsensitive(materialize('abc'), '') from system.numbers limit 10; +select 0 = positionCaseInsensitive(materialize(''), 'aBc') from system.numbers limit 10; +select 1 = positionCaseInsensitive(materialize('abc'), 'aBc') from system.numbers limit 10; +select 2 = positionCaseInsensitive(materialize('abc'), 'Bc') from system.numbers limit 10; +select 3 = positionCaseInsensitive(materialize('abc'), 'C') from system.numbers limit 10; + +select 6 = positionCaseInsensitive(materialize('abcabc'), 'C', 4); +select 6 = positionCaseInsensitive(materialize('abcabc'), 'C', 4) from system.numbers limit 10; +select 6 = positionCaseInsensitive(materialize('abcabc'), 'C', materialize(4)) from system.numbers limit 10; + +select 1 = positionCaseInsensitive('', ''); +select 1 = positionCaseInsensitive('абв', ''); +select 0 = positionCaseInsensitive('', 'аБв'); +select 0 = positionCaseInsensitive('абв', 'аБв'); +select 0 = positionCaseInsensitive('абв', 'Бв'); +select 0 = positionCaseInsensitive('абв', 'В'); + +select 1 = positionCaseInsensitive(materialize(''), ''); +select 1 = positionCaseInsensitive(materialize('абв'), ''); +select 0 = positionCaseInsensitive(materialize(''), 'аБв'); +select 0 = positionCaseInsensitive(materialize('абв'), 'аБв'); +select 0 = positionCaseInsensitive(materialize('абв'), 'Бв'); +select 0 = positionCaseInsensitive(materialize('абв'), 'В'); + +select 1 = positionCaseInsensitive(materialize(''), '') from system.numbers limit 10; +select 1 = positionCaseInsensitive(materialize('абв'), '') from system.numbers limit 10; +select 0 = positionCaseInsensitive(materialize(''), 'аБв') from system.numbers limit 10; +select 0 = positionCaseInsensitive(materialize('абв'), 'аБв') from system.numbers limit 10; +select 0 = positionCaseInsensitive(materialize('абв'), 'Бв') from system.numbers limit 10; +select 0 = positionCaseInsensitive(materialize('абв'), 'В') from system.numbers limit 10; + +select 1 = positionCaseInsensitiveUTF8('', ''); +select 1 = positionCaseInsensitiveUTF8('абв', ''); +select 0 = positionCaseInsensitiveUTF8('', 'аБв'); +select 1 = positionCaseInsensitiveUTF8('абв', 'аБв'); +select 2 = positionCaseInsensitiveUTF8('абв', 'Бв'); +select 3 = positionCaseInsensitiveUTF8('абв', 'в'); + +select 1 = positionCaseInsensitiveUTF8(materialize(''), ''); +select 1 = positionCaseInsensitiveUTF8(materialize('абв'), ''); +select 0 = positionCaseInsensitiveUTF8(materialize(''), 'аБв'); +select 1 = positionCaseInsensitiveUTF8(materialize('абв'), 'аБв'); +select 2 = positionCaseInsensitiveUTF8(materialize('абв'), 'Бв'); +select 3 = positionCaseInsensitiveUTF8(materialize('абв'), 'В'); + +select 1 = positionCaseInsensitiveUTF8(materialize(''), '') from system.numbers limit 10; +select 1 = positionCaseInsensitiveUTF8(materialize('абв'), '') from system.numbers limit 10; +select 0 = positionCaseInsensitiveUTF8(materialize(''), 'аБв') from system.numbers limit 10; +select 1 = positionCaseInsensitiveUTF8(materialize('абв'), 'аБв') from system.numbers limit 10; +select 2 = positionCaseInsensitiveUTF8(materialize('абв'), 'Бв') from system.numbers limit 10; +select 3 = positionCaseInsensitiveUTF8(materialize('абв'), 'В') from system.numbers limit 10; + +select 6 = 
positionCaseInsensitiveUTF8(materialize('абвабв'), 'В', 4); +select 6 = positionCaseInsensitiveUTF8(materialize('абвабв'), 'В', 4) from system.numbers limit 10; +select 6 = positionCaseInsensitiveUTF8(materialize('абвабв'), 'В', materialize(4)) from system.numbers limit 10; + +select position('' as h, '' as n) = positionCaseInsensitive(h, n); +select position('abc' as h, '' as n) = positionCaseInsensitive(n, n); +select 0 = positionCaseInsensitive('', 'aBc'); +select position('abc' as h, lower('aBc' as n)) = positionCaseInsensitive(h, n); +select position('abc' as h, lower('Bc' as n)) = positionCaseInsensitive(h, n); +select position('abc' as h, lower('C' as n)) = positionCaseInsensitive(h, n); + +select positionCaseInsensitive(materialize('') as h, '' as n) = positionCaseInsensitive(h, n); +select positionCaseInsensitive(materialize('abc') as h, '' as n) = positionCaseInsensitive(h, n); +select positionCaseInsensitive(materialize('') as h, lower('aBc' as n)) = positionCaseInsensitive(h, n); +select positionCaseInsensitive(materialize('abc') as h, lower('aBc' as n)) = positionCaseInsensitive(h, n); +select positionCaseInsensitive(materialize('abc') as h, lower('Bc' as n)) = positionCaseInsensitive(h, n); +select positionCaseInsensitive(materialize('abc') as h, lower('C' as n)) = positionCaseInsensitive(h, n); + +select position(materialize('') as h, lower('' as n)) = positionCaseInsensitive(h, n) from system.numbers limit 10; +select position(materialize('abc') as h, lower('' as n)) = positionCaseInsensitive(h, n) from system.numbers limit 10; +select position(materialize('') as h, lower('aBc' as n)) = positionCaseInsensitive(h, n) from system.numbers limit 10; +select position(materialize('abc') as h, lower('aBc' as n)) = positionCaseInsensitive(h, n) from system.numbers limit 10; +select position(materialize('abc') as h, lower('Bc' as n)) = positionCaseInsensitive(h, n) from system.numbers limit 10; +select position(materialize('abc') as h, lower('C' as n)) = positionCaseInsensitive(h, n) from system.numbers limit 10; + +select position('' as h, lower('' as n)) = positionCaseInsensitive(h, n); +select position('абв' as h, lower('' as n)) = positionCaseInsensitive(h, n); +select position('' as h, lower('аБв' as n)) = positionCaseInsensitive(h, n); +select position('абв' as h, lower('аБв' as n)) = positionCaseInsensitive(h, n); +select position('абв' as h, lower('Бв' as n)) = positionCaseInsensitive(h, n); +select position('абв' as h, lower('В' as n)) = positionCaseInsensitive(h, n); + +select position(materialize('') as h, lower('' as n)) = positionCaseInsensitive(h, n); +select position(materialize('абв') as h, lower('' as n)) = positionCaseInsensitive(h, n); +select position(materialize('') as h, lower('аБв' as n)) = positionCaseInsensitive(h, n); +select position(materialize('абв') as h, lower('аБв' as n)) = positionCaseInsensitive(h, n); +select position(materialize('абв') as h, lower('Бв' as n)) = positionCaseInsensitive(h, n); +select position(materialize('абв') as h, lower('В' as n)) = positionCaseInsensitive(h, n); + +select position(materialize('') as h, lower('' as n)) = positionCaseInsensitive(h, n); +select position(materialize('абв') as h, lower('' as n)) = positionCaseInsensitive(h, n); +select position(materialize('') as h, lower('аБв' as n)) = positionCaseInsensitive(h, n); +select position(materialize('абв') as h, lower('аБв' as n)) = positionCaseInsensitive(h, n); +select position(materialize('абв') as h, lower('Бв' as n)) = positionCaseInsensitive(h, n); +select 
position(materialize('абв') as h, lower('В' as n)) = positionCaseInsensitive(h, n); + +select position(materialize('') as h, lower('' as n)) = positionCaseInsensitive(h, n) from system.numbers limit 10; +select position(materialize('абв') as h, lower('' as n)) = positionCaseInsensitive(h, n) from system.numbers limit 10; +select position(materialize('') as h, lower('аБв' as n)) = positionCaseInsensitive(h, n) from system.numbers limit 10; +select position(materialize('абв') as h, lower('аБв' as n)) = positionCaseInsensitive(h, n) from system.numbers limit 10; +select position(materialize('абв') as h, lower('Бв' as n)) = positionCaseInsensitive(h, n) from system.numbers limit 10; +select position(materialize('абв') as h, lower('В' as n)) = positionCaseInsensitive(h, n) from system.numbers limit 10; + +select positionUTF8('' as h, lowerUTF8('' as n)) = positionCaseInsensitiveUTF8(h, n); +select positionUTF8('абв' as h, lowerUTF8('' as n)) = positionCaseInsensitiveUTF8(h, n); +select positionUTF8('' as h, lowerUTF8('аБв' as n)) = positionCaseInsensitiveUTF8(h, n); +select positionUTF8('абв' as h, lowerUTF8('аБв' as n)) = positionCaseInsensitiveUTF8(h, n); +select positionUTF8('абв' as h, lowerUTF8('Бв' as n)) = positionCaseInsensitiveUTF8(h, n); +select positionUTF8('абв' as h, lowerUTF8('в' as n)) = positionCaseInsensitiveUTF8(h, n); + +select positionUTF8(materialize('') as h, lowerUTF8('' as n)) = positionCaseInsensitiveUTF8(h, n); +select positionUTF8(materialize('абв') as h, lowerUTF8('' as n)) = positionCaseInsensitiveUTF8(h, n); +select positionUTF8(materialize('') as h, lowerUTF8('аБв' as n)) = positionCaseInsensitiveUTF8(h, n); +select positionUTF8(materialize('абв') as h, lowerUTF8('аБв' as n)) = positionCaseInsensitiveUTF8(h, n); +select positionUTF8(materialize('абв') as h, lowerUTF8('Бв' as n)) = positionCaseInsensitiveUTF8(h, n); +select positionUTF8(materialize('абв') as h, lowerUTF8('В' as n)) = positionCaseInsensitiveUTF8(h, n); + +select positionUTF8(materialize('') as h, lowerUTF8('' as n)) = positionCaseInsensitiveUTF8(h, n) from system.numbers limit 10; +select positionUTF8(materialize('абв') as h, lowerUTF8('' as n)) = positionCaseInsensitiveUTF8(h, n) from system.numbers limit 10; +select positionUTF8(materialize('') as h, lowerUTF8('аБв' as n)) = positionCaseInsensitiveUTF8(h, n) from system.numbers limit 10; +select positionUTF8(materialize('абв') as h, lowerUTF8('аБв' as n)) = positionCaseInsensitiveUTF8(h, n) from system.numbers limit 10; +select positionUTF8(materialize('абв') as h, lowerUTF8('Бв' as n)) = positionCaseInsensitiveUTF8(h, n) from system.numbers limit 10; +select positionUTF8(materialize('абв') as h, lowerUTF8('В' as n)) = positionCaseInsensitiveUTF8(h, n) from system.numbers limit 10; + + +select 2 = position('abcdefgh', materialize('b')); +select 2 = position('abcdefgh', materialize('bc')); +select 2 = position('abcdefgh', materialize('bcd')); +select 2 = position('abcdefgh', materialize('bcde')); +select 2 = position('abcdefgh', materialize('bcdef')); +select 2 = position('abcdefgh', materialize('bcdefg')); +select 2 = position('abcdefgh', materialize('bcdefgh')); + +select 1 = position('abcdefgh', materialize('abcdefgh')); +select 1 = position('abcdefgh', materialize('abcdefg')); +select 1 = position('abcdefgh', materialize('abcdef')); +select 1 = position('abcdefgh', materialize('abcde')); +select 1 = position('abcdefgh', materialize('abcd')); +select 1 = position('abcdefgh', materialize('abc')); +select 1 = position('abcdefgh', 
materialize('ab')); +select 1 = position('abcdefgh', materialize('a')); + +select 3 = position('abcdefgh', materialize('c')); +select 3 = position('abcdefgh', materialize('cd')); +select 3 = position('abcdefgh', materialize('cde')); +select 3 = position('abcdefgh', materialize('cdef')); +select 3 = position('abcdefgh', materialize('cdefg')); +select 3 = position('abcdefgh', materialize('cdefgh')); + +select 4 = position('abcdefgh', materialize('defgh')); +select 4 = position('abcdefgh', materialize('defg')); +select 4 = position('abcdefgh', materialize('def')); +select 4 = position('abcdefgh', materialize('de')); +select 4 = position('abcdefgh', materialize('d')); + +select 5 = position('abcdefgh', materialize('e')); +select 5 = position('abcdefgh', materialize('ef')); +select 5 = position('abcdefgh', materialize('efg')); +select 5 = position('abcdefgh', materialize('efgh')); + +select 6 = position('abcdefgh', materialize('fgh')); +select 6 = position('abcdefgh', materialize('fg')); +select 6 = position('abcdefgh', materialize('f')); + +select 7 = position('abcdefgh', materialize('g')); +select 7 = position('abcdefgh', materialize('gh')); + +select 8 = position('abcdefgh', materialize('h')); + +select 2 = position('abcdefgh', materialize('b')) from system.numbers limit 10; +select 2 = position('abcdefgh', materialize('bc')) from system.numbers limit 10; +select 2 = position('abcdefgh', materialize('bcd')) from system.numbers limit 10; +select 2 = position('abcdefgh', materialize('bcde')) from system.numbers limit 10; +select 2 = position('abcdefgh', materialize('bcdef')) from system.numbers limit 10; +select 2 = position('abcdefgh', materialize('bcdefg')) from system.numbers limit 10; +select 2 = position('abcdefgh', materialize('bcdefgh')) from system.numbers limit 10; + +select 1 = position('abcdefgh', materialize('abcdefgh')) from system.numbers limit 10; +select 1 = position('abcdefgh', materialize('abcdefg')) from system.numbers limit 10; +select 1 = position('abcdefgh', materialize('abcdef')) from system.numbers limit 10; +select 1 = position('abcdefgh', materialize('abcde')) from system.numbers limit 10; +select 1 = position('abcdefgh', materialize('abcd')) from system.numbers limit 10; +select 1 = position('abcdefgh', materialize('abc')) from system.numbers limit 10; +select 1 = position('abcdefgh', materialize('ab')) from system.numbers limit 10; +select 1 = position('abcdefgh', materialize('a')) from system.numbers limit 10; + +select 3 = position('abcdefgh', materialize('c')) from system.numbers limit 10; +select 3 = position('abcdefgh', materialize('cd')) from system.numbers limit 10; +select 3 = position('abcdefgh', materialize('cde')) from system.numbers limit 10; +select 3 = position('abcdefgh', materialize('cdef')) from system.numbers limit 10; +select 3 = position('abcdefgh', materialize('cdefg')) from system.numbers limit 10; +select 3 = position('abcdefgh', materialize('cdefgh')) from system.numbers limit 10; + +select 4 = position('abcdefgh', materialize('defgh')) from system.numbers limit 10; +select 4 = position('abcdefgh', materialize('defg')) from system.numbers limit 10; +select 4 = position('abcdefgh', materialize('def')) from system.numbers limit 10; +select 4 = position('abcdefgh', materialize('de')) from system.numbers limit 10; +select 4 = position('abcdefgh', materialize('d')) from system.numbers limit 10; + +select 5 = position('abcdefgh', materialize('e')) from system.numbers limit 10; +select 5 = position('abcdefgh', materialize('ef')) from system.numbers 
limit 10; +select 5 = position('abcdefgh', materialize('efg')) from system.numbers limit 10; +select 5 = position('abcdefgh', materialize('efgh')) from system.numbers limit 10; + +select 6 = position('abcdefgh', materialize('fgh')) from system.numbers limit 10; +select 6 = position('abcdefgh', materialize('fg')) from system.numbers limit 10; +select 6 = position('abcdefgh', materialize('f')) from system.numbers limit 10; + +select 7 = position('abcdefgh', materialize('g')) from system.numbers limit 10; +select 7 = position('abcdefgh', materialize('gh')) from system.numbers limit 10; + +select 8 = position('abcdefgh', materialize('h')) from system.numbers limit 10; + +select 2 = position('abcdefgh', materialize('b')) from system.numbers limit 129; +select 2 = position('abcdefgh', materialize('bc')) from system.numbers limit 129; +select 2 = position('abcdefgh', materialize('bcd')) from system.numbers limit 10; +select 2 = position('abcdefgh', materialize('bcde')) from system.numbers limit 129; +select 2 = position('abcdefgh', materialize('bcdef')) from system.numbers limit 129; +select 2 = position('abcdefgh', materialize('bcdefg')) from system.numbers limit 129; +select 2 = position('abcdefgh', materialize('bcdefgh')) from system.numbers limit 129; + +select 1 = position('abcdefgh', materialize('abcdefgh')) from system.numbers limit 129; +select 1 = position('abcdefgh', materialize('abcdefg')) from system.numbers limit 129; +select 1 = position('abcdefgh', materialize('abcdef')) from system.numbers limit 129; +select 1 = position('abcdefgh', materialize('abcde')) from system.numbers limit 129; +select 1 = position('abcdefgh', materialize('abcd')) from system.numbers limit 129; +select 1 = position('abcdefgh', materialize('abc')) from system.numbers limit 129; +select 1 = position('abcdefgh', materialize('ab')) from system.numbers limit 129; +select 1 = position('abcdefgh', materialize('a')) from system.numbers limit 129; + +select 3 = position('abcdefgh', materialize('c')) from system.numbers limit 129; +select 3 = position('abcdefgh', materialize('cd')) from system.numbers limit 129; +select 3 = position('abcdefgh', materialize('cde')) from system.numbers limit 129; +select 3 = position('abcdefgh', materialize('cdef')) from system.numbers limit 129; +select 3 = position('abcdefgh', materialize('cdefg')) from system.numbers limit 129; +select 3 = position('abcdefgh', materialize('cdefgh')) from system.numbers limit 129; + +select 4 = position('abcdefgh', materialize('defgh')) from system.numbers limit 129; +select 4 = position('abcdefgh', materialize('defg')) from system.numbers limit 129; +select 4 = position('abcdefgh', materialize('def')) from system.numbers limit 129; +select 4 = position('abcdefgh', materialize('de')) from system.numbers limit 129; +select 4 = position('abcdefgh', materialize('d')) from system.numbers limit 129; + +select 5 = position('abcdefgh', materialize('e')) from system.numbers limit 129; +select 5 = position('abcdefgh', materialize('ef')) from system.numbers limit 129; +select 5 = position('abcdefgh', materialize('efg')) from system.numbers limit 129; +select 5 = position('abcdefgh', materialize('efgh')) from system.numbers limit 129; + +select 6 = position('abcdefgh', materialize('fgh')) from system.numbers limit 129; +select 6 = position('abcdefgh', materialize('fg')) from system.numbers limit 129; +select 6 = position('abcdefgh', materialize('f')) from system.numbers limit 129; + +select 7 = position('abcdefgh', materialize('g')) from system.numbers limit 
129; +select 7 = position('abcdefgh', materialize('gh')) from system.numbers limit 129; + +select 8 = position('abcdefgh', materialize('h')) from system.numbers limit 129; + +select 2 = position('abc', materialize('b')); +select 2 = position('abc', materialize('bc')); +select 0 = position('abc', materialize('bcde')); +select 0 = position('abc', materialize('bcdef')); +select 0 = position('abc', materialize('bcdefg')); +select 0 = position('abc', materialize('bcdefgh')); + +select 0 = position('abc', materialize('abcdefg')); +select 0 = position('abc', materialize('abcdef')); +select 0 = position('abc', materialize('abcde')); +select 0 = position('abc', materialize('abcd')); +select 1 = position('abc', materialize('abc')); +select 1 = position('abc', materialize('ab')); +select 1 = position('abc', materialize('a')); + +select 3 = position('abcd', materialize('c')); +select 3 = position('abcd', materialize('cd')); +select 0 = position('abcd', materialize('cde')); +select 0 = position('abcd', materialize('cdef')); +select 0 = position('abcd', materialize('cdefg')); +select 0 = position('abcd', materialize('cdefgh')); + +select 0 = position('abc', materialize('defgh')); +select 0 = position('abc', materialize('defg')); +select 0 = position('abc', materialize('def')); +select 0 = position('abc', materialize('de')); +select 0 = position('abc', materialize('d')); + + +select 2 = position('abc', materialize('b')) from system.numbers limit 10; +select 2 = position('abc', materialize('bc')) from system.numbers limit 10; +select 0 = position('abc', materialize('bcde')) from system.numbers limit 10; +select 0 = position('abc', materialize('bcdef')) from system.numbers limit 10; +select 0 = position('abc', materialize('bcdefg')) from system.numbers limit 10; +select 0 = position('abc', materialize('bcdefgh')) from system.numbers limit 10; + + +select 0 = position('abc', materialize('abcdefg')) from system.numbers limit 10; +select 0 = position('abc', materialize('abcdef')) from system.numbers limit 10; +select 0 = position('abc', materialize('abcde')) from system.numbers limit 10; +select 0 = position('abc', materialize('abcd')) from system.numbers limit 10; +select 1 = position('abc', materialize('abc')) from system.numbers limit 10; +select 1 = position('abc', materialize('ab')) from system.numbers limit 10; +select 1 = position('abc', materialize('a')) from system.numbers limit 10; + +select 3 = position('abcd', materialize('c')) from system.numbers limit 10; +select 3 = position('abcd', materialize('cd')) from system.numbers limit 10; +select 0 = position('abcd', materialize('cde')) from system.numbers limit 10; +select 0 = position('abcd', materialize('cdef')) from system.numbers limit 10; +select 0 = position('abcd', materialize('cdefg')) from system.numbers limit 10; +select 0 = position('abcd', materialize('cdefgh')) from system.numbers limit 10; + +select 0 = position('abc', materialize('defgh')) from system.numbers limit 10; +select 0 = position('abc', materialize('defg')) from system.numbers limit 10; +select 0 = position('abc', materialize('def')) from system.numbers limit 10; +select 0 = position('abc', materialize('de')) from system.numbers limit 10; +select 0 = position('abc', materialize('d')) from system.numbers limit 10; + +select 1 = position('abc', materialize('')); +select 1 = position('abc', materialize('')) from system.numbers limit 10; +select 1 = position('abc', materialize('')) from system.numbers limit 100; +select 1 = position('abc', materialize('')) from system.numbers 
limit 1000; + +select 1 = position('abab', materialize('ab')); +select 1 = position('abababababababababababab', materialize('abab')); +select 1 = position('abababababababababababab', materialize('abababababababababa')); + +select positionUTF8('你', '', 3) = positionUTF8(materialize('你'), materialize(''), materialize(3)); diff --git a/parser/testdata/00233_position_function_sql_comparibilty/ast.json b/parser/testdata/00233_position_function_sql_comparibilty/ast.json new file mode 100644 index 000000000..e7de636e7 --- /dev/null +++ b/parser/testdata/00233_position_function_sql_comparibilty/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001039419, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00233_position_function_sql_comparibilty/metadata.json b/parser/testdata/00233_position_function_sql_comparibilty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00233_position_function_sql_comparibilty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00233_position_function_sql_comparibilty/query.sql b/parser/testdata/00233_position_function_sql_comparibilty/query.sql new file mode 100644 index 000000000..ae9409cd0 --- /dev/null +++ b/parser/testdata/00233_position_function_sql_comparibilty/query.sql @@ -0,0 +1,16 @@ +SET send_logs_level = 'fatal'; +select 1 = position('' in ''); +select 1 = position('' in 'abc'); +select 0 = position('abc' in ''); +select 1 = position('abc' in 'abc'); +select 2 = position('bc' in 'abc'); +select 3 = position('c' in 'abc'); + +select 1 = position('' in ''); +select 1 = position('' in 'абв'); +select 0 = position('абв' in ''); +select 1 = position('абв' in 'абв'); +select 3 = position('бв' in 'абв'); +select 5 = position('в' in 'абв'); + +select 6 = position('/' IN s) FROM (SELECT 'Hello/World' AS s); diff --git a/parser/testdata/00234_disjunctive_equality_chains_optimization/ast.json b/parser/testdata/00234_disjunctive_equality_chains_optimization/ast.json new file mode 100644 index 000000000..ed50f7a6f --- /dev/null +++ b/parser/testdata/00234_disjunctive_equality_chains_optimization/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery foo_00234 (children 3)" + }, + { + "explain": " Identifier foo_00234" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration id (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001311426, + "rows_read": 10, + "bytes_read": 349 + } +} diff --git a/parser/testdata/00234_disjunctive_equality_chains_optimization/metadata.json b/parser/testdata/00234_disjunctive_equality_chains_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00234_disjunctive_equality_chains_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00234_disjunctive_equality_chains_optimization/query.sql b/parser/testdata/00234_disjunctive_equality_chains_optimization/query.sql new file mode 100644 
index 000000000..083548b92 --- /dev/null +++ b/parser/testdata/00234_disjunctive_equality_chains_optimization/query.sql @@ -0,0 +1,4 @@ +CREATE TABLE IF NOT EXISTS foo_00234(id UInt64) Engine=MergeTree ORDER BY tuple(); +INSERT INTO foo_00234(id) VALUES (0),(4),(1),(1),(3),(1),(1),(2),(2),(2),(1),(2),(3),(2),(1),(1),(2),(1),(1),(1),(3),(1),(2),(2),(1),(1),(3),(1),(2),(1),(1),(3),(2),(1),(1),(4),(0); +SELECT sum(id = 3 OR id = 1 OR id = 2) AS x, sum(id = 3 OR id = 1 OR id = 2) AS x FROM foo_00234; +DROP TABLE foo_00234; diff --git a/parser/testdata/00235_create_temporary_table_as/ast.json b/parser/testdata/00235_create_temporary_table_as/ast.json new file mode 100644 index 000000000..4c67c6ebb --- /dev/null +++ b/parser/testdata/00235_create_temporary_table_as/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery one_0023 (children 1)" + }, + { + "explain": " Identifier one_0023" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001076654, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00235_create_temporary_table_as/metadata.json b/parser/testdata/00235_create_temporary_table_as/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00235_create_temporary_table_as/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00235_create_temporary_table_as/query.sql b/parser/testdata/00235_create_temporary_table_as/query.sql new file mode 100644 index 000000000..4bf93b0ce --- /dev/null +++ b/parser/testdata/00235_create_temporary_table_as/query.sql @@ -0,0 +1,3 @@ +drop temporary table if exists one_0023; +create temporary table one_0023 as select 1; +select * from one_0023; diff --git a/parser/testdata/00236_replicated_drop_on_non_leader_zookeeper_long/ast.json b/parser/testdata/00236_replicated_drop_on_non_leader_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00236_replicated_drop_on_non_leader_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00236_replicated_drop_on_non_leader_zookeeper_long/metadata.json b/parser/testdata/00236_replicated_drop_on_non_leader_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00236_replicated_drop_on_non_leader_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00236_replicated_drop_on_non_leader_zookeeper_long/query.sql b/parser/testdata/00236_replicated_drop_on_non_leader_zookeeper_long/query.sql new file mode 100644 index 000000000..aa5d7e10b --- /dev/null +++ b/parser/testdata/00236_replicated_drop_on_non_leader_zookeeper_long/query.sql @@ -0,0 +1,23 @@ +-- Tags: long, replica, no-replicated-database, no-shared-merge-tree +-- Tag no-replicated-database: Old syntax is not allowed +-- no-shared-merge-tree: implemented replacement + +SET replication_alter_partitions_sync = 2; + +DROP TABLE IF EXISTS attach_r1; +DROP TABLE IF EXISTS attach_r2; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE attach_r1 (d Date) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00236/01/attach', 'r1', d, d, 8192); +CREATE TABLE attach_r2 (d Date) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00236/01/attach', 'r2', d, d, 8192); + +INSERT INTO attach_r1 VALUES ('2014-01-01'), ('2014-02-01'), ('2014-03-01'); + +SELECT d FROM attach_r1 ORDER BY d; + +ALTER TABLE 
attach_r2 DROP PARTITION 201402; + +SELECT d FROM attach_r1 ORDER BY d; + +DROP TABLE attach_r1; +DROP TABLE attach_r2; diff --git a/parser/testdata/00237_group_by_arrays/ast.json b/parser/testdata/00237_group_by_arrays/ast.json new file mode 100644 index 000000000..9dc8f0ae2 --- /dev/null +++ b/parser/testdata/00237_group_by_arrays/ast.json @@ -0,0 +1,124 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier arr1" + }, + { + "explain": " Identifier arr2" + }, + { + "explain": " Function count (alias c) (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function emptyArrayUInt8 (alias arr1) (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal Array_[UInt64_1] (alias arr2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_1]" + }, + { + "explain": " Function emptyArrayUInt8 (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier arr1" + }, + { + "explain": " Identifier arr2" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier c" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier arr1" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier arr2" + } + ], + + "rows": 34, + + "statistics": + { + "elapsed": 0.001483855, + "rows_read": 34, + "bytes_read": 1332 + } +} diff --git a/parser/testdata/00237_group_by_arrays/metadata.json b/parser/testdata/00237_group_by_arrays/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00237_group_by_arrays/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00237_group_by_arrays/query.sql b/parser/testdata/00237_group_by_arrays/query.sql new file mode 100644 index 000000000..faf81ebc5 --- /dev/null +++ b/parser/testdata/00237_group_by_arrays/query.sql @@ -0,0 +1,6 @@ +SELECT arr1, arr2, count() AS c FROM (SELECT emptyArrayUInt8() AS arr1, [1] AS arr2 UNION ALL SELECT [1], emptyArrayUInt8()) GROUP BY arr1, arr2 ORDER BY c DESC, arr1, arr2; +SELECT arr1, arr2, count() AS c FROM (SELECT range(number) AS arr1, range(toUInt64(10 - number)) AS arr2 FROM system.numbers LIMIT 11) GROUP BY arr1, arr2 ORDER BY c DESC, arr1, arr2; +SELECT arr1, arr2, count() AS c FROM (SELECT arrayMap(x -> toString(x), range(number)) AS arr1, range(toUInt64(10 - number)) AS arr2 FROM system.numbers LIMIT 11) GROUP BY arr1, arr2 ORDER BY c DESC, arr1, arr2; +SELECT arr1, arr2, count() AS c FROM (SELECT arrayMap(x -> toString(x), range(number)) AS arr1, arrayMap(x -> toString(x), range(toUInt64(10 - number))) AS arr2 FROM 
system.numbers LIMIT 11) GROUP BY arr1, arr2 ORDER BY c DESC, arr1, arr2; +SELECT arr1, arr2, count() AS c FROM (SELECT arrayMap(x -> toString(x), range(number)) AS arr1, replicate(range(toUInt64(10 - number)), arr1) AS arr2 FROM system.numbers LIMIT 11) GROUP BY arr1, arr2 ORDER BY c DESC, arr1, arr2; +SELECT arr1, arr2, count() AS c FROM (SELECT arrayMap(x -> toString(x), range(number)) AS arr1, replicate(range(toUInt64(10 - number)), arr1) AS arr2 FROM (SELECT number % 11 AS number FROM system.numbers LIMIT 30)) GROUP BY arr1, arr2 ORDER BY c DESC, arr1, arr2; diff --git a/parser/testdata/00238_removal_of_temporary_columns/ast.json b/parser/testdata/00238_removal_of_temporary_columns/ast.json new file mode 100644 index 000000000..6b28bdbe1 --- /dev/null +++ b/parser/testdata/00238_removal_of_temporary_columns/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001263591, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00238_removal_of_temporary_columns/metadata.json b/parser/testdata/00238_removal_of_temporary_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00238_removal_of_temporary_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00238_removal_of_temporary_columns/query.sql b/parser/testdata/00238_removal_of_temporary_columns/query.sql new file mode 100644 index 000000000..a2a04a01a --- /dev/null +++ b/parser/testdata/00238_removal_of_temporary_columns/query.sql @@ -0,0 +1,2 @@ +SET max_temporary_non_const_columns = 10; +SELECT number + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 AS x FROM system.numbers LIMIT 1; diff --git a/parser/testdata/00239_type_conversion_in_in/ast.json b/parser/testdata/00239_type_conversion_in_in/ast.json new file mode 100644 index 000000000..7e9af98aa --- /dev/null +++ b/parser/testdata/00239_type_conversion_in_in/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias x)" + }, + { + "explain": " Function or (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal Int64_-1" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001457183, + "rows_read": 23, + "bytes_read": 839 + } +} diff --git a/parser/testdata/00239_type_conversion_in_in/metadata.json b/parser/testdata/00239_type_conversion_in_in/metadata.json new file 
mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00239_type_conversion_in_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00239_type_conversion_in_in/query.sql b/parser/testdata/00239_type_conversion_in_in/query.sql new file mode 100644 index 000000000..5589d91ce --- /dev/null +++ b/parser/testdata/00239_type_conversion_in_in/query.sql @@ -0,0 +1,13 @@ +select 1 as x, x = 1 or x = 2 or x = 3 or x = -1; +select 1 as x, x = 1.0 or x = 2 or x = 3 or x = -1; +select 1 as x, x = 1.5 or x = 2 or x = 3 or x = -1; + +SELECT + 1 IN (1, -1, 2.0, 2.5), + 1.0 IN (1, -1, 2.0, 2.5), + 1 IN (1.0, -1, 2.0, 2.5), + 1.0 IN (1.0, -1, 2.0, 2.5), + 1 IN (1.1, -1, 2.0, 2.5), + -1 IN (1, -1, 2.0, 2.5); + +SELECT -number IN (1, 2, 3, -5.0, -2.0) FROM system.numbers LIMIT 10; diff --git a/parser/testdata/00240_replace_substring_loop/ast.json b/parser/testdata/00240_replace_substring_loop/ast.json new file mode 100644 index 000000000..cdeafb806 --- /dev/null +++ b/parser/testdata/00240_replace_substring_loop/ast.json @@ -0,0 +1,112 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Identifier s" + }, + { + "explain": " Function replaceAll (alias a) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier s" + }, + { + "explain": " Literal '_'" + }, + { + "explain": " Literal 'o'" + }, + { + "explain": " Function replaceRegexpAll (alias b) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier s" + }, + { + "explain": " Literal '_'" + }, + { + "explain": " Literal 'o'" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Identifier b" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (alias s) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['.', '.']" + } + ], + + "rows": 30, + + "statistics": + { + "elapsed": 0.00154485, + "rows_read": 30, + "bytes_read": 1150 + } +} diff --git a/parser/testdata/00240_replace_substring_loop/metadata.json b/parser/testdata/00240_replace_substring_loop/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00240_replace_substring_loop/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00240_replace_substring_loop/query.sql b/parser/testdata/00240_replace_substring_loop/query.sql new file mode 100644 index 000000000..2c9157d59 --- /dev/null +++ b/parser/testdata/00240_replace_substring_loop/query.sql @@ -0,0 +1,101 @@ +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a 
= b FROM (SELECT arrayJoin(['.', '._']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '_.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '_._']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['._', '.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['._', '._']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['._', '_.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['._', '_._']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_.', '.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_.', '._']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_.', '_.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_.', '_._']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_._', '.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_._', '._']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_._', '_.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_._', '_._']) AS s); + +SELECT s, replaceAll(s, '_', 'oo') AS a, replaceRegexpAll(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['.', '.']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, replaceRegexpAll(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['.', '._']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, replaceRegexpAll(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['.', '_.']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, replaceRegexpAll(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['.', '_._']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, replaceRegexpAll(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['._', '.']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, replaceRegexpAll(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['._', '._']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, replaceRegexpAll(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['._', '_.']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, replaceRegexpAll(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['._', '_._']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, replaceRegexpAll(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_.', '.']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, replaceRegexpAll(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_.', '._']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, replaceRegexpAll(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_.', '_.']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, replaceRegexpAll(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_.', '_._']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, replaceRegexpAll(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_._', '.']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, 
replaceRegexpAll(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_._', '._']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, replaceRegexpAll(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_._', '_.']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, replaceRegexpAll(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_._', '_._']) AS s); + +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '.__']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '__.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '__.__']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.__', '.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.__', '.__']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.__', '__.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.__', '__.__']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.', '.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.', '.__']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.', '__.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.', '__.__']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.__', '.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.__', '.__']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.__', '__.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, replaceRegexpAll(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.__', '__.__']) AS s); + +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '.']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '._']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '_.']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '_._']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['._', '.']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['._', '._']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['._', '_.']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['._', '_._']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_.', '.']) AS s); 
+SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_.', '._']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_.', '_.']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_.', '_._']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_._', '.']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_._', '._']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_._', '_.']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_._', '_._']) AS s); + +SELECT s, replaceOne(s, '_', 'oo') AS a, replaceRegexpOne(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['.', '.']) AS s); +SELECT s, replaceOne(s, '_', 'oo') AS a, replaceRegexpOne(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['.', '._']) AS s); +SELECT s, replaceOne(s, '_', 'oo') AS a, replaceRegexpOne(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['.', '_.']) AS s); +SELECT s, replaceOne(s, '_', 'oo') AS a, replaceRegexpOne(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['.', '_._']) AS s); +SELECT s, replaceOne(s, '_', 'oo') AS a, replaceRegexpOne(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['._', '.']) AS s); +SELECT s, replaceOne(s, '_', 'oo') AS a, replaceRegexpOne(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['._', '._']) AS s); +SELECT s, replaceOne(s, '_', 'oo') AS a, replaceRegexpOne(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['._', '_.']) AS s); +SELECT s, replaceOne(s, '_', 'oo') AS a, replaceRegexpOne(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['._', '_._']) AS s); +SELECT s, replaceOne(s, '_', 'oo') AS a, replaceRegexpOne(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_.', '.']) AS s); +SELECT s, replaceOne(s, '_', 'oo') AS a, replaceRegexpOne(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_.', '._']) AS s); +SELECT s, replaceOne(s, '_', 'oo') AS a, replaceRegexpOne(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_.', '_.']) AS s); +SELECT s, replaceOne(s, '_', 'oo') AS a, replaceRegexpOne(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_.', '_._']) AS s); +SELECT s, replaceOne(s, '_', 'oo') AS a, replaceRegexpOne(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_._', '.']) AS s); +SELECT s, replaceOne(s, '_', 'oo') AS a, replaceRegexpOne(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_._', '._']) AS s); +SELECT s, replaceOne(s, '_', 'oo') AS a, replaceRegexpOne(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_._', '_.']) AS s); +SELECT s, replaceOne(s, '_', 'oo') AS a, replaceRegexpOne(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_._', '_._']) AS s); + +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '.']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '.__']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '__.']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '__.__']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b 
FROM (SELECT arrayJoin(['.__', '.']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.__', '.__']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.__', '__.']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.__', '__.__']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.', '.']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.', '.__']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.', '__.']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.', '__.__']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.__', '.']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.__', '.__']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.__', '__.']) AS s); +SELECT s, replaceOne(s, '_', 'o') AS a, replaceRegexpOne(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.__', '__.__']) AS s); diff --git a/parser/testdata/00250_tuple_comparison/ast.json b/parser/testdata/00250_tuple_comparison/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00250_tuple_comparison/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00250_tuple_comparison/metadata.json b/parser/testdata/00250_tuple_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00250_tuple_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00250_tuple_comparison/query.sql b/parser/testdata/00250_tuple_comparison/query.sql new file mode 100644 index 000000000..03a4d23a2 --- /dev/null +++ b/parser/testdata/00250_tuple_comparison/query.sql @@ -0,0 +1,111 @@ +SELECT + (1, 'Hello', 23) = (1, 'Hello', 23), + (1, 'Hello', 23) != (1, 'Hello', 23), + (1, 'Hello', 23) < (1, 'Hello', 23), + (1, 'Hello', 23) > (1, 'Hello', 23), + (1, 'Hello', 23) <= (1, 'Hello', 23), + (1, 'Hello', 23) >= (1, 'Hello', 23); +SELECT + (1, 'Hello', 23) = (2, 'Hello', 23), + (1, 'Hello', 23) != (2, 'Hello', 23), + (1, 'Hello', 23) < (2, 'Hello', 23), + (1, 'Hello', 23) > (2, 'Hello', 23), + (1, 'Hello', 23) <= (2, 'Hello', 23), + (1, 'Hello', 23) >= (2, 'Hello', 23); +SELECT + (1, 'Hello', 23) = (1, 'World', 23), + (1, 'Hello', 23) != (1, 'World', 23), + (1, 'Hello', 23) < (1, 'World', 23), + (1, 'Hello', 23) > (1, 'World', 23), + (1, 'Hello', 23) <= (1, 'World', 23), + (1, 'Hello', 23) >= (1, 'World', 23); +SELECT + (1, 'Hello', 23) = (1, 'Hello', 24), + (1, 'Hello', 23) != (1, 'Hello', 24), + (1, 'Hello', 23) < (1, 'Hello', 24), + (1, 'Hello', 23) > (1, 'Hello', 24), + (1, 'Hello', 23) <= (1, 'Hello', 24), + (1, 'Hello', 23) >= (1, 'Hello', 24); +SELECT + (2, 'Hello', 23) = (1, 'Hello', 23), + (2, 'Hello', 23) != (1, 'Hello', 23), + (2, 'Hello', 23) < (1, 'Hello', 23), + (2, 'Hello', 23) > (1, 'Hello', 23), + (2, 'Hello', 23) <= (1, 'Hello', 23), + (2, 'Hello', 23) >= (1, 'Hello', 23); +SELECT + (1, 'World', 23) = (1, 'Hello', 23), + (1, 'World', 23) != (1, 
'Hello', 23), + (1, 'World', 23) < (1, 'Hello', 23), + (1, 'World', 23) > (1, 'Hello', 23), + (1, 'World', 23) <= (1, 'Hello', 23), + (1, 'World', 23) >= (1, 'Hello', 23); +SELECT + (1, 'Hello', 24) = (1, 'Hello', 23), + (1, 'Hello', 24) != (1, 'Hello', 23), + (1, 'Hello', 24) < (1, 'Hello', 23), + (1, 'Hello', 24) > (1, 'Hello', 23), + (1, 'Hello', 24) <= (1, 'Hello', 23), + (1, 'Hello', 24) >= (1, 'Hello', 23); +SELECT + (1, 'Hello') = (1, 'Hello'), + (1, 'Hello') != (1, 'Hello'), + (1, 'Hello') < (1, 'Hello'), + (1, 'Hello') > (1, 'Hello'), + (1, 'Hello') <= (1, 'Hello'), + (1, 'Hello') >= (1, 'Hello'); +SELECT + (1, 'Hello') = (2, 'Hello'), + (1, 'Hello') != (2, 'Hello'), + (1, 'Hello') < (2, 'Hello'), + (1, 'Hello') > (2, 'Hello'), + (1, 'Hello') <= (2, 'Hello'), + (1, 'Hello') >= (2, 'Hello'); +SELECT + (1, 'Hello') = (1, 'World'), + (1, 'Hello') != (1, 'World'), + (1, 'Hello') < (1, 'World'), + (1, 'Hello') > (1, 'World'), + (1, 'Hello') <= (1, 'World'), + (1, 'Hello') >= (1, 'World'); +SELECT + (2, 'Hello') = (1, 'Hello'), + (2, 'Hello') != (1, 'Hello'), + (2, 'Hello') < (1, 'Hello'), + (2, 'Hello') > (1, 'Hello'), + (2, 'Hello') <= (1, 'Hello'), + (2, 'Hello') >= (1, 'Hello'); +SELECT + (1, 'World') = (1, 'Hello'), + (1, 'World') != (1, 'Hello'), + (1, 'World') < (1, 'Hello'), + (1, 'World') > (1, 'Hello'), + (1, 'World') <= (1, 'Hello'), + (1, 'World') >= (1, 'Hello'); +SELECT + tuple(1) = tuple(1), + tuple(1) != tuple(1), + tuple(1) < tuple(1), + tuple(1) > tuple(1), + tuple(1) <= tuple(1), + tuple(1) >= tuple(1); +SELECT + tuple(1) = tuple(2), + tuple(1) != tuple(2), + tuple(1) < tuple(2), + tuple(1) > tuple(2), + tuple(1) <= tuple(2), + tuple(1) >= tuple(2); +SELECT + tuple(2) = tuple(1), + tuple(2) != tuple(1), + tuple(2) < tuple(1), + tuple(2) > tuple(1), + tuple(2) <= tuple(1), + tuple(2) >= tuple(1); +SELECT + tuple(NULL) < tuple(1), + tuple(NULL) = tuple(1), + tuple(NULL) <= tuple(1), + tuple(1, NULL) = tuple(2, 1), + tuple(1, NULL) < tuple(2, 1); diff --git a/parser/testdata/00251_has_types/ast.json b/parser/testdata/00251_has_types/ast.json new file mode 100644 index 000000000..5b6c6c39a --- /dev/null +++ b/parser/testdata/00251_has_types/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function has (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3]" + }, + { + "explain": " Literal Float64_3" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001420832, + "rows_read": 8, + "bytes_read": 315 + } +} diff --git a/parser/testdata/00251_has_types/metadata.json b/parser/testdata/00251_has_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00251_has_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00251_has_types/query.sql b/parser/testdata/00251_has_types/query.sql new file mode 100644 index 000000000..f64354b15 --- /dev/null +++ b/parser/testdata/00251_has_types/query.sql @@ -0,0 +1,23 @@ +SELECT has([1, 2, 3], 3.0); +SELECT has([1, 2.0, 3], 2); +SELECT has([1, 2.1, 3], 2); +SELECT has([1, -1], 1); +SELECT has([1, -1], 1000); + +SELECT has(materialize([1, 2, 3]), 3.0); +SELECT has(materialize([1, 2.0, 3]), 2); 
+SELECT has(materialize([1, 2.1, 3]), 2); +SELECT has(materialize([1, -1]), 1); +SELECT has(materialize([1, -1]), 1000); + +SELECT has([1, 2, 3], materialize(3.0)); +SELECT has([1, 2.0, 3], materialize(2)); +SELECT has([1, 2.1, 3], materialize(2)); +SELECT has([1, -1], materialize(1)); +SELECT has([1, -1], materialize(1000)); + +SELECT has(materialize([1, 2, 3]), materialize(3.0)); +SELECT has(materialize([1, 2.0, 3]), materialize(2)); +SELECT has(materialize([1, 2.1, 3]), materialize(2)); +SELECT has(materialize([1, -1]), materialize(1)); +SELECT has(materialize([1, -1]), materialize(1000)); diff --git a/parser/testdata/00252_shard_global_in_aggregate_function/ast.json b/parser/testdata/00252_shard_global_in_aggregate_function/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00252_shard_global_in_aggregate_function/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00252_shard_global_in_aggregate_function/metadata.json b/parser/testdata/00252_shard_global_in_aggregate_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00252_shard_global_in_aggregate_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00252_shard_global_in_aggregate_function/query.sql b/parser/testdata/00252_shard_global_in_aggregate_function/query.sql new file mode 100644 index 000000000..2a881b458 --- /dev/null +++ b/parser/testdata/00252_shard_global_in_aggregate_function/query.sql @@ -0,0 +1,10 @@ +-- Tags: shard + +DROP TABLE IF EXISTS storage; +CREATE TABLE storage(UserID UInt64) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO storage(UserID) values (6460432721393873721)(6460432721393873721)(6460432721393873721)(6460432721393873721)(6460432721393873721)(6460432721393873721)(6460432721393873721)(402895971392036118)(402895971392036118)(402895971392036118); + +SELECT sum(UserID GLOBAL IN (SELECT UserID FROM remote('127.0.0.{2,3}', currentDatabase(), storage))) FROM remote('127.0.0.{2,3}', currentDatabase(), storage); +SELECT sum(UserID GLOBAL IN (SELECT UserID FROM storage)) FROM remote('127.0.0.{2,3}', currentDatabase(), storage); + +DROP TABLE storage; diff --git a/parser/testdata/00253_insert_recursive_defaults/ast.json b/parser/testdata/00253_insert_recursive_defaults/ast.json new file mode 100644 index 000000000..97a4a5d2a --- /dev/null +++ b/parser/testdata/00253_insert_recursive_defaults/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery defaults (children 1)" + }, + { + "explain": " Identifier defaults" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001188722, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00253_insert_recursive_defaults/metadata.json b/parser/testdata/00253_insert_recursive_defaults/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00253_insert_recursive_defaults/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00253_insert_recursive_defaults/query.sql b/parser/testdata/00253_insert_recursive_defaults/query.sql new file mode 100644 index 000000000..c0edc4471 --- /dev/null +++ b/parser/testdata/00253_insert_recursive_defaults/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS defaults; +CREATE TABLE defaults (a UInt8, b DEFAULT 0, c DEFAULT identity(b)) ENGINE = Memory; +INSERT INTO defaults (a) VALUES (1); +SELECT * FROM defaults; +DROP TABLE defaults; + 
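+-- Annotation (not part of the upstream ClickHouse test): both tables in this file exercise recursive DEFAULT resolution. Columns omitted from an INSERT are filled by evaluating their DEFAULT expressions in dependency order, so a DEFAULT may reference another defaulted column. +-- Above, INSERT INTO defaults (a) VALUES (1) stores the row (1, 0, 0): b DEFAULT 0 is computed first, then c DEFAULT identity(b) reads the freshly computed b. +-- Below, eff_uid is derived from adf_uid/ya_uid, page_session from eff_uid, and sample_key is an ALIAS over page_session, so it is computed on read instead of being stored.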
+DROP TABLE IF EXISTS elog_cut; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE elog_cut +( + date Date DEFAULT toDate(uts), + uts DateTime, + pr UInt64, + ya_uid UInt64, + adf_uid UInt64, + owner_id UInt32, + eff_uid UInt64 DEFAULT if(adf_uid != 0, adf_uid, ya_uid), + page_session UInt64 DEFAULT cityHash64(eff_uid, pr), + sample_key UInt64 ALIAS page_session +) ENGINE = MergeTree(date, cityHash64(adf_uid, ya_uid, pr), (owner_id, date, cityHash64(adf_uid, ya_uid, pr)), 8192); + +INSERT INTO elog_cut (uts, pr, ya_uid, adf_uid, owner_id) VALUES ('2015-01-01 01:02:03', 111, 123, 456, 789); +SELECT date, uts, pr, ya_uid, adf_uid, owner_id, eff_uid, page_session, sample_key FROM elog_cut; +DROP TABLE elog_cut; diff --git a/parser/testdata/00254_tuple_extremes/ast.json b/parser/testdata/00254_tuple_extremes/ast.json new file mode 100644 index 000000000..c99d92212 --- /dev/null +++ b/parser/testdata/00254_tuple_extremes/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery numbers_10 (children 1)" + }, + { + "explain": " Identifier numbers_10" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001069584, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00254_tuple_extremes/metadata.json b/parser/testdata/00254_tuple_extremes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00254_tuple_extremes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00254_tuple_extremes/query.sql b/parser/testdata/00254_tuple_extremes/query.sql new file mode 100644 index 000000000..9c78463bc --- /dev/null +++ b/parser/testdata/00254_tuple_extremes/query.sql @@ -0,0 +1,8 @@ +drop table if exists numbers_10; + +create table numbers_10 (number UInt64) engine = MergeTree order by number; +insert into numbers_10 select number from system.numbers limit 10; + +SELECT number, (number, toDate('2015-01-01') + number) FROM numbers_10 LIMIT 10 SETTINGS extremes = 1; + +drop table if exists numbers_10; diff --git a/parser/testdata/00255_array_concat_string/ast.json b/parser/testdata/00255_array_concat_string/ast.json new file mode 100644 index 000000000..9c1629c39 --- /dev/null +++ b/parser/testdata/00255_array_concat_string/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayStringConcat (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['Hello', 'World']" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001090145, + "rows_read": 7, + "bytes_read": 286 + } +} diff --git a/parser/testdata/00255_array_concat_string/metadata.json b/parser/testdata/00255_array_concat_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00255_array_concat_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00255_array_concat_string/query.sql b/parser/testdata/00255_array_concat_string/query.sql new file mode 100644 index 000000000..a18d349ba --- /dev/null +++ b/parser/testdata/00255_array_concat_string/query.sql @@ -0,0 +1,26 @@ +SELECT arrayStringConcat(['Hello', 'World']); +SELECT 
arrayStringConcat(materialize(['Hello', 'World'])); +SELECT arrayStringConcat(['Hello', 'World'], ', '); +SELECT arrayStringConcat(materialize(['Hello', 'World']), ', '); +SELECT arrayStringConcat(emptyArrayString()); +SELECT arrayStringConcat(arrayMap(x -> toString(x), range(number))) FROM system.numbers LIMIT 10; +SELECT arrayStringConcat(arrayMap(x -> toString(x), range(number)), '') FROM system.numbers LIMIT 10; +SELECT arrayStringConcat(arrayMap(x -> toString(x), range(number)), ',') FROM system.numbers LIMIT 10; +SELECT arrayStringConcat(arrayMap(x -> transform(x, [0, 1, 2, 3, 4, 5, 6, 7, 8], ['meta.ua', 'google', 'test', '123', '', 'hello', 'world', 'goodbye', 'xyz'], ''), arrayMap(x -> x % 9, range(number))), ' ') FROM system.numbers LIMIT 20; +SELECT arrayStringConcat(arrayMap(x -> toString(x), range(number % 4))) FROM system.numbers LIMIT 10; +SELECT arrayStringConcat([Null, 'hello', Null, 'world', Null, 'xyz', 'def', Null], ';'); +SELECT arrayStringConcat([Null::Nullable(String), Null::Nullable(String)], ';'); +SELECT arrayStringConcat(arr, ';') FROM (SELECT [1, 23, 456] AS arr); +SELECT arrayStringConcat(arr, ';') FROM (SELECT [Null, 1, Null, 23, Null, 456, Null] AS arr); +SELECT arrayStringConcat(arr, '; ') FROM (SELECT [toIPv4('127.0.0.1'), toIPv4('1.0.0.1')] AS arr); +SELECT arrayStringConcat(arr, '; ') FROM (SELECT [toIPv4('127.0.0.1'), Null, toIPv4('1.0.0.1')] AS arr); +SELECT arrayStringConcat(arr, '; ') FROM (SELECT [toDate('2021-10-01'), toDate('2021-10-02')] AS arr); +SELECT arrayStringConcat(arr, '; ') FROM (SELECT [toDate('2021-10-01'), Null, toDate('2021-10-02')] AS arr); +SELECT arrayStringConcat(materialize([Null, 'hello', Null, 'world', Null, 'xyz', 'def', Null]), ';'); +SELECT arrayStringConcat(materialize([Null::Nullable(String), Null::Nullable(String)]), ';'); +SELECT arrayStringConcat(arr, ';') FROM (SELECT materialize([1, 23, 456]) AS arr); +SELECT arrayStringConcat(arr, ';') FROM (SELECT materialize([Null, 1, Null, 23, Null, 456, Null]) AS arr); +SELECT arrayStringConcat(arr, '; ') FROM (SELECT materialize([toIPv4('127.0.0.1'), toIPv4('1.0.0.1')]) AS arr); +SELECT arrayStringConcat(arr, '; ') FROM (SELECT materialize([toIPv4('127.0.0.1'), Null, toIPv4('1.0.0.1')]) AS arr); +SELECT arrayStringConcat(arr, '; ') FROM (SELECT materialize([toDate('2021-10-01'), toDate('2021-10-02')]) AS arr); +SELECT arrayStringConcat(arr, '; ') FROM (SELECT materialize([toDate('2021-10-01'), Null, toDate('2021-10-02')]) AS arr); diff --git a/parser/testdata/00256_reverse/ast.json b/parser/testdata/00256_reverse/ast.json new file mode 100644 index 000000000..cfdd1ef28 --- /dev/null +++ b/parser/testdata/00256_reverse/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function reverse (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Hello'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001058648, + "rows_read": 7, + "bytes_read": 259 + } +} diff --git a/parser/testdata/00256_reverse/metadata.json b/parser/testdata/00256_reverse/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00256_reverse/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/00256_reverse/query.sql b/parser/testdata/00256_reverse/query.sql new file mode 100644 index 000000000..21e75b921 --- /dev/null +++ b/parser/testdata/00256_reverse/query.sql @@ -0,0 +1,9 @@ +SELECT reverse('Hello'); +SELECT reverse(materialize('Hello')); +SELECT reverse(toString(round(exp10(number)))) FROM system.numbers LIMIT 10; + +SELECT reverse(['Hello', 'World']); +SELECT reverse(materialize(['Hello', 'World'])); +SELECT reverse(range(number)) FROM system.numbers LIMIT 10; +SELECT reverse(arrayMap(x -> toString(round(exp10(x))), range(number))) FROM system.numbers LIMIT 10; +SELECT reverse(toFixedString(toString(round(exp10(number))), 10)) FROM system.numbers LIMIT 10; diff --git a/parser/testdata/00257_shard_no_aggregates_and_constant_keys/ast.json b/parser/testdata/00257_shard_no_aggregates_and_constant_keys/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00257_shard_no_aggregates_and_constant_keys/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00257_shard_no_aggregates_and_constant_keys/metadata.json b/parser/testdata/00257_shard_no_aggregates_and_constant_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00257_shard_no_aggregates_and_constant_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00257_shard_no_aggregates_and_constant_keys/query.sql b/parser/testdata/00257_shard_no_aggregates_and_constant_keys/query.sql new file mode 100644 index 000000000..b72162be4 --- /dev/null +++ b/parser/testdata/00257_shard_no_aggregates_and_constant_keys/query.sql @@ -0,0 +1,28 @@ +-- Tags: shard + +set enable_analyzer = 1; +set enable_positional_arguments = 0; + +select 40 as z from (select * from system.numbers limit 3) group by z; +select 41 as z from remote('127.0.0.{2,3}', system.one) group by z; +select count(), 42 AS z from remote('127.0.0.{2,3}', system.one) group by z; +select 43 AS z from remote('127.0.0.{2,3}', system.one) group by 42, 43, 44; +select 11 AS z from (SELECT 2 UNION ALL SELECT 3) group by 42, 43, 44; + +select 40 as z from (select * from system.numbers limit 3) group by z WITH TOTALS; +-- NOTE: non-analyzer preserves the original header (i.e. 41) for TOTALS in +-- case of remote queries with GROUP BY some_requested_const and there are no +-- aggregate functions (the query above). 
But everything else works in the same +-- way, i.e.: +-- +-- select 41 as z, count() from remote('127.0.0.{2,3}', system.one) group by z WITH TOTALS; +-- select 41 as z from remote('127.0.0.{2,3}', system.one) group by 1 WITH TOTALS; +-- +select 41 as z from remote('127.0.0.{2,3}', system.one) group by z WITH TOTALS; +select count(), 42 AS z from remote('127.0.0.{2,3}', system.one) group by z WITH TOTALS; +select 43 AS z from remote('127.0.0.{2,3}', system.one) group by 42, 43, 44 WITH TOTALS; +select 11 AS z from (SELECT 1 UNION ALL SELECT 2) group by 42, 43, 44 WITH TOTALS; +select 11 AS z from (SELECT 2 UNION ALL SELECT 3) group by 42, 43, 44 WITH TOTALS; + +SELECT count() WITH TOTALS; +SELECT count() FROM remote('127.0.0.{2,3}', system.one) WITH TOTALS; diff --git a/parser/testdata/00258_materializing_tuples/ast.json b/parser/testdata/00258_materializing_tuples/ast.json new file mode 100644 index 000000000..3a0d78a5b --- /dev/null +++ b/parser/testdata/00258_materializing_tuples/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (alias a) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (alias a) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.00114609, + "rows_read": 24, + "bytes_read": 984 + } +} diff --git a/parser/testdata/00258_materializing_tuples/metadata.json b/parser/testdata/00258_materializing_tuples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00258_materializing_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00258_materializing_tuples/query.sql b/parser/testdata/00258_materializing_tuples/query.sql new file mode 100644 index 000000000..c74832992 --- /dev/null +++ b/parser/testdata/00258_materializing_tuples/query.sql @@ -0,0 +1,5 @@ +select * from (select tuple(1) as a union all select tuple(1) as a) order by a; +select * from (select tuple(1) as a union all select tuple(2) as a) order by a; +select * from (select tuple(materialize(0)) as a union all select tuple(0) as a) order by a; +select * from (select tuple(range(1)[1]) as a union all select tuple(0) as a) order by a; +select * from (select tuple(range(1)[2]) as a union all select tuple(1) as a) order by a; diff --git a/parser/testdata/00259_hashing_tuples/ast.json 
b/parser/testdata/00259_hashing_tuples/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00259_hashing_tuples/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00259_hashing_tuples/metadata.json b/parser/testdata/00259_hashing_tuples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00259_hashing_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00259_hashing_tuples/query.sql b/parser/testdata/00259_hashing_tuples/query.sql new file mode 100644 index 000000000..3702dc133 --- /dev/null +++ b/parser/testdata/00259_hashing_tuples/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-fasttest + +SELECT cityHash64(1, 2, '') AS x1, cityHash64((1, 2), '') AS x2, cityHash64(1, (2, '')) AS x3, cityHash64((1, 2, '')) AS x4; +SELECT cityHash64(materialize(1), 2, '') AS x1, cityHash64((materialize(1), 2), '') AS x2, cityHash64(materialize(1), (2, '')) AS x3, cityHash64((materialize(1), 2, '')) AS x4; +SELECT cityHash64(1, materialize(2), '') AS x1, cityHash64((1, materialize(2)), '') AS x2, cityHash64(1, (materialize(2), '')) AS x3, cityHash64((1, materialize(2), '')) AS x4; +SELECT cityHash64(1, 2, materialize('')) AS x1, cityHash64((1, 2), materialize('')) AS x2, cityHash64(1, (2, materialize(''))) AS x3, cityHash64((1, 2, materialize(''))) AS x4; +SELECT cityHash64(materialize(1), materialize(2), '') AS x1, cityHash64((materialize(1), materialize(2)), '') AS x2, cityHash64(materialize(1), (materialize(2), '')) AS x3, cityHash64((materialize(1), materialize(2), '')) AS x4; +SELECT cityHash64(1, materialize(2), materialize('')) AS x1, cityHash64((1, materialize(2)), materialize('')) AS x2, cityHash64(1, (materialize(2), materialize(''))) AS x3, cityHash64((1, materialize(2), materialize(''))) AS x4; +SELECT cityHash64(materialize(1), 2, materialize('')) AS x1, cityHash64((materialize(1), 2), materialize('')) AS x2, cityHash64(materialize(1), (2, materialize(''))) AS x3, cityHash64((materialize(1), 2, materialize(''))) AS x4; +SELECT cityHash64(materialize(1), materialize(2), materialize('')) AS x1, cityHash64((materialize(1), materialize(2)), materialize('')) AS x2, cityHash64(materialize(1), (materialize(2), materialize(''))) AS x3, cityHash64((materialize(1), materialize(2), materialize(''))) AS x4; diff --git a/parser/testdata/00260_like_and_curly_braces/ast.json b/parser/testdata/00260_like_and_curly_braces/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00260_like_and_curly_braces/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00260_like_and_curly_braces/metadata.json b/parser/testdata/00260_like_and_curly_braces/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00260_like_and_curly_braces/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00260_like_and_curly_braces/query.sql b/parser/testdata/00260_like_and_curly_braces/query.sql new file mode 100644 index 000000000..0064deb22 --- /dev/null +++ b/parser/testdata/00260_like_and_curly_braces/query.sql @@ -0,0 +1,36 @@ +-- Tags: race + +SELECT 'a}a' AS x, x LIKE (concat('%', x, '%') AS pat), materialize(x) LIKE pat; +SELECT 'a}a' AS x, x LIKE (concat('%', x) AS pat), materialize(x) LIKE pat; +SELECT 'a}a' AS x, x LIKE (concat(x, '%') AS pat), materialize(x) LIKE pat; +SELECT 'a}a' AS x, x LIKE (x AS pat), materialize(x) LIKE pat; + +SELECT 'a{a' AS x, x LIKE (concat('%', x, '%') AS pat), 
materialize(x) LIKE pat; +SELECT 'a{a' AS x, x LIKE (concat('%', x) AS pat), materialize(x) LIKE pat; +SELECT 'a{a' AS x, x LIKE (concat(x, '%') AS pat), materialize(x) LIKE pat; +SELECT 'a{a' AS x, x LIKE (x AS pat), materialize(x) LIKE pat; + +SELECT '{a' AS x, x LIKE (concat('%', x, '%') AS pat), materialize(x) LIKE pat; +SELECT '{a' AS x, x LIKE (concat('%', x) AS pat), materialize(x) LIKE pat; +SELECT '{a' AS x, x LIKE (concat(x, '%') AS pat), materialize(x) LIKE pat; +SELECT '{a' AS x, x LIKE (x AS pat), materialize(x) LIKE pat; + +SELECT 'a{' AS x, x LIKE (concat('%', x, '%') AS pat), materialize(x) LIKE pat; +SELECT 'a{' AS x, x LIKE (concat('%', x) AS pat), materialize(x) LIKE pat; +SELECT 'a{' AS x, x LIKE (concat(x, '%') AS pat), materialize(x) LIKE pat; +SELECT 'a{' AS x, x LIKE (x AS pat), materialize(x) LIKE pat; + +SELECT 'a}' AS x, x LIKE (concat('%', x, '%') AS pat), materialize(x) LIKE pat; +SELECT 'a}' AS x, x LIKE (concat('%', x) AS pat), materialize(x) LIKE pat; +SELECT 'a}' AS x, x LIKE (concat(x, '%') AS pat), materialize(x) LIKE pat; +SELECT 'a}' AS x, x LIKE (x AS pat), materialize(x) LIKE pat; + +SELECT '}a' AS x, x LIKE (concat('%', x, '%') AS pat), materialize(x) LIKE pat; +SELECT '}a' AS x, x LIKE (concat('%', x) AS pat), materialize(x) LIKE pat; +SELECT '}a' AS x, x LIKE (concat(x, '%') AS pat), materialize(x) LIKE pat; +SELECT '}a' AS x, x LIKE (x AS pat), materialize(x) LIKE pat; + +SELECT '{a}' AS x, x LIKE (concat('%', x, '%') AS pat), materialize(x) LIKE pat; +SELECT '{a}' AS x, x LIKE (concat('%', x) AS pat), materialize(x) LIKE pat; +SELECT '{a}' AS x, x LIKE (concat(x, '%') AS pat), materialize(x) LIKE pat; +SELECT '{a}' AS x, x LIKE (x AS pat), materialize(x) LIKE pat; diff --git a/parser/testdata/00261_storage_aliases_and_array_join/ast.json b/parser/testdata/00261_storage_aliases_and_array_join/ast.json new file mode 100644 index 000000000..22e81b39e --- /dev/null +++ b/parser/testdata/00261_storage_aliases_and_array_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery aliases_test (children 1)" + }, + { + "explain": " Identifier aliases_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000970878, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/00261_storage_aliases_and_array_join/metadata.json b/parser/testdata/00261_storage_aliases_and_array_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00261_storage_aliases_and_array_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00261_storage_aliases_and_array_join/query.sql b/parser/testdata/00261_storage_aliases_and_array_join/query.sql new file mode 100644 index 000000000..e50aa7ba5 --- /dev/null +++ b/parser/testdata/00261_storage_aliases_and_array_join/query.sql @@ -0,0 +1,119 @@ +drop table if exists aliases_test; + +set allow_deprecated_syntax_for_merge_tree=1; +create table aliases_test ( +date Date, id UInt64, +array default ['zero','one','two'], +d1 default array, +a1 alias array, a2 alias a1, a3 alias a2, +a4 alias arrayMap(x -> toString(x), range(3)), a5 alias a4, a6 alias a5, +`struct.d1` default array, +`struct.a1` alias array, +`struct.a2` alias `struct.a1`, +`struct.a3` alias `struct.a2`, +`struct.a4` alias arrayMap(x -> toString(x), range(3)), +`struct.a5` alias `struct.a4`, +`struct.a6` alias `struct.a5` +) engine=MergeTree(date, id, 1); + +insert into aliases_test (id) values 
(0); + +select '-- Ensure ALIAS columns are not selected by asterisk'; +select * from aliases_test; + +select '-- select DEFAULT and ALIAS arrays'; +select d1, a1, a2, a3, a4, a5, a6 from aliases_test; +select '-- select DEFAULT and ALIAS nested columns'; +select struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6 from aliases_test; + +select d1, a1 from aliases_test array join d1, a1; +select d1, a1 from aliases_test array join d1, a1 as a2; +select d1, a1 from aliases_test array join d1 as d2, a1; +select '-- array join, but request the original columns'; +select d1, a1 from aliases_test array join d1 as d2, a1 as a2; + +select '-- array join, do not use the result'; +select array from aliases_test array join d1, a1; +select array from aliases_test array join d1 as d2, a1 as a1; + +select '-- select DEFAULT and ALIAS arrays, array joining one at a time'; +select array, d1, a1, a2, a3, a4, a5, a6 from aliases_test array join d1; +select array, d1, a1, a2, a3, a4, a5, a6 from aliases_test array join a1; +select array, d1, a1, a2, a3, a4, a5, a6 from aliases_test array join a2; +select array, d1, a1, a2, a3, a4, a5, a6 from aliases_test array join a3; +select array, d1, a1, a2, a3, a4, a5, a6 from aliases_test array join a4; +select array, d1, a1, a2, a3, a4, a5, a6 from aliases_test array join a5; +select array, d1, a1, a2, a3, a4, a5, a6 from aliases_test array join a6; + +select '-- select DEFAULT and ALIAS arrays, array joining one at a time and aliasing result with original name'; +select array, d1, a1, a2, a3, a4, a5, a6 from aliases_test array join d1 as d1; +select array, d1, a1, a2, a3, a4, a5, a6 from aliases_test array join a1 as a1; +select array, d1, a1, a2, a3, a4, a5, a6 from aliases_test array join a2 as a2; +select array, d1, a1, a2, a3, a4, a5, a6 from aliases_test array join a3 as a3; +select array, d1, a1, a2, a3, a4, a5, a6 from aliases_test array join a4 as a4; +select array, d1, a1, a2, a3, a4, a5, a6 from aliases_test array join a5 as a5; +select array, d1, a1, a2, a3, a4, a5, a6 from aliases_test array join a6 as a6; + +select '-- select DEFAULT and ALIAS arrays and array join result, aliased as `joined`'; +select array, d1, a1, a2, a3, a4, a5, a6, joined from aliases_test array join d1 as joined; +select array, d1, a1, a2, a3, a4, a5, a6, joined from aliases_test array join a1 as joined; +select array, d1, a1, a2, a3, a4, a5, a6, joined from aliases_test array join a2 as joined; +select array, d1, a1, a2, a3, a4, a5, a6, joined from aliases_test array join a3 as joined; +select array, d1, a1, a2, a3, a4, a5, a6, joined from aliases_test array join a4 as joined; +select array, d1, a1, a2, a3, a4, a5, a6, joined from aliases_test array join a5 as joined; +select array, d1, a1, a2, a3, a4, a5, a6, joined from aliases_test array join a6 as joined; + +select '-- select DEFAULT and ALIAS nested columns, array joining one at a time'; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6 from aliases_test array join struct.d1; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6 from aliases_test array join struct.a1; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6 from aliases_test array join struct.a2; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6 from aliases_test array join struct.a3; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6 from 
aliases_test array join struct.a4; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6 from aliases_test array join struct.a5; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6 from aliases_test array join struct.a6; + +select '-- select DEFAULT and ALIAS nested columns, array joining one at a time and aliasing result with original name'; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6 from aliases_test array join struct.d1 as `struct.d1`; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6 from aliases_test array join struct.a1 as `struct.a1`; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6 from aliases_test array join struct.a2 as `struct.a2`; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6 from aliases_test array join struct.a3 as `struct.a3`; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6 from aliases_test array join struct.a4 as `struct.a4`; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6 from aliases_test array join struct.a5 as `struct.a5`; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6 from aliases_test array join struct.a6 as `struct.a6`; + +select '-- select DEFAULT and ALIAS nested columns and array join result, aliased as `joined`'; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6, joined from aliases_test array join struct.d1 as joined; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6, joined from aliases_test array join struct.a1 as joined; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6, joined from aliases_test array join struct.a2 as joined; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6, joined from aliases_test array join struct.a3 as joined; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6, joined from aliases_test array join struct.a4 as joined; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6, joined from aliases_test array join struct.a5 as joined; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6, joined from aliases_test array join struct.a6 as joined; + +select '-- array join whole nested table'; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6 from aliases_test array join struct; + +select '-- array join whole nested table not using the result'; +select array from aliases_test array join struct; + +select '-- array join whole nested table, aliasing with original name'; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6 from aliases_test array join struct as struct; + +select '-- array join whole nested table, aliasing with original name not using the result'; +select array from aliases_test array join struct as struct; + +select '-- array join whole nested table, aliasing as `class`'; +select array, class.d1, class.a1, class.a2, class.a3, class.a4, class.a5, class.a6 from aliases_test array join struct as class; + +select '-- array join whole nested table, aliasing as `class` and not using the result'; +select array from 
aliases_test array join struct as class; + +select '-- array join whole nested table, aliasing as `class` but requesting the original columns'; +select array, struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6 from aliases_test array join struct as class; + +select array, +struct.d1, struct.a1, struct.a2, struct.a3, struct.a4, struct.a5, struct.a6, +class.d1, class.a1, class.a2, class.a3, class.a4, class.a5, class.a6 +from aliases_test array join struct as class; + +drop table aliases_test; diff --git a/parser/testdata/00262_alter_alias/ast.json b/parser/testdata/00262_alter_alias/ast.json new file mode 100644 index 000000000..886545863 --- /dev/null +++ b/parser/testdata/00262_alter_alias/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery aliases_test (children 1)" + }, + { + "explain": " Identifier aliases_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001107384, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/00262_alter_alias/metadata.json b/parser/testdata/00262_alter_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00262_alter_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00262_alter_alias/query.sql b/parser/testdata/00262_alter_alias/query.sql new file mode 100644 index 000000000..1c19f8636 --- /dev/null +++ b/parser/testdata/00262_alter_alias/query.sql @@ -0,0 +1,25 @@ +drop table if exists aliases_test; + +set allow_deprecated_syntax_for_merge_tree=1; +create table aliases_test (date default today(), id default rand(), array default [0, 1, 2]) engine=MergeTree(date, id, 1); + +insert into aliases_test (id) values (0); +select array from aliases_test; + +alter table aliases_test modify column array alias [0, 1, 2]; +select array from aliases_test; + +alter table aliases_test modify column array default [0, 1, 2]; +select array from aliases_test; + +alter table aliases_test add column struct.key Array(UInt8) default [0, 1, 2], add column struct.value Array(UInt8) default array; +select struct.key, struct.value from aliases_test; + +alter table aliases_test modify column struct.value alias array; +select struct.key, struct.value from aliases_test; + +select struct.key, struct.value from aliases_test array join struct; +select struct.key, struct.value from aliases_test array join struct as struct; +select class.key, class.value from aliases_test array join struct as class; + +drop table aliases_test; diff --git a/parser/testdata/00263_merge_aggregates_and_overflow/ast.json b/parser/testdata/00263_merge_aggregates_and_overflow/ast.json new file mode 100644 index 000000000..e17e90987 --- /dev/null +++ b/parser/testdata/00263_merge_aggregates_and_overflow/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery numbers_10k_log (children 1)" + }, + { + "explain": " Identifier numbers_10k_log" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001029106, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/00263_merge_aggregates_and_overflow/metadata.json b/parser/testdata/00263_merge_aggregates_and_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00263_merge_aggregates_and_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/00263_merge_aggregates_and_overflow/query.sql b/parser/testdata/00263_merge_aggregates_and_overflow/query.sql new file mode 100644 index 000000000..891ec9376 --- /dev/null +++ b/parser/testdata/00263_merge_aggregates_and_overflow/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS numbers_10k_log; + +SET max_block_size = 1000; + +CREATE TABLE numbers_10k_log ENGINE = Log AS SELECT number FROM system.numbers LIMIT 10000; + +SET max_threads = 4; +SET max_rows_to_group_by = 3000, group_by_overflow_mode = 'any'; + +SELECT ignore(rand() AS k), ignore(max(toString(number))) FROM numbers_10k_log GROUP BY k LIMIT 1; + +DROP TABLE numbers_10k_log; diff --git a/parser/testdata/00264_uniq_many_args/ast.json b/parser/testdata/00264_uniq_many_args/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00264_uniq_many_args/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00264_uniq_many_args/metadata.json b/parser/testdata/00264_uniq_many_args/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00264_uniq_many_args/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00264_uniq_many_args/query.sql b/parser/testdata/00264_uniq_many_args/query.sql new file mode 100644 index 000000000..847d753a3 --- /dev/null +++ b/parser/testdata/00264_uniq_many_args/query.sql @@ -0,0 +1,37 @@ +SELECT + uniq(x), uniq((x)), uniq(x, y), uniq((x, y)), uniq(x, y, z), uniq((x, y, z)), + uniqCombined(x), uniqCombined((x)), uniqCombined(x, y), uniqCombined((x, y)), uniqCombined(x, y, z), uniqCombined((x, y, z)), + uniqCombined(17)(x), uniqCombined(17)((x)), uniqCombined(17)(x, y), uniqCombined(17)((x, y)), uniqCombined(17)(x, y, z), uniqCombined(17)((x, y, z)), + uniqHLL12(x), uniqHLL12((x)), uniqHLL12(x, y), uniqHLL12((x, y)), uniqHLL12(x, y, z), uniqHLL12((x, y, z)), + uniqExact(x), uniqExact((x)), uniqExact(x, y), uniqExact((x, y)), uniqExact(x, y, z), uniqExact((x, y, z)), + uniqUpTo(5)(x), uniqUpTo(5)((x)), uniqUpTo(5)(x, y), uniqUpTo(5)((x, y)), uniqUpTo(5)(x, y, z), uniqUpTo(5)((x, y, z)) +FROM +( + SELECT + number % 10 AS x, + intDiv(number, 10) % 10 AS y, + toString(intDiv(number, 100) % 10) AS z + FROM system.numbers LIMIT 1000 +); + + +SELECT k, + uniq(x), uniq((x)), uniq(x, y), uniq((x, y)), uniq(x, y, z), uniq((x, y, z)), + uniqCombined(x), uniqCombined((x)), uniqCombined(x, y), uniqCombined((x, y)), uniqCombined(x, y, z), uniqCombined((x, y, z)), + uniqCombined(17)(x), uniqCombined(17)((x)), uniqCombined(17)(x, y), uniqCombined(17)((x, y)), uniqCombined(17)(x, y, z), uniqCombined(17)((x, y, z)), + uniqHLL12(x), uniqHLL12((x)), uniqHLL12(x, y), uniqHLL12((x, y)), uniqHLL12(x, y, z), uniqHLL12((x, y, z)), + uniqExact(x), uniqExact((x)), uniqExact(x, y), uniqExact((x, y)), uniqExact(x, y, z), uniqExact((x, y, z)), + uniqUpTo(5)(x), uniqUpTo(5)((x)), uniqUpTo(5)(x, y), uniqUpTo(5)((x, y)), uniqUpTo(5)(x, y, z), uniqUpTo(5)((x, y, z)), + count() AS c +FROM +( + SELECT + (number + 0x8ffcbd8257219a26) * 0x66bb3430c06d2353 % 131 AS k, + number % 10 AS x, + intDiv(number, 10) % 10 AS y, + toString(intDiv(number, 100) % 10) AS z + FROM system.numbers LIMIT 100000 +) +GROUP BY k +ORDER BY c DESC, k ASC +LIMIT 10; diff --git a/parser/testdata/00266_read_overflow_mode/ast.json b/parser/testdata/00266_read_overflow_mode/ast.json new file mode 100644 index 000000000..129231c39 --- /dev/null +++ b/parser/testdata/00266_read_overflow_mode/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": 
"explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 6)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number (alias k)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_110000" + }, + { + "explain": " Set" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Set" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.001824839, + "rows_read": 27, + "bytes_read": 1044 + } +} diff --git a/parser/testdata/00266_read_overflow_mode/metadata.json b/parser/testdata/00266_read_overflow_mode/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00266_read_overflow_mode/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00266_read_overflow_mode/query.sql b/parser/testdata/00266_read_overflow_mode/query.sql new file mode 100644 index 000000000..968e88c58 --- /dev/null +++ b/parser/testdata/00266_read_overflow_mode/query.sql @@ -0,0 +1 @@ +SELECT number AS k FROM (SELECT number FROM system.numbers LIMIT 110000 SETTINGS max_result_rows = 0) GROUP BY k ORDER BY k LIMIT 10 SETTINGS max_result_rows = 100000, result_overflow_mode = 'break'; diff --git a/parser/testdata/00266_shard_global_subquery_and_aliases/ast.json b/parser/testdata/00266_shard_global_subquery_and_aliases/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00266_shard_global_subquery_and_aliases/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00266_shard_global_subquery_and_aliases/metadata.json b/parser/testdata/00266_shard_global_subquery_and_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00266_shard_global_subquery_and_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00266_shard_global_subquery_and_aliases/query.sql b/parser/testdata/00266_shard_global_subquery_and_aliases/query.sql new file mode 100644 index 000000000..37abdc90c --- /dev/null +++ b/parser/testdata/00266_shard_global_subquery_and_aliases/query.sql @@ -0,0 +1,3 @@ +-- Tags: shard + +SELECT 1 GLOBAL IN (SELECT 1) AS s, s FROM remote('127.0.0.{2,3}', system.one); diff --git a/parser/testdata/00267_tuple_array_access_operators_priority/ast.json b/parser/testdata/00267_tuple_array_access_operators_priority/ast.json new file mode 100644 index 000000000..803aae9ea --- /dev/null +++ 
b/parser/testdata/00267_tuple_array_access_operators_priority/ast.json @@ -0,0 +1,133 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (alias res) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function negate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal Int64_-245" + }, + { + "explain": " Literal 'Ok'" + }, + { + "explain": " Literal 'Fail'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (alias a) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Tuple_('Hello', UInt64_123)" + } + ], + + "rows": 37, + + "statistics": + { + "elapsed": 0.001223877, + "rows_read": 37, + "bytes_read": 1597 + } +} diff --git a/parser/testdata/00267_tuple_array_access_operators_priority/metadata.json b/parser/testdata/00267_tuple_array_access_operators_priority/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00267_tuple_array_access_operators_priority/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00267_tuple_array_access_operators_priority/query.sql b/parser/testdata/00267_tuple_array_access_operators_priority/query.sql new file mode 100644 index 000000000..54294d4f2 --- /dev/null +++ b/parser/testdata/00267_tuple_array_access_operators_priority/query.sql @@ -0,0 +1 @@ +SELECT 1+-a[1].2*2 = -245 ? 
'Ok' : 'Fail' AS res FROM (SELECT [('Hello', 123)] AS a); diff --git a/parser/testdata/00268_aliases_without_as_keyword/ast.json b/parser/testdata/00268_aliases_without_as_keyword/ast.json new file mode 100644 index 000000000..f5d0bae97 --- /dev/null +++ b/parser/testdata/00268_aliases_without_as_keyword/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias x)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001301846, + "rows_read": 9, + "bytes_read": 362 + } +} diff --git a/parser/testdata/00268_aliases_without_as_keyword/metadata.json b/parser/testdata/00268_aliases_without_as_keyword/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00268_aliases_without_as_keyword/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00268_aliases_without_as_keyword/query.sql b/parser/testdata/00268_aliases_without_as_keyword/query.sql new file mode 100644 index 000000000..e13e1e0c1 --- /dev/null +++ b/parser/testdata/00268_aliases_without_as_keyword/query.sql @@ -0,0 +1,2 @@ +SELECT 1 x FROM system.one; +SELECT 1 + (2 AS x) y FROM system.one; diff --git a/parser/testdata/00269_database_table_whitespace/ast.json b/parser/testdata/00269_database_table_whitespace/ast.json new file mode 100644 index 000000000..ea67cbb27 --- /dev/null +++ b/parser/testdata/00269_database_table_whitespace/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001097602, + "rows_read": 9, + "bytes_read": 344 + } +} diff --git a/parser/testdata/00269_database_table_whitespace/metadata.json b/parser/testdata/00269_database_table_whitespace/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00269_database_table_whitespace/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00269_database_table_whitespace/query.sql b/parser/testdata/00269_database_table_whitespace/query.sql new file mode 100644 index 000000000..8e69d2713 --- /dev/null +++ b/parser/testdata/00269_database_table_whitespace/query.sql @@ -0,0 +1,2 @@ +SELECT * FROM system . one; +SELECT * FROM system /* Hello */. 
`one`; diff --git a/parser/testdata/00270_views_query_processing_stage/ast.json b/parser/testdata/00270_views_query_processing_stage/ast.json new file mode 100644 index 000000000..c21b4e754 --- /dev/null +++ b/parser/testdata/00270_views_query_processing_stage/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery view1_00270 (children 1)" + }, + { + "explain": " Identifier view1_00270" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001231761, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/00270_views_query_processing_stage/metadata.json b/parser/testdata/00270_views_query_processing_stage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00270_views_query_processing_stage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00270_views_query_processing_stage/query.sql b/parser/testdata/00270_views_query_processing_stage/query.sql new file mode 100644 index 000000000..602ca9f71 --- /dev/null +++ b/parser/testdata/00270_views_query_processing_stage/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS view1_00270; +DROP TABLE IF EXISTS view2_00270; +DROP TABLE IF EXISTS merge_view_00270; + +CREATE VIEW view1_00270 AS SELECT number FROM system.numbers LIMIT 10; +CREATE VIEW view2_00270 AS SELECT number FROM system.numbers LIMIT 10; +CREATE TABLE merge_view_00270 (number UInt64) ENGINE = Merge(currentDatabase(), '^view'); + +SELECT 'Hello, world!' FROM merge_view_00270 LIMIT 5; + +DROP TABLE view1_00270; +DROP TABLE view2_00270; +DROP TABLE merge_view_00270; diff --git a/parser/testdata/00271_agg_state_and_totals/ast.json b/parser/testdata/00271_agg_state_and_totals/ast.json new file mode 100644 index 000000000..a23b1fc9d --- /dev/null +++ b/parser/testdata/00271_agg_state_and_totals/ast.json @@ -0,0 +1,136 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " Function finalizeAggregation (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantilesTimingState (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_0.5" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function intDiv (alias k) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_30000 (alias d)" + }, + { + "explain": " Function modulo (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Identifier d" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { 
+ "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_100000" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier k" + } + ], + + "rows": 38, + + "statistics": + { + "elapsed": 0.001073757, + "rows_read": 38, + "bytes_read": 1594 + } +} diff --git a/parser/testdata/00271_agg_state_and_totals/metadata.json b/parser/testdata/00271_agg_state_and_totals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00271_agg_state_and_totals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00271_agg_state_and_totals/query.sql b/parser/testdata/00271_agg_state_and_totals/query.sql new file mode 100644 index 000000000..d77cbce99 --- /dev/null +++ b/parser/testdata/00271_agg_state_and_totals/query.sql @@ -0,0 +1 @@ +SELECT k, finalizeAggregation(quantilesTimingState(0.5)(x)) FROM (SELECT intDiv(number, 30000 AS d) AS k, number % d AS x FROM system.numbers LIMIT 100000) GROUP BY k WITH TOTALS ORDER BY k; diff --git a/parser/testdata/00272_union_all_and_in_subquery/ast.json b/parser/testdata/00272_union_all_and_in_subquery/ast.json new file mode 100644 index 000000000..99495e4a8 --- /dev/null +++ b/parser/testdata/00272_union_all_and_in_subquery/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001279346, + "rows_read": 21, + "bytes_read": 754 + } +} diff --git a/parser/testdata/00272_union_all_and_in_subquery/metadata.json b/parser/testdata/00272_union_all_and_in_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00272_union_all_and_in_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00272_union_all_and_in_subquery/query.sql b/parser/testdata/00272_union_all_and_in_subquery/query.sql new file mode 100644 index 000000000..8179a014d --- /dev/null +++ b/parser/testdata/00272_union_all_and_in_subquery/query.sql @@ -0,0 +1,2 @@ +SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 WHERE 1 IN (SELECT 1 WHERE 0); +SELECT 2 UNION ALL SELECT 2 UNION ALL SELECT 2 WHERE 1 IN (SELECT 1 WHERE 1); diff --git 
a/parser/testdata/00273_quantiles/ast.json b/parser/testdata/00273_quantiles/ast.json new file mode 100644 index 000000000..f76f01b01 --- /dev/null +++ b/parser/testdata/00273_quantiles/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantiles (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_0.5" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number (alias x)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1001" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.00100761, + "rows_read": 23, + "bytes_read": 968 + } +} diff --git a/parser/testdata/00273_quantiles/metadata.json b/parser/testdata/00273_quantiles/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00273_quantiles/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00273_quantiles/query.sql b/parser/testdata/00273_quantiles/query.sql new file mode 100644 index 000000000..213ad876c --- /dev/null +++ b/parser/testdata/00273_quantiles/query.sql @@ -0,0 +1,17 @@ +SELECT quantiles(0.5)(x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); +SELECT quantilesExact(0.5)(x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); +SELECT quantilesTDigest(0.5)(x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); +SELECT quantilesDeterministic(0.5)(x, x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); +SELECT arrayMap(a -> round(a, 2), quantilesDD(0.01, 0.5)(x)) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); + +SELECT quantiles(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); +SELECT quantilesExact(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); +SELECT quantilesTDigest(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); +SELECT quantilesDeterministic(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(x, x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); +SELECT arrayMap(a -> round(a, 2), quantilesDD(0.01, 0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(x)) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); + +-- The result slightly differs but it's ok 
since `quantilesDeterministic` is an approximate function. +SET max_bytes_before_external_group_by = 0; +SET max_bytes_ratio_before_external_group_by = 0; + +SELECT round(1000000 / (number + 1)) AS k, count() AS c, arrayMap(x -> round(x, 6), quantilesDeterministic(0.1, 0.5, 0.9)(number, intHash64(number))) AS q1, quantilesExact(0.1, 0.5, 0.9)(number) AS q2 FROM (SELECT number FROM system.numbers LIMIT 1000000) GROUP BY k ORDER BY k; diff --git a/parser/testdata/00274_shard_group_array/ast.json b/parser/testdata/00274_shard_group_array/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00274_shard_group_array/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00274_shard_group_array/metadata.json b/parser/testdata/00274_shard_group_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00274_shard_group_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00274_shard_group_array/query.sql b/parser/testdata/00274_shard_group_array/query.sql new file mode 100644 index 000000000..c9618962d --- /dev/null +++ b/parser/testdata/00274_shard_group_array/query.sql @@ -0,0 +1,7 @@ +-- Tags: shard + +SELECT length(groupArray(number)), count() FROM (SELECT number FROM system.numbers_mt LIMIT 1000000); +SELECT groupArray(dummy), count() FROM remote('127.0.0.{2,3}', system.one); + +SELECT length(groupArray(toString(number))), count() FROM (SELECT number FROM system.numbers LIMIT 100000); +SELECT groupArray(toString(dummy)), count() FROM remote('127.0.0.{2,3}', system.one); diff --git a/parser/testdata/00275_shard_quantiles_weighted/ast.json b/parser/testdata/00275_shard_quantiles_weighted/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00275_shard_quantiles_weighted/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00275_shard_quantiles_weighted/metadata.json b/parser/testdata/00275_shard_quantiles_weighted/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00275_shard_quantiles_weighted/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00275_shard_quantiles_weighted/query.sql b/parser/testdata/00275_shard_quantiles_weighted/query.sql new file mode 100644 index 000000000..607b16b48 --- /dev/null +++ b/parser/testdata/00275_shard_quantiles_weighted/query.sql @@ -0,0 +1,25 @@ +-- Tags: shard + +SELECT quantileExactWeighted(0.5)(number, 1) FROM (SELECT number FROM system.numbers LIMIT 1001); +SELECT quantilesExactWeighted(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(number, 1) FROM (SELECT number FROM system.numbers LIMIT 1001); +SELECT quantilesExactWeighted(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(number, number) FROM (SELECT number FROM system.numbers LIMIT 1001); + +SELECT quantileTimingWeighted(0.5)(number, 1) FROM (SELECT number FROM system.numbers LIMIT 1001); +SELECT quantilesTimingWeighted(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(number, 1) FROM (SELECT number FROM system.numbers LIMIT 1001); +SELECT quantilesTimingWeighted(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(number, number) FROM (SELECT number FROM system.numbers LIMIT 1001); + +DROP TABLE IF EXISTS numbers_1001; +CREATE TABLE numbers_1001 (number UInt64) ENGINE = Memory; +SET 
min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +SET max_block_size = 10; +INSERT INTO numbers_1001 SELECT number FROM system.numbers LIMIT 1001; + +SELECT quantileExactWeighted(0.5)(number, 1) FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_1001); +SELECT quantilesExactWeighted(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(number, 1) FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_1001); +SELECT quantilesExactWeighted(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(number, number) FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_1001); + +SELECT quantileTimingWeighted(0.5)(number, 1) FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_1001); +SELECT quantilesTimingWeighted(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(number, 1) FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_1001); +SELECT quantilesTimingWeighted(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(number, number) FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_1001); + +DROP TABLE numbers_1001; diff --git a/parser/testdata/00276_sample/ast.json b/parser/testdata/00276_sample/ast.json new file mode 100644 index 000000000..3917242de --- /dev/null +++ b/parser/testdata/00276_sample/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery sample_00276 (children 1)" + }, + { + "explain": " Identifier sample_00276" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001371993, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/00276_sample/metadata.json b/parser/testdata/00276_sample/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00276_sample/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00276_sample/query.sql b/parser/testdata/00276_sample/query.sql new file mode 100644 index 000000000..b75ed188e --- /dev/null +++ b/parser/testdata/00276_sample/query.sql @@ -0,0 +1,290 @@ +DROP TABLE IF EXISTS sample_00276; + +set allow_deprecated_syntax_for_merge_tree=1; +SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +SET max_block_size = 10; + +CREATE TABLE sample_00276 (d Date DEFAULT '2000-01-01', x UInt8) ENGINE = MergeTree(d, x, x, 10); +INSERT INTO sample_00276 (x) SELECT toUInt8(number) AS x FROM system.numbers LIMIT 256; + +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 0.1; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1/10; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1/1e1; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1e1/1e2; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1e-1; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 2e-2; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1/10 OFFSET 1/10; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1/10 OFFSET 9/10; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1/10 OFFSET 10/10; +SELECT count(), min(x), max(x), 
sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1/10 OFFSET 19/20; + +SELECT count() >= 100 FROM sample_00276 SAMPLE 100; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1000; + +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1/2; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1/2 OFFSET 1/2; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1/2 SETTINGS parallel_replicas_count = 3; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1/2 SETTINGS parallel_replicas_count = 3, parallel_replica_offset = 0; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1/2 SETTINGS parallel_replicas_count = 3, parallel_replica_offset = 1; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1/2 SETTINGS parallel_replicas_count = 3, parallel_replica_offset = 2; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1/2 OFFSET 1/2 SETTINGS parallel_replicas_count = 3; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1/2 OFFSET 1/2 SETTINGS parallel_replicas_count = 3, parallel_replica_offset = 0; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1/2 OFFSET 1/2 SETTINGS parallel_replicas_count = 3, parallel_replica_offset = 1; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 1/2 OFFSET 1/2 SETTINGS parallel_replicas_count = 3, parallel_replica_offset = 2; + + +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM +( + SELECT x FROM sample_00276 SAMPLE 0.1 OFFSET 0.0 +UNION ALL SELECT x FROM sample_00276 SAMPLE 0.1 OFFSET 0.1 +UNION ALL SELECT x FROM sample_00276 SAMPLE 0.1 OFFSET 0.2 +UNION ALL SELECT x FROM sample_00276 SAMPLE 0.1 OFFSET 0.3 +UNION ALL SELECT x FROM sample_00276 SAMPLE 0.1 OFFSET 0.4 +UNION ALL SELECT x FROM sample_00276 SAMPLE 0.1 OFFSET 0.5 +UNION ALL SELECT x FROM sample_00276 SAMPLE 0.1 OFFSET 0.6 +UNION ALL SELECT x FROM sample_00276 SAMPLE 0.1 OFFSET 0.7 +UNION ALL SELECT x FROM sample_00276 SAMPLE 0.1 OFFSET 0.8 +UNION ALL SELECT x FROM sample_00276 SAMPLE 0.1 OFFSET 0.9 +); + +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 0.05 OFFSET 0.35; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM sample_00276 SAMPLE 0.05 OFFSET 0.4; + +SELECT count() +FROM +( + SELECT + x, + count() AS c + FROM + ( + SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.00 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.01 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.02 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.03 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.04 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.05 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.06 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.07 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.08 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.09 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.10 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.11 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.12 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.13 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.14 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.15 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 
0.16 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.17 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.18 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.19 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.20 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.21 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.22 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.23 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.24 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.25 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.26 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.27 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.28 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.29 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.30 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.31 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.32 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.33 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.34 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.35 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.36 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.37 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.38 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.39 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.40 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.41 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.42 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.43 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.44 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.45 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.46 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.47 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.48 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.49 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.50 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.51 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.52 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.53 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.54 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.55 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.56 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.57 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.58 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.59 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.60 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.61 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.62 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.63 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.64 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.65 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.66 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.67 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.68 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.69 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.70 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.71 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.72 + UNION ALL SELECT * 
FROM sample_00276 SAMPLE 0.01 OFFSET 0.73 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.74 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.75 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.76 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.77 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.78 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.79 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.80 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.81 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.82 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.83 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.84 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.85 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.86 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.87 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.88 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.89 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.90 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.91 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.92 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.93 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.94 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.95 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.96 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.97 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.98 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.99 + ) + GROUP BY x + HAVING c = 1 + ORDER BY x ASC +); + +DROP TABLE sample_00276; + +SET max_block_size = 8192; + +CREATE TABLE sample_00276 (d Date DEFAULT '2000-01-01', x UInt16) ENGINE = MergeTree(d, x, x, 10); +INSERT INTO sample_00276 (x) SELECT toUInt16(number) AS x FROM system.numbers LIMIT 65536; + +SELECT count() +FROM +( + SELECT + x, + count() AS c + FROM + ( + SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.00 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.01 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.02 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.03 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.04 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.05 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.06 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.07 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.08 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.09 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.10 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.11 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.12 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.13 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.14 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.15 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.16 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.17 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.18 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.19 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.20 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.21 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.22 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.23 
+ UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.24 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.25 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.26 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.27 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.28 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.29 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.30 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.31 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.32 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.33 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.34 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.35 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.36 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.37 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.38 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.39 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.40 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.41 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.42 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.43 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.44 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.45 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.46 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.47 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.48 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.49 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.50 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.51 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.52 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.53 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.54 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.55 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.56 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.57 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.58 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.59 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.60 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.61 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.62 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.63 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.64 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.65 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.66 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.67 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.68 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.69 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.70 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.71 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.72 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.73 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.74 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.75 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.76 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.77 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.78 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.79 + UNION ALL SELECT * FROM 
sample_00276 SAMPLE 0.01 OFFSET 0.80 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.81 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.82 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.83 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.84 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.85 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.86 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.87 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.88 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.89 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.90 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.91 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.92 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.93 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.94 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.95 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.96 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.97 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.98 + UNION ALL SELECT * FROM sample_00276 SAMPLE 0.01 OFFSET 0.99 + ) + GROUP BY x + HAVING c = 1 + ORDER BY x ASC +); + +DROP TABLE sample_00276; diff --git a/parser/testdata/00277_array_filter/ast.json b/parser/testdata/00277_array_filter/ast.json new file mode 100644 index 000000000..d769982f7 --- /dev/null +++ b/parser/testdata/00277_array_filter/ast.json @@ -0,0 +1,190 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function length (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier arr" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayMap (alias arr) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " 
Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1000" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function length (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier arr" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 56, + + "statistics": + { + "elapsed": 0.001427849, + "rows_read": 56, + "bytes_read": 2571 + } +} diff --git a/parser/testdata/00277_array_filter/metadata.json b/parser/testdata/00277_array_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00277_array_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00277_array_filter/query.sql b/parser/testdata/00277_array_filter/query.sql new file mode 100644 index 000000000..bc5c5fb2c --- /dev/null +++ b/parser/testdata/00277_array_filter/query.sql @@ -0,0 +1,2 @@ +SELECT sum(length(arr)) FROM (SELECT arrayMap(x -> toString(x), range(number % 10)) AS arr FROM (SELECT * FROM system.numbers LIMIT 1000) WHERE length(arr) % 2 = 0); +SELECT sum(length(arr)) FROM (SELECT range(number % 10) AS arr FROM (SELECT * FROM system.numbers LIMIT 1000) WHERE length(arr) % 2 = 0); diff --git a/parser/testdata/00278_insert_already_sorted/ast.json b/parser/testdata/00278_insert_already_sorted/ast.json new file mode 100644 index 000000000..c1c53aaa7 --- /dev/null +++ b/parser/testdata/00278_insert_already_sorted/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery sorted (children 1)" + }, + { + "explain": " Identifier sorted" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001269767, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/00278_insert_already_sorted/metadata.json b/parser/testdata/00278_insert_already_sorted/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00278_insert_already_sorted/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00278_insert_already_sorted/query.sql b/parser/testdata/00278_insert_already_sorted/query.sql new file mode 100644 index 000000000..dbd129d85 --- /dev/null +++ b/parser/testdata/00278_insert_already_sorted/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS sorted; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE sorted (d Date DEFAULT '2000-01-01', x UInt64) ENGINE = MergeTree(d, x, 8192); + +INSERT INTO sorted (x) SELECT intDiv(number, 100000) AS x FROM system.numbers LIMIT 1000000; + +SET max_threads = 1; + +SELECT count() FROM sorted; +SELECT x FROM (SELECT DISTINCT x FROM sorted) ORDER BY x; + +INSERT INTO sorted (x) SELECT (intHash64(number) % 1000 = 0 ? 
999 : intDiv(number, 100000)) AS x FROM system.numbers LIMIT 1000000; + +SELECT count() FROM sorted; +SELECT x FROM (SELECT DISTINCT x FROM sorted) ORDER BY x; + +DROP TABLE sorted; diff --git a/parser/testdata/00279_quantiles_permuted_args/ast.json b/parser/testdata/00279_quantiles_permuted_args/ast.json new file mode 100644 index 000000000..f7a514337 --- /dev/null +++ b/parser/testdata/00279_quantiles_permuted_args/ast.json @@ -0,0 +1,148 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantilesExact (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " ExpressionList (children 20)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal Float64_0.001" + }, + { + "explain": " Literal Float64_0.01" + }, + { + "explain": " Literal Float64_0.05" + }, + { + "explain": " Literal Float64_0.9" + }, + { + "explain": " Literal Float64_0.2" + }, + { + "explain": " Literal Float64_0.3" + }, + { + "explain": " Literal Float64_0.6" + }, + { + "explain": " Literal Float64_0.5" + }, + { + "explain": " Literal Float64_0.4" + }, + { + "explain": " Literal Float64_0.7" + }, + { + "explain": " Literal Float64_0.8" + }, + { + "explain": " Literal Float64_0.1" + }, + { + "explain": " Literal Float64_0.95" + }, + { + "explain": " Literal Float64_0.99" + }, + { + "explain": " Literal Float64_0.999" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal Float64_0.5" + }, + { + "explain": " Literal Float64_0.3" + }, + { + "explain": " Literal Float64_0.4" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number (alias x)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1001" + } + ], + + "rows": 42, + + "statistics": + { + "elapsed": 0.001323157, + "rows_read": 42, + "bytes_read": 1603 + } +} diff --git a/parser/testdata/00279_quantiles_permuted_args/metadata.json b/parser/testdata/00279_quantiles_permuted_args/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00279_quantiles_permuted_args/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00279_quantiles_permuted_args/query.sql b/parser/testdata/00279_quantiles_permuted_args/query.sql new file mode 100644 index 000000000..19431355f --- /dev/null +++ b/parser/testdata/00279_quantiles_permuted_args/query.sql @@ -0,0 +1,7 @@ +SELECT quantilesExact(1, 0.001, 0.01, 0.05, 0.9, 0.2, 0.3, 0.6, 0.5, 0.4, 0.7, 0.8, 0.1, 0.95, 0.99, 0.999, 0, 0.5, 0.3, 0.4)(x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); +SELECT quantilesExactWeighted(1, 0.001, 0.01, 0.05, 0.9, 0.2, 
0.3, 0.6, 0.5, 0.4, 0.7, 0.8, 0.1, 0.95, 0.99, 0.999, 0, 0.5, 0.3, 0.4)(x, 1) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); +SELECT quantilesTiming(1, 0.001, 0.01, 0.05, 0.9, 0.2, 0.3, 0.6, 0.5, 0.4, 0.7, 0.8, 0.1, 0.95, 0.99, 0.999, 0, 0.5, 0.3, 0.4)(x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); +-- SELECT quantilesTDigest(1, 0.001, 0.01, 0.05, 0.9, 0.2, 0.3, 0.6, 0.5, 0.4, 0.7, 0.8, 0.1, 0.95, 0.99, 0.999, 0, 0.5, 0.3, 0.4)(x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); +-- SELECT quantilesTDigestWeighted(1, 0.001, 0.01, 0.05, 0.9, 0.2, 0.3, 0.6, 0.5, 0.4, 0.7, 0.8, 0.1, 0.95, 0.99, 0.999, 0, 0.5, 0.3, 0.4)(x, 1) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); +SELECT quantiles(1, 0.001, 0.01, 0.05, 0.9, 0.2, 0.3, 0.6, 0.5, 0.4, 0.7, 0.8, 0.1, 0.95, 0.99, 0.999, 0, 0.5, 0.3, 0.4)(x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); +SELECT quantilesDeterministic(1, 0.001, 0.01, 0.05, 0.9, 0.2, 0.3, 0.6, 0.5, 0.4, 0.7, 0.8, 0.1, 0.95, 0.99, 0.999, 0, 0.5, 0.3, 0.4)(x, x) FROM (SELECT number AS x FROM system.numbers LIMIT 1001); diff --git a/parser/testdata/00280_hex_escape_sequence/ast.json b/parser/testdata/00280_hex_escape_sequence/ast.json new file mode 100644 index 000000000..ed62ef1ca --- /dev/null +++ b/parser/testdata/00280_hex_escape_sequence/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '0 Р'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001231277, + "rows_read": 5, + "bytes_read": 175 + } +} diff --git a/parser/testdata/00280_hex_escape_sequence/metadata.json b/parser/testdata/00280_hex_escape_sequence/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00280_hex_escape_sequence/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00280_hex_escape_sequence/query.sql b/parser/testdata/00280_hex_escape_sequence/query.sql new file mode 100644 index 000000000..a0eaa7504 --- /dev/null +++ b/parser/testdata/00280_hex_escape_sequence/query.sql @@ -0,0 +1 @@ +SELECT '\x30 \xD0\xA0'; diff --git a/parser/testdata/00282_merging/ast.json b/parser/testdata/00282_merging/ast.json new file mode 100644 index 000000000..f66a34ef3 --- /dev/null +++ b/parser/testdata/00282_merging/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery merge (children 1)" + }, + { + "explain": " Identifier merge" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001201427, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/00282_merging/metadata.json b/parser/testdata/00282_merging/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00282_merging/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00282_merging/query.sql b/parser/testdata/00282_merging/query.sql new file mode 100644 index 000000000..f4a3708ee --- /dev/null +++ b/parser/testdata/00282_merging/query.sql @@ -0,0 +1,103 @@ +DROP TABLE IF EXISTS merge; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE IF NOT EXISTS merge (d Date DEFAULT '2000-01-01', x UInt64) ENGINE = MergeTree(d, x, 5); + +INSERT INTO merge (x) VALUES 
(1), (2), (3); +INSERT INTO merge (x) VALUES (4), (5), (6); + +SELECT * FROM merge ORDER BY _part_index, x; +OPTIMIZE TABLE merge; +SELECT * FROM merge ORDER BY _part_index, x; + +DROP TABLE merge; + + +CREATE TABLE IF NOT EXISTS merge (d Date DEFAULT '2000-01-01', x UInt64) ENGINE = MergeTree(d, x, 5); + +INSERT INTO merge (x) SELECT number AS x FROM system.numbers LIMIT 10; +INSERT INTO merge (x) SELECT number + 10 AS x FROM system.numbers LIMIT 10; + +SELECT * FROM merge ORDER BY _part_index, x; +OPTIMIZE TABLE merge; +SELECT * FROM merge ORDER BY _part_index, x; + +DROP TABLE merge; + + +CREATE TABLE IF NOT EXISTS merge (d Date DEFAULT '2000-01-01', x UInt64) ENGINE = MergeTree(d, x, 5); + +INSERT INTO merge (x) SELECT number + 5 AS x FROM system.numbers LIMIT 10; +INSERT INTO merge (x) SELECT number AS x FROM system.numbers LIMIT 10; + +SELECT * FROM merge ORDER BY _part_index, x; +OPTIMIZE TABLE merge; +SELECT * FROM merge ORDER BY _part_index, x; + +DROP TABLE merge; + + +CREATE TABLE IF NOT EXISTS merge (d Date DEFAULT '2000-01-01', x UInt64) ENGINE = MergeTree(d, x, 5); + +INSERT INTO merge (x) SELECT number + 5 AS x FROM system.numbers LIMIT 10; +INSERT INTO merge (x) SELECT number AS x FROM system.numbers LIMIT 10; +INSERT INTO merge (x) SELECT number + 9 AS x FROM system.numbers LIMIT 10; + +SELECT * FROM merge ORDER BY _part_index, x; +OPTIMIZE TABLE merge; +SELECT * FROM merge ORDER BY _part_index, x; + +DROP TABLE merge; + + +CREATE TABLE IF NOT EXISTS merge (d Date DEFAULT '2000-01-01', x UInt64) ENGINE = MergeTree(d, x, 5); + +INSERT INTO merge (x) SELECT number AS x FROM system.numbers LIMIT 10; +INSERT INTO merge (x) SELECT number + 5 AS x FROM system.numbers LIMIT 10; +INSERT INTO merge (x) SELECT number + 10 AS x FROM system.numbers LIMIT 10; + +SELECT * FROM merge ORDER BY _part_index, x; +OPTIMIZE TABLE merge; +SELECT * FROM merge ORDER BY _part_index, x; + +INSERT INTO merge (x) SELECT number + 5 AS x FROM system.numbers LIMIT 10; + +SELECT * FROM merge ORDER BY _part_index, x; +OPTIMIZE TABLE merge; +SELECT * FROM merge ORDER BY _part_index, x; + +INSERT INTO merge (x) SELECT number + 100 AS x FROM system.numbers LIMIT 10; + +SELECT * FROM merge ORDER BY _part_index, x; +OPTIMIZE TABLE merge; +SELECT * FROM merge ORDER BY _part_index, x; + +DROP TABLE merge; + + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE IF NOT EXISTS merge (d Date DEFAULT '2000-01-01', x UInt64) ENGINE = MergeTree(d, x, 8192); + +SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +SET max_block_size = 8200; +INSERT INTO merge (x) SELECT number AS x FROM (SELECT * FROM system.numbers LIMIT 8200) ORDER BY rand(); +INSERT INTO merge (x) SELECT number AS x FROM (SELECT * FROM system.numbers LIMIT 8200) ORDER BY rand(); + +OPTIMIZE TABLE merge; + +SELECT count(), uniqExact(x), min(x), max(x), sum(x), sum(cityHash64(x)) FROM merge; + +DROP TABLE merge; + + +CREATE TABLE IF NOT EXISTS merge (d Date DEFAULT '2000-01-01', x UInt64) ENGINE = MergeTree(d, x, 8192); + +SET max_block_size = 10000; +INSERT INTO merge (x) SELECT number AS x FROM (SELECT number FROM system.numbers LIMIT 10000); +INSERT INTO merge (x) SELECT number AS x FROM (SELECT number + 5000 AS number FROM system.numbers LIMIT 10000); + +OPTIMIZE TABLE merge; + +SELECT count(), uniqExact(x), min(x), max(x), sum(x), sum(cityHash64(x)) FROM merge; + +DROP TABLE merge; diff --git a/parser/testdata/00283_column_cut/ast.json b/parser/testdata/00283_column_cut/ast.json new file mode 100644 index 
000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00283_column_cut/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00283_column_cut/metadata.json b/parser/testdata/00283_column_cut/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00283_column_cut/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00283_column_cut/query.sql b/parser/testdata/00283_column_cut/query.sql new file mode 100644 index 000000000..1269e139c --- /dev/null +++ b/parser/testdata/00283_column_cut/query.sql @@ -0,0 +1,10 @@ +SELECT + number, + toString(number), + range(number) AS arr, + arrayMap(x -> toString(x), arr) AS arr_s, + arrayMap(x -> range(x), arr) AS arr_arr, + arrayMap(x -> arrayMap(y -> toString(y), x), arr_arr) AS arr_arr_s, + arrayMap(x -> toFixedString(x, 3), arr_s) AS arr_fs +FROM system.numbers +LIMIT 5, 10; diff --git a/parser/testdata/00284_external_aggregation/ast.json b/parser/testdata/00284_external_aggregation/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00284_external_aggregation/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00284_external_aggregation/metadata.json b/parser/testdata/00284_external_aggregation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00284_external_aggregation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00284_external_aggregation/query.sql b/parser/testdata/00284_external_aggregation/query.sql new file mode 100644 index 000000000..704c9d0ad --- /dev/null +++ b/parser/testdata/00284_external_aggregation/query.sql @@ -0,0 +1,14 @@ +-- Tags: long + +-- This test was split in two due to long runtimes in sanitizers. +-- The other part is 00284_external_aggregation_2. + +SET max_bytes_before_external_group_by = 100000000; +SET max_bytes_ratio_before_external_group_by = 0; +SET max_memory_usage = 410000000; +SET group_by_two_level_threshold = 100000; +SET group_by_two_level_threshold_bytes = 50000000; +SET max_execution_time = 300; + +SELECT sum(k), sum(c) FROM (SELECT number AS k, count() AS c FROM (SELECT * FROM system.numbers LIMIT 10000000) GROUP BY k); +SELECT sum(k), sum(c), max(u) FROM (SELECT number AS k, count() AS c, uniqArray(range(number % 16)) AS u FROM (SELECT * FROM system.numbers LIMIT 1000000) GROUP BY k); diff --git a/parser/testdata/00284_external_aggregation_2/ast.json b/parser/testdata/00284_external_aggregation_2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00284_external_aggregation_2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00284_external_aggregation_2/metadata.json b/parser/testdata/00284_external_aggregation_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00284_external_aggregation_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00284_external_aggregation_2/query.sql b/parser/testdata/00284_external_aggregation_2/query.sql new file mode 100644 index 000000000..bb662d781 --- /dev/null +++ b/parser/testdata/00284_external_aggregation_2/query.sql @@ -0,0 +1,23 @@ +-- Tags: long + +-- This test was split in two due to long runtimes in sanitizers. +-- The other part is 00284_external_aggregation. 
+ +SET group_by_two_level_threshold_bytes = 50000000; +SET max_memory_usage = 0; +SET group_by_two_level_threshold = 100000; +SET max_bytes_before_external_group_by = '1Mi'; +SET max_bytes_ratio_before_external_group_by = 0; + +-- method: key_string & key_string_two_level +CREATE TABLE t_00284_str(s String) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +INSERT INTO t_00284_str SELECT toString(number) FROM numbers_mt(1e6); +INSERT INTO t_00284_str SELECT toString(number) FROM numbers_mt(1e6); +SELECT s, count() FROM t_00284_str GROUP BY s ORDER BY s LIMIT 10 OFFSET 42; + +-- method: low_cardinality_key_string & low_cardinality_key_string_two_level +CREATE TABLE t_00284_lc_str(s LowCardinality(String)) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +INSERT INTO t_00284_lc_str SELECT toString(number) FROM numbers_mt(1e6); +INSERT INTO t_00284_lc_str SELECT toString(number) FROM numbers_mt(1e6); +SELECT s, count() FROM t_00284_lc_str GROUP BY s ORDER BY s LIMIT 10 OFFSET 42; + diff --git a/parser/testdata/00285_not_all_data_in_totals/ast.json b/parser/testdata/00285_not_all_data_in_totals/ast.json new file mode 100644 index 000000000..18f2258ca --- /dev/null +++ b/parser/testdata/00285_not_all_data_in_totals/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001096416, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00285_not_all_data_in_totals/metadata.json b/parser/testdata/00285_not_all_data_in_totals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00285_not_all_data_in_totals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00285_not_all_data_in_totals/query.sql b/parser/testdata/00285_not_all_data_in_totals/query.sql new file mode 100644 index 000000000..ce7599e82 --- /dev/null +++ b/parser/testdata/00285_not_all_data_in_totals/query.sql @@ -0,0 +1,4 @@ +SET output_format_write_statistics = 0; +SET group_by_two_level_threshold = 1; +SELECT ignore(x), count() FROM (SELECT number AS x FROM system.numbers LIMIT 1000 UNION ALL SELECT number AS x FROM system.numbers LIMIT 1000) GROUP BY x WITH TOTALS LIMIT 10 FORMAT JSONCompact; +SELECT ignore(x), count() FROM (SELECT number AS x FROM system.numbers LIMIT 1000 UNION ALL SELECT number AS x FROM system.numbers LIMIT 1000) GROUP BY x WITH TOTALS ORDER BY x LIMIT 10 FORMAT JSONCompact; diff --git a/parser/testdata/00286_format_long_negative_float/ast.json b/parser/testdata/00286_format_long_negative_float/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00286_format_long_negative_float/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00286_format_long_negative_float/metadata.json b/parser/testdata/00286_format_long_negative_float/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00286_format_long_negative_float/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00286_format_long_negative_float/query.sql b/parser/testdata/00286_format_long_negative_float/query.sql new file mode 100644 index 000000000..0a93bab13 --- /dev/null +++ b/parser/testdata/00286_format_long_negative_float/query.sql @@ -0,0 +1,3 @@ +-- Tags: long + +select 
reinterpretAsFloat64(unhex('875635ffffffbfbe')) diff --git a/parser/testdata/00287_column_const_with_nan/ast.json b/parser/testdata/00287_column_const_with_nan/ast.json new file mode 100644 index 000000000..11f4bef07 --- /dev/null +++ b/parser/testdata/00287_column_const_with_nan/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_nan" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.001370247, + "rows_read": 27, + "bytes_read": 1078 + } +} diff --git a/parser/testdata/00287_column_const_with_nan/metadata.json b/parser/testdata/00287_column_const_with_nan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00287_column_const_with_nan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00287_column_const_with_nan/query.sql b/parser/testdata/00287_column_const_with_nan/query.sql new file mode 100644 index 000000000..67931511a --- /dev/null +++ b/parser/testdata/00287_column_const_with_nan/query.sql @@ -0,0 +1 @@ +SELECT * FROM (SELECT nan, number FROM system.numbers) WHERE number % 100 = 1 LIMIT 1; diff --git a/parser/testdata/00288_empty_stripelog/ast.json b/parser/testdata/00288_empty_stripelog/ast.json new file mode 100644 index 000000000..28d1ef466 --- /dev/null +++ b/parser/testdata/00288_empty_stripelog/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery stripelog (children 1)" + }, + { + "explain": " Identifier stripelog" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001567285, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00288_empty_stripelog/metadata.json b/parser/testdata/00288_empty_stripelog/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00288_empty_stripelog/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00288_empty_stripelog/query.sql b/parser/testdata/00288_empty_stripelog/query.sql new file mode 100644 index 000000000..700d90447 --- /dev/null +++ 
b/parser/testdata/00288_empty_stripelog/query.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS stripelog; +CREATE TABLE stripelog (x UInt8) ENGINE = StripeLog; + +SELECT * FROM stripelog ORDER BY x; +INSERT INTO stripelog VALUES (1), (2); +SELECT * FROM stripelog ORDER BY x; + +DROP TABLE stripelog; diff --git a/parser/testdata/00290_shard_aggregation_memory_efficient/ast.json b/parser/testdata/00290_shard_aggregation_memory_efficient/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00290_shard_aggregation_memory_efficient/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00290_shard_aggregation_memory_efficient/metadata.json b/parser/testdata/00290_shard_aggregation_memory_efficient/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00290_shard_aggregation_memory_efficient/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00290_shard_aggregation_memory_efficient/query.sql b/parser/testdata/00290_shard_aggregation_memory_efficient/query.sql new file mode 100644 index 000000000..3b65f4653 --- /dev/null +++ b/parser/testdata/00290_shard_aggregation_memory_efficient/query.sql @@ -0,0 +1,19 @@ +-- Tags: shard + +DROP TABLE IF EXISTS numbers_10_00290; +SET max_block_size = 1000; +CREATE TABLE numbers_10_00290 ENGINE = Log AS SELECT * FROM system.numbers LIMIT 10000; +SET distributed_aggregation_memory_efficient = 1, group_by_two_level_threshold = 5000; + +SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10; +SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10; +SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10; +SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10; +SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10; +SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10; +SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10; +SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00290) WHERE number < (randConstant() % 2 ? 
4999 : 10000) GROUP BY k ORDER BY k LIMIT 10; +SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10; +SELECT concat(toString(number), arrayStringConcat(arrayMap(x -> '.', range(number % 10)))) AS k FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00290) WHERE number < (randConstant() % 2 ? 4999 : 10000) GROUP BY k ORDER BY k LIMIT 10; + +DROP TABLE numbers_10_00290; diff --git a/parser/testdata/00291_array_reduce/ast.json b/parser/testdata/00291_array_reduce/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00291_array_reduce/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00291_array_reduce/metadata.json b/parser/testdata/00291_array_reduce/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00291_array_reduce/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00291_array_reduce/query.sql b/parser/testdata/00291_array_reduce/query.sql new file mode 100644 index 000000000..c11909247 --- /dev/null +++ b/parser/testdata/00291_array_reduce/query.sql @@ -0,0 +1,7 @@ +SELECT + arrayReduce('uniq', [1, 2, 1]) AS a, + arrayReduce('uniq', [1, 2, 2, 1], ['hello', 'world', '', '']) AS b, + arrayReduce('uniqUpTo(5)', [1, 2, 2, 1], materialize(['hello', 'world', '', ''])) AS c, + arrayReduce('uniqExactIf', [1, 2, 3, 4], [1, 0, 1, 1]) AS d; + +SELECT arrayReduce('quantiles(0.5, 0.9)', range(number) AS r), r FROM system.numbers LIMIT 12; diff --git a/parser/testdata/00292_parser_tuple_element/ast.json b/parser/testdata/00292_parser_tuple_element/ast.json new file mode 100644 index 000000000..2fb0a6751 --- /dev/null +++ b/parser/testdata/00292_parser_tuple_element/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_('a', 'b')" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001423402, + "rows_read": 8, + "bytes_read": 303 + } +} diff --git a/parser/testdata/00292_parser_tuple_element/metadata.json b/parser/testdata/00292_parser_tuple_element/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00292_parser_tuple_element/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00292_parser_tuple_element/query.sql b/parser/testdata/00292_parser_tuple_element/query.sql new file mode 100644 index 000000000..6d43ac9c7 --- /dev/null +++ b/parser/testdata/00292_parser_tuple_element/query.sql @@ -0,0 +1 @@ +SELECT ('a', 'b').2 diff --git a/parser/testdata/00293_shard_max_subquery_depth/ast.json b/parser/testdata/00293_shard_max_subquery_depth/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00293_shard_max_subquery_depth/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00293_shard_max_subquery_depth/metadata.json b/parser/testdata/00293_shard_max_subquery_depth/metadata.json new file mode 100644 
index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00293_shard_max_subquery_depth/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00293_shard_max_subquery_depth/query.sql b/parser/testdata/00293_shard_max_subquery_depth/query.sql new file mode 100644 index 000000000..150e087e0 --- /dev/null +++ b/parser/testdata/00293_shard_max_subquery_depth/query.sql @@ -0,0 +1,6 @@ +-- Tags: shard + +SET max_subquery_depth = 3; + +SELECT 1 FROM remote('127.0.0.{1,2}', system.one) WHERE 1 GLOBAL IN (SELECT 1 FROM remote('127.0.0.{2,3}', system.one) WHERE 1 GLOBAL IN (SELECT 1 FROM remote('127.0.0.{2,3}', system.one) WHERE 1 GLOBAL IN (SELECT 1 FROM remote('127.0.0.{2,3}', system.one)))); +SELECT 2 FROM system.one WHERE 1 IN (SELECT 1 FROM system.one WHERE 1 IN (SELECT 1 FROM system.one WHERE 1 IN (SELECT 1 FROM system.one))); diff --git a/parser/testdata/00294_shard_enums/ast.json b/parser/testdata/00294_shard_enums/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00294_shard_enums/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00294_shard_enums/metadata.json b/parser/testdata/00294_shard_enums/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00294_shard_enums/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00294_shard_enums/query.sql b/parser/testdata/00294_shard_enums/query.sql new file mode 100644 index 000000000..d5b929b2d --- /dev/null +++ b/parser/testdata/00294_shard_enums/query.sql @@ -0,0 +1,83 @@ +-- Tags: shard + +set max_threads = 1; +drop table if exists enums; + +set allow_deprecated_syntax_for_merge_tree=1; +create table enums ( + d Date default '2015-12-29', k default 0, + e Enum8('world' = 2, 'hello' = 1), sign Enum8('minus' = -1, 'plus' = 1), + letter Enum16('a' = 0, 'b' = 1, 'c' = 2, '*' = -256) +) engine = MergeTree(d, k, 1); + +desc table enums; + +-- insert default values +insert into enums (k) values (0); +select * from enums; + +alter table enums modify column e Enum8('world' = 2, 'hello' = 1, '!' = 3); +desc table enums; + +insert into enums (e, sign, letter) values ('!', 'plus', 'b'); +select * from enums ORDER BY _part; + +-- expand `e` and `sign` from Enum8 to Enum16 without changing values, change values of `letter` without changing type +alter table enums + modify column e Enum16('world' = 2, 'hello' = 1, '!' = 3), + modify column sign Enum16('minus' = -1, 'plus' = 1), + modify column letter Enum16('a' = 0, 'b' = 1, 'c' = 2, 'no letter' = -256); +desc table enums; + +select * from enums ORDER BY _part; + +alter table enums + modify column e Enum8('world' = 2, 'hello' = 1, '!' 
= 3), + modify column sign Enum8('minus' = -1, 'plus' = 1); + +desc table enums; + +insert into enums (letter, e) values ('c', 'world'); +select * from enums ORDER BY _part; + +drop table enums; + +create table enums (e Enum8('a' = 0, 'b' = 1, 'c' = 2, 'd' = 3)) engine = TinyLog; +insert into enums values ('d'), ('b'), ('a'), ('c'), ('a'), ('d'); +select * from enums; + +-- ORDER BY +select * from enums order by e; +select * from enums order by e desc; + +-- GROUP BY +select count(), e from enums group by e order by e; +select any(e) from enums; + +-- IN +select * from enums where e in ('a', 'd'); +select * from enums where e in (select e from enums); + +-- DISTINCT +select distinct e from enums; + +-- Comparison +select * from enums where e = e; +select * from enums where e = 'a' or e = 'd'; +select * from enums where e != 'a'; +select *, e < 'b' from enums; +select *, e > 'b' from enums; + +-- Conversion +select toInt8(e), toInt16(e), toUInt64(e), toString(e), e from enums; + +drop table if exists enums_copy; +create table enums_copy engine = TinyLog as select * from enums; +select * from enums_copy; + +drop table enums_copy; +create table enums_copy engine = TinyLog as select * from remote('127.0.0.2', currentDatabase(), enums); +select * from remote('127.0.0.2', currentDatabase(), enums_copy); + +drop table enums_copy; +drop table enums; diff --git a/parser/testdata/00295_global_in_one_shard_rows_before_limit/ast.json b/parser/testdata/00295_global_in_one_shard_rows_before_limit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00295_global_in_one_shard_rows_before_limit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00295_global_in_one_shard_rows_before_limit/metadata.json b/parser/testdata/00295_global_in_one_shard_rows_before_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00295_global_in_one_shard_rows_before_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00295_global_in_one_shard_rows_before_limit/query.sql b/parser/testdata/00295_global_in_one_shard_rows_before_limit/query.sql new file mode 100644 index 000000000..06cdb0bd8 --- /dev/null +++ b/parser/testdata/00295_global_in_one_shard_rows_before_limit/query.sql @@ -0,0 +1,5 @@ +-- Tags: shard + +SET output_format_write_statistics = 0; +SELECT arrayJoin(range(100)) AS x FROM remote('127.0.0.2', system.one) WHERE x GLOBAL IN (SELECT toUInt8(arrayJoin(range(100)) + 50)) GROUP BY x ORDER BY x LIMIT 10 FORMAT JSONCompact; +SELECT arrayJoin(range(100)) AS x FROM remote('127.0.0.{2,3}', system.one) WHERE x GLOBAL IN (SELECT toUInt8(arrayJoin(range(100)) + 50)) GROUP BY x ORDER BY x LIMIT 10 FORMAT JSONCompact; diff --git a/parser/testdata/00296_url_parameters/ast.json b/parser/testdata/00296_url_parameters/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00296_url_parameters/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00296_url_parameters/metadata.json b/parser/testdata/00296_url_parameters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00296_url_parameters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00296_url_parameters/query.sql b/parser/testdata/00296_url_parameters/query.sql new file mode 100644 index 000000000..8a96e3888 --- /dev/null +++ b/parser/testdata/00296_url_parameters/query.sql @@ -0,0 +1,200 @@ +SELECT + 
extractURLParameters('http://bigmir.net/?a=b&c=d'), + extractURLParameters('http://bigmir.net/?a=b&c=d#e=f'), + extractURLParameters('http://bigmir.net/?a&c=d#e=f'), + extractURLParameters('http://bigmir.net/?a=b&c=d#e=f&g=h'), + extractURLParameters('http://bigmir.net/?a=b&c=d#e'), + extractURLParameters('http://bigmir.net/?a=b&c=d#e&g=h'), + extractURLParameters('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), + extractURLParameters('//bigmir.net/?a=b&c=d'), + extractURLParameters('//bigmir.net/?a=b&c=d#e=f'), + extractURLParameters('//bigmir.net/?a&c=d#e=f'), + extractURLParameters('//bigmir.net/?a=b&c=d#e=f&g=h'), + extractURLParameters('//bigmir.net/?a=b&c=d#e'), + extractURLParameters('//bigmir.net/?a=b&c=d#e&g=h'), + extractURLParameters('//bigmir.net/?a=b&c=d#test?e=f&g=h'); + +SELECT + extractURLParameterNames('http://bigmir.net/?a=b&c=d'), + extractURLParameterNames('http://bigmir.net/?a=b&c=d#e=f'), + extractURLParameterNames('http://bigmir.net/?a&c=d#e=f'), + extractURLParameterNames('http://bigmir.net/?a=b&c=d#e=f&g=h'), + extractURLParameterNames('http://bigmir.net/?a=b&c=d#e'), + extractURLParameterNames('http://bigmir.net/?a=b&c=d#e&g=h'), + extractURLParameterNames('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), + extractURLParameterNames('//bigmir.net/?a=b&c=d'), + extractURLParameterNames('//bigmir.net/?a=b&c=d#e=f'), + extractURLParameterNames('//bigmir.net/?a&c=d#e=f'), + extractURLParameterNames('//bigmir.net/?a=b&c=d#e=f&g=h'), + extractURLParameterNames('//bigmir.net/?a=b&c=d#e'), + extractURLParameterNames('//bigmir.net/?a=b&c=d#e&g=h'), + extractURLParameterNames('//bigmir.net/?a=b&c=d#test?e=f&g=h'); + +SELECT + extractURLParameter('http://bigmir.net/?a=b&c=d', 'a'), + extractURLParameter('http://bigmir.net/?a=b&c=d', 'c'), + extractURLParameter('http://bigmir.net/?a=b&c=d#e=f', 'e'), + extractURLParameter('http://bigmir.net/?a&c=d#e=f', 'a'), + extractURLParameter('http://bigmir.net/?a&c=d#e=f', 'c'), + extractURLParameter('http://bigmir.net/?a&c=d#e=f', 'e'), + extractURLParameter('http://bigmir.net/?a=b&c=d#e=f&g=h', 'g'), + extractURLParameter('http://bigmir.net/?a=b&c=d#e', 'a'), + extractURLParameter('http://bigmir.net/?a=b&c=d#e', 'c'), + extractURLParameter('http://bigmir.net/?a=b&c=d#e', 'e'), + extractURLParameter('http://bigmir.net/?a=b&c=d#e&g=h', 'c'), + extractURLParameter('http://bigmir.net/?a=b&c=d#e&g=h', 'e'), + extractURLParameter('http://bigmir.net/?a=b&c=d#e&g=h', 'g'), + extractURLParameter('http://bigmir.net/?a=b&c=d#test?e=f&g=h', 'test'), + extractURLParameter('http://bigmir.net/?a=b&c=d#test?e=f&g=h', 'e'), + extractURLParameter('http://bigmir.net/?a=b&c=d#test?e=f&g=h', 'g'), + extractURLParameter('//bigmir.net/?a=b&c=d', 'a'), + extractURLParameter('//bigmir.net/?a=b&c=d', 'c'), + extractURLParameter('//bigmir.net/?a=b&c=d#e=f', 'e'), + extractURLParameter('//bigmir.net/?a&c=d#e=f', 'a'), + extractURLParameter('//bigmir.net/?a&c=d#e=f', 'c'), + extractURLParameter('//bigmir.net/?a&c=d#e=f', 'e'), + extractURLParameter('//bigmir.net/?a=b&c=d#e=f&g=h', 'g'), + extractURLParameter('//bigmir.net/?a=b&c=d#e', 'a'), + extractURLParameter('//bigmir.net/?a=b&c=d#e', 'c'), + extractURLParameter('//bigmir.net/?a=b&c=d#e', 'e'), + extractURLParameter('//bigmir.net/?a=b&c=d#e&g=h', 'c'), + extractURLParameter('//bigmir.net/?a=b&c=d#e&g=h', 'e'), + extractURLParameter('//bigmir.net/?a=b&c=d#e&g=h', 'g'), + extractURLParameter('//bigmir.net/?a=b&c=d#test?e=f&g=h', 'test'), + extractURLParameter('//bigmir.net/?a=b&c=d#test?e=f&g=h', 'e'), + 
extractURLParameter('//bigmir.net/?a=b&c=d#test?e=f&g=h', 'g'); + +SELECT + cutURLParameter('http://bigmir.net/?a=b&c=d', 'a'), + cutURLParameter('http://bigmir.net/?a=b&c=d', 'c'), + cutURLParameter('http://bigmir.net/?a=b&c=d#e=f', 'e'), + cutURLParameter('http://bigmir.net/?a&c=d#e=f', 'a'), + cutURLParameter('http://bigmir.net/?a&c=d#e=f', 'c'), + cutURLParameter('http://bigmir.net/?a&c=d#e=f', 'e'), + cutURLParameter('http://bigmir.net/?a=b&c=d#e=f&g=h', 'g'), + cutURLParameter('http://bigmir.net/?a=b&c=d#e', 'a'), + cutURLParameter('http://bigmir.net/?a=b&c=d#e', 'c'), + cutURLParameter('http://bigmir.net/?a=b&c=d#e', 'e'), + cutURLParameter('http://bigmir.net/?a=b&c=d#e&g=h', 'c'), + cutURLParameter('http://bigmir.net/?a=b&c=d#e&g=h', 'e'), + cutURLParameter('http://bigmir.net/?a=b&c=d#e&g=h', 'g'), + cutURLParameter('http://bigmir.net/?a=b&c=d#test?e=f&g=h', 'test'), + cutURLParameter('http://bigmir.net/?a=b&c=d#test?e=f&g=h', 'e'), + cutURLParameter('http://bigmir.net/?a=b&c=d#test?e=f&g=h', 'g'), + cutURLParameter('//bigmir.net/?a=b&c=d', 'a'), + cutURLParameter('//bigmir.net/?a=b&c=d', 'c'), + cutURLParameter('//bigmir.net/?a=b&c=d#e=f', 'e'), + cutURLParameter('//bigmir.net/?a&c=d#e=f', 'a'), + cutURLParameter('//bigmir.net/?a&c=d#e=f', 'c'), + cutURLParameter('//bigmir.net/?a&c=d#e=f', 'e'), + cutURLParameter('//bigmir.net/?a=b&c=d#e=f&g=h', 'g'), + cutURLParameter('//bigmir.net/?a=b&c=d#e', 'a'), + cutURLParameter('//bigmir.net/?a=b&c=d#e', 'c'), + cutURLParameter('//bigmir.net/?a=b&c=d#e', 'e'), + cutURLParameter('//bigmir.net/?a=b&c=d#e&g=h', 'c'), + cutURLParameter('//bigmir.net/?a=b&c=d#e&g=h', 'e'), + cutURLParameter('//bigmir.net/?a=b&c=d#e&g=h', 'g'), + cutURLParameter('//bigmir.net/?a=b&c=d#test?e=f&g=h', 'test'), + cutURLParameter('//bigmir.net/?a=b&c=d#test?e=f&g=h', 'e'), + cutURLParameter('//bigmir.net/?a=b&c=d#test?e=f&g=h', 'g'); + + +SELECT + extractURLParameters(materialize('http://bigmir.net/?a=b&c=d')), + extractURLParameters(materialize('http://bigmir.net/?a=b&c=d#e=f')), + extractURLParameters(materialize('http://bigmir.net/?a&c=d#e=f')), + extractURLParameters(materialize('http://bigmir.net/?a=b&c=d#e=f&g=h')), + extractURLParameters(materialize('http://bigmir.net/?a=b&c=d#e')), + extractURLParameters(materialize('http://bigmir.net/?a=b&c=d#e&g=h')), + extractURLParameters(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h')), + extractURLParameters(materialize('//bigmir.net/?a=b&c=d')), + extractURLParameters(materialize('//bigmir.net/?a=b&c=d#e=f')), + extractURLParameters(materialize('//bigmir.net/?a&c=d#e=f')), + extractURLParameters(materialize('//bigmir.net/?a=b&c=d#e=f&g=h')), + extractURLParameters(materialize('//bigmir.net/?a=b&c=d#e')), + extractURLParameters(materialize('//bigmir.net/?a=b&c=d#e&g=h')), + extractURLParameters(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h')); + +SELECT + extractURLParameterNames(materialize('http://bigmir.net/?a=b&c=d')), + extractURLParameterNames(materialize('http://bigmir.net/?a=b&c=d#e=f')), + extractURLParameterNames(materialize('http://bigmir.net/?a&c=d#e=f')), + extractURLParameterNames(materialize('http://bigmir.net/?a=b&c=d#e=f&g=h')), + extractURLParameterNames(materialize('http://bigmir.net/?a=b&c=d#e')), + extractURLParameterNames(materialize('http://bigmir.net/?a=b&c=d#e&g=h')), + extractURLParameterNames(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h')), + extractURLParameterNames(materialize('//bigmir.net/?a=b&c=d')), + extractURLParameterNames(materialize('//bigmir.net/?a=b&c=d#e=f')), + 
extractURLParameterNames(materialize('//bigmir.net/?a&c=d#e=f')), + extractURLParameterNames(materialize('//bigmir.net/?a=b&c=d#e=f&g=h')), + extractURLParameterNames(materialize('//bigmir.net/?a=b&c=d#e')), + extractURLParameterNames(materialize('//bigmir.net/?a=b&c=d#e&g=h')), + extractURLParameterNames(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h')); + +SELECT + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d'), 'a'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d'), 'c'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#e=f'), 'e'), + extractURLParameter(materialize('http://bigmir.net/?a&c=d#e=f'), 'a'), + extractURLParameter(materialize('http://bigmir.net/?a&c=d#e=f'), 'c'), + extractURLParameter(materialize('http://bigmir.net/?a&c=d#e=f'), 'e'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#e=f&g=h'), 'g'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#e'), 'a'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#e'), 'c'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#e'), 'e'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#e&g=h'), 'c'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#e&g=h'), 'e'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#e&g=h'), 'g'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), 'test'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), 'e'), + extractURLParameter(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), 'g'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d'), 'a'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d'), 'c'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#e=f'), 'e'), + extractURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), 'a'), + extractURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), 'c'), + extractURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), 'e'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#e=f&g=h'), 'g'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#e'), 'a'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#e'), 'c'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#e'), 'e'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#e&g=h'), 'c'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#e&g=h'), 'e'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#e&g=h'), 'g'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h'), 'test'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h'), 'e'), + extractURLParameter(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h'), 'g'); + +SELECT + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d'), 'a'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d'), 'c'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e=f'), 'e'), + cutURLParameter(materialize('http://bigmir.net/?a&c=d#e=f'), 'a'), + cutURLParameter(materialize('http://bigmir.net/?a&c=d#e=f'), 'c'), + cutURLParameter(materialize('http://bigmir.net/?a&c=d#e=f'), 'e'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e=f&g=h'), 'g'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e'), 'a'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e'), 'c'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e'), 'e'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e&g=h'), 'c'), + 
cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e&g=h'), 'e'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e&g=h'), 'g'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), 'test'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), 'e'), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), 'g'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d'), 'a'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d'), 'c'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e=f'), 'e'), + cutURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), 'a'), + cutURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), 'c'), + cutURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), 'e'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e=f&g=h'), 'g'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e'), 'a'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e'), 'c'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e'), 'e'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e&g=h'), 'c'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e&g=h'), 'e'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e&g=h'), 'g'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h'), 'test'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h'), 'e'), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h'), 'g'); diff --git a/parser/testdata/00298_enum_width_and_cast/ast.json b/parser/testdata/00298_enum_width_and_cast/ast.json new file mode 100644 index 000000000..956968226 --- /dev/null +++ b/parser/testdata/00298_enum_width_and_cast/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery enum (children 1)" + }, + { + "explain": " Identifier enum" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001606673, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/00298_enum_width_and_cast/metadata.json b/parser/testdata/00298_enum_width_and_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00298_enum_width_and_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00298_enum_width_and_cast/query.sql b/parser/testdata/00298_enum_width_and_cast/query.sql new file mode 100644 index 000000000..a79f8314a --- /dev/null +++ b/parser/testdata/00298_enum_width_and_cast/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS enum; + +SET output_format_pretty_color=1; +CREATE TABLE enum (x Enum8('Hello' = -100, '\\' = 0, '\t\\t' = 111), y UInt8) ENGINE = TinyLog; +INSERT INTO enum (y) VALUES (0); +SELECT * FROM enum ORDER BY x, y FORMAT PrettyCompactMonoBlock; +INSERT INTO enum (x) VALUES ('\\'); +SELECT * FROM enum ORDER BY x, y FORMAT PrettyCompactMonoBlock; +INSERT INTO enum (x) VALUES ('\t\\t'); +SELECT * FROM enum ORDER BY x, y FORMAT PrettyCompactMonoBlock; +SELECT x, y, toInt8(x), toString(x) AS s, CAST(s AS Enum8('Hello' = -100, '\\' = 0, '\t\\t' = 111)) AS cast FROM enum ORDER BY x, y FORMAT PrettyCompactMonoBlock; + +DROP TABLE enum; diff --git a/parser/testdata/00299_stripe_log_multiple_inserts/ast.json b/parser/testdata/00299_stripe_log_multiple_inserts/ast.json new file mode 100644 index 000000000..b10d9f480 --- /dev/null +++ b/parser/testdata/00299_stripe_log_multiple_inserts/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": 
+ [ + { + "explain": "DropQuery log (children 1)" + }, + { + "explain": " Identifier log" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001188938, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/00299_stripe_log_multiple_inserts/metadata.json b/parser/testdata/00299_stripe_log_multiple_inserts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00299_stripe_log_multiple_inserts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00299_stripe_log_multiple_inserts/query.sql b/parser/testdata/00299_stripe_log_multiple_inserts/query.sql new file mode 100644 index 000000000..0539dc757 --- /dev/null +++ b/parser/testdata/00299_stripe_log_multiple_inserts/query.sql @@ -0,0 +1,37 @@ +DROP TABLE IF EXISTS log; + +CREATE TABLE log (x UInt8) ENGINE = StripeLog; + +SELECT * FROM log ORDER BY x; +INSERT INTO log VALUES (0); +SELECT * FROM log ORDER BY x; +INSERT INTO log VALUES (1); +SELECT * FROM log ORDER BY x; +INSERT INTO log VALUES (2); +SELECT * FROM log ORDER BY x; + +DROP TABLE log; + +CREATE TABLE log (x UInt8) ENGINE = TinyLog; + +SELECT * FROM log ORDER BY x; +INSERT INTO log VALUES (0); +SELECT * FROM log ORDER BY x; +INSERT INTO log VALUES (1); +SELECT * FROM log ORDER BY x; +INSERT INTO log VALUES (2); +SELECT * FROM log ORDER BY x; + +DROP TABLE log; + +CREATE TABLE log (x UInt8) ENGINE = Log; + +SELECT * FROM log ORDER BY x; +INSERT INTO log VALUES (0); +SELECT * FROM log ORDER BY x; +INSERT INTO log VALUES (1); +SELECT * FROM log ORDER BY x; +INSERT INTO log VALUES (2); +SELECT * FROM log ORDER BY x; + +DROP TABLE log; diff --git a/parser/testdata/00300_csv/ast.json b/parser/testdata/00300_csv/ast.json new file mode 100644 index 000000000..8b788f99a --- /dev/null +++ b/parser/testdata/00300_csv/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal 'Hello, \"World\"' (alias x)" + }, + { + "explain": " Literal UInt64_123 (alias y)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3] (alias z)" + }, + { + "explain": " Function tuple (alias a) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_456" + }, + { + "explain": " Literal Array_['abc', 'def']" + }, + { + "explain": " Literal 'Newline\\nhere' (alias b)" + }, + { + "explain": " Identifier CSV" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001240521, + "rows_read": 13, + "bytes_read": 532 + } +} diff --git a/parser/testdata/00300_csv/metadata.json b/parser/testdata/00300_csv/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00300_csv/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00300_csv/query.sql b/parser/testdata/00300_csv/query.sql new file mode 100644 index 000000000..76b1b29df --- /dev/null +++ b/parser/testdata/00300_csv/query.sql @@ -0,0 +1,4 @@ +SELECT 'Hello, "World"' AS x, 123 AS y, [1, 2, 3] AS z, (456, ['abc', 'def']) AS a, 'Newline\nhere' AS b FORMAT CSV; +SELECT 'Hello, "World"' AS x, 123 AS y, [1, 2, 3] AS z, (456, ['abc', 'def']) AS a, 'Newline\nhere' AS b FORMAT CSVWithNames; +SELECT 'Hello, "World"' AS x, 123 AS y, [1, 2, 3] AS z, (456, ['abc', 'def']) AS a, 
'Newline\nhere' AS b FORMAT CSVWithNamesAndTypes; +SELECT number, toString(number), range(number), toDate('2000-01-01') + number, toDateTime('2000-01-01 00:00:00') + number FROM system.numbers LIMIT 10 FORMAT CSV; diff --git a/parser/testdata/00306_insert_values_and_expressions/ast.json b/parser/testdata/00306_insert_values_and_expressions/ast.json new file mode 100644 index 000000000..0cce9e02e --- /dev/null +++ b/parser/testdata/00306_insert_values_and_expressions/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery insert (children 1)" + }, + { + "explain": " Identifier insert" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001331546, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/00306_insert_values_and_expressions/metadata.json b/parser/testdata/00306_insert_values_and_expressions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00306_insert_values_and_expressions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00306_insert_values_and_expressions/query.sql b/parser/testdata/00306_insert_values_and_expressions/query.sql new file mode 100644 index 000000000..01a66282b --- /dev/null +++ b/parser/testdata/00306_insert_values_and_expressions/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS insert; +CREATE TABLE insert (i UInt64, s String, u UUID, d Date, t DateTime, a Array(UInt32)) ENGINE = Memory; + +INSERT INTO insert VALUES (1, 'Hello', 'ab41bdd6-5cd4-11e7-907b-a6006ad3dba0', '2016-01-01', '2016-01-02 03:04:05', [1, 2, 3]), (1 + 1, concat('Hello', ', world'), toUUID('00000000-0000-0000-0000-000000000000'), toDate('2016-01-01') + 1, toStartOfMinute(toDateTime('2016-01-02 03:04:05')), [[0,1],[2]][1]), (round(pi()), concat('hello', ', world!'), toUUID(toString('ab41bdd6-5cd4-11e7-907b-a6006ad3dba0')), toDate(toDateTime('2016-01-03 03:04:05')), toStartOfHour(toDateTime('2016-01-02 03:04:05')), []), (4, 'World', 'ab41bdd6-5cd4-11e7-907b-a6006ad3dba0', '2016-01-04', '2016-12-11 10:09:08', [3,2,1]); + +SELECT * FROM insert ORDER BY i; +DROP TABLE insert; + +-- Test the case where the VALUES are delimited by a semicolon and a query follows +-- without a newline. With most formats a query on the same line would be ignored or +-- would lead to an error, but VALUES are an exception and support the semicolon +-- delimiter in addition to the newline.
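+-- For example, the line 'insert into t_306 values (1); select 11111;' below is +-- parsed as two statements: the INSERT consumes the VALUES data up to the +-- semicolon, and SELECT 11111 then runs as an ordinary query of its own.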
+create table if not exists t_306 (a int) engine Memory; +insert into t_306 values (1); select 11111; +select * from t_306; +drop table if exists t_306; diff --git a/parser/testdata/00307_format_xml/ast.json b/parser/testdata/00307_format_xml/ast.json new file mode 100644 index 000000000..43687a609 --- /dev/null +++ b/parser/testdata/00307_format_xml/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001285356, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00307_format_xml/metadata.json b/parser/testdata/00307_format_xml/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00307_format_xml/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00307_format_xml/query.sql b/parser/testdata/00307_format_xml/query.sql new file mode 100644 index 000000000..a7e0e6289 --- /dev/null +++ b/parser/testdata/00307_format_xml/query.sql @@ -0,0 +1,5 @@ +SET output_format_write_statistics = 0; +SELECT 'unnamed columns in tuple'; +SELECT 'Hello & world' AS s, 'Hello\n', toDateTime('2001-02-03 04:05:06') AS time, arrayMap(x -> toString(x), range(10)) AS arr, (s, time) AS tpl SETTINGS extremes = 1, enable_named_columns_in_function_tuple = 0 FORMAT XML; +SELECT 'named columns in tuple'; +SELECT 'Hello & world' AS s, toDateTime('2001-02-03 04:05:06') AS time, (s, time) AS tpl SETTINGS extremes = 1, enable_named_columns_in_function_tuple = 0 FORMAT XML; diff --git a/parser/testdata/00308_write_buffer_valid_utf8/ast.json b/parser/testdata/00308_write_buffer_valid_utf8/ast.json new file mode 100644 index 000000000..9f2d43b65 --- /dev/null +++ b/parser/testdata/00308_write_buffer_valid_utf8/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001077774, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00308_write_buffer_valid_utf8/metadata.json b/parser/testdata/00308_write_buffer_valid_utf8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00308_write_buffer_valid_utf8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00308_write_buffer_valid_utf8/query.sql b/parser/testdata/00308_write_buffer_valid_utf8/query.sql new file mode 100644 index 000000000..5ac04233f --- /dev/null +++ b/parser/testdata/00308_write_buffer_valid_utf8/query.sql @@ -0,0 +1,2 @@ +SET output_format_write_statistics = 0; +SELECT concat('Hello, ', unhex('a0'), ' World') AS s1, concat('Hello, ', unhex('a0')) AS s2, concat(unhex('a0'), ' World') AS s3 FORMAT JSONCompact; diff --git a/parser/testdata/00309_formats/ast.json b/parser/testdata/00309_formats/ast.json new file mode 100644 index 000000000..15947c83d --- /dev/null +++ b/parser/testdata/00309_formats/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00145641, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00309_formats/metadata.json b/parser/testdata/00309_formats/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00309_formats/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/00309_formats/query.sql b/parser/testdata/00309_formats/query.sql new file mode 100644 index 000000000..ebe8d4914 --- /dev/null +++ b/parser/testdata/00309_formats/query.sql @@ -0,0 +1,17 @@ +SET output_format_write_statistics = 0; +SET enable_named_columns_in_function_tuple = 0; +set output_format_json_pretty_print = 0; + +SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT RowBinary; +SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT RowBinaryWithNamesAndTypes; +SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT TabSeparatedWithNamesAndTypes; +SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT TabSeparatedRaw; +SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT CSV; +SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT JSON; +SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT JSONCompact; +SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, range(n) AS arr, arrayStringConcat(arrayMap(x -> reinterpretAsString(x), arr)) AS s, (n, d) AS tuple FROM system.numbers LIMIT 2 FORMAT XML; + +SET enable_named_columns_in_function_tuple = 1; + +SELECT 36 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 1 FORMAT RowBinaryWithNamesAndTypes SETTINGS enable_analyzer=1; +SELECT number * 246 + 10 AS n, toDate('2000-01-01') + n AS d, (n, d) AS tuple FROM system.numbers LIMIT 1 FORMAT TabSeparatedWithNamesAndTypes SETTINGS enable_analyzer=1; diff --git a/parser/testdata/00311_array_primary_key/ast.json b/parser/testdata/00311_array_primary_key/ast.json new file mode 100644 index 000000000..03912464c --- /dev/null +++ b/parser/testdata/00311_array_primary_key/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00142114, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00311_array_primary_key/metadata.json b/parser/testdata/00311_array_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00311_array_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00311_array_primary_key/query.sql b/parser/testdata/00311_array_primary_key/query.sql new file mode 100644 index 000000000..c09d8020b --- /dev/null +++ b/parser/testdata/00311_array_primary_key/query.sql @@ -0,0 +1,19 @@ +set allow_deprecated_syntax_for_merge_tree=1; +DROP TABLE IF EXISTS array_pk; +CREATE TABLE array_pk (key Array(UInt8), s 
String, n UInt64, d Date MATERIALIZED '2000-01-01') ENGINE = MergeTree(d, (key, s, n), 1); + +INSERT INTO array_pk VALUES ([1, 2, 3], 'Hello, world!', 1); +INSERT INTO array_pk VALUES ([1, 2], 'Hello', 2); +INSERT INTO array_pk VALUES ([2], 'Goodbye', 3); +INSERT INTO array_pk VALUES ([], 'abc', 4); +INSERT INTO array_pk VALUES ([2, 3, 4], 'def', 5); +INSERT INTO array_pk VALUES ([5, 6], 'ghi', 6); + +SELECT * FROM array_pk ORDER BY n; + +DETACH TABLE array_pk; +ATTACH TABLE array_pk; + +SELECT * FROM array_pk ORDER BY n; + +DROP TABLE array_pk; diff --git a/parser/testdata/00312_position_case_insensitive_utf8/ast.json b/parser/testdata/00312_position_case_insensitive_utf8/ast.json new file mode 100644 index 000000000..4a7d06118 --- /dev/null +++ b/parser/testdata/00312_position_case_insensitive_utf8/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function positionCaseInsensitiveUTF8 (alias res) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'иголка.ру'" + }, + { + "explain": " Function arrayStringConcat (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal ' '" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_20000" + }, + { + "explain": " Literal 'иголка.ру'" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001639552, + "rows_read": 23, + "bytes_read": 1024 + } +} diff --git a/parser/testdata/00312_position_case_insensitive_utf8/metadata.json b/parser/testdata/00312_position_case_insensitive_utf8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00312_position_case_insensitive_utf8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00312_position_case_insensitive_utf8/query.sql b/parser/testdata/00312_position_case_insensitive_utf8/query.sql new file mode 100644 index 000000000..8bb8512fa --- /dev/null +++ b/parser/testdata/00312_position_case_insensitive_utf8/query.sql @@ -0,0 +1,126 @@ +SELECT positionCaseInsensitiveUTF8(concat('иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT 
positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitiveUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; + +SELECT positionCaseInsensitive(concat('иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT 
positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionCaseInsensitive(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; + +SELECT positionUTF8(concat('иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT 
positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT positionUTF8(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; + +SELECT position(concat('иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; +SELECT position(concat(' иголка.ру', arrayStringConcat(arrayMap(x -> ' ', range(20000)))), 'иголка.ру') AS res; + +SELECT 
positionCaseInsensitiveUTF8(materialize('test ß test'), 'ß') AS res; +SELECT positionCaseInsensitiveUTF8(materialize('test AaßAa test'), 'aßa') AS res; +SELECT positionCaseInsensitiveUTF8(materialize('test A1ß2a test'), '1ß2') AS res; +SELECT positionCaseInsensitiveUTF8(materialize('xẞyyaa1ẞ1yzẞXẞẞ1ẞẞ1bctest'), 'aa1ẞ1Yzßxßß1ßß1BC') AS res; + +SELECT positionCaseInsensitiveUTF8(materialize(concat('test a1ßAa test', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'a1ẞaa') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' test a1ßAa test', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'a1ẞaa') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' test a1ßAa test', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'a1ẞaa') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' test a1ßAa test', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'a1ẞaa') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' test a1ßAa test', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'a1ẞaa') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' test a1ßAa test', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'a1ẞaa') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' test a1ßAa test', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'a1ẞaa') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' test a1ßAa test', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'a1ẞaa') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' test a1ßAa test', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'a1ẞaa') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' test a1ßAa test', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'a1ẞaa') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' test a1ßAa test', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'a1ẞaa') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' test a1ßAa test', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'a1ẞaa') AS res; + +SELECT positionCaseInsensitiveUTF8(materialize(concat('xẞyyaa1ẞ1yzẞXẞẞ1ẞẞ1bctest', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'aa1ẞ1Yzßxßß1ßß1BC') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' xẞyyaa1ẞ1yzẞXẞẞ1ẞẞ1bctest', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'aa1ẞ1Yzßxßß1ßß1BC') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' xẞyyaa1ẞ1yzẞXẞẞ1ẞẞ1bctest', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'aa1ẞ1Yzßxßß1ßß1BC') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' xẞyyaa1ẞ1yzẞXẞẞ1ẞẞ1bctest', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'aa1ẞ1Yzßxßß1ßß1BC') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' xẞyyaa1ẞ1yzẞXẞẞ1ẞẞ1bctest', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'aa1ẞ1Yzßxßß1ßß1BC') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' xẞyyaa1ẞ1yzẞXẞẞ1ẞẞ1bctest', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'aa1ẞ1Yzßxßß1ßß1BC') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' xẞyyaa1ẞ1yzẞXẞẞ1ẞẞ1bctest', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'aa1ẞ1Yzßxßß1ßß1BC') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' xẞyyaa1ẞ1yzẞXẞẞ1ẞẞ1bctest', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'aa1ẞ1Yzßxßß1ßß1BC') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' xẞyyaa1ẞ1yzẞXẞẞ1ẞẞ1bctest', 
arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'aa1ẞ1Yzßxßß1ßß1BC') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' xẞyyaa1ẞ1yzẞXẞẞ1ẞẞ1bctest', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'aa1ẞ1Yzßxßß1ßß1BC') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' xẞyyaa1ẞ1yzẞXẞẞ1ẞẞ1bctest', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'aa1ẞ1Yzßxßß1ßß1BC') AS res; +SELECT positionCaseInsensitiveUTF8(materialize(concat(' xẞyyaa1ẞ1yzẞXẞẞ1ẞẞ1bctest', arrayStringConcat(arrayMap(x -> ' ', range(20000))))), 'aa1ẞ1Yzßxßß1ßß1BC') AS res; diff --git a/parser/testdata/00314_sample_factor_virtual_column/ast.json b/parser/testdata/00314_sample_factor_virtual_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00314_sample_factor_virtual_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00314_sample_factor_virtual_column/metadata.json b/parser/testdata/00314_sample_factor_virtual_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00314_sample_factor_virtual_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00314_sample_factor_virtual_column/query.sql b/parser/testdata/00314_sample_factor_virtual_column/query.sql new file mode 100644 index 000000000..b8ac5e733 --- /dev/null +++ b/parser/testdata/00314_sample_factor_virtual_column/query.sql @@ -0,0 +1,25 @@ +-- Tags: no-msan +-- ^ +-- makes SELECTs extremely slow sometimes for some reason: "Aggregated. 1000000 to 1 rows (from 7.63 MiB) in 242.829221645 sec." + +DROP TABLE IF EXISTS sample_00314_1; +DROP TABLE IF EXISTS sample_00314_2; +DROP TABLE IF EXISTS sample_merge_00314; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE sample_00314_1 (x UInt64, d Date DEFAULT today()) ENGINE = MergeTree(d, intHash64(x), intHash64(x), 10); +CREATE TABLE sample_00314_2 (x UInt64, d Date DEFAULT today()) ENGINE = MergeTree(d, intHash64(x), intHash64(x), 10); + +SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; + +INSERT INTO sample_00314_1 (x) SELECT number AS x FROM system.numbers LIMIT 1000000; +INSERT INTO sample_00314_2 (x) SELECT number AS x FROM system.numbers LIMIT 2000000; + +CREATE TABLE sample_merge_00314 AS sample_00314_1 ENGINE = Merge(currentDatabase(), '^sample_00314_\\d$'); + +SELECT abs(sum(_sample_factor) - 3000000) / 3000000 < 0.001 FROM sample_merge_00314 SAMPLE 100000; +SELECT abs(sum(_sample_factor) - 3000000) / 3000000 < 0.001 FROM merge(currentDatabase(), '^sample_00314_\\d$') SAMPLE 100000; + +DROP TABLE sample_00314_1; +DROP TABLE sample_00314_2; +DROP TABLE sample_merge_00314; diff --git a/parser/testdata/00315_quantile_off_by_one/ast.json b/parser/testdata/00315_quantile_off_by_one/ast.json new file mode 100644 index 000000000..df44d7676 --- /dev/null +++ b/parser/testdata/00315_quantile_off_by_one/ast.json @@ -0,0 +1,133 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function quantileExactWeighted (alias q5) (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " 
Literal Float64_0.5" + }, + { + "explain": " Function quantilesExactWeighted (alias qs) (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 11)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal Float64_0.1" + }, + { + "explain": " Literal Float64_0.2" + }, + { + "explain": " Literal Float64_0.3" + }, + { + "explain": " Literal Float64_0.4" + }, + { + "explain": " Literal Float64_0.5" + }, + { + "explain": " Literal Float64_0.6" + }, + { + "explain": " Literal Float64_0.7" + }, + { + "explain": " Literal Float64_0.8" + }, + { + "explain": " Literal Float64_0.9" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_1, UInt64_1, UInt64_10, UInt64_10, UInt64_10, UInt64_10, UInt64_100, UInt64_100, UInt64_100]" + } + ], + + "rows": 37, + + "statistics": + { + "elapsed": 0.001730292, + "rows_read": 37, + "bytes_read": 1544 + } +} diff --git a/parser/testdata/00315_quantile_off_by_one/metadata.json b/parser/testdata/00315_quantile_off_by_one/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00315_quantile_off_by_one/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00315_quantile_off_by_one/query.sql b/parser/testdata/00315_quantile_off_by_one/query.sql new file mode 100644 index 000000000..8a4f9fc75 --- /dev/null +++ b/parser/testdata/00315_quantile_off_by_one/query.sql @@ -0,0 +1,6 @@ +SELECT quantileExactWeighted(0.5)(x, 1) AS q5, quantilesExactWeighted(0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)(x, 1) AS qs FROM (SELECT arrayJoin([1, 1, 1, 10, 10, 10, 10, 100, 100, 100]) AS x); +SELECT quantileInterpolatedWeighted(0.5)(x, 1) AS q5, quantilesInterpolatedWeighted(0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)(x, 1) AS qs FROM (SELECT arrayJoin([1, 1, 1, 10, 10, 10, 10, 100, 100, 100]) AS x); +SELECT quantileExactWeightedInterpolated(0.5)(x, 1) AS q5, quantilesExactWeightedInterpolated(0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)(x, 1) AS qs FROM (SELECT arrayJoin([1, 1, 1, 10, 10, 10, 10, 100, 100, 100]) AS x); +SELECT quantile(0.5)(x) AS q5, quantiles(0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)(x) AS qs FROM (SELECT arrayJoin([1, 1, 1, 10, 10, 10, 10, 100, 100, 100]) AS x); +SELECT quantileExact(0)(x), quantileTiming(0)(x) FROM (SELECT number + 100 AS x FROM system.numbers LIMIT 10000); +SELECT quantileExact(x), quantileTiming(x) FROM (SELECT number % 123 AS x FROM system.numbers LIMIT 10000); diff --git a/parser/testdata/00316_rounding_functions_and_empty_block/ast.json b/parser/testdata/00316_rounding_functions_and_empty_block/ast.json new file mode 100644 index 000000000..04ab3ce6a --- /dev/null +++ b/parser/testdata/00316_rounding_functions_and_empty_block/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": 
"String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001374105, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00316_rounding_functions_and_empty_block/metadata.json b/parser/testdata/00316_rounding_functions_and_empty_block/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00316_rounding_functions_and_empty_block/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00316_rounding_functions_and_empty_block/query.sql b/parser/testdata/00316_rounding_functions_and_empty_block/query.sql new file mode 100644 index 000000000..08c30324d --- /dev/null +++ b/parser/testdata/00316_rounding_functions_and_empty_block/query.sql @@ -0,0 +1,13 @@ +SET any_join_distinct_right_table_keys = 1; + +SELECT + floor((ReferrerTimestamp - InstallTimestamp) / 86400) AS DaysSinceInstallations +FROM +( + SELECT 6534090703218709881 AS DeviceIDHash, 1458586663 AS InstallTimestamp + UNION ALL SELECT 2697418689476658272, 1458561552 +) js1 ANY INNER JOIN +( + SELECT 1034415739529768519 AS DeviceIDHash, 1458566664 AS ReferrerTimestamp + UNION ALL SELECT 2697418689476658272, 1458561552 +) js2 USING DeviceIDHash; diff --git a/parser/testdata/00317_in_tuples_and_out_of_range_values/ast.json b/parser/testdata/00317_in_tuples_and_out_of_range_values/ast.json new file mode 100644 index 000000000..7f68749d7 --- /dev/null +++ b/parser/testdata/00317_in_tuples_and_out_of_range_values/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(UInt64_1, '')" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Tuple_(Int64_-1, '')" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.00157742, + "rows_read": 10, + "bytes_read": 394 + } +} diff --git a/parser/testdata/00317_in_tuples_and_out_of_range_values/metadata.json b/parser/testdata/00317_in_tuples_and_out_of_range_values/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00317_in_tuples_and_out_of_range_values/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00317_in_tuples_and_out_of_range_values/query.sql b/parser/testdata/00317_in_tuples_and_out_of_range_values/query.sql new file mode 100644 index 000000000..8e7c4c80e --- /dev/null +++ b/parser/testdata/00317_in_tuples_and_out_of_range_values/query.sql @@ -0,0 +1,8 @@ +SELECT (1,'') IN ((-1,'')); +SELECT (1,'') IN ((1,'')); +SELECT (1,'') IN (-1,''); +SELECT (1,'') IN (1,''); +SELECT (1,'') IN ((-1,''),(1,'')); + +SELECT (number, toString(number)) IN ((1, '1'), (-1, '-1')) FROM system.numbers LIMIT 10; +SELECT (number - 1, toString(number - 1)) IN ((1, '1'), (-1, '-1')) FROM system.numbers LIMIT 10; diff --git a/parser/testdata/00318_pk_tuple_order/ast.json b/parser/testdata/00318_pk_tuple_order/ast.json new file mode 100644 index 000000000..323ff9006 --- /dev/null +++ b/parser/testdata/00318_pk_tuple_order/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + 
"data": + [ + { + "explain": "DropQuery pk (children 1)" + }, + { + "explain": " Identifier pk" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001474743, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/00318_pk_tuple_order/metadata.json b/parser/testdata/00318_pk_tuple_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00318_pk_tuple_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00318_pk_tuple_order/query.sql b/parser/testdata/00318_pk_tuple_order/query.sql new file mode 100644 index 000000000..b698883ee --- /dev/null +++ b/parser/testdata/00318_pk_tuple_order/query.sql @@ -0,0 +1,71 @@ +DROP TABLE IF EXISTS pk; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE pk (d Date DEFAULT '2000-01-01', x UInt64, y UInt64, z UInt64) ENGINE = MergeTree(d, (x, y, z), 1); + +INSERT INTO pk (x, y, z) VALUES (1, 11, 1235), (1, 11, 4395), (1, 22, 3545), (1, 22, 6984), (1, 33, 4596), (2, 11, 4563), (2, 11, 4578), (2, 11, 3572), (2, 22, 5786), (2, 22, 5786), (2, 22, 2791), (2, 22, 2791), (3, 33, 2791), (3, 33, 2791), (3, 33, 1235), (3, 44, 4935), (3, 44, 4578), (3, 55, 5786), (3, 55, 2791), (3, 55, 1235); + +SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +SET max_block_size = 1; +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. +SET parallel_replicas_index_analysis_only_on_coordinator = 0; + +SET max_rows_to_read = 4; +SELECT * FROM pk WHERE x = 2 AND y = 11 ORDER BY ALL; + +SET max_rows_to_read = 5; +SELECT * FROM pk WHERE x = 1 ORDER BY ALL; + +SET max_rows_to_read = 9; +SELECT * FROM pk WHERE x = 3 ORDER BY ALL; + +SET max_rows_to_read = 3; +SELECT * FROM pk WHERE x = 3 AND y = 44 ORDER BY ALL; + +SET max_rows_to_read = 2; +SELECT * FROM pk WHERE x = 3 AND y = 44 AND z = 4935 ORDER BY ALL; +SELECT * FROM pk WHERE x = 3 AND y = 44 AND z = 4578 ORDER BY ALL; + +SET max_rows_to_read = 1; +SELECT * FROM pk WHERE x = 3 AND y = 44 AND z = 4934 ORDER BY ALL; +SELECT * FROM pk WHERE x = 3 AND y = 44 AND z = 4936 ORDER BY ALL; +SELECT * FROM pk WHERE x = 3 AND y = 44 AND z = 4577 ORDER BY ALL; +SELECT * FROM pk WHERE x = 3 AND y = 44 AND z = 4579 ORDER BY ALL; + +SET max_rows_to_read = 1; +SELECT * FROM pk WHERE x = 3 AND y = 55 AND z > 5786 ORDER BY ALL; + +SET max_rows_to_read = 2; +SELECT * FROM pk WHERE x = 3 AND y = 55 AND z >= 5786 ORDER BY ALL; + +SET max_rows_to_read = 3; +SELECT * FROM pk WHERE x = 3 AND y = 55 AND z > 1235 ORDER BY ALL; + +SET max_rows_to_read = 4; +SELECT * FROM pk WHERE x = 3 AND y = 55 AND z >= 1235 ORDER BY ALL; +SELECT * FROM pk WHERE x = 3 AND y = 55 AND z >= 1000 ORDER BY ALL; +SELECT * FROM pk WHERE x = 3 AND y = 55 AND z >= 1000 AND x < 10000 ORDER BY ALL; +SELECT * FROM pk WHERE x = 3 AND y = 55 ORDER BY ALL; +SELECT * FROM pk WHERE x = 3 AND y >= 50 ORDER BY ALL; +SELECT * FROM pk WHERE x = 3 AND y > 44 ORDER BY ALL; +SELECT * FROM pk WHERE x >= 3 AND y > 44 ORDER BY ALL; +SELECT * FROM pk WHERE x > 2 AND y > 44 ORDER BY ALL; + +SET max_rows_to_read = 2; +SELECT * FROM pk WHERE x = 3 AND y = 55 AND z = 5786 ORDER BY ALL; + +SET max_rows_to_read = 15; +SET merge_tree_min_rows_for_seek = 0; +SELECT * FROM pk WHERE z = 2791 ORDER BY ALL; +SELECT * FROM pk WHERE z = 5786 ORDER BY ALL; +SELECT * FROM pk WHERE z = 1235 ORDER BY ALL; +SELECT * FROM pk WHERE z = 4578 ORDER BY ALL; + +SET max_rows_to_read = 10; 
+SELECT * FROM pk WHERE y = 11 ORDER BY ALL; +SELECT * FROM pk WHERE y = 22 ORDER BY ALL; +SELECT * FROM pk WHERE y = 33 ORDER BY ALL; +SELECT * FROM pk WHERE y = 44 ORDER BY ALL; +SELECT * FROM pk WHERE y = 55 ORDER BY ALL; + +DROP TABLE pk; diff --git a/parser/testdata/00319_index_for_like/ast.json b/parser/testdata/00319_index_for_like/ast.json new file mode 100644 index 000000000..39533cbbd --- /dev/null +++ b/parser/testdata/00319_index_for_like/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001486023, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00319_index_for_like/metadata.json b/parser/testdata/00319_index_for_like/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00319_index_for_like/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00319_index_for_like/query.sql b/parser/testdata/00319_index_for_like/query.sql new file mode 100644 index 000000000..1dfc7aff7 --- /dev/null +++ b/parser/testdata/00319_index_for_like/query.sql @@ -0,0 +1,59 @@ +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +DROP TABLE IF EXISTS index_for_like; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE index_for_like (s String, d Date DEFAULT today()) ENGINE = MergeTree(d, (s, d), 1); + +INSERT INTO index_for_like (s) VALUES ('Hello'), ('Hello, World'), ('Hello, World 1'), ('Hello 1'), ('Goodbye'), ('Goodbye, World'), ('Goodbye 1'), ('Goodbye, World 1'); + +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. 
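+-- (With index analysis enabled on every replica, the prefix ranges derived from +-- the LIKE patterns below are computed locally, so the row counts asserted via +-- max_rows_to_read stay stable.)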
+SET parallel_replicas_index_analysis_only_on_coordinator = 0; +SET max_rows_to_read = 3; +SELECT s FROM index_for_like WHERE s LIKE 'Hello, World%'; + +SET max_rows_to_read = 2; +SELECT s FROM index_for_like WHERE s LIKE 'Hello, World %'; + +SET max_rows_to_read = 2; +SELECT s FROM index_for_like WHERE s LIKE 'Hello, World 1%'; + +SET max_rows_to_read = 1; +SELECT s FROM index_for_like WHERE s LIKE 'Hello, World 2%'; + +SET max_rows_to_read = 1; +SELECT s FROM index_for_like WHERE s LIKE 'Hello, Worle%'; + +SET max_rows_to_read = 3; +SELECT s FROM index_for_like WHERE s LIKE 'Hello, Wor%'; + +SET max_rows_to_read = 5; +SELECT s FROM index_for_like WHERE s LIKE 'Hello%'; + +SET max_rows_to_read = 2; +SELECT s FROM index_for_like WHERE s LIKE 'Hello %'; + +SET max_rows_to_read = 3; +SELECT s FROM index_for_like WHERE s LIKE 'Hello,%'; + +SET max_rows_to_read = 1; +SELECT s FROM index_for_like WHERE s LIKE 'Hello;%'; + +SET max_rows_to_read = 5; +SELECT s FROM index_for_like WHERE s LIKE 'H%'; + +SET max_rows_to_read = 4; +SELECT s FROM index_for_like WHERE s LIKE 'Good%'; + +SET max_rows_to_read = 8; +SELECT s FROM index_for_like WHERE s LIKE '%'; +SELECT s FROM index_for_like WHERE s LIKE '%Hello%'; +SELECT s FROM index_for_like WHERE s LIKE '%Hello'; + +SET max_rows_to_read = 3; +SELECT s FROM index_for_like WHERE s LIKE 'Hello, World% %'; +SELECT s FROM index_for_like WHERE s LIKE 'Hello, Worl_%'; + +SET max_rows_to_read = 1; +SELECT s FROM index_for_like WHERE s LIKE 'Hello, Worl\\_%'; + +DROP TABLE index_for_like; diff --git a/parser/testdata/00320_between/ast.json b/parser/testdata/00320_between/ast.json new file mode 100644 index 000000000..596199316 --- /dev/null +++ b/parser/testdata/00320_between/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function greaterOrEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function lessOrEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function minus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001179466, + "rows_read": 20, + "bytes_read": 782 + } +} diff --git a/parser/testdata/00320_between/metadata.json b/parser/testdata/00320_between/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00320_between/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00320_between/query.sql b/parser/testdata/00320_between/query.sql new file mode 100644 index 000000000..af403bdcc --- /dev/null +++ b/parser/testdata/00320_between/query.sql @@ -0,0 +1 @@ +SELECT 2 BETWEEN 1 + 1 AND 3 - 1; diff --git a/parser/testdata/00321_pk_set/ast.json 
b/parser/testdata/00321_pk_set/ast.json new file mode 100644 index 000000000..f70b70c6a --- /dev/null +++ b/parser/testdata/00321_pk_set/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery pk_set (children 1)" + }, + { + "explain": " Identifier pk_set" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001243449, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/00321_pk_set/metadata.json b/parser/testdata/00321_pk_set/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00321_pk_set/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00321_pk_set/query.sql b/parser/testdata/00321_pk_set/query.sql new file mode 100644 index 000000000..bf61a684a --- /dev/null +++ b/parser/testdata/00321_pk_set/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS pk_set; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE pk_set (d Date, n UInt64, host String, code UInt64) ENGINE = MergeTree(d, (n, host, code), 1); +INSERT INTO pk_set (n, host, code) VALUES (1, 'market', 100), (11, 'news', 100); + +SELECT count() FROM pk_set WHERE host IN ('admin.market1', 'admin.market2') AND code = 100; +SELECT count() FROM pk_set WHERE host IN ('admin.market1', 'admin.market2') AND code = 100 AND n = 11; +SELECT count() FROM pk_set WHERE host IN ('admin.market1', 'admin.market2') AND code = 100 AND n >= 11; +SELECT count() FROM pk_set WHERE host IN ('market', 'admin.market2', 'admin.market3', 'admin.market4', 'abc') AND code = 100 AND n = 11; +SELECT count() FROM pk_set WHERE host IN ('market', 'admin.market2', 'admin.market3', 'admin.market4', 'abc') AND code = 100 AND n >= 11; +SELECT count() FROM pk_set WHERE host IN ('admin.market2', 'admin.market3', 'admin.market4', 'abc') AND code = 100 AND n = 11; +SELECT count() FROM pk_set WHERE host IN ('admin.market2', 'admin.market3', 'admin.market4', 'abc', 'news') AND code = 100 AND n = 11; + +-- That only barely reproduces the problem. +-- A better way: +-- for i in {1..1000}; do echo "SELECT count() FROM pk_set WHERE host IN ('a'"$(seq 1 $i | sed -r "s/.+/,'\\0'/")") AND code = 100 AND n = 11;"; done > queries.tsv +-- clickhouse-benchmark < queries.tsv + +DROP TABLE pk_set; diff --git a/parser/testdata/00323_quantiles_timing_bug/ast.json b/parser/testdata/00323_quantiles_timing_bug/ast.json new file mode 100644 index 000000000..63c50b71a --- /dev/null +++ b/parser/testdata/00323_quantiles_timing_bug/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantilesTiming (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100000" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_0.99" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001355977, + "rows_read": 13, + "bytes_read": 525 + } +} diff --git a/parser/testdata/00323_quantiles_timing_bug/metadata.json 
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00323_quantiles_timing_bug/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00323_quantiles_timing_bug/query.sql b/parser/testdata/00323_quantiles_timing_bug/query.sql
new file mode 100644
index 000000000..72ac3c0d0
--- /dev/null
+++ b/parser/testdata/00323_quantiles_timing_bug/query.sql
@@ -0,0 +1 @@
+SELECT quantilesTiming(0.99)(arrayJoin(range(100000)));
diff --git a/parser/testdata/00324_hashing_enums/ast.json b/parser/testdata/00324_hashing_enums/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00324_hashing_enums/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00324_hashing_enums/metadata.json b/parser/testdata/00324_hashing_enums/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00324_hashing_enums/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00324_hashing_enums/query.sql b/parser/testdata/00324_hashing_enums/query.sql
new file mode 100644
index 000000000..9952f9424
--- /dev/null
+++ b/parser/testdata/00324_hashing_enums/query.sql
@@ -0,0 +1,4 @@
+-- Tags: no-fasttest
+
+SELECT cityHash64(*) FROM (SELECT 1 AS x, CAST(x AS Enum8('Hello' = 0, 'World' = 1)) AS y);
+SELECT cityHash64(*) FROM (SELECT 1 AS x, x AS y);
diff --git a/parser/testdata/00326_long_function_multi_if/ast.json b/parser/testdata/00326_long_function_multi_if/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00326_long_function_multi_if/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00326_long_function_multi_if/metadata.json b/parser/testdata/00326_long_function_multi_if/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00326_long_function_multi_if/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00326_long_function_multi_if/query.sql b/parser/testdata/00326_long_function_multi_if/query.sql
new file mode 100644
index 000000000..247f39e5c
--- /dev/null
+++ b/parser/testdata/00326_long_function_multi_if/query.sql
@@ -0,0 +1,1954 @@
+-- Tags: long, no-msan
+-- no-msan: it takes a long time
+
+SELECT 'Trivial case';
+
+SELECT multiIf(1, 2, 1, 3, 4);
+SELECT multiIf(1, 'A', 1, 'BC', 'DEF');
+SELECT multiIf(1, toFixedString('A', 16), 1, toFixedString('BC', 16), toFixedString('DEF', 16));
+SELECT multiIf(1, [1,2], 1, [3,4], [5,6]);
+SELECT multiIf(1, ['A', 'B'], 1, ['C', 'D'], ['E', 'F']);
+SELECT multiIf(rand() % 2 = 0, emptyArrayString(), emptyArrayString());
+SELECT multiIf(rand() % 2 = 0, emptyArrayUInt8(), emptyArrayUInt8());
+SELECT multiIf(rand() % 2 = 0, '', '');
+
+SELECT 'Numeric branches';
+
+SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt8(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt8(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt8(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt8(2), toInt64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt8(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt8(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt16(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt16(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt16(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt16(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt16(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt16(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt32(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt32(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt32(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt32(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt32(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt32(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt64(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt64(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt64(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt64(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt64(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt64(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toInt64(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt8(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt8(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 
0, toInt8(1), (number % 3) = 0, toUInt8(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt8(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt8(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt8(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt16(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt16(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt16(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt16(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt16(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt16(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt32(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt32(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt32(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt32(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt32(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toUInt32(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toFloat32(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toFloat32(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toFloat32(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toFloat32(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toFloat32(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 
0, toInt8(1), (number % 3) = 0, toFloat32(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toFloat32(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toFloat32(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toFloat64(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toFloat64(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toFloat64(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toFloat64(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toFloat64(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toFloat64(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toFloat64(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt8(1), (number % 3) = 0, toFloat64(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt8(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt8(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt8(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt8(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt8(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt8(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt16(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt16(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt16(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt16(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt16(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt16(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt32(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number 
% 2) = 0, toInt16(1), (number % 3) = 0, toInt32(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt32(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt32(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt32(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt32(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt64(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt64(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt64(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt64(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt64(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt64(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toInt64(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt8(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt8(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt8(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt8(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt8(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt8(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt16(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt16(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt16(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt16(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT 
multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt16(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt16(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt32(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt32(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt32(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt32(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt32(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toUInt32(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toFloat32(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toFloat32(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toFloat32(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toFloat32(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toFloat32(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toFloat32(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toFloat32(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toFloat32(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toFloat64(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toFloat64(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toFloat64(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toFloat64(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toFloat64(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toFloat64(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toFloat64(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt16(1), (number % 3) = 0, toFloat64(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt8(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt8(2), 
toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt8(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt8(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt8(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt8(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt16(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt16(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt16(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt16(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt16(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt16(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt32(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt32(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt32(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt32(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt32(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt32(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt64(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt64(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt64(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt64(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt64(2), 
toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt64(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toInt64(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt8(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt8(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt8(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt8(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt8(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt8(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt16(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt16(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt16(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt16(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt16(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt16(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt32(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt32(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt32(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt32(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt32(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toUInt32(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 
3) = 0, toFloat32(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toFloat32(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toFloat32(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toFloat32(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toFloat32(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toFloat32(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toFloat32(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toFloat32(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toFloat64(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toFloat64(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toFloat64(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toFloat64(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toFloat64(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toFloat64(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toFloat64(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt32(1), (number % 3) = 0, toFloat64(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt8(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt8(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt8(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt8(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt16(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt16(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt16(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt16(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, 
toInt64(1), (number % 3) = 0, toInt32(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt32(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt32(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt32(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt64(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt64(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt64(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt64(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt64(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt64(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toInt64(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt8(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt8(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt8(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt8(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt16(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt16(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt16(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt16(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt32(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt32(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, 
toInt64(1), (number % 3) = 0, toUInt32(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt32(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toInt64(1), (number % 3) = 0, toUInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt8(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt8(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt8(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt8(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt8(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt8(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt16(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt16(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt16(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt16(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt16(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt16(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt32(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt32(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt32(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt32(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, 
toUInt8(1), (number % 3) = 0, toInt32(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt32(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt64(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt64(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt64(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt64(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt64(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt64(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toInt64(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt8(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt8(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt8(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt8(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt8(2), toUInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt8(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt8(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt16(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt16(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt16(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt16(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt16(2), toUInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt16(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt16(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt32(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT 
multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt32(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt32(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt32(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt32(2), toUInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt32(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt32(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt64(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt64(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt64(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toUInt64(2), toUInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toFloat32(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toFloat32(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toFloat32(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toFloat32(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toFloat32(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toFloat32(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toFloat32(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toFloat32(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toFloat64(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toFloat64(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toFloat64(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toFloat64(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toFloat64(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toFloat64(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toFloat64(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt8(1), (number % 3) = 0, toFloat64(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt8(2), 
toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt8(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt8(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt8(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt8(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt8(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt16(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt16(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt16(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt16(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt16(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt16(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt32(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt32(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt32(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt32(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt32(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt32(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt64(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt64(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt64(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number 
% 3) = 0, toInt64(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt64(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt64(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toInt64(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt8(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt8(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt8(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt8(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt8(2), toUInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt8(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt8(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt16(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt16(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt16(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt16(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt16(2), toUInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt16(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt16(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt32(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt32(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt32(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt32(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT 
multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt32(2), toUInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt32(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt32(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt64(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt64(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt64(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toUInt64(2), toUInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toFloat32(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toFloat32(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toFloat32(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toFloat32(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toFloat32(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toFloat32(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toFloat32(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toFloat32(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toFloat64(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toFloat64(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toFloat64(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toFloat64(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toFloat64(2), toUInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toFloat64(2), toUInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toFloat64(2), toFloat32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt16(1), (number % 3) = 0, toFloat64(2), toFloat64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt8(2), toInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt8(2), toInt16(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt8(2), toInt32(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt8(2), toInt64(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 
3) = 0, toInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt8(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt8(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt16(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt16(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt16(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt16(2), toInt64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt16(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt16(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt32(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt32(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt32(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt32(2), toInt64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt32(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt32(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt64(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt64(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt64(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt64(2), toInt64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt64(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt64(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toInt64(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt8(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt8(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt8(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt8(2), toInt64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt8(2), toUInt64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt8(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt8(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt16(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt16(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt16(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt16(2), toInt64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt16(2), toUInt64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt16(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt16(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt32(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt32(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt32(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt32(2), toInt64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt32(2), toUInt64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt32(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt32(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt64(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt64(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt64(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toUInt64(2), toUInt64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toFloat32(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toFloat32(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toFloat32(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toFloat32(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toFloat32(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toFloat32(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toFloat32(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toFloat32(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toFloat64(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toFloat64(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toFloat64(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toFloat64(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toFloat64(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toFloat64(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toFloat64(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt32(1), (number % 3) = 0, toFloat64(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt64(1), (number % 3) = 0, toUInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt64(1), (number % 3) = 0, toUInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt64(1), (number % 3) = 0, toUInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt64(1), (number % 3) = 0, toUInt8(2), toUInt64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt64(1), (number % 3) = 0, toUInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt64(1), (number % 3) = 0, toUInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt64(1), (number % 3) = 0, toUInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt64(1), (number % 3) = 0, toUInt16(2), toUInt64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt64(1), (number % 3) = 0, toUInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt64(1), (number % 3) = 0, toUInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt64(1), (number % 3) = 0, toUInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt64(1), (number % 3) = 0, toUInt32(2), toUInt64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt64(1), (number % 3) = 0, toUInt64(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt64(1), (number % 3) = 0, toUInt64(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt64(1), (number % 3) = 0, toUInt64(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toUInt64(1), (number % 3) = 0, toUInt64(2), toUInt64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt8(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt8(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt8(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt8(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt8(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt16(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt16(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt16(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt16(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt16(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt32(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt32(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt32(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt32(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toInt32(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt8(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt8(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt8(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt8(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt8(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt16(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt16(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt16(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt16(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt16(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt32(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt32(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt32(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt32(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toUInt32(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toFloat32(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toFloat32(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toFloat32(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toFloat32(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toFloat32(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toFloat32(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toFloat32(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toFloat32(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toFloat64(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toFloat64(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toFloat64(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toFloat64(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toFloat64(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toFloat64(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toFloat64(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat32(1), (number % 3) = 0, toFloat64(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt8(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt8(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt8(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt8(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt8(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt16(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt16(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt16(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt16(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt16(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt32(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt32(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt32(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt32(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toInt32(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt8(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt8(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt8(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt8(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt8(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt8(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt8(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt8(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt16(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt16(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt16(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt16(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt16(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt16(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt16(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt16(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt32(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt32(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt32(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt32(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt32(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt32(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt32(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toUInt32(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toFloat32(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toFloat32(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toFloat32(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toFloat32(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toFloat32(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toFloat32(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toFloat32(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toFloat32(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toFloat64(2), toInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toFloat64(2), toInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toFloat64(2), toInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toFloat64(2), toUInt8(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toFloat64(2), toUInt16(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toFloat64(2), toUInt32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toFloat64(2), toFloat32(3)) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, toFloat64(1), (number % 3) = 0, toFloat64(2), toFloat64(3)) FROM system.numbers LIMIT 10;
+
+SELECT 'Numeric array branches';
+
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt8(1), toInt8(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt16(1), toInt16(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt32(1), toInt32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2)
= 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, 
[toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toInt64(1), toInt64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], 
(number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 
0, [toUInt8(2), toUInt8(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt64(3), toUInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt64(3), toUInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), 
toUInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt64(3), toUInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt64(2), toUInt64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt64(2), toUInt64(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt64(2), toUInt64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toUInt64(2), toUInt64(3)], [toUInt64(3), toUInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt16(3), toUInt16(3)]) FROM 
system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt8(1), toUInt8(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], 
[toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, 
[toUInt8(2), toUInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt64(3), toUInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt64(3), toUInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt64(3), toUInt64(3)]) FROM system.numbers LIMIT 10; 
+SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt64(2), toUInt64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt64(2), toUInt64(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt64(2), toUInt64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toUInt64(2), toUInt64(3)], [toUInt64(3), toUInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), toUInt16(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt16(1), 
toUInt16(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 
0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toInt64(2), toInt64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt64(3), toUInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; 
+SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt64(3), toUInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt64(3), toInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt64(3), toUInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, 
[toUInt64(2), toUInt64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt64(2), toUInt64(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt64(2), toUInt64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toUInt64(2), toUInt64(3)], [toUInt64(3), toUInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt32(1), toUInt32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt64(1), toUInt64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt64(1), toUInt64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt16(3), toUInt16(3)]) 
FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt64(1), toUInt64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt64(1), toUInt64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt64(3), toUInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt64(1), toUInt64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt64(1), toUInt64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt64(1), toUInt64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt64(1), toUInt64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt64(3), toUInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt64(1), toUInt64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt64(1), toUInt64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt64(1), toUInt64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt64(1), toUInt64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt64(3), toUInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt64(1), toUInt64(2)], (number % 3) = 0, [toUInt64(2), toUInt64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt64(1), toUInt64(2)], (number % 3) = 0, [toUInt64(2), toUInt64(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt64(1), toUInt64(2)], (number % 3) = 0, [toUInt64(2), toUInt64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toUInt64(1), toUInt64(2)], (number % 3) = 0, [toUInt64(2), toUInt64(3)], [toUInt64(3), toUInt64(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10; +SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], 
(number % 3) = 0, [toInt8(2), toInt8(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat32(1), toFloat32(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt8(2), toInt8(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt16(2), toInt16(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toInt32(2), toInt32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt8(2), toUInt8(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt16(2), toUInt16(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toUInt32(2), toUInt32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toFloat32(2), toFloat32(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt8(3), toInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt16(3), toInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toInt32(3), toInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt8(3), toUInt8(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt16(3), toUInt16(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toUInt32(3), toUInt32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toFloat32(3), toFloat32(3)]) FROM system.numbers LIMIT 10;
+SELECT multiIf((number % 2) = 0, [toFloat64(1), toFloat64(2)], (number % 3) = 0, [toFloat64(2), toFloat64(3)], [toFloat64(3), toFloat64(3)]) FROM system.numbers LIMIT 10;
+
+SELECT 'String branches';
+
+DROP TABLE IF EXISTS multi_if_check;
+CREATE TABLE multi_if_check(col1 UInt64, col2 String, col3 String, col4 String) ENGINE=TinyLog;
+INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(1, 'A', 'AB', 'ABC');
+INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(2, 'B', 'BC', 'BCD');
+INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(3, 'C', 'CD', 'CDE');
+INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(4, 'D', 'DE', 'DEF');
+INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(5, 'E', 'EF', 'EFG');
+INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(6, 'F', 'FG', 'FGH');
+INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(7, 'G', 'GH', 'GHI');
+INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(8, 'H', 'HI', 'HIJ');
+INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(9, 'I', 'IJ', 'IJK');
+INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(10, 'J', 'JK', 'JKL');
+
+SELECT multiIf((col1 % 2) = 0, col2, (col1 % 3) = 0, col3, col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, (col1 % 3) = 0, col3, toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, (col1 % 3) = 0, col3, toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, (col1 % 3) = 0, col3, 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, (col1 % 3) = 0, toFixedString(col3, 16), col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, (col1 % 3) = 0, toFixedString(col3, 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, (col1 % 3) = 0, toFixedString(col3, 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, (col1 % 3) = 0, toFixedString(col3, 16), 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, (col1 % 3) = 0, toFixedString('bar', 16), col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, (col1 % 3) = 0, toFixedString('bar', 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, (col1 % 3) = 0, toFixedString('bar', 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, (col1 % 3) = 0, toFixedString('bar', 16), 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, (col1 % 3) = 0, 'bar', col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, (col1 % 3) = 0, 'bar', toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, (col1 % 3) = 0, 'bar', toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, (col1 % 3) = 0, 'bar', 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, 1, col3, col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, 1, col3, toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, 1, col3, toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, 1, col3, 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, 1, toFixedString(col3, 16), col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, 1, toFixedString(col3, 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, 1, toFixedString(col3, 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, 1, toFixedString(col3, 16), 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, 1, toFixedString('bar', 16), col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, 1, toFixedString('bar', 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, 1, toFixedString('bar', 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, 1, toFixedString('bar', 16), 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, 1, 'bar', col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, 1, 'bar', toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, 1, 'bar', toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, col2, 1, 'bar', 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), (col1 % 3) = 0, col3, col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), (col1 % 3) = 0, col3, toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), (col1 % 3) = 0, col3, toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), (col1 % 3) = 0, col3, 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), (col1 % 3) = 0, toFixedString(col3, 16), col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), (col1 % 3) = 0, toFixedString(col3, 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), (col1 % 3) = 0, toFixedString(col3, 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), (col1 % 3) = 0, toFixedString(col3, 16), 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), (col1 % 3) = 0, toFixedString('bar', 16), col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), (col1 % 3) = 0, toFixedString('bar', 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), (col1 % 3) = 0, toFixedString('bar', 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), (col1 % 3) = 0, toFixedString('bar', 16), 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), (col1 % 3) = 0, 'bar', col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), (col1 % 3) = 0, 'bar', toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), (col1 % 3) = 0, 'bar', toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), (col1 % 3) = 0, 'bar', 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), 1, col3, col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), 1, col3, toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), 1, col3, toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), 1, col3, 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), 1, toFixedString(col3, 16), col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), 1, toFixedString(col3, 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), 1, toFixedString(col3, 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), 1, toFixedString(col3, 16), 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), 1, toFixedString('bar', 16), col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), 1, toFixedString('bar', 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), 1, toFixedString('bar', 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), 1, toFixedString('bar', 16), 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), 1, 'bar', col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), 1, 'bar', toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), 1, 'bar', toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString(col2, 16), 1, 'bar', 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), (col1 % 3) = 0, col3, col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), (col1 % 3) = 0, col3, toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), (col1 % 3) = 0, col3, toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), (col1 % 3) = 0, col3, 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), (col1 % 3) = 0, toFixedString(col3, 16), col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), (col1 % 3) = 0, toFixedString(col3, 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), (col1 % 3) = 0, toFixedString(col3, 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), (col1 % 3) = 0, toFixedString(col3, 16), 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), (col1 % 3) = 0, toFixedString('bar', 16), col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), (col1 % 3) = 0, toFixedString('bar', 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), (col1 % 3) = 0, toFixedString('bar', 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), (col1 % 3) = 0, toFixedString('bar', 16), 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), (col1 % 3) = 0, 'bar', col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), (col1 % 3) = 0, 'bar', toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), (col1 % 3) = 0, 'bar', toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), (col1 % 3) = 0, 'bar', 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), 1, col3, col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), 1, col3, toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), 1, col3, toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), 1, col3, 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), 1, toFixedString(col3, 16), col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), 1, toFixedString(col3, 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), 1, toFixedString(col3, 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), 1, toFixedString(col3, 16), 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), 1, toFixedString('bar', 16), col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), 1, toFixedString('bar', 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), 1, toFixedString('bar', 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), 1, toFixedString('bar', 16), 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), 1, 'bar', col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), 1, 'bar', toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), 1, 'bar', toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, toFixedString('foo', 16), 1, 'bar', 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', (col1 % 3) = 0, col3, col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', (col1 % 3) = 0, col3, toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', (col1 % 3) = 0, col3, toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', (col1 % 3) = 0, col3, 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', (col1 % 3) = 0, toFixedString(col3, 16), col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', (col1 % 3) = 0, toFixedString(col3, 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', (col1 % 3) = 0, toFixedString(col3, 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', (col1 % 3) = 0, toFixedString(col3, 16), 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', (col1 % 3) = 0, toFixedString('bar', 16), col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', (col1 % 3) = 0, toFixedString('bar', 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', (col1 % 3) = 0, toFixedString('bar', 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', (col1 % 3) = 0, toFixedString('bar', 16), 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', (col1 % 3) = 0, 'bar', col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', (col1 % 3) = 0, 'bar', toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', (col1 % 3) = 0, 'bar', toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', (col1 % 3) = 0, 'bar', 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', 1, col3, col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', 1, col3, toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', 1, col3, toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', 1, col3, 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', 1, toFixedString(col3, 16), col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', 1, toFixedString(col3, 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', 1, toFixedString(col3, 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', 1, toFixedString(col3, 16), 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', 1, toFixedString('bar', 16), col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', 1, toFixedString('bar', 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', 1, toFixedString('bar', 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', 1, toFixedString('bar', 16), 'baz') FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', 1, 'bar', col4) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', 1, 'bar', toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', 1, 'bar', toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, 'foo', 1, 'bar', 'baz') FROM multi_if_check;
+SELECT multiIf(1, col2, (col1 % 3) = 0, col3, col4) FROM multi_if_check;
+SELECT multiIf(1, col2, (col1 % 3) = 0, col3, toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, col2, (col1 % 3) = 0, col3, toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, col2, (col1 % 3) = 0, col3, 'baz') FROM multi_if_check;
+SELECT multiIf(1, col2, (col1 % 3) = 0, toFixedString(col3, 16), col4) FROM multi_if_check;
+SELECT multiIf(1, col2, (col1 % 3) = 0, toFixedString(col3, 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, col2, (col1 % 3) = 0, toFixedString(col3, 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, col2, (col1 % 3) = 0, toFixedString(col3, 16), 'baz') FROM multi_if_check;
+SELECT multiIf(1, col2, (col1 % 3) = 0, toFixedString('bar', 16), col4) FROM multi_if_check;
+SELECT multiIf(1, col2, (col1 % 3) = 0, toFixedString('bar', 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, col2, (col1 % 3) = 0, toFixedString('bar', 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, col2, (col1 % 3) = 0, toFixedString('bar', 16), 'baz') FROM multi_if_check;
+SELECT multiIf(1, col2, (col1 % 3) = 0, 'bar', col4) FROM multi_if_check;
+SELECT multiIf(1, col2, (col1 % 3) = 0, 'bar', toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, col2, (col1 % 3) = 0, 'bar', toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, col2, (col1 % 3) = 0, 'bar', 'baz') FROM multi_if_check;
+SELECT multiIf(1, col2, 1, col3, col4) FROM multi_if_check;
+SELECT multiIf(1, col2, 1, col3, toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, col2, 1, col3, toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, col2, 1, col3, 'baz') FROM multi_if_check;
+SELECT multiIf(1, col2, 1, toFixedString(col3, 16), col4) FROM multi_if_check;
+SELECT multiIf(1, col2, 1, toFixedString(col3, 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, col2, 1, toFixedString(col3, 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, col2, 1, toFixedString(col3, 16), 'baz') FROM multi_if_check;
+SELECT multiIf(1, col2, 1, toFixedString('bar', 16), col4) FROM multi_if_check;
+SELECT multiIf(1, col2, 1, toFixedString('bar', 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, col2, 1, toFixedString('bar', 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, col2, 1, toFixedString('bar', 16), 'baz') FROM multi_if_check;
+SELECT multiIf(1, col2, 1, 'bar', col4) FROM multi_if_check;
+SELECT multiIf(1, col2, 1, 'bar', toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, col2, 1, 'bar', toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, col2, 1, 'bar', 'baz') FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), (col1 % 3) = 0, col3, col4) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), (col1 % 3) = 0, col3, toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), (col1 % 3) = 0, col3, toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), (col1 % 3) = 0, col3, 'baz') FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), (col1 % 3) = 0, toFixedString(col3, 16), col4) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), (col1 % 3) = 0, toFixedString(col3, 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), (col1 % 3) = 0, toFixedString(col3, 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), (col1 % 3) = 0, toFixedString(col3, 16), 'baz') FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), (col1 % 3) = 0, toFixedString('bar', 16), col4) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), (col1 % 3) = 0, toFixedString('bar', 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), (col1 % 3) = 0, toFixedString('bar', 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), (col1 % 3) = 0, toFixedString('bar', 16), 'baz') FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), (col1 % 3) = 0, 'bar', col4) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), (col1 % 3) = 0, 'bar', toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), (col1 % 3) = 0, 'bar', toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), (col1 % 3) = 0, 'bar', 'baz') FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), 1, col3, col4) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), 1, col3, toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), 1, col3, toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), 1, col3, 'baz') FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), 1, toFixedString(col3, 16), col4) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), 1, toFixedString(col3, 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), 1, toFixedString(col3, 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), 1, toFixedString(col3, 16), 'baz') FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), 1, toFixedString('bar', 16), col4) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), 1, toFixedString('bar', 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), 1, toFixedString('bar', 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), 1, toFixedString('bar', 16), 'baz') FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), 1, 'bar', col4) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), 1, 'bar', toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), 1, 'bar', toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString(col2, 16), 1, 'bar', 'baz') FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), (col1 % 3) = 0, col3, col4) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), (col1 % 3) = 0, col3, toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), (col1 % 3) = 0, col3, toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), (col1 % 3) = 0, col3, 'baz') FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), (col1 % 3) = 0, toFixedString(col3, 16), col4) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), (col1 % 3) = 0, toFixedString(col3, 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), (col1 % 3) = 0, toFixedString(col3, 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), (col1 % 3) = 0, toFixedString(col3, 16), 'baz') FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), (col1 % 3) = 0, toFixedString('bar', 16), col4) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), (col1 % 3) = 0, toFixedString('bar', 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), (col1 % 3) = 0, toFixedString('bar', 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), (col1 % 3) = 0, toFixedString('bar', 16), 'baz') FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), (col1 % 3) = 0, 'bar', col4) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), (col1 % 3) = 0, 'bar', toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), (col1 % 3) = 0, 'bar', toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), (col1 % 3) = 0, 'bar', 'baz') FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), 1, col3, col4) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), 1, col3, toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), 1, col3, toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), 1, col3, 'baz') FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), 1, toFixedString(col3, 16), col4) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), 1, toFixedString(col3, 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), 1, toFixedString(col3, 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), 1, toFixedString(col3, 16), 'baz') FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), 1, toFixedString('bar', 16), col4) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), 1, toFixedString('bar', 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), 1, toFixedString('bar', 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), 1, toFixedString('bar', 16), 'baz') FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), 1, 'bar', col4) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), 1, 'bar', toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), 1, 'bar', toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, toFixedString('foo', 16), 1, 'bar', 'baz') FROM multi_if_check;
+SELECT multiIf(1, 'foo', (col1 % 3) = 0, col3, col4) FROM multi_if_check;
+SELECT multiIf(1, 'foo', (col1 % 3) = 0, col3, toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, 'foo', (col1 % 3) = 0, col3, toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, 'foo', (col1 % 3) = 0, col3, 'baz') FROM multi_if_check;
+SELECT multiIf(1, 'foo', (col1 % 3) = 0, toFixedString(col3, 16), col4) FROM multi_if_check;
+SELECT multiIf(1, 'foo', (col1 % 3) = 0, toFixedString(col3, 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, 'foo', (col1 % 3) = 0, toFixedString(col3, 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, 'foo', (col1 % 3) = 0, toFixedString(col3, 16), 'baz') FROM multi_if_check;
+SELECT multiIf(1, 'foo', (col1 % 3) = 0, toFixedString('bar', 16), col4) FROM multi_if_check;
+SELECT multiIf(1, 'foo', (col1 % 3) = 0, toFixedString('bar', 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, 'foo', (col1 % 3) = 0, toFixedString('bar', 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, 'foo', (col1 % 3) = 0, toFixedString('bar', 16), 'baz') FROM multi_if_check;
+SELECT multiIf(1, 'foo', (col1 % 3) = 0, 'bar', col4) FROM multi_if_check;
+SELECT multiIf(1, 'foo', (col1 % 3) = 0, 'bar', toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, 'foo', (col1 % 3) = 0, 'bar', toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, 'foo', (col1 % 3) = 0, 'bar', 'baz') FROM multi_if_check;
+SELECT multiIf(1, 'foo', 1, col3, col4) FROM multi_if_check;
+SELECT multiIf(1, 'foo', 1, col3, toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, 'foo', 1, col3, toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, 'foo', 1, col3, 'baz') FROM multi_if_check;
+SELECT multiIf(1, 'foo', 1, toFixedString(col3, 16), col4) FROM multi_if_check;
+SELECT multiIf(1, 'foo', 1, toFixedString(col3, 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, 'foo', 1, toFixedString(col3, 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, 'foo', 1, toFixedString(col3, 16), 'baz') FROM multi_if_check;
+SELECT multiIf(1, 'foo', 1, toFixedString('bar', 16), col4) FROM multi_if_check;
+SELECT multiIf(1, 'foo', 1, toFixedString('bar', 16), toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, 'foo', 1, toFixedString('bar', 16), toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, 'foo', 1, toFixedString('bar', 16), 'baz') FROM multi_if_check;
+SELECT multiIf(1, 'foo', 1, 'bar', col4) FROM multi_if_check;
+SELECT multiIf(1, 'foo', 1, 'bar', toFixedString(col4, 16)) FROM multi_if_check;
+SELECT multiIf(1, 'foo', 1, 'bar', toFixedString('baz', 16)) FROM multi_if_check;
+SELECT multiIf(1, 'foo', 1, 'bar', 'baz') FROM multi_if_check;
+
+DROP TABLE IF EXISTS multi_if_check;
+
+SELECT 'String array branches';
+
+CREATE TABLE multi_if_check(col1 UInt64, col2 String, col3 String, col4 String, col5 String, col6 String, col7 String) ENGINE=TinyLog;
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(1, 'A', 'AB', 'ABC', 'ABCD', 'ABCDE', 'ABCDEF');
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(2, 'B', 'BC', 'BCD', 'BCDE', 'BCDEF', 'BCDEFG');
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(3, 'C', 'CD', 'CDE', 'CDEF', 'CDEFG', 'CDEFGH');
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(4, 'D', 'DE', 'DEF', 'DEFG', 'DEFGH', 'DEFGHI');
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(5, 'E', 'EF', 'EFG', 'EFGH', 'EFGHI', 'EFGHIJ');
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(6, 'F', 'FG', 'FGH', 'FGHI', 'FGHIJ', 'FGHIJK');
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(7, 'G', 'GH', 'GHI', 'GHIJ', 'GHIJK', 'GHIJKL');
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(8, 'H', 'HI', 'HIJ', 'HIJK', 'HIJKL', 'HIJKLM');
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(9, 'I', 'IJ', 'IJK', 'IJKL', 'IJKLM', 'IJKLMN');
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(10, 'J', 'JK', 'JKL', 'JKLM', 'JKLMN', 'JKLMNO');
+
+SELECT multiIf((col1 % 2) = 0, [col2, col3], (col1 % 3) = 0, [col4, col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], (col1 % 3) = 0, [col4, col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], (col1 % 3) = 0, [col4, col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], (col1 % 3) = 0, [col4, col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], (col1 % 3) = 0, [col4, 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], (col1 % 3) = 0, [col4, 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], (col1 % 3) = 0, [col4, 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], (col1 % 3) = 0, [col4, 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], (col1 % 3) = 0, ['foo', col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], (col1 % 3) = 0, ['foo', col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], (col1 % 3) = 0, ['foo', col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], (col1 % 3) = 0, ['foo', col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], (col1 % 3) = 0, ['foo', 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], (col1 % 3) = 0, ['foo', 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], (col1 % 3) = 0, ['foo', 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], (col1 % 3) = 0, ['foo', 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], 1, [col4, col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], 1, [col4, col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], 1, [col4, col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], 1, [col4, col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], 1, [col4, 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], 1, [col4, 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], 1, [col4, 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], 1, [col4, 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], 1, ['foo', col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], 1, ['foo', col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], 1, ['foo', col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], 1, ['foo', col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], 1, ['foo', 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], 1, ['foo', 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], 1, ['foo', 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, col3], 1, ['foo', 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], (col1 % 3) = 0, [col4, col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], (col1 % 3) = 0, [col4, col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], (col1 % 3) = 0, [col4, col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], (col1 % 3) = 0, [col4, col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], (col1 % 3) = 0, [col4, 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], (col1 % 3) = 0, [col4, 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], (col1 % 3) = 0, [col4, 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], (col1 % 3) = 0, [col4, 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], (col1 % 3) = 0, ['foo', col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], (col1 % 3) = 0, ['foo', col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], (col1 % 3) = 0, ['foo', col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], (col1 % 3) = 0, ['foo', col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], (col1 % 3) = 0, ['foo', 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], (col1 % 3) = 0, ['foo', 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], (col1 % 3) = 0, ['foo', 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], (col1 % 3) = 0, ['foo', 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], 1, [col4, col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], 1, [col4, col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], 1, [col4, col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], 1, [col4, col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], 1, [col4, 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], 1, [col4, 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], 1, [col4, 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], 1, [col4, 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], 1, ['foo', col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], 1, ['foo', col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], 1, ['foo', col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], 1, ['foo', col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], 1, ['foo', 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], 1, ['foo', 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], 1, ['foo', 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, [col2, 'bar'], 1, ['foo', 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], (col1 % 3) = 0, [col4, col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], (col1 % 3) = 0, [col4, col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], (col1 % 3) = 0, [col4, col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], (col1 % 3) = 0, [col4, col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], (col1 % 3) = 0, [col4, 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], (col1 % 3) = 0, [col4, 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], (col1 % 3) = 0, [col4, 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], (col1 % 3) = 0, [col4, 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], (col1 % 3) = 0, ['foo', col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], (col1 % 3) = 0, ['foo', col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], (col1 % 3) = 0, ['foo', col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], (col1 % 3) = 0, ['foo', col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], (col1 % 3) = 0, ['foo', 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], (col1 % 3) = 0, ['foo', 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], (col1 % 3) = 0, ['foo', 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], (col1 % 3) = 0, ['foo', 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], 1, [col4, col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], 1, [col4, col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], 1, [col4, col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], 1, [col4, col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], 1, [col4, 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], 1, [col4, 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], 1, [col4, 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], 1, [col4, 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], 1, ['foo', col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], 1, ['foo', col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], 1, ['foo', col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], 1, ['foo', col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], 1, ['foo', 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], 1, ['foo', 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], 1, ['foo', 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', col3], 1, ['foo', 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], (col1 % 3) = 0, [col4, col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], (col1 % 3) = 0, [col4, col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], (col1 % 3) = 0, [col4, col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], (col1 % 3) = 0, [col4, col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], (col1 % 3) = 0, [col4, 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], (col1 % 3) = 0, [col4, 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], (col1 % 3) = 0, [col4, 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], (col1 % 3) = 0, [col4, 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], (col1 % 3) = 0, ['foo', col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], (col1 % 3) = 0, ['foo', col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], (col1 % 3) = 0, ['foo', col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], (col1 % 3) = 0, ['foo', col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], (col1 % 3) = 0, ['foo', 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], (col1 % 3) = 0, ['foo', 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], (col1 % 3) = 0, ['foo', 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], (col1 % 3) = 0, ['foo', 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], 1, [col4, col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], 1, [col4, col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], 1, [col4, col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], 1, [col4, col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], 1, [col4, 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], 1, [col4, 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], 1, [col4, 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], 1, [col4, 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], 1, ['foo', col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], 1, ['foo', col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], 1, ['foo', col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], 1, ['foo', col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], 1, ['foo', 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], 1, ['foo', 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], 1, ['foo', 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf((col1 % 2) = 0, ['foo', 'bar'], 1, ['foo', 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], (col1 % 3) = 0, [col4, col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], (col1 % 3) = 0, [col4, col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], (col1 % 3) = 0, [col4, col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], (col1 % 3) = 0, [col4, col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], (col1 % 3) = 0, [col4, 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], (col1 % 3) = 0, [col4, 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], (col1 % 3) = 0, [col4, 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], (col1 % 3) = 0, [col4, 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], (col1 % 3) = 0, ['foo', col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], (col1 % 3) = 0, ['foo', col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], (col1 % 3) = 0, ['foo', col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], (col1 % 3) = 0, ['foo', col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], (col1 % 3) = 0, ['foo', 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], (col1 % 3) = 0, ['foo', 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], (col1 % 3) = 0, ['foo', 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], (col1 % 3) = 0, ['foo', 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], 1, [col4, col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], 1, [col4, col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], 1, [col4, col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], 1, [col4, col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], 1, [col4, 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], 1, [col4, 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], 1, [col4, 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], 1, [col4, 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], 1, ['foo', col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], 1, ['foo', col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], 1, ['foo', col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], 1, ['foo', col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], 1, ['foo', 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], 1, ['foo', 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], 1, ['foo', 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, col3], 1, ['foo', 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], (col1 % 3) = 0, [col4, col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], (col1 % 3) = 0, [col4, col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], (col1 % 3) = 0, [col4, col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], (col1 % 3) = 0, [col4, col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], (col1 % 3) = 0, [col4, 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], (col1 % 3) = 0, [col4, 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], (col1 % 3) = 0, [col4, 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], (col1 % 3) = 0, [col4, 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], (col1 % 3) = 0, ['foo', col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], (col1 % 3) = 0, ['foo', col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], (col1 % 3) = 0, ['foo', col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], (col1 % 3) = 0, ['foo', col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], (col1 % 3) = 0, ['foo', 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], (col1 % 3) = 0, ['foo', 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], (col1 % 3) = 0, ['foo', 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], (col1 % 3) = 0, ['foo', 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], 1, [col4, col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], 1, [col4, col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], 1, [col4, col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], 1, [col4, col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], 1, [col4, 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], 1, [col4, 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], 1, [col4, 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], 1, [col4, 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], 1, ['foo', col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], 1, ['foo', col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], 1, ['foo', col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], 1, ['foo', col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], 1, ['foo', 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], 1, ['foo', 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], 1, ['foo', 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, [col2, 'bar'], 1, ['foo', 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], (col1 % 3) = 0, [col4, col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], (col1 % 3) = 0, [col4, col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], (col1 % 3) = 0, [col4, col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], (col1 % 3) = 0, [col4, col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], (col1 % 3) = 0, [col4, 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], (col1 % 3) = 0, [col4, 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], (col1 % 3) = 0, [col4, 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], (col1 % 3) = 0, [col4, 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], (col1 % 3) = 0, ['foo', col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], (col1 % 3) = 0, ['foo', col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], (col1 % 3) = 0, ['foo', col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], (col1 % 3) = 0, ['foo', col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], (col1 % 3) = 0, ['foo', 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], (col1 % 3) = 0, ['foo', 'bar'], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], (col1 % 3) = 0, ['foo', 'bar'], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], (col1 % 3) = 0, ['foo', 'bar'], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], 1, [col4, col5], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], 1, [col4, col5], [col6, 'bar']) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], 1, [col4, col5], ['foo', col7]) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], 1, [col4, col5], ['foo', 'bar']) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], 1, [col4, 'bar'], [col6, col7]) FROM multi_if_check;
+SELECT multiIf(1, ['foo', col3], 1, [col4, 'bar'], [col6, 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', col3], 1, [col4, 'bar'], ['foo', col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', col3], 1, [col4, 'bar'], ['foo', 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', col3], 1, ['foo', col5], [col6, col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', col3], 1, ['foo', col5], [col6, 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', col3], 1, ['foo', col5], ['foo', col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', col3], 1, ['foo', col5], ['foo', 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', col3], 1, ['foo', 'bar'], [col6, col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', col3], 1, ['foo', 'bar'], [col6, 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', col3], 1, ['foo', 'bar'], ['foo', col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', col3], 1, ['foo', 'bar'], ['foo', 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], (col1 % 3) = 0, [col4, col5], [col6, col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], (col1 % 3) = 0, [col4, col5], [col6, 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], (col1 % 3) = 0, [col4, col5], ['foo', col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], (col1 % 3) = 0, [col4, col5], ['foo', 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], (col1 % 3) = 0, [col4, 'bar'], [col6, col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], (col1 % 3) = 0, [col4, 'bar'], [col6, 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], (col1 % 3) = 0, [col4, 'bar'], ['foo', col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], (col1 % 3) = 0, [col4, 'bar'], ['foo', 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], (col1 % 3) = 0, ['foo', col5], [col6, col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], (col1 % 3) = 0, ['foo', col5], [col6, 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], (col1 % 3) = 0, ['foo', col5], ['foo', col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], (col1 % 3) = 0, ['foo', col5], ['foo', 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], (col1 % 3) = 0, ['foo', 'bar'], [col6, col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], (col1 % 3) = 0, ['foo', 'bar'], [col6, 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], (col1 % 3) = 0, ['foo', 'bar'], ['foo', col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], (col1 % 3) = 0, ['foo', 'bar'], ['foo', 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], 1, [col4, col5], [col6, col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], 1, [col4, col5], [col6, 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], 1, [col4, col5], ['foo', col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], 1, [col4, col5], ['foo', 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], 1, [col4, 'bar'], [col6, col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], 1, [col4, 'bar'], [col6, 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], 1, [col4, 'bar'], ['foo', col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], 1, [col4, 'bar'], ['foo', 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], 1, ['foo', col5], [col6, col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], 1, ['foo', col5], [col6, 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], 1, ['foo', col5], ['foo', 
col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], 1, ['foo', col5], ['foo', 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], 1, ['foo', 'bar'], [col6, col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], 1, ['foo', 'bar'], [col6, 'bar']) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], 1, ['foo', 'bar'], ['foo', col7]) FROM multi_if_check; +SELECT multiIf(1, ['foo', 'bar'], 1, ['foo', 'bar'], ['foo', 'bar']) FROM multi_if_check; + +DROP TABLE IF EXISTS multi_if_check; + +SELECT 'Miscellaneous'; + +CREATE TABLE multi_if_check(col1 UInt64) ENGINE=TinyLog; +INSERT INTO multi_if_check(col1) VALUES (11225),(20273),(213),(240),(12),(187),(29252); +INSERT INTO multi_if_check(col1) VALUES (1),(65),(208),(9),(154),(20),(191),(2),(66),(970),(56),(144),(49),(10317),(145),(21623),(225); +INSERT INTO multi_if_check(col1) VALUES (10658),(11182),(11036),(194),(16),(54),(23),(172),(10748),(52),(63),(11282),(237),(55),(190); +INSERT INTO multi_if_check(col1) VALUES (11162),(197),(4),(64),(17),(59),(51),(1091),(14),(13),(10645),(73),(157),(169),(43),(47),(11111); +INSERT INTO multi_if_check(col1) VALUES (11002),(99),(42),(142),(40),(20259),(15),(976),(11220),(193),(21),(3),(973),(35),(148),(10277),(10253); +INSERT INTO multi_if_check(col1) VALUES (155),(972),(159),(959),(39),(50),(236),(969),(114678),(143),(37),(20167),(20086),(10536),(28948),(10355); +INSERT INTO multi_if_check(col1) VALUES (7),(192),(11119),(67),(6),(8),(10841),(141),(10951),(222),(10752),(10363),(10842),(24881),(11287),(198); +INSERT INTO multi_if_check(col1) VALUES (11453),(26),(147),(44),(19),(76),(29349),(10987),(28896),(75),(11168),(11084),(62),(46),(10747),(24); +INSERT INTO multi_if_check(col1) VALUES (10664),(966),(11256),(53),(10945),(10871),(158),(20544),(20540),(163),(10262),(11080),(0),(11159),(239); +INSERT INTO multi_if_check(col1) VALUES (10995),(11),(11143),(11212),(24893),(10318),(68),(21949),(28),(971),(153),(10689),(38),(11474),(11029); +INSERT INTO multi_if_check(col1) VALUES (11067),(21636),(965),(10761),(10324),(164),(977),(45),(10),(10944),(964),(20224),(20536),(102880),(33); +INSERT INTO multi_if_check(col1) VALUES (11232),(10315),(78),(11217),(10373),(11514),(10343),(48),(22),(20197),(10367),(36),(11116),(195),(10274); +INSERT INTO multi_if_check(col1) VALUES (115),(11069),(30),(11266),(10891),(11235),(112529),(206),(10393),(10712),(10649),(11164),(10511),(10295); +INSERT INTO multi_if_check(col1) VALUES (11139),(10347),(146),(11079),(961),(11231),(10358),(10653),(11358),(165),(11115),(1095),(960),(10992),(20221); +INSERT INTO multi_if_check(col1) VALUES (5),(10716),(102),(974),(10691),(102444),(11391),(10897),(10306),(10298),(10896),(21609),(118),(11148),(11451); +INSERT INTO multi_if_check(col1) VALUES (10398),(221),(975),(80),(162),(28051),(10838),(10765),(1058),(11464),(74),(21134),(21422),(10313),(28401),(20539); +INSERT INTO multi_if_check(col1) VALUES (10418),(235),(25),(179),(26030),(28381),(11091),(27398),(11108),(968),(10300),(11469),(35393),(10733),(11283),(11202); + +SELECT DISTINCT col1, multiIf(col1 != 213, 'Москва', 'Мир') AS k FROM multi_if_check LIMIT 10; +DROP TABLE IF EXISTS multi_if_check; + +SELECT 'Constant result'; + +CREATE TABLE multi_if_check(value String) ENGINE=TinyLog; +INSERT INTO multi_if_check VALUES ('1'); + +SELECT multiIf(2 > 1, 'Value', 'ElseValue') as a, isConstant(a) FROM multi_if_check; +SELECT multiIf(2 > 1, 'Value', value) as a, isConstant(a) FROM multi_if_check; +SELECT multiIf(value == '1', 'ValueFirst', 2 > 1, 'ValueSecond', 
'ElseValue') as a, isConstant(a) FROM multi_if_check; +SELECT multiIf(1 > 2, 'Value', 'ElseValue') as a, isConstant(a); + +DROP TABLE IF EXISTS multi_if_check; diff --git a/parser/testdata/00327_summing_composite_nested/ast.json b/parser/testdata/00327_summing_composite_nested/ast.json new file mode 100644 index 000000000..04ffa1fb4 --- /dev/null +++ b/parser/testdata/00327_summing_composite_nested/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001341973, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00327_summing_composite_nested/metadata.json b/parser/testdata/00327_summing_composite_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00327_summing_composite_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00327_summing_composite_nested/query.sql b/parser/testdata/00327_summing_composite_nested/query.sql new file mode 100644 index 000000000..701735a71 --- /dev/null +++ b/parser/testdata/00327_summing_composite_nested/query.sql @@ -0,0 +1,31 @@ +SET optimize_on_insert = 0; + +DROP TABLE IF EXISTS summing_composite_key; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE summing_composite_key (d Date, k UInt64, FirstMap Nested(k1 UInt32, k2ID Int8, s Float64), SecondMap Nested(k1ID UInt64, k2Key String, k3Type Int32, s Int64)) ENGINE = SummingMergeTree(d, k, 1); + +INSERT INTO summing_composite_key VALUES ('2000-01-01', 1, [1,2], ['3','4'], [10,11], [0,1,2], [3,4,5], [-1,-2,-3], [1,10,100]), ('2000-01-01', 1, [2,1], ['4','3'], [20,22], [2,2,1], [5,5,0], [-3,-3,-33], [10,100,1000]), ('2000-01-01', 2, [1,2], ['3','4'], [10,11], [0,1,2], [3,4,5], [-1,-2,-3], [1,10,100]), ('2000-01-01', 2, [2,1,1], ['4','3','3'], [20,22,33], [2,2], [5,5], [-3,-3], [10,100]), ('2000-01-01', 2, [1,2], ['3','4'], [10,11], [0,1,2], [3,4,5], [-1,-2,-3], [1,10,100]); + +SELECT * FROM summing_composite_key ORDER BY d, k, FirstMap.k1, FirstMap.k2ID, FirstMap.s, SecondMap.k1ID, SecondMap.k2Key, SecondMap.k3Type, SecondMap.s; + +SELECT d, k, m.k1, m.k2ID, m.s FROM summing_composite_key ARRAY JOIN FirstMap AS m ORDER BY d, k, m.k1, m.k2ID, m.s, SecondMap.k1ID, SecondMap.k2Key, SecondMap.k3Type, SecondMap.s; +SELECT d, k, m.k1, m.k2ID, sum(m.s) FROM summing_composite_key ARRAY JOIN FirstMap AS m GROUP BY d, k, m.k1, m.k2ID ORDER BY d, k, m.k1, m.k2ID; +SELECT d, k, m.k1, m.k2ID, m.s FROM summing_composite_key FINAL ARRAY JOIN FirstMap AS m ORDER BY d, k, m.k1, m.k2ID, m.s; + +SELECT d, k, m.k1ID, m.k2Key, m.k3Type, m.s FROM summing_composite_key ARRAY JOIN SecondMap AS m ORDER BY d, k, m.k1ID, m.k2Key, m.k3Type, m.s; +SELECT d, k, m.k1ID, m.k2Key, m.k3Type, sum(m.s) FROM summing_composite_key ARRAY JOIN SecondMap AS m GROUP BY d, k, m.k1ID, m.k2Key, m.k3Type ORDER BY d, k, m.k1ID, m.k2Key, m.k3Type; +SELECT d, k, m.k1ID, m.k2Key, m.k3Type, m.s FROM summing_composite_key FINAL ARRAY JOIN SecondMap AS m ORDER BY d, k, m.k1ID, m.k2Key, m.k3Type, m.s; + +OPTIMIZE TABLE summing_composite_key PARTITION 200001 FINAL; + +SELECT * FROM summing_composite_key ORDER BY d, k, FirstMap.k1, FirstMap.k2ID, FirstMap.s, SecondMap.k1ID, SecondMap.k2Key, SecondMap.k3Type, SecondMap.s; + +SELECT d, k, m.k1, m.k2ID, m.s FROM summing_composite_key ARRAY JOIN FirstMap AS m ORDER BY d, k, m.k1, m.k2ID, m.s; +SELECT d, k, m.k1, m.k2ID, sum(m.s) FROM summing_composite_key ARRAY JOIN 
FirstMap AS m GROUP BY d, k, m.k1, m.k2ID ORDER BY d, k, m.k1, m.k2ID; +SELECT d, k, m.k1, m.k2ID, m.s FROM summing_composite_key FINAL ARRAY JOIN FirstMap AS m ORDER BY d, k, m.k1, m.k2ID, m.s; + +SELECT d, k, m.k1ID, m.k2Key, m.k3Type, m.s FROM summing_composite_key ARRAY JOIN SecondMap AS m ORDER BY d, k, m.k1ID, m.k2Key, m.k3Type, m.s; +SELECT d, k, m.k1ID, m.k2Key, m.k3Type, sum(m.s) FROM summing_composite_key ARRAY JOIN SecondMap AS m GROUP BY d, k, m.k1ID, m.k2Key, m.k3Type ORDER BY d, k, m.k1ID, m.k2Key, m.k3Type; +SELECT d, k, m.k1ID, m.k2Key, m.k3Type, m.s FROM summing_composite_key FINAL ARRAY JOIN SecondMap AS m ORDER BY d, k, m.k1ID, m.k2Key, m.k3Type, m.s; + +DROP TABLE summing_composite_key; diff --git a/parser/testdata/00328_long_case_construction/ast.json b/parser/testdata/00328_long_case_construction/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00328_long_case_construction/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00328_long_case_construction/metadata.json b/parser/testdata/00328_long_case_construction/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00328_long_case_construction/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00328_long_case_construction/query.sql b/parser/testdata/00328_long_case_construction/query.sql new file mode 100644 index 000000000..4f7b49c2a --- /dev/null +++ b/parser/testdata/00328_long_case_construction/query.sql @@ -0,0 +1,1927 @@ +-- Tags: long, no-msan + +/* Trivial case */ + +SELECT CASE WHEN 1 THEN 2 WHEN 3 THEN 4 ELSE 5 END; +SELECT CASE WHEN 1 THEN 'A' WHEN 2 THEN 'BC' ELSE 'DEF' END; +SELECT CASE WHEN 1 THEN toFixedString('A', 16) WHEN 2 THEN toFixedString('BC', 16) ELSE toFixedString('DEF', 16) END; +SELECT CASE WHEN 1 THEN [1,2] WHEN 2 THEN [3,4] ELSE [5,6] END; +SELECT CASE WHEN 1 THEN ['A','B'] WHEN 2 THEN ['C','D'] ELSE ['E','F'] END; + +/* No CASE expression. 
Numeric clauses */ + +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN 
toInt32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN 
(number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt8(1) WHEN (number % 3) 
= 0 THEN toFloat64(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; 
+SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN 
(number % 3) = 0 THEN toUInt16(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN 
toFloat64(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt16(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT 
CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 
0 THEN toUInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE 
toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) 
= 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN toInt64(1) WHEN (number % 3) = 0 THEN toUInt32(2) 
ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt64(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toUInt64(2) ELSE toUInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt8(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt64(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toUInt64(2) ELSE toUInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt16(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toInt64(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt64(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toUInt64(2) ELSE toUInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt64(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt64(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt64(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt64(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt64(1) WHEN (number % 3) = 0 THEN toUInt64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt64(1) WHEN (number % 3) = 0 THEN toUInt64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt64(1) WHEN (number % 3) = 0 THEN toUInt64(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toUInt64(1) WHEN (number % 3) = 0 THEN toUInt64(2) ELSE toUInt64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat32(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt8(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt16(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toInt32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt8(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt16(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toUInt32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toFloat32(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt8(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt16(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toUInt32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toFloat32(3) END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN toFloat64(1) WHEN (number % 3) = 0 THEN toFloat64(2) ELSE toFloat64(3) END FROM system.numbers LIMIT 10;
+
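(Editor's aside, not part of the patch: the block above is a Cartesian product over ClickHouse's numeric cast functions — for every ordered combination of branch types that shares a common supertype there is one query, so CASE result-type promotion is exercised pairwise. A minimal Go sketch of such a generator follows, purely as an assumption for illustration; it is not the generator actually used for this file, and unlike the file it naively emits all combinations rather than skipping those with no common supertype, e.g. signed Int* together with UInt64.)

package main

import "fmt"

func main() {
	// Numeric type names inferred from the statements above (assumption).
	types := []string{
		"Int8", "Int16", "Int32", "Int64",
		"UInt8", "UInt16", "UInt32", "UInt64",
		"Float32", "Float64",
	}
	for _, a := range types {
		for _, b := range types {
			for _, c := range types {
				// %% escapes the literal percent sign in the modulo checks.
				fmt.Printf("SELECT CASE WHEN (number %% 2) = 0 THEN to%s(1) WHEN (number %% 3) = 0 THEN to%s(2) ELSE to%s(3) END FROM system.numbers LIMIT 10;\n", a, b, c)
			}
		}
	}
}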
+/* No CASE expression. Numeric array clauses. */
+
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt8(1), toInt8(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10;
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number
% 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT 
CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), 
toUInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM 
system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt16(1), toInt16(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; 
+SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), 
toInt32(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN 
(number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN 
(number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt32(1), toInt32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN 
[toInt8(2), toInt8(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) 
= 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE 
[toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toInt64(1), toInt64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN 
[toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt8(3), 
toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt64(3), toUInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 
THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt64(3), toUInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt64(3), toUInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt64(2), toUInt64(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt64(2), toUInt64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt64(2), toUInt64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toUInt64(2), toUInt64(3)] ELSE [toUInt64(3), toUInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) 
= 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt8(1), toUInt8(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE 
[toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; 
+SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt64(3), toUInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), 
toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt64(3), toUInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt64(3), toUInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN 
(number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt64(2), toUInt64(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt64(2), toUInt64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt64(2), toUInt64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toUInt64(2), toUInt64(3)] ELSE [toUInt64(3), toUInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), 
toUInt16(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt16(1), toUInt16(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), 
toInt16(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toInt64(2), toInt64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt32(3), toInt32(3)] END FROM 
system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt64(3), toUInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt64(3), toUInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; 
+SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt64(3), toInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt64(3), toUInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt64(2), toUInt64(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt64(2), toUInt64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt64(2), toUInt64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toUInt64(2), toUInt64(3)] ELSE [toUInt64(3), toUInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; 
+SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt32(1), toUInt32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt64(1), toUInt64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt64(1), toUInt64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt64(1), toUInt64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt64(1), toUInt64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt64(3), toUInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt64(1), toUInt64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt64(1), toUInt64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt64(1), toUInt64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt64(1), toUInt64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt64(3), toUInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt64(1), toUInt64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt64(1), toUInt64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; 
+SELECT CASE WHEN (number % 2) = 0 THEN [toUInt64(1), toUInt64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt64(1), toUInt64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt64(3), toUInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt64(1), toUInt64(2)] WHEN (number % 3) = 0 THEN [toUInt64(2), toUInt64(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt64(1), toUInt64(2)] WHEN (number % 3) = 0 THEN [toUInt64(2), toUInt64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt64(1), toUInt64(2)] WHEN (number % 3) = 0 THEN [toUInt64(2), toUInt64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toUInt64(1), toUInt64(2)] WHEN (number % 3) = 0 THEN [toUInt64(2), toUInt64(3)] ELSE [toUInt64(3), toUInt64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 
THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN 
[toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN 
(number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat32(1), toFloat32(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers 
LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt8(2), toInt8(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt16(2), toInt16(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toInt32(2), toInt32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT 
CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt8(2), toUInt8(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt16(2), toUInt16(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; 
+SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toUInt32(2), toUInt32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toFloat32(2), toFloat32(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt8(3), toInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt16(3), toInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toInt32(3), toInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt8(3), toUInt8(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt16(3), toUInt16(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toUInt32(3), toUInt32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE 
[toFloat32(3), toFloat32(3)] END FROM system.numbers LIMIT 10; +SELECT CASE WHEN (number % 2) = 0 THEN [toFloat64(1), toFloat64(2)] WHEN (number % 3) = 0 THEN [toFloat64(2), toFloat64(3)] ELSE [toFloat64(3), toFloat64(3)] END FROM system.numbers LIMIT 10; + +/* No CASE expression. String clauses. */ + +DROP TABLE IF EXISTS multi_if_check; +CREATE TABLE multi_if_check(col1 UInt64, col2 String, col3 String, col4 String) ENGINE=TinyLog; +INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(1, 'A', 'AB', 'ABC'); +INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(2, 'B', 'BC', 'BCD'); +INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(3, 'C', 'CD', 'CDE'); +INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(4, 'D', 'DE', 'DEF'); +INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(5, 'E', 'EF', 'EFG'); +INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(6, 'F', 'FG', 'FGH'); +INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(7, 'G', 'GH', 'GHI'); +INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(8, 'H', 'HI', 'HIJ'); +INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(9, 'I', 'IJ', 'IJK'); +INSERT INTO multi_if_check(col1, col2, col3, col4) VALUES(10, 'J', 'JK', 'JKL'); + +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN (col1 % 3) = 0 THEN col3 ELSE col4 END FROM multi_if_check; +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN (col1 % 3) = 0 THEN col3 ELSE toFixedString(col4, 16) END FROM multi_if_check; +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN (col1 % 3) = 0 THEN col3 ELSE toFixedString('baz', 16) END FROM multi_if_check; +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN (col1 % 3) = 0 THEN col3 ELSE 'baz' END FROM multi_if_check; +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE col4 END FROM multi_if_check; +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE toFixedString(col4, 16) END FROM multi_if_check; +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE toFixedString('baz', 16) END FROM multi_if_check; +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE 'baz' END FROM multi_if_check; +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE col4 END FROM multi_if_check; +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE toFixedString(col4, 16) END FROM multi_if_check; +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE toFixedString('baz', 16) END FROM multi_if_check; +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE 'baz' END FROM multi_if_check; +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN (col1 % 3) = 0 THEN 'bar' ELSE col4 END FROM multi_if_check; +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN (col1 % 3) = 0 THEN 'bar' ELSE toFixedString(col4, 16) END FROM multi_if_check; +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN (col1 % 3) = 0 THEN 'bar' ELSE toFixedString('baz', 16) END FROM multi_if_check; +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN (col1 % 3) = 0 THEN 'bar' ELSE 'baz' END FROM multi_if_check; +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN 1 THEN col3 ELSE col4 END FROM multi_if_check; +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN 1 THEN col3 ELSE toFixedString(col4, 16) END FROM multi_if_check; +SELECT CASE WHEN (col1 % 2) = 0 THEN col2 
+SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN 1 THEN col3 ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN 1 THEN toFixedString(col3, 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN 1 THEN toFixedString(col3, 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN 1 THEN toFixedString(col3, 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN 1 THEN toFixedString(col3, 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN 1 THEN toFixedString('bar', 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN 1 THEN toFixedString('bar', 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN 1 THEN toFixedString('bar', 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN 1 THEN toFixedString('bar', 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN 1 THEN 'bar' ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN 1 THEN 'bar' ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN 1 THEN 'bar' ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN col2 WHEN 1 THEN 'bar' ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN col3 ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN col3 ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN col3 ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN col3 ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN 'bar' ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN 'bar' ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN 'bar' ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN 'bar' ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN 1 THEN col3 ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN 1 THEN col3 ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN 1 THEN col3 ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN 1 THEN col3 ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN 1 THEN toFixedString(col3, 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN 1 THEN toFixedString(col3, 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN 1 THEN toFixedString(col3, 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN 1 THEN toFixedString(col3, 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN 1 THEN toFixedString('bar', 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN 1 THEN toFixedString('bar', 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN 1 THEN toFixedString('bar', 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN 1 THEN toFixedString('bar', 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN 1 THEN 'bar' ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN 1 THEN 'bar' ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN 1 THEN 'bar' ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString(col2, 16) WHEN 1 THEN 'bar' ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN col3 ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN col3 ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN col3 ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN col3 ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN 'bar' ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN 'bar' ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN 'bar' ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN 'bar' ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN 1 THEN col3 ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN 1 THEN col3 ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN 1 THEN col3 ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN 1 THEN col3 ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN 1 THEN toFixedString(col3, 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN 1 THEN toFixedString(col3, 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN 1 THEN toFixedString(col3, 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN 1 THEN toFixedString(col3, 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN 1 THEN toFixedString('bar', 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN 1 THEN toFixedString('bar', 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN 1 THEN toFixedString('bar', 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN 1 THEN toFixedString('bar', 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN 1 THEN 'bar' ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN 1 THEN 'bar' ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN 1 THEN 'bar' ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN toFixedString('foo', 16) WHEN 1 THEN 'bar' ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN (col1 % 3) = 0 THEN col3 ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN (col1 % 3) = 0 THEN col3 ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN (col1 % 3) = 0 THEN col3 ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN (col1 % 3) = 0 THEN col3 ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN (col1 % 3) = 0 THEN 'bar' ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN (col1 % 3) = 0 THEN 'bar' ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN (col1 % 3) = 0 THEN 'bar' ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN (col1 % 3) = 0 THEN 'bar' ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN 1 THEN col3 ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN 1 THEN col3 ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN 1 THEN col3 ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN 1 THEN col3 ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN 1 THEN toFixedString(col3, 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN 1 THEN toFixedString(col3, 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN 1 THEN toFixedString(col3, 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN 1 THEN toFixedString(col3, 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN 1 THEN toFixedString('bar', 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN 1 THEN toFixedString('bar', 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN 1 THEN toFixedString('bar', 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN 1 THEN toFixedString('bar', 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN 1 THEN 'bar' ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN 1 THEN 'bar' ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN 1 THEN 'bar' ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN 'foo' WHEN 1 THEN 'bar' ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN (col1 % 3) = 0 THEN col3 ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN (col1 % 3) = 0 THEN col3 ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN (col1 % 3) = 0 THEN col3 ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN (col1 % 3) = 0 THEN col3 ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN (col1 % 3) = 0 THEN 'bar' ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN (col1 % 3) = 0 THEN 'bar' ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN (col1 % 3) = 0 THEN 'bar' ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN (col1 % 3) = 0 THEN 'bar' ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN 1 THEN col3 ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN 1 THEN col3 ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN 1 THEN col3 ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN 1 THEN col3 ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN 1 THEN toFixedString(col3, 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN 1 THEN toFixedString(col3, 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN 1 THEN toFixedString(col3, 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN 1 THEN toFixedString(col3, 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN 1 THEN toFixedString('bar', 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN 1 THEN toFixedString('bar', 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN 1 THEN toFixedString('bar', 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN 1 THEN toFixedString('bar', 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN 1 THEN 'bar' ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN 1 THEN 'bar' ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN 1 THEN 'bar' ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN col2 WHEN 1 THEN 'bar' ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN col3 ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN col3 ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN col3 ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN col3 ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN 'bar' ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN 'bar' ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN 'bar' ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN (col1 % 3) = 0 THEN 'bar' ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN 1 THEN col3 ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN 1 THEN col3 ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN 1 THEN col3 ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN 1 THEN col3 ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN 1 THEN toFixedString(col3, 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN 1 THEN toFixedString(col3, 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN 1 THEN toFixedString(col3, 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN 1 THEN toFixedString(col3, 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN 1 THEN toFixedString('bar', 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN 1 THEN toFixedString('bar', 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN 1 THEN toFixedString('bar', 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN 1 THEN toFixedString('bar', 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN 1 THEN 'bar' ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN 1 THEN 'bar' ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN 1 THEN 'bar' ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString(col2, 16) WHEN 1 THEN 'bar' ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN col3 ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN col3 ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN col3 ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN col3 ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN 'bar' ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN 'bar' ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN 'bar' ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN (col1 % 3) = 0 THEN 'bar' ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN 1 THEN col3 ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN 1 THEN col3 ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN 1 THEN col3 ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN 1 THEN col3 ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN 1 THEN toFixedString(col3, 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN 1 THEN toFixedString(col3, 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN 1 THEN toFixedString(col3, 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN 1 THEN toFixedString(col3, 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN 1 THEN toFixedString('bar', 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN 1 THEN toFixedString('bar', 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN 1 THEN toFixedString('bar', 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN 1 THEN toFixedString('bar', 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN 1 THEN 'bar' ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN 1 THEN 'bar' ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN 1 THEN 'bar' ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN toFixedString('foo', 16) WHEN 1 THEN 'bar' ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN (col1 % 3) = 0 THEN col3 ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN (col1 % 3) = 0 THEN col3 ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN (col1 % 3) = 0 THEN col3 ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN (col1 % 3) = 0 THEN col3 ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN (col1 % 3) = 0 THEN toFixedString(col3, 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN (col1 % 3) = 0 THEN toFixedString('bar', 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN (col1 % 3) = 0 THEN 'bar' ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN (col1 % 3) = 0 THEN 'bar' ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN (col1 % 3) = 0 THEN 'bar' ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN (col1 % 3) = 0 THEN 'bar' ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN 1 THEN col3 ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN 1 THEN col3 ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN 1 THEN col3 ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN 1 THEN col3 ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN 1 THEN toFixedString(col3, 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN 1 THEN toFixedString(col3, 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN 1 THEN toFixedString(col3, 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN 1 THEN toFixedString(col3, 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN 1 THEN toFixedString('bar', 16) ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN 1 THEN toFixedString('bar', 16) ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN 1 THEN toFixedString('bar', 16) ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN 1 THEN toFixedString('bar', 16) ELSE 'baz' END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN 1 THEN 'bar' ELSE col4 END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN 1 THEN 'bar' ELSE toFixedString(col4, 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN 1 THEN 'bar' ELSE toFixedString('baz', 16) END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN 'foo' WHEN 1 THEN 'bar' ELSE 'baz' END FROM multi_if_check;
+
+DROP TABLE IF EXISTS multi_if_check;
+
+/* No CASE expression. String array clauses. */
+
+CREATE TABLE multi_if_check(col1 UInt64, col2 String, col3 String, col4 String, col5 String, col6 String, col7 String) ENGINE=TinyLog;
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(1, 'A', 'AB', 'ABC', 'ABCD', 'ABCDE', 'ABCDEF');
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(2, 'B', 'BC', 'BCD', 'BCDE', 'BCDEF', 'BCDEFG');
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(3, 'C', 'CD', 'CDE', 'CDEF', 'CDEFG', 'CDEFGH');
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(4, 'D', 'DE', 'DEF', 'DEFG', 'DEFGH', 'DEFGHI');
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(5, 'E', 'EF', 'EFG', 'EFGH', 'EFGHI', 'EFGHIJ');
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(6, 'F', 'FG', 'FGH', 'FGHI', 'FGHIJ', 'FGHIJK');
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(7, 'G', 'GH', 'GHI', 'GHIJ', 'GHIJK', 'GHIJKL');
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(8, 'H', 'HI', 'HIJ', 'HIJK', 'HIJKL', 'HIJKLM');
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(9, 'I', 'IJ', 'IJK', 'IJKL', 'IJKLM', 'IJKLMN');
+INSERT INTO multi_if_check(col1, col2, col3, col4, col5, col6, col7) VALUES(10, 'J', 'JK', 'JKL', 'JKLM', 'JKLMN', 'JKLMNO');
+
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN 1 THEN [col4, col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN 1 THEN [col4, col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN 1 THEN [col4, col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN 1 THEN [col4, col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN 1 THEN [col4, 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN 1 THEN [col4, 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN 1 THEN [col4, 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN 1 THEN [col4, 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN 1 THEN ['foo', col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN 1 THEN ['foo', col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN 1 THEN ['foo', col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN 1 THEN ['foo', col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN 1 THEN ['foo', 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN 1 THEN ['foo', 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN 1 THEN ['foo', 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, col3] WHEN 1 THEN ['foo', 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN 1 THEN [col4, col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN 1 THEN [col4, col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN 1 THEN [col4, col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN 1 THEN [col4, col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN 1 THEN [col4, 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN 1 THEN [col4, 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN 1 THEN [col4, 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN 1 THEN [col4, 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN 1 THEN ['foo', col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN 1 THEN ['foo', col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN 1 THEN ['foo', col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN 1 THEN ['foo', col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN 1 THEN ['foo', 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN 1 THEN ['foo', 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN 1 THEN ['foo', 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN [col2, 'bar'] WHEN 1 THEN ['foo', 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN 1 THEN [col4, col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN 1 THEN [col4, col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN 1 THEN [col4, col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN 1 THEN [col4, col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN 1 THEN [col4, 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN 1 THEN [col4, 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN 1 THEN [col4, 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN 1 THEN [col4, 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN 1 THEN ['foo', col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN 1 THEN ['foo', col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN 1 THEN ['foo', col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN 1 THEN ['foo', col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN 1 THEN ['foo', 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN 1 THEN ['foo', 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN 1 THEN ['foo', 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', col3] WHEN 1 THEN ['foo', 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN 1 THEN [col4, col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN 1 THEN [col4, col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN 1 THEN [col4, col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN 1 THEN [col4, col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN 1 THEN [col4, 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN 1 THEN [col4, 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN 1 THEN [col4, 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN 1 THEN [col4, 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN 1 THEN ['foo', col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN 1 THEN ['foo', col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN 1 THEN ['foo', col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN 1 THEN ['foo', col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN 1 THEN ['foo', 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN 1 THEN ['foo', 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN 1 THEN ['foo', 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN (col1 % 2) = 0 THEN ['foo', 'bar'] WHEN 1 THEN ['foo', 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN 1 THEN [col4, col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN 1 THEN [col4, col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN 1 THEN [col4, col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN 1 THEN [col4, col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN 1 THEN [col4, 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN 1 THEN [col4, 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN 1 THEN [col4, 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN 1 THEN [col4, 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN 1 THEN ['foo', col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN 1 THEN ['foo', col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN 1 THEN ['foo', col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN 1 THEN ['foo', col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN 1 THEN ['foo', 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN 1 THEN ['foo', 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN 1 THEN ['foo', 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, col3] WHEN 1 THEN ['foo', 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN 1 THEN [col4, col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN 1 THEN [col4, col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN 1 THEN [col4, col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN 1 THEN [col4, col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN 1 THEN [col4, 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN 1 THEN [col4, 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN 1 THEN [col4, 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN 1 THEN [col4, 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN 1 THEN ['foo', col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN 1 THEN ['foo', col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN 1 THEN ['foo', col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN 1 THEN ['foo', col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN 1 THEN ['foo', 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN 1 THEN ['foo', 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN 1 THEN ['foo', 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN [col2, 'bar'] WHEN 1 THEN ['foo', 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN 1 THEN [col4, col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN 1 THEN [col4, col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN 1 THEN [col4, col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN 1 THEN [col4, col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN 1 THEN [col4, 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN 1 THEN [col4, 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN 1 THEN [col4, 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN 1 THEN [col4, 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN 1 THEN ['foo', col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN 1 THEN ['foo', col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN 1 THEN ['foo', col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN 1 THEN ['foo', col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN 1 THEN ['foo', 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN 1 THEN ['foo', 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN 1 THEN ['foo', 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', col3] WHEN 1 THEN ['foo', 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN [col4, col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN [col4, 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE [col6, 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE ['foo', col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', col5] ELSE ['foo', 'bar'] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE [col6, col7] END FROM multi_if_check;
+SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check;
'bar'] ELSE [col6, 'bar'] END FROM multi_if_check; +SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE ['foo', col7] END FROM multi_if_check; +SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN (col1 % 3) = 0 THEN ['foo', 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check; +SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN 1 THEN [col4, col5] ELSE [col6, col7] END FROM multi_if_check; +SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN 1 THEN [col4, col5] ELSE [col6, 'bar'] END FROM multi_if_check; +SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN 1 THEN [col4, col5] ELSE ['foo', col7] END FROM multi_if_check; +SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN 1 THEN [col4, col5] ELSE ['foo', 'bar'] END FROM multi_if_check; +SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN 1 THEN [col4, 'bar'] ELSE [col6, col7] END FROM multi_if_check; +SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN 1 THEN [col4, 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check; +SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN 1 THEN [col4, 'bar'] ELSE ['foo', col7] END FROM multi_if_check; +SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN 1 THEN [col4, 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check; +SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN 1 THEN ['foo', col5] ELSE [col6, col7] END FROM multi_if_check; +SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN 1 THEN ['foo', col5] ELSE [col6, 'bar'] END FROM multi_if_check; +SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN 1 THEN ['foo', col5] ELSE ['foo', col7] END FROM multi_if_check; +SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN 1 THEN ['foo', col5] ELSE ['foo', 'bar'] END FROM multi_if_check; +SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN 1 THEN ['foo', 'bar'] ELSE [col6, col7] END FROM multi_if_check; +SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN 1 THEN ['foo', 'bar'] ELSE [col6, 'bar'] END FROM multi_if_check; +SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN 1 THEN ['foo', 'bar'] ELSE ['foo', col7] END FROM multi_if_check; +SELECT CASE WHEN 1 THEN ['foo', 'bar'] WHEN 1 THEN ['foo', 'bar'] ELSE ['foo', 'bar'] END FROM multi_if_check; + +DROP TABLE IF EXISTS multi_if_check; + +/* CASE expression. Numeric clauses. */ + +CREATE TABLE multi_if_check(col1 UInt64) ENGINE=TinyLog; +INSERT INTO multi_if_check(col1) SELECT toUInt64((number * 37 + 13) % 3) AS col1 FROM system.numbers LIMIT 10; + +SELECT CASE col1 WHEN 0 THEN 1 WHEN 1 THEN 2 ELSE 3 END FROM multi_if_check; + +/* CASE expression. String clauses. 
+
+SELECT CASE col1 WHEN 1 THEN 'A' WHEN 2 THEN 'AB' ELSE 'ABC' END FROM multi_if_check;
+
+DROP TABLE IF EXISTS multi_if_check;
diff --git a/parser/testdata/00330_view_subqueries/ast.json b/parser/testdata/00330_view_subqueries/ast.json
new file mode 100644
index 000000000..788155887
--- /dev/null
+++ b/parser/testdata/00330_view_subqueries/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery v1 (children 1)"
+        },
+        {
+            "explain": " Identifier v1"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001038615,
+        "rows_read": 2,
+        "bytes_read": 56
+    }
+}
diff --git a/parser/testdata/00330_view_subqueries/metadata.json b/parser/testdata/00330_view_subqueries/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00330_view_subqueries/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00330_view_subqueries/query.sql b/parser/testdata/00330_view_subqueries/query.sql
new file mode 100644
index 000000000..bcf7dda8d
--- /dev/null
+++ b/parser/testdata/00330_view_subqueries/query.sql
@@ -0,0 +1,11 @@
+DROP TABLE IF EXISTS v1;
+DROP TABLE IF EXISTS v2;
+
+CREATE VIEW v1 AS SELECT 1 FROM (SELECT 1);
+SELECT * FROM v1;
+
+CREATE VIEW v2 AS SELECT number * number FROM (SELECT number FROM system.numbers LIMIT 10);
+SELECT * FROM v2;
+
+DROP TABLE v1;
+DROP TABLE v2;
diff --git a/parser/testdata/00331_final_and_prewhere/ast.json b/parser/testdata/00331_final_and_prewhere/ast.json
new file mode 100644
index 000000000..cc21e273b
--- /dev/null
+++ b/parser/testdata/00331_final_and_prewhere/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery replace (children 1)"
+        },
+        {
+            "explain": " Identifier replace"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001161932,
+        "rows_read": 2,
+        "bytes_read": 66
+    }
+}
diff --git a/parser/testdata/00331_final_and_prewhere/metadata.json b/parser/testdata/00331_final_and_prewhere/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00331_final_and_prewhere/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00331_final_and_prewhere/query.sql b/parser/testdata/00331_final_and_prewhere/query.sql
new file mode 100644
index 000000000..5d0b80d63
--- /dev/null
+++ b/parser/testdata/00331_final_and_prewhere/query.sql
@@ -0,0 +1,13 @@
+DROP TABLE IF EXISTS replace;
+
+set allow_deprecated_syntax_for_merge_tree=1;
+CREATE TABLE replace ( EventDate Date, Id UInt64, Data String, Version UInt32) ENGINE = ReplacingMergeTree(EventDate, Id, 8192, Version);
+INSERT INTO replace VALUES ('2016-06-02', 1, 'version 1', 1);
+INSERT INTO replace VALUES ('2016-06-02', 2, 'version 1', 1);
+INSERT INTO replace VALUES ('2016-06-02', 1, 'version 0', 0);
+
+SELECT * FROM replace ORDER BY Id, Version;
+SELECT * FROM replace FINAL ORDER BY Id, Version;
+SELECT * FROM replace FINAL WHERE Version = 0 ORDER BY Id, Version;
+
+DROP TABLE replace;
diff --git a/parser/testdata/00331_final_and_prewhere_condition_ver_column/ast.json b/parser/testdata/00331_final_and_prewhere_condition_ver_column/ast.json
new file mode 100644
index 000000000..42dedd6cd
--- /dev/null
+++ b/parser/testdata/00331_final_and_prewhere_condition_ver_column/ast.json
@@ -0,0 +1,25 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "Set"
+        }
+    ],
+
+    "rows": 1,
+
+    "statistics":
+    {
+        "elapsed": 0.00107301,
+        "rows_read": 1,
+        "bytes_read": 11
+    }
+}
diff --git a/parser/testdata/00331_final_and_prewhere_condition_ver_column/metadata.json b/parser/testdata/00331_final_and_prewhere_condition_ver_column/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00331_final_and_prewhere_condition_ver_column/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00331_final_and_prewhere_condition_ver_column/query.sql b/parser/testdata/00331_final_and_prewhere_condition_ver_column/query.sql
new file mode 100644
index 000000000..a3c499f16
--- /dev/null
+++ b/parser/testdata/00331_final_and_prewhere_condition_ver_column/query.sql
@@ -0,0 +1,16 @@
+SET enable_analyzer = 1;
+
+-- https://github.com/ClickHouse/ClickHouse/issues/45804
+
+CREATE TABLE myRMT(
+    key Int64,
+    someCol String,
+    ver DateTime
+) ENGINE = ReplacingMergeTree(ver)
+ORDER BY key as SELECT 1, 'test', '2020-01-01';
+
+SELECT count(ver) FROM myRMT FINAL PREWHERE ver > '2000-01-01';
+
+SELECT count() FROM myRMT FINAL PREWHERE ver > '2000-01-01';
+
+DROP TABLE myRMT;
diff --git a/parser/testdata/00332_quantile_timing_memory_leak/ast.json b/parser/testdata/00332_quantile_timing_memory_leak/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00332_quantile_timing_memory_leak/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00332_quantile_timing_memory_leak/metadata.json b/parser/testdata/00332_quantile_timing_memory_leak/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00332_quantile_timing_memory_leak/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00332_quantile_timing_memory_leak/query.sql b/parser/testdata/00332_quantile_timing_memory_leak/query.sql
new file mode 100644
index 000000000..1cc0938b1
--- /dev/null
+++ b/parser/testdata/00332_quantile_timing_memory_leak/query.sql
@@ -0,0 +1,4 @@
+-- Tags: no-parallel, no-fasttest
+
+SELECT quantileTiming(number) FROM (SELECT * FROM system.numbers LIMIT 10000);
+SELECT floor(log2(1 + number) / log2(1.5)) AS k, count() AS c, quantileTiming(number % 10000) AS q FROM (SELECT * FROM system.numbers LIMIT 1000000) GROUP BY k ORDER BY k;
diff --git a/parser/testdata/00333_parser_number_bug/ast.json b/parser/testdata/00333_parser_number_bug/ast.json
new file mode 100644
index 000000000..77a52f836
--- /dev/null
+++ b/parser/testdata/00333_parser_number_bug/ast.json
@@ -0,0 +1,64 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 2)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Identifier info"
+        },
+        {
+            "explain": "   TablesInSelectQuery (children 1)"
+        },
+        {
+            "explain": "    TablesInSelectQueryElement (children 1)"
+        },
+        {
+            "explain": "     TableExpression (children 1)"
+        },
+        {
+            "explain": "      Subquery (children 1)"
+        },
+        {
+            "explain": "       SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": "        ExpressionList (children 1)"
+        },
+        {
+            "explain": "         SelectQuery (children 1)"
+        },
+        {
+            "explain": "          ExpressionList (children 1)"
+        },
+        {
+            "explain": "           Literal UInt64_1 (alias info)"
+        }
+    ],
+
+    "rows": 14,
+
+    "statistics":
+    {
+        "elapsed": 0.001384156,
+        "rows_read": 14,
+        "bytes_read": 571
+    }
+}
diff --git a/parser/testdata/00333_parser_number_bug/metadata.json b/parser/testdata/00333_parser_number_bug/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00333_parser_number_bug/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00333_parser_number_bug/query.sql b/parser/testdata/00333_parser_number_bug/query.sql
new file mode 100644
index 000000000..24784f347
--- /dev/null
+++ b/parser/testdata/00333_parser_number_bug/query.sql
@@ -0,0 +1 @@
+SELECT info FROM (SELECT 1 AS info);
diff --git a/parser/testdata/00334_column_aggregate_function_limit/ast.json b/parser/testdata/00334_column_aggregate_function_limit/ast.json
new file mode 100644
index 000000000..515c50020
--- /dev/null
+++ b/parser/testdata/00334_column_aggregate_function_limit/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery ontime (children 1)"
+        },
+        {
+            "explain": " Identifier ontime"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001277977,
+        "rows_read": 2,
+        "bytes_read": 64
+    }
+}
diff --git a/parser/testdata/00334_column_aggregate_function_limit/metadata.json b/parser/testdata/00334_column_aggregate_function_limit/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00334_column_aggregate_function_limit/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00334_column_aggregate_function_limit/query.sql b/parser/testdata/00334_column_aggregate_function_limit/query.sql
new file mode 100644
index 000000000..70334ad91
--- /dev/null
+++ b/parser/testdata/00334_column_aggregate_function_limit/query.sql
@@ -0,0 +1,9 @@
+DROP TABLE IF EXISTS ontime;
+
+CREATE TABLE ontime (FlightDate Date, Carrier String, FlightNum String) ENGINE = Memory;
+
+INSERT INTO ontime VALUES
('1987-10-01','AA','2'),('1987-10-01','AA','2'),('1987-10-01','AA','7'),('1987-10-01','AA','7'),('1987-10-01','AA','26'),('1987-10-01','AA','34'),('1987-10-01','AA','36'),('1987-10-01','AA','91'),('1987-10-01','AA','101'),('1987-10-01','AA','101'),('1987-10-01','AA','109'),('1987-10-01','AA','109'),('1987-10-01','AA','112'),('1987-10-01','AA','123'),('1987-10-01','AA','160'),('1987-10-01','AA','165'),('1987-10-01','AA','165'),('1987-10-01','AA','165'),('1987-10-01','AA','176'),('1987-10-01','AA','176'),('1987-10-01','AA','176'),('1987-10-01','AA','176'),('1987-10-01','AA','179'),('1987-10-01','AA','179'),('1987-10-01','AA','215'),('1987-10-01','AA','215'),('1987-10-01','AA','231'),('1987-10-01','AA','231'),('1987-10-01','AA','263'),('1987-10-01','AA','263'),('1987-10-01','AA','268'),('1987-10-01','AA','268'),('1987-10-01','AA','281'),('1987-10-01','AA','287'),('1987-10-01','AA','287'),('1987-10-01','AA','309'),('1987-10-01','AA','309'),('1987-10-01','AA','309'),('1987-10-01','AA','341'),('1987-10-01','AA','344'),('1987-10-01','AA','344'),('1987-10-01','AA','347'),('1987-10-01','AA','347'),('1987-10-01','AA','368'),('1987-10-01','AA','381'),('1987-10-01','AA','381'),('1987-10-01','AA','381'),('1987-10-01','AA','396'),('1987-10-01','AA','396'),('1987-10-01','AA','397'),('1987-10-01','AA','397'),('1987-10-01','AA','417'),('1987-10-01','AA','417'),('1987-10-01','AA','446'),('1987-10-01','AA','451'),('1987-10-01','AA','451'),('1987-10-01','AA','460'),('1987-10-01','AA','460'),('1987-10-01','AA','491'),('1987-10-01','AA','504'),('1987-10-01','AA','504'),('1987-10-01','AA','519'),('1987-10-01','AA','519'),('1987-10-01','AA','523'),('1987-10-01','AA','523'),('1987-10-01','AA','525'),('1987-10-01','AA','525'),('1987-10-01','AA','525'),('1987-10-01','AA','533'),('1987-10-01','AA','533'),('1987-10-01','AA','533'),('1987-10-01','AA','546'),('1987-10-01','AA','546'),('1987-10-01','AA','556'),('1987-10-01','AA','556'),('1987-10-01','AA','556'),('1987-10-01','AA','597'),('1987-10-01','AA','597'),('1987-10-01','AA','597'),('1987-10-01','AA','601'),('1987-10-01','AA','601'),('1987-10-01','AA','627'),('1987-10-01','AA','670'),('1987-10-01','AA','673'),('1987-10-01','AA','673'),('1987-10-01','AA','680'),('1987-10-01','AA','680'),('1987-10-01','AA','817'),('1987-10-01','AA','817'),('1987-10-01','AA','824'),('1987-10-01','AA','824'),('1987-10-01','AA','824'),('1987-10-01','AA','824'),('1987-10-01','AA','832'),('1987-10-01','AA','832'),('1987-10-01','AA','852'),('1987-10-01','AA','852'),('1987-10-01','AA','866'),('1987-10-01','AA','866'),('1987-10-01','AA','871'),('1987-10-01','AA','871'),('1987-10-01','AA','880'),('1987-10-01','AA','880'),('1987-10-01','AA','880'),('1987-10-01','AA','880'),('1987-10-01','AA','883'),('1987-10-01','AA','883'),('1987-10-01','AA','885'),('1987-10-01','AA','885'),('1987-10-01','AA','885'),('1987-10-01','AA','890'),('1987-10-01','AA','893'),('1987-10-01','AA','893'),('1987-10-01','AA','905'),('1987-10-01','AA','905'),('1987-10-01','AA','929'),('1987-10-01','AA','929'),('1987-10-01','AA','936'),('1987-10-01','AA','936'),('1987-10-01','AA','937'),('1987-10-01','AA','937'),('1987-10-01','AA','955'),('1987-10-01','AA','966'),('1987-10-01','AA','1002'),('1987-10-01','AA','1002'),('1987-10-01','AA','1004'),('1987-10-01','AA','1004'),('1987-10-01','AA','1015'),('1987-10-01','AA','1015'),('1987-10-01','AA','1021'),('1987-10-01','AA','1021'),('1987-10-01','AA','1041'),('1987-10-01','AA','1041'),('1987-10-01','AA','1046'),('1987-10-01','AA','1046'),('1987-10-01','AA','1048'),('1987-10-01','AA'
,'1048'),('1987-10-01','AA','1061'),('1987-10-01','AA','1061'),('1987-10-01','AA','1088'),('1987-10-01','AA','1088'),('1987-10-01','AA','2033'),('1987-10-01','AA','2033'),('1987-10-01','AA','2050'),('1987-10-01','AA','2058'),('1987-10-01','AA','2071'),('1987-10-01','AA','2071'),('1987-10-01','AA','2086'),('1987-10-01','AA','2105'),('1987-10-01','AA','2111'),('1987-10-01','AA','2123'),('1987-10-01','AA','2123'),('1987-10-01','AA','2147'),('1987-10-01','AA','2147'),('1987-10-01','AA','2199'),('1987-10-01','AA','2199'),('1987-10-01','AA','2207'),('1987-10-01','AA','2207'),('1987-10-01','AA','2217'),('1987-10-01','AA','2230'),('1987-10-01','AA','2245'),('1987-10-01','AA','2251'),('1987-10-01','AA','2251'),('1987-10-01','AA','2275'),('1987-10-01','AA','2278'),('1987-10-01','AA','2351'),('1987-10-01','AA','2357'),('1987-10-01','AA','2490'),('1987-10-01','AA','2528'),('1987-10-01','AA','2528'),('1987-10-01','AA','2735'),('1987-10-01','AA','2735'),('1987-10-01','AA','2751'),('1987-10-01','AL','2'),('1987-10-01','AL','2'),('1987-10-01','AL','7'),('1987-10-01','AL','7'),('1987-10-01','AL','26'),('1987-10-01','AL','26'),('1987-10-01','AL','34'),('1987-10-01','AL','34'),('1987-10-01','AL','36'),('1987-10-01','AL','36'),('1987-10-01','AL','45'),('1987-10-01','AL','45'),('1987-10-01','AL','45'),('1987-10-01','AL','91'),('1987-10-01','AL','91'),('1987-10-01','AL','104'),('1987-10-01','AL','104'),('1987-10-01','AL','104'),('1987-10-01','AL','109'),('1987-10-01','AL','112'),('1987-10-01','AL','112'),('1987-10-01','AL','123'),('1987-10-01','AL','149'),('1987-10-01','AL','160'),('1987-10-01','AL','160'),('1987-10-01','AL','165'),('1987-10-01','AL','171'),('1987-10-01','AL','171'),('1987-10-01','AL','176'),('1987-10-01','AL','176'),('1987-10-01','AL','179'),('1987-10-01','AL','215'),('1987-10-01','AL','231'),('1987-10-01','AL','263'),('1987-10-01','AL','263'),('1987-10-01','AL','268'),('1987-10-01','AL','268'),('1987-10-01','AL','268'),('1987-10-01','AL','281'),('1987-10-01','AL','281'),('1987-10-01','AL','287'),('1987-10-01','AL','287'),('1987-10-01','AL','309'),('1987-10-01','AL','309'),('1987-10-01','AL','341'),('1987-10-01','AL','344'),('1987-10-01','AL','344'),('1987-10-01','AL','357'),('1987-10-01','AL','357'),('1987-10-01','AL','368'),('1987-10-01','AL','381'),('1987-10-01','AL','396'),('1987-10-01','AL','397'),('1987-10-01','AL','397'),('1987-10-01','AL','416'),('1987-10-01','AL','416'),('1987-10-01','AL','417'),('1987-10-01','AL','438'),('1987-10-01','AL','446'),('1987-10-01','AL','451'),('1987-10-01','AL','451'),('1987-10-01','AL','491'),('1987-10-01','AL','491'),('1987-10-01','AL','523'),('1987-10-01','AL','523'),('1987-10-01','AL','523'),('1987-10-01','AL','525'),('1987-10-01','AL','525'),('1987-10-01','AL','533'),('1987-10-01','AL','533'),('1987-10-01','AL','546'),('1987-10-01','AL','546'),('1987-10-01','AL','556'),('1987-10-01','AL','556'),('1987-10-01','AL','601'),('1987-10-01','AL','601'),('1987-10-01','AL','627'),('1987-10-01','AL','629'),('1987-10-01','AL','670'),('1987-10-01','AL','670'),('1987-10-01','AL','670'),('1987-10-01','AL','673'),('1987-10-01','AL','680'),('1987-10-01','AL','700'),('1987-10-02','AA','2'),('1987-10-02','AA','2'),('1987-10-02','AA','2'),('1987-10-02','AA','7'),('1987-10-02','AA','7'),('1987-10-02','AA','26'),('1987-10-02','AA','34'),('1987-10-02','AA','36'),('1987-10-02','AA','91'),('1987-10-02','AA','101'),('1987-10-02','AA','101'),('1987-10-02','AA','109'),('1987-10-02','AA','109'),('1987-10-02','AA','112'),('1987-10-02','AA','123'),('1987-10-02','AA','123'),('1987-
10-02','AA','160'),('1987-10-02','AA','165'),('1987-10-02','AA','165'),('1987-10-02','AA','165'),('1987-10-02','AA','176'),('1987-10-02','AA','176'),('1987-10-02','AA','176'),('1987-10-02','AA','176'),('1987-10-02','AA','179'),('1987-10-02','AA','179'),('1987-10-02','AA','215'),('1987-10-02','AA','215'),('1987-10-02','AA','231'),('1987-10-02','AA','231'),('1987-10-02','AA','263'),('1987-10-02','AA','263'),('1987-10-02','AA','268'),('1987-10-02','AA','281'),('1987-10-02','AA','287'),('1987-10-02','AA','287'),('1987-10-02','AA','309'),('1987-10-02','AA','309'),('1987-10-02','AA','309'),('1987-10-02','AA','341'),('1987-10-02','AA','344'),('1987-10-02','AA','344'),('1987-10-02','AA','347'),('1987-10-02','AA','347'),('1987-10-02','AA','368'),('1987-10-02','AA','381'),('1987-10-02','AA','381'),('1987-10-02','AA','381'),('1987-10-02','AA','396'),('1987-10-02','AA','396'),('1987-10-02','AA','397'),('1987-10-02','AA','397'),('1987-10-02','AA','417'),('1987-10-02','AA','417'),('1987-10-02','AA','446'),('1987-10-02','AA','451'),('1987-10-02','AA','451'),('1987-10-02','AA','460'),('1987-10-02','AA','460'),('1987-10-02','AA','491'),('1987-10-02','AA','504'),('1987-10-02','AA','504'),('1987-10-02','AA','519'),('1987-10-02','AA','519'),('1987-10-02','AA','523'),('1987-10-02','AA','523'),('1987-10-02','AA','525'),('1987-10-02','AA','525'),('1987-10-02','AA','525'),('1987-10-02','AA','533'),('1987-10-02','AA','533'),('1987-10-02','AA','533'),('1987-10-02','AA','546'),('1987-10-02','AA','546'),('1987-10-02','AA','546'),('1987-10-02','AA','546'),('1987-10-02','AA','556'),('1987-10-02','AA','556'),('1987-10-02','AA','556'),('1987-10-02','AA','597'),('1987-10-02','AA','597'),('1987-10-02','AA','597'),('1987-10-02','AA','601'),('1987-10-02','AA','601'),('1987-10-02','AA','627'),('1987-10-02','AA','629'),('1987-10-02','AA','629'),('1987-10-02','AA','670'),('1987-10-02','AA','673'),('1987-10-02','AA','673'),('1987-10-02','AA','680'),('1987-10-02','AA','680'),('1987-10-02','AA','817'),('1987-10-02','AA','817'),('1987-10-02','AA','824'),('1987-10-02','AA','824'),('1987-10-02','AA','824'),('1987-10-02','AA','824'),('1987-10-02','AA','832'),('1987-10-02','AA','832'),('1987-10-02','AA','852'),('1987-10-02','AA','866'),('1987-10-02','AA','866'),('1987-10-02','AA','871'),('1987-10-02','AA','871'),('1987-10-02','AA','880'),('1987-10-02','AA','880'),('1987-10-02','AA','880'),('1987-10-02','AA','880'),('1987-10-02','AA','883'),('1987-10-02','AA','883'),('1987-10-02','AA','885'),('1987-10-02','AA','885'),('1987-10-02','AA','885'),('1987-10-02','AA','890'),('1987-10-02','AA','890'),('1987-10-02','AA','893'),('1987-10-02','AA','893'),('1987-10-02','AA','905'),('1987-10-02','AA','905'),('1987-10-02','AA','915'),('1987-10-02','AA','929'),('1987-10-02','AA','929'),('1987-10-02','AA','936'),('1987-10-02','AA','936'),('1987-10-02','AA','937'),('1987-10-02','AA','937'),('1987-10-02','AA','955'),('1987-10-02','AA','955'),('1987-10-02','AA','966'),('1987-10-02','AA','1002'),('1987-10-02','AA','1002'),('1987-10-02','AA','1004'),('1987-10-02','AA','1004'),('1987-10-02','AA','1015'),('1987-10-02','AA','1015'),('1987-10-02','AA','1021'),('1987-10-02','AA','1021'),('1987-10-02','AA','1041'),('1987-10-02','AA','1041'),('1987-10-02','AA','1046'),('1987-10-02','AA','1046'),('1987-10-02','AA','1048'),('1987-10-02','AA','1048'),('1987-10-02','AA','1061'),('1987-10-02','AA','1061'),('1987-10-02','AA','1088'),('1987-10-02','AA','1088'),('1987-10-02','AA','2033'),('1987-10-02','AA','2033'),('1987-10-02','AA','2050'),('1987-10-02','AA','2058'),('19
87-10-02','AA','2071'),('1987-10-02','AA','2071'),('1987-10-02','AA','2086'),('1987-10-02','AA','2105'),('1987-10-02','AA','2111'),('1987-10-02','AA','2123'),('1987-10-02','AA','2123'),('1987-10-02','AA','2147'),('1987-10-02','AA','2147'),('1987-10-02','AA','2199'),('1987-10-02','AA','2199'),('1987-10-02','AA','2207'),('1987-10-02','AA','2207'),('1987-10-02','AA','2217'),('1987-10-02','AA','2230'),('1987-10-02','AA','2245'),('1987-10-02','AA','2251'),('1987-10-02','AA','2251'),('1987-10-02','AA','2275'),('1987-10-02','AA','2278'),('1987-10-02','AA','2351'),('1987-10-02','AA','2357'),('1987-10-02','AA','2361'),('1987-10-02','AA','2490'),('1987-10-02','AA','2528'),('1987-10-02','AA','2528'),('1987-10-02','AA','2735'),('1987-10-02','AA','2735'),('1987-10-02','AA','2751'),('1987-10-02','AL','2'),('1987-10-02','AL','2'),('1987-10-02','AL','7'),('1987-10-02','AL','7'),('1987-10-02','AL','26'),('1987-10-02','AL','26'),('1987-10-02','AL','34'),('1987-10-02','AL','34'),('1987-10-02','AL','36'),('1987-10-02','AL','36'),('1987-10-02','AL','45'),('1987-10-02','AL','45'),('1987-10-02','AL','45'),('1987-10-02','AL','91'),('1987-10-02','AL','91'),('1987-10-02','AL','104'),('1987-10-02','AL','104'),('1987-10-02','AL','104'),('1987-10-02','AL','109'),('1987-10-02','AL','112'),('1987-10-02','AL','112'),('1987-10-02','AL','123'),('1987-10-02','AL','149'),('1987-10-02','AL','160'),('1987-10-02','AL','160'),('1987-10-02','AL','165'),('1987-10-02','AL','171'),('1987-10-02','AL','171'),('1987-10-02','AL','176'),('1987-10-02','AL','176'),('1987-10-02','AL','179'),('1987-10-02','AL','215'),('1987-10-02','AL','231'),('1987-10-02','AL','263'),('1987-10-02','AL','263'),('1987-10-02','AL','268'),('1987-10-02','AL','268'),('1987-10-02','AL','268'),('1987-10-02','AL','281'),('1987-10-02','AL','281'),('1987-10-02','AL','287'),('1987-10-02','AL','287'),('1987-10-02','AL','309'),('1987-10-02','AL','309'),('1987-10-02','AL','341'),('1987-10-02','AL','344'),('1987-10-02','AL','344'),('1987-10-02','AL','357'),('1987-10-02','AL','357'),('1987-10-02','AL','368'),('1987-10-02','AL','381'),('1987-10-02','AL','396'),('1987-10-02','AL','397'),('1987-10-02','AL','397'),('1987-10-02','AL','416'),('1987-10-02','AL','416'),('1987-10-02','AL','417'),('1987-10-02','AL','438'),('1987-10-02','AL','438'),('1987-10-02','AL','446'),('1987-10-02','AL','451'),('1987-10-02','AL','451'),('1987-10-02','AL','491'),('1987-10-02','AL','491'),('1987-10-02','AL','523'),('1987-10-02','AL','523'),('1987-10-02','AL','523'),('1987-10-02','AL','525'),('1987-10-02','AL','525'),('1987-10-02','AL','533'),('1987-10-02','AL','533'),('1987-10-02','AL','546'),('1987-10-02','AL','546'),('1987-10-02','AL','556'),('1987-10-02','AL','556'),('1987-10-02','AL','601'),('1987-10-02','AL','601'),('1987-10-02','AL','627'),('1987-10-02','AL','629'),('1987-10-02','AL','670'),('1987-10-02','AL','670'),('1987-10-02','AL','670'),('1987-10-02','AL','673'),('1987-10-02','AL','680'),('1987-10-03','AA','2'),('1987-10-03','AA','2'),('1987-10-03','AA','2'),('1987-10-03','AA','7'),('1987-10-03','AA','7'),('1987-10-03','AA','26'),('1987-10-03','AA','34'),('1987-10-03','AA','36'),('1987-10-03','AA','91'),('1987-10-03','AA','101'),('1987-10-03','AA','101'),('1987-10-03','AA','109'),('1987-10-03','AA','109'),('1987-10-03','AA','112'),('1987-10-03','AA','123'),('1987-10-03','AA','123'),('1987-10-03','AA','165'),('1987-10-03','AA','165'),('1987-10-03','AA','165'),('1987-10-03','AA','176'),('1987-10-03','AA','176'),('1987-10-03','AA','176'),('1987-10-03','AA','176'),('1987-10-03','AA','179'),(
'1987-10-03','AA','179'),('1987-10-03','AA','215'),('1987-10-03','AA','215'),('1987-10-03','AA','231'),('1987-10-03','AA','231'),('1987-10-03','AA','263'),('1987-10-03','AA','263'),('1987-10-03','AA','268'),('1987-10-03','AA','281'),('1987-10-03','AA','287'),('1987-10-03','AA','287'),('1987-10-03','AA','309'),('1987-10-03','AA','309'),('1987-10-03','AA','309'),('1987-10-03','AA','341'),('1987-10-03','AA','344'),('1987-10-03','AA','344'),('1987-10-03','AA','347'),('1987-10-03','AA','347'),('1987-10-03','AA','368'),('1987-10-03','AA','381'),('1987-10-03','AA','381'),('1987-10-03','AA','381'),('1987-10-03','AA','396'),('1987-10-03','AA','396'),('1987-10-03','AA','397'),('1987-10-03','AA','397'),('1987-10-03','AA','417'),('1987-10-03','AA','417'),('1987-10-03','AA','446'),('1987-10-03','AA','451'),('1987-10-03','AA','451'),('1987-10-03','AA','460'),('1987-10-03','AA','460'),('1987-10-03','AA','491'),('1987-10-03','AA','491'),('1987-10-03','AA','504'),('1987-10-03','AA','504'),('1987-10-03','AA','519'),('1987-10-03','AA','519'),('1987-10-03','AA','523'),('1987-10-03','AA','523'),('1987-10-03','AA','525'),('1987-10-03','AA','525'),('1987-10-03','AA','525'),('1987-10-03','AA','533'),('1987-10-03','AA','533'),('1987-10-03','AA','546'),('1987-10-03','AA','546'),('1987-10-03','AA','546'),('1987-10-03','AA','546'),('1987-10-03','AA','556'),('1987-10-03','AA','556'),('1987-10-03','AA','556'),('1987-10-03','AA','597'),('1987-10-03','AA','597'),('1987-10-03','AA','597'),('1987-10-03','AA','601'),('1987-10-03','AA','601'),('1987-10-03','AA','627'),('1987-10-03','AA','629'),('1987-10-03','AA','629'),('1987-10-03','AA','670'),('1987-10-03','AA','673'),('1987-10-03','AA','673'),('1987-10-03','AA','680'),('1987-10-03','AA','680'),('1987-10-03','AA','817'),('1987-10-03','AA','817'),('1987-10-03','AA','824'),('1987-10-03','AA','824'),('1987-10-03','AA','824'),('1987-10-03','AA','824'),('1987-10-03','AA','832'),('1987-10-03','AA','832'),('1987-10-03','AA','852'),('1987-10-03','AA','852'),('1987-10-03','AA','866'),('1987-10-03','AA','866'),('1987-10-03','AA','871'),('1987-10-03','AA','871'),('1987-10-03','AA','880'),('1987-10-03','AA','880'),('1987-10-03','AA','880'),('1987-10-03','AA','880'),('1987-10-03','AA','883'),('1987-10-03','AA','883'),('1987-10-03','AA','885'),('1987-10-03','AA','885'),('1987-10-03','AA','885'),('1987-10-03','AA','890'),('1987-10-03','AA','890'),('1987-10-03','AA','893'),('1987-10-03','AA','893'),('1987-10-03','AA','905'),('1987-10-03','AA','905'),('1987-10-03','AA','915'),('1987-10-03','AA','929'),('1987-10-03','AA','929'),('1987-10-03','AA','936'),('1987-10-03','AA','936'),('1987-10-03','AA','937'),('1987-10-03','AA','937'),('1987-10-03','AA','955'),('1987-10-03','AA','955'),('1987-10-03','AA','966'),('1987-10-03','AA','1002'),('1987-10-03','AA','1002'),('1987-10-03','AA','1004'),('1987-10-03','AA','1004'),('1987-10-03','AA','1015'),('1987-10-03','AA','1015'),('1987-10-03','AA','1021'),('1987-10-03','AA','1041'),('1987-10-03','AA','1041'),('1987-10-03','AA','1046'),('1987-10-03','AA','1046'),('1987-10-03','AA','1048'),('1987-10-03','AA','1048'),('1987-10-03','AA','1061'),('1987-10-03','AA','1061'),('1987-10-03','AA','1088'),('1987-10-03','AA','1088'),('1987-10-03','AA','2033'),('1987-10-03','AA','2033'),('1987-10-03','AA','2050'),('1987-10-03','AA','2058'),('1987-10-03','AA','2071'),('1987-10-03','AA','2071'),('1987-10-03','AA','2086'),('1987-10-03','AA','2105'),('1987-10-03','AA','2111'),('1987-10-03','AA','2123'),('1987-10-03','AA','2123'),('1987-10-03','AA','2147'),('1987-10-03','AA
','2147'),('1987-10-03','AA','2199'),('1987-10-03','AA','2207'),('1987-10-03','AA','2207'),('1987-10-03','AA','2245'),('1987-10-03','AA','2251'),('1987-10-03','AA','2251'),('1987-10-03','AA','2275'),('1987-10-03','AA','2278'),('1987-10-03','AA','2361'),('1987-10-03','AA','2490'),('1987-10-03','AA','2528'),('1987-10-03','AA','2528'),('1987-10-03','AA','2735'),('1987-10-03','AA','2735'),('1987-10-03','AL','2'),('1987-10-03','AL','2'),('1987-10-03','AL','7'),('1987-10-03','AL','7'),('1987-10-03','AL','26'),('1987-10-03','AL','26'),('1987-10-03','AL','34'),('1987-10-03','AL','34'),('1987-10-03','AL','36'),('1987-10-03','AL','36'),('1987-10-03','AL','45'),('1987-10-03','AL','45'),('1987-10-03','AL','45'),('1987-10-03','AL','91'),('1987-10-03','AL','91'),('1987-10-03','AL','104'),('1987-10-03','AL','104'),('1987-10-03','AL','109'),('1987-10-03','AL','112'),('1987-10-03','AL','112'),('1987-10-03','AL','149'),('1987-10-03','AL','160'),('1987-10-03','AL','160'),('1987-10-03','AL','165'),('1987-10-03','AL','171'),('1987-10-03','AL','171'),('1987-10-03','AL','176'),('1987-10-03','AL','176'),('1987-10-03','AL','179'),('1987-10-03','AL','215'),('1987-10-03','AL','231'),('1987-10-03','AL','263'),('1987-10-03','AL','287'),('1987-10-03','AL','287'),('1987-10-03','AL','309'),('1987-10-03','AL','309'),('1987-10-03','AL','344'),('1987-10-03','AL','344'),('1987-10-03','AL','357'),('1987-10-03','AL','357'),('1987-10-03','AL','381'),('1987-10-03','AL','396'),('1987-10-03','AL','397'),('1987-10-03','AL','397'),('1987-10-03','AL','397'),('1987-10-03','AL','416'),('1987-10-03','AL','417'),('1987-10-03','AL','438'),('1987-10-03','AL','451'),('1987-10-03','AL','451'),('1987-10-03','AL','491'),('1987-10-03','AL','491'),('1987-10-03','AL','523'),('1987-10-03','AL','523'),('1987-10-03','AL','525'),('1987-10-03','AL','525'),('1987-10-03','AL','533'),('1987-10-03','AL','546'),('1987-10-03','AL','546'),('1987-10-03','AL','556'),('1987-10-03','AL','556'),('1987-10-03','AL','601'),('1987-10-03','AL','601'),('1987-10-03','AL','627'),('1987-10-03','AL','670'),('1987-10-03','AL','673'),('1987-10-03','AL','680'),('1987-10-03','AL','905'),('1987-10-03','AL','936'),('1987-10-03','AL','966'),('1987-10-04','AA','2'),('1987-10-04','AA','2'),('1987-10-04','AA','2'),('1987-10-04','AA','7'),('1987-10-04','AA','7'),('1987-10-04','AA','26'),('1987-10-04','AA','34'),('1987-10-04','AA','36'),('1987-10-04','AA','91'),('1987-10-04','AA','101'),('1987-10-04','AA','101'),('1987-10-04','AA','109'),('1987-10-04','AA','109'),('1987-10-04','AA','112'),('1987-10-04','AA','123'),('1987-10-04','AA','123'),('1987-10-04','AA','160'),('1987-10-04','AA','165'),('1987-10-04','AA','165'),('1987-10-04','AA','165'),('1987-10-04','AA','176'),('1987-10-04','AA','176'),('1987-10-04','AA','176'),('1987-10-04','AA','176'),('1987-10-04','AA','179'),('1987-10-04','AA','179'),('1987-10-04','AA','215'),('1987-10-04','AA','215'),('1987-10-04','AA','231'),('1987-10-04','AA','231'),('1987-10-04','AA','263'),('1987-10-04','AA','263'),('1987-10-04','AA','268'),('1987-10-04','AA','268'),('1987-10-04','AA','281'),('1987-10-04','AA','287'),('1987-10-04','AA','287'),('1987-10-04','AA','309'),('1987-10-04','AA','309'),('1987-10-04','AA','309'),('1987-10-04','AA','341'),('1987-10-04','AA','344'),('1987-10-04','AA','344'),('1987-10-04','AA','347'),('1987-10-04','AA','347'),('1987-10-04','AA','381'),('1987-10-04','AA','381'),('1987-10-04','AA','381'),('1987-10-04','AA','396'),('1987-10-04','AA','396'),('1987-10-04','AA','397'),('1987-10-04','AA','397'),('1987-10-04','AA','417'),('
1987-10-04','AA','417'),('1987-10-04','AA','446'),('1987-10-04','AA','451'),('1987-10-04','AA','451'),('1987-10-04','AA','460'),('1987-10-04','AA','460'),('1987-10-04','AA','491'),('1987-10-04','AA','491'),('1987-10-04','AA','504'),('1987-10-04','AA','504'),('1987-10-04','AA','519'),('1987-10-04','AA','519'),('1987-10-04','AA','523'),('1987-10-04','AA','523'),('1987-10-04','AA','525'),('1987-10-04','AA','525'),('1987-10-04','AA','525'),('1987-10-04','AA','533'),('1987-10-04','AA','533'),('1987-10-04','AA','533'),('1987-10-04','AA','546'),('1987-10-04','AA','546'),('1987-10-04','AA','546'),('1987-10-04','AA','546'),('1987-10-04','AA','556'),('1987-10-04','AA','556'),('1987-10-04','AA','556'),('1987-10-04','AA','597'),('1987-10-04','AA','597'),('1987-10-04','AA','597'),('1987-10-04','AA','601'),('1987-10-04','AA','601'),('1987-10-04','AA','627'),('1987-10-04','AA','629'),('1987-10-04','AA','629'),('1987-10-04','AA','670'),('1987-10-04','AA','673'),('1987-10-04','AA','673'),('1987-10-04','AA','680'),('1987-10-04','AA','680'),('1987-10-04','AA','817'),('1987-10-04','AA','817'),('1987-10-04','AA','824'),('1987-10-04','AA','824'),('1987-10-04','AA','824'),('1987-10-04','AA','832'),('1987-10-04','AA','832'),('1987-10-04','AA','852'),('1987-10-04','AA','852'),('1987-10-04','AA','866'),('1987-10-04','AA','866'),('1987-10-04','AA','871'),('1987-10-04','AA','871'),('1987-10-04','AA','880'),('1987-10-04','AA','880'),('1987-10-04','AA','880'),('1987-10-04','AA','880'),('1987-10-04','AA','883'),('1987-10-04','AA','883'),('1987-10-04','AA','885'),('1987-10-04','AA','885'),('1987-10-04','AA','890'),('1987-10-04','AA','890'),('1987-10-04','AA','893'),('1987-10-04','AA','893'),('1987-10-04','AA','905'),('1987-10-04','AA','905'),('1987-10-04','AA','915'),('1987-10-04','AA','929'),('1987-10-04','AA','929'),('1987-10-04','AA','936'),('1987-10-04','AA','936'),('1987-10-04','AA','937'),('1987-10-04','AA','937'),('1987-10-04','AA','955'),('1987-10-04','AA','955'),('1987-10-04','AA','966'),('1987-10-04','AA','1002'),('1987-10-04','AA','1002'),('1987-10-04','AA','1004'),('1987-10-04','AA','1004'),('1987-10-04','AA','1015'),('1987-10-04','AA','1021'),('1987-10-04','AA','1021'),('1987-10-04','AA','1041'),('1987-10-04','AA','1041'),('1987-10-04','AA','1046'),('1987-10-04','AA','1046'),('1987-10-04','AA','1048'),('1987-10-04','AA','1048'),('1987-10-04','AA','1061'),('1987-10-04','AA','1061'),('1987-10-04','AA','1088'),('1987-10-04','AA','1088'),('1987-10-04','AA','2033'),('1987-10-04','AA','2033'),('1987-10-04','AA','2050'),('1987-10-04','AA','2058'),('1987-10-04','AA','2071'),('1987-10-04','AA','2071'),('1987-10-04','AA','2086'),('1987-10-04','AA','2111'),('1987-10-04','AA','2123'),('1987-10-04','AA','2123'),('1987-10-04','AA','2147'),('1987-10-04','AA','2147'),('1987-10-04','AA','2199'),('1987-10-04','AA','2199'),('1987-10-04','AA','2207'),('1987-10-04','AA','2207'),('1987-10-04','AA','2230'),('1987-10-04','AA','2245'),('1987-10-04','AA','2251'),('1987-10-04','AA','2251'),('1987-10-04','AA','2275'),('1987-10-04','AA','2278'),('1987-10-04','AA','2357'),('1987-10-04','AA','2361'),('1987-10-04','AA','2490'),('1987-10-04','AA','2528'),('1987-10-04','AA','2528'),('1987-10-04','AA','2735'),('1987-10-04','AA','2735'),('1987-10-04','AA','2751'),('1987-10-04','AL','7'),('1987-10-04','AL','7'),('1987-10-04','AL','26'),('1987-10-04','AL','26'),('1987-10-04','AL','34'),('1987-10-04','AL','34'),('1987-10-04','AL','36'),('1987-10-04','AL','36'),('1987-10-04','AL','45'),('1987-10-04','AL','45'),('1987-10-04','AL','45'),('1987-10-04'
,'AL','91'),('1987-10-04','AL','91'),('1987-10-04','AL','104'),('1987-10-04','AL','123'),('1987-10-04','AL','149'),('1987-10-04','AL','160'),('1987-10-04','AL','160'),('1987-10-04','AL','165'),('1987-10-04','AL','171'),('1987-10-04','AL','171'),('1987-10-04','AL','176'),('1987-10-04','AL','176'),('1987-10-04','AL','179'),('1987-10-04','AL','215'),('1987-10-04','AL','231'),('1987-10-04','AL','263'),('1987-10-04','AL','263'),('1987-10-04','AL','281'),('1987-10-04','AL','281'),('1987-10-04','AL','309'),('1987-10-04','AL','309'),('1987-10-04','AL','341'),('1987-10-04','AL','344'),('1987-10-04','AL','344'),('1987-10-04','AL','357'),('1987-10-04','AL','357'),('1987-10-04','AL','368'),('1987-10-04','AL','416'),('1987-10-04','AL','416'),('1987-10-04','AL','417'),('1987-10-04','AL','438'),('1987-10-04','AL','438'),('1987-10-04','AL','451'),('1987-10-04','AL','491'),('1987-10-04','AL','491'),('1987-10-04','AL','525'),('1987-10-04','AL','525'),('1987-10-04','AL','533'),('1987-10-04','AL','533'),('1987-10-04','AL','546'),('1987-10-04','AL','546'),('1987-10-04','AL','556'),('1987-10-04','AL','556'),('1987-10-04','AL','601'),('1987-10-04','AL','627'),('1987-10-04','AL','629'),('1987-10-04','AL','670'),('1987-10-04','AL','670'),('1987-10-04','AL','670'),('1987-10-04','AL','673'),('1987-10-04','AL','680'),('1987-10-04','AL','937'),('1987-10-04','AL','937'),('1987-10-04','AL','955'),('1987-10-12','AA','2'),('1987-10-12','AA','2'),('1987-10-12','AA','2'),('1987-10-12','AA','7'),('1987-10-12','AA','7'),('1987-10-12','AA','26'),('1987-10-12','AA','34'),('1987-10-12','AA','36'),('1987-10-12','AA','91'),('1987-10-12','AA','101'),('1987-10-12','AA','101'),('1987-10-12','AA','109'),('1987-10-12','AA','109'),('1987-10-12','AA','112'),('1987-10-12','AA','123'),('1987-10-12','AA','123'),('1987-10-12','AA','160'),('1987-10-12','AA','165'),('1987-10-12','AA','165'),('1987-10-12','AA','165'),('1987-10-12','AA','176'),('1987-10-12','AA','176'),('1987-10-12','AA','176'),('1987-10-12','AA','176'),('1987-10-12','AA','179'),('1987-10-12','AA','179'),('1987-10-12','AA','215'),('1987-10-12','AA','215'),('1987-10-12','AA','231'),('1987-10-12','AA','263'),('1987-10-12','AA','263'),('1987-10-12','AA','268'),('1987-10-12','AA','268'),('1987-10-12','AA','281'),('1987-10-12','AA','287'),('1987-10-12','AA','287'),('1987-10-12','AA','309'),('1987-10-12','AA','309'),('1987-10-12','AA','309'),('1987-10-12','AA','341'),('1987-10-12','AA','344'),('1987-10-12','AA','344'),('1987-10-12','AA','347'),('1987-10-12','AA','347'),('1987-10-12','AA','368'),('1987-10-12','AA','381'),('1987-10-12','AA','381'),('1987-10-12','AA','381'),('1987-10-12','AA','396'),('1987-10-12','AA','396'),('1987-10-12','AA','397'),('1987-10-12','AA','397'),('1987-10-12','AA','417'),('1987-10-12','AA','417'),('1987-10-12','AA','446'),('1987-10-12','AA','451'),('1987-10-12','AA','451'),('1987-10-12','AA','460'),('1987-10-12','AA','460'),('1987-10-12','AA','491'),('1987-10-12','AA','504'),('1987-10-12','AA','504'),('1987-10-12','AA','519'),('1987-10-12','AA','523'),('1987-10-12','AA','523'),('1987-10-12','AA','525'),('1987-10-12','AA','525'),('1987-10-12','AA','525'),('1987-10-12','AA','533'),('1987-10-12','AA','533'),('1987-10-12','AA','533'),('1987-10-12','AA','546'),('1987-10-12','AA','546'),('1987-10-12','AA','546'),('1987-10-12','AA','546'),('1987-10-12','AA','556'),('1987-10-12','AA','556'),('1987-10-12','AA','556'),('1987-10-12','AA','597'),('1987-10-12','AA','597'),('1987-10-12','AA','597'),('1987-10-12','AA','601'),('1987-10-12','AA','601'),('1987-10-12','AA','62
7'),('1987-10-12','AA','629'),('1987-10-12','AA','629'),('1987-10-12','AA','670'),('1987-10-12','AA','673'),('1987-10-12','AA','673'),('1987-10-12','AA','680'),('1987-10-12','AA','680'),('1987-10-12','AA','817'),('1987-10-12','AA','817'),('1987-10-12','AA','824'),('1987-10-12','AA','824'),('1987-10-12','AA','824'),('1987-10-12','AA','824'),('1987-10-12','AA','832'),('1987-10-12','AA','832'),('1987-10-12','AA','852'),('1987-10-12','AA','852'),('1987-10-12','AA','866'),('1987-10-12','AA','866'),('1987-10-12','AA','871'),('1987-10-12','AA','871'),('1987-10-12','AA','880'),('1987-10-12','AA','880'),('1987-10-12','AA','880'),('1987-10-12','AA','880'),('1987-10-12','AA','883'),('1987-10-12','AA','883'),('1987-10-12','AA','885'),('1987-10-12','AA','885'),('1987-10-12','AA','885'),('1987-10-12','AA','890'),('1987-10-12','AA','890'),('1987-10-12','AA','893'),('1987-10-12','AA','893'),('1987-10-12','AA','905'),('1987-10-12','AA','905'),('1987-10-12','AA','915'),('1987-10-12','AA','929'),('1987-10-12','AA','929'),('1987-10-12','AA','936'),('1987-10-12','AA','936'),('1987-10-12','AA','937'),('1987-10-12','AA','937'),('1987-10-12','AA','955'),('1987-10-12','AA','955'),('1987-10-12','AA','966'),('1987-10-12','AA','1002'),('1987-10-12','AA','1002'),('1987-10-12','AA','1004'),('1987-10-12','AA','1015'),('1987-10-12','AA','1015'),('1987-10-12','AA','1021'),('1987-10-12','AA','1021'),('1987-10-12','AA','1041'),('1987-10-12','AA','1041'),('1987-10-12','AA','1046'),('1987-10-12','AA','1046'),('1987-10-12','AA','1048'),('1987-10-12','AA','1048'),('1987-10-12','AA','1061'),('1987-10-12','AA','1061'),('1987-10-12','AA','1088'),('1987-10-12','AA','1088'),('1987-10-12','AA','2033'),('1987-10-12','AA','2033'),('1987-10-12','AA','2050'),('1987-10-12','AA','2058'),('1987-10-12','AA','2071'),('1987-10-12','AA','2071'),('1987-10-12','AA','2086'),('1987-10-12','AA','2105'),('1987-10-12','AA','2111'),('1987-10-12','AA','2123'),('1987-10-12','AA','2123'),('1987-10-12','AA','2147'),('1987-10-12','AA','2147'),('1987-10-12','AA','2199'),('1987-10-12','AA','2199'),('1987-10-12','AA','2207'),('1987-10-12','AA','2207'),('1987-10-12','AA','2217'),('1987-10-12','AA','2230'),('1987-10-12','AA','2245'),('1987-10-12','AA','2251'),('1987-10-12','AA','2251'),('1987-10-12','AA','2275'),('1987-10-12','AA','2278'),('1987-10-12','AA','2351'),('1987-10-12','AA','2357'),('1987-10-12','AA','2361'),('1987-10-12','AA','2490'),('1987-10-12','AA','2528'),('1987-10-12','AA','2528'),('1987-10-12','AA','2735'),('1987-10-12','AA','2735'),('1987-10-12','AA','2751'),('1987-10-12','AL','2'),('1987-10-12','AL','2'),('1987-10-12','AL','7'),('1987-10-12','AL','7'),('1987-10-12','AL','26'),('1987-10-12','AL','26'),('1987-10-12','AL','34'),('1987-10-12','AL','34'),('1987-10-12','AL','36'),('1987-10-12','AL','36'),('1987-10-12','AL','45'),('1987-10-12','AL','45'),('1987-10-12','AL','45'),('1987-10-12','AL','91'),('1987-10-12','AL','91'),('1987-10-12','AL','104'),('1987-10-12','AL','104'),('1987-10-12','AL','104'),('1987-10-12','AL','109'),('1987-10-12','AL','112'),('1987-10-12','AL','112'),('1987-10-12','AL','123'),('1987-10-12','AL','149'),('1987-10-12','AL','160'),('1987-10-12','AL','160'),('1987-10-12','AL','165'),('1987-10-12','AL','171'),('1987-10-12','AL','171'),('1987-10-12','AL','176'),('1987-10-12','AL','176'),('1987-10-12','AL','179'),('1987-10-12','AL','215'),('1987-10-12','AL','231'),('1987-10-12','AL','263'),('1987-10-12','AL','263'),('1987-10-12','AL','268'),('1987-10-12','AL','268'),('1987-10-12','AL','268'),('1987-10-12','AL','281'),('1987-10-
12','AL','281'),('1987-10-12','AL','287'),('1987-10-12','AL','287'),('1987-10-12','AL','309'),('1987-10-12','AL','309'),('1987-10-12','AL','341'),('1987-10-12','AL','344'),('1987-10-12','AL','344'),('1987-10-12','AL','357'),('1987-10-12','AL','357'),('1987-10-12','AL','368'),('1987-10-12','AL','381'),('1987-10-12','AL','396'),('1987-10-12','AL','397'),('1987-10-12','AL','397'),('1987-10-12','AL','416'),('1987-10-12','AL','416'),('1987-10-12','AL','417'),('1987-10-12','AL','438'),('1987-10-12','AL','438'),('1987-10-12','AL','446'),('1987-10-12','AL','451'),('1987-10-12','AL','451'),('1987-10-12','AL','491'),('1987-10-12','AL','491'),('1987-10-12','AL','523'),('1987-10-12','AL','523'),('1987-10-12','AL','523'),('1987-10-12','AL','525'),('1987-10-12','AL','525'),('1987-10-12','AL','533'),('1987-10-12','AL','533'),('1987-10-12','AL','546'),('1987-10-12','AL','546'),('1987-10-12','AL','556'),('1987-10-12','AL','556'),('1987-10-12','AL','627'),('1987-10-12','AL','629'),('1987-10-12','AL','670'),('1987-10-12','AL','670'),('1987-10-12','AL','670'),('1987-10-12','AL','673'),('1987-10-13','AA','2'),('1987-10-13','AA','2'),('1987-10-13','AA','2'),('1987-10-13','AA','7'),('1987-10-13','AA','7'),('1987-10-13','AA','26'),('1987-10-13','AA','34'),('1987-10-13','AA','36'),('1987-10-13','AA','91'),('1987-10-13','AA','101'),('1987-10-13','AA','101'),('1987-10-13','AA','109'),('1987-10-13','AA','109'),('1987-10-13','AA','112'),('1987-10-13','AA','123'),('1987-10-13','AA','123'),('1987-10-13','AA','160'),('1987-10-13','AA','165'),('1987-10-13','AA','165'),('1987-10-13','AA','165'),('1987-10-13','AA','176'),('1987-10-13','AA','176'),('1987-10-13','AA','176'),('1987-10-13','AA','176'),('1987-10-13','AA','179'),('1987-10-13','AA','179'),('1987-10-13','AA','215'),('1987-10-13','AA','215'),('1987-10-13','AA','231'),('1987-10-13','AA','231'),('1987-10-13','AA','263'),('1987-10-13','AA','263'),('1987-10-13','AA','268'),('1987-10-13','AA','268'),('1987-10-13','AA','281'),('1987-10-13','AA','287'),('1987-10-13','AA','287'),('1987-10-13','AA','309'),('1987-10-13','AA','309'),('1987-10-13','AA','309'),('1987-10-13','AA','341'),('1987-10-13','AA','344'),('1987-10-13','AA','344'),('1987-10-13','AA','347'),('1987-10-13','AA','347'),('1987-10-13','AA','368'),('1987-10-13','AA','381'),('1987-10-13','AA','381'),('1987-10-13','AA','381'),('1987-10-13','AA','396'),('1987-10-13','AA','396'),('1987-10-13','AA','397'),('1987-10-13','AA','397'),('1987-10-13','AA','417'),('1987-10-13','AA','417'),('1987-10-13','AA','446'),('1987-10-13','AA','451'),('1987-10-13','AA','451'),('1987-10-13','AA','460'),('1987-10-13','AA','460'),('1987-10-13','AA','491'),('1987-10-13','AA','504'),('1987-10-13','AA','504'),('1987-10-13','AA','519'),('1987-10-13','AA','519'),('1987-10-13','AA','523'),('1987-10-13','AA','523'),('1987-10-13','AA','525'),('1987-10-13','AA','525'),('1987-10-13','AA','533'),('1987-10-13','AA','533'),('1987-10-13','AA','533'),('1987-10-13','AA','546'),('1987-10-13','AA','546'),('1987-10-13','AA','546'),('1987-10-13','AA','556'),('1987-10-13','AA','556'),('1987-10-13','AA','556'),('1987-10-13','AA','597'),('1987-10-13','AA','597'),('1987-10-13','AA','597'),('1987-10-13','AA','601'),('1987-10-13','AA','601'),('1987-10-13','AA','627'),('1987-10-13','AA','629'),('1987-10-13','AA','629'),('1987-10-13','AA','673'),('1987-10-13','AA','673'),('1987-10-13','AA','680'),('1987-10-13','AA','817'),('1987-10-13','AA','817'),('1987-10-13','AA','824'),('1987-10-13','AA','824'),('1987-10-13','AA','824'),('1987-10-13','AA','832'),('1987-10-13','AA
','832'),('1987-10-13','AA','852'),('1987-10-13','AA','866'),('1987-10-13','AA','866'),('1987-10-13','AA','871'),('1987-10-13','AA','871'),('1987-10-13','AA','880'),('1987-10-13','AA','880'),('1987-10-13','AA','880'),('1987-10-13','AA','880'),('1987-10-13','AA','883'),('1987-10-13','AA','883'),('1987-10-13','AA','885'),('1987-10-13','AA','885'),('1987-10-13','AA','885'),('1987-10-13','AA','890'),('1987-10-13','AA','890'),('1987-10-13','AA','893'),('1987-10-13','AA','893'),('1987-10-13','AA','905'),('1987-10-13','AA','905'),('1987-10-13','AA','915'),('1987-10-13','AA','929'),('1987-10-13','AA','929'),('1987-10-13','AA','936'),('1987-10-13','AA','936'),('1987-10-13','AA','937'),('1987-10-13','AA','937'),('1987-10-13','AA','955'),('1987-10-13','AA','955'),('1987-10-13','AA','966'),('1987-10-13','AA','1002'),('1987-10-13','AA','1002'),('1987-10-13','AA','1004'),('1987-10-13','AA','1004'),('1987-10-13','AA','1015'),('1987-10-13','AA','1015'),('1987-10-13','AA','1021'),('1987-10-13','AA','1021'),('1987-10-13','AA','1041'),('1987-10-13','AA','1041'),('1987-10-13','AA','1046'),('1987-10-13','AA','1046'),('1987-10-13','AA','1048'),('1987-10-13','AA','1048'),('1987-10-13','AA','1061'),('1987-10-13','AA','1061'),('1987-10-13','AA','1088'),('1987-10-13','AA','1088'),('1987-10-13','AA','2033'),('1987-10-13','AA','2050'),('1987-10-13','AA','2071'),('1987-10-13','AA','2071'),('1987-10-13','AA','2086'),('1987-10-13','AA','2105'),('1987-10-13','AA','2111'),('1987-10-13','AA','2123'),('1987-10-13','AA','2123'),('1987-10-13','AA','2147'),('1987-10-13','AA','2147'),('1987-10-13','AA','2199'),('1987-10-13','AA','2199'),('1987-10-13','AA','2207'),('1987-10-13','AA','2207'),('1987-10-13','AA','2217'),('1987-10-13','AA','2230'),('1987-10-13','AA','2245'),('1987-10-13','AA','2251'),('1987-10-13','AA','2251'),('1987-10-13','AA','2275'),('1987-10-13','AA','2351'),('1987-10-13','AA','2357'),('1987-10-13','AA','2361'),('1987-10-13','AA','2490'),('1987-10-13','AA','2528'),('1987-10-13','AA','2528'),('1987-10-13','AA','2735'),('1987-10-13','AA','2735'),('1987-10-13','AA','2751'),('1987-10-13','AL','2'),('1987-10-13','AL','2'),('1987-10-13','AL','7'),('1987-10-13','AL','7'),('1987-10-13','AL','26'),('1987-10-13','AL','26'),('1987-10-13','AL','34'),('1987-10-13','AL','34'),('1987-10-13','AL','36'),('1987-10-13','AL','36'),('1987-10-13','AL','45'),('1987-10-13','AL','45'),('1987-10-13','AL','45'),('1987-10-13','AL','91'),('1987-10-13','AL','91'),('1987-10-13','AL','104'),('1987-10-13','AL','104'),('1987-10-13','AL','104'),('1987-10-13','AL','109'),('1987-10-13','AL','112'),('1987-10-13','AL','112'),('1987-10-13','AL','123'),('1987-10-13','AL','149'),('1987-10-13','AL','160'),('1987-10-13','AL','160'),('1987-10-13','AL','171'),('1987-10-13','AL','171'),('1987-10-13','AL','176'),('1987-10-13','AL','176'),('1987-10-13','AL','179'),('1987-10-13','AL','231'),('1987-10-13','AL','263'),('1987-10-13','AL','263'),('1987-10-13','AL','268'),('1987-10-13','AL','268'),('1987-10-13','AL','268'),('1987-10-13','AL','281'),('1987-10-13','AL','281'),('1987-10-13','AL','287'),('1987-10-13','AL','287'),('1987-10-13','AL','309'),('1987-10-13','AL','309'),('1987-10-13','AL','341'),('1987-10-13','AL','357'),('1987-10-13','AL','357'),('1987-10-13','AL','368'),('1987-10-13','AL','381'),('1987-10-13','AL','396'),('1987-10-13','AL','397'),('1987-10-13','AL','397'),('1987-10-13','AL','416'),('1987-10-13','AL','417'),('1987-10-13','AL','438'),('1987-10-13','AL','438'),('1987-10-13','AL','446'),('1987-10-13','AL','451'),('1987-10-13','AL','451'),('1987-
10-13','AL','491'),('1987-10-13','AL','491'),('1987-10-13','AL','523'),('1987-10-13','AL','523'),('1987-10-13','AL','523'),('1987-10-13','AL','525'),('1987-10-13','AL','525'),('1987-10-13','AL','533'),('1987-10-13','AL','533'),('1987-10-13','AL','546'),('1987-10-13','AL','546'),('1987-10-13','AL','556'),('1987-10-13','AL','556'),('1987-10-13','AL','601'),('1987-10-13','AL','601'),('1987-10-13','AL','627'),('1987-10-13','AL','629'),('1987-10-13','AL','670'),('1987-10-13','AL','670'),('1987-10-13','AL','670'),('1987-10-13','AL','673'),('1987-10-13','AL','680'),('1987-10-14','AA','2'),('1987-10-14','AA','2'),('1987-10-14','AA','2'),('1987-10-14','AA','7'),('1987-10-14','AA','7'),('1987-10-14','AA','26'),('1987-10-14','AA','34'),('1987-10-14','AA','36'),('1987-10-14','AA','91'),('1987-10-14','AA','101'),('1987-10-14','AA','101'),('1987-10-14','AA','109'),('1987-10-14','AA','109'),('1987-10-14','AA','112'),('1987-10-14','AA','123'),('1987-10-14','AA','123'),('1987-10-14','AA','160'),('1987-10-14','AA','165'),('1987-10-14','AA','165'),('1987-10-14','AA','176'),('1987-10-14','AA','176'),('1987-10-14','AA','176'),('1987-10-14','AA','176'),('1987-10-14','AA','179'),('1987-10-14','AA','179'),('1987-10-14','AA','215'),('1987-10-14','AA','215'),('1987-10-14','AA','231'),('1987-10-14','AA','231'),('1987-10-14','AA','263'),('1987-10-14','AA','263'),('1987-10-14','AA','268'),('1987-10-14','AA','268'),('1987-10-14','AA','281'),('1987-10-14','AA','287'),('1987-10-14','AA','309'),('1987-10-14','AA','309'),('1987-10-14','AA','309'),('1987-10-14','AA','341'),('1987-10-14','AA','344'),('1987-10-14','AA','344'),('1987-10-14','AA','347'),('1987-10-14','AA','347'),('1987-10-14','AA','368'),('1987-10-14','AA','381'),('1987-10-14','AA','381'),('1987-10-14','AA','381'),('1987-10-14','AA','396'),('1987-10-14','AA','396'),('1987-10-14','AA','397'),('1987-10-14','AA','417'),('1987-10-14','AA','446'),('1987-10-14','AA','451'),('1987-10-14','AA','451'),('1987-10-14','AA','460'),('1987-10-14','AA','460'),('1987-10-14','AA','491'),('1987-10-14','AA','504'),('1987-10-14','AA','519'),('1987-10-14','AA','519'),('1987-10-14','AA','523'),('1987-10-14','AA','523'),('1987-10-14','AA','525'),('1987-10-14','AA','525'),('1987-10-14','AA','533'),('1987-10-14','AA','533'),('1987-10-14','AA','533'),('1987-10-14','AA','546'),('1987-10-14','AA','546'),('1987-10-14','AA','546'),('1987-10-14','AA','546'),('1987-10-14','AA','556'),('1987-10-14','AA','556'),('1987-10-14','AA','556'),('1987-10-14','AA','597'),('1987-10-14','AA','597'),('1987-10-14','AA','597'),('1987-10-14','AA','601'),('1987-10-14','AA','601'),('1987-10-14','AA','627'),('1987-10-14','AA','629'),('1987-10-14','AA','629'),('1987-10-14','AA','670'),('1987-10-14','AA','673'),('1987-10-14','AA','673'),('1987-10-14','AA','680'),('1987-10-14','AA','680'),('1987-10-14','AA','817'),('1987-10-14','AA','817'),('1987-10-14','AA','824'),('1987-10-14','AA','824'),('1987-10-14','AA','824'),('1987-10-14','AA','824'),('1987-10-14','AA','832'),('1987-10-14','AA','832'),('1987-10-14','AA','852'),('1987-10-14','AA','866'),('1987-10-14','AA','866'),('1987-10-14','AA','871'),('1987-10-14','AA','871'),('1987-10-14','AA','880'),('1987-10-14','AA','880'),('1987-10-14','AA','880'),('1987-10-14','AA','883'),('1987-10-14','AA','883'),('1987-10-14','AA','885'),('1987-10-14','AA','885'),('1987-10-14','AA','885'),('1987-10-14','AA','890'),('1987-10-14','AA','890'),('1987-10-14','AA','893'),('1987-10-14','AA','893'),('1987-10-14','AA','905'),('1987-10-14','AA','905'),('1987-10-14','AA','915'),('1987-10-14',
'AA','929'),('1987-10-14','AA','929'),('1987-10-14','AA','936'),('1987-10-14','AA','936'),('1987-10-14','AA','937'),('1987-10-14','AA','937'),('1987-10-14','AA','955'),('1987-10-14','AA','955'),('1987-10-14','AA','966'),('1987-10-14','AA','1002'),('1987-10-14','AA','1002'),('1987-10-14','AA','1004'),('1987-10-14','AA','1004'),('1987-10-14','AA','1015'),('1987-10-14','AA','1015'),('1987-10-14','AA','1021'),('1987-10-14','AA','1021'),('1987-10-14','AA','1041'),('1987-10-14','AA','1041'),('1987-10-14','AA','1046'),('1987-10-14','AA','1046'),('1987-10-14','AA','1048'),('1987-10-14','AA','1048'),('1987-10-14','AA','1061'),('1987-10-14','AA','1061'),('1987-10-14','AA','1088'),('1987-10-14','AA','1088'),('1987-10-14','AA','2033'),('1987-10-14','AA','2033'),('1987-10-14','AA','2050'),('1987-10-14','AA','2058'),('1987-10-14','AA','2071'),('1987-10-14','AA','2071'),('1987-10-14','AA','2086'),('1987-10-14','AA','2111'),('1987-10-14','AA','2123'),('1987-10-14','AA','2123'),('1987-10-14','AA','2147'),('1987-10-14','AA','2147'),('1987-10-14','AA','2199'),('1987-10-14','AA','2199'),('1987-10-14','AA','2207'),('1987-10-14','AA','2207'),('1987-10-14','AA','2217'),('1987-10-14','AA','2230'),('1987-10-14','AA','2251'),('1987-10-14','AA','2251'),('1987-10-14','AA','2278'),('1987-10-14','AA','2351'),('1987-10-14','AA','2357'),('1987-10-14','AA','2490'),('1987-10-14','AA','2528'),('1987-10-14','AA','2528'),('1987-10-14','AA','2735'),('1987-10-14','AA','2735'),('1987-10-14','AA','2751'),('1987-10-14','AL','2'),('1987-10-14','AL','2'),('1987-10-14','AL','7'),('1987-10-14','AL','7'),('1987-10-14','AL','26'),('1987-10-14','AL','26'),('1987-10-14','AL','34'),('1987-10-14','AL','36'),('1987-10-14','AL','36'),('1987-10-14','AL','45'),('1987-10-14','AL','45'),('1987-10-14','AL','91'),('1987-10-14','AL','91'),('1987-10-14','AL','104'),('1987-10-14','AL','104'),('1987-10-14','AL','104'),('1987-10-14','AL','109'),('1987-10-14','AL','112'),('1987-10-14','AL','112'),('1987-10-14','AL','123'),('1987-10-14','AL','149'),('1987-10-14','AL','160'),('1987-10-14','AL','160'),('1987-10-14','AL','165'),('1987-10-14','AL','171'),('1987-10-14','AL','171'),('1987-10-14','AL','176'),('1987-10-14','AL','176'),('1987-10-14','AL','179'),('1987-10-14','AL','215'),('1987-10-14','AL','231'),('1987-10-14','AL','263'),('1987-10-14','AL','263'),('1987-10-14','AL','268'),('1987-10-14','AL','268'),('1987-10-14','AL','268'),('1987-10-14','AL','281'),('1987-10-14','AL','281'),('1987-10-14','AL','287'),('1987-10-14','AL','287'),('1987-10-14','AL','309'),('1987-10-14','AL','309'),('1987-10-14','AL','341'),('1987-10-14','AL','344'),('1987-10-14','AL','344'),('1987-10-14','AL','357'),('1987-10-14','AL','357'),('1987-10-14','AL','368'),('1987-10-14','AL','381'),('1987-10-14','AL','396'),('1987-10-14','AL','397'),('1987-10-14','AL','397'),('1987-10-14','AL','416'),('1987-10-14','AL','416'),('1987-10-14','AL','417'),('1987-10-14','AL','438'),('1987-10-14','AL','438'),('1987-10-14','AL','446'),('1987-10-14','AL','451'),('1987-10-14','AL','451'),('1987-10-14','AL','491'),('1987-10-14','AL','491'),('1987-10-14','AL','523'),('1987-10-14','AL','523'),('1987-10-14','AL','523'),('1987-10-14','AL','525'),('1987-10-14','AL','525'),('1987-10-14','AL','533'),('1987-10-14','AL','533'),('1987-10-14','AL','546'),('1987-10-14','AL','546'),('1987-10-14','AL','556'),('1987-10-14','AL','556'),('1987-10-14','AL','601'),('1987-10-14','AL','601'),('1987-10-14','AL','627'),('1987-10-14','AL','629'),('1987-10-14','AL','670'),('1987-10-14','AL','670'),('1987-10-14','AL','670'),('1
987-10-14','AL','673'),('1987-10-14','AL','680'),('1987-10-15','AA','2'),('1987-10-15','AA','2'),('1987-10-15','AA','2'),('1987-10-15','AA','7'),('1987-10-15','AA','7'),('1987-10-15','AA','26'),('1987-10-15','AA','34'),('1987-10-15','AA','36'),('1987-10-15','AA','91'),('1987-10-15','AA','101'),('1987-10-15','AA','101'),('1987-10-15','AA','109'),('1987-10-15','AA','109'),('1987-10-15','AA','112'),('1987-10-15','AA','123'),('1987-10-15','AA','123'),('1987-10-15','AA','160'),('1987-10-15','AA','165'),('1987-10-15','AA','165'),('1987-10-15','AA','165'),('1987-10-15','AA','176'),('1987-10-15','AA','176'),('1987-10-15','AA','176'),('1987-10-15','AA','176'),('1987-10-15','AA','179'),('1987-10-15','AA','179'),('1987-10-15','AA','215'),('1987-10-15','AA','215'),('1987-10-15','AA','231'),('1987-10-15','AA','263'),('1987-10-15','AA','263'),('1987-10-15','AA','268'),('1987-10-15','AA','268'),('1987-10-15','AA','281'),('1987-10-15','AA','287'),('1987-10-15','AA','287'),('1987-10-15','AA','309'),('1987-10-15','AA','309'),('1987-10-15','AA','309'),('1987-10-15','AA','341'),('1987-10-15','AA','344'),('1987-10-15','AA','344'),('1987-10-15','AA','347'),('1987-10-15','AA','347'),('1987-10-15','AA','368'),('1987-10-15','AA','381'),('1987-10-15','AA','381'),('1987-10-15','AA','396'),('1987-10-15','AA','396'),('1987-10-15','AA','397'),('1987-10-15','AA','397'),('1987-10-15','AA','417'),('1987-10-15','AA','417'),('1987-10-15','AA','446'),('1987-10-15','AA','451'),('1987-10-15','AA','451'),('1987-10-15','AA','460'),('1987-10-15','AA','460'),('1987-10-15','AA','491'),('1987-10-15','AA','504'),('1987-10-15','AA','504'),('1987-10-15','AA','519'),('1987-10-15','AA','519'),('1987-10-15','AA','523'),('1987-10-15','AA','523'),('1987-10-15','AA','525'),('1987-10-15','AA','525'),('1987-10-15','AA','525'),('1987-10-15','AA','533'),('1987-10-15','AA','533'),('1987-10-15','AA','533'),('1987-10-15','AA','546'),('1987-10-15','AA','546'),('1987-10-15','AA','546'),('1987-10-15','AA','546'),('1987-10-15','AA','556'),('1987-10-15','AA','556'),('1987-10-15','AA','556'),('1987-10-15','AA','597'),('1987-10-15','AA','597'),('1987-10-15','AA','597'),('1987-10-15','AA','601'),('1987-10-15','AA','601'),('1987-10-15','AA','629'),('1987-10-15','AA','629'),('1987-10-15','AA','670'),('1987-10-15','AA','673'),('1987-10-15','AA','673'),('1987-10-15','AA','680'),('1987-10-15','AA','680'),('1987-10-15','AA','817'),('1987-10-15','AA','817'),('1987-10-15','AA','824'),('1987-10-15','AA','824'),('1987-10-15','AA','824'),('1987-10-15','AA','824'),('1987-10-15','AA','832'),('1987-10-15','AA','832'),('1987-10-15','AA','852'),('1987-10-15','AA','866'),('1987-10-15','AA','866'),('1987-10-15','AA','871'),('1987-10-15','AA','871'),('1987-10-15','AA','880'),('1987-10-15','AA','880'),('1987-10-15','AA','880'),('1987-10-15','AA','883'),('1987-10-15','AA','883'),('1987-10-15','AA','885'),('1987-10-15','AA','885'),('1987-10-15','AA','885'),('1987-10-15','AA','890'),('1987-10-15','AA','890'),('1987-10-15','AA','893'),('1987-10-15','AA','893'),('1987-10-15','AA','905'),('1987-10-15','AA','905'),('1987-10-15','AA','915'),('1987-10-15','AA','929'),('1987-10-15','AA','929'),('1987-10-15','AA','936'),('1987-10-15','AA','936'),('1987-10-15','AA','937'),('1987-10-15','AA','955'),('1987-10-15','AA','955'),('1987-10-15','AA','966'),('1987-10-15','AA','1002'),('1987-10-15','AA','1002'),('1987-10-15','AA','1004'),('1987-10-15','AA','1004'),('1987-10-15','AA','1015'),('1987-10-15','AA','1015'),('1987-10-15','AA','1021'),('1987-10-15','AA','1021'),('1987-10-15','AA','1041'),(
'1987-10-15','AA','1041'),('1987-10-15','AA','1046'),('1987-10-15','AA','1046'),('1987-10-15','AA','1048'),('1987-10-15','AA','1048'),('1987-10-15','AA','1061'),('1987-10-15','AA','1061'),('1987-10-15','AA','1088'),('1987-10-15','AA','1088'),('1987-10-15','AA','2033'),('1987-10-15','AA','2033'),('1987-10-15','AA','2050'),('1987-10-15','AA','2058'),('1987-10-15','AA','2071'),('1987-10-15','AA','2071'),('1987-10-15','AA','2086'),('1987-10-15','AA','2105'),('1987-10-15','AA','2111'),('1987-10-15','AA','2123'),('1987-10-15','AA','2123'),('1987-10-15','AA','2147'),('1987-10-15','AA','2147'),('1987-10-15','AA','2199'),('1987-10-15','AA','2199'),('1987-10-15','AA','2207'),('1987-10-15','AA','2207'),('1987-10-15','AA','2217'),('1987-10-15','AA','2230'),('1987-10-15','AA','2245'),('1987-10-15','AA','2251'),('1987-10-15','AA','2251'),('1987-10-15','AA','2275'),('1987-10-15','AA','2278'),('1987-10-15','AA','2351'),('1987-10-15','AA','2357'),('1987-10-15','AA','2361'),('1987-10-15','AA','2490'),('1987-10-15','AA','2528'),('1987-10-15','AA','2528'),('1987-10-15','AA','2735'),('1987-10-15','AA','2735'),('1987-10-15','AA','2751'),('1987-10-15','AL','2'),('1987-10-15','AL','2'),('1987-10-15','AL','7'),('1987-10-15','AL','7'),('1987-10-15','AL','26'),('1987-10-15','AL','26'),('1987-10-15','AL','34'),('1987-10-15','AL','34'),('1987-10-15','AL','36'),('1987-10-15','AL','36'),('1987-10-15','AL','45'),('1987-10-15','AL','45'),('1987-10-15','AL','45'),('1987-10-15','AL','91'),('1987-10-15','AL','91'),('1987-10-15','AL','104'),('1987-10-15','AL','104'),('1987-10-15','AL','104'),('1987-10-15','AL','109'),('1987-10-15','AL','112'),('1987-10-15','AL','112'),('1987-10-15','AL','123'),('1987-10-15','AL','149'),('1987-10-15','AL','160'),('1987-10-15','AL','160'),('1987-10-15','AL','165'),('1987-10-15','AL','171'),('1987-10-15','AL','171'),('1987-10-15','AL','176'),('1987-10-15','AL','176'),('1987-10-15','AL','179'),('1987-10-15','AL','215'),('1987-10-15','AL','231'),('1987-10-15','AL','263'),('1987-10-15','AL','263'),('1987-10-15','AL','268'),('1987-10-15','AL','268'),('1987-10-15','AL','268'),('1987-10-15','AL','281'),('1987-10-15','AL','281'),('1987-10-15','AL','287'),('1987-10-15','AL','287'),('1987-10-15','AL','309'),('1987-10-15','AL','309'),('1987-10-15','AL','341'),('1987-10-15','AL','344'),('1987-10-15','AL','344'),('1987-10-15','AL','357'),('1987-10-15','AL','357'),('1987-10-15','AL','368'),('1987-10-15','AL','381'),('1987-10-15','AL','396'),('1987-10-15','AL','397'),('1987-10-15','AL','397'),('1987-10-15','AL','416'),('1987-10-15','AL','416'),('1987-10-15','AL','417'),('1987-10-15','AL','438'),('1987-10-15','AL','438'),('1987-10-15','AL','446'),('1987-10-15','AL','451'),('1987-10-15','AL','451'),('1987-10-15','AL','491'),('1987-10-15','AL','491'),('1987-10-15','AL','523'),('1987-10-15','AL','523'),('1987-10-15','AL','523'),('1987-10-15','AL','525'),('1987-10-15','AL','525'),('1987-10-15','AL','533'),('1987-10-15','AL','533'),('1987-10-15','AL','546'),('1987-10-15','AL','546'),('1987-10-15','AL','556'),('1987-10-15','AL','556'),('1987-10-15','AL','601'),('1987-10-15','AL','601'),('1987-10-15','AL','627'),('1987-10-15','AL','629'),('1987-10-15','AL','670'),('1987-10-15','AL','670'),('1987-10-15','AL','670'),('1987-10-15','AL','673'),('1987-10-15','AL','680'),('1987-10-16','AA','2'),('1987-10-16','AA','2'),('1987-10-16','AA','2'),('1987-10-16','AA','7'),('1987-10-16','AA','7'),('1987-10-16','AA','26'),('1987-10-16','AA','34'),('1987-10-16','AA','36'),('1987-10-16','AA','91'),('1987-10-16','AA','101'),('1987-10-1
6','AA','101'),('1987-10-16','AA','109'),('1987-10-16','AA','109'),('1987-10-16','AA','112'),('1987-10-16','AA','123'),('1987-10-16','AA','160'),('1987-10-16','AA','165'),('1987-10-16','AA','165'),('1987-10-16','AA','176'),('1987-10-16','AA','176'),('1987-10-16','AA','176'),('1987-10-16','AA','176'),('1987-10-16','AA','179'),('1987-10-16','AA','179'),('1987-10-16','AA','215'),('1987-10-16','AA','215'),('1987-10-16','AA','231'),('1987-10-16','AA','263'),('1987-10-16','AA','263'),('1987-10-16','AA','268'),('1987-10-16','AA','281'),('1987-10-16','AA','287'),('1987-10-16','AA','287'),('1987-10-16','AA','309'),('1987-10-16','AA','309'),('1987-10-16','AA','309'),('1987-10-16','AA','341'),('1987-10-16','AA','344'),('1987-10-16','AA','344'),('1987-10-16','AA','347'),('1987-10-16','AA','347'),('1987-10-16','AA','368'),('1987-10-16','AA','381'),('1987-10-16','AA','381'),('1987-10-16','AA','381'),('1987-10-16','AA','396'),('1987-10-16','AA','396'),('1987-10-16','AA','397'),('1987-10-16','AA','417'),('1987-10-16','AA','417'),('1987-10-16','AA','446'),('1987-10-16','AA','451'),('1987-10-16','AA','451'),('1987-10-16','AA','460'),('1987-10-16','AA','460'),('1987-10-16','AA','491'),('1987-10-16','AA','504'),('1987-10-16','AA','504'),('1987-10-16','AA','519'),('1987-10-16','AA','519'),('1987-10-16','AA','523'),('1987-10-16','AA','523'),('1987-10-16','AA','525'),('1987-10-16','AA','525'),('1987-10-16','AA','533'),('1987-10-16','AA','533'),('1987-10-16','AA','546'),('1987-10-16','AA','546'),('1987-10-16','AA','546'),('1987-10-16','AA','546'),('1987-10-16','AA','556'),('1987-10-16','AA','556'),('1987-10-16','AA','556'),('1987-10-16','AA','597'),('1987-10-16','AA','597'),('1987-10-16','AA','597'),('1987-10-16','AA','601'),('1987-10-16','AA','601'),('1987-10-16','AA','627'),('1987-10-16','AA','629'),('1987-10-16','AA','629'),('1987-10-16','AA','670'),('1987-10-16','AA','673'),('1987-10-16','AA','673'),('1987-10-16','AA','680'),('1987-10-16','AA','680'),('1987-10-16','AA','817'),('1987-10-16','AA','817'),('1987-10-16','AA','824'),('1987-10-16','AA','824'),('1987-10-16','AA','824'),('1987-10-16','AA','824'),('1987-10-16','AA','832'),('1987-10-16','AA','832'),('1987-10-16','AA','852'),('1987-10-16','AA','866'),('1987-10-16','AA','866'),('1987-10-16','AA','871'),('1987-10-16','AA','871'),('1987-10-16','AA','880'),('1987-10-16','AA','880'),('1987-10-16','AA','880'),('1987-10-16','AA','880'),('1987-10-16','AA','883'),('1987-10-16','AA','883'),('1987-10-16','AA','885'),('1987-10-16','AA','885'),('1987-10-16','AA','885'),('1987-10-16','AA','890'),('1987-10-16','AA','890'),('1987-10-16','AA','893'),('1987-10-16','AA','893'),('1987-10-16','AA','905'),('1987-10-16','AA','905'),('1987-10-16','AA','915'),('1987-10-16','AA','929'),('1987-10-16','AA','929'),('1987-10-16','AA','936'),('1987-10-16','AA','936'),('1987-10-16','AA','937'),('1987-10-16','AA','937'),('1987-10-16','AA','955'),('1987-10-16','AA','955'),('1987-10-16','AA','966'),('1987-10-16','AA','1002'),('1987-10-16','AA','1002'),('1987-10-16','AA','1004'),('1987-10-16','AA','1004'),('1987-10-16','AA','1015'),('1987-10-16','AA','1015'),('1987-10-16','AA','1021'),('1987-10-16','AA','1021'),('1987-10-16','AA','1041'),('1987-10-16','AA','1041'),('1987-10-16','AA','1046'),('1987-10-16','AA','1046'),('1987-10-16','AA','1048'),('1987-10-16','AA','1048'),('1987-10-16','AA','1061'),('1987-10-16','AA','1061'),('1987-10-16','AA','1088'),('1987-10-16','AA','1088'),('1987-10-16','AA','2033'),('1987-10-16','AA','2033'),('1987-10-16','AA','2050'),('1987-10-16','AA','2058'),('1987-1
0-16','AA','2071'),('1987-10-16','AA','2071'),('1987-10-16','AA','2086'),('1987-10-16','AA','2105'),('1987-10-16','AA','2111'),('1987-10-16','AA','2123'),('1987-10-16','AA','2123'),('1987-10-16','AA','2147'),('1987-10-16','AA','2147'),('1987-10-16','AA','2199'),('1987-10-16','AA','2199'),('1987-10-16','AA','2207'),('1987-10-16','AA','2207'),('1987-10-16','AA','2217'),('1987-10-16','AA','2230'),('1987-10-16','AA','2245'),('1987-10-16','AA','2251'),('1987-10-16','AA','2251'),('1987-10-16','AA','2275'),('1987-10-16','AA','2278'),('1987-10-16','AA','2351'),('1987-10-16','AA','2357'),('1987-10-16','AA','2361'),('1987-10-16','AA','2490'),('1987-10-16','AA','2528'),('1987-10-16','AA','2528'),('1987-10-16','AA','2735'),('1987-10-16','AA','2735'),('1987-10-16','AA','2751'),('1987-10-16','AL','2'),('1987-10-16','AL','2'),('1987-10-16','AL','7'),('1987-10-16','AL','7'),('1987-10-16','AL','26'),('1987-10-16','AL','26'),('1987-10-16','AL','34'),('1987-10-16','AL','36'),('1987-10-16','AL','36'),('1987-10-16','AL','45'),('1987-10-16','AL','45'),('1987-10-16','AL','45'),('1987-10-16','AL','91'),('1987-10-16','AL','91'),('1987-10-16','AL','104'),('1987-10-16','AL','104'),('1987-10-16','AL','109'),('1987-10-16','AL','112'),('1987-10-16','AL','112'),('1987-10-16','AL','123'),('1987-10-16','AL','149'),('1987-10-16','AL','160'),('1987-10-16','AL','160'),('1987-10-16','AL','165'),('1987-10-16','AL','171'),('1987-10-16','AL','171'),('1987-10-16','AL','176'),('1987-10-16','AL','176'),('1987-10-16','AL','179'),('1987-10-16','AL','215'),('1987-10-16','AL','231'),('1987-10-16','AL','263'),('1987-10-16','AL','263'),('1987-10-16','AL','268'),('1987-10-16','AL','268'),('1987-10-16','AL','268'),('1987-10-16','AL','281'),('1987-10-16','AL','281'),('1987-10-16','AL','287'),('1987-10-16','AL','287'),('1987-10-16','AL','309'),('1987-10-16','AL','309'),('1987-10-16','AL','341'),('1987-10-16','AL','344'),('1987-10-16','AL','344'),('1987-10-16','AL','357'),('1987-10-16','AL','357'),('1987-10-16','AL','368'),('1987-10-16','AL','381'),('1987-10-16','AL','396'),('1987-10-16','AL','397'),('1987-10-16','AL','397'),('1987-10-16','AL','416'),('1987-10-16','AL','416'),('1987-10-16','AL','417'),('1987-10-16','AL','438'),('1987-10-16','AL','438'),('1987-10-16','AL','446'),('1987-10-16','AL','451'),('1987-10-16','AL','451'),('1987-10-16','AL','491'),('1987-10-16','AL','491'),('1987-10-16','AL','523'),('1987-10-16','AL','523'),('1987-10-16','AL','523'),('1987-10-16','AL','525'),('1987-10-16','AL','525'),('1987-10-16','AL','533'),('1987-10-16','AL','533'),('1987-10-16','AL','546'),('1987-10-16','AL','546'),('1987-10-16','AL','556'),('1987-10-16','AL','556'),('1987-10-16','AL','601'),('1987-10-16','AL','601'),('1987-10-16','AL','627'),('1987-10-16','AL','629'),('1987-10-16','AL','670'),('1987-10-16','AL','670'),('1987-10-16','AL','670'),('1987-10-16','AL','673'),('1987-10-16','AL','680'),('1987-10-17','AA','2'),('1987-10-17','AA','2'),('1987-10-17','AA','2'),('1987-10-17','AA','7'),('1987-10-17','AA','7'),('1987-10-17','AA','26'),('1987-10-17','AA','36'),('1987-10-17','AA','91'),('1987-10-17','AA','101'),('1987-10-17','AA','101'),('1987-10-17','AA','109'),('1987-10-17','AA','109'),('1987-10-17','AA','112'),('1987-10-17','AA','123'),('1987-10-17','AA','123'),('1987-10-17','AA','160'),('1987-10-17','AA','165'),('1987-10-17','AA','165'),('1987-10-17','AA','165'),('1987-10-17','AA','176'),('1987-10-17','AA','176'),('1987-10-17','AA','176'),('1987-10-17','AA','176'),('1987-10-17','AA','179'),('1987-10-17','AA','179'),('1987-10-17','AA','215'),('1
987-10-17','AA','215'),('1987-10-17','AA','231'),('1987-10-17','AA','231'),('1987-10-17','AA','263'),('1987-10-17','AA','263'),('1987-10-17','AA','268'),('1987-10-17','AA','268'),('1987-10-17','AA','281'),('1987-10-17','AA','287'),('1987-10-17','AA','287'),('1987-10-17','AA','309'),('1987-10-17','AA','309'),('1987-10-17','AA','309'),('1987-10-17','AA','341'),('1987-10-17','AA','344'),('1987-10-17','AA','344'),('1987-10-17','AA','347'),('1987-10-17','AA','347'),('1987-10-17','AA','368'),('1987-10-17','AA','381'),('1987-10-17','AA','381'),('1987-10-17','AA','381'),('1987-10-17','AA','396'),('1987-10-17','AA','396'),('1987-10-17','AA','397'),('1987-10-17','AA','417'),('1987-10-17','AA','446'),('1987-10-17','AA','451'),('1987-10-17','AA','451'),('1987-10-17','AA','460'),('1987-10-17','AA','460'),('1987-10-17','AA','491'),('1987-10-17','AA','491'),('1987-10-17','AA','504'),('1987-10-17','AA','504'),('1987-10-17','AA','519'),('1987-10-17','AA','519'),('1987-10-17','AA','523'),('1987-10-17','AA','523'),('1987-10-17','AA','525'),('1987-10-17','AA','525'),('1987-10-17','AA','525'),('1987-10-17','AA','533'),('1987-10-17','AA','533'),('1987-10-17','AA','546'),('1987-10-17','AA','546'),('1987-10-17','AA','546'),('1987-10-17','AA','546'),('1987-10-17','AA','556'),('1987-10-17','AA','556'),('1987-10-17','AA','556'),('1987-10-17','AA','597'),('1987-10-17','AA','597'),('1987-10-17','AA','597'),('1987-10-17','AA','601'),('1987-10-17','AA','627'),('1987-10-17','AA','629'),('1987-10-17','AA','629'),('1987-10-17','AA','670'),('1987-10-17','AA','673'),('1987-10-17','AA','673'),('1987-10-17','AA','680'),('1987-10-17','AA','680'),('1987-10-17','AA','817'),('1987-10-17','AA','817'),('1987-10-17','AA','824'),('1987-10-17','AA','824'),('1987-10-17','AA','824'),('1987-10-17','AA','824'),('1987-10-17','AA','832'),('1987-10-17','AA','832'),('1987-10-17','AA','852'),('1987-10-17','AA','852'),('1987-10-17','AA','866'),('1987-10-17','AA','866'),('1987-10-17','AA','871'),('1987-10-17','AA','871'),('1987-10-17','AA','880'),('1987-10-17','AA','880'),('1987-10-17','AA','880'),('1987-10-17','AA','880'),('1987-10-17','AA','883'),('1987-10-17','AA','883'),('1987-10-17','AA','885'),('1987-10-17','AA','885'),('1987-10-17','AA','885'),('1987-10-17','AA','890'),('1987-10-17','AA','890'),('1987-10-17','AA','893'),('1987-10-17','AA','893'),('1987-10-17','AA','905'),('1987-10-17','AA','905'),('1987-10-17','AA','915'),('1987-10-17','AA','929'),('1987-10-17','AA','936'),('1987-10-17','AA','936'),('1987-10-17','AA','937'),('1987-10-17','AA','937'),('1987-10-17','AA','955'),('1987-10-17','AA','955'),('1987-10-17','AA','966'),('1987-10-17','AA','1002'),('1987-10-17','AA','1002'),('1987-10-17','AA','1004'),('1987-10-17','AA','1004'),('1987-10-17','AA','1015'),('1987-10-17','AA','1015'),('1987-10-17','AA','1021'),('1987-10-17','AA','1021'),('1987-10-17','AA','1041'),('1987-10-17','AA','1041'),('1987-10-17','AA','1046'),('1987-10-17','AA','1046'),('1987-10-17','AA','1048'),('1987-10-17','AA','1048'),('1987-10-17','AA','1061'),('1987-10-17','AA','1061'),('1987-10-17','AA','1088'),('1987-10-17','AA','1088'),('1987-10-17','AA','2033'),('1987-10-17','AA','2033'),('1987-10-17','AA','2050'),('1987-10-17','AA','2058'),('1987-10-17','AA','2071'),('1987-10-17','AA','2086'),('1987-10-17','AA','2105'),('1987-10-17','AA','2111'),('1987-10-17','AA','2123'),('1987-10-17','AA','2123'),('1987-10-17','AA','2147'),('1987-10-17','AA','2147'),('1987-10-17','AA','2199'),('1987-10-17','AA','2199'),('1987-10-17','AA','2207'),('1987-10-17','AA','2207'),('1987-10-17',
'AA','2217'),('1987-10-17','AA','2230'),('1987-10-17','AA','2251'),('1987-10-17','AA','2251'),('1987-10-17','AA','2275'),('1987-10-17','AA','2278'),('1987-10-17','AA','2351'),('1987-10-17','AA','2357'),('1987-10-17','AA','2361'),('1987-10-17','AA','2490'),('1987-10-17','AA','2528'),('1987-10-17','AA','2528'),('1987-10-17','AA','2735'),('1987-10-17','AA','2735'),('1987-10-17','AL','2'),('1987-10-17','AL','2'),('1987-10-17','AL','7'),('1987-10-17','AL','7'),('1987-10-17','AL','26'),('1987-10-17','AL','26'),('1987-10-17','AL','34'),('1987-10-17','AL','36'),('1987-10-17','AL','36'),('1987-10-17','AL','45'),('1987-10-17','AL','45'),('1987-10-17','AL','45'),('1987-10-17','AL','91'),('1987-10-17','AL','91'),('1987-10-17','AL','104'),('1987-10-17','AL','104'),('1987-10-17','AL','109'),('1987-10-17','AL','112'),('1987-10-17','AL','112'),('1987-10-17','AL','149'),('1987-10-17','AL','160'),('1987-10-17','AL','160'),('1987-10-17','AL','165'),('1987-10-17','AL','171'),('1987-10-17','AL','171'),('1987-10-17','AL','176'),('1987-10-17','AL','176'),('1987-10-17','AL','179'),('1987-10-17','AL','215'),('1987-10-17','AL','231'),('1987-10-17','AL','263'),('1987-10-17','AL','268'),('1987-10-17','AL','268'),('1987-10-17','AL','268'),('1987-10-17','AL','287'),('1987-10-17','AL','287'),('1987-10-17','AL','309'),('1987-10-17','AL','309'),('1987-10-17','AL','344'),('1987-10-17','AL','344'),('1987-10-17','AL','357'),('1987-10-17','AL','357'),('1987-10-17','AL','381'),('1987-10-17','AL','396'),('1987-10-17','AL','397'),('1987-10-17','AL','397'),('1987-10-17','AL','397'),('1987-10-17','AL','416'),('1987-10-17','AL','417'),('1987-10-17','AL','438'),('1987-10-17','AL','438'),('1987-10-17','AL','451'),('1987-10-17','AL','451'),('1987-10-17','AL','491'),('1987-10-17','AL','491'),('1987-10-17','AL','523'),('1987-10-17','AL','523'),('1987-10-17','AL','525'),('1987-10-17','AL','525'),('1987-10-17','AL','533'),('1987-10-17','AL','546'),('1987-10-17','AL','546'),('1987-10-17','AL','556'),('1987-10-17','AL','556'),('1987-10-17','AL','601'),('1987-10-17','AL','601'),('1987-10-17','AL','627'),('1987-10-17','AL','670'),('1987-10-17','AL','673'),('1987-10-17','AL','680'),('1987-10-17','AL','936'),('1987-10-17','AL','966'),('1987-10-18','AA','2'),('1987-10-18','AA','2'),('1987-10-18','AA','2'),('1987-10-18','AA','7'),('1987-10-18','AA','7'),('1987-10-18','AA','26'),('1987-10-18','AA','34'),('1987-10-18','AA','36'),('1987-10-18','AA','91'),('1987-10-18','AA','101'),('1987-10-18','AA','101'),('1987-10-18','AA','109'),('1987-10-18','AA','109'),('1987-10-18','AA','112'),('1987-10-18','AA','123'),('1987-10-18','AA','123'),('1987-10-18','AA','160'),('1987-10-18','AA','165'),('1987-10-18','AA','165'),('1987-10-18','AA','165'),('1987-10-18','AA','176'),('1987-10-18','AA','176'),('1987-10-18','AA','176'),('1987-10-18','AA','176'),('1987-10-18','AA','179'),('1987-10-18','AA','179'),('1987-10-18','AA','215'),('1987-10-18','AA','231'),('1987-10-18','AA','231'),('1987-10-18','AA','263'),('1987-10-18','AA','268'),('1987-10-18','AA','268'),('1987-10-18','AA','281'),('1987-10-18','AA','287'),('1987-10-18','AA','287'),('1987-10-18','AA','309'),('1987-10-18','AA','309'),('1987-10-18','AA','309'),('1987-10-18','AA','341'),('1987-10-18','AA','344'),('1987-10-18','AA','344'),('1987-10-18','AA','347'),('1987-10-18','AA','347'),('1987-10-18','AA','368'),('1987-10-18','AA','381'),('1987-10-18','AA','381'),('1987-10-18','AA','381'),('1987-10-18','AA','396'),('1987-10-18','AA','396'),('1987-10-18','AA','397'),('1987-10-18','AA','397'),('1987-10-18','AA','417')
,('1987-10-18','AA','417'),('1987-10-18','AA','451'),('1987-10-18','AA','451'),('1987-10-18','AA','460'),('1987-10-18','AA','491'),('1987-10-18','AA','491'),('1987-10-18','AA','504'),('1987-10-18','AA','504'),('1987-10-18','AA','519'),('1987-10-18','AA','519'),('1987-10-18','AA','523'),('1987-10-18','AA','523'),('1987-10-18','AA','525'),('1987-10-18','AA','525'),('1987-10-18','AA','525'),('1987-10-18','AA','533'),('1987-10-18','AA','533'),('1987-10-18','AA','546'),('1987-10-18','AA','546'),('1987-10-18','AA','546'),('1987-10-18','AA','546'),('1987-10-18','AA','556'),('1987-10-18','AA','556'),('1987-10-18','AA','556'),('1987-10-18','AA','597'),('1987-10-18','AA','597'),('1987-10-18','AA','597'),('1987-10-18','AA','601'),('1987-10-18','AA','601'),('1987-10-18','AA','629'),('1987-10-18','AA','629'),('1987-10-18','AA','670'),('1987-10-18','AA','673'),('1987-10-18','AA','680'),('1987-10-18','AA','680'),('1987-10-18','AA','817'),('1987-10-18','AA','817'),('1987-10-18','AA','824'),('1987-10-18','AA','824'),('1987-10-18','AA','824'),('1987-10-18','AA','824'),('1987-10-18','AA','832'),('1987-10-18','AA','832'),('1987-10-18','AA','852'),('1987-10-18','AA','866'),('1987-10-18','AA','866'),('1987-10-18','AA','871'),('1987-10-18','AA','871'),('1987-10-18','AA','880'),('1987-10-18','AA','880'),('1987-10-18','AA','880'),('1987-10-18','AA','880'),('1987-10-18','AA','883'),('1987-10-18','AA','883'),('1987-10-18','AA','885'),('1987-10-18','AA','885'),('1987-10-18','AA','885'),('1987-10-18','AA','890'),('1987-10-18','AA','890'),('1987-10-18','AA','893'),('1987-10-18','AA','893'),('1987-10-18','AA','905'),('1987-10-18','AA','905'),('1987-10-18','AA','915'),('1987-10-18','AA','929'),('1987-10-18','AA','929'),('1987-10-18','AA','936'),('1987-10-18','AA','936'),('1987-10-18','AA','937'),('1987-10-18','AA','937'),('1987-10-18','AA','955'),('1987-10-18','AA','966'),('1987-10-18','AA','1002'),('1987-10-18','AA','1002'),('1987-10-18','AA','1004'),('1987-10-18','AA','1004'),('1987-10-18','AA','1015'),('1987-10-18','AA','1015'),('1987-10-18','AA','1021'),('1987-10-18','AA','1021'),('1987-10-18','AA','1041'),('1987-10-18','AA','1041'),('1987-10-18','AA','1046'),('1987-10-18','AA','1046'),('1987-10-18','AA','1048'),('1987-10-18','AA','1048'),('1987-10-18','AA','1061'),('1987-10-18','AA','1061'),('1987-10-18','AA','1088'),('1987-10-18','AA','1088'),('1987-10-18','AA','2033'),('1987-10-18','AA','2033'),('1987-10-18','AA','2050'),('1987-10-18','AA','2058'),('1987-10-18','AA','2071'),('1987-10-18','AA','2071'),('1987-10-18','AA','2086'),('1987-10-18','AA','2111'),('1987-10-18','AA','2123'),('1987-10-18','AA','2147'),('1987-10-18','AA','2147'),('1987-10-18','AA','2199'),('1987-10-18','AA','2199'),('1987-10-18','AA','2207'),('1987-10-18','AA','2207'),('1987-10-18','AA','2230'),('1987-10-18','AA','2245'),('1987-10-18','AA','2251'),('1987-10-18','AA','2251'),('1987-10-18','AA','2275'),('1987-10-18','AA','2278'),('1987-10-18','AA','2351'),('1987-10-18','AA','2357'),('1987-10-18','AA','2361'),('1987-10-18','AA','2490'),('1987-10-18','AA','2528'),('1987-10-18','AA','2528'),('1987-10-18','AA','2735'),('1987-10-18','AA','2735'),('1987-10-18','AA','2751'),('1987-10-18','AL','2'),('1987-10-18','AL','2'),('1987-10-18','AL','7'),('1987-10-18','AL','7'),('1987-10-18','AL','26'),('1987-10-18','AL','26'),('1987-10-18','AL','34'),('1987-10-18','AL','34'),('1987-10-18','AL','36'),('1987-10-18','AL','36'),('1987-10-18','AL','45'),('1987-10-18','AL','45'),('1987-10-18','AL','45'),('1987-10-18','AL','91'),('1987-10-18','AL','91'),('1987-10-18','
AL','104'),('1987-10-18','AL','104'),('1987-10-18','AL','104'),('1987-10-18','AL','112'),('1987-10-18','AL','123'),('1987-10-18','AL','160'),('1987-10-18','AL','160'),('1987-10-18','AL','165'),('1987-10-18','AL','171'),('1987-10-18','AL','171'),('1987-10-18','AL','176'),('1987-10-18','AL','176'),('1987-10-18','AL','179'),('1987-10-18','AL','215'),('1987-10-18','AL','231'),('1987-10-18','AL','263'),('1987-10-18','AL','263'),('1987-10-18','AL','281'),('1987-10-18','AL','281'),('1987-10-18','AL','309'),('1987-10-18','AL','309'),('1987-10-18','AL','341'),('1987-10-18','AL','344'),('1987-10-18','AL','344'),('1987-10-18','AL','357'),('1987-10-18','AL','357'),('1987-10-18','AL','368'),('1987-10-18','AL','396'),('1987-10-18','AL','416'),('1987-10-18','AL','416'),('1987-10-18','AL','417'),('1987-10-18','AL','438'),('1987-10-18','AL','438'),('1987-10-18','AL','446'),('1987-10-18','AL','451'),('1987-10-18','AL','491'),('1987-10-18','AL','523'),('1987-10-18','AL','523'),('1987-10-18','AL','523'),('1987-10-18','AL','525'),('1987-10-18','AL','525'),('1987-10-18','AL','533'),('1987-10-18','AL','533'),('1987-10-18','AL','546'),('1987-10-18','AL','546'),('1987-10-18','AL','556'),('1987-10-18','AL','601'),('1987-10-18','AL','601'),('1987-10-18','AL','627'),('1987-10-18','AL','629'),('1987-10-18','AL','670'),('1987-10-18','AL','670'),('1987-10-18','AL','670'),('1987-10-18','AL','673'),('1987-10-18','AL','680'),('1987-10-18','AL','937'),('1987-10-18','AL','937'),('1987-10-18','AL','955'),('1987-10-19','AA','2'),('1987-10-19','AA','2'),('1987-10-19','AA','2'),('1987-10-19','AA','7'),('1987-10-19','AA','7'),('1987-10-19','AA','26'),('1987-10-19','AA','34'),('1987-10-19','AA','36'),('1987-10-19','AA','91'),('1987-10-19','AA','101'),('1987-10-19','AA','101'),('1987-10-19','AA','109'),('1987-10-19','AA','109'),('1987-10-19','AA','112'),('1987-10-19','AA','123'),('1987-10-19','AA','123'),('1987-10-19','AA','160'),('1987-10-19','AA','165'),('1987-10-19','AA','165'),('1987-10-19','AA','165'),('1987-10-19','AA','176'),('1987-10-19','AA','176'),('1987-10-19','AA','176'),('1987-10-19','AA','176'),('1987-10-19','AA','179'),('1987-10-19','AA','179'),('1987-10-19','AA','215'),('1987-10-19','AA','215'),('1987-10-19','AA','231'),('1987-10-19','AA','231'),('1987-10-19','AA','263'),('1987-10-19','AA','263'),('1987-10-19','AA','268'),('1987-10-19','AA','268'),('1987-10-19','AA','281'),('1987-10-19','AA','287'),('1987-10-19','AA','287'),('1987-10-19','AA','309'),('1987-10-19','AA','309'),('1987-10-19','AA','341'),('1987-10-19','AA','344'),('1987-10-19','AA','344'),('1987-10-19','AA','347'),('1987-10-19','AA','347'),('1987-10-19','AA','368'),('1987-10-19','AA','381'),('1987-10-19','AA','381'),('1987-10-19','AA','381'),('1987-10-19','AA','396'),('1987-10-19','AA','396'),('1987-10-19','AA','397'),('1987-10-19','AA','397'),('1987-10-19','AA','417'),('1987-10-19','AA','417'),('1987-10-19','AA','446'),('1987-10-19','AA','451'),('1987-10-19','AA','451'),('1987-10-19','AA','491'),('1987-10-19','AA','504'),('1987-10-19','AA','504'),('1987-10-19','AA','519'),('1987-10-19','AA','519'),('1987-10-19','AA','523'),('1987-10-19','AA','523'),('1987-10-19','AA','525'),('1987-10-19','AA','525'),('1987-10-19','AA','525'),('1987-10-19','AA','533'),('1987-10-19','AA','533'),('1987-10-19','AA','546'),('1987-10-19','AA','546'),('1987-10-19','AA','546'),('1987-10-19','AA','546'),('1987-10-19','AA','556'),('1987-10-19','AA','556'),('1987-10-19','AA','556'),('1987-10-19','AA','597'),('1987-10-19','AA','597'),('1987-10-19','AA','597'),('1987-10-19','AA','60
1'),('1987-10-19','AA','601'),('1987-10-19','AA','627'),('1987-10-19','AA','629'),('1987-10-19','AA','629'),('1987-10-19','AA','670'),('1987-10-19','AA','673'),('1987-10-19','AA','673'),('1987-10-19','AA','680'),('1987-10-19','AA','680'),('1987-10-19','AA','817'),('1987-10-19','AA','817'),('1987-10-19','AA','824'),('1987-10-19','AA','824'),('1987-10-19','AA','824'),('1987-10-19','AA','832'),('1987-10-19','AA','832'),('1987-10-19','AA','852'),('1987-10-19','AA','852'),('1987-10-19','AA','866'),('1987-10-19','AA','866'),('1987-10-19','AA','871'),('1987-10-19','AA','871'),('1987-10-19','AA','880'),('1987-10-19','AA','880'),('1987-10-19','AA','880'),('1987-10-19','AA','880'),('1987-10-19','AA','883'),('1987-10-19','AA','883'),('1987-10-19','AA','885'),('1987-10-19','AA','885'),('1987-10-19','AA','890'),('1987-10-19','AA','890'),('1987-10-19','AA','893'),('1987-10-19','AA','893'),('1987-10-19','AA','905'),('1987-10-19','AA','905'),('1987-10-19','AA','915'),('1987-10-19','AA','929'),('1987-10-19','AA','929'),('1987-10-19','AA','936'),('1987-10-19','AA','936'),('1987-10-19','AA','937'),('1987-10-19','AA','955'),('1987-10-19','AA','955'),('1987-10-19','AA','966'),('1987-10-19','AA','1002'),('1987-10-19','AA','1002'),('1987-10-19','AA','1004'),('1987-10-19','AA','1004'),('1987-10-19','AA','1015'),('1987-10-19','AA','1015'),('1987-10-19','AA','1021'),('1987-10-19','AA','1021'),('1987-10-19','AA','1041'),('1987-10-19','AA','1041'),('1987-10-19','AA','1046'),('1987-10-19','AA','1046'),('1987-10-19','AA','1048'),('1987-10-19','AA','1048'),('1987-10-19','AA','1061'),('1987-10-19','AA','1061'),('1987-10-19','AA','1088'),('1987-10-19','AA','1088'),('1987-10-19','AA','2033'),('1987-10-19','AA','2033'),('1987-10-19','AA','2050'),('1987-10-19','AA','2058'),('1987-10-19','AA','2071'),('1987-10-19','AA','2071'),('1987-10-19','AA','2086'),('1987-10-19','AA','2105'),('1987-10-19','AA','2111'),('1987-10-19','AA','2123'),('1987-10-19','AA','2123'),('1987-10-19','AA','2147'),('1987-10-19','AA','2147'),('1987-10-19','AA','2199'),('1987-10-19','AA','2199'),('1987-10-19','AA','2207'),('1987-10-19','AA','2207'),('1987-10-19','AA','2217'),('1987-10-19','AA','2230'),('1987-10-19','AA','2245'),('1987-10-19','AA','2251'),('1987-10-19','AA','2251'),('1987-10-19','AA','2275'),('1987-10-19','AA','2278'),('1987-10-19','AA','2357'),('1987-10-19','AA','2361'),('1987-10-19','AA','2490'),('1987-10-19','AA','2528'),('1987-10-19','AA','2735'),('1987-10-19','AA','2735'),('1987-10-19','AA','2751'),('1987-10-19','AL','2'),('1987-10-19','AL','2'),('1987-10-19','AL','7'),('1987-10-19','AL','7'),('1987-10-19','AL','26'),('1987-10-19','AL','26'),('1987-10-19','AL','34'),('1987-10-19','AL','34'),('1987-10-19','AL','36'),('1987-10-19','AL','36'),('1987-10-19','AL','45'),('1987-10-19','AL','45'),('1987-10-19','AL','45'),('1987-10-19','AL','91'),('1987-10-19','AL','91'),('1987-10-19','AL','104'),('1987-10-19','AL','104'),('1987-10-19','AL','104'),('1987-10-19','AL','109'),('1987-10-19','AL','112'),('1987-10-19','AL','112'),('1987-10-19','AL','123'),('1987-10-19','AL','149'),('1987-10-19','AL','160'),('1987-10-19','AL','160'),('1987-10-19','AL','165'),('1987-10-19','AL','171'),('1987-10-19','AL','171'),('1987-10-19','AL','176'),('1987-10-19','AL','176'),('1987-10-19','AL','179'),('1987-10-19','AL','215'),('1987-10-19','AL','231'),('1987-10-19','AL','263'),('1987-10-19','AL','263'),('1987-10-19','AL','268'),('1987-10-19','AL','268'),('1987-10-19','AL','268'),('1987-10-19','AL','281'),('1987-10-19','AL','281'),('1987-10-19','AL','287'),('1987-10-1
9','AL','287'),('1987-10-19','AL','309'),('1987-10-19','AL','309'),('1987-10-19','AL','341'),('1987-10-19','AL','344'),('1987-10-19','AL','344'),('1987-10-19','AL','357'),('1987-10-19','AL','357'),('1987-10-19','AL','368'),('1987-10-19','AL','381'),('1987-10-19','AL','396'),('1987-10-19','AL','416'),('1987-10-19','AL','416'),('1987-10-19','AL','417'),('1987-10-19','AL','438'),('1987-10-19','AL','438'),('1987-10-19','AL','446'),('1987-10-19','AL','451'),('1987-10-19','AL','451'),('1987-10-19','AL','491'),('1987-10-19','AL','491'),('1987-10-19','AL','523'),('1987-10-19','AL','523'),('1987-10-19','AL','523'),('1987-10-19','AL','525'),('1987-10-19','AL','525'),('1987-10-19','AL','533'),('1987-10-19','AL','533'),('1987-10-19','AL','546'),('1987-10-19','AL','546'),('1987-10-19','AL','556'),('1987-10-19','AL','556'),('1987-10-19','AL','601'),('1987-10-19','AL','601'),('1987-10-19','AL','627'),('1987-10-19','AL','629'),('1987-10-19','AL','670'),('1987-10-19','AL','670'),('1987-10-19','AL','670'),('1987-10-20','AA','2'),('1987-10-20','AA','2'),('1987-10-20','AA','2'),('1987-10-20','AA','7'),('1987-10-20','AA','7'),('1987-10-20','AA','34'),('1987-10-20','AA','36'),('1987-10-20','AA','91'),('1987-10-20','AA','101'),('1987-10-20','AA','101'),('1987-10-20','AA','109'),('1987-10-20','AA','109'),('1987-10-20','AA','112'),('1987-10-20','AA','123'),('1987-10-20','AA','123'),('1987-10-20','AA','160'),('1987-10-20','AA','165'),('1987-10-20','AA','165'),('1987-10-20','AA','165'),('1987-10-20','AA','176'),('1987-10-20','AA','176'),('1987-10-20','AA','176'),('1987-10-20','AA','176'),('1987-10-20','AA','179'),('1987-10-20','AA','179'),('1987-10-20','AA','215'),('1987-10-20','AA','215'),('1987-10-20','AA','231'),('1987-10-20','AA','231'),('1987-10-20','AA','263'),('1987-10-20','AA','263'),('1987-10-20','AA','268'),('1987-10-20','AA','268'),('1987-10-20','AA','281'),('1987-10-20','AA','287'),('1987-10-20','AA','287'),('1987-10-20','AA','309'),('1987-10-20','AA','309'),('1987-10-20','AA','309'),('1987-10-20','AA','341'),('1987-10-20','AA','344'),('1987-10-20','AA','344'),('1987-10-20','AA','347'),('1987-10-20','AA','347'),('1987-10-20','AA','368'),('1987-10-20','AA','381'),('1987-10-20','AA','381'),('1987-10-20','AA','381'),('1987-10-20','AA','396'),('1987-10-20','AA','396'),('1987-10-20','AA','397'),('1987-10-20','AA','397'),('1987-10-20','AA','417'),('1987-10-20','AA','417'),('1987-10-20','AA','446'),('1987-10-20','AA','460'),('1987-10-20','AA','460'),('1987-10-20','AA','491'),('1987-10-20','AA','504'),('1987-10-20','AA','504'),('1987-10-20','AA','519'),('1987-10-20','AA','519'),('1987-10-20','AA','523'),('1987-10-20','AA','525'),('1987-10-20','AA','525'),('1987-10-20','AA','525'),('1987-10-20','AA','533'),('1987-10-20','AA','533'),('1987-10-20','AA','533'),('1987-10-20','AA','546'),('1987-10-20','AA','546'),('1987-10-20','AA','546'),('1987-10-20','AA','546'),('1987-10-20','AA','556'),('1987-10-20','AA','556'),('1987-10-20','AA','597'),('1987-10-20','AA','597'),('1987-10-20','AA','601'),('1987-10-20','AA','601'),('1987-10-20','AA','629'),('1987-10-20','AA','670'),('1987-10-20','AA','673'),('1987-10-20','AA','673'),('1987-10-20','AA','680'),('1987-10-20','AA','680'),('1987-10-20','AA','817'),('1987-10-20','AA','824'),('1987-10-20','AA','824'),('1987-10-20','AA','824'),('1987-10-20','AA','832'),('1987-10-20','AA','832'),('1987-10-20','AA','852'),('1987-10-20','AA','852'),('1987-10-20','AA','866'),('1987-10-20','AA','866'),('1987-10-20','AA','871'),('1987-10-20','AA','871'),('1987-10-20','AA','880'),('1987-10-20','AA
','880'),('1987-10-20','AA','880'),('1987-10-20','AA','880'),('1987-10-20','AA','883'),('1987-10-20','AA','883'),('1987-10-20','AA','885'),('1987-10-20','AA','885'),('1987-10-20','AA','885'),('1987-10-20','AA','890'),('1987-10-20','AA','890'),('1987-10-20','AA','893'),('1987-10-20','AA','893'),('1987-10-20','AA','905'),('1987-10-20','AA','905'),('1987-10-20','AA','915'),('1987-10-20','AA','929'),('1987-10-20','AA','929'),('1987-10-20','AA','936'),('1987-10-20','AA','937'),('1987-10-20','AA','937'),('1987-10-20','AA','955'),('1987-10-20','AA','955'),('1987-10-20','AA','966'),('1987-10-20','AA','1002'),('1987-10-20','AA','1002'),('1987-10-20','AA','1004'),('1987-10-20','AA','1004'),('1987-10-20','AA','1015'),('1987-10-20','AA','1015'),('1987-10-20','AA','1021'),('1987-10-20','AA','1021'),('1987-10-20','AA','1041'),('1987-10-20','AA','1041'),('1987-10-20','AA','1046'),('1987-10-20','AA','1046'),('1987-10-20','AA','1048'),('1987-10-20','AA','1048'),('1987-10-20','AA','1061'),('1987-10-20','AA','1061'),('1987-10-20','AA','1088'),('1987-10-20','AA','1088'),('1987-10-20','AA','2033'),('1987-10-20','AA','2033'),('1987-10-20','AA','2050'),('1987-10-20','AA','2058'),('1987-10-20','AA','2071'),('1987-10-20','AA','2071'),('1987-10-20','AA','2086'),('1987-10-20','AA','2105'),('1987-10-20','AA','2111'),('1987-10-20','AA','2123'),('1987-10-20','AA','2123'),('1987-10-20','AA','2147'),('1987-10-20','AA','2199'),('1987-10-20','AA','2207'),('1987-10-20','AA','2217'),('1987-10-20','AA','2230'),('1987-10-20','AA','2245'),('1987-10-20','AA','2251'),('1987-10-20','AA','2251'),('1987-10-20','AA','2275'),('1987-10-20','AA','2278'),('1987-10-20','AA','2351'),('1987-10-20','AA','2357'),('1987-10-20','AA','2361'),('1987-10-20','AA','2490'),('1987-10-20','AA','2528'),('1987-10-20','AA','2528'),('1987-10-20','AA','2735'),('1987-10-20','AA','2735'),('1987-10-20','AA','2751'),('1987-10-20','AL','2'),('1987-10-20','AL','2'),('1987-10-20','AL','7'),('1987-10-20','AL','7'),('1987-10-20','AL','26'),('1987-10-20','AL','26'),('1987-10-20','AL','34'),('1987-10-20','AL','34'),('1987-10-20','AL','36'),('1987-10-20','AL','36'),('1987-10-20','AL','45'),('1987-10-20','AL','45'),('1987-10-20','AL','45'),('1987-10-20','AL','104'),('1987-10-20','AL','104'),('1987-10-20','AL','104'),('1987-10-20','AL','109'),('1987-10-20','AL','112'),('1987-10-20','AL','112'),('1987-10-20','AL','123'),('1987-10-20','AL','149'),('1987-10-20','AL','160'),('1987-10-20','AL','160'),('1987-10-20','AL','165'),('1987-10-20','AL','171'),('1987-10-20','AL','171'),('1987-10-20','AL','176'),('1987-10-20','AL','176'),('1987-10-20','AL','179'),('1987-10-20','AL','215'),('1987-10-20','AL','231'),('1987-10-20','AL','263'),('1987-10-20','AL','263'),('1987-10-20','AL','268'),('1987-10-20','AL','268'),('1987-10-20','AL','268'),('1987-10-20','AL','281'),('1987-10-20','AL','281'),('1987-10-20','AL','287'),('1987-10-20','AL','287'),('1987-10-20','AL','309'),('1987-10-20','AL','309'),('1987-10-20','AL','341'),('1987-10-20','AL','344'),('1987-10-20','AL','344'),('1987-10-20','AL','357'),('1987-10-20','AL','357'),('1987-10-20','AL','368'),('1987-10-20','AL','381'),('1987-10-20','AL','396'),('1987-10-20','AL','397'),('1987-10-20','AL','397'),('1987-10-20','AL','416'),('1987-10-20','AL','416'),('1987-10-20','AL','417'),('1987-10-20','AL','438'),('1987-10-20','AL','438'),('1987-10-20','AL','446'),('1987-10-20','AL','451'),('1987-10-20','AL','451'),('1987-10-20','AL','491'),('1987-10-20','AL','491'),('1987-10-20','AL','523'),('1987-10-20','AL','523'),('1987-10-20','AL','523'),('198
7-10-20','AL','525'),('1987-10-20','AL','525'),('1987-10-20','AL','533'),('1987-10-20','AL','533'),('1987-10-20','AL','546'),('1987-10-20','AL','546'),('1987-10-20','AL','556'),('1987-10-20','AL','556'),('1987-10-20','AL','601'),('1987-10-20','AL','601'),('1987-10-20','AL','627'),('1987-10-20','AL','629'),('1987-10-20','AL','670'),('1987-10-20','AL','670'),('1987-10-20','AL','670'),('1987-10-20','AL','673'),('1987-10-20','AL','680'),('1987-10-05','AA','2'),('1987-10-05','AA','2'),('1987-10-05','AA','2'),('1987-10-05','AA','7'),('1987-10-05','AA','7'),('1987-10-05','AA','26'),('1987-10-05','AA','34'),('1987-10-05','AA','36'),('1987-10-05','AA','91'),('1987-10-05','AA','101'),('1987-10-05','AA','101'),('1987-10-05','AA','109'),('1987-10-05','AA','109'),('1987-10-05','AA','112'),('1987-10-05','AA','123'),('1987-10-05','AA','123'),('1987-10-05','AA','165'),('1987-10-05','AA','165'),('1987-10-05','AA','165'),('1987-10-05','AA','176'),('1987-10-05','AA','176'),('1987-10-05','AA','176'),('1987-10-05','AA','176'),('1987-10-05','AA','179'),('1987-10-05','AA','179'),('1987-10-05','AA','215'),('1987-10-05','AA','215'),('1987-10-05','AA','231'),('1987-10-05','AA','263'),('1987-10-05','AA','263'),('1987-10-05','AA','281'),('1987-10-05','AA','287'),('1987-10-05','AA','287'),('1987-10-05','AA','309'),('1987-10-05','AA','309'),('1987-10-05','AA','309'),('1987-10-05','AA','341'),('1987-10-05','AA','347'),('1987-10-05','AA','347'),('1987-10-05','AA','368'),('1987-10-05','AA','381'),('1987-10-05','AA','381'),('1987-10-05','AA','381'),('1987-10-05','AA','396'),('1987-10-05','AA','396'),('1987-10-05','AA','397'),('1987-10-05','AA','397'),('1987-10-05','AA','417'),('1987-10-05','AA','417'),('1987-10-05','AA','446'),('1987-10-05','AA','451'),('1987-10-05','AA','451'),('1987-10-05','AA','460'),('1987-10-05','AA','460'),('1987-10-05','AA','491'),('1987-10-05','AA','504'),('1987-10-05','AA','504'),('1987-10-05','AA','519'),('1987-10-05','AA','519'),('1987-10-05','AA','523'),('1987-10-05','AA','523'),('1987-10-05','AA','525'),('1987-10-05','AA','525'),('1987-10-05','AA','525'),('1987-10-05','AA','533'),('1987-10-05','AA','533'),('1987-10-05','AA','533'),('1987-10-05','AA','546'),('1987-10-05','AA','546'),('1987-10-05','AA','546'),('1987-10-05','AA','546'),('1987-10-05','AA','556'),('1987-10-05','AA','556'),('1987-10-05','AA','556'),('1987-10-05','AA','597'),('1987-10-05','AA','597'),('1987-10-05','AA','597'),('1987-10-05','AA','601'),('1987-10-05','AA','601'),('1987-10-05','AA','627'),('1987-10-05','AA','629'),('1987-10-05','AA','629'),('1987-10-05','AA','670'),('1987-10-05','AA','673'),('1987-10-05','AA','673'),('1987-10-05','AA','680'),('1987-10-05','AA','680'),('1987-10-05','AA','817'),('1987-10-05','AA','817'),('1987-10-05','AA','824'),('1987-10-05','AA','824'),('1987-10-05','AA','824'),('1987-10-05','AA','824'),('1987-10-05','AA','832'),('1987-10-05','AA','832'),('1987-10-05','AA','852'),('1987-10-05','AA','852'),('1987-10-05','AA','866'),('1987-10-05','AA','866'),('1987-10-05','AA','871'),('1987-10-05','AA','871'),('1987-10-05','AA','880'),('1987-10-05','AA','880'),('1987-10-05','AA','880'),('1987-10-05','AA','880'),('1987-10-05','AA','883'),('1987-10-05','AA','883'),('1987-10-05','AA','885'),('1987-10-05','AA','885'),('1987-10-05','AA','885'),('1987-10-05','AA','890'),('1987-10-05','AA','890'),('1987-10-05','AA','893'),('1987-10-05','AA','893'),('1987-10-05','AA','905'),('1987-10-05','AA','905'),('1987-10-05','AA','915'),('1987-10-05','AA','929'),('1987-10-05','AA','929'),('1987-10-05','AA','936'),('1987-10-05
','AA','936'),('1987-10-05','AA','937'),('1987-10-05','AA','937'),('1987-10-05','AA','955'),('1987-10-05','AA','955'),('1987-10-05','AA','966'),('1987-10-05','AA','1002'),('1987-10-05','AA','1002'),('1987-10-05','AA','1004'),('1987-10-05','AA','1004'),('1987-10-05','AA','1015'),('1987-10-05','AA','1015'),('1987-10-05','AA','1021'),('1987-10-05','AA','1021'),('1987-10-05','AA','1041'),('1987-10-05','AA','1041'),('1987-10-05','AA','1046'),('1987-10-05','AA','1046'),('1987-10-05','AA','1048'),('1987-10-05','AA','1048'),('1987-10-05','AA','1061'),('1987-10-05','AA','1061'),('1987-10-05','AA','1088'),('1987-10-05','AA','1088'),('1987-10-05','AA','2033'),('1987-10-05','AA','2033'),('1987-10-05','AA','2050'),('1987-10-05','AA','2058'),('1987-10-05','AA','2071'),('1987-10-05','AA','2071'),('1987-10-05','AA','2086'),('1987-10-05','AA','2105'),('1987-10-05','AA','2111'),('1987-10-05','AA','2123'),('1987-10-05','AA','2123'),('1987-10-05','AA','2147'),('1987-10-05','AA','2147'),('1987-10-05','AA','2199'),('1987-10-05','AA','2199'),('1987-10-05','AA','2207'),('1987-10-05','AA','2207'),('1987-10-05','AA','2217'),('1987-10-05','AA','2230'),('1987-10-05','AA','2245'),('1987-10-05','AA','2251'),('1987-10-05','AA','2251'),('1987-10-05','AA','2275'),('1987-10-05','AA','2278'),('1987-10-05','AA','2351'),('1987-10-05','AA','2357'),('1987-10-05','AA','2361'),('1987-10-05','AA','2528'),('1987-10-05','AA','2528'),('1987-10-05','AA','2735'),('1987-10-05','AA','2735'),('1987-10-05','AA','2751'),('1987-10-05','AL','2'),('1987-10-05','AL','2'),('1987-10-05','AL','7'),('1987-10-05','AL','7'),('1987-10-05','AL','26'),('1987-10-05','AL','26'),('1987-10-05','AL','36'),('1987-10-05','AL','36'),('1987-10-05','AL','45'),('1987-10-05','AL','45'),('1987-10-05','AL','45'),('1987-10-05','AL','91'),('1987-10-05','AL','91'),('1987-10-05','AL','104'),('1987-10-05','AL','104'),('1987-10-05','AL','104'),('1987-10-05','AL','109'),('1987-10-05','AL','112'),('1987-10-05','AL','112'),('1987-10-05','AL','123'),('1987-10-05','AL','149'),('1987-10-05','AL','160'),('1987-10-05','AL','160'),('1987-10-05','AL','165'),('1987-10-05','AL','171'),('1987-10-05','AL','171'),('1987-10-05','AL','176'),('1987-10-05','AL','176'),('1987-10-05','AL','179'),('1987-10-05','AL','215'),('1987-10-05','AL','231'),('1987-10-05','AL','263'),('1987-10-05','AL','263'),('1987-10-05','AL','268'),('1987-10-05','AL','268'),('1987-10-05','AL','268'),('1987-10-05','AL','281'),('1987-10-05','AL','281'),('1987-10-05','AL','287'),('1987-10-05','AL','287'),('1987-10-05','AL','309'),('1987-10-05','AL','309'),('1987-10-05','AL','341'),('1987-10-05','AL','344'),('1987-10-05','AL','344'),('1987-10-05','AL','357'),('1987-10-05','AL','357'),('1987-10-05','AL','368'),('1987-10-05','AL','381'),('1987-10-05','AL','396'),('1987-10-05','AL','397'),('1987-10-05','AL','397'),('1987-10-05','AL','416'),('1987-10-05','AL','416'),('1987-10-05','AL','417'),('1987-10-05','AL','438'),('1987-10-05','AL','438'),('1987-10-05','AL','446'),('1987-10-05','AL','451'),('1987-10-05','AL','451'),('1987-10-05','AL','491'),('1987-10-05','AL','491'),('1987-10-05','AL','523'),('1987-10-05','AL','523'),('1987-10-05','AL','523'),('1987-10-05','AL','525'),('1987-10-05','AL','525'),('1987-10-05','AL','533'),('1987-10-05','AL','533'),('1987-10-05','AL','546'),('1987-10-05','AL','546'),('1987-10-05','AL','601'),('1987-10-05','AL','601'),('1987-10-05','AL','627'),('1987-10-05','AL','629'),('1987-10-05','AL','670'),('1987-10-05','AL','670'),('1987-10-05','AL','670'),('1987-10-05','AL','673'),('1987-10-05','AL','680'
),('1987-10-06','AA','2'),('1987-10-06','AA','2'),('1987-10-06','AA','7'),('1987-10-06','AA','7'),('1987-10-06','AA','34'),('1987-10-06','AA','36'),('1987-10-06','AA','91'),('1987-10-06','AA','101'),('1987-10-06','AA','109'),('1987-10-06','AA','109'),('1987-10-06','AA','112'),('1987-10-06','AA','123'),('1987-10-06','AA','123'),('1987-10-06','AA','165'),('1987-10-06','AA','165'),('1987-10-06','AA','165'),('1987-10-06','AA','176'),('1987-10-06','AA','176'),('1987-10-06','AA','176'),('1987-10-06','AA','176'),('1987-10-06','AA','179'),('1987-10-06','AA','179'),('1987-10-06','AA','215'),('1987-10-06','AA','215'),('1987-10-06','AA','231'),('1987-10-06','AA','231'),('1987-10-06','AA','263'),('1987-10-06','AA','263'),('1987-10-06','AA','268'),('1987-10-06','AA','268'),('1987-10-06','AA','287'),('1987-10-06','AA','287'),('1987-10-06','AA','309'),('1987-10-06','AA','309'),('1987-10-06','AA','309'),('1987-10-06','AA','341'),('1987-10-06','AA','344'),('1987-10-06','AA','344'),('1987-10-06','AA','347'),('1987-10-06','AA','347'),('1987-10-06','AA','368'),('1987-10-06','AA','381'),('1987-10-06','AA','381'),('1987-10-06','AA','381'),('1987-10-06','AA','396'),('1987-10-06','AA','396'),('1987-10-06','AA','397'),('1987-10-06','AA','397'),('1987-10-06','AA','417'),('1987-10-06','AA','417'),('1987-10-06','AA','446'),('1987-10-06','AA','451'),('1987-10-06','AA','451'),('1987-10-06','AA','460'),('1987-10-06','AA','460'),('1987-10-06','AA','491'),('1987-10-06','AA','504'),('1987-10-06','AA','504'),('1987-10-06','AA','519'),('1987-10-06','AA','519'),('1987-10-06','AA','523'),('1987-10-06','AA','523'),('1987-10-06','AA','525'),('1987-10-06','AA','525'),('1987-10-06','AA','525'),('1987-10-06','AA','533'),('1987-10-06','AA','533'),('1987-10-06','AA','533'),('1987-10-06','AA','546'),('1987-10-06','AA','546'),('1987-10-06','AA','546'),('1987-10-06','AA','546'),('1987-10-06','AA','556'),('1987-10-06','AA','556'),('1987-10-06','AA','556'),('1987-10-06','AA','597'),('1987-10-06','AA','597'),('1987-10-06','AA','597'),('1987-10-06','AA','601'),('1987-10-06','AA','601'),('1987-10-06','AA','627'),('1987-10-06','AA','629'),('1987-10-06','AA','629'),('1987-10-06','AA','670'),('1987-10-06','AA','673'),('1987-10-06','AA','673'),('1987-10-06','AA','680'),('1987-10-06','AA','680'),('1987-10-06','AA','817'),('1987-10-06','AA','817'),('1987-10-06','AA','824'),('1987-10-06','AA','824'),('1987-10-06','AA','824'),('1987-10-06','AA','824'),('1987-10-06','AA','832'),('1987-10-06','AA','832'),('1987-10-06','AA','852'),('1987-10-06','AA','852'),('1987-10-06','AA','866'),('1987-10-06','AA','871'),('1987-10-06','AA','871'),('1987-10-06','AA','880'),('1987-10-06','AA','880'),('1987-10-06','AA','880'),('1987-10-06','AA','880'),('1987-10-06','AA','883'),('1987-10-06','AA','883'),('1987-10-06','AA','885'),('1987-10-06','AA','885'),('1987-10-06','AA','885'),('1987-10-06','AA','890'),('1987-10-06','AA','890'),('1987-10-06','AA','893'),('1987-10-06','AA','893'),('1987-10-06','AA','905'),('1987-10-06','AA','905'),('1987-10-06','AA','915'),('1987-10-06','AA','929'),('1987-10-06','AA','929'),('1987-10-06','AA','936'),('1987-10-06','AA','936'),('1987-10-06','AA','937'),('1987-10-06','AA','937'),('1987-10-06','AA','955'),('1987-10-06','AA','955'),('1987-10-06','AA','966'),('1987-10-06','AA','1002'),('1987-10-06','AA','1002'),('1987-10-06','AA','1004'),('1987-10-06','AA','1004'),('1987-10-06','AA','1015'),('1987-10-06','AA','1015'),('1987-10-06','AA','1021'),('1987-10-06','AA','1021'),('1987-10-06','AA','1041'),('1987-10-06','AA','1041'),('1987-10-06','AA'
,'1046'),('1987-10-06','AA','1046'),('1987-10-06','AA','1048'),('1987-10-06','AA','1061'),('1987-10-06','AA','1088'),('1987-10-06','AA','1088'),('1987-10-06','AA','2033'),('1987-10-06','AA','2033'),('1987-10-06','AA','2050'),('1987-10-06','AA','2058'),('1987-10-06','AA','2071'),('1987-10-06','AA','2071'),('1987-10-06','AA','2086'),('1987-10-06','AA','2105'),('1987-10-06','AA','2111'),('1987-10-06','AA','2123'),('1987-10-06','AA','2123'),('1987-10-06','AA','2147'),('1987-10-06','AA','2147'),('1987-10-06','AA','2199'),('1987-10-06','AA','2199'),('1987-10-06','AA','2207'),('1987-10-06','AA','2207'),('1987-10-06','AA','2217'),('1987-10-06','AA','2230'),('1987-10-06','AA','2245'),('1987-10-06','AA','2251'),('1987-10-06','AA','2251'),('1987-10-06','AA','2275'),('1987-10-06','AA','2278'),('1987-10-06','AA','2351'),('1987-10-06','AA','2357'),('1987-10-06','AA','2490'),('1987-10-06','AA','2528'),('1987-10-06','AA','2528'),('1987-10-06','AA','2735'),('1987-10-06','AA','2735'),('1987-10-06','AA','2751'),('1987-10-06','AL','2'),('1987-10-06','AL','2'),('1987-10-06','AL','7'),('1987-10-06','AL','7'),('1987-10-06','AL','26'),('1987-10-06','AL','26'),('1987-10-06','AL','34'),('1987-10-06','AL','34'),('1987-10-06','AL','36'),('1987-10-06','AL','36'),('1987-10-06','AL','45'),('1987-10-06','AL','45'),('1987-10-06','AL','45'),('1987-10-06','AL','91'),('1987-10-06','AL','91'),('1987-10-06','AL','104'),('1987-10-06','AL','104'),('1987-10-06','AL','104'),('1987-10-06','AL','109'),('1987-10-06','AL','112'),('1987-10-06','AL','112'),('1987-10-06','AL','123'),('1987-10-06','AL','149'),('1987-10-06','AL','160'),('1987-10-06','AL','160'),('1987-10-06','AL','165'),('1987-10-06','AL','171'),('1987-10-06','AL','171'),('1987-10-06','AL','176'),('1987-10-06','AL','176'),('1987-10-06','AL','179'),('1987-10-06','AL','231'),('1987-10-06','AL','263'),('1987-10-06','AL','263'),('1987-10-06','AL','268'),('1987-10-06','AL','268'),('1987-10-06','AL','268'),('1987-10-06','AL','281'),('1987-10-06','AL','281'),('1987-10-06','AL','287'),('1987-10-06','AL','287'),('1987-10-06','AL','309'),('1987-10-06','AL','309'),('1987-10-06','AL','341'),('1987-10-06','AL','344'),('1987-10-06','AL','344'),('1987-10-06','AL','357'),('1987-10-06','AL','357'),('1987-10-06','AL','381'),('1987-10-06','AL','397'),('1987-10-06','AL','397'),('1987-10-06','AL','416'),('1987-10-06','AL','416'),('1987-10-06','AL','417'),('1987-10-06','AL','438'),('1987-10-06','AL','438'),('1987-10-06','AL','446'),('1987-10-06','AL','451'),('1987-10-06','AL','451'),('1987-10-06','AL','491'),('1987-10-06','AL','491'),('1987-10-06','AL','523'),('1987-10-06','AL','523'),('1987-10-06','AL','523'),('1987-10-06','AL','525'),('1987-10-06','AL','525'),('1987-10-06','AL','533'),('1987-10-06','AL','533'),('1987-10-06','AL','546'),('1987-10-06','AL','546'),('1987-10-06','AL','601'),('1987-10-06','AL','601'),('1987-10-06','AL','627'),('1987-10-06','AL','629'),('1987-10-06','AL','670'),('1987-10-06','AL','670'),('1987-10-06','AL','670'),('1987-10-06','AL','673'),('1987-10-06','AL','680'),('1987-10-07','AA','2'),('1987-10-07','AA','2'),('1987-10-07','AA','2'),('1987-10-07','AA','7'),('1987-10-07','AA','7'),('1987-10-07','AA','26'),('1987-10-07','AA','34'),('1987-10-07','AA','36'),('1987-10-07','AA','91'),('1987-10-07','AA','101'),('1987-10-07','AA','101'),('1987-10-07','AA','109'),('1987-10-07','AA','112'),('1987-10-07','AA','123'),('1987-10-07','AA','123'),('1987-10-07','AA','160'),('1987-10-07','AA','165'),('1987-10-07','AA','165'),('1987-10-07','AA','165'),('1987-10-07','AA','176'),('1987
-10-07','AA','176'),('1987-10-07','AA','176'),('1987-10-07','AA','176'),('1987-10-07','AA','179'),('1987-10-07','AA','179'),('1987-10-07','AA','215'),('1987-10-07','AA','215'),('1987-10-07','AA','231'),('1987-10-07','AA','231'),('1987-10-07','AA','263'),('1987-10-07','AA','263'),('1987-10-07','AA','268'),('1987-10-07','AA','268'),('1987-10-07','AA','281'),('1987-10-07','AA','287'),('1987-10-07','AA','287'),('1987-10-07','AA','309'),('1987-10-07','AA','309'),('1987-10-07','AA','309'),('1987-10-07','AA','341'),('1987-10-07','AA','344'),('1987-10-07','AA','344'),('1987-10-07','AA','347'),('1987-10-07','AA','347'),('1987-10-07','AA','368'),('1987-10-07','AA','381'),('1987-10-07','AA','381'),('1987-10-07','AA','381'),('1987-10-07','AA','396'),('1987-10-07','AA','396'),('1987-10-07','AA','397'),('1987-10-07','AA','397'),('1987-10-07','AA','417'),('1987-10-07','AA','417'),('1987-10-07','AA','446'),('1987-10-07','AA','451'),('1987-10-07','AA','451'),('1987-10-07','AA','460'),('1987-10-07','AA','460'),('1987-10-07','AA','504'),('1987-10-07','AA','504'),('1987-10-07','AA','519'),('1987-10-07','AA','519'),('1987-10-07','AA','523'),('1987-10-07','AA','523'),('1987-10-07','AA','525'),('1987-10-07','AA','525'),('1987-10-07','AA','525'),('1987-10-07','AA','533'),('1987-10-07','AA','533'),('1987-10-07','AA','533'),('1987-10-07','AA','546'),('1987-10-07','AA','546'),('1987-10-07','AA','546'),('1987-10-07','AA','546'),('1987-10-07','AA','556'),('1987-10-07','AA','556'),('1987-10-07','AA','556'),('1987-10-07','AA','597'),('1987-10-07','AA','597'),('1987-10-07','AA','597'),('1987-10-07','AA','601'),('1987-10-07','AA','601'),('1987-10-07','AA','627'),('1987-10-07','AA','629'),('1987-10-07','AA','629'),('1987-10-07','AA','670'),('1987-10-07','AA','673'),('1987-10-07','AA','673'),('1987-10-07','AA','680'),('1987-10-07','AA','680'),('1987-10-07','AA','817'),('1987-10-07','AA','817'),('1987-10-07','AA','824'),('1987-10-07','AA','824'),('1987-10-07','AA','824'),('1987-10-07','AA','824'),('1987-10-07','AA','832'),('1987-10-07','AA','832'),('1987-10-07','AA','852'),('1987-10-07','AA','852'),('1987-10-07','AA','866'),('1987-10-07','AA','866'),('1987-10-07','AA','871'),('1987-10-07','AA','871'),('1987-10-07','AA','880'),('1987-10-07','AA','880'),('1987-10-07','AA','880'),('1987-10-07','AA','880'),('1987-10-07','AA','883'),('1987-10-07','AA','883'),('1987-10-07','AA','885'),('1987-10-07','AA','885'),('1987-10-07','AA','890'),('1987-10-07','AA','890'),('1987-10-07','AA','893'),('1987-10-07','AA','893'),('1987-10-07','AA','905'),('1987-10-07','AA','905'),('1987-10-07','AA','915'),('1987-10-07','AA','929'),('1987-10-07','AA','929'),('1987-10-07','AA','936'),('1987-10-07','AA','936'),('1987-10-07','AA','937'),('1987-10-07','AA','937'),('1987-10-07','AA','955'),('1987-10-07','AA','955'),('1987-10-07','AA','966'),('1987-10-07','AA','1002'),('1987-10-07','AA','1002'),('1987-10-07','AA','1004'),('1987-10-07','AA','1004'),('1987-10-07','AA','1015'),('1987-10-07','AA','1015'),('1987-10-07','AA','1021'),('1987-10-07','AA','1021'),('1987-10-07','AA','1041'),('1987-10-07','AA','1041'),('1987-10-07','AA','1046'),('1987-10-07','AA','1046'),('1987-10-07','AA','1048'),('1987-10-07','AA','1048'),('1987-10-07','AA','1061'),('1987-10-07','AA','1061'),('1987-10-07','AA','1088'),('1987-10-07','AA','1088'),('1987-10-07','AA','2033'),('1987-10-07','AA','2033'),('1987-10-07','AA','2050'),('1987-10-07','AA','2058'),('1987-10-07','AA','2071'),('1987-10-07','AA','2071'),('1987-10-07','AA','2086'),('1987-10-07','AA','2105'),('1987-10-07','AA','2111'
),('1987-10-07','AA','2123'),('1987-10-07','AA','2123'),('1987-10-07','AA','2147'),('1987-10-07','AA','2147'),('1987-10-07','AA','2199'),('1987-10-07','AA','2199'),('1987-10-07','AA','2207'),('1987-10-07','AA','2207'),('1987-10-07','AA','2217'),('1987-10-07','AA','2230'),('1987-10-07','AA','2245'),('1987-10-07','AA','2251'),('1987-10-07','AA','2251'),('1987-10-07','AA','2275'),('1987-10-07','AA','2351'),('1987-10-07','AA','2357'),('1987-10-07','AA','2361'),('1987-10-07','AA','2490'),('1987-10-07','AA','2528'),('1987-10-07','AA','2528'),('1987-10-07','AA','2735'),('1987-10-07','AA','2735'),('1987-10-07','AA','2751'),('1987-10-07','AL','2'),('1987-10-07','AL','2'),('1987-10-07','AL','7'),('1987-10-07','AL','7'),('1987-10-07','AL','26'),('1987-10-07','AL','26'),('1987-10-07','AL','34'),('1987-10-07','AL','34'),('1987-10-07','AL','36'),('1987-10-07','AL','36'),('1987-10-07','AL','45'),('1987-10-07','AL','45'),('1987-10-07','AL','45'),('1987-10-07','AL','91'),('1987-10-07','AL','91'),('1987-10-07','AL','104'),('1987-10-07','AL','104'),('1987-10-07','AL','104'),('1987-10-07','AL','109'),('1987-10-07','AL','112'),('1987-10-07','AL','112'),('1987-10-07','AL','123'),('1987-10-07','AL','149'),('1987-10-07','AL','160'),('1987-10-07','AL','160'),('1987-10-07','AL','165'),('1987-10-07','AL','171'),('1987-10-07','AL','171'),('1987-10-07','AL','176'),('1987-10-07','AL','176'),('1987-10-07','AL','179'),('1987-10-07','AL','215'),('1987-10-07','AL','231'),('1987-10-07','AL','263'),('1987-10-07','AL','263'),('1987-10-07','AL','268'),('1987-10-07','AL','268'),('1987-10-07','AL','268'),('1987-10-07','AL','281'),('1987-10-07','AL','281'),('1987-10-07','AL','287'),('1987-10-07','AL','287'),('1987-10-07','AL','309'),('1987-10-07','AL','309'),('1987-10-07','AL','341'),('1987-10-07','AL','344'),('1987-10-07','AL','344'),('1987-10-07','AL','357'),('1987-10-07','AL','357'),('1987-10-07','AL','368'),('1987-10-07','AL','381'),('1987-10-07','AL','396'),('1987-10-07','AL','397'),('1987-10-07','AL','397'),('1987-10-07','AL','416'),('1987-10-07','AL','416'),('1987-10-07','AL','417'),('1987-10-07','AL','438'),('1987-10-07','AL','438'),('1987-10-07','AL','446'),('1987-10-07','AL','451'),('1987-10-07','AL','451'),('1987-10-07','AL','491'),('1987-10-07','AL','491'),('1987-10-07','AL','523'),('1987-10-07','AL','523'),('1987-10-07','AL','523'),('1987-10-07','AL','525'),('1987-10-07','AL','525'),('1987-10-07','AL','533'),('1987-10-07','AL','533'),('1987-10-07','AL','546'),('1987-10-07','AL','546'),('1987-10-07','AL','556'),('1987-10-07','AL','556'),('1987-10-07','AL','601'),('1987-10-07','AL','627'),('1987-10-07','AL','629'),('1987-10-07','AL','670'),('1987-10-07','AL','670'),('1987-10-07','AL','670'),('1987-10-07','AL','673'),('1987-10-07','AL','680'),('1987-10-08','AA','2'),('1987-10-08','AA','2'),('1987-10-08','AA','2'),('1987-10-08','AA','7'),('1987-10-08','AA','7'),('1987-10-08','AA','26'),('1987-10-08','AA','34'),('1987-10-08','AA','36'),('1987-10-08','AA','91'),('1987-10-08','AA','101'),('1987-10-08','AA','101'),('1987-10-08','AA','109'),('1987-10-08','AA','109'),('1987-10-08','AA','112'),('1987-10-08','AA','123'),('1987-10-08','AA','123'),('1987-10-08','AA','160'),('1987-10-08','AA','165'),('1987-10-08','AA','165'),('1987-10-08','AA','165'),('1987-10-08','AA','176'),('1987-10-08','AA','176'),('1987-10-08','AA','176'),('1987-10-08','AA','176'),('1987-10-08','AA','179'),('1987-10-08','AA','179'),('1987-10-08','AA','215'),('1987-10-08','AA','215'),('1987-10-08','AA','231'),('1987-10-08','AA','231'),('1987-10-08','AA','263'),(
'1987-10-08','AA','263'),('1987-10-08','AA','268'),('1987-10-08','AA','268'),('1987-10-08','AA','281'),('1987-10-08','AA','287'),('1987-10-08','AA','287'),('1987-10-08','AA','309'),('1987-10-08','AA','309'),('1987-10-08','AA','309'),('1987-10-08','AA','344'),('1987-10-08','AA','344'),('1987-10-08','AA','347'),('1987-10-08','AA','347'),('1987-10-08','AA','368'),('1987-10-08','AA','381'),('1987-10-08','AA','381'),('1987-10-08','AA','381'),('1987-10-08','AA','396'),('1987-10-08','AA','396'),('1987-10-08','AA','397'),('1987-10-08','AA','397'),('1987-10-08','AA','417'),('1987-10-08','AA','417'),('1987-10-08','AA','446'),('1987-10-08','AA','451'),('1987-10-08','AA','460'),('1987-10-08','AA','460'),('1987-10-08','AA','491'),('1987-10-08','AA','504'),('1987-10-08','AA','504'),('1987-10-08','AA','519'),('1987-10-08','AA','519'),('1987-10-08','AA','523'),('1987-10-08','AA','525'),('1987-10-08','AA','533'),('1987-10-08','AA','533'),('1987-10-08','AA','533'),('1987-10-08','AA','546'),('1987-10-08','AA','546'),('1987-10-08','AA','546'),('1987-10-08','AA','546'),('1987-10-08','AA','556'),('1987-10-08','AA','556'),('1987-10-08','AA','556'),('1987-10-08','AA','597'),('1987-10-08','AA','597'),('1987-10-08','AA','597'),('1987-10-08','AA','601'),('1987-10-08','AA','601'),('1987-10-08','AA','627'),('1987-10-08','AA','629'),('1987-10-08','AA','629'),('1987-10-08','AA','670'),('1987-10-08','AA','673'),('1987-10-08','AA','673'),('1987-10-08','AA','680'),('1987-10-08','AA','680'),('1987-10-08','AA','817'),('1987-10-08','AA','817'),('1987-10-08','AA','824'),('1987-10-08','AA','824'),('1987-10-08','AA','832'),('1987-10-08','AA','832'),('1987-10-08','AA','852'),('1987-10-08','AA','866'),('1987-10-08','AA','866'),('1987-10-08','AA','871'),('1987-10-08','AA','871'),('1987-10-08','AA','880'),('1987-10-08','AA','880'),('1987-10-08','AA','880'),('1987-10-08','AA','880'),('1987-10-08','AA','883'),('1987-10-08','AA','883'),('1987-10-08','AA','885'),('1987-10-08','AA','885'),('1987-10-08','AA','885'),('1987-10-08','AA','890'),('1987-10-08','AA','890'),('1987-10-08','AA','893'),('1987-10-08','AA','893'),('1987-10-08','AA','905'),('1987-10-08','AA','905'),('1987-10-08','AA','915'),('1987-10-08','AA','929'),('1987-10-08','AA','929'),('1987-10-08','AA','936'),('1987-10-08','AA','936'),('1987-10-08','AA','937'),('1987-10-08','AA','937'),('1987-10-08','AA','955'),('1987-10-08','AA','955'),('1987-10-08','AA','966'),('1987-10-08','AA','1002'),('1987-10-08','AA','1002'),('1987-10-08','AA','1004'),('1987-10-08','AA','1004'),('1987-10-08','AA','1015'),('1987-10-08','AA','1015'),('1987-10-08','AA','1021'),('1987-10-08','AA','1021'),('1987-10-08','AA','1041'),('1987-10-08','AA','1041'),('1987-10-08','AA','1046'),('1987-10-08','AA','1046'),('1987-10-08','AA','1048'),('1987-10-08','AA','1061'),('1987-10-08','AA','1061'),('1987-10-08','AA','1088'),('1987-10-08','AA','1088'),('1987-10-08','AA','2033'),('1987-10-08','AA','2033'),('1987-10-08','AA','2050'),('1987-10-08','AA','2058'),('1987-10-08','AA','2071'),('1987-10-08','AA','2071'),('1987-10-08','AA','2086'),('1987-10-08','AA','2111'),('1987-10-08','AA','2123'),('1987-10-08','AA','2123'),('1987-10-08','AA','2147'),('1987-10-08','AA','2147'),('1987-10-08','AA','2199'),('1987-10-08','AA','2199'),('1987-10-08','AA','2207'),('1987-10-08','AA','2207'),('1987-10-08','AA','2217'),('1987-10-08','AA','2230'),('1987-10-08','AA','2245'),('1987-10-08','AA','2251'),('1987-10-08','AA','2251'),('1987-10-08','AA','2275'),('1987-10-08','AA','2278'),('1987-10-08','AA','2351'),('1987-10-08','AA','2357'),('19
87-10-08','AA','2361'),('1987-10-08','AA','2490'),('1987-10-08','AA','2528'),('1987-10-08','AA','2528'),('1987-10-08','AA','2735'),('1987-10-08','AA','2735'),('1987-10-08','AA','2751'),('1987-10-08','AL','2'),('1987-10-08','AL','2'),('1987-10-08','AL','7'),('1987-10-08','AL','7'),('1987-10-08','AL','26'),('1987-10-08','AL','26'),('1987-10-08','AL','34'),('1987-10-08','AL','34'),('1987-10-08','AL','36'),('1987-10-08','AL','36'),('1987-10-08','AL','45'),('1987-10-08','AL','45'),('1987-10-08','AL','45'),('1987-10-08','AL','91'),('1987-10-08','AL','91'),('1987-10-08','AL','104'),('1987-10-08','AL','104'),('1987-10-08','AL','104'),('1987-10-08','AL','109'),('1987-10-08','AL','112'),('1987-10-08','AL','112'),('1987-10-08','AL','123'),('1987-10-08','AL','149'),('1987-10-08','AL','160'),('1987-10-08','AL','160'),('1987-10-08','AL','165'),('1987-10-08','AL','171'),('1987-10-08','AL','171'),('1987-10-08','AL','176'),('1987-10-08','AL','176'),('1987-10-08','AL','179'),('1987-10-08','AL','215'),('1987-10-08','AL','231'),('1987-10-08','AL','263'),('1987-10-08','AL','263'),('1987-10-08','AL','268'),('1987-10-08','AL','268'),('1987-10-08','AL','268'),('1987-10-08','AL','281'),('1987-10-08','AL','281'),('1987-10-08','AL','287'),('1987-10-08','AL','287'),('1987-10-08','AL','309'),('1987-10-08','AL','309'),('1987-10-08','AL','341'),('1987-10-08','AL','344'),('1987-10-08','AL','344'),('1987-10-08','AL','357'),('1987-10-08','AL','357'),('1987-10-08','AL','368'),('1987-10-08','AL','381'),('1987-10-08','AL','396'),('1987-10-08','AL','397'),('1987-10-08','AL','397'),('1987-10-08','AL','416'),('1987-10-08','AL','416'),('1987-10-08','AL','417'),('1987-10-08','AL','438'),('1987-10-08','AL','438'),('1987-10-08','AL','446'),('1987-10-08','AL','451'),('1987-10-08','AL','451'),('1987-10-08','AL','491'),('1987-10-08','AL','491'),('1987-10-08','AL','523'),('1987-10-08','AL','523'),('1987-10-08','AL','523'),('1987-10-08','AL','525'),('1987-10-08','AL','525'),('1987-10-08','AL','533'),('1987-10-08','AL','533'),('1987-10-08','AL','546'),('1987-10-08','AL','546'),('1987-10-08','AL','556'),('1987-10-08','AL','556'),('1987-10-08','AL','601'),('1987-10-08','AL','601'),('1987-10-08','AL','627'),('1987-10-08','AL','629'),('1987-10-08','AL','670'),('1987-10-08','AL','670'),('1987-10-08','AL','670'),('1987-10-08','AL','680'),('1987-10-09','AA','2'),('1987-10-09','AA','2'),('1987-10-09','AA','2'),('1987-10-09','AA','7'),('1987-10-09','AA','7'),('1987-10-09','AA','26'),('1987-10-09','AA','34'),('1987-10-09','AA','36'),('1987-10-09','AA','91'),('1987-10-09','AA','101'),('1987-10-09','AA','101'),('1987-10-09','AA','109'),('1987-10-09','AA','109'),('1987-10-09','AA','112'),('1987-10-09','AA','123'),('1987-10-09','AA','123'),('1987-10-09','AA','160'),('1987-10-09','AA','165'),('1987-10-09','AA','165'),('1987-10-09','AA','176'),('1987-10-09','AA','176'),('1987-10-09','AA','176'),('1987-10-09','AA','176'),('1987-10-09','AA','179'),('1987-10-09','AA','179'),('1987-10-09','AA','215'),('1987-10-09','AA','215'),('1987-10-09','AA','231'),('1987-10-09','AA','263'),('1987-10-09','AA','263'),('1987-10-09','AA','268'),('1987-10-09','AA','268'),('1987-10-09','AA','281'),('1987-10-09','AA','287'),('1987-10-09','AA','309'),('1987-10-09','AA','309'),('1987-10-09','AA','309'),('1987-10-09','AA','344'),('1987-10-09','AA','344'),('1987-10-09','AA','347'),('1987-10-09','AA','347'),('1987-10-09','AA','368'),('1987-10-09','AA','381'),('1987-10-09','AA','381'),('1987-10-09','AA','381'),('1987-10-09','AA','396'),('1987-10-09','AA','396'),('1987-10-09','AA','397
'),('1987-10-09','AA','397'),('1987-10-09','AA','417'),('1987-10-09','AA','417'),('1987-10-09','AA','451'),('1987-10-09','AA','451'),('1987-10-09','AA','460'),('1987-10-09','AA','460'),('1987-10-09','AA','491'),('1987-10-09','AA','504'),('1987-10-09','AA','504'),('1987-10-09','AA','519'),('1987-10-09','AA','519'),('1987-10-09','AA','523'),('1987-10-09','AA','523'),('1987-10-09','AA','525'),('1987-10-09','AA','525'),('1987-10-09','AA','525'),('1987-10-09','AA','533'),('1987-10-09','AA','533'),('1987-10-09','AA','533'),('1987-10-09','AA','546'),('1987-10-09','AA','546'),('1987-10-09','AA','546'),('1987-10-09','AA','546'),('1987-10-09','AA','556'),('1987-10-09','AA','556'),('1987-10-09','AA','556'),('1987-10-09','AA','597'),('1987-10-09','AA','597'),('1987-10-09','AA','597'),('1987-10-09','AA','601'),('1987-10-09','AA','601'),('1987-10-09','AA','629'),('1987-10-09','AA','629'),('1987-10-09','AA','670'),('1987-10-09','AA','673'),('1987-10-09','AA','673'),('1987-10-09','AA','680'),('1987-10-09','AA','680'),('1987-10-09','AA','817'),('1987-10-09','AA','824'),('1987-10-09','AA','824'),('1987-10-09','AA','824'),('1987-10-09','AA','824'),('1987-10-09','AA','832'),('1987-10-09','AA','832'),('1987-10-09','AA','852'),('1987-10-09','AA','852'),('1987-10-09','AA','866'),('1987-10-09','AA','866'),('1987-10-09','AA','871'),('1987-10-09','AA','880'),('1987-10-09','AA','880'),('1987-10-09','AA','880'),('1987-10-09','AA','883'),('1987-10-09','AA','883'),('1987-10-09','AA','885'),('1987-10-09','AA','885'),('1987-10-09','AA','885'),('1987-10-09','AA','890'),('1987-10-09','AA','890'),('1987-10-09','AA','893'),('1987-10-09','AA','893'),('1987-10-09','AA','905'),('1987-10-09','AA','905'),('1987-10-09','AA','915'),('1987-10-09','AA','929'),('1987-10-09','AA','929'),('1987-10-09','AA','936'),('1987-10-09','AA','936'),('1987-10-09','AA','937'),('1987-10-09','AA','937'),('1987-10-09','AA','955'),('1987-10-09','AA','955'),('1987-10-09','AA','966'),('1987-10-09','AA','1002'),('1987-10-09','AA','1002'),('1987-10-09','AA','1004'),('1987-10-09','AA','1004'),('1987-10-09','AA','1015'),('1987-10-09','AA','1015'),('1987-10-09','AA','1021'),('1987-10-09','AA','1021'),('1987-10-09','AA','1041'),('1987-10-09','AA','1041'),('1987-10-09','AA','1046'),('1987-10-09','AA','1046'),('1987-10-09','AA','1048'),('1987-10-09','AA','1048'),('1987-10-09','AA','1061'),('1987-10-09','AA','1061'),('1987-10-09','AA','1088'),('1987-10-09','AA','1088'),('1987-10-09','AA','2033'),('1987-10-09','AA','2033'),('1987-10-09','AA','2050'),('1987-10-09','AA','2058'),('1987-10-09','AA','2071'),('1987-10-09','AA','2071'),('1987-10-09','AA','2086'),('1987-10-09','AA','2105'),('1987-10-09','AA','2111'),('1987-10-09','AA','2123'),('1987-10-09','AA','2123'),('1987-10-09','AA','2147'),('1987-10-09','AA','2147'),('1987-10-09','AA','2199'),('1987-10-09','AA','2199'),('1987-10-09','AA','2207'),('1987-10-09','AA','2207'),('1987-10-09','AA','2217'),('1987-10-09','AA','2230'),('1987-10-09','AA','2245'),('1987-10-09','AA','2251'),('1987-10-09','AA','2251'),('1987-10-09','AA','2275'),('1987-10-09','AA','2278'),('1987-10-09','AA','2351'),('1987-10-09','AA','2357'),('1987-10-09','AA','2361'),('1987-10-09','AA','2490'),('1987-10-09','AA','2528'),('1987-10-09','AA','2528'),('1987-10-09','AA','2735'),('1987-10-09','AA','2735'),('1987-10-09','AA','2751'),('1987-10-09','AL','2'),('1987-10-09','AL','2'),('1987-10-09','AL','7'),('1987-10-09','AL','7'),('1987-10-09','AL','26'),('1987-10-09','AL','34'),('1987-10-09','AL','34'),('1987-10-09','AL','36'),('1987-10-09','AL','36'),('19
87-10-09','AL','45'),('1987-10-09','AL','45'),('1987-10-09','AL','45'),('1987-10-09','AL','91'),('1987-10-09','AL','91'),('1987-10-09','AL','104'),('1987-10-09','AL','104'),('1987-10-09','AL','104'),('1987-10-09','AL','109'),('1987-10-09','AL','112'),('1987-10-09','AL','112'),('1987-10-09','AL','123'),('1987-10-09','AL','149'),('1987-10-09','AL','160'),('1987-10-09','AL','160'),('1987-10-09','AL','165'),('1987-10-09','AL','171'),('1987-10-09','AL','171'),('1987-10-09','AL','176'),('1987-10-09','AL','176'),('1987-10-09','AL','179'),('1987-10-09','AL','215'),('1987-10-09','AL','231'),('1987-10-09','AL','263'),('1987-10-09','AL','263'),('1987-10-09','AL','268'),('1987-10-09','AL','268'),('1987-10-09','AL','268'),('1987-10-09','AL','281'),('1987-10-09','AL','281'),('1987-10-09','AL','287'),('1987-10-09','AL','287'),('1987-10-09','AL','309'),('1987-10-09','AL','309'),('1987-10-09','AL','341'),('1987-10-09','AL','344'),('1987-10-09','AL','344'),('1987-10-09','AL','368'),('1987-10-09','AL','381'),('1987-10-09','AL','396'),('1987-10-09','AL','397'),('1987-10-09','AL','397'),('1987-10-09','AL','416'),('1987-10-09','AL','416'),('1987-10-09','AL','417'),('1987-10-09','AL','438'),('1987-10-09','AL','438'),('1987-10-09','AL','446'),('1987-10-09','AL','451'),('1987-10-09','AL','491'),('1987-10-09','AL','491'),('1987-10-09','AL','523'),('1987-10-09','AL','523'),('1987-10-09','AL','523'),('1987-10-09','AL','525'),('1987-10-09','AL','525'),('1987-10-09','AL','533'),('1987-10-09','AL','533'),('1987-10-09','AL','546'),('1987-10-09','AL','546'),('1987-10-09','AL','556'),('1987-10-09','AL','556'),('1987-10-09','AL','601'),('1987-10-09','AL','601'),('1987-10-09','AL','627'),('1987-10-09','AL','629'),('1987-10-09','AL','670'),('1987-10-09','AL','670'),('1987-10-09','AL','670'),('1987-10-09','AL','673'),('1987-10-09','AL','680'),('1987-10-10','AA','2'),('1987-10-10','AA','2'),('1987-10-10','AA','2'),('1987-10-10','AA','7'),('1987-10-10','AA','7'),('1987-10-10','AA','26'),('1987-10-10','AA','34'),('1987-10-10','AA','36'),('1987-10-10','AA','91'),('1987-10-10','AA','101'),('1987-10-10','AA','101'),('1987-10-10','AA','109'),('1987-10-10','AA','109'),('1987-10-10','AA','112'),('1987-10-10','AA','123'),('1987-10-10','AA','123'),('1987-10-10','AA','160'),('1987-10-10','AA','165'),('1987-10-10','AA','165'),('1987-10-10','AA','165'),('1987-10-10','AA','176'),('1987-10-10','AA','176'),('1987-10-10','AA','176'),('1987-10-10','AA','176'),('1987-10-10','AA','179'),('1987-10-10','AA','179'),('1987-10-10','AA','215'),('1987-10-10','AA','215'),('1987-10-10','AA','231'),('1987-10-10','AA','263'),('1987-10-10','AA','263'),('1987-10-10','AA','268'),('1987-10-10','AA','268'),('1987-10-10','AA','281'),('1987-10-10','AA','287'),('1987-10-10','AA','287'),('1987-10-10','AA','309'),('1987-10-10','AA','309'),('1987-10-10','AA','309'),('1987-10-10','AA','341'),('1987-10-10','AA','344'),('1987-10-10','AA','344'),('1987-10-10','AA','347'),('1987-10-10','AA','347'),('1987-10-10','AA','368'),('1987-10-10','AA','381'),('1987-10-10','AA','381'),('1987-10-10','AA','381'),('1987-10-10','AA','396'),('1987-10-10','AA','396'),('1987-10-10','AA','397'),('1987-10-10','AA','417'),('1987-10-10','AA','417'),('1987-10-10','AA','446'),('1987-10-10','AA','451'),('1987-10-10','AA','451'),('1987-10-10','AA','460'),('1987-10-10','AA','460'),('1987-10-10','AA','491'),('1987-10-10','AA','491'),('1987-10-10','AA','504'),('1987-10-10','AA','504'),('1987-10-10','AA','519'),('1987-10-10','AA','523'),('1987-10-10','AA','523'),('1987-10-10','AA','525'),('1987-10-10','A
A','525'),('1987-10-10','AA','525'),('1987-10-10','AA','533'),('1987-10-10','AA','533'),('1987-10-10','AA','546'),('1987-10-10','AA','546'),('1987-10-10','AA','546'),('1987-10-10','AA','546'),('1987-10-10','AA','556'),('1987-10-10','AA','556'),('1987-10-10','AA','556'),('1987-10-10','AA','597'),('1987-10-10','AA','597'),('1987-10-10','AA','597'),('1987-10-10','AA','601'),('1987-10-10','AA','601'),('1987-10-10','AA','627'),('1987-10-10','AA','629'),('1987-10-10','AA','629'),('1987-10-10','AA','670'),('1987-10-10','AA','673'),('1987-10-10','AA','673'),('1987-10-10','AA','680'),('1987-10-10','AA','680'),('1987-10-10','AA','817'),('1987-10-10','AA','817'),('1987-10-10','AA','824'),('1987-10-10','AA','824'),('1987-10-10','AA','824'),('1987-10-10','AA','824'),('1987-10-10','AA','832'),('1987-10-10','AA','832'),('1987-10-10','AA','852'),('1987-10-10','AA','852'),('1987-10-10','AA','866'),('1987-10-10','AA','866'),('1987-10-10','AA','871'),('1987-10-10','AA','871'),('1987-10-10','AA','880'),('1987-10-10','AA','880'),('1987-10-10','AA','880'),('1987-10-10','AA','880'),('1987-10-10','AA','883'),('1987-10-10','AA','883'),('1987-10-10','AA','885'),('1987-10-10','AA','885'),('1987-10-10','AA','885'),('1987-10-10','AA','890'),('1987-10-10','AA','890'),('1987-10-10','AA','893'),('1987-10-10','AA','893'),('1987-10-10','AA','905'),('1987-10-10','AA','905'),('1987-10-10','AA','915'),('1987-10-10','AA','929'),('1987-10-10','AA','929'),('1987-10-10','AA','936'),('1987-10-10','AA','936'),('1987-10-10','AA','937'),('1987-10-10','AA','955'),('1987-10-10','AA','955'),('1987-10-10','AA','966'),('1987-10-10','AA','1002'),('1987-10-10','AA','1002'),('1987-10-10','AA','1004'),('1987-10-10','AA','1004'),('1987-10-10','AA','1015'),('1987-10-10','AA','1015'),('1987-10-10','AA','1021'),('1987-10-10','AA','1021'),('1987-10-10','AA','1041'),('1987-10-10','AA','1041'),('1987-10-10','AA','1046'),('1987-10-10','AA','1046'),('1987-10-10','AA','1048'),('1987-10-10','AA','1048'),('1987-10-10','AA','1061'),('1987-10-10','AA','1061'),('1987-10-10','AA','1088'),('1987-10-10','AA','1088'),('1987-10-10','AA','2033'),('1987-10-10','AA','2033'),('1987-10-10','AA','2050'),('1987-10-10','AA','2058'),('1987-10-10','AA','2071'),('1987-10-10','AA','2071'),('1987-10-10','AA','2086'),('1987-10-10','AA','2111'),('1987-10-10','AA','2123'),('1987-10-10','AA','2123'),('1987-10-10','AA','2147'),('1987-10-10','AA','2147'),('1987-10-10','AA','2199'),('1987-10-10','AA','2207'),('1987-10-10','AA','2207'),('1987-10-10','AA','2217'),('1987-10-10','AA','2230'),('1987-10-10','AA','2245'),('1987-10-10','AA','2251'),('1987-10-10','AA','2251'),('1987-10-10','AA','2275'),('1987-10-10','AA','2278'),('1987-10-10','AA','2351'),('1987-10-10','AA','2357'),('1987-10-10','AA','2361'),('1987-10-10','AA','2490'),('1987-10-10','AA','2528'),('1987-10-10','AA','2528'),('1987-10-10','AA','2735'),('1987-10-10','AA','2735'),('1987-10-10','AL','2'),('1987-10-10','AL','2'),('1987-10-10','AL','7'),('1987-10-10','AL','7'),('1987-10-10','AL','26'),('1987-10-10','AL','26'),('1987-10-10','AL','34'),('1987-10-10','AL','34'),('1987-10-10','AL','36'),('1987-10-10','AL','36'),('1987-10-10','AL','45'),('1987-10-10','AL','45'),('1987-10-10','AL','45'),('1987-10-10','AL','91'),('1987-10-10','AL','91'),('1987-10-10','AL','104'),('1987-10-10','AL','104'),('1987-10-10','AL','109'),('1987-10-10','AL','112'),('1987-10-10','AL','112'),('1987-10-10','AL','149'),('1987-10-10','AL','160'),('1987-10-10','AL','165'),('1987-10-10','AL','171'),('1987-10-10','AL','171'),('1987-10-10','AL','176'),('1987
-10-10','AL','176'),('1987-10-10','AL','179'),('1987-10-10','AL','215'),('1987-10-10','AL','231'),('1987-10-10','AL','263'),('1987-10-10','AL','268'),('1987-10-10','AL','268'),('1987-10-10','AL','268'),('1987-10-10','AL','287'),('1987-10-10','AL','287'),('1987-10-10','AL','309'),('1987-10-10','AL','309'),('1987-10-10','AL','344'),('1987-10-10','AL','344'),('1987-10-10','AL','357'),('1987-10-10','AL','396'),('1987-10-10','AL','397'),('1987-10-10','AL','397'),('1987-10-10','AL','397'),('1987-10-10','AL','416'),('1987-10-10','AL','417'),('1987-10-10','AL','438'),('1987-10-10','AL','438'),('1987-10-10','AL','451'),('1987-10-10','AL','451'),('1987-10-10','AL','491'),('1987-10-10','AL','491'),('1987-10-10','AL','523'),('1987-10-10','AL','523'),('1987-10-10','AL','525'),('1987-10-10','AL','525'),('1987-10-10','AL','533'),('1987-10-10','AL','546'),('1987-10-10','AL','546'),('1987-10-10','AL','556'),('1987-10-10','AL','556'),('1987-10-10','AL','601'),('1987-10-10','AL','601'),('1987-10-10','AL','627'),('1987-10-10','AL','670'),('1987-10-10','AL','673'),('1987-10-10','AL','680'),('1987-10-10','AL','905'),('1987-10-10','AL','936'),('1987-10-10','AL','966'),('1987-10-11','AA','2'),('1987-10-11','AA','2'),('1987-10-11','AA','2'),('1987-10-11','AA','7'),('1987-10-11','AA','7'),('1987-10-11','AA','26'),('1987-10-11','AA','36'),('1987-10-11','AA','91'),('1987-10-11','AA','101'),('1987-10-11','AA','101'),('1987-10-11','AA','109'),('1987-10-11','AA','109'),('1987-10-11','AA','112'),('1987-10-11','AA','123'),('1987-10-11','AA','160'),('1987-10-11','AA','165'),('1987-10-11','AA','165'),('1987-10-11','AA','165'),('1987-10-11','AA','176'),('1987-10-11','AA','176'),('1987-10-11','AA','176'),('1987-10-11','AA','176'),('1987-10-11','AA','179'),('1987-10-11','AA','179'),('1987-10-11','AA','215'),('1987-10-11','AA','215'),('1987-10-11','AA','231'),('1987-10-11','AA','263'),('1987-10-11','AA','263'),('1987-10-11','AA','268'),('1987-10-11','AA','268'),('1987-10-11','AA','281'),('1987-10-11','AA','287'),('1987-10-11','AA','287'),('1987-10-11','AA','309'),('1987-10-11','AA','309'),('1987-10-11','AA','309'),('1987-10-11','AA','341'),('1987-10-11','AA','344'),('1987-10-11','AA','344'),('1987-10-11','AA','347'),('1987-10-11','AA','347'),('1987-10-11','AA','368'),('1987-10-11','AA','381'),('1987-10-11','AA','381'),('1987-10-11','AA','381'),('1987-10-11','AA','396'),('1987-10-11','AA','396'),('1987-10-11','AA','397'),('1987-10-11','AA','397'),('1987-10-11','AA','417'),('1987-10-11','AA','417'),('1987-10-11','AA','446'),('1987-10-11','AA','451'),('1987-10-11','AA','451'),('1987-10-11','AA','460'),('1987-10-11','AA','460'),('1987-10-11','AA','491'),('1987-10-11','AA','491'),('1987-10-11','AA','504'),('1987-10-11','AA','504'),('1987-10-11','AA','519'),('1987-10-11','AA','519'),('1987-10-11','AA','523'),('1987-10-11','AA','523'),('1987-10-11','AA','525'),('1987-10-11','AA','525'),('1987-10-11','AA','525'),('1987-10-11','AA','533'),('1987-10-11','AA','533'),('1987-10-11','AA','533'),('1987-10-11','AA','546'),('1987-10-11','AA','546'),('1987-10-11','AA','546'),('1987-10-11','AA','546'),('1987-10-11','AA','556'),('1987-10-11','AA','556'),('1987-10-11','AA','556'),('1987-10-11','AA','597'),('1987-10-11','AA','597'),('1987-10-11','AA','597'),('1987-10-11','AA','601'),('1987-10-11','AA','601'),('1987-10-11','AA','627'),('1987-10-11','AA','629'),('1987-10-11','AA','629'),('1987-10-11','AA','670'),('1987-10-11','AA','673'),('1987-10-11','AA','673'),('1987-10-11','AA','680'),('1987-10-11','AA','680'),('1987-10-11','AA','817'),('1987-10-11
','AA','817'),('1987-10-11','AA','824'),('1987-10-11','AA','824'),('1987-10-11','AA','824'),('1987-10-11','AA','824'),('1987-10-11','AA','832'),('1987-10-11','AA','832'),('1987-10-11','AA','852'),('1987-10-11','AA','852'),('1987-10-11','AA','866'),('1987-10-11','AA','866'),('1987-10-11','AA','871'),('1987-10-11','AA','871'),('1987-10-11','AA','880'),('1987-10-11','AA','880'),('1987-10-11','AA','880'),('1987-10-11','AA','880'),('1987-10-11','AA','883'),('1987-10-11','AA','883'),('1987-10-11','AA','885'),('1987-10-11','AA','885'),('1987-10-11','AA','885'),('1987-10-11','AA','890'),('1987-10-11','AA','890'),('1987-10-11','AA','893'),('1987-10-11','AA','893'),('1987-10-11','AA','905'),('1987-10-11','AA','905'),('1987-10-11','AA','915'),('1987-10-11','AA','929'),('1987-10-11','AA','929'),('1987-10-11','AA','936'),('1987-10-11','AA','936'),('1987-10-11','AA','937'),('1987-10-11','AA','937'),('1987-10-11','AA','955'),('1987-10-11','AA','955'),('1987-10-11','AA','966'),('1987-10-11','AA','1002'),('1987-10-11','AA','1002'),('1987-10-11','AA','1004'),('1987-10-11','AA','1004'),('1987-10-11','AA','1015'),('1987-10-11','AA','1015'),('1987-10-11','AA','1021'),('1987-10-11','AA','1021'),('1987-10-11','AA','1041'),('1987-10-11','AA','1041'),('1987-10-11','AA','1046'),('1987-10-11','AA','1046'),('1987-10-11','AA','1048'),('1987-10-11','AA','1048'),('1987-10-11','AA','1061'),('1987-10-11','AA','1061'),('1987-10-11','AA','1088'),('1987-10-11','AA','1088'),('1987-10-11','AA','2033'),('1987-10-11','AA','2033'),('1987-10-11','AA','2058'),('1987-10-11','AA','2071'),('1987-10-11','AA','2071'),('1987-10-11','AA','2086'),('1987-10-11','AA','2111'),('1987-10-11','AA','2123'),('1987-10-11','AA','2123'),('1987-10-11','AA','2147'),('1987-10-11','AA','2147'),('1987-10-11','AA','2199'),('1987-10-11','AA','2199'),('1987-10-11','AA','2199'),('1987-10-11','AA','2207'),('1987-10-11','AA','2207'),('1987-10-11','AA','2230'),('1987-10-11','AA','2245'),('1987-10-11','AA','2251'),('1987-10-11','AA','2251'),('1987-10-11','AA','2275'),('1987-10-11','AA','2278'),('1987-10-11','AA','2351'),('1987-10-11','AA','2357'),('1987-10-11','AA','2361'),('1987-10-11','AA','2490'),('1987-10-11','AA','2528'),('1987-10-11','AA','2528'),('1987-10-11','AA','2735'),('1987-10-11','AA','2735'),('1987-10-11','AA','2751'),('1987-10-11','AL','2'),('1987-10-11','AL','2'),('1987-10-11','AL','7'),('1987-10-11','AL','7'),('1987-10-11','AL','26'),('1987-10-11','AL','26'),('1987-10-11','AL','34'),('1987-10-11','AL','34'),('1987-10-11','AL','36'),('1987-10-11','AL','36'),('1987-10-11','AL','45'),('1987-10-11','AL','45'),('1987-10-11','AL','45'),('1987-10-11','AL','91'),('1987-10-11','AL','91'),('1987-10-11','AL','104'),('1987-10-11','AL','104'),('1987-10-11','AL','104'),('1987-10-11','AL','112'),('1987-10-11','AL','123'),('1987-10-11','AL','149'),('1987-10-11','AL','160'),('1987-10-11','AL','160'),('1987-10-11','AL','165'),('1987-10-11','AL','171'),('1987-10-11','AL','171'),('1987-10-11','AL','176'),('1987-10-11','AL','176'),('1987-10-11','AL','179'),('1987-10-11','AL','215'),('1987-10-11','AL','231'),('1987-10-11','AL','263'),('1987-10-11','AL','263'),('1987-10-11','AL','281'),('1987-10-11','AL','281'),('1987-10-11','AL','309'),('1987-10-11','AL','309'),('1987-10-11','AL','341'),('1987-10-11','AL','344'),('1987-10-11','AL','344'),('1987-10-11','AL','357'),('1987-10-11','AL','357'),('1987-10-11','AL','368'),('1987-10-11','AL','396'),('1987-10-11','AL','416'),('1987-10-11','AL','416'),('1987-10-11','AL','417'),('1987-10-11','AL','438'),('1987-10-11','AL','438'),(
'1987-10-11','AL','446'),('1987-10-11','AL','451'),('1987-10-11','AL','491'),('1987-10-11','AL','491'),('1987-10-11','AL','523'),('1987-10-11','AL','523'),('1987-10-11','AL','523'),('1987-10-11','AL','525'),('1987-10-11','AL','525'),('1987-10-11','AL','533'),('1987-10-11','AL','533'),('1987-10-11','AL','546'),('1987-10-11','AL','546'),('1987-10-11','AL','556'),('1987-10-11','AL','556'),('1987-10-11','AL','601'),('1987-10-11','AL','601'),('1987-10-11','AL','627'),('1987-10-11','AL','629'),('1987-10-11','AL','670'),('1987-10-11','AL','670'),('1987-10-11','AL','670'),('1987-10-11','AL','673'),('1987-10-11','AL','680'),('1987-10-11','AL','937'),('1987-10-11','AL','937'),('1987-10-11','AL','955'),('1987-10-21','AA','2'),('1987-10-21','AA','2'),('1987-10-21','AA','2'),('1987-10-21','AA','7'),('1987-10-21','AA','7'),('1987-10-21','AA','26'),('1987-10-21','AA','34'),('1987-10-21','AA','36'),('1987-10-21','AA','91'),('1987-10-21','AA','101'),('1987-10-21','AA','101'),('1987-10-21','AA','109'),('1987-10-21','AA','109'),('1987-10-21','AA','123'),('1987-10-21','AA','123'),('1987-10-21','AA','160'),('1987-10-21','AA','165'),('1987-10-21','AA','165'),('1987-10-21','AA','165'),('1987-10-21','AA','176'),('1987-10-21','AA','176'),('1987-10-21','AA','176'),('1987-10-21','AA','176'),('1987-10-21','AA','179'),('1987-10-21','AA','179'),('1987-10-21','AA','215'),('1987-10-21','AA','231'),('1987-10-21','AA','231'),('1987-10-21','AA','263'),('1987-10-21','AA','263'),('1987-10-21','AA','268'),('1987-10-21','AA','268'),('1987-10-21','AA','281'),('1987-10-21','AA','287'),('1987-10-21','AA','287'),('1987-10-21','AA','309'),('1987-10-21','AA','309'),('1987-10-21','AA','309'),('1987-10-21','AA','341'),('1987-10-21','AA','344'),('1987-10-21','AA','344'),('1987-10-21','AA','347'),('1987-10-21','AA','347'),('1987-10-21','AA','368'),('1987-10-21','AA','381'),('1987-10-21','AA','381'),('1987-10-21','AA','381'),('1987-10-21','AA','396'),('1987-10-21','AA','396'),('1987-10-21','AA','397'),('1987-10-21','AA','397'),('1987-10-21','AA','417'),('1987-10-21','AA','417'),('1987-10-21','AA','446'),('1987-10-21','AA','451'),('1987-10-21','AA','451'),('1987-10-21','AA','460'),('1987-10-21','AA','460'),('1987-10-21','AA','491'),('1987-10-21','AA','504'),('1987-10-21','AA','504'),('1987-10-21','AA','519'),('1987-10-21','AA','519'),('1987-10-21','AA','523'),('1987-10-21','AA','523'),('1987-10-21','AA','525'),('1987-10-21','AA','525'),('1987-10-21','AA','525'),('1987-10-21','AA','533'),('1987-10-21','AA','533'),('1987-10-21','AA','533'),('1987-10-21','AA','546'),('1987-10-21','AA','546'),('1987-10-21','AA','546'),('1987-10-21','AA','546'),('1987-10-21','AA','556'),('1987-10-21','AA','556'),('1987-10-21','AA','556'),('1987-10-21','AA','597'),('1987-10-21','AA','597'),('1987-10-21','AA','597'),('1987-10-21','AA','601'),('1987-10-21','AA','601'),('1987-10-21','AA','627'),('1987-10-21','AA','629'),('1987-10-21','AA','629'),('1987-10-21','AA','673'),('1987-10-21','AA','673'),('1987-10-21','AA','680'),('1987-10-21','AA','680'),('1987-10-21','AA','817'),('1987-10-21','AA','817'),('1987-10-21','AA','824'),('1987-10-21','AA','824'),('1987-10-21','AA','824'),('1987-10-21','AA','832'),('1987-10-21','AA','832'),('1987-10-21','AA','852'),('1987-10-21','AA','866'),('1987-10-21','AA','866'),('1987-10-21','AA','871'),('1987-10-21','AA','871'),('1987-10-21','AA','880'),('1987-10-21','AA','880'),('1987-10-21','AA','880'),('1987-10-21','AA','880'),('1987-10-21','AA','883'),('1987-10-21','AA','883'),('1987-10-21','AA','885'),('1987-10-21','AA','885'),('1987-1
0-21','AA','885'),('1987-10-21','AA','890'),('1987-10-21','AA','890'),('1987-10-21','AA','893'),('1987-10-21','AA','893'),('1987-10-21','AA','905'),('1987-10-21','AA','905'),('1987-10-21','AA','915'),('1987-10-21','AA','929'),('1987-10-21','AA','929'),('1987-10-21','AA','936'),('1987-10-21','AA','936'),('1987-10-21','AA','937'),('1987-10-21','AA','1002'),('1987-10-21','AA','1002'),('1987-10-21','AA','1004'),('1987-10-21','AA','1004'),('1987-10-21','AA','1015'),('1987-10-21','AA','1021'),('1987-10-21','AA','1021'),('1987-10-21','AA','1041'),('1987-10-21','AA','1041'),('1987-10-21','AA','1046'),('1987-10-21','AA','1046'),('1987-10-21','AA','1048'),('1987-10-21','AA','1048'),('1987-10-21','AA','1061'),('1987-10-21','AA','1061'),('1987-10-21','AA','1088'),('1987-10-21','AA','1088'),('1987-10-21','AA','2033'),('1987-10-21','AA','2033'),('1987-10-21','AA','2050'),('1987-10-21','AA','2071'),('1987-10-21','AA','2086'),('1987-10-21','AA','2105'),('1987-10-21','AA','2111'),('1987-10-21','AA','2123'),('1987-10-21','AA','2123'),('1987-10-21','AA','2147'),('1987-10-21','AA','2147'),('1987-10-21','AA','2199'),('1987-10-21','AA','2199'),('1987-10-21','AA','2207'),('1987-10-21','AA','2207'),('1987-10-21','AA','2217'),('1987-10-21','AA','2230'),('1987-10-21','AA','2245'),('1987-10-21','AA','2251'),('1987-10-21','AA','2251'),('1987-10-21','AA','2275'),('1987-10-21','AA','2278'),('1987-10-21','AA','2351'),('1987-10-21','AA','2357'),('1987-10-21','AA','2361'),('1987-10-21','AA','2490'),('1987-10-21','AA','2528'),('1987-10-21','AA','2528'),('1987-10-21','AA','2735'),('1987-10-21','AA','2735'),('1987-10-21','AA','2751'),('1987-10-21','AL','2'),('1987-10-21','AL','2'),('1987-10-21','AL','7'),('1987-10-21','AL','7'),('1987-10-21','AL','26'),('1987-10-21','AL','26'),('1987-10-21','AL','34'),('1987-10-21','AL','34'),('1987-10-21','AL','36'),('1987-10-21','AL','36'),('1987-10-21','AL','45'),('1987-10-21','AL','45'),('1987-10-21','AL','45'),('1987-10-21','AL','91'),('1987-10-21','AL','91'),('1987-10-21','AL','104'),('1987-10-21','AL','104'),('1987-10-21','AL','104'),('1987-10-21','AL','109'),('1987-10-21','AL','112'),('1987-10-21','AL','112'),('1987-10-21','AL','123'),('1987-10-21','AL','149'),('1987-10-21','AL','160'),('1987-10-21','AL','160'),('1987-10-21','AL','165'),('1987-10-21','AL','171'),('1987-10-21','AL','171'),('1987-10-21','AL','176'),('1987-10-21','AL','176'),('1987-10-21','AL','179'),('1987-10-21','AL','215'),('1987-10-21','AL','263'),('1987-10-21','AL','263'),('1987-10-21','AL','268'),('1987-10-21','AL','268'),('1987-10-21','AL','268'),('1987-10-21','AL','281'),('1987-10-21','AL','281'),('1987-10-21','AL','287'),('1987-10-21','AL','287'),('1987-10-21','AL','309'),('1987-10-21','AL','309'),('1987-10-21','AL','341'),('1987-10-21','AL','344'),('1987-10-21','AL','344'),('1987-10-21','AL','357'),('1987-10-21','AL','357'),('1987-10-21','AL','368'),('1987-10-21','AL','381'),('1987-10-21','AL','396'),('1987-10-21','AL','397'),('1987-10-21','AL','397'),('1987-10-21','AL','416'),('1987-10-21','AL','416'),('1987-10-21','AL','417'),('1987-10-21','AL','438'),('1987-10-21','AL','438'),('1987-10-21','AL','446'),('1987-10-21','AL','451'),('1987-10-21','AL','451'),('1987-10-21','AL','491'),('1987-10-21','AL','491'),('1987-10-21','AL','523'),('1987-10-21','AL','523'),('1987-10-21','AL','523'),('1987-10-21','AL','525'),('1987-10-21','AL','525'),('1987-10-21','AL','533'),('1987-10-21','AL','533'),('1987-10-21','AL','546'),('1987-10-21','AL','546'),('1987-10-21','AL','556'),('1987-10-21','AL','556'),('1987-10-21','AL','601'
),('1987-10-21','AL','601'),('1987-10-21','AL','627'),('1987-10-21','AL','629'),('1987-10-21','AL','670'),('1987-10-21','AL','670'),('1987-10-21','AL','670'),('1987-10-21','AL','673'),('1987-10-21','AL','680'),('1987-10-22','AA','2'),('1987-10-22','AA','2'),('1987-10-22','AA','2'),('1987-10-22','AA','7'),('1987-10-22','AA','7'),('1987-10-22','AA','26'),('1987-10-22','AA','34'),('1987-10-22','AA','36'),('1987-10-22','AA','91'),('1987-10-22','AA','101'),('1987-10-22','AA','101'),('1987-10-22','AA','109'),('1987-10-22','AA','109'),('1987-10-22','AA','112'),('1987-10-22','AA','123'),('1987-10-22','AA','123'),('1987-10-22','AA','160'),('1987-10-22','AA','165'),('1987-10-22','AA','165'),('1987-10-22','AA','165'),('1987-10-22','AA','176'),('1987-10-22','AA','176'),('1987-10-22','AA','176'),('1987-10-22','AA','179'),('1987-10-22','AA','215'),('1987-10-22','AA','215'),('1987-10-22','AA','231'),('1987-10-22','AA','231'),('1987-10-22','AA','263'),('1987-10-22','AA','263'),('1987-10-22','AA','268'),('1987-10-22','AA','268'),('1987-10-22','AA','281'),('1987-10-22','AA','287'),('1987-10-22','AA','287'),('1987-10-22','AA','309'),('1987-10-22','AA','309'),('1987-10-22','AA','309'),('1987-10-22','AA','341'),('1987-10-22','AA','344'),('1987-10-22','AA','344'),('1987-10-22','AA','347'),('1987-10-22','AA','368'),('1987-10-22','AA','381'),('1987-10-22','AA','381'),('1987-10-22','AA','381'),('1987-10-22','AA','396'),('1987-10-22','AA','396'),('1987-10-22','AA','397'),('1987-10-22','AA','397'),('1987-10-22','AA','417'),('1987-10-22','AA','417'),('1987-10-22','AA','446'),('1987-10-22','AA','451'),('1987-10-22','AA','451'),('1987-10-22','AA','460'),('1987-10-22','AA','460'),('1987-10-22','AA','491'),('1987-10-22','AA','504'),('1987-10-22','AA','519'),('1987-10-22','AA','519'),('1987-10-22','AA','523'),('1987-10-22','AA','523'),('1987-10-22','AA','525'),('1987-10-22','AA','525'),('1987-10-22','AA','533'),('1987-10-22','AA','533'),('1987-10-22','AA','533'),('1987-10-22','AA','546'),('1987-10-22','AA','546'),('1987-10-22','AA','546'),('1987-10-22','AA','546'),('1987-10-22','AA','556'),('1987-10-22','AA','556'),('1987-10-22','AA','556'),('1987-10-22','AA','597'),('1987-10-22','AA','597'),('1987-10-22','AA','597'),('1987-10-22','AA','601'),('1987-10-22','AA','601'),('1987-10-22','AA','627'),('1987-10-22','AA','629'),('1987-10-22','AA','629'),('1987-10-22','AA','673'),('1987-10-22','AA','673'),('1987-10-22','AA','680'),('1987-10-22','AA','680'),('1987-10-22','AA','817'),('1987-10-22','AA','817'),('1987-10-22','AA','824'),('1987-10-22','AA','824'),('1987-10-22','AA','824'),('1987-10-22','AA','832'),('1987-10-22','AA','832'),('1987-10-22','AA','852'),('1987-10-22','AA','852'),('1987-10-22','AA','866'),('1987-10-22','AA','866'),('1987-10-22','AA','871'),('1987-10-22','AA','871'),('1987-10-22','AA','880'),('1987-10-22','AA','880'),('1987-10-22','AA','880'),('1987-10-22','AA','880'),('1987-10-22','AA','883'),('1987-10-22','AA','883'),('1987-10-22','AA','885'),('1987-10-22','AA','885'),('1987-10-22','AA','885'),('1987-10-22','AA','890'),('1987-10-22','AA','890'),('1987-10-22','AA','893'),('1987-10-22','AA','893'),('1987-10-22','AA','905'),('1987-10-22','AA','905'),('1987-10-22','AA','915'),('1987-10-22','AA','929'),('1987-10-22','AA','929'),('1987-10-22','AA','936'),('1987-10-22','AA','936'),('1987-10-22','AA','937'),('1987-10-22','AA','937'),('1987-10-22','AA','955'),('1987-10-22','AA','955'),('1987-10-22','AA','1002'),('1987-10-22','AA','1002'),('1987-10-22','AA','1004'),('1987-10-22','AA','1015'),('1987-10-22','AA','1015'),
('1987-10-22','AA','1021'),('1987-10-22','AA','1021'),('1987-10-22','AA','1041'),('1987-10-22','AA','1041'),('1987-10-22','AA','1046'),('1987-10-22','AA','1046'),('1987-10-22','AA','1048'),('1987-10-22','AA','1048'),('1987-10-22','AA','1061'),('1987-10-22','AA','1061'),('1987-10-22','AA','1088'),('1987-10-22','AA','1088'),('1987-10-22','AA','2033'),('1987-10-22','AA','2058'),('1987-10-22','AA','2071'),('1987-10-22','AA','2071'),('1987-10-22','AA','2086'),('1987-10-22','AA','2105'),('1987-10-22','AA','2111'),('1987-10-22','AA','2147'),('1987-10-22','AA','2147'),('1987-10-22','AA','2199'),('1987-10-22','AA','2207'),('1987-10-22','AA','2207'),('1987-10-22','AA','2217'),('1987-10-22','AA','2230'),('1987-10-22','AA','2245'),('1987-10-22','AA','2251'),('1987-10-22','AA','2251'),('1987-10-22','AA','2275'),('1987-10-22','AA','2278'),('1987-10-22','AA','2351'),('1987-10-22','AA','2357'),('1987-10-22','AA','2361'),('1987-10-22','AA','2490'),('1987-10-22','AA','2528'),('1987-10-22','AA','2528'),('1987-10-22','AA','2735'),('1987-10-22','AA','2735'),('1987-10-22','AA','2751'),('1987-10-22','AL','2'),('1987-10-22','AL','2'),('1987-10-22','AL','7'),('1987-10-22','AL','7'),('1987-10-22','AL','26'),('1987-10-22','AL','26'),('1987-10-22','AL','34'),('1987-10-22','AL','34'),('1987-10-22','AL','36'),('1987-10-22','AL','36'),('1987-10-22','AL','45'),('1987-10-22','AL','45'),('1987-10-22','AL','45'),('1987-10-22','AL','91'),('1987-10-22','AL','91'),('1987-10-22','AL','104'),('1987-10-22','AL','104'),('1987-10-22','AL','104'),('1987-10-22','AL','109'),('1987-10-22','AL','112'),('1987-10-22','AL','112'),('1987-10-22','AL','123'),('1987-10-22','AL','149'),('1987-10-22','AL','160'),('1987-10-22','AL','160'),('1987-10-22','AL','165'),('1987-10-22','AL','171'),('1987-10-22','AL','171'),('1987-10-22','AL','176'),('1987-10-22','AL','176'),('1987-10-22','AL','179'),('1987-10-22','AL','215'),('1987-10-22','AL','231'),('1987-10-22','AL','263'),('1987-10-22','AL','263'),('1987-10-22','AL','268'),('1987-10-22','AL','268'),('1987-10-22','AL','268'),('1987-10-22','AL','281'),('1987-10-22','AL','281'),('1987-10-22','AL','287'),('1987-10-22','AL','287'),('1987-10-22','AL','309'),('1987-10-22','AL','309'),('1987-10-22','AL','341'),('1987-10-22','AL','344'),('1987-10-22','AL','344'),('1987-10-22','AL','357'),('1987-10-22','AL','357'),('1987-10-22','AL','368'),('1987-10-22','AL','381'),('1987-10-22','AL','396'),('1987-10-22','AL','397'),('1987-10-22','AL','397'),('1987-10-22','AL','416'),('1987-10-22','AL','417'),('1987-10-22','AL','438'),('1987-10-22','AL','438'),('1987-10-22','AL','446'),('1987-10-22','AL','451'),('1987-10-22','AL','451'),('1987-10-22','AL','491'),('1987-10-22','AL','491'),('1987-10-22','AL','523'),('1987-10-22','AL','523'),('1987-10-22','AL','523'),('1987-10-22','AL','525'),('1987-10-22','AL','525'),('1987-10-22','AL','533'),('1987-10-22','AL','533'),('1987-10-22','AL','546'),('1987-10-22','AL','546'),('1987-10-22','AL','556'),('1987-10-22','AL','556'),('1987-10-22','AL','601'),('1987-10-22','AL','601'),('1987-10-22','AL','627'),('1987-10-22','AL','629'),('1987-10-22','AL','670'),('1987-10-22','AL','670'),('1987-10-22','AL','670'),('1987-10-22','AL','673'),('1987-10-22','AL','680'),('1987-10-23','AA','2'),('1987-10-23','AA','2'),('1987-10-23','AA','2'),('1987-10-23','AA','7'),('1987-10-23','AA','7'),('1987-10-23','AA','26'),('1987-10-23','AA','34'),('1987-10-23','AA','36'),('1987-10-23','AA','91'),('1987-10-23','AA','101'),('1987-10-23','AA','109'),('1987-10-23','AA','109'),('1987-10-23','AA','112'),('1987-10-23
','AA','123'),('1987-10-23','AA','123'),('1987-10-23','AA','160'),('1987-10-23','AA','165'),('1987-10-23','AA','165'),('1987-10-23','AA','165'),('1987-10-23','AA','176'),('1987-10-23','AA','176'),('1987-10-23','AA','176'),('1987-10-23','AA','176'),('1987-10-23','AA','179'),('1987-10-23','AA','179'),('1987-10-23','AA','215'),('1987-10-23','AA','215'),('1987-10-23','AA','231'),('1987-10-23','AA','231'),('1987-10-23','AA','263'),('1987-10-23','AA','263'),('1987-10-23','AA','268'),('1987-10-23','AA','268'),('1987-10-23','AA','281'),('1987-10-23','AA','287'),('1987-10-23','AA','287'),('1987-10-23','AA','309'),('1987-10-23','AA','309'),('1987-10-23','AA','309'),('1987-10-23','AA','341'),('1987-10-23','AA','344'),('1987-10-23','AA','344'),('1987-10-23','AA','347'),('1987-10-23','AA','368'),('1987-10-23','AA','381'),('1987-10-23','AA','381'),('1987-10-23','AA','381'),('1987-10-23','AA','396'),('1987-10-23','AA','396'),('1987-10-23','AA','397'),('1987-10-23','AA','397'),('1987-10-23','AA','417'),('1987-10-23','AA','417'),('1987-10-23','AA','446'),('1987-10-23','AA','451'),('1987-10-23','AA','451'),('1987-10-23','AA','460'),('1987-10-23','AA','491'),('1987-10-23','AA','504'),('1987-10-23','AA','504'),('1987-10-23','AA','519'),('1987-10-23','AA','519'),('1987-10-23','AA','523'),('1987-10-23','AA','523'),('1987-10-23','AA','525'),('1987-10-23','AA','533'),('1987-10-23','AA','533'),('1987-10-23','AA','533'),('1987-10-23','AA','546'),('1987-10-23','AA','546'),('1987-10-23','AA','546'),('1987-10-23','AA','546'),('1987-10-23','AA','556'),('1987-10-23','AA','556'),('1987-10-23','AA','556'),('1987-10-23','AA','597'),('1987-10-23','AA','597'),('1987-10-23','AA','601'),('1987-10-23','AA','601'),('1987-10-23','AA','627'),('1987-10-23','AA','629'),('1987-10-23','AA','629'),('1987-10-23','AA','673'),('1987-10-23','AA','680'),('1987-10-23','AA','680'),('1987-10-23','AA','817'),('1987-10-23','AA','817'),('1987-10-23','AA','824'),('1987-10-23','AA','824'),('1987-10-23','AA','824'),('1987-10-23','AA','824'),('1987-10-23','AA','832'),('1987-10-23','AA','832'),('1987-10-23','AA','852'),('1987-10-23','AA','866'),('1987-10-23','AA','866'),('1987-10-23','AA','871'),('1987-10-23','AA','871'),('1987-10-23','AA','880'),('1987-10-23','AA','880'),('1987-10-23','AA','880'),('1987-10-23','AA','880'),('1987-10-23','AA','883'),('1987-10-23','AA','883'),('1987-10-23','AA','885'),('1987-10-23','AA','885'),('1987-10-23','AA','885'),('1987-10-23','AA','890'),('1987-10-23','AA','890'),('1987-10-23','AA','893'),('1987-10-23','AA','893'),('1987-10-23','AA','905'),('1987-10-23','AA','905'),('1987-10-23','AA','929'),('1987-10-23','AA','929'),('1987-10-23','AA','936'),('1987-10-23','AA','936'),('1987-10-23','AA','937'),('1987-10-23','AA','937'),('1987-10-23','AA','955'),('1987-10-23','AA','955'),('1987-10-23','AA','966'),('1987-10-23','AA','1002'),('1987-10-23','AA','1002'),('1987-10-23','AA','1004'),('1987-10-23','AA','1004'),('1987-10-23','AA','1015'),('1987-10-23','AA','1015'),('1987-10-23','AA','1021'),('1987-10-23','AA','1021'),('1987-10-23','AA','1041'),('1987-10-23','AA','1041'),('1987-10-23','AA','1046'),('1987-10-23','AA','1048'),('1987-10-23','AA','1048'),('1987-10-23','AA','1061'),('1987-10-23','AA','1061'),('1987-10-23','AA','1088'),('1987-10-23','AA','1088'),('1987-10-23','AA','2033'),('1987-10-23','AA','2033'),('1987-10-23','AA','2050'),('1987-10-23','AA','2058'),('1987-10-23','AA','2071'),('1987-10-23','AA','2071'),('1987-10-23','AA','2086'),('1987-10-23','AA','2105'),('1987-10-23','AA','2111'),('1987-10-23','AA','2123'),('19
87-10-23','AA','2123'),('1987-10-23','AA','2147'),('1987-10-23','AA','2147'),('1987-10-23','AA','2199'),('1987-10-23','AA','2199'),('1987-10-23','AA','2199'),('1987-10-23','AA','2207'),('1987-10-23','AA','2207'),('1987-10-23','AA','2245'),('1987-10-23','AA','2251'),('1987-10-23','AA','2275'),('1987-10-23','AA','2278'),('1987-10-23','AA','2351'),('1987-10-23','AA','2357'),('1987-10-23','AA','2361'),('1987-10-23','AA','2490'),('1987-10-23','AA','2528'),('1987-10-23','AA','2528'),('1987-10-23','AA','2735'),('1987-10-23','AA','2735'),('1987-10-23','AA','2751'),('1987-10-23','AL','2'),('1987-10-23','AL','2'),('1987-10-23','AL','7'),('1987-10-23','AL','7'),('1987-10-23','AL','26'),('1987-10-23','AL','26'),('1987-10-23','AL','34'),('1987-10-23','AL','34'),('1987-10-23','AL','36'),('1987-10-23','AL','36'),('1987-10-23','AL','45'),('1987-10-23','AL','45'),('1987-10-23','AL','45'),('1987-10-23','AL','91'),('1987-10-23','AL','91'),('1987-10-23','AL','104'),('1987-10-23','AL','104'),('1987-10-23','AL','104'),('1987-10-23','AL','109'),('1987-10-23','AL','112'),('1987-10-23','AL','112'),('1987-10-23','AL','123'),('1987-10-23','AL','149'),('1987-10-23','AL','160'),('1987-10-23','AL','160'),('1987-10-23','AL','165'),('1987-10-23','AL','171'),('1987-10-23','AL','176'),('1987-10-23','AL','176'),('1987-10-23','AL','179'),('1987-10-23','AL','215'),('1987-10-23','AL','231'),('1987-10-23','AL','263'),('1987-10-23','AL','263'),('1987-10-23','AL','268'),('1987-10-23','AL','268'),('1987-10-23','AL','268'),('1987-10-23','AL','281'),('1987-10-23','AL','281'),('1987-10-23','AL','287'),('1987-10-23','AL','287'),('1987-10-23','AL','309'),('1987-10-23','AL','309'),('1987-10-23','AL','341'),('1987-10-23','AL','344'),('1987-10-23','AL','344'),('1987-10-23','AL','357'),('1987-10-23','AL','357'),('1987-10-23','AL','368'),('1987-10-23','AL','381'),('1987-10-23','AL','396'),('1987-10-23','AL','397'),('1987-10-23','AL','397'),('1987-10-23','AL','416'),('1987-10-23','AL','416'),('1987-10-23','AL','417'),('1987-10-23','AL','438'),('1987-10-23','AL','438'),('1987-10-23','AL','446'),('1987-10-23','AL','451'),('1987-10-23','AL','451'),('1987-10-23','AL','491'),('1987-10-23','AL','491'),('1987-10-23','AL','523'),('1987-10-23','AL','523'),('1987-10-23','AL','523'),('1987-10-23','AL','525'),('1987-10-23','AL','525'),('1987-10-23','AL','533'),('1987-10-23','AL','533'),('1987-10-23','AL','546'),('1987-10-23','AL','546'),('1987-10-23','AL','556'),('1987-10-23','AL','556'),('1987-10-23','AL','601'),('1987-10-23','AL','601'),('1987-10-23','AL','627'),('1987-10-23','AL','629'),('1987-10-23','AL','670'),('1987-10-23','AL','670'),('1987-10-23','AL','670'),('1987-10-23','AL','673'),('1987-10-23','AL','680'),('1987-10-24','AA','2'),('1987-10-24','AA','2'),('1987-10-24','AA','7'),('1987-10-24','AA','7'),('1987-10-24','AA','26'),('1987-10-24','AA','34'),('1987-10-24','AA','36'),('1987-10-24','AA','91'),('1987-10-24','AA','101'),('1987-10-24','AA','101'),('1987-10-24','AA','109'),('1987-10-24','AA','109'),('1987-10-24','AA','112'),('1987-10-24','AA','123'),('1987-10-24','AA','123'),('1987-10-24','AA','160'),('1987-10-24','AA','165'),('1987-10-24','AA','165'),('1987-10-24','AA','165'),('1987-10-24','AA','176'),('1987-10-24','AA','176'),('1987-10-24','AA','176'),('1987-10-24','AA','179'),('1987-10-24','AA','179'),('1987-10-24','AA','215'),('1987-10-24','AA','215'),('1987-10-24','AA','231'),('1987-10-24','AA','263'),('1987-10-24','AA','263'),('1987-10-24','AA','268'),('1987-10-24','AA','268'),('1987-10-24','AA','281'),('1987-10-24','AA','287'),('1987-
10-24','AA','287'),('1987-10-24','AA','309'),('1987-10-24','AA','309'),('1987-10-24','AA','309'),('1987-10-24','AA','344'),('1987-10-24','AA','344'),('1987-10-24','AA','347'),('1987-10-24','AA','347'),('1987-10-24','AA','368'),('1987-10-24','AA','381'),('1987-10-24','AA','381'),('1987-10-24','AA','381'),('1987-10-24','AA','396'),('1987-10-24','AA','396'),('1987-10-24','AA','397'),('1987-10-24','AA','397'),('1987-10-24','AA','417'),('1987-10-24','AA','417'),('1987-10-24','AA','446'),('1987-10-24','AA','451'),('1987-10-24','AA','451'),('1987-10-24','AA','460'),('1987-10-24','AA','460'),('1987-10-24','AA','491'),('1987-10-24','AA','491'),('1987-10-24','AA','504'),('1987-10-24','AA','504'),('1987-10-24','AA','519'),('1987-10-24','AA','519'),('1987-10-24','AA','523'),('1987-10-24','AA','523'),('1987-10-24','AA','525'),('1987-10-24','AA','525'),('1987-10-24','AA','525'),('1987-10-24','AA','533'),('1987-10-24','AA','533'),('1987-10-24','AA','546'),('1987-10-24','AA','546'),('1987-10-24','AA','546'),('1987-10-24','AA','556'),('1987-10-24','AA','556'),('1987-10-24','AA','556'),('1987-10-24','AA','597'),('1987-10-24','AA','597'),('1987-10-24','AA','597'),('1987-10-24','AA','601'),('1987-10-24','AA','601'),('1987-10-24','AA','627'),('1987-10-24','AA','629'),('1987-10-24','AA','673'),('1987-10-24','AA','673'),('1987-10-24','AA','680'),('1987-10-24','AA','680'),('1987-10-24','AA','817'),('1987-10-24','AA','817'),('1987-10-24','AA','824'),('1987-10-24','AA','824'),('1987-10-24','AA','824'),('1987-10-24','AA','832'),('1987-10-24','AA','832'),('1987-10-24','AA','852'),('1987-10-24','AA','852'),('1987-10-24','AA','866'),('1987-10-24','AA','866'),('1987-10-24','AA','871'),('1987-10-24','AA','871'),('1987-10-24','AA','880'),('1987-10-24','AA','880'),('1987-10-24','AA','880'),('1987-10-24','AA','880'),('1987-10-24','AA','883'),('1987-10-24','AA','883'),('1987-10-24','AA','885'),('1987-10-24','AA','885'),('1987-10-24','AA','885'),('1987-10-24','AA','890'),('1987-10-24','AA','890'),('1987-10-24','AA','893'),('1987-10-24','AA','893'),('1987-10-24','AA','905'),('1987-10-24','AA','905'),('1987-10-24','AA','915'),('1987-10-24','AA','929'),('1987-10-24','AA','929'),('1987-10-24','AA','936'),('1987-10-24','AA','936'),('1987-10-24','AA','937'),('1987-10-24','AA','937'),('1987-10-24','AA','955'),('1987-10-24','AA','955'),('1987-10-24','AA','966'),('1987-10-24','AA','1002'),('1987-10-24','AA','1002'),('1987-10-24','AA','1004'),('1987-10-24','AA','1004'),('1987-10-24','AA','1015'),('1987-10-24','AA','1021'),('1987-10-24','AA','1021'),('1987-10-24','AA','1041'),('1987-10-24','AA','1041'),('1987-10-24','AA','1046'),('1987-10-24','AA','1046'),('1987-10-24','AA','1048'),('1987-10-24','AA','1048'),('1987-10-24','AA','1061'),('1987-10-24','AA','1061'),('1987-10-24','AA','1088'),('1987-10-24','AA','1088'),('1987-10-24','AA','2033'),('1987-10-24','AA','2033'),('1987-10-24','AA','2050'),('1987-10-24','AA','2058'),('1987-10-24','AA','2071'),('1987-10-24','AA','2071'),('1987-10-24','AA','2086'),('1987-10-24','AA','2105'),('1987-10-24','AA','2111'),('1987-10-24','AA','2123'),('1987-10-24','AA','2147'),('1987-10-24','AA','2199'),('1987-10-24','AA','2199'),('1987-10-24','AA','2207'),('1987-10-24','AA','2217'),('1987-10-24','AA','2230'),('1987-10-24','AA','2245'),('1987-10-24','AA','2251'),('1987-10-24','AA','2251'),('1987-10-24','AA','2275'),('1987-10-24','AA','2278'),('1987-10-24','AA','2351'),('1987-10-24','AA','2357'),('1987-10-24','AA','2361'),('1987-10-24','AA','2490'),('1987-10-24','AA','2528'),('1987-10-24','AA','2528'),('1987-1
0-24','AA','2735'),('1987-10-24','AA','2735'),('1987-10-24','AL','2'),('1987-10-24','AL','2'),('1987-10-24','AL','7'),('1987-10-24','AL','7'),('1987-10-24','AL','26'),('1987-10-24','AL','26'),('1987-10-24','AL','34'),('1987-10-24','AL','34'),('1987-10-24','AL','36'),('1987-10-24','AL','36'),('1987-10-24','AL','45'),('1987-10-24','AL','91'),('1987-10-24','AL','91'),('1987-10-24','AL','104'),('1987-10-24','AL','104'),('1987-10-24','AL','109'),('1987-10-24','AL','112'),('1987-10-24','AL','112'),('1987-10-24','AL','149'),('1987-10-24','AL','160'),('1987-10-24','AL','160'),('1987-10-24','AL','165'),('1987-10-24','AL','171'),('1987-10-24','AL','171'),('1987-10-24','AL','176'),('1987-10-24','AL','176'),('1987-10-24','AL','179'),('1987-10-24','AL','215'),('1987-10-24','AL','231'),('1987-10-24','AL','263'),('1987-10-24','AL','268'),('1987-10-24','AL','268'),('1987-10-24','AL','268'),('1987-10-24','AL','287'),('1987-10-24','AL','287'),('1987-10-24','AL','309'),('1987-10-24','AL','309'),('1987-10-24','AL','344'),('1987-10-24','AL','344'),('1987-10-24','AL','357'),('1987-10-24','AL','357'),('1987-10-24','AL','381'),('1987-10-24','AL','396'),('1987-10-24','AL','397'),('1987-10-24','AL','397'),('1987-10-24','AL','397'),('1987-10-24','AL','416'),('1987-10-24','AL','417'),('1987-10-24','AL','438'),('1987-10-24','AL','438'),('1987-10-24','AL','451'),('1987-10-24','AL','451'),('1987-10-24','AL','491'),('1987-10-24','AL','491'),('1987-10-24','AL','523'),('1987-10-24','AL','523'),('1987-10-24','AL','525'),('1987-10-24','AL','525'),('1987-10-24','AL','533'),('1987-10-24','AL','546'),('1987-10-24','AL','546'),('1987-10-24','AL','556'),('1987-10-24','AL','556'),('1987-10-24','AL','601'),('1987-10-24','AL','601'),('1987-10-24','AL','627'),('1987-10-24','AL','670'),('1987-10-24','AL','673'),('1987-10-24','AL','680'),('1987-10-24','AL','905'),('1987-10-24','AL','936'),('1987-10-24','AL','966'),('1987-10-24','AL','982'),('1987-10-25','AA','2'),('1987-10-25','AA','2'),('1987-10-25','AA','2'),('1987-10-25','AA','7'),('1987-10-25','AA','7'),('1987-10-25','AA','26'),('1987-10-25','AA','34'),('1987-10-25','AA','36'),('1987-10-25','AA','91'),('1987-10-25','AA','101'),('1987-10-25','AA','109'),('1987-10-25','AA','109'),('1987-10-25','AA','123'),('1987-10-25','AA','123'),('1987-10-25','AA','160'),('1987-10-25','AA','165'),('1987-10-25','AA','165'),('1987-10-25','AA','165'),('1987-10-25','AA','176'),('1987-10-25','AA','176'),('1987-10-25','AA','176'),('1987-10-25','AA','176'),('1987-10-25','AA','179'),('1987-10-25','AA','179'),('1987-10-25','AA','215'),('1987-10-25','AA','215'),('1987-10-25','AA','231'),('1987-10-25','AA','231'),('1987-10-25','AA','263'),('1987-10-25','AA','263'),('1987-10-25','AA','268'),('1987-10-25','AA','268'),('1987-10-25','AA','281'),('1987-10-25','AA','287'),('1987-10-25','AA','287'),('1987-10-25','AA','309'),('1987-10-25','AA','309'),('1987-10-25','AA','309'),('1987-10-25','AA','341'),('1987-10-25','AA','344'),('1987-10-25','AA','344'),('1987-10-25','AA','347'),('1987-10-25','AA','347'),('1987-10-25','AA','368'),('1987-10-25','AA','381'),('1987-10-25','AA','381'),('1987-10-25','AA','396'),('1987-10-25','AA','397'),('1987-10-25','AA','397'),('1987-10-25','AA','417'),('1987-10-25','AA','417'),('1987-10-25','AA','446'),('1987-10-25','AA','451'),('1987-10-25','AA','460'),('1987-10-25','AA','460'),('1987-10-25','AA','491'),('1987-10-25','AA','491'),('1987-10-25','AA','504'),('1987-10-25','AA','504'),('1987-10-25','AA','519'),('1987-10-25','AA','519'),('1987-10-25','AA','523'),('1987-10-25','AA','523'),('19
87-10-25','AA','525'),('1987-10-25','AA','525'),('1987-10-25','AA','525'),('1987-10-25','AA','533'),('1987-10-25','AA','533'),('1987-10-25','AA','533'),('1987-10-25','AA','546'),('1987-10-25','AA','546'),('1987-10-25','AA','546'),('1987-10-25','AA','546'),('1987-10-25','AA','556'),('1987-10-25','AA','556'),('1987-10-25','AA','556'),('1987-10-25','AA','597'),('1987-10-25','AA','597'),('1987-10-25','AA','597'),('1987-10-25','AA','601'),('1987-10-25','AA','627'),('1987-10-25','AA','629'),('1987-10-25','AA','629'),('1987-10-25','AA','670'),('1987-10-25','AA','673'),('1987-10-25','AA','673'),('1987-10-25','AA','680'),('1987-10-25','AA','680'),('1987-10-25','AA','817'),('1987-10-25','AA','817'),('1987-10-25','AA','824'),('1987-10-25','AA','824'),('1987-10-25','AA','824'),('1987-10-25','AA','824'),('1987-10-25','AA','832'),('1987-10-25','AA','832'),('1987-10-25','AA','852'),('1987-10-25','AA','852'),('1987-10-25','AA','866'),('1987-10-25','AA','866'),('1987-10-25','AA','871'),('1987-10-25','AA','871'),('1987-10-25','AA','880'),('1987-10-25','AA','880'),('1987-10-25','AA','880'),('1987-10-25','AA','880'),('1987-10-25','AA','883'),('1987-10-25','AA','883'),('1987-10-25','AA','885'),('1987-10-25','AA','885'),('1987-10-25','AA','885'),('1987-10-25','AA','890'),('1987-10-25','AA','893'),('1987-10-25','AA','893'),('1987-10-25','AA','905'),('1987-10-25','AA','905'),('1987-10-25','AA','915'),('1987-10-25','AA','929'),('1987-10-25','AA','929'),('1987-10-25','AA','936'),('1987-10-25','AA','936'),('1987-10-25','AA','937'),('1987-10-25','AA','937'),('1987-10-25','AA','955'),('1987-10-25','AA','955'),('1987-10-25','AA','1002'),('1987-10-25','AA','1002'),('1987-10-25','AA','1004'),('1987-10-25','AA','1004'),('1987-10-25','AA','1015'),('1987-10-25','AA','1015'),('1987-10-25','AA','1021'),('1987-10-25','AA','1021'),('1987-10-25','AA','1041'),('1987-10-25','AA','1041'),('1987-10-25','AA','1046'),('1987-10-25','AA','1046'),('1987-10-25','AA','1048'),('1987-10-25','AA','1048'),('1987-10-25','AA','1061'),('1987-10-25','AA','1061'),('1987-10-25','AA','1088'),('1987-10-25','AA','1088'),('1987-10-25','AA','2033'),('1987-10-25','AA','2033'),('1987-10-25','AA','2050'),('1987-10-25','AA','2058'),('1987-10-25','AA','2071'),('1987-10-25','AA','2071'),('1987-10-25','AA','2086'),('1987-10-25','AA','2111'),('1987-10-25','AA','2123'),('1987-10-25','AA','2123'),('1987-10-25','AA','2147'),('1987-10-25','AA','2199'),('1987-10-25','AA','2199'),('1987-10-25','AA','2207'),('1987-10-25','AA','2207'),('1987-10-25','AA','2230'),('1987-10-25','AA','2245'),('1987-10-25','AA','2251'),('1987-10-25','AA','2251'),('1987-10-25','AA','2275'),('1987-10-25','AA','2278'),('1987-10-25','AA','2357'),('1987-10-25','AA','2361'),('1987-10-25','AA','2490'),('1987-10-25','AA','2528'),('1987-10-25','AA','2528'),('1987-10-25','AA','2735'),('1987-10-25','AA','2735'),('1987-10-25','AA','2751'),('1987-10-25','AL','2'),('1987-10-25','AL','2'),('1987-10-25','AL','7'),('1987-10-25','AL','7'),('1987-10-25','AL','26'),('1987-10-25','AL','26'),('1987-10-25','AL','34'),('1987-10-25','AL','34'),('1987-10-25','AL','36'),('1987-10-25','AL','36'),('1987-10-25','AL','45'),('1987-10-25','AL','45'),('1987-10-25','AL','45'),('1987-10-25','AL','91'),('1987-10-25','AL','91'),('1987-10-25','AL','104'),('1987-10-25','AL','104'),('1987-10-25','AL','104'),('1987-10-25','AL','112'),('1987-10-25','AL','123'),('1987-10-25','AL','149'),('1987-10-25','AL','160'),('1987-10-25','AL','160'),('1987-10-25','AL','165'),('1987-10-25','AL','171'),('1987-10-25','AL','171'),('1987-10-25','AL','1
76'),('1987-10-25','AL','176'),('1987-10-25','AL','179'),('1987-10-25','AL','215'),('1987-10-25','AL','231'),('1987-10-25','AL','263'),('1987-10-25','AL','263'),('1987-10-25','AL','281'),('1987-10-25','AL','281'),('1987-10-25','AL','309'),('1987-10-25','AL','309'),('1987-10-25','AL','341'),('1987-10-25','AL','344'),('1987-10-25','AL','344'),('1987-10-25','AL','357'),('1987-10-25','AL','357'),('1987-10-25','AL','368'),('1987-10-25','AL','396'),('1987-10-25','AL','416'),('1987-10-25','AL','416'),('1987-10-25','AL','417'),('1987-10-25','AL','438'),('1987-10-25','AL','438'),('1987-10-25','AL','446'),('1987-10-25','AL','451'),('1987-10-25','AL','491'),('1987-10-25','AL','523'),('1987-10-25','AL','523'),('1987-10-25','AL','523'),('1987-10-25','AL','525'),('1987-10-25','AL','525'),('1987-10-25','AL','533'),('1987-10-25','AL','533'),('1987-10-25','AL','546'),('1987-10-25','AL','546'),('1987-10-25','AL','556'),('1987-10-25','AL','556'),('1987-10-25','AL','601'),('1987-10-25','AL','601'),('1987-10-25','AL','627'),('1987-10-25','AL','629'),('1987-10-25','AL','670'),('1987-10-25','AL','670'),('1987-10-25','AL','670'),('1987-10-25','AL','673'),('1987-10-25','AL','680'),('1987-10-25','AL','937'),('1987-10-25','AL','937'),('1987-10-25','AL','955'),('1987-10-26','AA','2'),('1987-10-26','AA','2'),('1987-10-26','AA','2'),('1987-10-26','AA','7'),('1987-10-26','AA','7'),('1987-10-26','AA','26'),('1987-10-26','AA','34'),('1987-10-26','AA','36'),('1987-10-26','AA','91'),('1987-10-26','AA','101'),('1987-10-26','AA','101'),('1987-10-26','AA','109'),('1987-10-26','AA','109'),('1987-10-26','AA','112'),('1987-10-26','AA','123'),('1987-10-26','AA','123'),('1987-10-26','AA','160'),('1987-10-26','AA','165'),('1987-10-26','AA','165'),('1987-10-26','AA','165'),('1987-10-26','AA','176'),('1987-10-26','AA','176'),('1987-10-26','AA','176'),('1987-10-26','AA','176'),('1987-10-26','AA','179'),('1987-10-26','AA','179'),('1987-10-26','AA','215'),('1987-10-26','AA','215'),('1987-10-26','AA','231'),('1987-10-26','AA','231'),('1987-10-26','AA','263'),('1987-10-26','AA','263'),('1987-10-26','AA','268'),('1987-10-26','AA','268'),('1987-10-26','AA','281'),('1987-10-26','AA','287'),('1987-10-26','AA','287'),('1987-10-26','AA','309'),('1987-10-26','AA','309'),('1987-10-26','AA','309'),('1987-10-26','AA','341'),('1987-10-26','AA','344'),('1987-10-26','AA','344'),('1987-10-26','AA','347'),('1987-10-26','AA','347'),('1987-10-26','AA','368'),('1987-10-26','AA','381'),('1987-10-26','AA','381'),('1987-10-26','AA','381'),('1987-10-26','AA','396'),('1987-10-26','AA','396'),('1987-10-26','AA','397'),('1987-10-26','AA','397'),('1987-10-26','AA','417'),('1987-10-26','AA','417'),('1987-10-26','AA','446'),('1987-10-26','AA','451'),('1987-10-26','AA','451'),('1987-10-26','AA','491'),('1987-10-26','AA','504'),('1987-10-26','AA','504'),('1987-10-26','AA','519'),('1987-10-26','AA','523'),('1987-10-26','AA','523'),('1987-10-26','AA','525'),('1987-10-26','AA','525'),('1987-10-26','AA','525'),('1987-10-26','AA','533'),('1987-10-26','AA','533'),('1987-10-26','AA','533'),('1987-10-26','AA','546'),('1987-10-26','AA','546'),('1987-10-26','AA','546'),('1987-10-26','AA','546'),('1987-10-26','AA','556'),('1987-10-26','AA','556'),('1987-10-26','AA','556'),('1987-10-26','AA','597'),('1987-10-26','AA','597'),('1987-10-26','AA','597'),('1987-10-26','AA','601'),('1987-10-26','AA','601'),('1987-10-26','AA','627'),('1987-10-26','AA','629'),('1987-10-26','AA','629'),('1987-10-26','AA','670'),('1987-10-26','AA','673'),('1987-10-26','AA','673'),('1987-10-26','AA','680'),('
1987-10-26','AA','680'),('1987-10-26','AA','817'),('1987-10-26','AA','817'),('1987-10-26','AA','824'),('1987-10-26','AA','824'),('1987-10-26','AA','832'),('1987-10-26','AA','832'),('1987-10-26','AA','852'),('1987-10-26','AA','852'),('1987-10-26','AA','866'),('1987-10-26','AA','866'),('1987-10-26','AA','871'),('1987-10-26','AA','871'),('1987-10-26','AA','880'),('1987-10-26','AA','880'),('1987-10-26','AA','880'),('1987-10-26','AA','880'),('1987-10-26','AA','883'),('1987-10-26','AA','883'),('1987-10-26','AA','885'),('1987-10-26','AA','885'),('1987-10-26','AA','885'),('1987-10-26','AA','890'),('1987-10-26','AA','890'),('1987-10-26','AA','893'),('1987-10-26','AA','893'),('1987-10-26','AA','905'),('1987-10-26','AA','905'),('1987-10-26','AA','915'),('1987-10-26','AA','929'),('1987-10-26','AA','929'),('1987-10-26','AA','936'),('1987-10-26','AA','936'),('1987-10-26','AA','937'),('1987-10-26','AA','937'),('1987-10-26','AA','955'),('1987-10-26','AA','955'),('1987-10-26','AA','966'),('1987-10-26','AA','1002'),('1987-10-26','AA','1002'),('1987-10-26','AA','1004'),('1987-10-26','AA','1004'),('1987-10-26','AA','1015'),('1987-10-26','AA','1015'),('1987-10-26','AA','1021'),('1987-10-26','AA','1021'),('1987-10-26','AA','1046'),('1987-10-26','AA','1046'),('1987-10-26','AA','1048'),('1987-10-26','AA','1048'),('1987-10-26','AA','1061'),('1987-10-26','AA','1061'),('1987-10-26','AA','1088'),('1987-10-26','AA','1088'),('1987-10-26','AA','2033'),('1987-10-26','AA','2033'),('1987-10-26','AA','2050'),('1987-10-26','AA','2058'),('1987-10-26','AA','2071'),('1987-10-26','AA','2071'),('1987-10-26','AA','2086'),('1987-10-26','AA','2105'),('1987-10-26','AA','2111'),('1987-10-26','AA','2123'),('1987-10-26','AA','2123'),('1987-10-26','AA','2147'),('1987-10-26','AA','2147'),('1987-10-26','AA','2199'),('1987-10-26','AA','2199'),('1987-10-26','AA','2207'),('1987-10-26','AA','2207'),('1987-10-26','AA','2230'),('1987-10-26','AA','2245'),('1987-10-26','AA','2251'),('1987-10-26','AA','2251'),('1987-10-26','AA','2275'),('1987-10-26','AA','2278'),('1987-10-26','AA','2351'),('1987-10-26','AA','2357'),('1987-10-26','AA','2361'),('1987-10-26','AA','2490'),('1987-10-26','AA','2528'),('1987-10-26','AA','2528'),('1987-10-26','AA','2735'),('1987-10-26','AA','2735'),('1987-10-26','AA','2751'),('1987-10-26','AL','2'),('1987-10-26','AL','2'),('1987-10-26','AL','7'),('1987-10-26','AL','7'),('1987-10-26','AL','26'),('1987-10-26','AL','26'),('1987-10-26','AL','34'),('1987-10-26','AL','34'),('1987-10-26','AL','36'),('1987-10-26','AL','36'),('1987-10-26','AL','45'),('1987-10-26','AL','45'),('1987-10-26','AL','45'),('1987-10-26','AL','91'),('1987-10-26','AL','91'),('1987-10-26','AL','104'),('1987-10-26','AL','104'),('1987-10-26','AL','104'),('1987-10-26','AL','109'),('1987-10-26','AL','112'),('1987-10-26','AL','112'),('1987-10-26','AL','123'),('1987-10-26','AL','149'),('1987-10-26','AL','160'),('1987-10-26','AL','160'),('1987-10-26','AL','165'),('1987-10-26','AL','171'),('1987-10-26','AL','171'),('1987-10-26','AL','176'),('1987-10-26','AL','176'),('1987-10-26','AL','179'),('1987-10-26','AL','215'),('1987-10-26','AL','231'),('1987-10-26','AL','263'),('1987-10-26','AL','263'),('1987-10-26','AL','268'),('1987-10-26','AL','268'),('1987-10-26','AL','268'),('1987-10-26','AL','281'),('1987-10-26','AL','281'),('1987-10-26','AL','287'),('1987-10-26','AL','287'),('1987-10-26','AL','309'),('1987-10-26','AL','309'),('1987-10-26','AL','341'),('1987-10-26','AL','344'),('1987-10-26','AL','357'),('1987-10-26','AL','357'),('1987-10-26','AL','368'),('1987-10-26','AL'
,'381'),('1987-10-26','AL','396'),('1987-10-26','AL','397'),('1987-10-26','AL','397'),('1987-10-26','AL','416'),('1987-10-26','AL','416'),('1987-10-26','AL','417'),('1987-10-26','AL','438'),('1987-10-26','AL','438'),('1987-10-26','AL','446'),('1987-10-26','AL','451'),('1987-10-26','AL','451'),('1987-10-26','AL','491'),('1987-10-26','AL','491'),('1987-10-26','AL','523'),('1987-10-26','AL','523'),('1987-10-26','AL','523'),('1987-10-26','AL','525'),('1987-10-26','AL','533'),('1987-10-26','AL','533'),('1987-10-26','AL','546'),('1987-10-26','AL','546'),('1987-10-26','AL','556'),('1987-10-26','AL','556'),('1987-10-26','AL','601'),('1987-10-26','AL','601'),('1987-10-26','AL','627'),('1987-10-26','AL','629'),('1987-10-26','AL','670'),('1987-10-26','AL','670'),('1987-10-26','AL','670'),('1987-10-26','AL','673'),('1987-10-26','AL','680'),('1987-10-27','AA','2'),('1987-10-27','AA','2'),('1987-10-27','AA','2'),('1987-10-27','AA','7'),('1987-10-27','AA','26'),('1987-10-27','AA','34'),('1987-10-27','AA','36'),('1987-10-27','AA','91'),('1987-10-27','AA','101'),('1987-10-27','AA','101'),('1987-10-27','AA','109'),('1987-10-27','AA','109'),('1987-10-27','AA','112'),('1987-10-27','AA','123'),('1987-10-27','AA','123'),('1987-10-27','AA','160'),('1987-10-27','AA','165'),('1987-10-27','AA','165'),('1987-10-27','AA','165'),('1987-10-27','AA','176'),('1987-10-27','AA','176'),('1987-10-27','AA','176'),('1987-10-27','AA','176'),('1987-10-27','AA','179'),('1987-10-27','AA','179'),('1987-10-27','AA','215'),('1987-10-27','AA','215'),('1987-10-27','AA','231'),('1987-10-27','AA','263'),('1987-10-27','AA','263'),('1987-10-27','AA','268'),('1987-10-27','AA','268'),('1987-10-27','AA','281'),('1987-10-27','AA','287'),('1987-10-27','AA','287'),('1987-10-27','AA','309'),('1987-10-27','AA','309'),('1987-10-27','AA','309'),('1987-10-27','AA','341'),('1987-10-27','AA','344'),('1987-10-27','AA','344'),('1987-10-27','AA','347'),('1987-10-27','AA','347'),('1987-10-27','AA','368'),('1987-10-27','AA','381'),('1987-10-27','AA','381'),('1987-10-27','AA','381'),('1987-10-27','AA','396'),('1987-10-27','AA','396'),('1987-10-27','AA','397'),('1987-10-27','AA','397'),('1987-10-27','AA','417'),('1987-10-27','AA','417'),('1987-10-27','AA','446'),('1987-10-27','AA','451'),('1987-10-27','AA','451'),('1987-10-27','AA','460'),('1987-10-27','AA','460'),('1987-10-27','AA','491'),('1987-10-27','AA','504'),('1987-10-27','AA','504'),('1987-10-27','AA','519'),('1987-10-27','AA','519'),('1987-10-27','AA','523'),('1987-10-27','AA','523'),('1987-10-27','AA','525'),('1987-10-27','AA','525'),('1987-10-27','AA','525'),('1987-10-27','AA','533'),('1987-10-27','AA','533'),('1987-10-27','AA','533'),('1987-10-27','AA','546'),('1987-10-27','AA','546'),('1987-10-27','AA','546'),('1987-10-27','AA','546'),('1987-10-27','AA','556'),('1987-10-27','AA','556'),('1987-10-27','AA','556'),('1987-10-27','AA','597'),('1987-10-27','AA','597'),('1987-10-27','AA','597'),('1987-10-27','AA','601'),('1987-10-27','AA','601'),('1987-10-27','AA','627'),('1987-10-27','AA','629'),('1987-10-27','AA','629'),('1987-10-27','AA','670'),('1987-10-27','AA','673'),('1987-10-27','AA','673'),('1987-10-27','AA','680'),('1987-10-27','AA','680'),('1987-10-27','AA','817'),('1987-10-27','AA','817'),('1987-10-27','AA','824'),('1987-10-27','AA','824'),('1987-10-27','AA','824'),('1987-10-27','AA','824'),('1987-10-27','AA','832'),('1987-10-27','AA','832'),('1987-10-27','AA','852'),('1987-10-27','AA','852'),('1987-10-27','AA','866'),('1987-10-27','AA','866'),('1987-10-27','AA','871'),('1987-10-27','AA','871
'),('1987-10-27','AA','880'),('1987-10-27','AA','880'),('1987-10-27','AA','880'),('1987-10-27','AA','880'),('1987-10-27','AA','883'),('1987-10-27','AA','883'),('1987-10-27','AA','885'),('1987-10-27','AA','885'),('1987-10-27','AA','885'),('1987-10-27','AA','890'),('1987-10-27','AA','890'),('1987-10-27','AA','893'),('1987-10-27','AA','893'),('1987-10-27','AA','905'),('1987-10-27','AA','905'),('1987-10-27','AA','915'),('1987-10-27','AA','929'),('1987-10-27','AA','929'),('1987-10-27','AA','936'),('1987-10-27','AA','936'),('1987-10-27','AA','937'),('1987-10-27','AA','937'),('1987-10-27','AA','955'),('1987-10-27','AA','955'),('1987-10-27','AA','966'),('1987-10-27','AA','1002'),('1987-10-27','AA','1002'),('1987-10-27','AA','1004'),('1987-10-27','AA','1004'),('1987-10-27','AA','1015'),('1987-10-27','AA','1015'),('1987-10-27','AA','1021'),('1987-10-27','AA','1021'),('1987-10-27','AA','1041'),('1987-10-27','AA','1041'),('1987-10-27','AA','1046'),('1987-10-27','AA','1046'),('1987-10-27','AA','1048'),('1987-10-27','AA','1061'),('1987-10-27','AA','1061'),('1987-10-27','AA','1088'),('1987-10-27','AA','1088'),('1987-10-27','AA','2033'),('1987-10-27','AA','2033'),('1987-10-27','AA','2050'),('1987-10-27','AA','2058'),('1987-10-27','AA','2071'),('1987-10-27','AA','2071'),('1987-10-27','AA','2086'),('1987-10-27','AA','2105'),('1987-10-27','AA','2111'),('1987-10-27','AA','2123'),('1987-10-27','AA','2123'),('1987-10-27','AA','2147'),('1987-10-27','AA','2147'),('1987-10-27','AA','2199'),('1987-10-27','AA','2199'),('1987-10-27','AA','2207'),('1987-10-27','AA','2207'),('1987-10-27','AA','2217'),('1987-10-27','AA','2230'),('1987-10-27','AA','2245'),('1987-10-27','AA','2251'),('1987-10-27','AA','2251'),('1987-10-27','AA','2275'),('1987-10-27','AA','2278'),('1987-10-27','AA','2357'),('1987-10-27','AA','2490'),('1987-10-27','AA','2528'),('1987-10-27','AA','2528'),('1987-10-27','AA','2735'),('1987-10-27','AA','2735'),('1987-10-27','AL','2'),('1987-10-27','AL','2'),('1987-10-27','AL','7'),('1987-10-27','AL','7'),('1987-10-27','AL','26'),('1987-10-27','AL','26'),('1987-10-27','AL','34'),('1987-10-27','AL','34'),('1987-10-27','AL','36'),('1987-10-27','AL','36'),('1987-10-27','AL','45'),('1987-10-27','AL','45'),('1987-10-27','AL','45'),('1987-10-27','AL','91'),('1987-10-27','AL','91'),('1987-10-27','AL','104'),('1987-10-27','AL','104'),('1987-10-27','AL','104'),('1987-10-27','AL','109'),('1987-10-27','AL','112'),('1987-10-27','AL','112'),('1987-10-27','AL','123'),('1987-10-27','AL','149'),('1987-10-27','AL','160'),('1987-10-27','AL','160'),('1987-10-27','AL','165'),('1987-10-27','AL','171'),('1987-10-27','AL','171'),('1987-10-27','AL','176'),('1987-10-27','AL','176'),('1987-10-27','AL','179'),('1987-10-27','AL','215'),('1987-10-27','AL','231'),('1987-10-27','AL','263'),('1987-10-27','AL','263'),('1987-10-27','AL','268'),('1987-10-27','AL','268'),('1987-10-27','AL','268'),('1987-10-27','AL','281'),('1987-10-27','AL','281'),('1987-10-27','AL','287'),('1987-10-27','AL','287'),('1987-10-27','AL','309'),('1987-10-27','AL','309'),('1987-10-27','AL','341'),('1987-10-27','AL','344'),('1987-10-27','AL','344'),('1987-10-27','AL','357'),('1987-10-27','AL','357'),('1987-10-27','AL','368'),('1987-10-27','AL','381'),('1987-10-27','AL','396'),('1987-10-27','AL','397'),('1987-10-27','AL','397'),('1987-10-27','AL','416'),('1987-10-27','AL','416'),('1987-10-27','AL','417'),('1987-10-27','AL','438'),('1987-10-27','AL','438'),('1987-10-27','AL','446'),('1987-10-27','AL','451'),('1987-10-27','AL','451'),('1987-10-27','AL','491'),('1987-10-27',
'AL','491'),('1987-10-27','AL','523'),('1987-10-27','AL','523'),('1987-10-27','AL','523'),('1987-10-27','AL','525'),('1987-10-27','AL','525'),('1987-10-27','AL','533'),('1987-10-27','AL','533'),('1987-10-27','AL','546'),('1987-10-27','AL','546'),('1987-10-27','AL','556'),('1987-10-27','AL','556'),('1987-10-27','AL','601'),('1987-10-27','AL','601'),('1987-10-27','AL','627'),('1987-10-27','AL','629'),('1987-10-27','AL','670'),('1987-10-27','AL','670'),('1987-10-27','AL','670'),('1987-10-27','AL','673'),('1987-10-27','AL','680'),('1987-10-28','AA','2'),('1987-10-28','AA','2'),('1987-10-28','AA','2'),('1987-10-28','AA','7'),('1987-10-28','AA','7'),('1987-10-28','AA','26'),('1987-10-28','AA','34'),('1987-10-28','AA','36'),('1987-10-28','AA','91'),('1987-10-28','AA','101'),('1987-10-28','AA','101'),('1987-10-28','AA','109'),('1987-10-28','AA','109'),('1987-10-28','AA','112'),('1987-10-28','AA','123'),('1987-10-28','AA','123'),('1987-10-28','AA','165'),('1987-10-28','AA','165'),('1987-10-28','AA','165'),('1987-10-28','AA','176'),('1987-10-28','AA','176'),('1987-10-28','AA','176'),('1987-10-28','AA','176'),('1987-10-28','AA','179'),('1987-10-28','AA','179'),('1987-10-28','AA','215'),('1987-10-28','AA','215'),('1987-10-28','AA','231'),('1987-10-28','AA','231'),('1987-10-28','AA','263'),('1987-10-28','AA','268'),('1987-10-28','AA','268'),('1987-10-28','AA','281'),('1987-10-28','AA','287'),('1987-10-28','AA','287'),('1987-10-28','AA','309'),('1987-10-28','AA','309'),('1987-10-28','AA','309'),('1987-10-28','AA','341'),('1987-10-28','AA','344'),('1987-10-28','AA','344'),('1987-10-28','AA','347'),('1987-10-28','AA','347'),('1987-10-28','AA','368'),('1987-10-28','AA','381'),('1987-10-28','AA','381'),('1987-10-28','AA','381'),('1987-10-28','AA','396'),('1987-10-28','AA','396'),('1987-10-28','AA','397'),('1987-10-28','AA','397'),('1987-10-28','AA','417'),('1987-10-28','AA','417'),('1987-10-28','AA','446'),('1987-10-28','AA','451'),('1987-10-28','AA','451'),('1987-10-28','AA','460'),('1987-10-28','AA','460'),('1987-10-28','AA','491'),('1987-10-28','AA','504'),('1987-10-28','AA','504'),('1987-10-28','AA','519'),('1987-10-28','AA','519'),('1987-10-28','AA','523'),('1987-10-28','AA','523'),('1987-10-28','AA','525'),('1987-10-28','AA','525'),('1987-10-28','AA','525'),('1987-10-28','AA','533'),('1987-10-28','AA','533'),('1987-10-28','AA','533'),('1987-10-28','AA','546'),('1987-10-28','AA','546'),('1987-10-28','AA','546'),('1987-10-28','AA','546'),('1987-10-28','AA','556'),('1987-10-28','AA','556'),('1987-10-28','AA','556'),('1987-10-28','AA','597'),('1987-10-28','AA','597'),('1987-10-28','AA','597'),('1987-10-28','AA','601'),('1987-10-28','AA','601'),('1987-10-28','AA','627'),('1987-10-28','AA','629'),('1987-10-28','AA','629'),('1987-10-28','AA','670'),('1987-10-28','AA','673'),('1987-10-28','AA','673'),('1987-10-28','AA','680'),('1987-10-28','AA','680'),('1987-10-28','AA','817'),('1987-10-28','AA','817'),('1987-10-28','AA','824'),('1987-10-28','AA','824'),('1987-10-28','AA','824'),('1987-10-28','AA','832'),('1987-10-28','AA','832'),('1987-10-28','AA','852'),('1987-10-28','AA','852'),('1987-10-28','AA','866'),('1987-10-28','AA','866'),('1987-10-28','AA','871'),('1987-10-28','AA','871'),('1987-10-28','AA','880'),('1987-10-28','AA','880'),('1987-10-28','AA','880'),('1987-10-28','AA','880'),('1987-10-28','AA','883'),('1987-10-28','AA','883'),('1987-10-28','AA','885'),('1987-10-28','AA','885'),('1987-10-28','AA','890'),('1987-10-28','AA','890'),('1987-10-28','AA','893'),('1987-10-28','AA','893'),('1987-10-28','AA','9
05'),('1987-10-28','AA','905'),('1987-10-28','AA','915'),('1987-10-28','AA','929'),('1987-10-28','AA','929'),('1987-10-28','AA','936'),('1987-10-28','AA','936'),('1987-10-28','AA','937'),('1987-10-28','AA','937'),('1987-10-28','AA','955'),('1987-10-28','AA','955'),('1987-10-28','AA','966'),('1987-10-28','AA','1002'),('1987-10-28','AA','1002'),('1987-10-28','AA','1004'),('1987-10-28','AA','1004'),('1987-10-28','AA','1015'),('1987-10-28','AA','1021'),('1987-10-28','AA','1021'),('1987-10-28','AA','1041'),('1987-10-28','AA','1041'),('1987-10-28','AA','1046'),('1987-10-28','AA','1046'),('1987-10-28','AA','1048'),('1987-10-28','AA','1048'),('1987-10-28','AA','1061'),('1987-10-28','AA','1061'),('1987-10-28','AA','1088'),('1987-10-28','AA','1088'),('1987-10-28','AA','2033'),('1987-10-28','AA','2033'),('1987-10-28','AA','2050'),('1987-10-28','AA','2058'),('1987-10-28','AA','2071'),('1987-10-28','AA','2071'),('1987-10-28','AA','2086'),('1987-10-28','AA','2105'),('1987-10-28','AA','2111'),('1987-10-28','AA','2123'),('1987-10-28','AA','2123'),('1987-10-28','AA','2147'),('1987-10-28','AA','2199'),('1987-10-28','AA','2199'),('1987-10-28','AA','2207'),('1987-10-28','AA','2207'),('1987-10-28','AA','2217'),('1987-10-28','AA','2230'),('1987-10-28','AA','2245'),('1987-10-28','AA','2251'),('1987-10-28','AA','2251'),('1987-10-28','AA','2275'),('1987-10-28','AA','2278'),('1987-10-28','AA','2351'),('1987-10-28','AA','2361'),('1987-10-28','AA','2490'),('1987-10-28','AA','2528'),('1987-10-28','AA','2528'),('1987-10-28','AA','2751'),('1987-10-28','AL','2'),('1987-10-28','AL','2'),('1987-10-28','AL','7'),('1987-10-28','AL','7'),('1987-10-28','AL','26'),('1987-10-28','AL','34'),('1987-10-28','AL','34'),('1987-10-28','AL','45'),('1987-10-28','AL','91'),('1987-10-28','AL','91'),('1987-10-28','AL','104'),('1987-10-28','AL','104'),('1987-10-28','AL','104'),('1987-10-28','AL','109'),('1987-10-28','AL','112'),('1987-10-28','AL','112'),('1987-10-28','AL','123'),('1987-10-28','AL','149'),('1987-10-28','AL','160'),('1987-10-28','AL','160'),('1987-10-28','AL','165'),('1987-10-28','AL','171'),('1987-10-28','AL','171'),('1987-10-28','AL','176'),('1987-10-28','AL','176'),('1987-10-28','AL','179'),('1987-10-28','AL','215'),('1987-10-28','AL','231'),('1987-10-28','AL','263'),('1987-10-28','AL','263'),('1987-10-28','AL','268'),('1987-10-28','AL','268'),('1987-10-28','AL','268'),('1987-10-28','AL','281'),('1987-10-28','AL','281'),('1987-10-28','AL','287'),('1987-10-28','AL','287'),('1987-10-28','AL','309'),('1987-10-28','AL','309'),('1987-10-28','AL','341'),('1987-10-28','AL','344'),('1987-10-28','AL','344'),('1987-10-28','AL','357'),('1987-10-28','AL','357'),('1987-10-28','AL','368'),('1987-10-28','AL','381'),('1987-10-28','AL','396'),('1987-10-28','AL','397'),('1987-10-28','AL','397'),('1987-10-28','AL','416'),('1987-10-28','AL','416'),('1987-10-28','AL','417'),('1987-10-28','AL','438'),('1987-10-28','AL','438'),('1987-10-28','AL','446'),('1987-10-28','AL','451'),('1987-10-28','AL','451'),('1987-10-28','AL','491'),('1987-10-28','AL','491'),('1987-10-28','AL','523'),('1987-10-28','AL','523'),('1987-10-28','AL','523'),('1987-10-28','AL','525'),('1987-10-28','AL','525'),('1987-10-28','AL','533'),('1987-10-28','AL','533'),('1987-10-28','AL','546'),('1987-10-28','AL','546'),('1987-10-28','AL','556'),('1987-10-28','AL','556'),('1987-10-28','AL','601'),('1987-10-28','AL','601'),('1987-10-28','AL','627'),('1987-10-28','AL','629'),('1987-10-28','AL','670'),('1987-10-28','AL','670'),('1987-10-28','AL','670'),('1987-10-28','AL','673'),('1987-1
0-28','AL','680'),('1987-10-29','AA','2'),('1987-10-29','AA','2'),('1987-10-29','AA','2'),('1987-10-29','AA','7'),('1987-10-29','AA','7'),('1987-10-29','AA','26'),('1987-10-29','AA','34'),('1987-10-29','AA','36'),('1987-10-29','AA','91'),('1987-10-29','AA','101'),('1987-10-29','AA','101'),('1987-10-29','AA','109'),('1987-10-29','AA','109'),('1987-10-29','AA','112'),('1987-10-29','AA','123'),('1987-10-29','AA','123'),('1987-10-29','AA','160'),('1987-10-29','AA','165'),('1987-10-29','AA','165'),('1987-10-29','AA','165'),('1987-10-29','AA','176'),('1987-10-29','AA','176'),('1987-10-29','AA','176'),('1987-10-29','AA','176'),('1987-10-29','AA','179'),('1987-10-29','AA','179'),('1987-10-29','AA','215'),('1987-10-29','AA','215'),('1987-10-29','AA','231'),('1987-10-29','AA','231'),('1987-10-29','AA','263'),('1987-10-29','AA','263'),('1987-10-29','AA','268'),('1987-10-29','AA','268'),('1987-10-29','AA','281'),('1987-10-29','AA','287'),('1987-10-29','AA','287'),('1987-10-29','AA','309'),('1987-10-29','AA','309'),('1987-10-29','AA','309'),('1987-10-29','AA','341'),('1987-10-29','AA','344'),('1987-10-29','AA','344'),('1987-10-29','AA','347'),('1987-10-29','AA','347'),('1987-10-29','AA','368'),('1987-10-29','AA','381'),('1987-10-29','AA','381'),('1987-10-29','AA','381'),('1987-10-29','AA','396'),('1987-10-29','AA','396'),('1987-10-29','AA','397'),('1987-10-29','AA','397'),('1987-10-29','AA','417'),('1987-10-29','AA','417'),('1987-10-29','AA','451'),('1987-10-29','AA','451'),('1987-10-29','AA','460'),('1987-10-29','AA','460'),('1987-10-29','AA','491'),('1987-10-29','AA','504'),('1987-10-29','AA','504'),('1987-10-29','AA','519'),('1987-10-29','AA','519'),('1987-10-29','AA','523'),('1987-10-29','AA','523'),('1987-10-29','AA','525'),('1987-10-29','AA','525'),('1987-10-29','AA','525'),('1987-10-29','AA','533'),('1987-10-29','AA','533'),('1987-10-29','AA','533'),('1987-10-29','AA','546'),('1987-10-29','AA','546'),('1987-10-29','AA','546'),('1987-10-29','AA','546'),('1987-10-29','AA','556'),('1987-10-29','AA','556'),('1987-10-29','AA','556'),('1987-10-29','AA','597'),('1987-10-29','AA','597'),('1987-10-29','AA','597'),('1987-10-29','AA','601'),('1987-10-29','AA','601'),('1987-10-29','AA','627'),('1987-10-29','AA','629'),('1987-10-29','AA','670'),('1987-10-29','AA','673'),('1987-10-29','AA','680'),('1987-10-29','AA','680'),('1987-10-29','AA','817'),('1987-10-29','AA','817'),('1987-10-29','AA','824'),('1987-10-29','AA','824'),('1987-10-29','AA','824'),('1987-10-29','AA','824'),('1987-10-29','AA','832'),('1987-10-29','AA','832'),('1987-10-29','AA','852'),('1987-10-29','AA','852'),('1987-10-29','AA','866'),('1987-10-29','AA','866'),('1987-10-29','AA','871'),('1987-10-29','AA','871'),('1987-10-29','AA','880'),('1987-10-29','AA','880'),('1987-10-29','AA','880'),('1987-10-29','AA','880'),('1987-10-29','AA','883'),('1987-10-29','AA','883'),('1987-10-29','AA','885'),('1987-10-29','AA','885'),('1987-10-29','AA','885'),('1987-10-29','AA','890'),('1987-10-29','AA','890'),('1987-10-29','AA','893'),('1987-10-29','AA','893'),('1987-10-29','AA','905'),('1987-10-29','AA','915'),('1987-10-29','AA','929'),('1987-10-29','AA','929'),('1987-10-29','AA','936'),('1987-10-29','AA','936'),('1987-10-29','AA','937'),('1987-10-29','AA','937'),('1987-10-29','AA','955'),('1987-10-29','AA','955'),('1987-10-29','AA','966'),('1987-10-29','AA','1002'),('1987-10-29','AA','1002'),('1987-10-29','AA','1004'),('1987-10-29','AA','1004'),('1987-10-29','AA','1015'),('1987-10-29','AA','1015'),('1987-10-29','AA','1021'),('1987-10-29','AA','1021'),('1987-
10-29','AA','1041'),('1987-10-29','AA','1041'),('1987-10-29','AA','1046'),('1987-10-29','AA','1048'),('1987-10-29','AA','1048'),('1987-10-29','AA','1061'),('1987-10-29','AA','1061'),('1987-10-29','AA','1088'),('1987-10-29','AA','1088'),('1987-10-29','AA','2033'),('1987-10-29','AA','2033'),('1987-10-29','AA','2050'),('1987-10-29','AA','2058'),('1987-10-29','AA','2071'),('1987-10-29','AA','2071'),('1987-10-29','AA','2086'),('1987-10-29','AA','2105'),('1987-10-29','AA','2111'),('1987-10-29','AA','2123'),('1987-10-29','AA','2123'),('1987-10-29','AA','2147'),('1987-10-29','AA','2147'),('1987-10-29','AA','2199'),('1987-10-29','AA','2199'),('1987-10-29','AA','2207'),('1987-10-29','AA','2207'),('1987-10-29','AA','2217'),('1987-10-29','AA','2230'),('1987-10-29','AA','2251'),('1987-10-29','AA','2251'),('1987-10-29','AA','2275'),('1987-10-29','AA','2278'),('1987-10-29','AA','2351'),('1987-10-29','AA','2357'),('1987-10-29','AA','2361'),('1987-10-29','AA','2490'),('1987-10-29','AA','2528'),('1987-10-29','AA','2735'),('1987-10-29','AA','2735'),('1987-10-29','AA','2751'),('1987-10-29','AL','2'),('1987-10-29','AL','2'),('1987-10-29','AL','7'),('1987-10-29','AL','7'),('1987-10-29','AL','26'),('1987-10-29','AL','26'),('1987-10-29','AL','34'),('1987-10-29','AL','34'),('1987-10-29','AL','36'),('1987-10-29','AL','36'),('1987-10-29','AL','45'),('1987-10-29','AL','45'),('1987-10-29','AL','45'),('1987-10-29','AL','91'),('1987-10-29','AL','91'),('1987-10-29','AL','104'),('1987-10-29','AL','109'),('1987-10-29','AL','112'),('1987-10-29','AL','112'),('1987-10-29','AL','123'),('1987-10-29','AL','149'),('1987-10-29','AL','160'),('1987-10-29','AL','160'),('1987-10-29','AL','165'),('1987-10-29','AL','171'),('1987-10-29','AL','171'),('1987-10-29','AL','176'),('1987-10-29','AL','176'),('1987-10-29','AL','179'),('1987-10-29','AL','215'),('1987-10-29','AL','231'),('1987-10-29','AL','263'),('1987-10-29','AL','263'),('1987-10-29','AL','268'),('1987-10-29','AL','268'),('1987-10-29','AL','268'),('1987-10-29','AL','281'),('1987-10-29','AL','281'),('1987-10-29','AL','287'),('1987-10-29','AL','287'),('1987-10-29','AL','309'),('1987-10-29','AL','309'),('1987-10-29','AL','341'),('1987-10-29','AL','344'),('1987-10-29','AL','344'),('1987-10-29','AL','357'),('1987-10-29','AL','357'),('1987-10-29','AL','368'),('1987-10-29','AL','381'),('1987-10-29','AL','396'),('1987-10-29','AL','397'),('1987-10-29','AL','397'),('1987-10-29','AL','416'),('1987-10-29','AL','416'),('1987-10-29','AL','417'),('1987-10-29','AL','438'),('1987-10-29','AL','438'),('1987-10-29','AL','446'),('1987-10-29','AL','451'),('1987-10-29','AL','451'),('1987-10-29','AL','491'),('1987-10-29','AL','491'),('1987-10-29','AL','523'),('1987-10-29','AL','523'),('1987-10-29','AL','523'),('1987-10-29','AL','525'),('1987-10-29','AL','525'),('1987-10-29','AL','533'),('1987-10-29','AL','533'),('1987-10-29','AL','546'),('1987-10-29','AL','546'),('1987-10-29','AL','556'),('1987-10-29','AL','556'),('1987-10-29','AL','601'),('1987-10-29','AL','601'),('1987-10-29','AL','627'),('1987-10-29','AL','629'),('1987-10-29','AL','670'),('1987-10-29','AL','670'),('1987-10-29','AL','670'),('1987-10-29','AL','673'),('1987-10-29','AL','680'),('1987-10-30','AA','2'),('1987-10-30','AA','2'),('1987-10-30','AA','2'),('1987-10-30','AA','7'),('1987-10-30','AA','7'),('1987-10-30','AA','26'),('1987-10-30','AA','34'),('1987-10-30','AA','91'),('1987-10-30','AA','101'),('1987-10-30','AA','101'),('1987-10-30','AA','109'),('1987-10-30','AA','109'),('1987-10-30','AA','109'),('1987-10-30','AA','112'),('1987-10-30','AA'
,'123'),('1987-10-30','AA','165'),('1987-10-30','AA','165'),('1987-10-30','AA','165'),('1987-10-30','AA','176'),('1987-10-30','AA','176'),('1987-10-30','AA','176'),('1987-10-30','AA','176'),('1987-10-30','AA','179'),('1987-10-30','AA','179'),('1987-10-30','AA','215'),('1987-10-30','AA','215'),('1987-10-30','AA','231'),('1987-10-30','AA','231'),('1987-10-30','AA','263'),('1987-10-30','AA','268'),('1987-10-30','AA','268'),('1987-10-30','AA','281'),('1987-10-30','AA','287'),('1987-10-30','AA','287'),('1987-10-30','AA','309'),('1987-10-30','AA','309'),('1987-10-30','AA','309'),('1987-10-30','AA','344'),('1987-10-30','AA','344'),('1987-10-30','AA','347'),('1987-10-30','AA','347'),('1987-10-30','AA','368'),('1987-10-30','AA','381'),('1987-10-30','AA','381'),('1987-10-30','AA','381'),('1987-10-30','AA','396'),('1987-10-30','AA','396'),('1987-10-30','AA','397'),('1987-10-30','AA','397'),('1987-10-30','AA','417'),('1987-10-30','AA','417'),('1987-10-30','AA','446'),('1987-10-30','AA','451'),('1987-10-30','AA','451'),('1987-10-30','AA','460'),('1987-10-30','AA','460'),('1987-10-30','AA','491'),('1987-10-30','AA','504'),('1987-10-30','AA','504'),('1987-10-30','AA','519'),('1987-10-30','AA','519'),('1987-10-30','AA','523'),('1987-10-30','AA','523'),('1987-10-30','AA','525'),('1987-10-30','AA','525'),('1987-10-30','AA','525'),('1987-10-30','AA','533'),('1987-10-30','AA','533'),('1987-10-30','AA','533'),('1987-10-30','AA','546'),('1987-10-30','AA','546'),('1987-10-30','AA','546'),('1987-10-30','AA','546'),('1987-10-30','AA','556'),('1987-10-30','AA','556'),('1987-10-30','AA','556'),('1987-10-30','AA','597'),('1987-10-30','AA','597'),('1987-10-30','AA','597'),('1987-10-30','AA','601'),('1987-10-30','AA','601'),('1987-10-30','AA','627'),('1987-10-30','AA','629'),('1987-10-30','AA','629'),('1987-10-30','AA','670'),('1987-10-30','AA','673'),('1987-10-30','AA','673'),('1987-10-30','AA','680'),('1987-10-30','AA','817'),('1987-10-30','AA','817'),('1987-10-30','AA','824'),('1987-10-30','AA','824'),('1987-10-30','AA','824'),('1987-10-30','AA','824'),('1987-10-30','AA','832'),('1987-10-30','AA','832'),('1987-10-30','AA','852'),('1987-10-30','AA','866'),('1987-10-30','AA','866'),('1987-10-30','AA','871'),('1987-10-30','AA','871'),('1987-10-30','AA','880'),('1987-10-30','AA','880'),('1987-10-30','AA','880'),('1987-10-30','AA','880'),('1987-10-30','AA','883'),('1987-10-30','AA','883'),('1987-10-30','AA','885'),('1987-10-30','AA','885'),('1987-10-30','AA','885'),('1987-10-30','AA','890'),('1987-10-30','AA','890'),('1987-10-30','AA','893'),('1987-10-30','AA','893'),('1987-10-30','AA','905'),('1987-10-30','AA','905'),('1987-10-30','AA','915'),('1987-10-30','AA','929'),('1987-10-30','AA','929'),('1987-10-30','AA','936'),('1987-10-30','AA','936'),('1987-10-30','AA','937'),('1987-10-30','AA','937'),('1987-10-30','AA','955'),('1987-10-30','AA','955'),('1987-10-30','AA','966'),('1987-10-30','AA','1002'),('1987-10-30','AA','1002'),('1987-10-30','AA','1004'),('1987-10-30','AA','1004'),('1987-10-30','AA','1015'),('1987-10-30','AA','1015'),('1987-10-30','AA','1021'),('1987-10-30','AA','1021'),('1987-10-30','AA','1041'),('1987-10-30','AA','1041'),('1987-10-30','AA','1046'),('1987-10-30','AA','1046'),('1987-10-30','AA','1048'),('1987-10-30','AA','1048'),('1987-10-30','AA','1061'),('1987-10-30','AA','1061'),('1987-10-30','AA','1088'),('1987-10-30','AA','1088'),('1987-10-30','AA','2033'),('1987-10-30','AA','2033'),('1987-10-30','AA','2050'),('1987-10-30','AA','2058'),('1987-10-30','AA','2071'),('1987-10-30','AA','2071'),('1987-10-30'
,'AA','2086'),('1987-10-30','AA','2105'),('1987-10-30','AA','2111'),('1987-10-30','AA','2123'),('1987-10-30','AA','2123'),('1987-10-30','AA','2147'),('1987-10-30','AA','2147'),('1987-10-30','AA','2199'),('1987-10-30','AA','2199'),('1987-10-30','AA','2207'),('1987-10-30','AA','2207'),('1987-10-30','AA','2217'),('1987-10-30','AA','2230'),('1987-10-30','AA','2245'),('1987-10-30','AA','2251'),('1987-10-30','AA','2275'),('1987-10-30','AA','2278'),('1987-10-30','AA','2351'),('1987-10-30','AA','2357'),('1987-10-30','AA','2361'),('1987-10-30','AA','2490'),('1987-10-30','AA','2528'),('1987-10-30','AA','2528'),('1987-10-30','AA','2735'),('1987-10-30','AA','2735'),('1987-10-30','AA','2751'),('1987-10-30','AL','2'),('1987-10-30','AL','2'),('1987-10-30','AL','7'),('1987-10-30','AL','7'),('1987-10-30','AL','26'),('1987-10-30','AL','26'),('1987-10-30','AL','34'),('1987-10-30','AL','34'),('1987-10-30','AL','36'),('1987-10-30','AL','36'),('1987-10-30','AL','45'),('1987-10-30','AL','45'),('1987-10-30','AL','45'),('1987-10-30','AL','91'),('1987-10-30','AL','91'),('1987-10-30','AL','104'),('1987-10-30','AL','104'),('1987-10-30','AL','104'),('1987-10-30','AL','109'),('1987-10-30','AL','112'),('1987-10-30','AL','112'),('1987-10-30','AL','123'),('1987-10-30','AL','149'),('1987-10-30','AL','160'),('1987-10-30','AL','160'),('1987-10-30','AL','165'),('1987-10-30','AL','171'),('1987-10-30','AL','171'),('1987-10-30','AL','176'),('1987-10-30','AL','176'),('1987-10-30','AL','179'),('1987-10-30','AL','215'),('1987-10-30','AL','231'),('1987-10-30','AL','263'),('1987-10-30','AL','263'),('1987-10-30','AL','268'),('1987-10-30','AL','268'),('1987-10-30','AL','268'),('1987-10-30','AL','281'),('1987-10-30','AL','281'),('1987-10-30','AL','287'),('1987-10-30','AL','287'),('1987-10-30','AL','309'),('1987-10-30','AL','309'),('1987-10-30','AL','341'),('1987-10-30','AL','344'),('1987-10-30','AL','344'),('1987-10-30','AL','357'),('1987-10-30','AL','357'),('1987-10-30','AL','368'),('1987-10-30','AL','381'),('1987-10-30','AL','396'),('1987-10-30','AL','397'),('1987-10-30','AL','397'),('1987-10-30','AL','416'),('1987-10-30','AL','416'),('1987-10-30','AL','417'),('1987-10-30','AL','438'),('1987-10-30','AL','438'),('1987-10-30','AL','446'),('1987-10-30','AL','451'),('1987-10-30','AL','451'),('1987-10-30','AL','491'),('1987-10-30','AL','491'),('1987-10-30','AL','523'),('1987-10-30','AL','523'),('1987-10-30','AL','523'),('1987-10-30','AL','525'),('1987-10-30','AL','525'),('1987-10-30','AL','533'),('1987-10-30','AL','533'),('1987-10-30','AL','546'),('1987-10-30','AL','546'),('1987-10-30','AL','556'),('1987-10-30','AL','556'),('1987-10-30','AL','601'),('1987-10-30','AL','601'),('1987-10-30','AL','627'),('1987-10-30','AL','629'),('1987-10-30','AL','670'),('1987-10-30','AL','670'),('1987-10-30','AL','670'),('1987-10-30','AL','673'),('1987-10-30','AL','680'),('1987-10-31','AA','2'),('1987-10-31','AA','2'),('1987-10-31','AA','2'),('1987-10-31','AA','7'),('1987-10-31','AA','7'),('1987-10-31','AA','26'),('1987-10-31','AA','34'),('1987-10-31','AA','36'),('1987-10-31','AA','91'),('1987-10-31','AA','101'),('1987-10-31','AA','101'),('1987-10-31','AA','109'),('1987-10-31','AA','109'),('1987-10-31','AA','112'),('1987-10-31','AA','123'),('1987-10-31','AA','123'),('1987-10-31','AA','160'),('1987-10-31','AA','165'),('1987-10-31','AA','165'),('1987-10-31','AA','165'),('1987-10-31','AA','176'),('1987-10-31','AA','176'),('1987-10-31','AA','176'),('1987-10-31','AA','176'),('1987-10-31','AA','179'),('1987-10-31','AA','179'),('1987-10-31','AA','215'),('1987-10-31'
,'AA','215'),('1987-10-31','AA','231'),('1987-10-31','AA','263'),('1987-10-31','AA','268'),('1987-10-31','AA','268'),('1987-10-31','AA','281'),('1987-10-31','AA','287'),('1987-10-31','AA','287'),('1987-10-31','AA','309'),('1987-10-31','AA','309'),('1987-10-31','AA','309'),('1987-10-31','AA','341'),('1987-10-31','AA','344'),('1987-10-31','AA','344'),('1987-10-31','AA','347'),('1987-10-31','AA','347'),('1987-10-31','AA','368'),('1987-10-31','AA','381'),('1987-10-31','AA','381'),('1987-10-31','AA','381'),('1987-10-31','AA','396'),('1987-10-31','AA','396'),('1987-10-31','AA','397'),('1987-10-31','AA','417'),('1987-10-31','AA','417'),('1987-10-31','AA','446'),('1987-10-31','AA','451'),('1987-10-31','AA','451'),('1987-10-31','AA','491'),('1987-10-31','AA','491'),('1987-10-31','AA','504'),('1987-10-31','AA','504'),('1987-10-31','AA','519'),('1987-10-31','AA','519'),('1987-10-31','AA','523'),('1987-10-31','AA','523'),('1987-10-31','AA','525'),('1987-10-31','AA','525'),('1987-10-31','AA','533'),('1987-10-31','AA','533'),('1987-10-31','AA','546'),('1987-10-31','AA','546'),('1987-10-31','AA','546'),('1987-10-31','AA','546'),('1987-10-31','AA','556'),('1987-10-31','AA','556'),('1987-10-31','AA','556'),('1987-10-31','AA','597'),('1987-10-31','AA','597'),('1987-10-31','AA','597'),('1987-10-31','AA','601'),('1987-10-31','AA','601'),('1987-10-31','AA','627'),('1987-10-31','AA','629'),('1987-10-31','AA','670'),('1987-10-31','AA','673'),('1987-10-31','AA','673'),('1987-10-31','AA','680'),('1987-10-31','AA','680'),('1987-10-31','AA','817'),('1987-10-31','AA','817'),('1987-10-31','AA','824'),('1987-10-31','AA','824'),('1987-10-31','AA','824'),('1987-10-31','AA','832'),('1987-10-31','AA','832'),('1987-10-31','AA','852'),('1987-10-31','AA','852'),('1987-10-31','AA','866'),('1987-10-31','AA','871'),('1987-10-31','AA','871'),('1987-10-31','AA','880'),('1987-10-31','AA','880'),('1987-10-31','AA','880'),('1987-10-31','AA','883'),('1987-10-31','AA','885'),('1987-10-31','AA','885'),('1987-10-31','AA','885'),('1987-10-31','AA','890'),('1987-10-31','AA','890'),('1987-10-31','AA','893'),('1987-10-31','AA','893'),('1987-10-31','AA','905'),('1987-10-31','AA','905'),('1987-10-31','AA','915'),('1987-10-31','AA','929'),('1987-10-31','AA','929'),('1987-10-31','AA','936'),('1987-10-31','AA','936'),('1987-10-31','AA','937'),('1987-10-31','AA','937'),('1987-10-31','AA','955'),('1987-10-31','AA','955'),('1987-10-31','AA','1002'),('1987-10-31','AA','1002'),('1987-10-31','AA','1004'),('1987-10-31','AA','1004'),('1987-10-31','AA','1015'),('1987-10-31','AA','1015'),('1987-10-31','AA','1021'),('1987-10-31','AA','1021'),('1987-10-31','AA','1041'),('1987-10-31','AA','1041'),('1987-10-31','AA','1046'),('1987-10-31','AA','1046'),('1987-10-31','AA','1048'),('1987-10-31','AA','1048'),('1987-10-31','AA','1061'),('1987-10-31','AA','1061'),('1987-10-31','AA','1088'),('1987-10-31','AA','1088'),('1987-10-31','AA','2033'),('1987-10-31','AA','2058'),('1987-10-31','AA','2086'),('1987-10-31','AA','2105'),('1987-10-31','AA','2123'),('1987-10-31','AA','2123'),('1987-10-31','AA','2147'),('1987-10-31','AA','2147'),('1987-10-31','AA','2199'),('1987-10-31','AA','2199'),('1987-10-31','AA','2207'),('1987-10-31','AA','2207'),('1987-10-31','AA','2217'),('1987-10-31','AA','2230'),('1987-10-31','AA','2251'),('1987-10-31','AA','2251'),('1987-10-31','AA','2275'),('1987-10-31','AA','2278'),('1987-10-31','AA','2351'),('1987-10-31','AA','2357'),('1987-10-31','AA','2361'),('1987-10-31','AA','2528'),('1987-10-31','AA','2528'),('1987-10-31','AA','2735'),('1987-10-31','A
A','2735'),('1987-10-31','AL','2'),('1987-10-31','AL','2'),('1987-10-31','AL','7'),('1987-10-31','AL','7'),('1987-10-31','AL','26'),('1987-10-31','AL','26'),('1987-10-31','AL','34'),('1987-10-31','AL','34'),('1987-10-31','AL','36'),('1987-10-31','AL','36'),('1987-10-31','AL','91'),('1987-10-31','AL','91'),('1987-10-31','AL','104'),('1987-10-31','AL','104'),('1987-10-31','AL','109'),('1987-10-31','AL','112'),('1987-10-31','AL','112'),('1987-10-31','AL','149'),('1987-10-31','AL','160'),('1987-10-31','AL','160'),('1987-10-31','AL','165'),('1987-10-31','AL','171'),('1987-10-31','AL','171'),('1987-10-31','AL','176'),('1987-10-31','AL','176'),('1987-10-31','AL','179'),('1987-10-31','AL','215'),('1987-10-31','AL','231'),('1987-10-31','AL','263'),('1987-10-31','AL','268'),('1987-10-31','AL','268'),('1987-10-31','AL','268'),('1987-10-31','AL','287'),('1987-10-31','AL','287'),('1987-10-31','AL','309'),('1987-10-31','AL','309'),('1987-10-31','AL','344'),('1987-10-31','AL','344'),('1987-10-31','AL','357'),('1987-10-31','AL','357'),('1987-10-31','AL','381'),('1987-10-31','AL','396'),('1987-10-31','AL','397'),('1987-10-31','AL','397'),('1987-10-31','AL','397'),('1987-10-31','AL','416'),('1987-10-31','AL','417'),('1987-10-31','AL','438'),('1987-10-31','AL','438'),('1987-10-31','AL','451'),('1987-10-31','AL','451'),('1987-10-31','AL','491'),('1987-10-31','AL','491'),('1987-10-31','AL','523'),('1987-10-31','AL','523'),('1987-10-31','AL','525'),('1987-10-31','AL','525'),('1987-10-31','AL','533'),('1987-10-31','AL','546'),('1987-10-31','AL','546'),('1987-10-31','AL','556'),('1987-10-31','AL','556'),('1987-10-31','AL','601'),('1987-10-31','AL','601'),('1987-10-31','AL','627'),('1987-10-31','AL','670'),('1987-10-31','AL','673'),('1987-10-31','AL','680'),('1987-10-31','AL','745'),('1987-10-31','AL','797'),('1987-10-31','AL','905'),('1987-10-31','AL','936'),('1987-10-31','AL','966'),('1987-10-31','AL','982'); + +SELECT ignore(finalizeAggregation(Users)) FROM (SELECT FlightDate, Carrier, uniqState(FlightNum) AS Users FROM ontime GROUP BY FlightDate, Carrier LIMIT 1); + +DROP TABLE ontime; diff --git a/parser/testdata/00337_shard_any_heavy/ast.json b/parser/testdata/00337_shard_any_heavy/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00337_shard_any_heavy/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00337_shard_any_heavy/metadata.json b/parser/testdata/00337_shard_any_heavy/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00337_shard_any_heavy/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00337_shard_any_heavy/query.sql b/parser/testdata/00337_shard_any_heavy/query.sql new file mode 100644 index 000000000..ff955ab0d --- /dev/null +++ b/parser/testdata/00337_shard_any_heavy/query.sql @@ -0,0 +1,4 @@ +-- Tags: shard + +SELECT anyHeavy(x) FROM (SELECT intHash64(number) % 100 < 60 ? 
999 : number AS x FROM system.numbers LIMIT 100000); +SELECT anyHeavy(1) FROM remote('127.0.0.{2,3}', system.one); diff --git a/parser/testdata/00338_replicate_array_of_strings/ast.json b/parser/testdata/00338_replicate_array_of_strings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00338_replicate_array_of_strings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00338_replicate_array_of_strings/metadata.json b/parser/testdata/00338_replicate_array_of_strings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00338_replicate_array_of_strings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00338_replicate_array_of_strings/query.sql b/parser/testdata/00338_replicate_array_of_strings/query.sql new file mode 100644 index 000000000..b49dddcdd --- /dev/null +++ b/parser/testdata/00338_replicate_array_of_strings/query.sql @@ -0,0 +1,30 @@ +-- Tags: replica + +DROP TABLE IF EXISTS bad_arrays; +CREATE TABLE bad_arrays (a Array(String), b Array(UInt8)) ENGINE = Memory; + +INSERT INTO bad_arrays VALUES ([''],[]),([''],[1]); + +SELECT a FROM bad_arrays ARRAY JOIN b; + +DROP TABLE bad_arrays; + + +DROP TABLE IF EXISTS bad_arrays; +CREATE TABLE bad_arrays (a Array(String), b Array(String)) ENGINE = Memory; + +INSERT INTO bad_arrays VALUES ([''],[]),([''],[]),([''],[]),([''],[]),([''],[]),([''],[]),([''],[]),([''],[]),([''],[]),(['abc'],['223750']),(['ноутбук acer aspire e5-532-p3p2'],[]),([''],[]),([''],[]),([''],[]),([''],[]),(['лучшие моноблоки 2016'],[]),(['лучшие моноблоки 2016'],[]),([''],[]),([''],[]); + +SELECT a FROM bad_arrays ARRAY JOIN b; + +DROP TABLE bad_arrays; + + +DROP TABLE IF EXISTS bad_arrays; +CREATE TABLE bad_arrays (a Array(String), b Array(UInt8)) ENGINE = Memory; + +INSERT INTO bad_arrays VALUES (['abc','def'],[1,2,3]),([],[1,2]),(['a','b'],[]),(['Hello'],[1,2]),([],[]),(['x','y','z'],[4,5,6]); + +SELECT a, b FROM bad_arrays ARRAY JOIN b; + +DROP TABLE bad_arrays; diff --git a/parser/testdata/00340_squashing_insert_select/ast.json b/parser/testdata/00340_squashing_insert_select/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00340_squashing_insert_select/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00340_squashing_insert_select/metadata.json b/parser/testdata/00340_squashing_insert_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00340_squashing_insert_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00340_squashing_insert_select/query.sql b/parser/testdata/00340_squashing_insert_select/query.sql new file mode 100644 index 000000000..7d6ad8c81 --- /dev/null +++ b/parser/testdata/00340_squashing_insert_select/query.sql @@ -0,0 +1,23 @@ +-- Tags: log-engine + +DROP TABLE IF EXISTS numbers_squashed; +CREATE TABLE numbers_squashed AS system.numbers ENGINE = StripeLog; + +SET optimize_trivial_insert_select = 'false'; +SET max_block_size = 10000; + +SET min_insert_block_size_rows = 1000000; +SET min_insert_block_size_bytes = 0; + +set max_insert_threads = 1; + +INSERT INTO numbers_squashed SELECT * FROM system.numbers LIMIT 10000000; +SELECT blockSize() AS b, count() / b AS c FROM numbers_squashed GROUP BY blockSize() ORDER BY c DESC; + +SET min_insert_block_size_bytes = 1000000; +INSERT INTO numbers_squashed SELECT * FROM system.numbers LIMIT 10000000; +SELECT blockSize() AS b, count() / b AS c 
FROM numbers_squashed GROUP BY blockSize() ORDER BY c DESC; + +SELECT count() FROM numbers_squashed; + +DROP TABLE numbers_squashed; diff --git a/parser/testdata/00341_squashing_insert_select2/ast.json b/parser/testdata/00341_squashing_insert_select2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00341_squashing_insert_select2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00341_squashing_insert_select2/metadata.json b/parser/testdata/00341_squashing_insert_select2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00341_squashing_insert_select2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00341_squashing_insert_select2/query.sql b/parser/testdata/00341_squashing_insert_select2/query.sql new file mode 100644 index 000000000..660f86b6f --- /dev/null +++ b/parser/testdata/00341_squashing_insert_select2/query.sql @@ -0,0 +1,71 @@ +-- Tags: log-engine + +DROP TABLE IF EXISTS numbers_squashed; +CREATE TABLE numbers_squashed (number UInt8) ENGINE = StripeLog; + +SET min_insert_block_size_rows = 100; +SET min_insert_block_size_bytes = 0; +SET max_insert_threads = 1; +SET max_threads = 1; + +INSERT INTO numbers_squashed +SELECT arrayJoin(range(10)) AS number +UNION ALL +SELECT arrayJoin(range(100)) +UNION ALL +SELECT arrayJoin(range(10)); + +SELECT blockSize() AS b, count() / b AS c FROM numbers_squashed GROUP BY blockSize() ORDER BY c DESC, b ASC; +SELECT count() FROM numbers_squashed; + +INSERT INTO numbers_squashed +SELECT arrayJoin(range(100)) AS number +UNION ALL +SELECT arrayJoin(range(10)) +UNION ALL +SELECT arrayJoin(range(100)); + +SELECT blockSize() AS b, count() / b AS c FROM numbers_squashed GROUP BY blockSize() ORDER BY c DESC, b ASC; +SELECT count() FROM numbers_squashed; + +INSERT INTO numbers_squashed +SELECT arrayJoin(range(10)) AS number +UNION ALL +SELECT arrayJoin(range(100)) +UNION ALL +SELECT arrayJoin(range(100)); + +SELECT blockSize() AS b, count() / b AS c FROM numbers_squashed GROUP BY blockSize() ORDER BY c DESC, b ASC; +SELECT count() FROM numbers_squashed; + +INSERT INTO numbers_squashed +SELECT arrayJoin(range(10)) AS number +UNION ALL +SELECT arrayJoin(range(10)) +UNION ALL +SELECT arrayJoin(range(10)) +UNION ALL +SELECT arrayJoin(range(100)) +UNION ALL +SELECT arrayJoin(range(10)); + +SELECT blockSize() AS b, count() / b AS c FROM numbers_squashed GROUP BY blockSize() ORDER BY c DESC, b ASC; +SELECT count() FROM numbers_squashed; + +SET min_insert_block_size_rows = 10; + +INSERT INTO numbers_squashed +SELECT arrayJoin(range(10)) AS number +UNION ALL +SELECT arrayJoin(range(10)) +UNION ALL +SELECT arrayJoin(range(10)) +UNION ALL +SELECT arrayJoin(range(100)) +UNION ALL +SELECT arrayJoin(range(10)); + +SELECT blockSize() AS b, count() / b AS c FROM numbers_squashed GROUP BY blockSize() ORDER BY c DESC, b ASC; +SELECT count() FROM numbers_squashed; + +DROP TABLE numbers_squashed; diff --git a/parser/testdata/00342_escape_sequences/ast.json b/parser/testdata/00342_escape_sequences/ast.json new file mode 100644 index 000000000..44c66de4d --- /dev/null +++ b/parser/testdata/00342_escape_sequences/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + 
"explain": " Function hex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '\u0007\\b\\f\\n\\r\\t\u000B\\\\\\'\"\\\\?�'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001487137, + "rows_read": 7, + "bytes_read": 271 + } +} diff --git a/parser/testdata/00342_escape_sequences/metadata.json b/parser/testdata/00342_escape_sequences/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00342_escape_sequences/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00342_escape_sequences/query.sql b/parser/testdata/00342_escape_sequences/query.sql new file mode 100644 index 000000000..4f5aeff5e --- /dev/null +++ b/parser/testdata/00342_escape_sequences/query.sql @@ -0,0 +1 @@ +SELECT hex('\a\b\f\n\r\t\v\\\'\"\?\xAA'); diff --git a/parser/testdata/00343_array_element_generic/ast.json b/parser/testdata/00343_array_element_generic/ast.json new file mode 100644 index 000000000..e469e5ce2 --- /dev/null +++ b/parser/testdata/00343_array_element_generic/ast.json @@ -0,0 +1,100 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 26, + + "statistics": + { + "elapsed": 0.001363966, + "rows_read": 26, + "bytes_read": 1022 + } +} diff --git a/parser/testdata/00343_array_element_generic/metadata.json b/parser/testdata/00343_array_element_generic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00343_array_element_generic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00343_array_element_generic/query.sql b/parser/testdata/00343_array_element_generic/query.sql new file mode 100644 index 000000000..33a5f4dc1 --- /dev/null +++ b/parser/testdata/00343_array_element_generic/query.sql @@ -0,0 +1,24 @@ +SELECT range(100) == range(0, 100) and range(0, 100) == range(0, 100, 1); +SELECT distinct length(range(number, number + 100, 99)) == 2 FROM numbers(1000); +SELECT distinct length(range(number, number + 100, 100)) == 1 FROM numbers(1000); +SELECT range(0)[-1]; +SELECT range(0)[1]; +SELECT range(number)[2] FROM system.numbers LIMIT 10; +SELECT range(number)[-1] FROM system.numbers LIMIT 10; 
+SELECT range(number)[number] FROM system.numbers LIMIT 10; +SELECT range(number)[2 - number] FROM system.numbers LIMIT 10; + +SELECT arrayMap(x -> toString(x), range(number))[2] FROM system.numbers LIMIT 10; +SELECT arrayMap(x -> toString(x), range(number))[-1] FROM system.numbers LIMIT 10; +SELECT arrayMap(x -> toString(x), range(number))[number] FROM system.numbers LIMIT 10; +SELECT arrayMap(x -> toString(x), range(number))[2 - number] FROM system.numbers LIMIT 10; + +SELECT arrayMap(x -> range(x), range(number))[2] FROM system.numbers LIMIT 10; +SELECT arrayMap(x -> range(x), range(number))[-1] FROM system.numbers LIMIT 10; +SELECT arrayMap(x -> range(x), range(number))[number] FROM system.numbers LIMIT 10; +SELECT arrayMap(x -> range(x), range(number))[2 - number] FROM system.numbers LIMIT 10; + +SELECT [[1]][1], materialize([[1]])[1], [[1]][materialize(1)], materialize([[1]])[materialize(1)]; +SELECT [['Hello']][1], materialize([['World']])[1], [['Hello']][materialize(1)], materialize([['World']])[materialize(1)]; + +SELECT ([[['a'], ['b', 'c']], [['d', 'e', 'f'], ['g', 'h', 'i', 'j'], ['k', 'l', 'm', 'n', 'o']], [['p', 'q', 'r', 's', 't', 'u'], ['v', 'w', 'x', 'y', 'z', 'aa', 'bb'], ['cc', 'dd', 'ee', 'ff', 'gg', 'hh', 'ii', 'jj'], ['kk', 'll', 'mm', 'nn', 'oo', 'pp', 'qq', 'rr', 'ss']]] AS arr)[number], arr[number][number], arr[number][number][number] FROM system.numbers LIMIT 10; diff --git a/parser/testdata/00344_row_number_in_all_blocks/ast.json b/parser/testdata/00344_row_number_in_all_blocks/ast.json new file mode 100644 index 000000000..eda2c3def --- /dev/null +++ b/parser/testdata/00344_row_number_in_all_blocks/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00142055, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00344_row_number_in_all_blocks/metadata.json b/parser/testdata/00344_row_number_in_all_blocks/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00344_row_number_in_all_blocks/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00344_row_number_in_all_blocks/query.sql b/parser/testdata/00344_row_number_in_all_blocks/query.sql new file mode 100644 index 000000000..56a04c85c --- /dev/null +++ b/parser/testdata/00344_row_number_in_all_blocks/query.sql @@ -0,0 +1,12 @@ +SET max_block_size = 1000; +SET max_threads = 10; +SELECT + groupUniqArray(blockSize()), + uniqExact(rowNumberInAllBlocks()), + min(rowNumberInAllBlocks()), + max(rowNumberInAllBlocks()), + uniqExact(rowNumberInBlock()), + min(rowNumberInBlock()), + max(rowNumberInBlock()), + uniqExact(blockNumber()) +FROM (SELECT * FROM system.numbers_mt LIMIT 100000); diff --git a/parser/testdata/00345_index_accurate_comparison/ast.json b/parser/testdata/00345_index_accurate_comparison/ast.json new file mode 100644 index 000000000..6548e6b0c --- /dev/null +++ b/parser/testdata/00345_index_accurate_comparison/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery index (children 1)" + }, + { + "explain": " Identifier index" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001027974, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/00345_index_accurate_comparison/metadata.json b/parser/testdata/00345_index_accurate_comparison/metadata.json new file mode 100644 
index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00345_index_accurate_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00345_index_accurate_comparison/query.sql b/parser/testdata/00345_index_accurate_comparison/query.sql new file mode 100644 index 000000000..aafe2a0ae --- /dev/null +++ b/parser/testdata/00345_index_accurate_comparison/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS index; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE index +( + key Int32, + name String, + merge_date Date +) ENGINE = MergeTree(merge_date, key, 8192); + +insert into index values (1,'1','2016-07-07'); +insert into index values (-1,'-1','2016-07-07'); + +select * from index where key = 1; +select * from index where key = -1; +OPTIMIZE TABLE index; +select * from index where key = 1; +select * from index where key = -1; +select * from index where key < -0.5; + +DROP TABLE index; diff --git a/parser/testdata/00346_if_tuple/ast.json b/parser/testdata/00346_if_tuple/ast.json new file mode 100644 index 000000000..da6a4335a --- /dev/null +++ b/parser/testdata/00346_if_tuple/ast.json @@ -0,0 +1,130 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '! 
'" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 36, + + "statistics": + { + "elapsed": 0.001441645, + "rows_read": 36, + "bytes_read": 1438 + } +} diff --git a/parser/testdata/00346_if_tuple/metadata.json b/parser/testdata/00346_if_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00346_if_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00346_if_tuple/query.sql b/parser/testdata/00346_if_tuple/query.sql new file mode 100644 index 000000000..c1257f7c9 --- /dev/null +++ b/parser/testdata/00346_if_tuple/query.sql @@ -0,0 +1,8 @@ +SELECT number % 3 = 2 ? (number, toString(number)) : (number * 10, concat('! ', toString(number))) FROM system.numbers LIMIT 10; + +SELECT 0 ? (number, toString(number)) : (number * 10, concat('! ', toString(number))) FROM system.numbers LIMIT 10; +SELECT 1 ? (number, toString(number)) : (number * 10, concat('! ', toString(number))) FROM system.numbers LIMIT 10; + +SELECT number % 3 = 2 ? (1, 'Hello') : (2, 'World') FROM system.numbers LIMIT 10; +SELECT number % 3 = 2 ? (number, 'Hello') : (0, 'World') FROM system.numbers LIMIT 10; +SELECT number % 3 = 2 ? (number, 'Hello') : (0, toString(exp2(number))) FROM system.numbers LIMIT 10; diff --git a/parser/testdata/00347_has_tuple/ast.json b/parser/testdata/00347_has_tuple/ast.json new file mode 100644 index 000000000..60e2d1ae1 --- /dev/null +++ b/parser/testdata/00347_has_tuple/ast.json @@ -0,0 +1,118 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function has (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Identifier b" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier c" + }, + { + "explain": " Identifier d" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Identifier b" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal UInt64_1 (alias a)" + }, + { + "explain": " Literal UInt64_2 (alias b)" + }, + { + "explain": " Literal UInt64_3 (alias c)" + }, + { + "explain": 
" Literal UInt64_4 (alias d)" + } + ], + + "rows": 32, + + "statistics": + { + "elapsed": 0.001746096, + "rows_read": 32, + "bytes_read": 1269 + } +} diff --git a/parser/testdata/00347_has_tuple/metadata.json b/parser/testdata/00347_has_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00347_has_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00347_has_tuple/query.sql b/parser/testdata/00347_has_tuple/query.sql new file mode 100644 index 000000000..87750a9fd --- /dev/null +++ b/parser/testdata/00347_has_tuple/query.sql @@ -0,0 +1,24 @@ +SELECT has([(a, b), (c, d)], (a, b)) FROM (SELECT 1 AS a, 2 AS b, 3 AS c, 4 AS d); +SELECT has([(a, b), (c, d)], (c, d)) FROM (SELECT 1 AS a, 2 AS b, 3 AS c, 4 AS d); +SELECT has([(a, b), (c, d)], (b, c)) FROM (SELECT 1 AS a, 2 AS b, 3 AS c, 4 AS d); +SELECT has([(a, b), (c, d)], (b, c)) FROM (SELECT 1 AS a, 2 AS b, 2 AS c, 2 AS d); + +SELECT has([(a, b), (c, d)], (a, b)) FROM (SELECT number + 1 AS a, number + 2 AS b, number + 3 AS c, number + 4 AS d FROM system.numbers LIMIT 2); +SELECT has([(a, b), (c, d)], (c, d)) FROM (SELECT number + 1 AS a, number + 2 AS b, number + 3 AS c, number + 4 AS d FROM system.numbers LIMIT 2); +SELECT has([(a, b), (c, d)], (b, c)) FROM (SELECT number + 1 AS a, number + 2 AS b, number + 3 AS c, number + 4 AS d FROM system.numbers LIMIT 2); +SELECT has([(a, b), (c, d)], (b, c)) FROM (SELECT number + 1 AS a, number + 2 AS b, number + 2 AS c, number + 2 AS d FROM system.numbers LIMIT 2); + +SELECT has([(a, b), (c, d)], (a, b)) FROM (SELECT materialize(1) AS a, 2 AS b, 3 AS c, 4 AS d); +SELECT has([(a, b), (c, d)], (c, d)) FROM (SELECT materialize(1) AS a, 2 AS b, 3 AS c, 4 AS d); +SELECT has([(a, b), (c, d)], (b, c)) FROM (SELECT materialize(1) AS a, 2 AS b, 3 AS c, 4 AS d); +SELECT has([(a, b), (c, d)], (b, c)) FROM (SELECT materialize(1) AS a, 2 AS b, 2 AS c, 2 AS d); + +SELECT has([(a, b), (c, d)], (a, b)) FROM (SELECT materialize(1) AS a, 2 AS b, materialize(3) AS c, 4 AS d); +SELECT has([(a, b), (c, d)], (c, d)) FROM (SELECT materialize(1) AS a, 2 AS b, materialize(3) AS c, 4 AS d); +SELECT has([(a, b), (c, d)], (b, c)) FROM (SELECT materialize(1) AS a, 2 AS b, materialize(3) AS c, 4 AS d); +SELECT has([(a, b), (c, d)], (b, c)) FROM (SELECT materialize(1) AS a, 2 AS b, materialize(2) AS c, 2 AS d); + +SELECT has([(a, b), (c, d)], (a, b)) FROM (SELECT materialize(1) AS a, materialize(2) AS b, materialize(3) AS c, 4 AS d); +SELECT has([(a, b), (c, d)], (c, d)) FROM (SELECT materialize(1) AS a, materialize(2) AS b, materialize(3) AS c, 4 AS d); +SELECT has([(a, b), (c, d)], (b, c)) FROM (SELECT materialize(1) AS a, materialize(2) AS b, materialize(3) AS c, 4 AS d); +SELECT has([(a, b), (c, d)], (b, c)) FROM (SELECT materialize(1) AS a, materialize(2) AS b, materialize(2) AS c, 2 AS d); diff --git a/parser/testdata/00348_tuples/ast.json b/parser/testdata/00348_tuples/ast.json new file mode 100644 index 000000000..8c7c65a89 --- /dev/null +++ b/parser/testdata/00348_tuples/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal Tuple_('1', UInt64_2) (alias t)" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " 
ExpressionList (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001309854, + "rows_read": 13, + "bytes_read": 484 + } +} diff --git a/parser/testdata/00348_tuples/metadata.json b/parser/testdata/00348_tuples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00348_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00348_tuples/query.sql b/parser/testdata/00348_tuples/query.sql new file mode 100644 index 000000000..61125b4aa --- /dev/null +++ b/parser/testdata/00348_tuples/query.sql @@ -0,0 +1,30 @@ +SELECT ('1',2) AS t, t.1, t.2; +SELECT materialize(('1',2)) AS t, t.1, t.2; +SELECT (materialize('1'),2) AS t, t.1, t.2; +SELECT ('1',materialize(2)) AS t, t.1, t.2; +SELECT (materialize('1'),materialize(2)) AS t, t.1, t.2; + +SELECT [('1',2)] AS t, t[1].1, t[1].2; +SELECT [materialize(('1',2))] AS t, t[1].1, t[1].2; +SELECT [(materialize('1'),2)] AS t, t[1].1, t[1].2; +SELECT [('1',materialize(2))] AS t, t[1].1, t[1].2; +SELECT [(materialize('1'),materialize(2))] AS t, t[1].1, t[1].2; +SELECT materialize([('1',2)]) AS t, t[1].1, t[1].2; + +SELECT [((1, materialize('2')), [(3, [4])])] AS thing, + thing[1], + thing[1].1, + thing[1].2, + thing[1].1.1, + thing[1].1.2, + (thing[1].2)[1], + (thing[1].2)[1].1, + (thing[1].2)[1].2, + ((thing[1].2)[1].2)[1]; + +select arrayMap(t->tuple(t.1, t.2*2), [('1',2)]); +select arrayMap(t->tuple(t.1, t.2*2), [materialize(('1',2))]); +select arrayMap(t->tuple(t.1, t.2*2), [(materialize('1'),2)]); +select arrayMap(t->tuple(t.1, t.2*2), [('1',materialize(2))]); +select arrayMap(t->tuple(t.1, t.2*2), [(materialize('1'),materialize(2))]); +select arrayMap(t->tuple(t.1, t.2*2), materialize([('1',2)])); diff --git a/parser/testdata/00349_visible_width/ast.json b/parser/testdata/00349_visible_width/ast.json new file mode 100644 index 000000000..f6c2d4213 --- /dev/null +++ b/parser/testdata/00349_visible_width/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function visibleWidth (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_2)" + }, + { + "explain": " Function visibleWidth (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3]" + }, + { + "explain": " Function visibleWidth (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal Array_[UInt64_2]" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001333359, + "rows_read": 16, + "bytes_read": 668 + } +} diff --git a/parser/testdata/00349_visible_width/metadata.json b/parser/testdata/00349_visible_width/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/00349_visible_width/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00349_visible_width/query.sql b/parser/testdata/00349_visible_width/query.sql new file mode 100644 index 000000000..a17e713c3 --- /dev/null +++ b/parser/testdata/00349_visible_width/query.sql @@ -0,0 +1 @@ +SELECT visibleWidth((1, 2)), visibleWidth([1, 2, 3]), visibleWidth((1, [2])); diff --git a/parser/testdata/00350_count_distinct/ast.json b/parser/testdata/00350_count_distinct/ast.json new file mode 100644 index 000000000..e67ffdbcd --- /dev/null +++ b/parser/testdata/00350_count_distinct/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001402676, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00350_count_distinct/metadata.json b/parser/testdata/00350_count_distinct/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00350_count_distinct/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00350_count_distinct/query.sql b/parser/testdata/00350_count_distinct/query.sql new file mode 100644 index 000000000..5800c5eb9 --- /dev/null +++ b/parser/testdata/00350_count_distinct/query.sql @@ -0,0 +1,9 @@ +SET count_distinct_implementation = 'uniq'; +SELECT count(DISTINCT x) FROM (SELECT number % 123 AS x FROM system.numbers LIMIT 1000); +SELECT count(DISTINCT x, y) FROM (SELECT number % 11 AS x, number % 13 AS y FROM system.numbers LIMIT 1000); +SET count_distinct_implementation = 'uniqCombined'; +SELECT count(DISTINCT x) FROM (SELECT number % 123 AS x FROM system.numbers LIMIT 1000); +SELECT count(DISTINCT x, y) FROM (SELECT number % 11 AS x, number % 13 AS y FROM system.numbers LIMIT 1000); +SET count_distinct_implementation = 'uniqExact'; +SELECT count(DISTINCT x) FROM (SELECT number % 123 AS x FROM system.numbers LIMIT 1000); +SELECT count(DISTINCT x, y) FROM (SELECT number % 11 AS x, number % 13 AS y FROM system.numbers LIMIT 1000); diff --git a/parser/testdata/00351_select_distinct_arrays_tuples/ast.json b/parser/testdata/00351_select_distinct_arrays_tuples/ast.json new file mode 100644 index 000000000..997856193 --- /dev/null +++ b/parser/testdata/00351_select_distinct_arrays_tuples/ast.json @@ -0,0 +1,160 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " 
Literal UInt64_5" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_100" + } + ], + + "rows": 46, + + "statistics": + { + "elapsed": 0.001377385, + "rows_read": 46, + "bytes_read": 1802 + } +} diff --git a/parser/testdata/00351_select_distinct_arrays_tuples/metadata.json b/parser/testdata/00351_select_distinct_arrays_tuples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00351_select_distinct_arrays_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00351_select_distinct_arrays_tuples/query.sql b/parser/testdata/00351_select_distinct_arrays_tuples/query.sql new file mode 100644 index 000000000..a4dedf7a9 --- /dev/null +++ b/parser/testdata/00351_select_distinct_arrays_tuples/query.sql @@ -0,0 +1,3 @@ +SELECT DISTINCT number % 3, number % 5, (number % 3, number % 5), [number % 3, number % 5] FROM (SELECT * FROM system.numbers LIMIT 100); +SELECT count(), count(DISTINCT x, y) FROM (SELECT DISTINCT * FROM (SELECT 'a\0b' AS x, 'c' AS y UNION ALL SELECT 'a', 'b\0c')); +SELECT count(), count(DISTINCT x, y) FROM (SELECT DISTINCT * FROM (SELECT [1, 2] AS x, [3] AS y UNION ALL SELECT [1], [2, 3])); diff --git a/parser/testdata/00352_external_sorting_and_constants/ast.json b/parser/testdata/00352_external_sorting_and_constants/ast.json new file mode 100644 index 000000000..8406c8267 --- /dev/null +++ b/parser/testdata/00352_external_sorting_and_constants/ast.json @@ -0,0 +1,100 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 6)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal 'Hello' (alias k)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " 
TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1000000" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_999990" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Set" + } + ], + + "rows": 26, + + "statistics": + { + "elapsed": 0.001646786, + "rows_read": 26, + "bytes_read": 1027 + } +} diff --git a/parser/testdata/00352_external_sorting_and_constants/metadata.json b/parser/testdata/00352_external_sorting_and_constants/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00352_external_sorting_and_constants/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00352_external_sorting_and_constants/query.sql b/parser/testdata/00352_external_sorting_and_constants/query.sql new file mode 100644 index 000000000..31c905700 --- /dev/null +++ b/parser/testdata/00352_external_sorting_and_constants/query.sql @@ -0,0 +1,4 @@ +SELECT number, 'Hello' AS k FROM (SELECT number FROM system.numbers LIMIT 1000000) ORDER BY number LIMIT 999990, 100 SETTINGS max_bytes_before_external_sort = 1000000, max_bytes_ratio_before_external_sort = 0; +SELECT number, 'Hello' AS k FROM (SELECT number FROM system.numbers LIMIT 1000000) ORDER BY number, k LIMIT 999990, 100 SETTINGS max_bytes_before_external_sort = 1000000, max_bytes_ratio_before_external_sort = 0; +SELECT number, 'Hello' AS k FROM (SELECT number FROM system.numbers LIMIT 1000000) ORDER BY k, number, k LIMIT 999990, 100 SETTINGS max_bytes_before_external_sort = 1000000, max_bytes_ratio_before_external_sort = 0; +SELECT number, 'Hello' AS k FROM (SELECT number FROM system.numbers LIMIT 1000000) ORDER BY number, k, number LIMIT 999990, 100 SETTINGS max_bytes_before_external_sort = 1000000, max_bytes_ratio_before_external_sort = 0; diff --git a/parser/testdata/00353_join_by_tuple/ast.json b/parser/testdata/00353_join_by_tuple/ast.json new file mode 100644 index 000000000..83ae6d926 --- /dev/null +++ b/parser/testdata/00353_join_by_tuple/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001328557, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00353_join_by_tuple/metadata.json b/parser/testdata/00353_join_by_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00353_join_by_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00353_join_by_tuple/query.sql b/parser/testdata/00353_join_by_tuple/query.sql new file mode 100644 index 000000000..99019f53c --- /dev/null +++ b/parser/testdata/00353_join_by_tuple/query.sql @@ -0,0 +1,2 @@ +set any_join_distinct_right_table_keys = 1; +select a from (select (1, 2) as a) js1 any inner join (select (1, 2) as a) js2 using a; diff --git a/parser/testdata/00355_array_of_non_const_convertible_types/ast.json b/parser/testdata/00355_array_of_non_const_convertible_types/ast.json new file mode 100644 index 000000000..45b5bce3a --- /dev/null +++ b/parser/testdata/00355_array_of_non_const_convertible_types/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + 
"type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_3" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001397965, + "rows_read": 15, + "bytes_read": 583 + } +} diff --git a/parser/testdata/00355_array_of_non_const_convertible_types/metadata.json b/parser/testdata/00355_array_of_non_const_convertible_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00355_array_of_non_const_convertible_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00355_array_of_non_const_convertible_types/query.sql b/parser/testdata/00355_array_of_non_const_convertible_types/query.sql new file mode 100644 index 000000000..67ebc112e --- /dev/null +++ b/parser/testdata/00355_array_of_non_const_convertible_types/query.sql @@ -0,0 +1 @@ +SELECT [toUInt8(number), number] FROM system.numbers LIMIT 3; diff --git a/parser/testdata/00356_analyze_aggregations_and_union_all/ast.json b/parser/testdata/00356_analyze_aggregations_and_union_all/ast.json new file mode 100644 index 000000000..d7740405d --- /dev/null +++ b/parser/testdata/00356_analyze_aggregations_and_union_all/ast.json @@ -0,0 +1,112 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function uniq (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias a)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function uniq (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier b" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 
1)" + }, + { + "explain": " Literal UInt64_1 (alias b)" + } + ], + + "rows": 30, + + "statistics": + { + "elapsed": 0.001108915, + "rows_read": 30, + "bytes_read": 1213 + } +} diff --git a/parser/testdata/00356_analyze_aggregations_and_union_all/metadata.json b/parser/testdata/00356_analyze_aggregations_and_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00356_analyze_aggregations_and_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00356_analyze_aggregations_and_union_all/query.sql b/parser/testdata/00356_analyze_aggregations_and_union_all/query.sql new file mode 100644 index 000000000..05d87808e --- /dev/null +++ b/parser/testdata/00356_analyze_aggregations_and_union_all/query.sql @@ -0,0 +1 @@ +SELECT uniq(a) FROM (SELECT 1 AS a) UNION ALL SELECT uniq(b) FROM (SELECT 1 AS b); diff --git a/parser/testdata/00357_to_string_complex_types/ast.json b/parser/testdata/00357_to_string_complex_types/ast.json new file mode 100644 index 000000000..14212c9fb --- /dev/null +++ b/parser/testdata/00357_to_string_complex_types/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'Hello'" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2016-01-01'" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3]" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001647136, + "rows_read": 16, + "bytes_read": 643 + } +} diff --git a/parser/testdata/00357_to_string_complex_types/metadata.json b/parser/testdata/00357_to_string_complex_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00357_to_string_complex_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00357_to_string_complex_types/query.sql b/parser/testdata/00357_to_string_complex_types/query.sql new file mode 100644 index 000000000..dcfb8db9f --- /dev/null +++ b/parser/testdata/00357_to_string_complex_types/query.sql @@ -0,0 +1,14 @@ +SELECT toString((1, 'Hello', toDate('2016-01-01'))), toString([1, 2, 3]); +SELECT (number, toString(number), range(number)) AS x, toString(x) FROM system.numbers LIMIT 10; +SELECT hex(toString(countState())) FROM (SELECT * FROM system.numbers LIMIT 10); + +SELECT CAST((1, 'Hello', toDate('2016-01-01')) AS String), CAST([1, 2, 3] AS String); +SELECT (number, toString(number), range(number)) AS x, CAST(x AS String) FROM system.numbers LIMIT 10; +SELECT hex(CAST(countState() AS String)) FROM (SELECT * FROM system.numbers LIMIT 10); + +SELECT toDateTime64('2024-01-01 00:00:00.00', 6), + cast(toDateTime64('2024-01-01 00:00:00.100', 6) as String), + toString((1, toDateTime64('2024-01-01 00:00:00.12000', 6))), + toString([toDateTime64('2024-01-01 00:00:00.123000', 6), toDateTime64('2024-01-01 
00:00:00.123400', 6)]), + JSONExtractString('{"a" : "2024-01-01 00:00:00"}', 'a')::DateTime64(6) + SETTINGS date_time_64_output_format_cut_trailing_zeros_align_to_groups_of_thousands = true; diff --git a/parser/testdata/00358_from_string_complex_types/ast.json b/parser/testdata/00358_from_string_complex_types/ast.json new file mode 100644 index 000000000..1e66499b7 --- /dev/null +++ b/parser/testdata/00358_from_string_complex_types/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '[1, 2, 3]'" + }, + { + "explain": " Literal 'Array(UInt8)'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001431861, + "rows_read": 8, + "bytes_read": 296 + } +} diff --git a/parser/testdata/00358_from_string_complex_types/metadata.json b/parser/testdata/00358_from_string_complex_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00358_from_string_complex_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00358_from_string_complex_types/query.sql b/parser/testdata/00358_from_string_complex_types/query.sql new file mode 100644 index 000000000..0452ac9fa --- /dev/null +++ b/parser/testdata/00358_from_string_complex_types/query.sql @@ -0,0 +1,2 @@ +SELECT CAST('[1, 2, 3]' AS Array(UInt8)); +SELECT CAST(toString(range(number)) AS Array(UInt64)), CAST(toString((number, number * 111)) AS Tuple(UInt64, UInt8)) FROM system.numbers LIMIT 10; diff --git a/parser/testdata/00359_convert_or_zero_functions/ast.json b/parser/testdata/00359_convert_or_zero_functions/ast.json new file mode 100644 index 000000000..4f9de11e3 --- /dev/null +++ b/parser/testdata/00359_convert_or_zero_functions/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toUInt32OrZero (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '123a'" + }, + { + "explain": " Function toUInt32OrZero (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '456'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.000984313, + "rows_read": 10, + "bytes_read": 380 + } +} diff --git a/parser/testdata/00359_convert_or_zero_functions/metadata.json b/parser/testdata/00359_convert_or_zero_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00359_convert_or_zero_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00359_convert_or_zero_functions/query.sql b/parser/testdata/00359_convert_or_zero_functions/query.sql new file mode 100644 index 000000000..c3595d9a0 --- /dev/null +++ b/parser/testdata/00359_convert_or_zero_functions/query.sql @@ -0,0 +1,5 @@ +SELECT toUInt32OrZero('123a'), toUInt32OrZero('456'); +SELECT toUInt32OrZero(arrayJoin(['123a', '456'])); + +SELECT toFloat64OrZero('123.456a'), 
toFloat64OrZero('456.789'); +SELECT toFloat64OrZero(arrayJoin(['123.456a', '456.789'])); diff --git a/parser/testdata/00360_to_date_from_string_with_datetime/ast.json b/parser/testdata/00360_to_date_from_string_with_datetime/ast.json new file mode 100644 index 000000000..d1ded31e8 --- /dev/null +++ b/parser/testdata/00360_to_date_from_string_with_datetime/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2016-08-02 12:34:19'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.00110991, + "rows_read": 7, + "bytes_read": 272 + } +} diff --git a/parser/testdata/00360_to_date_from_string_with_datetime/metadata.json b/parser/testdata/00360_to_date_from_string_with_datetime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00360_to_date_from_string_with_datetime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00360_to_date_from_string_with_datetime/query.sql b/parser/testdata/00360_to_date_from_string_with_datetime/query.sql new file mode 100644 index 000000000..aa6743e89 --- /dev/null +++ b/parser/testdata/00360_to_date_from_string_with_datetime/query.sql @@ -0,0 +1,2 @@ +SELECT toDate('2016-08-02 12:34:19'); +SELECT toDate(toString(toDateTime('2000-01-01 00:00:00') + number)) FROM system.numbers LIMIT 3; diff --git a/parser/testdata/00361_shared_array_offsets_and_squash_blocks/ast.json b/parser/testdata/00361_shared_array_offsets_and_squash_blocks/ast.json new file mode 100644 index 000000000..aa99e97c6 --- /dev/null +++ b/parser/testdata/00361_shared_array_offsets_and_squash_blocks/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nested1 (children 1)" + }, + { + "explain": " Identifier nested1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001870661, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00361_shared_array_offsets_and_squash_blocks/metadata.json b/parser/testdata/00361_shared_array_offsets_and_squash_blocks/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00361_shared_array_offsets_and_squash_blocks/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00361_shared_array_offsets_and_squash_blocks/query.sql b/parser/testdata/00361_shared_array_offsets_and_squash_blocks/query.sql new file mode 100644 index 000000000..7815a3525 --- /dev/null +++ b/parser/testdata/00361_shared_array_offsets_and_squash_blocks/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS nested1; +DROP TABLE IF EXISTS nested2; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE nested1 (d Date DEFAULT '2000-01-01', x UInt64, n Nested(a String, b String)) ENGINE = MergeTree(d, x, 1); +INSERT INTO nested1 (x, n.a, n.b) VALUES (1, ['Hello', 'World'], ['abc', 'def']), (2, [], []); + +SET max_block_size = 1; +SELECT * FROM nested1 ORDER BY x; + +CREATE TABLE nested2 (d Date DEFAULT '2000-01-01', x UInt64, n Nested(a String, b String)) ENGINE = MergeTree(d, x, 1); + +INSERT INTO nested2 SELECT * FROM nested1; + +SELECT * FROM nested2 
ORDER BY x; + +DROP TABLE nested1; +DROP TABLE nested2; diff --git a/parser/testdata/00362_great_circle_distance/ast.json b/parser/testdata/00362_great_circle_distance/ast.json new file mode 100644 index 000000000..73e0244c5 --- /dev/null +++ b/parser/testdata/00362_great_circle_distance/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function floor (alias distance) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function greatCircleDistance (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal Float64_33.3" + }, + { + "explain": " Literal Float64_55.3" + }, + { + "explain": " Literal Float64_33.3" + }, + { + "explain": " Literal Float64_55.3" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001284591, + "rows_read": 12, + "bytes_read": 486 + } +} diff --git a/parser/testdata/00362_great_circle_distance/metadata.json b/parser/testdata/00362_great_circle_distance/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00362_great_circle_distance/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00362_great_circle_distance/query.sql b/parser/testdata/00362_great_circle_distance/query.sql new file mode 100644 index 000000000..62f9e8376 --- /dev/null +++ b/parser/testdata/00362_great_circle_distance/query.sql @@ -0,0 +1,13 @@ +SELECT floor(greatCircleDistance(33.3, 55.3, 33.3, 55.3)) AS distance; +-- consts are from vincenty formula from geopy +-- k = '158.756175, 53.006373' +-- u = '37.531014, 55.703050' +-- y = '37.588144, 55.733842' +-- m = '37.617780, 55.755830' +-- n = '83.089598, 54.842461' +select abs(greatCircleDistance(37.531014, 55.703050, 37.588144, 55.733842) - 4964.25740448) / 4964.25740448 < 0.004; +select abs(greatCircleDistance(37.531014, 55.703050, 37.617780, 55.755830) - 8015.52288508) / 8015.52288508 < 0.004; +select abs(greatCircleDistance(37.588144, 55.733842, 37.617780, 55.755830) - 3075.27332275) / 3075.27332275 < 0.004; +select abs(greatCircleDistance(83.089598, 54.842461, 37.617780, 55.755830) - 2837839.72863) / 2837839.72863 < 0.004; +select abs(greatCircleDistance(37.617780, 55.755830, 158.756175, 53.006373) - 6802821.68814) / 6802821.68814 < 0.004; +select abs(greatCircleDistance(83.089598, 54.842461, 158.756175, 53.006373) - 4727216.39539) / 4727216.39539 < 0.004; diff --git a/parser/testdata/00363_defaults/ast.json b/parser/testdata/00363_defaults/ast.json new file mode 100644 index 000000000..934d9e469 --- /dev/null +++ b/parser/testdata/00363_defaults/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery prewhere_defaults (children 1)" + }, + { + "explain": " Identifier prewhere_defaults" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001360112, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/00363_defaults/metadata.json b/parser/testdata/00363_defaults/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00363_defaults/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00363_defaults/query.sql 
b/parser/testdata/00363_defaults/query.sql new file mode 100644 index 000000000..1ec3b13a1 --- /dev/null +++ b/parser/testdata/00363_defaults/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS prewhere_defaults; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE prewhere_defaults (d Date DEFAULT '2000-01-01', k UInt64 DEFAULT 0, x UInt16) ENGINE = MergeTree(d, k, 1); + +INSERT INTO prewhere_defaults (x) VALUES (1); + +SET max_block_size = 1; + +SELECT * FROM prewhere_defaults PREWHERE x != 0 ORDER BY x; + +ALTER TABLE prewhere_defaults ADD COLUMN y UInt16 DEFAULT x; + +SELECT * FROM prewhere_defaults PREWHERE x != 0 ORDER BY x; + +INSERT INTO prewhere_defaults (x) VALUES (2); + +SELECT * FROM prewhere_defaults PREWHERE x != 0 ORDER BY x; + +DROP TABLE prewhere_defaults; diff --git a/parser/testdata/00364_java_style_denormals/ast.json b/parser/testdata/00364_java_style_denormals/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00364_java_style_denormals/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00364_java_style_denormals/metadata.json b/parser/testdata/00364_java_style_denormals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00364_java_style_denormals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00364_java_style_denormals/query.sql b/parser/testdata/00364_java_style_denormals/query.sql new file mode 100644 index 000000000..3e73c82eb --- /dev/null +++ b/parser/testdata/00364_java_style_denormals/query.sql @@ -0,0 +1,56 @@ +SELECT +toFloat64('inf'), +toFloat64('-inf'), +toFloat64('INF'), +toFloat64('-INF'), +toFloat64('Infinity'), +toFloat64('-Infinity'), +toFloat64('nan'), +toFloat64('-nan'), +toFloat64('NAN'), +toFloat64('-NAN'), +toFloat64('NaN'), +toFloat64('-NaN'), + +toFloat64OrZero('in'), +toFloat64OrZero('-in'), +toFloat64OrZero('INFi'), +toFloat64OrZero('-INFi'), +toFloat64OrZero('Infinit'), +toFloat64OrZero('-Infinit'), +toFloat64OrZero('na'), +toFloat64OrZero('-na'), +toFloat64OrZero('NANo'), +toFloat64OrZero('-NANo'), +toFloat64OrZero('NaN+'), +toFloat64OrZero('-NaNa'), +toFloat64OrZero('+Na'), + +toFloat32('inf'), +toFloat32('-inf'), +toFloat32('INF'), +toFloat32('-INF'), +toFloat32('Infinity'), +toFloat32('-Infinity'), +toFloat32('nan'), +toFloat32('-nan'), +toFloat32('NAN'), +toFloat32('-NAN'), +toFloat32('NaN'), +toFloat32('-NaN'), + +toFloat32OrZero('in'), +toFloat32OrZero('-in'), +toFloat32OrZero('INFi'), +toFloat32OrZero('-INFi'), +toFloat32OrZero('Infinit'), +toFloat32OrZero('-Infinit'), +toFloat32OrZero('na'), +toFloat32OrZero('-na'), +toFloat32OrZero('NANo'), +toFloat32OrZero('-NANo'), +toFloat32OrZero('NaN+'), +toFloat32OrZero('-NaNa'), +toFloat32OrZero('+Na') + +FORMAT TabSeparated; diff --git a/parser/testdata/00367_visible_width_of_array_tuple_enum/ast.json b/parser/testdata/00367_visible_width_of_array_tuple_enum/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00367_visible_width_of_array_tuple_enum/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00367_visible_width_of_array_tuple_enum/metadata.json b/parser/testdata/00367_visible_width_of_array_tuple_enum/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00367_visible_width_of_array_tuple_enum/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00367_visible_width_of_array_tuple_enum/query.sql 
b/parser/testdata/00367_visible_width_of_array_tuple_enum/query.sql new file mode 100644 index 000000000..684980120 --- /dev/null +++ b/parser/testdata/00367_visible_width_of_array_tuple_enum/query.sql @@ -0,0 +1,4 @@ +SELECT +CAST(['hello'] AS Array(Enum8('hello' = 1))) AS x, +(1, CAST('hello' AS Enum8('hello' = 1))) AS y +FORMAT PrettyCompactNoEscapes; diff --git a/parser/testdata/00369_int_div_of_float/ast.json b/parser/testdata/00369_int_div_of_float/ast.json new file mode 100644 index 000000000..78c45dbcf --- /dev/null +++ b/parser/testdata/00369_int_div_of_float/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function intDiv (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_4" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001144141, + "rows_read": 8, + "bytes_read": 290 + } +} diff --git a/parser/testdata/00369_int_div_of_float/metadata.json b/parser/testdata/00369_int_div_of_float/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00369_int_div_of_float/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00369_int_div_of_float/query.sql b/parser/testdata/00369_int_div_of_float/query.sql new file mode 100644 index 000000000..4d5d477ce --- /dev/null +++ b/parser/testdata/00369_int_div_of_float/query.sql @@ -0,0 +1,6 @@ +SELECT intDiv(10, 4); +SELECT intDiv(10., 4); +SELECT intDiv(10, 4.); +SELECT intDiv(10., 4.); +SELECT intDiv(1, 0.3); +SELECT intDiv(1.0, 0.3); diff --git a/parser/testdata/00370_duplicate_columns_in_subqueries/ast.json b/parser/testdata/00370_duplicate_columns_in_subqueries/ast.json new file mode 100644 index 000000000..f9b8d2268 --- /dev/null +++ b/parser/testdata/00370_duplicate_columns_in_subqueries/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001738685, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00370_duplicate_columns_in_subqueries/metadata.json b/parser/testdata/00370_duplicate_columns_in_subqueries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00370_duplicate_columns_in_subqueries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00370_duplicate_columns_in_subqueries/query.sql b/parser/testdata/00370_duplicate_columns_in_subqueries/query.sql new file mode 100644 index 000000000..8a93c0a9d --- /dev/null +++ b/parser/testdata/00370_duplicate_columns_in_subqueries/query.sql @@ -0,0 +1,16 @@ +SET any_join_distinct_right_table_keys = 1; +SET joined_subquery_requires_alias = 0; +SET enable_analyzer = 1; + +select x, y from (select 1 as x, 2 as y, x, y); +select x, y from (select 1 as x, 1 as y, x, y); +select x from (select 1 as x, 1 as y, x, y); +select * from (select 1 as x, 2 as y, x, y); +select * from (select 1 as a, 1 as b, 1 as c, b, c); +select b, c from (select 1 as a, 1 as b, 1 as c, b, c); +select b, c from (select 1 as a, 1 as b, 1 as c, b, c) any left join (select 1 as a) using a; +select b, c from (select 1 as a, 1 as b, 1 as c, 1 
as b, 1 as c) any left join (select 1 as a) using a; +select a, b, c from (select 42 as a, 1 as b, 2 as c, 1 as b, 2 as c) any left join (select 42 as a, 3 as d) using a; +select a, b, c from (select 42 as a, 1 as b, 2 as c, 1 as b, 2 as c) any left join (select 42 as a, 3 as d) using a order by d; + +SELECT k, a1, b1, a2, b2 FROM (SELECT 0 AS k, 'hello' AS a1, 'world' AS b1, a1) ANY FULL OUTER JOIN (SELECT 1 AS k, 'hello' AS a2, 'world' AS b2, a2) USING (k) ORDER BY k; diff --git a/parser/testdata/00371_union_all/ast.json b/parser/testdata/00371_union_all/ast.json new file mode 100644 index 000000000..ac100d22f --- /dev/null +++ b/parser/testdata/00371_union_all/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUInt64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function countIf (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier n" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2 (alias n)" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001481769, + "rows_read": 24, + "bytes_read": 951 + } +} diff --git a/parser/testdata/00371_union_all/metadata.json b/parser/testdata/00371_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00371_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00371_union_all/query.sql b/parser/testdata/00371_union_all/query.sql new file mode 100644 index 000000000..0a738b6b2 --- /dev/null +++ b/parser/testdata/00371_union_all/query.sql @@ -0,0 +1,2 @@ +select toUInt64(1) union all select countIf(n>0) from (select 2 as n); +SELECT q FROM (select [1,2,3] AS q UNION ALL select groupArray(arrayJoin([4,5,6])) AS q ORDER BY q) ORDER BY q; diff --git a/parser/testdata/00373_group_by_tuple/ast.json b/parser/testdata/00373_group_by_tuple/ast.json new file mode 100644 index 000000000..6b67c2b42 --- /dev/null +++ b/parser/testdata/00373_group_by_tuple/ast.json @@ -0,0 +1,139 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function if (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList 
(children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal 'Hello'" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'World'" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 39, + + "statistics": + { + "elapsed": 0.001675815, + "rows_read": 39, + "bytes_read": 1531 + } +} diff --git a/parser/testdata/00373_group_by_tuple/metadata.json b/parser/testdata/00373_group_by_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00373_group_by_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00373_group_by_tuple/query.sql b/parser/testdata/00373_group_by_tuple/query.sql new file mode 100644 index 000000000..2f24ce751 --- /dev/null +++ b/parser/testdata/00373_group_by_tuple/query.sql @@ -0,0 +1,2 @@ +SELECT if(number % 2, tuple(0, 'Hello'), tuple(1, 'World')) AS x, count() FROM (SELECT number FROM system.numbers LIMIT 10) GROUP BY x ORDER BY x; +SELECT if(0, tuple(0), tuple(1)) AS x GROUP BY x; diff --git a/parser/testdata/00374_any_last_if_merge/ast.json b/parser/testdata/00374_any_last_if_merge/ast.json new file mode 100644 index 000000000..1b599597a --- /dev/null +++ b/parser/testdata/00374_any_last_if_merge/ast.json @@ -0,0 +1,142 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (alias k) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_10000" + }, + { + "explain": " Function anyLastIf (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_1" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery 
(children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1000" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1000" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " Function notEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 40, + + "statistics": + { + "elapsed": 0.001948608, + "rows_read": 40, + "bytes_read": 1630 + } +} diff --git a/parser/testdata/00374_any_last_if_merge/metadata.json b/parser/testdata/00374_any_last_if_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00374_any_last_if_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00374_any_last_if_merge/query.sql b/parser/testdata/00374_any_last_if_merge/query.sql new file mode 100644 index 000000000..5aa613dfa --- /dev/null +++ b/parser/testdata/00374_any_last_if_merge/query.sql @@ -0,0 +1 @@ +SELECT number % 10000 AS k, anyLastIf(1.0, 0) AS x FROM (SELECT * FROM system.numbers LIMIT 1000 UNION ALL SELECT * FROM system.numbers LIMIT 1000) GROUP BY k HAVING x != 0; diff --git a/parser/testdata/00375_shard_group_uniq_array_of_string/ast.json b/parser/testdata/00375_shard_group_uniq_array_of_string/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00375_shard_group_uniq_array_of_string/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00375_shard_group_uniq_array_of_string/metadata.json b/parser/testdata/00375_shard_group_uniq_array_of_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00375_shard_group_uniq_array_of_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00375_shard_group_uniq_array_of_string/query.sql b/parser/testdata/00375_shard_group_uniq_array_of_string/query.sql new file mode 100644 index 000000000..c33eb5728 --- /dev/null +++ b/parser/testdata/00375_shard_group_uniq_array_of_string/query.sql @@ -0,0 +1,17 @@ +-- Tags: shard, long + +DROP TABLE IF EXISTS group_uniq_str; +CREATE TABLE group_uniq_str ENGINE = Memory AS SELECT number % 10 as id, toString(intDiv((number%10000), 10)) as v FROM system.numbers LIMIT 10000000; + +INSERT INTO group_uniq_str SELECT 2 as id, toString(number % 100) as v FROM system.numbers LIMIT 1000000; +INSERT INTO group_uniq_str SELECT 5 as id, toString(number % 100) as v FROM system.numbers LIMIT 10000000; + +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. 
Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. +SET parallel_replicas_index_analysis_only_on_coordinator = 0; + +SELECT length(groupUniqArray(v)) FROM group_uniq_str GROUP BY id ORDER BY id; +SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_str') GROUP BY id ORDER BY id SETTINGS max_rows_to_read = '100M'; +SELECT length(groupUniqArray(10)(v)) FROM group_uniq_str GROUP BY id ORDER BY id; +SELECT length(groupUniqArray(10000)(v)) FROM group_uniq_str GROUP BY id ORDER BY id; + +DROP TABLE IF EXISTS group_uniq_str; diff --git a/parser/testdata/00376_shard_group_uniq_array_of_int_array/ast.json b/parser/testdata/00376_shard_group_uniq_array_of_int_array/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00376_shard_group_uniq_array_of_int_array/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00376_shard_group_uniq_array_of_int_array/metadata.json b/parser/testdata/00376_shard_group_uniq_array_of_int_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00376_shard_group_uniq_array_of_int_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00376_shard_group_uniq_array_of_int_array/query.sql b/parser/testdata/00376_shard_group_uniq_array_of_int_array/query.sql new file mode 100644 index 000000000..4453c2628 --- /dev/null +++ b/parser/testdata/00376_shard_group_uniq_array_of_int_array/query.sql @@ -0,0 +1,16 @@ +-- Tags: long + +SET max_rows_to_read = '55M'; + +DROP TABLE IF EXISTS group_uniq_arr_int; +CREATE TABLE group_uniq_arr_int ENGINE = Memory AS + SELECT g as id, if(c == 0, [v], if(c == 1, emptyArrayInt64(), [v, v])) as v FROM + (SELECT intDiv(number%1000000, 100) as v, intDiv(number%100, 10) as g, number%10 as c FROM system.numbers WHERE c < 3 LIMIT 10000000); + +SELECT length(groupUniqArray(v)) FROM group_uniq_arr_int GROUP BY id ORDER BY id; +SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_arr_int') GROUP BY id ORDER BY id; +SELECT length(groupUniqArray(10)(v)) FROM group_uniq_arr_int GROUP BY id ORDER BY id; +SELECT length(groupUniqArray(100000)(v)) FROM group_uniq_arr_int GROUP BY id ORDER BY id; + + +DROP TABLE IF EXISTS group_uniq_arr_int; diff --git a/parser/testdata/00377_shard_group_uniq_array_of_string_array/ast.json b/parser/testdata/00377_shard_group_uniq_array_of_string_array/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00377_shard_group_uniq_array_of_string_array/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00377_shard_group_uniq_array_of_string_array/metadata.json b/parser/testdata/00377_shard_group_uniq_array_of_string_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00377_shard_group_uniq_array_of_string_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00377_shard_group_uniq_array_of_string_array/query.sql b/parser/testdata/00377_shard_group_uniq_array_of_string_array/query.sql new file mode 100644 index 000000000..1ec91ac23 --- /dev/null +++ b/parser/testdata/00377_shard_group_uniq_array_of_string_array/query.sql @@ -0,0 +1,12 @@ +-- Tags: shard, long +SET max_rows_to_read = '55M'; + +DROP TABLE IF EXISTS group_uniq_arr_str; +CREATE TABLE group_uniq_arr_str ENGINE = Memory AS + SELECT hex(intHash32(g)) as id, if(c == 0, [hex(v)], if(c == 1, 
emptyArrayString(), [hex(v), hex(v)])) as v FROM + (SELECT intDiv(number%1000000, 100) as v, intDiv(number%100, 10) as g, number%10 as c FROM system.numbers WHERE c < 3 LIMIT 10000000); + +SELECT length(groupUniqArray(v)) FROM group_uniq_arr_str GROUP BY id ORDER BY id; +SELECT length(groupUniqArray(v)) FROM remote('127.0.0.{2,3,4,5}', currentDatabase(), 'group_uniq_arr_str') GROUP BY id ORDER BY id; + +DROP TABLE IF EXISTS group_uniq_arr_str; diff --git a/parser/testdata/00378_json_quote_64bit_integers/ast.json b/parser/testdata/00378_json_quote_64bit_integers/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00378_json_quote_64bit_integers/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00378_json_quote_64bit_integers/metadata.json b/parser/testdata/00378_json_quote_64bit_integers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00378_json_quote_64bit_integers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00378_json_quote_64bit_integers/query.sql b/parser/testdata/00378_json_quote_64bit_integers/query.sql new file mode 100644 index 000000000..4325f4b2e --- /dev/null +++ b/parser/testdata/00378_json_quote_64bit_integers/query.sql @@ -0,0 +1,16 @@ +-- Tags: no-fasttest + +SET output_format_write_statistics = 0; +SET output_format_json_pretty_print = 0; +SET extremes = 1; +SET enable_analyzer = 1; + +SET output_format_json_quote_64bit_integers = 1; +SELECT toInt64(0) as i0, toUInt64(0) as u0, toInt64(9223372036854775807) as ip, toInt64(-9223372036854775808) as in, toUInt64(18446744073709551615) as up, [toInt64(0)] as arr, (toUInt64(0), toUInt64(0)) as tuple GROUP BY i0, u0, ip, in, up, arr, tuple WITH TOTALS FORMAT JSON; +SELECT toInt64(0) as i0, toUInt64(0) as u0, toInt64(9223372036854775807) as ip, toInt64(-9223372036854775808) as in, toUInt64(18446744073709551615) as up, [toInt64(0)] as arr, (toUInt64(0), toUInt64(0)) as tuple GROUP BY i0, u0, ip, in, up, arr, tuple WITH TOTALS FORMAT JSONCompact; +SELECT toInt64(0) as i0, toUInt64(0) as u0, toInt64(9223372036854775807) as ip, toInt64(-9223372036854775808) as in, toUInt64(18446744073709551615) as up, [toInt64(0)] as arr, (toUInt64(0), toUInt64(0)) as tuple GROUP BY i0, u0, ip, in, up, arr, tuple WITH TOTALS FORMAT JSONEachRow; + +SET output_format_json_quote_64bit_integers = 0; +SELECT toInt64(0) as i0, toUInt64(0) as u0, toInt64(9223372036854775807) as ip, toInt64(-9223372036854775808) as in, toUInt64(18446744073709551615) as up, [toInt64(0)] as arr, (toUInt64(0), toUInt64(0)) as tuple GROUP BY i0, u0, ip, in, up, arr, tuple WITH TOTALS FORMAT JSON; +SELECT toInt64(0) as i0, toUInt64(0) as u0, toInt64(9223372036854775807) as ip, toInt64(-9223372036854775808) as in, toUInt64(18446744073709551615) as up, [toInt64(0)] as arr, (toUInt64(0), toUInt64(0)) as tuple GROUP BY i0, u0, ip, in, up, arr, tuple WITH TOTALS FORMAT JSONCompact; +SELECT toInt64(0) as i0, toUInt64(0) as u0, toInt64(9223372036854775807) as ip, toInt64(-9223372036854775808) as in, toUInt64(18446744073709551615) as up, [toInt64(0)] as arr, (toUInt64(0), toUInt64(0)) as tuple GROUP BY i0, u0, ip, in, up, arr, tuple WITH TOTALS FORMAT JSONEachRow; diff --git a/parser/testdata/00381_first_significant_subdomain/ast.json b/parser/testdata/00381_first_significant_subdomain/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00381_first_significant_subdomain/ast.json @@ -0,0 +1 @@ +{"error": true} diff 
--git a/parser/testdata/00381_first_significant_subdomain/metadata.json b/parser/testdata/00381_first_significant_subdomain/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00381_first_significant_subdomain/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00381_first_significant_subdomain/query.sql b/parser/testdata/00381_first_significant_subdomain/query.sql new file mode 100644 index 000000000..5d8c53afc --- /dev/null +++ b/parser/testdata/00381_first_significant_subdomain/query.sql @@ -0,0 +1,30 @@ +SELECT + firstSignificantSubdomain('http://hello.canada.ca') AS canada, + firstSignificantSubdomain('http://hello.congo.com') AS congo, + firstSignificantSubdomain('http://pochemu.net-domena.ru') AS why; + +SELECT + firstSignificantSubdomain('ftp://www.meta.com.ua/news.html'), + firstSignificantSubdomain('https://www.bigmir.net/news.html'), + firstSignificantSubdomain('magnet:ukr.abc'), + firstSignificantSubdomain('ftp://www.yahoo.co.jp/news.html'), + firstSignificantSubdomain('https://api.www3.static.dev.ввв.гугл.ком'), + firstSignificantSubdomain('//www.meta.com.ua/news.html'); + +SELECT + firstSignificantSubdomain('http://hello.canada.c'), + firstSignificantSubdomain('http://hello.canada.'), + firstSignificantSubdomain('http://hello.canada'), + firstSignificantSubdomain('http://hello.c'), + firstSignificantSubdomain('http://hello.'), + firstSignificantSubdomain('http://hello'), + firstSignificantSubdomain('http://'), + firstSignificantSubdomain('http:/'), + firstSignificantSubdomain('http:'), + firstSignificantSubdomain('http'), + firstSignificantSubdomain('h'), + firstSignificantSubdomain('.'), + firstSignificantSubdomain(''), + firstSignificantSubdomain('http://hello.canada..com'), + firstSignificantSubdomain('http://hello..canada.com'), + firstSignificantSubdomain('http://hello.canada.com.'); diff --git a/parser/testdata/00383_utf8_validation/ast.json b/parser/testdata/00383_utf8_validation/ast.json new file mode 100644 index 000000000..2fd2cda5c --- /dev/null +++ b/parser/testdata/00383_utf8_validation/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001440384, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00383_utf8_validation/metadata.json b/parser/testdata/00383_utf8_validation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00383_utf8_validation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00383_utf8_validation/query.sql b/parser/testdata/00383_utf8_validation/query.sql new file mode 100644 index 000000000..8d73bd2f5 --- /dev/null +++ b/parser/testdata/00383_utf8_validation/query.sql @@ -0,0 +1,2 @@ +SET output_format_write_statistics = 0; +SELECT '\xED\x20\xA8' AS s FORMAT JSONCompact; diff --git a/parser/testdata/00384_column_aggregate_function_insert_from/ast.json b/parser/testdata/00384_column_aggregate_function_insert_from/ast.json new file mode 100644 index 000000000..c6d0cd949 --- /dev/null +++ b/parser/testdata/00384_column_aggregate_function_insert_from/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery aggregates (children 1)" + }, + { + "explain": " Identifier aggregates" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001755396, + "rows_read": 2, + 
"bytes_read": 72 + } +} diff --git a/parser/testdata/00384_column_aggregate_function_insert_from/metadata.json b/parser/testdata/00384_column_aggregate_function_insert_from/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00384_column_aggregate_function_insert_from/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00384_column_aggregate_function_insert_from/query.sql b/parser/testdata/00384_column_aggregate_function_insert_from/query.sql new file mode 100644 index 000000000..a723bc9b5 --- /dev/null +++ b/parser/testdata/00384_column_aggregate_function_insert_from/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS aggregates; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE aggregates (d Date, s AggregateFunction(uniq, UInt64)) ENGINE = MergeTree(d, d, 8192); + +INSERT INTO aggregates + SELECT toDate('2016-10-31') AS d, uniqState(toUInt64(arrayJoin(range(100)))) AS s + UNION ALL + SELECT toDate('2016-11-01') AS d, uniqState(toUInt64(arrayJoin(range(100)))) AS s; + +INSERT INTO aggregates SELECT toDate('2016-10-31') + number AS d, uniqState(toUInt64(arrayJoin(range(100)))) AS s FROM (SELECT * FROM system.numbers LIMIT 2) GROUP BY d; + +SELECT d, uniqMerge(s) FROM aggregates GROUP BY d ORDER BY d; + +INSERT INTO aggregates + SELECT toDate('2016-12-01') AS d, uniqState(toUInt64(arrayJoin(range(100)))) AS s + UNION ALL + SELECT toDate('2016-12-02') AS d, uniqState(toUInt64(arrayJoin(range(100)))) AS s + UNION ALL + SELECT toDate('2016-12-03') AS d, uniqState(toUInt64(arrayJoin(range(100)))) AS s; + +SELECT d, uniqMerge(s) FROM aggregates GROUP BY d ORDER BY d; + +DROP TABLE aggregates; diff --git a/parser/testdata/00386_enum_in_pk/ast.json b/parser/testdata/00386_enum_in_pk/ast.json new file mode 100644 index 000000000..d8f53b212 --- /dev/null +++ b/parser/testdata/00386_enum_in_pk/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery enum_pk (children 1)" + }, + { + "explain": " Identifier enum_pk" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001345748, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00386_enum_in_pk/metadata.json b/parser/testdata/00386_enum_in_pk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00386_enum_in_pk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00386_enum_in_pk/query.sql b/parser/testdata/00386_enum_in_pk/query.sql new file mode 100644 index 000000000..b59118ed4 --- /dev/null +++ b/parser/testdata/00386_enum_in_pk/query.sql @@ -0,0 +1,45 @@ +DROP TABLE IF EXISTS enum_pk; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE enum_pk (date Date DEFAULT '0000-00-00', x Enum8('0' = 0, '1' = 1, '2' = 2), d Enum8('0' = 0, '1' = 1, '2' = 2)) ENGINE = MergeTree(date, x, 1); +INSERT INTO enum_pk (x, d) VALUES ('0', '0')('1', '1')('0', '0')('1', '1')('1', '1')('0', '0')('0', '0')('2', '2')('0', '0')('1', '1')('1', '1')('1', '1')('1', '1')('0', '0'); + +SELECT cityHash64(groupArraySorted(100)(x)) FROM enum_pk WHERE x = '0'; +SELECT cityHash64(groupArraySorted(100)(d)) FROM enum_pk WHERE d = '0'; + +SELECT cityHash64(groupArraySorted(100)(x)) FROM enum_pk WHERE x != '0'; +SELECT cityHash64(groupArraySorted(100)(d)) FROM enum_pk WHERE d != '0'; + +SELECT cityHash64(groupArraySorted(100)(x)) FROM enum_pk WHERE x = '1'; +SELECT cityHash64(groupArraySorted(100)(d)) FROM enum_pk WHERE d = '1'; 
+ +SELECT cityHash64(groupArraySorted(100)(x)) FROM enum_pk WHERE exp2(toInt64(x != '1')) > 1; +SELECT cityHash64(groupArraySorted(100)(d)) FROM enum_pk WHERE exp2(toInt64(d != '1')) > 1; + +SELECT cityHash64(groupArraySorted(100)(x)) FROM enum_pk WHERE x = toString(0); +SELECT cityHash64(groupArraySorted(100)(d)) FROM enum_pk WHERE d = toString(0); + +SELECT cityHash64(groupArraySorted(100)(x)) FROM enum_pk WHERE (x = toString(0)) > 0; +SELECT cityHash64(groupArraySorted(100)(d)) FROM enum_pk WHERE (d = toString(0)) > 0; + +SELECT cityHash64(groupArraySorted(100)(x)) FROM enum_pk WHERE ((x != toString(1)) > 0) > 0; +SELECT cityHash64(groupArraySorted(100)(d)) FROM enum_pk WHERE ((d != toString(1)) > 0) > 0; + +SELECT cityHash64(groupArraySorted(100)(x)) FROM enum_pk WHERE exp2((x != toString(0)) != 0) > 1; +SELECT cityHash64(groupArraySorted(100)(d)) FROM enum_pk WHERE exp2((d != toString(0)) != 0) > 1; + +SELECT cityHash64(groupArraySorted(100)(x)) FROM enum_pk WHERE (-(x != toString(0)) = -1) > 0; +SELECT cityHash64(groupArraySorted(100)(d)) FROM enum_pk WHERE (-(d != toString(0)) = -1) > 0; + +SELECT cityHash64(groupArraySorted(100)(x)) FROM enum_pk WHERE 1 = 1; +SELECT cityHash64(groupArraySorted(100)(d)) FROM enum_pk WHERE 1 = 1; + +SELECT cityHash64(groupArraySorted(100)(x)) FROM enum_pk WHERE (x = '0' OR x = '1'); +SELECT cityHash64(groupArraySorted(100)(d)) FROM enum_pk WHERE (d = '0' OR d = '1'); + +SELECT cityHash64(groupArraySorted(100)(x)) FROM enum_pk WHERE x IN ('0', '1'); +SELECT cityHash64(groupArraySorted(100)(d)) FROM enum_pk WHERE d IN ('0', '1'); + +SELECT cityHash64(groupArraySorted(100)(x)) FROM enum_pk WHERE (x != '0' AND x != '1'); +SELECT cityHash64(groupArraySorted(100)(d)) FROM enum_pk WHERE (d != '0' AND d != '1'); + +DROP TABLE enum_pk; diff --git a/parser/testdata/00386_has_column_in_table/ast.json b/parser/testdata/00386_has_column_in_table/ast.json new file mode 100644 index 000000000..8179d9863 --- /dev/null +++ b/parser/testdata/00386_has_column_in_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery has_column_in_table (children 1)" + }, + { + "explain": " Identifier has_column_in_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001365759, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/00386_has_column_in_table/metadata.json b/parser/testdata/00386_has_column_in_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00386_has_column_in_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00386_has_column_in_table/query.sql b/parser/testdata/00386_has_column_in_table/query.sql new file mode 100644 index 000000000..a54af18f8 --- /dev/null +++ b/parser/testdata/00386_has_column_in_table/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS has_column_in_table; +CREATE TABLE has_column_in_table (i Int64, s String, nest Nested(x UInt8, y UInt32)) ENGINE = Memory; + +/* existing column */ +SELECT hasColumnInTable(currentDatabase(), 'has_column_in_table', 'i'); +SELECT hasColumnInTable('localhost', currentDatabase(), 'has_column_in_table', 'i'); +SELECT hasColumnInTable(currentDatabase(), 'has_column_in_table', 's'); +SELECT hasColumnInTable('localhost', currentDatabase(), 'has_column_in_table', 's'); +SELECT hasColumnInTable(currentDatabase(), 'has_column_in_table', 'nest.x'); +SELECT hasColumnInTable('localhost', currentDatabase(), 'has_column_in_table', 
'nest.x'); +SELECT hasColumnInTable(currentDatabase(), 'has_column_in_table', 'nest.y'); +SELECT hasColumnInTable('localhost', currentDatabase(), 'has_column_in_table', 'nest.y'); + +/* not existing column */ +SELECT hasColumnInTable(currentDatabase(), 'has_column_in_table', 'nest'); +SELECT hasColumnInTable('localhost', currentDatabase(), 'has_column_in_table', 'nest'); +SELECT hasColumnInTable(currentDatabase(), 'has_column_in_table', 'nest.not_existing'); +SELECT hasColumnInTable('localhost', currentDatabase(), 'has_column_in_table', 'nest.not_existing'); +SELECT hasColumnInTable(currentDatabase(), 'has_column_in_table', 'not_existing'); +SELECT hasColumnInTable('localhost', currentDatabase(), 'has_column_in_table', 'not_existing'); +SELECT hasColumnInTable('system', 'one', ''); + +/* bad queries */ +SELECT hasColumnInTable('', '', ''); -- { serverError UNKNOWN_TABLE } +SELECT hasColumnInTable('', 't', 'c'); -- { serverError UNKNOWN_DATABASE } +SELECT hasColumnInTable(currentDatabase(), '', 'c'); -- { serverError UNKNOWN_TABLE } +SELECT hasColumnInTable('d', 't', 's'); -- { serverError UNKNOWN_DATABASE } +SELECT hasColumnInTable(currentDatabase(), 't', 's'); -- { serverError UNKNOWN_TABLE } + + +DROP TABLE has_column_in_table; diff --git a/parser/testdata/00388_enum_with_totals/ast.json b/parser/testdata/00388_enum_with_totals/ast.json new file mode 100644 index 000000000..503b46e05 --- /dev/null +++ b/parser/testdata/00388_enum_with_totals/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery enum_totals (children 1)" + }, + { + "explain": " Identifier enum_totals" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001416688, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/00388_enum_with_totals/metadata.json b/parser/testdata/00388_enum_with_totals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00388_enum_with_totals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00388_enum_with_totals/query.sql b/parser/testdata/00388_enum_with_totals/query.sql new file mode 100644 index 000000000..289174f13 --- /dev/null +++ b/parser/testdata/00388_enum_with_totals/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS enum_totals; +CREATE TABLE enum_totals (e Enum8('hello' = 1, 'world' = 2)) ENGINE = Memory; +INSERT INTO enum_totals VALUES ('hello'), ('world'), ('world'); + +SELECT e, count() FROM enum_totals GROUP BY e WITH TOTALS ORDER BY e; +DROP TABLE enum_totals; diff --git a/parser/testdata/00389_concat_operator/ast.json b/parser/testdata/00389_concat_operator/ast.json new file mode 100644 index 000000000..30c1a10c0 --- /dev/null +++ b/parser/testdata/00389_concat_operator/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'Hello'" + }, + { + "explain": " Literal ', '" + }, + { + "explain": " Literal 'World'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001150154, + "rows_read": 9, + "bytes_read": 313 + } +} diff --git a/parser/testdata/00389_concat_operator/metadata.json 
b/parser/testdata/00389_concat_operator/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00389_concat_operator/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00389_concat_operator/query.sql b/parser/testdata/00389_concat_operator/query.sql new file mode 100644 index 000000000..c64e42963 --- /dev/null +++ b/parser/testdata/00389_concat_operator/query.sql @@ -0,0 +1 @@ +SELECT 'Hello' || ', ' || 'World'; diff --git a/parser/testdata/00390_array_sort/ast.json b/parser/testdata/00390_array_sort/ast.json new file mode 100644 index 000000000..ffb93aace --- /dev/null +++ b/parser/testdata/00390_array_sort/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal Array_[UInt64_2, UInt64_1, UInt64_3] (alias arr)" + }, + { + "explain": " Function arraySort (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier arr" + }, + { + "explain": " Function arrayReverseSort (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier arr" + }, + { + "explain": " Function arraySort (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function negate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier arr" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001569257, + "rows_read": 22, + "bytes_read": 876 + } +} diff --git a/parser/testdata/00390_array_sort/metadata.json b/parser/testdata/00390_array_sort/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00390_array_sort/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00390_array_sort/query.sql b/parser/testdata/00390_array_sort/query.sql new file mode 100644 index 000000000..bead64ff5 --- /dev/null +++ b/parser/testdata/00390_array_sort/query.sql @@ -0,0 +1,54 @@ +SELECT [2, 1, 3] AS arr, arraySort(arr), arrayReverseSort(arr), arraySort(x -> -x, arr); +SELECT materialize([2, 1, 3]) AS arr, arraySort(arr), arrayReverseSort(arr), arraySort(x -> -x, arr); + +SELECT arrayMap(x -> toString(x), [2, 1, 3]) AS arr, arraySort(arr), arrayReverseSort(arr), arraySort(x -> reverse(x), arr); +SELECT arrayMap(x -> toString(x), materialize([2, 1, 3])) AS arr, arraySort(arr), arrayReverseSort(arr), arraySort(x -> reverse(x), arr); + +SELECT arrayMap(x -> range(x), [2, 1, 3]) AS arr, arraySort(arr), arrayReverseSort(arr), arraySort(x -> -length(x), arr); +SELECT arrayMap(x -> range(x), materialize([2, 1, 3])) AS arr, arraySort(arr), arrayReverseSort(arr), arraySort(x -> -length(x), arr); + +SELECT splitByChar('0', toString(intHash64(number))) AS arr, arraySort(arr) AS sorted, arraySort(x -> toUInt64OrZero(x), arr) AS sorted_nums FROM system.numbers LIMIT 10; + +SELECT arrayReverseSort(number % 2 ? 
emptyArrayUInt64() : range(number)) FROM system.numbers LIMIT 10; + +SELECT arraySort((x, y) -> y, ['hello', 'world'], [2, 1]); + +-- Using arrayResize to trim the unsorted bit of the array that is normally left in unspecified order +SELECT [9,4,8,10,5,2,3,7,1,6] AS arr, 4 AS lim, arrayResize(arrayPartialReverseSort(lim, arr), lim), arrayResize(arrayPartialSort(x -> -x, lim, arr), lim); +SELECT materialize([9,4,8,10,5,2,3,7,1,6]) AS arr, 4 AS lim, arrayResize(arrayPartialReverseSort(lim, arr), lim), arrayResize(arrayPartialSort(x -> -x, lim, arr), lim); + +SELECT arrayMap(x -> toString(x), [9,4,8,10,5,2,3,7,1,6]) AS arr, 4 AS lim, arrayResize(arrayPartialReverseSort(lim, arr), lim), arrayResize(arrayPartialSort(x -> reverse(x), lim, arr), lim); +SELECT arrayMap(x -> toString(x), materialize([9,4,8,10,5,2,3,7,1,6])) AS arr, 4 AS lim, arrayResize(arrayPartialReverseSort(lim, arr), lim), arrayResize(arrayPartialSort(x -> reverse(x), lim, arr), lim); + +SELECT arrayMap(x -> range(x), [4,1,2,3]) AS arr, 2 AS lim, arrayResize(arrayPartialSort(lim, arr), lim), arrayResize(arrayPartialReverseSort(lim, arr), lim), arrayResize(arrayPartialSort(x -> -length(x), lim, arr), lim); +SELECT arrayMap(x -> range(x), materialize([4,1,2,3])) AS arr, 2 AS lim, arrayResize(arrayPartialSort(lim, arr), lim), arrayResize(arrayPartialReverseSort(lim, arr), lim), arrayResize(arrayPartialSort(x -> -length(x), lim, arr), lim); + +SELECT splitByChar('0', toString(intHash64(number))) AS arr, 3 AS lim, arrayResize(arrayPartialSort(lim, arr), lim) AS sorted, arrayResize(arrayPartialSort(x -> toUInt64OrZero(x), lim, arr), lim) AS sorted_nums FROM system.numbers LIMIT 10; + +SELECT res FROM (SELECT arrayPartialReverseSort(2, number % 2 ? emptyArrayUInt64() : range(number)) AS arr, arrayResize(arr, if(empty(arr), 0, 2)) AS res FROM system.numbers LIMIT 10); + +SELECT arrayResize(arrayPartialSort((x, y) -> y, 3, ['directly','ignore','wrongly','relocate','upright'], [4,2,1,3,5]), 3); + +SELECT 2 as nelems, arrayResize(arrayPartialSort(nelems, [NULL,9,4,8,10,5,2,3,7,1,6]), nelems); +SELECT 2 as nelems, arrayResize(arrayPartialSort(nelems, [NULL,'9','4','8','10','5','2','3','7','1','6']), nelems); +SELECT 2 as nelems, arrayResize(arrayPartialReverseSort(nelems, [NULL,9,4,8,10,5,2,3,7,1,6]), nelems); +SELECT 2 as nelems, arrayResize(arrayPartialReverseSort(nelems, [NULL,'9','4','8','10','5','2','3','7','1','6']), nelems); + + +SELECT 4 as nelems, arrayResize(arrayPartialSort(nelems, [1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]), nelems); +SELECT 4 as nelems, arrayResize(arrayPartialSort((x) -> -x, nelems, [1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]), nelems); +SELECT 10 as nelems, arrayResize(arrayPartialSort(nelems, [1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]), nelems); +SELECT 10 as nelems, arrayResize(arrayPartialSort(nelems, [1, nan, 2, NULL, 3, nan, -4, NULL, inf, -inf]), nelems); + +SELECT 3 as nelems, [[1,2],[-10,-20],[10,20],[0,0],[-1.5,1]] as arr, arrayResize(arrayPartialSort(nelems, arr), nelems), arrayResize(arrayPartialReverseSort(nelems, arr), nelems), arrayResize(arrayPartialSort((x) -> arraySum(x), nelems, arr), nelems); + +SELECT 0 as nelems, [NULL,9,4,8,10,5,2,3,7,1,6] AS arr, arrayPartialSort(nelems, arr), arrayPartialReverseSort(nelems, arr), arrayPartialSort((x) -> -x, nelems, arr); +SELECT 10 as nelems, [NULL,9,4,8,10,5,2,3,7,1,6] AS arr, arrayPartialSort(nelems, arr), arrayPartialReverseSort(nelems, arr), arrayPartialSort((x) -> -x, nelems, arr); + + +SELECT arrayPartialSort([1,2,3]); -- { 
serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT arrayPartialSort(2, [1,2,3], [1]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayPartialSort(2, [1,2,3], 3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayPartialSort(arraySort([1,2,3]), [1,2,3]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayMap(x -> range(x), [4, 1, 2, 3]) AS arr, 100 AS lim, arrayResize(arrayPartialSort(arrayPartialSort(lim, arr), arr), lim), arrayResize(arrayPartialReverseSort(lim, arr), lim), arrayResize(arrayPartialSort(x -> (-length(x)), lim, arr), lim); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayPartialReverseSort(arraySort((x, y) -> y, [NULL, NULL], [NULL, NULL]), arr), arrayMap(x -> toString(x), [257, -9223372036854775807, 2, -2147483648, 2147483648, NULL, 65536, -2147483648, 2, 65535]) AS arr, NULL, 100 AS lim, 65536, arrayResize(arrayPartialSort(x -> reverse(x), lim, arr), lim) GROUP BY [NULL, 1023, -2, NULL, 255, '0', NULL, 9223372036854775806] WITH ROLLUP; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT, NO_COMMON_TYPE } diff --git a/parser/testdata/00392_enum_nested_alter/ast.json b/parser/testdata/00392_enum_nested_alter/ast.json new file mode 100644 index 000000000..a6ecfc147 --- /dev/null +++ b/parser/testdata/00392_enum_nested_alter/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery enum_nested_alter (children 1)" + }, + { + "explain": " Identifier enum_nested_alter" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001220713, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/00392_enum_nested_alter/metadata.json b/parser/testdata/00392_enum_nested_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00392_enum_nested_alter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00392_enum_nested_alter/query.sql b/parser/testdata/00392_enum_nested_alter/query.sql new file mode 100644 index 000000000..b5989885d --- /dev/null +++ b/parser/testdata/00392_enum_nested_alter/query.sql @@ -0,0 +1,69 @@ +DROP TABLE IF EXISTS enum_nested_alter; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE enum_nested_alter +(d Date DEFAULT '2000-01-01', x UInt64, n Nested(a String, e Enum8('Hello' = 1), b UInt8)) +ENGINE = MergeTree(d, x, 1); + +INSERT INTO enum_nested_alter (x, n.e) VALUES (1, ['Hello']); +SELECT * FROM enum_nested_alter; + +ALTER TABLE enum_nested_alter MODIFY COLUMN n.e Array(Enum8('Hello' = 1, 'World' = 2)); +INSERT INTO enum_nested_alter (x, n.e) VALUES (2, ['World']); +SELECT * FROM enum_nested_alter ORDER BY x; + +ALTER TABLE enum_nested_alter MODIFY COLUMN n.e Array(Enum16('Hello' = 1, 'World' = 2, 'a' = 300)); +SELECT * FROM enum_nested_alter ORDER BY x; + +ALTER TABLE enum_nested_alter MODIFY COLUMN n.e Array(UInt16); +SELECT * FROM enum_nested_alter ORDER BY x; + +ALTER TABLE enum_nested_alter MODIFY COLUMN n.e Array(Enum16('Hello' = 1, 'World' = 2, 'a' = 300)); +SELECT * FROM enum_nested_alter ORDER BY x; + +ALTER TABLE enum_nested_alter MODIFY COLUMN n.e Array(String); +SELECT * FROM enum_nested_alter ORDER BY x; + +ALTER TABLE enum_nested_alter MODIFY COLUMN n.e Array(Enum16('Hello' = 1, 'World' = 2, 'a' = 300)); +SELECT * FROM enum_nested_alter ORDER BY x; + +DROP TABLE enum_nested_alter; + + +CREATE TABLE enum_nested_alter +( + d Date DEFAULT '2000-01-01', + x UInt64, + tasks Nested( + errcategory Enum8( + 'undefined' = 0, 'system' = 1, 'generic' = 
2, 'asio.netdb' = 3, 'asio.misc' = 4, + 'asio.addrinfo' = 5, 'rtb.client' = 6, 'rtb.logic' = 7, 'http.status' = 8), + status Enum16('hello' = 1, 'world' = 2))) +ENGINE = MergeTree(d, x, 1); + +INSERT INTO enum_nested_alter (x, tasks.errcategory, tasks.status) VALUES (1, ['system', 'rtb.client'], ['hello', 'world']); +SELECT * FROM enum_nested_alter ORDER BY x; + +ALTER TABLE enum_nested_alter + MODIFY COLUMN tasks.errcategory Array(Enum8( + 'undefined' = 0, 'system' = 1, 'generic' = 2, 'asio.netdb' = 3, 'asio.misc' = 4, + 'asio.addrinfo' = 5, 'rtb.client' = 6, 'rtb.logic' = 7, 'http.status' = 8, 'http.code' = 9)), + MODIFY COLUMN tasks.status Array(Enum8('hello' = 1, 'world' = 2, 'goodbye' = 3)); + +INSERT INTO enum_nested_alter (x, tasks.errcategory, tasks.status) VALUES (2, ['http.status', 'http.code'], ['hello', 'goodbye']); +SELECT * FROM enum_nested_alter ORDER BY x; + +DROP TABLE enum_nested_alter; + + +DROP TABLE IF EXISTS enum_nested_alter; +CREATE TABLE enum_nested_alter +(d Date DEFAULT '2000-01-01', x UInt64, n Nested(a String, e Enum8('Hello.world' = 1), b UInt8)) +ENGINE = MergeTree(d, x, 1); + +INSERT INTO enum_nested_alter (x, n.e) VALUES (1, ['Hello.world']); +SELECT * FROM enum_nested_alter; + +ALTER TABLE enum_nested_alter MODIFY COLUMN n.e Array(Enum8('Hello.world' = 1, 'a' = 2)); +SELECT * FROM enum_nested_alter; + +DROP TABLE enum_nested_alter; diff --git a/parser/testdata/00393_if_with_constant_condition/ast.json b/parser/testdata/00393_if_with_constant_condition/ast.json new file mode 100644 index 000000000..1beccdf9c --- /dev/null +++ b/parser/testdata/00393_if_with_constant_condition/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001259016, + "rows_read": 9, + "bytes_read": 315 + } +} diff --git a/parser/testdata/00393_if_with_constant_condition/metadata.json b/parser/testdata/00393_if_with_constant_condition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00393_if_with_constant_condition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00393_if_with_constant_condition/query.sql b/parser/testdata/00393_if_with_constant_condition/query.sql new file mode 100644 index 000000000..c35e4ff24 --- /dev/null +++ b/parser/testdata/00393_if_with_constant_condition/query.sql @@ -0,0 +1,15 @@ +SELECT 1 ? 1 : 0; +SELECT 0 ? not_existing_column : 1 FROM system.numbers LIMIT 1; +SELECT 1 ? (0 ? not_existing_column : 2) : 0 FROM system.numbers LIMIT 1; + +/* scalar subquery optimization */ +SELECT (SELECT toUInt8(number + 1) FROM system.numbers LIMIT 1) ? 1 : 2 FROM system.numbers LIMIT 1; + +/* alias test */ +SELECT (1 as a) ? (2 as b) : (3 as c) as d, a, b, c, d FORMAT TSKV; +SELECT (0 as a) ? (2 as b) : (3 as c) as d, a, b, c, d FORMAT TSKV; + +SELECT (1 as a) ? 
(number + 2 as b) : (number + 3 as c) as d, a, b, c, d FROM system.numbers LIMIT 1 FORMAT TSKV; + +/* integration test */ +SELECT (SELECT hasColumnInTable('system', 'numbers', 'not_existing')) ? not_existing : 42 as not_existing FROM system.numbers LIMIT 1 FORMAT TSKV; \ No newline at end of file diff --git a/parser/testdata/00394_new_nested_column_keeps_offsets/ast.json b/parser/testdata/00394_new_nested_column_keeps_offsets/ast.json new file mode 100644 index 000000000..5795f143b --- /dev/null +++ b/parser/testdata/00394_new_nested_column_keeps_offsets/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alter_00394 (children 1)" + }, + { + "explain": " Identifier alter_00394" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001336067, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/00394_new_nested_column_keeps_offsets/metadata.json b/parser/testdata/00394_new_nested_column_keeps_offsets/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00394_new_nested_column_keeps_offsets/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00394_new_nested_column_keeps_offsets/query.sql b/parser/testdata/00394_new_nested_column_keeps_offsets/query.sql new file mode 100644 index 000000000..f058a852e --- /dev/null +++ b/parser/testdata/00394_new_nested_column_keeps_offsets/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS alter_00394; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE alter_00394 (d Date, k UInt64, i32 Int32, n Nested(ui8 UInt8, s String)) ENGINE=MergeTree(d, k, 8192); + +INSERT INTO alter_00394 VALUES ('2015-01-01', 3, 30, [1,2,3], ['1','12','123']); +INSERT INTO alter_00394 VALUES ('2015-01-01', 2, 20, [1,2], ['1','12']); +INSERT INTO alter_00394 VALUES ('2015-01-01', 1, 10, [1], ['1']); + +ALTER TABLE alter_00394 ADD COLUMN `n.i8` Array(Int8) AFTER i32; + +SELECT `n.i8`, `n.ui8`, `n.s` FROM alter_00394 ORDER BY k; +SELECT `n.i8` FROM alter_00394 ORDER BY k; + +OPTIMIZE TABLE alter_00394; + +SELECT `n.i8` FROM alter_00394 ORDER BY k; + +DROP TABLE IF EXISTS alter_00394; diff --git a/parser/testdata/00394_replaceall_vector_fixed/ast.json b/parser/testdata/00394_replaceall_vector_fixed/ast.json new file mode 100644 index 000000000..b48d4a92d --- /dev/null +++ b/parser/testdata/00394_replaceall_vector_fixed/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery replaceall (children 1)" + }, + { + "explain": " Identifier replaceall" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001142965, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00394_replaceall_vector_fixed/metadata.json b/parser/testdata/00394_replaceall_vector_fixed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00394_replaceall_vector_fixed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00394_replaceall_vector_fixed/query.sql b/parser/testdata/00394_replaceall_vector_fixed/query.sql new file mode 100644 index 000000000..ada48add4 --- /dev/null +++ b/parser/testdata/00394_replaceall_vector_fixed/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS replaceall; +CREATE TABLE replaceall (str FixedString(3)) ENGINE = Memory; + +INSERT INTO replaceall VALUES ('foo'); +INSERT INTO replaceall VALUES ('boa'); +INSERT INTO replaceall VALUES 
('bar'); +INSERT INTO replaceall VALUES ('bao'); + +SELECT + str, + replaceAll(str, 'o', '*') AS replaced +FROM replaceall +ORDER BY str ASC; + +DROP TABLE replaceall; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE replaceall (date Date DEFAULT today(), fs FixedString(16)) ENGINE = MergeTree(date, (date, fs), 8192); +INSERT INTO replaceall (fs) VALUES ('54db0d43009d\0\0\0\0'), ('fe2b58224766cf10'), ('54db0d43009d\0\0\0\0'), ('fe2b58224766cf10'); + +SELECT fs, replaceAll(fs, '\0', '*') +FROM replaceall +ORDER BY fs ASC; + +DROP TABLE replaceall; diff --git a/parser/testdata/00395_nullable/ast.json b/parser/testdata/00395_nullable/ast.json new file mode 100644 index 000000000..fd807c9eb --- /dev/null +++ b/parser/testdata/00395_nullable/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '----- NULL value -----'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001067926, + "rows_read": 5, + "bytes_read": 193 + } +} diff --git a/parser/testdata/00395_nullable/metadata.json b/parser/testdata/00395_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00395_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00395_nullable/query.sql b/parser/testdata/00395_nullable/query.sql new file mode 100644 index 000000000..a55c80e24 --- /dev/null +++ b/parser/testdata/00395_nullable/query.sql @@ -0,0 +1,501 @@ +SELECT '----- NULL value -----'; + +SELECT NULL; +SELECT 1 + NULL; +SELECT abs(NULL); +SELECT NULL + NULL; + +SELECT '----- MergeTree engine -----'; + +DROP TABLE IF EXISTS test1_00395; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE test1_00395( +col1 UInt64, col2 Nullable(UInt64), +col3 String, col4 Nullable(String), +col5 Array(UInt64), col6 Array(Nullable(UInt64)), +col7 Array(String), col8 Array(Nullable(String)), +d Date) Engine = MergeTree(d, (col1, d), 8192); + +INSERT INTO test1_00395 VALUES (1, 1, 'a', 'a', [1], [1], ['a'], ['a'], '2000-01-01'), + (1, NULL, 'a', 'a', [1], [1], ['a'], ['a'], '2000-01-01'), + (1, 1, 'a', NULL, [1], [1], ['a'], ['a'], '2000-01-01'), + (1, 1, 'a', 'a', [1], [NULL], ['a'], ['a'], '2000-01-01'), + (1, 1, 'a', 'a', [1], [1], ['a'], [NULL], '2000-01-01'); +SELECT * FROM test1_00395 ORDER BY col1,col2,col3,col4,col5,col6,col7,col8 ASC; + + +SELECT '----- Memory engine -----'; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395( +col1 UInt64, col2 Nullable(UInt64), +col3 String, col4 Nullable(String), +col5 Array(UInt64), col6 Array(Nullable(UInt64)), +col7 Array(String), col8 Array(Nullable(String)), +d Date) Engine = Memory; + +INSERT INTO test1_00395 VALUES (1, 1, 'a', 'a', [1], [1], ['a'], ['a'], '2000-01-01'), + (1, NULL, 'a', 'a', [1], [1], ['a'], ['a'], '2000-01-01'), + (1, 1, 'a', NULL, [1], [1], ['a'], ['a'], '2000-01-01'), + (1, 1, 'a', 'a', [1], [NULL], ['a'], ['a'], '2000-01-01'), + (1, 1, 'a', 'a', [1], [1], ['a'], [NULL], '2000-01-01'); +SELECT * FROM test1_00395 ORDER BY col1,col2,col3,col4,col5,col6,col7,col8 ASC; + +SELECT '----- TinyLog engine -----'; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395( +col1 UInt64, col2 Nullable(UInt64), +col3 String, col4 Nullable(String), +col5 Array(UInt64), col6 
Array(Nullable(UInt64)), +col7 Array(String), col8 Array(Nullable(String)), +d Date) Engine = TinyLog; + +INSERT INTO test1_00395 VALUES (1, 1, 'a', 'a', [1], [1], ['a'], ['a'], '2000-01-01'), + (1, NULL, 'a', 'a', [1], [1], ['a'], ['a'], '2000-01-01'), + (1, 1, 'a', NULL, [1], [1], ['a'], ['a'], '2000-01-01'), + (1, 1, 'a', 'a', [1], [NULL], ['a'], ['a'], '2000-01-01'), + (1, 1, 'a', 'a', [1], [1], ['a'], [NULL], '2000-01-01'); +SELECT * FROM test1_00395 ORDER BY col1,col2,col3,col4,col5,col6,col7,col8 ASC; + +SELECT '----- Log engine -----'; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395( +col1 UInt64, col2 Nullable(UInt64), +col3 String, col4 Nullable(String), +col5 Array(UInt64), col6 Array(Nullable(UInt64)), +col7 Array(String), col8 Array(Nullable(String)), +d Date) Engine = Log; + +INSERT INTO test1_00395 VALUES (1, 1, 'a', 'a', [1], [1], ['a'], ['a'], '2000-01-01'), + (1, NULL, 'a', 'a', [1], [1], ['a'], ['a'], '2000-01-01'), + (1, 1, 'a', NULL, [1], [1], ['a'], ['a'], '2000-01-01'), + (1, 1, 'a', 'a', [1], [NULL], ['a'], ['a'], '2000-01-01'), + (1, 1, 'a', 'a', [1], [1], ['a'], [NULL], '2000-01-01'); +SELECT * FROM test1_00395 ORDER BY col1,col2,col3,col4,col5,col6,col7,col8 ASC; + +SELECT '----- StripeLog engine -----'; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395( +col1 UInt64, col2 Nullable(UInt64), +col3 String, col4 Nullable(String), +col5 Array(UInt64), col6 Array(Nullable(UInt64)), +col7 Array(String), col8 Array(Nullable(String)), +d Date) Engine = StripeLog; + +INSERT INTO test1_00395 VALUES (1, 1, 'a', 'a', [1], [1], ['a'], ['a'], '2000-01-01'), + (1, NULL, 'a', 'a', [1], [1], ['a'], ['a'], '2000-01-01'), + (1, 1, 'a', NULL, [1], [1], ['a'], ['a'], '2000-01-01'), + (1, 1, 'a', 'a', [1], [NULL], ['a'], ['a'], '2000-01-01'), + (1, 1, 'a', 'a', [1], [1], ['a'], [NULL], '2000-01-01'); +SELECT * FROM test1_00395 ORDER BY col1,col2,col3,col4,col5,col6,col7,col8 ASC; + + +SELECT '----- Insert with expression -----'; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Array(Nullable(UInt64))) Engine=Memory; +INSERT INTO test1_00395(col1) VALUES ([1+1]); +SELECT col1 FROM test1_00395 ORDER BY col1 ASC; + +SELECT '----- Insert. Source and target columns have same types up to nullability. 
-----'; +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Nullable(UInt64), col2 UInt64) Engine=Memory; +DROP TABLE IF EXISTS test2; +CREATE TABLE test2(col1 UInt64, col2 Nullable(UInt64)) Engine=Memory; +INSERT INTO test1_00395(col1,col2) VALUES (2,7)(6,9)(5,1)(4,3)(8,2); +INSERT INTO test2(col1,col2) SELECT col1,col2 FROM test1_00395; +SELECT col1,col2 FROM test2 ORDER BY col1,col2 ASC; + +SELECT '----- Apply functions and aggregate functions on columns that may contain null values -----'; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Nullable(UInt64), col2 Nullable(UInt64)) Engine=Memory; +INSERT INTO test1_00395(col1,col2) VALUES (2,7)(NULL,6)(9,NULL)(NULL,NULL)(5,1)(42,42); +SELECT col1, col2, col1 + col2, col1 * 7 FROM test1_00395 ORDER BY col1,col2 ASC; +SELECT sum(col1) FROM test1_00395; +SELECT sum(col1 * 7) FROM test1_00395; + +SELECT '----- isNull, isNotNull -----'; + +SELECT col1, col2, isNull(col1), isNotNull(col2) FROM test1_00395 ORDER BY col1,col2 ASC; + +SELECT '----- ifNull, nullIf -----'; + +SELECT col1, col2, ifNull(col1,col2) FROM test1_00395 ORDER BY col1,col2 ASC; +SELECT col1, col2, nullIf(col1,col2) FROM test1_00395 ORDER BY col1,col2 ASC; +SELECT nullIf(1, NULL); + +SELECT '----- coalesce -----'; + +SELECT coalesce(NULL); +SELECT coalesce(NULL, 1); +SELECT coalesce(NULL, NULL, 1); +SELECT coalesce(NULL, 42, NULL, 1); +SELECT coalesce(NULL, NULL, NULL); +SELECT col1, col2, coalesce(col1, col2) FROM test1_00395 ORDER BY col1, col2 ASC; +SELECT col1, col2, coalesce(col1, col2, 99) FROM test1_00395 ORDER BY col1, col2 ASC; + +SELECT '----- assumeNotNull -----'; + +SELECT res FROM (SELECT col1, assumeNotNull(col1) AS res FROM test1_00395) WHERE col1 IS NOT NULL ORDER BY res ASC; + +SELECT '----- IS NULL, IS NOT NULL -----'; + +SELECT col1 FROM test1_00395 WHERE col1 IS NOT NULL ORDER BY col1 ASC; +SELECT col1 FROM test1_00395 WHERE col1 IS NULL; + +SELECT '----- if -----'; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395 (col1 Nullable(String)) ENGINE=TinyLog; +INSERT INTO test1_00395 VALUES ('a'), ('b'), ('c'), (NULL); + +SELECT col1, if(col1 IN ('a' ,'b'), 1, 0) AS t, toTypeName(t) FROM test1_00395; +SELECT col1, if(col1 IN ('a' ,'b'), NULL, 0) AS t, toTypeName(t) FROM test1_00395; + +SELECT '----- case when -----'; + +SELECT col1, CASE WHEN col1 IN ('a' ,'b') THEN 1 ELSE 0 END AS t, toTypeName(t) FROM test1_00395; +SELECT col1, CASE WHEN col1 IN ('a' ,'b') THEN NULL ELSE 0 END AS t, toTypeName(t) FROM test1_00395; +SELECT col1, CASE WHEN col1 IN ('a' ,'b') THEN 1 END AS t, toTypeName(t) FROM test1_00395; + +SELECT '----- multiIf -----'; + +SELECT multiIf(1, NULL, 1, 3, 4); +SELECT multiIf(1, 2, 1, NULL, 4); +SELECT multiIf(NULL, NULL, NULL); + +SELECT multiIf(1, 'A', 1, NULL, 'DEF'); +SELECT multiIf(1, toFixedString('A', 16), 1, NULL, toFixedString('DEF', 16)); + +SELECT multiIf(NULL, 2, 1, 3, 4); +SELECT multiIf(1, 2, NULL, 3, 4); + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Nullable(Int8), col2 Nullable(UInt16), col3 Nullable(Float32)) Engine=TinyLog; +INSERT INTO test1_00395(col1,col2,col3) VALUES (toInt8(1),toUInt16(2),toFloat32(3))(NULL,toUInt16(1),toFloat32(2))(toInt8(1),NULL,toFloat32(2))(toInt8(1),toUInt16(2),NULL); +SELECT multiIf(col1 == 1, col2, col2 == 2, col3, col3 == 3, col1, 42) FROM test1_00395; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(cond1 Nullable(UInt8), then1 Int8, cond2 UInt8, then2 Nullable(UInt16), then3 Nullable(Float32)) Engine=TinyLog; +INSERT 
INTO test1_00395(cond1,then1,cond2,then2,then3) VALUES(1,1,1,42,99)(0,7,1,99,42)(NULL,6,2,99,NULL); +SELECT multiIf(cond1,then1,cond2,then2,then3) FROM test1_00395; + +SELECT '----- Array functions -----'; + +SELECT [NULL]; +SELECT [NULL,NULL,NULL]; +SELECT [NULL,2,3]; +SELECT [1,NULL,3]; +SELECT [1,2,NULL]; + +SELECT [NULL,'b','c']; +SELECT ['a',NULL,'c']; +SELECT ['a','b',NULL]; + +SELECT '----- arrayElement -----'; + +SELECT '----- constant arrays -----'; + +SELECT arrayElement([1,NULL,2,3], 1); +SELECT arrayElement([1,NULL,2,3], 2); +SELECT arrayElement([1,NULL,2,3], 3); +SELECT arrayElement([1,NULL,2,3], 4); + +SELECT arrayElement(['a',NULL,'c','d'], 1); +SELECT arrayElement(['a',NULL,'c','d'], 2); +SELECT arrayElement(['a',NULL,'c','d'], 3); +SELECT arrayElement(['a',NULL,'c','d'], 4); + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 UInt64) Engine=TinyLog; +INSERT INTO test1_00395(col1) VALUES(1),(2),(3),(4); + +SELECT arrayElement([1,NULL,2,3], col1) FROM test1_00395; + +SELECT '----- variable arrays -----'; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Array(Nullable(UInt64))) Engine=TinyLog; +INSERT INTO test1_00395(col1) VALUES([2,3,7,NULL]), + ([NULL,3,7,4]), + ([2,NULL,7,NULL]), + ([2,3,NULL,4]), + ([NULL,NULL,NULL,NULL]); + +SELECT arrayElement(col1, 1) FROM test1_00395; +SELECT arrayElement(col1, 2) FROM test1_00395; +SELECT arrayElement(col1, 3) FROM test1_00395; +SELECT arrayElement(col1, 4) FROM test1_00395; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Array(Nullable(String))) Engine=TinyLog; +INSERT INTO test1_00395(col1) VALUES(['a','bc','def',NULL]), + ([NULL,'bc','def','ghij']), + (['a',NULL,'def',NULL]), + (['a','bc',NULL,'ghij']), + ([NULL,NULL,NULL,NULL]); + +SELECT arrayElement(col1, 1) FROM test1_00395; +SELECT arrayElement(col1, 2) FROM test1_00395; +SELECT arrayElement(col1, 3) FROM test1_00395; +SELECT arrayElement(col1, 4) FROM test1_00395; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Array(Nullable(UInt64)), col2 UInt64) Engine=TinyLog; +INSERT INTO test1_00395(col1,col2) VALUES([2,3,7,NULL], 1), + ([NULL,3,7,4], 2), + ([2,NULL,7,NULL], 3), + ([2,3,NULL,4],4), + ([NULL,NULL,NULL,NULL],3); + +SELECT arrayElement(col1,col2) FROM test1_00395; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Array(Nullable(String)), col2 UInt64) Engine=TinyLog; +INSERT INTO test1_00395(col1,col2) VALUES(['a','bc','def',NULL], 1), + ([NULL,'bc','def','ghij'], 2), + (['a',NULL,'def','ghij'], 3), + (['a','bc',NULL,'ghij'],4), + ([NULL,NULL,NULL,NULL],3); + +SELECT arrayElement(col1,col2) FROM test1_00395; + +SELECT '----- has -----'; + +SELECT '----- constant arrays -----'; + +SELECT has([1,NULL,2,3], 1); +SELECT has([1,NULL,2,3], NULL); +SELECT has([1,NULL,2,3], 2); +SELECT has([1,NULL,2,3], 3); +SELECT has([1,NULL,2,3], 4); + +SELECT has(['a',NULL,'def','ghij'], 'a'); +SELECT has(['a',NULL,'def','ghij'], NULL); +SELECT has(['a',NULL,'def','ghij'], 'def'); +SELECT has(['a',NULL,'def','ghij'], 'ghij'); + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 UInt64) Engine=TinyLog; +INSERT INTO test1_00395(col1) VALUES(1),(2),(3),(4); + +SELECT has([1,NULL,2,3], col1) FROM test1_00395; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Nullable(UInt64)) Engine=TinyLog; +INSERT INTO test1_00395(col1) VALUES(1),(2),(3),(4),(NULL); + +SELECT has([1,NULL,2,3], col1) FROM test1_00395; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 
String) Engine=TinyLog; +INSERT INTO test1_00395(col1) VALUES('a'),('bc'),('def'),('ghij'); + +SELECT has(['a',NULL,'def','ghij'], col1) FROM test1_00395; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Nullable(String)) Engine=TinyLog; +INSERT INTO test1_00395(col1) VALUES('a'),('bc'),('def'),('ghij'),(NULL); + +SELECT has(['a',NULL,'def','ghij'], col1) FROM test1_00395; + +SELECT '----- variable arrays -----'; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Array(Nullable(UInt64))) Engine=TinyLog; +INSERT INTO test1_00395(col1) VALUES([2,3,7,NULL]), + ([NULL,3,7,4]), + ([2,NULL,7,NULL]), + ([2,3,NULL,4]), + ([NULL,NULL,NULL,NULL]); + +SELECT has(col1, 2) FROM test1_00395; +SELECT has(col1, 3) FROM test1_00395; +SELECT has(col1, 4) FROM test1_00395; +SELECT has(col1, 5) FROM test1_00395; +SELECT has(col1, 7) FROM test1_00395; +SELECT has(col1, NULL) FROM test1_00395; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Array(Nullable(String))) Engine=TinyLog; +INSERT INTO test1_00395(col1) VALUES(['a','bc','def',NULL]), + ([NULL,'bc','def','ghij']), + (['a',NULL,'def',NULL]), + (['a','bc',NULL,'ghij']), + ([NULL,NULL,NULL,NULL]); + +SELECT has(col1, 'a') FROM test1_00395; +SELECT has(col1, 'bc') FROM test1_00395; +SELECT has(col1, 'def') FROM test1_00395; +SELECT has(col1, 'ghij') FROM test1_00395; +SELECT has(col1, NULL) FROM test1_00395; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Array(Nullable(UInt64)), col2 UInt64) Engine=TinyLog; +INSERT INTO test1_00395(col1,col2) VALUES([2,3,7,NULL], 2), + ([NULL,3,7,4], 3), + ([2,NULL,7,NULL], 7), + ([2,3,NULL,4],5); + +SELECT has(col1,col2) FROM test1_00395; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Array(Nullable(UInt64)), col2 Nullable(UInt64)) Engine=TinyLog; +INSERT INTO test1_00395(col1,col2) VALUES([2,3,7,NULL], 2), + ([NULL,3,7,4], 3), + ([2,NULL,7,NULL], 7), + ([2,3,NULL,4],5), + ([NULL,NULL,NULL,NULL],NULL); + +SELECT has(col1,col2) FROM test1_00395; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Array(Nullable(String)), col2 String) Engine=TinyLog; +INSERT INTO test1_00395(col1,col2) VALUES(['a','bc','def',NULL], 'a'), + ([NULL,'bc','def','ghij'], 'bc'), + (['a',NULL,'def','ghij'], 'def'), + (['a','bc',NULL,'ghij'], 'ghij'); + +SELECT has(col1,col2) FROM test1_00395; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Array(Nullable(String)), col2 Nullable(String)) Engine=TinyLog; +INSERT INTO test1_00395(col1,col2) VALUES(['a','bc','def',NULL], 'a'), + ([NULL,'bc','def','ghij'], 'bc'), + (['a',NULL,'def','ghij'], 'def'), + (['a','bc',NULL,'ghij'], 'ghij'), + ([NULL,NULL,NULL,NULL], NULL); + +SELECT has(col1,col2) FROM test1_00395; + +SELECT '----- Aggregation -----'; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Nullable(String), col2 Nullable(UInt8), col3 String) ENGINE=TinyLog; +INSERT INTO test1_00395(col1,col2,col3) VALUES('A', 0, 'ABCDEFGH'), + ('A', 0, 'BACDEFGH'), + ('A', 1, 'BCADEFGH'), + ('A', 1, 'BCDAEFGH'), + ('B', 1, 'BCDEAFGH'), + ('B', 1, 'BCDEFAGH'), + ('B', 1, 'BCDEFGAH'), + ('B', 1, 'BCDEFGHA'), + ('C', 1, 'ACBDEFGH'), + ('C', NULL, 'ACDBEFGH'), + ('C', NULL, 'ACDEBFGH'), + ('C', NULL, 'ACDEFBGH'), + (NULL, 1, 'ACDEFGBH'), + (NULL, NULL, 'ACDEFGHB'); + +SELECT col1, col2, count() FROM test1_00395 GROUP BY col1, col2 ORDER BY col1, col2; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 String, col2 Nullable(UInt8), col3 String) 
ENGINE=TinyLog; +INSERT INTO test1_00395(col1,col2,col3) VALUES('A', 0, 'ABCDEFGH'), + ('A', 0, 'BACDEFGH'), + ('A', 1, 'BCADEFGH'), + ('A', 1, 'BCDAEFGH'), + ('B', 1, 'BCDEAFGH'), + ('B', 1, 'BCDEFAGH'), + ('B', 1, 'BCDEFGAH'), + ('B', 1, 'BCDEFGHA'), + ('C', 1, 'ACBDEFGH'), + ('C', NULL, 'ACDBEFGH'), + ('C', NULL, 'ACDEBFGH'), + ('C', NULL, 'ACDEFBGH'); + +SELECT col1, col2, count() FROM test1_00395 GROUP BY col1, col2 ORDER BY col1, col2; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Nullable(String), col2 String) ENGINE=TinyLog; +INSERT INTO test1_00395(col1,col2) VALUES('A', 'ABCDEFGH'), + ('A', 'BACDEFGH'), + ('A', 'BCADEFGH'), + ('A', 'BCDAEFGH'), + ('B', 'BCDEAFGH'), + ('B', 'BCDEFAGH'), + ('B', 'BCDEFGAH'), + ('B', 'BCDEFGHA'), + ('C', 'ACBDEFGH'), + ('C', 'ACDBEFGH'), + ('C', 'ACDEBFGH'), + ('C', 'ACDEFBGH'), + (NULL, 'ACDEFGBH'), + (NULL, 'ACDEFGHB'); + +SELECT col1, count() FROM test1_00395 GROUP BY col1 ORDER BY col1; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Nullable(UInt8), col2 String) ENGINE=TinyLog; +INSERT INTO test1_00395(col1,col2) VALUES(0, 'ABCDEFGH'), + (0, 'BACDEFGH'), + (1, 'BCADEFGH'), + (1, 'BCDAEFGH'), + (1, 'BCDEAFGH'), + (1, 'BCDEFAGH'), + (1, 'BCDEFGAH'), + (1, 'BCDEFGHA'), + (1, 'ACBDEFGH'), + (NULL, 'ACDBEFGH'), + (NULL, 'ACDEBFGH'), + (NULL, 'ACDEFBGH'); + +SELECT col1, count() FROM test1_00395 GROUP BY col1 ORDER BY col1; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Nullable(UInt64), col2 UInt64, col3 String) ENGINE=TinyLog; +INSERT INTO test1_00395(col1,col2,col3) VALUES(0, 2, 'ABCDEFGH'), + (0, 3, 'BACDEFGH'), + (1, 5, 'BCADEFGH'), + (1, 2, 'BCDAEFGH'), + (1, 3, 'BCDEAFGH'), + (1, 5, 'BCDEFAGH'), + (1, 2, 'BCDEFGAH'), + (1, 3, 'BCDEFGHA'), + (1, 5, 'ACBDEFGH'), + (NULL, 2, 'ACDBEFGH'), + (NULL, 3, 'ACDEBFGH'), + (NULL, 3, 'ACDEFBGH'); + +SELECT col1, col2, count() FROM test1_00395 GROUP BY col1, col2 ORDER BY col1, col2; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Nullable(UInt64), col2 UInt64, col3 Nullable(UInt64), col4 String) ENGINE=TinyLog; +INSERT INTO test1_00395(col1,col2,col3,col4) VALUES(0, 2, 1, 'ABCDEFGH'), + (0, 3, NULL, 'BACDEFGH'), + (1, 5, 1, 'BCADEFGH'), + (1, 2, NULL, 'BCDAEFGH'), + (1, 3, 1, 'BCDEAFGH'), + (1, 5, NULL, 'BCDEFAGH'), + (1, 2, 1, 'BCDEFGAH'), + (1, 3, NULL, 'BCDEFGHA'), + (1, 5, 1, 'ACBDEFGH'), + (NULL, 2, NULL, 'ACDBEFGH'), + (NULL, 3, 1, 'ACDEBFGH'), + (NULL, 3, NULL, 'ACDEFBGH'); + +SELECT col1, col2, col3, count() FROM test1_00395 GROUP BY col1, col2, col3 ORDER BY col1, col2, col3; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395(col1 Array(Nullable(UInt8)), col2 String) ENGINE=TinyLog; +INSERT INTO test1_00395(col1,col2) VALUES([0], 'ABCDEFGH'), + ([0], 'BACDEFGH'), + ([1], 'BCADEFGH'), + ([1], 'BCDAEFGH'), + ([1], 'BCDEAFGH'), + ([1], 'BCDEFAGH'), + ([1], 'BCDEFGAH'), + ([1], 'BCDEFGHA'), + ([1], 'ACBDEFGH'), + ([NULL], 'ACDBEFGH'), + ([NULL], 'ACDEBFGH'), + ([NULL], 'ACDEFBGH'); + +SELECT col1, count() FROM test1_00395 GROUP BY col1 ORDER BY col1; + +DROP TABLE IF EXISTS test1_00395; +DROP TABLE test2; diff --git a/parser/testdata/00396_uuid/ast.json b/parser/testdata/00396_uuid/ast.json new file mode 100644 index 000000000..0db659ebb --- /dev/null +++ b/parser/testdata/00396_uuid/ast.json @@ -0,0 +1,100 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { 
+ "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function UUIDNumToString (alias uuid_string) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toFixedString (alias uuid_binary) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function unhex (alias bytes) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '0123456789ABCDEF0123456789ABCDEF' (alias hex)" + }, + { + "explain": " Literal UInt64_16" + }, + { + "explain": " Function equals (alias test1) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function hex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function UUIDStringToNum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier uuid_string" + }, + { + "explain": " Identifier hex" + }, + { + "explain": " Function equals (alias test2) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function UUIDStringToNum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier uuid_string" + }, + { + "explain": " Identifier bytes" + } + ], + + "rows": 26, + + "statistics": + { + "elapsed": 0.001275309, + "rows_read": 26, + "bytes_read": 1170 + } +} diff --git a/parser/testdata/00396_uuid/metadata.json b/parser/testdata/00396_uuid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00396_uuid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00396_uuid/query.sql b/parser/testdata/00396_uuid/query.sql new file mode 100644 index 000000000..4ad659e24 --- /dev/null +++ b/parser/testdata/00396_uuid/query.sql @@ -0,0 +1,19 @@ +SELECT UUIDNumToString(toFixedString(unhex('0123456789ABCDEF0123456789ABCDEF' AS hex) AS bytes, 16) AS uuid_binary) AS uuid_string, hex(UUIDStringToNum(uuid_string)) = hex AS test1, UUIDStringToNum(uuid_string) = bytes AS test2; +SELECT UUIDNumToString(toFixedString(unhex(materialize('0123456789ABCDEF0123456789ABCDEF') AS hex) AS bytes, 16) AS uuid_binary) AS uuid_string, hex(UUIDStringToNum(uuid_string)) = hex AS test1, UUIDStringToNum(uuid_string) = bytes AS test2; +SELECT hex(UUIDStringToNum('01234567-89ab-cdef-0123-456789abcdef')); +SELECT hex(UUIDStringToNum(materialize('01234567-89ab-cdef-0123-456789abcdef'))); +SELECT '01234567-89ab-cdef-0123-456789abcdef' AS str, UUIDNumToString(UUIDStringToNum(str)), UUIDNumToString(UUIDStringToNum(toFixedString(str, 36))); +SELECT materialize('01234567-89ab-cdef-0123-456789abcdef') AS str, UUIDNumToString(UUIDStringToNum(str)), UUIDNumToString(UUIDStringToNum(toFixedString(str, 36))); +SELECT toString(toUUID('3f1ed72e-f7fe-4459-9cbe-95fe9298f845')); + +-- conversion back and forth to big-endian hex string +with generateUUIDv4() as uuid, + identity(lower(hex(reverse(reinterpretAsString(uuid))))) as str, + reinterpretAsUUID(reverse(unhex(str))) as uuid2 +select uuid = uuid2; + +select '-- UUID variants --'; +select hex(UUIDStringToNum('00112233-4455-6677-8899-aabbccddeeff', 1)); +select hex(UUIDStringToNum('00112233-4455-6677-8899-aabbccddeeff', 2)); +select UUIDNumToString(UUIDStringToNum('00112233-4455-6677-8899-aabbccddeeff', 1), 1); +select UUIDNumToString(UUIDStringToNum('00112233-4455-6677-8899-aabbccddeeff', 2), 2); diff --git 
a/parser/testdata/00396_uuid_v7/ast.json b/parser/testdata/00396_uuid_v7/ast.json new file mode 100644 index 000000000..764260012 --- /dev/null +++ b/parser/testdata/00396_uuid_v7/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '-- UUIDToNum --'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001365315, + "rows_read": 5, + "bytes_read": 186 + } +} diff --git a/parser/testdata/00396_uuid_v7/metadata.json b/parser/testdata/00396_uuid_v7/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00396_uuid_v7/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00396_uuid_v7/query.sql b/parser/testdata/00396_uuid_v7/query.sql new file mode 100644 index 000000000..5be5a72a3 --- /dev/null +++ b/parser/testdata/00396_uuid_v7/query.sql @@ -0,0 +1,19 @@ +SELECT '-- UUIDToNum --'; +SELECT UUIDToNum(toUUID('00112233-4455-6677-8899-aabbccddeeff'), 1) = UUIDStringToNum('00112233-4455-6677-8899-aabbccddeeff', 1); +SELECT UUIDToNum(toUUID('00112233-4455-6677-8899-aabbccddeeff'), 2) = UUIDStringToNum('00112233-4455-6677-8899-aabbccddeeff', 2); +SELECT UUIDToNum(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT UUIDToNum(toUUID('00112233-4455-6677-8899-aabbccddeeff'), 1, 2); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT UUIDToNum(toUUID('00112233-4455-6677-8899-aabbccddeeff'), 3); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT UUIDToNum('00112233-4455-6677-8899-aabbccddeeff', 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT UUIDToNum(toUUID('00112233-4455-6677-8899-aabbccddeeff'), '1'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT UUIDToNum(toUUID('00112233-4455-6677-8899-aabbccddeeff'), materialize(1)); -- { serverError ILLEGAL_COLUMN } + +SELECT '-- UUIDv7toDateTime --'; +SELECT UUIDv7ToDateTime(toUUID('018f05c9-4ab8-7b86-b64e-c9f03fbd45d1'), 'America/New_York'); +SELECT UUIDv7ToDateTime(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT UUIDv7ToDateTime(toUUID('018f05c9-4ab8-7b86-b64e-c9f03fbd45d1'), 1); -- { serverError ILLEGAL_COLUMN } +SELECT UUIDv7ToDateTime(toUUID('018f05c9-4ab8-7b86-b64e-c9f03fbd45d1'), 'America/New_York', 1); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT UUIDv7ToDateTime('018f05c9-4ab8-7b86-b64e-c9f03fbd45d1'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT UUIDv7ToDateTime(toUUID('018f05c9-4ab8-7b86-b64e-c9f03fbd45d1'), 'America/NewYork'); -- { serverError BAD_ARGUMENTS } +SELECT UUIDv7ToDateTime(toUUID('018f05c9-4ab8-7b86-b64e-c9f03fbd45d1'), materialize('America/New_York')); -- { serverError ILLEGAL_COLUMN } + diff --git a/parser/testdata/00397_tsv_format_synonym/ast.json b/parser/testdata/00397_tsv_format_synonym/ast.json new file mode 100644 index 000000000..caf45aaaa --- /dev/null +++ b/parser/testdata/00397_tsv_format_synonym/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function arrayJoin (alias arr) (children 1)" + }, + { + 
"explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3]" + }, + { + "explain": " Literal 'hello' (alias s1)" + }, + { + "explain": " Literal 'world' (alias s2)" + }, + { + "explain": " Identifier TabSeparated" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001041781, + "rows_read": 10, + "bytes_read": 410 + } +} diff --git a/parser/testdata/00397_tsv_format_synonym/metadata.json b/parser/testdata/00397_tsv_format_synonym/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00397_tsv_format_synonym/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00397_tsv_format_synonym/query.sql b/parser/testdata/00397_tsv_format_synonym/query.sql new file mode 100644 index 000000000..b3b231fbf --- /dev/null +++ b/parser/testdata/00397_tsv_format_synonym/query.sql @@ -0,0 +1,20 @@ +SELECT arrayJoin([1, 2, 3]) AS arr, 'hello' AS s1, 'world' AS s2 FORMAT TabSeparated; +SELECT arrayJoin([1, 2, 3]) AS arr, 'hello' AS s1, 'world' AS s2 FORMAT TSV; + +SELECT arrayJoin([1, 2, 3]) AS arr, 'hello' AS s1, 'world' AS s2 FORMAT TabSeparatedWithNames; +SELECT arrayJoin([1, 2, 3]) AS arr, 'hello' AS s1, 'world' AS s2 FORMAT TSVWithNames; + +SELECT arrayJoin([1, 2, 3]) AS arr, 'hello' AS s1, 'world' AS s2 FORMAT TabSeparatedWithNamesAndTypes; +SELECT arrayJoin([1, 2, 3]) AS arr, 'hello' AS s1, 'world' AS s2 FORMAT TSVWithNamesAndTypes; + +SELECT arrayJoin([1, 2, 3]) AS arr, 'hello' AS s1, 'world' AS s2 FORMAT TabSeparatedRaw; +SELECT arrayJoin([1, 2, 3]) AS arr, 'hello' AS s1, 'world' AS s2 FORMAT TSVRaw; +SELECT arrayJoin([1, 2, 3]) AS arr, 'hello' AS s1, 'world' AS s2 FORMAT Raw; + +SELECT arrayJoin([1, 2, 3]) AS arr, 'hello' AS s1, 'world' AS s2 FORMAT TabSeparatedRawWithNames; +SELECT arrayJoin([1, 2, 3]) AS arr, 'hello' AS s1, 'world' AS s2 FORMAT TSVRawWithNames; +SELECT arrayJoin([1, 2, 3]) AS arr, 'hello' AS s1, 'world' AS s2 FORMAT RawWithNames; + +SELECT arrayJoin([1, 2, 3]) AS arr, 'hello' AS s1, 'world' AS s2 FORMAT TabSeparatedRawWithNamesAndTypes; +SELECT arrayJoin([1, 2, 3]) AS arr, 'hello' AS s1, 'world' AS s2 FORMAT TSVRawWithNamesAndTypes; +SELECT arrayJoin([1, 2, 3]) AS arr, 'hello' AS s1, 'world' AS s2 FORMAT RawWithNamesAndTypes; diff --git a/parser/testdata/00399_group_uniq_array_date_datetime/ast.json b/parser/testdata/00399_group_uniq_array_date_datetime/ast.json new file mode 100644 index 000000000..5f32f1d13 --- /dev/null +++ b/parser/testdata/00399_group_uniq_array_date_datetime/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery grop_uniq_array_date (children 1)" + }, + { + "explain": " Identifier grop_uniq_array_date" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00130867, + "rows_read": 2, + "bytes_read": 92 + } +} diff --git a/parser/testdata/00399_group_uniq_array_date_datetime/metadata.json b/parser/testdata/00399_group_uniq_array_date_datetime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00399_group_uniq_array_date_datetime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00399_group_uniq_array_date_datetime/query.sql b/parser/testdata/00399_group_uniq_array_date_datetime/query.sql new file mode 100644 index 000000000..d732249f0 --- /dev/null +++ b/parser/testdata/00399_group_uniq_array_date_datetime/query.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS grop_uniq_array_date; 
+CREATE TABLE grop_uniq_array_date (d Date, dt DateTime, id Integer) ENGINE = Memory; +INSERT INTO grop_uniq_array_date VALUES (toDate('2016-12-16'), toDateTime('2016-12-16 12:00:00'), 1) (toDate('2016-12-16'), toDateTime('2016-12-16 12:00:00'), 1); +SELECT groupUniqArray(d), groupUniqArray(dt) FROM grop_uniq_array_date; +INSERT INTO grop_uniq_array_date VALUES (toDate('2016-12-17'), toDateTime('2016-12-17 12:00:00'), 1), (toDate('2016-12-18'), toDateTime('2016-12-18 12:00:00'), 1), (toDate('2016-12-16'), toDateTime('2016-12-16 12:00:00'), 2); +SELECT length(groupUniqArray(2)(d)), length(groupUniqArray(2)(dt)), length(groupUniqArray(d)), length(groupUniqArray(dt)) FROM grop_uniq_array_date GROUP BY id ORDER BY id; +SELECT length(groupUniqArray(10000)(d)), length(groupUniqArray(10000)(dt)) FROM grop_uniq_array_date GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS grop_uniq_array_date; diff --git a/parser/testdata/00401_merge_and_stripelog/ast.json b/parser/testdata/00401_merge_and_stripelog/ast.json new file mode 100644 index 000000000..6b5d57e7a --- /dev/null +++ b/parser/testdata/00401_merge_and_stripelog/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery stripe1 (children 1)" + }, + { + "explain": " Identifier stripe1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000940647, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00401_merge_and_stripelog/metadata.json b/parser/testdata/00401_merge_and_stripelog/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00401_merge_and_stripelog/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00401_merge_and_stripelog/query.sql b/parser/testdata/00401_merge_and_stripelog/query.sql new file mode 100644 index 000000000..ed2857042 --- /dev/null +++ b/parser/testdata/00401_merge_and_stripelog/query.sql @@ -0,0 +1,48 @@ +DROP TABLE IF EXISTS stripe1; +DROP TABLE IF EXISTS stripe2; +DROP TABLE IF EXISTS stripe3; +DROP TABLE IF EXISTS stripe4; +DROP TABLE IF EXISTS stripe5; +DROP TABLE IF EXISTS stripe6; +DROP TABLE IF EXISTS stripe7; +DROP TABLE IF EXISTS stripe8; +DROP TABLE IF EXISTS stripe9; +DROP TABLE IF EXISTS stripe10; +DROP TABLE IF EXISTS merge_00401; + +CREATE TABLE stripe1 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10; +CREATE TABLE stripe2 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10; +CREATE TABLE stripe3 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10; +CREATE TABLE stripe4 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10; +CREATE TABLE stripe5 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10; +CREATE TABLE stripe6 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10; +CREATE TABLE stripe7 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10; +CREATE TABLE stripe8 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10; +CREATE TABLE stripe9 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10; +CREATE TABLE stripe10 ENGINE = StripeLog AS SELECT number AS x FROM system.numbers LIMIT 10; + +CREATE TABLE merge_00401 AS stripe1 ENGINE = Merge(currentDatabase(), '^stripe\\d+'); + +SELECT x, count() FROM merge_00401 GROUP BY x ORDER BY x; +SET max_threads = 1; +SELECT x, count() FROM merge_00401 GROUP BY x ORDER BY x; +SET max_threads = 2; +SELECT x, count() FROM merge_00401 GROUP 
BY x ORDER BY x; +SET max_threads = 5; +SELECT x, count() FROM merge_00401 GROUP BY x ORDER BY x; +SET max_threads = 10; +SELECT x, count() FROM merge_00401 GROUP BY x ORDER BY x; +SET max_threads = 20; +SELECT x, count() FROM merge_00401 GROUP BY x ORDER BY x; + +DROP TABLE IF EXISTS stripe1; +DROP TABLE IF EXISTS stripe2; +DROP TABLE IF EXISTS stripe3; +DROP TABLE IF EXISTS stripe4; +DROP TABLE IF EXISTS stripe5; +DROP TABLE IF EXISTS stripe6; +DROP TABLE IF EXISTS stripe7; +DROP TABLE IF EXISTS stripe8; +DROP TABLE IF EXISTS stripe9; +DROP TABLE IF EXISTS stripe10; +DROP TABLE IF EXISTS merge_00401; diff --git a/parser/testdata/00402_nan_and_extremes/ast.json b/parser/testdata/00402_nan_and_extremes/ast.json new file mode 100644 index 000000000..9f1b9f0e4 --- /dev/null +++ b/parser/testdata/00402_nan_and_extremes/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_3, UInt64_1, UInt64_2]" + }, + { + "explain": " Set" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001112525, + "rows_read": 8, + "bytes_read": 304 + } +} diff --git a/parser/testdata/00402_nan_and_extremes/metadata.json b/parser/testdata/00402_nan_and_extremes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00402_nan_and_extremes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00402_nan_and_extremes/query.sql b/parser/testdata/00402_nan_and_extremes/query.sql new file mode 100644 index 000000000..8de809a30 --- /dev/null +++ b/parser/testdata/00402_nan_and_extremes/query.sql @@ -0,0 +1,8 @@ +SELECT arrayJoin([3, 1, 2]) SETTINGS extremes = 1; +SELECT arrayJoin([nan, 1, 2]) SETTINGS extremes = 1; +SELECT arrayJoin([3, nan, 2]) SETTINGS extremes = 1; +SELECT arrayJoin([3, 1, nan]) SETTINGS extremes = 1; +SELECT arrayJoin([nan, nan, 2]) SETTINGS extremes = 1; +SELECT arrayJoin([nan, 1, nan]) SETTINGS extremes = 1; +SELECT arrayJoin([3, nan, nan]) SETTINGS extremes = 1; +SELECT arrayJoin([nan, nan, nan]) SETTINGS extremes = 1; diff --git a/parser/testdata/00403_to_start_of_day/ast.json b/parser/testdata/00403_to_start_of_day/ast.json new file mode 100644 index 000000000..060adcb2e --- /dev/null +++ b/parser/testdata/00403_to_start_of_day/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toStartOfDay (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function now (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function now (children 1)" + }, 
+ { + "explain": " ExpressionList" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001048913, + "rows_read": 16, + "bytes_read": 643 + } +} diff --git a/parser/testdata/00403_to_start_of_day/metadata.json b/parser/testdata/00403_to_start_of_day/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00403_to_start_of_day/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00403_to_start_of_day/query.sql b/parser/testdata/00403_to_start_of_day/query.sql new file mode 100644 index 000000000..e298afd80 --- /dev/null +++ b/parser/testdata/00403_to_start_of_day/query.sql @@ -0,0 +1 @@ +SELECT toStartOfDay(now()) = toDateTime(toDate(now())); diff --git a/parser/testdata/00404_null_literal/ast.json b/parser/testdata/00404_null_literal/ast.json new file mode 100644 index 000000000..00a042381 --- /dev/null +++ b/parser/testdata/00404_null_literal/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function isNull (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal NULL" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001093264, + "rows_read": 7, + "bytes_read": 255 + } +} diff --git a/parser/testdata/00404_null_literal/metadata.json b/parser/testdata/00404_null_literal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00404_null_literal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00404_null_literal/query.sql b/parser/testdata/00404_null_literal/query.sql new file mode 100644 index 000000000..ec5872e45 --- /dev/null +++ b/parser/testdata/00404_null_literal/query.sql @@ -0,0 +1,6 @@ +SELECT NULL IS NULL; +SELECT Null IS NULL; +SELECT null is null; +SELECT nuLL IS NOT NULL; +SELECT NOT NULL IS NOT NULL; +SELECT NOT NULL IS null; diff --git a/parser/testdata/00405_output_format_pretty_color/ast.json b/parser/testdata/00405_output_format_pretty_color/ast.json new file mode 100644 index 000000000..823aeb230 --- /dev/null +++ b/parser/testdata/00405_output_format_pretty_color/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001454107, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00405_output_format_pretty_color/metadata.json b/parser/testdata/00405_output_format_pretty_color/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00405_output_format_pretty_color/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00405_output_format_pretty_color/query.sql b/parser/testdata/00405_output_format_pretty_color/query.sql new file mode 100644 index 000000000..af9635e97 --- /dev/null +++ b/parser/testdata/00405_output_format_pretty_color/query.sql @@ -0,0 +1,34 @@ +SET output_format_pretty_display_footer_column_names=0; +SET output_format_pretty_color = 0; +SET output_format_pretty_squash_consecutive_ms = 0; +SHOW SETTING output_format_pretty_color; + +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM 
system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT Pretty; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyCompact; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettySpace; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyCompactMonoBlock; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyNoEscapes; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyCompactNoEscapes; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettySpaceNoEscapes; + +SET output_format_pretty_color = 1; +SHOW SETTING output_format_pretty_color; + +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT Pretty; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyCompact; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettySpace; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyCompactMonoBlock; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyNoEscapes; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyCompactNoEscapes; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettySpaceNoEscapes; + +SET output_format_pretty_color = 'auto'; +SHOW SETTING output_format_pretty_color; + +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT Pretty; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyCompact; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettySpace; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 
SETTINGS max_block_size = 5 FORMAT PrettyCompactMonoBlock; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyNoEscapes; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyCompactNoEscapes; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettySpaceNoEscapes; diff --git a/parser/testdata/00405_pretty_formats/ast.json b/parser/testdata/00405_pretty_formats/ast.json new file mode 100644 index 000000000..e451f7fa8 --- /dev/null +++ b/parser/testdata/00405_pretty_formats/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001284782, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00405_pretty_formats/metadata.json b/parser/testdata/00405_pretty_formats/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00405_pretty_formats/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00405_pretty_formats/query.sql b/parser/testdata/00405_pretty_formats/query.sql new file mode 100644 index 000000000..3d6017163 --- /dev/null +++ b/parser/testdata/00405_pretty_formats/query.sql @@ -0,0 +1,32 @@ +SET output_format_pretty_display_footer_column_names=0; +SET output_format_pretty_color = 1; +SET output_format_pretty_squash_consecutive_ms = 0; + +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT Pretty; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyCompact; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettySpace; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyCompactMonoBlock; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyNoEscapes; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyCompactNoEscapes; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettySpaceNoEscapes; + +SET output_format_pretty_max_rows = 6; + +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT Pretty; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 
SETTINGS max_block_size = 5 FORMAT PrettyCompact; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettySpace; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyCompactMonoBlock; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyNoEscapes; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyCompactNoEscapes; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettySpaceNoEscapes; + + +SET output_format_pretty_grid_charset = 'ASCII'; + +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT Pretty; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyCompact; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettySpace; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyCompactMonoBlock; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyNoEscapes; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettyCompactNoEscapes; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 10 SETTINGS max_block_size = 5 FORMAT PrettySpaceNoEscapes; diff --git a/parser/testdata/00406_tuples_with_nulls/ast.json b/parser/testdata/00406_tuples_with_nulls/ast.json new file mode 100644 index 000000000..b195d6d8c --- /dev/null +++ b/parser/testdata/00406_tuples_with_nulls/ast.json @@ -0,0 +1,109 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (alias tuple) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function nullIf (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_0" + }, + 
{ + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function nullIf (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Identifier PrettyCompactNoEscapes" + } + ], + + "rows": 29, + + "statistics": + { + "elapsed": 0.001342323, + "rows_read": 29, + "bytes_read": 1168 + } +} diff --git a/parser/testdata/00406_tuples_with_nulls/metadata.json b/parser/testdata/00406_tuples_with_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00406_tuples_with_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00406_tuples_with_nulls/query.sql b/parser/testdata/00406_tuples_with_nulls/query.sql new file mode 100644 index 000000000..a947e6f78 --- /dev/null +++ b/parser/testdata/00406_tuples_with_nulls/query.sql @@ -0,0 +1,2 @@ +SELECT (number, nullIf(number % 3, 0), toString(nullIf(number % 2, 0))) AS tuple FROM system.numbers LIMIT 10 FORMAT PrettyCompactNoEscapes; +SELECT NULL AS x, tuple(NULL) AS y FORMAT PrettyCompactNoEscapes; diff --git a/parser/testdata/00409_shard_limit_by/ast.json b/parser/testdata/00409_shard_limit_by/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00409_shard_limit_by/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00409_shard_limit_by/metadata.json b/parser/testdata/00409_shard_limit_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00409_shard_limit_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00409_shard_limit_by/query.sql b/parser/testdata/00409_shard_limit_by/query.sql new file mode 100644 index 000000000..0db29982d --- /dev/null +++ b/parser/testdata/00409_shard_limit_by/query.sql @@ -0,0 +1,40 @@ +-- Tags: shard + +DROP TABLE IF EXISTS limit_by; +CREATE TABLE limit_by (Num UInt32, Name String) ENGINE = Memory; + +INSERT INTO limit_by (Num, Name) VALUES (1, 'John'); +INSERT INTO limit_by (Num, Name) VALUES (1, 'John'); +INSERT INTO limit_by (Num, Name) VALUES (3, 'Mary'); +INSERT INTO limit_by (Num, Name) VALUES (3, 'Mary'); +INSERT INTO limit_by (Num, Name) VALUES (3, 'Mary'); +INSERT INTO limit_by (Num, Name) VALUES (4, 'Mary'); +INSERT INTO limit_by (Num, Name) VALUES (4, 'Mary'); +INSERT INTO limit_by (Num, Name) VALUES (5, 'Bill'); +INSERT INTO limit_by (Num, Name) VALUES (7, 'Bill'); +INSERT INTO limit_by (Num, Name) VALUES (7, 'Bill'); +INSERT INTO limit_by (Num, Name) VALUES (7, 'Mary'); +INSERT INTO limit_by (Num, Name) VALUES (7, 'John'); + +-- Two elemens in each group +SELECT Num FROM limit_by ORDER BY Num LIMIT 2 BY Num; + +-- LIMIT BY doesn't affect result of GROUP BY +SELECT Num, count(*) FROM limit_by GROUP BY Num ORDER BY Num LIMIT 2 BY Num; + +-- LIMIT BY can be combined with LIMIT +SELECT Num, Name FROM limit_by ORDER BY Num LIMIT 1 BY Num, Name LIMIT 3; + +-- Distributed LIMIT BY +SELECT dummy FROM 
remote('127.0.0.{2,3}', system.one) LIMIT 1 BY dummy; +SELECT dummy FROM remote('127.0.0.{2,3}', system.one) LIMIT 2 BY dummy; + +SELECT 1 as one FROM remote('127.0.0.{2,3}', system.one) LIMIT 1 BY one; + +-- Distributed LIMIT BY with LIMIT +SELECT toInt8(number / 5 + 100) AS x FROM remote('127.0.0.1', system.numbers) LIMIT 2 BY x LIMIT 5; + +-- Distributed LIMIT BY with ORDER BY non-selected column +SELECT 1 AS x FROM remote('127.0.0.{2,3}', system.one) ORDER BY dummy LIMIT 1 BY x; + +DROP TABLE IF EXISTS limit_by; diff --git a/parser/testdata/00410_aggregation_combinators_with_arenas/ast.json b/parser/testdata/00410_aggregation_combinators_with_arenas/ast.json new file mode 100644 index 000000000..3d6665c25 --- /dev/null +++ b/parser/testdata/00410_aggregation_combinators_with_arenas/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001044609, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00410_aggregation_combinators_with_arenas/metadata.json b/parser/testdata/00410_aggregation_combinators_with_arenas/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00410_aggregation_combinators_with_arenas/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00410_aggregation_combinators_with_arenas/query.sql b/parser/testdata/00410_aggregation_combinators_with_arenas/query.sql new file mode 100644 index 000000000..d6295b20c --- /dev/null +++ b/parser/testdata/00410_aggregation_combinators_with_arenas/query.sql @@ -0,0 +1,16 @@ +SET allow_deprecated_error_prone_window_functions = 1; +DROP TABLE IF EXISTS arena; +CREATE TABLE arena (k UInt8, d String) ENGINE = Memory; +INSERT INTO arena SELECT number % 10 AS k, hex(intDiv(number, 10) % 1000) AS d FROM system.numbers LIMIT 10000000; +SELECT length(groupUniqArrayIf(d, d != hex(0))) FROM arena GROUP BY k; +SELECT length(groupUniqArrayMerge(ds)) FROM (SELECT k, groupUniqArrayState(d) AS ds FROM arena GROUP BY k) GROUP BY k; +DROP TABLE IF EXISTS arena; + +SELECT length(arrayReduce('groupUniqArray', [[1, 2], [1], emptyArrayUInt8(), [1], [1, 2]])); +SELECT min(x), max(x) FROM (SELECT length(arrayReduce('groupUniqArray', [hex(number), hex(number+1), hex(number)])) AS x FROM system.numbers LIMIT 100000); + +-- Disable external aggregation because the state is reset for each new block of data in 'runningAccumulate' function. 
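+-- Setting both thresholds to 0 keeps the whole aggregation in memory, avoiding the per-block state resets described above.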
+SET max_bytes_before_external_group_by = 0; +SET max_bytes_ratio_before_external_group_by = 0; + +SELECT sum(length(runningAccumulate(x))) FROM (SELECT groupUniqArrayState(toString(number % 10)) AS x, number FROM (SELECT * FROM system.numbers LIMIT 11) GROUP BY number ORDER BY number); diff --git a/parser/testdata/00411_long_accurate_number_comparison_float/ast.json b/parser/testdata/00411_long_accurate_number_comparison_float/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00411_long_accurate_number_comparison_float/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00411_long_accurate_number_comparison_float/metadata.json b/parser/testdata/00411_long_accurate_number_comparison_float/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00411_long_accurate_number_comparison_float/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00411_long_accurate_number_comparison_float/query.sql b/parser/testdata/00411_long_accurate_number_comparison_float/query.sql new file mode 100644 index 000000000..f309f6bab --- /dev/null +++ b/parser/testdata/00411_long_accurate_number_comparison_float/query.sql @@ -0,0 +1,129 @@ +-- Tags: long + +-- The results are different than in Python. That's why this file is generated and the reference is edited instead of using the Python script. +-- Example: in ClickHouse, 9223372036854775808.0 != 9223372036854775808. + +SELECT '0', '0.000000000', 0 = 0.000000000, 0 != 0.000000000, 0 < 0.000000000, 0 <= 0.000000000, 0 > 0.000000000, 0 >= 0.000000000, 0.000000000 = 0, 0.000000000 != 0, 0.000000000 < 0, 0.000000000 <= 0, 0.000000000 > 0, 0.000000000 >= 0 , toUInt8(0) = 0.000000000, toUInt8(0) != 0.000000000, toUInt8(0) < 0.000000000, toUInt8(0) <= 0.000000000, toUInt8(0) > 0.000000000, toUInt8(0) >= 0.000000000, 0.000000000 = toUInt8(0), 0.000000000 != toUInt8(0), 0.000000000 < toUInt8(0), 0.000000000 <= toUInt8(0), 0.000000000 > toUInt8(0), 0.000000000 >= toUInt8(0) , toInt8(0) = 0.000000000, toInt8(0) != 0.000000000, toInt8(0) < 0.000000000, toInt8(0) <= 0.000000000, toInt8(0) > 0.000000000, toInt8(0) >= 0.000000000, 0.000000000 = toInt8(0), 0.000000000 != toInt8(0), 0.000000000 < toInt8(0), 0.000000000 <= toInt8(0), 0.000000000 > toInt8(0), 0.000000000 >= toInt8(0) , toUInt16(0) = 0.000000000, toUInt16(0) != 0.000000000, toUInt16(0) < 0.000000000, toUInt16(0) <= 0.000000000, toUInt16(0) > 0.000000000, toUInt16(0) >= 0.000000000, 0.000000000 = toUInt16(0), 0.000000000 != toUInt16(0), 0.000000000 < toUInt16(0), 0.000000000 <= toUInt16(0), 0.000000000 > toUInt16(0), 0.000000000 >= toUInt16(0) , toInt16(0) = 0.000000000, toInt16(0) != 0.000000000, toInt16(0) < 0.000000000, toInt16(0) <= 0.000000000, toInt16(0) > 0.000000000, toInt16(0) >= 0.000000000, 0.000000000 = toInt16(0), 0.000000000 != toInt16(0), 0.000000000 < toInt16(0), 0.000000000 <= toInt16(0), 0.000000000 > toInt16(0), 0.000000000 >= toInt16(0) , toUInt32(0) = 0.000000000, toUInt32(0) != 0.000000000, toUInt32(0) < 0.000000000, toUInt32(0) <= 0.000000000, toUInt32(0) > 0.000000000, toUInt32(0) >= 0.000000000, 0.000000000 = toUInt32(0), 0.000000000 != toUInt32(0), 0.000000000 < toUInt32(0), 0.000000000 <= toUInt32(0), 0.000000000 > toUInt32(0), 0.000000000 >= toUInt32(0) , toInt32(0) = 0.000000000, toInt32(0) != 0.000000000, toInt32(0) < 0.000000000, toInt32(0) <= 0.000000000, toInt32(0) > 0.000000000, toInt32(0) >= 0.000000000, 0.000000000 = toInt32(0), 0.000000000 != toInt32(0),
0.000000000 < toInt32(0), 0.000000000 <= toInt32(0), 0.000000000 > toInt32(0), 0.000000000 >= toInt32(0) , toUInt64(0) = 0.000000000, toUInt64(0) != 0.000000000, toUInt64(0) < 0.000000000, toUInt64(0) <= 0.000000000, toUInt64(0) > 0.000000000, toUInt64(0) >= 0.000000000, 0.000000000 = toUInt64(0), 0.000000000 != toUInt64(0), 0.000000000 < toUInt64(0), 0.000000000 <= toUInt64(0), 0.000000000 > toUInt64(0), 0.000000000 >= toUInt64(0) , toInt64(0) = 0.000000000, toInt64(0) != 0.000000000, toInt64(0) < 0.000000000, toInt64(0) <= 0.000000000, toInt64(0) > 0.000000000, toInt64(0) >= 0.000000000, 0.000000000 = toInt64(0), 0.000000000 != toInt64(0), 0.000000000 < toInt64(0), 0.000000000 <= toInt64(0), 0.000000000 > toInt64(0), 0.000000000 >= toInt64(0) ; +SELECT '0', '-1.000000000', 0 = -1.000000000, 0 != -1.000000000, 0 < -1.000000000, 0 <= -1.000000000, 0 > -1.000000000, 0 >= -1.000000000, -1.000000000 = 0, -1.000000000 != 0, -1.000000000 < 0, -1.000000000 <= 0, -1.000000000 > 0, -1.000000000 >= 0 , toUInt8(0) = -1.000000000, toUInt8(0) != -1.000000000, toUInt8(0) < -1.000000000, toUInt8(0) <= -1.000000000, toUInt8(0) > -1.000000000, toUInt8(0) >= -1.000000000, -1.000000000 = toUInt8(0), -1.000000000 != toUInt8(0), -1.000000000 < toUInt8(0), -1.000000000 <= toUInt8(0), -1.000000000 > toUInt8(0), -1.000000000 >= toUInt8(0) , toInt8(0) = -1.000000000, toInt8(0) != -1.000000000, toInt8(0) < -1.000000000, toInt8(0) <= -1.000000000, toInt8(0) > -1.000000000, toInt8(0) >= -1.000000000, -1.000000000 = toInt8(0), -1.000000000 != toInt8(0), -1.000000000 < toInt8(0), -1.000000000 <= toInt8(0), -1.000000000 > toInt8(0), -1.000000000 >= toInt8(0) , toUInt16(0) = -1.000000000, toUInt16(0) != -1.000000000, toUInt16(0) < -1.000000000, toUInt16(0) <= -1.000000000, toUInt16(0) > -1.000000000, toUInt16(0) >= -1.000000000, -1.000000000 = toUInt16(0), -1.000000000 != toUInt16(0), -1.000000000 < toUInt16(0), -1.000000000 <= toUInt16(0), -1.000000000 > toUInt16(0), -1.000000000 >= toUInt16(0) , toInt16(0) = -1.000000000, toInt16(0) != -1.000000000, toInt16(0) < -1.000000000, toInt16(0) <= -1.000000000, toInt16(0) > -1.000000000, toInt16(0) >= -1.000000000, -1.000000000 = toInt16(0), -1.000000000 != toInt16(0), -1.000000000 < toInt16(0), -1.000000000 <= toInt16(0), -1.000000000 > toInt16(0), -1.000000000 >= toInt16(0) , toUInt32(0) = -1.000000000, toUInt32(0) != -1.000000000, toUInt32(0) < -1.000000000, toUInt32(0) <= -1.000000000, toUInt32(0) > -1.000000000, toUInt32(0) >= -1.000000000, -1.000000000 = toUInt32(0), -1.000000000 != toUInt32(0), -1.000000000 < toUInt32(0), -1.000000000 <= toUInt32(0), -1.000000000 > toUInt32(0), -1.000000000 >= toUInt32(0) , toInt32(0) = -1.000000000, toInt32(0) != -1.000000000, toInt32(0) < -1.000000000, toInt32(0) <= -1.000000000, toInt32(0) > -1.000000000, toInt32(0) >= -1.000000000, -1.000000000 = toInt32(0), -1.000000000 != toInt32(0), -1.000000000 < toInt32(0), -1.000000000 <= toInt32(0), -1.000000000 > toInt32(0), -1.000000000 >= toInt32(0) , toUInt64(0) = -1.000000000, toUInt64(0) != -1.000000000, toUInt64(0) < -1.000000000, toUInt64(0) <= -1.000000000, toUInt64(0) > -1.000000000, toUInt64(0) >= -1.000000000, -1.000000000 = toUInt64(0), -1.000000000 != toUInt64(0), -1.000000000 < toUInt64(0), -1.000000000 <= toUInt64(0), -1.000000000 > toUInt64(0), -1.000000000 >= toUInt64(0) , toInt64(0) = -1.000000000, toInt64(0) != -1.000000000, toInt64(0) < -1.000000000, toInt64(0) <= -1.000000000, toInt64(0) > -1.000000000, toInt64(0) >= -1.000000000, -1.000000000 = toInt64(0), -1.000000000 
!= toInt64(0), -1.000000000 < toInt64(0), -1.000000000 <= toInt64(0), -1.000000000 > toInt64(0), -1.000000000 >= toInt64(0) ; +SELECT '0', '1.000000000', 0 = 1.000000000, 0 != 1.000000000, 0 < 1.000000000, 0 <= 1.000000000, 0 > 1.000000000, 0 >= 1.000000000, 1.000000000 = 0, 1.000000000 != 0, 1.000000000 < 0, 1.000000000 <= 0, 1.000000000 > 0, 1.000000000 >= 0 , toUInt8(0) = 1.000000000, toUInt8(0) != 1.000000000, toUInt8(0) < 1.000000000, toUInt8(0) <= 1.000000000, toUInt8(0) > 1.000000000, toUInt8(0) >= 1.000000000, 1.000000000 = toUInt8(0), 1.000000000 != toUInt8(0), 1.000000000 < toUInt8(0), 1.000000000 <= toUInt8(0), 1.000000000 > toUInt8(0), 1.000000000 >= toUInt8(0) , toInt8(0) = 1.000000000, toInt8(0) != 1.000000000, toInt8(0) < 1.000000000, toInt8(0) <= 1.000000000, toInt8(0) > 1.000000000, toInt8(0) >= 1.000000000, 1.000000000 = toInt8(0), 1.000000000 != toInt8(0), 1.000000000 < toInt8(0), 1.000000000 <= toInt8(0), 1.000000000 > toInt8(0), 1.000000000 >= toInt8(0) , toUInt16(0) = 1.000000000, toUInt16(0) != 1.000000000, toUInt16(0) < 1.000000000, toUInt16(0) <= 1.000000000, toUInt16(0) > 1.000000000, toUInt16(0) >= 1.000000000, 1.000000000 = toUInt16(0), 1.000000000 != toUInt16(0), 1.000000000 < toUInt16(0), 1.000000000 <= toUInt16(0), 1.000000000 > toUInt16(0), 1.000000000 >= toUInt16(0) , toInt16(0) = 1.000000000, toInt16(0) != 1.000000000, toInt16(0) < 1.000000000, toInt16(0) <= 1.000000000, toInt16(0) > 1.000000000, toInt16(0) >= 1.000000000, 1.000000000 = toInt16(0), 1.000000000 != toInt16(0), 1.000000000 < toInt16(0), 1.000000000 <= toInt16(0), 1.000000000 > toInt16(0), 1.000000000 >= toInt16(0) , toUInt32(0) = 1.000000000, toUInt32(0) != 1.000000000, toUInt32(0) < 1.000000000, toUInt32(0) <= 1.000000000, toUInt32(0) > 1.000000000, toUInt32(0) >= 1.000000000, 1.000000000 = toUInt32(0), 1.000000000 != toUInt32(0), 1.000000000 < toUInt32(0), 1.000000000 <= toUInt32(0), 1.000000000 > toUInt32(0), 1.000000000 >= toUInt32(0) , toInt32(0) = 1.000000000, toInt32(0) != 1.000000000, toInt32(0) < 1.000000000, toInt32(0) <= 1.000000000, toInt32(0) > 1.000000000, toInt32(0) >= 1.000000000, 1.000000000 = toInt32(0), 1.000000000 != toInt32(0), 1.000000000 < toInt32(0), 1.000000000 <= toInt32(0), 1.000000000 > toInt32(0), 1.000000000 >= toInt32(0) , toUInt64(0) = 1.000000000, toUInt64(0) != 1.000000000, toUInt64(0) < 1.000000000, toUInt64(0) <= 1.000000000, toUInt64(0) > 1.000000000, toUInt64(0) >= 1.000000000, 1.000000000 = toUInt64(0), 1.000000000 != toUInt64(0), 1.000000000 < toUInt64(0), 1.000000000 <= toUInt64(0), 1.000000000 > toUInt64(0), 1.000000000 >= toUInt64(0) , toInt64(0) = 1.000000000, toInt64(0) != 1.000000000, toInt64(0) < 1.000000000, toInt64(0) <= 1.000000000, toInt64(0) > 1.000000000, toInt64(0) >= 1.000000000, 1.000000000 = toInt64(0), 1.000000000 != toInt64(0), 1.000000000 < toInt64(0), 1.000000000 <= toInt64(0), 1.000000000 > toInt64(0), 1.000000000 >= toInt64(0) ; +SELECT '0', '18446744073709551616.000000000', 0 = 18446744073709551616.000000000, 0 != 18446744073709551616.000000000, 0 < 18446744073709551616.000000000, 0 <= 18446744073709551616.000000000, 0 > 18446744073709551616.000000000, 0 >= 18446744073709551616.000000000, 18446744073709551616.000000000 = 0, 18446744073709551616.000000000 != 0, 18446744073709551616.000000000 < 0, 18446744073709551616.000000000 <= 0, 18446744073709551616.000000000 > 0, 18446744073709551616.000000000 >= 0 , toUInt8(0) = 18446744073709551616.000000000, toUInt8(0) != 18446744073709551616.000000000, toUInt8(0) < 
18446744073709551616.000000000, toUInt8(0) <= 18446744073709551616.000000000, toUInt8(0) > 18446744073709551616.000000000, toUInt8(0) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toUInt8(0), 18446744073709551616.000000000 != toUInt8(0), 18446744073709551616.000000000 < toUInt8(0), 18446744073709551616.000000000 <= toUInt8(0), 18446744073709551616.000000000 > toUInt8(0), 18446744073709551616.000000000 >= toUInt8(0) , toInt8(0) = 18446744073709551616.000000000, toInt8(0) != 18446744073709551616.000000000, toInt8(0) < 18446744073709551616.000000000, toInt8(0) <= 18446744073709551616.000000000, toInt8(0) > 18446744073709551616.000000000, toInt8(0) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toInt8(0), 18446744073709551616.000000000 != toInt8(0), 18446744073709551616.000000000 < toInt8(0), 18446744073709551616.000000000 <= toInt8(0), 18446744073709551616.000000000 > toInt8(0), 18446744073709551616.000000000 >= toInt8(0) , toUInt16(0) = 18446744073709551616.000000000, toUInt16(0) != 18446744073709551616.000000000, toUInt16(0) < 18446744073709551616.000000000, toUInt16(0) <= 18446744073709551616.000000000, toUInt16(0) > 18446744073709551616.000000000, toUInt16(0) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toUInt16(0), 18446744073709551616.000000000 != toUInt16(0), 18446744073709551616.000000000 < toUInt16(0), 18446744073709551616.000000000 <= toUInt16(0), 18446744073709551616.000000000 > toUInt16(0), 18446744073709551616.000000000 >= toUInt16(0) , toInt16(0) = 18446744073709551616.000000000, toInt16(0) != 18446744073709551616.000000000, toInt16(0) < 18446744073709551616.000000000, toInt16(0) <= 18446744073709551616.000000000, toInt16(0) > 18446744073709551616.000000000, toInt16(0) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toInt16(0), 18446744073709551616.000000000 != toInt16(0), 18446744073709551616.000000000 < toInt16(0), 18446744073709551616.000000000 <= toInt16(0), 18446744073709551616.000000000 > toInt16(0), 18446744073709551616.000000000 >= toInt16(0) , toUInt32(0) = 18446744073709551616.000000000, toUInt32(0) != 18446744073709551616.000000000, toUInt32(0) < 18446744073709551616.000000000, toUInt32(0) <= 18446744073709551616.000000000, toUInt32(0) > 18446744073709551616.000000000, toUInt32(0) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toUInt32(0), 18446744073709551616.000000000 != toUInt32(0), 18446744073709551616.000000000 < toUInt32(0), 18446744073709551616.000000000 <= toUInt32(0), 18446744073709551616.000000000 > toUInt32(0), 18446744073709551616.000000000 >= toUInt32(0) , toInt32(0) = 18446744073709551616.000000000, toInt32(0) != 18446744073709551616.000000000, toInt32(0) < 18446744073709551616.000000000, toInt32(0) <= 18446744073709551616.000000000, toInt32(0) > 18446744073709551616.000000000, toInt32(0) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toInt32(0), 18446744073709551616.000000000 != toInt32(0), 18446744073709551616.000000000 < toInt32(0), 18446744073709551616.000000000 <= toInt32(0), 18446744073709551616.000000000 > toInt32(0), 18446744073709551616.000000000 >= toInt32(0) , toUInt64(0) = 18446744073709551616.000000000, toUInt64(0) != 18446744073709551616.000000000, toUInt64(0) < 18446744073709551616.000000000, toUInt64(0) <= 18446744073709551616.000000000, toUInt64(0) > 18446744073709551616.000000000, toUInt64(0) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toUInt64(0), 18446744073709551616.000000000 != 
toUInt64(0), 18446744073709551616.000000000 < toUInt64(0), 18446744073709551616.000000000 <= toUInt64(0), 18446744073709551616.000000000 > toUInt64(0), 18446744073709551616.000000000 >= toUInt64(0) , toInt64(0) = 18446744073709551616.000000000, toInt64(0) != 18446744073709551616.000000000, toInt64(0) < 18446744073709551616.000000000, toInt64(0) <= 18446744073709551616.000000000, toInt64(0) > 18446744073709551616.000000000, toInt64(0) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toInt64(0), 18446744073709551616.000000000 != toInt64(0), 18446744073709551616.000000000 < toInt64(0), 18446744073709551616.000000000 <= toInt64(0), 18446744073709551616.000000000 > toInt64(0), 18446744073709551616.000000000 >= toInt64(0) ; +SELECT '0', '9223372036854775808.000000000', 0 = 9223372036854775808.000000000, 0 != 9223372036854775808.000000000, 0 < 9223372036854775808.000000000, 0 <= 9223372036854775808.000000000, 0 > 9223372036854775808.000000000, 0 >= 9223372036854775808.000000000, 9223372036854775808.000000000 = 0, 9223372036854775808.000000000 != 0, 9223372036854775808.000000000 < 0, 9223372036854775808.000000000 <= 0, 9223372036854775808.000000000 > 0, 9223372036854775808.000000000 >= 0 , toUInt8(0) = 9223372036854775808.000000000, toUInt8(0) != 9223372036854775808.000000000, toUInt8(0) < 9223372036854775808.000000000, toUInt8(0) <= 9223372036854775808.000000000, toUInt8(0) > 9223372036854775808.000000000, toUInt8(0) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toUInt8(0), 9223372036854775808.000000000 != toUInt8(0), 9223372036854775808.000000000 < toUInt8(0), 9223372036854775808.000000000 <= toUInt8(0), 9223372036854775808.000000000 > toUInt8(0), 9223372036854775808.000000000 >= toUInt8(0) , toInt8(0) = 9223372036854775808.000000000, toInt8(0) != 9223372036854775808.000000000, toInt8(0) < 9223372036854775808.000000000, toInt8(0) <= 9223372036854775808.000000000, toInt8(0) > 9223372036854775808.000000000, toInt8(0) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt8(0), 9223372036854775808.000000000 != toInt8(0), 9223372036854775808.000000000 < toInt8(0), 9223372036854775808.000000000 <= toInt8(0), 9223372036854775808.000000000 > toInt8(0), 9223372036854775808.000000000 >= toInt8(0) , toUInt16(0) = 9223372036854775808.000000000, toUInt16(0) != 9223372036854775808.000000000, toUInt16(0) < 9223372036854775808.000000000, toUInt16(0) <= 9223372036854775808.000000000, toUInt16(0) > 9223372036854775808.000000000, toUInt16(0) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toUInt16(0), 9223372036854775808.000000000 != toUInt16(0), 9223372036854775808.000000000 < toUInt16(0), 9223372036854775808.000000000 <= toUInt16(0), 9223372036854775808.000000000 > toUInt16(0), 9223372036854775808.000000000 >= toUInt16(0) , toInt16(0) = 9223372036854775808.000000000, toInt16(0) != 9223372036854775808.000000000, toInt16(0) < 9223372036854775808.000000000, toInt16(0) <= 9223372036854775808.000000000, toInt16(0) > 9223372036854775808.000000000, toInt16(0) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt16(0), 9223372036854775808.000000000 != toInt16(0), 9223372036854775808.000000000 < toInt16(0), 9223372036854775808.000000000 <= toInt16(0), 9223372036854775808.000000000 > toInt16(0), 9223372036854775808.000000000 >= toInt16(0) , toUInt32(0) = 9223372036854775808.000000000, toUInt32(0) != 9223372036854775808.000000000, toUInt32(0) < 9223372036854775808.000000000, toUInt32(0) <= 9223372036854775808.000000000, toUInt32(0) 
> 9223372036854775808.000000000, toUInt32(0) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toUInt32(0), 9223372036854775808.000000000 != toUInt32(0), 9223372036854775808.000000000 < toUInt32(0), 9223372036854775808.000000000 <= toUInt32(0), 9223372036854775808.000000000 > toUInt32(0), 9223372036854775808.000000000 >= toUInt32(0) , toInt32(0) = 9223372036854775808.000000000, toInt32(0) != 9223372036854775808.000000000, toInt32(0) < 9223372036854775808.000000000, toInt32(0) <= 9223372036854775808.000000000, toInt32(0) > 9223372036854775808.000000000, toInt32(0) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt32(0), 9223372036854775808.000000000 != toInt32(0), 9223372036854775808.000000000 < toInt32(0), 9223372036854775808.000000000 <= toInt32(0), 9223372036854775808.000000000 > toInt32(0), 9223372036854775808.000000000 >= toInt32(0) , toUInt64(0) = 9223372036854775808.000000000, toUInt64(0) != 9223372036854775808.000000000, toUInt64(0) < 9223372036854775808.000000000, toUInt64(0) <= 9223372036854775808.000000000, toUInt64(0) > 9223372036854775808.000000000, toUInt64(0) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toUInt64(0), 9223372036854775808.000000000 != toUInt64(0), 9223372036854775808.000000000 < toUInt64(0), 9223372036854775808.000000000 <= toUInt64(0), 9223372036854775808.000000000 > toUInt64(0), 9223372036854775808.000000000 >= toUInt64(0) , toInt64(0) = 9223372036854775808.000000000, toInt64(0) != 9223372036854775808.000000000, toInt64(0) < 9223372036854775808.000000000, toInt64(0) <= 9223372036854775808.000000000, toInt64(0) > 9223372036854775808.000000000, toInt64(0) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt64(0), 9223372036854775808.000000000 != toInt64(0), 9223372036854775808.000000000 < toInt64(0), 9223372036854775808.000000000 <= toInt64(0), 9223372036854775808.000000000 > toInt64(0), 9223372036854775808.000000000 >= toInt64(0) ; +SELECT '0', '-9223372036854775808.000000000', 0 = -9223372036854775808.000000000, 0 != -9223372036854775808.000000000, 0 < -9223372036854775808.000000000, 0 <= -9223372036854775808.000000000, 0 > -9223372036854775808.000000000, 0 >= -9223372036854775808.000000000, -9223372036854775808.000000000 = 0, -9223372036854775808.000000000 != 0, -9223372036854775808.000000000 < 0, -9223372036854775808.000000000 <= 0, -9223372036854775808.000000000 > 0, -9223372036854775808.000000000 >= 0 , toUInt8(0) = -9223372036854775808.000000000, toUInt8(0) != -9223372036854775808.000000000, toUInt8(0) < -9223372036854775808.000000000, toUInt8(0) <= -9223372036854775808.000000000, toUInt8(0) > -9223372036854775808.000000000, toUInt8(0) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toUInt8(0), -9223372036854775808.000000000 != toUInt8(0), -9223372036854775808.000000000 < toUInt8(0), -9223372036854775808.000000000 <= toUInt8(0), -9223372036854775808.000000000 > toUInt8(0), -9223372036854775808.000000000 >= toUInt8(0) , toInt8(0) = -9223372036854775808.000000000, toInt8(0) != -9223372036854775808.000000000, toInt8(0) < -9223372036854775808.000000000, toInt8(0) <= -9223372036854775808.000000000, toInt8(0) > -9223372036854775808.000000000, toInt8(0) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toInt8(0), -9223372036854775808.000000000 != toInt8(0), -9223372036854775808.000000000 < toInt8(0), -9223372036854775808.000000000 <= toInt8(0), -9223372036854775808.000000000 > toInt8(0), -9223372036854775808.000000000 >= toInt8(0) , toUInt16(0) = 
-9223372036854775808.000000000, toUInt16(0) != -9223372036854775808.000000000, toUInt16(0) < -9223372036854775808.000000000, toUInt16(0) <= -9223372036854775808.000000000, toUInt16(0) > -9223372036854775808.000000000, toUInt16(0) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toUInt16(0), -9223372036854775808.000000000 != toUInt16(0), -9223372036854775808.000000000 < toUInt16(0), -9223372036854775808.000000000 <= toUInt16(0), -9223372036854775808.000000000 > toUInt16(0), -9223372036854775808.000000000 >= toUInt16(0) , toInt16(0) = -9223372036854775808.000000000, toInt16(0) != -9223372036854775808.000000000, toInt16(0) < -9223372036854775808.000000000, toInt16(0) <= -9223372036854775808.000000000, toInt16(0) > -9223372036854775808.000000000, toInt16(0) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toInt16(0), -9223372036854775808.000000000 != toInt16(0), -9223372036854775808.000000000 < toInt16(0), -9223372036854775808.000000000 <= toInt16(0), -9223372036854775808.000000000 > toInt16(0), -9223372036854775808.000000000 >= toInt16(0) , toUInt32(0) = -9223372036854775808.000000000, toUInt32(0) != -9223372036854775808.000000000, toUInt32(0) < -9223372036854775808.000000000, toUInt32(0) <= -9223372036854775808.000000000, toUInt32(0) > -9223372036854775808.000000000, toUInt32(0) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toUInt32(0), -9223372036854775808.000000000 != toUInt32(0), -9223372036854775808.000000000 < toUInt32(0), -9223372036854775808.000000000 <= toUInt32(0), -9223372036854775808.000000000 > toUInt32(0), -9223372036854775808.000000000 >= toUInt32(0) , toInt32(0) = -9223372036854775808.000000000, toInt32(0) != -9223372036854775808.000000000, toInt32(0) < -9223372036854775808.000000000, toInt32(0) <= -9223372036854775808.000000000, toInt32(0) > -9223372036854775808.000000000, toInt32(0) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toInt32(0), -9223372036854775808.000000000 != toInt32(0), -9223372036854775808.000000000 < toInt32(0), -9223372036854775808.000000000 <= toInt32(0), -9223372036854775808.000000000 > toInt32(0), -9223372036854775808.000000000 >= toInt32(0) , toUInt64(0) = -9223372036854775808.000000000, toUInt64(0) != -9223372036854775808.000000000, toUInt64(0) < -9223372036854775808.000000000, toUInt64(0) <= -9223372036854775808.000000000, toUInt64(0) > -9223372036854775808.000000000, toUInt64(0) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toUInt64(0), -9223372036854775808.000000000 != toUInt64(0), -9223372036854775808.000000000 < toUInt64(0), -9223372036854775808.000000000 <= toUInt64(0), -9223372036854775808.000000000 > toUInt64(0), -9223372036854775808.000000000 >= toUInt64(0) , toInt64(0) = -9223372036854775808.000000000, toInt64(0) != -9223372036854775808.000000000, toInt64(0) < -9223372036854775808.000000000, toInt64(0) <= -9223372036854775808.000000000, toInt64(0) > -9223372036854775808.000000000, toInt64(0) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toInt64(0), -9223372036854775808.000000000 != toInt64(0), -9223372036854775808.000000000 < toInt64(0), -9223372036854775808.000000000 <= toInt64(0), -9223372036854775808.000000000 > toInt64(0), -9223372036854775808.000000000 >= toInt64(0) ; +SELECT '0', '9223372036854775808.000000000', 0 = 9223372036854775808.000000000, 0 != 9223372036854775808.000000000, 0 < 9223372036854775808.000000000, 0 <= 9223372036854775808.000000000, 0 > 9223372036854775808.000000000, 0 >= 
9223372036854775808.000000000, 9223372036854775808.000000000 = 0, 9223372036854775808.000000000 != 0, 9223372036854775808.000000000 < 0, 9223372036854775808.000000000 <= 0, 9223372036854775808.000000000 > 0, 9223372036854775808.000000000 >= 0 , toUInt8(0) = 9223372036854775808.000000000, toUInt8(0) != 9223372036854775808.000000000, toUInt8(0) < 9223372036854775808.000000000, toUInt8(0) <= 9223372036854775808.000000000, toUInt8(0) > 9223372036854775808.000000000, toUInt8(0) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toUInt8(0), 9223372036854775808.000000000 != toUInt8(0), 9223372036854775808.000000000 < toUInt8(0), 9223372036854775808.000000000 <= toUInt8(0), 9223372036854775808.000000000 > toUInt8(0), 9223372036854775808.000000000 >= toUInt8(0) , toInt8(0) = 9223372036854775808.000000000, toInt8(0) != 9223372036854775808.000000000, toInt8(0) < 9223372036854775808.000000000, toInt8(0) <= 9223372036854775808.000000000, toInt8(0) > 9223372036854775808.000000000, toInt8(0) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt8(0), 9223372036854775808.000000000 != toInt8(0), 9223372036854775808.000000000 < toInt8(0), 9223372036854775808.000000000 <= toInt8(0), 9223372036854775808.000000000 > toInt8(0), 9223372036854775808.000000000 >= toInt8(0) , toUInt16(0) = 9223372036854775808.000000000, toUInt16(0) != 9223372036854775808.000000000, toUInt16(0) < 9223372036854775808.000000000, toUInt16(0) <= 9223372036854775808.000000000, toUInt16(0) > 9223372036854775808.000000000, toUInt16(0) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toUInt16(0), 9223372036854775808.000000000 != toUInt16(0), 9223372036854775808.000000000 < toUInt16(0), 9223372036854775808.000000000 <= toUInt16(0), 9223372036854775808.000000000 > toUInt16(0), 9223372036854775808.000000000 >= toUInt16(0) , toInt16(0) = 9223372036854775808.000000000, toInt16(0) != 9223372036854775808.000000000, toInt16(0) < 9223372036854775808.000000000, toInt16(0) <= 9223372036854775808.000000000, toInt16(0) > 9223372036854775808.000000000, toInt16(0) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt16(0), 9223372036854775808.000000000 != toInt16(0), 9223372036854775808.000000000 < toInt16(0), 9223372036854775808.000000000 <= toInt16(0), 9223372036854775808.000000000 > toInt16(0), 9223372036854775808.000000000 >= toInt16(0) , toUInt32(0) = 9223372036854775808.000000000, toUInt32(0) != 9223372036854775808.000000000, toUInt32(0) < 9223372036854775808.000000000, toUInt32(0) <= 9223372036854775808.000000000, toUInt32(0) > 9223372036854775808.000000000, toUInt32(0) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toUInt32(0), 9223372036854775808.000000000 != toUInt32(0), 9223372036854775808.000000000 < toUInt32(0), 9223372036854775808.000000000 <= toUInt32(0), 9223372036854775808.000000000 > toUInt32(0), 9223372036854775808.000000000 >= toUInt32(0) , toInt32(0) = 9223372036854775808.000000000, toInt32(0) != 9223372036854775808.000000000, toInt32(0) < 9223372036854775808.000000000, toInt32(0) <= 9223372036854775808.000000000, toInt32(0) > 9223372036854775808.000000000, toInt32(0) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt32(0), 9223372036854775808.000000000 != toInt32(0), 9223372036854775808.000000000 < toInt32(0), 9223372036854775808.000000000 <= toInt32(0), 9223372036854775808.000000000 > toInt32(0), 9223372036854775808.000000000 >= toInt32(0) , toUInt64(0) = 9223372036854775808.000000000, toUInt64(0) != 
9223372036854775808.000000000, toUInt64(0) < 9223372036854775808.000000000, toUInt64(0) <= 9223372036854775808.000000000, toUInt64(0) > 9223372036854775808.000000000, toUInt64(0) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toUInt64(0), 9223372036854775808.000000000 != toUInt64(0), 9223372036854775808.000000000 < toUInt64(0), 9223372036854775808.000000000 <= toUInt64(0), 9223372036854775808.000000000 > toUInt64(0), 9223372036854775808.000000000 >= toUInt64(0) , toInt64(0) = 9223372036854775808.000000000, toInt64(0) != 9223372036854775808.000000000, toInt64(0) < 9223372036854775808.000000000, toInt64(0) <= 9223372036854775808.000000000, toInt64(0) > 9223372036854775808.000000000, toInt64(0) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt64(0), 9223372036854775808.000000000 != toInt64(0), 9223372036854775808.000000000 < toInt64(0), 9223372036854775808.000000000 <= toInt64(0), 9223372036854775808.000000000 > toInt64(0), 9223372036854775808.000000000 >= toInt64(0) ; +SELECT '0', '2251799813685248.000000000', 0 = 2251799813685248.000000000, 0 != 2251799813685248.000000000, 0 < 2251799813685248.000000000, 0 <= 2251799813685248.000000000, 0 > 2251799813685248.000000000, 0 >= 2251799813685248.000000000, 2251799813685248.000000000 = 0, 2251799813685248.000000000 != 0, 2251799813685248.000000000 < 0, 2251799813685248.000000000 <= 0, 2251799813685248.000000000 > 0, 2251799813685248.000000000 >= 0 , toUInt8(0) = 2251799813685248.000000000, toUInt8(0) != 2251799813685248.000000000, toUInt8(0) < 2251799813685248.000000000, toUInt8(0) <= 2251799813685248.000000000, toUInt8(0) > 2251799813685248.000000000, toUInt8(0) >= 2251799813685248.000000000, 2251799813685248.000000000 = toUInt8(0), 2251799813685248.000000000 != toUInt8(0), 2251799813685248.000000000 < toUInt8(0), 2251799813685248.000000000 <= toUInt8(0), 2251799813685248.000000000 > toUInt8(0), 2251799813685248.000000000 >= toUInt8(0) , toInt8(0) = 2251799813685248.000000000, toInt8(0) != 2251799813685248.000000000, toInt8(0) < 2251799813685248.000000000, toInt8(0) <= 2251799813685248.000000000, toInt8(0) > 2251799813685248.000000000, toInt8(0) >= 2251799813685248.000000000, 2251799813685248.000000000 = toInt8(0), 2251799813685248.000000000 != toInt8(0), 2251799813685248.000000000 < toInt8(0), 2251799813685248.000000000 <= toInt8(0), 2251799813685248.000000000 > toInt8(0), 2251799813685248.000000000 >= toInt8(0) , toUInt16(0) = 2251799813685248.000000000, toUInt16(0) != 2251799813685248.000000000, toUInt16(0) < 2251799813685248.000000000, toUInt16(0) <= 2251799813685248.000000000, toUInt16(0) > 2251799813685248.000000000, toUInt16(0) >= 2251799813685248.000000000, 2251799813685248.000000000 = toUInt16(0), 2251799813685248.000000000 != toUInt16(0), 2251799813685248.000000000 < toUInt16(0), 2251799813685248.000000000 <= toUInt16(0), 2251799813685248.000000000 > toUInt16(0), 2251799813685248.000000000 >= toUInt16(0) , toInt16(0) = 2251799813685248.000000000, toInt16(0) != 2251799813685248.000000000, toInt16(0) < 2251799813685248.000000000, toInt16(0) <= 2251799813685248.000000000, toInt16(0) > 2251799813685248.000000000, toInt16(0) >= 2251799813685248.000000000, 2251799813685248.000000000 = toInt16(0), 2251799813685248.000000000 != toInt16(0), 2251799813685248.000000000 < toInt16(0), 2251799813685248.000000000 <= toInt16(0), 2251799813685248.000000000 > toInt16(0), 2251799813685248.000000000 >= toInt16(0) , toUInt32(0) = 2251799813685248.000000000, toUInt32(0) != 2251799813685248.000000000, toUInt32(0) < 
2251799813685248.000000000, toUInt32(0) <= 2251799813685248.000000000, toUInt32(0) > 2251799813685248.000000000, toUInt32(0) >= 2251799813685248.000000000, 2251799813685248.000000000 = toUInt32(0), 2251799813685248.000000000 != toUInt32(0), 2251799813685248.000000000 < toUInt32(0), 2251799813685248.000000000 <= toUInt32(0), 2251799813685248.000000000 > toUInt32(0), 2251799813685248.000000000 >= toUInt32(0) , toInt32(0) = 2251799813685248.000000000, toInt32(0) != 2251799813685248.000000000, toInt32(0) < 2251799813685248.000000000, toInt32(0) <= 2251799813685248.000000000, toInt32(0) > 2251799813685248.000000000, toInt32(0) >= 2251799813685248.000000000, 2251799813685248.000000000 = toInt32(0), 2251799813685248.000000000 != toInt32(0), 2251799813685248.000000000 < toInt32(0), 2251799813685248.000000000 <= toInt32(0), 2251799813685248.000000000 > toInt32(0), 2251799813685248.000000000 >= toInt32(0) , toUInt64(0) = 2251799813685248.000000000, toUInt64(0) != 2251799813685248.000000000, toUInt64(0) < 2251799813685248.000000000, toUInt64(0) <= 2251799813685248.000000000, toUInt64(0) > 2251799813685248.000000000, toUInt64(0) >= 2251799813685248.000000000, 2251799813685248.000000000 = toUInt64(0), 2251799813685248.000000000 != toUInt64(0), 2251799813685248.000000000 < toUInt64(0), 2251799813685248.000000000 <= toUInt64(0), 2251799813685248.000000000 > toUInt64(0), 2251799813685248.000000000 >= toUInt64(0) , toInt64(0) = 2251799813685248.000000000, toInt64(0) != 2251799813685248.000000000, toInt64(0) < 2251799813685248.000000000, toInt64(0) <= 2251799813685248.000000000, toInt64(0) > 2251799813685248.000000000, toInt64(0) >= 2251799813685248.000000000, 2251799813685248.000000000 = toInt64(0), 2251799813685248.000000000 != toInt64(0), 2251799813685248.000000000 < toInt64(0), 2251799813685248.000000000 <= toInt64(0), 2251799813685248.000000000 > toInt64(0), 2251799813685248.000000000 >= toInt64(0) ; +SELECT '0', '4503599627370496.000000000', 0 = 4503599627370496.000000000, 0 != 4503599627370496.000000000, 0 < 4503599627370496.000000000, 0 <= 4503599627370496.000000000, 0 > 4503599627370496.000000000, 0 >= 4503599627370496.000000000, 4503599627370496.000000000 = 0, 4503599627370496.000000000 != 0, 4503599627370496.000000000 < 0, 4503599627370496.000000000 <= 0, 4503599627370496.000000000 > 0, 4503599627370496.000000000 >= 0 , toUInt8(0) = 4503599627370496.000000000, toUInt8(0) != 4503599627370496.000000000, toUInt8(0) < 4503599627370496.000000000, toUInt8(0) <= 4503599627370496.000000000, toUInt8(0) > 4503599627370496.000000000, toUInt8(0) >= 4503599627370496.000000000, 4503599627370496.000000000 = toUInt8(0), 4503599627370496.000000000 != toUInt8(0), 4503599627370496.000000000 < toUInt8(0), 4503599627370496.000000000 <= toUInt8(0), 4503599627370496.000000000 > toUInt8(0), 4503599627370496.000000000 >= toUInt8(0) , toInt8(0) = 4503599627370496.000000000, toInt8(0) != 4503599627370496.000000000, toInt8(0) < 4503599627370496.000000000, toInt8(0) <= 4503599627370496.000000000, toInt8(0) > 4503599627370496.000000000, toInt8(0) >= 4503599627370496.000000000, 4503599627370496.000000000 = toInt8(0), 4503599627370496.000000000 != toInt8(0), 4503599627370496.000000000 < toInt8(0), 4503599627370496.000000000 <= toInt8(0), 4503599627370496.000000000 > toInt8(0), 4503599627370496.000000000 >= toInt8(0) , toUInt16(0) = 4503599627370496.000000000, toUInt16(0) != 4503599627370496.000000000, toUInt16(0) < 4503599627370496.000000000, toUInt16(0) <= 4503599627370496.000000000, toUInt16(0) > 4503599627370496.000000000, 
toUInt16(0) >= 4503599627370496.000000000, 4503599627370496.000000000 = toUInt16(0), 4503599627370496.000000000 != toUInt16(0), 4503599627370496.000000000 < toUInt16(0), 4503599627370496.000000000 <= toUInt16(0), 4503599627370496.000000000 > toUInt16(0), 4503599627370496.000000000 >= toUInt16(0) , toInt16(0) = 4503599627370496.000000000, toInt16(0) != 4503599627370496.000000000, toInt16(0) < 4503599627370496.000000000, toInt16(0) <= 4503599627370496.000000000, toInt16(0) > 4503599627370496.000000000, toInt16(0) >= 4503599627370496.000000000, 4503599627370496.000000000 = toInt16(0), 4503599627370496.000000000 != toInt16(0), 4503599627370496.000000000 < toInt16(0), 4503599627370496.000000000 <= toInt16(0), 4503599627370496.000000000 > toInt16(0), 4503599627370496.000000000 >= toInt16(0) , toUInt32(0) = 4503599627370496.000000000, toUInt32(0) != 4503599627370496.000000000, toUInt32(0) < 4503599627370496.000000000, toUInt32(0) <= 4503599627370496.000000000, toUInt32(0) > 4503599627370496.000000000, toUInt32(0) >= 4503599627370496.000000000, 4503599627370496.000000000 = toUInt32(0), 4503599627370496.000000000 != toUInt32(0), 4503599627370496.000000000 < toUInt32(0), 4503599627370496.000000000 <= toUInt32(0), 4503599627370496.000000000 > toUInt32(0), 4503599627370496.000000000 >= toUInt32(0) , toInt32(0) = 4503599627370496.000000000, toInt32(0) != 4503599627370496.000000000, toInt32(0) < 4503599627370496.000000000, toInt32(0) <= 4503599627370496.000000000, toInt32(0) > 4503599627370496.000000000, toInt32(0) >= 4503599627370496.000000000, 4503599627370496.000000000 = toInt32(0), 4503599627370496.000000000 != toInt32(0), 4503599627370496.000000000 < toInt32(0), 4503599627370496.000000000 <= toInt32(0), 4503599627370496.000000000 > toInt32(0), 4503599627370496.000000000 >= toInt32(0) , toUInt64(0) = 4503599627370496.000000000, toUInt64(0) != 4503599627370496.000000000, toUInt64(0) < 4503599627370496.000000000, toUInt64(0) <= 4503599627370496.000000000, toUInt64(0) > 4503599627370496.000000000, toUInt64(0) >= 4503599627370496.000000000, 4503599627370496.000000000 = toUInt64(0), 4503599627370496.000000000 != toUInt64(0), 4503599627370496.000000000 < toUInt64(0), 4503599627370496.000000000 <= toUInt64(0), 4503599627370496.000000000 > toUInt64(0), 4503599627370496.000000000 >= toUInt64(0) , toInt64(0) = 4503599627370496.000000000, toInt64(0) != 4503599627370496.000000000, toInt64(0) < 4503599627370496.000000000, toInt64(0) <= 4503599627370496.000000000, toInt64(0) > 4503599627370496.000000000, toInt64(0) >= 4503599627370496.000000000, 4503599627370496.000000000 = toInt64(0), 4503599627370496.000000000 != toInt64(0), 4503599627370496.000000000 < toInt64(0), 4503599627370496.000000000 <= toInt64(0), 4503599627370496.000000000 > toInt64(0), 4503599627370496.000000000 >= toInt64(0) ; +SELECT '0', '9007199254740991.000000000', 0 = 9007199254740991.000000000, 0 != 9007199254740991.000000000, 0 < 9007199254740991.000000000, 0 <= 9007199254740991.000000000, 0 > 9007199254740991.000000000, 0 >= 9007199254740991.000000000, 9007199254740991.000000000 = 0, 9007199254740991.000000000 != 0, 9007199254740991.000000000 < 0, 9007199254740991.000000000 <= 0, 9007199254740991.000000000 > 0, 9007199254740991.000000000 >= 0 , toUInt8(0) = 9007199254740991.000000000, toUInt8(0) != 9007199254740991.000000000, toUInt8(0) < 9007199254740991.000000000, toUInt8(0) <= 9007199254740991.000000000, toUInt8(0) > 9007199254740991.000000000, toUInt8(0) >= 9007199254740991.000000000, 9007199254740991.000000000 = toUInt8(0), 
9007199254740991.000000000 != toUInt8(0), 9007199254740991.000000000 < toUInt8(0), 9007199254740991.000000000 <= toUInt8(0), 9007199254740991.000000000 > toUInt8(0), 9007199254740991.000000000 >= toUInt8(0) , toInt8(0) = 9007199254740991.000000000, toInt8(0) != 9007199254740991.000000000, toInt8(0) < 9007199254740991.000000000, toInt8(0) <= 9007199254740991.000000000, toInt8(0) > 9007199254740991.000000000, toInt8(0) >= 9007199254740991.000000000, 9007199254740991.000000000 = toInt8(0), 9007199254740991.000000000 != toInt8(0), 9007199254740991.000000000 < toInt8(0), 9007199254740991.000000000 <= toInt8(0), 9007199254740991.000000000 > toInt8(0), 9007199254740991.000000000 >= toInt8(0) , toUInt16(0) = 9007199254740991.000000000, toUInt16(0) != 9007199254740991.000000000, toUInt16(0) < 9007199254740991.000000000, toUInt16(0) <= 9007199254740991.000000000, toUInt16(0) > 9007199254740991.000000000, toUInt16(0) >= 9007199254740991.000000000, 9007199254740991.000000000 = toUInt16(0), 9007199254740991.000000000 != toUInt16(0), 9007199254740991.000000000 < toUInt16(0), 9007199254740991.000000000 <= toUInt16(0), 9007199254740991.000000000 > toUInt16(0), 9007199254740991.000000000 >= toUInt16(0) , toInt16(0) = 9007199254740991.000000000, toInt16(0) != 9007199254740991.000000000, toInt16(0) < 9007199254740991.000000000, toInt16(0) <= 9007199254740991.000000000, toInt16(0) > 9007199254740991.000000000, toInt16(0) >= 9007199254740991.000000000, 9007199254740991.000000000 = toInt16(0), 9007199254740991.000000000 != toInt16(0), 9007199254740991.000000000 < toInt16(0), 9007199254740991.000000000 <= toInt16(0), 9007199254740991.000000000 > toInt16(0), 9007199254740991.000000000 >= toInt16(0) , toUInt32(0) = 9007199254740991.000000000, toUInt32(0) != 9007199254740991.000000000, toUInt32(0) < 9007199254740991.000000000, toUInt32(0) <= 9007199254740991.000000000, toUInt32(0) > 9007199254740991.000000000, toUInt32(0) >= 9007199254740991.000000000, 9007199254740991.000000000 = toUInt32(0), 9007199254740991.000000000 != toUInt32(0), 9007199254740991.000000000 < toUInt32(0), 9007199254740991.000000000 <= toUInt32(0), 9007199254740991.000000000 > toUInt32(0), 9007199254740991.000000000 >= toUInt32(0) , toInt32(0) = 9007199254740991.000000000, toInt32(0) != 9007199254740991.000000000, toInt32(0) < 9007199254740991.000000000, toInt32(0) <= 9007199254740991.000000000, toInt32(0) > 9007199254740991.000000000, toInt32(0) >= 9007199254740991.000000000, 9007199254740991.000000000 = toInt32(0), 9007199254740991.000000000 != toInt32(0), 9007199254740991.000000000 < toInt32(0), 9007199254740991.000000000 <= toInt32(0), 9007199254740991.000000000 > toInt32(0), 9007199254740991.000000000 >= toInt32(0) , toUInt64(0) = 9007199254740991.000000000, toUInt64(0) != 9007199254740991.000000000, toUInt64(0) < 9007199254740991.000000000, toUInt64(0) <= 9007199254740991.000000000, toUInt64(0) > 9007199254740991.000000000, toUInt64(0) >= 9007199254740991.000000000, 9007199254740991.000000000 = toUInt64(0), 9007199254740991.000000000 != toUInt64(0), 9007199254740991.000000000 < toUInt64(0), 9007199254740991.000000000 <= toUInt64(0), 9007199254740991.000000000 > toUInt64(0), 9007199254740991.000000000 >= toUInt64(0) , toInt64(0) = 9007199254740991.000000000, toInt64(0) != 9007199254740991.000000000, toInt64(0) < 9007199254740991.000000000, toInt64(0) <= 9007199254740991.000000000, toInt64(0) > 9007199254740991.000000000, toInt64(0) >= 9007199254740991.000000000, 9007199254740991.000000000 = toInt64(0), 9007199254740991.000000000 != 
toInt64(0), 9007199254740991.000000000 < toInt64(0), 9007199254740991.000000000 <= toInt64(0), 9007199254740991.000000000 > toInt64(0), 9007199254740991.000000000 >= toInt64(0) ; +SELECT '0', '9007199254740992.000000000', 0 = 9007199254740992.000000000, 0 != 9007199254740992.000000000, 0 < 9007199254740992.000000000, 0 <= 9007199254740992.000000000, 0 > 9007199254740992.000000000, 0 >= 9007199254740992.000000000, 9007199254740992.000000000 = 0, 9007199254740992.000000000 != 0, 9007199254740992.000000000 < 0, 9007199254740992.000000000 <= 0, 9007199254740992.000000000 > 0, 9007199254740992.000000000 >= 0 , toUInt8(0) = 9007199254740992.000000000, toUInt8(0) != 9007199254740992.000000000, toUInt8(0) < 9007199254740992.000000000, toUInt8(0) <= 9007199254740992.000000000, toUInt8(0) > 9007199254740992.000000000, toUInt8(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt8(0), 9007199254740992.000000000 != toUInt8(0), 9007199254740992.000000000 < toUInt8(0), 9007199254740992.000000000 <= toUInt8(0), 9007199254740992.000000000 > toUInt8(0), 9007199254740992.000000000 >= toUInt8(0) , toInt8(0) = 9007199254740992.000000000, toInt8(0) != 9007199254740992.000000000, toInt8(0) < 9007199254740992.000000000, toInt8(0) <= 9007199254740992.000000000, toInt8(0) > 9007199254740992.000000000, toInt8(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt8(0), 9007199254740992.000000000 != toInt8(0), 9007199254740992.000000000 < toInt8(0), 9007199254740992.000000000 <= toInt8(0), 9007199254740992.000000000 > toInt8(0), 9007199254740992.000000000 >= toInt8(0) , toUInt16(0) = 9007199254740992.000000000, toUInt16(0) != 9007199254740992.000000000, toUInt16(0) < 9007199254740992.000000000, toUInt16(0) <= 9007199254740992.000000000, toUInt16(0) > 9007199254740992.000000000, toUInt16(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt16(0), 9007199254740992.000000000 != toUInt16(0), 9007199254740992.000000000 < toUInt16(0), 9007199254740992.000000000 <= toUInt16(0), 9007199254740992.000000000 > toUInt16(0), 9007199254740992.000000000 >= toUInt16(0) , toInt16(0) = 9007199254740992.000000000, toInt16(0) != 9007199254740992.000000000, toInt16(0) < 9007199254740992.000000000, toInt16(0) <= 9007199254740992.000000000, toInt16(0) > 9007199254740992.000000000, toInt16(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt16(0), 9007199254740992.000000000 != toInt16(0), 9007199254740992.000000000 < toInt16(0), 9007199254740992.000000000 <= toInt16(0), 9007199254740992.000000000 > toInt16(0), 9007199254740992.000000000 >= toInt16(0) , toUInt32(0) = 9007199254740992.000000000, toUInt32(0) != 9007199254740992.000000000, toUInt32(0) < 9007199254740992.000000000, toUInt32(0) <= 9007199254740992.000000000, toUInt32(0) > 9007199254740992.000000000, toUInt32(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt32(0), 9007199254740992.000000000 != toUInt32(0), 9007199254740992.000000000 < toUInt32(0), 9007199254740992.000000000 <= toUInt32(0), 9007199254740992.000000000 > toUInt32(0), 9007199254740992.000000000 >= toUInt32(0) , toInt32(0) = 9007199254740992.000000000, toInt32(0) != 9007199254740992.000000000, toInt32(0) < 9007199254740992.000000000, toInt32(0) <= 9007199254740992.000000000, toInt32(0) > 9007199254740992.000000000, toInt32(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt32(0), 9007199254740992.000000000 != toInt32(0), 9007199254740992.000000000 < toInt32(0), 9007199254740992.000000000 <= toInt32(0), 
9007199254740992.000000000 > toInt32(0), 9007199254740992.000000000 >= toInt32(0) , toUInt64(0) = 9007199254740992.000000000, toUInt64(0) != 9007199254740992.000000000, toUInt64(0) < 9007199254740992.000000000, toUInt64(0) <= 9007199254740992.000000000, toUInt64(0) > 9007199254740992.000000000, toUInt64(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt64(0), 9007199254740992.000000000 != toUInt64(0), 9007199254740992.000000000 < toUInt64(0), 9007199254740992.000000000 <= toUInt64(0), 9007199254740992.000000000 > toUInt64(0), 9007199254740992.000000000 >= toUInt64(0) , toInt64(0) = 9007199254740992.000000000, toInt64(0) != 9007199254740992.000000000, toInt64(0) < 9007199254740992.000000000, toInt64(0) <= 9007199254740992.000000000, toInt64(0) > 9007199254740992.000000000, toInt64(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt64(0), 9007199254740992.000000000 != toInt64(0), 9007199254740992.000000000 < toInt64(0), 9007199254740992.000000000 <= toInt64(0), 9007199254740992.000000000 > toInt64(0), 9007199254740992.000000000 >= toInt64(0) ; +SELECT '0', '9007199254740992.000000000', 0 = 9007199254740992.000000000, 0 != 9007199254740992.000000000, 0 < 9007199254740992.000000000, 0 <= 9007199254740992.000000000, 0 > 9007199254740992.000000000, 0 >= 9007199254740992.000000000, 9007199254740992.000000000 = 0, 9007199254740992.000000000 != 0, 9007199254740992.000000000 < 0, 9007199254740992.000000000 <= 0, 9007199254740992.000000000 > 0, 9007199254740992.000000000 >= 0 , toUInt8(0) = 9007199254740992.000000000, toUInt8(0) != 9007199254740992.000000000, toUInt8(0) < 9007199254740992.000000000, toUInt8(0) <= 9007199254740992.000000000, toUInt8(0) > 9007199254740992.000000000, toUInt8(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt8(0), 9007199254740992.000000000 != toUInt8(0), 9007199254740992.000000000 < toUInt8(0), 9007199254740992.000000000 <= toUInt8(0), 9007199254740992.000000000 > toUInt8(0), 9007199254740992.000000000 >= toUInt8(0) , toInt8(0) = 9007199254740992.000000000, toInt8(0) != 9007199254740992.000000000, toInt8(0) < 9007199254740992.000000000, toInt8(0) <= 9007199254740992.000000000, toInt8(0) > 9007199254740992.000000000, toInt8(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt8(0), 9007199254740992.000000000 != toInt8(0), 9007199254740992.000000000 < toInt8(0), 9007199254740992.000000000 <= toInt8(0), 9007199254740992.000000000 > toInt8(0), 9007199254740992.000000000 >= toInt8(0) , toUInt16(0) = 9007199254740992.000000000, toUInt16(0) != 9007199254740992.000000000, toUInt16(0) < 9007199254740992.000000000, toUInt16(0) <= 9007199254740992.000000000, toUInt16(0) > 9007199254740992.000000000, toUInt16(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt16(0), 9007199254740992.000000000 != toUInt16(0), 9007199254740992.000000000 < toUInt16(0), 9007199254740992.000000000 <= toUInt16(0), 9007199254740992.000000000 > toUInt16(0), 9007199254740992.000000000 >= toUInt16(0) , toInt16(0) = 9007199254740992.000000000, toInt16(0) != 9007199254740992.000000000, toInt16(0) < 9007199254740992.000000000, toInt16(0) <= 9007199254740992.000000000, toInt16(0) > 9007199254740992.000000000, toInt16(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt16(0), 9007199254740992.000000000 != toInt16(0), 9007199254740992.000000000 < toInt16(0), 9007199254740992.000000000 <= toInt16(0), 9007199254740992.000000000 > toInt16(0), 9007199254740992.000000000 >= toInt16(0) , toUInt32(0) = 
9007199254740992.000000000, toUInt32(0) != 9007199254740992.000000000, toUInt32(0) < 9007199254740992.000000000, toUInt32(0) <= 9007199254740992.000000000, toUInt32(0) > 9007199254740992.000000000, toUInt32(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt32(0), 9007199254740992.000000000 != toUInt32(0), 9007199254740992.000000000 < toUInt32(0), 9007199254740992.000000000 <= toUInt32(0), 9007199254740992.000000000 > toUInt32(0), 9007199254740992.000000000 >= toUInt32(0) , toInt32(0) = 9007199254740992.000000000, toInt32(0) != 9007199254740992.000000000, toInt32(0) < 9007199254740992.000000000, toInt32(0) <= 9007199254740992.000000000, toInt32(0) > 9007199254740992.000000000, toInt32(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt32(0), 9007199254740992.000000000 != toInt32(0), 9007199254740992.000000000 < toInt32(0), 9007199254740992.000000000 <= toInt32(0), 9007199254740992.000000000 > toInt32(0), 9007199254740992.000000000 >= toInt32(0) , toUInt64(0) = 9007199254740992.000000000, toUInt64(0) != 9007199254740992.000000000, toUInt64(0) < 9007199254740992.000000000, toUInt64(0) <= 9007199254740992.000000000, toUInt64(0) > 9007199254740992.000000000, toUInt64(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt64(0), 9007199254740992.000000000 != toUInt64(0), 9007199254740992.000000000 < toUInt64(0), 9007199254740992.000000000 <= toUInt64(0), 9007199254740992.000000000 > toUInt64(0), 9007199254740992.000000000 >= toUInt64(0) , toInt64(0) = 9007199254740992.000000000, toInt64(0) != 9007199254740992.000000000, toInt64(0) < 9007199254740992.000000000, toInt64(0) <= 9007199254740992.000000000, toInt64(0) > 9007199254740992.000000000, toInt64(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt64(0), 9007199254740992.000000000 != toInt64(0), 9007199254740992.000000000 < toInt64(0), 9007199254740992.000000000 <= toInt64(0), 9007199254740992.000000000 > toInt64(0), 9007199254740992.000000000 >= toInt64(0) ; +SELECT '0', '9007199254740994.000000000', 0 = 9007199254740994.000000000, 0 != 9007199254740994.000000000, 0 < 9007199254740994.000000000, 0 <= 9007199254740994.000000000, 0 > 9007199254740994.000000000, 0 >= 9007199254740994.000000000, 9007199254740994.000000000 = 0, 9007199254740994.000000000 != 0, 9007199254740994.000000000 < 0, 9007199254740994.000000000 <= 0, 9007199254740994.000000000 > 0, 9007199254740994.000000000 >= 0 , toUInt8(0) = 9007199254740994.000000000, toUInt8(0) != 9007199254740994.000000000, toUInt8(0) < 9007199254740994.000000000, toUInt8(0) <= 9007199254740994.000000000, toUInt8(0) > 9007199254740994.000000000, toUInt8(0) >= 9007199254740994.000000000, 9007199254740994.000000000 = toUInt8(0), 9007199254740994.000000000 != toUInt8(0), 9007199254740994.000000000 < toUInt8(0), 9007199254740994.000000000 <= toUInt8(0), 9007199254740994.000000000 > toUInt8(0), 9007199254740994.000000000 >= toUInt8(0) , toInt8(0) = 9007199254740994.000000000, toInt8(0) != 9007199254740994.000000000, toInt8(0) < 9007199254740994.000000000, toInt8(0) <= 9007199254740994.000000000, toInt8(0) > 9007199254740994.000000000, toInt8(0) >= 9007199254740994.000000000, 9007199254740994.000000000 = toInt8(0), 9007199254740994.000000000 != toInt8(0), 9007199254740994.000000000 < toInt8(0), 9007199254740994.000000000 <= toInt8(0), 9007199254740994.000000000 > toInt8(0), 9007199254740994.000000000 >= toInt8(0) , toUInt16(0) = 9007199254740994.000000000, toUInt16(0) != 9007199254740994.000000000, toUInt16(0) < 9007199254740994.000000000, 
toUInt16(0) <= 9007199254740994.000000000, toUInt16(0) > 9007199254740994.000000000, toUInt16(0) >= 9007199254740994.000000000, 9007199254740994.000000000 = toUInt16(0), 9007199254740994.000000000 != toUInt16(0), 9007199254740994.000000000 < toUInt16(0), 9007199254740994.000000000 <= toUInt16(0), 9007199254740994.000000000 > toUInt16(0), 9007199254740994.000000000 >= toUInt16(0) , toInt16(0) = 9007199254740994.000000000, toInt16(0) != 9007199254740994.000000000, toInt16(0) < 9007199254740994.000000000, toInt16(0) <= 9007199254740994.000000000, toInt16(0) > 9007199254740994.000000000, toInt16(0) >= 9007199254740994.000000000, 9007199254740994.000000000 = toInt16(0), 9007199254740994.000000000 != toInt16(0), 9007199254740994.000000000 < toInt16(0), 9007199254740994.000000000 <= toInt16(0), 9007199254740994.000000000 > toInt16(0), 9007199254740994.000000000 >= toInt16(0) , toUInt32(0) = 9007199254740994.000000000, toUInt32(0) != 9007199254740994.000000000, toUInt32(0) < 9007199254740994.000000000, toUInt32(0) <= 9007199254740994.000000000, toUInt32(0) > 9007199254740994.000000000, toUInt32(0) >= 9007199254740994.000000000, 9007199254740994.000000000 = toUInt32(0), 9007199254740994.000000000 != toUInt32(0), 9007199254740994.000000000 < toUInt32(0), 9007199254740994.000000000 <= toUInt32(0), 9007199254740994.000000000 > toUInt32(0), 9007199254740994.000000000 >= toUInt32(0) , toInt32(0) = 9007199254740994.000000000, toInt32(0) != 9007199254740994.000000000, toInt32(0) < 9007199254740994.000000000, toInt32(0) <= 9007199254740994.000000000, toInt32(0) > 9007199254740994.000000000, toInt32(0) >= 9007199254740994.000000000, 9007199254740994.000000000 = toInt32(0), 9007199254740994.000000000 != toInt32(0), 9007199254740994.000000000 < toInt32(0), 9007199254740994.000000000 <= toInt32(0), 9007199254740994.000000000 > toInt32(0), 9007199254740994.000000000 >= toInt32(0) , toUInt64(0) = 9007199254740994.000000000, toUInt64(0) != 9007199254740994.000000000, toUInt64(0) < 9007199254740994.000000000, toUInt64(0) <= 9007199254740994.000000000, toUInt64(0) > 9007199254740994.000000000, toUInt64(0) >= 9007199254740994.000000000, 9007199254740994.000000000 = toUInt64(0), 9007199254740994.000000000 != toUInt64(0), 9007199254740994.000000000 < toUInt64(0), 9007199254740994.000000000 <= toUInt64(0), 9007199254740994.000000000 > toUInt64(0), 9007199254740994.000000000 >= toUInt64(0) , toInt64(0) = 9007199254740994.000000000, toInt64(0) != 9007199254740994.000000000, toInt64(0) < 9007199254740994.000000000, toInt64(0) <= 9007199254740994.000000000, toInt64(0) > 9007199254740994.000000000, toInt64(0) >= 9007199254740994.000000000, 9007199254740994.000000000 = toInt64(0), 9007199254740994.000000000 != toInt64(0), 9007199254740994.000000000 < toInt64(0), 9007199254740994.000000000 <= toInt64(0), 9007199254740994.000000000 > toInt64(0), 9007199254740994.000000000 >= toInt64(0) ; +SELECT '0', '-9007199254740991.000000000', 0 = -9007199254740991.000000000, 0 != -9007199254740991.000000000, 0 < -9007199254740991.000000000, 0 <= -9007199254740991.000000000, 0 > -9007199254740991.000000000, 0 >= -9007199254740991.000000000, -9007199254740991.000000000 = 0, -9007199254740991.000000000 != 0, -9007199254740991.000000000 < 0, -9007199254740991.000000000 <= 0, -9007199254740991.000000000 > 0, -9007199254740991.000000000 >= 0 , toUInt8(0) = -9007199254740991.000000000, toUInt8(0) != -9007199254740991.000000000, toUInt8(0) < -9007199254740991.000000000, toUInt8(0) <= -9007199254740991.000000000, toUInt8(0) > 
-9007199254740991.000000000, toUInt8(0) >= -9007199254740991.000000000, -9007199254740991.000000000 = toUInt8(0), -9007199254740991.000000000 != toUInt8(0), -9007199254740991.000000000 < toUInt8(0), -9007199254740991.000000000 <= toUInt8(0), -9007199254740991.000000000 > toUInt8(0), -9007199254740991.000000000 >= toUInt8(0) , toInt8(0) = -9007199254740991.000000000, toInt8(0) != -9007199254740991.000000000, toInt8(0) < -9007199254740991.000000000, toInt8(0) <= -9007199254740991.000000000, toInt8(0) > -9007199254740991.000000000, toInt8(0) >= -9007199254740991.000000000, -9007199254740991.000000000 = toInt8(0), -9007199254740991.000000000 != toInt8(0), -9007199254740991.000000000 < toInt8(0), -9007199254740991.000000000 <= toInt8(0), -9007199254740991.000000000 > toInt8(0), -9007199254740991.000000000 >= toInt8(0) , toUInt16(0) = -9007199254740991.000000000, toUInt16(0) != -9007199254740991.000000000, toUInt16(0) < -9007199254740991.000000000, toUInt16(0) <= -9007199254740991.000000000, toUInt16(0) > -9007199254740991.000000000, toUInt16(0) >= -9007199254740991.000000000, -9007199254740991.000000000 = toUInt16(0), -9007199254740991.000000000 != toUInt16(0), -9007199254740991.000000000 < toUInt16(0), -9007199254740991.000000000 <= toUInt16(0), -9007199254740991.000000000 > toUInt16(0), -9007199254740991.000000000 >= toUInt16(0) , toInt16(0) = -9007199254740991.000000000, toInt16(0) != -9007199254740991.000000000, toInt16(0) < -9007199254740991.000000000, toInt16(0) <= -9007199254740991.000000000, toInt16(0) > -9007199254740991.000000000, toInt16(0) >= -9007199254740991.000000000, -9007199254740991.000000000 = toInt16(0), -9007199254740991.000000000 != toInt16(0), -9007199254740991.000000000 < toInt16(0), -9007199254740991.000000000 <= toInt16(0), -9007199254740991.000000000 > toInt16(0), -9007199254740991.000000000 >= toInt16(0) , toUInt32(0) = -9007199254740991.000000000, toUInt32(0) != -9007199254740991.000000000, toUInt32(0) < -9007199254740991.000000000, toUInt32(0) <= -9007199254740991.000000000, toUInt32(0) > -9007199254740991.000000000, toUInt32(0) >= -9007199254740991.000000000, -9007199254740991.000000000 = toUInt32(0), -9007199254740991.000000000 != toUInt32(0), -9007199254740991.000000000 < toUInt32(0), -9007199254740991.000000000 <= toUInt32(0), -9007199254740991.000000000 > toUInt32(0), -9007199254740991.000000000 >= toUInt32(0) , toInt32(0) = -9007199254740991.000000000, toInt32(0) != -9007199254740991.000000000, toInt32(0) < -9007199254740991.000000000, toInt32(0) <= -9007199254740991.000000000, toInt32(0) > -9007199254740991.000000000, toInt32(0) >= -9007199254740991.000000000, -9007199254740991.000000000 = toInt32(0), -9007199254740991.000000000 != toInt32(0), -9007199254740991.000000000 < toInt32(0), -9007199254740991.000000000 <= toInt32(0), -9007199254740991.000000000 > toInt32(0), -9007199254740991.000000000 >= toInt32(0) , toUInt64(0) = -9007199254740991.000000000, toUInt64(0) != -9007199254740991.000000000, toUInt64(0) < -9007199254740991.000000000, toUInt64(0) <= -9007199254740991.000000000, toUInt64(0) > -9007199254740991.000000000, toUInt64(0) >= -9007199254740991.000000000, -9007199254740991.000000000 = toUInt64(0), -9007199254740991.000000000 != toUInt64(0), -9007199254740991.000000000 < toUInt64(0), -9007199254740991.000000000 <= toUInt64(0), -9007199254740991.000000000 > toUInt64(0), -9007199254740991.000000000 >= toUInt64(0) , toInt64(0) = -9007199254740991.000000000, toInt64(0) != -9007199254740991.000000000, toInt64(0) < -9007199254740991.000000000, 
toInt64(0) <= -9007199254740991.000000000, toInt64(0) > -9007199254740991.000000000, toInt64(0) >= -9007199254740991.000000000, -9007199254740991.000000000 = toInt64(0), -9007199254740991.000000000 != toInt64(0), -9007199254740991.000000000 < toInt64(0), -9007199254740991.000000000 <= toInt64(0), -9007199254740991.000000000 > toInt64(0), -9007199254740991.000000000 >= toInt64(0) ; +SELECT '0', '-9007199254740992.000000000', 0 = -9007199254740992.000000000, 0 != -9007199254740992.000000000, 0 < -9007199254740992.000000000, 0 <= -9007199254740992.000000000, 0 > -9007199254740992.000000000, 0 >= -9007199254740992.000000000, -9007199254740992.000000000 = 0, -9007199254740992.000000000 != 0, -9007199254740992.000000000 < 0, -9007199254740992.000000000 <= 0, -9007199254740992.000000000 > 0, -9007199254740992.000000000 >= 0 , toUInt8(0) = -9007199254740992.000000000, toUInt8(0) != -9007199254740992.000000000, toUInt8(0) < -9007199254740992.000000000, toUInt8(0) <= -9007199254740992.000000000, toUInt8(0) > -9007199254740992.000000000, toUInt8(0) >= -9007199254740992.000000000, -9007199254740992.000000000 = toUInt8(0), -9007199254740992.000000000 != toUInt8(0), -9007199254740992.000000000 < toUInt8(0), -9007199254740992.000000000 <= toUInt8(0), -9007199254740992.000000000 > toUInt8(0), -9007199254740992.000000000 >= toUInt8(0) , toInt8(0) = -9007199254740992.000000000, toInt8(0) != -9007199254740992.000000000, toInt8(0) < -9007199254740992.000000000, toInt8(0) <= -9007199254740992.000000000, toInt8(0) > -9007199254740992.000000000, toInt8(0) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt8(0), -9007199254740992.000000000 != toInt8(0), -9007199254740992.000000000 < toInt8(0), -9007199254740992.000000000 <= toInt8(0), -9007199254740992.000000000 > toInt8(0), -9007199254740992.000000000 >= toInt8(0) , toUInt16(0) = -9007199254740992.000000000, toUInt16(0) != -9007199254740992.000000000, toUInt16(0) < -9007199254740992.000000000, toUInt16(0) <= -9007199254740992.000000000, toUInt16(0) > -9007199254740992.000000000, toUInt16(0) >= -9007199254740992.000000000, -9007199254740992.000000000 = toUInt16(0), -9007199254740992.000000000 != toUInt16(0), -9007199254740992.000000000 < toUInt16(0), -9007199254740992.000000000 <= toUInt16(0), -9007199254740992.000000000 > toUInt16(0), -9007199254740992.000000000 >= toUInt16(0) , toInt16(0) = -9007199254740992.000000000, toInt16(0) != -9007199254740992.000000000, toInt16(0) < -9007199254740992.000000000, toInt16(0) <= -9007199254740992.000000000, toInt16(0) > -9007199254740992.000000000, toInt16(0) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt16(0), -9007199254740992.000000000 != toInt16(0), -9007199254740992.000000000 < toInt16(0), -9007199254740992.000000000 <= toInt16(0), -9007199254740992.000000000 > toInt16(0), -9007199254740992.000000000 >= toInt16(0) , toUInt32(0) = -9007199254740992.000000000, toUInt32(0) != -9007199254740992.000000000, toUInt32(0) < -9007199254740992.000000000, toUInt32(0) <= -9007199254740992.000000000, toUInt32(0) > -9007199254740992.000000000, toUInt32(0) >= -9007199254740992.000000000, -9007199254740992.000000000 = toUInt32(0), -9007199254740992.000000000 != toUInt32(0), -9007199254740992.000000000 < toUInt32(0), -9007199254740992.000000000 <= toUInt32(0), -9007199254740992.000000000 > toUInt32(0), -9007199254740992.000000000 >= toUInt32(0) , toInt32(0) = -9007199254740992.000000000, toInt32(0) != -9007199254740992.000000000, toInt32(0) < -9007199254740992.000000000, toInt32(0) <= 
-9007199254740992.000000000, toInt32(0) > -9007199254740992.000000000, toInt32(0) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt32(0), -9007199254740992.000000000 != toInt32(0), -9007199254740992.000000000 < toInt32(0), -9007199254740992.000000000 <= toInt32(0), -9007199254740992.000000000 > toInt32(0), -9007199254740992.000000000 >= toInt32(0) , toUInt64(0) = -9007199254740992.000000000, toUInt64(0) != -9007199254740992.000000000, toUInt64(0) < -9007199254740992.000000000, toUInt64(0) <= -9007199254740992.000000000, toUInt64(0) > -9007199254740992.000000000, toUInt64(0) >= -9007199254740992.000000000, -9007199254740992.000000000 = toUInt64(0), -9007199254740992.000000000 != toUInt64(0), -9007199254740992.000000000 < toUInt64(0), -9007199254740992.000000000 <= toUInt64(0), -9007199254740992.000000000 > toUInt64(0), -9007199254740992.000000000 >= toUInt64(0) , toInt64(0) = -9007199254740992.000000000, toInt64(0) != -9007199254740992.000000000, toInt64(0) < -9007199254740992.000000000, toInt64(0) <= -9007199254740992.000000000, toInt64(0) > -9007199254740992.000000000, toInt64(0) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt64(0), -9007199254740992.000000000 != toInt64(0), -9007199254740992.000000000 < toInt64(0), -9007199254740992.000000000 <= toInt64(0), -9007199254740992.000000000 > toInt64(0), -9007199254740992.000000000 >= toInt64(0) ; +SELECT '0', '-9007199254740992.000000000', 0 = -9007199254740992.000000000, 0 != -9007199254740992.000000000, 0 < -9007199254740992.000000000, 0 <= -9007199254740992.000000000, 0 > -9007199254740992.000000000, 0 >= -9007199254740992.000000000, -9007199254740992.000000000 = 0, -9007199254740992.000000000 != 0, -9007199254740992.000000000 < 0, -9007199254740992.000000000 <= 0, -9007199254740992.000000000 > 0, -9007199254740992.000000000 >= 0 , toUInt8(0) = -9007199254740992.000000000, toUInt8(0) != -9007199254740992.000000000, toUInt8(0) < -9007199254740992.000000000, toUInt8(0) <= -9007199254740992.000000000, toUInt8(0) > -9007199254740992.000000000, toUInt8(0) >= -9007199254740992.000000000, -9007199254740992.000000000 = toUInt8(0), -9007199254740992.000000000 != toUInt8(0), -9007199254740992.000000000 < toUInt8(0), -9007199254740992.000000000 <= toUInt8(0), -9007199254740992.000000000 > toUInt8(0), -9007199254740992.000000000 >= toUInt8(0) , toInt8(0) = -9007199254740992.000000000, toInt8(0) != -9007199254740992.000000000, toInt8(0) < -9007199254740992.000000000, toInt8(0) <= -9007199254740992.000000000, toInt8(0) > -9007199254740992.000000000, toInt8(0) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt8(0), -9007199254740992.000000000 != toInt8(0), -9007199254740992.000000000 < toInt8(0), -9007199254740992.000000000 <= toInt8(0), -9007199254740992.000000000 > toInt8(0), -9007199254740992.000000000 >= toInt8(0) , toUInt16(0) = -9007199254740992.000000000, toUInt16(0) != -9007199254740992.000000000, toUInt16(0) < -9007199254740992.000000000, toUInt16(0) <= -9007199254740992.000000000, toUInt16(0) > -9007199254740992.000000000, toUInt16(0) >= -9007199254740992.000000000, -9007199254740992.000000000 = toUInt16(0), -9007199254740992.000000000 != toUInt16(0), -9007199254740992.000000000 < toUInt16(0), -9007199254740992.000000000 <= toUInt16(0), -9007199254740992.000000000 > toUInt16(0), -9007199254740992.000000000 >= toUInt16(0) , toInt16(0) = -9007199254740992.000000000, toInt16(0) != -9007199254740992.000000000, toInt16(0) < -9007199254740992.000000000, toInt16(0) <= -9007199254740992.000000000, 
toInt16(0) > -9007199254740992.000000000, toInt16(0) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt16(0), -9007199254740992.000000000 != toInt16(0), -9007199254740992.000000000 < toInt16(0), -9007199254740992.000000000 <= toInt16(0), -9007199254740992.000000000 > toInt16(0), -9007199254740992.000000000 >= toInt16(0) , toUInt32(0) = -9007199254740992.000000000, toUInt32(0) != -9007199254740992.000000000, toUInt32(0) < -9007199254740992.000000000, toUInt32(0) <= -9007199254740992.000000000, toUInt32(0) > -9007199254740992.000000000, toUInt32(0) >= -9007199254740992.000000000, -9007199254740992.000000000 = toUInt32(0), -9007199254740992.000000000 != toUInt32(0), -9007199254740992.000000000 < toUInt32(0), -9007199254740992.000000000 <= toUInt32(0), -9007199254740992.000000000 > toUInt32(0), -9007199254740992.000000000 >= toUInt32(0) , toInt32(0) = -9007199254740992.000000000, toInt32(0) != -9007199254740992.000000000, toInt32(0) < -9007199254740992.000000000, toInt32(0) <= -9007199254740992.000000000, toInt32(0) > -9007199254740992.000000000, toInt32(0) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt32(0), -9007199254740992.000000000 != toInt32(0), -9007199254740992.000000000 < toInt32(0), -9007199254740992.000000000 <= toInt32(0), -9007199254740992.000000000 > toInt32(0), -9007199254740992.000000000 >= toInt32(0) , toUInt64(0) = -9007199254740992.000000000, toUInt64(0) != -9007199254740992.000000000, toUInt64(0) < -9007199254740992.000000000, toUInt64(0) <= -9007199254740992.000000000, toUInt64(0) > -9007199254740992.000000000, toUInt64(0) >= -9007199254740992.000000000, -9007199254740992.000000000 = toUInt64(0), -9007199254740992.000000000 != toUInt64(0), -9007199254740992.000000000 < toUInt64(0), -9007199254740992.000000000 <= toUInt64(0), -9007199254740992.000000000 > toUInt64(0), -9007199254740992.000000000 >= toUInt64(0) , toInt64(0) = -9007199254740992.000000000, toInt64(0) != -9007199254740992.000000000, toInt64(0) < -9007199254740992.000000000, toInt64(0) <= -9007199254740992.000000000, toInt64(0) > -9007199254740992.000000000, toInt64(0) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt64(0), -9007199254740992.000000000 != toInt64(0), -9007199254740992.000000000 < toInt64(0), -9007199254740992.000000000 <= toInt64(0), -9007199254740992.000000000 > toInt64(0), -9007199254740992.000000000 >= toInt64(0) ; +SELECT '0', '-9007199254740994.000000000', 0 = -9007199254740994.000000000, 0 != -9007199254740994.000000000, 0 < -9007199254740994.000000000, 0 <= -9007199254740994.000000000, 0 > -9007199254740994.000000000, 0 >= -9007199254740994.000000000, -9007199254740994.000000000 = 0, -9007199254740994.000000000 != 0, -9007199254740994.000000000 < 0, -9007199254740994.000000000 <= 0, -9007199254740994.000000000 > 0, -9007199254740994.000000000 >= 0 , toUInt8(0) = -9007199254740994.000000000, toUInt8(0) != -9007199254740994.000000000, toUInt8(0) < -9007199254740994.000000000, toUInt8(0) <= -9007199254740994.000000000, toUInt8(0) > -9007199254740994.000000000, toUInt8(0) >= -9007199254740994.000000000, -9007199254740994.000000000 = toUInt8(0), -9007199254740994.000000000 != toUInt8(0), -9007199254740994.000000000 < toUInt8(0), -9007199254740994.000000000 <= toUInt8(0), -9007199254740994.000000000 > toUInt8(0), -9007199254740994.000000000 >= toUInt8(0) , toInt8(0) = -9007199254740994.000000000, toInt8(0) != -9007199254740994.000000000, toInt8(0) < -9007199254740994.000000000, toInt8(0) <= -9007199254740994.000000000, toInt8(0) > 
-9007199254740994.000000000, toInt8(0) >= -9007199254740994.000000000, -9007199254740994.000000000 = toInt8(0), -9007199254740994.000000000 != toInt8(0), -9007199254740994.000000000 < toInt8(0), -9007199254740994.000000000 <= toInt8(0), -9007199254740994.000000000 > toInt8(0), -9007199254740994.000000000 >= toInt8(0) , toUInt16(0) = -9007199254740994.000000000, toUInt16(0) != -9007199254740994.000000000, toUInt16(0) < -9007199254740994.000000000, toUInt16(0) <= -9007199254740994.000000000, toUInt16(0) > -9007199254740994.000000000, toUInt16(0) >= -9007199254740994.000000000, -9007199254740994.000000000 = toUInt16(0), -9007199254740994.000000000 != toUInt16(0), -9007199254740994.000000000 < toUInt16(0), -9007199254740994.000000000 <= toUInt16(0), -9007199254740994.000000000 > toUInt16(0), -9007199254740994.000000000 >= toUInt16(0) , toInt16(0) = -9007199254740994.000000000, toInt16(0) != -9007199254740994.000000000, toInt16(0) < -9007199254740994.000000000, toInt16(0) <= -9007199254740994.000000000, toInt16(0) > -9007199254740994.000000000, toInt16(0) >= -9007199254740994.000000000, -9007199254740994.000000000 = toInt16(0), -9007199254740994.000000000 != toInt16(0), -9007199254740994.000000000 < toInt16(0), -9007199254740994.000000000 <= toInt16(0), -9007199254740994.000000000 > toInt16(0), -9007199254740994.000000000 >= toInt16(0) , toUInt32(0) = -9007199254740994.000000000, toUInt32(0) != -9007199254740994.000000000, toUInt32(0) < -9007199254740994.000000000, toUInt32(0) <= -9007199254740994.000000000, toUInt32(0) > -9007199254740994.000000000, toUInt32(0) >= -9007199254740994.000000000, -9007199254740994.000000000 = toUInt32(0), -9007199254740994.000000000 != toUInt32(0), -9007199254740994.000000000 < toUInt32(0), -9007199254740994.000000000 <= toUInt32(0), -9007199254740994.000000000 > toUInt32(0), -9007199254740994.000000000 >= toUInt32(0) , toInt32(0) = -9007199254740994.000000000, toInt32(0) != -9007199254740994.000000000, toInt32(0) < -9007199254740994.000000000, toInt32(0) <= -9007199254740994.000000000, toInt32(0) > -9007199254740994.000000000, toInt32(0) >= -9007199254740994.000000000, -9007199254740994.000000000 = toInt32(0), -9007199254740994.000000000 != toInt32(0), -9007199254740994.000000000 < toInt32(0), -9007199254740994.000000000 <= toInt32(0), -9007199254740994.000000000 > toInt32(0), -9007199254740994.000000000 >= toInt32(0) , toUInt64(0) = -9007199254740994.000000000, toUInt64(0) != -9007199254740994.000000000, toUInt64(0) < -9007199254740994.000000000, toUInt64(0) <= -9007199254740994.000000000, toUInt64(0) > -9007199254740994.000000000, toUInt64(0) >= -9007199254740994.000000000, -9007199254740994.000000000 = toUInt64(0), -9007199254740994.000000000 != toUInt64(0), -9007199254740994.000000000 < toUInt64(0), -9007199254740994.000000000 <= toUInt64(0), -9007199254740994.000000000 > toUInt64(0), -9007199254740994.000000000 >= toUInt64(0) , toInt64(0) = -9007199254740994.000000000, toInt64(0) != -9007199254740994.000000000, toInt64(0) < -9007199254740994.000000000, toInt64(0) <= -9007199254740994.000000000, toInt64(0) > -9007199254740994.000000000, toInt64(0) >= -9007199254740994.000000000, -9007199254740994.000000000 = toInt64(0), -9007199254740994.000000000 != toInt64(0), -9007199254740994.000000000 < toInt64(0), -9007199254740994.000000000 <= toInt64(0), -9007199254740994.000000000 > toInt64(0), -9007199254740994.000000000 >= toInt64(0) ; +SELECT '0', '104.000000000', 0 = 104.000000000, 0 != 104.000000000, 0 < 104.000000000, 0 <= 104.000000000, 0 > 104.000000000, 0 >= 
104.000000000, 104.000000000 = 0, 104.000000000 != 0, 104.000000000 < 0, 104.000000000 <= 0, 104.000000000 > 0, 104.000000000 >= 0 , toUInt8(0) = 104.000000000, toUInt8(0) != 104.000000000, toUInt8(0) < 104.000000000, toUInt8(0) <= 104.000000000, toUInt8(0) > 104.000000000, toUInt8(0) >= 104.000000000, 104.000000000 = toUInt8(0), 104.000000000 != toUInt8(0), 104.000000000 < toUInt8(0), 104.000000000 <= toUInt8(0), 104.000000000 > toUInt8(0), 104.000000000 >= toUInt8(0) , toInt8(0) = 104.000000000, toInt8(0) != 104.000000000, toInt8(0) < 104.000000000, toInt8(0) <= 104.000000000, toInt8(0) > 104.000000000, toInt8(0) >= 104.000000000, 104.000000000 = toInt8(0), 104.000000000 != toInt8(0), 104.000000000 < toInt8(0), 104.000000000 <= toInt8(0), 104.000000000 > toInt8(0), 104.000000000 >= toInt8(0) , toUInt16(0) = 104.000000000, toUInt16(0) != 104.000000000, toUInt16(0) < 104.000000000, toUInt16(0) <= 104.000000000, toUInt16(0) > 104.000000000, toUInt16(0) >= 104.000000000, 104.000000000 = toUInt16(0), 104.000000000 != toUInt16(0), 104.000000000 < toUInt16(0), 104.000000000 <= toUInt16(0), 104.000000000 > toUInt16(0), 104.000000000 >= toUInt16(0) , toInt16(0) = 104.000000000, toInt16(0) != 104.000000000, toInt16(0) < 104.000000000, toInt16(0) <= 104.000000000, toInt16(0) > 104.000000000, toInt16(0) >= 104.000000000, 104.000000000 = toInt16(0), 104.000000000 != toInt16(0), 104.000000000 < toInt16(0), 104.000000000 <= toInt16(0), 104.000000000 > toInt16(0), 104.000000000 >= toInt16(0) , toUInt32(0) = 104.000000000, toUInt32(0) != 104.000000000, toUInt32(0) < 104.000000000, toUInt32(0) <= 104.000000000, toUInt32(0) > 104.000000000, toUInt32(0) >= 104.000000000, 104.000000000 = toUInt32(0), 104.000000000 != toUInt32(0), 104.000000000 < toUInt32(0), 104.000000000 <= toUInt32(0), 104.000000000 > toUInt32(0), 104.000000000 >= toUInt32(0) , toInt32(0) = 104.000000000, toInt32(0) != 104.000000000, toInt32(0) < 104.000000000, toInt32(0) <= 104.000000000, toInt32(0) > 104.000000000, toInt32(0) >= 104.000000000, 104.000000000 = toInt32(0), 104.000000000 != toInt32(0), 104.000000000 < toInt32(0), 104.000000000 <= toInt32(0), 104.000000000 > toInt32(0), 104.000000000 >= toInt32(0) , toUInt64(0) = 104.000000000, toUInt64(0) != 104.000000000, toUInt64(0) < 104.000000000, toUInt64(0) <= 104.000000000, toUInt64(0) > 104.000000000, toUInt64(0) >= 104.000000000, 104.000000000 = toUInt64(0), 104.000000000 != toUInt64(0), 104.000000000 < toUInt64(0), 104.000000000 <= toUInt64(0), 104.000000000 > toUInt64(0), 104.000000000 >= toUInt64(0) , toInt64(0) = 104.000000000, toInt64(0) != 104.000000000, toInt64(0) < 104.000000000, toInt64(0) <= 104.000000000, toInt64(0) > 104.000000000, toInt64(0) >= 104.000000000, 104.000000000 = toInt64(0), 104.000000000 != toInt64(0), 104.000000000 < toInt64(0), 104.000000000 <= toInt64(0), 104.000000000 > toInt64(0), 104.000000000 >= toInt64(0) ; +SELECT '0', '-4503599627370496.000000000', 0 = -4503599627370496.000000000, 0 != -4503599627370496.000000000, 0 < -4503599627370496.000000000, 0 <= -4503599627370496.000000000, 0 > -4503599627370496.000000000, 0 >= -4503599627370496.000000000, -4503599627370496.000000000 = 0, -4503599627370496.000000000 != 0, -4503599627370496.000000000 < 0, -4503599627370496.000000000 <= 0, -4503599627370496.000000000 > 0, -4503599627370496.000000000 >= 0 , toUInt8(0) = -4503599627370496.000000000, toUInt8(0) != -4503599627370496.000000000, toUInt8(0) < -4503599627370496.000000000, toUInt8(0) <= -4503599627370496.000000000, toUInt8(0) > 
-4503599627370496.000000000, toUInt8(0) >= -4503599627370496.000000000, -4503599627370496.000000000 = toUInt8(0), -4503599627370496.000000000 != toUInt8(0), -4503599627370496.000000000 < toUInt8(0), -4503599627370496.000000000 <= toUInt8(0), -4503599627370496.000000000 > toUInt8(0), -4503599627370496.000000000 >= toUInt8(0) , toInt8(0) = -4503599627370496.000000000, toInt8(0) != -4503599627370496.000000000, toInt8(0) < -4503599627370496.000000000, toInt8(0) <= -4503599627370496.000000000, toInt8(0) > -4503599627370496.000000000, toInt8(0) >= -4503599627370496.000000000, -4503599627370496.000000000 = toInt8(0), -4503599627370496.000000000 != toInt8(0), -4503599627370496.000000000 < toInt8(0), -4503599627370496.000000000 <= toInt8(0), -4503599627370496.000000000 > toInt8(0), -4503599627370496.000000000 >= toInt8(0) , toUInt16(0) = -4503599627370496.000000000, toUInt16(0) != -4503599627370496.000000000, toUInt16(0) < -4503599627370496.000000000, toUInt16(0) <= -4503599627370496.000000000, toUInt16(0) > -4503599627370496.000000000, toUInt16(0) >= -4503599627370496.000000000, -4503599627370496.000000000 = toUInt16(0), -4503599627370496.000000000 != toUInt16(0), -4503599627370496.000000000 < toUInt16(0), -4503599627370496.000000000 <= toUInt16(0), -4503599627370496.000000000 > toUInt16(0), -4503599627370496.000000000 >= toUInt16(0) , toInt16(0) = -4503599627370496.000000000, toInt16(0) != -4503599627370496.000000000, toInt16(0) < -4503599627370496.000000000, toInt16(0) <= -4503599627370496.000000000, toInt16(0) > -4503599627370496.000000000, toInt16(0) >= -4503599627370496.000000000, -4503599627370496.000000000 = toInt16(0), -4503599627370496.000000000 != toInt16(0), -4503599627370496.000000000 < toInt16(0), -4503599627370496.000000000 <= toInt16(0), -4503599627370496.000000000 > toInt16(0), -4503599627370496.000000000 >= toInt16(0) , toUInt32(0) = -4503599627370496.000000000, toUInt32(0) != -4503599627370496.000000000, toUInt32(0) < -4503599627370496.000000000, toUInt32(0) <= -4503599627370496.000000000, toUInt32(0) > -4503599627370496.000000000, toUInt32(0) >= -4503599627370496.000000000, -4503599627370496.000000000 = toUInt32(0), -4503599627370496.000000000 != toUInt32(0), -4503599627370496.000000000 < toUInt32(0), -4503599627370496.000000000 <= toUInt32(0), -4503599627370496.000000000 > toUInt32(0), -4503599627370496.000000000 >= toUInt32(0) , toInt32(0) = -4503599627370496.000000000, toInt32(0) != -4503599627370496.000000000, toInt32(0) < -4503599627370496.000000000, toInt32(0) <= -4503599627370496.000000000, toInt32(0) > -4503599627370496.000000000, toInt32(0) >= -4503599627370496.000000000, -4503599627370496.000000000 = toInt32(0), -4503599627370496.000000000 != toInt32(0), -4503599627370496.000000000 < toInt32(0), -4503599627370496.000000000 <= toInt32(0), -4503599627370496.000000000 > toInt32(0), -4503599627370496.000000000 >= toInt32(0) , toUInt64(0) = -4503599627370496.000000000, toUInt64(0) != -4503599627370496.000000000, toUInt64(0) < -4503599627370496.000000000, toUInt64(0) <= -4503599627370496.000000000, toUInt64(0) > -4503599627370496.000000000, toUInt64(0) >= -4503599627370496.000000000, -4503599627370496.000000000 = toUInt64(0), -4503599627370496.000000000 != toUInt64(0), -4503599627370496.000000000 < toUInt64(0), -4503599627370496.000000000 <= toUInt64(0), -4503599627370496.000000000 > toUInt64(0), -4503599627370496.000000000 >= toUInt64(0) , toInt64(0) = -4503599627370496.000000000, toInt64(0) != -4503599627370496.000000000, toInt64(0) < -4503599627370496.000000000, 
toInt64(0) <= -4503599627370496.000000000, toInt64(0) > -4503599627370496.000000000, toInt64(0) >= -4503599627370496.000000000, -4503599627370496.000000000 = toInt64(0), -4503599627370496.000000000 != toInt64(0), -4503599627370496.000000000 < toInt64(0), -4503599627370496.000000000 <= toInt64(0), -4503599627370496.000000000 > toInt64(0), -4503599627370496.000000000 >= toInt64(0) ; +SELECT '0', '-0.500000000', 0 = -0.500000000, 0 != -0.500000000, 0 < -0.500000000, 0 <= -0.500000000, 0 > -0.500000000, 0 >= -0.500000000, -0.500000000 = 0, -0.500000000 != 0, -0.500000000 < 0, -0.500000000 <= 0, -0.500000000 > 0, -0.500000000 >= 0 , toUInt8(0) = -0.500000000, toUInt8(0) != -0.500000000, toUInt8(0) < -0.500000000, toUInt8(0) <= -0.500000000, toUInt8(0) > -0.500000000, toUInt8(0) >= -0.500000000, -0.500000000 = toUInt8(0), -0.500000000 != toUInt8(0), -0.500000000 < toUInt8(0), -0.500000000 <= toUInt8(0), -0.500000000 > toUInt8(0), -0.500000000 >= toUInt8(0) , toInt8(0) = -0.500000000, toInt8(0) != -0.500000000, toInt8(0) < -0.500000000, toInt8(0) <= -0.500000000, toInt8(0) > -0.500000000, toInt8(0) >= -0.500000000, -0.500000000 = toInt8(0), -0.500000000 != toInt8(0), -0.500000000 < toInt8(0), -0.500000000 <= toInt8(0), -0.500000000 > toInt8(0), -0.500000000 >= toInt8(0) , toUInt16(0) = -0.500000000, toUInt16(0) != -0.500000000, toUInt16(0) < -0.500000000, toUInt16(0) <= -0.500000000, toUInt16(0) > -0.500000000, toUInt16(0) >= -0.500000000, -0.500000000 = toUInt16(0), -0.500000000 != toUInt16(0), -0.500000000 < toUInt16(0), -0.500000000 <= toUInt16(0), -0.500000000 > toUInt16(0), -0.500000000 >= toUInt16(0) , toInt16(0) = -0.500000000, toInt16(0) != -0.500000000, toInt16(0) < -0.500000000, toInt16(0) <= -0.500000000, toInt16(0) > -0.500000000, toInt16(0) >= -0.500000000, -0.500000000 = toInt16(0), -0.500000000 != toInt16(0), -0.500000000 < toInt16(0), -0.500000000 <= toInt16(0), -0.500000000 > toInt16(0), -0.500000000 >= toInt16(0) , toUInt32(0) = -0.500000000, toUInt32(0) != -0.500000000, toUInt32(0) < -0.500000000, toUInt32(0) <= -0.500000000, toUInt32(0) > -0.500000000, toUInt32(0) >= -0.500000000, -0.500000000 = toUInt32(0), -0.500000000 != toUInt32(0), -0.500000000 < toUInt32(0), -0.500000000 <= toUInt32(0), -0.500000000 > toUInt32(0), -0.500000000 >= toUInt32(0) , toInt32(0) = -0.500000000, toInt32(0) != -0.500000000, toInt32(0) < -0.500000000, toInt32(0) <= -0.500000000, toInt32(0) > -0.500000000, toInt32(0) >= -0.500000000, -0.500000000 = toInt32(0), -0.500000000 != toInt32(0), -0.500000000 < toInt32(0), -0.500000000 <= toInt32(0), -0.500000000 > toInt32(0), -0.500000000 >= toInt32(0) , toUInt64(0) = -0.500000000, toUInt64(0) != -0.500000000, toUInt64(0) < -0.500000000, toUInt64(0) <= -0.500000000, toUInt64(0) > -0.500000000, toUInt64(0) >= -0.500000000, -0.500000000 = toUInt64(0), -0.500000000 != toUInt64(0), -0.500000000 < toUInt64(0), -0.500000000 <= toUInt64(0), -0.500000000 > toUInt64(0), -0.500000000 >= toUInt64(0) , toInt64(0) = -0.500000000, toInt64(0) != -0.500000000, toInt64(0) < -0.500000000, toInt64(0) <= -0.500000000, toInt64(0) > -0.500000000, toInt64(0) >= -0.500000000, -0.500000000 = toInt64(0), -0.500000000 != toInt64(0), -0.500000000 < toInt64(0), -0.500000000 <= toInt64(0), -0.500000000 > toInt64(0), -0.500000000 >= toInt64(0) ; +SELECT '0', '0.500000000', 0 = 0.500000000, 0 != 0.500000000, 0 < 0.500000000, 0 <= 0.500000000, 0 > 0.500000000, 0 >= 0.500000000, 0.500000000 = 0, 0.500000000 != 0, 0.500000000 < 0, 0.500000000 <= 0, 0.500000000 > 0, 0.500000000 >= 0 , 
toUInt8(0) = 0.500000000, toUInt8(0) != 0.500000000, toUInt8(0) < 0.500000000, toUInt8(0) <= 0.500000000, toUInt8(0) > 0.500000000, toUInt8(0) >= 0.500000000, 0.500000000 = toUInt8(0), 0.500000000 != toUInt8(0), 0.500000000 < toUInt8(0), 0.500000000 <= toUInt8(0), 0.500000000 > toUInt8(0), 0.500000000 >= toUInt8(0) , toInt8(0) = 0.500000000, toInt8(0) != 0.500000000, toInt8(0) < 0.500000000, toInt8(0) <= 0.500000000, toInt8(0) > 0.500000000, toInt8(0) >= 0.500000000, 0.500000000 = toInt8(0), 0.500000000 != toInt8(0), 0.500000000 < toInt8(0), 0.500000000 <= toInt8(0), 0.500000000 > toInt8(0), 0.500000000 >= toInt8(0) , toUInt16(0) = 0.500000000, toUInt16(0) != 0.500000000, toUInt16(0) < 0.500000000, toUInt16(0) <= 0.500000000, toUInt16(0) > 0.500000000, toUInt16(0) >= 0.500000000, 0.500000000 = toUInt16(0), 0.500000000 != toUInt16(0), 0.500000000 < toUInt16(0), 0.500000000 <= toUInt16(0), 0.500000000 > toUInt16(0), 0.500000000 >= toUInt16(0) , toInt16(0) = 0.500000000, toInt16(0) != 0.500000000, toInt16(0) < 0.500000000, toInt16(0) <= 0.500000000, toInt16(0) > 0.500000000, toInt16(0) >= 0.500000000, 0.500000000 = toInt16(0), 0.500000000 != toInt16(0), 0.500000000 < toInt16(0), 0.500000000 <= toInt16(0), 0.500000000 > toInt16(0), 0.500000000 >= toInt16(0) , toUInt32(0) = 0.500000000, toUInt32(0) != 0.500000000, toUInt32(0) < 0.500000000, toUInt32(0) <= 0.500000000, toUInt32(0) > 0.500000000, toUInt32(0) >= 0.500000000, 0.500000000 = toUInt32(0), 0.500000000 != toUInt32(0), 0.500000000 < toUInt32(0), 0.500000000 <= toUInt32(0), 0.500000000 > toUInt32(0), 0.500000000 >= toUInt32(0) , toInt32(0) = 0.500000000, toInt32(0) != 0.500000000, toInt32(0) < 0.500000000, toInt32(0) <= 0.500000000, toInt32(0) > 0.500000000, toInt32(0) >= 0.500000000, 0.500000000 = toInt32(0), 0.500000000 != toInt32(0), 0.500000000 < toInt32(0), 0.500000000 <= toInt32(0), 0.500000000 > toInt32(0), 0.500000000 >= toInt32(0) , toUInt64(0) = 0.500000000, toUInt64(0) != 0.500000000, toUInt64(0) < 0.500000000, toUInt64(0) <= 0.500000000, toUInt64(0) > 0.500000000, toUInt64(0) >= 0.500000000, 0.500000000 = toUInt64(0), 0.500000000 != toUInt64(0), 0.500000000 < toUInt64(0), 0.500000000 <= toUInt64(0), 0.500000000 > toUInt64(0), 0.500000000 >= toUInt64(0) , toInt64(0) = 0.500000000, toInt64(0) != 0.500000000, toInt64(0) < 0.500000000, toInt64(0) <= 0.500000000, toInt64(0) > 0.500000000, toInt64(0) >= 0.500000000, 0.500000000 = toInt64(0), 0.500000000 != toInt64(0), 0.500000000 < toInt64(0), 0.500000000 <= toInt64(0), 0.500000000 > toInt64(0), 0.500000000 >= toInt64(0) ; +SELECT '0', '-1.500000000', 0 = -1.500000000, 0 != -1.500000000, 0 < -1.500000000, 0 <= -1.500000000, 0 > -1.500000000, 0 >= -1.500000000, -1.500000000 = 0, -1.500000000 != 0, -1.500000000 < 0, -1.500000000 <= 0, -1.500000000 > 0, -1.500000000 >= 0 , toUInt8(0) = -1.500000000, toUInt8(0) != -1.500000000, toUInt8(0) < -1.500000000, toUInt8(0) <= -1.500000000, toUInt8(0) > -1.500000000, toUInt8(0) >= -1.500000000, -1.500000000 = toUInt8(0), -1.500000000 != toUInt8(0), -1.500000000 < toUInt8(0), -1.500000000 <= toUInt8(0), -1.500000000 > toUInt8(0), -1.500000000 >= toUInt8(0) , toInt8(0) = -1.500000000, toInt8(0) != -1.500000000, toInt8(0) < -1.500000000, toInt8(0) <= -1.500000000, toInt8(0) > -1.500000000, toInt8(0) >= -1.500000000, -1.500000000 = toInt8(0), -1.500000000 != toInt8(0), -1.500000000 < toInt8(0), -1.500000000 <= toInt8(0), -1.500000000 > toInt8(0), -1.500000000 >= toInt8(0) , toUInt16(0) = -1.500000000, toUInt16(0) != -1.500000000, toUInt16(0) < 
-1.500000000, toUInt16(0) <= -1.500000000, toUInt16(0) > -1.500000000, toUInt16(0) >= -1.500000000, -1.500000000 = toUInt16(0), -1.500000000 != toUInt16(0), -1.500000000 < toUInt16(0), -1.500000000 <= toUInt16(0), -1.500000000 > toUInt16(0), -1.500000000 >= toUInt16(0) , toInt16(0) = -1.500000000, toInt16(0) != -1.500000000, toInt16(0) < -1.500000000, toInt16(0) <= -1.500000000, toInt16(0) > -1.500000000, toInt16(0) >= -1.500000000, -1.500000000 = toInt16(0), -1.500000000 != toInt16(0), -1.500000000 < toInt16(0), -1.500000000 <= toInt16(0), -1.500000000 > toInt16(0), -1.500000000 >= toInt16(0) , toUInt32(0) = -1.500000000, toUInt32(0) != -1.500000000, toUInt32(0) < -1.500000000, toUInt32(0) <= -1.500000000, toUInt32(0) > -1.500000000, toUInt32(0) >= -1.500000000, -1.500000000 = toUInt32(0), -1.500000000 != toUInt32(0), -1.500000000 < toUInt32(0), -1.500000000 <= toUInt32(0), -1.500000000 > toUInt32(0), -1.500000000 >= toUInt32(0) , toInt32(0) = -1.500000000, toInt32(0) != -1.500000000, toInt32(0) < -1.500000000, toInt32(0) <= -1.500000000, toInt32(0) > -1.500000000, toInt32(0) >= -1.500000000, -1.500000000 = toInt32(0), -1.500000000 != toInt32(0), -1.500000000 < toInt32(0), -1.500000000 <= toInt32(0), -1.500000000 > toInt32(0), -1.500000000 >= toInt32(0) , toUInt64(0) = -1.500000000, toUInt64(0) != -1.500000000, toUInt64(0) < -1.500000000, toUInt64(0) <= -1.500000000, toUInt64(0) > -1.500000000, toUInt64(0) >= -1.500000000, -1.500000000 = toUInt64(0), -1.500000000 != toUInt64(0), -1.500000000 < toUInt64(0), -1.500000000 <= toUInt64(0), -1.500000000 > toUInt64(0), -1.500000000 >= toUInt64(0) , toInt64(0) = -1.500000000, toInt64(0) != -1.500000000, toInt64(0) < -1.500000000, toInt64(0) <= -1.500000000, toInt64(0) > -1.500000000, toInt64(0) >= -1.500000000, -1.500000000 = toInt64(0), -1.500000000 != toInt64(0), -1.500000000 < toInt64(0), -1.500000000 <= toInt64(0), -1.500000000 > toInt64(0), -1.500000000 >= toInt64(0) ; +SELECT '0', '1.500000000', 0 = 1.500000000, 0 != 1.500000000, 0 < 1.500000000, 0 <= 1.500000000, 0 > 1.500000000, 0 >= 1.500000000, 1.500000000 = 0, 1.500000000 != 0, 1.500000000 < 0, 1.500000000 <= 0, 1.500000000 > 0, 1.500000000 >= 0 , toUInt8(0) = 1.500000000, toUInt8(0) != 1.500000000, toUInt8(0) < 1.500000000, toUInt8(0) <= 1.500000000, toUInt8(0) > 1.500000000, toUInt8(0) >= 1.500000000, 1.500000000 = toUInt8(0), 1.500000000 != toUInt8(0), 1.500000000 < toUInt8(0), 1.500000000 <= toUInt8(0), 1.500000000 > toUInt8(0), 1.500000000 >= toUInt8(0) , toInt8(0) = 1.500000000, toInt8(0) != 1.500000000, toInt8(0) < 1.500000000, toInt8(0) <= 1.500000000, toInt8(0) > 1.500000000, toInt8(0) >= 1.500000000, 1.500000000 = toInt8(0), 1.500000000 != toInt8(0), 1.500000000 < toInt8(0), 1.500000000 <= toInt8(0), 1.500000000 > toInt8(0), 1.500000000 >= toInt8(0) , toUInt16(0) = 1.500000000, toUInt16(0) != 1.500000000, toUInt16(0) < 1.500000000, toUInt16(0) <= 1.500000000, toUInt16(0) > 1.500000000, toUInt16(0) >= 1.500000000, 1.500000000 = toUInt16(0), 1.500000000 != toUInt16(0), 1.500000000 < toUInt16(0), 1.500000000 <= toUInt16(0), 1.500000000 > toUInt16(0), 1.500000000 >= toUInt16(0) , toInt16(0) = 1.500000000, toInt16(0) != 1.500000000, toInt16(0) < 1.500000000, toInt16(0) <= 1.500000000, toInt16(0) > 1.500000000, toInt16(0) >= 1.500000000, 1.500000000 = toInt16(0), 1.500000000 != toInt16(0), 1.500000000 < toInt16(0), 1.500000000 <= toInt16(0), 1.500000000 > toInt16(0), 1.500000000 >= toInt16(0) , toUInt32(0) = 1.500000000, toUInt32(0) != 1.500000000, toUInt32(0) < 1.500000000, 
toUInt32(0) <= 1.500000000, toUInt32(0) > 1.500000000, toUInt32(0) >= 1.500000000, 1.500000000 = toUInt32(0), 1.500000000 != toUInt32(0), 1.500000000 < toUInt32(0), 1.500000000 <= toUInt32(0), 1.500000000 > toUInt32(0), 1.500000000 >= toUInt32(0) , toInt32(0) = 1.500000000, toInt32(0) != 1.500000000, toInt32(0) < 1.500000000, toInt32(0) <= 1.500000000, toInt32(0) > 1.500000000, toInt32(0) >= 1.500000000, 1.500000000 = toInt32(0), 1.500000000 != toInt32(0), 1.500000000 < toInt32(0), 1.500000000 <= toInt32(0), 1.500000000 > toInt32(0), 1.500000000 >= toInt32(0) , toUInt64(0) = 1.500000000, toUInt64(0) != 1.500000000, toUInt64(0) < 1.500000000, toUInt64(0) <= 1.500000000, toUInt64(0) > 1.500000000, toUInt64(0) >= 1.500000000, 1.500000000 = toUInt64(0), 1.500000000 != toUInt64(0), 1.500000000 < toUInt64(0), 1.500000000 <= toUInt64(0), 1.500000000 > toUInt64(0), 1.500000000 >= toUInt64(0) , toInt64(0) = 1.500000000, toInt64(0) != 1.500000000, toInt64(0) < 1.500000000, toInt64(0) <= 1.500000000, toInt64(0) > 1.500000000, toInt64(0) >= 1.500000000, 1.500000000 = toInt64(0), 1.500000000 != toInt64(0), 1.500000000 < toInt64(0), 1.500000000 <= toInt64(0), 1.500000000 > toInt64(0), 1.500000000 >= toInt64(0) ; +SELECT '0', '9007199254740992.000000000', 0 = 9007199254740992.000000000, 0 != 9007199254740992.000000000, 0 < 9007199254740992.000000000, 0 <= 9007199254740992.000000000, 0 > 9007199254740992.000000000, 0 >= 9007199254740992.000000000, 9007199254740992.000000000 = 0, 9007199254740992.000000000 != 0, 9007199254740992.000000000 < 0, 9007199254740992.000000000 <= 0, 9007199254740992.000000000 > 0, 9007199254740992.000000000 >= 0 , toUInt8(0) = 9007199254740992.000000000, toUInt8(0) != 9007199254740992.000000000, toUInt8(0) < 9007199254740992.000000000, toUInt8(0) <= 9007199254740992.000000000, toUInt8(0) > 9007199254740992.000000000, toUInt8(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt8(0), 9007199254740992.000000000 != toUInt8(0), 9007199254740992.000000000 < toUInt8(0), 9007199254740992.000000000 <= toUInt8(0), 9007199254740992.000000000 > toUInt8(0), 9007199254740992.000000000 >= toUInt8(0) , toInt8(0) = 9007199254740992.000000000, toInt8(0) != 9007199254740992.000000000, toInt8(0) < 9007199254740992.000000000, toInt8(0) <= 9007199254740992.000000000, toInt8(0) > 9007199254740992.000000000, toInt8(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt8(0), 9007199254740992.000000000 != toInt8(0), 9007199254740992.000000000 < toInt8(0), 9007199254740992.000000000 <= toInt8(0), 9007199254740992.000000000 > toInt8(0), 9007199254740992.000000000 >= toInt8(0) , toUInt16(0) = 9007199254740992.000000000, toUInt16(0) != 9007199254740992.000000000, toUInt16(0) < 9007199254740992.000000000, toUInt16(0) <= 9007199254740992.000000000, toUInt16(0) > 9007199254740992.000000000, toUInt16(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt16(0), 9007199254740992.000000000 != toUInt16(0), 9007199254740992.000000000 < toUInt16(0), 9007199254740992.000000000 <= toUInt16(0), 9007199254740992.000000000 > toUInt16(0), 9007199254740992.000000000 >= toUInt16(0) , toInt16(0) = 9007199254740992.000000000, toInt16(0) != 9007199254740992.000000000, toInt16(0) < 9007199254740992.000000000, toInt16(0) <= 9007199254740992.000000000, toInt16(0) > 9007199254740992.000000000, toInt16(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt16(0), 9007199254740992.000000000 != toInt16(0), 9007199254740992.000000000 < toInt16(0), 9007199254740992.000000000 <= 
toInt16(0), 9007199254740992.000000000 > toInt16(0), 9007199254740992.000000000 >= toInt16(0) , toUInt32(0) = 9007199254740992.000000000, toUInt32(0) != 9007199254740992.000000000, toUInt32(0) < 9007199254740992.000000000, toUInt32(0) <= 9007199254740992.000000000, toUInt32(0) > 9007199254740992.000000000, toUInt32(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt32(0), 9007199254740992.000000000 != toUInt32(0), 9007199254740992.000000000 < toUInt32(0), 9007199254740992.000000000 <= toUInt32(0), 9007199254740992.000000000 > toUInt32(0), 9007199254740992.000000000 >= toUInt32(0) , toInt32(0) = 9007199254740992.000000000, toInt32(0) != 9007199254740992.000000000, toInt32(0) < 9007199254740992.000000000, toInt32(0) <= 9007199254740992.000000000, toInt32(0) > 9007199254740992.000000000, toInt32(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt32(0), 9007199254740992.000000000 != toInt32(0), 9007199254740992.000000000 < toInt32(0), 9007199254740992.000000000 <= toInt32(0), 9007199254740992.000000000 > toInt32(0), 9007199254740992.000000000 >= toInt32(0) , toUInt64(0) = 9007199254740992.000000000, toUInt64(0) != 9007199254740992.000000000, toUInt64(0) < 9007199254740992.000000000, toUInt64(0) <= 9007199254740992.000000000, toUInt64(0) > 9007199254740992.000000000, toUInt64(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt64(0), 9007199254740992.000000000 != toUInt64(0), 9007199254740992.000000000 < toUInt64(0), 9007199254740992.000000000 <= toUInt64(0), 9007199254740992.000000000 > toUInt64(0), 9007199254740992.000000000 >= toUInt64(0) , toInt64(0) = 9007199254740992.000000000, toInt64(0) != 9007199254740992.000000000, toInt64(0) < 9007199254740992.000000000, toInt64(0) <= 9007199254740992.000000000, toInt64(0) > 9007199254740992.000000000, toInt64(0) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt64(0), 9007199254740992.000000000 != toInt64(0), 9007199254740992.000000000 < toInt64(0), 9007199254740992.000000000 <= toInt64(0), 9007199254740992.000000000 > toInt64(0), 9007199254740992.000000000 >= toInt64(0) ; +SELECT '0', '2251799813685247.500000000', 0 = 2251799813685247.500000000, 0 != 2251799813685247.500000000, 0 < 2251799813685247.500000000, 0 <= 2251799813685247.500000000, 0 > 2251799813685247.500000000, 0 >= 2251799813685247.500000000, 2251799813685247.500000000 = 0, 2251799813685247.500000000 != 0, 2251799813685247.500000000 < 0, 2251799813685247.500000000 <= 0, 2251799813685247.500000000 > 0, 2251799813685247.500000000 >= 0 , toUInt8(0) = 2251799813685247.500000000, toUInt8(0) != 2251799813685247.500000000, toUInt8(0) < 2251799813685247.500000000, toUInt8(0) <= 2251799813685247.500000000, toUInt8(0) > 2251799813685247.500000000, toUInt8(0) >= 2251799813685247.500000000, 2251799813685247.500000000 = toUInt8(0), 2251799813685247.500000000 != toUInt8(0), 2251799813685247.500000000 < toUInt8(0), 2251799813685247.500000000 <= toUInt8(0), 2251799813685247.500000000 > toUInt8(0), 2251799813685247.500000000 >= toUInt8(0) , toInt8(0) = 2251799813685247.500000000, toInt8(0) != 2251799813685247.500000000, toInt8(0) < 2251799813685247.500000000, toInt8(0) <= 2251799813685247.500000000, toInt8(0) > 2251799813685247.500000000, toInt8(0) >= 2251799813685247.500000000, 2251799813685247.500000000 = toInt8(0), 2251799813685247.500000000 != toInt8(0), 2251799813685247.500000000 < toInt8(0), 2251799813685247.500000000 <= toInt8(0), 2251799813685247.500000000 > toInt8(0), 2251799813685247.500000000 >= toInt8(0) , toUInt16(0) = 
2251799813685247.500000000, toUInt16(0) != 2251799813685247.500000000, toUInt16(0) < 2251799813685247.500000000, toUInt16(0) <= 2251799813685247.500000000, toUInt16(0) > 2251799813685247.500000000, toUInt16(0) >= 2251799813685247.500000000, 2251799813685247.500000000 = toUInt16(0), 2251799813685247.500000000 != toUInt16(0), 2251799813685247.500000000 < toUInt16(0), 2251799813685247.500000000 <= toUInt16(0), 2251799813685247.500000000 > toUInt16(0), 2251799813685247.500000000 >= toUInt16(0) , toInt16(0) = 2251799813685247.500000000, toInt16(0) != 2251799813685247.500000000, toInt16(0) < 2251799813685247.500000000, toInt16(0) <= 2251799813685247.500000000, toInt16(0) > 2251799813685247.500000000, toInt16(0) >= 2251799813685247.500000000, 2251799813685247.500000000 = toInt16(0), 2251799813685247.500000000 != toInt16(0), 2251799813685247.500000000 < toInt16(0), 2251799813685247.500000000 <= toInt16(0), 2251799813685247.500000000 > toInt16(0), 2251799813685247.500000000 >= toInt16(0) , toUInt32(0) = 2251799813685247.500000000, toUInt32(0) != 2251799813685247.500000000, toUInt32(0) < 2251799813685247.500000000, toUInt32(0) <= 2251799813685247.500000000, toUInt32(0) > 2251799813685247.500000000, toUInt32(0) >= 2251799813685247.500000000, 2251799813685247.500000000 = toUInt32(0), 2251799813685247.500000000 != toUInt32(0), 2251799813685247.500000000 < toUInt32(0), 2251799813685247.500000000 <= toUInt32(0), 2251799813685247.500000000 > toUInt32(0), 2251799813685247.500000000 >= toUInt32(0) , toInt32(0) = 2251799813685247.500000000, toInt32(0) != 2251799813685247.500000000, toInt32(0) < 2251799813685247.500000000, toInt32(0) <= 2251799813685247.500000000, toInt32(0) > 2251799813685247.500000000, toInt32(0) >= 2251799813685247.500000000, 2251799813685247.500000000 = toInt32(0), 2251799813685247.500000000 != toInt32(0), 2251799813685247.500000000 < toInt32(0), 2251799813685247.500000000 <= toInt32(0), 2251799813685247.500000000 > toInt32(0), 2251799813685247.500000000 >= toInt32(0) , toUInt64(0) = 2251799813685247.500000000, toUInt64(0) != 2251799813685247.500000000, toUInt64(0) < 2251799813685247.500000000, toUInt64(0) <= 2251799813685247.500000000, toUInt64(0) > 2251799813685247.500000000, toUInt64(0) >= 2251799813685247.500000000, 2251799813685247.500000000 = toUInt64(0), 2251799813685247.500000000 != toUInt64(0), 2251799813685247.500000000 < toUInt64(0), 2251799813685247.500000000 <= toUInt64(0), 2251799813685247.500000000 > toUInt64(0), 2251799813685247.500000000 >= toUInt64(0) , toInt64(0) = 2251799813685247.500000000, toInt64(0) != 2251799813685247.500000000, toInt64(0) < 2251799813685247.500000000, toInt64(0) <= 2251799813685247.500000000, toInt64(0) > 2251799813685247.500000000, toInt64(0) >= 2251799813685247.500000000, 2251799813685247.500000000 = toInt64(0), 2251799813685247.500000000 != toInt64(0), 2251799813685247.500000000 < toInt64(0), 2251799813685247.500000000 <= toInt64(0), 2251799813685247.500000000 > toInt64(0), 2251799813685247.500000000 >= toInt64(0) ; +SELECT '0', '2251799813685248.500000000', 0 = 2251799813685248.500000000, 0 != 2251799813685248.500000000, 0 < 2251799813685248.500000000, 0 <= 2251799813685248.500000000, 0 > 2251799813685248.500000000, 0 >= 2251799813685248.500000000, 2251799813685248.500000000 = 0, 2251799813685248.500000000 != 0, 2251799813685248.500000000 < 0, 2251799813685248.500000000 <= 0, 2251799813685248.500000000 > 0, 2251799813685248.500000000 >= 0 , toUInt8(0) = 2251799813685248.500000000, toUInt8(0) != 2251799813685248.500000000, toUInt8(0) < 
2251799813685248.500000000, toUInt8(0) <= 2251799813685248.500000000, toUInt8(0) > 2251799813685248.500000000, toUInt8(0) >= 2251799813685248.500000000, 2251799813685248.500000000 = toUInt8(0), 2251799813685248.500000000 != toUInt8(0), 2251799813685248.500000000 < toUInt8(0), 2251799813685248.500000000 <= toUInt8(0), 2251799813685248.500000000 > toUInt8(0), 2251799813685248.500000000 >= toUInt8(0) , toInt8(0) = 2251799813685248.500000000, toInt8(0) != 2251799813685248.500000000, toInt8(0) < 2251799813685248.500000000, toInt8(0) <= 2251799813685248.500000000, toInt8(0) > 2251799813685248.500000000, toInt8(0) >= 2251799813685248.500000000, 2251799813685248.500000000 = toInt8(0), 2251799813685248.500000000 != toInt8(0), 2251799813685248.500000000 < toInt8(0), 2251799813685248.500000000 <= toInt8(0), 2251799813685248.500000000 > toInt8(0), 2251799813685248.500000000 >= toInt8(0) , toUInt16(0) = 2251799813685248.500000000, toUInt16(0) != 2251799813685248.500000000, toUInt16(0) < 2251799813685248.500000000, toUInt16(0) <= 2251799813685248.500000000, toUInt16(0) > 2251799813685248.500000000, toUInt16(0) >= 2251799813685248.500000000, 2251799813685248.500000000 = toUInt16(0), 2251799813685248.500000000 != toUInt16(0), 2251799813685248.500000000 < toUInt16(0), 2251799813685248.500000000 <= toUInt16(0), 2251799813685248.500000000 > toUInt16(0), 2251799813685248.500000000 >= toUInt16(0) , toInt16(0) = 2251799813685248.500000000, toInt16(0) != 2251799813685248.500000000, toInt16(0) < 2251799813685248.500000000, toInt16(0) <= 2251799813685248.500000000, toInt16(0) > 2251799813685248.500000000, toInt16(0) >= 2251799813685248.500000000, 2251799813685248.500000000 = toInt16(0), 2251799813685248.500000000 != toInt16(0), 2251799813685248.500000000 < toInt16(0), 2251799813685248.500000000 <= toInt16(0), 2251799813685248.500000000 > toInt16(0), 2251799813685248.500000000 >= toInt16(0) , toUInt32(0) = 2251799813685248.500000000, toUInt32(0) != 2251799813685248.500000000, toUInt32(0) < 2251799813685248.500000000, toUInt32(0) <= 2251799813685248.500000000, toUInt32(0) > 2251799813685248.500000000, toUInt32(0) >= 2251799813685248.500000000, 2251799813685248.500000000 = toUInt32(0), 2251799813685248.500000000 != toUInt32(0), 2251799813685248.500000000 < toUInt32(0), 2251799813685248.500000000 <= toUInt32(0), 2251799813685248.500000000 > toUInt32(0), 2251799813685248.500000000 >= toUInt32(0) , toInt32(0) = 2251799813685248.500000000, toInt32(0) != 2251799813685248.500000000, toInt32(0) < 2251799813685248.500000000, toInt32(0) <= 2251799813685248.500000000, toInt32(0) > 2251799813685248.500000000, toInt32(0) >= 2251799813685248.500000000, 2251799813685248.500000000 = toInt32(0), 2251799813685248.500000000 != toInt32(0), 2251799813685248.500000000 < toInt32(0), 2251799813685248.500000000 <= toInt32(0), 2251799813685248.500000000 > toInt32(0), 2251799813685248.500000000 >= toInt32(0) , toUInt64(0) = 2251799813685248.500000000, toUInt64(0) != 2251799813685248.500000000, toUInt64(0) < 2251799813685248.500000000, toUInt64(0) <= 2251799813685248.500000000, toUInt64(0) > 2251799813685248.500000000, toUInt64(0) >= 2251799813685248.500000000, 2251799813685248.500000000 = toUInt64(0), 2251799813685248.500000000 != toUInt64(0), 2251799813685248.500000000 < toUInt64(0), 2251799813685248.500000000 <= toUInt64(0), 2251799813685248.500000000 > toUInt64(0), 2251799813685248.500000000 >= toUInt64(0) , toInt64(0) = 2251799813685248.500000000, toInt64(0) != 2251799813685248.500000000, toInt64(0) < 2251799813685248.500000000, 
toInt64(0) <= 2251799813685248.500000000, toInt64(0) > 2251799813685248.500000000, toInt64(0) >= 2251799813685248.500000000, 2251799813685248.500000000 = toInt64(0), 2251799813685248.500000000 != toInt64(0), 2251799813685248.500000000 < toInt64(0), 2251799813685248.500000000 <= toInt64(0), 2251799813685248.500000000 > toInt64(0), 2251799813685248.500000000 >= toInt64(0) ; +SELECT '0', '1152921504606846976.000000000', 0 = 1152921504606846976.000000000, 0 != 1152921504606846976.000000000, 0 < 1152921504606846976.000000000, 0 <= 1152921504606846976.000000000, 0 > 1152921504606846976.000000000, 0 >= 1152921504606846976.000000000, 1152921504606846976.000000000 = 0, 1152921504606846976.000000000 != 0, 1152921504606846976.000000000 < 0, 1152921504606846976.000000000 <= 0, 1152921504606846976.000000000 > 0, 1152921504606846976.000000000 >= 0 , toUInt8(0) = 1152921504606846976.000000000, toUInt8(0) != 1152921504606846976.000000000, toUInt8(0) < 1152921504606846976.000000000, toUInt8(0) <= 1152921504606846976.000000000, toUInt8(0) > 1152921504606846976.000000000, toUInt8(0) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toUInt8(0), 1152921504606846976.000000000 != toUInt8(0), 1152921504606846976.000000000 < toUInt8(0), 1152921504606846976.000000000 <= toUInt8(0), 1152921504606846976.000000000 > toUInt8(0), 1152921504606846976.000000000 >= toUInt8(0) , toInt8(0) = 1152921504606846976.000000000, toInt8(0) != 1152921504606846976.000000000, toInt8(0) < 1152921504606846976.000000000, toInt8(0) <= 1152921504606846976.000000000, toInt8(0) > 1152921504606846976.000000000, toInt8(0) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toInt8(0), 1152921504606846976.000000000 != toInt8(0), 1152921504606846976.000000000 < toInt8(0), 1152921504606846976.000000000 <= toInt8(0), 1152921504606846976.000000000 > toInt8(0), 1152921504606846976.000000000 >= toInt8(0) , toUInt16(0) = 1152921504606846976.000000000, toUInt16(0) != 1152921504606846976.000000000, toUInt16(0) < 1152921504606846976.000000000, toUInt16(0) <= 1152921504606846976.000000000, toUInt16(0) > 1152921504606846976.000000000, toUInt16(0) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toUInt16(0), 1152921504606846976.000000000 != toUInt16(0), 1152921504606846976.000000000 < toUInt16(0), 1152921504606846976.000000000 <= toUInt16(0), 1152921504606846976.000000000 > toUInt16(0), 1152921504606846976.000000000 >= toUInt16(0) , toInt16(0) = 1152921504606846976.000000000, toInt16(0) != 1152921504606846976.000000000, toInt16(0) < 1152921504606846976.000000000, toInt16(0) <= 1152921504606846976.000000000, toInt16(0) > 1152921504606846976.000000000, toInt16(0) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toInt16(0), 1152921504606846976.000000000 != toInt16(0), 1152921504606846976.000000000 < toInt16(0), 1152921504606846976.000000000 <= toInt16(0), 1152921504606846976.000000000 > toInt16(0), 1152921504606846976.000000000 >= toInt16(0) , toUInt32(0) = 1152921504606846976.000000000, toUInt32(0) != 1152921504606846976.000000000, toUInt32(0) < 1152921504606846976.000000000, toUInt32(0) <= 1152921504606846976.000000000, toUInt32(0) > 1152921504606846976.000000000, toUInt32(0) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toUInt32(0), 1152921504606846976.000000000 != toUInt32(0), 1152921504606846976.000000000 < toUInt32(0), 1152921504606846976.000000000 <= toUInt32(0), 1152921504606846976.000000000 > toUInt32(0), 1152921504606846976.000000000 >= toUInt32(0) , toInt32(0) = 
1152921504606846976.000000000, toInt32(0) != 1152921504606846976.000000000, toInt32(0) < 1152921504606846976.000000000, toInt32(0) <= 1152921504606846976.000000000, toInt32(0) > 1152921504606846976.000000000, toInt32(0) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toInt32(0), 1152921504606846976.000000000 != toInt32(0), 1152921504606846976.000000000 < toInt32(0), 1152921504606846976.000000000 <= toInt32(0), 1152921504606846976.000000000 > toInt32(0), 1152921504606846976.000000000 >= toInt32(0) , toUInt64(0) = 1152921504606846976.000000000, toUInt64(0) != 1152921504606846976.000000000, toUInt64(0) < 1152921504606846976.000000000, toUInt64(0) <= 1152921504606846976.000000000, toUInt64(0) > 1152921504606846976.000000000, toUInt64(0) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toUInt64(0), 1152921504606846976.000000000 != toUInt64(0), 1152921504606846976.000000000 < toUInt64(0), 1152921504606846976.000000000 <= toUInt64(0), 1152921504606846976.000000000 > toUInt64(0), 1152921504606846976.000000000 >= toUInt64(0) , toInt64(0) = 1152921504606846976.000000000, toInt64(0) != 1152921504606846976.000000000, toInt64(0) < 1152921504606846976.000000000, toInt64(0) <= 1152921504606846976.000000000, toInt64(0) > 1152921504606846976.000000000, toInt64(0) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toInt64(0), 1152921504606846976.000000000 != toInt64(0), 1152921504606846976.000000000 < toInt64(0), 1152921504606846976.000000000 <= toInt64(0), 1152921504606846976.000000000 > toInt64(0), 1152921504606846976.000000000 >= toInt64(0) ; +SELECT '0', '-1152921504606846976.000000000', 0 = -1152921504606846976.000000000, 0 != -1152921504606846976.000000000, 0 < -1152921504606846976.000000000, 0 <= -1152921504606846976.000000000, 0 > -1152921504606846976.000000000, 0 >= -1152921504606846976.000000000, -1152921504606846976.000000000 = 0, -1152921504606846976.000000000 != 0, -1152921504606846976.000000000 < 0, -1152921504606846976.000000000 <= 0, -1152921504606846976.000000000 > 0, -1152921504606846976.000000000 >= 0 , toUInt8(0) = -1152921504606846976.000000000, toUInt8(0) != -1152921504606846976.000000000, toUInt8(0) < -1152921504606846976.000000000, toUInt8(0) <= -1152921504606846976.000000000, toUInt8(0) > -1152921504606846976.000000000, toUInt8(0) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toUInt8(0), -1152921504606846976.000000000 != toUInt8(0), -1152921504606846976.000000000 < toUInt8(0), -1152921504606846976.000000000 <= toUInt8(0), -1152921504606846976.000000000 > toUInt8(0), -1152921504606846976.000000000 >= toUInt8(0) , toInt8(0) = -1152921504606846976.000000000, toInt8(0) != -1152921504606846976.000000000, toInt8(0) < -1152921504606846976.000000000, toInt8(0) <= -1152921504606846976.000000000, toInt8(0) > -1152921504606846976.000000000, toInt8(0) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toInt8(0), -1152921504606846976.000000000 != toInt8(0), -1152921504606846976.000000000 < toInt8(0), -1152921504606846976.000000000 <= toInt8(0), -1152921504606846976.000000000 > toInt8(0), -1152921504606846976.000000000 >= toInt8(0) , toUInt16(0) = -1152921504606846976.000000000, toUInt16(0) != -1152921504606846976.000000000, toUInt16(0) < -1152921504606846976.000000000, toUInt16(0) <= -1152921504606846976.000000000, toUInt16(0) > -1152921504606846976.000000000, toUInt16(0) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toUInt16(0), -1152921504606846976.000000000 != toUInt16(0), 
-1152921504606846976.000000000 < toUInt16(0), -1152921504606846976.000000000 <= toUInt16(0), -1152921504606846976.000000000 > toUInt16(0), -1152921504606846976.000000000 >= toUInt16(0) , toInt16(0) = -1152921504606846976.000000000, toInt16(0) != -1152921504606846976.000000000, toInt16(0) < -1152921504606846976.000000000, toInt16(0) <= -1152921504606846976.000000000, toInt16(0) > -1152921504606846976.000000000, toInt16(0) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toInt16(0), -1152921504606846976.000000000 != toInt16(0), -1152921504606846976.000000000 < toInt16(0), -1152921504606846976.000000000 <= toInt16(0), -1152921504606846976.000000000 > toInt16(0), -1152921504606846976.000000000 >= toInt16(0) , toUInt32(0) = -1152921504606846976.000000000, toUInt32(0) != -1152921504606846976.000000000, toUInt32(0) < -1152921504606846976.000000000, toUInt32(0) <= -1152921504606846976.000000000, toUInt32(0) > -1152921504606846976.000000000, toUInt32(0) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toUInt32(0), -1152921504606846976.000000000 != toUInt32(0), -1152921504606846976.000000000 < toUInt32(0), -1152921504606846976.000000000 <= toUInt32(0), -1152921504606846976.000000000 > toUInt32(0), -1152921504606846976.000000000 >= toUInt32(0) , toInt32(0) = -1152921504606846976.000000000, toInt32(0) != -1152921504606846976.000000000, toInt32(0) < -1152921504606846976.000000000, toInt32(0) <= -1152921504606846976.000000000, toInt32(0) > -1152921504606846976.000000000, toInt32(0) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toInt32(0), -1152921504606846976.000000000 != toInt32(0), -1152921504606846976.000000000 < toInt32(0), -1152921504606846976.000000000 <= toInt32(0), -1152921504606846976.000000000 > toInt32(0), -1152921504606846976.000000000 >= toInt32(0) , toUInt64(0) = -1152921504606846976.000000000, toUInt64(0) != -1152921504606846976.000000000, toUInt64(0) < -1152921504606846976.000000000, toUInt64(0) <= -1152921504606846976.000000000, toUInt64(0) > -1152921504606846976.000000000, toUInt64(0) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toUInt64(0), -1152921504606846976.000000000 != toUInt64(0), -1152921504606846976.000000000 < toUInt64(0), -1152921504606846976.000000000 <= toUInt64(0), -1152921504606846976.000000000 > toUInt64(0), -1152921504606846976.000000000 >= toUInt64(0) , toInt64(0) = -1152921504606846976.000000000, toInt64(0) != -1152921504606846976.000000000, toInt64(0) < -1152921504606846976.000000000, toInt64(0) <= -1152921504606846976.000000000, toInt64(0) > -1152921504606846976.000000000, toInt64(0) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toInt64(0), -1152921504606846976.000000000 != toInt64(0), -1152921504606846976.000000000 < toInt64(0), -1152921504606846976.000000000 <= toInt64(0), -1152921504606846976.000000000 > toInt64(0), -1152921504606846976.000000000 >= toInt64(0) ; +SELECT '0', '-9223372036854786048.000000000', 0 = -9223372036854786048.000000000, 0 != -9223372036854786048.000000000, 0 < -9223372036854786048.000000000, 0 <= -9223372036854786048.000000000, 0 > -9223372036854786048.000000000, 0 >= -9223372036854786048.000000000, -9223372036854786048.000000000 = 0, -9223372036854786048.000000000 != 0, -9223372036854786048.000000000 < 0, -9223372036854786048.000000000 <= 0, -9223372036854786048.000000000 > 0, -9223372036854786048.000000000 >= 0 , toUInt8(0) = -9223372036854786048.000000000, toUInt8(0) != -9223372036854786048.000000000, toUInt8(0) < 
-9223372036854786048.000000000, toUInt8(0) <= -9223372036854786048.000000000, toUInt8(0) > -9223372036854786048.000000000, toUInt8(0) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toUInt8(0), -9223372036854786048.000000000 != toUInt8(0), -9223372036854786048.000000000 < toUInt8(0), -9223372036854786048.000000000 <= toUInt8(0), -9223372036854786048.000000000 > toUInt8(0), -9223372036854786048.000000000 >= toUInt8(0) , toInt8(0) = -9223372036854786048.000000000, toInt8(0) != -9223372036854786048.000000000, toInt8(0) < -9223372036854786048.000000000, toInt8(0) <= -9223372036854786048.000000000, toInt8(0) > -9223372036854786048.000000000, toInt8(0) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toInt8(0), -9223372036854786048.000000000 != toInt8(0), -9223372036854786048.000000000 < toInt8(0), -9223372036854786048.000000000 <= toInt8(0), -9223372036854786048.000000000 > toInt8(0), -9223372036854786048.000000000 >= toInt8(0) , toUInt16(0) = -9223372036854786048.000000000, toUInt16(0) != -9223372036854786048.000000000, toUInt16(0) < -9223372036854786048.000000000, toUInt16(0) <= -9223372036854786048.000000000, toUInt16(0) > -9223372036854786048.000000000, toUInt16(0) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toUInt16(0), -9223372036854786048.000000000 != toUInt16(0), -9223372036854786048.000000000 < toUInt16(0), -9223372036854786048.000000000 <= toUInt16(0), -9223372036854786048.000000000 > toUInt16(0), -9223372036854786048.000000000 >= toUInt16(0) , toInt16(0) = -9223372036854786048.000000000, toInt16(0) != -9223372036854786048.000000000, toInt16(0) < -9223372036854786048.000000000, toInt16(0) <= -9223372036854786048.000000000, toInt16(0) > -9223372036854786048.000000000, toInt16(0) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toInt16(0), -9223372036854786048.000000000 != toInt16(0), -9223372036854786048.000000000 < toInt16(0), -9223372036854786048.000000000 <= toInt16(0), -9223372036854786048.000000000 > toInt16(0), -9223372036854786048.000000000 >= toInt16(0) , toUInt32(0) = -9223372036854786048.000000000, toUInt32(0) != -9223372036854786048.000000000, toUInt32(0) < -9223372036854786048.000000000, toUInt32(0) <= -9223372036854786048.000000000, toUInt32(0) > -9223372036854786048.000000000, toUInt32(0) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toUInt32(0), -9223372036854786048.000000000 != toUInt32(0), -9223372036854786048.000000000 < toUInt32(0), -9223372036854786048.000000000 <= toUInt32(0), -9223372036854786048.000000000 > toUInt32(0), -9223372036854786048.000000000 >= toUInt32(0) , toInt32(0) = -9223372036854786048.000000000, toInt32(0) != -9223372036854786048.000000000, toInt32(0) < -9223372036854786048.000000000, toInt32(0) <= -9223372036854786048.000000000, toInt32(0) > -9223372036854786048.000000000, toInt32(0) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toInt32(0), -9223372036854786048.000000000 != toInt32(0), -9223372036854786048.000000000 < toInt32(0), -9223372036854786048.000000000 <= toInt32(0), -9223372036854786048.000000000 > toInt32(0), -9223372036854786048.000000000 >= toInt32(0) , toUInt64(0) = -9223372036854786048.000000000, toUInt64(0) != -9223372036854786048.000000000, toUInt64(0) < -9223372036854786048.000000000, toUInt64(0) <= -9223372036854786048.000000000, toUInt64(0) > -9223372036854786048.000000000, toUInt64(0) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toUInt64(0), -9223372036854786048.000000000 != 
toUInt64(0), -9223372036854786048.000000000 < toUInt64(0), -9223372036854786048.000000000 <= toUInt64(0), -9223372036854786048.000000000 > toUInt64(0), -9223372036854786048.000000000 >= toUInt64(0) , toInt64(0) = -9223372036854786048.000000000, toInt64(0) != -9223372036854786048.000000000, toInt64(0) < -9223372036854786048.000000000, toInt64(0) <= -9223372036854786048.000000000, toInt64(0) > -9223372036854786048.000000000, toInt64(0) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toInt64(0), -9223372036854786048.000000000 != toInt64(0), -9223372036854786048.000000000 < toInt64(0), -9223372036854786048.000000000 <= toInt64(0), -9223372036854786048.000000000 > toInt64(0), -9223372036854786048.000000000 >= toInt64(0) ; +SELECT '0', '9223372036854786048.000000000', 0 = 9223372036854786048.000000000, 0 != 9223372036854786048.000000000, 0 < 9223372036854786048.000000000, 0 <= 9223372036854786048.000000000, 0 > 9223372036854786048.000000000, 0 >= 9223372036854786048.000000000, 9223372036854786048.000000000 = 0, 9223372036854786048.000000000 != 0, 9223372036854786048.000000000 < 0, 9223372036854786048.000000000 <= 0, 9223372036854786048.000000000 > 0, 9223372036854786048.000000000 >= 0 , toUInt8(0) = 9223372036854786048.000000000, toUInt8(0) != 9223372036854786048.000000000, toUInt8(0) < 9223372036854786048.000000000, toUInt8(0) <= 9223372036854786048.000000000, toUInt8(0) > 9223372036854786048.000000000, toUInt8(0) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toUInt8(0), 9223372036854786048.000000000 != toUInt8(0), 9223372036854786048.000000000 < toUInt8(0), 9223372036854786048.000000000 <= toUInt8(0), 9223372036854786048.000000000 > toUInt8(0), 9223372036854786048.000000000 >= toUInt8(0) , toInt8(0) = 9223372036854786048.000000000, toInt8(0) != 9223372036854786048.000000000, toInt8(0) < 9223372036854786048.000000000, toInt8(0) <= 9223372036854786048.000000000, toInt8(0) > 9223372036854786048.000000000, toInt8(0) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toInt8(0), 9223372036854786048.000000000 != toInt8(0), 9223372036854786048.000000000 < toInt8(0), 9223372036854786048.000000000 <= toInt8(0), 9223372036854786048.000000000 > toInt8(0), 9223372036854786048.000000000 >= toInt8(0) , toUInt16(0) = 9223372036854786048.000000000, toUInt16(0) != 9223372036854786048.000000000, toUInt16(0) < 9223372036854786048.000000000, toUInt16(0) <= 9223372036854786048.000000000, toUInt16(0) > 9223372036854786048.000000000, toUInt16(0) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toUInt16(0), 9223372036854786048.000000000 != toUInt16(0), 9223372036854786048.000000000 < toUInt16(0), 9223372036854786048.000000000 <= toUInt16(0), 9223372036854786048.000000000 > toUInt16(0), 9223372036854786048.000000000 >= toUInt16(0) , toInt16(0) = 9223372036854786048.000000000, toInt16(0) != 9223372036854786048.000000000, toInt16(0) < 9223372036854786048.000000000, toInt16(0) <= 9223372036854786048.000000000, toInt16(0) > 9223372036854786048.000000000, toInt16(0) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toInt16(0), 9223372036854786048.000000000 != toInt16(0), 9223372036854786048.000000000 < toInt16(0), 9223372036854786048.000000000 <= toInt16(0), 9223372036854786048.000000000 > toInt16(0), 9223372036854786048.000000000 >= toInt16(0) , toUInt32(0) = 9223372036854786048.000000000, toUInt32(0) != 9223372036854786048.000000000, toUInt32(0) < 9223372036854786048.000000000, toUInt32(0) <= 9223372036854786048.000000000, toUInt32(0) 
> 9223372036854786048.000000000, toUInt32(0) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toUInt32(0), 9223372036854786048.000000000 != toUInt32(0), 9223372036854786048.000000000 < toUInt32(0), 9223372036854786048.000000000 <= toUInt32(0), 9223372036854786048.000000000 > toUInt32(0), 9223372036854786048.000000000 >= toUInt32(0) , toInt32(0) = 9223372036854786048.000000000, toInt32(0) != 9223372036854786048.000000000, toInt32(0) < 9223372036854786048.000000000, toInt32(0) <= 9223372036854786048.000000000, toInt32(0) > 9223372036854786048.000000000, toInt32(0) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toInt32(0), 9223372036854786048.000000000 != toInt32(0), 9223372036854786048.000000000 < toInt32(0), 9223372036854786048.000000000 <= toInt32(0), 9223372036854786048.000000000 > toInt32(0), 9223372036854786048.000000000 >= toInt32(0) , toUInt64(0) = 9223372036854786048.000000000, toUInt64(0) != 9223372036854786048.000000000, toUInt64(0) < 9223372036854786048.000000000, toUInt64(0) <= 9223372036854786048.000000000, toUInt64(0) > 9223372036854786048.000000000, toUInt64(0) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toUInt64(0), 9223372036854786048.000000000 != toUInt64(0), 9223372036854786048.000000000 < toUInt64(0), 9223372036854786048.000000000 <= toUInt64(0), 9223372036854786048.000000000 > toUInt64(0), 9223372036854786048.000000000 >= toUInt64(0) , toInt64(0) = 9223372036854786048.000000000, toInt64(0) != 9223372036854786048.000000000, toInt64(0) < 9223372036854786048.000000000, toInt64(0) <= 9223372036854786048.000000000, toInt64(0) > 9223372036854786048.000000000, toInt64(0) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toInt64(0), 9223372036854786048.000000000 != toInt64(0), 9223372036854786048.000000000 < toInt64(0), 9223372036854786048.000000000 <= toInt64(0), 9223372036854786048.000000000 > toInt64(0), 9223372036854786048.000000000 >= toInt64(0) ; +SELECT '-1', '0.000000000', -1 = 0.000000000, -1 != 0.000000000, -1 < 0.000000000, -1 <= 0.000000000, -1 > 0.000000000, -1 >= 0.000000000, 0.000000000 = -1, 0.000000000 != -1, 0.000000000 < -1, 0.000000000 <= -1, 0.000000000 > -1, 0.000000000 >= -1 , toInt8(-1) = 0.000000000, toInt8(-1) != 0.000000000, toInt8(-1) < 0.000000000, toInt8(-1) <= 0.000000000, toInt8(-1) > 0.000000000, toInt8(-1) >= 0.000000000, 0.000000000 = toInt8(-1), 0.000000000 != toInt8(-1), 0.000000000 < toInt8(-1), 0.000000000 <= toInt8(-1), 0.000000000 > toInt8(-1), 0.000000000 >= toInt8(-1) , toInt16(-1) = 0.000000000, toInt16(-1) != 0.000000000, toInt16(-1) < 0.000000000, toInt16(-1) <= 0.000000000, toInt16(-1) > 0.000000000, toInt16(-1) >= 0.000000000, 0.000000000 = toInt16(-1), 0.000000000 != toInt16(-1), 0.000000000 < toInt16(-1), 0.000000000 <= toInt16(-1), 0.000000000 > toInt16(-1), 0.000000000 >= toInt16(-1) , toInt32(-1) = 0.000000000, toInt32(-1) != 0.000000000, toInt32(-1) < 0.000000000, toInt32(-1) <= 0.000000000, toInt32(-1) > 0.000000000, toInt32(-1) >= 0.000000000, 0.000000000 = toInt32(-1), 0.000000000 != toInt32(-1), 0.000000000 < toInt32(-1), 0.000000000 <= toInt32(-1), 0.000000000 > toInt32(-1), 0.000000000 >= toInt32(-1) , toInt64(-1) = 0.000000000, toInt64(-1) != 0.000000000, toInt64(-1) < 0.000000000, toInt64(-1) <= 0.000000000, toInt64(-1) > 0.000000000, toInt64(-1) >= 0.000000000, 0.000000000 = toInt64(-1), 0.000000000 != toInt64(-1), 0.000000000 < toInt64(-1), 0.000000000 <= toInt64(-1), 0.000000000 > toInt64(-1), 0.000000000 >= toInt64(-1) ; +SELECT '-1', 
'-1.000000000', -1 = -1.000000000, -1 != -1.000000000, -1 < -1.000000000, -1 <= -1.000000000, -1 > -1.000000000, -1 >= -1.000000000, -1.000000000 = -1, -1.000000000 != -1, -1.000000000 < -1, -1.000000000 <= -1, -1.000000000 > -1, -1.000000000 >= -1 , toInt8(-1) = -1.000000000, toInt8(-1) != -1.000000000, toInt8(-1) < -1.000000000, toInt8(-1) <= -1.000000000, toInt8(-1) > -1.000000000, toInt8(-1) >= -1.000000000, -1.000000000 = toInt8(-1), -1.000000000 != toInt8(-1), -1.000000000 < toInt8(-1), -1.000000000 <= toInt8(-1), -1.000000000 > toInt8(-1), -1.000000000 >= toInt8(-1) , toInt16(-1) = -1.000000000, toInt16(-1) != -1.000000000, toInt16(-1) < -1.000000000, toInt16(-1) <= -1.000000000, toInt16(-1) > -1.000000000, toInt16(-1) >= -1.000000000, -1.000000000 = toInt16(-1), -1.000000000 != toInt16(-1), -1.000000000 < toInt16(-1), -1.000000000 <= toInt16(-1), -1.000000000 > toInt16(-1), -1.000000000 >= toInt16(-1) , toInt32(-1) = -1.000000000, toInt32(-1) != -1.000000000, toInt32(-1) < -1.000000000, toInt32(-1) <= -1.000000000, toInt32(-1) > -1.000000000, toInt32(-1) >= -1.000000000, -1.000000000 = toInt32(-1), -1.000000000 != toInt32(-1), -1.000000000 < toInt32(-1), -1.000000000 <= toInt32(-1), -1.000000000 > toInt32(-1), -1.000000000 >= toInt32(-1) , toInt64(-1) = -1.000000000, toInt64(-1) != -1.000000000, toInt64(-1) < -1.000000000, toInt64(-1) <= -1.000000000, toInt64(-1) > -1.000000000, toInt64(-1) >= -1.000000000, -1.000000000 = toInt64(-1), -1.000000000 != toInt64(-1), -1.000000000 < toInt64(-1), -1.000000000 <= toInt64(-1), -1.000000000 > toInt64(-1), -1.000000000 >= toInt64(-1) ; +SELECT '-1', '1.000000000', -1 = 1.000000000, -1 != 1.000000000, -1 < 1.000000000, -1 <= 1.000000000, -1 > 1.000000000, -1 >= 1.000000000, 1.000000000 = -1, 1.000000000 != -1, 1.000000000 < -1, 1.000000000 <= -1, 1.000000000 > -1, 1.000000000 >= -1 , toInt8(-1) = 1.000000000, toInt8(-1) != 1.000000000, toInt8(-1) < 1.000000000, toInt8(-1) <= 1.000000000, toInt8(-1) > 1.000000000, toInt8(-1) >= 1.000000000, 1.000000000 = toInt8(-1), 1.000000000 != toInt8(-1), 1.000000000 < toInt8(-1), 1.000000000 <= toInt8(-1), 1.000000000 > toInt8(-1), 1.000000000 >= toInt8(-1) , toInt16(-1) = 1.000000000, toInt16(-1) != 1.000000000, toInt16(-1) < 1.000000000, toInt16(-1) <= 1.000000000, toInt16(-1) > 1.000000000, toInt16(-1) >= 1.000000000, 1.000000000 = toInt16(-1), 1.000000000 != toInt16(-1), 1.000000000 < toInt16(-1), 1.000000000 <= toInt16(-1), 1.000000000 > toInt16(-1), 1.000000000 >= toInt16(-1) , toInt32(-1) = 1.000000000, toInt32(-1) != 1.000000000, toInt32(-1) < 1.000000000, toInt32(-1) <= 1.000000000, toInt32(-1) > 1.000000000, toInt32(-1) >= 1.000000000, 1.000000000 = toInt32(-1), 1.000000000 != toInt32(-1), 1.000000000 < toInt32(-1), 1.000000000 <= toInt32(-1), 1.000000000 > toInt32(-1), 1.000000000 >= toInt32(-1) , toInt64(-1) = 1.000000000, toInt64(-1) != 1.000000000, toInt64(-1) < 1.000000000, toInt64(-1) <= 1.000000000, toInt64(-1) > 1.000000000, toInt64(-1) >= 1.000000000, 1.000000000 = toInt64(-1), 1.000000000 != toInt64(-1), 1.000000000 < toInt64(-1), 1.000000000 <= toInt64(-1), 1.000000000 > toInt64(-1), 1.000000000 >= toInt64(-1) ; +SELECT '-1', '18446744073709551616.000000000', -1 = 18446744073709551616.000000000, -1 != 18446744073709551616.000000000, -1 < 18446744073709551616.000000000, -1 <= 18446744073709551616.000000000, -1 > 18446744073709551616.000000000, -1 >= 18446744073709551616.000000000, 18446744073709551616.000000000 = -1, 18446744073709551616.000000000 != -1, 18446744073709551616.000000000 
< -1, 18446744073709551616.000000000 <= -1, 18446744073709551616.000000000 > -1, 18446744073709551616.000000000 >= -1 , toInt8(-1) = 18446744073709551616.000000000, toInt8(-1) != 18446744073709551616.000000000, toInt8(-1) < 18446744073709551616.000000000, toInt8(-1) <= 18446744073709551616.000000000, toInt8(-1) > 18446744073709551616.000000000, toInt8(-1) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toInt8(-1), 18446744073709551616.000000000 != toInt8(-1), 18446744073709551616.000000000 < toInt8(-1), 18446744073709551616.000000000 <= toInt8(-1), 18446744073709551616.000000000 > toInt8(-1), 18446744073709551616.000000000 >= toInt8(-1) , toInt16(-1) = 18446744073709551616.000000000, toInt16(-1) != 18446744073709551616.000000000, toInt16(-1) < 18446744073709551616.000000000, toInt16(-1) <= 18446744073709551616.000000000, toInt16(-1) > 18446744073709551616.000000000, toInt16(-1) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toInt16(-1), 18446744073709551616.000000000 != toInt16(-1), 18446744073709551616.000000000 < toInt16(-1), 18446744073709551616.000000000 <= toInt16(-1), 18446744073709551616.000000000 > toInt16(-1), 18446744073709551616.000000000 >= toInt16(-1) , toInt32(-1) = 18446744073709551616.000000000, toInt32(-1) != 18446744073709551616.000000000, toInt32(-1) < 18446744073709551616.000000000, toInt32(-1) <= 18446744073709551616.000000000, toInt32(-1) > 18446744073709551616.000000000, toInt32(-1) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toInt32(-1), 18446744073709551616.000000000 != toInt32(-1), 18446744073709551616.000000000 < toInt32(-1), 18446744073709551616.000000000 <= toInt32(-1), 18446744073709551616.000000000 > toInt32(-1), 18446744073709551616.000000000 >= toInt32(-1) , toInt64(-1) = 18446744073709551616.000000000, toInt64(-1) != 18446744073709551616.000000000, toInt64(-1) < 18446744073709551616.000000000, toInt64(-1) <= 18446744073709551616.000000000, toInt64(-1) > 18446744073709551616.000000000, toInt64(-1) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toInt64(-1), 18446744073709551616.000000000 != toInt64(-1), 18446744073709551616.000000000 < toInt64(-1), 18446744073709551616.000000000 <= toInt64(-1), 18446744073709551616.000000000 > toInt64(-1), 18446744073709551616.000000000 >= toInt64(-1) ; +SELECT '-1', '9223372036854775808.000000000', -1 = 9223372036854775808.000000000, -1 != 9223372036854775808.000000000, -1 < 9223372036854775808.000000000, -1 <= 9223372036854775808.000000000, -1 > 9223372036854775808.000000000, -1 >= 9223372036854775808.000000000, 9223372036854775808.000000000 = -1, 9223372036854775808.000000000 != -1, 9223372036854775808.000000000 < -1, 9223372036854775808.000000000 <= -1, 9223372036854775808.000000000 > -1, 9223372036854775808.000000000 >= -1 , toInt8(-1) = 9223372036854775808.000000000, toInt8(-1) != 9223372036854775808.000000000, toInt8(-1) < 9223372036854775808.000000000, toInt8(-1) <= 9223372036854775808.000000000, toInt8(-1) > 9223372036854775808.000000000, toInt8(-1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt8(-1), 9223372036854775808.000000000 != toInt8(-1), 9223372036854775808.000000000 < toInt8(-1), 9223372036854775808.000000000 <= toInt8(-1), 9223372036854775808.000000000 > toInt8(-1), 9223372036854775808.000000000 >= toInt8(-1) , toInt16(-1) = 9223372036854775808.000000000, toInt16(-1) != 9223372036854775808.000000000, toInt16(-1) < 9223372036854775808.000000000, toInt16(-1) <= 9223372036854775808.000000000, 
toInt16(-1) > 9223372036854775808.000000000, toInt16(-1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt16(-1), 9223372036854775808.000000000 != toInt16(-1), 9223372036854775808.000000000 < toInt16(-1), 9223372036854775808.000000000 <= toInt16(-1), 9223372036854775808.000000000 > toInt16(-1), 9223372036854775808.000000000 >= toInt16(-1) , toInt32(-1) = 9223372036854775808.000000000, toInt32(-1) != 9223372036854775808.000000000, toInt32(-1) < 9223372036854775808.000000000, toInt32(-1) <= 9223372036854775808.000000000, toInt32(-1) > 9223372036854775808.000000000, toInt32(-1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt32(-1), 9223372036854775808.000000000 != toInt32(-1), 9223372036854775808.000000000 < toInt32(-1), 9223372036854775808.000000000 <= toInt32(-1), 9223372036854775808.000000000 > toInt32(-1), 9223372036854775808.000000000 >= toInt32(-1) , toInt64(-1) = 9223372036854775808.000000000, toInt64(-1) != 9223372036854775808.000000000, toInt64(-1) < 9223372036854775808.000000000, toInt64(-1) <= 9223372036854775808.000000000, toInt64(-1) > 9223372036854775808.000000000, toInt64(-1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt64(-1), 9223372036854775808.000000000 != toInt64(-1), 9223372036854775808.000000000 < toInt64(-1), 9223372036854775808.000000000 <= toInt64(-1), 9223372036854775808.000000000 > toInt64(-1), 9223372036854775808.000000000 >= toInt64(-1) ; +SELECT '-1', '-9223372036854775808.000000000', -1 = -9223372036854775808.000000000, -1 != -9223372036854775808.000000000, -1 < -9223372036854775808.000000000, -1 <= -9223372036854775808.000000000, -1 > -9223372036854775808.000000000, -1 >= -9223372036854775808.000000000, -9223372036854775808.000000000 = -1, -9223372036854775808.000000000 != -1, -9223372036854775808.000000000 < -1, -9223372036854775808.000000000 <= -1, -9223372036854775808.000000000 > -1, -9223372036854775808.000000000 >= -1 , toInt8(-1) = -9223372036854775808.000000000, toInt8(-1) != -9223372036854775808.000000000, toInt8(-1) < -9223372036854775808.000000000, toInt8(-1) <= -9223372036854775808.000000000, toInt8(-1) > -9223372036854775808.000000000, toInt8(-1) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toInt8(-1), -9223372036854775808.000000000 != toInt8(-1), -9223372036854775808.000000000 < toInt8(-1), -9223372036854775808.000000000 <= toInt8(-1), -9223372036854775808.000000000 > toInt8(-1), -9223372036854775808.000000000 >= toInt8(-1) , toInt16(-1) = -9223372036854775808.000000000, toInt16(-1) != -9223372036854775808.000000000, toInt16(-1) < -9223372036854775808.000000000, toInt16(-1) <= -9223372036854775808.000000000, toInt16(-1) > -9223372036854775808.000000000, toInt16(-1) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toInt16(-1), -9223372036854775808.000000000 != toInt16(-1), -9223372036854775808.000000000 < toInt16(-1), -9223372036854775808.000000000 <= toInt16(-1), -9223372036854775808.000000000 > toInt16(-1), -9223372036854775808.000000000 >= toInt16(-1) , toInt32(-1) = -9223372036854775808.000000000, toInt32(-1) != -9223372036854775808.000000000, toInt32(-1) < -9223372036854775808.000000000, toInt32(-1) <= -9223372036854775808.000000000, toInt32(-1) > -9223372036854775808.000000000, toInt32(-1) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toInt32(-1), -9223372036854775808.000000000 != toInt32(-1), -9223372036854775808.000000000 < toInt32(-1), -9223372036854775808.000000000 <= toInt32(-1), 
-9223372036854775808.000000000 > toInt32(-1), -9223372036854775808.000000000 >= toInt32(-1) , toInt64(-1) = -9223372036854775808.000000000, toInt64(-1) != -9223372036854775808.000000000, toInt64(-1) < -9223372036854775808.000000000, toInt64(-1) <= -9223372036854775808.000000000, toInt64(-1) > -9223372036854775808.000000000, toInt64(-1) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toInt64(-1), -9223372036854775808.000000000 != toInt64(-1), -9223372036854775808.000000000 < toInt64(-1), -9223372036854775808.000000000 <= toInt64(-1), -9223372036854775808.000000000 > toInt64(-1), -9223372036854775808.000000000 >= toInt64(-1) ; +SELECT '-1', '9223372036854775808.000000000', -1 = 9223372036854775808.000000000, -1 != 9223372036854775808.000000000, -1 < 9223372036854775808.000000000, -1 <= 9223372036854775808.000000000, -1 > 9223372036854775808.000000000, -1 >= 9223372036854775808.000000000, 9223372036854775808.000000000 = -1, 9223372036854775808.000000000 != -1, 9223372036854775808.000000000 < -1, 9223372036854775808.000000000 <= -1, 9223372036854775808.000000000 > -1, 9223372036854775808.000000000 >= -1 , toInt8(-1) = 9223372036854775808.000000000, toInt8(-1) != 9223372036854775808.000000000, toInt8(-1) < 9223372036854775808.000000000, toInt8(-1) <= 9223372036854775808.000000000, toInt8(-1) > 9223372036854775808.000000000, toInt8(-1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt8(-1), 9223372036854775808.000000000 != toInt8(-1), 9223372036854775808.000000000 < toInt8(-1), 9223372036854775808.000000000 <= toInt8(-1), 9223372036854775808.000000000 > toInt8(-1), 9223372036854775808.000000000 >= toInt8(-1) , toInt16(-1) = 9223372036854775808.000000000, toInt16(-1) != 9223372036854775808.000000000, toInt16(-1) < 9223372036854775808.000000000, toInt16(-1) <= 9223372036854775808.000000000, toInt16(-1) > 9223372036854775808.000000000, toInt16(-1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt16(-1), 9223372036854775808.000000000 != toInt16(-1), 9223372036854775808.000000000 < toInt16(-1), 9223372036854775808.000000000 <= toInt16(-1), 9223372036854775808.000000000 > toInt16(-1), 9223372036854775808.000000000 >= toInt16(-1) , toInt32(-1) = 9223372036854775808.000000000, toInt32(-1) != 9223372036854775808.000000000, toInt32(-1) < 9223372036854775808.000000000, toInt32(-1) <= 9223372036854775808.000000000, toInt32(-1) > 9223372036854775808.000000000, toInt32(-1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt32(-1), 9223372036854775808.000000000 != toInt32(-1), 9223372036854775808.000000000 < toInt32(-1), 9223372036854775808.000000000 <= toInt32(-1), 9223372036854775808.000000000 > toInt32(-1), 9223372036854775808.000000000 >= toInt32(-1) , toInt64(-1) = 9223372036854775808.000000000, toInt64(-1) != 9223372036854775808.000000000, toInt64(-1) < 9223372036854775808.000000000, toInt64(-1) <= 9223372036854775808.000000000, toInt64(-1) > 9223372036854775808.000000000, toInt64(-1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt64(-1), 9223372036854775808.000000000 != toInt64(-1), 9223372036854775808.000000000 < toInt64(-1), 9223372036854775808.000000000 <= toInt64(-1), 9223372036854775808.000000000 > toInt64(-1), 9223372036854775808.000000000 >= toInt64(-1) ; +SELECT '-1', '2251799813685248.000000000', -1 = 2251799813685248.000000000, -1 != 2251799813685248.000000000, -1 < 2251799813685248.000000000, -1 <= 2251799813685248.000000000, -1 > 2251799813685248.000000000, -1 >= 
2251799813685248.000000000, 2251799813685248.000000000 = -1, 2251799813685248.000000000 != -1, 2251799813685248.000000000 < -1, 2251799813685248.000000000 <= -1, 2251799813685248.000000000 > -1, 2251799813685248.000000000 >= -1 , toInt8(-1) = 2251799813685248.000000000, toInt8(-1) != 2251799813685248.000000000, toInt8(-1) < 2251799813685248.000000000, toInt8(-1) <= 2251799813685248.000000000, toInt8(-1) > 2251799813685248.000000000, toInt8(-1) >= 2251799813685248.000000000, 2251799813685248.000000000 = toInt8(-1), 2251799813685248.000000000 != toInt8(-1), 2251799813685248.000000000 < toInt8(-1), 2251799813685248.000000000 <= toInt8(-1), 2251799813685248.000000000 > toInt8(-1), 2251799813685248.000000000 >= toInt8(-1) , toInt16(-1) = 2251799813685248.000000000, toInt16(-1) != 2251799813685248.000000000, toInt16(-1) < 2251799813685248.000000000, toInt16(-1) <= 2251799813685248.000000000, toInt16(-1) > 2251799813685248.000000000, toInt16(-1) >= 2251799813685248.000000000, 2251799813685248.000000000 = toInt16(-1), 2251799813685248.000000000 != toInt16(-1), 2251799813685248.000000000 < toInt16(-1), 2251799813685248.000000000 <= toInt16(-1), 2251799813685248.000000000 > toInt16(-1), 2251799813685248.000000000 >= toInt16(-1) , toInt32(-1) = 2251799813685248.000000000, toInt32(-1) != 2251799813685248.000000000, toInt32(-1) < 2251799813685248.000000000, toInt32(-1) <= 2251799813685248.000000000, toInt32(-1) > 2251799813685248.000000000, toInt32(-1) >= 2251799813685248.000000000, 2251799813685248.000000000 = toInt32(-1), 2251799813685248.000000000 != toInt32(-1), 2251799813685248.000000000 < toInt32(-1), 2251799813685248.000000000 <= toInt32(-1), 2251799813685248.000000000 > toInt32(-1), 2251799813685248.000000000 >= toInt32(-1) , toInt64(-1) = 2251799813685248.000000000, toInt64(-1) != 2251799813685248.000000000, toInt64(-1) < 2251799813685248.000000000, toInt64(-1) <= 2251799813685248.000000000, toInt64(-1) > 2251799813685248.000000000, toInt64(-1) >= 2251799813685248.000000000, 2251799813685248.000000000 = toInt64(-1), 2251799813685248.000000000 != toInt64(-1), 2251799813685248.000000000 < toInt64(-1), 2251799813685248.000000000 <= toInt64(-1), 2251799813685248.000000000 > toInt64(-1), 2251799813685248.000000000 >= toInt64(-1) ; +SELECT '-1', '4503599627370496.000000000', -1 = 4503599627370496.000000000, -1 != 4503599627370496.000000000, -1 < 4503599627370496.000000000, -1 <= 4503599627370496.000000000, -1 > 4503599627370496.000000000, -1 >= 4503599627370496.000000000, 4503599627370496.000000000 = -1, 4503599627370496.000000000 != -1, 4503599627370496.000000000 < -1, 4503599627370496.000000000 <= -1, 4503599627370496.000000000 > -1, 4503599627370496.000000000 >= -1 , toInt8(-1) = 4503599627370496.000000000, toInt8(-1) != 4503599627370496.000000000, toInt8(-1) < 4503599627370496.000000000, toInt8(-1) <= 4503599627370496.000000000, toInt8(-1) > 4503599627370496.000000000, toInt8(-1) >= 4503599627370496.000000000, 4503599627370496.000000000 = toInt8(-1), 4503599627370496.000000000 != toInt8(-1), 4503599627370496.000000000 < toInt8(-1), 4503599627370496.000000000 <= toInt8(-1), 4503599627370496.000000000 > toInt8(-1), 4503599627370496.000000000 >= toInt8(-1) , toInt16(-1) = 4503599627370496.000000000, toInt16(-1) != 4503599627370496.000000000, toInt16(-1) < 4503599627370496.000000000, toInt16(-1) <= 4503599627370496.000000000, toInt16(-1) > 4503599627370496.000000000, toInt16(-1) >= 4503599627370496.000000000, 4503599627370496.000000000 = toInt16(-1), 4503599627370496.000000000 != toInt16(-1), 
4503599627370496.000000000 < toInt16(-1), 4503599627370496.000000000 <= toInt16(-1), 4503599627370496.000000000 > toInt16(-1), 4503599627370496.000000000 >= toInt16(-1) , toInt32(-1) = 4503599627370496.000000000, toInt32(-1) != 4503599627370496.000000000, toInt32(-1) < 4503599627370496.000000000, toInt32(-1) <= 4503599627370496.000000000, toInt32(-1) > 4503599627370496.000000000, toInt32(-1) >= 4503599627370496.000000000, 4503599627370496.000000000 = toInt32(-1), 4503599627370496.000000000 != toInt32(-1), 4503599627370496.000000000 < toInt32(-1), 4503599627370496.000000000 <= toInt32(-1), 4503599627370496.000000000 > toInt32(-1), 4503599627370496.000000000 >= toInt32(-1) , toInt64(-1) = 4503599627370496.000000000, toInt64(-1) != 4503599627370496.000000000, toInt64(-1) < 4503599627370496.000000000, toInt64(-1) <= 4503599627370496.000000000, toInt64(-1) > 4503599627370496.000000000, toInt64(-1) >= 4503599627370496.000000000, 4503599627370496.000000000 = toInt64(-1), 4503599627370496.000000000 != toInt64(-1), 4503599627370496.000000000 < toInt64(-1), 4503599627370496.000000000 <= toInt64(-1), 4503599627370496.000000000 > toInt64(-1), 4503599627370496.000000000 >= toInt64(-1) ; +SELECT '-1', '9007199254740991.000000000', -1 = 9007199254740991.000000000, -1 != 9007199254740991.000000000, -1 < 9007199254740991.000000000, -1 <= 9007199254740991.000000000, -1 > 9007199254740991.000000000, -1 >= 9007199254740991.000000000, 9007199254740991.000000000 = -1, 9007199254740991.000000000 != -1, 9007199254740991.000000000 < -1, 9007199254740991.000000000 <= -1, 9007199254740991.000000000 > -1, 9007199254740991.000000000 >= -1 , toInt8(-1) = 9007199254740991.000000000, toInt8(-1) != 9007199254740991.000000000, toInt8(-1) < 9007199254740991.000000000, toInt8(-1) <= 9007199254740991.000000000, toInt8(-1) > 9007199254740991.000000000, toInt8(-1) >= 9007199254740991.000000000, 9007199254740991.000000000 = toInt8(-1), 9007199254740991.000000000 != toInt8(-1), 9007199254740991.000000000 < toInt8(-1), 9007199254740991.000000000 <= toInt8(-1), 9007199254740991.000000000 > toInt8(-1), 9007199254740991.000000000 >= toInt8(-1) , toInt16(-1) = 9007199254740991.000000000, toInt16(-1) != 9007199254740991.000000000, toInt16(-1) < 9007199254740991.000000000, toInt16(-1) <= 9007199254740991.000000000, toInt16(-1) > 9007199254740991.000000000, toInt16(-1) >= 9007199254740991.000000000, 9007199254740991.000000000 = toInt16(-1), 9007199254740991.000000000 != toInt16(-1), 9007199254740991.000000000 < toInt16(-1), 9007199254740991.000000000 <= toInt16(-1), 9007199254740991.000000000 > toInt16(-1), 9007199254740991.000000000 >= toInt16(-1) , toInt32(-1) = 9007199254740991.000000000, toInt32(-1) != 9007199254740991.000000000, toInt32(-1) < 9007199254740991.000000000, toInt32(-1) <= 9007199254740991.000000000, toInt32(-1) > 9007199254740991.000000000, toInt32(-1) >= 9007199254740991.000000000, 9007199254740991.000000000 = toInt32(-1), 9007199254740991.000000000 != toInt32(-1), 9007199254740991.000000000 < toInt32(-1), 9007199254740991.000000000 <= toInt32(-1), 9007199254740991.000000000 > toInt32(-1), 9007199254740991.000000000 >= toInt32(-1) , toInt64(-1) = 9007199254740991.000000000, toInt64(-1) != 9007199254740991.000000000, toInt64(-1) < 9007199254740991.000000000, toInt64(-1) <= 9007199254740991.000000000, toInt64(-1) > 9007199254740991.000000000, toInt64(-1) >= 9007199254740991.000000000, 9007199254740991.000000000 = toInt64(-1), 9007199254740991.000000000 != toInt64(-1), 9007199254740991.000000000 < toInt64(-1), 
9007199254740991.000000000 <= toInt64(-1), 9007199254740991.000000000 > toInt64(-1), 9007199254740991.000000000 >= toInt64(-1) ; +SELECT '-1', '9007199254740992.000000000', -1 = 9007199254740992.000000000, -1 != 9007199254740992.000000000, -1 < 9007199254740992.000000000, -1 <= 9007199254740992.000000000, -1 > 9007199254740992.000000000, -1 >= 9007199254740992.000000000, 9007199254740992.000000000 = -1, 9007199254740992.000000000 != -1, 9007199254740992.000000000 < -1, 9007199254740992.000000000 <= -1, 9007199254740992.000000000 > -1, 9007199254740992.000000000 >= -1 , toInt8(-1) = 9007199254740992.000000000, toInt8(-1) != 9007199254740992.000000000, toInt8(-1) < 9007199254740992.000000000, toInt8(-1) <= 9007199254740992.000000000, toInt8(-1) > 9007199254740992.000000000, toInt8(-1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt8(-1), 9007199254740992.000000000 != toInt8(-1), 9007199254740992.000000000 < toInt8(-1), 9007199254740992.000000000 <= toInt8(-1), 9007199254740992.000000000 > toInt8(-1), 9007199254740992.000000000 >= toInt8(-1) , toInt16(-1) = 9007199254740992.000000000, toInt16(-1) != 9007199254740992.000000000, toInt16(-1) < 9007199254740992.000000000, toInt16(-1) <= 9007199254740992.000000000, toInt16(-1) > 9007199254740992.000000000, toInt16(-1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt16(-1), 9007199254740992.000000000 != toInt16(-1), 9007199254740992.000000000 < toInt16(-1), 9007199254740992.000000000 <= toInt16(-1), 9007199254740992.000000000 > toInt16(-1), 9007199254740992.000000000 >= toInt16(-1) , toInt32(-1) = 9007199254740992.000000000, toInt32(-1) != 9007199254740992.000000000, toInt32(-1) < 9007199254740992.000000000, toInt32(-1) <= 9007199254740992.000000000, toInt32(-1) > 9007199254740992.000000000, toInt32(-1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt32(-1), 9007199254740992.000000000 != toInt32(-1), 9007199254740992.000000000 < toInt32(-1), 9007199254740992.000000000 <= toInt32(-1), 9007199254740992.000000000 > toInt32(-1), 9007199254740992.000000000 >= toInt32(-1) , toInt64(-1) = 9007199254740992.000000000, toInt64(-1) != 9007199254740992.000000000, toInt64(-1) < 9007199254740992.000000000, toInt64(-1) <= 9007199254740992.000000000, toInt64(-1) > 9007199254740992.000000000, toInt64(-1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt64(-1), 9007199254740992.000000000 != toInt64(-1), 9007199254740992.000000000 < toInt64(-1), 9007199254740992.000000000 <= toInt64(-1), 9007199254740992.000000000 > toInt64(-1), 9007199254740992.000000000 >= toInt64(-1) ; +SELECT '-1', '9007199254740992.000000000', -1 = 9007199254740992.000000000, -1 != 9007199254740992.000000000, -1 < 9007199254740992.000000000, -1 <= 9007199254740992.000000000, -1 > 9007199254740992.000000000, -1 >= 9007199254740992.000000000, 9007199254740992.000000000 = -1, 9007199254740992.000000000 != -1, 9007199254740992.000000000 < -1, 9007199254740992.000000000 <= -1, 9007199254740992.000000000 > -1, 9007199254740992.000000000 >= -1 , toInt8(-1) = 9007199254740992.000000000, toInt8(-1) != 9007199254740992.000000000, toInt8(-1) < 9007199254740992.000000000, toInt8(-1) <= 9007199254740992.000000000, toInt8(-1) > 9007199254740992.000000000, toInt8(-1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt8(-1), 9007199254740992.000000000 != toInt8(-1), 9007199254740992.000000000 < toInt8(-1), 9007199254740992.000000000 <= toInt8(-1), 9007199254740992.000000000 > toInt8(-1), 9007199254740992.000000000 >= toInt8(-1) , 
toInt16(-1) = 9007199254740992.000000000, toInt16(-1) != 9007199254740992.000000000, toInt16(-1) < 9007199254740992.000000000, toInt16(-1) <= 9007199254740992.000000000, toInt16(-1) > 9007199254740992.000000000, toInt16(-1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt16(-1), 9007199254740992.000000000 != toInt16(-1), 9007199254740992.000000000 < toInt16(-1), 9007199254740992.000000000 <= toInt16(-1), 9007199254740992.000000000 > toInt16(-1), 9007199254740992.000000000 >= toInt16(-1) , toInt32(-1) = 9007199254740992.000000000, toInt32(-1) != 9007199254740992.000000000, toInt32(-1) < 9007199254740992.000000000, toInt32(-1) <= 9007199254740992.000000000, toInt32(-1) > 9007199254740992.000000000, toInt32(-1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt32(-1), 9007199254740992.000000000 != toInt32(-1), 9007199254740992.000000000 < toInt32(-1), 9007199254740992.000000000 <= toInt32(-1), 9007199254740992.000000000 > toInt32(-1), 9007199254740992.000000000 >= toInt32(-1) , toInt64(-1) = 9007199254740992.000000000, toInt64(-1) != 9007199254740992.000000000, toInt64(-1) < 9007199254740992.000000000, toInt64(-1) <= 9007199254740992.000000000, toInt64(-1) > 9007199254740992.000000000, toInt64(-1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt64(-1), 9007199254740992.000000000 != toInt64(-1), 9007199254740992.000000000 < toInt64(-1), 9007199254740992.000000000 <= toInt64(-1), 9007199254740992.000000000 > toInt64(-1), 9007199254740992.000000000 >= toInt64(-1) ; +SELECT '-1', '9007199254740994.000000000', -1 = 9007199254740994.000000000, -1 != 9007199254740994.000000000, -1 < 9007199254740994.000000000, -1 <= 9007199254740994.000000000, -1 > 9007199254740994.000000000, -1 >= 9007199254740994.000000000, 9007199254740994.000000000 = -1, 9007199254740994.000000000 != -1, 9007199254740994.000000000 < -1, 9007199254740994.000000000 <= -1, 9007199254740994.000000000 > -1, 9007199254740994.000000000 >= -1 , toInt8(-1) = 9007199254740994.000000000, toInt8(-1) != 9007199254740994.000000000, toInt8(-1) < 9007199254740994.000000000, toInt8(-1) <= 9007199254740994.000000000, toInt8(-1) > 9007199254740994.000000000, toInt8(-1) >= 9007199254740994.000000000, 9007199254740994.000000000 = toInt8(-1), 9007199254740994.000000000 != toInt8(-1), 9007199254740994.000000000 < toInt8(-1), 9007199254740994.000000000 <= toInt8(-1), 9007199254740994.000000000 > toInt8(-1), 9007199254740994.000000000 >= toInt8(-1) , toInt16(-1) = 9007199254740994.000000000, toInt16(-1) != 9007199254740994.000000000, toInt16(-1) < 9007199254740994.000000000, toInt16(-1) <= 9007199254740994.000000000, toInt16(-1) > 9007199254740994.000000000, toInt16(-1) >= 9007199254740994.000000000, 9007199254740994.000000000 = toInt16(-1), 9007199254740994.000000000 != toInt16(-1), 9007199254740994.000000000 < toInt16(-1), 9007199254740994.000000000 <= toInt16(-1), 9007199254740994.000000000 > toInt16(-1), 9007199254740994.000000000 >= toInt16(-1) , toInt32(-1) = 9007199254740994.000000000, toInt32(-1) != 9007199254740994.000000000, toInt32(-1) < 9007199254740994.000000000, toInt32(-1) <= 9007199254740994.000000000, toInt32(-1) > 9007199254740994.000000000, toInt32(-1) >= 9007199254740994.000000000, 9007199254740994.000000000 = toInt32(-1), 9007199254740994.000000000 != toInt32(-1), 9007199254740994.000000000 < toInt32(-1), 9007199254740994.000000000 <= toInt32(-1), 9007199254740994.000000000 > toInt32(-1), 9007199254740994.000000000 >= toInt32(-1) , toInt64(-1) = 9007199254740994.000000000, toInt64(-1) 
!= 9007199254740994.000000000, toInt64(-1) < 9007199254740994.000000000, toInt64(-1) <= 9007199254740994.000000000, toInt64(-1) > 9007199254740994.000000000, toInt64(-1) >= 9007199254740994.000000000, 9007199254740994.000000000 = toInt64(-1), 9007199254740994.000000000 != toInt64(-1), 9007199254740994.000000000 < toInt64(-1), 9007199254740994.000000000 <= toInt64(-1), 9007199254740994.000000000 > toInt64(-1), 9007199254740994.000000000 >= toInt64(-1) ; +SELECT '-1', '-9007199254740991.000000000', -1 = -9007199254740991.000000000, -1 != -9007199254740991.000000000, -1 < -9007199254740991.000000000, -1 <= -9007199254740991.000000000, -1 > -9007199254740991.000000000, -1 >= -9007199254740991.000000000, -9007199254740991.000000000 = -1, -9007199254740991.000000000 != -1, -9007199254740991.000000000 < -1, -9007199254740991.000000000 <= -1, -9007199254740991.000000000 > -1, -9007199254740991.000000000 >= -1 , toInt8(-1) = -9007199254740991.000000000, toInt8(-1) != -9007199254740991.000000000, toInt8(-1) < -9007199254740991.000000000, toInt8(-1) <= -9007199254740991.000000000, toInt8(-1) > -9007199254740991.000000000, toInt8(-1) >= -9007199254740991.000000000, -9007199254740991.000000000 = toInt8(-1), -9007199254740991.000000000 != toInt8(-1), -9007199254740991.000000000 < toInt8(-1), -9007199254740991.000000000 <= toInt8(-1), -9007199254740991.000000000 > toInt8(-1), -9007199254740991.000000000 >= toInt8(-1) , toInt16(-1) = -9007199254740991.000000000, toInt16(-1) != -9007199254740991.000000000, toInt16(-1) < -9007199254740991.000000000, toInt16(-1) <= -9007199254740991.000000000, toInt16(-1) > -9007199254740991.000000000, toInt16(-1) >= -9007199254740991.000000000, -9007199254740991.000000000 = toInt16(-1), -9007199254740991.000000000 != toInt16(-1), -9007199254740991.000000000 < toInt16(-1), -9007199254740991.000000000 <= toInt16(-1), -9007199254740991.000000000 > toInt16(-1), -9007199254740991.000000000 >= toInt16(-1) , toInt32(-1) = -9007199254740991.000000000, toInt32(-1) != -9007199254740991.000000000, toInt32(-1) < -9007199254740991.000000000, toInt32(-1) <= -9007199254740991.000000000, toInt32(-1) > -9007199254740991.000000000, toInt32(-1) >= -9007199254740991.000000000, -9007199254740991.000000000 = toInt32(-1), -9007199254740991.000000000 != toInt32(-1), -9007199254740991.000000000 < toInt32(-1), -9007199254740991.000000000 <= toInt32(-1), -9007199254740991.000000000 > toInt32(-1), -9007199254740991.000000000 >= toInt32(-1) , toInt64(-1) = -9007199254740991.000000000, toInt64(-1) != -9007199254740991.000000000, toInt64(-1) < -9007199254740991.000000000, toInt64(-1) <= -9007199254740991.000000000, toInt64(-1) > -9007199254740991.000000000, toInt64(-1) >= -9007199254740991.000000000, -9007199254740991.000000000 = toInt64(-1), -9007199254740991.000000000 != toInt64(-1), -9007199254740991.000000000 < toInt64(-1), -9007199254740991.000000000 <= toInt64(-1), -9007199254740991.000000000 > toInt64(-1), -9007199254740991.000000000 >= toInt64(-1) ; +SELECT '-1', '-9007199254740992.000000000', -1 = -9007199254740992.000000000, -1 != -9007199254740992.000000000, -1 < -9007199254740992.000000000, -1 <= -9007199254740992.000000000, -1 > -9007199254740992.000000000, -1 >= -9007199254740992.000000000, -9007199254740992.000000000 = -1, -9007199254740992.000000000 != -1, -9007199254740992.000000000 < -1, -9007199254740992.000000000 <= -1, -9007199254740992.000000000 > -1, -9007199254740992.000000000 >= -1 , toInt8(-1) = -9007199254740992.000000000, toInt8(-1) != -9007199254740992.000000000, toInt8(-1) < 
-9007199254740992.000000000, toInt8(-1) <= -9007199254740992.000000000, toInt8(-1) > -9007199254740992.000000000, toInt8(-1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt8(-1), -9007199254740992.000000000 != toInt8(-1), -9007199254740992.000000000 < toInt8(-1), -9007199254740992.000000000 <= toInt8(-1), -9007199254740992.000000000 > toInt8(-1), -9007199254740992.000000000 >= toInt8(-1) , toInt16(-1) = -9007199254740992.000000000, toInt16(-1) != -9007199254740992.000000000, toInt16(-1) < -9007199254740992.000000000, toInt16(-1) <= -9007199254740992.000000000, toInt16(-1) > -9007199254740992.000000000, toInt16(-1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt16(-1), -9007199254740992.000000000 != toInt16(-1), -9007199254740992.000000000 < toInt16(-1), -9007199254740992.000000000 <= toInt16(-1), -9007199254740992.000000000 > toInt16(-1), -9007199254740992.000000000 >= toInt16(-1) , toInt32(-1) = -9007199254740992.000000000, toInt32(-1) != -9007199254740992.000000000, toInt32(-1) < -9007199254740992.000000000, toInt32(-1) <= -9007199254740992.000000000, toInt32(-1) > -9007199254740992.000000000, toInt32(-1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt32(-1), -9007199254740992.000000000 != toInt32(-1), -9007199254740992.000000000 < toInt32(-1), -9007199254740992.000000000 <= toInt32(-1), -9007199254740992.000000000 > toInt32(-1), -9007199254740992.000000000 >= toInt32(-1) , toInt64(-1) = -9007199254740992.000000000, toInt64(-1) != -9007199254740992.000000000, toInt64(-1) < -9007199254740992.000000000, toInt64(-1) <= -9007199254740992.000000000, toInt64(-1) > -9007199254740992.000000000, toInt64(-1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt64(-1), -9007199254740992.000000000 != toInt64(-1), -9007199254740992.000000000 < toInt64(-1), -9007199254740992.000000000 <= toInt64(-1), -9007199254740992.000000000 > toInt64(-1), -9007199254740992.000000000 >= toInt64(-1) ; +SELECT '-1', '-9007199254740992.000000000', -1 = -9007199254740992.000000000, -1 != -9007199254740992.000000000, -1 < -9007199254740992.000000000, -1 <= -9007199254740992.000000000, -1 > -9007199254740992.000000000, -1 >= -9007199254740992.000000000, -9007199254740992.000000000 = -1, -9007199254740992.000000000 != -1, -9007199254740992.000000000 < -1, -9007199254740992.000000000 <= -1, -9007199254740992.000000000 > -1, -9007199254740992.000000000 >= -1 , toInt8(-1) = -9007199254740992.000000000, toInt8(-1) != -9007199254740992.000000000, toInt8(-1) < -9007199254740992.000000000, toInt8(-1) <= -9007199254740992.000000000, toInt8(-1) > -9007199254740992.000000000, toInt8(-1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt8(-1), -9007199254740992.000000000 != toInt8(-1), -9007199254740992.000000000 < toInt8(-1), -9007199254740992.000000000 <= toInt8(-1), -9007199254740992.000000000 > toInt8(-1), -9007199254740992.000000000 >= toInt8(-1) , toInt16(-1) = -9007199254740992.000000000, toInt16(-1) != -9007199254740992.000000000, toInt16(-1) < -9007199254740992.000000000, toInt16(-1) <= -9007199254740992.000000000, toInt16(-1) > -9007199254740992.000000000, toInt16(-1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt16(-1), -9007199254740992.000000000 != toInt16(-1), -9007199254740992.000000000 < toInt16(-1), -9007199254740992.000000000 <= toInt16(-1), -9007199254740992.000000000 > toInt16(-1), -9007199254740992.000000000 >= toInt16(-1) , toInt32(-1) = -9007199254740992.000000000, toInt32(-1) != 
-9007199254740992.000000000, toInt32(-1) < -9007199254740992.000000000, toInt32(-1) <= -9007199254740992.000000000, toInt32(-1) > -9007199254740992.000000000, toInt32(-1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt32(-1), -9007199254740992.000000000 != toInt32(-1), -9007199254740992.000000000 < toInt32(-1), -9007199254740992.000000000 <= toInt32(-1), -9007199254740992.000000000 > toInt32(-1), -9007199254740992.000000000 >= toInt32(-1) , toInt64(-1) = -9007199254740992.000000000, toInt64(-1) != -9007199254740992.000000000, toInt64(-1) < -9007199254740992.000000000, toInt64(-1) <= -9007199254740992.000000000, toInt64(-1) > -9007199254740992.000000000, toInt64(-1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt64(-1), -9007199254740992.000000000 != toInt64(-1), -9007199254740992.000000000 < toInt64(-1), -9007199254740992.000000000 <= toInt64(-1), -9007199254740992.000000000 > toInt64(-1), -9007199254740992.000000000 >= toInt64(-1) ; +SELECT '-1', '-9007199254740994.000000000', -1 = -9007199254740994.000000000, -1 != -9007199254740994.000000000, -1 < -9007199254740994.000000000, -1 <= -9007199254740994.000000000, -1 > -9007199254740994.000000000, -1 >= -9007199254740994.000000000, -9007199254740994.000000000 = -1, -9007199254740994.000000000 != -1, -9007199254740994.000000000 < -1, -9007199254740994.000000000 <= -1, -9007199254740994.000000000 > -1, -9007199254740994.000000000 >= -1 , toInt8(-1) = -9007199254740994.000000000, toInt8(-1) != -9007199254740994.000000000, toInt8(-1) < -9007199254740994.000000000, toInt8(-1) <= -9007199254740994.000000000, toInt8(-1) > -9007199254740994.000000000, toInt8(-1) >= -9007199254740994.000000000, -9007199254740994.000000000 = toInt8(-1), -9007199254740994.000000000 != toInt8(-1), -9007199254740994.000000000 < toInt8(-1), -9007199254740994.000000000 <= toInt8(-1), -9007199254740994.000000000 > toInt8(-1), -9007199254740994.000000000 >= toInt8(-1) , toInt16(-1) = -9007199254740994.000000000, toInt16(-1) != -9007199254740994.000000000, toInt16(-1) < -9007199254740994.000000000, toInt16(-1) <= -9007199254740994.000000000, toInt16(-1) > -9007199254740994.000000000, toInt16(-1) >= -9007199254740994.000000000, -9007199254740994.000000000 = toInt16(-1), -9007199254740994.000000000 != toInt16(-1), -9007199254740994.000000000 < toInt16(-1), -9007199254740994.000000000 <= toInt16(-1), -9007199254740994.000000000 > toInt16(-1), -9007199254740994.000000000 >= toInt16(-1) , toInt32(-1) = -9007199254740994.000000000, toInt32(-1) != -9007199254740994.000000000, toInt32(-1) < -9007199254740994.000000000, toInt32(-1) <= -9007199254740994.000000000, toInt32(-1) > -9007199254740994.000000000, toInt32(-1) >= -9007199254740994.000000000, -9007199254740994.000000000 = toInt32(-1), -9007199254740994.000000000 != toInt32(-1), -9007199254740994.000000000 < toInt32(-1), -9007199254740994.000000000 <= toInt32(-1), -9007199254740994.000000000 > toInt32(-1), -9007199254740994.000000000 >= toInt32(-1) , toInt64(-1) = -9007199254740994.000000000, toInt64(-1) != -9007199254740994.000000000, toInt64(-1) < -9007199254740994.000000000, toInt64(-1) <= -9007199254740994.000000000, toInt64(-1) > -9007199254740994.000000000, toInt64(-1) >= -9007199254740994.000000000, -9007199254740994.000000000 = toInt64(-1), -9007199254740994.000000000 != toInt64(-1), -9007199254740994.000000000 < toInt64(-1), -9007199254740994.000000000 <= toInt64(-1), -9007199254740994.000000000 > toInt64(-1), -9007199254740994.000000000 >= toInt64(-1) ; +SELECT '-1', 
'104.000000000', -1 = 104.000000000, -1 != 104.000000000, -1 < 104.000000000, -1 <= 104.000000000, -1 > 104.000000000, -1 >= 104.000000000, 104.000000000 = -1, 104.000000000 != -1, 104.000000000 < -1, 104.000000000 <= -1, 104.000000000 > -1, 104.000000000 >= -1 , toInt8(-1) = 104.000000000, toInt8(-1) != 104.000000000, toInt8(-1) < 104.000000000, toInt8(-1) <= 104.000000000, toInt8(-1) > 104.000000000, toInt8(-1) >= 104.000000000, 104.000000000 = toInt8(-1), 104.000000000 != toInt8(-1), 104.000000000 < toInt8(-1), 104.000000000 <= toInt8(-1), 104.000000000 > toInt8(-1), 104.000000000 >= toInt8(-1) , toInt16(-1) = 104.000000000, toInt16(-1) != 104.000000000, toInt16(-1) < 104.000000000, toInt16(-1) <= 104.000000000, toInt16(-1) > 104.000000000, toInt16(-1) >= 104.000000000, 104.000000000 = toInt16(-1), 104.000000000 != toInt16(-1), 104.000000000 < toInt16(-1), 104.000000000 <= toInt16(-1), 104.000000000 > toInt16(-1), 104.000000000 >= toInt16(-1) , toInt32(-1) = 104.000000000, toInt32(-1) != 104.000000000, toInt32(-1) < 104.000000000, toInt32(-1) <= 104.000000000, toInt32(-1) > 104.000000000, toInt32(-1) >= 104.000000000, 104.000000000 = toInt32(-1), 104.000000000 != toInt32(-1), 104.000000000 < toInt32(-1), 104.000000000 <= toInt32(-1), 104.000000000 > toInt32(-1), 104.000000000 >= toInt32(-1) , toInt64(-1) = 104.000000000, toInt64(-1) != 104.000000000, toInt64(-1) < 104.000000000, toInt64(-1) <= 104.000000000, toInt64(-1) > 104.000000000, toInt64(-1) >= 104.000000000, 104.000000000 = toInt64(-1), 104.000000000 != toInt64(-1), 104.000000000 < toInt64(-1), 104.000000000 <= toInt64(-1), 104.000000000 > toInt64(-1), 104.000000000 >= toInt64(-1) ; +SELECT '-1', '-4503599627370496.000000000', -1 = -4503599627370496.000000000, -1 != -4503599627370496.000000000, -1 < -4503599627370496.000000000, -1 <= -4503599627370496.000000000, -1 > -4503599627370496.000000000, -1 >= -4503599627370496.000000000, -4503599627370496.000000000 = -1, -4503599627370496.000000000 != -1, -4503599627370496.000000000 < -1, -4503599627370496.000000000 <= -1, -4503599627370496.000000000 > -1, -4503599627370496.000000000 >= -1 , toInt8(-1) = -4503599627370496.000000000, toInt8(-1) != -4503599627370496.000000000, toInt8(-1) < -4503599627370496.000000000, toInt8(-1) <= -4503599627370496.000000000, toInt8(-1) > -4503599627370496.000000000, toInt8(-1) >= -4503599627370496.000000000, -4503599627370496.000000000 = toInt8(-1), -4503599627370496.000000000 != toInt8(-1), -4503599627370496.000000000 < toInt8(-1), -4503599627370496.000000000 <= toInt8(-1), -4503599627370496.000000000 > toInt8(-1), -4503599627370496.000000000 >= toInt8(-1) , toInt16(-1) = -4503599627370496.000000000, toInt16(-1) != -4503599627370496.000000000, toInt16(-1) < -4503599627370496.000000000, toInt16(-1) <= -4503599627370496.000000000, toInt16(-1) > -4503599627370496.000000000, toInt16(-1) >= -4503599627370496.000000000, -4503599627370496.000000000 = toInt16(-1), -4503599627370496.000000000 != toInt16(-1), -4503599627370496.000000000 < toInt16(-1), -4503599627370496.000000000 <= toInt16(-1), -4503599627370496.000000000 > toInt16(-1), -4503599627370496.000000000 >= toInt16(-1) , toInt32(-1) = -4503599627370496.000000000, toInt32(-1) != -4503599627370496.000000000, toInt32(-1) < -4503599627370496.000000000, toInt32(-1) <= -4503599627370496.000000000, toInt32(-1) > -4503599627370496.000000000, toInt32(-1) >= -4503599627370496.000000000, -4503599627370496.000000000 = toInt32(-1), -4503599627370496.000000000 != toInt32(-1), -4503599627370496.000000000 < 
toInt32(-1), -4503599627370496.000000000 <= toInt32(-1), -4503599627370496.000000000 > toInt32(-1), -4503599627370496.000000000 >= toInt32(-1) , toInt64(-1) = -4503599627370496.000000000, toInt64(-1) != -4503599627370496.000000000, toInt64(-1) < -4503599627370496.000000000, toInt64(-1) <= -4503599627370496.000000000, toInt64(-1) > -4503599627370496.000000000, toInt64(-1) >= -4503599627370496.000000000, -4503599627370496.000000000 = toInt64(-1), -4503599627370496.000000000 != toInt64(-1), -4503599627370496.000000000 < toInt64(-1), -4503599627370496.000000000 <= toInt64(-1), -4503599627370496.000000000 > toInt64(-1), -4503599627370496.000000000 >= toInt64(-1) ; +SELECT '-1', '-0.500000000', -1 = -0.500000000, -1 != -0.500000000, -1 < -0.500000000, -1 <= -0.500000000, -1 > -0.500000000, -1 >= -0.500000000, -0.500000000 = -1, -0.500000000 != -1, -0.500000000 < -1, -0.500000000 <= -1, -0.500000000 > -1, -0.500000000 >= -1 , toInt8(-1) = -0.500000000, toInt8(-1) != -0.500000000, toInt8(-1) < -0.500000000, toInt8(-1) <= -0.500000000, toInt8(-1) > -0.500000000, toInt8(-1) >= -0.500000000, -0.500000000 = toInt8(-1), -0.500000000 != toInt8(-1), -0.500000000 < toInt8(-1), -0.500000000 <= toInt8(-1), -0.500000000 > toInt8(-1), -0.500000000 >= toInt8(-1) , toInt16(-1) = -0.500000000, toInt16(-1) != -0.500000000, toInt16(-1) < -0.500000000, toInt16(-1) <= -0.500000000, toInt16(-1) > -0.500000000, toInt16(-1) >= -0.500000000, -0.500000000 = toInt16(-1), -0.500000000 != toInt16(-1), -0.500000000 < toInt16(-1), -0.500000000 <= toInt16(-1), -0.500000000 > toInt16(-1), -0.500000000 >= toInt16(-1) , toInt32(-1) = -0.500000000, toInt32(-1) != -0.500000000, toInt32(-1) < -0.500000000, toInt32(-1) <= -0.500000000, toInt32(-1) > -0.500000000, toInt32(-1) >= -0.500000000, -0.500000000 = toInt32(-1), -0.500000000 != toInt32(-1), -0.500000000 < toInt32(-1), -0.500000000 <= toInt32(-1), -0.500000000 > toInt32(-1), -0.500000000 >= toInt32(-1) , toInt64(-1) = -0.500000000, toInt64(-1) != -0.500000000, toInt64(-1) < -0.500000000, toInt64(-1) <= -0.500000000, toInt64(-1) > -0.500000000, toInt64(-1) >= -0.500000000, -0.500000000 = toInt64(-1), -0.500000000 != toInt64(-1), -0.500000000 < toInt64(-1), -0.500000000 <= toInt64(-1), -0.500000000 > toInt64(-1), -0.500000000 >= toInt64(-1) ; +SELECT '-1', '0.500000000', -1 = 0.500000000, -1 != 0.500000000, -1 < 0.500000000, -1 <= 0.500000000, -1 > 0.500000000, -1 >= 0.500000000, 0.500000000 = -1, 0.500000000 != -1, 0.500000000 < -1, 0.500000000 <= -1, 0.500000000 > -1, 0.500000000 >= -1 , toInt8(-1) = 0.500000000, toInt8(-1) != 0.500000000, toInt8(-1) < 0.500000000, toInt8(-1) <= 0.500000000, toInt8(-1) > 0.500000000, toInt8(-1) >= 0.500000000, 0.500000000 = toInt8(-1), 0.500000000 != toInt8(-1), 0.500000000 < toInt8(-1), 0.500000000 <= toInt8(-1), 0.500000000 > toInt8(-1), 0.500000000 >= toInt8(-1) , toInt16(-1) = 0.500000000, toInt16(-1) != 0.500000000, toInt16(-1) < 0.500000000, toInt16(-1) <= 0.500000000, toInt16(-1) > 0.500000000, toInt16(-1) >= 0.500000000, 0.500000000 = toInt16(-1), 0.500000000 != toInt16(-1), 0.500000000 < toInt16(-1), 0.500000000 <= toInt16(-1), 0.500000000 > toInt16(-1), 0.500000000 >= toInt16(-1) , toInt32(-1) = 0.500000000, toInt32(-1) != 0.500000000, toInt32(-1) < 0.500000000, toInt32(-1) <= 0.500000000, toInt32(-1) > 0.500000000, toInt32(-1) >= 0.500000000, 0.500000000 = toInt32(-1), 0.500000000 != toInt32(-1), 0.500000000 < toInt32(-1), 0.500000000 <= toInt32(-1), 0.500000000 > toInt32(-1), 0.500000000 >= toInt32(-1) , toInt64(-1) = 0.500000000, 
toInt64(-1) != 0.500000000, toInt64(-1) < 0.500000000, toInt64(-1) <= 0.500000000, toInt64(-1) > 0.500000000, toInt64(-1) >= 0.500000000, 0.500000000 = toInt64(-1), 0.500000000 != toInt64(-1), 0.500000000 < toInt64(-1), 0.500000000 <= toInt64(-1), 0.500000000 > toInt64(-1), 0.500000000 >= toInt64(-1) ; +SELECT '-1', '-1.500000000', -1 = -1.500000000, -1 != -1.500000000, -1 < -1.500000000, -1 <= -1.500000000, -1 > -1.500000000, -1 >= -1.500000000, -1.500000000 = -1, -1.500000000 != -1, -1.500000000 < -1, -1.500000000 <= -1, -1.500000000 > -1, -1.500000000 >= -1 , toInt8(-1) = -1.500000000, toInt8(-1) != -1.500000000, toInt8(-1) < -1.500000000, toInt8(-1) <= -1.500000000, toInt8(-1) > -1.500000000, toInt8(-1) >= -1.500000000, -1.500000000 = toInt8(-1), -1.500000000 != toInt8(-1), -1.500000000 < toInt8(-1), -1.500000000 <= toInt8(-1), -1.500000000 > toInt8(-1), -1.500000000 >= toInt8(-1) , toInt16(-1) = -1.500000000, toInt16(-1) != -1.500000000, toInt16(-1) < -1.500000000, toInt16(-1) <= -1.500000000, toInt16(-1) > -1.500000000, toInt16(-1) >= -1.500000000, -1.500000000 = toInt16(-1), -1.500000000 != toInt16(-1), -1.500000000 < toInt16(-1), -1.500000000 <= toInt16(-1), -1.500000000 > toInt16(-1), -1.500000000 >= toInt16(-1) , toInt32(-1) = -1.500000000, toInt32(-1) != -1.500000000, toInt32(-1) < -1.500000000, toInt32(-1) <= -1.500000000, toInt32(-1) > -1.500000000, toInt32(-1) >= -1.500000000, -1.500000000 = toInt32(-1), -1.500000000 != toInt32(-1), -1.500000000 < toInt32(-1), -1.500000000 <= toInt32(-1), -1.500000000 > toInt32(-1), -1.500000000 >= toInt32(-1) , toInt64(-1) = -1.500000000, toInt64(-1) != -1.500000000, toInt64(-1) < -1.500000000, toInt64(-1) <= -1.500000000, toInt64(-1) > -1.500000000, toInt64(-1) >= -1.500000000, -1.500000000 = toInt64(-1), -1.500000000 != toInt64(-1), -1.500000000 < toInt64(-1), -1.500000000 <= toInt64(-1), -1.500000000 > toInt64(-1), -1.500000000 >= toInt64(-1) ; +SELECT '-1', '1.500000000', -1 = 1.500000000, -1 != 1.500000000, -1 < 1.500000000, -1 <= 1.500000000, -1 > 1.500000000, -1 >= 1.500000000, 1.500000000 = -1, 1.500000000 != -1, 1.500000000 < -1, 1.500000000 <= -1, 1.500000000 > -1, 1.500000000 >= -1 , toInt8(-1) = 1.500000000, toInt8(-1) != 1.500000000, toInt8(-1) < 1.500000000, toInt8(-1) <= 1.500000000, toInt8(-1) > 1.500000000, toInt8(-1) >= 1.500000000, 1.500000000 = toInt8(-1), 1.500000000 != toInt8(-1), 1.500000000 < toInt8(-1), 1.500000000 <= toInt8(-1), 1.500000000 > toInt8(-1), 1.500000000 >= toInt8(-1) , toInt16(-1) = 1.500000000, toInt16(-1) != 1.500000000, toInt16(-1) < 1.500000000, toInt16(-1) <= 1.500000000, toInt16(-1) > 1.500000000, toInt16(-1) >= 1.500000000, 1.500000000 = toInt16(-1), 1.500000000 != toInt16(-1), 1.500000000 < toInt16(-1), 1.500000000 <= toInt16(-1), 1.500000000 > toInt16(-1), 1.500000000 >= toInt16(-1) , toInt32(-1) = 1.500000000, toInt32(-1) != 1.500000000, toInt32(-1) < 1.500000000, toInt32(-1) <= 1.500000000, toInt32(-1) > 1.500000000, toInt32(-1) >= 1.500000000, 1.500000000 = toInt32(-1), 1.500000000 != toInt32(-1), 1.500000000 < toInt32(-1), 1.500000000 <= toInt32(-1), 1.500000000 > toInt32(-1), 1.500000000 >= toInt32(-1) , toInt64(-1) = 1.500000000, toInt64(-1) != 1.500000000, toInt64(-1) < 1.500000000, toInt64(-1) <= 1.500000000, toInt64(-1) > 1.500000000, toInt64(-1) >= 1.500000000, 1.500000000 = toInt64(-1), 1.500000000 != toInt64(-1), 1.500000000 < toInt64(-1), 1.500000000 <= toInt64(-1), 1.500000000 > toInt64(-1), 1.500000000 >= toInt64(-1) ; +SELECT '-1', '9007199254740992.000000000', -1 = 
9007199254740992.000000000, -1 != 9007199254740992.000000000, -1 < 9007199254740992.000000000, -1 <= 9007199254740992.000000000, -1 > 9007199254740992.000000000, -1 >= 9007199254740992.000000000, 9007199254740992.000000000 = -1, 9007199254740992.000000000 != -1, 9007199254740992.000000000 < -1, 9007199254740992.000000000 <= -1, 9007199254740992.000000000 > -1, 9007199254740992.000000000 >= -1 , toInt8(-1) = 9007199254740992.000000000, toInt8(-1) != 9007199254740992.000000000, toInt8(-1) < 9007199254740992.000000000, toInt8(-1) <= 9007199254740992.000000000, toInt8(-1) > 9007199254740992.000000000, toInt8(-1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt8(-1), 9007199254740992.000000000 != toInt8(-1), 9007199254740992.000000000 < toInt8(-1), 9007199254740992.000000000 <= toInt8(-1), 9007199254740992.000000000 > toInt8(-1), 9007199254740992.000000000 >= toInt8(-1) , toInt16(-1) = 9007199254740992.000000000, toInt16(-1) != 9007199254740992.000000000, toInt16(-1) < 9007199254740992.000000000, toInt16(-1) <= 9007199254740992.000000000, toInt16(-1) > 9007199254740992.000000000, toInt16(-1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt16(-1), 9007199254740992.000000000 != toInt16(-1), 9007199254740992.000000000 < toInt16(-1), 9007199254740992.000000000 <= toInt16(-1), 9007199254740992.000000000 > toInt16(-1), 9007199254740992.000000000 >= toInt16(-1) , toInt32(-1) = 9007199254740992.000000000, toInt32(-1) != 9007199254740992.000000000, toInt32(-1) < 9007199254740992.000000000, toInt32(-1) <= 9007199254740992.000000000, toInt32(-1) > 9007199254740992.000000000, toInt32(-1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt32(-1), 9007199254740992.000000000 != toInt32(-1), 9007199254740992.000000000 < toInt32(-1), 9007199254740992.000000000 <= toInt32(-1), 9007199254740992.000000000 > toInt32(-1), 9007199254740992.000000000 >= toInt32(-1) , toInt64(-1) = 9007199254740992.000000000, toInt64(-1) != 9007199254740992.000000000, toInt64(-1) < 9007199254740992.000000000, toInt64(-1) <= 9007199254740992.000000000, toInt64(-1) > 9007199254740992.000000000, toInt64(-1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt64(-1), 9007199254740992.000000000 != toInt64(-1), 9007199254740992.000000000 < toInt64(-1), 9007199254740992.000000000 <= toInt64(-1), 9007199254740992.000000000 > toInt64(-1), 9007199254740992.000000000 >= toInt64(-1) ; +SELECT '-1', '2251799813685247.500000000', -1 = 2251799813685247.500000000, -1 != 2251799813685247.500000000, -1 < 2251799813685247.500000000, -1 <= 2251799813685247.500000000, -1 > 2251799813685247.500000000, -1 >= 2251799813685247.500000000, 2251799813685247.500000000 = -1, 2251799813685247.500000000 != -1, 2251799813685247.500000000 < -1, 2251799813685247.500000000 <= -1, 2251799813685247.500000000 > -1, 2251799813685247.500000000 >= -1 , toInt8(-1) = 2251799813685247.500000000, toInt8(-1) != 2251799813685247.500000000, toInt8(-1) < 2251799813685247.500000000, toInt8(-1) <= 2251799813685247.500000000, toInt8(-1) > 2251799813685247.500000000, toInt8(-1) >= 2251799813685247.500000000, 2251799813685247.500000000 = toInt8(-1), 2251799813685247.500000000 != toInt8(-1), 2251799813685247.500000000 < toInt8(-1), 2251799813685247.500000000 <= toInt8(-1), 2251799813685247.500000000 > toInt8(-1), 2251799813685247.500000000 >= toInt8(-1) , toInt16(-1) = 2251799813685247.500000000, toInt16(-1) != 2251799813685247.500000000, toInt16(-1) < 2251799813685247.500000000, toInt16(-1) <= 2251799813685247.500000000, 
toInt16(-1) > 2251799813685247.500000000, toInt16(-1) >= 2251799813685247.500000000, 2251799813685247.500000000 = toInt16(-1), 2251799813685247.500000000 != toInt16(-1), 2251799813685247.500000000 < toInt16(-1), 2251799813685247.500000000 <= toInt16(-1), 2251799813685247.500000000 > toInt16(-1), 2251799813685247.500000000 >= toInt16(-1) , toInt32(-1) = 2251799813685247.500000000, toInt32(-1) != 2251799813685247.500000000, toInt32(-1) < 2251799813685247.500000000, toInt32(-1) <= 2251799813685247.500000000, toInt32(-1) > 2251799813685247.500000000, toInt32(-1) >= 2251799813685247.500000000, 2251799813685247.500000000 = toInt32(-1), 2251799813685247.500000000 != toInt32(-1), 2251799813685247.500000000 < toInt32(-1), 2251799813685247.500000000 <= toInt32(-1), 2251799813685247.500000000 > toInt32(-1), 2251799813685247.500000000 >= toInt32(-1) , toInt64(-1) = 2251799813685247.500000000, toInt64(-1) != 2251799813685247.500000000, toInt64(-1) < 2251799813685247.500000000, toInt64(-1) <= 2251799813685247.500000000, toInt64(-1) > 2251799813685247.500000000, toInt64(-1) >= 2251799813685247.500000000, 2251799813685247.500000000 = toInt64(-1), 2251799813685247.500000000 != toInt64(-1), 2251799813685247.500000000 < toInt64(-1), 2251799813685247.500000000 <= toInt64(-1), 2251799813685247.500000000 > toInt64(-1), 2251799813685247.500000000 >= toInt64(-1) ; +SELECT '-1', '2251799813685248.500000000', -1 = 2251799813685248.500000000, -1 != 2251799813685248.500000000, -1 < 2251799813685248.500000000, -1 <= 2251799813685248.500000000, -1 > 2251799813685248.500000000, -1 >= 2251799813685248.500000000, 2251799813685248.500000000 = -1, 2251799813685248.500000000 != -1, 2251799813685248.500000000 < -1, 2251799813685248.500000000 <= -1, 2251799813685248.500000000 > -1, 2251799813685248.500000000 >= -1 , toInt8(-1) = 2251799813685248.500000000, toInt8(-1) != 2251799813685248.500000000, toInt8(-1) < 2251799813685248.500000000, toInt8(-1) <= 2251799813685248.500000000, toInt8(-1) > 2251799813685248.500000000, toInt8(-1) >= 2251799813685248.500000000, 2251799813685248.500000000 = toInt8(-1), 2251799813685248.500000000 != toInt8(-1), 2251799813685248.500000000 < toInt8(-1), 2251799813685248.500000000 <= toInt8(-1), 2251799813685248.500000000 > toInt8(-1), 2251799813685248.500000000 >= toInt8(-1) , toInt16(-1) = 2251799813685248.500000000, toInt16(-1) != 2251799813685248.500000000, toInt16(-1) < 2251799813685248.500000000, toInt16(-1) <= 2251799813685248.500000000, toInt16(-1) > 2251799813685248.500000000, toInt16(-1) >= 2251799813685248.500000000, 2251799813685248.500000000 = toInt16(-1), 2251799813685248.500000000 != toInt16(-1), 2251799813685248.500000000 < toInt16(-1), 2251799813685248.500000000 <= toInt16(-1), 2251799813685248.500000000 > toInt16(-1), 2251799813685248.500000000 >= toInt16(-1) , toInt32(-1) = 2251799813685248.500000000, toInt32(-1) != 2251799813685248.500000000, toInt32(-1) < 2251799813685248.500000000, toInt32(-1) <= 2251799813685248.500000000, toInt32(-1) > 2251799813685248.500000000, toInt32(-1) >= 2251799813685248.500000000, 2251799813685248.500000000 = toInt32(-1), 2251799813685248.500000000 != toInt32(-1), 2251799813685248.500000000 < toInt32(-1), 2251799813685248.500000000 <= toInt32(-1), 2251799813685248.500000000 > toInt32(-1), 2251799813685248.500000000 >= toInt32(-1) , toInt64(-1) = 2251799813685248.500000000, toInt64(-1) != 2251799813685248.500000000, toInt64(-1) < 2251799813685248.500000000, toInt64(-1) <= 2251799813685248.500000000, toInt64(-1) > 2251799813685248.500000000, toInt64(-1) 
>= 2251799813685248.500000000, 2251799813685248.500000000 = toInt64(-1), 2251799813685248.500000000 != toInt64(-1), 2251799813685248.500000000 < toInt64(-1), 2251799813685248.500000000 <= toInt64(-1), 2251799813685248.500000000 > toInt64(-1), 2251799813685248.500000000 >= toInt64(-1) ; +SELECT '-1', '1152921504606846976.000000000', -1 = 1152921504606846976.000000000, -1 != 1152921504606846976.000000000, -1 < 1152921504606846976.000000000, -1 <= 1152921504606846976.000000000, -1 > 1152921504606846976.000000000, -1 >= 1152921504606846976.000000000, 1152921504606846976.000000000 = -1, 1152921504606846976.000000000 != -1, 1152921504606846976.000000000 < -1, 1152921504606846976.000000000 <= -1, 1152921504606846976.000000000 > -1, 1152921504606846976.000000000 >= -1 , toInt8(-1) = 1152921504606846976.000000000, toInt8(-1) != 1152921504606846976.000000000, toInt8(-1) < 1152921504606846976.000000000, toInt8(-1) <= 1152921504606846976.000000000, toInt8(-1) > 1152921504606846976.000000000, toInt8(-1) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toInt8(-1), 1152921504606846976.000000000 != toInt8(-1), 1152921504606846976.000000000 < toInt8(-1), 1152921504606846976.000000000 <= toInt8(-1), 1152921504606846976.000000000 > toInt8(-1), 1152921504606846976.000000000 >= toInt8(-1) , toInt16(-1) = 1152921504606846976.000000000, toInt16(-1) != 1152921504606846976.000000000, toInt16(-1) < 1152921504606846976.000000000, toInt16(-1) <= 1152921504606846976.000000000, toInt16(-1) > 1152921504606846976.000000000, toInt16(-1) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toInt16(-1), 1152921504606846976.000000000 != toInt16(-1), 1152921504606846976.000000000 < toInt16(-1), 1152921504606846976.000000000 <= toInt16(-1), 1152921504606846976.000000000 > toInt16(-1), 1152921504606846976.000000000 >= toInt16(-1) , toInt32(-1) = 1152921504606846976.000000000, toInt32(-1) != 1152921504606846976.000000000, toInt32(-1) < 1152921504606846976.000000000, toInt32(-1) <= 1152921504606846976.000000000, toInt32(-1) > 1152921504606846976.000000000, toInt32(-1) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toInt32(-1), 1152921504606846976.000000000 != toInt32(-1), 1152921504606846976.000000000 < toInt32(-1), 1152921504606846976.000000000 <= toInt32(-1), 1152921504606846976.000000000 > toInt32(-1), 1152921504606846976.000000000 >= toInt32(-1) , toInt64(-1) = 1152921504606846976.000000000, toInt64(-1) != 1152921504606846976.000000000, toInt64(-1) < 1152921504606846976.000000000, toInt64(-1) <= 1152921504606846976.000000000, toInt64(-1) > 1152921504606846976.000000000, toInt64(-1) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toInt64(-1), 1152921504606846976.000000000 != toInt64(-1), 1152921504606846976.000000000 < toInt64(-1), 1152921504606846976.000000000 <= toInt64(-1), 1152921504606846976.000000000 > toInt64(-1), 1152921504606846976.000000000 >= toInt64(-1) ; +SELECT '-1', '-1152921504606846976.000000000', -1 = -1152921504606846976.000000000, -1 != -1152921504606846976.000000000, -1 < -1152921504606846976.000000000, -1 <= -1152921504606846976.000000000, -1 > -1152921504606846976.000000000, -1 >= -1152921504606846976.000000000, -1152921504606846976.000000000 = -1, -1152921504606846976.000000000 != -1, -1152921504606846976.000000000 < -1, -1152921504606846976.000000000 <= -1, -1152921504606846976.000000000 > -1, -1152921504606846976.000000000 >= -1 , toInt8(-1) = -1152921504606846976.000000000, toInt8(-1) != -1152921504606846976.000000000, toInt8(-1) < 
-1152921504606846976.000000000, toInt8(-1) <= -1152921504606846976.000000000, toInt8(-1) > -1152921504606846976.000000000, toInt8(-1) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toInt8(-1), -1152921504606846976.000000000 != toInt8(-1), -1152921504606846976.000000000 < toInt8(-1), -1152921504606846976.000000000 <= toInt8(-1), -1152921504606846976.000000000 > toInt8(-1), -1152921504606846976.000000000 >= toInt8(-1) , toInt16(-1) = -1152921504606846976.000000000, toInt16(-1) != -1152921504606846976.000000000, toInt16(-1) < -1152921504606846976.000000000, toInt16(-1) <= -1152921504606846976.000000000, toInt16(-1) > -1152921504606846976.000000000, toInt16(-1) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toInt16(-1), -1152921504606846976.000000000 != toInt16(-1), -1152921504606846976.000000000 < toInt16(-1), -1152921504606846976.000000000 <= toInt16(-1), -1152921504606846976.000000000 > toInt16(-1), -1152921504606846976.000000000 >= toInt16(-1) , toInt32(-1) = -1152921504606846976.000000000, toInt32(-1) != -1152921504606846976.000000000, toInt32(-1) < -1152921504606846976.000000000, toInt32(-1) <= -1152921504606846976.000000000, toInt32(-1) > -1152921504606846976.000000000, toInt32(-1) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toInt32(-1), -1152921504606846976.000000000 != toInt32(-1), -1152921504606846976.000000000 < toInt32(-1), -1152921504606846976.000000000 <= toInt32(-1), -1152921504606846976.000000000 > toInt32(-1), -1152921504606846976.000000000 >= toInt32(-1) , toInt64(-1) = -1152921504606846976.000000000, toInt64(-1) != -1152921504606846976.000000000, toInt64(-1) < -1152921504606846976.000000000, toInt64(-1) <= -1152921504606846976.000000000, toInt64(-1) > -1152921504606846976.000000000, toInt64(-1) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toInt64(-1), -1152921504606846976.000000000 != toInt64(-1), -1152921504606846976.000000000 < toInt64(-1), -1152921504606846976.000000000 <= toInt64(-1), -1152921504606846976.000000000 > toInt64(-1), -1152921504606846976.000000000 >= toInt64(-1) ; +SELECT '-1', '-9223372036854786048.000000000', -1 = -9223372036854786048.000000000, -1 != -9223372036854786048.000000000, -1 < -9223372036854786048.000000000, -1 <= -9223372036854786048.000000000, -1 > -9223372036854786048.000000000, -1 >= -9223372036854786048.000000000, -9223372036854786048.000000000 = -1, -9223372036854786048.000000000 != -1, -9223372036854786048.000000000 < -1, -9223372036854786048.000000000 <= -1, -9223372036854786048.000000000 > -1, -9223372036854786048.000000000 >= -1 , toInt8(-1) = -9223372036854786048.000000000, toInt8(-1) != -9223372036854786048.000000000, toInt8(-1) < -9223372036854786048.000000000, toInt8(-1) <= -9223372036854786048.000000000, toInt8(-1) > -9223372036854786048.000000000, toInt8(-1) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toInt8(-1), -9223372036854786048.000000000 != toInt8(-1), -9223372036854786048.000000000 < toInt8(-1), -9223372036854786048.000000000 <= toInt8(-1), -9223372036854786048.000000000 > toInt8(-1), -9223372036854786048.000000000 >= toInt8(-1) , toInt16(-1) = -9223372036854786048.000000000, toInt16(-1) != -9223372036854786048.000000000, toInt16(-1) < -9223372036854786048.000000000, toInt16(-1) <= -9223372036854786048.000000000, toInt16(-1) > -9223372036854786048.000000000, toInt16(-1) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toInt16(-1), -9223372036854786048.000000000 != toInt16(-1), 
-9223372036854786048.000000000 < toInt16(-1), -9223372036854786048.000000000 <= toInt16(-1), -9223372036854786048.000000000 > toInt16(-1), -9223372036854786048.000000000 >= toInt16(-1) , toInt32(-1) = -9223372036854786048.000000000, toInt32(-1) != -9223372036854786048.000000000, toInt32(-1) < -9223372036854786048.000000000, toInt32(-1) <= -9223372036854786048.000000000, toInt32(-1) > -9223372036854786048.000000000, toInt32(-1) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toInt32(-1), -9223372036854786048.000000000 != toInt32(-1), -9223372036854786048.000000000 < toInt32(-1), -9223372036854786048.000000000 <= toInt32(-1), -9223372036854786048.000000000 > toInt32(-1), -9223372036854786048.000000000 >= toInt32(-1) , toInt64(-1) = -9223372036854786048.000000000, toInt64(-1) != -9223372036854786048.000000000, toInt64(-1) < -9223372036854786048.000000000, toInt64(-1) <= -9223372036854786048.000000000, toInt64(-1) > -9223372036854786048.000000000, toInt64(-1) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toInt64(-1), -9223372036854786048.000000000 != toInt64(-1), -9223372036854786048.000000000 < toInt64(-1), -9223372036854786048.000000000 <= toInt64(-1), -9223372036854786048.000000000 > toInt64(-1), -9223372036854786048.000000000 >= toInt64(-1) ; +SELECT '-1', '9223372036854786048.000000000', -1 = 9223372036854786048.000000000, -1 != 9223372036854786048.000000000, -1 < 9223372036854786048.000000000, -1 <= 9223372036854786048.000000000, -1 > 9223372036854786048.000000000, -1 >= 9223372036854786048.000000000, 9223372036854786048.000000000 = -1, 9223372036854786048.000000000 != -1, 9223372036854786048.000000000 < -1, 9223372036854786048.000000000 <= -1, 9223372036854786048.000000000 > -1, 9223372036854786048.000000000 >= -1 , toInt8(-1) = 9223372036854786048.000000000, toInt8(-1) != 9223372036854786048.000000000, toInt8(-1) < 9223372036854786048.000000000, toInt8(-1) <= 9223372036854786048.000000000, toInt8(-1) > 9223372036854786048.000000000, toInt8(-1) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toInt8(-1), 9223372036854786048.000000000 != toInt8(-1), 9223372036854786048.000000000 < toInt8(-1), 9223372036854786048.000000000 <= toInt8(-1), 9223372036854786048.000000000 > toInt8(-1), 9223372036854786048.000000000 >= toInt8(-1) , toInt16(-1) = 9223372036854786048.000000000, toInt16(-1) != 9223372036854786048.000000000, toInt16(-1) < 9223372036854786048.000000000, toInt16(-1) <= 9223372036854786048.000000000, toInt16(-1) > 9223372036854786048.000000000, toInt16(-1) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toInt16(-1), 9223372036854786048.000000000 != toInt16(-1), 9223372036854786048.000000000 < toInt16(-1), 9223372036854786048.000000000 <= toInt16(-1), 9223372036854786048.000000000 > toInt16(-1), 9223372036854786048.000000000 >= toInt16(-1) , toInt32(-1) = 9223372036854786048.000000000, toInt32(-1) != 9223372036854786048.000000000, toInt32(-1) < 9223372036854786048.000000000, toInt32(-1) <= 9223372036854786048.000000000, toInt32(-1) > 9223372036854786048.000000000, toInt32(-1) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toInt32(-1), 9223372036854786048.000000000 != toInt32(-1), 9223372036854786048.000000000 < toInt32(-1), 9223372036854786048.000000000 <= toInt32(-1), 9223372036854786048.000000000 > toInt32(-1), 9223372036854786048.000000000 >= toInt32(-1) , toInt64(-1) = 9223372036854786048.000000000, toInt64(-1) != 9223372036854786048.000000000, toInt64(-1) < 
9223372036854786048.000000000, toInt64(-1) <= 9223372036854786048.000000000, toInt64(-1) > 9223372036854786048.000000000, toInt64(-1) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toInt64(-1), 9223372036854786048.000000000 != toInt64(-1), 9223372036854786048.000000000 < toInt64(-1), 9223372036854786048.000000000 <= toInt64(-1), 9223372036854786048.000000000 > toInt64(-1), 9223372036854786048.000000000 >= toInt64(-1) ; +SELECT '1', '0.000000000', 1 = 0.000000000, 1 != 0.000000000, 1 < 0.000000000, 1 <= 0.000000000, 1 > 0.000000000, 1 >= 0.000000000, 0.000000000 = 1, 0.000000000 != 1, 0.000000000 < 1, 0.000000000 <= 1, 0.000000000 > 1, 0.000000000 >= 1 , toUInt8(1) = 0.000000000, toUInt8(1) != 0.000000000, toUInt8(1) < 0.000000000, toUInt8(1) <= 0.000000000, toUInt8(1) > 0.000000000, toUInt8(1) >= 0.000000000, 0.000000000 = toUInt8(1), 0.000000000 != toUInt8(1), 0.000000000 < toUInt8(1), 0.000000000 <= toUInt8(1), 0.000000000 > toUInt8(1), 0.000000000 >= toUInt8(1) , toInt8(1) = 0.000000000, toInt8(1) != 0.000000000, toInt8(1) < 0.000000000, toInt8(1) <= 0.000000000, toInt8(1) > 0.000000000, toInt8(1) >= 0.000000000, 0.000000000 = toInt8(1), 0.000000000 != toInt8(1), 0.000000000 < toInt8(1), 0.000000000 <= toInt8(1), 0.000000000 > toInt8(1), 0.000000000 >= toInt8(1) , toUInt16(1) = 0.000000000, toUInt16(1) != 0.000000000, toUInt16(1) < 0.000000000, toUInt16(1) <= 0.000000000, toUInt16(1) > 0.000000000, toUInt16(1) >= 0.000000000, 0.000000000 = toUInt16(1), 0.000000000 != toUInt16(1), 0.000000000 < toUInt16(1), 0.000000000 <= toUInt16(1), 0.000000000 > toUInt16(1), 0.000000000 >= toUInt16(1) , toInt16(1) = 0.000000000, toInt16(1) != 0.000000000, toInt16(1) < 0.000000000, toInt16(1) <= 0.000000000, toInt16(1) > 0.000000000, toInt16(1) >= 0.000000000, 0.000000000 = toInt16(1), 0.000000000 != toInt16(1), 0.000000000 < toInt16(1), 0.000000000 <= toInt16(1), 0.000000000 > toInt16(1), 0.000000000 >= toInt16(1) , toUInt32(1) = 0.000000000, toUInt32(1) != 0.000000000, toUInt32(1) < 0.000000000, toUInt32(1) <= 0.000000000, toUInt32(1) > 0.000000000, toUInt32(1) >= 0.000000000, 0.000000000 = toUInt32(1), 0.000000000 != toUInt32(1), 0.000000000 < toUInt32(1), 0.000000000 <= toUInt32(1), 0.000000000 > toUInt32(1), 0.000000000 >= toUInt32(1) , toInt32(1) = 0.000000000, toInt32(1) != 0.000000000, toInt32(1) < 0.000000000, toInt32(1) <= 0.000000000, toInt32(1) > 0.000000000, toInt32(1) >= 0.000000000, 0.000000000 = toInt32(1), 0.000000000 != toInt32(1), 0.000000000 < toInt32(1), 0.000000000 <= toInt32(1), 0.000000000 > toInt32(1), 0.000000000 >= toInt32(1) , toUInt64(1) = 0.000000000, toUInt64(1) != 0.000000000, toUInt64(1) < 0.000000000, toUInt64(1) <= 0.000000000, toUInt64(1) > 0.000000000, toUInt64(1) >= 0.000000000, 0.000000000 = toUInt64(1), 0.000000000 != toUInt64(1), 0.000000000 < toUInt64(1), 0.000000000 <= toUInt64(1), 0.000000000 > toUInt64(1), 0.000000000 >= toUInt64(1) , toInt64(1) = 0.000000000, toInt64(1) != 0.000000000, toInt64(1) < 0.000000000, toInt64(1) <= 0.000000000, toInt64(1) > 0.000000000, toInt64(1) >= 0.000000000, 0.000000000 = toInt64(1), 0.000000000 != toInt64(1), 0.000000000 < toInt64(1), 0.000000000 <= toInt64(1), 0.000000000 > toInt64(1), 0.000000000 >= toInt64(1) ; +SELECT '1', '-1.000000000', 1 = -1.000000000, 1 != -1.000000000, 1 < -1.000000000, 1 <= -1.000000000, 1 > -1.000000000, 1 >= -1.000000000, -1.000000000 = 1, -1.000000000 != 1, -1.000000000 < 1, -1.000000000 <= 1, -1.000000000 > 1, -1.000000000 >= 1 , toUInt8(1) = -1.000000000, toUInt8(1) != 
-1.000000000, toUInt8(1) < -1.000000000, toUInt8(1) <= -1.000000000, toUInt8(1) > -1.000000000, toUInt8(1) >= -1.000000000, -1.000000000 = toUInt8(1), -1.000000000 != toUInt8(1), -1.000000000 < toUInt8(1), -1.000000000 <= toUInt8(1), -1.000000000 > toUInt8(1), -1.000000000 >= toUInt8(1) , toInt8(1) = -1.000000000, toInt8(1) != -1.000000000, toInt8(1) < -1.000000000, toInt8(1) <= -1.000000000, toInt8(1) > -1.000000000, toInt8(1) >= -1.000000000, -1.000000000 = toInt8(1), -1.000000000 != toInt8(1), -1.000000000 < toInt8(1), -1.000000000 <= toInt8(1), -1.000000000 > toInt8(1), -1.000000000 >= toInt8(1) , toUInt16(1) = -1.000000000, toUInt16(1) != -1.000000000, toUInt16(1) < -1.000000000, toUInt16(1) <= -1.000000000, toUInt16(1) > -1.000000000, toUInt16(1) >= -1.000000000, -1.000000000 = toUInt16(1), -1.000000000 != toUInt16(1), -1.000000000 < toUInt16(1), -1.000000000 <= toUInt16(1), -1.000000000 > toUInt16(1), -1.000000000 >= toUInt16(1) , toInt16(1) = -1.000000000, toInt16(1) != -1.000000000, toInt16(1) < -1.000000000, toInt16(1) <= -1.000000000, toInt16(1) > -1.000000000, toInt16(1) >= -1.000000000, -1.000000000 = toInt16(1), -1.000000000 != toInt16(1), -1.000000000 < toInt16(1), -1.000000000 <= toInt16(1), -1.000000000 > toInt16(1), -1.000000000 >= toInt16(1) , toUInt32(1) = -1.000000000, toUInt32(1) != -1.000000000, toUInt32(1) < -1.000000000, toUInt32(1) <= -1.000000000, toUInt32(1) > -1.000000000, toUInt32(1) >= -1.000000000, -1.000000000 = toUInt32(1), -1.000000000 != toUInt32(1), -1.000000000 < toUInt32(1), -1.000000000 <= toUInt32(1), -1.000000000 > toUInt32(1), -1.000000000 >= toUInt32(1) , toInt32(1) = -1.000000000, toInt32(1) != -1.000000000, toInt32(1) < -1.000000000, toInt32(1) <= -1.000000000, toInt32(1) > -1.000000000, toInt32(1) >= -1.000000000, -1.000000000 = toInt32(1), -1.000000000 != toInt32(1), -1.000000000 < toInt32(1), -1.000000000 <= toInt32(1), -1.000000000 > toInt32(1), -1.000000000 >= toInt32(1) , toUInt64(1) = -1.000000000, toUInt64(1) != -1.000000000, toUInt64(1) < -1.000000000, toUInt64(1) <= -1.000000000, toUInt64(1) > -1.000000000, toUInt64(1) >= -1.000000000, -1.000000000 = toUInt64(1), -1.000000000 != toUInt64(1), -1.000000000 < toUInt64(1), -1.000000000 <= toUInt64(1), -1.000000000 > toUInt64(1), -1.000000000 >= toUInt64(1) , toInt64(1) = -1.000000000, toInt64(1) != -1.000000000, toInt64(1) < -1.000000000, toInt64(1) <= -1.000000000, toInt64(1) > -1.000000000, toInt64(1) >= -1.000000000, -1.000000000 = toInt64(1), -1.000000000 != toInt64(1), -1.000000000 < toInt64(1), -1.000000000 <= toInt64(1), -1.000000000 > toInt64(1), -1.000000000 >= toInt64(1) ; +SELECT '1', '1.000000000', 1 = 1.000000000, 1 != 1.000000000, 1 < 1.000000000, 1 <= 1.000000000, 1 > 1.000000000, 1 >= 1.000000000, 1.000000000 = 1, 1.000000000 != 1, 1.000000000 < 1, 1.000000000 <= 1, 1.000000000 > 1, 1.000000000 >= 1 , toUInt8(1) = 1.000000000, toUInt8(1) != 1.000000000, toUInt8(1) < 1.000000000, toUInt8(1) <= 1.000000000, toUInt8(1) > 1.000000000, toUInt8(1) >= 1.000000000, 1.000000000 = toUInt8(1), 1.000000000 != toUInt8(1), 1.000000000 < toUInt8(1), 1.000000000 <= toUInt8(1), 1.000000000 > toUInt8(1), 1.000000000 >= toUInt8(1) , toInt8(1) = 1.000000000, toInt8(1) != 1.000000000, toInt8(1) < 1.000000000, toInt8(1) <= 1.000000000, toInt8(1) > 1.000000000, toInt8(1) >= 1.000000000, 1.000000000 = toInt8(1), 1.000000000 != toInt8(1), 1.000000000 < toInt8(1), 1.000000000 <= toInt8(1), 1.000000000 > toInt8(1), 1.000000000 >= toInt8(1) , toUInt16(1) = 1.000000000, toUInt16(1) != 1.000000000, 
toUInt16(1) < 1.000000000, toUInt16(1) <= 1.000000000, toUInt16(1) > 1.000000000, toUInt16(1) >= 1.000000000, 1.000000000 = toUInt16(1), 1.000000000 != toUInt16(1), 1.000000000 < toUInt16(1), 1.000000000 <= toUInt16(1), 1.000000000 > toUInt16(1), 1.000000000 >= toUInt16(1) , toInt16(1) = 1.000000000, toInt16(1) != 1.000000000, toInt16(1) < 1.000000000, toInt16(1) <= 1.000000000, toInt16(1) > 1.000000000, toInt16(1) >= 1.000000000, 1.000000000 = toInt16(1), 1.000000000 != toInt16(1), 1.000000000 < toInt16(1), 1.000000000 <= toInt16(1), 1.000000000 > toInt16(1), 1.000000000 >= toInt16(1) , toUInt32(1) = 1.000000000, toUInt32(1) != 1.000000000, toUInt32(1) < 1.000000000, toUInt32(1) <= 1.000000000, toUInt32(1) > 1.000000000, toUInt32(1) >= 1.000000000, 1.000000000 = toUInt32(1), 1.000000000 != toUInt32(1), 1.000000000 < toUInt32(1), 1.000000000 <= toUInt32(1), 1.000000000 > toUInt32(1), 1.000000000 >= toUInt32(1) , toInt32(1) = 1.000000000, toInt32(1) != 1.000000000, toInt32(1) < 1.000000000, toInt32(1) <= 1.000000000, toInt32(1) > 1.000000000, toInt32(1) >= 1.000000000, 1.000000000 = toInt32(1), 1.000000000 != toInt32(1), 1.000000000 < toInt32(1), 1.000000000 <= toInt32(1), 1.000000000 > toInt32(1), 1.000000000 >= toInt32(1) , toUInt64(1) = 1.000000000, toUInt64(1) != 1.000000000, toUInt64(1) < 1.000000000, toUInt64(1) <= 1.000000000, toUInt64(1) > 1.000000000, toUInt64(1) >= 1.000000000, 1.000000000 = toUInt64(1), 1.000000000 != toUInt64(1), 1.000000000 < toUInt64(1), 1.000000000 <= toUInt64(1), 1.000000000 > toUInt64(1), 1.000000000 >= toUInt64(1) , toInt64(1) = 1.000000000, toInt64(1) != 1.000000000, toInt64(1) < 1.000000000, toInt64(1) <= 1.000000000, toInt64(1) > 1.000000000, toInt64(1) >= 1.000000000, 1.000000000 = toInt64(1), 1.000000000 != toInt64(1), 1.000000000 < toInt64(1), 1.000000000 <= toInt64(1), 1.000000000 > toInt64(1), 1.000000000 >= toInt64(1) ; +SELECT '1', '18446744073709551616.000000000', 1 = 18446744073709551616.000000000, 1 != 18446744073709551616.000000000, 1 < 18446744073709551616.000000000, 1 <= 18446744073709551616.000000000, 1 > 18446744073709551616.000000000, 1 >= 18446744073709551616.000000000, 18446744073709551616.000000000 = 1, 18446744073709551616.000000000 != 1, 18446744073709551616.000000000 < 1, 18446744073709551616.000000000 <= 1, 18446744073709551616.000000000 > 1, 18446744073709551616.000000000 >= 1 , toUInt8(1) = 18446744073709551616.000000000, toUInt8(1) != 18446744073709551616.000000000, toUInt8(1) < 18446744073709551616.000000000, toUInt8(1) <= 18446744073709551616.000000000, toUInt8(1) > 18446744073709551616.000000000, toUInt8(1) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toUInt8(1), 18446744073709551616.000000000 != toUInt8(1), 18446744073709551616.000000000 < toUInt8(1), 18446744073709551616.000000000 <= toUInt8(1), 18446744073709551616.000000000 > toUInt8(1), 18446744073709551616.000000000 >= toUInt8(1) , toInt8(1) = 18446744073709551616.000000000, toInt8(1) != 18446744073709551616.000000000, toInt8(1) < 18446744073709551616.000000000, toInt8(1) <= 18446744073709551616.000000000, toInt8(1) > 18446744073709551616.000000000, toInt8(1) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toInt8(1), 18446744073709551616.000000000 != toInt8(1), 18446744073709551616.000000000 < toInt8(1), 18446744073709551616.000000000 <= toInt8(1), 18446744073709551616.000000000 > toInt8(1), 18446744073709551616.000000000 >= toInt8(1) , toUInt16(1) = 18446744073709551616.000000000, toUInt16(1) != 
18446744073709551616.000000000, toUInt16(1) < 18446744073709551616.000000000, toUInt16(1) <= 18446744073709551616.000000000, toUInt16(1) > 18446744073709551616.000000000, toUInt16(1) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toUInt16(1), 18446744073709551616.000000000 != toUInt16(1), 18446744073709551616.000000000 < toUInt16(1), 18446744073709551616.000000000 <= toUInt16(1), 18446744073709551616.000000000 > toUInt16(1), 18446744073709551616.000000000 >= toUInt16(1) , toInt16(1) = 18446744073709551616.000000000, toInt16(1) != 18446744073709551616.000000000, toInt16(1) < 18446744073709551616.000000000, toInt16(1) <= 18446744073709551616.000000000, toInt16(1) > 18446744073709551616.000000000, toInt16(1) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toInt16(1), 18446744073709551616.000000000 != toInt16(1), 18446744073709551616.000000000 < toInt16(1), 18446744073709551616.000000000 <= toInt16(1), 18446744073709551616.000000000 > toInt16(1), 18446744073709551616.000000000 >= toInt16(1) , toUInt32(1) = 18446744073709551616.000000000, toUInt32(1) != 18446744073709551616.000000000, toUInt32(1) < 18446744073709551616.000000000, toUInt32(1) <= 18446744073709551616.000000000, toUInt32(1) > 18446744073709551616.000000000, toUInt32(1) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toUInt32(1), 18446744073709551616.000000000 != toUInt32(1), 18446744073709551616.000000000 < toUInt32(1), 18446744073709551616.000000000 <= toUInt32(1), 18446744073709551616.000000000 > toUInt32(1), 18446744073709551616.000000000 >= toUInt32(1) , toInt32(1) = 18446744073709551616.000000000, toInt32(1) != 18446744073709551616.000000000, toInt32(1) < 18446744073709551616.000000000, toInt32(1) <= 18446744073709551616.000000000, toInt32(1) > 18446744073709551616.000000000, toInt32(1) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toInt32(1), 18446744073709551616.000000000 != toInt32(1), 18446744073709551616.000000000 < toInt32(1), 18446744073709551616.000000000 <= toInt32(1), 18446744073709551616.000000000 > toInt32(1), 18446744073709551616.000000000 >= toInt32(1) , toUInt64(1) = 18446744073709551616.000000000, toUInt64(1) != 18446744073709551616.000000000, toUInt64(1) < 18446744073709551616.000000000, toUInt64(1) <= 18446744073709551616.000000000, toUInt64(1) > 18446744073709551616.000000000, toUInt64(1) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toUInt64(1), 18446744073709551616.000000000 != toUInt64(1), 18446744073709551616.000000000 < toUInt64(1), 18446744073709551616.000000000 <= toUInt64(1), 18446744073709551616.000000000 > toUInt64(1), 18446744073709551616.000000000 >= toUInt64(1) , toInt64(1) = 18446744073709551616.000000000, toInt64(1) != 18446744073709551616.000000000, toInt64(1) < 18446744073709551616.000000000, toInt64(1) <= 18446744073709551616.000000000, toInt64(1) > 18446744073709551616.000000000, toInt64(1) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toInt64(1), 18446744073709551616.000000000 != toInt64(1), 18446744073709551616.000000000 < toInt64(1), 18446744073709551616.000000000 <= toInt64(1), 18446744073709551616.000000000 > toInt64(1), 18446744073709551616.000000000 >= toInt64(1) ; +SELECT '1', '9223372036854775808.000000000', 1 = 9223372036854775808.000000000, 1 != 9223372036854775808.000000000, 1 < 9223372036854775808.000000000, 1 <= 9223372036854775808.000000000, 1 > 9223372036854775808.000000000, 1 >= 9223372036854775808.000000000, 9223372036854775808.000000000 = 1, 
9223372036854775808.000000000 != 1, 9223372036854775808.000000000 < 1, 9223372036854775808.000000000 <= 1, 9223372036854775808.000000000 > 1, 9223372036854775808.000000000 >= 1 , toUInt8(1) = 9223372036854775808.000000000, toUInt8(1) != 9223372036854775808.000000000, toUInt8(1) < 9223372036854775808.000000000, toUInt8(1) <= 9223372036854775808.000000000, toUInt8(1) > 9223372036854775808.000000000, toUInt8(1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toUInt8(1), 9223372036854775808.000000000 != toUInt8(1), 9223372036854775808.000000000 < toUInt8(1), 9223372036854775808.000000000 <= toUInt8(1), 9223372036854775808.000000000 > toUInt8(1), 9223372036854775808.000000000 >= toUInt8(1) , toInt8(1) = 9223372036854775808.000000000, toInt8(1) != 9223372036854775808.000000000, toInt8(1) < 9223372036854775808.000000000, toInt8(1) <= 9223372036854775808.000000000, toInt8(1) > 9223372036854775808.000000000, toInt8(1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt8(1), 9223372036854775808.000000000 != toInt8(1), 9223372036854775808.000000000 < toInt8(1), 9223372036854775808.000000000 <= toInt8(1), 9223372036854775808.000000000 > toInt8(1), 9223372036854775808.000000000 >= toInt8(1) , toUInt16(1) = 9223372036854775808.000000000, toUInt16(1) != 9223372036854775808.000000000, toUInt16(1) < 9223372036854775808.000000000, toUInt16(1) <= 9223372036854775808.000000000, toUInt16(1) > 9223372036854775808.000000000, toUInt16(1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toUInt16(1), 9223372036854775808.000000000 != toUInt16(1), 9223372036854775808.000000000 < toUInt16(1), 9223372036854775808.000000000 <= toUInt16(1), 9223372036854775808.000000000 > toUInt16(1), 9223372036854775808.000000000 >= toUInt16(1) , toInt16(1) = 9223372036854775808.000000000, toInt16(1) != 9223372036854775808.000000000, toInt16(1) < 9223372036854775808.000000000, toInt16(1) <= 9223372036854775808.000000000, toInt16(1) > 9223372036854775808.000000000, toInt16(1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt16(1), 9223372036854775808.000000000 != toInt16(1), 9223372036854775808.000000000 < toInt16(1), 9223372036854775808.000000000 <= toInt16(1), 9223372036854775808.000000000 > toInt16(1), 9223372036854775808.000000000 >= toInt16(1) , toUInt32(1) = 9223372036854775808.000000000, toUInt32(1) != 9223372036854775808.000000000, toUInt32(1) < 9223372036854775808.000000000, toUInt32(1) <= 9223372036854775808.000000000, toUInt32(1) > 9223372036854775808.000000000, toUInt32(1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toUInt32(1), 9223372036854775808.000000000 != toUInt32(1), 9223372036854775808.000000000 < toUInt32(1), 9223372036854775808.000000000 <= toUInt32(1), 9223372036854775808.000000000 > toUInt32(1), 9223372036854775808.000000000 >= toUInt32(1) , toInt32(1) = 9223372036854775808.000000000, toInt32(1) != 9223372036854775808.000000000, toInt32(1) < 9223372036854775808.000000000, toInt32(1) <= 9223372036854775808.000000000, toInt32(1) > 9223372036854775808.000000000, toInt32(1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt32(1), 9223372036854775808.000000000 != toInt32(1), 9223372036854775808.000000000 < toInt32(1), 9223372036854775808.000000000 <= toInt32(1), 9223372036854775808.000000000 > toInt32(1), 9223372036854775808.000000000 >= toInt32(1) , toUInt64(1) = 9223372036854775808.000000000, toUInt64(1) != 9223372036854775808.000000000, toUInt64(1) < 9223372036854775808.000000000, toUInt64(1) <= 
9223372036854775808.000000000, toUInt64(1) > 9223372036854775808.000000000, toUInt64(1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toUInt64(1), 9223372036854775808.000000000 != toUInt64(1), 9223372036854775808.000000000 < toUInt64(1), 9223372036854775808.000000000 <= toUInt64(1), 9223372036854775808.000000000 > toUInt64(1), 9223372036854775808.000000000 >= toUInt64(1) , toInt64(1) = 9223372036854775808.000000000, toInt64(1) != 9223372036854775808.000000000, toInt64(1) < 9223372036854775808.000000000, toInt64(1) <= 9223372036854775808.000000000, toInt64(1) > 9223372036854775808.000000000, toInt64(1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt64(1), 9223372036854775808.000000000 != toInt64(1), 9223372036854775808.000000000 < toInt64(1), 9223372036854775808.000000000 <= toInt64(1), 9223372036854775808.000000000 > toInt64(1), 9223372036854775808.000000000 >= toInt64(1) ; +SELECT '1', '-9223372036854775808.000000000', 1 = -9223372036854775808.000000000, 1 != -9223372036854775808.000000000, 1 < -9223372036854775808.000000000, 1 <= -9223372036854775808.000000000, 1 > -9223372036854775808.000000000, 1 >= -9223372036854775808.000000000, -9223372036854775808.000000000 = 1, -9223372036854775808.000000000 != 1, -9223372036854775808.000000000 < 1, -9223372036854775808.000000000 <= 1, -9223372036854775808.000000000 > 1, -9223372036854775808.000000000 >= 1 , toUInt8(1) = -9223372036854775808.000000000, toUInt8(1) != -9223372036854775808.000000000, toUInt8(1) < -9223372036854775808.000000000, toUInt8(1) <= -9223372036854775808.000000000, toUInt8(1) > -9223372036854775808.000000000, toUInt8(1) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toUInt8(1), -9223372036854775808.000000000 != toUInt8(1), -9223372036854775808.000000000 < toUInt8(1), -9223372036854775808.000000000 <= toUInt8(1), -9223372036854775808.000000000 > toUInt8(1), -9223372036854775808.000000000 >= toUInt8(1) , toInt8(1) = -9223372036854775808.000000000, toInt8(1) != -9223372036854775808.000000000, toInt8(1) < -9223372036854775808.000000000, toInt8(1) <= -9223372036854775808.000000000, toInt8(1) > -9223372036854775808.000000000, toInt8(1) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toInt8(1), -9223372036854775808.000000000 != toInt8(1), -9223372036854775808.000000000 < toInt8(1), -9223372036854775808.000000000 <= toInt8(1), -9223372036854775808.000000000 > toInt8(1), -9223372036854775808.000000000 >= toInt8(1) , toUInt16(1) = -9223372036854775808.000000000, toUInt16(1) != -9223372036854775808.000000000, toUInt16(1) < -9223372036854775808.000000000, toUInt16(1) <= -9223372036854775808.000000000, toUInt16(1) > -9223372036854775808.000000000, toUInt16(1) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toUInt16(1), -9223372036854775808.000000000 != toUInt16(1), -9223372036854775808.000000000 < toUInt16(1), -9223372036854775808.000000000 <= toUInt16(1), -9223372036854775808.000000000 > toUInt16(1), -9223372036854775808.000000000 >= toUInt16(1) , toInt16(1) = -9223372036854775808.000000000, toInt16(1) != -9223372036854775808.000000000, toInt16(1) < -9223372036854775808.000000000, toInt16(1) <= -9223372036854775808.000000000, toInt16(1) > -9223372036854775808.000000000, toInt16(1) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toInt16(1), -9223372036854775808.000000000 != toInt16(1), -9223372036854775808.000000000 < toInt16(1), -9223372036854775808.000000000 <= toInt16(1), -9223372036854775808.000000000 > 
toInt16(1), -9223372036854775808.000000000 >= toInt16(1) , toUInt32(1) = -9223372036854775808.000000000, toUInt32(1) != -9223372036854775808.000000000, toUInt32(1) < -9223372036854775808.000000000, toUInt32(1) <= -9223372036854775808.000000000, toUInt32(1) > -9223372036854775808.000000000, toUInt32(1) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toUInt32(1), -9223372036854775808.000000000 != toUInt32(1), -9223372036854775808.000000000 < toUInt32(1), -9223372036854775808.000000000 <= toUInt32(1), -9223372036854775808.000000000 > toUInt32(1), -9223372036854775808.000000000 >= toUInt32(1) , toInt32(1) = -9223372036854775808.000000000, toInt32(1) != -9223372036854775808.000000000, toInt32(1) < -9223372036854775808.000000000, toInt32(1) <= -9223372036854775808.000000000, toInt32(1) > -9223372036854775808.000000000, toInt32(1) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toInt32(1), -9223372036854775808.000000000 != toInt32(1), -9223372036854775808.000000000 < toInt32(1), -9223372036854775808.000000000 <= toInt32(1), -9223372036854775808.000000000 > toInt32(1), -9223372036854775808.000000000 >= toInt32(1) , toUInt64(1) = -9223372036854775808.000000000, toUInt64(1) != -9223372036854775808.000000000, toUInt64(1) < -9223372036854775808.000000000, toUInt64(1) <= -9223372036854775808.000000000, toUInt64(1) > -9223372036854775808.000000000, toUInt64(1) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toUInt64(1), -9223372036854775808.000000000 != toUInt64(1), -9223372036854775808.000000000 < toUInt64(1), -9223372036854775808.000000000 <= toUInt64(1), -9223372036854775808.000000000 > toUInt64(1), -9223372036854775808.000000000 >= toUInt64(1) , toInt64(1) = -9223372036854775808.000000000, toInt64(1) != -9223372036854775808.000000000, toInt64(1) < -9223372036854775808.000000000, toInt64(1) <= -9223372036854775808.000000000, toInt64(1) > -9223372036854775808.000000000, toInt64(1) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toInt64(1), -9223372036854775808.000000000 != toInt64(1), -9223372036854775808.000000000 < toInt64(1), -9223372036854775808.000000000 <= toInt64(1), -9223372036854775808.000000000 > toInt64(1), -9223372036854775808.000000000 >= toInt64(1) ; +SELECT '1', '9223372036854775808.000000000', 1 = 9223372036854775808.000000000, 1 != 9223372036854775808.000000000, 1 < 9223372036854775808.000000000, 1 <= 9223372036854775808.000000000, 1 > 9223372036854775808.000000000, 1 >= 9223372036854775808.000000000, 9223372036854775808.000000000 = 1, 9223372036854775808.000000000 != 1, 9223372036854775808.000000000 < 1, 9223372036854775808.000000000 <= 1, 9223372036854775808.000000000 > 1, 9223372036854775808.000000000 >= 1 , toUInt8(1) = 9223372036854775808.000000000, toUInt8(1) != 9223372036854775808.000000000, toUInt8(1) < 9223372036854775808.000000000, toUInt8(1) <= 9223372036854775808.000000000, toUInt8(1) > 9223372036854775808.000000000, toUInt8(1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toUInt8(1), 9223372036854775808.000000000 != toUInt8(1), 9223372036854775808.000000000 < toUInt8(1), 9223372036854775808.000000000 <= toUInt8(1), 9223372036854775808.000000000 > toUInt8(1), 9223372036854775808.000000000 >= toUInt8(1) , toInt8(1) = 9223372036854775808.000000000, toInt8(1) != 9223372036854775808.000000000, toInt8(1) < 9223372036854775808.000000000, toInt8(1) <= 9223372036854775808.000000000, toInt8(1) > 9223372036854775808.000000000, toInt8(1) >= 9223372036854775808.000000000, 
9223372036854775808.000000000 = toInt8(1), 9223372036854775808.000000000 != toInt8(1), 9223372036854775808.000000000 < toInt8(1), 9223372036854775808.000000000 <= toInt8(1), 9223372036854775808.000000000 > toInt8(1), 9223372036854775808.000000000 >= toInt8(1) , toUInt16(1) = 9223372036854775808.000000000, toUInt16(1) != 9223372036854775808.000000000, toUInt16(1) < 9223372036854775808.000000000, toUInt16(1) <= 9223372036854775808.000000000, toUInt16(1) > 9223372036854775808.000000000, toUInt16(1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toUInt16(1), 9223372036854775808.000000000 != toUInt16(1), 9223372036854775808.000000000 < toUInt16(1), 9223372036854775808.000000000 <= toUInt16(1), 9223372036854775808.000000000 > toUInt16(1), 9223372036854775808.000000000 >= toUInt16(1) , toInt16(1) = 9223372036854775808.000000000, toInt16(1) != 9223372036854775808.000000000, toInt16(1) < 9223372036854775808.000000000, toInt16(1) <= 9223372036854775808.000000000, toInt16(1) > 9223372036854775808.000000000, toInt16(1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt16(1), 9223372036854775808.000000000 != toInt16(1), 9223372036854775808.000000000 < toInt16(1), 9223372036854775808.000000000 <= toInt16(1), 9223372036854775808.000000000 > toInt16(1), 9223372036854775808.000000000 >= toInt16(1) , toUInt32(1) = 9223372036854775808.000000000, toUInt32(1) != 9223372036854775808.000000000, toUInt32(1) < 9223372036854775808.000000000, toUInt32(1) <= 9223372036854775808.000000000, toUInt32(1) > 9223372036854775808.000000000, toUInt32(1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toUInt32(1), 9223372036854775808.000000000 != toUInt32(1), 9223372036854775808.000000000 < toUInt32(1), 9223372036854775808.000000000 <= toUInt32(1), 9223372036854775808.000000000 > toUInt32(1), 9223372036854775808.000000000 >= toUInt32(1) , toInt32(1) = 9223372036854775808.000000000, toInt32(1) != 9223372036854775808.000000000, toInt32(1) < 9223372036854775808.000000000, toInt32(1) <= 9223372036854775808.000000000, toInt32(1) > 9223372036854775808.000000000, toInt32(1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt32(1), 9223372036854775808.000000000 != toInt32(1), 9223372036854775808.000000000 < toInt32(1), 9223372036854775808.000000000 <= toInt32(1), 9223372036854775808.000000000 > toInt32(1), 9223372036854775808.000000000 >= toInt32(1) , toUInt64(1) = 9223372036854775808.000000000, toUInt64(1) != 9223372036854775808.000000000, toUInt64(1) < 9223372036854775808.000000000, toUInt64(1) <= 9223372036854775808.000000000, toUInt64(1) > 9223372036854775808.000000000, toUInt64(1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toUInt64(1), 9223372036854775808.000000000 != toUInt64(1), 9223372036854775808.000000000 < toUInt64(1), 9223372036854775808.000000000 <= toUInt64(1), 9223372036854775808.000000000 > toUInt64(1), 9223372036854775808.000000000 >= toUInt64(1) , toInt64(1) = 9223372036854775808.000000000, toInt64(1) != 9223372036854775808.000000000, toInt64(1) < 9223372036854775808.000000000, toInt64(1) <= 9223372036854775808.000000000, toInt64(1) > 9223372036854775808.000000000, toInt64(1) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toInt64(1), 9223372036854775808.000000000 != toInt64(1), 9223372036854775808.000000000 < toInt64(1), 9223372036854775808.000000000 <= toInt64(1), 9223372036854775808.000000000 > toInt64(1), 9223372036854775808.000000000 >= toInt64(1) ; +SELECT '1', '2251799813685248.000000000', 1 = 
2251799813685248.000000000, 1 != 2251799813685248.000000000, 1 < 2251799813685248.000000000, 1 <= 2251799813685248.000000000, 1 > 2251799813685248.000000000, 1 >= 2251799813685248.000000000, 2251799813685248.000000000 = 1, 2251799813685248.000000000 != 1, 2251799813685248.000000000 < 1, 2251799813685248.000000000 <= 1, 2251799813685248.000000000 > 1, 2251799813685248.000000000 >= 1 , toUInt8(1) = 2251799813685248.000000000, toUInt8(1) != 2251799813685248.000000000, toUInt8(1) < 2251799813685248.000000000, toUInt8(1) <= 2251799813685248.000000000, toUInt8(1) > 2251799813685248.000000000, toUInt8(1) >= 2251799813685248.000000000, 2251799813685248.000000000 = toUInt8(1), 2251799813685248.000000000 != toUInt8(1), 2251799813685248.000000000 < toUInt8(1), 2251799813685248.000000000 <= toUInt8(1), 2251799813685248.000000000 > toUInt8(1), 2251799813685248.000000000 >= toUInt8(1) , toInt8(1) = 2251799813685248.000000000, toInt8(1) != 2251799813685248.000000000, toInt8(1) < 2251799813685248.000000000, toInt8(1) <= 2251799813685248.000000000, toInt8(1) > 2251799813685248.000000000, toInt8(1) >= 2251799813685248.000000000, 2251799813685248.000000000 = toInt8(1), 2251799813685248.000000000 != toInt8(1), 2251799813685248.000000000 < toInt8(1), 2251799813685248.000000000 <= toInt8(1), 2251799813685248.000000000 > toInt8(1), 2251799813685248.000000000 >= toInt8(1) , toUInt16(1) = 2251799813685248.000000000, toUInt16(1) != 2251799813685248.000000000, toUInt16(1) < 2251799813685248.000000000, toUInt16(1) <= 2251799813685248.000000000, toUInt16(1) > 2251799813685248.000000000, toUInt16(1) >= 2251799813685248.000000000, 2251799813685248.000000000 = toUInt16(1), 2251799813685248.000000000 != toUInt16(1), 2251799813685248.000000000 < toUInt16(1), 2251799813685248.000000000 <= toUInt16(1), 2251799813685248.000000000 > toUInt16(1), 2251799813685248.000000000 >= toUInt16(1) , toInt16(1) = 2251799813685248.000000000, toInt16(1) != 2251799813685248.000000000, toInt16(1) < 2251799813685248.000000000, toInt16(1) <= 2251799813685248.000000000, toInt16(1) > 2251799813685248.000000000, toInt16(1) >= 2251799813685248.000000000, 2251799813685248.000000000 = toInt16(1), 2251799813685248.000000000 != toInt16(1), 2251799813685248.000000000 < toInt16(1), 2251799813685248.000000000 <= toInt16(1), 2251799813685248.000000000 > toInt16(1), 2251799813685248.000000000 >= toInt16(1) , toUInt32(1) = 2251799813685248.000000000, toUInt32(1) != 2251799813685248.000000000, toUInt32(1) < 2251799813685248.000000000, toUInt32(1) <= 2251799813685248.000000000, toUInt32(1) > 2251799813685248.000000000, toUInt32(1) >= 2251799813685248.000000000, 2251799813685248.000000000 = toUInt32(1), 2251799813685248.000000000 != toUInt32(1), 2251799813685248.000000000 < toUInt32(1), 2251799813685248.000000000 <= toUInt32(1), 2251799813685248.000000000 > toUInt32(1), 2251799813685248.000000000 >= toUInt32(1) , toInt32(1) = 2251799813685248.000000000, toInt32(1) != 2251799813685248.000000000, toInt32(1) < 2251799813685248.000000000, toInt32(1) <= 2251799813685248.000000000, toInt32(1) > 2251799813685248.000000000, toInt32(1) >= 2251799813685248.000000000, 2251799813685248.000000000 = toInt32(1), 2251799813685248.000000000 != toInt32(1), 2251799813685248.000000000 < toInt32(1), 2251799813685248.000000000 <= toInt32(1), 2251799813685248.000000000 > toInt32(1), 2251799813685248.000000000 >= toInt32(1) , toUInt64(1) = 2251799813685248.000000000, toUInt64(1) != 2251799813685248.000000000, toUInt64(1) < 2251799813685248.000000000, toUInt64(1) <= 
2251799813685248.000000000, toUInt64(1) > 2251799813685248.000000000, toUInt64(1) >= 2251799813685248.000000000, 2251799813685248.000000000 = toUInt64(1), 2251799813685248.000000000 != toUInt64(1), 2251799813685248.000000000 < toUInt64(1), 2251799813685248.000000000 <= toUInt64(1), 2251799813685248.000000000 > toUInt64(1), 2251799813685248.000000000 >= toUInt64(1) , toInt64(1) = 2251799813685248.000000000, toInt64(1) != 2251799813685248.000000000, toInt64(1) < 2251799813685248.000000000, toInt64(1) <= 2251799813685248.000000000, toInt64(1) > 2251799813685248.000000000, toInt64(1) >= 2251799813685248.000000000, 2251799813685248.000000000 = toInt64(1), 2251799813685248.000000000 != toInt64(1), 2251799813685248.000000000 < toInt64(1), 2251799813685248.000000000 <= toInt64(1), 2251799813685248.000000000 > toInt64(1), 2251799813685248.000000000 >= toInt64(1) ; +SELECT '1', '4503599627370496.000000000', 1 = 4503599627370496.000000000, 1 != 4503599627370496.000000000, 1 < 4503599627370496.000000000, 1 <= 4503599627370496.000000000, 1 > 4503599627370496.000000000, 1 >= 4503599627370496.000000000, 4503599627370496.000000000 = 1, 4503599627370496.000000000 != 1, 4503599627370496.000000000 < 1, 4503599627370496.000000000 <= 1, 4503599627370496.000000000 > 1, 4503599627370496.000000000 >= 1 , toUInt8(1) = 4503599627370496.000000000, toUInt8(1) != 4503599627370496.000000000, toUInt8(1) < 4503599627370496.000000000, toUInt8(1) <= 4503599627370496.000000000, toUInt8(1) > 4503599627370496.000000000, toUInt8(1) >= 4503599627370496.000000000, 4503599627370496.000000000 = toUInt8(1), 4503599627370496.000000000 != toUInt8(1), 4503599627370496.000000000 < toUInt8(1), 4503599627370496.000000000 <= toUInt8(1), 4503599627370496.000000000 > toUInt8(1), 4503599627370496.000000000 >= toUInt8(1) , toInt8(1) = 4503599627370496.000000000, toInt8(1) != 4503599627370496.000000000, toInt8(1) < 4503599627370496.000000000, toInt8(1) <= 4503599627370496.000000000, toInt8(1) > 4503599627370496.000000000, toInt8(1) >= 4503599627370496.000000000, 4503599627370496.000000000 = toInt8(1), 4503599627370496.000000000 != toInt8(1), 4503599627370496.000000000 < toInt8(1), 4503599627370496.000000000 <= toInt8(1), 4503599627370496.000000000 > toInt8(1), 4503599627370496.000000000 >= toInt8(1) , toUInt16(1) = 4503599627370496.000000000, toUInt16(1) != 4503599627370496.000000000, toUInt16(1) < 4503599627370496.000000000, toUInt16(1) <= 4503599627370496.000000000, toUInt16(1) > 4503599627370496.000000000, toUInt16(1) >= 4503599627370496.000000000, 4503599627370496.000000000 = toUInt16(1), 4503599627370496.000000000 != toUInt16(1), 4503599627370496.000000000 < toUInt16(1), 4503599627370496.000000000 <= toUInt16(1), 4503599627370496.000000000 > toUInt16(1), 4503599627370496.000000000 >= toUInt16(1) , toInt16(1) = 4503599627370496.000000000, toInt16(1) != 4503599627370496.000000000, toInt16(1) < 4503599627370496.000000000, toInt16(1) <= 4503599627370496.000000000, toInt16(1) > 4503599627370496.000000000, toInt16(1) >= 4503599627370496.000000000, 4503599627370496.000000000 = toInt16(1), 4503599627370496.000000000 != toInt16(1), 4503599627370496.000000000 < toInt16(1), 4503599627370496.000000000 <= toInt16(1), 4503599627370496.000000000 > toInt16(1), 4503599627370496.000000000 >= toInt16(1) , toUInt32(1) = 4503599627370496.000000000, toUInt32(1) != 4503599627370496.000000000, toUInt32(1) < 4503599627370496.000000000, toUInt32(1) <= 4503599627370496.000000000, toUInt32(1) > 4503599627370496.000000000, toUInt32(1) >= 4503599627370496.000000000, 
4503599627370496.000000000 = toUInt32(1), 4503599627370496.000000000 != toUInt32(1), 4503599627370496.000000000 < toUInt32(1), 4503599627370496.000000000 <= toUInt32(1), 4503599627370496.000000000 > toUInt32(1), 4503599627370496.000000000 >= toUInt32(1) , toInt32(1) = 4503599627370496.000000000, toInt32(1) != 4503599627370496.000000000, toInt32(1) < 4503599627370496.000000000, toInt32(1) <= 4503599627370496.000000000, toInt32(1) > 4503599627370496.000000000, toInt32(1) >= 4503599627370496.000000000, 4503599627370496.000000000 = toInt32(1), 4503599627370496.000000000 != toInt32(1), 4503599627370496.000000000 < toInt32(1), 4503599627370496.000000000 <= toInt32(1), 4503599627370496.000000000 > toInt32(1), 4503599627370496.000000000 >= toInt32(1) , toUInt64(1) = 4503599627370496.000000000, toUInt64(1) != 4503599627370496.000000000, toUInt64(1) < 4503599627370496.000000000, toUInt64(1) <= 4503599627370496.000000000, toUInt64(1) > 4503599627370496.000000000, toUInt64(1) >= 4503599627370496.000000000, 4503599627370496.000000000 = toUInt64(1), 4503599627370496.000000000 != toUInt64(1), 4503599627370496.000000000 < toUInt64(1), 4503599627370496.000000000 <= toUInt64(1), 4503599627370496.000000000 > toUInt64(1), 4503599627370496.000000000 >= toUInt64(1) , toInt64(1) = 4503599627370496.000000000, toInt64(1) != 4503599627370496.000000000, toInt64(1) < 4503599627370496.000000000, toInt64(1) <= 4503599627370496.000000000, toInt64(1) > 4503599627370496.000000000, toInt64(1) >= 4503599627370496.000000000, 4503599627370496.000000000 = toInt64(1), 4503599627370496.000000000 != toInt64(1), 4503599627370496.000000000 < toInt64(1), 4503599627370496.000000000 <= toInt64(1), 4503599627370496.000000000 > toInt64(1), 4503599627370496.000000000 >= toInt64(1) ; +SELECT '1', '9007199254740991.000000000', 1 = 9007199254740991.000000000, 1 != 9007199254740991.000000000, 1 < 9007199254740991.000000000, 1 <= 9007199254740991.000000000, 1 > 9007199254740991.000000000, 1 >= 9007199254740991.000000000, 9007199254740991.000000000 = 1, 9007199254740991.000000000 != 1, 9007199254740991.000000000 < 1, 9007199254740991.000000000 <= 1, 9007199254740991.000000000 > 1, 9007199254740991.000000000 >= 1 , toUInt8(1) = 9007199254740991.000000000, toUInt8(1) != 9007199254740991.000000000, toUInt8(1) < 9007199254740991.000000000, toUInt8(1) <= 9007199254740991.000000000, toUInt8(1) > 9007199254740991.000000000, toUInt8(1) >= 9007199254740991.000000000, 9007199254740991.000000000 = toUInt8(1), 9007199254740991.000000000 != toUInt8(1), 9007199254740991.000000000 < toUInt8(1), 9007199254740991.000000000 <= toUInt8(1), 9007199254740991.000000000 > toUInt8(1), 9007199254740991.000000000 >= toUInt8(1) , toInt8(1) = 9007199254740991.000000000, toInt8(1) != 9007199254740991.000000000, toInt8(1) < 9007199254740991.000000000, toInt8(1) <= 9007199254740991.000000000, toInt8(1) > 9007199254740991.000000000, toInt8(1) >= 9007199254740991.000000000, 9007199254740991.000000000 = toInt8(1), 9007199254740991.000000000 != toInt8(1), 9007199254740991.000000000 < toInt8(1), 9007199254740991.000000000 <= toInt8(1), 9007199254740991.000000000 > toInt8(1), 9007199254740991.000000000 >= toInt8(1) , toUInt16(1) = 9007199254740991.000000000, toUInt16(1) != 9007199254740991.000000000, toUInt16(1) < 9007199254740991.000000000, toUInt16(1) <= 9007199254740991.000000000, toUInt16(1) > 9007199254740991.000000000, toUInt16(1) >= 9007199254740991.000000000, 9007199254740991.000000000 = toUInt16(1), 9007199254740991.000000000 != toUInt16(1), 9007199254740991.000000000 < 
toUInt16(1), 9007199254740991.000000000 <= toUInt16(1), 9007199254740991.000000000 > toUInt16(1), 9007199254740991.000000000 >= toUInt16(1) , toInt16(1) = 9007199254740991.000000000, toInt16(1) != 9007199254740991.000000000, toInt16(1) < 9007199254740991.000000000, toInt16(1) <= 9007199254740991.000000000, toInt16(1) > 9007199254740991.000000000, toInt16(1) >= 9007199254740991.000000000, 9007199254740991.000000000 = toInt16(1), 9007199254740991.000000000 != toInt16(1), 9007199254740991.000000000 < toInt16(1), 9007199254740991.000000000 <= toInt16(1), 9007199254740991.000000000 > toInt16(1), 9007199254740991.000000000 >= toInt16(1) , toUInt32(1) = 9007199254740991.000000000, toUInt32(1) != 9007199254740991.000000000, toUInt32(1) < 9007199254740991.000000000, toUInt32(1) <= 9007199254740991.000000000, toUInt32(1) > 9007199254740991.000000000, toUInt32(1) >= 9007199254740991.000000000, 9007199254740991.000000000 = toUInt32(1), 9007199254740991.000000000 != toUInt32(1), 9007199254740991.000000000 < toUInt32(1), 9007199254740991.000000000 <= toUInt32(1), 9007199254740991.000000000 > toUInt32(1), 9007199254740991.000000000 >= toUInt32(1) , toInt32(1) = 9007199254740991.000000000, toInt32(1) != 9007199254740991.000000000, toInt32(1) < 9007199254740991.000000000, toInt32(1) <= 9007199254740991.000000000, toInt32(1) > 9007199254740991.000000000, toInt32(1) >= 9007199254740991.000000000, 9007199254740991.000000000 = toInt32(1), 9007199254740991.000000000 != toInt32(1), 9007199254740991.000000000 < toInt32(1), 9007199254740991.000000000 <= toInt32(1), 9007199254740991.000000000 > toInt32(1), 9007199254740991.000000000 >= toInt32(1) , toUInt64(1) = 9007199254740991.000000000, toUInt64(1) != 9007199254740991.000000000, toUInt64(1) < 9007199254740991.000000000, toUInt64(1) <= 9007199254740991.000000000, toUInt64(1) > 9007199254740991.000000000, toUInt64(1) >= 9007199254740991.000000000, 9007199254740991.000000000 = toUInt64(1), 9007199254740991.000000000 != toUInt64(1), 9007199254740991.000000000 < toUInt64(1), 9007199254740991.000000000 <= toUInt64(1), 9007199254740991.000000000 > toUInt64(1), 9007199254740991.000000000 >= toUInt64(1) , toInt64(1) = 9007199254740991.000000000, toInt64(1) != 9007199254740991.000000000, toInt64(1) < 9007199254740991.000000000, toInt64(1) <= 9007199254740991.000000000, toInt64(1) > 9007199254740991.000000000, toInt64(1) >= 9007199254740991.000000000, 9007199254740991.000000000 = toInt64(1), 9007199254740991.000000000 != toInt64(1), 9007199254740991.000000000 < toInt64(1), 9007199254740991.000000000 <= toInt64(1), 9007199254740991.000000000 > toInt64(1), 9007199254740991.000000000 >= toInt64(1) ; +SELECT '1', '9007199254740992.000000000', 1 = 9007199254740992.000000000, 1 != 9007199254740992.000000000, 1 < 9007199254740992.000000000, 1 <= 9007199254740992.000000000, 1 > 9007199254740992.000000000, 1 >= 9007199254740992.000000000, 9007199254740992.000000000 = 1, 9007199254740992.000000000 != 1, 9007199254740992.000000000 < 1, 9007199254740992.000000000 <= 1, 9007199254740992.000000000 > 1, 9007199254740992.000000000 >= 1 , toUInt8(1) = 9007199254740992.000000000, toUInt8(1) != 9007199254740992.000000000, toUInt8(1) < 9007199254740992.000000000, toUInt8(1) <= 9007199254740992.000000000, toUInt8(1) > 9007199254740992.000000000, toUInt8(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt8(1), 9007199254740992.000000000 != toUInt8(1), 9007199254740992.000000000 < toUInt8(1), 9007199254740992.000000000 <= toUInt8(1), 9007199254740992.000000000 > toUInt8(1), 
9007199254740992.000000000 >= toUInt8(1) , toInt8(1) = 9007199254740992.000000000, toInt8(1) != 9007199254740992.000000000, toInt8(1) < 9007199254740992.000000000, toInt8(1) <= 9007199254740992.000000000, toInt8(1) > 9007199254740992.000000000, toInt8(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt8(1), 9007199254740992.000000000 != toInt8(1), 9007199254740992.000000000 < toInt8(1), 9007199254740992.000000000 <= toInt8(1), 9007199254740992.000000000 > toInt8(1), 9007199254740992.000000000 >= toInt8(1) , toUInt16(1) = 9007199254740992.000000000, toUInt16(1) != 9007199254740992.000000000, toUInt16(1) < 9007199254740992.000000000, toUInt16(1) <= 9007199254740992.000000000, toUInt16(1) > 9007199254740992.000000000, toUInt16(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt16(1), 9007199254740992.000000000 != toUInt16(1), 9007199254740992.000000000 < toUInt16(1), 9007199254740992.000000000 <= toUInt16(1), 9007199254740992.000000000 > toUInt16(1), 9007199254740992.000000000 >= toUInt16(1) , toInt16(1) = 9007199254740992.000000000, toInt16(1) != 9007199254740992.000000000, toInt16(1) < 9007199254740992.000000000, toInt16(1) <= 9007199254740992.000000000, toInt16(1) > 9007199254740992.000000000, toInt16(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt16(1), 9007199254740992.000000000 != toInt16(1), 9007199254740992.000000000 < toInt16(1), 9007199254740992.000000000 <= toInt16(1), 9007199254740992.000000000 > toInt16(1), 9007199254740992.000000000 >= toInt16(1) , toUInt32(1) = 9007199254740992.000000000, toUInt32(1) != 9007199254740992.000000000, toUInt32(1) < 9007199254740992.000000000, toUInt32(1) <= 9007199254740992.000000000, toUInt32(1) > 9007199254740992.000000000, toUInt32(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt32(1), 9007199254740992.000000000 != toUInt32(1), 9007199254740992.000000000 < toUInt32(1), 9007199254740992.000000000 <= toUInt32(1), 9007199254740992.000000000 > toUInt32(1), 9007199254740992.000000000 >= toUInt32(1) , toInt32(1) = 9007199254740992.000000000, toInt32(1) != 9007199254740992.000000000, toInt32(1) < 9007199254740992.000000000, toInt32(1) <= 9007199254740992.000000000, toInt32(1) > 9007199254740992.000000000, toInt32(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt32(1), 9007199254740992.000000000 != toInt32(1), 9007199254740992.000000000 < toInt32(1), 9007199254740992.000000000 <= toInt32(1), 9007199254740992.000000000 > toInt32(1), 9007199254740992.000000000 >= toInt32(1) , toUInt64(1) = 9007199254740992.000000000, toUInt64(1) != 9007199254740992.000000000, toUInt64(1) < 9007199254740992.000000000, toUInt64(1) <= 9007199254740992.000000000, toUInt64(1) > 9007199254740992.000000000, toUInt64(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt64(1), 9007199254740992.000000000 != toUInt64(1), 9007199254740992.000000000 < toUInt64(1), 9007199254740992.000000000 <= toUInt64(1), 9007199254740992.000000000 > toUInt64(1), 9007199254740992.000000000 >= toUInt64(1) , toInt64(1) = 9007199254740992.000000000, toInt64(1) != 9007199254740992.000000000, toInt64(1) < 9007199254740992.000000000, toInt64(1) <= 9007199254740992.000000000, toInt64(1) > 9007199254740992.000000000, toInt64(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt64(1), 9007199254740992.000000000 != toInt64(1), 9007199254740992.000000000 < toInt64(1), 9007199254740992.000000000 <= toInt64(1), 9007199254740992.000000000 > toInt64(1), 9007199254740992.000000000 >= 
toInt64(1) ; +SELECT '1', '9007199254740992.000000000', 1 = 9007199254740992.000000000, 1 != 9007199254740992.000000000, 1 < 9007199254740992.000000000, 1 <= 9007199254740992.000000000, 1 > 9007199254740992.000000000, 1 >= 9007199254740992.000000000, 9007199254740992.000000000 = 1, 9007199254740992.000000000 != 1, 9007199254740992.000000000 < 1, 9007199254740992.000000000 <= 1, 9007199254740992.000000000 > 1, 9007199254740992.000000000 >= 1 , toUInt8(1) = 9007199254740992.000000000, toUInt8(1) != 9007199254740992.000000000, toUInt8(1) < 9007199254740992.000000000, toUInt8(1) <= 9007199254740992.000000000, toUInt8(1) > 9007199254740992.000000000, toUInt8(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt8(1), 9007199254740992.000000000 != toUInt8(1), 9007199254740992.000000000 < toUInt8(1), 9007199254740992.000000000 <= toUInt8(1), 9007199254740992.000000000 > toUInt8(1), 9007199254740992.000000000 >= toUInt8(1) , toInt8(1) = 9007199254740992.000000000, toInt8(1) != 9007199254740992.000000000, toInt8(1) < 9007199254740992.000000000, toInt8(1) <= 9007199254740992.000000000, toInt8(1) > 9007199254740992.000000000, toInt8(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt8(1), 9007199254740992.000000000 != toInt8(1), 9007199254740992.000000000 < toInt8(1), 9007199254740992.000000000 <= toInt8(1), 9007199254740992.000000000 > toInt8(1), 9007199254740992.000000000 >= toInt8(1) , toUInt16(1) = 9007199254740992.000000000, toUInt16(1) != 9007199254740992.000000000, toUInt16(1) < 9007199254740992.000000000, toUInt16(1) <= 9007199254740992.000000000, toUInt16(1) > 9007199254740992.000000000, toUInt16(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt16(1), 9007199254740992.000000000 != toUInt16(1), 9007199254740992.000000000 < toUInt16(1), 9007199254740992.000000000 <= toUInt16(1), 9007199254740992.000000000 > toUInt16(1), 9007199254740992.000000000 >= toUInt16(1) , toInt16(1) = 9007199254740992.000000000, toInt16(1) != 9007199254740992.000000000, toInt16(1) < 9007199254740992.000000000, toInt16(1) <= 9007199254740992.000000000, toInt16(1) > 9007199254740992.000000000, toInt16(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt16(1), 9007199254740992.000000000 != toInt16(1), 9007199254740992.000000000 < toInt16(1), 9007199254740992.000000000 <= toInt16(1), 9007199254740992.000000000 > toInt16(1), 9007199254740992.000000000 >= toInt16(1) , toUInt32(1) = 9007199254740992.000000000, toUInt32(1) != 9007199254740992.000000000, toUInt32(1) < 9007199254740992.000000000, toUInt32(1) <= 9007199254740992.000000000, toUInt32(1) > 9007199254740992.000000000, toUInt32(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt32(1), 9007199254740992.000000000 != toUInt32(1), 9007199254740992.000000000 < toUInt32(1), 9007199254740992.000000000 <= toUInt32(1), 9007199254740992.000000000 > toUInt32(1), 9007199254740992.000000000 >= toUInt32(1) , toInt32(1) = 9007199254740992.000000000, toInt32(1) != 9007199254740992.000000000, toInt32(1) < 9007199254740992.000000000, toInt32(1) <= 9007199254740992.000000000, toInt32(1) > 9007199254740992.000000000, toInt32(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt32(1), 9007199254740992.000000000 != toInt32(1), 9007199254740992.000000000 < toInt32(1), 9007199254740992.000000000 <= toInt32(1), 9007199254740992.000000000 > toInt32(1), 9007199254740992.000000000 >= toInt32(1) , toUInt64(1) = 9007199254740992.000000000, toUInt64(1) != 9007199254740992.000000000, toUInt64(1) < 
9007199254740992.000000000, toUInt64(1) <= 9007199254740992.000000000, toUInt64(1) > 9007199254740992.000000000, toUInt64(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt64(1), 9007199254740992.000000000 != toUInt64(1), 9007199254740992.000000000 < toUInt64(1), 9007199254740992.000000000 <= toUInt64(1), 9007199254740992.000000000 > toUInt64(1), 9007199254740992.000000000 >= toUInt64(1) , toInt64(1) = 9007199254740992.000000000, toInt64(1) != 9007199254740992.000000000, toInt64(1) < 9007199254740992.000000000, toInt64(1) <= 9007199254740992.000000000, toInt64(1) > 9007199254740992.000000000, toInt64(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt64(1), 9007199254740992.000000000 != toInt64(1), 9007199254740992.000000000 < toInt64(1), 9007199254740992.000000000 <= toInt64(1), 9007199254740992.000000000 > toInt64(1), 9007199254740992.000000000 >= toInt64(1) ; +SELECT '1', '9007199254740994.000000000', 1 = 9007199254740994.000000000, 1 != 9007199254740994.000000000, 1 < 9007199254740994.000000000, 1 <= 9007199254740994.000000000, 1 > 9007199254740994.000000000, 1 >= 9007199254740994.000000000, 9007199254740994.000000000 = 1, 9007199254740994.000000000 != 1, 9007199254740994.000000000 < 1, 9007199254740994.000000000 <= 1, 9007199254740994.000000000 > 1, 9007199254740994.000000000 >= 1 , toUInt8(1) = 9007199254740994.000000000, toUInt8(1) != 9007199254740994.000000000, toUInt8(1) < 9007199254740994.000000000, toUInt8(1) <= 9007199254740994.000000000, toUInt8(1) > 9007199254740994.000000000, toUInt8(1) >= 9007199254740994.000000000, 9007199254740994.000000000 = toUInt8(1), 9007199254740994.000000000 != toUInt8(1), 9007199254740994.000000000 < toUInt8(1), 9007199254740994.000000000 <= toUInt8(1), 9007199254740994.000000000 > toUInt8(1), 9007199254740994.000000000 >= toUInt8(1) , toInt8(1) = 9007199254740994.000000000, toInt8(1) != 9007199254740994.000000000, toInt8(1) < 9007199254740994.000000000, toInt8(1) <= 9007199254740994.000000000, toInt8(1) > 9007199254740994.000000000, toInt8(1) >= 9007199254740994.000000000, 9007199254740994.000000000 = toInt8(1), 9007199254740994.000000000 != toInt8(1), 9007199254740994.000000000 < toInt8(1), 9007199254740994.000000000 <= toInt8(1), 9007199254740994.000000000 > toInt8(1), 9007199254740994.000000000 >= toInt8(1) , toUInt16(1) = 9007199254740994.000000000, toUInt16(1) != 9007199254740994.000000000, toUInt16(1) < 9007199254740994.000000000, toUInt16(1) <= 9007199254740994.000000000, toUInt16(1) > 9007199254740994.000000000, toUInt16(1) >= 9007199254740994.000000000, 9007199254740994.000000000 = toUInt16(1), 9007199254740994.000000000 != toUInt16(1), 9007199254740994.000000000 < toUInt16(1), 9007199254740994.000000000 <= toUInt16(1), 9007199254740994.000000000 > toUInt16(1), 9007199254740994.000000000 >= toUInt16(1) , toInt16(1) = 9007199254740994.000000000, toInt16(1) != 9007199254740994.000000000, toInt16(1) < 9007199254740994.000000000, toInt16(1) <= 9007199254740994.000000000, toInt16(1) > 9007199254740994.000000000, toInt16(1) >= 9007199254740994.000000000, 9007199254740994.000000000 = toInt16(1), 9007199254740994.000000000 != toInt16(1), 9007199254740994.000000000 < toInt16(1), 9007199254740994.000000000 <= toInt16(1), 9007199254740994.000000000 > toInt16(1), 9007199254740994.000000000 >= toInt16(1) , toUInt32(1) = 9007199254740994.000000000, toUInt32(1) != 9007199254740994.000000000, toUInt32(1) < 9007199254740994.000000000, toUInt32(1) <= 9007199254740994.000000000, toUInt32(1) > 9007199254740994.000000000, 
toUInt32(1) >= 9007199254740994.000000000, 9007199254740994.000000000 = toUInt32(1), 9007199254740994.000000000 != toUInt32(1), 9007199254740994.000000000 < toUInt32(1), 9007199254740994.000000000 <= toUInt32(1), 9007199254740994.000000000 > toUInt32(1), 9007199254740994.000000000 >= toUInt32(1) , toInt32(1) = 9007199254740994.000000000, toInt32(1) != 9007199254740994.000000000, toInt32(1) < 9007199254740994.000000000, toInt32(1) <= 9007199254740994.000000000, toInt32(1) > 9007199254740994.000000000, toInt32(1) >= 9007199254740994.000000000, 9007199254740994.000000000 = toInt32(1), 9007199254740994.000000000 != toInt32(1), 9007199254740994.000000000 < toInt32(1), 9007199254740994.000000000 <= toInt32(1), 9007199254740994.000000000 > toInt32(1), 9007199254740994.000000000 >= toInt32(1) , toUInt64(1) = 9007199254740994.000000000, toUInt64(1) != 9007199254740994.000000000, toUInt64(1) < 9007199254740994.000000000, toUInt64(1) <= 9007199254740994.000000000, toUInt64(1) > 9007199254740994.000000000, toUInt64(1) >= 9007199254740994.000000000, 9007199254740994.000000000 = toUInt64(1), 9007199254740994.000000000 != toUInt64(1), 9007199254740994.000000000 < toUInt64(1), 9007199254740994.000000000 <= toUInt64(1), 9007199254740994.000000000 > toUInt64(1), 9007199254740994.000000000 >= toUInt64(1) , toInt64(1) = 9007199254740994.000000000, toInt64(1) != 9007199254740994.000000000, toInt64(1) < 9007199254740994.000000000, toInt64(1) <= 9007199254740994.000000000, toInt64(1) > 9007199254740994.000000000, toInt64(1) >= 9007199254740994.000000000, 9007199254740994.000000000 = toInt64(1), 9007199254740994.000000000 != toInt64(1), 9007199254740994.000000000 < toInt64(1), 9007199254740994.000000000 <= toInt64(1), 9007199254740994.000000000 > toInt64(1), 9007199254740994.000000000 >= toInt64(1) ; +SELECT '1', '-9007199254740991.000000000', 1 = -9007199254740991.000000000, 1 != -9007199254740991.000000000, 1 < -9007199254740991.000000000, 1 <= -9007199254740991.000000000, 1 > -9007199254740991.000000000, 1 >= -9007199254740991.000000000, -9007199254740991.000000000 = 1, -9007199254740991.000000000 != 1, -9007199254740991.000000000 < 1, -9007199254740991.000000000 <= 1, -9007199254740991.000000000 > 1, -9007199254740991.000000000 >= 1 , toUInt8(1) = -9007199254740991.000000000, toUInt8(1) != -9007199254740991.000000000, toUInt8(1) < -9007199254740991.000000000, toUInt8(1) <= -9007199254740991.000000000, toUInt8(1) > -9007199254740991.000000000, toUInt8(1) >= -9007199254740991.000000000, -9007199254740991.000000000 = toUInt8(1), -9007199254740991.000000000 != toUInt8(1), -9007199254740991.000000000 < toUInt8(1), -9007199254740991.000000000 <= toUInt8(1), -9007199254740991.000000000 > toUInt8(1), -9007199254740991.000000000 >= toUInt8(1) , toInt8(1) = -9007199254740991.000000000, toInt8(1) != -9007199254740991.000000000, toInt8(1) < -9007199254740991.000000000, toInt8(1) <= -9007199254740991.000000000, toInt8(1) > -9007199254740991.000000000, toInt8(1) >= -9007199254740991.000000000, -9007199254740991.000000000 = toInt8(1), -9007199254740991.000000000 != toInt8(1), -9007199254740991.000000000 < toInt8(1), -9007199254740991.000000000 <= toInt8(1), -9007199254740991.000000000 > toInt8(1), -9007199254740991.000000000 >= toInt8(1) , toUInt16(1) = -9007199254740991.000000000, toUInt16(1) != -9007199254740991.000000000, toUInt16(1) < -9007199254740991.000000000, toUInt16(1) <= -9007199254740991.000000000, toUInt16(1) > -9007199254740991.000000000, toUInt16(1) >= -9007199254740991.000000000, -9007199254740991.000000000 
= toUInt16(1), -9007199254740991.000000000 != toUInt16(1), -9007199254740991.000000000 < toUInt16(1), -9007199254740991.000000000 <= toUInt16(1), -9007199254740991.000000000 > toUInt16(1), -9007199254740991.000000000 >= toUInt16(1) , toInt16(1) = -9007199254740991.000000000, toInt16(1) != -9007199254740991.000000000, toInt16(1) < -9007199254740991.000000000, toInt16(1) <= -9007199254740991.000000000, toInt16(1) > -9007199254740991.000000000, toInt16(1) >= -9007199254740991.000000000, -9007199254740991.000000000 = toInt16(1), -9007199254740991.000000000 != toInt16(1), -9007199254740991.000000000 < toInt16(1), -9007199254740991.000000000 <= toInt16(1), -9007199254740991.000000000 > toInt16(1), -9007199254740991.000000000 >= toInt16(1) , toUInt32(1) = -9007199254740991.000000000, toUInt32(1) != -9007199254740991.000000000, toUInt32(1) < -9007199254740991.000000000, toUInt32(1) <= -9007199254740991.000000000, toUInt32(1) > -9007199254740991.000000000, toUInt32(1) >= -9007199254740991.000000000, -9007199254740991.000000000 = toUInt32(1), -9007199254740991.000000000 != toUInt32(1), -9007199254740991.000000000 < toUInt32(1), -9007199254740991.000000000 <= toUInt32(1), -9007199254740991.000000000 > toUInt32(1), -9007199254740991.000000000 >= toUInt32(1) , toInt32(1) = -9007199254740991.000000000, toInt32(1) != -9007199254740991.000000000, toInt32(1) < -9007199254740991.000000000, toInt32(1) <= -9007199254740991.000000000, toInt32(1) > -9007199254740991.000000000, toInt32(1) >= -9007199254740991.000000000, -9007199254740991.000000000 = toInt32(1), -9007199254740991.000000000 != toInt32(1), -9007199254740991.000000000 < toInt32(1), -9007199254740991.000000000 <= toInt32(1), -9007199254740991.000000000 > toInt32(1), -9007199254740991.000000000 >= toInt32(1) , toUInt64(1) = -9007199254740991.000000000, toUInt64(1) != -9007199254740991.000000000, toUInt64(1) < -9007199254740991.000000000, toUInt64(1) <= -9007199254740991.000000000, toUInt64(1) > -9007199254740991.000000000, toUInt64(1) >= -9007199254740991.000000000, -9007199254740991.000000000 = toUInt64(1), -9007199254740991.000000000 != toUInt64(1), -9007199254740991.000000000 < toUInt64(1), -9007199254740991.000000000 <= toUInt64(1), -9007199254740991.000000000 > toUInt64(1), -9007199254740991.000000000 >= toUInt64(1) , toInt64(1) = -9007199254740991.000000000, toInt64(1) != -9007199254740991.000000000, toInt64(1) < -9007199254740991.000000000, toInt64(1) <= -9007199254740991.000000000, toInt64(1) > -9007199254740991.000000000, toInt64(1) >= -9007199254740991.000000000, -9007199254740991.000000000 = toInt64(1), -9007199254740991.000000000 != toInt64(1), -9007199254740991.000000000 < toInt64(1), -9007199254740991.000000000 <= toInt64(1), -9007199254740991.000000000 > toInt64(1), -9007199254740991.000000000 >= toInt64(1) ; +SELECT '1', '-9007199254740992.000000000', 1 = -9007199254740992.000000000, 1 != -9007199254740992.000000000, 1 < -9007199254740992.000000000, 1 <= -9007199254740992.000000000, 1 > -9007199254740992.000000000, 1 >= -9007199254740992.000000000, -9007199254740992.000000000 = 1, -9007199254740992.000000000 != 1, -9007199254740992.000000000 < 1, -9007199254740992.000000000 <= 1, -9007199254740992.000000000 > 1, -9007199254740992.000000000 >= 1 , toUInt8(1) = -9007199254740992.000000000, toUInt8(1) != -9007199254740992.000000000, toUInt8(1) < -9007199254740992.000000000, toUInt8(1) <= -9007199254740992.000000000, toUInt8(1) > -9007199254740992.000000000, toUInt8(1) >= -9007199254740992.000000000, -9007199254740992.000000000 = 
toUInt8(1), -9007199254740992.000000000 != toUInt8(1), -9007199254740992.000000000 < toUInt8(1), -9007199254740992.000000000 <= toUInt8(1), -9007199254740992.000000000 > toUInt8(1), -9007199254740992.000000000 >= toUInt8(1) , toInt8(1) = -9007199254740992.000000000, toInt8(1) != -9007199254740992.000000000, toInt8(1) < -9007199254740992.000000000, toInt8(1) <= -9007199254740992.000000000, toInt8(1) > -9007199254740992.000000000, toInt8(1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt8(1), -9007199254740992.000000000 != toInt8(1), -9007199254740992.000000000 < toInt8(1), -9007199254740992.000000000 <= toInt8(1), -9007199254740992.000000000 > toInt8(1), -9007199254740992.000000000 >= toInt8(1) , toUInt16(1) = -9007199254740992.000000000, toUInt16(1) != -9007199254740992.000000000, toUInt16(1) < -9007199254740992.000000000, toUInt16(1) <= -9007199254740992.000000000, toUInt16(1) > -9007199254740992.000000000, toUInt16(1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toUInt16(1), -9007199254740992.000000000 != toUInt16(1), -9007199254740992.000000000 < toUInt16(1), -9007199254740992.000000000 <= toUInt16(1), -9007199254740992.000000000 > toUInt16(1), -9007199254740992.000000000 >= toUInt16(1) , toInt16(1) = -9007199254740992.000000000, toInt16(1) != -9007199254740992.000000000, toInt16(1) < -9007199254740992.000000000, toInt16(1) <= -9007199254740992.000000000, toInt16(1) > -9007199254740992.000000000, toInt16(1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt16(1), -9007199254740992.000000000 != toInt16(1), -9007199254740992.000000000 < toInt16(1), -9007199254740992.000000000 <= toInt16(1), -9007199254740992.000000000 > toInt16(1), -9007199254740992.000000000 >= toInt16(1) , toUInt32(1) = -9007199254740992.000000000, toUInt32(1) != -9007199254740992.000000000, toUInt32(1) < -9007199254740992.000000000, toUInt32(1) <= -9007199254740992.000000000, toUInt32(1) > -9007199254740992.000000000, toUInt32(1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toUInt32(1), -9007199254740992.000000000 != toUInt32(1), -9007199254740992.000000000 < toUInt32(1), -9007199254740992.000000000 <= toUInt32(1), -9007199254740992.000000000 > toUInt32(1), -9007199254740992.000000000 >= toUInt32(1) , toInt32(1) = -9007199254740992.000000000, toInt32(1) != -9007199254740992.000000000, toInt32(1) < -9007199254740992.000000000, toInt32(1) <= -9007199254740992.000000000, toInt32(1) > -9007199254740992.000000000, toInt32(1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt32(1), -9007199254740992.000000000 != toInt32(1), -9007199254740992.000000000 < toInt32(1), -9007199254740992.000000000 <= toInt32(1), -9007199254740992.000000000 > toInt32(1), -9007199254740992.000000000 >= toInt32(1) , toUInt64(1) = -9007199254740992.000000000, toUInt64(1) != -9007199254740992.000000000, toUInt64(1) < -9007199254740992.000000000, toUInt64(1) <= -9007199254740992.000000000, toUInt64(1) > -9007199254740992.000000000, toUInt64(1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toUInt64(1), -9007199254740992.000000000 != toUInt64(1), -9007199254740992.000000000 < toUInt64(1), -9007199254740992.000000000 <= toUInt64(1), -9007199254740992.000000000 > toUInt64(1), -9007199254740992.000000000 >= toUInt64(1) , toInt64(1) = -9007199254740992.000000000, toInt64(1) != -9007199254740992.000000000, toInt64(1) < -9007199254740992.000000000, toInt64(1) <= -9007199254740992.000000000, toInt64(1) > -9007199254740992.000000000, toInt64(1) >= 
-9007199254740992.000000000, -9007199254740992.000000000 = toInt64(1), -9007199254740992.000000000 != toInt64(1), -9007199254740992.000000000 < toInt64(1), -9007199254740992.000000000 <= toInt64(1), -9007199254740992.000000000 > toInt64(1), -9007199254740992.000000000 >= toInt64(1) ;
+SELECT '1', '-9007199254740992.000000000', 1 = -9007199254740992.000000000, 1 != -9007199254740992.000000000, 1 < -9007199254740992.000000000, 1 <= -9007199254740992.000000000, 1 > -9007199254740992.000000000, 1 >= -9007199254740992.000000000, -9007199254740992.000000000 = 1, -9007199254740992.000000000 != 1, -9007199254740992.000000000 < 1, -9007199254740992.000000000 <= 1, -9007199254740992.000000000 > 1, -9007199254740992.000000000 >= 1 , toUInt8(1) = -9007199254740992.000000000, toUInt8(1) != -9007199254740992.000000000, toUInt8(1) < -9007199254740992.000000000, toUInt8(1) <= -9007199254740992.000000000, toUInt8(1) > -9007199254740992.000000000, toUInt8(1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toUInt8(1), -9007199254740992.000000000 != toUInt8(1), -9007199254740992.000000000 < toUInt8(1), -9007199254740992.000000000 <= toUInt8(1), -9007199254740992.000000000 > toUInt8(1), -9007199254740992.000000000 >= toUInt8(1) , toInt8(1) = -9007199254740992.000000000, toInt8(1) != -9007199254740992.000000000, toInt8(1) < -9007199254740992.000000000, toInt8(1) <= -9007199254740992.000000000, toInt8(1) > -9007199254740992.000000000, toInt8(1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt8(1), -9007199254740992.000000000 != toInt8(1), -9007199254740992.000000000 < toInt8(1), -9007199254740992.000000000 <= toInt8(1), -9007199254740992.000000000 > toInt8(1), -9007199254740992.000000000 >= toInt8(1) , toUInt16(1) = -9007199254740992.000000000, toUInt16(1) != -9007199254740992.000000000, toUInt16(1) < -9007199254740992.000000000, toUInt16(1) <= -9007199254740992.000000000, toUInt16(1) > -9007199254740992.000000000, toUInt16(1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toUInt16(1), -9007199254740992.000000000 != toUInt16(1), -9007199254740992.000000000 < toUInt16(1), -9007199254740992.000000000 <= toUInt16(1), -9007199254740992.000000000 > toUInt16(1), -9007199254740992.000000000 >= toUInt16(1) , toInt16(1) = -9007199254740992.000000000, toInt16(1) != -9007199254740992.000000000, toInt16(1) < -9007199254740992.000000000, toInt16(1) <= -9007199254740992.000000000, toInt16(1) > -9007199254740992.000000000, toInt16(1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt16(1), -9007199254740992.000000000 != toInt16(1), -9007199254740992.000000000 < toInt16(1), -9007199254740992.000000000 <= toInt16(1), -9007199254740992.000000000 > toInt16(1), -9007199254740992.000000000 >= toInt16(1) , toUInt32(1) = -9007199254740992.000000000, toUInt32(1) != -9007199254740992.000000000, toUInt32(1) < -9007199254740992.000000000, toUInt32(1) <= -9007199254740992.000000000, toUInt32(1) > -9007199254740992.000000000, toUInt32(1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toUInt32(1), -9007199254740992.000000000 != toUInt32(1), -9007199254740992.000000000 < toUInt32(1), -9007199254740992.000000000 <= toUInt32(1), -9007199254740992.000000000 > toUInt32(1), -9007199254740992.000000000 >= toUInt32(1) , toInt32(1) = -9007199254740992.000000000, toInt32(1) != -9007199254740992.000000000, toInt32(1) < -9007199254740992.000000000, toInt32(1) <= -9007199254740992.000000000, toInt32(1) > -9007199254740992.000000000, toInt32(1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt32(1), -9007199254740992.000000000 != toInt32(1), -9007199254740992.000000000 < toInt32(1), -9007199254740992.000000000 <= toInt32(1), -9007199254740992.000000000 > toInt32(1), -9007199254740992.000000000 >= toInt32(1) , toUInt64(1) = -9007199254740992.000000000, toUInt64(1) != -9007199254740992.000000000, toUInt64(1) < -9007199254740992.000000000, toUInt64(1) <= -9007199254740992.000000000, toUInt64(1) > -9007199254740992.000000000, toUInt64(1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toUInt64(1), -9007199254740992.000000000 != toUInt64(1), -9007199254740992.000000000 < toUInt64(1), -9007199254740992.000000000 <= toUInt64(1), -9007199254740992.000000000 > toUInt64(1), -9007199254740992.000000000 >= toUInt64(1) , toInt64(1) = -9007199254740992.000000000, toInt64(1) != -9007199254740992.000000000, toInt64(1) < -9007199254740992.000000000, toInt64(1) <= -9007199254740992.000000000, toInt64(1) > -9007199254740992.000000000, toInt64(1) >= -9007199254740992.000000000, -9007199254740992.000000000 = toInt64(1), -9007199254740992.000000000 != toInt64(1), -9007199254740992.000000000 < toInt64(1), -9007199254740992.000000000 <= toInt64(1), -9007199254740992.000000000 > toInt64(1), -9007199254740992.000000000 >= toInt64(1) ;
+SELECT '1', '-9007199254740994.000000000', 1 = -9007199254740994.000000000, 1 != -9007199254740994.000000000, 1 < -9007199254740994.000000000, 1 <= -9007199254740994.000000000, 1 > -9007199254740994.000000000, 1 >= -9007199254740994.000000000, -9007199254740994.000000000 = 1, -9007199254740994.000000000 != 1, -9007199254740994.000000000 < 1, -9007199254740994.000000000 <= 1, -9007199254740994.000000000 > 1, -9007199254740994.000000000 >= 1 , toUInt8(1) = -9007199254740994.000000000, toUInt8(1) != -9007199254740994.000000000, toUInt8(1) < -9007199254740994.000000000, toUInt8(1) <= -9007199254740994.000000000, toUInt8(1) > -9007199254740994.000000000, toUInt8(1) >= -9007199254740994.000000000, -9007199254740994.000000000 = toUInt8(1), -9007199254740994.000000000 != toUInt8(1), -9007199254740994.000000000 < toUInt8(1), -9007199254740994.000000000 <= toUInt8(1), -9007199254740994.000000000 > toUInt8(1), -9007199254740994.000000000 >= toUInt8(1) , toInt8(1) = -9007199254740994.000000000, toInt8(1) != -9007199254740994.000000000, toInt8(1) < -9007199254740994.000000000, toInt8(1) <= -9007199254740994.000000000, toInt8(1) > -9007199254740994.000000000, toInt8(1) >= -9007199254740994.000000000, -9007199254740994.000000000 = toInt8(1), -9007199254740994.000000000 != toInt8(1), -9007199254740994.000000000 < toInt8(1), -9007199254740994.000000000 <= toInt8(1), -9007199254740994.000000000 > toInt8(1), -9007199254740994.000000000 >= toInt8(1) , toUInt16(1) = -9007199254740994.000000000, toUInt16(1) != -9007199254740994.000000000, toUInt16(1) < -9007199254740994.000000000, toUInt16(1) <= -9007199254740994.000000000, toUInt16(1) > -9007199254740994.000000000, toUInt16(1) >= -9007199254740994.000000000, -9007199254740994.000000000 = toUInt16(1), -9007199254740994.000000000 != toUInt16(1), -9007199254740994.000000000 < toUInt16(1), -9007199254740994.000000000 <= toUInt16(1), -9007199254740994.000000000 > toUInt16(1), -9007199254740994.000000000 >= toUInt16(1) , toInt16(1) = -9007199254740994.000000000, toInt16(1) != -9007199254740994.000000000, toInt16(1) < -9007199254740994.000000000, toInt16(1) <= -9007199254740994.000000000, toInt16(1) > -9007199254740994.000000000, toInt16(1) >= -9007199254740994.000000000, -9007199254740994.000000000 = toInt16(1), -9007199254740994.000000000 != toInt16(1), -9007199254740994.000000000 < toInt16(1), -9007199254740994.000000000 <= toInt16(1), -9007199254740994.000000000 > toInt16(1), -9007199254740994.000000000 >= toInt16(1) , toUInt32(1) = -9007199254740994.000000000, toUInt32(1) != -9007199254740994.000000000, toUInt32(1) < -9007199254740994.000000000, toUInt32(1) <= -9007199254740994.000000000, toUInt32(1) > -9007199254740994.000000000, toUInt32(1) >= -9007199254740994.000000000, -9007199254740994.000000000 = toUInt32(1), -9007199254740994.000000000 != toUInt32(1), -9007199254740994.000000000 < toUInt32(1), -9007199254740994.000000000 <= toUInt32(1), -9007199254740994.000000000 > toUInt32(1), -9007199254740994.000000000 >= toUInt32(1) , toInt32(1) = -9007199254740994.000000000, toInt32(1) != -9007199254740994.000000000, toInt32(1) < -9007199254740994.000000000, toInt32(1) <= -9007199254740994.000000000, toInt32(1) > -9007199254740994.000000000, toInt32(1) >= -9007199254740994.000000000, -9007199254740994.000000000 = toInt32(1), -9007199254740994.000000000 != toInt32(1), -9007199254740994.000000000 < toInt32(1), -9007199254740994.000000000 <= toInt32(1), -9007199254740994.000000000 > toInt32(1), -9007199254740994.000000000 >= toInt32(1) , toUInt64(1) = -9007199254740994.000000000, toUInt64(1) != -9007199254740994.000000000, toUInt64(1) < -9007199254740994.000000000, toUInt64(1) <= -9007199254740994.000000000, toUInt64(1) > -9007199254740994.000000000, toUInt64(1) >= -9007199254740994.000000000, -9007199254740994.000000000 = toUInt64(1), -9007199254740994.000000000 != toUInt64(1), -9007199254740994.000000000 < toUInt64(1), -9007199254740994.000000000 <= toUInt64(1), -9007199254740994.000000000 > toUInt64(1), -9007199254740994.000000000 >= toUInt64(1) , toInt64(1) = -9007199254740994.000000000, toInt64(1) != -9007199254740994.000000000, toInt64(1) < -9007199254740994.000000000, toInt64(1) <= -9007199254740994.000000000, toInt64(1) > -9007199254740994.000000000, toInt64(1) >= -9007199254740994.000000000, -9007199254740994.000000000 = toInt64(1), -9007199254740994.000000000 != toInt64(1), -9007199254740994.000000000 < toInt64(1), -9007199254740994.000000000 <= toInt64(1), -9007199254740994.000000000 > toInt64(1), -9007199254740994.000000000 >= toInt64(1) ;
+SELECT '1', '104.000000000', 1 = 104.000000000, 1 != 104.000000000, 1 < 104.000000000, 1 <= 104.000000000, 1 > 104.000000000, 1 >= 104.000000000, 104.000000000 = 1, 104.000000000 != 1, 104.000000000 < 1, 104.000000000 <= 1, 104.000000000 > 1, 104.000000000 >= 1 , toUInt8(1) = 104.000000000, toUInt8(1) != 104.000000000, toUInt8(1) < 104.000000000, toUInt8(1) <= 104.000000000, toUInt8(1) > 104.000000000, toUInt8(1) >= 104.000000000, 104.000000000 = toUInt8(1), 104.000000000 != toUInt8(1), 104.000000000 < toUInt8(1), 104.000000000 <= toUInt8(1), 104.000000000 > toUInt8(1), 104.000000000 >= toUInt8(1) , toInt8(1) = 104.000000000, toInt8(1) != 104.000000000, toInt8(1) < 104.000000000, toInt8(1) <= 104.000000000, toInt8(1) > 104.000000000, toInt8(1) >= 104.000000000, 104.000000000 = toInt8(1), 104.000000000 != toInt8(1), 104.000000000 < toInt8(1), 104.000000000 <= toInt8(1), 104.000000000 > toInt8(1), 104.000000000 >= toInt8(1) , toUInt16(1) = 104.000000000, toUInt16(1) != 104.000000000, toUInt16(1) < 104.000000000, toUInt16(1) <= 104.000000000, toUInt16(1) > 104.000000000, toUInt16(1) >= 104.000000000, 104.000000000 = toUInt16(1), 104.000000000 != toUInt16(1), 104.000000000 < toUInt16(1), 104.000000000 <= toUInt16(1), 104.000000000 > toUInt16(1), 104.000000000 >= toUInt16(1) , toInt16(1) = 104.000000000, toInt16(1) != 104.000000000, toInt16(1) < 104.000000000, toInt16(1) <= 104.000000000, toInt16(1) > 104.000000000, toInt16(1) >= 104.000000000, 104.000000000 = toInt16(1), 104.000000000 != toInt16(1), 104.000000000 < toInt16(1), 104.000000000 <= toInt16(1), 104.000000000 > toInt16(1), 104.000000000 >= toInt16(1) , toUInt32(1) = 104.000000000, toUInt32(1) != 104.000000000, toUInt32(1) < 104.000000000, toUInt32(1) <= 104.000000000, toUInt32(1) > 104.000000000, toUInt32(1) >= 104.000000000, 104.000000000 = toUInt32(1), 104.000000000 != toUInt32(1), 104.000000000 < toUInt32(1), 104.000000000 <= toUInt32(1), 104.000000000 > toUInt32(1), 104.000000000 >= toUInt32(1) , toInt32(1) = 104.000000000, toInt32(1) != 104.000000000, toInt32(1) < 104.000000000, toInt32(1) <= 104.000000000, toInt32(1) > 104.000000000, toInt32(1) >= 104.000000000, 104.000000000 = toInt32(1), 104.000000000 != toInt32(1), 104.000000000 < toInt32(1), 104.000000000 <= toInt32(1), 104.000000000 > toInt32(1), 104.000000000 >= toInt32(1) , toUInt64(1) = 104.000000000, toUInt64(1) != 104.000000000, toUInt64(1) < 104.000000000, toUInt64(1) <= 104.000000000, toUInt64(1) > 104.000000000, toUInt64(1) >= 104.000000000, 104.000000000 = toUInt64(1), 104.000000000 != toUInt64(1), 104.000000000 < toUInt64(1), 104.000000000 <= toUInt64(1), 104.000000000 > toUInt64(1), 104.000000000 >= toUInt64(1) , toInt64(1) = 104.000000000, toInt64(1) != 104.000000000, toInt64(1) < 104.000000000, toInt64(1) <= 104.000000000, toInt64(1) > 104.000000000, toInt64(1) >= 104.000000000, 104.000000000 = toInt64(1), 104.000000000 != toInt64(1), 104.000000000 < toInt64(1), 104.000000000 <= toInt64(1), 104.000000000 > toInt64(1), 104.000000000 >= toInt64(1) ;
+SELECT '1', '-4503599627370496.000000000', 1 = -4503599627370496.000000000, 1 != -4503599627370496.000000000, 1 < -4503599627370496.000000000, 1 <= -4503599627370496.000000000, 1 > -4503599627370496.000000000, 1 >= -4503599627370496.000000000, -4503599627370496.000000000 = 1, -4503599627370496.000000000 != 1, -4503599627370496.000000000 < 1, -4503599627370496.000000000 <= 1, -4503599627370496.000000000 > 1, -4503599627370496.000000000 >= 1 , toUInt8(1) = -4503599627370496.000000000, toUInt8(1) != -4503599627370496.000000000, toUInt8(1) < -4503599627370496.000000000, toUInt8(1) <= -4503599627370496.000000000, toUInt8(1) > -4503599627370496.000000000, toUInt8(1) >= -4503599627370496.000000000, -4503599627370496.000000000 = toUInt8(1), -4503599627370496.000000000 != toUInt8(1), -4503599627370496.000000000 < toUInt8(1), -4503599627370496.000000000 <= toUInt8(1), -4503599627370496.000000000 > toUInt8(1), -4503599627370496.000000000 >= toUInt8(1) , toInt8(1) = -4503599627370496.000000000, toInt8(1) != -4503599627370496.000000000, toInt8(1) < -4503599627370496.000000000, toInt8(1) <= -4503599627370496.000000000, toInt8(1) > -4503599627370496.000000000, toInt8(1) >= -4503599627370496.000000000, -4503599627370496.000000000 = toInt8(1), -4503599627370496.000000000 != toInt8(1), -4503599627370496.000000000 < toInt8(1), -4503599627370496.000000000 <= toInt8(1), -4503599627370496.000000000 > toInt8(1), -4503599627370496.000000000 >= toInt8(1) , toUInt16(1) = -4503599627370496.000000000, toUInt16(1) != -4503599627370496.000000000, toUInt16(1) < -4503599627370496.000000000, toUInt16(1) <= -4503599627370496.000000000, toUInt16(1) > -4503599627370496.000000000, toUInt16(1) >= -4503599627370496.000000000, -4503599627370496.000000000 = toUInt16(1), -4503599627370496.000000000 != toUInt16(1), -4503599627370496.000000000 < toUInt16(1), -4503599627370496.000000000 <= toUInt16(1), -4503599627370496.000000000 > toUInt16(1), -4503599627370496.000000000 >= toUInt16(1) , toInt16(1) = -4503599627370496.000000000, toInt16(1) != -4503599627370496.000000000, toInt16(1) < -4503599627370496.000000000, toInt16(1) <= -4503599627370496.000000000, toInt16(1) > -4503599627370496.000000000, toInt16(1) >= -4503599627370496.000000000, -4503599627370496.000000000 = toInt16(1), -4503599627370496.000000000 != toInt16(1), -4503599627370496.000000000 < toInt16(1), -4503599627370496.000000000 <= toInt16(1), -4503599627370496.000000000 > toInt16(1), -4503599627370496.000000000 >= toInt16(1) , toUInt32(1) = -4503599627370496.000000000, toUInt32(1) != -4503599627370496.000000000, toUInt32(1) < -4503599627370496.000000000, toUInt32(1) <= -4503599627370496.000000000, toUInt32(1) > -4503599627370496.000000000, toUInt32(1) >= -4503599627370496.000000000, -4503599627370496.000000000 = toUInt32(1), -4503599627370496.000000000 != toUInt32(1), -4503599627370496.000000000 < toUInt32(1), -4503599627370496.000000000 <= toUInt32(1), -4503599627370496.000000000 > toUInt32(1), -4503599627370496.000000000 >= toUInt32(1) , toInt32(1) = -4503599627370496.000000000, toInt32(1) != -4503599627370496.000000000, toInt32(1) < -4503599627370496.000000000, toInt32(1) <= -4503599627370496.000000000, toInt32(1) > -4503599627370496.000000000, toInt32(1) >= -4503599627370496.000000000, -4503599627370496.000000000 = toInt32(1), -4503599627370496.000000000 != toInt32(1), -4503599627370496.000000000 < toInt32(1), -4503599627370496.000000000 <= toInt32(1), -4503599627370496.000000000 > toInt32(1), -4503599627370496.000000000 >= toInt32(1) , toUInt64(1) = -4503599627370496.000000000, toUInt64(1) != -4503599627370496.000000000, toUInt64(1) < -4503599627370496.000000000, toUInt64(1) <= -4503599627370496.000000000, toUInt64(1) > -4503599627370496.000000000, toUInt64(1) >= -4503599627370496.000000000, -4503599627370496.000000000 = toUInt64(1), -4503599627370496.000000000 != toUInt64(1), -4503599627370496.000000000 < toUInt64(1), -4503599627370496.000000000 <= toUInt64(1), -4503599627370496.000000000 > toUInt64(1), -4503599627370496.000000000 >= toUInt64(1) , toInt64(1) = -4503599627370496.000000000, toInt64(1) != -4503599627370496.000000000, toInt64(1) < -4503599627370496.000000000, toInt64(1) <= -4503599627370496.000000000, toInt64(1) > -4503599627370496.000000000, toInt64(1) >= -4503599627370496.000000000, -4503599627370496.000000000 = toInt64(1), -4503599627370496.000000000 != toInt64(1), -4503599627370496.000000000 < toInt64(1), -4503599627370496.000000000 <= toInt64(1), -4503599627370496.000000000 > toInt64(1), -4503599627370496.000000000 >= toInt64(1) ;
+SELECT '1', '-0.500000000', 1 = -0.500000000, 1 != -0.500000000, 1 < -0.500000000, 1 <= -0.500000000, 1 > -0.500000000, 1 >= -0.500000000, -0.500000000 = 1, -0.500000000 != 1, -0.500000000 < 1, -0.500000000 <= 1, -0.500000000 > 1, -0.500000000 >= 1 , toUInt8(1) = -0.500000000, toUInt8(1) != -0.500000000, toUInt8(1) < -0.500000000, toUInt8(1) <= -0.500000000, toUInt8(1) > -0.500000000, toUInt8(1) >= -0.500000000, -0.500000000 = toUInt8(1), -0.500000000 != toUInt8(1), -0.500000000 < toUInt8(1), -0.500000000 <= toUInt8(1), -0.500000000 > toUInt8(1), -0.500000000 >= toUInt8(1) , toInt8(1) = -0.500000000, toInt8(1) != -0.500000000, toInt8(1) < -0.500000000, toInt8(1) <= -0.500000000, toInt8(1) > -0.500000000, toInt8(1) >= -0.500000000, -0.500000000 = toInt8(1), -0.500000000 != toInt8(1), -0.500000000 < toInt8(1), -0.500000000 <= toInt8(1), -0.500000000 > toInt8(1), -0.500000000 >= toInt8(1) , toUInt16(1) = -0.500000000, toUInt16(1) != -0.500000000, toUInt16(1) < -0.500000000, toUInt16(1) <= -0.500000000, toUInt16(1) > -0.500000000, toUInt16(1) >= -0.500000000, -0.500000000 = toUInt16(1), -0.500000000 != toUInt16(1), -0.500000000 < toUInt16(1), -0.500000000 <= toUInt16(1), -0.500000000 > toUInt16(1), -0.500000000 >= toUInt16(1) , toInt16(1) = -0.500000000, toInt16(1) != -0.500000000, toInt16(1) < -0.500000000, toInt16(1) <= -0.500000000, toInt16(1) > -0.500000000, toInt16(1) >= -0.500000000, -0.500000000 = toInt16(1), -0.500000000 != toInt16(1), -0.500000000 < toInt16(1), -0.500000000 <= toInt16(1), -0.500000000 > toInt16(1), -0.500000000 >= toInt16(1) , toUInt32(1) = -0.500000000, toUInt32(1) != -0.500000000, toUInt32(1) < -0.500000000, toUInt32(1) <= -0.500000000, toUInt32(1) > -0.500000000, toUInt32(1) >= -0.500000000, -0.500000000 = toUInt32(1), -0.500000000 != toUInt32(1), -0.500000000 < toUInt32(1), -0.500000000 <= toUInt32(1), -0.500000000 > toUInt32(1), -0.500000000 >= toUInt32(1) , toInt32(1) = -0.500000000, toInt32(1) != -0.500000000, toInt32(1) < -0.500000000, toInt32(1) <= -0.500000000, toInt32(1) > -0.500000000, toInt32(1) >= -0.500000000, -0.500000000 = toInt32(1), -0.500000000 != toInt32(1), -0.500000000 < toInt32(1), -0.500000000 <= toInt32(1), -0.500000000 > toInt32(1), -0.500000000 >= toInt32(1) , toUInt64(1) = -0.500000000, toUInt64(1) != -0.500000000, toUInt64(1) < -0.500000000, toUInt64(1) <= -0.500000000, toUInt64(1) > -0.500000000, toUInt64(1) >= -0.500000000, -0.500000000 = toUInt64(1), -0.500000000 != toUInt64(1), -0.500000000 < toUInt64(1), -0.500000000 <= toUInt64(1), -0.500000000 > toUInt64(1), -0.500000000 >= toUInt64(1) , toInt64(1) = -0.500000000, toInt64(1) != -0.500000000, toInt64(1) < -0.500000000, toInt64(1) <= -0.500000000, toInt64(1) > -0.500000000, toInt64(1) >= -0.500000000, -0.500000000 = toInt64(1), -0.500000000 != toInt64(1), -0.500000000 < toInt64(1), -0.500000000 <= toInt64(1), -0.500000000 > toInt64(1), -0.500000000 >= toInt64(1) ;
+SELECT '1', '0.500000000', 1 = 0.500000000, 1 != 0.500000000, 1 < 0.500000000, 1 <= 0.500000000, 1 > 0.500000000, 1 >= 0.500000000, 0.500000000 = 1, 0.500000000 != 1, 0.500000000 < 1, 0.500000000 <= 1, 0.500000000 > 1, 0.500000000 >= 1 , toUInt8(1) = 0.500000000, toUInt8(1) != 0.500000000, toUInt8(1) < 0.500000000, toUInt8(1) <= 0.500000000, toUInt8(1) > 0.500000000, toUInt8(1) >= 0.500000000, 0.500000000 = toUInt8(1), 0.500000000 != toUInt8(1), 0.500000000 < toUInt8(1), 0.500000000 <= toUInt8(1), 0.500000000 > toUInt8(1), 0.500000000 >= toUInt8(1) , toInt8(1) = 0.500000000, toInt8(1) != 0.500000000, toInt8(1) < 0.500000000, toInt8(1) <= 0.500000000, toInt8(1) > 0.500000000, toInt8(1) >= 0.500000000, 0.500000000 = toInt8(1), 0.500000000 != toInt8(1), 0.500000000 < toInt8(1), 0.500000000 <= toInt8(1), 0.500000000 > toInt8(1), 0.500000000 >= toInt8(1) , toUInt16(1) = 0.500000000, toUInt16(1) != 0.500000000, toUInt16(1) < 0.500000000, toUInt16(1) <= 0.500000000, toUInt16(1) > 0.500000000, toUInt16(1) >= 0.500000000, 0.500000000 = toUInt16(1), 0.500000000 != toUInt16(1), 0.500000000 < toUInt16(1), 0.500000000 <= toUInt16(1), 0.500000000 > toUInt16(1), 0.500000000 >= toUInt16(1) , toInt16(1) = 0.500000000, toInt16(1) != 0.500000000, toInt16(1) < 0.500000000, toInt16(1) <= 0.500000000, toInt16(1) > 0.500000000, toInt16(1) >= 0.500000000, 0.500000000 = toInt16(1), 0.500000000 != toInt16(1), 0.500000000 < toInt16(1), 0.500000000 <= toInt16(1), 0.500000000 > toInt16(1), 0.500000000 >= toInt16(1) , toUInt32(1) = 0.500000000, toUInt32(1) != 0.500000000, toUInt32(1) < 0.500000000, toUInt32(1) <= 0.500000000, toUInt32(1) > 0.500000000, toUInt32(1) >= 0.500000000, 0.500000000 = toUInt32(1), 0.500000000 != toUInt32(1), 0.500000000 < toUInt32(1), 0.500000000 <= toUInt32(1), 0.500000000 > toUInt32(1), 0.500000000 >= toUInt32(1) , toInt32(1) = 0.500000000, toInt32(1) != 0.500000000, toInt32(1) < 0.500000000, toInt32(1) <= 0.500000000, toInt32(1) > 0.500000000, toInt32(1) >= 0.500000000, 0.500000000 = toInt32(1), 0.500000000 != toInt32(1), 0.500000000 < toInt32(1), 0.500000000 <= toInt32(1), 0.500000000 > toInt32(1), 0.500000000 >= toInt32(1) , toUInt64(1) = 0.500000000, toUInt64(1) != 0.500000000, toUInt64(1) < 0.500000000, toUInt64(1) <= 0.500000000, toUInt64(1) > 0.500000000, toUInt64(1) >= 0.500000000, 0.500000000 = toUInt64(1), 0.500000000 != toUInt64(1), 0.500000000 < toUInt64(1), 0.500000000 <= toUInt64(1), 0.500000000 > toUInt64(1), 0.500000000 >= toUInt64(1) , toInt64(1) = 0.500000000, toInt64(1) != 0.500000000, toInt64(1) < 0.500000000, toInt64(1) <= 0.500000000, toInt64(1) > 0.500000000, toInt64(1) >= 0.500000000, 0.500000000 = toInt64(1), 0.500000000 != toInt64(1), 0.500000000 < toInt64(1), 0.500000000 <= toInt64(1), 0.500000000 > toInt64(1), 0.500000000 >= toInt64(1) ;
+SELECT '1', '-1.500000000', 1 = -1.500000000, 1 != -1.500000000, 1 < -1.500000000, 1 <= -1.500000000, 1 > -1.500000000, 1 >= -1.500000000, -1.500000000 = 1, -1.500000000 != 1, -1.500000000 < 1, -1.500000000 <= 1, -1.500000000 > 1, -1.500000000 >= 1 , toUInt8(1) = -1.500000000, toUInt8(1) != -1.500000000, toUInt8(1) < -1.500000000, toUInt8(1) <= -1.500000000, toUInt8(1) > -1.500000000, toUInt8(1) >= -1.500000000, -1.500000000 = toUInt8(1), -1.500000000 != toUInt8(1), -1.500000000 < toUInt8(1), -1.500000000 <= toUInt8(1), -1.500000000 > toUInt8(1), -1.500000000 >= toUInt8(1) , toInt8(1) = -1.500000000, toInt8(1) != -1.500000000, toInt8(1) < -1.500000000, toInt8(1) <= -1.500000000, toInt8(1) > -1.500000000, toInt8(1) >= -1.500000000, -1.500000000 = toInt8(1), -1.500000000 != toInt8(1), -1.500000000 < toInt8(1), -1.500000000 <= toInt8(1), -1.500000000 > toInt8(1), -1.500000000 >= toInt8(1) , toUInt16(1) = -1.500000000, toUInt16(1) != -1.500000000, toUInt16(1) < -1.500000000, toUInt16(1) <= -1.500000000, toUInt16(1) > -1.500000000, toUInt16(1) >= -1.500000000, -1.500000000 = toUInt16(1), -1.500000000 != toUInt16(1), -1.500000000 < toUInt16(1), -1.500000000 <= toUInt16(1), -1.500000000 > toUInt16(1), -1.500000000 >= toUInt16(1) , toInt16(1) = -1.500000000, toInt16(1) != -1.500000000, toInt16(1) < -1.500000000, toInt16(1) <= -1.500000000, toInt16(1) > -1.500000000, toInt16(1) >= -1.500000000, -1.500000000 = toInt16(1), -1.500000000 != toInt16(1), -1.500000000 < toInt16(1), -1.500000000 <= toInt16(1), -1.500000000 > toInt16(1), -1.500000000 >= toInt16(1) , toUInt32(1) = -1.500000000, toUInt32(1) != -1.500000000, toUInt32(1) < -1.500000000, toUInt32(1) <= -1.500000000, toUInt32(1) > -1.500000000, toUInt32(1) >= -1.500000000, -1.500000000 = toUInt32(1), -1.500000000 != toUInt32(1), -1.500000000 < toUInt32(1), -1.500000000 <= toUInt32(1), -1.500000000 > toUInt32(1), -1.500000000 >= toUInt32(1) , toInt32(1) = -1.500000000, toInt32(1) != -1.500000000, toInt32(1) < -1.500000000, toInt32(1) <= -1.500000000, toInt32(1) > -1.500000000, toInt32(1) >= -1.500000000, -1.500000000 = toInt32(1), -1.500000000 != toInt32(1), -1.500000000 < toInt32(1), -1.500000000 <= toInt32(1), -1.500000000 > toInt32(1), -1.500000000 >= toInt32(1) , toUInt64(1) = -1.500000000, toUInt64(1) != -1.500000000, toUInt64(1) < -1.500000000, toUInt64(1) <= -1.500000000, toUInt64(1) > -1.500000000, toUInt64(1) >= -1.500000000, -1.500000000 = toUInt64(1), -1.500000000 != toUInt64(1), -1.500000000 < toUInt64(1), -1.500000000 <= toUInt64(1), -1.500000000 > toUInt64(1), -1.500000000 >= toUInt64(1) , toInt64(1) = -1.500000000, toInt64(1) != -1.500000000, toInt64(1) < -1.500000000, toInt64(1) <= -1.500000000, toInt64(1) > -1.500000000, toInt64(1) >= -1.500000000, -1.500000000 = toInt64(1), -1.500000000 != toInt64(1), -1.500000000 < toInt64(1), -1.500000000 <= toInt64(1), -1.500000000 > toInt64(1), -1.500000000 >= toInt64(1) ;
+SELECT '1', '1.500000000', 1 = 1.500000000, 1 != 1.500000000, 1 < 1.500000000, 1 <= 1.500000000, 1 > 1.500000000, 1 >= 1.500000000, 1.500000000 = 1, 1.500000000 != 1, 1.500000000 < 1, 1.500000000 <= 1, 1.500000000 > 1, 1.500000000 >= 1 , toUInt8(1) = 1.500000000, toUInt8(1) != 1.500000000, toUInt8(1) < 1.500000000, toUInt8(1) <= 1.500000000, toUInt8(1) > 1.500000000, toUInt8(1) >= 1.500000000, 1.500000000 = toUInt8(1), 1.500000000 != toUInt8(1), 1.500000000 < toUInt8(1), 1.500000000 <= toUInt8(1), 1.500000000 > toUInt8(1), 1.500000000 >= toUInt8(1) , toInt8(1) = 1.500000000, toInt8(1) != 1.500000000, toInt8(1) < 1.500000000, toInt8(1) <= 1.500000000, toInt8(1) > 1.500000000, toInt8(1) >= 1.500000000, 1.500000000 = toInt8(1), 1.500000000 != toInt8(1), 1.500000000 < toInt8(1), 1.500000000 <= toInt8(1), 1.500000000 > toInt8(1), 1.500000000 >= toInt8(1) , toUInt16(1) = 1.500000000, toUInt16(1) != 1.500000000, toUInt16(1) < 1.500000000, toUInt16(1) <= 1.500000000, toUInt16(1) > 1.500000000, toUInt16(1) >= 1.500000000, 1.500000000 = toUInt16(1), 1.500000000 != toUInt16(1), 1.500000000 < toUInt16(1), 1.500000000 <= toUInt16(1), 1.500000000 > toUInt16(1), 1.500000000 >= toUInt16(1) , toInt16(1) = 1.500000000, toInt16(1) != 1.500000000, toInt16(1) < 1.500000000, toInt16(1) <= 1.500000000, toInt16(1) > 1.500000000, toInt16(1) >= 1.500000000, 1.500000000 = toInt16(1), 1.500000000 != toInt16(1), 1.500000000 < toInt16(1), 1.500000000 <= toInt16(1), 1.500000000 > toInt16(1), 1.500000000 >= toInt16(1) , toUInt32(1) = 1.500000000, toUInt32(1) != 1.500000000, toUInt32(1) < 1.500000000, toUInt32(1) <= 1.500000000, toUInt32(1) > 1.500000000, toUInt32(1) >= 1.500000000, 1.500000000 = toUInt32(1), 1.500000000 != toUInt32(1), 1.500000000 < toUInt32(1), 1.500000000 <= toUInt32(1), 1.500000000 > toUInt32(1), 1.500000000 >= toUInt32(1) , toInt32(1) = 1.500000000, toInt32(1) != 1.500000000, toInt32(1) < 1.500000000, toInt32(1) <= 1.500000000, toInt32(1) > 1.500000000, toInt32(1) >= 1.500000000, 1.500000000 = toInt32(1), 1.500000000 != toInt32(1), 1.500000000 < toInt32(1), 1.500000000 <= toInt32(1), 1.500000000 > toInt32(1), 1.500000000 >= toInt32(1) , toUInt64(1) = 1.500000000, toUInt64(1) != 1.500000000, toUInt64(1) < 1.500000000, toUInt64(1) <= 1.500000000, toUInt64(1) > 1.500000000, toUInt64(1) >= 1.500000000, 1.500000000 = toUInt64(1), 1.500000000 != toUInt64(1), 1.500000000 < toUInt64(1), 1.500000000 <= toUInt64(1), 1.500000000 > toUInt64(1), 1.500000000 >= toUInt64(1) , toInt64(1) = 1.500000000, toInt64(1) != 1.500000000, toInt64(1) < 1.500000000, toInt64(1) <= 1.500000000, toInt64(1) > 1.500000000, toInt64(1) >= 1.500000000, 1.500000000 = toInt64(1), 1.500000000 != toInt64(1), 1.500000000 < toInt64(1), 1.500000000 <= toInt64(1), 1.500000000 > toInt64(1), 1.500000000 >= toInt64(1) ;
+SELECT '1', '9007199254740992.000000000', 1 = 9007199254740992.000000000, 1 != 9007199254740992.000000000, 1 < 9007199254740992.000000000, 1 <= 9007199254740992.000000000, 1 > 9007199254740992.000000000, 1 >= 9007199254740992.000000000, 9007199254740992.000000000 = 1, 9007199254740992.000000000 != 1, 9007199254740992.000000000 < 1, 9007199254740992.000000000 <= 1, 9007199254740992.000000000 > 1, 9007199254740992.000000000 >= 1 , toUInt8(1) = 9007199254740992.000000000, toUInt8(1) != 9007199254740992.000000000, toUInt8(1) < 9007199254740992.000000000, toUInt8(1) <= 9007199254740992.000000000, toUInt8(1) > 9007199254740992.000000000, toUInt8(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt8(1), 9007199254740992.000000000 != toUInt8(1), 9007199254740992.000000000 < toUInt8(1), 9007199254740992.000000000 <= toUInt8(1), 9007199254740992.000000000 > toUInt8(1), 9007199254740992.000000000 >= toUInt8(1) , toInt8(1) = 9007199254740992.000000000, toInt8(1) != 9007199254740992.000000000, toInt8(1) < 9007199254740992.000000000, toInt8(1) <= 9007199254740992.000000000, toInt8(1) > 9007199254740992.000000000, toInt8(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt8(1), 9007199254740992.000000000 != toInt8(1), 9007199254740992.000000000 < toInt8(1), 9007199254740992.000000000 <= toInt8(1), 9007199254740992.000000000 > toInt8(1), 9007199254740992.000000000 >= toInt8(1) , toUInt16(1) = 9007199254740992.000000000, toUInt16(1) != 9007199254740992.000000000, toUInt16(1) < 9007199254740992.000000000, toUInt16(1) <= 9007199254740992.000000000, toUInt16(1) > 9007199254740992.000000000, toUInt16(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt16(1), 9007199254740992.000000000 != toUInt16(1), 9007199254740992.000000000 < toUInt16(1), 9007199254740992.000000000 <= toUInt16(1), 9007199254740992.000000000 > toUInt16(1), 9007199254740992.000000000 >= toUInt16(1) , toInt16(1) = 9007199254740992.000000000, toInt16(1) != 9007199254740992.000000000, toInt16(1) < 9007199254740992.000000000, toInt16(1) <= 9007199254740992.000000000, toInt16(1) > 9007199254740992.000000000, toInt16(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt16(1), 9007199254740992.000000000 != toInt16(1), 9007199254740992.000000000 < toInt16(1), 9007199254740992.000000000 <= toInt16(1), 9007199254740992.000000000 > toInt16(1), 9007199254740992.000000000 >= toInt16(1) , toUInt32(1) = 9007199254740992.000000000, toUInt32(1) != 9007199254740992.000000000, toUInt32(1) < 9007199254740992.000000000, toUInt32(1) <= 9007199254740992.000000000, toUInt32(1) > 9007199254740992.000000000, toUInt32(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt32(1), 9007199254740992.000000000 != toUInt32(1), 9007199254740992.000000000 < toUInt32(1), 9007199254740992.000000000 <= toUInt32(1), 9007199254740992.000000000 > toUInt32(1), 9007199254740992.000000000 >= toUInt32(1) , toInt32(1) = 9007199254740992.000000000, toInt32(1) != 9007199254740992.000000000, toInt32(1) < 9007199254740992.000000000, toInt32(1) <= 9007199254740992.000000000, toInt32(1) > 9007199254740992.000000000, toInt32(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt32(1), 9007199254740992.000000000 != toInt32(1), 9007199254740992.000000000 < toInt32(1), 9007199254740992.000000000 <= toInt32(1), 9007199254740992.000000000 > toInt32(1), 9007199254740992.000000000 >= toInt32(1) , toUInt64(1) = 9007199254740992.000000000, toUInt64(1) != 9007199254740992.000000000, toUInt64(1) < 9007199254740992.000000000, toUInt64(1) <= 9007199254740992.000000000, toUInt64(1) > 9007199254740992.000000000, toUInt64(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt64(1), 9007199254740992.000000000 != toUInt64(1), 9007199254740992.000000000 < toUInt64(1), 9007199254740992.000000000 <= toUInt64(1), 9007199254740992.000000000 > toUInt64(1), 9007199254740992.000000000 >= toUInt64(1) , toInt64(1) = 9007199254740992.000000000, toInt64(1) != 9007199254740992.000000000, toInt64(1) < 9007199254740992.000000000, toInt64(1) <= 9007199254740992.000000000, toInt64(1) > 9007199254740992.000000000, toInt64(1) >= 9007199254740992.000000000, 9007199254740992.000000000 = toInt64(1), 9007199254740992.000000000 != toInt64(1), 9007199254740992.000000000 < toInt64(1), 9007199254740992.000000000 <= toInt64(1), 9007199254740992.000000000 > toInt64(1), 9007199254740992.000000000 >= toInt64(1) ;
+SELECT '1', '2251799813685247.500000000', 1 = 2251799813685247.500000000, 1 != 2251799813685247.500000000, 1 < 2251799813685247.500000000, 1 <= 2251799813685247.500000000, 1 > 2251799813685247.500000000, 1 >= 2251799813685247.500000000, 2251799813685247.500000000 = 1, 2251799813685247.500000000 != 1, 2251799813685247.500000000 < 1, 2251799813685247.500000000 <= 1, 2251799813685247.500000000 > 1, 2251799813685247.500000000 >= 1 , toUInt8(1) = 2251799813685247.500000000, toUInt8(1) != 2251799813685247.500000000, toUInt8(1) < 2251799813685247.500000000, toUInt8(1) <= 2251799813685247.500000000, toUInt8(1) > 2251799813685247.500000000, toUInt8(1) >= 2251799813685247.500000000, 2251799813685247.500000000 = toUInt8(1), 2251799813685247.500000000 != toUInt8(1), 2251799813685247.500000000 < toUInt8(1), 2251799813685247.500000000 <= toUInt8(1), 2251799813685247.500000000 > toUInt8(1), 2251799813685247.500000000 >= toUInt8(1) , toInt8(1) = 2251799813685247.500000000, toInt8(1) != 2251799813685247.500000000, toInt8(1) < 2251799813685247.500000000, toInt8(1) <= 2251799813685247.500000000, toInt8(1) > 2251799813685247.500000000, toInt8(1) >= 2251799813685247.500000000, 2251799813685247.500000000 = toInt8(1), 2251799813685247.500000000 != toInt8(1), 2251799813685247.500000000 < toInt8(1), 2251799813685247.500000000 <= toInt8(1), 2251799813685247.500000000 > toInt8(1), 2251799813685247.500000000 >= toInt8(1) , toUInt16(1) = 2251799813685247.500000000, toUInt16(1) != 2251799813685247.500000000, toUInt16(1) < 2251799813685247.500000000, toUInt16(1) <= 2251799813685247.500000000, toUInt16(1) > 2251799813685247.500000000, toUInt16(1) >= 2251799813685247.500000000, 2251799813685247.500000000 = toUInt16(1), 2251799813685247.500000000 != toUInt16(1), 2251799813685247.500000000 < toUInt16(1), 2251799813685247.500000000 <= toUInt16(1), 2251799813685247.500000000 > toUInt16(1), 2251799813685247.500000000 >= toUInt16(1) , toInt16(1) = 2251799813685247.500000000, toInt16(1) != 2251799813685247.500000000, toInt16(1) < 2251799813685247.500000000, toInt16(1) <= 2251799813685247.500000000, toInt16(1) > 2251799813685247.500000000, toInt16(1) >= 2251799813685247.500000000, 2251799813685247.500000000 = toInt16(1), 2251799813685247.500000000 != toInt16(1), 2251799813685247.500000000 < toInt16(1), 2251799813685247.500000000 <= toInt16(1), 2251799813685247.500000000 > toInt16(1), 2251799813685247.500000000 >= toInt16(1) , toUInt32(1) = 2251799813685247.500000000, toUInt32(1) != 2251799813685247.500000000, toUInt32(1) < 2251799813685247.500000000, toUInt32(1) <= 2251799813685247.500000000, toUInt32(1) > 2251799813685247.500000000, toUInt32(1) >= 2251799813685247.500000000, 2251799813685247.500000000 = toUInt32(1), 2251799813685247.500000000 != toUInt32(1), 2251799813685247.500000000 < toUInt32(1), 2251799813685247.500000000 <= toUInt32(1), 2251799813685247.500000000 > toUInt32(1), 2251799813685247.500000000 >= toUInt32(1) , toInt32(1) = 2251799813685247.500000000, toInt32(1) != 2251799813685247.500000000, toInt32(1) < 2251799813685247.500000000, toInt32(1) <= 2251799813685247.500000000, toInt32(1) > 2251799813685247.500000000, toInt32(1) >= 2251799813685247.500000000, 2251799813685247.500000000 = toInt32(1), 2251799813685247.500000000 != toInt32(1), 2251799813685247.500000000 < toInt32(1), 2251799813685247.500000000 <= toInt32(1), 2251799813685247.500000000 > toInt32(1), 2251799813685247.500000000 >= toInt32(1) , toUInt64(1) = 2251799813685247.500000000, toUInt64(1) != 2251799813685247.500000000, toUInt64(1) < 2251799813685247.500000000, toUInt64(1) <= 2251799813685247.500000000, toUInt64(1) > 2251799813685247.500000000, toUInt64(1) >= 2251799813685247.500000000, 2251799813685247.500000000 = toUInt64(1), 2251799813685247.500000000 != toUInt64(1), 2251799813685247.500000000 < toUInt64(1), 2251799813685247.500000000 <= toUInt64(1), 2251799813685247.500000000 > toUInt64(1), 2251799813685247.500000000 >= toUInt64(1) , toInt64(1) = 2251799813685247.500000000, toInt64(1) != 2251799813685247.500000000, toInt64(1) < 2251799813685247.500000000, toInt64(1) <= 2251799813685247.500000000, toInt64(1) > 2251799813685247.500000000, toInt64(1) >= 2251799813685247.500000000, 2251799813685247.500000000 = toInt64(1), 2251799813685247.500000000 != toInt64(1), 2251799813685247.500000000 < toInt64(1), 2251799813685247.500000000 <= toInt64(1), 2251799813685247.500000000 > toInt64(1), 2251799813685247.500000000 >= toInt64(1) ;
+SELECT '1', '2251799813685248.500000000', 1 = 2251799813685248.500000000, 1 != 2251799813685248.500000000, 1 < 2251799813685248.500000000, 1 <= 2251799813685248.500000000, 1 > 2251799813685248.500000000, 1 >= 2251799813685248.500000000, 2251799813685248.500000000 = 1, 2251799813685248.500000000 != 1, 2251799813685248.500000000 < 1, 2251799813685248.500000000 <= 1, 2251799813685248.500000000 > 1, 2251799813685248.500000000 >= 1 , toUInt8(1) = 2251799813685248.500000000, toUInt8(1) != 2251799813685248.500000000, toUInt8(1) < 2251799813685248.500000000, toUInt8(1) <= 2251799813685248.500000000, toUInt8(1) > 2251799813685248.500000000, toUInt8(1) >= 2251799813685248.500000000, 2251799813685248.500000000 = toUInt8(1), 2251799813685248.500000000 != toUInt8(1), 2251799813685248.500000000 < toUInt8(1), 2251799813685248.500000000 <= toUInt8(1), 2251799813685248.500000000 > toUInt8(1), 2251799813685248.500000000 >= toUInt8(1) , toInt8(1) = 2251799813685248.500000000, toInt8(1) != 2251799813685248.500000000, toInt8(1) < 2251799813685248.500000000, toInt8(1) <= 2251799813685248.500000000, toInt8(1) > 2251799813685248.500000000, toInt8(1) >= 2251799813685248.500000000, 2251799813685248.500000000 = toInt8(1), 2251799813685248.500000000 != toInt8(1), 2251799813685248.500000000 < toInt8(1), 2251799813685248.500000000 <= toInt8(1), 2251799813685248.500000000 > toInt8(1), 2251799813685248.500000000 >= toInt8(1) , toUInt16(1) = 2251799813685248.500000000, toUInt16(1) != 2251799813685248.500000000, toUInt16(1) < 2251799813685248.500000000, toUInt16(1) <= 2251799813685248.500000000, toUInt16(1) > 2251799813685248.500000000, toUInt16(1) >= 2251799813685248.500000000, 2251799813685248.500000000 = toUInt16(1), 2251799813685248.500000000 != toUInt16(1), 2251799813685248.500000000 < toUInt16(1), 2251799813685248.500000000 <= toUInt16(1), 2251799813685248.500000000 > toUInt16(1), 2251799813685248.500000000 >= toUInt16(1) , toInt16(1) = 2251799813685248.500000000, toInt16(1) != 2251799813685248.500000000, toInt16(1) < 2251799813685248.500000000, toInt16(1) <= 2251799813685248.500000000, toInt16(1) > 2251799813685248.500000000, toInt16(1) >= 2251799813685248.500000000, 2251799813685248.500000000 = toInt16(1), 2251799813685248.500000000 != toInt16(1), 2251799813685248.500000000 < toInt16(1), 2251799813685248.500000000 <= toInt16(1), 2251799813685248.500000000 > toInt16(1), 2251799813685248.500000000 >= toInt16(1) , toUInt32(1) = 2251799813685248.500000000, toUInt32(1) != 2251799813685248.500000000, toUInt32(1) < 2251799813685248.500000000, toUInt32(1) <= 2251799813685248.500000000, toUInt32(1) > 2251799813685248.500000000, toUInt32(1) >= 2251799813685248.500000000, 2251799813685248.500000000 = toUInt32(1), 2251799813685248.500000000 != toUInt32(1), 2251799813685248.500000000 < toUInt32(1), 2251799813685248.500000000 <= toUInt32(1), 2251799813685248.500000000 > toUInt32(1), 2251799813685248.500000000 >= toUInt32(1) , toInt32(1) = 2251799813685248.500000000, toInt32(1) != 2251799813685248.500000000, toInt32(1) < 2251799813685248.500000000, toInt32(1) <= 2251799813685248.500000000, toInt32(1) > 2251799813685248.500000000, toInt32(1) >= 2251799813685248.500000000, 2251799813685248.500000000 = toInt32(1), 2251799813685248.500000000 != toInt32(1), 2251799813685248.500000000 < toInt32(1), 2251799813685248.500000000 <= toInt32(1), 2251799813685248.500000000 > toInt32(1), 2251799813685248.500000000 >= toInt32(1) , toUInt64(1) = 2251799813685248.500000000, toUInt64(1) != 2251799813685248.500000000, toUInt64(1) < 2251799813685248.500000000, toUInt64(1) <= 2251799813685248.500000000, toUInt64(1) > 2251799813685248.500000000, toUInt64(1) >= 2251799813685248.500000000, 2251799813685248.500000000 = toUInt64(1), 2251799813685248.500000000 != toUInt64(1), 2251799813685248.500000000 < toUInt64(1), 2251799813685248.500000000 <= toUInt64(1), 2251799813685248.500000000 > toUInt64(1), 2251799813685248.500000000 >= toUInt64(1) , toInt64(1) = 2251799813685248.500000000, toInt64(1) != 2251799813685248.500000000, toInt64(1) < 2251799813685248.500000000, toInt64(1) <= 2251799813685248.500000000, toInt64(1) > 2251799813685248.500000000, toInt64(1) >= 2251799813685248.500000000, 2251799813685248.500000000 = toInt64(1), 2251799813685248.500000000 != toInt64(1), 2251799813685248.500000000 < toInt64(1), 2251799813685248.500000000 <= toInt64(1), 2251799813685248.500000000 > toInt64(1), 2251799813685248.500000000 >= toInt64(1) ;
+SELECT '1', '1152921504606846976.000000000', 1 = 1152921504606846976.000000000, 1 != 1152921504606846976.000000000, 1 < 1152921504606846976.000000000, 1 <= 1152921504606846976.000000000, 1 > 1152921504606846976.000000000, 1 >= 1152921504606846976.000000000, 1152921504606846976.000000000 = 1, 1152921504606846976.000000000 != 1, 1152921504606846976.000000000 < 1, 1152921504606846976.000000000 <= 1, 1152921504606846976.000000000 > 1, 1152921504606846976.000000000 >= 1 , toUInt8(1) = 1152921504606846976.000000000, toUInt8(1) != 1152921504606846976.000000000, toUInt8(1) < 1152921504606846976.000000000, toUInt8(1) <= 1152921504606846976.000000000, toUInt8(1) > 1152921504606846976.000000000, toUInt8(1) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toUInt8(1), 1152921504606846976.000000000 != toUInt8(1), 1152921504606846976.000000000 < toUInt8(1), 1152921504606846976.000000000 <= toUInt8(1), 1152921504606846976.000000000 > toUInt8(1), 1152921504606846976.000000000 >= toUInt8(1) , toInt8(1) = 1152921504606846976.000000000, toInt8(1) != 1152921504606846976.000000000, toInt8(1) < 1152921504606846976.000000000, toInt8(1) <= 1152921504606846976.000000000, toInt8(1) > 1152921504606846976.000000000, toInt8(1) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toInt8(1), 1152921504606846976.000000000 != toInt8(1), 1152921504606846976.000000000 < toInt8(1), 1152921504606846976.000000000 <= toInt8(1), 1152921504606846976.000000000 > toInt8(1), 1152921504606846976.000000000 >= toInt8(1) , toUInt16(1) = 1152921504606846976.000000000, toUInt16(1) != 1152921504606846976.000000000, toUInt16(1) < 1152921504606846976.000000000, toUInt16(1) <= 1152921504606846976.000000000, toUInt16(1) > 1152921504606846976.000000000, toUInt16(1) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toUInt16(1), 1152921504606846976.000000000 != toUInt16(1), 1152921504606846976.000000000 < toUInt16(1), 1152921504606846976.000000000 <= toUInt16(1), 1152921504606846976.000000000 > toUInt16(1), 1152921504606846976.000000000 >= toUInt16(1) , toInt16(1) = 1152921504606846976.000000000, toInt16(1) != 1152921504606846976.000000000, toInt16(1) < 1152921504606846976.000000000, toInt16(1) <= 1152921504606846976.000000000, toInt16(1) > 1152921504606846976.000000000, toInt16(1) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toInt16(1), 1152921504606846976.000000000 != toInt16(1), 1152921504606846976.000000000 < toInt16(1), 1152921504606846976.000000000 <= toInt16(1), 1152921504606846976.000000000 > toInt16(1), 1152921504606846976.000000000 >= toInt16(1) , toUInt32(1) = 1152921504606846976.000000000, toUInt32(1) != 1152921504606846976.000000000, toUInt32(1) < 1152921504606846976.000000000, toUInt32(1) <= 1152921504606846976.000000000, toUInt32(1) > 1152921504606846976.000000000, toUInt32(1) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toUInt32(1), 1152921504606846976.000000000 != toUInt32(1), 1152921504606846976.000000000 < toUInt32(1), 1152921504606846976.000000000 <= toUInt32(1), 1152921504606846976.000000000 > toUInt32(1), 1152921504606846976.000000000 >= toUInt32(1) , toInt32(1) = 1152921504606846976.000000000, toInt32(1) != 1152921504606846976.000000000, toInt32(1) < 1152921504606846976.000000000, toInt32(1) <= 1152921504606846976.000000000, toInt32(1) > 1152921504606846976.000000000, toInt32(1) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toInt32(1), 1152921504606846976.000000000 != toInt32(1), 1152921504606846976.000000000 < toInt32(1), 1152921504606846976.000000000 <= toInt32(1), 1152921504606846976.000000000 > toInt32(1), 1152921504606846976.000000000 >= toInt32(1) , toUInt64(1) = 1152921504606846976.000000000, toUInt64(1) != 1152921504606846976.000000000, toUInt64(1) < 1152921504606846976.000000000, toUInt64(1) <= 1152921504606846976.000000000, toUInt64(1) > 1152921504606846976.000000000, toUInt64(1) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toUInt64(1), 1152921504606846976.000000000 != toUInt64(1), 1152921504606846976.000000000 < toUInt64(1), 1152921504606846976.000000000 <= toUInt64(1), 1152921504606846976.000000000 > toUInt64(1), 1152921504606846976.000000000 >= toUInt64(1) , toInt64(1) = 1152921504606846976.000000000, toInt64(1) != 1152921504606846976.000000000, toInt64(1) < 1152921504606846976.000000000, toInt64(1) <= 1152921504606846976.000000000, toInt64(1) > 1152921504606846976.000000000, toInt64(1) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toInt64(1), 1152921504606846976.000000000 != toInt64(1), 1152921504606846976.000000000 < toInt64(1), 1152921504606846976.000000000 <= toInt64(1), 1152921504606846976.000000000 > toInt64(1), 1152921504606846976.000000000 >= toInt64(1) ;
+SELECT '1', '-1152921504606846976.000000000', 1 = -1152921504606846976.000000000, 1 != -1152921504606846976.000000000, 1 < -1152921504606846976.000000000, 1 <= -1152921504606846976.000000000, 1 > -1152921504606846976.000000000, 1 >= -1152921504606846976.000000000, -1152921504606846976.000000000 = 1, -1152921504606846976.000000000 != 1, -1152921504606846976.000000000 < 1, -1152921504606846976.000000000 <= 1, -1152921504606846976.000000000 > 1, -1152921504606846976.000000000 >= 1 , toUInt8(1) = -1152921504606846976.000000000, toUInt8(1) != -1152921504606846976.000000000, toUInt8(1) < -1152921504606846976.000000000, toUInt8(1) <= -1152921504606846976.000000000, toUInt8(1) > -1152921504606846976.000000000, toUInt8(1) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toUInt8(1), -1152921504606846976.000000000 != toUInt8(1), -1152921504606846976.000000000 < toUInt8(1), -1152921504606846976.000000000 <= toUInt8(1), -1152921504606846976.000000000 > toUInt8(1), -1152921504606846976.000000000 >= toUInt8(1) , toInt8(1) = -1152921504606846976.000000000, toInt8(1) != -1152921504606846976.000000000, toInt8(1) < -1152921504606846976.000000000, toInt8(1) <= -1152921504606846976.000000000, toInt8(1) > -1152921504606846976.000000000, toInt8(1) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toInt8(1), -1152921504606846976.000000000 != toInt8(1), -1152921504606846976.000000000 < toInt8(1), -1152921504606846976.000000000 <= toInt8(1), -1152921504606846976.000000000 > toInt8(1), -1152921504606846976.000000000 >= toInt8(1) , toUInt16(1) = -1152921504606846976.000000000, toUInt16(1) != -1152921504606846976.000000000, toUInt16(1) < -1152921504606846976.000000000, toUInt16(1) <= -1152921504606846976.000000000, toUInt16(1) > -1152921504606846976.000000000, toUInt16(1) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toUInt16(1), -1152921504606846976.000000000 != toUInt16(1), -1152921504606846976.000000000 < toUInt16(1), -1152921504606846976.000000000 <= toUInt16(1), -1152921504606846976.000000000 > toUInt16(1), -1152921504606846976.000000000 >= toUInt16(1) , toInt16(1) = -1152921504606846976.000000000, toInt16(1) != -1152921504606846976.000000000, toInt16(1) < -1152921504606846976.000000000, toInt16(1) <= -1152921504606846976.000000000, toInt16(1) > -1152921504606846976.000000000, toInt16(1) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toInt16(1), -1152921504606846976.000000000 != toInt16(1), -1152921504606846976.000000000 < toInt16(1), -1152921504606846976.000000000 <= toInt16(1), -1152921504606846976.000000000 > toInt16(1), -1152921504606846976.000000000 >= toInt16(1) , toUInt32(1) = -1152921504606846976.000000000, toUInt32(1) != -1152921504606846976.000000000, toUInt32(1) < -1152921504606846976.000000000, toUInt32(1) <= -1152921504606846976.000000000, toUInt32(1) > -1152921504606846976.000000000, toUInt32(1) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toUInt32(1), -1152921504606846976.000000000 != toUInt32(1), -1152921504606846976.000000000 < toUInt32(1), -1152921504606846976.000000000 <= toUInt32(1), -1152921504606846976.000000000 > toUInt32(1), -1152921504606846976.000000000 >= toUInt32(1) , toInt32(1) = -1152921504606846976.000000000, toInt32(1) != -1152921504606846976.000000000, toInt32(1) < -1152921504606846976.000000000, toInt32(1) <= -1152921504606846976.000000000, toInt32(1) > -1152921504606846976.000000000, toInt32(1) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toInt32(1), -1152921504606846976.000000000 != toInt32(1), -1152921504606846976.000000000 < toInt32(1), -1152921504606846976.000000000 <= toInt32(1), -1152921504606846976.000000000 > toInt32(1), -1152921504606846976.000000000 >= toInt32(1) , toUInt64(1) = -1152921504606846976.000000000, toUInt64(1) != -1152921504606846976.000000000, toUInt64(1) < -1152921504606846976.000000000, toUInt64(1) <= -1152921504606846976.000000000, toUInt64(1) > -1152921504606846976.000000000, toUInt64(1) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toUInt64(1), -1152921504606846976.000000000 != toUInt64(1), -1152921504606846976.000000000 < toUInt64(1), -1152921504606846976.000000000 <= toUInt64(1), -1152921504606846976.000000000 > toUInt64(1), -1152921504606846976.000000000 >= toUInt64(1) , toInt64(1) = -1152921504606846976.000000000, toInt64(1) != -1152921504606846976.000000000, toInt64(1) < -1152921504606846976.000000000, toInt64(1) <= -1152921504606846976.000000000, toInt64(1) > -1152921504606846976.000000000, toInt64(1) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toInt64(1), -1152921504606846976.000000000 != toInt64(1), -1152921504606846976.000000000 < toInt64(1), -1152921504606846976.000000000 <= toInt64(1), -1152921504606846976.000000000 > toInt64(1), -1152921504606846976.000000000 >= toInt64(1) ;
+SELECT '1', '-9223372036854786048.000000000', 1 = -9223372036854786048.000000000, 1 != -9223372036854786048.000000000, 1 < -9223372036854786048.000000000, 1 <= -9223372036854786048.000000000, 1 > -9223372036854786048.000000000, 1 >= -9223372036854786048.000000000, -9223372036854786048.000000000 = 1, -9223372036854786048.000000000 != 1, -9223372036854786048.000000000 < 1, -9223372036854786048.000000000 <= 1, -9223372036854786048.000000000 > 1, -9223372036854786048.000000000 >= 1 , toUInt8(1) = -9223372036854786048.000000000, toUInt8(1) != -9223372036854786048.000000000, toUInt8(1) < -9223372036854786048.000000000, toUInt8(1) <= -9223372036854786048.000000000, toUInt8(1) > -9223372036854786048.000000000, toUInt8(1) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toUInt8(1), -9223372036854786048.000000000 != toUInt8(1), -9223372036854786048.000000000 < toUInt8(1), -9223372036854786048.000000000 <= toUInt8(1), -9223372036854786048.000000000 > toUInt8(1), -9223372036854786048.000000000 >= toUInt8(1) , toInt8(1) = -9223372036854786048.000000000, toInt8(1) != -9223372036854786048.000000000, toInt8(1) < -9223372036854786048.000000000, toInt8(1) <= -9223372036854786048.000000000, toInt8(1) > -9223372036854786048.000000000, toInt8(1) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toInt8(1), -9223372036854786048.000000000 != toInt8(1), -9223372036854786048.000000000 < toInt8(1), -9223372036854786048.000000000 <= toInt8(1), -9223372036854786048.000000000 > toInt8(1), -9223372036854786048.000000000 >= toInt8(1) , toUInt16(1) = -9223372036854786048.000000000, toUInt16(1) != -9223372036854786048.000000000, toUInt16(1) < -9223372036854786048.000000000, toUInt16(1) <= -9223372036854786048.000000000, toUInt16(1) > -9223372036854786048.000000000, toUInt16(1) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toUInt16(1), -9223372036854786048.000000000 != toUInt16(1), -9223372036854786048.000000000 < toUInt16(1), -9223372036854786048.000000000 <= toUInt16(1), -9223372036854786048.000000000 > toUInt16(1), -9223372036854786048.000000000 >= toUInt16(1) , toInt16(1) = -9223372036854786048.000000000, toInt16(1) != -9223372036854786048.000000000, toInt16(1) < -9223372036854786048.000000000, toInt16(1) <= -9223372036854786048.000000000, toInt16(1) > -9223372036854786048.000000000, toInt16(1) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toInt16(1), -9223372036854786048.000000000 != toInt16(1), -9223372036854786048.000000000 < toInt16(1), -9223372036854786048.000000000 <= toInt16(1), -9223372036854786048.000000000 > toInt16(1), -9223372036854786048.000000000 >= toInt16(1) , toUInt32(1) = -9223372036854786048.000000000, toUInt32(1) != -9223372036854786048.000000000, toUInt32(1) < -9223372036854786048.000000000, toUInt32(1) <= -9223372036854786048.000000000, toUInt32(1) > -9223372036854786048.000000000, toUInt32(1) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toUInt32(1), -9223372036854786048.000000000 != toUInt32(1), -9223372036854786048.000000000 < toUInt32(1), -9223372036854786048.000000000 <= toUInt32(1), -9223372036854786048.000000000 > toUInt32(1), -9223372036854786048.000000000 >= toUInt32(1) , toInt32(1) = -9223372036854786048.000000000, toInt32(1) != -9223372036854786048.000000000, toInt32(1) < -9223372036854786048.000000000, toInt32(1) <= -9223372036854786048.000000000, toInt32(1) > -9223372036854786048.000000000, toInt32(1) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toInt32(1), -9223372036854786048.000000000 != toInt32(1), -9223372036854786048.000000000 < toInt32(1), -9223372036854786048.000000000 <= toInt32(1), -9223372036854786048.000000000 > toInt32(1), -9223372036854786048.000000000 >= toInt32(1) , toUInt64(1) = -9223372036854786048.000000000, toUInt64(1) != -9223372036854786048.000000000, toUInt64(1) < -9223372036854786048.000000000, toUInt64(1) <= -9223372036854786048.000000000, toUInt64(1) > -9223372036854786048.000000000, toUInt64(1) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toUInt64(1), -9223372036854786048.000000000 != toUInt64(1), -9223372036854786048.000000000 < toUInt64(1), -9223372036854786048.000000000 <= toUInt64(1), -9223372036854786048.000000000 > toUInt64(1), -9223372036854786048.000000000 >= toUInt64(1) , toInt64(1) = -9223372036854786048.000000000, toInt64(1) != -9223372036854786048.000000000, toInt64(1) < -9223372036854786048.000000000, toInt64(1) <= -9223372036854786048.000000000, toInt64(1) > -9223372036854786048.000000000, toInt64(1) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toInt64(1), -9223372036854786048.000000000 != toInt64(1), -9223372036854786048.000000000 < toInt64(1), -9223372036854786048.000000000 <= toInt64(1), -9223372036854786048.000000000 > toInt64(1), -9223372036854786048.000000000 >= toInt64(1) ;
+SELECT '1', '9223372036854786048.000000000', 1 = 9223372036854786048.000000000, 1 != 9223372036854786048.000000000, 1 < 9223372036854786048.000000000, 1 <= 9223372036854786048.000000000, 1 > 9223372036854786048.000000000, 1 >= 9223372036854786048.000000000, 9223372036854786048.000000000 = 1, 9223372036854786048.000000000 != 1, 9223372036854786048.000000000 < 1, 9223372036854786048.000000000 <= 1, 9223372036854786048.000000000 > 1, 9223372036854786048.000000000 >= 1 , toUInt8(1) = 9223372036854786048.000000000, toUInt8(1) != 9223372036854786048.000000000, toUInt8(1) < 9223372036854786048.000000000, toUInt8(1) <= 9223372036854786048.000000000, toUInt8(1) > 9223372036854786048.000000000, toUInt8(1) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toUInt8(1), 9223372036854786048.000000000 != toUInt8(1), 9223372036854786048.000000000 < toUInt8(1), 9223372036854786048.000000000 <= toUInt8(1), 9223372036854786048.000000000 > toUInt8(1), 9223372036854786048.000000000 >= toUInt8(1) , toInt8(1) = 9223372036854786048.000000000, toInt8(1) != 9223372036854786048.000000000, toInt8(1) < 9223372036854786048.000000000, toInt8(1) <= 9223372036854786048.000000000, toInt8(1) > 9223372036854786048.000000000, toInt8(1) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toInt8(1), 9223372036854786048.000000000 != toInt8(1), 9223372036854786048.000000000 < toInt8(1), 9223372036854786048.000000000 <= toInt8(1), 9223372036854786048.000000000 > toInt8(1), 9223372036854786048.000000000 >= toInt8(1) , toUInt16(1) = 9223372036854786048.000000000, toUInt16(1) != 9223372036854786048.000000000, toUInt16(1) < 9223372036854786048.000000000, toUInt16(1) <= 9223372036854786048.000000000, toUInt16(1) > 9223372036854786048.000000000, toUInt16(1) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toUInt16(1), 9223372036854786048.000000000 != toUInt16(1), 9223372036854786048.000000000 < toUInt16(1), 9223372036854786048.000000000 <= toUInt16(1), 9223372036854786048.000000000 > toUInt16(1), 9223372036854786048.000000000 >= toUInt16(1) , toInt16(1) = 9223372036854786048.000000000, toInt16(1) != 9223372036854786048.000000000, toInt16(1) < 9223372036854786048.000000000, toInt16(1) <= 9223372036854786048.000000000, toInt16(1) > 9223372036854786048.000000000, toInt16(1) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toInt16(1), 9223372036854786048.000000000 != toInt16(1), 9223372036854786048.000000000 < toInt16(1), 9223372036854786048.000000000 <= toInt16(1), 9223372036854786048.000000000 > toInt16(1), 9223372036854786048.000000000 >= toInt16(1) , toUInt32(1) = 9223372036854786048.000000000, toUInt32(1) != 9223372036854786048.000000000, toUInt32(1) < 9223372036854786048.000000000, toUInt32(1) <= 9223372036854786048.000000000, toUInt32(1) > 9223372036854786048.000000000, toUInt32(1) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toUInt32(1), 9223372036854786048.000000000 != toUInt32(1), 9223372036854786048.000000000 < toUInt32(1), 9223372036854786048.000000000 <= toUInt32(1), 9223372036854786048.000000000 > toUInt32(1), 9223372036854786048.000000000 >= toUInt32(1) , toInt32(1) = 9223372036854786048.000000000, toInt32(1) != 9223372036854786048.000000000, toInt32(1) < 9223372036854786048.000000000, toInt32(1) <= 9223372036854786048.000000000, toInt32(1) > 9223372036854786048.000000000, toInt32(1) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toInt32(1), 9223372036854786048.000000000 != toInt32(1), 9223372036854786048.000000000 < toInt32(1), 9223372036854786048.000000000 <= toInt32(1), 9223372036854786048.000000000 > toInt32(1), 9223372036854786048.000000000 >= toInt32(1) , toUInt64(1) = 9223372036854786048.000000000, toUInt64(1) != 9223372036854786048.000000000, toUInt64(1) < 9223372036854786048.000000000, toUInt64(1) <= 9223372036854786048.000000000, toUInt64(1) > 9223372036854786048.000000000, toUInt64(1) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toUInt64(1), 9223372036854786048.000000000 != toUInt64(1), 9223372036854786048.000000000 < toUInt64(1), 9223372036854786048.000000000 <= toUInt64(1), 9223372036854786048.000000000 > toUInt64(1), 9223372036854786048.000000000 >= toUInt64(1) , toInt64(1) = 9223372036854786048.000000000, toInt64(1) != 9223372036854786048.000000000, toInt64(1) < 9223372036854786048.000000000, toInt64(1) <= 9223372036854786048.000000000, toInt64(1) > 9223372036854786048.000000000, toInt64(1) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toInt64(1), 9223372036854786048.000000000 != toInt64(1), 9223372036854786048.000000000 < toInt64(1), 9223372036854786048.000000000 <= toInt64(1), 9223372036854786048.000000000 > toInt64(1), 9223372036854786048.000000000 >= toInt64(1) ;
+SELECT '18446744073709551615', '0.000000000', 18446744073709551615 = 0.000000000, 18446744073709551615 != 0.000000000, 18446744073709551615 < 0.000000000, 18446744073709551615 <= 0.000000000, 18446744073709551615 > 0.000000000, 18446744073709551615 >= 0.000000000, 0.000000000 = 18446744073709551615, 0.000000000 != 18446744073709551615, 0.000000000 < 18446744073709551615, 0.000000000 <= 18446744073709551615, 0.000000000 > 18446744073709551615, 0.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 0.000000000, toUInt64(18446744073709551615) != 0.000000000, toUInt64(18446744073709551615) < 0.000000000, toUInt64(18446744073709551615) <= 0.000000000, toUInt64(18446744073709551615) > 0.000000000, toUInt64(18446744073709551615) >= 0.000000000, 0.000000000 = toUInt64(18446744073709551615), 0.000000000 != toUInt64(18446744073709551615), 0.000000000 < toUInt64(18446744073709551615), 0.000000000 <= toUInt64(18446744073709551615), 0.000000000 > toUInt64(18446744073709551615), 0.000000000 >= toUInt64(18446744073709551615) ;
+SELECT '18446744073709551615', '-1.000000000', 18446744073709551615 = -1.000000000, 18446744073709551615 != -1.000000000, 18446744073709551615 < -1.000000000, 18446744073709551615 <= -1.000000000, 18446744073709551615 > -1.000000000, 18446744073709551615 >= -1.000000000, -1.000000000 = 18446744073709551615, -1.000000000 != 18446744073709551615, -1.000000000 < 18446744073709551615, -1.000000000 <= 18446744073709551615, -1.000000000 > 18446744073709551615, -1.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = -1.000000000, toUInt64(18446744073709551615) != -1.000000000, toUInt64(18446744073709551615) < -1.000000000, toUInt64(18446744073709551615) <= -1.000000000, toUInt64(18446744073709551615) > -1.000000000, toUInt64(18446744073709551615) >= -1.000000000, -1.000000000 = toUInt64(18446744073709551615), -1.000000000 != toUInt64(18446744073709551615), -1.000000000 < toUInt64(18446744073709551615), -1.000000000 <= toUInt64(18446744073709551615), -1.000000000 > toUInt64(18446744073709551615), -1.000000000 >= toUInt64(18446744073709551615) ;
+SELECT '18446744073709551615', '1.000000000', 18446744073709551615 = 1.000000000, 18446744073709551615 != 1.000000000, 18446744073709551615 < 1.000000000, 18446744073709551615 <= 1.000000000, 18446744073709551615 > 1.000000000, 18446744073709551615 >= 1.000000000, 1.000000000 = 18446744073709551615, 1.000000000 != 18446744073709551615, 1.000000000 < 18446744073709551615, 1.000000000 <= 18446744073709551615, 1.000000000 > 18446744073709551615, 1.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 1.000000000, toUInt64(18446744073709551615) != 1.000000000, toUInt64(18446744073709551615) < 1.000000000, toUInt64(18446744073709551615) <= 1.000000000, toUInt64(18446744073709551615) > 1.000000000, toUInt64(18446744073709551615) >= 1.000000000, 1.000000000 = toUInt64(18446744073709551615), 1.000000000 != toUInt64(18446744073709551615), 1.000000000 < toUInt64(18446744073709551615), 1.000000000 <= toUInt64(18446744073709551615), 1.000000000 > toUInt64(18446744073709551615), 1.000000000 >= toUInt64(18446744073709551615) ;
+SELECT '18446744073709551615', '18446744073709551616.000000000', 18446744073709551615 = 18446744073709551616.000000000, 18446744073709551615 != 18446744073709551616.000000000, 18446744073709551615 < 18446744073709551616.000000000, 18446744073709551615 <= 18446744073709551616.000000000, 18446744073709551615 > 18446744073709551616.000000000, 18446744073709551615 >= 18446744073709551616.000000000, 18446744073709551616.000000000 = 18446744073709551615, 18446744073709551616.000000000 != 18446744073709551615, 18446744073709551616.000000000 < 18446744073709551615, 18446744073709551616.000000000 <= 18446744073709551615, 18446744073709551616.000000000 > 18446744073709551615, 18446744073709551616.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 18446744073709551616.000000000, toUInt64(18446744073709551615) != 18446744073709551616.000000000, toUInt64(18446744073709551615) < 18446744073709551616.000000000, toUInt64(18446744073709551615) <= 18446744073709551616.000000000, toUInt64(18446744073709551615) > 18446744073709551616.000000000, toUInt64(18446744073709551615) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toUInt64(18446744073709551615), 18446744073709551616.000000000 != toUInt64(18446744073709551615), 18446744073709551616.000000000 < toUInt64(18446744073709551615), 18446744073709551616.000000000 <= toUInt64(18446744073709551615), 18446744073709551616.000000000 > toUInt64(18446744073709551615), 18446744073709551616.000000000 >= toUInt64(18446744073709551615) ;
+SELECT '18446744073709551615', '9223372036854775808.000000000', 18446744073709551615 = 9223372036854775808.000000000, 18446744073709551615 != 9223372036854775808.000000000, 18446744073709551615 < 9223372036854775808.000000000, 18446744073709551615 <= 9223372036854775808.000000000, 18446744073709551615 > 9223372036854775808.000000000, 18446744073709551615 >= 9223372036854775808.000000000, 9223372036854775808.000000000 = 18446744073709551615, 9223372036854775808.000000000 != 18446744073709551615, 9223372036854775808.000000000 < 18446744073709551615, 9223372036854775808.000000000 <= 18446744073709551615, 9223372036854775808.000000000 > 18446744073709551615, 9223372036854775808.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 9223372036854775808.000000000, toUInt64(18446744073709551615) != 9223372036854775808.000000000, toUInt64(18446744073709551615) < 9223372036854775808.000000000, toUInt64(18446744073709551615) <= 9223372036854775808.000000000, toUInt64(18446744073709551615) > 9223372036854775808.000000000, toUInt64(18446744073709551615) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toUInt64(18446744073709551615), 9223372036854775808.000000000 != toUInt64(18446744073709551615), 9223372036854775808.000000000 < toUInt64(18446744073709551615), 9223372036854775808.000000000 <= toUInt64(18446744073709551615), 9223372036854775808.000000000 > toUInt64(18446744073709551615), 9223372036854775808.000000000 >= toUInt64(18446744073709551615) ;
+SELECT '18446744073709551615', '-9223372036854775808.000000000', 18446744073709551615 = -9223372036854775808.000000000, 18446744073709551615 != -9223372036854775808.000000000, 18446744073709551615 < -9223372036854775808.000000000, 18446744073709551615 <= -9223372036854775808.000000000, 18446744073709551615 > -9223372036854775808.000000000, 18446744073709551615 >= -9223372036854775808.000000000, -9223372036854775808.000000000 = 18446744073709551615, -9223372036854775808.000000000 != 18446744073709551615, -9223372036854775808.000000000 < 18446744073709551615, -9223372036854775808.000000000 <= 18446744073709551615, -9223372036854775808.000000000 > 18446744073709551615, -9223372036854775808.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = -9223372036854775808.000000000, toUInt64(18446744073709551615) != -9223372036854775808.000000000, toUInt64(18446744073709551615) < -9223372036854775808.000000000, toUInt64(18446744073709551615) <= -9223372036854775808.000000000, toUInt64(18446744073709551615) > -9223372036854775808.000000000, toUInt64(18446744073709551615) >= -9223372036854775808.000000000, -9223372036854775808.000000000 = toUInt64(18446744073709551615), -9223372036854775808.000000000 != toUInt64(18446744073709551615), -9223372036854775808.000000000 < toUInt64(18446744073709551615), -9223372036854775808.000000000 <= toUInt64(18446744073709551615), -9223372036854775808.000000000 > toUInt64(18446744073709551615), -9223372036854775808.000000000 >= toUInt64(18446744073709551615) ;
+SELECT '18446744073709551615', '9223372036854775808.000000000', 18446744073709551615 = 9223372036854775808.000000000, 18446744073709551615 != 9223372036854775808.000000000, 18446744073709551615 < 9223372036854775808.000000000, 18446744073709551615 <= 9223372036854775808.000000000, 18446744073709551615 > 9223372036854775808.000000000, 18446744073709551615 >= 9223372036854775808.000000000, 9223372036854775808.000000000 = 18446744073709551615, 9223372036854775808.000000000 != 18446744073709551615, 9223372036854775808.000000000 < 18446744073709551615, 9223372036854775808.000000000 <= 18446744073709551615, 9223372036854775808.000000000 > 18446744073709551615, 9223372036854775808.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 9223372036854775808.000000000, toUInt64(18446744073709551615) != 9223372036854775808.000000000, toUInt64(18446744073709551615) < 9223372036854775808.000000000, toUInt64(18446744073709551615) <= 9223372036854775808.000000000, toUInt64(18446744073709551615) > 9223372036854775808.000000000, toUInt64(18446744073709551615) >= 9223372036854775808.000000000, 9223372036854775808.000000000 = toUInt64(18446744073709551615), 9223372036854775808.000000000 != toUInt64(18446744073709551615), 9223372036854775808.000000000 < toUInt64(18446744073709551615), 9223372036854775808.000000000 <= toUInt64(18446744073709551615), 9223372036854775808.000000000 > toUInt64(18446744073709551615), 9223372036854775808.000000000 >= toUInt64(18446744073709551615) ;
+SELECT '18446744073709551615', '2251799813685248.000000000', 18446744073709551615 = 2251799813685248.000000000, 18446744073709551615 != 2251799813685248.000000000, 18446744073709551615 < 2251799813685248.000000000, 18446744073709551615 <= 2251799813685248.000000000, 18446744073709551615 > 2251799813685248.000000000, 18446744073709551615 >= 2251799813685248.000000000, 2251799813685248.000000000 = 18446744073709551615, 2251799813685248.000000000 != 18446744073709551615, 2251799813685248.000000000 < 18446744073709551615,
2251799813685248.000000000 <= 18446744073709551615, 2251799813685248.000000000 > 18446744073709551615, 2251799813685248.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 2251799813685248.000000000, toUInt64(18446744073709551615) != 2251799813685248.000000000, toUInt64(18446744073709551615) < 2251799813685248.000000000, toUInt64(18446744073709551615) <= 2251799813685248.000000000, toUInt64(18446744073709551615) > 2251799813685248.000000000, toUInt64(18446744073709551615) >= 2251799813685248.000000000, 2251799813685248.000000000 = toUInt64(18446744073709551615), 2251799813685248.000000000 != toUInt64(18446744073709551615), 2251799813685248.000000000 < toUInt64(18446744073709551615), 2251799813685248.000000000 <= toUInt64(18446744073709551615), 2251799813685248.000000000 > toUInt64(18446744073709551615), 2251799813685248.000000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '4503599627370496.000000000', 18446744073709551615 = 4503599627370496.000000000, 18446744073709551615 != 4503599627370496.000000000, 18446744073709551615 < 4503599627370496.000000000, 18446744073709551615 <= 4503599627370496.000000000, 18446744073709551615 > 4503599627370496.000000000, 18446744073709551615 >= 4503599627370496.000000000, 4503599627370496.000000000 = 18446744073709551615, 4503599627370496.000000000 != 18446744073709551615, 4503599627370496.000000000 < 18446744073709551615, 4503599627370496.000000000 <= 18446744073709551615, 4503599627370496.000000000 > 18446744073709551615, 4503599627370496.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 4503599627370496.000000000, toUInt64(18446744073709551615) != 4503599627370496.000000000, toUInt64(18446744073709551615) < 4503599627370496.000000000, toUInt64(18446744073709551615) <= 4503599627370496.000000000, toUInt64(18446744073709551615) > 4503599627370496.000000000, toUInt64(18446744073709551615) >= 4503599627370496.000000000, 4503599627370496.000000000 = toUInt64(18446744073709551615), 4503599627370496.000000000 != toUInt64(18446744073709551615), 4503599627370496.000000000 < toUInt64(18446744073709551615), 4503599627370496.000000000 <= toUInt64(18446744073709551615), 4503599627370496.000000000 > toUInt64(18446744073709551615), 4503599627370496.000000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '9007199254740991.000000000', 18446744073709551615 = 9007199254740991.000000000, 18446744073709551615 != 9007199254740991.000000000, 18446744073709551615 < 9007199254740991.000000000, 18446744073709551615 <= 9007199254740991.000000000, 18446744073709551615 > 9007199254740991.000000000, 18446744073709551615 >= 9007199254740991.000000000, 9007199254740991.000000000 = 18446744073709551615, 9007199254740991.000000000 != 18446744073709551615, 9007199254740991.000000000 < 18446744073709551615, 9007199254740991.000000000 <= 18446744073709551615, 9007199254740991.000000000 > 18446744073709551615, 9007199254740991.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 9007199254740991.000000000, toUInt64(18446744073709551615) != 9007199254740991.000000000, toUInt64(18446744073709551615) < 9007199254740991.000000000, toUInt64(18446744073709551615) <= 9007199254740991.000000000, toUInt64(18446744073709551615) > 9007199254740991.000000000, toUInt64(18446744073709551615) >= 9007199254740991.000000000, 9007199254740991.000000000 = toUInt64(18446744073709551615), 9007199254740991.000000000 != toUInt64(18446744073709551615), 9007199254740991.000000000 < toUInt64(18446744073709551615), 
9007199254740991.000000000 <= toUInt64(18446744073709551615), 9007199254740991.000000000 > toUInt64(18446744073709551615), 9007199254740991.000000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '9007199254740992.000000000', 18446744073709551615 = 9007199254740992.000000000, 18446744073709551615 != 9007199254740992.000000000, 18446744073709551615 < 9007199254740992.000000000, 18446744073709551615 <= 9007199254740992.000000000, 18446744073709551615 > 9007199254740992.000000000, 18446744073709551615 >= 9007199254740992.000000000, 9007199254740992.000000000 = 18446744073709551615, 9007199254740992.000000000 != 18446744073709551615, 9007199254740992.000000000 < 18446744073709551615, 9007199254740992.000000000 <= 18446744073709551615, 9007199254740992.000000000 > 18446744073709551615, 9007199254740992.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 9007199254740992.000000000, toUInt64(18446744073709551615) != 9007199254740992.000000000, toUInt64(18446744073709551615) < 9007199254740992.000000000, toUInt64(18446744073709551615) <= 9007199254740992.000000000, toUInt64(18446744073709551615) > 9007199254740992.000000000, toUInt64(18446744073709551615) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt64(18446744073709551615), 9007199254740992.000000000 != toUInt64(18446744073709551615), 9007199254740992.000000000 < toUInt64(18446744073709551615), 9007199254740992.000000000 <= toUInt64(18446744073709551615), 9007199254740992.000000000 > toUInt64(18446744073709551615), 9007199254740992.000000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '9007199254740992.000000000', 18446744073709551615 = 9007199254740992.000000000, 18446744073709551615 != 9007199254740992.000000000, 18446744073709551615 < 9007199254740992.000000000, 18446744073709551615 <= 9007199254740992.000000000, 18446744073709551615 > 9007199254740992.000000000, 18446744073709551615 >= 9007199254740992.000000000, 9007199254740992.000000000 = 18446744073709551615, 9007199254740992.000000000 != 18446744073709551615, 9007199254740992.000000000 < 18446744073709551615, 9007199254740992.000000000 <= 18446744073709551615, 9007199254740992.000000000 > 18446744073709551615, 9007199254740992.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 9007199254740992.000000000, toUInt64(18446744073709551615) != 9007199254740992.000000000, toUInt64(18446744073709551615) < 9007199254740992.000000000, toUInt64(18446744073709551615) <= 9007199254740992.000000000, toUInt64(18446744073709551615) > 9007199254740992.000000000, toUInt64(18446744073709551615) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt64(18446744073709551615), 9007199254740992.000000000 != toUInt64(18446744073709551615), 9007199254740992.000000000 < toUInt64(18446744073709551615), 9007199254740992.000000000 <= toUInt64(18446744073709551615), 9007199254740992.000000000 > toUInt64(18446744073709551615), 9007199254740992.000000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '9007199254740994.000000000', 18446744073709551615 = 9007199254740994.000000000, 18446744073709551615 != 9007199254740994.000000000, 18446744073709551615 < 9007199254740994.000000000, 18446744073709551615 <= 9007199254740994.000000000, 18446744073709551615 > 9007199254740994.000000000, 18446744073709551615 >= 9007199254740994.000000000, 9007199254740994.000000000 = 18446744073709551615, 9007199254740994.000000000 != 18446744073709551615, 9007199254740994.000000000 < 18446744073709551615, 
9007199254740994.000000000 <= 18446744073709551615, 9007199254740994.000000000 > 18446744073709551615, 9007199254740994.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 9007199254740994.000000000, toUInt64(18446744073709551615) != 9007199254740994.000000000, toUInt64(18446744073709551615) < 9007199254740994.000000000, toUInt64(18446744073709551615) <= 9007199254740994.000000000, toUInt64(18446744073709551615) > 9007199254740994.000000000, toUInt64(18446744073709551615) >= 9007199254740994.000000000, 9007199254740994.000000000 = toUInt64(18446744073709551615), 9007199254740994.000000000 != toUInt64(18446744073709551615), 9007199254740994.000000000 < toUInt64(18446744073709551615), 9007199254740994.000000000 <= toUInt64(18446744073709551615), 9007199254740994.000000000 > toUInt64(18446744073709551615), 9007199254740994.000000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '-9007199254740991.000000000', 18446744073709551615 = -9007199254740991.000000000, 18446744073709551615 != -9007199254740991.000000000, 18446744073709551615 < -9007199254740991.000000000, 18446744073709551615 <= -9007199254740991.000000000, 18446744073709551615 > -9007199254740991.000000000, 18446744073709551615 >= -9007199254740991.000000000, -9007199254740991.000000000 = 18446744073709551615, -9007199254740991.000000000 != 18446744073709551615, -9007199254740991.000000000 < 18446744073709551615, -9007199254740991.000000000 <= 18446744073709551615, -9007199254740991.000000000 > 18446744073709551615, -9007199254740991.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = -9007199254740991.000000000, toUInt64(18446744073709551615) != -9007199254740991.000000000, toUInt64(18446744073709551615) < -9007199254740991.000000000, toUInt64(18446744073709551615) <= -9007199254740991.000000000, toUInt64(18446744073709551615) > -9007199254740991.000000000, toUInt64(18446744073709551615) >= -9007199254740991.000000000, -9007199254740991.000000000 = toUInt64(18446744073709551615), -9007199254740991.000000000 != toUInt64(18446744073709551615), -9007199254740991.000000000 < toUInt64(18446744073709551615), -9007199254740991.000000000 <= toUInt64(18446744073709551615), -9007199254740991.000000000 > toUInt64(18446744073709551615), -9007199254740991.000000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '-9007199254740992.000000000', 18446744073709551615 = -9007199254740992.000000000, 18446744073709551615 != -9007199254740992.000000000, 18446744073709551615 < -9007199254740992.000000000, 18446744073709551615 <= -9007199254740992.000000000, 18446744073709551615 > -9007199254740992.000000000, 18446744073709551615 >= -9007199254740992.000000000, -9007199254740992.000000000 = 18446744073709551615, -9007199254740992.000000000 != 18446744073709551615, -9007199254740992.000000000 < 18446744073709551615, -9007199254740992.000000000 <= 18446744073709551615, -9007199254740992.000000000 > 18446744073709551615, -9007199254740992.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = -9007199254740992.000000000, toUInt64(18446744073709551615) != -9007199254740992.000000000, toUInt64(18446744073709551615) < -9007199254740992.000000000, toUInt64(18446744073709551615) <= -9007199254740992.000000000, toUInt64(18446744073709551615) > -9007199254740992.000000000, toUInt64(18446744073709551615) >= -9007199254740992.000000000, -9007199254740992.000000000 = toUInt64(18446744073709551615), -9007199254740992.000000000 != toUInt64(18446744073709551615), 
-9007199254740992.000000000 < toUInt64(18446744073709551615), -9007199254740992.000000000 <= toUInt64(18446744073709551615), -9007199254740992.000000000 > toUInt64(18446744073709551615), -9007199254740992.000000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '-9007199254740992.000000000', 18446744073709551615 = -9007199254740992.000000000, 18446744073709551615 != -9007199254740992.000000000, 18446744073709551615 < -9007199254740992.000000000, 18446744073709551615 <= -9007199254740992.000000000, 18446744073709551615 > -9007199254740992.000000000, 18446744073709551615 >= -9007199254740992.000000000, -9007199254740992.000000000 = 18446744073709551615, -9007199254740992.000000000 != 18446744073709551615, -9007199254740992.000000000 < 18446744073709551615, -9007199254740992.000000000 <= 18446744073709551615, -9007199254740992.000000000 > 18446744073709551615, -9007199254740992.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = -9007199254740992.000000000, toUInt64(18446744073709551615) != -9007199254740992.000000000, toUInt64(18446744073709551615) < -9007199254740992.000000000, toUInt64(18446744073709551615) <= -9007199254740992.000000000, toUInt64(18446744073709551615) > -9007199254740992.000000000, toUInt64(18446744073709551615) >= -9007199254740992.000000000, -9007199254740992.000000000 = toUInt64(18446744073709551615), -9007199254740992.000000000 != toUInt64(18446744073709551615), -9007199254740992.000000000 < toUInt64(18446744073709551615), -9007199254740992.000000000 <= toUInt64(18446744073709551615), -9007199254740992.000000000 > toUInt64(18446744073709551615), -9007199254740992.000000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '-9007199254740994.000000000', 18446744073709551615 = -9007199254740994.000000000, 18446744073709551615 != -9007199254740994.000000000, 18446744073709551615 < -9007199254740994.000000000, 18446744073709551615 <= -9007199254740994.000000000, 18446744073709551615 > -9007199254740994.000000000, 18446744073709551615 >= -9007199254740994.000000000, -9007199254740994.000000000 = 18446744073709551615, -9007199254740994.000000000 != 18446744073709551615, -9007199254740994.000000000 < 18446744073709551615, -9007199254740994.000000000 <= 18446744073709551615, -9007199254740994.000000000 > 18446744073709551615, -9007199254740994.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = -9007199254740994.000000000, toUInt64(18446744073709551615) != -9007199254740994.000000000, toUInt64(18446744073709551615) < -9007199254740994.000000000, toUInt64(18446744073709551615) <= -9007199254740994.000000000, toUInt64(18446744073709551615) > -9007199254740994.000000000, toUInt64(18446744073709551615) >= -9007199254740994.000000000, -9007199254740994.000000000 = toUInt64(18446744073709551615), -9007199254740994.000000000 != toUInt64(18446744073709551615), -9007199254740994.000000000 < toUInt64(18446744073709551615), -9007199254740994.000000000 <= toUInt64(18446744073709551615), -9007199254740994.000000000 > toUInt64(18446744073709551615), -9007199254740994.000000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '104.000000000', 18446744073709551615 = 104.000000000, 18446744073709551615 != 104.000000000, 18446744073709551615 < 104.000000000, 18446744073709551615 <= 104.000000000, 18446744073709551615 > 104.000000000, 18446744073709551615 >= 104.000000000, 104.000000000 = 18446744073709551615, 104.000000000 != 18446744073709551615, 104.000000000 < 18446744073709551615, 104.000000000 <= 
18446744073709551615, 104.000000000 > 18446744073709551615, 104.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 104.000000000, toUInt64(18446744073709551615) != 104.000000000, toUInt64(18446744073709551615) < 104.000000000, toUInt64(18446744073709551615) <= 104.000000000, toUInt64(18446744073709551615) > 104.000000000, toUInt64(18446744073709551615) >= 104.000000000, 104.000000000 = toUInt64(18446744073709551615), 104.000000000 != toUInt64(18446744073709551615), 104.000000000 < toUInt64(18446744073709551615), 104.000000000 <= toUInt64(18446744073709551615), 104.000000000 > toUInt64(18446744073709551615), 104.000000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '-4503599627370496.000000000', 18446744073709551615 = -4503599627370496.000000000, 18446744073709551615 != -4503599627370496.000000000, 18446744073709551615 < -4503599627370496.000000000, 18446744073709551615 <= -4503599627370496.000000000, 18446744073709551615 > -4503599627370496.000000000, 18446744073709551615 >= -4503599627370496.000000000, -4503599627370496.000000000 = 18446744073709551615, -4503599627370496.000000000 != 18446744073709551615, -4503599627370496.000000000 < 18446744073709551615, -4503599627370496.000000000 <= 18446744073709551615, -4503599627370496.000000000 > 18446744073709551615, -4503599627370496.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = -4503599627370496.000000000, toUInt64(18446744073709551615) != -4503599627370496.000000000, toUInt64(18446744073709551615) < -4503599627370496.000000000, toUInt64(18446744073709551615) <= -4503599627370496.000000000, toUInt64(18446744073709551615) > -4503599627370496.000000000, toUInt64(18446744073709551615) >= -4503599627370496.000000000, -4503599627370496.000000000 = toUInt64(18446744073709551615), -4503599627370496.000000000 != toUInt64(18446744073709551615), -4503599627370496.000000000 < toUInt64(18446744073709551615), -4503599627370496.000000000 <= toUInt64(18446744073709551615), -4503599627370496.000000000 > toUInt64(18446744073709551615), -4503599627370496.000000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '-0.500000000', 18446744073709551615 = -0.500000000, 18446744073709551615 != -0.500000000, 18446744073709551615 < -0.500000000, 18446744073709551615 <= -0.500000000, 18446744073709551615 > -0.500000000, 18446744073709551615 >= -0.500000000, -0.500000000 = 18446744073709551615, -0.500000000 != 18446744073709551615, -0.500000000 < 18446744073709551615, -0.500000000 <= 18446744073709551615, -0.500000000 > 18446744073709551615, -0.500000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = -0.500000000, toUInt64(18446744073709551615) != -0.500000000, toUInt64(18446744073709551615) < -0.500000000, toUInt64(18446744073709551615) <= -0.500000000, toUInt64(18446744073709551615) > -0.500000000, toUInt64(18446744073709551615) >= -0.500000000, -0.500000000 = toUInt64(18446744073709551615), -0.500000000 != toUInt64(18446744073709551615), -0.500000000 < toUInt64(18446744073709551615), -0.500000000 <= toUInt64(18446744073709551615), -0.500000000 > toUInt64(18446744073709551615), -0.500000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '0.500000000', 18446744073709551615 = 0.500000000, 18446744073709551615 != 0.500000000, 18446744073709551615 < 0.500000000, 18446744073709551615 <= 0.500000000, 18446744073709551615 > 0.500000000, 18446744073709551615 >= 0.500000000, 0.500000000 = 18446744073709551615, 0.500000000 != 18446744073709551615, 0.500000000 < 
18446744073709551615, 0.500000000 <= 18446744073709551615, 0.500000000 > 18446744073709551615, 0.500000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 0.500000000, toUInt64(18446744073709551615) != 0.500000000, toUInt64(18446744073709551615) < 0.500000000, toUInt64(18446744073709551615) <= 0.500000000, toUInt64(18446744073709551615) > 0.500000000, toUInt64(18446744073709551615) >= 0.500000000, 0.500000000 = toUInt64(18446744073709551615), 0.500000000 != toUInt64(18446744073709551615), 0.500000000 < toUInt64(18446744073709551615), 0.500000000 <= toUInt64(18446744073709551615), 0.500000000 > toUInt64(18446744073709551615), 0.500000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '-1.500000000', 18446744073709551615 = -1.500000000, 18446744073709551615 != -1.500000000, 18446744073709551615 < -1.500000000, 18446744073709551615 <= -1.500000000, 18446744073709551615 > -1.500000000, 18446744073709551615 >= -1.500000000, -1.500000000 = 18446744073709551615, -1.500000000 != 18446744073709551615, -1.500000000 < 18446744073709551615, -1.500000000 <= 18446744073709551615, -1.500000000 > 18446744073709551615, -1.500000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = -1.500000000, toUInt64(18446744073709551615) != -1.500000000, toUInt64(18446744073709551615) < -1.500000000, toUInt64(18446744073709551615) <= -1.500000000, toUInt64(18446744073709551615) > -1.500000000, toUInt64(18446744073709551615) >= -1.500000000, -1.500000000 = toUInt64(18446744073709551615), -1.500000000 != toUInt64(18446744073709551615), -1.500000000 < toUInt64(18446744073709551615), -1.500000000 <= toUInt64(18446744073709551615), -1.500000000 > toUInt64(18446744073709551615), -1.500000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '1.500000000', 18446744073709551615 = 1.500000000, 18446744073709551615 != 1.500000000, 18446744073709551615 < 1.500000000, 18446744073709551615 <= 1.500000000, 18446744073709551615 > 1.500000000, 18446744073709551615 >= 1.500000000, 1.500000000 = 18446744073709551615, 1.500000000 != 18446744073709551615, 1.500000000 < 18446744073709551615, 1.500000000 <= 18446744073709551615, 1.500000000 > 18446744073709551615, 1.500000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 1.500000000, toUInt64(18446744073709551615) != 1.500000000, toUInt64(18446744073709551615) < 1.500000000, toUInt64(18446744073709551615) <= 1.500000000, toUInt64(18446744073709551615) > 1.500000000, toUInt64(18446744073709551615) >= 1.500000000, 1.500000000 = toUInt64(18446744073709551615), 1.500000000 != toUInt64(18446744073709551615), 1.500000000 < toUInt64(18446744073709551615), 1.500000000 <= toUInt64(18446744073709551615), 1.500000000 > toUInt64(18446744073709551615), 1.500000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '9007199254740992.000000000', 18446744073709551615 = 9007199254740992.000000000, 18446744073709551615 != 9007199254740992.000000000, 18446744073709551615 < 9007199254740992.000000000, 18446744073709551615 <= 9007199254740992.000000000, 18446744073709551615 > 9007199254740992.000000000, 18446744073709551615 >= 9007199254740992.000000000, 9007199254740992.000000000 = 18446744073709551615, 9007199254740992.000000000 != 18446744073709551615, 9007199254740992.000000000 < 18446744073709551615, 9007199254740992.000000000 <= 18446744073709551615, 9007199254740992.000000000 > 18446744073709551615, 9007199254740992.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 9007199254740992.000000000, 
toUInt64(18446744073709551615) != 9007199254740992.000000000, toUInt64(18446744073709551615) < 9007199254740992.000000000, toUInt64(18446744073709551615) <= 9007199254740992.000000000, toUInt64(18446744073709551615) > 9007199254740992.000000000, toUInt64(18446744073709551615) >= 9007199254740992.000000000, 9007199254740992.000000000 = toUInt64(18446744073709551615), 9007199254740992.000000000 != toUInt64(18446744073709551615), 9007199254740992.000000000 < toUInt64(18446744073709551615), 9007199254740992.000000000 <= toUInt64(18446744073709551615), 9007199254740992.000000000 > toUInt64(18446744073709551615), 9007199254740992.000000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '2251799813685247.500000000', 18446744073709551615 = 2251799813685247.500000000, 18446744073709551615 != 2251799813685247.500000000, 18446744073709551615 < 2251799813685247.500000000, 18446744073709551615 <= 2251799813685247.500000000, 18446744073709551615 > 2251799813685247.500000000, 18446744073709551615 >= 2251799813685247.500000000, 2251799813685247.500000000 = 18446744073709551615, 2251799813685247.500000000 != 18446744073709551615, 2251799813685247.500000000 < 18446744073709551615, 2251799813685247.500000000 <= 18446744073709551615, 2251799813685247.500000000 > 18446744073709551615, 2251799813685247.500000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 2251799813685247.500000000, toUInt64(18446744073709551615) != 2251799813685247.500000000, toUInt64(18446744073709551615) < 2251799813685247.500000000, toUInt64(18446744073709551615) <= 2251799813685247.500000000, toUInt64(18446744073709551615) > 2251799813685247.500000000, toUInt64(18446744073709551615) >= 2251799813685247.500000000, 2251799813685247.500000000 = toUInt64(18446744073709551615), 2251799813685247.500000000 != toUInt64(18446744073709551615), 2251799813685247.500000000 < toUInt64(18446744073709551615), 2251799813685247.500000000 <= toUInt64(18446744073709551615), 2251799813685247.500000000 > toUInt64(18446744073709551615), 2251799813685247.500000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '2251799813685248.500000000', 18446744073709551615 = 2251799813685248.500000000, 18446744073709551615 != 2251799813685248.500000000, 18446744073709551615 < 2251799813685248.500000000, 18446744073709551615 <= 2251799813685248.500000000, 18446744073709551615 > 2251799813685248.500000000, 18446744073709551615 >= 2251799813685248.500000000, 2251799813685248.500000000 = 18446744073709551615, 2251799813685248.500000000 != 18446744073709551615, 2251799813685248.500000000 < 18446744073709551615, 2251799813685248.500000000 <= 18446744073709551615, 2251799813685248.500000000 > 18446744073709551615, 2251799813685248.500000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 2251799813685248.500000000, toUInt64(18446744073709551615) != 2251799813685248.500000000, toUInt64(18446744073709551615) < 2251799813685248.500000000, toUInt64(18446744073709551615) <= 2251799813685248.500000000, toUInt64(18446744073709551615) > 2251799813685248.500000000, toUInt64(18446744073709551615) >= 2251799813685248.500000000, 2251799813685248.500000000 = toUInt64(18446744073709551615), 2251799813685248.500000000 != toUInt64(18446744073709551615), 2251799813685248.500000000 < toUInt64(18446744073709551615), 2251799813685248.500000000 <= toUInt64(18446744073709551615), 2251799813685248.500000000 > toUInt64(18446744073709551615), 2251799813685248.500000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', 
'1152921504606846976.000000000', 18446744073709551615 = 1152921504606846976.000000000, 18446744073709551615 != 1152921504606846976.000000000, 18446744073709551615 < 1152921504606846976.000000000, 18446744073709551615 <= 1152921504606846976.000000000, 18446744073709551615 > 1152921504606846976.000000000, 18446744073709551615 >= 1152921504606846976.000000000, 1152921504606846976.000000000 = 18446744073709551615, 1152921504606846976.000000000 != 18446744073709551615, 1152921504606846976.000000000 < 18446744073709551615, 1152921504606846976.000000000 <= 18446744073709551615, 1152921504606846976.000000000 > 18446744073709551615, 1152921504606846976.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 1152921504606846976.000000000, toUInt64(18446744073709551615) != 1152921504606846976.000000000, toUInt64(18446744073709551615) < 1152921504606846976.000000000, toUInt64(18446744073709551615) <= 1152921504606846976.000000000, toUInt64(18446744073709551615) > 1152921504606846976.000000000, toUInt64(18446744073709551615) >= 1152921504606846976.000000000, 1152921504606846976.000000000 = toUInt64(18446744073709551615), 1152921504606846976.000000000 != toUInt64(18446744073709551615), 1152921504606846976.000000000 < toUInt64(18446744073709551615), 1152921504606846976.000000000 <= toUInt64(18446744073709551615), 1152921504606846976.000000000 > toUInt64(18446744073709551615), 1152921504606846976.000000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '-1152921504606846976.000000000', 18446744073709551615 = -1152921504606846976.000000000, 18446744073709551615 != -1152921504606846976.000000000, 18446744073709551615 < -1152921504606846976.000000000, 18446744073709551615 <= -1152921504606846976.000000000, 18446744073709551615 > -1152921504606846976.000000000, 18446744073709551615 >= -1152921504606846976.000000000, -1152921504606846976.000000000 = 18446744073709551615, -1152921504606846976.000000000 != 18446744073709551615, -1152921504606846976.000000000 < 18446744073709551615, -1152921504606846976.000000000 <= 18446744073709551615, -1152921504606846976.000000000 > 18446744073709551615, -1152921504606846976.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = -1152921504606846976.000000000, toUInt64(18446744073709551615) != -1152921504606846976.000000000, toUInt64(18446744073709551615) < -1152921504606846976.000000000, toUInt64(18446744073709551615) <= -1152921504606846976.000000000, toUInt64(18446744073709551615) > -1152921504606846976.000000000, toUInt64(18446744073709551615) >= -1152921504606846976.000000000, -1152921504606846976.000000000 = toUInt64(18446744073709551615), -1152921504606846976.000000000 != toUInt64(18446744073709551615), -1152921504606846976.000000000 < toUInt64(18446744073709551615), -1152921504606846976.000000000 <= toUInt64(18446744073709551615), -1152921504606846976.000000000 > toUInt64(18446744073709551615), -1152921504606846976.000000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '-9223372036854786048.000000000', 18446744073709551615 = -9223372036854786048.000000000, 18446744073709551615 != -9223372036854786048.000000000, 18446744073709551615 < -9223372036854786048.000000000, 18446744073709551615 <= -9223372036854786048.000000000, 18446744073709551615 > -9223372036854786048.000000000, 18446744073709551615 >= -9223372036854786048.000000000, -9223372036854786048.000000000 = 18446744073709551615, -9223372036854786048.000000000 != 18446744073709551615, -9223372036854786048.000000000 < 18446744073709551615, 
-9223372036854786048.000000000 <= 18446744073709551615, -9223372036854786048.000000000 > 18446744073709551615, -9223372036854786048.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = -9223372036854786048.000000000, toUInt64(18446744073709551615) != -9223372036854786048.000000000, toUInt64(18446744073709551615) < -9223372036854786048.000000000, toUInt64(18446744073709551615) <= -9223372036854786048.000000000, toUInt64(18446744073709551615) > -9223372036854786048.000000000, toUInt64(18446744073709551615) >= -9223372036854786048.000000000, -9223372036854786048.000000000 = toUInt64(18446744073709551615), -9223372036854786048.000000000 != toUInt64(18446744073709551615), -9223372036854786048.000000000 < toUInt64(18446744073709551615), -9223372036854786048.000000000 <= toUInt64(18446744073709551615), -9223372036854786048.000000000 > toUInt64(18446744073709551615), -9223372036854786048.000000000 >= toUInt64(18446744073709551615) ; +SELECT '18446744073709551615', '9223372036854786048.000000000', 18446744073709551615 = 9223372036854786048.000000000, 18446744073709551615 != 9223372036854786048.000000000, 18446744073709551615 < 9223372036854786048.000000000, 18446744073709551615 <= 9223372036854786048.000000000, 18446744073709551615 > 9223372036854786048.000000000, 18446744073709551615 >= 9223372036854786048.000000000, 9223372036854786048.000000000 = 18446744073709551615, 9223372036854786048.000000000 != 18446744073709551615, 9223372036854786048.000000000 < 18446744073709551615, 9223372036854786048.000000000 <= 18446744073709551615, 9223372036854786048.000000000 > 18446744073709551615, 9223372036854786048.000000000 >= 18446744073709551615 , toUInt64(18446744073709551615) = 9223372036854786048.000000000, toUInt64(18446744073709551615) != 9223372036854786048.000000000, toUInt64(18446744073709551615) < 9223372036854786048.000000000, toUInt64(18446744073709551615) <= 9223372036854786048.000000000, toUInt64(18446744073709551615) > 9223372036854786048.000000000, toUInt64(18446744073709551615) >= 9223372036854786048.000000000, 9223372036854786048.000000000 = toUInt64(18446744073709551615), 9223372036854786048.000000000 != toUInt64(18446744073709551615), 9223372036854786048.000000000 < toUInt64(18446744073709551615), 9223372036854786048.000000000 <= toUInt64(18446744073709551615), 9223372036854786048.000000000 > toUInt64(18446744073709551615), 9223372036854786048.000000000 >= toUInt64(18446744073709551615) ; +SELECT '9223372036854775808', '0.000000000', 9223372036854775808 = 0.000000000, 9223372036854775808 != 0.000000000, 9223372036854775808 < 0.000000000, 9223372036854775808 <= 0.000000000, 9223372036854775808 > 0.000000000, 9223372036854775808 >= 0.000000000, 0.000000000 = 9223372036854775808, 0.000000000 != 9223372036854775808, 0.000000000 < 9223372036854775808, 0.000000000 <= 9223372036854775808, 0.000000000 > 9223372036854775808, 0.000000000 >= 9223372036854775808 , toUInt64(9223372036854775808) = 0.000000000, toUInt64(9223372036854775808) != 0.000000000, toUInt64(9223372036854775808) < 0.000000000, toUInt64(9223372036854775808) <= 0.000000000, toUInt64(9223372036854775808) > 0.000000000, toUInt64(9223372036854775808) >= 0.000000000, 0.000000000 = toUInt64(9223372036854775808), 0.000000000 != toUInt64(9223372036854775808), 0.000000000 < toUInt64(9223372036854775808), 0.000000000 <= toUInt64(9223372036854775808), 0.000000000 > toUInt64(9223372036854775808), 0.000000000 >= toUInt64(9223372036854775808) ; +SELECT '9223372036854775808', '-1.000000000', 9223372036854775808 = -1.000000000, 
9223372036854775808 != -1.000000000, 9223372036854775808 < -1.000000000, 9223372036854775808 <= -1.000000000, 9223372036854775808 > -1.000000000, 9223372036854775808 >= -1.000000000, -1.000000000 = 9223372036854775808, -1.000000000 != 9223372036854775808, -1.000000000 < 9223372036854775808, -1.000000000 <= 9223372036854775808, -1.000000000 > 9223372036854775808, -1.000000000 >= 9223372036854775808 , toUInt64(9223372036854775808) = -1.000000000, toUInt64(9223372036854775808) != -1.000000000, toUInt64(9223372036854775808) < -1.000000000, toUInt64(9223372036854775808) <= -1.000000000, toUInt64(9223372036854775808) > -1.000000000, toUInt64(9223372036854775808) >= -1.000000000, -1.000000000 = toUInt64(9223372036854775808), -1.000000000 != toUInt64(9223372036854775808), -1.000000000 < toUInt64(9223372036854775808), -1.000000000 <= toUInt64(9223372036854775808), -1.000000000 > toUInt64(9223372036854775808), -1.000000000 >= toUInt64(9223372036854775808) ; +SELECT '9223372036854775808', '1.000000000', 9223372036854775808 = 1.000000000, 9223372036854775808 != 1.000000000, 9223372036854775808 < 1.000000000, 9223372036854775808 <= 1.000000000, 9223372036854775808 > 1.000000000, 9223372036854775808 >= 1.000000000, 1.000000000 = 9223372036854775808, 1.000000000 != 9223372036854775808, 1.000000000 < 9223372036854775808, 1.000000000 <= 9223372036854775808, 1.000000000 > 9223372036854775808, 1.000000000 >= 9223372036854775808 , toUInt64(9223372036854775808) = 1.000000000, toUInt64(9223372036854775808) != 1.000000000, toUInt64(9223372036854775808) < 1.000000000, toUInt64(9223372036854775808) <= 1.000000000, toUInt64(9223372036854775808) > 1.000000000, toUInt64(9223372036854775808) >= 1.000000000, 1.000000000 = toUInt64(9223372036854775808), 1.000000000 != toUInt64(9223372036854775808), 1.000000000 < toUInt64(9223372036854775808), 1.000000000 <= toUInt64(9223372036854775808), 1.000000000 > toUInt64(9223372036854775808), 1.000000000 >= toUInt64(9223372036854775808) ; +SELECT '9223372036854775808', '18446744073709551616.000000000', 9223372036854775808 = 18446744073709551616.000000000, 9223372036854775808 != 18446744073709551616.000000000, 9223372036854775808 < 18446744073709551616.000000000, 9223372036854775808 <= 18446744073709551616.000000000, 9223372036854775808 > 18446744073709551616.000000000, 9223372036854775808 >= 18446744073709551616.000000000, 18446744073709551616.000000000 = 9223372036854775808, 18446744073709551616.000000000 != 9223372036854775808, 18446744073709551616.000000000 < 9223372036854775808, 18446744073709551616.000000000 <= 9223372036854775808, 18446744073709551616.000000000 > 9223372036854775808, 18446744073709551616.000000000 >= 9223372036854775808 , toUInt64(9223372036854775808) = 18446744073709551616.000000000, toUInt64(9223372036854775808) != 18446744073709551616.000000000, toUInt64(9223372036854775808) < 18446744073709551616.000000000, toUInt64(9223372036854775808) <= 18446744073709551616.000000000, toUInt64(9223372036854775808) > 18446744073709551616.000000000, toUInt64(9223372036854775808) >= 18446744073709551616.000000000, 18446744073709551616.000000000 = toUInt64(9223372036854775808), 18446744073709551616.000000000 != toUInt64(9223372036854775808), 18446744073709551616.000000000 < toUInt64(9223372036854775808), 18446744073709551616.000000000 <= toUInt64(9223372036854775808), 18446744073709551616.000000000 > toUInt64(9223372036854775808), 18446744073709551616.000000000 >= toUInt64(9223372036854775808) ; diff --git a/parser/testdata/00411_merge_tree_where_const_in_set/ast.json 
b/parser/testdata/00411_merge_tree_where_const_in_set/ast.json
new file mode 100644
index 000000000..9b118a6e5
--- /dev/null
+++ b/parser/testdata/00411_merge_tree_where_const_in_set/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery const_in_const (children 1)"
+		},
+		{
+			"explain": " Identifier const_in_const"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001282887,
+		"rows_read": 2,
+		"bytes_read": 80
+	}
+}
diff --git a/parser/testdata/00411_merge_tree_where_const_in_set/metadata.json b/parser/testdata/00411_merge_tree_where_const_in_set/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00411_merge_tree_where_const_in_set/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00411_merge_tree_where_const_in_set/query.sql b/parser/testdata/00411_merge_tree_where_const_in_set/query.sql
new file mode 100644
index 000000000..22779509a
--- /dev/null
+++ b/parser/testdata/00411_merge_tree_where_const_in_set/query.sql
@@ -0,0 +1,13 @@
+DROP TABLE IF EXISTS const_in_const;
+set allow_deprecated_syntax_for_merge_tree=1;
+CREATE TABLE const_in_const (id UInt64, date Date, uid UInt32, name String, Sign Int8) ENGINE = CollapsingMergeTree(date, intHash32(uid), (id, date, intHash32(uid)), 8192, Sign);
+INSERT INTO const_in_const VALUES(1, now(), 1, 'test1', 1);
+INSERT INTO const_in_const VALUES(2, now(), 1, 'test2', 1);
+INSERT INTO const_in_const VALUES(3, now(), 1, 'test3', 1);
+INSERT INTO const_in_const VALUES(4, now(), 2, 'test4', 1);
+INSERT INTO const_in_const VALUES(5, now(), 3, 'test5', 1);
+
+SELECT 1 from const_in_const where 42 in (225);
+SELECT name FROM const_in_const WHERE 1 IN (125, 1, 2) ORDER BY name LIMIT 1;
+
+DROP TABLE IF EXISTS const_in_const;
diff --git a/parser/testdata/00412_logical_expressions_optimizer/ast.json b/parser/testdata/00412_logical_expressions_optimizer/ast.json
new file mode 100644
index 000000000..e1edee783
--- /dev/null
+++ b/parser/testdata/00412_logical_expressions_optimizer/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery merge_tree (children 1)"
+		},
+		{
+			"explain": " Identifier merge_tree"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001432084,
+		"rows_read": 2,
+		"bytes_read": 72
+	}
+}
diff --git a/parser/testdata/00412_logical_expressions_optimizer/metadata.json b/parser/testdata/00412_logical_expressions_optimizer/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00412_logical_expressions_optimizer/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00412_logical_expressions_optimizer/query.sql b/parser/testdata/00412_logical_expressions_optimizer/query.sql
new file mode 100644
index 000000000..c4fad7d50
--- /dev/null
+++ b/parser/testdata/00412_logical_expressions_optimizer/query.sql
@@ -0,0 +1,8 @@
+DROP TABLE IF EXISTS merge_tree;
+set allow_deprecated_syntax_for_merge_tree=1;
+CREATE TABLE merge_tree (x UInt64, date Date) ENGINE = MergeTree(date, x, 1);
+
+INSERT INTO merge_tree VALUES (1, '2000-01-01');
+SELECT x AS y, y FROM merge_tree;
+
+DROP TABLE IF EXISTS merge_tree;
diff --git a/parser/testdata/00413_distinct/ast.json b/parser/testdata/00413_distinct/ast.json
new file mode 100644
index 000000000..a5e9a8e7d
--- /dev/null
+++ b/parser/testdata/00413_distinct/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery distinct (children 1)"
+		},
+		{
+			"explain": " Identifier distinct"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001348942,
+		"rows_read": 2,
+		"bytes_read": 68
+	}
+}
diff --git a/parser/testdata/00413_distinct/metadata.json b/parser/testdata/00413_distinct/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00413_distinct/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00413_distinct/query.sql b/parser/testdata/00413_distinct/query.sql
new file mode 100644
index 000000000..79577cb0b
--- /dev/null
+++ b/parser/testdata/00413_distinct/query.sql
@@ -0,0 +1,26 @@
+DROP TABLE IF EXISTS distinct;
+CREATE TABLE distinct (Num UInt32, Name String) ENGINE = Memory;
+
+INSERT INTO distinct (Num, Name) VALUES (1, 'John');
+INSERT INTO distinct (Num, Name) VALUES (1, 'John');
+INSERT INTO distinct (Num, Name) VALUES (3, 'Mary');
+INSERT INTO distinct (Num, Name) VALUES (3, 'Mary');
+INSERT INTO distinct (Num, Name) VALUES (3, 'Mary');
+INSERT INTO distinct (Num, Name) VALUES (4, 'Mary');
+INSERT INTO distinct (Num, Name) VALUES (4, 'Mary');
+INSERT INTO distinct (Num, Name) VALUES (5, 'Bill');
+INSERT INTO distinct (Num, Name) VALUES (7, 'Bill');
+INSERT INTO distinct (Num, Name) VALUES (7, 'Bill');
+INSERT INTO distinct (Num, Name) VALUES (7, 'Mary');
+INSERT INTO distinct (Num, Name) VALUES (7, 'John');
+
+-- { echoOn }
+-- String field
+SELECT Name FROM (SELECT DISTINCT Name FROM distinct) ORDER BY Name;
+-- Num field
+SELECT Num FROM (SELECT DISTINCT Num FROM distinct) ORDER BY Num;
+-- all const columns
+SELECT DISTINCT 1 as a, 2 as b FROM distinct;
+-- { echoOff }
+
+DROP TABLE IF EXISTS distinct;
diff --git a/parser/testdata/00413_least_greatest_new_behavior/ast.json b/parser/testdata/00413_least_greatest_new_behavior/ast.json
new file mode 100644
index 000000000..27db7aacc
--- /dev/null
+++ b/parser/testdata/00413_least_greatest_new_behavior/ast.json
@@ -0,0 +1,70 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 2)"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Function least (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 2)"
+		},
+		{
+			"explain": "        Literal Int64_-9223372036854775808"
+		},
+		{
+			"explain": "        Literal UInt64_18446744073709551615"
+		},
+		{
+			"explain": "    Function toTypeName (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Function greatest (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 2)"
+		},
+		{
+			"explain": "        Literal Int64_-9223372036854775808"
+		},
+		{
+			"explain": "        Literal UInt64_18446744073709551615"
+		}
+	],
+
+	"rows": 16,
+
+	"statistics":
+	{
+		"elapsed": 0.001319679,
+		"rows_read": 16,
+		"bytes_read": 688
+	}
+}
diff --git a/parser/testdata/00413_least_greatest_new_behavior/metadata.json b/parser/testdata/00413_least_greatest_new_behavior/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00413_least_greatest_new_behavior/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00413_least_greatest_new_behavior/query.sql b/parser/testdata/00413_least_greatest_new_behavior/query.sql
new file mode 100644
index 000000000..b458e7749
--- /dev/null
+++ b/parser/testdata/00413_least_greatest_new_behavior/query.sql
@@ -0,0 +1,3 @@
+SELECT toTypeName(least(-9223372036854775808, 18446744073709551615)), toTypeName(greatest(-9223372036854775808, 18446744073709551615));
+SELECT least(0, 1), least(-400, -200), least(toInt8(127), 255), least(-1, 1), least(toUInt64(1), toInt64(-1)), least(-128, 254), least(-128, 255), least(-9223372036854775808, 18446744073709551615), least(-9223372036854775807, 18446744073709551615), least(toInt64(9223372036854775807), 9223372036854775808);
+SELECT greatest(0, 1), greatest(-400, -200), greatest(toInt8(127), 255), greatest(-1, 1), greatest(toUInt64(1), toInt64(-1)), greatest(-128, 254), greatest(-128, 255), greatest(-9223372036854775808, 18446744073709551615), greatest(-9223372036854775807, 18446744073709551615), greatest(toInt64(9223372036854775807), 9223372036854775808);
diff --git a/parser/testdata/00414_time_zones_direct_conversion/ast.json b/parser/testdata/00414_time_zones_direct_conversion/ast.json
new file mode 100644
index 000000000..b6d624bbf
--- /dev/null
+++ b/parser/testdata/00414_time_zones_direct_conversion/ast.json
@@ -0,0 +1,88 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 3)"
+		},
+		{
+			"explain": "   ExpressionList (children 2)"
+		},
+		{
+			"explain": "    Function plus (alias ts) (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 2)"
+		},
+		{
+			"explain": "      Literal UInt64_1301146200"
+		},
+		{
+			"explain": "      Function multiply (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 2)"
+		},
+		{
+			"explain": "        Literal UInt64_1800"
+		},
+		{
+			"explain": "        Identifier number"
+		},
+		{
+			"explain": "    Function toString (alias time_in_sydney) (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 2)"
+		},
+		{
+			"explain": "      Function toDateTime (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 1)"
+		},
+		{
+			"explain": "        Identifier ts"
+		},
+		{
+			"explain": "      Literal 'Australia\/Sydney'"
+		},
+		{
+			"explain": "   TablesInSelectQuery (children 1)"
+		},
+		{
+			"explain": "    TablesInSelectQueryElement (children 1)"
+		},
+		{
+			"explain": "     TableExpression (children 1)"
+		},
+		{
+			"explain": "      TableIdentifier system.numbers"
+		},
+		{
+			"explain": "   Literal UInt64_7"
+		}
+	],
+
+	"rows": 22,
+
+	"statistics":
+	{
+		"elapsed": 0.001587451,
+		"rows_read": 22,
+		"bytes_read": 899
+	}
+}
diff --git a/parser/testdata/00414_time_zones_direct_conversion/metadata.json b/parser/testdata/00414_time_zones_direct_conversion/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00414_time_zones_direct_conversion/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00414_time_zones_direct_conversion/query.sql b/parser/testdata/00414_time_zones_direct_conversion/query.sql
new file mode 100644
index 000000000..901a4a708
--- /dev/null
+++ b/parser/testdata/00414_time_zones_direct_conversion/query.sql
@@ -0,0 +1 @@
+SELECT 1301146200 + 1800 * number AS ts, toString(toDateTime(ts), 'Australia/Sydney') AS time_in_sydney FROM system.numbers LIMIT 7;
diff --git a/parser/testdata/00420_null_in_scalar_subqueries/ast.json b/parser/testdata/00420_null_in_scalar_subqueries/ast.json
new file mode 100644
index 000000000..0ebccc5e2
--- /dev/null
+++ b/parser/testdata/00420_null_in_scalar_subqueries/ast.json
@@ -0,0 +1,55 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Subquery (children 1)"
+		},
+		{
+			"explain": "     SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": "      ExpressionList (children 1)"
+		},
+		{
+			"explain": "       SelectQuery (children 2)"
+		},
+		{
+			"explain": "        ExpressionList (children 1)"
+		},
+		{
+			"explain": "         Literal UInt64_1"
+		},
+		{
+			"explain": "        Literal UInt64_0"
+		}
+	],
+
+	"rows": 11,
+
+	"statistics":
+	{
+		"elapsed": 0.001179807,
+		"rows_read": 11,
+		"bytes_read": 416
+	}
+}
diff --git a/parser/testdata/00420_null_in_scalar_subqueries/metadata.json b/parser/testdata/00420_null_in_scalar_subqueries/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00420_null_in_scalar_subqueries/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00420_null_in_scalar_subqueries/query.sql b/parser/testdata/00420_null_in_scalar_subqueries/query.sql
new file mode 100644
index 000000000..1a7544492
--- /dev/null
+++ b/parser/testdata/00420_null_in_scalar_subqueries/query.sql
@@ -0,0 +1,8 @@
+SELECT (SELECT 1 WHERE 0);
+SELECT (SELECT * FROM (SELECT * FROM system.numbers LIMIT 2) WHERE number = number + 1);
+SELECT (SELECT NULL WHERE 0);
+SELECT (SELECT Null WHERE nuLL IS NOT NULL);
+SELECT (SELECT Null WHERE 1);
+SELECT CAST(NULL as Nullable(Nothing));
+SELECT (SELECT CAST(NULL as Nullable(Nothing)) WHERE 0);
+SELECT (SELECT 1 WHERE 0) AS a, (SELECT 1 WHERE 1) AS b FORMAT TSVWithNames;
diff --git a/parser/testdata/00422_hash_function_constexpr/ast.json b/parser/testdata/00422_hash_function_constexpr/ast.json
new file mode 100644
index 000000000..61733fd3b
--- /dev/null
+++ b/parser/testdata/00422_hash_function_constexpr/ast.json
@@ -0,0 +1,58 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function in (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 2)"
+		},
+		{
+			"explain": "      Function cityHash64 (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 1)"
+		},
+		{
+			"explain": "        Literal 'abc'"
+		},
+		{
+			"explain": "      Function cityHash64 (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 1)"
+		},
+		{
+			"explain": "        Literal 'abc'"
+		}
+	],
+
+	"rows": 12,
+
+	"statistics":
+	{
+		"elapsed": 0.001583993,
+		"rows_read": 12,
+		"bytes_read": 459
+	}
+}
diff --git a/parser/testdata/00422_hash_function_constexpr/metadata.json b/parser/testdata/00422_hash_function_constexpr/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00422_hash_function_constexpr/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00422_hash_function_constexpr/query.sql b/parser/testdata/00422_hash_function_constexpr/query.sql
new file mode 100644
index 000000000..5c2c415e6
--- /dev/null
+++ b/parser/testdata/00422_hash_function_constexpr/query.sql
@@ -0,0 +1,2 @@
+SELECT cityHash64('abc') IN cityHash64('abc');
+SELECT cityHash64(arrayJoin(['abc', 'def'])) IN cityHash64('abc');
diff --git a/parser/testdata/00423_storage_log_single_thread/ast.json b/parser/testdata/00423_storage_log_single_thread/ast.json
new file mode 100644
index 000000000..30537f6ef
--- /dev/null
+++ b/parser/testdata/00423_storage_log_single_thread/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery log (children 1)"
+		},
+		{
+			"explain": " Identifier log"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001291057,
+		"rows_read": 2,
+		"bytes_read": 58
+	}
+}
diff --git a/parser/testdata/00423_storage_log_single_thread/metadata.json b/parser/testdata/00423_storage_log_single_thread/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00423_storage_log_single_thread/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00423_storage_log_single_thread/query.sql b/parser/testdata/00423_storage_log_single_thread/query.sql
new file mode 100644
index 000000000..8eff93235
--- /dev/null
+++ b/parser/testdata/00423_storage_log_single_thread/query.sql
@@ -0,0 +1,29 @@
+DROP TABLE IF EXISTS log;
+CREATE TABLE log (s String) ENGINE = Log;
+
+SELECT * FROM log LIMIT 1;
+SELECT * FROM log;
+
+DETACH TABLE log;
+ATTACH TABLE log;
+
+SELECT * FROM log;
+SELECT * FROM log LIMIT 1;
+
+INSERT INTO log VALUES ('Hello'), ('World');
+
+SELECT * FROM log LIMIT 1;
+
+DETACH TABLE log;
+ATTACH TABLE log;
+
+SELECT * FROM log LIMIT 1;
+SELECT * FROM log;
+
+DETACH TABLE log;
+ATTACH TABLE log;
+
+SELECT * FROM log;
+SELECT * FROM log LIMIT 1;
+
+DROP TABLE log;
diff --git a/parser/testdata/00424_shard_aggregate_functions_of_nullable/ast.json b/parser/testdata/00424_shard_aggregate_functions_of_nullable/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00424_shard_aggregate_functions_of_nullable/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00424_shard_aggregate_functions_of_nullable/metadata.json b/parser/testdata/00424_shard_aggregate_functions_of_nullable/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00424_shard_aggregate_functions_of_nullable/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00424_shard_aggregate_functions_of_nullable/query.sql b/parser/testdata/00424_shard_aggregate_functions_of_nullable/query.sql
new file mode 100644
index 000000000..0211c21a5
--- /dev/null
+++ b/parser/testdata/00424_shard_aggregate_functions_of_nullable/query.sql
@@ -0,0 +1,42 @@
+-- Tags: shard
+
+SELECT avg(arrayJoin([NULL]));
+SELECT avg(arrayJoin([NULL, 1]));
+SELECT avg(arrayJoin([NULL, 1, 2]));
+
+SELECT quantileExactWeighted(0.5)(x, y) FROM
+(
+    SELECT CAST(NULL AS Nullable(UInt8)) AS x, CAST(1 AS Nullable(UInt8)) AS y
+    UNION ALL
+    SELECT CAST(2 AS Nullable(UInt8)) AS x, CAST(NULL AS Nullable(UInt8)) AS y
+);
+
+SELECT quantileExactWeighted(0.5)(x, y) FROM
+(
+    SELECT CAST(1 AS Nullable(UInt8)) AS x, CAST(0 AS Nullable(UInt8)) AS y
+    UNION ALL
+    SELECT CAST(NULL AS Nullable(UInt8)) AS x, CAST(1 AS Nullable(UInt8)) AS y
+    UNION ALL
+    SELECT CAST(2 AS Nullable(UInt8)) AS x, CAST(NULL AS Nullable(UInt8)) AS y
+    UNION ALL
+    SELECT CAST(number AS Nullable(UInt8)) AS x, CAST(number AS Nullable(UInt8)) AS y FROM system.numbers LIMIT 10
+);
+
+SELECT quantileExactWeighted(0.5)(x, y) FROM
+(
+    SELECT CAST(NULL AS Nullable(UInt8)) AS x, 1 AS y
+    UNION ALL
+    SELECT CAST(2 AS Nullable(UInt8)) AS x, 1 AS y
+);
+
+SELECT quantileExactWeighted(0.5)(x, y) FROM
+(
+    SELECT CAST(NULL AS Nullable(UInt8)) AS x, 1 AS y
+);
+
+SELECT
+    sum(1 + CAST(dummy AS Nullable(UInt8))) AS res1, toTypeName(res1) AS t1,
+    sum(1 + nullIf(dummy, 0)) AS res2, toTypeName(res2) AS t2
+FROM remote('127.0.0.{2,3}', system.one);
+
+SELECT CAST(NULL AS Nullable(UInt64)) FROM system.numbers LIMIT 2
diff --git a/parser/testdata/00425_count_nullable/ast.json b/parser/testdata/00425_count_nullable/ast.json
new file mode 100644
index 000000000..6d1977ddd
--- /dev/null
+++ b/parser/testdata/00425_count_nullable/ast.json
@@ -0,0 +1,82 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 2)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function count (children 1)"
+		},
+		{
+			"explain": "     ExpressionList"
+		},
+		{
+			"explain": "   TablesInSelectQuery (children 1)"
+		},
+		{
+			"explain": "    TablesInSelectQueryElement (children 1)"
+		},
+		{
+			"explain": "     TableExpression (children 1)"
+		},
+		{
+			"explain": "      Subquery (children 1)"
+		},
+		{
+			"explain": "       SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": "        ExpressionList (children 1)"
+		},
+		{
+			"explain": "         SelectQuery (children 3)"
+		},
+		{
+			"explain": "          ExpressionList (children 1)"
+		},
+		{
+			"explain": "           Identifier number (alias x)"
+		},
+		{
+			"explain": "          TablesInSelectQuery (children 1)"
+		},
+		{
+			"explain": "           TablesInSelectQueryElement (children 1)"
+		},
+		{
+			"explain": "            TableExpression (children 1)"
+		},
+		{
+			"explain": "             TableIdentifier system.numbers"
+		},
+		{
+			"explain": "          Literal UInt64_10"
+		}
+	],
+
+	"rows": 20,
+
+	"statistics":
+	{
+		"elapsed": 0.001295395,
+		"rows_read": 20,
+		"bytes_read": 850
+	}
+}
diff --git a/parser/testdata/00425_count_nullable/metadata.json b/parser/testdata/00425_count_nullable/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00425_count_nullable/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00425_count_nullable/query.sql b/parser/testdata/00425_count_nullable/query.sql
new file mode 100644
index 000000000..e0fb491e4
--- /dev/null
+++ b/parser/testdata/00425_count_nullable/query.sql
@@ -0,0 +1,9 @@
+SELECT count() FROM (SELECT number AS x FROM system.numbers LIMIT 10);
+
+SELECT count(x) FROM (SELECT number AS x FROM system.numbers LIMIT 10);
+
+SELECT count(x) FROM (SELECT CAST(number AS Nullable(UInt64)) AS x FROM system.numbers LIMIT 10);
+
+SELECT count(x) FROM (SELECT nullIf(number, 5) AS x FROM system.numbers LIMIT 10);
+
+SELECT count(NULL);
diff --git a/parser/testdata/00426_nulls_sorting/ast.json b/parser/testdata/00426_nulls_sorting/ast.json
new file mode 100644
index 000000000..c06620623
--- /dev/null
+++ b/parser/testdata/00426_nulls_sorting/ast.json
@@ -0,0 +1,52 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 2)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function arrayJoin (alias x) (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Literal Array_[UInt64_0, UInt64_0, UInt64_0, UInt64_0, UInt64_0, UInt64_0, UInt64_0, UInt64_0, UInt64_0, UInt64_0, UInt64_0, UInt64_1, UInt64_2, UInt64_2, UInt64_3, UInt64_4, UInt64_12, NULL]"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    OrderByElement (children 1)"
+		},
+		{
+			"explain": "     Identifier x"
+		}
+	],
+
+	"rows": 10,
+
+	"statistics":
+	{
+		"elapsed": 0.001080606,
+		"rows_read": 10,
+		"bytes_read": 549
+	}
+}
diff --git a/parser/testdata/00426_nulls_sorting/metadata.json b/parser/testdata/00426_nulls_sorting/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00426_nulls_sorting/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00426_nulls_sorting/query.sql b/parser/testdata/00426_nulls_sorting/query.sql
new file mode 100644
index 000000000..f670792ff
--- /dev/null
+++ b/parser/testdata/00426_nulls_sorting/query.sql
@@ -0,0 +1,24 @@
+SELECT arrayJoin([0,0,0,0,0,0,0,0,0,0,0,1,2,2,3,4,12,NULL]) AS x ORDER BY x;
+SELECT arrayJoin([0,0,0,0,0,0,0,0,0,0,0,1,2,2,3,4,12,NULL]) AS x ORDER BY x DESC;
+
+SET max_block_size = 1000;
+
+SELECT nullIf(number, number % 3 = 0 ? number : 0) AS x FROM (SELECT * FROM system.numbers LIMIT 10) ORDER BY x;
+SELECT nullIf(number, number % 3 = 0 ? number : 0) AS x FROM (SELECT * FROM system.numbers LIMIT 10) ORDER BY x DESC;
+
+SET max_block_size = 5;
+
+SELECT nullIf(number, number % 3 = 0 ? number : 0) AS x FROM (SELECT * FROM system.numbers LIMIT 10) ORDER BY x;
+SELECT nullIf(number, number % 3 = 0 ? number : 0) AS x FROM (SELECT * FROM system.numbers LIMIT 10) ORDER BY x DESC;
+
+SET max_block_size = 1000;
+
+SELECT nullIf(number, number % 3 = 0 ? number : 0) AS x, number AS y FROM (SELECT * FROM system.numbers LIMIT 10) ORDER BY x, y;
+SELECT nullIf(number, number % 3 = 0 ? number : 0) AS x, number AS y FROM (SELECT * FROM system.numbers LIMIT 10) ORDER BY x DESC, y;
+
+SET max_block_size = 5;
+
+SELECT nullIf(number, number % 3 = 0 ? number : 0) AS x, number AS y FROM (SELECT * FROM system.numbers LIMIT 10) ORDER BY x, y;
+SELECT nullIf(number, number % 3 = 0 ? 
number : 0) AS x, number AS y FROM (SELECT * FROM system.numbers LIMIT 10) ORDER BY x DESC, y; + +SELECT x FROM (SELECT toNullable(number) AS x FROM system.numbers LIMIT 3) ORDER BY x DESC LIMIT 10 diff --git a/parser/testdata/00429_point_in_ellipses/ast.json b/parser/testdata/00429_point_in_ellipses/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00429_point_in_ellipses/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00429_point_in_ellipses/metadata.json b/parser/testdata/00429_point_in_ellipses/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00429_point_in_ellipses/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00429_point_in_ellipses/query.sql b/parser/testdata/00429_point_in_ellipses/query.sql new file mode 100644 index 000000000..1c9e0e1ce --- /dev/null +++ b/parser/testdata/00429_point_in_ellipses/query.sql @@ -0,0 +1,7 @@ + +SELECT pointInEllipses(33.3, 55.3, 33.4, 55.1, 1.0, 1.0) AS distance; +SELECT pointInEllipses(33.3 + v, 55.3 + v, 33.4, 55.1, 1.0, 1.0) AS distance from +( + select number + 0.1 as v from system.numbers limit 1 +); +SELECT pointInEllipses(33.3, 55.3, 33.4, 55.1, 0.1, 0.2) AS distance; diff --git a/parser/testdata/00431_if_nulls/ast.json b/parser/testdata/00431_if_nulls/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00431_if_nulls/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00431_if_nulls/metadata.json b/parser/testdata/00431_if_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00431_if_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00431_if_nulls/query.sql b/parser/testdata/00431_if_nulls/query.sql new file mode 100644 index 000000000..024f24944 --- /dev/null +++ b/parser/testdata/00431_if_nulls/query.sql @@ -0,0 +1,145 @@ +/* The condition can be: + * - constant, true + * - constant, false + * - constant, NULL + * - non-constant, non-nullable + * - non-constant, nullable + * + * The then and else branches can each be: + * - constant, not NULL + * - constant, NULL + * - non-constant, non-nullable + * - non-constant, nullable + * + * Thus we have 5 * 4 * 4 = 80 combinations. + */ + +DROP TABLE IF EXISTS nullable_00431; + +CREATE VIEW nullable_00431 +AS SELECT + 1 AS constant_true, + 0 AS constant_false, + NULL AS constant_null, + number % 3 = 1 AS cond_non_constant, + number % 3 = 2 ? NULL : (number % 3 = 1) AS cond_non_constant_nullable, + 'Hello' AS then_constant, + 'World' AS else_constant, + toString(number) AS then_non_constant, + toString(-number) AS else_non_constant, + nullIf(toString(number), '5') AS then_non_constant_nullable, + nullIf(toString(-number), '-5') AS else_non_constant_nullable +FROM system.numbers LIMIT 10; + +SELECT '---------- constant_true ----------'; + +SELECT constant_true ? then_constant : else_constant AS res FROM nullable_00431; +SELECT constant_true ? then_constant : constant_null AS res FROM nullable_00431; +SELECT constant_true ? then_constant : else_non_constant AS res FROM nullable_00431; +SELECT constant_true ? then_constant : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT constant_true ? constant_null : else_constant AS res FROM nullable_00431; +SELECT constant_true ? constant_null : constant_null AS res FROM nullable_00431; +SELECT constant_true ?
constant_null : else_non_constant AS res FROM nullable_00431; +SELECT constant_true ? constant_null : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT constant_true ? then_non_constant : else_constant AS res FROM nullable_00431; +SELECT constant_true ? then_non_constant : constant_null AS res FROM nullable_00431; +SELECT constant_true ? then_non_constant : else_non_constant AS res FROM nullable_00431; +SELECT constant_true ? then_non_constant : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT constant_true ? then_non_constant_nullable : else_constant AS res FROM nullable_00431; +SELECT constant_true ? then_non_constant_nullable : constant_null AS res FROM nullable_00431; +SELECT constant_true ? then_non_constant_nullable : else_non_constant AS res FROM nullable_00431; +SELECT constant_true ? then_non_constant_nullable : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT '---------- constant_false ----------'; + +SELECT constant_false ? then_constant : else_constant AS res FROM nullable_00431; +SELECT constant_false ? then_constant : constant_null AS res FROM nullable_00431; +SELECT constant_false ? then_constant : else_non_constant AS res FROM nullable_00431; +SELECT constant_false ? then_constant : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT constant_false ? constant_null : else_constant AS res FROM nullable_00431; +SELECT constant_false ? constant_null : constant_null AS res FROM nullable_00431; +SELECT constant_false ? constant_null : else_non_constant AS res FROM nullable_00431; +SELECT constant_false ? constant_null : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT constant_false ? then_non_constant : else_constant AS res FROM nullable_00431; +SELECT constant_false ? then_non_constant : constant_null AS res FROM nullable_00431; +SELECT constant_false ? then_non_constant : else_non_constant AS res FROM nullable_00431; +SELECT constant_false ? then_non_constant : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT constant_false ? then_non_constant_nullable : else_constant AS res FROM nullable_00431; +SELECT constant_false ? then_non_constant_nullable : constant_null AS res FROM nullable_00431; +SELECT constant_false ? then_non_constant_nullable : else_non_constant AS res FROM nullable_00431; +SELECT constant_false ? then_non_constant_nullable : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT '---------- constant_null ----------'; + +SELECT constant_null ? then_constant : else_constant AS res FROM nullable_00431; +SELECT constant_null ? then_constant : constant_null AS res FROM nullable_00431; +SELECT constant_null ? then_constant : else_non_constant AS res FROM nullable_00431; +SELECT constant_null ? then_constant : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT constant_null ? constant_null : else_constant AS res FROM nullable_00431; +SELECT constant_null ? constant_null : constant_null AS res FROM nullable_00431; +SELECT constant_null ? constant_null : else_non_constant AS res FROM nullable_00431; +SELECT constant_null ? constant_null : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT constant_null ? then_non_constant : else_constant AS res FROM nullable_00431; +SELECT constant_null ? then_non_constant : constant_null AS res FROM nullable_00431; +SELECT constant_null ? then_non_constant : else_non_constant AS res FROM nullable_00431; +SELECT constant_null ? 
then_non_constant : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT constant_null ? then_non_constant_nullable : else_constant AS res FROM nullable_00431; +SELECT constant_null ? then_non_constant_nullable : constant_null AS res FROM nullable_00431; +SELECT constant_null ? then_non_constant_nullable : else_non_constant AS res FROM nullable_00431; +SELECT constant_null ? then_non_constant_nullable : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT '---------- cond_non_constant ----------'; + +SELECT cond_non_constant ? then_constant : else_constant AS res FROM nullable_00431; +SELECT cond_non_constant ? then_constant : constant_null AS res FROM nullable_00431; +SELECT cond_non_constant ? then_constant : else_non_constant AS res FROM nullable_00431; +SELECT cond_non_constant ? then_constant : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT cond_non_constant ? constant_null : else_constant AS res FROM nullable_00431; +SELECT cond_non_constant ? constant_null : constant_null AS res FROM nullable_00431; +SELECT cond_non_constant ? constant_null : else_non_constant AS res FROM nullable_00431; +SELECT cond_non_constant ? constant_null : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT cond_non_constant ? then_non_constant : else_constant AS res FROM nullable_00431; +SELECT cond_non_constant ? then_non_constant : constant_null AS res FROM nullable_00431; +SELECT cond_non_constant ? then_non_constant : else_non_constant AS res FROM nullable_00431; +SELECT cond_non_constant ? then_non_constant : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT cond_non_constant ? then_non_constant_nullable : else_constant AS res FROM nullable_00431; +SELECT cond_non_constant ? then_non_constant_nullable : constant_null AS res FROM nullable_00431; +SELECT cond_non_constant ? then_non_constant_nullable : else_non_constant AS res FROM nullable_00431; +SELECT cond_non_constant ? then_non_constant_nullable : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT '---------- cond_non_constant_nullable ----------'; + +SELECT cond_non_constant_nullable ? then_constant : else_constant AS res FROM nullable_00431; +SELECT cond_non_constant_nullable ? then_constant : constant_null AS res FROM nullable_00431; +SELECT cond_non_constant_nullable ? then_constant : else_non_constant AS res FROM nullable_00431; +SELECT cond_non_constant_nullable ? then_constant : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT cond_non_constant_nullable ? constant_null : else_constant AS res FROM nullable_00431; +SELECT cond_non_constant_nullable ? constant_null : constant_null AS res FROM nullable_00431; +SELECT cond_non_constant_nullable ? constant_null : else_non_constant AS res FROM nullable_00431; +SELECT cond_non_constant_nullable ? constant_null : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT cond_non_constant_nullable ? then_non_constant : else_constant AS res FROM nullable_00431; +SELECT cond_non_constant_nullable ? then_non_constant : constant_null AS res FROM nullable_00431; +SELECT cond_non_constant_nullable ? then_non_constant : else_non_constant AS res FROM nullable_00431; +SELECT cond_non_constant_nullable ? then_non_constant : else_non_constant_nullable AS res FROM nullable_00431; + +SELECT cond_non_constant_nullable ? then_non_constant_nullable : else_constant AS res FROM nullable_00431; +SELECT cond_non_constant_nullable ? 
then_non_constant_nullable : constant_null AS res FROM nullable_00431; +SELECT cond_non_constant_nullable ? then_non_constant_nullable : else_non_constant AS res FROM nullable_00431; +SELECT cond_non_constant_nullable ? then_non_constant_nullable : else_non_constant_nullable AS res FROM nullable_00431; + + +DROP TABLE nullable_00431; diff --git a/parser/testdata/00432_aggregate_function_scalars_and_constants/ast.json b/parser/testdata/00432_aggregate_function_scalars_and_constants/ast.json new file mode 100644 index 000000000..616d12219 --- /dev/null +++ b/parser/testdata/00432_aggregate_function_scalars_and_constants/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery agg_func_col (children 1)" + }, + { + "explain": " Identifier agg_func_col" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001208278, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/00432_aggregate_function_scalars_and_constants/metadata.json b/parser/testdata/00432_aggregate_function_scalars_and_constants/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00432_aggregate_function_scalars_and_constants/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00432_aggregate_function_scalars_and_constants/query.sql b/parser/testdata/00432_aggregate_function_scalars_and_constants/query.sql new file mode 100644 index 000000000..a6f31b935 --- /dev/null +++ b/parser/testdata/00432_aggregate_function_scalars_and_constants/query.sql @@ -0,0 +1,58 @@ +DROP TABLE IF EXISTS agg_func_col; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE agg_func_col (p Date, k UInt8, d AggregateFunction(sum, UInt64) DEFAULT arrayReduce('sumState', [toUInt64(200)])) ENGINE = AggregatingMergeTree(p, k, 1); +INSERT INTO agg_func_col (k) VALUES (0); +INSERT INTO agg_func_col (k, d) SELECT 1 AS k, arrayReduce('sumState', [toUInt64(100)]) AS d; +SELECT k, sumMerge(d) FROM agg_func_col GROUP BY k ORDER BY k; + +SELECT ''; +ALTER TABLE agg_func_col ADD COLUMN af_avg1 AggregateFunction(avg, UInt8); +SELECT k, sumMerge(d), avgMerge(af_avg1) FROM agg_func_col GROUP BY k ORDER BY k; + +SELECT ''; +INSERT INTO agg_func_col (k, af_avg1) VALUES (2, arrayReduce('avgState', [101])); +SELECT k, sumMerge(d), avgMerge(af_avg1) FROM agg_func_col GROUP BY k ORDER BY k; + +SELECT ''; +ALTER TABLE agg_func_col ADD COLUMN af_gua AggregateFunction(groupUniqArray, String) DEFAULT arrayReduce('groupUniqArrayState', ['---', '---']); +SELECT k, sumMerge(d), avgMerge(af_avg1), groupUniqArrayMerge(af_gua) FROM agg_func_col GROUP BY k ORDER BY k; + +SELECT ''; +INSERT INTO agg_func_col (k, af_avg1, af_gua) VALUES (3, arrayReduce('avgState', [102, 102]), arrayReduce('groupUniqArrayState', ['igua', 'igua'])); +SELECT k, sumMerge(d), avgMerge(af_avg1), groupUniqArrayMerge(af_gua) FROM agg_func_col GROUP BY k ORDER BY k; + +OPTIMIZE TABLE agg_func_col; + +SELECT ''; +SELECT k, sumMerge(d), avgMerge(af_avg1), groupUniqArrayMerge(af_gua) FROM agg_func_col GROUP BY k ORDER BY k; + +DROP TABLE IF EXISTS agg_func_col; + +SELECT ''; +SELECT arrayReduce('groupUniqArrayIf', ['---', '---', 't1'], [1, 1, 0]); +SELECT arrayReduce('groupUniqArrayMergeIf', + [arrayReduce('groupUniqArrayState', ['---', '---']), arrayReduce('groupUniqArrayState', ['t1', 't'])], + [1, 0] +); + +SELECT ''; +SELECT arrayReduce('avgState', [0]) IN (arrayReduce('avgState', [0, 1]), arrayReduce('avgState', [0])); +SELECT 
arrayReduce('avgState', [0]) IN (arrayReduce('avgState', [0, 1]), arrayReduce('avgState', [1])); + +SELECT ''; +SELECT arrayReduce('uniqExactMerge', + [arrayReduce('uniqExactMergeState', + [ + arrayReduce('uniqExactState', [12345678901]), + arrayReduce('uniqExactState', [12345678901]) + ]) + ]); + +SELECT arrayReduce('uniqExactMerge', + [arrayReduce('uniqExactMergeState', + [ + arrayReduce('uniqExactState', [12345678901]), + arrayReduce('uniqExactState', [12345678902]) + ]) + ]); diff --git a/parser/testdata/00433_ifnull/ast.json b/parser/testdata/00433_ifnull/ast.json new file mode 100644 index 000000000..f304790a1 --- /dev/null +++ b/parser/testdata/00433_ifnull/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function ifNull (alias res) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'x'" + }, + { + "explain": " Literal 'y'" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier res" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001174763, + "rows_read": 11, + "bytes_read": 403 + } +} diff --git a/parser/testdata/00433_ifnull/metadata.json b/parser/testdata/00433_ifnull/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00433_ifnull/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00433_ifnull/query.sql b/parser/testdata/00433_ifnull/query.sql new file mode 100644 index 000000000..9071305a8 --- /dev/null +++ b/parser/testdata/00433_ifnull/query.sql @@ -0,0 +1,21 @@ +SELECT ifNull('x', 'y') AS res, toTypeName(res); +SELECT ifNull(materialize('x'), materialize('y')) AS res, toTypeName(res); + +SELECT ifNull(toNullable('x'), 'y') AS res, toTypeName(res); +SELECT ifNull(toNullable('x'), materialize('y')) AS res, toTypeName(res); + +SELECT ifNull('x', toNullable('y')) AS res, toTypeName(res); +SELECT ifNull(materialize('x'), toNullable('y')) AS res, toTypeName(res); + +SELECT ifNull(toNullable('x'), toNullable('y')) AS res, toTypeName(res); + +SELECT ifNull(toString(number), toString(-number)) AS res, toTypeName(res) FROM system.numbers LIMIT 5; +SELECT ifNull(nullIf(toString(number), '1'), toString(-number)) AS res, toTypeName(res) FROM system.numbers LIMIT 5; +SELECT ifNull(toString(number), nullIf(toString(-number), '-3')) AS res, toTypeName(res) FROM system.numbers LIMIT 5; +SELECT ifNull(nullIf(toString(number), '1'), nullIf(toString(-number), '-3')) AS res, toTypeName(res) FROM system.numbers LIMIT 5; + +SELECT ifNull(NULL, 1) AS res, toTypeName(res); +SELECT ifNull(1, NULL) AS res, toTypeName(res); +SELECT ifNull(NULL, NULL) AS res, toTypeName(res); + +SELECT IFNULL(NULLIF(toString(number), '1'), NULLIF(toString(-number), '-3')) AS res, toTypeName(res) FROM system.numbers LIMIT 5; diff --git a/parser/testdata/00434_tonullable/ast.json b/parser/testdata/00434_tonullable/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00434_tonullable/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00434_tonullable/metadata.json b/parser/testdata/00434_tonullable/metadata.json new file mode 100644 index 000000000..ef120d978 
--- /dev/null +++ b/parser/testdata/00434_tonullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00434_tonullable/query.sql b/parser/testdata/00434_tonullable/query.sql new file mode 100644 index 000000000..b9420ff77 --- /dev/null +++ b/parser/testdata/00434_tonullable/query.sql @@ -0,0 +1,13 @@ +SELECT + toNullable(NULL) AS a, + toNullable('Hello') AS b, + toNullable(toNullable(1)) AS c, + toNullable(materialize(NULL)) AS d, + toNullable(materialize('Hello')) AS e, + toNullable(toNullable(materialize(1))) AS f, + toTypeName(a), + toTypeName(b), + toTypeName(c), + toTypeName(d), + toTypeName(e), + toTypeName(f); diff --git a/parser/testdata/00435_coalesce/ast.json b/parser/testdata/00435_coalesce/ast.json new file mode 100644 index 000000000..f90e56c27 --- /dev/null +++ b/parser/testdata/00435_coalesce/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function coalesce (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function coalesce (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function coalesce (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal NULL" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001351996, + "rows_read": 13, + "bytes_read": 460 + } +} diff --git a/parser/testdata/00435_coalesce/metadata.json b/parser/testdata/00435_coalesce/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00435_coalesce/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00435_coalesce/query.sql b/parser/testdata/00435_coalesce/query.sql new file mode 100644 index 000000000..3744f5ff2 --- /dev/null +++ b/parser/testdata/00435_coalesce/query.sql @@ -0,0 +1,8 @@ +SELECT coalesce(), coalesce(NULL), coalesce(NULL, NULL), + coalesce(1), coalesce(1, NULL), coalesce(NULL, 1), coalesce(NULL, 1, NULL); + +SELECT COALESCE(), COALESCE(NULL), COALESCE(1, NULL); + +SELECT coalesce(number % 2 = 0 ? number : NULL, number % 3 = 0 ? number : NULL, number % 5 = 0 ? number : NULL) AS res, toTypeName(res) FROM system.numbers LIMIT 15; +SELECT coalesce(number % 2 = 0 ? number : NULL, number % 3 = 0 ? number : NULL, number) AS res, toTypeName(res) FROM system.numbers LIMIT 15; +SELECT coalesce(number % 2 = 0 ? number : NULL, number % 3 = 0 ? 
number : NULL, 100) AS res, toTypeName(res) FROM system.numbers LIMIT 15; diff --git a/parser/testdata/00436_convert_charset/ast.json b/parser/testdata/00436_convert_charset/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00436_convert_charset/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00436_convert_charset/metadata.json b/parser/testdata/00436_convert_charset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00436_convert_charset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00436_convert_charset/query.sql b/parser/testdata/00436_convert_charset/query.sql new file mode 100644 index 000000000..8ac9193c4 --- /dev/null +++ b/parser/testdata/00436_convert_charset/query.sql @@ -0,0 +1,31 @@ +-- Tags: no-fasttest + +SELECT + 'абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ' AS orig, + hex(convertCharset(orig, 'utf-8', 'cp1251') AS cp1251) AS cp1251_hex, + hex(convertCharset(orig, 'utf-8', 'utf-7')) AS utf7_hex, + hex(convertCharset(orig, 'utf-8', 'bocu-1')) AS bocu1_hex, + hex(convertCharset(orig, 'utf-8', 'scsu')) AS scsu_hex, + convertCharset(cp1251, 'cp1251', 'utf-8') AS orig2, + convertCharset(orig, 'cp1251', 'utf8') AS broken1, + convertCharset(orig, 'latin1', 'utf8') AS broken2, + convertCharset(orig, 'koi8-r', 'utf8') AS broken3, + convertCharset(broken1, 'utf-8', 'cp1251') AS restored1, + convertCharset(broken2, 'utf-8', 'latin1') AS restored2, + convertCharset(broken3, 'utf-8', 'koi8-r') AS restored3 +FORMAT Vertical; + +SELECT + materialize('абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ') AS orig, + hex(convertCharset(orig, 'utf-8', 'cp1251') AS cp1251) AS cp1251_hex, + hex(convertCharset(orig, 'utf-8', 'utf-7')) AS utf7_hex, + hex(convertCharset(orig, 'utf-8', 'bocu-1')) AS bocu1_hex, + hex(convertCharset(orig, 'utf-8', 'scsu')) AS scsu_hex, + convertCharset(cp1251, 'cp1251', 'utf-8') AS orig2, + convertCharset(orig, 'cp1251', 'utf8') AS broken1, + convertCharset(orig, 'latin1', 'utf8') AS broken2, + convertCharset(orig, 'koi8-r', 'utf8') AS broken3, + convertCharset(broken1, 'utf-8', 'cp1251') AS restored1, + convertCharset(broken2, 'utf-8', 'latin1') AS restored2, + convertCharset(broken3, 'utf-8', 'koi8-r') AS restored3 +FORMAT Vertical; diff --git a/parser/testdata/00436_fixed_string_16_comparisons/ast.json b/parser/testdata/00436_fixed_string_16_comparisons/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00436_fixed_string_16_comparisons/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00436_fixed_string_16_comparisons/metadata.json b/parser/testdata/00436_fixed_string_16_comparisons/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00436_fixed_string_16_comparisons/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00436_fixed_string_16_comparisons/query.sql b/parser/testdata/00436_fixed_string_16_comparisons/query.sql new file mode 100644 index 000000000..d22e9cbea --- /dev/null +++ b/parser/testdata/00436_fixed_string_16_comparisons/query.sql @@ -0,0 +1,63 @@ +SELECT + a, b, a = b, a != b, a < b, a > b, a <= b, a >= b, + toFixedString(a, 16) AS fa, toFixedString(b, 16) AS fb, fa = fb, fa != fb, fa < fb, fa > fb, fa <= fb, fa >= fb +FROM +( + SELECT 'aaaaaaaaaaaaaaaa' AS a + UNION ALL SELECT 'aaaaaaaaaaaaaaab' + UNION ALL SELECT 'aaaaaaaaaaaaaaac' + UNION ALL 
SELECT 'baaaaaaaaaaaaaaa' + UNION ALL SELECT 'baaaaaaaaaaaaaab' + UNION ALL SELECT 'baaaaaaaaaaaaaac' + UNION ALL SELECT 'aaaaaaaabaaaaaaa' + UNION ALL SELECT 'aaaaaaabaaaaaaaa' + UNION ALL SELECT 'aaaaaaacaaaaaaaa' +) js1 +CROSS JOIN +( + SELECT 'aaaaaaaaaaaaaaaa' AS b + UNION ALL SELECT 'aaaaaaaaaaaaaaab' + UNION ALL SELECT 'aaaaaaaaaaaaaaac' + UNION ALL SELECT 'baaaaaaaaaaaaaaa' + UNION ALL SELECT 'baaaaaaaaaaaaaab' + UNION ALL SELECT 'baaaaaaaaaaaaaac' + UNION ALL SELECT 'aaaaaaaabaaaaaaa' + UNION ALL SELECT 'aaaaaaabaaaaaaaa' + UNION ALL SELECT 'aaaaaaacaaaaaaaa' +) js2 +ORDER BY a, b; + + +SELECT + toFixedString(a, 16) AS a, + toFixedString('aaaaaaaaaaaaaaaa', 16) AS b1, + toFixedString('aaaaaaaaaaaaaaab', 16) AS b2, + toFixedString('aaaaaaaaaaaaaaac', 16) AS b3, + toFixedString('baaaaaaaaaaaaaaa', 16) AS b4, + toFixedString('baaaaaaaaaaaaaab', 16) AS b5, + toFixedString('baaaaaaaaaaaaaac', 16) AS b6, + toFixedString('aaaaaaaabaaaaaaa', 16) AS b7, + toFixedString('aaaaaaabaaaaaaaa', 16) AS b8, + toFixedString('aaaaaaacaaaaaaaa', 16) AS b9, + a = b1, a != b1, a < b1, a > b1, a <= b1, a >= b1, + a = b2, a != b2, a < b2, a > b2, a <= b2, a >= b2, + a = b3, a != b3, a < b3, a > b3, a <= b3, a >= b3, + a = b4, a != b4, a < b4, a > b4, a <= b4, a >= b4, + a = b5, a != b5, a < b5, a > b5, a <= b5, a >= b5, + a = b6, a != b6, a < b6, a > b6, a <= b6, a >= b6, + a = b7, a != b7, a < b7, a > b7, a <= b7, a >= b7, + a = b8, a != b8, a < b8, a > b8, a <= b8, a >= b8, + a = b9, a != b9, a < b9, a > b9, a <= b9, a >= b9 +FROM +( + SELECT 'aaaaaaaaaaaaaaaa' AS a + UNION ALL SELECT 'aaaaaaaaaaaaaaab' + UNION ALL SELECT 'aaaaaaaaaaaaaaac' + UNION ALL SELECT 'baaaaaaaaaaaaaaa' + UNION ALL SELECT 'baaaaaaaaaaaaaab' + UNION ALL SELECT 'baaaaaaaaaaaaaac' + UNION ALL SELECT 'aaaaaaaabaaaaaaa' + UNION ALL SELECT 'aaaaaaabaaaaaaaa' + UNION ALL SELECT 'aaaaaaacaaaaaaaa' +) +ORDER BY a; diff --git a/parser/testdata/00437_nulls_first_last/ast.json b/parser/testdata/00437_nulls_first_last/ast.json new file mode 100644 index 000000000..2e9051023 --- /dev/null +++ b/parser/testdata/00437_nulls_first_last/ast.json @@ -0,0 +1,154 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals 
(children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal Float64_nan" + }, + { + "explain": " Function toFloat64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 44, + + "statistics": + { + "elapsed": 0.001639807, + "rows_read": 44, + "bytes_read": 1938 + } +} diff --git a/parser/testdata/00437_nulls_first_last/metadata.json b/parser/testdata/00437_nulls_first_last/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00437_nulls_first_last/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00437_nulls_first_last/query.sql b/parser/testdata/00437_nulls_first_last/query.sql new file mode 100644 index 000000000..e06cbfda9 --- /dev/null +++ b/parser/testdata/00437_nulls_first_last/query.sql @@ -0,0 +1,39 @@ +SELECT x FROM (SELECT number % 5 = 0 ? NULL : (number % 3 = 0 ? nan : toFloat64(number)) AS x FROM system.numbers LIMIT 10) ORDER BY x ASC; +SELECT x FROM (SELECT number % 5 = 0 ? NULL : (number % 3 = 0 ? nan : toFloat64(number)) AS x FROM system.numbers LIMIT 10) ORDER BY x ASC NULLS FIRST; +SELECT x FROM (SELECT number % 5 = 0 ? NULL : (number % 3 = 0 ? nan : toFloat64(number)) AS x FROM system.numbers LIMIT 10) ORDER BY x ASC NULLS LAST; + +SELECT x FROM (SELECT number % 5 = 0 ? NULL : (number % 3 = 0 ? nan : toFloat64(number)) AS x FROM system.numbers LIMIT 10) ORDER BY x DESC; +SELECT x FROM (SELECT number % 5 = 0 ? NULL : (number % 3 = 0 ? nan : toFloat64(number)) AS x FROM system.numbers LIMIT 10) ORDER BY x DESC NULLS FIRST; +SELECT x FROM (SELECT number % 5 = 0 ? NULL : (number % 3 = 0 ? nan : toFloat64(number)) AS x FROM system.numbers LIMIT 10) ORDER BY x DESC NULLS LAST; + +SELECT x, y FROM (SELECT number % 5 = 0 ? NULL : number AS x, number % 3 = 0 ? nan : toFloat64(number) AS y FROM system.numbers LIMIT 10) ORDER BY x ASC, y ASC; +SELECT x, y FROM (SELECT number % 5 = 0 ? NULL : number AS x, number % 3 = 0 ? nan : toFloat64(number) AS y FROM system.numbers LIMIT 10) ORDER BY x ASC, y ASC NULLS FIRST; +SELECT x, y FROM (SELECT number % 5 = 0 ? NULL : number AS x, number % 3 = 0 ? nan : toFloat64(number) AS y FROM system.numbers LIMIT 10) ORDER BY x DESC NULLS FIRST, y ASC NULLS FIRST; + +SET max_block_size = 5; + +SELECT x FROM (SELECT number % 5 = 0 ? NULL : (number % 3 = 0 ? nan : toFloat64(number)) AS x FROM system.numbers LIMIT 10) ORDER BY x ASC; +SELECT x FROM (SELECT number % 5 = 0 ? NULL : (number % 3 = 0 ? nan : toFloat64(number)) AS x FROM system.numbers LIMIT 10) ORDER BY x ASC NULLS FIRST; +SELECT x FROM (SELECT number % 5 = 0 ? NULL : (number % 3 = 0 ? nan : toFloat64(number)) AS x FROM system.numbers LIMIT 10) ORDER BY x ASC NULLS LAST; + +SELECT x FROM (SELECT number % 5 = 0 ? NULL : (number % 3 = 0 ? 
nan : toFloat64(number)) AS x FROM system.numbers LIMIT 10) ORDER BY x DESC; +SELECT x FROM (SELECT number % 5 = 0 ? NULL : (number % 3 = 0 ? nan : toFloat64(number)) AS x FROM system.numbers LIMIT 10) ORDER BY x DESC NULLS FIRST; +SELECT x FROM (SELECT number % 5 = 0 ? NULL : (number % 3 = 0 ? nan : toFloat64(number)) AS x FROM system.numbers LIMIT 10) ORDER BY x DESC NULLS LAST; + +SELECT x, y FROM (SELECT number % 5 = 0 ? NULL : number AS x, number % 3 = 0 ? nan : toFloat64(number) AS y FROM system.numbers LIMIT 10) ORDER BY x ASC, y ASC; +SELECT x, y FROM (SELECT number % 5 = 0 ? NULL : number AS x, number % 3 = 0 ? nan : toFloat64(number) AS y FROM system.numbers LIMIT 10) ORDER BY x ASC, y ASC NULLS FIRST; +SELECT x, y FROM (SELECT number % 5 = 0 ? NULL : number AS x, number % 3 = 0 ? nan : toFloat64(number) AS y FROM system.numbers LIMIT 10) ORDER BY x DESC NULLS FIRST, y ASC NULLS FIRST; + +SET max_block_size = 3; + +SELECT x FROM (SELECT number % 5 = 0 ? NULL : (number % 3 = 0 ? nan : toFloat64(number)) AS x FROM system.numbers LIMIT 10) ORDER BY x ASC; +SELECT x FROM (SELECT number % 5 = 0 ? NULL : (number % 3 = 0 ? nan : toFloat64(number)) AS x FROM system.numbers LIMIT 10) ORDER BY x ASC NULLS FIRST; +SELECT x FROM (SELECT number % 5 = 0 ? NULL : (number % 3 = 0 ? nan : toFloat64(number)) AS x FROM system.numbers LIMIT 10) ORDER BY x ASC NULLS LAST; + +SELECT x FROM (SELECT number % 5 = 0 ? NULL : (number % 3 = 0 ? nan : toFloat64(number)) AS x FROM system.numbers LIMIT 10) ORDER BY x DESC; +SELECT x FROM (SELECT number % 5 = 0 ? NULL : (number % 3 = 0 ? nan : toFloat64(number)) AS x FROM system.numbers LIMIT 10) ORDER BY x DESC NULLS FIRST; +SELECT x FROM (SELECT number % 5 = 0 ? NULL : (number % 3 = 0 ? nan : toFloat64(number)) AS x FROM system.numbers LIMIT 10) ORDER BY x DESC NULLS LAST; + +SELECT x, y FROM (SELECT number % 5 = 0 ? NULL : number AS x, number % 3 = 0 ? nan : toFloat64(number) AS y FROM system.numbers LIMIT 10) ORDER BY x ASC, y ASC; +SELECT x, y FROM (SELECT number % 5 = 0 ? NULL : number AS x, number % 3 = 0 ? nan : toFloat64(number) AS y FROM system.numbers LIMIT 10) ORDER BY x ASC, y ASC NULLS FIRST; +SELECT x, y FROM (SELECT number % 5 = 0 ? NULL : number AS x, number % 3 = 0 ? 
nan : toFloat64(number) AS y FROM system.numbers LIMIT 10) ORDER BY x DESC NULLS FIRST, y ASC NULLS FIRST; diff --git a/parser/testdata/00438_bit_rotate/ast.json b/parser/testdata/00438_bit_rotate/ast.json new file mode 100644 index 000000000..4afdb6ef3 --- /dev/null +++ b/parser/testdata/00438_bit_rotate/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function hex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitRotateLeft (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_9223372036854775809" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001066124, + "rows_read": 10, + "bytes_read": 399 + } +} diff --git a/parser/testdata/00438_bit_rotate/metadata.json b/parser/testdata/00438_bit_rotate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00438_bit_rotate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00438_bit_rotate/query.sql b/parser/testdata/00438_bit_rotate/query.sql new file mode 100644 index 000000000..d532fd522 --- /dev/null +++ b/parser/testdata/00438_bit_rotate/query.sql @@ -0,0 +1,2 @@ +SELECT hex(bitRotateLeft(0x8000000000000001, 1)); +SELECT hex(bitRotateRight(0x8000000000000001, 1)); diff --git a/parser/testdata/00439_fixed_string_filter/ast.json b/parser/testdata/00439_fixed_string_filter/ast.json new file mode 100644 index 000000000..84f599b02 --- /dev/null +++ b/parser/testdata/00439_fixed_string_filter/ast.json @@ -0,0 +1,121 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toFixedString (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_20" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal 'Hello'" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_50" + }, + { + "explain": " Function notEquals (children 1)" + }, + 
{ + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal '\\0\\0\\0\\0\\0'" + } + ], + + "rows": 33, + + "statistics": + { + "elapsed": 0.001483817, + "rows_read": 33, + "bytes_read": 1398 + } +} diff --git a/parser/testdata/00439_fixed_string_filter/metadata.json b/parser/testdata/00439_fixed_string_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00439_fixed_string_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00439_fixed_string_filter/query.sql b/parser/testdata/00439_fixed_string_filter/query.sql new file mode 100644 index 000000000..163fa15f6 --- /dev/null +++ b/parser/testdata/00439_fixed_string_filter/query.sql @@ -0,0 +1 @@ +SELECT DISTINCT x FROM (SELECT toFixedString(number < 20 ? '' : 'Hello', 5) AS x FROM system.numbers LIMIT 50) WHERE x != '\0\0\0\0\0'; diff --git a/parser/testdata/00440_nulls_merge_tree/ast.json b/parser/testdata/00440_nulls_merge_tree/ast.json new file mode 100644 index 000000000..25e53ff4b --- /dev/null +++ b/parser/testdata/00440_nulls_merge_tree/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nulls (children 1)" + }, + { + "explain": " Identifier nulls" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000989445, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/00440_nulls_merge_tree/metadata.json b/parser/testdata/00440_nulls_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00440_nulls_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00440_nulls_merge_tree/query.sql b/parser/testdata/00440_nulls_merge_tree/query.sql new file mode 100644 index 000000000..dd9473027 --- /dev/null +++ b/parser/testdata/00440_nulls_merge_tree/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS nulls; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE nulls (d Date, x Nullable(UInt64)) ENGINE = MergeTree(d, d, 8192); +INSERT INTO nulls SELECT toDate('2000-01-01'), number % 10 != 0 ? 
number : NULL FROM system.numbers LIMIT 10000; +SELECT count() FROM nulls WHERE x IS NULL; +DROP TABLE nulls; diff --git a/parser/testdata/00441_nulls_in/ast.json b/parser/testdata/00441_nulls_in/ast.json new file mode 100644 index 000000000..53dc8aedf --- /dev/null +++ b/parser/testdata/00441_nulls_in/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal Tuple_(UInt64_1, NULL, UInt64_3)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_5" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001295171, + "rows_read": 13, + "bytes_read": 516 + } +} diff --git a/parser/testdata/00441_nulls_in/metadata.json b/parser/testdata/00441_nulls_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00441_nulls_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00441_nulls_in/query.sql b/parser/testdata/00441_nulls_in/query.sql new file mode 100644 index 000000000..c240a3be5 --- /dev/null +++ b/parser/testdata/00441_nulls_in/query.sql @@ -0,0 +1,29 @@ +SELECT number IN (1, NULL, 3) FROM system.numbers LIMIT 5; +SELECT nullIf(number, 2) IN (1, NULL, 3) FROM system.numbers LIMIT 5; +SELECT nullIf(number, 2) IN (1, 2, 3) FROM system.numbers LIMIT 5; + +SELECT number IN (SELECT number FROM system.numbers LIMIT 1, 3) AS res FROM system.numbers LIMIT 5; +SELECT number IN (SELECT nullIf(number, 2) FROM system.numbers LIMIT 1, 3) AS res FROM system.numbers LIMIT 5; +SELECT nullIf(number, 4) IN (SELECT nullIf(number, 2) FROM system.numbers LIMIT 1, 3) AS res FROM system.numbers LIMIT 5; + + +SELECT toString(number) IN ('1', NULL, '3') FROM system.numbers LIMIT 5; +SELECT nullIf(toString(number), '2') IN ('1', NULL, '3') FROM system.numbers LIMIT 5; +SELECT nullIf(toString(number), '2') IN ('1', '2', '3') FROM system.numbers LIMIT 5; + +SELECT toString(number) IN (SELECT toString(number) FROM system.numbers LIMIT 1, 3) AS res FROM system.numbers LIMIT 5; +SELECT toString(number) IN (SELECT nullIf(toString(number), '2') FROM system.numbers LIMIT 1, 3) AS res FROM system.numbers LIMIT 5; +SELECT nullIf(toString(number), '4') IN (SELECT nullIf(toString(number), '2') FROM system.numbers LIMIT 1, 3) AS res FROM system.numbers LIMIT 5; + + +SELECT (number, -number) IN ((1, -1), (NULL, NULL), (3, -3)) FROM system.numbers LIMIT 5; +SELECT (nullIf(number, 2), -number) IN ((1, -1), (NULL, NULL), (3, -3)) FROM system.numbers LIMIT 5; +SELECT (nullIf(number, 2), -number) IN ((1, -1), (2, -2), (3, -3)) FROM system.numbers LIMIT 5; +SELECT (nullIf(number, 2), -nullIf(number, 2)) IN ((1, -1), (NULL, NULL), (3, -3)) FROM system.numbers LIMIT 5; +SELECT (nullIf(number, 2), -nullIf(number, 2)) IN ((1, -1), (2, -2), (3, -3)) FROM system.numbers LIMIT 5; + +SELECT (number, -number) IN (SELECT number, -number FROM system.numbers LIMIT 1, 3) AS res FROM system.numbers LIMIT 5; +SELECT (number, 
-number) IN (SELECT nullIf(number, 2), -number FROM system.numbers LIMIT 1, 3) AS res FROM system.numbers LIMIT 5; +SELECT (nullIf(number, 4), -number) IN (SELECT nullIf(number, 2), -number FROM system.numbers LIMIT 1, 3) AS res FROM system.numbers LIMIT 5; +SELECT (number, -nullIf(number, 3)) IN (SELECT nullIf(number, 2), -number FROM system.numbers LIMIT 1, 3) AS res FROM system.numbers LIMIT 5; +SELECT (nullIf(number, 4), -nullIf(number, 3)) IN (SELECT nullIf(number, 2), -number FROM system.numbers LIMIT 1, 3) AS res FROM system.numbers LIMIT 5; diff --git a/parser/testdata/00442_filter_by_nullable/ast.json b/parser/testdata/00442_filter_by_nullable/ast.json new file mode 100644 index 000000000..99ddd8251 --- /dev/null +++ b/parser/testdata/00442_filter_by_nullable/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toNullable (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001388768, + "rows_read": 17, + "bytes_read": 688 + } +} diff --git a/parser/testdata/00442_filter_by_nullable/metadata.json b/parser/testdata/00442_filter_by_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00442_filter_by_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00442_filter_by_nullable/query.sql b/parser/testdata/00442_filter_by_nullable/query.sql new file mode 100644 index 000000000..674a55573 --- /dev/null +++ b/parser/testdata/00442_filter_by_nullable/query.sql @@ -0,0 +1,17 @@ +SELECT x FROM (SELECT toNullable(1) AS x) WHERE x; +SELECT x FROM (SELECT toNullable(0) AS x) WHERE x; +SELECT x FROM (SELECT NULL AS x) WHERE x; + +SELECT 1 WHERE toNullable(1); +SELECT 1 WHERE toNullable(0); +SELECT 1 WHERE NULL; + +SELECT x FROM (SELECT toNullable(materialize(1)) AS x) WHERE x; +SELECT x FROM (SELECT toNullable(materialize(0)) AS x) WHERE x; +SELECT x FROM (SELECT materialize(NULL) AS x) WHERE x; + +SELECT materialize('Hello') WHERE toNullable(materialize(1)); +SELECT materialize('Hello') WHERE toNullable(materialize(0)); +SELECT materialize('Hello') WHERE materialize(NULL); + +SELECT x, y FROM (SELECT number % 3 = 0 ? 
NULL : number AS x, number AS y FROM system.numbers LIMIT 10) WHERE x % 2 != 0; diff --git a/parser/testdata/00444_join_use_nulls/ast.json b/parser/testdata/00444_join_use_nulls/ast.json new file mode 100644 index 000000000..7fa505acd --- /dev/null +++ b/parser/testdata/00444_join_use_nulls/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001118492, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00444_join_use_nulls/metadata.json b/parser/testdata/00444_join_use_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00444_join_use_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00444_join_use_nulls/query.sql b/parser/testdata/00444_join_use_nulls/query.sql new file mode 100644 index 000000000..2ac9fc0f6 --- /dev/null +++ b/parser/testdata/00444_join_use_nulls/query.sql @@ -0,0 +1,86 @@ +SET join_use_nulls = 0; +SET any_join_distinct_right_table_keys = 1; +SET joined_subquery_requires_alias = 0; + +SELECT k, a, b +FROM +( + SELECT number AS k, toString(number) AS a FROM system.numbers LIMIT 10 +) +ANY INNER JOIN +( + SELECT number AS k, toString(number) AS b FROM system.numbers LIMIT 5, 10 +) USING (k) ORDER BY k; + +SELECT k, a, b +FROM +( + SELECT number AS k, toString(number) AS a FROM system.numbers LIMIT 10 +) +ANY LEFT JOIN +( + SELECT number AS k, toString(number) AS b FROM system.numbers LIMIT 5, 10 +) USING (k) ORDER BY k; + +SELECT k, a, b +FROM +( + SELECT number AS k, toString(number) AS a FROM system.numbers LIMIT 10 +) +ANY RIGHT JOIN +( + SELECT number AS k, toString(number) AS b FROM system.numbers LIMIT 5, 10 +) USING (k) ORDER BY k; + +SELECT k, a, b +FROM +( + SELECT number AS k, toString(number) AS a FROM system.numbers LIMIT 10 +) +ANY FULL JOIN +( + SELECT number AS k, toString(number) AS b FROM system.numbers LIMIT 5, 10 +) USING (k) ORDER BY k; + + +SET join_use_nulls = 1; + +SELECT k, a, b +FROM +( + SELECT number AS k, toString(number) AS a FROM system.numbers LIMIT 10 +) +ANY INNER JOIN +( + SELECT number AS k, toString(number) AS b FROM system.numbers LIMIT 5, 10 +) USING (k) ORDER BY k; + +SELECT k, a, b +FROM +( + SELECT number AS k, toString(number) AS a FROM system.numbers LIMIT 10 +) +ANY LEFT JOIN +( + SELECT number AS k, toString(number) AS b FROM system.numbers LIMIT 5, 10 +) USING (k) ORDER BY k; + +SELECT k, a, b +FROM +( + SELECT number AS k, toString(number) AS a FROM system.numbers LIMIT 10 +) +ANY RIGHT JOIN +( + SELECT number AS k, toString(number) AS b FROM system.numbers LIMIT 5, 10 +) USING (k) ORDER BY k; + +SELECT k, a, b +FROM +( + SELECT number AS k, toString(number) AS a FROM system.numbers LIMIT 10 +) +ANY FULL JOIN +( + SELECT number AS k, toString(number) AS b FROM system.numbers LIMIT 5, 10 +) USING (k) ORDER BY k; diff --git a/parser/testdata/00445_join_nullable_keys/ast.json b/parser/testdata/00445_join_nullable_keys/ast.json new file mode 100644 index 000000000..df66b21f2 --- /dev/null +++ b/parser/testdata/00445_join_nullable_keys/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001125124, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00445_join_nullable_keys/metadata.json 
b/parser/testdata/00445_join_nullable_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00445_join_nullable_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00445_join_nullable_keys/query.sql b/parser/testdata/00445_join_nullable_keys/query.sql new file mode 100644 index 000000000..bec0c76eb --- /dev/null +++ b/parser/testdata/00445_join_nullable_keys/query.sql @@ -0,0 +1,43 @@ +SET enable_analyzer = 1; +SET join_use_nulls = 0; +SET any_join_distinct_right_table_keys = 1; + +SELECT k, a, b +FROM +( + SELECT nullIf(number, 7) AS k, toString(number) AS a FROM system.numbers LIMIT 10 +) js1 +ANY INNER JOIN +( + SELECT number AS k, toString(number) AS b FROM system.numbers LIMIT 5, 10 +) js2 USING (k) ORDER BY k; + +SELECT k, a, b +FROM +( + SELECT number AS k, toString(number) AS a FROM system.numbers LIMIT 10 +) js1 +ANY LEFT JOIN +( + SELECT nullIf(number, 8) AS k, toString(number) AS b FROM system.numbers LIMIT 5, 10 +) js2 USING (k) ORDER BY k; + +SELECT k, a, b +FROM +( + SELECT nullIf(number, 7) AS k, toString(number) AS a FROM system.numbers LIMIT 10 +) js1 +ANY RIGHT JOIN +( + SELECT nullIf(number, 8) AS k, toString(number) AS b FROM system.numbers LIMIT 5, 10 +) js2 USING (k) ORDER BY k; + +SELECT k, b +FROM +( + SELECT number + 1 AS k FROM numbers(10) +) js1 +RIGHT JOIN +( + SELECT nullIf(number, if(number % 2 == 0, number, 0)) AS k, number AS b FROM numbers(10) +) js2 USING (k) ORDER BY k, b; diff --git a/parser/testdata/00446_clear_column_in_partition_zookeeper_long/ast.json b/parser/testdata/00446_clear_column_in_partition_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00446_clear_column_in_partition_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00446_clear_column_in_partition_zookeeper_long/metadata.json b/parser/testdata/00446_clear_column_in_partition_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00446_clear_column_in_partition_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00446_clear_column_in_partition_zookeeper_long/query.sql b/parser/testdata/00446_clear_column_in_partition_zookeeper_long/query.sql new file mode 100644 index 000000000..5427119f4 --- /dev/null +++ b/parser/testdata/00446_clear_column_in_partition_zookeeper_long/query.sql @@ -0,0 +1,79 @@ +-- Tags: long, zookeeper, no-shared-merge-tree +-- no-shared-merge-tree: different synchronization, replaced with another test + +SELECT '===Ordinary case==='; + +SET replication_alter_partitions_sync = 2; + +DROP TABLE IF EXISTS clear_column; +CREATE TABLE clear_column (d Date, num Int64, str String) ENGINE = MergeTree ORDER BY d PARTITION by toYYYYMM(d) SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO clear_column VALUES ('2016-12-12', 1, 'a'), ('2016-11-12', 2, 'b'); + +SELECT data_uncompressed_bytes FROM system.columns WHERE (database = currentDatabase()) AND (table = 'clear_column') AND (name = 'num'); + +SELECT num, str FROM clear_column ORDER BY num; +ALTER TABLE clear_column CLEAR COLUMN num IN PARTITION '201612'; +SELECT num, str FROM clear_column ORDER BY num; + +SELECT data_uncompressed_bytes FROM system.columns WHERE (database = currentDatabase()) AND (table = 'clear_column') AND (name = 'num'); +ALTER TABLE clear_column CLEAR COLUMN num IN PARTITION '201611'; +SELECT data_compressed_bytes, data_uncompressed_bytes FROM
system.columns WHERE (database = currentDatabase()) AND (table = 'clear_column') AND (name = 'num'); + +DROP TABLE clear_column; + +SELECT '===Replicated case==='; + +DROP TABLE IF EXISTS clear_column1; +DROP TABLE IF EXISTS clear_column2; +SELECT sleep(1) FORMAT Null; +CREATE TABLE clear_column1 (d Date, i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_00446/tables/clear_column', '1') ORDER BY d PARTITION by toYYYYMM(d) SETTINGS min_bytes_for_wide_part = 0; +CREATE TABLE clear_column2 (d Date, i Int64) ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_00446/tables/clear_column', '2') ORDER BY d PARTITION by toYYYYMM(d) SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO clear_column1 (d) VALUES ('2000-01-01'), ('2000-02-01'); +SYSTEM SYNC REPLICA clear_column2; + +SET replication_alter_partitions_sync=2; +ALTER TABLE clear_column1 ADD COLUMN s String; +ALTER TABLE clear_column1 CLEAR COLUMN s IN PARTITION '200001'; + +INSERT INTO clear_column1 VALUES ('2000-01-01', 1, 'a'), ('2000-02-01', 2, 'b'); +INSERT INTO clear_column1 VALUES ('2000-01-01', 3, 'c'), ('2000-02-01', 4, 'd'); +SYSTEM SYNC REPLICA clear_column2; + +SELECT 'all'; +SELECT * FROM clear_column2 ORDER BY d, i, s; + +SELECT 'w/o i 1'; +ALTER TABLE clear_column1 CLEAR COLUMN i IN PARTITION '200001'; +SELECT * FROM clear_column2 ORDER BY d, i, s; + +SELECT 'w/o is 1'; +ALTER TABLE clear_column1 CLEAR COLUMN s IN PARTITION '200001'; +SELECT * FROM clear_column2 ORDER BY d, i, s; + +SELECT 'w/o is 12'; +ALTER TABLE clear_column1 CLEAR COLUMN i IN PARTITION '200002'; +ALTER TABLE clear_column1 CLEAR COLUMN s IN PARTITION '200002'; +SELECT DISTINCT * FROM clear_column2 ORDER BY d, i, s; +SELECT DISTINCT * FROM clear_column2 ORDER BY d, i, s; + +SELECT 'sizes'; +SELECT sum(data_uncompressed_bytes) FROM system.columns WHERE database=currentDatabase() AND table LIKE 'clear_column_' AND (name = 'i' OR name = 's') GROUP BY table; + +-- double call should be OK +ALTER TABLE clear_column1 CLEAR COLUMN s IN PARTITION '200001'; +ALTER TABLE clear_column1 CLEAR COLUMN s IN PARTITION '200002'; + +SET optimize_throw_if_noop = 1; +OPTIMIZE TABLE clear_column1 PARTITION '200001'; +OPTIMIZE TABLE clear_column1 PARTITION '200002'; + + +-- clear column in empty partition should be Ok +ALTER TABLE clear_column1 CLEAR COLUMN s IN PARTITION '200012', CLEAR COLUMN i IN PARTITION '200012'; +-- Drop empty partition also Ok +ALTER TABLE clear_column1 DROP PARTITION '200012', DROP PARTITION '200011'; + +DROP TABLE clear_column1; +DROP TABLE clear_column2; diff --git a/parser/testdata/00447_foreach_modifier/ast.json b/parser/testdata/00447_foreach_modifier/ast.json new file mode 100644 index 000000000..a96f82518 --- /dev/null +++ b/parser/testdata/00447_foreach_modifier/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery data (children 3)" + }, + { + "explain": " Identifier data" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration sketch (children 1)" + }, + { + "explain": " DataType Array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType Int8" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 
0.001510638, + "rows_read": 12, + "bytes_read": 422 + } +} diff --git a/parser/testdata/00447_foreach_modifier/metadata.json b/parser/testdata/00447_foreach_modifier/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00447_foreach_modifier/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00447_foreach_modifier/query.sql b/parser/testdata/00447_foreach_modifier/query.sql new file mode 100644 index 000000000..e645aea41 --- /dev/null +++ b/parser/testdata/00447_foreach_modifier/query.sql @@ -0,0 +1,25 @@ +CREATE TABLE IF NOT EXISTS data (sketch Array(Int8)) ENGINE=MergeTree ORDER BY tuple(); + +INSERT INTO data VALUES ([-1,-1,-1]), ([4,-1,2]), ([0,25,-1]), ([-1,-1,7]), ([-1,-1,-1]); + +SELECT max(sketch) FROM data; + +SELECT maxArray(sketch) FROM data; + +SELECT maxForEach(sketch) FROM data; + +DROP TABLE data; + + +SELECT k, sumForEach(arr) FROM (SELECT number % 3 AS k, range(number) AS arr FROM system.numbers LIMIT 10) GROUP BY k ORDER BY k; +SELECT k, sumForEach(arr) FROM (SELECT intDiv(number, 3) AS k, range(number) AS arr FROM system.numbers LIMIT 10) GROUP BY k ORDER BY k; + +SELECT k, groupArrayForEach(arr) FROM (SELECT number % 3 AS k, range(number) AS arr FROM system.numbers LIMIT 10) GROUP BY k ORDER BY k; +SELECT k, groupArrayForEach(arr) FROM (SELECT intDiv(number, 3) AS k, range(number) AS arr FROM system.numbers LIMIT 10) GROUP BY k ORDER BY k; + +SELECT k, groupArrayForEach(arr) FROM (SELECT number % 3 AS k, arrayMap(x -> toString(x), range(number)) AS arr FROM system.numbers LIMIT 10) GROUP BY k ORDER BY k; +SELECT k, groupArrayForEach(arr) FROM (SELECT intDiv(number, 3) AS k, arrayMap(x -> toString(x), range(number)) AS arr FROM system.numbers LIMIT 10) GROUP BY k ORDER BY k; + +SELECT k, groupArrayForEach(arr), quantilesExactForEach(0.5, 0.9)(arr) FROM (SELECT intDiv(number, 3) AS k, arrayMap(x -> number + x, range(number)) AS arr FROM system.numbers LIMIT 10) GROUP BY k ORDER BY k; + +SELECT uniqForEach(x) FROM (SELECT emptyArrayUInt8() AS x UNION ALL SELECT [1, 2, 3] UNION ALL SELECT emptyArrayUInt8() UNION ALL SELECT [2, 2]); diff --git a/parser/testdata/00448_replicate_nullable_tuple_generic/ast.json b/parser/testdata/00448_replicate_nullable_tuple_generic/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00448_replicate_nullable_tuple_generic/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00448_replicate_nullable_tuple_generic/metadata.json b/parser/testdata/00448_replicate_nullable_tuple_generic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00448_replicate_nullable_tuple_generic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00448_replicate_nullable_tuple_generic/query.sql b/parser/testdata/00448_replicate_nullable_tuple_generic/query.sql new file mode 100644 index 000000000..082ba1bf1 --- /dev/null +++ b/parser/testdata/00448_replicate_nullable_tuple_generic/query.sql @@ -0,0 +1,5 @@ +-- Tags: replica + +SELECT x, arrayJoin(x) FROM (SELECT materialize([1, NULL, 2]) AS x); +SELECT x, arrayJoin(x) FROM (SELECT materialize([(1, 2), (3, 4), (5, 6)]) AS x); +SELECT x, arrayJoin(x) FROM (SELECT materialize(arrayMap(x -> toFixedString(x, 5), ['Hello', 'world'])) AS x); diff --git a/parser/testdata/00448_to_string_cut_to_zero/ast.json b/parser/testdata/00448_to_string_cut_to_zero/ast.json new file mode 100644 index 000000000..4141948cf --- /dev/null +++ 
b/parser/testdata/00448_to_string_cut_to_zero/ast.json @@ -0,0 +1,106 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function toStringCutToZero (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1000" + } + ], + + "rows": 28, + + "statistics": + { + "elapsed": 0.00147051, + "rows_read": 28, + "bytes_read": 1186 + } +} diff --git a/parser/testdata/00448_to_string_cut_to_zero/metadata.json b/parser/testdata/00448_to_string_cut_to_zero/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00448_to_string_cut_to_zero/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00448_to_string_cut_to_zero/query.sql b/parser/testdata/00448_to_string_cut_to_zero/query.sql new file mode 100644 index 000000000..8c5829523 --- /dev/null +++ b/parser/testdata/00448_to_string_cut_to_zero/query.sql @@ -0,0 +1,2 @@ +SELECT DISTINCT toString(number) = toStringCutToZero(toString(number)) FROM (SELECT * FROM system.numbers LIMIT 1000); +SELECT DISTINCT toString(number) = toStringCutToZero(toFixedString(toString(number), 10)) FROM (SELECT * FROM system.numbers LIMIT 1000); diff --git a/parser/testdata/00449_filter_array_nullable_tuple/ast.json b/parser/testdata/00449_filter_array_nullable_tuple/ast.json new file mode 100644 index 000000000..1f5e2c79b --- /dev/null +++ b/parser/testdata/00449_filter_array_nullable_tuple/ast.json @@ -0,0 +1,112 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery 
(children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function range (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function length (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 30, + + "statistics": + { + "elapsed": 0.00139412, + "rows_read": 30, + "bytes_read": 1234 + } +} diff --git a/parser/testdata/00449_filter_array_nullable_tuple/metadata.json b/parser/testdata/00449_filter_array_nullable_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00449_filter_array_nullable_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00449_filter_array_nullable_tuple/query.sql b/parser/testdata/00449_filter_array_nullable_tuple/query.sql new file mode 100644 index 000000000..4e714db90 --- /dev/null +++ b/parser/testdata/00449_filter_array_nullable_tuple/query.sql @@ -0,0 +1,6 @@ +SELECT * FROM (SELECT range(number) AS x FROM system.numbers LIMIT 10) WHERE length(x) % 2 = 0; +SELECT * FROM (SELECT arrayMap(x -> toNullable(x), range(number)) AS x FROM system.numbers LIMIT 10) WHERE length(x) % 2 = 0; +SELECT * FROM (SELECT arrayMap(x -> (x, x), range(number)) AS x FROM system.numbers LIMIT 10) WHERE length(x) % 2 = 0; +SELECT * FROM (SELECT arrayMap(x -> (x, x + 1), range(number)) AS x FROM system.numbers LIMIT 10) WHERE length(x) % 2 = 0; +SELECT * FROM (SELECT arrayMap(x -> (x, toNullable(x)), range(number)) AS x FROM system.numbers LIMIT 10) WHERE length(x) % 2 = 0; +SELECT * FROM (SELECT arrayMap(x -> (x, nullIf(x, 3)), range(number)) AS x FROM system.numbers LIMIT 10) WHERE length(x) % 2 = 0; diff --git a/parser/testdata/00450_higher_order_and_nullable/ast.json b/parser/testdata/00450_higher_order_and_nullable/ast.json new file mode 100644 index 000000000..b0d139238 --- /dev/null +++ b/parser/testdata/00450_higher_order_and_nullable/ast.json @@ -0,0 +1,112 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function 
modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 30, + + "statistics": + { + "elapsed": 0.001495722, + "rows_read": 30, + "bytes_read": 1195 + } +} diff --git a/parser/testdata/00450_higher_order_and_nullable/metadata.json b/parser/testdata/00450_higher_order_and_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00450_higher_order_and_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00450_higher_order_and_nullable/query.sql b/parser/testdata/00450_higher_order_and_nullable/query.sql new file mode 100644 index 000000000..0b88ffa36 --- /dev/null +++ b/parser/testdata/00450_higher_order_and_nullable/query.sql @@ -0,0 +1 @@ +SELECT arrayMap(x -> x % 2 = 0 ? NULL : x, range(number)) FROM system.numbers LIMIT 10; diff --git a/parser/testdata/00451_left_array_join_and_constants/ast.json b/parser/testdata/00451_left_array_join_and_constants/ast.json new file mode 100644 index 000000000..cfb6ff53b --- /dev/null +++ b/parser/testdata/00451_left_array_join_and_constants/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier arr" + }, + { + "explain": " Identifier element" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1] (alias arr)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " ArrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier arr (alias element)" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001136843, + "rows_read": 19, + "bytes_read": 779 + } +} diff --git a/parser/testdata/00451_left_array_join_and_constants/metadata.json b/parser/testdata/00451_left_array_join_and_constants/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00451_left_array_join_and_constants/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00451_left_array_join_and_constants/query.sql b/parser/testdata/00451_left_array_join_and_constants/query.sql new file mode 100644 index 000000000..c808444dd --- /dev/null +++ 
b/parser/testdata/00451_left_array_join_and_constants/query.sql @@ -0,0 +1,3 @@ +SELECT arr, element FROM (SELECT [1] AS arr) LEFT ARRAY JOIN arr AS element; +SELECT arr, element FROM (SELECT emptyArrayUInt8() AS arr) LEFT ARRAY JOIN arr AS element; +SELECT arr, element FROM (SELECT arrayJoin([emptyArrayUInt8(), [1], [2, 3]]) AS arr) LEFT ARRAY JOIN arr AS element; diff --git a/parser/testdata/00452_left_array_join_and_nullable/ast.json b/parser/testdata/00452_left_array_join_and_nullable/ast.json new file mode 100644 index 000000000..856d751ca --- /dev/null +++ b/parser/testdata/00452_left_array_join_and_nullable/ast.json @@ -0,0 +1,97 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function emptyArrayToSingle (alias arr) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function nullIf (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function emptyArrayUInt8 (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal Array_[UInt64_1]" + }, + { + "explain": " Literal Array_[UInt64_2, UInt64_3]" + } + ], + + "rows": 25, + + "statistics": + { + "elapsed": 0.001622711, + "rows_read": 25, + "bytes_read": 1082 + } +} diff --git a/parser/testdata/00452_left_array_join_and_nullable/metadata.json b/parser/testdata/00452_left_array_join_and_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00452_left_array_join_and_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00452_left_array_join_and_nullable/query.sql b/parser/testdata/00452_left_array_join_and_nullable/query.sql new file mode 100644 index 000000000..9a84acfba --- /dev/null +++ b/parser/testdata/00452_left_array_join_and_nullable/query.sql @@ -0,0 +1,7 @@ +SELECT emptyArrayToSingle(arrayMap(x -> nullIf(x, 2), arrayJoin([emptyArrayUInt8(), [1], [2, 3]]))) AS arr; +SELECT arr, element FROM (SELECT arrayMap(x -> nullIf(x, 2), arrayJoin([emptyArrayUInt8(), [1], [2, 3]])) AS arr) LEFT ARRAY JOIN arr AS element; + +SELECT emptyArrayToSingle(arr) FROM (SELECT arrayMap(x -> (x, toString(x), x = 1 ? 
NULL : x), range(number % 3)) AS arr FROM system.numbers LIMIT 10); + +SELECT emptyArrayToSingle(arrayMap(x -> toString(x), arrayMap(x -> nullIf(x, 2), arrayJoin([emptyArrayUInt8(), [1], [2, 3]])))) AS arr; +SELECT emptyArrayToSingle(arrayMap(x -> toFixedString(toString(x), 3), arrayMap(x -> nullIf(x, 2), arrayJoin([emptyArrayUInt8(), [1], [2, 3], [3, 4, 5]])))) AS arr; diff --git a/parser/testdata/00453_cast_enum/ast.json b/parser/testdata/00453_cast_enum/ast.json new file mode 100644 index 000000000..1e8127c29 --- /dev/null +++ b/parser/testdata/00453_cast_enum/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery cast_enums (children 1)" + }, + { + "explain": " Identifier cast_enums" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001140458, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00453_cast_enum/metadata.json b/parser/testdata/00453_cast_enum/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00453_cast_enum/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00453_cast_enum/query.sql b/parser/testdata/00453_cast_enum/query.sql new file mode 100644 index 000000000..e526f1f5e --- /dev/null +++ b/parser/testdata/00453_cast_enum/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS cast_enums; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE cast_enums +( + type Enum8('session' = 1, 'pageview' = 2, 'click' = 3), + date Date, + id UInt64 +) ENGINE = MergeTree(date, (type, date, id), 8192); + +INSERT INTO cast_enums SELECT 'session' AS type, toDate('2017-01-01') AS date, number AS id FROM system.numbers LIMIT 2; +INSERT INTO cast_enums SELECT 2 AS type, toDate('2017-01-01') AS date, number AS id FROM system.numbers LIMIT 2; + +SELECT type, date, id FROM cast_enums ORDER BY type, id; + +INSERT INTO cast_enums VALUES ('wrong_value', '2017-01-02', 7); -- { error UNKNOWN_ELEMENT_OF_ENUM } + +DROP TABLE IF EXISTS cast_enums; diff --git a/parser/testdata/00453_top_k/ast.json b/parser/testdata/00453_top_k/ast.json new file mode 100644 index 000000000..be2397244 --- /dev/null +++ b/parser/testdata/00453_top_k/ast.json @@ -0,0 +1,136 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arraySort (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function topK (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier n" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (alias n) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": " 
ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_100000" + } + ], + + "rows": 38, + + "statistics": + { + "elapsed": 0.001631439, + "rows_read": 38, + "bytes_read": 1649 + } +} diff --git a/parser/testdata/00453_top_k/metadata.json b/parser/testdata/00453_top_k/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00453_top_k/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00453_top_k/query.sql b/parser/testdata/00453_top_k/query.sql new file mode 100644 index 000000000..cae25e7ce --- /dev/null +++ b/parser/testdata/00453_top_k/query.sql @@ -0,0 +1,15 @@ +SELECT arraySort(topK(10)(n)) FROM (SELECT if(number % 100 < 10, number % 10, number) AS n FROM system.numbers LIMIT 100000); + +SELECT + k, + arraySort(topK(v)) +FROM +( + SELECT + number % 7 AS k, + arrayMap(x -> arrayMap(x -> if(x = 0, NULL, toString(x)), range(x)), range(intDiv(number, 1))) AS v + FROM system.numbers + LIMIT 10 +) +GROUP BY k +ORDER BY k ASC diff --git a/parser/testdata/00456_alter_nullable/ast.json b/parser/testdata/00456_alter_nullable/ast.json new file mode 100644 index 000000000..b71bff0e2 --- /dev/null +++ b/parser/testdata/00456_alter_nullable/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nullable_alter (children 1)" + }, + { + "explain": " Identifier nullable_alter" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001404429, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/00456_alter_nullable/metadata.json b/parser/testdata/00456_alter_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00456_alter_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00456_alter_nullable/query.sql b/parser/testdata/00456_alter_nullable/query.sql new file mode 100644 index 000000000..0fa383776 --- /dev/null +++ b/parser/testdata/00456_alter_nullable/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS nullable_alter; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE nullable_alter (d Date DEFAULT '2000-01-01', x String) ENGINE = MergeTree(d, d, 1); + +INSERT INTO nullable_alter (x) VALUES ('Hello'), ('World'); +SELECT x FROM nullable_alter ORDER BY x; + +ALTER TABLE nullable_alter MODIFY COLUMN x Nullable(String); +SELECT x FROM nullable_alter ORDER BY x; + +INSERT INTO nullable_alter (x) VALUES ('xyz'), (NULL); +SELECT x FROM nullable_alter ORDER BY x NULLS FIRST; + +ALTER TABLE nullable_alter MODIFY COLUMN x Nullable(FixedString(5)); +SELECT x FROM nullable_alter ORDER BY x NULLS FIRST; + +DROP TABLE nullable_alter; diff --git a/parser/testdata/00457_log_tinylog_stripelog_nullable/ast.json 
b/parser/testdata/00457_log_tinylog_stripelog_nullable/ast.json new file mode 100644 index 000000000..cd871a5e3 --- /dev/null +++ b/parser/testdata/00457_log_tinylog_stripelog_nullable/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nullable_00457 (children 1)" + }, + { + "explain": " Identifier nullable_00457" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001630252, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/00457_log_tinylog_stripelog_nullable/metadata.json b/parser/testdata/00457_log_tinylog_stripelog_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00457_log_tinylog_stripelog_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00457_log_tinylog_stripelog_nullable/query.sql b/parser/testdata/00457_log_tinylog_stripelog_nullable/query.sql new file mode 100644 index 000000000..cb3ca2709 --- /dev/null +++ b/parser/testdata/00457_log_tinylog_stripelog_nullable/query.sql @@ -0,0 +1,40 @@ +DROP TABLE IF EXISTS nullable_00457; + +CREATE TABLE nullable_00457 (s String, ns Nullable(String), narr Array(Nullable(UInt64))) ENGINE = Log; + +INSERT INTO nullable_00457 SELECT toString(number), number % 3 = 1 ? toString(number) : NULL, arrayMap(x -> x % 2 = 1 ? x : NULL, range(number)) FROM system.numbers LIMIT 10; +SELECT * FROM nullable_00457 ORDER BY s; +SELECT s FROM nullable_00457 ORDER BY s; +SELECT ns FROM nullable_00457 ORDER BY s; +SELECT narr FROM nullable_00457 ORDER BY s; +SELECT s, narr FROM nullable_00457 ORDER BY s; + +INSERT INTO nullable_00457 SELECT toString(number), number % 3 = 1 ? toString(number) : NULL, arrayMap(x -> x % 2 = 1 ? x : NULL, range(number)) FROM system.numbers LIMIT 10, 10; + +DROP TABLE IF EXISTS nullable_00457; + +CREATE TABLE nullable_00457 (s String, ns Nullable(String), narr Array(Nullable(UInt64))) ENGINE = TinyLog; + +INSERT INTO nullable_00457 SELECT toString(number), number % 3 = 1 ? toString(number) : NULL, arrayMap(x -> x % 2 = 1 ? x : NULL, range(number)) FROM system.numbers LIMIT 10; +SELECT * FROM nullable_00457 ORDER BY s; +SELECT s FROM nullable_00457 ORDER BY s; +SELECT ns FROM nullable_00457 ORDER BY s; +SELECT narr FROM nullable_00457 ORDER BY s; +SELECT s, narr FROM nullable_00457 ORDER BY s; + +INSERT INTO nullable_00457 SELECT toString(number), number % 3 = 1 ? toString(number) : NULL, arrayMap(x -> x % 2 = 1 ? x : NULL, range(number)) FROM system.numbers LIMIT 10, 10; + +DROP TABLE IF EXISTS nullable_00457; + +CREATE TABLE nullable_00457 (s String, ns Nullable(String), narr Array(Nullable(UInt64))) ENGINE = StripeLog; + +INSERT INTO nullable_00457 SELECT toString(number), number % 3 = 1 ? toString(number) : NULL, arrayMap(x -> x % 2 = 1 ? x : NULL, range(number)) FROM system.numbers LIMIT 10; +SELECT * FROM nullable_00457 ORDER BY s; +SELECT s FROM nullable_00457 ORDER BY s; +SELECT ns FROM nullable_00457 ORDER BY s; +SELECT narr FROM nullable_00457 ORDER BY s; +SELECT s, narr FROM nullable_00457 ORDER BY s; + +INSERT INTO nullable_00457 SELECT toString(number), number % 3 = 1 ? toString(number) : NULL, arrayMap(x -> x % 2 = 1 ? 
x : NULL, range(number)) FROM system.numbers LIMIT 10, 10; + +DROP TABLE nullable_00457; diff --git a/parser/testdata/00458_merge_type_cast/ast.json b/parser/testdata/00458_merge_type_cast/ast.json new file mode 100644 index 000000000..d89b4342b --- /dev/null +++ b/parser/testdata/00458_merge_type_cast/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal ' UInt32 | UInt64 '" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001538415, + "rows_read": 5, + "bytes_read": 188 + } +} diff --git a/parser/testdata/00458_merge_type_cast/metadata.json b/parser/testdata/00458_merge_type_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00458_merge_type_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00458_merge_type_cast/query.sql b/parser/testdata/00458_merge_type_cast/query.sql new file mode 100644 index 000000000..8d9943e98 --- /dev/null +++ b/parser/testdata/00458_merge_type_cast/query.sql @@ -0,0 +1,136 @@ +SELECT ' UInt32 | UInt64 '; + +DROP TABLE IF EXISTS u32; +DROP TABLE IF EXISTS u64; +DROP TABLE IF EXISTS merge_32_64; + +CREATE TABLE u32 (x UInt32, y UInt32 DEFAULT x) ENGINE = Memory; +CREATE TABLE u64 (x UInt64, y UInt64 DEFAULT x) ENGINE = Memory; +CREATE TABLE merge_32_64 (x UInt64) ENGINE = Merge(currentDatabase(), '^u32|u64$'); + +INSERT INTO u32 (x) VALUES (1); +INSERT INTO u64 (x) VALUES (1); + +INSERT INTO u32 (x) VALUES (4294967290); +INSERT INTO u64 (x) VALUES (4294967290); +-- now inserts 3 (4294967299 wraps modulo 2^32 in a UInt32 column); maybe an out-of-range check is needed?
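+-- A minimal sketch of that wrap-around, assuming ClickHouse's modular integer casts;
+-- not part of the original test and kept commented out so the expected output is unchanged:
+-- SELECT toUInt32(4294967299); -- 4294967299 mod 2^32 = 3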
+--INSERT INTO u32 VALUES (4294967299); +INSERT INTO u64 (x) VALUES (4294967299); + +select ' = 1:'; +SELECT x FROM merge_32_64 WHERE x = 1; +select ' 1:'; +SELECT x FROM merge_32_64 WHERE x IN (1); +select ' 4294967290:'; +SELECT x FROM merge_32_64 WHERE x IN (4294967290); +select ' 4294967299:'; +SELECT x FROM merge_32_64 WHERE x IN (4294967299); +--select ' -1: '; +--SELECT x FROM merge_32_64 WHERE x IN (-1); + +DROP TABLE u32; +DROP TABLE u64; +DROP TABLE merge_32_64; + + +SELECT ' Int64 | UInt64 '; + +DROP TABLE IF EXISTS s64; +DROP TABLE IF EXISTS u64; +DROP TABLE IF EXISTS merge_s64_u64; + +CREATE TABLE s64 (x Int64) ENGINE = Memory; +CREATE TABLE u64 (x UInt64) ENGINE = Memory; +CREATE TABLE merge_s64_u64 (x UInt64) ENGINE = Merge(currentDatabase(), '^s64|u64$'); + +INSERT INTO s64 VALUES (1); +INSERT INTO s64 VALUES (-1); +INSERT INTO u64 VALUES (1); + +select ' 1:'; +SELECT x FROM merge_s64_u64 WHERE x IN (1); +select ' -1: '; +SELECT x FROM merge_s64_u64 WHERE x IN (-1); + +DROP TABLE s64; +DROP TABLE u64; +DROP TABLE merge_s64_u64; + + +SELECT ' Int32 | UInt64 '; + +DROP TABLE IF EXISTS one_00458; +DROP TABLE IF EXISTS two_00458; +DROP TABLE IF EXISTS merge_one_two; + +CREATE TABLE one_00458 (x Int32) ENGINE = Memory; +CREATE TABLE two_00458 (x UInt64) ENGINE = Memory; +CREATE TABLE merge_one_two (x UInt64) ENGINE = Merge(currentDatabase(), '^one_00458$|^two_00458$'); + +INSERT INTO one_00458 VALUES (1); +INSERT INTO two_00458 VALUES (1); + +INSERT INTO one_00458 VALUES (2147483650); +INSERT INTO two_00458 VALUES (2147483650); + +SELECT * FROM merge_one_two WHERE x IN (1); +SELECT x FROM merge_one_two WHERE x IN (2147483650); +SELECT x FROM merge_one_two WHERE x IN (-1); + + +SELECT ' String | FixedString(16) '; + +DROP TABLE IF EXISTS one_00458; +DROP TABLE IF EXISTS two_00458; +DROP TABLE IF EXISTS merge_one_two; + +CREATE TABLE one_00458 (x String) ENGINE = Memory; +CREATE TABLE two_00458 (x FixedString(16)) ENGINE = Memory; +CREATE TABLE merge_one_two (x String) ENGINE = Merge(currentDatabase(), '^one_00458$|^two_00458$'); + +INSERT INTO one_00458 VALUES ('1'); +INSERT INTO two_00458 VALUES ('1'); + +SELECT * FROM merge_one_two WHERE x IN ('1'); + + +SELECT ' DateTime | UInt64 '; + +DROP TABLE IF EXISTS one_00458; +DROP TABLE IF EXISTS two_00458; +DROP TABLE IF EXISTS merge_one_two; + +CREATE TABLE one_00458 (x DateTime) ENGINE = Memory; +CREATE TABLE two_00458 (x UInt64) ENGINE = Memory; +CREATE TABLE merge_one_two (x UInt64) ENGINE = Merge(currentDatabase(), '^one_00458$|^two_00458$'); + +INSERT INTO one_00458 VALUES (1); +INSERT INTO two_00458 VALUES (1); + +SELECT * FROM merge_one_two WHERE x IN (1); + + +SELECT ' Array(UInt32) | Array(UInt64) '; + +DROP TABLE IF EXISTS one_00458; +DROP TABLE IF EXISTS two_00458; +DROP TABLE IF EXISTS merge_one_two; + +CREATE TABLE one_00458 (x Array(UInt32), z String DEFAULT '', y Array(UInt32)) ENGINE = Memory; +CREATE TABLE two_00458 (x Array(UInt64), z String DEFAULT '', y Array(UInt64)) ENGINE = Memory; +CREATE TABLE merge_one_two (x Array(UInt64), z String, y Array(UInt64)) ENGINE = Merge(currentDatabase(), '^one_00458$|^two_00458$'); + +INSERT INTO one_00458 (x, y) VALUES ([1], [0]); +INSERT INTO two_00458 (x, y) VALUES ([1], [0]); +INSERT INTO one_00458 (x, y) VALUES ([4294967290], [4294967290]); +INSERT INTO two_00458 (x, y) VALUES ([4294967290], [4294967290]); +INSERT INTO one_00458 (x, y) VALUES ([4294967299], [4294967299]); +INSERT INTO two_00458 (x, y) VALUES ([4294967299], [4294967299]); + +SELECT x, y FROM merge_one_two 
WHERE arrayExists(_ -> _ IN (1), x); +SELECT x, y FROM merge_one_two WHERE arrayExists(_ -> _ IN (4294967290), x); +SELECT x, y FROM merge_one_two WHERE arrayExists(_ -> _ IN (4294967299), x); + +DROP TABLE IF EXISTS one_00458; +DROP TABLE IF EXISTS two_00458; +DROP TABLE IF EXISTS merge_one_two; diff --git a/parser/testdata/00459_group_array_insert_at/ast.json b/parser/testdata/00459_group_array_insert_at/ast.json new file mode 100644 index 000000000..f8f6d0940 --- /dev/null +++ b/parser/testdata/00459_group_array_insert_at/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function groupArrayInsertAt (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.001412684, + "rows_read": 27, + "bytes_read": 1127 + } +} diff --git a/parser/testdata/00459_group_array_insert_at/metadata.json b/parser/testdata/00459_group_array_insert_at/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00459_group_array_insert_at/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00459_group_array_insert_at/query.sql b/parser/testdata/00459_group_array_insert_at/query.sql new file mode 100644 index 000000000..59ecfc050 --- /dev/null +++ b/parser/testdata/00459_group_array_insert_at/query.sql @@ -0,0 +1,6 @@ +SELECT groupArrayInsertAt(toString(number), number * 2) FROM (SELECT * FROM system.numbers LIMIT 10); +SELECT groupArrayInsertAt('-')(toString(number), number * 2) FROM (SELECT * FROM system.numbers LIMIT 10); +SELECT groupArrayInsertAt([123])(range(number), number * 2) FROM (SELECT * FROM system.numbers LIMIT 10); +SELECT number, groupArrayInsertAt(number, number) FROM (SELECT * FROM system.numbers LIMIT 10) GROUP BY number ORDER BY number; +SELECT k, ignore(groupArrayInsertAt(x, x)) FROM (SELECT dummy AS k, (randConstant() * 10) % 10 AS x FROM remote('127.0.0.{1,1}', system.one)) GROUP BY k ORDER BY k; +SELECT k, groupArrayInsertAt('-', 10)(toString(x), x) FROM (SELECT number AS k, number AS x FROM system.numbers LIMIT 11) GROUP BY k ORDER BY k; diff --git 
a/parser/testdata/00460_vertical_and_totals_extremes/ast.json b/parser/testdata/00460_vertical_and_totals_extremes/ast.json new file mode 100644 index 000000000..82a18a85a --- /dev/null +++ b/parser/testdata/00460_vertical_and_totals_extremes/ast.json @@ -0,0 +1,112 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function modulo (alias k) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " Identifier Vertical" + } + ], + + "rows": 30, + + "statistics": + { + "elapsed": 0.00130698, + "rows_read": 30, + "bytes_read": 1200 + } +} diff --git a/parser/testdata/00460_vertical_and_totals_extremes/metadata.json b/parser/testdata/00460_vertical_and_totals_extremes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00460_vertical_and_totals_extremes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00460_vertical_and_totals_extremes/query.sql b/parser/testdata/00460_vertical_and_totals_extremes/query.sql new file mode 100644 index 000000000..26b8d14d7 --- /dev/null +++ b/parser/testdata/00460_vertical_and_totals_extremes/query.sql @@ -0,0 +1,10 @@ +SELECT k, count() FROM (SELECT number % 5 AS k FROM system.numbers LIMIT 100) GROUP BY k WITH TOTALS ORDER BY k FORMAT Vertical; + +SET extremes = 1; +SELECT k, count() FROM (SELECT number % 5 AS k FROM system.numbers LIMIT 100) GROUP BY k WITH TOTALS ORDER BY k FORMAT Vertical; + +SET output_format_pretty_max_rows = 5; +SELECT k, count() FROM (SELECT number % 5 AS k FROM system.numbers LIMIT 100) GROUP BY k WITH TOTALS ORDER BY k FORMAT Vertical; + +SET output_format_pretty_max_rows = 4; +SELECT k, count() FROM (SELECT number % 5 AS k FROM system.numbers LIMIT 100) GROUP BY k WITH TOTALS ORDER BY k FORMAT Vertical; diff --git a/parser/testdata/00461_default_value_of_argument_type/ast.json b/parser/testdata/00461_default_value_of_argument_type/ast.json new file mode 100644 index 000000000..4d3a090dc --- /dev/null +++ b/parser/testdata/00461_default_value_of_argument_type/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": 
"explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function defaultValueOfArgumentType (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3]" + }, + { + "explain": " Function defaultValueOfArgumentType (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[Array_[Array_[UInt64_1]]]" + }, + { + "explain": " Function defaultValueOfArgumentType (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'Hello'" + }, + { + "explain": " Function toTimeZone (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function now (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " Function today (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001645719, + "rows_read": 23, + "bytes_read": 980 + } +} diff --git a/parser/testdata/00461_default_value_of_argument_type/metadata.json b/parser/testdata/00461_default_value_of_argument_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00461_default_value_of_argument_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00461_default_value_of_argument_type/query.sql b/parser/testdata/00461_default_value_of_argument_type/query.sql new file mode 100644 index 000000000..d74af75cc --- /dev/null +++ b/parser/testdata/00461_default_value_of_argument_type/query.sql @@ -0,0 +1 @@ +SELECT defaultValueOfArgumentType([1, 2, 3]), defaultValueOfArgumentType([[[1]]]), defaultValueOfArgumentType((1, 'Hello', toTimeZone(now(), 'UTC'), today())); diff --git a/parser/testdata/00462_json_true_false_literals/ast.json b/parser/testdata/00462_json_true_false_literals/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00462_json_true_false_literals/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00462_json_true_false_literals/metadata.json b/parser/testdata/00462_json_true_false_literals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00462_json_true_false_literals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00462_json_true_false_literals/query.sql b/parser/testdata/00462_json_true_false_literals/query.sql new file mode 100644 index 000000000..2c45229fc --- /dev/null +++ b/parser/testdata/00462_json_true_false_literals/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS json; +CREATE TABLE json (x UInt8, title String) ENGINE = Memory; +INSERT INTO json FORMAT JSONEachRow {"x": true, "title": "true"}, {"x": false, "title": "false"}, {"x": 0, "title": "0"}, {"x": 1, "title": "1"}; + +SELECT * FROM json ORDER BY title; +DROP TABLE IF EXISTS json; diff --git a/parser/testdata/00464_array_element_out_of_range/ast.json b/parser/testdata/00464_array_element_out_of_range/ast.json new file mode 100644 index 000000000..e2f085397 --- /dev/null +++ 
b/parser/testdata/00464_array_element_out_of_range/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function arrayElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2]" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Function arrayElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_1, NULL, UInt64_2]" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Function arrayElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_('1', UInt64_1)" + }, + { + "explain": " Literal Tuple_('2', UInt64_2)" + }, + { + "explain": " Literal Int64_-3" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.00129614, + "rows_read": 19, + "bytes_read": 772 + } +} diff --git a/parser/testdata/00464_array_element_out_of_range/metadata.json b/parser/testdata/00464_array_element_out_of_range/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00464_array_element_out_of_range/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00464_array_element_out_of_range/query.sql b/parser/testdata/00464_array_element_out_of_range/query.sql new file mode 100644 index 000000000..666d06831 --- /dev/null +++ b/parser/testdata/00464_array_element_out_of_range/query.sql @@ -0,0 +1 @@ +SELECT [1, 2][3], [1, NULL, 2][4], [('1', 1), ('2', 2)][-3]; diff --git a/parser/testdata/00464_sort_all_constant_columns/ast.json b/parser/testdata/00464_sort_all_constant_columns/ast.json new file mode 100644 index 000000000..9e7df81e2 --- /dev/null +++ b/parser/testdata/00464_sort_all_constant_columns/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias x)" + }, + { + "explain": " Literal UInt64_2 (alias y)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001253124, + "rows_read": 18, + "bytes_read": 708 + } +} diff --git a/parser/testdata/00464_sort_all_constant_columns/metadata.json b/parser/testdata/00464_sort_all_constant_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/00464_sort_all_constant_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00464_sort_all_constant_columns/query.sql b/parser/testdata/00464_sort_all_constant_columns/query.sql new file mode 100644 index 000000000..6c89e14a8 --- /dev/null +++ b/parser/testdata/00464_sort_all_constant_columns/query.sql @@ -0,0 +1,3 @@ +SELECT * FROM (SELECT 1 AS x, 2 AS y) ORDER BY x; + +SELECT * FROM (SELECT x FROM (SELECT 2 AS x, arrayJoin([1,2,3]))) ORDER BY x; diff --git a/parser/testdata/00465_nullable_default/ast.json b/parser/testdata/00465_nullable_default/ast.json new file mode 100644 index 000000000..ba69c9aa1 --- /dev/null +++ b/parser/testdata/00465_nullable_default/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nullable_00465 (children 1)" + }, + { + "explain": " Identifier nullable_00465" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001297712, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/00465_nullable_default/metadata.json b/parser/testdata/00465_nullable_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00465_nullable_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00465_nullable_default/query.sql b/parser/testdata/00465_nullable_default/query.sql new file mode 100644 index 000000000..bbc9af5a9 --- /dev/null +++ b/parser/testdata/00465_nullable_default/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS nullable_00465; +CREATE TABLE nullable_00465 (id Nullable(UInt32), cat String) ENGINE = Log; +INSERT INTO nullable_00465 (cat) VALUES ('test'); +SELECT * FROM nullable_00465; +DROP TABLE nullable_00465; diff --git a/parser/testdata/00466_comments_in_keyword/ast.json b/parser/testdata/00466_comments_in_keyword/ast.json new file mode 100644 index 000000000..f9e5a5f98 --- /dev/null +++ b/parser/testdata/00466_comments_in_keyword/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias x)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001318009, + "rows_read": 8, + "bytes_read": 289 + } +} diff --git a/parser/testdata/00466_comments_in_keyword/metadata.json b/parser/testdata/00466_comments_in_keyword/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00466_comments_in_keyword/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00466_comments_in_keyword/query.sql b/parser/testdata/00466_comments_in_keyword/query.sql new file mode 100644 index 000000000..3c23f5478 --- /dev/null +++ b/parser/testdata/00466_comments_in_keyword/query.sql @@ -0,0 +1,8 @@ +SELECT 1 AS x ORDER/* hello */BY x; + +SELECT 1 AS x GROUP -- hello +BY x; + +SELECT 1 AS x +UNION/**/ALL +SELECT 1; diff --git a/parser/testdata/00467_qualified_names/ast.json b/parser/testdata/00467_qualified_names/ast.json new file mode 100644 index 000000000..9ea009f91 --- /dev/null +++ b/parser/testdata/00467_qualified_names/ast.json @@ -0,0 +1,49 @@ +{ 
+ "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier dummy" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001731749, + "rows_read": 9, + "bytes_read": 352 + } +} diff --git a/parser/testdata/00467_qualified_names/metadata.json b/parser/testdata/00467_qualified_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00467_qualified_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00467_qualified_names/query.sql b/parser/testdata/00467_qualified_names/query.sql new file mode 100644 index 000000000..ba3f07c26 --- /dev/null +++ b/parser/testdata/00467_qualified_names/query.sql @@ -0,0 +1,40 @@ +SELECT dummy FROM system.one; +SELECT * FROM system.one; +SELECT `one`.dummy FROM system.one; +SELECT one.* FROM system.one; +SELECT system.`one`.dummy FROM system.one; +SELECT `system`.`one`.* FROM system.one; + +SELECT `t`.dummy FROM system.one AS t; +SELECT t.* FROM system.one AS t; +SELECT t.dummy FROM system.one t; +SELECT t.* FROM system.one t; + +SELECT one.dummy FROM system.one one; +SELECT one.* FROM system.one one; + +USE system; + +SELECT `dummy` FROM `one`; +SELECT * FROM one; +SELECT one.dummy FROM one; +SELECT one.* FROM one; +SELECT system.one.dummy FROM one; +SELECT system.one.* FROM one; +SELECT system.one.dummy FROM `one` AS `t`; +SELECT system.one.* FROM one AS `t`; + +USE {CLICKHOUSE_DATABASE:Identifier}; + +DROP TABLE IF EXISTS nested; +CREATE TABLE nested (nest Nested(a UInt8, b String)) ENGINE = Memory; +INSERT INTO nested VALUES ([1, 2], ['hello', 'world']); +SELECT nest.a, nest.b, nested.`nest`.`a`, nested.nest.b, t.nest.a, t.nest.b, t.* FROM nested AS t; +DROP TABLE nested; + +USE system; + +SELECT number FROM numbers(2); +SELECT t.number FROM numbers(2) t; +SELECT x FROM (SELECT 1 AS x); +SELECT t.x FROM (SELECT 1 AS x) t; diff --git a/parser/testdata/00468_array_join_multiple_arrays_and_use_original_column/ast.json b/parser/testdata/00468_array_join_multiple_arrays_and_use_original_column/ast.json new file mode 100644 index 000000000..d25313c31 --- /dev/null +++ b/parser/testdata/00468_array_join_multiple_arrays_and_use_original_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nested (children 1)" + }, + { + "explain": " Identifier nested" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001052644, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/00468_array_join_multiple_arrays_and_use_original_column/metadata.json b/parser/testdata/00468_array_join_multiple_arrays_and_use_original_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00468_array_join_multiple_arrays_and_use_original_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00468_array_join_multiple_arrays_and_use_original_column/query.sql 
b/parser/testdata/00468_array_join_multiple_arrays_and_use_original_column/query.sql new file mode 100644 index 000000000..9ccf647aa --- /dev/null +++ b/parser/testdata/00468_array_join_multiple_arrays_and_use_original_column/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS nested; +CREATE TABLE nested (nest Nested(x UInt8, y UInt8)) ENGINE = Memory; +INSERT INTO nested VALUES ([1, 2, 3], [4, 5, 6]); + +SELECT nx FROM nested ARRAY JOIN nest.x AS nx, nest.y AS ny WHERE notEmpty(nest.y); +SELECT 1 FROM nested ARRAY JOIN nest.x AS nx, nest.y AS ny WHERE notEmpty(nest.y); +SELECT nx, ny FROM nested ARRAY JOIN nest.x AS nx, nest.y AS ny WHERE notEmpty(nest.y); +SELECT nx FROM nested ARRAY JOIN nest.x AS nx, nest.y AS ny WHERE notEmpty(nest.x); +SELECT nx, nest.y FROM nested ARRAY JOIN nest.x AS nx, nest.y AS ny; +SELECT nx, ny, nest.x, nest.y FROM nested ARRAY JOIN nest.x AS nx, nest.y AS ny; + +DROP TABLE nested; diff --git a/parser/testdata/00469_comparison_of_strings_containing_null_char/ast.json b/parser/testdata/00469_comparison_of_strings_containing_null_char/ast.json new file mode 100644 index 000000000..327ed9f81 --- /dev/null +++ b/parser/testdata/00469_comparison_of_strings_containing_null_char/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '**** constant-constant comparisons ****'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.00129716, + "rows_read": 5, + "bytes_read": 210 + } +} diff --git a/parser/testdata/00469_comparison_of_strings_containing_null_char/metadata.json b/parser/testdata/00469_comparison_of_strings_containing_null_char/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00469_comparison_of_strings_containing_null_char/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00469_comparison_of_strings_containing_null_char/query.sql b/parser/testdata/00469_comparison_of_strings_containing_null_char/query.sql new file mode 100644 index 000000000..d37335026 --- /dev/null +++ b/parser/testdata/00469_comparison_of_strings_containing_null_char/query.sql @@ -0,0 +1,32 @@ +SELECT '**** constant-constant comparisons ****'; + +SELECT 'ab\0c' < 'ab\0d', 'ab\0c' > 'ab\0d'; +SELECT 'a' < 'a\0b', 'a' > 'a\0b'; +SELECT 'a\0\0\0\0' < 'a\0\0\0', 'a\0\0\0\0' > 'a\0\0\0'; + +DROP TABLE IF EXISTS strings_00469; +CREATE TABLE strings_00469(x String, y String) ENGINE = TinyLog; + +INSERT INTO strings_00469 VALUES ('abcde\0', 'abcde'), ('aa\0a', 'aa\0b'), ('aa', 'aa\0'), ('a\0\0\0\0', 'a\0\0\0'), ('a\0\0', 'a\0'), ('a', 'a'); + +SELECT '**** vector-vector comparisons ****'; + +SELECT x < y, x > y FROM strings_00469; + +SELECT '**** vector-constant comparisons ****'; + +SELECT x < 'aa', x > 'aa' FROM strings_00469; + +SELECT '****'; + +SELECT x < 'a\0', x > 'a\0' FROM strings_00469; + +SELECT '**** single-column sort ****'; -- Uses ColumnString::getPermutation() + +SELECT * FROM strings_00469 ORDER BY x; + +SELECT '**** multi-column sort ****'; -- Uses ColumnString::compareAt() + +SELECT * FROM strings_00469 ORDER BY x, y; + +DROP TABLE strings_00469; diff --git a/parser/testdata/00470_identifiers_in_double_quotes/ast.json b/parser/testdata/00470_identifiers_in_double_quotes/ast.json new file mode 100644 index 
000000000..314cd2204 --- /dev/null +++ b/parser/testdata/00470_identifiers_in_double_quotes/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier numbers.number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001380895, + "rows_read": 10, + "bytes_read": 392 + } +} diff --git a/parser/testdata/00470_identifiers_in_double_quotes/metadata.json b/parser/testdata/00470_identifiers_in_double_quotes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00470_identifiers_in_double_quotes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00470_identifiers_in_double_quotes/query.sql b/parser/testdata/00470_identifiers_in_double_quotes/query.sql new file mode 100644 index 000000000..ec00c5a43 --- /dev/null +++ b/parser/testdata/00470_identifiers_in_double_quotes/query.sql @@ -0,0 +1 @@ +SELECT "numbers"."number" FROM "system"."numbers" LIMIT 1; diff --git a/parser/testdata/00471_sql_style_quoting/ast.json b/parser/testdata/00471_sql_style_quoting/ast.json new file mode 100644 index 000000000..a8c45c31b --- /dev/null +++ b/parser/testdata/00471_sql_style_quoting/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier ta`ble.dummy" + }, + { + "explain": " Literal 'hello\\'world' (alias hel\"lo)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one (alias ta`ble)" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001214776, + "rows_read": 10, + "bytes_read": 423 + } +} diff --git a/parser/testdata/00471_sql_style_quoting/metadata.json b/parser/testdata/00471_sql_style_quoting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00471_sql_style_quoting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00471_sql_style_quoting/query.sql b/parser/testdata/00471_sql_style_quoting/query.sql new file mode 100644 index 000000000..3e3d2daee --- /dev/null +++ b/parser/testdata/00471_sql_style_quoting/query.sql @@ -0,0 +1 @@ +SELECT "ta`ble".dummy, 'hello''world' AS "hel""lo" FROM system.one AS `ta``ble`; diff --git a/parser/testdata/00472_compare_uuid_with_constant_string/ast.json b/parser/testdata/00472_compare_uuid_with_constant_string/ast.json new file mode 100644 index 000000000..36951d96a --- /dev/null +++ b/parser/testdata/00472_compare_uuid_with_constant_string/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery 
(children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toUUID (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '61f0c404-5cb3-11e7-907b-a6006ad3dba0'" + }, + { + "explain": " Literal '61f0c404-5cb3-11e7-907b-a6006ad3dba0'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001332885, + "rows_read": 10, + "bytes_read": 435 + } +} diff --git a/parser/testdata/00472_compare_uuid_with_constant_string/metadata.json b/parser/testdata/00472_compare_uuid_with_constant_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00472_compare_uuid_with_constant_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00472_compare_uuid_with_constant_string/query.sql b/parser/testdata/00472_compare_uuid_with_constant_string/query.sql new file mode 100644 index 000000000..856271cb8 --- /dev/null +++ b/parser/testdata/00472_compare_uuid_with_constant_string/query.sql @@ -0,0 +1,43 @@ +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') = '61f0c404-5cb3-11e7-907b-a6006ad3dba0'; +SELECT '61f0c404-5cb3-11e7-907b-a6006ad3dba0' = toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0'); +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') = '61f0c404-5cb3-11e7-907b-a6006ad3dba1'; +SELECT '61f0c404-5cb3-11e7-907b-a6006ad3dba0' = toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba1'); + +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') != '61f0c404-5cb3-11e7-907b-a6006ad3dba0'; +SELECT '61f0c404-5cb3-11e7-907b-a6006ad3dba0' != toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0'); +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') != '61f0c404-5cb3-11e7-907b-a6006ad3dba1'; +SELECT '61f0c404-5cb3-11e7-907b-a6006ad3dba0' != toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba1'); + + +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') < '61f0c404-5cb3-11e7-907b-a6006ad3dba1'; +SELECT '61f0c404-5cb3-11e7-907b-a6006ad3dba0' < toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba1'); +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba1') < '61f0c404-5cb3-11e7-907b-a6006ad3dba0'; +SELECT '61f0c404-5cb3-11e7-907b-a6006ad3dba1' < toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0'); +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') < '61f0c404-5cb3-11e7-907b-a6006ad3dba0'; +SELECT '61f0c404-5cb3-11e7-907b-a6006ad3dba0' < toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0'); + + +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') > '61f0c404-5cb3-11e7-907b-a6006ad3dba1'; +SELECT '61f0c404-5cb3-11e7-907b-a6006ad3dba0' > toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba1'); +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba2') > '61f0c404-5cb3-11e7-907b-a6006ad3dba1'; +SELECT '61f0c404-5cb3-11e7-907b-a6006ad3dba2' > toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba1'); +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba1') > '61f0c404-5cb3-11e7-907b-a6006ad3dba1'; +SELECT '61f0c404-5cb3-11e7-907b-a6006ad3dba1' > toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba1'); + + +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') <= '61f0c404-5cb3-11e7-907b-a6006ad3dba1'; +SELECT '61f0c404-5cb3-11e7-907b-a6006ad3dba0' <= toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba1'); +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') <= '61f0c404-5cb3-11e7-907b-a6006ad3dba0'; +SELECT 
'61f0c404-5cb3-11e7-907b-a6006ad3dba0' <= toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0'); +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba2') <= '61f0c404-5cb3-11e7-907b-a6006ad3dba1'; +SELECT '61f0c404-5cb3-11e7-907b-a6006ad3dba2' <= toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba1'); + +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') >= '61f0c404-5cb3-11e7-907b-a6006ad3dba1'; +SELECT '61f0c404-5cb3-11e7-907b-a6006ad3dba0' >= toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba1'); +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0') >= '61f0c404-5cb3-11e7-907b-a6006ad3dba0'; +SELECT '61f0c404-5cb3-11e7-907b-a6006ad3dba0' >= toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0'); +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba2') >= '61f0c404-5cb3-11e7-907b-a6006ad3dba1'; +SELECT '61f0c404-5cb3-11e7-907b-a6006ad3dba2' >= toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba1'); + + + diff --git a/parser/testdata/00472_create_view_if_not_exists/ast.json b/parser/testdata/00472_create_view_if_not_exists/ast.json new file mode 100644 index 000000000..4f6dd75a7 --- /dev/null +++ b/parser/testdata/00472_create_view_if_not_exists/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_00472 (children 1)" + }, + { + "explain": " Identifier t_00472" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00121735, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00472_create_view_if_not_exists/metadata.json b/parser/testdata/00472_create_view_if_not_exists/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00472_create_view_if_not_exists/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00472_create_view_if_not_exists/query.sql b/parser/testdata/00472_create_view_if_not_exists/query.sql new file mode 100644 index 000000000..cff8b605b --- /dev/null +++ b/parser/testdata/00472_create_view_if_not_exists/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS t_00472; +DROP TABLE IF EXISTS mv_00472; +DROP TABLE IF EXISTS `.inner.mv_00472`; + +CREATE TABLE t_00472 (x UInt8) ENGINE = Null; +CREATE VIEW IF NOT EXISTS mv_00472 AS SELECT * FROM t_00472; + +DROP TABLE t_00472; +DROP TABLE mv_00472; diff --git a/parser/testdata/00475_in_join_db_table/ast.json b/parser/testdata/00475_in_join_db_table/ast.json new file mode 100644 index 000000000..e30130eb0 --- /dev/null +++ b/parser/testdata/00475_in_join_db_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery set (children 1)" + }, + { + "explain": " Identifier set" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001306747, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/00475_in_join_db_table/metadata.json b/parser/testdata/00475_in_join_db_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00475_in_join_db_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00475_in_join_db_table/query.sql b/parser/testdata/00475_in_join_db_table/query.sql new file mode 100644 index 000000000..97ffe2193 --- /dev/null +++ b/parser/testdata/00475_in_join_db_table/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS set; +CREATE TABLE set (x String) ENGINE = Memory; +INSERT INTO set VALUES ('hello'); +SELECT (arrayJoin(['hello', 'world']) AS s) IN set, s; + +DROP TABLE set; +CREATE TABLE set (x String) ENGINE = 
Set; +INSERT INTO set VALUES ('hello'); +SELECT (arrayJoin(['hello', 'world']) AS s) IN set, s; + +DROP TABLE set; + +DROP TABLE IF EXISTS join; +CREATE TABLE join (k UInt8, x String) ENGINE = Memory; +INSERT INTO join VALUES (1, 'hello'); +SELECT k, x FROM (SELECT arrayJoin([1, 2]) AS k) js1 ANY LEFT JOIN join USING k ORDER BY ALL; + +DROP TABLE join; +CREATE TABLE join (k UInt8, x String) ENGINE = Join(ANY, LEFT, k); +INSERT INTO join VALUES (1, 'hello'); +SELECT k, x FROM (SELECT arrayJoin([1, 2]) AS k) js1 ANY LEFT JOIN join USING k ORDER BY ALL; + +DROP TABLE join; diff --git a/parser/testdata/00476_pretty_formats_and_widths/ast.json b/parser/testdata/00476_pretty_formats_and_widths/ast.json new file mode 100644 index 000000000..bab1100e7 --- /dev/null +++ b/parser/testdata/00476_pretty_formats_and_widths/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001242716, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00476_pretty_formats_and_widths/metadata.json b/parser/testdata/00476_pretty_formats_and_widths/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00476_pretty_formats_and_widths/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00476_pretty_formats_and_widths/query.sql b/parser/testdata/00476_pretty_formats_and_widths/query.sql new file mode 100644 index 000000000..598098417 --- /dev/null +++ b/parser/testdata/00476_pretty_formats_and_widths/query.sql @@ -0,0 +1,8 @@ +SET output_format_pretty_color=1, output_format_pretty_highlight_digit_groups=0, output_format_pretty_display_footer_column_names=0; +SELECT toUInt64(round(exp10(number))) AS x, toString(x) AS s FROM system.numbers LIMIT 10 FORMAT Pretty; +SELECT toUInt64(round(exp10(number))) AS x, toString(x) AS s FROM system.numbers LIMIT 10 FORMAT PrettyCompact; +SELECT toUInt64(round(exp10(number))) AS x, toString(x) AS s FROM system.numbers LIMIT 10 FORMAT PrettySpace; +SET max_block_size = 5; +SELECT toUInt64(round(exp10(number))) AS x, toString(x) AS s FROM system.numbers LIMIT 10 FORMAT PrettyCompactMonoBlock; +SELECT '\\''\'' FORMAT Pretty; +SELECT '\\''\'', 1 FORMAT Vertical; diff --git a/parser/testdata/00477_parsing_data_types/ast.json b/parser/testdata/00477_parsing_data_types/ast.json new file mode 100644 index 000000000..d5df48c67 --- /dev/null +++ b/parser/testdata/00477_parsing_data_types/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_00477 (children 1)" + }, + { + "explain": " Identifier t_00477" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001076469, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00477_parsing_data_types/metadata.json b/parser/testdata/00477_parsing_data_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00477_parsing_data_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00477_parsing_data_types/query.sql b/parser/testdata/00477_parsing_data_types/query.sql new file mode 100644 index 000000000..67331abe4 --- /dev/null +++ b/parser/testdata/00477_parsing_data_types/query.sql @@ -0,0 +1,2 @@ +DROP TEMPORARY TABLE IF EXISTS t_00477; +CREATE TEMPORARY TABLE t_00477 (x Array( /* Hello */ UInt32 /* World */ )) ENGINE = Memory; diff --git 
a/parser/testdata/00479_date_and_datetime_to_number/ast.json b/parser/testdata/00479_date_and_datetime_to_number/ast.json new file mode 100644 index 000000000..fabeca7af --- /dev/null +++ b/parser/testdata/00479_date_and_datetime_to_number/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toYYYYMM (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2017-07-21'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001159284, + "rows_read": 9, + "bytes_read": 351 + } +} diff --git a/parser/testdata/00479_date_and_datetime_to_number/metadata.json b/parser/testdata/00479_date_and_datetime_to_number/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00479_date_and_datetime_to_number/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00479_date_and_datetime_to_number/query.sql b/parser/testdata/00479_date_and_datetime_to_number/query.sql new file mode 100644 index 000000000..711516900 --- /dev/null +++ b/parser/testdata/00479_date_and_datetime_to_number/query.sql @@ -0,0 +1,6 @@ +SELECT toYYYYMM(toDate('2017-07-21')); +SELECT toYYYYMMDD(toDate('2017-07-21')); +SELECT toYYYYMMDDhhmmss(toDate('2017-07-21')); +SELECT toYYYYMM(toDateTime('2017-07-21T11:22:33')); +SELECT toYYYYMMDD(toDateTime('2017-07-21T11:22:33')); +SELECT toYYYYMMDDhhmmss(toDateTime('2017-07-21T11:22:33')); diff --git a/parser/testdata/00480_mac_addresses/ast.json b/parser/testdata/00480_mac_addresses/ast.json new file mode 100644 index 000000000..77e745dd2 --- /dev/null +++ b/parser/testdata/00480_mac_addresses/ast.json @@ -0,0 +1,127 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 9)" + }, + { + "explain": " Literal '01:02:03:04:05:06' (alias mac_str)" + }, + { + "explain": " Function MACStringToNum (alias mac_num) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier mac_str" + }, + { + "explain": " Function hex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier mac_num" + }, + { + "explain": " Function MACNumToString (alias mac_str2) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier mac_num" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier mac_str" + }, + { + "explain": " Identifier mac_str2" + }, + { + "explain": " Function MACStringToOUI (alias oui_num) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier mac_str" + }, + { + "explain": " Function hex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier oui_num" + }, + { + "explain": " Function MACStringToOUI (alias oui_num2) (children 1)" + }, + { + "explain": " 
ExpressionList (children 1)" + }, + { + "explain": " Function substring (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier mac_str" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_8" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier oui_num" + }, + { + "explain": " Identifier oui_num2" + } + ], + + "rows": 35, + + "statistics": + { + "elapsed": 0.001316276, + "rows_read": 35, + "bytes_read": 1411 + } +} diff --git a/parser/testdata/00480_mac_addresses/metadata.json b/parser/testdata/00480_mac_addresses/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00480_mac_addresses/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00480_mac_addresses/query.sql b/parser/testdata/00480_mac_addresses/query.sql new file mode 100644 index 000000000..7f45beaaf --- /dev/null +++ b/parser/testdata/00480_mac_addresses/query.sql @@ -0,0 +1,2 @@ +SELECT '01:02:03:04:05:06' AS mac_str, MACStringToNum(mac_str) AS mac_num, hex(mac_num), MACNumToString(mac_num) AS mac_str2, mac_str = mac_str2, MACStringToOUI(mac_str) AS oui_num, hex(oui_num), MACStringToOUI(substring(mac_str, 1, 8)) AS oui_num2, oui_num = oui_num2; +SELECT materialize('01:02:03:04:05:06') AS mac_str, MACStringToNum(mac_str) AS mac_num, hex(mac_num), MACNumToString(mac_num) AS mac_str2, mac_str = mac_str2, MACStringToOUI(mac_str) AS oui_num, hex(oui_num), MACStringToOUI(substring(mac_str, 1, 8)) AS oui_num2, oui_num = oui_num2; diff --git a/parser/testdata/00481_create_view_for_null/ast.json b/parser/testdata/00481_create_view_for_null/ast.json new file mode 100644 index 000000000..2db2568c5 --- /dev/null +++ b/parser/testdata/00481_create_view_for_null/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery null_00481 (children 1)" + }, + { + "explain": " Identifier null_00481" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000986069, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00481_create_view_for_null/metadata.json b/parser/testdata/00481_create_view_for_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00481_create_view_for_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00481_create_view_for_null/query.sql b/parser/testdata/00481_create_view_for_null/query.sql new file mode 100644 index 000000000..29fddc21d --- /dev/null +++ b/parser/testdata/00481_create_view_for_null/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS null_00481; +DROP TABLE IF EXISTS null_view; + +CREATE TABLE null_00481 (x UInt8) ENGINE = Null; +CREATE VIEW null_view AS SELECT * FROM null_00481; +INSERT INTO null_00481 VALUES (1); + +SELECT * FROM null_00481; +SELECT * FROM null_view; + +DROP TABLE null_00481; +DROP TABLE null_view; diff --git a/parser/testdata/00481_reading_from_last_granula/ast.json b/parser/testdata/00481_reading_from_last_granula/ast.json new file mode 100644 index 000000000..5cc14ae0e --- /dev/null +++ b/parser/testdata/00481_reading_from_last_granula/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab_00481 (children 1)" + }, + { + "explain": " Identifier tab_00481" + } + ], + + "rows": 2, + + "statistics": + { + 
"elapsed": 0.001290182, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00481_reading_from_last_granula/metadata.json b/parser/testdata/00481_reading_from_last_granula/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00481_reading_from_last_granula/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00481_reading_from_last_granula/query.sql b/parser/testdata/00481_reading_from_last_granula/query.sql new file mode 100644 index 000000000..c98068e46 --- /dev/null +++ b/parser/testdata/00481_reading_from_last_granula/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS tab_00481; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE tab_00481 (date Date, value UInt64, s String, m FixedString(16)) ENGINE = MergeTree(date, (date, value), 8); +INSERT INTO tab_00481 SELECT today() as date, number as value, '' as s, toFixedString('', 16) as m from system.numbers limit 42; +SET preferred_max_column_in_block_size_bytes = 32; +SELECT blockSize(), * from tab_00481 format Null; +SELECT 0; + +DROP TABLE tab_00481; diff --git a/parser/testdata/00482_subqueries_and_aliases/ast.json b/parser/testdata/00482_subqueries_and_aliases/ast.json new file mode 100644 index 000000000..46b900bad --- /dev/null +++ b/parser/testdata/00482_subqueries_and_aliases/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias x)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001228451, + "rows_read": 19, + "bytes_read": 754 + } +} diff --git a/parser/testdata/00482_subqueries_and_aliases/metadata.json b/parser/testdata/00482_subqueries_and_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00482_subqueries_and_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00482_subqueries_and_aliases/query.sql b/parser/testdata/00482_subqueries_and_aliases/query.sql new file mode 100644 index 000000000..52920fdfd --- /dev/null +++ b/parser/testdata/00482_subqueries_and_aliases/query.sql @@ -0,0 +1,5 @@ +SELECT 1 FROM system.one WHERE (1, 1) IN (SELECT 1 AS x, x); +SELECT 2 FROM system.one WHERE (1, 1) IN (SELECT 1 AS x, x) AND (1, 0) IN (SELECT 1 AS x, x); +SELECT 3 FROM system.one WHERE (1, 1) IN (SELECT 1 AS x, x) OR (1, 0) IN (SELECT 1 AS x, x); +SELECT 4 FROM system.one WHERE (1, 1) IN (SELECT 1 AS x, x) AND (1, 0) IN (SELECT 1 AS x, toUInt8(x - 1)); +SELECT 5 FROM system.one WHERE (1, 1) IN (SELECT 1 AS x, x) OR (1, 
0) IN (SELECT 1 AS x, toUInt8(x - 1)); diff --git a/parser/testdata/00483_cast_syntax/ast.json b/parser/testdata/00483_cast_syntax/ast.json new file mode 100644 index 000000000..0a9e82f59 --- /dev/null +++ b/parser/testdata/00483_cast_syntax/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'Int8'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.00121161, + "rows_read": 8, + "bytes_read": 285 + } +} diff --git a/parser/testdata/00483_cast_syntax/metadata.json b/parser/testdata/00483_cast_syntax/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00483_cast_syntax/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00483_cast_syntax/query.sql b/parser/testdata/00483_cast_syntax/query.sql new file mode 100644 index 000000000..be248fce3 --- /dev/null +++ b/parser/testdata/00483_cast_syntax/query.sql @@ -0,0 +1,2 @@ +SELECT CAST(1 AS Int8); +SELECT CAST(1, 'Int8'); diff --git a/parser/testdata/00483_reading_from_array_structure/ast.json b/parser/testdata/00483_reading_from_array_structure/ast.json new file mode 100644 index 000000000..28eb5dd1a --- /dev/null +++ b/parser/testdata/00483_reading_from_array_structure/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_00483 (children 1)" + }, + { + "explain": " Identifier table_00483" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001278537, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/00483_reading_from_array_structure/metadata.json b/parser/testdata/00483_reading_from_array_structure/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00483_reading_from_array_structure/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00483_reading_from_array_structure/query.sql b/parser/testdata/00483_reading_from_array_structure/query.sql new file mode 100644 index 000000000..bab0dcd37 --- /dev/null +++ b/parser/testdata/00483_reading_from_array_structure/query.sql @@ -0,0 +1,15 @@ +drop table if exists `table_00483`; + +set allow_deprecated_syntax_for_merge_tree=1; +create table `table_00483` (date Date, `Struct.Key1` Array(UInt64), `Struct.Key2` Array(UInt64), padding FixedString(16)) engine = MergeTree(date, (date), 16); +insert into `table_00483` select today() as date, [number], [number + 1], toFixedString('', 16) from system.numbers limit 100; +set preferred_max_column_in_block_size_bytes = 96; +select blockSize(), * from `table_00483` prewhere `Struct.Key1`[1] = 19 and `Struct.Key2`[1] >= 0 format Null; + +drop table if exists `table_00483`; +create table `table_00483` (date Date, `Struct.Key1` Array(UInt64), `Struct.Key2` Array(UInt64), padding FixedString(16), x UInt64) engine = MergeTree(date, (date), 8); +insert into `table_00483` select today() as date, [number], [number + 1], toFixedString('', 16), number from system.numbers limit 100; +set preferred_max_column_in_block_size_bytes = 112; +select blockSize(), * from 
`table_00483` prewhere x = 7 format Null; + +drop table if exists `table_00483`; diff --git a/parser/testdata/00484_preferred_max_column_in_block_size_bytes/ast.json b/parser/testdata/00484_preferred_max_column_in_block_size_bytes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00484_preferred_max_column_in_block_size_bytes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00484_preferred_max_column_in_block_size_bytes/metadata.json b/parser/testdata/00484_preferred_max_column_in_block_size_bytes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00484_preferred_max_column_in_block_size_bytes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00484_preferred_max_column_in_block_size_bytes/query.sql b/parser/testdata/00484_preferred_max_column_in_block_size_bytes/query.sql new file mode 100644 index 000000000..be4af2221 --- /dev/null +++ b/parser/testdata/00484_preferred_max_column_in_block_size_bytes/query.sql @@ -0,0 +1,37 @@ +-- Tags: no-random-settings + +drop table if exists tab_00484; +create table tab_00484 (date Date, x UInt64, s FixedString(128)) engine = MergeTree PARTITION BY date ORDER BY (date, x) SETTINGS min_bytes_for_wide_part = 0, ratio_of_defaults_for_sparse_serialization = 1; +insert into tab_00484 select today(), number, toFixedString('', 128) from system.numbers limit 8192; + +set preferred_block_size_bytes = 2000000; +set preferred_max_column_in_block_size_bytes = 0; +select max(blockSize()), min(blockSize()), any(ignore(*)) from tab_00484; +set preferred_max_column_in_block_size_bytes = 128; +select max(blockSize()), min(blockSize()), any(ignore(*)) from tab_00484; +set preferred_max_column_in_block_size_bytes = 256; +select max(blockSize()), min(blockSize()), any(ignore(*)) from tab_00484; +set preferred_max_column_in_block_size_bytes = 2097152; +select max(blockSize()), min(blockSize()), any(ignore(*)) from tab_00484; +set preferred_max_column_in_block_size_bytes = 4194304; +select max(blockSize()), min(blockSize()), any(ignore(*)) from tab_00484; + +drop table if exists tab_00484; +create table tab_00484 (date Date, x UInt64, s FixedString(128)) engine = MergeTree PARTITION BY date ORDER BY (date, x) SETTINGS min_bytes_for_wide_part = 0, ratio_of_defaults_for_sparse_serialization = 1; +insert into tab_00484 select today(), number, toFixedString('', 128) from system.numbers limit 47; +set preferred_max_column_in_block_size_bytes = 1152; +select blockSize(), * from tab_00484 where x = 1 or x > 36 format Null; + +drop table if exists tab_00484; +create table tab_00484 (date Date, x UInt64, s FixedString(128)) engine = MergeTree PARTITION BY date ORDER BY (date, x) SETTINGS min_bytes_for_wide_part = 0, ratio_of_defaults_for_sparse_serialization = 1; +insert into tab_00484 select today(), number, toFixedString('', 128) from system.numbers limit 10; +set preferred_max_column_in_block_size_bytes = 128; +select s from tab_00484 where s == '' format Null; + +drop table if exists tab_00484; +create table tab_00484 (date Date, x UInt64, s String) engine = MergeTree PARTITION BY date ORDER BY (date, x) SETTINGS min_bytes_for_wide_part = 0, ratio_of_defaults_for_sparse_serialization = 1; +insert into tab_00484 select today(), number, 'abc' from system.numbers limit 81920; +set preferred_block_size_bytes = 0; +select count(*) from tab_00484 prewhere s != 'abc' format Null; +select count(*) from tab_00484 prewhere s = 'abc' format Null; +drop table 
tab_00484; diff --git a/parser/testdata/00486_if_fixed_string/ast.json b/parser/testdata/00486_if_fixed_string/ast.json new file mode 100644 index 000000000..34371b20c --- /dev/null +++ b/parser/testdata/00486_if_fixed_string/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal 'hello'" + }, + { + "explain": " Literal 'world'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_5" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001253303, + "rows_read": 17, + "bytes_read": 638 + } +} diff --git a/parser/testdata/00486_if_fixed_string/metadata.json b/parser/testdata/00486_if_fixed_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00486_if_fixed_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00486_if_fixed_string/query.sql b/parser/testdata/00486_if_fixed_string/query.sql new file mode 100644 index 000000000..f1ef7c441 --- /dev/null +++ b/parser/testdata/00486_if_fixed_string/query.sql @@ -0,0 +1,19 @@ +SELECT number % 2 ? 'hello' : 'world' FROM system.numbers LIMIT 5; +SELECT number % 2 ? materialize('hello') : 'world' FROM system.numbers LIMIT 5; +SELECT number % 2 ? 'hello' : materialize('world') FROM system.numbers LIMIT 5; +SELECT number % 2 ? materialize('hello') : materialize('world') FROM system.numbers LIMIT 5; + +SELECT number % 2 ? toFixedString('hello', 5) : 'world' FROM system.numbers LIMIT 5; +SELECT number % 2 ? materialize(toFixedString('hello', 5)) : 'world' FROM system.numbers LIMIT 5; +SELECT number % 2 ? toFixedString('hello', 5) : materialize('world') FROM system.numbers LIMIT 5; +SELECT number % 2 ? materialize(toFixedString('hello', 5)) : materialize('world') FROM system.numbers LIMIT 5; + +SELECT number % 2 ? 'hello' : toFixedString('world', 5) FROM system.numbers LIMIT 5; +SELECT number % 2 ? materialize('hello') : toFixedString('world', 5) FROM system.numbers LIMIT 5; +SELECT number % 2 ? 'hello' : materialize(toFixedString('world', 5)) FROM system.numbers LIMIT 5; +SELECT number % 2 ? materialize('hello') : materialize(toFixedString('world', 5)) FROM system.numbers LIMIT 5; + +SELECT number % 2 ? toFixedString('hello', 5) : toFixedString('world', 5) FROM system.numbers LIMIT 5; +SELECT number % 2 ? materialize(toFixedString('hello', 5)) : toFixedString('world', 5) FROM system.numbers LIMIT 5; +SELECT number % 2 ? toFixedString('hello', 5) : materialize(toFixedString('world', 5)) FROM system.numbers LIMIT 5; +SELECT number % 2 ? 
materialize(toFixedString('hello', 5)) : materialize(toFixedString('world', 5)) FROM system.numbers LIMIT 5; diff --git a/parser/testdata/00487_if_array_fixed_string/ast.json b/parser/testdata/00487_if_array_fixed_string/ast.json new file mode 100644 index 000000000..2889f4ee8 --- /dev/null +++ b/parser/testdata/00487_if_array_fixed_string/ast.json @@ -0,0 +1,139 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function toFixedString (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Literal Array_['hello', 'world']" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function toFixedString (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Literal Array_['a', 'b', 'c']" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_4" + } + ], + + "rows": 39, + + "statistics": + { + "elapsed": 0.001197552, + "rows_read": 39, + "bytes_read": 1601 + } +} diff --git a/parser/testdata/00487_if_array_fixed_string/metadata.json b/parser/testdata/00487_if_array_fixed_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00487_if_array_fixed_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00487_if_array_fixed_string/query.sql b/parser/testdata/00487_if_array_fixed_string/query.sql new file mode 100644 index 000000000..743b88dac --- /dev/null +++ b/parser/testdata/00487_if_array_fixed_string/query.sql @@ -0,0 +1,4 @@ +SELECT number % 2 ? arrayMap(x -> toFixedString(x, 5), ['hello', 'world']) : arrayMap(x -> toFixedString(x, 5), ['a', 'b', 'c']) FROM system.numbers LIMIT 4; +SELECT number % 2 ? materialize(arrayMap(x -> toFixedString(x, 5), ['hello', 'world'])) : arrayMap(x -> toFixedString(x, 5), ['a', 'b', 'c']) FROM system.numbers LIMIT 4; +SELECT number % 2 ? 
arrayMap(x -> toFixedString(x, 5), ['hello', 'world']) : materialize(arrayMap(x -> toFixedString(x, 5), ['a', 'b', 'c'])) FROM system.numbers LIMIT 4; +SELECT number % 2 ? materialize(arrayMap(x -> toFixedString(x, 5), ['hello', 'world'])) : materialize(arrayMap(x -> toFixedString(x, 5), ['a', 'b', 'c'])) FROM system.numbers LIMIT 4; diff --git a/parser/testdata/00488_column_name_primary/ast.json b/parser/testdata/00488_column_name_primary/ast.json new file mode 100644 index 000000000..3fddc9fa6 --- /dev/null +++ b/parser/testdata/00488_column_name_primary/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery primary (children 1)" + }, + { + "explain": " Identifier primary" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001133642, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00488_column_name_primary/metadata.json b/parser/testdata/00488_column_name_primary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00488_column_name_primary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00488_column_name_primary/query.sql b/parser/testdata/00488_column_name_primary/query.sql new file mode 100644 index 000000000..124d0e142 --- /dev/null +++ b/parser/testdata/00488_column_name_primary/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS primary; + +CREATE TABLE primary +( + `primary` String +) +ENGINE = MergeTree +ORDER BY primary +settings min_bytes_for_wide_part=0,min_bytes_for_wide_part=0 + AS +SELECT * +FROM numbers(1000); + +select max(primary) from primary; + +DROP TABLE primary; diff --git a/parser/testdata/00488_non_ascii_column_names/ast.json b/parser/testdata/00488_non_ascii_column_names/ast.json new file mode 100644 index 000000000..f771a9268 --- /dev/null +++ b/parser/testdata/00488_non_ascii_column_names/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery non_ascii (children 1)" + }, + { + "explain": " Identifier non_ascii" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001245757, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00488_non_ascii_column_names/metadata.json b/parser/testdata/00488_non_ascii_column_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00488_non_ascii_column_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00488_non_ascii_column_names/query.sql b/parser/testdata/00488_non_ascii_column_names/query.sql new file mode 100644 index 000000000..ac84f020e --- /dev/null +++ b/parser/testdata/00488_non_ascii_column_names/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS non_ascii; +CREATE TABLE non_ascii (`привет` String, `мир` String) ENGINE = TinyLog; +INSERT INTO non_ascii VALUES ('hello', 'world'); +SELECT `привет` FROM non_ascii; +SELECT * FROM non_ascii; +DROP TABLE non_ascii; diff --git a/parser/testdata/00489_pk_subexpression/ast.json b/parser/testdata/00489_pk_subexpression/ast.json new file mode 100644 index 000000000..5e90df281 --- /dev/null +++ b/parser/testdata/00489_pk_subexpression/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery pk (children 1)" + }, + { + "explain": " Identifier pk" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001339584, + "rows_read": 
2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/00489_pk_subexpression/metadata.json b/parser/testdata/00489_pk_subexpression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00489_pk_subexpression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00489_pk_subexpression/query.sql b/parser/testdata/00489_pk_subexpression/query.sql new file mode 100644 index 000000000..700581a9d --- /dev/null +++ b/parser/testdata/00489_pk_subexpression/query.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS pk; + +set allow_deprecated_syntax_for_merge_tree=1; +-- NOTE: here the timezone is pinned to UTC, to avoid issues with "partial +-- timezones" (timezones that does not starts from 00:00), like +-- Africa/Monrovia, for which toStartOfMinute(0) and toStartOfMinute(59) can +-- give different values: +-- +-- SELECT +-- toDateTime(0, 'Africa/Monrovia') AS sec0, +-- toDateTime(59, 'Africa/Monrovia') AS sec59 +-- +-- ┌────────────────sec0─┬───────────────sec59─┐ +-- │ 1969-12-31 23:15:30 │ 1969-12-31 23:16:29 │ +-- └─────────────────────┴─────────────────────┘ +-- +CREATE TABLE pk (d Date DEFAULT '2000-01-01', x DateTime, y UInt64, z UInt64) ENGINE = MergeTree(d, (toStartOfMinute(x, 'UTC'), y, z), 1); + +INSERT INTO pk (x, y, z) VALUES (1, 11, 1235), (2, 11, 4395), (3, 22, 3545), (4, 22, 6984), (5, 33, 4596), (61, 11, 4563), (62, 11, 4578), (63, 11, 3572), (64, 22, 5786), (65, 22, 5786), (66, 22, 2791), (67, 22, 2791), (121, 33, 2791), (122, 33, 2791), (123, 33, 1235), (124, 44, 4935), (125, 44, 4578), (126, 55, 5786), (127, 55, 2791), (128, 55, 1235); + +SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +SET max_block_size = 1; + +-- Test inferred limit +SET max_rows_to_read = 5; +SELECT toUInt32(x), y, z FROM pk WHERE x BETWEEN toDateTime(0) AND toDateTime(59); + +SET max_rows_to_read = 9; +SELECT toUInt32(x), y, z FROM pk WHERE x BETWEEN toDateTime(120) AND toDateTime(240); + +-- Index is coarse, cannot read single row +SET max_rows_to_read = 5; +SELECT toUInt32(x), y, z FROM pk WHERE x = toDateTime(1); + +-- Index works on interval 00:01:00 - 00:01:59 +SET max_rows_to_read = 4; +SELECT toUInt32(x), y, z FROM pk WHERE (x BETWEEN toDateTime(60) AND toDateTime(119)) AND y = 11; + +-- Cannot read less rows as PK is coarser on interval 00:01:00 - 00:02:00 +SET max_rows_to_read = 5; +SELECT toUInt32(x), y, z FROM pk WHERE (x BETWEEN toDateTime(60) AND toDateTime(120)) AND y = 11; + +DROP TABLE pk; diff --git a/parser/testdata/00490_special_line_separators_and_characters_outside_of_bmp/ast.json b/parser/testdata/00490_special_line_separators_and_characters_outside_of_bmp/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00490_special_line_separators_and_characters_outside_of_bmp/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00490_special_line_separators_and_characters_outside_of_bmp/metadata.json b/parser/testdata/00490_special_line_separators_and_characters_outside_of_bmp/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00490_special_line_separators_and_characters_outside_of_bmp/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00490_special_line_separators_and_characters_outside_of_bmp/query.sql b/parser/testdata/00490_special_line_separators_and_characters_outside_of_bmp/query.sql new file mode 100644 index 000000000..aac30d45d --- /dev/null +++ 
b/parser/testdata/00490_special_line_separators_and_characters_outside_of_bmp/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest + +SELECT visitParamExtractString('{"x":"\\uD800\\udf38"}', 'x') AS x, visitParamExtractString('{"x":"Hello \\u2028 World \\u2029 !"}', 'x') AS y FORMAT JSONEachRow; +SELECT 'Hello' || convertCharset(unhex('2028'), 'utf16be', 'utf8') || 'World' || convertCharset(unhex('2029'), 'utf16be', 'utf8') || '!' AS x, hex(x) AS h FORMAT JSONEachRow; +SELECT 'Hello' || convertCharset(unhex('2028'), 'utf16be', 'utf8') || 'World' || convertCharset(unhex('2029'), 'utf16be', 'utf8') || '!' AS x, hex(x) AS h FORMAT TSV; diff --git a/parser/testdata/00490_with_select/ast.json b/parser/testdata/00490_with_select/ast.json new file mode 100644 index 000000000..a685bda05 --- /dev/null +++ b/parser/testdata/00490_with_select/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001365504, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00490_with_select/metadata.json b/parser/testdata/00490_with_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00490_with_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00490_with_select/query.sql b/parser/testdata/00490_with_select/query.sql new file mode 100644 index 000000000..d63f0ca4f --- /dev/null +++ b/parser/testdata/00490_with_select/query.sql @@ -0,0 +1,8 @@ +SET enable_analyzer = 1; + +with pow(2,2) as four select pow(four, 2), 2 as two, pow(two, 2); +select `pow(four, 2)`, `pow(two, 2)` from (with pow(2,2) as four select pow(four, 2), 2 as two, pow(two, 2)); +with (select pow(two,2)) as four select pow(four, 2), 2 as two, pow(two, 2); +select `pow(four, 2)`, `pow(two, 2)` from (with (select pow(2,2)) as four select pow(four, 2), 2 as two, pow(two, 2)); +with 'string' as str select str || '_abc'; +select `concat(str, \'_abc\')` from (with 'string' as str select str || '_abc'); diff --git a/parser/testdata/00491_shard_distributed_and_aliases_in_where_having/ast.json b/parser/testdata/00491_shard_distributed_and_aliases_in_where_having/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00491_shard_distributed_and_aliases_in_where_having/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00491_shard_distributed_and_aliases_in_where_having/metadata.json b/parser/testdata/00491_shard_distributed_and_aliases_in_where_having/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00491_shard_distributed_and_aliases_in_where_having/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00491_shard_distributed_and_aliases_in_where_having/query.sql b/parser/testdata/00491_shard_distributed_and_aliases_in_where_having/query.sql new file mode 100644 index 000000000..117ca8025 --- /dev/null +++ b/parser/testdata/00491_shard_distributed_and_aliases_in_where_having/query.sql @@ -0,0 +1,3 @@ +-- Tags: distributed + +SELECT dummy FROM (SELECT dummy, NOT dummy AS x FROM remote('127.0.0.{2,3}', system.one) GROUP BY dummy HAVING x); diff --git a/parser/testdata/00492_drop_temporary_table/ast.json b/parser/testdata/00492_drop_temporary_table/ast.json new file mode 100644 index 000000000..85c0ac376 --- /dev/null +++ b/parser/testdata/00492_drop_temporary_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": 
+ [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery temp_tab (children 1)" + }, + { + "explain": " Identifier temp_tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001286529, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00492_drop_temporary_table/metadata.json b/parser/testdata/00492_drop_temporary_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00492_drop_temporary_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00492_drop_temporary_table/query.sql b/parser/testdata/00492_drop_temporary_table/query.sql new file mode 100644 index 000000000..a065b7f22 --- /dev/null +++ b/parser/testdata/00492_drop_temporary_table/query.sql @@ -0,0 +1,12 @@ +DROP TEMPORARY TABLE IF EXISTS temp_tab; +CREATE TEMPORARY TABLE temp_tab (number UInt64); +INSERT INTO temp_tab SELECT number FROM system.numbers LIMIT 1; +SELECT number FROM temp_tab; +SET send_logs_level = 'fatal'; +EXISTS TEMPORARY TABLE temp_tab; +DROP TABLE temp_tab; +EXISTS TEMPORARY TABLE temp_tab; +SET send_logs_level = 'warning'; +CREATE TEMPORARY TABLE temp_tab (number UInt64); +SELECT number FROM temp_tab; +DROP TEMPORARY TABLE temp_tab; diff --git a/parser/testdata/00494_shard_alias_substitution_bug/ast.json b/parser/testdata/00494_shard_alias_substitution_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00494_shard_alias_substitution_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00494_shard_alias_substitution_bug/metadata.json b/parser/testdata/00494_shard_alias_substitution_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00494_shard_alias_substitution_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00494_shard_alias_substitution_bug/query.sql b/parser/testdata/00494_shard_alias_substitution_bug/query.sql new file mode 100644 index 000000000..4a1ee2d79 --- /dev/null +++ b/parser/testdata/00494_shard_alias_substitution_bug/query.sql @@ -0,0 +1,11 @@ +-- Tags: shard + +DROP TABLE IF EXISTS nested; +CREATE TABLE nested (n Nested(x UInt8)) ENGINE = Memory; +INSERT INTO nested VALUES ([1, 2]); +SELECT 1 AS x FROM remote('127.0.0.2', currentDatabase(), nested) ARRAY JOIN n.x; +DROP TABLE nested; + +SELECT dummy AS dummy, dummy AS b FROM system.one; +SELECT dummy AS dummy, dummy AS b, b AS c FROM system.one; +SELECT b AS c, dummy AS b, dummy AS dummy FROM system.one; diff --git a/parser/testdata/00495_reading_const_zero_column/ast.json b/parser/testdata/00495_reading_const_zero_column/ast.json new file mode 100644 index 000000000..eb33ee88e --- /dev/null +++ b/parser/testdata/00495_reading_const_zero_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery one_table (children 1)" + }, + { + "explain": " Identifier one_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001450921, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00495_reading_const_zero_column/metadata.json b/parser/testdata/00495_reading_const_zero_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00495_reading_const_zero_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00495_reading_const_zero_column/query.sql 
b/parser/testdata/00495_reading_const_zero_column/query.sql new file mode 100644 index 000000000..0af201b66 --- /dev/null +++ b/parser/testdata/00495_reading_const_zero_column/query.sql @@ -0,0 +1,7 @@ +drop table if exists one_table; +set allow_deprecated_syntax_for_merge_tree=1; +create table one_table (date Date, one UInt64) engine = MergeTree(date, (date, one), 8192); +insert into one_table select today(), toUInt64(1) from system.numbers limit 100000; +SET preferred_block_size_bytes = 8192; +select isNull(one) from one_table where isNull(one); +drop table if exists one_table; diff --git a/parser/testdata/00498_array_functions_concat_slice_push_pop/ast.json b/parser/testdata/00498_array_functions_concat_slice_push_pop/ast.json new file mode 100644 index 000000000..9599a05b5 --- /dev/null +++ b/parser/testdata/00498_array_functions_concat_slice_push_pop/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'const args'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001043555, + "rows_read": 5, + "bytes_read": 181 + } +} diff --git a/parser/testdata/00498_array_functions_concat_slice_push_pop/metadata.json b/parser/testdata/00498_array_functions_concat_slice_push_pop/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00498_array_functions_concat_slice_push_pop/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00498_array_functions_concat_slice_push_pop/query.sql b/parser/testdata/00498_array_functions_concat_slice_push_pop/query.sql new file mode 100644 index 000000000..c87d52d24 --- /dev/null +++ b/parser/testdata/00498_array_functions_concat_slice_push_pop/query.sql @@ -0,0 +1,371 @@ +select 'const args'; +select 'concat'; +select arrayConcat(emptyArrayUInt8()); +select arrayConcat(emptyArrayUInt8(), emptyArrayUInt8()); +select arrayConcat(emptyArrayUInt8(), emptyArrayUInt8(), emptyArrayUInt8()); +select arrayConcat([Null], emptyArrayUInt8()); +select arrayConcat([Null], emptyArrayUInt8(), [1]); +select arrayConcat([1, 2], [-1, -2], [0.3, 0.7], [Null]); +select arrayConcat(Null, emptyArrayUInt8()); +select arrayConcat([1], [-1], Null); +select arrayConcat([1, 2], [3, 4]); +select arrayConcat([1], [2, 3, 4]); +select arrayConcat(emptyArrayUInt8(), emptyArrayUInt8()); +SELECT arrayConcat(['abc'], ['def', 'gh', 'qwe']); +SELECT arrayConcat([1, NULL, 2], [3, NULL, 4]); +select arrayConcat([1, Null, 2], [3, 4]); + +select 'slice'; +select arraySlice(Null, 1, 2); +select arraySlice([1, 2, 3, 4, 5, 6], Null, Null); +select arraySlice([1, 2, 3, 4, 5, 6], 2, Null); +select arraySlice([1, 2, 3, 4, 5, 6], Null, 4); +select arraySlice([1, 2, 3, 4, 5, 6], Null, -2); +select arraySlice([1, 2, 3, 4, 5, 6], -3, Null); +select arraySlice([1, 2, 3, 4, 5, 6], 2, 3); +select arraySlice([1, 2, 3, 4, 5, 6], 2, -2); +select arraySlice([1, 2, 3, 4, 5, 6], -4, 2); +select arraySlice([1, 2, 3, 4, 5, 6], -4, -2); +select arraySlice([1, 2, 3, 4, 5, 6], 2, 0); +select arraySlice([1, 2, 3, 4, 5, 6], -10, 15); +select arraySlice([1, 2, 3, 4, 5, 6], -15, 10); +select arraySlice([1, 2, 3, 4, 5, 6], -15, 9); +select arraySlice([1, 2, 3, 4, 5, 6], 10, 0); +select arraySlice([1, 2, 3, 4, 5, 6], 10, -1); +select arraySlice([1, 2, 3, 4, 5, 6], 10, 
1); +select arraySlice([1, 2, Null, 4, 5, 6], 2, 4); +select arraySlice(['a', 'b', 'c', 'd', 'e'], 2, 3); +select arraySlice([Null, 'b', Null, 'd', 'e'], 2, 3); +select arraySlice([], materialize(NULL), NULL), 1 from numbers(2); + +select 'push back'; +select arrayPushBack(Null, 1); +select arrayPushBack([1], 1); +select arrayPushBack([Null], 1); +select arrayPushBack([0.5, 0.7], 1); +select arrayPushBack([1], -1); +select arrayPushBack(['a', 'b'], 'cd'); +select arrayPushBack(emptyArrayUInt8(), 1); +select arrayPushBack(emptyArrayUInt8(), -1); + +select 'push front'; +select arrayPushFront(Null, 1); +select arrayPushFront([1], 1); +select arrayPushFront([Null], 1); +select arrayPushFront([0.5, 0.7], 1); +select arrayPushFront([1], -1); +select arrayPushFront(['a', 'b'], 'cd'); +select arrayPushFront(emptyArrayUInt8(), 1); +select arrayPushFront(emptyArrayUInt8(), -1); + +select 'pop back'; +select arrayPopBack(Null); +select arrayPopBack(emptyArrayUInt8()); +select arrayPopBack([1]); +select arrayPopBack([1, 2, 3]); +select arrayPopBack([0.1, 0.2, 0.3]); +select arrayPopBack(['a', 'b', 'c']); + +select 'pop front'; +select arrayPopFront(Null); +select arrayPopFront(emptyArrayUInt8()); +select arrayPopFront([1]); +select arrayPopFront([1, 2, 3]); +select arrayPopFront([0.1, 0.2, 0.3]); +select arrayPopFront(['a', 'b', 'c']); + +DROP TABLE if exists array_functions; +select ''; +select 'table'; +create table array_functions (arr1 Array(Int8), arr2 Array(Int8), o Int8, no Nullable(Int8), l Int8, nl Nullable(Int8)) engine = TinyLog; +insert into array_functions values ([], [], 1, Null, 1, Null), ([], [1], 1, Null, 1, Null), ([1, 2, 3, 4, 5], [6, 7], 2, Null, 1, Null), ([1, 2, 3, 4, 5, 6, 7], [8], 2, 2, 3, 3), ([1, 2, 3, 4, 5, 6, 7], [], 2, Null, -3, -3), ([1, 2, 3, 4, 5, 6, 7], [], 2, Null, -3, Null), ([1, 2, 3, 4, 5, 6, 7], [], -5, -5, 4, 4), ([1, 2, 3, 4, 5, 6, 7], [], -5, -5, -3, -3); + +select * from array_functions; +select 'concat arr1, arr2'; +select arrayConcat(arr1, arr2), arr1, arr2 from array_functions; +select 'concat arr1, arr2, arr1'; +select arrayConcat(arr1, arr2, arr1), arr1, arr2 from array_functions; + +select 'arraySlice(arr1, o, l)'; +select arr1, o, l, arraySlice(arr1, o, l) from array_functions; +select 'arraySlice(arr1, no, nl)'; +select arr1, no, nl, arraySlice(arr1, no, nl) from array_functions; +select 'arraySlice(arr1, 2, l)'; +select arr1, 2, l, arraySlice(arr1, 2, l) from array_functions; +select 'arraySlice(arr1, o, 2)'; +select arr1, o, 2, arraySlice(arr1, o, 2) from array_functions; +select 'arraySlice(arr1, 2, nl)'; +select arr1, 2, nl, arraySlice(arr1, 2, nl) from array_functions; +select 'arraySlice(arr1, no, 2)'; +select arr1, no, 2, arraySlice(arr1, no, 2) from array_functions; +select 'arraySlice(arr1, -4, l)'; +select arr1, 2, l, arraySlice(arr1, -4, l) from array_functions; +select 'arraySlice(arr1, o, -2)'; +select arr1, o, 2, arraySlice(arr1, o, -2) from array_functions; +select 'arraySlice(arr1, -4, nl)'; +select arr1, 2, nl, arraySlice(arr1, -4, nl) from array_functions; +select 'arraySlice(arr1, no, -2)'; +select arr1, no, 2, arraySlice(arr1, no, -2) from array_functions; +select 'arraySlice(arr1, 2, 4)'; +select arr1, 2, 4, arraySlice(arr1, 2, 4) from array_functions; +select 'arraySlice(arr1, 2, -4)'; +select arr1, 2, 4, arraySlice(arr1, 2, -4) from array_functions; +select 'arraySlice(arr1, -4, 2)'; +select arr1, 2, 4, arraySlice(arr1, -4, 2) from array_functions; +select 'arraySlice(arr1, -4, -1)'; +select arr1, 2, 4, arraySlice(arr1, -4, -1) 
from array_functions; + +select 'arrayPushFront(arr1, 1)'; +select arr1, arrayPushFront(arr1, 1) from array_functions; +select 'arrayPushFront(arr1, 0.1)'; +select arr1, arrayPushFront(arr1, 0.1) from array_functions; +select 'arrayPushFront(arr1, l)'; +select arr1, arrayPushFront(arr1, l) from array_functions; +select 'arrayPushFront(arr1, nl)'; +select arr1, arrayPushFront(arr1, nl) from array_functions; +select 'arrayPushFront([1, 2, 3], l)'; +select arrayPushFront([1, 2, 3], l) from array_functions; +select 'arrayPushFront([1, 2, 3], nl)' from array_functions; +select arrayPushFront([1, 2, 3], nl) from array_functions; + +select 'arrayPushBack(arr1, 1)'; +select arr1, arrayPushBack(arr1, 1) from array_functions; +select 'arrayPushBack(arr1, 0.1)'; +select arr1, arrayPushBack(arr1, 0.1) from array_functions; +select 'arrayPushBack(arr1, l)'; +select arr1, arrayPushBack(arr1, l) from array_functions; +select 'arrayPushBack(arr1, nl)'; +select arr1, arrayPushBack(arr1, nl) from array_functions; +select 'arrayPushBack([1, 2, 3], l)'; +select arrayPushBack([1, 2, 3], l) from array_functions; +select 'arrayPushBack([1, 2, 3], nl)'; +select arrayPushBack([1, 2, 3], nl) from array_functions; + +select 'arrayPopFront(arr1)'; +select arr1, arrayPopFront(arr1) from array_functions; +select 'arrayPopBack(arr1)'; +select arr1, arrayPopBack(arr1) from array_functions; + + +DROP TABLE if exists array_functions; +select ''; +select 'table'; +create table array_functions (arr1 Array(Nullable(Int8)), arr2 Array(Nullable(Float32)), o Int8, no Nullable(Int8), l Int8, nl Nullable(Int8)) engine = TinyLog; +insert into array_functions values ([], [], 1, Null, 1, Null), ([], [1, Null], 1, Null, 1, Null), ([1, 2, 3, 4, 5], [6, Null], 2, Null, 1, Null), ([1, Null, 3, 4, Null, 6, 7], [8], 2, 2, 3, 3),([1, 2, 3, Null, 5, 6, 7], [Null, 1], 2, Null, -3, -3),([1, 2, 3, 4, 5, Null, 7], [1, Null], 2, Null, -3, Null), ([1, 2, 3, 4, 5, 6, 7], [1, 2], -5, -5, 4, 4),([1, Null, 3, Null, 5, 6, 7], [], -5, -5, -3, -3); + +select * from array_functions; +select 'concat arr1, arr2'; +select arrayConcat(arr1, arr2), arr1, arr2 from array_functions; +select 'concat arr1, arr2, arr1'; +select arrayConcat(arr1, arr2, arr1), arr1, arr2 from array_functions; + +select 'arraySlice(arr1, o, l)'; +select arr1, o, l, arraySlice(arr1, o, l) from array_functions; +select 'arraySlice(arr1, no, nl)'; +select arr1, no, nl, arraySlice(arr1, no, nl) from array_functions; +select 'arraySlice(arr1, 2, l)'; +select arr1, 2, l, arraySlice(arr1, 2, l) from array_functions; +select 'arraySlice(arr1, o, 2)'; +select arr1, o, 2, arraySlice(arr1, o, 2) from array_functions; +select 'arraySlice(arr1, 2, nl)'; +select arr1, 2, nl, arraySlice(arr1, 2, nl) from array_functions; +select 'arraySlice(arr1, no, 2)'; +select arr1, no, 2, arraySlice(arr1, no, 2) from array_functions; +select 'arraySlice(arr1, -4, l)'; +select arr1, 2, l, arraySlice(arr1, -4, l) from array_functions; +select 'arraySlice(arr1, o, -2)'; +select arr1, o, 2, arraySlice(arr1, o, -2) from array_functions; +select 'arraySlice(arr1, -4, nl)'; +select arr1, 2, nl, arraySlice(arr1, -4, nl) from array_functions; +select 'arraySlice(arr1, no, -2)'; +select arr1, no, 2, arraySlice(arr1, no, -2) from array_functions; +select 'arraySlice(arr1, 2, 4)'; +select arr1, 2, 4, arraySlice(arr1, 2, 4) from array_functions; +select 'arraySlice(arr1, 2, -4)'; +select arr1, 2, 4, arraySlice(arr1, 2, -4) from array_functions; +select 'arraySlice(arr1, -4, 2)'; +select arr1, 2, 4, arraySlice(arr1, -4, 2) 
from array_functions; +select 'arraySlice(arr1, -4, -1)'; +select arr1, 2, 4, arraySlice(arr1, -4, -1) from array_functions; + +select 'arrayPushFront(arr1, 1)'; +select arr1, arrayPushFront(arr1, 1) from array_functions; +select 'arrayPushFront(arr1, 0.1)'; +select arr1, arrayPushFront(arr1, 0.1) from array_functions; +select 'arrayPushFront(arr1, l)'; +select arr1, arrayPushFront(arr1, l) from array_functions; +select 'arrayPushFront(arr1, nl)'; +select arr1, arrayPushFront(arr1, nl) from array_functions; +select 'arrayPushFront([1, 2, 3], l)'; +select arrayPushFront([1, 2, 3], l) from array_functions; +select 'arrayPushFront([1, 2, 3], nl)' from array_functions; +select arrayPushFront([1, 2, 3], nl) from array_functions; + +select 'arrayPushBack(arr1, 1)'; +select arr1, arrayPushBack(arr1, 1) from array_functions; +select 'arrayPushBack(arr1, 0.1)'; +select arr1, arrayPushBack(arr1, 0.1) from array_functions; +select 'arrayPushBack(arr1, l)'; +select arr1, arrayPushBack(arr1, l) from array_functions; +select 'arrayPushBack(arr1, nl)'; +select arr1, arrayPushBack(arr1, nl) from array_functions; +select 'arrayPushBack([1, 2, 3], l)'; +select arrayPushBack([1, 2, 3], l) from array_functions; +select 'arrayPushBack([1, 2, 3], nl)'; +select arrayPushBack([1, 2, 3], nl) from array_functions; + +select 'arrayPopFront(arr1)'; +select arr1, arrayPopFront(arr1) from array_functions; +select 'arrayPopBack(arr1)'; +select arr1, arrayPopBack(arr1) from array_functions; + + +DROP TABLE if exists array_functions; +select ''; +select 'table'; +create table array_functions (arr1 Array(Nullable(Int8)), arr2 Array(UInt8), o Int8, no Nullable(Int8), l Int8, nl Nullable(Int8)) engine = TinyLog; +insert into array_functions values ([], [], 1, Null, 1, Null), ([], [1, 2], 1, Null, 1, Null), ([1, 2, 3, 4, 5], [6, 7], 2, Null, 1, Null), ([1, Null,3,4, Null, 6, 7], [8], 2, 2, 3, 3),([1, 2, 3, Null, 5, 6, 7], [0, 1], 2, Null, -3, -3),([1, 2, 3, 4, 5, Null, 7], [1, 2], 2, Null, -3, Null),([1, 2, 3,4, 5, 6, 7], [1, 2], -5, -5, 4, 4),([1, Null, 3, Null, 5, 6, 7], [], -5, -5, -3, -3); + +select * from array_functions; +select 'concat arr1, arr2'; +select arrayConcat(arr1, arr2), arr1, arr2 from array_functions; +select 'concat arr1, arr2, arr1'; +select arrayConcat(arr1, arr2, arr1), arr1, arr2 from array_functions; + +select 'arraySlice(arr1, o, l)'; +select arr1, o, l, arraySlice(arr1, o, l) from array_functions; +select 'arraySlice(arr1, no, nl)'; +select arr1, no, nl, arraySlice(arr1, no, nl) from array_functions; +select 'arraySlice(arr1, 2, l)'; +select arr1, 2, l, arraySlice(arr1, 2, l) from array_functions; +select 'arraySlice(arr1, o, 2)'; +select arr1, o, 2, arraySlice(arr1, o, 2) from array_functions; +select 'arraySlice(arr1, 2, nl)'; +select arr1, 2, nl, arraySlice(arr1, 2, nl) from array_functions; +select 'arraySlice(arr1, no, 2)'; +select arr1, no, 2, arraySlice(arr1, no, 2) from array_functions; +select 'arraySlice(arr1, -4, l)'; +select arr1, 2, l, arraySlice(arr1, -4, l) from array_functions; +select 'arraySlice(arr1, o, -2)'; +select arr1, o, 2, arraySlice(arr1, o, -2) from array_functions; +select 'arraySlice(arr1, -4, nl)'; +select arr1, 2, nl, arraySlice(arr1, -4, nl) from array_functions; +select 'arraySlice(arr1, no, -2)'; +select arr1, no, 2,
arraySlice(arr1, no, -2) from array_functions; +select 'arraySlice(arr1, 2, 4)'; +select arr1, 2, 4, arraySlice(arr1, 2, 4) from array_functions; +select 'arraySlice(arr1, 2, -4)'; +select arr1, 2, 4, arraySlice(arr1, 2, -4) from array_functions; +select 'arraySlice(arr1, -4, 2)'; +select arr1, 2, 4, arraySlice(arr1, -4, 2) from array_functions; +select 'arraySlice(arr1, -4, -1)'; +select arr1, 2, 4, arraySlice(arr1, -4, -1) from array_functions; + +select 'arrayPushFront(arr1, 1)'; +select arr1, arrayPushFront(arr1, 1) from array_functions; +select 'arrayPushFront(arr1, 0.1)'; +select arr1, arrayPushFront(arr1, 0.1) from array_functions; +select 'arrayPushFront(arr1, l)'; +select arr1, arrayPushFront(arr1, l) from array_functions; +select 'arrayPushFront(arr1, nl)'; +select arr1, arrayPushFront(arr1, nl) from array_functions; +select 'arrayPushFront([1, 2, 3], l)'; +select arrayPushFront([1, 2, 3], l) from array_functions; +select 'arrayPushFront([1, 2, 3], nl)' from array_functions; +select arrayPushFront([1, 2, 3], nl) from array_functions; + +select 'arrayPushBack(arr1, 1)'; +select arr1, arrayPushBack(arr1, 1) from array_functions; +select 'arrayPushBack(arr1, 0.1)'; +select arr1, arrayPushBack(arr1, 0.1) from array_functions; +select 'arrayPushBack(arr1, l)'; +select arr1, arrayPushBack(arr1, l) from array_functions; +select 'arrayPushBack(arr1, nl)'; +select arr1, arrayPushBack(arr1, nl) from array_functions; +select 'arrayPushBack([1, 2, 3], l)'; +select arrayPushBack([1, 2, 3], l) from array_functions; +select 'arrayPushBack([1, 2, 3], nl)'; +select arrayPushBack([1, 2, 3], nl) from array_functions; + +select 'arrayPopFront(arr1)'; +select arr1, arrayPopFront(arr1) from array_functions; +select 'arrayPopBack(arr1)'; +select arr1, arrayPopBack(arr1) from array_functions; + +DROP TABLE if exists array_functions; +select ''; +select 'table'; +create table array_functions (arr1 Array(Nullable(String)), arr2 Array(String), val String, val2 Nullable(String), o Int8, no Nullable(Int8), l Int8, nl Nullable(Int8)) engine = TinyLog; +insert into array_functions values ([], [], '', Null, 1, Null, 1, Null), ([], ['1', '2'], 'a', 'b', 1, Null, 1, Null), (['1', '2', '3', '4', '5'], ['6','7'], 'a', Null, 2, Null, 1, Null), (['1', Null, '3', '4', Null, '6', '7'], ['8'], 'a', 'b', 2, 2, 3, 3),(['1', '2', '3', Null, '5', '6', '7'], ['0','1'], 'a', Null, 2, Null, -3, -3),(['1', '2', '3', '4', '5', Null, '7'], ['1', '2'], 'a', 'b', 2, Null, -3, Null),(['1', '2', '3', '4', '5', '6', '7'],['1', '2'], 'a', Null, -5, -5, 4, 4),(['1', Null, '3', Null, '5', '6', '7'], [], 'a', 'b', -5, -5, -3, -3); + + +select * from array_functions; +select 'concat arr1, arr2'; +select arrayConcat(arr1, arr2), arr1, arr2 from array_functions; +select 'concat arr1, arr2, arr1'; +select arrayConcat(arr1, arr2, arr1), arr1, arr2 from array_functions; + +select 'arraySlice(arr1, o, l)'; +select arr1, o, l, arraySlice(arr1, o, l) from array_functions; +select 'arraySlice(arr1, no, nl)'; +select arr1, no, nl, arraySlice(arr1, no, nl) from array_functions; +select 'arraySlice(arr1, 2, l)'; +select arr1, 2, l, arraySlice(arr1, 2, l) from array_functions; +select 'arraySlice(arr1, o, 2)'; +select arr1, o, 2, arraySlice(arr1, o, 2) from array_functions; +select 'arraySlice(arr1, 2, nl)'; +select arr1, 2, nl, arraySlice(arr1, 2, nl) from array_functions; +select 'arraySlice(arr1, no, 2)'; +select arr1, no, 2, arraySlice(arr1, no, 2) from array_functions; +select 'arraySlice(arr1, -4, l)'; +select arr1, 2, l, arraySlice(arr1, -4, l) 
from array_functions; +select 'arraySlice(arr1, o, -2)'; +select arr1, o, 2, arraySlice(arr1, o, -2) from array_functions; +select 'arraySlice(arr1, -4, nl)'; +select arr1, 2, nl, arraySlice(arr1, -4, nl) from array_functions; +select 'arraySlice(arr1, no, -2)'; +select arr1, no, 2, arraySlice(arr1, no, -2) from array_functions; +select 'arraySlice(arr1, 2, 4)'; +select arr1, 2, 4, arraySlice(arr1, 2, 4) from array_functions; +select 'arraySlice(arr1, 2, -4)'; +select arr1, 2, 4, arraySlice(arr1, 2, -4) from array_functions; +select 'arraySlice(arr1, -4, 2)'; +select arr1, 2, 4, arraySlice(arr1, -4, 2) from array_functions; +select 'arraySlice(arr1, -4, -1)'; +select arr1, 2, 4, arraySlice(arr1, -4, -1) from array_functions; + +select 'arrayPushFront(arr1, 1)'; +select arr1, arrayPushFront(arr1, '1') from array_functions; +select 'arrayPushFront(arr1, val)'; +select arr1, arrayPushFront(arr1, val) from array_functions; +select 'arrayPushFront(arr1, val2)'; +select arr1, arrayPushFront(arr1, val2) from array_functions; +select 'arrayPushFront([a, b, c], val)'; +select arrayPushFront(['a', 'b', 'c'], val) from array_functions; +select 'arrayPushFront([a, b, c], val2)'; +select arrayPushFront(['a', 'b', 'c'], val2) from array_functions; + +select 'arrayPushBack(arr1, 1)'; +select arr1, arrayPushBack(arr1, '1') from array_functions; +select 'arrayPushBack(arr1, val)'; +select arr1, arrayPushBack(arr1, val) from array_functions; +select 'arrayPushBack(arr1, val2)'; +select arr1, arrayPushBack(arr1, val2) from array_functions; +select 'arrayPushBack([a, b, c], val)'; +select arrayPushBack(['a', 'b', 'c'], val) from array_functions; +select 'arrayPushBack([a, b, c], val2)'; +select arrayPushBack(['a', 'b', 'c'], val2) from array_functions; + +select 'arrayPopFront(arr1)'; +select arr1, arrayPopFront(arr1) from array_functions; +select 'arrayPopBack(arr1)'; +select arr1, arrayPopBack(arr1) from array_functions; + +DROP TABLE if exists array_functions; diff --git a/parser/testdata/00498_bitwise_aggregate_functions/ast.json b/parser/testdata/00498_bitwise_aggregate_functions/ast.json new file mode 100644 index 000000000..42ea50818 --- /dev/null +++ b/parser/testdata/00498_bitwise_aggregate_functions/ast.json @@ -0,0 +1,139 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Function modulo (alias k) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Function groupArray (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function groupBitOr (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function groupBitAnd (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function groupBitXor (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + 
{ + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_20" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier k" + } + ], + + "rows": 39, + + "statistics": + { + "elapsed": 0.001756765, + "rows_read": 39, + "bytes_read": 1542 + } +} diff --git a/parser/testdata/00498_bitwise_aggregate_functions/metadata.json b/parser/testdata/00498_bitwise_aggregate_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00498_bitwise_aggregate_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00498_bitwise_aggregate_functions/query.sql b/parser/testdata/00498_bitwise_aggregate_functions/query.sql new file mode 100644 index 000000000..2c4610e6c --- /dev/null +++ b/parser/testdata/00498_bitwise_aggregate_functions/query.sql @@ -0,0 +1,3 @@ +SELECT number % 4 AS k, groupArray(number), groupBitOr(number), groupBitAnd(number), groupBitXor(number) FROM (SELECT * FROM system.numbers LIMIT 20) GROUP BY k ORDER BY k; +SELECT number % 4 AS k, groupArray(-number), groupBitOr(-number), groupBitAnd(-number), groupBitXor(-number) FROM (SELECT * FROM system.numbers LIMIT 20) GROUP BY k ORDER BY k; +SELECT number % 4 AS k, groupArray(number-10), groupBitOr(number-10), groupBitAnd(number-10), groupBitXor(number-10) FROM (SELECT * FROM system.numbers LIMIT 20) GROUP BY k ORDER BY k; diff --git a/parser/testdata/00499_json_enum_insert/ast.json b/parser/testdata/00499_json_enum_insert/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00499_json_enum_insert/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00499_json_enum_insert/metadata.json b/parser/testdata/00499_json_enum_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00499_json_enum_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00499_json_enum_insert/query.sql b/parser/testdata/00499_json_enum_insert/query.sql new file mode 100644 index 000000000..24eab38c6 --- /dev/null +++ b/parser/testdata/00499_json_enum_insert/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS json; +CREATE TABLE json (x Enum8('browser' = 1, 'mobile' = 2), y String) ENGINE = Memory; + +INSERT INTO json (y) VALUES ('Hello'); + +SELECT * FROM json ORDER BY y; + +INSERT INTO json (y) FORMAT JSONEachRow {"y": "World 1"}; + +SELECT * FROM json ORDER BY y; + +INSERT INTO json (x, y) FORMAT JSONEachRow {"y": "World 2"}; + +SELECT * FROM json ORDER BY y; + +DROP TABLE json; diff --git a/parser/testdata/00500_point_in_polygon/ast.json b/parser/testdata/00500_point_in_polygon/ast.json new file mode 100644 index 000000000..c7a3d4325 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } 
+ ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'inner'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001247223, + "rows_read": 5, + "bytes_read": 176 + } +} diff --git a/parser/testdata/00500_point_in_polygon/metadata.json b/parser/testdata/00500_point_in_polygon/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00500_point_in_polygon/query.sql b/parser/testdata/00500_point_in_polygon/query.sql new file mode 100644 index 000000000..2b46111ac --- /dev/null +++ b/parser/testdata/00500_point_in_polygon/query.sql @@ -0,0 +1,84 @@ +SELECT 'inner'; +SELECT pointInPolygon((3., 3.), [(6, 0), (8, 4), (5, 8), (0, 2), (6, 0)]); +SELECT 'outer'; +SELECT pointInPolygon((0.1, 0.1), [(6., 0.), (8., 4.), (5., 8.), (0., 2.), (6., 0.)]); +SELECT 'single line'; +SELECT pointInPolygon((4.1, 0.1), [(6., 0.), (8., 4.), (5., 8.), (0., 2.), (6., 0.)]); +SELECT pointInPolygon((4.9, 0.9), [(6., 0.), (8., 4.), (5., 8.), (0., 2.), (6., 0.)]); +SELECT 'shifted grid'; +SELECT pointInPolygon((0., 0.), [(6., 1.), (8., 4.), (5., 8.), (1., 2.), (6., 1.)]); +SELECT pointInPolygon((6., 5.), [(6., 1.), (8., 4.), (5., 8.), (1., 2.), (6., 1.)]); + +SELECT 'pair of lines, single polygon'; +SELECT pointInPolygon((0.1, 0.1), [(0., 0.), (8., 7.), (7., 8.), (0., 0.)]); +SELECT pointInPolygon((0.9, 0.1), [(0., 0.), (8., 7.), (7., 8.), (0., 0.)]); +SELECT pointInPolygon((0.1, 0.9), [(0., 0.), (8., 7.), (7., 8.), (0., 0.)]); +SELECT pointInPolygon((2.2, 2.2), [(0., 0.), (8., 7.), (7., 8.), (0., 0.)]); +SELECT pointInPolygon((2.1, 2.9), [(0., 0.), (8., 7.), (7., 8.), (0., 0.)]); +SELECT pointInPolygon((2.9, 2.1), [(0., 0.), (8., 7.), (7., 8.), (0., 0.)]); + +SELECT 'pair of lines, different polygons'; +SELECT pointInPolygon((0.1, 0.1), [(0.5, 0.), (1.0, 0.), (8.0, 7.5), (7.5, 8.0), (0., 1.), (0., 0.5), (4.5, 5.5), (5.5, 4.5), (0.5, 0.0)]); -- { serverError BAD_ARGUMENTS } +SELECT pointInPolygon((1., 1.), [(0.5, 0.), (1.0, 0.), (8.0, 7.5), (7.5, 8.0), (0., 1.), (0., 0.5), (4.5, 5.5), (5.5, 4.5), (0.5, 0.0)]); -- { serverError BAD_ARGUMENTS } +SELECT pointInPolygon((0.7, 0.1), [(0.5, 0.), (1.0, 0.), (8.0, 7.5), (7.5, 8.0), (0., 1.), (0., 0.5), (4.5, 5.5), (5.5, 4.5), (0.5, 0.0)]); -- { serverError BAD_ARGUMENTS } +SELECT pointInPolygon((0.1, 0.7), [(0.5, 0.), (1.0, 0.), (8.0, 7.5), (7.5, 8.0), (0., 1.), (0., 0.5), (4.5, 5.5), (5.5, 4.5), (0.5, 0.0)]); -- { serverError BAD_ARGUMENTS } +SELECT pointInPolygon((1.1, 0.1), [(0.5, 0.), (1.0, 0.), (8.0, 7.5), (7.5, 8.0), (0., 1.), (0., 0.5), (4.5, 5.5), (5.5, 4.5), (0.5, 0.0)]); -- { serverError BAD_ARGUMENTS } +SELECT pointInPolygon((0.1, 1.1), [(0.5, 0.), (1.0, 0.), (8.0, 7.5), (7.5, 8.0), (0., 1.), (0., 0.5), (4.5, 5.5), (5.5, 4.5), (0.5, 0.0)]); -- { serverError BAD_ARGUMENTS } +SELECT pointInPolygon((5.0, 5.0), [(0.5, 0.), (1.0, 0.), (8.0, 7.5), (7.5, 8.0), (0., 1.), (0., 0.5), (4.5, 5.5), (5.5, 4.5), (0.5, 0.0)]); -- { serverError BAD_ARGUMENTS } +SELECT pointInPolygon((7.9, 7.9), [(0.5, 0.), (1.0, 0.), (8.0, 7.5), (7.5, 8.0), (0., 1.), (0., 0.5), (4.5, 5.5), (5.5, 4.5), (0.5, 0.0)]); -- { serverError BAD_ARGUMENTS } + +SELECT 'complex polygon'; +SELECT pointInPolygon((0.05, 0.05), [(0., 1.), (0.2, 0.5), (0.6, 0.5), (0.8, 
0.8), (0.8, 0.3), (0.1, 0.3), (0.1, 0.1), (0.8, 0.1), (1.0, 0.0), (8.0, 7.0), (7.0, 8.0), (0., 1.)]); +SELECT pointInPolygon((0.15, 0.15), [(0., 1.), (0.2, 0.5), (0.6, 0.5), (0.8, 0.8), (0.8, 0.3), (0.1, 0.3), (0.1, 0.1), (0.8, 0.1), (1.0, 0.0), (8.0, 7.0), (7.0, 8.0), (0., 1.)]); +SELECT pointInPolygon((0.3, 0.4), [(0., 1.), (0.2, 0.5), (0.6, 0.5), (0.8, 0.8), (0.8, 0.3), (0.1, 0.3), (0.1, 0.1), (0.8, 0.1), (1.0, 0.0), (8.0, 7.0), (7.0, 8.0), (0., 1.)]); +SELECT pointInPolygon((0.4, 0.7), [(0., 1.), (0.2, 0.5), (0.6, 0.5), (0.8, 0.8), (0.8, 0.3), (0.1, 0.3), (0.1, 0.1), (0.8, 0.1), (1.0, 0.0), (8.0, 7.0), (7.0, 8.0), (0., 1.)]); +SELECT pointInPolygon((0.7, 0.6), [(0., 1.), (0.2, 0.5), (0.6, 0.5), (0.8, 0.8), (0.8, 0.3), (0.1, 0.3), (0.1, 0.1), (0.8, 0.1), (1.0, 0.0), (8.0, 7.0), (7.0, 8.0), (0., 1.)]); +SELECT pointInPolygon((0.9, 0.1), [(0., 1.), (0.2, 0.5), (0.6, 0.5), (0.8, 0.8), (0.8, 0.3), (0.1, 0.3), (0.1, 0.1), (0.8, 0.1), (1.0, 0.0), (8.0, 7.0), (7.0, 8.0), (0., 1.)]); + +SELECT 'polygon with holes'; +SELECT pointInPolygon((1., 1.), [(4., 0.), (8., 4.), (4., 8.), (0., 4.)], [(3., 3.), (3., 5.), (5., 5.), (5., 3.)]); +SELECT pointInPolygon((2.5, 2.5), [(4., 0.), (8., 4.), (4., 8.), (0., 4.)], [(3., 3.), (3., 5.), (5., 5.), (5., 3.)]); +SELECT pointInPolygon((4., 4.), [(4., 0.), (8., 4.), (4., 8.), (0., 4.)], [(3., 3.), (3., 5.), (5., 5.), (5., 3.)]); +SELECT pointInPolygon((4., 2.), [(4., 0.), (8., 4.), (4., 8.), (0., 4.)], [(3., 3.), (3., 5.), (5., 5.), (5., 3.)]); +SELECT pointInPolygon((9., 9.), [(4., 0.), (8., 4.), (4., 8.), (0., 4.)], [(3., 3.), (3., 5.), (5., 5.), (5., 3.)]); + +SELECT pointInPolygon((0.5, 1.5), [(0., 0.), (7., 0.), (7., 3.), (0., 3.)], [(1., 1.), (2., 1.), (2., 2.), (1., 2.)], [(3., 1.), (4., 1.), (4., 2.), (3., 2.)], [(5., 1.), (6., 1.), (6., 2.), (5., 2.)]); +SELECT pointInPolygon((1.5, 1.5), [(0., 0.), (7., 0.), (7., 3.), (0., 3.)], [(1., 1.), (2., 1.), (2., 2.), (1., 2.)], [(3., 1.), (4., 1.), (4., 2.), (3., 2.)], [(5., 1.), (6., 1.), (6., 2.), (5., 2.)]); +SELECT pointInPolygon((2.5, 1.5), [(0., 0.), (7., 0.), (7., 3.), (0., 3.)], [(1., 1.), (2., 1.), (2., 2.), (1., 2.)], [(3., 1.), (4., 1.), (4., 2.), (3., 2.)], [(5., 1.), (6., 1.), (6., 2.), (5., 2.)]); +SELECT pointInPolygon((3.5, 1.5), [(0., 0.), (7., 0.), (7., 3.), (0., 3.)], [(1., 1.), (2., 1.), (2., 2.), (1., 2.)], [(3., 1.), (4., 1.), (4., 2.), (3., 2.)], [(5., 1.), (6., 1.), (6., 2.), (5., 2.)]); +SELECT pointInPolygon((4.5, 1.5), [(0., 0.), (7., 0.), (7., 3.), (0., 3.)], [(1., 1.), (2., 1.), (2., 2.), (1., 2.)], [(3., 1.), (4., 1.), (4., 2.), (3., 2.)], [(5., 1.), (6., 1.), (6., 2.), (5., 2.)]); +SELECT pointInPolygon((5.5, 1.5), [(0., 0.), (7., 0.), (7., 3.), (0., 3.)], [(1., 1.), (2., 1.), (2., 2.), (1., 2.)], [(3., 1.), (4., 1.), (4., 2.), (3., 2.)], [(5., 1.), (6., 1.), (6., 2.), (5., 2.)]); +SELECT pointInPolygon((6.5, 1.5), [(0., 0.), (7., 0.), (7., 3.), (0., 3.)], [(1., 1.), (2., 1.), (2., 2.), (1., 2.)], [(3., 1.), (4., 1.), (4., 2.), (3., 2.)], [(5., 1.), (6., 1.), (6., 2.), (5., 2.)]); + +SELECT 'polygons with reversed direction'; +SELECT pointInPolygon((4.1, .1), [(6., 0.), (0., 2.), (5., 8.), (8., 4.)]); +SELECT pointInPolygon((4.1, .9), [(6., 0.), (0., 2.), (5., 8.), (8., 4.)]); + +SELECT pointInPolygon((1., 1.), [(4., 0.), (8., 4.), (4., 8.), (0., 4.)], [(3., 3.), (5., 3.), (5., 5.), (3., 5.)]); +SELECT pointInPolygon((2.5, 2.5), [(4., 0.), (8., 4.), (4., 8.), (0., 4.)], [(3., 3.), (5., 3.), (5., 5.), (3., 5.)]); +SELECT pointInPolygon((4., 4.), [(4., 0.), (8., 4.), (4., 8.), 
(0., 4.)], [(3., 3.), (5., 3.), (5., 5.), (3., 5.)]); +SELECT pointInPolygon((4., 2.), [(4., 0.), (8., 4.), (4., 8.), (0., 4.)], [(3., 3.), (5., 3.), (5., 5.), (3., 5.)]); +SELECT pointInPolygon((9., 9.), [(4., 0.), (8., 4.), (4., 8.), (0., 4.)],[(3., 3.), (5., 3.), (5., 5.), (3., 5.)]); + + +SELECT 'eps for complex polygon in grid'; +SELECT pointInPolygon((0., 0.), [(0., 1.), (0.2, 0.5), (0.6, 0.5), (0.8, 0.8), (0.8, 0.3), (0.1, 0.3), (0.1, 0.1), (0.8, 0.1), (1., 0.), (-6., -7.), (-7., -6.), (0., 1.)]); + +SELECT 'multipolygon with one polygon with holes'; +SELECT pointInPolygon((0.5, 1.5), [[[(0., 0.), (7., 0.), (7., 3.), (0., 3.)], [(1., 1.), (2., 1.), (2., 2.), (1., 2.)], [(3., 1.), (4., 1.), (4., 2.), (3., 2.)], [(5., 1.), (6., 1.), (6., 2.), (5., 2.)]]]); +SELECT pointInPolygon((1.5, 1.5), [[[(0., 0.), (7., 0.), (7., 3.), (0., 3.)], [(1., 1.), (2., 1.), (2., 2.), (1., 2.)], [(3., 1.), (4., 1.), (4., 2.), (3., 2.)], [(5., 1.), (6., 1.), (6., 2.), (5., 2.)]]]); +SELECT pointInPolygon((2.5, 1.5), [[[(0., 0.), (7., 0.), (7., 3.), (0., 3.)], [(1., 1.), (2., 1.), (2., 2.), (1., 2.)], [(3., 1.), (4., 1.), (4., 2.), (3., 2.)], [(5., 1.), (6., 1.), (6., 2.), (5., 2.)]]]); +SELECT pointInPolygon((3.5, 1.5), [[[(0., 0.), (7., 0.), (7., 3.), (0., 3.)], [(1., 1.), (2., 1.), (2., 2.), (1., 2.)], [(3., 1.), (4., 1.), (4., 2.), (3., 2.)], [(5., 1.), (6., 1.), (6., 2.), (5., 2.)]]]); +SELECT pointInPolygon((4.5, 1.5), [[[(0., 0.), (7., 0.), (7., 3.), (0., 3.)], [(1., 1.), (2., 1.), (2., 2.), (1., 2.)], [(3., 1.), (4., 1.), (4., 2.), (3., 2.)], [(5., 1.), (6., 1.), (6., 2.), (5., 2.)]]]); +SELECT pointInPolygon((5.5, 1.5), [[[(0., 0.), (7., 0.), (7., 3.), (0., 3.)], [(1., 1.), (2., 1.), (2., 2.), (1., 2.)], [(3., 1.), (4., 1.), (4., 2.), (3., 2.)], [(5., 1.), (6., 1.), (6., 2.), (5., 2.)]]]); +SELECT pointInPolygon((6.5, 1.5), [[[(0., 0.), (7., 0.), (7., 3.), (0., 3.)], [(1., 1.), (2., 1.), (2., 2.), (1., 2.)], [(3., 1.), (4., 1.), (4., 2.), (3., 2.)], [(5., 1.), (6., 1.), (6., 2.), (5., 2.)]]]); + + +SELECT 'multipolygon with two polygons with holes'; +SELECT pointInPolygon((1.5, 8.), [[[(0.,0.),(4.,0.),(4.,4.),(0.,4.),(0.,0.)],[(1.,1.),(2.,1.),(2.,2.),(1.,2.),(1.,1.)]], [[(6.,0.),(10.,0.),(10.,4.),(6.,4.),(6.,0.)],[(7.,1.),(9.,1.),(9.,3.),(7.,3.),(7.,1.)]], [[(0.,6.),(4.,6.),(4.,10.),(0.,10.),(0.,6.)],[(1.,7.5),(3.,7.5),(3.,8.5),(1.,8.5),(1.,7.5)]], [[(6.,6.),(10.,6.),(10.,10.),(6.,10.),(6.,6.)],[(7.,7.),(8.,7.),(8.,8.),(7.,8.),(7.,7.)]]]); +SELECT pointInPolygon((0.5, 1.5), [[[(0.,0.),(6.,0.),(6.,4.),(0.,4.),(0.,0.)],[(1.,1.),(2.,1.),(2.,2.),(1.,2.),(1.,1.)],[(4.,1.),(5.,1.),(5.,2.),(4.,2.),(4.,1.)]],[[(8.,0.),(14.,0.),(14.,5.),(8.,5.),(8.,0.)],[(9.,1.),(11.,1.),(11.,3.),(9.,3.),(9.,1.)],[(10.,3.5),(12.,3.5),(12.,4.5),(10.,4.5),(10.,3.5)]]]); + + +SELECT 'multipolygon with two polygons with holes (variadic)'; +SELECT pointInPolygon((1.5, 8.), [[(0.,0.),(4.,0.),(4.,4.),(0.,4.),(0.,0.)],[(1.,1.),(2.,1.),(2.,2.),(1.,2.),(1.,1.)]], [[(6.,0.),(10.,0.),(10.,4.),(6.,4.),(6.,0.)],[(7.,1.),(9.,1.),(9.,3.),(7.,3.),(7.,1.)]], [[(0.,6.),(4.,6.),(4.,10.),(0.,10.),(0.,6.)],[(1.,7.5),(3.,7.5),(3.,8.5),(1.,8.5),(1.,7.5)]], [[(6.,6.),(10.,6.),(10.,10.),(6.,10.),(6.,6.)],[(7.,7.),(8.,7.),(8.,8.),(7.,8.),(7.,7.)]]); +SELECT pointInPolygon((0.5, 1.5), [[(0.,0.),(6.,0.),(6.,4.),(0.,4.),(0.,0.)],[(1.,1.),(2.,1.),(2.,2.),(1.,2.),(1.,1.)],[(4.,1.),(5.,1.),(5.,2.),(4.,2.),(4.,1.)]],[[(8.,0.),(14.,0.),(14.,5.),(8.,5.),(8.,0.)],[(9.,1.),(11.,1.),(11.,3.),(9.,3.),(9.,1.)],[(10.,3.5),(12.,3.5),(12.,4.5),(10.,4.5),(10.,3.5)]]); 
diff --git a/parser/testdata/00500_point_in_polygon_2d_const/ast.json b/parser/testdata/00500_point_in_polygon_2d_const/ast.json new file mode 100644 index 000000000..dccad09f8 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_2d_const/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function pointInPolygon (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_0)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_0)" + }, + { + "explain": " Literal Tuple_(UInt64_10, UInt64_0)" + }, + { + "explain": " Literal Tuple_(UInt64_10, UInt64_10)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_10)" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001476042, + "rows_read": 15, + "bytes_read": 667 + } +} diff --git a/parser/testdata/00500_point_in_polygon_2d_const/metadata.json b/parser/testdata/00500_point_in_polygon_2d_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_2d_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00500_point_in_polygon_2d_const/query.sql b/parser/testdata/00500_point_in_polygon_2d_const/query.sql new file mode 100644 index 000000000..f2f9771b8 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_2d_const/query.sql @@ -0,0 +1,14 @@ +SELECT pointInPolygon((0, 0), [[(0, 0), (10, 0), (10, 10), (0, 10)]]); + +DROP TABLE IF EXISTS s; +CREATE TABLE s (`id` String, `lng` Int64, `lat` Int64) ENGINE = Memory(); + +DROP TABLE IF EXISTS p; +CREATE TABLE p (`polygon_id` Int64, `polygon_name` String, `shape` Array(Array(Tuple(Float64, Float64))), `state` String) ENGINE = Memory(); + +INSERT INTO s VALUES ('a', 0, 0); +INSERT INTO p VALUES (8, 'a', [[(0, 0), (10, 0), (10, 10), (0, 10)]], 'a'); +SELECT id FROM s WHERE pointInPolygon((lng,lat), (select shape from p where polygon_id = 8)); + +DROP TABLE s; +DROP TABLE p; diff --git a/parser/testdata/00500_point_in_polygon_3d_const/ast.json b/parser/testdata/00500_point_in_polygon_3d_const/ast.json new file mode 100644 index 000000000..3becdf37a --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_3d_const/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery points_test (children 1)" + }, + { + "explain": " Identifier points_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001169329, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/00500_point_in_polygon_3d_const/metadata.json b/parser/testdata/00500_point_in_polygon_3d_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_3d_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00500_point_in_polygon_3d_const/query.sql b/parser/testdata/00500_point_in_polygon_3d_const/query.sql new file mode 100644 index 000000000..c5433dc3d --- /dev/null +++ 
b/parser/testdata/00500_point_in_polygon_3d_const/query.sql @@ -0,0 +1,41 @@ +DROP TABLE IF EXISTS points_test; + +CREATE TABLE points_test +( + x Float64, + y Float64, + note String +) +ENGINE = TinyLog; + +INSERT INTO points_test (x, y, note) VALUES +(3, 3, 'poly-0 | hole-0'), +(7, 3, 'poly-0 | hole-1'), +(5, 7, 'poly-0 | hole-2'), +(1, 1, 'poly-0 solid'), +(9, 9, 'poly-0 solid'), +(23, 3, 'poly-1 | hole-0'), +(27, 3, 'poly-1 | hole-1'), +(25, 7, 'poly-1 | hole-2'), +(21, 1, 'poly-1 solid'), +(29, 9, 'poly-1 solid'), +(-1,-1, 'outside all'), +(15, 5, 'outside all'), +(35, 5, 'outside all'); + +SELECT x, y, note, +pointInPolygon( (x, y), +[ + [ [(0,0),(10,0),(10,10),(0,10),(0,0)], + [(2,2),(4,2),(4,4),(2,4),(2,2)], + [(6,2),(8,2),(8,4),(6,4),(6,2)], + [(4,6),(6,6),(6,8),(4,8),(4,6)] ], + [ [(20,0),(30,0),(30,10),(20,10),(20,0)], + [(22,2),(24,2),(24,4),(22,4),(22,2)], + [(26,2),(28,2),(28,4),(26,4),(26,2)], + [(24,6),(26,6),(26,8),(24,8),(24,6)] ] +]) AS inside +FROM points_test +ORDER BY note, x, y; + +DROP TABLE IF EXISTS points_test; diff --git a/parser/testdata/00500_point_in_polygon_bug/ast.json b/parser/testdata/00500_point_in_polygon_bug/ast.json new file mode 100644 index 000000000..e6ebe2cd5 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery coords (children 1)" + }, + { + "explain": " Identifier coords" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001133819, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/00500_point_in_polygon_bug/metadata.json b/parser/testdata/00500_point_in_polygon_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00500_point_in_polygon_bug/query.sql b/parser/testdata/00500_point_in_polygon_bug/query.sql new file mode 100644 index 000000000..ee1802126 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_bug/query.sql @@ -0,0 +1,6 @@ +drop table if exists coords; +create table coords (x Float32, y Float32) engine = Memory; +insert into coords values (-81.7897198,29.4292161), (-81.7886889,29.4292222), (-81.787658,29.4292283), (-81.7866341,29.4301369), (-81.7866271,29.4292344), (-81.7856171,29.4319479), (-81.7856101,29.4310454), (-81.7856032,29.430143), (-81.7855962,29.4292405), (-81.7845931,29.4328564), (-81.7845862,29.4319539), (-81.7845792,29.4310515), (-81.7845723,29.430149), (-81.7845654,29.4292466), (-81.7835622,29.4328625), (-81.7835553,29.43196), (-81.7835483,29.4310576), (-81.7835414,29.4301551), (-81.7835345,29.4292527), (-81.7825382,29.433771), (-81.7825313,29.4328685), (-81.7825243,29.4319661), (-81.7825174,29.4310636), (-81.7825105,29.4301612), (-81.7825036,29.4292587), (-81.7815142,29.4346795), (-81.7815073,29.433777), (-81.7815003,29.4328746), (-81.7814934,29.4319721), (-81.7814865,29.4310697), (-81.7814796,29.4301672), (-81.7814727,29.4292648), (-81.7804832,29.4346855), (-81.7804763,29.4337831), (-81.7804694,29.4328806), (-81.7804625,29.4319782), (-81.7804556,29.4310757), (-81.7804487,29.4301733), (-81.7804418,29.4292708), (-81.7794592,29.435594), (-81.7794523,29.4346916), (-81.7794454,29.4337891), (-81.7794385,29.4328867), (-81.7794316,29.4319842), (-81.7794247,29.4310818), (-81.7794178,29.4301793), (-81.7794109,29.4292768), (-81.7784282,29.4356001), (-81.7784213,29.4346976), 
(-81.7784145,29.4337952), (-81.7784076,29.4328927), (-81.7784007,29.4319902), (-81.7783938,29.4310878), (-81.7783869,29.4301853), (-81.77838,29.4292829), (-81.7773973,29.4356061), (-81.7773904,29.4347036), (-81.7773835,29.4338012), (-81.7773766,29.4328987), (-81.7773698,29.4319963), (-81.7773629,29.4310938), (-81.777356,29.4301914), (-81.7773492,29.4292889), (-81.7763732,29.4365146), (-81.7763663,29.4356121), (-81.7763594,29.4347096), (-81.7763526,29.4338072), (-81.7763457,29.4329047), (-81.7763389,29.4320023), (-81.776332,29.4310998), (-81.7763251,29.4301974), (-81.7763183,29.4292949), (-81.7753422,29.4365206), (-81.7753354,29.4356181), (-81.7753285,29.4347156), (-81.7753217,29.4338132), (-81.7753148,29.4329107), (-81.7753079,29.4320083), (-81.7753011,29.4311058), (-81.7752942,29.4302034), (-81.7752874,29.4293009), (-81.7742496,29.4284045), (-81.7742428,29.427502), (-81.774236,29.426599500000002), (-81.7742291,29.4256971), (-81.7742223,29.4247946), (-81.7742154,29.4238922), (-81.7742086,29.4229897), (-81.7742017,29.4220873), (-81.7741949,29.4211848), (-81.774188,29.4202823), (-81.7741812,29.4193799), (-81.7741744,29.4184774), (-81.7741675,29.417575), (-81.7741607,29.4166725), (-81.7741538,29.4157701), (-81.774147,29.4148676), (-81.7741402,29.4139651), (-81.7741333,29.4130627), (-81.7741265,29.4121602), (-81.7741196,29.4112578), (-81.7741128,29.4103553), (-81.774106,29.4094528), (-81.7740991,29.4085504), (-81.7740923,29.4076479), (-81.7740855,29.4067455), (-81.7740786,29.405843), (-81.7740718,29.4049405), (-81.7740649,29.4040381), (-81.7740581,29.4031356), (-81.7740513,29.4022332), (-81.7740444,29.4013307), (-81.7740376,29.4004282), (-81.7740308,29.3995258), (-81.7740239,29.3986233), (-81.7740171,29.3977209), (-81.7740103,29.3968184), (-81.7740034,29.3959159), (-81.7739966,29.3950135), (-81.7739898,29.394111), (-81.7739829,29.3932086), (-81.7739761,29.3923061), (-81.7739693,29.3914036), (-81.7739624,29.3905012), (-81.7739556,29.3895987), (-81.7739488,29.3886963), (-81.7739419,29.3877938), (-81.7739351,29.3868913), (-81.7739283,29.3859889), (-81.7732188,29.4284104), (-81.7732119,29.427508), (-81.7732051,29.4266055), (-81.7731983,29.4257031), (-81.7731914,29.4248006), (-81.7731846,29.4238982), (-81.7731778,29.4229957), (-81.7731709,29.4220932), (-81.7731641,29.4211908), (-81.7731573,29.4202883), (-81.7731504,29.4193859), (-81.7731436,29.4184834), (-81.7731368,29.417581), (-81.7731299,29.4166785), (-81.7731231,29.415776), (-81.7731163,29.4148736), (-81.7731094,29.4139711), (-81.7731026,29.4130687), (-81.7730958,29.4121662), (-81.7730889,29.4112637), (-81.7730821,29.4103613), (-81.7730753,29.4094588), (-81.7730684,29.4085564), (-81.7730616,29.4076539), (-81.7730548,29.4067514), (-81.773048,29.405849), (-81.7730411,29.4049465), (-81.7730343,29.4040441), (-81.7730275,29.4031416), (-81.7730207,29.4022391), (-81.7730138,29.4013367), (-81.773007,29.4004342), (-81.7730002,29.3995318), (-81.7729933,29.3986293), (-81.7729865,29.3977268), (-81.7729797,29.3968244), (-81.7729729,29.3959219), (-81.772966,29.3950195), (-81.7729592,29.394117), (-81.7729524,29.3932145), (-81.7729456,29.3923121), (-81.7729388,29.3914096), (-81.7729319,29.3905072), (-81.7729251,29.3896047), (-81.7729183,29.3887022), (-81.7729115,29.3877998), (-81.7729046,29.3868973), (-81.7728978,29.3859948), (-81.772891,29.3850924), (-81.7728842,29.3841899), (-81.7728774,29.3832875), (-81.7728705,29.382385), (-81.7728637,29.3814825), (-81.7721879,29.4284164), (-81.7721811,29.427514), (-81.7721742,29.4266115), (-81.7721674,29.4257091), 
(-81.7721606,29.4248066), (-81.7721537,29.4239041), (-81.7721469,29.4230017), (-81.7721401,29.4220992), (-81.7721333,29.4211968), (-81.7721265,29.4202943), (-81.7721196,29.4193918), (-81.7721128,29.4184894), (-81.772106,29.4175869), (-81.7720992,29.4166845), (-81.7720923,29.415782), (-81.7720855,29.4148796), (-81.7720787,29.4139771), (-81.7720719,29.4130746), (-81.772065,29.4121722), (-81.7720582,29.4112697), (-81.7720514,29.4103673), (-81.7720446,29.4094648), (-81.7720378,29.4085623), (-81.7720309,29.4076599), (-81.7720241,29.4067574), (-81.7720173,29.405855), (-81.7720105,29.4049525), (-81.7720037,29.40405), (-81.7719969,29.4031476), (-81.77199,29.402245100000002), (-81.7719832,29.4013427), (-81.7719764,29.4004402), (-81.7719696,29.3995377), (-81.7719628,29.3986353), (-81.771956,29.3977328), (-81.7719491,29.3968304), (-81.7719423,29.3959279), (-81.7719355,29.3950254), (-81.7719287,29.394123), (-81.7719219,29.393220499999998), (-81.7719151,29.3923181), (-81.7719082,29.3914156), (-81.7719014,29.3905131), (-81.7718946,29.3896107), (-81.7718878,29.3887082), (-81.771881,29.3878057), (-81.7718742,29.3869033), (-81.7718674,29.3860008), (-81.7718606,29.3850984), (-81.7718537,29.3841959), (-81.7718469,29.3832934), (-81.7718401,29.382391), (-81.7718333,29.3814885), (-81.771157,29.4284224), (-81.7711502,29.4275199), (-81.7711434,29.4266175), (-81.7711365,29.425715), (-81.7711297,29.4248126), (-81.7711229,29.4239101), (-81.7711161,29.4230077), (-81.7711093,29.4221052), (-81.7711025,29.4212027), (-81.7710957,29.4203003), (-81.7710888,29.4193978), (-81.771082,29.4184954), (-81.7710752,29.4175929), (-81.7710684,29.4166904), (-81.7710616,29.415788), (-81.7710548,29.4148855), (-81.771048,29.4139831), (-81.7710411,29.4130806), (-81.7710343,29.4121781), (-81.7710275,29.4112757), (-81.7710207,29.4103732), (-81.7710139,29.4094708), (-81.7710071,29.4085683), (-81.7710003,29.4076659), (-81.7709935,29.4067634), (-81.7709867,29.4058609), (-81.7709798,29.4049585), (-81.770973,29.404056), (-81.7709662,29.4031536), (-81.7709594,29.4022511), (-81.7709526,29.4013486), (-81.7709458,29.4004462), (-81.770939,29.3995437), (-81.7709322,29.3986412), (-81.7709254,29.3977388), (-81.7709186,29.3968363), (-81.7709118,29.3959339), (-81.770905,29.3950314), (-81.7708982,29.3941289), (-81.7708913,29.3932265), (-81.7708845,29.392324), (-81.7708777,29.3914216), (-81.7708709,29.3905191), (-81.7708641,29.3896166), (-81.7708573,29.3887142), (-81.7708505,29.3878117), (-81.7708437,29.3869092), (-81.7708369,29.3860068), (-81.7708301,29.3851043), (-81.7708233,29.3842019), (-81.7708165,29.3832994), (-81.7708097,29.3823969), (-81.7708029,29.3814945), (-81.7701329,29.4293308), (-81.7701261,29.4284284), (-81.7701193,29.4275259), (-81.7701125,29.4266234), (-81.7701057,29.425721), (-81.7700989,29.4248185), (-81.7700921,29.4239161), (-81.7700853,29.4230136), (-81.7700785,29.4221112), (-81.7700717,29.4212087), (-81.7700649,29.4203062), (-81.770058,29.4194038), (-81.7700512,29.4185013), (-81.7700444,29.4175989), (-81.7700376,29.4166964), (-81.7700308,29.4157939), (-81.770024,29.4148915), (-81.7700172,29.413989), (-81.7700104,29.4130866), (-81.7700036,29.4121841), (-81.7699968,29.4112816), (-81.76999,29.4103792), (-81.7699832,29.4094767), (-81.7699764,29.4085743), (-81.7699696,29.4076718), (-81.7699628,29.4067694), (-81.769956,29.4058669), (-81.7699492,29.4049644), (-81.7699424,29.404062), (-81.7699356,29.4031595), (-81.7699288,29.402257), (-81.769922,29.4013546), (-81.7699152,29.4004521), (-81.7699084,29.3995497), (-81.7699016,29.3986472), 
(-81.7698948,29.3977447), (-81.769888,29.3968423), (-81.7698812,29.3959398), (-81.7698744,29.3950374), (-81.7698676,29.3941349), (-81.7698608,29.3932324), (-81.769854,29.39233), (-81.7698472,29.3914275), (-81.7698404,29.390525), (-81.7698336,29.3896226), (-81.7698268,29.3887201), (-81.76982,29.3878177), (-81.7698133,29.3869152), (-81.7698065,29.3860127), (-81.7697997,29.3851103), (-81.7697929,29.3842078), (-81.7697861,29.3833053), (-81.7697793,29.3824029), (-81.7697725,29.3815004), (-81.769102,29.4293368), (-81.7690952,29.4284343), (-81.7690884,29.4275319), (-81.7690816,29.4266294), (-81.7690748,29.4257269), (-81.769068,29.4248245), (-81.7690612,29.423922), (-81.7690544,29.4230196), (-81.7690476,29.4221171), (-81.7690409,29.4212147), (-81.7690341,29.4203122), (-81.7690273,29.4194097), (-81.7690205,29.4185073), (-81.7690137,29.4176048), (-81.7690069,29.4167024), (-81.7690001,29.4157999), (-81.7689933,29.4148974), (-81.7689865,29.413995), (-81.7689797,29.4130925), (-81.7689729,29.4121901), (-81.7689661,29.4112876), (-81.7689593,29.4103851), (-81.7689525,29.4094827), (-81.7689457,29.4085802), (-81.7689389,29.4076778), (-81.7689321,29.4067753), (-81.7689253,29.4058728), (-81.7689186,29.4049704), (-81.7689118,29.4040679), (-81.768905,29.4031655), (-81.7688982,29.402263), (-81.7688914,29.4013605), (-81.7688846,29.4004581), (-81.7688778,29.3995556), (-81.768871,29.3986532), (-81.7688642,29.3977507), (-81.7688574,29.3968482), (-81.7688507,29.3959458), (-81.7688439,29.3950433), (-81.7688371,29.3941408), (-81.7688303,29.3932384), (-81.7688235,29.3923359), (-81.7688167,29.3914335), (-81.7688099,29.390531), (-81.7688031,29.3896285), (-81.7687964,29.3887261), (-81.7687896,29.3878236), (-81.7687828,29.3869211), (-81.768776,29.3860187), (-81.7687692,29.3851162), (-81.7687624,29.3842138), (-81.7687557,29.3833113), (-81.7687489,29.3824088), (-81.7687421,29.3815064), (-81.7680711,29.4293427), (-81.7680644,29.4284403), (-81.7680576,29.4275378), (-81.7680508,29.4266353), (-81.768044,29.4257329), (-81.7680372,29.4248304), (-81.7680304,29.423928), (-81.7680236,29.4230255), (-81.7680168,29.4221231), (-81.76801,29.4212206), (-81.7680033,29.4203181), (-81.7679965,29.4194157), (-81.7679897,29.4185132), (-81.7679829,29.4176108), (-81.7679761,29.4167083), (-81.7679693,29.4158058), (-81.7679625,29.4149034), (-81.7679557,29.4140009), (-81.767949,29.4130985), (-81.7679422,29.412196), (-81.7679354,29.4112935), (-81.7679286,29.4103911), (-81.7679218,29.4094886), (-81.767915,29.4085862), (-81.7679083,29.4076837), (-81.7679015,29.4067812), (-81.7678947,29.4058788), (-81.7678879,29.4049763), (-81.7678811,29.4040739), (-81.7678743,29.4031714), (-81.7678676,29.4022689), (-81.7678608,29.4013665), (-81.767854,29.400464), (-81.7678472,29.3995616), (-81.7678404,29.3986591), (-81.7678337,29.3977566), (-81.7678269,29.3968542), (-81.7678201,29.3959517), (-81.7678133,29.3950492), (-81.7678065,29.3941468), (-81.7677998,29.3932443), (-81.767793,29.3923419), (-81.7677862,29.3914394), (-81.7677794,29.3905369), (-81.7677727,29.3896345), (-81.7677659,29.388732), (-81.7677591,29.3878295), (-81.7677523,29.3869271), (-81.7677456,29.3860246), (-81.7677388,29.3851222), (-81.767732,29.3842197), (-81.7677252,29.3833172), (-81.7677185,29.3824148), (-81.7677117,29.3815123), (-81.7670403,29.4293487), (-81.7670335,29.4284462), (-81.7670267,29.4275437), (-81.7670199,29.4266413), (-81.7670131,29.4257388), (-81.7670063,29.4248364), (-81.7669996,29.4239339), (-81.7669928,29.4230315), (-81.766986,29.422128999999998), (-81.7669792,29.4212265), 
(-81.7669725,29.4203241), (-81.7669657,29.4194216), (-81.7669589,29.4185192), (-81.7669521,29.4176167), (-81.7669453,29.4167142), (-81.7669386,29.4158118), (-81.7669318,29.4149093), (-81.766925,29.4140069), (-81.7669182,29.4131044), (-81.7669115,29.4122019), (-81.7669047,29.4112995), (-81.7668979,29.410397), (-81.7668911,29.4094946), (-81.7668844,29.4085921), (-81.7668776,29.4076896), (-81.7668708,29.4067872), (-81.766864,29.4058847), (-81.7668573,29.4049823), (-81.7668505,29.4040798), (-81.7668437,29.4031773), (-81.7668369,29.4022749), (-81.7668302,29.4013724), (-81.7668234,29.40047), (-81.7668166,29.3995675), (-81.7668099,29.398665), (-81.7668031,29.3977626), (-81.7667963,29.3968601), (-81.7667895,29.3959576), (-81.7667828,29.3950552), (-81.766776,29.3941527), (-81.7667692,29.3932503), (-81.7667625,29.3923478), (-81.7667557,29.3914453), (-81.7667489,29.3905429), (-81.7667422,29.3896404), (-81.7667354,29.3887379), (-81.7667286,29.3878355), (-81.7667219,29.386933), (-81.7667151,29.3860305), (-81.7667083,29.3851281), (-81.7667016,29.3842256), (-81.7666948,29.3833232), (-81.766688,29.3824207), (-81.7666813,29.3815182), (-81.7660094,29.4293546), (-81.7660026,29.4284521), (-81.7659958,29.4275497), (-81.765989,29.4266472), (-81.7659823,29.4257448), (-81.7659755,29.4248423), (-81.7659687,29.4239398), (-81.765962,29.4230374), (-81.7659552,29.4221349), (-81.7659484,29.4212325), (-81.7659417,29.42033), (-81.7659349,29.4194275), (-81.7659281,29.4185251), (-81.7659213,29.4176226), (-81.7659146,29.4167202), (-81.7659078,29.4158177), (-81.765901,29.4149153), (-81.7658943,29.4140128), (-81.7658875,29.4131103), (-81.7658807,29.4122079), (-81.765874,29.4113054), (-81.7658672,29.4104029), (-81.7658604,29.4095005), (-81.7658537,29.408598), (-81.7658469,29.4076956), (-81.7658401,29.4067931), (-81.7658334,29.4058906), (-81.7658266,29.4049882), (-81.7658199,29.4040857), (-81.7658131,29.4031833), (-81.7658063,29.4022808), (-81.7657996,29.4013783), (-81.7657928,29.4004759), (-81.765786,29.3995734), (-81.7657793,29.398671), (-81.7657725,29.3977685), (-81.7657658,29.396866), (-81.765759,29.3959636), (-81.7657522,29.3950611), (-81.7657455,29.3941586), (-81.7657387,29.3932562), (-81.765732,29.3923537), (-81.7657252,29.3914513), (-81.7657184,29.3905488), (-81.7657117,29.3896463), (-81.7657049,29.3887439), (-81.7656982,29.3878414), (-81.7656914,29.3869389), (-81.7656846,29.3860365), (-81.7656779,29.385134), (-81.7656711,29.3842315), (-81.7656644,29.3833291), (-81.7656576,29.3824266), (-81.7656509,29.3815241), (-81.7649785,29.4293605), (-81.7649717,29.4284581), (-81.7649649,29.4275556), (-81.7649582,29.4266531), (-81.7649514,29.4257507), (-81.7649447,29.4248482), (-81.7649379,29.4239458), (-81.7649311,29.4230433), (-81.7649244,29.4221408), (-81.7649176,29.4212384), (-81.7649109,29.4203359), (-81.7649041,29.4194335), (-81.7648973,29.418531), (-81.7648906,29.4176286), (-81.7648838,29.416726099999998), (-81.7648771,29.4158236), (-81.7648703,29.4149212), (-81.7648635,29.4140187), (-81.7648568,29.4131163), (-81.76485,29.4122138), (-81.7648433,29.4113113), (-81.7648365,29.4104089), (-81.7648298,29.4095064), (-81.764823,29.4086039), (-81.7648162,29.4077015), (-81.7648095,29.406799), (-81.7648027,29.4058966), (-81.764796,29.4049941), (-81.7647892,29.4040916), (-81.7647825,29.4031892), (-81.7647757,29.4022867), (-81.764769,29.4013843), (-81.7647622,29.4004818), (-81.7647554,29.3995793), (-81.7647487,29.3986769), (-81.7647419,29.3977744), (-81.7647352,29.3968719), (-81.7647284,29.3959695), (-81.7647217,29.395067), 
(-81.7647149,29.3941646), (-81.7647082,29.3932621), (-81.7647014,29.3923596), (-81.7646947,29.3914572), (-81.7646879,29.3905547), (-81.7646812,29.3896522), (-81.7646744,29.3887498), (-81.7646677,29.3878473), (-81.7646609,29.3869448), (-81.7646542,29.3860424), (-81.7646474,29.3851399), (-81.7646407,29.3842375), (-81.7646339,29.383335), (-81.7646272,29.3824325), (-81.7646204,29.3815301), (-81.7639476,29.4293664), (-81.7639408,29.428464), (-81.7639341,29.4275615), (-81.7639273,29.4266591), (-81.7639206,29.4257566), (-81.7639138,29.4248541), (-81.7639071,29.4239517), (-81.7639003,29.4230492), (-81.7638936,29.4221468), (-81.7638868,29.4212443), (-81.7638801,29.4203418), (-81.7638733,29.4194394), (-81.7638666,29.4185369), (-81.7638598,29.4176345), (-81.763853,29.416732), (-81.7638463,29.4158295), (-81.7638396,29.4149271), (-81.7638328,29.4140246), (-81.7638261,29.4131222), (-81.7638193,29.4122197), (-81.7638126,29.4113172), (-81.7638058,29.4104148), (-81.7637991,29.4095123), (-81.7637923,29.4086099), (-81.7637856,29.4077074), (-81.7637788,29.4068049), (-81.7637721,29.4059025), (-81.7637653,29.405), (-81.7637586,29.4040976), (-81.7637518,29.4031951), (-81.7637451,29.4022926), (-81.7637383,29.4013902), (-81.7637316,29.4004877), (-81.7637249,29.3995852), (-81.7637181,29.3986828), (-81.7637114,29.3977803), (-81.7637046,29.3968779), (-81.7636979,29.3959754), (-81.7636911,29.3950729), (-81.7636844,29.3941705), (-81.7636777,29.393268), (-81.7636709,29.3923655), (-81.7636642,29.3914631), (-81.7636574,29.3905606), (-81.7636507,29.3896581), (-81.763644,29.3887557), (-81.7636372,29.3878532), (-81.7636305,29.3869508), (-81.7636237,29.3860483), (-81.763617,29.3851458), (-81.7636103,29.3842434), (-81.7636035,29.3833409), (-81.7635968,29.3824384), (-81.76359,29.381536), (-81.7629167,29.4293723), (-81.7629099,29.4284699), (-81.7629032,29.4275674), (-81.7628965,29.426665), (-81.7628897,29.4257625), (-81.762883,29.42486), (-81.7628762,29.4239576), (-81.7628695,29.4230551), (-81.7628627,29.4221527), (-81.762856,29.4212502), (-81.7628493,29.4203478), (-81.7628425,29.4194453), (-81.7628358,29.4185428), (-81.762829,29.4176404), (-81.7628223,29.4167379), (-81.7628155,29.4158355), (-81.7628088,29.414933), (-81.7628021,29.4140305), (-81.7627953,29.4131281), (-81.7627886,29.4122256), (-81.7627818,29.4113231), (-81.7627751,29.4104207), (-81.7627684,29.4095182), (-81.7627616,29.4086158), (-81.7627549,29.4077133), (-81.7627482,29.4068108), (-81.7627414,29.4059084), (-81.7627347,29.4050059), (-81.7627279,29.4041035), (-81.7627212,29.403201), (-81.7627145,29.4022985), (-81.76270769999999,29.4013961), (-81.762701,29.4004936), (-81.7626943,29.3995911), (-81.7626875,29.3986887), (-81.7626808,29.3977862), (-81.7626741,29.3968838), (-81.7626673,29.3959813), (-81.7626606,29.3950788), (-81.7626539,29.3941764), (-81.7626471,29.3932739), (-81.7626404,29.3923714), (-81.7626337,29.391469), (-81.7626269,29.3905665), (-81.7626202,29.389664), (-81.7626135,29.3887616), (-81.7626067,29.3878591), (-81.7626,29.3869567), (-81.7625933,29.3860542), (-81.7625865,29.3851517), (-81.7625798,29.3842493), (-81.7625731,29.383346799999998), (-81.7625664,29.3824443), (-81.7625596,29.3815419), (-81.7618858,29.4293782), (-81.7618791,29.4284758), (-81.7618723,29.4275733), (-81.7618656,29.4266709), (-81.7618589,29.4257684), (-81.7618521,29.4248659), (-81.7618454,29.4239635), (-81.7618386,29.423061), (-81.7618319,29.4221586), (-81.7618252,29.4212561), (-81.7618184,29.4203537), (-81.7618117,29.4194512), (-81.761805,29.4185487), (-81.7617983,29.4176463), 
(-81.7617915,29.4167438), (-81.7617848,29.4158413), (-81.7617781,29.4149389), (-81.7617713,29.4140364), (-81.7617646,29.413134), (-81.7617579,29.4122315), (-81.7617511,29.411329), (-81.7617444,29.4104266), (-81.7617377,29.4095241), (-81.7617309,29.4086217), (-81.7617242,29.4077192), (-81.7617175,29.4068167), (-81.7617108,29.4059143), (-81.761704,29.4050118), (-81.7616973,29.404109300000002), (-81.7616906,29.4032069), (-81.7616839,29.4023044), (-81.7616771,29.401402), (-81.7616704,29.4004995), (-81.7616637,29.399597), (-81.7616569,29.3986946), (-81.7616502,29.3977921), (-81.7616435,29.3968896), (-81.7616368,29.3959872), (-81.76163,29.3950847), (-81.7616233,29.3941823), (-81.7616166,29.3932798), (-81.7616099,29.3923773), (-81.7616032,29.3914749), (-81.7615964,29.3905724), (-81.7615897,29.3896699), (-81.761583,29.3887675), (-81.7615763,29.387865), (-81.7615695,29.3869625), (-81.7615628,29.3860601), (-81.7615561,29.3851576), (-81.7615494,29.3842551), (-81.7615427,29.3833527), (-81.7615359,29.3824502), (-81.7615292,29.3815478), (-81.7608549,29.4293841), (-81.7608482,29.4284817), (-81.7608414,29.4275792), (-81.7608347,29.4266768), (-81.760828,29.4257743), (-81.7608213,29.4248718), (-81.7608145,29.4239694), (-81.7608078,29.4230669), (-81.7608011,29.4221645), (-81.7607944,29.421262), (-81.7607876,29.4203595), (-81.7607809,29.4194571), (-81.7607742,29.4185546), (-81.7607675,29.4176522), (-81.7607608,29.4167497), (-81.760754,29.4158472), (-81.7607473,29.4149448), (-81.7607406,29.4140423), (-81.7607339,29.4131399), (-81.7607271,29.4122374), (-81.7607204,29.4113349), (-81.7607137,29.4104325), (-81.760707,29.40953), (-81.7607003,29.4086275), (-81.7606935,29.4077251), (-81.7606868,29.4068226), (-81.7606801,29.4059202), (-81.7606734,29.4050177), (-81.7606667,29.4041152), (-81.76066,29.4032128), (-81.7606532,29.4023103), (-81.76064650000001,29.4014078), (-81.7606398,29.4005054), (-81.7606331,29.3996029), (-81.7606264,29.3987005), (-81.7606196,29.397798), (-81.7606129,29.3968955), (-81.7606062,29.3959931), (-81.7605995,29.3950906), (-81.7605928,29.3941881), (-81.7605861,29.3932857), (-81.7605794,29.3923832), (-81.7605726,29.3914807), (-81.7605659,29.3905783), (-81.7605592,29.3896758), (-81.7605525,29.3887734), (-81.7605458,29.3878709), (-81.7605391,29.3869684), (-81.7605324,29.386066), (-81.7605257,29.3851635), (-81.7605189,29.384261), (-81.7605122,29.3833586), (-81.7605055,29.3824561), (-81.7604988,29.3815536), (-81.759824,29.42939), (-81.7598173,29.4284876), (-81.7598106,29.4275851), (-81.7598039,29.4266826), (-81.7597971,29.4257802), (-81.7597904,29.4248777), (-81.7597837,29.4239753), (-81.759777,29.4230728), (-81.7597703,29.4221703), (-81.7597636,29.4212679), (-81.7597568,29.4203654), (-81.7597501,29.419463), (-81.7597434,29.4185605), (-81.7597367,29.417658), (-81.75973,29.4167556), (-81.7597233,29.4158531), (-81.7597166,29.4149507), (-81.7597098,29.4140482), (-81.7597031,29.4131457), (-81.7596964,29.4122433), (-81.7596897,29.4113408), (-81.759683,29.4104384), (-81.7596763,29.4095359), (-81.7596696,29.4086334), (-81.7596629,29.407731), (-81.7596562,29.4068285), (-81.7596494,29.405926), (-81.7596427,29.4050236), (-81.759636,29.4041211), (-81.7596293,29.4032187), (-81.7596226,29.4023162), (-81.7596159,29.4014137), (-81.7596092,29.4005113), (-81.7596025,29.3996088), (-81.7595958,29.3987063), (-81.7595891,29.3978039), (-81.7595824,29.3969014), (-81.7595757,29.3959989), (-81.759569,29.3950965), (-81.7595622,29.394194), (-81.7595555,29.3932916), (-81.7595488,29.3923891), (-81.7595421,29.3914866), 
(-81.7595354,29.3905842), (-81.7595287,29.3896817), (-81.759522,29.3887792), (-81.7595153,29.3878768), (-81.7595086,29.3869743), (-81.7595019,29.3860718), (-81.7594952,29.3851694), (-81.7594885,29.3842669), (-81.7594818,29.3833644), (-81.7594751,29.382462), (-81.7594684,29.3815595), (-81.7587931,29.4293959), (-81.7587864,29.4284934), (-81.7587797,29.427591), (-81.758773,29.4266885), (-81.7587663,29.4257861), (-81.7587596,29.4248836), (-81.7587529,29.4239811), (-81.7587462,29.4230787), (-81.7587395,29.4221762), (-81.7587327,29.4212738), (-81.758726,29.4203713), (-81.7587193,29.4194688), (-81.7587126,29.4185664), (-81.7587059,29.4176639), (-81.7586992,29.4167615), (-81.7586925,29.415859), (-81.7586858,29.4149565), (-81.7586791,29.4140541), (-81.7586724,29.4131516), (-81.7586657,29.4122491), (-81.758659,29.4113467), (-81.7586523,29.4104442), (-81.7586456,29.4095418), (-81.7586389,29.4086393), (-81.7586322,29.4077368), (-81.7586255,29.4068344), (-81.7586188,29.4059319), (-81.7586121,29.4050294), (-81.7586054,29.404127), (-81.7585987,29.4032245), (-81.758592,29.4023221), (-81.7585853,29.4014196), (-81.7585786,29.4005171), (-81.7585719,29.3996147), (-81.7585652,29.3987122), (-81.7585585,29.3978097), (-81.7585518,29.3969073), (-81.7585451,29.3960048), (-81.7585384,29.3951024), (-81.7585317,29.3941999), (-81.758525,29.3932974), (-81.7585183,29.392395), (-81.7585116,29.3914925), (-81.7585049,29.39059), (-81.7584982,29.3896876), (-81.7584915,29.3887851), (-81.7584848,29.3878826), (-81.7584781,29.3869802), (-81.7584715,29.3860777), (-81.7584648,29.3851752), (-81.7584581,29.3842728), (-81.7584514,29.3833703), (-81.7584447,29.3824678), (-81.758438,29.3815654), (-81.7577622,29.4294018), (-81.7577555,29.4284993), (-81.7577488,29.4275968), (-81.7577421,29.4266944), (-81.7577354,29.4257919), (-81.7577287,29.4248895), (-81.757722,29.423987), (-81.7577153,29.4230845), (-81.7577086,29.4221821), (-81.7577019,29.4212796), (-81.7576952,29.4203772), (-81.7576885,29.4194747), (-81.7576818,29.4185722), (-81.7576752,29.4176698), (-81.7576685,29.4167673), (-81.7576618,29.4158649), (-81.7576551,29.4149624), (-81.7576484,29.4140599), (-81.7576417,29.4131575), (-81.757635,29.412255), (-81.7576283,29.4113525), (-81.7576216,29.4104501), (-81.7576149,29.4095476), (-81.75760819999999,29.4086452), (-81.7576015,29.4077427), (-81.7575948,29.4068402), (-81.7575881,29.4059378), (-81.7575814,29.4050353), (-81.7575748,29.4041328), (-81.7575681,29.403230399999998), (-81.7575614,29.4023279), (-81.7575547,29.4014255), (-81.757548,29.400523), (-81.7575413,29.3996205), (-81.7575346,29.3987181), (-81.7575279,29.3978156), (-81.7575212,29.3969131), (-81.7575145,29.3960107), (-81.7575079,29.3951082), (-81.7575012,29.3942057), (-81.7574945,29.3933033), (-81.7574878,29.3924008), (-81.7574811,29.3914984), (-81.7574744,29.3905959), (-81.7574677,29.3896934), (-81.7574611,29.388791), (-81.7574544,29.3878885), (-81.7574477,29.386986), (-81.757441,29.3860836), (-81.7574343,29.3851811), (-81.7574276,29.384278600000002), (-81.7574209,29.3833762), (-81.7574143,29.3824737), (-81.7574076,29.3815712), (-81.7567313,29.4294076), (-81.7567246,29.4285052), (-81.7567179,29.4276027), (-81.7567113,29.4267002), (-81.7567046,29.4257978), (-81.7566979,29.4248953), (-81.7566912,29.423992900000002), (-81.7566845,29.4230904), (-81.7566778,29.4221879), (-81.7566711,29.4212855), (-81.7566644,29.420383), (-81.7566577,29.4194806), (-81.7566511,29.4185781), (-81.7566444,29.4176756), (-81.7566377,29.4167732), (-81.756631,29.4158707), (-81.7566243,29.4149683), 
(-81.7566176,29.4140658), (-81.7566109,29.4131633), (-81.7566043,29.4122609), (-81.7565976,29.4113584), (-81.7565909,29.4104559), (-81.7565842,29.4095535), (-81.7565775,29.408651), (-81.7565708,29.4077486), (-81.7565642,29.4068461), (-81.7565575,29.4059436), (-81.7565508,29.4050412), (-81.7565441,29.4041387), (-81.7565374,29.4032362), (-81.7565308,29.4023338), (-81.75652410000001,29.4014313), (-81.7565174,29.4005288), (-81.7565107,29.3996264), (-81.756504,29.3987239), (-81.7564973,29.3978215), (-81.7564907,29.396919), (-81.756484,29.3960165), (-81.7564773,29.3951141), (-81.7564706,29.3942116), (-81.756464,29.3933091), (-81.7564573,29.3924067), (-81.7564506,29.3915042), (-81.7564439,29.3906017), (-81.7564372,29.3896993), (-81.7564306,29.3887968), (-81.7564239,29.3878943), (-81.7564172,29.3869919), (-81.7564105,29.3860894), (-81.7564039,29.3851869), (-81.7563972,29.3842845), (-81.7563905,29.383382), (-81.7563838,29.3824795), (-81.7563772,29.3815771), (-81.7557004,29.4294135), (-81.7556937,29.428511), (-81.7556871,29.4276086), (-81.7556804,29.4267061), (-81.7556737,29.4258036), (-81.755667,29.4249012), (-81.7556603,29.4239987), (-81.7556537,29.4230963), (-81.755647,29.4221938), (-81.7556403,29.4212913), (-81.7556336,29.4203889), (-81.755627,29.4194864), (-81.7556203,29.418584), (-81.7556136,29.4176815), (-81.7556069,29.416779), (-81.7556002,29.4158766), (-81.7555936,29.4149741), (-81.7555869,29.4140716), (-81.7555802,29.4131692), (-81.7555735,29.4122667), (-81.7555669,29.4113643), (-81.7555602,29.4104618), (-81.7555535,29.4095593), (-81.7555468,29.4086569), (-81.7555402,29.4077544), (-81.7555335,29.4068519), (-81.7555268,29.4059495), (-81.7555201,29.405047), (-81.7555135,29.4041446), (-81.7555068,29.4032421), (-81.7555001,29.4023396), (-81.7554935,29.4014372), (-81.7554868,29.4005347), (-81.7554801,29.3996322), (-81.7554734,29.3987298), (-81.7554668,29.3978273), (-81.7554601,29.3969248), (-81.7554534,29.3960224), (-81.7554468,29.3951199), (-81.7554401,29.3942174), (-81.7554334,29.393315), (-81.7554268,29.3924125), (-81.7554201,29.39151), (-81.7554134,29.3906076), (-81.7554067,29.3897051), (-81.7554001,29.3888027), (-81.7553934,29.3879002), (-81.7553867,29.3869977), (-81.7553801,29.3860953), (-81.7553734,29.3851928), (-81.7553667,29.3842903), (-81.7553601,29.3833879), (-81.7553534,29.3824854), (-81.7553468,29.3815829), (-81.7546695,29.4294193), (-81.7546629,29.428516899999998), (-81.7546562,29.4276144), (-81.7546495,29.4267119), (-81.7546428,29.4258095), (-81.7546362,29.424907), (-81.7546295,29.4240046), (-81.7546228,29.4231021), (-81.7546162,29.4221996), (-81.7546095,29.4212972), (-81.7546028,29.4203947), (-81.7545962,29.4194923), (-81.7545895,29.4185898), (-81.7545828,29.4176873), (-81.7545762,29.4167849), (-81.7545695,29.4158824), (-81.7545628,29.4149799), (-81.7545562,29.4140775), (-81.7545495,29.413175), (-81.7545428,29.4122726), (-81.7545362,29.4113701), (-81.7545295,29.4104676), (-81.7545228,29.4095652), (-81.7545162,29.4086627), (-81.7545095,29.4077602), (-81.7545028,29.4068578), (-81.7544962,29.4059553), (-81.7544895,29.4050529), (-81.7544828,29.4041504), (-81.7544762,29.4032479), (-81.7544695,29.4023455), (-81.7544628,29.401443), (-81.7544562,29.4005405), (-81.7544495,29.3996381), (-81.7544429,29.3987356), (-81.7544362,29.3978331), (-81.7544295,29.3969307), (-81.7544229,29.3960282), (-81.7544162,29.3951257), (-81.7544096,29.3942233), (-81.7544029,29.3933208), (-81.7543962,29.3924183), (-81.7543896,29.3915159), (-81.7543829,29.3906134), (-81.7543763,29.389711), 
(-81.7543696,29.3888085), (-81.7543629,29.387906), (-81.7543563,29.3870036), (-81.7543496,29.3861011), (-81.754343,29.3851986), (-81.7543363,29.3842962), (-81.7543297,29.3833937), (-81.754323,29.3824912), (-81.7543163,29.3815888), (-81.7536386,29.4294252), (-81.753632,29.4285227), (-81.7536253,29.4276202), (-81.7536187,29.4267178), (-81.753612,29.4258153), (-81.7536053,29.4249129), (-81.7535987,29.4240104), (-81.753592,29.4231079), (-81.7535853,29.4222055), (-81.7535787,29.421303), (-81.753572,29.4204006), (-81.7535654,29.4194981), (-81.7535587,29.4185956), (-81.753552,29.4176932), (-81.7535454,29.4167907), (-81.75353870000001,29.4158882), (-81.7535321,29.4149858), (-81.7535254,29.4140833), (-81.7535188,29.4131809), (-81.7535121,29.4122784), (-81.7535054,29.4113759), (-81.7534988,29.4104735), (-81.7534921,29.409571), (-81.7534855,29.4086685), (-81.7534788,29.4077661), (-81.7534722,29.4068636), (-81.7534655,29.4059611), (-81.7534588,29.4050587), (-81.7534522,29.4041562), (-81.7534455,29.4032538), (-81.7534389,29.4023513), (-81.7534322,29.4014488), (-81.7534256,29.4005464), (-81.7534189,29.3996439), (-81.7534123,29.3987414), (-81.7534056,29.397839), (-81.753399,29.3969365), (-81.7533923,29.396034), (-81.7533857,29.3951316), (-81.753379,29.3942291), (-81.7533724,29.3933266), (-81.7533657,29.3924242), (-81.7533591,29.3915217), (-81.7533524,29.3906192), (-81.7533458,29.3897168), (-81.7533391,29.3888143), (-81.7533325,29.3879118), (-81.7533258,29.3870094), (-81.75331919999999,29.3861069), (-81.7533125,29.3852044), (-81.7533059,29.384302), (-81.7532992,29.3833995), (-81.7532926,29.382497), (-81.7532859,29.3815946), (-81.7526077,29.429431), (-81.7526011,29.4285285), (-81.7525944,29.4276261), (-81.7525878,29.4267236), (-81.7525811,29.4258211), (-81.7525745,29.4249187), (-81.7525678,29.4240162), (-81.7525612,29.4231138), (-81.7525545,29.4222113), (-81.7525479,29.4213088), (-81.7525412,29.4204064), (-81.7525346,29.4195039), (-81.7525279,29.4186015), (-81.7525213,29.417699), (-81.7525146,29.4167965), (-81.752508,29.4158941), (-81.7525013,29.4149916), (-81.7524947,29.414089099999998), (-81.752488,29.4131867), (-81.7524814,29.4122842), (-81.7524747,29.4113818), (-81.7524681,29.4104793), (-81.7524614,29.4095768), (-81.7524548,29.4086744), (-81.7524481,29.4077719), (-81.7524415,29.4068694), (-81.7524348,29.405967), (-81.7524282,29.4050645), (-81.7524216,29.404162), (-81.7524149,29.4032596), (-81.7524083,29.4023571), (-81.7524016,29.4014546), (-81.752395,29.4005522), (-81.7523883,29.3996497), (-81.7523817,29.3987473), (-81.752375,29.3978448), (-81.7523684,29.3969423), (-81.7523618,29.3960399), (-81.7523551,29.3951374), (-81.7523485,29.3942349), (-81.7523418,29.3933325), (-81.7523352,29.39243), (-81.7523285,29.3915275), (-81.7523219,29.3906251), (-81.7523153,29.3897226), (-81.7523086,29.3888201), (-81.752302,29.3879177), (-81.7522953,29.3870152), (-81.7522887,29.386112699999998), (-81.7522821,29.3852103), (-81.7522754,29.3843078), (-81.7522688,29.3834053), (-81.7522621,29.3825029), (-81.7522555,29.3816004), (-81.7515768,29.4294368), (-81.7515702,29.4285344), (-81.7515636,29.4276319), (-81.7515569,29.4267294), (-81.7515503,29.425827), (-81.7515436,29.4249245), (-81.751537,29.424022), (-81.7515303,29.4231196), (-81.7515237,29.4222171), (-81.7515171,29.4213147), (-81.7515104,29.4204122), (-81.7515038,29.4195097), (-81.7514971,29.4186073), (-81.7514905,29.4177048), (-81.7514839,29.4168023), (-81.7514772,29.4158999), (-81.7514706,29.4149974), (-81.7514639,29.414095), (-81.7514573,29.4131925), 
(-81.7514507,29.41229), (-81.751444,29.4113876), (-81.7514374,29.4104851), (-81.7514307,29.4095826), (-81.7514241,29.4086802), (-81.7514175,29.4077777), (-81.7514108,29.4068753), (-81.7514042,29.4059728), (-81.7513975,29.4050703), (-81.7513909,29.4041679), (-81.7513843,29.4032654), (-81.7513776,29.4023629), (-81.751371,29.4014605), (-81.7513644,29.400558), (-81.7513577,29.3996555), (-81.7513511,29.3987531), (-81.7513445,29.3978506), (-81.7513378,29.3969481), (-81.7513312,29.3960457), (-81.7513246,29.3951432), (-81.7513179,29.3942407), (-81.7513113,29.3933383), (-81.7513047,29.3924358), (-81.751298,29.3915333), (-81.7512914,29.3906309), (-81.7512848,29.3897284), (-81.7512781,29.3888259), (-81.7512715,29.3879235), (-81.7512649,29.387021), (-81.7512582,29.3861185), (-81.7512516,29.3852161), (-81.751245,29.3843136), (-81.7512384,29.3834111), (-81.7512317,29.3825087), (-81.7512251,29.3816062); + +select count(), sum(pointInPolygon((x, y),[(-82.311206, 28.960046), (-81.658722, 28.960345), (-81.65921, 29.047816), (-81.638697, 29.047761), (-81.641902, 29.276992), (-81.680709, 29.323243), (-81.675429, 29.338641), (-81.656377, 29.337981), (-81.668564, 29.371704), (-81.741634, 29.371123), (-81.741671, 29.429891), (-81.774905, 29.429249), (-81.776206, 29.487448), (-81.842765, 29.486042), (-81.844929, 29.521679), (-81.912052, 29.503906), (-81.93199, 29.51639), (-82.03174, 29.492385), (-82.055896, 29.471639), (-82.056748, 29.439952), (-82.080922, 29.440351), (-82.099886, 29.418665), (-82.101119, 29.438298), (-82.134555, 29.434982), (-82.148231, 29.417245), (-82.211144, 29.429693), (-82.212727, 29.465624), (-82.188, 29.456239), (-82.211448, 29.484017), (-82.40662, 29.485049), (-82.403059, 29.215536), (-82.534829, 29.2147), (-82.535191, 29.041186), (-82.471915, 29.052551), (-82.418649, 29.013144), (-82.401814, 29.019004), (-82.365594, 29.00246), (-82.311206, 28.960046)])) from coords; +drop table if exists coords; diff --git a/parser/testdata/00500_point_in_polygon_bug_2/ast.json b/parser/testdata/00500_point_in_polygon_bug_2/ast.json new file mode 100644 index 000000000..81ba8e131 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_bug_2/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function pointInPolygon (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(Float64_35.45285, Float64_58.72587)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 11)" + }, + { + "explain": " Literal Tuple_(Float64_32.947998, Float64_59.506455)" + }, + { + "explain": " Literal Tuple_(Float64_34.222412, Float64_59.215312)" + }, + { + "explain": " Literal Tuple_(Float64_33.343506, Float64_58.551061)" + }, + { + "explain": " Literal Tuple_(Float64_34.859619, Float64_58.938673)" + }, + { + "explain": " Literal Tuple_(Float64_36.463623, Float64_58.528125)" + }, + { + "explain": " Literal Tuple_(Float64_35.397949, Float64_59.215312)" + }, + { + "explain": " Literal Tuple_(Float64_36.804199, Float64_59.495303)" + }, + { + "explain": " Literal Tuple_(Float64_35.2771, Float64_59.50088)" + }, + { + "explain": " Literal Tuple_(Float64_34.892578, Float64_60.267066)" + }, + { + "explain": " Literal Tuple_(Float64_34.343262, 
Float64_59.517603)" + }, + { + "explain": " Literal Tuple_(Float64_32.947998, Float64_59.506455)" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001226443, + "rows_read": 20, + "bytes_read": 1130 + } +} diff --git a/parser/testdata/00500_point_in_polygon_bug_2/metadata.json b/parser/testdata/00500_point_in_polygon_bug_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_bug_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00500_point_in_polygon_bug_2/query.sql b/parser/testdata/00500_point_in_polygon_bug_2/query.sql new file mode 100644 index 000000000..48e946877 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_bug_2/query.sql @@ -0,0 +1,2 @@ +select pointInPolygon((35.45285, 58.72587), [(32.947998, 59.506455), (34.222412, 59.215312), (33.343506, 58.551061), (34.859619, 58.938673), (36.463623, 58.528125), (35.397949, 59.215312), (36.804199, 59.495303), (35.2771, 59.50088), (34.892578, 60.267066), (34.343262, 59.517603), (32.947998, 59.506455)]); +select pointInPolygon((35.72308, 58.8294), [(32.947998, 59.506455), (34.222412, 59.215312), (33.343506, 58.551061), (34.859619, 58.938673), (36.463623, 58.528125), (35.397949, 59.215312), (36.804199, 59.495303), (35.2771, 59.50088), (34.892578, 60.267066), (34.343262, 59.517603), (32.947998, 59.506455)]); diff --git a/parser/testdata/00500_point_in_polygon_bug_3_linestring_rotation_precision/ast.json b/parser/testdata/00500_point_in_polygon_bug_3_linestring_rotation_precision/ast.json new file mode 100644 index 000000000..8c75518f4 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_bug_3_linestring_rotation_precision/ast.json @@ -0,0 +1,1105 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function pointInPolygon (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(Float64_106.6671509, Float64_10.7674952)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 352)" + }, + { + "explain": " Literal Tuple_(Float64_106.667161868227, Float64_10.7674952)" + }, + { + "explain": " Literal Tuple_(Float64_106.667165727127, Float64_10.7675059912261)" + }, + { + "explain": " Literal Tuple_(Float64_106.667170817563, Float64_10.7674904752629)" + }, + { + "explain": " Literal Tuple_(Float64_106.667229225265, Float64_10.7672278502066)" + }, + { + "explain": " Literal Tuple_(Float64_106.667231193621, Float64_10.7672115129572)" + }, + { + "explain": " Literal Tuple_(Float64_106.667229912029, Float64_10.7671951075415)" + }, + { + "explain": " Literal Tuple_(Float64_106.667225430503, Float64_10.767179274157)" + }, + { + "explain": " Literal Tuple_(Float64_106.667217923927, Float64_10.7671646306786)" + }, + { + "explain": " Literal Tuple_(Float64_106.667207685234, Float64_10.7671517485471)" + }, + { + "explain": " Literal Tuple_(Float64_106.667195113975, Float64_10.7671411304688)" + }, + { + "explain": " Literal Tuple_(Float64_106.667180700725, Float64_10.7671331907989)" + }, + { + "explain": " Literal Tuple_(Float64_106.66716500794, Float64_10.7671282393715)" + }, + { + "explain": " Literal Tuple_(Float64_106.666628232995, Float64_10.7670156787539)" + }, + { + "explain": " 
Literal Tuple_(Float64_106.666612233649, Float64_10.7670139127584)" + }, + { + "explain": " Literal Tuple_(Float64_106.666596193354, Float64_10.7670152569112)" + }, + { + "explain": " Literal Tuple_(Float64_106.666580711053, Float64_10.7670196610218)" + }, + { + "explain": " Literal Tuple_(Float64_106.666566364856, Float64_10.7670269606408)" + }, + { + "explain": " Literal Tuple_(Float64_106.666553690448, Float64_10.7670368832008)" + }, + { + "explain": " Literal Tuple_(Float64_106.666543161092, Float64_10.767049058194)" + }, + { + "explain": " Literal Tuple_(Float64_106.666535169952, Float64_10.7670630310067)" + }, + { + "explain": " Literal Tuple_(Float64_106.666530015418, Float64_10.7670782798948)" + }, + { + "explain": " Literal Tuple_(Float64_106.666482284259, Float64_10.7672828714379)" + }, + { + "explain": " Literal Tuple_(Float64_106.666480170141, Float64_10.7672985245675)" + }, + { + "explain": " Literal Tuple_(Float64_106.666481048788, Float64_10.7673142953614)" + }, + { + "explain": " Literal Tuple_(Float64_106.666484888609, Float64_10.7673296167758)" + }, + { + "explain": " Literal Tuple_(Float64_106.666491551541, Float64_10.7673439379244)" + }, + { + "explain": " Literal Tuple_(Float64_106.666500798017, Float64_10.7673567438858)" + }, + { + "explain": " Literal Tuple_(Float64_106.666512295576, Float64_10.7673675742178)" + }, + { + "explain": " Literal Tuple_(Float64_106.666525630821, Float64_10.7673760395122)" + }, + { + "explain": " Literal Tuple_(Float64_106.667032331859, Float64_10.7676338521733)" + }, + { + "explain": " Literal Tuple_(Float64_106.6671413386, Float64_10.7676893154858)" + }, + { + "explain": " Literal Tuple_(Float64_106.667371048786, Float64_10.7678061934666)" + }, + { + "explain": " Literal Tuple_(Float64_106.667552760053, Float64_10.7678987010209)" + }, + { + "explain": " Literal Tuple_(Float64_106.667801848625, Float64_10.7680278028917)" + }, + { + "explain": " Literal Tuple_(Float64_106.667817742281, Float64_10.7680340673957)" + }, + { + "explain": " Literal Tuple_(Float64_106.667834579682, Float64_10.7680369577679)" + }, + { + "explain": " Literal Tuple_(Float64_106.66785165264, Float64_10.7680363524383)" + }, + { + "explain": " Literal Tuple_(Float64_106.667868243061, Float64_10.7680322768672)" + }, + { + "explain": " Literal Tuple_(Float64_106.667878683314, Float64_10.7680285412847)" + }, + { + "explain": " Literal Tuple_(Float64_106.667885469819, Float64_10.7680268413536)" + }, + { + "explain": " Literal Tuple_(Float64_106.667892390269, Float64_10.7680258148018)" + }, + { + "explain": " Literal Tuple_(Float64_106.667899378015, Float64_10.7680254715159)" + }, + { + "explain": " Literal Tuple_(Float64_106.667906365761, Float64_10.7680258148018)" + }, + { + "explain": " Literal Tuple_(Float64_106.667913286211, Float64_10.7680268413536)" + }, + { + "explain": " Literal Tuple_(Float64_106.667920072716, Float64_10.7680285412847)" + }, + { + "explain": " Literal Tuple_(Float64_106.667926659921, Float64_10.7680308982244)" + }, + { + "explain": " Literal Tuple_(Float64_106.667932984386, Float64_10.7680338894736)" + }, + { + "explain": " Literal Tuple_(Float64_106.667938985204, Float64_10.7680374862253)" + }, + { + "explain": " Literal Tuple_(Float64_106.667944604583, Float64_10.7680416538412)" + }, + { + "explain": " Literal Tuple_(Float64_106.667949788405, Float64_10.7680463521828)" + }, + { + "explain": " Literal Tuple_(Float64_106.667954486747, Float64_10.7680515360051)" + }, + { + "explain": " Literal Tuple_(Float64_106.667958654362, 
Float64_10.7680571553826)" + }, + { + "explain": " Literal Tuple_(Float64_106.667962251113, Float64_10.7680631561994)" + }, + { + "explain": " Literal Tuple_(Float64_106.667965242363, Float64_10.7680694806664)" + }, + { + "explain": " Literal Tuple_(Float64_106.667967599303, Float64_10.7680760678724)" + }, + { + "explain": " Literal Tuple_(Float64_106.667969299234, Float64_10.7680828543774)" + }, + { + "explain": " Literal Tuple_(Float64_106.667970926246, Float64_10.7680938227996)" + }, + { + "explain": " Literal Tuple_(Float64_106.667974657027, Float64_10.7681089916695)" + }, + { + "explain": " Literal Tuple_(Float64_106.667981154238, Float64_10.7681231972879)" + }, + { + "explain": " Literal Tuple_(Float64_106.667990189396, Float64_10.7681359400994)" + }, + { + "explain": " Literal Tuple_(Float64_106.668001444773, Float64_10.7681467719897)" + }, + { + "explain": " Literal Tuple_(Float64_106.668014524559, Float64_10.7681553120441)" + }, + { + "explain": " Literal Tuple_(Float64_106.668198488147, Float64_10.7682521458591)" + }, + { + "explain": " Literal Tuple_(Float64_106.669562015793, Float64_10.7689901124345)" + }, + { + "explain": " Literal Tuple_(Float64_106.669614757162, Float64_10.7690820717448)" + }, + { + "explain": " Literal Tuple_(Float64_106.669623023723, Float64_10.7690939566151)" + }, + { + "explain": " Literal Tuple_(Float64_106.669633223154, Float64_10.7691042307472)" + }, + { + "explain": " Literal Tuple_(Float64_106.669645047385, Float64_10.7691125838155)" + }, + { + "explain": " Literal Tuple_(Float64_106.670748051536, Float64_10.7697559307954)" + }, + { + "explain": " Literal Tuple_(Float64_106.670751419717, Float64_10.7697577924329)" + }, + { + "explain": " Literal Tuple_(Float64_106.671035494073, Float64_10.7699063431327)" + }, + { + "explain": " Literal Tuple_(Float64_106.671270162713, Float64_10.7700364834325)" + }, + { + "explain": " Literal Tuple_(Float64_106.67127192876, Float64_10.7700374352053)" + }, + { + "explain": " Literal Tuple_(Float64_106.671437929267, Float64_10.7701243344783)" + }, + { + "explain": " Literal Tuple_(Float64_106.671665917937, Float64_10.7702517637461)" + }, + { + "explain": " Literal Tuple_(Float64_106.67166656035, Float64_10.7702521191025)" + }, + { + "explain": " Literal Tuple_(Float64_106.671943689514, Float64_10.7704038245574)" + }, + { + "explain": " Literal Tuple_(Float64_106.671943806749, Float64_10.7704038886117)" + }, + { + "explain": " Literal Tuple_(Float64_106.6722776446, Float64_10.7705859421916)" + }, + { + "explain": " Literal Tuple_(Float64_106.672278295949, Float64_10.7705862936499)" + }, + { + "explain": " Literal Tuple_(Float64_106.673020324076, Float64_10.7709824352208)" + }, + { + "explain": " Literal Tuple_(Float64_106.673433726727, Float64_10.7712057751884)" + }, + { + "explain": " Literal Tuple_(Float64_106.673694081332, Float64_10.7713489702214)" + }, + { + "explain": " Literal Tuple_(Float64_106.673977066657, Float64_10.7715146655761)" + }, + { + "explain": " Literal Tuple_(Float64_106.674254247937, Float64_10.7716778144336)" + }, + { + "explain": " Literal Tuple_(Float64_106.67440928634, Float64_10.7717698954974)" + }, + { + "explain": " Literal Tuple_(Float64_106.674658478275, Float64_10.7719268836667)" + }, + { + "explain": " Literal Tuple_(Float64_106.674658802254, Float64_10.7719270867325)" + }, + { + "explain": " Literal Tuple_(Float64_106.6748919449, Float64_10.7720724734391)" + }, + { + "explain": " Literal Tuple_(Float64_106.675071660589, Float64_10.7721853602936)" + }, + { + "explain": " Literal 
Tuple_(Float64_106.675350447469, Float64_10.7723606751059)" + }, + { + "explain": " Literal Tuple_(Float64_106.675350748696, Float64_10.7723608636368)" + }, + { + "explain": " Literal Tuple_(Float64_106.6756252856, Float64_10.7725318758852)" + }, + { + "explain": " Literal Tuple_(Float64_106.675888735092, Float64_10.7726957126602)" + }, + { + "explain": " Literal Tuple_(Float64_106.676114500069, Float64_10.7728361211927)" + }, + { + "explain": " Literal Tuple_(Float64_106.676379504941, Float64_10.7730007692002)" + }, + { + "explain": " Literal Tuple_(Float64_106.67661713771, Float64_10.7731502653527)" + }, + { + "explain": " Literal Tuple_(Float64_106.676617572241, Float64_10.773150536857)" + }, + { + "explain": " Literal Tuple_(Float64_106.676852995814, Float64_10.7732966297465)" + }, + { + "explain": " Literal Tuple_(Float64_106.677284352687, Float64_10.7735807849214)" + }, + { + "explain": " Literal Tuple_(Float64_106.677738143311, Float64_10.7738851794554)" + }, + { + "explain": " Literal Tuple_(Float64_106.677752655777, Float64_10.7738929549383)" + }, + { + "explain": " Literal Tuple_(Float64_106.677768414072, Float64_10.773897724206)" + }, + { + "explain": " Literal Tuple_(Float64_106.677784802596, Float64_10.7738993009456)" + }, + { + "explain": " Literal Tuple_(Float64_106.677801181124, Float64_10.7738976235612)" + }, + { + "explain": " Literal Tuple_(Float64_106.677816909825, Float64_10.7738927575805)" + }, + { + "explain": " Literal Tuple_(Float64_106.677831374252, Float64_10.7738848930944)" + }, + { + "explain": " Literal Tuple_(Float64_106.677844009349, Float64_10.7738743373313)" + }, + { + "explain": " Literal Tuple_(Float64_106.677920079221, Float64_10.7737967983562)" + }, + { + "explain": " Literal Tuple_(Float64_106.678239245717, Float64_10.7735243703649)" + }, + { + "explain": " Literal Tuple_(Float64_106.67839926068, Float64_10.7733892116467)" + }, + { + "explain": " Literal Tuple_(Float64_106.678400691571, Float64_10.7733879749217)" + }, + { + "explain": " Literal Tuple_(Float64_106.678515896101, Float64_10.7732860955802)" + }, + { + "explain": " Literal Tuple_(Float64_106.678557979259, Float64_10.7732504310319)" + }, + { + "explain": " Literal Tuple_(Float64_106.67855930664, Float64_10.7732492818517)" + }, + { + "explain": " Literal Tuple_(Float64_106.679033975331, Float64_10.7728295048433)" + }, + { + "explain": " Literal Tuple_(Float64_106.679053201911, Float64_10.772844898411)" + }, + { + "explain": " Literal Tuple_(Float64_106.679632133733, Float64_10.7733262832973)" + }, + { + "explain": " Literal Tuple_(Float64_106.679771732358, Float64_10.7734524450384)" + }, + { + "explain": " Literal Tuple_(Float64_106.679773325229, Float64_10.7734538481348)" + }, + { + "explain": " Literal Tuple_(Float64_106.680011463819, Float64_10.7736582857586)" + }, + { + "explain": " Literal Tuple_(Float64_106.680175801881, Float64_10.7738018862846)" + }, + { + "explain": " Literal Tuple_(Float64_106.680176891116, Float64_10.7738028216402)" + }, + { + "explain": " Literal Tuple_(Float64_106.680320149367, Float64_10.773923712053)" + }, + { + "explain": " Literal Tuple_(Float64_106.680672123374, Float64_10.7742204563391)" + }, + { + "explain": " Literal Tuple_(Float64_106.68094213423, Float64_10.7744504786771)" + }, + { + "explain": " Literal Tuple_(Float64_106.68094233625, Float64_10.7744506502241)" + }, + { + "explain": " Literal Tuple_(Float64_106.68124725775, Float64_10.7747087432576)" + }, + { + "explain": " Literal Tuple_(Float64_106.681247329066, Float64_10.7747088035527)" + }, + { + 
"explain": " Literal Tuple_(Float64_106.681470746982, Float64_10.7748974804345)" + }, + { + "explain": " Literal Tuple_(Float64_106.681471338135, Float64_10.7748979749973)" + }, + { + "explain": " Literal Tuple_(Float64_106.681840030697, Float64_10.7752035373868)" + }, + { + "explain": " Literal Tuple_(Float64_106.682304929691, Float64_10.7756040772245)" + }, + { + "explain": " Literal Tuple_(Float64_106.682308650112, Float64_10.7756071005185)" + }, + { + "explain": " Literal Tuple_(Float64_106.682312917236, Float64_10.7756103687835)" + }, + { + "explain": " Literal Tuple_(Float64_106.682359764439, Float64_10.7756490693986)" + }, + { + "explain": " Literal Tuple_(Float64_106.682640114944, Float64_10.7758996628849)" + }, + { + "explain": " Literal Tuple_(Float64_106.682644070655, Float64_10.7759029839554)" + }, + { + "explain": " Literal Tuple_(Float64_106.682711710544, Float64_10.7759562859055)" + }, + { + "explain": " Literal Tuple_(Float64_106.682806505954, Float64_10.7760368956153)" + }, + { + "explain": " Literal Tuple_(Float64_106.68280745353, Float64_10.776037689352)" + }, + { + "explain": " Literal Tuple_(Float64_106.683169164535, Float64_10.7763361378178)" + }, + { + "explain": " Literal Tuple_(Float64_106.68363265876, Float64_10.7767252395911)" + }, + { + "explain": " Literal Tuple_(Float64_106.683677875719, Float64_10.7767650291442)" + }, + { + "explain": " Literal Tuple_(Float64_106.683797775698, Float64_10.77688614766)" + }, + { + "explain": " Literal Tuple_(Float64_106.684138558845, Float64_10.7772306328105)" + }, + { + "explain": " Literal Tuple_(Float64_106.68414063031, Float64_10.7772326552454)" + }, + { + "explain": " Literal Tuple_(Float64_106.684827531639, Float64_10.777880369263)" + }, + { + "explain": " Literal Tuple_(Float64_106.685228619785, Float64_10.7782605077038)" + }, + { + "explain": " Literal Tuple_(Float64_106.685228896163, Float64_10.7782607684525)" + }, + { + "explain": " Literal Tuple_(Float64_106.686025996525, Float64_10.7790093622583)" + }, + { + "explain": " Literal Tuple_(Float64_106.686026813787, Float64_10.7790101368229)" + }, + { + "explain": " Literal Tuple_(Float64_106.68658269265, Float64_10.7795369738106)" + }, + { + "explain": " Literal Tuple_(Float64_106.687194479537, Float64_10.7801158277128)" + }, + { + "explain": " Literal Tuple_(Float64_106.688401155505, Float64_10.7812670656457)" + }, + { + "explain": " Literal Tuple_(Float64_106.688401571342, Float64_10.7812674596561)" + }, + { + "explain": " Literal Tuple_(Float64_106.689622367701, Float64_10.7824162362891)" + }, + { + "explain": " Literal Tuple_(Float64_106.690002723257, Float64_10.7827815572149)" + }, + { + "explain": " Literal Tuple_(Float64_106.690002908997, Float64_10.7827817350625)" + }, + { + "explain": " Literal Tuple_(Float64_106.690359062158, Float64_10.7831217027417)" + }, + { + "explain": " Literal Tuple_(Float64_106.690359638585, Float64_10.7831222477508)" + }, + { + "explain": " Literal Tuple_(Float64_106.690747557266, Float64_10.7834855403784)" + }, + { + "explain": " Literal Tuple_(Float64_106.691628272565, Float64_10.7843952548301)" + }, + { + "explain": " Literal Tuple_(Float64_106.692179613338, Float64_10.7849709155958)" + }, + { + "explain": " Literal Tuple_(Float64_106.692179802225, Float64_10.7849711121697)" + }, + { + "explain": " Literal Tuple_(Float64_106.692743910048, Float64_10.7855562574979)" + }, + { + "explain": " Literal Tuple_(Float64_106.693288875836, Float64_10.7861225208133)" + }, + { + "explain": " Literal Tuple_(Float64_106.693601234729, 
Float64_10.7864484801726)" + }, + { + "explain": " Literal Tuple_(Float64_106.69220838651, Float64_10.7875617536129)" + }, + { + "explain": " Literal Tuple_(Float64_106.692196691453, Float64_10.787573150248)" + }, + { + "explain": " Literal Tuple_(Float64_106.692187444486, Float64_10.7875866094924)" + }, + { + "explain": " Literal Tuple_(Float64_106.692181000965, Float64_10.7876016141149)" + }, + { + "explain": " Literal Tuple_(Float64_106.692177608512, Float64_10.7876175874962)" + }, + { + "explain": " Literal Tuple_(Float64_106.692177397496, Float64_10.7876339157883)" + }, + { + "explain": " Literal Tuple_(Float64_106.692180376026, Float64_10.7876499715041)" + }, + { + "explain": " Literal Tuple_(Float64_106.692186429639, Float64_10.7876651376314)" + }, + { + "explain": " Literal Tuple_(Float64_106.692195325699, Float64_10.7876788313445)" + }, + { + "explain": " Literal Tuple_(Float64_106.692206722334, Float64_10.7876905264015)" + }, + { + "explain": " Literal Tuple_(Float64_106.692220181578, Float64_10.7876997733682)" + }, + { + "explain": " Literal Tuple_(Float64_106.692235186201, Float64_10.7877062168886)" + }, + { + "explain": " Literal Tuple_(Float64_106.692251159582, Float64_10.787709609342)" + }, + { + "explain": " Literal Tuple_(Float64_106.692267487874, Float64_10.7877098203582)" + }, + { + "explain": " Literal Tuple_(Float64_106.69228354359, Float64_10.7877068418281)" + }, + { + "explain": " Literal Tuple_(Float64_106.692298709717, Float64_10.7877007882148)" + }, + { + "explain": " Literal Tuple_(Float64_106.69231240343, Float64_10.7876918921553)" + }, + { + "explain": " Literal Tuple_(Float64_106.693776442708, Float64_10.7865217172423)" + }, + { + "explain": " Literal Tuple_(Float64_106.693788736175, Float64_10.7865096022178)" + }, + { + "explain": " Literal Tuple_(Float64_106.693798269005, Float64_10.7864952137411)" + }, + { + "explain": " Literal Tuple_(Float64_106.693804631934, Float64_10.7864791695437)" + }, + { + "explain": " Literal Tuple_(Float64_106.693807551784, Float64_10.7864621584413)" + }, + { + "explain": " Literal Tuple_(Float64_106.693806903199, Float64_10.7864449107613)" + }, + { + "explain": " Literal Tuple_(Float64_106.693802714026, Float64_10.7864281669878)" + }, + { + "explain": " Literal Tuple_(Float64_106.693795164114, Float64_10.786412645971)" + }, + { + "explain": " Literal Tuple_(Float64_106.693784577601, Float64_10.7863990140651)" + }, + { + "explain": " Literal Tuple_(Float64_106.69340910087, Float64_10.7860071886444)" + }, + { + "explain": " Literal Tuple_(Float64_106.69340897739, Float64_10.7860070600637)" + }, + { + "explain": " Literal Tuple_(Float64_106.692863924954, Float64_10.7854407067139)" + }, + { + "explain": " Literal Tuple_(Float64_106.69229983717, Float64_10.7848555821281)" + }, + { + "explain": " Literal Tuple_(Float64_106.691748435669, Float64_10.7842798579551)" + }, + { + "explain": " Literal Tuple_(Float64_106.691748124777, Float64_10.7842795350934)" + }, + { + "explain": " Literal Tuple_(Float64_106.690865834778, Float64_10.7833681940925)" + }, + { + "explain": " Literal Tuple_(Float64_106.690862927107, Float64_10.7833653342196)" + }, + { + "explain": " Literal Tuple_(Float64_106.690473809086, Float64_10.7830009183885)" + }, + { + "explain": " Literal Tuple_(Float64_106.690118035849, Float64_10.7826613133679)" + }, + { + "explain": " Literal Tuple_(Float64_106.689737465891, Float64_10.7822957865149)" + }, + { + "explain": " Literal Tuple_(Float64_106.689736848623, Float64_10.7822951996834)" + }, + { + "explain": " Literal 
Tuple_(Float64_106.688515950726, Float64_10.7811463275029)" + }, + { + "explain": " Literal Tuple_(Float64_106.687309357068, Float64_10.7799951680976)" + }, + { + "explain": " Literal Tuple_(Float64_106.687309106711, Float64_10.779994930232)" + }, + { + "explain": " Literal Tuple_(Float64_106.686697270266, Float64_10.7794160294802)" + }, + { + "explain": " Literal Tuple_(Float64_106.686141416688, Float64_10.7788892164565)" + }, + { + "explain": " Literal Tuple_(Float64_106.686140461741, Float64_10.7788883114)" + }, + { + "explain": " Literal Tuple_(Float64_106.686140185762, Float64_10.7788880510296)" + }, + { + "explain": " Literal Tuple_(Float64_106.6853430856, Float64_10.7781394574112)" + }, + { + "explain": " Literal Tuple_(Float64_106.684942058447, Float64_10.7777593767781)" + }, + { + "explain": " Literal Tuple_(Float64_106.684941904463, Float64_10.7777592312084)" + }, + { + "explain": " Literal Tuple_(Float64_106.684255979358, Float64_10.7771124377212)" + }, + { + "explain": " Literal Tuple_(Float64_106.683916204215, Float64_10.776768971525)" + }, + { + "explain": " Literal Tuple_(Float64_106.683794256559, Float64_10.7766457845149)" + }, + { + "explain": " Literal Tuple_(Float64_106.68379008676, Float64_10.7766418525893)" + }, + { + "explain": " Literal Tuple_(Float64_106.683741989497, Float64_10.7765995284558)" + }, + { + "explain": " Literal Tuple_(Float64_106.683740519326, Float64_10.7765982647987)" + }, + { + "explain": " Literal Tuple_(Float64_106.683276011394, Float64_10.7762083120217)" + }, + { + "explain": " Literal Tuple_(Float64_106.683275466929, Float64_10.7762078588774)" + }, + { + "explain": " Literal Tuple_(Float64_106.68291395946, Float64_10.77590957835)" + }, + { + "explain": " Literal Tuple_(Float64_106.682818451152, Float64_10.775828362424)" + }, + { + "explain": " Literal Tuple_(Float64_106.682816046951, Float64_10.7758263940715)" + }, + { + "explain": " Literal Tuple_(Float64_106.682749215964, Float64_10.7757737295564)" + }, + { + "explain": " Literal Tuple_(Float64_106.682469581984, Float64_10.775523776542)" + }, + { + "explain": " Literal Tuple_(Float64_106.682467121137, Float64_10.7755216616573)" + }, + { + "explain": " Literal Tuple_(Float64_106.682417839663, Float64_10.775480950083)" + }, + { + "explain": " Literal Tuple_(Float64_106.68241543796, Float64_10.7754790393628)" + }, + { + "explain": " Literal Tuple_(Float64_106.682411856108, Float64_10.7754762959601)" + }, + { + "explain": " Literal Tuple_(Float64_106.681948170223, Float64_10.775076801292)" + }, + { + "explain": " Literal Tuple_(Float64_106.681946953215, Float64_10.7750757728772)" + }, + { + "explain": " Literal Tuple_(Float64_106.681577943952, Float64_10.7747699480145)" + }, + { + "explain": " Literal Tuple_(Float64_106.681354856141, Float64_10.7745815499075)" + }, + { + "explain": " Literal Tuple_(Float64_106.681050071432, Float64_10.7743235726569)" + }, + { + "explain": " Literal Tuple_(Float64_106.680779998801, Float64_10.774093497693)" + }, + { + "explain": " Literal Tuple_(Float64_106.680779672798, Float64_10.7740932214111)" + }, + { + "explain": " Literal Tuple_(Float64_106.680427578845, Float64_10.7737963760106)" + }, + { + "explain": " Literal Tuple_(Float64_106.680284883706, Float64_10.7736759607876)" + }, + { + "explain": " Literal Tuple_(Float64_106.680120811518, Float64_10.7735325925854)" + }, + { + "explain": " Literal Tuple_(Float64_106.680120259999, Float64_10.7735321149047)" + }, + { + "explain": " Literal Tuple_(Float64_106.679882649978, Float64_10.7733281310479)" + }, + { + 
"explain": " Literal Tuple_(Float64_106.679742564868, Float64_10.7732015296478)" + }, + { + "explain": " Literal Tuple_(Float64_106.67973997054, Float64_10.7731992804165)" + }, + { + "explain": " Literal Tuple_(Float64_106.679159125009, Float64_10.772716304271)" + }, + { + "explain": " Literal Tuple_(Float64_106.679157929246, Float64_10.7727153285815)" + }, + { + "explain": " Literal Tuple_(Float64_106.679083371982, Float64_10.7726556350576)" + }, + { + "explain": " Literal Tuple_(Float64_106.679069423592, Float64_10.7726465921904)" + }, + { + "explain": " Literal Tuple_(Float64_106.679053957365, Float64_10.7726404990091)" + }, + { + "explain": " Literal Tuple_(Float64_106.679037589221, Float64_10.7726375981655)" + }, + { + "explain": " Literal Tuple_(Float64_106.679020970997, Float64_10.7726380051815)" + }, + { + "explain": " Literal Tuple_(Float64_106.679004764489, Float64_10.7726417038483)" + }, + { + "explain": " Literal Tuple_(Float64_106.678989615098, Float64_10.7726485468719)" + }, + { + "explain": " Literal Tuple_(Float64_106.678976126125, Float64_10.772658261739)" + }, + { + "explain": " Literal Tuple_(Float64_106.678449597495, Float64_10.7731239014943)" + }, + { + "explain": " Literal Tuple_(Float64_106.678407514754, Float64_10.773159565689)" + }, + { + "explain": " Literal Tuple_(Float64_106.678406188192, Float64_10.7731607141448)" + }, + { + "explain": " Literal Tuple_(Float64_106.678291034854, Float64_10.7732625482153)" + }, + { + "explain": " Literal Tuple_(Float64_106.678131577851, Float64_10.7733972356454)" + }, + { + "explain": " Literal Tuple_(Float64_106.678131249559, Float64_10.7733975143985)" + }, + { + "explain": " Literal Tuple_(Float64_106.677809116892, Float64_10.7736724741964)" + }, + { + "explain": " Literal Tuple_(Float64_106.677803734254, Float64_10.7736774962862)" + }, + { + "explain": " Literal Tuple_(Float64_106.67777351642, Float64_10.773708297704)" + }, + { + "explain": " Literal Tuple_(Float64_106.677376870851, Float64_10.7734422350384)" + }, + { + "explain": " Literal Tuple_(Float64_106.677376291861, Float64_10.7734418501559)" + }, + { + "explain": " Literal Tuple_(Float64_106.676943701895, Float64_10.7731568826838)" + }, + { + "explain": " Literal Tuple_(Float64_106.676941799819, Float64_10.7731556663352)" + }, + { + "explain": " Literal Tuple_(Float64_106.676705634648, Float64_10.7730091132449)" + }, + { + "explain": " Literal Tuple_(Float64_106.676468020922, Float64_10.7728596290723)" + }, + { + "explain": " Literal Tuple_(Float64_106.676467624617, Float64_10.7728593813034)" + }, + { + "explain": " Literal Tuple_(Float64_106.676202468827, Float64_10.7726946395397)" + }, + { + "explain": " Literal Tuple_(Float64_106.675976718772, Float64_10.7725542402878)" + }, + { + "explain": " Literal Tuple_(Float64_106.675713344944, Float64_10.7723904505946)" + }, + { + "explain": " Literal Tuple_(Float64_106.675438984881, Float64_10.7722195485022)" + }, + { + "explain": " Literal Tuple_(Float64_106.675160330528, Float64_10.7720443170291)" + }, + { + "explain": " Literal Tuple_(Float64_106.674980445983, Float64_10.7719313240966)" + }, + { + "explain": " Literal Tuple_(Float64_106.674980215342, Float64_10.7719311797465)" + }, + { + "explain": " Literal Tuple_(Float64_106.674747119479, Float64_10.7717858222138)" + }, + { + "explain": " Literal Tuple_(Float64_106.674497164595, Float64_10.7716283533947)" + }, + { + "explain": " Literal Tuple_(Float64_106.674495300219, Float64_10.7716272127471)" + }, + { + "explain": " Literal Tuple_(Float64_106.674339180867, 
Float64_10.7715344896819)" + }, + { + "explain": " Literal Tuple_(Float64_106.674338897981, Float64_10.771534322423)" + }, + { + "explain": " Literal Tuple_(Float64_106.674061493048, Float64_10.7713710419232)" + }, + { + "explain": " Literal Tuple_(Float64_106.674061328848, Float64_10.7713709455279)" + }, + { + "explain": " Literal Tuple_(Float64_106.673777295695, Float64_10.7712046366425)" + }, + { + "explain": " Literal Tuple_(Float64_106.673775349509, Float64_10.7712035319333)" + }, + { + "explain": " Literal Tuple_(Float64_106.673513740027, Float64_10.7710596467179)" + }, + { + "explain": " Literal Tuple_(Float64_106.673513190173, Float64_10.7710593469847)" + }, + { + "explain": " Literal Tuple_(Float64_106.673099330442, Float64_10.7708357600807)" + }, + { + "explain": " Literal Tuple_(Float64_106.673098966779, Float64_10.7708355647753)" + }, + { + "explain": " Literal Tuple_(Float64_106.672357083034, Float64_10.7704395002842)" + }, + { + "explain": " Literal Tuple_(Float64_106.672023628724, Float64_10.7702576558632)" + }, + { + "explain": " Literal Tuple_(Float64_106.671746880137, Float64_10.7701061587426)" + }, + { + "explain": " Literal Tuple_(Float64_106.671518215262, Float64_10.7699783515251)" + }, + { + "explain": " Literal Tuple_(Float64_106.671516207112, Float64_10.7699772649622)" + }, + { + "explain": " Literal Tuple_(Float64_106.671350083838, Float64_10.7698903014222)" + }, + { + "explain": " Literal Tuple_(Float64_106.671115399209, Float64_10.7697601522552)" + }, + { + "explain": " Literal Tuple_(Float64_106.671113600766, Float64_10.7697591835329)" + }, + { + "explain": " Literal Tuple_(Float64_106.670830326847, Float64_10.7696110514048)" + }, + { + "explain": " Literal Tuple_(Float64_106.66974820551, Float64_10.7689798847013)" + }, + { + "explain": " Literal Tuple_(Float64_106.66969475177, Float64_10.7688866833063)" + }, + { + "explain": " Literal Tuple_(Float64_106.669685913661, Float64_10.7688741199651)" + }, + { + "explain": " Literal Tuple_(Float64_106.669674918986, Float64_10.7688633930448)" + }, + { + "explain": " Literal Tuple_(Float64_106.669662141606, Float64_10.7688548673033)" + }, + { + "explain": " Literal Tuple_(Float64_106.668277363011, Float64_10.7681053993183)" + }, + { + "explain": " Literal Tuple_(Float64_106.668276514094, Float64_10.7681049461882)" + }, + { + "explain": " Literal Tuple_(Float64_106.668126503268, Float64_10.7680259842551)" + }, + { + "explain": " Literal Tuple_(Float64_106.668125839186, Float64_10.7680237950692)" + }, + { + "explain": " Literal Tuple_(Float64_106.66812072496, Float64_10.7680095017658)" + }, + { + "explain": " Literal Tuple_(Float64_106.668117596648, Float64_10.7680019493532)" + }, + { + "explain": " Literal Tuple_(Float64_106.66811110606, Float64_10.7679882261576)" + }, + { + "explain": " Literal Tuple_(Float64_106.668107252546, Float64_10.7679810167398)" + }, + { + "explain": " Literal Tuple_(Float64_106.668099448104, Float64_10.7679679958141)" + }, + { + "explain": " Literal Tuple_(Float64_106.668094906497, Float64_10.767961198818)" + }, + { + "explain": " Literal Tuple_(Float64_106.668085863361, Float64_10.7679490055608)" + }, + { + "explain": " Literal Tuple_(Float64_106.668080677403, Float64_10.7679426864524)" + }, + { + "explain": " Literal Tuple_(Float64_106.668070482664, Float64_10.7679314382913)" + }, + { + "explain": " Literal Tuple_(Float64_106.668064702296, Float64_10.7679256579236)" + }, + { + "explain": " Literal Tuple_(Float64_106.668053454135, Float64_10.7679154631847)" + }, + { + "explain": " Literal 
Tuple_(Float64_106.668047135024, Float64_10.7679102772246)" + }, + { + "explain": " Literal Tuple_(Float64_106.668034941766, Float64_10.7679012340887)" + }, + { + "explain": " Literal Tuple_(Float64_106.668028144776, Float64_10.7678966924853)" + }, + { + "explain": " Literal Tuple_(Float64_106.668015123851, Float64_10.7678888880428)" + }, + { + "explain": " Literal Tuple_(Float64_106.668007914429, Float64_10.7678850345264)" + }, + { + "explain": " Literal Tuple_(Float64_106.667994191233, Float64_10.7678785439383)" + }, + { + "explain": " Literal Tuple_(Float64_106.667986638821, Float64_10.7678754156266)" + }, + { + "explain": " Literal Tuple_(Float64_106.667972345518, Float64_10.7678703014008)" + }, + { + "explain": " Literal Tuple_(Float64_106.667964522841, Float64_10.7678679284177)" + }, + { + "explain": " Literal Tuple_(Float64_106.667949797082, Float64_10.7678642398071)" + }, + { + "explain": " Literal Tuple_(Float64_106.667941779481, Float64_10.7678626450072)" + }, + { + "explain": " Literal Tuple_(Float64_106.667926763083, Float64_10.767860417535)" + }, + { + "explain": " Literal Tuple_(Float64_106.667918627772, Float64_10.7678596162768)" + }, + { + "explain": " Literal Tuple_(Float64_106.667903465352, Float64_10.7678588713949)" + }, + { + "explain": " Literal Tuple_(Float64_106.667895290678, Float64_10.7678588713949)" + }, + { + "explain": " Literal Tuple_(Float64_106.667880128258, Float64_10.7678596162768)" + }, + { + "explain": " Literal Tuple_(Float64_106.667871992947, Float64_10.767860417535)" + }, + { + "explain": " Literal Tuple_(Float64_106.667856976549, Float64_10.7678626450072)" + }, + { + "explain": " Literal Tuple_(Float64_106.667848958948, Float64_10.7678642398071)" + }, + { + "explain": " Literal Tuple_(Float64_106.667848526162, Float64_10.7678643482145)" + }, + { + "explain": " Literal Tuple_(Float64_106.667629153721, Float64_10.7677506481269)" + }, + { + "explain": " Literal Tuple_(Float64_106.667628614008, Float64_10.7677503708842)" + }, + { + "explain": " Literal Tuple_(Float64_106.66744662399, Float64_10.7676577214203)" + }, + { + "explain": " Literal Tuple_(Float64_106.667216888626, Float64_10.7675408306262)" + }, + { + "explain": " Literal Tuple_(Float64_106.667161868227, Float64_10.7675128359024)" + }, + { + "explain": " Literal Tuple_(Float64_106.667012119458, Float64_10.7674366427911)" + }, + { + "explain": " Literal Tuple_(Float64_106.666659357657, Float64_10.7672571553777)" + }, + { + "explain": " Literal Tuple_(Float64_106.666673753979, Float64_10.7671954479766)" + }, + { + "explain": " Literal Tuple_(Float64_106.667048293768, Float64_10.7672739882109)" + }, + { + "explain": " Literal Tuple_(Float64_106.6670141, Float64_10.7674274)" + } + ], + + "rows": 361, + + "statistics": + { + "elapsed": 0.003636441, + "rows_read": 361, + "bytes_read": 29164 + } +} diff --git a/parser/testdata/00500_point_in_polygon_bug_3_linestring_rotation_precision/metadata.json b/parser/testdata/00500_point_in_polygon_bug_3_linestring_rotation_precision/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_bug_3_linestring_rotation_precision/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00500_point_in_polygon_bug_3_linestring_rotation_precision/query.sql b/parser/testdata/00500_point_in_polygon_bug_3_linestring_rotation_precision/query.sql new file mode 100644 index 000000000..0de749bfe --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_bug_3_linestring_rotation_precision/query.sql 
@@ -0,0 +1,8 @@ +SELECT pointInPolygon((106.6671509, 10.7674952), [(106.667161868227, 10.7674952), (106.667165727127, 10.7675059912261), (106.667170817563, 10.7674904752629), (106.667229225265, 10.7672278502066), (106.667231193621, 10.7672115129572), (106.667229912029, 10.7671951075415), (106.667225430503, 10.767179274157), (106.667217923927, 10.7671646306786), (106.667207685234, 10.7671517485471), (106.667195113975, 10.7671411304688), (106.667180700725, 10.7671331907989), (106.66716500794, 10.7671282393715), (106.666628232995, 10.7670156787539), (106.666612233649, 10.7670139127584), (106.666596193354, 10.7670152569112), (106.666580711053, 10.7670196610218), (106.666566364856, 10.7670269606408), (106.666553690448, 10.7670368832008), (106.666543161092, 10.767049058194), (106.666535169952, 10.7670630310067), (106.666530015418, 10.7670782798948), (106.666482284259, 10.7672828714379), (106.666480170141, 10.7672985245675), (106.666481048788, 10.7673142953614), (106.666484888609, 10.7673296167758), (106.666491551541, 10.7673439379244), (106.666500798017, 10.7673567438858), (106.666512295576, 10.7673675742178), (106.666525630821, 10.7673760395122), (106.667032331859, 10.7676338521733), (106.6671413386, 10.7676893154858), (106.667371048786, 10.7678061934666), (106.667552760053, 10.7678987010209), (106.667801848625, 10.7680278028917), (106.667817742281, 10.7680340673957), (106.667834579682, 10.7680369577679), (106.66785165264, 10.7680363524383), (106.667868243061, 10.7680322768672), (106.667878683314, 10.7680285412847), (106.667885469819, 10.7680268413536), (106.667892390269, 10.7680258148018), (106.667899378015, 10.7680254715159), (106.667906365761, 10.7680258148018), (106.667913286211, 10.7680268413536), (106.667920072716, 10.7680285412847), (106.667926659921, 10.7680308982244), (106.667932984386, 10.7680338894736), (106.667938985204, 10.7680374862253), (106.667944604583, 10.7680416538412), (106.667949788405, 10.7680463521828), (106.667954486747, 10.7680515360051), (106.667958654362, 10.7680571553826), (106.667962251113, 10.7680631561994), (106.667965242363, 10.7680694806664), (106.667967599303, 10.7680760678724), (106.667969299234, 10.7680828543774), (106.667970926246, 10.7680938227996), (106.667974657027, 10.7681089916695), (106.667981154238, 10.7681231972879), (106.667990189396, 10.7681359400994), (106.668001444773, 10.7681467719897), (106.668014524559, 10.7681553120441), (106.668198488147, 10.7682521458591), (106.669562015793, 10.7689901124345), (106.669614757162, 10.7690820717448), (106.669623023723, 10.7690939566151), (106.669633223154, 10.7691042307472), (106.669645047385, 10.7691125838155), (106.670748051536, 10.7697559307954), (106.670751419717, 10.7697577924329), (106.671035494073, 10.7699063431327), (106.671270162713, 10.7700364834325), (106.67127192876, 10.7700374352053), (106.671437929267, 10.7701243344783), (106.671665917937, 10.7702517637461), (106.67166656035, 10.7702521191025), (106.671943689514, 10.7704038245574), (106.671943806749, 10.7704038886117), (106.6722776446, 10.7705859421916), (106.672278295949, 10.7705862936499), (106.673020324076, 10.7709824352208), (106.673433726727, 10.7712057751884), (106.673694081332, 10.7713489702214), (106.673977066657, 10.7715146655761), (106.674254247937, 10.7716778144336), (106.67440928634, 10.7717698954974), (106.674658478275, 10.7719268836667), (106.674658802254, 10.7719270867325), (106.6748919449, 10.7720724734391), (106.675071660589, 10.7721853602936), (106.675350447469, 10.7723606751059), (106.675350748696, 10.7723608636368), 
(106.6756252856, 10.7725318758852), (106.675888735092, 10.7726957126602), (106.676114500069, 10.7728361211927), (106.676379504941, 10.7730007692002), (106.67661713771, 10.7731502653527), (106.676617572241, 10.773150536857), (106.676852995814, 10.7732966297465), (106.677284352687, 10.7735807849214), (106.677738143311, 10.7738851794554), (106.677752655777, 10.7738929549383), (106.677768414072, 10.773897724206), (106.677784802596, 10.7738993009456), (106.677801181124, 10.7738976235612), (106.677816909825, 10.7738927575805), (106.677831374252, 10.7738848930944), (106.677844009349, 10.7738743373313), (106.677920079221, 10.7737967983562), (106.678239245717, 10.7735243703649), (106.67839926068, 10.7733892116467), (106.678400691571, 10.7733879749217), (106.678515896101, 10.7732860955802), (106.678557979259, 10.7732504310319), (106.67855930664, 10.7732492818517), (106.679033975331, 10.7728295048433), (106.679053201911, 10.772844898411), (106.679632133733, 10.7733262832973), (106.679771732358, 10.7734524450384), (106.679773325229, 10.7734538481348), (106.680011463819, 10.7736582857586), (106.680175801881, 10.7738018862846), (106.680176891116, 10.7738028216402), (106.680320149367, 10.773923712053), (106.680672123374, 10.7742204563391), (106.68094213423, 10.7744504786771), (106.68094233625, 10.7744506502241), (106.68124725775, 10.7747087432576), (106.681247329066, 10.7747088035527), (106.681470746982, 10.7748974804345), (106.681471338135, 10.7748979749973), (106.681840030697, 10.7752035373868), (106.682304929691, 10.7756040772245), (106.682308650112, 10.7756071005185), (106.682312917236, 10.7756103687835), (106.682359764439, 10.7756490693986), (106.682640114944, 10.7758996628849), (106.682644070655, 10.7759029839554), (106.682711710544, 10.7759562859055), (106.682806505954, 10.7760368956153), (106.68280745353, 10.776037689352), (106.683169164535, 10.7763361378178), (106.68363265876, 10.7767252395911), (106.683677875719, 10.7767650291442), (106.683797775698, 10.77688614766), (106.684138558845, 10.7772306328105), (106.68414063031, 10.7772326552454), (106.684827531639, 10.777880369263), (106.685228619785, 10.7782605077038), (106.685228896163, 10.7782607684525), (106.686025996525, 10.7790093622583), (106.686026813787, 10.7790101368229), (106.68658269265, 10.7795369738106), (106.687194479537, 10.7801158277128), (106.688401155505, 10.7812670656457), (106.688401571342, 10.7812674596561), (106.689622367701, 10.7824162362891), (106.690002723257, 10.7827815572149), (106.690002908997, 10.7827817350625), (106.690359062158, 10.7831217027417), (106.690359638585, 10.7831222477508), (106.690747557266, 10.7834855403784), (106.691628272565, 10.7843952548301), (106.692179613338, 10.7849709155958), (106.692179802225, 10.7849711121697), (106.692743910048, 10.7855562574979), (106.693288875836, 10.7861225208133), (106.693601234729, 10.7864484801726), (106.69220838651, 10.7875617536129), (106.692196691453, 10.787573150248), (106.692187444486, 10.7875866094924), (106.692181000965, 10.7876016141149), (106.692177608512, 10.7876175874962), (106.692177397496, 10.7876339157883), (106.692180376026, 10.7876499715041), (106.692186429639, 10.7876651376314), (106.692195325699, 10.7876788313445), (106.692206722334, 10.7876905264015), (106.692220181578, 10.7876997733682), (106.692235186201, 10.7877062168886), (106.692251159582, 10.787709609342), (106.692267487874, 10.7877098203582), (106.69228354359, 10.7877068418281), (106.692298709717, 10.7877007882148), (106.69231240343, 10.7876918921553), (106.693776442708, 10.7865217172423), 
(106.693788736175, 10.7865096022178), (106.693798269005, 10.7864952137411), (106.693804631934, 10.7864791695437), (106.693807551784, 10.7864621584413), (106.693806903199, 10.7864449107613), (106.693802714026, 10.7864281669878), (106.693795164114, 10.786412645971), (106.693784577601, 10.7863990140651), (106.69340910087, 10.7860071886444), (106.69340897739, 10.7860070600637), (106.692863924954, 10.7854407067139), (106.69229983717, 10.7848555821281), (106.691748435669, 10.7842798579551), (106.691748124777, 10.7842795350934), (106.690865834778, 10.7833681940925), (106.690862927107, 10.7833653342196), (106.690473809086, 10.7830009183885), (106.690118035849, 10.7826613133679), (106.689737465891, 10.7822957865149), (106.689736848623, 10.7822951996834), (106.688515950726, 10.7811463275029), (106.687309357068, 10.7799951680976), (106.687309106711, 10.779994930232), (106.686697270266, 10.7794160294802), (106.686141416688, 10.7788892164565), (106.686140461741, 10.7788883114), (106.686140185762, 10.7788880510296), (106.6853430856, 10.7781394574112), (106.684942058447, 10.7777593767781), (106.684941904463, 10.7777592312084), (106.684255979358, 10.7771124377212), (106.683916204215, 10.776768971525), (106.683794256559, 10.7766457845149), (106.68379008676, 10.7766418525893), (106.683741989497, 10.7765995284558), (106.683740519326, 10.7765982647987), (106.683276011394, 10.7762083120217), (106.683275466929, 10.7762078588774), (106.68291395946, 10.77590957835), (106.682818451152, 10.775828362424), (106.682816046951, 10.7758263940715), (106.682749215964, 10.7757737295564), (106.682469581984, 10.775523776542), (106.682467121137, 10.7755216616573), (106.682417839663, 10.775480950083), (106.68241543796, 10.7754790393628), (106.682411856108, 10.7754762959601), (106.681948170223, 10.775076801292), (106.681946953215, 10.7750757728772), (106.681577943952, 10.7747699480145), (106.681354856141, 10.7745815499075), (106.681050071432, 10.7743235726569), (106.680779998801, 10.774093497693), (106.680779672798, 10.7740932214111), (106.680427578845, 10.7737963760106), (106.680284883706, 10.7736759607876), (106.680120811518, 10.7735325925854), (106.680120259999, 10.7735321149047), (106.679882649978, 10.7733281310479), (106.679742564868, 10.7732015296478), (106.67973997054, 10.7731992804165), (106.679159125009, 10.772716304271), (106.679157929246, 10.7727153285815), (106.679083371982, 10.7726556350576), (106.679069423592, 10.7726465921904), (106.679053957365, 10.7726404990091), (106.679037589221, 10.7726375981655), (106.679020970997, 10.7726380051815), (106.679004764489, 10.7726417038483), (106.678989615098, 10.7726485468719), (106.678976126125, 10.772658261739), (106.678449597495, 10.7731239014943), (106.678407514754, 10.773159565689), (106.678406188192, 10.7731607141448), (106.678291034854, 10.7732625482153), (106.678131577851, 10.7733972356454), (106.678131249559, 10.7733975143985), (106.677809116892, 10.7736724741964), (106.677803734254, 10.7736774962862), (106.67777351642, 10.773708297704), (106.677376870851, 10.7734422350384), (106.677376291861, 10.7734418501559), (106.676943701895, 10.7731568826838), (106.676941799819, 10.7731556663352), (106.676705634648, 10.7730091132449), (106.676468020922, 10.7728596290723), (106.676467624617, 10.7728593813034), (106.676202468827, 10.7726946395397), (106.675976718772, 10.7725542402878), (106.675713344944, 10.7723904505946), (106.675438984881, 10.7722195485022), (106.675160330528, 10.7720443170291), (106.674980445983, 10.7719313240966), (106.674980215342, 10.7719311797465), 
(106.674747119479, 10.7717858222138), (106.674497164595, 10.7716283533947), (106.674495300219, 10.7716272127471), (106.674339180867, 10.7715344896819), (106.674338897981, 10.771534322423), (106.674061493048, 10.7713710419232), (106.674061328848, 10.7713709455279), (106.673777295695, 10.7712046366425), (106.673775349509, 10.7712035319333), (106.673513740027, 10.7710596467179), (106.673513190173, 10.7710593469847), (106.673099330442, 10.7708357600807), (106.673098966779, 10.7708355647753), (106.672357083034, 10.7704395002842), (106.672023628724, 10.7702576558632), (106.671746880137, 10.7701061587426), (106.671518215262, 10.7699783515251), (106.671516207112, 10.7699772649622), (106.671350083838, 10.7698903014222), (106.671115399209, 10.7697601522552), (106.671113600766, 10.7697591835329), (106.670830326847, 10.7696110514048), (106.66974820551, 10.7689798847013), (106.66969475177, 10.7688866833063), (106.669685913661, 10.7688741199651), (106.669674918986, 10.7688633930448), (106.669662141606, 10.7688548673033), (106.668277363011, 10.7681053993183), (106.668276514094, 10.7681049461882), (106.668126503268, 10.7680259842551), (106.668125839186, 10.7680237950692), (106.66812072496, 10.7680095017658), (106.668117596648, 10.7680019493532), (106.66811110606, 10.7679882261576), (106.668107252546, 10.7679810167398), (106.668099448104, 10.7679679958141), (106.668094906497, 10.767961198818), (106.668085863361, 10.7679490055608), (106.668080677403, 10.7679426864524), (106.668070482664, 10.7679314382913), (106.668064702296, 10.7679256579236), (106.668053454135, 10.7679154631847), (106.668047135024, 10.7679102772246), (106.668034941766, 10.7679012340887), (106.668028144776, 10.7678966924853), (106.668015123851, 10.7678888880428), (106.668007914429, 10.7678850345264), (106.667994191233, 10.7678785439383), (106.667986638821, 10.7678754156266), (106.667972345518, 10.7678703014008), (106.667964522841, 10.7678679284177), (106.667949797082, 10.7678642398071), (106.667941779481, 10.7678626450072), (106.667926763083, 10.767860417535), (106.667918627772, 10.7678596162768), (106.667903465352, 10.7678588713949), (106.667895290678, 10.7678588713949), (106.667880128258, 10.7678596162768), (106.667871992947, 10.767860417535), (106.667856976549, 10.7678626450072), (106.667848958948, 10.7678642398071), (106.667848526162, 10.7678643482145), (106.667629153721, 10.7677506481269), (106.667628614008, 10.7677503708842), (106.66744662399, 10.7676577214203), (106.667216888626, 10.7675408306262), (106.667161868227, 10.7675128359024), (106.667012119458, 10.7674366427911), (106.666659357657, 10.7672571553777), (106.666673753979, 10.7671954479766), (106.667048293768, 10.7672739882109), (106.6670141, 10.7674274)]); + +SELECT pointInPolygon((106.677085876465,10.7744951248169), 
[(106.667161868227,10.7675128359024),(106.667165727127,10.7675059912261),(106.667170817563,10.7674904752629),(106.667229225265,10.7672278502066),(106.667231193621,10.7672115129572),(106.667229912029,10.7671951075415),(106.667225430503,10.767179274157),(106.667217923927,10.7671646306786),(106.667207685234,10.7671517485471),(106.667195113975,10.7671411304688),(106.667180700725,10.7671331907989),(106.66716500794,10.7671282393715),(106.666628232995,10.7670156787539),(106.666612233649,10.7670139127584),(106.666596193354,10.7670152569112),(106.666580711053,10.7670196610218),(106.666566364856,10.7670269606408),(106.666553690448,10.7670368832008),(106.666543161092,10.767049058194),(106.666535169952,10.7670630310067),(106.666530015418,10.7670782798948),(106.666482284259,10.7672828714379),(106.666480170141,10.7672985245675),(106.666481048788,10.7673142953614),(106.666484888609,10.7673296167758),(106.666491551541,10.7673439379244),(106.666500798017,10.7673567438858),(106.666512295576,10.7673675742178),(106.666525630821,10.7673760395122),(106.667032331859,10.7676338521733),(106.6671413386,10.7676893154858),(106.667371048786,10.7678061934666),(106.667552760053,10.7678987010209),(106.667801848625,10.7680278028917),(106.667817742281,10.7680340673957),(106.667834579682,10.7680369577679),(106.66785165264,10.7680363524383),(106.667868243061,10.7680322768672),(106.667878683314,10.7680285412847),(106.667885469819,10.7680268413536),(106.667892390269,10.7680258148018),(106.667899378015,10.7680254715159),(106.667906365761,10.7680258148018),(106.667913286211,10.7680268413536),(106.667920072716,10.7680285412847),(106.667926659921,10.7680308982244),(106.667932984386,10.7680338894736),(106.667938985204,10.7680374862253),(106.667944604583,10.7680416538412),(106.667949788405,10.7680463521828),(106.667954486747,10.7680515360051),(106.667958654362,10.7680571553826),(106.667962251113,10.7680631561994),(106.667965242363,10.7680694806664),(106.667967599303,10.7680760678724),(106.667969299234,10.7680828543774),(106.667970926246,10.7680938227996),(106.667974657027,10.7681089916695),(106.667981154238,10.7681231972879),(106.667990189396,10.7681359400994),(106.668001444773,10.7681467719897),(106.668014524559,10.7681553120441),(106.668198488147,10.7682521458591),(106.669562015793,10.7689901124345),(106.669614757162,10.7690820717448),(106.669623023723,10.7690939566151),(106.669633223154,10.7691042307472),(106.669645047385,10.7691125838155),(106.670748051536,10.7697559307954),(106.670751419717,10.7697577924329),(106.671035494073,10.7699063431327),(106.671270162713,10.7700364834325),(106.67127192876,10.7700374352053),(106.671437929267,10.7701243344783),(106.671665917937,10.7702517637461),(106.67166656035,10.7702521191025),(106.671943689514,10.7704038245574),(106.671943806749,10.7704038886117),(106.6722776446,10.7705859421916),(106.672278295949,10.7705862936499),(106.673020324076,10.7709824352208),(106.673433726727,10.7712057751884),(106.673694081332,10.7713489702214),(106.673977066657,10.7715146655761),(106.674254247937,10.7716778144336),(106.67440928634,10.7717698954974),(106.674658478275,10.7719268836667),(106.674658802254,10.7719270867325),(106.6748919449,10.7720724734391),(106.675071660589,10.7721853602936),(106.675350447469,10.7723606751059),(106.675350748696,10.7723608636368),(106.6756252856,10.7725318758852),(106.675888735092,10.7726957126602),(106.676114500069,10.7728361211927),(106.676379504941,10.7730007692002),(106.67661713771,10.7731502653527),(106.676617572241,10.773150536857),(106.676852995814,10.7732966297465),(106.67
7284352687,10.7735807849214),(106.677738143311,10.7738851794554),(106.677752655777,10.7738929549383),(106.677768414072,10.773897724206),(106.677784802596,10.7738993009456),(106.677801181124,10.7738976235612),(106.677816909825,10.7738927575805),(106.677831374252,10.7738848930944),(106.677844009349,10.7738743373313),(106.677920079221,10.7737967983562),(106.678239245717,10.7735243703649),(106.67839926068,10.7733892116467),(106.678400691571,10.7733879749217),(106.678515896101,10.7732860955802),(106.678557979259,10.7732504310319),(106.67855930664,10.7732492818517),(106.679033975331,10.7728295048433),(106.679053201911,10.772844898411),(106.679632133733,10.7733262832973),(106.679771732358,10.7734524450384),(106.679773325229,10.7734538481348),(106.680011463819,10.7736582857586),(106.680175801881,10.7738018862846),(106.680176891116,10.7738028216402),(106.680320149367,10.773923712053),(106.680672123374,10.7742204563391),(106.68094213423,10.7744504786771),(106.68094233625,10.7744506502241),(106.68124725775,10.7747087432576),(106.681247329066,10.7747088035527),(106.681470746982,10.7748974804345),(106.681471338135,10.7748979749973),(106.681840030697,10.7752035373868),(106.682304929691,10.7756040772245),(106.682308650112,10.7756071005185),(106.682312917236,10.7756103687835),(106.682359764439,10.7756490693986),(106.682640114944,10.7758996628849),(106.682644070655,10.7759029839554),(106.682711710544,10.7759562859055),(106.682806505954,10.7760368956153),(106.68280745353,10.776037689352),(106.683169164535,10.7763361378178),(106.68363265876,10.7767252395911),(106.683677875719,10.7767650291442),(106.683797775698,10.77688614766),(106.684138558845,10.7772306328105),(106.68414063031,10.7772326552454),(106.684827531639,10.777880369263),(106.685228619785,10.7782605077038),(106.685228896163,10.7782607684525),(106.686025996525,10.7790093622583),(106.686026813787,10.7790101368229),(106.68658269265,10.7795369738106),(106.687194479537,10.7801158277128),(106.688401155505,10.7812670656457),(106.688401571342,10.7812674596561),(106.689622367701,10.7824162362891),(106.690002723257,10.7827815572149),(106.690002908997,10.7827817350625),(106.690359062158,10.7831217027417),(106.690359638585,10.7831222477508),(106.690747557266,10.7834855403784),(106.691628272565,10.7843952548301),(106.692179613338,10.7849709155958),(106.692179802225,10.7849711121697),(106.692743910048,10.7855562574979),(106.693288875836,10.7861225208133),(106.693601234729,10.7864484801726),(106.69220838651,10.7875617536129),(106.692196691453,10.787573150248),(106.692187444486,10.7875866094924),(106.692181000965,10.7876016141149),(106.692177608512,10.7876175874962),(106.692177397496,10.7876339157883),(106.692180376026,10.7876499715041),(106.692186429639,10.7876651376314),(106.692195325699,10.7876788313445),(106.692206722334,10.7876905264015),(106.692220181578,10.7876997733682),(106.692235186201,10.7877062168886),(106.692251159582,10.787709609342),(106.692267487874,10.7877098203582),(106.69228354359,10.7877068418281),(106.692298709717,10.7877007882148),(106.69231240343,10.7876918921553),(106.693776442708,10.7865217172423),(106.693788736175,10.7865096022178),(106.693798269005,10.7864952137411),(106.693804631934,10.7864791695437),(106.693807551784,10.7864621584413),(106.693806903199,10.7864449107613),(106.693802714026,10.7864281669878),(106.693795164114,10.786412645971),(106.693784577601,10.7863990140651),(106.69340910087,10.7860071886444),(106.69340897739,10.7860070600637),(106.692863924954,10.7854407067139),(106.69229983717,10.7848555821281),(106.691748435669,10.78
42798579551),(106.691748124777,10.7842795350934),(106.690865834778,10.7833681940925),(106.690862927107,10.7833653342196),(106.690473809086,10.7830009183885),(106.690118035849,10.7826613133679),(106.689737465891,10.7822957865149),(106.689736848623,10.7822951996834),(106.688515950726,10.7811463275029),(106.687309357068,10.7799951680976),(106.687309106711,10.779994930232),(106.686697270266,10.7794160294802),(106.686141416688,10.7788892164565),(106.686140461741,10.7788883114),(106.686140185762,10.7788880510296),(106.6853430856,10.7781394574112),(106.684942058447,10.7777593767781),(106.684941904463,10.7777592312084),(106.684255979358,10.7771124377212),(106.683916204215,10.776768971525),(106.683794256559,10.7766457845149),(106.68379008676,10.7766418525893),(106.683741989497,10.7765995284558),(106.683740519326,10.7765982647987),(106.683276011394,10.7762083120217),(106.683275466929,10.7762078588774),(106.68291395946,10.77590957835),(106.682818451152,10.775828362424),(106.682816046951,10.7758263940715),(106.682749215964,10.7757737295564),(106.682469581984,10.775523776542),(106.682467121137,10.7755216616573),(106.682417839663,10.775480950083),(106.68241543796,10.7754790393628),(106.682411856108,10.7754762959601),(106.681948170223,10.775076801292),(106.681946953215,10.7750757728772),(106.681577943952,10.7747699480145),(106.681354856141,10.7745815499075),(106.681050071432,10.7743235726569),(106.680779998801,10.774093497693),(106.680779672798,10.7740932214111),(106.680427578845,10.7737963760106),(106.680284883706,10.7736759607876),(106.680120811518,10.7735325925854),(106.680120259999,10.7735321149047),(106.679882649978,10.7733281310479),(106.679742564868,10.7732015296478),(106.67973997054,10.7731992804165),(106.679159125009,10.772716304271),(106.679157929246,10.7727153285815),(106.679083371982,10.7726556350576),(106.679069423592,10.7726465921904),(106.679053957365,10.7726404990091),(106.679037589221,10.7726375981655),(106.679020970997,10.7726380051815),(106.679004764489,10.7726417038483),(106.678989615098,10.7726485468719),(106.678976126125,10.772658261739),(106.678449597495,10.7731239014943),(106.678407514754,10.773159565689),(106.678406188192,10.7731607141448),(106.678291034854,10.7732625482153),(106.678131577851,10.7733972356454),(106.678131249559,10.7733975143985),(106.677809116892,10.7736724741964),(106.677803734254,10.7736774962862),(106.67777351642,10.773708297704),(106.677376870851,10.7734422350384),(106.677376291861,10.7734418501559),(106.676943701895,10.7731568826838),(106.676941799819,10.7731556663352),(106.676705634648,10.7730091132449),(106.676468020922,10.7728596290723),(106.676467624617,10.7728593813034),(106.676202468827,10.7726946395397),(106.675976718772,10.7725542402878),(106.675713344944,10.7723904505946),(106.675438984881,10.7722195485022),(106.675160330528,10.7720443170291),(106.674980445983,10.7719313240966),(106.674980215342,10.7719311797465),(106.674747119479,10.7717858222138),(106.674497164595,10.7716283533947),(106.674495300219,10.7716272127471),(106.674339180867,10.7715344896819),(106.674338897981,10.771534322423),(106.674061493048,10.7713710419232),(106.674061328848,10.7713709455279),(106.673777295695,10.7712046366425),(106.673775349509,10.7712035319333),(106.673513740027,10.7710596467179),(106.673513190173,10.7710593469847),(106.673099330442,10.7708357600807),(106.673098966779,10.7708355647753),(106.672357083034,10.7704395002842),(106.672023628724,10.7702576558632),(106.671746880137,10.7701061587426),(106.671518215262,10.7699783515251),(106.671516207112,10.7699772649622),(1
06.671350083838,10.7698903014222),(106.671115399209,10.7697601522552),(106.671113600766,10.7697591835329),(106.670830326847,10.7696110514048),(106.66974820551,10.7689798847013),(106.66969475177,10.7688866833063),(106.669685913661,10.7688741199651),(106.669674918986,10.7688633930448),(106.669662141606,10.7688548673033),(106.668277363011,10.7681053993183),(106.668276514094,10.7681049461882),(106.668126503268,10.7680259842551),(106.668125839186,10.7680237950692),(106.66812072496,10.7680095017658),(106.668117596648,10.7680019493532),(106.66811110606,10.7679882261576),(106.668107252546,10.7679810167398),(106.668099448104,10.7679679958141),(106.668094906497,10.767961198818),(106.668085863361,10.7679490055608),(106.668080677403,10.7679426864524),(106.668070482664,10.7679314382913),(106.668064702296,10.7679256579236),(106.668053454135,10.7679154631847),(106.668047135024,10.7679102772246),(106.668034941766,10.7679012340887),(106.668028144776,10.7678966924853),(106.668015123851,10.7678888880428),(106.668007914429,10.7678850345264),(106.667994191233,10.7678785439383),(106.667986638821,10.7678754156266),(106.667972345518,10.7678703014008),(106.667964522841,10.7678679284177),(106.667949797082,10.7678642398071),(106.667941779481,10.7678626450072),(106.667926763083,10.767860417535),(106.667918627772,10.7678596162768),(106.667903465352,10.7678588713949),(106.667895290678,10.7678588713949),(106.667880128258,10.7678596162768),(106.667871992947,10.767860417535),(106.667856976549,10.7678626450072),(106.667848958948,10.7678642398071),(106.667848526162,10.7678643482145),(106.667629153721,10.7677506481269),(106.667628614008,10.7677503708842),(106.66744662399,10.7676577214203),(106.667216888626,10.7675408306262),(106.667161868227,10.7675128359024),(106.667012119458,10.7674366427911),(106.666659357657,10.7672571553777),(106.666673753979,10.7671954479766),(106.667048293768,10.7672739882109),(106.667012119458,10.7674366427911)] +); -- { serverError BAD_ARGUMENTS } + +SET validate_polygons = 0; + +SELECT pointInPolygon((106.677085876465,10.7744951248169), 
[(106.667161868227,10.7675128359024),(106.667165727127,10.7675059912261),(106.667170817563,10.7674904752629),(106.667229225265,10.7672278502066),(106.667231193621,10.7672115129572),(106.667229912029,10.7671951075415),(106.667225430503,10.767179274157),(106.667217923927,10.7671646306786),(106.667207685234,10.7671517485471),(106.667195113975,10.7671411304688),(106.667180700725,10.7671331907989),(106.66716500794,10.7671282393715),(106.666628232995,10.7670156787539),(106.666612233649,10.7670139127584),(106.666596193354,10.7670152569112),(106.666580711053,10.7670196610218),(106.666566364856,10.7670269606408),(106.666553690448,10.7670368832008),(106.666543161092,10.767049058194),(106.666535169952,10.7670630310067),(106.666530015418,10.7670782798948),(106.666482284259,10.7672828714379),(106.666480170141,10.7672985245675),(106.666481048788,10.7673142953614),(106.666484888609,10.7673296167758),(106.666491551541,10.7673439379244),(106.666500798017,10.7673567438858),(106.666512295576,10.7673675742178),(106.666525630821,10.7673760395122),(106.667032331859,10.7676338521733),(106.6671413386,10.7676893154858),(106.667371048786,10.7678061934666),(106.667552760053,10.7678987010209),(106.667801848625,10.7680278028917),(106.667817742281,10.7680340673957),(106.667834579682,10.7680369577679),(106.66785165264,10.7680363524383),(106.667868243061,10.7680322768672),(106.667878683314,10.7680285412847),(106.667885469819,10.7680268413536),(106.667892390269,10.7680258148018),(106.667899378015,10.7680254715159),(106.667906365761,10.7680258148018),(106.667913286211,10.7680268413536),(106.667920072716,10.7680285412847),(106.667926659921,10.7680308982244),(106.667932984386,10.7680338894736),(106.667938985204,10.7680374862253),(106.667944604583,10.7680416538412),(106.667949788405,10.7680463521828),(106.667954486747,10.7680515360051),(106.667958654362,10.7680571553826),(106.667962251113,10.7680631561994),(106.667965242363,10.7680694806664),(106.667967599303,10.7680760678724),(106.667969299234,10.7680828543774),(106.667970926246,10.7680938227996),(106.667974657027,10.7681089916695),(106.667981154238,10.7681231972879),(106.667990189396,10.7681359400994),(106.668001444773,10.7681467719897),(106.668014524559,10.7681553120441),(106.668198488147,10.7682521458591),(106.669562015793,10.7689901124345),(106.669614757162,10.7690820717448),(106.669623023723,10.7690939566151),(106.669633223154,10.7691042307472),(106.669645047385,10.7691125838155),(106.670748051536,10.7697559307954),(106.670751419717,10.7697577924329),(106.671035494073,10.7699063431327),(106.671270162713,10.7700364834325),(106.67127192876,10.7700374352053),(106.671437929267,10.7701243344783),(106.671665917937,10.7702517637461),(106.67166656035,10.7702521191025),(106.671943689514,10.7704038245574),(106.671943806749,10.7704038886117),(106.6722776446,10.7705859421916),(106.672278295949,10.7705862936499),(106.673020324076,10.7709824352208),(106.673433726727,10.7712057751884),(106.673694081332,10.7713489702214),(106.673977066657,10.7715146655761),(106.674254247937,10.7716778144336),(106.67440928634,10.7717698954974),(106.674658478275,10.7719268836667),(106.674658802254,10.7719270867325),(106.6748919449,10.7720724734391),(106.675071660589,10.7721853602936),(106.675350447469,10.7723606751059),(106.675350748696,10.7723608636368),(106.6756252856,10.7725318758852),(106.675888735092,10.7726957126602),(106.676114500069,10.7728361211927),(106.676379504941,10.7730007692002),(106.67661713771,10.7731502653527),(106.676617572241,10.773150536857),(106.676852995814,10.7732966297465),(106.67
7284352687,10.7735807849214),(106.677738143311,10.7738851794554),(106.677752655777,10.7738929549383),(106.677768414072,10.773897724206),(106.677784802596,10.7738993009456),(106.677801181124,10.7738976235612),(106.677816909825,10.7738927575805),(106.677831374252,10.7738848930944),(106.677844009349,10.7738743373313),(106.677920079221,10.7737967983562),(106.678239245717,10.7735243703649),(106.67839926068,10.7733892116467),(106.678400691571,10.7733879749217),(106.678515896101,10.7732860955802),(106.678557979259,10.7732504310319),(106.67855930664,10.7732492818517),(106.679033975331,10.7728295048433),(106.679053201911,10.772844898411),(106.679632133733,10.7733262832973),(106.679771732358,10.7734524450384),(106.679773325229,10.7734538481348),(106.680011463819,10.7736582857586),(106.680175801881,10.7738018862846),(106.680176891116,10.7738028216402),(106.680320149367,10.773923712053),(106.680672123374,10.7742204563391),(106.68094213423,10.7744504786771),(106.68094233625,10.7744506502241),(106.68124725775,10.7747087432576),(106.681247329066,10.7747088035527),(106.681470746982,10.7748974804345),(106.681471338135,10.7748979749973),(106.681840030697,10.7752035373868),(106.682304929691,10.7756040772245),(106.682308650112,10.7756071005185),(106.682312917236,10.7756103687835),(106.682359764439,10.7756490693986),(106.682640114944,10.7758996628849),(106.682644070655,10.7759029839554),(106.682711710544,10.7759562859055),(106.682806505954,10.7760368956153),(106.68280745353,10.776037689352),(106.683169164535,10.7763361378178),(106.68363265876,10.7767252395911),(106.683677875719,10.7767650291442),(106.683797775698,10.77688614766),(106.684138558845,10.7772306328105),(106.68414063031,10.7772326552454),(106.684827531639,10.777880369263),(106.685228619785,10.7782605077038),(106.685228896163,10.7782607684525),(106.686025996525,10.7790093622583),(106.686026813787,10.7790101368229),(106.68658269265,10.7795369738106),(106.687194479537,10.7801158277128),(106.688401155505,10.7812670656457),(106.688401571342,10.7812674596561),(106.689622367701,10.7824162362891),(106.690002723257,10.7827815572149),(106.690002908997,10.7827817350625),(106.690359062158,10.7831217027417),(106.690359638585,10.7831222477508),(106.690747557266,10.7834855403784),(106.691628272565,10.7843952548301),(106.692179613338,10.7849709155958),(106.692179802225,10.7849711121697),(106.692743910048,10.7855562574979),(106.693288875836,10.7861225208133),(106.693601234729,10.7864484801726),(106.69220838651,10.7875617536129),(106.692196691453,10.787573150248),(106.692187444486,10.7875866094924),(106.692181000965,10.7876016141149),(106.692177608512,10.7876175874962),(106.692177397496,10.7876339157883),(106.692180376026,10.7876499715041),(106.692186429639,10.7876651376314),(106.692195325699,10.7876788313445),(106.692206722334,10.7876905264015),(106.692220181578,10.7876997733682),(106.692235186201,10.7877062168886),(106.692251159582,10.787709609342),(106.692267487874,10.7877098203582),(106.69228354359,10.7877068418281),(106.692298709717,10.7877007882148),(106.69231240343,10.7876918921553),(106.693776442708,10.7865217172423),(106.693788736175,10.7865096022178),(106.693798269005,10.7864952137411),(106.693804631934,10.7864791695437),(106.693807551784,10.7864621584413),(106.693806903199,10.7864449107613),(106.693802714026,10.7864281669878),(106.693795164114,10.786412645971),(106.693784577601,10.7863990140651),(106.69340910087,10.7860071886444),(106.69340897739,10.7860070600637),(106.692863924954,10.7854407067139),(106.69229983717,10.7848555821281),(106.691748435669,10.78
42798579551),(106.691748124777,10.7842795350934),(106.690865834778,10.7833681940925),(106.690862927107,10.7833653342196),(106.690473809086,10.7830009183885),(106.690118035849,10.7826613133679),(106.689737465891,10.7822957865149),(106.689736848623,10.7822951996834),(106.688515950726,10.7811463275029),(106.687309357068,10.7799951680976),(106.687309106711,10.779994930232),(106.686697270266,10.7794160294802),(106.686141416688,10.7788892164565),(106.686140461741,10.7788883114),(106.686140185762,10.7788880510296),(106.6853430856,10.7781394574112),(106.684942058447,10.7777593767781),(106.684941904463,10.7777592312084),(106.684255979358,10.7771124377212),(106.683916204215,10.776768971525),(106.683794256559,10.7766457845149),(106.68379008676,10.7766418525893),(106.683741989497,10.7765995284558),(106.683740519326,10.7765982647987),(106.683276011394,10.7762083120217),(106.683275466929,10.7762078588774),(106.68291395946,10.77590957835),(106.682818451152,10.775828362424),(106.682816046951,10.7758263940715),(106.682749215964,10.7757737295564),(106.682469581984,10.775523776542),(106.682467121137,10.7755216616573),(106.682417839663,10.775480950083),(106.68241543796,10.7754790393628),(106.682411856108,10.7754762959601),(106.681948170223,10.775076801292),(106.681946953215,10.7750757728772),(106.681577943952,10.7747699480145),(106.681354856141,10.7745815499075),(106.681050071432,10.7743235726569),(106.680779998801,10.774093497693),(106.680779672798,10.7740932214111),(106.680427578845,10.7737963760106),(106.680284883706,10.7736759607876),(106.680120811518,10.7735325925854),(106.680120259999,10.7735321149047),(106.679882649978,10.7733281310479),(106.679742564868,10.7732015296478),(106.67973997054,10.7731992804165),(106.679159125009,10.772716304271),(106.679157929246,10.7727153285815),(106.679083371982,10.7726556350576),(106.679069423592,10.7726465921904),(106.679053957365,10.7726404990091),(106.679037589221,10.7726375981655),(106.679020970997,10.7726380051815),(106.679004764489,10.7726417038483),(106.678989615098,10.7726485468719),(106.678976126125,10.772658261739),(106.678449597495,10.7731239014943),(106.678407514754,10.773159565689),(106.678406188192,10.7731607141448),(106.678291034854,10.7732625482153),(106.678131577851,10.7733972356454),(106.678131249559,10.7733975143985),(106.677809116892,10.7736724741964),(106.677803734254,10.7736774962862),(106.67777351642,10.773708297704),(106.677376870851,10.7734422350384),(106.677376291861,10.7734418501559),(106.676943701895,10.7731568826838),(106.676941799819,10.7731556663352),(106.676705634648,10.7730091132449),(106.676468020922,10.7728596290723),(106.676467624617,10.7728593813034),(106.676202468827,10.7726946395397),(106.675976718772,10.7725542402878),(106.675713344944,10.7723904505946),(106.675438984881,10.7722195485022),(106.675160330528,10.7720443170291),(106.674980445983,10.7719313240966),(106.674980215342,10.7719311797465),(106.674747119479,10.7717858222138),(106.674497164595,10.7716283533947),(106.674495300219,10.7716272127471),(106.674339180867,10.7715344896819),(106.674338897981,10.771534322423),(106.674061493048,10.7713710419232),(106.674061328848,10.7713709455279),(106.673777295695,10.7712046366425),(106.673775349509,10.7712035319333),(106.673513740027,10.7710596467179),(106.673513190173,10.7710593469847),(106.673099330442,10.7708357600807),(106.673098966779,10.7708355647753),(106.672357083034,10.7704395002842),(106.672023628724,10.7702576558632),(106.671746880137,10.7701061587426),(106.671518215262,10.7699783515251),(106.671516207112,10.7699772649622),(1
06.671350083838,10.7698903014222),(106.671115399209,10.7697601522552),(106.671113600766,10.7697591835329),(106.670830326847,10.7696110514048),(106.66974820551,10.7689798847013),(106.66969475177,10.7688866833063),(106.669685913661,10.7688741199651),(106.669674918986,10.7688633930448),(106.669662141606,10.7688548673033),(106.668277363011,10.7681053993183),(106.668276514094,10.7681049461882),(106.668126503268,10.7680259842551),(106.668125839186,10.7680237950692),(106.66812072496,10.7680095017658),(106.668117596648,10.7680019493532),(106.66811110606,10.7679882261576),(106.668107252546,10.7679810167398),(106.668099448104,10.7679679958141),(106.668094906497,10.767961198818),(106.668085863361,10.7679490055608),(106.668080677403,10.7679426864524),(106.668070482664,10.7679314382913),(106.668064702296,10.7679256579236),(106.668053454135,10.7679154631847),(106.668047135024,10.7679102772246),(106.668034941766,10.7679012340887),(106.668028144776,10.7678966924853),(106.668015123851,10.7678888880428),(106.668007914429,10.7678850345264),(106.667994191233,10.7678785439383),(106.667986638821,10.7678754156266),(106.667972345518,10.7678703014008),(106.667964522841,10.7678679284177),(106.667949797082,10.7678642398071),(106.667941779481,10.7678626450072),(106.667926763083,10.767860417535),(106.667918627772,10.7678596162768),(106.667903465352,10.7678588713949),(106.667895290678,10.7678588713949),(106.667880128258,10.7678596162768),(106.667871992947,10.767860417535),(106.667856976549,10.7678626450072),(106.667848958948,10.7678642398071),(106.667848526162,10.7678643482145),(106.667629153721,10.7677506481269),(106.667628614008,10.7677503708842),(106.66744662399,10.7676577214203),(106.667216888626,10.7675408306262),(106.667161868227,10.7675128359024),(106.667012119458,10.7674366427911),(106.666659357657,10.7672571553777),(106.666673753979,10.7671954479766),(106.667048293768,10.7672739882109),(106.667012119458,10.7674366427911)]); diff --git a/parser/testdata/00500_point_in_polygon_empty_bound/ast.json b/parser/testdata/00500_point_in_polygon_empty_bound/ast.json new file mode 100644 index 000000000..b81ff4fed --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_empty_bound/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00114868, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00500_point_in_polygon_empty_bound/metadata.json b/parser/testdata/00500_point_in_polygon_empty_bound/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_empty_bound/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00500_point_in_polygon_empty_bound/query.sql b/parser/testdata/00500_point_in_polygon_empty_bound/query.sql new file mode 100644 index 000000000..d875979a7 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_empty_bound/query.sql @@ -0,0 +1,70 @@ +SET validate_polygons = 0; + +SELECT 'Polygon: Point'; +SELECT pointInPolygon((0., 0.), [(0., 0.)]); +SELECT pointInPolygon((1., 1.), [(0., 0.)]); + +SELECT 'Polygon: Straight Segment'; +SELECT pointInPolygon((0., 0.), [(0., 0.), (0., 0.)]); +SELECT pointInPolygon((1., 1.), [(0., 0.), (0., 0.)]); +SELECT pointInPolygon((5., 5.), [(0., 0.), (5., 0.), (10., 0.)]); + +SELECT 'Polygon: Empty Bound Hole'; +SELECT pointInPolygon((2., 2.), [(0., 0.), (5., 5.), (5., 0.)], [(2., 2.)]); +SELECT pointInPolygon((2., 2.), [(0., 
0.), (5., 5.), (5., 0.)], [(2., 2.), (5., 2.)]); + +SELECT 'Polygon: Empty Bound Outer Ring'; +SELECT pointInPolygon((2.5, 2.5), [(0., 0.), (5., 0.), (10., 0.)], [(2., 2.), (3., 3.), (3., 2.)]); +SELECT pointInPolygon((1., 1.), [(0., 0.), (5., 0.), (10., 0.)], [(2., 2.), (3., 3.), (3., 2.)]); + + +SELECT 'MultiPolygon: Some Empty Bound Polygon, Others good'; +DROP TABLE IF EXISTS points_test; + +CREATE TABLE points_test +( + x Float64, + y Float64, + note String +) +ENGINE = TinyLog; + +INSERT INTO points_test (x, y, note) VALUES +(3, 3, 'poly-0 | hole-0'), +(7, 3, 'poly-0 | hole-1'), +(5, 7, 'poly-0 | hole-2'), +(1, 1, 'poly-0 solid'), +(9, 9, 'poly-0 solid'), +(23, 3, 'poly-1 | hole-0'), +(27, 3, 'poly-1 | hole-1'), +(25, 7, 'poly-1 | hole-2'), +(21, 1, 'poly-1 solid'), +(29, 9, 'poly-1 solid'), +(-1,-1, 'outside all'), +(15, 5, 'outside all'), +(35, 5, 'outside all'), +(-10, -10, 'outside all (on empty bound polygon)'), +(500, 3, 'outside all (on empty bound polygon)'); + +SELECT x, y, note, +pointInPolygon( (x, y), +[ + [ [(0, 0), (10, 0), (10, 10), (0, 10), (0, 0)], + [(2, 2), (4, 2), (4, 4), (2, 4), (2, 2)], + [(6, 2), (8, 2), (8, 4), (6, 4), (6, 2)], + [(4, 6), (6, 6), (6, 8), (4, 8), (4, 6)] ], + [ [(20, 0), (30, 0), (30, 10), (20, 10),(20, 0)], + [(22, 2), (24, 2), (24, 4), (22, 4), (22, 2)], + [(26, 2), (28, 2), (28, 4), (26, 4), (26, 2)], + [(24, 6), (26, 6), (26, 8), (24, 8), (24, 6)] ], + [ [(-10, -10)] ], -- Empty Bound Polygon + [ [(3, 3), (100, 3), (500, 3)] ] -- Empty Bound Polygon +]) AS inside +FROM points_test +ORDER BY note, x, y; + +DROP TABLE IF EXISTS points_test; + +SELECT 'MultiPolygon: All Empty Bound Polygon'; +SELECT pointInPolygon((0., 0.), [[(0, 0)]]); +SELECT pointInPolygon((5., 5.), [[(0, 0)], [(1, 5), (5, 5), (10, 5)]]); diff --git a/parser/testdata/00500_point_in_polygon_nan/ast.json b/parser/testdata/00500_point_in_polygon_nan/ast.json new file mode 100644 index 000000000..49274b67c --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_nan/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 1)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Function pointInPolygon (children 1)" + }, + { + "explain": "     ExpressionList (children 2)" + }, + { + "explain": "      Literal Tuple_(Float64_nan, Float64_10.000100135803223)" + }, + { + "explain": "      Function array (children 1)" + }, + { + "explain": "       ExpressionList (children 8)" + }, + { + "explain": "        Literal Tuple_(Float64_39.83154, Float64_21.41527)" + }, + { + "explain": "        Literal Tuple_(Float64_2, Float64_1000.0001220703125)" + }, + { + "explain": "        Literal Tuple_(Float64_39.90033, Float64_21.37195)" + }, + { + "explain": "        Literal Tuple_(Float64_1.000100016593933, Float64_10.000100135803223)" + }, + { + "explain": "        Literal Tuple_(Float64_39.83051, Float64_21.42553)" + }, + { + "explain": "        Literal Tuple_(Float64_39.82898, Float64_21.41382)" + }, + { + "explain": "        Literal Tuple_(Float64_39.83043, Float64_21.41432)" + }, + { + "explain": "        Literal Tuple_(Float64_39.83154, Float64_21.41527)" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001090062, + "rows_read": 17, + "bytes_read": 939 + } +} diff --git a/parser/testdata/00500_point_in_polygon_nan/metadata.json b/parser/testdata/00500_point_in_polygon_nan/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_nan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00500_point_in_polygon_nan/query.sql b/parser/testdata/00500_point_in_polygon_nan/query.sql new file mode 100644 index 000000000..37ed8dbed --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_nan/query.sql @@ -0,0 +1 @@ +SELECT pointInPolygon((nan, 10.000100135803223), [(39.83154, 21.41527), (2., 1000.0001220703125), (39.90033, 21.37195), (1.000100016593933, 10.000100135803223), (39.83051, 21.42553), (39.82898, 21.41382), (39.83043, 21.41432), (39.83154, 21.41527)]); diff --git a/parser/testdata/00500_point_in_polygon_non_const_poly/ast.json b/parser/testdata/00500_point_in_polygon_non_const_poly/ast.json new file mode 100644 index 000000000..d178afc60 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_non_const_poly/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery polygons (children 1)" + }, + { + "explain": " Identifier polygons" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001091191, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00500_point_in_polygon_non_const_poly/metadata.json b/parser/testdata/00500_point_in_polygon_non_const_poly/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_non_const_poly/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00500_point_in_polygon_non_const_poly/query.sql b/parser/testdata/00500_point_in_polygon_non_const_poly/query.sql new file mode 100644 index 000000000..4c4e2b74d --- /dev/null +++ b/parser/testdata/00500_point_in_polygon_non_const_poly/query.sql @@ -0,0 +1,136 @@ +DROP TABLE IF EXISTS polygons; + +SELECT 'Const point; No holes'; +create table polygons ( id Int32, poly Array(Tuple(Int32, Int32))) engine = Log(); + +INSERT INTO polygons VALUES (1, [(0, 0), (10, 0), (10, 10), (0, 10)]), + (2, [(-5, -5), (5, -5), (5, 5), (-5, 5)]); + +SELECT pointInPolygon((-9, 0), poly) FROM polygons ORDER BY id; +SELECT pointInPolygon((0, -9), poly) FROM polygons ORDER BY id; +SELECT pointInPolygon((-4, -4), poly) FROM polygons ORDER BY id; +SELECT pointInPolygon((0, 0), poly) FROM polygons ORDER BY id; +SELECT pointInPolygon((4, 4), poly) FROM polygons ORDER BY id; +SELECT pointInPolygon((9, 9), poly) FROM polygons ORDER BY id; +SELECT pointInPolygon((9, 4), poly) FROM polygons ORDER BY id; +SELECT pointInPolygon((4, 9), poly) FROM polygons ORDER BY id; + +DROP TABLE polygons; + +SELECT 'Non-const point; No holes'; + +create table polygons ( id Int32, pt Tuple(Int32, Int32), poly Array(Tuple(Int32, Int32))) engine = Log(); + +INSERT INTO polygons VALUES (1, (-9, 0), [(0, 0), (10, 0), (10, 10), (0, 10)]), + (2, (-9, 0), [(-5, -5), (5, -5), (5, 5), (-5, 5)]), + (3, (0, -9), [(0, 0), (10, 0), (10, 10), (0, 10)]), + (4, (0, -9), [(-5, -5), (5, -5), (5, 5), (-5, 5)]), + (5, (-4, -4), [(0, 0), (10, 0), (10, 10), (0, 10)]), + (6, (-4, -4), [(-5, -5), (5, -5), (5, 5), (-5, 5)]), + (7, (0, 0), [(0, 0), (10, 0), (10, 10), (0, 10)]), + (8, (0, 0), [(-5, -5), (5, -5), (5, 5), (-5, 5)]), + (9, (4, 4), [(0, 0), (10, 0), (10, 10), (0, 10)]), + (10, (4, 4), [(-5, -5), (5, -5), (5, 5), (-5, 5)]), + (11, (9, 9), [(0, 0), (10, 0), (10, 10), (0, 10)]), + (12, (9, 9), [(-5, -5), (5, -5), (5, 5), (-5, 5)]), + (13, (9, 4), [(0, 0), (10, 0), (10, 10), (0, 10)]), + (14, (9, 4), [(-5, -5), (5, 
-5), (5, 5), (-5, 5)]), + (15, (4, 9), [(0, 0), (10, 0), (10, 10), (0, 10)]), + (16, (4, 9), [(-5, -5), (5, -5), (5, 5), (-5, 5)]); + +SELECT pointInPolygon(pt, poly) FROM polygons ORDER BY id; + +DROP TABLE polygons; + +SELECT 'Const point; With holes'; + +create table polygons ( id Int32, poly Array(Array(Tuple(Int32, Int32)))) engine = Log(); + +INSERT INTO polygons VALUES (1, [[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]), + (2, [[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]); + +SELECT pointInPolygon((-9, 0), poly) FROM polygons ORDER BY id; +SELECT pointInPolygon((0, -9), poly) FROM polygons ORDER BY id; +SELECT pointInPolygon((-4, -4), poly) FROM polygons ORDER BY id; +SELECT pointInPolygon((0, 0), poly) FROM polygons ORDER BY id; +SELECT pointInPolygon((4, 4), poly) FROM polygons ORDER BY id; +SELECT pointInPolygon((9, 9), poly) FROM polygons ORDER BY id; +SELECT pointInPolygon((9, 4), poly) FROM polygons ORDER BY id; +SELECT pointInPolygon((4, 9), poly) FROM polygons ORDER BY id; + +DROP TABLE polygons; + +SELECT 'Non-const point; With holes'; + +create table polygons ( id Int32, pt Tuple(Int32, Int32), poly Array(Array(Tuple(Int32, Int32)))) engine = Log(); + +INSERT INTO polygons VALUES (1, (-9, 0), [[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]), + (2, (-9, 0), [[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]), + (3, (0, -9), [[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]), + (4, (0, -9), [[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]), + (5, (-4, -4), [[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]), + (6, (-4, -4), [[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]), + (7, (0, 0), [[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]), + (8, (0, 0), [[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]), + (9, (4, 4), [[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]), + (10, (4, 4), [[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]), + (11, (9, 9), [[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]), + (12, (9, 9), [[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]), + (13, (9, 4), [[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]), + (14, (9, 4), [[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]), + (15, (4, 9), [[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]), + (16, (4, 9), [[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]); + +SELECT pointInPolygon(pt, poly) FROM polygons ORDER BY id; + +DROP TABLE polygons; + +SELECT 'Non-const point; MultiPolygon with holes (same as above)'; + +create table polygons ( id Int32, pt Tuple(Int32, Int32), poly Array(Array(Array(Tuple(Int32, Int32))))) engine = Log(); + +INSERT INTO polygons VALUES (1, (-9, 0), [[[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]]), + (2, (-9, 0), [[[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]]), + (3, (0, -9), [[[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]]), + (4, (0, -9), [[[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]]), + (5, (-4, -4), [[[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]]), + (6, (-4, -4), [[[(-5, -5), (5, 
-5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]]), + (7, (0, 0), [[[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]]), + (8, (0, 0), [[[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]]), + (9, (4, 4), [[[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]]), + (10, (4, 4), [[[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]]), + (11, (9, 9), [[[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]]), + (12, (9, 9), [[[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]]), + (13, (9, 4), [[[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]]), + (14, (9, 4), [[[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]]), + (15, (4, 9), [[[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (6, 4), (6, 6), (4, 6)]]]), + (16, (4, 9), [[[(-5, -5), (5, -5), (5, 5), (-5, 5)], [(-1, -1), (1, -1), (1, 1), (-1, 1)]]]); + +SELECT pointInPolygon(pt, poly) FROM polygons ORDER BY id; + +DROP TABLE polygons; + +SELECT 'Non-const point; MultiPolygon with two polygons (each has a hole)'; + +create table polygons +( + id Int32, + pt Tuple(Int32, Int32), + poly Array(Array(Array(Tuple(Int32, Int32)))) +) engine = Log(); + +INSERT INTO polygons VALUES +(1, ( 2, 2), [[[(0,0),(10,0),(10,10),(0,10)],[(4,4),(6,4),(6,6),(4,6)]],[ [(-10,-10),(0,-10),(0,0),(-10,0)],[(-9,-9),(-1,-9),(-1,-1),(-9,-1)] ]]), +(2, ( 0, 0), [[[(-5,-5),(5,-5),(5,5),(-5,5)],[(-1,-1),(1,-1),(1,1),(-1,1)]],[ [(15,15),(25,15),(25,25),(15,25)],[(18,18),(22,18),(22,22),(18,22)] ]]), +(3, ( 20, 20), [[[(0,0),(10,0),(10,10),(0,10)],[(4,4),(6,4),(6,6),(4,6)]],[ [(-10,-10),(0,-10),(0,0),(-10,0)],[(-9,-9),(-1,-9),(-1,-1),(-9,-1)] ]]), +(4, ( 16, 16), [[[(-5,-5),(5,-5),(5,5),(-5,5)],[(-1,-1),(1,-1),(1,1),(-1,1)]],[ [(15,15),(25,15),(25,25),(15,25)],[(18,18),(22,18),(22,22),(18,22)] ]]), +(5, (-5, -5), [[[(0,0),(10,0),(10,10),(0,10)],[(4,4),(6,4),(6,6),(4,6)]],[ [(-10,-10),(0,-10),(0,0),(-10,0)],[(-9,-9),(-1,-9),(-1,-1),(-9,-1)] ]]), +(6, (-10,-10), [[[(-5,-5),(5,-5),(5,5),(-5,5)],[(-1,-1),(1,-1),(1,1),(-1,1)]],[ [(15,15),(25,15),(25,25),(15,25)],[(18,18),(22,18),(22,22),(18,22)] ]]), +(7, ( 5, 9), [[[(0,0),(10,0),(10,10),(0,10)],[(4,4),(6,4),(6,6),(4,6)]],[ [(-10,-10),(0,-10),(0,0),(-10,0)],[(-9,-9),(-1,-9),(-1,-1),(-9,-1)] ]]), +(8, ( 3, 3), [[[(-5,-5),(5,-5),(5,5),(-5,5)],[(-1,-1),(1,-1),(1,1),(-1,1)]],[ [(15,15),(25,15),(25,25),(15,25)],[(18,18),(22,18),(22,22),(18,22)] ]]), +(9, ( 4, 4), [[[(0,0),(10,0),(10,10),(0,10)],[(4,4),(6,4),(6,6),(4,6)]],[ [(-10,-10),(0,-10),(0,0),(-10,0)],[(-9,-9),(-1,-9),(-1,-1),(-9,-1)] ]]), +(10, ( 19, 19), [[[(-5,-5),(5,-5),(5,5),(-5,5)],[(-1,-1),(1,-1),(1,1),(-1,1)]],[ [(15,15),(25,15),(25,25),(15,25)],[(18,18),(22,18),(22,22),(18,22)] ]]); + +SELECT pointInPolygon(pt, poly) FROM polygons ORDER BY id; + +DROP TABLE polygons; diff --git a/parser/testdata/00502_custom_partitioning_local/ast.json b/parser/testdata/00502_custom_partitioning_local/ast.json new file mode 100644 index 000000000..b81b430f4 --- /dev/null +++ b/parser/testdata/00502_custom_partitioning_local/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '*** Not partitioned ***'" + } 
+ ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001184344, + "rows_read": 5, + "bytes_read": 194 + } +} diff --git a/parser/testdata/00502_custom_partitioning_local/metadata.json b/parser/testdata/00502_custom_partitioning_local/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00502_custom_partitioning_local/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00502_custom_partitioning_local/query.sql b/parser/testdata/00502_custom_partitioning_local/query.sql new file mode 100644 index 000000000..a116f8bca --- /dev/null +++ b/parser/testdata/00502_custom_partitioning_local/query.sql @@ -0,0 +1,109 @@ +SELECT '*** Not partitioned ***'; + +DROP TABLE IF EXISTS not_partitioned; +CREATE TABLE not_partitioned(x UInt8) ENGINE MergeTree ORDER BY x; + +INSERT INTO not_partitioned VALUES (1), (2), (3); +INSERT INTO not_partitioned VALUES (4), (5); + +SELECT 'Parts before OPTIMIZE:'; +SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'not_partitioned' AND active ORDER BY name; +OPTIMIZE TABLE not_partitioned PARTITION tuple() FINAL; +SELECT 'Parts after OPTIMIZE:'; +SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'not_partitioned' AND active ORDER BY name; + +SELECT 'Sum before DETACH PARTITION:'; +SELECT sum(x) FROM not_partitioned; +ALTER TABLE not_partitioned DETACH PARTITION ID 'all'; +SELECT 'Sum after DETACH PARTITION:'; +SELECT sum(x) FROM not_partitioned; +SELECT 'system.detached_parts after DETACH PARTITION:'; +SELECT system.detached_parts.* EXCEPT (bytes_on_disk, `path`, disk, modification_time) FROM system.detached_parts WHERE database = currentDatabase() AND table = 'not_partitioned'; + +DROP TABLE not_partitioned; + +SELECT '*** Partitioned by week ***'; + +DROP TABLE IF EXISTS partitioned_by_week; +CREATE TABLE partitioned_by_week(d Date, x UInt8) ENGINE = MergeTree PARTITION BY toMonday(d) ORDER BY x; + +-- 2000-01-03 belongs to a different week than 2000-01-01 and 2000-01-02 +INSERT INTO partitioned_by_week VALUES ('2000-01-01', 1), ('2000-01-02', 2), ('2000-01-03', 3); +INSERT INTO partitioned_by_week VALUES ('2000-01-03', 4), ('2000-01-03', 5); + +SELECT 'Parts before OPTIMIZE:'; +SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'partitioned_by_week' AND active ORDER BY name; +OPTIMIZE TABLE partitioned_by_week PARTITION '2000-01-03' FINAL; +SELECT 'Parts after OPTIMIZE:'; +SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'partitioned_by_week' AND active ORDER BY name; + +SELECT 'Sum before DROP PARTITION:'; +SELECT sum(x) FROM partitioned_by_week; +ALTER TABLE partitioned_by_week DROP PARTITION '1999-12-27'; +SELECT 'Sum after DROP PARTITION:'; +SELECT sum(x) FROM partitioned_by_week; + +DROP TABLE partitioned_by_week; + +SELECT '*** Partitioned by a (Date, UInt8) tuple ***'; + +DROP TABLE IF EXISTS partitioned_by_tuple; +CREATE TABLE partitioned_by_tuple(d Date, x UInt8, y UInt8) ENGINE MergeTree ORDER BY x PARTITION BY (d, x); + +INSERT INTO partitioned_by_tuple VALUES ('2000-01-01', 1, 1), ('2000-01-01', 2, 2), ('2000-01-02', 1, 3); +INSERT INTO partitioned_by_tuple VALUES ('2000-01-02', 1, 4), ('2000-01-01', 1, 5); + +SELECT 'Parts before OPTIMIZE:'; +SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'partitioned_by_tuple' AND active ORDER BY name; +OPTIMIZE TABLE partitioned_by_tuple PARTITION ('2000-01-01', 
1) FINAL; +OPTIMIZE TABLE partitioned_by_tuple PARTITION ('2000-01-02', 1) FINAL; +SELECT 'Parts after OPTIMIZE:'; +SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'partitioned_by_tuple' AND active ORDER BY name; + +SELECT 'Sum before DETACH PARTITION:'; +SELECT sum(y) FROM partitioned_by_tuple; +ALTER TABLE partitioned_by_tuple DETACH PARTITION ID '20000101-1'; +SELECT 'Sum after DETACH PARTITION:'; +SELECT sum(y) FROM partitioned_by_tuple; + +DROP TABLE partitioned_by_tuple; + +SELECT '*** Partitioned by String ***'; + +DROP TABLE IF EXISTS partitioned_by_string; +CREATE TABLE partitioned_by_string(s String, x UInt8) ENGINE = MergeTree PARTITION BY s ORDER BY x; + +INSERT INTO partitioned_by_string VALUES ('aaa', 1), ('aaa', 2), ('bbb', 3); +INSERT INTO partitioned_by_string VALUES ('bbb', 4), ('aaa', 5); + +SELECT 'Parts before OPTIMIZE:'; +SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'partitioned_by_string' AND active ORDER BY name; +OPTIMIZE TABLE partitioned_by_string PARTITION 'aaa' FINAL; +SELECT 'Parts after OPTIMIZE:'; +SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'partitioned_by_string' AND active ORDER BY name; + +SELECT 'Sum before DROP PARTITION:'; +SELECT sum(x) FROM partitioned_by_string; +ALTER TABLE partitioned_by_string DROP PARTITION 'bbb'; +SELECT 'Sum after DROP PARTITION:'; +SELECT sum(x) FROM partitioned_by_string; + +DROP TABLE partitioned_by_string; + +SELECT '*** Table without columns with fixed size ***'; + +DROP TABLE IF EXISTS without_fixed_size_columns; +CREATE TABLE without_fixed_size_columns(s String) ENGINE MergeTree PARTITION BY length(s) ORDER BY s; + +INSERT INTO without_fixed_size_columns VALUES ('a'), ('aa'), ('b'), ('cc'); + +SELECT 'Parts:'; +SELECT partition, name, rows FROM system.parts WHERE database = currentDatabase() AND table = 'without_fixed_size_columns' AND active ORDER BY name; + +SELECT 'Before DROP PARTITION:'; +SELECT * FROM without_fixed_size_columns ORDER BY s; +ALTER TABLE without_fixed_size_columns DROP PARTITION 1; +SELECT 'After DROP PARTITION:'; +SELECT * FROM without_fixed_size_columns ORDER BY s; + +DROP TABLE without_fixed_size_columns; diff --git a/parser/testdata/00502_custom_partitioning_replicated_zookeeper_long/ast.json b/parser/testdata/00502_custom_partitioning_replicated_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00502_custom_partitioning_replicated_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00502_custom_partitioning_replicated_zookeeper_long/metadata.json b/parser/testdata/00502_custom_partitioning_replicated_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00502_custom_partitioning_replicated_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00502_custom_partitioning_replicated_zookeeper_long/query.sql b/parser/testdata/00502_custom_partitioning_replicated_zookeeper_long/query.sql new file mode 100644 index 000000000..06484f53d --- /dev/null +++ b/parser/testdata/00502_custom_partitioning_replicated_zookeeper_long/query.sql @@ -0,0 +1,141 @@ +-- Tags: long, replica, no-shared-merge-tree +-- no-shared-merge-tree: different synchronization + +SET replication_alter_partitions_sync = 2; +SET insert_keeper_fault_injection_probability=0; + +SELECT '*** Not partitioned ***'; + +DROP TABLE IF 
EXISTS not_partitioned_replica1_00502 SYNC; +DROP TABLE IF EXISTS not_partitioned_replica2_00502 SYNC; +CREATE TABLE not_partitioned_replica1_00502(x UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/not_partitioned_00502', '1') ORDER BY x; +CREATE TABLE not_partitioned_replica2_00502(x UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/not_partitioned_00502', '2') ORDER BY x; + +INSERT INTO not_partitioned_replica1_00502 VALUES (1), (2), (3); +INSERT INTO not_partitioned_replica1_00502 VALUES (4), (5); + +SELECT 'Parts before OPTIMIZE:'; +SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'not_partitioned_replica1_00502' AND active ORDER BY name; +SYSTEM SYNC REPLICA not_partitioned_replica1_00502 PULL; +SYSTEM SYNC REPLICA not_partitioned_replica2_00502; +OPTIMIZE TABLE not_partitioned_replica1_00502 PARTITION tuple() FINAL; +SELECT 'Parts after OPTIMIZE:'; +SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'not_partitioned_replica2_00502' AND active ORDER BY name; + +SELECT 'Sum before DETACH PARTITION:'; +SELECT sum(x) FROM not_partitioned_replica2_00502; +ALTER TABLE not_partitioned_replica1_00502 DETACH PARTITION ID 'all'; +SELECT 'Sum after DETACH PARTITION:'; +SELECT sum(x) FROM not_partitioned_replica2_00502; + +DROP TABLE not_partitioned_replica1_00502 SYNC; +DROP TABLE not_partitioned_replica2_00502 SYNC; + +SELECT '*** Partitioned by week ***'; + +DROP TABLE IF EXISTS partitioned_by_week_replica1 SYNC; +DROP TABLE IF EXISTS partitioned_by_week_replica2 SYNC; +CREATE TABLE partitioned_by_week_replica1(d Date, x UInt8) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test/partitioned_by_week_00502', '1') PARTITION BY toMonday(d) ORDER BY x; +CREATE TABLE partitioned_by_week_replica2(d Date, x UInt8) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test/partitioned_by_week_00502', '2') PARTITION BY toMonday(d) ORDER BY x; + +-- 2000-01-03 belongs to a different week than 2000-01-01 and 2000-01-02 +INSERT INTO partitioned_by_week_replica1 VALUES ('2000-01-01', 1), ('2000-01-02', 2), ('2000-01-03', 3); +INSERT INTO partitioned_by_week_replica1 VALUES ('2000-01-03', 4), ('2000-01-03', 5); + +SELECT 'Parts before OPTIMIZE:'; -- Select parts on the first replica to avoid waiting for replication. +SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'partitioned_by_week_replica1' AND active ORDER BY name; +SYSTEM SYNC REPLICA partitioned_by_week_replica1 PULL; +SYSTEM SYNC REPLICA partitioned_by_week_replica2; +OPTIMIZE TABLE partitioned_by_week_replica1 PARTITION '2000-01-03' FINAL; +SELECT 'Parts after OPTIMIZE:'; -- After OPTIMIZE with replication_alter_partitions_sync=2 replicas must be in sync. 
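The synchronization choreography this test repeats is easy to misread, so here is a minimal sketch of the pattern, assuming two hypothetical replica tables t_r1 and t_r2 of the same ReplicatedMergeTree table: SYSTEM SYNC REPLICA ... PULL only fetches new entries from the shared replication log into the replica's queue, the plain form waits for that queue to be fully processed, and with replication_alter_partitions_sync = 2 the OPTIMIZE/ALTER statement itself blocks until every replica has applied the result.

SET replication_alter_partitions_sync = 2;   -- make OPTIMIZE/ALTER ... PARTITION wait for all replicas
SYSTEM SYNC REPLICA t_r1 PULL;               -- fetch pending log entries without executing them
SYSTEM SYNC REPLICA t_r2;                    -- wait until t_r2 has drained its replication queue
OPTIMIZE TABLE t_r1 PARTITION tuple() FINAL; -- merge runs once; the merged part replicates to t_r2

This is why the assertions below can safely read system.parts on the second replica right after issuing OPTIMIZE on the first.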
+SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'partitioned_by_week_replica2' AND active ORDER BY name; + +SELECT 'Sum before DROP PARTITION:'; +SELECT sum(x) FROM partitioned_by_week_replica2; +ALTER TABLE partitioned_by_week_replica1 DROP PARTITION '1999-12-27'; +SELECT 'Sum after DROP PARTITION:'; +SELECT sum(x) FROM partitioned_by_week_replica2; + +DROP TABLE partitioned_by_week_replica1 SYNC; +DROP TABLE partitioned_by_week_replica2 SYNC; + +SELECT '*** Partitioned by a (Date, UInt8) tuple ***'; + +DROP TABLE IF EXISTS partitioned_by_tuple_replica1_00502 SYNC; +DROP TABLE IF EXISTS partitioned_by_tuple_replica2_00502 SYNC; +CREATE TABLE partitioned_by_tuple_replica1_00502(d Date, x UInt8, y UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/partitioned_by_tuple_00502', '1') ORDER BY x PARTITION BY (d, x); +CREATE TABLE partitioned_by_tuple_replica2_00502(d Date, x UInt8, y UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/partitioned_by_tuple_00502', '2') ORDER BY x PARTITION BY (d, x); + +INSERT INTO partitioned_by_tuple_replica1_00502 VALUES ('2000-01-01', 1, 1), ('2000-01-01', 2, 2), ('2000-01-02', 1, 3); +INSERT INTO partitioned_by_tuple_replica1_00502 VALUES ('2000-01-02', 1, 4), ('2000-01-01', 1, 5); + +SELECT 'Parts before OPTIMIZE:'; +SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'partitioned_by_tuple_replica1_00502' AND active ORDER BY name; +SYSTEM SYNC REPLICA partitioned_by_tuple_replica1_00502 PULL; +SYSTEM SYNC REPLICA partitioned_by_tuple_replica2_00502; +OPTIMIZE TABLE partitioned_by_tuple_replica1_00502 PARTITION ('2000-01-01', 1) FINAL; +OPTIMIZE TABLE partitioned_by_tuple_replica1_00502 PARTITION ('2000-01-02', 1) FINAL; +SELECT 'Parts after OPTIMIZE:'; +SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'partitioned_by_tuple_replica2_00502' AND active ORDER BY name; + +SELECT 'Sum before DETACH PARTITION:'; +SELECT sum(y) FROM partitioned_by_tuple_replica2_00502; +ALTER TABLE partitioned_by_tuple_replica1_00502 DETACH PARTITION ID '20000101-1'; +SELECT 'Sum after DETACH PARTITION:'; +SELECT sum(y) FROM partitioned_by_tuple_replica2_00502; + +DROP TABLE partitioned_by_tuple_replica1_00502 SYNC; +DROP TABLE partitioned_by_tuple_replica2_00502 SYNC; + +SELECT '*** Partitioned by String ***'; + +DROP TABLE IF EXISTS partitioned_by_string_replica1 SYNC; +DROP TABLE IF EXISTS partitioned_by_string_replica2 SYNC; +CREATE TABLE partitioned_by_string_replica1(s String, x UInt8) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test/partitioned_by_string_00502', '1') PARTITION BY s ORDER BY x; +CREATE TABLE partitioned_by_string_replica2(s String, x UInt8) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test/partitioned_by_string_00502', '2') PARTITION BY s ORDER BY x; + +INSERT INTO partitioned_by_string_replica1 VALUES ('aaa', 1), ('aaa', 2), ('bbb', 3); +INSERT INTO partitioned_by_string_replica1 VALUES ('bbb', 4), ('aaa', 5); + +SELECT 'Parts before OPTIMIZE:'; +SELECT partition, name FROM system.parts WHERE database = currentDatabase() AND table = 'partitioned_by_string_replica1' AND active ORDER BY name; +SYSTEM SYNC REPLICA partitioned_by_string_replica1 PULL; +SYSTEM SYNC REPLICA partitioned_by_string_replica2; +OPTIMIZE TABLE partitioned_by_string_replica2 PARTITION 'aaa' FINAL; +SELECT 'Parts after OPTIMIZE:'; +SELECT partition, name FROM system.parts WHERE database = currentDatabase() 
AND table = 'partitioned_by_string_replica2' AND active ORDER BY name; + +SELECT 'Sum before DROP PARTITION:'; +SELECT sum(x) FROM partitioned_by_string_replica2; +ALTER TABLE partitioned_by_string_replica1 DROP PARTITION 'bbb'; +SELECT 'Sum after DROP PARTITION:'; +SELECT sum(x) FROM partitioned_by_string_replica2; + +DROP TABLE partitioned_by_string_replica1 SYNC; +DROP TABLE partitioned_by_string_replica2 SYNC; + +SELECT '*** Table without columns with fixed size ***'; + +DROP TABLE IF EXISTS without_fixed_size_columns_replica1 SYNC; +DROP TABLE IF EXISTS without_fixed_size_columns_replica2 SYNC; +CREATE TABLE without_fixed_size_columns_replica1(s String) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test/without_fixed_size_columns_00502', '1') PARTITION BY length(s) ORDER BY s; +CREATE TABLE without_fixed_size_columns_replica2(s String) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test/without_fixed_size_columns_00502', '2') PARTITION BY length(s) ORDER BY s; + +INSERT INTO without_fixed_size_columns_replica1 VALUES ('a'), ('aa'), ('b'), ('cc'); + +-- Wait for replication. +SYSTEM SYNC REPLICA without_fixed_size_columns_replica1 PULL; +SYSTEM SYNC REPLICA without_fixed_size_columns_replica2; +OPTIMIZE TABLE without_fixed_size_columns_replica2 PARTITION 1 FINAL; + +SELECT 'Parts:'; +SELECT partition, name, rows FROM system.parts WHERE database = currentDatabase() AND table = 'without_fixed_size_columns_replica2' AND active ORDER BY name; + +SELECT 'Before DROP PARTITION:'; +SELECT * FROM without_fixed_size_columns_replica2 ORDER BY s; +ALTER TABLE without_fixed_size_columns_replica1 DROP PARTITION 1; +SELECT 'After DROP PARTITION:'; +SELECT * FROM without_fixed_size_columns_replica2 ORDER BY s; + +DROP TABLE without_fixed_size_columns_replica1 SYNC; +DROP TABLE without_fixed_size_columns_replica2 SYNC; diff --git a/parser/testdata/00502_string_concat_with_array/ast.json b/parser/testdata/00502_string_concat_with_array/ast.json new file mode 100644 index 000000000..325f4c075 --- /dev/null +++ b/parser/testdata/00502_string_concat_with_array/ast.json @@ -0,0 +1,106 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier b" + }, + { + "explain": " Identifier b" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (alias a) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function toString (alias b) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " 
TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 28, + + "statistics": + { + "elapsed": 0.00151906, + "rows_read": 28, + "bytes_read": 1178 + } +} diff --git a/parser/testdata/00502_string_concat_with_array/metadata.json b/parser/testdata/00502_string_concat_with_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00502_string_concat_with_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00502_string_concat_with_array/query.sql b/parser/testdata/00502_string_concat_with_array/query.sql new file mode 100644 index 000000000..8bfcaa8da --- /dev/null +++ b/parser/testdata/00502_string_concat_with_array/query.sql @@ -0,0 +1 @@ +select a, b || b from (select [number] as a, toString(number) as b from system.numbers limit 2); diff --git a/parser/testdata/00502_sum_map/ast.json b/parser/testdata/00502_sum_map/ast.json new file mode 100644 index 000000000..ebafe229e --- /dev/null +++ b/parser/testdata/00502_sum_map/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001093848, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00502_sum_map/metadata.json b/parser/testdata/00502_sum_map/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00502_sum_map/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00502_sum_map/query.sql b/parser/testdata/00502_sum_map/query.sql new file mode 100644 index 000000000..8b88c57a6 --- /dev/null +++ b/parser/testdata/00502_sum_map/query.sql @@ -0,0 +1,62 @@ +SET send_logs_level = 'fatal'; + +-- { echoOn } +DROP TABLE IF EXISTS sum_map; +CREATE TABLE sum_map(date Date, timeslot DateTime, statusMap Nested(status UInt16, requests UInt64)) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO sum_map VALUES ('2000-01-01', '2000-01-01 00:00:00', [1, 2, 3], [10, 10, 10]), ('2000-01-01', '2000-01-01 00:00:00', [3, 4, 5], [10, 10, 10]), ('2000-01-01', '2000-01-01 00:01:00', [4, 5, 6], [10, 10, 10]), ('2000-01-01', '2000-01-01 00:01:00', [6, 7, 8], [10, 10, 10]); + +SELECT * FROM sum_map ORDER BY timeslot, statusMap.status, statusMap.requests; +SELECT sumMap(statusMap.status, statusMap.requests) FROM sum_map; +SELECT sumMap((statusMap.status, statusMap.requests)) FROM sum_map; +SELECT sumMapMerge(s) FROM (SELECT sumMapState(statusMap.status, statusMap.requests) AS s FROM sum_map); +SELECT timeslot, sumMap(statusMap.status, statusMap.requests) FROM sum_map GROUP BY timeslot ORDER BY timeslot; +SELECT timeslot, sumMap(statusMap.status, statusMap.requests).1, sumMap(statusMap.status, statusMap.requests).2 FROM sum_map GROUP BY timeslot ORDER BY timeslot; + +SELECT sumMapFiltered([1])(statusMap.status, statusMap.requests) FROM sum_map; +SELECT sumMapFiltered([1, 4, 8])(statusMap.status, statusMap.requests) FROM sum_map; + +DROP TABLE sum_map; + +DROP TABLE IF EXISTS sum_map_overflow; +CREATE TABLE sum_map_overflow(events Array(UInt8), counts Array(UInt8)) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO sum_map_overflow VALUES ([1], [255]), ([1], [2]); + +SELECT sumMap(events, counts) FROM sum_map_overflow; +SELECT sumMapWithOverflow(events, counts) FROM sum_map_overflow; + +DROP TABLE sum_map_overflow; + +select sumMap(val, cnt) from ( SELECT [ CAST(1, 'UInt64') ] as val, 
[1] as cnt ); +select sumMap(val, cnt) from ( SELECT [ CAST(1, 'Float64') ] as val, [1] as cnt ); +select sumMap(val, cnt) from ( SELECT [ CAST('a', 'Enum16(\'a\'=1)') ] as val, [1] as cnt ); + +select sumMap(val, cnt) from ( SELECT [ CAST(1, 'DateTime(\'Asia/Istanbul\')') ] as val, [1] as cnt ); +select sumMap(val, cnt) from ( SELECT [ CAST(1, 'Date') ] as val, [1] as cnt ); +select sumMap(val, cnt) from ( SELECT [ CAST('01234567-89ab-cdef-0123-456789abcdef', 'UUID') ] as val, [1] as cnt ); +select sumMap(val, cnt) from ( SELECT [ CAST(1.01, 'Decimal(10,2)') ] as val, [1] as cnt ); + +select sumMap(val, cnt) from ( SELECT [ CAST('a', 'FixedString(1)'), CAST('b', 'FixedString(1)' ) ] as val, [1, 2] as cnt ); +select sumMap(val, cnt) from ( SELECT [ CAST('abc', 'String'), CAST('ab', 'String'), CAST('a', 'String') ] as val, [1, 2, 3] as cnt ); + +DROP TABLE IF EXISTS sum_map_decimal; + +CREATE TABLE sum_map_decimal( + statusMap Nested( + goal_id UInt16, + revenue Decimal32(5) + ) +) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO sum_map_decimal VALUES ([1, 2, 3], [1.0, 2.0, 3.0]), ([3, 4, 5], [3.0, 4.0, 5.0]), ([4, 5, 6], [4.0, 5.0, 6.0]), ([6, 7, 8], [6.0, 7.0, 8.0]); + +SELECT sumMap(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal; +SELECT sumMapWithOverflow(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal; + +DROP TABLE sum_map_decimal; + +CREATE TABLE sum_map_decimal_nullable (`statusMap` Nested(goal_id UInt16, revenue Nullable(Decimal(9, 5)))) engine=MergeTree ORDER BY tuple(); +INSERT INTO sum_map_decimal_nullable VALUES ([1, 2, 3], [1.0, 2.0, 3.0]), ([3, 4, 5], [3.0, 4.0, 5.0]), ([4, 5, 6], [4.0, 5.0, 6.0]), ([6, 7, 8], [6.0, 7.0, 8.0]); +SELECT sumMap(statusMap.goal_id, statusMap.revenue) FROM sum_map_decimal_nullable; +DROP TABLE sum_map_decimal_nullable; diff --git a/parser/testdata/00503_cast_const_nullable/ast.json b/parser/testdata/00503_cast_const_nullable/ast.json new file mode 100644 index 000000000..c9c6833f5 --- /dev/null +++ b/parser/testdata/00503_cast_const_nullable/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (alias id) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'Nullable(UInt8)'" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier id" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'Nullable(UInt8)'" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001245968, + "rows_read": 15, + "bytes_read": 562 + } +} diff --git a/parser/testdata/00503_cast_const_nullable/metadata.json b/parser/testdata/00503_cast_const_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00503_cast_const_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00503_cast_const_nullable/query.sql b/parser/testdata/00503_cast_const_nullable/query.sql new file mode 100644 index 000000000..a1a621e74 --- /dev/null +++ 
b/parser/testdata/00503_cast_const_nullable/query.sql @@ -0,0 +1,3 @@ +SELECT CAST(1 AS Nullable(UInt8)) AS id WHERE id = CAST(1 AS Nullable(UInt8)); +SELECT CAST(1 AS Nullable(UInt8)) AS id WHERE id = 1; +SELECT NULL == CAST(toUInt8(0) AS Nullable(UInt8)); diff --git a/parser/testdata/00504_mergetree_arrays_rw/ast.json b/parser/testdata/00504_mergetree_arrays_rw/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00504_mergetree_arrays_rw/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00504_mergetree_arrays_rw/metadata.json b/parser/testdata/00504_mergetree_arrays_rw/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00504_mergetree_arrays_rw/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00504_mergetree_arrays_rw/query.sql b/parser/testdata/00504_mergetree_arrays_rw/query.sql new file mode 100644 index 000000000..149290453 --- /dev/null +++ b/parser/testdata/00504_mergetree_arrays_rw/query.sql @@ -0,0 +1,40 @@ + +set allow_deprecated_syntax_for_merge_tree=1; +set max_threads = 1; +set max_insert_threads = 1; + +drop table if exists test_ins_arr; +create table test_ins_arr (date Date, val Array(UInt64)) engine = MergeTree(date, (date), 8192); +insert into test_ins_arr select toDate('2017-10-02'), [number, 42] from system.numbers limit 10000; +select * from test_ins_arr limit 10; +drop table test_ins_arr; + +drop table if exists test_ins_null; +create table test_ins_null (date Date, val Nullable(UInt64)) engine = MergeTree(date, (date), 8192); +insert into test_ins_null select toDate('2017-10-02'), if(number % 2, number, Null) from system.numbers limit 10000; +select * from test_ins_null limit 10; +drop table test_ins_null; + +drop table if exists test_ins_arr_null; +create table test_ins_arr_null (date Date, val Array(Nullable(UInt64))) engine = MergeTree(date, (date), 8192); +insert into test_ins_arr_null select toDate('2017-10-02'), [if(number % 2, number, Null), number, Null] from system.numbers limit 10000; +select * from test_ins_arr_null limit 10; +drop table test_ins_arr_null; + +drop table if exists test_ins_arr_arr; +create table test_ins_arr_arr (date Date, val Array(Array(UInt64))) engine = MergeTree(date, (date), 8192); +insert into test_ins_arr_arr select toDate('2017-10-02'), [[number],[number + 1, number + 2]] from system.numbers limit 10000; +select * from test_ins_arr_arr limit 10; +drop table test_ins_arr_arr; + +drop table if exists test_ins_arr_arr_null; +create table test_ins_arr_arr_null (date Date, val Array(Array(Nullable(UInt64)))) engine = MergeTree(date, (date), 8192); +insert into test_ins_arr_arr_null select toDate('2017-10-02'), [[1, Null, number], [3, Null, number]] from system.numbers limit 10000; +select * from test_ins_arr_arr_null limit 10; +drop table test_ins_arr_arr_null; + +drop table if exists test_ins_arr_arr_arr; +create table test_ins_arr_arr_arr (date Date, val Array(Array(Array(UInt64)))) engine = MergeTree(date, (date), 8192); +insert into test_ins_arr_arr_arr select toDate('2017-10-02'), [[[number]],[[number + 1], [number + 2, number + 3]]] from system.numbers limit 10000; +select * from test_ins_arr_arr_arr limit 10; +drop table test_ins_arr_arr_arr; diff --git a/parser/testdata/00506_shard_global_in_union/ast.json b/parser/testdata/00506_shard_global_in_union/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00506_shard_global_in_union/ast.json @@ -0,0 +1 @@ 
+{"error": true} diff --git a/parser/testdata/00506_shard_global_in_union/metadata.json b/parser/testdata/00506_shard_global_in_union/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00506_shard_global_in_union/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00506_shard_global_in_union/query.sql b/parser/testdata/00506_shard_global_in_union/query.sql new file mode 100644 index 000000000..e51c18c56 --- /dev/null +++ b/parser/testdata/00506_shard_global_in_union/query.sql @@ -0,0 +1,59 @@ +-- Tags: shard + +SELECT X FROM (SELECT * FROM (SELECT 1 AS X, 2 AS Y) UNION ALL SELECT 3, 4) ORDER BY X; + +DROP TABLE IF EXISTS globalin; + +CREATE TABLE globalin (CounterID UInt32, StartDate Date ) ENGINE = Memory; + +INSERT INTO globalin VALUES (34, toDate('2017-10-02')), (42, toDate('2017-10-02')), (55, toDate('2017-10-01')); + +SELECT * FROM ( SELECT CounterID FROM remote('127.0.0.2', currentDatabase(), 'globalin') WHERE (CounterID GLOBAL IN ( SELECT toUInt32(34))) GROUP BY CounterID); +SELECT 'NOW okay =========================:'; +SELECT CounterID FROM remote('127.0.0.2', currentDatabase(), 'globalin') WHERE (CounterID GLOBAL IN ( SELECT toUInt32(34) )) GROUP BY CounterID UNION ALL SELECT CounterID FROM remote('127.0.0.2', currentDatabase(), 'globalin') WHERE (CounterID GLOBAL IN ( SELECT toUInt32(34))) GROUP BY CounterID; +SELECT 'NOW BAD ==========================:'; +SELECT * FROM ( SELECT CounterID FROM remote('127.0.0.2', currentDatabase(), 'globalin') WHERE (CounterID GLOBAL IN ( SELECT toUInt32(34) )) GROUP BY CounterID UNION ALL SELECT CounterID FROM remote('127.0.0.2', currentDatabase(), 'globalin') WHERE (CounterID GLOBAL IN ( SELECT toUInt32(34))) GROUP BY CounterID); +SELECT 'finish ===========================;'; + +DROP TABLE globalin; + + +DROP TABLE IF EXISTS union_bug; + +CREATE TABLE union_bug ( + Event String, + Datetime DateTime('Asia/Istanbul') +) Engine = Memory; + +INSERT INTO union_bug VALUES ('A', 1), ('B', 2); + +SELECT ' * A UNION * B:'; +SELECT * FROM ( + SELECT * FROM union_bug WHERE Event = 'A' + UNION ALL + SELECT * FROM union_bug WHERE Event = 'B' +) ORDER BY Datetime; + +SELECT ' Event, Datetime A UNION * B:'; +SELECT * FROM ( + SELECT Event, Datetime FROM union_bug WHERE Event = 'A' + UNION ALL + SELECT * FROM union_bug WHERE Event = 'B' +) ORDER BY Datetime; + +SELECT ' * A UNION Event, Datetime B:'; +SELECT * FROM ( + SELECT * FROM union_bug WHERE Event = 'A' + UNION ALL + SELECT Event, Datetime FROM union_bug WHERE Event = 'B' +) ORDER BY Datetime; + +SELECT ' Event, Datetime A UNION Event, Datetime B:'; +SELECT * FROM ( + SELECT Event, Datetime FROM union_bug WHERE Event = 'A' + UNION ALL + SELECT Event, Datetime FROM union_bug WHERE Event = 'B' +) ORDER BY Datetime; + + +DROP TABLE union_bug; diff --git a/parser/testdata/00506_union_distributed/ast.json b/parser/testdata/00506_union_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00506_union_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00506_union_distributed/metadata.json b/parser/testdata/00506_union_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00506_union_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00506_union_distributed/query.sql b/parser/testdata/00506_union_distributed/query.sql new file mode 100644 index 000000000..c7d74a6d7 --- 
/dev/null +++ b/parser/testdata/00506_union_distributed/query.sql @@ -0,0 +1,28 @@ +-- Tags: distributed + +-- https://github.com/ClickHouse/ClickHouse/issues/1059 + +SET distributed_foreground_insert = 1; + +DROP TABLE IF EXISTS union1; +DROP TABLE IF EXISTS union2; +DROP TABLE IF EXISTS union3; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE union1 ( date Date, a Int32, b Int32, c Int32, d Int32) ENGINE = MergeTree(date, (a, date), 8192); +CREATE TABLE union2 ( date Date, a Int32, b Int32, c Int32, d Int32) ENGINE = Distributed(test_shard_localhost, currentDatabase(), 'union1'); +CREATE TABLE union3 ( date Date, a Int32, b Int32, c Int32, d Int32) ENGINE = Distributed(test_shard_localhost, currentDatabase(), 'union2'); + +INSERT INTO union1 VALUES (1, 2, 3, 4, 5); +INSERT INTO union1 VALUES (11,12,13,14,15); +INSERT INTO union2 VALUES (21,22,23,24,25); +INSERT INTO union3 VALUES (31,32,33,34,35); + +select b, sum(c) from ( select a, b, sum(c) as c from union2 where a>1 group by a,b UNION ALL select a, b, sum(c) as c from union2 where b>1 group by a, b order by a, b) as a group by b order by b; +select b, sum(c) from ( select a, b, sum(c) as c from union1 where a>1 group by a,b UNION ALL select a, b, sum(c) as c from union2 where b>1 group by a, b order by a, b) as a group by b order by b; +select b, sum(c) from ( select a, b, sum(c) as c from union1 where a>1 group by a,b UNION ALL select a, b, sum(c) as c from union1 where b>1 group by a, b order by a, b) as a group by b order by b; +select b, sum(c) from ( select a, b, sum(c) as c from union2 where a>1 group by a,b UNION ALL select a, b, sum(c) as c from union3 where b>1 group by a, b order by a, b) as a group by b order by b; + +DROP TABLE union1; +DROP TABLE union2; +DROP TABLE union3; diff --git a/parser/testdata/00507_sumwithoverflow/ast.json b/parser/testdata/00507_sumwithoverflow/ast.json new file mode 100644 index 000000000..7e68c723c --- /dev/null +++ b/parser/testdata/00507_sumwithoverflow/ast.json @@ -0,0 +1,97 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier n" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUInt16 (alias n) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_100" + } + ], + + "rows": 25, + + "statistics": + { + "elapsed": 0.001124731, + "rows_read": 25, + 
"bytes_read": 1076 + } +} diff --git a/parser/testdata/00507_sumwithoverflow/metadata.json b/parser/testdata/00507_sumwithoverflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00507_sumwithoverflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00507_sumwithoverflow/query.sql b/parser/testdata/00507_sumwithoverflow/query.sql new file mode 100644 index 000000000..8371feaf4 --- /dev/null +++ b/parser/testdata/00507_sumwithoverflow/query.sql @@ -0,0 +1,7 @@ +SELECT toTypeName(sum(n)) FROM (SELECT toUInt16(number) AS n FROM system.numbers LIMIT 100); +SELECT toTypeName(sumWithOverflow(n)) FROM (SELECT toUInt16(number) AS n FROM system.numbers LIMIT 100); +SELECT toTypeName(sum(n)) FROM (SELECT toFloat32(number) AS n FROM system.numbers LIMIT 100); +SELECT toTypeName(sumWithOverflow(n)) FROM (SELECT toFloat32(number) AS n FROM system.numbers LIMIT 100); + +SELECT sum(n) FROM (SELECT toUInt16(number) AS n FROM system.numbers LIMIT 100); +SELECT sumWithOverflow(n) FROM (SELECT toUInt16(number) AS n FROM system.numbers LIMIT 100); diff --git a/parser/testdata/00508_materialized_view_to/ast.json b/parser/testdata/00508_materialized_view_to/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00508_materialized_view_to/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00508_materialized_view_to/metadata.json b/parser/testdata/00508_materialized_view_to/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00508_materialized_view_to/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00508_materialized_view_to/query.sql b/parser/testdata/00508_materialized_view_to/query.sql new file mode 100644 index 000000000..0d8fb85ee --- /dev/null +++ b/parser/testdata/00508_materialized_view_to/query.sql @@ -0,0 +1,30 @@ + +CREATE TABLE src (x UInt8) ENGINE = Null; +CREATE TABLE dst (x UInt8) ENGINE = Memory; + +CREATE MATERIALIZED VIEW mv_00508 TO dst AS SELECT * FROM src; + +INSERT INTO src VALUES (1), (2); +SELECT * FROM mv_00508 ORDER BY x; + +-- Detach MV and see if the data is still readable +DETACH TABLE mv_00508; +SELECT * FROM dst ORDER BY x; + +USE default; + +-- Reattach MV (shortcut) +ATTACH TABLE {CLICKHOUSE_DATABASE:Identifier}.mv_00508; + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.src VALUES (3); + +SELECT * FROM {CLICKHOUSE_DATABASE:Identifier}.mv_00508 ORDER BY x; + +-- Drop the MV and see if the data is still readable +DROP TABLE {CLICKHOUSE_DATABASE:Identifier}.mv_00508; +SELECT * FROM {CLICKHOUSE_DATABASE:Identifier}.dst ORDER BY x; + +DROP TABLE {CLICKHOUSE_DATABASE:Identifier}.src; +DROP TABLE {CLICKHOUSE_DATABASE:Identifier}.dst; + +DROP DATABASE {CLICKHOUSE_DATABASE:Identifier}; diff --git a/parser/testdata/00509_extended_storage_definition_syntax_zookeeper/ast.json b/parser/testdata/00509_extended_storage_definition_syntax_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00509_extended_storage_definition_syntax_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00509_extended_storage_definition_syntax_zookeeper/metadata.json b/parser/testdata/00509_extended_storage_definition_syntax_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00509_extended_storage_definition_syntax_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/00509_extended_storage_definition_syntax_zookeeper/query.sql b/parser/testdata/00509_extended_storage_definition_syntax_zookeeper/query.sql new file mode 100644 index 000000000..c0b70f87d --- /dev/null +++ b/parser/testdata/00509_extended_storage_definition_syntax_zookeeper/query.sql @@ -0,0 +1,86 @@ +-- Tags: zookeeper, no-shared-merge-tree +-- no-shared-merge-tree: boring test, nothing new + +SET optimize_on_insert = 0; + +SELECT '*** Replicated with sampling ***'; + +DROP TABLE IF EXISTS replicated_with_sampling; + +CREATE TABLE replicated_with_sampling(x UInt8) + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00509/replicated_with_sampling', 'r1') + ORDER BY x + SAMPLE BY x; + +INSERT INTO replicated_with_sampling VALUES (1), (128); +SELECT sum(x) FROM replicated_with_sampling SAMPLE 1/2; + +DROP TABLE replicated_with_sampling; + +SELECT '*** Replacing with implicit version ***'; + +DROP TABLE IF EXISTS replacing; + +CREATE TABLE replacing(d Date, x UInt32, s String) ENGINE = ReplacingMergeTree ORDER BY x PARTITION BY d; + +INSERT INTO replacing VALUES ('2017-10-23', 1, 'a'); +INSERT INTO replacing VALUES ('2017-10-23', 1, 'b'); +INSERT INTO replacing VALUES ('2017-10-23', 1, 'c'); + +OPTIMIZE TABLE replacing PARTITION '2017-10-23' FINAL; + +SELECT * FROM replacing; + +DROP TABLE replacing; + +SELECT '*** Replicated Collapsing ***'; + +DROP TABLE IF EXISTS replicated_collapsing; + +CREATE TABLE replicated_collapsing(d Date, x UInt32, sign Int8) + ENGINE = ReplicatedCollapsingMergeTree('/clickhouse/tables/{database}/test_00509/replicated_collapsing', 'r1', sign) + PARTITION BY toYYYYMM(d) ORDER BY d; + +INSERT INTO replicated_collapsing VALUES ('2017-10-23', 1, 1); +INSERT INTO replicated_collapsing VALUES ('2017-10-23', 1, -1), ('2017-10-23', 2, 1); + +SYSTEM SYNC REPLICA replicated_collapsing PULL; +OPTIMIZE TABLE replicated_collapsing PARTITION 201710 FINAL; + +SELECT * FROM replicated_collapsing; + +DROP TABLE replicated_collapsing; + +SELECT '*** Replicated VersionedCollapsing ***'; + +DROP TABLE IF EXISTS replicated_versioned_collapsing; + +CREATE TABLE replicated_versioned_collapsing(d Date, x UInt32, sign Int8, version UInt8) + ENGINE = ReplicatedVersionedCollapsingMergeTree('/clickhouse/tables/{database}/test_00509/replicated_versioned_collapsing', 'r1', sign, version) + PARTITION BY toYYYYMM(d) ORDER BY (d, version); + +INSERT INTO replicated_versioned_collapsing VALUES ('2017-10-23', 1, 1, 0); +INSERT INTO replicated_versioned_collapsing VALUES ('2017-10-23', 1, -1, 0), ('2017-10-23', 2, 1, 0); +INSERT INTO replicated_versioned_collapsing VALUES ('2017-10-23', 1, -1, 1), ('2017-10-23', 2, 1, 2); + +SYSTEM SYNC REPLICA replicated_versioned_collapsing; +OPTIMIZE TABLE replicated_versioned_collapsing PARTITION 201710 FINAL; + +SELECT * FROM replicated_versioned_collapsing; + +DROP TABLE replicated_versioned_collapsing; + +SELECT '*** Table definition with SETTINGS ***'; + +DROP TABLE IF EXISTS with_settings; + +CREATE TABLE with_settings(x UInt32) + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00509/with_settings', 'r1') + ORDER BY x + SETTINGS replicated_can_become_leader = 0; + +SELECT sleep(1); -- If replicated_can_become_leader were true, this replica would become the leader after 1 second. 
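+-- An alternative sanity check (a sketch, assuming table-level SETTINGS round-trip
+-- through the stored definition): SHOW CREATE TABLE with_settings should print a
+-- definition ending in SETTINGS replicated_can_become_leader = 0.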
+ +SELECT is_leader FROM system.replicas WHERE database = currentDatabase() AND table = 'with_settings'; + +DROP TABLE with_settings; diff --git a/parser/testdata/00510_materizlized_view_and_deduplication_zookeeper/ast.json b/parser/testdata/00510_materizlized_view_and_deduplication_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00510_materizlized_view_and_deduplication_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00510_materizlized_view_and_deduplication_zookeeper/metadata.json b/parser/testdata/00510_materizlized_view_and_deduplication_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00510_materizlized_view_and_deduplication_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00510_materizlized_view_and_deduplication_zookeeper/query.sql b/parser/testdata/00510_materizlized_view_and_deduplication_zookeeper/query.sql new file mode 100644 index 000000000..329f6ad22 --- /dev/null +++ b/parser/testdata/00510_materizlized_view_and_deduplication_zookeeper/query.sql @@ -0,0 +1,54 @@ +-- Tags: zookeeper, no-ordinary-database, no-parallel +-- Tag no-parallel: static UUID + +DROP TABLE IF EXISTS with_deduplication; +DROP TABLE IF EXISTS without_deduplication; +DROP TABLE IF EXISTS with_deduplication_mv; +DROP TABLE IF EXISTS without_deduplication_mv; + +SET database_replicated_allow_explicit_uuid=3; +SET database_replicated_allow_replicated_engine_arguments=3; +CREATE TABLE with_deduplication(x UInt32) + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00510/with_deduplication', 'r1') ORDER BY x; +CREATE TABLE without_deduplication(x UInt32) + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00510/without_deduplication', 'r1') ORDER BY x SETTINGS replicated_deduplication_window = 0; + +CREATE MATERIALIZED VIEW with_deduplication_mv UUID '00000510-1000-4000-8000-000000000001' + ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/tables/{database}/test_00510/with_deduplication_mv', 'r1') ORDER BY dummy + AS SELECT 0 AS dummy, countState(x) AS cnt FROM with_deduplication; +CREATE MATERIALIZED VIEW without_deduplication_mv UUID '00000510-1000-4000-8000-000000000002' + ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/tables/{database}/test_00510/without_deduplication_mv', 'r1') ORDER BY dummy + AS SELECT 0 AS dummy, countState(x) AS cnt FROM without_deduplication; + +INSERT INTO with_deduplication VALUES (42); +INSERT INTO with_deduplication VALUES (42); +INSERT INTO with_deduplication VALUES (43); + +INSERT INTO without_deduplication VALUES (42); +INSERT INTO without_deduplication VALUES (42); +INSERT INTO without_deduplication VALUES (43); + +SELECT count() FROM with_deduplication; +SELECT count() FROM without_deduplication; + +-- Implicit insert isn't deduplicated, because deduplicate_blocks_in_dependent_materialized_views = 0 by default +SELECT ''; +SELECT countMerge(cnt) FROM with_deduplication_mv; +SELECT countMerge(cnt) FROM without_deduplication_mv; + +-- Explicit insert is deduplicated +ALTER TABLE `.inner_id.00000510-1000-4000-8000-000000000001` DROP PARTITION ID 'all'; +ALTER TABLE `.inner_id.00000510-1000-4000-8000-000000000002` DROP PARTITION ID 'all'; +INSERT INTO `.inner_id.00000510-1000-4000-8000-000000000001` SELECT 0 AS dummy, arrayReduce('countState', [toUInt32(42)]) AS cnt; +INSERT INTO `.inner_id.00000510-1000-4000-8000-000000000001` SELECT 0 AS dummy, 
arrayReduce('countState', [toUInt32(42)]) AS cnt; +INSERT INTO `.inner_id.00000510-1000-4000-8000-000000000002` SELECT 0 AS dummy, arrayReduce('countState', [toUInt32(42)]) AS cnt; +INSERT INTO `.inner_id.00000510-1000-4000-8000-000000000002` SELECT 0 AS dummy, arrayReduce('countState', [toUInt32(42)]) AS cnt; + +SELECT ''; +SELECT countMerge(cnt) FROM with_deduplication_mv; +SELECT countMerge(cnt) FROM without_deduplication_mv; + +DROP TABLE IF EXISTS with_deduplication; +DROP TABLE IF EXISTS without_deduplication; +DROP TABLE IF EXISTS with_deduplication_mv; +DROP TABLE IF EXISTS without_deduplication_mv; diff --git a/parser/testdata/00511_get_size_of_enum/ast.json b/parser/testdata/00511_get_size_of_enum/ast.json new file mode 100644 index 000000000..357f51364 --- /dev/null +++ b/parser/testdata/00511_get_size_of_enum/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function getSizeOfEnumType (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'Enum8(\\'a\\' = 1, \\'b\\' = 2)'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001474037, + "rows_read": 10, + "bytes_read": 407 + } +} diff --git a/parser/testdata/00511_get_size_of_enum/metadata.json b/parser/testdata/00511_get_size_of_enum/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00511_get_size_of_enum/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00511_get_size_of_enum/query.sql b/parser/testdata/00511_get_size_of_enum/query.sql new file mode 100644 index 000000000..ebdf18467 --- /dev/null +++ b/parser/testdata/00511_get_size_of_enum/query.sql @@ -0,0 +1,2 @@ +SELECT getSizeOfEnumType(CAST(1 AS Enum8('a' = 1, 'b' = 2))); +SELECT getSizeOfEnumType(CAST('b' AS Enum16('a' = 1, 'b' = 2, 'x' = 10))); diff --git a/parser/testdata/00513_fractional_time_zones/ast.json b/parser/testdata/00513_fractional_time_zones/ast.json new file mode 100644 index 000000000..e5ce7a715 --- /dev/null +++ b/parser/testdata/00513_fractional_time_zones/ast.json @@ -0,0 +1,112 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (alias t) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1509138000" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_300" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toHour (alias h) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Literal 'Asia\/Kolkata'" + }, + 
{ + "explain": " Function toString (alias h_start) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toStartOfHour (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Literal 'Asia\/Kolkata'" + }, + { + "explain": " Literal 'Asia\/Kolkata'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_12" + } + ], + + "rows": 30, + + "statistics": + { + "elapsed": 0.001428555, + "rows_read": 30, + "bytes_read": 1207 + } +} diff --git a/parser/testdata/00513_fractional_time_zones/metadata.json b/parser/testdata/00513_fractional_time_zones/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00513_fractional_time_zones/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00513_fractional_time_zones/query.sql b/parser/testdata/00513_fractional_time_zones/query.sql new file mode 100644 index 000000000..807fb11b2 --- /dev/null +++ b/parser/testdata/00513_fractional_time_zones/query.sql @@ -0,0 +1 @@ +WITH toDateTime(1509138000) + number * 300 AS t SELECT toHour(t, 'Asia/Kolkata') AS h, toString(toStartOfHour(t, 'Asia/Kolkata'), 'Asia/Kolkata') AS h_start FROM system.numbers LIMIT 12; diff --git a/parser/testdata/00514_interval_operators/ast.json b/parser/testdata/00514_interval_operators/ast.json new file mode 100644 index 000000000..d7d8a8cd4 --- /dev/null +++ b/parser/testdata/00514_interval_operators/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001096457, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00514_interval_operators/metadata.json b/parser/testdata/00514_interval_operators/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00514_interval_operators/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00514_interval_operators/query.sql b/parser/testdata/00514_interval_operators/query.sql new file mode 100644 index 000000000..e8f03cb4f --- /dev/null +++ b/parser/testdata/00514_interval_operators/query.sql @@ -0,0 +1,27 @@ +SET session_timezone = 'Etc/UTC'; + +SELECT toDateTime('2017-10-30 08:18:19') + INTERVAL 1 DAY + INTERVAL 1 MONTH - INTERVAL 1 YEAR; +SELECT toDateTime('2017-10-30 08:18:19') + INTERVAL 1 HOUR + INTERVAL 1000 MINUTE + INTERVAL 10 SECOND; +SELECT toDateTime('2017-10-30 08:18:19') + INTERVAL 1 DAY + INTERVAL number MONTH FROM system.numbers LIMIT 20; +SELECT toDateTime('2016-02-29 01:02:03') + INTERVAL number YEAR, toDateTime('2016-02-29 01:02:03') + INTERVAL number MONTH FROM system.numbers LIMIT 16; +SELECT toDateTime('2016-02-29 01:02:03') - INTERVAL 1 QUARTER; + +SELECT (toDateTime('2000-01-01 12:00:00') + INTERVAL 1234567 SECOND) x, toTypeName(x); +SELECT (toDateTime('2000-01-01 12:00:00') + INTERVAL 1234567 MILLISECOND) x, toTypeName(x); +SELECT (toDateTime('2000-01-01 12:00:00') + INTERVAL 1234567 MICROSECOND) x, toTypeName(x); +SELECT (toDateTime('2000-01-01 12:00:00') + INTERVAL 1234567 NANOSECOND) x, toTypeName(x); + +SELECT (toDateTime('2000-01-01 12:00:00') - INTERVAL 1234567 SECOND) x, toTypeName(x); +SELECT 
(toDateTime('2000-01-01 12:00:00') - INTERVAL 1234567 MILLISECOND) x, toTypeName(x); +SELECT (toDateTime('2000-01-01 12:00:00') - INTERVAL 1234567 MICROSECOND) x, toTypeName(x); +SELECT (toDateTime('2000-01-01 12:00:00') - INTERVAL 1234567 NANOSECOND) x, toTypeName(x); + +SELECT (toDateTime64('2000-01-01 12:00:00.678', 3) - INTERVAL 12345 MILLISECOND) x, toTypeName(x); +SELECT (toDateTime64('2000-01-01 12:00:00.67898', 5) - INTERVAL 12345 MILLISECOND) x, toTypeName(x); +SELECT (toDateTime64('2000-01-01 12:00:00.67', 2) - INTERVAL 12345 MILLISECOND) x, toTypeName(x); + +select toDateTime64('3000-01-01 12:00:00.12345', 0) + interval 0 nanosecond; -- { serverError DECIMAL_OVERFLOW } +select toDateTime64('3000-01-01 12:00:00.12345', 0) + interval 0 microsecond; + +-- Check that the error is thrown during typechecking, not execution. +select materialize(toDate('2000-01-01')) + interval 1 nanosecond from numbers(0); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/00515_enhanced_time_zones/ast.json b/parser/testdata/00515_enhanced_time_zones/ast.json new file mode 100644 index 000000000..7e4158a2e --- /dev/null +++ b/parser/testdata/00515_enhanced_time_zones/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00127451, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00515_enhanced_time_zones/metadata.json b/parser/testdata/00515_enhanced_time_zones/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00515_enhanced_time_zones/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00515_enhanced_time_zones/query.sql b/parser/testdata/00515_enhanced_time_zones/query.sql new file mode 100644 index 000000000..c5cafdbb5 --- /dev/null +++ b/parser/testdata/00515_enhanced_time_zones/query.sql @@ -0,0 +1,76 @@ +SET allow_deprecated_snowflake_conversion_functions = 1; + +SELECT addMonths(toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul'), 1, 'Asia/Kolkata'); +SELECT addMonths(toDateTime('2017-11-05 10:37:47', 'Asia/Kolkata'), 1); +SELECT addMonths(toTimeZone(toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul'), 'Asia/Kolkata'), 1); + +SELECT addMonths(toDateTime('2017-11-05 08:07:47'), 1); +SELECT addMonths(materialize(toDateTime('2017-11-05 08:07:47')), 1); +SELECT addMonths(toDateTime('2017-11-05 08:07:47'), materialize(1)); +SELECT addMonths(materialize(toDateTime('2017-11-05 08:07:47')), materialize(1)); + +SELECT addMonths(toDateTime('2017-11-05 08:07:47'), -1); +SELECT addMonths(materialize(toDateTime('2017-11-05 08:07:47')), -1); +SELECT addMonths(toDateTime('2017-11-05 08:07:47'), materialize(-1)); +SELECT addMonths(materialize(toDateTime('2017-11-05 08:07:47')), materialize(-1)); + +SELECT toUnixTimestamp('2017-11-05 08:07:47', 'Asia/Istanbul'); +SELECT toUnixTimestamp(toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul'), 'Asia/Istanbul'); + +SELECT toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul'); +SELECT toTimeZone(toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul'), 'Asia/Kolkata'); +SELECT toString(toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul')); +SELECT toString(toTimeZone(toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul'), 'Asia/Kolkata')); +SELECT toString(toDateTime('2017-11-05 08:07:47', 'Asia/Istanbul'), 'Asia/Kolkata'); + +SELECT '-- Test const timezone arguments --'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab (val Int64, tz 
String) engine=Log; +INSERT INTO tab VALUES (42, 'Asia/Singapore') (43, 'Asia/Tokyo'); + +SELECT val FROM tab WHERE now(tz) != toDateTime('2000-01-01 00:00:00') ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT val FROM tab WHERE now(tz) != toDateTime('2000-01-01 00:00:00') ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 1; + +SELECT val FROM tab WHERE now64(9, tz) != toDateTime64('2000-01-01 00:00:00', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT val FROM tab WHERE now64(9, tz) != toDateTime64('2000-01-01 00:00:00', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 1; + +SELECT val FROM tab WHERE nowInBlock(tz) != toDateTime('2000-01-01 00:00:00') ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT val FROM tab WHERE nowInBlock(tz) != toDateTime('2000-01-01 00:00:00') ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 1; + +SELECT val FROM tab WHERE toTimeZone(toDateTime(val), tz) != toDateTime('2023-06-11 14:14:14') ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT val FROM tab WHERE toTimeZone(toDateTime(val), tz) != toDateTime('2023-06-11 14:14:14') ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 1; + +SELECT val FROM tab WHERE fromUnixTimestamp64Milli(val, tz) != toDateTime64('2023-06-11 14:14:14', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT val FROM tab WHERE fromUnixTimestamp64Milli(val, tz) != toDateTime64('2023-06-11 14:14:14', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 1; + +SELECT val FROM tab WHERE fromUnixTimestamp64Micro(val, tz) != toDateTime64('2023-06-11 14:14:14', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT val FROM tab WHERE fromUnixTimestamp64Micro(val, tz) != toDateTime64('2023-06-11 14:14:14', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 1; + +SELECT val FROM tab WHERE fromUnixTimestamp64Nano(val, tz) != toDateTime64('2023-06-11 14:14:14', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT val FROM tab WHERE fromUnixTimestamp64Nano(val, tz) != toDateTime64('2023-06-11 14:14:14', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 1; + +SELECT val FROM tab WHERE snowflakeToDateTime(val, tz) != toDateTime('2023-06-11 14:14:14') ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT val FROM tab WHERE snowflakeToDateTime(val, tz) != toDateTime('2023-06-11 14:14:14') ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 1; + +SELECT val FROM tab WHERE snowflakeToDateTime64(val, tz) != toDateTime64('2023-06-11 14:14:14', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 0; -- { serverError ILLEGAL_COLUMN } +SELECT val FROM tab WHERE snowflakeToDateTime64(val, tz) != toDateTime64('2023-06-11 14:14:14', 6) ORDER BY val SETTINGS allow_nonconst_timezone_arguments = 1; + +-- test for a related bug: + +DROP TABLE tab; + +SET allow_nonconst_timezone_arguments = 1; + +CREATE TABLE tab (`country` LowCardinality(FixedString(7)) DEFAULT 'unknown', `city` LowCardinality(String) DEFAULT 'unknown', `region` LowCardinality(String) DEFAULT 'unknown', `continent` LowCardinality(FixedString(7)) DEFAULT 'unknown', `is_eu_country` Bool, `date` DateTime 
CODEC(DoubleDelta, LZ4), `viewer_date` DateTime ALIAS toTimezone(date, timezone), `device_browser` LowCardinality(String) DEFAULT 'unknown', `metro_code` LowCardinality(String) DEFAULT 'unknown', `domain` String DEFAULT 'unknown', `device_platform` LowCardinality(String) DEFAULT 'unknown', `device_type` LowCardinality(String) DEFAULT 'unknown', `device_vendor` LowCardinality(String) DEFAULT 'unknown', `ip` FixedString(39) DEFAULT 'unknown', `lat` Decimal(8, 6) CODEC(T64), `lng` Decimal(9, 6) CODEC(T64), `asset_id` String DEFAULT 'unknown', `is_personalized` Bool, `metric` String, `origin` String DEFAULT 'unknown', `product_id` UInt64 CODEC(T64), `referer` String DEFAULT 'unknown', `server_side` Int8 CODEC(T64), `third_party_id` String DEFAULT 'unknown', `partner_slug` LowCardinality(FixedString(10)) DEFAULT 'unknown', `user_agent` String DEFAULT 'unknown', `user_id` UUID, `zip` FixedString(10) DEFAULT 'unknown', `timezone` LowCardinality(String), `as_organization` LowCardinality(String) DEFAULT 'unknown', `content_cat` Array(String), `playback_method` LowCardinality(String) DEFAULT 'unknown', `store_id` LowCardinality(String) DEFAULT 'unknown', `store_url` String DEFAULT 'unknown', `timestamp` Nullable(DateTime), `ad_count` Int8 CODEC(T64), `ad_type` LowCardinality(FixedString(10)) DEFAULT 'unknown', `ad_categories` Array(FixedString(8)), `blocked_ad_categories` Array(FixedString(8)), `break_max_ad_length` Int8 CODEC(T64), `break_max_ads` Int8 CODEC(T64), `break_max_duration` Int8 CODEC(T64), `break_min_ad_length` Int8 CODEC(T64), `break_position` LowCardinality(FixedString(18)) DEFAULT 'unknown', `media_playhead` String DEFAULT 'unknown', `placement_type` Int8 CODEC(T64), `transaction_id` String, `universal_ad_id` Array(String), `client_ua` LowCardinality(String) DEFAULT 'unknown', `device_ip` FixedString(39) DEFAULT 'unknown', `device_ua` LowCardinality(String) DEFAULT 'unknown', `ifa` String, `ifa_type` LowCardinality(String) DEFAULT 'unknown', `vast_lat` Decimal(8, 6) CODEC(T64), `vast_long` Decimal(9, 6) CODEC(T64), `server_ua` String DEFAULT 'unknown', `app_bundle` String DEFAULT 'unknown', `page_url` String DEFAULT 'unknown', `api_framework` Array(UInt8), `click_type` LowCardinality(String), `extensions` Array(String), `media_mime` Array(String), `om_id_partner` LowCardinality(String) DEFAULT 'unknown', `player_capabilities` Array(FixedString(12)), `vast_versions` Array(UInt8), `verification_vendors` Array(String), `ad_play_head` String DEFAULT 'unknown', `ad_serving_id` String DEFAULT 'unknown', `asset_uri` String DEFAULT 'unknown', `content_id` String DEFAULT 'unknown', `content_uri` String DEFAULT 'unknown', `inventory_state` Array(FixedString(14)), `player_size` Array(UInt8), `player_state` Array(FixedString(12)), `pod_sequence` Int8 CODEC(T64), `click_position` Array(UInt32), `error_code` Int16 CODEC(T64), `error_reason` Int8 CODEC(T64), `gdpr_consent` String DEFAULT 'unknown', `limited_tracking` Bool, `regulations` String DEFAULT 'unknown', `content_category` Array(String), PROJECTION projection_TPAG_VAST_date (SELECT * ORDER BY toYYYYMMDD(date), metric, product_id, asset_id)) ENGINE = MergeTree ORDER BY (product_id, metric, asset_id, toYYYYMMDD(date)); +DETACH TABLE tab; + +SET allow_nonconst_timezone_arguments = 0; + +-- ATTACH TABLE doesn't check the default expressions +ATTACH TABLE tab; +DROP TABLE tab; + +-- CREATE TABLE does check the default expressions, so the following is expected to fail: +CREATE TABLE tab (`country` LowCardinality(FixedString(7)) DEFAULT 'unknown', 
`city` LowCardinality(String) DEFAULT 'unknown', `region` LowCardinality(String) DEFAULT 'unknown', `continent` LowCardinality(FixedString(7)) DEFAULT 'unknown', `is_eu_country` Bool, `date` DateTime CODEC(DoubleDelta, LZ4), `viewer_date` DateTime ALIAS toTimezone(date, timezone), `device_browser` LowCardinality(String) DEFAULT 'unknown', `metro_code` LowCardinality(String) DEFAULT 'unknown', `domain` String DEFAULT 'unknown', `device_platform` LowCardinality(String) DEFAULT 'unknown', `device_type` LowCardinality(String) DEFAULT 'unknown', `device_vendor` LowCardinality(String) DEFAULT 'unknown', `ip` FixedString(39) DEFAULT 'unknown', `lat` Decimal(8, 6) CODEC(T64), `lng` Decimal(9, 6) CODEC(T64), `asset_id` String DEFAULT 'unknown', `is_personalized` Bool, `metric` String, `origin` String DEFAULT 'unknown', `product_id` UInt64 CODEC(T64), `referer` String DEFAULT 'unknown', `server_side` Int8 CODEC(T64), `third_party_id` String DEFAULT 'unknown', `partner_slug` LowCardinality(FixedString(10)) DEFAULT 'unknown', `user_agent` String DEFAULT 'unknown', `user_id` UUID, `zip` FixedString(10) DEFAULT 'unknown', `timezone` LowCardinality(String), `as_organization` LowCardinality(String) DEFAULT 'unknown', `content_cat` Array(String), `playback_method` LowCardinality(String) DEFAULT 'unknown', `store_id` LowCardinality(String) DEFAULT 'unknown', `store_url` String DEFAULT 'unknown', `timestamp` Nullable(DateTime), `ad_count` Int8 CODEC(T64), `ad_type` LowCardinality(FixedString(10)) DEFAULT 'unknown', `ad_categories` Array(FixedString(8)), `blocked_ad_categories` Array(FixedString(8)), `break_max_ad_length` Int8 CODEC(T64), `break_max_ads` Int8 CODEC(T64), `break_max_duration` Int8 CODEC(T64), `break_min_ad_length` Int8 CODEC(T64), `break_position` LowCardinality(FixedString(18)) DEFAULT 'unknown', `media_playhead` String DEFAULT 'unknown', `placement_type` Int8 CODEC(T64), `transaction_id` String, `universal_ad_id` Array(String), `client_ua` LowCardinality(String) DEFAULT 'unknown', `device_ip` FixedString(39) DEFAULT 'unknown', `device_ua` LowCardinality(String) DEFAULT 'unknown', `ifa` String, `ifa_type` LowCardinality(String) DEFAULT 'unknown', `vast_lat` Decimal(8, 6) CODEC(T64), `vast_long` Decimal(9, 6) CODEC(T64), `server_ua` String DEFAULT 'unknown', `app_bundle` String DEFAULT 'unknown', `page_url` String DEFAULT 'unknown', `api_framework` Array(UInt8), `click_type` LowCardinality(String), `extensions` Array(String), `media_mime` Array(String), `om_id_partner` LowCardinality(String) DEFAULT 'unknown', `player_capabilities` Array(FixedString(12)), `vast_versions` Array(UInt8), `verification_vendors` Array(String), `ad_play_head` String DEFAULT 'unknown', `ad_serving_id` String DEFAULT 'unknown', `asset_uri` String DEFAULT 'unknown', `content_id` String DEFAULT 'unknown', `content_uri` String DEFAULT 'unknown', `inventory_state` Array(FixedString(14)), `player_size` Array(UInt8), `player_state` Array(FixedString(12)), `pod_sequence` Int8 CODEC(T64), `click_position` Array(UInt32), `error_code` Int16 CODEC(T64), `error_reason` Int8 CODEC(T64), `gdpr_consent` String DEFAULT 'unknown', `limited_tracking` Bool, `regulations` String DEFAULT 'unknown', `content_category` Array(String), PROJECTION projection_TPAG_VAST_date (SELECT * ORDER BY toYYYYMMDD(date), metric, product_id, asset_id)) ENGINE = MergeTree ORDER BY (product_id, metric, asset_id, toYYYYMMDD(date)); -- { serverError ILLEGAL_COLUMN } diff --git a/parser/testdata/00515_gcd_lcm/ast.json b/parser/testdata/00515_gcd_lcm/ast.json 
new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00515_gcd_lcm/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00515_gcd_lcm/metadata.json b/parser/testdata/00515_gcd_lcm/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00515_gcd_lcm/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00515_gcd_lcm/query.sql b/parser/testdata/00515_gcd_lcm/query.sql new file mode 100644 index 000000000..829a86536 --- /dev/null +++ b/parser/testdata/00515_gcd_lcm/query.sql @@ -0,0 +1,41 @@ +-- test gcd +select gcd(1280, 1024); +select gcd(11, 121); +select gcd(-256, 64); +select gcd(1, 1); +select gcd(4, 2); +select gcd(15, 49); +select gcd(255, 254); +select gcd(2147483647, 2147483646); +select gcd(4611686011984936962, 2147483647); +select gcd(-2147483648, 1); +select gcd(255, 515); +select gcd(255, 510); +select gcd(255, 512); +-- test lcm +select lcm(1280, 1024); +select lcm(11, 121); +select lcm(-256, 64); +select lcm(1, 1); +select lcm(4, 2); +select lcm(15, 49); +select lcm(255, 254); +select lcm(2147483647, 2147483646); +select lcm(4611686011984936962, 2147483647); +select lcm(-2147483648, 1); +-- test gcd float +select gcd(1280.1, 1024.1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select gcd(11.1, 121.1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select gcd(-256.1, 64.1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select gcd(1.1, 1.1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select gcd(4.1, 2.1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select gcd(15.1, 49.1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select gcd(255.1, 254.1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- test lcm float +select lcm(1280.1, 1024.1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select lcm(11.1, 121.1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select lcm(-256.1, 64.1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select lcm(1.1, 1.1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select lcm(4.1, 2.1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select lcm(15.1, 49.1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select lcm(255.1, 254.1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/00515_shard_desc_table_functions_and_subqueries/ast.json b/parser/testdata/00515_shard_desc_table_functions_and_subqueries/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00515_shard_desc_table_functions_and_subqueries/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00515_shard_desc_table_functions_and_subqueries/metadata.json b/parser/testdata/00515_shard_desc_table_functions_and_subqueries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00515_shard_desc_table_functions_and_subqueries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00515_shard_desc_table_functions_and_subqueries/query.sql b/parser/testdata/00515_shard_desc_table_functions_and_subqueries/query.sql new file mode 100644 index 000000000..e35807224 --- /dev/null +++ b/parser/testdata/00515_shard_desc_table_functions_and_subqueries/query.sql @@ -0,0 +1,20 @@ +-- Tags: shard + +drop table if exists tab; +set allow_deprecated_syntax_for_merge_tree=1; +create table tab (date Date, val UInt64, val2 UInt8 default 42, val3 UInt8 default val2 + 1, val4 UInt64 alias val) engine = MergeTree(date, (date, val), 8192); +desc tab; +select '-'; +desc table tab; +select '-'; 
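+-- DESCRIBE handles table functions and subqueries the same way as plain tables, as the
+-- cases below show; a minimal sketch of the same idea: desc (select 1 as x, 'a' as s);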
+desc remote('127.0.0.2', currentDatabase(), tab); +select '-'; +desc table remote('127.0.0.2', currentDatabase(), tab); +select '-'; +desc (select 1); +select '-'; +desc table (select 1); +select '-'; +desc (select * from system.numbers); +select '-'; +drop table if exists tab; diff --git a/parser/testdata/00516_deduplication_after_drop_partition_zookeeper/ast.json b/parser/testdata/00516_deduplication_after_drop_partition_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00516_deduplication_after_drop_partition_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00516_deduplication_after_drop_partition_zookeeper/metadata.json b/parser/testdata/00516_deduplication_after_drop_partition_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00516_deduplication_after_drop_partition_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00516_deduplication_after_drop_partition_zookeeper/query.sql b/parser/testdata/00516_deduplication_after_drop_partition_zookeeper/query.sql new file mode 100644 index 000000000..fb996684d --- /dev/null +++ b/parser/testdata/00516_deduplication_after_drop_partition_zookeeper/query.sql @@ -0,0 +1,36 @@ +-- Tags: zookeeper + +DROP TABLE IF EXISTS deduplication_by_partition; +CREATE TABLE deduplication_by_partition(d Date, x UInt32) ENGINE = + ReplicatedMergeTree('/clickhouse/tables/{database}/test_00516/deduplication_by_partition', 'r1') order by x partition by toYYYYMM(d); + +INSERT INTO deduplication_by_partition VALUES ('2000-01-01', 1); +INSERT INTO deduplication_by_partition VALUES ('2000-01-01', 2), ('2000-01-01', 3); +INSERT INTO deduplication_by_partition VALUES ('2000-01-01', 1); +INSERT INTO deduplication_by_partition VALUES ('2000-01-01', 2), ('2000-01-01', 3); +INSERT INTO deduplication_by_partition VALUES ('2000-02-01', 3), ('2000-02-01', 4), ('2000-02-01', 5); +INSERT INTO deduplication_by_partition VALUES ('2000-02-01', 3), ('2000-02-01', 4), ('2000-02-01', 5); + +SELECT '*** Before DROP PARTITION ***'; + +SELECT * FROM deduplication_by_partition ORDER BY d, x; + +ALTER TABLE deduplication_by_partition DROP PARTITION 200001; + +SELECT '*** After DROP PARTITION ***'; + +SELECT * FROM deduplication_by_partition ORDER BY d, x; + +INSERT INTO deduplication_by_partition VALUES ('2000-01-01', 1); +INSERT INTO deduplication_by_partition VALUES ('2000-01-01', 1); +INSERT INTO deduplication_by_partition VALUES ('2000-01-01', 2), ('2000-01-01', 3); +INSERT INTO deduplication_by_partition VALUES ('2000-01-01', 2), ('2000-01-01', 3); +INSERT INTO deduplication_by_partition VALUES ('2000-01-01', 4); +INSERT INTO deduplication_by_partition VALUES ('2000-02-01', 3), ('2000-02-01', 4), ('2000-02-01', 5); +INSERT INTO deduplication_by_partition VALUES ('2000-02-01', 6), ('2000-02-01', 7); + +SELECT '*** After INSERT ***'; + +SELECT * FROM deduplication_by_partition ORDER BY d, x; + +DROP TABLE deduplication_by_partition; diff --git a/parser/testdata/00516_is_inf_nan/ast.json b/parser/testdata/00516_is_inf_nan/ast.json new file mode 100644 index 000000000..9a62a83c0 --- /dev/null +++ b/parser/testdata/00516_is_inf_nan/ast.json @@ -0,0 +1,205 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " 
ExpressionList (children 4)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function isFinite (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function isInfinite (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function isNaN (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 14)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Literal Float64_inf" + }, + { + "explain": " Literal Float64_-inf" + }, + { + "explain": " Literal Float64_nan" + }, + { + "explain": " Literal Float64_nan" + }, + { + "explain": " Function divide (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function divide (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function divide (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function divide (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal Float64_-0" + }, + { + "explain": " Function divide (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_-0" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function divide (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal Float64_-0" + }, + { + "explain": " Function divide (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Literal Float64_-0" + } + ], + + "rows": 61, + + "statistics": + { + "elapsed": 0.001265533, + "rows_read": 61, + "bytes_read": 2630 + } +} diff --git a/parser/testdata/00516_is_inf_nan/metadata.json b/parser/testdata/00516_is_inf_nan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00516_is_inf_nan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00516_is_inf_nan/query.sql b/parser/testdata/00516_is_inf_nan/query.sql new file mode 100644 index 000000000..e0754e921 --- /dev/null +++ b/parser/testdata/00516_is_inf_nan/query.sql @@ -0,0 +1,2 @@ +SELECT x, isFinite(x), isInfinite(x), isNaN(x) FROM (SELECT arrayJoin([0, 1, -1, inf, -inf, nan, -nan, 0 / 0, 1 / 0, -1 / 0, 0 
/ -0., -0. / 0, 1 / -0., -1 / -0.]) AS x); +SELECT x, isFinite(x), isInfinite(x), isNaN(x) FROM (SELECT toFloat32(arrayJoin([0, 1, -1, inf, -inf, nan, -nan, 0 / 0, 1 / 0, -1 / 0, 0 / -0., -0. / 0, 1 / -0., -1 / -0.])) AS x); \ No newline at end of file diff --git a/parser/testdata/00516_modulo/ast.json b/parser/testdata/00516_modulo/ast.json new file mode 100644 index 000000000..e588d12cd --- /dev/null +++ b/parser/testdata/00516_modulo/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1000" + }, + { + "explain": " Literal UInt64_32" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001851594, + "rows_read": 8, + "bytes_read": 293 + } +} diff --git a/parser/testdata/00516_modulo/metadata.json b/parser/testdata/00516_modulo/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00516_modulo/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00516_modulo/query.sql b/parser/testdata/00516_modulo/query.sql new file mode 100644 index 000000000..ceb255043 --- /dev/null +++ b/parser/testdata/00516_modulo/query.sql @@ -0,0 +1,13 @@ +SELECT 1000 % 32; +SELECT 7 % 3; +SELECT 255 % 510; +SELECT 255 % 512; +SELECT 255 % 1000000009; +SELECT 0 % 255; +SELECT 2147483647 % 255; +SELECT -1 % -1; +SELECT -1 % -2; +SELECT 255 % 99; +SELECT 42 % 13; +SELECT 42 % 22; +SELECT 1234567 % 123; diff --git a/parser/testdata/00517_date_parsing/ast.json b/parser/testdata/00517_date_parsing/ast.json new file mode 100644 index 000000000..a887c5d30 --- /dev/null +++ b/parser/testdata/00517_date_parsing/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier s" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (alias s) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['2017-01-02', '2017-1-02', '2017-01-2', '2017-1-2', '2017\/01\/02', '2017\/1\/02', '2017\/01\/2', '2017\/1\/2', '2017-11-12']" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001715085, + "rows_read": 18, + "bytes_read": 862 + } +} diff --git a/parser/testdata/00517_date_parsing/metadata.json b/parser/testdata/00517_date_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00517_date_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": 
true} diff --git a/parser/testdata/00517_date_parsing/query.sql b/parser/testdata/00517_date_parsing/query.sql new file mode 100644 index 000000000..4cfede7cb --- /dev/null +++ b/parser/testdata/00517_date_parsing/query.sql @@ -0,0 +1,15 @@ +SELECT toDate(s) FROM (SELECT arrayJoin(['2017-01-02', '2017-1-02', '2017-01-2', '2017-1-2', '2017/01/02', '2017/1/02', '2017/01/2', '2017/1/2', '2017-11-12']) AS s); + +DROP TABLE IF EXISTS date; +CREATE TABLE date (d Date) ENGINE = Memory; + +INSERT INTO date VALUES ('2017-01-02'), ('2017-1-02'), ('2017-01-2'), ('2017-1-2'), ('2017/01/02'), ('2017/1/02'), ('2017/01/2'), ('2017/1/2'), ('2017-11-12'); +SELECT * FROM date; + +INSERT INTO date FORMAT JSONEachRow {"d": "2017-01-02"}, {"d": "2017-1-02"}, {"d": "2017-01-2"}, {"d": "2017-1-2"}, {"d": "2017/01/02"}, {"d": "2017/1/02"}, {"d": "2017/01/2"}, {"d": "2017/1/2"}, {"d": "2017-11-12"}; + +SELECT * FROM date ORDER BY d; + +DROP TABLE date; + +WITH toDate('2000-01-01') + rand() % (30000) AS EventDate SELECT * FROM numbers(1000000) WHERE EventDate != toDate(concat(toString(toYear(EventDate)), '-', toString(toMonth(EventDate)), '-', toString(toDayOfMonth(EventDate)))); diff --git a/parser/testdata/00518_extract_all_and_empty_matches/ast.json b/parser/testdata/00518_extract_all_and_empty_matches/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00518_extract_all_and_empty_matches/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00518_extract_all_and_empty_matches/metadata.json b/parser/testdata/00518_extract_all_and_empty_matches/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00518_extract_all_and_empty_matches/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00518_extract_all_and_empty_matches/query.sql b/parser/testdata/00518_extract_all_and_empty_matches/query.sql new file mode 100644 index 000000000..1a23cb283 --- /dev/null +++ b/parser/testdata/00518_extract_all_and_empty_matches/query.sql @@ -0,0 +1,4 @@ +SELECT + '{"a":"1","b":"2","c":"","d":"4"}' AS json, + extractAll(json, '"([^"]*)":') AS keys, + extractAll(json, ':"([^"]*)"') AS values; diff --git a/parser/testdata/00519_create_as_select_from_temporary_table/ast.json b/parser/testdata/00519_create_as_select_from_temporary_table/ast.json new file mode 100644 index 000000000..0288b2bec --- /dev/null +++ b/parser/testdata/00519_create_as_select_from_temporary_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1_00519 (children 1)" + }, + { + "explain": " Identifier t1_00519" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001222884, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00519_create_as_select_from_temporary_table/metadata.json b/parser/testdata/00519_create_as_select_from_temporary_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00519_create_as_select_from_temporary_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00519_create_as_select_from_temporary_table/query.sql b/parser/testdata/00519_create_as_select_from_temporary_table/query.sql new file mode 100644 index 000000000..19a5f41f4 --- /dev/null +++ b/parser/testdata/00519_create_as_select_from_temporary_table/query.sql @@ -0,0 +1,5 @@ +DROP TEMPORARY TABLE IF EXISTS t1_00519; +DROP TEMPORARY TABLE IF EXISTS t3_00519; +CREATE 
TEMPORARY TABLE t1_00519 AS SELECT 1; +CREATE TEMPORARY TABLE t3_00519 AS SELECT * FROM t1_00519; +SELECT * FROM t3_00519; diff --git a/parser/testdata/00520_tuple_values_interpreter/ast.json b/parser/testdata/00520_tuple_values_interpreter/ast.json new file mode 100644 index 000000000..f17aa112d --- /dev/null +++ b/parser/testdata/00520_tuple_values_interpreter/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tuple (children 1)" + }, + { + "explain": " Identifier tuple" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001130574, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/00520_tuple_values_interpreter/metadata.json b/parser/testdata/00520_tuple_values_interpreter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00520_tuple_values_interpreter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00520_tuple_values_interpreter/query.sql b/parser/testdata/00520_tuple_values_interpreter/query.sql new file mode 100644 index 000000000..a36730012 --- /dev/null +++ b/parser/testdata/00520_tuple_values_interpreter/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS tuple; +CREATE TABLE tuple (t Tuple(Date, UInt32, UInt64)) ENGINE = Memory; +INSERT INTO tuple VALUES ((concat('2000', '-01-01'), /* Hello */ 12+3, 45+6)); + +SET input_format_values_interpret_expressions = 0; +INSERT INTO tuple VALUES (('2000-01-01', 123, 456)); + +SELECT * FROM tuple ORDER BY t; +DROP TABLE tuple; diff --git a/parser/testdata/00521_multidimensional/ast.json b/parser/testdata/00521_multidimensional/ast.json new file mode 100644 index 000000000..3dfd4d003 --- /dev/null +++ b/parser/testdata/00521_multidimensional/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery multidimensional (children 1)" + }, + { + "explain": " Identifier multidimensional" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001444248, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/00521_multidimensional/metadata.json b/parser/testdata/00521_multidimensional/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00521_multidimensional/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00521_multidimensional/query.sql b/parser/testdata/00521_multidimensional/query.sql new file mode 100644 index 000000000..f48faf72c --- /dev/null +++ b/parser/testdata/00521_multidimensional/query.sql @@ -0,0 +1,34 @@ +DROP TABLE IF EXISTS multidimensional; +CREATE TABLE multidimensional (x UInt64, arr Array(Array(String))) ENGINE = MergeTree ORDER BY x; + +INSERT INTO multidimensional VALUES (1, [['Hello', 'World'], ['Goodbye'], []]); +SELECT * FROM multidimensional; + +ALTER TABLE multidimensional ADD COLUMN t Tuple(String, Array(Nullable(String)), Tuple(UInt32, Date)); +INSERT INTO multidimensional (t) VALUES (('Hello', ['World', NULL], (123, '2000-01-01'))); +SELECT * FROM multidimensional ORDER BY t; + +OPTIMIZE TABLE multidimensional; +SELECT * FROM multidimensional ORDER BY t; + +DROP TABLE multidimensional; + +CREATE TABLE multidimensional (x UInt64, arr Array(Array(String)), t Tuple(String, Array(Nullable(String)), Tuple(UInt32, Date))) ENGINE = Memory; +INSERT INTO multidimensional VALUES (1, [['Hello', 'World'], ['Goodbye'], []], ('Hello', ['World', NULL], (123, '2000-01-01'))); 
+SELECT * FROM multidimensional ORDER BY t; +DROP TABLE multidimensional; + +CREATE TABLE multidimensional (x UInt64, arr Array(Array(String)), t Tuple(String, Array(Nullable(String)), Tuple(UInt32, Date))) ENGINE = TinyLog; +INSERT INTO multidimensional VALUES (1, [['Hello', 'World'], ['Goodbye'], []], ('Hello', ['World', NULL], (123, '2000-01-01'))); +SELECT * FROM multidimensional ORDER BY t; +DROP TABLE multidimensional; + +CREATE TABLE multidimensional (x UInt64, arr Array(Array(String)), t Tuple(String, Array(Nullable(String)), Tuple(UInt32, Date))) ENGINE = StripeLog; +INSERT INTO multidimensional VALUES (1, [['Hello', 'World'], ['Goodbye'], []], ('Hello', ['World', NULL], (123, '2000-01-01'))); +SELECT * FROM multidimensional ORDER BY t; +DROP TABLE multidimensional; + +CREATE TABLE multidimensional (x UInt64, arr Array(Array(String)), t Tuple(String, Array(Nullable(String)), Tuple(UInt32, Date))) ENGINE = Log; +INSERT INTO multidimensional VALUES (1, [['Hello', 'World'], ['Goodbye'], []], ('Hello', ['World', NULL], (123, '2000-01-01'))); +SELECT * FROM multidimensional ORDER BY t; +DROP TABLE multidimensional; diff --git a/parser/testdata/00522_multidimensional/ast.json b/parser/testdata/00522_multidimensional/ast.json new file mode 100644 index 000000000..edc31334a --- /dev/null +++ b/parser/testdata/00522_multidimensional/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery multidimensional (children 1)" + }, + { + "explain": " Identifier multidimensional" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001436906, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/00522_multidimensional/metadata.json b/parser/testdata/00522_multidimensional/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00522_multidimensional/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00522_multidimensional/query.sql b/parser/testdata/00522_multidimensional/query.sql new file mode 100644 index 000000000..ea9881c61 --- /dev/null +++ b/parser/testdata/00522_multidimensional/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS multidimensional; +CREATE TABLE multidimensional ENGINE = MergeTree ORDER BY number SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi' AS SELECT number, arrayMap(x -> (x, [x], [[x]], (x, toString(x))), arrayMap(x -> range(x), range(number % 10))) AS value FROM system.numbers LIMIT 100000; + +SELECT sum(cityHash64(toString(value))) FROM multidimensional; + +DROP TABLE multidimensional; diff --git a/parser/testdata/00523_aggregate_functions_in_group_array/ast.json b/parser/testdata/00523_aggregate_functions_in_group_array/ast.json new file mode 100644 index 000000000..91f218e93 --- /dev/null +++ b/parser/testdata/00523_aggregate_functions_in_group_array/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier key2" + }, + { + "explain": " Function arrayReduce (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'uniqExactMerge'" + }, + { + "explain": " Identifier arr" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001257939, + 
"rows_read": 9, + "bytes_read": 327 + } +} diff --git a/parser/testdata/00523_aggregate_functions_in_group_array/metadata.json b/parser/testdata/00523_aggregate_functions_in_group_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00523_aggregate_functions_in_group_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00523_aggregate_functions_in_group_array/query.sql b/parser/testdata/00523_aggregate_functions_in_group_array/query.sql new file mode 100644 index 000000000..438f1f199 --- /dev/null +++ b/parser/testdata/00523_aggregate_functions_in_group_array/query.sql @@ -0,0 +1,22 @@ +SELECT key2, arrayReduce('uniqExactMerge', arr) +FROM +( + SELECT + key1 % 3 AS key2, + groupArray(state) AS arr + FROM + ( + SELECT + number % 10 AS key1, + uniqExactState(number) AS state + FROM + ( + SELECT * + FROM system.numbers + LIMIT 100 + ) + GROUP BY key1 + ) + GROUP BY key2 +) +ORDER BY key2; diff --git a/parser/testdata/00524_time_intervals_months_underflow/ast.json b/parser/testdata/00524_time_intervals_months_underflow/ast.json new file mode 100644 index 000000000..823baeeef --- /dev/null +++ b/parser/testdata/00524_time_intervals_months_underflow/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2017-01-01 00:00:00'" + }, + { + "explain": " Function toIntervalMonth (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001297387, + "rows_read": 12, + "bytes_read": 495 + } +} diff --git a/parser/testdata/00524_time_intervals_months_underflow/metadata.json b/parser/testdata/00524_time_intervals_months_underflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00524_time_intervals_months_underflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00524_time_intervals_months_underflow/query.sql b/parser/testdata/00524_time_intervals_months_underflow/query.sql new file mode 100644 index 000000000..09c1ce9bf --- /dev/null +++ b/parser/testdata/00524_time_intervals_months_underflow/query.sql @@ -0,0 +1,70 @@ +SELECT toDateTime('2017-01-01 00:00:00') + INTERVAL 0 MONTH AS x; +SELECT toDateTime('2017-01-01 00:00:00') + INTERVAL 1 MONTH AS x; +SELECT toDateTime('2017-01-01 00:00:00') + INTERVAL 11 MONTH AS x; +SELECT toDateTime('2017-01-01 00:00:00') + INTERVAL 12 MONTH AS x; +SELECT toDateTime('2017-01-01 00:00:00') + INTERVAL 13 MONTH AS x; +SELECT toDateTime('2017-01-01 00:00:00') + INTERVAL -1 MONTH AS x; +SELECT toDateTime('2017-01-01 00:00:00') + INTERVAL -11 MONTH AS x; +SELECT toDateTime('2017-01-01 00:00:00') + INTERVAL -12 MONTH AS x; +SELECT toDateTime('2017-01-01 00:00:00') + INTERVAL -13 MONTH AS x; + +SELECT toDateTime('2017-01-01 00:00:00') - INTERVAL 0 MONTH AS x; +SELECT toDateTime('2017-01-01 00:00:00') - INTERVAL 1 MONTH AS x; +SELECT toDateTime('2017-01-01 00:00:00') - INTERVAL 11 MONTH AS x; +SELECT 
toDateTime('2017-01-01 00:00:00') - INTERVAL 12 MONTH AS x; +SELECT toDateTime('2017-01-01 00:00:00') - INTERVAL 13 MONTH AS x; +SELECT toDateTime('2017-01-01 00:00:00') - INTERVAL -1 MONTH AS x; +SELECT toDateTime('2017-01-01 00:00:00') - INTERVAL -11 MONTH AS x; +SELECT toDateTime('2017-01-01 00:00:00') - INTERVAL -12 MONTH AS x; +SELECT toDateTime('2017-01-01 00:00:00') - INTERVAL -13 MONTH AS x; + +SELECT toDate('2017-01-01') + INTERVAL 0 MONTH AS x; +SELECT toDate('2017-01-01') + INTERVAL 1 MONTH AS x; +SELECT toDate('2017-01-01') + INTERVAL 11 MONTH AS x; +SELECT toDate('2017-01-01') + INTERVAL 12 MONTH AS x; +SELECT toDate('2017-01-01') + INTERVAL 13 MONTH AS x; +SELECT toDate('2017-01-01') + INTERVAL -1 MONTH AS x; +SELECT toDate('2017-01-01') + INTERVAL -11 MONTH AS x; +SELECT toDate('2017-01-01') + INTERVAL -12 MONTH AS x; +SELECT toDate('2017-01-01') + INTERVAL -13 MONTH AS x; + +SELECT toDate('2017-01-01') - INTERVAL 0 MONTH AS x; +SELECT toDate('2017-01-01') - INTERVAL 1 MONTH AS x; +SELECT toDate('2017-01-01') - INTERVAL 11 MONTH AS x; +SELECT toDate('2017-01-01') - INTERVAL 12 MONTH AS x; +SELECT toDate('2017-01-01') - INTERVAL 13 MONTH AS x; +SELECT toDate('2017-01-01') - INTERVAL -1 MONTH AS x; +SELECT toDate('2017-01-01') - INTERVAL -11 MONTH AS x; +SELECT toDate('2017-01-01') - INTERVAL -12 MONTH AS x; +SELECT toDate('2017-01-01') - INTERVAL -13 MONTH AS x; + +SELECT toDateTime('2017-01-01 00:00:00') + INTERVAL 0 YEAR AS x; +SELECT toDateTime('2017-01-01 00:00:00') + INTERVAL 1 YEAR AS x; +SELECT toDateTime('2017-01-01 00:00:00') + INTERVAL -1 YEAR AS x; +SELECT toDateTime('2017-01-01 00:00:00') - INTERVAL 0 YEAR AS x; +SELECT toDateTime('2017-01-01 00:00:00') - INTERVAL 1 YEAR AS x; +SELECT toDateTime('2017-01-01 00:00:00') - INTERVAL -1 YEAR AS x; + +SELECT toDate('2017-01-01') + INTERVAL 0 YEAR AS x; +SELECT toDate('2017-01-01') + INTERVAL 1 YEAR AS x; +SELECT toDate('2017-01-01') + INTERVAL -1 YEAR AS x; +SELECT toDate('2017-01-01') - INTERVAL 0 YEAR AS x; +SELECT toDate('2017-01-01') - INTERVAL 1 YEAR AS x; +SELECT toDate('2017-01-01') - INTERVAL -1 YEAR AS x; + + +SELECT INTERVAL number - 15 MONTH + toDate('2017-01-01') AS x FROM system.numbers LIMIT 30; +SELECT toDate('2017-01-01') - INTERVAL number - 15 MONTH AS x FROM system.numbers LIMIT 30; + +SELECT INTERVAL number - 15 YEAR + toDate('2017-01-01') AS x FROM system.numbers LIMIT 30; +SELECT toDate('2017-01-01') - INTERVAL number - 15 YEAR AS x FROM system.numbers LIMIT 30; + + +SELECT toDate32('2217-01-01') + INTERVAL number * 20 - 100 DAY AS x FROM system.numbers LIMIT 10; +SELECT INTERVAL 100 - number * 20 DAY + toDate32('2217-01-01') AS x FROM system.numbers LIMIT 10; + +SELECT INTERVAL number * 4 - 20 MONTH + toDate32('2217-01-01') AS x FROM system.numbers LIMIT 10; +SELECT toDate32('2217-01-01') - INTERVAL number * 4 - 20 MONTH AS x FROM system.numbers LIMIT 10; + +SELECT INTERVAL number * 4 - 20 YEAR + toDate32('2217-01-01') AS x FROM system.numbers LIMIT 10; +SELECT toDate32('2217-01-01') - INTERVAL number * 4 - 20 YEAR AS x FROM system.numbers LIMIT 10; diff --git a/parser/testdata/00525_aggregate_functions_of_nullable_that_return_non_nullable/ast.json b/parser/testdata/00525_aggregate_functions_of_nullable_that_return_non_nullable/ast.json new file mode 100644 index 000000000..a76974b6e --- /dev/null +++ b/parser/testdata/00525_aggregate_functions_of_nullable_that_return_non_nullable/ast.json @@ -0,0 +1,151 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + 
"explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " Function groupArray (alias res1) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier res1" + }, + { + "explain": " Function avg (alias res2) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier res2" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias k)" + }, + { + "explain": " Function arrayJoin (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, NULL, UInt64_2]" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2 (alias k)" + }, + { + "explain": " Function CAST (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[NULL, NULL]" + }, + { + "explain": " Literal 'Nullable(UInt8)'" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier k" + } + ], + + "rows": 43, + + "statistics": + { + "elapsed": 0.001320637, + "rows_read": 43, + "bytes_read": 1791 + } +} diff --git a/parser/testdata/00525_aggregate_functions_of_nullable_that_return_non_nullable/metadata.json b/parser/testdata/00525_aggregate_functions_of_nullable_that_return_non_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00525_aggregate_functions_of_nullable_that_return_non_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00525_aggregate_functions_of_nullable_that_return_non_nullable/query.sql b/parser/testdata/00525_aggregate_functions_of_nullable_that_return_non_nullable/query.sql new file mode 100644 index 000000000..598de2e74 --- /dev/null +++ b/parser/testdata/00525_aggregate_functions_of_nullable_that_return_non_nullable/query.sql @@ -0,0 +1 @@ +SELECT k, groupArray(x) AS res1, toTypeName(res1), avg(x) AS res2, toTypeName(res2) FROM (SELECT 1 AS k, arrayJoin([1, NULL, 2]) AS x UNION ALL SELECT 2 AS k, CAST(arrayJoin([NULL, NULL]) AS Nullable(UInt8)) AS x) GROUP BY k ORDER BY k; diff --git a/parser/testdata/00526_array_join_with_arrays_of_nullable/ast.json b/parser/testdata/00526_array_join_with_arrays_of_nullable/ast.json new file mode 100644 index 
000000000..8e041c84b --- /dev/null +++ b/parser/testdata/00526_array_join_with_arrays_of_nullable/ast.json @@ -0,0 +1,109 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Function arrayJoin (alias z) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['a', NULL, 'b']" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " ArrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_1, NULL, UInt64_3] (alias x)" + }, + { + "explain": " Function array (alias y) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal Tuple_(NULL, '')" + }, + { + "explain": " Literal Tuple_(UInt64_123, 'Hello')" + }, + { + "explain": " Literal Tuple_(UInt64_456, NULL)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier z" + } + ], + + "rows": 29, + + "statistics": + { + "elapsed": 0.001697309, + "rows_read": 29, + "bytes_read": 1168 + } +} diff --git a/parser/testdata/00526_array_join_with_arrays_of_nullable/metadata.json b/parser/testdata/00526_array_join_with_arrays_of_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00526_array_join_with_arrays_of_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00526_array_join_with_arrays_of_nullable/query.sql b/parser/testdata/00526_array_join_with_arrays_of_nullable/query.sql new file mode 100644 index 000000000..50a8e2b7d --- /dev/null +++ b/parser/testdata/00526_array_join_with_arrays_of_nullable/query.sql @@ -0,0 +1 @@ +SELECT x, y, arrayJoin(['a', NULL, 'b']) AS z FROM system.one ARRAY JOIN [1, NULL, 3] AS x, [(NULL, ''), (123, 'Hello'), (456, NULL)] AS y order by x, y, z; diff --git a/parser/testdata/00527_totals_having_nullable/ast.json b/parser/testdata/00527_totals_having_nullable/ast.json new file mode 100644 index 000000000..0819c6755 --- /dev/null +++ b/parser/testdata/00527_totals_having_nullable/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (alias x) (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function notEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " 
ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001183358, + "rows_read": 12, + "bytes_read": 448 + } +} diff --git a/parser/testdata/00527_totals_having_nullable/metadata.json b/parser/testdata/00527_totals_having_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00527_totals_having_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00527_totals_having_nullable/query.sql b/parser/testdata/00527_totals_having_nullable/query.sql new file mode 100644 index 000000000..58966c334 --- /dev/null +++ b/parser/testdata/00527_totals_having_nullable/query.sql @@ -0,0 +1,2 @@ +SELECT count() AS x WITH TOTALS HAVING x != toNullable(0); +SELECT k, count() AS c FROM (SELECT number, CASE WHEN number < 10 THEN 'hello' WHEN number < 50 THEN 'world' ELSE 'goodbye' END AS k FROM system.numbers LIMIT 100) GROUP BY k WITH TOTALS HAVING nullIf(c, 10) < 50 ORDER BY c; diff --git a/parser/testdata/00528_const_of_nullable/ast.json b/parser/testdata/00528_const_of_nullable/ast.json new file mode 100644 index 000000000..f06612131 --- /dev/null +++ b/parser/testdata/00528_const_of_nullable/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function plus (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function toColumnTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001298948, + "rows_read": 16, + "bytes_read": 613 + } +} diff --git a/parser/testdata/00528_const_of_nullable/metadata.json b/parser/testdata/00528_const_of_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00528_const_of_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00528_const_of_nullable/query.sql b/parser/testdata/00528_const_of_nullable/query.sql new file mode 100644 index 000000000..bb130ad50 --- /dev/null +++ b/parser/testdata/00528_const_of_nullable/query.sql @@ -0,0 +1,6 @@ +SELECT toNullable(0) + 1 AS x, toTypeName(x), toColumnTypeName(x); +SELECT toNullable(materialize(0)) + 1 AS x, toTypeName(x), toColumnTypeName(x); +SELECT materialize(toNullable(0)) + 1 AS x, toTypeName(x), toColumnTypeName(x); +SELECT toNullable(0) + materialize(1) AS x, toTypeName(x), toColumnTypeName(x); +SELECT toNullable(materialize(0)) + materialize(1) AS x, toTypeName(x), toColumnTypeName(x); +SELECT materialize(toNullable(0)) + materialize(1) AS x, toTypeName(x), toColumnTypeName(x); diff --git a/parser/testdata/00529_orantius/ast.json b/parser/testdata/00529_orantius/ast.json new file mode 100644 index 000000000..cd5eb3d73 --- /dev/null +++ b/parser/testdata/00529_orantius/ast.json @@ -0,0 +1,43 @@ +{ + 
"meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal NULL" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001627961, + "rows_read": 7, + "bytes_read": 259 + } +} diff --git a/parser/testdata/00529_orantius/metadata.json b/parser/testdata/00529_orantius/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00529_orantius/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00529_orantius/query.sql b/parser/testdata/00529_orantius/query.sql new file mode 100644 index 000000000..87dbd6ea9 --- /dev/null +++ b/parser/testdata/00529_orantius/query.sql @@ -0,0 +1,17 @@ +SELECT toNullable(NULL); +SELECT [toNullable(NULL)]; +SELECT CAST(1 AS Nullable(UInt8)); +SELECT CAST(materialize(1) AS Nullable(UInt8)); +SELECT [[1], [-1]]; +SELECT groupArray(NULL); +SELECT [1, 2, 3][toInt8(1)]; +select arrayReduce('sumMerge', [sumState(y)]), finalizeAggregation(sumState(y)) from (select toNullable(42) as y); +SELECT arrayConcat([NULL, ''], [''], [NULL]); +SELECT CAST(NULL AS Nullable(UUID)); +SELECT arrayMap(x -> substring('abc', x), [1, 2, 3]); +select 1 as x where 1 as x; +select 1 from (select 1) having count() > 0 as x; +select a.1-1 FROM (SELECT (1, 2) AS a); +select a.1-1,a.1+1 FROM (SELECT (1, 2) AS a); +SELECT defaultValueOfArgumentType([1])[1]; +SELECT groupArrayInsertAt([2, 2], 10)([1, 1], 2); diff --git a/parser/testdata/00530_arrays_of_nothing/ast.json b/parser/testdata/00530_arrays_of_nothing/ast.json new file mode 100644 index 000000000..e0f56c419 --- /dev/null +++ b/parser/testdata/00530_arrays_of_nothing/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.00127361, + "rows_read": 14, + "bytes_read": 554 + } +} diff --git a/parser/testdata/00530_arrays_of_nothing/metadata.json b/parser/testdata/00530_arrays_of_nothing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00530_arrays_of_nothing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00530_arrays_of_nothing/query.sql b/parser/testdata/00530_arrays_of_nothing/query.sql new file mode 100644 index 000000000..f17d8053a --- /dev/null +++ b/parser/testdata/00530_arrays_of_nothing/query.sql @@ -0,0 +1,13 @@ +SELECT [[[[],[]]]]; +SELECT [[1], []]; +SELECT [[[[],['']]]]; +SELECT concat([], ['Hello'], []); 
+SELECT arrayPushBack([], 1), arrayPushFront([[]], []); + +DROP TABLE IF EXISTS arr; +CREATE TABLE arr (x Array(String), y Nullable(String), z Array(Array(Nullable(String)))) ENGINE = TinyLog; + +INSERT INTO arr SELECT [], NULL, [[], [NULL], [NULL, 'Hello']]; +SELECT * FROM arr; + +DROP TABLE arr; diff --git a/parser/testdata/00531_aggregate_over_nullable/ast.json b/parser/testdata/00531_aggregate_over_nullable/ast.json new file mode 100644 index 000000000..13e7343ce --- /dev/null +++ b/parser/testdata/00531_aggregate_over_nullable/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery agg_over_nullable (children 1)" + }, + { + "explain": " Identifier agg_over_nullable" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001108095, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/00531_aggregate_over_nullable/metadata.json b/parser/testdata/00531_aggregate_over_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00531_aggregate_over_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00531_aggregate_over_nullable/query.sql b/parser/testdata/00531_aggregate_over_nullable/query.sql new file mode 100644 index 000000000..1680bb90b --- /dev/null +++ b/parser/testdata/00531_aggregate_over_nullable/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS agg_over_nullable; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE agg_over_nullable ( + partition Date, + timestamp DateTime, + user_id Nullable(UInt32), + description Nullable(String) +) ENGINE = MergeTree(partition, timestamp, 8192); + +INSERT INTO agg_over_nullable(partition, timestamp, user_id, description) VALUES(now(), now(), 1, 'ss'); +INSERT INTO agg_over_nullable(partition, timestamp, user_id, description) VALUES(now(), now(), 1, NULL); +INSERT INTO agg_over_nullable(partition, timestamp, user_id, description) VALUES(now(), now(), 1, 'aa'); + +SELECT arraySort(groupUniqArray(description)) FROM agg_over_nullable; +SELECT arraySort(topK(3)(description)) FROM agg_over_nullable; + +DROP TABLE agg_over_nullable; diff --git a/parser/testdata/00532_topk_generic/ast.json b/parser/testdata/00532_topk_generic/ast.json new file mode 100644 index 000000000..d34eada90 --- /dev/null +++ b/parser/testdata/00532_topk_generic/ast.json @@ -0,0 +1,217 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " Function arraySort (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function topK (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier v" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (alias k) (children 1)" + }, + { + "explain": " 
ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Function arrayMap (alias v) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function intDiv (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_13" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier k" + } + ], + + "rows": 65, + + "statistics": + { + "elapsed": 0.00166912, + "rows_read": 65, + "bytes_read": 2939 + } +} diff --git a/parser/testdata/00532_topk_generic/metadata.json b/parser/testdata/00532_topk_generic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00532_topk_generic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00532_topk_generic/query.sql b/parser/testdata/00532_topk_generic/query.sql new file mode 100644 index 000000000..d51ae2519 --- /dev/null +++ b/parser/testdata/00532_topk_generic/query.sql @@ -0,0 +1 @@ +SELECT k, arraySort(topK(v)) FROM (SELECT number % 10 AS k, arrayMap(x -> arrayMap(x -> x = 0 ? 
NULL : toString(x), range(x)), range(intDiv(number, 13))) AS v FROM system.numbers LIMIT 100) GROUP BY k ORDER BY k; diff --git a/parser/testdata/00533_uniq_array/ast.json b/parser/testdata/00533_uniq_array/ast.json new file mode 100644 index 000000000..22ccb34c2 --- /dev/null +++ b/parser/testdata/00533_uniq_array/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function uniqArray (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal Array_[UInt64_0, UInt64_1, UInt64_1]" + }, + { + "explain": " Literal Array_[UInt64_0, UInt64_1, UInt64_1]" + }, + { + "explain": " Literal Array_[UInt64_0, UInt64_1, UInt64_1]" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001172759, + "rows_read": 9, + "bytes_read": 406 + } +} diff --git a/parser/testdata/00533_uniq_array/metadata.json b/parser/testdata/00533_uniq_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00533_uniq_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00533_uniq_array/query.sql b/parser/testdata/00533_uniq_array/query.sql new file mode 100644 index 000000000..653d47cad --- /dev/null +++ b/parser/testdata/00533_uniq_array/query.sql @@ -0,0 +1,6 @@ +SELECT uniqArray([0, 1, 1], [0, 1, 1], [0, 1, 1]); +SELECT uniqArray([0, 1, 1], [0, 1, 1], [0, 1, 0]); +SELECT uniqExactArray([0, 1, 1], [0, 1, 1], [0, 1, 1]); +SELECT uniqExactArray([0, 1, 1], [0, 1, 1], [0, 1, 0]); +SELECT uniqUpToArray(10)([0, 1, 1], [0, 1, 1], [0, 1, 1]); +SELECT uniqUpToArray(10)([0, 1, 1], [0, 1, 1], [0, 1, 0]); diff --git a/parser/testdata/00534_exp10/ast.json b/parser/testdata/00534_exp10/ast.json new file mode 100644 index 000000000..0780c59e3 --- /dev/null +++ b/parser/testdata/00534_exp10/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function exp10 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function minus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_500" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1000" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001421564, + "rows_read": 16, + "bytes_read": 616 + } +} diff --git a/parser/testdata/00534_exp10/metadata.json b/parser/testdata/00534_exp10/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00534_exp10/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00534_exp10/query.sql b/parser/testdata/00534_exp10/query.sql new file mode 100644 index 000000000..f2836fe06 --- /dev/null +++ 
b/parser/testdata/00534_exp10/query.sql @@ -0,0 +1,2 @@ +SELECT number, exp10(number - 500) FROM system.numbers LIMIT 1000; +SELECT exp10(nan); diff --git a/parser/testdata/00535_parse_float_scientific/ast.json b/parser/testdata/00535_parse_float_scientific/ast.json new file mode 100644 index 000000000..a1ebf2b81 --- /dev/null +++ b/parser/testdata/00535_parse_float_scientific/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery float (children 1)" + }, + { + "explain": " Identifier float" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001237961, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/00535_parse_float_scientific/metadata.json b/parser/testdata/00535_parse_float_scientific/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00535_parse_float_scientific/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00535_parse_float_scientific/query.sql b/parser/testdata/00535_parse_float_scientific/query.sql new file mode 100644 index 000000000..8363050be --- /dev/null +++ b/parser/testdata/00535_parse_float_scientific/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS float; +CREATE TABLE float (x Float64) ENGINE = Log; + +INSERT INTO float VALUES (1e7); +SELECT * FROM float; + +DROP TABLE float; diff --git a/parser/testdata/00536_int_exp/ast.json b/parser/testdata/00536_int_exp/ast.json new file mode 100644 index 000000000..bae7eaf17 --- /dev/null +++ b/parser/testdata/00536_int_exp/ast.json @@ -0,0 +1,127 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 6)" + }, + { + "explain": " Function exp2 (alias e2d) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function intExp2 (alias e2i) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function equals (alias e2eq) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toUInt64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier e2d" + }, + { + "explain": " Identifier e2i" + }, + { + "explain": " Function exp10 (alias e10d) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function intExp10 (alias e10i) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function equals (alias e10eq) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier e10d" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier e10i" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " 
Literal UInt64_64" + } + ], + + "rows": 35, + + "statistics": + { + "elapsed": 0.001815934, + "rows_read": 35, + "bytes_read": 1415 + } +} diff --git a/parser/testdata/00536_int_exp/metadata.json b/parser/testdata/00536_int_exp/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00536_int_exp/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00536_int_exp/query.sql b/parser/testdata/00536_int_exp/query.sql new file mode 100644 index 000000000..80b88e8f4 --- /dev/null +++ b/parser/testdata/00536_int_exp/query.sql @@ -0,0 +1 @@ +SELECT exp2(number) AS e2d, intExp2(number) AS e2i, toUInt64(e2d) = e2i AS e2eq, exp10(number) AS e10d, intExp10(number) AS e10i, toString(e10d) = toString(e10i) AS e10eq FROM system.numbers LIMIT 64; diff --git a/parser/testdata/00536_int_exp_overflow/ast.json b/parser/testdata/00536_int_exp_overflow/ast.json new file mode 100644 index 000000000..3b3f1205c --- /dev/null +++ b/parser/testdata/00536_int_exp_overflow/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'intExp2:'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.00141966, + "rows_read": 5, + "bytes_read": 179 + } +} diff --git a/parser/testdata/00536_int_exp_overflow/metadata.json b/parser/testdata/00536_int_exp_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00536_int_exp_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00536_int_exp_overflow/query.sql b/parser/testdata/00536_int_exp_overflow/query.sql new file mode 100644 index 000000000..5526e8721 --- /dev/null +++ b/parser/testdata/00536_int_exp_overflow/query.sql @@ -0,0 +1,9 @@ +SELECT 'intExp2:'; +SELECT arrayJoin([-inf, -1000.5, -1000, -2.5, -2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2, 2.5, 62, 62.5, 63, 63.5, 64, 64.5, 65, 65.5, 1000, 1000.5, inf]) as x, toTypeName(x), intExp2(x); +SELECT arrayJoin([-1000, -2, -1, 0, 1, 2, 62, 63, 64, 65, 1000]) as x, toTypeName(x), intExp2(x); +SELECT intExp2(nan); -- { serverError BAD_ARGUMENTS } + +SELECT 'intExp10:'; +SELECT arrayJoin([-inf, -1000.5, -1000, -2.5, -2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2, 2.5, 18, 18.5, 19, 19.5, 20, 20.5, 21, 21.5, 1000, 1000.5, inf]) as x, toTypeName(x), intExp10(x); +SELECT arrayJoin([-1000, -2, -1, 0, 1, 2, 18, 19, 20, 21, 1000]) as x, toTypeName(x), intExp10(x); +SELECT intExp10(nan); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/00537_quarters/ast.json b/parser/testdata/00537_quarters/ast.json new file mode 100644 index 000000000..d31e584ae --- /dev/null +++ b/parser/testdata/00537_quarters/ast.json @@ -0,0 +1,169 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 8)" + }, + { + "explain": " Function plus (alias d) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDate (alias base) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2017-01-01'" + }, + { + 
"explain": " Function toIntervalMonth (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function toDateTime (alias t) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier d" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " Function toQuarter (alias qd) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier d" + }, + { + "explain": " Function toQuarter (alias qt) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Function toStartOfQuarter (alias sqd) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier d" + }, + { + "explain": " Function toStartOfQuarter (alias sqt) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Function minus (alias qdiff_d) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toRelativeQuarterNum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier d" + }, + { + "explain": " Function toRelativeQuarterNum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier base" + }, + { + "explain": " Function minus (alias qdiff_t) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toRelativeQuarterNum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Function toRelativeQuarterNum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier base" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_24" + } + ], + + "rows": 49, + + "statistics": + { + "elapsed": 0.001900929, + "rows_read": 49, + "bytes_read": 2046 + } +} diff --git a/parser/testdata/00537_quarters/metadata.json b/parser/testdata/00537_quarters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00537_quarters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00537_quarters/query.sql b/parser/testdata/00537_quarters/query.sql new file mode 100644 index 000000000..8bab10095 --- /dev/null +++ b/parser/testdata/00537_quarters/query.sql @@ -0,0 +1 @@ +SELECT (toDate('2017-01-01') AS base) + INTERVAL number MONTH AS d, toDateTime(d, 'UTC') AS t, toQuarter(d) AS qd, toQuarter(t) AS qt, toStartOfQuarter(d) AS sqd, toStartOfQuarter(t) AS sqt, toRelativeQuarterNum(d) - toRelativeQuarterNum(base) AS qdiff_d, toRelativeQuarterNum(t) - toRelativeQuarterNum(base) as qdiff_t FROM system.numbers LIMIT 24; diff --git a/parser/testdata/00538_datediff/ast.json b/parser/testdata/00538_datediff/ast.json new file mode 100644 index 000000000..9079398da --- /dev/null +++ b/parser/testdata/00538_datediff/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery 
(children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Various intervals'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001412231, + "rows_read": 5, + "bytes_read": 188 + } +} diff --git a/parser/testdata/00538_datediff/metadata.json b/parser/testdata/00538_datediff/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00538_datediff/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00538_datediff/query.sql b/parser/testdata/00538_datediff/query.sql new file mode 100644 index 000000000..5dc416ad0 --- /dev/null +++ b/parser/testdata/00538_datediff/query.sql @@ -0,0 +1,76 @@ +SELECT 'Various intervals'; + +SELECT dateDiff('year', toDate('2017-12-31'), toDate('2016-01-01')); +SELECT dateDiff('year', toDate('2017-12-31'), toDate('2017-01-01')); +SELECT dateDiff('year', toDate('2017-12-31'), toDate('2018-01-01')); +SELECT dateDiff('quarter', toDate('2017-12-31'), toDate('2016-01-01')); +SELECT dateDiff('quarter', toDate('2017-12-31'), toDate('2017-01-01')); +SELECT dateDiff('quarter', toDate('2017-12-31'), toDate('2018-01-01')); +SELECT dateDiff('month', toDate('2017-12-31'), toDate('2016-01-01')); +SELECT dateDiff('month', toDate('2017-12-31'), toDate('2017-01-01')); +SELECT dateDiff('month', toDate('2017-12-31'), toDate('2018-01-01')); +SELECT dateDiff('week', toDate('2017-12-31'), toDate('2016-01-01')); +SELECT dateDiff('week', toDate('2017-12-31'), toDate('2017-01-01')); +SELECT dateDiff('week', toDate('2017-12-31'), toDate('2018-01-01')); +SELECT dateDiff('day', toDate('2017-12-31'), toDate('2016-01-01')); +SELECT dateDiff('day', toDate('2017-12-31'), toDate('2017-01-01')); +SELECT dateDiff('day', toDate('2017-12-31'), toDate('2018-01-01')); +SELECT dateDiff('hour', toDate('2017-12-31'), toDate('2016-01-01'), 'UTC'); +SELECT dateDiff('hour', toDate('2017-12-31'), toDate('2017-01-01'), 'UTC'); +SELECT dateDiff('hour', toDate('2017-12-31'), toDate('2018-01-01'), 'UTC'); +SELECT dateDiff('minute', toDate('2017-12-31'), toDate('2016-01-01'), 'UTC'); +SELECT dateDiff('minute', toDate('2017-12-31'), toDate('2017-01-01'), 'UTC'); +SELECT dateDiff('minute', toDate('2017-12-31'), toDate('2018-01-01'), 'UTC'); +SELECT dateDiff('second', toDate('2017-12-31'), toDate('2016-01-01'), 'UTC'); +SELECT dateDiff('second', toDate('2017-12-31'), toDate('2017-01-01'), 'UTC'); +SELECT dateDiff('second', toDate('2017-12-31'), toDate('2018-01-01'), 'UTC'); + +SELECT 'Date and DateTime arguments'; + +SELECT dateDiff('second', toDate('2017-12-31'), toDateTime('2016-01-01 00:00:00', 'UTC'), 'UTC'); +SELECT dateDiff('second', toDateTime('2017-12-31 00:00:00', 'UTC'), toDate('2017-01-01'), 'UTC'); +SELECT dateDiff('second', toDateTime('2017-12-31 00:00:00', 'UTC'), toDateTime('2018-01-01 00:00:00', 'UTC')); + +SELECT 'Constant and non-constant arguments'; + +SELECT dateDiff('minute', materialize(toDate('2017-12-31')), toDate('2016-01-01'), 'UTC'); +SELECT dateDiff('minute', toDate('2017-12-31'), materialize(toDate('2017-01-01')), 'UTC'); +SELECT dateDiff('minute', materialize(toDate('2017-12-31')), materialize(toDate('2018-01-01')), 'UTC'); + +SELECT 'Case insensitive'; + +SELECT DATEDIFF('year', today(), today() - INTERVAL 10 YEAR); + +SELECT 'Dependance of timezones'; + +SELECT dateDiff('month', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul'); +SELECT dateDiff('week', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul'); +SELECT dateDiff('day', toDate('2014-10-26'), 
toDate('2014-10-27'), 'Asia/Istanbul'); +SELECT dateDiff('hour', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul'); +SELECT dateDiff('minute', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul'); +SELECT dateDiff('second', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul'); + +SELECT dateDiff('month', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC'); +SELECT dateDiff('week', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC'); +SELECT dateDiff('day', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC'); +SELECT dateDiff('hour', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC'); +SELECT dateDiff('minute', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC'); +SELECT dateDiff('second', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC'); + +SELECT dateDiff('month', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul')); +SELECT dateDiff('week', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul')); +SELECT dateDiff('day', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul')); +SELECT dateDiff('hour', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul')); +SELECT dateDiff('minute', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul')); +SELECT dateDiff('second', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul')); + +SELECT dateDiff('month', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC')); +SELECT dateDiff('week', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC')); +SELECT dateDiff('day', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC')); +SELECT dateDiff('hour', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC')); +SELECT dateDiff('minute', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC')); +SELECT dateDiff('second', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC')); + +SELECT 'Additional test'; + +SELECT number = dateDiff('month', now() - INTERVAL number MONTH, now()) FROM system.numbers LIMIT 10; diff --git a/parser/testdata/00538_datediff_plural_units/ast.json b/parser/testdata/00538_datediff_plural_units/ast.json new file mode 100644 index 000000000..dd54dad1a --- /dev/null +++ b/parser/testdata/00538_datediff_plural_units/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function dateDiff (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'years'" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2017-12-31'" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2016-01-01'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001050687, + "rows_read": 13, + "bytes_read": 500 + } +} diff --git 
a/parser/testdata/00538_datediff_plural_units/metadata.json b/parser/testdata/00538_datediff_plural_units/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00538_datediff_plural_units/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00538_datediff_plural_units/query.sql b/parser/testdata/00538_datediff_plural_units/query.sql new file mode 100644 index 000000000..dd8395fc6 --- /dev/null +++ b/parser/testdata/00538_datediff_plural_units/query.sql @@ -0,0 +1,11 @@ +SELECT dateDiff('years', toDate('2017-12-31'), toDate('2016-01-01')); +SELECT dateDiff('quarters', toDate('2017-12-31'), toDate('2016-01-01')); +SELECT dateDiff('months', toDateTime('2017-12-31'), toDateTime('2016-01-01')); +SELECT dateDiff('weeks', toDateTime('2017-12-31'), toDateTime('2016-01-01')); +SELECT dateDiff('days', toDateTime('2017-12-31'), toDateTime('2016-01-01')); +SELECT dateDiff('hours', toDateTime('2017-12-31', 'UTC'), toDateTime('2016-01-01', 'UTC')); +SELECT dateDiff('minutes', toDateTime('2017-12-31', 'UTC'), toDateTime('2016-01-01', 'UTC')); +SELECT dateDiff('seconds', toDateTime('2017-12-31', 'UTC'), toDateTime('2016-01-01', 'UTC')); +SELECT dateDiff('milliseconds', toDateTime('2017-12-31', 'UTC'), toDateTime('2016-01-01', 'UTC')); +SELECT dateDiff('microseconds', toDateTime('2017-12-31', 'UTC'), toDateTime('2016-01-01', 'UTC')); +SELECT dateDiff('nanoseconds', toDateTime('2017-12-31', 'UTC'), toDateTime('2016-01-01', 'UTC')); diff --git a/parser/testdata/00539_functions_for_working_with_json/ast.json b/parser/testdata/00539_functions_for_working_with_json/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00539_functions_for_working_with_json/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00539_functions_for_working_with_json/metadata.json b/parser/testdata/00539_functions_for_working_with_json/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00539_functions_for_working_with_json/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00539_functions_for_working_with_json/query.sql b/parser/testdata/00539_functions_for_working_with_json/query.sql new file mode 100644 index 000000000..59d00058a --- /dev/null +++ b/parser/testdata/00539_functions_for_working_with_json/query.sql @@ -0,0 +1,26 @@ + +-- VisitParam with basic type +SELECT visitParamExtractInt('{"myparam":-1}', 'myparam'); +SELECT visitParamExtractUInt('{"myparam":-1}', 'myparam'); +SELECT visitParamExtractFloat('{"myparam":null}', 'myparam'); +SELECT visitParamExtractFloat('{"myparam":-1}', 'myparam'); +SELECT visitParamExtractBool('{"myparam":true}', 'myparam'); +SELECT visitParamExtractString('{"myparam":"test_string"}', 'myparam'); +SELECT visitParamExtractString('{"myparam":"test\\"string"}', 'myparam'); +-- VisitParam with complex type +SELECT visitParamExtractRaw('{"myparam":"test_string"}', 'myparam'); +SELECT visitParamExtractRaw('{"myparam": "test_string"}', 'myparam'); +SELECT visitParamExtractRaw('{"myparam": "test\\"string"}', 'myparam'); +SELECT visitParamExtractRaw('{"myparam": "test\\"string", "other":123}', 'myparam'); +SELECT visitParamExtractRaw('{"myparam": "{"}', 'myparam'); +SELECT visitParamExtractRaw('{"myparam": "["}', 'myparam'); +SELECT visitParamExtractRaw('{"myparam": ["]", "2", "3"], "other":123}', 'myparam'); +SELECT visitParamExtractRaw('{"myparam": {"nested" : [1,2,3]}, "other":123}', 'myparam'); + +SELECT 
simpleJSONExtractInt('{"myparam":-1}', 'myparam'); +SELECT simpleJSONExtractUInt('{"myparam":-1}', 'myparam'); +SELECT simpleJSONExtractFloat('{"myparam":null}', 'myparam'); +SELECT simpleJSONExtractFloat('{"myparam":-1}', 'myparam'); +SELECT simpleJSONExtractBool('{"myparam":true}', 'myparam'); +SELECT simpleJSONExtractString('{"myparam":"test_string"}', 'myparam'); +SELECT simpleJSONExtractString('{"myparam":"test\\"string"}', 'myparam'); diff --git a/parser/testdata/00541_kahan_sum/ast.json b/parser/testdata/00541_kahan_sum/ast.json new file mode 100644 index 000000000..a38ab43bf --- /dev/null +++ b/parser/testdata/00541_kahan_sum/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1000" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001818126, + "rows_read": 21, + "bytes_read": 874 + } +} diff --git a/parser/testdata/00541_kahan_sum/metadata.json b/parser/testdata/00541_kahan_sum/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00541_kahan_sum/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00541_kahan_sum/query.sql b/parser/testdata/00541_kahan_sum/query.sql new file mode 100644 index 000000000..0eef1b747 --- /dev/null +++ b/parser/testdata/00541_kahan_sum/query.sql @@ -0,0 +1,4 @@ +SELECT sum(1) FROM (SELECT * FROM system.numbers LIMIT 1000); +SELECT sumWithOverflow(1) FROM (SELECT * FROM system.numbers LIMIT 1000); +SELECT sumKahan(1e100) - 1e100 * 1000 FROM (SELECT * FROM system.numbers LIMIT 1000); +SELECT abs(sum(1e100) - 1e100 * 1000) > 1 FROM (SELECT * FROM system.numbers LIMIT 1000); diff --git a/parser/testdata/00541_to_start_of_fifteen_minutes/ast.json b/parser/testdata/00541_to_start_of_fifteen_minutes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00541_to_start_of_fifteen_minutes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00541_to_start_of_fifteen_minutes/metadata.json b/parser/testdata/00541_to_start_of_fifteen_minutes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00541_to_start_of_fifteen_minutes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00541_to_start_of_fifteen_minutes/query.sql b/parser/testdata/00541_to_start_of_fifteen_minutes/query.sql new file mode 100644 index 000000000..0c20670fa --- 
/dev/null +++ b/parser/testdata/00541_to_start_of_fifteen_minutes/query.sql @@ -0,0 +1,8 @@ +SELECT + DISTINCT result +FROM ( + SELECT + toStartOfFifteenMinutes(toDateTime('2017-12-25 00:00:00') + number * 60) AS result + FROM system.numbers + LIMIT 120 +) ORDER BY result diff --git a/parser/testdata/00542_access_to_temporary_table_in_readonly_mode/ast.json b/parser/testdata/00542_access_to_temporary_table_in_readonly_mode/ast.json new file mode 100644 index 000000000..5704b0506 --- /dev/null +++ b/parser/testdata/00542_access_to_temporary_table_in_readonly_mode/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001332775, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00542_access_to_temporary_table_in_readonly_mode/metadata.json b/parser/testdata/00542_access_to_temporary_table_in_readonly_mode/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00542_access_to_temporary_table_in_readonly_mode/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00542_access_to_temporary_table_in_readonly_mode/query.sql b/parser/testdata/00542_access_to_temporary_table_in_readonly_mode/query.sql new file mode 100644 index 000000000..8b3683619 --- /dev/null +++ b/parser/testdata/00542_access_to_temporary_table_in_readonly_mode/query.sql @@ -0,0 +1,19 @@ +SET readonly = 2; + +CREATE TEMPORARY TABLE readonly00542 ( + ID Int +) Engine = MergeTree ORDER BY tuple(); + +INSERT INTO readonly00542 (ID) + VALUES (1), (2), (3), (4), (5); + +SELECT ID FROM readonly00542 ORDER BY ID; + +INSERT INTO readonly00542 (ID) + SELECT CAST(number * 10 AS Int) FROM system.numbers LIMIT 10; + +SELECT '---'; + +SELECT ID FROM readonly00542 ORDER BY ID; + +DROP TEMPORARY TABLE readonly00542; diff --git a/parser/testdata/00542_materialized_view_and_time_zone_tag/ast.json b/parser/testdata/00542_materialized_view_and_time_zone_tag/ast.json new file mode 100644 index 000000000..c7856a98b --- /dev/null +++ b/parser/testdata/00542_materialized_view_and_time_zone_tag/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery m3 (children 1)" + }, + { + "explain": " Identifier m3" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001303031, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/00542_materialized_view_and_time_zone_tag/metadata.json b/parser/testdata/00542_materialized_view_and_time_zone_tag/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00542_materialized_view_and_time_zone_tag/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00542_materialized_view_and_time_zone_tag/query.sql b/parser/testdata/00542_materialized_view_and_time_zone_tag/query.sql new file mode 100644 index 000000000..88808ac20 --- /dev/null +++ b/parser/testdata/00542_materialized_view_and_time_zone_tag/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS m3; +DROP TABLE IF EXISTS m1; +DROP TABLE IF EXISTS x; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE x (d Date, t DateTime) ENGINE = MergeTree(d, (d, t), 1); + +CREATE MATERIALIZED VIEW m1 (d Date, t DateTime, c UInt64) ENGINE = SummingMergeTree(d, (d, t), 1) AS SELECT d, toStartOfMinute(x.t) as t, count() as c FROM x GROUP BY d, t; + +CREATE MATERIALIZED VIEW m3 ENGINE = 
SummingMergeTree(d, (d, t), 1) AS SELECT d, toStartOfHour(m1.t) as t, c FROM m1; + +INSERT INTO x VALUES (today(), now()); +INSERT INTO x VALUES (today(), now()); + +OPTIMIZE TABLE m3; + +DROP TABLE m3; +DROP TABLE m1; +DROP TABLE x; diff --git a/parser/testdata/00543_null_and_prewhere/ast.json b/parser/testdata/00543_null_and_prewhere/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00543_null_and_prewhere/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00543_null_and_prewhere/metadata.json b/parser/testdata/00543_null_and_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00543_null_and_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00543_null_and_prewhere/query.sql b/parser/testdata/00543_null_and_prewhere/query.sql new file mode 100644 index 000000000..5f5039786 --- /dev/null +++ b/parser/testdata/00543_null_and_prewhere/query.sql @@ -0,0 +1,19 @@ + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE test +( + dt Date, + id UInt32, + val Nullable(UInt32) +) +ENGINE = MergeTree(dt, id, 8192); + +insert into test (dt, id, val) values ('2017-01-01', 1, 10); +insert into test (dt, id, val) values ('2017-01-01', 1, null); +insert into test (dt, id, val) values ('2017-01-01', 1, 0); + +SELECT count() +FROM test +WHERE val = 0; + +DROP TABLE IF EXISTS test; diff --git a/parser/testdata/00544_agg_foreach_of_two_arg/ast.json b/parser/testdata/00544_agg_foreach_of_two_arg/ast.json new file mode 100644 index 000000000..0931fda2b --- /dev/null +++ b/parser/testdata/00544_agg_foreach_of_two_arg/ast.json @@ -0,0 +1,151 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function sumForEach (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier arr" + }, + { + "explain": " Function sumForEachIf (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier arr" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier arr" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function sumIfForEach (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier arr" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function notEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Identifier arr" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression 
(children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (alias arr) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[Array_[UInt64_1, UInt64_2, UInt64_3], Array_[UInt64_4, UInt64_5, UInt64_6]]" + } + ], + + "rows": 43, + + "statistics": + { + "elapsed": 0.00134122, + "rows_read": 43, + "bytes_read": 1822 + } +} diff --git a/parser/testdata/00544_agg_foreach_of_two_arg/metadata.json b/parser/testdata/00544_agg_foreach_of_two_arg/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00544_agg_foreach_of_two_arg/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00544_agg_foreach_of_two_arg/query.sql b/parser/testdata/00544_agg_foreach_of_two_arg/query.sql new file mode 100644 index 000000000..c07de77a3 --- /dev/null +++ b/parser/testdata/00544_agg_foreach_of_two_arg/query.sql @@ -0,0 +1 @@ +SELECT sumForEach(arr), sumForEachIf(arr, arr[1] = 1), sumIfForEach(arr, arrayMap(x -> x != 5, arr)) FROM (SELECT arrayJoin([[1, 2, 3], [4, 5, 6]]) AS arr); diff --git a/parser/testdata/00544_insert_with_select/ast.json b/parser/testdata/00544_insert_with_select/ast.json new file mode 100644 index 000000000..029fd8108 --- /dev/null +++ b/parser/testdata/00544_insert_with_select/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001200337, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/00544_insert_with_select/metadata.json b/parser/testdata/00544_insert_with_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00544_insert_with_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00544_insert_with_select/query.sql b/parser/testdata/00544_insert_with_select/query.sql new file mode 100644 index 000000000..6333a3f49 --- /dev/null +++ b/parser/testdata/00544_insert_with_select/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test(number UInt64, num2 UInt64) ENGINE = Log; + +INSERT INTO test WITH number * 2 AS num2 SELECT number, num2 FROM system.numbers LIMIT 3; + +SELECT * FROM test; + +DROP TABLE test; diff --git a/parser/testdata/00545_weird_aggregate_functions/ast.json b/parser/testdata/00545_weird_aggregate_functions/ast.json new file mode 100644 index 000000000..7902bfbe9 --- /dev/null +++ b/parser/testdata/00545_weird_aggregate_functions/ast.json @@ -0,0 +1,112 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumForEachMergeArray (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + 
"explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumForEachStateForEachIfArrayMerge (alias y) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumForEachStateForEachIfArrayState (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[Array_[Array_[UInt64_1, UInt64_2, UInt64_3], Array_[UInt64_4, UInt64_5, UInt64_6], Array_[UInt64_7, UInt64_8, UInt64_9]]]" + }, + { + "explain": " Literal Array_[UInt64_1]" + } + ], + + "rows": 30, + + "statistics": + { + "elapsed": 0.001322515, + "rows_read": 30, + "bytes_read": 1546 + } +} diff --git a/parser/testdata/00545_weird_aggregate_functions/metadata.json b/parser/testdata/00545_weird_aggregate_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00545_weird_aggregate_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00545_weird_aggregate_functions/query.sql b/parser/testdata/00545_weird_aggregate_functions/query.sql new file mode 100644 index 000000000..c728dfcc5 --- /dev/null +++ b/parser/testdata/00545_weird_aggregate_functions/query.sql @@ -0,0 +1 @@ +SELECT sumForEachMergeArray(y) FROM (SELECT sumForEachStateForEachIfArrayMerge(x) AS y FROM (SELECT sumForEachStateForEachIfArrayState([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]], [1]) AS x)); diff --git a/parser/testdata/00546_shard_tuple_element_formatting/ast.json b/parser/testdata/00546_shard_tuple_element_formatting/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00546_shard_tuple_element_formatting/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00546_shard_tuple_element_formatting/metadata.json b/parser/testdata/00546_shard_tuple_element_formatting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00546_shard_tuple_element_formatting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00546_shard_tuple_element_formatting/query.sql b/parser/testdata/00546_shard_tuple_element_formatting/query.sql new file mode 100644 index 000000000..f3dbdf8ed --- /dev/null +++ b/parser/testdata/00546_shard_tuple_element_formatting/query.sql @@ -0,0 +1,3 @@ +-- Tags: shard + +SELECT tupleElement((1, 2), toUInt8(1 + 0)) FROM remote('127.0.0.{2,3}', system.one); diff --git a/parser/testdata/00547_named_tuples/ast.json b/parser/testdata/00547_named_tuples/ast.json new file mode 100644 index 000000000..83634abbb --- /dev/null +++ b/parser/testdata/00547_named_tuples/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + 
"explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 6)" + }, + { + "explain": " Function CAST (alias t) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(UInt64_1, 'Hello')" + }, + { + "explain": " Literal 'Tuple(x UInt64, s String)'" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Literal 'x'" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Literal 's'" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.0013219, + "rows_read": 27, + "bytes_read": 1001 + } +} diff --git a/parser/testdata/00547_named_tuples/metadata.json b/parser/testdata/00547_named_tuples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00547_named_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00547_named_tuples/query.sql b/parser/testdata/00547_named_tuples/query.sql new file mode 100644 index 000000000..ec5a8aac2 --- /dev/null +++ b/parser/testdata/00547_named_tuples/query.sql @@ -0,0 +1 @@ +SELECT CAST((1, 'Hello') AS Tuple(x UInt64, s String)) AS t, toTypeName(t), t.1, t.2, tupleElement(t, 'x'), tupleElement(t, 's'); diff --git a/parser/testdata/00548_slice_of_nested/ast.json b/parser/testdata/00548_slice_of_nested/ast.json new file mode 100644 index 000000000..941a23e84 --- /dev/null +++ b/parser/testdata/00548_slice_of_nested/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function array (alias nested) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(UInt64_1, 'Hello')" + }, + { + "explain": " Literal Tuple_(UInt64_2, 'World')" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier nested" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier nested" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001227626, + "rows_read": 16, + "bytes_read": 631 + } +} diff --git a/parser/testdata/00548_slice_of_nested/metadata.json b/parser/testdata/00548_slice_of_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00548_slice_of_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/00548_slice_of_nested/query.sql b/parser/testdata/00548_slice_of_nested/query.sql new file mode 100644 index 000000000..f55082da0 --- /dev/null +++ b/parser/testdata/00548_slice_of_nested/query.sql @@ -0,0 +1,2 @@ +SELECT [(1, 'Hello'), (2, 'World')] AS nested, nested.1, nested.2; +SELECT [[(1, 'Hello'), (2, 'World')], [(3, 'Goodbye')]] AS nested, nested.1, nested.2; diff --git a/parser/testdata/00549_join_use_nulls/ast.json b/parser/testdata/00549_join_use_nulls/ast.json new file mode 100644 index 000000000..54ac5f8da --- /dev/null +++ b/parser/testdata/00549_join_use_nulls/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001434453, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00549_join_use_nulls/metadata.json b/parser/testdata/00549_join_use_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00549_join_use_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00549_join_use_nulls/query.sql b/parser/testdata/00549_join_use_nulls/query.sql new file mode 100644 index 000000000..6ed34fd5f --- /dev/null +++ b/parser/testdata/00549_join_use_nulls/query.sql @@ -0,0 +1,30 @@ +SET join_use_nulls = 1; + +DROP TABLE IF EXISTS null_00549; +CREATE TABLE null_00549 (k UInt64, a String, b Nullable(String)) ENGINE = Log; + +INSERT INTO null_00549 SELECT + k, + a, + b +FROM +( + SELECT + number AS k, + toString(number) AS a + FROM system.numbers + LIMIT 2 +) js1 +ANY LEFT JOIN +( + SELECT + number AS k, + toString(number) AS b + FROM system.numbers + LIMIT 1, 2 +) js2 USING (k) +ORDER BY k ASC; + +SELECT * FROM null_00549 ORDER BY k, a, b; + +DROP TABLE null_00549; diff --git a/parser/testdata/00551_parse_or_null/ast.json b/parser/testdata/00551_parse_or_null/ast.json new file mode 100644 index 000000000..216e5b75b --- /dev/null +++ b/parser/testdata/00551_parse_or_null/ast.json @@ -0,0 +1,130 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toUInt64OrZero (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier s" + }, + { + "explain": " Function toUInt64OrNull (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier s" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function multiIf (alias s) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + 
"explain": " Literal UInt64_1" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal 'hello'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 36, + + "statistics": + { + "elapsed": 0.001270508, + "rows_read": 36, + "bytes_read": 1562 + } +} diff --git a/parser/testdata/00551_parse_or_null/metadata.json b/parser/testdata/00551_parse_or_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00551_parse_or_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00551_parse_or_null/query.sql b/parser/testdata/00551_parse_or_null/query.sql new file mode 100644 index 000000000..9ae02a75c --- /dev/null +++ b/parser/testdata/00551_parse_or_null/query.sql @@ -0,0 +1,2 @@ +SELECT toUInt64OrZero(s), toUInt64OrNull(s) FROM (SELECT CASE WHEN number % 2 = 1 THEN toString(number) ELSE 'hello' END AS s FROM system.numbers) LIMIT 10; +SELECT toUInt64OrZero(s), toUInt64OrNull(s) FROM (SELECT CASE WHEN number = 5 THEN NULL WHEN number % 2 = 1 THEN toString(number) ELSE 'hello' END AS s FROM system.numbers) LIMIT 10; diff --git a/parser/testdata/00552_logical_functions_simple/ast.json b/parser/testdata/00552_logical_functions_simple/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00552_logical_functions_simple/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00552_logical_functions_simple/metadata.json b/parser/testdata/00552_logical_functions_simple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00552_logical_functions_simple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00552_logical_functions_simple/query.sql b/parser/testdata/00552_logical_functions_simple/query.sql new file mode 100644 index 000000000..2043199b9 --- /dev/null +++ b/parser/testdata/00552_logical_functions_simple/query.sql @@ -0,0 +1,56 @@ + +-- Test simple logic over smaller batch of columns +SELECT + -- x1, x2, x3, x4, + xor(x1, x2, x3, x4) AS xor1, + xor(xor(x1, x2), xor(x3, x4)) AS xor2, + + or(x1, x2, x3, x4) AS or1, + or(x1 or x2, x3 or x4) AS or2, + + and(x1, x2, x3, x4) AS and1, + and(x1 and x2, x3 and x4) AS and2 +FROM ( + SELECT + toUInt8(number % 2) AS x1, + toUInt8(number / 2 % 2) AS x2, + toUInt8(number / 4 % 2) AS x3, + toUInt8(number / 8 % 2) AS x4 + FROM numbers(16) +) +WHERE + xor1 != xor2 OR (and1 != and2 OR or1 != or2) +; + +-- Test simple logic over multiple batches of columns (currently batch spans over 10 columns) +SELECT + -- x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, + xor(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11) AS xor1, + xor(x1, xor(xor(xor(x2, x3), xor(x4, x5)), xor(xor(x6, x7), xor(x8, xor(x9, xor(x10, x11)))))) AS xor2, + + or(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11) AS or1, + or(x1, or(or(or(x2, x3), or(x4, x5)), or(or(x6, x7), or(x8, or(x9, or(x10, x11)))))) AS or2, + + and(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11) AS and1, + and(x1, and((x2 and x3) and (x4 and x5), (x6 and x7) and (x8 and (x9 and (x10 and x11))))) AS and2 +FROM ( + SELECT + toUInt8(number % 2) AS x1, + toUInt8(number / 2 % 2) AS x2, + 
toUInt8(number / 4 % 2) AS x3, + toUInt8(number / 8 % 2) AS x4, + toUInt8(number / 16 % 2) AS x5, + toUInt8(number / 32 % 2) AS x6, + toUInt8(number / 64 % 2) AS x7, + toUInt8(number / 128 % 2) AS x8, + toUInt8(number / 256 % 2) AS x9, + toUInt8(number / 512 % 2) AS x10, + toUInt8(number / 1024 % 2) AS x11 + FROM numbers(2048) +) +WHERE + xor1 != xor2 OR (and1 != and2 OR or1 != or2) +; + + +SELECT 'OK'; diff --git a/parser/testdata/00552_logical_functions_ternary/ast.json b/parser/testdata/00552_logical_functions_ternary/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00552_logical_functions_ternary/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00552_logical_functions_ternary/metadata.json b/parser/testdata/00552_logical_functions_ternary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00552_logical_functions_ternary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00552_logical_functions_ternary/query.sql b/parser/testdata/00552_logical_functions_ternary/query.sql new file mode 100644 index 000000000..b22e5c61f --- /dev/null +++ b/parser/testdata/00552_logical_functions_ternary/query.sql @@ -0,0 +1,59 @@ + +-- Tests codepath for ternary logic +SELECT + -- x1, x2, x3, x4, + xor(x1, x2, x3, x4) AS xor1, + xor(xor(x1, x2), xor(x3, x4)) AS xor2, + + or(x1, x2, x3, x4) AS or1, + or(x1 or x2, x3 or x4) AS or2, + + and(x1, x2, x3, x4) AS and1, + and(x1 and x2, x3 and x4) AS and2 +FROM ( + SELECT + nullIf(toUInt8(number % 3), 2) AS x1, + nullIf(toUInt8(number / 3 % 3), 2) AS x2, + nullIf(toUInt8(number / 9 % 3), 2) AS x3, + nullIf(toUInt8(number / 27 % 3), 2) AS x4 + FROM numbers(81) +) +WHERE + (xor1 != xor2 OR (xor1 is NULL) != (xor2 is NULL)) OR + (or1 != or2 OR (or1 is NULL) != (or2 is NULL) OR (and1 != and2 OR (and1 is NULL) != (and2 is NULL))) +; + + +-- Test ternary logic over multiple batches of columns (currently batch spans over 10 columns) +SELECT + -- x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, + xor(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11) AS xor1, + xor(x1, xor(xor(xor(x2, x3), xor(x4, x5)), xor(xor(x6, x7), xor(x8, xor(x9, xor(x10, x11)))))) AS xor2, + + or(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11) AS or1, + or(x1, or(or(or(x2, x3), or(x4, x5)), or(or(x6, x7), or(x8, or(x9, or(x10, x11)))))) AS or2, + + and(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11) AS and1, + and(x1, and((x2 and x3) and (x4 and x5), (x6 and x7) and (x8 and (x9 and (x10 and x11))))) AS and2 +FROM ( + SELECT + nullIf(toUInt8(number % 3), 2) AS x1, + nullIf(toUInt8(number / 3 % 3), 2) AS x2, + nullIf(toUInt8(number / 9 % 3), 2) AS x3, + nullIf(toUInt8(number / 27 % 3), 2) AS x4, + nullIf(toUInt8(number / 81 % 3), 2) AS x5, + nullIf(toUInt8(number / 243 % 3), 2) AS x6, + nullIf(toUInt8(number / 729 % 3), 2) AS x7, + nullIf(toUInt8(number / 2187 % 3), 2) AS x8, + nullIf(toUInt8(number / 6561 % 3), 2) AS x9, + nullIf(toUInt8(number / 19683 % 3), 2) AS x10, + nullIf(toUInt8(number / 59049 % 3), 2) AS x11 + FROM numbers(177147) +) +WHERE + (xor1 != xor2 OR (xor1 is NULL) != (xor2 is NULL)) OR + (or1 != or2 OR (or1 is NULL) != (or2 is NULL) OR (and1 != and2 OR (and1 is NULL) != (and2 is NULL))) +; + + +SELECT 'OK'; diff --git a/parser/testdata/00552_logical_functions_uint8_as_bool/ast.json b/parser/testdata/00552_logical_functions_uint8_as_bool/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/00552_logical_functions_uint8_as_bool/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00552_logical_functions_uint8_as_bool/metadata.json b/parser/testdata/00552_logical_functions_uint8_as_bool/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00552_logical_functions_uint8_as_bool/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00552_logical_functions_uint8_as_bool/query.sql b/parser/testdata/00552_logical_functions_uint8_as_bool/query.sql new file mode 100644 index 000000000..f62a02288 --- /dev/null +++ b/parser/testdata/00552_logical_functions_uint8_as_bool/query.sql @@ -0,0 +1,20 @@ + +-- Test that UInt8 type is processed correctly as bool + +SELECT + 1 AND 2, + 2 AND 4, + 1 AND 2 AND 4, + 1 OR 2, + 2 OR 4, + 1 OR 2 OR 4 +; + +SELECT + toUInt8(bitAnd(number, 4)) AS a, + toUInt8(bitAnd(number, 2)) AS b, + toUInt8(bitAnd(number, 1)) AS c, + a AND b AND c AS AND, + a OR b OR c AS OR +FROM numbers(8) +; diff --git a/parser/testdata/00552_or_nullable/ast.json b/parser/testdata/00552_or_nullable/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00552_or_nullable/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00552_or_nullable/metadata.json b/parser/testdata/00552_or_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00552_or_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00552_or_nullable/query.sql b/parser/testdata/00552_or_nullable/query.sql new file mode 100644 index 000000000..ebec2db84 --- /dev/null +++ b/parser/testdata/00552_or_nullable/query.sql @@ -0,0 +1,61 @@ +SELECT + 0 OR NULL, + 1 OR NULL, + toNullable(0) OR NULL, + toNullable(1) OR NULL, + 0.0 OR NULL, + 0.1 OR NULL, + NULL OR 1 OR NULL, + 0 OR NULL OR 1 OR NULL; + +SELECT + 0 AND NULL, + 1 AND NULL, + toNullable(0) AND NULL, + toNullable(1) AND NULL, + 0.0 AND NULL, + 0.1 AND NULL, + NULL AND 1 AND NULL, + 0 AND NULL AND 1 AND NULL; + +SELECT + x, + 0 OR x, + 1 OR x, + x OR x, + toNullable(0) OR x, + toNullable(1) OR x, + 0.0 OR x, + 0.1 OR x, + x OR 1 OR x, + 0 OR x OR 1 OR x +FROM (SELECT number % 2 ? number % 3 : NULL AS x FROM system.numbers LIMIT 10); + +SELECT + x, + 0 AND x, + 1 AND x, + x AND x, + toNullable(0) AND x, + toNullable(1) AND x, + 0.0 AND x, + 0.1 AND x, + x AND 1 AND x, + 0 AND x AND 1 AND x +FROM (SELECT number % 2 ? 
number % 3 : NULL AS x FROM system.numbers LIMIT 10); + +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + x Nullable(Int32) +) ENGINE = Log; + +INSERT INTO test VALUES(1), (0), (null); + +SELECT * FROM test; +SELECT x FROM test WHERE x != 0; +SELECT x FROM test WHERE x != 0 OR isNull(x); +SELECT x FROM test WHERE x != 1; + +DROP TABLE test; diff --git a/parser/testdata/00553_buff_exists_materlized_column/ast.json b/parser/testdata/00553_buff_exists_materlized_column/ast.json new file mode 100644 index 000000000..06b75cccd --- /dev/null +++ b/parser/testdata/00553_buff_exists_materlized_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nums (children 1)" + }, + { + "explain": " Identifier nums" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001640776, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/00553_buff_exists_materlized_column/metadata.json b/parser/testdata/00553_buff_exists_materlized_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00553_buff_exists_materlized_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00553_buff_exists_materlized_column/query.sql b/parser/testdata/00553_buff_exists_materlized_column/query.sql new file mode 100644 index 000000000..abf14d360 --- /dev/null +++ b/parser/testdata/00553_buff_exists_materlized_column/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS nums; +DROP TABLE IF EXISTS nums_buf; + +SET insert_allow_materialized_columns = 1; + +CREATE TABLE nums ( n UInt64, m UInt64 MATERIALIZED n+1 ) ENGINE = Log; +CREATE TABLE nums_buf AS nums ENGINE = Buffer(currentDatabase(), nums, 1, 10, 100, 1, 3, 10000000, 100000000); + +INSERT INTO nums_buf (n) VALUES (1); +INSERT INTO nums_buf (n) VALUES (2); +INSERT INTO nums_buf (n) VALUES (3); +INSERT INTO nums_buf (n) VALUES (4); +INSERT INTO nums_buf (n) VALUES (5); + +SELECT n,m FROM nums ORDER BY n; +SELECT n,m FROM nums_buf ORDER BY n; + +DROP TABLE IF EXISTS nums_buf; +DROP TABLE IF EXISTS nums; diff --git a/parser/testdata/00553_invalid_nested_name/ast.json b/parser/testdata/00553_invalid_nested_name/ast.json new file mode 100644 index 000000000..004611d3c --- /dev/null +++ b/parser/testdata/00553_invalid_nested_name/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[Float64_1.1, Float64_1.2]" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001064155, + "rows_read": 14, + "bytes_read": 575 + } +} diff --git a/parser/testdata/00553_invalid_nested_name/metadata.json b/parser/testdata/00553_invalid_nested_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/00553_invalid_nested_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00553_invalid_nested_name/query.sql b/parser/testdata/00553_invalid_nested_name/query.sql new file mode 100644 index 000000000..55f205c5e --- /dev/null +++ b/parser/testdata/00553_invalid_nested_name/query.sql @@ -0,0 +1 @@ +SELECT * FROM (SELECT [1.1, 1.2]); diff --git a/parser/testdata/00554_nested_and_table_engines/ast.json b/parser/testdata/00554_nested_and_table_engines/ast.json new file mode 100644 index 000000000..7d6e6e520 --- /dev/null +++ b/parser/testdata/00554_nested_and_table_engines/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nested (children 1)" + }, + { + "explain": " Identifier nested" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001611639, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/00554_nested_and_table_engines/metadata.json b/parser/testdata/00554_nested_and_table_engines/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00554_nested_and_table_engines/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00554_nested_and_table_engines/query.sql b/parser/testdata/00554_nested_and_table_engines/query.sql new file mode 100644 index 000000000..3da696fa6 --- /dev/null +++ b/parser/testdata/00554_nested_and_table_engines/query.sql @@ -0,0 +1,61 @@ +DROP TABLE IF EXISTS nested; + +CREATE TABLE nested (x UInt8, n Nested(a UInt64, b String)) ENGINE = TinyLog; + +INSERT INTO nested VALUES (1, [2, 3], ['Hello', 'World']); +INSERT INTO nested VALUES (4, [5], ['Goodbye']); + +SELECT * FROM nested ORDER BY x; +SELECT x, n.a FROM nested ORDER BY x; +SELECT n.a, n.b FROM nested ORDER BY n.a; + + +DROP TABLE IF EXISTS nested; + +CREATE TABLE nested (x UInt8, n Nested(a UInt64, b String)) ENGINE = Log; + +INSERT INTO nested VALUES (1, [2, 3], ['Hello', 'World']); +INSERT INTO nested VALUES (4, [5], ['Goodbye']); + +SELECT * FROM nested ORDER BY x; +SELECT x, n.a FROM nested ORDER BY x; +SELECT n.a, n.b FROM nested ORDER BY n.a; + + +DROP TABLE IF EXISTS nested; + +CREATE TABLE nested (x UInt8, n Nested(a UInt64, b String)) ENGINE = StripeLog; + +INSERT INTO nested VALUES (1, [2, 3], ['Hello', 'World']); +INSERT INTO nested VALUES (4, [5], ['Goodbye']); + +SELECT * FROM nested ORDER BY x; +SELECT x, n.a FROM nested ORDER BY x; +SELECT n.a, n.b FROM nested ORDER BY n.a; + + +DROP TABLE IF EXISTS nested; + +CREATE TABLE nested (x UInt8, n Nested(a UInt64, b String)) ENGINE = Memory; + +INSERT INTO nested VALUES (1, [2, 3], ['Hello', 'World']); +INSERT INTO nested VALUES (4, [5], ['Goodbye']); + +SELECT * FROM nested ORDER BY x; +SELECT x, n.a FROM nested ORDER BY x; +SELECT n.a, n.b FROM nested ORDER BY n.a; + + +DROP TABLE IF EXISTS nested; + +CREATE TABLE nested (x UInt8, n Nested(a UInt64, b String)) ENGINE = MergeTree ORDER BY x; + +INSERT INTO nested VALUES (1, [2, 3], ['Hello', 'World']); +INSERT INTO nested VALUES (4, [5], ['Goodbye']); + +SELECT * FROM nested ORDER BY x; +SELECT x, n.a FROM nested ORDER BY x; +SELECT n.a, n.b FROM nested ORDER BY n.a; + + +DROP TABLE nested; diff --git a/parser/testdata/00555_hasAll_hasAny/ast.json b/parser/testdata/00555_hasAll_hasAny/ast.json new file mode 100644 index 000000000..b0de55721 --- /dev/null +++ b/parser/testdata/00555_hasAll_hasAny/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": 
"String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function hasAll (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001167865, + "rows_read": 10, + "bytes_read": 369 + } +} diff --git a/parser/testdata/00555_hasAll_hasAny/metadata.json b/parser/testdata/00555_hasAll_hasAny/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00555_hasAll_hasAny/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00555_hasAll_hasAny/query.sql b/parser/testdata/00555_hasAll_hasAny/query.sql new file mode 100644 index 000000000..cf037d1ef --- /dev/null +++ b/parser/testdata/00555_hasAll_hasAny/query.sql @@ -0,0 +1,52 @@ +select hasAll([], []); +select hasAll([], [1]); +select hasAll([], [NULL]); +select hasAll([Null], [Null]); +select hasAll([Null], [Null, 1]); +select hasAll([1], []); +select hasAll([1], [Null]); +select hasAll([1, Null], [Null]); +select '-'; + +select hasAny([], []); +select hasAny([], [1]); +select hasAny([], [NULL]); +select hasAny([Null], [Null]); +select hasAny([Null], [Null, 1]); +select hasAny([1], []); +select hasAny([1], [Null]); +select hasAny([1, Null], [Null]); +select '-'; + +select hasAll([1], emptyArrayUInt8()); +select hasAny([1], emptyArrayUInt8()); +select '-'; + +select hasAny([1, 2, 3, 4], [5, 6]); +select hasAny([1, 2, 3, 4], [1, 3, 5]); +select hasAny([1, 2, 3, 4], [1, 3]); +select hasAll([1, 2, 3, 4], [1, 3]); +select hasAll([1, 2, 3, 4], [1, 3, 5]); +select hasAny([-128, 1., 512], [1.]); +select hasAny([-128, 1.0, 512], [.3]); +select hasAll([-128, 1.0, 512], [1.0]); +select hasAll([-128, 1.0, 512], [1.0, 513]); +select '-'; + +select hasAny(['a'], ['a']); +select hasAll(['a'], ['a']); +select hasAny(['a', 'b'], ['a', 'c']); +select hasAll(['a', 'b'], ['a', 'c']); +select '-'; + +select hasAny([1], ['a']); -- { serverError NO_COMMON_TYPE } +select hasAll([1], ['a']); -- { serverError NO_COMMON_TYPE } +select hasAll([[1, 2], [3, 4]], ['a', 'c']); -- { serverError NO_COMMON_TYPE } +select hasAny([[1, 2], [3, 4]], ['a', 'c']); -- { serverError NO_COMMON_TYPE } +select '-'; + +select hasAll([[1, 2], [3, 4]], [[1, 2], [3, 5]]); +select hasAll([[1, 2], [3, 4]], [[1, 2], [1, 2]]); +select hasAny([[1, 2], [3, 4]], [[1, 2], [3, 5]]); +select hasAny([[1, 2], [3, 4]], [[1, 3], [4, 2]]); + diff --git a/parser/testdata/00555_hasSubstr/ast.json b/parser/testdata/00555_hasSubstr/ast.json new file mode 100644 index 000000000..ed27b5164 --- /dev/null +++ b/parser/testdata/00555_hasSubstr/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function hasSubstr (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function array 
(children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001279964, + "rows_read": 10, + "bytes_read": 372 + } +} diff --git a/parser/testdata/00555_hasSubstr/metadata.json b/parser/testdata/00555_hasSubstr/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00555_hasSubstr/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00555_hasSubstr/query.sql b/parser/testdata/00555_hasSubstr/query.sql new file mode 100644 index 000000000..25af5a648 --- /dev/null +++ b/parser/testdata/00555_hasSubstr/query.sql @@ -0,0 +1,32 @@ +select hasSubstr([], []); +select hasSubstr([], [1]); +select hasSubstr([], [NULL]); +select hasSubstr([Null], [Null]); +select hasSubstr([Null], [Null, 1]); +select hasSubstr([1], []); +select hasSubstr([1], [Null]); +select hasSubstr([1, Null], [Null]); +select hasSubstr([1, Null, 3, 4, Null, 5, 7], [3, 4, Null]); +select hasSubstr([1, Null], [3, 4, Null]); +select '-'; + + +select hasSubstr([1], emptyArrayUInt8()); +select '-'; + +select hasSubstr([1, 2, 3, 4], [1, 3]); +select hasSubstr([1, 2, 3, 4], [1, 3, 5]); +select hasSubstr([-128, 1., 512], [1.]); +select hasSubstr([-128, 1.0, 512], [.3]); +select '-'; + +select hasSubstr(['a'], ['a']); +select hasSubstr(['a', 'b'], ['a', 'c']); +select hasSubstr(['a', 'c', 'b'], ['a', 'c']); +select '-'; + +select hasSubstr([1], ['a']); -- { serverError NO_COMMON_TYPE } +select hasSubstr([[1, 2], [3, 4]], ['a', 'c']); -- { serverError NO_COMMON_TYPE } +select hasSubstr([[1, 2], [3, 4], [5, 8]], [[3, 4]]); +select hasSubstr([[1, 2], [3, 4], [5, 8]], [[3, 4], [5, 8]]); +select hasSubstr([[1, 2], [3, 4], [5, 8]], [[1, 2], [5, 8]]); diff --git a/parser/testdata/00555_right_join_excessive_rows/ast.json b/parser/testdata/00555_right_join_excessive_rows/ast.json new file mode 100644 index 000000000..f79dc05f6 --- /dev/null +++ b/parser/testdata/00555_right_join_excessive_rows/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001099768, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00555_right_join_excessive_rows/metadata.json b/parser/testdata/00555_right_join_excessive_rows/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00555_right_join_excessive_rows/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00555_right_join_excessive_rows/query.sql b/parser/testdata/00555_right_join_excessive_rows/query.sql new file mode 100644 index 000000000..29cc40c00 --- /dev/null +++ b/parser/testdata/00555_right_join_excessive_rows/query.sql @@ -0,0 +1,3 @@ +SET any_join_distinct_right_table_keys = 1; +SET max_block_size = 10; +SELECT * FROM (select toUInt64(1) s limit 1) js1 any right join (select number s, s as x from numbers(11)) js2 using (s) ORDER BY s; diff --git a/parser/testdata/00556_array_intersect/ast.json b/parser/testdata/00556_array_intersect/ast.json new file mode 100644 index 000000000..429f36d6c --- /dev/null +++ b/parser/testdata/00556_array_intersect/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, 
+ { + "explain": " Function arrayIntersect (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001111728, + "rows_read": 10, + "bytes_read": 377 + } +} diff --git a/parser/testdata/00556_array_intersect/metadata.json b/parser/testdata/00556_array_intersect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00556_array_intersect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00556_array_intersect/query.sql b/parser/testdata/00556_array_intersect/query.sql new file mode 100644 index 000000000..56fc2abbd --- /dev/null +++ b/parser/testdata/00556_array_intersect/query.sql @@ -0,0 +1,15 @@ +select arrayIntersect([], []); +select arrayIntersect([1], []); +select arrayIntersect([1], [1]); +select arrayIntersect([1, 2], [1, 3], [2, 3]); +select arrayIntersect([1, 2], [1, 3], [1, 4]); +select arrayIntersect([1, -1], [1]); +select arrayIntersect([1, -1], [Null, 1]); +select arrayIntersect([1, -1, Null], [Null, 1]); +select arrayIntersect(cast([1, 2] as Array(Nullable(Int8))), [1, 3]); +select arrayIntersect(CAST([1, -1] AS Array(Nullable(Int8))), [NULL, 1]); +select arrayIntersect([[1, 2], [1, 1]], [[2, 1], [1, 1]]); +select arrayIntersect([[1, 2, Null], [1, 1]], [[-2, 1], [1, 1]]); +select arrayIntersect([(1, ['a', 'b']), (Null, ['c'])], [(2, ['c', Null]), (1, ['a', 'b'])]); +select toTypeName(arrayIntersect([(1, ['a', 'b']), (Null, ['c'])], [(2, ['c', Null]), (1, ['a', 'b'])])); + diff --git a/parser/testdata/00556_remove_columns_from_subquery/ast.json b/parser/testdata/00556_remove_columns_from_subquery/ast.json new file mode 100644 index 000000000..9be8e5d71 --- /dev/null +++ b/parser/testdata/00556_remove_columns_from_subquery/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias a)" + }, + { + "explain": " Literal UInt64_2 (alias b)" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001259067, + "rows_read": 15, + "bytes_read": 610 + } +} diff --git a/parser/testdata/00556_remove_columns_from_subquery/metadata.json b/parser/testdata/00556_remove_columns_from_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00556_remove_columns_from_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00556_remove_columns_from_subquery/query.sql b/parser/testdata/00556_remove_columns_from_subquery/query.sql new file mode 100644 index 000000000..cfe47981e --- /dev/null +++ 
b/parser/testdata/00556_remove_columns_from_subquery/query.sql @@ -0,0 +1,6 @@ +SELECT a FROM (SELECT 1 AS a, 2 AS b); +SELECT a FROM (SELECT 1 AS a, arrayJoin([2, 3]) AS b); +SELECT a FROM (SELECT 1 AS a, arrayJoin([2, 3]), arrayJoin([2, 3])); +SELECT a FROM (SELECT 1 AS a, arrayJoin([2, 3]), arrayJoin([4, 5])); +SELECT a, b FROM (SELECT a, * FROM (SELECT 1 AS a, 2 AS b, 3 AS c)); +SELECT a, b FROM (SELECT a, *, arrayJoin(c) FROM (SELECT 1 AS a, 2 AS b, [3, 4] AS c)); diff --git a/parser/testdata/00557_alter_null_storage_tables/ast.json b/parser/testdata/00557_alter_null_storage_tables/ast.json new file mode 100644 index 000000000..e9c26f8a9 --- /dev/null +++ b/parser/testdata/00557_alter_null_storage_tables/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery null_00557 (children 1)" + }, + { + "explain": " Identifier null_00557" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000994026, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00557_alter_null_storage_tables/metadata.json b/parser/testdata/00557_alter_null_storage_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00557_alter_null_storage_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00557_alter_null_storage_tables/query.sql b/parser/testdata/00557_alter_null_storage_tables/query.sql new file mode 100644 index 000000000..4d36591f0 --- /dev/null +++ b/parser/testdata/00557_alter_null_storage_tables/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS null_00557; + +CREATE TABLE null_00557 (x UInt8) ENGINE = Null; +DESCRIBE TABLE null_00557; + +ALTER TABLE null_00557 ADD COLUMN y String, MODIFY COLUMN x Int64 DEFAULT toInt64(y); +DESCRIBE TABLE null_00557; + +DROP TABLE null_00557; diff --git a/parser/testdata/00557_array_resize/ast.json b/parser/testdata/00557_array_resize/ast.json new file mode 100644 index 000000000..4bc451151 --- /dev/null +++ b/parser/testdata/00557_array_resize/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayResize (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3]" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001143007, + "rows_read": 8, + "bytes_read": 323 + } +} diff --git a/parser/testdata/00557_array_resize/metadata.json b/parser/testdata/00557_array_resize/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00557_array_resize/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00557_array_resize/query.sql b/parser/testdata/00557_array_resize/query.sql new file mode 100644 index 000000000..0a37d341c --- /dev/null +++ b/parser/testdata/00557_array_resize/query.sql @@ -0,0 +1,20 @@ +select arrayResize([1, 2, 3], 10); +select arrayResize([1, 2, 3], -10); +select arrayResize([1, Null, 3], 10); +select arrayResize([1, Null, 3], -10); +select arrayResize([1, 2, 3, 4, 5, 6], 3); +select arrayResize([1, 2, 3, 4, 5, 6], -3); +select arrayResize([1, 2, 3], 5, 42); +select arrayResize([1, 2, 3], -5, 
42); +select arrayResize(['a', 'b', 'c'], 5); +select arrayResize([[1, 2], [3, 4]], 4); +select arrayResize([[1, 2], [3, 4]], -4); +select arrayResize([[1, 2], [3, 4]], 4, [5, 6]); +select arrayResize([[1, 2], [3, 4]], -4, [5, 6]); + +-- different types of array elements and default value to fill +select arrayResize([1, 2, 3], 5, 423.56); + +-- Second argument cannot be Nullable +select arrayResize([1, 2, 3], toNullable(3)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayResize(materialize([1]), toNullable(3)) -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/00558_aggregate_merge_totals_with_arenas/ast.json b/parser/testdata/00558_aggregate_merge_totals_with_arenas/ast.json new file mode 100644 index 000000000..ec9fab545 --- /dev/null +++ b/parser/testdata/00558_aggregate_merge_totals_with_arenas/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001507616, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00558_aggregate_merge_totals_with_arenas/metadata.json b/parser/testdata/00558_aggregate_merge_totals_with_arenas/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00558_aggregate_merge_totals_with_arenas/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00558_aggregate_merge_totals_with_arenas/query.sql b/parser/testdata/00558_aggregate_merge_totals_with_arenas/query.sql new file mode 100644 index 000000000..1b5384a2e --- /dev/null +++ b/parser/testdata/00558_aggregate_merge_totals_with_arenas/query.sql @@ -0,0 +1,46 @@ +SET group_by_two_level_threshold = 1, max_threads = 1; + +SELECT + k, + anyLast(s) +FROM +( + SELECT + 123456789 AS k, + 'Hello 1234567890 1234567890 1234567890 1234567890 1234567890 1234567890' AS s + UNION ALL + SELECT + 234567890, + 'World 1234567890 1234567890 1234567890 1234567890 1234567890 1234567890' +) +GROUP BY k + WITH TOTALS +HAVING length(anyLast(s)) > 0 +ORDER BY k; + +/* There was a bug in the implementation of WITH TOTALS. + * When there was more than one block after aggregation, + * a nullptr was passed to IAggregateFunction::merge instead of a pointer to a valid Arena. + * + * To reproduce, we set 'group_by_two_level_threshold' to a small value to enable two-level aggregation. + * Only with two-level aggregation are there many blocks after GROUP BY. + * + * We also use UNION ALL in the subquery to generate two blocks before GROUP BY, + * because two-level aggregation may be triggered only after a block has been processed. + * + * We use large numbers as the key because, for 8- and 16-bit numbers, + * two-level aggregation is not possible and a simple aggregation method is used instead. + * These numbers happen to hash to different buckets, and thus we have two blocks after GROUP BY. + * + * We also use long strings (at least 64 bytes) in the aggregation state, + * because the aggregate functions min/max/any/anyLast use an Arena only for long enough strings. + * + * And we use the function 'anyLast' so that IAggregateFunction::merge is called for every new value. + * + * We use a useless HAVING (that is always true) because, in the absence of HAVING, + * TOTALS are calculated in a simple way in the same pass during aggregation, not in TotalsHavingTransform, + * and the bug doesn't trigger. + * + * We use ORDER BY to make the result of the test deterministic. + * max_threads = 1 makes the order of the subquery result, and hence the value of 'anyLast', deterministic.
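+ * + * In short: the two UNION ALL blocks push aggregation into two-level mode, two-level mode yields several result blocks, and merging the long-string 'anyLast' states across those blocks is exactly the path that received the nullptr instead of an Arena.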
+ */ diff --git a/parser/testdata/00558_parse_floats/ast.json b/parser/testdata/00558_parse_floats/ast.json new file mode 100644 index 000000000..bc6edd096 --- /dev/null +++ b/parser/testdata/00558_parse_floats/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toFloat64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '0.00000'" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_100" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001462862, + "rows_read": 17, + "bytes_read": 682 + } +} diff --git a/parser/testdata/00558_parse_floats/metadata.json b/parser/testdata/00558_parse_floats/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00558_parse_floats/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00558_parse_floats/query.sql b/parser/testdata/00558_parse_floats/query.sql new file mode 100644 index 000000000..c4b7c071f --- /dev/null +++ b/parser/testdata/00558_parse_floats/query.sql @@ -0,0 +1 @@ +SELECT toFloat64(concat('0.00000', toString(number))) FROM system.numbers LIMIT 100; diff --git a/parser/testdata/00559_filter_array_generic/ast.json b/parser/testdata/00559_filter_array_generic/ast.json new file mode 100644 index 000000000..5d634d03a --- /dev/null +++ b/parser/testdata/00559_filter_array_generic/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[Array_[Array_[UInt64_1], Array_[UInt64_2]], Array_[Array_[UInt64_1]]]" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function length (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001429947, + "rows_read": 13, + "bytes_read": 556 + } +} diff --git a/parser/testdata/00559_filter_array_generic/metadata.json b/parser/testdata/00559_filter_array_generic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00559_filter_array_generic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00559_filter_array_generic/query.sql b/parser/testdata/00559_filter_array_generic/query.sql new file mode 100644 index 
000000000..860059f67 --- /dev/null +++ b/parser/testdata/00559_filter_array_generic/query.sql @@ -0,0 +1 @@ +SELECT arrayJoin([[[1], [2]], [[1]]]) AS x WHERE length(x) = 2; diff --git a/parser/testdata/00560_float_leading_plus_in_exponent/ast.json b/parser/testdata/00560_float_leading_plus_in_exponent/ast.json new file mode 100644 index 000000000..38b73740f --- /dev/null +++ b/parser/testdata/00560_float_leading_plus_in_exponent/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test_float (children 2)" + }, + { + "explain": " Identifier test_float" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType Float64" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001452855, + "rows_read": 6, + "bytes_read": 221 + } +} diff --git a/parser/testdata/00560_float_leading_plus_in_exponent/metadata.json b/parser/testdata/00560_float_leading_plus_in_exponent/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00560_float_leading_plus_in_exponent/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00560_float_leading_plus_in_exponent/query.sql b/parser/testdata/00560_float_leading_plus_in_exponent/query.sql new file mode 100644 index 000000000..d2d62510e --- /dev/null +++ b/parser/testdata/00560_float_leading_plus_in_exponent/query.sql @@ -0,0 +1,5 @@ +CREATE TEMPORARY TABLE test_float (x Float64); + +INSERT INTO test_float FORMAT TabSeparated 1.075e+06 + +SELECT * FROM test_float; diff --git a/parser/testdata/00561_storage_join/ast.json b/parser/testdata/00561_storage_join/ast.json new file mode 100644 index 000000000..fc1925baa --- /dev/null +++ b/parser/testdata/00561_storage_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery joinbug (children 1)" + }, + { + "explain": " Identifier joinbug" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00112576, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00561_storage_join/metadata.json b/parser/testdata/00561_storage_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00561_storage_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00561_storage_join/query.sql b/parser/testdata/00561_storage_join/query.sql new file mode 100644 index 000000000..1603e85f7 --- /dev/null +++ b/parser/testdata/00561_storage_join/query.sql @@ -0,0 +1,42 @@ +drop table IF EXISTS joinbug; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE joinbug ( + event_date Date MATERIALIZED toDate(created, 'Asia/Istanbul'), + id UInt64, + id2 UInt64, + val UInt64, + val2 Int32, + created UInt64 +) ENGINE = MergeTree(event_date, (id, id2), 8192); + +insert into joinbug (id, id2, val, val2, created) values (1,11,91,81,123456), (2,22,92,82,123457); + +drop table IF EXISTS joinbug_join; + +CREATE TABLE joinbug_join ( + id UInt64, + id2 UInt64, + val UInt64, + val2 Int32, + created UInt64 +) ENGINE = Join(SEMI, LEFT, id2); + +insert into joinbug_join (id, id2, val, val2, created) +select id, id2, val, val2, created +from joinbug; + +select * from joinbug; + +select id, id2, val, val2, created +from ( SELECT toUInt64(arrayJoin(range(50))) AS id2 ) js1 +SEMI LEFT JOIN 
joinbug_join using id2; + +-- type conversion +SELECT * FROM ( SELECT toUInt32(11) AS id2 ) AS js1 SEMI LEFT JOIN joinbug_join USING (id2); + +-- can't convert the right side in the case of a storage Join +SELECT * FROM ( SELECT toInt64(11) AS id2 ) AS js1 SEMI LEFT JOIN joinbug_join USING (id2); -- { serverError TYPE_MISMATCH, 386 } + +DROP TABLE joinbug; +DROP TABLE joinbug_join; diff --git a/parser/testdata/00562_in_subquery_merge_tree/ast.json b/parser/testdata/00562_in_subquery_merge_tree/ast.json new file mode 100644 index 000000000..6256e18df --- /dev/null +++ b/parser/testdata/00562_in_subquery_merge_tree/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery merge_tree_in_subqueries (children 1)" + }, + { + "explain": " Identifier merge_tree_in_subqueries" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001382289, + "rows_read": 2, + "bytes_read": 100 + } +} diff --git a/parser/testdata/00562_in_subquery_merge_tree/metadata.json b/parser/testdata/00562_in_subquery_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00562_in_subquery_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00562_in_subquery_merge_tree/query.sql b/parser/testdata/00562_in_subquery_merge_tree/query.sql new file mode 100644 index 000000000..d2847af2b --- /dev/null +++ b/parser/testdata/00562_in_subquery_merge_tree/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS merge_tree_in_subqueries; +CREATE TABLE merge_tree_in_subqueries (id UInt64, name String, num UInt64) ENGINE = MergeTree ORDER BY (id, name); +INSERT INTO merge_tree_in_subqueries VALUES(1, 'test1', 42); +INSERT INTO merge_tree_in_subqueries VALUES(2, 'test2', 8); +INSERT INTO merge_tree_in_subqueries VALUES(3, 'test3', 8); +INSERT INTO merge_tree_in_subqueries VALUES(4, 'test4', 1985); +INSERT INTO merge_tree_in_subqueries VALUES(5, 'test5', 0); + +-- Index scans. +SET force_primary_key = 1; + +SELECT * FROM merge_tree_in_subqueries WHERE id IN (SELECT * FROM system.numbers LIMIT 0); + +SELECT * FROM merge_tree_in_subqueries WHERE id IN (SELECT * FROM system.numbers LIMIT 2, 3) ORDER BY id; +SELECT * FROM merge_tree_in_subqueries WHERE name IN (SELECT 'test' || toString(number) FROM system.numbers LIMIT 2, 3) ORDER BY id; + +SELECT id AS id2, name AS value FROM merge_tree_in_subqueries WHERE (value, id2) IN (SELECT 'test' || toString(number), number FROM system.numbers LIMIT 2, 3) ORDER BY id; + +-- Non-index scans.
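+-- (force_primary_key = 1 above rejects any query that cannot use the primary key, so the preceding SELECTs double as index-usage checks; resetting it to 0 below allows scans on the non-key column 'num'.)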
+SET force_primary_key = 0; + +SELECT id AS id2, name AS value FROM merge_tree_in_subqueries WHERE num IN (SELECT number FROM system.numbers LIMIT 10) ORDER BY id; +SELECT id AS id2, name AS value FROM merge_tree_in_subqueries WHERE (id, num) IN (SELECT number, number + 6 FROM system.numbers LIMIT 10) ORDER BY id; + +DROP TABLE IF EXISTS merge_tree_in_subqueries; diff --git a/parser/testdata/00562_rewrite_select_expression_with_union/ast.json b/parser/testdata/00562_rewrite_select_expression_with_union/ast.json new file mode 100644 index 000000000..654b6df06 --- /dev/null +++ b/parser/testdata/00562_rewrite_select_expression_with_union/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_00562 (children 1)" + }, + { + "explain": " Identifier test_00562" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001371171, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00562_rewrite_select_expression_with_union/metadata.json b/parser/testdata/00562_rewrite_select_expression_with_union/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00562_rewrite_select_expression_with_union/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00562_rewrite_select_expression_with_union/query.sql b/parser/testdata/00562_rewrite_select_expression_with_union/query.sql new file mode 100644 index 000000000..46f7697cc --- /dev/null +++ b/parser/testdata/00562_rewrite_select_expression_with_union/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS test_00562; + +CREATE TABLE test_00562 ( s String, i Int64) ENGINE = Memory; + +INSERT INTO test_00562 VALUES('test_string', 1); + +SELECT s, SUM(i*2) AS i FROM test_00562 GROUP BY s UNION ALL SELECT s, SUM(i*2) AS i FROM test_00562 GROUP BY s; +SELECT s FROM (SELECT s, SUM(i*2) AS i FROM test_00562 GROUP BY s UNION ALL SELECT s, SUM(i*2) AS i FROM test_00562 GROUP BY s); + +DROP TABLE IF EXISTS test_00562; diff --git a/parser/testdata/00563_complex_in_expression/ast.json b/parser/testdata/00563_complex_in_expression/ast.json new file mode 100644 index 000000000..3a42aa713 --- /dev/null +++ b/parser/testdata/00563_complex_in_expression/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_00563 (children 1)" + }, + { + "explain": " Identifier test_00563" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001119594, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00563_complex_in_expression/metadata.json b/parser/testdata/00563_complex_in_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00563_complex_in_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00563_complex_in_expression/query.sql b/parser/testdata/00563_complex_in_expression/query.sql new file mode 100644 index 000000000..bd053e0d0 --- /dev/null +++ b/parser/testdata/00563_complex_in_expression/query.sql @@ -0,0 +1,32 @@ +DROP TABLE IF EXISTS test_00563; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE test_00563 ( dt Date, site_id Int32, site_key String ) ENGINE = MergeTree(dt, (site_id, site_key, dt), 8192); +INSERT INTO test_00563 (dt,site_id, site_key) VALUES ('2018-1-29', 100, 'key'); +SELECT * FROM test_00563 WHERE toInt32(site_id) IN (100); +SELECT * FROM test_00563 WHERE toInt32(site_id) IN 
(100,101); + +DROP TABLE IF EXISTS test_00563; + +DROP TABLE IF EXISTS join_with_index; +CREATE TABLE join_with_index (key UInt32, data UInt64) ENGINE = MergeTree ORDER BY key SETTINGS index_granularity=1; +INSERT INTO join_with_index VALUES (1, 0), (2, 99); + +SELECT key + 1 +FROM join_with_index +ALL INNER JOIN +( + SELECT + key, + data + FROM join_with_index + WHERE toUInt64(data) IN (0, 529335254087962442) +) js2 USING (key); + +SELECT _uniq, _uniq IN (0, 99) +FROM join_with_index +ARRAY JOIN + [key, data] AS _uniq +ORDER BY _uniq; + +DROP TABLE IF EXISTS join_with_index; diff --git a/parser/testdata/00563_insert_into_remote_and_zookeeper_long/ast.json b/parser/testdata/00563_insert_into_remote_and_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00563_insert_into_remote_and_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00563_insert_into_remote_and_zookeeper_long/metadata.json b/parser/testdata/00563_insert_into_remote_and_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00563_insert_into_remote_and_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00563_insert_into_remote_and_zookeeper_long/query.sql b/parser/testdata/00563_insert_into_remote_and_zookeeper_long/query.sql new file mode 100644 index 000000000..4676829f4 --- /dev/null +++ b/parser/testdata/00563_insert_into_remote_and_zookeeper_long/query.sql @@ -0,0 +1,21 @@ +-- Tags: zookeeper + +-- Check that settings are correctly passed through Distributed table +DROP TABLE IF EXISTS simple; +CREATE TABLE simple (d Int8) ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_00563/tables/simple', '1') ORDER BY d; + +SELECT 'prefer_localhost_replica=1'; +INSERT INTO TABLE FUNCTION remote('127.0.0.1', currentDatabase(), 'simple') SETTINGS prefer_localhost_replica=1, insert_deduplicate=1 VALUES (1); +INSERT INTO TABLE FUNCTION remote('127.0.0.1', currentDatabase(), 'simple') SETTINGS prefer_localhost_replica=1, insert_deduplicate=1 VALUES (1); +INSERT INTO TABLE FUNCTION remote('127.0.0.1', currentDatabase(), 'simple') SETTINGS prefer_localhost_replica=1, insert_deduplicate=0 VALUES (2); +INSERT INTO TABLE FUNCTION remote('127.0.0.1', currentDatabase(), 'simple') SETTINGS prefer_localhost_replica=1, insert_deduplicate=0 VALUES (2); +SELECT * FROM remote('127.0.0.1', currentDatabase(), 'simple') ORDER BY d; + +SELECT 'prefer_localhost_replica=0'; +TRUNCATE TABLE simple; +INSERT INTO TABLE FUNCTION remote('127.0.0.1', currentDatabase(), 'simple') SETTINGS prefer_localhost_replica=0, insert_deduplicate=1 VALUES (1); +INSERT INTO TABLE FUNCTION remote('127.0.0.1', currentDatabase(), 'simple') SETTINGS prefer_localhost_replica=0, insert_deduplicate=1 VALUES (1); +INSERT INTO TABLE FUNCTION remote('127.0.0.1', currentDatabase(), 'simple') SETTINGS prefer_localhost_replica=0, insert_deduplicate=0 VALUES (2); +INSERT INTO TABLE FUNCTION remote('127.0.0.1', currentDatabase(), 'simple') SETTINGS prefer_localhost_replica=0, insert_deduplicate=0 VALUES (2); +SELECT * FROM remote('127.0.0.2', currentDatabase(), 'simple') ORDER BY d; +DROP TABLE simple; diff --git a/parser/testdata/00563_shard_insert_into_remote/ast.json b/parser/testdata/00563_shard_insert_into_remote/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00563_shard_insert_into_remote/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/00563_shard_insert_into_remote/metadata.json b/parser/testdata/00563_shard_insert_into_remote/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00563_shard_insert_into_remote/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00563_shard_insert_into_remote/query.sql b/parser/testdata/00563_shard_insert_into_remote/query.sql new file mode 100644 index 000000000..abc9658fc --- /dev/null +++ b/parser/testdata/00563_shard_insert_into_remote/query.sql @@ -0,0 +1,9 @@ +-- Tags: shard + +drop table if exists tab; +create table tab (val UInt8) engine = MergeTree order by val; +insert into function remote('127.0.0.2', currentDatabase(), tab) values (1); +insert into function remote('127.0.0.{2|3}', currentDatabase(), tab) values (2); +insert into function remote('127.0.0.{2|3|4}', currentDatabase(), tab) values (3); +select * from tab order by val; +drop table tab; diff --git a/parser/testdata/00564_initial_column_values_with_default_expression/ast.json b/parser/testdata/00564_initial_column_values_with_default_expression/ast.json new file mode 100644 index 000000000..fa1a468b4 --- /dev/null +++ b/parser/testdata/00564_initial_column_values_with_default_expression/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001007392, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/00564_initial_column_values_with_default_expression/metadata.json b/parser/testdata/00564_initial_column_values_with_default_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00564_initial_column_values_with_default_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00564_initial_column_values_with_default_expression/query.sql b/parser/testdata/00564_initial_column_values_with_default_expression/query.sql new file mode 100644 index 000000000..3fff20a1f --- /dev/null +++ b/parser/testdata/00564_initial_column_values_with_default_expression/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS test; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE IF NOT EXISTS test( id UInt32, track UInt8, codec String, content String, rdate Date DEFAULT '2018-02-03', track_id String DEFAULT concat(concat(concat(toString(track), '-'), codec), content) ) ENGINE=MergeTree(rdate, (id, track_id), 8192); + +INSERT INTO test(id, track, codec) VALUES(1, 0, 'h264'); + +SELECT * FROM test ORDER BY id; + +INSERT INTO test(id, track, codec, content) VALUES(2, 0, 'h264', 'CONTENT'); + +SELECT * FROM test ORDER BY id; + +DROP TABLE IF EXISTS test; diff --git a/parser/testdata/00564_temporary_table_management/ast.json b/parser/testdata/00564_temporary_table_management/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00564_temporary_table_management/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00564_temporary_table_management/metadata.json b/parser/testdata/00564_temporary_table_management/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00564_temporary_table_management/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00564_temporary_table_management/query.sql 
b/parser/testdata/00564_temporary_table_management/query.sql new file mode 100644 index 000000000..4b43c8e7a --- /dev/null +++ b/parser/testdata/00564_temporary_table_management/query.sql @@ -0,0 +1,8 @@ +-- Tags: memory-engine +DROP TEMPORARY TABLE IF EXISTS temp_tab; +CREATE TEMPORARY TABLE temp_tab (number UInt64); +EXISTS TEMPORARY TABLE temp_tab; +SHOW CREATE TEMPORARY TABLE temp_tab; +SHOW TEMPORARY TABLES LIKE 'temp_tab'; +DROP TEMPORARY TABLE temp_tab; +EXISTS TEMPORARY TABLE temp_tab; diff --git a/parser/testdata/00564_versioned_collapsing_merge_tree/ast.json b/parser/testdata/00564_versioned_collapsing_merge_tree/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00564_versioned_collapsing_merge_tree/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00564_versioned_collapsing_merge_tree/metadata.json b/parser/testdata/00564_versioned_collapsing_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00564_versioned_collapsing_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00564_versioned_collapsing_merge_tree/query.sql b/parser/testdata/00564_versioned_collapsing_merge_tree/query.sql new file mode 100644 index 000000000..24da1490f --- /dev/null +++ b/parser/testdata/00564_versioned_collapsing_merge_tree/query.sql @@ -0,0 +1,209 @@ +-- Tags: no-random-merge-tree-settings + +set allow_deprecated_syntax_for_merge_tree=1; +set optimize_on_insert = 0; +set optimize_trivial_insert_select = 1; + +drop table if exists mult_tab; +create table mult_tab (date Date, value String, version UInt64, sign Int8) engine = VersionedCollapsingMergeTree(date, (date), 8192, sign, version); +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 10; +select 'table with 2 blocks final'; +select * from mult_tab final order by date, value, sign; +optimize table mult_tab final; +select 'table with 2 blocks optimized'; +select * from mult_tab; + +select '-------------------------'; + +drop table if exists mult_tab; +create table mult_tab (date Date, value String, version UInt64, sign Int8) engine = VersionedCollapsingMergeTree(date, (date, value), 8192, sign, version); +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 10; +select 'table with 2 blocks final'; +select * from mult_tab final order by date, value, sign; +optimize table mult_tab final; +select 'table with 2 blocks optimized'; +select * from mult_tab; + +select '-------------------------'; + +drop table if exists mult_tab; +create table mult_tab (date Date, value String, version UInt64, sign Int8) engine = VersionedCollapsingMergeTree(date, (date, value), 8192, sign, version); +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, -1, 1) from system.numbers limit 10; +select 'table with 2 blocks final'; +select * from mult_tab final order by date, value, sign; +optimize table mult_tab final; +select 'table with 2 blocks optimized'; +select * from mult_tab; + 
+select '-------------------------'; + +drop table if exists mult_tab; +create table mult_tab (date Date, value String, version UInt64, sign Int8) engine = VersionedCollapsingMergeTree(date, (date, value), 8192, sign, version); +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 1, if(number % 2, -1, 1) from system.numbers limit 10; +select 'table with 2 blocks final'; +select * from mult_tab final order by date, value, version, sign; +optimize table mult_tab final; +select 'table with 2 blocks optimized'; +select * from mult_tab; + +select '-------------------------'; + +drop table if exists mult_tab; +create table mult_tab (date Date, value String, version UInt64, sign Int8) engine = VersionedCollapsingMergeTree(date, (date, value), 8192, sign, version); +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, -1, 1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, -1, 1) from system.numbers limit 10; +select 'table with 4 blocks final'; +select * from mult_tab final order by date, value, sign; +optimize table mult_tab final; +select 'table with 4 blocks optimized'; +select * from mult_tab; + +select '-------------------------'; + +drop table if exists mult_tab; +create table mult_tab (date Date, value String, version UInt64, sign Int8) engine = VersionedCollapsingMergeTree(date, (date, value), 8192, sign, version); +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, -1, 1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 1, if(number % 3 = 0, 1, -1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 1, if(number % 3 = 1, 1, -1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 1, if(number % 3 = 2, 1, -1) from system.numbers limit 10; +select 'table with 5 blocks final'; +select * from mult_tab final order by date, value, sign; +optimize table mult_tab final; +select 'table with 5 blocks optimized'; +select * from mult_tab; + +select '-------------------------'; + +drop table if exists mult_tab; +create table mult_tab (date Date, value String, version UInt64, sign Int8) engine = VersionedCollapsingMergeTree(date, (date, value), 8192, sign, version); +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 1000000; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, -1, 1) from system.numbers limit 1000000; +select 'table with 2 blocks final'; +select * from mult_tab final order by date, value, sign; +optimize table mult_tab final; +select 'table with 2 blocks optimized'; +select * from mult_tab; + +select '-------------------------'; + +drop table if exists mult_tab; +create table mult_tab (date Date, value UInt64, key UInt64, version UInt64, sign Int8) engine = 
VersionedCollapsingMergeTree(date, (date), 8192, sign, version); +insert into mult_tab select '2018-01-31', number, number, 0, if(number < 64, 1, -1) from system.numbers limit 128; +insert into mult_tab select '2018-01-31', number, number + 128, 0, if(number < 64, -1, 1) from system.numbers limit 128; +select 'table with 2 blocks final'; +select date, value, version, sign from mult_tab final order by date, key, sign settings max_block_size=33; +optimize table mult_tab final; +select 'table with 2 blocks optimized'; +select date, value, version, sign from mult_tab; + +select '-------------------------'; +select 'Vertical merge'; +select '-------------------------'; + +drop table if exists mult_tab; +create table mult_tab (date Date, value String, version UInt64, sign Int8) engine = VersionedCollapsingMergeTree(sign, version) order by (date) settings enable_vertical_merge_algorithm = 1, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 0; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 10; +select 'table with 2 blocks final'; +select * from mult_tab final order by date, value, sign; +optimize table mult_tab final; +select 'table with 2 blocks optimized'; +select * from mult_tab; + +select '-------------------------'; + +drop table if exists mult_tab; +create table mult_tab (date Date, value String, version UInt64, sign Int8) engine = VersionedCollapsingMergeTree(sign, version) order by (date, value) settings enable_vertical_merge_algorithm = 1, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 0; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 10; +select 'table with 2 blocks final'; +select * from mult_tab final order by date, value, sign; +optimize table mult_tab final; +select 'table with 2 blocks optimized'; +select * from mult_tab; + +select '-------------------------'; + +drop table if exists mult_tab; +create table mult_tab (date Date, value String, version UInt64, sign Int8) engine = VersionedCollapsingMergeTree(sign, version) order by (date, value) settings enable_vertical_merge_algorithm = 1, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 0; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, -1, 1) from system.numbers limit 10; +select 'table with 2 blocks final'; +select * from mult_tab final order by date, value, sign; +optimize table mult_tab final; +select 'table with 2 blocks optimized'; +select * from mult_tab; + +select '-------------------------'; + +drop table if exists mult_tab; +create table mult_tab (date Date, value String, version UInt64, sign Int8) engine = VersionedCollapsingMergeTree(sign, version) order by (date, value) settings enable_vertical_merge_algorithm = 1, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 0; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2,
1, -1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 1, if(number % 2, -1, 1) from system.numbers limit 10; +select 'table with 2 blocks final'; +select * from mult_tab final order by date, value, version, sign; +optimize table mult_tab final; +select 'table with 2 blocks optimized'; +select * from mult_tab; + +select '-------------------------'; + +drop table if exists mult_tab; +create table mult_tab (date Date, value String, version UInt64, sign Int8) engine = VersionedCollapsingMergeTree(sign, version) order by (date, value) settings enable_vertical_merge_algorithm = 1, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 0; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, -1, 1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, -1, 1) from system.numbers limit 10; +select 'table with 4 blocks final'; +select * from mult_tab final order by date, value, sign; +optimize table mult_tab final; +select 'table with 4 blocks optimized'; +select * from mult_tab; + +select '-------------------------'; + +drop table if exists mult_tab; +create table mult_tab (date Date, value String, version UInt64, sign Int8) engine = VersionedCollapsingMergeTree(sign, version) order by (date, value) settings enable_vertical_merge_algorithm = 1, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 0; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, -1, 1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 1, if(number % 3 = 0, 1, -1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 1, if(number % 3 = 1, 1, -1) from system.numbers limit 10; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 1, if(number % 3 = 2, 1, -1) from system.numbers limit 10; +select 'table with 5 blocks final'; +select * from mult_tab final order by date, value, sign; +optimize table mult_tab final; +select 'table with 5 blocks optimized'; +select * from mult_tab; + +select '-------------------------'; + +drop table if exists mult_tab; +create table mult_tab (date Date, value String, version UInt64, sign Int8) engine = VersionedCollapsingMergeTree(sign, version) order by (date, value) settings enable_vertical_merge_algorithm = 1, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 0; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, 1, -1) from system.numbers limit 1000000; +insert into mult_tab select '2018-01-31', 'str_' || toString(number), 0, if(number % 2, -1, 1) from system.numbers limit 1000000; +select 'table with 2 blocks final'; +select * from mult_tab final order by date, value, sign; +optimize table mult_tab final; +select 'table with 2 blocks optimized'; +select * from mult_tab; + +select '-------------------------'; + +drop table if exists mult_tab; 
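+-- Repeat of the earlier (date)-keyed scenario with a separate 'key' column, this time under the vertical merge algorithm settings.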
+create table mult_tab (date Date, value UInt64, key UInt64, version UInt64, sign Int8) engine = VersionedCollapsingMergeTree(sign, version) order by (date) settings enable_vertical_merge_algorithm = 1, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 0; +insert into mult_tab select '2018-01-31', number, number, 0, if(number < 64, 1, -1) from system.numbers limit 128; +insert into mult_tab select '2018-01-31', number, number + 128, 0, if(number < 64, -1, 1) from system.numbers limit 128; +select 'table with 2 blocks final'; +select date, value, version, sign from mult_tab final order by date, key, sign settings max_block_size=33; +optimize table mult_tab final; +select 'table with 2 blocks optimized'; +select date, value, version, sign from mult_tab; + +DROP TABLE mult_tab; diff --git a/parser/testdata/00566_enum_min_max/ast.json b/parser/testdata/00566_enum_min_max/ast.json new file mode 100644 index 000000000..0a4613330 --- /dev/null +++ b/parser/testdata/00566_enum_min_max/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function min (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function max (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2]" + }, + { + "explain": " Literal 'Enum8(\\'Hello\\' = 1, \\'World\\' = 2)'" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.001214845, + "rows_read": 27, + "bytes_read": 1131 + } +} diff --git a/parser/testdata/00566_enum_min_max/metadata.json b/parser/testdata/00566_enum_min_max/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00566_enum_min_max/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00566_enum_min_max/query.sql b/parser/testdata/00566_enum_min_max/query.sql new file mode 100644 index 000000000..ed1922384 --- /dev/null +++ b/parser/testdata/00566_enum_min_max/query.sql @@ -0,0 +1 @@ +SELECT min(x), max(x), sum(x) FROM (SELECT CAST(arrayJoin([1, 2]) AS Enum8('Hello' = 1, 'World' = 2)) AS x); diff --git a/parser/testdata/00567_parse_datetime_as_unix_timestamp/ast.json b/parser/testdata/00567_parse_datetime_as_unix_timestamp/ast.json new file mode 100644 index 000000000..06dcdb36b --- /dev/null +++ 
b/parser/testdata/00567_parse_datetime_as_unix_timestamp/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001151386, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00567_parse_datetime_as_unix_timestamp/metadata.json b/parser/testdata/00567_parse_datetime_as_unix_timestamp/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00567_parse_datetime_as_unix_timestamp/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00567_parse_datetime_as_unix_timestamp/query.sql b/parser/testdata/00567_parse_datetime_as_unix_timestamp/query.sql new file mode 100644 index 000000000..6efdb5b51 --- /dev/null +++ b/parser/testdata/00567_parse_datetime_as_unix_timestamp/query.sql @@ -0,0 +1,6 @@ +SET input_format_values_interpret_expressions = 0; + +CREATE TEMPORARY TABLE t (x DateTime('UTC')); +INSERT INTO t VALUES ('2000-01-02 03:04:05'), ('1234567890'), (1111111111); + +SELECT x FROM t ORDER BY x; diff --git a/parser/testdata/00568_empty_function_with_fixed_string/ast.json b/parser/testdata/00568_empty_function_with_fixed_string/ast.json new file mode 100644 index 000000000..f959f2d0b --- /dev/null +++ b/parser/testdata/00568_empty_function_with_fixed_string/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toFixedString (alias str) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Function empty (alias is_empty) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier str" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.00114886, + "rows_read": 11, + "bytes_read": 426 + } +} diff --git a/parser/testdata/00568_empty_function_with_fixed_string/metadata.json b/parser/testdata/00568_empty_function_with_fixed_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00568_empty_function_with_fixed_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00568_empty_function_with_fixed_string/query.sql b/parser/testdata/00568_empty_function_with_fixed_string/query.sql new file mode 100644 index 000000000..4f7154775 --- /dev/null +++ b/parser/testdata/00568_empty_function_with_fixed_string/query.sql @@ -0,0 +1,8 @@ +SELECT toFixedString('', 4) AS str, empty(str) AS is_empty; +SELECT toFixedString('\0abc', 4) AS str, empty(str) AS is_empty; + +DROP TABLE IF EXISTS defaulted; +CREATE TABLE defaulted (v6 FixedString(16)) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO defaulted SELECT toFixedString('::0', 16) FROM numbers(32768); +SELECT count(), notEmpty(v6) e FROM defaulted GROUP BY e; +DROP TABLE defaulted; diff --git a/parser/testdata/00569_parse_date_time_best_effort/ast.json b/parser/testdata/00569_parse_date_time_best_effort/ast.json new file mode 100644 index 000000000..2df7b70f4 --- /dev/null +++ b/parser/testdata/00569_parse_date_time_best_effort/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" 
+ } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001210177, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00569_parse_date_time_best_effort/metadata.json b/parser/testdata/00569_parse_date_time_best_effort/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00569_parse_date_time_best_effort/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00569_parse_date_time_best_effort/query.sql b/parser/testdata/00569_parse_date_time_best_effort/query.sql new file mode 100644 index 000000000..ca423c192 --- /dev/null +++ b/parser/testdata/00569_parse_date_time_best_effort/query.sql @@ -0,0 +1,114 @@ +SET output_format_pretty_display_footer_column_names=0; +SELECT + s, + parseDateTimeBestEffortOrNull(s, 'UTC') AS a, + parseDateTimeBestEffortOrZero(s, 'UTC') AS b +FROM +( + SELECT arrayJoin([ +'0', +'0000', +'2000-01-01 00:00:00', +'2000-01-01 01:00:00', +'02/01/17 010203 MSK', +'02/01/17 010203 MSK+0100', +'02/01/17 010203 UTC+0300', +'02/01/17 010203Z', +'02/01/1970 010203Z', +'02/01/70 010203Z', +'11 Feb 2018 06:40:50 +0300', +'17 Apr 2000 2 1:2:3', +'19700102 01:00:00', +'1970010201:00:00', +'19700102010203', +'19700102010203Z', +'1970/01/02 010203Z', +'20 2000', +'201', +'20160101', +'2016-01-01', +'201601-01', +'2016-01-01MSD', +'2016-01-01 MSD', +'201601-01 MSD', +'2016-01-01UTC', +'2016-01-01Z', +'2017', +'2017/01/00', +'2017/01/00 MSD', +'2017/01/00 MSD Jun', +'2017/01/01', +'201701 02 010203 UTC+0300', +'2017-01-02 03:04:05', +'2017-01-0203:04:05', +'2017-01-02 03:04:05+0', +'2017-01-02 03:04:05+00', +'2017-01-02 03:04:05+0000', +'2017-01-02 03:04:05 -0100', +'2017-01-02 03:04:05+030', +'2017-01-02 03:04:05+0300', +'2017-01-02 03:04:05+1', +'2017-01-02 03:04:05+300', +'2017-01-02 03:04:05+900', +'2017-01-02 03:04:05GMT', +'2017-01-02 03:04:05 MSD', +'2017-01-02 03:04:05 MSD Feb', +'2017-01-02 03:04:05 MSD Jun', +'2017-01-02 03:04:05 MSK', +'2017-01-02T03:04:05', +'2017-01-02T03:04:05+00', +'2017-01-02T03:04:05 -0100', +'2017-01-02T03:04:05-0100', +'2017-01-02T03:04:05+0100', +'2017-01-02T03:04:05Z', +'2017-01 03:04:05 MSD Jun', +'2017-01 03:04 MSD Jun', +'2017/01/31', +'2017/01/32', +'2017-01 MSD Jun', +'201701 MSD Jun', +'2017 25 1:2:3', +'2017 25 Apr 1:2:3', +'2017 Apr 01 11:22:33', +'2017 Apr 02 01/02/03 UTC+0300', +'2017 Apr 02 010203 UTC+0300', +'2017 Apr 02 01:2:3 UTC+0300', +'2017 Apr 02 1:02:3', +'2017 Apr 02 11:22:33', +'2017 Apr 02 1:2:03', +'2017 Apr 02 1:22:33', +'2017 Apr 02 1:2:3', +'2017 Apr 02 1:2:33', +'2017 Apr 02 1:2:3 MSK', +'2017 Apr 02 1:2:3 MSK 2017', +'2017 Apr 02 1:2:3 MSK 2018', +'2017 Apr 02 1:2:3 UTC+0000', +'2017 Apr 02 1:2:3 UTC+0300', +'2017 Apr 02 1:2:3 UTC+0400', +'2017 Apr 2 1:2:3', +'2017 Jan 02 010203 UTC+0300', +'25 Apr 2017 01:02:03', +'25 Apr 2017 1:2:3', +'25 Jan 2017 1:2:3', +'25 Jan 2017 1:2:3 MSK', +'25 Jan 2017 1:2:3 PM', +'25 Jan 2017 1:2:3Z', +'25 Jan 2017 1:2:3 Z', +'25 Jan 2017 1:2:3 Z +0300', +'25 Jan 2017 1:2:3 Z+03:00', +'25 Jan 2017 1:2:3 Z +0300 OM', +'25 Jan 2017 1:2:3 Z +03:00 PM', +'25 Jan 2017 1:2:3 Z +0300 PM', +'25 Jan 2017 1:2:3 Z+03:00 PM', +'25 Jan 2017 1:2:3 Z +03:30 PM', +'25 Jan 2017 1:2:3Z Mo', +'25 Jan 2017 1:2:3Z Mon', +'25 Jan 2017 1:2:3Z Moo', +'25 Jan 2017 1:2:3 Z PM', +'25 Jan 2017 1:2:3Z PM', +'25 Jan 2017 1:2:3 Z PM +03:00', +'Jun, 11 Feb 2018 06:40:50 +0300', +'Sun 11 Feb 2018 06:40:50 +0300', +'Sun, 11 Feb 2018 06:40:50 +0300' +]) AS s) +FORMAT PrettySpaceNoEscapes; 
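+-- The list above mixes parseable, ambiguous, and unparseable inputs: parseDateTimeBestEffortOrNull returns NULL and parseDateTimeBestEffortOrZero returns the zero date where parsing fails.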
diff --git a/parser/testdata/00570_empty_array_is_const/ast.json b/parser/testdata/00570_empty_array_is_const/ast.json new file mode 100644 index 000000000..e694fecdb --- /dev/null +++ b/parser/testdata/00570_empty_array_is_const/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function dumpColumnStructure (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001364619, + "rows_read": 8, + "bytes_read": 312 + } +} diff --git a/parser/testdata/00570_empty_array_is_const/metadata.json b/parser/testdata/00570_empty_array_is_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00570_empty_array_is_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00570_empty_array_is_const/query.sql b/parser/testdata/00570_empty_array_is_const/query.sql new file mode 100644 index 000000000..62d56a774 --- /dev/null +++ b/parser/testdata/00570_empty_array_is_const/query.sql @@ -0,0 +1,3 @@ +SELECT dumpColumnStructure([]); +SELECT dumpColumnStructure([[[]]]); +SELECT DISTINCT dumpColumnStructure([[], [1]]) FROM numbers(2); diff --git a/parser/testdata/00571_alter_nullable/ast.json b/parser/testdata/00571_alter_nullable/ast.json new file mode 100644 index 000000000..0d2678c65 --- /dev/null +++ b/parser/testdata/00571_alter_nullable/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nullable_00571 (children 1)" + }, + { + "explain": " Identifier nullable_00571" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001520795, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/00571_alter_nullable/metadata.json b/parser/testdata/00571_alter_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00571_alter_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00571_alter_nullable/query.sql b/parser/testdata/00571_alter_nullable/query.sql new file mode 100644 index 000000000..7b3b36f73 --- /dev/null +++ b/parser/testdata/00571_alter_nullable/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS nullable_00571; +CREATE TABLE nullable_00571 (x String) ENGINE = MergeTree ORDER BY x; +INSERT INTO nullable_00571 VALUES ('hello'), ('world'); +SELECT * FROM nullable_00571; +ALTER TABLE nullable_00571 ADD COLUMN n Nullable(UInt64); +SELECT * FROM nullable_00571; +ALTER TABLE nullable_00571 DROP COLUMN n; +ALTER TABLE nullable_00571 ADD COLUMN n Nullable(UInt64) DEFAULT NULL; +SELECT * FROM nullable_00571; +ALTER TABLE nullable_00571 DROP COLUMN n; +ALTER TABLE nullable_00571 ADD COLUMN n Nullable(UInt64) DEFAULT 0; +SELECT * FROM nullable_00571; +DROP TABLE nullable_00571; diff --git a/parser/testdata/00571_non_exist_database_when_create_materializ_view/ast.json b/parser/testdata/00571_non_exist_database_when_create_materializ_view/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00571_non_exist_database_when_create_materializ_view/ast.json @@ -0,0 +1 @@ 
+{"error": true} diff --git a/parser/testdata/00571_non_exist_database_when_create_materializ_view/metadata.json b/parser/testdata/00571_non_exist_database_when_create_materializ_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00571_non_exist_database_when_create_materializ_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00571_non_exist_database_when_create_materializ_view/query.sql b/parser/testdata/00571_non_exist_database_when_create_materializ_view/query.sql new file mode 100644 index 000000000..d24a57187 --- /dev/null +++ b/parser/testdata/00571_non_exist_database_when_create_materializ_view/query.sql @@ -0,0 +1,25 @@ + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE_1:Identifier}; +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; +DROP TABLE IF EXISTS test_materialized_00571; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE DATABASE {CLICKHOUSE_DATABASE_1:Identifier}; +CREATE TABLE test_00571 ( date Date, platform Enum8('a' = 0, 'b' = 1, 'c' = 2), app Enum8('a' = 0, 'b' = 1) ) ENGINE = MergeTree(date, (platform, app), 8192); +CREATE MATERIALIZED VIEW test_materialized_00571 ENGINE = MergeTree(date, (platform, app), 8192) POPULATE AS SELECT date, platform, app FROM (SELECT * FROM test_00571); + +USE {CLICKHOUSE_DATABASE_1:Identifier}; + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.test_00571 VALUES('2018-02-16', 'a', 'a'); + +SELECT * FROM {CLICKHOUSE_DATABASE:Identifier}.test_00571; +SELECT * FROM {CLICKHOUSE_DATABASE:Identifier}.test_materialized_00571; + +DETACH TABLE {CLICKHOUSE_DATABASE:Identifier}.test_materialized_00571; +ATTACH TABLE {CLICKHOUSE_DATABASE:Identifier}.test_materialized_00571; + +SELECT * FROM {CLICKHOUSE_DATABASE:Identifier}.test_materialized_00571; + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE_1:Identifier}; +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.test_00571; +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.test_materialized_00571; diff --git a/parser/testdata/00572_aggregation_by_empty_set/ast.json b/parser/testdata/00572_aggregation_by_empty_set/ast.json new file mode 100644 index 000000000..66ff0f8a9 --- /dev/null +++ b/parser/testdata/00572_aggregation_by_empty_set/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType UInt8" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001183711, + "rows_read": 6, + "bytes_read": 201 + } +} diff --git a/parser/testdata/00572_aggregation_by_empty_set/metadata.json b/parser/testdata/00572_aggregation_by_empty_set/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00572_aggregation_by_empty_set/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00572_aggregation_by_empty_set/query.sql b/parser/testdata/00572_aggregation_by_empty_set/query.sql new file mode 100644 index 000000000..8058605a7 --- /dev/null +++ b/parser/testdata/00572_aggregation_by_empty_set/query.sql @@ -0,0 +1,21 @@ +CREATE TEMPORARY TABLE t (x UInt8); + +SET empty_result_for_aggregation_by_empty_set = 0; + +SELECT count() FROM system.one WHERE 0; +SELECT count() FROM system.one WHERE rand() < 0; +SELECT count() FROM 
system.one WHERE 1; + +SELECT count(), uniq(x), avg(x), avg(toNullable(x)), groupArray(x), groupUniqArray(x) FROM t; +SELECT count(), uniq(x), avg(x), avg(toNullable(x)), groupArray(x), groupUniqArray(x) FROM (SELECT * FROM t UNION ALL SELECT * FROM t); +SELECT x, count(), uniq(x), avg(x), avg(toNullable(x)), groupArray(x), groupUniqArray(x) FROM t GROUP BY x; + +SET empty_result_for_aggregation_by_empty_set = 1; + +SELECT count() FROM system.one WHERE 0; +SELECT count() FROM system.one WHERE rand() < 0; +SELECT count() FROM system.one WHERE 1; + +SELECT count(), uniq(x), avg(x), avg(toNullable(x)), groupArray(x), groupUniqArray(x) FROM t; +SELECT count(), uniq(x), avg(x), avg(toNullable(x)), groupArray(x), groupUniqArray(x) FROM (SELECT * FROM t UNION ALL SELECT * FROM t); +SELECT x, count(), uniq(x), avg(x), avg(toNullable(x)), groupArray(x), groupUniqArray(x) FROM t GROUP BY x; diff --git a/parser/testdata/00573_shard_aggregation_by_empty_set/ast.json b/parser/testdata/00573_shard_aggregation_by_empty_set/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00573_shard_aggregation_by_empty_set/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00573_shard_aggregation_by_empty_set/metadata.json b/parser/testdata/00573_shard_aggregation_by_empty_set/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00573_shard_aggregation_by_empty_set/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00573_shard_aggregation_by_empty_set/query.sql b/parser/testdata/00573_shard_aggregation_by_empty_set/query.sql new file mode 100644 index 000000000..adb569897 --- /dev/null +++ b/parser/testdata/00573_shard_aggregation_by_empty_set/query.sql @@ -0,0 +1,11 @@ +-- Tags: shard + +CREATE TEMPORARY TABLE t_00573 (x UInt8); + +SET empty_result_for_aggregation_by_empty_set = 0; +SELECT count(), uniq(x), avg(x), avg(toNullable(x)), groupArray(x), groupUniqArray(x) FROM remote('127.0.0.{1..10}', system.one) WHERE (rand() AS x) < 0; +SELECT count(), uniq(x), avg(x), avg(toNullable(x)), groupArray(x), groupUniqArray(x) FROM remote('127.0.0.{1..10}', system.one) WHERE (rand() AS x) < 0 GROUP BY x; + +SET empty_result_for_aggregation_by_empty_set = 1; +SELECT count(), uniq(x), avg(x), avg(toNullable(x)), groupArray(x), groupUniqArray(x) FROM remote('127.0.0.{1..10}', system.one) WHERE (rand() AS x) < 0; +SELECT count(), uniq(x), avg(x), avg(toNullable(x)), groupArray(x), groupUniqArray(x) FROM remote('127.0.0.{1..10}', system.one) WHERE (rand() AS x) < 0 GROUP BY x; diff --git a/parser/testdata/00575_merge_and_index_with_function_in_in/ast.json b/parser/testdata/00575_merge_and_index_with_function_in_in/ast.json new file mode 100644 index 000000000..8ee6e776b --- /dev/null +++ b/parser/testdata/00575_merge_and_index_with_function_in_in/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_00575 (children 1)" + }, + { + "explain": " Identifier t_00575" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000966019, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00575_merge_and_index_with_function_in_in/metadata.json b/parser/testdata/00575_merge_and_index_with_function_in_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00575_merge_and_index_with_function_in_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/00575_merge_and_index_with_function_in_in/query.sql b/parser/testdata/00575_merge_and_index_with_function_in_in/query.sql new file mode 100644 index 000000000..6f0ddd9fa --- /dev/null +++ b/parser/testdata/00575_merge_and_index_with_function_in_in/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS t_00575; + +set allow_deprecated_syntax_for_merge_tree=1; +create table t_00575(d Date) engine MergeTree(d, d, 8192); + +insert into t_00575 values ('2018-02-20'); + +select count() from t_00575 where toDayOfWeek(d) in (2); + +DROP TABLE t_00575; diff --git a/parser/testdata/00576_nested_and_prewhere/ast.json b/parser/testdata/00576_nested_and_prewhere/ast.json new file mode 100644 index 000000000..568b5f4b8 --- /dev/null +++ b/parser/testdata/00576_nested_and_prewhere/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nested (children 1)" + }, + { + "explain": " Identifier nested" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001331222, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/00576_nested_and_prewhere/metadata.json b/parser/testdata/00576_nested_and_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00576_nested_and_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00576_nested_and_prewhere/query.sql b/parser/testdata/00576_nested_and_prewhere/query.sql new file mode 100644 index 000000000..f5d9f0d62 --- /dev/null +++ b/parser/testdata/00576_nested_and_prewhere/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS nested; + +CREATE TABLE nested (x UInt64, filter UInt8, n Nested(a UInt64)) ENGINE = MergeTree ORDER BY x SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +INSERT INTO nested SELECT number, number % 2, range(number % 10) FROM system.numbers LIMIT 100000; + +ALTER TABLE nested ADD COLUMN n.b Array(UInt64); +SELECT DISTINCT n.b FROM nested PREWHERE filter ORDER BY ALL; + +ALTER TABLE nested ADD COLUMN n.c Array(UInt64) DEFAULT arrayMap(x -> x * 2, n.a); +SELECT DISTINCT n.c FROM nested PREWHERE filter ORDER BY ALL; +SELECT DISTINCT n.a, n.c FROM nested PREWHERE filter ORDER BY ALL; + +DROP TABLE nested; diff --git a/parser/testdata/00577_full_join_segfault/ast.json b/parser/testdata/00577_full_join_segfault/ast.json new file mode 100644 index 000000000..b8b0574b1 --- /dev/null +++ b/parser/testdata/00577_full_join_segfault/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000981647, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00577_full_join_segfault/metadata.json b/parser/testdata/00577_full_join_segfault/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00577_full_join_segfault/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00577_full_join_segfault/query.sql b/parser/testdata/00577_full_join_segfault/query.sql new file mode 100644 index 000000000..e24921417 --- /dev/null +++ b/parser/testdata/00577_full_join_segfault/query.sql @@ -0,0 +1,10 @@ +SET any_join_distinct_right_table_keys = 1; +SET joined_subquery_requires_alias = 0; + +SELECT k, a1, b1, a2, b2 FROM (SELECT 0 AS k, 'hello' AS a1, 123 AS b1, a1) ANY FULL OUTER JOIN (SELECT 1 AS k, 'hello' AS a2, 456 AS b2, a2) USING (k) ORDER BY 
k; +SELECT k, a, b FROM (SELECT 0 AS k, 'hello' AS a, 123 AS b, a) ANY FULL OUTER JOIN (SELECT 1 AS k) USING (k) ORDER BY k; + +SET join_use_nulls = 1; + +SELECT k, a1, b1, a2, b2 FROM (SELECT 0 AS k, 'hello' AS a1, 123 AS b1, a1) ANY FULL OUTER JOIN (SELECT 1 AS k, 'hello' AS a2, 456 AS b2, a2) USING (k) ORDER BY k; +SELECT k, a, b FROM (SELECT 0 AS k, 'hello' AS a, 123 AS b, a) ANY FULL OUTER JOIN (SELECT 1 AS k) USING (k) ORDER BY k; diff --git a/parser/testdata/00577_replacing_merge_tree_vertical_merge/ast.json b/parser/testdata/00577_replacing_merge_tree_vertical_merge/ast.json new file mode 100644 index 000000000..4ee022626 --- /dev/null +++ b/parser/testdata/00577_replacing_merge_tree_vertical_merge/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001678271, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00577_replacing_merge_tree_vertical_merge/metadata.json b/parser/testdata/00577_replacing_merge_tree_vertical_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00577_replacing_merge_tree_vertical_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00577_replacing_merge_tree_vertical_merge/query.sql b/parser/testdata/00577_replacing_merge_tree_vertical_merge/query.sql new file mode 100644 index 000000000..871f96bb0 --- /dev/null +++ b/parser/testdata/00577_replacing_merge_tree_vertical_merge/query.sql @@ -0,0 +1,30 @@ +set optimize_on_insert = 0; + +drop table if exists tab_00577; +create table tab_00577 (date Date, version UInt64, val UInt64) engine = ReplacingMergeTree(version) partition by date order by date settings enable_vertical_merge_algorithm = 1, + vertical_merge_algorithm_min_rows_to_activate = 0, vertical_merge_algorithm_min_columns_to_activate = 0, min_rows_for_wide_part = 0, + min_bytes_for_wide_part = 0, allow_experimental_replacing_merge_with_cleanup=1; +insert into tab_00577 values ('2018-01-01', 2, 2), ('2018-01-01', 1, 1); +insert into tab_00577 values ('2018-01-01', 0, 0); +select * from tab_00577 order by version; +OPTIMIZE TABLE tab_00577 FINAL CLEANUP; +select * from tab_00577; +drop table tab_00577; + + +DROP TABLE IF EXISTS testCleanupR1; +CREATE TABLE testCleanupR1 (uid String, version UInt32, is_deleted UInt8) + ENGINE = ReplicatedReplacingMergeTree('/clickhouse/{database}/tables/test_cleanup/', 'r1', version, is_deleted) + ORDER BY uid SETTINGS enable_vertical_merge_algorithm = 1, vertical_merge_algorithm_min_rows_to_activate = 0, vertical_merge_algorithm_min_columns_to_activate = 0, min_rows_for_wide_part = 0, + min_bytes_for_wide_part = 0, allow_experimental_replacing_merge_with_cleanup=1; +INSERT INTO testCleanupR1 (*) VALUES ('d1', 1, 0),('d2', 1, 0),('d3', 1, 0),('d4', 1, 0); +INSERT INTO testCleanupR1 (*) VALUES ('d3', 2, 1); +INSERT INTO testCleanupR1 (*) VALUES ('d1', 2, 1); +SYSTEM SYNC REPLICA testCleanupR1; -- Avoid "Cannot select parts for optimization: Entry for part all_2_2_0 hasn't been read from the replication log yet" + +OPTIMIZE TABLE testCleanupR1 FINAL CLEANUP; + +-- Only d2 and d4 remain (d1 and d3 carry is_deleted = 1 and are removed by CLEANUP) +SELECT '== (Replicas) Test optimize =='; +SELECT * FROM testCleanupR1 order by uid; +DROP TABLE IF EXISTS testCleanupR1 \ No newline at end of file diff --git a/parser/testdata/00578_merge_table_and_table_virtual_column/ast.json b/parser/testdata/00578_merge_table_and_table_virtual_column/ast.json new
file mode 100644 index 000000000..9fd053347 --- /dev/null +++ b/parser/testdata/00578_merge_table_and_table_virtual_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery numbers1 (children 1)" + }, + { + "explain": " Identifier numbers1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00098555, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00578_merge_table_and_table_virtual_column/metadata.json b/parser/testdata/00578_merge_table_and_table_virtual_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00578_merge_table_and_table_virtual_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00578_merge_table_and_table_virtual_column/query.sql b/parser/testdata/00578_merge_table_and_table_virtual_column/query.sql new file mode 100644 index 000000000..f292eb306 --- /dev/null +++ b/parser/testdata/00578_merge_table_and_table_virtual_column/query.sql @@ -0,0 +1,44 @@ +DROP TABLE IF EXISTS numbers1; +DROP TABLE IF EXISTS numbers2; +DROP TABLE IF EXISTS numbers3; +DROP TABLE IF EXISTS numbers4; +DROP TABLE IF EXISTS numbers5; + +CREATE TABLE numbers1 ENGINE = StripeLog AS SELECT number FROM numbers(1000); +CREATE TABLE numbers2 ENGINE = TinyLog AS SELECT number FROM numbers(1000); +CREATE TABLE numbers3 ENGINE = Log AS SELECT number FROM numbers(1000); +CREATE TABLE numbers4 ENGINE = Memory AS SELECT number FROM numbers(1000); +CREATE TABLE numbers5 ENGINE = MergeTree ORDER BY number AS SELECT number FROM numbers(1000); + +SELECT count() FROM merge(currentDatabase(), '^numbers\\d+$'); +SELECT DISTINCT count() FROM merge(currentDatabase(), '^numbers\\d+$') GROUP BY number; + +SET optimize_aggregation_in_order = 0; -- FIXME : in order may happen before filter push down + +SET max_rows_to_read = 1000; + +SET max_threads = 'auto'; +SELECT count() FROM merge(currentDatabase(), '^numbers\\d+$') WHERE _table = 'numbers1'; + +SET max_threads = 1; +SELECT count() FROM merge(currentDatabase(), '^numbers\\d+$') WHERE _table = 'numbers2'; + +SET max_threads = 10; +SELECT count() FROM merge(currentDatabase(), '^numbers\\d+$') WHERE _table = 'numbers3'; + +SET max_rows_to_read = 1; + +SET max_threads = 'auto'; +SELECT count() FROM merge(currentDatabase(), '^numbers\\d+$') WHERE _table = 'non_existing'; + +SET max_threads = 1; +SELECT count() FROM merge(currentDatabase(), '^numbers\\d+$') WHERE _table = 'non_existing'; + +SET max_threads = 10; +SELECT count() FROM merge(currentDatabase(), '^numbers\\d+$') WHERE _table = 'non_existing'; + +DROP TABLE numbers1; +DROP TABLE numbers2; +DROP TABLE numbers3; +DROP TABLE numbers4; +DROP TABLE numbers5; diff --git a/parser/testdata/00578_merge_table_sampling/ast.json b/parser/testdata/00578_merge_table_sampling/ast.json new file mode 100644 index 000000000..0a4351b90 --- /dev/null +++ b/parser/testdata/00578_merge_table_sampling/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery numbers1 (children 1)" + }, + { + "explain": " Identifier numbers1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00145057, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00578_merge_table_sampling/metadata.json b/parser/testdata/00578_merge_table_sampling/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/00578_merge_table_sampling/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00578_merge_table_sampling/query.sql b/parser/testdata/00578_merge_table_sampling/query.sql new file mode 100644 index 000000000..03f57792f --- /dev/null +++ b/parser/testdata/00578_merge_table_sampling/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS numbers1; +DROP TABLE IF EXISTS numbers2; + +CREATE TABLE numbers1 ENGINE = Memory AS SELECT number FROM numbers(1000); +CREATE TABLE numbers2 ENGINE = Memory AS SELECT number FROM numbers(1000); + +SELECT * FROM merge(currentDatabase(), '^numbers\\d+$') SAMPLE 0.1; -- { serverError SAMPLING_NOT_SUPPORTED } + +DROP TABLE numbers1; +DROP TABLE numbers2; + +CREATE TABLE numbers1 ENGINE = MergeTree ORDER BY intHash32(number) SAMPLE BY intHash32(number) AS SELECT number FROM numbers(1000); +CREATE TABLE numbers2 ENGINE = MergeTree ORDER BY intHash32(number) SAMPLE BY intHash32(number) AS SELECT number FROM numbers(1000); + +SELECT * FROM merge(currentDatabase(), '^numbers\\d+$') SAMPLE 0.01; + +DROP TABLE numbers1; +DROP TABLE numbers2; diff --git a/parser/testdata/00578_merge_table_shadow_virtual_column/ast.json b/parser/testdata/00578_merge_table_shadow_virtual_column/ast.json new file mode 100644 index 000000000..cc336dc3b --- /dev/null +++ b/parser/testdata/00578_merge_table_shadow_virtual_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery numbers1 (children 1)" + }, + { + "explain": " Identifier numbers1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001543551, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00578_merge_table_shadow_virtual_column/metadata.json b/parser/testdata/00578_merge_table_shadow_virtual_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00578_merge_table_shadow_virtual_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00578_merge_table_shadow_virtual_column/query.sql b/parser/testdata/00578_merge_table_shadow_virtual_column/query.sql new file mode 100644 index 000000000..0cd92591a --- /dev/null +++ b/parser/testdata/00578_merge_table_shadow_virtual_column/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS numbers1; +DROP TABLE IF EXISTS numbers2; + +CREATE TABLE numbers1 ENGINE = Memory AS SELECT number as _table FROM numbers(1000); +CREATE TABLE numbers2 ENGINE = Memory AS SELECT number as _table FROM numbers(1000); + +SELECT count() FROM merge(currentDatabase(), '^numbers\\d+$') WHERE _table='numbers1'; -- { serverError TYPE_MISMATCH } +SELECT count() FROM merge(currentDatabase(), '^numbers\\d+$') WHERE _table=1; + +DROP TABLE numbers1; +DROP TABLE numbers2; diff --git a/parser/testdata/00578_merge_trees_without_primary_key/ast.json b/parser/testdata/00578_merge_trees_without_primary_key/ast.json new file mode 100644 index 000000000..57b3740d1 --- /dev/null +++ b/parser/testdata/00578_merge_trees_without_primary_key/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '*** MergeTree ***'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001418209, + "rows_read": 5, + 
"bytes_read": 188 + } +} diff --git a/parser/testdata/00578_merge_trees_without_primary_key/metadata.json b/parser/testdata/00578_merge_trees_without_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00578_merge_trees_without_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00578_merge_trees_without_primary_key/query.sql b/parser/testdata/00578_merge_trees_without_primary_key/query.sql new file mode 100644 index 000000000..cebeb408f --- /dev/null +++ b/parser/testdata/00578_merge_trees_without_primary_key/query.sql @@ -0,0 +1,56 @@ +SELECT '*** MergeTree ***'; + +DROP TABLE IF EXISTS unsorted; +CREATE TABLE unsorted (x UInt32, y String) ENGINE MergeTree ORDER BY tuple() SETTINGS vertical_merge_algorithm_min_rows_to_activate=0, vertical_merge_algorithm_min_columns_to_activate=0; + +INSERT INTO unsorted VALUES (1, 'a'), (5, 'b'); +INSERT INTO unsorted VALUES (2, 'c'), (4, 'd'); +INSERT INTO unsorted VALUES (3, 'e'); + +OPTIMIZE TABLE unsorted PARTITION tuple() FINAL; + +SELECT * FROM unsorted; + +DROP TABLE unsorted; + +SET allow_suspicious_primary_key = 1; +SELECT '*** ReplacingMergeTree ***'; + +DROP TABLE IF EXISTS unsorted_replacing; + +CREATE TABLE unsorted_replacing (x UInt32, s String, v UInt32) ENGINE ReplacingMergeTree(v) ORDER BY tuple(); + +INSERT INTO unsorted_replacing VALUES (1, 'a', 5), (5, 'b', 4); +INSERT INTO unsorted_replacing VALUES (2, 'c', 3), (4, 'd', 2); +INSERT INTO unsorted_replacing VALUES (3, 'e', 1); + +SELECT * FROM unsorted_replacing FINAL; + +SELECT '---'; + +OPTIMIZE TABLE unsorted_replacing PARTITION tuple() FINAL; + +SELECT * FROM unsorted_replacing; + +DROP TABLE unsorted_replacing; + + +SELECT '*** CollapsingMergeTree ***'; + +DROP TABLE IF EXISTS unsorted_collapsing; + +CREATE TABLE unsorted_collapsing (x UInt32, s String, sign Int8) ENGINE CollapsingMergeTree(sign) ORDER BY tuple(); + +INSERT INTO unsorted_collapsing VALUES (1, 'a', 1); +INSERT INTO unsorted_collapsing VALUES (1, 'a', -1), (2, 'b', 1); +INSERT INTO unsorted_collapsing VALUES (2, 'b', -1), (3, 'c', 1); + +SELECT * FROM unsorted_collapsing FINAL; + +SELECT '---'; + +OPTIMIZE TABLE unsorted_collapsing PARTITION tuple() FINAL; + +SELECT * FROM unsorted_collapsing; + +DROP TABLE unsorted_collapsing; diff --git a/parser/testdata/00579_merge_tree_partition_and_primary_keys_using_same_expression/ast.json b/parser/testdata/00579_merge_tree_partition_and_primary_keys_using_same_expression/ast.json new file mode 100644 index 000000000..cf903d65d --- /dev/null +++ b/parser/testdata/00579_merge_tree_partition_and_primary_keys_using_same_expression/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery partition_and_primary_keys_using_same_expression (children 1)" + }, + { + "explain": " Identifier partition_and_primary_keys_using_same_expression" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001290505, + "rows_read": 2, + "bytes_read": 148 + } +} diff --git a/parser/testdata/00579_merge_tree_partition_and_primary_keys_using_same_expression/metadata.json b/parser/testdata/00579_merge_tree_partition_and_primary_keys_using_same_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00579_merge_tree_partition_and_primary_keys_using_same_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/00579_merge_tree_partition_and_primary_keys_using_same_expression/query.sql b/parser/testdata/00579_merge_tree_partition_and_primary_keys_using_same_expression/query.sql new file mode 100644 index 000000000..477a75f17 --- /dev/null +++ b/parser/testdata/00579_merge_tree_partition_and_primary_keys_using_same_expression/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS partition_and_primary_keys_using_same_expression; + +CREATE TABLE partition_and_primary_keys_using_same_expression(dt DateTime) + ENGINE MergeTree PARTITION BY toDate(dt) ORDER BY toDayOfWeek(toDate(dt)); + +INSERT INTO partition_and_primary_keys_using_same_expression + VALUES ('2018-02-19 12:00:00'); +INSERT INTO partition_and_primary_keys_using_same_expression + VALUES ('2018-02-20 12:00:00'), ('2018-02-21 12:00:00'); + +SELECT * FROM partition_and_primary_keys_using_same_expression ORDER BY dt; + +SELECT '---'; + +ALTER TABLE partition_and_primary_keys_using_same_expression DROP PARTITION '2018-02-20'; +SELECT * FROM partition_and_primary_keys_using_same_expression ORDER BY dt; + +DROP TABLE partition_and_primary_keys_using_same_expression; diff --git a/parser/testdata/00579_virtual_column_and_lazy/ast.json b/parser/testdata/00579_virtual_column_and_lazy/ast.json new file mode 100644 index 000000000..1e1c5a32c --- /dev/null +++ b/parser/testdata/00579_virtual_column_and_lazy/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery sample_00579_1 (children 1)" + }, + { + "explain": " Identifier sample_00579_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001114452, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/00579_virtual_column_and_lazy/metadata.json b/parser/testdata/00579_virtual_column_and_lazy/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00579_virtual_column_and_lazy/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00579_virtual_column_and_lazy/query.sql b/parser/testdata/00579_virtual_column_and_lazy/query.sql new file mode 100644 index 000000000..ca58a5fc9 --- /dev/null +++ b/parser/testdata/00579_virtual_column_and_lazy/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS sample_00579_1; +DROP TABLE IF EXISTS sample_00579_2; +DROP TABLE IF EXISTS sample_merge_00579; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE sample_00579_1 (x UInt64, d Date DEFAULT today()) ENGINE = MergeTree(d, intHash64(x), intHash64(x), 10); +CREATE TABLE sample_00579_2 (x UInt64, d Date DEFAULT today()) ENGINE = MergeTree(d, intHash64(x), intHash64(x), 10); + +INSERT INTO sample_00579_1 (x) SELECT number AS x FROM system.numbers LIMIT 1000; +INSERT INTO sample_00579_2 (x) SELECT number AS x FROM system.numbers LIMIT 2000; + +CREATE TABLE sample_merge_00579 AS sample_00579_1 ENGINE = Merge(currentDatabase(), '^sample_00579_\\d$'); + +SET max_threads = 1; +SELECT _sample_factor FROM merge(currentDatabase(), '^sample_00579_\\d$'); + +DROP TABLE sample_00579_1; +DROP TABLE sample_00579_2; +DROP TABLE sample_merge_00579; diff --git a/parser/testdata/00580_cast_nullable_to_non_nullable/ast.json b/parser/testdata/00580_cast_nullable_to_non_nullable/ast.json new file mode 100644 index 000000000..969d6f3cf --- /dev/null +++ b/parser/testdata/00580_cast_nullable_to_non_nullable/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_999999" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal 'UInt64'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001420899, + "rows_read": 20, + "bytes_read": 767 + } +} diff --git a/parser/testdata/00580_cast_nullable_to_non_nullable/metadata.json b/parser/testdata/00580_cast_nullable_to_non_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00580_cast_nullable_to_non_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00580_cast_nullable_to_non_nullable/query.sql b/parser/testdata/00580_cast_nullable_to_non_nullable/query.sql new file mode 100644 index 000000000..c50e35d43 --- /dev/null +++ b/parser/testdata/00580_cast_nullable_to_non_nullable/query.sql @@ -0,0 +1 @@ +SELECT CAST(number = 999999 ? NULL : number AS UInt64) FROM system.numbers LIMIT 10; diff --git a/parser/testdata/00580_consistent_hashing_functions/ast.json b/parser/testdata/00580_consistent_hashing_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00580_consistent_hashing_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00580_consistent_hashing_functions/metadata.json b/parser/testdata/00580_consistent_hashing_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00580_consistent_hashing_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00580_consistent_hashing_functions/query.sql b/parser/testdata/00580_consistent_hashing_functions/query.sql new file mode 100644 index 000000000..f470642d3 --- /dev/null +++ b/parser/testdata/00580_consistent_hashing_functions/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest + +SELECT jumpConsistentHash(1, 1), jumpConsistentHash(42, 57), jumpConsistentHash(256, 1024), jumpConsistentHash(3735883980, 1), jumpConsistentHash(3735883980, 666), jumpConsistentHash(16045690984833335023, 255); +SELECT kostikConsistentHash(16045690984833335023, 1), kostikConsistentHash(16045690984833335023, 2), kostikConsistentHash(16045690984833335023, 3), kostikConsistentHash(16045690984833335023, 4), kostikConsistentHash(16045690984833335023, 173), kostikConsistentHash(16045690984833335023, 255); +SELECT jumpConsistentHash(intHash64(number), 787) FROM system.numbers LIMIT 1000000, 2; +SELECT kostikConsistentHash(16045690984833335023+number-number, 120) FROM system.numbers LIMIT 1000000, 2; diff --git a/parser/testdata/00581_limit_on_result_and_subquery_and_insert/ast.json 
b/parser/testdata/00581_limit_on_result_and_subquery_and_insert/ast.json new file mode 100644 index 000000000..d4fd9580c --- /dev/null +++ b/parser/testdata/00581_limit_on_result_and_subquery_and_insert/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00177981, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00581_limit_on_result_and_subquery_and_insert/metadata.json b/parser/testdata/00581_limit_on_result_and_subquery_and_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00581_limit_on_result_and_subquery_and_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00581_limit_on_result_and_subquery_and_insert/query.sql b/parser/testdata/00581_limit_on_result_and_subquery_and_insert/query.sql new file mode 100644 index 000000000..c25187e80 --- /dev/null +++ b/parser/testdata/00581_limit_on_result_and_subquery_and_insert/query.sql @@ -0,0 +1,9 @@ +SET max_result_rows = 10; + +SELECT count() FROM (SELECT * FROM system.numbers LIMIT 11); + +CREATE TEMPORARY TABLE t AS SELECT * FROM system.numbers LIMIT 11; +SELECT count() FROM t; + +INSERT INTO t SELECT * FROM system.numbers LIMIT 11; +SELECT count() FROM t; diff --git a/parser/testdata/00582_not_aliasing_functions/ast.json b/parser/testdata/00582_not_aliasing_functions/ast.json new file mode 100644 index 000000000..a003eb92f --- /dev/null +++ b/parser/testdata/00582_not_aliasing_functions/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001611959, + "rows_read": 22, + "bytes_read": 937 + } +} diff --git a/parser/testdata/00582_not_aliasing_functions/metadata.json b/parser/testdata/00582_not_aliasing_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00582_not_aliasing_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00582_not_aliasing_functions/query.sql b/parser/testdata/00582_not_aliasing_functions/query.sql new file mode 100644 index 000000000..9c56eb5e5 --- /dev/null +++ b/parser/testdata/00582_not_aliasing_functions/query.sql @@ -0,0 +1 @@ 
+SELECT count() FROM (SELECT count() FROM numbers(10)); diff --git a/parser/testdata/00583_limit_by_expressions/ast.json b/parser/testdata/00583_limit_by_expressions/ast.json new file mode 100644 index 000000000..21a0fd6f2 --- /dev/null +++ b/parser/testdata/00583_limit_by_expressions/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001494562, + "rows_read": 12, + "bytes_read": 445 + } +} diff --git a/parser/testdata/00583_limit_by_expressions/metadata.json b/parser/testdata/00583_limit_by_expressions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00583_limit_by_expressions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00583_limit_by_expressions/query.sql b/parser/testdata/00583_limit_by_expressions/query.sql new file mode 100644 index 000000000..0163a36f5 --- /dev/null +++ b/parser/testdata/00583_limit_by_expressions/query.sql @@ -0,0 +1,7 @@ +SELECT 1 FROM system.one LIMIT 1 BY 1; +SELECT 1 FROM system.one LIMIT 1 BY 1 AS one; +SELECT 1 as one FROM system.one LIMIT 1 BY 1; +SELECT 1 as one FROM system.one LIMIT 1 BY one; +SELECT 1 as one FROM system.one LIMIT 1 BY rand(); +SELECT number FROM numbers(10) LIMIT 2 BY number % 2; +SELECT number FROM numbers(10) LIMIT 2 BY intDiv(number, 5); diff --git a/parser/testdata/00584_view_union_all/ast.json b/parser/testdata/00584_view_union_all/ast.json new file mode 100644 index 000000000..50269715a --- /dev/null +++ b/parser/testdata/00584_view_union_all/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery Test_00584 (children 1)" + }, + { + "explain": " Identifier Test_00584" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001335869, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00584_view_union_all/metadata.json b/parser/testdata/00584_view_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00584_view_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00584_view_union_all/query.sql b/parser/testdata/00584_view_union_all/query.sql new file mode 100644 index 000000000..a86dfaec6 --- /dev/null +++ b/parser/testdata/00584_view_union_all/query.sql @@ -0,0 +1,30 @@ +DROP TABLE IF EXISTS Test_00584; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE Test_00584 ( + createdDate Date, + str String, + key Enum8('A' = 0, 'B' = 1, 'ALL' = 2), + a Int64 +) +ENGINE = MergeTree(createdDate, str, 8192); + +INSERT INTO Test_00584 VALUES ('2000-01-01', 'hello', 'A', 123); + +SET max_threads = 1; + +CREATE VIEW TestView AS + SELECT str, key, sumIf(a, 0) AS sum + FROM Test_00584 + GROUP BY str, key + + UNION ALL + + SELECT str AS str, 
CAST('ALL' AS Enum8('A' = 0, 'B' = 1, 'ALL' = 2)) AS key, sumIf(a, 0) AS sum + FROM Test_00584 + GROUP BY str; + +SELECT * FROM TestView ORDER BY key; + +DROP TABLE TestView; +DROP TABLE Test_00584; diff --git a/parser/testdata/00585_union_all_subquery_aggregation_column_removal/ast.json b/parser/testdata/00585_union_all_subquery_aggregation_column_removal/ast.json new file mode 100644 index 000000000..409a3484e --- /dev/null +++ b/parser/testdata/00585_union_all_subquery_aggregation_column_removal/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery clicks (children 1)" + }, + { + "explain": " Identifier clicks" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001485085, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/00585_union_all_subquery_aggregation_column_removal/metadata.json b/parser/testdata/00585_union_all_subquery_aggregation_column_removal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00585_union_all_subquery_aggregation_column_removal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00585_union_all_subquery_aggregation_column_removal/query.sql b/parser/testdata/00585_union_all_subquery_aggregation_column_removal/query.sql new file mode 100644 index 000000000..07d39e8d5 --- /dev/null +++ b/parser/testdata/00585_union_all_subquery_aggregation_column_removal/query.sql @@ -0,0 +1,331 @@ +DROP TABLE IF EXISTS clicks; +DROP TABLE IF EXISTS transactions; + +CREATE TABLE clicks (domain String) ENGINE = Memory; +CREATE TABLE transactions (domain String) ENGINE = Memory; + +INSERT INTO clicks VALUES ('facebook.com'), ('meta.ua'), ('google.com'); +INSERT INTO transactions VALUES ('facebook.com'), ('meta.ua'), ('baidu.com'); + + +SELECT + sum(total_count) AS total, + domain +FROM +( + SELECT + COUNT(*) AS total_count, + toUInt64(0) AS facebookHits, + domain + FROM transactions + GROUP BY domain + UNION ALL + SELECT + COUNT(*) AS total_count, + SUM(if(domain = 'facebook.com', 1, 0)) AS facebookHits, + domain + FROM clicks + GROUP BY domain +) +GROUP BY domain +ORDER BY domain +LIMIT 10 +FORMAT JSONEachRow; + + +SELECT + sum(total_count) AS total, + domain +FROM +( + SELECT + COUNT(*) AS total_count, + SUM(if(domain = 'facebook.com', 1, 0)) AS facebookHits, + domain + FROM clicks + GROUP BY domain +UNION ALL + SELECT + COUNT(*) AS total_count, + toUInt64(0) AS facebookHits, + domain + FROM transactions + GROUP BY domain +) +GROUP BY domain +ORDER BY domain +LIMIT 10 +FORMAT JSONEachRow; + + +SELECT DISTINCT * FROM +( +SELECT + sum(total_count) AS total, + domain +FROM +( + SELECT + COUNT(*) AS total_count, + toUInt64(0) AS facebookHits, + domain + FROM transactions + GROUP BY domain + UNION ALL + SELECT + COUNT(*) AS total_count, + SUM(if(domain = 'facebook.com', 1, 0)) AS facebookHits, + domain + FROM clicks + GROUP BY domain +) +GROUP BY domain +ORDER BY domain +LIMIT 10 + +UNION ALL + +SELECT + sum(total_count) AS total, + domain +FROM +( + SELECT + COUNT(*) AS total_count, + SUM(if(domain = 'facebook.com', 1, 0)) AS facebookHits, + domain + FROM clicks + GROUP BY domain +UNION ALL + SELECT + COUNT(*) AS total_count, + toUInt64(0) AS facebookHits, + domain + FROM transactions + GROUP BY domain +) +GROUP BY domain +ORDER BY domain +LIMIT 10 +); + + +SELECT DISTINCT total, domain FROM +( +SELECT + sum(total_count) AS total, + sum(facebookHits) AS facebook, + domain +FROM +( + SELECT + 
COUNT(*) AS total_count, + toUInt64(0) AS facebookHits, + domain + FROM transactions + GROUP BY domain + UNION ALL + SELECT + COUNT(*) AS total_count, + SUM(if(domain = 'facebook.com', 1, 0)) AS facebookHits, + domain + FROM clicks + GROUP BY domain +) +GROUP BY domain +ORDER BY domain +LIMIT 10 + +UNION ALL + +SELECT + sum(total_count) AS total, + max(facebookHits) AS facebook, + domain +FROM +( + SELECT + COUNT(*) AS total_count, + SUM(if(domain = 'facebook.com', 1, 0)) AS facebookHits, + domain + FROM clicks + GROUP BY domain +UNION ALL + SELECT + COUNT(*) AS total_count, + toUInt64(0) AS facebookHits, + domain + FROM transactions + GROUP BY domain +) +GROUP BY domain +ORDER BY domain +LIMIT 10 +) +ORDER BY domain, total; + + +SELECT * FROM +( +SELECT + sum(total_count) AS total, + domain +FROM +( + SELECT + COUNT(*) AS total_count, + toUInt64(0) AS facebookHits, + domain + FROM transactions + GROUP BY domain + UNION ALL + SELECT + COUNT(*) AS total_count, + SUM(if(domain = 'facebook.com', 1, 0)) AS facebookHits, + domain + FROM clicks + GROUP BY domain +) +GROUP BY domain +ORDER BY domain +LIMIT 10 +) js1 +ALL FULL OUTER JOIN +( +SELECT + sum(total_count) AS total, + domain +FROM +( + SELECT + COUNT(*) AS total_count, + SUM(if(domain = 'facebook.com', 1, 0)) AS facebookHits, + domain + FROM clicks + GROUP BY domain +UNION ALL + SELECT + COUNT(*) AS total_count, + toUInt64(0) AS facebookHits, + domain + FROM transactions + GROUP BY domain +) +GROUP BY domain +ORDER BY domain +LIMIT 10 +) js2 +USING (total, domain) +ORDER BY total, domain; + + +SELECT total FROM +( +SELECT + sum(total_count) AS total, + domain +FROM +( + SELECT + COUNT(*) AS total_count, + toUInt64(0) AS facebookHits, + domain + FROM transactions + GROUP BY domain + UNION ALL + SELECT + COUNT(*) AS total_count, + SUM(if(domain = 'facebook.com', 1, 0)) AS facebookHits, + domain + FROM clicks + GROUP BY domain +) +GROUP BY domain +ORDER BY domain +LIMIT 10 +) js1 +ALL FULL OUTER JOIN +( +SELECT + sum(total_count) AS total, + domain +FROM +( + SELECT + COUNT(*) AS total_count, + SUM(if(domain = 'facebook.com', 1, 0)) AS facebookHits, + domain + FROM clicks + GROUP BY domain +UNION ALL + SELECT + COUNT(*) AS total_count, + toUInt64(0) AS facebookHits, + domain + FROM transactions + GROUP BY domain +) +GROUP BY domain +ORDER BY domain +LIMIT 10 +) js2 +USING (total, domain) +ORDER BY total, domain; + + +SELECT domain FROM +( +SELECT + sum(total_count) AS total, + domain +FROM +( + SELECT + COUNT(*) AS total_count, + toUInt64(0) AS facebookHits, + domain + FROM transactions + GROUP BY domain + UNION ALL + SELECT + COUNT(*) AS total_count, + SUM(if(domain = 'facebook.com', 1, 0)) AS facebookHits, + domain + FROM clicks + GROUP BY domain +) +GROUP BY domain +ORDER BY domain +LIMIT 10 +) js1 +ALL FULL OUTER JOIN +( +SELECT + sum(total_count) AS total, + domain +FROM +( + SELECT + COUNT(*) AS total_count, + SUM(if(domain = 'facebook.com', 1, 0)) AS facebookHits, + domain + FROM clicks + GROUP BY domain +UNION ALL + SELECT + COUNT(*) AS total_count, + toUInt64(0) AS facebookHits, + domain + FROM transactions + GROUP BY domain +) +GROUP BY domain +ORDER BY domain +LIMIT 10 +) js2 +USING (total, domain) +ORDER BY total, domain; + + +DROP TABLE clicks; +DROP TABLE transactions; diff --git a/parser/testdata/00586_removing_unused_columns_from_subquery/ast.json b/parser/testdata/00586_removing_unused_columns_from_subquery/ast.json new file mode 100644 index 000000000..29e2f035a --- /dev/null +++ 
b/parser/testdata/00586_removing_unused_columns_from_subquery/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001408882, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00586_removing_unused_columns_from_subquery/metadata.json b/parser/testdata/00586_removing_unused_columns_from_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00586_removing_unused_columns_from_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00586_removing_unused_columns_from_subquery/query.sql b/parser/testdata/00586_removing_unused_columns_from_subquery/query.sql new file mode 100644 index 000000000..ce52c652d --- /dev/null +++ b/parser/testdata/00586_removing_unused_columns_from_subquery/query.sql @@ -0,0 +1,51 @@ +SET any_join_distinct_right_table_keys = 1; +SET joined_subquery_requires_alias = 0; + +DROP TABLE IF EXISTS local_statements; +DROP TABLE IF EXISTS statements; + +CREATE TABLE local_statements ( statementId String, eventDate Date, eventHour DateTime, eventTime DateTime, verb String, objectId String, onCourse UInt8, courseId UInt16, contextRegistration String, resultScoreRaw Float64, resultScoreMin Float64, resultScoreMax Float64, resultSuccess UInt8, resultCompletition UInt8, resultDuration UInt32, resultResponse String, learnerId String, learnerHash String, contextId UInt16) ENGINE = MergeTree ORDER BY tuple(); + +CREATE TABLE statements ( statementId String, eventDate Date, eventHour DateTime, eventTime DateTime, verb String, objectId String, onCourse UInt8, courseId UInt16, contextRegistration String, resultScoreRaw Float64, resultScoreMin Float64, resultScoreMax Float64, resultSuccess UInt8, resultCompletition UInt8, resultDuration UInt32, resultResponse String, learnerId String, learnerHash String, contextId UInt16) ENGINE = Distributed(test_shard_localhost, currentDatabase(), 'local_statements', sipHash64(learnerHash)); + +INSERT INTO local_statements FORMAT CSV "2b3b04ee-0bb8-4200-906f-d47c48e56bd0","2016-08-25","2016-08-25 14:00:00","2016-08-25 14:43:34","http://adlnet.gov/expapi/verbs/passed","https://crmm.ru/xapi/courses/spp/2/0/3/2/8",0,1,"c13d788c-26e0-40e3-bacb-a1ff78ee1518",100,0,0,0,0,0,"","https://sberbank-school.ru/xapi/accounts/userid/94312","6f696f938a69b5e173093718e1c2bbf2",0 + +SELECT avg(diff) +FROM +( + SELECT * + FROM + ( + SELECT + learnerHash, + passed - eventTime AS diff + FROM statements + GLOBAL SEMI LEFT JOIN + ( + SELECT + learnerHash, + argMax(eventTime, resultScoreRaw) AS passed + FROM + ( + SELECT + learnerHash, + eventTime, + resultScoreRaw + FROM statements + WHERE (courseId = 1) AND (onCourse = 0) + AND (verb = 'http://adlnet.gov/expapi/verbs/passed') AND (objectId = 'https://crmm.ru/xapi/courses/spp/1/1/0-1') + ORDER BY eventTime ASC + ) + GROUP BY learnerHash + ) USING (learnerHash) + WHERE (courseId = 1) AND (onCourse = 0) + AND (verb = 'http://adlnet.gov/expapi/verbs/interacted') AND (eventTime <= passed) AND (diff > 0) + ORDER BY eventTime DESC + LIMIT 1 BY learnerHash + ) + ORDER BY diff DESC + LIMIT 7, 126 +); + +DROP TABLE local_statements; +DROP TABLE statements; diff --git a/parser/testdata/00587_union_all_type_conversions/ast.json b/parser/testdata/00587_union_all_type_conversions/ast.json new file mode 100644 index 000000000..8be5b0757 --- /dev/null +++ 
b/parser/testdata/00587_union_all_type_conversions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001455822, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00587_union_all_type_conversions/metadata.json b/parser/testdata/00587_union_all_type_conversions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00587_union_all_type_conversions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00587_union_all_type_conversions/query.sql b/parser/testdata/00587_union_all_type_conversions/query.sql new file mode 100644 index 000000000..6bc6d98d1 --- /dev/null +++ b/parser/testdata/00587_union_all_type_conversions/query.sql @@ -0,0 +1,12 @@ +SET max_threads = 1; + +SELECT * FROM (SELECT 1 as x UNION ALL SELECT -1) ORDER BY x DESC; +SELECT * FROM (SELECT x, toTypeName(x) FROM (SELECT 1 AS x UNION ALL SELECT -1)) ORDER BY x DESC; + +SELECT x FROM (SELECT 1 as x UNION ALL SELECT NULL) ORDER BY x DESC; +SELECT * FROM (SELECT x, toTypeName(x) FROM (SELECT 1 AS x UNION ALL SELECT NULL)) ORDER BY x DESC; + +SELECT x FROM (SELECT 1 AS x UNION ALL SELECT NULL UNION ALL SELECT 1.0) ORDER BY x DESC; +SELECT * FROM (SELECT x, toTypeName(x), count() FROM (SELECT 1 AS x UNION ALL SELECT NULL UNION ALL SELECT 1.0) GROUP BY x) ORDER BY x DESC; + +SELECT res FROM (SELECT arrayJoin(x) AS res FROM (SELECT [1, 2, 3] AS x UNION ALL SELECT [nan, NULL]) ORDER BY res) ORDER BY res DESC; diff --git a/parser/testdata/00588_shard_distributed_prewhere/ast.json b/parser/testdata/00588_shard_distributed_prewhere/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00588_shard_distributed_prewhere/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00588_shard_distributed_prewhere/metadata.json b/parser/testdata/00588_shard_distributed_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00588_shard_distributed_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00588_shard_distributed_prewhere/query.sql b/parser/testdata/00588_shard_distributed_prewhere/query.sql new file mode 100644 index 000000000..b3738fd5a --- /dev/null +++ b/parser/testdata/00588_shard_distributed_prewhere/query.sql @@ -0,0 +1,17 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS mergetree_00588; +DROP TABLE IF EXISTS distributed_00588; + +CREATE TABLE mergetree_00588 (x UInt64, s String) ENGINE = MergeTree ORDER BY x; +INSERT INTO mergetree_00588 VALUES (1, 'hello'), (2, 'world'); + +SELECT * FROM mergetree_00588 PREWHERE x = 1 WHERE s LIKE '%l%' ORDER BY x, s; +SELECT * FROM remote('127.0.0.{1,2,3}', currentDatabase(), mergetree_00588) PREWHERE x = 1 WHERE s LIKE '%l%' ORDER BY x, s; + +CREATE TABLE distributed_00588 AS mergetree_00588 ENGINE = Distributed(test_shard_localhost, currentDatabase(), mergetree_00588); + +SELECT * FROM distributed_00588 PREWHERE x = 1 WHERE s LIKE '%l%' ORDER BY x, s; + +DROP TABLE mergetree_00588; +DROP TABLE distributed_00588; diff --git a/parser/testdata/00589_removal_unused_columns_aggregation/ast.json b/parser/testdata/00589_removal_unused_columns_aggregation/ast.json new file mode 100644 index 000000000..36b0758e7 --- /dev/null +++ b/parser/testdata/00589_removal_unused_columns_aggregation/ast.json @@ -0,0 +1,124 @@ +{ + "meta": + [ + { + "name": 
"explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function sum (alias a) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function avg (alias b) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number (alias x)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 34, + + "statistics": + { + "elapsed": 0.001422753, + "rows_read": 34, + "bytes_read": 1577 + } +} diff --git a/parser/testdata/00589_removal_unused_columns_aggregation/metadata.json b/parser/testdata/00589_removal_unused_columns_aggregation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00589_removal_unused_columns_aggregation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00589_removal_unused_columns_aggregation/query.sql b/parser/testdata/00589_removal_unused_columns_aggregation/query.sql new file mode 100644 index 000000000..67f8e49f4 --- /dev/null +++ b/parser/testdata/00589_removal_unused_columns_aggregation/query.sql @@ -0,0 +1,13 @@ +SELECT * FROM (SELECT sum(x) AS a, avg(x) AS b FROM (SELECT number AS x FROM numbers(10))); +SELECT a, b FROM (SELECT sum(x) AS a, avg(x) AS b FROM (SELECT number AS x FROM numbers(10))); +SELECT a FROM (SELECT sum(x) AS a, avg(x) AS b FROM (SELECT number AS x FROM numbers(10))); +SELECT b FROM (SELECT sum(x) AS a, avg(x) AS b FROM (SELECT number AS x FROM numbers(10))); +SELECT 1 FROM (SELECT sum(x) AS a, avg(x) AS b FROM (SELECT number AS x FROM numbers(10))); +SELECT 1 FROM (SELECT sum(x), avg(x) FROM (SELECT number AS x FROM numbers(10))); +SELECT count() FROM (SELECT sum(x) AS a, avg(x) AS b FROM (SELECT number AS x FROM numbers(10))); +SELECT count() FROM (SELECT sum(x), avg(x) FROM (SELECT number AS x FROM numbers(10))); +SELECT count() FROM (SELECT sum(x), avg(x) FROM (SELECT number % 3 AS x FROM numbers(10) GROUP BY x)); +SELECT 1 FROM (SELECT DISTINCT sum(x), avg(x) FROM (SELECT number AS x FROM numbers(10))); +SELECT 
count() FROM (SELECT sum(x), arrayJoin([min(x), max(x)]) FROM (SELECT number AS x FROM numbers(10))); +SELECT count() FROM (SELECT arrayJoin([sum(x), medianExact(x)]), arrayJoin([min(x), max(x)]) FROM (SELECT number AS x FROM numbers(10))); +SELECT 1 FROM (SELECT arrayJoin([sum(x), medianExact(x)]), arrayJoin([min(x), max(x)]) FROM (SELECT number AS x FROM numbers(10))); diff --git a/parser/testdata/00590_limit_by_column_removal/ast.json b/parser/testdata/00590_limit_by_column_removal/ast.json new file mode 100644 index 000000000..48131d026 --- /dev/null +++ b/parser/testdata/00590_limit_by_column_removal/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias x)" + }, + { + "explain": " Literal UInt64_2 (alias y)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier y" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001571, + "rows_read": 18, + "bytes_read": 699 + } +} diff --git a/parser/testdata/00590_limit_by_column_removal/metadata.json b/parser/testdata/00590_limit_by_column_removal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00590_limit_by_column_removal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00590_limit_by_column_removal/query.sql b/parser/testdata/00590_limit_by_column_removal/query.sql new file mode 100644 index 000000000..fd8b7eeed --- /dev/null +++ b/parser/testdata/00590_limit_by_column_removal/query.sql @@ -0,0 +1,3 @@ +SELECT x FROM (SELECT 1 AS x, 2 AS y) LIMIT 1 BY y; +SELECT x FROM (SELECT number AS x, number + 1 AS y FROM system.numbers LIMIT 10) ORDER BY y LIMIT 1 BY y; +SELECT sum(x) FROM (SELECT x, y FROM (SELECT number AS x, number + 1 AS y FROM system.numbers LIMIT 10) ORDER BY y LIMIT 1 BY y); diff --git a/parser/testdata/00591_columns_removal_union_all/ast.json b/parser/testdata/00591_columns_removal_union_all/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00591_columns_removal_union_all/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00591_columns_removal_union_all/metadata.json b/parser/testdata/00591_columns_removal_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00591_columns_removal_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00591_columns_removal_union_all/query.sql b/parser/testdata/00591_columns_removal_union_all/query.sql new file mode 100644 index 000000000..cb5771603 --- /dev/null +++ b/parser/testdata/00591_columns_removal_union_all/query.sql @@ -0,0 +1,4 @@ +SELECT * FROM +( + SELECT x FROM (SELECT x, y, arrayJoin(z) FROM (SELECT number 
AS x, number + 1 AS y, [number % 2, number % 3] AS z FROM numbers(10)) UNION ALL SELECT 1, 2, 3) +) ORDER BY x; diff --git a/parser/testdata/00592_union_all_different_aliases/ast.json b/parser/testdata/00592_union_all_different_aliases/ast.json new file mode 100644 index 000000000..032ee0a2e --- /dev/null +++ b/parser/testdata/00592_union_all_different_aliases/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias a)" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001517394, + "rows_read": 5, + "bytes_read": 187 + } +} diff --git a/parser/testdata/00592_union_all_different_aliases/metadata.json b/parser/testdata/00592_union_all_different_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00592_union_all_different_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00592_union_all_different_aliases/query.sql b/parser/testdata/00592_union_all_different_aliases/query.sql new file mode 100644 index 000000000..0c837a5a4 --- /dev/null +++ b/parser/testdata/00592_union_all_different_aliases/query.sql @@ -0,0 +1,7 @@ +SELECT 1 AS a +UNION ALL +SELECT 1 AS b; + +SELECT 1 AS a +UNION ALL +SELECT 1 AS a; diff --git a/parser/testdata/00593_union_all_assert_columns_removed/ast.json b/parser/testdata/00593_union_all_assert_columns_removed/ast.json new file mode 100644 index 000000000..1e858e381 --- /dev/null +++ b/parser/testdata/00593_union_all_assert_columns_removed/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery columns (children 1)" + }, + { + "explain": " Identifier columns" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001309812, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00593_union_all_assert_columns_removed/metadata.json b/parser/testdata/00593_union_all_assert_columns_removed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00593_union_all_assert_columns_removed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00593_union_all_assert_columns_removed/query.sql b/parser/testdata/00593_union_all_assert_columns_removed/query.sql new file mode 100644 index 000000000..c4d653160 --- /dev/null +++ b/parser/testdata/00593_union_all_assert_columns_removed/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS columns; +CREATE TABLE columns (a UInt8, b UInt8, c UInt8) ENGINE = Memory; +INSERT INTO columns VALUES (1, 2, 3); +SET max_columns_to_read = 1; + +SELECT a FROM (SELECT * FROM columns); +SELECT a FROM (SELECT * FROM (SELECT * FROM columns)); +SELECT a FROM (SELECT * FROM columns UNION ALL SELECT * FROM columns); + +DROP TABLE columns; diff --git a/parser/testdata/00594_alias_in_distributed/ast.json b/parser/testdata/00594_alias_in_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00594_alias_in_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00594_alias_in_distributed/metadata.json b/parser/testdata/00594_alias_in_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/00594_alias_in_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00594_alias_in_distributed/query.sql b/parser/testdata/00594_alias_in_distributed/query.sql new file mode 100644 index 000000000..250ede2bb --- /dev/null +++ b/parser/testdata/00594_alias_in_distributed/query.sql @@ -0,0 +1,63 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS alias_local10; +DROP TABLE IF EXISTS alias10; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE alias_local10 ( + Id Int8, + EventDate Date DEFAULT '2000-01-01', + field1 Int8, + field2 String, + field3 ALIAS CASE WHEN field1 = 1 THEN field2 ELSE '0' END +) ENGINE = MergeTree(EventDate, (Id, EventDate), 8192); + +CREATE TABLE alias10 AS alias_local10 ENGINE = Distributed(test_shard_localhost, currentDatabase(), alias_local10, cityHash64(Id)); + +INSERT INTO alias_local10 (Id, EventDate, field1, field2) VALUES (1, '2000-01-01', 1, '12345'), (2, '2000-01-01', 2, '54321'), (3, '2000-01-01', 0, ''); + +SELECT field1, field2, field3 FROM alias_local10; +SELECT field1, field2, field3 FROM alias_local10 WHERE EventDate='2000-01-01'; +SELECT field1, field2 FROM alias_local10 WHERE EventDate='2000-01-01'; + +SELECT field1, field2, field3 FROM alias10; +SELECT field1, field2, field3 FROM alias10 WHERE EventDate='2000-01-01'; +SELECT field1, field2 FROM alias10 WHERE EventDate='2000-01-01'; + +SELECT field2, field3 FROM alias10 WHERE EventDate='2000-01-01'; +SELECT field3 FROM alias10 WHERE EventDate='2000-01-01'; +SELECT field2, field3 FROM alias10; +SELECT field3 FROM alias10; + +SELECT field1 FROM alias10 WHERE field3 = '12345'; +SELECT field2 FROM alias10 WHERE field3 = '12345'; +SELECT field3 FROM alias10 WHERE field3 = '12345'; + +DROP TABLE alias10; +CREATE TABLE alias10 ( + Id Int8, + EventDate Date, + field1 Int8, + field2 String, + field3 String +) ENGINE = Distributed(test_shard_localhost, currentDatabase(), alias_local10); + +SELECT field1, field2, field3 FROM alias_local10; +SELECT field1, field2, field3 FROM alias_local10 WHERE EventDate='2000-01-01'; +SELECT field1, field2 FROM alias_local10 WHERE EventDate='2000-01-01'; + +SELECT field1, field2, field3 FROM alias10; +SELECT field1, field2, field3 FROM alias10 WHERE EventDate='2000-01-01'; +SELECT field1, field2 FROM alias10 WHERE EventDate='2000-01-01'; + +SELECT field2, field3 FROM alias10 WHERE EventDate='2000-01-01'; +SELECT field3 FROM alias10 WHERE EventDate='2000-01-01'; +SELECT field2, field3 FROM alias10; +SELECT field3 FROM alias10; + +SELECT field1 FROM alias10 WHERE field3 = '12345'; +SELECT field2 FROM alias10 WHERE field3 = '12345'; +SELECT field3 FROM alias10 WHERE field3 = '12345'; + +DROP TABLE alias_local10; +DROP TABLE alias10; diff --git a/parser/testdata/00597_push_down_predicate_long/ast.json b/parser/testdata/00597_push_down_predicate_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00597_push_down_predicate_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00597_push_down_predicate_long/metadata.json b/parser/testdata/00597_push_down_predicate_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00597_push_down_predicate_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00597_push_down_predicate_long/query.sql b/parser/testdata/00597_push_down_predicate_long/query.sql new file mode 100644 index 000000000..f79b24abe --- /dev/null +++ 
b/parser/testdata/00597_push_down_predicate_long/query.sql @@ -0,0 +1,156 @@ +-- Tags: long + +SET send_logs_level = 'fatal'; +SET any_join_distinct_right_table_keys = 1; +SET joined_subquery_requires_alias = 0; + +DROP TABLE IF EXISTS test_00597; +DROP TABLE IF EXISTS test_view_00597; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE test_00597(date Date, id Int8, name String, value Int64) ENGINE = MergeTree(date, (id, date), 8192); +CREATE VIEW test_view_00597 AS SELECT * FROM test_00597; + +SELECT * FROM (SELECT floor(floor(1, floor(NULL), id = 257), floor(floor(floor(floor(NULL), '10485.76', '9223372036854775807', NULL), floor(10, floor(65535, NULL), 100.0000991821289), NULL)), '2.56'), b.* FROM (SELECT floor(floor(floor(floor(NULL), 1000.0001220703125))), * FROM test_00597) AS b) WHERE id = 257; + +INSERT INTO test_00597 VALUES('2000-01-01', 1, 'test string 1', 1); +INSERT INTO test_00597 VALUES('2000-01-01', 2, 'test string 2', 2); + +SET enable_optimize_predicate_expression = 1; + +SELECT '-------No need for predicate optimization, but still works-------'; +SELECT 1; +SELECT 1 AS id WHERE id = 1; +SELECT arrayJoin([1,2,3]) AS id WHERE id = 1; +SELECT * FROM test_00597 WHERE id = 1; + +SELECT '-------Forbid push down-------'; + +-- ARRAY JOIN +EXPLAIN SYNTAX SELECT count() FROM (SELECT [number] a, [number * 2] b FROM system.numbers LIMIT 1) AS t ARRAY JOIN a, b WHERE NOT ignore(a + b); +SELECT count() FROM (SELECT [number] a, [number * 2] b FROM system.numbers LIMIT 1) AS t ARRAY JOIN a, b WHERE NOT ignore(a + b); + +-- LEFT JOIN +EXPLAIN SYNTAX SELECT a, b FROM (SELECT 1 AS a) ANY LEFT JOIN (SELECT 1 AS a, 1 AS b) USING (a) WHERE b = 0; +SELECT a, b FROM (SELECT 1 AS a) ANY LEFT JOIN (SELECT 1 AS a, 1 AS b) USING (a) WHERE b = 0; + +-- RIGHT JOIN +EXPLAIN SYNTAX SELECT a, b FROM (SELECT 1 AS a, 1 as b) ANY RIGHT JOIN (SELECT 1 AS a) USING (a) WHERE b = 0; +SELECT a, b FROM (SELECT 1 AS a, 1 as b) ANY RIGHT JOIN (SELECT 1 AS a) USING (a) WHERE b = 0; + +-- FULL JOIN +EXPLAIN SYNTAX SELECT a, b FROM (SELECT 1 AS a) ANY FULL JOIN (SELECT 1 AS a, 1 AS b) USING (a) WHERE b = 0; +SELECT a, b FROM (SELECT 1 AS a) ANY FULL JOIN (SELECT 1 AS a, 1 AS b) USING (a) WHERE b = 0; + +EXPLAIN SYNTAX SELECT a, b FROM (SELECT 1 AS a, 1 AS b) ANY FULL JOIN (SELECT 1 AS a) USING (a) WHERE b = 0; +SELECT a, b FROM (SELECT 1 AS a) ANY FULL JOIN (SELECT 1 AS a, 1 AS b) USING (a) WHERE b = 0; + +SELECT '-------Need push down-------'; + +EXPLAIN SYNTAX SELECT toString(value) AS value FROM (SELECT 1 AS value) WHERE value = '1'; +SELECT toString(value) AS value FROM (SELECT 1 AS value) WHERE value = '1'; + +EXPLAIN SYNTAX SELECT * FROM (SELECT 1 AS id UNION ALL SELECT 2) WHERE id = 1; +SELECT * FROM (SELECT 1 AS id UNION ALL SELECT 2) WHERE id = 1; + +EXPLAIN SYNTAX SELECT * FROM (SELECT arrayJoin([1, 2, 3]) AS id) WHERE id = 1; +SELECT * FROM (SELECT arrayJoin([1, 2, 3]) AS id) WHERE id = 1; + +EXPLAIN SYNTAX SELECT id FROM (SELECT arrayJoin([1, 2, 3]) AS id) WHERE id = 1; +SELECT id FROM (SELECT arrayJoin([1, 2, 3]) AS id) WHERE id = 1; + +EXPLAIN SYNTAX SELECT * FROM (SELECT 1 AS id, (SELECT 1) as subquery) WHERE subquery = 1; +SELECT * FROM (SELECT 1 AS id, (SELECT 1) as subquery) WHERE subquery = 1; + +-- Optimize predicate expressions using tables +EXPLAIN SYNTAX SELECT * FROM (SELECT toUInt64(b) AS a, sum(id) AS b FROM test_00597) WHERE a = 3; +SELECT * FROM (SELECT toUInt64(b) AS a, sum(id) AS b FROM test_00597) WHERE a = 3; + +EXPLAIN SYNTAX SELECT date, id, name, value FROM (SELECT 
date, name, value, min(id) AS id FROM test_00597 GROUP BY date, name, value) WHERE id = 1; +SELECT date, id, name, value FROM (SELECT date, name, value, min(id) AS id FROM test_00597 GROUP BY date, name, value) WHERE id = 1; + +EXPLAIN SYNTAX SELECT * FROM (SELECT toUInt64(b) AS a, sum(id) AS b FROM test_00597 AS table_alias) AS outer_table_alias WHERE outer_table_alias.b = 3; +SELECT * FROM (SELECT toUInt64(b) AS a, sum(id) AS b FROM test_00597 AS table_alias) AS outer_table_alias WHERE outer_table_alias.b = 3; + +-- Optimize predicate expression with asterisk +EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) WHERE id = 1; +SELECT * FROM (SELECT * FROM test_00597) WHERE id = 1; + +-- Optimize predicate expression with asterisk and nested subquery +EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597)) WHERE id = 1; +SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597)) WHERE id = 1; + +-- Optimize predicate expression with qualified asterisk +EXPLAIN SYNTAX SELECT * FROM (SELECT b.* FROM (SELECT * FROM test_00597) AS b) WHERE id = 1; +SELECT * FROM (SELECT b.* FROM (SELECT * FROM test_00597) AS b) WHERE id = 1; + +-- Optimize predicate expression without asterisk +EXPLAIN SYNTAX SELECT * FROM (SELECT date, id, name, value FROM test_00597) WHERE id = 1; +SELECT * FROM (SELECT date, id, name, value FROM test_00597) WHERE id = 1; + +-- Optimize predicate expression without asterisk and contains nested subquery +EXPLAIN SYNTAX SELECT * FROM (SELECT date, id, name, value FROM (SELECT date, id, name, value FROM test_00597)) WHERE id = 1; +SELECT * FROM (SELECT date, id, name, value FROM (SELECT date, id, name, value FROM test_00597)) WHERE id = 1; + +-- Optimize predicate expression with qualified +EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) AS b WHERE b.id = 1; +SELECT * FROM (SELECT * FROM test_00597) AS b WHERE b.id = 1; + +-- Optimize predicate expression with qualified and nested subquery +EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) AS a) AS b WHERE b.id = 1; +SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) AS a) AS b WHERE b.id = 1; + +-- Optimize predicate expression with aggregate function +EXPLAIN SYNTAX SELECT * FROM (SELECT id, date, min(value) AS value FROM test_00597 GROUP BY id, date) WHERE id = 1; +SELECT * FROM (SELECT id, date, min(value) AS value FROM test_00597 GROUP BY id, date) WHERE id = 1; + +-- Optimize predicate expression with union all query +EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597 UNION ALL SELECT * FROM test_00597) WHERE id = 1; +SELECT * FROM (SELECT * FROM test_00597 UNION ALL SELECT * FROM test_00597) WHERE id = 1; + +-- Optimize predicate expression with join query +EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1; +SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1 SETTINGS enable_analyzer=0; +SELECT '------- enable_analyzer=1 -------'; +SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id WHERE id = 1 SETTINGS enable_analyzer=1; + +EXPLAIN SYNTAX SELECT * FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 USING id WHERE value = 1; +SELECT * FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 USING id WHERE value = 1; + +-- FIXME: no support for aliased tables for now. 
+EXPLAIN SYNTAX SELECT b.value FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 AS b USING id WHERE value = 1;
+SELECT b.value FROM (SELECT toInt8(1) AS id) ANY LEFT JOIN test_00597 AS b USING id WHERE value = 1;
+
+-- Optimize predicate expression with join and nested subquery
+EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1;
+SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1 SETTINGS enable_analyzer=0;
+SELECT '------- enable_analyzer=1 -------';
+SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) USING id) WHERE id = 1 SETTINGS enable_analyzer=1;
+
+-- Optimize predicate expression with join query and qualified
+EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1;
+SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1 SETTINGS enable_analyzer=0;
+SELECT '------- enable_analyzer=1 -------';
+SELECT * FROM (SELECT * FROM test_00597) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING id WHERE b.id = 1 SETTINGS enable_analyzer=1;
+
+-- Compatibility test
+EXPLAIN SYNTAX SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01');
+SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01') SETTINGS enable_analyzer=0;
+SELECT '------- enable_analyzer=1 -------';
+SELECT * FROM (SELECT toInt8(1) AS id, toDate('2000-01-01') AS date FROM system.numbers LIMIT 1) ANY LEFT JOIN (SELECT * FROM test_00597) AS b USING date, id WHERE b.date = toDate('2000-01-01') SETTINGS enable_analyzer=1;
+
+EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) AS a ANY LEFT JOIN (SELECT * FROM test_00597) AS b ON a.id = b.id) WHERE id = 1;
+SELECT * FROM (SELECT * FROM (SELECT * FROM test_00597) AS a ANY LEFT JOIN (SELECT * FROM test_00597) AS b ON a.id = b.id) WHERE id = 1;
+
+-- Explain with join subquery
+EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1;
+SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1 SETTINGS enable_analyzer=0;
+SELECT '------- enable_analyzer=1 -------';
+SELECT * FROM (SELECT * FROM test_00597) ANY INNER JOIN (SELECT * FROM (SELECT * FROM test_00597)) as r USING id WHERE r.id = 1 SETTINGS enable_analyzer=1;
+
+-- issue 20497
+EXPLAIN SYNTAX SELECT value + t1.value AS expr FROM (SELECT t0.value, t1.value FROM test_00597 AS t0 FULL JOIN test_00597 AS t1 USING date) WHERE expr < 3;
+SELECT value + t1.value AS expr FROM (SELECT t0.value, t1.value FROM test_00597 AS t0 FULL JOIN test_00597 AS t1 USING date) WHERE expr < 3;
+
+DROP TABLE IF EXISTS test_00597;
+DROP TABLE IF EXISTS test_view_00597;
diff --git a/parser/testdata/00597_with_totals_on_empty_set/ast.json b/parser/testdata/00597_with_totals_on_empty_set/ast.json
new file mode 100644
index 000000000..643f1f4fc
--- /dev/null
+++ b/parser/testdata/00597_with_totals_on_empty_set/ast.json
@@ -0,0 +1,73 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 2)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 3)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Function count (children 1)"
+        },
+        {
+            "explain": "     ExpressionList"
+        },
+        {
+            "explain": "   TablesInSelectQuery (children 1)"
+        },
+        {
+            "explain": "    TablesInSelectQueryElement (children 1)"
+        },
+        {
+            "explain": "     TableExpression (children 1)"
+        },
+        {
+            "explain": "      Function numbers (children 1)"
+        },
+        {
+            "explain": "       ExpressionList (children 1)"
+        },
+        {
+            "explain": "        Literal UInt64_10"
+        },
+        {
+            "explain": "   Function equals (children 1)"
+        },
+        {
+            "explain": "    ExpressionList (children 2)"
+        },
+        {
+            "explain": "     Identifier number"
+        },
+        {
+            "explain": "     Literal Int64_-1"
+        },
+        {
+            "explain": " Identifier Vertical"
+        }
+    ],
+
+    "rows": 17,
+
+    "statistics":
+    {
+        "elapsed": 0.001431811,
+        "rows_read": 17,
+        "bytes_read": 633
+    }
+}
diff --git a/parser/testdata/00597_with_totals_on_empty_set/metadata.json b/parser/testdata/00597_with_totals_on_empty_set/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00597_with_totals_on_empty_set/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00597_with_totals_on_empty_set/query.sql b/parser/testdata/00597_with_totals_on_empty_set/query.sql
new file mode 100644
index 000000000..8e02e47da
--- /dev/null
+++ b/parser/testdata/00597_with_totals_on_empty_set/query.sql
@@ -0,0 +1,6 @@
+SELECT count() FROM numbers(10) WHERE number = -1 WITH TOTALS FORMAT Vertical;
+SELECT count() FROM numbers(10) WHERE number = -1 GROUP BY number WITH TOTALS FORMAT Vertical;
+SELECT number, count() FROM numbers(10) WHERE number = -1 GROUP BY number WITH TOTALS FORMAT Vertical;
+SELECT groupArray(number) FROM numbers(10) WHERE number = -1 WITH TOTALS FORMAT Vertical;
+SELECT groupArray(number) FROM numbers(10) WHERE number = -1 GROUP BY number WITH TOTALS FORMAT Vertical;
+SELECT number, groupArray(number) FROM numbers(10) WHERE number = -1 GROUP BY number WITH TOTALS FORMAT Vertical;
diff --git a/parser/testdata/00599_create_view_with_subquery/ast.json b/parser/testdata/00599_create_view_with_subquery/ast.json
new file mode 100644
index 000000000..22ee1f16b
--- /dev/null
+++ b/parser/testdata/00599_create_view_with_subquery/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery test_00599 (children 1)"
+        },
+        {
+            "explain": " Identifier test_00599"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001283599,
+        "rows_read": 2,
+        "bytes_read": 72
+    }
+}
diff --git a/parser/testdata/00599_create_view_with_subquery/metadata.json b/parser/testdata/00599_create_view_with_subquery/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00599_create_view_with_subquery/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00599_create_view_with_subquery/query.sql b/parser/testdata/00599_create_view_with_subquery/query.sql
new file mode 100644
index 000000000..9fcc99388
--- /dev/null
+++ b/parser/testdata/00599_create_view_with_subquery/query.sql
@@ -0,0 +1,13 @@
+DROP TABLE IF EXISTS test_00599;
+DROP TABLE IF EXISTS test_view_00599;
+
+CREATE TABLE test_00599(id UInt64) ENGINE = Log;
+CREATE VIEW test_view_00599 AS SELECT * FROM test_00599 WHERE id = (SELECT 1);
+
+DETACH TABLE test_view_00599;
+ATTACH TABLE test_view_00599;
+
+SHOW CREATE TABLE test_view_00599;
+
+DROP TABLE IF EXISTS test_00599;
+DROP TABLE IF EXISTS test_view_00599;
diff --git a/parser/testdata/00600_create_temporary_table_if_not_exists/ast.json b/parser/testdata/00600_create_temporary_table_if_not_exists/ast.json
new file mode 100644
index 000000000..b595e4c18
--- /dev/null
+++ b/parser/testdata/00600_create_temporary_table_if_not_exists/ast.json
@@ -0,0 +1,46 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "CreateQuery temporary_table (children 3)"
+        },
+        {
+            "explain": " Identifier temporary_table"
+        },
+        {
+            "explain": " Columns definition (children 1)"
+        },
+        {
+            "explain": "  ExpressionList (children 1)"
+        },
+        {
+            "explain": "   ColumnDeclaration column (children 1)"
+        },
+        {
+            "explain": "    DataType UInt32"
+        },
+        {
+            "explain": " Storage definition (children 1)"
+        },
+        {
+            "explain": "  Function Memory"
+        }
+    ],
+
+    "rows": 8,
+
+    "statistics":
+    {
+        "elapsed": 0.00134787,
+        "rows_read": 8,
+        "bytes_read": 300
+    }
+}
diff --git a/parser/testdata/00600_create_temporary_table_if_not_exists/metadata.json b/parser/testdata/00600_create_temporary_table_if_not_exists/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00600_create_temporary_table_if_not_exists/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00600_create_temporary_table_if_not_exists/query.sql b/parser/testdata/00600_create_temporary_table_if_not_exists/query.sql
new file mode 100644
index 000000000..3f4c80ddc
--- /dev/null
+++ b/parser/testdata/00600_create_temporary_table_if_not_exists/query.sql
@@ -0,0 +1,4 @@
+CREATE TEMPORARY TABLE IF NOT EXISTS temporary_table (column UInt32) ENGINE = Memory;
+CREATE TEMPORARY TABLE IF NOT EXISTS temporary_table (column UInt32) ENGINE = Memory;
+INSERT INTO temporary_table VALUES (1), (2), (3);
+SELECT column FROM temporary_table ORDER BY column;
\ No newline at end of file
diff --git a/parser/testdata/00603_system_parts_nonexistent_database/ast.json b/parser/testdata/00603_system_parts_nonexistent_database/ast.json
new file mode 100644
index 000000000..406f9edc7
--- /dev/null
+++ b/parser/testdata/00603_system_parts_nonexistent_database/ast.json
@@ -0,0 +1,64 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 3)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Function count (children 1)"
+        },
+        {
+            "explain": "     ExpressionList"
+        },
+        {
+            "explain": "   TablesInSelectQuery (children 1)"
+        },
+        {
+            "explain": "    TablesInSelectQueryElement (children 1)"
+        },
+        {
+            "explain": "     TableExpression (children 1)"
+        },
+        {
+            "explain": "      TableIdentifier system.parts"
+        },
+        {
+            "explain": "   Function equals (children 1)"
+        },
+        {
+            "explain": "    ExpressionList (children 2)"
+        },
+        {
+            "explain": "     Identifier database"
+        },
+        {
+            "explain": "     Literal 'T5yajf3DLcMjJJvpCeX5ajUy1P0VTk51zMEp1kDKXZAGr5EpleuIKbuY8cKaThkaBqllUm2EFxDX'"
+        }
+    ],
+
+    "rows": 14,
+
+    "statistics":
+    {
+        "elapsed": 0.001537147,
+        "rows_read": 14,
+        "bytes_read": 601
+    }
+}
diff --git a/parser/testdata/00603_system_parts_nonexistent_database/metadata.json b/parser/testdata/00603_system_parts_nonexistent_database/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00603_system_parts_nonexistent_database/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00603_system_parts_nonexistent_database/query.sql b/parser/testdata/00603_system_parts_nonexistent_database/query.sql
new file mode 100644
index 000000000..378d9dc80
--- /dev/null
+++ b/parser/testdata/00603_system_parts_nonexistent_database/query.sql
@@ -0,0 +1 @@
+SELECT count() FROM system.parts WHERE database = 'T5yajf3DLcMjJJvpCeX5ajUy1P0VTk51zMEp1kDKXZAGr5EpleuIKbuY8cKaThkaBqllUm2EFxDX';
diff --git a/parser/testdata/00604_shard_remote_and_columns_with_defaults/ast.json b/parser/testdata/00604_shard_remote_and_columns_with_defaults/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00604_shard_remote_and_columns_with_defaults/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00604_shard_remote_and_columns_with_defaults/metadata.json b/parser/testdata/00604_shard_remote_and_columns_with_defaults/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00604_shard_remote_and_columns_with_defaults/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00604_shard_remote_and_columns_with_defaults/query.sql b/parser/testdata/00604_shard_remote_and_columns_with_defaults/query.sql
new file mode 100644
index 000000000..304131d40
--- /dev/null
+++ b/parser/testdata/00604_shard_remote_and_columns_with_defaults/query.sql
@@ -0,0 +1,40 @@
+-- Tags: shard
+
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP TABLE IF EXISTS t3;
+DROP TABLE IF EXISTS t4;
+
+CREATE TABLE t1(x UInt32, y UInt32) ENGINE TinyLog;
+CREATE TABLE t2(x UInt32, y UInt32 DEFAULT x + 1) ENGINE TinyLog;
+CREATE TABLE t3(x UInt32, y UInt32 MATERIALIZED x + 1) ENGINE TinyLog;
+CREATE TABLE t4(x UInt32, y UInt32 ALIAS x + 1) ENGINE TinyLog;
+
+INSERT INTO t1 VALUES (1, 1);
+INSERT INTO t2 VALUES (1, 1);
+INSERT INTO t3 VALUES (1);
+INSERT INTO t4 VALUES (1);
+
+INSERT INTO FUNCTION remote('127.0.0.2', currentDatabase(), t1) VALUES (2, 2);
+INSERT INTO FUNCTION remote('127.0.0.2', currentDatabase(), t2) VALUES (2, 2);
+--TODO: INSERT into remote tables with MATERIALIZED columns.
+--INSERT INTO FUNCTION remote('127.0.0.2', currentDatabase(), t3) VALUES (2);
+INSERT INTO FUNCTION remote('127.0.0.2', currentDatabase(), t4) VALUES (2);
+
+SELECT * FROM remote('127.0.0.2', currentDatabase(), t1) ORDER BY x;
+
+SELECT '*** With a DEFAULT column ***';
+SELECT * FROM remote('127.0.0.2', currentDatabase(), t2) ORDER BY x;
+
+SELECT '*** With a MATERIALIZED column ***';
+SELECT * FROM remote('127.0.0.2', currentDatabase(), t3) ORDER BY x;
+SELECT x, y FROM remote('127.0.0.2', currentDatabase(), t3) ORDER BY x;
+
+SELECT '*** With an ALIAS column ***';
+SELECT * FROM remote('127.0.0.2', currentDatabase(), t4) ORDER BY x;
+SELECT x, y FROM remote('127.0.0.2', currentDatabase(), t4) ORDER BY x;
+
+DROP TABLE t1;
+DROP TABLE t2;
+DROP TABLE t3;
+DROP TABLE t4;
diff --git a/parser/testdata/00604_show_create_database/ast.json b/parser/testdata/00604_show_create_database/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00604_show_create_database/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00604_show_create_database/metadata.json b/parser/testdata/00604_show_create_database/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00604_show_create_database/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00604_show_create_database/query.sql b/parser/testdata/00604_show_create_database/query.sql
new file mode 100644
index 000000000..23ebd23c9
--- /dev/null
+++ b/parser/testdata/00604_show_create_database/query.sql
@@ -0,0 +1,3 @@
+-- Tags: no-ordinary-database, no-replicated-database
+
+show create database {CLICKHOUSE_DATABASE:Identifier};
diff --git a/parser/testdata/00605_intersections_aggregate_functions/ast.json b/parser/testdata/00605_intersections_aggregate_functions/ast.json
new file mode 100644
index 000000000..beb2e4936
--- /dev/null
+++ b/parser/testdata/00605_intersections_aggregate_functions/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery test (children 1)"
+        },
+        {
+            "explain": " Identifier test"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001372219,
+        "rows_read": 2,
+        "bytes_read": 60
+    }
+}
diff --git a/parser/testdata/00605_intersections_aggregate_functions/metadata.json b/parser/testdata/00605_intersections_aggregate_functions/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00605_intersections_aggregate_functions/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00605_intersections_aggregate_functions/query.sql b/parser/testdata/00605_intersections_aggregate_functions/query.sql
new file mode 100644
index 000000000..29d458d51
--- /dev/null
+++ b/parser/testdata/00605_intersections_aggregate_functions/query.sql
@@ -0,0 +1,20 @@
+DROP TABLE IF EXISTS test;
+CREATE TABLE test(start Integer, end Integer) engine = Memory;
+INSERT INTO test(start,end) VALUES (1,3),(2,7),(3,999),(4,7),(5,8);
+
+/*
+1 2 3 4 5 6 7 8 9
+------------------>
+1---3
+  2---------7
+    3-------------
+      4-----7
+        5-----8
+------------------>
+1 2 3 3 4 4 4 2 1 //intersections count for each point
+*/
+
+SELECT maxIntersections(start,end) FROM test;
+SELECT maxIntersectionsPosition(start,end) FROM test;
+
+DROP TABLE test;
diff --git a/parser/testdata/00606_quantiles_and_nans/ast.json b/parser/testdata/00606_quantiles_and_nans/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00606_quantiles_and_nans/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00606_quantiles_and_nans/metadata.json b/parser/testdata/00606_quantiles_and_nans/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00606_quantiles_and_nans/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00606_quantiles_and_nans/query.sql b/parser/testdata/00606_quantiles_and_nans/query.sql
new file mode 100644
index 000000000..f685a6988
--- /dev/null
+++ b/parser/testdata/00606_quantiles_and_nans/query.sql
@@ -0,0 +1,16 @@
+SELECT DISTINCT
+    eq
+FROM
+(
+    WITH
+        range(2 + number % 10) AS arr, -- minimum two elements, to avoid nan result --
+        arrayMap(x -> x = intDiv(number, 10) ? nan : x, arr) AS arr_with_nan,
+        arrayFilter(x -> x != intDiv(number, 10), arr) AS arr_filtered
+    SELECT
+        number,
+        arrayReduce('quantileExact', arr_with_nan) AS q1,
+        arrayReduce('quantileExact', arr_filtered) AS q2,
+        q1 = q2 AS eq
+    FROM
+        numbers(100)
+    );
diff --git a/parser/testdata/00607_index_in_in/ast.json b/parser/testdata/00607_index_in_in/ast.json
new file mode 100644
index 000000000..fa2e67099
--- /dev/null
+++ b/parser/testdata/00607_index_in_in/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery merge_tree (children 1)"
+        },
+        {
+            "explain": " Identifier merge_tree"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001418482,
+        "rows_read": 2,
+        "bytes_read": 72
+    }
+}
diff --git a/parser/testdata/00607_index_in_in/metadata.json b/parser/testdata/00607_index_in_in/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00607_index_in_in/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00607_index_in_in/query.sql b/parser/testdata/00607_index_in_in/query.sql
new file mode 100644
index 000000000..868b7873b
--- /dev/null
+++ b/parser/testdata/00607_index_in_in/query.sql
@@ -0,0 +1,19 @@
+DROP TABLE IF EXISTS merge_tree;
+CREATE TABLE merge_tree (x UInt32) ENGINE = MergeTree ORDER BY x SETTINGS index_granularity = 1;
+INSERT INTO merge_tree VALUES (0), (1);
+
+SET force_primary_key = 1;
+SET max_rows_to_read = 1;
+
+-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures.
+SET parallel_replicas_index_analysis_only_on_coordinator = 0;
+
+SELECT count() FROM merge_tree WHERE x = 0;
+SELECT count() FROM merge_tree WHERE toUInt32(x) = 0;
+SELECT count() FROM merge_tree WHERE toUInt64(x) = 0;
+
+SELECT count() FROM merge_tree WHERE x IN (0, 0);
+SELECT count() FROM merge_tree WHERE toUInt32(x) IN (0, 0);
+SELECT count() FROM merge_tree WHERE toUInt64(x) IN (0, 0);
+
+DROP TABLE merge_tree;
diff --git a/parser/testdata/00608_uniq_array/ast.json b/parser/testdata/00608_uniq_array/ast.json
new file mode 100644
index 000000000..ec743da29
--- /dev/null
+++ b/parser/testdata/00608_uniq_array/ast.json
@@ -0,0 +1,94 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 2)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Function uniq (children 1)"
+        },
+        {
+            "explain": "     ExpressionList (children 1)"
+        },
+        {
+            "explain": "      Identifier x"
+        },
+        {
+            "explain": "   TablesInSelectQuery (children 1)"
+        },
+        {
+            "explain": "    TablesInSelectQueryElement (children 1)"
+        },
+        {
+            "explain": "     TableExpression (children 1)"
+        },
+        {
+            "explain": "      Subquery (children 1)"
+        },
+        {
+            "explain": "       SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": "        ExpressionList (children 1)"
+        },
+        {
+            "explain": "         SelectQuery (children 1)"
+        },
+        {
+            "explain": "          ExpressionList (children 1)"
+        },
+        {
+            "explain": "           Function arrayJoin (alias x) (children 1)"
+        },
+        {
+            "explain": "            ExpressionList (children 1)"
+        },
+        {
+            "explain": "             Function array (children 1)"
+        },
+        {
+            "explain": "              ExpressionList (children 4)"
+        },
+        {
+            "explain": "               Literal Array_[UInt64_1, UInt64_2]"
+        },
+        {
+            "explain": "               Literal Array_[UInt64_1, UInt64_2]"
+        },
+        {
+            "explain": "               Literal Array_[UInt64_1, UInt64_2, UInt64_3]"
+        },
+        {
+            "explain": "               Function array (children 1)"
+        },
+        {
+            "explain": "                ExpressionList"
+        }
+    ],
+
+    "rows": 24,
+
+    "statistics":
+    {
+        "elapsed": 0.001256172,
+        "rows_read": 24,
+        "bytes_read": 1073
+    }
+}
diff --git a/parser/testdata/00608_uniq_array/metadata.json b/parser/testdata/00608_uniq_array/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00608_uniq_array/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00608_uniq_array/query.sql b/parser/testdata/00608_uniq_array/query.sql
new file mode 100644
index 000000000..d4b0efd8f
--- /dev/null
+++ b/parser/testdata/00608_uniq_array/query.sql
@@ -0,0 +1,3 @@
+SELECT uniq(x) FROM (SELECT arrayJoin([[1, 2], [1, 2], [1, 2, 3], []]) AS x);
+SELECT uniqExact(x) FROM (SELECT arrayJoin([[1, 2], [1, 2], [1, 2, 3], []]) AS x);
+SELECT uniqUpTo(2)(x) FROM (SELECT arrayJoin([[1, 2], [1, 2], [1, 2, 3], []]) AS x);
diff --git a/parser/testdata/00609_distributed_with_case_when_then/ast.json b/parser/testdata/00609_distributed_with_case_when_then/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00609_distributed_with_case_when_then/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00609_distributed_with_case_when_then/metadata.json b/parser/testdata/00609_distributed_with_case_when_then/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00609_distributed_with_case_when_then/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00609_distributed_with_case_when_then/query.sql b/parser/testdata/00609_distributed_with_case_when_then/query.sql
new file mode 100644
index 000000000..260982010
--- /dev/null
+++ b/parser/testdata/00609_distributed_with_case_when_then/query.sql
@@ -0,0 +1,18 @@
+-- Tags: distributed
+
+DROP TABLE IF EXISTS mergetree_00609;
+DROP TABLE IF EXISTS distributed_00609;
+
+CREATE TABLE mergetree_00609 (x UInt64, s String) ENGINE = MergeTree ORDER BY x;
+INSERT INTO mergetree_00609 VALUES (1, 'hello'), (2, 'world');
+
+SELECT CASE x WHEN 1 THEN 'hello' WHEN 2 THEN 'world' ELSE 'unknow' END FROM mergetree_00609;
+SELECT count() AS cnt FROM (SELECT CASE x WHEN 1 THEN 'hello' WHEN 2 THEN 'world' ELSE 'unknow' END FROM mergetree_00609);
+
+CREATE TABLE distributed_00609 AS mergetree_00609 ENGINE = Distributed(test_shard_localhost, currentDatabase(), mergetree_00609);
+
+SELECT CASE x WHEN 1 THEN 'hello' WHEN 2 THEN 'world' ELSE 'unknow' END FROM distributed_00609;
+SELECT count() AS cnt FROM (SELECT CASE x WHEN 1 THEN 'hello' WHEN 2 THEN 'world' ELSE 'unknow' END FROM distributed_00609);
+
+DROP TABLE mergetree_00609;
+DROP TABLE distributed_00609;
diff --git a/parser/testdata/00609_mv_index_in_in/ast.json b/parser/testdata/00609_mv_index_in_in/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00609_mv_index_in_in/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00609_mv_index_in_in/metadata.json b/parser/testdata/00609_mv_index_in_in/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00609_mv_index_in_in/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00609_mv_index_in_in/query.sql b/parser/testdata/00609_mv_index_in_in/query.sql
new file mode 100644
index 000000000..848938780
--- /dev/null
+++ b/parser/testdata/00609_mv_index_in_in/query.sql
@@ -0,0 +1,18 @@
+-- Tags: no-ordinary-database, no-parallel
+
+DROP TABLE IF EXISTS test_00609;
+DROP TABLE IF EXISTS test_mv_00609;
+
+create table test_00609 (a Int8) engine=Memory;
+
+insert into test_00609 values (1);
+set database_replicated_allow_explicit_uuid=3;
+set allow_deprecated_syntax_for_merge_tree=1;
+create materialized view test_mv_00609 uuid '00000609-1000-4000-8000-000000000001' Engine=MergeTree(date, (a), 8192) populate as select a, toDate('2000-01-01') date from test_00609;
+
+select * from test_mv_00609; -- OK
+select * from test_mv_00609 where a in (select a from test_mv_00609); -- EMPTY (bug)
+select * from ".inner_id.00000609-1000-4000-8000-000000000001" where a in (select a from test_mv_00609); -- OK
+
+DROP TABLE test_00609;
+DROP TABLE test_mv_00609;
diff --git a/parser/testdata/00609_prewhere_and_default/ast.json b/parser/testdata/00609_prewhere_and_default/ast.json
new file mode 100644
index 000000000..e68df1c03
--- /dev/null
+++ b/parser/testdata/00609_prewhere_and_default/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery table_00609 (children 1)"
+        },
+        {
+            "explain": " Identifier table_00609"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001354986,
+        "rows_read": 2,
+        "bytes_read": 74
+    }
+}
diff --git a/parser/testdata/00609_prewhere_and_default/metadata.json b/parser/testdata/00609_prewhere_and_default/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00609_prewhere_and_default/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00609_prewhere_and_default/query.sql b/parser/testdata/00609_prewhere_and_default/query.sql
new file mode 100644
index 000000000..f1aa69c13
--- /dev/null
+++ b/parser/testdata/00609_prewhere_and_default/query.sql
@@ -0,0 +1,27 @@
+drop table if exists `table_00609`;
+create table `table_00609` (key UInt64, val UInt64) engine = MergeTree order by key settings index_granularity=8192;
+insert into `table_00609` select number, number / 8192 from system.numbers limit 100000;
+alter table `table_00609` add column def UInt64 default val + 1;
+select * from `table_00609` prewhere val > 2 format Null;
+select * from `table_00609` prewhere val > 2 format Null SETTINGS max_block_size=100;
+select * from `table_00609` prewhere val > 2 format Null SETTINGS max_block_size=1000;
+select * from `table_00609` prewhere val > 2 format Null SETTINGS max_block_size=10000;
+select * from `table_00609` prewhere val > 2 format Null SETTINGS max_block_size=20000;
+select * from `table_00609` prewhere val > 2 format Null SETTINGS max_block_size=30000;
+select * from `table_00609` prewhere val > 2 format Null SETTINGS max_block_size=40000;
+select * from `table_00609` prewhere val > 2 format Null SETTINGS max_block_size=80000;
+
+drop table if exists `table_00609`;
+create table `table_00609` (key UInt64, val UInt64) engine = MergeTree order by key settings index_granularity=8192;
+insert into `table_00609` select number, number / 8192 from system.numbers limit 100000;
+alter table `table_00609` add column def UInt64;
+select * from `table_00609` prewhere val > 2 format Null;
+select * from `table_00609` prewhere val > 2 format Null SETTINGS max_block_size=100;
+select * from `table_00609` prewhere val > 2 format Null SETTINGS max_block_size=1000;
+select * from `table_00609` prewhere val > 2 format Null SETTINGS max_block_size=10000;
+select * from `table_00609` prewhere val > 2 format Null SETTINGS max_block_size=20000;
+select * from `table_00609` prewhere val > 2 format Null SETTINGS max_block_size=30000;
+select * from `table_00609` prewhere val > 2 format Null SETTINGS max_block_size=40000;
+select * from `table_00609` prewhere val > 2 format Null SETTINGS max_block_size=80000;
+
+drop table if exists `table_00609`;
diff --git a/parser/testdata/00610_materialized_view_forward_alter_partition_statements/ast.json b/parser/testdata/00610_materialized_view_forward_alter_partition_statements/ast.json
new file mode 100644
index 000000000..a17317cf8
--- /dev/null
+++ b/parser/testdata/00610_materialized_view_forward_alter_partition_statements/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery tab_00610 (children 1)"
+        },
+        {
+            "explain": " Identifier tab_00610"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001349234,
+        "rows_read": 2,
+        "bytes_read": 70
+    }
+}
diff --git a/parser/testdata/00610_materialized_view_forward_alter_partition_statements/metadata.json b/parser/testdata/00610_materialized_view_forward_alter_partition_statements/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00610_materialized_view_forward_alter_partition_statements/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00610_materialized_view_forward_alter_partition_statements/query.sql b/parser/testdata/00610_materialized_view_forward_alter_partition_statements/query.sql
new file mode 100644
index 000000000..8830204ec
--- /dev/null
+++ b/parser/testdata/00610_materialized_view_forward_alter_partition_statements/query.sql
@@ -0,0 +1,19 @@
+DROP TABLE IF EXISTS tab_00610;
+DROP TABLE IF EXISTS mv_00610;
+
+set allow_deprecated_syntax_for_merge_tree=1;
+CREATE TABLE tab_00610(d Date, x UInt32) ENGINE MergeTree(d, x, 8192);
+CREATE MATERIALIZED VIEW mv_00610(d Date, y UInt64) ENGINE MergeTree(d, y, 8192) AS SELECT d, x + 1 AS y FROM tab_00610;
+
+INSERT INTO tab_00610 VALUES ('2018-01-01', 1), ('2018-01-01', 2), ('2018-02-01', 3);
+
+SELECT '-- Before DROP PARTITION --';
+SELECT * FROM mv_00610 ORDER BY y;
+
+ALTER TABLE mv_00610 DROP PARTITION 201801;
+
+SELECT '-- After DROP PARTITION --';
+SELECT * FROM mv_00610 ORDER BY y;
+
+DROP TABLE tab_00610;
+DROP TABLE mv_00610;
diff --git a/parser/testdata/00612_count/ast.json b/parser/testdata/00612_count/ast.json
new file mode 100644
index 000000000..dd3933209
--- /dev/null
+++ b/parser/testdata/00612_count/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery count (children 1)"
+        },
+        {
+            "explain": " Identifier count"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001142725,
+        "rows_read": 2,
+        "bytes_read": 62
+    }
+}
diff --git a/parser/testdata/00612_count/metadata.json b/parser/testdata/00612_count/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00612_count/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00612_count/query.sql b/parser/testdata/00612_count/query.sql
new file mode 100644
index 000000000..9c435bd97
--- /dev/null
+++ b/parser/testdata/00612_count/query.sql
@@ -0,0 +1,18 @@
+DROP TABLE IF EXISTS count;
+
+CREATE TABLE count (x UInt64) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';
+INSERT INTO count SELECT * FROM numbers(1234567);
+
+SELECT count() FROM count;
+SELECT count() * 2 FROM count;
+SELECT count() FROM (SELECT * FROM count UNION ALL SELECT * FROM count);
+SELECT count() FROM count WITH TOTALS;
+SELECT arrayJoin([count(), count()]) FROM count;
+SELECT arrayJoin([count(), count()]) FROM count LIMIT 1;
+SELECT arrayJoin([count(), count()]) FROM count LIMIT 1, 1;
+SELECT arrayJoin([count(), count()]) AS x FROM count LIMIT 1 BY x;
+SELECT arrayJoin([count(), count() + 1]) AS x FROM count LIMIT 1 BY x;
+SELECT count() FROM count HAVING count() = 1234567;
+SELECT count() FROM count HAVING count() != 1234567;
+
+DROP TABLE count;
diff --git a/parser/testdata/00612_http_max_query_size_for_distributed/ast.json b/parser/testdata/00612_http_max_query_size_for_distributed/ast.json
new file mode 100644
index 000000000..f5de26b06
--- /dev/null
+++ b/parser/testdata/00612_http_max_query_size_for_distributed/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery data_00612 (children 1)"
+        },
+        {
+            "explain": " Identifier data_00612"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001278013,
+        "rows_read": 2,
+        "bytes_read": 72
+    }
+}
diff --git a/parser/testdata/00612_http_max_query_size_for_distributed/metadata.json b/parser/testdata/00612_http_max_query_size_for_distributed/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00612_http_max_query_size_for_distributed/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00612_http_max_query_size_for_distributed/query.sql b/parser/testdata/00612_http_max_query_size_for_distributed/query.sql
new file mode 100644
index 000000000..39ca03bc2
--- /dev/null
+++ b/parser/testdata/00612_http_max_query_size_for_distributed/query.sql
@@ -0,0 +1,17 @@
+DROP TABLE IF EXISTS data_00612;
+DROP TABLE IF EXISTS dist_00612;
+
+CREATE TABLE data_00612 (key UInt64, val UInt64) ENGINE = MergeTree ORDER BY key;
+CREATE TABLE dist_00612 AS data_00612 ENGINE = Distributed(test_shard_localhost, currentDatabase(), data_00612, rand());
+
+SET distributed_foreground_insert=1;
+SET prefer_localhost_replica=0;
+SET max_query_size=29;
+INSERT INTO dist_00612 VALUES(1, 1), (2, 2), (3, 3), (4, 4), (5, 5);
+SELECT key FROM dist_00612;
+
+SET max_query_size=262144;
+SET distributed_foreground_insert=0;
+SET prefer_localhost_replica=1;
+DROP TABLE dist_00612;
+DROP TABLE data_00612;
diff --git a/parser/testdata/00612_pk_in_tuple/ast.json b/parser/testdata/00612_pk_in_tuple/ast.json
new file mode 100644
index 000000000..c22e192c3
--- /dev/null
+++ b/parser/testdata/00612_pk_in_tuple/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery tab_00612 (children 1)"
+        },
+        {
+            "explain": " Identifier tab_00612"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001339276,
+        "rows_read": 2,
+        "bytes_read": 70
+    }
+}
diff --git a/parser/testdata/00612_pk_in_tuple/metadata.json b/parser/testdata/00612_pk_in_tuple/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00612_pk_in_tuple/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00612_pk_in_tuple/query.sql b/parser/testdata/00612_pk_in_tuple/query.sql
new file mode 100644
index 000000000..7c928019a
--- /dev/null
+++ b/parser/testdata/00612_pk_in_tuple/query.sql
@@ -0,0 +1,48 @@
+drop table if exists tab_00612;
+create table tab_00612 (key UInt64, arr Array(UInt64)) Engine = MergeTree order by key;
+insert into tab_00612 values (1, [1]);
+insert into tab_00612 values (2, [2]);
+select 'all';
+select * from tab_00612 order by key;
+select 'key, arrayJoin(arr) in (1, 1)';
+select key, arrayJoin(arr) as val from tab_00612 where (key, val) in (1, 1);
+select 'key, arrayJoin(arr) in ((1, 1), (2, 2))';
+select key, arrayJoin(arr) as val from tab_00612 where (key, val) in ((1, 1), (2, 2)) order by key;
+select '(key, left array join arr) in (1, 1)';
+select key from tab_00612 left array join arr as val where (key, val) in (1, 1);
+select '(key, left array join arr) in ((1, 1), (2, 2))';
+select key from tab_00612 left array join arr as val where (key, val) in ((1, 1), (2, 2)) order by key;
+
+drop table if exists tab_00612;
+create table tab_00612 (key UInt64, n Nested(x UInt64)) Engine = MergeTree order by key;
+insert into tab_00612 values (1, [1]);
+insert into tab_00612 values (2, [2]);
+select 'all';
+select * from tab_00612 order by key;
+select 'key, arrayJoin(n.x) in (1, 1)';
+select key, arrayJoin(n.x) as val from tab_00612 where (key, val) in (1, 1);
+select 'key, arrayJoin(n.x) in ((1, 1), (2, 2))';
+select key, arrayJoin(n.x) as val from tab_00612 where (key, val) in ((1, 1), (2, 2)) order by key;
+select '(key, left array join n.x) in (1, 1)';
+select key from tab_00612 left array join n.x as val where (key, val) in (1, 1);
+select '(key, left array join n.x) in ((1, 1), (2, 2))';
+select key from tab_00612 left array join n.x as val where (key, val) in ((1, 1), (2, 2)) order by key;
+select 'max(key) from tab_00612 where (key, left array join n.x) in (1, 1)';
+select max(key) from tab_00612 left array join `n.x` as val where (key, val) in ((1, 1));
+select max(key) from tab_00612 left array join n as val where (key, val.x) in (1, 1);
+select 'max(key) from tab_00612 where (key, left array join n.x) in ((1, 1), (2, 2))';
+select max(key) from tab_00612 left array join `n.x` as val where (key, val) in ((1, 1), (2, 2));
+select max(key) from tab_00612 left array join n as val where (key, val.x) in ((1, 1), (2, 2));
+select 'max(key) from tab_00612 any left join (select key, arrayJoin(n.x) as val from tab_00612) using key where (key, val) in (1, 1)';
+select max(key) from tab_00612 any left join (select key, arrayJoin(n.x) as val from tab_00612) js2 using key where (key, val) in (1, 1);
+select 'max(key) from tab_00612 any left join (select key, arrayJoin(n.x) as val from tab_00612) using key where (key, val) in ((1, 1), (2, 2))';
+select max(key) from tab_00612 any left join (select key, arrayJoin(n.x) as val from tab_00612) js2 using key where (key, val) in ((1, 1), (2, 2));
+
+drop table if exists tab_00612;
+CREATE TABLE tab_00612 (key1 Int32, id1 Int64, c1 Int64) ENGINE = MergeTree PARTITION BY id1 ORDER BY (key1);
+insert into tab_00612 values ( -1, 1, 0 );
+SELECT count(*) FROM tab_00612 PREWHERE id1 IN (1);
+
+SELECT count() FROM tab_00612 WHERE (key1, id1) IN (-1, 1) AND (key1, 1) IN (-1, 1) SETTINGS force_primary_key = 1;
+
+drop table tab_00612;
diff --git a/parser/testdata/00612_shard_count/ast.json b/parser/testdata/00612_shard_count/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00612_shard_count/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00612_shard_count/metadata.json b/parser/testdata/00612_shard_count/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00612_shard_count/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00612_shard_count/query.sql b/parser/testdata/00612_shard_count/query.sql
new file mode 100644
index 000000000..ee73caf7f
--- /dev/null
+++ b/parser/testdata/00612_shard_count/query.sql
@@ -0,0 +1,11 @@
+-- Tags: shard
+
+DROP TABLE IF EXISTS count;
+
+CREATE TABLE count (x UInt64) ENGINE = MergeTree ORDER BY tuple();
+INSERT INTO count SELECT * FROM numbers(1234567);
+
+SELECT count() FROM remote('127.0.0.{1,2}', currentDatabase(), count);
+SELECT count() / 2 FROM remote('127.0.0.{1,2}', currentDatabase(), count);
+
+DROP TABLE count;
diff --git a/parser/testdata/00612_union_query_with_subquery/ast.json b/parser/testdata/00612_union_query_with_subquery/ast.json
new file mode 100644
index 000000000..98a0784a4
--- /dev/null
+++ b/parser/testdata/00612_union_query_with_subquery/ast.json
@@ -0,0 +1,136 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 3)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Asterisk"
+        },
+        {
+            "explain": "   TablesInSelectQuery (children 1)"
+        },
+        {
+            "explain": "    TablesInSelectQueryElement (children 1)"
+        },
+        {
+            "explain": "     TableExpression (children 1)"
+        },
+        {
+            "explain": "      Subquery (children 1)"
+        },
+        {
+            "explain": "       SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": "        ExpressionList (children 3)"
+        },
+        {
+            "explain": "         SelectQuery (children 3)"
+        },
+        {
+            "explain": "          ExpressionList (children 1)"
+        },
+        {
+            "explain": "           Asterisk"
+        },
+        {
+            "explain": "          TablesInSelectQuery (children 1)"
+        },
+        {
+            "explain": "           TablesInSelectQueryElement (children 1)"
+        },
+        {
+            "explain": "            TableExpression (children 1)"
+        },
+        {
+            "explain": "             TableIdentifier system.numbers"
+        },
+        {
+            "explain": "          Literal UInt64_1"
+        },
+        {
+            "explain": "         SelectQuery (children 3)"
+        },
+        {
+            "explain": "          ExpressionList (children 1)"
+        },
+        {
+            "explain": "           Asterisk"
+        },
+        {
+            "explain": "          TablesInSelectQuery (children 1)"
+        },
+        {
+            "explain": "           TablesInSelectQueryElement (children 1)"
+        },
+        {
+            "explain": "            TableExpression (children 1)"
+        },
+        {
+            "explain": "             TableIdentifier system.numbers"
+        },
+        {
+            "explain": "          Literal UInt64_2"
+        },
+        {
+            "explain": "         SelectQuery (children 3)"
+        },
+        {
+            "explain": "          ExpressionList (children 1)"
+        },
+        {
+            "explain": "           Asterisk"
+        },
+        {
+            "explain": "          TablesInSelectQuery (children 1)"
+        },
+        {
+            "explain": "           TablesInSelectQueryElement (children 1)"
+        },
+        {
+            "explain": "            TableExpression (children 1)"
+        },
+        {
+            "explain": "             TableIdentifier system.numbers"
+        },
+        {
+            "explain": "          Literal UInt64_3"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    OrderByElement (children 1)"
+        },
+        {
+            "explain": "     Identifier number"
+        }
+    ],
+
+    "rows": 38,
+
+    "statistics":
+    {
+        "elapsed": 0.001737901,
+        "rows_read": 38,
+        "bytes_read": 1599
+    }
+}
diff --git a/parser/testdata/00612_union_query_with_subquery/metadata.json b/parser/testdata/00612_union_query_with_subquery/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00612_union_query_with_subquery/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00612_union_query_with_subquery/query.sql b/parser/testdata/00612_union_query_with_subquery/query.sql
new file mode 100644
index 000000000..5c8b16a2f
--- /dev/null
+++ b/parser/testdata/00612_union_query_with_subquery/query.sql
@@ -0,0 +1,3 @@
+SELECT * FROM ((SELECT * FROM system.numbers LIMIT 1) UNION ALL SELECT * FROM system.numbers LIMIT 2 UNION ALL (SELECT * FROM system.numbers LIMIT 3)) ORDER BY number;
+SELECT * FROM (SELECT * FROM system.numbers LIMIT 1 UNION ALL (SELECT * FROM system.numbers LIMIT 2 UNION ALL (SELECT * FROM system.numbers LIMIT 3))) ORDER BY number;
+select count() from view(select 1 union all (select 2 union all select 3));
diff --git a/parser/testdata/00613_shard_distributed_max_execution_time/ast.json b/parser/testdata/00613_shard_distributed_max_execution_time/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00613_shard_distributed_max_execution_time/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00613_shard_distributed_max_execution_time/metadata.json b/parser/testdata/00613_shard_distributed_max_execution_time/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00613_shard_distributed_max_execution_time/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00613_shard_distributed_max_execution_time/query.sql b/parser/testdata/00613_shard_distributed_max_execution_time/query.sql
new file mode 100644
index 000000000..1f4cb2a36
--- /dev/null
+++ b/parser/testdata/00613_shard_distributed_max_execution_time/query.sql
@@ -0,0 +1,4 @@
+-- Tags: distributed
+
+SET max_execution_time = 1, timeout_overflow_mode = 'break';
+SELECT * FROM remote('127.0.0.{2,3}', system.numbers) WHERE number < 10 FORMAT Null;
diff --git a/parser/testdata/00614_array_nullable/ast.json b/parser/testdata/00614_array_nullable/ast.json
new file mode 100644
index 000000000..633675e02
--- /dev/null
+++ b/parser/testdata/00614_array_nullable/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery test (children 1)"
+        },
+        {
+            "explain": " Identifier test"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001187012,
+        "rows_read": 2,
+        "bytes_read": 60
+    }
+}
diff --git a/parser/testdata/00614_array_nullable/metadata.json b/parser/testdata/00614_array_nullable/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00614_array_nullable/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00614_array_nullable/query.sql b/parser/testdata/00614_array_nullable/query.sql
new file mode 100644
index 000000000..1cbfbf128
--- /dev/null
+++ b/parser/testdata/00614_array_nullable/query.sql
@@ -0,0 +1,7 @@
+DROP TABLE IF EXISTS test;
+set allow_deprecated_syntax_for_merge_tree=1;
+CREATE TABLE test(date Date, keys Array(Nullable(UInt8))) ENGINE = MergeTree(date, date, 1);
+INSERT INTO test VALUES ('2017-09-10', [1, 2, 3, 4, 5, 6, 7, NULL]);
+SELECT * FROM test LIMIT 1;
+SELECT avgArray(keys) FROM test;
+DROP TABLE test;
diff --git a/parser/testdata/00614_shard_same_header_for_local_and_remote_node_in_distributed_query/ast.json b/parser/testdata/00614_shard_same_header_for_local_and_remote_node_in_distributed_query/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00614_shard_same_header_for_local_and_remote_node_in_distributed_query/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00614_shard_same_header_for_local_and_remote_node_in_distributed_query/metadata.json b/parser/testdata/00614_shard_same_header_for_local_and_remote_node_in_distributed_query/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00614_shard_same_header_for_local_and_remote_node_in_distributed_query/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00614_shard_same_header_for_local_and_remote_node_in_distributed_query/query.sql b/parser/testdata/00614_shard_same_header_for_local_and_remote_node_in_distributed_query/query.sql
new file mode 100644
index 000000000..17cc76670
--- /dev/null
+++ b/parser/testdata/00614_shard_same_header_for_local_and_remote_node_in_distributed_query/query.sql
@@ -0,0 +1,8 @@
+-- Tags: distributed
+
+drop table if exists tab;
+set allow_deprecated_syntax_for_merge_tree=1;
+create table tab (date Date, time DateTime, data String) ENGINE = MergeTree(date, (time, data), 8192);
+insert into tab values ('2018-01-21','2018-01-21 15:12:13','test');
+select time FROM remote('127.0.0.{1,2}', currentDatabase(), tab) WHERE date = '2018-01-21' limit 2;
+drop table tab;
diff --git a/parser/testdata/00615_nullable_alter_optimize/ast.json b/parser/testdata/00615_nullable_alter_optimize/ast.json
new file mode 100644
index 000000000..83097fc2d
--- /dev/null
+++ b/parser/testdata/00615_nullable_alter_optimize/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery test_00615 (children 1)"
+        },
+        {
+            "explain": " Identifier test_00615"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001285897,
+        "rows_read": 2,
+        "bytes_read": 72
+    }
+}
diff --git a/parser/testdata/00615_nullable_alter_optimize/metadata.json b/parser/testdata/00615_nullable_alter_optimize/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00615_nullable_alter_optimize/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00615_nullable_alter_optimize/query.sql b/parser/testdata/00615_nullable_alter_optimize/query.sql
new file mode 100644
index 000000000..26ff3b78d
--- /dev/null
+++ b/parser/testdata/00615_nullable_alter_optimize/query.sql
@@ -0,0 +1,23 @@
+DROP TABLE IF EXISTS test_00615;
+
+set allow_deprecated_syntax_for_merge_tree=1;
+CREATE TABLE test_00615
+(
+    dt Date,
+    id Int32,
+    key String,
+    data Nullable(Int8)
+) ENGINE = MergeTree(dt, (id, key, dt), 8192);
+
+INSERT INTO test_00615 (dt,id, key,data) VALUES ('2000-01-01', 100, 'key', 100500);
+
+alter table test_00615 drop column data;
+alter table test_00615 add column data Nullable(Float64);
+
+INSERT INTO test_00615 (dt,id, key,data) VALUES ('2000-01-01', 100, 'key', 100500);
+
+SELECT * FROM test_00615 ORDER BY data NULLS FIRST;
+OPTIMIZE TABLE test_00615;
+SELECT * FROM test_00615 ORDER BY data NULLS FIRST;
+
+DROP TABLE test_00615;
diff --git a/parser/testdata/00616_final_single_part/ast.json b/parser/testdata/00616_final_single_part/ast.json
new file mode 100644
index 000000000..f0212ed0c
--- /dev/null
+++ b/parser/testdata/00616_final_single_part/ast.json
@@ -0,0 +1,25 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "Set"
+        }
+    ],
+
+    "rows": 1,
+
+    "statistics":
+    {
+        "elapsed": 0.001233848,
+        "rows_read": 1,
+        "bytes_read": 11
+    }
+}
diff --git a/parser/testdata/00616_final_single_part/metadata.json b/parser/testdata/00616_final_single_part/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00616_final_single_part/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00616_final_single_part/query.sql b/parser/testdata/00616_final_single_part/query.sql
new file mode 100644
index 000000000..8c7720f89
--- /dev/null
+++ b/parser/testdata/00616_final_single_part/query.sql
@@ -0,0 +1,30 @@
+SET optimize_on_insert = 0;
+
+DROP TABLE IF EXISTS test_00616;
+DROP TABLE IF EXISTS replacing_00616;
+
+set allow_deprecated_syntax_for_merge_tree=1;
+CREATE TABLE test_00616
+(
+    date Date,
+    x Int32,
+    ver UInt64
+)
+ENGINE = MergeTree(date, x, 4096);
+
+INSERT INTO test_00616 VALUES ('2018-03-21', 1, 1), ('2018-03-21', 1, 2);
+set allow_deprecated_syntax_for_merge_tree=1;
+CREATE TABLE replacing_00616 ENGINE = ReplacingMergeTree(date, x, 4096, ver) AS SELECT * FROM test_00616;
+
+SELECT * FROM test_00616 ORDER BY ver;
+
+SELECT * FROM replacing_00616 ORDER BY ver;
+SELECT * FROM replacing_00616 FINAL ORDER BY ver;
+
+OPTIMIZE TABLE replacing_00616 PARTITION '201803' FINAL;
+
+SELECT * FROM replacing_00616 ORDER BY ver;
+SELECT * FROM replacing_00616 FINAL ORDER BY ver;
+
+DROP TABLE test_00616;
+DROP TABLE replacing_00616;
diff --git a/parser/testdata/00617_array_in/ast.json b/parser/testdata/00617_array_in/ast.json
new file mode 100644
index 000000000..c4ef13812
--- /dev/null
+++ b/parser/testdata/00617_array_in/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery test_array_ops (children 1)"
+        },
+        {
+            "explain": " Identifier test_array_ops"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001279648,
+        "rows_read": 2,
"bytes_read": 80 + } +} diff --git a/parser/testdata/00617_array_in/metadata.json b/parser/testdata/00617_array_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00617_array_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00617_array_in/query.sql b/parser/testdata/00617_array_in/query.sql new file mode 100644 index 000000000..8e61c7dbd --- /dev/null +++ b/parser/testdata/00617_array_in/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS test_array_ops; +CREATE TABLE test_array_ops(arr Array(Nullable(Int64))) ENGINE = Memory; + +INSERT INTO test_array_ops(arr) values ([null, 10, -20]); +INSERT INTO test_array_ops(arr) values ([10, -20]); +INSERT INTO test_array_ops(arr) values ([]); + +SELECT count(*) FROM test_array_ops where arr < CAST([10, -20] AS Array(Nullable(Int64))); +SELECT count(*) FROM test_array_ops where arr > CAST([10, -20] AS Array(Nullable(Int64))); +SELECT count(*) FROM test_array_ops where arr >= CAST([10, -20] AS Array(Nullable(Int64))); +SELECT count(*) FROM test_array_ops where arr <= CAST([10, -20] AS Array(Nullable(Int64))); +SELECT count(*) FROM test_array_ops where arr = CAST([10, -20] AS Array(Nullable(Int64))); +SELECT count(*) FROM test_array_ops where arr IN( CAST([10, -20] AS Array(Nullable(Int64))), CAST([null,10, -20] AS Array(Nullable(Int64)))); + +DROP TABLE test_array_ops; diff --git a/parser/testdata/00618_nullable_in/ast.json b/parser/testdata/00618_nullable_in/ast.json new file mode 100644 index 000000000..4778226ca --- /dev/null +++ b/parser/testdata/00618_nullable_in/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal 'a'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001246875, + "rows_read": 12, + "bytes_read": 454 + } +} diff --git a/parser/testdata/00618_nullable_in/metadata.json b/parser/testdata/00618_nullable_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00618_nullable_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00618_nullable_in/query.sql b/parser/testdata/00618_nullable_in/query.sql new file mode 100644 index 000000000..f039f1fb9 --- /dev/null +++ b/parser/testdata/00618_nullable_in/query.sql @@ -0,0 +1,14 @@ +SELECT sum(toNullable('a') IN 'a'); +SELECT countIf(number, toNullable('a') IN ('a', 'b')) FROM numbers(100); + +SELECT + uniqExact(x) AS u, + uniqExactIf(x, name = 'a') AS ue, + uniqExactIf(x, name IN ('a', 'b')) AS ui +FROM +( + SELECT + toNullable('a') AS name, + arrayJoin(range(10)) AS x +) +WHERE name = 'a'; diff --git a/parser/testdata/00619_extract/ast.json b/parser/testdata/00619_extract/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00619_extract/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00619_extract/metadata.json 
b/parser/testdata/00619_extract/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00619_extract/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00619_extract/query.sql b/parser/testdata/00619_extract/query.sql new file mode 100644 index 000000000..73f1bc061 --- /dev/null +++ b/parser/testdata/00619_extract/query.sql @@ -0,0 +1,39 @@ + +SELECT EXTRACT(DAY FROM toDate('2017-06-15')); +SELECT EXTRACT (MONTH FROM toDate('2017-06-15')); +SELECT EXTRACT(YEAR FROM toDate('2017-06-15')); +SELECT EXTRACT(SECOND FROM toDateTime('2017-12-31 18:59:58')); +SELECT EXTRACT(MINUTE FROM toDateTime('2017-12-31 18:59:58')); +SELECT EXTRACT(HOUR FROM toDateTime('2017-12-31 18:59:58')); +SELECT EXTRACT(DAY from toDateTime('2017-12-31 18:59:58')); +SELECT extract(MONTH FROM toDateTime('2017-12-31 18:59:58')); +SELECT EXTRACT(year FROM toDateTime('2017-12-31 18:59:58')); + + +DROP TABLE IF EXISTS Orders; +CREATE TABLE Orders (OrderId UInt64, OrderName String, OrderDate DateTime) engine = Log; +insert into Orders values (1, 'Jarlsberg Cheese', toDateTime('2008-10-11 13:23:44')); +SELECT EXTRACT(YYYY FROM OrderDate) AS OrderYear, EXTRACT(MONTH FROM OrderDate) AS OrderMonth, EXTRACT(DAY FROM OrderDate) AS OrderDay, + EXTRACT(HOUR FROM OrderDate), EXTRACT(MINUTE FROM OrderDate), EXTRACT(SECOND FROM OrderDate) FROM Orders WHERE OrderId=1; +DROP TABLE Orders; + + +-- TODO: +-- SELECT EXTRACT(WEEK FROM toDate('2017-06-15')); +-- SELECT EXTRACT(WEEK FROM toDateTime('2017-12-31 18:59:58')); +-- SELECT EXTRACT(MINUTE_SECOND FROM toDateTime('2017-12-31 18:59:58')); +-- SELECT EXTRACT(HOUR_SECOND FROM toDateTime('2017-12-31 18:59:58')); +-- SELECT EXTRACT(HOUR_MINUTE FROM toDateTime('2017-12-31 18:59:58')); +-- SELECT EXTRACT(DAY_SECOND FROM toDateTime('2017-12-31 18:59:58')); +-- SELECT EXTRACT(DAY_MINUTE FROM toDateTime('2017-12-31 18:59:58')); +-- SELECT EXTRACT(DAY_HOUR FROM toDateTime('2017-12-31 18:59:58')); +-- SELECT EXTRACT(YEAR_MONTH FROM toDateTime('2017-12-31 18:59:58')); +-- SELECT EXTRACT(QUARTER FROM toDate('2017-06-15')); +-- SELECT EXTRACT(DAY_SECOND FROM toDate('2017-06-15')); +-- SELECT EXTRACT(DAY_MINUTE FROM toDate('2017-06-15')); +-- SELECT EXTRACT(DAY_HOUR FROM toDate('2017-06-15')); +-- SELECT EXTRACT(YEAR_MONTH FROM toDate('2017-06-15')); +-- SELECT EXTRACT(QUARTER FROM toDateTime('2017-12-31 18:59:58')); + +-- Maybe: +-- SELECT EXTRACT (YEAR FROM DATE '2014-08-22') AS RESULT; diff --git a/parser/testdata/00619_union_highlite/ast.json b/parser/testdata/00619_union_highlite/ast.json new file mode 100644 index 000000000..38c87ca82 --- /dev/null +++ b/parser/testdata/00619_union_highlite/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery union (children 1)" + }, + { + "explain": " Identifier union" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001138975, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/00619_union_highlite/metadata.json b/parser/testdata/00619_union_highlite/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00619_union_highlite/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00619_union_highlite/query.sql b/parser/testdata/00619_union_highlite/query.sql new file mode 100644 index 000000000..9a41a1326 --- /dev/null +++ b/parser/testdata/00619_union_highlite/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS union; + 
+create view union as select 1 as test union all select 2; + +SELECT * FROM union ORDER BY test; + +DETACH TABLE union; +ATTACH TABLE union; + +SELECT * FROM union ORDER BY test; + +DROP TABLE union; diff --git a/parser/testdata/00620_optimize_on_nonleader_replica_zookeeper/ast.json b/parser/testdata/00620_optimize_on_nonleader_replica_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00620_optimize_on_nonleader_replica_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00620_optimize_on_nonleader_replica_zookeeper/metadata.json b/parser/testdata/00620_optimize_on_nonleader_replica_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00620_optimize_on_nonleader_replica_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00620_optimize_on_nonleader_replica_zookeeper/query.sql b/parser/testdata/00620_optimize_on_nonleader_replica_zookeeper/query.sql new file mode 100644 index 000000000..6c5eb6556 --- /dev/null +++ b/parser/testdata/00620_optimize_on_nonleader_replica_zookeeper/query.sql @@ -0,0 +1,31 @@ +-- Tags: replica, no-replicated-database +-- Tag no-replicated-database: Fails due to additional replicas or shards + +-- The test is mostly outdated, as every replica is now a leader and can run OPTIMIZE locally. + +DROP TABLE IF EXISTS rename1; +DROP TABLE IF EXISTS rename2; +DROP TABLE IF EXISTS rename3; +CREATE TABLE rename1 (p Int64, i Int64, v UInt64) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{database}/rename', '1', v) PARTITION BY p ORDER BY i; +CREATE TABLE rename2 (p Int64, i Int64, v UInt64) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{database}/rename', '2', v) PARTITION BY p ORDER BY i; + +INSERT INTO rename1 VALUES (0, 1, 0); +INSERT INTO rename1 VALUES (0, 1, 1); + +SYSTEM SYNC REPLICA rename1 PULL; -- Avoid "Cannot select parts for optimization: Entry for part 0_1_1_0 hasn't been read from the replication log yet" +SYSTEM SYNC REPLICA rename2; +OPTIMIZE TABLE rename1 FINAL; +OPTIMIZE TABLE rename2 FINAL; +SELECT * FROM rename1; + +RENAME TABLE rename2 TO rename3; + +INSERT INTO rename1 VALUES (0, 1, 2); +SYSTEM SYNC REPLICA rename3; -- Make "rename3" see all data parts. +OPTIMIZE TABLE rename3; +SYSTEM SYNC REPLICA rename1; -- Make "rename1" see and process all scheduled merges. 
+SELECT * FROM rename1; + +DROP TABLE IF EXISTS rename1; +DROP TABLE IF EXISTS rename2; +DROP TABLE IF EXISTS rename3; diff --git a/parser/testdata/00621_regression_for_in_operator/ast.json b/parser/testdata/00621_regression_for_in_operator/ast.json new file mode 100644 index 000000000..b0e3a468b --- /dev/null +++ b/parser/testdata/00621_regression_for_in_operator/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery regression_for_in_operator_view (children 1)" + }, + { + "explain": " Identifier regression_for_in_operator_view" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001121794, + "rows_read": 2, + "bytes_read": 114 + } +} diff --git a/parser/testdata/00621_regression_for_in_operator/metadata.json b/parser/testdata/00621_regression_for_in_operator/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00621_regression_for_in_operator/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00621_regression_for_in_operator/query.sql b/parser/testdata/00621_regression_for_in_operator/query.sql new file mode 100644 index 000000000..0d8c4933c --- /dev/null +++ b/parser/testdata/00621_regression_for_in_operator/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS regression_for_in_operator_view; +DROP TABLE IF EXISTS regression_for_in_operator; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE regression_for_in_operator (d Date, v UInt32, g String) ENGINE=MergeTree(d, d, 8192); +CREATE MATERIALIZED VIEW regression_for_in_operator_view ENGINE=AggregatingMergeTree(d, (d,g), 8192) AS SELECT d, g, maxState(v) FROM regression_for_in_operator GROUP BY d, g; + +INSERT INTO regression_for_in_operator SELECT today(), toString(number % 10), number FROM system.numbers limit 1000; + +SELECT count() FROM regression_for_in_operator_view WHERE g = '5'; +SELECT count() FROM regression_for_in_operator_view WHERE g IN ('5'); +SELECT count() FROM regression_for_in_operator_view WHERE g IN ('5','6'); + +SET optimize_min_equality_disjunction_chain_length = 1; +SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6'; +SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS enable_analyzer = 1; +EXPLAIN QUERY TREE SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS enable_analyzer = 1; + +SET optimize_min_equality_disjunction_chain_length = 3; +SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6'; +SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS enable_analyzer = 1; +EXPLAIN QUERY TREE SELECT count() FROM regression_for_in_operator_view WHERE g = '5' OR g = '6' SETTINGS enable_analyzer = 1; + +DROP TABLE regression_for_in_operator_view; +DROP TABLE regression_for_in_operator; diff --git a/parser/testdata/00622_select_in_parens/ast.json b/parser/testdata/00622_select_in_parens/ast.json new file mode 100644 index 000000000..665ff0089 --- /dev/null +++ b/parser/testdata/00622_select_in_parens/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 
0.001179172, + "rows_read": 5, + "bytes_read": 177 + } +} diff --git a/parser/testdata/00622_select_in_parens/metadata.json b/parser/testdata/00622_select_in_parens/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00622_select_in_parens/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00622_select_in_parens/query.sql b/parser/testdata/00622_select_in_parens/query.sql new file mode 100644 index 000000000..cbaa0065d --- /dev/null +++ b/parser/testdata/00622_select_in_parens/query.sql @@ -0,0 +1,3 @@ +(SELECT 1); +(SELECT 2) UNION ALL (SELECT 2); +((SELECT (((3))))); diff --git a/parser/testdata/00623_in_partition_key/ast.json b/parser/testdata/00623_in_partition_key/ast.json new file mode 100644 index 000000000..b2ccbf1b8 --- /dev/null +++ b/parser/testdata/00623_in_partition_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test54378 (children 1)" + }, + { + "explain": " Identifier test54378" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001511713, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00623_in_partition_key/metadata.json b/parser/testdata/00623_in_partition_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00623_in_partition_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00623_in_partition_key/query.sql b/parser/testdata/00623_in_partition_key/query.sql new file mode 100644 index 000000000..6cb33f96f --- /dev/null +++ b/parser/testdata/00623_in_partition_key/query.sql @@ -0,0 +1,76 @@ +drop table if exists test54378; +set allow_deprecated_syntax_for_merge_tree=1; +create table test54378 (part_date Date, pk_date Date, date Date) Engine=MergeTree(part_date, pk_date, 8192); +insert into test54378 values ('2018-04-19', '2018-04-19', '2018-04-19'); + +select 111 from test54378 where part_date = '2018-04-19'; +select 112 from test54378 where part_date in ('2018-04-19'); +select 113 from test54378 where pk_date in ('2018-04-19'); +select 114 from test54378 where date in ('2018-04-19'); +SELECT '-'; +select 121 from test54378 where part_date = toDate('2018-04-19'); +select 122 from test54378 where part_date in (toDate('2018-04-19')); +select 123 from test54378 where pk_date in (toDate('2018-04-19')); +select 124 from test54378 where date in (toDate('2018-04-19')); +SELECT '-'; +select 131 from test54378 where part_date = (SELECT toDate('2018-04-19')); +select 132 from test54378 where part_date in (SELECT toDate('2018-04-19')); +select 133 from test54378 where pk_date in (SELECT toDate('2018-04-19')); +select 134 from test54378 where date in (SELECT toDate('2018-04-19')); + +SELECT '---'; + +select 211 from test54378 prewhere part_date = '2018-04-19'; +select 212 from test54378 prewhere part_date in ('2018-04-19'); +select 213 from test54378 prewhere pk_date in ('2018-04-19'); +select 214 from test54378 prewhere date in ('2018-04-19'); +SELECT '-'; +select 221 from test54378 prewhere part_date = toDate('2018-04-19'); +select 222 from test54378 prewhere part_date in (toDate('2018-04-19')); +select 223 from test54378 prewhere pk_date in (toDate('2018-04-19')); +select 224 from test54378 prewhere date in (toDate('2018-04-19')); +SELECT '-'; +select 231 from test54378 prewhere part_date = (SELECT toDate('2018-04-19')); +select 232 from test54378 prewhere part_date in (SELECT toDate('2018-04-19')); +select 233 
from test54378 prewhere pk_date in (SELECT toDate('2018-04-19')); +select 234 from test54378 prewhere date in (SELECT toDate('2018-04-19')); + +SELECT '---'; + +SET optimize_move_to_prewhere = 0; + +select 311 from test54378 where part_date = '2018-04-19'; +select 312 from test54378 where part_date in ('2018-04-19'); +select 313 from test54378 where pk_date in ('2018-04-19'); +select 314 from test54378 where date in ('2018-04-19'); +SELECT '-'; +select 321 from test54378 where part_date = toDate('2018-04-19'); +select 322 from test54378 where part_date in (toDate('2018-04-19')); +select 323 from test54378 where pk_date in (toDate('2018-04-19')); +select 324 from test54378 where date in (toDate('2018-04-19')); +SELECT '-'; +select 331 from test54378 where part_date = (SELECT toDate('2018-04-19')); +select 332 from test54378 where part_date in (SELECT toDate('2018-04-19')); +select 333 from test54378 where pk_date in (SELECT toDate('2018-04-19')); +select 334 from test54378 where date in (SELECT toDate('2018-04-19')); + +SELECT '---'; + +SET optimize_move_to_prewhere = 1; + +select 411 from test54378 where part_date = '2018-04-19'; +select 412 from test54378 where part_date in ('2018-04-19'); +select 413 from test54378 where pk_date in ('2018-04-19'); +select 414 from test54378 where date in ('2018-04-19'); +SELECT '-'; +select 421 from test54378 where part_date = toDate('2018-04-19'); +select 422 from test54378 where part_date in (toDate('2018-04-19')); +select 423 from test54378 where pk_date in (toDate('2018-04-19')); +select 424 from test54378 where date in (toDate('2018-04-19')); +SELECT '-'; +select 431 from test54378 where part_date = (SELECT toDate('2018-04-19')); +select 432 from test54378 where part_date in (SELECT toDate('2018-04-19')); +select 433 from test54378 where pk_date in (SELECT toDate('2018-04-19')); +select 434 from test54378 where date in (SELECT toDate('2018-04-19')); + +drop table test54378; diff --git a/parser/testdata/00623_replicated_truncate_table_zookeeper_long/ast.json b/parser/testdata/00623_replicated_truncate_table_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00623_replicated_truncate_table_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00623_replicated_truncate_table_zookeeper_long/metadata.json b/parser/testdata/00623_replicated_truncate_table_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00623_replicated_truncate_table_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00623_replicated_truncate_table_zookeeper_long/query.sql b/parser/testdata/00623_replicated_truncate_table_zookeeper_long/query.sql new file mode 100644 index 000000000..53197a5d7 --- /dev/null +++ b/parser/testdata/00623_replicated_truncate_table_zookeeper_long/query.sql @@ -0,0 +1,34 @@ +-- Tags: long, replica, no-shared-merge-tree +-- no-shared-merge-tree: requires sync replica; a new test was added + +DROP TABLE IF EXISTS replicated_truncate1; +DROP TABLE IF EXISTS replicated_truncate2; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE replicated_truncate1 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_00623/truncate', 'r1') order by k partition by toYYYYMM(d); +CREATE TABLE replicated_truncate2 (d Date, k UInt64, i32 Int32) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_00623/truncate', 'r2') order by k partition by 
toYYYYMM(d); + +SELECT '======Before Truncate======'; +INSERT INTO replicated_truncate1 VALUES ('2015-01-01', 10, 42); + +SYSTEM SYNC REPLICA replicated_truncate2; + +SELECT * FROM replicated_truncate1 ORDER BY k; +SELECT * FROM replicated_truncate2 ORDER BY k; + +SELECT '======After Truncate And Empty======'; +TRUNCATE TABLE replicated_truncate1 SETTINGS replication_alter_partitions_sync=2; + +SELECT * FROM replicated_truncate1 ORDER BY k; +SELECT * FROM replicated_truncate2 ORDER BY k; + +SELECT '======After Truncate And Insert Data======'; +INSERT INTO replicated_truncate1 VALUES ('2015-01-01', 10, 42); + +SYSTEM SYNC REPLICA replicated_truncate2; + +SELECT * FROM replicated_truncate1 ORDER BY k; +SELECT * FROM replicated_truncate2 ORDER BY k; + +DROP TABLE IF EXISTS replicated_truncate1; +DROP TABLE IF EXISTS replicated_truncate2; diff --git a/parser/testdata/00623_truncate_all_tables/ast.json b/parser/testdata/00623_truncate_all_tables/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00623_truncate_all_tables/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00623_truncate_all_tables/metadata.json b/parser/testdata/00623_truncate_all_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00623_truncate_all_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00623_truncate_all_tables/query.sql b/parser/testdata/00623_truncate_all_tables/query.sql new file mode 100644 index 000000000..7006764eb --- /dev/null +++ b/parser/testdata/00623_truncate_all_tables/query.sql @@ -0,0 +1,47 @@ +-- Tags: no-replicated-database +-- no-replicated-database: TRUNCATE ALL TABLES essentially does TRUNCATE DATABASE which is disallowed for Replicated databases. 
+-- This is tested in 03362_create_table_after_truncate_replicated_database.sql + +CREATE TABLE IF NOT EXISTS truncate_test_set(id UInt64) ENGINE = Set; +CREATE TABLE IF NOT EXISTS truncate_test_log(id UInt64) ENGINE = Log; +CREATE TABLE IF NOT EXISTS truncate_test_memory(id UInt64) ENGINE = Memory; +CREATE TABLE IF NOT EXISTS truncate_test_tiny_log(id UInt64) ENGINE = TinyLog; +CREATE TABLE IF NOT EXISTS truncate_test_stripe_log(id UInt64) ENGINE = StripeLog; +CREATE TABLE IF NOT EXISTS truncate_test_merge_tree(p Date, k UInt64) ENGINE = MergeTree ORDER BY p; + +SELECT '======Before Truncate======'; +INSERT INTO truncate_test_set VALUES(0); +INSERT INTO truncate_test_log VALUES(1); +INSERT INTO truncate_test_memory VALUES(1); +INSERT INTO truncate_test_tiny_log VALUES(1); +INSERT INTO truncate_test_stripe_log VALUES(1); +INSERT INTO truncate_test_merge_tree VALUES('2000-01-01', 1); +SELECT * FROM system.numbers WHERE number NOT IN truncate_test_set LIMIT 1; +SELECT * FROM truncate_test_log; +SELECT * FROM truncate_test_memory; +SELECT * FROM truncate_test_tiny_log; +SELECT * FROM truncate_test_stripe_log; +SELECT * FROM truncate_test_merge_tree; + +SELECT '======After Truncate And Empty======'; +TRUNCATE ALL TABLES FROM IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; +SELECT * FROM system.numbers WHERE number NOT IN truncate_test_set LIMIT 1; +SELECT * FROM truncate_test_log; +SELECT * FROM truncate_test_memory; +SELECT * FROM truncate_test_tiny_log; +SELECT * FROM truncate_test_stripe_log; +SELECT * FROM truncate_test_merge_tree; + +SELECT '======After Truncate And Insert Data======'; +INSERT INTO truncate_test_set VALUES(0); +INSERT INTO truncate_test_log VALUES(1); +INSERT INTO truncate_test_memory VALUES(1); +INSERT INTO truncate_test_tiny_log VALUES(1); +INSERT INTO truncate_test_stripe_log VALUES(1); +INSERT INTO truncate_test_merge_tree VALUES('2000-01-01', 1); +SELECT * FROM system.numbers WHERE number NOT IN truncate_test_set LIMIT 1; +SELECT * FROM truncate_test_log; +SELECT * FROM truncate_test_memory; +SELECT * FROM truncate_test_tiny_log; +SELECT * FROM truncate_test_stripe_log; +SELECT * FROM truncate_test_merge_tree; diff --git a/parser/testdata/00623_truncate_table/ast.json b/parser/testdata/00623_truncate_table/ast.json new file mode 100644 index 000000000..4ee04911f --- /dev/null +++ b/parser/testdata/00623_truncate_table/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001290147, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00623_truncate_table/metadata.json b/parser/testdata/00623_truncate_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00623_truncate_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00623_truncate_table/query.sql b/parser/testdata/00623_truncate_table/query.sql new file mode 100644 index 000000000..e35803db1 --- /dev/null +++ b/parser/testdata/00623_truncate_table/query.sql @@ -0,0 +1,75 @@ +set allow_deprecated_syntax_for_merge_tree=1; + +DROP TABLE IF EXISTS truncate_test_log; +DROP TABLE IF EXISTS truncate_test_memory; +DROP TABLE IF EXISTS truncate_test_tiny_log; +DROP TABLE IF EXISTS truncate_test_stripe_log; +DROP TABLE IF EXISTS truncate_test_merge_tree; +DROP TABLE IF EXISTS truncate_test_materialized_view; +DROP TABLE IF EXISTS truncate_test_materialized_depend; + +CREATE TABLE 
truncate_test_set(id UInt64) ENGINE = Set; +CREATE TABLE truncate_test_log(id UInt64) ENGINE = Log; +CREATE TABLE truncate_test_memory(id UInt64) ENGINE = Memory; +CREATE TABLE truncate_test_tiny_log(id UInt64) ENGINE = TinyLog; +CREATE TABLE truncate_test_stripe_log(id UInt64) ENGINE = StripeLog; +CREATE TABLE truncate_test_merge_tree(p Date, k UInt64) ENGINE = MergeTree(p, k, 1); +CREATE TABLE truncate_test_materialized_depend(p Date, k UInt64) ENGINE = Null; +CREATE MATERIALIZED VIEW truncate_test_materialized_view ENGINE = MergeTree(p, k, 1) AS SELECT * FROM truncate_test_materialized_depend; + +SELECT '======Before Truncate======'; +INSERT INTO truncate_test_set VALUES(0); +INSERT INTO truncate_test_log VALUES(1); +INSERT INTO truncate_test_memory VALUES(1); +INSERT INTO truncate_test_tiny_log VALUES(1); +INSERT INTO truncate_test_stripe_log VALUES(1); +INSERT INTO truncate_test_merge_tree VALUES('2000-01-01', 1); +INSERT INTO truncate_test_materialized_depend VALUES('2000-01-01', 1); +SELECT * FROM system.numbers WHERE number NOT IN truncate_test_set LIMIT 1; +SELECT * FROM truncate_test_log; +SELECT * FROM truncate_test_memory; +SELECT * FROM truncate_test_tiny_log; +SELECT * FROM truncate_test_stripe_log; +SELECT * FROM truncate_test_merge_tree; +SELECT * FROM truncate_test_materialized_view; + +SELECT '======After Truncate And Empty======'; +TRUNCATE TABLE truncate_test_set; +TRUNCATE TABLE truncate_test_log; +TRUNCATE TABLE truncate_test_memory; +TRUNCATE TABLE truncate_test_tiny_log; +TRUNCATE TABLE truncate_test_stripe_log; +TRUNCATE TABLE truncate_test_merge_tree; +TRUNCATE TABLE truncate_test_materialized_view; +SELECT * FROM system.numbers WHERE number NOT IN truncate_test_set LIMIT 1; +SELECT * FROM truncate_test_log; +SELECT * FROM truncate_test_memory; +SELECT * FROM truncate_test_tiny_log; +SELECT * FROM truncate_test_stripe_log; +SELECT * FROM truncate_test_merge_tree; +SELECT * FROM truncate_test_materialized_view; + +SELECT '======After Truncate And Insert Data======'; +INSERT INTO truncate_test_set VALUES(0); +INSERT INTO truncate_test_log VALUES(1); +INSERT INTO truncate_test_memory VALUES(1); +INSERT INTO truncate_test_tiny_log VALUES(1); +INSERT INTO truncate_test_stripe_log VALUES(1); +INSERT INTO truncate_test_merge_tree VALUES('2000-01-01', 1); +INSERT INTO truncate_test_materialized_depend VALUES('2000-01-01', 1); +SELECT * FROM system.numbers WHERE number NOT IN truncate_test_set LIMIT 1; +SELECT * FROM truncate_test_log; +SELECT * FROM truncate_test_memory; +SELECT * FROM truncate_test_tiny_log; +SELECT * FROM truncate_test_stripe_log; +SELECT * FROM truncate_test_merge_tree; +SELECT * FROM truncate_test_materialized_view; + +DROP TABLE IF EXISTS truncate_test_set; +DROP TABLE IF EXISTS truncate_test_log; +DROP TABLE IF EXISTS truncate_test_memory; +DROP TABLE IF EXISTS truncate_test_tiny_log; +DROP TABLE IF EXISTS truncate_test_stripe_log; +DROP TABLE IF EXISTS truncate_test_merge_tree; +DROP TABLE IF EXISTS truncate_test_materialized_view; +DROP TABLE IF EXISTS truncate_test_materialized_depend; diff --git a/parser/testdata/00624_length_utf8/ast.json b/parser/testdata/00624_length_utf8/ast.json new file mode 100644 index 000000000..fe8c2e2d8 --- /dev/null +++ b/parser/testdata/00624_length_utf8/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 
1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'привет пр' (alias x)" + }, + { + "explain": " Function lengthUTF8 (alias y) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001340059, + "rows_read": 8, + "bytes_read": 318 + } +} diff --git a/parser/testdata/00624_length_utf8/metadata.json b/parser/testdata/00624_length_utf8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00624_length_utf8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00624_length_utf8/query.sql b/parser/testdata/00624_length_utf8/query.sql new file mode 100644 index 000000000..21b50a9f6 --- /dev/null +++ b/parser/testdata/00624_length_utf8/query.sql @@ -0,0 +1,2 @@ +SELECT 'привет пр' AS x, lengthUTF8(x) AS y; +SELECT x, lengthUTF8(x) AS y FROM (SELECT arrayJoin(['', 'h', 'hello', 'hello hello hello', 'п', 'пр', 'привет', 'привет привет', 'привет привет привет', '你好', '你好 你好', '你好你好你好', '你好你好你好你好', '你好 你好 你好 你好 你好']) AS x); diff --git a/parser/testdata/00625_arrays_in_nested/ast.json b/parser/testdata/00625_arrays_in_nested/ast.json new file mode 100644 index 000000000..2e11d849e --- /dev/null +++ b/parser/testdata/00625_arrays_in_nested/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nested (children 1)" + }, + { + "explain": " Identifier nested" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00147293, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/00625_arrays_in_nested/metadata.json b/parser/testdata/00625_arrays_in_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00625_arrays_in_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00625_arrays_in_nested/query.sql b/parser/testdata/00625_arrays_in_nested/query.sql new file mode 100644 index 000000000..3b418731c --- /dev/null +++ b/parser/testdata/00625_arrays_in_nested/query.sql @@ -0,0 +1,97 @@ +DROP TABLE IF EXISTS nested; +CREATE TABLE nested +( + column Nested + ( + name String, + names Array(String), + types Array(Enum8('PU' = 1, 'US' = 2, 'OTHER' = 3)) + ) +) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO nested VALUES (['Hello', 'World'], [['a'], ['b', 'c']], [['PU', 'US'], ['OTHER']]); + +SELECT * FROM nested; + +DETACH TABLE nested; +ATTACH TABLE nested; + +SELECT * FROM nested; + +INSERT INTO nested VALUES (['GoodBye'], [['1', '2']], [['PU', 'US', 'OTHER']]); + +SELECT * FROM nested ORDER BY column.name; +OPTIMIZE TABLE nested PARTITION tuple() FINAL; +SELECT * FROM nested ORDER BY column.name; + +DETACH TABLE nested; +ATTACH TABLE nested; + +SELECT * FROM nested ORDER BY column.name; + + +DROP TABLE IF EXISTS nested; +CREATE TABLE nested +( + column Nested + ( + name String, + names Array(String), + types Array(Enum8('PU' = 1, 'US' = 2, 'OTHER' = 3)) + ) +) ENGINE = Log; + +INSERT INTO nested VALUES (['Hello', 'World'], [['a'], ['b', 'c']], [['PU', 'US'], ['OTHER']]); + +SELECT * FROM nested; + + +DROP TABLE IF EXISTS nested; +CREATE TABLE nested +( + column Nested + ( + name String, + names Array(String), + types Array(Enum8('PU' = 1, 'US' = 2, 'OTHER' = 3)) + ) +) ENGINE = TinyLog; + +INSERT INTO nested VALUES (['Hello', 'World'], [['a'], ['b', 'c']], [['PU', 'US'], ['OTHER']]); + +SELECT * FROM nested; + + 
+DROP TABLE IF EXISTS nested; +CREATE TABLE nested +( + column Nested + ( + name String, + names Array(String), + types Array(Enum8('PU' = 1, 'US' = 2, 'OTHER' = 3)) + ) +) ENGINE = StripeLog; + +INSERT INTO nested VALUES (['Hello', 'World'], [['a'], ['b', 'c']], [['PU', 'US'], ['OTHER']]); + +SELECT * FROM nested; + + +DROP TABLE IF EXISTS nested; +CREATE TABLE nested +( + column Nested + ( + name String, + names Array(String), + types Array(Enum8('PU' = 1, 'US' = 2, 'OTHER' = 3)) + ) +) ENGINE = Memory; + +INSERT INTO nested VALUES (['Hello', 'World'], [['a'], ['b', 'c']], [['PU', 'US'], ['OTHER']]); + +SELECT * FROM nested; + + +DROP TABLE nested; diff --git a/parser/testdata/00625_summing_merge_tree_merge/ast.json b/parser/testdata/00625_summing_merge_tree_merge/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00625_summing_merge_tree_merge/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00625_summing_merge_tree_merge/metadata.json b/parser/testdata/00625_summing_merge_tree_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00625_summing_merge_tree_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00625_summing_merge_tree_merge/query.sql b/parser/testdata/00625_summing_merge_tree_merge/query.sql new file mode 100644 index 000000000..164f6f63f --- /dev/null +++ b/parser/testdata/00625_summing_merge_tree_merge/query.sql @@ -0,0 +1,35 @@ +-- Tags: no-msan +-- msan: too slow + +DROP TABLE IF EXISTS tab_00625; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE tab_00625 +( + date Date, + key UInt32, + testMap Nested( + k UInt16, + v UInt64) +) +ENGINE = SummingMergeTree(date, (date, key), 1); + +INSERT INTO tab_00625 SELECT + today(), + number, + [toUInt16(number)], + [number] +FROM system.numbers +LIMIT 8190; + +INSERT INTO tab_00625 SELECT + today(), + number + 8190, + [toUInt16(number)], + [number + 8190] +FROM system.numbers +LIMIT 10; + +OPTIMIZE TABLE tab_00625; + +DROP TABLE tab_00625; diff --git a/parser/testdata/00626_in_syntax/ast.json b/parser/testdata/00626_in_syntax/ast.json new file mode 100644 index 000000000..be4ed88fe --- /dev/null +++ b/parser/testdata/00626_in_syntax/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_2)" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001477808, + "rows_read": 10, + "bytes_read": 406 + } +} diff --git a/parser/testdata/00626_in_syntax/metadata.json b/parser/testdata/00626_in_syntax/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00626_in_syntax/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00626_in_syntax/query.sql b/parser/testdata/00626_in_syntax/query.sql new file mode 100644 index 000000000..ac7ec8977 --- /dev/null +++ b/parser/testdata/00626_in_syntax/query.sql @@ -0,0 +1,46 @@ +select (1, 2) in tuple((1, 
2)); +select (1, 2) in ((1, 2), (3, 4)); +select ((1, 2), (3, 4)) in ((1, 2), (3, 4)); +select ((1, 2), (3, 4)) in (((1, 2), (3, 4))); +select ((1, 2), (3, 4)) in tuple(((1, 2), (3, 4))); +select ((1, 2), (3, 4)) in (((1, 2), (3, 4)), ((5, 6), (7, 8))); + +select '-'; +select 1 in 1; +select 1 in tuple(1); +select tuple(1) in tuple(1); +select tuple(1) in tuple(tuple(1)); +select tuple(tuple(1)) in tuple(tuple(1)); +select tuple(tuple(1)) in tuple(tuple(tuple(1))); +select tuple(tuple(tuple(1))) in tuple(tuple(tuple(1))); + +select '-'; +select 1 in Null; +select 1 in tuple(Null); +select 1 in tuple(Null, 1); +select tuple(1) in tuple(tuple(Null)); +select tuple(1) in tuple(tuple(Null), tuple(1)); +select tuple(tuple(Null), tuple(1)) in tuple(tuple(Null), tuple(1)); + +select '-'; +select 1 in (1 + 1, 1 - 1); +select 1 in (0 + 1, 1, toInt8(sin(5))); +select (0 + 1, 1, toInt8(sin(5))) in (0 + 1, 1, toInt8(sin(5))); +select identity(tuple(1)) in (tuple(1), tuple(2)); +select identity(tuple(1)) in (tuple(0), tuple(2)); + +select '-'; +select identity((1, 2)) in (1, 2); +select identity((1, 2)) in ((1, 2), (3, 4)); + +select '-'; +select (1,2) as x, ((1,2),(3,4)) as y, 1 in x, x in y; + +select '-'; +select 1 in (select 1); +select tuple(1) in (select tuple(1)); +select (1, 2) in (select 1, 2); +select (1, 2) in (select (1, 2)); +select identity(tuple(1)) in (select tuple(1)); +select identity((1, 2)) in (select 1, 2); +select identity((1, 2)) in (select (1, 2)); diff --git a/parser/testdata/00626_replace_partition_from_table/ast.json b/parser/testdata/00626_replace_partition_from_table/ast.json new file mode 100644 index 000000000..a2d027b7e --- /dev/null +++ b/parser/testdata/00626_replace_partition_from_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery src (children 1)" + }, + { + "explain": " Identifier src" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001465219, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/00626_replace_partition_from_table/metadata.json b/parser/testdata/00626_replace_partition_from_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00626_replace_partition_from_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00626_replace_partition_from_table/query.sql b/parser/testdata/00626_replace_partition_from_table/query.sql new file mode 100644 index 000000000..f893883c7 --- /dev/null +++ b/parser/testdata/00626_replace_partition_from_table/query.sql @@ -0,0 +1,88 @@ +DROP TABLE IF EXISTS src; +DROP TABLE IF EXISTS dst; + +CREATE TABLE src (p UInt64, k String, d UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY k; +CREATE TABLE dst (p UInt64, k String, d UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY k SETTINGS merge_selector_base=1000; + +SELECT 'Initial'; +INSERT INTO src VALUES (0, '0', 1); +INSERT INTO src VALUES (1, '0', 1); +INSERT INTO src VALUES (1, '1', 1); +INSERT INTO src VALUES (2, '0', 1); + +INSERT INTO dst VALUES (0, '1', 2); +INSERT INTO dst VALUES (1, '1', 2), (1, '2', 2); +INSERT INTO dst VALUES (2, '1', 2); + +SELECT count(), sum(d) FROM src; +SELECT count(), sum(d) FROM dst; + + +SELECT 'REPLACE simple'; +ALTER TABLE dst REPLACE PARTITION 1 FROM src; +ALTER TABLE src DROP PARTITION 1; +SELECT count(), sum(d) FROM src; +SELECT count(), sum(d) FROM dst; + + +SELECT 'REPLACE empty'; +ALTER TABLE src DROP PARTITION 1; +ALTER TABLE dst 
REPLACE PARTITION 1 FROM src; +SELECT count(), sum(d) FROM dst; + + +SELECT 'REPLACE recursive'; +ALTER TABLE dst DROP PARTITION 1; +INSERT INTO dst VALUES (1, '1', 2), (1, '2', 2); + +CREATE TEMPORARY table test_block_numbers (m UInt64); +INSERT INTO test_block_numbers SELECT max(max_block_number) AS m FROM system.parts WHERE database=currentDatabase() AND table='dst' AND active AND name LIKE '1_%'; + +ALTER TABLE dst REPLACE PARTITION 1 FROM dst; +SELECT count(), sum(d) FROM dst; + +INSERT INTO test_block_numbers SELECT max(max_block_number) AS m FROM system.parts WHERE database=currentDatabase() AND table='dst' AND active AND name LIKE '1_%'; +SELECT (max(m) - min(m) > 1) AS new_block_is_generated FROM test_block_numbers; +DROP TEMPORARY TABLE test_block_numbers; + + +SELECT 'ATTACH FROM'; +ALTER TABLE dst DROP PARTITION 1; +DROP TABLE src; + +CREATE TABLE src (p UInt64, k String, d UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY k; +INSERT INTO src VALUES (1, '0', 1); +INSERT INTO src VALUES (1, '1', 1); +INSERT INTO src VALUES (2, '2', 1); +INSERT INTO src VALUES (3, '3', 1); + +SYSTEM STOP MERGES dst; +INSERT INTO dst VALUES (1, '1', 2), (1, '2', 0); +ALTER TABLE dst ATTACH PARTITION 1 FROM src; +SELECT count(), sum(d) FROM dst; + +ALTER TABLE dst ATTACH PARTITION ALL FROM src; +SELECT count(), sum(d) FROM dst; + +SELECT 'OPTIMIZE'; +SELECT count(), sum(d), uniqExact(_part) FROM dst; +SYSTEM START MERGES dst; +SET optimize_throw_if_noop=1; +OPTIMIZE TABLE dst; +SELECT count(), sum(d), uniqExact(_part) FROM dst; + + +SELECT 'After restart'; +DETACH TABLE dst; +ATTACH TABLE dst; +SELECT count(), sum(d) FROM dst; + +SELECT 'DETACH+ATTACH PARTITION'; +ALTER TABLE dst DETACH PARTITION 0; +ALTER TABLE dst DETACH PARTITION 1; +ALTER TABLE dst DETACH PARTITION 2; +ALTER TABLE dst ATTACH PARTITION 1; +SELECT count(), sum(d) FROM dst; + +DROP TABLE IF EXISTS src; +DROP TABLE IF EXISTS dst; diff --git a/parser/testdata/00627_recursive_alias/ast.json b/parser/testdata/00627_recursive_alias/ast.json new file mode 100644 index 000000000..48e66d887 --- /dev/null +++ b/parser/testdata/00627_recursive_alias/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier dummy (alias x)" + }, + { + "explain": " Function plus (alias dummy) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier dummy" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Function identity (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 
0.001381207, + "rows_read": 24, + "bytes_read": 992 + } +} diff --git a/parser/testdata/00627_recursive_alias/metadata.json b/parser/testdata/00627_recursive_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00627_recursive_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00627_recursive_alias/query.sql b/parser/testdata/00627_recursive_alias/query.sql new file mode 100644 index 000000000..75c3911ec --- /dev/null +++ b/parser/testdata/00627_recursive_alias/query.sql @@ -0,0 +1 @@ +select x from (select dummy as x, dummy + 1 as dummy order by identity(x)) format Null; diff --git a/parser/testdata/00628_in_lambda_on_merge_table_bug/ast.json b/parser/testdata/00628_in_lambda_on_merge_table_bug/ast.json new file mode 100644 index 000000000..0546f0f5b --- /dev/null +++ b/parser/testdata/00628_in_lambda_on_merge_table_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_in_tuple_1 (children 1)" + }, + { + "explain": " Identifier test_in_tuple_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001607269, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/00628_in_lambda_on_merge_table_bug/metadata.json b/parser/testdata/00628_in_lambda_on_merge_table_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00628_in_lambda_on_merge_table_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00628_in_lambda_on_merge_table_bug/query.sql b/parser/testdata/00628_in_lambda_on_merge_table_bug/query.sql new file mode 100644 index 000000000..ddf98149c --- /dev/null +++ b/parser/testdata/00628_in_lambda_on_merge_table_bug/query.sql @@ -0,0 +1,19 @@ +drop table if exists test_in_tuple_1; +drop table if exists test_in_tuple_2; +drop table if exists test_in_tuple; + +create table test_in_tuple_1 (key Int32, key_2 Int32, x Array(Int32), y Array(Int32)) engine = MergeTree order by (key, key_2); +create table test_in_tuple_2 (key Int32, key_2 Int32, x Array(Int32), y Array(Int32)) engine = MergeTree order by (key, key_2); +create table test_in_tuple as test_in_tuple_1 engine = Merge(currentDatabase(), '^test_in_tuple_[0-9]+$'); + +insert into test_in_tuple_1 values (1, 1, [1, 2], [1, 2]); +insert into test_in_tuple_2 values (2, 1, [1, 2], [1, 2]); +select key, arr_x, arr_y, _table from test_in_tuple left array join x as arr_x, y as arr_y order by _table, arr_x, arr_y; +select '-'; +select key, arr_x, arr_y, _table from test_in_tuple left array join x as arr_x, y as arr_y where (key_2, arr_x, arr_y) in (1, 1, 1) order by _table, arr_x, arr_y; +select '-'; +select key, arr_x, arr_y, _table from test_in_tuple left array join arrayFilter((t, x_0, x_1) -> (key_2, x_0, x_1) in (1, 1, 1), x, x ,y) as arr_x, arrayFilter((t, x_0, x_1) -> (key_2, x_0, x_1) in (1, 1, 1), y, x ,y) as arr_y where (key_2, arr_x, arr_y) in (1, 1, 1) order by _table, arr_x, arr_y; + +drop table if exists test_in_tuple_1; +drop table if exists test_in_tuple_2; +drop table if exists test_in_tuple; diff --git a/parser/testdata/00632_aggregation_window_funnel/ast.json b/parser/testdata/00632_aggregation_window_funnel/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00632_aggregation_window_funnel/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00632_aggregation_window_funnel/metadata.json 
b/parser/testdata/00632_aggregation_window_funnel/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00632_aggregation_window_funnel/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00632_aggregation_window_funnel/query.sql b/parser/testdata/00632_aggregation_window_funnel/query.sql new file mode 100644 index 000000000..4c0d21409 --- /dev/null +++ b/parser/testdata/00632_aggregation_window_funnel/query.sql @@ -0,0 +1,130 @@ +-- { echoOn } +drop table if exists funnel_test; + +create table funnel_test (timestamp UInt32, event UInt32) engine=Memory; +insert into funnel_test values (0,1000),(1,1001),(2,1002),(3,1003),(4,1004),(5,1005),(6,1006),(7,1007),(8,1008); + +select 1 = windowFunnel(10000)(timestamp, event = 1000) from funnel_test; +select 2 = windowFunnel(10000)(timestamp, event = 1000, event = 1001) from funnel_test; +select 3 = windowFunnel(10000)(timestamp, event = 1000, event = 1001, event = 1002) from funnel_test; +select 4 = windowFunnel(10000)(timestamp, event = 1000, event = 1001, event = 1002, event = 1008) from funnel_test; + + +select 1 = windowFunnel(1)(timestamp, event = 1000) from funnel_test; +select 3 = windowFunnel(2)(timestamp, event = 1003, event = 1004, event = 1005, event = 1006, event = 1007) from funnel_test; +select 4 = windowFunnel(3)(timestamp, event = 1003, event = 1004, event = 1005, event = 1006, event = 1007) from funnel_test; +select 5 = windowFunnel(4)(timestamp, event = 1003, event = 1004, event = 1005, event = 1006, event = 1007) from funnel_test; + + +drop table if exists funnel_test2; +create table funnel_test2 (uid UInt32 default 1,timestamp DateTime, event UInt32) engine=Memory; +insert into funnel_test2(timestamp, event) values ('2018-01-01 01:01:01',1001),('2018-01-01 01:01:02',1002),('2018-01-01 01:01:03',1003),('2018-01-01 01:01:04',1004),('2018-01-01 01:01:05',1005),('2018-01-01 01:01:06',1006),('2018-01-01 01:01:07',1007),('2018-01-01 01:01:08',1008); + + +select 5 = windowFunnel(4)(timestamp, event = 1003, event = 1004, event = 1005, event = 1006, event = 1007) from funnel_test2; +select 2 = windowFunnel(10000)(timestamp, event = 1001, event = 1008) from funnel_test2; +select 1 = windowFunnel(10000)(timestamp, event = 1008, event = 1001) from funnel_test2; +select 5 = windowFunnel(4)(timestamp, event = 1003, event = 1004, event = 1005, event = 1006, event = 1007) from funnel_test2; +select 4 = windowFunnel(4)(timestamp, event <= 1007, event >= 1002, event <= 1006, event >= 1004) from funnel_test2; + + +drop table if exists funnel_test_u64; +create table funnel_test_u64 (uid UInt32 default 1,timestamp UInt64, event UInt32) engine=Memory; +insert into funnel_test_u64(timestamp, event) values ( 1e14 + 1 ,1001),(1e14 + 2,1002),(1e14 + 3,1003),(1e14 + 4,1004),(1e14 + 5,1005),(1e14 + 6,1006),(1e14 + 7,1007),(1e14 + 8,1008); + + +select 5 = windowFunnel(4)(timestamp, event = 1003, event = 1004, event = 1005, event = 1006, event = 1007) from funnel_test_u64; +select 2 = windowFunnel(10000)(timestamp, event = 1001, event = 1008) from funnel_test_u64; +select 1 = windowFunnel(10000)(timestamp, event = 1008, event = 1001) from funnel_test_u64; +select 5 = windowFunnel(4)(timestamp, event = 1003, event = 1004, event = 1005, event = 1006, event = 1007) from funnel_test_u64; +select 4 = windowFunnel(4)(timestamp, event <= 1007, event >= 1002, event <= 1006, event >= 1004) from funnel_test_u64; + + +drop table if exists funnel_test_strict; +create table funnel_test_strict (timestamp 
UInt32, event UInt32) engine=Memory; +insert into funnel_test_strict values (00,1000),(10,1001),(20,1002),(30,1003),(40,1004),(50,1005),(51,1005),(60,1006),(70,1007),(80,1008); + +select 6 = windowFunnel(10000, 'strict_deduplication')(timestamp, event = 1000, event = 1001, event = 1002, event = 1003, event = 1004, event = 1005, event = 1006) from funnel_test_strict; +select 7 = windowFunnel(10000)(timestamp, event = 1000, event = 1001, event = 1002, event = 1003, event = 1004, event = 1005, event = 1006) from funnel_test_strict; + + +drop table funnel_test; +drop table funnel_test2; +drop table funnel_test_u64; +drop table funnel_test_strict; + +drop table if exists funnel_test_strict_order; +create table funnel_test_strict_order (dt DateTime, user int, event String) engine = MergeTree() partition by dt order by user; +insert into funnel_test_strict_order values (1, 1, 'a') (2, 1, 'b') (3, 1, 'c'); +insert into funnel_test_strict_order values (1, 2, 'a') (2, 2, 'd') (3, 2, 'b') (4, 2, 'c'); +insert into funnel_test_strict_order values (1, 3, 'a') (2, 3, 'a') (3, 3, 'b') (4, 3, 'b') (5, 3, 'c') (6, 3, 'c'); +insert into funnel_test_strict_order values (1, 4, 'a') (2, 4, 'a') (3, 4, 'a') (4, 4, 'a') (5, 4, 'b') (6, 4, 'b') (7, 4, 'c') (8, 4, 'c'); +insert into funnel_test_strict_order values (1, 5, 'a') (2, 5, 'a') (3, 5, 'b') (4, 5, 'b') (5, 5, 'd') (6, 5, 'c') (7, 5, 'c'); +insert into funnel_test_strict_order values (1, 6, 'c') (2, 6, 'c') (3, 6, 'b') (4, 6, 'b') (5, 6, 'a') (6, 6, 'a'); +select user, windowFunnel(86400)(dt, event='a', event='b', event='c') as s from funnel_test_strict_order group by user order by user format JSONCompactEachRow; +select user, windowFunnel(86400, 'strict_order')(dt, event='a', event='b', event='c') as s from funnel_test_strict_order group by user order by user format JSONCompactEachRow; +select user, windowFunnel(86400, 'strict_deduplication', 'strict_order')(dt, event='a', event='b', event='c') as s from funnel_test_strict_order group by user order by user format JSONCompactEachRow; +insert into funnel_test_strict_order values (1, 7, 'a') (2, 7, 'c') (3, 7, 'b'); +select user, windowFunnel(10, 'strict_order')(dt, event = 'a', event = 'b', event = 'c') as s from funnel_test_strict_order where user = 7 group by user format JSONCompactEachRow; +drop table funnel_test_strict_order; + +--https://github.com/ClickHouse/ClickHouse/issues/27469 +drop table if exists strict_BiteTheDDDD; +create table strict_BiteTheDDDD (ts UInt64, event String) engine = Log(); +insert into strict_BiteTheDDDD values (1,'a') (2,'b') (3,'c') (4,'b') (5,'d'); +select 3 = windowFunnel(86400, 'strict_deduplication')(ts, event='a', event='b', event='c', event='d') from strict_BiteTheDDDD format JSONCompactEachRow; +drop table strict_BiteTheDDDD; + +drop table if exists funnel_test_non_null; +create table funnel_test_non_null (`dt` DateTime, `u` int, `a` Nullable(String), `b` Nullable(String)) engine = MergeTree() partition by dt order by u; +insert into funnel_test_non_null values (1, 1, 'a1', 'b1') (2, 1, 'a2', 'b2'); +insert into funnel_test_non_null values (1, 2, 'a1', null) (2, 2, 'a2', null); +insert into funnel_test_non_null values (1, 3, null, null); +insert into funnel_test_non_null values (1, 4, null, 'b1') (2, 4, 'a2', null) (3, 4, null, 'b3'); +select u, windowFunnel(86400)(dt, COALESCE(a, '') = 'a1', COALESCE(a, '') = 'a2') as s from funnel_test_non_null group by u order by u format JSONCompactEachRow; +select u, windowFunnel(86400)(dt, COALESCE(a, '') = 'a1', COALESCE(b, '') 
= 'b2') as s from funnel_test_non_null group by u order by u format JSONCompactEachRow; +select u, windowFunnel(86400)(dt, a is null and b is null) as s from funnel_test_non_null group by u order by u format JSONCompactEachRow; +select u, windowFunnel(86400)(dt, a is null, COALESCE(b, '') = 'b3') as s from funnel_test_non_null group by u order by u format JSONCompactEachRow; +select u, windowFunnel(86400, 'strict_order')(dt, a is null, COALESCE(b, '') = 'b3') as s from funnel_test_non_null group by u order by u format JSONCompactEachRow; +drop table funnel_test_non_null; + +create table funnel_test_strict_increase (timestamp UInt32, event UInt32) engine=Memory; +insert into funnel_test_strict_increase values (0,1000),(1,1001),(1,1002),(1,1003),(2,1004); + +select 5 = windowFunnel(10000)(timestamp, event = 1000, event = 1001, event = 1002, event = 1003, event = 1004) from funnel_test_strict_increase; +select 2 = windowFunnel(10000, 'strict_increase')(timestamp, event = 1000, event = 1001, event = 1002, event = 1003, event = 1004) from funnel_test_strict_increase; +select 3 = windowFunnel(10000)(timestamp, event = 1004, event = 1004, event = 1004) from funnel_test_strict_increase; +select 1 = windowFunnel(10000, 'strict_once')(timestamp, event = 1004, event = 1004, event = 1004) from funnel_test_strict_increase; +select 1 = windowFunnel(10000, 'strict_increase')(timestamp, event = 1004, event = 1004, event = 1004) from funnel_test_strict_increase; + + + +DROP TABLE IF EXISTS funnel_test2; +create table funnel_test2 (event_ts UInt32, result String, uid UInt32) engine=Memory; +insert into funnel_test2 SELECT data.1, data.2, data.3 FROM ( + SELECT arrayJoin([ + (100, 'failure', 234), + (200, 'success', 345), + (210, 'failure', 345), + (230, 'success', 345), + (250, 'failure', 234), + (180, 'failure', 123), + (220, 'failure', 123), + (250, 'success', 123) + ]) data); + +SELECT '-'; +SELECT uid, windowFunnel(200, 'strict_once', 'strict_increase')( toUInt32(event_ts), result='failure', result='failure', result='success' ) +FROM funnel_test2 WHERE event_ts >= 0 AND event_ts <= 300 GROUP BY uid ORDER BY uid; +SELECT '-'; +SELECT uid, windowFunnel(200, 'strict_once')( toUInt32(event_ts), result='failure', result='failure', result='success' ) +FROM funnel_test2 WHERE event_ts >= 0 AND event_ts <= 300 GROUP BY uid ORDER BY uid; +SELECT '-'; +SELECT uid, windowFunnel(200, 'strict_once', 'strict_deduplication')( toUInt32(event_ts), result='failure', result='failure', result='success' ) +FROM funnel_test2 WHERE event_ts >= 0 AND event_ts <= 300 GROUP BY uid ORDER BY uid; +SELECT '-'; + +DROP TABLE IF EXISTS funnel_test2; + + +drop table funnel_test_strict_increase; diff --git a/parser/testdata/00632_get_sample_block_cache/ast.json b/parser/testdata/00632_get_sample_block_cache/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00632_get_sample_block_cache/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00632_get_sample_block_cache/metadata.json b/parser/testdata/00632_get_sample_block_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00632_get_sample_block_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00632_get_sample_block_cache/query.sql b/parser/testdata/00632_get_sample_block_cache/query.sql new file mode 100644 index 000000000..b6d591da0 --- /dev/null +++ b/parser/testdata/00632_get_sample_block_cache/query.sql @@ -0,0 +1,181 @@ +-- Tags: long, 
no-object-storage, no-asan, no-msan + +SET joined_subquery_requires_alias = 0; + +-- We are no longer interested in the old analyzer. +SET enable_analyzer = 1; + +-- This test (SELECT) without cache can take tens of minutes +DROP TABLE IF EXISTS dict_string; +DROP TABLE IF EXISTS dict_ui64; +DROP TABLE IF EXISTS video_views; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE video_views +( + entityIri String, + courseId UInt64, + learnerId UInt64, + actorId UInt64, + duration UInt16, + fullWatched UInt8, + fullWatchedDate DateTime, + fullWatchedDuration UInt16, + fullWatchedTime UInt16, + fullWatchedViews UInt16, + `views.viewId` Array(String), + `views.startedAt` Array(DateTime), + `views.endedAt` Array(DateTime), + `views.viewDuration` Array(UInt16), + `views.watchedPart` Array(Float32), + `views.fullWatched` Array(UInt8), + `views.progress` Array(Float32), + `views.reject` Array(UInt8), + `views.viewNumber` Array(UInt16), + `views.repeatingView` Array(UInt8), + `views.ranges` Array(String), + version DateTime +) +ENGINE = ReplacingMergeTree(version) +PARTITION BY entityIri +ORDER BY (learnerId, entityIri) +SETTINGS index_granularity = 8192; + +CREATE TABLE dict_string (entityIri String) ENGINE = Memory; +CREATE TABLE dict_ui64 (learnerId UInt64) ENGINE = Memory; + +SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration`, `full-watched-learners-count`, `overall-watchers-count`, `overall-full-watched-learners-count`, `views-count`, `time-before-full-watched-average`, if (isNaN((`overall-full-watched-learners-count`/`overall-watchers-count`) * 100), 0, (`overall-full-watched-learners-count`/`overall-watchers-count`) * 100) as `overall-watched-part`, if (isNaN((`full-watched-learners-count`/`watchers-count` * 100)), 0, (`full-watched-learners-count`/`watchers-count` * 100)) as `full-watched-part`, if (isNaN((`rejects-count`/`views-count` * 100)), 0, (`rejects-count`/`views-count` * 100)) as `rejects-part` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration`, `full-watched-learners-count`, `overall-watchers-count`, `overall-full-watched-learners-count`, `views-count`, `time-before-full-watched-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration`, `full-watched-learners-count`, `overall-watchers-count`, `overall-full-watched-learners-count`, `views-count` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration`, `full-watched-learners-count`, `overall-watchers-count`, `overall-full-watched-learners-count` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`,
`progress-average`, `views-count-before-full-watched-average`, `duration`, `full-watched-learners-count`, `overall-watchers-count` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration`, `full-watched-learners-count` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average`, `duration` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average`, `views-count-before-full-watched-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count`, `progress-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average`, `rejects-count` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average`, `watched-part-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average`, `repeating-views-count-average`, `views-duration-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, + `reject-views-duration-average`, `repeating-views-count-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average`, `reject-views-duration-average` FROM (SELECT `entityIri`, `watchers-count`, `time-repeating-average` FROM (SELECT `entityIri`, `watchers-count` FROM (SELECT `entityIri` FROM dict_string) ANY LEFT JOIN (SELECT uniq(learnerId) as `watchers-count`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(viewDurationSum) as `time-repeating-average`, `entityIri` FROM (SELECT sum(views.viewDuration) as viewDurationSum, `entityIri`, `learnerId` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `views`.`repeatingView` = 1 AND `learnerId` IN dict_ui64 GROUP BY `learnerId`, `entityIri`) GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(views.viewDuration) as `reject-views-duration-average`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `views`.`reject` = 1 AND `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(viewsCount) as `repeating-views-count-average`, `entityIri` FROM (SELECT count() as viewsCount, `learnerId`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `courseId` = 1 AND `entityIri` IN dict_string WHERE `views`.`repeatingView` = 1 AND `learnerId` IN dict_ui64 GROUP BY `learnerId`, `entityIri`) GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(views.viewDuration) as `views-duration-average`, `entityIri` 
FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(views.watchedPart) as `watched-part-average`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT count() as `rejects-count`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `views`.`reject` = 1 AND `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(progressMax) as `progress-average`, `entityIri` FROM (SELECT max(views.progress) as progressMax, `entityIri`, `learnerId` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `learnerId` IN dict_ui64 GROUP BY `learnerId`, `entityIri`) GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(fullWatchedViews) as `views-count-before-full-watched-average`, `entityIri` FROM `video_views` FINAL PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT any(duration) as `duration`, `entityIri` FROM `video_views` FINAL PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT uniq(learnerId) as `full-watched-learners-count`, `entityIri` FROM `video_views` FINAL PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `fullWatched` = 1 AND `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT uniq(learnerId) as `overall-watchers-count`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT uniq(learnerId) as `overall-full-watched-learners-count`, + `entityIri` FROM `video_views` FINAL PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `fullWatched` = 1 AND `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT count() as `views-count`, `entityIri` FROM `video_views` FINAL ARRAY JOIN `views` PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`) ANY LEFT JOIN (SELECT avg(fullWatchedTime) as `time-before-full-watched-average`, `entityIri` FROM `video_views` FINAL PREWHERE `entityIri` IN dict_string AND `courseId` = 1 WHERE `learnerId` IN dict_ui64 GROUP BY `entityIri`) USING `entityIri`); + +SELECT 'Still alive'; + +DROP TABLE dict_string; +DROP TABLE dict_ui64; +DROP TABLE video_views; + + + +-- Test for tsan: Ensure cache is used from one thread +SET max_threads = 32, max_memory_usage = '10G'; + +DROP TABLE IF EXISTS sample_00632; + +CREATE TABLE sample_00632 (d Date DEFAULT '2000-01-01', x UInt16) ENGINE = MergeTree(d, x, x, 10); +INSERT INTO sample_00632 (x) SELECT toUInt16(number) AS x FROM system.numbers LIMIT 65536; + +SET max_execution_time = 300; + +SELECT count() +FROM +( + SELECT + x, + count() AS c + FROM + ( + SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM 
sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION 
ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + UNION ALL SELECT * FROM ( SELECT * FROM sample_00632 WHERE x > 0 ) + ) + GROUP BY x + ORDER BY x ASC +); +DROP TABLE sample_00632; diff --git a/parser/testdata/00633_func_or_in/ast.json b/parser/testdata/00633_func_or_in/ast.json new file mode 100644 index 000000000..86e7c3268 --- /dev/null +++ b/parser/testdata/00633_func_or_in/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery orin_test (children 1)" + }, + { + "explain": " 
Identifier orin_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001150066, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00633_func_or_in/metadata.json b/parser/testdata/00633_func_or_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00633_func_or_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00633_func_or_in/query.sql b/parser/testdata/00633_func_or_in/query.sql new file mode 100644 index 000000000..8aa467b4c --- /dev/null +++ b/parser/testdata/00633_func_or_in/query.sql @@ -0,0 +1,8 @@ +drop table if exists orin_test; + +create table orin_test (c1 Int32) engine=Memory; +insert into orin_test values(1), (100); + +select minus(c1 = 1 or c1=2 or c1 =3, c1=5) from orin_test; + +drop table orin_test; diff --git a/parser/testdata/00634_rename_view/ast.json b/parser/testdata/00634_rename_view/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00634_rename_view/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00634_rename_view/metadata.json b/parser/testdata/00634_rename_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00634_rename_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00634_rename_view/query.sql b/parser/testdata/00634_rename_view/query.sql new file mode 100644 index 000000000..7a1c096da --- /dev/null +++ b/parser/testdata/00634_rename_view/query.sql @@ -0,0 +1,24 @@ +-- Tags: no-replicated-database +-- Tag no-replicated-database: Does not support renaming of multiple tables in a single query + +DROP TABLE IF EXISTS test1_00634; +DROP TABLE IF EXISTS test2_00634; +DROP TABLE IF EXISTS v_test1; +DROP TABLE IF EXISTS v_test2; +DROP TABLE IF EXISTS v_test11; +DROP TABLE IF EXISTS v_test22; + +create table test1_00634 (id UInt8) engine = TinyLog; +create table test2_00634 (id UInt8) engine = TinyLog; + +create view v_test1 as select id from test1_00634; +create view v_test2 as select id from test2_00634; + +rename table v_test1 to v_test11, v_test2 to v_test22; + +SELECT name, engine FROM system.tables WHERE name IN ('v_test1', 'v_test2', 'v_test11', 'v_test22') AND database = currentDatabase() ORDER BY name; + +DROP TABLE test1_00634; +DROP TABLE test2_00634; +DROP TABLE v_test11; +DROP TABLE v_test22; diff --git a/parser/testdata/00635_shard_distinct_order_by/ast.json b/parser/testdata/00635_shard_distinct_order_by/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00635_shard_distinct_order_by/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00635_shard_distinct_order_by/metadata.json b/parser/testdata/00635_shard_distinct_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00635_shard_distinct_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00635_shard_distinct_order_by/query.sql b/parser/testdata/00635_shard_distinct_order_by/query.sql new file mode 100644 index 000000000..1782db30d --- /dev/null +++ b/parser/testdata/00635_shard_distinct_order_by/query.sql @@ -0,0 +1,10 @@ +-- Tags: shard + +DROP TABLE IF EXISTS data; +CREATE TABLE data (s String, x Int8, y Int8) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO data VALUES ('hello', 0, 0), ('world', 0, 0), ('hello', 1, -1), ('world', -1, 1); + +SELECT DISTINCT s FROM remote('127.0.0.{1,2}', currentDatabase(), data)
ORDER BY x + y, s; + +DROP TABLE data; diff --git a/parser/testdata/00639_startsWith/ast.json b/parser/testdata/00639_startsWith/ast.json new file mode 100644 index 000000000..6fd689fe0 --- /dev/null +++ b/parser/testdata/00639_startsWith/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function startsWith (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier s" + }, + { + "explain": " Literal 'He'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (alias s) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['', 'H', 'He', 'Hellow', '3434', 'fffffffffdHe']" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001743014, + "rows_read": 19, + "bytes_read": 823 + } +} diff --git a/parser/testdata/00639_startsWith/metadata.json b/parser/testdata/00639_startsWith/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00639_startsWith/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00639_startsWith/query.sql b/parser/testdata/00639_startsWith/query.sql new file mode 100644 index 000000000..ae7649b8b --- /dev/null +++ b/parser/testdata/00639_startsWith/query.sql @@ -0,0 +1,15 @@ +SELECT startsWith(s, 'He') FROM (SELECT arrayJoin(['', 'H', 'He', 'Hellow', '3434', 'fffffffffdHe']) AS s); +SELECT startsWith(s, '') FROM (SELECT arrayJoin(['', 'h', 'hi']) AS s); +SELECT startsWith('123', '123'); +SELECT startsWith('123', '12'); +SELECT startsWith('123', '1234'); +SELECT startsWith('123', ''); + +DROP TABLE IF EXISTS startsWith_test; +CREATE TABLE startsWith_test(S1 String, S2 String, S3 FixedString(2)) ENGINE=Memory; +INSERT INTO startsWith_test values ('11', '22', '33'), ('a', 'a', 'bb'), ('abc', 'ab', '23'); + +SELECT COUNT() FROM startsWith_test WHERE startsWith(S1, S1); +SELECT COUNT() FROM startsWith_test WHERE startsWith(S1, S2); +SELECT COUNT() FROM startsWith_test WHERE startsWith(S2, S3); +DROP TABLE startsWith_test; diff --git a/parser/testdata/00640_endsWith/ast.json b/parser/testdata/00640_endsWith/ast.json new file mode 100644 index 000000000..cc62cbe22 --- /dev/null +++ b/parser/testdata/00640_endsWith/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function endsWith (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier s" + }, + { + "explain": " Literal 'ow'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " 
TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (alias s) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['', 'o', 'ow', 'Hellow', '3434', 'owfffffffdHe']" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001416866, + "rows_read": 19, + "bytes_read": 821 + } +} diff --git a/parser/testdata/00640_endsWith/metadata.json b/parser/testdata/00640_endsWith/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00640_endsWith/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00640_endsWith/query.sql b/parser/testdata/00640_endsWith/query.sql new file mode 100644 index 000000000..719b6a301 --- /dev/null +++ b/parser/testdata/00640_endsWith/query.sql @@ -0,0 +1,17 @@ +SELECT endsWith(s, 'ow') FROM (SELECT arrayJoin(['', 'o', 'ow', 'Hellow', '3434', 'owfffffffdHe']) AS s); +SELECT endsWith(s, '') FROM (SELECT arrayJoin(['', 'h', 'hi']) AS s); +SELECT endsWith('123', '3'); +SELECT endsWith('123', '23'); +SELECT endsWith('123', '32'); +SELECT endsWith('123', ''); + +DROP TABLE IF EXISTS endsWith_test; +CREATE TABLE endsWith_test(S1 String, S2 String, S3 FixedString(2)) ENGINE=Memory; +INSERT INTO endsWith_test values ('11', '22', '33'), ('a', 'a', 'bb'), ('abc', 'bc', '23'); + +SELECT COUNT() FROM endsWith_test WHERE endsWith(S1, S1); +SELECT COUNT() FROM endsWith_test WHERE endsWith(S1, S2); +SELECT COUNT() FROM endsWith_test WHERE endsWith(S2, S3); + +SELECT endsWith([], 'str'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +DROP TABLE endsWith_test; diff --git a/parser/testdata/00642_cast/ast.json b/parser/testdata/00642_cast/ast.json new file mode 100644 index 000000000..626aba9be --- /dev/null +++ b/parser/testdata/00642_cast/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'Enum8(\\'hello\\' = 1, \\'world\\' = 2)'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001710037, + "rows_read": 8, + "bytes_read": 316 + } +} diff --git a/parser/testdata/00642_cast/metadata.json b/parser/testdata/00642_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00642_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00642_cast/query.sql b/parser/testdata/00642_cast/query.sql new file mode 100644 index 000000000..b4fa21ce3 --- /dev/null +++ b/parser/testdata/00642_cast/query.sql @@ -0,0 +1,48 @@ +SELECT CAST(1 AS Enum8('hello' = 1, 'world' = 2)); +SELECT cast(1 AS Enum8('hello' = 1, 'world' = 2)); + +SELECT CAST(1, 'Enum8(\'hello\' = 1, \'world\' = 2)'); +SELECT cast(1, 'Enum8(\'hello\' = 1, \'world\' = 2)'); + +SELECT CAST(1 AS Enum8( + 'hello' = 1, + 'world' = 2)); + +SELECT cast(1 AS Enum8( + 'hello' 
= 1, + 'world' = 2)); + +SELECT CAST(1, 'Enum8(\'hello\' = 1,\n\t\'world\' = 2)'); +SELECT cast(1, 'Enum8(\'hello\' = 1,\n\t\'world\' = 2)'); + +SELECT toTimeZone(CAST(1 AS TIMESTAMP), 'UTC'); + +DROP TABLE IF EXISTS cast; +CREATE TABLE cast +( + x UInt8, + e Enum8 + ( + 'hello' = 1, + 'world' = 2 + ) + DEFAULT + CAST + ( + x + AS + Enum8 + ( + 'hello' = 1, + 'world' = 2 + ) + ) +) ENGINE = MergeTree ORDER BY e; + +SHOW CREATE TABLE cast FORMAT TSVRaw; +DESC TABLE cast; + +INSERT INTO cast (x) VALUES (1); +SELECT * FROM cast; + +DROP TABLE cast; diff --git a/parser/testdata/00643_cast_zookeeper_long/ast.json b/parser/testdata/00643_cast_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00643_cast_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00643_cast_zookeeper_long/metadata.json b/parser/testdata/00643_cast_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00643_cast_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00643_cast_zookeeper_long/query.sql b/parser/testdata/00643_cast_zookeeper_long/query.sql new file mode 100644 index 000000000..85f87d11a --- /dev/null +++ b/parser/testdata/00643_cast_zookeeper_long/query.sql @@ -0,0 +1,42 @@ +-- Tags: long, zookeeper + +SET database_atomic_wait_for_drop_and_detach_synchronously=1; + +DROP TABLE IF EXISTS cast1; +DROP TABLE IF EXISTS cast2; + +CREATE TABLE cast1 +( + x UInt8, + e Enum8 + ( + 'hello' = 1, + 'world' = 2 + ) + DEFAULT + CAST + ( + x + AS + Enum8 + ( + 'hello' = 1, + 'world' = 2 + ) + ) +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00643/cast', 'r1') ORDER BY e; + +SHOW CREATE TABLE cast1 FORMAT TSVRaw; +DESC TABLE cast1; + +INSERT INTO cast1 (x) VALUES (1); +SELECT * FROM cast1; + +CREATE TABLE cast2 AS cast1 ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00643/cast', 'r2') ORDER BY e; + +SYSTEM SYNC REPLICA cast2; + +SELECT * FROM cast2; + +DROP TABLE cast1; +DROP TABLE cast2; diff --git a/parser/testdata/00644_different_expressions_with_same_alias/ast.json b/parser/testdata/00644_different_expressions_with_same_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00644_different_expressions_with_same_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00644_different_expressions_with_same_alias/metadata.json b/parser/testdata/00644_different_expressions_with_same_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00644_different_expressions_with_same_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00644_different_expressions_with_same_alias/query.sql b/parser/testdata/00644_different_expressions_with_same_alias/query.sql new file mode 100644 index 000000000..85b318e9d --- /dev/null +++ b/parser/testdata/00644_different_expressions_with_same_alias/query.sql @@ -0,0 +1,17 @@ +SELECT + dummy, + SumDum, + ProblemField +FROM +( + SELECT + dummy, + sum(dummy) AS SumDum, + 1 / SumDum AS ProblemField + FROM system.one + GROUP BY dummy + ORDER BY + dummy ASC, + SumDum ASC, + CAST(ifNull(ProblemField, -inf) AS Float64) ASC +); diff --git a/parser/testdata/00645_date_time_input_format/ast.json b/parser/testdata/00645_date_time_input_format/ast.json new file mode 100644 index 000000000..64b512b35 --- /dev/null +++ 
b/parser/testdata/00645_date_time_input_format/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test_00645 (children 3)" + }, + { + "explain": " Identifier test_00645" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration d (children 1)" + }, + { + "explain": " DataType DateTime" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001553989, + "rows_read": 8, + "bytes_read": 287 + } +} diff --git a/parser/testdata/00645_date_time_input_format/metadata.json b/parser/testdata/00645_date_time_input_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00645_date_time_input_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00645_date_time_input_format/query.sql b/parser/testdata/00645_date_time_input_format/query.sql new file mode 100644 index 000000000..3d7930570 --- /dev/null +++ b/parser/testdata/00645_date_time_input_format/query.sql @@ -0,0 +1,4 @@ +CREATE TEMPORARY TABLE test_00645 (d DateTime) ENGINE = Memory; +SET date_time_input_format = 'best_effort'; +INSERT INTO test_00645 VALUES ('2018-06-08T01:02:03.000Z'); +SELECT toTimeZone(d, 'UTC') FROM test_00645; diff --git a/parser/testdata/00646_weird_mmx/ast.json b/parser/testdata/00646_weird_mmx/ast.json new file mode 100644 index 000000000..4b7cc2422 --- /dev/null +++ b/parser/testdata/00646_weird_mmx/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery weird_mmx (children 1)" + }, + { + "explain": " Identifier weird_mmx" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001438474, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00646_weird_mmx/metadata.json b/parser/testdata/00646_weird_mmx/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00646_weird_mmx/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00646_weird_mmx/query.sql b/parser/testdata/00646_weird_mmx/query.sql new file mode 100644 index 000000000..27016d3d6 --- /dev/null +++ b/parser/testdata/00646_weird_mmx/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS weird_mmx; + +CREATE TABLE weird_mmx (x Array(UInt64)) ENGINE = TinyLog; +-- this triggers overlapping matches in LZ4 decompression routine; 915 is the minimum number +-- see comment in LZ4_decompression_faster.cpp about usage of MMX registers +INSERT INTO weird_mmx SELECT range(number % 10) FROM system.numbers LIMIT 915; +SELECT sum(length(*)) FROM weird_mmx; + +DROP TABLE weird_mmx; diff --git a/parser/testdata/00647_histogram/ast.json b/parser/testdata/00647_histogram/ast.json new file mode 100644 index 000000000..4019ead15 --- /dev/null +++ b/parser/testdata/00647_histogram/ast.json @@ -0,0 +1,100 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function histogram (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function minus (children 1)" + }, 
+ { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_20" + } + ], + + "rows": 26, + + "statistics": + { + "elapsed": 0.001718506, + "rows_read": 26, + "bytes_read": 1067 + } +} diff --git a/parser/testdata/00647_histogram/metadata.json b/parser/testdata/00647_histogram/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00647_histogram/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00647_histogram/query.sql b/parser/testdata/00647_histogram/query.sql new file mode 100644 index 000000000..08350814d --- /dev/null +++ b/parser/testdata/00647_histogram/query.sql @@ -0,0 +1,9 @@ +select histogram(5)(number-10) from (select * from system.numbers limit 20); +select histogram(5)(number) from (select * from system.numbers limit 20); + +WITH arrayJoin(histogram(3)(sin(number))) AS res select round(res.1, 2), round(res.2, 2), round(res.3, 2) from (select * from system.numbers limit 10); +WITH arrayJoin(histogram(1)(sin(number-40))) AS res SELECT round(res.1, 2), round(res.2, 2), round(res.3, 2) from (select * from system.numbers limit 80); + +SELECT histogram(10)(-2); + +select histogramIf(3)(number, number > 11) from (select * from system.numbers limit 10); diff --git a/parser/testdata/00647_histogram_negative/ast.json b/parser/testdata/00647_histogram_negative/ast.json new file mode 100644 index 000000000..e44525537 --- /dev/null +++ b/parser/testdata/00647_histogram_negative/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery histogram (children 1)" + }, + { + "explain": " Identifier histogram" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001310797, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00647_histogram_negative/metadata.json b/parser/testdata/00647_histogram_negative/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00647_histogram_negative/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00647_histogram_negative/query.sql b/parser/testdata/00647_histogram_negative/query.sql new file mode 100644 index 000000000..2112f93bc --- /dev/null +++ b/parser/testdata/00647_histogram_negative/query.sql @@ -0,0 +1,6 @@ +drop table if exists histogram; +create table histogram(num Int64) engine=TinyLog; +insert into histogram values(-1); +insert into histogram values(-1); +select histogram(2)(num) from histogram; +drop table if exists histogram; diff --git a/parser/testdata/00647_multiply_aggregation_state/ast.json 
b/parser/testdata/00647_multiply_aggregation_state/ast.json new file mode 100644 index 000000000..96a9c7aec --- /dev/null +++ b/parser/testdata/00647_multiply_aggregation_state/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001485922, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00647_multiply_aggregation_state/metadata.json b/parser/testdata/00647_multiply_aggregation_state/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00647_multiply_aggregation_state/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00647_multiply_aggregation_state/query.sql b/parser/testdata/00647_multiply_aggregation_state/query.sql new file mode 100644 index 000000000..0cfdd27f9 --- /dev/null +++ b/parser/testdata/00647_multiply_aggregation_state/query.sql @@ -0,0 +1,26 @@ +SET send_logs_level = 'fatal'; + +SELECT countMerge(x) AS y FROM ( SELECT countState() * 2 AS x FROM ( SELECT 1 )); +SELECT countMerge(x) AS y FROM ( SELECT countState() * 0 AS x FROM ( SELECT 1 UNION ALL SELECT 2)); +SELECT sumMerge(y) AS z FROM ( SELECT sumState(x) * 11 AS y FROM ( SELECT 1 AS x UNION ALL SELECT 2 AS x)); +SELECT countMerge(x) AS y FROM ( SELECT 2 * countState() AS x FROM ( SELECT 1 )); +SELECT countMerge(x) AS y FROM ( SELECT 0 * countState() AS x FROM ( SELECT 1 UNION ALL SELECT 2)); +SELECT sumMerge(y) AS z FROM ( SELECT 3 * sumState(x) * 2 AS y FROM ( SELECT 1 AS x UNION ALL SELECT 2 AS x)); + +DROP TABLE IF EXISTS mult_aggregation; +CREATE TABLE mult_aggregation(a UInt32, b UInt32) ENGINE = Memory; +INSERT INTO mult_aggregation VALUES(1, 1); +INSERT INTO mult_aggregation VALUES(1, 3); + +SELECT sumMerge(x * 5), sumMerge(x) FROM (SELECT sumState(b) AS x FROM mult_aggregation); +SELECT uniqMerge(x * 10) FROM (SELECT uniqState(b) AS x FROM mult_aggregation); +SELECT maxMerge(x * 10) FROM (SELECT maxState(b) AS x FROM mult_aggregation); +SELECT avgMerge(x * 10) FROM (SELECT avgState(b) AS x FROM mult_aggregation); + +SELECT groupArrayMerge(y * 5) FROM (SELECT groupArrayState(x) AS y FROM (SELECT 1 AS x)); +SELECT groupArrayMerge(2)(y * 5) FROM (SELECT groupArrayState(2)(x) AS y FROM (SELECT 1 AS x)); +SELECT groupUniqArrayMerge(y * 5) FROM (SELECT groupUniqArrayState(x) AS y FROM (SELECT 1 AS x)); + +SELECT sumMerge(y * a) FROM (SELECT a, sumState(b) AS y FROM mult_aggregation GROUP BY a); -- { serverError ILLEGAL_COLUMN} + +DROP TABLE IF EXISTS mult_aggregation; diff --git a/parser/testdata/00647_select_numbers_with_offset/ast.json b/parser/testdata/00647_select_numbers_with_offset/ast.json new file mode 100644 index 000000000..f459e5dde --- /dev/null +++ b/parser/testdata/00647_select_numbers_with_offset/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001162502, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00647_select_numbers_with_offset/metadata.json b/parser/testdata/00647_select_numbers_with_offset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00647_select_numbers_with_offset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00647_select_numbers_with_offset/query.sql b/parser/testdata/00647_select_numbers_with_offset/query.sql 
new file mode 100644 index 000000000..3cea011a4 --- /dev/null +++ b/parser/testdata/00647_select_numbers_with_offset/query.sql @@ -0,0 +1,2 @@ +SET max_rows_to_read = 1; +SELECT * FROM numbers(1, 1); diff --git a/parser/testdata/00648_replacing_empty_set_from_prewhere/ast.json b/parser/testdata/00648_replacing_empty_set_from_prewhere/ast.json new file mode 100644 index 000000000..0166bb31c --- /dev/null +++ b/parser/testdata/00648_replacing_empty_set_from_prewhere/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery final_test (children 1)" + }, + { + "explain": " Identifier final_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001560772, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00648_replacing_empty_set_from_prewhere/metadata.json b/parser/testdata/00648_replacing_empty_set_from_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00648_replacing_empty_set_from_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00648_replacing_empty_set_from_prewhere/query.sql b/parser/testdata/00648_replacing_empty_set_from_prewhere/query.sql new file mode 100644 index 000000000..bbeb4dd31 --- /dev/null +++ b/parser/testdata/00648_replacing_empty_set_from_prewhere/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS final_test; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE final_test (id String, version Date) ENGINE = ReplacingMergeTree(version, id, 8192); +INSERT INTO final_test (id, version) VALUES ('2018-01-01', '2018-01-01'); +SELECT * FROM final_test FINAL PREWHERE id == '2018-01-02'; +DROP TABLE final_test; diff --git a/parser/testdata/00649_quantile_tdigest_negative/ast.json b/parser/testdata/00649_quantile_tdigest_negative/ast.json new file mode 100644 index 000000000..46c21a73a --- /dev/null +++ b/parser/testdata/00649_quantile_tdigest_negative/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantileTDigest (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[Int64_-1, Int64_-2, Int64_-3]" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_0.5" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.00142094, + "rows_read": 11, + "bytes_read": 458 + } +} diff --git a/parser/testdata/00649_quantile_tdigest_negative/metadata.json b/parser/testdata/00649_quantile_tdigest_negative/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00649_quantile_tdigest_negative/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00649_quantile_tdigest_negative/query.sql b/parser/testdata/00649_quantile_tdigest_negative/query.sql new file mode 100644 index 000000000..7e5e78a2b --- /dev/null +++ b/parser/testdata/00649_quantile_tdigest_negative/query.sql @@ -0,0 +1 @@ +SELECT quantileTDigest(0.5)(arrayJoin([-1, -2, -3])); diff --git a/parser/testdata/00650_array_enumerate_uniq_with_tuples/ast.json 
b/parser/testdata/00650_array_enumerate_uniq_with_tuples/ast.json new file mode 100644 index 000000000..ce1b2be16 --- /dev/null +++ b/parser/testdata/00650_array_enumerate_uniq_with_tuples/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab_00650 (children 1)" + }, + { + "explain": " Identifier tab_00650" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001466608, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00650_array_enumerate_uniq_with_tuples/metadata.json b/parser/testdata/00650_array_enumerate_uniq_with_tuples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00650_array_enumerate_uniq_with_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00650_array_enumerate_uniq_with_tuples/query.sql b/parser/testdata/00650_array_enumerate_uniq_with_tuples/query.sql new file mode 100644 index 000000000..8ea600f6d --- /dev/null +++ b/parser/testdata/00650_array_enumerate_uniq_with_tuples/query.sql @@ -0,0 +1,21 @@ +drop table if exists tab_00650; +create table tab_00650 (val UInt32, n Nested(x UInt8, y String)) engine = Memory; +insert into tab_00650 values (1, [1, 2, 1, 1, 2, 1], ['a', 'a', 'b', 'a', 'b', 'b']); +select arrayEnumerateUniq(n.x) from tab_00650; +select arrayEnumerateUniq(n.y) from tab_00650; +select arrayEnumerateUniq(n.x, n.y) from tab_00650; +select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y)) from tab_00650; +select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y), n.x) from tab_00650; +select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y), arrayMap((a, b) -> (b, a), n.x, n.y)) from tab_00650; + +drop table tab_00650; +create table tab_00650 (val UInt32, n Nested(x Nullable(UInt8), y String)) engine = Memory; +insert into tab_00650 values (1, [1, Null, 2, 1, 1, 2, 1, Null, Null], ['a', 'a', 'a', 'b', 'a', 'b', 'b', 'b', 'a']); +select arrayEnumerateUniq(n.x) from tab_00650; +select arrayEnumerateUniq(n.y) from tab_00650; +select arrayEnumerateUniq(n.x, n.y) from tab_00650; +select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y)) from tab_00650; +select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y), n.x) from tab_00650; +select arrayEnumerateUniq(arrayMap((a, b) -> (a, b), n.x, n.y), arrayMap((a, b) -> (b, a), n.x, n.y)) from tab_00650; + +drop table tab_00650; diff --git a/parser/testdata/00653_monotonic_integer_cast/ast.json b/parser/testdata/00653_monotonic_integer_cast/ast.json new file mode 100644 index 000000000..c8679c21c --- /dev/null +++ b/parser/testdata/00653_monotonic_integer_cast/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_00653 (children 1)" + }, + { + "explain": " Identifier table_00653" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001144115, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/00653_monotonic_integer_cast/metadata.json b/parser/testdata/00653_monotonic_integer_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00653_monotonic_integer_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00653_monotonic_integer_cast/query.sql b/parser/testdata/00653_monotonic_integer_cast/query.sql new file mode 100644 index 000000000..5b22b14cf --- /dev/null +++ 
b/parser/testdata/00653_monotonic_integer_cast/query.sql @@ -0,0 +1,5 @@ +drop table if exists `table_00653`; +create table `table_00653` (val Int32) engine = MergeTree order by val; +insert into `table_00653` values (-2), (0), (2); +select count() from `table_00653` where toUInt64(val) == 0; +drop table table_00653; diff --git a/parser/testdata/00653_running_difference/ast.json b/parser/testdata/00653_running_difference/ast.json new file mode 100644 index 000000000..b8aa0162a --- /dev/null +++ b/parser/testdata/00653_running_difference/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001358504, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00653_running_difference/metadata.json b/parser/testdata/00653_running_difference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00653_running_difference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00653_running_difference/query.sql b/parser/testdata/00653_running_difference/query.sql new file mode 100644 index 000000000..d2858a938 --- /dev/null +++ b/parser/testdata/00653_running_difference/query.sql @@ -0,0 +1,22 @@ +SET allow_deprecated_error_prone_window_functions = 1; +select runningDifference(x) from (select arrayJoin([0, 1, 5, 10]) as x); +select '-'; +select runningDifference(x) from (select arrayJoin([2, Null, 3, Null, 10]) as x); +select '-'; +select runningDifference(x) from (select arrayJoin([Null, 1]) as x); +select '-'; +select runningDifference(x) from (select arrayJoin([Null, Null, 1, 3, Null, Null, 5]) as x); +select '-'; +select runningDifference(x) from (select arrayJoin([0, 1, 5, 10, 170141183460469231731687303715884105727]::Array(UInt128)) as x); +select '-'; +select runningDifference(x) from (select arrayJoin([0, 1, 5, 10, 170141183460469231731687303715884105728]::Array(UInt256)) as x); +select '-'; +select runningDifference(x) from (select arrayJoin([0, 1, 5, 10, 170141183460469231731687303715884105727]::Array(Int128)) as x); +select '-'; +select runningDifference(x) from (select arrayJoin([0, 1, 5, 10, 170141183460469231731687303715884105728]::Array(Int256)) as x); +select '--Date Difference--'; +select runningDifference(x) from (select arrayJoin([Null, Null, toDate('1970-1-1'), toDate('1970-12-31'), Null, Null, toDate('2010-8-9')]) as x); +select '-'; +select runningDifference(x) from (select arrayJoin([Null, Null, toDate32('1900-1-1'), toDate32('1930-5-25'), toDate('1990-9-4'), Null, toDate32('2279-5-4')]) as x); +select '-'; +select runningDifference(x) from (select arrayJoin([Null, Null, toDateTime('1970-06-28 23:48:12', 'Asia/Istanbul'), toDateTime('2070-04-12 21:16:41', 'Asia/Istanbul'), Null, Null, toDateTime('2106-02-03 06:38:52', 'Asia/Istanbul')]) as x); diff --git a/parser/testdata/00660_optimize_final_without_partition/ast.json b/parser/testdata/00660_optimize_final_without_partition/ast.json new file mode 100644 index 000000000..f383fb789 --- /dev/null +++ b/parser/testdata/00660_optimize_final_without_partition/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001335004, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00660_optimize_final_without_partition/metadata.json 
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00660_optimize_final_without_partition/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00660_optimize_final_without_partition/query.sql b/parser/testdata/00660_optimize_final_without_partition/query.sql
new file mode 100644
index 000000000..6545ad6e8
--- /dev/null
+++ b/parser/testdata/00660_optimize_final_without_partition/query.sql
@@ -0,0 +1,19 @@
+SET optimize_on_insert = 0;
+
+DROP TABLE IF EXISTS partitioned_by_tuple;
+
+CREATE TABLE partitioned_by_tuple (d Date, x UInt8, w String, y UInt8) ENGINE SummingMergeTree (y) PARTITION BY (d, x) ORDER BY (d, x, w);
+
+INSERT INTO partitioned_by_tuple VALUES ('2000-01-02', 1, 'first', 3);
+INSERT INTO partitioned_by_tuple VALUES ('2000-01-01', 2, 'first', 2);
+INSERT INTO partitioned_by_tuple VALUES ('2000-01-01', 1, 'first', 1), ('2000-01-01', 1, 'first', 2);
+
+OPTIMIZE TABLE partitioned_by_tuple;
+
+SELECT * FROM partitioned_by_tuple ORDER BY d, x, w, y;
+
+OPTIMIZE TABLE partitioned_by_tuple FINAL;
+
+SELECT * FROM partitioned_by_tuple ORDER BY d, x, w, y;
+
+DROP TABLE partitioned_by_tuple;
diff --git a/parser/testdata/00661_array_has_silviucpp/ast.json b/parser/testdata/00661_array_has_silviucpp/ast.json
new file mode 100644
index 000000000..8690b1c35
--- /dev/null
+++ b/parser/testdata/00661_array_has_silviucpp/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery has_function (children 1)"
+		},
+		{
+			"explain": " Identifier has_function"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001232592,
+		"rows_read": 2,
+		"bytes_read": 76
+	}
+}
diff --git a/parser/testdata/00661_array_has_silviucpp/metadata.json b/parser/testdata/00661_array_has_silviucpp/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00661_array_has_silviucpp/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00661_array_has_silviucpp/query.sql b/parser/testdata/00661_array_has_silviucpp/query.sql
new file mode 100644
index 000000000..64af9d8d5
--- /dev/null
+++ b/parser/testdata/00661_array_has_silviucpp/query.sql
@@ -0,0 +1,9 @@
+DROP TABLE IF EXISTS has_function;
+
+CREATE TABLE has_function(arr Array(Nullable(String))) ENGINE = Memory;
+INSERT INTO has_function(arr) values ([null, 'str1', 'str2']),(['str1', 'str2']), ([]), ([]);
+
+SELECT arr, has(`arr`, 'str1') FROM has_function;
+SELECT has([null, 'str1', 'str2'], 'str1');
+
+DROP TABLE has_function;
diff --git a/parser/testdata/00661_optimize_final_replicated_without_partition_zookeeper/ast.json b/parser/testdata/00661_optimize_final_replicated_without_partition_zookeeper/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00661_optimize_final_replicated_without_partition_zookeeper/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00661_optimize_final_replicated_without_partition_zookeeper/metadata.json b/parser/testdata/00661_optimize_final_replicated_without_partition_zookeeper/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00661_optimize_final_replicated_without_partition_zookeeper/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00661_optimize_final_replicated_without_partition_zookeeper/query.sql b/parser/testdata/00661_optimize_final_replicated_without_partition_zookeeper/query.sql
new file mode 100644
index 000000000..65e7e182f
--- /dev/null
+++ b/parser/testdata/00661_optimize_final_replicated_without_partition_zookeeper/query.sql
@@ -0,0 +1,29 @@
+-- Tags: replica
+
+-- May affect part names
+SET prefer_warmed_unmerged_parts_seconds=0;
+SET ignore_cold_parts_seconds=0;
+
+SET optimize_on_insert = 0;
+
+DROP TABLE IF EXISTS partitioned_by_tuple_replica1_00661 SYNC;
+DROP TABLE IF EXISTS partitioned_by_tuple_replica2_00661 SYNC;
+CREATE TABLE partitioned_by_tuple_replica1_00661(d Date, x UInt8, w String, y UInt8) ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/{database}/test/partitioned_by_tuple_00661', '1') PARTITION BY (d, x) ORDER BY (d, x, w) SETTINGS cache_populated_by_fetch=0;
+CREATE TABLE partitioned_by_tuple_replica2_00661(d Date, x UInt8, w String, y UInt8) ENGINE = ReplicatedSummingMergeTree('/clickhouse/tables/{database}/test/partitioned_by_tuple_00661', '2') PARTITION BY (d, x) ORDER BY (d, x, w) SETTINGS cache_populated_by_fetch=0;
+
+INSERT INTO partitioned_by_tuple_replica1_00661 VALUES ('2000-01-02', 1, 'first', 3);
+INSERT INTO partitioned_by_tuple_replica1_00661 VALUES ('2000-01-01', 2, 'first', 2);
+INSERT INTO partitioned_by_tuple_replica1_00661 VALUES ('2000-01-01', 1, 'first', 1), ('2000-01-01', 1, 'first', 2);
+
+OPTIMIZE TABLE partitioned_by_tuple_replica1_00661;
+
+SYSTEM SYNC REPLICA partitioned_by_tuple_replica2_00661;
+SELECT * FROM partitioned_by_tuple_replica2_00661 ORDER BY d, x, w, y;
+
+OPTIMIZE TABLE partitioned_by_tuple_replica1_00661 FINAL;
+
+SYSTEM SYNC REPLICA partitioned_by_tuple_replica2_00661;
+SELECT * FROM partitioned_by_tuple_replica2_00661 ORDER BY d, x, w, y;
+
+DROP TABLE partitioned_by_tuple_replica1_00661 SYNC;
+DROP TABLE partitioned_by_tuple_replica2_00661 SYNC;
diff --git a/parser/testdata/00662_array_has_nullable/ast.json b/parser/testdata/00662_array_has_nullable/ast.json
new file mode 100644
index 000000000..06133e7d5
--- /dev/null
+++ b/parser/testdata/00662_array_has_nullable/ast.json
@@ -0,0 +1,46 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function has (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 2)"
+		},
+		{
+			"explain": "      Literal Array_['a', 'b']"
+		},
+		{
+			"explain": "      Literal 'a'"
+		}
+	],
+
+	"rows": 8,
+
+	"statistics":
+	{
+		"elapsed": 0.001325548,
+		"rows_read": 8,
+		"bytes_read": 289
+	}
+}
diff --git a/parser/testdata/00662_array_has_nullable/metadata.json b/parser/testdata/00662_array_has_nullable/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00662_array_has_nullable/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00662_array_has_nullable/query.sql b/parser/testdata/00662_array_has_nullable/query.sql
new file mode 100644
index 000000000..6011416de
--- /dev/null
+++ b/parser/testdata/00662_array_has_nullable/query.sql
@@ -0,0 +1,72 @@
+SELECT has(['a', 'b'], 'a');
+SELECT has(['a', 'b'], 'b');
+SELECT has(['a', 'b'], 'c');
+SELECT has(['a', 'b'], NULL);
+
+SELECT has(['a', NULL, 'b'], 'a');
+SELECT has(['a', NULL, 'b'], 'b');
+SELECT has(['a', NULL, 'b'], 'c');
+SELECT has(['a', NULL, 'b'], NULL);
+
+SELECT has(materialize(['a', 'b']), 'a');
+SELECT has(materialize(['a', 'b']), 'b');
+SELECT has(materialize(['a', 'b']), 'c');
+SELECT has(materialize(['a', 'b']), NULL);
+
+SELECT has(materialize(['a', NULL, 'b']), 'a');
+SELECT has(materialize(['a', NULL, 'b']), 'b');
+SELECT has(materialize(['a', NULL, 'b']), 'c');
+SELECT has(materialize(['a', NULL, 'b']), NULL);
+
+SELECT has(['a', 'b'], materialize('a'));
+SELECT has(['a', 'b'], materialize('b'));
+SELECT has(['a', 'b'], materialize('c'));
+
+SELECT has(['a', NULL, 'b'], materialize('a'));
+SELECT has(['a', NULL, 'b'], materialize('b'));
+SELECT has(['a', NULL, 'b'], materialize('c'));
+
+SELECT has(materialize(['a', 'b']), materialize('a'));
+SELECT has(materialize(['a', 'b']), materialize('b'));
+SELECT has(materialize(['a', 'b']), materialize('c'));
+
+SELECT has(materialize(['a', NULL, 'b']), materialize('a'));
+SELECT has(materialize(['a', NULL, 'b']), materialize('b'));
+SELECT has(materialize(['a', NULL, 'b']), materialize('c'));
+
+
+SELECT has([111, 222], 111);
+SELECT has([111, 222], 222);
+SELECT has([111, 222], 333);
+SELECT has([111, 222], NULL);
+
+SELECT has([111, NULL, 222], 111);
+SELECT has([111, NULL, 222], 222);
+SELECT has([111, NULL, 222], 333);
+SELECT has([111, NULL, 222], NULL);
+
+SELECT has(materialize([111, 222]), 111);
+SELECT has(materialize([111, 222]), 222);
+SELECT has(materialize([111, 222]), 333);
+SELECT has(materialize([111, 222]), NULL);
+
+SELECT has(materialize([111, NULL, 222]), 111);
+SELECT has(materialize([111, NULL, 222]), 222);
+SELECT has(materialize([111, NULL, 222]), 333);
+SELECT has(materialize([111, NULL, 222]), NULL);
+
+SELECT has([111, 222], materialize(111));
+SELECT has([111, 222], materialize(222));
+SELECT has([111, 222], materialize(333));
+
+SELECT has([111, NULL, 222], materialize(111));
+SELECT has([111, NULL, 222], materialize(222));
+SELECT has([111, NULL, 222], materialize(333));
+
+SELECT has(materialize([111, 222]), materialize(111));
+SELECT has(materialize([111, 222]), materialize(222));
+SELECT has(materialize([111, 222]), materialize(333));
+
+SELECT has(materialize([111, NULL, 222]), materialize(111));
+SELECT has(materialize([111, NULL, 222]), materialize(222));
+SELECT has(materialize([111, NULL, 222]), materialize(333));
diff --git a/parser/testdata/00662_has_nullable/ast.json b/parser/testdata/00662_has_nullable/ast.json
new file mode 100644
index 000000000..59e3a2fa9
--- /dev/null
+++ b/parser/testdata/00662_has_nullable/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery 00662_has_nullable (children 1)"
+		},
+		{
+			"explain": " Identifier 00662_has_nullable"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001154579,
+		"rows_read": 2,
+		"bytes_read": 88
+	}
+}
diff --git a/parser/testdata/00662_has_nullable/metadata.json b/parser/testdata/00662_has_nullable/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00662_has_nullable/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00662_has_nullable/query.sql b/parser/testdata/00662_has_nullable/query.sql
new file mode 100644
index 000000000..3fd3bd46b
--- /dev/null
+++ b/parser/testdata/00662_has_nullable/query.sql
@@ -0,0 +1,39 @@
+DROP TABLE IF EXISTS 00662_has_nullable;
+
+SELECT 'Nullable(UInt64), non-null array';
+CREATE TABLE 00662_has_nullable(a Nullable(UInt64)) ENGINE = Memory;
+
+INSERT INTO 00662_has_nullable VALUES (1), (Null);
+SELECT a, has([0, 1], a) FROM 00662_has_nullable;
+
+DROP TABLE 00662_has_nullable;
+
+--------------------------------------------------------------------------------
+
+SELECT 'Non-nullable UInt64, nullable array';
+CREATE TABLE 00662_has_nullable(a UInt64) ENGINE = Memory;
+
+INSERT INTO 00662_has_nullable VALUES (0), (1), (2);
+SELECT a, has([NULL, 1, 2], a) FROM 00662_has_nullable;
+
+DROP TABLE 00662_has_nullable;
+
+--------------------------------------------------------------------------------
+
+SELECT 'Nullable(UInt64), nullable array';
+CREATE TABLE 00662_has_nullable(a Nullable(UInt64)) ENGINE = Memory;
+
+INSERT INTO 00662_has_nullable VALUES (0), (Null), (1);
+SELECT a, has([NULL, 1, 2], a) FROM 00662_has_nullable;
+
+DROP TABLE 00662_has_nullable;
+
+--------------------------------------------------------------------------------
+
+SELECT 'All NULLs';
+CREATE TABLE 00662_has_nullable(a Nullable(UInt64)) ENGINE = Memory;
+
+INSERT INTO 00662_has_nullable VALUES (0), (Null);
+SELECT a, has([NULL, NULL], a) FROM 00662_has_nullable;
+
+DROP TABLE 00662_has_nullable;
diff --git a/parser/testdata/00663_tiny_log_empty_insert/ast.json b/parser/testdata/00663_tiny_log_empty_insert/ast.json
new file mode 100644
index 000000000..e87265963
--- /dev/null
+++ b/parser/testdata/00663_tiny_log_empty_insert/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery empty (children 1)"
+		},
+		{
+			"explain": " Identifier empty"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001443712,
+		"rows_read": 2,
+		"bytes_read": 62
+	}
+}
diff --git a/parser/testdata/00663_tiny_log_empty_insert/metadata.json b/parser/testdata/00663_tiny_log_empty_insert/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00663_tiny_log_empty_insert/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00663_tiny_log_empty_insert/query.sql b/parser/testdata/00663_tiny_log_empty_insert/query.sql
new file mode 100644
index 000000000..35b59ddff
--- /dev/null
+++ b/parser/testdata/00663_tiny_log_empty_insert/query.sql
@@ -0,0 +1,14 @@
+DROP TABLE IF EXISTS empty;
+DROP TABLE IF EXISTS data;
+
+CREATE TABLE empty (value Int8) ENGINE = TinyLog;
+CREATE TABLE data (value Int8) ENGINE = TinyLog;
+
+INSERT INTO data SELECT * FROM empty;
+SELECT * FROM data;
+
+INSERT INTO data SELECT 1;
+SELECT * FROM data;
+
+DROP TABLE empty;
+DROP TABLE data;
diff --git a/parser/testdata/00664_cast_from_string_to_nullable/ast.json b/parser/testdata/00664_cast_from_string_to_nullable/ast.json
new file mode 100644
index 000000000..b8d77c47c
--- /dev/null
+++ b/parser/testdata/00664_cast_from_string_to_nullable/ast.json
@@ -0,0 +1,52 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function CAST (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 2)"
+		},
+		{
+			"explain": "      Function arrayJoin (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 1)"
+		},
+		{
+			"explain": "        Literal Array_['', 'abc', '123', '123a', '-123']"
+		},
+		{
+			"explain": "      Literal 'Nullable(UInt8)'"
+		}
+	],
+
+	"rows": 10,
+
+	"statistics":
+	{
+		"elapsed": 0.001414962,
+		"rows_read": 10,
+		"bytes_read": 417
+	}
+}
diff --git a/parser/testdata/00664_cast_from_string_to_nullable/metadata.json b/parser/testdata/00664_cast_from_string_to_nullable/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00664_cast_from_string_to_nullable/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00664_cast_from_string_to_nullable/query.sql b/parser/testdata/00664_cast_from_string_to_nullable/query.sql
new file mode 100644
index 000000000..96dd4172f
--- /dev/null
+++ b/parser/testdata/00664_cast_from_string_to_nullable/query.sql
@@ -0,0 +1,11 @@
+SELECT CAST(arrayJoin(['', 'abc', '123', '123a', '-123']) AS Nullable(UInt8));
+SELECT CAST(arrayJoin(['', '2018', '2018-01-02', '2018-1-2', '2018-01-2', '2018-1-02', '2018-ab-cd', '2018-01-02a']) AS Nullable(Date));
+SELECT CAST(arrayJoin(['', '2018', '2018-01-02 01:02:03', '2018-01-02T01:02:03', '2018-01-02 01:02:03 abc']) AS Nullable(DateTime));
+SELECT CAST(arrayJoin(['', 'abc', '123', '123a', '-123']) AS Nullable(String));
+
+SELECT toDateOrZero(arrayJoin(['', '2018', '2018-01-02', '2018-1-2', '2018-01-2', '2018-1-02', '2018-ab-cd', '2018-01-02a']));
+SELECT toDateOrNull(arrayJoin(['', '2018', '2018-01-02', '2018-1-2', '2018-01-2', '2018-1-02', '2018-ab-cd', '2018-01-02a']));
+
+SELECT toDateTimeOrZero(arrayJoin(['', '2018', '2018-01-02 01:02:03', '2018-01-02T01:02:03', '2018-01-02 01:02:03 abc']), 'UTC');
+SELECT toDateTimeOrNull(arrayJoin(['', '2018', '2018-01-02 01:02:03', '2018-01-02T01:02:03', '2018-01-02 01:02:03 abc']));
+
diff --git a/parser/testdata/00665_alter_nullable_string_to_nullable_uint8/ast.json b/parser/testdata/00665_alter_nullable_string_to_nullable_uint8/ast.json
new file mode 100644
index 000000000..320acf8fa
--- /dev/null
+++ b/parser/testdata/00665_alter_nullable_string_to_nullable_uint8/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery alter_00665 (children 1)"
+		},
+		{
+			"explain": " Identifier alter_00665"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001154758,
+		"rows_read": 2,
+		"bytes_read": 74
+	}
+}
diff --git a/parser/testdata/00665_alter_nullable_string_to_nullable_uint8/metadata.json b/parser/testdata/00665_alter_nullable_string_to_nullable_uint8/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00665_alter_nullable_string_to_nullable_uint8/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00665_alter_nullable_string_to_nullable_uint8/query.sql b/parser/testdata/00665_alter_nullable_string_to_nullable_uint8/query.sql
new file mode 100644
index 000000000..2dcc81af1
--- /dev/null
+++ b/parser/testdata/00665_alter_nullable_string_to_nullable_uint8/query.sql
@@ -0,0 +1,12 @@
+DROP TABLE IF EXISTS alter_00665;
+CREATE TABLE alter_00665 (`boolean_false` Nullable(String)) ENGINE = MergeTree ORDER BY tuple();
+
+INSERT INTO alter_00665 (`boolean_false`) VALUES (NULL), (''), ('123');
+SELECT * FROM alter_00665;
+SELECT * FROM alter_00665 ORDER BY boolean_false NULLS LAST;
+
+ALTER TABLE alter_00665 MODIFY COLUMN `boolean_false` Nullable(UInt8);
+SELECT * FROM alter_00665;
+SELECT * FROM alter_00665 ORDER BY boolean_false NULLS LAST;
+
+DROP TABLE alter_00665;
diff --git a/parser/testdata/00666_uniq_complex_types/ast.json b/parser/testdata/00666_uniq_complex_types/ast.json
new file mode 100644
index 000000000..df3d7f23f
--- /dev/null
+++ b/parser/testdata/00666_uniq_complex_types/ast.json
@@ -0,0 +1,97 @@
+{
"meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function uniq (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal Array_['a']" + }, + { + "explain": " Literal Array_['a', 'b']" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 25, + + "statistics": + { + "elapsed": 0.001430357, + "rows_read": 25, + "bytes_read": 1069 + } +} diff --git a/parser/testdata/00666_uniq_complex_types/metadata.json b/parser/testdata/00666_uniq_complex_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00666_uniq_complex_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00666_uniq_complex_types/query.sql b/parser/testdata/00666_uniq_complex_types/query.sql new file mode 100644 index 000000000..3b43331c6 --- /dev/null +++ b/parser/testdata/00666_uniq_complex_types/query.sql @@ -0,0 +1,29 @@ +SELECT uniq(x) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniq(x) FROM (SELECT arrayJoin([[[]], [['a', 'b']], [['a'], ['b']], [['a', 'b']]]) AS x); +SELECT uniq(x, x) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniq(x, arrayMap(elem -> [elem, elem], x)) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniq(x, toString(x)) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniq((x, x)) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniq((x, arrayMap(elem -> [elem, elem], x))) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniq((x, toString(x))) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniq(x) FROM (SELECT arrayJoin([[], ['a'], ['a', NULL, 'b'], []]) AS x); + +SELECT uniqExact(x) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniqExact(x) FROM (SELECT arrayJoin([[[]], [['a', 'b']], [['a'], ['b']], [['a', 'b']]]) AS x); +SELECT uniqExact(x, x) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniqExact(x, arrayMap(elem -> [elem, elem], x)) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniqExact(x, toString(x)) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniqExact((x, x)) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniqExact((x, arrayMap(elem -> [elem, elem], x))) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniqExact((x, 
+SELECT uniqExact(x) FROM (SELECT arrayJoin([[], ['a'], ['a', NULL, 'b'], []]) AS x);
+
+SELECT uniqUpTo(3)(x) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x);
+SELECT uniqUpTo(3)(x) FROM (SELECT arrayJoin([[[]], [['a', 'b']], [['a'], ['b']], [['a', 'b']]]) AS x);
+SELECT uniqUpTo(3)(x, x) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x);
+SELECT uniqUpTo(3)(x, arrayMap(elem -> [elem, elem], x)) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x);
+SELECT uniqUpTo(3)(x, toString(x)) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x);
+SELECT uniqUpTo(3)((x, x)) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x);
+SELECT uniqUpTo(3)((x, arrayMap(elem -> [elem, elem], x))) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x);
+SELECT uniqUpTo(3)((x, toString(x))) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x);
+SELECT uniqUpTo(3)(x) FROM (SELECT arrayJoin([[], ['a'], ['a', NULL, 'b'], []]) AS x);
diff --git a/parser/testdata/00667_compare_arrays_of_different_types/ast.json b/parser/testdata/00667_compare_arrays_of_different_types/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00667_compare_arrays_of_different_types/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00667_compare_arrays_of_different_types/metadata.json b/parser/testdata/00667_compare_arrays_of_different_types/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00667_compare_arrays_of_different_types/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00667_compare_arrays_of_different_types/query.sql b/parser/testdata/00667_compare_arrays_of_different_types/query.sql
new file mode 100644
index 000000000..9f63be98c
--- /dev/null
+++ b/parser/testdata/00667_compare_arrays_of_different_types/query.sql
@@ -0,0 +1,21 @@
+SELECT
+    [1] < [1000],
+    ['abc'] = [NULL],
+    ['abc'] = [toNullable('abc')],
+    [[]] = [[]],
+    [[], [1]] > [[], []],
+    [[1]] < [[], []],
+    [[], []] > [[]],
+    [([], ([], []))] < [([], ([], ['hello']))]
+;
+
+SELECT
+    materialize([1]) < materialize([1000]),
+    materialize(['abc']) = materialize([NULL]),
+    materialize(['abc']) = materialize([toNullable('abc')]),
+    materialize([[]]) = materialize([[]]),
+    materialize([[], [1]]) > materialize([[], []]),
+    materialize([[1]]) < materialize([[], []]),
+    materialize([[], []]) > materialize([[]]),
+    materialize([([], ([], []))]) < materialize([([], ([], ['hello']))])
+;
diff --git a/parser/testdata/00668_compare_arrays_silviucpp/ast.json b/parser/testdata/00668_compare_arrays_silviucpp/ast.json
new file mode 100644
index 000000000..26140e1d8
--- /dev/null
+++ b/parser/testdata/00668_compare_arrays_silviucpp/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery array (children 1)"
+		},
+		{
+			"explain": " Identifier array"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001139857,
+		"rows_read": 2,
+		"bytes_read": 62
+	}
+}
diff --git a/parser/testdata/00668_compare_arrays_silviucpp/metadata.json b/parser/testdata/00668_compare_arrays_silviucpp/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00668_compare_arrays_silviucpp/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00668_compare_arrays_silviucpp/query.sql b/parser/testdata/00668_compare_arrays_silviucpp/query.sql
new file mode 100644
index 000000000..5f6ad7c1d
--- /dev/null
+++ b/parser/testdata/00668_compare_arrays_silviucpp/query.sql
@@ -0,0 +1,9 @@
+DROP TABLE IF EXISTS array;
+CREATE TABLE array (arr Array(Nullable(Float64))) ENGINE = Memory;
+INSERT INTO array(arr) values ([1,2]),([3,4]),([5,6]),([7,8]);
+
+select * from array where arr > [12.2];
+select * from array where arr > [null, 12.2];
+select * from array where arr > [null, 12];
+
+DROP TABLE array;
diff --git a/parser/testdata/00670_truncate_temporary_table/ast.json b/parser/testdata/00670_truncate_temporary_table/ast.json
new file mode 100644
index 000000000..1db6728ca
--- /dev/null
+++ b/parser/testdata/00670_truncate_temporary_table/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery test_00670 (children 1)"
+		},
+		{
+			"explain": " Identifier test_00670"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001370996,
+		"rows_read": 2,
+		"bytes_read": 72
+	}
+}
diff --git a/parser/testdata/00670_truncate_temporary_table/metadata.json b/parser/testdata/00670_truncate_temporary_table/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00670_truncate_temporary_table/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00670_truncate_temporary_table/query.sql b/parser/testdata/00670_truncate_temporary_table/query.sql
new file mode 100644
index 000000000..c8db86d06
--- /dev/null
+++ b/parser/testdata/00670_truncate_temporary_table/query.sql
@@ -0,0 +1,16 @@
+drop temporary table if exists test_00670;
+create temporary table test_00670(id int);
+
+select '======Before Truncate======';
+insert into test_00670 values(0);
+select * from test_00670;
+
+select '======After Truncate And Empty======';
+truncate temporary table test_00670;
+select * from test_00670;
+
+select '======After Truncate And Insert Data======';
+insert into test_00670 values(0);
+select * from test_00670;
+
+drop temporary table test_00670;
diff --git a/parser/testdata/00671_max_intersections/ast.json b/parser/testdata/00671_max_intersections/ast.json
new file mode 100644
index 000000000..fcac3494b
--- /dev/null
+++ b/parser/testdata/00671_max_intersections/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery test1_00671 (children 1)"
+		},
+		{
+			"explain": " Identifier test1_00671"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001223738,
+		"rows_read": 2,
+		"bytes_read": 74
+	}
+}
diff --git a/parser/testdata/00671_max_intersections/metadata.json b/parser/testdata/00671_max_intersections/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00671_max_intersections/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00671_max_intersections/query.sql b/parser/testdata/00671_max_intersections/query.sql
new file mode 100644
index 000000000..7f03eb7dd
--- /dev/null
+++ b/parser/testdata/00671_max_intersections/query.sql
@@ -0,0 +1,14 @@
+DROP TABLE IF EXISTS test1_00671;
+DROP TABLE IF EXISTS test2_00671;
+
+CREATE TABLE test1_00671(start Integer, end Integer) engine = Memory;
+CREATE TABLE test2_00671(start Integer, end Integer) engine = Memory;
+
+INSERT INTO test1_00671(start,end) VALUES (1,3),(3,5);
+INSERT INTO test2_00671(start,end) VALUES (3,5),(1,3);
+
+SELECT maxIntersections(start,end) from test1_00671;
+SELECT maxIntersections(start,end) from test2_00671;
+
+DROP TABLE test1_00671;
+DROP TABLE test2_00671;
diff --git a/parser/testdata/00672_arrayDistinct/ast.json b/parser/testdata/00672_arrayDistinct/ast.json
new file mode 100644
index 000000000..65f8eaf1a
--- /dev/null
+++ b/parser/testdata/00672_arrayDistinct/ast.json
@@ -0,0 +1,43 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function arrayDistinct (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Literal Array_[UInt64_1, UInt64_2, UInt64_3]"
+		}
+	],
+
+	"rows": 7,
+
+	"statistics":
+	{
+		"elapsed": 0.001229942,
+		"rows_read": 7,
+		"bytes_read": 294
+	}
+}
diff --git a/parser/testdata/00672_arrayDistinct/metadata.json b/parser/testdata/00672_arrayDistinct/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00672_arrayDistinct/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00672_arrayDistinct/query.sql b/parser/testdata/00672_arrayDistinct/query.sql
new file mode 100644
index 000000000..8f7918b25
--- /dev/null
+++ b/parser/testdata/00672_arrayDistinct/query.sql
@@ -0,0 +1,20 @@
+SELECT arrayDistinct([1, 2, 3]);
+SELECT arrayDistinct([1, 2, 3, 2, 2]);
+SELECT arrayDistinct([1, 2, NULL, 5, 2, NULL]);
+
+SELECT arrayDistinct(['1212', 'sef', '343r4']);
+SELECT arrayDistinct(['1212', 'sef', '343r4', '1212']);
+SELECT arrayDistinct(['1212', 'sef', '343r4', NULL, NULL, '232']);
+
+DROP TABLE IF EXISTS arrayDistinct_test;
+CREATE TABLE arrayDistinct_test(arr_int Array(UInt8), arr_string Array(String)) ENGINE=Memory;
+INSERT INTO arrayDistinct_test values ([1, 2, 3], ['a', 'b', 'c']), ([21, 21, 21, 21], ['123', '123', '123']);
+
+SELECT arrayDistinct(arr_int) FROM arrayDistinct_test;
+SELECT arrayDistinct(arr_string) FROM arrayDistinct_test;
+
+DROP TABLE arrayDistinct_test;
+
+SELECT arrayDistinct([['1212'], ['sef'], ['343r4'], ['1212']]);
+SELECT arrayDistinct([(1, 2), (1, 3), (1, 2), (1, 2), (1, 2), (1, 5)]);
+SELECT length(arrayDistinct([NULL, NULL, NULL]));
diff --git a/parser/testdata/00673_subquery_prepared_set_performance/ast.json b/parser/testdata/00673_subquery_prepared_set_performance/ast.json
new file mode 100644
index 000000000..ea07ceaa5
--- /dev/null
+++ b/parser/testdata/00673_subquery_prepared_set_performance/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery mergetree_00673 (children 1)"
+		},
+		{
+			"explain": " Identifier mergetree_00673"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001307853,
+		"rows_read": 2,
+		"bytes_read": 82
+	}
+}
diff --git a/parser/testdata/00673_subquery_prepared_set_performance/metadata.json b/parser/testdata/00673_subquery_prepared_set_performance/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00673_subquery_prepared_set_performance/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00673_subquery_prepared_set_performance/query.sql b/parser/testdata/00673_subquery_prepared_set_performance/query.sql
new file mode 100644
index 000000000..98c0802ff
--- /dev/null
+++ b/parser/testdata/00673_subquery_prepared_set_performance/query.sql
@@ -0,0 +1,12 @@
+DROP TABLE IF EXISTS mergetree_00673;
+
+CREATE TABLE mergetree_00673 (x UInt64) ENGINE = MergeTree ORDER BY x;
+INSERT INTO mergetree_00673 VALUES (1);
+
+SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM mergetree_00673 WHERE x IN (SELECT * FROM numbers(1000000))))))))))))))))))))));
+
+SET force_primary_key = 1;
+
+SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM mergetree_00673 WHERE x IN (SELECT * FROM numbers(1000000))))))))))))))))))))));
+
+DROP TABLE mergetree_00673;
diff --git a/parser/testdata/00674_has_array_enum/ast.json b/parser/testdata/00674_has_array_enum/ast.json
new file mode 100644
index 000000000..40f071d5d
--- /dev/null
+++ b/parser/testdata/00674_has_array_enum/ast.json
@@ -0,0 +1,88 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 2)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function has (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 2)"
+		},
+		{
+			"explain": "      Function array (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 1)"
+		},
+		{
+			"explain": "        Identifier x"
+		},
+		{
+			"explain": "      Literal UInt64_10"
+		},
+		{
+			"explain": "   TablesInSelectQuery (children 1)"
+		},
+		{
+			"explain": "    TablesInSelectQueryElement (children 1)"
+		},
+		{
+			"explain": "     TableExpression (children 1)"
+		},
+		{
+			"explain": "      Subquery (children 1)"
+		},
+		{
+			"explain": "       SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": "        ExpressionList (children 1)"
+		},
+		{
+			"explain": "         SelectQuery (children 1)"
+		},
+		{
+			"explain": "          ExpressionList (children 1)"
+		},
+		{
+			"explain": "           Function CAST (alias x) (children 1)"
+		},
+		{
+			"explain": "            ExpressionList (children 2)"
+		},
+		{
+			"explain": "             Literal UInt64_10"
+		},
+		{
+			"explain": "             Literal 'Enum8(\\'hello\\' = 1, \\'world\\' = 2, \\'abc\\' = 10)'"
+		}
+	],
+
+	"rows": 22,
+
+	"statistics":
+	{
+		"elapsed": 0.001411144,
+		"rows_read": 22,
+		"bytes_read": 935
+	}
+}
diff --git a/parser/testdata/00674_has_array_enum/metadata.json b/parser/testdata/00674_has_array_enum/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00674_has_array_enum/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00674_has_array_enum/query.sql b/parser/testdata/00674_has_array_enum/query.sql
new file mode 100644
index 000000000..b8baf6022
--- /dev/null
+++ b/parser/testdata/00674_has_array_enum/query.sql
@@ -0,0 +1 @@
+SELECT has([x], 10) FROM (SELECT CAST(10 AS Enum8('hello' = 1, 'world' = 2, 'abc' = 10)) AS x);
diff --git a/parser/testdata/00674_join_on_syntax/ast.json b/parser/testdata/00674_join_on_syntax/ast.json
new file mode 100644
index 000000000..0eff7cfd2
--- /dev/null
+++ b/parser/testdata/00674_join_on_syntax/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001381903,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/00674_join_on_syntax/metadata.json b/parser/testdata/00674_join_on_syntax/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00674_join_on_syntax/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00674_join_on_syntax/query.sql b/parser/testdata/00674_join_on_syntax/query.sql
new file mode 100644
index 000000000..584e43b88
--- /dev/null
+++ b/parser/testdata/00674_join_on_syntax/query.sql
@@ -0,0 +1,115 @@
+SET joined_subquery_requires_alias = 0;
+SET enable_analyzer = 1;
+
+drop table if exists tab1;
+drop table if exists tab2;
+drop table if exists tab3;
+drop table if exists tab1_copy;
+
+create table tab1 (a1 Int32, b1 Int32) engine = MergeTree order by a1;
+create table tab2 (a2 Int32, b2 Int32) engine = MergeTree order by a2;
+create table tab3 (a3 Int32, b3 Int32) engine = MergeTree order by a3;
+create table tab1_copy (a1 Int32, b1 Int32) engine = MergeTree order by a1;
+
+insert into tab1 values (1, 2);
+insert into tab2 values (2, 3);
+insert into tab3 values (2, 3);
+insert into tab1_copy values (2, 3);
+
+
+select 'joind columns from right table';
+select a1 from tab1 any left join tab2 on b1 = a2;
+select a1, b1 from tab1 any left join tab2 on b1 = a2;
+select a1, a2 from tab1 any left join tab2 on b1 = a2;
+select a1, b2 from tab1 any left join tab2 on b1 = a2;
+select a1, a2, b2 from tab1 any left join tab2 on b1 = a2;
+
+
+select 'join on expression';
+select b1 from tab1 any left join tab2 on toInt32(a1 + 1) = a2;
+select b1, a2 from tab1 any left join tab2 on toInt32(a1 + 1) = a2;
+select b1, b2 from tab1 any left join tab2 on toInt32(a1 + 1) = a2;
+select a1 from tab1 any left join tab2 on b1 + 1 = a2 + 1;
+select a2 from tab1 any left join tab2 on b1 + 1 = a2 + 1;
+select a1, b1, a2, b2 from tab1 any left join tab2 on b1 + 1 = a2 + 1;
+select a1, b1, a2, b2, a2 + 1 from tab1 any left join tab2 on b1 + 1 = a2 + 1;
+select a1, b1, a2, b2 from tab1 any left join tab2 on a1 + 4 = b2 + 2;
+
+
+select 'join on and chain';
+select a2, b2 from tab2 any left join tab3 on a2 = a3 and b2 = b3;
+select a3, b3 from tab2 any left join tab3 on a2 = a3 and b2 = b3;
+select a2, b2, a3, b3 from tab2 any left join tab3 on a2 = a3 and b2 = b3;
+select a1 from tab1 any left join tab2 on b1 + 1 = a2 + 1 and a1 + 4 = b2 + 2;
+select a1, b2 from tab1 any left join tab2 on b1 + 1 = a2 + 1 and a1 + 4 = b2 + 2;
+select a1, b1, a2, b2 from tab1 any left join tab2 on b1 + 1 = a2 + 1 and a1 + 4 = b2 + 2;
+select a2, b2 + 1 from tab1 any left join tab2 on b1 + 1 = a2 + 1 and a1 + 4 = b2 + 2;
+
+
+select 'join on aliases';
+select a1, a2, b1, b2 from tab1 first any left join tab2 second_ on first.b1 = second_.a2;
+select a1, a2, b1, b2 from tab1 first any left join tab2 second_ on second_.a2 = first.b1;
+
+select a1, a2, b1, b2 from tab1 first any left join tab2 second_ on tab1.b1 = tab2.a2;
+select a1, a2, b1, b2 from tab1 first any left join tab2 second_ on tab2.a2 = tab1.b1;
+
+select a1, a2, b1, b2 from tab1 first any left join tab2 second_ on tab1.b1 = tab2.a2;
+select a1, a2, b1, b2 from tab1 first any left join tab2 second_ on tab2.a2 = tab1.b1;
+
+select a1, a2, b1, b2 from tab1 first any left join tab2 second_ on first.b1 = tab2.a2;
+select a1, a2, b1, b2 from tab1 first any left join tab2 second_ on tab2.a2 = first.b1;
+select a1, a2, b1, b2 from tab1 first any left join tab2 second_ on first.b1 = tab2.a2;
+select a1, a2, b1, b2 from tab1 first any left join tab2 second_ on tab2.a2 = first.b1;
+
+select a1, a2, b1, b2 from tab1 first any left join tab2 second_ on tab1.b1 = second_.a2;
+select a1, a2, b1, b2 from tab1 first any left join tab2 second_ on second_.a2 = tab1.b1;
+select a1, a2, b1, b2 from tab1 first any left join tab2 second_ on tab1.b1 = second_.a2;
+select a1, a2, b1, b2 from tab1 first any left join tab2 second_ on second_.a2 = tab1.b1;
+
+select a1, a2, first.b1, second_.b2 from tab1 first any left join tab2 second_ on b1 = a2;
+select a1, a2, tab1.b1, tab2.b2 from tab1 first any left join tab2 second_ on b1 = a2;
+select a1, a2, tab1.b1, tab2.b2 from tab1 first any left join tab2 second_ on b1 = a2;
+
+
+select 'join on complex expression';
+select a2, b2 from tab2 any left join tab3 on a2 + b2 = a3 + b3;
+select a2, b2 from tab2 any left join tab3 on a3 + tab3.b3 = a2 + b2;
+select a2, b2 from tab2 second_ any left join tab3 on a3 + b3 = a2 + second_.b2;
+select a2, b2 from tab2 second_ any left join tab3 third on third.a3 + tab3.b3 = tab2.a2 + second_.b2;
+select a2, b2 from tab2 second_ any left join tab3 third on third.a3 + tab3.b3 = tab2.a2 + second_.b2;
+
+select 'duplicate column names';
+select a1, tab1_copy.a1 from tab1 any left join tab1_copy on tab1.b1 + 3 = tab1_copy.b1 + 2 FORMAT JSONEachRow;
+select a1, tab1_copy.a1 from tab1 any left join tab1_copy on tab1.b1 + 3 = tab1_copy.b1 + 2 FORMAT JSONEachRow;
+select a1, copy.a1 from tab1 any left join tab1_copy copy on tab1.b1 + 3 = tab1_copy.b1 + 2 FORMAT JSONEachRow;
+select a1, tab1_copy.a1 from tab1 any left join tab1_copy copy on tab1.b1 + 3 = tab1_copy.b1 + 2 FORMAT JSONEachRow;
+select a1, tab1_copy.a1 from tab1 any left join tab1_copy copy on tab1.b1 + 3 = tab1_copy.b1 + 2 FORMAT JSONEachRow;
+
+select 'subquery';
+select a1 from tab1 any left join (select * from tab2) on b1 = a2;
+select a1 from tab1 any left join (select a2 from tab2) on b1 = a2;
+select a1, b1 from tab1 any left join (select * from tab2) on b1 = a2;
+select a1, b1, a2, b2 from tab1 any left join (select * from tab2) on b1 = a2;
+select a1, a2 from tab1 any left join (select a2 from tab2) on b1 = a2;
+
+select 'subquery expression';
+select b1 from tab1 any left join (select * from tab2) on toInt32(a1 + 1) = a2;
+select a1, b1, a2, b2 from tab1 any left join (select * from tab2) on b1 + 1 = a2 + 1;
+select a1, b1, a2 from tab1 any left join (select * from tab2) on b1 + 1 = a2 + 1;
+
+select 'subquery column alias';
+select a1, b1, a2, b2 from tab1 any left join (select *, a2 as z from tab2) on b1 + 1 = z + 1;
+select a1, b1, a2, b2 from tab1 any left join (select *, a2 + 1 as z from tab2) on b1 + 1 = z;
+select a1, b1, a2, b2 from tab1 any left join (select *, a2 + 1 as z from tab2) on b1 + 2 = z + 1 format TSV;
+
+select 'subquery alias';
+select a1, a2, b1, b2 from tab1 first any left join (select * from tab2) second_ on first.b1 = second_.a2;
+select a1, a2, b1, b2 from tab1 first any left join (select *, a2 as z from tab2) second_ on first.b1 = second_.z;
+select a1, a2, b1, b2 from tab1 first any left join (select *, a2 + 1 as z from tab2) second_ on first.b1 + 1 = second_.z;
+select tab1.a1, a2, tab1.b1, second_.b2 from tab1 first any left join (select * from tab2) second_ on first.b1 = second_.a2;
+select a1, s.a1 from tab1 any left join (select * from tab1_copy) s on tab1.b1 + 3 = s.b1 + 2 FORMAT JSONEachRow;
+
+drop table tab1;
+drop table tab1_copy;
+drop table tab2;
+drop table tab3;
diff --git a/parser/testdata/00675_shard_remote_with_table_function/ast.json b/parser/testdata/00675_shard_remote_with_table_function/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00675_shard_remote_with_table_function/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00675_shard_remote_with_table_function/metadata.json b/parser/testdata/00675_shard_remote_with_table_function/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00675_shard_remote_with_table_function/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00675_shard_remote_with_table_function/query.sql b/parser/testdata/00675_shard_remote_with_table_function/query.sql
new file mode 100644
index 000000000..99c2e02e2
--- /dev/null
+++ b/parser/testdata/00675_shard_remote_with_table_function/query.sql
@@ -0,0 +1,11 @@
+-- Tags: shard
+
+DROP TABLE IF EXISTS remote_test;
+CREATE TABLE remote_test(a1 UInt8) ENGINE=Memory;
+INSERT INTO FUNCTION remote('127.0.0.1', currentDatabase(), remote_test) VALUES(1);
+INSERT INTO FUNCTION remote('127.0.0.1', currentDatabase(), remote_test) VALUES(2);
+INSERT INTO FUNCTION remote('127.0.0.1', currentDatabase(), remote_test) VALUES(3);
+INSERT INTO FUNCTION remote('127.0.0.1', currentDatabase(), remote_test) VALUES(4);
+SELECT COUNT(*) FROM remote('127.0.0.1', currentDatabase(), remote_test);
+SELECT count(*) FROM remote('127.0.0.{1,2}', merge(currentDatabase(), '^remote_test'));
+DROP TABLE remote_test;
diff --git a/parser/testdata/00676_group_by_in/ast.json b/parser/testdata/00676_group_by_in/ast.json
new file mode 100644
index 000000000..9cde47112
--- /dev/null
+++ b/parser/testdata/00676_group_by_in/ast.json
@@ -0,0 +1,58 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 2)"
+		},
+		{
+			"explain": "   ExpressionList (children 2)"
+		},
+		{
+			"explain": "    Function in (alias x) (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 2)"
+		},
+		{
+			"explain": "      Identifier dummy"
+		},
+		{
+			"explain": "      Literal UInt64_0"
+		},
+		{
+			"explain": "    Function count (children 1)"
+		},
+		{
+			"explain": "     ExpressionList"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Identifier x"
+		}
+	],
+
+	"rows": 12,
+
+	"statistics":
+	{
+		"elapsed": 0.001170838,
+		"rows_read": 12,
+		"bytes_read": 423
+	}
+}
diff --git a/parser/testdata/00676_group_by_in/metadata.json b/parser/testdata/00676_group_by_in/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00676_group_by_in/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00676_group_by_in/query.sql b/parser/testdata/00676_group_by_in/query.sql
new file mode 100644
index 000000000..1c5dbb293
--- /dev/null
+++ b/parser/testdata/00676_group_by_in/query.sql
@@ -0,0 +1,13 @@
+SELECT dummy IN (0) AS x, count() GROUP BY x;
+
+SELECT 1 IN (0) AS x, count() GROUP BY x;
+SELECT 0 IN (0) AS x, count() GROUP BY x;
+SELECT materialize(1) IN (0) AS x, count() GROUP BY x;
+SELECT materialize(0) IN (0) AS x, count() GROUP BY x;
+
+SELECT
+    number IN (1, 2) AS x,
+    count()
+FROM numbers(10)
+GROUP BY x
+ORDER BY x;
diff --git a/parser/testdata/00677_shard_any_heavy_merge/ast.json b/parser/testdata/00677_shard_any_heavy_merge/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00677_shard_any_heavy_merge/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00677_shard_any_heavy_merge/metadata.json b/parser/testdata/00677_shard_any_heavy_merge/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00677_shard_any_heavy_merge/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00677_shard_any_heavy_merge/query.sql b/parser/testdata/00677_shard_any_heavy_merge/query.sql
new file mode 100644
index 000000000..6d45ac478
--- /dev/null
+++ b/parser/testdata/00677_shard_any_heavy_merge/query.sql
@@ -0,0 +1,3 @@
+-- Tags: shard
+
+WITH arrayJoin(['hello', 'world']) AS s SELECT count(), arraySort(groupUniqArray(s)), anyHeavy(s) FROM remote('127.0.0.{2,3}', system.one);
diff --git a/parser/testdata/00678_murmurhash/ast.json b/parser/testdata/00678_murmurhash/ast.json
new file mode 100644
index 000000000..f8d802328
--- /dev/null
+++ b/parser/testdata/00678_murmurhash/ast.json
@@ -0,0 +1,43 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function murmurHash2_32 (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Literal UInt64_123456"
+		}
+	],
+
+	"rows": 7,
+
+	"statistics":
+	{
+		"elapsed": 0.001583032,
+		"rows_read": 7,
+		"bytes_read": 272
+	}
+}
diff --git a/parser/testdata/00678_murmurhash/metadata.json b/parser/testdata/00678_murmurhash/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00678_murmurhash/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00678_murmurhash/query.sql b/parser/testdata/00678_murmurhash/query.sql
new file mode 100644
index 000000000..705c62480
--- /dev/null
+++ b/parser/testdata/00678_murmurhash/query.sql
@@ -0,0 +1,36 @@
+SELECT murmurHash2_32(123456);
+SELECT murmurHash2_32(CAST(3 AS UInt8));
+SELECT murmurHash2_32(CAST(1.2684 AS Float32));
+SELECT murmurHash2_32(CAST(-154477 AS Int64));
+SELECT murmurHash2_32('foo');
+SELECT murmurHash2_32(CAST('bar' AS FixedString(3)));
+SELECT murmurHash2_32(x) FROM (SELECT CAST(1 AS Enum8('a' = 1, 'b' = 2)) as x);
+
+SELECT murmurHash2_32('');
+SELECT murmurHash2_32('\x01');
+SELECT murmurHash2_32('\x02\0');
+SELECT murmurHash2_32('\x03\0\0');
+SELECT murmurHash2_32(1);
+SELECT murmurHash2_32(toUInt16(2));
+
+SELECT murmurHash2_32(2) = bitXor(toUInt32(0x5bd1e995 * bitXor(toUInt32(3 * 0x5bd1e995) AS a, bitShiftRight(a, 13))) AS b, bitShiftRight(b, 15));
+SELECT murmurHash2_32('\x02') = bitXor(toUInt32(0x5bd1e995 * bitXor(toUInt32(3 * 0x5bd1e995) AS a, bitShiftRight(a, 13))) AS b, bitShiftRight(b, 15));
+
+SELECT murmurHash2_64('foo');
+SELECT murmurHash2_64('\x01');
+SELECT murmurHash2_64(1);
+
+SELECT murmurHash3_32('foo');
+SELECT murmurHash3_32('\x01');
+SELECT murmurHash3_32(1);
+
+SELECT murmurHash3_64('foo');
+SELECT murmurHash3_64('\x01');
+SELECT murmurHash3_64(1);
+
+SELECT gccMurmurHash('foo');
+SELECT gccMurmurHash('\x01');
+SELECT gccMurmurHash(1);
+
+SELECT hex(murmurHash3_128('foo'));
+SELECT hex(murmurHash3_128('\x01'));
diff --git a/parser/testdata/00678_shard_funnel_window/ast.json b/parser/testdata/00678_shard_funnel_window/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00678_shard_funnel_window/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00678_shard_funnel_window/metadata.json b/parser/testdata/00678_shard_funnel_window/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00678_shard_funnel_window/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00678_shard_funnel_window/query.sql b/parser/testdata/00678_shard_funnel_window/query.sql
new file mode 100644
index 000000000..73e489232
--- /dev/null
+++ b/parser/testdata/00678_shard_funnel_window/query.sql
@@ -0,0 +1,10 @@
+-- Tags: shard
+
+DROP TABLE IF EXISTS remote_test;
+set allow_deprecated_syntax_for_merge_tree=1;
+CREATE TABLE remote_test(uid String, its UInt32, action_code String, day Date) ENGINE = MergeTree(day, (uid, its), 8192);
+INSERT INTO remote_test SELECT toString(number) AS uid, number % 3 AS its, toString(number % 3) AS action_code, '2000-01-01' FROM system.numbers LIMIT 10000;
+SELECT level, COUNT() FROM (SELECT uid, windowFunnel(3600)(toUInt32(its), action_code != '', action_code = '2') AS level FROM remote('127.0.0.{2,3}', currentDatabase(), remote_test) GROUP BY uid) GROUP BY level;
+SELECT level, COUNT() FROM (SELECT uid, windowFunnel(3600)(toUInt32(its), action_code != '', action_code = '2') AS level FROM remote('127.0.0.{2,3}', currentDatabase(), remote_test) GROUP BY uid) GROUP BY level;
+SELECT level, COUNT() FROM (SELECT uid, windowFunnel(3600)(toUInt32(its), action_code != '', action_code = '2') AS level FROM remote('127.0.0.{2,3}', currentDatabase(), remote_test) GROUP BY uid) GROUP BY level;
+DROP TABLE IF EXISTS remote_test;
diff --git a/parser/testdata/00679_replace_asterisk/ast.json b/parser/testdata/00679_replace_asterisk/ast.json
new file mode 100644
index 000000000..f46b1f5de
--- /dev/null
+++ b/parser/testdata/00679_replace_asterisk/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001671906,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/00679_replace_asterisk/metadata.json b/parser/testdata/00679_replace_asterisk/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00679_replace_asterisk/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00679_replace_asterisk/query.sql b/parser/testdata/00679_replace_asterisk/query.sql
new file mode 100644
index 000000000..19aa939b1
--- /dev/null
+++ b/parser/testdata/00679_replace_asterisk/query.sql
@@ -0,0 +1,6 @@
+SET joined_subquery_requires_alias = 0;
+
+SELECT * FROM (SELECT 1 AS id, 2 AS value);
+SELECT * FROM (SELECT 1 AS id, 2 AS value, 3 AS A) SEMI LEFT JOIN (SELECT 1 AS id, 4 AS values, 5 AS D) USING id;
+SELECT *, d.* FROM ( SELECT 1 AS id, 2 AS value ) SEMI LEFT JOIN ( SELECT 1 AS id, 3 AS values ) AS d USING id;
+SELECT *, d.*, d.values FROM ( SELECT 1 AS id, 2 AS value ) SEMI LEFT JOIN ( SELECT 1 AS id, 3 AS values ) AS d USING id;
diff --git a/parser/testdata/00679_uuid_in_key/ast.json b/parser/testdata/00679_uuid_in_key/ast.json
new file mode 100644
index 000000000..2585cf83e
--- /dev/null
+++ b/parser/testdata/00679_uuid_in_key/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
"type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery uuid (children 1)" + }, + { + "explain": " Identifier uuid" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00121459, + "rows_read": 2, + "bytes_read": 61 + } +} diff --git a/parser/testdata/00679_uuid_in_key/metadata.json b/parser/testdata/00679_uuid_in_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00679_uuid_in_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00679_uuid_in_key/query.sql b/parser/testdata/00679_uuid_in_key/query.sql new file mode 100644 index 000000000..5eefaef6d --- /dev/null +++ b/parser/testdata/00679_uuid_in_key/query.sql @@ -0,0 +1,19 @@ +CREATE TABLE IF NOT EXISTS uuid +( + created_at DateTime, + id UUID +) +ENGINE = MergeTree +PARTITION BY toDate(created_at) +ORDER BY (created_at, id); + +INSERT INTO uuid (created_at, id) VALUES ('2018-01-01 01:02:03', '00000000-0000-03f8-9cb8-cb1b82fb3900'); + +SELECT count() FROM uuid WHERE id = '00000000-0000-03f8-9cb8-cb1b82fb3900'; +SELECT count() FROM uuid WHERE id != '00000000-0000-03f8-9cb8-cb1b82fb3900'; +SELECT count() FROM uuid WHERE id < '00000000-0000-03f8-9cb8-cb1b82fb3900'; +SELECT count() FROM uuid WHERE id > '00000000-0000-03f8-9cb8-cb1b82fb3900'; +SELECT count() FROM uuid WHERE id <= '00000000-0000-03f8-9cb8-cb1b82fb3900'; +SELECT count() FROM uuid WHERE id >= '00000000-0000-03f8-9cb8-cb1b82fb3900'; + +DROP TABLE uuid; diff --git a/parser/testdata/00680_duplicate_columns_inside_union_all/ast.json b/parser/testdata/00680_duplicate_columns_inside_union_all/ast.json new file mode 100644 index 000000000..12bc1f35f --- /dev/null +++ b/parser/testdata/00680_duplicate_columns_inside_union_all/ast.json @@ -0,0 +1,157 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias x)" + }, + { + "explain": " Literal UInt64_2 (alias y)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression 
(children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_3 (alias x)" + }, + { + "explain": " Literal UInt64_4 (alias y)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier y" + } + ], + + "rows": 45, + + "statistics": + { + "elapsed": 0.001740384, + "rows_read": 45, + "bytes_read": 1934 + } +} diff --git a/parser/testdata/00680_duplicate_columns_inside_union_all/metadata.json b/parser/testdata/00680_duplicate_columns_inside_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00680_duplicate_columns_inside_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00680_duplicate_columns_inside_union_all/query.sql b/parser/testdata/00680_duplicate_columns_inside_union_all/query.sql new file mode 100644 index 000000000..c316df368 --- /dev/null +++ b/parser/testdata/00680_duplicate_columns_inside_union_all/query.sql @@ -0,0 +1,4 @@ +SELECT x, y FROM (SELECT x, y FROM (SELECT 1 AS x, 2 AS y) UNION ALL SELECT x, x FROM (SELECT 3 AS x, 4 AS y)) ORDER BY x, y; +SELECT x, y FROM (SELECT x, y FROM (SELECT 1 AS x, 2 AS y) UNION ALL SELECT y, y FROM (SELECT 3 AS x, 4 AS y)) ORDER BY x, y; +SELECT x, y FROM (SELECT x, x, y FROM (SELECT 1 AS x, 2 AS y) UNION ALL SELECT x, y, y FROM (SELECT 3 AS x, 4 AS y)) ORDER BY x, y; +SELECT x, y FROM (SELECT x, y, y FROM (SELECT 1 AS x, 2 AS y) UNION ALL SELECT x, x, y FROM (SELECT 3 AS x, 4 AS y)) ORDER BY x, y; diff --git a/parser/testdata/00681_duplicate_columns_inside_union_all_stas_sviridov/ast.json b/parser/testdata/00681_duplicate_columns_inside_union_all_stas_sviridov/ast.json new file mode 100644 index 000000000..30358e8c8 --- /dev/null +++ b/parser/testdata/00681_duplicate_columns_inside_union_all_stas_sviridov/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_00681 (children 1)" + }, + { + "explain": " Identifier test_00681" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00100034, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00681_duplicate_columns_inside_union_all_stas_sviridov/metadata.json b/parser/testdata/00681_duplicate_columns_inside_union_all_stas_sviridov/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00681_duplicate_columns_inside_union_all_stas_sviridov/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00681_duplicate_columns_inside_union_all_stas_sviridov/query.sql b/parser/testdata/00681_duplicate_columns_inside_union_all_stas_sviridov/query.sql new file mode 100644 index 000000000..efb650f65 --- /dev/null +++ b/parser/testdata/00681_duplicate_columns_inside_union_all_stas_sviridov/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS test_00681; + +CREATE TABLE test_00681(x Int32) ENGINE = Log; +INSERT INTO test_00681 VALUES (123); + +SELECT a1 +FROM +( + SELECT x AS a1, x AS a2 FROM test_00681 + UNION ALL + SELECT x, x FROM test_00681 +); + +DROP TABLE test_00681; diff --git 
a/parser/testdata/00685_output_format_json_escape_forward_slashes/ast.json b/parser/testdata/00685_output_format_json_escape_forward_slashes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00685_output_format_json_escape_forward_slashes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00685_output_format_json_escape_forward_slashes/metadata.json b/parser/testdata/00685_output_format_json_escape_forward_slashes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00685_output_format_json_escape_forward_slashes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00685_output_format_json_escape_forward_slashes/query.sql b/parser/testdata/00685_output_format_json_escape_forward_slashes/query.sql new file mode 100644 index 000000000..6d64981d2 --- /dev/null +++ b/parser/testdata/00685_output_format_json_escape_forward_slashes/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest + +SET output_format_json_escape_forward_slashes = 1; +select '/some/cool/url' as url format JSONEachRow; +SET output_format_json_escape_forward_slashes = 0; +select '/some/cool/url' as url format JSONEachRow; diff --git a/parser/testdata/00687_insert_into_mv/ast.json b/parser/testdata/00687_insert_into_mv/ast.json new file mode 100644 index 000000000..0a821292e --- /dev/null +++ b/parser/testdata/00687_insert_into_mv/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_00687 (children 1)" + }, + { + "explain": " Identifier test_00687" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000994697, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00687_insert_into_mv/metadata.json b/parser/testdata/00687_insert_into_mv/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00687_insert_into_mv/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00687_insert_into_mv/query.sql b/parser/testdata/00687_insert_into_mv/query.sql new file mode 100644 index 000000000..2be46fade --- /dev/null +++ b/parser/testdata/00687_insert_into_mv/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS test_00687; +DROP TABLE IF EXISTS mv_bad; +DROP TABLE IF EXISTS mv_good; +DROP TABLE IF EXISTS mv_group; + +CREATE TABLE test_00687 (x String) ENGINE = Null; + +create MATERIALIZED VIEW mv_bad (x String) +ENGINE = MergeTree Partition by tuple() order by tuple() +AS SELECT DISTINCT x FROM test_00687; + +create MATERIALIZED VIEW mv_good (x String) +ENGINE = MergeTree Partition by tuple() order by tuple() +AS SELECT x FROM test_00687; + +create MATERIALIZED VIEW mv_group (x String) +ENGINE = MergeTree Partition by tuple() order by tuple() +AS SELECT x FROM test_00687 group by x; + +insert into test_00687 values ('stest'), ('stest'); + +select * from mv_bad; +SELECT '---'; +select * from mv_good; +SELECT '---'; +select * from mv_group; + +DROP TABLE mv_bad; +DROP TABLE mv_good; +DROP TABLE mv_group; +DROP TABLE test_00687; diff --git a/parser/testdata/00688_aggregation_retention/ast.json b/parser/testdata/00688_aggregation_retention/ast.json new file mode 100644 index 000000000..cee646d74 --- /dev/null +++ b/parser/testdata/00688_aggregation_retention/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery retention_test (children 1)" + }, + { + "explain": " 
Identifier retention_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001216029, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/00688_aggregation_retention/metadata.json b/parser/testdata/00688_aggregation_retention/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00688_aggregation_retention/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00688_aggregation_retention/query.sql b/parser/testdata/00688_aggregation_retention/query.sql new file mode 100644 index 000000000..45f62cb05 --- /dev/null +++ b/parser/testdata/00688_aggregation_retention/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS retention_test; + +CREATE TABLE retention_test(date Date, uid Int32)ENGINE = Memory; +INSERT INTO retention_test SELECT '2018-08-06', number FROM numbers(80); +INSERT INTO retention_test SELECT '2018-08-07', number FROM numbers(50); +INSERT INTO retention_test SELECT '2018-08-08', number FROM numbers(60); + +SELECT sum(r[1]) as r1, sum(r[2]) as r2 FROM (SELECT uid, retention(date = '2018-08-06', date = '2018-08-07') AS r FROM retention_test WHERE date IN ('2018-08-06', '2018-08-07') GROUP BY uid); +SELECT sum(r[1]) as r1, sum(r[2]) as r2 FROM (SELECT uid, retention(date = '2018-08-06', date = '2018-08-08') AS r FROM retention_test WHERE date IN ('2018-08-06', '2018-08-08') GROUP BY uid); +SELECT sum(r[1]) as r1, sum(r[2]) as r2, sum(r[3]) as r3 FROM (SELECT uid, retention(date = '2018-08-06', date = '2018-08-07', date = '2018-08-08') AS r FROM retention_test WHERE date IN ('2018-08-06', '2018-08-07', '2018-08-08') GROUP BY uid); + +DROP TABLE retention_test; diff --git a/parser/testdata/00688_case_without_else/ast.json b/parser/testdata/00688_case_without_else/ast.json new file mode 100644 index 000000000..e59a39b06 --- /dev/null +++ b/parser/testdata/00688_case_without_else/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_00688 (children 1)" + }, + { + "explain": " Identifier test_00688" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001307114, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00688_case_without_else/metadata.json b/parser/testdata/00688_case_without_else/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00688_case_without_else/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00688_case_without_else/query.sql b/parser/testdata/00688_case_without_else/query.sql new file mode 100644 index 000000000..6235781b7 --- /dev/null +++ b/parser/testdata/00688_case_without_else/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS test_00688; + +CREATE TABLE test_00688 (a UInt8) ENGINE = Memory; + +INSERT INTO test_00688 VALUES (1), (2), (1), (3); + +SELECT CASE WHEN a=1 THEN 0 END FROM test_00688; + +DROP TABLE test_00688; diff --git a/parser/testdata/00688_low_cardinality_alter_add_column/ast.json b/parser/testdata/00688_low_cardinality_alter_add_column/ast.json new file mode 100644 index 000000000..1f2065de6 --- /dev/null +++ b/parser/testdata/00688_low_cardinality_alter_add_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery cardinality (children 1)" + }, + { + "explain": " Identifier cardinality" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001031167, + "rows_read": 2, + 
"bytes_read": 74 + } +} diff --git a/parser/testdata/00688_low_cardinality_alter_add_column/metadata.json b/parser/testdata/00688_low_cardinality_alter_add_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00688_low_cardinality_alter_add_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00688_low_cardinality_alter_add_column/query.sql b/parser/testdata/00688_low_cardinality_alter_add_column/query.sql new file mode 100644 index 000000000..8847ec223 --- /dev/null +++ b/parser/testdata/00688_low_cardinality_alter_add_column/query.sql @@ -0,0 +1,6 @@ +drop table if exists cardinality; +create table cardinality (x String) engine = MergeTree order by tuple(); +insert into cardinality (x) select concat('v', toString(number)) from numbers(10); +alter table cardinality add column y LowCardinality(String); +select * from cardinality; +drop table if exists cardinality; diff --git a/parser/testdata/00688_low_cardinality_defaults/ast.json b/parser/testdata/00688_low_cardinality_defaults/ast.json new file mode 100644 index 000000000..20c84d6fe --- /dev/null +++ b/parser/testdata/00688_low_cardinality_defaults/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier val" + }, + { + "explain": " Literal 'UInt64'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (alias val) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['1']" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001032767, + "rows_read": 21, + "bytes_read": 877 + } +} diff --git a/parser/testdata/00688_low_cardinality_defaults/metadata.json b/parser/testdata/00688_low_cardinality_defaults/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00688_low_cardinality_defaults/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00688_low_cardinality_defaults/query.sql b/parser/testdata/00688_low_cardinality_defaults/query.sql new file mode 100644 index 000000000..149d543de --- /dev/null +++ b/parser/testdata/00688_low_cardinality_defaults/query.sql @@ -0,0 +1,4 @@ +select CAST(toLowCardinality(val) as UInt64) from (select arrayJoin(['1']) as val); +select toUInt64(toLowCardinality(val)) from (select arrayJoin(['1']) as val); +select 1 % toLowCardinality(val) from (select arrayJoin([1]) as val); +select gcd(1, toLowCardinality(val)) from (select arrayJoin([1]) as val); diff --git a/parser/testdata/00688_low_cardinality_dictionary_deserialization/ast.json 
b/parser/testdata/00688_low_cardinality_dictionary_deserialization/ast.json new file mode 100644 index 000000000..7a4fd0542 --- /dev/null +++ b/parser/testdata/00688_low_cardinality_dictionary_deserialization/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery lc_dict_reading (children 1)" + }, + { + "explain": " Identifier lc_dict_reading" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001199466, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/00688_low_cardinality_dictionary_deserialization/metadata.json b/parser/testdata/00688_low_cardinality_dictionary_deserialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00688_low_cardinality_dictionary_deserialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00688_low_cardinality_dictionary_deserialization/query.sql b/parser/testdata/00688_low_cardinality_dictionary_deserialization/query.sql new file mode 100644 index 000000000..d359efd8d --- /dev/null +++ b/parser/testdata/00688_low_cardinality_dictionary_deserialization/query.sql @@ -0,0 +1,5 @@ +drop table if exists lc_dict_reading; +create table lc_dict_reading (val UInt64, str LowCardinality(String), pat String) engine = MergeTree order by val SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into lc_dict_reading select number, if(number < 8192 * 4, number % 100, number) as s, s from system.numbers limit 1000000; +select sum(toUInt64(str)), sum(toUInt64(pat)) from lc_dict_reading where val < 8129 or val > 8192 * 4; +drop table if exists lc_dict_reading; diff --git a/parser/testdata/00688_low_cardinality_in/ast.json b/parser/testdata/00688_low_cardinality_in/ast.json new file mode 100644 index 000000000..624080ecc --- /dev/null +++ b/parser/testdata/00688_low_cardinality_in/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000978499, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00688_low_cardinality_in/metadata.json b/parser/testdata/00688_low_cardinality_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00688_low_cardinality_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00688_low_cardinality_in/query.sql b/parser/testdata/00688_low_cardinality_in/query.sql new file mode 100644 index 000000000..c39fdb371 --- /dev/null +++ b/parser/testdata/00688_low_cardinality_in/query.sql @@ -0,0 +1,17 @@ +set allow_suspicious_low_cardinality_types = 1; +drop table if exists lc_00688; +create table lc_00688 (str LowCardinality(String), val LowCardinality(UInt8)) engine = MergeTree order by tuple(); +insert into lc_00688 values ('a', 1), ('b', 2); +select str, str in ('a', 'd') from lc_00688; +select val, val in (1, 3) from lc_00688; +select str, str in (select arrayJoin(['a', 'd'])) from lc_00688; +select val, val in (select arrayJoin([1, 3])) from lc_00688; +select str, str in (select str from lc_00688) from lc_00688; +select val, val in (select val from lc_00688) from lc_00688; +drop table if exists lc_00688; + +drop table if exists ary_lc_null; +CREATE TABLE ary_lc_null (i int, v Array(LowCardinality(Nullable(String)))) ENGINE = MergeTree() ORDER BY i ; +INSERT INTO ary_lc_null VALUES (1, ['1']); +SELECT v FROM 
ary_lc_null WHERE v IN (SELECT v FROM ary_lc_null); +drop table if exists ary_lc_null; diff --git a/parser/testdata/00688_low_cardinality_nullable_cast/ast.json b/parser/testdata/00688_low_cardinality_nullable_cast/ast.json new file mode 100644 index 000000000..69d9a9a7a --- /dev/null +++ b/parser/testdata/00688_low_cardinality_nullable_cast/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001175879, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00688_low_cardinality_nullable_cast/metadata.json b/parser/testdata/00688_low_cardinality_nullable_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00688_low_cardinality_nullable_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00688_low_cardinality_nullable_cast/query.sql b/parser/testdata/00688_low_cardinality_nullable_cast/query.sql new file mode 100644 index 000000000..5e25e50d4 --- /dev/null +++ b/parser/testdata/00688_low_cardinality_nullable_cast/query.sql @@ -0,0 +1,12 @@ +set allow_suspicious_low_cardinality_types = 1; +SELECT CAST(NULL, 'LowCardinality(Nullable(Int8))'); + +drop table if exists lc_null_int8_defnull; +CREATE TABLE lc_null_int8_defnull (val LowCardinality(Nullable(Int8)) DEFAULT NULL) ENGINE = MergeTree order by tuple(); +insert into lc_null_int8_defnull values (1); +select * from lc_null_int8_defnull values; +alter table lc_null_int8_defnull add column val2 LowCardinality(Nullable(Int8)) DEFAULT NULL; +insert into lc_null_int8_defnull values (2, 3); +select * from lc_null_int8_defnull order by val; +drop table if exists lc_null_int8_defnull; + diff --git a/parser/testdata/00688_low_cardinality_prewhere/ast.json b/parser/testdata/00688_low_cardinality_prewhere/ast.json new file mode 100644 index 000000000..ac280512d --- /dev/null +++ b/parser/testdata/00688_low_cardinality_prewhere/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery lc_prewhere (children 1)" + }, + { + "explain": " Identifier lc_prewhere" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001361055, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/00688_low_cardinality_prewhere/metadata.json b/parser/testdata/00688_low_cardinality_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00688_low_cardinality_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00688_low_cardinality_prewhere/query.sql b/parser/testdata/00688_low_cardinality_prewhere/query.sql new file mode 100644 index 000000000..17c74b7ca --- /dev/null +++ b/parser/testdata/00688_low_cardinality_prewhere/query.sql @@ -0,0 +1,5 @@ +drop table if exists lc_prewhere; +create table lc_prewhere (key UInt64, val UInt64, str LowCardinality(String), s String) engine = MergeTree order by key settings index_granularity = 8192; +insert into lc_prewhere select number, if(number < 10 or number > 8192 * 9, 1, 0), toString(number) as s, s from system.numbers limit 100000; +select sum(toUInt64(str)), sum(toUInt64(s)) from lc_prewhere prewhere val == 1; +drop table if exists lc_prewhere; diff --git a/parser/testdata/00688_low_cardinality_serialization/ast.json b/parser/testdata/00688_low_cardinality_serialization/ast.json new file mode 100644 index 
000000000..d6872bbf5 --- /dev/null +++ b/parser/testdata/00688_low_cardinality_serialization/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'NativeReader'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001241504, + "rows_read": 5, + "bytes_read": 183 + } +} diff --git a/parser/testdata/00688_low_cardinality_serialization/metadata.json b/parser/testdata/00688_low_cardinality_serialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00688_low_cardinality_serialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00688_low_cardinality_serialization/query.sql b/parser/testdata/00688_low_cardinality_serialization/query.sql new file mode 100644 index 000000000..1e4de3f3d --- /dev/null +++ b/parser/testdata/00688_low_cardinality_serialization/query.sql @@ -0,0 +1,27 @@ +select 'NativeReader'; +select toTypeName(dict), dict, lowCardinalityIndices(dict), lowCardinalityKeys(dict) from (select '123_' || toLowCardinality(v) as dict from (select arrayJoin(['a', 'bb', '', 'a', 'ccc', 'a', 'bb', '', 'dddd']) as v)); +select '-'; +select toTypeName(dict), dict, lowCardinalityIndices(dict), lowCardinalityKeys(dict) from (select '123_' || toLowCardinality(v) as dict from (select arrayJoin(['a', Null, 'bb', '', 'a', Null, 'ccc', 'a', 'bb', '', 'dddd']) as v)); + +select 'MergeTree'; + +drop table if exists lc_small_dict; +drop table if exists lc_big_dict; + +create table lc_small_dict (str LowCardinality(String)) engine = MergeTree order by str SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +create table lc_big_dict (str LowCardinality(String)) engine = MergeTree order by str SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into lc_small_dict select toString(number % 1000) from system.numbers limit 1000000; +insert into lc_big_dict select toString(number) from system.numbers limit 1000000; + +detach table lc_small_dict; +detach table lc_big_dict; + +attach table lc_small_dict; +attach table lc_big_dict; + +select sum(toUInt64OrZero(str)) from lc_small_dict; +select sum(toUInt64OrZero(str)) from lc_big_dict; + +drop table if exists lc_small_dict; +drop table if exists lc_big_dict; diff --git a/parser/testdata/00688_low_cardinality_syntax/ast.json b/parser/testdata/00688_low_cardinality_syntax/ast.json new file mode 100644 index 000000000..3c92eb9b5 --- /dev/null +++ b/parser/testdata/00688_low_cardinality_syntax/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001369762, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00688_low_cardinality_syntax/metadata.json b/parser/testdata/00688_low_cardinality_syntax/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00688_low_cardinality_syntax/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00688_low_cardinality_syntax/query.sql b/parser/testdata/00688_low_cardinality_syntax/query.sql new file mode 100644 index 000000000..dccdac1d9 --- /dev/null +++ 
b/parser/testdata/00688_low_cardinality_syntax/query.sql @@ -0,0 +1,58 @@ +set allow_suspicious_low_cardinality_types = 1; +drop table if exists lc_str_0; +drop table if exists lc_str_1; +drop table if exists lc_null_str_0; +drop table if exists lc_null_str_1; +drop table if exists lc_int8_0; +drop table if exists lc_int8_1; +drop table if exists lc_null_int8_0; +drop table if exists lc_null_int8_1; +drop table if exists lc_fix_str_0; +drop table if exists lc_fix_str_1; +drop table if exists lc_null_fix_str_0; +drop table if exists lc_null_fix_str_1; + +create table lc_str_0 (str LowCardinality(String)) engine = Memory; +create table lc_null_str_0 (str LowCardinality(Nullable(String))) engine = Memory; +create table lc_int8_0 (val LowCardinality(Int8)) engine = Memory; +create table lc_null_int8_0 (val LowCardinality(Nullable(Int8))) engine = Memory; +create table lc_fix_str_0 (str LowCardinality(FixedString(2))) engine = Memory; +create table lc_null_fix_str_0 (str LowCardinality(Nullable(FixedString(2)))) engine = Memory; + +insert into lc_str_0 select 'a'; +insert into lc_null_str_0 select 'a'; +insert into lc_int8_0 select 1; +insert into lc_null_int8_0 select 1; +insert into lc_fix_str_0 select 'ab'; +insert into lc_null_fix_str_0 select 'ab'; + +select str from lc_str_0; +select str from lc_null_str_0; +select val from lc_int8_0; +select val from lc_null_int8_0; +select str from lc_fix_str_0; +select str from lc_null_fix_str_0; + +drop table if exists lc_str_0; +drop table if exists lc_null_str_0; +drop table if exists lc_int8_0; +drop table if exists lc_null_int8_0; +drop table if exists lc_fix_str_0; +drop table if exists lc_null_fix_str_0; + +select '-'; +SELECT toLowCardinality('a') AS s, toTypeName(s), toTypeName(length(s)) from system.one; +select toLowCardinality('a') as val group by val order by val; +select (toLowCardinality('a') as val) || 'b' group by val order by val; +select toLowCardinality(z) as val from (select arrayJoin(['c', 'd']) as z) group by val order by val; +select (toLowCardinality(z) as val) || 'b' from (select arrayJoin(['c', 'd']) as z) group by val order by val; + +select '-'; +drop table if exists lc_str_uuid; +create table lc_str_uuid(str1 String, str2 LowCardinality(String), str3 LowCardinality(String)) ENGINE=Memory; +select toUUID(str1), toUUID(str2), toUUID(str3) from lc_str_uuid; +select toUUID(str1, '', NULL), toUUID(str2, '', NULL), toUUID(str3, '', NULL) from lc_str_uuid; +insert into lc_str_uuid values ('61f0c404-5cb3-11e7-907b-a6006ad3dba0', '61f0c404-5cb3-11e7-907b-a6006ad3dba0', '61f0c404-5cb3-11e7-907b-a6006ad3dba0'); +select toUUID(str1), toUUID(str2), toUUID(str3) from lc_str_uuid; +select toUUID(str1, '', NULL), toUUID(str2, '', NULL), toUUID(str3, '', NULL) from lc_str_uuid; +drop table if exists lc_str_uuid; diff --git a/parser/testdata/00689_join_table_function/ast.json b/parser/testdata/00689_join_table_function/ast.json new file mode 100644 index 000000000..fa1b9d626 --- /dev/null +++ b/parser/testdata/00689_join_table_function/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " 
TableExpression (children 1)" + }, + { + "explain": " Function numbers (alias a) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (alias b) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " TableJoin (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier a.number" + }, + { + "explain": " Identifier b.number" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier a.number" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001313124, + "rows_read": 24, + "bytes_read": 948 + } +} diff --git a/parser/testdata/00689_join_table_function/metadata.json b/parser/testdata/00689_join_table_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00689_join_table_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00689_join_table_function/query.sql b/parser/testdata/00689_join_table_function/query.sql new file mode 100644 index 000000000..c8e9100dc --- /dev/null +++ b/parser/testdata/00689_join_table_function/query.sql @@ -0,0 +1 @@ +SELECT * FROM numbers(3) AS a ANY LEFT JOIN numbers(3) AS b ON a.number = b.number ORDER BY a.number; diff --git a/parser/testdata/00691_array_distinct/ast.json b/parser/testdata/00691_array_distinct/ast.json new file mode 100644 index 000000000..ffff0a678 --- /dev/null +++ b/parser/testdata/00691_array_distinct/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayDistinct (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001608761, + "rows_read": 23, + "bytes_read": 940 + } +} diff --git a/parser/testdata/00691_array_distinct/metadata.json b/parser/testdata/00691_array_distinct/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00691_array_distinct/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/00691_array_distinct/query.sql b/parser/testdata/00691_array_distinct/query.sql new file mode 100644 index 000000000..59a523fc5 --- /dev/null +++ b/parser/testdata/00691_array_distinct/query.sql @@ -0,0 +1,14 @@ +SELECT arrayDistinct(arrayMap(x -> 0, range(2))) FROM numbers(2); + +SELECT arrayDistinct(materialize([[0], [0]])) FROM numbers(2); +SELECT arrayDistinct(materialize(['', '', ''])) FROM numbers(2); +SELECT arrayDistinct(materialize([0, 0, 0])) FROM numbers(2); +SELECT arrayDistinct(materialize([0, 1, 1, 0])) FROM numbers(2); +SELECT arrayDistinct(materialize(['', 'Hello', ''])) FROM numbers(2); + + +SELECT arrayDistinct(materialize([[0], [0]])) FROM numbers(2); +SELECT arrayDistinct(materialize(['', NULL, ''])) FROM numbers(2); +SELECT arrayDistinct(materialize([0, NULL, 0])) FROM numbers(2); +SELECT arrayDistinct(materialize([0, 1, NULL, 0])) FROM numbers(2); +SELECT arrayDistinct(materialize(['', 'Hello', NULL])) FROM numbers(2); diff --git a/parser/testdata/00692_if_exception_code/ast.json b/parser/testdata/00692_if_exception_code/ast.json new file mode 100644 index 000000000..7edc63532 --- /dev/null +++ b/parser/testdata/00692_if_exception_code/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001175111, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00692_if_exception_code/metadata.json b/parser/testdata/00692_if_exception_code/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00692_if_exception_code/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00692_if_exception_code/query.sql b/parser/testdata/00692_if_exception_code/query.sql new file mode 100644 index 000000000..bdca46be6 --- /dev/null +++ b/parser/testdata/00692_if_exception_code/query.sql @@ -0,0 +1,6 @@ +SET send_logs_level = 'fatal'; + +SELECT if(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT if(1); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT if(1, 1); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT if(1, 1, 1); diff --git a/parser/testdata/00693_max_block_size_system_tables_columns/ast.json b/parser/testdata/00693_max_block_size_system_tables_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00693_max_block_size_system_tables_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00693_max_block_size_system_tables_columns/metadata.json b/parser/testdata/00693_max_block_size_system_tables_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00693_max_block_size_system_tables_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00693_max_block_size_system_tables_columns/query.sql b/parser/testdata/00693_max_block_size_system_tables_columns/query.sql new file mode 100644 index 000000000..ba98dc079 --- /dev/null +++ b/parser/testdata/00693_max_block_size_system_tables_columns/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-parallel, memory-engine + +-- NOTE: database = currentDatabase() is not mandatory + +SELECT avg(blockSize()) <= 10 FROM system.tables SETTINGS max_block_size = 10; +SELECT avg(blockSize()) <= 10 FROM system.tables LIMIT 10 SETTINGS max_block_size = 10; +SELECT (SELECT count() FROM system.tables SETTINGS max_block_size = 10) = (SELECT count() FROM 
system.tables SETTINGS max_block_size = 9); +SELECT (SELECT count() FROM system.tables SETTINGS max_block_size = 100) = (SELECT count() FROM system.tables SETTINGS max_block_size = 1000); + +DROP TEMPORARY TABLE IF EXISTS t_00693; +CREATE TEMPORARY TABLE t_00693 (x UInt8); +SELECT database, name, engine, is_temporary, data_paths, metadata_path, toTimeZone(metadata_modification_time, 'UTC'), dependencies_database, + dependencies_table, create_table_query, engine_full, partition_key, sorting_key, primary_key, sampling_key, storage_policy, + total_rows, total_bytes FROM system.tables WHERE is_temporary AND name='t_00693'; + +SELECT avg(blockSize()) <= 10000 FROM system.columns SETTINGS max_block_size = 10; +SELECT avg(blockSize()) <= 10000 FROM system.columns LIMIT 10 SETTINGS max_block_size = 10; +SELECT (SELECT count() FROM system.columns SETTINGS max_block_size = 10) = (SELECT count() FROM system.columns SETTINGS max_block_size = 9); +SELECT (SELECT count() FROM system.columns SETTINGS max_block_size = 100) = (SELECT count() FROM system.columns SETTINGS max_block_size = 1000); +SELECT (SELECT count() FROM system.columns SETTINGS max_block_size = 13) = (SELECT count() FROM system.columns SETTINGS max_block_size = 1000000); diff --git a/parser/testdata/00695_pretty_max_column_pad_width/ast.json b/parser/testdata/00695_pretty_max_column_pad_width/ast.json new file mode 100644 index 000000000..e265e8f79 --- /dev/null +++ b/parser/testdata/00695_pretty_max_column_pad_width/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00105676, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00695_pretty_max_column_pad_width/metadata.json b/parser/testdata/00695_pretty_max_column_pad_width/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00695_pretty_max_column_pad_width/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00695_pretty_max_column_pad_width/query.sql b/parser/testdata/00695_pretty_max_column_pad_width/query.sql new file mode 100644 index 000000000..ab1707f14 --- /dev/null +++ b/parser/testdata/00695_pretty_max_column_pad_width/query.sql @@ -0,0 +1,4 @@ +SET output_format_pretty_display_footer_column_names=0; +SET output_format_pretty_max_column_pad_width = 250; +SET output_format_pretty_fallback_to_vertical = 0; +SELECT range(number) FROM system.numbers LIMIT 100 FORMAT PrettyCompactNoEscapes; diff --git a/parser/testdata/00696_system_columns_limit/ast.json b/parser/testdata/00696_system_columns_limit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00696_system_columns_limit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00696_system_columns_limit/metadata.json b/parser/testdata/00696_system_columns_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00696_system_columns_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00696_system_columns_limit/query.sql b/parser/testdata/00696_system_columns_limit/query.sql new file mode 100644 index 000000000..c7bb53f4c --- /dev/null +++ b/parser/testdata/00696_system_columns_limit/query.sql @@ -0,0 +1,13 @@ +-- NOTE: database = currentDatabase() is not mandatory + +SELECT count() > 0 FROM (SELECT * FROM system.columns LIMIT 0); +SELECT count() > 0 FROM (SELECT * FROM 
system.columns LIMIT 1); +SELECT count() > 0 FROM (SELECT * FROM system.columns LIMIT 2); +SELECT count() > 0 FROM (SELECT * FROM system.columns LIMIT 3); +SELECT count() > 0 FROM (SELECT * FROM system.columns LIMIT 4); +SELECT count() > 0 FROM (SELECT * FROM system.columns LIMIT 5); +SELECT count() > 0 FROM (SELECT * FROM system.columns LIMIT 6); +SELECT count() > 0 FROM (SELECT * FROM system.columns LIMIT 7); +SELECT count() > 0 FROM (SELECT * FROM system.columns LIMIT 8); +SELECT count() > 0 FROM (SELECT * FROM system.columns LIMIT 9); +SELECT count() > 0 FROM (SELECT * FROM system.columns LIMIT 10); diff --git a/parser/testdata/00697_in_subquery_shard/ast.json b/parser/testdata/00697_in_subquery_shard/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00697_in_subquery_shard/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00697_in_subquery_shard/metadata.json b/parser/testdata/00697_in_subquery_shard/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00697_in_subquery_shard/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00697_in_subquery_shard/query.sql b/parser/testdata/00697_in_subquery_shard/query.sql new file mode 100644 index 000000000..634cfe422 --- /dev/null +++ b/parser/testdata/00697_in_subquery_shard/query.sql @@ -0,0 +1,27 @@ +-- Tags: shard + +SELECT 0 IN 0; +SELECT 0 IN 1; +SELECT 0 IN (SELECT 0); +SELECT 0 IN (SELECT 1); + +SELECT dummy IN (SELECT 0) FROM remote('127.0.0.1', system.one); +SELECT dummy IN (SELECT 1) FROM remote('127.0.0.1', system.one); + +SELECT dummy IN (SELECT 0) FROM remote('127.0.0.{1,2}', system.one); +SELECT dummy IN (SELECT 1) FROM remote('127.0.0.{1,2}', system.one); + +SELECT number IN (SELECT toUInt64(arrayJoin([1, 8]))) FROM remote('127.0.0.{1,2}', numbers(10)); + +SELECT arrayExists(x -> (x IN (SELECT 1)), [1]) FROM remote('127.0.0.{1,2}', system.one); +SELECT sumIf(number, arrayExists(x -> (x IN (SELECT 1)), [1])) FROM remote('127.0.0.{1,2}', numbers(10)); + +SET prefer_localhost_replica = 0; + +SELECT dummy IN (SELECT 0) FROM remote('127.0.0.{1,2}', system.one); +SELECT dummy IN (SELECT 1) FROM remote('127.0.0.{1,2}', system.one); + +SELECT number IN (SELECT toUInt64(arrayJoin([1, 8]))) FROM remote('127.0.0.{1,2}', numbers(10)); + +SELECT arrayExists(x -> (x IN (SELECT 1)), [1]) FROM remote('127.0.0.{1,2}', system.one); +SELECT sumIf(number, arrayExists(x -> (x IN (SELECT 1)), [1])) FROM remote('127.0.0.{1,2}', numbers(10)); diff --git a/parser/testdata/00698_validate_array_sizes_for_nested/ast.json b/parser/testdata/00698_validate_array_sizes_for_nested/ast.json new file mode 100644 index 000000000..4c4c6ea2f --- /dev/null +++ b/parser/testdata/00698_validate_array_sizes_for_nested/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000935214, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00698_validate_array_sizes_for_nested/metadata.json b/parser/testdata/00698_validate_array_sizes_for_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00698_validate_array_sizes_for_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00698_validate_array_sizes_for_nested/query.sql b/parser/testdata/00698_validate_array_sizes_for_nested/query.sql new file mode 100644 index 
000000000..a1fe531e6 --- /dev/null +++ b/parser/testdata/00698_validate_array_sizes_for_nested/query.sql @@ -0,0 +1,12 @@ +SET send_logs_level = 'fatal'; + +DROP TABLE IF EXISTS mergetree_00698; +CREATE TABLE mergetree_00698 (k UInt32, `n.x` Array(UInt64), `n.y` Array(UInt64)) ENGINE = MergeTree ORDER BY k; + +INSERT INTO mergetree_00698 VALUES (3, [], [1, 2, 3]), (1, [111], []), (2, [], []); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } +SELECT * FROM mergetree_00698; + +INSERT INTO mergetree_00698 VALUES (3, [4, 5, 6], [1, 2, 3]), (1, [111], [222]), (2, [], []); +SELECT * FROM mergetree_00698; + +DROP TABLE mergetree_00698; diff --git a/parser/testdata/00698_validate_array_sizes_for_nested_kshvakov/ast.json b/parser/testdata/00698_validate_array_sizes_for_nested_kshvakov/ast.json new file mode 100644 index 000000000..42b44b858 --- /dev/null +++ b/parser/testdata/00698_validate_array_sizes_for_nested_kshvakov/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001123606, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00698_validate_array_sizes_for_nested_kshvakov/metadata.json b/parser/testdata/00698_validate_array_sizes_for_nested_kshvakov/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00698_validate_array_sizes_for_nested_kshvakov/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00698_validate_array_sizes_for_nested_kshvakov/query.sql b/parser/testdata/00698_validate_array_sizes_for_nested_kshvakov/query.sql new file mode 100644 index 000000000..6533f55c8 --- /dev/null +++ b/parser/testdata/00698_validate_array_sizes_for_nested_kshvakov/query.sql @@ -0,0 +1,17 @@ +SET send_logs_level = 'fatal'; + +DROP TABLE IF EXISTS Issue_2231_Invalid_Nested_Columns_Size; +CREATE TABLE Issue_2231_Invalid_Nested_Columns_Size ( + Date Date, + NestedColumn Nested( + ID Int32, + Count Int64 + ) +) Engine = MergeTree + PARTITION BY tuple() + ORDER BY Date; + +INSERT INTO Issue_2231_Invalid_Nested_Columns_Size VALUES (today(), [2,2], [1]), (today(), [2,2], [1, 1]); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } + +SELECT * FROM Issue_2231_Invalid_Nested_Columns_Size; +DROP TABLE Issue_2231_Invalid_Nested_Columns_Size; diff --git a/parser/testdata/00700_decimal_aggregates/ast.json b/parser/testdata/00700_decimal_aggregates/ast.json new file mode 100644 index 000000000..e30b1f05d --- /dev/null +++ b/parser/testdata/00700_decimal_aggregates/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery decimal (children 1)" + }, + { + "explain": " Identifier decimal" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001099425, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00700_decimal_aggregates/metadata.json b/parser/testdata/00700_decimal_aggregates/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00700_decimal_aggregates/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00700_decimal_aggregates/query.sql b/parser/testdata/00700_decimal_aggregates/query.sql new file mode 100644 index 000000000..a59bfb76d --- /dev/null +++ b/parser/testdata/00700_decimal_aggregates/query.sql @@ -0,0 +1,118 @@ +DROP TABLE IF EXISTS decimal; + +CREATE TABLE decimal +( + a Decimal32(4), + b Decimal64(8), 
+ c Decimal128(8) +) ENGINE = Memory; + +INSERT INTO decimal (a, b, c) +SELECT toDecimal32(number - 50, 4), toDecimal64(number - 50, 8) / 3, toDecimal128(number - 50, 8) / 5 +FROM system.numbers LIMIT 101; + +SELECT count(a), count(b), count(c) FROM decimal; +SELECT [min(a), max(a)], [min(b), max(b)], [min(c), max(c)] FROM decimal; + +SELECT sum(a), sum(b), sum(c), sumWithOverflow(a), sumWithOverflow(b), sumWithOverflow(c) FROM decimal; +SELECT sum(a), sum(b), sum(c), sumWithOverflow(a), sumWithOverflow(b), sumWithOverflow(c) FROM decimal WHERE a > 0; +SELECT sum(a), sum(b), sum(c), sumWithOverflow(a), sumWithOverflow(b), sumWithOverflow(c) FROM decimal WHERE a < 0; +SELECT sum(a+1), sum(b+1), sum(c+1), sumWithOverflow(a+1), sumWithOverflow(b+1), sumWithOverflow(c+1) FROM decimal; +SELECT sum(a-1), sum(b-1), sum(c-1), sumWithOverflow(a-1), sumWithOverflow(b-1), sumWithOverflow(c-1) FROM decimal; + +SELECT (uniq(a), uniq(b), uniq(c)), + (uniqCombined(a), uniqCombined(b), uniqCombined(c)), + (uniqCombined(17)(a), uniqCombined(17)(b), uniqCombined(17)(c)), + (uniqExact(a), uniqExact(b), uniqExact(c)), + (102 - uniqHLL12(a) >= 0, 102 - uniqHLL12(b) >= 0, 102 - uniqHLL12(c) >= 0, uniqHLL12(a) - 99 >= 0, uniqHLL12(b) - 99 >= 0, uniqHLL12(c) - 99 >= 0) +FROM (SELECT * FROM decimal ORDER BY a); + +SELECT uniqUpTo(10)(a), uniqUpTo(10)(b), uniqUpTo(10)(c) FROM decimal WHERE a >= 0 AND a < 5; +SELECT uniqUpTo(10)(a), uniqUpTo(10)(b), uniqUpTo(10)(c) FROM decimal WHERE a >= 0 AND a < 10; + +SELECT argMin(a, b), argMin(a, c), argMin(b, a), argMin(b, c), argMin(c, a), argMin(c, b) FROM decimal; +SELECT argMin(a, b), argMin(a, c), argMin(b, a), argMin(b, c), argMin(c, a), argMin(c, b) FROM decimal WHERE a > 0; +SELECT argMax(a, b), argMax(a, c), argMax(b, a), argMax(b, c), argMax(c, a), argMax(c, b) FROM decimal; +SELECT argMax(a, b), argMax(a, c), argMax(b, a), argMax(b, c), argMax(c, a), argMax(c, b) FROM decimal WHERE a < 0; + +SELECT median(a), median(b), median(c) as x, toTypeName(x) FROM decimal; +SELECT quantile(a), quantile(b), quantile(c) as x, toTypeName(x) FROM decimal WHERE a < 0; +SELECT quantile(0.0)(a), quantile(0.0)(b), quantile(0.0)(c) FROM decimal WHERE a >= 0; +SELECT quantile(0.2)(a), quantile(0.2)(b), quantile(0.2)(c) FROM decimal WHERE a >= 0; +SELECT quantile(0.4)(a), quantile(0.4)(b), quantile(0.4)(c) FROM decimal WHERE a >= 0; +SELECT quantile(0.6)(a), quantile(0.6)(b), quantile(0.6)(c) FROM decimal WHERE a >= 0; +SELECT quantile(0.8)(a), quantile(0.8)(b), quantile(0.8)(c) FROM decimal WHERE a >= 0; +SELECT quantile(1.0)(a), quantile(1.0)(b), quantile(1.0)(c) FROM decimal WHERE a >= 0; +SELECT quantiles(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(a) FROM decimal; +SELECT quantiles(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(b) FROM decimal; +SELECT quantiles(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(c) FROM decimal; + +SELECT medianExact(a), medianExact(b), medianExact(c) as x, toTypeName(x) FROM decimal; +SELECT quantileExact(a), quantileExact(b), quantileExact(c) as x, toTypeName(x) FROM decimal WHERE a < 0; +SELECT quantileExact(0.0)(a), quantileExact(0.0)(b), quantileExact(0.0)(c) FROM decimal WHERE a >= 0; +SELECT quantileExact(0.2)(a), quantileExact(0.2)(b), quantileExact(0.2)(c) FROM decimal WHERE a >= 0; +SELECT quantileExact(0.4)(a), quantileExact(0.4)(b), quantileExact(0.4)(c) FROM decimal WHERE a >= 0; +SELECT quantileExact(0.6)(a), quantileExact(0.6)(b), quantileExact(0.6)(c) FROM decimal WHERE a >= 0; +SELECT quantileExact(0.8)(a), 
quantileExact(0.8)(b), quantileExact(0.8)(c) FROM decimal WHERE a >= 0; +SELECT quantileExact(1.0)(a), quantileExact(1.0)(b), quantileExact(1.0)(c) FROM decimal WHERE a >= 0; +SELECT quantilesExact(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(a) FROM decimal; +SELECT quantilesExact(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(b) FROM decimal; +SELECT quantilesExact(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(c) FROM decimal; + +SELECT medianExactLow(a), medianExactLow(b), medianExactLow(c) as x, toTypeName(x) FROM decimal; +SELECT quantileExactLow(a), quantileExactLow(b), quantileExactLow(c) as x, toTypeName(x) FROM decimal WHERE a < 0; +SELECT quantileExactLow(0.0)(a), quantileExactLow(0.0)(b), quantileExactLow(0.0)(c) FROM decimal WHERE a >= 0; +SELECT quantileExactLow(0.2)(a), quantileExactLow(0.2)(b), quantileExactLow(0.2)(c) FROM decimal WHERE a >= 0; +SELECT quantileExactLow(0.4)(a), quantileExactLow(0.4)(b), quantileExactLow(0.4)(c) FROM decimal WHERE a >= 0; +SELECT quantileExactLow(0.6)(a), quantileExactLow(0.6)(b), quantileExactLow(0.6)(c) FROM decimal WHERE a >= 0; +SELECT quantileExactLow(0.8)(a), quantileExactLow(0.8)(b), quantileExactLow(0.8)(c) FROM decimal WHERE a >= 0; +SELECT quantileExactLow(1.0)(a), quantileExactLow(1.0)(b), quantileExactLow(1.0)(c) FROM decimal WHERE a >= 0; +SELECT quantilesExactLow(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(a) FROM decimal; +SELECT quantilesExactLow(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(b) FROM decimal; +SELECT quantilesExactLow(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(c) FROM decimal; + +SELECT medianExactHigh(a), medianExactHigh(b), medianExactHigh(c) as x, toTypeName(x) FROM decimal; +SELECT quantileExactHigh(a), quantileExactHigh(b), quantileExactHigh(c) as x, toTypeName(x) FROM decimal WHERE a < 0; +SELECT quantileExactHigh(0.0)(a), quantileExactHigh(0.0)(b), quantileExactHigh(0.0)(c) FROM decimal WHERE a >= 0; +SELECT quantileExactHigh(0.2)(a), quantileExactHigh(0.2)(b), quantileExactHigh(0.2)(c) FROM decimal WHERE a >= 0; +SELECT quantileExactHigh(0.4)(a), quantileExactHigh(0.4)(b), quantileExactHigh(0.4)(c) FROM decimal WHERE a >= 0; +SELECT quantileExactHigh(0.6)(a), quantileExactHigh(0.6)(b), quantileExactHigh(0.6)(c) FROM decimal WHERE a >= 0; +SELECT quantileExactHigh(0.8)(a), quantileExactHigh(0.8)(b), quantileExactHigh(0.8)(c) FROM decimal WHERE a >= 0; +SELECT quantileExactHigh(1.0)(a), quantileExactHigh(1.0)(b), quantileExactHigh(1.0)(c) FROM decimal WHERE a >= 0; +SELECT quantilesExactHigh(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(a) FROM decimal; +SELECT quantilesExactHigh(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(b) FROM decimal; +SELECT quantilesExactHigh(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(c) FROM decimal; + +SELECT medianExactWeighted(a, 1), medianExactWeighted(b, 2), medianExactWeighted(c, 3) as x, toTypeName(x) FROM decimal; +SELECT quantileExactWeighted(a, 1), quantileExactWeighted(b, 2), quantileExactWeighted(c, 3) as x, toTypeName(x) FROM decimal WHERE a < 0; +SELECT quantileExactWeighted(0.0)(a, 1), quantileExactWeighted(0.0)(b, 2), quantileExactWeighted(0.0)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantileExactWeighted(0.2)(a, 1), quantileExactWeighted(0.2)(b, 2), quantileExactWeighted(0.2)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantileExactWeighted(0.4)(a, 1), quantileExactWeighted(0.4)(b, 2), quantileExactWeighted(0.4)(c, 3) FROM decimal WHERE a >= 0; +SELECT 
quantileExactWeighted(0.6)(a, 1), quantileExactWeighted(0.6)(b, 2), quantileExactWeighted(0.6)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantileExactWeighted(0.8)(a, 1), quantileExactWeighted(0.8)(b, 2), quantileExactWeighted(0.8)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantileExactWeighted(1.0)(a, 1), quantileExactWeighted(1.0)(b, 2), quantileExactWeighted(1.0)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantilesExactWeighted(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(a, 1) FROM decimal; +SELECT quantilesExactWeighted(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(b, 2) FROM decimal; +SELECT quantilesExactWeighted(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(c, 3) FROM decimal; + +SELECT varPop(a) AS va, varPop(b) AS vb, varPop(c) AS vc, toTypeName(va), toTypeName(vb), toTypeName(vc) FROM decimal; +SELECT varPop(toFloat64(a)), varPop(toFloat64(b)), varPop(toFloat64(c)) FROM decimal; +SELECT varSamp(a) AS va, varSamp(b) AS vb, varSamp(c) AS vc, toTypeName(va), toTypeName(vb), toTypeName(vc) FROM decimal; +SELECT varSamp(toFloat64(a)), varSamp(toFloat64(b)), varSamp(toFloat64(c)) FROM decimal; + +SELECT stddevPop(a) AS da, stddevPop(b) AS db, stddevPop(c) AS dc, toTypeName(da), toTypeName(db), toTypeName(dc) FROM decimal; +SELECT stddevPop(toFloat64(a)), stddevPop(toFloat64(b)), stddevPop(toFloat64(c)) FROM decimal; +SELECT stddevSamp(a) AS da, stddevSamp(b) AS db, stddevSamp(c) AS dc, toTypeName(da), toTypeName(db), toTypeName(dc) FROM decimal; +SELECT stddevSamp(toFloat64(a)), stddevSamp(toFloat64(b)), stddevSamp(toFloat64(c)) FROM decimal; + +SELECT covarPop(a, a), covarPop(b, b), covarPop(c, c) FROM decimal; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT covarSamp(a, a), covarSamp(b, b), covarSamp(c, c) FROM decimal; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT corr(a, a), corr(b, b), corr(c, c) FROM decimal; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT 1 LIMIT 0; + +DROP TABLE decimal; + +-- TODO: sumMap +-- TODO: other quantile(s) +-- TODO: groupArray, groupArrayInsertAt, groupUniqArray +-- TODO: topK diff --git a/parser/testdata/00700_decimal_arithm/ast.json b/parser/testdata/00700_decimal_arithm/ast.json new file mode 100644 index 000000000..e4f055505 --- /dev/null +++ b/parser/testdata/00700_decimal_arithm/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery decimal (children 1)" + }, + { + "explain": " Identifier decimal" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00113477, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00700_decimal_arithm/metadata.json b/parser/testdata/00700_decimal_arithm/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00700_decimal_arithm/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00700_decimal_arithm/query.sql b/parser/testdata/00700_decimal_arithm/query.sql new file mode 100644 index 000000000..8eaed345a --- /dev/null +++ b/parser/testdata/00700_decimal_arithm/query.sql @@ -0,0 +1,95 @@ +DROP TABLE IF EXISTS decimal; + +CREATE TABLE IF NOT EXISTS decimal +( + a DECIMAL(9,0), + b DECIMAL(18,0), + c DECIMAL(38,0), + d DECIMAL(9, 9), + e DEC(18, 18), + f dec(38, 38), + g Decimal(9, 3), + h decimal(18, 9), + i deciMAL(38, 18), + j dec(4, 2), + k NumEriC(23, 4), + l numeric(9, 3), + m NUMEric(18, 9), + n FixED(12, 6), + o fixed(8, 6) +) ENGINE = Memory; + +INSERT INTO decimal (a, b, c, d, e, f, g, h, i, j, 
k, l, m, n, o) VALUES (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +INSERT INTO decimal (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o) VALUES (42, 42, 42, 0.42, 0.42, 0.42, 42.42, 42.42, 42.42, 42.42, 42.42, 42.42, 42.42, 42.42, 42.42); +INSERT INTO decimal (a, b, c, d, e, f, g, h, i, j, k, l, m, n, o) VALUES (-42, -42, -42, -0.42, -0.42, -0.42, -42.42, -42.42, -42.42, -42.42, -42.42, -42.42, -42.42, -42.42, -42.42); + +SELECT a + a, a - a, a * a, a / a, intDiv(a, a), intDivOrZero(a, a) FROM decimal WHERE a = 42; +SELECT b + b, b - b, b * b, b / b, intDiv(b, b), intDivOrZero(b, b) FROM decimal WHERE b = 42; +SELECT c + c, c - c, c * c, c / c, intDiv(c, c), intDivOrZero(c, c) FROM decimal WHERE c = 42; +SELECT e + e, e - e, e * e, e / e, intDiv(e, e), intDivOrZero(e, e) FROM decimal WHERE e > 0; -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT f + f, f - f, f * f, f / f, intDiv(f, f), intDivOrZero(f, f) FROM decimal WHERE f > 0; -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT g + g, g - g, g * g, g / g, intDiv(g, g), intDivOrZero(g, g) FROM decimal WHERE g > 0; +SELECT h + h, h - h, h * h, h / h, intDiv(h, h), intDivOrZero(h, h) FROM decimal WHERE h > 0; -- { serverError DECIMAL_OVERFLOW } +SELECT h + h, h - h FROM decimal WHERE h > 0; +SELECT i + i, i - i, i * i, i / i, intDiv(i, i), intDivOrZero(i, i) FROM decimal WHERE i > 0; +SELECT i + i, i - i FROM decimal WHERE i > 0; +SELECT j + j, j - j, j * j, j / j, intDiv(j, j), intDivOrZero(j, j) FROM decimal WHERE j > 0; + +SELECT a + 21, a - 21, a - 84, a * 21, a * -21, a / 21, a / 84, intDiv(a, 21), intDivOrZero(a, 84) FROM decimal WHERE a = 42; +SELECT b + 21, b - 21, b - 84, b * 21, b * -21, b / 21, b / 84, intDiv(b, 21), intDivOrZero(b, 84) FROM decimal WHERE b = 42; +SELECT c + 21, c - 21, c - 84, c * 21, c * -21, c / 21, c / 84, intDiv(c, 21), intDivOrZero(c, 84) FROM decimal WHERE c = 42; +SELECT e + 21, e - 21, e - 84, e * 21, e * -21, e / 21, e / 84 FROM decimal WHERE e > 0; -- { serverError DECIMAL_OVERFLOW } +SELECT f + 21, f - 21, f - 84, f * 21, f * -21, f / 21, f / 84 FROM decimal WHERE f > 0; +SELECT g + 21, g - 21, g - 84, g * 21, g * -21, g / 21, g / 84, intDiv(g, 21), intDivOrZero(g, 84) FROM decimal WHERE g > 0; +SELECT h + 21, h - 21, h - 84, h * 21, h * -21, h / 21, h / 84, intDiv(h, 21), intDivOrZero(h, 84) FROM decimal WHERE h > 0; +SELECT i + 21, i - 21, i - 84, i * 21, i * -21, i / 21, i / 84, intDiv(i, 21), intDivOrZero(i, 84) FROM decimal WHERE i > 0; +SELECT j + 21, j - 21, j - 84, j * 21, j * -21, j / 21, j / 84, intDiv(j, 21), intDivOrZero(j, 84) FROM decimal WHERE j > 0; + +SELECT 21 + a, 21 - a, 84 - a, 21 * a, -21 * a, 21 / a, 84 / a, intDiv(21, a), intDivOrZero(84, a) FROM decimal WHERE a = 42; +SELECT 21 + b, 21 - b, 84 - b, 21 * b, -21 * b, 21 / b, 84 / b, intDiv(21, b), intDivOrZero(84, b) FROM decimal WHERE b = 42; +SELECT 21 + c, 21 - c, 84 - c, 21 * c, -21 * c, 21 / c, 84 / c, intDiv(21, c), intDivOrZero(84, c) FROM decimal WHERE c = 42; +SELECT 21 + e, 21 - e, 84 - e, 21 * e, -21 * e, 21 / e, 84 / e FROM decimal WHERE e > 0; -- { serverError DECIMAL_OVERFLOW } +SELECT 21 + f, 21 - f, 84 - f, 21 * f, -21 * f, 21 / f, 84 / f FROM decimal WHERE f > 0; +SELECT 21 + g, 21 - g, 84 - g, 21 * g, -21 * g, 21 / g, 84 / g, intDiv(21, g), intDivOrZero(84, g) FROM decimal WHERE g > 0; +SELECT 21 + h, 21 - h, 84 - h, 21 * h, -21 * h, 21 / h, 84 / h FROM decimal WHERE h > 0; -- { serverError DECIMAL_OVERFLOW } +SELECT 21 + h, 21 - h, 84 - h, 21 * h, -21 * h FROM decimal WHERE h > 0; +SELECT 21 + i, 21 - i, 84 - i, 
21 * i, -21 * i, 21 / i, 84 / i, intDiv(21, i), intDivOrZero(84, i) FROM decimal WHERE i > 0; +SELECT 21 + j, 21 - j, 84 - j, 21 * j, -21 * j, 21 / j, 84 / j, intDiv(21, j), intDivOrZero(84, j) FROM decimal WHERE j > 0; + +SELECT a, -a, -b, -c, -d, -e, -f, -g, -h, -j from decimal ORDER BY a; +SELECT abs(a), abs(b), abs(c), abs(d), abs(e), abs(f), abs(g), abs(h), abs(j) from decimal ORDER BY a; + +SET decimal_check_overflow = 0; + +SELECT (h * h) != 0, (h / h) != 1 FROM decimal WHERE h > 0; +SELECT (i * i) != 0, (i / i) = 1 FROM decimal WHERE i > 0; + +SELECT e + 1 > e, e + 10 > e, 1 + e > e, 10 + e > e FROM decimal WHERE e > 0; +SELECT f + 1 > f, f + 10 > f, 1 + f > f, 10 + f > f FROM decimal WHERE f > 0; + +SELECT 1 / toDecimal32(0, 0); -- { serverError ILLEGAL_DIVISION } +SELECT 1 / toDecimal64(0, 1); -- { serverError ILLEGAL_DIVISION } +SELECT 1 / toDecimal128(0, 2); -- { serverError ILLEGAL_DIVISION } +SELECT 0 / toDecimal32(0, 3); -- { serverError ILLEGAL_DIVISION } +SELECT 0 / toDecimal64(0, 4); -- { serverError ILLEGAL_DIVISION } +SELECT 0 / toDecimal128(0, 5); -- { serverError ILLEGAL_DIVISION } + +SELECT toDecimal32(0, 0) / toInt8(0); -- { serverError ILLEGAL_DIVISION } +SELECT toDecimal64(0, 1) / toInt32(0); -- { serverError ILLEGAL_DIVISION } +SELECT toDecimal128(0, 2) / toInt64(0); -- { serverError ILLEGAL_DIVISION } + +SELECT toDecimal32(0, 4) AS x, multiIf(x = 0, NULL, intDivOrZero(1, x)), multiIf(x = 0, NULL, intDivOrZero(x, 0)); +SELECT toDecimal64(0, 8) AS x, multiIf(x = 0, NULL, intDivOrZero(1, x)), multiIf(x = 0, NULL, intDivOrZero(x, 0)); +SELECT toDecimal64(0, 18) AS x, multiIf(x = 0, NULL, intDivOrZero(1, x)), multiIf(x = 0, NULL, intDivOrZero(x, 0)); + +-- { echoOn } +SELECT toDecimal128(1, 38) / toDecimal128(1, 0) SETTINGS decimal_check_overflow=1; +SELECT toDecimal128(1, 38) / toDecimal128(1, 1) SETTINGS decimal_check_overflow=1; -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal128(1, 38) / toDecimal128(1, 1) SETTINGS decimal_check_overflow=0; +SELECT toDecimal128(1, 37) / toDecimal128(1, 1) SETTINGS decimal_check_overflow=1; + +SELECT toDecimal128(1, 19) / toDecimal128(1, 19) SETTINGS decimal_check_overflow=1; +SELECT toDecimal128(1, 20) / toDecimal128(1, 19) SETTINGS decimal_check_overflow=1; -- { serverError DECIMAL_OVERFLOW } +-- { echoOff } + +DROP TABLE IF EXISTS decimal; diff --git a/parser/testdata/00700_decimal_array_functions/ast.json b/parser/testdata/00700_decimal_array_functions/ast.json new file mode 100644 index 000000000..ee076193c --- /dev/null +++ b/parser/testdata/00700_decimal_array_functions/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayDifference (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_0" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_1" + }, + { + "explain": " Literal UInt64_4" + }, + { + 
"explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001002594, + "rows_read": 19, + "bytes_read": 765 + } +} diff --git a/parser/testdata/00700_decimal_array_functions/metadata.json b/parser/testdata/00700_decimal_array_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00700_decimal_array_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00700_decimal_array_functions/query.sql b/parser/testdata/00700_decimal_array_functions/query.sql new file mode 100644 index 000000000..b21d94cf2 --- /dev/null +++ b/parser/testdata/00700_decimal_array_functions/query.sql @@ -0,0 +1,24 @@ +SELECT arrayDifference([toDecimal32(0.0,4), toDecimal32(1.0,4)]) x, toTypeName(x); +SELECT arrayDifference([toDecimal64(0.0,8), toDecimal64(1.0,8)]) x, toTypeName(x); +SELECT arrayDifference([toDecimal128(0.0,8), toDecimal128(1.0,8)]) x, toTypeName(x); +SELECT '-'; +SELECT arraySum([toDecimal32(0.0,4), toDecimal32(1.0,4)]) x, toTypeName(x); +SELECT arraySum([toDecimal64(0.0,8), toDecimal64(1.0,8)]) x, toTypeName(x); +SELECT arraySum([toDecimal128(0.0,8), toDecimal128(1.0,8)]) x, toTypeName(x); +SELECT '-'; +SELECT arrayCumSum([toDecimal32(1.0,4), toDecimal32(1.0,4)]) x, toTypeName(x); +SELECT arrayCumSum([toDecimal64(1.0,8), toDecimal64(1.0,8)]) x, toTypeName(x); +SELECT arrayCumSum([toDecimal128(1.0,8), toDecimal128(1.0,8)]) x, toTypeName(x); +SELECT '-'; +SELECT arrayCumSumNonNegative([toDecimal32(1.0,4), toDecimal32(1.0,4)]) x, toTypeName(x); +SELECT arrayCumSumNonNegative([toDecimal64(1.0,8), toDecimal64(1.0,8)]) x, toTypeName(x); +SELECT arrayCumSumNonNegative([toDecimal128(1.0,8), toDecimal128(1.0,8)]) x, toTypeName(x); +SELECT '-'; +SELECT arrayCompact([toDecimal32(1.0,4), toDecimal32(1.0,4)]) x, toTypeName(x); +SELECT arrayCompact([toDecimal64(1.0,8), toDecimal64(1.0,8)]) x, toTypeName(x); +SELECT arrayCompact([toDecimal128(1.0,8), toDecimal128(1.0,8)]) x, toTypeName(x); +SELECT '-'; +SELECT arrayRemove([toDecimal32(1.0,4), toDecimal32(2.0,4), toDecimal32(3.0,4)], toDecimal32(1.0,4)) x, toTypeName(x); +SELECT arrayRemove([toDecimal64(1.0,8), toDecimal64(2.0,8), toDecimal64(3.0,8)], toDecimal64(1.0,8)) x, toTypeName(x); +SELECT arrayRemove([toDecimal128(1.0,8), toDecimal128(2.0,8), toDecimal128(3.0,8)], toDecimal128(1.0,8)) x, toTypeName(x); +SELECT '-'; diff --git a/parser/testdata/00700_decimal_bounds/ast.json b/parser/testdata/00700_decimal_bounds/ast.json new file mode 100644 index 000000000..f787a4591 --- /dev/null +++ b/parser/testdata/00700_decimal_bounds/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery decimal (children 1)" + }, + { + "explain": " Identifier decimal" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001310927, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00700_decimal_bounds/metadata.json b/parser/testdata/00700_decimal_bounds/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00700_decimal_bounds/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00700_decimal_bounds/query.sql b/parser/testdata/00700_decimal_bounds/query.sql new file mode 100644 index 000000000..8148c376f --- /dev/null +++ b/parser/testdata/00700_decimal_bounds/query.sql @@ -0,0 +1,97 @@ 
+DROP TABLE IF EXISTS decimal; + +CREATE TABLE IF NOT EXISTS decimal (x DECIMAL(10, -2)) ENGINE = Memory; -- { serverError ARGUMENT_OUT_OF_BOUND } +CREATE TABLE IF NOT EXISTS decimal (x DECIMAL(10, 15)) ENGINE = Memory; -- { serverError ARGUMENT_OUT_OF_BOUND } +CREATE TABLE IF NOT EXISTS decimal (x DECIMAL(0, 0)) ENGINE = Memory; -- { serverError ARGUMENT_OUT_OF_BOUND } + +CREATE TABLE IF NOT EXISTS decimal +( + a DECIMAL(9,0), + b DECIMAL(18,0), + c DECIMAL(38,0), + d DECIMAL(9, 9), + e DECIMAL(18, 18), + f DECIMAL(38, 38), + g Decimal(9, 5), + h decimal(18, 9), + i deciMAL(38, 18), + j DECIMAL(1,0) +) ENGINE = Memory; + +INSERT INTO decimal (a) VALUES (1000000000); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (a) VALUES (-1000000000); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (b) VALUES (1000000000000000000); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (b) VALUES (-1000000000000000000); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (c) VALUES (100000000000000000000000000000000000000); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (c) VALUES (-100000000000000000000000000000000000000); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (d) VALUES (1); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (d) VALUES (-1); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (e) VALUES (1000000000000000000); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (e) VALUES (-1000000000000000000); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (f) VALUES (1); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (f) VALUES (-1); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (g) VALUES (10000); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (g) VALUES (-10000); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (h) VALUES (1000000000); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (h) VALUES (-1000000000); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (i) VALUES (100000000000000000000); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (i) VALUES (-100000000000000000000); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (j) VALUES (10); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (j) VALUES (-10); -- { error ARGUMENT_OUT_OF_BOUND } + +INSERT INTO decimal (a) VALUES (0.1); +INSERT INTO decimal (a) VALUES (-0.1); +INSERT INTO decimal (b) VALUES (0.1); +INSERT INTO decimal (b) VALUES (-0.1); +INSERT INTO decimal (c) VALUES (0.1); +INSERT INTO decimal (c) VALUES (-0.1); +INSERT INTO decimal (d) VALUES (0.0000000001); +INSERT INTO decimal (d) VALUES (-0.0000000001); +INSERT INTO decimal (e) VALUES (0.0000000000000000001); +INSERT INTO decimal (e) VALUES (-0.0000000000000000001); +INSERT INTO decimal (f) VALUES (0.000000000000000000000000000000000000001); +INSERT INTO decimal (f) VALUES (-0.000000000000000000000000000000000000001); +INSERT INTO decimal (g) VALUES (0.000001); +INSERT INTO decimal (g) VALUES (-0.000001); +INSERT INTO decimal (h) VALUES (0.0000000001); +INSERT INTO decimal (h) VALUES (-0.0000000001); +INSERT INTO decimal (i) VALUES (0.0000000000000000001); +INSERT INTO decimal (i) VALUES (-0.0000000000000000001); +INSERT INTO decimal (j) VALUES (0.1); +INSERT INTO decimal (j) VALUES (-0.1); + +INSERT INTO decimal (a, b, d, g) VALUES (999999999, 999999999999999999, 0.999999999, 9999.99999); +INSERT INTO decimal (a, b, d, g) VALUES (-999999999, -999999999999999999, -0.999999999, -9999.99999); +INSERT INTO decimal (c) VALUES 
(99999999999999999999999999999999999999); +INSERT INTO decimal (c) VALUES (-99999999999999999999999999999999999999); +INSERT INTO decimal (f) VALUES (0.99999999999999999999999999999999999999); +INSERT INTO decimal (f) VALUES (-0.99999999999999999999999999999999999999); +INSERT INTO decimal (e, h) VALUES (0.999999999999999999, 999999999.999999999); +INSERT INTO decimal (e, h) VALUES (-0.999999999999999999, -999999999.999999999); +INSERT INTO decimal (i) VALUES (99999999999999999999.999999999999999999); +INSERT INTO decimal (i) VALUES (-99999999999999999999.999999999999999999); + +INSERT INTO decimal (a, b, c, d, g, j, h) VALUES (1, 1, 1, 0.000000001, 0.00001, 1, 0.000000001); +INSERT INTO decimal (a, b, c, d, g, j, h) VALUES (-1, -1, -1, -0.000000001, -0.00001, -1, -0.000000001); +INSERT INTO decimal (e, f) VALUES (0.000000000000000001, 0.00000000000000000000000000000000000001); +INSERT INTO decimal (e, f) VALUES (-0.000000000000000001, -0.00000000000000000000000000000000000001); +INSERT INTO decimal (i) VALUES (0.000000000000000001); +INSERT INTO decimal (i) VALUES (-0.000000000000000001); + +INSERT INTO decimal (a, b, c, d, e, f, g, h, i, j) VALUES (0, 0, 0, 0, 0, 0, 0, 0, 0, 0); +INSERT INTO decimal (a, b, c, d, e, f, g, h, i, j) VALUES (-0, -0, -0, -0, -0, -0, -0, -0, -0, -0); +INSERT INTO decimal (a, b, c, d, e, f, g, h, i, j) VALUES (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0); +INSERT INTO decimal (a, b, c, d, e, f, g, h, i, j) VALUES (-0.0, -0.0, -0.0, -0.0, -0.0, -0.0, -0.0, -0.0, -0.0, -0.0); + +INSERT INTO decimal (a, b, g) VALUES ('42.00000', 42.0000000000000000000000000000000, '0.999990'); +INSERT INTO decimal (a) VALUES ('-9x'); -- { error CANNOT_PARSE_TEXT } +INSERT INTO decimal (a) VALUES ('0x1'); -- { error CANNOT_PARSE_TEXT } + +INSERT INTO decimal (a, b, c, d, e, f) VALUES ('0.9e9', '0.9e18', '0.9e38', '9e-9', '9e-18', '9e-38'); +INSERT INTO decimal (a, b, c, d, e, f) VALUES ('-0.9e9', '-0.9e18', '-0.9e38', '-9e-9', '-9e-18', '-9e-38'); + +INSERT INTO decimal (a, b, c, d, e, f) VALUES ('1e9', '1e18', '1e38', '1e-10', '1e-19', '1e-39'); -- { error ARGUMENT_OUT_OF_BOUND } +INSERT INTO decimal (a, b, c, d, e, f) VALUES ('-1e9', '-1e18', '-1e38', '-1e-10', '-1e-19', '-1e-39'); -- { error ARGUMENT_OUT_OF_BOUND } + +SELECT * FROM decimal ORDER BY a, b, c, d, e, f, g, h, i, j; +DROP TABLE IF EXISTS decimal; diff --git a/parser/testdata/00700_decimal_casts/ast.json b/parser/testdata/00700_decimal_casts/ast.json new file mode 100644 index 000000000..fe8ce3b80 --- /dev/null +++ b/parser/testdata/00700_decimal_casts/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1.1'" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1.1'" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1.1'" + }, + { + "explain": " Literal UInt64_8" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001169272, + 
"rows_read": 16, + "bytes_read": 575 + } +} diff --git a/parser/testdata/00700_decimal_casts/metadata.json b/parser/testdata/00700_decimal_casts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00700_decimal_casts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00700_decimal_casts/query.sql b/parser/testdata/00700_decimal_casts/query.sql new file mode 100644 index 000000000..49f486bf9 --- /dev/null +++ b/parser/testdata/00700_decimal_casts/query.sql @@ -0,0 +1,148 @@ +SELECT toDecimal32('1.1', 1), toDecimal32('1.1', 2), toDecimal32('1.1', 8); +SELECT toDecimal32('1.1', 0); +SELECT toDecimal32(1.1, 0), toDecimal32(1.1, 1), toDecimal32(1.1, 2), toDecimal32(1.1, 8); + +SELECT '1000000000' AS x, toDecimal32(x, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '-1000000000' AS x, toDecimal32(x, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '1000000000000000000' AS x, toDecimal64(x, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '-1000000000000000000' AS x, toDecimal64(x, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '100000000000000000000000000000000000000' AS x, toDecimal128(x, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '-100000000000000000000000000000000000000' AS x, toDecimal128(x, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '1' AS x, toDecimal32(x, 9); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '-1' AS x, toDecimal32(x, 9); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '1' AS x, toDecimal64(x, 18); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '-1' AS x, toDecimal64(x, 18); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '1' AS x, toDecimal128(x, 38); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '-1' AS x, toDecimal128(x, 38); -- { serverError ARGUMENT_OUT_OF_BOUND } + +SELECT '0.1' AS x, toDecimal32(x, 0); +SELECT '-0.1' AS x, toDecimal32(x, 0); +SELECT '0.1' AS x, toDecimal64(x, 0); +SELECT '-0.1' AS x, toDecimal64(x, 0); +SELECT '0.1' AS x, toDecimal128(x, 0); +SELECT '-0.1' AS x, toDecimal128(x, 0); +SELECT '0.0000000001' AS x, toDecimal32(x, 9); +SELECT '-0.0000000001' AS x, toDecimal32(x, 9); +SELECT '0.0000000000000000001' AS x, toDecimal64(x, 18); +SELECT '-0.0000000000000000001' AS x, toDecimal64(x, 18); +SELECT '0.000000000000000000000000000000000000001' AS x, toDecimal128(x, 38); +SELECT '-0.000000000000000000000000000000000000001' AS x, toDecimal128(x, 38); + +SELECT '1e9' AS x, toDecimal32(x, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '-1E9' AS x, toDecimal32(x, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '1E18' AS x, toDecimal64(x, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '-1e18' AS x, toDecimal64(x, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '1e38' AS x, toDecimal128(x, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '-1E38' AS x, toDecimal128(x, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '1e0' AS x, toDecimal32(x, 9); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '-1e-0' AS x, toDecimal32(x, 9); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '1e0' AS x, toDecimal64(x, 18); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '-1e-0' AS x, toDecimal64(x, 18); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '1e-0' AS x, toDecimal128(x, 38); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '-1e0' AS x, toDecimal128(x, 38); -- { serverError ARGUMENT_OUT_OF_BOUND } + +SELECT '1e-1' AS x, toDecimal32(x, 0); +SELECT '-1e-1' AS x, toDecimal32(x, 0); +SELECT '1e-1' AS x, toDecimal64(x, 0); 
+SELECT '-1e-1' AS x, toDecimal64(x, 0); +SELECT '1e-1' AS x, toDecimal128(x, 0); +SELECT '-1e-1' AS x, toDecimal128(x, 0); +SELECT '1e-10' AS x, toDecimal32(x, 9); +SELECT '-1e-10' AS x, toDecimal32(x, 9); +SELECT '1e-19' AS x, toDecimal64(x, 18); +SELECT '-1e-19' AS x, toDecimal64(x, 18); +SELECT '1e-39' AS x, toDecimal128(x, 38); +SELECT '-1e-39' AS x, toDecimal128(x, 38); + +SELECT toFloat32(9999999) as x, toDecimal32(x, 0), toDecimal32(-x, 0), toDecimal64(x, 0), toDecimal64(-x, 0); +SELECT toFloat32(999999.9) as x, toDecimal32(x, 1), toDecimal32(-x, 1), toDecimal64(x, 1), toDecimal64(-x, 1); +SELECT toFloat32(99999.99) as x, toDecimal32(x, 2), toDecimal32(-x, 2), toDecimal64(x, 2), toDecimal64(-x, 2); +SELECT toFloat32(9999.999) as x, toDecimal32(x, 3), toDecimal32(-x, 3), toDecimal64(x, 3), toDecimal64(-x, 3); +SELECT toFloat32(999.9999) as x, toDecimal32(x, 4), toDecimal32(-x, 4), toDecimal64(x, 4), toDecimal64(-x, 4); +SELECT toFloat32(99.99999) as x, toDecimal32(x, 5), toDecimal32(-x, 5), toDecimal64(x, 5), toDecimal64(-x, 5); +SELECT toFloat32(9.999999) as x, toDecimal32(x, 6), toDecimal32(-x, 6), toDecimal64(x, 6), toDecimal64(-x, 6); +SELECT toFloat32(0.9999999) as x, toDecimal32(x, 7), toDecimal32(-x, 7), toDecimal64(x, 7), toDecimal64(-x, 7); + +SELECT toFloat32(9.99999999) as x, toDecimal32(x, 8), toDecimal32(-x, 8), toDecimal64(x, 8), toDecimal64(-x, 8); +SELECT toFloat32(0.999999999) as x, toDecimal32(x, 9), toDecimal32(-x, 9), toDecimal64(x, 9), toDecimal64(-x, 9); + +SELECT toFloat64(999999999) as x, toDecimal32(x, 0), toDecimal32(-x, 0), toDecimal64(x, 0), toDecimal64(-x, 0); +SELECT toFloat64(99999999.9) as x, toDecimal32(x, 1), toDecimal32(-x, 1), toDecimal64(x, 1), toDecimal64(-x, 1); +SELECT toFloat64(9999999.99) as x, toDecimal32(x, 2), toDecimal32(-x, 2), toDecimal64(x, 2), toDecimal64(-x, 2); +SELECT toFloat64(999999.999) as x, toDecimal32(x, 3), toDecimal32(-x, 3), toDecimal64(x, 3), toDecimal64(-x, 3); +SELECT toFloat64(99999.9999) as x, toDecimal32(x, 4), toDecimal32(-x, 4), toDecimal64(x, 4), toDecimal64(-x, 4); +SELECT toFloat64(9999.99999) as x, toDecimal32(x, 5), toDecimal32(-x, 5), toDecimal64(x, 5), toDecimal64(-x, 5); +SELECT toFloat64(999.999999) as x, toDecimal32(x, 6), toDecimal32(-x, 6), toDecimal64(x, 6), toDecimal64(-x, 6); +SELECT toFloat64(99.9999999) as x, toDecimal32(x, 7), toDecimal32(-x, 7), toDecimal64(x, 7), toDecimal64(-x, 7); +SELECT toFloat64(9.99999999) as x, toDecimal32(x, 8), toDecimal32(-x, 8), toDecimal64(x, 8), toDecimal64(-x, 8); +SELECT toFloat64(0.999999999) as x, toDecimal32(x, 9), toDecimal32(-x, 9), toDecimal64(x, 9), toDecimal64(-x, 9); + +SELECT toFloat64(999999999.999999999) as x, toDecimal64(x, 9), toDecimal64(-x, 9); +SELECT toFloat64(99999999.9999999999) as x, toDecimal64(x, 10), toDecimal64(-x, 10); +SELECT toFloat64(9999999.99999999999) as x, toDecimal64(x, 11), toDecimal64(-x, 11); +SELECT toFloat64(999999.999999999999) as x, toDecimal64(x, 12), toDecimal64(-x, 12); +SELECT toFloat64(99999.9999999999999) as x, toDecimal64(x, 13), toDecimal64(-x, 13); +SELECT toFloat64(9999.99999999999999) as x, toDecimal64(x, 14), toDecimal64(-x, 14); +SELECT toFloat64(999.999999999999999) as x, toDecimal64(x, 15), toDecimal64(-x, 15); +SELECT toFloat64(99.9999999999999999) as x, toDecimal64(x, 16), toDecimal64(-x, 16); +SELECT toFloat64(9.99999999999999999) as x, toDecimal64(x, 17), toDecimal64(-x, 17); +SELECT toFloat64(0.999999999999999999) as x, toDecimal64(x, 18), toDecimal64(-x, 18); + +SELECT toFloat64(999999999999999999) as 
x, toDecimal128(x, 0), toDecimal128(-x, 0); +SELECT toFloat64(99999999999999999.9) as x, toDecimal128(x, 1), toDecimal128(-x, 1); +SELECT toFloat64(9999999999999999.99) as x, toDecimal128(x, 2), toDecimal128(-x, 2); +SELECT toFloat64(999999999999999.999) as x, toDecimal128(x, 3), toDecimal128(-x, 3); +SELECT toFloat64(99999999999999.9999) as x, toDecimal128(x, 4), toDecimal128(-x, 4); +SELECT toFloat64(9999999999999.99999) as x, toDecimal128(x, 5), toDecimal128(-x, 5); +SELECT toFloat64(999999999999.999999) as x, toDecimal128(x, 6), toDecimal128(-x, 6); +SELECT toFloat64(99999999999.9999999) as x, toDecimal128(x, 7), toDecimal128(-x, 7); +SELECT toFloat64(9999999999.99999999) as x, toDecimal128(x, 8), toDecimal128(-x, 8); +SELECT toFloat64(999999999.999999999) as x, toDecimal128(x, 9), toDecimal128(-x, 9); +SELECT toFloat64(99999999.9999999999) as x, toDecimal128(x, 10), toDecimal128(-x, 10); +SELECT toFloat64(9999999.99999999999) as x, toDecimal128(x, 11), toDecimal128(-x, 11); +SELECT toFloat64(999999.999999999999) as x, toDecimal128(x, 12), toDecimal128(-x, 12); +SELECT toFloat64(99999.9999999999999) as x, toDecimal128(x, 13), toDecimal128(-x, 13); +SELECT toFloat64(9999.99999999999999) as x, toDecimal128(x, 14), toDecimal128(-x, 14); +SELECT toFloat64(999.999999999999999) as x, toDecimal128(x, 15), toDecimal128(-x, 15); +SELECT toFloat64(99.9999999999999999) as x, toDecimal128(x, 16), toDecimal128(-x, 16); +SELECT toFloat64(9.99999999999999999) as x, toDecimal128(x, 17), toDecimal128(-x, 17); +SELECT toFloat64(0.999999999999999999) as x, toDecimal128(x, 18), toDecimal128(-x, 18); + +SELECT toDecimal32(number, 4) as n1, toDecimal32(n1 / 9, 2) as n2, toDecimal32(n2, 8) FROM system.numbers LIMIT 10; +SELECT toDecimal32(number, 4) as n1, toDecimal32(n1 / 9, 8) as n2, toDecimal32(n2, 2) FROM system.numbers LIMIT 10; +SELECT toDecimal32(number, 8) as n1, toDecimal32(n1 / 9, 4) as n2, toDecimal32(n2, 2) FROM system.numbers LIMIT 10; + +SELECT toDecimal64(number, 4) as n1, toDecimal64(n1 / 9, 2) as n2, toDecimal64(n2, 8) FROM system.numbers LIMIT 10; +SELECT toDecimal64(number, 4) as n1, toDecimal64(n1 / 9, 8) as n2, toDecimal64(n2, 2) FROM system.numbers LIMIT 10; +SELECT toDecimal64(number, 8) as n1, toDecimal64(n1 / 9, 4) as n2, toDecimal64(n2, 2) FROM system.numbers LIMIT 10; + +SELECT toInt8(99) as x, toDecimal32(x, 0), toDecimal32(-x, 0), toDecimal64(x, 0), toDecimal64(-x, 0); +SELECT toInt16(9999) as x, toDecimal32(x, 0), toDecimal32(-x, 0), toDecimal64(x, 0), toDecimal64(-x, 0); +SELECT toInt32(999999999) as x, toDecimal32(x, 0), toDecimal32(-x, 0), toDecimal64(x, 0), toDecimal64(-x, 0); +SELECT toInt64(999999999) as x, toDecimal32(x, 0), toDecimal32(-x, 0), toDecimal64(x, 0), toDecimal64(-x, 0); +SELECT toInt32(999999999) as x, toDecimal64(x, 9), toDecimal64(-x, 9), toDecimal128(x, 29), toDecimal128(-x, 29); +SELECT toInt64(999999999) as x, toDecimal64(x, 9), toDecimal64(-x, 9), toDecimal128(x, 29), toDecimal128(-x, 29); +SELECT toInt64(999999999999999999) as x, toDecimal64(x, 0), toDecimal64(-x, 0); +SELECT toInt64(999999999999999999) as x, toDecimal128(x, 0), toDecimal128(-x, 0); +SELECT toInt64(999999999999999999) as x, toDecimal128(x, 20), toDecimal128(-x, 20); + +SELECT toUInt8(99) as x, toDecimal32(x, 0), toDecimal64(x, 0); +SELECT toUInt16(9999) as x, toDecimal32(x, 0), toDecimal64(x, 0); +SELECT toUInt32(999999999) as x, toDecimal32(x, 0), toDecimal64(x, 0); +SELECT toUInt64(999999999) as x,
toDecimal32(x, 0), toDecimal64(x, 0); + +SELECT CAST('42.4200', 'Decimal(9,2)') AS a, CAST(a, 'Decimal(9,2)'), CAST(a, 'Decimal(18, 2)'), CAST(a, 'Decimal(38, 2)'); +SELECT CAST('42.42', 'Decimal(9,2)') AS a, CAST(a, 'Decimal(9,7)'), CAST(a, 'Decimal(18, 16)'), CAST(a, 'Decimal(38, 36)'); + +SELECT CAST('123456789', 'Decimal(9,0)'), CAST('123456789123456789', 'Decimal(18,0)'); +SELECT CAST('12345678901234567890123456789012345678', 'Decimal(38,0)'); +SELECT CAST('123456789', 'Decimal(9,1)'); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT CAST('123456789123456789', 'Decimal(18,1)'); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT CAST('12345678901234567890123456789012345678', 'Decimal(38,1)'); -- { serverError ARGUMENT_OUT_OF_BOUND } + +SELECT CAST('0.123456789', 'Decimal(9,9)'), CAST('0.123456789123456789', 'Decimal(18,18)'); +SELECT CAST('0.12345678901234567890123456789012345678', 'Decimal(38,38)'); +SELECT CAST('0.123456789', 'Decimal(9,8)'); +SELECT CAST('0.123456789123456789', 'Decimal(18,17)'); +SELECT CAST('0.12345678901234567890123456789012345678', 'Decimal(38,37)'); diff --git a/parser/testdata/00700_decimal_casts_2/ast.json b/parser/testdata/00700_decimal_casts_2/ast.json new file mode 100644 index 000000000..07448a398 --- /dev/null +++ b/parser/testdata/00700_decimal_casts_2/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function toDecimal128 (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1234567890'" + }, + { + "explain": " Literal UInt64_28" + }, + { + "explain": " Function toDecimal128 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_29" + }, + { + "explain": " Function toDecimal128 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDecimal128 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1234567890'" + }, + { + "explain": " Literal UInt64_28" + }, + { + "explain": " Literal UInt64_29" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001328942, + "rows_read": 19, + "bytes_read": 729 + } +} diff --git a/parser/testdata/00700_decimal_casts_2/metadata.json b/parser/testdata/00700_decimal_casts_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00700_decimal_casts_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00700_decimal_casts_2/query.sql b/parser/testdata/00700_decimal_casts_2/query.sql new file mode 100644 index 000000000..2d3ace866 --- /dev/null +++ b/parser/testdata/00700_decimal_casts_2/query.sql @@ -0,0 +1,119 @@ +SELECT toDecimal128('1234567890', 28) AS x, toDecimal128(x, 29), toDecimal128(toDecimal128('1234567890', 28), 29); +SELECT toDecimal128(toDecimal128('1234567890', 28), 30); + +SELECT toDecimal64('1234567890', 8) AS x, toDecimal64(x, 9), toDecimal64(toDecimal64('1234567890', 8), 9); +SELECT toDecimal64(toDecimal64('1234567890', 8), 10); -- { serverError DECIMAL_OVERFLOW } + +SELECT toDecimal32('12345678', 1) AS x, toDecimal32(x, 2), toDecimal32(toDecimal32('12345678', 1), 2); +SELECT toDecimal32(toDecimal32('12345678', 1), 
3); -- { serverError DECIMAL_OVERFLOW } + +SELECT toDecimal64(toDecimal64('92233720368547758.1', 1), 2); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal64(toDecimal64('-92233720368547758.1', 1), 2); -- { serverError DECIMAL_OVERFLOW } + +SELECT toDecimal128('9223372036854775807', 6) AS x, toInt64(x), toInt64(-x); +SELECT toDecimal128('9223372036854775809', 6) AS x, toInt64(x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal128('9223372036854775809', 6) AS x, toInt64(-x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal64('922337203685477580', 0) * 10 AS x, toInt64(x), toInt64(-x); +SELECT toDecimal64(toDecimal64('92233720368547758.0', 1), 2) AS x, toInt64(x), toInt64(-x); + +SELECT toDecimal128('2147483647', 10) AS x, toInt32(x), toInt32(-x); +SELECT toDecimal128('2147483649', 10) AS x, toInt32(x), toInt32(-x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal64('2147483647', 2) AS x, toInt32(x), toInt32(-x); +SELECT toDecimal64('2147483649', 2) AS x, toInt32(x), toInt32(-x); -- { serverError DECIMAL_OVERFLOW } + +SELECT toDecimal128('92233720368547757.99', 2) AS x, toInt64(x), toInt64(-x); +SELECT toDecimal64('2147483640.99', 2) AS x, toInt32(x), toInt32(-x); + +SELECT toDecimal128('-0.9', 8) AS x, toUInt64(x); +SELECT toDecimal64('-0.9', 8) AS x, toUInt64(x); +SELECT toDecimal32('-0.9', 8) AS x, toUInt64(x); + +SELECT toDecimal128('-0.8', 4) AS x, toUInt32(x); +SELECT toDecimal64('-0.8', 4) AS x, toUInt32(x); +SELECT toDecimal32('-0.8', 4) AS x, toUInt32(x); + +SELECT toDecimal128('-0.7', 2) AS x, toUInt16(x); +SELECT toDecimal64('-0.7', 2) AS x, toUInt16(x); +SELECT toDecimal32('-0.7', 2) AS x, toUInt16(x); + +SELECT toDecimal128('-0.6', 6) AS x, toUInt8(x); +SELECT toDecimal64('-0.6', 6) AS x, toUInt8(x); +SELECT toDecimal32('-0.6', 6) AS x, toUInt8(x); + +SELECT toDecimal128('-1', 7) AS x, toUInt64(x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal128('-1', 7) AS x, toUInt32(x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal128('-1', 7) AS x, toUInt16(x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal128('-1', 7) AS x, toUInt8(x); -- { serverError DECIMAL_OVERFLOW } + +SELECT toDecimal64('-1', 5) AS x, toUInt64(x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal64('-1', 5) AS x, toUInt32(x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal64('-1', 5) AS x, toUInt16(x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal64('-1', 5) AS x, toUInt8(x); -- { serverError DECIMAL_OVERFLOW } + +SELECT toDecimal32('-1', 3) AS x, toUInt64(x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal32('-1', 3) AS x, toUInt32(x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal32('-1', 3) AS x, toUInt16(x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal32('-1', 3) AS x, toUInt8(x); -- { serverError DECIMAL_OVERFLOW } + +SELECT toDecimal128('18446744073709551615', 0) AS x, toUInt64(x); +SELECT toDecimal128('18446744073709551616', 0) AS x, toUInt64(x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal128('18446744073709551615', 8) AS x, toUInt64(x); +SELECT toDecimal128('18446744073709551616', 8) AS x, toUInt64(x); -- { serverError DECIMAL_OVERFLOW } + +SELECT toDecimal128('4294967295', 0) AS x, toUInt32(x); +SELECT toDecimal128('4294967296', 0) AS x, toUInt32(x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal128('4294967295', 10) AS x, toUInt32(x); +SELECT toDecimal128('4294967296', 10) AS x, toUInt32(x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal64('4294967295', 0) AS x, toUInt32(x); +SELECT 
toDecimal64('4294967296', 0) AS x, toUInt32(x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal64('4294967295', 4) AS x, toUInt32(x); +SELECT toDecimal64('4294967296', 4) AS x, toUInt32(x); -- { serverError DECIMAL_OVERFLOW } + +SELECT toDecimal128('65535', 0) AS x, toUInt16(x); +SELECT toDecimal128('65536', 0) AS x, toUInt16(x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal128('65535', 10) AS x, toUInt16(x); +SELECT toDecimal128('65536', 10) AS x, toUInt16(x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal64('65535', 0) AS x, toUInt16(x); +SELECT toDecimal64('65536', 0) AS x, toUInt16(x); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal64('65535', 4) AS x, toUInt16(x); +SELECT toDecimal64('65536', 4) AS x, toUInt16(x); -- { serverError DECIMAL_OVERFLOW } + +SELECT toInt64('2147483647') AS x, toDecimal32(x, 0); +SELECT toInt64('-2147483647') AS x, toDecimal32(x, 0); +SELECT toUInt64('2147483647') AS x, toDecimal32(x, 0); +SELECT toInt64('2147483649') AS x, toDecimal32(x, 0); -- { serverError DECIMAL_OVERFLOW } +SELECT toInt64('-2147483649') AS x, toDecimal32(x, 0); -- { serverError DECIMAL_OVERFLOW } +SELECT toUInt64('2147483649') AS x, toDecimal32(x, 0); -- { serverError DECIMAL_OVERFLOW } + +SELECT toUInt64('9223372036854775807') AS x, toDecimal64(x, 0); +SELECT toUInt64('9223372036854775809') AS x, toDecimal64(x, 0); -- { serverError DECIMAL_OVERFLOW } + +SELECT toDecimal32(0, rowNumberInBlock()); -- { serverError ILLEGAL_COLUMN } +SELECT toDecimal64(0, rowNumberInBlock()); -- { serverError ILLEGAL_COLUMN } +SELECT toDecimal128(0, rowNumberInBlock()); -- { serverError ILLEGAL_COLUMN } + +SELECT toDecimal32(1/0, 0); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal64(1/0, 1); -- { serverError DECIMAL_OVERFLOW } +SELECT toDecimal128(0/0, 2); -- { serverError DECIMAL_OVERFLOW } +SELECT CAST(1/0, 'Decimal(9, 0)'); -- { serverError DECIMAL_OVERFLOW } +SELECT CAST(1/0, 'Decimal(18, 1)'); -- { serverError DECIMAL_OVERFLOW } +SELECT CAST(1/0, 'Decimal(38, 2)'); -- { serverError DECIMAL_OVERFLOW } +SELECT CAST(0/0, 'Decimal(9, 3)'); -- { serverError DECIMAL_OVERFLOW } +SELECT CAST(0/0, 'Decimal(18, 4)'); -- { serverError DECIMAL_OVERFLOW } +SELECT CAST(0/0, 'Decimal(38, 5)'); -- { serverError DECIMAL_OVERFLOW } + +select toDecimal32(10000.1, 6); -- { serverError DECIMAL_OVERFLOW } +select toDecimal64(10000.1, 18); -- { serverError DECIMAL_OVERFLOW } +select toDecimal128(1000000000000000000000.1, 18); -- { serverError DECIMAL_OVERFLOW } + +select toDecimal32(-10000.1, 6); -- { serverError DECIMAL_OVERFLOW } +select toDecimal64(-10000.1, 18); -- { serverError DECIMAL_OVERFLOW } +select toDecimal128(-1000000000000000000000.1, 18); -- { serverError DECIMAL_OVERFLOW } + +select toDecimal32(2147483647.0 + 1.0, 0); -- { serverError DECIMAL_OVERFLOW } +select toDecimal64(9223372036854775807.0, 0); -- { serverError DECIMAL_OVERFLOW } +select toDecimal128(170141183460469231731687303715884105729.0, 0); -- { serverError DECIMAL_OVERFLOW } + +select toDecimal32(-2147483647.0 - 1.0, 0); -- { serverError DECIMAL_OVERFLOW } +select toDecimal64(-9223372036854775807.0, 0); -- { serverError DECIMAL_OVERFLOW } +select toDecimal128(-170141183460469231731687303715884105729.0, 0); -- { serverError DECIMAL_OVERFLOW } diff --git a/parser/testdata/00700_decimal_compare/ast.json b/parser/testdata/00700_decimal_compare/ast.json new file mode 100644 index 000000000..e4f9e87d1 --- /dev/null +++ b/parser/testdata/00700_decimal_compare/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": 
"explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery decimal (children 1)" + }, + { + "explain": " Identifier decimal" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00099215, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00700_decimal_compare/metadata.json b/parser/testdata/00700_decimal_compare/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00700_decimal_compare/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00700_decimal_compare/query.sql b/parser/testdata/00700_decimal_compare/query.sql new file mode 100644 index 000000000..1b901e04c --- /dev/null +++ b/parser/testdata/00700_decimal_compare/query.sql @@ -0,0 +1,68 @@ +DROP TABLE IF EXISTS decimal; + +CREATE TABLE IF NOT EXISTS decimal +( + a DECIMAL(9,0), + b DECIMAL(18,0), + c DECIMAL(38,0), + d DECIMAL(9, 9), + e Decimal64(18), + f Decimal128(38), + g Decimal32(5), + h Decimal64(9), + i Decimal128(18), + j dec(4,2) +) ENGINE = Memory; + +INSERT INTO decimal (a, b, c, d, e, f, g, h, i, j) VALUES (42, 42, 42, 0.42, 0.42, 0.42, 42.42, 42.42, 42.42, 42.42); +INSERT INTO decimal (a, b, c, d, e, f, g, h, i, j) VALUES (-42, -42, -42, -0.42, -0.42, -0.42, -42.42, -42.42, -42.42, -42.42); + +SELECT a > toFloat64(0) FROM decimal ORDER BY a; +SELECT g > toFloat32(0) FROM decimal ORDER BY g; +SELECT a > '0.0' FROM decimal ORDER BY a; + +SELECT a, b, a = b, a < b, a > b, a != b, a <= b, a >= b FROM decimal ORDER BY a; +SELECT a, g, a = g, a < g, a > g, a != g, a <= g, a >= g FROM decimal ORDER BY a; +SELECT a > 0, b > 0, g > 0 FROM decimal ORDER BY a DESC; +SELECT a, g > toInt8(0), g > toInt16(0), g > toInt32(0), g > toInt64(0) FROM decimal ORDER BY a; +SELECT a, g > toUInt8(0), g > toUInt16(0), g > toUInt32(0), g > toUInt64(0) FROM decimal ORDER BY a; +SELECT a, b, g FROM decimal WHERE a IN(42) AND b IN(42) AND g IN(42); +SELECT a, b, g FROM decimal WHERE a IN(42) AND b IN(42) AND g IN(42) SETTINGS enable_analyzer = 1; +SELECT a, b, g FROM decimal WHERE a > 0 AND a <= 42 AND b <= 42 AND g <= 42; + +SELECT d, e, f from decimal WHERE d > 0 AND d < 1 AND e > 0 AND e < 1 AND f > 0 AND f < 1; +SELECT j, h, i, j from decimal WHERE j > 42 AND h > 42 AND h > 42 AND j > 42; +SELECT j, h, i, j from decimal WHERE j < 42 AND h < 42 AND h < 42 AND j < 42; +SELECT a, b, c FROM decimal WHERE a = toInt8(42) AND b = toInt8(42) AND c = toInt8(42); +SELECT a, b, c FROM decimal WHERE a = toInt16(42) AND b = toInt16(42) AND c = toInt16(42); +SELECT a, b, c FROM decimal WHERE a = toInt32(42) AND b = toInt32(42) AND c = toInt32(42); +SELECT a, b, c FROM decimal WHERE a = toInt64(42) AND b = toInt64(42) AND c = toInt64(42); +SELECT a, b, c FROM decimal WHERE a = toFloat32(42); +SELECT a, b, c FROM decimal WHERE a = toFloat64(42); + +SELECT least(a, b), least(a, g), greatest(a, b), greatest(a, g) FROM decimal ORDER BY a; +SELECT least(a, 0), least(b, 0), least(g, 0) FROM decimal ORDER BY a; +SELECT greatest(a, 0), greatest(b, 0), greatest(g, 0) FROM decimal ORDER BY a; + +SELECT (a, d, g) = (b, e, h), (a, d, g) != (b, e, h) FROM decimal ORDER BY a; +SELECT (a, d, g) = (c, f, i), (a, d, g) != (c, f, i) FROM decimal ORDER BY a; + +SELECT toUInt32(2147483648) AS x, a == x FROM decimal WHERE a = 42; -- { serverError DECIMAL_OVERFLOW } +SELECT toUInt64(2147483648) AS x, b == x, x == ((b - 42) + x) FROM decimal WHERE a = 42; +SELECT toUInt64(9223372036854775808) AS x, b == x FROM decimal WHERE a = 42; -- { serverError 
DECIMAL_OVERFLOW } +SELECT toUInt64(9223372036854775808) AS x, c == x, x == ((c - 42) + x) FROM decimal WHERE a = 42; + +SELECT g = 10000, (g - g + 10000) == 10000 FROM decimal WHERE a = 42; +SELECT 10000 = g, 10000 = (g - g + 10000) FROM decimal WHERE a = 42; +SELECT g = 30000 FROM decimal WHERE a = 42; -- { serverError DECIMAL_OVERFLOW } +SELECT 30000 = g FROM decimal WHERE a = 42; -- { serverError DECIMAL_OVERFLOW } +SELECT h = 30000, (h - g + 30000) = 30000 FROM decimal WHERE a = 42; +SELECT 30000 = h, 30000 = (h - g + 30000) FROM decimal WHERE a = 42; +SELECT h = 10000000000 FROM decimal WHERE a = 42; -- { serverError DECIMAL_OVERFLOW } +SELECT i = 10000000000, (i - g + 10000000000) = 10000000000 FROM decimal WHERE a = 42; +SELECT 10000000000 = i, 10000000000 = (i - g + 10000000000) FROM decimal WHERE a = 42; + +SELECT min(a), min(b), min(c), min(d), min(e), min(f), min(g), min(h), min(i), min(j) FROM decimal; +SELECT max(a), max(b), max(c), max(d), max(e), max(f), max(g), max(h), max(i), max(j) FROM decimal; + +DROP TABLE IF EXISTS decimal; diff --git a/parser/testdata/00700_decimal_complex_types/ast.json b/parser/testdata/00700_decimal_complex_types/ast.json new file mode 100644 index 000000000..9d1733b3e --- /dev/null +++ b/parser/testdata/00700_decimal_complex_types/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery decimal (children 1)" + }, + { + "explain": " Identifier decimal" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001078482, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00700_decimal_complex_types/metadata.json b/parser/testdata/00700_decimal_complex_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00700_decimal_complex_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00700_decimal_complex_types/query.sql b/parser/testdata/00700_decimal_complex_types/query.sql new file mode 100644 index 000000000..979b7aaa2 --- /dev/null +++ b/parser/testdata/00700_decimal_complex_types/query.sql @@ -0,0 +1,173 @@ +DROP TABLE IF EXISTS decimal; + +CREATE TABLE decimal +( + a Array(Decimal32(3)), + b Array(Decimal64(3)), + c Array(Decimal128(3)), + nest Nested + ( + a Decimal(9,2), + b Decimal(18,2), + c Decimal(38,2) + ), + tup Tuple(Decimal32(1), Decimal64(1), Decimal128(1)) +) ENGINE = Memory; + +INSERT INTO decimal (a, b, c, nest.a, nest.b, nest.c, tup) + VALUES ([0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9], [1.1, 1.2], [2.1, 2.2], [3.1, 3.2], (9.1, 9.2, 9.3)); + +SELECT toTypeName(a), toTypeName(b), toTypeName(c) FROM decimal; +SELECT toTypeName(nest.a), toTypeName(nest.b), toTypeName(nest.c) FROM decimal; +SELECT toTypeName(a[1]), toTypeName(b[2]), toTypeName(c[3]) FROM decimal; +SELECT toTypeName(nest.a[1]), toTypeName(nest.b[1]), toTypeName(nest.c[1]) FROM decimal; +SELECT toTypeName(tup), toTypeName(tup.1), toTypeName(tup.2), toTypeName(tup.3) FROM decimal; + +SELECT arrayJoin(a) FROM decimal; +SELECT arrayJoin(b) FROM decimal; +SELECT arrayJoin(c) FROM decimal; + +SELECT tup, tup.1, tup.2, tup.3 FROM decimal; +SELECT a, arrayPopBack(a), arrayPopFront(a), arrayResize(a, 1), arraySlice(a, 2, 1) FROM decimal; +SELECT b, arrayPopBack(b), arrayPopFront(b), arrayResize(b, 1), arraySlice(b, 2, 1) FROM decimal; +SELECT c, arrayPopBack(c), arrayPopFront(c), arrayResize(c, 1), arraySlice(c, 2, 1) FROM decimal; +SELECT nest.a, arrayPopBack(nest.a), arrayPopFront(nest.a), 
arrayResize(nest.a, 1), arraySlice(nest.a, 2, 1) FROM decimal; +SELECT nest.b, arrayPopBack(nest.b), arrayPopFront(nest.b), arrayResize(nest.b, 1), arraySlice(nest.b, 2, 1) FROM decimal; +SELECT nest.c, arrayPopBack(nest.c), arrayPopFront(nest.c), arrayResize(nest.c, 1), arraySlice(nest.c, 2, 1) FROM decimal; +SELECT arrayPushBack(a, toDecimal32(0, 3)), arrayPushFront(a, toDecimal32(0, 3)) FROM decimal; +SELECT arrayPushBack(b, toDecimal64(0, 3)), arrayPushFront(b, toDecimal64(0, 3)) FROM decimal; +SELECT arrayPushBack(c, toDecimal128(0, 3)), arrayPushFront(c, toDecimal128(0, 3)) FROM decimal; + +SELECT arrayPushBack(a, toDecimal32(0, 2)) AS x, toTypeName(x) FROM decimal; +SELECT arrayPushBack(b, toDecimal64(0, 2)) AS x, toTypeName(x) FROM decimal; +SELECT arrayPushBack(c, toDecimal128(0, 2)) AS x, toTypeName(x) FROM decimal; +SELECT arrayPushFront(a, toDecimal32(0, 4)) AS x, toTypeName(x) FROM decimal; +SELECT arrayPushFront(b, toDecimal64(0, 4)) AS x, toTypeName(x) FROM decimal; +SELECT arrayPushFront(c, toDecimal128(0, 4)) AS x, toTypeName(x) FROM decimal; + +SELECT length(a), length(b), length(c) FROM decimal; +SELECT length(nest.a), length(nest.b), length(nest.c) FROM decimal; +SELECT empty(a), empty(b), empty(c) FROM decimal; +SELECT empty(nest.a), empty(nest.b), empty(nest.c) FROM decimal; +SELECT notEmpty(a), notEmpty(b), notEmpty(c) FROM decimal; +SELECT notEmpty(nest.a), notEmpty(nest.b), notEmpty(nest.c) FROM decimal; +SELECT arrayUniq(a), arrayUniq(b), arrayUniq(c) FROM decimal; +SELECT arrayUniq(nest.a), arrayUniq(nest.b), arrayUniq(nest.c) FROM decimal; + +SELECT has(a, toDecimal32(0.1, 3)), has(a, toDecimal32(1.0, 3)) FROM decimal; +SELECT has(b, toDecimal64(0.4, 3)), has(b, toDecimal64(1.0, 3)) FROM decimal; +SELECT has(c, toDecimal128(0.7, 3)), has(c, toDecimal128(1.0, 3)) FROM decimal; + +SELECT has(a, toDecimal32(0.1, 2)) FROM decimal; +SELECT has(a, toDecimal32(0.1, 4)) FROM decimal; +SELECT has(a, toDecimal64(0.1, 3)) FROM decimal; +SELECT has(a, toDecimal128(0.1, 3)) FROM decimal; +SELECT has(b, toDecimal32(0.4, 3)) FROM decimal; +SELECT has(b, toDecimal64(0.4, 2)) FROM decimal; +SELECT has(b, toDecimal64(0.4, 4)) FROM decimal; +SELECT has(b, toDecimal128(0.4, 3)) FROM decimal; +SELECT has(c, toDecimal32(0.7, 3)) FROM decimal; +SELECT has(c, toDecimal64(0.7, 3)) FROM decimal; +SELECT has(c, toDecimal128(0.7, 2)) FROM decimal; +SELECT has(c, toDecimal128(0.7, 4)) FROM decimal; + +SELECT indexOf(a, toDecimal32(0.1, 3)), indexOf(a, toDecimal32(1.0, 3)) FROM decimal; +SELECT indexOf(b, toDecimal64(0.5, 3)), indexOf(b, toDecimal64(1.0, 3)) FROM decimal; +SELECT indexOf(c, toDecimal128(0.9, 3)), indexOf(c, toDecimal128(1.0, 3)) FROM decimal; + +SELECT indexOf(a, toDecimal32(0.1, 2)) FROM decimal; +SELECT indexOf(a, toDecimal32(0.1, 4)) FROM decimal; +SELECT indexOf(a, toDecimal64(0.1, 3)) FROM decimal; +SELECT indexOf(a, toDecimal128(0.1, 3)) FROM decimal; +SELECT indexOf(b, toDecimal32(0.4, 3)) FROM decimal; +SELECT indexOf(b, toDecimal64(0.4, 2)) FROM decimal; +SELECT indexOf(b, toDecimal64(0.4, 4)) FROM decimal; +SELECT indexOf(b, toDecimal128(0.4, 3)) FROM decimal; +SELECT indexOf(c, toDecimal32(0.7, 3)) FROM decimal; +SELECT indexOf(c, toDecimal64(0.7, 3)) FROM decimal; +SELECT indexOf(c, toDecimal128(0.7, 2)) FROM decimal; +SELECT indexOf(c, toDecimal128(0.7, 4)) FROM decimal; + +SELECT arrayConcat(a, b) AS x, toTypeName(x) FROM decimal; +SELECT arrayConcat(a, c) AS x, toTypeName(x) FROM decimal; +SELECT arrayConcat(b, c) AS x, toTypeName(x) FROM decimal; +SELECT 
arrayConcat(a, nest.a) AS x, toTypeName(x) FROM decimal; +SELECT arrayConcat(b, nest.b) AS x, toTypeName(x) FROM decimal; +SELECT arrayConcat(c, nest.c) AS x, toTypeName(x) FROM decimal; +SELECT arrayConcat(a, nest.b) AS x, toTypeName(x) FROM decimal; +SELECT arrayConcat(a, nest.c) AS x, toTypeName(x) FROM decimal; +SELECT arrayConcat(b, nest.a) AS x, toTypeName(x) FROM decimal; +SELECT arrayConcat(b, nest.c) AS x, toTypeName(x) FROM decimal; +SELECT arrayConcat(c, nest.a) AS x, toTypeName(x) FROM decimal; +SELECT arrayConcat(c, nest.b) AS x, toTypeName(x) FROM decimal; + +SELECT toDecimal32(12345.6789, 4) AS x, countEqual([x+1, x, x], x), countEqual([x, x-1, x], x), countEqual([x, x], x-0); +SELECT toDecimal32(-12345.6789, 4) AS x, countEqual([x+1, x, x], x), countEqual([x, x-1, x], x), countEqual([x, x], x+0); +SELECT toDecimal64(123456789.123456789, 9) AS x, countEqual([x+1, x, x], x), countEqual([x, x-1, x], x), countEqual([x, x], x-0); +SELECT toDecimal64(-123456789.123456789, 9) AS x, countEqual([x+1, x, x], x), countEqual([x, x-1, x], x), countEqual([x, x], x+0); +SELECT toDecimal128(0.123456789123456789, 18) AS x, countEqual([x+1, x, x], x), countEqual([x, x-1, x], x), countEqual([x, x], x-0); +SELECT toDecimal128(-0.1234567891123456789, 18) AS x, countEqual([x+1, x, x], x), countEqual([x, x-1, x], x), countEqual([x, x], x+0); + +SELECT toTypeName(x) FROM (SELECT toDecimal32('1234.5', 5) AS x UNION ALL SELECT toInt8(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal32('1234.5', 5) AS x UNION ALL SELECT toUInt8(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal32('12345.0', 4) AS x UNION ALL SELECT toInt16(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal32('12345.0', 4) AS x UNION ALL SELECT toUInt16(0) AS x) WHERE x = 0; + +SELECT toTypeName(x) FROM (SELECT toDecimal32('12.345', 7) AS x UNION ALL SELECT toInt8(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal32('12.345', 7) AS x UNION ALL SELECT toUInt8(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal32('1234.5', 5) AS x UNION ALL SELECT toInt16(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal32('1234.5', 5) AS x UNION ALL SELECT toUInt16(0) AS x) WHERE x = 0; + +SELECT toTypeName(x) FROM (SELECT toDecimal32('12345.00', 4) AS x UNION ALL SELECT toInt32(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal32('12345.00', 4) AS x UNION ALL SELECT toUInt32(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal32('12345.00', 4) AS x UNION ALL SELECT toInt64(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal32('12345.00', 4) AS x UNION ALL SELECT toUInt64(0) AS x) WHERE x = 0; + +SELECT toTypeName(x) FROM (SELECT toDecimal64('12345.00', 4) AS x UNION ALL SELECT toInt8(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal64('12345.00', 4) AS x UNION ALL SELECT toUInt8(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal64('12345.00', 4) AS x UNION ALL SELECT toInt16(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal64('12345.00', 4) AS x UNION ALL SELECT toUInt16(0) AS x) WHERE x = 0; + +SELECT toTypeName(x) FROM (SELECT toDecimal64('12345.00', 4) AS x UNION ALL SELECT toInt32(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal64('12345.00', 4) AS x UNION ALL SELECT toUInt32(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal64('12345.00', 4) AS x UNION ALL SELECT toInt64(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM 
(SELECT toDecimal64('12345.00', 4) AS x UNION ALL SELECT toUInt64(0) AS x) WHERE x = 0; + +SELECT toTypeName(x) FROM (SELECT toDecimal128('12345.00', 4) AS x UNION ALL SELECT toInt8(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal128('12345.00', 4) AS x UNION ALL SELECT toUInt8(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal128('12345.00', 4) AS x UNION ALL SELECT toInt16(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal128('12345.00', 4) AS x UNION ALL SELECT toUInt16(0) AS x) WHERE x = 0; + +SELECT toTypeName(x) FROM (SELECT toDecimal128('12345.00', 4) AS x UNION ALL SELECT toInt32(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal128('12345.00', 4) AS x UNION ALL SELECT toUInt32(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal128('12345.00', 4) AS x UNION ALL SELECT toInt64(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal128('12345.00', 4) AS x UNION ALL SELECT toUInt64(0) AS x) WHERE x = 0; + +SELECT toTypeName(x) FROM (SELECT toDecimal32('12345', 0) AS x UNION ALL SELECT toInt32(0) AS x) WHERE x = 0; +SELECT toTypeName(x) FROM (SELECT toDecimal64('12345', 0) AS x UNION ALL SELECT toInt64(0) AS x) WHERE x = 0; + +SELECT number % 2 ? toDecimal32('32.1', 5) : toDecimal32('32.2', 5) FROM system.numbers LIMIT 2; +SELECT number % 2 ? toDecimal32('32.1', 5) : toDecimal64('64.2', 5) FROM system.numbers LIMIT 2; +SELECT number % 2 ? toDecimal32('32.1', 5) : toDecimal128('128.2', 5) FROM system.numbers LIMIT 2; + +SELECT number % 2 ? toDecimal64('64.1', 5) : toDecimal32('32.2', 5) FROM system.numbers LIMIT 2; +SELECT number % 2 ? toDecimal64('64.1', 5) : toDecimal64('64.2', 5) FROM system.numbers LIMIT 2; +SELECT number % 2 ? toDecimal64('64.1', 5) : toDecimal128('128.2', 5) FROM system.numbers LIMIT 2; + +SELECT number % 2 ? toDecimal128('128.1', 5) : toDecimal32('32.2', 5) FROM system.numbers LIMIT 2; +SELECT number % 2 ? toDecimal128('128.1', 5) : toDecimal64('64.2', 5) FROM system.numbers LIMIT 2; +SELECT number % 2 ? toDecimal128('128.1', 5) : toDecimal128('128.2', 5) FROM system.numbers LIMIT 2; + +SELECT number % 2 ? toDecimal32('32.1', 5) : toDecimal32('32.2', 1) FROM system.numbers LIMIT 2; -- { serverError NOT_IMPLEMENTED } +SELECT number % 2 ? toDecimal32('32.1', 5) : toDecimal64('64.2', 2) FROM system.numbers LIMIT 2; -- { serverError NOT_IMPLEMENTED } +SELECT number % 2 ? toDecimal32('32.1', 5) : toDecimal128('128.2', 3) FROM system.numbers LIMIT 2; -- { serverError NOT_IMPLEMENTED } + +SELECT number % 2 ? toDecimal64('64.1', 5) : toDecimal32('32.2', 1) FROM system.numbers LIMIT 2; -- { serverError NOT_IMPLEMENTED } +SELECT number % 2 ? toDecimal64('64.1', 5) : toDecimal64('64.2', 2) FROM system.numbers LIMIT 2; -- { serverError NOT_IMPLEMENTED } +SELECT number % 2 ? toDecimal64('64.1', 5) : toDecimal128('128.2', 3) FROM system.numbers LIMIT 2; -- { serverError NOT_IMPLEMENTED } + +SELECT number % 2 ? toDecimal128('128.1', 5) : toDecimal32('32.2', 1) FROM system.numbers LIMIT 2; -- { serverError NOT_IMPLEMENTED } +SELECT number % 2 ? toDecimal128('128.1', 5) : toDecimal64('64.2', 2) FROM system.numbers LIMIT 2; -- { serverError NOT_IMPLEMENTED } +SELECT number % 2 ? 
toDecimal128('128.1', 5) : toDecimal128('128.2', 3) FROM system.numbers LIMIT 2; -- { serverError NOT_IMPLEMENTED } + +DROP TABLE IF EXISTS decimal; diff --git a/parser/testdata/00700_decimal_defaults/ast.json b/parser/testdata/00700_decimal_defaults/ast.json new file mode 100644 index 000000000..4bdc65397 --- /dev/null +++ b/parser/testdata/00700_decimal_defaults/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery decimal (children 1)" + }, + { + "explain": " Identifier decimal" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001041266, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00700_decimal_defaults/metadata.json b/parser/testdata/00700_decimal_defaults/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00700_decimal_defaults/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00700_decimal_defaults/query.sql b/parser/testdata/00700_decimal_defaults/query.sql new file mode 100644 index 000000000..8694d866e --- /dev/null +++ b/parser/testdata/00700_decimal_defaults/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS decimal; + +CREATE TABLE IF NOT EXISTS decimal +( + a DECIMAL(9,4) DEFAULT 0, + b DECIMAL(18,4) DEFAULT a / 2, + c DECIMAL(38,4) DEFAULT b / 3, + d MATERIALIZED a + toDecimal32('0.2', 1), + e ALIAS b * 2, + f ALIAS c * 6 +) ENGINE = Memory; + +DESC TABLE decimal; + +INSERT INTO decimal (a) VALUES (0), (1), (2), (3); +SELECT * FROM decimal; +SELECT a, b, c, d, e, f FROM decimal; + +DROP TABLE IF EXISTS decimal; diff --git a/parser/testdata/00700_decimal_empty_aggregates/ast.json b/parser/testdata/00700_decimal_empty_aggregates/ast.json new file mode 100644 index 000000000..6dc482a99 --- /dev/null +++ b/parser/testdata/00700_decimal_empty_aggregates/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery decimal (children 1)" + }, + { + "explain": " Identifier decimal" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000983587, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00700_decimal_empty_aggregates/metadata.json b/parser/testdata/00700_decimal_empty_aggregates/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00700_decimal_empty_aggregates/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00700_decimal_empty_aggregates/query.sql b/parser/testdata/00700_decimal_empty_aggregates/query.sql new file mode 100644 index 000000000..4ee37b3b9 --- /dev/null +++ b/parser/testdata/00700_decimal_empty_aggregates/query.sql @@ -0,0 +1,86 @@ +DROP TABLE IF EXISTS decimal; + +CREATE TABLE decimal +( + a Decimal(6, 4), + b Decimal(16, 7), + c Decimal(20, 8) +) ENGINE = Memory; + +SELECT count(a), count(b), count(c) FROM decimal; +SELECT [min(a), max(a)], [min(b), max(b)], [min(c), max(c)] FROM decimal; + +SELECT sum(a), sum(b), sum(c), sumWithOverflow(a), sumWithOverflow(b), sumWithOverflow(c) FROM decimal; +SELECT sum(a), sum(b), sum(c), sumWithOverflow(a), sumWithOverflow(b), sumWithOverflow(c) FROM decimal WHERE a > 0; +SELECT sum(a), sum(b), sum(c), sumWithOverflow(a), sumWithOverflow(b), sumWithOverflow(c) FROM decimal WHERE a < 0; +SELECT sum(a+1), sum(b+1), sum(c+1), sumWithOverflow(a+1), sumWithOverflow(b+1), sumWithOverflow(c+1) FROM decimal; +SELECT sum(a-1), sum(b-1), sum(c-1), 
sumWithOverflow(a-1), sumWithOverflow(b-1), sumWithOverflow(c-1) FROM decimal; + +SELECT (uniq(a), uniq(b), uniq(c)), + (uniqCombined(a), uniqCombined(b), uniqCombined(c)), + (uniqCombined(17)(a), uniqCombined(17)(b), uniqCombined(17)(c)), + (uniqExact(a), uniqExact(b), uniqExact(c)), + (uniqHLL12(a), uniqHLL12(b), uniqHLL12(c)) +FROM (SELECT * FROM decimal ORDER BY a); + +SELECT uniqUpTo(10)(a), uniqUpTo(10)(b), uniqUpTo(10)(c) FROM decimal WHERE a >= 0 AND a < 5; +SELECT uniqUpTo(10)(a), uniqUpTo(10)(b), uniqUpTo(10)(c) FROM decimal WHERE a >= 0 AND a < 10; + +SELECT argMin(a, b), argMin(a, c), argMin(b, a), argMin(b, c), argMin(c, a), argMin(c, b) FROM decimal; +SELECT argMin(a, b), argMin(a, c), argMin(b, a), argMin(b, c), argMin(c, a), argMin(c, b) FROM decimal WHERE a > 0; +SELECT argMax(a, b), argMax(a, c), argMax(b, a), argMax(b, c), argMax(c, a), argMax(c, b) FROM decimal; +SELECT argMax(a, b), argMax(a, c), argMax(b, a), argMax(b, c), argMax(c, a), argMax(c, b) FROM decimal WHERE a < 0; + +SELECT median(a) as ma, median(b) as mb, median(c) as mc, toTypeName(ma),toTypeName(mb),toTypeName(mc) FROM decimal; +SELECT quantile(a) as qa, quantile(b) as qb, quantile(c) as qc, toTypeName(qa),toTypeName(qb),toTypeName(qc) FROM decimal WHERE a < 0; +SELECT quantile(0.0)(a), quantile(0.0)(b), quantile(0.0)(c) FROM decimal WHERE a >= 0; +SELECT quantile(0.2)(a), quantile(0.2)(b), quantile(0.2)(c) FROM decimal WHERE a >= 0; +SELECT quantile(0.4)(a), quantile(0.4)(b), quantile(0.4)(c) FROM decimal WHERE a >= 0; +SELECT quantile(0.6)(a), quantile(0.6)(b), quantile(0.6)(c) FROM decimal WHERE a >= 0; +SELECT quantile(0.8)(a), quantile(0.8)(b), quantile(0.8)(c) FROM decimal WHERE a >= 0; +SELECT quantile(1.0)(a), quantile(1.0)(b), quantile(1.0)(c) FROM decimal WHERE a >= 0; +SELECT quantiles(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(a) FROM decimal; +SELECT quantiles(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(b) FROM decimal; +SELECT quantiles(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(c) FROM decimal; + +SELECT medianExact(a), medianExact(b), medianExact(c) as x, toTypeName(x) FROM decimal; +SELECT quantileExact(a), quantileExact(b), quantileExact(c) as x, toTypeName(x) FROM decimal WHERE a < 0; +SELECT quantileExact(0.0)(a), quantileExact(0.0)(b), quantileExact(0.0)(c) FROM decimal WHERE a >= 0; +SELECT quantileExact(0.2)(a), quantileExact(0.2)(b), quantileExact(0.2)(c) FROM decimal WHERE a >= 0; +SELECT quantileExact(0.4)(a), quantileExact(0.4)(b), quantileExact(0.4)(c) FROM decimal WHERE a >= 0; +SELECT quantileExact(0.6)(a), quantileExact(0.6)(b), quantileExact(0.6)(c) FROM decimal WHERE a >= 0; +SELECT quantileExact(0.8)(a), quantileExact(0.8)(b), quantileExact(0.8)(c) FROM decimal WHERE a >= 0; +SELECT quantileExact(1.0)(a), quantileExact(1.0)(b), quantileExact(1.0)(c) FROM decimal WHERE a >= 0; +SELECT quantilesExact(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(a) FROM decimal; +SELECT quantilesExact(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(b) FROM decimal; +SELECT quantilesExact(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(c) FROM decimal; + +SELECT medianExactWeighted(a, 1), medianExactWeighted(b, 2), medianExactWeighted(c, 3) as x, toTypeName(x) FROM decimal; +SELECT quantileExactWeighted(a, 1), quantileExactWeighted(b, 2), quantileExactWeighted(c, 3) as x, toTypeName(x) FROM decimal WHERE a < 0; +SELECT quantileExactWeighted(0.0)(a, 1), quantileExactWeighted(0.0)(b, 2), quantileExactWeighted(0.0)(c, 3) FROM decimal 
WHERE a >= 0; +SELECT quantileExactWeighted(0.2)(a, 1), quantileExactWeighted(0.2)(b, 2), quantileExactWeighted(0.2)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantileExactWeighted(0.4)(a, 1), quantileExactWeighted(0.4)(b, 2), quantileExactWeighted(0.4)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantileExactWeighted(0.6)(a, 1), quantileExactWeighted(0.6)(b, 2), quantileExactWeighted(0.6)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantileExactWeighted(0.8)(a, 1), quantileExactWeighted(0.8)(b, 2), quantileExactWeighted(0.8)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantileExactWeighted(1.0)(a, 1), quantileExactWeighted(1.0)(b, 2), quantileExactWeighted(1.0)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantilesExactWeighted(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(a, 1) FROM decimal; +SELECT quantilesExactWeighted(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(b, 2) FROM decimal; +SELECT quantilesExactWeighted(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(c, 3) FROM decimal; + +SELECT varPop(a) AS va, varPop(b) AS vb, varPop(c) AS vc, toTypeName(va), toTypeName(vb), toTypeName(vc) FROM decimal; +SELECT varPop(toFloat64(a)), varPop(toFloat64(b)), varPop(toFloat64(c)) FROM decimal; +SELECT varSamp(a) AS va, varSamp(b) AS vb, varSamp(c) AS vc, toTypeName(va), toTypeName(vb), toTypeName(vc) FROM decimal; +SELECT varSamp(toFloat64(a)), varSamp(toFloat64(b)), varSamp(toFloat64(c)) FROM decimal; + +SELECT stddevPop(a) AS da, stddevPop(b) AS db, stddevPop(c) AS dc, toTypeName(da), toTypeName(db), toTypeName(dc) FROM decimal; +SELECT stddevPop(toFloat64(a)), stddevPop(toFloat64(b)), stddevPop(toFloat64(c)) FROM decimal; +SELECT stddevSamp(a) AS da, stddevSamp(b) AS db, stddevSamp(c) AS dc, toTypeName(da), toTypeName(db), toTypeName(dc) FROM decimal; +SELECT stddevSamp(toFloat64(a)), stddevSamp(toFloat64(b)), stddevSamp(toFloat64(c)) FROM decimal; + +SELECT covarPop(a, a), covarPop(b, b), covarPop(c, c) FROM decimal; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT covarSamp(a, a), covarSamp(b, b), covarSamp(c, c) FROM decimal; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT corr(a, a), corr(b, b), corr(c, c) FROM decimal; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT 1 LIMIT 0; + +DROP TABLE decimal; + diff --git a/parser/testdata/00700_decimal_formats/ast.json b/parser/testdata/00700_decimal_formats/ast.json new file mode 100644 index 000000000..222276cc4 --- /dev/null +++ b/parser/testdata/00700_decimal_formats/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery decimal (children 1)" + }, + { + "explain": " Identifier decimal" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001035301, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00700_decimal_formats/metadata.json b/parser/testdata/00700_decimal_formats/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00700_decimal_formats/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00700_decimal_formats/query.sql b/parser/testdata/00700_decimal_formats/query.sql new file mode 100644 index 000000000..f008897bf --- /dev/null +++ b/parser/testdata/00700_decimal_formats/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS decimal; + +CREATE TABLE IF NOT EXISTS decimal +( + a DEC(9, 3), + b DEC(18, 9), + c DEC(38, 18) +) ENGINE = Memory; + +INSERT INTO decimal (a, b, c) VALUES (42.0, -42.0, 42) (0.42, -0.42, .42) (42.42, -42.42, 
42.42); +INSERT INTO decimal (a, b, c) FORMAT JSONEachRow {"a":1.1, "b":-1.1, "c":1.1} {"a":1.0, "b":-1.0, "c":1} {"a":0.1, "b":-0.1, "c":.1}; + +INSERT INTO decimal (a, b, c) FORMAT CSV 2.0,-2.0,2 + +INSERT INTO decimal (a, b, c) FORMAT CSV 0.2 ,-0.2 ,.2 + +INSERT INTO decimal (a, b, c) FORMAT CSV 2.2 , -2.2 , 2.2 + +INSERT INTO decimal (a, b, c) FORMAT TabSeparated 3.3 -3.3 3.3 + +INSERT INTO decimal (a, b, c) FORMAT TabSeparated 3.0 -3.0 3 + +INSERT INTO decimal (a, b, c) FORMAT TabSeparated 0.3 -0.3 .3 + +INSERT INTO decimal (a, b, c) FORMAT CSV 4.4E+5,-4E+8,.4E+20 + +INSERT INTO decimal (a, b, c) FORMAT CSV 5.5e-2, -5e-9 ,.5e-17 + +SELECT * FROM decimal ORDER BY a FORMAT JSONEachRow; +SELECT * FROM decimal ORDER BY b DESC FORMAT CSV; +SELECT * FROM decimal ORDER BY c FORMAT TabSeparated; + +DROP TABLE IF EXISTS decimal; diff --git a/parser/testdata/00700_decimal_gathers/ast.json b/parser/testdata/00700_decimal_gathers/ast.json new file mode 100644 index 000000000..ddeb5916c --- /dev/null +++ b/parser/testdata/00700_decimal_gathers/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_2" + }, + { + "explain": " Literal 'Decimal(9,3)'" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_1" + }, + { + "explain": " Literal 'Decimal(9,3)'" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001131618, + "rows_read": 23, + "bytes_read": 941 + } +} diff --git a/parser/testdata/00700_decimal_gathers/metadata.json b/parser/testdata/00700_decimal_gathers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00700_decimal_gathers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00700_decimal_gathers/query.sql b/parser/testdata/00700_decimal_gathers/query.sql new file mode 100644 index 000000000..98519577b --- /dev/null +++ b/parser/testdata/00700_decimal_gathers/query.sql @@ -0,0 +1,17 @@ +select if(1, [cast(materialize(2.0),'Decimal(9,3)')], [cast(materialize(1.0),'Decimal(9,3)')]); +select if(1, [cast(materialize(2.0),'Decimal(18,10)')], [cast(materialize(1.0),'Decimal(18,10)')]); +select if(1, [cast(materialize(2.0),'Decimal(38,18)')], [cast(materialize(1.0),'Decimal(38,18)')]); + +select if(0, [cast(materialize(2.0),'Decimal(9,3)')], [cast(materialize(1.0),'Decimal(9,3)')]); +select if(0, [cast(materialize(2.0),'Decimal(18,10)')], [cast(materialize(1.0),'Decimal(18,10)')]); +select if(0, [cast(materialize(2.0),'Decimal(38,18)')], 
[cast(materialize(1.0),'Decimal(38,18)')]); + +select '-'; + +select if(1, [cast(materialize(2.0),'Decimal(9,3)')], [cast(materialize(1.0),'Decimal(9,0)')]); +select if(0, [cast(materialize(2.0),'Decimal(18,10)')], [cast(materialize(1.0),'Decimal(18,0)')]); +select if(1, [cast(materialize(2.0),'Decimal(38,18)')], [cast(materialize(1.0),'Decimal(38,8)')]); + +select if(0, [cast(materialize(2.0),'Decimal(9,0)')], [cast(materialize(1.0),'Decimal(9,3)')]); +select if(1, [cast(materialize(2.0),'Decimal(18,0)')], [cast(materialize(1.0),'Decimal(18,10)')]); +select if(0, [cast(materialize(2.0),'Decimal(38,0)')], [cast(materialize(1.0),'Decimal(38,18)')]); diff --git a/parser/testdata/00700_decimal_in_keys/ast.json b/parser/testdata/00700_decimal_in_keys/ast.json new file mode 100644 index 000000000..035ea3bfc --- /dev/null +++ b/parser/testdata/00700_decimal_in_keys/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery decimal (children 1)" + }, + { + "explain": " Identifier decimal" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001189775, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00700_decimal_in_keys/metadata.json b/parser/testdata/00700_decimal_in_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00700_decimal_in_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00700_decimal_in_keys/query.sql b/parser/testdata/00700_decimal_in_keys/query.sql new file mode 100644 index 000000000..817d5e855 --- /dev/null +++ b/parser/testdata/00700_decimal_in_keys/query.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS decimal; + +CREATE TABLE IF NOT EXISTS decimal +( + d1 DECIMAL(9, 8), + d2 DECIMAL(18, 8), + d3 DECIMAL(38, 8) +) +ENGINE = MergeTree +PARTITION BY toInt32(d1) +ORDER BY (d2, d3); + +INSERT INTO decimal (d1, d2, d3) VALUES (4.2, 4.2, 4.2); + +SELECT count() FROM decimal WHERE d1 = toDecimal32('4.2', 8); +SELECT count() FROM decimal WHERE d1 != toDecimal32('4.2', 8); +SELECT count() FROM decimal WHERE d1 < toDecimal32('4.2', 8); +SELECT count() FROM decimal WHERE d1 > toDecimal32('4.2', 8); +SELECT count() FROM decimal WHERE d1 <= toDecimal32('4.2', 8); +SELECT count() FROM decimal WHERE d1 >= toDecimal32('4.2', 8); + +INSERT INTO decimal (d1, d2, d3) + SELECT toDecimal32(number % 10, 8), toDecimal64(number, 8), toDecimal128(number, 8) FROM system.numbers LIMIT 50; + +SELECT count() FROM decimal WHERE d1 = 1; +SELECT * FROM decimal WHERE d1 > 5 AND d2 < 30 ORDER BY d2 DESC; +SELECT * FROM decimal WHERE d1 IN(1, 3) ORDER BY d2; + +DROP TABLE decimal; diff --git a/parser/testdata/00700_decimal_math/ast.json b/parser/testdata/00700_decimal_math/ast.json new file mode 100644 index 000000000..2ff01cdaa --- /dev/null +++ b/parser/testdata/00700_decimal_math/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function toDecimal32 (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '42.42'" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Function toDecimal32 (alias y) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { 
+ "explain": " Function log (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function exp (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Literal UInt64_6" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001108488, + "rows_read": 20, + "bytes_read": 755 + } +} diff --git a/parser/testdata/00700_decimal_math/metadata.json b/parser/testdata/00700_decimal_math/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00700_decimal_math/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00700_decimal_math/query.sql b/parser/testdata/00700_decimal_math/query.sql new file mode 100644 index 000000000..5dc8f8003 --- /dev/null +++ b/parser/testdata/00700_decimal_math/query.sql @@ -0,0 +1,45 @@ +SELECT toDecimal32('42.42', 4) AS x, toDecimal32(log(x), 4) AS y, round(exp(y), 6); +SELECT toDecimal32('42.42', 4) AS x, toDecimal32(log2(x), 4) AS y, round(exp2(y), 6); +SELECT toDecimal32('42.42', 4) AS x, toDecimal32(log10(x), 4) AS y, round(exp10(y), 6); + +SELECT toDecimal32('42.42', 4) AS x, toDecimal32(sqrt(x), 3) AS y, y * y; +SELECT toDecimal32('42.42', 4) AS x, toDecimal32(cbrt(x), 4) AS y, toDecimal64(y, 4) * y * y; +SELECT toDecimal32('1.0', 5) AS x, erf(x), erfc(x); +SELECT toDecimal32('42.42', 4) AS x, round(lgamma(x), 6), round(tgamma(x) / 1e50, 6); + +SELECT toDecimal32('0.0', 2) AS x, round(sin(x), 8), round(cos(x), 8), round(tan(x), 8); +SELECT toDecimal32(pi(), 8) AS x, round(sin(x), 8), round(cos(x), 8), round(tan(x), 8); +SELECT toDecimal32('1.0', 2) AS x, asin(x), acos(x), atan(x); + + +SELECT toDecimal64('42.42', 4) AS x, toDecimal32(log(x), 4) AS y, round(exp(y), 6); +SELECT toDecimal64('42.42', 4) AS x, toDecimal32(log2(x), 4) AS y, round(exp2(y), 6); +SELECT toDecimal64('42.42', 4) AS x, toDecimal32(log10(x), 4) AS y, round(exp10(y), 6); + +SELECT toDecimal64('42.42', 4) AS x, toDecimal32(sqrt(x), 3) AS y, y * y; +SELECT toDecimal64('42.42', 4) AS x, toDecimal32(cbrt(x), 4) AS y, toDecimal64(y, 4) * y * y; +SELECT toDecimal64('1.0', 5) AS x, erf(x), erfc(x); +SELECT toDecimal64('42.42', 4) AS x, round(lgamma(x), 6), round(tgamma(x) / 1e50, 6); + +SELECT toDecimal64('0.0', 2) AS x, round(sin(x), 8), round(cos(x), 8), round(tan(x), 8); +SELECT toDecimal64(pi(), 17) AS x, round(sin(x), 8), round(cos(x), 8), round(tan(x), 8); +SELECT toDecimal64('1.0', 2) AS x, asin(x), acos(x), atan(x); + + +SELECT toDecimal128('42.42', 4) AS x, toDecimal32(log(x), 4) AS y, round(exp(y), 6); +SELECT toDecimal128('42.42', 4) AS x, toDecimal32(log2(x), 4) AS y, round(exp2(y), 6); +SELECT toDecimal128('42.42', 4) AS x, toDecimal32(log10(x), 4) AS y, round(exp10(y), 6); + +SELECT toDecimal128('42.42', 4) AS x, toDecimal32(sqrt(x), 3) AS y, y * y; +SELECT toDecimal128('42.42', 4) AS x, toDecimal32(cbrt(x), 4) AS y, toDecimal64(y, 4) * y * y; +SELECT toDecimal128('1.0', 5) AS x, erf(x), erfc(x); +SELECT toDecimal128('42.42', 4) AS x, round(lgamma(x), 6), round(tgamma(x) / 1e50, 6); + +SELECT toDecimal128('0.0', 2) AS x, round(sin(x), 8), round(cos(x), 8), round(tan(x), 8); +SELECT toDecimal128(pi(), 14) AS x, round(sin(x), 8), round(cos(x), 8), round(tan(x), 8); +SELECT toDecimal128('1.0', 2) AS x, asin(x), acos(x), atan(x); + + 
+SELECT toDecimal32('4.2', 1) AS x, pow(x, 2), pow(x, 0.5); +SELECT toDecimal64('4.2', 1) AS x, pow(x, 2), pow(x, 0.5); +SELECT toDecimal128('4.2', 1) AS x, pow(x, 2), pow(x, 0.5); diff --git a/parser/testdata/00700_decimal_null/ast.json b/parser/testdata/00700_decimal_null/ast.json new file mode 100644 index 000000000..eab19a78d --- /dev/null +++ b/parser/testdata/00700_decimal_null/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery decimal (children 1)" + }, + { + "explain": " Identifier decimal" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001118402, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00700_decimal_null/metadata.json b/parser/testdata/00700_decimal_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00700_decimal_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00700_decimal_null/query.sql b/parser/testdata/00700_decimal_null/query.sql new file mode 100644 index 000000000..dbd9ced43 --- /dev/null +++ b/parser/testdata/00700_decimal_null/query.sql @@ -0,0 +1,60 @@ +DROP TABLE IF EXISTS decimal; + +CREATE TABLE IF NOT EXISTS decimal +( + a DEC(9, 2), + b DEC(18, 5), + c DEC(38, 5), + d Nullable(DEC(9, 4)), + e Nullable(DEC(18, 8)), + f Nullable(DEC(38, 8)) +) ENGINE = Memory; + +SELECT toNullable(toDecimal32(32, 0)) AS x, assumeNotNull(x); +SELECT toNullable(toDecimal64(64, 0)) AS x, assumeNotNull(x); +SELECT toNullable(toDecimal128(128, 0)) AS x, assumeNotNull(x); + +SELECT ifNull(toDecimal32(1, 0), NULL), ifNull(toDecimal64(1, 0), NULL), ifNull(toDecimal128(1, 0), NULL); +SELECT ifNull(toNullable(toDecimal32(2, 0)), NULL), ifNull(toNullable(toDecimal64(2, 0)), NULL), ifNull(toNullable(toDecimal128(2, 0)), NULL); +SELECT ifNull(NULL, toDecimal32(3, 0)), ifNull(NULL, toDecimal64(3, 0)), ifNull(NULL, toDecimal128(3, 0)); +SELECT ifNull(NULL, toNullable(toDecimal32(4, 0))), ifNull(NULL, toNullable(toDecimal64(4, 0))), ifNull(NULL, toNullable(toDecimal128(4, 0))); + +SELECT coalesce(toDecimal32(5, 0), NULL), coalesce(toDecimal64(5, 0), NULL), coalesce(toDecimal128(5, 0), NULL); +SELECT coalesce(NULL, toDecimal32(6, 0)), coalesce(NULL, toDecimal64(6, 0)), coalesce(NULL, toDecimal128(6, 0)); + +SELECT coalesce(toNullable(toDecimal32(7, 0)), NULL), coalesce(toNullable(toDecimal64(7, 0)), NULL), coalesce(toNullable(toDecimal128(7, 0)), NULL); +SELECT coalesce(NULL, toNullable(toDecimal32(8, 0))), coalesce(NULL, toNullable(toDecimal64(8, 0))), coalesce(NULL, toNullable(toDecimal128(8, 0))); + +SELECT nullIf(toNullable(toDecimal32(1, 0)), toDecimal32(1, 0)), nullIf(toNullable(toDecimal64(1, 0)), toDecimal64(1, 0)); +SELECT nullIf(toDecimal32(1, 0), toNullable(toDecimal32(1, 0))), nullIf(toDecimal64(1, 0), toNullable(toDecimal64(1, 0))); +SELECT nullIf(toNullable(toDecimal32(1, 0)), toDecimal32(2, 0)), nullIf(toNullable(toDecimal64(1, 0)), toDecimal64(2, 0)); +SELECT nullIf(toDecimal32(1, 0), toNullable(toDecimal32(2, 0))), nullIf(toDecimal64(1, 0), toNullable(toDecimal64(2, 0))); +SELECT nullIf(toNullable(toDecimal128(1, 0)), toDecimal128(1, 0)); +SELECT nullIf(toDecimal128(1, 0), toNullable(toDecimal128(1, 0))); +SELECT nullIf(toNullable(toDecimal128(1, 0)), toDecimal128(2, 0)); +SELECT nullIf(toDecimal128(1, 0), toNullable(toDecimal128(2, 0))); + +INSERT INTO decimal (a, b, c, d, e, f) VALUES (1.1, 1.1, 1.1, 1.1, 1.1, 1.1); +INSERT INTO decimal (a, b, c, d) VALUES (2.2, 2.2, 
2.2, 2.2); +INSERT INTO decimal (a, b, c, e) VALUES (3.3, 3.3, 3.3, 3.3); +INSERT INTO decimal (a, b, c, f) VALUES (4.4, 4.4, 4.4, 4.4); +INSERT INTO decimal (a, b, c) VALUES (5.5, 5.5, 5.5); + +SELECT * FROM decimal ORDER BY d, e, f; +SELECT isNull(a), isNotNull(a) FROM decimal WHERE a = toDecimal32(5.5, 1); +SELECT isNull(b), isNotNull(b) FROM decimal WHERE a = toDecimal32(5.5, 1); +SELECT isNull(c), isNotNull(c) FROM decimal WHERE a = toDecimal32(5.5, 1); +SELECT isNull(d), isNotNull(d) FROM decimal WHERE a = toDecimal32(5.5, 1); +SELECT isNull(e), isNotNull(e) FROM decimal WHERE a = toDecimal32(5.5, 1); +SELECT isNull(f), isNotNull(f) FROM decimal WHERE a = toDecimal32(5.5, 1); +SELECT count() FROM decimal WHERE a IS NOT NULL; +SELECT count() FROM decimal WHERE b IS NOT NULL; +SELECT count() FROM decimal WHERE c IS NOT NULL; +SELECT count() FROM decimal WHERE d IS NULL; +SELECT count() FROM decimal WHERE e IS NULL; +SELECT count() FROM decimal WHERE f IS NULL; +SELECT count() FROM decimal WHERE d IS NULL AND e IS NULL; +SELECT count() FROM decimal WHERE d IS NULL AND f IS NULL; +SELECT count() FROM decimal WHERE e IS NULL AND f IS NULL; + +DROP TABLE IF EXISTS decimal; diff --git a/parser/testdata/00700_decimal_round/ast.json b/parser/testdata/00700_decimal_round/ast.json new file mode 100644 index 000000000..0765cca6e --- /dev/null +++ b/parser/testdata/00700_decimal_round/ast.json @@ -0,0 +1,115 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 7)" + }, + { + "explain": " Function toDecimal32 (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_12345.6789" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_5" + } + ], + + "rows": 31, + + "statistics": + { + "elapsed": 0.001006645, + "rows_read": 31, + "bytes_read": 1094 + } +} diff --git a/parser/testdata/00700_decimal_round/metadata.json b/parser/testdata/00700_decimal_round/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00700_decimal_round/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00700_decimal_round/query.sql b/parser/testdata/00700_decimal_round/query.sql new file mode 100644 index 000000000..bf2749ac0 --- 
/dev/null +++ b/parser/testdata/00700_decimal_round/query.sql @@ -0,0 +1,94 @@ +SELECT toDecimal32(12345.6789, 4) AS x, round(x), round(x, 1), round(x, 2), round(x, 3), round(x, 4), round(x, 5); +SELECT toDecimal32(12345.6789, 4) AS x, roundBankers(x), roundBankers(x, 1), roundBankers(x, 2), roundBankers(x, 3), roundBankers(x, 4), roundBankers(x, 5); +SELECT toDecimal32(12345.6789, 4) AS x, ceil(x), ceil(x, 1), ceil(x, 2), ceil(x, 3), ceil(x, 4), ceil(x, 5); +SELECT toDecimal32(12345.6789, 4) AS x, floor(x), floor(x, 1), floor(x, 2), floor(x, 3), floor(x, 4), floor(x, 5); +SELECT toDecimal32(12345.6789, 4) AS x, trunc(x), trunc(x, 1), trunc(x, 2), trunc(x, 3), trunc(x, 4), trunc(x, 5); +SELECT toDecimal32(-12345.6789, 4) AS x, round(x), round(x, 1), round(x, 2), round(x, 3), round(x, 4), round(x, 5); +SELECT toDecimal32(-12345.6789, 4) AS x, roundBankers(x), roundBankers(x, 1), roundBankers(x, 2), roundBankers(x, 3), roundBankers(x, 4), roundBankers(x, 5); +SELECT toDecimal32(-12345.6789, 4) AS x, ceil(x), ceil(x, 1), ceil(x, 2), ceil(x, 3), ceil(x, 4), ceil(x, 5); +SELECT toDecimal32(-12345.6789, 4) AS x, floor(x), floor(x, 1), floor(x, 2), floor(x, 3), floor(x, 4), floor(x, 5); +SELECT toDecimal32(-12345.6789, 4) AS x, trunc(x), trunc(x, 1), trunc(x, 2), trunc(x, 3), trunc(x, 4), trunc(x, 5); +SELECT toDecimal32(12345.6789, 4) AS x, round(x, -1), round(x, -2), round(x, -3), round(x, -4), round(x, -5); +SELECT toDecimal32(12345.6789, 4) AS x, roundBankers(x, -1), roundBankers(x, -2), roundBankers(x, -3), roundBankers(x, -4), roundBankers(x, -5); +SELECT toDecimal32(12345.6789, 4) AS x, ceil(x, -1), ceil(x, -2), ceil(x, -3), ceil(x, -4), ceil(x, -5); +SELECT toDecimal32(12345.6789, 4) AS x, floor(x, -1), floor(x, -2), floor(x, -3), floor(x, -4), floor(x, -5); +SELECT toDecimal32(12345.6789, 4) AS x, trunc(x, -1), trunc(x, -2), trunc(x, -3), trunc(x, -4), trunc(x, -5); +SELECT toDecimal32(-12345.6789, 4) AS x, round(x, -1), round(x, -2), round(x, -3), round(x, -4), round(x, -5); +SELECT toDecimal32(-12345.6789, 4) AS x, roundBankers(x, -1), roundBankers(x, -2), roundBankers(x, -3), roundBankers(x, -4), roundBankers(x, -5); +SELECT toDecimal32(-12345.6789, 4) AS x, ceil(x, -1), ceil(x, -2), ceil(x, -3), ceil(x, -4), ceil(x, -5); +SELECT toDecimal32(-12345.6789, 4) AS x, floor(x, -1), floor(x, -2), floor(x, -3), floor(x, -4), floor(x, -5); +SELECT toDecimal32(-12345.6789, 4) AS x, trunc(x, -1), trunc(x, -2), trunc(x, -3), trunc(x, -4), trunc(x, -5); + +SELECT toDecimal64(12345.6789, 4) AS x, round(x), round(x, 1), round(x, 2), round(x, 3), round(x, 4), round(x, 5); +SELECT toDecimal64(12345.6789, 4) AS x, roundBankers(x), roundBankers(x, 1), roundBankers(x, 2), roundBankers(x, 3), roundBankers(x, 4), roundBankers(x, 5); +SELECT toDecimal64(12345.6789, 4) AS x, ceil(x), ceil(x, 1), ceil(x, 2), ceil(x, 3), ceil(x, 4), ceil(x, 5); +SELECT toDecimal64(12345.6789, 4) AS x, floor(x), floor(x, 1), floor(x, 2), floor(x, 3), floor(x, 4), floor(x, 5); +SELECT toDecimal64(12345.6789, 4) AS x, trunc(x), trunc(x, 1), trunc(x, 2), trunc(x, 3), trunc(x, 4), trunc(x, 5); +SELECT toDecimal64(-12345.6789, 4) AS x, round(x), round(x, 1), round(x, 2), round(x, 3), round(x, 4), round(x, 5); +SELECT toDecimal64(-12345.6789, 4) AS x, roundBankers(x), roundBankers(x, 1), roundBankers(x, 2), roundBankers(x, 3), roundBankers(x, 4), roundBankers(x, 5); +SELECT toDecimal64(-12345.6789, 4) AS x, ceil(x), ceil(x, 1), ceil(x, 2), ceil(x, 3), ceil(x, 4), ceil(x, 5); +SELECT toDecimal64(-12345.6789, 4) AS x, floor(x), 
floor(x, 1), floor(x, 2), floor(x, 3), floor(x, 4), floor(x, 5); +SELECT toDecimal64(-12345.6789, 4) AS x, trunc(x), trunc(x, 1), trunc(x, 2), trunc(x, 3), trunc(x, 4), trunc(x, 5); +SELECT toDecimal64(12345.6789, 4) AS x, round(x, -1), round(x, -2), round(x, -3), round(x, -4), round(x, -5); +SELECT toDecimal64(12345.6789, 4) AS x, roundBankers(x, -1), roundBankers(x, -2), roundBankers(x, -3), roundBankers(x, -4), roundBankers(x, -5); +SELECT toDecimal64(12345.6789, 4) AS x, ceil(x, -1), ceil(x, -2), ceil(x, -3), ceil(x, -4), ceil(x, -5); +SELECT toDecimal64(12345.6789, 4) AS x, floor(x, -1), floor(x, -2), floor(x, -3), floor(x, -4), floor(x, -5); +SELECT toDecimal64(12345.6789, 4) AS x, trunc(x, -1), trunc(x, -2), trunc(x, -3), trunc(x, -4), trunc(x, -5); +SELECT toDecimal64(-12345.6789, 4) AS x, round(x, -1), round(x, -2), round(x, -3), round(x, -4), round(x, -5); +SELECT toDecimal64(-12345.6789, 4) AS x, roundBankers(x, -1), roundBankers(x, -2), roundBankers(x, -3), roundBankers(x, -4), roundBankers(x, -5); +SELECT toDecimal64(-12345.6789, 4) AS x, ceil(x, -1), ceil(x, -2), ceil(x, -3), ceil(x, -4), ceil(x, -5); +SELECT toDecimal64(-12345.6789, 4) AS x, floor(x, -1), floor(x, -2), floor(x, -3), floor(x, -4), floor(x, -5); +SELECT toDecimal64(-12345.6789, 4) AS x, trunc(x, -1), trunc(x, -2), trunc(x, -3), trunc(x, -4), trunc(x, -5); + +SELECT toDecimal128(12345.6789, 4) AS x, round(x), round(x, 1), round(x, 2), round(x, 3), round(x, 4), round(x, 5); +SELECT toDecimal128(12345.6789, 4) AS x, roundBankers(x), roundBankers(x, 1), roundBankers(x, 2), roundBankers(x, 3), roundBankers(x, 4), roundBankers(x, 5); +SELECT toDecimal128(12345.6789, 4) AS x, ceil(x), ceil(x, 1), ceil(x, 2), ceil(x, 3), ceil(x, 4), ceil(x, 5); +SELECT toDecimal128(12345.6789, 4) AS x, floor(x), floor(x, 1), floor(x, 2), floor(x, 3), floor(x, 4), floor(x, 5); +SELECT toDecimal128(12345.6789, 4) AS x, trunc(x), trunc(x, 1), trunc(x, 2), trunc(x, 3), trunc(x, 4), trunc(x, 5); +SELECT toDecimal128(-12345.6789, 4) AS x, round(x), round(x, 1), round(x, 2), round(x, 3), round(x, 4), round(x, 5); +SELECT toDecimal128(-12345.6789, 4) AS x, roundBankers(x), roundBankers(x, 1), roundBankers(x, 2), roundBankers(x, 3), roundBankers(x, 4), roundBankers(x, 5); +SELECT toDecimal128(-12345.6789, 4) AS x, ceil(x), ceil(x, 1), ceil(x, 2), ceil(x, 3), ceil(x, 4), ceil(x, 5); +SELECT toDecimal128(-12345.6789, 4) AS x, floor(x), floor(x, 1), floor(x, 2), floor(x, 3), floor(x, 4), floor(x, 5); +SELECT toDecimal128(-12345.6789, 4) AS x, trunc(x), trunc(x, 1), trunc(x, 2), trunc(x, 3), trunc(x, 4), trunc(x, 5); +SELECT toDecimal128(12345.6789, 4) AS x, round(x, -1), round(x, -2), round(x, -3), round(x, -4), round(x, -5); +SELECT toDecimal128(12345.6789, 4) AS x, roundBankers(x, -1), roundBankers(x, -2), roundBankers(x, -3), roundBankers(x, -4), roundBankers(x, -5); +SELECT toDecimal128(12345.6789, 4) AS x, ceil(x, -1), ceil(x, -2), ceil(x, -3), ceil(x, -4), ceil(x, -5); +SELECT toDecimal128(12345.6789, 4) AS x, floor(x, -1), floor(x, -2), floor(x, -3), floor(x, -4), floor(x, -5); +SELECT toDecimal128(12345.6789, 4) AS x, trunc(x, -1), trunc(x, -2), trunc(x, -3), trunc(x, -4), trunc(x, -5); +SELECT toDecimal128(-12345.6789, 4) AS x, round(x, -1), round(x, -2), round(x, -3), round(x, -4), round(x, -5); +SELECT toDecimal128(-12345.6789, 4) AS x, roundBankers(x, -1), roundBankers(x, -2), roundBankers(x, -3), roundBankers(x, -4), roundBankers(x, -5); +SELECT toDecimal128(-12345.6789, 4) AS x, ceil(x, -1), ceil(x, -2), ceil(x, -3), ceil(x, -4), 
ceil(x, -5); +SELECT toDecimal128(-12345.6789, 4) AS x, floor(x, -1), floor(x, -2), floor(x, -3), floor(x, -4), floor(x, -5); +SELECT toDecimal128(-12345.6789, 4) AS x, trunc(x, -1), trunc(x, -2), trunc(x, -3), trunc(x, -4), trunc(x, -5); + +SELECT toDecimal64('123456789.123456789', 9) AS x, -x AS y, round(x), round(y), round(x, 5), round(y, 5), round(x, -5), round(y, -5); +SELECT toDecimal64('123456789.123456789', 9) AS x, -x AS y, roundBankers(x), roundBankers(y), roundBankers(x, 5), roundBankers(y, 5), roundBankers(x, -5), roundBankers(y, -5); +SELECT toDecimal64('123456789.123456789', 9) AS x, -x AS y, ceil(x), ceil(y), ceil(x, 5), ceil(y, 5), ceil(x, -5), ceil(y, -5); +SELECT toDecimal64('123456789.123456789', 9) AS x, -x AS y, floor(x), floor(y), floor(x, 5), floor(y, 5), floor(x, -5), floor(y, -5); +SELECT toDecimal64('123456789.123456789', 9) AS x, -x AS y, trunc(x), trunc(y), trunc(x, 5), trunc(y, 5), trunc(x, -5), trunc(y, -5); + +SELECT toDecimal128('12345678901234567890123456789.123456789', 9) AS x, -x AS y, round(x), round(y), round(x, 3), round(y, 3); +SELECT toDecimal128('12345678901234567890123456789.123456789', 9) AS x, -x AS y, roundBankers(x), roundBankers(y), roundBankers(x, 3), roundBankers(y, 3); +SELECT toDecimal128('12345678901234567890123456789.123456789', 9) AS x, -x AS y, ceil(x), ceil(y), ceil(x, 3), ceil(y, 3); +SELECT toDecimal128('12345678901234567890123456789.123456789', 9) AS x, -x AS y, floor(x), floor(y), floor(x, 3), floor(y, 3); +SELECT toDecimal128('12345678901234567890123456789.123456789', 9) AS x, -x AS y, trunc(x), trunc(y), trunc(x, 3), trunc(y, 3); + +SELECT toDecimal128('12345678901234567890123456789.123456789', 9) AS x, -x AS y, round(x), round(y), round(x, -3), round(y, -3); +SELECT toDecimal128('12345678901234567890123456789.123456789', 9) AS x, -x AS y, roundBankers(x), roundBankers(y), roundBankers(x, -3), roundBankers(y, -3); +SELECT toDecimal128('12345678901234567890123456789.123456789', 9) AS x, -x AS y, ceil(x), ceil(y), ceil(x, -3), ceil(y, -3); +SELECT toDecimal128('12345678901234567890123456789.123456789', 9) AS x, -x AS y, floor(x), floor(y), floor(x, -3), floor(y, -3); +SELECT toDecimal128('12345678901234567890123456789.123456789', 9) AS x, -x AS y, trunc(x), trunc(y), trunc(x, -3), trunc(y, -3); + +select '-- Decimal128, Scale 20'; +SELECT 'round() : ', toDecimal128('1234567890.123456789', 20) AS x, -x AS y, round(x), round(y), round(x, -3), round(y, -3); +SELECT 'roundBankers() : ', toDecimal128('1234567890.123456789', 20) AS x, -x AS y, roundBankers(x), roundBankers(y), roundBankers(x, -3), roundBankers(y, -3); +SELECT 'ceil() : ', toDecimal128('1234567890.123456789', 20) AS x, -x AS y, ceil(x), ceil(y), ceil(x, -3), ceil(y, -3); +SELECT 'floor() : ', toDecimal128('1234567890.123456789', 20) AS x, -x AS y, floor(x), floor(y), floor(x, -3), floor(y, -3); +SELECT 'trunc() : ', toDecimal128('1234567890.123456789', 20) AS x, -x AS y, trunc(x), trunc(y), trunc(x, -3), trunc(y, -3); + +select '-- Decimal256, Scale 40'; +SELECT 'round() : ', toDecimal256('1234567890.123456789', 40) AS x, -x AS y, round(x), round(y), round(x, -3), round(y, -3); +SELECT 'roundBankers() : ', toDecimal256('1234567890.123456789', 40) AS x, -x AS y, roundBankers(x), roundBankers(y), roundBankers(x, -3), roundBankers(y, -3); +SELECT 'ceil() : ', toDecimal256('1234567890.123456789', 40) AS x, -x AS y, ceil(x), ceil(y), ceil(x, -3), ceil(y, -3); +SELECT 'floor() : ', toDecimal256('1234567890.123456789', 40) AS x, -x AS y, floor(x), floor(y), floor(x, -3), 
floor(y, -3); +SELECT 'trunc() : ', toDecimal256('1234567890.123456789', 40) AS x, -x AS y, trunc(x), trunc(y), trunc(x, -3), trunc(y, -3); diff --git a/parser/testdata/00700_decimal_with_default_precision_and_scale/ast.json b/parser/testdata/00700_decimal_with_default_precision_and_scale/ast.json new file mode 100644 index 000000000..52919b7a1 --- /dev/null +++ b/parser/testdata/00700_decimal_with_default_precision_and_scale/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery decimal (children 1)" + }, + { + "explain": " Identifier decimal" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001266029, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00700_decimal_with_default_precision_and_scale/metadata.json b/parser/testdata/00700_decimal_with_default_precision_and_scale/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00700_decimal_with_default_precision_and_scale/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00700_decimal_with_default_precision_and_scale/query.sql b/parser/testdata/00700_decimal_with_default_precision_and_scale/query.sql new file mode 100644 index 000000000..5132b593b --- /dev/null +++ b/parser/testdata/00700_decimal_with_default_precision_and_scale/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS decimal; + +CREATE TABLE IF NOT EXISTS decimal +( + d1 DECIMAL(9, 8), + d2 DECIMAL(18), + d3 DECIMAL +) +ENGINE = MergeTree +PARTITION BY toInt32(d1) +ORDER BY (d2, d3); + +INSERT INTO decimal (d1, d2, d3) VALUES (4.2, 4.2, 4.2); + +SELECT type FROM system.columns WHERE table = 'decimal' AND database = currentDatabase() ORDER BY type; + +SELECT toTypeName(d2), toTypeName(d3) FROM decimal LIMIT 1; + +DROP TABLE decimal; diff --git a/parser/testdata/00700_to_decimal_or_something/ast.json b/parser/testdata/00700_to_decimal_or_something/ast.json new file mode 100644 index 000000000..bbca5407e --- /dev/null +++ b/parser/testdata/00700_to_decimal_or_something/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function toDecimal32OrZero (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1.1'" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function toDecimal32OrZero (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1.1'" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function toDecimal32OrZero (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1.1'" + }, + { + "explain": " Literal UInt64_8" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001144277, + "rows_read": 16, + "bytes_read": 593 + } +} diff --git a/parser/testdata/00700_to_decimal_or_something/metadata.json b/parser/testdata/00700_to_decimal_or_something/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00700_to_decimal_or_something/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00700_to_decimal_or_something/query.sql 
b/parser/testdata/00700_to_decimal_or_something/query.sql new file mode 100644 index 000000000..8d932d3d7 --- /dev/null +++ b/parser/testdata/00700_to_decimal_or_something/query.sql @@ -0,0 +1,43 @@ +SELECT toDecimal32OrZero('1.1', 1), toDecimal32OrZero('1.1', 2), toDecimal32OrZero('1.1', 8); +SELECT toDecimal32OrZero('1.1', 0); +SELECT toDecimal32OrZero(1.1, 0); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT toDecimal128OrZero('', 0) AS x, toDecimal128OrZero('0.42', 2) AS y; +SELECT toDecimal64OrZero('', 0) AS x, toDecimal64OrZero('0.42', 3) AS y; +SELECT toDecimal32OrZero('', 0) AS x, toDecimal32OrZero('0.42', 4) AS y; + +SELECT toDecimal32OrZero('999999999', 0), toDecimal32OrZero('1000000000', 0); +SELECT toDecimal32OrZero('-999999999', 0), toDecimal32OrZero('-1000000000', 0); +SELECT toDecimal64OrZero('999999999999999999', 0), toDecimal64OrZero('1000000000000000000', 0); +SELECT toDecimal64OrZero('-999999999999999999', 0), toDecimal64OrZero('-1000000000000000000', 0); +SELECT toDecimal128OrZero('99999999999999999999999999999999999999', 0); +SELECT toDecimal64OrZero('100000000000000000000000000000000000000', 0); +SELECT toDecimal128OrZero('-99999999999999999999999999999999999999', 0); +SELECT toDecimal64OrZero('-100000000000000000000000000000000000000', 0); + +SELECT toDecimal32OrZero('1', rowNumberInBlock()); -- { serverError ILLEGAL_COLUMN } +SELECT toDecimal64OrZero('1', rowNumberInBlock()); -- { serverError ILLEGAL_COLUMN } +SELECT toDecimal128OrZero('1', rowNumberInBlock()); -- { serverError ILLEGAL_COLUMN } + +SELECT '----'; + +SELECT toDecimal32OrNull('1.1', 1), toDecimal32OrNull('1.1', 2), toDecimal32OrNull('1.1', 8); +SELECT toDecimal32OrNull('1.1', 0); +SELECT toDecimal32OrNull(1.1, 0); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT toDecimal128OrNull('', 0) AS x, toDecimal128OrNull('-0.42', 2) AS y; +SELECT toDecimal64OrNull('', 0) AS x, toDecimal64OrNull('-0.42', 3) AS y; +SELECT toDecimal32OrNull('', 0) AS x, toDecimal32OrNull('-0.42', 4) AS y; + +SELECT toDecimal32OrNull('999999999', 0), toDecimal32OrNull('1000000000', 0); +SELECT toDecimal32OrNull('-999999999', 0), toDecimal32OrNull('-1000000000', 0); +SELECT toDecimal64OrNull('999999999999999999', 0), toDecimal64OrNull('1000000000000000000', 0); +SELECT toDecimal64OrNull('-999999999999999999', 0), toDecimal64OrNull('-1000000000000000000', 0); +SELECT toDecimal128OrNull('99999999999999999999999999999999999999', 0); +SELECT toDecimal64OrNull('100000000000000000000000000000000000000', 0); +SELECT toDecimal128OrNull('-99999999999999999999999999999999999999', 0); +SELECT toDecimal64OrNull('-100000000000000000000000000000000000000', 0); + +SELECT toDecimal32OrNull('1', rowNumberInBlock()); -- { serverError ILLEGAL_COLUMN } +SELECT toDecimal64OrNull('1', rowNumberInBlock()); -- { serverError ILLEGAL_COLUMN } +SELECT toDecimal128OrNull('1', rowNumberInBlock()); -- { serverError ILLEGAL_COLUMN } diff --git a/parser/testdata/00701_context_use_after_free/ast.json b/parser/testdata/00701_context_use_after_free/ast.json new file mode 100644 index 000000000..e52bfef9e --- /dev/null +++ b/parser/testdata/00701_context_use_after_free/ast.json @@ -0,0 +1,106 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": 
" ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDecimal128 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '1'" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function toDecimal128 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2'" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDecimal128 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1'" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function toDecimal128 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2'" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 28, + + "statistics": + { + "elapsed": 0.001066477, + "rows_read": 28, + "bytes_read": 1120 + } +} diff --git a/parser/testdata/00701_context_use_after_free/metadata.json b/parser/testdata/00701_context_use_after_free/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00701_context_use_after_free/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00701_context_use_after_free/query.sql b/parser/testdata/00701_context_use_after_free/query.sql new file mode 100644 index 000000000..c7e68fcdf --- /dev/null +++ b/parser/testdata/00701_context_use_after_free/query.sql @@ -0,0 +1 @@ +SELECT (toDecimal128(materialize('1'), 0), toDecimal128('2', 0)) < (toDecimal128('1', 0), toDecimal128('2', 0)); diff --git a/parser/testdata/00701_join_default_strictness/ast.json b/parser/testdata/00701_join_default_strictness/ast.json new file mode 100644 index 000000000..c19fb67ef --- /dev/null +++ b/parser/testdata/00701_join_default_strictness/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery a1 (children 1)" + }, + { + "explain": " Identifier a1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000988396, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/00701_join_default_strictness/metadata.json b/parser/testdata/00701_join_default_strictness/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00701_join_default_strictness/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00701_join_default_strictness/query.sql b/parser/testdata/00701_join_default_strictness/query.sql new file mode 100644 index 000000000..6a10b4b88 --- /dev/null +++ b/parser/testdata/00701_join_default_strictness/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS a1; +DROP TABLE IF EXISTS a2; + +SET send_logs_level = 'fatal'; + +CREATE TABLE a1(a UInt8, b UInt8) ENGINE=Memory; +CREATE TABLE a2(a UInt8, b UInt8) ENGINE=Memory; + +INSERT INTO a1 VALUES (1, 1); +INSERT INTO a1 VALUES (1, 2); +INSERT INTO a1 VALUES (1, 3); +INSERT INTO a2 VALUES (1, 2); +INSERT INTO a2 VALUES (1, 3); +INSERT INTO a2 VALUES (1, 4); + +SELECT a, b FROM a1 LEFT JOIN (SELECT a, b FROM a2) js2 USING a ORDER BY b SETTINGS join_default_strictness='ANY'; +SELECT a, b FROM a1 LEFT JOIN (SELECT 
a, b FROM a2) js2 USING a ORDER BY b; -- default SETTINGS join_default_strictness='ALL'; + +DROP TABLE IF EXISTS a1; +DROP TABLE IF EXISTS a2; diff --git a/parser/testdata/00701_rollup/ast.json b/parser/testdata/00701_rollup/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00701_rollup/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00701_rollup/metadata.json b/parser/testdata/00701_rollup/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00701_rollup/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00701_rollup/query.sql b/parser/testdata/00701_rollup/query.sql new file mode 100644 index 000000000..4679b123c --- /dev/null +++ b/parser/testdata/00701_rollup/query.sql @@ -0,0 +1,25 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS rollup; +CREATE TABLE rollup(a String, b Int32, s Int32) ENGINE = Memory; + +INSERT INTO rollup VALUES ('a', 1, 10), ('a', 1, 15), ('a', 2, 20); +INSERT INTO rollup VALUES ('a', 2, 25), ('b', 1, 10), ('b', 1, 5); +INSERT INTO rollup VALUES ('b', 2, 20), ('b', 2, 15); + +SELECT a, b, sum(s), count() from rollup GROUP BY ROLLUP(a, b) ORDER BY a, b; + +SELECT a, b, sum(s), count() from rollup GROUP BY ROLLUP(a, b) WITH TOTALS ORDER BY a, b; + +SELECT a, sum(s), count() from rollup GROUP BY ROLLUP(a) ORDER BY a; + +SELECT a, sum(s), count() from rollup GROUP BY a WITH ROLLUP ORDER BY a; + +SELECT a, sum(s), count() from rollup GROUP BY a WITH ROLLUP WITH TOTALS ORDER BY a; + +SET group_by_two_level_threshold = 1; + +SELECT a, sum(s), count() from rollup GROUP BY a WITH ROLLUP ORDER BY a; +SELECT a, b, sum(s), count() from rollup GROUP BY a, b WITH ROLLUP ORDER BY a, b; + +DROP TABLE rollup; diff --git a/parser/testdata/00702_join_on_dups/ast.json b/parser/testdata/00702_join_on_dups/ast.json new file mode 100644 index 000000000..2fd343881 --- /dev/null +++ b/parser/testdata/00702_join_on_dups/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery X (children 1)" + }, + { + "explain": " Identifier X" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000983641, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/00702_join_on_dups/metadata.json b/parser/testdata/00702_join_on_dups/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00702_join_on_dups/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00702_join_on_dups/query.sql b/parser/testdata/00702_join_on_dups/query.sql new file mode 100644 index 000000000..4d7c1204f --- /dev/null +++ b/parser/testdata/00702_join_on_dups/query.sql @@ -0,0 +1,79 @@ +drop table if exists X; +drop table if exists Y; + +create table X (id Int32, x_a String, x_b Nullable(Int32)) engine MergeTree ORDER BY tuple(); +create table Y (id Int32, y_a String, y_b Nullable(String)) engine MergeTree ORDER BY tuple(); + +insert into X (id, x_a, x_b) values (1, 'l1', 1), (2, 'l2', 2), (2, 'l3', 3), (3, 'l4', 4); +insert into X (id, x_a) values (4, 'l5'), (4, 'l6'), (5, 'l7'), (8, 'l8'), (9, 'l9'); +insert into Y (id, y_a) values (1, 'r1'), (1, 'r2'), (2, 'r3'), (3, 'r4'), (3, 'r5'); +insert into Y (id, y_a, y_b) values (4, 'r6', 'nr6'), (6, 'r7', 'nr7'), (7, 'r8', 'nr8'), (9, 'r9', 'nr9'); + +select 'inner'; +select X.*, Y.* from X inner join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'inner subs'; 
+select s.*, j.* from (select * from X) as s inner join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +select 'inner expr'; +select X.*, Y.* from X inner join Y on (X.id + 1) = (Y.id + 1) order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; + +select 'left'; +select X.*, Y.* from X left join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'left subs'; +select s.*, j.* from (select * from X) as s left join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +select 'left expr'; +select X.*, Y.* from X left join Y on (X.id + 1) = (Y.id + 1) order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; + +select 'right'; +select X.*, Y.* from X right join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'right subs'; +select s.*, j.* from (select * from X) as s right join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +--select 'right expr'; +--select X.*, Y.* from X right join Y on (X.id + 1) = (Y.id + 1) order by id; + +select 'full'; +select X.*, Y.* from X full join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'full subs'; +select s.*, j.* from (select * from X) as s full join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +--select 'full expr'; +--select X.*, Y.* from X full join Y on (X.id + 1) = (Y.id + 1) order by id; + +select 'self inner'; +select X.*, s.* from X inner join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self inner nullable'; +select X.*, s.* from X inner join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self inner nullable vs not nullable'; +select X.*, s.* from X inner join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +-- TODO: s.y_b == '' instead of NULL +select 'self inner nullable vs not nullable 2'; +select Y.*, s.* from Y inner join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by Y.id, Y.y_a, Y.y_b, s.id, s.y_a, s.y_b; + +select 'self left'; +select X.*, s.* from X left join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self left nullable'; +select X.*, s.* from X left join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self left nullable vs not nullable'; +select X.*, s.* from X left join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +-- TODO: s.y_b == '' instead of NULL +select 'self left nullable vs not nullable 2'; +select Y.*, s.* from Y left join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by Y.id, Y.y_a, Y.y_b, s.id, s.y_a, s.y_b; + +select 'self right'; +select X.*, s.* from X right join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self right nullable'; +select X.*, s.* from X right join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self right nullable vs not nullable'; +select X.*, s.* from X right join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +--select 'self right nullable vs not nullable 2'; +--select Y.*, s.* from Y right join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by id; + +select 'self full'; +select X.*, s.* from X full join (select * from X) as s on 
X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full nullable'; +select X.*, s.* from X full join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full nullable vs not nullable'; +select X.*, s.* from X full join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +--select 'self full nullable vs not nullable 2'; +--select Y.*, s.* from Y full join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by id; + +drop table X; +drop table Y; diff --git a/parser/testdata/00702_join_with_using/ast.json b/parser/testdata/00702_join_with_using/ast.json new file mode 100644 index 000000000..2ce81ddf1 --- /dev/null +++ b/parser/testdata/00702_join_with_using/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery using1 (children 1)" + }, + { + "explain": " Identifier using1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001108141, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/00702_join_with_using/metadata.json b/parser/testdata/00702_join_with_using/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00702_join_with_using/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00702_join_with_using/query.sql b/parser/testdata/00702_join_with_using/query.sql new file mode 100644 index 000000000..462524e19 --- /dev/null +++ b/parser/testdata/00702_join_with_using/query.sql @@ -0,0 +1,39 @@ +DROP TABLE IF EXISTS using1; +DROP TABLE IF EXISTS using2; + +CREATE TABLE using1(a UInt8, b UInt8) ENGINE=Memory; +CREATE TABLE using2(a UInt8, b UInt8) ENGINE=Memory; + +INSERT INTO using1 VALUES (1, 1) (2, 2) (3, 3); +INSERT INTO using2 VALUES (4, 4) (2, 2) (3, 3); + +SELECT * FROM using1 ALL LEFT JOIN (SELECT * FROM using2) js2 USING (a, b) ORDER BY a; + +DROP TABLE using1; +DROP TABLE using2; + +-- + +drop table if exists persons; +drop table if exists children; + +create table persons (id String, name String) engine MergeTree order by id; +create table children (id String, childName String) engine MergeTree order by id; + +insert into persons (id, name) +values ('1', 'John'), ('2', 'Jack'), ('3', 'Daniel'), ('4', 'James'), ('5', 'Amanda'); + +insert into children (id, childName) +values ('1', 'Robert'), ('1', 'Susan'), ('3', 'Sarah'), ('4', 'David'), ('4', 'Joseph'), ('5', 'Robert'); + +select * from persons all inner join children using id order by id, name, childName; +select * from persons all inner join (select * from children) as j using id order by id, name, childName; +select * from (select * from persons) as s all inner join (select * from children ) as j using id order by id, name, childName; +-- +set joined_subquery_requires_alias = 0; +select * from persons all inner join (select * from children) using id order by id, name, childName; +select * from (select * from persons) all inner join (select * from children) using id order by id, name, childName; +select * from (select * from persons) as s all inner join (select * from children) using id order by id, name, childName; + +drop table persons; +drop table children; diff --git a/parser/testdata/00702_join_with_using_dups/ast.json b/parser/testdata/00702_join_with_using_dups/ast.json new file mode 100644 index 000000000..c966edc9e --- /dev/null +++ b/parser/testdata/00702_join_with_using_dups/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + 
"name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery X (children 1)" + }, + { + "explain": " Identifier X" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001065457, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/00702_join_with_using_dups/metadata.json b/parser/testdata/00702_join_with_using_dups/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00702_join_with_using_dups/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00702_join_with_using_dups/query.sql b/parser/testdata/00702_join_with_using_dups/query.sql new file mode 100644 index 000000000..7af522dd9 --- /dev/null +++ b/parser/testdata/00702_join_with_using_dups/query.sql @@ -0,0 +1,31 @@ +drop table if exists X; +drop table if exists Y; + +create table X (id Int32, x_name String) engine MergeTree ORDER BY tuple(); +create table Y (id Int32, y_name String) engine MergeTree ORDER BY tuple(); + +insert into X (id, x_name) values (1, 'A'), (2, 'B'), (2, 'C'), (3, 'D'), (4, 'E'), (4, 'F'), (5, 'G'), (8, 'H'), (9, 'I'); +insert into Y (id, y_name) values (1, 'a'), (1, 'b'), (2, 'c'), (3, 'd'), (3, 'e'), (4, 'f'), (6, 'g'), (7, 'h'), (9, 'i'); + +select 'inner'; +select X.*, Y.* from X inner join Y using id order by X.id, Y.id, X.x_name, Y.y_name; +select 'inner subs'; +select s.*, j.* from (select * from X) as s inner join (select * from Y) as j using id order by s.id, s.id, s.x_name, j.y_name; + +select 'left'; +select X.*, Y.* from X left join Y using id order by X.id, Y.id, X.x_name, Y.y_name; +select 'left subs'; +select s.*, j.* from (select * from X) as s left join (select * from Y) as j using id order by s.id, j.id, s.x_name, j.y_name; + +select 'right'; +select X.*, Y.* from X right join Y using id order by X.id, Y.id, X.x_name, Y.y_name; +select 'right subs'; +select s.*, j.* from (select * from X) as s right join (select * from Y) as j using id order by s.id, j.id, s.x_name, j.y_name; + +select 'full'; +select X.*, Y.* from X full join Y using id order by X.id, Y.id, X.x_name, Y.y_name; +select 'full subs'; +select s.*, j.* from (select * from X) as s full join (select * from Y) as j using id order by s.id, j.id, s.x_name, j.y_name; + +drop table X; +drop table Y; diff --git a/parser/testdata/00702_where_with_quailified_names/ast.json b/parser/testdata/00702_where_with_quailified_names/ast.json new file mode 100644 index 000000000..45a65f243 --- /dev/null +++ b/parser/testdata/00702_where_with_quailified_names/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery where_qualified (children 1)" + }, + { + "explain": " Identifier where_qualified" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001401823, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/00702_where_with_quailified_names/metadata.json b/parser/testdata/00702_where_with_quailified_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00702_where_with_quailified_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00702_where_with_quailified_names/query.sql b/parser/testdata/00702_where_with_quailified_names/query.sql new file mode 100644 index 000000000..60dadff14 --- /dev/null +++ b/parser/testdata/00702_where_with_quailified_names/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS where_qualified; +CREATE TABLE 
where_qualified(a UInt32, b UInt8) ENGINE = Memory; +INSERT INTO where_qualified VALUES(1, 1); +INSERT INTO where_qualified VALUES(2, 0); +SELECT a from where_qualified WHERE where_qualified.b; +DROP TABLE where_qualified; diff --git a/parser/testdata/00703_join_crash/ast.json b/parser/testdata/00703_join_crash/ast.json new file mode 100644 index 000000000..b7b37c5da --- /dev/null +++ b/parser/testdata/00703_join_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab1 (children 1)" + }, + { + "explain": " Identifier tab1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001077986, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/00703_join_crash/metadata.json b/parser/testdata/00703_join_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00703_join_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00703_join_crash/query.sql b/parser/testdata/00703_join_crash/query.sql new file mode 100644 index 000000000..eb968580b --- /dev/null +++ b/parser/testdata/00703_join_crash/query.sql @@ -0,0 +1,13 @@ +drop table IF EXISTS tab1; +drop table IF EXISTS tab1_copy; + +create table tab1 (a1 Int32, b1 Int32) engine = MergeTree order by a1; +create table tab1_copy (a1 Int32, b1 Int32) engine = MergeTree order by a1; + +insert into tab1 values (1, 2); +insert into tab1_copy values (2, 3); + +select tab1.a1, tab1_copy.a1, tab1.b1 from tab1 any left join tab1_copy on tab1.b1 + 3 = tab1_copy.b1 + 2; + +drop table tab1; +drop table tab1_copy; diff --git a/parser/testdata/00704_arrayCumSumLimited_arrayDifference/ast.json b/parser/testdata/00704_arrayCumSumLimited_arrayDifference/ast.json new file mode 100644 index 000000000..b035b741e --- /dev/null +++ b/parser/testdata/00704_arrayCumSumLimited_arrayDifference/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000902583, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/00704_arrayCumSumLimited_arrayDifference/metadata.json b/parser/testdata/00704_arrayCumSumLimited_arrayDifference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00704_arrayCumSumLimited_arrayDifference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00704_arrayCumSumLimited_arrayDifference/query.sql b/parser/testdata/00704_arrayCumSumLimited_arrayDifference/query.sql new file mode 100644 index 000000000..06dfe87a2 --- /dev/null +++ b/parser/testdata/00704_arrayCumSumLimited_arrayDifference/query.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS test; + +SELECT arrayCumSumNonNegative([1, 2, 3, 4]); + +SELECT arrayCumSumNonNegative([1, -5, 5, -2]); + +SELECT arrayDifference([1, 2, 3, 4]); + +SELECT arrayDifference([1, 7, 100, 5]); + +CREATE TABLE test(a Array(Int64), b Array(Float64), c Array(UInt64)) ENGINE=Memory; + +INSERT INTO test VALUES ([1, -3, 0, 1], [1.0, 0.4, -0.1], [1, 3, 1]); + +SELECT arrayCumSumNonNegative(a) FROM test; + +SELECT arrayCumSumNonNegative(b) FROM test; + +SELECT arrayCumSumNonNegative(c) FROM test; + +SELECT arrayDifference(a) FROM test; + +SELECT arrayDifference(b) FROM test; + +SELECT arrayDifference(c) FROM test; + +DROP TABLE IF EXISTS test; + diff --git 
a/parser/testdata/00705_aggregate_states_addition/ast.json b/parser/testdata/00705_aggregate_states_addition/ast.json new file mode 100644 index 000000000..fd814770b --- /dev/null +++ b/parser/testdata/00705_aggregate_states_addition/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001124711, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00705_aggregate_states_addition/metadata.json b/parser/testdata/00705_aggregate_states_addition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00705_aggregate_states_addition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00705_aggregate_states_addition/query.sql b/parser/testdata/00705_aggregate_states_addition/query.sql new file mode 100644 index 000000000..29510fc93 --- /dev/null +++ b/parser/testdata/00705_aggregate_states_addition/query.sql @@ -0,0 +1,22 @@ +SET send_logs_level = 'fatal'; +DROP TABLE IF EXISTS add_aggregate; +CREATE TABLE add_aggregate(a UInt32, b UInt32) ENGINE = Memory; + +INSERT INTO add_aggregate VALUES(1, 2); +INSERT INTO add_aggregate VALUES(3, 1); + +SELECT countMerge(x + y) FROM (SELECT countState(a) as x, countState(b) as y from add_aggregate); +SELECT sumMerge(x + y), sumMerge(x), sumMerge(y) FROM (SELECT sumState(a) as x, sumState(b) as y from add_aggregate); +SELECT sumMerge(x) FROM (SELECT sumState(a) + countState(b) as x FROM add_aggregate); -- { serverError CANNOT_ADD_DIFFERENT_AGGREGATE_STATES } +SELECT sumMerge(x) FROM (SELECT sumState(a) + sumState(toInt32(b)) as x FROM add_aggregate); -- { serverError CANNOT_ADD_DIFFERENT_AGGREGATE_STATES } + +SELECT minMerge(x) FROM (SELECT minState(a) + minState(b) as x FROM add_aggregate); + +SELECT uniqMerge(x + y) FROM (SELECT uniqState(a) as x, uniqState(b) as y FROM add_aggregate); + +SELECT arraySort(groupArrayMerge(x + y)) FROM (SELECT groupArrayState(a) AS x, groupArrayState(b) as y FROM add_aggregate); +SELECT arraySort(groupUniqArrayMerge(x + y)) FROM (SELECT groupUniqArrayState(a) AS x, groupUniqArrayState(b) as y FROM add_aggregate); + +SELECT uniqMerge(x + y) FROM (SELECT uniqState(65536, a) AS x, uniqState(b) AS y FROM add_aggregate); -- { serverError CANNOT_ADD_DIFFERENT_AGGREGATE_STATES } + +DROP TABLE IF EXISTS add_aggregate; diff --git a/parser/testdata/00706_iso_week_and_day_of_year/ast.json b/parser/testdata/00706_iso_week_and_day_of_year/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00706_iso_week_and_day_of_year/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00706_iso_week_and_day_of_year/metadata.json b/parser/testdata/00706_iso_week_and_day_of_year/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00706_iso_week_and_day_of_year/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00706_iso_week_and_day_of_year/query.sql b/parser/testdata/00706_iso_week_and_day_of_year/query.sql new file mode 100644 index 000000000..64bfc3b1a --- /dev/null +++ b/parser/testdata/00706_iso_week_and_day_of_year/query.sql @@ -0,0 +1,17 @@ +-- International Programmers' Day +SELECT toDayOfYear(toDate('2018-09-13')); + +SELECT toDate('2018-09-17') AS x, toDateTime(x) AS x_t, toISOWeek(x), toISOWeek(x_t), toISOYear(x), toISOYear(x_t), toStartOfISOYear(x), toStartOfISOYear(x_t); + +SELECT 
toDate('2018-12-25') + number AS x, toDateTime(x) AS x_t, toISOWeek(x) AS w, toISOWeek(x_t) AS wt, toISOYear(x) AS y, toISOYear(x_t) AS yt, toStartOfISOYear(x) AS ys, toStartOfISOYear(x_t) AS yst, toDayOfYear(x) AS dy, toDayOfYear(x_t) AS dyt FROM system.numbers LIMIT 10; +SELECT toDate('2016-12-25') + number AS x, toDateTime(x) AS x_t, toISOWeek(x) AS w, toISOWeek(x_t) AS wt, toISOYear(x) AS y, toISOYear(x_t) AS yt, toStartOfISOYear(x) AS ys, toStartOfISOYear(x_t) AS yst, toDayOfYear(x) AS dy, toDayOfYear(x_t) AS dyt FROM system.numbers LIMIT 10; + +-- ISO year always begins on Monday. +SELECT DISTINCT toDayOfWeek(toStartOfISOYear(toDateTime(1000000000 + rand64() % 1000000000))) FROM numbers(10000); +SELECT DISTINCT toDayOfWeek(toStartOfISOYear(toDate(10000 + rand64() % 20000))) FROM numbers(10000); + +-- Year and ISO year don't differ by more than one. +WITH toDateTime(1000000000 + rand64() % 1000000000) AS time SELECT max(abs(toYear(time) - toISOYear(time))) <= 1 FROM numbers(10000); + +-- ISO week is between 1 and 53 +WITH toDateTime(1000000000 + rand64() % 1000000000) AS time SELECT DISTINCT toISOWeek(time) BETWEEN 1 AND 53 FROM numbers(1000000); diff --git a/parser/testdata/00707_float_csv_delimiter/ast.json b/parser/testdata/00707_float_csv_delimiter/ast.json new file mode 100644 index 000000000..3308b1390 --- /dev/null +++ b/parser/testdata/00707_float_csv_delimiter/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_00707 (children 1)" + }, + { + "explain": " Identifier test_00707" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001054198, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00707_float_csv_delimiter/metadata.json b/parser/testdata/00707_float_csv_delimiter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00707_float_csv_delimiter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00707_float_csv_delimiter/query.sql b/parser/testdata/00707_float_csv_delimiter/query.sql new file mode 100644 index 000000000..4f2e94002 --- /dev/null +++ b/parser/testdata/00707_float_csv_delimiter/query.sql @@ -0,0 +1,15 @@ +DROP TEMPORARY TABLE IF EXISTS test_00707; +CREATE TEMPORARY TABLE test_00707 (x Float32, y Float64, z UInt64, s String); + +INSERT INTO test_00707 FORMAT CSV 123.456,789.012,345678,Hello + +SET format_csv_delimiter = ';'; +INSERT INTO test_00707 FORMAT CSV 123.456;789.012;345678;Hello + +SET format_csv_delimiter = ':'; +INSERT INTO test_00707 FORMAT CSV 123.456:789.012:345678:Hello + +SET format_csv_delimiter = '|'; +INSERT INTO test_00707 FORMAT CSV 123.456|789.012|345678|Hello + +SELECT * FROM test_00707; diff --git a/parser/testdata/00709_virtual_column_partition_id/ast.json b/parser/testdata/00709_virtual_column_partition_id/ast.json new file mode 100644 index 000000000..1a702c717 --- /dev/null +++ b/parser/testdata/00709_virtual_column_partition_id/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery partition_id (children 1)" + }, + { + "explain": " Identifier partition_id" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001040986, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/00709_virtual_column_partition_id/metadata.json b/parser/testdata/00709_virtual_column_partition_id/metadata.json new file mode 100644 index 000000000..ef120d978 ---
/dev/null +++ b/parser/testdata/00709_virtual_column_partition_id/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00709_virtual_column_partition_id/query.sql b/parser/testdata/00709_virtual_column_partition_id/query.sql new file mode 100644 index 000000000..48a3a3fad --- /dev/null +++ b/parser/testdata/00709_virtual_column_partition_id/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS partition_id; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE IF NOT EXISTS partition_id (d Date DEFAULT '2000-01-01', x UInt64) ENGINE = MergeTree(d, x, 5); + +INSERT INTO partition_id VALUES (100, 1), (200, 2), (300, 3); + +SELECT _partition_id FROM partition_id ORDER BY x; + +DROP TABLE IF EXISTS partition_id; + diff --git a/parser/testdata/00710_array_enumerate_dense/ast.json b/parser/testdata/00710_array_enumerate_dense/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00710_array_enumerate_dense/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00710_array_enumerate_dense/metadata.json b/parser/testdata/00710_array_enumerate_dense/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00710_array_enumerate_dense/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00710_array_enumerate_dense/query.sql b/parser/testdata/00710_array_enumerate_dense/query.sql new file mode 100644 index 000000000..194c941b7 --- /dev/null +++ b/parser/testdata/00710_array_enumerate_dense/query.sql @@ -0,0 +1,5 @@ +-- empty result set +SELECT a FROM (SELECT groupArray(intDiv(number, 54321)) AS a, arrayUniq(a) AS u, arrayEnumerateDense(a) AS arr FROM (SELECT number FROM system.numbers LIMIT 1000000) GROUP BY intHash32(number) % 100000) where u <> arrayReverseSort(arr)[1]; + +SELECT arrayEnumerateDense([[1], [2], [34], [1]]); +SELECT arrayEnumerateDense([(1, 2), (3, 4), (1, 2)]); diff --git a/parser/testdata/00711_array_enumerate_variants/ast.json b/parser/testdata/00711_array_enumerate_variants/ast.json new file mode 100644 index 000000000..f5885b225 --- /dev/null +++ b/parser/testdata/00711_array_enumerate_variants/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayEnumerateUniq (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3, UInt64_1, UInt64_2]" + }, + { + "explain": " Function arrayEnumerateDense (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3, UInt64_1, UInt64_2]" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.000891382, + "rows_read": 10, + "bytes_read": 490 + } +} diff --git a/parser/testdata/00711_array_enumerate_variants/metadata.json b/parser/testdata/00711_array_enumerate_variants/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00711_array_enumerate_variants/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00711_array_enumerate_variants/query.sql b/parser/testdata/00711_array_enumerate_variants/query.sql new file mode 100644 index 000000000..7b8d44fd4 --- /dev/null +++ 
b/parser/testdata/00711_array_enumerate_variants/query.sql @@ -0,0 +1 @@ +SELECT arrayEnumerateUniq([1, 2, 3, 1, 2]), arrayEnumerateDense([1, 2, 3, 1, 2]); diff --git a/parser/testdata/00712_nan_comparison/ast.json b/parser/testdata/00712_nan_comparison/ast.json new file mode 100644 index 000000000..3ad3e54f5 --- /dev/null +++ b/parser/testdata/00712_nan_comparison/ast.json @@ -0,0 +1,142 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 6)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_nan" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function notEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_nan" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_nan" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_nan" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function lessOrEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_nan" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function greaterOrEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_nan" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 40, + + "statistics": + { + "elapsed": 0.001634908, + "rows_read": 40, + "bytes_read": 1546 + } +} diff --git a/parser/testdata/00712_nan_comparison/metadata.json b/parser/testdata/00712_nan_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00712_nan_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00712_nan_comparison/query.sql b/parser/testdata/00712_nan_comparison/query.sql new file mode 100644 index 000000000..c0e1f692d --- /dev/null +++ b/parser/testdata/00712_nan_comparison/query.sql @@ -0,0 +1,30 @@ +SELECT nan = toUInt8(0), nan != toUInt8(0), nan < toUInt8(0), nan > toUInt8(0), nan <= toUInt8(0), nan >= toUInt8(0); +SELECT nan = toInt8(0), nan != toInt8(0), nan < toInt8(0), nan > toInt8(0), nan <= toInt8(0), nan >= toInt8(0); +SELECT nan = toUInt16(0), nan != toUInt16(0), nan < toUInt16(0), nan > toUInt16(0), nan <= toUInt16(0), nan >= toUInt16(0); +SELECT nan = toInt16(0), nan != toInt16(0), nan < 
toInt16(0), nan > toInt16(0), nan <= toInt16(0), nan >= toInt16(0); +SELECT nan = toUInt32(0), nan != toUInt32(0), nan < toUInt32(0), nan > toUInt32(0), nan <= toUInt32(0), nan >= toUInt32(0); +SELECT nan = toInt32(0), nan != toInt32(0), nan < toInt32(0), nan > toInt32(0), nan <= toInt32(0), nan >= toInt32(0); +SELECT nan = toUInt64(0), nan != toUInt64(0), nan < toUInt64(0), nan > toUInt64(0), nan <= toUInt64(0), nan >= toUInt64(0); +SELECT nan = toInt64(0), nan != toInt64(0), nan < toInt64(0), nan > toInt64(0), nan <= toInt64(0), nan >= toInt64(0); +SELECT nan = toFloat32(0.0), nan != toFloat32(0.0), nan < toFloat32(0.0), nan > toFloat32(0.0), nan <= toFloat32(0.0), nan >= toFloat32(0.0); +SELECT nan = toFloat64(0.0), nan != toFloat64(0.0), nan < toFloat64(0.0), nan > toFloat64(0.0), nan <= toFloat64(0.0), nan >= toFloat64(0.0); + +SELECT -nan = toUInt8(0), -nan != toUInt8(0), -nan < toUInt8(0), -nan > toUInt8(0), -nan <= toUInt8(0), -nan >= toUInt8(0); +SELECT -nan = toInt8(0), -nan != toInt8(0), -nan < toInt8(0), -nan > toInt8(0), -nan <= toInt8(0), -nan >= toInt8(0); +SELECT -nan = toUInt16(0), -nan != toUInt16(0), -nan < toUInt16(0), -nan > toUInt16(0), -nan <= toUInt16(0), -nan >= toUInt16(0); +SELECT -nan = toInt16(0), -nan != toInt16(0), -nan < toInt16(0), -nan > toInt16(0), -nan <= toInt16(0), -nan >= toInt16(0); +SELECT -nan = toUInt32(0), -nan != toUInt32(0), -nan < toUInt32(0), -nan > toUInt32(0), -nan <= toUInt32(0), -nan >= toUInt32(0); +SELECT -nan = toInt32(0), -nan != toInt32(0), -nan < toInt32(0), -nan > toInt32(0), -nan <= toInt32(0), -nan >= toInt32(0); +SELECT -nan = toUInt64(0), -nan != toUInt64(0), -nan < toUInt64(0), -nan > toUInt64(0), -nan <= toUInt64(0), -nan >= toUInt64(0); +SELECT -nan = toInt64(0), -nan != toInt64(0), -nan < toInt64(0), -nan > toInt64(0), -nan <= toInt64(0), -nan >= toInt64(0); +SELECT -nan = toFloat32(0.0), -nan != toFloat32(0.0), -nan < toFloat32(0.0), -nan > toFloat32(0.0), -nan <= toFloat32(0.0), -nan >= toFloat32(0.0); +SELECT -nan = toFloat64(0.0), -nan != toFloat64(0.0), -nan < toFloat64(0.0), -nan > toFloat64(0.0), -nan <= toFloat64(0.0), -nan >= toFloat64(0.0); + +--SELECT 1 % nan, nan % 1, pow(x, 1), pow(1, x); -- TODO +SELECT 1 + nan, 1 - nan, nan - 1, 1 * nan, 1 / nan, nan / 1; +SELECT nan AS x, isFinite(exp(x)) /* exp(nan) is allowed to return inf */, exp2(x), exp10(x), log(x), log2(x), log10(x), sqrt(x), cbrt(x); +SELECT nan AS x, erf(x), erfc(x), lgamma(x), tgamma(x); +SELECT nan AS x, sin(x), cos(x), tan(x), asin(x), acos(x), atan(x); + +SELECT min(x), max(x) FROM (SELECT arrayJoin([toFloat32(0.0), nan, toFloat32(1.0), toFloat32(-1.0)]) AS x); +SELECT min(x), max(x) FROM (SELECT arrayJoin([toFloat64(0.0), -nan, toFloat64(1.0), toFloat64(-1.0)]) AS x); diff --git a/parser/testdata/00712_prewhere_with_alias/ast.json b/parser/testdata/00712_prewhere_with_alias/ast.json new file mode 100644 index 000000000..5ffd629fa --- /dev/null +++ b/parser/testdata/00712_prewhere_with_alias/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery prewhere_alias (children 1)" + }, + { + "explain": " Identifier prewhere_alias" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001540611, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/00712_prewhere_with_alias/metadata.json b/parser/testdata/00712_prewhere_with_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/00712_prewhere_with_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00712_prewhere_with_alias/query.sql b/parser/testdata/00712_prewhere_with_alias/query.sql new file mode 100644 index 000000000..58fe90d6f --- /dev/null +++ b/parser/testdata/00712_prewhere_with_alias/query.sql @@ -0,0 +1,35 @@ +drop table if exists prewhere_alias; +CREATE TABLE prewhere_alias (a UInt8, b Int32, c UInt8 ALIAS a, d Int64 ALIAS b + 1, e Int32 alias a + b) ENGINE = MergeTree ORDER BY tuple(); +insert into prewhere_alias values (1, 1); +select '-'; +select a from prewhere_alias prewhere a = 1; +select b from prewhere_alias prewhere a = 1; +select c from prewhere_alias prewhere a = 1; +select d from prewhere_alias prewhere a = 1; +select '-'; +select a from prewhere_alias prewhere b = 1; +select b from prewhere_alias prewhere b = 1; +select c from prewhere_alias prewhere b = 1; +select d from prewhere_alias prewhere b = 1; +select '-'; +select a from prewhere_alias prewhere c = 1; +select b from prewhere_alias prewhere c = 1; +select c from prewhere_alias prewhere c = 1; +select d from prewhere_alias prewhere c = 1; +select '-'; +select a from prewhere_alias prewhere d = 2; +select b from prewhere_alias prewhere d = 2; +select c from prewhere_alias prewhere d = 2; +select d from prewhere_alias prewhere d = 2; +select '-'; +select a from prewhere_alias prewhere a; +select b from prewhere_alias prewhere a; +select c from prewhere_alias prewhere a; +select d from prewhere_alias prewhere a; +select '-'; +select a from prewhere_alias prewhere c; +select b from prewhere_alias prewhere c; +select c from prewhere_alias prewhere c; +select d from prewhere_alias prewhere c; +drop table if exists prewhere_alias; + diff --git a/parser/testdata/00712_prewhere_with_alias_and_virtual_column/ast.json b/parser/testdata/00712_prewhere_with_alias_and_virtual_column/ast.json new file mode 100644 index 000000000..97c82ba78 --- /dev/null +++ b/parser/testdata/00712_prewhere_with_alias_and_virtual_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab_00712_1 (children 1)" + }, + { + "explain": " Identifier tab_00712_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001084355, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/00712_prewhere_with_alias_and_virtual_column/metadata.json b/parser/testdata/00712_prewhere_with_alias_and_virtual_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00712_prewhere_with_alias_and_virtual_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00712_prewhere_with_alias_and_virtual_column/query.sql b/parser/testdata/00712_prewhere_with_alias_and_virtual_column/query.sql new file mode 100644 index 000000000..68a03605b --- /dev/null +++ b/parser/testdata/00712_prewhere_with_alias_and_virtual_column/query.sql @@ -0,0 +1,5 @@ +drop table if exists tab_00712_1; +create table tab_00712_1 (a UInt32, b UInt32 alias a + 1, c UInt32) engine = MergeTree order by tuple(); +insert into tab_00712_1 values (1, 2); +select ignore(_part) from tab_00712_1 prewhere b = 2; +drop table tab_00712_1; diff --git a/parser/testdata/00712_prewhere_with_alias_bug/ast.json b/parser/testdata/00712_prewhere_with_alias_bug/ast.json new file mode 100644 index 000000000..f103834f7 --- /dev/null +++ b/parser/testdata/00712_prewhere_with_alias_bug/ast.json @@ -0,0 +1,28 @@ +{ + 
"meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery prewhere_alias (children 1)" + }, + { + "explain": " Identifier prewhere_alias" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001392267, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/00712_prewhere_with_alias_bug/metadata.json b/parser/testdata/00712_prewhere_with_alias_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00712_prewhere_with_alias_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00712_prewhere_with_alias_bug/query.sql b/parser/testdata/00712_prewhere_with_alias_bug/query.sql new file mode 100644 index 000000000..83f9748f5 --- /dev/null +++ b/parser/testdata/00712_prewhere_with_alias_bug/query.sql @@ -0,0 +1,5 @@ +drop table if exists prewhere_alias; +create table prewhere_alias (a Int32, b Int32, c alias a + b) engine = MergeTree order by b; +insert into prewhere_alias values(1, 1); +select a, c + toInt32(1), (c + toInt32(1)) * 2 from prewhere_alias prewhere (c + toInt32(1)) * 2 = 6; +drop table prewhere_alias; diff --git a/parser/testdata/00712_prewhere_with_alias_bug_2/ast.json b/parser/testdata/00712_prewhere_with_alias_bug_2/ast.json new file mode 100644 index 000000000..9ee921dae --- /dev/null +++ b/parser/testdata/00712_prewhere_with_alias_bug_2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table (children 1)" + }, + { + "explain": " Identifier table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001059862, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/00712_prewhere_with_alias_bug_2/metadata.json b/parser/testdata/00712_prewhere_with_alias_bug_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00712_prewhere_with_alias_bug_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00712_prewhere_with_alias_bug_2/query.sql b/parser/testdata/00712_prewhere_with_alias_bug_2/query.sql new file mode 100644 index 000000000..beb986ade --- /dev/null +++ b/parser/testdata/00712_prewhere_with_alias_bug_2/query.sql @@ -0,0 +1,15 @@ +drop table if exists table; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE table (a UInt32, date Date, b UInt64, c UInt64, str String, d Int8, arr Array(UInt64), arr_alias Array(UInt64) ALIAS arr) ENGINE = MergeTree(date, intHash32(c), (a, date, intHash32(c), b), 8192); + +SELECT alias2 AS alias3 +FROM table +ARRAY JOIN + arr_alias AS alias2, + arrayEnumerateUniq(arr_alias) AS _uniq_Event +WHERE (date = toDate('2010-10-10')) AND (a IN (2, 3)) AND (str NOT IN ('z', 'x')) AND (d != -1) +LIMIT 1; + +drop table if exists table; + diff --git a/parser/testdata/00712_prewhere_with_final/ast.json b/parser/testdata/00712_prewhere_with_final/ast.json new file mode 100644 index 000000000..94e2eae26 --- /dev/null +++ b/parser/testdata/00712_prewhere_with_final/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery trepl (children 1)" + }, + { + "explain": " Identifier trepl" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001374667, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/00712_prewhere_with_final/metadata.json b/parser/testdata/00712_prewhere_with_final/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00712_prewhere_with_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00712_prewhere_with_final/query.sql b/parser/testdata/00712_prewhere_with_final/query.sql new file mode 100644 index 000000000..6b49e523f --- /dev/null +++ b/parser/testdata/00712_prewhere_with_final/query.sql @@ -0,0 +1,13 @@ +drop table if exists trepl; +set allow_deprecated_syntax_for_merge_tree=1; +create table trepl(d Date,a Int32, b Int32) engine = ReplacingMergeTree(d, (a,b), 8192); +insert into trepl values ('2018-09-19', 1, 1); +select b from trepl FINAL prewhere a < 1000; +drop table trepl; + + +drop table if exists versioned_collapsing; +create table versioned_collapsing(d Date, x UInt32, sign Int8, version UInt32) engine = VersionedCollapsingMergeTree(d, x, 8192, sign, version); +insert into versioned_collapsing values ('2018-09-19', 123, 1, 0); +select x from versioned_collapsing FINAL prewhere version < 1000; +drop table versioned_collapsing; diff --git a/parser/testdata/00712_prewhere_with_missing_columns/ast.json b/parser/testdata/00712_prewhere_with_missing_columns/ast.json new file mode 100644 index 000000000..09e0842bc --- /dev/null +++ b/parser/testdata/00712_prewhere_with_missing_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mergetree_00712 (children 1)" + }, + { + "explain": " Identifier mergetree_00712" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001040591, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/00712_prewhere_with_missing_columns/metadata.json b/parser/testdata/00712_prewhere_with_missing_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00712_prewhere_with_missing_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00712_prewhere_with_missing_columns/query.sql b/parser/testdata/00712_prewhere_with_missing_columns/query.sql new file mode 100644 index 000000000..7652ebcb7 --- /dev/null +++ b/parser/testdata/00712_prewhere_with_missing_columns/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS mergetree_00712; +CREATE TABLE mergetree_00712 (x UInt8, s String) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO mergetree_00712 VALUES (1, 'Hello, world!'); +SELECT * FROM mergetree_00712; + +ALTER TABLE mergetree_00712 ADD COLUMN y UInt8 DEFAULT 0; +INSERT INTO mergetree_00712 VALUES (2, 'Goodbye.', 3); +SELECT * FROM mergetree_00712 ORDER BY x; + +SELECT s FROM mergetree_00712 PREWHERE x AND y ORDER BY s; +SELECT s, y FROM mergetree_00712 PREWHERE x AND y ORDER BY s; + +DROP TABLE mergetree_00712; diff --git a/parser/testdata/00712_prewhere_with_missing_columns_2/ast.json b/parser/testdata/00712_prewhere_with_missing_columns_2/ast.json new file mode 100644 index 000000000..02c4a008b --- /dev/null +++ b/parser/testdata/00712_prewhere_with_missing_columns_2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_00712_1 (children 1)" + }, + { + "explain": " Identifier t_00712_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001132854, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00712_prewhere_with_missing_columns_2/metadata.json b/parser/testdata/00712_prewhere_with_missing_columns_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null 
+++ b/parser/testdata/00712_prewhere_with_missing_columns_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00712_prewhere_with_missing_columns_2/query.sql b/parser/testdata/00712_prewhere_with_missing_columns_2/query.sql new file mode 100644 index 000000000..fac4552ae --- /dev/null +++ b/parser/testdata/00712_prewhere_with_missing_columns_2/query.sql @@ -0,0 +1,12 @@ +drop table if exists t_00712_1; +create table t_00712_1 (a Int32, b Int32) engine = MergeTree partition by (a,b) order by (a); + +insert into t_00712_1 values (1, 1); +alter table t_00712_1 add column c Int32; + +select b from t_00712_1 prewhere a < 1000; +select c from t_00712_1 where a < 1000; +select c from t_00712_1 prewhere a < 1000; + +drop table t_00712_1; + diff --git a/parser/testdata/00712_prewhere_with_sampling/ast.json b/parser/testdata/00712_prewhere_with_sampling/ast.json new file mode 100644 index 000000000..fe0f96a2d --- /dev/null +++ b/parser/testdata/00712_prewhere_with_sampling/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab_00712_2 (children 1)" + }, + { + "explain": " Identifier tab_00712_2" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001086783, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/00712_prewhere_with_sampling/metadata.json b/parser/testdata/00712_prewhere_with_sampling/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00712_prewhere_with_sampling/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00712_prewhere_with_sampling/query.sql b/parser/testdata/00712_prewhere_with_sampling/query.sql new file mode 100644 index 000000000..4e94ff8f4 --- /dev/null +++ b/parser/testdata/00712_prewhere_with_sampling/query.sql @@ -0,0 +1,10 @@ +drop table if exists tab_00712_2; +create table tab_00712_2 (a UInt32, b UInt32) engine = MergeTree order by b % 2 sample by b % 2; +insert into tab_00712_2 values (1, 2), (1, 4); +select a from tab_00712_2 sample 1 / 2 prewhere b = 2; +drop table if exists tab_00712_2; + +DROP TABLE IF EXISTS sample_prewhere; +CREATE TABLE sample_prewhere (CounterID UInt32, UserID UInt64) ENGINE = MergeTree ORDER BY UserID SAMPLE BY UserID; +SELECT count() FROM sample_prewhere SAMPLE 1/2 PREWHERE CounterID = 1; +DROP TABLE sample_prewhere; diff --git a/parser/testdata/00712_prewhere_with_sampling_and_alias/ast.json b/parser/testdata/00712_prewhere_with_sampling_and_alias/ast.json new file mode 100644 index 000000000..0e03095e0 --- /dev/null +++ b/parser/testdata/00712_prewhere_with_sampling_and_alias/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_00712_2 (children 1)" + }, + { + "explain": " Identifier t_00712_2" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001140518, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00712_prewhere_with_sampling_and_alias/metadata.json b/parser/testdata/00712_prewhere_with_sampling_and_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00712_prewhere_with_sampling_and_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00712_prewhere_with_sampling_and_alias/query.sql b/parser/testdata/00712_prewhere_with_sampling_and_alias/query.sql new file mode 100644 index 000000000..7c8ae4eed --- /dev/null +++ 
b/parser/testdata/00712_prewhere_with_sampling_and_alias/query.sql @@ -0,0 +1,7 @@ +drop table if exists t_00712_2; +set allow_deprecated_syntax_for_merge_tree=1; +create table t_00712_2 (date Date, counter UInt64, sampler UInt64, alias_col alias date + 1) engine = MergeTree(date, intHash32(sampler), (counter, date, intHash32(sampler)), 8192); +insert into t_00712_2 values ('2018-01-01', 1, 1); +select alias_col from t_00712_2 sample 1 / 2 where date = '2018-01-01' and counter = 1 and sampler = 1; +drop table if exists t_00712_2; + diff --git a/parser/testdata/00713_collapsing_merge_tree/ast.json b/parser/testdata/00713_collapsing_merge_tree/ast.json new file mode 100644 index 000000000..4380a2f78 --- /dev/null +++ b/parser/testdata/00713_collapsing_merge_tree/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery collapsing (children 1)" + }, + { + "explain": " Identifier collapsing" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000990802, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00713_collapsing_merge_tree/metadata.json b/parser/testdata/00713_collapsing_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00713_collapsing_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00713_collapsing_merge_tree/query.sql b/parser/testdata/00713_collapsing_merge_tree/query.sql new file mode 100644 index 000000000..999618753 --- /dev/null +++ b/parser/testdata/00713_collapsing_merge_tree/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS collapsing; + +CREATE TABLE collapsing(key String, value String, sign Int8) ENGINE CollapsingMergeTree(sign) + ORDER BY key + SETTINGS enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0; + +INSERT INTO collapsing VALUES ('k1', 'k1v1', 1); +INSERT INTO collapsing VALUES ('k1', 'k1v1', -1), ('k1', 'k1v2', 1); +INSERT INTO collapsing VALUES ('k2', 'k2v1', 1), ('k2', 'k2v1', -1), ('k3', 'k3v1', 1); +INSERT INTO collapsing VALUES ('k4', 'k4v1', -1), ('k4', 'k4v2', 1), ('k4', 'k4v2', -1); + +OPTIMIZE TABLE collapsing PARTITION tuple() FINAL; + +SELECT * FROM collapsing ORDER BY key; + +DROP TABLE collapsing; diff --git a/parser/testdata/00714_alter_uuid/ast.json b/parser/testdata/00714_alter_uuid/ast.json new file mode 100644 index 000000000..d3f1ca369 --- /dev/null +++ b/parser/testdata/00714_alter_uuid/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '00000000-0000-01f8-9cb8-cb1b82fb3900' (alias str)" + }, + { + "explain": " Function toUUID (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier str" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001393259, + "rows_read": 8, + "bytes_read": 327 + } +} diff --git a/parser/testdata/00714_alter_uuid/metadata.json b/parser/testdata/00714_alter_uuid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00714_alter_uuid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00714_alter_uuid/query.sql 
b/parser/testdata/00714_alter_uuid/query.sql new file mode 100644 index 000000000..40c4981dd --- /dev/null +++ b/parser/testdata/00714_alter_uuid/query.sql @@ -0,0 +1,45 @@ +SELECT '00000000-0000-01f8-9cb8-cb1b82fb3900' AS str, toUUID(str); +SELECT toFixedString('00000000-0000-02f8-9cb8-cb1b82fb3900', 36) AS str, toUUID(str); + +SELECT '00000000-0000-03f8-9cb8-cb1b82fb3900' AS str, CAST(str, 'UUID'); +SELECT toFixedString('00000000-0000-04f8-9cb8-cb1b82fb3900', 36) AS str, CAST(str, 'UUID'); + +DROP TABLE IF EXISTS uuid; +CREATE TABLE IF NOT EXISTS uuid +( + created_at DateTime, + id0 String, + id1 FixedString(36) +) +ENGINE = MergeTree +PARTITION BY toDate(created_at) +ORDER BY (created_at); + +INSERT INTO uuid VALUES ('2018-01-01 01:02:03', '00000000-0000-05f8-9cb8-cb1b82fb3900', '00000000-0000-06f8-9cb8-cb1b82fb3900'); + +ALTER TABLE uuid MODIFY COLUMN id0 UUID; +ALTER TABLE uuid MODIFY COLUMN id1 UUID; + +SELECT id0, id1 FROM uuid; +SELECT toTypeName(id0), toTypeName(id1) FROM uuid; + +DROP TABLE uuid; + +-- with UUID in key + +CREATE TABLE IF NOT EXISTS uuid +( + created_at DateTime, + id0 String, + id1 FixedString(36) +) +ENGINE = MergeTree +PARTITION BY toDate(created_at) +ORDER BY (created_at, id0, id1); + +SET send_logs_level = 'fatal'; + +ALTER TABLE uuid MODIFY COLUMN id0 UUID; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE uuid MODIFY COLUMN id1 UUID; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +DROP TABLE uuid; diff --git a/parser/testdata/00714_create_temporary_table_with_in_clause/ast.json b/parser/testdata/00714_create_temporary_table_with_in_clause/ast.json new file mode 100644 index 000000000..64c4e1725 --- /dev/null +++ b/parser/testdata/00714_create_temporary_table_with_in_clause/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery temporary_table (children 1)" + }, + { + "explain": " Identifier temporary_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001018152, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/00714_create_temporary_table_with_in_clause/metadata.json b/parser/testdata/00714_create_temporary_table_with_in_clause/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00714_create_temporary_table_with_in_clause/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00714_create_temporary_table_with_in_clause/query.sql b/parser/testdata/00714_create_temporary_table_with_in_clause/query.sql new file mode 100644 index 000000000..e56c3bd1d --- /dev/null +++ b/parser/testdata/00714_create_temporary_table_with_in_clause/query.sql @@ -0,0 +1,10 @@ +DROP TEMPORARY TABLE IF EXISTS temporary_table; +DROP TABLE IF EXISTS test_merge_1; +DROP TABLE IF EXISTS test_merge_2; +CREATE TABLE test_merge_1(id UInt64) ENGINE = Log; +CREATE TABLE test_merge_2(id UInt64) ENGINE = Log; +CREATE TEMPORARY TABLE temporary_table AS SELECT * FROM numbers(1) WHERE number NOT IN (SELECT id FROM merge(currentDatabase(), 'test_merge_1|test_merge_2')); +SELECT * FROM temporary_table; +DROP TEMPORARY TABLE IF EXISTS temporary_table; +DROP TABLE IF EXISTS test_merge_1; +DROP TABLE IF EXISTS test_merge_2; diff --git a/parser/testdata/00715_bounding_ratio/ast.json b/parser/testdata/00715_bounding_ratio/ast.json new file mode 100644 index 000000000..55802b393 --- /dev/null +++ b/parser/testdata/00715_bounding_ratio/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + 
"type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery rate_test (children 1)" + }, + { + "explain": " Identifier rate_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001124993, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00715_bounding_ratio/metadata.json b/parser/testdata/00715_bounding_ratio/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00715_bounding_ratio/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00715_bounding_ratio/query.sql b/parser/testdata/00715_bounding_ratio/query.sql new file mode 100644 index 000000000..b790aedf7 --- /dev/null +++ b/parser/testdata/00715_bounding_ratio/query.sql @@ -0,0 +1,26 @@ +drop table if exists rate_test; + +create table rate_test (timestamp UInt32, event UInt32) engine=Memory; +insert into rate_test values (0,1000),(1,1001),(2,1002),(3,1003),(4,1004),(5,1005),(6,1006),(7,1007),(8,1008); + +select 1.0 = boundingRatio(timestamp, event) from rate_test; + +drop table if exists rate_test2; +create table rate_test2 (uid UInt32 default 1,timestamp DateTime, event UInt32) engine=Memory; +insert into rate_test2(timestamp, event) values ('2018-01-01 01:01:01',1001),('2018-01-01 01:01:02',1002),('2018-01-01 01:01:03',1003),('2018-01-01 01:01:04',1004),('2018-01-01 01:01:05',1005),('2018-01-01 01:01:06',1006),('2018-01-01 01:01:07',1007),('2018-01-01 01:01:08',1008); + +select 1.0 = boundingRatio(timestamp, event) from rate_test2; + +drop table rate_test; +drop table rate_test2; + + +SELECT boundingRatio(number, number * 1.5) FROM numbers(10); +SELECT boundingRatio(1000 + number, number * 1.5) FROM numbers(10); +SELECT boundingRatio(1000 + number, number * 1.5 - 111) FROM numbers(10); +SELECT number % 10 AS k, boundingRatio(1000 + number, number * 1.5 - 111) FROM numbers(100) GROUP BY k WITH TOTALS ORDER BY k; + +SELECT boundingRatio(1000 + number, number * 1.5 - 111) FROM numbers(2); +SELECT boundingRatio(1000 + number, number * 1.5 - 111) FROM numbers(1); +SELECT boundingRatio(1000 + number, number * 1.5 - 111) FROM numbers(1) WHERE 0; +SELECT boundingRatio(number, exp(number)) = exp(1) - 1 FROM numbers(2); diff --git a/parser/testdata/00715_bounding_ratio_merge_empty/ast.json b/parser/testdata/00715_bounding_ratio_merge_empty/ast.json new file mode 100644 index 000000000..bdaa89c5c --- /dev/null +++ b/parser/testdata/00715_bounding_ratio_merge_empty/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery rate_test (children 1)" + }, + { + "explain": " Identifier rate_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001128656, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00715_bounding_ratio_merge_empty/metadata.json b/parser/testdata/00715_bounding_ratio_merge_empty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00715_bounding_ratio_merge_empty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00715_bounding_ratio_merge_empty/query.sql b/parser/testdata/00715_bounding_ratio_merge_empty/query.sql new file mode 100644 index 000000000..8d2332758 --- /dev/null +++ b/parser/testdata/00715_bounding_ratio_merge_empty/query.sql @@ -0,0 +1,16 @@ +drop table if exists rate_test; +drop table if exists rate_test2; + +create table rate_test (timestamp UInt32, event UInt32) engine=Memory; +insert into rate_test values 
(0,1000),(1,1001),(2,1002),(3,1003),(4,1004),(5,1005),(6,1006),(7,1007),(8,1008); + +create table rate_test2 (timestamp UInt32, event UInt32) engine=Memory; + +SELECT boundingRatioMerge(state) FROM ( + select boundingRatioState(timestamp, event) as state from rate_test + UNION ALL + SELECT boundingRatioState(timestamp, event) FROM rate_test2 WHERE 1=0 +); + +drop table if exists rate_test; +drop table if exists rate_test2; diff --git a/parser/testdata/00716_allow_ddl/ast.json b/parser/testdata/00716_allow_ddl/ast.json new file mode 100644 index 000000000..d143f642e --- /dev/null +++ b/parser/testdata/00716_allow_ddl/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001015995, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00716_allow_ddl/metadata.json b/parser/testdata/00716_allow_ddl/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00716_allow_ddl/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00716_allow_ddl/query.sql b/parser/testdata/00716_allow_ddl/query.sql new file mode 100644 index 000000000..8d9988736 --- /dev/null +++ b/parser/testdata/00716_allow_ddl/query.sql @@ -0,0 +1,8 @@ +SET send_logs_level = 'fatal'; +SET allow_ddl = 0; + +CREATE DATABASE some_db; -- { serverError QUERY_IS_PROHIBITED } +CREATE TABLE some_table(a Int32) ENGINE = Memory; -- { serverError QUERY_IS_PROHIBITED} +ALTER TABLE some_table DELETE WHERE 1; -- { serverError QUERY_IS_PROHIBITED} +RENAME TABLE some_table TO some_table1; -- { serverError QUERY_IS_PROHIBITED} +SET allow_ddl = 1; -- { serverError QUERY_IS_PROHIBITED} diff --git a/parser/testdata/00717_default_join_type/ast.json b/parser/testdata/00717_default_join_type/ast.json new file mode 100644 index 000000000..67ad0b02c --- /dev/null +++ b/parser/testdata/00717_default_join_type/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery default_join1 (children 1)" + }, + { + "explain": " Identifier default_join1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001021703, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/00717_default_join_type/metadata.json b/parser/testdata/00717_default_join_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00717_default_join_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00717_default_join_type/query.sql b/parser/testdata/00717_default_join_type/query.sql new file mode 100644 index 000000000..2cdea0814 --- /dev/null +++ b/parser/testdata/00717_default_join_type/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS default_join1; +DROP TABLE IF EXISTS default_join2; + +CREATE TABLE default_join1(a Int64, b Int64) ENGINE=Memory; +CREATE TABLE default_join2(a Int64, b Int64) ENGINE=Memory; + +INSERT INTO default_join1 VALUES(1, 1), (2, 2), (3, 3); +INSERT INTO default_join2 VALUES(3, 3), (4, 4); + +SELECT a, b FROM default_join1 JOIN (SELECT a, b FROM default_join2) js2 USING a ORDER BY b SETTINGS join_default_strictness='ANY'; + +DROP TABLE default_join1; +DROP TABLE default_join2; diff --git a/parser/testdata/00717_low_cardinaliry_distributed_group_by/ast.json b/parser/testdata/00717_low_cardinaliry_distributed_group_by/ast.json new file mode 100644 index 000000000..490a2e17e --- 
/dev/null +++ b/parser/testdata/00717_low_cardinaliry_distributed_group_by/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00717_low_cardinaliry_distributed_group_by/metadata.json b/parser/testdata/00717_low_cardinaliry_distributed_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00717_low_cardinaliry_distributed_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00717_low_cardinaliry_distributed_group_by/query.sql b/parser/testdata/00717_low_cardinaliry_distributed_group_by/query.sql new file mode 100644 index 000000000..605f60a74 --- /dev/null +++ b/parser/testdata/00717_low_cardinaliry_distributed_group_by/query.sql @@ -0,0 +1,16 @@ +-- Tags: distributed + +set distributed_foreground_insert = 1; +set allow_suspicious_low_cardinality_types = 1; + +DROP TABLE IF EXISTS test_low_null_float; +DROP TABLE IF EXISTS dist_00717; + +CREATE TABLE test_low_null_float (a LowCardinality(Nullable(Float64))) ENGINE = Memory; +CREATE TABLE dist_00717 (a LowCardinality(Nullable(Float64))) ENGINE = Distributed('test_cluster_two_shards_localhost', currentDatabase(), 'test_low_null_float', rand()); + +INSERT INTO dist_00717 (a) SELECT number FROM system.numbers LIMIT 1000000; +SELECT a, count() FROM dist_00717 GROUP BY a ORDER BY a ASC, count() ASC LIMIT 10; + +DROP TABLE IF EXISTS test_low_null_float; +DROP TABLE IF EXISTS dist_00717; diff --git a/parser/testdata/00717_low_cardinaliry_group_by/ast.json b/parser/testdata/00717_low_cardinaliry_group_by/ast.json new file mode 100644 index 000000000..869db9edc --- /dev/null +++ b/parser/testdata/00717_low_cardinaliry_group_by/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab_00717 (children 1)" + }, + { + "explain": " Identifier tab_00717" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001180562, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00717_low_cardinaliry_group_by/metadata.json b/parser/testdata/00717_low_cardinaliry_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00717_low_cardinaliry_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00717_low_cardinaliry_group_by/query.sql b/parser/testdata/00717_low_cardinaliry_group_by/query.sql new file mode 100644 index 000000000..3115ab508 --- /dev/null +++ b/parser/testdata/00717_low_cardinaliry_group_by/query.sql @@ -0,0 +1,21 @@ +drop table if exists tab_00717; +create table tab_00717 (a String, b LowCardinality(String)) engine = MergeTree order by a; +insert into tab_00717 values ('a_1', 'b_1'), ('a_2', 'b_2'); +select count() from tab_00717; +select a from tab_00717 group by a order by a; +select b from tab_00717 group by b order by b; +select length(b) as l from tab_00717 group by l; +select sum(length(a)), b from tab_00717 group by b order by b; +select sum(length(b)), a from tab_00717 group by a order by a; +select a, b from tab_00717 group by a, b order by a, b; +select sum(length(a)) from tab_00717 group by b, b || '_'; +select length(b) as l from tab_00717 group by l; +select length(b) as l from tab_00717 group by l, l + 1; +select length(b) as l from tab_00717 group by l, l + 1, l + 2; +select length(b) as l from tab_00717 group by l, l + 1, l + 2, l + 3; +select length(b) as l from tab_00717 group by l, l + 1, l + 2, l + 3, l + 4; +select length(b) as l from tab_00717 
group by l, l + 1, l + 2, l + 3, l + 4, l + 5; +select a, length(b) as l from tab_00717 group by a, l, l + 1 order by a; +select b, length(b) as l from tab_00717 group by b, l, l + 1 order by b; +select a, b, length(b) as l from tab_00717 group by a, b, l, l + 1 order by a, b; +drop table if exists tab_00717; diff --git a/parser/testdata/00717_merge_and_distributed/ast.json b/parser/testdata/00717_merge_and_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00717_merge_and_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00717_merge_and_distributed/metadata.json b/parser/testdata/00717_merge_and_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00717_merge_and_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00717_merge_and_distributed/query.sql b/parser/testdata/00717_merge_and_distributed/query.sql new file mode 100644 index 000000000..022303cef --- /dev/null +++ b/parser/testdata/00717_merge_and_distributed/query.sql @@ -0,0 +1,92 @@ +-- Tags: distributed + +SET enable_analyzer = 1; +SET send_logs_level = 'fatal'; +DROP TABLE IF EXISTS test_local_1; +DROP TABLE IF EXISTS test_local_2; +DROP TABLE IF EXISTS test_distributed_1; +DROP TABLE IF EXISTS test_distributed_2; + +SET merge_table_max_tables_to_look_for_schema_inference = 1; +SET allow_deprecated_syntax_for_merge_tree = 1; +CREATE TABLE test_local_1 (date Date, value UInt32) ENGINE = MergeTree(date, date, 8192); +CREATE TABLE test_local_2 (date Date, value UInt32) ENGINE = MergeTree(date, date, 8192); +CREATE TABLE test_distributed_1 AS test_local_1 ENGINE = Distributed('test_shard_localhost', currentDatabase(), test_local_1, rand()); +CREATE TABLE test_distributed_2 AS test_local_2 ENGINE = Distributed('test_shard_localhost', currentDatabase(), test_local_2, rand()); + +INSERT INTO test_local_1 VALUES ('2018-08-01',100); +INSERT INTO test_local_2 VALUES ('2018-08-01',200); + +SELECT '--------------Single Local------------'; +SELECT * FROM merge(currentDatabase(), 'test_local_1'); +SELECT *, _table FROM merge(currentDatabase(), 'test_local_1') ORDER BY _table; +SELECT sum(value), _table FROM merge(currentDatabase(), 'test_local_1') GROUP BY _table ORDER BY _table; +SELECT * FROM merge(currentDatabase(), 'test_local_1') WHERE _table = 'test_local_1'; +SELECT * FROM merge(currentDatabase(), 'test_local_1') PREWHERE _table = 'test_local_1'; -- { serverError ILLEGAL_PREWHERE } +SELECT * FROM merge(currentDatabase(), 'test_local_1') WHERE _table in ('test_local_1', 'test_local_2'); +SELECT * FROM merge(currentDatabase(), 'test_local_1') PREWHERE _table in ('test_local_1', 'test_local_2'); -- { serverError ILLEGAL_PREWHERE } + +SELECT '--------------Single Distributed------------'; +SELECT * FROM merge(currentDatabase(), 'test_distributed_1'); +SELECT *, _table FROM merge(currentDatabase(), 'test_distributed_1') ORDER BY _table; +SELECT sum(value), _table FROM merge(currentDatabase(), 'test_distributed_1') GROUP BY _table ORDER BY _table; +SELECT * FROM merge(currentDatabase(), 'test_distributed_1') WHERE _table = 'test_local_1'; +SELECT * FROM merge(currentDatabase(), 'test_distributed_1') PREWHERE _table = 'test_local_1'; -- { serverError ILLEGAL_PREWHERE } +SELECT * FROM merge(currentDatabase(), 'test_distributed_1') WHERE _table in ('test_local_1', 'test_local_2'); +SELECT * FROM merge(currentDatabase(), 'test_distributed_1') PREWHERE _table in 
('test_local_1', 'test_local_2'); -- { serverError ILLEGAL_PREWHERE } + +SELECT '--------------Local Merge Local------------'; +SELECT * FROM merge(currentDatabase(), 'test_local_1|test_local_2') ORDER BY _table; +SELECT *, _table FROM merge(currentDatabase(), 'test_local_1|test_local_2') ORDER BY _table; +SELECT sum(value), _table FROM merge(currentDatabase(), 'test_local_1|test_local_2') GROUP BY _table ORDER BY _table; +SELECT * FROM merge(currentDatabase(), 'test_local_1|test_local_2') WHERE _table = 'test_local_1'; +SELECT * FROM merge(currentDatabase(), 'test_local_1|test_local_2') PREWHERE _table = 'test_local_1'; -- { serverError ILLEGAL_PREWHERE } +SELECT * FROM merge(currentDatabase(), 'test_local_1|test_local_2') WHERE _table in ('test_local_1', 'test_local_2') ORDER BY value; +SELECT * FROM merge(currentDatabase(), 'test_local_1|test_local_2') PREWHERE _table in ('test_local_1', 'test_local_2') ORDER BY value; -- { serverError ILLEGAL_PREWHERE } + +SELECT '--------------Local Merge Distributed------------'; +SELECT * FROM merge(currentDatabase(), 'test_local_1|test_distributed_2') ORDER BY _table; +SELECT *, _table FROM merge(currentDatabase(), 'test_local_1|test_distributed_2') ORDER BY _table; +SELECT sum(value), _table FROM merge(currentDatabase(), 'test_local_1|test_distributed_2') GROUP BY _table ORDER BY _table; +SELECT * FROM merge(currentDatabase(), 'test_local_1|test_distributed_2') WHERE _table = 'test_local_1'; +SELECT * FROM merge(currentDatabase(), 'test_local_1|test_distributed_2') PREWHERE _table = 'test_local_1'; -- { serverError ILLEGAL_PREWHERE } +SELECT * FROM merge(currentDatabase(), 'test_local_1|test_distributed_2') WHERE _table in ('test_local_1', 'test_local_2') ORDER BY value; +SELECT * FROM merge(currentDatabase(), 'test_local_1|test_distributed_2') PREWHERE _table in ('test_local_1', 'test_local_2') ORDER BY value; -- { serverError ILLEGAL_PREWHERE } + +SELECT '--------------Distributed Merge Distributed------------'; +SELECT * FROM merge(currentDatabase(), 'test_distributed_1|test_distributed_2') ORDER BY _table; +SELECT *, _table FROM merge(currentDatabase(), 'test_distributed_1|test_distributed_2') ORDER BY _table; +SELECT sum(value), _table FROM merge(currentDatabase(), 'test_distributed_1|test_distributed_2') GROUP BY _table ORDER BY _table; +SELECT * FROM merge(currentDatabase(), 'test_distributed_1|test_distributed_2') WHERE _table = 'test_local_1'; +SELECT * FROM merge(currentDatabase(), 'test_distributed_1|test_distributed_2') PREWHERE _table = 'test_local_1'; -- { serverError ILLEGAL_PREWHERE } +SELECT * FROM merge(currentDatabase(), 'test_distributed_1|test_distributed_2') WHERE _table in ('test_local_1', 'test_local_2') ORDER BY value; +SELECT * FROM merge(currentDatabase(), 'test_distributed_1|test_distributed_2') PREWHERE _table in ('test_local_1', 'test_local_2') ORDER BY value; -- { serverError ILLEGAL_PREWHERE } + +DROP TABLE IF EXISTS test_local_1; +DROP TABLE IF EXISTS test_local_2; +DROP TABLE IF EXISTS test_distributed_1; +DROP TABLE IF EXISTS test_distributed_2; + +DROP TABLE IF EXISTS test_u64_local; +DROP TABLE IF EXISTS test_s64_local; +DROP TABLE IF EXISTS test_u64_distributed; +DROP TABLE IF EXISTS test_s64_distributed; + +CREATE TABLE test_s64_local (date Date, value Int64) ENGINE = MergeTree(date, date, 8192); +CREATE TABLE test_u64_local (date Date, value UInt64) ENGINE = MergeTree(date, date, 8192); +CREATE TABLE test_s64_distributed AS test_s64_local ENGINE = Distributed('test_shard_localhost', currentDatabase(), 
test_s64_local, rand()); +CREATE TABLE test_u64_distributed AS test_u64_local ENGINE = Distributed('test_shard_localhost', currentDatabase(), test_u64_local, rand()); + +INSERT INTO test_s64_local VALUES ('2018-08-01', -1); +INSERT INTO test_u64_local VALUES ('2018-08-01', 1); + +SELECT '--------------Implicit type conversion------------'; +SELECT * FROM merge(currentDatabase(), 'test_s64_distributed|test_u64_distributed') ORDER BY value; +SELECT * FROM merge(currentDatabase(), 'test_s64_distributed|test_u64_distributed') WHERE date = '2018-08-01' ORDER BY value; +SELECT * FROM merge(currentDatabase(), 'test_s64_distributed|test_u64_distributed') WHERE _table = 'test_u64_local' ORDER BY value; +SELECT * FROM merge(currentDatabase(), 'test_s64_distributed|test_u64_distributed') WHERE value = 1; + +DROP TABLE IF EXISTS test_u64_local; +DROP TABLE IF EXISTS test_s64_local; +DROP TABLE IF EXISTS test_u64_distributed; +DROP TABLE IF EXISTS test_s64_distributed; diff --git a/parser/testdata/00718_format_datetime/ast.json b/parser/testdata/00718_format_datetime/ast.json new file mode 100644 index 000000000..1f1928db2 --- /dev/null +++ b/parser/testdata/00718_format_datetime/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00129846, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00718_format_datetime/metadata.json b/parser/testdata/00718_format_datetime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00718_format_datetime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00718_format_datetime/query.sql b/parser/testdata/00718_format_datetime/query.sql new file mode 100644 index 000000000..4f2ce7096 --- /dev/null +++ b/parser/testdata/00718_format_datetime/query.sql @@ -0,0 +1,104 @@ +SET send_logs_level = 'fatal'; + +SELECT formatDateTime(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT formatDateTime('not a datetime', 'IGNORED'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatDateTime(now(), now()); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatDateTime(now(), 'good format pattern', now()); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatDateTime(now(), 'unescaped %'); -- { serverError BAD_ARGUMENTS } +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%U'); -- { serverError NOT_IMPLEMENTED } +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%v'); -- { serverError NOT_IMPLEMENTED } +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%x'); -- { serverError NOT_IMPLEMENTED } +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%X'); -- { serverError NOT_IMPLEMENTED } + +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%a'), formatDateTime(toDate32('2018-01-02'), '%a'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%b'), formatDateTime(toDate32('2018-01-02'), '%b'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%c'), formatDateTime(toDate32('2018-01-02'), '%c'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%C'), formatDateTime(toDate32('2018-01-02'), '%C'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%d'), formatDateTime(toDate32('2018-01-02'), '%d'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%D'), formatDateTime(toDate32('2018-01-02'), '%D'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), 
'%e'), formatDateTime(toDate32('2018-01-02'), '%e'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%f'), formatDateTime(toDate32('2018-01-02'), '%f'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%F'), formatDateTime(toDate32('2018-01-02'), '%F'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%h'), formatDateTime(toDate32('2018-01-02'), '%h'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%H'), formatDateTime(toDate32('2018-01-02'), '%H'); +SELECT formatDateTime(toDateTime('2018-01-02 02:33:44'), '%H'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%i'), formatDateTime(toDate32('2018-01-02'), '%i'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%I'), formatDateTime(toDate32('2018-01-02'), '%I'); +SELECT formatDateTime(toDateTime('2018-01-02 11:33:44'), '%I'); +SELECT formatDateTime(toDateTime('2018-01-02 00:33:44'), '%I'); +SELECT formatDateTime(toDateTime('2018-01-01 00:33:44'), '%j'), formatDateTime(toDate32('2018-01-01'), '%j'); +SELECT formatDateTime(toDateTime('2000-12-31 00:33:44'), '%j'), formatDateTime(toDate32('2000-12-31'), '%j'); +SELECT formatDateTime(toDateTime('2000-12-31 00:33:44'), '%k'), formatDateTime(toDate32('2000-12-31'), '%k'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%m'), formatDateTime(toDate32('2018-01-02'), '%m'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%M'), formatDateTime(toDate32('2018-01-02'), '%M') SETTINGS formatdatetime_parsedatetime_m_is_month_name = 1; +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%M'), formatDateTime(toDate32('2018-01-02'), '%M') SETTINGS formatdatetime_parsedatetime_m_is_month_name = 0; +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%n'), formatDateTime(toDate32('2018-01-02'), '%n'); +SELECT formatDateTime(toDateTime('2018-01-02 00:33:44'), '%p'), formatDateTime(toDateTime('2018-01-02'), '%p'); +SELECT formatDateTime(toDateTime('2018-01-02 11:33:44'), '%p'); +SELECT formatDateTime(toDateTime('2018-01-02 12:33:44'), '%p'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%r'), formatDateTime(toDate32('2018-01-02'), '%r'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%R'), formatDateTime(toDate32('2018-01-02'), '%R'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%S'), formatDateTime(toDate32('2018-01-02'), '%S'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%t'), formatDateTime(toDate32('2018-01-02'), '%t'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%T'), formatDateTime(toDate32('2018-01-02'), '%T'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%W'), formatDateTime(toDate32('2018-01-02'), '%W'); +SELECT formatDateTime(toDateTime('2018-01-01 22:33:44'), '%u'), formatDateTime(toDateTime('2018-01-07 22:33:44'), '%u'), + formatDateTime(toDate32('2018-01-01'), '%u'), formatDateTime(toDate32('2018-01-07'), '%u'); +SELECT formatDateTime(toDateTime('1996-01-01 22:33:44'), '%V'), formatDateTime(toDateTime('1996-12-31 22:33:44'), '%V'), + formatDateTime(toDateTime('1999-01-01 22:33:44'), '%V'), formatDateTime(toDateTime('1999-12-31 22:33:44'), '%V'), + formatDateTime(toDate32('1996-01-01'), '%V'), formatDateTime(toDate32('1996-12-31'), '%V'), + formatDateTime(toDate32('1999-01-01'), '%V'), formatDateTime(toDate32('1999-12-31'), '%V'); +SELECT formatDateTime(toDateTime('2018-01-01 22:33:44'), '%w'), formatDateTime(toDateTime('2018-01-07 22:33:44'), '%w'), + formatDateTime(toDate32('2018-01-01'), '%w'), 
formatDateTime(toDate32('2018-01-07'), '%w'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%y'), formatDateTime(toDate32('2018-01-02'), '%y'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%Y'), formatDateTime(toDate32('2018-01-02'), '%Y'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), '%%'), formatDateTime(toDate32('2018-01-02'), '%%'); +SELECT formatDateTime(toDateTime('2018-01-02 22:33:44'), 'no formatting pattern'), formatDateTime(toDate32('2018-01-02'), 'no formatting pattern'); + +SELECT formatDateTime(toDate('2018-01-01'), '%F %T'); +SELECT formatDateTime(toDate32('1927-01-01'), '%F %T'); + +SELECT + formatDateTime(toDateTime('2018-01-01 01:00:00', 'UTC'), '%F %T', 'UTC'), + formatDateTime(toDateTime('2018-01-01 01:00:00', 'UTC'), '%F %T', 'Asia/Istanbul'); + +SELECT formatDateTime(toDateTime('2020-01-01 01:00:00', 'UTC'), '%z'); +SELECT formatDateTime(toDateTime('2020-01-01 01:00:00', 'US/Samoa'), '%z'); +SELECT formatDateTime(toDateTime('2020-01-01 01:00:00', 'Europe/Moscow'), '%z'); +SELECT formatDateTime(toDateTime('1970-01-01 00:00:00', 'Asia/Kolkata'), '%z'); + +-- %f (default settings) +select formatDateTime(toDate('2010-01-04'), '%f') SETTINGS formatdatetime_f_prints_single_zero = 0; +select formatDateTime(toDate32('2010-01-04'), '%f') SETTINGS formatdatetime_f_prints_single_zero = 0; +select formatDateTime(toDateTime('2010-01-04 12:34:56'), '%f') SETTINGS formatdatetime_f_prints_single_zero = 0; +select formatDateTime(toDateTime64('2010-01-04 12:34:56', 0), '%f') SETTINGS formatdatetime_f_prints_single_zero = 0; +select formatDateTime(toDateTime64('2010-01-04 12:34:56.123', 3), '%f') SETTINGS formatdatetime_f_prints_single_zero = 0; +select formatDateTime(toDateTime64('2010-01-04 12:34:56.123456', 6), '%f') SETTINGS formatdatetime_f_prints_single_zero = 0; +select formatDateTime(toDateTime64('2010-01-04 12:34:56.123456789', 9), '%f') SETTINGS formatdatetime_f_prints_single_zero = 0; +-- %f (legacy settings) +select formatDateTime(toDate('2010-01-04'), '%f') SETTINGS formatdatetime_f_prints_single_zero = 1; +select formatDateTime(toDate32('2010-01-04'), '%f') SETTINGS formatdatetime_f_prints_single_zero = 1; +select formatDateTime(toDateTime('2010-01-04 12:34:56'), '%f') SETTINGS formatdatetime_f_prints_single_zero = 1; +select formatDateTime(toDateTime64('2010-01-04 12:34:56', 0), '%f') SETTINGS formatdatetime_f_prints_single_zero = 1; +select formatDateTime(toDateTime64('2010-01-04 12:34:56.123', 3), '%f') SETTINGS formatdatetime_f_prints_single_zero = 1; +select formatDateTime(toDateTime64('2010-01-04 12:34:56.123456', 6), '%f') SETTINGS formatdatetime_f_prints_single_zero = 0; +select formatDateTime(toDateTime64('2010-01-04 12:34:56.123456789', 9), '%f') SETTINGS formatdatetime_f_prints_single_zero = 1; + +select formatDateTime(toDateTime64('2022-12-08 18:11:29.1234', 9, 'UTC'), '%F %T.%f'); +select formatDateTime(toDateTime64('2022-12-08 18:11:29.1234', 1, 'UTC'), '%F %T.%f'); +select formatDateTime(toDateTime64('2022-12-08 18:11:29.1234', 0, 'UTC'), '%F %T.%f'); +select formatDateTime(toDateTime('2022-12-08 18:11:29', 'UTC'), '%F %T.%f'); +select formatDateTime(toDate32('2022-12-08 18:11:29', 'UTC'), '%F %T.%f'); +select formatDateTime(toDate('2022-12-08 18:11:29', 'UTC'), '%F %T.%f'); + +-- %c %k %l with different formatdatetime_format_without_leading_zeros +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%c') settings formatdatetime_format_without_leading_zeros = 0; +select formatDateTime(toDateTime('2022-01-08 
02:11:29', 'UTC'), '%m') settings formatdatetime_format_without_leading_zeros = 0; +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%k') settings formatdatetime_format_without_leading_zeros = 0; +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%l') settings formatdatetime_format_without_leading_zeros = 0; +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%h') settings formatdatetime_format_without_leading_zeros = 0; +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%c') settings formatdatetime_format_without_leading_zeros = 1; +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%m') settings formatdatetime_format_without_leading_zeros = 1; +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%k') settings formatdatetime_format_without_leading_zeros = 1; +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%l') settings formatdatetime_format_without_leading_zeros = 1; +select formatDateTime(toDateTime('2022-01-08 02:11:29', 'UTC'), '%h') settings formatdatetime_format_without_leading_zeros = 1; diff --git a/parser/testdata/00718_format_datetime_1/ast.json b/parser/testdata/00718_format_datetime_1/ast.json new file mode 100644 index 000000000..dceddc99c --- /dev/null +++ b/parser/testdata/00718_format_datetime_1/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function formatDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '1900-01-01 00:00:00.000'" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " Literal '%F %T.%f'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001273472, + "rows_read": 12, + "bytes_read": 469 + } +} diff --git a/parser/testdata/00718_format_datetime_1/metadata.json b/parser/testdata/00718_format_datetime_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00718_format_datetime_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00718_format_datetime_1/query.sql b/parser/testdata/00718_format_datetime_1/query.sql new file mode 100644 index 000000000..855b0506f --- /dev/null +++ b/parser/testdata/00718_format_datetime_1/query.sql @@ -0,0 +1,5 @@ +select formatDateTime(toDateTime64('1900-01-01 00:00:00.000', 3, 'UTC'), '%F %T.%f'); +select formatDateTime(toDateTime64('1962-12-08 18:11:29.123', 3, 'UTC'), '%F %T.%f'); +select formatDateTime(toDateTime64('1969-12-31 23:59:59.999', 3, 'UTC'), '%F %T.%f'); +select formatDateTime(toDateTime64('1970-01-01 00:00:00.000', 3, 'UTC'), '%F %T.%f'); +select formatDateTime(toDateTime64('1970-01-01 00:00:00.001', 3, 'UTC'), '%F %T.%f'); diff --git a/parser/testdata/00718_low_cardinaliry_alter/ast.json b/parser/testdata/00718_low_cardinaliry_alter/ast.json new file mode 100644 index 000000000..4695842e2 --- /dev/null +++ b/parser/testdata/00718_low_cardinaliry_alter/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + 
"statistics": + { + "elapsed": 0.001406185, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00718_low_cardinaliry_alter/metadata.json b/parser/testdata/00718_low_cardinaliry_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00718_low_cardinaliry_alter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00718_low_cardinaliry_alter/query.sql b/parser/testdata/00718_low_cardinaliry_alter/query.sql new file mode 100644 index 000000000..524e396bc --- /dev/null +++ b/parser/testdata/00718_low_cardinaliry_alter/query.sql @@ -0,0 +1,18 @@ +set allow_suspicious_low_cardinality_types = 1; +drop table if exists tab_00718; +create table tab_00718 (a String, b LowCardinality(UInt32)) engine = MergeTree order by a; +insert into tab_00718 values ('a', 1); +select *, toTypeName(b) from tab_00718; +alter table tab_00718 modify column b UInt32; +select *, toTypeName(b) from tab_00718; +alter table tab_00718 modify column b LowCardinality(UInt32); +select *, toTypeName(b) from tab_00718; +alter table tab_00718 modify column b LowCardinality(String); +select *, toTypeName(b) from tab_00718; +alter table tab_00718 modify column b LowCardinality(UInt32); +select *, toTypeName(b) from tab_00718; +alter table tab_00718 modify column b String; +select *, toTypeName(b) from tab_00718; +alter table tab_00718 modify column b LowCardinality(UInt32); +select *, toTypeName(b) from tab_00718; +drop table if exists tab_00718; diff --git a/parser/testdata/00719_format_datetime_f_varsize_bug/ast.json b/parser/testdata/00719_format_datetime_f_varsize_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00719_format_datetime_f_varsize_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00719_format_datetime_f_varsize_bug/metadata.json b/parser/testdata/00719_format_datetime_f_varsize_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00719_format_datetime_f_varsize_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00719_format_datetime_f_varsize_bug/query.sql b/parser/testdata/00719_format_datetime_f_varsize_bug/query.sql new file mode 100644 index 000000000..4048505f4 --- /dev/null +++ b/parser/testdata/00719_format_datetime_f_varsize_bug/query.sql @@ -0,0 +1,4 @@ +-- %M is a variable-size formatter. This triggered a bug in %f which forgot to write to all of its 6 output characters. +-- This caused ClickHouse to output whatever happened to be in DRAM in these places. +-- The problem occurs also with a single %f, I just added lots of them to make the issue more likely to occur. 
+select formatDateTime(Date('2026-01-02'),'%h:%M:%s.%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f'); diff --git a/parser/testdata/00719_format_datetime_rand/ast.json b/parser/testdata/00719_format_datetime_rand/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00719_format_datetime_rand/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00719_format_datetime_rand/metadata.json b/parser/testdata/00719_format_datetime_rand/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00719_format_datetime_rand/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00719_format_datetime_rand/query.sql b/parser/testdata/00719_format_datetime_rand/query.sql new file mode 100644 index 000000000..dcd907c01 --- /dev/null +++ b/parser/testdata/00719_format_datetime_rand/query.sql @@ -0,0 +1,11 @@ +-- We add 1, because function toString has special behaviour for zero datetime +WITH toDateTime(1 + rand() % 0xFFFFFFFF) AS t SELECT count() FROM numbers(1000000) WHERE formatDateTime(t, '%F %T') != toString(t); +WITH toDateTime(1 + rand() % 0xFFFFFFFF) AS t SELECT count() FROM numbers(1000000) WHERE formatDateTime(t, '%Y-%m-%d %H:%i:%S') != toString(t); +WITH toDateTime(1 + rand() % 0xFFFFFFFF) AS t SELECT count() FROM numbers(1000000) WHERE formatDateTime(t, '%Y-%m-%d %R:%S') != toString(t); +WITH toDateTime(1 + rand() % 0xFFFFFFFF) AS t SELECT count() FROM numbers(1000000) WHERE formatDateTime(t, '%F %R:%S') != toString(t); + +WITH toDate(today() + rand() % 4096) AS t SELECT count() FROM numbers(1000000) WHERE formatDateTime(t, '%F') != toString(t); + +-- Note: in some other timezones, daylight saving time change happens in midnight, so the first time of day is 01:00:00 instead of 00:00:00. +-- Stick to Moscow timezone to avoid this issue. 
+WITH toDate(today() + rand() % 4096) AS t SELECT count() FROM numbers(1000000) WHERE formatDateTime(t, '%F %T', 'Asia/Istanbul') != toString(toDateTime(t, 'Asia/Istanbul')); diff --git a/parser/testdata/00720_combinations_of_aggregate_combinators/ast.json b/parser/testdata/00720_combinations_of_aggregate_combinators/ast.json new file mode 100644 index 000000000..f6785c331 --- /dev/null +++ b/parser/testdata/00720_combinations_of_aggregate_combinators/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumForEachArray (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[Array_[UInt64_1], Array_[UInt64_2]]" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001087791, + "rows_read": 7, + "bytes_read": 302 + } +} diff --git a/parser/testdata/00720_combinations_of_aggregate_combinators/metadata.json b/parser/testdata/00720_combinations_of_aggregate_combinators/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00720_combinations_of_aggregate_combinators/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00720_combinations_of_aggregate_combinators/query.sql b/parser/testdata/00720_combinations_of_aggregate_combinators/query.sql new file mode 100644 index 000000000..a0d4c27d8 --- /dev/null +++ b/parser/testdata/00720_combinations_of_aggregate_combinators/query.sql @@ -0,0 +1,7 @@ +SELECT sumForEachArray([[1],[2]]); +SELECT sumForEachArrayIf([[number],[number%2]], number < 5) from numbers(10); + +SELECT groupUniqArrayMerge(x) from (select groupUniqArrayStateArray([]) as x); +SELECT groupUniqArrayArrayMerge(x) from (select groupUniqArrayArrayState([]) as x); + +SELECT groupUniqArrayForEachMerge(x) from (select groupUniqArrayForEachStateArray([[1],[1],[1]]) as x); diff --git a/parser/testdata/00720_with_cube/ast.json b/parser/testdata/00720_with_cube/ast.json new file mode 100644 index 000000000..094dc418c --- /dev/null +++ b/parser/testdata/00720_with_cube/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery cube (children 1)" + }, + { + "explain": " Identifier cube" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001008939, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/00720_with_cube/metadata.json b/parser/testdata/00720_with_cube/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00720_with_cube/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00720_with_cube/query.sql b/parser/testdata/00720_with_cube/query.sql new file mode 100644 index 000000000..d236e9da3 --- /dev/null +++ b/parser/testdata/00720_with_cube/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS cube; +CREATE TABLE cube(a String, b Int32, s Int32) ENGINE = Memory; + +INSERT INTO cube VALUES ('a', 1, 10), ('a', 1, 15), ('a', 2, 20); +INSERT INTO cube VALUES ('a', 2, 25), ('b', 1, 10), ('b', 1, 5); +INSERT INTO cube VALUES ('b', 2, 20), ('b', 2, 15); + +SELECT a, b, sum(s), count() from cube GROUP BY CUBE(a, b) ORDER BY a, b; + +SELECT a, b, sum(s), count() from cube GROUP BY CUBE(a, b) WITH TOTALS ORDER BY a, b; 
+ +SELECT a, b, sum(s), count() from cube GROUP BY a, b WITH CUBE ORDER BY a, b; + +SELECT a, b, sum(s), count() from cube GROUP BY a, b WITH CUBE WITH TOTALS ORDER BY a, b; + +SET group_by_two_level_threshold = 1; +SELECT a, b, sum(s), count() from cube GROUP BY a, b WITH CUBE ORDER BY a, b; + +DROP TABLE cube; diff --git a/parser/testdata/00721_force_by_identical_result_after_merge_zookeeper_long/ast.json b/parser/testdata/00721_force_by_identical_result_after_merge_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00721_force_by_identical_result_after_merge_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00721_force_by_identical_result_after_merge_zookeeper_long/metadata.json b/parser/testdata/00721_force_by_identical_result_after_merge_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00721_force_by_identical_result_after_merge_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00721_force_by_identical_result_after_merge_zookeeper_long/query.sql b/parser/testdata/00721_force_by_identical_result_after_merge_zookeeper_long/query.sql new file mode 100644 index 000000000..8afa58840 --- /dev/null +++ b/parser/testdata/00721_force_by_identical_result_after_merge_zookeeper_long/query.sql @@ -0,0 +1,26 @@ +-- Tags: long, zookeeper + +SET prefer_warmed_unmerged_parts_seconds = 0; + +DROP TABLE IF EXISTS byte_identical_r1; +DROP TABLE IF EXISTS byte_identical_r2; + +CREATE TABLE byte_identical_r1(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00721/byte_identical', 'r1') ORDER BY x; +CREATE TABLE byte_identical_r2(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00721/byte_identical', 'r2') ORDER BY x; + +INSERT INTO byte_identical_r1(x) VALUES (1), (2), (3); +SYSTEM SYNC REPLICA byte_identical_r2; + +-- Add a column with a default expression that will yield different values on different replicas. +-- Call optimize to materialize it. Replicas should compare checksums and restore consistency. 
+ALTER TABLE byte_identical_r1 ADD COLUMN y UInt64 DEFAULT rand(); +SYSTEM SYNC REPLICA byte_identical_r1; +SYSTEM SYNC REPLICA byte_identical_r2; +SET replication_alter_partitions_sync=2; +OPTIMIZE TABLE byte_identical_r1 PARTITION tuple() FINAL; +SYSTEM SYNC REPLICA byte_identical_r2; + +SELECT x, t1.y - t2.y FROM byte_identical_r1 t1 SEMI LEFT JOIN byte_identical_r2 t2 USING x ORDER BY x; + +DROP TABLE byte_identical_r1; +DROP TABLE byte_identical_r2; diff --git a/parser/testdata/00722_inner_join/ast.json b/parser/testdata/00722_inner_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00722_inner_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00722_inner_join/metadata.json b/parser/testdata/00722_inner_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00722_inner_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00722_inner_join/query.sql b/parser/testdata/00722_inner_join/query.sql new file mode 100644 index 000000000..aa590f470 --- /dev/null +++ b/parser/testdata/00722_inner_join/query.sql @@ -0,0 +1,97 @@ +-- Tags: no-parallel + +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS one; +CREATE TABLE one(dummy UInt8) ENGINE = Memory; + +SELECT database, t.name + FROM system.tables AS t + ALL INNER JOIN (SELECT name AS database FROM system.databases) AS db USING database + WHERE database = 'system' AND t.name = 'one' + FORMAT PrettyCompactNoEscapes; + +SELECT database, t.name + FROM (SELECT name AS database FROM system.databases) AS db + ALL INNER JOIN system.tables AS t USING database + WHERE database = 'system' AND t.name = 'one' + FORMAT PrettyCompactNoEscapes; + +SELECT database, t.name + FROM (SELECT name, database FROM system.tables) AS t + ALL INNER JOIN (SELECT name AS database FROM system.databases) AS db USING database + WHERE database = 'system' AND t.name = 'one' + FORMAT PrettyCompactNoEscapes; + +SELECT x, t.name + FROM (SELECT name, database AS x FROM system.tables) AS t + ALL INNER JOIN (SELECT name AS x FROM system.databases) AS db USING x + WHERE x = 'system' AND t.name = 'one' + FORMAT PrettyCompactNoEscapes; + +SELECT database, t.name + FROM (SELECT name, database FROM system.tables) AS t + JOIN (SELECT name AS database FROM system.databases) AS db USING database + WHERE database = 'system' AND t.name = 'one' + SETTINGS join_default_strictness = 'ALL' + FORMAT PrettyCompactNoEscapes; + +SELECT x, t.name + FROM (SELECT name, database AS x FROM system.tables) AS t + JOIN (SELECT name AS x FROM system.databases) AS db USING x + WHERE x = 'system' AND t.name = 'one' + SETTINGS join_default_strictness = 'ALL' + FORMAT PrettyCompactNoEscapes; + +SET join_default_strictness = 'ALL'; + +SELECT database, t.name + FROM (SELECT * FROM system.tables) AS t + JOIN (SELECT name, name AS database FROM system.databases) AS db USING database + WHERE db.name = 'system' AND t.name = 'one' + FORMAT PrettyCompactNoEscapes; + +SELECT db.x, t.name + FROM (SELECT name, database AS x FROM system.tables) AS t + JOIN (SELECT name AS x FROM system.databases) AS db USING x + WHERE x = 'system' AND t.name = 'one' + FORMAT PrettyCompactNoEscapes; + +SELECT db.name, t.name + FROM (SELECT name, database FROM system.tables WHERE name = 'one') AS t + JOIN (SELECT name FROM system.databases WHERE name = 'system') AS db ON t.database = db.name + FORMAT PrettyCompactNoEscapes; + +SELECT db.name, t.name + FROM system.tables AS t + JOIN (SELECT * FROM 
system.databases WHERE name = 'system') AS db ON t.database = db.name + WHERE t.name = 'one' + FORMAT PrettyCompactNoEscapes; + +SELECT t.database, t.name + FROM system.tables AS t + JOIN (SELECT name, name AS database FROM system.databases) AS db ON t.database = db.name + WHERE t.database = 'system' AND t.name = 'one' + FORMAT PrettyCompactNoEscapes; + +SELECT t.database, t.name + FROM system.tables t + ANY LEFT JOIN (SELECT 'system' AS base, 'one' AS name) db USING name + WHERE t.database = db.base + FORMAT PrettyCompactNoEscapes; + +SELECT count(t.database) + FROM (SELECT * FROM system.tables WHERE name = 'one') AS t + JOIN system.databases AS db ON t.database = db.name; + +SELECT count(db.name) + FROM system.tables AS t + JOIN system.databases AS db ON t.database = db.name + WHERE t.name = 'one'; + +SELECT count() + FROM system.tables AS t + JOIN system.databases AS db ON db.name = t.database + WHERE t.name = 'one'; + +DROP TABLE one; diff --git a/parser/testdata/00723_remerge_sort/ast.json b/parser/testdata/00723_remerge_sort/ast.json new file mode 100644 index 000000000..df43b278d --- /dev/null +++ b/parser/testdata/00723_remerge_sort/ast.json @@ -0,0 +1,127 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toString (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_2000000" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_10000" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 35, + + "statistics": + { + "elapsed": 0.001392392, + "rows_read": 35, + "bytes_read": 1586 + } +} diff --git a/parser/testdata/00723_remerge_sort/metadata.json b/parser/testdata/00723_remerge_sort/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00723_remerge_sort/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00723_remerge_sort/query.sql 
b/parser/testdata/00723_remerge_sort/query.sql new file mode 100644 index 000000000..562a5e802 --- /dev/null +++ b/parser/testdata/00723_remerge_sort/query.sql @@ -0,0 +1,3 @@ +SELECT * FROM (SELECT x FROM (SELECT toString(number) AS x FROM system.numbers LIMIT 2000000) ORDER BY x LIMIT 10000) LIMIT 10; +SET max_bytes_before_remerge_sort = 1000000; +SELECT * FROM (SELECT x FROM (SELECT toString(number) AS x FROM system.numbers LIMIT 2000000) ORDER BY x LIMIT 10000) LIMIT 10; diff --git a/parser/testdata/00724_insert_values_datetime_conversion/ast.json b/parser/testdata/00724_insert_values_datetime_conversion/ast.json new file mode 100644 index 000000000..861543708 --- /dev/null +++ b/parser/testdata/00724_insert_values_datetime_conversion/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_00724 (children 1)" + }, + { + "explain": " Identifier test_00724" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000985371, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00724_insert_values_datetime_conversion/metadata.json b/parser/testdata/00724_insert_values_datetime_conversion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00724_insert_values_datetime_conversion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00724_insert_values_datetime_conversion/query.sql b/parser/testdata/00724_insert_values_datetime_conversion/query.sql new file mode 100644 index 000000000..0e988e0dc --- /dev/null +++ b/parser/testdata/00724_insert_values_datetime_conversion/query.sql @@ -0,0 +1,4 @@ +DROP TEMPORARY TABLE IF EXISTS test_00724; +CREATE TEMPORARY TABLE test_00724 (d Date, dt DateTime); +INSERT INTO test_00724 VALUES (toDateTime('2000-01-01 01:02:03'), toDate('2000-01-01')); +SELECT * FROM test_00724; diff --git a/parser/testdata/00725_comment_columns_long/ast.json b/parser/testdata/00725_comment_columns_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00725_comment_columns_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00725_comment_columns_long/metadata.json b/parser/testdata/00725_comment_columns_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00725_comment_columns_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00725_comment_columns_long/query.sql b/parser/testdata/00725_comment_columns_long/query.sql new file mode 100644 index 000000000..068f35b2e --- /dev/null +++ b/parser/testdata/00725_comment_columns_long/query.sql @@ -0,0 +1,94 @@ +-- Tags: long, no-replicated-database +-- Tag no-replicated-database: Unsupported type of ALTER query +SET output_format_pretty_row_numbers = 0; + +DROP TABLE IF EXISTS check_query_comment_column; + +-- Check COMMENT COLUMN and MODIFY COLUMN statements with simple engine +CREATE TABLE check_query_comment_column + ( + first_column UInt8 DEFAULT 1 COMMENT 'comment 1', + second_column UInt8 MATERIALIZED first_column COMMENT 'comment 2', + third_column UInt8 ALIAS second_column COMMENT 'comment 3', + fourth_column UInt8 COMMENT 'comment 4', + fifth_column UInt8 + ) ENGINE = TinyLog; + +SHOW CREATE TABLE check_query_comment_column; +DESCRIBE TABLE check_query_comment_column; + +SELECT table, name, comment +FROM system.columns +WHERE table = 'check_query_comment_column' AND database = currentDatabase() +FORMAT 
PrettyCompactNoEscapes; + +ALTER TABLE check_query_comment_column + COMMENT COLUMN first_column 'comment 1_1', + COMMENT COLUMN second_column 'comment 2_1', + COMMENT COLUMN third_column 'comment 3_1', + COMMENT COLUMN fourth_column 'comment 4_1', + COMMENT COLUMN fifth_column 'comment 5_1'; + +SHOW CREATE TABLE check_query_comment_column; + +ALTER TABLE check_query_comment_column + MODIFY COLUMN first_column COMMENT 'comment 1_2', + MODIFY COLUMN second_column COMMENT 'comment 2_2', + MODIFY COLUMN third_column COMMENT 'comment 3_2', + MODIFY COLUMN fourth_column COMMENT 'comment 4_2', + MODIFY COLUMN fifth_column COMMENT 'comment 5_2'; + +SELECT table, name, comment +FROM system.columns +WHERE table = 'check_query_comment_column' AND database = currentDatabase() +FORMAT PrettyCompactNoEscapes; + +SHOW CREATE TABLE check_query_comment_column; +DROP TABLE IF EXISTS check_query_comment_column; + +-- Check `ALTER TABLE table_name COMMENT COLUMN 'comment'` statement with MergeTree engine +CREATE TABLE check_query_comment_column + ( + first_column UInt8 COMMENT 'comment 1', + second_column UInt8 COMMENT 'comment 2', + third_column UInt8 COMMENT 'comment 3' + ) ENGINE = MergeTree() + ORDER BY first_column + PARTITION BY second_column + SAMPLE BY first_column; + +SHOW CREATE TABLE check_query_comment_column; +DESCRIBE TABLE check_query_comment_column; + +SELECT table, name, comment +FROM system.columns +WHERE table = 'check_query_comment_column' AND database = currentDatabase() +FORMAT PrettyCompactNoEscapes; + +ALTER TABLE check_query_comment_column + COMMENT COLUMN first_column 'comment 1_2', + COMMENT COLUMN second_column 'comment 2_2', + COMMENT COLUMN third_column 'comment 3_2'; + +SHOW CREATE TABLE check_query_comment_column; + +ALTER TABLE check_query_comment_column + MODIFY COLUMN first_column COMMENT 'comment 1_3', + MODIFY COLUMN second_column COMMENT 'comment 2_3', + MODIFY COLUMN third_column COMMENT 'comment 3_3'; + +SHOW CREATE TABLE check_query_comment_column; + +ALTER TABLE check_query_comment_column + MODIFY COLUMN first_column DEFAULT 1 COMMENT 'comment 1_3', + MODIFY COLUMN second_column COMMENT 'comment 2_3', -- We can't change default value of partition key. 
+ MODIFY COLUMN third_column DEFAULT 1 COMMENT 'comment 3_3'; + +SELECT table, name, comment +FROM system.columns +WHERE table = 'check_query_comment_column' and database = currentDatabase() +FORMAT PrettyCompactNoEscapes; + +DROP TABLE IF EXISTS check_query_comment_column; + +-- TODO: add here tests with ReplicatedMergeTree({database}) diff --git a/parser/testdata/00725_ipv4_ipv6_domains/ast.json b/parser/testdata/00725_ipv4_ipv6_domains/ast.json new file mode 100644 index 000000000..0d66d19f4 --- /dev/null +++ b/parser/testdata/00725_ipv4_ipv6_domains/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ipv4_test (children 1)" + }, + { + "explain": " Identifier ipv4_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001130826, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00725_ipv4_ipv6_domains/metadata.json b/parser/testdata/00725_ipv4_ipv6_domains/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00725_ipv4_ipv6_domains/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00725_ipv4_ipv6_domains/query.sql b/parser/testdata/00725_ipv4_ipv6_domains/query.sql new file mode 100644 index 000000000..5815afb16 --- /dev/null +++ b/parser/testdata/00725_ipv4_ipv6_domains/query.sql @@ -0,0 +1,92 @@ +DROP TABLE IF EXISTS ipv4_test; + +-- Only valid values for IPv4 +CREATE TABLE ipv4_test (ipv4_ IPv4) ENGINE = Memory; + +-- ipv4_ column should have type 'IPv4' +SHOW CREATE TABLE ipv4_test; + +INSERT INTO ipv4_test (ipv4_) VALUES ('0.0.0.0'), ('255.255.255.255'), ('192.168.0.91'), ('127.0.0.1'), ('8.8.8.8'); + +SELECT ipv4_, hex(ipv4_) FROM ipv4_test ORDER BY ipv4_; + +SELECT '< 127.0.0.1', ipv4_ FROM ipv4_test + WHERE ipv4_ < toIPv4('127.0.0.1') + ORDER BY ipv4_; + +SELECT '> 127.0.0.1', ipv4_ FROM ipv4_test + WHERE ipv4_ > toIPv4('127.0.0.1') + ORDER BY ipv4_; + +SELECT '= 127.0.0.1', ipv4_ FROM ipv4_test + WHERE ipv4_ = toIPv4('127.0.0.1') + ORDER BY ipv4_; + +-- TODO: Assert that invalid values can't be inserted into IPv4 column. + +DROP TABLE IF EXISTS ipv4_test; + + +select 'equality of IPv4-mapped IPv6 value and IPv4 promoted to IPv6 with function:', toIPv6('::ffff:127.0.0.1') = IPv4ToIPv6(toIPv4('127.0.0.1')); + + +DROP TABLE IF EXISTS ipv6_test; + +-- Only valid values for IPv6 +CREATE TABLE ipv6_test (ipv6_ IPv6) ENGINE = Memory; + +-- ipv6_ column should have type 'IPv6' +SHOW CREATE TABLE ipv6_test; + +INSERT INTO ipv6_test VALUES ('::'), ('0:0:0:0:0:0:0:0'), ('FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF'), ('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'), ('0000:0000:0000:0000:0000:FFFF:C1FC:110A'), ('::ffff:127.0.0.1'), ('::ffff:8.8.8.8'); + +SELECT ipv6_, hex(ipv6_) FROM ipv6_test ORDER BY ipv6_; + +SELECT '< 127.0.0.1', ipv6_ FROM ipv6_test + WHERE ipv6_ < IPv4ToIPv6(toIPv4('127.0.0.1')) + ORDER BY ipv6_; + +SELECT '> 127.0.0.1', ipv6_ FROM ipv6_test + WHERE ipv6_ > IPv4ToIPv6(toIPv4('127.0.0.1')) + ORDER BY ipv6_; + +SELECT '= 127.0.0.1', ipv6_ FROM ipv6_test + WHERE ipv6_ = IPv4ToIPv6(toIPv4('127.0.0.1')) + ORDER BY ipv6_; + +-- TODO: Assert that invalid values can't be inserted into IPv6 column.
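+-- One possible shape for such an assertion, kept commented out as a sketch: the CANNOT_PARSE_IPV6 error code is an assumption here and would need verifying against the server before enabling it. +-- INSERT INTO ipv6_test VALUES ('not-an-ip'); -- { serverError CANNOT_PARSE_IPV6 }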
+ +DROP TABLE IF EXISTS ipv6_test; + +SELECT '0.0.0.0 is ipv4 string: ', isIPv4String('0.0.0.0'); +SELECT '255.255.255.255 is ipv4 string: ', isIPv4String('255.255.255.255'); +SELECT '192.168.0.91 is ipv4 string: ', isIPv4String('192.168.0.91'); +SELECT '127.0.0.1 is ipv4 string: ', isIPv4String('127.0.0.1'); +SELECT '8.8.8.8 is ipv4 string: ', isIPv4String('8.8.8.8'); +SELECT 'hello is ipv4 string: ', isIPv4String('hello'); +SELECT '0:0:0:0:0:0:0:0 is ipv4 string: ', isIPv4String('0:0:0:0:0:0:0:0'); +SELECT '0000:0000:0000:0000:0000:FFFF:C1FC:110A is ipv4 string: ', isIPv4String('0000:0000:0000:0000:0000:FFFF:C1FC:110A'); +SELECT 'FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF is ipv4 string: ', isIPv4String('FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF'); +SELECT '::ffff:127.0.0.1 is ipv4 string: ', isIPv4String('::ffff:127.0.0.1'); +SELECT '::ffff:8.8.8.8 is ipv4 string: ', isIPv4String('::ffff:8.8.8.8'); +SELECT '2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D is ipv4 string: ', isIPv4String('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'); + +SELECT '0.0.0.0 is ipv6 string: ', isIPv6String('0.0.0.0'); +SELECT '255.255.255.255 is ipv6 string: ', isIPv6String('255.255.255.255'); +SELECT '192.168.0.91 is ipv6 string: ', isIPv6String('192.168.0.91'); +SELECT '127.0.0.1 is ipv6 string: ', isIPv6String('127.0.0.1'); +SELECT '8.8.8.8 is ipv6 string: ', isIPv6String('8.8.8.8'); +SELECT 'hello is ipv6 string: ', isIPv6String('hello'); +SELECT '0:0:0:0:0:0:0:0 is ipv6 string: ', isIPv6String('0:0:0:0:0:0:0:0'); +SELECT '0000:0000:0000:0000:0000:FFFF:C1FC:110A is ipv6 string: ', isIPv6String('0000:0000:0000:0000:0000:FFFF:C1FC:110A'); +SELECT 'FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF is ipv6 string: ', isIPv6String('FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF'); +SELECT '::ffff:127.0.0.1 is ipv6 string: ', isIPv6String('::ffff:127.0.0.1'); +SELECT '::ffff:8.8.8.8 is ipv6 string: ', isIPv6String('::ffff:8.8.8.8'); +SELECT '2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D is ipv6 string: ', isIPv6String('2001:0DB8:AC10:FE01:FEED:BABE:CAFE:F00D'); + +-- IPV6 functions parse IPv4 addresses. + +SELECT toIPv6('0.0.0.0'); +SELECT toIPv6('127.0.0.1'); +SELECT cutIPv6(IPv6StringToNum('127.0.0.1'), 0, 0); +SELECT toIPv6('127.0.0.' 
|| toString(number)) FROM numbers(13); diff --git a/parser/testdata/00725_join_on_bug_1/ast.json b/parser/testdata/00725_join_on_bug_1/ast.json new file mode 100644 index 000000000..3db474b60 --- /dev/null +++ b/parser/testdata/00725_join_on_bug_1/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery a1 (children 1)" + }, + { + "explain": " Identifier a1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00094043, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/00725_join_on_bug_1/metadata.json b/parser/testdata/00725_join_on_bug_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00725_join_on_bug_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00725_join_on_bug_1/query.sql b/parser/testdata/00725_join_on_bug_1/query.sql new file mode 100644 index 000000000..3e1501768 --- /dev/null +++ b/parser/testdata/00725_join_on_bug_1/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS a1; +DROP TABLE IF EXISTS a2; + +CREATE TABLE a1(a UInt8, b UInt8) ENGINE=Memory; +CREATE TABLE a2(a UInt8, b UInt8) ENGINE=Memory; + +INSERT INTO a1 VALUES (1, 1), (1, 2), (2, 3); +INSERT INTO a2 VALUES (1, 2), (1, 3), (1, 4); + +SELECT * FROM a1 as a left JOIN a2 as b on a.a=b.a ORDER BY b SETTINGS join_default_strictness='ANY'; +SELECT '-'; +SELECT a1.*, a2.* FROM a1 ANY LEFT JOIN a2 USING a ORDER BY b; + +DROP TABLE IF EXISTS a1; +DROP TABLE IF EXISTS a2; diff --git a/parser/testdata/00725_join_on_bug_2/ast.json b/parser/testdata/00725_join_on_bug_2/ast.json new file mode 100644 index 000000000..3d91b1c5d --- /dev/null +++ b/parser/testdata/00725_join_on_bug_2/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001017732, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00725_join_on_bug_2/metadata.json b/parser/testdata/00725_join_on_bug_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00725_join_on_bug_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00725_join_on_bug_2/query.sql b/parser/testdata/00725_join_on_bug_2/query.sql new file mode 100644 index 000000000..a1a2a9738 --- /dev/null +++ b/parser/testdata/00725_join_on_bug_2/query.sql @@ -0,0 +1,25 @@ +set joined_subquery_requires_alias = 0; + +drop table if exists t_00725_2; +drop table if exists s_00725_2; + +create table t_00725_2(a Int64, b Int64) engine = MergeTree ORDER BY tuple(); +insert into t_00725_2 values(1,1); +insert into t_00725_2 values(2,2); +create table s_00725_2(a Int64, b Int64) engine = MergeTree ORDER BY tuple(); +insert into s_00725_2 values(1,1); + +select a, b, s_a, s_b from t_00725_2 all left join (select a,b,a s_a, b s_b from s_00725_2) using (a,b) ORDER BY ALL; +select '-'; +select t_00725_2.*, s_00725_2.* from t_00725_2 all left join s_00725_2 using (a,b) ORDER BY ALL; +select '-'; +select a,b,s_a,s_b from t_00725_2 all left join (select a, b, a s_a, b s_b from s_00725_2) s_00725_2 on (s_00725_2.a = t_00725_2.a and s_00725_2.b = t_00725_2.b) ORDER BY ALL; +select '-'; +select * from t_00725_2 all left join (select a s_a, b s_b from s_00725_2) on (s_a = t_00725_2.a and s_b = t_00725_2.b) ORDER BY ALL; +select '-'; +select a,b,s_a,s_b from t_00725_2 all left join (select a,b, a s_a, b s_b from 
s_00725_2) on (s_a = t_00725_2.a and s_b = t_00725_2.b) ORDER BY ALL; +select '-'; +select t_00725_2.*, s_00725_2.* from t_00725_2 all left join s_00725_2 on (s_00725_2.a = t_00725_2.a and s_00725_2.b = t_00725_2.b) ORDER BY ALL; + +drop table if exists t_00725_2; +drop table if exists s_00725_2; diff --git a/parser/testdata/00725_join_on_bug_3/ast.json b/parser/testdata/00725_join_on_bug_3/ast.json new file mode 100644 index 000000000..29220fbec --- /dev/null +++ b/parser/testdata/00725_join_on_bug_3/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_00725_3 (children 1)" + }, + { + "explain": " Identifier t_00725_3" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00098925, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00725_join_on_bug_3/metadata.json b/parser/testdata/00725_join_on_bug_3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00725_join_on_bug_3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00725_join_on_bug_3/query.sql b/parser/testdata/00725_join_on_bug_3/query.sql new file mode 100644 index 000000000..678516b4f --- /dev/null +++ b/parser/testdata/00725_join_on_bug_3/query.sql @@ -0,0 +1,14 @@ +drop table if exists t_00725_3; +drop table if exists z_00725_3; + +create table t_00725_3(a Int64, b Int64) engine = TinyLog; +insert into t_00725_3 values(1,1); +insert into t_00725_3 values(2,2); +create table z_00725_3(c Int64, d Int64, e Int64) engine = TinyLog; +insert into z_00725_3 values(1,1,1); + +select * from t_00725_3 all left join z_00725_3 on (z_00725_3.c = t_00725_3.a and z_00725_3.d = t_00725_3.b) ORDER BY t_00725_3.a; + +drop table if exists t_00725_3; +drop table if exists z_00725_3; + diff --git a/parser/testdata/00725_join_on_bug_4/ast.json b/parser/testdata/00725_join_on_bug_4/ast.json new file mode 100644 index 000000000..b6d367c2e --- /dev/null +++ b/parser/testdata/00725_join_on_bug_4/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_00725_4 (children 1)" + }, + { + "explain": " Identifier t_00725_4" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001018788, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00725_join_on_bug_4/metadata.json b/parser/testdata/00725_join_on_bug_4/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00725_join_on_bug_4/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00725_join_on_bug_4/query.sql b/parser/testdata/00725_join_on_bug_4/query.sql new file mode 100644 index 000000000..a456408bb --- /dev/null +++ b/parser/testdata/00725_join_on_bug_4/query.sql @@ -0,0 +1,13 @@ +drop table if exists t_00725_4; +drop table if exists s_00725_4; + +create table t_00725_4(a Int64, b Int64, c String) engine = TinyLog; +insert into t_00725_4 values(1,1,'a'),(2,2,'b'); +create table s_00725_4(a Int64, b Int64, c String) engine = TinyLog; +insert into s_00725_4 values(1,1,'a'); + + +select t_00725_4.* from t_00725_4 all left join s_00725_4 on (s_00725_4.a = t_00725_4.a and s_00725_4.b = t_00725_4.b) where s_00725_4.a = 0 and s_00725_4.b = 0; + +drop table if exists t_00725_4; +drop table if exists s_00725_4; diff --git a/parser/testdata/00725_memory_tracking/ast.json b/parser/testdata/00725_memory_tracking/ast.json new file mode 100644 index 
000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00725_memory_tracking/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00725_memory_tracking/metadata.json b/parser/testdata/00725_memory_tracking/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00725_memory_tracking/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00725_memory_tracking/query.sql b/parser/testdata/00725_memory_tracking/query.sql new file mode 100644 index 000000000..b7356f0a6 --- /dev/null +++ b/parser/testdata/00725_memory_tracking/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-replicated-database + +SELECT least(value, 0) FROM system.metrics WHERE metric = 'MemoryTracking'; +SELECT length(range(100000000)); +SELECT least(value, 0) FROM system.metrics WHERE metric = 'MemoryTracking'; diff --git a/parser/testdata/00725_quantiles_shard/ast.json b/parser/testdata/00725_quantiles_shard/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00725_quantiles_shard/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00725_quantiles_shard/metadata.json b/parser/testdata/00725_quantiles_shard/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00725_quantiles_shard/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00725_quantiles_shard/query.sql b/parser/testdata/00725_quantiles_shard/query.sql new file mode 100644 index 000000000..a3742ce87 --- /dev/null +++ b/parser/testdata/00725_quantiles_shard/query.sql @@ -0,0 +1,6 @@ +-- Tags: shard + +SELECT quantiles(0.5, 0.9)(number) FROM remote('127.0.0.{1,2}', numbers(10)); +SELECT quantilesExact(0.5, 0.9)(number) FROM remote('127.0.0.{1,2}', numbers(10)); +SELECT quantilesTDigest(0.5, 0.9)(number) FROM remote('127.0.0.{1,2}', numbers(10)); +SELECT quantilesDeterministic(0.5, 0.9)(number, number) FROM remote('127.0.0.{1,2}', numbers(10)); diff --git a/parser/testdata/00726_length_aliases/ast.json b/parser/testdata/00726_length_aliases/ast.json new file mode 100644 index 000000000..63810ecec --- /dev/null +++ b/parser/testdata/00726_length_aliases/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function LENGTH (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'корова'" + }, + { + "explain": " Function CHAR_LENGTH (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'ворона'" + }, + { + "explain": " Function CHARACTER_LENGTH (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'фейхоа'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001183514, + "rows_read": 13, + "bytes_read": 512 + } +} diff --git a/parser/testdata/00726_length_aliases/metadata.json b/parser/testdata/00726_length_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00726_length_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00726_length_aliases/query.sql b/parser/testdata/00726_length_aliases/query.sql new file mode 100644 index 000000000..ac8c469c7 --- /dev/null +++ 
b/parser/testdata/00726_length_aliases/query.sql @@ -0,0 +1 @@ +SELECT LENGTH('корова'), CHAR_LENGTH('ворона'), CHARACTER_LENGTH('фейхоа'); diff --git a/parser/testdata/00726_materialized_view_concurrent/ast.json b/parser/testdata/00726_materialized_view_concurrent/ast.json new file mode 100644 index 000000000..32467830e --- /dev/null +++ b/parser/testdata/00726_materialized_view_concurrent/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery src_00726 (children 1)" + }, + { + "explain": " Identifier src_00726" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001136545, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00726_materialized_view_concurrent/metadata.json b/parser/testdata/00726_materialized_view_concurrent/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00726_materialized_view_concurrent/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00726_materialized_view_concurrent/query.sql b/parser/testdata/00726_materialized_view_concurrent/query.sql new file mode 100644 index 000000000..3c03f97cf --- /dev/null +++ b/parser/testdata/00726_materialized_view_concurrent/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS src_00726; +DROP TABLE IF EXISTS mv1_00726; +DROP TABLE IF EXISTS mv2_00726; + +CREATE TABLE src_00726 (x UInt8) ENGINE = Null; +CREATE MATERIALIZED VIEW mv1_00726 ENGINE = Memory AS SELECT x FROM src_00726 WHERE x % 2 = 0; +CREATE MATERIALIZED VIEW mv2_00726 ENGINE = Memory AS SELECT x FROM src_00726 WHERE x % 2 = 1; + +SET parallel_view_processing = 1; +INSERT INTO src_00726 VALUES (1), (2); + +SET parallel_view_processing = 0; +INSERT INTO src_00726 VALUES (3), (4); + +SELECT * FROM mv1_00726 ORDER BY x; +SELECT * FROM mv2_00726 ORDER BY x; + +DROP TABLE mv1_00726; +DROP TABLE mv2_00726; +DROP TABLE src_00726; diff --git a/parser/testdata/00726_modulo_for_date/ast.json b/parser/testdata/00726_modulo_for_date/ast.json new file mode 100644 index 000000000..dd9dde04d --- /dev/null +++ b/parser/testdata/00726_modulo_for_date/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2018-06-21'" + }, + { + "explain": " Literal UInt64_234" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toUInt16 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2018-06-21'" + }, + { + "explain": " Literal UInt64_234" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001018434, + "rows_read": 20, + "bytes_read": 813 + } +} diff --git a/parser/testdata/00726_modulo_for_date/metadata.json b/parser/testdata/00726_modulo_for_date/metadata.json 
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00726_modulo_for_date/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00726_modulo_for_date/query.sql b/parser/testdata/00726_modulo_for_date/query.sql
new file mode 100644
index 000000000..3501f6862
--- /dev/null
+++ b/parser/testdata/00726_modulo_for_date/query.sql
@@ -0,0 +1,13 @@
+SELECT toDate('2018-06-21') % 234 = toUInt16(toDate('2018-06-21')) % 234;
+SELECT toDate('2018-06-21') % 23456 = toUInt16(toDate('2018-06-21')) % 23456;
+SELECT toDate('2018-06-21') % 12376 = toUInt16(toDate('2018-06-21')) % 12376;
+SELECT toDateTime('2018-06-21 12:12:12') % 234 = toUInt32(toDateTime('2018-06-21 12:12:12')) % 234;
+SELECT toDateTime('2018-06-21 12:12:12') % 23456 = toUInt32(toDateTime('2018-06-21 12:12:12')) % 23456;
+SELECT toDateTime('2018-06-21 12:12:12') % 12376 = toUInt32(toDateTime('2018-06-21 12:12:12')) % 12376;
+
+SELECT toDate('2018-06-21') % 234.8 = toUInt16(toDate('2018-06-21')) % 234.8;
+SELECT toDate('2018-06-21') % 23456.8 = toUInt16(toDate('2018-06-21')) % 23456.8;
+SELECT toDate('2018-06-21') % 12376.8 = toUInt16(toDate('2018-06-21')) % 12376.8;
+SELECT toDateTime('2018-06-21 12:12:12') % 234.8 = toUInt32(toDateTime('2018-06-21 12:12:12')) % 234.8;
+SELECT toDateTime('2018-06-21 12:12:12') % 23456.8 = toUInt32(toDateTime('2018-06-21 12:12:12')) % 23456.8;
+SELECT toDateTime('2018-06-21 12:12:12') % 12376.8 = toUInt32(toDateTime('2018-06-21 12:12:12')) % 12376.8;
diff --git a/parser/testdata/00727_concat/ast.json b/parser/testdata/00727_concat/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00727_concat/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00727_concat/metadata.json b/parser/testdata/00727_concat/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00727_concat/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00727_concat/query.sql b/parser/testdata/00727_concat/query.sql
new file mode 100644
index 000000000..fe0c4a00d
--- /dev/null
+++ b/parser/testdata/00727_concat/query.sql
@@ -0,0 +1,98 @@
+-- Tags: no-fasttest
+-- no-fasttest: json type needs rapidjson library, geo types need s2 geometry
+
+SET enable_json_type = 1;
+SET allow_suspicious_low_cardinality_types=1;
+
+SELECT '-- Const string + non-const arbitrary type';
+SELECT concat('With ', materialize(42 :: Int8));
+SELECT concat('With ', materialize(43 :: Int16));
+SELECT concat('With ', materialize(44 :: Int32));
+SELECT concat('With ', materialize(45 :: Int64));
+SELECT concat('With ', materialize(46 :: Int128));
+SELECT concat('With ', materialize(47 :: Int256));
+SELECT concat('With ', materialize(48 :: UInt8));
+SELECT concat('With ', materialize(49 :: UInt16));
+SELECT concat('With ', materialize(50 :: UInt32));
+SELECT concat('With ', materialize(51 :: UInt64));
+SELECT concat('With ', materialize(52 :: UInt128));
+SELECT concat('With ', materialize(53 :: UInt256));
+SELECT concat('With ', materialize(42.42 :: Float32));
+SELECT concat('With ', materialize(43.43 :: Float64));
+SELECT concat('With ', materialize(44.44 :: Decimal(2)));
+SELECT concat('With ', materialize(true :: Bool));
+SELECT concat('With ', materialize(false :: Bool));
+SELECT concat('With ', materialize('foo' :: String));
+SELECT concat('With ', materialize('bar' :: FixedString(3)));
+SELECT concat('With ', materialize('foo' :: Nullable(String)));
+SELECT concat('With ', materialize('bar' :: Nullable(FixedString(3))));
+SELECT concat('With ', materialize('foo' :: LowCardinality(String)));
+SELECT concat('With ', materialize('bar' :: LowCardinality(FixedString(3))));
+SELECT concat('With ', materialize('foo' :: LowCardinality(Nullable(String))));
+SELECT concat('With ', materialize('bar' :: LowCardinality(Nullable(FixedString(3)))));
+SELECT concat('With ', materialize(42 :: LowCardinality(Nullable(UInt32))));
+SELECT concat('With ', materialize(42 :: LowCardinality(UInt32)));
+SELECT concat('With ', materialize('fae310ca-d52a-4923-9e9b-02bf67f4b009' :: UUID));
+SELECT concat('With ', materialize('2023-11-14' :: Date));
+SELECT concat('With ', materialize('2123-11-14' :: Date32));
+SELECT concat('With ', materialize('2023-11-14 05:50:12' :: DateTime('Europe/Amsterdam')));
+SELECT concat('With ', materialize('2023-11-14 05:50:12.123' :: DateTime64(3, 'Europe/Amsterdam')));
+SELECT concat('With ', materialize('hallo' :: Enum('hallo' = 1)));
+SELECT concat('With ', materialize(['foo', 'bar'] :: Array(String)));
+SELECT concat('With ', materialize('{"foo": "bar"}' :: JSON));
+SELECT concat('With ', materialize((42, 'foo') :: Tuple(Int32, String)));
+SELECT concat('With ', materialize(map(42, 'foo') :: Map(Int32, String)));
+SELECT concat('With ', materialize('122.233.64.201' :: IPv4));
+SELECT concat('With ', materialize('2001:0001:130F:0002:0003:09C0:876A:130B' :: IPv6));
+SELECT concat('With ', materialize((42, 43) :: Point));
+SELECT concat('With ', materialize([(0,0),(10,0),(10,10),(0,10)] :: Ring));
+SELECT concat('With ', materialize([[(20, 20), (50, 20), (50, 50), (20, 50)], [(30, 30), (50, 50), (50, 30)]] :: Polygon));
+SELECT concat('With ', materialize([[[(0, 0), (10, 0), (10, 10), (0, 10)]], [[(20, 20), (50, 20), (50, 50), (20, 50)],[(30, 30), (50, 50), (50, 30)]]] :: MultiPolygon));
+
+SELECT '-- SimpleAggregateFunction';
+DROP TABLE IF EXISTS concat_saf_test;
+CREATE TABLE concat_saf_test(x SimpleAggregateFunction(max, Int32)) ENGINE=MergeTree ORDER BY tuple();
+INSERT INTO concat_saf_test VALUES (42);
+INSERT INTO concat_saf_test SELECT max(number) FROM numbers(5);
+SELECT concat('With ', x) FROM concat_saf_test ORDER BY x DESC;
+DROP TABLE concat_saf_test;
+
+SELECT '-- Nested';
+DROP TABLE IF EXISTS concat_nested_test;
+CREATE TABLE concat_nested_test(attrs Nested(k String, v String)) ENGINE = MergeTree ORDER BY tuple();
+INSERT INTO concat_nested_test VALUES (['foo', 'bar'], ['qaz', 'qux']);
+SELECT concat('With ', attrs.k, attrs.v) FROM concat_nested_test;
+DROP TABLE concat_nested_test;
+
+SELECT '-- NULL arguments';
+SELECT concat(NULL, NULL);
+SELECT concat(NULL, materialize(NULL :: Nullable(UInt64)));
+SELECT concat(materialize(NULL :: Nullable(UInt64)), materialize(NULL :: Nullable(UInt64)));
+SELECT concat(42, materialize(NULL :: Nullable(UInt64)));
+SELECT concat('42', materialize(NULL :: Nullable(UInt64)));
+SELECT concat(42, materialize(NULL :: Nullable(UInt64)), materialize(NULL :: Nullable(UInt64)));
+SELECT concat('42', materialize(NULL :: Nullable(UInt64)), materialize(NULL :: Nullable(UInt64)));
+
+SELECT '-- Various arguments tests';
+SELECT concat(materialize('Non-const'), materialize(' strings'));
+SELECT concat('Two arguments ', 'test');
+SELECT concat('Three ', 'arguments', ' test');
+SELECT concat(materialize(3 :: Int64), ' arguments test', ' with int type');
+SELECT concat(materialize(42 :: Int32), materialize(144 :: UInt64));
+SELECT concat(materialize(42 :: Int32), materialize(144 :: UInt64), materialize(255 :: UInt32));
+SELECT concat(42, 144);
+SELECT concat(42, 144, 255);
+
+SELECT '-- Single argument tests';
+SELECT concat(42);
+SELECT concat(materialize(42));
+SELECT concat('foo');
+SELECT concat(materialize('foo'));
+SELECT concat(NULL);
+SELECT concat(materialize(NULL :: Nullable(UInt64)));
+
+SELECT CONCAT('Testing the ', 'alias');
+
+SELECT '-- Empty argument tests';
+SELECT concat();
+select toTypeName(concat());
diff --git a/parser/testdata/00729_prewhere_array_join/ast.json b/parser/testdata/00729_prewhere_array_join/ast.json
new file mode 100644
index 000000000..ba7069915
--- /dev/null
+++ b/parser/testdata/00729_prewhere_array_join/ast.json
@@ -0,0 +1,25 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "Set"
+        }
+    ],
+
+    "rows": 1,
+
+    "statistics":
+    {
+        "elapsed": 0.001478529,
+        "rows_read": 1,
+        "bytes_read": 11
+    }
+}
diff --git a/parser/testdata/00729_prewhere_array_join/metadata.json b/parser/testdata/00729_prewhere_array_join/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00729_prewhere_array_join/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00729_prewhere_array_join/query.sql b/parser/testdata/00729_prewhere_array_join/query.sql
new file mode 100644
index 000000000..38fe4aed8
--- /dev/null
+++ b/parser/testdata/00729_prewhere_array_join/query.sql
@@ -0,0 +1,33 @@
+SET send_logs_level = 'fatal';
+
+drop table if exists t1_00729;
+set allow_deprecated_syntax_for_merge_tree=1;
+create table t1_00729 (id UInt64, val Array(String),nid UInt64, eDate Date)ENGINE = MergeTree(eDate, (id, eDate), 8192);
+
+insert into t1_00729 (id,val,nid,eDate) values (1,['background','foreground','heading','image'],1,'2018-09-27');
+insert into t1_00729 (id,val,nid,eDate) values (1,['background','foreground','heading','image'],1,'2018-09-27');
+insert into t1_00729 (id,val,nid,eDate) values (2,['background','foreground','heading','image'],1,'2018-09-27');
+insert into t1_00729 (id,val,nid,eDate) values (2,[],2,'2018-09-27');
+insert into t1_00729 (id,val,nid,eDate) values (3,[],4,'2018-09-27');
+insert into t1_00729 (id,val,nid,eDate) values (3,[],5,'2018-09-27');
+insert into t1_00729 (id,val,nid,eDate) values (3,[],6,'2018-09-27');
+insert into t1_00729 (id,val,nid,eDate) values (3,[],7,'2018-09-27');
+insert into t1_00729 (id,val,nid,eDate) values (3,[],8,'2018-09-27');
+
+select arrayJoin(val) as nameGroup6 from t1_00729 prewhere notEmpty(toString(nameGroup6)) group by nameGroup6 order by nameGroup6; -- { serverError ILLEGAL_PREWHERE }
+select arrayJoin(val) as nameGroup6, countDistinct(nid) as rowids from t1_00729 where notEmpty(toString(nameGroup6)) group by nameGroup6 order by nameGroup6;
+select arrayJoin(val) as nameGroup6, countDistinct(nid) as rowids from t1_00729 prewhere notEmpty(toString(nameGroup6)) group by nameGroup6 order by nameGroup6; -- { serverError ILLEGAL_PREWHERE }
+
+drop table t1_00729;
+create table t1_00729 (id UInt64, val Array(String),nid UInt64, eDate Date) ENGINE = MergeTree(eDate, (id, eDate), 8192);
+
+insert into t1_00729 (id,val,nid,eDate) values (1,['background','foreground','heading','image'],1,'2018-09-27');
+insert into t1_00729 (id,val,nid,eDate) values (1,['background','foreground','heading','image'],1,'2018-09-27');
+insert into t1_00729 (id,val,nid,eDate) values (2,['background','foreground','heading','image'],1,'2018-09-27');
+insert into t1_00729 (id,val,nid,eDate) values (2,[],2,'2018-09-27');
+
+select arrayJoin(val) as nameGroup6 from t1_00729 prewhere notEmpty(toString(nameGroup6)) group by nameGroup6 order by nameGroup6; -- { serverError ILLEGAL_PREWHERE }
+select arrayJoin(val) as nameGroup6, countDistinct(nid) as rowids from t1_00729 where notEmpty(toString(nameGroup6)) group by nameGroup6 order by nameGroup6;
+select arrayJoin(val) as nameGroup6, countDistinct(nid) as rowids from t1_00729 prewhere notEmpty(toString(nameGroup6)) group by nameGroup6 order by nameGroup6; -- { serverError ILLEGAL_PREWHERE }
+
+drop table t1_00729;
diff --git a/parser/testdata/00730_unicode_terminal_format/ast.json b/parser/testdata/00730_unicode_terminal_format/ast.json
new file mode 100644
index 000000000..0034b6a97
--- /dev/null
+++ b/parser/testdata/00730_unicode_terminal_format/ast.json
@@ -0,0 +1,25 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "Set"
+        }
+    ],
+
+    "rows": 1,
+
+    "statistics":
+    {
+        "elapsed": 0.001044457,
+        "rows_read": 1,
+        "bytes_read": 11
+    }
+}
diff --git a/parser/testdata/00730_unicode_terminal_format/metadata.json b/parser/testdata/00730_unicode_terminal_format/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00730_unicode_terminal_format/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00730_unicode_terminal_format/query.sql b/parser/testdata/00730_unicode_terminal_format/query.sql
new file mode 100644
index 000000000..19750d13b
--- /dev/null
+++ b/parser/testdata/00730_unicode_terminal_format/query.sql
@@ -0,0 +1,30 @@
+SET output_format_pretty_squash_consecutive_ms = 0;
+SET output_format_pretty_max_column_name_width_cut_to = 0;
+DROP TABLE IF EXISTS unicode;
+
+CREATE TABLE unicode(c1 String, c2 String) ENGINE = Memory;
+INSERT INTO unicode VALUES ('Здравствуйте', 'Этот код можно отредактировать и запустить!');
+INSERT INTO unicode VALUES ('你好', '这段代码是可以编辑并且能够运行的!');
+INSERT INTO unicode VALUES ('Hola', '¡Este código es editable y ejecutable!');
+INSERT INTO unicode VALUES ('Bonjour', 'Ce code est modifiable et exécutable !');
+INSERT INTO unicode VALUES ('Ciao', 'Questo codice è modificabile ed eseguibile!');
+INSERT INTO unicode VALUES ('こんにちは', 'このコードは編集して実行出来ます!');
+INSERT INTO unicode VALUES ('안녕하세요', '여기에서 코드를 수정하고 실행할 수 있습니다!');
+INSERT INTO unicode VALUES ('Cześć', 'Ten kod można edytować oraz uruchomić!');
+INSERT INTO unicode VALUES ('Olá', 'Este código é editável e executável!');
+INSERT INTO unicode VALUES ('Chào bạn', 'Bạn có thể edit và run code trực tiếp!');
+INSERT INTO unicode VALUES ('Hallo', 'Dieser Code kann bearbeitet und ausgeführt werden!');
+INSERT INTO unicode VALUES ('Hej', 'Den här koden kan redigeras och köras!');
+INSERT INTO unicode VALUES ('Ahoj', 'Tento kód můžete upravit a spustit');
+INSERT INTO unicode VALUES ('Tabs \t Tabs', 'Non-first \t Tabs');
+INSERT INTO unicode VALUES ('Control characters \x1f\x1f\x1f\x1f with zero width', 'Invalid UTF-8 which eats pending characters \xf0, or invalid by itself \x80 with zero width');
+INSERT INTO unicode VALUES ('Russian ё and ё ', 'Zero bytes \0 \0 in middle');
+SELECT * FROM unicode SETTINGS max_threads = 1 FORMAT PrettyNoEscapes;
+SELECT 'Tabs \t Tabs', 'Long\tTitle' FORMAT PrettyNoEscapes;
+
+SELECT '你好', '世界' FORMAT Vertical;
+SELECT 'Tabs \t Tabs', 'Non-first \t Tabs' FORMAT Vertical;
+SELECT 'Control characters \x1f\x1f\x1f\x1f with zero width', 'Invalid UTF-8 which eats pending characters \xf0, and invalid by itself \x80 with zero width' FORMAT Vertical;
+SELECT 'Russian ё and ё', 'Zero bytes \0 \0 in middle' FORMAT Vertical;
+
+DROP TABLE IF EXISTS unicode;
diff --git a/parser/testdata/00732_base64_functions/ast.json b/parser/testdata/00732_base64_functions/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00732_base64_functions/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00732_base64_functions/metadata.json b/parser/testdata/00732_base64_functions/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00732_base64_functions/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00732_base64_functions/query.sql b/parser/testdata/00732_base64_functions/query.sql
new file mode 100644
index 000000000..b4be8db4e
--- /dev/null
+++ b/parser/testdata/00732_base64_functions/query.sql
@@ -0,0 +1,35 @@
+-- Tags: no-fasttest
+-- no-fasttest because aklomp-base64 library is required
+
+SELECT base64Encode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
+SELECT base64Decode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
+SELECT tryBase64Decode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
+SELECT base64Encode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
+SELECT base64Decode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
+SELECT tryBase64Decode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
+
+-- test with valid inputs
+
+SELECT base64Encode(val) FROM (select arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar']) val);
+SELECT base64Decode(val) FROM (select arrayJoin(['', 'Zg==', 'Zm8=', 'Zm9v', 'Zm9vYg==', 'Zm9vYmE=', 'Zm9vYmFy']) val);
+SELECT tryBase64Decode(val) FROM (select arrayJoin(['', 'Zg==', 'Zm8=', 'Zm9v', 'Zm9vYg==', 'Zm9vYmE=', 'Zm9vYmFy']) val);
+
+SELECT base64Decode(base64Encode('foo')) = 'foo', base64Encode(base64Decode('Zm9v')) == 'Zm9v';
+SELECT tryBase64Decode(base64Encode('foo')) = 'foo', base64Encode(tryBase64Decode('Zm9v')) == 'Zm9v';
+
+-- test with invalid inputs
+
+SELECT base64Decode('Zm9vYmF=Zm9v'); -- { serverError INCORRECT_DATA }
+SELECT tryBase64Decode('Zm9vYmF=Zm9v');
+
+SELECT base64Decode('foo'); -- { serverError INCORRECT_DATA }
+SELECT tryBase64Decode('foo');
+
+SELECT base64Decode('aoeo054640eu='); -- { serverError INCORRECT_DATA }
+SELECT tryBase64Decode('aoeo054640eu=');
+
+-- test FixedString arguments
+
+select base64Encode(toFixedString('foo', 3));
+select base64Decode(toFixedString('Zm9v', 4));
+select tryBase64Decode(toFixedString('Zm9v', 4));
diff --git a/parser/testdata/00732_decimal_summing_merge_tree/ast.json b/parser/testdata/00732_decimal_summing_merge_tree/ast.json
new file mode 100644
index 000000000..ab1827a9a
--- /dev/null
+++ b/parser/testdata/00732_decimal_summing_merge_tree/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery decimal_sum (children 1)"
+        },
+        {
+            "explain": " Identifier decimal_sum"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001148066,
+        "rows_read": 2,
+        "bytes_read": 74
+    }
+}
diff --git a/parser/testdata/00732_decimal_summing_merge_tree/metadata.json b/parser/testdata/00732_decimal_summing_merge_tree/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00732_decimal_summing_merge_tree/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00732_decimal_summing_merge_tree/query.sql b/parser/testdata/00732_decimal_summing_merge_tree/query.sql
new file mode 100644
index 000000000..96e16a130
--- /dev/null
+++ b/parser/testdata/00732_decimal_summing_merge_tree/query.sql
@@ -0,0 +1,27 @@
+DROP TABLE IF EXISTS decimal_sum;
+set allow_deprecated_syntax_for_merge_tree=1;
+CREATE TABLE decimal_sum
+(
+    date Date,
+    sum32 Decimal32(4),
+    sum64 Decimal64(8),
+    sum128 Decimal128(10)
+) Engine = SummingMergeTree(date, (date), 8192);
+
+INSERT INTO decimal_sum VALUES ('2001-01-01', 1, 1, -1);
+INSERT INTO decimal_sum VALUES ('2001-01-01', 1, -1, -1);
+
+OPTIMIZE TABLE decimal_sum;
+SELECT * FROM decimal_sum;
+
+INSERT INTO decimal_sum VALUES ('2001-01-01', -2, 1, 2);
+
+OPTIMIZE TABLE decimal_sum;
+SELECT * FROM decimal_sum;
+
+INSERT INTO decimal_sum VALUES ('2001-01-01', 0, -1, 0);
+
+OPTIMIZE TABLE decimal_sum;
+SELECT * FROM decimal_sum;
+
+drop table decimal_sum;
diff --git a/parser/testdata/00732_quorum_insert_have_data_before_quorum_zookeeper_long/ast.json b/parser/testdata/00732_quorum_insert_have_data_before_quorum_zookeeper_long/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00732_quorum_insert_have_data_before_quorum_zookeeper_long/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00732_quorum_insert_have_data_before_quorum_zookeeper_long/metadata.json b/parser/testdata/00732_quorum_insert_have_data_before_quorum_zookeeper_long/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00732_quorum_insert_have_data_before_quorum_zookeeper_long/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00732_quorum_insert_have_data_before_quorum_zookeeper_long/query.sql b/parser/testdata/00732_quorum_insert_have_data_before_quorum_zookeeper_long/query.sql
new file mode 100644
index 000000000..35b00a884
--- /dev/null
+++ b/parser/testdata/00732_quorum_insert_have_data_before_quorum_zookeeper_long/query.sql
@@ -0,0 +1,34 @@
+-- Tags: long, zookeeper, no-replicated-database, no-shared-merge-tree
+-- Tag no-replicated-database: Fails due to additional replicas or shards
+-- Tag no-shared-merge-tree: no-shared-merge-tree: No quorum
+
+SET send_logs_level = 'fatal';
+
+DROP TABLE IF EXISTS quorum1;
+DROP TABLE IF EXISTS quorum2;
+
+CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum_have_data', '1') ORDER BY x PARTITION BY y;
+CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum_have_data', '2') ORDER BY x PARTITION BY y;
+
+INSERT INTO quorum1 VALUES (1, '1990-11-15');
+INSERT INTO quorum1 VALUES (2, '1990-11-15');
+INSERT INTO quorum1 VALUES (3, '2020-12-16');
+
+SYSTEM SYNC REPLICA quorum2;
+
+SET select_sequential_consistency=1;
+
+SELECT x FROM quorum1 ORDER BY x;
+SELECT x FROM quorum2 ORDER BY x;
+
+SET insert_quorum=2, insert_quorum_parallel=0;
+
+INSERT INTO quorum1 VALUES (4, '1990-11-15');
+INSERT INTO quorum1 VALUES (5, '1990-11-15');
+INSERT INTO quorum1 VALUES (6, '2020-12-16');
+
+SELECT x FROM quorum1 ORDER BY x;
+SELECT x FROM quorum2 ORDER BY x;
+
+DROP TABLE quorum1;
+DROP TABLE quorum2;
diff --git a/parser/testdata/00732_quorum_insert_lost_part_and_alive_part_zookeeper_long/ast.json b/parser/testdata/00732_quorum_insert_lost_part_and_alive_part_zookeeper_long/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00732_quorum_insert_lost_part_and_alive_part_zookeeper_long/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00732_quorum_insert_lost_part_and_alive_part_zookeeper_long/metadata.json b/parser/testdata/00732_quorum_insert_lost_part_and_alive_part_zookeeper_long/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00732_quorum_insert_lost_part_and_alive_part_zookeeper_long/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00732_quorum_insert_lost_part_and_alive_part_zookeeper_long/query.sql b/parser/testdata/00732_quorum_insert_lost_part_and_alive_part_zookeeper_long/query.sql
new file mode 100644
index 000000000..865b56357
--- /dev/null
+++ b/parser/testdata/00732_quorum_insert_lost_part_and_alive_part_zookeeper_long/query.sql
@@ -0,0 +1,42 @@
+-- Tags: long, zookeeper, no-replicated-database, no-shared-merge-tree
+-- Tag no-replicated-database: Fails due to additional replicas or shards
+-- Tag no-shared-merge-tree: no-shared-merge-tree: No quorum
+
+SET send_logs_level = 'fatal';
+
+DROP TABLE IF EXISTS quorum1;
+DROP TABLE IF EXISTS quorum2;
+
+CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum_lost_alive', '1') ORDER BY x PARTITION BY y;
+CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum_lost_alive', '2') ORDER BY x PARTITION BY y;
+
+SET insert_quorum=2, insert_quorum_parallel=0;
+SET select_sequential_consistency=1;
+
+INSERT INTO quorum1 VALUES (1, '2018-11-15');
+INSERT INTO quorum1 VALUES (2, '2018-11-15');
+INSERT INTO quorum1 VALUES (3, '2018-12-16');
+
+SET insert_quorum_timeout=0;
+
+SYSTEM STOP FETCHES quorum1;
+
+INSERT INTO quorum2 VALUES (4, toDate('2018-12-16')); -- { serverError UNKNOWN_STATUS_OF_INSERT }
+
+SELECT x FROM quorum1 ORDER BY x;
+SELECT x FROM quorum2 ORDER BY x;
+
+SET select_sequential_consistency=0;
+
+SELECT x FROM quorum2 ORDER BY x;
+
+SET select_sequential_consistency=1;
+
+SYSTEM START FETCHES quorum1;
+SYSTEM SYNC REPLICA quorum1;
+
+SELECT x FROM quorum1 ORDER BY x;
+SELECT x FROM quorum2 ORDER BY x;
+
+DROP TABLE quorum1;
+DROP TABLE quorum2;
diff --git a/parser/testdata/00732_quorum_insert_lost_part_zookeeper_long/ast.json b/parser/testdata/00732_quorum_insert_lost_part_zookeeper_long/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00732_quorum_insert_lost_part_zookeeper_long/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00732_quorum_insert_lost_part_zookeeper_long/metadata.json b/parser/testdata/00732_quorum_insert_lost_part_zookeeper_long/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00732_quorum_insert_lost_part_zookeeper_long/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00732_quorum_insert_lost_part_zookeeper_long/query.sql b/parser/testdata/00732_quorum_insert_lost_part_zookeeper_long/query.sql
new file mode 100644
index 000000000..11f258f58
--- /dev/null
+++ b/parser/testdata/00732_quorum_insert_lost_part_zookeeper_long/query.sql
@@ -0,0 +1,39 @@
+-- Tags: long, zookeeper, no-replicated-database, no-shared-merge-tree
+-- Tag no-replicated-database: Fails due to additional replicas or shards
+-- Tag no-shared-merge-tree: no-shared-merge-tree: No quorum
+
+SET send_logs_level = 'fatal';
+
+DROP TABLE IF EXISTS quorum1;
+DROP TABLE IF EXISTS quorum2;
+
+CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum_lost', '1') ORDER BY x PARTITION BY y;
+CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum_lost', '2') ORDER BY x PARTITION BY y;
+
+SET insert_quorum=2, insert_quorum_parallel=0;
+SET select_sequential_consistency=1;
+
+SET insert_quorum_timeout=0;
+
+SYSTEM STOP FETCHES quorum1;
+
+INSERT INTO quorum2 VALUES (1, '2018-11-15'); -- { serverError UNKNOWN_STATUS_OF_INSERT }
+
+SELECT count(*) FROM quorum1;
+SELECT count(*) FROM quorum2;
+
+SET select_sequential_consistency=0;
+
+SELECT x FROM quorum2 ORDER BY x;
+SET select_sequential_consistency=1;
+
+SET insert_quorum_timeout=100;
+
+SYSTEM START FETCHES quorum1;
+SYSTEM SYNC REPLICA quorum1;
+
+SELECT x FROM quorum1 ORDER BY x;
+SELECT x FROM quorum2 ORDER BY x;
+
+DROP TABLE quorum1;
+DROP TABLE quorum2;
diff --git a/parser/testdata/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper_long/ast.json b/parser/testdata/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper_long/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper_long/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper_long/metadata.json b/parser/testdata/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper_long/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper_long/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper_long/query.sql b/parser/testdata/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper_long/query.sql
new file mode 100644
index 000000000..e56565949
--- /dev/null
+++ b/parser/testdata/00732_quorum_insert_select_with_old_data_and_without_quorum_zookeeper_long/query.sql
@@ -0,0 +1,32 @@
+-- Tags: long, zookeeper, no-replicated-database, no-shared-merge-tree
+-- Tag no-replicated-database: Fails due to additional replicas or shards
+-- Tag no-shared-merge-tree: no-shared-merge-tree: No quorum
+
+SET send_logs_level = 'fatal';
+
+DROP TABLE IF EXISTS quorum1;
+DROP TABLE IF EXISTS quorum2;
+
+CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum_old_data', '1') ORDER BY x PARTITION BY y;
+CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum_old_data', '2') ORDER BY x PARTITION BY y;
+
+INSERT INTO quorum1 VALUES (1, '1990-11-15');
+INSERT INTO quorum1 VALUES (2, '1990-11-15');
+INSERT INTO quorum1 VALUES (3, '2020-12-16');
+
+SYSTEM SYNC REPLICA quorum2;
+
+SET select_sequential_consistency=1;
+SET insert_quorum=2, insert_quorum_parallel=0;
+
+SET insert_quorum_timeout=0;
+
+SYSTEM STOP FETCHES quorum1;
+
+INSERT INTO quorum2 VALUES (4, toDate('2020-12-16')); -- { serverError UNKNOWN_STATUS_OF_INSERT }
+
+SELECT x FROM quorum1 ORDER BY x;
+SELECT x FROM quorum2 ORDER BY x;
+
+DROP TABLE quorum1;
+DROP TABLE quorum2;
diff --git a/parser/testdata/00732_quorum_insert_simple_test_1_parts_zookeeper_long/ast.json b/parser/testdata/00732_quorum_insert_simple_test_1_parts_zookeeper_long/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00732_quorum_insert_simple_test_1_parts_zookeeper_long/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00732_quorum_insert_simple_test_1_parts_zookeeper_long/metadata.json b/parser/testdata/00732_quorum_insert_simple_test_1_parts_zookeeper_long/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00732_quorum_insert_simple_test_1_parts_zookeeper_long/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00732_quorum_insert_simple_test_1_parts_zookeeper_long/query.sql b/parser/testdata/00732_quorum_insert_simple_test_1_parts_zookeeper_long/query.sql
new file mode 100644
index 000000000..86c55359d
--- /dev/null
+++ b/parser/testdata/00732_quorum_insert_simple_test_1_parts_zookeeper_long/query.sql
@@ -0,0 +1,38 @@
+-- Tags: long, zookeeper, no-replicated-database, no-shared-merge-tree
+-- Tag no-replicated-database: Fails due to additional replicas or shards
+-- Tag no-shared-merge-tree: no-shared-merge-tree: No quorum
+
+SET send_logs_level = 'fatal';
+
+DROP TABLE IF EXISTS quorum1;
+DROP TABLE IF EXISTS quorum2;
+
+CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum1', '1') ORDER BY x PARTITION BY y;
+CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum1', '2') ORDER BY x PARTITION BY y;
+
+SET insert_quorum=2, insert_quorum_parallel=0;
+SET select_sequential_consistency=1;
+
+INSERT INTO quorum1 VALUES (1, '2018-11-15');
+INSERT INTO quorum1 VALUES (2, '2018-11-15');
+
+SELECT x FROM quorum1 ORDER BY x;
+SELECT x FROM quorum2 ORDER BY x;
+
+OPTIMIZE TABLE quorum1 PARTITION '2018-11-15' FINAL;
+
+-- everything works fine after merge
+SELECT x FROM quorum1 ORDER BY x;
+SELECT x FROM quorum2 ORDER BY x;
+
+SELECT count(*) FROM system.parts WHERE active AND database = currentDatabase() AND table='quorum1';
+
+INSERT INTO quorum1 VALUES (3, '2018-11-15');
+INSERT INTO quorum1 VALUES (4, '2018-11-15');
+
+-- and after we add new parts
+SELECT sum(x) FROM quorum1;
+SELECT sum(x) FROM quorum2;
+
+DROP TABLE quorum1;
+DROP TABLE quorum2;
diff --git a/parser/testdata/00732_quorum_insert_simple_test_2_parts_zookeeper_long/ast.json b/parser/testdata/00732_quorum_insert_simple_test_2_parts_zookeeper_long/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00732_quorum_insert_simple_test_2_parts_zookeeper_long/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00732_quorum_insert_simple_test_2_parts_zookeeper_long/metadata.json b/parser/testdata/00732_quorum_insert_simple_test_2_parts_zookeeper_long/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00732_quorum_insert_simple_test_2_parts_zookeeper_long/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00732_quorum_insert_simple_test_2_parts_zookeeper_long/query.sql b/parser/testdata/00732_quorum_insert_simple_test_2_parts_zookeeper_long/query.sql
new file mode 100644
index 000000000..2d4c42941
--- /dev/null
+++ b/parser/testdata/00732_quorum_insert_simple_test_2_parts_zookeeper_long/query.sql
@@ -0,0 +1,24 @@
+-- Tags: long, zookeeper, no-replicated-database, no-shared-merge-tree
+-- Tag no-replicated-database: Fails due to additional replicas or shards
+-- Tag no-shared-merge-tree: no-shared-merge-tree: No quorum
+
+SET send_logs_level = 'fatal';
+
+DROP TABLE IF EXISTS quorum1;
+DROP TABLE IF EXISTS quorum2;
+
+CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum2', '1') ORDER BY x PARTITION BY y;
+CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00732/quorum2', '2') ORDER BY x PARTITION BY y;
+
+SET insert_quorum=2, insert_quorum_parallel=0;
+SET select_sequential_consistency=1;
+
+INSERT INTO quorum1 VALUES (1, '2018-11-15');
+INSERT INTO quorum1 VALUES (2, '2018-11-15');
+INSERT INTO quorum1 VALUES (3, '2018-12-16');
+
+SELECT x FROM quorum1 ORDER BY x;
+SELECT x FROM quorum2 ORDER BY x;
+
+DROP TABLE quorum1;
+DROP TABLE quorum2;
diff --git a/parser/testdata/00733_if_datetime/ast.json b/parser/testdata/00733_if_datetime/ast.json
new file mode 100644
index 000000000..3664bb5c6
--- /dev/null
+++ b/parser/testdata/00733_if_datetime/ast.json
@@ -0,0 +1,88 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 2)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Function if (children 1)"
+        },
+        {
+            "explain": "     ExpressionList (children 3)"
+        },
+        {
+            "explain": "      Function modulo (children 1)"
+        },
+        {
+            "explain": "       ExpressionList (children 2)"
+        },
+        {
+            "explain": "        Identifier number"
+        },
+        {
+            "explain": "        Literal UInt64_2"
+        },
+        {
+            "explain": "      Function toDateTime (children 1)"
+        },
+        {
+            "explain": "       ExpressionList (children 1)"
+        },
+        {
+            "explain": "        Literal '2000-01-01 00:00:00'"
+        },
+        {
+            "explain": "      Function toDateTime (children 1)"
+        },
+        {
+            "explain": "       ExpressionList (children 1)"
+        },
+        {
+            "explain": "        Literal '2001-02-03 04:05:06'"
+        },
+        {
+            "explain": "   TablesInSelectQuery (children 1)"
+        },
+        {
+            "explain": "    TablesInSelectQueryElement (children 1)"
+        },
+        {
+            "explain": "     TableExpression (children 1)"
+        },
+        {
+            "explain": "      Function numbers (children 1)"
+        },
+        {
+            "explain": "       ExpressionList (children 1)"
+        },
+        {
+            "explain": "        Literal UInt64_2"
+        }
+    ],
+
+    "rows": 22,
+
+    "statistics":
+    {
+        "elapsed": 0.001109087,
+        "rows_read": 22,
+        "bytes_read": 892
+    }
+}
diff --git a/parser/testdata/00733_if_datetime/metadata.json b/parser/testdata/00733_if_datetime/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00733_if_datetime/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00733_if_datetime/query.sql b/parser/testdata/00733_if_datetime/query.sql
new file mode 100644
index 000000000..23ebc3d8a
--- /dev/null
+++ b/parser/testdata/00733_if_datetime/query.sql
@@ -0,0 +1,9 @@
+SELECT number % 2 ? toDateTime('2000-01-01 00:00:00') : toDateTime('2001-02-03 04:05:06') FROM numbers(2);
+SELECT number % 2 ? toDateTime('2000-01-01 00:00:00') : materialize(toDateTime('2001-02-03 04:05:06')) FROM numbers(2);
+SELECT number % 2 ? materialize(toDateTime('2000-01-01 00:00:00')) : toDateTime('2001-02-03 04:05:06') FROM numbers(2);
+SELECT number % 2 ? materialize(toDateTime('2000-01-01 00:00:00')) : materialize(toDateTime('2001-02-03 04:05:06')) FROM numbers(2);
+
+SELECT number % 2 ? toDate('2000-01-01') : toDate('2001-02-03') FROM numbers(2);
+SELECT number % 2 ? toDate('2000-01-01') : materialize(toDate('2001-02-03')) FROM numbers(2);
+SELECT number % 2 ? materialize(toDate('2000-01-01')) : toDate('2001-02-03') FROM numbers(2);
+SELECT number % 2 ? materialize(toDate('2000-01-01')) : materialize(toDate('2001-02-03')) FROM numbers(2);
diff --git a/parser/testdata/00734_timeslot/ast.json b/parser/testdata/00734_timeslot/ast.json
new file mode 100644
index 000000000..7ff5f9bf9
--- /dev/null
+++ b/parser/testdata/00734_timeslot/ast.json
@@ -0,0 +1,52 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 1)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Function timeSlot (children 1)"
+        },
+        {
+            "explain": "     ExpressionList (children 1)"
+        },
+        {
+            "explain": "      Function toDateTime (children 1)"
+        },
+        {
+            "explain": "       ExpressionList (children 2)"
+        },
+        {
+            "explain": "        Literal '2000-01-02 03:04:05'"
+        },
+        {
+            "explain": "        Literal 'UTC'"
+        }
+    ],
+
+    "rows": 10,
+
+    "statistics":
+    {
+        "elapsed": 0.001282468,
+        "rows_read": 10,
+        "bytes_read": 393
+    }
+}
diff --git a/parser/testdata/00734_timeslot/metadata.json b/parser/testdata/00734_timeslot/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00734_timeslot/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00734_timeslot/query.sql b/parser/testdata/00734_timeslot/query.sql
new file mode 100644
index 000000000..25be570bd
--- /dev/null
+++ b/parser/testdata/00734_timeslot/query.sql
@@ -0,0 +1,7 @@
+SELECT timeSlot(toDateTime('2000-01-02 03:04:05', 'UTC'));
+SELECT timeSlots(toDateTime('2000-01-02 03:04:05', 'UTC'), toUInt32(10000));
+SELECT timeSlots(toDateTime('2000-01-02 03:04:05', 'UTC'), toUInt32(10000), 600);
+SELECT timeSlots(toDateTime('2000-01-02 03:04:05', 'UTC'), toUInt32(600), 30);
+SELECT timeSlots(toDateTime('2000-01-02 03:04:05', 'UTC'), 'wrong argument'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT timeSlots(toDateTime('2000-01-02 03:04:05', 'UTC'), toUInt32(600), 'wrong argument'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT timeSlots(toDateTime('2000-01-02 03:04:05', 'UTC'), toUInt32(600), 0); -- { serverError ILLEGAL_COLUMN }
\ No newline at end of file
diff --git a/parser/testdata/00735_long_conditional/ast.json b/parser/testdata/00735_long_conditional/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/00735_long_conditional/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/00735_long_conditional/metadata.json b/parser/testdata/00735_long_conditional/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00735_long_conditional/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00735_long_conditional/query.sql b/parser/testdata/00735_long_conditional/query.sql
new file mode 100644
index 000000000..25f7fbaf8
--- /dev/null
+++ b/parser/testdata/00735_long_conditional/query.sql
@@ -0,0 +1,327 @@
+-- Tags: long
+
+SET send_logs_level = 'fatal';
+
+SELECT 'value vs value';
+
+SELECT toInt8(0) AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt8(0) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt8(0) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt8(0) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt8(0) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt8(0) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt8(0) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt8(0) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toInt8(0) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt8(0) AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt8(0) AS x, toDate(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toInt8(0) AS x, toDateTime(1, 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toInt8(0) AS x, toDecimal32(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt8(0) AS x, toDecimal64(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt8(0) AS x, toDecimal128(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+
+SELECT toInt16(0) AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt16(0) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt16(0) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt16(0) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt16(0) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt16(0) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt16(0) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt16(0) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toInt16(0) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt16(0) AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt16(0) AS x, toDate(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toInt16(0) AS x, toDateTime(1, 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toInt16(0) AS x, toDecimal32(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt16(0) AS x, toDecimal64(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt16(0) AS x, toDecimal128(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+
+SELECT toInt32(0) AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt32(0) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt32(0) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt32(0) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt32(0) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt32(0) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt32(0) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt32(0) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toInt32(0) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt32(0) AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt32(0) AS x, toDate(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toInt32(0) AS x, toDateTime(1, 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toInt32(0) AS x, toDecimal32(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt32(0) AS x, toDecimal64(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt32(0) AS x, toDecimal128(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+
+SELECT toInt64(0) AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt64(0) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt64(0) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt64(0) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt64(0) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt64(0) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt64(0) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt64(0) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toInt64(0) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toInt64(0) AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toInt64(0) AS x, toDate(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toInt64(0) AS x, toDateTime(1, 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toInt64(0) AS x, toDecimal32(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt64(0) AS x, toDecimal64(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toInt64(0) AS x, toDecimal128(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+
+SELECT toUInt8(0) AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt8(0) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt8(0) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt8(0) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt8(0) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt8(0) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt8(0) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt8(0) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt8(0) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt8(0) AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt8(0) AS x, toDate(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toUInt8(0) AS x, toDateTime(1, 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toUInt8(0) AS x, toDecimal32(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt8(0) AS x, toDecimal64(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt8(0) AS x, toDecimal128(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+
+SELECT toUInt16(0) AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt16(0) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt16(0) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt16(0) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt16(0) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt16(0) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt16(0) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt16(0) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt16(0) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt16(0) AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt16(0) AS x, toDate(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toUInt16(0) AS x, toDateTime(1, 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toUInt16(0) AS x, toDecimal32(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt16(0) AS x, toDecimal64(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt16(0) AS x, toDecimal128(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+
+SELECT toUInt32(0) AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt32(0) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt32(0) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt32(0) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt32(0) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt32(0) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt32(0) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt32(0) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt32(0) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt32(0) AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt32(0) AS x, toDate(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toUInt32(0) AS x, toDateTime(1, 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toUInt32(0) AS x, toDecimal32(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt32(0) AS x, toDecimal64(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt32(0) AS x, toDecimal128(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+
+SELECT toUInt64(0) AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toUInt64(0) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toUInt64(0) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toUInt64(0) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toUInt64(0) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt64(0) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt64(0) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt64(0) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt64(0) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toUInt64(0) AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toUInt64(0) AS x, toDate(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toUInt64(0) AS x, toDateTime(1, 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toUInt64(0) AS x, toDecimal32(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt64(0) AS x, toDecimal64(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toUInt64(0) AS x, toDecimal128(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+
+SELECT toDate(0) AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toDate(0) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toDate(0) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toDate(0) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toDate(0) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toDate(0) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toDate(0) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toDate(0) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toDate(0) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toDate(0) AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toDate(0) AS x, toDate(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toDate('2000-01-01') AS x, toDateTime('2000-01-01 00:00:01', 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toDate(0) AS x, toDecimal32(1, 0) AS y, ((x = 0) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toDate(0) AS x, toDecimal64(1, 0) AS y, ((x = 0) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT toDate(0) AS x, toDecimal128(1, 0) AS y, ((x = 0) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+
+SELECT toDateTime(0, 'Asia/Istanbul') AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toDateTime(0, 'Asia/Istanbul') AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toDateTime(0, 'Asia/Istanbul') AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toDateTime(0, 'Asia/Istanbul') AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toDateTime(0, 'Asia/Istanbul') AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toDateTime(0, 'Asia/Istanbul') AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toDateTime(0, 'Asia/Istanbul') AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toDateTime(0, 'Asia/Istanbul') AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toDateTime(0, 'Asia/Istanbul') AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toDateTime(0, 'Asia/Istanbul') AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul') AS x, toDate('2000-01-02') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toDateTime(0, 'Asia/Istanbul') AS x, toDateTime(1, 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT toDateTime(0, 'Asia/Istanbul') AS x, toDecimal32(1, 0) AS y, ((x = 0) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toDateTime(0, 'Asia/Istanbul') AS x, toDecimal64(1, 0) AS y, ((x = 0) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT toDateTime(0, 'Asia/Istanbul') AS x, toDecimal128(1, 0) AS y, ((x = 0) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+
+SELECT 'column vs value';
+
+SELECT materialize(toInt8(0)) AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt8(0)) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt8(0)) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt8(0)) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt8(0)) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt8(0)) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt8(0)) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt8(0)) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT materialize(toInt8(0)) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt8(0)) AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt8(0)) AS x, toDate(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT materialize(toInt8(0)) AS x, toDateTime(1, 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT materialize(toInt8(0)) AS x, toDecimal32(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt8(0)) AS x, toDecimal64(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt8(0)) AS x, toDecimal128(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+
+SELECT materialize(toInt16(0)) AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt16(0)) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt16(0)) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt16(0)) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt16(0)) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt16(0)) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt16(0)) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt16(0)) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT materialize(toInt16(0)) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt16(0)) AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt16(0)) AS x, toDate(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT materialize(toInt16(0)) AS x, toDateTime(1, 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT materialize(toInt16(0)) AS x, toDecimal32(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt16(0)) AS x, toDecimal64(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt16(0)) AS x, toDecimal128(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+
+SELECT materialize(toInt32(0)) AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt32(0)) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt32(0)) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt32(0)) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt32(0)) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt32(0)) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt32(0)) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt32(0)) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT materialize(toInt32(0)) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt32(0)) AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt32(0)) AS x, toDate(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT materialize(toInt32(0)) AS x, toDateTime(1, 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT materialize(toInt32(0)) AS x, toDecimal32(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt32(0)) AS x, toDecimal64(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt32(0)) AS x, toDecimal128(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+
+SELECT materialize(toInt64(0)) AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt64(0)) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt64(0)) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt64(0)) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt64(0)) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt64(0)) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt64(0)) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt64(0)) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT materialize(toInt64(0)) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT materialize(toInt64(0)) AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT materialize(toInt64(0)) AS x, toDate(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT materialize(toInt64(0)) AS x, toDateTime(1, 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT materialize(toInt64(0)) AS x, toDecimal32(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt64(0)) AS x, toDecimal64(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toInt64(0)) AS x, toDecimal128(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+
+SELECT materialize(toUInt8(0)) AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt8(0)) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt8(0)) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt8(0)) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt8(0)) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt8(0)) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt8(0)) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt8(0)) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt8(0)) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt8(0)) AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt8(0)) AS x, toDate(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT materialize(toUInt8(0)) AS x, toDateTime(1, 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT materialize(toUInt8(0)) AS x, toDecimal32(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt8(0)) AS x, toDecimal64(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt8(0)) AS x, toDecimal128(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+
+SELECT materialize(toUInt16(0)) AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt16(0)) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt16(0)) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt16(0)) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt16(0)) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt16(0)) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt16(0)) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt16(0)) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt16(0)) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt16(0)) AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt16(0)) AS x, toDate(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT materialize(toUInt16(0)) AS x, toDateTime(1, 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT materialize(toUInt16(0)) AS x, toDecimal32(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt16(0)) AS x, toDecimal64(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt16(0)) AS x, toDecimal128(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+
+SELECT materialize(toUInt32(0)) AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt32(0)) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt32(0)) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt32(0)) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt32(0)) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt32(0)) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt32(0)) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt32(0)) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt32(0)) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt32(0)) AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt32(0)) AS x, toDate(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT materialize(toUInt32(0)) AS x, toDateTime(1, 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT materialize(toUInt32(0)) AS x, toDecimal32(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt32(0)) AS x, toDecimal64(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt32(0)) AS x, toDecimal128(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+
+SELECT materialize(toUInt64(0)) AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT materialize(toUInt64(0)) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT materialize(toUInt64(0)) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT materialize(toUInt64(0)) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT materialize(toUInt64(0)) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt64(0)) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt64(0)) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt64(0)) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z);
+SELECT materialize(toUInt64(0)) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE }
+SELECT materialize(toUInt64(0)) AS x, toFloat64(1) AS y, ((x > y) ? 
x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE } +SELECT materialize(toUInt64(0)) AS x, toDate(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT materialize(toUInt64(0)) AS x, toDateTime(1, 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE } +SELECT materialize(toUInt64(0)) AS x, toDecimal32(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); +SELECT materialize(toUInt64(0)) AS x, toDecimal64(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); +SELECT materialize(toUInt64(0)) AS x, toDecimal128(1, 0) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); + +SELECT materialize(toDate(0)) AS x, toInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT materialize(toDate(0)) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT materialize(toDate(0)) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT materialize(toDate(0)) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT materialize(toDate(0)) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT materialize(toDate(0)) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT materialize(toDate(0)) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT materialize(toDate(0)) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT materialize(toDate(0)) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT materialize(toDate(0)) AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT materialize(toDate(0)) AS x, toDate(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); +SELECT materialize(toDate('2000-01-01')) AS x, toDateTime('2000-01-01 00:00:01', 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); +SELECT materialize(toDate(0)) AS x, toDecimal32(1, 0) AS y, ((x = 0) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT materialize(toDate(0)) AS x, toDecimal64(1, 0) AS y, ((x = 0) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT materialize(toDate(0)) AS x, toDecimal128(1, 0) AS y, ((x = 0) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT materialize(toDateTime(0, 'Asia/Istanbul')) AS x, toInt8(1) AS y, ((x > y) ? 
x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE } +SELECT materialize(toDateTime(0, 'Asia/Istanbul')) AS x, toInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE } +SELECT materialize(toDateTime(0, 'Asia/Istanbul')) AS x, toInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE } +SELECT materialize(toDateTime(0, 'Asia/Istanbul')) AS x, toInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE } +SELECT materialize(toDateTime(0, 'Asia/Istanbul')) AS x, toUInt8(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE } +SELECT materialize(toDateTime(0, 'Asia/Istanbul')) AS x, toUInt16(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE } +SELECT materialize(toDateTime(0, 'Asia/Istanbul')) AS x, toUInt32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE } +SELECT materialize(toDateTime(0, 'Asia/Istanbul')) AS x, toUInt64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE } +SELECT materialize(toDateTime(0, 'Asia/Istanbul')) AS x, toFloat32(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE } +SELECT materialize(toDateTime(0, 'Asia/Istanbul')) AS x, toFloat64(1) AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE } +SELECT materialize(toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul')) AS x, toDate('2000-01-02') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); +SELECT materialize(toDateTime(0, 'Asia/Istanbul')) AS x, toDateTime(1, 'Asia/Istanbul') AS y, ((x > y) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); +SELECT materialize(toDateTime(0, 'Asia/Istanbul')) AS x, toDecimal32(1, 0) AS y, ((x = 0) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE } +SELECT materialize(toDateTime(0, 'Asia/Istanbul')) AS x, toDecimal64(1, 0) AS y, ((x = 0) ? x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE } +SELECT materialize(toDateTime(0, 'Asia/Istanbul')) AS x, toDecimal128(1, 0) AS y, ((x = 0) ? 
x : y) AS z, toTypeName(x), toTypeName(y), toTypeName(z); -- { serverError NO_COMMON_TYPE } diff --git a/parser/testdata/00735_or_expr_optimize_bug/ast.json b/parser/testdata/00735_or_expr_optimize_bug/ast.json new file mode 100644 index 000000000..bfb1f878f --- /dev/null +++ b/parser/testdata/00735_or_expr_optimize_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery or_expr_bug (children 1)" + }, + { + "explain": " Identifier or_expr_bug" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001288478, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/00735_or_expr_optimize_bug/metadata.json b/parser/testdata/00735_or_expr_optimize_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00735_or_expr_optimize_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00735_or_expr_optimize_bug/query.sql b/parser/testdata/00735_or_expr_optimize_bug/query.sql new file mode 100644 index 000000000..0004ee51a --- /dev/null +++ b/parser/testdata/00735_or_expr_optimize_bug/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS or_expr_bug; +CREATE TABLE or_expr_bug (a UInt64, b UInt64) ENGINE = Memory; + +INSERT INTO or_expr_bug VALUES(1,21),(1,22),(1,23),(2,21),(2,22),(2,23),(3,21),(3,22),(3,23); + +SELECT count(*) FROM or_expr_bug WHERE (a=1 OR a=2 OR a=3) AND (b=21 OR b=22 OR b=23); +DROP TABLE or_expr_bug; diff --git a/parser/testdata/00736_disjunction_optimisation/ast.json b/parser/testdata/00736_disjunction_optimisation/ast.json new file mode 100644 index 000000000..2c06213bd --- /dev/null +++ b/parser/testdata/00736_disjunction_optimisation/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery bug (children 1)" + }, + { + "explain": " Identifier bug" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001352923, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/00736_disjunction_optimisation/metadata.json b/parser/testdata/00736_disjunction_optimisation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00736_disjunction_optimisation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00736_disjunction_optimisation/query.sql b/parser/testdata/00736_disjunction_optimisation/query.sql new file mode 100644 index 000000000..38f77622b --- /dev/null +++ b/parser/testdata/00736_disjunction_optimisation/query.sql @@ -0,0 +1,49 @@ +DROP TABLE IF EXISTS bug; +CREATE TABLE IF NOT EXISTS bug(k UInt64, s UInt64) ENGINE = Memory; +insert into bug values(1,21),(1,22),(1,23),(2,21),(2,22),(2,23),(3,21),(3,22),(3,23); + +set optimize_min_equality_disjunction_chain_length = 2; + +select * from bug; + +select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23); +select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1; +explain query tree select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;; + +select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23); +select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;; +explain query tree select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;; + +select 
k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug; +select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;; +explain query tree select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;; + +select s, (s=21 or s=22 or s=23) from bug; +select s, (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;; +explain query tree select s, (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;; + +set optimize_min_equality_disjunction_chain_length = 3; + +select * from bug; + +select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23); +select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1; +explain query tree select * from bug where (k =1 or k=2 or k =3) and (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;; + +select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23); +select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;; +explain query tree select * from (select * from bug where k=1 or k=2 or k=3) where (s=21 or s=22 or s=23) SETTINGS enable_analyzer = 1;; + +select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug; +select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;; +explain query tree select k, (k=1 or k=2 or k=3), s, (s=21), (s=21 or s=22), (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;; + +select s, (s=21 or s=22 or s=23) from bug; +select s, (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;; +explain query tree select s, (s=21 or s=22 or s=23) from bug SETTINGS enable_analyzer = 1;; + +select s, (s=21 or 22=s or 23=s) from bug; +select s, (s=21 or 22=s or 23=s) from bug SETTINGS enable_analyzer = 1;; +explain query tree select s, (s=21 or 22=s or 23=s) from bug SETTINGS enable_analyzer = 1;; + +DROP TABLE bug; diff --git a/parser/testdata/00737_decimal_group_by/ast.json b/parser/testdata/00737_decimal_group_by/ast.json new file mode 100644 index 000000000..29b816377 --- /dev/null +++ b/parser/testdata/00737_decimal_group_by/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDecimal32 (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_1.1" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.00136027, + "rows_read": 10, + "bytes_read": 369 + } +} diff --git a/parser/testdata/00737_decimal_group_by/metadata.json b/parser/testdata/00737_decimal_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00737_decimal_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00737_decimal_group_by/query.sql b/parser/testdata/00737_decimal_group_by/query.sql new file mode 100644 index 000000000..c9db69b6c --- /dev/null +++ b/parser/testdata/00737_decimal_group_by/query.sql 
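Each ast.json fixture in this series follows the shape of ClickHouse's JSON output format: a meta entry describing a single String column named explain, one data row per line of EXPLAIN AST output, the row count, and per-run statistics (the elapsed timings naturally differ from run to run). A minimal sketch of how the 00737_decimal_group_by fixture above could be reproduced against a ClickHouse server follows; the FORMAT JSON invocation is an assumption about how these files were generated, not something recorded in the patch:

EXPLAIN AST
select toDecimal32(1.1, 2) as x group by x  -- first statement of the query.sql hunk below
FORMAT JSON;

-- The "explain" values in the returned rows should match the fixture above, e.g.:
--   SelectWithUnionQuery (children 1)
--    ExpressionList (children 1)
--     SelectQuery (children 2)
--      ExpressionList (children 1)
--       Function toDecimal32 (alias x) (children 1)

Only the first statement of each query.sql appears to be captured this way; the {"todo": true} metadata.json entries plausibly flag fixtures whose coverage is still pending, and the {"error": true} ast.json files elsewhere in the series presumably mark statements for which no EXPLAIN output was obtained.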
@@ -0,0 +1,26 @@ +select toDecimal32(1.1, 2) as x group by x; +select toDecimal64(2.1, 4) as x group by x; +select toDecimal128(3.1, 12) as x group by x; + +select materialize(toDecimal32(1.2, 2)) as x group by x; +select materialize(toDecimal64(2.2, 4)) as x group by x; +select materialize(toDecimal128(3.2, 12)) as x group by x; + +select x from (select toDecimal32(1.3, 2) x) group by x; +select x from (select toDecimal64(2.3, 4) x) group by x; +select x from (select toDecimal128(3.3, 12) x) group by x; + +DROP TABLE IF EXISTS decimal; +CREATE TABLE IF NOT EXISTS decimal +( + A UInt64, + B Decimal128(18), + C Decimal128(18) +) Engine = Memory; + +INSERT INTO decimal VALUES (1,1,1), (1,1,2), (1,1,3), (1,1,4); + +SELECT A, toString(B) AS B_str, toString(SUM(C)) AS c_str FROM decimal GROUP BY A, B_str; +SELECT A, B_str, toString(cc) FROM (SELECT A, toString(B) AS B_str, SUM(C) AS cc FROM decimal GROUP BY A, B_str); + +DROP TABLE decimal; diff --git a/parser/testdata/00738_nested_merge_multidimensional_array/ast.json b/parser/testdata/00738_nested_merge_multidimensional_array/ast.json new file mode 100644 index 000000000..7d5ddc8a5 --- /dev/null +++ b/parser/testdata/00738_nested_merge_multidimensional_array/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery sites (children 1)" + }, + { + "explain": " Identifier sites" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001220615, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/00738_nested_merge_multidimensional_array/metadata.json b/parser/testdata/00738_nested_merge_multidimensional_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00738_nested_merge_multidimensional_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00738_nested_merge_multidimensional_array/query.sql b/parser/testdata/00738_nested_merge_multidimensional_array/query.sql new file mode 100644 index 000000000..6efeb2e6e --- /dev/null +++ b/parser/testdata/00738_nested_merge_multidimensional_array/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS sites; +CREATE TABLE sites (Domain UInt8, `Users.UserID` Array(UInt64), `Users.Dates` Array(Array(Date))) ENGINE = MergeTree ORDER BY Domain SETTINGS vertical_merge_algorithm_min_rows_to_activate = 0, vertical_merge_algorithm_min_columns_to_activate = 0; + +SYSTEM STOP MERGES sites; + +INSERT INTO sites VALUES (1,[1],[[]]); +INSERT INTO sites VALUES (2,[1],[['2018-06-22']]); + +SELECT count(), countArray(Users.Dates), countArrayArray(Users.Dates) FROM sites; +SYSTEM START MERGES sites; +OPTIMIZE TABLE sites FINAL; +SELECT count(), countArray(Users.Dates), countArrayArray(Users.Dates) FROM sites; + +DROP TABLE sites; diff --git a/parser/testdata/00739_array_element_nullable_string_mattrobenolt/ast.json b/parser/testdata/00739_array_element_nullable_string_mattrobenolt/ast.json new file mode 100644 index 000000000..5e487473f --- /dev/null +++ b/parser/testdata/00739_array_element_nullable_string_mattrobenolt/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery wups (children 1)" + }, + { + "explain": " Identifier wups" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001293186, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/00739_array_element_nullable_string_mattrobenolt/metadata.json 
b/parser/testdata/00739_array_element_nullable_string_mattrobenolt/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00739_array_element_nullable_string_mattrobenolt/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00739_array_element_nullable_string_mattrobenolt/query.sql b/parser/testdata/00739_array_element_nullable_string_mattrobenolt/query.sql new file mode 100644 index 000000000..26db08824 --- /dev/null +++ b/parser/testdata/00739_array_element_nullable_string_mattrobenolt/query.sql @@ -0,0 +1,13 @@ +drop temporary table if exists wups; +create temporary table wups (a Array(Nullable(String))); +select count(), a[1] from wups group by a[1]; +insert into wups (a) values(['foo']); +select count(), a[1] from wups group by a[1]; +insert into wups (a) values([]); +select count(), a[1] from wups group by a[1] order by a[1]; + +drop temporary table wups; + +create temporary table wups (a Array(Nullable(String))); +insert into wups (a) values([]); +select a[1] from wups; diff --git a/parser/testdata/00740_database_in_nested_view/ast.json b/parser/testdata/00740_database_in_nested_view/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00740_database_in_nested_view/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00740_database_in_nested_view/metadata.json b/parser/testdata/00740_database_in_nested_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00740_database_in_nested_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00740_database_in_nested_view/query.sql b/parser/testdata/00740_database_in_nested_view/query.sql new file mode 100644 index 000000000..42c26a709 --- /dev/null +++ b/parser/testdata/00740_database_in_nested_view/query.sql @@ -0,0 +1,23 @@ + +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; +DROP TABLE IF EXISTS test_view_00740; +DROP TABLE IF EXISTS test_nested_view_00740; +DROP TABLE IF EXISTS test_joined_view_00740; + +CREATE VIEW test_00740 AS SELECT 1 AS N; +CREATE VIEW test_view_00740 AS SELECT * FROM test_00740; +CREATE VIEW test_nested_view_00740 AS SELECT * FROM (SELECT * FROM test_00740); +CREATE VIEW test_joined_view_00740 AS SELECT *, N AS x FROM test_00740 ANY LEFT JOIN test_00740 USING N; + +SELECT * FROM test_view_00740; +SELECT * FROM test_nested_view_00740; +SELECT * FROM test_joined_view_00740; + +USE default; +SELECT * FROM {CLICKHOUSE_DATABASE:Identifier}.test_view_00740; +SELECT * FROM {CLICKHOUSE_DATABASE:Identifier}.test_nested_view_00740; +SELECT * FROM {CLICKHOUSE_DATABASE:Identifier}.test_joined_view_00740; + +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.test_00740; +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.test_view_00740; +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.test_nested_view_00740; diff --git a/parser/testdata/00740_optimize_predicate_expression/ast.json b/parser/testdata/00740_optimize_predicate_expression/ast.json new file mode 100644 index 000000000..c234fa9f8 --- /dev/null +++ b/parser/testdata/00740_optimize_predicate_expression/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery perf (children 1)" + }, + { + "explain": " Identifier perf" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001473344, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git 
a/parser/testdata/00740_optimize_predicate_expression/metadata.json b/parser/testdata/00740_optimize_predicate_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00740_optimize_predicate_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00740_optimize_predicate_expression/query.sql b/parser/testdata/00740_optimize_predicate_expression/query.sql new file mode 100644 index 000000000..65b066358 --- /dev/null +++ b/parser/testdata/00740_optimize_predicate_expression/query.sql @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS perf; +CREATE TABLE perf (site String, user_id UInt64, z Float64) ENGINE = Log; + +SELECT * FROM (SELECT perf_1.z AS z_1 FROM perf AS perf_1); + +SELECT sum(mul)/sqrt(sum(sqr_dif_1) * sum(sqr_dif_2)) AS z_r +FROM( +SELECT + (SELECT avg(z_1) AS z_1_avg, + avg(z_2) AS z_2_avg + FROM ( + SELECT perf_1.site, perf_1.z AS z_1 + FROM perf AS perf_1 + WHERE user_id = 000 + ) jss1 ALL INNER JOIN ( + SELECT perf_2.site, perf_2.z AS z_2 + FROM perf AS perf_2 + WHERE user_id = 999 + ) jss2 USING site) as avg_values, + z_1 - avg_values.1 AS dif_1, + z_2 - avg_values.2 AS dif_2, + dif_1 * dif_2 AS mul, + dif_1*dif_1 AS sqr_dif_1, + dif_2*dif_2 AS sqr_dif_2 +FROM ( + SELECT perf_1.site, perf_1.z AS z_1 + FROM perf AS perf_1 + WHERE user_id = 000 +) js1 ALL INNER JOIN ( + SELECT perf_2.site, perf_2.z AS z_2 + FROM perf AS perf_2 + WHERE user_id = 999 +) js2 USING site); + +DROP TABLE perf; diff --git a/parser/testdata/00741_client_comment_multiline/ast.json b/parser/testdata/00741_client_comment_multiline/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00741_client_comment_multiline/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00741_client_comment_multiline/metadata.json b/parser/testdata/00741_client_comment_multiline/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00741_client_comment_multiline/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00741_client_comment_multiline/query.sql b/parser/testdata/00741_client_comment_multiline/query.sql new file mode 100644 index 000000000..ebe9a1927 --- /dev/null +++ b/parser/testdata/00741_client_comment_multiline/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest + +CREATE DATABASE IF NOT EXISTS test_00741; -- foo +DROP DATABASE test_00741; diff --git a/parser/testdata/00742_require_join_strictness/ast.json b/parser/testdata/00742_require_join_strictness/ast.json new file mode 100644 index 000000000..ee7971359 --- /dev/null +++ b/parser/testdata/00742_require_join_strictness/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001322616, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00742_require_join_strictness/metadata.json b/parser/testdata/00742_require_join_strictness/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00742_require_join_strictness/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00742_require_join_strictness/query.sql b/parser/testdata/00742_require_join_strictness/query.sql new file mode 100644 index 000000000..a3a5315e7 --- /dev/null +++ b/parser/testdata/00742_require_join_strictness/query.sql @@ -0,0 +1,3 @@ +SET send_logs_level = 'fatal'; +SET join_default_strictness 
= ''; +SELECT * FROM system.one INNER JOIN (SELECT number AS k FROM system.numbers) js2 ON dummy = k; -- { serverError EXPECTED_ALL_OR_ANY } diff --git a/parser/testdata/00743_limit_by_not_found_column/ast.json b/parser/testdata/00743_limit_by_not_found_column/ast.json new file mode 100644 index 000000000..36cbde26a --- /dev/null +++ b/parser/testdata/00743_limit_by_not_found_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery installation_stats (children 1)" + }, + { + "explain": " Identifier installation_stats" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000962691, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/00743_limit_by_not_found_column/metadata.json b/parser/testdata/00743_limit_by_not_found_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00743_limit_by_not_found_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00743_limit_by_not_found_column/query.sql b/parser/testdata/00743_limit_by_not_found_column/query.sql new file mode 100644 index 000000000..831d67f62 --- /dev/null +++ b/parser/testdata/00743_limit_by_not_found_column/query.sql @@ -0,0 +1,64 @@ +DROP TABLE IF EXISTS installation_stats; +CREATE TABLE installation_stats (message String, info String, message_type String) ENGINE = Log; + +SELECT count(*) AS total +FROM +( + SELECT + message, + info, + count() AS cnt + FROM installation_stats + WHERE message_type LIKE 'fail' + GROUP BY + message, + info + ORDER BY cnt DESC + LIMIT 5 BY message +); + +DROP TABLE installation_stats; + +CREATE TEMPORARY TABLE Accounts (AccountID UInt64, Currency String); + +SELECT AccountID +FROM +( + SELECT + AccountID, + Currency + FROM Accounts + LIMIT 2 BY Currency +); + +CREATE TEMPORARY TABLE commententry1 (created_date Date, link_id String, subreddit String); +INSERT INTO commententry1 VALUES ('2016-01-01', 'xyz', 'cpp'); + +SELECT concat('http://reddit.com/r/', subreddit, '/comments/', replaceRegexpOne(link_id, 't[0-9]_', '')) +FROM +( + SELECT + y, + subreddit, + link_id, + cnt + FROM + ( + SELECT + created_date AS y, + link_id, + subreddit, + count(*) AS cnt + FROM commententry1 + WHERE toYear(created_date) = 2016 + GROUP BY + y, + link_id, + subreddit + ORDER BY y ASC + ) + ORDER BY + y ASC, + cnt DESC + LIMIT 1 BY y +); diff --git a/parser/testdata/00744_join_not_found_column/ast.json b/parser/testdata/00744_join_not_found_column/ast.json new file mode 100644 index 000000000..e995ca282 --- /dev/null +++ b/parser/testdata/00744_join_not_found_column/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001269005, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00744_join_not_found_column/metadata.json b/parser/testdata/00744_join_not_found_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00744_join_not_found_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00744_join_not_found_column/query.sql b/parser/testdata/00744_join_not_found_column/query.sql new file mode 100644 index 000000000..72969e793 --- /dev/null +++ b/parser/testdata/00744_join_not_found_column/query.sql @@ -0,0 +1,45 @@ +SET joined_subquery_requires_alias = 0; + +DROP TEMPORARY TABLE IF EXISTS 
test_00744; +CREATE TEMPORARY TABLE test_00744 +( + x Int32 +); + +INSERT INTO test_00744 VALUES (1); + +SELECT x +FROM +( + SELECT + x, + `1` + FROM + ( + SELECT x, 1 FROM test_00744 + ) + ALL INNER JOIN + ( + SELECT + count(), + 1 + FROM test_00744 + ) jss2 USING (`1`) + LIMIT 10 +); + +SELECT + x, + `1` +FROM +( + SELECT x, 1 FROM test_00744 +) +ALL INNER JOIN +( + SELECT + count(), + 1 + FROM test_00744 +) js2 USING (`1`) +LIMIT 10; diff --git a/parser/testdata/00745_compile_scalar_subquery/ast.json b/parser/testdata/00745_compile_scalar_subquery/ast.json new file mode 100644 index 000000000..26429ba18 --- /dev/null +++ b/parser/testdata/00745_compile_scalar_subquery/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001526845, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00745_compile_scalar_subquery/metadata.json b/parser/testdata/00745_compile_scalar_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00745_compile_scalar_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00745_compile_scalar_subquery/query.sql b/parser/testdata/00745_compile_scalar_subquery/query.sql new file mode 100644 index 000000000..5baf61444 --- /dev/null +++ b/parser/testdata/00745_compile_scalar_subquery/query.sql @@ -0,0 +1,22 @@ +SET compile_expressions = 1; +SET min_count_to_compile_expression = 1; +SET optimize_move_to_prewhere = 0; + +DROP TABLE IF EXISTS dt; +DROP TABLE IF EXISTS testx; + +CREATE TABLE dt(tkey Int32) ENGINE = MergeTree order by tuple(); +INSERT INTO dt VALUES (300000); +CREATE TABLE testx(t Int32, a UInt8) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO testx VALUES (100000, 0); + +SELECT COUNT(*) FROM testx WHERE NOT a AND t < (SELECT tkey FROM dt); + +DROP TABLE dt; +CREATE TABLE dt(tkey Int32) ENGINE = MergeTree order by tuple(); +INSERT INTO dt VALUES (0); + +SELECT COUNT(*) FROM testx WHERE NOT a AND t < (SELECT tkey FROM dt); + +DROP TABLE IF EXISTS dt; +DROP TABLE IF EXISTS testx; diff --git a/parser/testdata/00746_compile_non_deterministic_function/ast.json b/parser/testdata/00746_compile_non_deterministic_function/ast.json new file mode 100644 index 000000000..dbd090285 --- /dev/null +++ b/parser/testdata/00746_compile_non_deterministic_function/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001111161, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00746_compile_non_deterministic_function/metadata.json b/parser/testdata/00746_compile_non_deterministic_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00746_compile_non_deterministic_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00746_compile_non_deterministic_function/query.sql b/parser/testdata/00746_compile_non_deterministic_function/query.sql new file mode 100644 index 000000000..b1e7fe356 --- /dev/null +++ b/parser/testdata/00746_compile_non_deterministic_function/query.sql @@ -0,0 +1,20 @@ +SET compile_expressions = 1; +SET min_count_to_compile_expression = 1; + +DROP TABLE IF EXISTS time_table; + +CREATE TABLE time_table(timecol DateTime, value Int32) ENGINE = MergeTree order by tuple(); + +INSERT INTO 
time_table VALUES (now() - 5, 5), (now() - 3, 3); + +SELECT COUNT() from time_table WHERE value < now() - 1 AND value != 0 AND modulo(value, 2) != 0 AND timecol < now() - 1; + +SELECT sleep(3); + +INSERT INTO time_table VALUES (now(), 101); + +SELECT sleep(3); + +SELECT COUNT() from time_table WHERE value < now() - 1 AND value != 0 AND modulo(value, 2) != 0 AND timecol < now() - 1; + +DROP TABLE IF EXISTS time_table; diff --git a/parser/testdata/00746_hashing_tuples/ast.json b/parser/testdata/00746_hashing_tuples/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00746_hashing_tuples/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00746_hashing_tuples/metadata.json b/parser/testdata/00746_hashing_tuples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00746_hashing_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00746_hashing_tuples/query.sql b/parser/testdata/00746_hashing_tuples/query.sql new file mode 100644 index 000000000..28303141f --- /dev/null +++ b/parser/testdata/00746_hashing_tuples/query.sql @@ -0,0 +1,34 @@ +-- Tags: no-fasttest, no-openssl-fips + +SELECT sipHash64(1, 2, 3); +SELECT sipHash64(1, 3, 2); +SELECT sipHash64(('a', [1, 2, 3], 4, (4, ['foo', 'bar'], 1, (1, 2)))); + +SELECT hex(sipHash128('foo')); +SELECT hex(sipHash128('\x01')); +SELECT hex(sipHash128('foo', 'foo')); +SELECT hex(sipHash128('foo', 'foo', 'foo')); +SELECT hex(sipHash128(1, 2, 3)); + +SELECT halfMD5(1, 2, 3); +SELECT halfMD5(1, 3, 2); +SELECT halfMD5(('a', [1, 2, 3], 4, (4, ['foo', 'bar'], 1, (1, 2)))); + +SELECT murmurHash2_32(1, 2, 3); +SELECT murmurHash2_32(1, 3, 2); +SELECT murmurHash2_32(('a', [1, 2, 3], 4, (4, ['foo', 'bar'], (1, 2)))); + +SELECT murmurHash2_64(1, 2, 3); +SELECT murmurHash2_64(1, 3, 2); +SELECT murmurHash2_64(('a', [1, 2, 3], 4, (4, ['foo', 'bar'], 1, (1, 2)))); + +SELECT murmurHash3_64(1, 2, 3); +SELECT murmurHash3_64(1, 3, 2); +SELECT murmurHash3_64(('a', [1, 2, 3], 4, (4, ['foo', 'bar'], 1, (1, 2)))); + +SELECT hex(murmurHash3_128('foo', 'foo')); +SELECT hex(murmurHash3_128('foo', 'foo', 'foo')); + +SELECT gccMurmurHash(1, 2, 3); +SELECT gccMurmurHash(1, 3, 2); +SELECT gccMurmurHash(('a', [1, 2, 3], 4, (4, ['foo', 'bar'], 1, (1, 2)))); diff --git a/parser/testdata/00747_contributors/ast.json b/parser/testdata/00747_contributors/ast.json new file mode 100644 index 000000000..1afaaa0b2 --- /dev/null +++ b/parser/testdata/00747_contributors/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " 
TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.contributors" + }, + { + "explain": " Literal UInt64_200" + }, + { + "explain": " Literal 'ok'" + }, + { + "explain": " Literal 'fail'" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001337056, + "rows_read": 23, + "bytes_read": 968 + } +} diff --git a/parser/testdata/00747_contributors/metadata.json b/parser/testdata/00747_contributors/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00747_contributors/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00747_contributors/query.sql b/parser/testdata/00747_contributors/query.sql new file mode 100644 index 000000000..d32cd367e --- /dev/null +++ b/parser/testdata/00747_contributors/query.sql @@ -0,0 +1 @@ +SELECT if ((SELECT count(*) FROM system.contributors) > 200, 'ok', 'fail'); diff --git a/parser/testdata/00748_insert_array_with_null/ast.json b/parser/testdata/00748_insert_array_with_null/ast.json new file mode 100644 index 000000000..ff53118a6 --- /dev/null +++ b/parser/testdata/00748_insert_array_with_null/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery arraytest (children 1)" + }, + { + "explain": " Identifier arraytest" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001284166, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00748_insert_array_with_null/metadata.json b/parser/testdata/00748_insert_array_with_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00748_insert_array_with_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00748_insert_array_with_null/query.sql b/parser/testdata/00748_insert_array_with_null/query.sql new file mode 100644 index 000000000..84fc446ab --- /dev/null +++ b/parser/testdata/00748_insert_array_with_null/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS arraytest; + +set allow_deprecated_syntax_for_merge_tree=1; +set input_format_null_as_default=0; +CREATE TABLE arraytest ( created_date Date DEFAULT toDate(created_at), created_at DateTime DEFAULT now(), strings Array(String) DEFAULT emptyArrayString()) ENGINE = MergeTree(created_date, cityHash64(created_at), (created_date, cityHash64(created_at)), 8192); + +INSERT INTO arraytest (created_at, strings) VALUES (now(), ['aaaaa', 'bbbbb', 'ccccc']); +INSERT INTO arraytest (created_at, strings) VALUES (now(), ['aaaaa', 'bbbbb', null]); -- { error CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN } + +SELECT strings from arraytest; + +DROP TABLE IF EXISTS arraytest; + diff --git a/parser/testdata/00749_inner_join_of_unnamed_subqueries/ast.json b/parser/testdata/00749_inner_join_of_unnamed_subqueries/ast.json new file mode 100644 index 000000000..7dcd4e93b --- /dev/null +++ b/parser/testdata/00749_inner_join_of_unnamed_subqueries/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001600091, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00749_inner_join_of_unnamed_subqueries/metadata.json b/parser/testdata/00749_inner_join_of_unnamed_subqueries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/00749_inner_join_of_unnamed_subqueries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00749_inner_join_of_unnamed_subqueries/query.sql b/parser/testdata/00749_inner_join_of_unnamed_subqueries/query.sql new file mode 100644 index 000000000..760e33639 --- /dev/null +++ b/parser/testdata/00749_inner_join_of_unnamed_subqueries/query.sql @@ -0,0 +1,42 @@ +set joined_subquery_requires_alias = 0; + +DROP TABLE IF EXISTS left_table; +DROP TABLE IF EXISTS right_table; + +CREATE TABLE left_table(APIKey Int32, SomeColumn String) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO left_table VALUES(1, 'somestr'); + +CREATE TABLE right_table(APIKey Int32, EventValueForPostback String) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO right_table VALUES(1, 'hello'), (2, 'WORLD'); + +SELECT + APIKey, + ConversionEventValue +FROM + left_table AS left_table +ALL INNER JOIN + ( + SELECT * + FROM + ( + SELECT + APIKey, + EventValueForPostback AS ConversionEventValue + FROM + right_table AS right_table + ) + ALL INNER JOIN + ( + SELECT + APIKey + FROM + left_table as left_table + GROUP BY + APIKey + ) USING (APIKey) + ) USING (APIKey); + +DROP TABLE IF EXISTS left_table; +DROP TABLE IF EXISTS right_table; diff --git a/parser/testdata/00750_merge_tree_merge_with_o_direct/ast.json b/parser/testdata/00750_merge_tree_merge_with_o_direct/ast.json new file mode 100644 index 000000000..017e69eda --- /dev/null +++ b/parser/testdata/00750_merge_tree_merge_with_o_direct/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery sample_merge_tree (children 1)" + }, + { + "explain": " Identifier sample_merge_tree" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001278963, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/00750_merge_tree_merge_with_o_direct/metadata.json b/parser/testdata/00750_merge_tree_merge_with_o_direct/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00750_merge_tree_merge_with_o_direct/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00750_merge_tree_merge_with_o_direct/query.sql b/parser/testdata/00750_merge_tree_merge_with_o_direct/query.sql new file mode 100644 index 000000000..89cf5a616 --- /dev/null +++ b/parser/testdata/00750_merge_tree_merge_with_o_direct/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS sample_merge_tree; + +CREATE TABLE sample_merge_tree (dt DateTime, x UInt64) ENGINE = MergeTree PARTITION BY toYYYYMMDD(dt) ORDER BY x SETTINGS min_merge_bytes_to_use_direct_io=1, index_granularity = 8192; + +INSERT INTO sample_merge_tree VALUES (toDateTime('2018-10-31 05:05:00'), 0), (toDateTime('2018-10-31 06:06:00'), 10), (toDateTime('2018-10-28 10:00:00'), 20); + +OPTIMIZE TABLE sample_merge_tree FINAL; + +SELECT * FROM sample_merge_tree ORDER BY x; + +DROP TABLE IF EXISTS sample_merge_tree; diff --git a/parser/testdata/00751_default_databasename_for_view/ast.json b/parser/testdata/00751_default_databasename_for_view/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00751_default_databasename_for_view/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00751_default_databasename_for_view/metadata.json b/parser/testdata/00751_default_databasename_for_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/00751_default_databasename_for_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00751_default_databasename_for_view/query.sql b/parser/testdata/00751_default_databasename_for_view/query.sql new file mode 100644 index 000000000..599ef5b89 --- /dev/null +++ b/parser/testdata/00751_default_databasename_for_view/query.sql @@ -0,0 +1,44 @@ + +DROP TABLE IF EXISTS t_00751; +DROP TABLE IF EXISTS t_mv_00751; +DROP TABLE IF EXISTS u_00751; +DROP TABLE IF EXISTS v_00751; + +CREATE TABLE t_00751 +( + date Date, + platform Enum8('a' = 0, 'b' = 1), + app Enum8('a' = 0, 'b' = 1) +) ENGINE = Memory; + +CREATE TABLE u_00751 (app Enum8('a' = 0, 'b' = 1)) ENGINE = Memory; +CREATE TABLE v_00751 (platform Enum8('a' = 0, 'b' = 1)) ENGINE = Memory; + +INSERT INTO u_00751 VALUES ('b'); +INSERT INTO v_00751 VALUES ('b'); + +CREATE MATERIALIZED VIEW t_mv_00751 ENGINE = MergeTree ORDER BY date + AS SELECT date, platform, app FROM t_00751 + WHERE app = (SELECT min(app) from u_00751) AND platform = (SELECT (SELECT min(platform) from v_00751)); + +SHOW CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.t_mv_00751 FORMAT TabSeparatedRaw; + +USE default; +DETACH TABLE {CLICKHOUSE_DATABASE:Identifier}.t_mv_00751; +ATTACH TABLE {CLICKHOUSE_DATABASE:Identifier}.t_mv_00751; + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.t_00751 VALUES ('2000-01-01', 'a', 'a') ('2000-01-02', 'b', 'b'); + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.u_00751 VALUES ('a'); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.v_00751 VALUES ('a'); + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.t_00751 VALUES ('2000-01-03', 'a', 'a') ('2000-01-04', 'b', 'b'); + +SELECT * FROM {CLICKHOUSE_DATABASE:Identifier}.t_00751 ORDER BY date; +SELECT * FROM {CLICKHOUSE_DATABASE:Identifier}.t_mv_00751 ORDER BY date; + +DROP TABLE {CLICKHOUSE_DATABASE:Identifier}.t_00751; +DROP TABLE {CLICKHOUSE_DATABASE:Identifier}.t_mv_00751; +DROP TABLE {CLICKHOUSE_DATABASE:Identifier}.u_00751; +DROP TABLE {CLICKHOUSE_DATABASE:Identifier}.v_00751; + diff --git a/parser/testdata/00751_hashing_ints/ast.json b/parser/testdata/00751_hashing_ints/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00751_hashing_ints/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00751_hashing_ints/metadata.json b/parser/testdata/00751_hashing_ints/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00751_hashing_ints/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00751_hashing_ints/query.sql b/parser/testdata/00751_hashing_ints/query.sql new file mode 100644 index 000000000..32d1fcab7 --- /dev/null +++ b/parser/testdata/00751_hashing_ints/query.sql @@ -0,0 +1,11 @@ +-- Tags: no-fasttest, no-openssl-fips + +SELECT halfMD5(123456); +SELECT sipHash64(123456); +SELECT cityHash64(123456); +SELECT farmFingerprint64(123456); +SELECT farmFingerprint64('123456'); +SELECT farmHash64(123456); +SELECT metroHash64(123456); +SELECT murmurHash2_32(123456); +SELECT murmurHash2_64(123456); diff --git a/parser/testdata/00751_low_cardinality_nullable_group_by/ast.json b/parser/testdata/00751_low_cardinality_nullable_group_by/ast.json new file mode 100644 index 000000000..44fb39167 --- /dev/null +++ b/parser/testdata/00751_low_cardinality_nullable_group_by/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + 
"elapsed": 0.001493558, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00751_low_cardinality_nullable_group_by/metadata.json b/parser/testdata/00751_low_cardinality_nullable_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00751_low_cardinality_nullable_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00751_low_cardinality_nullable_group_by/query.sql b/parser/testdata/00751_low_cardinality_nullable_group_by/query.sql new file mode 100644 index 000000000..0a92037fa --- /dev/null +++ b/parser/testdata/00751_low_cardinality_nullable_group_by/query.sql @@ -0,0 +1,8 @@ +set allow_suspicious_low_cardinality_types = 1; +drop table if exists low_null_float; +CREATE TABLE low_null_float (a LowCardinality(Nullable(Float64))) ENGINE = MergeTree order by tuple(); +INSERT INTO low_null_float (a) SELECT if(number % 3 == 0, Null, number) FROM system.numbers LIMIT 1000000; + +SELECT a, count() FROM low_null_float GROUP BY a ORDER BY count() desc, a LIMIT 10; +drop table if exists low_null_float; + diff --git a/parser/testdata/00752_low_cardinality_array_result/ast.json b/parser/testdata/00752_low_cardinality_array_result/ast.json new file mode 100644 index 000000000..f6e7715b1 --- /dev/null +++ b/parser/testdata/00752_low_cardinality_array_result/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function splitByChar (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal ','" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'a,b,c'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001248582, + "rows_read": 12, + "bytes_read": 479 + } +} diff --git a/parser/testdata/00752_low_cardinality_array_result/metadata.json b/parser/testdata/00752_low_cardinality_array_result/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00752_low_cardinality_array_result/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00752_low_cardinality_array_result/query.sql b/parser/testdata/00752_low_cardinality_array_result/query.sql new file mode 100644 index 000000000..a14138f19 --- /dev/null +++ b/parser/testdata/00752_low_cardinality_array_result/query.sql @@ -0,0 +1,2 @@ +SELECT arrayJoin(splitByChar(',', toLowCardinality('a,b,c'))); + diff --git a/parser/testdata/00752_low_cardinality_lambda_argument/ast.json b/parser/testdata/00752_low_cardinality_lambda_argument/ast.json new file mode 100644 index 000000000..ccacde20b --- /dev/null +++ b/parser/testdata/00752_low_cardinality_lambda_argument/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001408733, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00752_low_cardinality_lambda_argument/metadata.json b/parser/testdata/00752_low_cardinality_lambda_argument/metadata.json new file 
mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00752_low_cardinality_lambda_argument/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00752_low_cardinality_lambda_argument/query.sql b/parser/testdata/00752_low_cardinality_lambda_argument/query.sql new file mode 100644 index 000000000..998ff2f54 --- /dev/null +++ b/parser/testdata/00752_low_cardinality_lambda_argument/query.sql @@ -0,0 +1,14 @@ +set allow_suspicious_low_cardinality_types=1; +drop table if exists lc_lambda; +create table lc_lambda (arr Array(LowCardinality(UInt64))) engine = Memory; +insert into lc_lambda select range(number) from system.numbers limit 10; +select arrayFilter(x -> x % 2 == 0, arr) from lc_lambda; +drop table if exists lc_lambda; + +drop table if exists test_array; +CREATE TABLE test_array(resources_host Array(LowCardinality(String))) ENGINE = MergeTree() ORDER BY (resources_host); +insert into test_array values (['a']); +SELECT arrayMap(i -> [resources_host[i]], arrayEnumerate(resources_host)) FROM test_array; +drop table if exists test_array; + +SELECT arrayMap(x -> (x + (arrayMap(y -> ((x + y) + toLowCardinality(1)), [])[1])), []); diff --git a/parser/testdata/00752_low_cardinality_left_array_join/ast.json b/parser/testdata/00752_low_cardinality_left_array_join/ast.json new file mode 100644 index 000000000..e52cfc03c --- /dev/null +++ b/parser/testdata/00752_low_cardinality_left_array_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001071991, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00752_low_cardinality_left_array_join/metadata.json b/parser/testdata/00752_low_cardinality_left_array_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00752_low_cardinality_left_array_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00752_low_cardinality_left_array_join/query.sql b/parser/testdata/00752_low_cardinality_left_array_join/query.sql new file mode 100644 index 000000000..2d65f01a1 --- /dev/null +++ b/parser/testdata/00752_low_cardinality_left_array_join/query.sql @@ -0,0 +1,22 @@ +set allow_suspicious_low_cardinality_types=1; +drop table if exists lc_left_aj; +CREATE TABLE lc_left_aj +( + str Array(LowCardinality(String)), + null_str Array(LowCardinality(Nullable(String))), + val Array(LowCardinality(Float64)), + null_val Array(LowCardinality(Nullable(Float64))) +) +ENGINE = Memory; + +insert into lc_left_aj values (['a', 'b'], ['c', Null], [1, 2.0], [3., Null]), ([], ['c', Null], [1, 2.0], [3., Null]), (['a', 'b'], [], [1, 2.0], [3., Null]), (['a', 'b'], ['c', Null], [], [3., Null]), (['a', 'b'], ['c', Null], [1, 2.0], []); + +select *, arr from lc_left_aj left array join str as arr; +select '-'; +select *, arr from lc_left_aj left array join null_str as arr; +select '-'; +select *, arr from lc_left_aj left array join val as arr; +select '-'; +select *, arr from lc_left_aj left array join null_val as arr; +drop table if exists lc_left_aj; + diff --git a/parser/testdata/00752_low_cardinality_mv_1/ast.json b/parser/testdata/00752_low_cardinality_mv_1/ast.json new file mode 100644 index 000000000..43c96fb90 --- /dev/null +++ b/parser/testdata/00752_low_cardinality_mv_1/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"DropQuery lc_00752 (children 1)" + }, + { + "explain": " Identifier lc_00752" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001089849, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00752_low_cardinality_mv_1/metadata.json b/parser/testdata/00752_low_cardinality_mv_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00752_low_cardinality_mv_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00752_low_cardinality_mv_1/query.sql b/parser/testdata/00752_low_cardinality_mv_1/query.sql new file mode 100644 index 000000000..5ed3a95f9 --- /dev/null +++ b/parser/testdata/00752_low_cardinality_mv_1/query.sql @@ -0,0 +1,15 @@ +drop table if exists lc_00752; +drop table if exists lc_mv_00752; + +create table lc_00752 (str LowCardinality(String)) engine = MergeTree order by tuple(); + +insert into lc_00752 values ('a'), ('bbb'), ('ab'), ('accccc'), ('baasddas'), ('bcde'); + +SET allow_suspicious_primary_key = 1; +CREATE MATERIALIZED VIEW lc_mv_00752 ENGINE = AggregatingMergeTree() ORDER BY tuple() populate AS SELECT substring(str, 1, 1) as letter, min(length(str)) AS min_len, max(length(str)) AS max_len FROM lc_00752 GROUP BY substring(str, 1, 1); + +insert into lc_00752 values ('a'), ('bbb'), ('ab'), ('accccc'), ('baasddas'), ('bcde'); +select * from lc_mv_00752 order by letter; + +drop table if exists lc_00752; +drop table if exists lc_mv_00752; diff --git a/parser/testdata/00752_low_cardinality_mv_2/ast.json b/parser/testdata/00752_low_cardinality_mv_2/ast.json new file mode 100644 index 000000000..10664a73e --- /dev/null +++ b/parser/testdata/00752_low_cardinality_mv_2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery radacct (children 1)" + }, + { + "explain": " Identifier radacct" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001424077, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00752_low_cardinality_mv_2/metadata.json b/parser/testdata/00752_low_cardinality_mv_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00752_low_cardinality_mv_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00752_low_cardinality_mv_2/query.sql b/parser/testdata/00752_low_cardinality_mv_2/query.sql new file mode 100644 index 000000000..83c6b1c1a --- /dev/null +++ b/parser/testdata/00752_low_cardinality_mv_2/query.sql @@ -0,0 +1,16 @@ +drop table if exists radacct; +drop table if exists mv_traffic_by_tadig15min; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE radacct ( radacctid UInt64, f3gppchargingid Nullable(String), f3gppggsnaddress Nullable(String), f3gppggsnmccmnc Nullable(String), f3gppgprsqos Nullable(String), f3gppimeisv Nullable(String), f3gppimsi Nullable(UInt64), f3gppimsimccmnc Nullable(String), f3gpploci Nullable(String), f3gppnsapi Nullable(String), f3gpprattype Nullable(String), f3gppsgsnaddress Nullable(String), f3gppsgsnmccmnc Nullable(String), acctdelaytime Nullable(UInt32), acctinputoctets Nullable(UInt64), acctinputpackets Nullable(UInt64), acctoutputoctets Nullable(UInt64), acctoutputpackets Nullable(UInt64), acctsessionid String, acctstatustype Nullable(String), acctuniqueid String, calledstationid Nullable(String), callingstationid Nullable(String), framedipaddress Nullable(String), nasidentifier Nullable(String), nasipaddress Nullable(String), acctstarttime 
Nullable(DateTime), acctstoptime Nullable(DateTime), acctsessiontime Nullable(UInt32), acctterminatecause Nullable(String), acctstartdelay Nullable(UInt32), acctstopdelay Nullable(UInt32), connectinfo_start Nullable(String), connectinfo_stop Nullable(String), timestamp DateTime, username Nullable(String), realm Nullable(String), f3gppimsi_int UInt64, f3gppsgsnaddress_int Nullable(UInt32), timestamp_date Date, tac Nullable(String), mnc Nullable(String), tadig LowCardinality(String), country LowCardinality(String), tadig_op_ip Nullable(String) DEFAULT CAST('TADIG NOT FOUND', 'Nullable(String)'), mcc Nullable(UInt16) MATERIALIZED toUInt16OrNull(substring(f3gppsgsnmccmnc, 1, 6))) ENGINE = MergeTree(timestamp_date, (timestamp, radacctid, acctuniqueid), 8192); + +insert into radacct values (1, 'a', 'b', 'c', 'd', 'e', 2, 'a', 'b', 'c', 'd', 'e', 'f', 3, 4, 5, 6, 7, 'a', 'Stop', 'c', 'd', 'e', 'f', 'g', 'h', '2018-10-10 15:54:21', '2018-10-10 15:54:21', 8, 'a', 9, 10, 'a', 'b', '2018-10-10 15:54:21', 'a', 'b', 11, 12, '2018-10-10', 'a', 'b', 'c', 'd', 'e'); + +SELECT any(acctstatustype = 'Stop') FROM radacct WHERE (acctstatustype = 'Stop') AND ((acctinputoctets + acctoutputoctets) > 0); +create materialized view mv_traffic_by_tadig15min Engine=AggregatingMergeTree partition by tadig order by (ts,tadig) populate as select toStartOfFifteenMinutes(timestamp) ts,toDayOfWeek(timestamp) dow, tadig, sumState(acctinputoctets+acctoutputoctets) traffic_bytes,maxState(timestamp) last_stop, minState(radacctid) min_radacctid,maxState(radacctid) max_radacctid from radacct where acctstatustype='Stop' and acctinputoctets+acctoutputoctets > 0 group by tadig,ts,dow; + +select tadig, ts, dow, sumMerge(traffic_bytes), maxMerge(last_stop), minMerge(min_radacctid), maxMerge(max_radacctid) from mv_traffic_by_tadig15min group by tadig, ts, dow; + +drop table if exists radacct; +drop table if exists mv_traffic_by_tadig15min; + diff --git a/parser/testdata/00752_low_cardinality_permute/ast.json b/parser/testdata/00752_low_cardinality_permute/ast.json new file mode 100644 index 000000000..a87fc9ef1 --- /dev/null +++ b/parser/testdata/00752_low_cardinality_permute/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery lc_perm (children 1)" + }, + { + "explain": " Identifier lc_perm" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001113123, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00752_low_cardinality_permute/metadata.json b/parser/testdata/00752_low_cardinality_permute/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00752_low_cardinality_permute/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00752_low_cardinality_permute/query.sql b/parser/testdata/00752_low_cardinality_permute/query.sql new file mode 100644 index 000000000..5c0716a92 --- /dev/null +++ b/parser/testdata/00752_low_cardinality_permute/query.sql @@ -0,0 +1,7 @@ +drop table if exists lc_perm; +create table lc_perm (val UInt32, str LowCardinality(String)) engine = MergeTree order by val; +insert into lc_perm values (1, 'w'), (10, 'x'), (3, 'y'), (8, 'z'), (4, 'w'), (6, 'y'), (11, 'x'), (0, 'z'), (12, 'a'), (13, 'b'), (14, 'c'), (15, 'd'), (16, 'e'), (17, 'f'), (18, 'g'), (19, 'h'); +select * from lc_perm; +select str from lc_perm where val < 12 order by str; +drop table if exists lc_perm; + diff --git a/parser/testdata/00753_alter_attach/ast.json 
b/parser/testdata/00753_alter_attach/ast.json new file mode 100644 index 000000000..607ff285d --- /dev/null +++ b/parser/testdata/00753_alter_attach/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alter_attach (children 1)" + }, + { + "explain": " Identifier alter_attach" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001366057, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/00753_alter_attach/metadata.json b/parser/testdata/00753_alter_attach/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00753_alter_attach/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00753_alter_attach/query.sql b/parser/testdata/00753_alter_attach/query.sql new file mode 100644 index 000000000..b22a95a83 --- /dev/null +++ b/parser/testdata/00753_alter_attach/query.sql @@ -0,0 +1,135 @@ +DROP TABLE IF EXISTS alter_attach; +CREATE TABLE alter_attach (x UInt64, p UInt8) ENGINE = MergeTree ORDER BY tuple() PARTITION BY p; +INSERT INTO alter_attach VALUES (1, 1), (2, 1), (3, 1); + +ALTER TABLE alter_attach DETACH PARTITION 1; + +ALTER TABLE alter_attach ADD COLUMN s String; +INSERT INTO alter_attach VALUES (4, 2, 'Hello'), (5, 2, 'World'); + +ALTER TABLE alter_attach ATTACH PARTITION 1; +SELECT * FROM alter_attach ORDER BY x; + +ALTER TABLE alter_attach DETACH PARTITION 2; +ALTER TABLE alter_attach DROP COLUMN s; +INSERT INTO alter_attach VALUES (6, 3), (7, 3); + +ALTER TABLE alter_attach ATTACH PARTITION 2; +SELECT * FROM alter_attach ORDER BY x; + +ALTER TABLE alter_attach DETACH PARTITION ALL; +SELECT * FROM alter_attach ORDER BY x; + +ALTER TABLE alter_attach ATTACH PARTITION 2; +SELECT * FROM alter_attach ORDER BY x; + +DROP TABLE IF EXISTS detach_all_no_partition; +CREATE TABLE detach_all_no_partition (x UInt64, p UInt8) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO detach_all_no_partition VALUES (1, 1), (2, 1), (3, 1); +SELECT * FROM detach_all_no_partition ORDER BY x; + +ALTER TABLE detach_all_no_partition DETACH PARTITION ALL; +SELECT * FROM detach_all_no_partition ORDER BY x; + +ALTER TABLE detach_all_no_partition ATTACH PARTITION tuple(); +SELECT * FROM detach_all_no_partition ORDER BY x; + +DROP TABLE alter_attach; +DROP TABLE detach_all_no_partition; + +DROP TABLE IF EXISTS replicated_table_detach_all1; +DROP TABLE IF EXISTS replicated_table_detach_all2; + +CREATE TABLE replicated_table_detach_all1 ( + id UInt64, + Data String +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00753_{database}/replicated_table_detach_all', '1') ORDER BY id PARTITION BY id; + +CREATE TABLE replicated_table_detach_all2 ( + id UInt64, + Data String +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00753_{database}/replicated_table_detach_all', '2') ORDER BY id PARTITION BY id; + + +INSERT INTO replicated_table_detach_all1 VALUES (1, '1'), (2, '2'); +select * from replicated_table_detach_all1 order by id; + +ALTER TABLE replicated_table_detach_all1 DETACH PARTITION ALL; +select * from replicated_table_detach_all1 order by id; +SYSTEM SYNC REPLICA replicated_table_detach_all2; +select * from replicated_table_detach_all2 order by id; + +ALTER TABLE replicated_table_detach_all1 ATTACH PARTITION tuple(1); +select * from replicated_table_detach_all1 order by id; +SYSTEM SYNC REPLICA replicated_table_detach_all2; +select * from replicated_table_detach_all2 order by id; + +ALTER TABLE replicated_table_detach_all1 
FETCH PARTITION ALL FROM '/clickhouse/tables/test_00753_{database}/replicated_table_detach_all1'; -- { serverError SUPPORT_IS_DISABLED } + +DROP TABLE replicated_table_detach_all1; +DROP TABLE replicated_table_detach_all2; + +DROP TABLE IF EXISTS partition_all; +DROP TABLE IF EXISTS partition_all2; + +CREATE TABLE partition_all (x UInt64, p UInt8, q UInt8) ENGINE = MergeTree ORDER BY tuple() PARTITION BY p; +INSERT INTO partition_all VALUES (4, 1, 2), (5, 1, 3), (3, 1, 4); + +CREATE TABLE partition_all2 (x UInt64, p UInt8, q UInt8) ENGINE = MergeTree ORDER BY tuple() PARTITION BY p; +INSERT INTO partition_all2 VALUES (4, 1, 2), (5, 1, 3), (3, 1, 4); + +-- test PARTITION ALL +ALTER TABLE partition_all2 REPLACE PARTITION ALL FROM partition_all; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE partition_all MOVE PARTITION ALL TO TABLE partition_all2; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE partition_all2 CLEAR INDEX p IN PARTITION ALL; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE partition_all2 CLEAR COLUMN q IN PARTITION ALL; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE partition_all2 UPDATE q = q + 1 IN PARTITION ALL where p = 1; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE partition_all2 FREEZE PARTITION ALL; -- { serverError SUPPORT_IS_DISABLED } +CHECK TABLE partition_all2 PARTITION ALL; -- { serverError SUPPORT_IS_DISABLED } +OPTIMIZE TABLE partition_all2 PARTITION ALL; -- { serverError SUPPORT_IS_DISABLED } + +DROP TABLE partition_all; +DROP TABLE partition_all2; + +-- test ATTACH ALL +CREATE TABLE partition_attach_all (x UInt64, p UInt8) ENGINE = MergeTree ORDER BY x PARTITION BY p; +INSERT INTO partition_attach_all VALUES (1, 1), (2, 2), (3, 3); + +ALTER TABLE partition_attach_all DETACH PARTITION ALL; +SELECT * FROM partition_attach_all ORDER BY x; +ALTER TABLE partition_attach_all ATTACH PARTITION ALL; +SELECT * FROM partition_attach_all ORDER BY x; + +ALTER TABLE partition_attach_all DETACH PARTITION 1; +SELECT * FROM partition_attach_all ORDER BY x; +ALTER TABLE partition_attach_all ATTACH PARTITION ALL; +SELECT * FROM partition_attach_all ORDER BY x; + +ALTER TABLE partition_attach_all DROP PARTITION ALL; +SELECT * FROM partition_attach_all ORDER BY x; +ALTER TABLE partition_attach_all ATTACH PARTITION ALL; +SELECT * FROM partition_attach_all ORDER BY x; + +CREATE TABLE replicated_partition_attach_all (x UInt64, p UInt8) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00753_{database}/replicated_partition_attach_all', '1') + ORDER BY x + PARTITION BY p; +INSERT INTO replicated_partition_attach_all VALUES (1, 1), (2, 2), (3, 3); + +ALTER TABLE replicated_partition_attach_all DETACH PARTITION ALL; +SELECT * FROM replicated_partition_attach_all ORDER BY x; +ALTER TABLE replicated_partition_attach_all ATTACH PARTITION ALL; +SELECT * FROM replicated_partition_attach_all ORDER BY x; + +ALTER TABLE replicated_partition_attach_all DETACH PARTITION 1; +SELECT * FROM replicated_partition_attach_all ORDER BY x; +ALTER TABLE replicated_partition_attach_all ATTACH PARTITION ALL; +SELECT * FROM replicated_partition_attach_all ORDER BY x; + +ALTER TABLE replicated_partition_attach_all DROP PARTITION ALL; +SELECT * FROM replicated_partition_attach_all ORDER BY x; +ALTER TABLE replicated_partition_attach_all ATTACH PARTITION ALL; +SELECT * FROM replicated_partition_attach_all ORDER BY x; + +DROP TABLE partition_attach_all; +DROP TABLE replicated_partition_attach_all; diff --git a/parser/testdata/00753_alter_destination_for_storage_buffer/ast.json 
b/parser/testdata/00753_alter_destination_for_storage_buffer/ast.json new file mode 100644 index 000000000..64c686999 --- /dev/null +++ b/parser/testdata/00753_alter_destination_for_storage_buffer/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery dst_00753 (children 1)" + }, + { + "explain": " Identifier dst_00753" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001336, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00753_alter_destination_for_storage_buffer/metadata.json b/parser/testdata/00753_alter_destination_for_storage_buffer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00753_alter_destination_for_storage_buffer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00753_alter_destination_for_storage_buffer/query.sql b/parser/testdata/00753_alter_destination_for_storage_buffer/query.sql new file mode 100644 index 000000000..99da3154a --- /dev/null +++ b/parser/testdata/00753_alter_destination_for_storage_buffer/query.sql @@ -0,0 +1,32 @@ +DROP TABLE IF EXISTS dst_00753; +DROP TABLE IF EXISTS buffer_00753; +SET send_logs_level = 'error'; + +CREATE TABLE dst_00753 (x UInt64, y UInt64) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE buffer_00753 (x UInt64, y UInt64) ENGINE = Buffer(currentDatabase(), dst_00753, 1, 99999, 99999, 1, 1, 99999, 99999); + +INSERT INTO buffer_00753 VALUES (1, 100); +INSERT INTO buffer_00753 VALUES (2, 200); +INSERT INTO buffer_00753 VALUES (3, 300); +SELECT 'init'; +SELECT * FROM dst_00753 ORDER BY x; +SELECT '-'; +SELECT * FROM buffer_00753 ORDER BY x; + +ALTER TABLE dst_00753 DROP COLUMN x, MODIFY COLUMN y String, ADD COLUMN z String DEFAULT 'DEFZ'; + +INSERT INTO buffer_00753 VALUES (4, 400); +SELECT 'alt'; +SELECT * FROM dst_00753 ORDER BY y; +SELECT '-'; +SELECT * FROM buffer_00753 ORDER BY y; + +OPTIMIZE TABLE buffer_00753; +SELECT 'opt'; +SELECT * FROM dst_00753 ORDER BY y; +SELECT '-'; +SELECT * FROM buffer_00753 ORDER BY y; + +SET send_logs_level = 'warning'; +DROP TABLE IF EXISTS dst_00753; +DROP TABLE IF EXISTS buffer_00753; diff --git a/parser/testdata/00753_comment_columns_zookeeper/ast.json b/parser/testdata/00753_comment_columns_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00753_comment_columns_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00753_comment_columns_zookeeper/metadata.json b/parser/testdata/00753_comment_columns_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00753_comment_columns_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00753_comment_columns_zookeeper/query.sql b/parser/testdata/00753_comment_columns_zookeeper/query.sql new file mode 100644 index 000000000..6b785d2f3 --- /dev/null +++ b/parser/testdata/00753_comment_columns_zookeeper/query.sql @@ -0,0 +1,22 @@ +-- Tags: zookeeper + +DROP TABLE IF EXISTS check_comments; + +CREATE TABLE check_comments + ( + column_name1 UInt8 DEFAULT 1 COMMENT 'comment', + column_name2 UInt8 COMMENT 'non default comment' + ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00753/comments', 'r1') + ORDER BY column_name1; + +SHOW CREATE check_comments; +DESC check_comments; + +ALTER TABLE check_comments COMMENT COLUMN column_name1 'another comment'; + +SHOW CREATE check_comments; +DESC 
check_comments; + +SELECT * FROM system.columns WHERE table = 'check.comments' and database = currentDatabase(); + +DROP TABLE check_comments; diff --git a/parser/testdata/00753_distributed_system_columns_and_system_tables/ast.json b/parser/testdata/00753_distributed_system_columns_and_system_tables/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00753_distributed_system_columns_and_system_tables/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00753_distributed_system_columns_and_system_tables/metadata.json b/parser/testdata/00753_distributed_system_columns_and_system_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00753_distributed_system_columns_and_system_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00753_distributed_system_columns_and_system_tables/query.sql b/parser/testdata/00753_distributed_system_columns_and_system_tables/query.sql new file mode 100644 index 000000000..4cc330c70 --- /dev/null +++ b/parser/testdata/00753_distributed_system_columns_and_system_tables/query.sql @@ -0,0 +1,13 @@ +-- Tags: distributed + +SELECT 'Check total_bytes/total_rows for Distributed'; +CREATE TABLE check_system_tables_null (key Int) Engine=Null(); +CREATE TABLE check_system_tables AS check_system_tables_null Engine=Distributed(test_shard_localhost, currentDatabase(), check_system_tables_null); +SYSTEM STOP DISTRIBUTED SENDS check_system_tables; +SELECT total_bytes, total_rows FROM system.tables WHERE database = currentDatabase() AND name = 'check_system_tables'; +INSERT INTO check_system_tables SELECT * FROM numbers(1) SETTINGS prefer_localhost_replica=0; +SELECT total_bytes>0, total_rows FROM system.tables WHERE database = currentDatabase() AND name = 'check_system_tables'; +SYSTEM FLUSH DISTRIBUTED check_system_tables; +SELECT total_bytes, total_rows FROM system.tables WHERE database = currentDatabase() AND name = 'check_system_tables'; +DROP TABLE check_system_tables_null; +DROP TABLE check_system_tables; diff --git a/parser/testdata/00753_quantile_format/ast.json b/parser/testdata/00753_quantile_format/ast.json new file mode 100644 index 000000000..9b1a61eec --- /dev/null +++ b/parser/testdata/00753_quantile_format/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery datetime (children 1)" + }, + { + "explain": " Identifier datetime" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001053642, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00753_quantile_format/metadata.json b/parser/testdata/00753_quantile_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00753_quantile_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00753_quantile_format/query.sql b/parser/testdata/00753_quantile_format/query.sql new file mode 100644 index 000000000..4d599b066 --- /dev/null +++ b/parser/testdata/00753_quantile_format/query.sql @@ -0,0 +1,42 @@ +DROP TABLE IF EXISTS datetime; + +CREATE TABLE datetime (d DateTime('UTC')) ENGINE = Memory; +INSERT INTO datetime(d) VALUES(toDateTime('2016-06-15 23:00:00', 'UTC')); + +SELECT quantile(0.2)(d) FROM datetime; +SELECT quantiles(0.2)(d) FROM datetime; + +SELECT quantileDeterministic(0.2)(d, 1) FROM datetime; +SELECT quantilesDeterministic(0.2)(d, 1) FROM datetime; + +SELECT quantileExact(0.2)(d) FROM 
datetime; +SELECT quantilesExact(0.2)(d) FROM datetime; + +SELECT quantileExactWeighted(0.2)(d, 1) FROM datetime; +SELECT quantilesExactWeighted(0.2)(d, 1) FROM datetime; + +SELECT quantileInterpolatedWeighted(0.2)(d, 1) FROM datetime; +SELECT quantilesInterpolatedWeighted(0.2)(d, 1) FROM datetime; + +SELECT quantileExactWeightedInterpolated(0.2)(d, 1) FROM datetime; +SELECT quantilesExactWeightedInterpolated(0.2)(d, 1) FROM datetime; + +SELECT quantileTiming(0.2)(d) FROM datetime; +SELECT quantilesTiming(0.2)(d) FROM datetime; + +SELECT quantileTimingWeighted(0.2)(d, 1) FROM datetime; +SELECT quantilesTimingWeighted(0.2)(d, 1) FROM datetime; + +SELECT quantileTDigest(0.2)(d) FROM datetime; +SELECT quantilesTDigest(0.2)(d) FROM datetime; + +SELECT quantileTDigestWeighted(0.2)(d, 1) FROM datetime; +SELECT quantilesTDigestWeighted(0.2)(d, 1) FROM datetime; + +SELECT quantileBFloat16(0.2)(d) FROM datetime; +SELECT quantilesBFloat16(0.2)(d) FROM datetime; + +SELECT quantileBFloat16Weighted(0.2)(d, 1) FROM datetime; +SELECT quantilesBFloat16Weighted(0.2)(d, 1) FROM datetime; + +DROP TABLE datetime; diff --git a/parser/testdata/00753_system_columns_and_system_tables_long/ast.json b/parser/testdata/00753_system_columns_and_system_tables_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00753_system_columns_and_system_tables_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00753_system_columns_and_system_tables_long/metadata.json b/parser/testdata/00753_system_columns_and_system_tables_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00753_system_columns_and_system_tables_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00753_system_columns_and_system_tables_long/query.sql b/parser/testdata/00753_system_columns_and_system_tables_long/query.sql new file mode 100644 index 000000000..c416a4541 --- /dev/null +++ b/parser/testdata/00753_system_columns_and_system_tables_long/query.sql @@ -0,0 +1,162 @@ +-- Tags: long, no-object-storage, no-random-merge-tree-settings +SET output_format_pretty_row_numbers = 0; + +DROP TABLE IF EXISTS check_system_tables; + +-- Check MergeTree declaration in new format +CREATE TABLE check_system_tables + ( + name1 UInt8, + name2 UInt8, + name3 UInt8 + ) ENGINE = MergeTree() + ORDER BY name1 + PARTITION BY name2 + SAMPLE BY name1 + SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key = false, ratio_of_defaults_for_sparse_serialization = 1, serialization_info_version = 'basic', auto_statistics_types = ''; + +SELECT name, partition_key, sorting_key, primary_key, sampling_key, storage_policy, total_rows +FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase() +FORMAT PrettyCompactNoEscapes; + +SELECT name, is_in_partition_key, is_in_sorting_key, is_in_primary_key, is_in_sampling_key +FROM system.columns WHERE table = 'check_system_tables' AND database = currentDatabase() +FORMAT PrettyCompactNoEscapes; + +INSERT INTO check_system_tables VALUES (1, 1, 1); +SELECT total_bytes_uncompressed, total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); + +DROP TABLE IF EXISTS check_system_tables; + +-- Check VersionedCollapsingMergeTree +CREATE TABLE check_system_tables + ( + date Date, + value String, + version UInt64, + sign Int8 + ) ENGINE = VersionedCollapsingMergeTree(sign, version) + PARTITION BY date + ORDER BY 
date + SETTINGS compress_marks=false, compress_primary_key=false, auto_statistics_types = ''; + +SELECT name, partition_key, sorting_key, primary_key, sampling_key +FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase() +FORMAT PrettyCompactNoEscapes; + +SELECT name, is_in_partition_key, is_in_sorting_key, is_in_primary_key, is_in_sampling_key +FROM system.columns WHERE table = 'check_system_tables' AND database = currentDatabase() +FORMAT PrettyCompactNoEscapes; + +DROP TABLE IF EXISTS check_system_tables; + +-- Check MergeTree declaration in old format +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE check_system_tables + ( + Event Date, + UserId UInt32, + Counter UInt32 + ) ENGINE = MergeTree(Event, intHash32(UserId), (Counter, Event, intHash32(UserId)), 8192); + +SELECT name, partition_key, sorting_key, primary_key, sampling_key +FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase() +FORMAT PrettyCompactNoEscapes; + +SELECT name, is_in_partition_key, is_in_sorting_key, is_in_primary_key, is_in_sampling_key +FROM system.columns WHERE table = 'check_system_tables' AND database = currentDatabase() +FORMAT PrettyCompactNoEscapes; + +DROP TABLE IF EXISTS check_system_tables; + +SELECT 'Check total_bytes/total_rows for TinyLog'; +CREATE TABLE check_system_tables (key UInt8) ENGINE = TinyLog(); +SELECT total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +INSERT INTO check_system_tables VALUES (1); +SELECT total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +DROP TABLE check_system_tables; + +SELECT 'Check total_bytes/total_rows for Log'; +CREATE TABLE check_system_tables (key UInt8) ENGINE = Log(); +SELECT total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +INSERT INTO check_system_tables VALUES (1); +SELECT total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +DROP TABLE check_system_tables; + +SELECT 'Check total_bytes/total_rows for StripeLog'; +CREATE TABLE check_system_tables (key UInt8) ENGINE = StripeLog(); +SELECT total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +INSERT INTO check_system_tables VALUES (1); +SELECT total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +DROP TABLE check_system_tables; + +SELECT 'Check total_bytes/total_rows for Memory'; +CREATE TABLE check_system_tables (key UInt16) ENGINE = Memory(); +SELECT total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +-- it will take 130 bytes, 2 + padding left+right (64x2) +INSERT INTO check_system_tables VALUES (1); +SELECT total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +DROP TABLE check_system_tables; + +SELECT 'Check total_bytes/total_rows for Buffer'; +DROP TABLE IF EXISTS check_system_tables; +DROP TABLE IF EXISTS check_system_tables_null; +CREATE TABLE check_system_tables_null (key UInt16) ENGINE = Null(); +CREATE TABLE check_system_tables (key UInt16) ENGINE = Buffer( + currentDatabase(), + check_system_tables_null, + 2, + 0, 100, /* min_time /max_time */ + 100, 100, /* min_rows /max_rows */ + 0, 1e6 /* min_bytes/max_bytes */ +); +SELECT total_bytes, total_rows FROM 
system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +INSERT INTO check_system_tables SELECT * FROM numbers_mt(50); +SELECT total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); + +SELECT 'Check lifetime_bytes/lifetime_rows for Buffer'; +SELECT lifetime_bytes, lifetime_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +OPTIMIZE TABLE check_system_tables; -- flush +SELECT lifetime_bytes, lifetime_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +INSERT INTO check_system_tables SELECT * FROM numbers_mt(50); +SELECT lifetime_bytes, lifetime_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +OPTIMIZE TABLE check_system_tables; -- flush +SELECT lifetime_bytes, lifetime_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +INSERT INTO check_system_tables SELECT * FROM numbers_mt(101); -- direct block write (due to min_rows exceeded) +SELECT lifetime_bytes, lifetime_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +DROP TABLE check_system_tables; +DROP TABLE check_system_tables_null; + +SELECT 'Check total_bytes/total_rows for Set'; +CREATE TABLE check_system_tables Engine=Set() AS SELECT * FROM numbers(50); +SELECT total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +INSERT INTO check_system_tables SELECT number+50 FROM numbers(50); +SELECT total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +DROP TABLE check_system_tables; + +SELECT 'Check total_bytes/total_rows for Join'; +CREATE TABLE check_system_tables Engine=Join(ANY, LEFT, number) AS SELECT * FROM numbers(50); +SELECT total_bytes BETWEEN 5000 AND 15000, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +INSERT INTO check_system_tables SELECT number+50 FROM numbers(50); +SELECT total_bytes BETWEEN 5000 AND 15000, total_rows FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +DROP TABLE check_system_tables; + +-- Build MergeTree table for Materialized view +CREATE TABLE check_system_tables + ( + name1 UInt8, + name2 UInt8, + name3 UInt8 + ) ENGINE = MergeTree() + ORDER BY name1 + PARTITION BY name2 + SAMPLE BY name1 + SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key = false, ratio_of_defaults_for_sparse_serialization = 1; + +SELECT 'Check total_uncompressed_bytes/total_bytes/total_rows for Materialized views'; +CREATE MATERIALIZED VIEW check_system_tables_mv ENGINE = MergeTree() ORDER BY name2 AS SELECT name1, name2, name3 FROM check_system_tables; +SELECT total_bytes_uncompressed, total_bytes, total_rows FROM system.tables WHERE name = 'check_system_tables_mv' AND database = currentDatabase(); +INSERT INTO check_system_tables VALUES (1, 1, 1); +SELECT total_bytes_uncompressed > 0, total_bytes > 0, total_rows FROM system.tables WHERE name = 'check_system_tables_mv' AND database = currentDatabase(); +DROP TABLE check_system_tables_mv; +DROP TABLE check_system_tables; diff --git a/parser/testdata/00753_with_with_single_alias/ast.json b/parser/testdata/00753_with_with_single_alias/ast.json new file mode 100644 index 000000000..3007e7b72 --- /dev/null +++ b/parser/testdata/00753_with_with_single_alias/ast.json @@ 
-0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier dummy (alias myName)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier myName" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001089799, + "rows_read": 11, + "bytes_read": 434 + } +} diff --git a/parser/testdata/00753_with_with_single_alias/metadata.json b/parser/testdata/00753_with_with_single_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00753_with_with_single_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00753_with_with_single_alias/query.sql b/parser/testdata/00753_with_with_single_alias/query.sql new file mode 100644 index 000000000..ff3791409 --- /dev/null +++ b/parser/testdata/00753_with_with_single_alias/query.sql @@ -0,0 +1,2 @@ +WITH dummy AS myName SELECT myName FROM system.one; +WITH dummy AS myName SELECT myName + 1 FROM system.one; diff --git a/parser/testdata/00754_alter_modify_column_partitions/ast.json b/parser/testdata/00754_alter_modify_column_partitions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00754_alter_modify_column_partitions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00754_alter_modify_column_partitions/metadata.json b/parser/testdata/00754_alter_modify_column_partitions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00754_alter_modify_column_partitions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00754_alter_modify_column_partitions/query.sql b/parser/testdata/00754_alter_modify_column_partitions/query.sql new file mode 100644 index 000000000..4aa7ab8ad --- /dev/null +++ b/parser/testdata/00754_alter_modify_column_partitions/query.sql @@ -0,0 +1,24 @@ +-- check ALTER MODIFY COLUMN with partitions + +SET send_logs_level = 'fatal'; + +DROP TABLE IF EXISTS alter_column; + +CREATE TABLE alter_column(x UInt32, y Int32) ENGINE MergeTree PARTITION BY x ORDER BY x; +INSERT INTO alter_column (x, y) SELECT number AS x, -number AS y FROM system.numbers LIMIT 50; + +SELECT '*** Check SHOW CREATE TABLE ***'; +SHOW CREATE TABLE alter_column; + +SELECT '*** Check parts ***'; +SELECT * FROM alter_column ORDER BY _part; + +ALTER TABLE alter_column MODIFY COLUMN y Int64; + +SELECT '*** Check SHOW CREATE TABLE after ALTER MODIFY ***'; +SHOW CREATE TABLE alter_column; + +SELECT '*** Check parts after ALTER MODIFY ***'; +SELECT * FROM alter_column ORDER BY _part; + +DROP TABLE alter_column; diff --git a/parser/testdata/00754_alter_modify_order_by/ast.json b/parser/testdata/00754_alter_modify_order_by/ast.json new file mode 100644 index 000000000..b6a26573b --- /dev/null +++ b/parser/testdata/00754_alter_modify_order_by/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001290675, + 
"rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00754_alter_modify_order_by/metadata.json b/parser/testdata/00754_alter_modify_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00754_alter_modify_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00754_alter_modify_order_by/query.sql b/parser/testdata/00754_alter_modify_order_by/query.sql new file mode 100644 index 000000000..ece3cfdc0 --- /dev/null +++ b/parser/testdata/00754_alter_modify_order_by/query.sql @@ -0,0 +1,46 @@ +SET send_logs_level = 'fatal'; +SET optimize_on_insert = 0; + +DROP TABLE IF EXISTS no_order; +CREATE TABLE no_order(a UInt32, b UInt32) ENGINE = MergeTree ORDER BY tuple(); +ALTER TABLE no_order MODIFY ORDER BY (a); -- { serverError BAD_ARGUMENTS} + +DROP TABLE no_order; + +DROP TABLE IF EXISTS old_style; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE old_style(d Date, x UInt32) ENGINE MergeTree(d, x, 8192); +ALTER TABLE old_style ADD COLUMN y UInt32, MODIFY ORDER BY (x, y); -- { serverError BAD_ARGUMENTS} +DROP TABLE old_style; + +DROP TABLE IF EXISTS summing; +CREATE TABLE summing(x UInt32, y UInt32, val UInt32) ENGINE SummingMergeTree ORDER BY (x, y); + +/* Can't add an expression with existing column to ORDER BY. */ +ALTER TABLE summing MODIFY ORDER BY (x, y, -val); -- { serverError BAD_ARGUMENTS} + +/* Can't add an expression with existing column to ORDER BY. */ +ALTER TABLE summing ADD COLUMN z UInt32 DEFAULT x + 1, MODIFY ORDER BY (x, y, -z); -- { serverError BAD_ARGUMENTS} + +/* Can't add nonexistent column to ORDER BY. */ +ALTER TABLE summing MODIFY ORDER BY (x, y, nonexistent); -- { serverError UNKNOWN_IDENTIFIER} + +/* Can't modyfy ORDER BY so that it is no longer a prefix of the PRIMARY KEY. */ +ALTER TABLE summing MODIFY ORDER BY x; -- { serverError BAD_ARGUMENTS} + +ALTER TABLE summing ADD COLUMN z UInt32 AFTER y, MODIFY ORDER BY (x, y, -z); + +INSERT INTO summing(x, y, z, val) values (1, 2, 0, 10), (1, 2, 1, 30), (1, 2, 2, 40); + +SELECT '*** Check that the parts are sorted according to the new key. ***'; +SELECT * FROM summing; + +INSERT INTO summing(x, y, z, val) values (1, 2, 0, 20), (1, 2, 2, 50); + +SELECT '*** Check that the rows are collapsed according to the new key. 
***'; +SELECT * FROM summing FINAL ORDER BY x, y, z; + +SELECT '*** Check SHOW CREATE TABLE ***'; +SHOW CREATE TABLE summing; + +DROP TABLE summing; diff --git a/parser/testdata/00754_alter_modify_order_by_replicated_zookeeper_long/ast.json b/parser/testdata/00754_alter_modify_order_by_replicated_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00754_alter_modify_order_by_replicated_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00754_alter_modify_order_by_replicated_zookeeper_long/metadata.json b/parser/testdata/00754_alter_modify_order_by_replicated_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00754_alter_modify_order_by_replicated_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00754_alter_modify_order_by_replicated_zookeeper_long/query.sql b/parser/testdata/00754_alter_modify_order_by_replicated_zookeeper_long/query.sql new file mode 100644 index 000000000..a4fd1b581 --- /dev/null +++ b/parser/testdata/00754_alter_modify_order_by_replicated_zookeeper_long/query.sql @@ -0,0 +1,59 @@ +-- Tags: long, replica, no-replicated-database, no-shared-merge-tree +-- Tag no-replicated-database: Old syntax is not allowed +-- Tag no-shared-merge-tree: old syntax is not supported; an additional test covers the new syntax + +SET optimize_on_insert = 0; + +SET send_logs_level = 'fatal'; + +DROP TABLE IF EXISTS old_style; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE old_style(d Date, x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00754/old_style', 'r1', d, x, 8192); +ALTER TABLE old_style ADD COLUMN y UInt32, MODIFY ORDER BY (x, y); -- { serverError BAD_ARGUMENTS } +DROP TABLE old_style; + +DROP TABLE IF EXISTS summing_r1; +DROP TABLE IF EXISTS summing_r2; +CREATE TABLE summing_r1(x UInt32, y UInt32, val UInt32) ENGINE ReplicatedSummingMergeTree('/clickhouse/tables/{database}/test_00754/summing', 'r1') ORDER BY (x, y); +CREATE TABLE summing_r2(x UInt32, y UInt32, val UInt32) ENGINE ReplicatedSummingMergeTree('/clickhouse/tables/{database}/test_00754/summing', 'r2') ORDER BY (x, y); + +/* Can't add an expression with existing column to ORDER BY. */ +ALTER TABLE summing_r1 MODIFY ORDER BY (x, y, -val); -- { serverError BAD_ARGUMENTS } + +/* Can't add an expression with existing column to ORDER BY. */ +ALTER TABLE summing_r1 ADD COLUMN z UInt32 DEFAULT x + 1, MODIFY ORDER BY (x, y, -z); -- { serverError BAD_ARGUMENTS } + +/* Can't add nonexistent column to ORDER BY. */ +ALTER TABLE summing_r1 MODIFY ORDER BY (x, y, nonexistent); -- { serverError UNKNOWN_IDENTIFIER } + +/* Can't modify ORDER BY so that it is no longer a prefix of the PRIMARY KEY. */ +ALTER TABLE summing_r1 MODIFY ORDER BY x; -- { serverError BAD_ARGUMENTS } + +ALTER TABLE summing_r1 ADD COLUMN z UInt32 AFTER y, MODIFY ORDER BY (x, y, -z); + +INSERT INTO summing_r1(x, y, z, val) values (1, 2, 0, 10), (1, 2, 1, 30), (1, 2, 2, 40); +SYSTEM SYNC REPLICA summing_r2; + +SELECT '*** Check that the parts are sorted according to the new key. ***'; +SELECT * FROM summing_r2; + +INSERT INTO summing_r1(x, y, z, val) values (1, 2, 0, 20), (1, 2, 2, 50); +SYSTEM SYNC REPLICA summing_r2; + +SELECT '*** Check that the rows are collapsed according to the new key. 
***'; +SELECT * FROM summing_r2 FINAL ORDER BY x, y, z; + +SELECT '*** Check SHOW CREATE TABLE ***'; +SHOW CREATE TABLE summing_r2; + +DETACH TABLE summing_r2; +ALTER TABLE summing_r1 ADD COLUMN t UInt32 AFTER z, MODIFY ORDER BY (x, y, t * t) SETTINGS replication_alter_partitions_sync = 2; -- { serverError UNFINISHED } +ATTACH TABLE summing_r2; + +SYSTEM SYNC REPLICA summing_r2; + +SELECT '*** Check SHOW CREATE TABLE after offline ALTER ***'; +SHOW CREATE TABLE summing_r2; + +DROP TABLE summing_r1; +DROP TABLE summing_r2; diff --git a/parser/testdata/00754_first_significant_subdomain_more/ast.json b/parser/testdata/00754_first_significant_subdomain_more/ast.json new file mode 100644 index 000000000..dafa5edbb --- /dev/null +++ b/parser/testdata/00754_first_significant_subdomain_more/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function firstSignificantSubdomain (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['http:\/\/usa.gov.com\/cgi-bin\/yabb.pl?password=qwerty', 'https:\/\/www2.pentagon.mil.net\/index.phtml', 'ftp:\/\/stanford.edu.org\/~ivanov\/phd-thesis.SHTM']" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001224939, + "rows_read": 9, + "bytes_read": 514 + } +} diff --git a/parser/testdata/00754_first_significant_subdomain_more/metadata.json b/parser/testdata/00754_first_significant_subdomain_more/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00754_first_significant_subdomain_more/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00754_first_significant_subdomain_more/query.sql b/parser/testdata/00754_first_significant_subdomain_more/query.sql new file mode 100644 index 000000000..32a4c24fb --- /dev/null +++ b/parser/testdata/00754_first_significant_subdomain_more/query.sql @@ -0,0 +1 @@ +SELECT firstSignificantSubdomain(arrayJoin(['http://usa.gov.com/cgi-bin/yabb.pl?password=qwerty', 'https://www2.pentagon.mil.net/index.phtml', 'ftp://stanford.edu.org/~ivanov/phd-thesis.SHTM'])); diff --git a/parser/testdata/00755_avg_value_size_hint_passing/ast.json b/parser/testdata/00755_avg_value_size_hint_passing/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00755_avg_value_size_hint_passing/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00755_avg_value_size_hint_passing/metadata.json b/parser/testdata/00755_avg_value_size_hint_passing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00755_avg_value_size_hint_passing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00755_avg_value_size_hint_passing/query.sql b/parser/testdata/00755_avg_value_size_hint_passing/query.sql new file mode 100644 index 000000000..7aaff3757 --- /dev/null +++ b/parser/testdata/00755_avg_value_size_hint_passing/query.sql @@ -0,0 +1,13 @@ +-- Tags: no-parallel-replicas + +DROP TABLE IF EXISTS size_hint; +CREATE TABLE size_hint (s Array(String)) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 1000, 
index_granularity_bytes = '10Mi'; + +SET max_block_size = 1000; +SET max_memory_usage = 1000000000; +INSERT INTO size_hint SELECT arrayMap(x -> 'Hello', range(1000)) FROM numbers(10000); + +SET max_memory_usage = 105000000, max_threads = 2; +SELECT count(), sum(length(s)) FROM size_hint; + +DROP TABLE size_hint; diff --git a/parser/testdata/00756_power_alias/ast.json b/parser/testdata/00756_power_alias/ast.json new file mode 100644 index 000000000..dd882d42d --- /dev/null +++ b/parser/testdata/00756_power_alias/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function pow (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function POW (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function power (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function POWER (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001178041, + "rows_read": 20, + "bytes_read": 701 + } +} diff --git a/parser/testdata/00756_power_alias/metadata.json b/parser/testdata/00756_power_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00756_power_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00756_power_alias/query.sql b/parser/testdata/00756_power_alias/query.sql new file mode 100644 index 000000000..e8f543ffb --- /dev/null +++ b/parser/testdata/00756_power_alias/query.sql @@ -0,0 +1 @@ +SELECT pow(3, 2), POW(3, 2), power(3, 2), POWER(3, 2); diff --git a/parser/testdata/00757_enum_defaults/ast.json b/parser/testdata/00757_enum_defaults/ast.json new file mode 100644 index 000000000..d508141b5 --- /dev/null +++ b/parser/testdata/00757_enum_defaults/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery auto_assign_enum (children 1)" + }, + { + "explain": " Identifier auto_assign_enum" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00155601, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/00757_enum_defaults/metadata.json b/parser/testdata/00757_enum_defaults/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00757_enum_defaults/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00757_enum_defaults/query.sql b/parser/testdata/00757_enum_defaults/query.sql new file mode 100644 index 000000000..d69ba9ffc --- /dev/null +++ b/parser/testdata/00757_enum_defaults/query.sql @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS auto_assign_enum; +DROP TABLE IF EXISTS auto_assign_enum1; +DROP TABLE IF EXISTS auto_assign_enum2; +DROP TABLE IF EXISTS auto_assign_enum3; + +CREATE TABLE auto_assign_enum (x enum('a', 'b')) ENGINE=MergeTree() order by x; 
+INSERT INTO auto_assign_enum VALUES('a'), ('b'); +select * from auto_assign_enum; +select CAST(x, 'Int8') from auto_assign_enum; +select * from auto_assign_enum where x = 1; + +CREATE TABLE auto_assign_enum1 (x enum('a' = -1000, 'b')) ENGINE=MergeTree() order by x; +INSERT INTO auto_assign_enum1 VALUES('a'), ('b'); +select * from auto_assign_enum1; +select CAST(x, 'Int16') from auto_assign_enum1; +select * from auto_assign_enum1 where x = -999; + +CREATE TABLE auto_assign_enum2 (x enum('a' = -1000, 'b', 'c' = -99)) ENGINE=MergeTree() order by x; -- { serverError UNEXPECTED_AST_STRUCTURE } + +CREATE TABLE auto_assign_enum2 (x Enum8( + '00' = -128 ,'01','02','03','04','05','06','07','08','09','0A','0B','0C','0D','0E','0F', + '10','11','12','13','14','15','16','17','18','19','1A','1B','1C','1D','1E','1F', + '20','21','22','23','24','25','26','27','28','29','2A','2B','2C','2D','2E','2F', + '30','31','32','33','34','35','36','37','38','39','3A','3B','3C','3D','3E','3F', + '40','41','42','43','44','45','46','47','48','49','4A','4B','4C','4D','4E','4F', + '50','51','52','53','54','55','56','57','58','59','5A','5B','5C','5D','5E','5F', + '60','61','62','63','64','65','66','67','68','69','6A','6B','6C','6D','6E','6F', + '70','71','72','73','74','75','76','77','78','79','7A','7B','7C','7D','7E','7F' + )) ENGINE=MergeTree() order by x; + +INSERT INTO auto_assign_enum2 VALUES('7F'); +select CAST(x, 'Int8') from auto_assign_enum2; + +CREATE TABLE auto_assign_enum3 (x enum('a', 'b', NULL)) ENGINE=MergeTree() order by x; -- { serverError UNEXPECTED_AST_STRUCTURE } + +DROP TABLE auto_assign_enum; +DROP TABLE auto_assign_enum1; +DROP TABLE auto_assign_enum2; diff --git a/parser/testdata/00757_enum_defaults_const/ast.json b/parser/testdata/00757_enum_defaults_const/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00757_enum_defaults_const/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00757_enum_defaults_const/metadata.json b/parser/testdata/00757_enum_defaults_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00757_enum_defaults_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00757_enum_defaults_const/query.sql b/parser/testdata/00757_enum_defaults_const/query.sql new file mode 100644 index 000000000..048c9dee8 --- /dev/null +++ b/parser/testdata/00757_enum_defaults_const/query.sql @@ -0,0 +1,3 @@ +SET enable_analyzer=0; +select os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS; +select toNullable(os_name) AS os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS; diff --git a/parser/testdata/00757_enum_defaults_const_analyzer/ast.json b/parser/testdata/00757_enum_defaults_const_analyzer/ast.json new file mode 100644 index 000000000..163da531b --- /dev/null +++ b/parser/testdata/00757_enum_defaults_const_analyzer/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001302531, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00757_enum_defaults_const_analyzer/metadata.json b/parser/testdata/00757_enum_defaults_const_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/00757_enum_defaults_const_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00757_enum_defaults_const_analyzer/query.sql b/parser/testdata/00757_enum_defaults_const_analyzer/query.sql new file mode 100644 index 000000000..c202ed630 --- /dev/null +++ b/parser/testdata/00757_enum_defaults_const_analyzer/query.sql @@ -0,0 +1,3 @@ +SET enable_analyzer=1; +select os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS; +select toNullable(os_name) AS os_name, count() from (SELECT CAST('iphone' AS Enum8('iphone' = 1, 'android' = 2)) AS os_name) group by os_name WITH TOTALS; diff --git a/parser/testdata/00758_array_reverse/ast.json b/parser/testdata/00758_array_reverse/ast.json new file mode 100644 index 000000000..39056f507 --- /dev/null +++ b/parser/testdata/00758_array_reverse/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function reverse (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[NULL, '\\0']" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001382457, + "rows_read": 7, + "bytes_read": 270 + } +} diff --git a/parser/testdata/00758_array_reverse/metadata.json b/parser/testdata/00758_array_reverse/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00758_array_reverse/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00758_array_reverse/query.sql b/parser/testdata/00758_array_reverse/query.sql new file mode 100644 index 000000000..c6a6c66cc --- /dev/null +++ b/parser/testdata/00758_array_reverse/query.sql @@ -0,0 +1,15 @@ +SELECT reverse([NULL, '\0']); +SELECT reverse([NULL, 123, NULL]); +SELECT reverse([toFixedString('Hello', 5), NULL]); +SELECT reverse(['Hello', 'world']); +SELECT reverse(['Hello', NULL, 'world']); +SELECT reverse([NULL, NULL, NULL]); +SELECT reverse([[], [''], [' ']]); +SELECT reverse([[], [''], [NULL]]); +SELECT reverse([(1, 'Hello', []), (nan, 'World', [NULL])]); +SELECT reverse(NULL); +SELECT reverse([]); +SELECT reverse([[[[]]]]); + +SET send_logs_level = 'fatal'; +SELECT '[RE7', ( SELECT '\0' ) AS riwwq, ( SELECT reverse([( SELECT bitTestAll(NULL) ) , ( SELECT '\0' ) AS ddfweeuy]) ) AS xuvv, '', ( SELECT * FROM file() ) AS wqgdswyc, ( SELECT * FROM file() ); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/parser/testdata/00759_kodieg/ast.json b/parser/testdata/00759_kodieg/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00759_kodieg/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00759_kodieg/metadata.json b/parser/testdata/00759_kodieg/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00759_kodieg/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00759_kodieg/query.sql b/parser/testdata/00759_kodieg/query.sql new file mode 100644 index 000000000..9cbe2a0cd --- /dev/null +++ b/parser/testdata/00759_kodieg/query.sql @@ -0,0 +1,4 @@ +SELECT + [1, 2, 3, 1, 3] AS a, + indexOf(arrayReverse(arraySlice(a, 1, -1)), 3) AS offset_from_right, + arraySlice(a, 
multiIf(offset_from_right = 0, 1, (length(a) - offset_from_right) + 1)); diff --git a/parser/testdata/00760_insert_json_with_defaults/ast.json b/parser/testdata/00760_insert_json_with_defaults/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00760_insert_json_with_defaults/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00760_insert_json_with_defaults/metadata.json b/parser/testdata/00760_insert_json_with_defaults/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00760_insert_json_with_defaults/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00760_insert_json_with_defaults/query.sql b/parser/testdata/00760_insert_json_with_defaults/query.sql new file mode 100644 index 000000000..549872583 --- /dev/null +++ b/parser/testdata/00760_insert_json_with_defaults/query.sql @@ -0,0 +1,43 @@ +-- Tags: no-fasttest + +SET input_format_defaults_for_omitted_fields=1; + +DROP TABLE IF EXISTS defaults; +CREATE TABLE defaults +( + x UInt32, + y UInt32, + a DEFAULT x + y, + b Float32 DEFAULT round(log(1 + x + y), 5), + c UInt32 DEFAULT 42, + e MATERIALIZED x + y, + f ALIAS x + y +) ENGINE = MergeTree ORDER BY x; + +INSERT INTO defaults FORMAT JSONEachRow {"x":1, "y":1}; + +INSERT INTO defaults (x, y) SELECT x, y FROM defaults LIMIT 1; + +INSERT INTO defaults FORMAT JSONEachRow {"x":2, "y":2, "c":2}; + +INSERT INTO defaults FORMAT JSONEachRow {"x":3, "y":3, "a":3, "b":3, "c":3}; + +INSERT INTO defaults FORMAT JSONEachRow {"x":4} {"y":5, "c":5} {"a":6, "b":6, "c":6}; + +SELECT * FROM defaults ORDER BY (x, y); + +ALTER TABLE defaults ADD COLUMN n Nested(a UInt64, b String); + +INSERT INTO defaults FORMAT JSONEachRow {"x":7, "y":7, "n.a":[1,2], "n.b":["a","b"]}; + +SELECT * FROM defaults WHERE x = 7 FORMAT JSONEachRow; + +ALTER TABLE defaults ADD COLUMN n.c Array(UInt8) DEFAULT arrayMap(x -> 0, n.a) AFTER n.a; + +INSERT INTO defaults FORMAT JSONEachRow {"x":8, "y":8, "n.a":[3,4], "n.b":["c","d"]}; + +INSERT INTO defaults FORMAT JSONEachRow {"x":9, "y":9}; + +SELECT * FROM defaults WHERE x > 7 ORDER BY x FORMAT JSONEachRow; + +DROP TABLE defaults; diff --git a/parser/testdata/00760_url_functions_overflow/ast.json b/parser/testdata/00760_url_functions_overflow/ast.json new file mode 100644 index 000000000..c9486cfa1 --- /dev/null +++ b/parser/testdata/00760_url_functions_overflow/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function extractURLParameter (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '?_'" + }, + { + "explain": " Literal '\\0_________________________________'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.00145762, + "rows_read": 8, + "bytes_read": 327 + } +} diff --git a/parser/testdata/00760_url_functions_overflow/metadata.json b/parser/testdata/00760_url_functions_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00760_url_functions_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00760_url_functions_overflow/query.sql b/parser/testdata/00760_url_functions_overflow/query.sql new file mode 100644 index 
000000000..f4ab4b555 --- /dev/null +++ b/parser/testdata/00760_url_functions_overflow/query.sql @@ -0,0 +1,6 @@ +SELECT extractURLParameter('?_', '\0_________________________________'); +SELECT extractURLParameter('?abc=def', 'abc\0def'); +SELECT extractURLParameter('?abc\0def=Hello', 'abc\0def'); +SELECT extractURLParameter('?_', '\0'); +SELECT extractURLParameter('ZiqSZeh?', '\0'); +SELECT 'Xx|sfF', match('', '\0'), [], ( SELECT cutURLParameter('C,Ai?X', '\0') ), '\0'; diff --git a/parser/testdata/00761_lower_utf8_bug/ast.json b/parser/testdata/00761_lower_utf8_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00761_lower_utf8_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00761_lower_utf8_bug/metadata.json b/parser/testdata/00761_lower_utf8_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00761_lower_utf8_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00761_lower_utf8_bug/query.sql b/parser/testdata/00761_lower_utf8_bug/query.sql new file mode 100644 index 000000000..a0ab55edc --- /dev/null +++ b/parser/testdata/00761_lower_utf8_bug/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + +SELECT lowerUTF8('\xF0') = lowerUTF8('\xF0'); diff --git a/parser/testdata/00762_date_comparsion/ast.json b/parser/testdata/00762_date_comparsion/ast.json new file mode 100644 index 000000000..f84d83f09 --- /dev/null +++ b/parser/testdata/00762_date_comparsion/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001451692, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00762_date_comparsion/metadata.json b/parser/testdata/00762_date_comparsion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00762_date_comparsion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00762_date_comparsion/query.sql b/parser/testdata/00762_date_comparsion/query.sql new file mode 100644 index 000000000..16e5b2354 --- /dev/null +++ b/parser/testdata/00762_date_comparsion/query.sql @@ -0,0 +1,17 @@ +SET send_logs_level = 'fatal'; + +select today() < 2018-11-14; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select toDate('2018-01-01') < '2018-11-14'; + +select toDate('2018-01-01') < '2018-01-01'; +select toDate('2018-01-01') == '2018-01-01'; +select toDate('2018-01-01') != '2018-01-01'; +select toDate('2018-01-01') < toDate('2018-01-01'); +select toDate('2018-01-01') == toDate('2018-01-01'); +select toDate('2018-01-01') != toDate('2018-01-01'); + +select toDate('2018-01-01') < 1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select toDate('2018-01-01') == 1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select toDate('2018-01-01') != 1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + + diff --git a/parser/testdata/00763_create_query_as_table_engine_bug/ast.json b/parser/testdata/00763_create_query_as_table_engine_bug/ast.json new file mode 100644 index 000000000..dd9fa785d --- /dev/null +++ b/parser/testdata/00763_create_query_as_table_engine_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001477189, + 
"rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/00763_create_query_as_table_engine_bug/metadata.json b/parser/testdata/00763_create_query_as_table_engine_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00763_create_query_as_table_engine_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00763_create_query_as_table_engine_bug/query.sql b/parser/testdata/00763_create_query_as_table_engine_bug/query.sql new file mode 100644 index 000000000..5d2d2c3d5 --- /dev/null +++ b/parser/testdata/00763_create_query_as_table_engine_bug/query.sql @@ -0,0 +1,7 @@ +drop table if exists t; +drop table if exists td; +create table t (val UInt32) engine = MergeTree order by val; +create table td engine = Distributed(test_shard_localhost, currentDatabase(), 't') as t; +select engine from system.tables where database = currentDatabase() and name = 'td'; +drop table if exists t; +drop table if exists td; diff --git a/parser/testdata/00765_locate/ast.json b/parser/testdata/00765_locate/ast.json new file mode 100644 index 000000000..4193829d2 --- /dev/null +++ b/parser/testdata/00765_locate/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001176219, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00765_locate/metadata.json b/parser/testdata/00765_locate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00765_locate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00765_locate/query.sql b/parser/testdata/00765_locate/query.sql new file mode 100644 index 000000000..3467ebd42 --- /dev/null +++ b/parser/testdata/00765_locate/query.sql @@ -0,0 +1,15 @@ +SET send_logs_level = 'fatal'; + +SELECT '-- negative tests'; +SELECT locate(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT locate(1, 'abc'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT locate('abc', 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT locate('abc', 'abc', 'abc'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT '-- test mysql compatibility setting'; +SELECT locate('abcabc', 'ca'); +SELECT locate('abcabc', 'ca') SETTINGS function_locate_has_mysql_compatible_argument_order = true; +SELECT locate('abcabc', 'ca') SETTINGS function_locate_has_mysql_compatible_argument_order = false; + +SELECT '-- the function name needs to be case-insensitive for historical reasons'; +SELECT LoCaTe('abcabc', 'ca'); diff --git a/parser/testdata/00765_sql_compatibility_aliases/ast.json b/parser/testdata/00765_sql_compatibility_aliases/ast.json new file mode 100644 index 000000000..fdb5446e8 --- /dev/null +++ b/parser/testdata/00765_sql_compatibility_aliases/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001156181, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00765_sql_compatibility_aliases/metadata.json b/parser/testdata/00765_sql_compatibility_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00765_sql_compatibility_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00765_sql_compatibility_aliases/query.sql 
b/parser/testdata/00765_sql_compatibility_aliases/query.sql new file mode 100644 index 000000000..995aaef9e --- /dev/null +++ b/parser/testdata/00765_sql_compatibility_aliases/query.sql @@ -0,0 +1,30 @@ +SET send_logs_level = 'fatal'; + +select lcase('FOO'); +select ucase('foo'); +select LOWER('Foo'); +select UPPER('Foo'); +select REPLACE('bar', 'r', 'z'); +select REGEXP_REPLACE('bar', '.', 'z'); +select SUBSTRING('foo', 1, 2); +select Substr('foo', 2); +select mid('foo', 3); +select IF(3>2, 1, 0); +select substring('foo' from 1 + 1); +select SUBSTRING('foo' FROM 2 FOR 1); +select left('foo', 2); +select LEFT('foo', 123); +select RIGHT('bar', 1); +select right('bar', 123); +select ltrim('') || rtrim('') || trim(''); +select ltrim(' foo'); +select RTRIM(' foo '); +select trim(TRAILING 'x' FROM 'xxfooxx'); +select Trim(LEADING 'ab' FROM 'abbafooabba'); +select TRIM(both 'ab' FROM 'abbafooabbafooabba'); +select trim(LEADING '*[]{}|\\' FROM '\\|[[[}}}*foo*'); +select DATE_DIFF(MONTH, toDate('2018-12-18'), toDate('2018-01-01')); +select DATE_DIFF(QQ, toDate('2018-12-18'), toDate('2018-01-01')); +select DATE_ADD(YEAR, 3, toDate('2018-01-01')); +select timestamp_sub(SQL_TSI_MONTH, 5, toDateTime('2018-12-18 01:02:03')); +select timestamp_ADD(toDate('2018-01-01'), INTERVAL 3 MONTH); diff --git a/parser/testdata/00779_all_right_join_max_block_size/ast.json b/parser/testdata/00779_all_right_join_max_block_size/ast.json new file mode 100644 index 000000000..74e61b1dd --- /dev/null +++ b/parser/testdata/00779_all_right_join_max_block_size/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001480251, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00779_all_right_join_max_block_size/metadata.json b/parser/testdata/00779_all_right_join_max_block_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00779_all_right_join_max_block_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00779_all_right_join_max_block_size/query.sql b/parser/testdata/00779_all_right_join_max_block_size/query.sql new file mode 100644 index 000000000..56d01dea3 --- /dev/null +++ b/parser/testdata/00779_all_right_join_max_block_size/query.sql @@ -0,0 +1,6 @@ +SET min_joined_block_size_bytes = 0; +SET max_block_size = 6; +SET query_plan_join_swap_table=false; +SET join_algorithm='hash'; + +SELECT blockSize() bs FROM (SELECT 1 s) js1 ALL RIGHT JOIN (SELECT arrayJoin([2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3]) s) js2 USING (s) GROUP BY bs ORDER BY bs; diff --git a/parser/testdata/00780_unaligned_array_join/ast.json b/parser/testdata/00780_unaligned_array_join/ast.json new file mode 100644 index 000000000..1d989cfd0 --- /dev/null +++ b/parser/testdata/00780_unaligned_array_join/ast.json @@ -0,0 +1,145 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Identifier arr1" + }, + { + "explain": " Identifier arr2" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " 
TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function range (alias arr1) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function range (alias arr2) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " ArrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier arr1 (alias x)" + }, + { + "explain": " Identifier arr2 (alias y)" + }, + { + "explain": " Set" + } + ], + + "rows": 41, + + "statistics": + { + "elapsed": 0.001397651, + "rows_read": 41, + "bytes_read": 1692 + } +} diff --git a/parser/testdata/00780_unaligned_array_join/metadata.json b/parser/testdata/00780_unaligned_array_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00780_unaligned_array_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00780_unaligned_array_join/query.sql b/parser/testdata/00780_unaligned_array_join/query.sql new file mode 100644 index 000000000..b46e9f31e --- /dev/null +++ b/parser/testdata/00780_unaligned_array_join/query.sql @@ -0,0 +1,2 @@ +SELECT number, arr1, arr2, x, y FROM (SELECT number, range(number % 2) AS arr1, range(number % 3) arr2 FROM system.numbers LIMIT 10) ARRAY JOIN arr1 AS x, arr2 AS y SETTINGS enable_unaligned_array_join = 1; +SELECT number, arr1, arr2, x, y FROM (SELECT number, range(number % 2) AS arr1, range(number % 3) arr2 FROM system.numbers LIMIT 10) LEFT ARRAY JOIN arr1 AS x, arr2 AS y SETTINGS enable_unaligned_array_join = 1; diff --git a/parser/testdata/00794_materialized_view_with_column_defaults/ast.json b/parser/testdata/00794_materialized_view_with_column_defaults/ast.json new file mode 100644 index 000000000..3af1c6361 --- /dev/null +++ b/parser/testdata/00794_materialized_view_with_column_defaults/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_view (children 1)" + }, + { + "explain": " Identifier table_view" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00197913, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00794_materialized_view_with_column_defaults/metadata.json b/parser/testdata/00794_materialized_view_with_column_defaults/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/00794_materialized_view_with_column_defaults/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00794_materialized_view_with_column_defaults/query.sql b/parser/testdata/00794_materialized_view_with_column_defaults/query.sql new file mode 100644 index 000000000..43dcb322f --- /dev/null +++ b/parser/testdata/00794_materialized_view_with_column_defaults/query.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS table_view; +DROP TABLE IF EXISTS source_table; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE source_table ( + date Date, + datetime DateTime, + zoneId UInt64, + test1 ALIAS zoneId == 1, + test2 DEFAULT zoneId * 3, + test3 MATERIALIZED zoneId * 5 +) ENGINE = MergeTree(date, (date, zoneId), 8192); + +CREATE MATERIALIZED VIEW table_view +ENGINE = MergeTree(date, (date, zoneId), 8192) +AS SELECT + date, + zoneId, + test1, + test2, + test3 +FROM source_table; + +INSERT INTO source_table (date, datetime, zoneId) VALUES ('2018-12-10', '2018-12-10 23:59:59', 1); + +SELECT * from table_view; + +DROP TABLE IF EXISTS table_view; +DROP TABLE IF EXISTS source_table; diff --git a/parser/testdata/00799_function_dry_run/ast.json b/parser/testdata/00799_function_dry_run/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00799_function_dry_run/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00799_function_dry_run/metadata.json b/parser/testdata/00799_function_dry_run/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00799_function_dry_run/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00799_function_dry_run/query.sql b/parser/testdata/00799_function_dry_run/query.sql new file mode 100644 index 000000000..bf6fb3de3 --- /dev/null +++ b/parser/testdata/00799_function_dry_run/query.sql @@ -0,0 +1,35 @@ +-- https://stackoverflow.com/questions/53416531/clickhouse-moving-average + +DROP TABLE IF EXISTS bm; + +CREATE TABLE bm (amount float, business_dttm DateTime) engine Log; + +INSERT INTO bm VALUES (0.3,'2018-11-19 13:00:00'), (0.3,'2018-11-19 13:05:00'), (0.4,'2018-11-19 13:10:00'), (0.5,'2018-11-19 13:15:00'), (0.6,'2018-11-19 13:20:00'), (0.7,'2018-11-19 13:25:00'), (0.8,'2018-11-19 13:30:00'), (0.9,'2018-11-19 13:45:00'), (0.5,'2018-11-19 13:50:00'); + +WITH + ( + SELECT arrayCumSum(groupArray(amount)) + FROM + ( + SELECT + amount + FROM bm + ORDER BY business_dttm + ) + ) AS arr, + 1 + rowNumberInAllBlocks() AS id, + 3 AS window_size +SELECT + amount, + business_dttm, + if(id < window_size, NULL, round(arr[id] - arr[id - window_size], 4)) AS moving_sum +FROM +( + SELECT + amount, + business_dttm + FROM bm + ORDER BY business_dttm +) ORDER BY business_dttm; + +DROP TABLE bm; diff --git a/parser/testdata/00800_function_java_hash/ast.json b/parser/testdata/00800_function_java_hash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00800_function_java_hash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00800_function_java_hash/metadata.json b/parser/testdata/00800_function_java_hash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00800_function_java_hash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00800_function_java_hash/query.sql b/parser/testdata/00800_function_java_hash/query.sql new file mode 100644 index 000000000..fc4a05575 --- /dev/null +++ 
b/parser/testdata/00800_function_java_hash/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-fasttest + +select javaHash(toInt8(123)); +select javaHash(toInt8(-123)); +select javaHash(toInt16(123)); +select javaHash(toInt16(-123)); +select javaHash(toInt32(123)); +select javaHash(toInt32(-123)); +select javaHash(toInt64(123)); +select javaHash(toInt64(-123)); +select javaHash(toInt64(12345678901)); +select javaHash(toInt64(-12345678901)); +select javaHash('abc'); +select javaHash('874293087'); +select javaHashUTF16LE(convertCharset('a1가', 'utf-8', 'utf-16le')); +select javaHashUTF16LE(convertCharset('가나다라마바사아자차카타파하', 'utf-8', 'utf-16le')); +select javaHashUTF16LE(convertCharset('FJKLDSJFIOLD_389159837589429', 'utf-8', 'utf-16le')); +select javaHashUTF16LE(convertCharset('𐐀𐐁𐐂𐐃𐐄', 'utf-8', 'utf-16le')); +select hiveHash('abc'); +select hiveHash('874293087'); diff --git a/parser/testdata/00800_low_cardinality_array_group_by_arg/ast.json b/parser/testdata/00800_low_cardinality_array_group_by_arg/ast.json new file mode 100644 index 000000000..4d1d3b1b9 --- /dev/null +++ b/parser/testdata/00800_low_cardinality_array_group_by_arg/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table1 (children 1)" + }, + { + "explain": " Identifier table1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001219751, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/00800_low_cardinality_array_group_by_arg/metadata.json b/parser/testdata/00800_low_cardinality_array_group_by_arg/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00800_low_cardinality_array_group_by_arg/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00800_low_cardinality_array_group_by_arg/query.sql b/parser/testdata/00800_low_cardinality_array_group_by_arg/query.sql new file mode 100644 index 000000000..3f4268cfc --- /dev/null +++ b/parser/testdata/00800_low_cardinality_array_group_by_arg/query.sql @@ -0,0 +1,34 @@ +DROP TABLE IF EXISTS table1; +DROP TABLE IF EXISTS table2; + +CREATE TABLE table1 +( +dt Date, +id Int32, +arr Array(LowCardinality(String)) +) ENGINE = MergeTree PARTITION BY toMonday(dt) +ORDER BY (dt, id) SETTINGS index_granularity = 8192; + +CREATE TABLE table2 +( +dt Date, +id Int32, +arr Array(LowCardinality(String)) +) ENGINE = MergeTree PARTITION BY toMonday(dt) +ORDER BY (dt, id) SETTINGS index_granularity = 8192; + +insert into table1 (dt, id, arr) values ('2019-01-14', 1, ['aaa']); +insert into table2 (dt, id, arr) values ('2019-01-14', 1, ['aaa','bbb','ccc']); + +select dt, id, arraySort(groupArrayArray(arr)) +from ( + select dt, id, arr from table1 + where dt = '2019-01-14' and id = 1 + UNION ALL + select dt, id, arr from table2 + where dt = '2019-01-14' and id = 1 +) +group by dt, id; + +DROP TABLE table1; +DROP TABLE table2; diff --git a/parser/testdata/00800_low_cardinality_distinct_numeric/ast.json b/parser/testdata/00800_low_cardinality_distinct_numeric/ast.json new file mode 100644 index 000000000..fa6a48253 --- /dev/null +++ b/parser/testdata/00800_low_cardinality_distinct_numeric/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001430763, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00800_low_cardinality_distinct_numeric/metadata.json 
b/parser/testdata/00800_low_cardinality_distinct_numeric/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00800_low_cardinality_distinct_numeric/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00800_low_cardinality_distinct_numeric/query.sql b/parser/testdata/00800_low_cardinality_distinct_numeric/query.sql new file mode 100644 index 000000000..8ba95ce7a --- /dev/null +++ b/parser/testdata/00800_low_cardinality_distinct_numeric/query.sql @@ -0,0 +1,7 @@ +set allow_suspicious_low_cardinality_types = 1; +drop table if exists lc_00800_2; +create table lc_00800_2 (val LowCardinality(UInt64)) engine = MergeTree order by val; +insert into lc_00800_2 select number % 123 from system.numbers limit 100000; +select distinct(val) from lc_00800_2 order by val; +drop table if exists lc_00800_2 +; diff --git a/parser/testdata/00800_low_cardinality_distributed_insert/ast.json b/parser/testdata/00800_low_cardinality_distributed_insert/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00800_low_cardinality_distributed_insert/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00800_low_cardinality_distributed_insert/metadata.json b/parser/testdata/00800_low_cardinality_distributed_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00800_low_cardinality_distributed_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00800_low_cardinality_distributed_insert/query.sql b/parser/testdata/00800_low_cardinality_distributed_insert/query.sql new file mode 100644 index 000000000..b79caaf1d --- /dev/null +++ b/parser/testdata/00800_low_cardinality_distributed_insert/query.sql @@ -0,0 +1,16 @@ +-- Tags: distributed + +SET distributed_foreground_insert = 1; + +DROP TABLE IF EXISTS low_cardinality; +DROP TABLE IF EXISTS low_cardinality_all; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE low_cardinality (d Date, x UInt32, s LowCardinality(String)) ENGINE = MergeTree(d, x, 8192); +CREATE TABLE low_cardinality_all (d Date, x UInt32, s LowCardinality(String)) ENGINE = Distributed(test_shard_localhost, currentDatabase(), low_cardinality, sipHash64(s)); + +INSERT INTO low_cardinality_all (d,x,s) VALUES ('2018-11-12',1,'123'); +SELECT s FROM low_cardinality_all; + +DROP TABLE IF EXISTS low_cardinality; +DROP TABLE IF EXISTS low_cardinality_all; diff --git a/parser/testdata/00800_low_cardinality_empty_array/ast.json b/parser/testdata/00800_low_cardinality_empty_array/ast.json new file mode 100644 index 000000000..555d229d1 --- /dev/null +++ b/parser/testdata/00800_low_cardinality_empty_array/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery lc_00800_1 (children 1)" + }, + { + "explain": " Identifier lc_00800_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001353523, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00800_low_cardinality_empty_array/metadata.json b/parser/testdata/00800_low_cardinality_empty_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00800_low_cardinality_empty_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00800_low_cardinality_empty_array/query.sql b/parser/testdata/00800_low_cardinality_empty_array/query.sql new file mode 100644 index 000000000..62d01b118 --- 
/dev/null +++ b/parser/testdata/00800_low_cardinality_empty_array/query.sql @@ -0,0 +1,7 @@ +drop table if exists lc_00800_1; +create table lc_00800_1 (names Array(LowCardinality(String))) engine=MergeTree order by tuple(); +insert into lc_00800_1 values ([]); +insert into lc_00800_1 select emptyArrayString(); +select * from lc_00800_1; +drop table if exists lc_00800_1; + diff --git a/parser/testdata/00800_low_cardinality_join/ast.json b/parser/testdata/00800_low_cardinality_join/ast.json new file mode 100644 index 000000000..3ab23752a --- /dev/null +++ b/parser/testdata/00800_low_cardinality_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001435779, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00800_low_cardinality_join/metadata.json b/parser/testdata/00800_low_cardinality_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00800_low_cardinality_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00800_low_cardinality_join/query.sql b/parser/testdata/00800_low_cardinality_join/query.sql new file mode 100644 index 000000000..a6c3b98fd --- /dev/null +++ b/parser/testdata/00800_low_cardinality_join/query.sql @@ -0,0 +1,31 @@ +set enable_analyzer = 1; +set joined_subquery_requires_alias = 0; + +select * from (select dummy as val from system.one) any left join (select dummy as val from system.one) using val order by all; +select * from (select toLowCardinality(dummy) as val from system.one) any left join (select dummy as val from system.one) using val order by all; +select * from (select dummy as val from system.one) any left join (select toLowCardinality(dummy) as val from system.one) using val order by all; +select * from (select toLowCardinality(dummy) as val from system.one) any left join (select toLowCardinality(dummy) as val from system.one) using val order by all; +select * from (select toLowCardinality(toNullable(dummy)) as val from system.one) any left join (select dummy as val from system.one) using val order by all; +select * from (select dummy as val from system.one) any left join (select toLowCardinality(toNullable(dummy)) as val from system.one) using val order by all; +select * from (select toLowCardinality(toNullable(dummy)) as val from system.one) any left join (select toLowCardinality(dummy) as val from system.one) using val order by all; +select * from (select toLowCardinality(dummy) as val from system.one) any left join (select toLowCardinality(toNullable(dummy)) as val from system.one) using val order by all; +select * from (select toLowCardinality(toNullable(dummy)) as val from system.one) any left join (select toLowCardinality(toNullable(dummy)) as val from system.one) using val order by all; +select '-'; +select * from (select dummy as val from system.one) any left join (select dummy as val from system.one) on val + 0 = val * 1 order by all; -- { serverError INVALID_JOIN_ON_EXPRESSION } +select * from (select dummy as val from system.one) any left join (select dummy as rval from system.one) on val + 0 = rval * 1 order by all; +select * from (select toLowCardinality(dummy) as val from system.one) any left join (select dummy as rval from system.one) on val + 0 = rval * 1 order by all; +select * from (select dummy as val from system.one) any left join (select toLowCardinality(dummy) as rval from system.one) on val + 0 = 
rval * 1 order by all; +select * from (select toLowCardinality(dummy) as val from system.one) any left join (select toLowCardinality(dummy) as rval from system.one) on val + 0 = rval * 1 order by all; +select * from (select toLowCardinality(toNullable(dummy)) as val from system.one) any left join (select dummy as rval from system.one) on val + 0 = rval * 1 order by all; +select * from (select dummy as val from system.one) any left join (select toLowCardinality(toNullable(dummy)) as rval from system.one) on val + 0 = rval * 1 order by all; +select * from (select toLowCardinality(toNullable(dummy)) as val from system.one) any left join (select toLowCardinality(dummy) as rval from system.one) on val + 0 = rval * 1 order by all; +select * from (select toLowCardinality(dummy) as val from system.one) any left join (select toLowCardinality(toNullable(dummy)) as rval from system.one) on val + 0 = rval * 1 order by all; +select * from (select toLowCardinality(toNullable(dummy)) as val from system.one) any left join (select toLowCardinality(toNullable(dummy)) as rval from system.one) on val + 0 = rval * 1 order by all; +select '-'; +select * from (select number as l from system.numbers limit 3) any left join (select number as r from system.numbers limit 3) on l + 1 = r * 1 order by all; +select * from (select toLowCardinality(number) as l from system.numbers limit 3) any left join (select number as r from system.numbers limit 3) on l + 1 = r * 1 order by all; +select * from (select number as l from system.numbers limit 3) any left join (select toLowCardinality(number) as r from system.numbers limit 3) on l + 1 = r * 1 order by all; +select * from (select toLowCardinality(number) as l from system.numbers limit 3) any left join (select toLowCardinality(number) as r from system.numbers limit 3) on l + 1 = r * 1 order by all; +select * from (select toLowCardinality(toNullable(number)) as l from system.numbers limit 3) any left join (select toLowCardinality(number) as r from system.numbers limit 3) on l + 1 = r * 1 order by all; +select * from (select toLowCardinality(number) as l from system.numbers limit 3) any left join (select toLowCardinality(toNullable(number)) as r from system.numbers limit 3) on l + 1 = r * 1 order by all; +select * from (select toLowCardinality(toNullable(number)) as l from system.numbers limit 3) any left join (select toLowCardinality(toNullable(number)) as r from system.numbers limit 3) on l + 1 = r * 1 order by all; diff --git a/parser/testdata/00800_versatile_storage_join/ast.json b/parser/testdata/00800_versatile_storage_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00800_versatile_storage_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00800_versatile_storage_join/metadata.json b/parser/testdata/00800_versatile_storage_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00800_versatile_storage_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00800_versatile_storage_join/query.sql b/parser/testdata/00800_versatile_storage_join/query.sql new file mode 100644 index 000000000..cccc655e3 --- /dev/null +++ b/parser/testdata/00800_versatile_storage_join/query.sql @@ -0,0 +1,70 @@ + +DROP TABLE IF EXISTS join_any_inner; +DROP TABLE IF EXISTS join_any_left; +DROP TABLE IF EXISTS join_any_left_null; +DROP TABLE IF EXISTS join_all_inner; +DROP TABLE IF EXISTS join_all_left; +DROP TABLE IF EXISTS join_string_key; + +CREATE TABLE 
join_any_inner (s String, x Array(UInt8), k UInt64) ENGINE = Join(ANY, INNER, k); +CREATE TABLE join_any_left (s String, x Array(UInt8), k UInt64) ENGINE = Join(ANY, LEFT, k); +CREATE TABLE join_all_inner (s String, x Array(UInt8), k UInt64) ENGINE = Join(ALL, INNER, k); +CREATE TABLE join_all_left (s String, x Array(UInt8), k UInt64) ENGINE = Join(ALL, LEFT, k); + +INSERT INTO join_any_inner VALUES ('abc', [0], 1), ('def', [1, 2], 2); +INSERT INTO join_any_left VALUES ('abc', [0], 1), ('def', [1, 2], 2); +INSERT INTO join_all_inner VALUES ('abc', [0], 1), ('def', [1, 2], 2); +INSERT INTO join_all_left VALUES ('abc', [0], 1), ('def', [1, 2], 2); + +-- read from StorageJoin + +SELECT '--------read--------'; +SELECT * from join_any_inner ORDER BY k; +SELECT * from join_any_left ORDER BY k; +SELECT * from join_all_inner ORDER BY k; +SELECT * from join_all_left ORDER BY k; + +-- create StorageJoin tables with customized settings + +CREATE TABLE join_any_left_null (s String, k UInt64) ENGINE = Join(ANY, LEFT, k) SETTINGS join_use_nulls = 1; +INSERT INTO join_any_left_null VALUES ('abc', 1), ('def', 2); + +-- joinGet +SELECT '--------joinGet--------'; +SELECT joinGet('join_any_left', 's', number) FROM numbers(3); +SELECT ''; +SELECT joinGet('join_any_left_null', 's', number) FROM numbers(3); +SELECT ''; + +-- Using identifier as the first argument + +SELECT joinGet(join_any_left, 's', number) FROM numbers(3); +SELECT ''; +SELECT joinGet(join_any_left_null, 's', number) FROM numbers(3); +SELECT ''; + +CREATE TABLE join_string_key (s String, x Array(UInt8), k UInt64) ENGINE = Join(ANY, LEFT, s); +INSERT INTO join_string_key VALUES ('abc', [0], 1), ('def', [1, 2], 2); +SELECT joinGet('join_string_key', 'x', 'abc'), joinGet('join_string_key', 'k', 'abc'); + +USE default; + +DROP TABLE {CLICKHOUSE_DATABASE:Identifier}.join_any_inner; +DROP TABLE {CLICKHOUSE_DATABASE:Identifier}.join_any_left; +DROP TABLE {CLICKHOUSE_DATABASE:Identifier}.join_any_left_null; +DROP TABLE {CLICKHOUSE_DATABASE:Identifier}.join_all_inner; +DROP TABLE {CLICKHOUSE_DATABASE:Identifier}.join_all_left; +DROP TABLE {CLICKHOUSE_DATABASE:Identifier}.join_string_key; + +-- test provided by Alexander Zaitsev +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.join_test; +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.join_test (a UInt8, b UInt8) Engine = Join(ANY, LEFT, a); + +USE {CLICKHOUSE_DATABASE:Identifier}; +select joinGet('join_test', 'b', 1); + +USE system; +SELECT joinGet({CLICKHOUSE_DATABASE:String} || '.join_test', 'b', 1); + +USE default; +DROP TABLE {CLICKHOUSE_DATABASE:Identifier}.join_test; diff --git a/parser/testdata/00801_daylight_saving_time_hour_underflow/ast.json b/parser/testdata/00801_daylight_saving_time_hour_underflow/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00801_daylight_saving_time_hour_underflow/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00801_daylight_saving_time_hour_underflow/metadata.json b/parser/testdata/00801_daylight_saving_time_hour_underflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00801_daylight_saving_time_hour_underflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00801_daylight_saving_time_hour_underflow/query.sql b/parser/testdata/00801_daylight_saving_time_hour_underflow/query.sql new file mode 100644 index 000000000..d23972ee4 --- /dev/null +++ b/parser/testdata/00801_daylight_saving_time_hour_underflow/query.sql @@ -0,0 
+1,6 @@ +-- See comment in DateLUTImpl.cpp: "We doesn't support cases when time change results in switching to previous day..." +SELECT + ignore(toDateTime(370641600, 'Asia/Istanbul') AS t), + replaceRegexpAll(toString(t), '\\d', 'x'), + toHour(t) < 24, + replaceRegexpAll(formatDateTime(t, '%Y-%m-%d %H:%i:%S; %R:%S; %F %T'), '\\d', 'x'); diff --git a/parser/testdata/00802_daylight_saving_time_shift_backwards_at_midnight/ast.json b/parser/testdata/00802_daylight_saving_time_shift_backwards_at_midnight/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00802_daylight_saving_time_shift_backwards_at_midnight/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00802_daylight_saving_time_shift_backwards_at_midnight/metadata.json b/parser/testdata/00802_daylight_saving_time_shift_backwards_at_midnight/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00802_daylight_saving_time_shift_backwards_at_midnight/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00802_daylight_saving_time_shift_backwards_at_midnight/query.sql b/parser/testdata/00802_daylight_saving_time_shift_backwards_at_midnight/query.sql new file mode 100644 index 000000000..4244ce203 --- /dev/null +++ b/parser/testdata/00802_daylight_saving_time_shift_backwards_at_midnight/query.sql @@ -0,0 +1,3 @@ +-- concat with an empty string to defeat the assumption that toString is injective. +SELECT concat('', toString(toDateTime('1981-09-29 00:00:00', 'Europe/Moscow') + INTERVAL number * 300 SECOND)) AS k FROM numbers(10000) GROUP BY k HAVING count() > 1 ORDER BY k; +SELECT concat('', toString(toDateTime('2018-09-19 00:00:00', 'Asia/Tehran') + INTERVAL number * 300 SECOND)) AS k FROM numbers(1000) GROUP BY k HAVING count() > 1 ORDER BY k; diff --git a/parser/testdata/00802_system_parts_with_datetime_partition/ast.json b/parser/testdata/00802_system_parts_with_datetime_partition/ast.json new file mode 100644 index 000000000..0568bb3ae --- /dev/null +++ b/parser/testdata/00802_system_parts_with_datetime_partition/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery datetime_table (children 1)" + }, + { + "explain": " Identifier datetime_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001357384, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/00802_system_parts_with_datetime_partition/metadata.json b/parser/testdata/00802_system_parts_with_datetime_partition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00802_system_parts_with_datetime_partition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00802_system_parts_with_datetime_partition/query.sql b/parser/testdata/00802_system_parts_with_datetime_partition/query.sql new file mode 100644 index 000000000..70042e4bf --- /dev/null +++ b/parser/testdata/00802_system_parts_with_datetime_partition/query.sql @@ -0,0 +1,82 @@ +DROP TABLE IF EXISTS datetime_table; + +-- Create a table with a DateTime column that is not used in the partition key +CREATE TABLE datetime_table + ( + t DateTime('UTC'), + name String, + value UInt32 + ) ENGINE = MergeTree() + ORDER BY (t, name) + PARTITION BY value; + +INSERT INTO datetime_table VALUES ('2016-01-01 00:00:00','name1',2); +INSERT INTO datetime_table VALUES ('2016-01-02 00:00:00','name2',2); +INSERT INTO datetime_table VALUES ('2016-01-03 
00:00:00','name1',4); + +-- min_time and max_time are not filled + +SELECT partition, toTimeZone(MIN(min_time), 'UTC') as min_time, toTimeZone(MAX(max_time), 'UTC') as max_time +FROM system.parts +WHERE database = currentDatabase() and table = 'datetime_table' AND active = 1 +GROUP BY partition +ORDER BY partition ASC +FORMAT CSV; + +DROP TABLE IF EXISTS datetime_table; + +-- Create a table with a DateTime column, this time used in the partition key +CREATE TABLE datetime_table + ( + t DateTime('UTC'), + name String, + value UInt32 + ) ENGINE = MergeTree() + ORDER BY (t, name) + PARTITION BY toStartOfDay(t); + +INSERT INTO datetime_table VALUES ('2016-01-01 00:00:00','name1',2); +INSERT INTO datetime_table VALUES ('2016-01-01 02:00:00','name1',3); +INSERT INTO datetime_table VALUES ('2016-01-02 01:00:00','name2',2); +INSERT INTO datetime_table VALUES ('2016-01-02 23:00:00','name2',5); +INSERT INTO datetime_table VALUES ('2016-01-03 04:00:00','name1',4); + +-- min_time and max_time are now filled + +SELECT partition, toTimeZone(MIN(min_time), 'UTC') as min_time, toTimeZone(MAX(max_time), 'UTC') as max_time +FROM system.parts +WHERE database = currentDatabase() and table = 'datetime_table' AND active = 1 +GROUP BY partition +ORDER BY partition ASC +FORMAT CSV; + +DROP TABLE IF EXISTS datetime_table; + +-- Create a table with a DateTime column, this time used in the partition key, but not at the first level +CREATE TABLE datetime_table + ( + t DateTime('UTC'), + name String, + value UInt32 + ) ENGINE = MergeTree() + ORDER BY (t, name) + PARTITION BY (name, toUInt32(toUnixTimestamp(t)/(60*60*24)) ); + +-- We are using a daily aggregation that is independent of the timezone; also add data + +INSERT INTO datetime_table VALUES (1451606400,'name1',2); +INSERT INTO datetime_table VALUES (1451613600,'name1',3); +INSERT INTO datetime_table VALUES (1451696400,'name2',2); +INSERT INTO datetime_table VALUES (1451775600,'name2',5); +INSERT INTO datetime_table VALUES (1451793600,'name1',4); + +-- min_time and max_time are now filled + +SELECT partition, toUnixTimestamp(MIN(min_time)) as min_unix_time, toUnixTimestamp(MAX(max_time)) as max_unix_time +FROM system.parts +WHERE database = currentDatabase() and table = 'datetime_table' AND active = 1 +GROUP BY partition +ORDER BY partition ASC +FORMAT CSV; + +DROP TABLE IF EXISTS datetime_table; diff --git a/parser/testdata/00803_odbc_driver_2_format/ast.json b/parser/testdata/00803_odbc_driver_2_format/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00803_odbc_driver_2_format/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00803_odbc_driver_2_format/metadata.json b/parser/testdata/00803_odbc_driver_2_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00803_odbc_driver_2_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00803_odbc_driver_2_format/query.sql b/parser/testdata/00803_odbc_driver_2_format/query.sql new file mode 100644 index 000000000..86c3864f3 --- /dev/null +++ b/parser/testdata/00803_odbc_driver_2_format/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest + +SELECT 1 AS x, [2, 3] AS y, 'Hello' AS z, NULL AS a FORMAT ODBCDriver2; +SELECT number % 10 AS k, count() AS c FROM numbers(100) GROUP BY k WITH TOTALS FORMAT ODBCDriver2; diff --git a/parser/testdata/00803_xxhash/ast.json b/parser/testdata/00803_xxhash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/00803_xxhash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00803_xxhash/metadata.json b/parser/testdata/00803_xxhash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00803_xxhash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00803_xxhash/query.sql b/parser/testdata/00803_xxhash/query.sql new file mode 100644 index 000000000..30917ec04 --- /dev/null +++ b/parser/testdata/00803_xxhash/query.sql @@ -0,0 +1,78 @@ + +SELECT hex(xxHash64('')) = upper('ef46db3751d8e999'); +SELECT hex(xxHash32('')) = upper('02cc5d05'); + +SELECT hex(xxHash64('ABC')) = upper('e66ae7354fcfee98'); +SELECT hex(xxHash32('ABC')) = upper('80712ed5'); + +SELECT hex(xxHash64('xxhash')) = upper('32dd38952c4bc720'); + +-- + +SELECT xxHash64(NULL) is NULL; +SELECT xxHash64() = toUInt64(16324913028386710556); + +SELECT xxHash64(0) = toUInt64(16804241149081757544); +SELECT xxHash64(123456) = toUInt64(9049736899514479480); + +select xxHash64(toUInt8(0)) = xxHash64('\0'); +select xxHash64(toUInt16(0)) = xxHash64('\0\0'); +select xxHash64(toUInt32(0)) = xxHash64('\0\0\0\0'); +select xxHash64(toUInt64(0)) = xxHash64('\0\0\0\0\0\0\0\0'); + +SELECT xxHash64(CAST(3 AS UInt8)) = toUInt64(2244420788148980662); +SELECT xxHash64(CAST(1.2684 AS Float32)) = toUInt64(6662491266811474554); +SELECT xxHash64(CAST(-154477 AS Int64)) = toUInt64(1162348840373071858); + +SELECT xxHash64('') = toUInt64(17241709254077376921); +SELECT xxHash64('foo') = toUInt64(3728699739546630719); +SELECT xxHash64(CAST('foo' AS FixedString(3))) = xxHash64('foo'); +SELECT xxHash64(CAST('bar' AS FixedString(3))) = toUInt64(5234164152756840025); +SELECT xxHash64(x) = toUInt64(9962287286179718960) FROM (SELECT CAST(1 AS Enum8('a' = 1, 'b' = 2)) as x); + +SELECT xxHash64('\x01') = toUInt64(9962287286179718960); +SELECT xxHash64('\x02\0') = toUInt64(6482051057365497128); +SELECT xxHash64('\x03\0\0\0') = toUInt64(13361037350151369407); + +SELECT xxHash64(1) = toUInt64(9962287286179718960); +SELECT xxHash64(toUInt16(2)) = toUInt64(6482051057365497128); +SELECT xxHash64(toUInt32(3)) = toUInt64(13361037350151369407); + +SELECT xxHash64(1, 2, 3) = toUInt64(13728743482242651702); +SELECT xxHash64(1, 3, 2) = toUInt64(10226792638577471533); +SELECT xxHash64(('a', [1, 2, 3], 4, (4, ['foo', 'bar'], 1, (1, 2)))) = toUInt64(3521288460171939489); + +-- + +SELECT xxHash32(NULL) is NULL; +SELECT xxHash32() = toUInt32(4263699484); + +SELECT xxHash32(0) = toUInt32(3479547966); +SELECT xxHash32(123456) = toUInt32(1434661961); + +select xxHash32(toUInt8(0)) = xxHash32('\0'); +select xxHash32(toUInt16(0)) = xxHash32('\0\0'); +select xxHash32(toUInt32(0)) = xxHash32('\0\0\0\0'); + +SELECT xxHash32(CAST(3 AS UInt8)) = toUInt32(565077562); +SELECT xxHash32(CAST(1.2684 AS Float32)) = toUInt32(3120514536); +SELECT xxHash32(CAST(-154477 AS Int32)) = toUInt32(3279223048); + +SELECT xxHash32('') = toUInt32(46947589); +SELECT xxHash32('foo') = toUInt32(3792637401); +SELECT xxHash32(CAST('foo' AS FixedString(3))) = xxHash32('foo'); +SELECT xxHash32(CAST('bar' AS FixedString(3))) = toUInt32(1101146924); +SELECT xxHash32(x) = toUInt32(949155633) FROM (SELECT CAST(1 AS Enum8('a' = 1, 'b' = 2)) as x); + +SELECT xxHash32('\x01') = toUInt32(949155633); +SELECT xxHash32('\x02\0') = toUInt32(332955956); +SELECT xxHash32('\x03\0\0\0') = toUInt32(2158931063); + +SELECT xxHash32(1) = toUInt32(949155633); +SELECT xxHash32(toUInt16(2)) = toUInt32(332955956); +SELECT xxHash32(toUInt32(3)) = 
toUInt32(2158931063); + +SELECT xxHash32(1, 2, 3) = toUInt32(441104368); +SELECT xxHash32(1, 3, 2) = toUInt32(912264289); +SELECT xxHash32(('a', [1, 2, 3], 4, (4, ['foo', 'bar'], 1, (1, 2)))) = toUInt32(1930126291); + diff --git a/parser/testdata/00804_rollup_with_having/ast.json b/parser/testdata/00804_rollup_with_having/ast.json new file mode 100644 index 000000000..5362e71e9 --- /dev/null +++ b/parser/testdata/00804_rollup_with_having/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery rollup_having (children 1)" + }, + { + "explain": " Identifier rollup_having" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001595913, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/00804_rollup_with_having/metadata.json b/parser/testdata/00804_rollup_with_having/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00804_rollup_with_having/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00804_rollup_with_having/query.sql b/parser/testdata/00804_rollup_with_having/query.sql new file mode 100644 index 000000000..852e7d32f --- /dev/null +++ b/parser/testdata/00804_rollup_with_having/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS rollup_having; +CREATE TABLE rollup_having ( + a Nullable(String), + b Nullable(String) +) ENGINE = Memory; + +INSERT INTO rollup_having VALUES (NULL, NULL); +INSERT INTO rollup_having VALUES ('a', NULL); +INSERT INTO rollup_having VALUES ('a', 'b'); + +SELECT a, b, count(*) as count FROM rollup_having GROUP BY a, b WITH ROLLUP HAVING a IS NOT NULL ORDER BY a, b, count; +SELECT a, b, count(*) as count FROM rollup_having GROUP BY a, b WITH ROLLUP HAVING a IS NOT NULL and b IS NOT NULL ORDER BY a, b, count; + +DROP TABLE rollup_having; diff --git a/parser/testdata/00804_test_alter_compression_codecs/ast.json b/parser/testdata/00804_test_alter_compression_codecs/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00804_test_alter_compression_codecs/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00804_test_alter_compression_codecs/metadata.json b/parser/testdata/00804_test_alter_compression_codecs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00804_test_alter_compression_codecs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00804_test_alter_compression_codecs/query.sql b/parser/testdata/00804_test_alter_compression_codecs/query.sql new file mode 100644 index 000000000..f2620b79d --- /dev/null +++ b/parser/testdata/00804_test_alter_compression_codecs/query.sql @@ -0,0 +1,94 @@ +-- Tags: no-msan +-- (because the INSERT with 300k rows sometimes takes >5 minutes in msan build, I didn't investigate why) + +SET send_logs_level = 'fatal'; + +DROP TABLE IF EXISTS alter_compression_codec; + +CREATE TABLE alter_compression_codec ( + somedate Date CODEC(LZ4), + id UInt64 CODEC(NONE) +) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id; + +INSERT INTO alter_compression_codec VALUES('2018-01-01', 1); +INSERT INTO alter_compression_codec VALUES('2018-01-01', 2); +SELECT * FROM alter_compression_codec ORDER BY id; + +ALTER TABLE alter_compression_codec ADD COLUMN alter_column String DEFAULT 'default_value' CODEC(ZSTD); +SELECT compression_codec FROM system.columns WHERE database = currentDatabase() AND table = 'alter_compression_codec' AND name = 'alter_column'; 
+ +INSERT INTO alter_compression_codec VALUES('2018-01-01', 3, '3'); +INSERT INTO alter_compression_codec VALUES('2018-01-01', 4, '4'); +SELECT * FROM alter_compression_codec ORDER BY id; + +ALTER TABLE alter_compression_codec MODIFY COLUMN alter_column CODEC(NONE); +SELECT compression_codec FROM system.columns WHERE database = currentDatabase() AND table = 'alter_compression_codec' AND name = 'alter_column'; + +INSERT INTO alter_compression_codec VALUES('2018-01-01', 5, '5'); +INSERT INTO alter_compression_codec VALUES('2018-01-01', 6, '6'); +SELECT * FROM alter_compression_codec ORDER BY id; + +OPTIMIZE TABLE alter_compression_codec FINAL; +SELECT * FROM alter_compression_codec ORDER BY id; + +SET allow_suspicious_codecs = 1; +ALTER TABLE alter_compression_codec MODIFY COLUMN alter_column CODEC(ZSTD, LZ4HC, LZ4, LZ4, NONE); +SELECT compression_codec FROM system.columns WHERE database = currentDatabase() AND table = 'alter_compression_codec' AND name = 'alter_column'; + +INSERT INTO alter_compression_codec VALUES('2018-01-01', 7, '7'); +INSERT INTO alter_compression_codec VALUES('2018-01-01', 8, '8'); +OPTIMIZE TABLE alter_compression_codec FINAL; +SELECT * FROM alter_compression_codec ORDER BY id; + +ALTER TABLE alter_compression_codec MODIFY COLUMN alter_column FixedString(100); +SELECT compression_codec FROM system.columns WHERE database = currentDatabase() AND table = 'alter_compression_codec' AND name = 'alter_column'; + + +DROP TABLE IF EXISTS alter_compression_codec; + +DROP TABLE IF EXISTS alter_bad_codec; + +CREATE TABLE alter_bad_codec ( + somedate Date CODEC(LZ4), + id UInt64 CODEC(NONE) +) ENGINE = MergeTree() ORDER BY tuple(); + +ALTER TABLE alter_bad_codec ADD COLUMN alter_column DateTime DEFAULT '2019-01-01 00:00:00' CODEC(gbdgkjsdh); -- { serverError UNKNOWN_CODEC } + +ALTER TABLE alter_bad_codec ADD COLUMN alter_column DateTime DEFAULT '2019-01-01 00:00:00' CODEC(ZSTD(100)); -- { serverError ILLEGAL_CODEC_PARAMETER } + +DROP TABLE IF EXISTS alter_bad_codec; + +DROP TABLE IF EXISTS large_alter_table_00804; +DROP TABLE IF EXISTS store_of_hash_00804; + +CREATE TABLE large_alter_table_00804 ( + somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)), + id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC), + data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4) +) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi', min_bytes_for_wide_part = 0; + +SET max_execution_time = 300; + +INSERT INTO large_alter_table_00804 SELECT toDate('2019-01-01'), number, toString(number + rand()) FROM system.numbers LIMIT 300000; + +CREATE TABLE store_of_hash_00804 (hash UInt64) ENGINE = Memory(); + +INSERT INTO store_of_hash_00804 SELECT sum(cityHash64(*)) FROM large_alter_table_00804; + +ALTER TABLE large_alter_table_00804 MODIFY COLUMN data CODEC(NONE, LZ4, LZ4HC, ZSTD); + +OPTIMIZE TABLE large_alter_table_00804; + +SELECT compression_codec FROM system.columns WHERE database = currentDatabase() AND table = 'large_alter_table_00804' AND name = 'data'; + +DETACH TABLE large_alter_table_00804; +ATTACH TABLE large_alter_table_00804; + +INSERT INTO store_of_hash_00804 SELECT sum(cityHash64(*)) FROM large_alter_table_00804; + +SELECT COUNT(hash) FROM store_of_hash_00804; +SELECT COUNT(DISTINCT hash) FROM store_of_hash_00804; + +DROP TABLE IF EXISTS large_alter_table_00804; +DROP TABLE IF EXISTS store_of_hash_00804; diff --git a/parser/testdata/00804_test_custom_compression_codecs/ast.json b/parser/testdata/00804_test_custom_compression_codecs/ast.json 
new file mode 100644 index 000000000..67933b02b --- /dev/null +++ b/parser/testdata/00804_test_custom_compression_codecs/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001393953, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00804_test_custom_compression_codecs/metadata.json b/parser/testdata/00804_test_custom_compression_codecs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00804_test_custom_compression_codecs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00804_test_custom_compression_codecs/query.sql b/parser/testdata/00804_test_custom_compression_codecs/query.sql new file mode 100644 index 000000000..b874ab05e --- /dev/null +++ b/parser/testdata/00804_test_custom_compression_codecs/query.sql @@ -0,0 +1,153 @@ +SET send_logs_level = 'fatal'; +SET allow_suspicious_codecs = 1; + +DROP TABLE IF EXISTS compression_codec; + +CREATE TABLE compression_codec( + id UInt64 CODEC(LZ4), + data String CODEC(ZSTD), + ddd Date CODEC(NONE), + somenum Float64 CODEC(ZSTD(2)), + somestr FixedString(3) CODEC(LZ4HC(7)), + othernum Int64 CODEC(Delta) +) ENGINE = MergeTree() ORDER BY tuple(); + +INSERT INTO compression_codec VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5); +INSERT INTO compression_codec VALUES(2, 'world', toDate('2018-12-15'), 2.2, 'bbb', 6); +INSERT INTO compression_codec VALUES(3, '!', toDate('2018-12-16'), 3.3, 'ccc', 7); + +SELECT * FROM compression_codec ORDER BY id; + +OPTIMIZE TABLE compression_codec FINAL; + +INSERT INTO compression_codec VALUES(2, '', toDate('2018-12-13'), 4.4, 'ddd', 8); + +DETACH TABLE compression_codec; +ATTACH TABLE compression_codec; + +SELECT count(*) FROM compression_codec WHERE id = 2 GROUP BY id; + +DROP TABLE IF EXISTS compression_codec; + +DROP TABLE IF EXISTS bad_codec; +DROP TABLE IF EXISTS params_when_no_params; +DROP TABLE IF EXISTS too_many_params; +DROP TABLE IF EXISTS codec_multiple_direct_specification_1; +DROP TABLE IF EXISTS codec_multiple_direct_specification_2; +DROP TABLE IF EXISTS delta_bad_params1; +DROP TABLE IF EXISTS delta_bad_params2; + +CREATE TABLE bad_codec(id UInt64 CODEC(adssadads)) ENGINE = MergeTree() order by tuple(); -- { serverError UNKNOWN_CODEC } +CREATE TABLE too_many_params(id UInt64 CODEC(ZSTD(2,3,4,5))) ENGINE = MergeTree() order by tuple(); -- { serverError ILLEGAL_SYNTAX_FOR_CODEC_TYPE } +CREATE TABLE params_when_no_params(id UInt64 CODEC(LZ4(1))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError DATA_TYPE_CANNOT_HAVE_ARGUMENTS } +CREATE TABLE codec_multiple_direct_specification_1(id UInt64 CODEC(MULTIPLE(LZ4, ZSTD))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError UNKNOWN_CODEC } +CREATE TABLE codec_multiple_direct_specification_2(id UInt64 CODEC(multiple(LZ4, ZSTD))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError UNKNOWN_CODEC } +CREATE TABLE delta_bad_params1(id UInt64 CODEC(Delta(3))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_CODEC_PARAMETER } +CREATE TABLE delta_bad_params2(id UInt64 CODEC(Delta(16))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_CODEC_PARAMETER } + +DROP TABLE IF EXISTS bad_codec; +DROP TABLE IF EXISTS params_when_no_params; +DROP TABLE IF EXISTS too_many_params; +DROP TABLE IF EXISTS codec_multiple_direct_specification_1; +DROP TABLE IF EXISTS 
codec_multiple_direct_specification_2; +DROP TABLE IF EXISTS delta_bad_params1; +DROP TABLE IF EXISTS delta_bad_params2; + +DROP TABLE IF EXISTS compression_codec_multiple; + +SET network_compression_method = 'lz4hc'; + +CREATE TABLE compression_codec_multiple ( + id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, Delta(4)), + data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8)), + ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC), + somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD) +) ENGINE = MergeTree() ORDER BY tuple(); + +INSERT INTO compression_codec_multiple VALUES (1, 'world', toDate('2018-10-05'), 1.1), (2, 'hello', toDate('2018-10-01'), 2.2), (3, 'buy', toDate('2018-10-11'), 3.3); + +SELECT * FROM compression_codec_multiple ORDER BY id; + +INSERT INTO compression_codec_multiple select modulo(number, 100), toString(number), toDate('2018-12-01'), 5.5 * number FROM system.numbers limit 10000; + +SELECT count(*) FROM compression_codec_multiple; + +SELECT count(distinct data) FROM compression_codec_multiple; + +SELECT floor(sum(somenum), 1) FROM compression_codec_multiple; + +TRUNCATE TABLE compression_codec_multiple; + +INSERT INTO compression_codec_multiple select modulo(number, 100), toString(number), toDate('2018-12-01'), 5.5 * number FROM system.numbers limit 10000; + +SELECT sum(cityHash64(*)) FROM compression_codec_multiple; + +DROP TABLE IF EXISTS compression_codec_multiple_more_types; + +CREATE TABLE compression_codec_multiple_more_types ( + id Decimal128(13) CODEC(ZSTD, LZ4, ZSTD, ZSTD, Delta(2), Delta(4), Delta(1), LZ4HC), + data FixedString(12) CODEC(ZSTD, ZSTD, Delta, Delta, Delta, NONE, NONE, NONE, LZ4HC), + ddd Nested (age UInt8, Name String) CODEC(LZ4, LZ4HC, NONE, NONE, NONE, ZSTD, Delta(8)) +) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } + +CREATE TABLE compression_codec_multiple_more_types ( + id Decimal128(13) CODEC(ZSTD, LZ4, ZSTD, ZSTD, Delta(2), Delta(4), Delta(1), LZ4HC), + data FixedString(12) CODEC(ZSTD, ZSTD, NONE, NONE, NONE, LZ4HC), + ddd Nested (age UInt8, Name String) CODEC(LZ4, LZ4HC, NONE, NONE, NONE, ZSTD, Delta(8)) +) ENGINE = MergeTree() ORDER BY tuple(); + +SHOW CREATE TABLE compression_codec_multiple_more_types; + +INSERT INTO compression_codec_multiple_more_types VALUES(1.5555555555555, 'hello world!', [77], ['John']); +INSERT INTO compression_codec_multiple_more_types VALUES(7.1, 'xxxxxxxxxxxx', [127], ['Henry']); + +SELECT * FROM compression_codec_multiple_more_types order by id; + +DROP TABLE IF EXISTS compression_codec_multiple_with_key; + +SET network_compression_method = 'zstd'; +SET network_zstd_compression_level = 5; + +CREATE TABLE compression_codec_multiple_with_key ( + somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12), Delta, Delta), + id UInt64 CODEC(LZ4, ZSTD, Delta, NONE, LZ4HC, Delta), + data String CODEC(ZSTD(2), Delta(1), LZ4HC, NONE, LZ4, LZ4) +) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi'; + + +INSERT INTO compression_codec_multiple_with_key VALUES(toDate('2018-10-12'), 100000, 'hello'), (toDate('2018-10-12'), 100002, 'world'), (toDate('2018-10-12'), 1111, '!'); + +SELECT data FROM compression_codec_multiple_with_key WHERE id BETWEEN 3 AND 1112; + +INSERT INTO compression_codec_multiple_with_key SELECT toDate('2018-10-12'), number, toString(number) FROM system.numbers LIMIT 1000; + +SELECT COUNT(DISTINCT data) FROM compression_codec_multiple_with_key WHERE id < 222; + +-- 
method in uppercase +SET network_compression_method = 'ZSTD'; +SET network_zstd_compression_level = 7; + +INSERT INTO compression_codec_multiple_with_key VALUES(toDate('2018-10-13'), 100001, 'hello1'), (toDate('2018-10-14'), 100003, 'world1'), (toDate('2018-10-15'), 2222, '!ZSTD'); + +SELECT data FROM compression_codec_multiple_with_key WHERE id = 2222; + +DROP TABLE IF EXISTS compression_codec_multiple_with_key; + +DROP TABLE IF EXISTS test_default_delta; + +CREATE TABLE test_default_delta( + id UInt64 CODEC(Delta), + data String CODEC(Delta(1)), + somedate Date CODEC(Delta), + somenum Float64 CODEC(Delta), + somestr FixedString(3) CODEC(Delta(1)), + othernum Int64 CODEC(Delta), + yetothernum Float32 CODEC(Delta), + ddd Nested (age UInt8, Name String, OName String, BName String) CODEC(Delta(1)) +) ENGINE = MergeTree() ORDER BY tuple(); + +SHOW CREATE TABLE test_default_delta; + +DROP TABLE IF EXISTS test_default_delta; +DROP TABLE compression_codec_multiple; +DROP TABLE compression_codec_multiple_more_types; diff --git a/parser/testdata/00804_test_custom_compression_codes_log_storages/ast.json b/parser/testdata/00804_test_custom_compression_codes_log_storages/ast.json new file mode 100644 index 000000000..3c9fc4eec --- /dev/null +++ b/parser/testdata/00804_test_custom_compression_codes_log_storages/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001359793, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00804_test_custom_compression_codes_log_storages/metadata.json b/parser/testdata/00804_test_custom_compression_codes_log_storages/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00804_test_custom_compression_codes_log_storages/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00804_test_custom_compression_codes_log_storages/query.sql b/parser/testdata/00804_test_custom_compression_codes_log_storages/query.sql new file mode 100644 index 000000000..fba6a2167 --- /dev/null +++ b/parser/testdata/00804_test_custom_compression_codes_log_storages/query.sql @@ -0,0 +1,122 @@ +SET send_logs_level = 'fatal'; +SET allow_suspicious_codecs = 1; + +-- copy-paste for storage log + +DROP TABLE IF EXISTS compression_codec_log; + +CREATE TABLE compression_codec_log( + id UInt64 CODEC(LZ4), + data String CODEC(ZSTD), + ddd Date CODEC(NONE), + somenum Float64 CODEC(ZSTD(2)), + somestr FixedString(3) CODEC(LZ4HC(7)), + othernum Int64 CODEC(Delta) +) ENGINE = Log(); + +SHOW CREATE TABLE compression_codec_log; + +INSERT INTO compression_codec_log VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5); +INSERT INTO compression_codec_log VALUES(2, 'world', toDate('2018-12-15'), 2.2, 'bbb', 6); +INSERT INTO compression_codec_log VALUES(3, '!', toDate('2018-12-16'), 3.3, 'ccc', 7); + +SELECT * FROM compression_codec_log ORDER BY id; + +INSERT INTO compression_codec_log VALUES(2, '', toDate('2018-12-13'), 4.4, 'ddd', 8); + +DETACH TABLE compression_codec_log; +ATTACH TABLE compression_codec_log; + +SELECT count(*) FROM compression_codec_log WHERE id = 2 GROUP BY id; + +DROP TABLE IF EXISTS compression_codec_log; + +DROP TABLE IF EXISTS compression_codec_multiple_log; + +CREATE TABLE compression_codec_multiple_log ( + id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, Delta(4)), + data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8)), + ddd Date CODEC(NONE, NONE, NONE, 
Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC), + somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD) +) ENGINE = Log(); + +SHOW CREATE TABLE compression_codec_multiple_log; + +INSERT INTO compression_codec_multiple_log VALUES (1, 'world', toDate('2018-10-05'), 1.1), (2, 'hello', toDate('2018-10-01'), 2.2), (3, 'buy', toDate('2018-10-11'), 3.3); + +SELECT * FROM compression_codec_multiple_log ORDER BY id; + +INSERT INTO compression_codec_multiple_log select modulo(number, 100), toString(number), toDate('2018-12-01'), 5.5 * number FROM system.numbers limit 10000; + +SELECT count(*) FROM compression_codec_multiple_log; + +SELECT count(distinct data) FROM compression_codec_multiple_log; + +SELECT floor(sum(somenum), 1) FROM compression_codec_multiple_log; + +TRUNCATE TABLE compression_codec_multiple_log; + +INSERT INTO compression_codec_multiple_log select modulo(number, 100), toString(number), toDate('2018-12-01'), 5.5 * number FROM system.numbers limit 10000; + +SELECT sum(cityHash64(*)) FROM compression_codec_multiple_log; + +-- copy-paste for storage tiny log +DROP TABLE IF EXISTS compression_codec_tiny_log; + +CREATE TABLE compression_codec_tiny_log( + id UInt64 CODEC(LZ4), + data String CODEC(ZSTD), + ddd Date CODEC(NONE), + somenum Float64 CODEC(ZSTD(2)), + somestr FixedString(3) CODEC(LZ4HC(7)), + othernum Int64 CODEC(Delta) +) ENGINE = TinyLog(); + +SHOW CREATE TABLE compression_codec_tiny_log; + +INSERT INTO compression_codec_tiny_log VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5); +INSERT INTO compression_codec_tiny_log VALUES(2, 'world', toDate('2018-12-15'), 2.2, 'bbb', 6); +INSERT INTO compression_codec_tiny_log VALUES(3, '!', toDate('2018-12-16'), 3.3, 'ccc', 7); + +SELECT * FROM compression_codec_tiny_log ORDER BY id; + +INSERT INTO compression_codec_tiny_log VALUES(2, '', toDate('2018-12-13'), 4.4, 'ddd', 8); + +DETACH TABLE compression_codec_tiny_log; +ATTACH TABLE compression_codec_tiny_log; + +SELECT count(*) FROM compression_codec_tiny_log WHERE id = 2 GROUP BY id; + +DROP TABLE IF EXISTS compression_codec_tiny_log; + +DROP TABLE IF EXISTS compression_codec_multiple_tiny_log; + +CREATE TABLE compression_codec_multiple_tiny_log ( + id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, Delta(4)), + data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8)), + ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC), + somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD) +) ENGINE = TinyLog(); + +SHOW CREATE TABLE compression_codec_multiple_tiny_log; + +INSERT INTO compression_codec_multiple_tiny_log VALUES (1, 'world', toDate('2018-10-05'), 1.1), (2, 'hello', toDate('2018-10-01'), 2.2), (3, 'buy', toDate('2018-10-11'), 3.3); + +SELECT * FROM compression_codec_multiple_tiny_log ORDER BY id; + +INSERT INTO compression_codec_multiple_tiny_log select modulo(number, 100), toString(number), toDate('2018-12-01'), 5.5 * number FROM system.numbers limit 10000; + +SELECT count(*) FROM compression_codec_multiple_tiny_log; + +SELECT count(distinct data) FROM compression_codec_multiple_tiny_log; + +SELECT floor(sum(somenum), 1) FROM compression_codec_multiple_tiny_log; + +TRUNCATE TABLE compression_codec_multiple_tiny_log; + +INSERT INTO compression_codec_multiple_tiny_log select modulo(number, 100), toString(number), toDate('2018-12-01'), 5.5 * number FROM system.numbers limit 10000; + +SELECT sum(cityHash64(*)) FROM compression_codec_multiple_tiny_log; + +DROP TABLE compression_codec_multiple_log; +DROP TABLE 
compression_codec_multiple_tiny_log; diff --git a/parser/testdata/00804_test_deflate_qpl_codec_compression/ast.json b/parser/testdata/00804_test_deflate_qpl_codec_compression/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00804_test_deflate_qpl_codec_compression/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00804_test_deflate_qpl_codec_compression/metadata.json b/parser/testdata/00804_test_deflate_qpl_codec_compression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00804_test_deflate_qpl_codec_compression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00804_test_deflate_qpl_codec_compression/query.sql b/parser/testdata/00804_test_deflate_qpl_codec_compression/query.sql new file mode 100644 index 000000000..d8c28a7d9 --- /dev/null +++ b/parser/testdata/00804_test_deflate_qpl_codec_compression/query.sql @@ -0,0 +1,49 @@ +--Tags: no-fasttest, no-cpu-aarch64, no-cpu-s390x +-- no-fasttest because DEFLATE_QPL isn't available in fasttest +-- no-cpu-aarch64 and no-cpu-s390x because DEFLATE_QPL is x86-only + +-- A bunch of random DDLs to test the DEFLATE_QPL codec. + +SET enable_deflate_qpl_codec = 1; + +-- Suppress test failures because stderr contains warning "Initialization of hardware-assisted DeflateQpl failed, falling +-- back to software DeflateQpl codec." +SET send_logs_level = 'fatal'; + +DROP TABLE IF EXISTS compression_codec; + +CREATE TABLE compression_codec( + id UInt64 CODEC(DEFLATE_QPL), + data String CODEC(DEFLATE_QPL), + ddd Date CODEC(DEFLATE_QPL), + ddd32 Date32 CODEC(DEFLATE_QPL), + somenum Float64 CODEC(DEFLATE_QPL), + somestr FixedString(3) CODEC(DEFLATE_QPL), + othernum Int64 CODEC(DEFLATE_QPL), + somearray Array(UInt8) CODEC(DEFLATE_QPL), + somemap Map(String, UInt32) CODEC(DEFLATE_QPL), + sometuple Tuple(UInt16, UInt64) CODEC(DEFLATE_QPL), +) ENGINE = MergeTree() ORDER BY tuple(); + +SHOW CREATE TABLE compression_codec; + +INSERT INTO compression_codec VALUES(1, 'hello', toDate('2018-12-14'), toDate32('2018-12-14'), 1.1, 'aaa', 5, [1,2,3], map('k1',1,'k2',2), tuple(1,2)); +INSERT INTO compression_codec VALUES(2, 'world', toDate('2018-12-15'), toDate32('2018-12-15'), 2.2, 'bbb', 6, [4,5,6], map('k3',3,'k4',4), tuple(3,4)); +INSERT INTO compression_codec VALUES(3, '!', toDate('2018-12-16'), toDate32('2018-12-16'), 3.3, 'ccc', 7, [7,8,9], map('k5',5,'k6',6), tuple(5,6)); + +SELECT * FROM compression_codec ORDER BY id; + +OPTIMIZE TABLE compression_codec FINAL; + +INSERT INTO compression_codec VALUES(2, '', toDate('2018-12-13'), toDate32('2018-12-13'), 4.4, 'ddd', 8, [10,11,12], map('k7',7,'k8',8), tuple(7,8)); + +DETACH TABLE compression_codec; +ATTACH TABLE compression_codec; + +SELECT count(*) FROM compression_codec WHERE id = 2 GROUP BY id; + +INSERT INTO compression_codec SELECT 3, '!', toDate('2018-12-16'), toDate32('2018-12-16'), 3.3, 'ccc', 7, [7,8,9], map('k5',5,'k6',6), tuple(5,6) FROM system.numbers LIMIT 10000; + +SELECT count(*) FROM compression_codec WHERE id = 3 GROUP BY id; + +DROP TABLE IF EXISTS compression_codec; diff --git a/parser/testdata/00804_test_delta_codec_compression/ast.json b/parser/testdata/00804_test_delta_codec_compression/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00804_test_delta_codec_compression/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00804_test_delta_codec_compression/metadata.json 
b/parser/testdata/00804_test_delta_codec_compression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00804_test_delta_codec_compression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00804_test_delta_codec_compression/query.sql b/parser/testdata/00804_test_delta_codec_compression/query.sql new file mode 100644 index 000000000..1e71803fb --- /dev/null +++ b/parser/testdata/00804_test_delta_codec_compression/query.sql @@ -0,0 +1,119 @@ +-- Tags: no-random-merge-tree-settings + +SET send_logs_level = 'fatal'; +SET joined_subquery_requires_alias = 0; + +DROP TABLE IF EXISTS delta_codec_synthetic; +DROP TABLE IF EXISTS default_codec_synthetic; + +CREATE TABLE delta_codec_synthetic +( + id UInt64 Codec(Delta, ZSTD(3)) +) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key = false, ratio_of_defaults_for_sparse_serialization = 1, enable_block_number_column = 0, serialization_info_version = 'basic', auto_statistics_types = ''; + +CREATE TABLE default_codec_synthetic +( + id UInt64 Codec(ZSTD(3)) +) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key = false, ratio_of_defaults_for_sparse_serialization = 1, enable_block_number_column = 0, auto_statistics_types = ''; + +set max_insert_threads = 1; + +INSERT INTO delta_codec_synthetic SELECT number FROM system.numbers LIMIT 5000000; +INSERT INTO default_codec_synthetic SELECT number FROM system.numbers LIMIT 5000000; + +OPTIMIZE TABLE delta_codec_synthetic FINAL; +OPTIMIZE TABLE default_codec_synthetic FINAL; + +SELECT + floor(big_size / small_size) AS ratio +FROM + (SELECT 1 AS key, sum(bytes_on_disk) AS small_size FROM system.parts WHERE database == currentDatabase() and table == 'delta_codec_synthetic' and active) +INNER JOIN + (SELECT 1 AS key, sum(bytes_on_disk) as big_size FROM system.parts WHERE database == currentDatabase() and table == 'default_codec_synthetic' and active) +USING(key); + +SELECT + small_hash == big_hash +FROM + (SELECT 1 AS key, sum(cityHash64(*)) AS small_hash FROM delta_codec_synthetic) +INNER JOIN + (SELECT 1 AS key, sum(cityHash64(*)) AS big_hash FROM default_codec_synthetic) +USING(key); + +DROP TABLE IF EXISTS delta_codec_synthetic; +DROP TABLE IF EXISTS default_codec_synthetic; + +DROP TABLE IF EXISTS delta_codec_float; +DROP TABLE IF EXISTS default_codec_float; + +CREATE TABLE delta_codec_float +( + id Float64 Codec(Delta, LZ4HC) +) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key = false, ratio_of_defaults_for_sparse_serialization = 1, enable_block_number_column = 0, auto_statistics_types = ''; + +CREATE TABLE default_codec_float +( + id Float64 Codec(LZ4HC) +) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key = false, ratio_of_defaults_for_sparse_serialization = 1, enable_block_number_column = 0, auto_statistics_types = ''; + +INSERT INTO delta_codec_float SELECT number FROM numbers(1547510400, 500000) WHERE number % 3 == 0 OR number % 5 == 0 OR number % 7 == 0 OR number % 11 == 0; +INSERT INTO default_codec_float SELECT * from delta_codec_float; + +OPTIMIZE TABLE delta_codec_float FINAL; +OPTIMIZE TABLE default_codec_float FINAL; + +SELECT + floor(big_size / small_size) as ratio +FROM + (SELECT 1 AS key, sum(bytes_on_disk) AS small_size FROM system.parts WHERE database = 
currentDatabase() and table = 'delta_codec_float' and active) +INNER JOIN + (SELECT 1 AS key, sum(bytes_on_disk) as big_size FROM system.parts WHERE database = currentDatabase() and table = 'default_codec_float' and active) USING(key); + +SELECT + small_hash == big_hash +FROM + (SELECT 1 AS key, sum(cityHash64(*)) AS small_hash FROM delta_codec_float) +INNER JOIN + (SELECT 1 AS key, sum(cityHash64(*)) AS big_hash FROM default_codec_float) +USING(key); + +DROP TABLE IF EXISTS delta_codec_float; +DROP TABLE IF EXISTS default_codec_float; + + +DROP TABLE IF EXISTS delta_codec_string; +DROP TABLE IF EXISTS default_codec_string; + +CREATE TABLE delta_codec_string +( + id Float64 Codec(Delta, LZ4) +) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key = false, ratio_of_defaults_for_sparse_serialization = 1, enable_block_number_column = 0, auto_statistics_types = ''; + +CREATE TABLE default_codec_string +( + id Float64 Codec(LZ4) +) ENGINE MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, compress_marks = false, compress_primary_key = false, ratio_of_defaults_for_sparse_serialization = 1, enable_block_number_column = 0, auto_statistics_types = ''; + +INSERT INTO delta_codec_string SELECT concat(toString(number), toString(number % 100)) FROM numbers(1547510400, 500000); +INSERT INTO default_codec_string SELECT * from delta_codec_string; + +OPTIMIZE TABLE delta_codec_string FINAL; +OPTIMIZE TABLE default_codec_string FINAL; + +SELECT + floor(big_size / small_size) as ratio +FROM + (SELECT 1 AS key, sum(bytes_on_disk) AS small_size FROM system.parts WHERE database = currentDatabase() and table = 'delta_codec_string' and active) +INNER JOIN + (SELECT 1 AS key, sum(bytes_on_disk) as big_size FROM system.parts WHERE database = currentDatabase() and table = 'default_codec_string' and active) USING(key); + +SELECT + small_hash == big_hash +FROM + (SELECT 1 AS key, sum(cityHash64(*)) AS small_hash FROM delta_codec_string) +INNER JOIN + (SELECT 1 AS key, sum(cityHash64(*)) AS big_hash FROM default_codec_string) +USING(key); + +DROP TABLE IF EXISTS delta_codec_string; +DROP TABLE IF EXISTS default_codec_string; diff --git a/parser/testdata/00804_test_delta_codec_no_type_alter/ast.json b/parser/testdata/00804_test_delta_codec_no_type_alter/ast.json new file mode 100644 index 000000000..5335bbd46 --- /dev/null +++ b/parser/testdata/00804_test_delta_codec_no_type_alter/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001753029, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00804_test_delta_codec_no_type_alter/metadata.json b/parser/testdata/00804_test_delta_codec_no_type_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00804_test_delta_codec_no_type_alter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00804_test_delta_codec_no_type_alter/query.sql b/parser/testdata/00804_test_delta_codec_no_type_alter/query.sql new file mode 100644 index 000000000..4cb34b638 --- /dev/null +++ b/parser/testdata/00804_test_delta_codec_no_type_alter/query.sql @@ -0,0 +1,11 @@ +SET send_logs_level = 'fatal'; +SET allow_suspicious_codecs = 1; + +DROP TABLE IF EXISTS delta_codec_for_alter; +CREATE TABLE delta_codec_for_alter (date Date, x UInt32 Codec(Delta), s FixedString(128)) ENGINE = MergeTree 
ORDER BY tuple(); +SELECT compression_codec FROM system.columns WHERE database = currentDatabase() AND table = 'delta_codec_for_alter' AND name = 'x'; +ALTER TABLE delta_codec_for_alter MODIFY COLUMN x Codec(Delta, LZ4); +SELECT compression_codec FROM system.columns WHERE database = currentDatabase() AND table = 'delta_codec_for_alter' AND name = 'x'; +ALTER TABLE delta_codec_for_alter MODIFY COLUMN x UInt64 Codec(Delta, LZ4); +SELECT compression_codec FROM system.columns WHERE database = currentDatabase() AND table = 'delta_codec_for_alter' AND name = 'x'; +DROP TABLE IF EXISTS delta_codec_for_alter; diff --git a/parser/testdata/00804_test_zstd_qat_codec_compression/ast.json b/parser/testdata/00804_test_zstd_qat_codec_compression/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00804_test_zstd_qat_codec_compression/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00804_test_zstd_qat_codec_compression/metadata.json b/parser/testdata/00804_test_zstd_qat_codec_compression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00804_test_zstd_qat_codec_compression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00804_test_zstd_qat_codec_compression/query.sql b/parser/testdata/00804_test_zstd_qat_codec_compression/query.sql new file mode 100644 index 000000000..92748efd2 --- /dev/null +++ b/parser/testdata/00804_test_zstd_qat_codec_compression/query.sql @@ -0,0 +1,50 @@ +--Tags: no-fasttest, no-cpu-aarch64, no-cpu-s390x +-- no-fasttest because ZSTD_QAT isn't available in fasttest +-- no-cpu-aarch64 and no-cpu-s390x because ZSTD_QAT is x86-only + +SET enable_zstd_qat_codec = 1; + +-- Suppress test failures because stderr contains warning "Initialization of hardware-assisted ZSTD_QAT codec failed, falling back to software ZSTD codec." 
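+-- (The fallback is transparent for the checks below: software ZSTD writes compatible frames, so the query results should not depend on QAT hardware being present.)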
+SET send_logs_level = 'fatal'; + +DROP TABLE IF EXISTS compression_codec; + +-- negative test +CREATE TABLE compression_codec(id UInt64 CODEC(ZSTD_QAT(0))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_CODEC_PARAMETER } +CREATE TABLE compression_codec(id UInt64 CODEC(ZSTD_QAT(13))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_CODEC_PARAMETER } + +CREATE TABLE compression_codec( + id UInt64 CODEC(ZSTD_QAT), + data String CODEC(ZSTD_QAT), + ddd Date CODEC(ZSTD_QAT), + ddd32 Date32 CODEC(ZSTD_QAT), + somenum Float64 CODEC(ZSTD_QAT), + somestr FixedString(3) CODEC(ZSTD_QAT), + othernum Int64 CODEC(ZSTD_QAT), + somearray Array(UInt8) CODEC(ZSTD_QAT), + somemap Map(String, UInt32) CODEC(ZSTD_QAT), + sometuple Tuple(UInt16, UInt64) CODEC(ZSTD_QAT), +) ENGINE = MergeTree() ORDER BY tuple(); + +SHOW CREATE TABLE compression_codec; + +INSERT INTO compression_codec VALUES(1, 'hello', toDate('2018-12-14'), toDate32('2018-12-14'), 1.1, 'aaa', 5, [1,2,3], map('k1',1,'k2',2), tuple(1,2)); +INSERT INTO compression_codec VALUES(2, 'world', toDate('2018-12-15'), toDate32('2018-12-15'), 2.2, 'bbb', 6, [4,5,6], map('k3',3,'k4',4), tuple(3,4)); +INSERT INTO compression_codec VALUES(3, '!', toDate('2018-12-16'), toDate32('2018-12-16'), 3.3, 'ccc', 7, [7,8,9], map('k5',5,'k6',6), tuple(5,6)); + +SELECT * FROM compression_codec ORDER BY id; + +OPTIMIZE TABLE compression_codec FINAL; + +INSERT INTO compression_codec VALUES(2, '', toDate('2018-12-13'), toDate32('2018-12-13'), 4.4, 'ddd', 8, [10,11,12], map('k7',7,'k8',8), tuple(7,8)); + +DETACH TABLE compression_codec; +ATTACH TABLE compression_codec; + +SELECT count(*) FROM compression_codec WHERE id = 2 GROUP BY id; + +INSERT INTO compression_codec SELECT 3, '!', toDate('2018-12-16'), toDate32('2018-12-16'), 3.3, 'ccc', 7, [7,8,9], map('k5',5,'k6',6), tuple(5,6) FROM system.numbers LIMIT 10000; + +SELECT count(*) FROM compression_codec WHERE id = 3 GROUP BY id; + +DROP TABLE IF EXISTS compression_codec; diff --git a/parser/testdata/00805_round_down/ast.json b/parser/testdata/00805_round_down/ast.json new file mode 100644 index 000000000..1e9deecf6 --- /dev/null +++ b/parser/testdata/00805_round_down/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number (alias x)" + }, + { + "explain": " Function roundDown (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal Array_[UInt64_0, UInt64_1, UInt64_2, UInt64_3, UInt64_4, UInt64_5]" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001586231, + "rows_read": 14, + "bytes_read": 592 + } +} diff --git a/parser/testdata/00805_round_down/metadata.json b/parser/testdata/00805_round_down/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00805_round_down/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00805_round_down/query.sql 
b/parser/testdata/00805_round_down/query.sql new file mode 100644 index 000000000..28377580a --- /dev/null +++ b/parser/testdata/00805_round_down/query.sql @@ -0,0 +1,19 @@ +SELECT number as x, roundDown(x, [0, 1, 2, 3, 4, 5]) FROM system.numbers LIMIT 10; +SELECT toUInt8(number) as x, roundDown(x, [-1.5, e(), pi(), 5.5]) FROM system.numbers LIMIT 10; +SELECT toInt32(number) as x, roundDown(x, [e(), pi(), pi(), e()]) FROM system.numbers LIMIT 10; +SELECT number as x, roundDown(x, [6, 5, 4]) FROM system.numbers LIMIT 10; +SELECT 1 as x, roundDown(x, [6, 5, 4]); + +SET send_logs_level = 'fatal'; +SELECT 1 as x, roundDown(x, []); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT 1 as x, roundDown(x, emptyArrayUInt8()); -- { serverError ILLEGAL_COLUMN } +SELECT roundDown(number, [number]) FROM system.numbers LIMIT 10; -- { serverError ILLEGAL_COLUMN } + +SELECT 1 as x, roundDown(x, [1]); +SELECT 1 as x, roundDown(x, [1.5]); + +SELECT number % 10 as x, roundDown(x, (SELECT groupArray(number * 1.25) FROM numbers(100000))) FROM system.numbers LIMIT 10; + +SELECT toDecimal64(number, 5) / 100 as x, roundDown(x, [4, 5, 6]) FROM system.numbers LIMIT 10; +SELECT toDecimal64(number, 5) / 100 as x, roundDown(x, [toDecimal64(0.04, 5), toDecimal64(0.05, 5), toDecimal64(0.06, 5)]) FROM system.numbers LIMIT 10; +SELECT toDecimal64(number, 5) / 100 as x, roundDown(x, [toDecimal32(0.04, 2), toDecimal32(0.05, 2), toDecimal32(0.06, 2)]) FROM system.numbers LIMIT 10; diff --git a/parser/testdata/00806_alter_update/ast.json b/parser/testdata/00806_alter_update/ast.json new file mode 100644 index 000000000..561224967 --- /dev/null +++ b/parser/testdata/00806_alter_update/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alter_update_00806 (children 1)" + }, + { + "explain": " Identifier alter_update_00806" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001231919, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/00806_alter_update/metadata.json b/parser/testdata/00806_alter_update/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00806_alter_update/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00806_alter_update/query.sql b/parser/testdata/00806_alter_update/query.sql new file mode 100644 index 000000000..c9b1bfb2d --- /dev/null +++ b/parser/testdata/00806_alter_update/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS alter_update_00806; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE alter_update_00806 (d Date, e Enum8('foo'=1, 'bar'=2)) Engine = MergeTree(d, (d), 8192); +INSERT INTO alter_update_00806 (d, e) VALUES ('2018-01-01', 'foo'); +INSERT INTO alter_update_00806 (d, e) VALUES ('2018-01-02', 'bar'); + +ALTER TABLE alter_update_00806 UPDATE e = CAST('foo', 'Enum8(\'foo\' = 1, \'bar\' = 2)') WHERE d='2018-01-02' SETTINGS mutations_sync = 1; + + +SELECT e FROM alter_update_00806 ORDER BY d; + +DROP TABLE alter_update_00806; diff --git a/parser/testdata/00807_regexp_quote_meta/ast.json b/parser/testdata/00807_regexp_quote_meta/ast.json new file mode 100644 index 000000000..99581bab2 --- /dev/null +++ b/parser/testdata/00807_regexp_quote_meta/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery 
(children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function regexpQuoteMeta (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'hello'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001829427, + "rows_read": 7, + "bytes_read": 267 + } +} diff --git a/parser/testdata/00807_regexp_quote_meta/metadata.json b/parser/testdata/00807_regexp_quote_meta/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00807_regexp_quote_meta/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00807_regexp_quote_meta/query.sql b/parser/testdata/00807_regexp_quote_meta/query.sql new file mode 100644 index 000000000..afac2d7be --- /dev/null +++ b/parser/testdata/00807_regexp_quote_meta/query.sql @@ -0,0 +1,12 @@ +SELECT regexpQuoteMeta('hello'); +SELECT regexpQuoteMeta('hel\\lo'); +SELECT regexpQuoteMeta('h{ell}o'); +SELECT regexpQuoteMeta('(h{ell}o)'); +SELECT regexpQuoteMeta(''); +SELECT regexpQuoteMeta('('); +SELECT regexpQuoteMeta('Hello('); +SELECT regexpQuoteMeta('(Hello'); +SELECT regexpQuoteMeta('((((((((('); +SELECT regexpQuoteMeta('\\'); +SELECT regexpQuoteMeta('\0\\|()^$.[?*+{'); +SELECT DISTINCT regexpQuoteMeta(toString(number)) = toString(number) FROM numbers(100000); diff --git a/parser/testdata/00808_array_enumerate_segfault/ast.json b/parser/testdata/00808_array_enumerate_segfault/ast.json new file mode 100644 index 000000000..d7baa4d78 --- /dev/null +++ b/parser/testdata/00808_array_enumerate_segfault/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001160254, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00808_array_enumerate_segfault/metadata.json b/parser/testdata/00808_array_enumerate_segfault/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00808_array_enumerate_segfault/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00808_array_enumerate_segfault/query.sql b/parser/testdata/00808_array_enumerate_segfault/query.sql new file mode 100644 index 000000000..16c94aeb9 --- /dev/null +++ b/parser/testdata/00808_array_enumerate_segfault/query.sql @@ -0,0 +1,4 @@ +SET send_logs_level = 'fatal'; +SELECT arrayEnumerateUniq(anyHeavy([]), []); +SELECT arrayEnumerateDense([], [sequenceCount(NULL)]); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } +SELECT arrayEnumerateDense([STDDEV_SAMP(NULL, 910947.571364)], [NULL]); diff --git a/parser/testdata/00808_not_optimize_predicate/ast.json b/parser/testdata/00808_not_optimize_predicate/ast.json new file mode 100644 index 000000000..1b9d7d561 --- /dev/null +++ b/parser/testdata/00808_not_optimize_predicate/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001265607, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00808_not_optimize_predicate/metadata.json b/parser/testdata/00808_not_optimize_predicate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00808_not_optimize_predicate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00808_not_optimize_predicate/query.sql 
b/parser/testdata/00808_not_optimize_predicate/query.sql new file mode 100644 index 000000000..560ace5ef --- /dev/null +++ b/parser/testdata/00808_not_optimize_predicate/query.sql @@ -0,0 +1,78 @@ +SET send_logs_level = 'fatal'; +SET convert_query_to_cnf = 0; +SET allow_deprecated_error_prone_window_functions = 1; + +DROP TABLE IF EXISTS test_00808; +CREATE TABLE test_00808(date Date, id Int8, name String, value Int64, sign Int8) ENGINE = CollapsingMergeTree(sign) ORDER BY (id, date); + +INSERT INTO test_00808 VALUES('2000-01-01', 1, 'test string 1', 1, 1); +INSERT INTO test_00808 VALUES('2000-01-01', 2, 'test string 2', 2, 1); + +SET enable_optimize_predicate_expression = 1; + +SELECT '-------ENABLE OPTIMIZE PREDICATE-------'; +SELECT * FROM (SELECT * FROM test_00808 FINAL) WHERE id = 1; +SELECT * FROM (SELECT * FROM test_00808 ORDER BY id LIMIT 1) WHERE id = 1; +SELECT * FROM (SELECT id FROM test_00808 GROUP BY id LIMIT 1 BY id) WHERE id = 1; + +SET force_primary_key = 1; + +SELECT '-------FORCE PRIMARY KEY-------'; +SELECT * FROM (SELECT * FROM test_00808 LIMIT 1) WHERE id = 1; -- { serverError INDEX_NOT_USED } +SELECT * FROM (SELECT id FROM test_00808 GROUP BY id LIMIT 1 BY id) WHERE id = 1; -- { serverError INDEX_NOT_USED } + +SELECT '-------CHECK STATEFUL FUNCTIONS-------'; +SELECT n, z, changed FROM ( + SELECT n, z, runningDifferenceStartingWithFirstValue(n) AS changed FROM ( + SELECT ts, n,z FROM system.one ARRAY JOIN [1,3,4,5,6] AS ts, + [1,2,2,2,1] AS n, ['a', 'a', 'b', 'a', 'b'] AS z + ORDER BY n, ts DESC + ) +) WHERE changed = 0; + + +SELECT arrayJoin(arrayMap(x -> x, arraySort(groupArray((ts, n))))) AS k FROM ( + SELECT ts, n, z FROM system.one ARRAY JOIN [1, 3, 4, 5, 6] AS ts, [1, 2, 2, 2, 1] AS n, ['a', 'a', 'b', 'a', 'b'] AS z + ORDER BY n ASC, ts DESC +) WHERE z = 'a' GROUP BY z; + + +DROP TABLE IF EXISTS test_00808; + +SELECT '-------finalizeAggregation should not be stateful (issue #14847)-------'; + +DROP TABLE IF EXISTS test_00808_push_down_with_finalizeAggregation; + +CREATE TABLE test_00808_push_down_with_finalizeAggregation ENGINE = AggregatingMergeTree +ORDER BY n AS +SELECT + intDiv(number, 25) AS n, + avgState(number) AS s +FROM numbers(2500) +GROUP BY n +ORDER BY n; + +SET force_primary_key = 1, enable_optimize_predicate_expression = 1; + +SELECT * +FROM +( + SELECT + n, + finalizeAggregation(s) + FROM test_00808_push_down_with_finalizeAggregation +) +WHERE (n >= 2) AND (n <= 5) +ORDER BY n; + +EXPLAIN SYNTAX SELECT * +FROM +( + SELECT + n, + finalizeAggregation(s) + FROM test_00808_push_down_with_finalizeAggregation +) +WHERE (n >= 2) AND (n <= 5); + +DROP TABLE IF EXISTS test_00808_push_down_with_finalizeAggregation; diff --git a/parser/testdata/00809_add_days_segfault/ast.json b/parser/testdata/00809_add_days_segfault/ast.json new file mode 100644 index 000000000..b54eb97ab --- /dev/null +++ b/parser/testdata/00809_add_days_segfault/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function ignore (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function addDays (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList 
(children 1)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal Int64_-1" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001741598, + "rows_read": 12, + "bytes_read": 472 + } +} diff --git a/parser/testdata/00809_add_days_segfault/metadata.json b/parser/testdata/00809_add_days_segfault/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00809_add_days_segfault/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00809_add_days_segfault/query.sql b/parser/testdata/00809_add_days_segfault/query.sql new file mode 100644 index 000000000..3be654f71 --- /dev/null +++ b/parser/testdata/00809_add_days_segfault/query.sql @@ -0,0 +1,12 @@ +SELECT ignore(addDays(toDateTime(0), -1)); +SELECT ignore(subtractDays(toDateTime(0), 1)); + +SELECT ignore(addDays(toDate(0), -1)); +SELECT ignore(subtractDays(toDate(0), 1)); + +SET send_logs_level = 'fatal'; + +SELECT ignore(addDays((CAST((96.338) AS DateTime)), -3)); +SELECT ignore(subtractDays((CAST((-5263074.47) AS DateTime)), -737895)); +SELECT quantileDeterministic([], identity(( SELECT subtractDays((CAST((566450.398706) AS DateTime)), 54) ) )), '\0', []; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT sequenceCount((CAST((( SELECT NULL ) AS rg, ( SELECT ( SELECT [], 'A') AS String))]]); -- { serverError ILLEGAL_COLUMN } diff --git a/parser/testdata/00812_prewhere_alias_array/ast.json b/parser/testdata/00812_prewhere_alias_array/ast.json new file mode 100644 index 000000000..8d43682f1 --- /dev/null +++ b/parser/testdata/00812_prewhere_alias_array/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery prewhere (children 1)" + }, + { + "explain": " Identifier prewhere" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001488, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00812_prewhere_alias_array/metadata.json b/parser/testdata/00812_prewhere_alias_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00812_prewhere_alias_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00812_prewhere_alias_array/query.sql b/parser/testdata/00812_prewhere_alias_array/query.sql new file mode 100644 index 000000000..7637dd23b --- /dev/null +++ b/parser/testdata/00812_prewhere_alias_array/query.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS prewhere; +CREATE TABLE prewhere (x Array(UInt64), y ALIAS x, s String) ENGINE = MergeTree ORDER BY tuple(); +SELECT count() FROM prewhere PREWHERE (length(s) >= 1) = 0 WHERE NOT ignore(y); +DROP TABLE prewhere; diff --git a/parser/testdata/00813_parse_date_time_best_effort_more/ast.json b/parser/testdata/00813_parse_date_time_best_effort_more/ast.json new file mode 100644 index 000000000..1200e4f3a --- /dev/null +++ b/parser/testdata/00813_parse_date_time_best_effort_more/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001413947, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00813_parse_date_time_best_effort_more/metadata.json b/parser/testdata/00813_parse_date_time_best_effort_more/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00813_parse_date_time_best_effort_more/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff 
--git a/parser/testdata/00813_parse_date_time_best_effort_more/query.sql b/parser/testdata/00813_parse_date_time_best_effort_more/query.sql new file mode 100644 index 000000000..7c2b75cdd --- /dev/null +++ b/parser/testdata/00813_parse_date_time_best_effort_more/query.sql @@ -0,0 +1,30 @@ +SET output_format_pretty_display_footer_column_names=0; +SELECT + s, + parseDateTimeBestEffortOrNull(s, 'UTC') AS a, + parseDateTimeBestEffortOrZero(s, 'UTC') AS b +FROM +( + SELECT arrayJoin([ +'24.12.2018', +'24-12-2018', +'24.12.18', +'24-12-18', +'24-Dec-18', +'24/DEC/18', +'24/DEC/2018', +'01-OCT-2015', +'24.12.2018', +'24-12-2018', +'24.12.18', +'24-12-18', +'24-Dec-18', +'24/DEC/18', +'24/DEC/2018', +'01-OCT-2015', +'24.12.18 010203', +'24.12.18 01:02:03', +'24.DEC.18T01:02:03.000+0300', +'01-September-2018 11:22' +]) AS s) +FORMAT PrettySpaceNoEscapes; diff --git a/parser/testdata/00814_parsing_ub/ast.json b/parser/testdata/00814_parsing_ub/ast.json new file mode 100644 index 000000000..a1308cb7c --- /dev/null +++ b/parser/testdata/00814_parsing_ub/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '-128'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.0011827, + "rows_read": 7, + "bytes_read": 257 + } +} diff --git a/parser/testdata/00814_parsing_ub/metadata.json b/parser/testdata/00814_parsing_ub/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00814_parsing_ub/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00814_parsing_ub/query.sql b/parser/testdata/00814_parsing_ub/query.sql new file mode 100644 index 000000000..d58e63197 --- /dev/null +++ b/parser/testdata/00814_parsing_ub/query.sql @@ -0,0 +1,4 @@ +SELECT toInt8('-128'); +SELECT toInt16('-32768'); +SELECT toInt32('-2147483648'); +SELECT toInt64('-9223372036854775808'); diff --git a/parser/testdata/00815_left_join_on_stepanel/ast.json b/parser/testdata/00815_left_join_on_stepanel/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00815_left_join_on_stepanel/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00815_left_join_on_stepanel/metadata.json b/parser/testdata/00815_left_join_on_stepanel/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00815_left_join_on_stepanel/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00815_left_join_on_stepanel/query.sql b/parser/testdata/00815_left_join_on_stepanel/query.sql new file mode 100644 index 000000000..725c9523c --- /dev/null +++ b/parser/testdata/00815_left_join_on_stepanel/query.sql @@ -0,0 +1,17 @@ + +DROP TABLE IF EXISTS fact_cpc_clicks; +DROP TABLE IF EXISTS dim_model; + +CREATE TABLE fact_cpc_clicks (model_id UInt8) ENGINE = Memory; +CREATE TABLE dim_model (model_id UInt8) ENGINE = Memory; + +INSERT INTO fact_cpc_clicks VALUES (1); +INSERT INTO dim_model VALUES (1); + +select f.model_id from fact_cpc_clicks as f left join dim_model as d on f.model_id=d.model_id limit 10; + +USE default; + +select f.model_id from {CLICKHOUSE_DATABASE:Identifier}.fact_cpc_clicks as f left 
join {CLICKHOUSE_DATABASE:Identifier}.dim_model as d on f.model_id=d.model_id limit 10; + +DROP DATABASE {CLICKHOUSE_DATABASE:Identifier}; diff --git a/parser/testdata/00816_join_column_names_sarg/ast.json b/parser/testdata/00816_join_column_names_sarg/ast.json new file mode 100644 index 000000000..efdc17672 --- /dev/null +++ b/parser/testdata/00816_join_column_names_sarg/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1_00816 (children 1)" + }, + { + "explain": " Identifier t1_00816" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00116398, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00816_join_column_names_sarg/metadata.json b/parser/testdata/00816_join_column_names_sarg/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00816_join_column_names_sarg/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00816_join_column_names_sarg/query.sql b/parser/testdata/00816_join_column_names_sarg/query.sql new file mode 100644 index 000000000..f0363aefa --- /dev/null +++ b/parser/testdata/00816_join_column_names_sarg/query.sql @@ -0,0 +1,21 @@ +drop table if exists t1_00816; +drop table if exists t2_00816; +create table t1_00816 (a Int8, val Float32) engine=Memory(); +create table t2_00816 (a Int8, val Float32) engine=Memory(); + +INSERT INTO t1_00816 VALUES (1, 123); +INSERT INTO t2_00816 VALUES (1, 456); + + +select t1_00816.a, t2_00816.a from t1_00816 all inner join t2_00816 on t1_00816.a=t2_00816.a; +-- Received exception from server (version 18.14.1): +-- Code: 47. DB::Exception: Received from localhost:9000, 127.0.0.1. DB::Exception: Unknown identifier: t2_00816.a. 
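+-- (Historical note: the exception above is from an old server version; current servers resolve t2_00816.a from the right-hand table, so this query is expected to succeed.)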
+ +-- this query works fine +select t1_00816.a, t2_00816.* from t1_00816 all inner join t2_00816 on t1_00816.a=t2_00816.a; + +-- and this +select t1_00816.a, t2_00816.val from t1_00816 all inner join t2_00816 on t1_00816.a=t2_00816.a; + +DROP TABLE t1_00816; +DROP TABLE t2_00816; diff --git a/parser/testdata/00817_with_simple/ast.json b/parser/testdata/00817_with_simple/ast.json new file mode 100644 index 000000000..04770a299 --- /dev/null +++ b/parser/testdata/00817_with_simple/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number (alias k)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.00166686, + "rows_read": 12, + "bytes_read": 457 + } +} diff --git a/parser/testdata/00817_with_simple/metadata.json b/parser/testdata/00817_with_simple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00817_with_simple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00817_with_simple/query.sql b/parser/testdata/00817_with_simple/query.sql new file mode 100644 index 000000000..8f1b8e6fe --- /dev/null +++ b/parser/testdata/00817_with_simple/query.sql @@ -0,0 +1 @@ +WITH number AS k SELECT k FROM system.numbers LIMIT 10; diff --git a/parser/testdata/00818_alias_bug_4110/ast.json b/parser/testdata/00818_alias_bug_4110/ast.json new file mode 100644 index 000000000..9ac85e2b3 --- /dev/null +++ b/parser/testdata/00818_alias_bug_4110/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001281393, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00818_alias_bug_4110/metadata.json b/parser/testdata/00818_alias_bug_4110/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00818_alias_bug_4110/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00818_alias_bug_4110/query.sql b/parser/testdata/00818_alias_bug_4110/query.sql new file mode 100644 index 000000000..dc4ada180 --- /dev/null +++ b/parser/testdata/00818_alias_bug_4110/query.sql @@ -0,0 +1,27 @@ +SET enable_analyzer = 1; + +select s.a as a, s.a + 1 as b from (select 10 as a) s; +select s.a + 1 as a, s.a as b from (select 10 as a) s; +select s.a + 1 as a, s.a + 1 as b from (select 10 as a) s; +select s.a + 1 as b, s.a + 2 as a from (select 10 as a) s; +select s.a + 2 as b, s.a + 1 as a from (select 10 as a) s; + +select a, a as a from (select 10 as a); +select s.a, a, a + 1 as a from (select 10 as a) as s; +select s.a + 2 as b, b - 1 as a from (select 10 as a) s; +select s.a as a, s.a + 2 as b from (select 10 as a) s; +select s.a + 1 as a, s.a + 2 as b from (select 10 as a) s; +select a + 1 as a, a + 1 as b from (select 10 as a); +select a + 1 as b, b + 1 as a from (select 10 as 
a); -- { serverError CYCLIC_ALIASES, UNKNOWN_IDENTIFIER } +select 10 as a, a + 1 as a; -- { serverError UNKNOWN_IDENTIFIER } +with 10 as a select a as a; -- { serverError UNKNOWN_IDENTIFIER } +with 10 as a select a + 1 as a; -- { serverError UNKNOWN_IDENTIFIER } + +SELECT 0 as t FROM (SELECT 1 as t) as inn WHERE inn.t = 1; +SELECT sum(value) as value FROM (SELECT 1 as value) as data WHERE data.value > 0; + +DROP TABLE IF EXISTS test_00818; +CREATE TABLE test_00818 (field String, not_field String) ENGINE = Memory; +INSERT INTO test_00818 (field, not_field) VALUES ('123', '456'); +SELECT test_00818.field AS other_field, test_00818.not_field AS field FROM test_00818; +DROP TABLE test_00818; diff --git a/parser/testdata/00818_inner_join_bug_3567/ast.json b/parser/testdata/00818_inner_join_bug_3567/ast.json new file mode 100644 index 000000000..90b8e8063 --- /dev/null +++ b/parser/testdata/00818_inner_join_bug_3567/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001183373, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00818_inner_join_bug_3567/metadata.json b/parser/testdata/00818_inner_join_bug_3567/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00818_inner_join_bug_3567/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00818_inner_join_bug_3567/query.sql b/parser/testdata/00818_inner_join_bug_3567/query.sql new file mode 100644 index 000000000..3b4b3cd77 --- /dev/null +++ b/parser/testdata/00818_inner_join_bug_3567/query.sql @@ -0,0 +1,19 @@ +SET output_format_pretty_color = 1; +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS table1; +DROP TABLE IF EXISTS table2; + +CREATE TABLE table1(a String, b Date) ENGINE MergeTree order by a; +CREATE TABLE table2(c String, a String, d Date) ENGINE MergeTree order by c; + +INSERT INTO table1 VALUES ('a', '2018-01-01') ('b', '2018-01-01') ('c', '2018-01-01'); +INSERT INTO table2 VALUES ('D', 'd', '2018-01-01') ('B', 'b', '2018-01-01') ('C', 'c', '2018-01-01'); + +SELECT * FROM table1 t1 FORMAT PrettyCompact; +SELECT *, c as a, d as b FROM table2 FORMAT PrettyCompact; +SELECT * FROM table1 t1 ALL LEFT JOIN (SELECT *, c, d as b FROM table2) t2 USING (a, b) ORDER BY d, t1.a FORMAT PrettyCompact; +SELECT * FROM table1 t1 ALL INNER JOIN (SELECT *, c, d as b FROM table2) t2 USING (a, b) ORDER BY d, t1.a FORMAT PrettyCompact; + +DROP TABLE table1; +DROP TABLE table2; diff --git a/parser/testdata/00818_join_bug_4271/ast.json b/parser/testdata/00818_join_bug_4271/ast.json new file mode 100644 index 000000000..61985c556 --- /dev/null +++ b/parser/testdata/00818_join_bug_4271/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_00818 (children 1)" + }, + { + "explain": " Identifier t_00818" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001444315, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00818_join_bug_4271/metadata.json b/parser/testdata/00818_join_bug_4271/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00818_join_bug_4271/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00818_join_bug_4271/query.sql b/parser/testdata/00818_join_bug_4271/query.sql new file mode 100644 index 000000000..7bf3b4bff --- /dev/null +++ 
b/parser/testdata/00818_join_bug_4271/query.sql @@ -0,0 +1,17 @@ +drop table if exists t_00818; +drop table if exists s_00818; + +create table t_00818(a Nullable(Int64), b Nullable(Int64), c Nullable(String)) engine = Memory; +create table s_00818(a Nullable(Int64), b Nullable(Int64), c Nullable(String)) engine = Memory; + +insert into t_00818 values(1,1,'a'), (2,2,'b'); +insert into s_00818 values(1,1,'a'); + +select * from t_00818 left join s_00818 on t_00818.a = s_00818.a ORDER BY t_00818.a; +select * from t_00818 left join s_00818 on t_00818.a = s_00818.a and t_00818.a = s_00818.b ORDER BY t_00818.a; +select * from t_00818 left join s_00818 on t_00818.a = s_00818.a where s_00818.a = 1 ORDER BY t_00818.a; +select * from t_00818 left join s_00818 on t_00818.a = s_00818.a and t_00818.a = s_00818.a ORDER BY t_00818.a; +select * from t_00818 left join s_00818 on t_00818.a = s_00818.a and t_00818.b = s_00818.a ORDER BY t_00818.a; + +drop table t_00818; +drop table s_00818; diff --git a/parser/testdata/00819_ast_refactoring_bugs/ast.json b/parser/testdata/00819_ast_refactoring_bugs/ast.json new file mode 100644 index 000000000..8b068f4b3 --- /dev/null +++ b/parser/testdata/00819_ast_refactoring_bugs/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery visits1 (children 1)" + }, + { + "explain": " Identifier visits1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001346539, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00819_ast_refactoring_bugs/metadata.json b/parser/testdata/00819_ast_refactoring_bugs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00819_ast_refactoring_bugs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00819_ast_refactoring_bugs/query.sql b/parser/testdata/00819_ast_refactoring_bugs/query.sql new file mode 100644 index 000000000..9737d8947 --- /dev/null +++ b/parser/testdata/00819_ast_refactoring_bugs/query.sql @@ -0,0 +1,30 @@ +DROP TABLE IF EXISTS visits1; +CREATE TABLE visits1 +( + Sign Int8, + Arr Array(Int8), + `ParsedParams.Key1` Array(String), + `ParsedParams.Key2` Array(String), + CounterID UInt32 +) ENGINE = Memory; + +SELECT arrayMap(x -> x * Sign, Arr) FROM visits1; + +SELECT PP.Key2 AS `ym:s:pl2` +FROM visits1 +ARRAY JOIN + `ParsedParams.Key2` AS `PP.Key2`, + `ParsedParams.Key1` AS `PP.Key1`, + arrayEnumerateUniq(`ParsedParams.Key2`, arrayMap(x_0 -> 1, `ParsedParams.Key1`)) AS `upp_==_yes_`, + arrayEnumerateUniq(`ParsedParams.Key2`) AS _uniq_ParsedParams +WHERE CounterID = 100500; + +DROP TABLE visits1; + +select u, cumSum from ( + select u, min(d) mn, max(d) mx, groupArray(d) dg, groupArray(v) vg, + arrayMap(x -> x + mn, range(toUInt32(mx - mn + 1))) days, + toString(arrayCumSum(arrayMap( x -> vg[indexOf(dg, x)] , days))) cumSum + from (select 1 u, today()-1 d, 1 v) + group by u +); diff --git a/parser/testdata/00819_full_join_wrong_columns_in_block/ast.json b/parser/testdata/00819_full_join_wrong_columns_in_block/ast.json new file mode 100644 index 000000000..86fbdf795 --- /dev/null +++ b/parser/testdata/00819_full_join_wrong_columns_in_block/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001182406, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git 
a/parser/testdata/00819_full_join_wrong_columns_in_block/metadata.json b/parser/testdata/00819_full_join_wrong_columns_in_block/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00819_full_join_wrong_columns_in_block/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00819_full_join_wrong_columns_in_block/query.sql b/parser/testdata/00819_full_join_wrong_columns_in_block/query.sql new file mode 100644 index 000000000..8b3d3ec1b --- /dev/null +++ b/parser/testdata/00819_full_join_wrong_columns_in_block/query.sql @@ -0,0 +1,19 @@ +SET any_join_distinct_right_table_keys = 1; +SET joined_subquery_requires_alias = 0; +SET enable_analyzer = 1; + +SELECT * FROM (SELECT 1 AS a, 'x' AS b) join (SELECT 1 as a, 'y' as b) using a; +SELECT * FROM (SELECT 1 AS a, 'x' AS b) left join (SELECT 1 as a, 'y' as b) using a; +SELECT * FROM (SELECT 1 AS a, 'x' AS b) full join (SELECT 1 as a, 'y' as b) using a; +SELECT * FROM (SELECT 1 AS a, 'x' AS b) right join (SELECT 1 as a, 'y' as b) using a; + +SELECT * FROM (SELECT 1 AS a, 'x' AS b) any join (SELECT 1 as a, 'y' as b) using a; +SELECT * FROM (SELECT 1 AS a, 'x' AS b) any left join (SELECT 1 as a, 'y' as b) using a; +SELECT * FROM (SELECT 1 AS a, 'x' AS b) any full join (SELECT 1 as a, 'y' as b) using a; +SELECT * FROM (SELECT 1 AS a, 'x' AS b) any right join (SELECT 1 as a, 'y' as b) using a; + +SET any_join_distinct_right_table_keys = 0; +SELECT * FROM (SELECT 1 AS a, 'x' AS b) any join (SELECT 1 as a, 'y' as b) using a; +SELECT * FROM (SELECT 1 AS a, 'x' AS b) left join (SELECT 1 as a, 'y' as b) using a; +SELECT * FROM (SELECT 1 AS a, 'x' AS b) any right join (SELECT 1 as a, 'y' as b) using a; +SELECT * FROM (SELECT 1 AS a, 'x' AS b) any full join (SELECT 1 as a, 'y' as b) using a; -- { serverError NOT_IMPLEMENTED } diff --git a/parser/testdata/00820_multiple_joins/ast.json b/parser/testdata/00820_multiple_joins/ast.json new file mode 100644 index 000000000..c55e07605 --- /dev/null +++ b/parser/testdata/00820_multiple_joins/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001163864, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00820_multiple_joins/metadata.json b/parser/testdata/00820_multiple_joins/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00820_multiple_joins/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00820_multiple_joins/query.sql b/parser/testdata/00820_multiple_joins/query.sql new file mode 100644 index 000000000..b4197570c --- /dev/null +++ b/parser/testdata/00820_multiple_joins/query.sql @@ -0,0 +1,108 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS table1; +DROP TABLE IF EXISTS table2; +DROP TABLE IF EXISTS table3; +DROP TABLE IF EXISTS table5; +DROP TABLE IF EXISTS table_set; + +CREATE TABLE table1 (a UInt32) ENGINE = Memory; +CREATE TABLE table2 (a UInt32, b UInt32) ENGINE = Memory; +CREATE TABLE table3 (b UInt32, c UInt32) ENGINE = Memory; +CREATE TABLE table5 (a UInt32, b UInt32, c UInt32) ENGINE = Memory; + +INSERT INTO table1 SELECT number FROM numbers(21); +INSERT INTO table2 SELECT number * 2, number * 20 FROM numbers(11); +INSERT INTO table3 SELECT number * 30, number * 300 FROM numbers(10); +INSERT INTO table5 SELECT number * 5, number * 50, number * 500 FROM numbers(10); + +select t1.a, t2.b, t3.c from 
table1 as t1 join table2 as t2 on t1.a = t2.a join table3 as t3 on t2.b = t3.b ORDER BY t1.a; +select t1.a, t2.b, t5.c from table1 as t1 join table2 as t2 on t1.a = t2.a join table5 as t5 on t1.a = t5.a AND t2.b = t5.b ORDER BY t1.a; + +select t1.a, t2.a, t2.b, t3.b, t3.c, t5.a, t5.b, t5.c +from table1 as t1 +join table2 as t2 on t1.a = t2.a +join table3 as t3 on t2.b = t3.b +join table5 as t5 on t3.c = t5.c +ORDER BY t1.a +FORMAT PrettyCompactNoEscapes; + +select t1.a as t1_a, t2.a as t2_a, t2.b as t2_b, t3.b as t3_b +from table1 as t1 +join table2 as t2 on t1_a = t2_a +join table3 as t3 on t2_b = t3_b +ORDER BY t1.a +; + +select t1.a as t1_a, t2.a as t2_a, t2.b as t2_b, t3.b as t3_b +from table1 as t1 +join table2 as t2 on t1.a = t2.a +join table3 as t3 on t2.b = t3.b +ORDER BY t1.a +; + +select t1.a as t1_a, t2.a as t2_a, t2.b as t2_b, t3.b as t3_b +from table1 as t1 +join table2 as t2 on table1.a = table2.a +join table3 as t3 on table2.b = table3.b +ORDER BY t1.a +; + +select t1.a, t2.a, t2.b, t3.b +from table1 as t1 +join table2 as t2 on table1.a = table2.a +join table3 as t3 on table2.b = table3.b +ORDER BY t1.a +; + +select t1.a, t2.a, t2.b, t3.b +from table1 as t1 +join table2 as t2 on t1.a = t2.a +join table3 as t3 on t2.b = t3.b +ORDER BY t1.a +; + +select table1.a, table2.a, table2.b, table3.b +from table1 as t1 +join table2 as t2 on table1.a = table2.a +join table3 as t3 on table2.b = table3.b +ORDER BY t1.a +; + +select t1.*, t2.*, t3.* +from table1 as t1 +join table2 as t2 on table1.a = table2.a +join table3 as t3 on table2.b = table3.b +ORDER BY t1.a +FORMAT PrettyCompactNoEscapes; + +select * +from table1 as t1 +join table2 as t2 on t1.a = t2.a +join table3 as t3 on t2.b = t3.b +ORDER BY t1.a +FORMAT PrettyCompactNoEscapes; + +select t1.a as t1_a, t2.a as t2_a, t2.b as t2_b, t3.b as t3_b, + (t1.a + table2.b) as t1_t2_x, (table1.a + table3.b) as t1_t3_x, (t2.b + t3.b) as t2_t3_x +from table1 as t1 +join table2 as t2 on t1_a = t2_a +join table3 as t3 on t2_b = t3_b +ORDER BY t1.a +; + +CREATE TABLE table_set ( x UInt32 ) ENGINE = Set; +INSERT INTO table_set VALUES (0), (1), (2); + +select count() +from table1 as t1 +join table2 as t2 on t1.a = t2.a +join table3 as t3 on t2.b = t3.b +join table5 as t5 on t3.c = t5.c +WHERE t1.a in table_set; + +DROP TABLE table_set; +DROP TABLE table1; +DROP TABLE table2; +DROP TABLE table3; +DROP TABLE table5; diff --git a/parser/testdata/00820_multiple_joins_subquery_requires_alias/ast.json b/parser/testdata/00820_multiple_joins_subquery_requires_alias/ast.json new file mode 100644 index 000000000..ec2bad700 --- /dev/null +++ b/parser/testdata/00820_multiple_joins_subquery_requires_alias/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001430002, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00820_multiple_joins_subquery_requires_alias/metadata.json b/parser/testdata/00820_multiple_joins_subquery_requires_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00820_multiple_joins_subquery_requires_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00820_multiple_joins_subquery_requires_alias/query.sql b/parser/testdata/00820_multiple_joins_subquery_requires_alias/query.sql new file mode 100644 index 000000000..538e6967f --- /dev/null +++ 
b/parser/testdata/00820_multiple_joins_subquery_requires_alias/query.sql @@ -0,0 +1,91 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS table1; +DROP TABLE IF EXISTS table2; +DROP TABLE IF EXISTS table3; +DROP TABLE IF EXISTS table5; + +CREATE TABLE table1 (a UInt32) ENGINE = Memory; +CREATE TABLE table2 (a UInt32, b UInt32) ENGINE = Memory; +CREATE TABLE table3 (b UInt32, c UInt32) ENGINE = Memory; +CREATE TABLE table5 (a UInt32, b UInt32, c UInt32) ENGINE = Memory; + +INSERT INTO table1 SELECT number FROM numbers(21); +INSERT INTO table2 SELECT number * 2, number * 20 FROM numbers(11); +INSERT INTO table3 SELECT number * 30, number * 300 FROM numbers(10); +INSERT INTO table5 SELECT number * 5, number * 50, number * 500 FROM numbers(10); + +SET joined_subquery_requires_alias = 1; + +select t1.a, t2.b, t3.c from table1 as t1 join table2 as t2 on t1.a = t2.a join table3 as t3 on t2.b = t3.b ORDER BY t1.a; +select t1.a, t2.b, t5.c from table1 as t1 join table2 as t2 on t1.a = t2.a join table5 as t5 on t1.a = t5.a AND t2.b = t5.b ORDER BY t1.a; + +select t1.a, t2.a, t2.b, t3.b, t3.c, t5.a, t5.b, t5.c +from table1 as t1 +join table2 as t2 on t1.a = t2.a +join table3 as t3 on t2.b = t3.b +join table5 as t5 on t3.c = t5.c +ORDER BY t1.a +FORMAT PrettyCompactNoEscapes; + +select t1.a as t1_a, t2.a as t2_a, t2.b as t2_b, t3.b as t3_b +from table1 as t1 +join table2 as t2 on t1_a = t2_a +join table3 as t3 on t2_b = t3_b +ORDER BY t1.a; + +select t1.a as t1_a, t2.a as t2_a, t2.b as t2_b, t3.b as t3_b +from table1 as t1 +join table2 as t2 on t1.a = t2.a +join table3 as t3 on t2.b = t3.b +ORDER BY t1.a; + +select t1.a as t1_a, t2.a as t2_a, t2.b as t2_b, t3.b as t3_b +from table1 as t1 +join table2 as t2 on table1.a = table2.a +join table3 as t3 on table2.b = table3.b +ORDER BY t1.a; + +select t1.a, t2.a, t2.b, t3.b +from table1 as t1 +join table2 as t2 on table1.a = table2.a +join table3 as t3 on table2.b = table3.b +ORDER BY t1.a; + +select t1.a, t2.a, t2.b, t3.b +from table1 as t1 +join table2 as t2 on t1.a = t2.a +join table3 as t3 on t2.b = t3.b +ORDER BY t1.a; + +select table1.a, table2.a, table2.b, table3.b +from table1 as t1 +join table2 as t2 on table1.a = table2.a +join table3 as t3 on table2.b = table3.b +ORDER BY t1.a; + +select t1.*, t2.*, t3.* +from table1 as t1 +join table2 as t2 on table1.a = table2.a +join table3 as t3 on table2.b = table3.b +ORDER BY t1.a +FORMAT PrettyCompactNoEscapes; + +select * +from table1 as t1 +join table2 as t2 on t1.a = t2.a +join table3 as t3 on t2.b = t3.b +ORDER BY t1.a +FORMAT PrettyCompactNoEscapes; + +select t1.a as t1_a, t2.a as t2_a, t2.b as t2_b, t3.b as t3_b, + (t1.a + table2.b) as t1_t2_x, (table1.a + table3.b) as t1_t3_x, (t2.b + t3.b) as t2_t3_x +from table1 as t1 +join table2 as t2 on t1_a = t2_a +join table3 as t3 on t2_b = t3_b +ORDER BY t1.a; + +DROP TABLE table1; +DROP TABLE table2; +DROP TABLE table3; +DROP TABLE table5; diff --git a/parser/testdata/00821_distributed_storage_with_join_on/ast.json b/parser/testdata/00821_distributed_storage_with_join_on/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00821_distributed_storage_with_join_on/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00821_distributed_storage_with_join_on/metadata.json b/parser/testdata/00821_distributed_storage_with_join_on/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00821_distributed_storage_with_join_on/metadata.json @@ -0,0 +1 @@ +{"todo": true} 
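The 00820_multiple_joins_subquery_requires_alias fixture above re-runs the multi-join matrix with joined_subquery_requires_alias = 1, which forces every subquery and table function inside a JOIN to carry an alias; plain aliased tables, as used throughout that file, are unaffected. A minimal sketch of the behaviour the setting guards (hypothetical, not part of any fixture here; the error name is assumed from ClickHouse conventions):

    SET joined_subquery_requires_alias = 1;
    SELECT * FROM (SELECT 1 AS a) JOIN (SELECT 1 AS a) t2 USING a; -- { serverError ALIAS_REQUIRED }
    SELECT * FROM (SELECT 1 AS a) t1 JOIN (SELECT 1 AS a) t2 USING a; -- both sides aliased: accepted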
diff --git a/parser/testdata/00821_distributed_storage_with_join_on/query.sql b/parser/testdata/00821_distributed_storage_with_join_on/query.sql new file mode 100644 index 000000000..392421944 --- /dev/null +++ b/parser/testdata/00821_distributed_storage_with_join_on/query.sql @@ -0,0 +1,16 @@ +-- Tags: distributed + +-- NOTE: database = currentDatabase() is not mandatory + +DROP TABLE IF EXISTS table1; +DROP TABLE IF EXISTS table2; + +CREATE TABLE table1 AS system.columns ENGINE = Distributed('test_shard_localhost', system, columns); +CREATE TABLE table2 AS system.tables ENGINE = Distributed('test_shard_localhost', system, tables); + +SELECT 1 FROM table1 T1 ALL INNER JOIN table2 T2 ON T1.table = T2.name LIMIT 1; +SELECT 1 FROM cluster('test_shard_localhost', system.columns) T1 ALL INNER JOIN cluster('test_shard_localhost', system.tables) T2 ON T1.table = T2.name LIMIT 1; +SELECT 1 FROM (SELECT * FROM table1) T1 ALL INNER JOIN (SELECT * FROM table2) T2 ON T1.table = T2.name LIMIT 1; + +DROP TABLE IF EXISTS table1; +DROP TABLE IF EXISTS table2; diff --git a/parser/testdata/00822_array_insert_default/ast.json b/parser/testdata/00822_array_insert_default/ast.json new file mode 100644 index 000000000..b8dd8cb06 --- /dev/null +++ b/parser/testdata/00822_array_insert_default/ast.json @@ -0,0 +1,136 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function ignore (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayFirst (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function empty (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10000000" + } + ], + + "rows": 38, + + "statistics": + { + "elapsed": 0.001615115, + "rows_read": 38, + "bytes_read": 1694 + } +} diff --git 
a/parser/testdata/00822_array_insert_default/metadata.json b/parser/testdata/00822_array_insert_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00822_array_insert_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00822_array_insert_default/query.sql b/parser/testdata/00822_array_insert_default/query.sql new file mode 100644 index 000000000..762d423dd --- /dev/null +++ b/parser/testdata/00822_array_insert_default/query.sql @@ -0,0 +1 @@ +SELECT sum(ignore(*)) FROM (SELECT arrayFirst(x -> empty(x), [[number]]) FROM numbers(10000000)); diff --git a/parser/testdata/00823_sequence_match_dfa/ast.json b/parser/testdata/00823_sequence_match_dfa/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00823_sequence_match_dfa/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00823_sequence_match_dfa/metadata.json b/parser/testdata/00823_sequence_match_dfa/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00823_sequence_match_dfa/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00823_sequence_match_dfa/query.sql b/parser/testdata/00823_sequence_match_dfa/query.sql new file mode 100644 index 000000000..e10ffa9aa --- /dev/null +++ b/parser/testdata/00823_sequence_match_dfa/query.sql @@ -0,0 +1,36 @@ +-- this test cannot pass without the new DFA matching algorithm of sequenceMatch + +DROP TABLE IF EXISTS sequence; + +CREATE TABLE sequence +( + userID UInt64, + eventType Enum8('A' = 1, 'B' = 2, 'C' = 3, 'D' = 4), + EventTime UInt64 +) +ENGINE = Memory; + +INSERT INTO sequence SELECT 1, number = 0 ? 'A' : (number < 1000000 ? 'B' : 'C'), number FROM numbers(1000001); +INSERT INTO sequence SELECT 1, 'D', 1e14; + +SELECT 'ABC' +FROM sequence +GROUP BY userID +HAVING sequenceMatch('(?1).*(?2).*(?3)')(toDateTime(EventTime), eventType = 'A', eventType = 'B', eventType = 'C'); + +SELECT 'ABA' +FROM sequence +GROUP BY userID +HAVING sequenceMatch('(?1).*(?2).*(?3)')(toDateTime(EventTime), eventType = 'A', eventType = 'B', eventType = 'A'); + +SELECT 'ABBC' +FROM sequence +GROUP BY userID +HAVING sequenceMatch('(?1).*(?2).*(?3).*(?4)')(EventTime, eventType = 'A', eventType = 'B', eventType = 'B',eventType = 'C'); + +SELECT 'CD' +FROM sequence +GROUP BY userID +HAVING sequenceMatch('(?1)(?t>=10000000000000)(?2)')(EventTime, eventType = 'C', eventType = 'D'); + +DROP TABLE sequence; diff --git a/parser/testdata/00824_filesystem/ast.json b/parser/testdata/00824_filesystem/ast.json new file mode 100644 index 000000000..480d190cc --- /dev/null +++ b/parser/testdata/00824_filesystem/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function greaterOrEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function filesystemCapacity (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function filesystemAvailable (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function greaterOrEquals (children 1)" + }, + { + "explain": " 
ExpressionList (children 2)" + }, + { + "explain": " Function filesystemAvailable (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function greaterOrEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function filesystemUnreserved (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001592709, + "rows_read": 22, + "bytes_read": 921 + } +} diff --git a/parser/testdata/00824_filesystem/metadata.json b/parser/testdata/00824_filesystem/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00824_filesystem/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00824_filesystem/query.sql b/parser/testdata/00824_filesystem/query.sql new file mode 100644 index 000000000..c8ac9179d --- /dev/null +++ b/parser/testdata/00824_filesystem/query.sql @@ -0,0 +1 @@ +SELECT filesystemCapacity() >= filesystemAvailable() AND filesystemAvailable() >= 0 AND filesystemUnreserved() >= 0; diff --git a/parser/testdata/00826_cross_to_inner_join/ast.json b/parser/testdata/00826_cross_to_inner_join/ast.json new file mode 100644 index 000000000..03ef32f37 --- /dev/null +++ b/parser/testdata/00826_cross_to_inner_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001441651, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00826_cross_to_inner_join/metadata.json b/parser/testdata/00826_cross_to_inner_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00826_cross_to_inner_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00826_cross_to_inner_join/query.sql b/parser/testdata/00826_cross_to_inner_join/query.sql new file mode 100644 index 000000000..6d037dfff --- /dev/null +++ b/parser/testdata/00826_cross_to_inner_join/query.sql @@ -0,0 +1,93 @@ +SET enable_optimize_predicate_expression = 0; +SET optimize_move_to_prewhere = 1; +SET convert_query_to_cnf = 0; + +select * from system.one l cross join system.one r order by all; + +DROP TABLE IF EXISTS t1_00826; +DROP TABLE IF EXISTS t2_00826; + +CREATE TABLE t1_00826 (a Int8, b Nullable(Int8)) ENGINE = Memory; +CREATE TABLE t2_00826 (a Int8, b Nullable(Int8)) ENGINE = Memory; + +INSERT INTO t1_00826 values (1,1), (2,2); +INSERT INTO t2_00826 values (1,1), (1,2); +INSERT INTO t2_00826 (a) values (2), (3); + +SELECT '--- cross ---'; +SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a ORDER BY ALL; +SELECT '--- cross nullable ---'; +SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.b = t2_00826.b ORDER BY ALL; +SELECT '--- cross nullable vs not nullable ---'; +SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.b ORDER BY t1_00826.a; +SELECT '--- cross self ---'; +SELECT * FROM t1_00826 x cross join t1_00826 y where x.a = y.a and x.b = y.b ORDER BY x.a; +SELECT '--- cross one table expr ---'; +SELECT * FROM t1_00826 cross join t2_00826 where t1_00826.a = t1_00826.b order by (t1_00826.a, t2_00826.a, t2_00826.b); +SELECT '--- cross multiple ands ---'; +select * from t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a and t1_00826.b = t2_00826.b order by all; +SELECT '--- cross and inside and ---'; 
+select * from t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a and (t1_00826.b = t2_00826.b and 1) order by all; +SELECT '--- cross split conjunction ---'; +select * from t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a and t1_00826.b = t2_00826.b and t1_00826.a >= 1 and t2_00826.b = 1 order by all; + +SELECT '--- and or ---'; +select * from t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a and t1_00826.b = t2_00826.b and (t1_00826.a >= 1 OR t2_00826.b = 1) order by all; + +SELECT '--- arithmetic expr ---'; +select * from t1_00826 cross join t2_00826 where t1_00826.a + 1 = t2_00826.a + t2_00826.b AND (t1_00826.a + t1_00826.b + t2_00826.a + t2_00826.b > 5) order by all; + +SELECT '--- is null or ---'; +select * from t1_00826 cross join t2_00826 where t1_00826.b = t2_00826.a AND (t2_00826.b IS NULL OR t2_00826.b > t2_00826.a) ORDER BY t1_00826.a; + +SELECT '--- do not rewrite alias ---'; +SELECT a as b FROM t1_00826 cross join t2_00826 where t1_00826.b = t2_00826.a AND b > 0 ORDER BY ALL; + +SELECT '--- comma ---'; +SELECT * FROM t1_00826, t2_00826 where t1_00826.a = t2_00826.a ORDER BY ALL; +SELECT '--- comma nullable ---'; +SELECT * FROM t1_00826, t2_00826 where t1_00826.b = t2_00826.b ORDER BY ALL; +SELECT '--- comma and or ---'; +SELECT * FROM t1_00826, t2_00826 where t1_00826.a = t2_00826.a AND (t2_00826.b IS NULL OR t2_00826.b < 2) +ORDER BY ALL; + + +SELECT '--- cross ---'; +EXPLAIN SYNTAX select * from t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a order by all; +SELECT '--- cross nullable ---'; +EXPLAIN SYNTAX select * from t1_00826, t2_00826 where t1_00826.a = t2_00826.a order by all; +SELECT '--- cross nullable vs not nullable ---'; +EXPLAIN SYNTAX select * from t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.b order by all; +SELECT '--- cross self ---'; +EXPLAIN SYNTAX select * from t1_00826 x cross join t1_00826 y where x.a = y.a and x.b = y.b order by all; +SELECT '--- cross one table expr ---'; +EXPLAIN SYNTAX select * from t1_00826 cross join t2_00826 where t1_00826.a = t1_00826.b order by all; +SELECT '--- cross multiple ands ---'; +EXPLAIN SYNTAX select * from t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a and t1_00826.b = t2_00826.b order by all; +SELECT '--- cross and inside and ---'; +EXPLAIN SYNTAX select * from t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a and (t1_00826.a = t2_00826.a and (t1_00826.a = t2_00826.a and t1_00826.b = t2_00826.b)) order by all; + +SELECT '--- cross split conjunction ---'; +EXPLAIN SYNTAX select * from t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a and t1_00826.b = t2_00826.b and t1_00826.a >= 1 and t2_00826.b > 0 order by all; + +SELECT '--- and or ---'; +EXPLAIN SYNTAX select * from t1_00826 cross join t2_00826 where t1_00826.a = t2_00826.a and t1_00826.b = t2_00826.b and (t1_00826.a >= 1 OR t2_00826.b = 1) order by all; + +SELECT '--- arithmetic expr ---'; +EXPLAIN SYNTAX select * from t1_00826 cross join t2_00826 where t1_00826.a + 1 = t2_00826.a + t2_00826.b AND (t1_00826.a + t1_00826.b + t2_00826.a + t2_00826.b > 5) order by all; + +SELECT '--- is null or ---'; +EXPLAIN SYNTAX select * from t1_00826 cross join t2_00826 where t1_00826.b = t2_00826.a AND (t2_00826.b IS NULL OR t2_00826.b > t2_00826.a) order by all; + +SELECT '--- do not rewrite alias ---'; +EXPLAIN SYNTAX select a as b from t1_00826 cross join t2_00826 where t1_00826.b = t2_00826.a AND b > 0 order by all; + +SELECT '--- comma ---'; +EXPLAIN SYNTAX select * from t1_00826, t2_00826 
where t1_00826.a = t2_00826.a order by all; +SELECT '--- comma nullable ---'; +EXPLAIN SYNTAX select * from t1_00826, t2_00826 where t1_00826.b = t2_00826.b order by all; +SELECT '--- comma and or ---'; +EXPLAIN SYNTAX select * from t1_00826, t2_00826 where t1_00826.a = t2_00826.a AND (t2_00826.b IS NULL OR t2_00826.b < 2) order by all; + +DROP TABLE t1_00826; +DROP TABLE t2_00826; diff --git a/parser/testdata/00829_bitmap64_function/ast.json b/parser/testdata/00829_bitmap64_function/ast.json new file mode 100644 index 000000000..967ebb6c6 --- /dev/null +++ b/parser/testdata/00829_bitmap64_function/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery bitmap_test (children 1)" + }, + { + "explain": " Identifier bitmap_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001264702, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/00829_bitmap64_function/metadata.json b/parser/testdata/00829_bitmap64_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00829_bitmap64_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00829_bitmap64_function/query.sql b/parser/testdata/00829_bitmap64_function/query.sql new file mode 100644 index 000000000..94704d2f6 --- /dev/null +++ b/parser/testdata/00829_bitmap64_function/query.sql @@ -0,0 +1,75 @@ +DROP TABLE IF EXISTS bitmap_test; +CREATE TABLE bitmap_test(pickup_date Date, city_id UInt32, uid UInt64)ENGINE = Memory; +INSERT INTO bitmap_test SELECT '2019-01-01', 1, 4294967295 + number FROM numbers(1,100); +INSERT INTO bitmap_test SELECT '2019-01-02', 1, 4294967295 + number FROM numbers(90,110); +INSERT INTO bitmap_test SELECT '2019-01-03', 2, 4294967295 + number FROM numbers(1,210); + + +DROP TABLE IF EXISTS bitmap_state_test; +CREATE TABLE bitmap_state_test +( + pickup_date Date, + city_id UInt32, + uv AggregateFunction( groupBitmap, UInt64 ) +) +ENGINE = AggregatingMergeTree() PARTITION BY toYYYYMM(pickup_date) ORDER BY (pickup_date, city_id); + + +INSERT INTO bitmap_state_test SELECT + pickup_date, + city_id, + groupBitmapState(uid) AS uv +FROM bitmap_test +GROUP BY pickup_date, city_id +ORDER BY pickup_date, city_id; + +SELECT pickup_date, groupBitmapMerge(uv) AS users from bitmap_state_test group by pickup_date order by pickup_date; + +SELECT groupBitmap( uid ) AS user_num FROM bitmap_test; + +SELECT pickup_date, groupBitmap( uid ) AS user_num, bitmapToArray(groupBitmapState( uid )) AS users FROM bitmap_test GROUP BY pickup_date order by pickup_date; + +SELECT + bitmapCardinality(day_today) AS today_users, + bitmapCardinality(day_before) AS before_users, + bitmapOrCardinality(day_today, day_before) AS all_users, + bitmapAndCardinality(day_today, day_before) AS old_users, + bitmapAndnotCardinality(day_today, day_before) AS new_users, + bitmapXorCardinality(day_today, day_before) AS diff_users +FROM +( + SELECT city_id, groupBitmapState( uid ) AS day_today FROM bitmap_test WHERE pickup_date = '2019-01-02' GROUP BY city_id ORDER BY city_id +) js1 +ALL LEFT JOIN +( + SELECT city_id, groupBitmapState( uid ) AS day_before FROM bitmap_test WHERE pickup_date = '2019-01-01' GROUP BY city_id ORDER BY city_id +) js2 +USING city_id; + +SELECT + bitmapCardinality(day_today) AS today_users, + bitmapCardinality(day_before) AS before_users, + bitmapCardinality(bitmapOr(day_today, day_before))ll_users, + bitmapCardinality(bitmapAnd(day_today, day_before)) AS 
old_users, + bitmapCardinality(bitmapAndnot(day_today, day_before)) AS new_users, + bitmapCardinality(bitmapXor(day_today, day_before)) AS diff_users +FROM +( + SELECT city_id, groupBitmapState( uid ) AS day_today FROM bitmap_test WHERE pickup_date = '2019-01-02' GROUP BY city_id ORDER BY city_id +) js1 +ALL LEFT JOIN +( + SELECT city_id, groupBitmapState( uid ) AS day_before FROM bitmap_test WHERE pickup_date = '2019-01-01' GROUP BY city_id ORDER BY city_id +) js2 +USING city_id; + +SELECT count(*) FROM bitmap_test WHERE bitmapHasAny((SELECT groupBitmapState(uid) FROM bitmap_test WHERE pickup_date = '2019-01-01'), bitmapBuild([uid])); + +SELECT count(*) FROM bitmap_test WHERE bitmapHasAny(bitmapBuild([uid]), (SELECT groupBitmapState(uid) FROM bitmap_test WHERE pickup_date = '2019-01-01')); + +SELECT count(*) FROM bitmap_test WHERE 0 = bitmapHasAny((SELECT groupBitmapState(uid) FROM bitmap_test WHERE pickup_date = '2019-01-01'), bitmapBuild([uid])); + +SELECT bitmapToArray(bitmapAnd(groupBitmapState(uid), bitmapBuild(CAST([4294967296, 4294967297, 4294967298], 'Array(UInt64)')))) FROM bitmap_test GROUP BY city_id ORDER BY city_id; + +DROP TABLE bitmap_state_test; +DROP TABLE bitmap_test; diff --git a/parser/testdata/00829_bitmap_function/ast.json b/parser/testdata/00829_bitmap_function/ast.json new file mode 100644 index 000000000..49d3c8aaa --- /dev/null +++ b/parser/testdata/00829_bitmap_function/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitmapToArray (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitmapBuild (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3, UInt64_4, UInt64_5]" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001162562, + "rows_read": 9, + "bytes_read": 405 + } +} diff --git a/parser/testdata/00829_bitmap_function/metadata.json b/parser/testdata/00829_bitmap_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00829_bitmap_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00829_bitmap_function/query.sql b/parser/testdata/00829_bitmap_function/query.sql new file mode 100644 index 000000000..74a4a11ea --- /dev/null +++ b/parser/testdata/00829_bitmap_function/query.sql @@ -0,0 +1,368 @@ +SELECT bitmapToArray(bitmapBuild([1, 2, 3, 4, 5])); +SELECT bitmapToArray(bitmapAnd(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))); +SELECT bitmapToArray(bitmapOr(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))); +SELECT bitmapToArray(bitmapXor(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))); +SELECT bitmapToArray(bitmapAndnot(bitmapBuild([1,2,3]),bitmapBuild([3,4,5]))); +SELECT bitmapCardinality(bitmapBuild([1, 2, 3, 4, 5])); +SELECT bitmapAndCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])); +SELECT bitmapOrCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])); +SELECT bitmapXorCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])); +SELECT bitmapAndnotCardinality(bitmapBuild([1,2,3]),bitmapBuild([3,4,5])); +SELECT bitmapAndCardinality(bitmapBuild([100, 200, 500]), bitmapBuild(CAST([100, 200], 'Array(UInt16)'))); +SELECT 
bitmapToArray(bitmapAnd(bitmapBuild([100, 200, 500]), bitmapBuild(CAST([100, 200], 'Array(UInt16)')))); + +DROP TABLE IF EXISTS bitmap_test; +CREATE TABLE bitmap_test(pickup_date Date, city_id UInt32, uid UInt32)ENGINE = Memory; +INSERT INTO bitmap_test SELECT '2019-01-01', 1, number FROM numbers(1,50); +INSERT INTO bitmap_test SELECT '2019-01-02', 1, number FROM numbers(11,60); +INSERT INTO bitmap_test SELECT '2019-01-03', 2, number FROM numbers(1,10); + + +SELECT groupBitmap( uid ) AS user_num FROM bitmap_test; + +SELECT pickup_date, groupBitmap( uid ) AS user_num, bitmapToArray(groupBitmapState( uid )) AS users FROM bitmap_test GROUP BY pickup_date ORDER BY pickup_date; + +SELECT + bitmapCardinality(day_today) AS today_users, + bitmapCardinality(day_before) AS before_users, + bitmapOrCardinality(day_today, day_before) AS all_users, + bitmapAndCardinality(day_today, day_before) AS old_users, + bitmapAndnotCardinality(day_today, day_before) AS new_users, + bitmapXorCardinality(day_today, day_before) AS diff_users +FROM +( + SELECT city_id, groupBitmapState( uid ) AS day_today FROM bitmap_test WHERE pickup_date = '2019-01-02' GROUP BY city_id ORDER BY city_id +) js1 +ALL LEFT JOIN +( + SELECT city_id, groupBitmapState( uid ) AS day_before FROM bitmap_test WHERE pickup_date = '2019-01-01' GROUP BY city_id ORDER BY city_id +) js2 +USING city_id; + +SELECT + bitmapCardinality(day_today) AS today_users, + bitmapCardinality(day_before) AS before_users, + bitmapCardinality(bitmapOr(day_today, day_before))ll_users, + bitmapCardinality(bitmapAnd(day_today, day_before)) AS old_users, + bitmapCardinality(bitmapAndnot(day_today, day_before)) AS new_users, + bitmapCardinality(bitmapXor(day_today, day_before)) AS diff_users +FROM +( + SELECT city_id, groupBitmapState( uid ) AS day_today FROM bitmap_test WHERE pickup_date = '2019-01-02' GROUP BY city_id ORDER BY city_id +) js1 +ALL LEFT JOIN +( + SELECT city_id, groupBitmapState( uid ) AS day_before FROM bitmap_test WHERE pickup_date = '2019-01-01' GROUP BY city_id ORDER BY city_id +) js2 +USING city_id; + +SELECT count(*) FROM bitmap_test WHERE bitmapHasAny((SELECT groupBitmapState(uid) FROM bitmap_test WHERE pickup_date = '2019-01-01'), bitmapBuild([uid])); + +SELECT count(*) FROM bitmap_test WHERE bitmapHasAny(bitmapBuild([uid]), (SELECT groupBitmapState(uid) FROM bitmap_test WHERE pickup_date = '2019-01-01')); + +SELECT count(*) FROM bitmap_test WHERE 0 = bitmapHasAny((SELECT groupBitmapState(uid) FROM bitmap_test WHERE pickup_date = '2019-01-01'), bitmapBuild([uid])); + +SELECT count(*) FROM bitmap_test WHERE bitmapContains((SELECT groupBitmapState(uid) FROM bitmap_test WHERE pickup_date = '2019-01-01'), uid); + +SELECT count(*) FROM bitmap_test WHERE 0 = bitmapContains((SELECT groupBitmapState(uid) FROM bitmap_test WHERE pickup_date = '2019-01-01'), uid); + +-- PR#8082 +SELECT bitmapToArray(bitmapAnd(groupBitmapState(uid), bitmapBuild(CAST([1, 2, 3], 'Array(UInt32)')))) FROM bitmap_test GROUP BY city_id ORDER BY city_id; + +-- bitmap state test +DROP TABLE IF EXISTS bitmap_state_test; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE bitmap_state_test +( + pickup_date Date, + city_id UInt32, + uv AggregateFunction( groupBitmap, UInt32 ) +) +ENGINE = AggregatingMergeTree( pickup_date, ( pickup_date, city_id ), 8192); + +INSERT INTO bitmap_state_test SELECT + pickup_date, + city_id, + groupBitmapState(uid) AS uv +FROM bitmap_test +GROUP BY pickup_date, city_id; + +SELECT pickup_date, groupBitmapMerge(uv) AS users from bitmap_state_test 
group by pickup_date order by pickup_date; + +-- between column and expression test +DROP TABLE IF EXISTS bitmap_column_expr_test; +CREATE TABLE bitmap_column_expr_test +( + t DateTime, + z AggregateFunction(groupBitmap, UInt32) +) +ENGINE = MergeTree +PARTITION BY toYYYYMMDD(t) +ORDER BY t; + +INSERT INTO bitmap_column_expr_test VALUES (now(), bitmapBuild(cast([3,19,47] as Array(UInt32)))); + +SELECT bitmapAndCardinality( bitmapBuild(cast([19,7] AS Array(UInt32))), z) FROM bitmap_column_expr_test; +SELECT bitmapAndCardinality( z, bitmapBuild(cast([19,7] AS Array(UInt32))) ) FROM bitmap_column_expr_test; + +SELECT bitmapCardinality(bitmapAnd(bitmapBuild(cast([19,7] AS Array(UInt32))), z )) FROM bitmap_column_expr_test; +SELECT bitmapCardinality(bitmapAnd(z, bitmapBuild(cast([19,7] AS Array(UInt32))))) FROM bitmap_column_expr_test; + +DROP TABLE IF EXISTS bitmap_column_expr_test2; +CREATE TABLE bitmap_column_expr_test2 +( + tag_id String, + z AggregateFunction(groupBitmap, UInt32) +) +ENGINE = MergeTree +ORDER BY tag_id; + +INSERT INTO bitmap_column_expr_test2 VALUES ('tag1', bitmapBuild(cast([1,2,3,4,5,6,7,8,9,10] as Array(UInt32)))); +INSERT INTO bitmap_column_expr_test2 VALUES ('tag2', bitmapBuild(cast([6,7,8,9,10,11,12,13,14,15] as Array(UInt32)))); +INSERT INTO bitmap_column_expr_test2 VALUES ('tag3', bitmapBuild(cast([2,4,6,8,10,12] as Array(UInt32)))); + +SELECT groupBitmapMerge(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); +SELECT arraySort(bitmapToArray(groupBitmapMergeState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); + +SELECT groupBitmapOr(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); +SELECT arraySort(bitmapToArray(groupBitmapOrState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); + +SELECT groupBitmapAnd(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); +SELECT arraySort(bitmapToArray(groupBitmapAndState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); + +SELECT groupBitmapXor(z) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); +SELECT arraySort(bitmapToArray(groupBitmapXorState(z))) FROM bitmap_column_expr_test2 WHERE like(tag_id, 'tag%'); + + +DROP TABLE IF EXISTS bitmap_column_expr_test3; +CREATE TABLE bitmap_column_expr_test3 +( + tag_id String, + z AggregateFunction(groupBitmap, UInt64), + replace Nested ( + from UInt16, + to UInt64 + ) +) +ENGINE = MergeTree +ORDER BY tag_id; + +DROP TABLE IF EXISTS numbers10; +CREATE VIEW numbers10 AS SELECT number FROM system.numbers LIMIT 10; + +INSERT INTO bitmap_column_expr_test3(tag_id, z, replace.from, replace.to) SELECT 'tag1', groupBitmapState(toUInt64(number)), cast([] as Array(UInt16)), cast([] as Array(UInt64)) FROM numbers10; +INSERT INTO bitmap_column_expr_test3(tag_id, z, replace.from, replace.to) SELECT 'tag2', groupBitmapState(toUInt64(number)), cast([0] as Array(UInt16)), cast([2] as Array(UInt64)) FROM numbers10; +INSERT INTO bitmap_column_expr_test3(tag_id, z, replace.from, replace.to) SELECT 'tag3', groupBitmapState(toUInt64(number)), cast([0,7] as Array(UInt16)), cast([3,101] as Array(UInt64)) FROM numbers10; +INSERT INTO bitmap_column_expr_test3(tag_id, z, replace.from, replace.to) SELECT 'tag4', groupBitmapState(toUInt64(number)), cast([5,999,2] as Array(UInt16)), cast([2,888,20] as Array(UInt64)) FROM numbers10; + +SELECT tag_id, bitmapToArray(z), replace.from, replace.to, bitmapToArray(bitmapTransform(z, replace.from, replace.to)) FROM bitmap_column_expr_test3 ORDER BY tag_id; + + +DROP TABLE IF EXISTS bitmap_test; 
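+-- Note: bitmap_test is only dropped temporarily; it is recreated near the end of this
+-- file for the #18911 reproduction. The sections that follow are table-free:
+-- bitmapHasAny returns 1 as soon as the two bitmaps share any element, while
+-- bitmapHasAll returns 1 only when the second bitmap is a subset of the first.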
+DROP TABLE IF EXISTS bitmap_state_test; +DROP TABLE IF EXISTS bitmap_column_expr_test; +DROP TABLE IF EXISTS bitmap_column_expr_test2; +DROP TABLE IF EXISTS numbers10; +DROP TABLE IF EXISTS bitmap_column_expr_test3; + +-- bitmapHasAny: +---- Empty +SELECT bitmapHasAny(bitmapBuild([1, 2, 3, 5]), bitmapBuild(emptyArrayUInt8())); +SELECT bitmapHasAny(bitmapBuild(emptyArrayUInt32()), bitmapBuild(emptyArrayUInt32())); +SELECT bitmapHasAny(bitmapBuild(emptyArrayUInt16()), bitmapBuild([1, 2, 3, 500])); +---- Small x Small +SELECT bitmapHasAny(bitmapBuild([1, 2, 3, 5]),bitmapBuild([0, 3, 7])); +SELECT bitmapHasAny(bitmapBuild([1, 2, 3, 5]),bitmapBuild([0, 4, 7])); +---- Small x Large +select bitmapHasAny(bitmapBuild([100,110,120]),bitmapBuild([ 99, 100, 101, + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33])); +select bitmapHasAny(bitmapBuild([100,200,500]),bitmapBuild([ 99, 101, 600, + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33])); +---- Large x Small +select bitmapHasAny(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,230]),bitmapBuild([ 99, 100, 101])); +select bitmapHasAny(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]),bitmapBuild([ 99, 101, 600])); +---- Large x Large +select bitmapHasAny(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 40,50,60]),bitmapBuild([ 41, 50, 61, + 99,98,97,96,95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,79,78,77,76,75,74,73,72,71,70,69,68,67,66,65])); +select bitmapHasAny(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 40,50,60]),bitmapBuild([ 41, 49, 51, 61, + 99,98,97,96,95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,79,78,77,76,75,74,73,72,71,70,69,68,67,66,65])); + +-- bitmapHasAll: +---- Empty +SELECT bitmapHasAll(bitmapBuild([1, 2, 3, 5]), bitmapBuild(emptyArrayUInt8())); +SELECT bitmapHasAll(bitmapBuild(emptyArrayUInt32()), bitmapBuild(emptyArrayUInt32())); +SELECT bitmapHasAll(bitmapBuild(emptyArrayUInt16()), bitmapBuild([1, 2, 3, 500])); +---- Small x Small +select bitmapHasAll(bitmapBuild([1,5,7,9]),bitmapBuild([5,7])); +select bitmapHasAll(bitmapBuild([1,5,7,9]),bitmapBuild([5,7,2])); +---- Small x Large +select bitmapHasAll(bitmapBuild([100,110,120]),bitmapBuild([ 99, 100, 101, + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33])); +select bitmapHasAll(bitmapBuild([100,200,500]),bitmapBuild([ 99, 101, 600, + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33])); +---- Small x LargeSmall +select bitmapHasAll(bitmapBuild([1,5,7,9]),bitmapXor(bitmapBuild([1,5,7]), bitmapBuild([5,7,9]))); +select bitmapHasAll(bitmapBuild([1,5,7,9]),bitmapXor(bitmapBuild([1,5,7]), bitmapBuild([2,5,7]))); +---- Large x Small +select bitmapHasAll(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]),bitmapBuild([100, 500])); +select bitmapHasAll(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]),bitmapBuild([ 99, 100, 500])); +---- LargeSmall x Small +select bitmapHasAll(bitmapXor(bitmapBuild([1,7]), bitmapBuild([5,7,9])), bitmapBuild([1,5])); +select 
bitmapHasAll(bitmapXor(bitmapBuild([1,7]), bitmapBuild([5,7,9])), bitmapBuild([1,5,7])); +---- Large x Large +select bitmapHasAll(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]),bitmapBuild([ 100, 200, 500, + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33])); +select bitmapHasAll(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]),bitmapBuild([ 100, 200, 501, + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33])); + +-- bitmapContains: +---- Empty +SELECT bitmapContains(bitmapBuild(emptyArrayUInt32()), toUInt8(0)); +SELECT bitmapContains(bitmapBuild(emptyArrayUInt16()), toUInt16(5)); +---- Small +select bitmapContains(bitmapBuild([1,5,7,9]),toUInt32(0)); +select bitmapContains(bitmapBuild([1,5,7,9]),toUInt64(9)); +---- Large +select bitmapContains(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]),toUInt32(100)); +select bitmapContains(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]),toUInt32(101)); +select bitmapContains(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]),toUInt32(500)); + +-- bitmapSubsetInRange: +---- Empty +SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild(emptyArrayUInt32()), toUInt64(0), toUInt32(10))); +SELECT bitmapToArray(bitmapSubsetInRange(bitmapBuild(emptyArrayUInt16()), toUInt32(0), toUInt64(10))); +---- Small +select bitmapToArray(bitmapSubsetInRange(bitmapBuild([1,5,7,9]), toUInt8(0), toUInt16(4))); +select bitmapToArray(bitmapSubsetInRange(bitmapBuild([1,5,7,9]), toUInt32(10), toUInt64(10))); +select bitmapToArray(bitmapSubsetInRange(bitmapBuild([1,5,7,9]), toUInt64(3), toUInt32(7))); +---- Large +select bitmapToArray(bitmapSubsetInRange(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]), toUInt8(0), toUInt32(100))); +select bitmapToArray(bitmapSubsetInRange(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]), toUInt64(30), toUInt32(200))); +select bitmapToArray(bitmapSubsetInRange(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]), toUInt32(100), toUInt64(200))); + +-- bitmapSubsetLimit: +---- Empty +SELECT bitmapToArray(bitmapSubsetLimit(bitmapBuild(emptyArrayUInt32()), toUInt8(0), toUInt32(10))); +SELECT bitmapToArray(bitmapSubsetLimit(bitmapBuild(emptyArrayUInt16()), toUInt32(0), toUInt64(10))); +---- Small +select bitmapToArray(bitmapSubsetLimit(bitmapBuild([1,5,7,9]), toUInt8(0), toUInt32(4))); +select bitmapToArray(bitmapSubsetLimit(bitmapBuild([1,5,7,9]), toUInt32(10), toUInt64(10))); +select bitmapToArray(bitmapSubsetLimit(bitmapBuild([1,5,7,9]), toUInt16(3), toUInt32(7))); +---- Large +select bitmapToArray(bitmapSubsetLimit(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]), toUInt32(0), toUInt32(100))); +select bitmapToArray(bitmapSubsetLimit(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]), toUInt32(30), toUInt32(200))); 
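+-- Aside (not from the upstream test): unlike bitmapSubsetInRange, which bounds the
+-- values themselves, bitmapSubsetLimit(b, range_start, cardinality_limit) keeps the
+-- smallest cardinality_limit members that are >= range_start. A small sketch, with
+-- output suppressed so the reference results stay intact (it should keep [5, 7]):
+select bitmapToArray(bitmapSubsetLimit(bitmapBuild([1,5,7,9]), toUInt32(5), toUInt32(2))) FORMAT Null;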
+select bitmapToArray(bitmapSubsetLimit(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]), toUInt32(100), toUInt16(200))); + +-- subBitmap: +---- Empty +SELECT bitmapToArray(subBitmap(bitmapBuild(emptyArrayUInt32()), toUInt8(0), toUInt32(10))); +SELECT bitmapToArray(subBitmap(bitmapBuild(emptyArrayUInt16()), toUInt32(0), toUInt64(10))); +---- Small +select bitmapToArray(subBitmap(bitmapBuild([1,5,7,9]), toUInt8(0), toUInt32(4))); +select bitmapToArray(subBitmap(bitmapBuild([1,5,7,9]), toUInt32(1), toUInt64(4))); +select bitmapToArray(subBitmap(bitmapBuild([1,5,7,9]), toUInt16(1), toUInt32(2))); +---- Large +select bitmapToArray(subBitmap(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]), toUInt32(0), toUInt32(10))); +select bitmapToArray(subBitmap(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]), toUInt32(30), toUInt32(200))); +select bitmapToArray(subBitmap(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500]), toUInt32(34), toUInt16(3))); + +-- bitmapMin: +---- Empty +SELECT bitmapMin(bitmapBuild(emptyArrayUInt8())); +SELECT bitmapMin(bitmapBuild(emptyArrayUInt16())); +SELECT bitmapMin(bitmapBuild(emptyArrayUInt32())); +SELECT bitmapMin(bitmapBuild(emptyArrayUInt64())); +---- Small +select bitmapMin(bitmapBuild([1,5,7,9])); +---- Large +select bitmapMin(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500])); + +-- bitmapMax: +---- Empty +SELECT bitmapMax(bitmapBuild(emptyArrayUInt8())); +SELECT bitmapMax(bitmapBuild(emptyArrayUInt16())); +SELECT bitmapMax(bitmapBuild(emptyArrayUInt32())); +---- Small +select bitmapMax(bitmapBuild([1,5,7,9])); +---- Large +select bitmapMax(bitmapBuild([ + 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33, + 100,200,500])); + + +-- reproduce #18911 +CREATE TABLE bitmap_test(pickup_date Date, city_id UInt32, uid UInt32)ENGINE = Memory; +INSERT INTO bitmap_test SELECT '2019-01-01', 1, number FROM numbers(1,50); +INSERT INTO bitmap_test SELECT '2019-01-02', 1, number FROM numbers(11,60); +INSERT INTO bitmap_test SELECT '2019-01-03', 2, number FROM numbers(1,10); + +SELECT + bitmapCardinality(day_today) AS today_users, + bitmapCardinality(day_before) AS before_users, + bitmapOrCardinality(day_today, day_before) AS all_users, + bitmapAndCardinality(day_today, day_before) AS old_users, + bitmapAndnotCardinality(day_today, day_before) AS new_users, + bitmapXorCardinality(day_today, day_before) AS diff_users +FROM +( + SELECT + city_id, + groupBitmapState(uid) AS day_today + FROM bitmap_test + WHERE pickup_date = '2019-01-02' + GROUP BY + rand((rand((rand('') % nan) = NULL) % 7) % rand(NULL)), + city_id +) AS js1 +ALL LEFT JOIN +( + SELECT + city_id, + groupBitmapState(uid) AS day_before + FROM bitmap_test + WHERE pickup_date = '2019-01-01' + GROUP BY city_id +) AS js2 USING (city_id) FORMAT Null; +drop table bitmap_test; diff --git a/parser/testdata/00830_join_overwrite/ast.json b/parser/testdata/00830_join_overwrite/ast.json new file mode 100644 index 000000000..d081f6f65 --- /dev/null +++ b/parser/testdata/00830_join_overwrite/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"DropQuery kv (children 1)" + }, + { + "explain": " Identifier kv" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001494259, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/00830_join_overwrite/metadata.json b/parser/testdata/00830_join_overwrite/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00830_join_overwrite/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00830_join_overwrite/query.sql b/parser/testdata/00830_join_overwrite/query.sql new file mode 100644 index 000000000..4f19d5fc1 --- /dev/null +++ b/parser/testdata/00830_join_overwrite/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS kv; + +CREATE TABLE kv (k UInt32, v UInt32) ENGINE Join(Any, Left, k); +INSERT INTO kv VALUES (1, 2); +INSERT INTO kv VALUES (1, 3); +SELECT joinGet('kv', 'v', toUInt32(1)); +CREATE TABLE kv_overwrite (k UInt32, v UInt32) ENGINE Join(Any, Left, k) SETTINGS join_any_take_last_row = 1; +INSERT INTO kv_overwrite VALUES (1, 2); +INSERT INTO kv_overwrite VALUES (1, 3); +SELECT joinGet('kv_overwrite', 'v', toUInt32(1)); + + +CREATE TABLE t2 (k UInt32, v UInt32) ENGINE = Memory; +INSERT INTO t2 VALUES (1, 2), (1, 3); + +SET enable_analyzer = 1; +SET join_algorithm = 'hash'; + +SELECT v FROM (SELECT 1 as k) t1 ANY INNER JOIN t2 USING (k) SETTINGS join_any_take_last_row = 0; +SELECT v FROM (SELECT 1 as k) t1 ANY INNER JOIN t2 USING (k) SETTINGS join_any_take_last_row = 1; + +DROP TABLE kv; +DROP TABLE kv_overwrite; diff --git a/parser/testdata/00831_quantile_weighted_parameter_check/ast.json b/parser/testdata/00831_quantile_weighted_parameter_check/ast.json new file mode 100644 index 000000000..bbf721dab --- /dev/null +++ b/parser/testdata/00831_quantile_weighted_parameter_check/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantileExactWeighted (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_0.5" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001107757, + "rows_read": 16, + "bytes_read": 632 + } +} diff --git a/parser/testdata/00831_quantile_weighted_parameter_check/metadata.json b/parser/testdata/00831_quantile_weighted_parameter_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00831_quantile_weighted_parameter_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00831_quantile_weighted_parameter_check/query.sql b/parser/testdata/00831_quantile_weighted_parameter_check/query.sql new file mode 100644 index 000000000..e16a3157e --- /dev/null +++ b/parser/testdata/00831_quantile_weighted_parameter_check/query.sql @@ -0,0 +1,2 @@ +SELECT quantileExactWeighted(0.5)(number, number) FROM 
numbers(10); +SELECT quantileExactWeighted(0.5)(number, 0.1) FROM numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/00832_storage_file_lock/ast.json b/parser/testdata/00832_storage_file_lock/ast.json new file mode 100644 index 000000000..45489e7fd --- /dev/null +++ b/parser/testdata/00832_storage_file_lock/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery file (children 1)" + }, + { + "explain": " Identifier file" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001186875, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/00832_storage_file_lock/metadata.json b/parser/testdata/00832_storage_file_lock/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00832_storage_file_lock/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00832_storage_file_lock/query.sql b/parser/testdata/00832_storage_file_lock/query.sql new file mode 100644 index 000000000..a1312e8a9 --- /dev/null +++ b/parser/testdata/00832_storage_file_lock/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS file; +CREATE TABLE file (number UInt64) ENGINE = File(TSV); +SELECT * FROM file; -- { serverError FILE_DOESNT_EXIST } +INSERT INTO file VALUES (1); +SELECT * FROM file; +DROP TABLE file; diff --git a/parser/testdata/00833_sleep_overflow/ast.json b/parser/testdata/00833_sleep_overflow/ast.json new file mode 100644 index 000000000..6bd2cc7ad --- /dev/null +++ b/parser/testdata/00833_sleep_overflow/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sleep (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_4295.967296" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001134043, + "rows_read": 7, + "bytes_read": 269 + } +} diff --git a/parser/testdata/00833_sleep_overflow/metadata.json b/parser/testdata/00833_sleep_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00833_sleep_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00833_sleep_overflow/query.sql b/parser/testdata/00833_sleep_overflow/query.sql new file mode 100644 index 000000000..dc38bee8b --- /dev/null +++ b/parser/testdata/00833_sleep_overflow/query.sql @@ -0,0 +1 @@ +SELECT sleep(4295.967296); -- { serverError TOO_SLOW } diff --git a/parser/testdata/00834_date_datetime_cmp/ast.json b/parser/testdata/00834_date_datetime_cmp/ast.json new file mode 100644 index 000000000..4be3c7da2 --- /dev/null +++ b/parser/testdata/00834_date_datetime_cmp/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 
'2017-06-28 12:01:01'" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2017-07-01'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001369085, + "rows_read": 12, + "bytes_read": 480 + } +} diff --git a/parser/testdata/00834_date_datetime_cmp/metadata.json b/parser/testdata/00834_date_datetime_cmp/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00834_date_datetime_cmp/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00834_date_datetime_cmp/query.sql b/parser/testdata/00834_date_datetime_cmp/query.sql new file mode 100644 index 000000000..20fbb76ec --- /dev/null +++ b/parser/testdata/00834_date_datetime_cmp/query.sql @@ -0,0 +1,4 @@ +SELECT toDateTime('2017-06-28 12:01:01') < toDate('2017-07-01'); +SELECT toDateTime('2017-06-28 12:01:01') > toDate('2017-07-01'); +SELECT toDate('2017-06-28') < toDate('2017-07-01'); +SELECT toDate('2017-06-28') > toDate('2017-07-01'); diff --git a/parser/testdata/00834_limit_with_constant_expressions/ast.json b/parser/testdata/00834_limit_with_constant_expressions/ast.json new file mode 100644 index 000000000..80e16982d --- /dev/null +++ b/parser/testdata/00834_limit_with_constant_expressions/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001009176, + "rows_read": 15, + "bytes_read": 565 + } +} diff --git a/parser/testdata/00834_limit_with_constant_expressions/metadata.json b/parser/testdata/00834_limit_with_constant_expressions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00834_limit_with_constant_expressions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00834_limit_with_constant_expressions/query.sql b/parser/testdata/00834_limit_with_constant_expressions/query.sql new file mode 100644 index 000000000..9247f1432 --- /dev/null +++ b/parser/testdata/00834_limit_with_constant_expressions/query.sql @@ -0,0 +1,28 @@ +SELECT number FROM numbers(10) LIMIT 0 + 1; +SELECT number FROM numbers(10) LIMIT 1 - 1; +SELECT number FROM numbers(10) LIMIT 2 - 1; +SELECT number FROM numbers(10) LIMIT 0 - 1; +SELECT number FROM numbers(10) LIMIT 1.0; +SELECT number FROM numbers(10) LIMIT 1.5; -- { serverError INVALID_LIMIT_EXPRESSION } +SELECT number FROM numbers(10) LIMIT '1'; -- { serverError INVALID_LIMIT_EXPRESSION } +SELECT number FROM numbers(10) LIMIT now(); -- { serverError INVALID_LIMIT_EXPRESSION } +SELECT number FROM numbers(10) LIMIT today(); -- { serverError INVALID_LIMIT_EXPRESSION } +SELECT number FROM numbers(10) LIMIT 
toUInt8('1'); +SELECT number FROM numbers(10) LIMIT toFloat32('1'); +SELECT number FROM numbers(10) LIMIT rand(); -- { serverError BAD_ARGUMENTS, INVALID_LIMIT_EXPRESSION } + +SELECT count() <= 1 FROM (SELECT number FROM numbers(10) LIMIT randConstant() % 2); + +SELECT number FROM numbers(10) LIMIT 0 + 1 BY number; +SELECT number FROM numbers(10) LIMIT 0 BY number; + +SELECT TOP 5 * FROM numbers(10); + +SELECT * FROM numbers(10) LIMIT 0.33 / 0.165 - 0.33 + 0.67; -- { serverError INVALID_LIMIT_EXPRESSION } +SELECT * FROM numbers(10) LIMIT LENGTH('NNN') + COS(0), toDate('0000-00-02'); -- { serverError INVALID_LIMIT_EXPRESSION } +SELECT * FROM numbers(10) LIMIT LENGTH('NNN') + COS(0), toDate('0000-00-02'); -- { serverError INVALID_LIMIT_EXPRESSION } +SELECT * FROM numbers(10) LIMIT a + 5 - a; -- { serverError UNKNOWN_IDENTIFIER } +SELECT * FROM numbers(10) LIMIT a + b; -- { serverError UNKNOWN_IDENTIFIER } +SELECT * FROM numbers(10) LIMIT 'Hello'; -- { serverError INVALID_LIMIT_EXPRESSION } + +SELECT number from numbers(10) order by number limit (select sum(number), count() from numbers(3)).1; diff --git a/parser/testdata/00834_not_between/ast.json b/parser/testdata/00834_not_between/ast.json new file mode 100644 index 000000000..3ea68b26a --- /dev/null +++ b/parser/testdata/00834_not_between/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function or (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function minus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001257893, + "rows_read": 20, + "bytes_read": 765 + } +} diff --git a/parser/testdata/00834_not_between/metadata.json b/parser/testdata/00834_not_between/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00834_not_between/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00834_not_between/query.sql b/parser/testdata/00834_not_between/query.sql new file mode 100644 index 000000000..45709c7a1 --- /dev/null +++ b/parser/testdata/00834_not_between/query.sql @@ -0,0 +1,4 @@ +SELECT 2 NOT BETWEEN 2 + 1 AND 4 - 1; +SELECT number FROM ( SELECT number FROM system.numbers LIMIT 10 ) WHERE number NOT BETWEEN 2 AND 4; + +SELECT number BETWEEN 4 AND 6, NOT number NOT BETWEEN 4 AND 6 AND 1 FROM numbers(10); diff --git a/parser/testdata/00835_if_generic_case/ast.json b/parser/testdata/00835_if_generic_case/ast.json new file mode 100644 index 000000000..bba06cacc --- /dev/null +++ b/parser/testdata/00835_if_generic_case/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + 
], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001369162, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00835_if_generic_case/metadata.json b/parser/testdata/00835_if_generic_case/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00835_if_generic_case/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00835_if_generic_case/query.sql b/parser/testdata/00835_if_generic_case/query.sql new file mode 100644 index 000000000..e06e610db --- /dev/null +++ b/parser/testdata/00835_if_generic_case/query.sql @@ -0,0 +1,20 @@ +SET enable_analyzer = 1; + +SELECT toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul') AS x, toDate('2000-01-02') AS y, x > y ? x : y AS z; +SELECT materialize(toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul')) AS x, toDate('2000-01-02') AS y, x > y ? x : y AS z; +SELECT toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul') AS x, materialize(toDate('2000-01-02')) AS y, x > y ? x : y AS z; +SELECT materialize(toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul')) AS x, materialize(toDate('2000-01-02')) AS y, x > y ? x : y AS z; + +SELECT toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul') AS x, toDate('2000-01-02') AS y, 0 ? x : y AS z; +SELECT materialize(toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul')) AS x, toDate('2000-01-02') AS y, 0 ? x : y AS z; +SELECT toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul') AS x, materialize(toDate('2000-01-02')) AS y, 0 ? x : y AS z; +SELECT materialize(toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul')) AS x, materialize(toDate('2000-01-02')) AS y, 0 ? x : y AS z; + +SELECT toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul') AS x, toDate('2000-01-02') AS y, 1 ? x : y AS z; +SELECT materialize(toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul')) AS x, toDate('2000-01-02') AS y, 1 ? x : y AS z; +SELECT toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul') AS x, materialize(toDate('2000-01-02')) AS y, 1 ? x : y AS z; +SELECT materialize(toDateTime('2000-01-01 00:00:00', 'Asia/Istanbul')) AS x, materialize(toDate('2000-01-02')) AS y, 1 ? x : y AS z; + +SELECT rand() % 2 = 0 ? number : number FROM numbers(5); + +SELECT rand() % 2 = 0 ? 
number : toString(number) FROM numbers(5); -- { serverError NO_COMMON_TYPE } diff --git a/parser/testdata/00836_indices_alter/ast.json b/parser/testdata/00836_indices_alter/ast.json new file mode 100644 index 000000000..36df91f54 --- /dev/null +++ b/parser/testdata/00836_indices_alter/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery minmax_idx (children 1)" + }, + { + "explain": " Identifier minmax_idx" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001015581, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00836_indices_alter/metadata.json b/parser/testdata/00836_indices_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00836_indices_alter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00836_indices_alter/query.sql b/parser/testdata/00836_indices_alter/query.sql new file mode 100644 index 000000000..c059c6210 --- /dev/null +++ b/parser/testdata/00836_indices_alter/query.sql @@ -0,0 +1,69 @@ +DROP TABLE IF EXISTS minmax_idx; +DROP TABLE IF EXISTS minmax_idx2; + + +CREATE TABLE minmax_idx +( + u64 UInt64, + i32 Int32 +) ENGINE = MergeTree() +ORDER BY u64; + +INSERT INTO minmax_idx VALUES (1, 2); + +ALTER TABLE minmax_idx ADD INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10; +ALTER TABLE minmax_idx ADD INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10; +ALTER TABLE minmax_idx ADD INDEX idx3 (u64 - i32) TYPE minmax GRANULARITY 10 AFTER idx1; + +SHOW CREATE TABLE minmax_idx; + +SELECT * FROM minmax_idx WHERE u64 * i32 = 2; + +INSERT INTO minmax_idx VALUES (1, 2); +INSERT INTO minmax_idx VALUES (1, 2); +INSERT INTO minmax_idx VALUES (1, 2); +INSERT INTO minmax_idx VALUES (1, 2); +INSERT INTO minmax_idx VALUES (1, 2); + +SELECT * FROM minmax_idx WHERE u64 * i32 = 2; + +ALTER TABLE minmax_idx DROP INDEX idx1; + +SHOW CREATE TABLE minmax_idx; + +SELECT * FROM minmax_idx WHERE u64 * i32 = 2; + +ALTER TABLE minmax_idx DROP INDEX idx2; +ALTER TABLE minmax_idx DROP INDEX idx3; + +SHOW CREATE TABLE minmax_idx; + +ALTER TABLE minmax_idx ADD INDEX idx1 (u64 * i32) TYPE minmax GRANULARITY 10; + +SHOW CREATE TABLE minmax_idx; + +SELECT * FROM minmax_idx WHERE u64 * i32 = 2; + + +CREATE TABLE minmax_idx2 +( + u64 UInt64, + i32 Int32, + INDEX idx1 (u64 + i32) TYPE minmax GRANULARITY 10, + INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10 +) ENGINE = MergeTree() +ORDER BY u64; + +INSERT INTO minmax_idx2 VALUES (1, 2); +INSERT INTO minmax_idx2 VALUES (1, 2); + +SELECT * FROM minmax_idx2 WHERE u64 * i32 = 2; + +ALTER TABLE minmax_idx2 DROP INDEX idx1, DROP INDEX idx2; + +SHOW CREATE TABLE minmax_idx2; + +SELECT * FROM minmax_idx2 WHERE u64 * i32 = 2; + +DROP TABLE minmax_idx; +DROP TABLE minmax_idx2; diff --git a/parser/testdata/00836_indices_alter_replicated_zookeeper_long/ast.json b/parser/testdata/00836_indices_alter_replicated_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00836_indices_alter_replicated_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00836_indices_alter_replicated_zookeeper_long/metadata.json b/parser/testdata/00836_indices_alter_replicated_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00836_indices_alter_replicated_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/00836_indices_alter_replicated_zookeeper_long/query.sql b/parser/testdata/00836_indices_alter_replicated_zookeeper_long/query.sql new file mode 100644 index 000000000..7ea6c2660 --- /dev/null +++ b/parser/testdata/00836_indices_alter_replicated_zookeeper_long/query.sql @@ -0,0 +1,115 @@ +-- Tags: long, replica + +DROP TABLE IF EXISTS minmax_idx; +DROP TABLE IF EXISTS minmax_idx_r; +DROP TABLE IF EXISTS minmax_idx2; +DROP TABLE IF EXISTS minmax_idx2_r; + +SET replication_alter_partitions_sync = 2; + +CREATE TABLE minmax_idx +( + u64 UInt64, + i32 Int32 +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00836/indices_alter1', 'r1') +ORDER BY u64; + +CREATE TABLE minmax_idx_r +( + u64 UInt64, + i32 Int32 +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00836/indices_alter1', 'r2') +ORDER BY u64; + +INSERT INTO minmax_idx VALUES (1, 2); + +SYSTEM SYNC REPLICA minmax_idx_r; + +ALTER TABLE minmax_idx ADD INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10; +ALTER TABLE minmax_idx_r ADD INDEX idx2 u64 + i32 TYPE minmax GRANULARITY 10; +ALTER TABLE minmax_idx ADD INDEX idx3 u64 - i32 TYPE minmax GRANULARITY 10 AFTER idx1; + +SHOW CREATE TABLE minmax_idx; +SHOW CREATE TABLE minmax_idx_r; + +SELECT * FROM minmax_idx WHERE u64 * i32 = 2 ORDER BY (u64, i32); +SELECT * FROM minmax_idx_r WHERE u64 * i32 = 2 ORDER BY (u64, i32); + +INSERT INTO minmax_idx VALUES (1, 4); +INSERT INTO minmax_idx_r VALUES (3, 2); +INSERT INTO minmax_idx VALUES (1, 5); +INSERT INTO minmax_idx_r VALUES (65, 75); +INSERT INTO minmax_idx VALUES (19, 9); + +SYSTEM SYNC REPLICA minmax_idx; +SYSTEM SYNC REPLICA minmax_idx_r; + +SELECT * FROM minmax_idx WHERE u64 * i32 > 1 ORDER BY (u64, i32); +SELECT * FROM minmax_idx_r WHERE u64 * i32 > 1 ORDER BY (u64, i32); + +ALTER TABLE minmax_idx DROP INDEX idx1; + +SHOW CREATE TABLE minmax_idx; +SHOW CREATE TABLE minmax_idx_r; + +SELECT * FROM minmax_idx WHERE u64 * i32 > 1 ORDER BY (u64, i32); +SELECT * FROM minmax_idx_r WHERE u64 * i32 > 1 ORDER BY (u64, i32); + +ALTER TABLE minmax_idx DROP INDEX idx2; +ALTER TABLE minmax_idx_r DROP INDEX idx3; + +SHOW CREATE TABLE minmax_idx; +SHOW CREATE TABLE minmax_idx_r; + +ALTER TABLE minmax_idx ADD INDEX idx1 u64 * i32 TYPE minmax GRANULARITY 10; + +SHOW CREATE TABLE minmax_idx; +SHOW CREATE TABLE minmax_idx_r; + +SELECT * FROM minmax_idx WHERE u64 * i32 > 1 ORDER BY (u64, i32); +SELECT * FROM minmax_idx_r WHERE u64 * i32 > 1 ORDER BY (u64, i32); + + +CREATE TABLE minmax_idx2 +( + u64 UInt64, + i32 Int32, + INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, + INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10 +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00836/indices_alter2', 'r1') +ORDER BY u64; + +CREATE TABLE minmax_idx2_r +( + u64 UInt64, + i32 Int32, + INDEX idx1 u64 + i32 TYPE minmax GRANULARITY 10, + INDEX idx2 u64 * i32 TYPE minmax GRANULARITY 10 +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00836/indices_alter2', 'r2') +ORDER BY u64; + + +SHOW CREATE TABLE minmax_idx2; +SHOW CREATE TABLE minmax_idx2_r; + +INSERT INTO minmax_idx2 VALUES (1, 2); +INSERT INTO minmax_idx2_r VALUES (1, 3); + +SYSTEM SYNC REPLICA minmax_idx2; +SYSTEM SYNC REPLICA minmax_idx2_r; + +SELECT * FROM minmax_idx2 WHERE u64 * i32 >= 2 ORDER BY (u64, i32); +SELECT * FROM minmax_idx2_r WHERE u64 * i32 >= 2 ORDER BY (u64, i32); + +ALTER TABLE minmax_idx2_r DROP INDEX idx1, DROP INDEX idx2; + +SHOW CREATE TABLE minmax_idx2; +SHOW CREATE TABLE minmax_idx2_r; + +SELECT * FROM minmax_idx2 
WHERE u64 * i32 >= 2 ORDER BY (u64, i32); +SELECT * FROM minmax_idx2_r WHERE u64 * i32 >= 2 ORDER BY (u64, i32); + +DROP TABLE minmax_idx; +DROP TABLE minmax_idx_r; +DROP TABLE minmax_idx2; +DROP TABLE minmax_idx2_r; diff --git a/parser/testdata/00836_numbers_table_function_zero/ast.json b/parser/testdata/00836_numbers_table_function_zero/ast.json new file mode 100644 index 000000000..dc98ce1d5 --- /dev/null +++ b/parser/testdata/00836_numbers_table_function_zero/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001264635, + "rows_read": 12, + "bytes_read": 467 + } +} diff --git a/parser/testdata/00836_numbers_table_function_zero/metadata.json b/parser/testdata/00836_numbers_table_function_zero/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00836_numbers_table_function_zero/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00836_numbers_table_function_zero/query.sql b/parser/testdata/00836_numbers_table_function_zero/query.sql new file mode 100644 index 000000000..e70086e64 --- /dev/null +++ b/parser/testdata/00836_numbers_table_function_zero/query.sql @@ -0,0 +1,8 @@ +SELECT count() FROM numbers(0); +SELECT count() FROM numbers(1); +SELECT count() FROM numbers(10); +SELECT count() FROM numbers(100000); + +SELECT * FROM numbers(0); +SELECT * FROM numbers(1); +SELECT * FROM numbers(10); diff --git a/parser/testdata/00837_insert_select_and_read_prefix/ast.json b/parser/testdata/00837_insert_select_and_read_prefix/ast.json new file mode 100644 index 000000000..0fb385be4 --- /dev/null +++ b/parser/testdata/00837_insert_select_and_read_prefix/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery file (children 1)" + }, + { + "explain": " Identifier file" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001361939, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/00837_insert_select_and_read_prefix/metadata.json b/parser/testdata/00837_insert_select_and_read_prefix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00837_insert_select_and_read_prefix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00837_insert_select_and_read_prefix/query.sql b/parser/testdata/00837_insert_select_and_read_prefix/query.sql new file mode 100644 index 000000000..5fc8b60b1 --- /dev/null +++ b/parser/testdata/00837_insert_select_and_read_prefix/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS file; +CREATE TABLE file (s String, n UInt32) ENGINE = File(CSVWithNames); +-- BTW, WithNames formats are totally unsuitable for more than a single INSERT +INSERT INTO file VALUES ('hello', 1), ('world', 2); + +SELECT * FROM 
file; +CREATE TEMPORARY TABLE file2 AS SELECT * FROM file; +SELECT * FROM file2; + +DROP TABLE file; diff --git a/parser/testdata/00837_minmax_index_replicated_zookeeper_long/ast.json b/parser/testdata/00837_minmax_index_replicated_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00837_minmax_index_replicated_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00837_minmax_index_replicated_zookeeper_long/metadata.json b/parser/testdata/00837_minmax_index_replicated_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00837_minmax_index_replicated_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00837_minmax_index_replicated_zookeeper_long/query.sql b/parser/testdata/00837_minmax_index_replicated_zookeeper_long/query.sql new file mode 100644 index 000000000..ba0809495 --- /dev/null +++ b/parser/testdata/00837_minmax_index_replicated_zookeeper_long/query.sql @@ -0,0 +1,75 @@ +-- Tags: long, replica + +DROP TABLE IF EXISTS minmax_idx1; +DROP TABLE IF EXISTS minmax_idx2; + + +CREATE TABLE minmax_idx1 +( + u64 UInt64, + i32 Int32, + f64 Float64, + d Decimal(10, 2), + s String, + e Enum8('a' = 1, 'b' = 2, 'c' = 3), + dt Date, + INDEX + idx_all (i32, i32 + f64, d, s, e, dt) TYPE minmax GRANULARITY 1, + INDEX + idx_2 (u64 + toYear(dt), substring(s, 2, 4)) TYPE minmax GRANULARITY 3 +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00837/minmax', 'r1') +ORDER BY u64 +SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi'; + +CREATE TABLE minmax_idx2 +( + u64 UInt64, + i32 Int32, + f64 Float64, + d Decimal(10, 2), + s String, + e Enum8('a' = 1, 'b' = 2, 'c' = 3), + dt Date, + INDEX + idx_all (i32, i32 + f64, d, s, e, dt) TYPE minmax GRANULARITY 1, + INDEX + idx_2 (u64 + toYear(dt), substring(s, 2, 4)) TYPE minmax GRANULARITY 3 +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00837/minmax', 'r2') +ORDER BY u64 +SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi'; + + +/* many small inserts => table will make merges */ +INSERT INTO minmax_idx1 VALUES (2, 2, 4.5, 2.5, 'abc', 'a', '2014-01-01'); +INSERT INTO minmax_idx1 VALUES (0, 5, 4.7, 6.5, 'cba', 'b', '2014-01-04'); +INSERT INTO minmax_idx2 VALUES (3, 5, 6.9, 1.57, 'bac', 'c', '2017-01-01'); +INSERT INTO minmax_idx2 VALUES (4, 2, 4.5, 2.5, 'abc', 'a', '2016-01-01'); +INSERT INTO minmax_idx2 VALUES (13, 5, 4.7, 6.5, 'cba', 'b', '2015-01-01'); +INSERT INTO minmax_idx1 VALUES (5, 5, 6.9, 1.57, 'bac', 'c', '2014-11-11'); + +SYSTEM SYNC REPLICA minmax_idx1; +SYSTEM SYNC REPLICA minmax_idx2; + +INSERT INTO minmax_idx1 VALUES (6, 2, 4.5, 2.5, 'abc', 'a', '2014-02-11'); +INSERT INTO minmax_idx1 VALUES (1, 5, 4.7, 6.5, 'cba', 'b', '2014-03-11'); +INSERT INTO minmax_idx1 VALUES (7, 5, 6.9, 1.57, 'bac', 'c', '2014-04-11'); +INSERT INTO minmax_idx1 VALUES (8, 2, 4.5, 2.5, 'abc', 'a', '2014-05-11'); +INSERT INTO minmax_idx2 VALUES (12, 5, 4.7, 6.5, 'cba', 'b', '2014-06-11'); +INSERT INTO minmax_idx2 VALUES (9, 5, 6.9, 1.57, 'bac', 'c', '2014-07-11'); + +SYSTEM SYNC REPLICA minmax_idx1; +SYSTEM SYNC REPLICA minmax_idx2; + +OPTIMIZE TABLE minmax_idx1; +OPTIMIZE TABLE minmax_idx2; + +/* simple select */ +SELECT * FROM minmax_idx1 WHERE i32 = 5 AND i32 + f64 < 12 AND 3 < d AND d < 7 AND (s = 'bac' OR s = 'cba') ORDER BY dt; +SELECT * FROM minmax_idx2 WHERE i32 = 5 AND i32 + f64 < 12 AND 3 < d AND d < 7 AND (s = 'bac' OR s 
= 'cba') ORDER BY dt; + +/* select with hole made by primary key */ +SELECT * FROM minmax_idx1 WHERE (u64 < 2 OR u64 > 10) AND e != 'b' ORDER BY dt; +SELECT * FROM minmax_idx2 WHERE (u64 < 2 OR u64 > 10) AND e != 'b' ORDER BY dt; + +DROP TABLE minmax_idx1; +DROP TABLE minmax_idx2; diff --git a/parser/testdata/00839_bitmask_negative/ast.json b/parser/testdata/00839_bitmask_negative/ast.json new file mode 100644 index 000000000..7494c720a --- /dev/null +++ b/parser/testdata/00839_bitmask_negative/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitmaskToList (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001115139, + "rows_read": 7, + "bytes_read": 266 + } +} diff --git a/parser/testdata/00839_bitmask_negative/metadata.json b/parser/testdata/00839_bitmask_negative/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00839_bitmask_negative/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00839_bitmask_negative/query.sql b/parser/testdata/00839_bitmask_negative/query.sql new file mode 100644 index 000000000..cfa801b84 --- /dev/null +++ b/parser/testdata/00839_bitmask_negative/query.sql @@ -0,0 +1,17 @@ +SELECT bitmaskToList(0); +SELECT bitmaskToArray(0); +SELECT bitmaskToList(7); +SELECT bitmaskToArray(7); +SELECT bitmaskToList(-1); +SELECT bitmaskToArray(-1); +SELECT bitmaskToList(-128); +SELECT bitmaskToArray(-128); + +SELECT bitmaskToList(toInt64(0)); +SELECT bitmaskToArray(toInt64(0)); +SELECT bitmaskToList(toInt64(7)); +SELECT bitmaskToArray(toInt64(7)); +SELECT bitmaskToList(toInt64(-1)); +SELECT bitmaskToArray(toInt64(-1)); +SELECT bitmaskToList(toInt64(-128)); +SELECT bitmaskToArray(toInt64(-128)); diff --git a/parser/testdata/00840_top_k_weighted/ast.json b/parser/testdata/00840_top_k_weighted/ast.json new file mode 100644 index 000000000..e8ebdd488 --- /dev/null +++ b/parser/testdata/00840_top_k_weighted/ast.json @@ -0,0 +1,148 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function topKWeighted (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier weight" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tupleElement (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t" + }, 
+ { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function tupleElement (alias weight) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (alias t) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal Tuple_('hello', UInt64_1)" + }, + { + "explain": " Literal Tuple_('world', UInt64_2)" + }, + { + "explain": " Literal Tuple_('goodbye', UInt64_3)" + }, + { + "explain": " Literal Tuple_('abc', UInt64_1)" + } + ], + + "rows": 42, + + "statistics": + { + "elapsed": 0.001596902, + "rows_read": 42, + "bytes_read": 1961 + } +} diff --git a/parser/testdata/00840_top_k_weighted/metadata.json b/parser/testdata/00840_top_k_weighted/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00840_top_k_weighted/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00840_top_k_weighted/query.sql b/parser/testdata/00840_top_k_weighted/query.sql new file mode 100644 index 000000000..4b5c938e5 --- /dev/null +++ b/parser/testdata/00840_top_k_weighted/query.sql @@ -0,0 +1,2 @@ +SELECT topKWeighted(2)(x, weight) FROM (SELECT t.1 AS x, t.2 AS weight FROM (SELECT arrayJoin([('hello', 1), ('world', 2), ('goodbye', 3), ('abc', 1)]) AS t)); +SELECT topKWeighted(5)(n, weight) FROM (SELECT number as n, number as weight from system.numbers LIMIT 100); diff --git a/parser/testdata/00841_temporary_table_database/ast.json b/parser/testdata/00841_temporary_table_database/ast.json new file mode 100644 index 000000000..63e2421aa --- /dev/null +++ b/parser/testdata/00841_temporary_table_database/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t1_00841 (children 2)" + }, + { + "explain": " Identifier t1_00841" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType UInt8" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001543336, + "rows_read": 6, + "bytes_read": 215 + } +} diff --git a/parser/testdata/00841_temporary_table_database/metadata.json b/parser/testdata/00841_temporary_table_database/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00841_temporary_table_database/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00841_temporary_table_database/query.sql b/parser/testdata/00841_temporary_table_database/query.sql new file mode 100644 index 000000000..6f4f2ca80 --- /dev/null +++ b/parser/testdata/00841_temporary_table_database/query.sql @@ -0,0 +1,5 @@ +CREATE TEMPORARY TABLE t1_00841 (x UInt8); +INSERT INTO t1_00841 VALUES (1); +SELECT * FROM t1_00841; + +CREATE TEMPORARY TABLE test.t2_00841 (x UInt8); -- { 
serverError BAD_DATABASE_FOR_TEMPORARY_TABLE } diff --git a/parser/testdata/00842_array_with_constant_overflow/ast.json b/parser/testdata/00842_array_with_constant_overflow/ast.json new file mode 100644 index 000000000..7bbdff916 --- /dev/null +++ b/parser/testdata/00842_array_with_constant_overflow/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayWithConstant (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_-231.37104" + }, + { + "explain": " Literal Int64_-138" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001146614, + "rows_read": 8, + "bytes_read": 312 + } +} diff --git a/parser/testdata/00842_array_with_constant_overflow/metadata.json b/parser/testdata/00842_array_with_constant_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00842_array_with_constant_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00842_array_with_constant_overflow/query.sql b/parser/testdata/00842_array_with_constant_overflow/query.sql new file mode 100644 index 000000000..aa22f02a5 --- /dev/null +++ b/parser/testdata/00842_array_with_constant_overflow/query.sql @@ -0,0 +1 @@ +SELECT arrayWithConstant(-231.37104, -138); -- { serverError TOO_LARGE_ARRAY_SIZE } diff --git a/parser/testdata/00843_optimize_predicate_and_rename_table/ast.json b/parser/testdata/00843_optimize_predicate_and_rename_table/ast.json new file mode 100644 index 000000000..40fb519b7 --- /dev/null +++ b/parser/testdata/00843_optimize_predicate_and_rename_table/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001354085, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00843_optimize_predicate_and_rename_table/metadata.json b/parser/testdata/00843_optimize_predicate_and_rename_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00843_optimize_predicate_and_rename_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00843_optimize_predicate_and_rename_table/query.sql b/parser/testdata/00843_optimize_predicate_and_rename_table/query.sql new file mode 100644 index 000000000..3e1e64978 --- /dev/null +++ b/parser/testdata/00843_optimize_predicate_and_rename_table/query.sql @@ -0,0 +1,18 @@ +SET enable_optimize_predicate_expression = 1; + +DROP TABLE IF EXISTS test1_00843; +DROP TABLE IF EXISTS test2_00843; +DROP TABLE IF EXISTS view_00843; + +CREATE TABLE test1_00843 (a UInt8) ENGINE = Memory; +INSERT INTO test1_00843 VALUES (1); + +CREATE VIEW view_00843 AS SELECT * FROM test1_00843; +SELECT * FROM view_00843; +RENAME TABLE test1_00843 TO test2_00843; +SELECT * FROM view_00843; -- { serverError UNKNOWN_TABLE } +RENAME TABLE test2_00843 TO test1_00843; +SELECT * FROM view_00843; + +DROP TABLE test1_00843; +DROP TABLE view_00843; diff --git a/parser/testdata/00844_join_lightee2/ast.json b/parser/testdata/00844_join_lightee2/ast.json new file mode 100644 index 000000000..92437d257 --- /dev/null +++ b/parser/testdata/00844_join_lightee2/ast.json 
@@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1_00844 (children 1)" + }, + { + "explain": " Identifier t1_00844" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001447436, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00844_join_lightee2/metadata.json b/parser/testdata/00844_join_lightee2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00844_join_lightee2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00844_join_lightee2/query.sql b/parser/testdata/00844_join_lightee2/query.sql new file mode 100644 index 000000000..9e7f2b40f --- /dev/null +++ b/parser/testdata/00844_join_lightee2/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS t1_00844; +DROP TABLE IF EXISTS t2_00844; + +CREATE TABLE IF NOT EXISTS t1_00844 ( +f1 UInt32, +f2 String +) ENGINE = MergeTree ORDER BY (f1); + +CREATE TABLE IF NOT EXISTS t2_00844 ( +f1 String, +f3 String +) ENGINE = MergeTree ORDER BY (f1); + +insert into t1_00844 values(1,'1'); +insert into t2_00844 values('1','name1'); + +select t1_00844.f1,t2_00844.f3 from t1_00844 all inner join t2_00844 on t1_00844.f2 = t2_00844.f1 +where t2_00844.f1 = '1'; + +DROP TABLE t1_00844; +DROP TABLE t2_00844; diff --git a/parser/testdata/00845_join_on_aliases/ast.json b/parser/testdata/00845_join_on_aliases/ast.json new file mode 100644 index 000000000..e8ffc92ed --- /dev/null +++ b/parser/testdata/00845_join_on_aliases/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table1 (children 1)" + }, + { + "explain": " Identifier table1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001365687, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/00845_join_on_aliases/metadata.json b/parser/testdata/00845_join_on_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00845_join_on_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00845_join_on_aliases/query.sql b/parser/testdata/00845_join_on_aliases/query.sql new file mode 100644 index 000000000..769ed2190 --- /dev/null +++ b/parser/testdata/00845_join_on_aliases/query.sql @@ -0,0 +1,48 @@ +DROP TABLE IF EXISTS table1; +DROP TABLE IF EXISTS table2; + +CREATE TABLE table1 (a UInt32, b UInt32) ENGINE = Memory; +CREATE TABLE table2 (a UInt32, b UInt32) ENGINE = Memory; + +INSERT INTO table1 SELECT number, number FROM numbers(10); +INSERT INTO table2 SELECT number * 2, number * 20 FROM numbers(6); + +select t1.a t1_a, t2.a +from table1 as t1 +join table2 as t2 on table1.a = table2.a and t1.a = table2.a and t1_a = table2.a +order by all; + +select t1.a t1_a, t2.a +from table1 as t1 +join table2 as t2 on table1.a = t2.a and t1.a = t2.a and t1_a = t2.a +order by all; + +select t1.a as t1_a, t2.a t2_a +from table1 as t1 +join table2 as t2 on table1.a = t2_a and t1.a = t2_a and t1_a = t2_a +order by all; + +select t1.a t1_a, t2.a +from table1 as t1 +join table2 as t2 on table1.a = table2.a and t1.a = t2.a and t1_a = t2.a +order by all; + +select t1.a t1_a, t2.a as t2_a +from table1 as t1 +join table2 as t2 on table1.a = table2.a and t1.a = t2.a and t1_a = t2_a +order by all; + +select * +from table1 as t1 +join table2 as t2 on t1_a = t2_a +where (table1.a as t1_a) > 4 and (table2.a as t2_a) > 2 +order by all; + +select t1.*, t2.* +from table1 as 
t1 +join table2 as t2 on t1_a = t2_a +where (t1.a as t1_a) > 2 and (t2.a as t2_a) > 4 +order by all; + +DROP TABLE table1; +DROP TABLE table2; diff --git a/parser/testdata/00846_join_using_tuple_crash/ast.json b/parser/testdata/00846_join_using_tuple_crash/ast.json new file mode 100644 index 000000000..698641032 --- /dev/null +++ b/parser/testdata/00846_join_using_tuple_crash/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001820977, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00846_join_using_tuple_crash/metadata.json b/parser/testdata/00846_join_using_tuple_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00846_join_using_tuple_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00846_join_using_tuple_crash/query.sql b/parser/testdata/00846_join_using_tuple_crash/query.sql new file mode 100644 index 000000000..e02ef87f4 --- /dev/null +++ b/parser/testdata/00846_join_using_tuple_crash/query.sql @@ -0,0 +1,9 @@ +SET any_join_distinct_right_table_keys = 1; + +SELECT * FROM (SELECT dummy as a, (toUInt8(0), toUInt8(0)) AS tup FROM system.one) js1 +JOIN (SELECT dummy as a, (toUInt8(0), toUInt8(0)) AS tup FROM system.one) js2 +USING (a, tup); + +SELECT * FROM (SELECT dummy as a, (toUInt8(0), toUInt8(0)) AS tup FROM system.one) js1 +GLOBAL ANY FULL OUTER JOIN (SELECT dummy as a, (toUInt8(0), toUInt8(0)) AS tup FROM system.one) js2 +USING (a, tup); diff --git a/parser/testdata/00847_multiple_join_same_column/ast.json b/parser/testdata/00847_multiple_join_same_column/ast.json new file mode 100644 index 000000000..4ebd783d0 --- /dev/null +++ b/parser/testdata/00847_multiple_join_same_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001225142, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/00847_multiple_join_same_column/metadata.json b/parser/testdata/00847_multiple_join_same_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00847_multiple_join_same_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00847_multiple_join_same_column/query.sql b/parser/testdata/00847_multiple_join_same_column/query.sql new file mode 100644 index 000000000..bbb4eb124 --- /dev/null +++ b/parser/testdata/00847_multiple_join_same_column/query.sql @@ -0,0 +1,62 @@ +drop table if exists t; +drop table if exists s; +drop table if exists y; + +create table t(a Int64, b Int64) engine = TinyLog; +create table s(a Int64, b Int64) engine = TinyLog; +create table y(a Int64, b Int64) engine = TinyLog; + +insert into t values (1,1), (2,2); +insert into s values (1,1); +insert into y values (1,1); + +select t.a, s.b, s.a, s.b, y.a, y.b from t +left join s on (t.a = s.a and t.b = s.b) +left join y on (y.a = s.a and y.b = s.b) +order by t.a +format Vertical; + +select t.a, s.b, s.a, s.b, y.a, y.b from t +left join s on (t.a = s.a and s.b = t.b) +left join y on (y.a = s.a and y.b = s.b) +order by t.a +format PrettyCompactMonoBlock; + +select t.a as t_a from t +left join s on s.a = t_a +order by t.a +format PrettyCompactMonoBlock; + +select t.a, s.a as s_a from t 
+left join s on s.a = t.a +left join y on y.b = s.b +order by t.a +format PrettyCompactMonoBlock; + +select t.a, t.a, t.b as t_b from t +left join s on t.a = s.a +left join y on y.b = s.b +order by t.a +format PrettyCompactMonoBlock; + +select s.a, s.a, s.b as s_b, s.b from t +left join s on s.a = t.a +left join y on s.b = y.b +order by t.a +format PrettyCompactMonoBlock; + +select y.a, y.a, y.b as y_b, y.b from t +left join s on s.a = t.a +left join y on y.b = s.b +order by t.a +format PrettyCompactMonoBlock; + +select t.a, t.a as t_a, s.a, s.a as s_a, y.a, y.a as y_a from t +left join s on t.a = s.a +left join y on y.b = s.b +order by t.a +format PrettyCompactMonoBlock; + +drop table t; +drop table s; +drop table y; diff --git a/parser/testdata/00848_join_use_nulls_segfault/ast.json b/parser/testdata/00848_join_use_nulls_segfault/ast.json new file mode 100644 index 000000000..c89aba498 --- /dev/null +++ b/parser/testdata/00848_join_use_nulls_segfault/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001357237, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00848_join_use_nulls_segfault/metadata.json b/parser/testdata/00848_join_use_nulls_segfault/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00848_join_use_nulls_segfault/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00848_join_use_nulls_segfault/query.sql b/parser/testdata/00848_join_use_nulls_segfault/query.sql new file mode 100644 index 000000000..275968236 --- /dev/null +++ b/parser/testdata/00848_join_use_nulls_segfault/query.sql @@ -0,0 +1,70 @@ +SET any_join_distinct_right_table_keys = 1; +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS t1_00848; +DROP TABLE IF EXISTS t2_00848; +DROP TABLE IF EXISTS t3_00848; +CREATE TABLE t1_00848 ( id String ) ENGINE = Memory; +CREATE TABLE t2_00848 ( id Nullable(String) ) ENGINE = Memory; +CREATE TABLE t3_00848 ( id Nullable(String), not_id Nullable(String) ) ENGINE = Memory; + +insert into t1_00848 values ('l'); +insert into t3_00848 (id) values ('r'); + +SELECT 'on'; + +SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 ANY LEFT JOIN t3_00848 t3 ON t1.id = t3.id ORDER BY t1.id, t3.id; +SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 ANY FULL JOIN t3_00848 t3 ON t1.id = t3.id ORDER BY t1.id, t3.id; +SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2_00848 t2 ANY FULL JOIN t3_00848 t3 ON t2.id = t3.id ORDER BY t2.id, t3.id; + +SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 LEFT JOIN t3_00848 t3 ON t1.id = t3.id ORDER BY t1.id, t3.id; +SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 FULL JOIN t3_00848 t3 ON t1.id = t3.id ORDER BY t1.id, t3.id; +SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2_00848 t2 FULL JOIN t3_00848 t3 ON t2.id = t3.id ORDER BY t2.id, t3.id; + +SELECT t3.id = 'l', t3.not_id = 'l' FROM t1_00848 t1 ANY LEFT JOIN t3_00848 t3 ON t1.id = t3.id ORDER BY t1.id, t3.id; +SELECT t3.id = 'l', t3.not_id = 'l' FROM t1_00848 t1 LEFT JOIN t3_00848 t3 ON t1.id = t3.id ORDER BY t1.id, t3.id; + +SELECT 'using'; + +SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 ANY LEFT JOIN t3_00848 t3 USING(id) ORDER BY t1.id, t3.id; +SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 ANY FULL JOIN t3_00848 t3 USING(id) ORDER BY t1.id, t3.id; +SELECT *, 
toTypeName(t2.id), toTypeName(t3.id) FROM t2_00848 t2 ANY FULL JOIN t3_00848 t3 USING(id) ORDER BY t2.id, t3.id; + +SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 LEFT JOIN t3_00848 t3 USING(id) ORDER BY t1.id, t3.id; +SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 FULL JOIN t3_00848 t3 USING(id) ORDER BY t1.id, t3.id; +SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2_00848 t2 FULL JOIN t3_00848 t3 USING(id) ORDER BY t2.id, t3.id; + +SELECT t3.id = 'l', t3.not_id = 'l' FROM t1_00848 t1 ANY LEFT JOIN t3_00848 t3 USING(id) ORDER BY t1.id, t3.id; +SELECT t3.id = 'l', t3.not_id = 'l' FROM t1_00848 t1 LEFT JOIN t3_00848 t3 USING(id) ORDER BY t1.id, t3.id; + +SET join_use_nulls = 1; + +SELECT 'on + join_use_nulls'; + +SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 ANY LEFT JOIN t3_00848 t3 ON t1.id = t3.id ORDER BY t1.id, t3.id; +SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 ANY FULL JOIN t3_00848 t3 ON t1.id = t3.id ORDER BY t1.id, t3.id; +SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2_00848 t2 ANY FULL JOIN t3_00848 t3 ON t2.id = t3.id ORDER BY t2.id, t3.id; + +SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 LEFT JOIN t3_00848 t3 ON t1.id = t3.id ORDER BY t1.id, t3.id; +SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 FULL JOIN t3_00848 t3 ON t1.id = t3.id ORDER BY t1.id, t3.id; +SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2_00848 t2 FULL JOIN t3_00848 t3 ON t2.id = t3.id ORDER BY t2.id, t3.id; + +SELECT t3.id = 'l', t3.not_id = 'l' FROM t1_00848 t1 ANY LEFT JOIN t3_00848 t3 ON t1.id = t3.id ORDER BY t1.id, t3.id; +SELECT t3.id = 'l', t3.not_id = 'l' FROM t1_00848 t1 LEFT JOIN t3_00848 t3 ON t1.id = t3.id ORDER BY t1.id, t3.id; + +SELECT 'using + join_use_nulls'; + +SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 ANY LEFT JOIN t3_00848 t3 USING(id) ORDER BY id; +SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 ANY FULL JOIN t3_00848 t3 USING(id) ORDER BY id; +SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2_00848 t2 ANY FULL JOIN t3_00848 t3 USING(id) ORDER BY id; + +SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 LEFT JOIN t3_00848 t3 USING(id) ORDER BY id; +SELECT *, toTypeName(t1.id), toTypeName(t3.id) FROM t1_00848 t1 FULL JOIN t3_00848 t3 USING(id) ORDER BY id; +SELECT *, toTypeName(t2.id), toTypeName(t3.id) FROM t2_00848 t2 FULL JOIN t3_00848 t3 USING(id) ORDER BY id; + +SELECT t3.id = 'l', t3.not_id = 'l' FROM t1_00848 t1 ANY LEFT JOIN t3_00848 t3 USING(id) ORDER BY id; +SELECT t3.id = 'l', t3.not_id = 'l' FROM t1_00848 t1 LEFT JOIN t3_00848 t3 USING(id) ORDER BY id; + +DROP TABLE t1_00848; +DROP TABLE t2_00848; +DROP TABLE t3_00848; diff --git a/parser/testdata/00849_multiple_comma_join_2/ast.json b/parser/testdata/00849_multiple_comma_join_2/ast.json new file mode 100644 index 000000000..3488a7726 --- /dev/null +++ b/parser/testdata/00849_multiple_comma_join_2/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001199943, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00849_multiple_comma_join_2/metadata.json b/parser/testdata/00849_multiple_comma_join_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00849_multiple_comma_join_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/00849_multiple_comma_join_2/query.sql b/parser/testdata/00849_multiple_comma_join_2/query.sql new file mode 100644 index 000000000..6f3765837 --- /dev/null +++ b/parser/testdata/00849_multiple_comma_join_2/query.sql @@ -0,0 +1,148 @@ +SET enable_optimize_predicate_expression = 0; +SET convert_query_to_cnf = 0; +SET cross_to_inner_join_rewrite = 1; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; + +CREATE TABLE t1 (a UInt32, b Nullable(Int32)) ENGINE = Memory; +CREATE TABLE t2 (a UInt32, b Nullable(Int32)) ENGINE = Memory; +CREATE TABLE t3 (a UInt32, b Nullable(Int32)) ENGINE = Memory; +CREATE TABLE t4 (a UInt32, b Nullable(Int32)) ENGINE = Memory; + +SET enable_analyzer = 0; + +--- EXPLAIN SYNTAX (old AST based optimization) +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN SYNTAX SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a); + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN SYNTAX SELECT t1.a FROM t1, t2 WHERE t1.b = t2.b); + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN SYNTAX SELECT t1.a FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a); + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN SYNTAX SELECT t1.a FROM t1, t2, t3 WHERE t1.b = t2.b AND t1.b = t3.b); + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN SYNTAX SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t1.a = t3.a AND t1.a = t4.a); + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN SYNTAX SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.b = t2.b AND t1.b = t3.b AND t1.b = t4.b); + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN SYNTAX SELECT t1.a FROM t1, t2, t3, t4 WHERE t2.a = t1.a AND t2.a = t3.a AND t2.a = t4.a); + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN SYNTAX SELECT t1.a FROM t1, t2, t3, t4 WHERE t3.a = t1.a AND t3.a = t2.a AND t3.a = t4.a); + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN SYNTAX SELECT t1.a FROM t1, t2, t3, t4 WHERE t4.a = t1.a AND t4.a = t2.a AND t4.a = t3.a); + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN SYNTAX SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t2.a = t3.a AND t3.a = t4.a); + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN SYNTAX SELECT t1.a FROM t1, t2, t3, t4); + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN SYNTAX SELECT t1.a FROM t1 CROSS JOIN t2 CROSS JOIN t3 CROSS JOIN t4); + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN SYNTAX SELECT t1.a FROM t1, t2 CROSS JOIN t3); + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN SYNTAX SELECT t1.a FROM t1 JOIN t2 USING a CROSS JOIN t3); + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain 
like '%INNER%') FROM ( + EXPLAIN SYNTAX SELECT t1.a FROM t1 JOIN t2 ON t1.a = t2.a CROSS JOIN t3); + +-- {echoOn} +--- EXPLAIN QUERY TREE +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a) SETTINGS enable_analyzer = 1; + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 WHERE t1.b = t2.b) SETTINGS enable_analyzer = 1; + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a) SETTINGS enable_analyzer = 1; + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3 WHERE t1.b = t2.b AND t1.b = t3.b) SETTINGS enable_analyzer = 1; + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t1.a = t3.a AND t1.a = t4.a) SETTINGS enable_analyzer = 1; + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.b = t2.b AND t1.b = t3.b AND t1.b = t4.b) SETTINGS enable_analyzer = 1; + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t2.a = t1.a AND t2.a = t3.a AND t2.a = t4.a) SETTINGS enable_analyzer = 1; + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t3.a = t1.a AND t3.a = t2.a AND t3.a = t4.a) SETTINGS enable_analyzer = 1; + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t4.a = t1.a AND t4.a = t2.a AND t4.a = t3.a) SETTINGS enable_analyzer = 1; + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t2.a = t3.a AND t3.a = t4.a) SETTINGS enable_analyzer = 1; + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2, t3, t4) SETTINGS enable_analyzer = 1; + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN QUERY TREE SELECT t1.a FROM t1 CROSS JOIN t2 CROSS JOIN t3 CROSS JOIN t4) SETTINGS enable_analyzer = 1; + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN QUERY TREE SELECT t1.a FROM t1, t2 CROSS JOIN t3) SETTINGS enable_analyzer = 1; + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN QUERY TREE SELECT t1.a FROM t1 JOIN t2 USING a CROSS JOIN t3) SETTINGS enable_analyzer = 1; + +SELECT countIf(explain like '%COMMA%' OR explain like '%CROSS%'), countIf(explain like '%INNER%') FROM ( + EXPLAIN QUERY TREE SELECT t1.a FROM t1 JOIN t2 ON t1.a = t2.a CROSS JOIN t3) SETTINGS enable_analyzer = 1; + +-- {echoOff} + +INSERT INTO t1 values (1,1), (2,2), (3,3), (4,4); +INSERT 
INTO t2 values (1,1), (1, Null); +INSERT INTO t3 values (1,1), (1, Null); +INSERT INTO t4 values (1,1), (1, Null); + +SET enable_analyzer = 1; + +SELECT 'SELECT * FROM t1, t2'; +SELECT * FROM t1, t2 +ORDER BY t1.a, t2.b; +SELECT 'SELECT * FROM t1, t2 WHERE t1.a = t2.a'; +SELECT * FROM t1, t2 WHERE t1.a = t2.a +ORDER BY t1.a, t2.b; +SELECT 'SELECT t1.a, t2.a FROM t1, t2 WHERE t1.b = t2.b'; +SELECT t1.a, t2.b FROM t1, t2 WHERE t1.b = t2.b; +SELECT 'SELECT t1.a, t2.b, t3.b FROM t1, t2, t3 WHERE t1.a = t2.a AND t1.a = t3.a'; +SELECT t1.a, t2.b, t3.b FROM t1, t2, t3 +WHERE t1.a = t2.a AND t1.a = t3.a +ORDER BY t2.b, t3.b; +SELECT 'SELECT t1.a, t2.b, t3.b FROM t1, t2, t3 WHERE t1.b = t2.b AND t1.b = t3.b'; +SELECT t1.a, t2.b, t3.b FROM t1, t2, t3 WHERE t1.b = t2.b AND t1.b = t3.b; +SELECT 'SELECT t1.a, t2.b, t3.b, t4.b FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t1.a = t3.a AND t1.a = t4.a'; +SELECT t1.a, t2.b, t3.b, t4.b FROM t1, t2, t3, t4 +WHERE t1.a = t2.a AND t1.a = t3.a AND t1.a = t4.a +ORDER BY t2.b, t3.b, t4.b; +SELECT 'SELECT t1.a, t2.b, t3.b, t4.b FROM t1, t2, t3, t4 WHERE t1.b = t2.b AND t1.b = t3.b AND t1.b = t4.b'; +SELECT t1.a, t2.b, t3.b, t4.b FROM t1, t2, t3, t4 +WHERE t1.b = t2.b AND t1.b = t3.b AND t1.b = t4.b; +SELECT 'SELECT t1.a, t2.b, t3.b, t4.b FROM t1, t2, t3, t4 WHERE t1.a = t2.a AND t2.a = t3.a AND t3.a = t4.a'; +SELECT t1.a, t2.b, t3.b, t4.b FROM t1, t2, t3, t4 +WHERE t1.a = t2.a AND t2.a = t3.a AND t3.a = t4.a +ORDER BY t2.b, t3.b, t4.b; + +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +DROP TABLE t4; diff --git a/parser/testdata/00850_global_join_dups/ast.json b/parser/testdata/00850_global_join_dups/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00850_global_join_dups/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00850_global_join_dups/metadata.json b/parser/testdata/00850_global_join_dups/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00850_global_join_dups/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00850_global_join_dups/query.sql b/parser/testdata/00850_global_join_dups/query.sql new file mode 100644 index 000000000..5a3ab0f1a --- /dev/null +++ b/parser/testdata/00850_global_join_dups/query.sql @@ -0,0 +1,86 @@ +-- Tags: global + +DROP TABLE IF EXISTS t_local; +DROP TABLE IF EXISTS t1_00850; +DROP TABLE IF EXISTS t2_00850; + +CREATE TABLE t_local (dummy UInt8) ENGINE = Memory; +CREATE TABLE t1_00850 (dummy UInt8) ENGINE = Distributed(test_shard_localhost, currentDatabase(), 't_local'); +CREATE TABLE t2_00850 (dummy UInt8) ENGINE = Distributed(test_shard_localhost, currentDatabase(), 't_local'); + +INSERT INTO t_local VALUES (1); + +SET joined_subquery_requires_alias = 0; + +SELECT * FROM t1_00850 +GLOBAL INNER JOIN +( + SELECT * + FROM ( SELECT * FROM t2_00850 ) + INNER JOIN ( SELECT * FROM t1_00850 ) + USING dummy +) USING dummy; + +-- query from fuzzer +SELECT toDateTime64(toString(toString('0000-00-00 00:00:000000-00-00 00:00:00', toDateTime64(toDateTime64('655.36', -2, NULL)))), NULL) FROM t1_00850 GLOBAL INNER JOIN (SELECT toDateTime64(toDateTime64('6553.6', '', NULL), NULL), * FROM (SELECT * FROM t2_00850) INNER JOIN (SELECT toDateTime64('6553.7', 1024, NULL), * FROM t1_00850) USING (dummy)) USING (dummy); + +SELECT toString('0000-00-00 00:00:000000-00-00 00:00:00', toDateTime64(toDateTime64('655.36', -2, NULL))); + +DROP TABLE t_local; +DROP TABLE t1_00850; +DROP TABLE t2_00850; + + +SELECT * FROM 
remote('127.0.0.2', system.one) +GLOBAL INNER JOIN +( + SELECT * + FROM ( SELECT dummy FROM remote('127.0.0.2', system.one) ) t1_00850 + GLOBAL INNER JOIN ( SELECT dummy FROM remote('127.0.0.3', system.one) ) t2_00850 + USING dummy +) USING dummy; + +SELECT * FROM remote('127.0.0.2', system.one) +GLOBAL INNER JOIN +( + SELECT *, dummy + FROM ( SELECT dummy FROM remote('127.0.0.2', system.one) ) t1_00850 + GLOBAL INNER JOIN ( SELECT dummy FROM remote('127.0.0.3', system.one) ) t2_00850 + USING dummy +) USING dummy; + +SELECT * FROM remote('127.0.0.2', system.one) +GLOBAL INNER JOIN +( + SELECT *, t1_00850.*, t2_00850.* + FROM ( SELECT toUInt8(0) AS dummy ) t1_00850 + INNER JOIN ( SELECT toUInt8(0) AS dummy ) t2_00850 + USING dummy +) USING dummy; + +SELECT * FROM remote('127.0.0.2', system.one) +GLOBAL INNER JOIN +( + SELECT *, dummy + FROM ( SELECT toUInt8(0) AS dummy ) t1_00850 + INNER JOIN ( SELECT toUInt8(0) AS dummy ) t2_00850 + USING dummy +) USING dummy; + +SELECT * FROM remote('127.0.0.2', system.one) +GLOBAL INNER JOIN +( + SELECT *, dummy as other + FROM ( SELECT dummy FROM remote('127.0.0.3', system.one) ) t1_00850 + GLOBAL INNER JOIN ( SELECT toUInt8(0) AS dummy ) t2_00850 + USING dummy +) USING dummy; + +SELECT * FROM remote('127.0.0.2', system.one) +GLOBAL INNER JOIN +( + SELECT *, dummy, dummy as other + FROM ( SELECT toUInt8(0) AS dummy ) t1_00850 + GLOBAL INNER JOIN ( SELECT dummy FROM remote('127.0.0.3', system.one) ) t2_00850 + USING dummy +) USING dummy; diff --git a/parser/testdata/00852_any_join_nulls/ast.json b/parser/testdata/00852_any_join_nulls/ast.json new file mode 100644 index 000000000..fed94eee7 --- /dev/null +++ b/parser/testdata/00852_any_join_nulls/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table1 (children 1)" + }, + { + "explain": " Identifier table1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00137349, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/00852_any_join_nulls/metadata.json b/parser/testdata/00852_any_join_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00852_any_join_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00852_any_join_nulls/query.sql b/parser/testdata/00852_any_join_nulls/query.sql new file mode 100644 index 000000000..5f23e1481 --- /dev/null +++ b/parser/testdata/00852_any_join_nulls/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS table1; +DROP TABLE IF EXISTS table2; +CREATE TABLE table1 ( id String ) ENGINE = Log; +CREATE TABLE table2 ( parent_id String ) ENGINE = Log; + +insert into table1 values ('1'); + +SELECT table2.parent_id = '', isNull(table2.parent_id) +FROM table1 ANY LEFT JOIN table2 ON table1.id = table2.parent_id; + +SET join_use_nulls = 1; + +SELECT table2.parent_id = '', isNull(table2.parent_id) +FROM table1 ANY LEFT JOIN table2 ON table1.id = table2.parent_id; + +DROP TABLE table1; +DROP TABLE table2; diff --git a/parser/testdata/00853_join_with_nulls_crash/ast.json b/parser/testdata/00853_join_with_nulls_crash/ast.json new file mode 100644 index 000000000..91195eb6e --- /dev/null +++ b/parser/testdata/00853_join_with_nulls_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_a (children 1)" + }, + { + "explain": " Identifier table_a" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 
0.001090699, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00853_join_with_nulls_crash/metadata.json b/parser/testdata/00853_join_with_nulls_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00853_join_with_nulls_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00853_join_with_nulls_crash/query.sql b/parser/testdata/00853_join_with_nulls_crash/query.sql new file mode 100644 index 000000000..b620b8a79 --- /dev/null +++ b/parser/testdata/00853_join_with_nulls_crash/query.sql @@ -0,0 +1,57 @@ +DROP TABLE IF EXISTS table_a; +DROP TABLE IF EXISTS table_b; + +CREATE TABLE table_a ( + event_id UInt64, + something String, + other Nullable(String) +) ENGINE = MergeTree ORDER BY (event_id); + +CREATE TABLE table_b ( + event_id UInt64, + something Nullable(String), + other String +) ENGINE = MergeTree ORDER BY (event_id); + +INSERT INTO table_a VALUES (1, 'foo', 'foo'), (2, 'foo', 'foo'), (3, 'bar', 'bar'); +INSERT INTO table_b VALUES (1, 'bar', 'bar'), (2, 'bar', 'bar'), (3, 'test', 'test'), (4, NULL, ''); + +SELECT s1.other, s2.other, count_a, count_b, toTypeName(s1.other), toTypeName(s2.other) FROM + ( SELECT other, count() AS count_a FROM table_a GROUP BY other ) s1 +ALL FULL JOIN + ( SELECT other, count() AS count_b FROM table_b GROUP BY other ) s2 +ON s1.other = s2.other +ORDER BY s2.other DESC, count_a, s1.other; + +SELECT s1.other, s2.other, count_a, count_b, toTypeName(s1.other), toTypeName(s2.other) FROM + ( SELECT other, count() AS count_a FROM table_a GROUP BY other ) s1 +ALL FULL JOIN + ( SELECT other, count() AS count_b FROM table_b GROUP BY other ) s2 +ON s1.other = s2.other +ORDER BY s2.other DESC, count_a, s1.other; + +SELECT s1.something, s2.something, count_a, count_b, toTypeName(s1.something), toTypeName(s2.something) FROM + ( SELECT something, count() AS count_a FROM table_a GROUP BY something ) s1 +ALL FULL JOIN + ( SELECT something, count() AS count_b FROM table_b GROUP BY something ) s2 +ON s1.something = s2.something +ORDER BY count_a DESC, something, s2.something; + +SELECT s1.something, s2.something, count_a, count_b, toTypeName(s1.something), toTypeName(s2.something) FROM + ( SELECT something, count() AS count_a FROM table_a GROUP BY something ) s1 +ALL RIGHT JOIN + ( SELECT something, count() AS count_b FROM table_b GROUP BY something ) s2 +ON s1.something = s2.something +ORDER BY count_a DESC, s1.something, s2.something; + +SET joined_subquery_requires_alias = 0; + +SELECT something, count_a, count_b, toTypeName(something) FROM + ( SELECT something, count() AS count_a FROM table_a GROUP BY something ) as s1 +ALL FULL JOIN + ( SELECT something, count() AS count_b FROM table_b GROUP BY something ) as s2 +ON s1.something = s2.something +ORDER BY count_a DESC, something DESC; + +DROP TABLE table_a; +DROP TABLE table_b; diff --git a/parser/testdata/00854_multiple_join_asterisks/ast.json b/parser/testdata/00854_multiple_join_asterisks/ast.json new file mode 100644 index 000000000..766506c6a --- /dev/null +++ b/parser/testdata/00854_multiple_join_asterisks/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier t1.dummy" + }, + { + "explain": " Identifier t2.dummy" + }, + { 
+ "explain": " Identifier t3.dummy" + }, + { + "explain": " TablesInSelectQuery (children 3)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one (alias t1)" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one (alias t2)" + }, + { + "explain": " TableJoin (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t1.dummy" + }, + { + "explain": " Identifier t2.dummy" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one (alias t3)" + }, + { + "explain": " TableJoin (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t1.dummy" + }, + { + "explain": " Identifier t3.dummy" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.001088407, + "rows_read": 27, + "bytes_read": 1092 + } +} diff --git a/parser/testdata/00854_multiple_join_asterisks/metadata.json b/parser/testdata/00854_multiple_join_asterisks/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00854_multiple_join_asterisks/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00854_multiple_join_asterisks/query.sql b/parser/testdata/00854_multiple_join_asterisks/query.sql new file mode 100644 index 000000000..6794f178f --- /dev/null +++ b/parser/testdata/00854_multiple_join_asterisks/query.sql @@ -0,0 +1,10 @@ +select t1.dummy, t2.dummy, t3.dummy from system.one t1 join system.one t2 on t1.dummy = t2.dummy join system.one t3 ON t1.dummy = t3.dummy; +select * from system.one t1 join system.one t2 on t1.dummy = t2.dummy join system.one t3 ON t1.dummy = t3.dummy; +select t1.* from system.one t1 join system.one t2 on t1.dummy = t2.dummy join system.one t3 ON t1.dummy = t3.dummy; +select t2.*, t3.* from system.one t1 join system.one t2 on t1.dummy = t2.dummy join system.one t3 ON t1.dummy = t3.dummy; +select t1.dummy, t2.*, t3.dummy from system.one t1 join system.one t2 on t1.dummy = t2.dummy join system.one t3 ON t1.dummy = t3.dummy; +select t1.dummy, t2.*, t3.dummy from system.one t1 join (select dummy,0 as another_one from system.one) t2 on t1.dummy = t2.dummy OR t1.dummy = t2.another_one join system.one t3 ON t1.dummy = t3.dummy; + +select t1.dummy, t2.*, t3.dummy from (select * from system.one) t1 +join system.one t2 on t1.dummy = t2.dummy +join system.one t3 ON t1.dummy = t3.dummy; diff --git a/parser/testdata/00855_join_with_array_join/ast.json b/parser/testdata/00855_join_with_array_join/ast.json new file mode 100644 index 000000000..4841965af --- /dev/null +++ b/parser/testdata/00855_join_with_array_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001061232, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00855_join_with_array_join/metadata.json b/parser/testdata/00855_join_with_array_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00855_join_with_array_join/metadata.json @@ -0,0 +1 @@ 
+{"todo": true} diff --git a/parser/testdata/00855_join_with_array_join/query.sql b/parser/testdata/00855_join_with_array_join/query.sql new file mode 100644 index 000000000..2d5bc6cb1 --- /dev/null +++ b/parser/testdata/00855_join_with_array_join/query.sql @@ -0,0 +1,51 @@ +SET joined_subquery_requires_alias = 0; +SET enable_analyzer = 1; + +SELECT ax, c FROM (SELECT [1,2] ax, 0 c) ARRAY JOIN ax JOIN (SELECT 0 c) USING (c); +SELECT ax, c FROM (SELECT [3,4] ax, 0 c) JOIN (SELECT 0 c) USING (c) ARRAY JOIN ax; +SELECT ax, c FROM (SELECT [5,6] ax, 0 c) s1 JOIN system.one s2 ON s1.c = s2.dummy ARRAY JOIN ax; +SELECT ax, c, d FROM (SELECT [7,8] ax, 1 c, 0 d) s1 JOIN system.one s2 ON s1.c = s2.dummy OR s1.d = s2.dummy ARRAY JOIN ax; + + +SELECT ax, c FROM (SELECT [101,102] ax, 0 c) s1 +JOIN system.one s2 ON s1.c = s2.dummy +JOIN system.one s3 ON s1.c = s3.dummy +ARRAY JOIN ax; + +SELECT '-'; + +SET joined_subquery_requires_alias = 1; + +DROP TABLE IF EXISTS f; +DROP TABLE IF EXISTS d; + +CREATE TABLE f (`d_ids` Array(Int64) ) ENGINE = TinyLog; +INSERT INTO f VALUES ([1, 2]); + +CREATE TABLE d (`id` Int64, `name` String ) ENGINE = TinyLog; + +INSERT INTO d VALUES (2, 'a2'), (3, 'a3'); + +SELECT d_ids, id, name FROM f LEFT ARRAY JOIN d_ids LEFT JOIN d ON d.id = d_ids ORDER BY id; +SELECT did, id, name FROM f LEFT ARRAY JOIN d_ids as did LEFT JOIN d ON d.id = did ORDER BY id; + +SELECT id, name FROM f LEFT ARRAY JOIN d_ids as id LEFT JOIN d ON d.id = id ORDER BY id; + +SELECT * FROM ( SELECT [dummy, dummy] AS dummy FROM system.one ) AS x ARRAY JOIN dummy +JOIN system.one AS y ON x.dummy == y.dummy; + +SELECT * FROM ( SELECT [dummy, dummy] AS dummy FROM system.one ) AS x ARRAY JOIN dummy +JOIN system.one AS y ON x.dummy + 1 == y.dummy + 1; + +SELECT * FROM ( SELECT [dummy, dummy] AS dummy FROM system.one ) AS x ARRAY JOIN dummy +JOIN system.one AS y USING dummy; + +SELECT * FROM ( SELECT [toUInt32(dummy), toUInt32(dummy)] AS dummy FROM system.one ) AS x ARRAY JOIN dummy +JOIN (select toInt32(dummy) as dummy from system.one ) AS y USING dummy; + +SELECT dummy > 0, toTypeName(any(dummy)), any(toTypeName(dummy)) +FROM ( SELECT [toUInt32(dummy), toUInt32(dummy)] AS dummy FROM system.one ) AS x ARRAY JOIN dummy +JOIN ( SELECT toInt32(dummy) AS dummy FROM system.one ) AS y USING dummy GROUP BY (dummy > 0); + +DROP TABLE IF EXISTS f; +DROP TABLE IF EXISTS d; diff --git a/parser/testdata/00856_no_column_issue_4242/ast.json b/parser/testdata/00856_no_column_issue_4242/ast.json new file mode 100644 index 000000000..d2963afb5 --- /dev/null +++ b/parser/testdata/00856_no_column_issue_4242/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1_00856 (children 1)" + }, + { + "explain": " Identifier t1_00856" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000963391, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00856_no_column_issue_4242/metadata.json b/parser/testdata/00856_no_column_issue_4242/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00856_no_column_issue_4242/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00856_no_column_issue_4242/query.sql b/parser/testdata/00856_no_column_issue_4242/query.sql new file mode 100644 index 000000000..ed2315199 --- /dev/null +++ b/parser/testdata/00856_no_column_issue_4242/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS t1_00856; +DROP TABLE IF EXISTS t2_00856; +CREATE 
TABLE t1_00856 (n Int32) ENGINE = Memory; +CREATE TABLE t2_00856 (a Int32, n Int32) ENGINE = Memory; + +SELECT count() FROM t1_00856 WHERE if(1, 1, n = 0); +SELECT count(n) FROM t2_00856 WHERE if(1, 1, n = 0); +SELECT count() FROM t2_00856 WHERE if(1, 1, n = 0); + +DROP TABLE t1_00856; +DROP TABLE t2_00856; diff --git a/parser/testdata/00857_global_joinsavel_table_alias/ast.json b/parser/testdata/00857_global_joinsavel_table_alias/ast.json new file mode 100644 index 000000000..88e7fd540 --- /dev/null +++ b/parser/testdata/00857_global_joinsavel_table_alias/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery local_table (children 1)" + }, + { + "explain": " Identifier local_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001061547, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/00857_global_joinsavel_table_alias/metadata.json b/parser/testdata/00857_global_joinsavel_table_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00857_global_joinsavel_table_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00857_global_joinsavel_table_alias/query.sql b/parser/testdata/00857_global_joinsavel_table_alias/query.sql new file mode 100644 index 000000000..092b071cb --- /dev/null +++ b/parser/testdata/00857_global_joinsavel_table_alias/query.sql @@ -0,0 +1,56 @@ +DROP TABLE IF EXISTS local_table; +DROP TABLE IF EXISTS other_table; + +CREATE TABLE local_table +( + id Int32, + name String, + ts DateTime, + oth_id Int32 +) ENGINE = MergeTree() PARTITION BY toMonday(ts) ORDER BY (ts, id); + +CREATE TABLE other_table +( + id Int32, + name String, + ts DateTime, + trd_id Int32 +) ENGINE = MergeTree() PARTITION BY toMonday(ts) ORDER BY (ts, id); + +INSERT INTO local_table VALUES(1, 'One', now(), 100); +INSERT INTO local_table VALUES(2, 'Two', now(), 200); +INSERT INTO other_table VALUES(100, 'One Hundred', now(), 1000); +INSERT INTO other_table VALUES(200, 'Two Hundred', now(), 2000); + +select t2.name from remote('127.0.0.2', currentDatabase(), 'local_table') as t1 +left join {CLICKHOUSE_DATABASE:Identifier}.other_table as t2 -- FIXME: doesn't work properly on remote without explicit database prefix +on t1.oth_id = t2.id +order by t2.name; + +select t2.name from other_table as t2 +global right join remote('127.0.0.2', currentDatabase(), 'local_table') as t1 +on t1.oth_id = t2.id +order by t2.name; + +select t2.name from remote('127.0.0.2', currentDatabase(), 'local_table') as t1 +global left join other_table as t2 +on t1.oth_id = t2.id +order by t2.name; + +select t2.name from remote('127.0.0.2', currentDatabase(), 'local_table') as t1 +global left join other_table as t2 +on t1.oth_id = t2.id +order by t2.name; + +select other_table.name from remote('127.0.0.2', currentDatabase(), 'local_table') as t1 +global left join other_table +on t1.oth_id = other_table.id +order by other_table.name; + +select other_table.name from remote('127.0.0.2', currentDatabase(), 'local_table') as t1 +global left join other_table as t2 +on t1.oth_id = other_table.id +order by other_table.name; + +DROP TABLE local_table; +DROP TABLE other_table; diff --git a/parser/testdata/00858_issue_4756/ast.json b/parser/testdata/00858_issue_4756/ast.json new file mode 100644 index 000000000..75c9cbe4f --- /dev/null +++ b/parser/testdata/00858_issue_4756/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": 
"String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001407137, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00858_issue_4756/metadata.json b/parser/testdata/00858_issue_4756/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00858_issue_4756/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00858_issue_4756/query.sql b/parser/testdata/00858_issue_4756/query.sql new file mode 100644 index 000000000..3f6ab037c --- /dev/null +++ b/parser/testdata/00858_issue_4756/query.sql @@ -0,0 +1,48 @@ +set enable_analyzer = 1; +set distributed_product_mode = 'local'; + +drop table if exists shard1; +drop table if exists shard2; +drop table if exists distr1; +drop table if exists distr2; + +create table shard1 (id Int32) engine = MergeTree order by cityHash64(id); +create table shard2 (id Int32) engine = MergeTree order by cityHash64(id); + +create table distr1 as shard1 engine Distributed (test_cluster_two_shards_localhost, currentDatabase(), shard1, cityHash64(id)); +create table distr2 as shard2 engine Distributed (test_cluster_two_shards_localhost, currentDatabase(), shard2, cityHash64(id)); + +insert into shard1 (id) values (0), (1); +insert into shard2 (id) values (1), (2); + +select distinct(distr1.id) from distr1 +where distr1.id in +( + select distr1.id + from distr1 + join distr2 on distr1.id = distr2.id + where distr1.id > 0 +); + +select distinct(d0.id) from distr1 d0 +where d0.id in +( + select d1.id + from distr1 as d1 + join distr2 as d2 on d1.id = d2.id + where d1.id > 0 +); + +select distinct(distr1.id) from distr1 +where distr1.id in +( + select distr1.id + from distr1 as d1 + join distr2 as d2 on distr1.id = distr2.id + where distr1.id > 0 +); + +drop table shard1; +drop table shard2; +drop table distr1; +drop table distr2; diff --git a/parser/testdata/00859_distinct_with_join/ast.json b/parser/testdata/00859_distinct_with_join/ast.json new file mode 100644 index 000000000..804348ddd --- /dev/null +++ b/parser/testdata/00859_distinct_with_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery fooL (children 1)" + }, + { + "explain": " Identifier fooL" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00113707, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/00859_distinct_with_join/metadata.json b/parser/testdata/00859_distinct_with_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00859_distinct_with_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00859_distinct_with_join/query.sql b/parser/testdata/00859_distinct_with_join/query.sql new file mode 100644 index 000000000..4fb6f4ec0 --- /dev/null +++ b/parser/testdata/00859_distinct_with_join/query.sql @@ -0,0 +1,13 @@ +drop table if exists fooL; +drop table if exists fooR; +create table fooL (a Int32, v String) engine = Memory; +create table fooR (a Int32, v String) engine = Memory; + +insert into fooL select number, 'L' || toString(number) from numbers(2); +insert into fooL select number, 'LL' || toString(number) from numbers(2); +insert into fooR select number, 'R' || toString(number) from numbers(2); + +select distinct a from fooL semi left join fooR using(a) order by a; + +drop table fooL; +drop table fooR; diff --git 
a/parser/testdata/00860_unknown_identifier_bug/ast.json b/parser/testdata/00860_unknown_identifier_bug/ast.json new file mode 100644 index 000000000..3d3128986 --- /dev/null +++ b/parser/testdata/00860_unknown_identifier_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery appointment_events (children 1)" + }, + { + "explain": " Identifier appointment_events" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001240836, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/00860_unknown_identifier_bug/metadata.json b/parser/testdata/00860_unknown_identifier_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00860_unknown_identifier_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00860_unknown_identifier_bug/query.sql b/parser/testdata/00860_unknown_identifier_bug/query.sql new file mode 100644 index 000000000..9bc056036 --- /dev/null +++ b/parser/testdata/00860_unknown_identifier_bug/query.sql @@ -0,0 +1,40 @@ +DROP TABLE IF EXISTS appointment_events; +CREATE TABLE appointment_events +( + _appointment_id UInt32, + _id String, + _status String, + _set_by_id String, + _company_id String, + _client_id String, + _type String, + _at String, + _vacancy_id String, + _set_at UInt32, + _job_requisition_id String +) ENGINE = Memory; + +INSERT INTO appointment_events (_appointment_id, _set_at, _status) values (1, 1, 'Created'), (2, 2, 'Created'); + +SELECT A._appointment_id, + A._id, + A._status, + A._set_by_id, + A._company_id, + A._client_id, + A._type, + A._at, + A._vacancy_id, + A._set_at, + A._job_requisition_id +FROM appointment_events A ANY +LEFT JOIN + (SELECT _appointment_id, + MAX(_set_at) AS max_set_at + FROM appointment_events + WHERE _status in ('Created', 'Transferred') + GROUP BY _appointment_id ) B USING _appointment_id +WHERE A._set_at = B.max_set_at +ORDER BY ALL; + +DROP TABLE appointment_events; diff --git a/parser/testdata/00861_decimal_quoted_csv/ast.json b/parser/testdata/00861_decimal_quoted_csv/ast.json new file mode 100644 index 000000000..089ef0fde --- /dev/null +++ b/parser/testdata/00861_decimal_quoted_csv/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_00861 (children 1)" + }, + { + "explain": " Identifier test_00861" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001207856, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00861_decimal_quoted_csv/metadata.json b/parser/testdata/00861_decimal_quoted_csv/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00861_decimal_quoted_csv/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00861_decimal_quoted_csv/query.sql b/parser/testdata/00861_decimal_quoted_csv/query.sql new file mode 100644 index 000000000..d7211c55d --- /dev/null +++ b/parser/testdata/00861_decimal_quoted_csv/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS test_00861; +CREATE TABLE test_00861 (key UInt64, d32 Decimal32(2), d64 Decimal64(2), d128 Decimal128(2)) ENGINE = Memory; + +INSERT INTO test_00861 FORMAT CSV "1","1","1","1" + +INSERT INTO test_00861 FORMAT CSV "2","-1","-1","-1" + +INSERT INTO test_00861 FORMAT CSV "3","1.0","1.0","1.0" + +INSERT INTO test_00861 FORMAT CSV "4","-0.1","-0.1","-0.1" + +INSERT INTO test_00861 FORMAT CSV 
"5","0.010","0.010","0.010" + +SELECT * FROM test_00861 ORDER BY key; + +DROP TABLE test_00861; diff --git a/parser/testdata/00862_decimal_in/ast.json b/parser/testdata/00862_decimal_in/ast.json new file mode 100644 index 000000000..5020d5ec8 --- /dev/null +++ b/parser/testdata/00862_decimal_in/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery temp (children 1)" + }, + { + "explain": " Identifier temp" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00106926, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/00862_decimal_in/metadata.json b/parser/testdata/00862_decimal_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00862_decimal_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00862_decimal_in/query.sql b/parser/testdata/00862_decimal_in/query.sql new file mode 100644 index 000000000..3aa7bb18c --- /dev/null +++ b/parser/testdata/00862_decimal_in/query.sql @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS temp; +CREATE TABLE temp +( + x Decimal(38, 2), + y Nullable(Decimal(38, 2)) +) ENGINE = Memory; + +INSERT INTO temp VALUES (32, 32), (64, 64), (128, 128), (256, 256); + +SELECT * FROM temp WHERE x IN (toDecimal128(128, 1)); +SELECT * FROM temp WHERE x IN (toDecimal128(128, 2)); +SELECT * FROM temp WHERE x IN (toDecimal128(128, 3)); +SELECT * FROM temp WHERE y IN (toDecimal128(128, 1)); +SELECT * FROM temp WHERE y IN (toDecimal128(128, 2)); +SELECT * FROM temp WHERE y IN (toDecimal128(128, 3)); + +SELECT * FROM temp WHERE x IN (toDecimal32(32, 1)); +SELECT * FROM temp WHERE x IN (toDecimal32(32, 2)); +SELECT * FROM temp WHERE x IN (toDecimal32(32, 3)); +SELECT * FROM temp WHERE y IN (toDecimal32(32, 1)); +SELECT * FROM temp WHERE y IN (toDecimal32(32, 2)); +SELECT * FROM temp WHERE y IN (toDecimal32(32, 3)); + +SELECT * FROM temp WHERE x IN (toDecimal64(64, 1)); +SELECT * FROM temp WHERE x IN (toDecimal64(64, 2)); +SELECT * FROM temp WHERE x IN (toDecimal64(64, 3)); +SELECT * FROM temp WHERE y IN (toDecimal64(64, 1)); +SELECT * FROM temp WHERE y IN (toDecimal64(64, 2)); +SELECT * FROM temp WHERE y IN (toDecimal64(64, 3)); + +SELECT * FROM temp WHERE x IN (toDecimal256(256, 1)); +SELECT * FROM temp WHERE x IN (toDecimal256(256, 2)); +SELECT * FROM temp WHERE x IN (toDecimal256(256, 3)); +SELECT * FROM temp WHERE y IN (toDecimal256(256, 1)); +SELECT * FROM temp WHERE y IN (toDecimal256(256, 2)); +SELECT * FROM temp WHERE y IN (toDecimal256(256, 3)); + +DROP TABLE IF EXISTS temp; diff --git a/parser/testdata/00863_comma_join_in/ast.json b/parser/testdata/00863_comma_join_in/ast.json new file mode 100644 index 000000000..ff1bbbe93 --- /dev/null +++ b/parser/testdata/00863_comma_join_in/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test1_00863 (children 1)" + }, + { + "explain": " Identifier test1_00863" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001097952, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/00863_comma_join_in/metadata.json b/parser/testdata/00863_comma_join_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00863_comma_join_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00863_comma_join_in/query.sql b/parser/testdata/00863_comma_join_in/query.sql new file mode 100644 index 
000000000..b81d9485a --- /dev/null +++ b/parser/testdata/00863_comma_join_in/query.sql @@ -0,0 +1,26 @@ +drop table if exists test1_00863; +drop table if exists test2_00863; +drop table if exists test3_00863; + +create table test1_00863 (id UInt64, code String) engine = Memory; +create table test3_00863 (id UInt64, code String) engine = Memory; +create table test2_00863 (id UInt64, code String, test1_id UInt64, test3_id UInt64) engine = Memory; + +insert into test1_00863 (id, code) select number, toString(number) FROM numbers(100000); +insert into test3_00863 (id, code) select number, toString(number) FROM numbers(100000); +insert into test2_00863 (id, code, test1_id, test3_id) select number, toString(number), number, number FROM numbers(100000); + +-- `parallel_hash` uses two-level hash tables (that have 256 tables internally), each preallocates 256 elements by default, +-- so we're getting max_threads * 256 * 256 * number_of_joins +SET max_memory_usage = 300000000; + +select test2_00863.id +from test1_00863, test2_00863, test3_00863 +where test1_00863.code in ('1', '2', '3') + and test2_00863.test1_id = test1_00863.id + and test2_00863.test3_id = test3_00863.id +order by all; + +drop table test1_00863; +drop table test2_00863; +drop table test3_00863; diff --git a/parser/testdata/00864_union_all_supertype/ast.json b/parser/testdata/00864_union_all_supertype/ast.json new file mode 100644 index 000000000..c267d01a2 --- /dev/null +++ b/parser/testdata/00864_union_all_supertype/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000925325, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00864_union_all_supertype/metadata.json b/parser/testdata/00864_union_all_supertype/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00864_union_all_supertype/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00864_union_all_supertype/query.sql b/parser/testdata/00864_union_all_supertype/query.sql new file mode 100644 index 000000000..de7835e29 --- /dev/null +++ b/parser/testdata/00864_union_all_supertype/query.sql @@ -0,0 +1,33 @@ +SET joined_subquery_requires_alias = 0; + +select toTypeName(key), toTypeName(value) from ( + select 1 as key, '' as value + union all + select toUInt64(2) as key, toNullable('') as value +); + +select toTypeName(key), toTypeName(value) from ( + select toDecimal64(2, 8) as key, toNullable('') as value + union all + select toDecimal32(2, 4) as key, toFixedString('', 1) as value +); + +select * from ( + select 'v1' as c1, null as c2 + union all + select 'v2' as c1, '' as c2 +) ALL FULL JOIN ( + select 'v1' as c1, 'w1' as c2 +) using c1,c2 +order by c1, c2; + +select key, s1.value, s2.value +from ( + select 'key1' as key, 'value1' as value +) s1 +all left join ( + select 'key1' as key, '' as value + union all + select 'key2' as key, toNullable('') as value +) s2 +using key; diff --git a/parser/testdata/00870_t64_codec/ast.json b/parser/testdata/00870_t64_codec/ast.json new file mode 100644 index 000000000..bb0f4f727 --- /dev/null +++ b/parser/testdata/00870_t64_codec/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t64 (children 1)" + }, + { + "explain": " Identifier t64" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001322454, + "rows_read": 
2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/00870_t64_codec/metadata.json b/parser/testdata/00870_t64_codec/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00870_t64_codec/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00870_t64_codec/query.sql b/parser/testdata/00870_t64_codec/query.sql new file mode 100644 index 000000000..a8a86b6fd --- /dev/null +++ b/parser/testdata/00870_t64_codec/query.sql @@ -0,0 +1,101 @@ +DROP TABLE IF EXISTS t64; + +CREATE TABLE t64 +( + u8 UInt8, + t_u8 UInt8 Codec(T64, ZSTD), + u16 UInt16, + t_u16 UInt16 Codec(T64, ZSTD), + u32 UInt32, + t_u32 UInt32 Codec(T64, ZSTD), + u64 UInt64, + t_u64 UInt64 Codec(T64, ZSTD) +) ENGINE MergeTree() ORDER BY tuple(); + +INSERT INTO t64 SELECT number AS x, x, x, x, x, x, x, x FROM numbers(1); +INSERT INTO t64 SELECT number AS x, x, x, x, x, x, x, x FROM numbers(2); +INSERT INTO t64 SELECT 42 AS x, x, x, x, x, x, x, x FROM numbers(4); + +SELECT * FROM t64 ORDER BY u64; + +INSERT INTO t64 SELECT number AS x, x, x, x, x, x, x, x FROM numbers(intExp2(8)); +INSERT INTO t64 SELECT number AS x, x, x, x, x, x, x, x FROM numbers(intExp2(9)); +SELECT * FROM t64 WHERE u8 != t_u8; +SELECT * FROM t64 WHERE u16 != t_u16; +SELECT * FROM t64 WHERE u32 != t_u32; +SELECT * FROM t64 WHERE u64 != t_u64; + +INSERT INTO t64 SELECT (intExp2(16) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (intExp2(16) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(11); +INSERT INTO t64 SELECT (intExp2(16) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(64); +INSERT INTO t64 SELECT (intExp2(16) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(65); +INSERT INTO t64 SELECT (intExp2(16) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(65); +SELECT * FROM t64 WHERE u8 != t_u8; +SELECT * FROM t64 WHERE u16 != t_u16; +SELECT * FROM t64 WHERE u32 != t_u32; +SELECT * FROM t64 WHERE u64 != t_u64; + +INSERT INTO t64 SELECT (intExp2(24) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (intExp2(24) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(11); +INSERT INTO t64 SELECT (intExp2(24) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(128); +INSERT INTO t64 SELECT (intExp2(24) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(129); +INSERT INTO t64 SELECT (intExp2(24) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(129); +SELECT * FROM t64 WHERE u8 != t_u8; +SELECT * FROM t64 WHERE u16 != t_u16; +SELECT * FROM t64 WHERE u32 != t_u32; +SELECT * FROM t64 WHERE u64 != t_u64; + +INSERT INTO t64 SELECT (intExp2(32) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (intExp2(32) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(20); +INSERT INTO t64 SELECT (intExp2(32) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(256); +INSERT INTO t64 SELECT (intExp2(32) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(257); +INSERT INTO t64 SELECT (intExp2(32) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(257); +SELECT * FROM t64 WHERE u8 != t_u8; +SELECT * FROM t64 WHERE u16 != t_u16; +SELECT * FROM t64 WHERE u32 != t_u32; +SELECT * FROM t64 WHERE u64 != t_u64; + +INSERT INTO t64 SELECT (intExp2(40) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (intExp2(40) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(20); +INSERT INTO t64 SELECT (intExp2(40) - 64 + number) AS x, x, x, x, x, x, x, x FROM 
numbers(512); +INSERT INTO t64 SELECT (intExp2(40) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(513); +INSERT INTO t64 SELECT (intExp2(40) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(513); +SELECT * FROM t64 WHERE u8 != t_u8; +SELECT * FROM t64 WHERE u16 != t_u16; +SELECT * FROM t64 WHERE u32 != t_u32; +SELECT * FROM t64 WHERE u64 != t_u64; + +INSERT INTO t64 SELECT (intExp2(48) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (intExp2(48) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(20); +INSERT INTO t64 SELECT (intExp2(48) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(1024); +INSERT INTO t64 SELECT (intExp2(48) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(1025); +INSERT INTO t64 SELECT (intExp2(48) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(1025); +SELECT * FROM t64 WHERE u8 != t_u8; +SELECT * FROM t64 WHERE u16 != t_u16; +SELECT * FROM t64 WHERE u32 != t_u32; +SELECT * FROM t64 WHERE u64 != t_u64; + +INSERT INTO t64 SELECT (intExp2(56) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (intExp2(56) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(20); +INSERT INTO t64 SELECT (intExp2(56) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(2048); +INSERT INTO t64 SELECT (intExp2(56) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(2049); +INSERT INTO t64 SELECT (intExp2(56) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(2049); +SELECT * FROM t64 WHERE u8 != t_u8; +SELECT * FROM t64 WHERE u16 != t_u16; +SELECT * FROM t64 WHERE u32 != t_u32; +SELECT * FROM t64 WHERE u64 != t_u64; + +INSERT INTO t64 SELECT (intExp2(63) + number * intExp2(62)) AS x, x, x, x, x, x, x, x FROM numbers(10); +SELECT * FROM t64 WHERE u8 != t_u8; +SELECT * FROM t64 WHERE u16 != t_u16; +SELECT * FROM t64 WHERE u32 != t_u32; +SELECT * FROM t64 WHERE u64 != t_u64; + +OPTIMIZE TABLE t64 FINAL; + +SELECT * FROM t64 WHERE u8 != t_u8; +SELECT * FROM t64 WHERE u16 != t_u16; +SELECT * FROM t64 WHERE u32 != t_u32; +SELECT * FROM t64 WHERE u64 != t_u64; + +DROP TABLE t64; diff --git a/parser/testdata/00871_t64_codec_signed/ast.json b/parser/testdata/00871_t64_codec_signed/ast.json new file mode 100644 index 000000000..7aabcb8d6 --- /dev/null +++ b/parser/testdata/00871_t64_codec_signed/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t64 (children 1)" + }, + { + "explain": " Identifier t64" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001015083, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/00871_t64_codec_signed/metadata.json b/parser/testdata/00871_t64_codec_signed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00871_t64_codec_signed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00871_t64_codec_signed/query.sql b/parser/testdata/00871_t64_codec_signed/query.sql new file mode 100644 index 000000000..f21325c12 --- /dev/null +++ b/parser/testdata/00871_t64_codec_signed/query.sql @@ -0,0 +1,128 @@ +DROP TABLE IF EXISTS t64; + +CREATE TABLE t64 +( + i8 Int8, + t_i8 Int8 Codec(T64, LZ4), + i16 Int16, + t_i16 Int16 Codec(T64, LZ4), + i32 Int32, + t_i32 Int32 Codec(T64, LZ4), + i64 Int64, + t_i64 Int64 Codec(T64, LZ4) +) ENGINE MergeTree() ORDER BY tuple(); + +INSERT INTO t64 SELECT toInt32(number)-1 AS x, x, x, x, x, x, x, x FROM numbers(2); +INSERT INTO t64 SELECT toInt32(number)-1 AS 
x, x, x, x, x, x, x, x FROM numbers(3); +INSERT INTO t64 SELECT 42 AS x, x, x, x, x, x, x, x FROM numbers(4); + +SELECT * FROM t64 ORDER BY i64; + +INSERT INTO t64 SELECT (intExp2(8) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(20); +SELECT i8, t_i8 FROM t64 WHERE i8 != t_i8; + +INSERT INTO t64 SELECT number AS x, x, x, x, x, x, x, x FROM numbers(intExp2(8)); +INSERT INTO t64 SELECT number AS x, x, x, x, x, x, x, x FROM numbers(intExp2(9)); +SELECT * FROM t64 WHERE i8 != t_i8; +SELECT * FROM t64 WHERE i16 != t_i16; +SELECT * FROM t64 WHERE i32 != t_i32; +SELECT * FROM t64 WHERE i64 != t_i64; + +INSERT INTO t64 SELECT (intExp2(16) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (intExp2(16) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(11); +INSERT INTO t64 SELECT (intExp2(16) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(64); +INSERT INTO t64 SELECT (intExp2(16) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(65); +INSERT INTO t64 SELECT (intExp2(16) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(65); +INSERT INTO t64 SELECT (10 - toInt64(intExp2(16)) + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (10 - toInt64(intExp2(16)) + number) AS x, x, x, x, x, x, x, x FROM numbers(11); +INSERT INTO t64 SELECT (64 - toInt64(intExp2(16)) + number) AS x, x, x, x, x, x, x, x FROM numbers(64); +INSERT INTO t64 SELECT (64 - toInt64(intExp2(16)) + number) AS x, x, x, x, x, x, x, x FROM numbers(65); +INSERT INTO t64 SELECT (1 - toInt64(intExp2(16)) + number) AS x, x, x, x, x, x, x, x FROM numbers(65); +SELECT * FROM t64 WHERE i8 != t_i8; +SELECT * FROM t64 WHERE i16 != t_i16; +SELECT * FROM t64 WHERE i32 != t_i32; +SELECT * FROM t64 WHERE i64 != t_i64; + +INSERT INTO t64 SELECT (intExp2(24) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (intExp2(24) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(11); +INSERT INTO t64 SELECT (intExp2(24) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(128); +INSERT INTO t64 SELECT (intExp2(24) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(129); +INSERT INTO t64 SELECT (intExp2(24) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(129); +INSERT INTO t64 SELECT (10 - toInt64(intExp2(24)) + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (10 - toInt64(intExp2(24)) + number) AS x, x, x, x, x, x, x, x FROM numbers(11); +INSERT INTO t64 SELECT (64 - toInt64(intExp2(24)) + number) AS x, x, x, x, x, x, x, x FROM numbers(128); +INSERT INTO t64 SELECT (64 - toInt64(intExp2(24)) + number) AS x, x, x, x, x, x, x, x FROM numbers(129); +INSERT INTO t64 SELECT (1 - toInt64(intExp2(24)) + number) AS x, x, x, x, x, x, x, x FROM numbers(129); +SELECT * FROM t64 WHERE i8 != t_i8; +SELECT * FROM t64 WHERE i16 != t_i16; +SELECT * FROM t64 WHERE i32 != t_i32; +SELECT * FROM t64 WHERE i64 != t_i64; + +INSERT INTO t64 SELECT (intExp2(32) - 2 + number) AS x, x, x, x, x, x, x, x FROM numbers(2); +INSERT INTO t64 SELECT (intExp2(32) - 2 + number) AS x, x, x, x, x, x, x, x FROM numbers(3); +INSERT INTO t64 SELECT (intExp2(32) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(64); +INSERT INTO t64 SELECT (intExp2(32) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(65); +INSERT INTO t64 SELECT (intExp2(32) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(65); +INSERT INTO t64 SELECT (10 - toInt64(intExp2(32)) + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (10 - 
toInt64(intExp2(32)) + number) AS x, x, x, x, x, x, x, x FROM numbers(11); +INSERT INTO t64 SELECT (64 - toInt64(intExp2(32)) + number) AS x, x, x, x, x, x, x, x FROM numbers(64); +INSERT INTO t64 SELECT (64 - toInt64(intExp2(32)) + number) AS x, x, x, x, x, x, x, x FROM numbers(65); +INSERT INTO t64 SELECT (1 - toInt64(intExp2(32)) + number) AS x, x, x, x, x, x, x, x FROM numbers(65); +SELECT * FROM t64 WHERE i8 != t_i8; +SELECT * FROM t64 WHERE i16 != t_i16; +SELECT * FROM t64 WHERE i32 != t_i32; +SELECT * FROM t64 WHERE i64 != t_i64; + +INSERT INTO t64 SELECT (intExp2(40) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (intExp2(40) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(20); +INSERT INTO t64 SELECT (intExp2(40) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(512); +INSERT INTO t64 SELECT (intExp2(40) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(513); +INSERT INTO t64 SELECT (intExp2(40) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(513); +INSERT INTO t64 SELECT (10 - toInt64(intExp2(40)) + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (10 - toInt64(intExp2(40)) + number) AS x, x, x, x, x, x, x, x FROM numbers(20); +INSERT INTO t64 SELECT (64 - toInt64(intExp2(40)) + number) AS x, x, x, x, x, x, x, x FROM numbers(512); +INSERT INTO t64 SELECT (64 - toInt64(intExp2(40)) + number) AS x, x, x, x, x, x, x, x FROM numbers(513); +INSERT INTO t64 SELECT (1 - toInt64(intExp2(40)) + number) AS x, x, x, x, x, x, x, x FROM numbers(513); +SELECT * FROM t64 WHERE i8 != t_i8; +SELECT * FROM t64 WHERE i16 != t_i16; +SELECT * FROM t64 WHERE i32 != t_i32; +SELECT * FROM t64 WHERE i64 != t_i64; + +INSERT INTO t64 SELECT (intExp2(48) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (intExp2(48) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(20); +INSERT INTO t64 SELECT (intExp2(48) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(1024); +INSERT INTO t64 SELECT (intExp2(48) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(1025); +INSERT INTO t64 SELECT (intExp2(48) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(1025); +INSERT INTO t64 SELECT (10 - toInt64(intExp2(48)) + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (10 - toInt64(intExp2(48)) + number) AS x, x, x, x, x, x, x, x FROM numbers(20); +INSERT INTO t64 SELECT (64 - toInt64(intExp2(48)) + number) AS x, x, x, x, x, x, x, x FROM numbers(1024); +INSERT INTO t64 SELECT (64 - toInt64(intExp2(48)) + number) AS x, x, x, x, x, x, x, x FROM numbers(1025); +INSERT INTO t64 SELECT (1 - toInt64(intExp2(48)) + number) AS x, x, x, x, x, x, x, x FROM numbers(1025); +SELECT * FROM t64 WHERE i8 != t_i8; +SELECT * FROM t64 WHERE i16 != t_i16; +SELECT * FROM t64 WHERE i32 != t_i32; +SELECT * FROM t64 WHERE i64 != t_i64; + +INSERT INTO t64 SELECT (intExp2(56) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (intExp2(56) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(20); +INSERT INTO t64 SELECT (intExp2(56) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(2048); +INSERT INTO t64 SELECT (intExp2(56) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(2049); +INSERT INTO t64 SELECT (intExp2(56) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(2049); +INSERT INTO t64 SELECT (10 - toInt64(intExp2(56)) + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (10 - toInt64(intExp2(56)) + number) AS x, x, x, x, x, x, 
x, x FROM numbers(20); +INSERT INTO t64 SELECT (64 - toInt64(intExp2(56)) + number) AS x, x, x, x, x, x, x, x FROM numbers(2048); +INSERT INTO t64 SELECT (64 - toInt64(intExp2(56)) + number) AS x, x, x, x, x, x, x, x FROM numbers(2049); +INSERT INTO t64 SELECT (1 - toInt64(intExp2(56)) + number) AS x, x, x, x, x, x, x, x FROM numbers(2049); +SELECT * FROM t64 WHERE i8 != t_i8; +SELECT * FROM t64 WHERE i16 != t_i16; +SELECT * FROM t64 WHERE i32 != t_i32; +SELECT * FROM t64 WHERE i64 != t_i64; + +OPTIMIZE TABLE t64 FINAL; + +SELECT * FROM t64 WHERE i8 != t_i8; +SELECT * FROM t64 WHERE i16 != t_i16; +SELECT * FROM t64 WHERE i32 != t_i32; +SELECT * FROM t64 WHERE i64 != t_i64; + +DROP TABLE t64; diff --git a/parser/testdata/00872_t64_bit_codec/ast.json b/parser/testdata/00872_t64_bit_codec/ast.json new file mode 100644 index 000000000..acd2a95f1 --- /dev/null +++ b/parser/testdata/00872_t64_bit_codec/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t64 (children 1)" + }, + { + "explain": " Identifier t64" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001343863, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/00872_t64_bit_codec/metadata.json b/parser/testdata/00872_t64_bit_codec/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00872_t64_bit_codec/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00872_t64_bit_codec/query.sql b/parser/testdata/00872_t64_bit_codec/query.sql new file mode 100644 index 000000000..affab1056 --- /dev/null +++ b/parser/testdata/00872_t64_bit_codec/query.sql @@ -0,0 +1,103 @@ +DROP TABLE IF EXISTS t64; + +CREATE TABLE t64 +( + u8 UInt8, + t_u8 UInt8 Codec(T64('bit'), LZ4), + u16 UInt16, + t_u16 UInt16 Codec(T64('bit'), LZ4), + u32 UInt32, + t_u32 UInt32 Codec(T64('bit'), LZ4), + u64 UInt64, + t_u64 UInt64 Codec(T64('bit'), LZ4) +) ENGINE MergeTree() ORDER BY tuple(); + +-- { echoOn } + +INSERT INTO t64 SELECT number AS x, x, x, x, x, x, x, x FROM numbers(1); +INSERT INTO t64 SELECT number AS x, x, x, x, x, x, x, x FROM numbers(2); +INSERT INTO t64 SELECT 42 AS x, x, x, x, x, x, x, x FROM numbers(4); + +SELECT * FROM t64 ORDER BY u64; + +INSERT INTO t64 SELECT number AS x, x, x, x, x, x, x, x FROM numbers(intExp2(8)); +INSERT INTO t64 SELECT number AS x, x, x, x, x, x, x, x FROM numbers(intExp2(9)); +SELECT * FROM t64 WHERE u8 != t_u8; +SELECT * FROM t64 WHERE u16 != t_u16; +SELECT * FROM t64 WHERE u32 != t_u32; +SELECT * FROM t64 WHERE u64 != t_u64; + +INSERT INTO t64 SELECT (intExp2(16) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (intExp2(16) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(11); +INSERT INTO t64 SELECT (intExp2(16) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(64); +INSERT INTO t64 SELECT (intExp2(16) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(65); +INSERT INTO t64 SELECT (intExp2(16) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(65); +SELECT * FROM t64 WHERE u8 != t_u8; +SELECT * FROM t64 WHERE u16 != t_u16; +SELECT * FROM t64 WHERE u32 != t_u32; +SELECT * FROM t64 WHERE u64 != t_u64; + +INSERT INTO t64 SELECT (intExp2(24) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (intExp2(24) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(11); +INSERT INTO t64 SELECT (intExp2(24) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(128); +INSERT 
INTO t64 SELECT (intExp2(24) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(129); +INSERT INTO t64 SELECT (intExp2(24) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(129); +SELECT * FROM t64 WHERE u8 != t_u8; +SELECT * FROM t64 WHERE u16 != t_u16; +SELECT * FROM t64 WHERE u32 != t_u32; +SELECT * FROM t64 WHERE u64 != t_u64; + +INSERT INTO t64 SELECT (intExp2(32) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (intExp2(32) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(20); +INSERT INTO t64 SELECT (intExp2(32) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(256); +INSERT INTO t64 SELECT (intExp2(32) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(257); +INSERT INTO t64 SELECT (intExp2(32) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(257); +SELECT * FROM t64 WHERE u8 != t_u8; +SELECT * FROM t64 WHERE u16 != t_u16; +SELECT * FROM t64 WHERE u32 != t_u32; +SELECT * FROM t64 WHERE u64 != t_u64; + +INSERT INTO t64 SELECT (intExp2(40) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (intExp2(40) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(20); +INSERT INTO t64 SELECT (intExp2(40) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(512); +INSERT INTO t64 SELECT (intExp2(40) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(513); +INSERT INTO t64 SELECT (intExp2(40) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(513); +SELECT * FROM t64 WHERE u8 != t_u8; +SELECT * FROM t64 WHERE u16 != t_u16; +SELECT * FROM t64 WHERE u32 != t_u32; +SELECT * FROM t64 WHERE u64 != t_u64; + +INSERT INTO t64 SELECT (intExp2(48) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (intExp2(48) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(20); +INSERT INTO t64 SELECT (intExp2(48) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(1024); +INSERT INTO t64 SELECT (intExp2(48) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(1025); +INSERT INTO t64 SELECT (intExp2(48) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(1025); +SELECT * FROM t64 WHERE u8 != t_u8; +SELECT * FROM t64 WHERE u16 != t_u16; +SELECT * FROM t64 WHERE u32 != t_u32; +SELECT * FROM t64 WHERE u64 != t_u64; + +INSERT INTO t64 SELECT (intExp2(56) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(10); +INSERT INTO t64 SELECT (intExp2(56) - 10 + number) AS x, x, x, x, x, x, x, x FROM numbers(20); +INSERT INTO t64 SELECT (intExp2(56) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(2048); +INSERT INTO t64 SELECT (intExp2(56) - 64 + number) AS x, x, x, x, x, x, x, x FROM numbers(2049); +INSERT INTO t64 SELECT (intExp2(56) - 1 + number) AS x, x, x, x, x, x, x, x FROM numbers(2049); +SELECT * FROM t64 WHERE u8 != t_u8; +SELECT * FROM t64 WHERE u16 != t_u16; +SELECT * FROM t64 WHERE u32 != t_u32; +SELECT * FROM t64 WHERE u64 != t_u64; + +INSERT INTO t64 SELECT (intExp2(63) + number * intExp2(62)) AS x, x, x, x, x, x, x, x FROM numbers(10); +SELECT * FROM t64 WHERE u8 != t_u8; +SELECT * FROM t64 WHERE u16 != t_u16; +SELECT * FROM t64 WHERE u32 != t_u32; +SELECT * FROM t64 WHERE u64 != t_u64; + +OPTIMIZE TABLE t64 FINAL; + +SELECT * FROM t64 WHERE u8 != t_u8; +SELECT * FROM t64 WHERE u16 != t_u16; +SELECT * FROM t64 WHERE u32 != t_u32; +SELECT * FROM t64 WHERE u64 != t_u64; + +DROP TABLE t64; diff --git a/parser/testdata/00873_t64_codec_date/ast.json b/parser/testdata/00873_t64_codec_date/ast.json new file mode 100644 index 000000000..b03f84fde --- /dev/null +++ 
b/parser/testdata/00873_t64_codec_date/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t64 (children 1)" + }, + { + "explain": " Identifier t64" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001188643, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/00873_t64_codec_date/metadata.json b/parser/testdata/00873_t64_codec_date/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00873_t64_codec_date/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00873_t64_codec_date/query.sql b/parser/testdata/00873_t64_codec_date/query.sql new file mode 100644 index 000000000..c6e21baba --- /dev/null +++ b/parser/testdata/00873_t64_codec_date/query.sql @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS t64; + +CREATE TABLE t64 +( + date16 Date, + t_date16 Date Codec(T64, ZSTD), + date_32 Date32, + t_date32 Date32 Codec(T64, ZSTD) +) ENGINE MergeTree() ORDER BY tuple(); + +INSERT INTO t64 values ('1970-01-01', '1970-01-01', '1970-01-01', '1970-01-01'); +INSERT INTO t64 values ('2149-06-06', '2149-06-06', '2149-06-06', '2149-06-06'); +INSERT INTO t64 values ('2149-06-08', '2149-06-08', '2149-06-08', '2149-06-08'); +INSERT INTO t64 values ('1950-01-01', '1950-01-01', '1950-01-01', '1950-01-01'); + +SELECT * FROM t64 ORDER BY date_32; + +SELECT * FROM t64 WHERE date16 != t_date16; +SELECT * FROM t64 WHERE date_32 != t_date32; + +OPTIMIZE TABLE t64 FINAL; + +SELECT * FROM t64 WHERE date16 != t_date16; +SELECT * FROM t64 WHERE date_32 != t_date32; + +DROP TABLE t64; diff --git a/parser/testdata/00874_issue_3495/ast.json b/parser/testdata/00874_issue_3495/ast.json new file mode 100644 index 000000000..4679c011f --- /dev/null +++ b/parser/testdata/00874_issue_3495/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00102652, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/00874_issue_3495/metadata.json b/parser/testdata/00874_issue_3495/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00874_issue_3495/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00874_issue_3495/query.sql b/parser/testdata/00874_issue_3495/query.sql new file mode 100644 index 000000000..02c70ac74 --- /dev/null +++ b/parser/testdata/00874_issue_3495/query.sql @@ -0,0 +1,21 @@ +drop table if exists t; +create table t (a Int8, val Float32) engine = Memory(); +insert into t values (1,1.1), (1,1.2), (2,2.1); + +SET enable_optimize_predicate_expression = 0; + +SELECT * FROM ( + SELECT a, t1.val as val1, t2.val as val2 + FROM t t1 + ANY LEFT JOIN t t2 USING a +) ORDER BY val1; + +SET enable_optimize_predicate_expression = 1; + +SELECT * FROM ( + SELECT a, t1.val as val1, t2.val as val2 + FROM t t1 + ANY LEFT JOIN t t2 USING a +) ORDER BY val1; + +drop table t; diff --git a/parser/testdata/00875_join_right_nulls/ast.json b/parser/testdata/00875_join_right_nulls/ast.json new file mode 100644 index 000000000..6c72fa84d --- /dev/null +++ b/parser/testdata/00875_join_right_nulls/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + 
], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001339422, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/00875_join_right_nulls/metadata.json b/parser/testdata/00875_join_right_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00875_join_right_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00875_join_right_nulls/query.sql b/parser/testdata/00875_join_right_nulls/query.sql new file mode 100644 index 000000000..df54e55af --- /dev/null +++ b/parser/testdata/00875_join_right_nulls/query.sql @@ -0,0 +1,49 @@ +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS nt; + +CREATE TABLE t (x String) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE nt (x Nullable(String)) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t (x) VALUES ('id'), ('1'); +INSERT INTO nt (x) VALUES ('id'), (NULL), ('1'); + + +SET join_use_nulls = 1; + +SELECT 'on'; + +SELECT 'n rj n', t1.x, t2.x FROM nt AS t1 RIGHT JOIN nt AS t2 ON t1.x = t2.x ORDER BY t1.x; +SELECT 'n fj n', t1.x, t2.x FROM nt AS t1 FULL JOIN nt AS t2 ON t1.x = t2.x ORDER BY t1.x; + +SELECT 't rj n', t1.x, t2.x FROM t AS t1 RIGHT JOIN nt AS t2 ON t1.x = t2.x ORDER BY t1.x; +SELECT 't fj n', t1.x, t2.x FROM t AS t1 FULL JOIN nt AS t2 ON t1.x = t2.x ORDER BY t1.x; + +SELECT 'n rj t', t1.x, t2.x FROM nt AS t1 RIGHT JOIN t AS t2 ON t1.x = t2.x ORDER BY t1.x; +SELECT 'n fj t', t1.x, t2.x FROM nt AS t1 FULL JOIN t AS t2 ON t1.x = t2.x ORDER BY t1.x; + +SELECT 'using'; + +SELECT 'n rj n', t1.x, t2.x FROM nt AS t1 RIGHT JOIN nt AS t2 USING(x) ORDER BY t1.x; +SELECT 'n fj n', t1.x, t2.x FROM nt AS t1 FULL JOIN nt AS t2 USING(x) ORDER BY t1.x; + +SELECT 't rj n', t1.x, t2.x FROM t AS t1 RIGHT JOIN nt AS t2 USING(x) ORDER BY t1.x; +SELECT 't fj n', t1.x, t2.x FROM t AS t1 FULL JOIN nt AS t2 USING(x) ORDER BY t1.x; + +SELECT 'n rj t', t1.x, t2.x FROM nt AS t1 RIGHT JOIN t AS t2 USING(x) ORDER BY t1.x; +SELECT 'n fj t', t1.x, t2.x FROM nt AS t1 FULL JOIN t AS t2 USING(x) ORDER BY t1.x; + + +INSERT INTO nt (x) SELECT NULL as x FROM numbers(1000); + +SELECT sum(isNull(t1.x)), count(t1.x) FROM nt AS t1 INNER JOIN nt AS t2 ON t1.x = t2.x; +SELECT sum(isNull(t1.x)), count(t1.x) FROM nt AS t1 LEFT JOIN nt AS t2 ON t1.x = t2.x; +SELECT sum(isNull(t1.x)), count(t1.x) FROM nt AS t1 RIGHT JOIN nt AS t2 ON t1.x = t2.x; +SELECT sum(isNull(t1.x)), count(t1.x) FROM nt AS t1 FULL JOIN nt AS t2 ON t1.x = t2.x; + +SELECT sum(isNull(t1.x)), count(t1.x) FROM nt AS t1 INNER JOIN nt AS t2 USING(x); +SELECT sum(isNull(t1.x)), count(t1.x) FROM nt AS t1 LEFT JOIN nt AS t2 USING(x); +SELECT sum(isNull(t1.x)), count(t1.x) FROM nt AS t1 RIGHT JOIN nt AS t2 USING(x); +SELECT sum(isNull(t1.x)), count(t1.x) FROM nt AS t1 FULL JOIN nt AS t2 USING(x); + +DROP TABLE t; +DROP TABLE nt; diff --git a/parser/testdata/00875_join_right_nulls_ors/ast.json b/parser/testdata/00875_join_right_nulls_ors/ast.json new file mode 100644 index 000000000..237cdf2c2 --- /dev/null +++ b/parser/testdata/00875_join_right_nulls_ors/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000939031, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/00875_join_right_nulls_ors/metadata.json b/parser/testdata/00875_join_right_nulls_ors/metadata.json new file mode 100644 index 000000000..ef120d978 
--- /dev/null +++ b/parser/testdata/00875_join_right_nulls_ors/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00875_join_right_nulls_ors/query.sql b/parser/testdata/00875_join_right_nulls_ors/query.sql new file mode 100644 index 000000000..9c427a120 --- /dev/null +++ b/parser/testdata/00875_join_right_nulls_ors/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS nt; +DROP TABLE IF EXISTS ntxy; + +CREATE TABLE t (x String) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE nt (x Nullable(String)) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE ntxy (x Nullable(String), y Nullable(String)) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t (x) VALUES ('id'), ('1'); +INSERT INTO nt (x) VALUES ('id'), (NULL), ('1'); +INSERT INTO ntxy (x, y) VALUES ('id', 'id'), (NULL, NULL), ('1', '1'); + + +SET join_use_nulls = 1; + +SELECT 'on with or'; +SELECT 'n rj n', t1.x, t2.x FROM nt AS t1 RIGHT JOIN ntxy AS t2 ON t1.x = t2.x OR t1.x = t2.y ORDER BY t1.x; +SELECT 'n a rj n', t1.x, t2.x FROM nt AS t1 ANY RIGHT JOIN ntxy AS t2 ON t1.x = t2.x OR t1.x = t2.y ORDER BY t1.x; +SELECT 'n fj n', t1.x, t2.x FROM nt AS t1 FULL JOIN ntxy AS t2 ON t1.x = t2.x OR t1.x = t2.y ORDER BY t1.x; + +SELECT 't rj n', t1.x, t2.x FROM t AS t1 RIGHT JOIN ntxy AS t2 ON t1.x = t2.x OR t1.x = t2.y ORDER BY t1.x; +SELECT 't fj n', t1.x, t2.x FROM t AS t1 FULL JOIN ntxy AS t2 ON t1.x = t2.x OR t1.x = t2.y ORDER BY t1.x; + +SELECT 'n rj t', t1.x, t2.x FROM ntxy AS t1 RIGHT JOIN t AS t2 ON t1.x = t2.x OR t1.y = t2.x ORDER BY t1.x; +SELECT 'n a rj t', t1.x, t2.x FROM ntxy AS t1 ANY RIGHT JOIN t AS t2 ON t1.x = t2.x OR t1.y = t2.x ORDER BY t1.x; +SELECT 'n fj t', t1.x, t2.x FROM ntxy AS t1 FULL JOIN t AS t2 ON t1.x = t2.x OR t2.x = t1.y ORDER BY t1.x; +SELECT 'n fj t', t1.x, t2.x FROM ntxy AS t1 FULL JOIN t AS t2 ON t2.x = t1.y OR t1.x = t2.x ORDER BY t1.x; + +DROP TABLE t; +DROP TABLE nt; +DROP TABLE ntxy; diff --git a/parser/testdata/00876_wrong_arraj_join_column/ast.json b/parser/testdata/00876_wrong_arraj_join_column/ast.json new file mode 100644 index 000000000..b6a734bd8 --- /dev/null +++ b/parser/testdata/00876_wrong_arraj_join_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery visits (children 1)" + }, + { + "explain": " Identifier visits" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001177041, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/00876_wrong_arraj_join_column/metadata.json b/parser/testdata/00876_wrong_arraj_join_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00876_wrong_arraj_join_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00876_wrong_arraj_join_column/query.sql b/parser/testdata/00876_wrong_arraj_join_column/query.sql new file mode 100644 index 000000000..0e72f9a67 --- /dev/null +++ b/parser/testdata/00876_wrong_arraj_join_column/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS visits; +CREATE TABLE visits (str String) ENGINE = MergeTree ORDER BY (str); + +SELECT 1 +FROM visits +ARRAY JOIN arrayFilter(t -> 1, arrayMap(x -> tuple(x), [42])) AS i +WHERE ((str, i.1) IN ('x', 0)); + +DROP TABLE visits; diff --git a/parser/testdata/00877_memory_limit_for_new_delete/ast.json b/parser/testdata/00877_memory_limit_for_new_delete/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/00877_memory_limit_for_new_delete/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00877_memory_limit_for_new_delete/metadata.json b/parser/testdata/00877_memory_limit_for_new_delete/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00877_memory_limit_for_new_delete/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00877_memory_limit_for_new_delete/query.sql b/parser/testdata/00877_memory_limit_for_new_delete/query.sql new file mode 100644 index 000000000..6ba7dd3bd --- /dev/null +++ b/parser/testdata/00877_memory_limit_for_new_delete/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-tsan, no-asan, no-msan, no-parallel, no-fasttest +-- Tag no-msan: memory limits don't work correctly under msan because it replaces malloc/free + +SET max_memory_usage = 1000000000; +SET max_bytes_before_external_group_by = 0; +SET max_bytes_ratio_before_external_group_by = 0; + +SELECT sum(ignore(*)) FROM ( + SELECT number, argMax(number, (number, toFixedString(toString(number), 1024))) + FROM numbers(1000000) + GROUP BY number +) -- { serverError MEMORY_LIMIT_EXCEEDED } diff --git a/parser/testdata/00878_join_unexpected_results/ast.json b/parser/testdata/00878_join_unexpected_results/ast.json new file mode 100644 index 000000000..54a00b76f --- /dev/null +++ b/parser/testdata/00878_join_unexpected_results/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001126559, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/00878_join_unexpected_results/metadata.json b/parser/testdata/00878_join_unexpected_results/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00878_join_unexpected_results/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00878_join_unexpected_results/query.sql b/parser/testdata/00878_join_unexpected_results/query.sql new file mode 100644 index 000000000..00429499f --- /dev/null +++ b/parser/testdata/00878_join_unexpected_results/query.sql @@ -0,0 +1,71 @@ +drop table if exists t; +drop table if exists s; + +create table t(a Int64, b Int64) engine = MergeTree ORDER BY tuple(); +create table s(a Int64, b Int64) engine = MergeTree ORDER BY tuple(); + +insert into t values (1,1), (2,2); +insert into s values (1,1); + +select 'join_use_nulls = 1'; +set join_use_nulls = 1; +select * from t left outer join s using (a,b) order by t.a; +select '-'; +select * from t join s using (a,b); +select '-'; +select * from t join s on (t.a=s.a and t.b=s.b); +select '-'; +select t.* from t left join s on (t.a=s.a and t.b=s.b) order by t.a; +select '-'; +select t.*, s.* from t left join s on (t.a=s.a and t.b=s.b) order by t.a; +select '-'; +select t.*, s.* from t left join s on (s.a=t.a and t.b=s.b) order by t.a; +select '-'; +select t.*, s.* from t right join s on (t.a=s.a and t.b=s.b); +select '-'; +select * from t left outer join s using (a,b) where s.a is null; +select '-'; +select * from t left outer join s on (t.a=s.a and t.b=s.b) where s.a is null; +select '-'; +select s.* from t left outer join s on (t.a=s.a and t.b=s.b) where s.a is null; +select '-'; +select t.*, s.* from t left join s on (s.a=t.a and t.b=s.b and t.a=toInt64(2)) order by t.a; +select '-'; +select t.*, s.* from t left join s on (s.a=t.a) order by t.a; 
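+-- Background (assuming standard ClickHouse outer-join semantics, not anything specific to this test): with join_use_nulls = 1, columns from the non-matched side of an outer join come back as NULL, so the "where s.a is null" probes above keep exactly the rows of t with no match in s. +-- With join_use_nulls = 0 (the next block), non-matched columns are filled with type defaults (0 for Int64) instead of NULL, which is why the equivalent probes there are commented out as TODO.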
+select '-'; +select t.*, s.* from t left join s on (t.b=toInt64(1) and s.a=t.a) where s.b=1; +select '-'; +select t.*, s.* from t left join s on (t.b=toInt64(2) and s.a=t.a) where t.b=2; + +select 'join_use_nulls = 0'; +set join_use_nulls = 0; +select * from t left outer join s using (a,b) order by t.a; +select '-'; +select * from t join s using (a,b); +select '-'; +select * from t join s on (t.a=s.a and t.b=s.b); +select '-'; +select t.* from t left join s on (t.a=s.a and t.b=s.b) order by t.a; +select '-'; +select t.*, s.* from t left join s on (t.a=s.a and t.b=s.b) order by t.a; +select '-'; +select t.*, s.* from t left join s on (s.a=t.a and t.b=s.b) order by t.a; +select '-'; +select t.*, s.* from t right join s on (t.a=s.a and t.b=s.b); +select '-'; +-- select * from t left outer join s using (a,b) where s.a is null; -- TODO +select '-'; +-- select * from t left outer join s on (t.a=s.a and t.b=s.b) where s.a is null; -- TODO +select '-'; +-- select s.* from t left outer join s on (t.a=s.a and t.b=s.b) where s.a is null; -- TODO +select '-'; +select t.*, s.* from t left join s on (s.a=t.a and t.b=s.b and t.a=toInt64(2)) order by t.a; +select '-'; +select t.*, s.* from t left join s on (s.a=t.a) order by t.a; +select '-'; +select t.*, s.* from t left join s on (t.b=toInt64(1) and s.a=t.a) where s.b=1; +select '-'; +select t.*, s.* from t left join s on (t.b=toInt64(2) and s.a=t.a) where t.b=2; + +drop table t; +drop table s; diff --git a/parser/testdata/00879_cast_to_decimal_crash/ast.json b/parser/testdata/00879_cast_to_decimal_crash/ast.json new file mode 100644 index 000000000..f8feb0786 --- /dev/null +++ b/parser/testdata/00879_cast_to_decimal_crash/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toIntervalDay (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'Nullable(Decimal(10, 10))'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001135672, + "rows_read": 10, + "bytes_read": 399 + } +} diff --git a/parser/testdata/00879_cast_to_decimal_crash/metadata.json b/parser/testdata/00879_cast_to_decimal_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00879_cast_to_decimal_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00879_cast_to_decimal_crash/query.sql b/parser/testdata/00879_cast_to_decimal_crash/query.sql new file mode 100644 index 000000000..58d728027 --- /dev/null +++ b/parser/testdata/00879_cast_to_decimal_crash/query.sql @@ -0,0 +1 @@ +select cast(toIntervalDay(1) as Nullable(Decimal(10, 10))); -- { serverError CANNOT_CONVERT_TYPE } diff --git a/parser/testdata/00880_decimal_in_key/ast.json b/parser/testdata/00880_decimal_in_key/ast.json new file mode 100644 index 000000000..fa3640d30 --- /dev/null +++ b/parser/testdata/00880_decimal_in_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + 
"elapsed": 0.001240897, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/00880_decimal_in_key/metadata.json b/parser/testdata/00880_decimal_in_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00880_decimal_in_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00880_decimal_in_key/query.sql b/parser/testdata/00880_decimal_in_key/query.sql new file mode 100644 index 000000000..44edf725a --- /dev/null +++ b/parser/testdata/00880_decimal_in_key/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (str String, dec Decimal64(8)) ENGINE = MergeTree ORDER BY str; +CREATE TABLE t2 (str String, dec Decimal64(8)) ENGINE = MergeTree ORDER BY dec; + +INSERT INTO t1 SELECT toString(number), toDecimal64(number, 8) FROM system.numbers LIMIT 1000000; +SELECT count() FROM t1; + +INSERT INTO t2 SELECT toString(number), toDecimal64(number, 8) FROM system.numbers LIMIT 1000000; +SELECT count() FROM t2; + +DROP TABLE t1; +DROP TABLE t2; diff --git a/parser/testdata/00881_unknown_identifier_in_in/ast.json b/parser/testdata/00881_unknown_identifier_in_in/ast.json new file mode 100644 index 000000000..c1be10833 --- /dev/null +++ b/parser/testdata/00881_unknown_identifier_in_in/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUInt64 (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001368095, + "rows_read": 16, + "bytes_read": 653 + } +} diff --git a/parser/testdata/00881_unknown_identifier_in_in/metadata.json b/parser/testdata/00881_unknown_identifier_in_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00881_unknown_identifier_in_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00881_unknown_identifier_in_in/query.sql b/parser/testdata/00881_unknown_identifier_in_in/query.sql new file mode 100644 index 000000000..2ce709c45 --- /dev/null +++ b/parser/testdata/00881_unknown_identifier_in_in/query.sql @@ -0,0 +1,4 @@ +SELECT toUInt64(1) x FROM (select 1) +GROUP BY 1 +HAVING x +IN ( SELECT countIf(y, z == 1) FROM (SELECT 1 y, 1 z) ); diff --git a/parser/testdata/00882_multiple_join_no_alias/ast.json b/parser/testdata/00882_multiple_join_no_alias/ast.json new file mode 100644 index 000000000..e0188e7ea --- /dev/null +++ b/parser/testdata/00882_multiple_join_no_alias/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + 
"statistics": + { + "elapsed": 0.00126944, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/00882_multiple_join_no_alias/metadata.json b/parser/testdata/00882_multiple_join_no_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00882_multiple_join_no_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00882_multiple_join_no_alias/query.sql b/parser/testdata/00882_multiple_join_no_alias/query.sql new file mode 100644 index 000000000..4a96e73c6 --- /dev/null +++ b/parser/testdata/00882_multiple_join_no_alias/query.sql @@ -0,0 +1,35 @@ +drop table if exists t; +drop table if exists s; +drop table if exists y; + +create table t(a Int64, b Int64) engine = Memory; +create table s(a Int64, b Int64) engine = Memory; +create table y(a Int64, b Int64) engine = Memory; + +insert into t values (1,1), (2,2); +insert into s values (1,1); +insert into y values (1,1); + +select s.a, s.a, s.b as s_b, s.b from t +left join s on s.a = t.a +left join y on s.b = y.b +order by t.a, s.a, s.b; + +select max(s.a) from t +left join s on s.a = t.a +left join y on s.b = y.b +group by t.a order by t.a; + +select t.a, t.a as t_a, s.a, s.a as s_a, y.a, y.a as y_a from t +left join s on t.a = s.a +left join y on y.b = s.b +order by t.a, s.a, y.a; + +select t.a, t.a as t_a, max(s.a) from t +left join s on t.a = s.a +left join y on y.b = s.b +group by t.a order by t.a; + +drop table t; +drop table s; +drop table y; diff --git a/parser/testdata/00897_flatten/ast.json b/parser/testdata/00897_flatten/ast.json new file mode 100644 index 000000000..5d32ed991 --- /dev/null +++ b/parser/testdata/00897_flatten/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function flatten (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[Array_[Array_[UInt64_1, UInt64_2, UInt64_3], Array_[UInt64_4, UInt64_5]], Array_[Array_[UInt64_6], Array_[UInt64_7, UInt64_8]]]" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001353328, + "rows_read": 9, + "bytes_read": 475 + } +} diff --git a/parser/testdata/00897_flatten/metadata.json b/parser/testdata/00897_flatten/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00897_flatten/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00897_flatten/query.sql b/parser/testdata/00897_flatten/query.sql new file mode 100644 index 000000000..45d1a225a --- /dev/null +++ b/parser/testdata/00897_flatten/query.sql @@ -0,0 +1,8 @@ +SELECT flatten(arrayJoin([[[1, 2, 3], [4, 5]], [[6], [7, 8]]])); +SELECT arrayFlatten(arrayJoin([[[[]], [[1], [], [2, 3]]], [[[4]]]])); +SELECT flatten(arrayMap(x -> arrayMap(y -> arrayMap(z -> range(x), range(x)), range(x)), range(number))) FROM numbers(6); +SELECT flatten(arrayMap(x -> arrayMap(y -> arrayMap(z -> range(z), range(y)), range(x)), range(number))) FROM numbers(6); +SELECT flatten(arrayMap(x -> arrayMap(x -> arrayMap(x -> range(x), range(x)), range(x)), range(number))) FROM numbers(6) SETTINGS enable_analyzer=1; +SELECT arrayFlatten([[[1, 2, 3], 
[4, 5]], [[6], [7, 8]]]); +SELECT flatten([[[]]]); +SELECT arrayFlatten([]); diff --git a/parser/testdata/00898_quantile_timing_parameter_check/ast.json b/parser/testdata/00898_quantile_timing_parameter_check/ast.json new file mode 100644 index 000000000..1b7b3b408 --- /dev/null +++ b/parser/testdata/00898_quantile_timing_parameter_check/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantileTiming (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_0.5" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001194993, + "rows_read": 15, + "bytes_read": 594 + } +} diff --git a/parser/testdata/00898_quantile_timing_parameter_check/metadata.json b/parser/testdata/00898_quantile_timing_parameter_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00898_quantile_timing_parameter_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00898_quantile_timing_parameter_check/query.sql b/parser/testdata/00898_quantile_timing_parameter_check/query.sql new file mode 100644 index 000000000..ce1f3e897 --- /dev/null +++ b/parser/testdata/00898_quantile_timing_parameter_check/query.sql @@ -0,0 +1,2 @@ +SELECT quantileTiming(0.5)(number) FROM numbers(10); +SELECT quantileTiming(0.5)(number / 2) FROM numbers(10); diff --git a/parser/testdata/00899_long_attach_memory_limit/ast.json b/parser/testdata/00899_long_attach_memory_limit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00899_long_attach_memory_limit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00899_long_attach_memory_limit/metadata.json b/parser/testdata/00899_long_attach_memory_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00899_long_attach_memory_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00899_long_attach_memory_limit/query.sql b/parser/testdata/00899_long_attach_memory_limit/query.sql new file mode 100644 index 000000000..d4aa2a0eb --- /dev/null +++ b/parser/testdata/00899_long_attach_memory_limit/query.sql @@ -0,0 +1,12 @@ +-- Tags: long, no-debug, no-parallel, no-fasttest, no-msan, no-tsan +-- This test is slow under MSan or TSan. 
+ +DROP TABLE IF EXISTS index_memory; +CREATE TABLE index_memory (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS index_granularity = 1; +INSERT INTO index_memory SELECT * FROM system.numbers LIMIT 5000000; +SELECT count() FROM index_memory; +DETACH TABLE index_memory; +SET max_memory_usage = 39000000; +ATTACH TABLE index_memory; +SELECT count() FROM index_memory; +DROP TABLE index_memory; diff --git a/parser/testdata/00900_entropy_shard/ast.json b/parser/testdata/00900_entropy_shard/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00900_entropy_shard/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00900_entropy_shard/metadata.json b/parser/testdata/00900_entropy_shard/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00900_entropy_shard/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00900_entropy_shard/query.sql b/parser/testdata/00900_entropy_shard/query.sql new file mode 100644 index 000000000..10cc5481d --- /dev/null +++ b/parser/testdata/00900_entropy_shard/query.sql @@ -0,0 +1,4 @@ +-- Tags: shard + +SELECT round(entropy(number), 6) FROM remote('127.0.0.{1,2}', numbers(256)); +SELECT entropy(rand64()) > 8 FROM remote('127.0.0.{1,2}', numbers(256)); diff --git a/parser/testdata/00901_joint_entropy/ast.json b/parser/testdata/00901_joint_entropy/ast.json new file mode 100644 index 000000000..aa22c76a7 --- /dev/null +++ b/parser/testdata/00901_joint_entropy/ast.json @@ -0,0 +1,388 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function minus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function max (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function min (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal Float64_0.000001" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 8)" + }, + { + "explain": " Function entropy (alias e1) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Function log2 (alias e2) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Function log2 (alias e3) (children 1)" + 
}, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function uniq (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Function entropy (alias e4) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function entropy (alias e5) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function entropy (alias e6) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal 'hello'" + }, + { + "explain": " Literal 'world'" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Function entropy (alias e7) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function minus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function entropy (alias e8) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 
1)" + }, + { + "explain": " ExpressionList (children 8)" + }, + { + "explain": " Identifier e1" + }, + { + "explain": " Identifier e2" + }, + { + "explain": " Identifier e3" + }, + { + "explain": " Identifier e4" + }, + { + "explain": " Identifier e5" + }, + { + "explain": " Identifier e6" + }, + { + "explain": " Identifier e7" + }, + { + "explain": " Identifier e8" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 122, + + "statistics": + { + "elapsed": 0.001678139, + "rows_read": 122, + "bytes_read": 5543 + } +} diff --git a/parser/testdata/00901_joint_entropy/metadata.json b/parser/testdata/00901_joint_entropy/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00901_joint_entropy/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00901_joint_entropy/query.sql b/parser/testdata/00901_joint_entropy/query.sql new file mode 100644 index 000000000..1bf7bc787 --- /dev/null +++ b/parser/testdata/00901_joint_entropy/query.sql @@ -0,0 +1,2 @@ +SELECT max(x) - min(x) < 0.000001 FROM (WITH entropy(number % 2, number % 5) AS e1, log2(10) AS e2, log2(uniq(number % 2, number % 5)) AS e3, entropy(number) AS e4, entropy(toString(number)) AS e5, entropy(number % 2 ? 'hello' : 'world', range(number % 5)) AS e6, entropy(number, number + 1, number - 1) AS e7, entropy(([[number], [number, number]], [[], [number]])) AS e8 SELECT arrayJoin([e1, e2, e3, e4, e5, e6, e7, e8]) AS x FROM numbers(10)); +SELECT abs(entropy(number) - 8) < 0.000001, abs(entropy(number % 64, number % 32) - 6) < 0.000001 FROM numbers(256); diff --git a/parser/testdata/00902_entropy/ast.json b/parser/testdata/00902_entropy/ast.json new file mode 100644 index 000000000..3028249c0 --- /dev/null +++ b/parser/testdata/00902_entropy/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery defaults (children 1)" + }, + { + "explain": " Identifier defaults" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001024726, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00902_entropy/metadata.json b/parser/testdata/00902_entropy/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00902_entropy/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00902_entropy/query.sql b/parser/testdata/00902_entropy/query.sql new file mode 100644 index 000000000..6e1364d5e --- /dev/null +++ b/parser/testdata/00902_entropy/query.sql @@ -0,0 +1,46 @@ +DROP TABLE IF EXISTS defaults; +CREATE TABLE IF NOT EXISTS defaults +( + vals String +) ENGINE = Memory; + +insert into defaults values ('ba'), ('aa'), ('ba'), ('b'), ('ba'), ('aa'); +select val < 1.5 and val > 1.459 from (select entropy(vals) as val from defaults); + + +DROP TABLE IF EXISTS defaults; +CREATE TABLE IF NOT EXISTS defaults +( + vals UInt64 +) ENGINE = Memory; +insert into defaults values (0), (0), (1), (0), (0), (0), (1), (2), (3), (5), (3), (1), (1), (4), (5), (2); +select val < 2.4 and val > 2.3393 from (select entropy(vals) as val from defaults); + + +DROP TABLE IF EXISTS defaults; +CREATE TABLE IF NOT EXISTS defaults +( + vals UInt32 +) ENGINE = Memory; 
+insert into defaults values (0), (0), (1), (0), (0), (0), (1), (2), (3), (5), (3), (1), (1), (4), (5), (2); +select val < 2.4 and val > 2.3393 from (select entropy(vals) as val from defaults); + + +DROP TABLE IF EXISTS defaults; +CREATE TABLE IF NOT EXISTS defaults +( + vals Int32 +) ENGINE = Memory; +insert into defaults values (0), (0), (-1), (0), (0), (0), (-1), (2), (3), (5), (3), (-1), (-1), (4), (5), (2); +select val < 2.4 and val > 2.3393 from (select entropy(vals) as val from defaults); + + +DROP TABLE IF EXISTS defaults; +CREATE TABLE IF NOT EXISTS defaults +( + vals DateTime +) ENGINE = Memory; +insert into defaults values (toDateTime('2016-06-15 23:00:00')), (toDateTime('2016-06-15 23:00:00')), (toDateTime('2016-06-15 23:00:00')), (toDateTime('2016-06-15 23:00:00')), (toDateTime('2016-06-15 24:00:00')), (toDateTime('2016-06-15 24:00:00')), (toDateTime('2016-06-15 24:00:00')), (toDateTime('2017-06-15 24:00:00')), (toDateTime('2017-06-15 24:00:00')), (toDateTime('2018-06-15 24:00:00')), (toDateTime('2018-06-15 24:00:00')), (toDateTime('2019-06-15 24:00:00')); +select val < 2.189 and val > 2.1886 from (select entropy(vals) as val from defaults); + +DROP TABLE defaults; diff --git a/parser/testdata/00903_array_with_constant_function/ast.json b/parser/testdata/00903_array_with_constant_function/ast.json new file mode 100644 index 000000000..6a50f7111 --- /dev/null +++ b/parser/testdata/00903_array_with_constant_function/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function arrayWithConstant (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal 'qwerty'" + }, + { + "explain": " Function arrayWithConstant (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Function arrayWithConstant (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001176951, + "rows_read": 16, + "bytes_read": 602 + } +} diff --git a/parser/testdata/00903_array_with_constant_function/metadata.json b/parser/testdata/00903_array_with_constant_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00903_array_with_constant_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00903_array_with_constant_function/query.sql b/parser/testdata/00903_array_with_constant_function/query.sql new file mode 100644 index 000000000..f434bbb4d --- /dev/null +++ b/parser/testdata/00903_array_with_constant_function/query.sql @@ -0,0 +1 @@ +select arrayWithConstant(2, 'qwerty'), arrayWithConstant(0, -1), arrayWithConstant(1, 1) diff --git a/parser/testdata/00904_array_with_constant_2/ast.json b/parser/testdata/00904_array_with_constant_2/ast.json new file mode 100644 index 000000000..486e9b6a5 --- /dev/null +++ b/parser/testdata/00904_array_with_constant_2/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayWithConstant (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001282493, + "rows_read": 14, + "bytes_read": 554 + } +} diff --git a/parser/testdata/00904_array_with_constant_2/metadata.json b/parser/testdata/00904_array_with_constant_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00904_array_with_constant_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00904_array_with_constant_2/query.sql b/parser/testdata/00904_array_with_constant_2/query.sql new file mode 100644 index 000000000..6e578a12b --- /dev/null +++ b/parser/testdata/00904_array_with_constant_2/query.sql @@ -0,0 +1,4 @@ +SELECT arrayWithConstant(3, number) FROM numbers(10); +SELECT arrayWithConstant(number, 'Hello') FROM numbers(10); +SELECT arrayWithConstant(number % 3, number % 2 ? 'Hello' : NULL) FROM numbers(10); +SELECT arrayWithConstant(number, []) FROM numbers(10); diff --git a/parser/testdata/00905_compile_expressions_compare_big_dates/ast.json b/parser/testdata/00905_compile_expressions_compare_big_dates/ast.json new file mode 100644 index 000000000..a9616e836 --- /dev/null +++ b/parser/testdata/00905_compile_expressions_compare_big_dates/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001315155, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00905_compile_expressions_compare_big_dates/metadata.json b/parser/testdata/00905_compile_expressions_compare_big_dates/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00905_compile_expressions_compare_big_dates/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00905_compile_expressions_compare_big_dates/query.sql b/parser/testdata/00905_compile_expressions_compare_big_dates/query.sql new file mode 100644 index 000000000..beff4cd03 --- /dev/null +++ b/parser/testdata/00905_compile_expressions_compare_big_dates/query.sql @@ -0,0 +1,14 @@ +SET compile_expressions = 1; +SET min_count_to_compile_expression = 1; + +DROP TABLE IF EXISTS foo_c; + +CREATE TABLE foo_c(d DateTime) ENGINE = Memory; + +INSERT INTO foo_c VALUES ('2019-02-06 01:01:01'),('2019-02-07 01:01:01'),('2019-02-08 01:01:01'),('2021-02-06 01:01:01'),('2093-05-29 01:01:01'),('2100-06-06 01:01:01'),('2100-10-14 01:01:01'),('2100-11-01 01:01:01'),('2100-11-15 01:01:01'),('2100-11-30 01:01:01'),('2100-12-11 01:01:01'),('2100-12-21 01:01:01'); + +SELECT toDate(d) AS dd FROM foo_c WHERE (dd >= '2019-02-06') AND (toDate(d) <= toDate('2019-08-09')) GROUP BY dd ORDER BY dd; + +SELECT toDate(d) FROM foo_c WHERE (d > toDate('2019-02-10')) AND (d <= toDate('2022-01-01')) ORDER BY d; + +DROP TABLE IF 
EXISTS foo_c; diff --git a/parser/testdata/00905_field_with_aggregate_function_state/ast.json b/parser/testdata/00905_field_with_aggregate_function_state/ast.json new file mode 100644 index 000000000..8b1ffafd0 --- /dev/null +++ b/parser/testdata/00905_field_with_aggregate_function_state/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Subquery (alias s) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumState (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumMerge (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier s" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001191696, + "rows_read": 16, + "bytes_read": 634 + } +} diff --git a/parser/testdata/00905_field_with_aggregate_function_state/metadata.json b/parser/testdata/00905_field_with_aggregate_function_state/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00905_field_with_aggregate_function_state/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00905_field_with_aggregate_function_state/query.sql b/parser/testdata/00905_field_with_aggregate_function_state/query.sql new file mode 100644 index 000000000..b0470ac99 --- /dev/null +++ b/parser/testdata/00905_field_with_aggregate_function_state/query.sql @@ -0,0 +1,4 @@ +with (select sumState(1)) as s select sumMerge(s); +with (select sumState(number) from (select * from system.numbers limit 10)) as s select sumMerge(s); +with (select quantileState(0.5)(number) from (select * from system.numbers limit 10)) as s select quantileMerge(s); + diff --git a/parser/testdata/00906_low_cardinality_cache/ast.json b/parser/testdata/00906_low_cardinality_cache/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00906_low_cardinality_cache/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00906_low_cardinality_cache/metadata.json b/parser/testdata/00906_low_cardinality_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00906_low_cardinality_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00906_low_cardinality_cache/query.sql b/parser/testdata/00906_low_cardinality_cache/query.sql new file mode 100644 index 000000000..81574ac6f --- /dev/null +++ b/parser/testdata/00906_low_cardinality_cache/query.sql @@ -0,0 +1,8 @@ +-- Tags: long + +SET max_rows_to_read = '100M', max_execution_time = 600; +drop table if exists lc_00906; +create table lc_00906 (b LowCardinality(String)) engine=MergeTree order by b SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi', vertical_merge_algorithm_min_rows_to_activate=100000000; +insert into lc_00906 select '0123456789' from numbers(100000000) SETTINGS max_insert_threads=6, max_threads=4; +select count(), b from lc_00906 group by b; +drop 
table if exists lc_00906; diff --git a/parser/testdata/00906_low_cardinality_const_argument/ast.json b/parser/testdata/00906_low_cardinality_const_argument/ast.json new file mode 100644 index 000000000..b736d856a --- /dev/null +++ b/parser/testdata/00906_low_cardinality_const_argument/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'a'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001107768, + "rows_read": 9, + "bytes_read": 355 + } +} diff --git a/parser/testdata/00906_low_cardinality_const_argument/metadata.json b/parser/testdata/00906_low_cardinality_const_argument/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00906_low_cardinality_const_argument/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00906_low_cardinality_const_argument/query.sql b/parser/testdata/00906_low_cardinality_const_argument/query.sql new file mode 100644 index 000000000..831a4534f --- /dev/null +++ b/parser/testdata/00906_low_cardinality_const_argument/query.sql @@ -0,0 +1,2 @@ +select materialize(toLowCardinality('a')); + diff --git a/parser/testdata/00906_low_cardinality_rollup/ast.json b/parser/testdata/00906_low_cardinality_rollup/ast.json new file mode 100644 index 000000000..7c98c6794 --- /dev/null +++ b/parser/testdata/00906_low_cardinality_rollup/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery lc (children 1)" + }, + { + "explain": " Identifier lc" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00129689, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/00906_low_cardinality_rollup/metadata.json b/parser/testdata/00906_low_cardinality_rollup/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00906_low_cardinality_rollup/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00906_low_cardinality_rollup/query.sql b/parser/testdata/00906_low_cardinality_rollup/query.sql new file mode 100644 index 000000000..125529ad3 --- /dev/null +++ b/parser/testdata/00906_low_cardinality_rollup/query.sql @@ -0,0 +1,12 @@ +DROP TABLE if exists lc; +CREATE TABLE lc (a LowCardinality(Nullable(String)), b LowCardinality(Nullable(String))) ENGINE = MergeTree order by tuple(); +INSERT INTO lc VALUES ('a', 'b'); +INSERT INTO lc VALUES ('c', 'd'); + +SELECT a, b, count(a) FROM lc GROUP BY a, b WITH ROLLUP ORDER BY a, b; +SELECT a, count(a) FROM lc GROUP BY a WITH ROLLUP ORDER BY a; + +SELECT a, b, count(a) FROM lc GROUP BY a, b WITH CUBE ORDER BY a, b; +SELECT a, count(a) FROM lc GROUP BY a WITH CUBE ORDER BY a; + +DROP TABLE if exists lc; diff --git a/parser/testdata/00907_set_index_with_nullable_and_low_cardinality/ast.json b/parser/testdata/00907_set_index_with_nullable_and_low_cardinality/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/00907_set_index_with_nullable_and_low_cardinality/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00907_set_index_with_nullable_and_low_cardinality/metadata.json b/parser/testdata/00907_set_index_with_nullable_and_low_cardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00907_set_index_with_nullable_and_low_cardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00907_set_index_with_nullable_and_low_cardinality/query.sql b/parser/testdata/00907_set_index_with_nullable_and_low_cardinality/query.sql new file mode 100644 index 000000000..9ef5662c1 --- /dev/null +++ b/parser/testdata/00907_set_index_with_nullable_and_low_cardinality/query.sql @@ -0,0 +1,68 @@ + +drop table if exists nullable_set_index; +create table nullable_set_index (a UInt64, b Nullable(String), INDEX b_index b TYPE set(0) GRANULARITY 8192) engine = MergeTree order by a; +insert into nullable_set_index values (1, 'a'); +insert into nullable_set_index values (2, 'b'); +select * from nullable_set_index where b = 'a'; +select '-'; +select * from nullable_set_index where b = 'b'; +select '-'; +select * from nullable_set_index where b = 'c'; +select '--'; + +drop table if exists nullable_set_index; +create table nullable_set_index (a UInt64, b Nullable(String), INDEX b_index b TYPE set(1) GRANULARITY 8192) engine = MergeTree order by a; +insert into nullable_set_index values (1, 'a'); +insert into nullable_set_index values (2, 'b'); +select * from nullable_set_index where b = 'a'; +select '-'; +select * from nullable_set_index where b = 'b'; +select '-'; +select * from nullable_set_index where b = 'c'; +select '--'; + +drop table if exists nullable_set_index; +create table nullable_set_index (a UInt64, b Nullable(String), INDEX b_index b TYPE set(0) GRANULARITY 8192) engine = MergeTree order by a; +insert into nullable_set_index values (1, 'a'), (2, 'b'); +select * from nullable_set_index where b = 'a'; +select '-'; +select * from nullable_set_index where b = 'b'; +select '-'; +select * from nullable_set_index where b = 'c'; +select '----'; + + +drop table if exists nullable_set_index; +create table nullable_set_index (a UInt64, b LowCardinality(Nullable(String)), INDEX b_index b TYPE set(0) GRANULARITY 8192) engine = MergeTree order by a; +insert into nullable_set_index values (1, 'a'); +insert into nullable_set_index values (2, 'b'); +select * from nullable_set_index where b = 'a'; +select '-'; +select * from nullable_set_index where b = 'b'; +select '-'; +select * from nullable_set_index where b = 'c'; +select '--'; + +drop table if exists nullable_set_index; +create table nullable_set_index (a UInt64, b LowCardinality(Nullable(String)), INDEX b_index b TYPE set(1) GRANULARITY 8192) engine = MergeTree order by a; +insert into nullable_set_index values (1, 'a'); +insert into nullable_set_index values (2, 'b'); +select * from nullable_set_index where b = 'a'; +select '-'; +select * from nullable_set_index where b = 'b'; +select '-'; +select * from nullable_set_index where b = 'c'; +select '--'; + +drop table if exists nullable_set_index; +create table nullable_set_index (a UInt64, b LowCardinality(Nullable(String)), INDEX b_index b TYPE set(0) GRANULARITY 8192) engine = MergeTree order by a; +insert into nullable_set_index values (1, 'a'), (2, 'b'); +select * from nullable_set_index where b = 'a'; +select '-'; +select * from nullable_set_index where b = 'b'; +select '-'; +select * from nullable_set_index 
where b = 'c'; +select '----'; + +drop table if exists nullable_set_index; + diff --git a/parser/testdata/00907_set_index_with_nullable_and_low_cardinality_bug/ast.json b/parser/testdata/00907_set_index_with_nullable_and_low_cardinality_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00907_set_index_with_nullable_and_low_cardinality_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00907_set_index_with_nullable_and_low_cardinality_bug/metadata.json b/parser/testdata/00907_set_index_with_nullable_and_low_cardinality_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00907_set_index_with_nullable_and_low_cardinality_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00907_set_index_with_nullable_and_low_cardinality_bug/query.sql b/parser/testdata/00907_set_index_with_nullable_and_low_cardinality_bug/query.sql new file mode 100644 index 000000000..336d9984e --- /dev/null +++ b/parser/testdata/00907_set_index_with_nullable_and_low_cardinality_bug/query.sql @@ -0,0 +1,18 @@ + +drop table if exists null_lc_set_index; + +CREATE TABLE null_lc_set_index ( + timestamp DateTime, + action LowCardinality(Nullable(String)), + user LowCardinality(Nullable(String)), + INDEX test_user_idx (user) TYPE set(0) GRANULARITY 8192 +) ENGINE=MergeTree + PARTITION BY toYYYYMMDD(timestamp) + ORDER BY (timestamp, action, cityHash64(user)) SETTINGS allow_nullable_key = 1; +INSERT INTO null_lc_set_index VALUES (1550883010, 'subscribe', 'alice'); +INSERT INTO null_lc_set_index VALUES (1550883020, 'follow', 'bob'); + +SELECT action, user FROM null_lc_set_index WHERE user = 'alice'; + +drop table if exists null_lc_set_index; + diff --git a/parser/testdata/00908_analyze_query/ast.json b/parser/testdata/00908_analyze_query/ast.json new file mode 100644 index 000000000..8d7a55500 --- /dev/null +++ b/parser/testdata/00908_analyze_query/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery a (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001034147, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/00908_analyze_query/metadata.json b/parser/testdata/00908_analyze_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00908_analyze_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00908_analyze_query/query.sql b/parser/testdata/00908_analyze_query/query.sql new file mode 100644 index 000000000..6ef1cf483 --- /dev/null +++ b/parser/testdata/00908_analyze_query/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS a; +CREATE TABLE a (a UInt8, b UInt8) ENGINE MergeTree ORDER BY a; + +EXPLAIN SYNTAX SELECT * FROM a; + +DROP TABLE a; diff --git a/parser/testdata/00909_arrayEnumerateUniq/ast.json b/parser/testdata/00909_arrayEnumerateUniq/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00909_arrayEnumerateUniq/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00909_arrayEnumerateUniq/metadata.json b/parser/testdata/00909_arrayEnumerateUniq/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00909_arrayEnumerateUniq/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00909_arrayEnumerateUniq/query.sql 
b/parser/testdata/00909_arrayEnumerateUniq/query.sql new file mode 100644 index 000000000..e952eac2e --- /dev/null +++ b/parser/testdata/00909_arrayEnumerateUniq/query.sql @@ -0,0 +1,318 @@ +-- env SQL_FUZZY_FUNCTIONS=arrayEnumerateUniqRanked,arrayEnumerateDenseRanked SQL_FUZZY_RUNS=1000 clickhouse-test fuzzy + +SELECT arrayEnumerateUniq( [1,1,2,2,1,1], [1,2,1,2,2,2]); +SELECT arrayEnumerateUniqRanked(1, [1,1,2,2,1,1], 1, [1,2,1,2,2,2],1); + +-- x=[1,2,1] +-- x2=['a','b','c'] +-- y=[[1,2,3],[2,2,1],[3]] +-- y2=[['a','b','a'],['a','b','a'],['c']] +-- z=[[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]] + +SELECT 'same as arrayEnumerateUniq:'; +SELECT '[1,1,2] ='; +SELECT arrayEnumerateUniqRanked(1, [1,2,1], 1); -- f(1, x,1) =[1,1,2] -- 1 2 1 +SELECT arrayEnumerateUniq( [1,2,1]); +SELECT '[1,1,1] ='; +SELECT arrayEnumerateUniqRanked(1, ['a','b','c'], 1); -- f(1, x2,1) =[1,1,1] -- a b c +SELECT arrayEnumerateUniq( ['a','b','c']); +SELECT '[1,1,1] ='; +SELECT arrayEnumerateUniqRanked(1, [1,2,1], 1, ['a','b','c'], 1); -- f(1, x,1,x2,1)=[1,1,1] -- (1,a) (2,b) (1,c) +SELECT arrayEnumerateUniq( [1,2,1], ['a','b','c']); +SELECT '[1,1,1] ='; +SELECT arrayEnumerateUniqRanked(1, [1,2,1], 1, [[1,2,3],[2,2,1],[3]], 1); -- f(1, x,1,y,1) =[1,1,1] -- (1,[1,2,3]) (2,[2,2,1]) (1,[3]) +SELECT arrayEnumerateUniq( [1,2,1], [[1,2,3],[2,2,1],[3]]); +SELECT '[1,2,1] ='; +SELECT arrayEnumerateUniqRanked(1, [['a','b','a'],['a','b','a'],['c']], 1); -- f(1, y2,1) =[1,2,1] -- [a,b,a] [a,b,a] [c] +SELECT arrayEnumerateUniq( [['a','b','a'],['a','b','a'],['c']]); +SELECT '[1,2,1] ='; +SELECT arrayEnumerateUniqRanked(1, [[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]],1); -- f(1, z,1) =[1,2,1] -- [[1,2,3],[1,2,3],[1,2,3]] [[1,2,3],[1,2,3],[1,2,3]] [[1,2]] +SELECT arrayEnumerateUniq( [[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]]); + +select '1,..,2'; +-- count occurrences globally across the entire column value, looking two levels deep, +-- the answer is [[,,],[,,],[]] +SELECT '[[1,1,1],[2,3,2],[2]] ='; +SELECT arrayEnumerateUniqRanked(1, [[1,2,3],[2,2,1],[3]], 2); -- f(1, y,2) =[[1,1,1],[2,3,2],[2]] -- 1 2 3 2 2 1 3 +SELECT '[[1,1,2],[3,2,4],[1]] ='; +SELECT arrayEnumerateUniqRanked(1, [['a','b','a'],['a','b','a'],['c']], 2); -- f(1, y2,2) =[[1,1,2],[3,2,4],[1]] -- a b a a b a c +SELECT '[[1,2,3],[4,5,6],[1]] ='; +SELECT arrayEnumerateUniqRanked(1, [[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]], 2); -- f(1, z,2) =[[1,2,3],[4,5,6],[1]] -- [1,2,3] [1,2,3] [1,2,3] [1,2,3] [1,2,3] [1,2,3] [1,2] +SELECT '[[1,1,1],[1,2,2],[1]] ='; +SELECT arrayEnumerateUniqRanked(1, [[1,2,3],[2,2,1],[3]], 2, [['a','b','a'],['a','b','a'],['c']], 2); -- f(1, y,2,y2,2)=[[1,1,1],[1,2,2],[1]] -- (1,a) (2,b) (3,a) (2,a) (2,b) (1,a) (3,c) +SELECT '[[1,1,1],[2,3,2],[1]] ='; +SELECT arrayEnumerateUniqRanked(1, [[1,2,3],[2,2,1],[3]], 2, [[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]], 2); -- f(1, y,2,z, 2)=[[1,1,1],[2,3,2],[1]] -- (1,[1,2,3]) (2,[1,2,3]) (3,[1,2,3]) (2,[1,2,3]) (2,[1,2,3]) (1,[1,2,3]) (3,[1,2]) + + +-- count occurrences globally across the entire column value, looking two levels deep, +-- "one-dimensional" arrays are mentally stretched into "two-dimensional" ones, the answer is [[,,],[,,],[]] +SELECT '[[1,1,1],[1,2,1],[2]] ='; +SELECT arrayEnumerateUniqRanked(1, [1,2,1],1,[[1,2,3],[2,2,1],[3]],2); -- f(1, x,1,y,2)=[[1,1,1],[1,2,1],[2]] -- (1,1) (1,2) (1,3) (2,2) (2,2) (2,1) (1,3) +SELECT '[[1,1,1],[1,2,1],[1]] ='; +SELECT arrayEnumerateUniqRanked(1, 
['a','b','c'],1,[[1,2,3],[2,2,1],[3]],2); -- f(1, x2,1,y,2)=[[1,1,1],[1,2,1],[1]] -- (a,1) (a,2) (a,3) (b,2) (b,2) (b,1) (c,3) +SELECT '[[1,1,2],[1,1,2],[1]] ='; +SELECT arrayEnumerateUniqRanked(1, [1,2,1],1,[['a','b','a'],['a','b','a'],['c']],2); -- f(1, x,1,y2,2)=[[1,1,2],[1,1,2],[1]] -- (1,a) (1,b) (1,a) (2,a) (2,b) (2,a) (1,c) +SELECT '[[1,1,2],[1,1,2],[1]] ='; +SELECT arrayEnumerateUniqRanked(1, ['a','b','c'],1,[['a','b','a'],['a','b','a'],['c']],2); -- f(1, x2,1,y2,2)=[[1,1,2],[1,1,2],[1]] -- (a,a) (a,b) (a,a) (b,a) (b,b) (b,a) (c,c) +SELECT '[[1,2,3],[1,2,3],[1]] ='; +SELECT arrayEnumerateUniqRanked(1, [1,2,1],1,[[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]],2); -- f(1, x,1,z,2)=[[1,2,3],[1,2,3],[1]] -- (1,[1,2,3]) (1,[1,2,3]) (1,[1,2,3]) (2,[1,2,3]) (2,[1,2,3]) (2,[1,2,3]) (1,[1,2]) +SELECT '[[1,1,1],[1,2,1],[1]] ='; +SELECT arrayEnumerateUniqRanked(1, [1,2,1],1,['a','b','c'],1,[[1,2,3],[2,2,1],[3]],2); -- f(1, x,1,x2,1,y,2)=[[1,1,1],[1,2,1],[1]] -- (1,a,1) (1,a,2) (1,a,3) (2,b,2) (2,b,2) (2,b,1) (1,c,3) +SELECT '[[1,1,1],[1,1,1],[1]] ='; +SELECT arrayEnumerateUniqRanked(1, [1,2,1],1,[[1,2,3],[2,2,1],[3]],2,[['a','b','a'],['a','b','a'],['c']],2); -- f(1, x,1,y,2,y2,2)=[[1,1,1],[1,1,1],[1]] -- (1,1,a) (1,2,b) (1,3,a) (2,2,a) (2,2,b) (2,1,a) (1,3,c) +SELECT '[[1,1,2],[1,1,2],[1]] ='; +SELECT arrayEnumerateUniqRanked(1, [[1,2,3],[2,2,1],[3]],1,[['a','b','a'],['a','b','a'],['c']],2); -- f(1, y,1,y2,2)=[[1,1,2],[1,1,2],[1]] -- ([1,2,3],a) ([1,2,3],b) ([1,2,3],a) ([2,2,1],a) ([2,2,1],b) ([2,2,1],a) ([3],c) +SELECT '[[1,1,1],[2,3,2],[1]] ='; +SELECT arrayEnumerateUniqRanked(1, [[1,2,3],[2,2,1],[3]],2,[['a','b','a'],['a','b','a'],['c']],1); -- f(1, y,2,y2,1)=[[1,1,1],[2,3,2],[1]] -- (1,[a,b,a]) (2,[a,b,a]) (3,[a,b,a]) (2,[a,b,a]) (2,[a,b,a]) (1,[a,b,a]) (3,[c]) + +select '2,..,2'; + +-- count occurrences within each first-level array separately, looking two levels deep, +-- duplicating the logic of arrayMap( aEU), the answer is [[,,],[,,],[]] +SELECT '[[1,1,1],[1,2,1],[1]] ='; +SELECT arrayEnumerateUniqRanked(2, [[1,2,3],[2,2,1],[3]], 2); -- f(2, y,2)=[[1,1,1],[1,2,1],[1]] -- 1 2 3, 2 2 1, 3 +SELECT '[[1,1,2],[1,1,2],[1]] ='; +SELECT arrayEnumerateUniqRanked(2, [['a','b','a'],['a','b','a'],['c']], 2); -- f(2, y2,2)=[[1,1,2],[1,1,2],[1]] -- a b a, a b a, c +SELECT '[[1,1,1],[1,1,1],[1]] ='; +SELECT arrayEnumerateUniqRanked(2, [[1,2,3],[2,2,1],[3]], 2, [['a','b','a'],['a','b','a'],['c']], 2); -- f(2, y,2,y2,2)=[[1,1,1],[1,1,1],[1]] -- (1,a) (2,b) (3,a), (2,a) (2,b) (1,a), (3,c) +SELECT '[[1,1,1],[1,2,1],[1]] ='; +SELECT arrayEnumerateUniqRanked(2, [[1,2,3],[2,2,1],[3]], 2, [[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]], 2); -- f(2, y,2,z,2)=[[1,1,1],[1,2,1],[1]] -- (1,[1,2,3]) (2,[1,2,3]) (3,[1,2,3]), (2,[1,2,3]) (2,[1,2,3]) (1,[1,2,3]), (3,[1,2]) + +-- count occurrences within each first-level array separately, looking two levels deep, +-- "one-dimensional" arrays are mentally stretched into "two-dimensional" ones, duplicating the logic of arrayMap( aEU), the answer is [[,,],[,,],[]] +SELECT '[[1,1,1],[1,2,1],[1]] ='; +SELECT arrayEnumerateUniqRanked(2, [1,2,1],1,[[1,2,3],[2,2,1],[3]],2); -- f(2, x,1,y,2)=[[1,1,1],[1,2,1],[1]] -- (1,1) (1,2) (1,3), (2,2) (2,2) (2,1), (1,3) +SELECT '[[1,1,2],[1,1,2],[1]] ='; +SELECT arrayEnumerateUniqRanked(2, [1,2,1],1,[['a','b','a'],['a','b','a'],['c']],2); -- f(2, x,1,y2,2)=[[1,1,2],[1,1,2],[1]] -- (1,a) (1,b) (1,a), (2,a) (2,b) (2,a), (1,c) +SELECT '[[1,2,3],[1,2,3],[1]] ='; +SELECT arrayEnumerateUniqRanked(2, 
[1,2,1],1,[[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]],2); -- f(2, x,1,z,2)=[[1,2,3],[1,2,3],[1]] -- (1,[1,2,3]) (1,[1,2,3]) (1,[1,2,3]), (2,[1,2,3]) (2,[1,2,3]) (2,[1,2,3]), (1,[1,2]) +SELECT '[[1,2,3],[1,2,3],[1]] ='; +SELECT arrayEnumerateUniqRanked(2, [[1,2,3],[2,2,1],[3]],1,[[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]],2); -- f(2, y,1,z,2)=[[1,2,3],[1,2,3],[1]] -- ([1,2,3],[1,2,3]) ([1,2,3],[1,2,3]) ([1,2,3],[1,2,3]), ([2,2,1],[1,2,3]) ([2,2,1],[1,2,3]) ([2,2,1],[1,2,3]), ([3],[1,2]) + +select 'more:'; +SELECT arrayEnumerateUniqRanked(2, [[1,2,3],[2,2,1],[3]], 2, [[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]], 3); +SELECT arrayEnumerateUniqRanked(2, [[1,2,3],[2,2,1],[3]], [[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]]); -- same +SELECT arrayEnumerateUniqRanked(3, [[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]], 3); +SELECT arrayEnumerateUniqRanked(2, [[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]], 3); +SELECT arrayEnumerateUniqRanked(1, [[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]], 3); +SELECT arrayEnumerateUniqRanked([[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]]); --same + +select '---------Dense'; +SELECT arrayEnumerateDenseRanked(1, [10,20,10,30], 1); +SELECT arrayEnumerateDense([10,20,10,30]); + +SELECT arrayEnumerateDenseRanked(3, [[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]], 3); +SELECT arrayEnumerateDenseRanked(2, [[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]], 3); +SELECT arrayEnumerateDenseRanked(1, [[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]], 3); +SELECT arrayEnumerateDenseRanked(2, [[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]], 2); +SELECT arrayEnumerateDenseRanked(1, [[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]], 2); +SELECT arrayEnumerateDenseRanked(1, [[[1,2,3],[1,2,3],[1,2,3]],[[1,2,3],[1,2,3],[1,2,3]],[[1,2]]], 1); + +select '---------table'; + +DROP TABLE IF EXISTS arrays_test; +CREATE TABLE arrays_test (a1 Array(UInt16), a2 Array(UInt16), a3 Array(Array(UInt16)), a4 Array(Array(UInt16)) ) ENGINE = Memory; + +--INSERT INTO arrays_test VALUES ([1,2,3], [2,2,1], [[1,2,3,4],[2,2,1],[3]], [[1,2,4,4],[2,2,1],[3]]), ([1,2,4], [2,2,1], [[1,2,3,4],[2,2,1],[3]], [[1,2,5,4],[2,2,1],[3]]), ([1,2,3], [2,2,1], [[1,2,3,4],[2,2,1],[3]], [[1,2,4,4],[2,2,1],[3]]), ([1,2,3], [2,2,1], [[1,2,3,4],[2,2,1],[3]], [[1,2,4,4],[2,2,1],[3]]); +INSERT INTO arrays_test VALUES ([1,2,3], [2,2,1], [[1,2,3,4],[2,2,1],[3]], [[1,2,4,4],[2,2,1],[3]]), ([21,22,24], [22,22,21], [[21,22,23,24],[22,22,21],[23]], [[21,22,25,24],[22,22,21],[23]]), ([31,32,33], [32,32,31], [[31,32,33,34],[32,32,31],[33]], [[31,32,34,34],[32,32,31],[33]]), ([41,42,43], [42,42,41], [[41,42,43,44],[42,42,41],[43]], [[41,42,44,44],[42,42,41],[43]]); +--INSERT INTO arrays_test VALUES ([1,2,3], [1,2,1], [[1,2,3,4],[2,2,1],[3]], [[1,2,4,4],[2,2,1],[3]]), ([21,22,24], [21,22,21], [[21,22,23,24],[22,22,21],[23]], [[21,22,25,24],[22,22,21],[23]]); +INSERT INTO arrays_test VALUES ([1,1,1], [1,1,1], [[1,1,1],[1,1,1],[1]], [[1,1,1],[1,1,1],[1]]); +INSERT INTO arrays_test VALUES ([1,2,3], [4,5,6], [[7,8,9],[10,11,12],[13]], [[14,15,16],[17,18,19],[20]]); + + +SELECT * FROM arrays_test ORDER BY a1, a2; +select '---------GO1'; +SELECT '1,a1,1', arrayEnumerateUniqRanked(1,a1,1) FROM arrays_test ORDER BY a1, a2; +SELECT '1,a2,1', arrayEnumerateUniqRanked(1,a2,1) FROM arrays_test ORDER BY a1, a2; +SELECT '1,a3,1', 
arrayEnumerateUniqRanked(1,a3,1) FROM arrays_test ORDER BY a1, a2; +SELECT '1,a4,1', arrayEnumerateUniqRanked(1,a4,1) FROM arrays_test ORDER BY a1, a2; +SELECT 'arrayEnumerateUniqRanked(1,a1,1,a2,1) ='; +SELECT '1,a1,1,a2,1', arrayEnumerateUniqRanked(1,a1,1,a2,1) FROM arrays_test ORDER BY a1, a2; +select 'arrayEnumerateUniq(a1, a2) ='; +SELECT arrayEnumerateUniq(a1, a2) FROM arrays_test ORDER BY a1, a2; +select '---------GO2'; +SELECT '1,a3,1,a4,1', arrayEnumerateUniqRanked(1,a3,1,a4,1) FROM arrays_test ORDER BY a1, a2; +SELECT '1,a3,2,a4,1', arrayEnumerateUniqRanked(1,a3,2,a4,1) FROM arrays_test ORDER BY a1, a2; +SELECT '1,a3,1,a4,2', arrayEnumerateUniqRanked(1,a3,1,a4,2) FROM arrays_test ORDER BY a1, a2; +SELECT '1,a3,2,a4,2', arrayEnumerateUniqRanked(1,a3,2,a4,2) FROM arrays_test ORDER BY a1, a2; +SELECT '2,a3,2,a4,2', arrayEnumerateUniqRanked(2,a3,2,a4,2) FROM arrays_test ORDER BY a1, a2; +SELECT '2,a3,2,a4,1', arrayEnumerateUniqRanked(2,a3,2,a4,1) FROM arrays_test ORDER BY a1, a2; +SELECT '2,a3,1,a4,2', arrayEnumerateUniqRanked(2,a3,1,a4,2) FROM arrays_test ORDER BY a1, a2; +select '---------END'; +DROP TABLE arrays_test; + +CREATE TABLE arrays_test (a3 Array(Array(UInt8)), a4 Array(Array(UInt32)) ) ENGINE = Memory; +INSERT INTO arrays_test VALUES ([[]], [[]]), ([[1,2]], [[3,4]]), ([[5,6]], [[7,8]]), ([[]], [[]]), ([[9,10]], [[11,12]]), ([[13,14]], [[15,16]]); +SELECT 'a3,a4 1..n', arrayEnumerateUniqRanked(a3,a4) FROM arrays_test ORDER BY a3, a4; +TRUNCATE TABLE arrays_test; +INSERT INTO arrays_test VALUES ([[]], [[]]), ([[1,1]], [[1,1]]), ([[1,1]], [[1,1]]), ([[]], [[]]), ([[1,1]], [[1,1]]), ([[1,1]], [[1,1]]); +SELECT 'a3,a4 1..1', arrayEnumerateUniqRanked(a3,a4) FROM arrays_test ORDER BY a3, a4; +DROP TABLE arrays_test; + + + +select '---------BAD'; +SELECT arrayEnumerateUniqRanked(); -- { serverError TOO_FEW_ARGUMENTS_FOR_FUNCTION } +SELECT arrayEnumerateUniqRanked([]); +SELECT arrayEnumerateUniqRanked(1); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateUniqRanked(2,[]); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateUniqRanked(2,[],2); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateUniqRanked(2,[],[]); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateUniqRanked(2,[],[],3); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateUniqRanked([],2); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateUniqRanked([],2,[]); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateUniqRanked(0,[],0); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateUniqRanked(0,0,0); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateUniqRanked(1,1,1); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateDenseRanked(1, [10,20,10,30], 0); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateUniqRanked(1, [[7,8,9,10],[10,11,12]], 2, [[14,15,16],[17,18,19],[20],[21]], 2); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } + +SELECT arrayEnumerateUniqRanked(1, [1,2], 1, ['a', 'b', 'c', 'd'],1); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } +SELECT arrayEnumerateUniqRanked(1, [1,2], 1, [14,15,16,17,18,19], 1); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } +SELECT arrayEnumerateUniqRanked(1, [14,15,16,17,18,19], 1, [1,2], 1); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } +SELECT arrayEnumerateUniqRanked(1, [1,1,1,1,1,1], 1, [1,1], 1); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } +SELECT arrayEnumerateUniqRanked(1, 
[1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1], 1, [1,1], 1); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } +SELECT arrayEnumerateDenseRanked([], [], []); +SELECT arrayEnumerateDenseRanked([], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []); +SELECT arrayEnumerateDenseRanked([1,2], [1,2], [1,2]); +SELECT arrayEnumerateUniqRanked([1,2], [1,2], [1,2]); +SELECT arrayEnumerateUniqRanked([1,2], 3, 4, 5); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateUniqRanked([1,2], 1, 2); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateUniqRanked([1,2], 1, 3, 4, 5); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateUniqRanked([1,2], 1, 3, [4], 5); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateDenseRanked([[[[[[[[[[42]]]]]]]]]]); +SELECT arrayEnumerateUniqRanked('wat', [1,2]); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateUniqRanked(1, [1,2], 'boom'); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateDenseRanked(['\0'], -8363126); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateDenseRanked(-10, ['\0'], -8363126); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateDenseRanked(1, ['\0'], -8363126); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateDenseRanked(-101, ['\0']); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateDenseRanked(1.1, [10,20,10,30]); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateDenseRanked([10,20,10,30], 0.4); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateDenseRanked([10,20,10,30], 1.8); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateUniqRanked(1, [], 1000000000); -- { serverError BAD_ARGUMENTS } + + +-- skipping empty arrays +SELECT arrayEnumerateUniqRanked(2, [[3], [3]]); +SELECT arrayEnumerateUniqRanked(2, [[], [3], [3]]); +SELECT arrayEnumerateUniqRanked(2, [[], [], [], [3], [], [3]]); +SELECT arrayEnumerateUniqRanked(2, [[], [], [], [], [3], [3]]); +SELECT arrayEnumerateUniqRanked(2, [[3], [], [3]]); +SELECT arrayEnumerateUniqRanked(2, [[3], [], [], [3]]); +SELECT arrayEnumerateUniqRanked(2, [[3], [], [], [3], [3]]); + + +select '-- no order'; +SELECT * FROM (SELECT a, arrayEnumerateUniqRanked(a) FROM ( SELECT * FROM ( SELECT [[], [1, 2, 3, 4]] AS a UNION ALL SELECT [[3, 4, 5]] AS a ) ) ) ORDER BY a ASC; +select '-- order no arr'; +SELECT a, arrayEnumerateUniqRanked(a) FROM ( SELECT * FROM ( SELECT [[1, 2, 3, 4]] AS a UNION ALL SELECT [[3, 4, 5]] AS a ) ORDER BY a ASC ); +select '-- order two arr'; +SELECT a, arrayEnumerateUniqRanked(a) FROM ( SELECT * FROM ( SELECT [[], [1, 2, 3, 4]] AS a UNION ALL SELECT [[], [3, 4, 5]] AS a ) ORDER BY a ASC ); +select '-- order non empt'; +SELECT a, arrayEnumerateUniqRanked(a) FROM ( SELECT * FROM ( SELECT [[6], [1, 2, 3, 4]] AS a UNION ALL SELECT [[3, 4, 5]] AS a ) ORDER BY a ASC ); +select '-- order'; +SELECT a, arrayEnumerateUniqRanked(a) FROM ( SELECT * FROM ( SELECT [[], [1, 2, 3, 4]] AS a UNION ALL SELECT [[3, 4, 5]] AS a ) ORDER BY 
a ASC ); +select '-- '; +SELECT arrayEnumerateUniqRanked(2,[[1, 2, 3, 4], [3, 4, 5, 6]]); +SELECT arrayEnumerateUniqRanked(2, [[], [1, 2, 3, 4], [3, 4, 5, 6]]); +SELECT arrayEnumerateUniqRanked(2, [[], [1, 2, 3, 4], [], [], [3, 4, 5, 6]]); +SELECT arrayEnumerateUniqRanked(2, [[1, 2, 3, 4], [], [], [3, 4, 5, 6]]); +SELECT arrayEnumerateUniqRanked(2,[[1], [1]]); +SELECT arrayEnumerateUniqRanked(2, [[], [1], [1]]); +SELECT a, arrayEnumerateUniqRanked(a) FROM ( SELECT * FROM ( SELECT [[], [4]] AS a UNION ALL SELECT [[4]] AS a ) ORDER BY a ASC ); +select '-- '; +SELECT a, arrayEnumerateUniqRanked(a) FROM ( SELECT * FROM ( SELECT [[], [1, 2, 3, 4]] AS a UNION ALL SELECT [[], [3, 4, 5]] AS a ) ORDER BY a ASC ); +select '-- '; +SELECT a, arrayEnumerateUniqRanked(a) FROM ( SELECT * FROM ( SELECT [[], [1, 2, 3, 4]] AS a UNION ALL SELECT [[3, 4, 5]] AS a ) ORDER BY a ASC ); +select '-- '; +SELECT a, arrayEnumerateUniqRanked(a) FROM ( SELECT * FROM ( SELECT [[], [], [1, 2, 3, 4]] AS a UNION ALL SELECT [[3, 4, 5]] AS a ) ORDER BY a ASC ); +select '-- '; +SELECT a, arrayEnumerateUniqRanked(a) FROM ( SELECT * FROM ( SELECT [[], [], [1, 2, 3, 4]] AS a UNION ALL SELECT [[], [], [3, 4, 5]] AS a ) ORDER BY a ASC ); +select '-- '; +SELECT a, arrayEnumerateUniqRanked(a) FROM ( SELECT * FROM ( SELECT [[], [], [1, 2, 1, 4]] AS a UNION ALL SELECT [[], [], [3, 4, 5, 4]] AS a ) ORDER BY a ASC ); +select '-- '; + + +DROP TABLE IF EXISTS arrays_test; +CREATE TABLE arrays_test (a1 Array(UInt8), a2 Array(UInt32) ) ENGINE = Memory; +INSERT INTO arrays_test VALUES ([], []),([10], [11]), ([], []), ([12], [13]); +SELECT 'a1,a2 n', arrayEnumerateUniqRanked(a1,a2) FROM arrays_test ORDER BY a1, a2; + +TRUNCATE TABLE arrays_test; +INSERT INTO arrays_test VALUES ([], []),([1], [1]), ([], []), ([1], [1]); +SELECT 'a1,a2 1', arrayEnumerateUniqRanked(a1,a2) FROM arrays_test ORDER BY a1, a2; + +TRUNCATE TABLE arrays_test; +INSERT INTO arrays_test VALUES ([], []), ([1,2], [3,4]), ([5,6], [7,8]), ([], []), ([9,10], [11,12]), ([13,14], [15,16]); +SELECT 'a1,a2 n2', arrayEnumerateUniqRanked(a1,a2) FROM arrays_test ORDER BY a1, a2; + +TRUNCATE TABLE arrays_test; +INSERT INTO arrays_test VALUES ([], []), ([1,1], [1,1]), ([1,1], [1,1]), ([], []), ([1,1], [1,1]), ([1,1], [1,1]); +SELECT 'a1,a2 12', arrayEnumerateUniqRanked(a1,a2) FROM arrays_test ORDER BY a1, a2; + +DROP TABLE arrays_test; + + + +DROP TABLE IF EXISTS arr_tests_visits; + +CREATE TABLE arr_tests_visits +( + CounterID UInt32, + StartDate Date, + Sign Int8, + VisitID UInt64, + UserID UInt64, + VisitVersion UInt16, + `Test.BannerID` Array(UInt64), + `Test.Load` Array(UInt8), + `Test.PuidKey` Array(Array(UInt8)), + `Test.PuidVal` Array(Array(UInt32)) +) ENGINE = MergeTree() PARTITION BY toMonday(StartDate) ORDER BY (CounterID, StartDate, intHash32(UserID), VisitID) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192; + +truncate table arr_tests_visits; +insert into arr_tests_visits (CounterID, StartDate, Sign, VisitID, UserID, VisitVersion, `Test.BannerID`, `Test.Load`, `Test.PuidKey`, `Test.PuidVal`) +values (1, toDate('2019-06-06'), 1, 1, 1, 1, [1], [1], [[]], [[]]), (1, toDate('2019-06-06'), -1, 1, 1, 1, [1], [1], [[]], [[]]), (1, toDate('2019-06-06'), 1, 1, 1, 2, [1,2], [1,1], [[],[1,2,3,4]], [[],[1001, 1002, 1003, 1004]]), (1, toDate('2019-06-06'), 1, 2, 1, 1, [3], [1], [[3,4,5]], [[2001, 2002, 2003]]), (1, toDate('2019-06-06'), 1, 3, 2, 1, [4, 5], [1, 0], [[5,6],[]], [[3001, 3002],[]]), (1, toDate('2019-06-06'), 1, 4, 2, 1, [5, 5, 6], [1, 0, 0], [[1,2], [1, 2], 
[3]], [[1001, 1002],[1002, 1003], [2001]]); + +select CounterID, StartDate, Sign, VisitID, UserID, VisitVersion, BannerID, Load, PuidKeyArr, PuidValArr, arrayEnumerateUniqRanked(PuidKeyArr, PuidValArr) as uniqTestPuid + from arr_tests_visits + array join + Test.BannerID as BannerID, + Test.Load as Load, + Test.PuidKey as PuidKeyArr, + Test.PuidVal as PuidValArr; + +select '--'; + +SELECT + CounterID, + StartDate, + Sign, + VisitID, + UserID, + VisitVersion, + BannerID, + Load, + PuidKeyArr, + PuidValArr, + arrayEnumerateUniqRanked(PuidKeyArr, PuidValArr) AS uniqTestPuid +FROM arr_tests_visits +ARRAY JOIN + Test.BannerID AS BannerID, + Test.Load AS Load, + Test.PuidKey AS PuidKeyArr, + Test.PuidVal AS PuidValArr; + +DROP TABLE arr_tests_visits; + + +select '-- empty'; +SELECT arrayEnumerateUniqRanked([['a'], [], ['a']]); +SELECT arrayEnumerateUniqRanked([[1], [], [1]]); +SELECT arrayEnumerateUniqRanked([[1], [], [1], [], [1], [], [1], [], [1], [], [1], [], [1], [], [1], [], [1]]); +SELECT arrayEnumerateUniqRanked([[], [1], [], [1], [], [1], [], [1], [], [1], [], [1], [], [1], [], [1]]); +SELECT arrayEnumerateUniqRanked([[1], [1], [], [1]]); + +select '-- empty corner'; +SELECT a, arrayEnumerateUniqRanked(a) FROM ( SELECT * FROM ( SELECT [[],[1],[]] AS a UNION ALL SELECT [[1]] AS a ) ORDER BY a ASC ); diff --git a/parser/testdata/00909_ngram_distance/ast.json b/parser/testdata/00909_ngram_distance/ast.json new file mode 100644 index 000000000..dfb9794ac --- /dev/null +++ b/parser/testdata/00909_ngram_distance/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1000" + }, + { + "explain": " Function ngramDistanceUTF8 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal ''" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_5" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001300923, + "rows_read": 20, + "bytes_read": 809 + } +} diff --git a/parser/testdata/00909_ngram_distance/metadata.json b/parser/testdata/00909_ngram_distance/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00909_ngram_distance/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00909_ngram_distance/query.sql b/parser/testdata/00909_ngram_distance/query.sql new file mode 100644 index 000000000..28aff50d2 --- /dev/null +++ b/parser/testdata/00909_ngram_distance/query.sql @@ -0,0 +1,180 @@ +select round(1000 * ngramDistanceUTF8(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramDistanceUTF8(materialize('абв'), '')) from system.numbers limit 5; +select round(1000 * 
ngramDistanceUTF8(materialize(''), 'абв')) from system.numbers limit 5; +select round(1000 * ngramDistanceUTF8(materialize('абвгдеёжз'), 'абвгдеёжз')) from system.numbers limit 5; +select round(1000 * ngramDistanceUTF8(materialize('абвгдеёжз'), 'абвгдеёж')) from system.numbers limit 5; +select round(1000 * ngramDistanceUTF8(materialize('абвгдеёжз'), 'гдеёзд')) from system.numbers limit 5; +select round(1000 * ngramDistanceUTF8(materialize('абвгдеёжз'), 'ёёёёёёёё')) from system.numbers limit 5; + +select round(1000 * ngramDistanceUTF8(materialize(''), materialize('')))=round(1000 * ngramDistanceUTF8(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramDistanceUTF8(materialize('абв'), materialize('')))=round(1000 * ngramDistanceUTF8(materialize('абв'), '')) from system.numbers limit 5; +select round(1000 * ngramDistanceUTF8(materialize(''), materialize('абв')))=round(1000 * ngramDistanceUTF8(materialize(''), 'абв')) from system.numbers limit 5; +select round(1000 * ngramDistanceUTF8(materialize('абвгдеёжз'), materialize('абвгдеёжз')))=round(1000 * ngramDistanceUTF8(materialize('абвгдеёжз'), 'абвгдеёжз')) from system.numbers limit 5; +select round(1000 * ngramDistanceUTF8(materialize('абвгдеёжз'), materialize('абвгдеёж')))=round(1000 * ngramDistanceUTF8(materialize('абвгдеёжз'), 'абвгдеёж')) from system.numbers limit 5; +select round(1000 * ngramDistanceUTF8(materialize('абвгдеёжз'), materialize('гдеёзд')))=round(1000 * ngramDistanceUTF8(materialize('абвгдеёжз'), 'гдеёзд')) from system.numbers limit 5; +select round(1000 * ngramDistanceUTF8(materialize('абвгдеёжз'), materialize('ёёёёёёёё')))=round(1000 * ngramDistanceUTF8(materialize('абвгдеёжз'), 'ёёёёёёёё')) from system.numbers limit 5; + +select round(1000 * ngramDistanceUTF8('', materialize('')))=round(1000 * ngramDistanceUTF8(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramDistanceUTF8('абв', materialize('')))=round(1000 * ngramDistanceUTF8(materialize('абв'), '')) from system.numbers limit 5; +select round(1000 * ngramDistanceUTF8('', materialize('абв')))=round(1000 * ngramDistanceUTF8(materialize(''), 'абв')) from system.numbers limit 5; +select round(1000 * ngramDistanceUTF8('абвгдеёжз', materialize('абвгдеёжз')))=round(1000 * ngramDistanceUTF8(materialize('абвгдеёжз'), 'абвгдеёжз')) from system.numbers limit 5; +select round(1000 * ngramDistanceUTF8('абвгдеёжз', materialize('абвгдеёж')))=round(1000 * ngramDistanceUTF8(materialize('абвгдеёжз'), 'абвгдеёж')) from system.numbers limit 5; +select round(1000 * ngramDistanceUTF8('абвгдеёжз', materialize('гдеёзд')))=round(1000 * ngramDistanceUTF8(materialize('абвгдеёжз'), 'гдеёзд')) from system.numbers limit 5; +select round(1000 * ngramDistanceUTF8('абвгдеёжз', materialize('ёёёёёёёё')))=round(1000 * ngramDistanceUTF8(materialize('абвгдеёжз'), 'ёёёёёёёё')) from system.numbers limit 5; + +select round(1000 * ngramDistanceUTF8('', '')); +select round(1000 * ngramDistanceUTF8('абв', '')); +select round(1000 * ngramDistanceUTF8('', 'абв')); +select round(1000 * ngramDistanceUTF8('абвгдеёжз', 'абвгдеёжз')); +select round(1000 * ngramDistanceUTF8('абвгдеёжз', 'абвгдеёж')); +select round(1000 * ngramDistanceUTF8('абвгдеёжз', 'гдеёзд')); +select round(1000 * ngramDistanceUTF8('абвгдеёжз', 'ёёёёёёёё')); + +drop table if exists test_distance; +create table test_distance (Title String) engine = Memory; +insert into test_distance values ('привет как дела?... 
Херсон'), ('привет как дела клип - TUT.BY'), ('привет'), ('пап привет как дела - TUT.BY'), ('привет братан как дела - TUT.BY'), ('http://metric.ru/'), ('http://autometric.ru/'), ('http://top.bigmir.net/'), ('http://metris.ru/'), ('http://metrika.ru/'), (''); + +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceUTF8(Title, Title) as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceUTF8(Title, extract(Title, 'как дела')) as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceUTF8(Title, extract(Title, 'metr')) as distance, Title; + +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceUTF8(Title, 'привет как дела') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceUTF8(Title, 'как привет дела') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceUTF8(Title, 'metrika') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceUTF8(Title, 'metrica') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceUTF8(Title, 'metriks') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceUTF8(Title, 'metrics') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceUTF8(Title, 'bigmir') as distance, Title; + + +select round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('абв'), '')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize(''), 'абв')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('абвГДЕёжз'), 'АбвгдЕёжз')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('аБВГдеёЖз'), 'АбвГдеёж')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('абвгдеёжз'), 'гдеёЗД')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('абвгдеёжз'), 'ЁЁЁЁЁЁЁЁ')) from system.numbers limit 5; + +select round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize(''),materialize(''))) = round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('абв'),materialize(''))) = round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('абв'), '')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize(''), materialize('абв'))) = round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize(''), 'абв')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('абвГДЕёжз'), materialize('АбвгдЕёжз'))) = round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('абвГДЕёжз'), 'АбвгдЕёжз')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('аБВГдеёЖз'), materialize('АбвГдеёж'))) = round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('аБВГдеёЖз'), 'АбвГдеёж')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('абвгдеёжз'), materialize('гдеёЗД'))) = round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('абвгдеёжз'), 'гдеёЗД')) from 
system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('абвгдеёжз'), materialize('ЁЁЁЁЁЁЁЁ'))) = round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('абвгдеёжз'), 'ЁЁЁЁЁЁЁЁ')) from system.numbers limit 5; + +select round(1000 * ngramDistanceCaseInsensitiveUTF8('', materialize(''))) = round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitiveUTF8('абв',materialize(''))) = round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('абв'), '')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitiveUTF8('', materialize('абв'))) = round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize(''), 'абв')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitiveUTF8('абвГДЕёжз', materialize('АбвгдЕёжз'))) = round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('абвГДЕёжз'), 'АбвгдЕёжз')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitiveUTF8('аБВГдеёЖз', materialize('АбвГдеёж'))) = round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('аБВГдеёЖз'), 'АбвГдеёж')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitiveUTF8('абвгдеёжз', materialize('гдеёЗД'))) = round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('абвгдеёжз'), 'гдеёЗД')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitiveUTF8('абвгдеёжз', materialize('ЁЁЁЁЁЁЁЁ'))) = round(1000 * ngramDistanceCaseInsensitiveUTF8(materialize('абвгдеёжз'), 'ЁЁЁЁЁЁЁЁ')) from system.numbers limit 5; + + +select round(1000 * ngramDistanceCaseInsensitiveUTF8('', '')); +select round(1000 * ngramDistanceCaseInsensitiveUTF8('абв', '')); +select round(1000 * ngramDistanceCaseInsensitiveUTF8('', 'абв')); +select round(1000 * ngramDistanceCaseInsensitiveUTF8('абвГДЕёжз', 'АбвгдЕЁжз')); +select round(1000 * ngramDistanceCaseInsensitiveUTF8('аБВГдеёЖз', 'АбвГдеёж')); +select round(1000 * ngramDistanceCaseInsensitiveUTF8('абвгдеёжз', 'гдеёЗД')); +select round(1000 * ngramDistanceCaseInsensitiveUTF8('АБВГДеёжз', 'ЁЁЁЁЁЁЁЁ')); + +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, Title) as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, extract(Title, 'как дела')) as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, extract(Title, 'metr')) as distance, Title; + +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, 'ПрИвЕт кАК ДЕЛа') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, 'как ПРИВЕТ дела') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, 'metrika') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, 'Metrika') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, 'mEtrica') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, 'metriKS') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, 'metrics') as distance, 
Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, 'BigMIR') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitiveUTF8(Title, 'приВЕТ КАк ДеЛа КлИп - TuT.by') as distance, Title; + + +select round(1000 * ngramDistance(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramDistance(materialize('abc'), '')) from system.numbers limit 5; +select round(1000 * ngramDistance(materialize(''), 'abc')) from system.numbers limit 5; +select round(1000 * ngramDistance(materialize('abcdefgh'), 'abcdefgh')) from system.numbers limit 5; +select round(1000 * ngramDistance(materialize('abcdefgh'), 'abcdefg')) from system.numbers limit 5; +select round(1000 * ngramDistance(materialize('abcdefgh'), 'defgh')) from system.numbers limit 5; +select round(1000 * ngramDistance(materialize('abcdefgh'), 'aaaaaaaa')) from system.numbers limit 5; + +select round(1000 * ngramDistance(materialize(''),materialize('')))=round(1000 * ngramDistance(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramDistance(materialize('abc'),materialize('')))=round(1000 * ngramDistance(materialize('abc'), '')) from system.numbers limit 5; +select round(1000 * ngramDistance(materialize(''), materialize('abc')))=round(1000 * ngramDistance(materialize(''), 'abc')) from system.numbers limit 5; +select round(1000 * ngramDistance(materialize('abcdefgh'), materialize('abcdefgh')))=round(1000 * ngramDistance(materialize('abcdefgh'), 'abcdefgh')) from system.numbers limit 5; +select round(1000 * ngramDistance(materialize('abcdefgh'), materialize('abcdefg')))=round(1000 * ngramDistance(materialize('abcdefgh'), 'abcdefg')) from system.numbers limit 5; +select round(1000 * ngramDistance(materialize('abcdefgh'), materialize('defgh')))=round(1000 * ngramDistance(materialize('abcdefgh'), 'defgh')) from system.numbers limit 5; +select round(1000 * ngramDistance(materialize('abcdefgh'), materialize('aaaaaaaa')))=round(1000 * ngramDistance(materialize('abcdefgh'), 'aaaaaaaa')) from system.numbers limit 5; + +select round(1000 * ngramDistance('',materialize('')))=round(1000 * ngramDistance(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramDistance('abc', materialize('')))=round(1000 * ngramDistance(materialize('abc'), '')) from system.numbers limit 5; +select round(1000 * ngramDistance('', materialize('abc')))=round(1000 * ngramDistance(materialize(''), 'abc')) from system.numbers limit 5; +select round(1000 * ngramDistance('abcdefgh', materialize('abcdefgh')))=round(1000 * ngramDistance(materialize('abcdefgh'), 'abcdefgh')) from system.numbers limit 5; +select round(1000 * ngramDistance('abcdefgh', materialize('abcdefg')))=round(1000 * ngramDistance(materialize('abcdefgh'), 'abcdefg')) from system.numbers limit 5; +select round(1000 * ngramDistance('abcdefgh', materialize('defgh')))=round(1000 * ngramDistance(materialize('abcdefgh'), 'defgh')) from system.numbers limit 5; +select round(1000 * ngramDistance('abcdefgh', materialize('aaaaaaaa')))=round(1000 * ngramDistance(materialize('abcdefgh'), 'aaaaaaaa')) from system.numbers limit 5; + + +select round(1000 * ngramDistance('', '')); +select round(1000 * ngramDistance('abc', '')); +select round(1000 * ngramDistance('', 'abc')); +select round(1000 * ngramDistance('abcdefgh', 'abcdefgh')); +select round(1000 * ngramDistance('abcdefgh', 'abcdefg')); +select round(1000 * ngramDistance('abcdefgh', 'defgh')); +select 
round(1000 * ngramDistance('abcdefgh', 'aaaaaaaa')); + +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistance(Title, 'привет как дела') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistance(Title, 'как привет дела') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistance(Title, 'metrika') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistance(Title, 'metrica') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistance(Title, 'metriks') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistance(Title, 'metrics') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistance(Title, 'bigmir') as distance, Title; + +select round(1000 * ngramDistanceCaseInsensitive(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitive(materialize('abc'), '')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitive(materialize(''), 'abc')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitive(materialize('abCdefgH'), 'Abcdefgh')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitive(materialize('abcdefgh'), 'abcdeFG')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitive(materialize('AAAAbcdefgh'), 'defgh')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitive(materialize('ABCdefgH'), 'aaaaaaaa')) from system.numbers limit 5; + +select round(1000 * ngramDistanceCaseInsensitive(materialize(''), materialize('')))=round(1000 * ngramDistanceCaseInsensitive(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitive(materialize('abc'), materialize('')))=round(1000 * ngramDistanceCaseInsensitive(materialize('abc'), '')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitive(materialize(''), materialize('abc')))=round(1000 * ngramDistanceCaseInsensitive(materialize(''), 'abc')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitive(materialize('abCdefgH'), materialize('Abcdefgh')))=round(1000 * ngramDistanceCaseInsensitive(materialize('abCdefgH'), 'Abcdefgh')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitive(materialize('abcdefgh'), materialize('abcdeFG')))=round(1000 * ngramDistanceCaseInsensitive(materialize('abcdefgh'), 'abcdeFG')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitive(materialize('AAAAbcdefgh'), materialize('defgh')))=round(1000 * ngramDistanceCaseInsensitive(materialize('AAAAbcdefgh'), 'defgh')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitive(materialize('ABCdefgH'), materialize('aaaaaaaa')))=round(1000 * ngramDistanceCaseInsensitive(materialize('ABCdefgH'), 'aaaaaaaa')) from system.numbers limit 5; + +select round(1000 * ngramDistanceCaseInsensitive('', materialize('')))=round(1000 * ngramDistanceCaseInsensitive(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitive('abc', materialize('')))=round(1000 * ngramDistanceCaseInsensitive(materialize('abc'), '')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitive('', materialize('abc')))=round(1000 * ngramDistanceCaseInsensitive(materialize(''), 'abc')) from 
system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitive('abCdefgH', materialize('Abcdefgh')))=round(1000 * ngramDistanceCaseInsensitive(materialize('abCdefgH'), 'Abcdefgh')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitive('abcdefgh', materialize('abcdeFG')))=round(1000 * ngramDistanceCaseInsensitive(materialize('abcdefgh'), 'abcdeFG')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitive('AAAAbcdefgh', materialize('defgh')))=round(1000 * ngramDistanceCaseInsensitive(materialize('AAAAbcdefgh'), 'defgh')) from system.numbers limit 5; +select round(1000 * ngramDistanceCaseInsensitive('ABCdefgH', materialize('aaaaaaaa')))=round(1000 * ngramDistanceCaseInsensitive(materialize('ABCdefgH'), 'aaaaaaaa')) from system.numbers limit 5; + +select round(1000 * ngramDistanceCaseInsensitive('', '')); +select round(1000 * ngramDistanceCaseInsensitive('abc', '')); +select round(1000 * ngramDistanceCaseInsensitive('', 'abc')); +select round(1000 * ngramDistanceCaseInsensitive('abCdefgH', 'Abcdefgh')); +select round(1000 * ngramDistanceCaseInsensitive('abcdefgh', 'abcdeFG')); +select round(1000 * ngramDistanceCaseInsensitive('AAAAbcdefgh', 'defgh')); +select round(1000 * ngramDistanceCaseInsensitive('ABCdefgH', 'aaaaaaaa')); + +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitive(Title, 'ПрИвЕт кАК ДЕЛа') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitive(Title, 'как ПРИВЕТ дела') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitive(Title, 'metrika') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitive(Title, 'Metrika') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitive(Title, 'mEtrica') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitive(Title, 'metriKS') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitive(Title, 'metrics') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_distance ORDER BY ngramDistanceCaseInsensitive(Title, 'BigMIR') as distance, Title; + +drop table if exists test_distance; diff --git a/parser/testdata/00910_buffer_prewhere/ast.json b/parser/testdata/00910_buffer_prewhere/ast.json new file mode 100644 index 000000000..9ce482db7 --- /dev/null +++ b/parser/testdata/00910_buffer_prewhere/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery mt (children 3)" + }, + { + "explain": " Identifier mt" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration uid (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " ColumnDeclaration ts (children 1)" + }, + { + "explain": " DataType DateTime" + }, + { + "explain": " ColumnDeclaration val (children 1)" + }, + { + "explain": " DataType Float64" + }, + { + "explain": " Storage definition (children 3)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier ts" + }, + { + "explain": " Function tuple (children 1)" + }, 
+ { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier uid" + }, + { + "explain": " Identifier ts" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001461231, + "rows_read": 19, + "bytes_read": 647 + } +} diff --git a/parser/testdata/00910_buffer_prewhere/metadata.json b/parser/testdata/00910_buffer_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00910_buffer_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00910_buffer_prewhere/query.sql b/parser/testdata/00910_buffer_prewhere/query.sql new file mode 100644 index 000000000..e6b1cc424 --- /dev/null +++ b/parser/testdata/00910_buffer_prewhere/query.sql @@ -0,0 +1,4 @@ +CREATE TABLE mt (uid UInt64, ts DateTime, val Float64) ENGINE = MergeTree PARTITION BY toDate(ts) ORDER BY (uid, ts); +CREATE TABLE buf as mt ENGINE = Buffer({CLICKHOUSE_DATABASE:Identifier}, mt, 2, 10, 60, 10000, 100000, 1000000, 10000000); +INSERT INTO buf VALUES (1, '2019-03-01 10:00:00', 0.5), (2, '2019-03-02 10:00:00', 0.15), (1, '2019-03-03 10:00:00', 0.25); +SELECT count() from buf prewhere ts > toDateTime('2019-03-01 12:00:00') and ts < toDateTime('2019-03-02 12:00:00'); diff --git a/parser/testdata/00910_buffer_prewhere_different_types/ast.json b/parser/testdata/00910_buffer_prewhere_different_types/ast.json new file mode 100644 index 000000000..536a25a25 --- /dev/null +++ b/parser/testdata/00910_buffer_prewhere_different_types/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery buffer_table1__fuzz_28 (children 1)" + }, + { + "explain": " Identifier buffer_table1__fuzz_28" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000970567, + "rows_read": 2, + "bytes_read": 96 + } +} diff --git a/parser/testdata/00910_buffer_prewhere_different_types/metadata.json b/parser/testdata/00910_buffer_prewhere_different_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00910_buffer_prewhere_different_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00910_buffer_prewhere_different_types/query.sql b/parser/testdata/00910_buffer_prewhere_different_types/query.sql new file mode 100644 index 000000000..18469769b --- /dev/null +++ b/parser/testdata/00910_buffer_prewhere_different_types/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS buffer_table1__fuzz_28; +DROP TABLE IF EXISTS merge_tree_table1; + +CREATE TABLE merge_tree_table1 (`x` UInt32) ENGINE = MergeTree ORDER BY x; + +CREATE TABLE buffer_table1__fuzz_24 (`s` Nullable(Int128), `x` Nullable(FixedString(17))) ENGINE = Buffer(currentDatabase(), 'merge_tree_table1', 16, 10, 60, 10, 1000, 1048576, 2097152); + +SET send_logs_level='error'; +SELECT s FROM buffer_table1__fuzz_24 PREWHERE factorial(toNullable(10)); + +INSERT INTO merge_tree_table1 VALUES (1), (2), (3), (4), (5), (6), (7), (8), (9), (10); + +SELECT s FROM buffer_table1__fuzz_24 PREWHERE factorial(toNullable(10)); + +CREATE TABLE buffer_table1__fuzz_28 (`x` Nullable(UInt32)) ENGINE = Buffer(currentDatabase(), 'merge_tree_table1', 16, 10, 60, 10, 1000, 1048576, 2097152); + +SELECT * FROM buffer_table1__fuzz_28 PREWHERE x = toLowCardinality(1); + +CREATE ROW POLICY rp ON buffer_table1__fuzz_28 FOR SELECT USING x = toLowCardinality(1) TO default; + +SELECT * FROM buffer_table1__fuzz_28; diff --git a/parser/testdata/00910_crash_when_distributed_modify_order_by/ast.json 
b/parser/testdata/00910_crash_when_distributed_modify_order_by/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00910_crash_when_distributed_modify_order_by/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00910_crash_when_distributed_modify_order_by/metadata.json b/parser/testdata/00910_crash_when_distributed_modify_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00910_crash_when_distributed_modify_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00910_crash_when_distributed_modify_order_by/query.sql b/parser/testdata/00910_crash_when_distributed_modify_order_by/query.sql new file mode 100644 index 000000000..67a104358 --- /dev/null +++ b/parser/testdata/00910_crash_when_distributed_modify_order_by/query.sql @@ -0,0 +1,10 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS union1; +DROP TABLE IF EXISTS union2; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE union1 ( date Date, a Int32, b Int32, c Int32, d Int32) ENGINE = MergeTree(date, (a, date), 8192); +CREATE TABLE union2 ( date Date, a Int32, b Int32, c Int32, d Int32) ENGINE = Distributed(test_shard_localhost, currentDatabase(), 'union1'); +ALTER TABLE union2 MODIFY ORDER BY a; -- { serverError NOT_IMPLEMENTED } +DROP TABLE union1; +DROP TABLE union2; diff --git a/parser/testdata/00910_decimal_group_array_crash_3783/ast.json b/parser/testdata/00910_decimal_group_array_crash_3783/ast.json new file mode 100644 index 000000000..61d477761 --- /dev/null +++ b/parser/testdata/00910_decimal_group_array_crash_3783/ast.json @@ -0,0 +1,112 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function groupArray (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier s" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sum (alias s) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier n" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDecimal32 (alias n) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 30, + + "statistics": + { + "elapsed": 0.001538419, + "rows_read": 30, + "bytes_read": 1354 + } +} diff --git 
a/parser/testdata/00910_decimal_group_array_crash_3783/metadata.json b/parser/testdata/00910_decimal_group_array_crash_3783/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00910_decimal_group_array_crash_3783/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00910_decimal_group_array_crash_3783/query.sql b/parser/testdata/00910_decimal_group_array_crash_3783/query.sql new file mode 100644 index 000000000..c6151d3bd --- /dev/null +++ b/parser/testdata/00910_decimal_group_array_crash_3783/query.sql @@ -0,0 +1,62 @@ +select groupArray(s) from (select sum(n) s from (select toDecimal32(1, 2) as n)); +select groupArray(s) from (select sum(n) s from (select toDecimal64(1, 5) as n)); +select groupArray(s) from (select sum(n) s from (select toDecimal128(1, 10) as n)); + +select groupArray(s) from (select sum(n) s from (select toDecimal32(number, 2) as n from numbers(1000))); +select groupArray(s) from (select sum(n) s from (select toDecimal64(number, 5) as n from numbers(1000))); +select groupArray(s) from (select sum(n) s from (select toDecimal128(number, 10) as n from numbers(1000))); + +DROP TABLE IF EXISTS sensor_value; +CREATE TABLE sensor_value ( + received_at DateTime('Asia/Istanbul'), + device_id UUID, + sensor_id UUID, + value Nullable(Decimal(18, 4)), + low_warning Nullable(Decimal(18, 4)), + low_critical Nullable(Decimal(18, 4)), + high_warning Nullable(Decimal(18, 4)), + high_critical Nullable(Decimal(18, 4)) +) ENGINE = MergeTree +PARTITION BY toDate(received_at) +ORDER BY (device_id, sensor_id); + +INSERT INTO sensor_value (received_at, device_id, sensor_id, value, low_warning, low_critical, high_warning, high_critical) VALUES ('2018-12-18 00:16:07', 'a4d92414-09aa-4dbd-80b2-124ddaacf333', 'ed87e57c-9331-462a-80b4-9f0c005e88c8', '0.4400', '-10000000.0000', '-10000000.0000', '10000000.0000', '10000000.0000'); + +SELECT `time`, groupArray((sensor_id, volume)) AS groupArr FROM ( + SELECT (intDiv(toUInt32(received_at), 900) * 900) AS `time`, sensor_id, avg(value) AS volume + FROM sensor_value + WHERE received_at BETWEEN '2018-12-12 00:00:00' AND '2018-12-30 00:00:00' + GROUP BY `time`,sensor_id + ORDER BY `time` +) GROUP BY `time` ORDER BY `time`; + +DROP TABLE sensor_value; + +select s.a, s.b, max(s.dt1) dt1, s.c, s.d, s.f, s.i, max(s.dt2) dt2 from ( + select toUInt64(4360430) a + , toUInt64(5681495) b + , toDateTime('2018-11-01 10:44:58', 'Asia/Istanbul') dt1 + , 'txt' c + , toDecimal128('274.350000000000', 12) d + , toDecimal128(268.970000000000, 12) f + , toDecimal128(0.000000000000, 12) i + , toDateTime('2018-11-02 00:00:00', 'Asia/Istanbul') dt2 + union all + select toUInt64(4341757) a + , toUInt64(5657967) b + , toDateTime('2018-11-01 16:47:46', 'Asia/Istanbul') dt1 + , 'txt' c + , toDecimal128('321.380000000000', 12) d + , toDecimal128(315.080000000000, 12) f + , toDecimal128(0.000000000000, 12) i + , toDateTime('2018-11-02 00:00:00', 'Asia/Istanbul') dt2 + union all + select toUInt64(4360430) a + , toUInt64(5681495) b + , toDateTime('2018-11-02 09:00:07', 'Asia/Istanbul') dt1 + , 'txt' c + , toDecimal128('274.350000000000', 12) d + , toDecimal128(268.970000000000, 12) f + , toDecimal128(0.000000000000, 12) i + , toDateTime('2018-11-02 00:00:00', 'Asia/Istanbul') dt2 +) s group by s.a, s.b, s.c, s.d, s.f, s.i ORDER BY s.a, s.b, s.c, s.d, s.f, s.i; diff --git a/parser/testdata/00910_zookeeper_custom_compression_codecs_replicated_long/ast.json 
b/parser/testdata/00910_zookeeper_custom_compression_codecs_replicated_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00910_zookeeper_custom_compression_codecs_replicated_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00910_zookeeper_custom_compression_codecs_replicated_long/metadata.json b/parser/testdata/00910_zookeeper_custom_compression_codecs_replicated_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00910_zookeeper_custom_compression_codecs_replicated_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00910_zookeeper_custom_compression_codecs_replicated_long/query.sql b/parser/testdata/00910_zookeeper_custom_compression_codecs_replicated_long/query.sql new file mode 100644 index 000000000..ed4d9dd49 --- /dev/null +++ b/parser/testdata/00910_zookeeper_custom_compression_codecs_replicated_long/query.sql @@ -0,0 +1,141 @@ +-- Tags: long, replica + +SET send_logs_level = 'fatal'; +SET allow_suspicious_codecs = 1; + +DROP TABLE IF EXISTS compression_codec_replicated1; +DROP TABLE IF EXISTS compression_codec_replicated2; + +CREATE TABLE compression_codec_replicated1( + id UInt64 CODEC(LZ4), + data String CODEC(ZSTD), + ddd Date CODEC(NONE), + somenum Float64 CODEC(ZSTD(2)), + somestr FixedString(3) CODEC(LZ4HC(7)), + othernum Int64 CODEC(Delta) +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00910/compression_codec_replicated', '1') ORDER BY tuple(); + +CREATE TABLE compression_codec_replicated2( + id UInt64 CODEC(LZ4), + data String CODEC(ZSTD), + ddd Date CODEC(NONE), + somenum Float64 CODEC(ZSTD(2)), + somestr FixedString(3) CODEC(LZ4HC(7)), + othernum Int64 CODEC(Delta) +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00910/compression_codec_replicated', '2') ORDER BY tuple(); + + +INSERT INTO compression_codec_replicated1 VALUES(1, 'hello', toDate('2018-12-14'), 1.1, 'aaa', 5); +INSERT INTO compression_codec_replicated1 VALUES(2, 'world', toDate('2018-12-15'), 2.2, 'bbb', 6); +INSERT INTO compression_codec_replicated1 VALUES(3, '!', toDate('2018-12-16'), 3.3, 'ccc', 7); + +SYSTEM SYNC REPLICA compression_codec_replicated2; + +SELECT * FROM compression_codec_replicated1 ORDER BY id; +SELECT * FROM compression_codec_replicated2 ORDER BY id; + +OPTIMIZE TABLE compression_codec_replicated1 FINAL; + +INSERT INTO compression_codec_replicated1 VALUES(2, '', toDate('2018-12-13'), 4.4, 'ddd', 8); + +SYSTEM SYNC REPLICA compression_codec_replicated2; + +DETACH TABLE compression_codec_replicated1; +ATTACH TABLE compression_codec_replicated1; + +SELECT count(*) FROM compression_codec_replicated1 WHERE id = 2 GROUP BY id; +SELECT count(*) FROM compression_codec_replicated2 WHERE id = 2 GROUP BY id; + +DROP TABLE IF EXISTS compression_codec_replicated1; +DROP TABLE IF EXISTS compression_codec_replicated2; + +DROP TABLE IF EXISTS compression_codec_multiple_replicated1; +DROP TABLE IF EXISTS compression_codec_multiple_replicated2; + +SET network_compression_method = 'lz4hc'; + +CREATE TABLE compression_codec_multiple_replicated1 ( + id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, Delta(4)), + data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8)), + ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC), + somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD) +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00910/compression_codec_multiple', 
'1') ORDER BY tuple(); + +CREATE TABLE compression_codec_multiple_replicated2 ( + id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC, Delta(4)), + data String CODEC(ZSTD(2), NONE, Delta(2), LZ4HC, LZ4, LZ4, Delta(8)), + ddd Date CODEC(NONE, NONE, NONE, Delta(1), LZ4, ZSTD, LZ4HC, LZ4HC), + somenum Float64 CODEC(Delta(4), LZ4, LZ4, ZSTD(2), LZ4HC(5), ZSTD(3), ZSTD) +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00910/compression_codec_multiple', '2') ORDER BY tuple(); + + +INSERT INTO compression_codec_multiple_replicated2 VALUES (1, 'world', toDate('2018-10-05'), 1.1), (2, 'hello', toDate('2018-10-01'), 2.2), (3, 'buy', toDate('2018-10-11'), 3.3); + +SYSTEM SYNC REPLICA compression_codec_multiple_replicated1; + +SELECT * FROM compression_codec_multiple_replicated2 ORDER BY id; +SELECT * FROM compression_codec_multiple_replicated1 ORDER BY id; + +INSERT INTO compression_codec_multiple_replicated1 select modulo(number, 100), toString(number), toDate('2018-12-01'), 5.5 * number FROM system.numbers limit 10000; + +SYSTEM SYNC REPLICA compression_codec_multiple_replicated2; + +SELECT count(*) FROM compression_codec_multiple_replicated1; +SELECT count(*) FROM compression_codec_multiple_replicated2; + +SELECT count(distinct data) FROM compression_codec_multiple_replicated1; +SELECT count(distinct data) FROM compression_codec_multiple_replicated2; + +SELECT floor(sum(somenum), 1) FROM compression_codec_multiple_replicated1; +SELECT floor(sum(somenum), 1) FROM compression_codec_multiple_replicated2; + +TRUNCATE TABLE compression_codec_multiple_replicated1; +SYSTEM SYNC REPLICA compression_codec_multiple_replicated2; + +INSERT INTO compression_codec_multiple_replicated1 select modulo(number, 100), toString(number), toDate('2018-12-01'), 5.5 * number FROM system.numbers limit 10000; + +SYSTEM SYNC REPLICA compression_codec_multiple_replicated2; + +SELECT sum(cityHash64(*)) FROM compression_codec_multiple_replicated2; +SELECT sum(cityHash64(*)) FROM compression_codec_multiple_replicated1; + +DROP TABLE IF EXISTS compression_codec_multiple_replicated1; +DROP TABLE IF EXISTS compression_codec_multiple_replicated2; + +DROP TABLE IF EXISTS compression_codec_multiple_more_types_replicated; + +CREATE TABLE compression_codec_multiple_more_types_replicated ( + id Decimal128(13) CODEC(ZSTD, LZ4, ZSTD, ZSTD, Delta(2), Delta(4), Delta(1), LZ4HC), + data FixedString(12) CODEC(ZSTD, ZSTD, Delta(1), Delta(1), Delta(1), NONE, NONE, NONE, LZ4HC), + ddd Nested (age UInt8, Name String) CODEC(LZ4, LZ4HC, NONE, NONE, NONE, ZSTD, Delta(8)) +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00910/compression_codec_multiple_more_types_replicated', '1') ORDER BY tuple(); + +SHOW CREATE TABLE compression_codec_multiple_more_types_replicated; + +INSERT INTO compression_codec_multiple_more_types_replicated VALUES(1.5555555555555, 'hello world!', [77], ['John']); +INSERT INTO compression_codec_multiple_more_types_replicated VALUES(7.1, 'xxxxxxxxxxxx', [127], ['Henry']); + +SELECT * FROM compression_codec_multiple_more_types_replicated order by id; + +DROP TABLE IF EXISTS compression_codec_multiple_with_key_replicated; + +SET network_compression_method = 'zstd'; +SET network_zstd_compression_level = 5; + +CREATE TABLE compression_codec_multiple_with_key_replicated ( + somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12), Delta, Delta), + id UInt64 CODEC(LZ4, ZSTD, Delta, NONE, LZ4HC, Delta), + data String CODEC(ZSTD(2), Delta(1), LZ4HC, NONE, LZ4, LZ4) +) ENGINE = 
ReplicatedMergeTree('/clickhouse/tables/{database}/test_00910/compression_codec_multiple_with_key_replicated', '1') PARTITION BY somedate ORDER BY id SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi'; + + +INSERT INTO compression_codec_multiple_with_key_replicated VALUES(toDate('2018-10-12'), 100000, 'hello'), (toDate('2018-10-12'), 100002, 'world'), (toDate('2018-10-12'), 1111, '!'); + +SELECT data FROM compression_codec_multiple_with_key_replicated WHERE id BETWEEN 3 AND 1112; + +INSERT INTO compression_codec_multiple_with_key_replicated SELECT toDate('2018-10-12'), number, toString(number) FROM system.numbers LIMIT 1000; + +SELECT COUNT(DISTINCT data) FROM compression_codec_multiple_with_key_replicated WHERE id < 222; + +DROP TABLE IF EXISTS compression_codec_multiple_with_key_replicated; +DROP TABLE compression_codec_multiple_more_types_replicated; diff --git a/parser/testdata/00910_zookeeper_test_alter_compression_codecs_long/ast.json b/parser/testdata/00910_zookeeper_test_alter_compression_codecs_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00910_zookeeper_test_alter_compression_codecs_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00910_zookeeper_test_alter_compression_codecs_long/metadata.json b/parser/testdata/00910_zookeeper_test_alter_compression_codecs_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00910_zookeeper_test_alter_compression_codecs_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00910_zookeeper_test_alter_compression_codecs_long/query.sql b/parser/testdata/00910_zookeeper_test_alter_compression_codecs_long/query.sql new file mode 100644 index 000000000..ffc1e0906 --- /dev/null +++ b/parser/testdata/00910_zookeeper_test_alter_compression_codecs_long/query.sql @@ -0,0 +1,71 @@ +-- Tags: long, zookeeper + +SET send_logs_level = 'fatal'; +SET replication_alter_partitions_sync = 2; + +DROP TABLE IF EXISTS alter_compression_codec1; +DROP TABLE IF EXISTS alter_compression_codec2; + +CREATE TABLE alter_compression_codec1 ( + somedate Date CODEC(LZ4), + id UInt64 CODEC(NONE) +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00910/'||currentDatabase()||'alter_compression_codecs/{shard}', '1_{replica}') PARTITION BY somedate ORDER BY id; + +CREATE TABLE alter_compression_codec2 ( + somedate Date CODEC(LZ4), + id UInt64 CODEC(NONE) +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00910/'||currentDatabase()||'alter_compression_codecs/{shard}', '2_{replica}') PARTITION BY somedate ORDER BY id; + +INSERT INTO alter_compression_codec1 VALUES('2018-01-01', 1); +INSERT INTO alter_compression_codec1 VALUES('2018-01-01', 2); +SYSTEM SYNC REPLICA alter_compression_codec2; + +SELECT * FROM alter_compression_codec1 ORDER BY id; +SELECT * FROM alter_compression_codec2 ORDER BY id; + +ALTER TABLE alter_compression_codec1 ADD COLUMN alter_column String DEFAULT 'default_value' CODEC(ZSTD); +SYSTEM SYNC REPLICA alter_compression_codec1; +SYSTEM SYNC REPLICA alter_compression_codec2; + +SELECT compression_codec FROM system.columns WHERE table = 'alter_compression_codec1' AND name = 'alter_column' AND database = currentDatabase(); +SELECT compression_codec FROM system.columns WHERE table = 'alter_compression_codec2' AND name = 'alter_column' AND database = currentDatabase(); + +INSERT INTO alter_compression_codec1 VALUES('2018-01-01', 3, '3'); +INSERT INTO alter_compression_codec1 
VALUES('2018-01-01', 4, '4'); +SYSTEM SYNC REPLICA alter_compression_codec1; +SYSTEM SYNC REPLICA alter_compression_codec2; + +SELECT * FROM alter_compression_codec1 ORDER BY id; +SELECT * FROM alter_compression_codec2 ORDER BY id; + +ALTER TABLE alter_compression_codec1 MODIFY COLUMN alter_column CODEC(NONE); +SELECT compression_codec FROM system.columns WHERE table = 'alter_compression_codec1' AND name = 'alter_column' AND database = currentDatabase(); +SELECT compression_codec FROM system.columns WHERE table = 'alter_compression_codec2' AND name = 'alter_column' AND database = currentDatabase(); + +INSERT INTO alter_compression_codec2 VALUES('2018-01-01', 5, '5'); +INSERT INTO alter_compression_codec2 VALUES('2018-01-01', 6, '6'); +SYSTEM SYNC REPLICA alter_compression_codec1; +SELECT * FROM alter_compression_codec1 ORDER BY id; +SELECT * FROM alter_compression_codec2 ORDER BY id; + +SET allow_suspicious_codecs = 1; +ALTER TABLE alter_compression_codec1 MODIFY COLUMN alter_column CODEC(ZSTD, LZ4HC, LZ4, LZ4, NONE); +SYSTEM SYNC REPLICA alter_compression_codec1; +SYSTEM SYNC REPLICA alter_compression_codec2; +SELECT compression_codec FROM system.columns WHERE table = 'alter_compression_codec1' AND name = 'alter_column' AND database = currentDatabase(); +SELECT compression_codec FROM system.columns WHERE table = 'alter_compression_codec2' AND name = 'alter_column' AND database = currentDatabase(); + +INSERT INTO alter_compression_codec1 VALUES('2018-01-01', 7, '7'); +INSERT INTO alter_compression_codec2 VALUES('2018-01-01', 8, '8'); +SYSTEM SYNC REPLICA alter_compression_codec2; +SYSTEM SYNC REPLICA alter_compression_codec1; +SELECT * FROM alter_compression_codec1 ORDER BY id; +SELECT * FROM alter_compression_codec2 ORDER BY id; + +ALTER TABLE alter_compression_codec1 MODIFY COLUMN alter_column FixedString(100); +SYSTEM SYNC REPLICA alter_compression_codec2; +SELECT compression_codec FROM system.columns WHERE table = 'alter_compression_codec1' AND name = 'alter_column' AND database = currentDatabase(); +SELECT compression_codec FROM system.columns WHERE table = 'alter_compression_codec2' AND name = 'alter_column' AND database = currentDatabase(); + +DROP TABLE IF EXISTS alter_compression_codec1; +DROP TABLE IF EXISTS alter_compression_codec2; diff --git a/parser/testdata/00911_tautological_compare/ast.json b/parser/testdata/00911_tautological_compare/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00911_tautological_compare/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00911_tautological_compare/metadata.json b/parser/testdata/00911_tautological_compare/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00911_tautological_compare/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00911_tautological_compare/query.sql b/parser/testdata/00911_tautological_compare/query.sql new file mode 100644 index 000000000..f1b3fc6b6 --- /dev/null +++ b/parser/testdata/00911_tautological_compare/query.sql @@ -0,0 +1,43 @@ +-- Tags: no-fasttest + +-- TODO: The tautological-comparison optimization breaks JIT expression compilation, because it can return a constant result +-- for non-constant columns, and sample blocks from the same ActionsDAG can then be mismatched. +-- This optimization cannot be performed at the AST rewrite level, because we do not have type information there, +-- and equals(tuple(NULL), tuple(NULL)) has the same hash code on both sides but must not be optimized.
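+-- An illustrative case (not from the original test): both sides of the comparison below hash +-- identically, yet the result should be NULL rather than 1, so folding it to a constant would be wrong: +-- SELECT tuple(NULL) = tuple(NULL); -- expected to return NULL, not 1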
+-- Re-enable this test after the refactoring of InterpreterSelectQuery. + +-- SELECT count() FROM system.numbers WHERE number != number; +-- SELECT count() FROM system.numbers WHERE number < number; +-- SELECT count() FROM system.numbers WHERE number > number; + +-- SELECT count() FROM system.numbers WHERE NOT (number = number); +-- SELECT count() FROM system.numbers WHERE NOT (number <= number); +-- SELECT count() FROM system.numbers WHERE NOT (number >= number); + +-- SELECT count() FROM system.numbers WHERE SHA256(toString(number)) != SHA256(toString(number)); +-- SELECT count() FROM system.numbers WHERE SHA256(toString(number)) != SHA256(toString(number)) AND rand() > 10; + +-- column_column_comparison.xml +-- (the commented-out XML body of that performance test is elided here) diff --git a/parser/testdata/00912_string_comparison/ast.json b/parser/testdata/00912_string_comparison/ast.json new file mode 100644 index 000000000..7c3f7bf8d --- /dev/null +++ b/parser/testdata/00912_string_comparison/ast.json @@ -0,0 +1,154 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 3)" + }, + { + "explain": "   ExpressionList (children 3)" + }, + { + "explain": "    Function substring (alias prefix) (children 1)" + }, + { + "explain": "     ExpressionList (children 3)" + }, + { + "explain": "      Literal 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'" + }, + { + "explain": "      Literal UInt64_1" + }, + { + "explain": "      Identifier number" + }, + { + "explain": "    Function concat (alias a) (children 1)" + }, + { + "explain": "     ExpressionList (children 2)" + }, + { + "explain": "      Identifier prefix" + }, + { + "explain": "      Literal 'x'" + }, + { + "explain": "    Function concat (alias b) (children 1)" + }, + { + "explain": "     ExpressionList (children 2)" + }, + { + "explain": "      Identifier prefix" + }, + { + "explain": "      Literal 'y'" + }, + { + "explain": "   ExpressionList (children 5)" + }, + { + "explain": "    Function equals (children 1)" + }, + { + "explain": "     ExpressionList (children 2)" + }, + { + "explain": "      Identifier a" + }, + { + "explain": "      Identifier b" + }, + { + "explain": "    Function less (children 1)" + }, + { + "explain": "     ExpressionList (children 2)" + }, + { + "explain": "      Identifier a" + }, + { + "explain": "      Identifier b" + }, + { + "explain": "    Function greater (children 1)" + }, + { + "explain": "     ExpressionList (children 2)" + }, + { + "explain": "      Identifier a" + }, + { + "explain": "      Identifier b" + }, + { + "explain": "    Function lessOrEquals (children 1)" + }, + { + "explain": "     ExpressionList (children 2)" + }, + { + "explain": "      Identifier a" + }, + { + "explain": "      Identifier b" + }, + { + "explain": "    Function greaterOrEquals (children 1)" + }, + { + "explain": "     ExpressionList (children 2)" + }, + { + "explain": "      Identifier a" + }, + { + "explain": "      Identifier b" + }, + { + "explain": "   TablesInSelectQuery (children 1)" + }, + { + "explain": "    TablesInSelectQueryElement (children 1)" + }, + { + "explain": "     TableExpression (children 1)" + }, + { + "explain": "      Function numbers (children 1)" + }, + { + "explain": "       ExpressionList (children 1)" + }, + { + "explain": "        Literal UInt64_40" + } + ], + + "rows": 44, + + "statistics": + { + "elapsed": 0.001927177, + "rows_read": 44, + "bytes_read": 1630 + } +} diff --git a/parser/testdata/00912_string_comparison/metadata.json
b/parser/testdata/00912_string_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00912_string_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00912_string_comparison/query.sql b/parser/testdata/00912_string_comparison/query.sql new file mode 100644 index 000000000..089ec4ab3 --- /dev/null +++ b/parser/testdata/00912_string_comparison/query.sql @@ -0,0 +1,18 @@ +WITH substring('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 1, number) AS prefix, prefix || 'x' AS a, prefix || 'y' AS b SELECT a = b, a < b, a > b, a <= b, a >= b FROM numbers(40); +WITH substring('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 1, number) AS prefix, prefix || 'y' AS a, prefix || 'x' AS b SELECT a = b, a < b, a > b, a <= b, a >= b FROM numbers(40); +WITH substring('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 1, number) AS prefix, prefix || 'x' AS a, prefix || 'x' AS b SELECT a = b, a < b, a > b, a <= b, a >= b FROM numbers(40); + +WITH substring('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 1, number) AS prefix, prefix || 'x' || prefix AS a, prefix || 'y' || prefix AS b SELECT a = b, a < b, a > b, a <= b, a >= b FROM numbers(40); +WITH substring('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 1, number) AS prefix, prefix || 'y' || prefix AS a, prefix || 'x' || prefix AS b SELECT a = b, a < b, a > b, a <= b, a >= b FROM numbers(40); +WITH substring('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 1, number) AS prefix, prefix || 'x' || prefix AS a, prefix || 'x' || prefix AS b SELECT a = b, a < b, a > b, a <= b, a >= b FROM numbers(40); + +WITH substring('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 1, number) AS prefix, prefix || 'x' || prefix AS a, prefix || 'y' AS b SELECT a = b, a < b, a > b, a <= b, a >= b FROM numbers(40); +WITH substring('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 1, number) AS prefix, prefix || 'y' || prefix AS a, prefix || 'x' AS b SELECT a = b, a < b, a > b, a <= b, a >= b FROM numbers(40); +WITH substring('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 1, number) AS prefix, prefix || 'x' || prefix AS a, prefix || 'x' AS b SELECT a = b, a < b, a > b, a <= b, a >= b FROM numbers(40); + +WITH arrayJoin(['aaa', 'bbb']) AS a, 'aaa\0bbb' AS b SELECT a = b, a < b, a > b, a <= b, a >= b; +WITH arrayJoin(['aaa', 'zzz']) AS a, 'aaa\0bbb' AS b SELECT a = b, a < b, a > b, a <= b, a >= b; +WITH arrayJoin(['aaa', 'bbb']) AS a, materialize('aaa\0bbb') AS b SELECT a = b, a < b, a > b, a <= b, a >= b; +WITH arrayJoin(['aaa', 'zzz']) AS a, materialize('aaa\0bbb') AS b SELECT a = b, a < b, a > b, a <= b, a >= b; + +SELECT empty(toFixedString('', 1 + randConstant() % 100)); diff --git a/parser/testdata/00913_many_threads/ast.json b/parser/testdata/00913_many_threads/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00913_many_threads/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00913_many_threads/metadata.json b/parser/testdata/00913_many_threads/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00913_many_threads/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00913_many_threads/query.sql b/parser/testdata/00913_many_threads/query.sql new file mode 100644 index 000000000..431423bb3 --- /dev/null +++ b/parser/testdata/00913_many_threads/query.sql @@ -0,0 +1,15 @@ +-- Tags: no-parallel + +-- This test creates many threads to cover the case where the ThreadPool removes some threads from the pool after a job is done.
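+-- (Descriptive note, assuming standard ClickHouse semantics: max_block_size = 1 splits the 1500 inserted rows +-- into 1500 single-row blocks, max_threads = 1500 lets the pool grow to roughly one thread per block, and the +-- sum(sleep(0.1)) below keeps every thread busy long enough for the whole pool to be created before it shrinks.)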
job is done. +SET max_block_size = 1, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; + +CREATE TEMPORARY TABLE t (x UInt64); +INSERT INTO t SELECT * FROM system.numbers LIMIT 1500; + +SELECT DISTINCT blockSize() FROM t; + +SET max_threads = 1500; + +SELECT count() FROM t; +SELECT sum(sleep(0.1)) FROM t; -- All threads have time to be created. +SELECT 'Ok.'; diff --git a/parser/testdata/00914_join_bgranvea/ast.json b/parser/testdata/00914_join_bgranvea/ast.json new file mode 100644 index 000000000..dbae65941 --- /dev/null +++ b/parser/testdata/00914_join_bgranvea/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table1 (children 1)" + }, + { + "explain": " Identifier table1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001395627, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/00914_join_bgranvea/metadata.json b/parser/testdata/00914_join_bgranvea/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00914_join_bgranvea/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00914_join_bgranvea/query.sql b/parser/testdata/00914_join_bgranvea/query.sql new file mode 100644 index 000000000..11cd082a6 --- /dev/null +++ b/parser/testdata/00914_join_bgranvea/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS table1; +DROP TABLE IF EXISTS table2; + +CREATE TABLE table1 (A String, B String, ts DateTime) ENGINE = MergeTree PARTITION BY toStartOfDay(ts) ORDER BY (ts, A, B); +CREATE TABLE table2 (B String, ts DateTime) ENGINE = MergeTree PARTITION BY toStartOfDay(ts) ORDER BY (ts, B); + +insert into table1 values('a1','b1','2019-02-05 16:50:00'),('a1','b1','2019-02-05 16:55:00'); +insert into table2 values('b1','2019-02-05 16:50:00'),('b1','2019-02-05 16:55:00'); + +SELECT t1.B, t2.B FROM table1 t1 ALL INNER JOIN table2 t2 ON t1.B = t2.B ORDER BY t1.B, t2.B; + +DROP TABLE table1; +DROP TABLE table2; diff --git a/parser/testdata/00914_replicate/ast.json b/parser/testdata/00914_replicate/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00914_replicate/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00914_replicate/metadata.json b/parser/testdata/00914_replicate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00914_replicate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00914_replicate/query.sql b/parser/testdata/00914_replicate/query.sql new file mode 100644 index 000000000..0e6ac2bd3 --- /dev/null +++ b/parser/testdata/00914_replicate/query.sql @@ -0,0 +1,3 @@ +-- Tags: replica + +SELECT CAST(replicate(['a'], [1]) AS String); diff --git a/parser/testdata/00915_simple_aggregate_function/ast.json b/parser/testdata/00915_simple_aggregate_function/ast.json new file mode 100644 index 000000000..809558bb9 --- /dev/null +++ b/parser/testdata/00915_simple_aggregate_function/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000971321, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00915_simple_aggregate_function/metadata.json b/parser/testdata/00915_simple_aggregate_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/00915_simple_aggregate_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00915_simple_aggregate_function/query.sql b/parser/testdata/00915_simple_aggregate_function/query.sql new file mode 100644 index 000000000..535a2562e --- /dev/null +++ b/parser/testdata/00915_simple_aggregate_function/query.sql @@ -0,0 +1,62 @@ +set optimize_throw_if_noop = 1; + +-- basic test +drop table if exists simple; + +create table simple (id UInt64,val SimpleAggregateFunction(sum,Double)) engine=AggregatingMergeTree order by id; +insert into simple select number,number from system.numbers limit 10; + +select * from simple; +select * from simple final order by id; +select toTypeName(val) from simple limit 1; + +-- merge +insert into simple select number,number from system.numbers limit 10; + +select * from simple final order by id; + +optimize table simple final; +select * from simple; + +-- complex types +drop table if exists simple; + +create table simple ( + id UInt64, + nullable_str SimpleAggregateFunction(anyLast,Nullable(String)), + nullable_str_respect_nulls SimpleAggregateFunction(anyLastRespectNulls,Nullable(String)), + low_str SimpleAggregateFunction(anyLast,LowCardinality(Nullable(String))), + ip SimpleAggregateFunction(anyLast,IPv4), + status SimpleAggregateFunction(groupBitOr, UInt32), + tup SimpleAggregateFunction(sumMap, Tuple(Array(Int32), Array(Int64))), + tup_min SimpleAggregateFunction(minMap, Tuple(Array(Int32), Array(Int64))), + tup_max SimpleAggregateFunction(maxMap, Tuple(Array(Int32), Array(Int64))), + arr SimpleAggregateFunction(groupArrayArray, Array(Int32)), + uniq_arr SimpleAggregateFunction(groupUniqArrayArray, Array(Int32)), + map_uniq_arr SimpleAggregateFunction(groupUniqArrayArrayMap, Map(Int32, Array(Int64))) +) engine=AggregatingMergeTree order by id; +insert into simple values(1,'1','1','1','1.1.1.1', 1, ([1,2], [1,1]), ([1,2], [1,1]), ([1,2], [1,1]), [1,2], [1,2], {1: [1,2], 2: [5,6]}); +insert into simple values(1,null,null,'2','2.2.2.2', 2, ([1,3], [1,1]), ([1,3], [2,2]), ([1,3], [2,2]), [2,3,4], [2,3,4], {1: [2,3], 2: [4,5,6]}); +-- String longer than MAX_SMALL_STRING_SIZE (actual string length is 100) +insert into simple values(10,'10',null,'10','10.10.10.10', 4, ([2,3], [1,1]), ([2,3], [3,3]), ([2,3], [3,3]), [], [], {}); +insert into simple values(10,'2222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222','10','20','20.20.20.20', 1, ([2, 4], [1,1]), ([2, 4], [4,4]), ([2, 4], [4,4]), [], [], {}); + +select * from simple final order by id; +select toTypeName(nullable_str),toTypeName(nullable_str_respect_nulls),toTypeName(low_str),toTypeName(ip),toTypeName(status), toTypeName(tup), toTypeName(tup_min), toTypeName(tup_max), toTypeName(arr), toTypeName(uniq_arr), toTypeName(map_uniq_arr) from simple limit 1; + +optimize table simple final; + +drop table simple; + +drop table if exists with_overflow; +create table with_overflow ( + id UInt64, + s SimpleAggregateFunction(sumWithOverflow, UInt8) +) engine AggregatingMergeTree order by id; + +insert into with_overflow select 1, 1 from numbers(256); + +optimize table with_overflow final; + +select 'with_overflow', * from with_overflow; +drop table with_overflow; diff --git a/parser/testdata/00915_simple_aggregate_function_summing_merge_tree/ast.json b/parser/testdata/00915_simple_aggregate_function_summing_merge_tree/ast.json new file mode 100644 index 000000000..92e69b614 --- /dev/null +++ 
b/parser/testdata/00915_simple_aggregate_function_summing_merge_tree/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001293726, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00915_simple_aggregate_function_summing_merge_tree/metadata.json b/parser/testdata/00915_simple_aggregate_function_summing_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00915_simple_aggregate_function_summing_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00915_simple_aggregate_function_summing_merge_tree/query.sql b/parser/testdata/00915_simple_aggregate_function_summing_merge_tree/query.sql new file mode 100644 index 000000000..d870351b0 --- /dev/null +++ b/parser/testdata/00915_simple_aggregate_function_summing_merge_tree/query.sql @@ -0,0 +1,62 @@ +set optimize_throw_if_noop = 1; + +-- basic test +drop table if exists simple; + +create table simple (id UInt64,val SimpleAggregateFunction(sum,Double)) engine=SummingMergeTree order by id; +insert into simple select number,number from system.numbers limit 10; + +select * from simple; +select * from simple final order by id; +select toTypeName(val) from simple limit 1; + +-- merge +insert into simple select number,number from system.numbers limit 10; + +select * from simple final order by id; + +optimize table simple final; +select * from simple; + +-- complex types +drop table if exists simple; + +create table simple ( + id UInt64, + nullable_str SimpleAggregateFunction(anyLast,Nullable(String)), + nullable_str_respect_nulls SimpleAggregateFunction(anyLastRespectNulls,Nullable(String)), + low_str SimpleAggregateFunction(anyLast,LowCardinality(Nullable(String))), + ip SimpleAggregateFunction(anyLast,IPv4), + status SimpleAggregateFunction(groupBitOr, UInt32), + tup SimpleAggregateFunction(sumMap, Tuple(Array(Int32), Array(Int64))), + tup_min SimpleAggregateFunction(minMap, Tuple(Array(Int32), Array(Int64))), + tup_max SimpleAggregateFunction(maxMap, Tuple(Array(Int32), Array(Int64))), + arr SimpleAggregateFunction(groupArrayArray, Array(Int32)), + uniq_arr SimpleAggregateFunction(groupUniqArrayArray, Array(Int32)), + map_uniq_arr SimpleAggregateFunction(groupUniqArrayArrayMap, Map(Int32, Array(Int64))) +) engine=SummingMergeTree order by id; +insert into simple values(1,'1','1','1','1.1.1.1', 1, ([1,2], [1,1]), ([1,2], [1,1]), ([1,2], [1,1]), [1,2], [1,2], {1: [1,2], 2: [5,6]}); +insert into simple values(1,null,null,'2','2.2.2.2', 2, ([1,3], [1,1]), ([1,3], [2,2]), ([1,3], [2,2]), [2,3,4], [2,3,4], {1: [2,3], 2: [4,5,6]}); +-- String longer than MAX_SMALL_STRING_SIZE (actual string length is 100) +insert into simple values(10,'10',null,'10','10.10.10.10', 4, ([2,3], [1,1]), ([2,3], [3,3]), ([2,3], [3,3]), [], [], {}); +insert into simple values(10,'2222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222','10','20','20.20.20.20', 1, ([2, 4], [1,1]), ([2, 4], [4,4]), ([2, 4], [4,4]), [], [], {}); + +select * from simple final order by id; +select toTypeName(nullable_str),toTypeName(nullable_str_respect_nulls),toTypeName(low_str),toTypeName(ip),toTypeName(status), toTypeName(tup), toTypeName(tup_min), toTypeName(tup_max), toTypeName(arr), toTypeName(uniq_arr), toTypeName(map_uniq_arr) from simple limit 1; + +optimize table simple final; + +drop table simple;
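-- An aside for readers comparing this test with 00915_simple_aggregate_function above
-- (editorial note, not part of the committed file): SimpleAggregateFunction(f, T) stores
-- plain values of T and simply re-applies f whenever parts merge, which is why the
-- identical column set can be exercised under both AggregatingMergeTree and
-- SummingMergeTree. A minimal hedged sketch of that folding behaviour, assuming the
-- table name sa_demo is unused:
--
--   create table sa_demo (k UInt64, v SimpleAggregateFunction(max, UInt64))
--       engine = AggregatingMergeTree order by k;
--   insert into sa_demo values (1, 3);
--   insert into sa_demo values (1, 7);
--   optimize table sa_demo final;
--   select * from sa_demo;  -- one row: (1, 7), i.e. max applied to the plain stored values
--   drop table sa_demo;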
+ +drop table if exists with_overflow; +create table with_overflow ( + id UInt64, + s SimpleAggregateFunction(sumWithOverflow, UInt8) +) engine SummingMergeTree order by id; + +insert into with_overflow select 1, 1 from numbers(256); + +optimize table with_overflow final; + +select 'with_overflow', * from with_overflow; +drop table with_overflow; diff --git a/parser/testdata/00915_tuple_orantius/ast.json b/parser/testdata/00915_tuple_orantius/ast.json new file mode 100644 index 000000000..e7539b78a --- /dev/null +++ b/parser/testdata/00915_tuple_orantius/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1 (alias x)" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_2, UInt64_3) (alias y)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001388997, + "rows_read": 10, + "bytes_read": 381 + } +} diff --git a/parser/testdata/00915_tuple_orantius/metadata.json b/parser/testdata/00915_tuple_orantius/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00915_tuple_orantius/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00915_tuple_orantius/query.sql b/parser/testdata/00915_tuple_orantius/query.sql new file mode 100644 index 000000000..938260c51 --- /dev/null +++ b/parser/testdata/00915_tuple_orantius/query.sql @@ -0,0 +1 @@ +select 1 as x, (1,2,3) as y, x in y; diff --git a/parser/testdata/00916_add_materialized_column_after/ast.json b/parser/testdata/00916_add_materialized_column_after/ast.json new file mode 100644 index 000000000..f9b95014f --- /dev/null +++ b/parser/testdata/00916_add_materialized_column_after/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery add_materialized_column_after (children 1)" + }, + { + "explain": " Identifier add_materialized_column_after" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001071529, + "rows_read": 2, + "bytes_read": 110 + } +} diff --git a/parser/testdata/00916_add_materialized_column_after/metadata.json b/parser/testdata/00916_add_materialized_column_after/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00916_add_materialized_column_after/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00916_add_materialized_column_after/query.sql b/parser/testdata/00916_add_materialized_column_after/query.sql new file mode 100644 index 000000000..6a1f9bca9 --- /dev/null +++ b/parser/testdata/00916_add_materialized_column_after/query.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS add_materialized_column_after; + +CREATE TABLE add_materialized_column_after (x UInt32, z UInt64) ENGINE MergeTree ORDER BY x; +ALTER TABLE add_materialized_column_after ADD COLUMN y String MATERIALIZED toString(x) AFTER x; + +DESC TABLE add_materialized_column_after; + +DROP TABLE add_materialized_column_after; diff --git a/parser/testdata/00916_create_or_replace_view/ast.json b/parser/testdata/00916_create_or_replace_view/ast.json new file mode 100644 
index 000000000..c96f75fdc --- /dev/null +++ b/parser/testdata/00916_create_or_replace_view/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001225972, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/00916_create_or_replace_view/metadata.json b/parser/testdata/00916_create_or_replace_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00916_create_or_replace_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00916_create_or_replace_view/query.sql b/parser/testdata/00916_create_or_replace_view/query.sql new file mode 100644 index 000000000..adb046020 --- /dev/null +++ b/parser/testdata/00916_create_or_replace_view/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS t; + +CREATE OR REPLACE VIEW t (number UInt64) AS SELECT number FROM system.numbers; +SHOW CREATE TABLE t; + +CREATE OR REPLACE VIEW t AS SELECT number+1 AS next_number FROM system.numbers; +SHOW CREATE TABLE t; + +DROP TABLE t; diff --git a/parser/testdata/00916_join_using_duplicate_columns/ast.json b/parser/testdata/00916_join_using_duplicate_columns/ast.json new file mode 100644 index 000000000..dc9387fb1 --- /dev/null +++ b/parser/testdata/00916_join_using_duplicate_columns/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00121557, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00916_join_using_duplicate_columns/metadata.json b/parser/testdata/00916_join_using_duplicate_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00916_join_using_duplicate_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00916_join_using_duplicate_columns/query.sql b/parser/testdata/00916_join_using_duplicate_columns/query.sql new file mode 100644 index 000000000..c1713ee44 --- /dev/null +++ b/parser/testdata/00916_join_using_duplicate_columns/query.sql @@ -0,0 +1,32 @@ +SET any_join_distinct_right_table_keys = 1; +SET joined_subquery_requires_alias = 0; + +SELECT * FROM (SELECT 1 AS x) ALL LEFT JOIN (SELECT 1 AS x) USING x; +SELECT * FROM (SELECT 1 AS x) ALL LEFT JOIN (SELECT 2 AS x) USING x; + +SELECT * FROM (SELECT 1 AS x) AS t1 ALL LEFT JOIN (SELECT 1 AS x) AS t2 USING x; +SELECT * FROM (SELECT 1 AS x) AS t1 ALL LEFT JOIN (SELECT 2 AS x) AS t2 USING x; + +SELECT * FROM (SELECT 1 AS x) AS t1 ALL LEFT JOIN (SELECT 1 AS x) AS t2 ON t1.x = t2.x; +SELECT * FROM (SELECT 1 AS x) AS t1 ALL LEFT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x; +SELECT * FROM (SELECT materialize(1) AS x) AS t1 ALL LEFT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x; +SELECT * FROM (SELECT 1 AS x) AS t1 ALL LEFT JOIN (SELECT materialize(2) AS x) AS t2 ON t1.x = t2.x; + +SELECT * FROM (SELECT 1 AS x) AS t1 ANY LEFT JOIN (SELECT 1 AS x) AS t2 ON t1.x = t2.x; +SELECT * FROM (SELECT 1 AS x) AS t1 ANY LEFT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x; +SELECT * FROM (SELECT materialize(1) AS x) AS t1 ANY LEFT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x; +SELECT * FROM (SELECT 1 AS x) AS t1 ANY LEFT JOIN (SELECT materialize(2) AS x) AS t2 ON t1.x = t2.x; + +SELECT * FROM (SELECT 1 AS x) AS t1 ALL RIGHT JOIN (SELECT 1 AS x) AS t2 ON t1.x = 
t2.x; +SELECT * FROM (SELECT 1 AS x) AS t1 ALL RIGHT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x; +SELECT * FROM (SELECT materialize(1) AS x) AS t1 ALL RIGHT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x; +SELECT * FROM (SELECT 1 AS x) AS t1 ALL RIGHT JOIN (SELECT materialize(2) AS x) AS t2 ON t1.x = t2.x; + +SELECT * FROM (SELECT 1 AS x) AS t1 ANY RIGHT JOIN (SELECT 1 AS x) AS t2 ON t1.x = t2.x; +SELECT * FROM (SELECT 1 AS x) AS t1 ANY RIGHT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x; +SELECT * FROM (SELECT materialize(1) AS x) AS t1 ANY RIGHT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x; +SELECT * FROM (SELECT 1 AS x) AS t1 ANY RIGHT JOIN (SELECT materialize(2) AS x) AS t2 ON t1.x = t2.x; + +-- SET join_use_nulls = 1; +-- SELECT * FROM (SELECT 1 AS x) AS t1 ALL LEFT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x; +-- SELECT * FROM (SELECT 1 AS x) AS t1 ALL RIGHT JOIN (SELECT 2 AS x) AS t2 ON t1.x = t2.x; diff --git a/parser/testdata/00917_least_sqr/ast.json b/parser/testdata/00917_least_sqr/ast.json new file mode 100644 index 000000000..bed5117da --- /dev/null +++ b/parser/testdata/00917_least_sqr/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayReduce (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'simpleLinearRegression'" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3, UInt64_4]" + }, + { + "explain": " Literal Array_[UInt64_100, UInt64_110, UInt64_120, UInt64_130]" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.00126065, + "rows_read": 9, + "bytes_read": 424 + } +} diff --git a/parser/testdata/00917_least_sqr/metadata.json b/parser/testdata/00917_least_sqr/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00917_least_sqr/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00917_least_sqr/query.sql b/parser/testdata/00917_least_sqr/query.sql new file mode 100644 index 000000000..5fa018302 --- /dev/null +++ b/parser/testdata/00917_least_sqr/query.sql @@ -0,0 +1,9 @@ +select arrayReduce('simpleLinearRegression', [1, 2, 3, 4], [100, 110, 120, 130]); +select arrayReduce('simpleLinearRegression', [1, 2, 3, 4], [100, 110, 120, 131]); +select arrayReduce('simpleLinearRegression', [-1, -2, -3, -4], [-100, -110, -120, -130]); +select arrayReduce('simpleLinearRegression', [5, 5.1], [6, 6.1]); +select arrayReduce('simpleLinearRegression', [0], [0]); +select arrayReduce('simpleLinearRegression', [3, 4], [3, 3]); +select arrayReduce('simpleLinearRegression', [3, 3], [3, 4]); +select arrayReduce('simpleLinearRegression', emptyArrayUInt8(), emptyArrayUInt8()); +select arrayReduce('simpleLinearRegression', [1, 2, 3, 4], [1000000000, 1100000000, 1200000000, 1300000000]); diff --git a/parser/testdata/00917_multiple_joins_denny_crane/ast.json b/parser/testdata/00917_multiple_joins_denny_crane/ast.json new file mode 100644 index 000000000..ef507b08e --- /dev/null +++ b/parser/testdata/00917_multiple_joins_denny_crane/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001226094, + "rows_read": 1, + "bytes_read": 11 + } +} 
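An editorial note on a pattern visible across these fixtures (not part of the diff itself): the harness appears to record EXPLAIN AST output only for the first statement of each query.sql. Tests that open with a SET statement therefore capture the single node "Set" (as in the ast.json just above, whose query.sql begins with SET joined_subquery_requires_alias = 0), tests that open with DROP TABLE capture a DropQuery node, and files whose first statement is not yet parseable fall back to {"error": true}, with {"todo": true} in metadata.json marking the fixture as pending. A hedged SQL sketch of how such a "Set" fixture is presumably regenerated against a ClickHouse server:

-- minimal sketch, assuming ClickHouse's EXPLAIN AST accepts a leading SET statement
EXPLAIN AST SET joined_subquery_requires_alias = 0;
-- expected output: a single "explain" row containing the node name
--   Set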
diff --git a/parser/testdata/00917_multiple_joins_denny_crane/metadata.json b/parser/testdata/00917_multiple_joins_denny_crane/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00917_multiple_joins_denny_crane/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00917_multiple_joins_denny_crane/query.sql b/parser/testdata/00917_multiple_joins_denny_crane/query.sql new file mode 100644 index 000000000..ff534544d --- /dev/null +++ b/parser/testdata/00917_multiple_joins_denny_crane/query.sql @@ -0,0 +1,22 @@ +SET joined_subquery_requires_alias = 0; + +DROP TABLE IF EXISTS ANIMAL; + +CREATE TABLE ANIMAL ( ANIMAL Nullable(String) ) engine = MergeTree ORDER BY tuple(); +INSERT INTO ANIMAL (ANIMAL) VALUES ('CAT'), ('FISH'), ('DOG'), ('HORSE'), ('BIRD'); + +select * from ( +select x.b x, count(distinct x.c) ANIMAL +from ( +select a.ANIMAL a, 'CAT' b, c.ANIMAL c, d.ANIMAL d +from ANIMAL a join ANIMAL b on a.ANIMAL = b.ANIMAL + left outer join ANIMAL c on (b.ANIMAL = c.ANIMAL) + right outer join (select * from ANIMAL union all select * from ANIMAL + union all select * from ANIMAL) d on (a.ANIMAL = d.ANIMAL) +where d.ANIMAL <> 'CAT' and c.ANIMAL <>'DOG' and b.ANIMAL <> 'FISH') as x +where x.b >= 'CAT' +group by x.b +having ANIMAL >= 0) ANIMAL +where ANIMAL.ANIMAL >= 0; + +DROP TABLE ANIMAL; diff --git a/parser/testdata/00918_has_unsufficient_type_check/ast.json b/parser/testdata/00918_has_unsufficient_type_check/ast.json new file mode 100644 index 000000000..238c1ed53 --- /dev/null +++ b/parser/testdata/00918_has_unsufficient_type_check/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function hasAny (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[Array_['Hello, world']]" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.000965785, + "rows_read": 13, + "bytes_read": 529 + } +} diff --git a/parser/testdata/00918_has_unsufficient_type_check/metadata.json b/parser/testdata/00918_has_unsufficient_type_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00918_has_unsufficient_type_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00918_has_unsufficient_type_check/query.sql b/parser/testdata/00918_has_unsufficient_type_check/query.sql new file mode 100644 index 000000000..4f5213a2d --- /dev/null +++ b/parser/testdata/00918_has_unsufficient_type_check/query.sql @@ -0,0 +1,3 @@ +SELECT hasAny([['Hello, world']], [[[]]]); -- { serverError NO_COMMON_TYPE } +SELECT hasAny([['Hello, world']], [['Hello', 'world'], ['Hello, world']]); +SELECT hasAll([['Hello, world']], [['Hello', 'world'], ['Hello, world']]); diff --git a/parser/testdata/00918_json_functions/ast.json b/parser/testdata/00918_json_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/00918_json_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00918_json_functions/metadata.json b/parser/testdata/00918_json_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00918_json_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00918_json_functions/query.sql b/parser/testdata/00918_json_functions/query.sql new file mode 100644 index 000000000..3d30ce841 --- /dev/null +++ b/parser/testdata/00918_json_functions/query.sql @@ -0,0 +1,334 @@ +-- Tags: no-fasttest +-- Tag: no-fasttest because only SIMD JSON is available in fasttest + +SELECT '--allow_simdjson=1--'; +SET allow_simdjson=1; + +SELECT '--JSONLength--'; +SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}'); +SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}', 'b'); +SELECT JSONLength('{}'); + +SELECT '--JSONHas--'; +SELECT JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'a'); +SELECT JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'b'); +SELECT JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'c'); + +SELECT '--isValidJSON--'; +SELECT isValidJSON('{"a": "hello", "b": [-100, 200.0, 300]}'); +SELECT isValidJSON('not a json'); +SELECT isValidJSON('"HX-='); + +SELECT '--JSONKey--'; +SELECT JSONKey('{"a": "hello", "b": [-100, 200.0, 300]}', 1); +SELECT JSONKey('{"a": "hello", "b": [-100, 200.0, 300]}', 2); +SELECT JSONKey('{"a": "hello", "b": [-100, 200.0, 300]}', -1); +SELECT JSONKey('{"a": "hello", "b": [-100, 200.0, 300]}', -2); + +SELECT '--JSONType--'; +SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}'); +SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'b'); +SELECT JSONType('{"a": true}', 'a'); + +SELECT '--JSONExtract--'; +SELECT JSONExtractInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 1); +SELECT JSONExtractFloat('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 2); +SELECT JSONExtractUInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', -1); +SELECT JSONExtractBool('{"passed": true}', 'passed'); +SELECT JSONExtractBool('"HX-='); +SELECT JSONExtractBool('-1'); + +SELECT '--JSONExtractString--'; +SELECT JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 'a'); +SELECT JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 1); +select JSONExtractString('{"abc":"\\n\\u0000"}', 'abc'); +select JSONExtractString('{"abc":"\\u263a"}', 'abc'); +select JSONExtractString('{"abc":"\\u263"}', 'abc'); +select JSONExtractString('{"abc":"hello}', 'abc'); + +SELECT '--JSONExtract (generic)--'; +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(String, Array(Float64))'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(a String, b Array(Float64))'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(b Array(Float64), a String)'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(a FixedString(6), c UInt8)'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'a', 'String'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(Float32)'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Tuple(Int8, Float32, UInt16)'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(Int8)'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(Nullable(Int8))'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(LowCardinality(Nullable(Int8)))'); +SELECT 
JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(UInt8)'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(Nullable(UInt8))'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(LowCardinality(Nullable(UInt8)))'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 1, 'Int8'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 2, 'Int32'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4, 'Nullable(Int64)'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4, 'LowCardinality(Nullable(Int64))'); +SELECT JSONExtract('{"passed": true}', 'passed', 'UInt8'); +SELECT JSONExtract('{"day": "Thursday"}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)'); +SELECT JSONExtract('{"day": 5}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)'); +SELECT JSONExtract('{"a":3,"b":5,"c":7}', 'Tuple(a Int, b Int)'); +SELECT JSONExtract('{"a":3,"b":5,"c":7}', 'Tuple(c Int, a Int)'); +SELECT JSONExtract('{"a":3,"b":5,"c":7}', 'Tuple(b Int, d Int)'); +SELECT JSONExtract('{"a":3,"b":5,"c":7}', 'Tuple(Int, Int)'); +SELECT JSONExtract('{"a":3}', 'Tuple(Int, Int)'); +SELECT JSONExtract('[3,5,7]', 'Tuple(Int, Int)'); +SELECT JSONExtract('[3]', 'Tuple(Int, Int)'); +SELECT JSONExtract('{"a":123456, "b":3.55}', 'Tuple(a LowCardinality(Int32), b Decimal(5, 2))'); +SELECT JSONExtract('{"a":1, "b":"417ddc5d-e556-4d27-95dd-a34d84e46a50"}', 'Tuple(a Int8, b UUID)'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'a', 'LowCardinality(String)'); +SELECT JSONExtract('{"a":3333.6333333333333333333333, "b":"test"}', 'Tuple(a Decimal(10,1), b LowCardinality(String))'); +SELECT JSONExtract('{"a":"3333.6333333333333333333333", "b":"test"}', 'Tuple(a Decimal(10,1), b LowCardinality(String))'); +SELECT JSONExtract('{"a":3333.6333333333333333333333, "b":"test"}', 'Tuple(a Decimal(20,10), b LowCardinality(String))'); +SELECT JSONExtract('{"a":"3333.6333333333333333333333", "b":"test"}', 'Tuple(a Decimal(20,10), b LowCardinality(String))'); +SELECT JSONExtract('{"a":123456.123456}', 'a', 'Decimal(20, 4)') as a, toTypeName(a); +SELECT JSONExtract('{"a":"123456.123456"}', 'a', 'Decimal(20, 4)') as a, toTypeName(a); +SELECT JSONExtract('{"a":"123456789012345.12"}', 'a', 'Decimal(30, 4)') as a, toTypeName(a); +SELECT JSONExtract('{"a":"1234567890.12345678901234567890", "b":"test"}', 'Tuple(a Decimal(35,20), b LowCardinality(String))') as a, toTypeName(a); +SELECT JSONExtract('{"a":"1234567890.123456789012345678901234567890", "b":"test"}', 'Tuple(a Decimal(45,30), b LowCardinality(String))') as a, toTypeName(a); +SELECT toDecimal64(123456789012345.12, 4), JSONExtract('{"a":123456789012345.12}', 'a', 'Decimal(30, 4)'); +SELECT toDecimal128(1234567890.12345678901234567890, 20), JSONExtract('{"a":1234567890.12345678901234567890, "b":"test"}', 'Tuple(a Decimal(35,20), b LowCardinality(String))'); +SELECT toDecimal256(1234567890.123456789012345678901234567890, 30), JSONExtract('{"a":1234567890.12345678901234567890, "b":"test"}', 'Tuple(a Decimal(45,30), b LowCardinality(String))'); +SELECT JSONExtract('{"a":-1234567890}', 'a', 'Int32') as a, toTypeName(a); +SELECT JSONExtract('{"a":1234567890}', 'a', 'UInt32') as a, toTypeName(a); +SELECT JSONExtract('{"a":-1234567890123456789}', 'a', 'Int64') as a, toTypeName(a); +SELECT 
JSONExtract('{"a":1234567890123456789}', 'a', 'UInt64') as a, toTypeName(a); +SELECT JSONExtract('{"a":-1234567890123456789}', 'a', 'Int128') as a, toTypeName(a); +SELECT JSONExtract('{"a":1234567890123456789}', 'a', 'UInt128') as a, toTypeName(a); +SELECT JSONExtract('{"a":-1234567890123456789}', 'a', 'Int256') as a, toTypeName(a); +SELECT JSONExtract('{"a":1234567890123456789}', 'a', 'UInt256') as a, toTypeName(a); +SELECT JSONExtract('{"a":-123456789.345}', 'a', 'Int32') as a, toTypeName(a); +SELECT JSONExtract('{"a":123456789.345}', 'a', 'UInt32') as a, toTypeName(a); +SELECT JSONExtract('{"a":-123456789012.345}', 'a', 'Int64') as a, toTypeName(a); +SELECT JSONExtract('{"a":123456789012.345}', 'a', 'UInt64') as a, toTypeName(a); +SELECT JSONExtract('{"a":-123456789012.345}', 'a', 'Int128') as a, toTypeName(a); +SELECT JSONExtract('{"a":123456789012.345}', 'a', 'UInt128') as a, toTypeName(a); +SELECT JSONExtract('{"a":-123456789012.345}', 'a', 'Int256') as a, toTypeName(a); +SELECT JSONExtract('{"a":123456789012.345}', 'a', 'UInt256') as a, toTypeName(a); +SELECT JSONExtract('{"a":"-123456789"}', 'a', 'Int32') as a, toTypeName(a); +SELECT JSONExtract('{"a":"123456789"}', 'a', 'UInt32') as a, toTypeName(a); +SELECT JSONExtract('{"a":"-1234567890123456789"}', 'a', 'Int64') as a, toTypeName(a); +SELECT JSONExtract('{"a":"1234567890123456789"}', 'a', 'UInt64') as a, toTypeName(a); +SELECT JSONExtract('{"a":"-12345678901234567890123456789012345678"}', 'a', 'Int128') as a, toTypeName(a); +SELECT JSONExtract('{"a":"12345678901234567890123456789012345678"}', 'a', 'UInt128') as a, toTypeName(a); +SELECT JSONExtract('{"a":"-11345678901234567890123456789012345678901234567890123456789012345678901234567"}', 'a', 'Int256') as a, toTypeName(a); +SELECT JSONExtract('{"a":"11345678901234567890123456789012345678901234567890123456789012345678901234567"}', 'a', 'UInt256') as a, toTypeName(a); +SELECT JSONExtract('{"a":"-1234567899999"}', 'a', 'Int32') as a, toTypeName(a); +SELECT JSONExtract('{"a":"1234567899999"}', 'a', 'UInt32') as a, toTypeName(a); +SELECT JSONExtract('{"a":"-1234567890123456789999"}', 'a', 'Int64') as a, toTypeName(a); +SELECT JSONExtract('{"a":"1234567890123456789999"}', 'a', 'UInt64') as a, toTypeName(a); +SELECT JSONExtract('{"a":0}', 'a', 'Bool') as a, toTypeName(a); +SELECT JSONExtract('{"a":1}', 'a', 'Bool') as a, toTypeName(a); + +SELECT JSONExtract('{"a": "-123456789012.345"}', 'a', 'Int64') as a, toTypeName(a); +SELECT JSONExtract('{"a": "123456789012.345"}', 'a', 'UInt64') as a, toTypeName(a); + +SELECT JSONExtract('{"a": "-2000.22"}', 'a', 'UInt64') as a, toTypeName(a); +SELECT JSONExtract('{"a": "-2000.22"}', 'a', 'Int8') as a, toTypeName(a); + +SELECT JSONExtract('{"a": "hello", "b": "world"}', 'Map(String, String)'); +SELECT JSONExtract('{"a": "hello", "b": "world"}', 'Map(LowCardinality(String), String)'); +SELECT JSONExtract('{"a": ["hello", 100.0], "b": ["world", 200]}', 'Map(String, Tuple(String, Float64))'); +SELECT JSONExtract('{"a": [100.0, 200], "b": [-100, 200.0, 300]}', 'Map(String, Array(Float64))'); +SELECT JSONExtract('{"a": {"c": "hello"}, "b": {"d": "world"}}', 'Map(String, Map(String, String))'); +SELECT JSONExtract('{"a": {"c": "hello"}, "b": {"d": "world"}}', 'a', 'Map(String, String)'); + +SELECT '--JSONExtractKeysAndValues--'; +SELECT JSONExtractKeysAndValues('{"a": "hello", "b": [-100, 200.0, 300]}', 'String'); +SELECT JSONExtractKeysAndValues('{"a": "hello", "b": [-100, 200.0, 300]}', 'Array(Float64)'); +SELECT JSONExtractKeysAndValues('{"a": 
"hello", "b": "world"}', 'String'); +SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8'); +SELECT JSONExtractKeysAndValues('{"a": "hello", "b": "world"}', 'LowCardinality(String)'); + +SELECT '--JSONExtractRaw--'; +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}'); +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'a'); +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b'); +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 1); +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300], "c":{"d":[121,144]}}'); +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300], "c":{"d":[121,144]}}', 'c'); +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300], "c":{"d":[121,144]}}', 'c', 'd'); +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300], "c":{"d":[121,144]}}', 'c', 'd', 2); +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300], "c":{"d":[121,144]}}', 'c', 'd', 3); +SELECT JSONExtractRaw('{"passed": true}'); +SELECT JSONExtractRaw('{}'); +SELECT JSONExtractRaw('{"abc":"\\n\\u0000"}', 'abc'); +SELECT JSONExtractRaw('{"abc":"\\u263a"}', 'abc'); + +SELECT '--JSONExtractArrayRaw--'; +SELECT JSONExtractArrayRaw(''); +SELECT JSONExtractArrayRaw('{"a": "hello", "b": "not_array"}'); +SELECT JSONExtractArrayRaw('[]'); +SELECT JSONExtractArrayRaw('[[],[]]'); +SELECT JSONExtractArrayRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b'); +SELECT JSONExtractArrayRaw('[1,2,3,4,5,"hello"]'); +SELECT JSONExtractArrayRaw(arrayJoin(JSONExtractArrayRaw('[[1,2,3],[4,5,6]]'))); + +SELECT '--JSONExtractKeysAndValuesRaw--'; +SELECT JSONExtractKeysAndValuesRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'a'); +SELECT JSONExtractKeysAndValuesRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b'); +SELECT JSONExtractKeysAndValuesRaw('{"a": "hello", "b": [-100, 200.0, 300]}'); +SELECT JSONExtractKeysAndValuesRaw('{"a": "hello", "b": [-100, 200.0, 300], "c":{"d":[121,144]}}'); +SELECT JSONExtractKeysAndValuesRaw('{"a": "hello", "b": [-100, 200.0, 300], "c":{"d":[121,144]}}', 'c'); + +SELECT '--const/non-const mixed--'; +SELECT JSONExtractString('["a", "b", "c", "d", "e"]', idx) FROM (SELECT arrayJoin([1,2,3,4,5]) AS idx); +SELECT JSONExtractString(json, 's') FROM (SELECT arrayJoin(['{"s":"u"}', '{"s":"v"}']) AS json); + +SELECT '--show error: type should be const string'; +SELECT JSONExtractKeysAndValues([], JSONLength('^?V{LSwp')); -- { serverError ILLEGAL_COLUMN } +WITH '{"i": 1, "f": 1.2}' AS json SELECT JSONExtract(json, 'i', JSONType(json, 'i')); -- { serverError ILLEGAL_COLUMN } + +SELECT '--show error: key of map type should be String'; +SELECT JSONExtract('{"a": [100.0, 200], "b": [-100, 200.0, 300]}', 'Map(Int64, Array(Float64))'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + + +SELECT '--allow_simdjson=0--'; +SET allow_simdjson=0; + +SELECT '--JSONLength--'; +SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}'); +SELECT JSONLength('{"a": "hello", "b": [-100, 200.0, 300]}', 'b'); +SELECT JSONLength('{}'); + +SELECT '--JSONHas--'; +SELECT JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'a'); +SELECT JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'b'); +SELECT JSONHas('{"a": "hello", "b": [-100, 200.0, 300]}', 'c'); + +SELECT '--isValidJSON--'; +SELECT isValidJSON('{"a": "hello", "b": [-100, 200.0, 300]}'); +SELECT isValidJSON('not a json'); +SELECT isValidJSON('"HX-='); + +SELECT '--JSONKey--'; +SELECT JSONKey('{"a": "hello", "b": [-100, 200.0, 300]}', 1); +SELECT 
JSONKey('{"a": "hello", "b": [-100, 200.0, 300]}', 2); +SELECT JSONKey('{"a": "hello", "b": [-100, 200.0, 300]}', -1); +SELECT JSONKey('{"a": "hello", "b": [-100, 200.0, 300]}', -2); + +SELECT '--JSONType--'; +SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}'); +SELECT JSONType('{"a": "hello", "b": [-100, 200.0, 300]}', 'b'); +SELECT JSONType('{"a": true}', 'a'); + +SELECT '--JSONExtract--'; +SELECT JSONExtractInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 1); +SELECT JSONExtractFloat('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 2); +SELECT JSONExtractUInt('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', -1); +SELECT JSONExtractBool('{"passed": true}', 'passed'); +SELECT JSONExtractBool('"HX-='); +SELECT JSONExtractBool('-1'); + +SELECT JSONExtract('{"a": "-123456789012.345"}', 'a', 'Int64') as a, toTypeName(a); +SELECT JSONExtract('{"a": "123456789012.345"}', 'a', 'UInt64') as a, toTypeName(a); + +SELECT JSONExtract('{"a": "-2000.22"}', 'a', 'UInt64') as a, toTypeName(a); +SELECT JSONExtract('{"a": "-2000.22"}', 'a', 'Int8') as a, toTypeName(a); + +SELECT '--JSONExtractString--'; +SELECT JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 'a'); +SELECT JSONExtractString('{"a": "hello", "b": [-100, 200.0, 300]}', 1); +select JSONExtractString('{"abc":"\\n\\u0000"}', 'abc'); +select JSONExtractString('{"abc":"\\u263a"}', 'abc'); +select JSONExtractString('{"abc":"\\u263"}', 'abc'); +select JSONExtractString('{"abc":"hello}', 'abc'); + +SELECT '--JSONExtract (generic)--'; +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(String, Array(Float64))'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(a String, b Array(Float64))'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(b Array(Float64), a String)'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'Tuple(a FixedString(6), c UInt8)'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'a', 'String'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(Float32)'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Tuple(Int8, Float32, UInt16)'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(Int8)'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(Nullable(Int8))'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(LowCardinality(Nullable(Int8)))'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(UInt8)'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(Nullable(UInt8))'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 'Array(LowCardinality(Nullable(UInt8)))'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 1, 'Int8'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 2, 'Int32'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4, 'Nullable(Int64)'); +SELECT JSONExtract('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 4, 'LowCardinality(Nullable(Int64))'); +SELECT JSONExtract('{"passed": true}', 'passed', 'UInt8'); +SELECT JSONExtract('{"day": "Thursday"}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)'); +SELECT JSONExtract('{"day": 5}', 'day', 'Enum8(\'Sunday\' = 0, \'Monday\' = 1, \'Tuesday\' = 2, \'Wednesday\' = 3, \'Thursday\' = 4, \'Friday\' = 5, \'Saturday\' = 6)'); +SELECT 
JSONExtract('{"a":3,"b":5,"c":7}', 'Tuple(a Int, b Int)'); +SELECT JSONExtract('{"a":3,"b":5,"c":7}', 'Tuple(c Int, a Int)'); +SELECT JSONExtract('{"a":3,"b":5,"c":7}', 'Tuple(b Int, d Int)'); +SELECT JSONExtract('{"a":3,"b":5,"c":7}', 'Tuple(Int, Int)'); +SELECT JSONExtract('{"a":3}', 'Tuple(Int, Int)'); +SELECT JSONExtract('[3,5,7]', 'Tuple(Int, Int)'); +SELECT JSONExtract('[3]', 'Tuple(Int, Int)'); + +SELECT JSONExtract('{"a": "hello", "b": "world"}', 'Map(String, String)'); +SELECT JSONExtract('{"a": "hello", "b": "world"}', 'Map(LowCardinality(String), String)'); +SELECT JSONExtract('{"a": ["hello", 100.0], "b": ["world", 200]}', 'Map(String, Tuple(String, Float64))'); +SELECT JSONExtract('{"a": [100.0, 200], "b": [-100, 200.0, 300]}', 'Map(String, Array(Float64))'); +SELECT JSONExtract('{"a": {"c": "hello"}, "b": {"d": "world"}}', 'Map(String, Map(String, String))'); +SELECT JSONExtract('{"a": {"c": "hello"}, "b": {"d": "world"}}', 'a', 'Map(String, String)'); + +SELECT '--JSONExtractKeysAndValues--'; +SELECT JSONExtractKeysAndValues('{"a": "hello", "b": [-100, 200.0, 300]}', 'String'); +SELECT JSONExtractKeysAndValues('{"a": "hello", "b": [-100, 200.0, 300]}', 'Array(Float64)'); +SELECT JSONExtractKeysAndValues('{"a": "hello", "b": "world"}', 'String'); +SELECT JSONExtractKeysAndValues('{"x": {"a": 5, "b": 7, "c": 11}}', 'x', 'Int8'); + +SELECT '--JSONExtractRaw--'; +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}'); +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'a'); +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b'); +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b', 1); +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300], "c":{"d":[121,144]}}'); +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300], "c":{"d":[121,144]}}', 'c'); +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300], "c":{"d":[121,144]}}', 'c', 'd'); +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300], "c":{"d":[121,144]}}', 'c', 'd', 2); +SELECT JSONExtractRaw('{"a": "hello", "b": [-100, 200.0, 300], "c":{"d":[121,144]}}', 'c', 'd', 3); +SELECT JSONExtractRaw('{"passed": true}'); +SELECT JSONExtractRaw('{}'); +SELECT JSONExtractRaw('{"abc":"\\n\\u0000"}', 'abc'); +SELECT JSONExtractRaw('{"abc":"\\u263a"}', 'abc'); + +SELECT '--JSONExtractArrayRaw--'; +SELECT JSONExtractArrayRaw(''); +SELECT JSONExtractArrayRaw('{"a": "hello", "b": "not_array"}'); +SELECT JSONExtractArrayRaw('[]'); +SELECT JSONExtractArrayRaw('[[],[]]'); +SELECT JSONExtractArrayRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b'); +SELECT JSONExtractArrayRaw('[1,2,3,4,5,"hello"]'); +SELECT JSONExtractArrayRaw(arrayJoin(JSONExtractArrayRaw('[[1,2,3],[4,5,6]]'))); + +SELECT '--JSONExtractKeysAndValuesRaw--'; +SELECT JSONExtractKeysAndValuesRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'a'); +SELECT JSONExtractKeysAndValuesRaw('{"a": "hello", "b": [-100, 200.0, 300]}', 'b'); +SELECT JSONExtractKeysAndValuesRaw('{"a": "hello", "b": [-100, 200.0, 300]}'); +SELECT JSONExtractKeysAndValuesRaw('{"a": "hello", "b": [-100, 200.0, 300], "c":{"d":[121,144]}}'); +SELECT JSONExtractKeysAndValuesRaw('{"a": "hello", "b": [-100, 200.0, 300], "c":{"d":[121,144]}}', 'c'); + +SELECT '--JSONExtractKeys--'; +SELECT JSONExtractKeys('{"a": "hello", "b": [-100, 200.0, 300]}'); +SELECT JSONExtractKeys('{"a": "hello", "b": [-100, 200.0, 300]}', 'b'); +SELECT JSONExtractKeys('{"a": "hello", "b": [-100, 200.0, 300]}', 'a'); +SELECT JSONExtractKeys('{"a": "hello", 
"b": [-100, 200.0, 300], "c":{"d":[121,144]}}', 'c'); + +SELECT '--const/non-const mixed--'; +SELECT JSONExtractString('["a", "b", "c", "d", "e"]', idx) FROM (SELECT arrayJoin([1,2,3,4,5]) AS idx); +SELECT JSONExtractString(json, 's') FROM (SELECT arrayJoin(['{"s":"u"}', '{"s":"v"}']) AS json); + +SELECT '--show error: type should be const string'; +SELECT JSONExtractKeysAndValues([], JSONLength('^?V{LSwp')); -- { serverError ILLEGAL_COLUMN } +WITH '{"i": 1, "f": 1.2}' AS json SELECT JSONExtract(json, 'i', JSONType(json, 'i')); -- { serverError ILLEGAL_COLUMN } + +SELECT '--show error: index type should be integer'; +SELECT JSONExtract('[]', JSONExtract('0', 'UInt256'), 'UInt256'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT '--show error: key of map type should be String'; +SELECT JSONExtract('{"a": [100.0, 200], "b": [-100, 200.0, 300]}', 'Map(Int64, Array(Float64))'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT JSONExtract(materialize(toLowCardinality('{"string_value":null}')), materialize('string_value'), 'LowCardinality(Nullable(String))'); +SELECT JSONExtract(materialize('{"string_value":null}'), materialize('string_value'), 'LowCardinality(Nullable(String))'); +SELECT JSONExtract(materialize('{"string_value":"Hello"}'), materialize('string_value'), 'LowCardinality(Nullable(String))') AS x; +SELECT JSONExtract(materialize(toLowCardinality('{"string_value":"Hello"}')), materialize('string_value'), 'LowCardinality(Nullable(String))') AS x; +SELECT JSONExtract(materialize('{"string_value":"Hello"}'), materialize(toLowCardinality('string_value')), 'LowCardinality(Nullable(String))') AS x; +SELECT JSONExtract(materialize(toLowCardinality('{"string_value":"Hello"}')), materialize(toLowCardinality('string_value')), 'LowCardinality(Nullable(String))') AS x; diff --git a/parser/testdata/00919_histogram_merge/ast.json b/parser/testdata/00919_histogram_merge/ast.json new file mode 100644 index 000000000..d3be91a98 --- /dev/null +++ b/parser/testdata/00919_histogram_merge/ast.json @@ -0,0 +1,211 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (alias hist) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function finalizeAggregation (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function histogramState (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": 
" Literal UInt64_190" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function histogramState (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function round (alias l) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier hist" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function round (alias r) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier hist" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function round (alias cnt) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier hist" + }, + { + "explain": " Literal UInt64_3" + } + ], + + "rows": 63, + + "statistics": + { + "elapsed": 0.001349774, + "rows_read": 63, + "bytes_read": 2873 + } +} diff --git a/parser/testdata/00919_histogram_merge/metadata.json b/parser/testdata/00919_histogram_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00919_histogram_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00919_histogram_merge/query.sql b/parser/testdata/00919_histogram_merge/query.sql new file mode 100644 index 000000000..5dec43740 --- /dev/null +++ b/parser/testdata/00919_histogram_merge/query.sql @@ -0,0 +1,2 @@ +WITH arrayJoin(finalizeAggregation((SELECT histogramState(3)(number) FROM numbers(10, 190)) + (SELECT histogramState(3)(number) FROM numbers(0, 100)))) AS hist SELECT round(hist.1) AS l, round(hist.2) AS r, round(hist.3) AS cnt; +WITH arrayJoin(finalizeAggregation((SELECT histogramState(3)(number) FROM numbers(0, 100)) + (SELECT histogramState(3)(number) FROM numbers(10, 190)))) AS hist SELECT round(hist.1) AS l, round(hist.2) AS r, round(hist.3) AS cnt; diff --git a/parser/testdata/00919_sum_aggregate_states_constants/ast.json b/parser/testdata/00919_sum_aggregate_states_constants/ast.json new file mode 100644 index 000000000..dc3cf2c10 --- /dev/null +++ b/parser/testdata/00919_sum_aggregate_states_constants/ast.json @@ -0,0 +1,130 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " 
ExpressionList (children 1)" + }, + { + "explain": " Function finalizeAggregation (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumState (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumState (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 36, + + "statistics": + { + "elapsed": 0.001752637, + "rows_read": 36, + "bytes_read": 1662 + } +} diff --git a/parser/testdata/00919_sum_aggregate_states_constants/metadata.json b/parser/testdata/00919_sum_aggregate_states_constants/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00919_sum_aggregate_states_constants/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00919_sum_aggregate_states_constants/query.sql b/parser/testdata/00919_sum_aggregate_states_constants/query.sql new file mode 100644 index 000000000..a0962e99f --- /dev/null +++ b/parser/testdata/00919_sum_aggregate_states_constants/query.sql @@ -0,0 +1,6 @@ +SELECT finalizeAggregation((SELECT sumState(number) FROM numbers(10)) + (SELECT sumState(number) FROM numbers(10))); +SELECT finalizeAggregation((SELECT sumState(number) FROM numbers(10)) + materialize((SELECT sumState(number) FROM numbers(10)))); +SELECT finalizeAggregation(materialize((SELECT sumState(number) FROM numbers(10))) + (SELECT sumState(number) FROM numbers(10))); +SELECT finalizeAggregation(materialize((SELECT sumState(number) FROM numbers(10))) + materialize((SELECT sumState(number) FROM numbers(10)))); +SELECT finalizeAggregation(materialize((SELECT sumState(number) FROM numbers(10)) + (SELECT sumState(number) FROM numbers(10)))); +SELECT materialize(finalizeAggregation((SELECT sumState(number) FROM numbers(10)) + (SELECT sumState(number) FROM numbers(10)))); diff --git a/parser/testdata/00920_multiply_aggregate_states_constants/ast.json b/parser/testdata/00920_multiply_aggregate_states_constants/ast.json new file mode 100644 index 000000000..566b7bd40 --- /dev/null +++ b/parser/testdata/00920_multiply_aggregate_states_constants/ast.json @@ -0,0 +1,91 @@ +{ + 
"meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function finalizeAggregation (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumState (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001328661, + "rows_read": 23, + "bytes_read": 1030 + } +} diff --git a/parser/testdata/00920_multiply_aggregate_states_constants/metadata.json b/parser/testdata/00920_multiply_aggregate_states_constants/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00920_multiply_aggregate_states_constants/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00920_multiply_aggregate_states_constants/query.sql b/parser/testdata/00920_multiply_aggregate_states_constants/query.sql new file mode 100644 index 000000000..e88b5d520 --- /dev/null +++ b/parser/testdata/00920_multiply_aggregate_states_constants/query.sql @@ -0,0 +1,4 @@ +SELECT finalizeAggregation((SELECT sumState(number) FROM numbers(10)) * 10); +SELECT finalizeAggregation(materialize((SELECT sumState(number) FROM numbers(10))) * 10); +SELECT finalizeAggregation(materialize((SELECT sumState(number) FROM numbers(10)) * 10)); +SELECT materialize(finalizeAggregation((SELECT sumState(number) FROM numbers(10)) * 10)); diff --git a/parser/testdata/00921_datetime64_basic/ast.json b/parser/testdata/00921_datetime64_basic/ast.json new file mode 100644 index 000000000..bea1adbb1 --- /dev/null +++ b/parser/testdata/00921_datetime64_basic/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery A (children 1)" + }, + { + "explain": " Identifier A" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001383662, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/00921_datetime64_basic/metadata.json b/parser/testdata/00921_datetime64_basic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00921_datetime64_basic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00921_datetime64_basic/query.sql b/parser/testdata/00921_datetime64_basic/query.sql new file mode 100644 index 000000000..e554c8a01 --- /dev/null +++ b/parser/testdata/00921_datetime64_basic/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS A; + +SELECT CAST(1 as DateTime64('abc')); -- { serverError 
ILLEGAL_TYPE_OF_ARGUMENT } # Invalid scale parameter type +SELECT CAST(1 as DateTime64(100)); -- { serverError ARGUMENT_OUT_OF_BOUND } # too big scale +SELECT CAST(1 as DateTime64(-1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } # signed scale parameter type +SELECT CAST(1 as DateTime64(3, 'qqq')); -- { serverError BAD_ARGUMENTS } # invalid timezone + +SELECT toDateTime64('2019-09-16 19:20:11.234', 'abc'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } # invalid scale +SELECT toDateTime64('2019-09-16 19:20:11.234', 100); -- { serverError ARGUMENT_OUT_OF_BOUND } # too big scale +SELECT toDateTime64(CAST([['CLb5Ph ']], 'String'), uniqHLL12('2Gs1V', 752)); -- { serverError ILLEGAL_COLUMN } # non-const string and non-const scale +SELECT toDateTime64('2019-09-16 19:20:11.234', 3, 'qqq'); -- { serverError BAD_ARGUMENTS } # invalid timezone + +SELECT ignore(now64(gccMurmurHash())); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } # Illegal argument type +SELECT ignore(now64('abcd')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } # Illegal argument type +SELECT ignore(now64(number)) FROM system.numbers LIMIT 10; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } # Illegal argument type +SELECT ignore(now64(3, 'invalid timezone')); -- { serverError BAD_ARGUMENTS } +SELECT ignore(now64(3, 1111)); -- { serverError ILLEGAL_COLUMN } # invalid timezone parameter type + +WITH 'UTC' as timezone SELECT timezone, timeZoneOf(now64(3, timezone)) == timezone; +WITH 'Europe/Minsk' as timezone SELECT timezone, timeZoneOf(now64(3, timezone)) == timezone; + +SELECT toDateTime64('2019-09-16 19:20:11', 3, 'UTC'); -- this now works OK and produces a timestamp with no subsecond part + +CREATE TABLE A(t DateTime64(3, 'UTC')) ENGINE = MergeTree() ORDER BY t; +INSERT INTO A(t) VALUES ('2019-05-03 11:25:25.123456789'); + +SELECT toString(t, 'UTC'), toDate(t), toStartOfDay(t), toStartOfQuarter(t), toTimeWithFixedDate(t), toStartOfMinute(t) FROM A ORDER BY t; + +SELECT toDateTime64('2019-09-16 19:20:11.234', 3, 'Europe/Minsk'); + +DROP TABLE A; diff --git a/parser/testdata/00926_adaptive_index_granularity_collapsing_merge_tree/ast.json b/parser/testdata/00926_adaptive_index_granularity_collapsing_merge_tree/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00926_adaptive_index_granularity_collapsing_merge_tree/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00926_adaptive_index_granularity_collapsing_merge_tree/metadata.json b/parser/testdata/00926_adaptive_index_granularity_collapsing_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00926_adaptive_index_granularity_collapsing_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00926_adaptive_index_granularity_collapsing_merge_tree/query.sql b/parser/testdata/00926_adaptive_index_granularity_collapsing_merge_tree/query.sql new file mode 100644 index 000000000..d4560109a --- /dev/null +++ b/parser/testdata/00926_adaptive_index_granularity_collapsing_merge_tree/query.sql @@ -0,0 +1,81 @@ +-- Tags: no-random-merge-tree-settings + +----- Group of very similar simple tests ------ +DROP TABLE IF EXISTS zero_rows_per_granule; + +CREATE TABLE zero_rows_per_granule ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64, + Sign Int8 +) ENGINE CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(p) ORDER BY k + SETTINGS index_granularity_bytes=20, min_index_granularity_bytes=10, write_final_mark = 0, + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0, 
+ enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_full_part_storage = 0; + +INSERT INTO zero_rows_per_granule (p, k, v1, v2, Sign) VALUES ('2018-05-15', 1, 1000, 2000, 1), ('2018-05-16', 2, 3000, 4000, 1), ('2018-05-17', 3, 5000, 6000, 1), ('2018-05-18', 4, 7000, 8000, 1); + +SELECT COUNT(*) FROM zero_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'zero_rows_per_granule' and database=currentDatabase() and active=1; + +INSERT INTO zero_rows_per_granule (p, k, v1, v2, Sign) VALUES ('2018-05-15', 5, 1000, 2000, 1), ('2018-05-16', 6, 3000, 4000, 1), ('2018-05-17', 7, 5000, 6000, 1), ('2018-05-19', 8, 7000, 8000, 1); + +OPTIMIZE TABLE zero_rows_per_granule FINAL; + +SELECT COUNT(*) FROM zero_rows_per_granule FINAL; + +SELECT sum(marks) from system.parts WHERE table = 'zero_rows_per_granule' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS zero_rows_per_granule; + +SELECT '-----'; +DROP TABLE IF EXISTS four_rows_per_granule; + +CREATE TABLE four_rows_per_granule ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64, + Sign Int8 +) ENGINE CollapsingMergeTree(Sign) PARTITION BY toYYYYMM(p) ORDER BY k + SETTINGS index_granularity_bytes=110, min_index_granularity_bytes=100, write_final_mark = 0, + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0, + enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_full_part_storage = 0; + +INSERT INTO four_rows_per_granule (p, k, v1, v2, Sign) VALUES ('2018-05-15', 1, 1000, 2000, 1), ('2018-05-16', 2, 3000, 4000, 1), ('2018-05-17', 3, 5000, 6000, 1), ('2018-05-18', 4, 7000, 8000, 1); + +SELECT COUNT(*) FROM four_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'four_rows_per_granule' and database=currentDatabase() and active=1; + +INSERT INTO four_rows_per_granule (p, k, v1, v2, Sign) VALUES ('2018-05-15', 1, 1000, 2000, -1), ('2018-05-16', 2, 3000, 4000, -1), ('2018-05-17', 3, 5000, 6000, -1), ('2018-05-18', 4, 7000, 8000, -1); + +OPTIMIZE TABLE four_rows_per_granule FINAL; + +SELECT COUNT(*) FROM four_rows_per_granule; + +SELECT sum(marks) from system.parts WHERE table = 'four_rows_per_granule' and database=currentDatabase() and active=1; + +INSERT INTO four_rows_per_granule (p, k, v1, v2, Sign) VALUES ('2018-05-15', 1, 1000, 2000, 1), ('2018-05-16', 2, 3000, 4000, 1), ('2018-05-17', 3, 5000, 6000, 1), ('2018-05-18', 4, 7000, 8000, 1); + +INSERT INTO four_rows_per_granule (p, k, v1, v2, Sign) VALUES ('2018-05-15', 5, 1000, 2000, 1), ('2018-05-16', 6, 3000, 4000, 1), ('2018-05-17', 7, 5000, 6000, 1), ('2018-05-18', 8, 7000, 8000, 1); + +INSERT INTO four_rows_per_granule (p, k, v1, v2, Sign) VALUES ('2018-05-15', 5, 1000, 2000, -1), ('2018-05-17', 7, 5000, 6000, -1); + +OPTIMIZE TABLE four_rows_per_granule FINAL; + +SELECT COUNT(*) FROM four_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'four_rows_per_granule' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS four_rows_per_granule; diff --git a/parser/testdata/00926_adaptive_index_granularity_merge_tree/ast.json b/parser/testdata/00926_adaptive_index_granularity_merge_tree/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00926_adaptive_index_granularity_merge_tree/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/00926_adaptive_index_granularity_merge_tree/metadata.json b/parser/testdata/00926_adaptive_index_granularity_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00926_adaptive_index_granularity_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00926_adaptive_index_granularity_merge_tree/query.sql b/parser/testdata/00926_adaptive_index_granularity_merge_tree/query.sql new file mode 100644 index 000000000..5a687a454 --- /dev/null +++ b/parser/testdata/00926_adaptive_index_granularity_merge_tree/query.sql @@ -0,0 +1,383 @@ +-- Tags: no-random-merge-tree-settings + +----- Group of very similar simple tests ------ +select '----HORIZONTAL MERGE TESTS----'; +DROP TABLE IF EXISTS zero_rows_per_granule; + +CREATE TABLE zero_rows_per_granule ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 20, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0, min_bytes_for_full_part_storage = 0; + +INSERT INTO zero_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +SELECT COUNT(*) FROM zero_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'zero_rows_per_granule' and database=currentDatabase() and active=1; + +INSERT INTO zero_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-19', 8, 7000, 8000); + +OPTIMIZE TABLE zero_rows_per_granule FINAL; + +SELECT COUNT(*) FROM zero_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'zero_rows_per_granule' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS zero_rows_per_granule; + +SELECT '-----'; + +DROP TABLE IF EXISTS two_rows_per_granule; + +CREATE TABLE two_rows_per_granule ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 40, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0, min_bytes_for_full_part_storage = 0; + +INSERT INTO two_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +SELECT COUNT(*) FROM two_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'two_rows_per_granule' and database=currentDatabase() and active=1; + +INSERT INTO two_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-19', 8, 7000, 8000); + +OPTIMIZE TABLE two_rows_per_granule FINAL; + +SELECT COUNT(*) FROM two_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'two_rows_per_granule' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS two_rows_per_granule; + +SELECT '-----'; + +DROP TABLE IF EXISTS four_rows_per_granule; + +CREATE TABLE four_rows_per_granule ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 10, write_final_mark = 0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0; + +INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), 
('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +SELECT COUNT(*) FROM four_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'four_rows_per_granule' and database=currentDatabase() and active=1; +DETACH TABLE four_rows_per_granule; +ATTACH TABLE four_rows_per_granule; + +INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-19', 8, 7000, 8000); + +OPTIMIZE TABLE four_rows_per_granule FINAL; + +DETACH TABLE four_rows_per_granule; + +ATTACH TABLE four_rows_per_granule; + +SELECT COUNT(*) FROM four_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'four_rows_per_granule' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS four_rows_per_granule; + +----- More interesting tests ------ +SELECT '-----'; + +DROP TABLE IF EXISTS huge_granularity_small_blocks; + +CREATE TABLE huge_granularity_small_blocks ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 1000000, write_final_mark = 0, min_bytes_for_wide_part = 0, min_bytes_for_full_part_storage = 0; + +INSERT INTO huge_granularity_small_blocks (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +SELECT COUNT(*) FROM huge_granularity_small_blocks; + +SELECT distinct(marks) from system.parts WHERE table = 'huge_granularity_small_blocks' and database=currentDatabase() and active=1; + +INSERT INTO huge_granularity_small_blocks (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 5, 3000, 4000), ('2018-05-17', 6, 5000, 6000), ('2018-05-19', 7, 7000, 8000); + +DETACH TABLE huge_granularity_small_blocks; + +ATTACH TABLE huge_granularity_small_blocks; + +OPTIMIZE TABLE huge_granularity_small_blocks FINAL; + +SELECT COUNT(*) FROM huge_granularity_small_blocks; + +SELECT distinct(marks) from system.parts WHERE table = 'huge_granularity_small_blocks' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS huge_granularity_small_blocks; + +SELECT '-----'; + +DROP TABLE IF EXISTS adaptive_granularity_alter; + +CREATE TABLE adaptive_granularity_alter ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100, write_final_mark = 0, min_bytes_for_wide_part = 0, min_bytes_for_full_part_storage = 0; + +INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +SELECT COUNT(*) FROM adaptive_granularity_alter; + +SELECT distinct(marks) from system.parts WHERE table = 'adaptive_granularity_alter' and database=currentDatabase() and active=1; + +OPTIMIZE TABLE adaptive_granularity_alter FINAL; + +ALTER TABLE adaptive_granularity_alter MODIFY COLUMN v1 Int16; + +DETACH TABLE adaptive_granularity_alter; + +ATTACH TABLE adaptive_granularity_alter; + +SELECT COUNT(*) FROM adaptive_granularity_alter; + +SELECT distinct(marks) from system.parts WHERE table = 'adaptive_granularity_alter' and database=currentDatabase() and active=1; + +INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 5, 3000, 4000), ('2018-05-17', 6, 5000, 6000), ('2018-05-19', 42, 
42, 42); + +SELECT COUNT(*) FROM adaptive_granularity_alter; + +SELECT distinct(marks) from system.parts WHERE table = 'adaptive_granularity_alter' and database=currentDatabase() and active=1; + +ALTER TABLE adaptive_granularity_alter MODIFY COLUMN v2 String; + +DETACH TABLE adaptive_granularity_alter; + +ATTACH TABLE adaptive_granularity_alter; + +INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 100, 1000, 'aaaa'), ('2018-05-16', 101, 3000, 'bbbb'), ('2018-05-17', 102, 5000, 'cccc'), ('2018-05-19', 103, 7000, 'dddd'); + +OPTIMIZE TABLE adaptive_granularity_alter FINAL; + +SELECT k, v2 FROM adaptive_granularity_alter WHERE k >= 100 OR k = 42 ORDER BY k; + +SELECT sum(marks) from system.parts WHERE table = 'adaptive_granularity_alter' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS adaptive_granularity_alter; + +----------------------------------------------- +-------------VERTICAL MERGE TESTS-------------- +----------------------------------------------- +select '----VERTICAL MERGE TESTS----'; +DROP TABLE IF EXISTS zero_rows_per_granule; + +CREATE TABLE zero_rows_per_granule ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k + SETTINGS index_granularity_bytes = 20, + min_index_granularity_bytes = 10, + write_final_mark = 0, + enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0; + + +INSERT INTO zero_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +SELECT COUNT(*) FROM zero_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'zero_rows_per_granule' and database=currentDatabase() and active=1; + +INSERT INTO zero_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-19', 8, 7000, 8000); + +OPTIMIZE TABLE zero_rows_per_granule FINAL; + +SELECT COUNT(*) FROM zero_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'zero_rows_per_granule' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS zero_rows_per_granule; + +SELECT '-----'; + +DROP TABLE IF EXISTS two_rows_per_granule; + +CREATE TABLE two_rows_per_granule ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k + SETTINGS index_granularity_bytes=40, + min_index_granularity_bytes = 10, + write_final_mark = 0, + enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0; + +INSERT INTO two_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +SELECT COUNT(*) FROM two_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'two_rows_per_granule' and database=currentDatabase() and active=1; + +INSERT INTO two_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-19', 8, 7000, 8000); + +OPTIMIZE TABLE two_rows_per_granule FINAL; + +SELECT COUNT(*) FROM two_rows_per_granule; + +SELECT distinct(marks) from 
system.parts WHERE table = 'two_rows_per_granule' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS two_rows_per_granule; + +SELECT '-----'; + +DROP TABLE IF EXISTS four_rows_per_granule; + +CREATE TABLE four_rows_per_granule ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k + SETTINGS index_granularity_bytes = 110, + min_index_granularity_bytes = 10, + write_final_mark = 0, + enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0; + +INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +SELECT COUNT(*) FROM four_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'four_rows_per_granule' and database=currentDatabase() and active=1; +DETACH TABLE four_rows_per_granule; +ATTACH TABLE four_rows_per_granule; + +INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-19', 8, 7000, 8000); + +OPTIMIZE TABLE four_rows_per_granule FINAL; + +DETACH TABLE four_rows_per_granule; + +ATTACH TABLE four_rows_per_granule; + +SELECT COUNT(*) FROM four_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'four_rows_per_granule' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS four_rows_per_granule; + +----- More interesting tests ------ +SELECT '-----'; + +DROP TABLE IF EXISTS huge_granularity_small_blocks; + +CREATE TABLE huge_granularity_small_blocks ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k + SETTINGS index_granularity_bytes=1000000, write_final_mark = 0, + enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0; + +INSERT INTO huge_granularity_small_blocks (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +SELECT COUNT(*) FROM huge_granularity_small_blocks; + +SELECT distinct(marks) from system.parts WHERE table = 'huge_granularity_small_blocks' and database=currentDatabase() and active=1; + +INSERT INTO huge_granularity_small_blocks (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 5, 3000, 4000), ('2018-05-17', 6, 5000, 6000), ('2018-05-19', 7, 7000, 8000); + +DETACH TABLE huge_granularity_small_blocks; + +ATTACH TABLE huge_granularity_small_blocks; + +OPTIMIZE TABLE huge_granularity_small_blocks FINAL; + +SELECT COUNT(*) FROM huge_granularity_small_blocks; + +SELECT distinct(marks) from system.parts WHERE table = 'huge_granularity_small_blocks' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS huge_granularity_small_blocks; + +SELECT '-----'; + +DROP TABLE IF EXISTS adaptive_granularity_alter; + +CREATE TABLE adaptive_granularity_alter ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE MergeTree() PARTITION BY toYYYYMM(p) ORDER BY k + SETTINGS index_granularity_bytes=110, + min_index_granularity_bytes = 100, + write_final_mark = 0, + enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + 
vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0; + + +INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +SELECT COUNT(*) FROM adaptive_granularity_alter; + +SELECT distinct(marks) from system.parts WHERE table = 'adaptive_granularity_alter' and database=currentDatabase() and active=1; + +OPTIMIZE TABLE adaptive_granularity_alter FINAL; + +ALTER TABLE adaptive_granularity_alter MODIFY COLUMN v1 Int16; + +DETACH TABLE adaptive_granularity_alter; + +ATTACH TABLE adaptive_granularity_alter; + +SELECT COUNT(*) FROM adaptive_granularity_alter; + +SELECT distinct(marks) from system.parts WHERE table = 'adaptive_granularity_alter' and database=currentDatabase() and active=1; + +INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 5, 3000, 4000), ('2018-05-17', 6, 5000, 6000), ('2018-05-19', 42, 42, 42); + +SELECT COUNT(*) FROM adaptive_granularity_alter; + +SELECT distinct(marks) from system.parts WHERE table = 'adaptive_granularity_alter' and database=currentDatabase() and active=1; + +ALTER TABLE adaptive_granularity_alter MODIFY COLUMN v2 String; + +DETACH TABLE adaptive_granularity_alter; + +ATTACH TABLE adaptive_granularity_alter; + +INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 100, 1000, 'aaaa'), ('2018-05-16', 101, 3000, 'bbbb'), ('2018-05-17', 102, 5000, 'cccc'), ('2018-05-19', 103, 7000, 'dddd'); + +OPTIMIZE TABLE adaptive_granularity_alter FINAL; + +SELECT k, v2 FROM adaptive_granularity_alter WHERE k >= 100 OR k = 42 ORDER BY k; + +SELECT sum(marks) from system.parts WHERE table = 'adaptive_granularity_alter' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS adaptive_granularity_alter; diff --git a/parser/testdata/00926_adaptive_index_granularity_pk/ast.json b/parser/testdata/00926_adaptive_index_granularity_pk/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00926_adaptive_index_granularity_pk/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00926_adaptive_index_granularity_pk/metadata.json b/parser/testdata/00926_adaptive_index_granularity_pk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00926_adaptive_index_granularity_pk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00926_adaptive_index_granularity_pk/query.sql b/parser/testdata/00926_adaptive_index_granularity_pk/query.sql new file mode 100644 index 000000000..2a7b6934e --- /dev/null +++ b/parser/testdata/00926_adaptive_index_granularity_pk/query.sql @@ -0,0 +1,101 @@ +-- Tags: no-random-merge-tree-settings + +SET send_logs_level = 'fatal'; +SELECT '----00489----'; +DROP TABLE IF EXISTS pk; + +-- NOTE: here the timezone is pinned to UTC, to avoid issues with "partial +-- timezones" (timezones that do not start from 00:00), like +-- Africa/Monrovia, for which toStartOfMinute(0) and toStartOfMinute(59) can +-- give different values: +-- +-- SELECT +-- toDateTime(0, 'Africa/Monrovia') AS sec0, +-- toDateTime(59, 'Africa/Monrovia') AS sec59 +-- +-- ┌────────────────sec0─┬───────────────sec59─┐ +-- │ 1969-12-31 23:15:30 │ 1969-12-31 23:16:29 │ +-- └─────────────────────┴─────────────────────┘ +-- +CREATE TABLE pk (d Date DEFAULT '2000-01-01', x DateTime, y UInt64, z UInt64) ENGINE = 
MergeTree() PARTITION BY d ORDER BY (toStartOfMinute(x, 'UTC'), y, z) SETTINGS index_granularity_bytes=19, min_index_granularity_bytes=9, write_final_mark = 0; -- one row per granule + +INSERT INTO pk (x, y, z) VALUES (1, 11, 1235), (2, 11, 4395), (3, 22, 3545), (4, 22, 6984), (5, 33, 4596), (61, 11, 4563), (62, 11, 4578), (63, 11, 3572), (64, 22, 5786), (65, 22, 5786), (66, 22, 2791), (67, 22, 2791), (121, 33, 2791), (122, 33, 2791), (123, 33, 1235), (124, 44, 4935), (125, 44, 4578), (126, 55, 5786), (127, 55, 2791), (128, 55, 1235); + +SET max_block_size = 1; + +-- Test inferred limit +SET max_rows_to_read = 5; +SELECT toUInt32(x), y, z FROM pk WHERE x BETWEEN toDateTime(0) AND toDateTime(59); + +SET max_rows_to_read = 9; +SELECT toUInt32(x), y, z FROM pk WHERE x BETWEEN toDateTime(120) AND toDateTime(240); + +-- Index is coarse, cannot read a single row +SET max_rows_to_read = 5; +SELECT toUInt32(x), y, z FROM pk WHERE x = toDateTime(1); + +-- Index works on interval 00:01:00 - 00:01:59 +SET max_rows_to_read = 4; +SELECT toUInt32(x), y, z FROM pk WHERE (x BETWEEN toDateTime(60) AND toDateTime(119)) AND y = 11; + +-- Cannot read fewer rows as the PK is coarser on interval 00:01:00 - 00:02:00 +SET max_rows_to_read = 5; +SELECT toUInt32(x), y, z FROM pk WHERE (x BETWEEN toDateTime(60) AND toDateTime(120)) AND y = 11; + +DROP TABLE pk; + +SET max_block_size = 8192; +SELECT '----00607----'; + +SET max_rows_to_read = 0; +DROP TABLE IF EXISTS merge_tree; +CREATE TABLE merge_tree (x UInt32) ENGINE = MergeTree ORDER BY x SETTINGS index_granularity_bytes = 4, min_index_granularity_bytes=1, write_final_mark = 0; +INSERT INTO merge_tree VALUES (0), (1); + +SET force_primary_key = 1; +SET max_rows_to_read = 1; + +SELECT count() FROM merge_tree WHERE x = 0; +SELECT count() FROM merge_tree WHERE toUInt32(x) = 0; +SELECT count() FROM merge_tree WHERE toUInt64(x) = 0; + +SELECT count() FROM merge_tree WHERE x IN (0, 0); +SELECT count() FROM merge_tree WHERE toUInt32(x) IN (0, 0); +SELECT count() FROM merge_tree WHERE toUInt64(x) IN (0, 0); + +DROP TABLE merge_tree; + +SELECT '----00804----'; +SET max_rows_to_read = 0; +SET force_primary_key = 0; + +DROP TABLE IF EXISTS large_alter_table_00926; +DROP TABLE IF EXISTS store_of_hash_00926; + +SET allow_suspicious_codecs = 1; +CREATE TABLE large_alter_table_00926 ( + somedate Date CODEC(ZSTD, ZSTD, ZSTD(12), LZ4HC(12)), + id UInt64 CODEC(LZ4, ZSTD, NONE, LZ4HC), + data String CODEC(ZSTD(2), LZ4HC, NONE, LZ4, LZ4) +) ENGINE = MergeTree() PARTITION BY somedate ORDER BY id SETTINGS min_index_granularity_bytes=30, write_final_mark = 0, min_bytes_for_wide_part = '10M', min_rows_for_wide_part = 0; + +INSERT INTO large_alter_table_00926 SELECT toDate('2019-01-01'), number, toString(number + rand()) FROM system.numbers LIMIT 300000; + +CREATE TABLE store_of_hash_00926 (hash UInt64) ENGINE = Memory(); + +INSERT INTO store_of_hash_00926 SELECT sum(cityHash64(*)) FROM large_alter_table_00926; + +ALTER TABLE large_alter_table_00926 MODIFY COLUMN data CODEC(NONE, LZ4, LZ4HC, ZSTD); + +OPTIMIZE TABLE large_alter_table_00926; + +DETACH TABLE large_alter_table_00926; +ATTACH TABLE large_alter_table_00926; + +INSERT INTO store_of_hash_00926 SELECT sum(cityHash64(*)) FROM large_alter_table_00926; + +SELECT COUNT(hash) FROM store_of_hash_00926; +SELECT COUNT(DISTINCT hash) FROM store_of_hash_00926; + +DROP TABLE IF EXISTS large_alter_table_00926; +DROP TABLE IF EXISTS store_of_hash_00926; diff --git a/parser/testdata/00926_adaptive_index_granularity_replacing_merge_tree/ast.json 
b/parser/testdata/00926_adaptive_index_granularity_replacing_merge_tree/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00926_adaptive_index_granularity_replacing_merge_tree/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00926_adaptive_index_granularity_replacing_merge_tree/metadata.json b/parser/testdata/00926_adaptive_index_granularity_replacing_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00926_adaptive_index_granularity_replacing_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00926_adaptive_index_granularity_replacing_merge_tree/query.sql b/parser/testdata/00926_adaptive_index_granularity_replacing_merge_tree/query.sql new file mode 100644 index 000000000..05d4074e8 --- /dev/null +++ b/parser/testdata/00926_adaptive_index_granularity_replacing_merge_tree/query.sql @@ -0,0 +1,217 @@ +-- Tags: no-random-merge-tree-settings + +----- Group of very similar simple tests ------ +DROP TABLE IF EXISTS zero_rows_per_granule; + +CREATE TABLE zero_rows_per_granule ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE ReplacingMergeTree() PARTITION BY toYYYYMM(p) ORDER BY k + SETTINGS index_granularity_bytes=20, + min_index_granularity_bytes = 10, + write_final_mark = 0, + enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0; + +INSERT INTO zero_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +SELECT COUNT(*) FROM zero_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'zero_rows_per_granule' and database=currentDatabase() and active=1; + +INSERT INTO zero_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-19', 8, 7000, 8000); + +OPTIMIZE TABLE zero_rows_per_granule FINAL; + +SELECT COUNT(*) FROM zero_rows_per_granule FINAL; + +SELECT sum(marks) from system.parts WHERE table = 'zero_rows_per_granule' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS zero_rows_per_granule; + +SELECT '-----'; + +DROP TABLE IF EXISTS two_rows_per_granule; + +CREATE TABLE two_rows_per_granule ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE ReplacingMergeTree() PARTITION BY toYYYYMM(p) ORDER BY k + SETTINGS index_granularity_bytes = 40, + min_index_granularity_bytes = 10, + write_final_mark = 0, + enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0; + +INSERT INTO two_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +SELECT COUNT(*) FROM two_rows_per_granule FINAL; + +SELECT distinct(marks) from system.parts WHERE table = 'two_rows_per_granule' and database=currentDatabase() and active=1; + +INSERT INTO two_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-19', 8, 7000, 8000); + +OPTIMIZE TABLE two_rows_per_granule FINAL; + +SELECT COUNT(*) FROM two_rows_per_granule FINAL; + +SELECT 
distinct(marks) from system.parts WHERE table = 'two_rows_per_granule' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS two_rows_per_granule; + +SELECT '-----'; + +DROP TABLE IF EXISTS four_rows_per_granule; + +CREATE TABLE four_rows_per_granule ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE ReplacingMergeTree() PARTITION BY toYYYYMM(p) ORDER BY k + SETTINGS index_granularity_bytes = 110, + min_index_granularity_bytes=100, + write_final_mark = 0, + enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0; + +INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +SELECT COUNT(*) FROM four_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'four_rows_per_granule' and database=currentDatabase() and active=1; +DETACH TABLE four_rows_per_granule; +ATTACH TABLE four_rows_per_granule; + +INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-19', 8, 7000, 8000); + +INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-19', 8, 7000, 8000); + +INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-19', 8, 7000, 8000); + +INSERT INTO four_rows_per_granule (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-19', 8, 7000, 8000); + +SELECT sleep(0.5) Format Null; + +OPTIMIZE TABLE four_rows_per_granule FINAL; + +DETACH TABLE four_rows_per_granule; + +ATTACH TABLE four_rows_per_granule; + +SELECT COUNT(*) FROM four_rows_per_granule FINAL; + +SELECT distinct(marks) from system.parts WHERE table = 'four_rows_per_granule' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS four_rows_per_granule; + +----- More interesting tests ------ +SELECT '-----'; + +DROP TABLE IF EXISTS huge_granularity_small_blocks; + +CREATE TABLE huge_granularity_small_blocks ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE ReplacingMergeTree() PARTITION BY toYYYYMM(p) ORDER BY k + SETTINGS index_granularity_bytes=1000000, write_final_mark = 0, + enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0; + +INSERT INTO huge_granularity_small_blocks (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +SELECT COUNT(*) FROM huge_granularity_small_blocks; + +SELECT distinct(marks) from system.parts WHERE table = 'huge_granularity_small_blocks' and database=currentDatabase() and active=1; + +INSERT INTO huge_granularity_small_blocks (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-19', 8, 7000, 8000); + +DETACH TABLE huge_granularity_small_blocks; + +ATTACH TABLE huge_granularity_small_blocks; + +OPTIMIZE TABLE huge_granularity_small_blocks FINAL; + +SELECT COUNT(*) FROM 
huge_granularity_small_blocks FINAL; + +SELECT distinct(marks) from system.parts WHERE table = 'huge_granularity_small_blocks' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS huge_granularity_small_blocks; + +----- Some alter tests ---- +SELECT '-----'; + +DROP TABLE IF EXISTS adaptive_granularity_alter; + +CREATE TABLE adaptive_granularity_alter ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE ReplacingMergeTree() PARTITION BY toYYYYMM(p) ORDER BY k + SETTINGS index_granularity_bytes=110, + min_index_granularity_bytes=40, + write_final_mark = 0, + enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0; + +INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +SELECT COUNT(*) FROM adaptive_granularity_alter; + +SELECT distinct(marks) from system.parts WHERE table = 'adaptive_granularity_alter' and database=currentDatabase() and active=1; + +ALTER TABLE adaptive_granularity_alter MODIFY COLUMN v1 Int16; + +DETACH TABLE adaptive_granularity_alter; + +ATTACH TABLE adaptive_granularity_alter; + +SELECT COUNT(*) FROM adaptive_granularity_alter; + +SELECT distinct(marks) from system.parts WHERE table = 'adaptive_granularity_alter' and database=currentDatabase() and active=1; + +INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-19', 42, 42, 42); + +OPTIMIZE TABLE adaptive_granularity_alter FINAL; + +SELECT COUNT(*) FROM adaptive_granularity_alter; + +SELECT distinct(marks) from system.parts WHERE table = 'adaptive_granularity_alter' and database=currentDatabase() and active=1; + +ALTER TABLE adaptive_granularity_alter MODIFY COLUMN v2 String; + +DETACH TABLE adaptive_granularity_alter; + +ATTACH TABLE adaptive_granularity_alter; + +INSERT INTO adaptive_granularity_alter (p, k, v1, v2) VALUES ('2018-05-15', 100, 1000, 'aaaa'), ('2018-05-16', 101, 3000, 'bbbb'), ('2018-05-17', 102, 5000, 'cccc'), ('2018-05-19', 103, 7000, 'dddd'); + +OPTIMIZE TABLE adaptive_granularity_alter FINAL; + +SELECT k, v2 FROM adaptive_granularity_alter WHERE k >= 100 OR k = 42; + +SELECT distinct(marks) from system.parts WHERE table = 'adaptive_granularity_alter' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS adaptive_granularity_alter; diff --git a/parser/testdata/00926_adaptive_index_granularity_versioned_collapsing_merge_tree/ast.json b/parser/testdata/00926_adaptive_index_granularity_versioned_collapsing_merge_tree/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00926_adaptive_index_granularity_versioned_collapsing_merge_tree/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00926_adaptive_index_granularity_versioned_collapsing_merge_tree/metadata.json b/parser/testdata/00926_adaptive_index_granularity_versioned_collapsing_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00926_adaptive_index_granularity_versioned_collapsing_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00926_adaptive_index_granularity_versioned_collapsing_merge_tree/query.sql 
b/parser/testdata/00926_adaptive_index_granularity_versioned_collapsing_merge_tree/query.sql new file mode 100644 index 000000000..5a57b1139 --- /dev/null +++ b/parser/testdata/00926_adaptive_index_granularity_versioned_collapsing_merge_tree/query.sql @@ -0,0 +1,141 @@ +-- Tags: no-random-merge-tree-settings + +----- Group of very similar simple tests ------ +DROP TABLE IF EXISTS zero_rows_per_granule; + +CREATE TABLE zero_rows_per_granule ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64, + Sign Int8, + Version UInt8 +) ENGINE VersionedCollapsingMergeTree(Sign, Version) PARTITION BY toYYYYMM(p) ORDER BY k + SETTINGS index_granularity_bytes = 20, + min_index_granularity_bytes = 10, + write_final_mark = 0, + enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0; + +INSERT INTO zero_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 1, 1000, 2000, 1, 1), ('2018-05-16', 2, 3000, 4000, 1, 1), ('2018-05-17', 3, 5000, 6000, 1, 1), ('2018-05-18', 4, 7000, 8000, 1, 1); + +SELECT COUNT(*) FROM zero_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'zero_rows_per_granule' and database=currentDatabase() and active=1; + +INSERT INTO zero_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 5, 1000, 2000, 1, 1), ('2018-05-16', 6, 3000, 4000, 1, 1), ('2018-05-17', 7, 5000, 6000, 1, 1), ('2018-05-19', 8, 7000, 8000, 1, 1); + +OPTIMIZE TABLE zero_rows_per_granule FINAL; + +SELECT COUNT(*) FROM zero_rows_per_granule FINAL; + +SELECT sum(marks) from system.parts WHERE table = 'zero_rows_per_granule' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS zero_rows_per_granule; + +SELECT '-----'; +DROP TABLE IF EXISTS four_rows_per_granule; + +CREATE TABLE four_rows_per_granule ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64, + Sign Int8, + Version UInt8 +) ENGINE VersionedCollapsingMergeTree(Sign, Version) PARTITION BY toYYYYMM(p) ORDER BY k + SETTINGS index_granularity_bytes=120, + min_index_granularity_bytes = 100, + write_final_mark = 0, + enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0; + +INSERT INTO four_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 1, 1000, 2000, 1, 1), ('2018-05-16', 2, 3000, 4000, 1, 1), ('2018-05-17', 3, 5000, 6000, 1, 1), ('2018-05-18', 4, 7000, 8000, 1, 1); + +SELECT COUNT(*) FROM four_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'four_rows_per_granule' and database=currentDatabase() and active=1; + +INSERT INTO four_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 1, 1000, 2000, -1, 1), ('2018-05-16', 2, 3000, 4000, -1, 1), ('2018-05-17', 3, 5000, 6000, -1, 1), ('2018-05-18', 4, 7000, 8000, -1, 1); + +OPTIMIZE TABLE four_rows_per_granule FINAL; + +SELECT COUNT(*) FROM four_rows_per_granule; + +-- We expect zero marks here, so we might get zero rows if all the parts were +-- deleted already. This can happen in parallel runs where there may be a long delay +-- between queries. So we must write the query in such a way that it always returns +-- zero rows if OK. 
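+-- (Illustration of the guard above: "having d > 0" drops the row when a still-active +-- part reports marks = 0, and the WHERE clause matches nothing once the part is gone, +-- so the check below returns an empty result in both acceptable states.)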
+SELECT distinct(marks) d from system.parts WHERE table = 'four_rows_per_granule' and database=currentDatabase() and active=1 having d > 0; + +INSERT INTO four_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 1, 1000, 2000, 1, 1), ('2018-05-16', 2, 3000, 4000, 1, 1), ('2018-05-17', 3, 5000, 6000, 1, 1), ('2018-05-18', 4, 7000, 8000, 1, 1); + +INSERT INTO four_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 5, 1000, 2000, 1, 1), ('2018-05-16', 6, 3000, 4000, 1, 1), ('2018-05-17', 7, 5000, 6000, 1, 1), ('2018-05-18', 8, 7000, 8000, 1, 1); + +INSERT INTO four_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 5, 1000, 2000, -1, 1), ('2018-05-17', 7, 5000, 6000, -1, 1); + +OPTIMIZE TABLE four_rows_per_granule FINAL; + +SELECT COUNT(*) FROM four_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'four_rows_per_granule' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS four_rows_per_granule; + +SELECT '-----'; + +DROP TABLE IF EXISTS six_rows_per_granule; + +CREATE TABLE six_rows_per_granule ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64, + Sign Int8, + Version UInt8 +) ENGINE VersionedCollapsingMergeTree(Sign, Version) PARTITION BY toYYYYMM(p) ORDER BY k + SETTINGS index_granularity_bytes=170, + min_index_granularity_bytes = 100, + write_final_mark = 0, + enable_vertical_merge_algorithm=1, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0; + + +INSERT INTO six_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 1, 1000, 2000, 1, 1), ('2018-05-16', 1, 1000, 2000, -1, 2); + +INSERT INTO six_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 2, 1000, 2000, 1, 1), ('2018-05-16', 2, 1000, 2000, -1, 2); + +INSERT INTO six_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 3, 1000, 2000, 1, 1), ('2018-05-16', 3, 1000, 2000, -1, 2); + +OPTIMIZE TABLE six_rows_per_granule FINAL; + +SELECT COUNT(*) FROM six_rows_per_granule; + +SELECT distinct(marks) from system.parts WHERE table = 'six_rows_per_granule' and database=currentDatabase() and active=1; + +INSERT INTO six_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 1, 1000, 2000, -1, 1), ('2018-05-16', 1, 1000, 2000, 1, 2); + +INSERT INTO six_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 2, 1000, 2000, -1, 1), ('2018-05-16', 2, 1000, 2000, 1, 2); + +INSERT INTO six_rows_per_granule (p, k, v1, v2, Sign, Version) VALUES ('2018-05-15', 3, 1000, 2000, -1, 1), ('2018-05-16', 3, 1000, 2000, 1, 2); + +OPTIMIZE TABLE six_rows_per_granule FINAL; + +SELECT COUNT(*) FROM six_rows_per_granule; + +-- We expect zero marks here, so we might get zero rows if all the parts were +-- deleted already. This can happen in parallel runs where there may be a long delay +-- between queries. So we must write the query in such a way that it always returns +-- zero rows if OK. 
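+-- (Same guard for six_rows_per_granule below; here the second round of inserts +-- supplies the opposite Sign for every key and Version, so after OPTIMIZE ... FINAL +-- the expected state is "no active parts or marks = 0", i.e. an empty result.)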
+SELECT distinct(marks) d from system.parts WHERE table = 'six_rows_per_granule' and database=currentDatabase() and active=1 having d > 0; + +DROP TABLE IF EXISTS six_rows_per_granule; diff --git a/parser/testdata/00926_geo_to_h3/ast.json b/parser/testdata/00926_geo_to_h3/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00926_geo_to_h3/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00926_geo_to_h3/metadata.json b/parser/testdata/00926_geo_to_h3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00926_geo_to_h3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00926_geo_to_h3/query.sql b/parser/testdata/00926_geo_to_h3/query.sql new file mode 100644 index 000000000..b81351c7d --- /dev/null +++ b/parser/testdata/00926_geo_to_h3/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS table1; + +CREATE TABLE table1 (lat Float64, lon Float64, resolution UInt8) ENGINE = Memory; + +INSERT INTO table1 VALUES(55.77922738, 37.63098076, 15); +INSERT INTO table1 VALUES(55.76324100, 37.66018300, 15); +INSERT INTO table1 VALUES(55.72076200, 37.59813500, 15); +INSERT INTO table1 VALUES(55.72076201, 37.59813500, 15); +INSERT INTO table1 VALUES(55.72076200, 37.59813500, 14); + +select geoToH3(55.77922738, 37.63098076, 15); +select geoToH3(55.77922738, 37.63098076, 24); -- { serverError ARGUMENT_OUT_OF_BOUND } +select geoToH3(lat, lon, resolution) from table1 order by lat, lon, resolution; +select geoToH3(lat, lon, resolution) AS k from table1 order by lat, lon, k; +select lat, lon, geoToH3(lat, lon, resolution) AS k from table1 order by lat, lon, k; +select geoToH3(lat, lon, resolution) AS k, count(*) from table1 group by k order by k; + +DROP TABLE table1 diff --git a/parser/testdata/00926_multimatch/ast.json b/parser/testdata/00926_multimatch/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00926_multimatch/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00926_multimatch/metadata.json b/parser/testdata/00926_multimatch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00926_multimatch/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00926_multimatch/query.sql b/parser/testdata/00926_multimatch/query.sql new file mode 100644 index 000000000..7cea5c03d --- /dev/null +++ b/parser/testdata/00926_multimatch/query.sql @@ -0,0 +1,195 @@ +-- Tags: no-fasttest, use-vectorscan + +SELECT '-- With const pattern'; + +select 0 = multiMatchAny(materialize('mpnsguhwsitzvuleiwebwjfitmsg'), ['wbirxqoabpblrnvvmjizj', 'cfcxhuvrexyzyjsh', 'oldhtubemyuqlqbwvwwkwin', 'bumoozxdkjglzu', 'intxlfohlxmajjomw', 'dxkeghohv', 'arsvmwwkjeopnlwnan', 'ouugllgowpqtaxslcopkytbfhifaxbgt', 'hkedmjlbcrzvryaopjqdjjc', 'tbqkljywstuahzh', 'o', 'wowoclosyfcuwotmvjygzuzhrery', 'vpefjiffkhlggntcu', 'ytdixvasrorhripzfhjdmlhqksmctyycwp']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('qjjzqexjpgkglgxpzrbqbnskq'), ['vaiatcjacmlffdzsejpdareqzy', 'xspcfzdufkmecud', 'bcvtbuqtctq', 'nkcopwbfytgemkqcfnnno', 'dylxnzuyhq', 'tno', 'scukuhufly', 'cdyquzuqlptv', 'ohluyfeksyxepezdhqmtfmgkvzsyph', 'ualzwtahvqvtijwp', 'jg', 'gwbawqlngzcknzgtmlj', 'qimvjcgbkkp', 'eaedbcgyrdvv', 'qcwrncjoewwedyyewcdkh', 'uqcvhngoqngmitjfxpznqomertqnqcveoqk', 'ydrgjiankgygpm', 'axepgap']) from system.numbers limit 10; +select 0 = 
multiMatchAny(materialize('fdkmtqmxnegwvnjhghjq'), ['vynkybvdmhgeezybbdqfrukibisj', 'knazzamgjjpavwhvdkwigykh', 'peumnifrmdhhmrqqnemw', 'lmsnyvqoisinlaqobxojlwfbi', 'oqwfzs', 'dymudxxeodwjpgbibnkvr', 'vomtfsnizkplgzktqyoiw', 'yoyfuhlpgrzds', 'cefao', 'gi', 'srpgxfjwl', 'etsjusdeiwbfe', 'ikvtzdopxo', 'ljfkavrau', 'soqdhxtenfrkmeic', 'ktprjwfcelzbup', 'pcvuoddqwsaurcqdtjfnczekwni', 'agkqkqxkfbkfgyqliahsljim']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('khljxzxlpcrxpkrfybbfk'), ['', 'lpc', 'rxpkrfybb', 'crxp', '', 'pkr', 'jxzxlpcrxpkrf', '', 'xzxlpcr', 'xpk', 'fyb', 'xzxlpcrxpkrfybbfk', 'k', 'lpcrxp', 'ljxzxlpcr', 'r', 'pkr', 'fk']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('rbrizgjbigvzfnpgmpkqxoqxvdj'), ['ee', 'cohqnb', 'msol', 'yhlujcvhklnhuomy', 'ietn', 'vgmnlkcsybtokrepzrm', 'wspiryefojxysgrzsxyrluykxfnnbzdstcel', 'mxisnsivndbefqxwznimwgazuulupbaihavg', 'vpzdjvqqeizascxmzdhuq', 'pgvncohlxcqjhfkm', 'mbaypcnfapltsegquurahlsruqvipfhrhq', 'ioxjbcyyqujfveujfhnfdfokfcrlsincjbdt', 'cnvlujyowompdrqjwjx', 'wobwed', 'kdfhaoxiuifotmptcmdbk', 'leoamsnorcvtlmokdomkzuo', 'jjw', 'ogugysetxuqmvggneosbsfbonszepsatq']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('uymwxzyjbfegbhgswiqhinf'), ['lizxzbzlwljkr', 'ukxygktlpzuyijcqeqktxenlaqi', 'onperabgbdiafsxwbvpjtyt', 'xfqgoqvhqph', 'aflmcwabtwgmajmmqelxwkaolyyhmdlc', 'yfz', 'meffuiaicvwed', 'hhzvgmifzamgftkifaeowayjrnnzw', 'nwewybtajv', 'ectiye', 'epjeiljegmqqjncubj', 'zsjgftqjrn', 'pssng', 'raqoarfhdoeujulvqmdo']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('omgghgnzjmecpzqmtcvw'), ['fjhlzbszodmzavzg', 'gfofrnwrxprkfiokv', 'jmjiiqpgznlmyrxwewzqzbe', 'pkyrsqkltlmxr', 'crqgkgqkkyujcyoc', 'endagbcxwqhueczuasykmajfsvtcmh', 'xytmxtrnkdysuwltqomehddp', 'etmdxyyfotfyifwvbykghijvwv', 'mwqtgrncyhkfhjdg', 'iuvymofrqpp', 'pgllsdanlhzqhkstwsmzzftp', 'disjylcceufxtjdvhy']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('mznihnmshftvnmmhnrulizzpslq'), ['nrul', 'mshftvnmmhnr', 'z', 'mhnrulizzps', 'hftvnmmhnrul', 'ihnmshftvnmmhnrulizzp', 'izz', '', 'uli', 'nihnmshftvnmmhnru', 'hnrulizzp', 'nrulizz']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('ruqmqrsxrbftvruvahonradau'), ['uqmqrsxrbft', 'ftv', 'tvruvahonrad', 'mqrsxrbftvruvahon', 'rbftvruvah', 'qrsxrbftvru', 'o', 'ahonradau', 'a', 'ft', '', 'u', 'rsxrbftvruvahonradau', 'ruvahon', 'bftvruvahonradau', 'qrsxrbftvru', 't', 'vahonrada', 'vruvahonradau', 'onra']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('gpsevxtcoeexrltyzduyidmtzxf'), ['exrltyzduyid', 'vxtcoeexrltyz', 'xr', 'ltyzduyidmt', 'yzduy', 'exr', 'coeexrltyzduy', 'coeexrltyzduy', 'rlty', 'rltyzduyidm', 'exrltyz', 'xtcoeexrlty', 'vxtcoeexrltyzduyidm', '', 'coeexrl', 'sevxtcoeexrltyzdu', 'dmt', '']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('dyhycfhzyewaikgursyxfkuv'), ['sktnofpugrmyxmbizzrivmhn', 'fhlgadpoqcvktbfzncxbllvwutdawmw', 'eewzjpcgzrqmltbgmhafwlwqb', 'tpogbkyj', 'rtllntxjgkzs', 'mirbvsqexscnzglogigbujgdwjvcv', 'iktwpgjsakemewmahgqza', 'xgfvzkvqgiuoihjjnxwwpznxhz', 'nxaumpaknreklbwynvxdsmatjekdlxvklh', 'zadzwqhgfxqllihuudozxeixyokhny', 'tdqpgfpzexlkslodps', 'slztannufxaabqfcjyfquafgfhfb', 'xvjldhfuwurvkb', 'aecv', 'uycfsughpikqsbcmwvqygdyexkcykhbnau', 'jr']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('vbcsettndwuntnruiyclvvwoo'), ['dwuntnru', '', 'ttndwuntnruiyclvv', 'ntnr', 'nruiyclvvw', 'wo', '', 'bcsettndwuntnruiycl', 'yc', 'untnruiyclvvw', 
'csettndwuntnr', 'ntnruiyclvvwo']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('pqqnugshlczcuxhpjxjbcnro'), ['dpeedqy', 'rtsc', 'jdgla', 'qkgudqjiyzvlvsj', 'xmfxawhijgxxtydbd', 'ebgzazqthb', 'wyrjhvhwzhmpybnylirrn', 'iviqbyuclayqketooztwegtkgwnsezfl', 'bhvidy', 'hijctxxweboq', 't', 'osnzfbziidteiaifgaanm']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('loqchlxspwuvvccucskuytr'), ['', 'k', 'qchlxspwu', 'u', 'hlxspwuvv', 'wuvvccucsku', 'vcc', 'uyt', 'uvv', 'spwu', 'ytr', 'wuvvccucs', 'xspwuv', 'lxspwuvvccuc', 'spwuvvccu', 'oqchlxspwuvvccucskuy']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('pjjyzupzwllshlnatiujmwvaofr'), ['lnatiujmwvao', '', 'zupzwllsh', 'nati', 'wllshl', 'hlnatiujmwv', 'mwvao', 'shlnat', 'ati', 'wllshlnatiujmwvao', 'wllshlnatiujmwvaofr', 'nat']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('iketunkleyaqaxdlocci'), ['nkleyaqaxd', 'etunkleyaq', 'yaqaxdlocci', 'tunkleyaq', 'eyaqaxdlocc', 'leyaq', 'nkleyaqaxdl', 'tunkleya', 'kleyaqa', 'etunkleya', 'leyaqa', 'dlo', 'yaqa', 'leyaqaxd', 'etunkleyaq', '']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('drqianqtangmgbdwruvblkqd'), ['wusajejyucamkyl', 'wsgibljugzrpkniliy', 'lhwqqiuafwffyersqjgjvvvfurx', 'jfokpzzxfdonelorqu', 'ccwkpcgac', 'jmyulqpndkmzbfztobwtm', 'rwrgfkccgxht', 'ggldjecrgbngkonphtcxrkcviujihidjx', 'spwweavbiokizv', 'lv', 'krb', 'vstnhvkbwlqbconaxgbfobqky', 'pvxwdc', 'thrl', 'ahsblffdveamceonqwrbeyxzccmux', 'yozji', 'oejtaxwmeovtqtz', 'zsnzznvqpxdvdxhznxrjn', 'hse', 'kcmkrccxmljzizracxwmpoaggywhdfpxkq']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('yasnpckniistxcejowfijjsvkdajz'), ['slkpxhtsmrtvtm', 'crsbq', 'rdeshtxbfrlfwpsqojassxmvlfbzefldavmgme', 'ipetilcbpsfroefkjirquciwtxhrimbmwnlyv', 'knjpwkmdwbvdbapuyqbtsw', 'horueidziztxovqhsicnklmharuxhtgrsr', 'ofohrgpz', 'oneqnwyevbaqsonrcpmxcynflojmsnix', 'shg', 'nglqzczevgevwawdfperpeytuodjlf']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('ueptpscfgxhplwsueckkxs'), ['ohhygchclbpcdwmftperprn', 'dvpjdqmqckekndvcerqrpkxen', 'lohhvarnmyi', 'zppd', 'qmqxgfewitsunbuhffozcpjtc', 'hsjbioisycsrawktqssjovkmltxodjgv', 'dbzuunwbkrtosyvctdujqtvaawfnvuq', 'gupbvpqthqxae', 'abjdmijaaiasnccgxttmqdsz', 'uccyumqoyqe', 'kxxliepyzlc', 'wbqcqtbyyjbqcgdbpkmzugksmcxhvr', 'piedxm', 'uncpphzoif', 'exkdankwck', 'qeitzozdrqopsergzr', 'hesgrhaftgesnzflrrtjdobxhbepjoas', 'wfpexx']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('ldrzgttlqaphekkkdukgngl'), ['gttlqaphekkkdukgn', 'ekkkd', 'gttlqaphe', 'qaphek', 'h', 'kdu', 'he', 'phek', '', 'drzgttlqaphekkkd']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('ololo'), ['ololo', 'ololo', 'ololo']); +SELECT 1 = multiMatchAny(materialize('khljxzxlpcrxpkrfybbfk'), ['k']); + +select 1 = multiMatchAny(materialize(''), ['']); +select 0 = multiMatchAny(materialize(''), ['some string']); +select 1 = multiMatchAny(materialize('abc'), ['']); +select 1 = multiMatchAny(materialize('abc'), ['']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), []::Array(String)) from system.numbers limit 5; + +select 0 = multiMatchAny(materialize('abc'), ['defgh']); +select 0 = multiMatchAny(materialize('abc'), ['defg']); +select 0 = multiMatchAny(materialize('abc'), ['def']); +select 0 = multiMatchAny(materialize('abc'), ['de']); +select 0 = multiMatchAny(materialize('abc'), ['d']); + + +select 1 = multiMatchAny(materialize('abc'), ['b']) from 
system.numbers limit 10; +select 1 = multiMatchAny(materialize('abc'), ['bc']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), ['bcde']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), ['bcdef']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), ['bcdefg']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), ['bcdefgh']) from system.numbers limit 10; + + +select 0 = multiMatchAny(materialize('abc'), ['abcdefg']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), ['abcdef']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), ['abcde']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), ['abcd']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('abc'), ['abc']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('abc'), ['ab']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('abc'), ['a']) from system.numbers limit 10; + +select 1 = multiMatchAny(materialize('abcd'), ['c']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('abcd'), ['cd']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abcd'), ['cde']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abcd'), ['cdef']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abcd'), ['cdefg']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abcd'), ['cdefgh']) from system.numbers limit 10; + +select 0 = multiMatchAny(materialize('abc'), ['defgh']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), ['defg']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), ['def']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), ['de']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), ['d']) from system.numbers limit 10; + +select 1 = multiMatchAny(materialize('abc'), ['...']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('a\nbc'), ['a?bc']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('a\nbc'), ['a.bc']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('a\0bc'), ['a?bc']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('a\0bc'), ['a.bc']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('abcdef'), ['a.....']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abcdef'), ['a......']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('abcdef'), ['a......', 'a.....']) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('aaaa'), ['.*aa.*aaa.*', 'aaaaaa{2}', '\(aa\){3}']) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('abc'), ['a\0d']) from system.numbers limit 10; + +select 0 = multiMatchAnyIndex(materialize('gogleuedeuniangoogle'), []::Array(String)) from system.numbers limit 5; +select 1 = multiMatchAnyIndex(materialize('gogleuedeuniangoogle'), ['google', 'unian1']) from system.numbers limit 10; +select 2 = multiMatchAnyIndex(materialize('gogleuedeuniangoogle'), ['google1', 'unian']) from system.numbers limit 10; +select 0 != multiMatchAnyIndex(materialize('gogleuedeuniangoogle'), ['.*goo.*', '.*yan.*']) from system.numbers limit 10; +select 5 = multiMatchAnyIndex(materialize('vladizlvav dabe don\'t heart me no more'), ['what', 'is', 'love', 'baby', 'no mo??', 
'dont', 'h.rt me']) from system.numbers limit 10; + +SELECT multiMatchAny(materialize('/odezhda-dlya-bega/'), ['/odezhda-dlya-bega/', 'kurtki-i-vetrovki-dlya-bega', 'futbolki-i-mayki-dlya-bega']); +SELECT 1 = multiMatchAny('фабрикант', ['f[ae]b[ei]rl', 'ф[иаэе]б[еэи][рпл]', 'афиукд', 'a[ft],th', '^ф[аиеэ]?б?[еэи]?$', 'берлик', 'fab', 'фа[беьв]+е?[рлко]']); + +-- All indices tests +SELECT [] = multiMatchAllIndices(materialize('Butterbrot!'), []::Array(String)) from system.numbers limit 5; +SELECT [1, 2] = arraySort(multiMatchAllIndices(materialize('gogleuedeuniangoogle'), ['.*goo.*', '.*yan.*'])) from system.numbers limit 5; +SELECT [1, 3] = arraySort(multiMatchAllIndices(materialize('gogleuedeuniangoogle'), ['.*goo.*', 'neverexisted', '.*yan.*'])) from system.numbers limit 5; +SELECT [] = multiMatchAllIndices(materialize('gogleuedeuniangoogle'), ['neverexisted', 'anotherone', 'andanotherone']) from system.numbers limit 5; +SELECT [1, 2, 3, 11] = arraySort(multiMatchAllIndices('фабрикант', ['', 'рикан', 'а', 'f[ae]b[ei]rl', 'ф[иаэе]б[еэи][рпл]', 'афиукд', 'a[ft],th', '^ф[аиеэ]?б?[еэи]?$', 'берлик', 'fab', 'фа[беьв]+е?[рлко]'])); +SELECT [1] = multiMatchAllIndices(materialize('/odezhda-dlya-bega/'), ['/odezhda-dlya-bega/', 'kurtki-i-vetrovki-dlya-bega', 'futbolki-i-mayki-dlya-bega']); +SELECT [] = multiMatchAllIndices(materialize('aaaa'), ['.*aa.*aaa.*', 'aaaaaa{2}', '\(aa\){3}']); +SELECT 'All tests above must return 1, all tests below return something.'; +SELECT arraySort(multiMatchAllIndices(arrayJoin(['aaaa', 'aaaaaa', 'bbbb', 'aaaaaaaaaaaaaa']), ['.*aa.*aaa.*', 'aaaaaa{2}', '(aa){3}'])); + +SELECT '-- With non-const pattern'; + +select 0 = multiMatchAny(materialize('mpnsguhwsitzvuleiwebwjfitmsg'), materialize(['wbirxqoabpblrnvvmjizj', 'cfcxhuvrexyzyjsh', 'oldhtubemyuqlqbwvwwkwin', 'bumoozxdkjglzu', 'intxlfohlxmajjomw', 'dxkeghohv', 'arsvmwwkjeopnlwnan', 'ouugllgowpqtaxslcopkytbfhifaxbgt', 'hkedmjlbcrzvryaopjqdjjc', 'tbqkljywstuahzh', 'o', 'wowoclosyfcuwotmvjygzuzhrery', 'vpefjiffkhlggntcu', 'ytdixvasrorhripzfhjdmlhqksmctyycwp'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('qjjzqexjpgkglgxpzrbqbnskq'), materialize(['vaiatcjacmlffdzsejpdareqzy', 'xspcfzdufkmecud', 'bcvtbuqtctq', 'nkcopwbfytgemkqcfnnno', 'dylxnzuyhq', 'tno', 'scukuhufly', 'cdyquzuqlptv', 'ohluyfeksyxepezdhqmtfmgkvzsyph', 'ualzwtahvqvtijwp', 'jg', 'gwbawqlngzcknzgtmlj', 'qimvjcgbkkp', 'eaedbcgyrdvv', 'qcwrncjoewwedyyewcdkh', 'uqcvhngoqngmitjfxpznqomertqnqcveoqk', 'ydrgjiankgygpm', 'axepgap'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('fdkmtqmxnegwvnjhghjq'), materialize(['vynkybvdmhgeezybbdqfrukibisj', 'knazzamgjjpavwhvdkwigykh', 'peumnifrmdhhmrqqnemw', 'lmsnyvqoisinlaqobxojlwfbi', 'oqwfzs', 'dymudxxeodwjpgbibnkvr', 'vomtfsnizkplgzktqyoiw', 'yoyfuhlpgrzds', 'cefao', 'gi', 'srpgxfjwl', 'etsjusdeiwbfe', 'ikvtzdopxo', 'ljfkavrau', 'soqdhxtenfrkmeic', 'ktprjwfcelzbup', 'pcvuoddqwsaurcqdtjfnczekwni', 'agkqkqxkfbkfgyqliahsljim'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('khljxzxlpcrxpkrfybbfk'), materialize(['', 'lpc', 'rxpkrfybb', 'crxp', '', 'pkr', 'jxzxlpcrxpkrf', '', 'xzxlpcr', 'xpk', 'fyb', 'xzxlpcrxpkrfybbfk', 'k', 'lpcrxp', 'ljxzxlpcr', 'r', 'pkr', 'fk'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('rbrizgjbigvzfnpgmpkqxoqxvdj'), materialize(['ee', 'cohqnb', 'msol', 'yhlujcvhklnhuomy', 'ietn', 'vgmnlkcsybtokrepzrm', 'wspiryefojxysgrzsxyrluykxfnnbzdstcel', 'mxisnsivndbefqxwznimwgazuulupbaihavg', 'vpzdjvqqeizascxmzdhuq', 
'pgvncohlxcqjhfkm', 'mbaypcnfapltsegquurahlsruqvipfhrhq', 'ioxjbcyyqujfveujfhnfdfokfcrlsincjbdt', 'cnvlujyowompdrqjwjx', 'wobwed', 'kdfhaoxiuifotmptcmdbk', 'leoamsnorcvtlmokdomkzuo', 'jjw', 'ogugysetxuqmvggneosbsfbonszepsatq'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('uymwxzyjbfegbhgswiqhinf'), materialize(['lizxzbzlwljkr', 'ukxygktlpzuyijcqeqktxenlaqi', 'onperabgbdiafsxwbvpjtyt', 'xfqgoqvhqph', 'aflmcwabtwgmajmmqelxwkaolyyhmdlc', 'yfz', 'meffuiaicvwed', 'hhzvgmifzamgftkifaeowayjrnnzw', 'nwewybtajv', 'ectiye', 'epjeiljegmqqjncubj', 'zsjgftqjrn', 'pssng', 'raqoarfhdoeujulvqmdo'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('omgghgnzjmecpzqmtcvw'), materialize(['fjhlzbszodmzavzg', 'gfofrnwrxprkfiokv', 'jmjiiqpgznlmyrxwewzqzbe', 'pkyrsqkltlmxr', 'crqgkgqkkyujcyoc', 'endagbcxwqhueczuasykmajfsvtcmh', 'xytmxtrnkdysuwltqomehddp', 'etmdxyyfotfyifwvbykghijvwv', 'mwqtgrncyhkfhjdg', 'iuvymofrqpp', 'pgllsdanlhzqhkstwsmzzftp', 'disjylcceufxtjdvhy'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('mznihnmshftvnmmhnrulizzpslq'), materialize(['nrul', 'mshftvnmmhnr', 'z', 'mhnrulizzps', 'hftvnmmhnrul', 'ihnmshftvnmmhnrulizzp', 'izz', '', 'uli', 'nihnmshftvnmmhnru', 'hnrulizzp', 'nrulizz'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('ruqmqrsxrbftvruvahonradau'), materialize(['uqmqrsxrbft', 'ftv', 'tvruvahonrad', 'mqrsxrbftvruvahon', 'rbftvruvah', 'qrsxrbftvru', 'o', 'ahonradau', 'a', 'ft', '', 'u', 'rsxrbftvruvahonradau', 'ruvahon', 'bftvruvahonradau', 'qrsxrbftvru', 't', 'vahonrada', 'vruvahonradau', 'onra'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('gpsevxtcoeexrltyzduyidmtzxf'), materialize(['exrltyzduyid', 'vxtcoeexrltyz', 'xr', 'ltyzduyidmt', 'yzduy', 'exr', 'coeexrltyzduy', 'coeexrltyzduy', 'rlty', 'rltyzduyidm', 'exrltyz', 'xtcoeexrlty', 'vxtcoeexrltyzduyidm', '', 'coeexrl', 'sevxtcoeexrltyzdu', 'dmt', ''])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('dyhycfhzyewaikgursyxfkuv'), materialize(['sktnofpugrmyxmbizzrivmhn', 'fhlgadpoqcvktbfzncxbllvwutdawmw', 'eewzjpcgzrqmltbgmhafwlwqb', 'tpogbkyj', 'rtllntxjgkzs', 'mirbvsqexscnzglogigbujgdwjvcv', 'iktwpgjsakemewmahgqza', 'xgfvzkvqgiuoihjjnxwwpznxhz', 'nxaumpaknreklbwynvxdsmatjekdlxvklh', 'zadzwqhgfxqllihuudozxeixyokhny', 'tdqpgfpzexlkslodps', 'slztannufxaabqfcjyfquafgfhfb', 'xvjldhfuwurvkb', 'aecv', 'uycfsughpikqsbcmwvqygdyexkcykhbnau', 'jr'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('vbcsettndwuntnruiyclvvwoo'), materialize(['dwuntnru', '', 'ttndwuntnruiyclvv', 'ntnr', 'nruiyclvvw', 'wo', '', 'bcsettndwuntnruiycl', 'yc', 'untnruiyclvvw', 'csettndwuntnr', 'ntnruiyclvvwo'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('pqqnugshlczcuxhpjxjbcnro'), materialize(['dpeedqy', 'rtsc', 'jdgla', 'qkgudqjiyzvlvsj', 'xmfxawhijgxxtydbd', 'ebgzazqthb', 'wyrjhvhwzhmpybnylirrn', 'iviqbyuclayqketooztwegtkgwnsezfl', 'bhvidy', 'hijctxxweboq', 't', 'osnzfbziidteiaifgaanm'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('loqchlxspwuvvccucskuytr'), materialize(['', 'k', 'qchlxspwu', 'u', 'hlxspwuvv', 'wuvvccucsku', 'vcc', 'uyt', 'uvv', 'spwu', 'ytr', 'wuvvccucs', 'xspwuv', 'lxspwuvvccuc', 'spwuvvccu', 'oqchlxspwuvvccucskuy'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('pjjyzupzwllshlnatiujmwvaofr'), materialize(['lnatiujmwvao', '', 'zupzwllsh', 'nati', 'wllshl', 'hlnatiujmwv', 'mwvao', 'shlnat', 'ati', 'wllshlnatiujmwvao', 
'wllshlnatiujmwvaofr', 'nat'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('iketunkleyaqaxdlocci'), materialize(['nkleyaqaxd', 'etunkleyaq', 'yaqaxdlocci', 'tunkleyaq', 'eyaqaxdlocc', 'leyaq', 'nkleyaqaxdl', 'tunkleya', 'kleyaqa', 'etunkleya', 'leyaqa', 'dlo', 'yaqa', 'leyaqaxd', 'etunkleyaq', ''])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('drqianqtangmgbdwruvblkqd'), materialize(['wusajejyucamkyl', 'wsgibljugzrpkniliy', 'lhwqqiuafwffyersqjgjvvvfurx', 'jfokpzzxfdonelorqu', 'ccwkpcgac', 'jmyulqpndkmzbfztobwtm', 'rwrgfkccgxht', 'ggldjecrgbngkonphtcxrkcviujihidjx', 'spwweavbiokizv', 'lv', 'krb', 'vstnhvkbwlqbconaxgbfobqky', 'pvxwdc', 'thrl', 'ahsblffdveamceonqwrbeyxzccmux', 'yozji', 'oejtaxwmeovtqtz', 'zsnzznvqpxdvdxhznxrjn', 'hse', 'kcmkrccxmljzizracxwmpoaggywhdfpxkq'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('yasnpckniistxcejowfijjsvkdajz'), materialize(['slkpxhtsmrtvtm', 'crsbq', 'rdeshtxbfrlfwpsqojassxmvlfbzefldavmgme', 'ipetilcbpsfroefkjirquciwtxhrimbmwnlyv', 'knjpwkmdwbvdbapuyqbtsw', 'horueidziztxovqhsicnklmharuxhtgrsr', 'ofohrgpz', 'oneqnwyevbaqsonrcpmxcynflojmsnix', 'shg', 'nglqzczevgevwawdfperpeytuodjlf'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('ueptpscfgxhplwsueckkxs'), materialize(['ohhygchclbpcdwmftperprn', 'dvpjdqmqckekndvcerqrpkxen', 'lohhvarnmyi', 'zppd', 'qmqxgfewitsunbuhffozcpjtc', 'hsjbioisycsrawktqssjovkmltxodjgv', 'dbzuunwbkrtosyvctdujqtvaawfnvuq', 'gupbvpqthqxae', 'abjdmijaaiasnccgxttmqdsz', 'uccyumqoyqe', 'kxxliepyzlc', 'wbqcqtbyyjbqcgdbpkmzugksmcxhvr', 'piedxm', 'uncpphzoif', 'exkdankwck', 'qeitzozdrqopsergzr', 'hesgrhaftgesnzflrrtjdobxhbepjoas', 'wfpexx'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('ldrzgttlqaphekkkdukgngl'), materialize(['gttlqaphekkkdukgn', 'ekkkd', 'gttlqaphe', 'qaphek', 'h', 'kdu', 'he', 'phek', '', 'drzgttlqaphekkkd'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('ololo'), materialize(['ololo', 'ololo', 'ololo'])); +SELECT 1 = multiMatchAny(materialize('khljxzxlpcrxpkrfybbfk'), materialize(['k'])); + +select 1 = multiMatchAny(materialize(''), materialize([''])); +select 0 = multiMatchAny(materialize(''), materialize(['some string'])); +select 1 = multiMatchAny(materialize('abc'), materialize([''])); +select 1 = multiMatchAny(materialize('abc'), materialize([''])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), materialize([]::Array(String))) from system.numbers limit 5; + +select 0 = multiMatchAny(materialize('abc'), materialize(['defgh'])); +select 0 = multiMatchAny(materialize('abc'), materialize(['defg'])); +select 0 = multiMatchAny(materialize('abc'), materialize(['def'])); +select 0 = multiMatchAny(materialize('abc'), materialize(['de'])); +select 0 = multiMatchAny(materialize('abc'), materialize(['d'])); + + +select 1 = multiMatchAny(materialize('abc'), materialize(['b'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('abc'), materialize(['bc'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), materialize(['bcde'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), materialize(['bcdef'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), materialize(['bcdefg'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), materialize(['bcdefgh'])) from system.numbers limit 10; + + +select 0 = multiMatchAny(materialize('abc'), 
materialize(['abcdefg'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), materialize(['abcdef'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), materialize(['abcde'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), materialize(['abcd'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('abc'), materialize(['abc'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('abc'), materialize(['ab'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('abc'), materialize(['a'])) from system.numbers limit 10; + +select 1 = multiMatchAny(materialize('abcd'), materialize(['c'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('abcd'), materialize(['cd'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abcd'), materialize(['cde'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abcd'), materialize(['cdef'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abcd'), materialize(['cdefg'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abcd'), materialize(['cdefgh'])) from system.numbers limit 10; + +select 0 = multiMatchAny(materialize('abc'), materialize(['defgh'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), materialize(['defg'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), materialize(['def'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), materialize(['de'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abc'), materialize(['d'])) from system.numbers limit 10; + +select 1 = multiMatchAny(materialize('abc'), materialize(['...'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('a\nbc'), materialize(['a?bc'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('a\nbc'), materialize(['a.bc'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('a\0bc'), materialize(['a?bc'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('a\0bc'), materialize(['a.bc'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('abcdef'), materialize(['a.....'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('abcdef'), materialize(['a......'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('abcdef'), materialize(['a......', 'a.....'])) from system.numbers limit 10; +select 0 = multiMatchAny(materialize('aaaa'), materialize(['.*aa.*aaa.*', 'aaaaaa{2}', '\(aa\){3}'])) from system.numbers limit 10; +select 1 = multiMatchAny(materialize('abc'), materialize(['a\0d'])) from system.numbers limit 10; + +select 0 = multiMatchAnyIndex(materialize('gogleuedeuniangoogle'), materialize([]::Array(String))) from system.numbers limit 5; +select 1 = multiMatchAnyIndex(materialize('gogleuedeuniangoogle'), materialize(['google', 'unian1'])) from system.numbers limit 10; +select 2 = multiMatchAnyIndex(materialize('gogleuedeuniangoogle'), materialize(['google1', 'unian'])) from system.numbers limit 10; +select 0 != multiMatchAnyIndex(materialize('gogleuedeuniangoogle'), materialize(['.*goo.*', '.*yan.*'])) from system.numbers limit 10; +select 5 = multiMatchAnyIndex(materialize('vladizlvav dabe don\'t heart me no more'), materialize(['what', 'is', 'love', 'baby', 'no mo??', 'dont', 'h.rt me'])) from system.numbers limit 10; + +SELECT 
multiMatchAny(materialize('/odezhda-dlya-bega/'), materialize(['/odezhda-dlya-bega/', 'kurtki-i-vetrovki-dlya-bega', 'futbolki-i-mayki-dlya-bega'])); +SELECT 1 = multiMatchAny(materialize('фабрикант'), materialize(['f[ae]b[ei]rl', 'ф[иаэе]б[еэи][рпл]', 'афиукд', 'a[ft],th', '^ф[аиеэ]?б?[еэи]?$', 'берлик', 'fab', 'фа[беьв]+е?[рлко]'])); + +-- All indices tests +SELECT [] = multiMatchAllIndices(materialize('Butterbrot!'), materialize([]::Array(String))) from system.numbers limit 5; +SELECT [1, 2] = arraySort(multiMatchAllIndices(materialize('gogleuedeuniangoogle'), materialize(['.*goo.*', '.*yan.*']))) from system.numbers limit 5; +SELECT [1, 3] = arraySort(multiMatchAllIndices(materialize('gogleuedeuniangoogle'), materialize(['.*goo.*', 'neverexisted', '.*yan.*']))) from system.numbers limit 5; +SELECT [] = multiMatchAllIndices(materialize('gogleuedeuniangoogle'), materialize(['neverexisted', 'anotherone', 'andanotherone'])) from system.numbers limit 5; +SELECT [1, 2, 3, 11] = arraySort(multiMatchAllIndices(materialize('фабрикант'), materialize(['', 'рикан', 'а', 'f[ae]b[ei]rl', 'ф[иаэе]б[еэи][рпл]', 'афиукд', 'a[ft],th', '^ф[аиеэ]?б?[еэи]?$', 'берлик', 'fab', 'фа[беьв]+е?[рлко]']))); +SELECT [1] = multiMatchAllIndices(materialize('/odezhda-dlya-bega/'), materialize(['/odezhda-dlya-bega/', 'kurtki-i-vetrovki-dlya-bega', 'futbolki-i-mayki-dlya-bega'])); +SELECT [] = multiMatchAllIndices(materialize('aaaa'), materialize(['.*aa.*aaa.*', 'aaaaaa{2}', '\(aa\){3}'])); diff --git a/parser/testdata/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree_long/ast.json b/parser/testdata/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree_long/metadata.json b/parser/testdata/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree_long/query.sql b/parser/testdata/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree_long/query.sql new file mode 100644 index 000000000..123e81642 --- /dev/null +++ b/parser/testdata/00926_zookeeper_adaptive_index_granularity_replicated_merge_tree_long/query.sql @@ -0,0 +1,216 @@ +-- Tags: long, replica, no-random-merge-tree-settings + +----- Group of very similar simple tests ------ +select '----HORIZONTAL MERGE TESTS----'; +DROP TABLE IF EXISTS zero_rows_per_granule1; +DROP TABLE IF EXISTS zero_rows_per_granule2; + +CREATE TABLE zero_rows_per_granule1 ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00926/zero_rows_in_granule', '1') PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 20, min_index_granularity_bytes = 10, write_final_mark = 0; + +CREATE TABLE zero_rows_per_granule2 ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00926/zero_rows_in_granule', '2') PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 20, min_index_granularity_bytes = 10, 
write_final_mark = 0; + +INSERT INTO zero_rows_per_granule1 (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +SYSTEM SYNC REPLICA zero_rows_per_granule2; + +SELECT 'Replica synced'; + +SELECT COUNT(*) FROM zero_rows_per_granule1; + +SELECT distinct(marks) from system.parts WHERE table = 'zero_rows_per_granule1' and database=currentDatabase() and active=1; + +SELECT COUNT(*) FROM zero_rows_per_granule2; + +SELECT distinct(marks) from system.parts WHERE table = 'zero_rows_per_granule2' and database=currentDatabase() and active=1; + +INSERT INTO zero_rows_per_granule2 (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-19', 8, 7000, 8000); + +SELECT distinct(marks) from system.parts WHERE table = 'zero_rows_per_granule2' and database=currentDatabase() and active=1; + +SELECT distinct(marks) from system.parts WHERE table = 'zero_rows_per_granule1' and database=currentDatabase() and active=1; + +SELECT sleep(0.7) Format Null; + +OPTIMIZE TABLE zero_rows_per_granule2 FINAL; + +SELECT 'Parts optimized'; + +SYSTEM SYNC REPLICA zero_rows_per_granule1; + +SELECT 'Replica synced'; + +SELECT COUNT(*) FROM zero_rows_per_granule2; + +SELECT distinct(marks) from system.parts WHERE table = 'zero_rows_per_granule2' and database=currentDatabase() and active=1; + +SELECT COUNT(*) FROM zero_rows_per_granule1; + +SELECT distinct(marks) from system.parts WHERE table = 'zero_rows_per_granule1' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS zero_rows_per_granule1; +DROP TABLE IF EXISTS zero_rows_per_granule2; + +SELECT '-----'; + +DROP TABLE IF EXISTS four_rows_per_granule1; +DROP TABLE IF EXISTS four_rows_per_granule2; + +CREATE TABLE four_rows_per_granule1 ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00926/four_rows_in_granule', '1') PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100, write_final_mark = 0; + +CREATE TABLE four_rows_per_granule2 ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00926/four_rows_in_granule', '2') PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100 ,write_final_mark = 0; + +INSERT INTO four_rows_per_granule1 (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +SELECT COUNT(*) FROM four_rows_per_granule1; + +SELECT distinct(marks) from system.parts WHERE table = 'four_rows_per_granule1' and database=currentDatabase() and active=1; + +SYSTEM SYNC REPLICA four_rows_per_granule2; + +SELECT 'Replica synced'; + +SELECT COUNT(*) FROM four_rows_per_granule2; + +SELECT distinct(marks) from system.parts WHERE table = 'four_rows_per_granule2' and database=currentDatabase() and active=1; + +DETACH TABLE four_rows_per_granule2; +ATTACH TABLE four_rows_per_granule2; + +SELECT 'Table attached'; + +INSERT INTO four_rows_per_granule2 (p, k, v1, v2) VALUES ('2018-05-15', 5, 1000, 2000), ('2018-05-16', 6, 3000, 4000), ('2018-05-17', 7, 5000, 6000), ('2018-05-19', 8, 7000, 8000); + +SELECT distinct(marks) from system.parts WHERE table = 'four_rows_per_granule2' and database=currentDatabase() and active=1; + +SELECT distinct(marks) from system.parts WHERE table = 
'four_rows_per_granule1' and database=currentDatabase() and active=1; + +SELECT sleep(0.7) Format Null; + +OPTIMIZE TABLE four_rows_per_granule2 FINAL; +SELECT 'Parts optimized'; + +DETACH TABLE four_rows_per_granule2; + +ATTACH TABLE four_rows_per_granule2; + +SELECT COUNT(*) FROM four_rows_per_granule2; + +--SELECT distinct(marks) from system.parts WHERE table = 'four_rows_per_granule2' and database=currentDatabase() and active=1; + +SYSTEM SYNC REPLICA four_rows_per_granule1; +SELECT 'Replica synced'; + +SELECT COUNT(*) FROM four_rows_per_granule1; + +--SELECT distinct(marks) from system.parts WHERE table = 'four_rows_per_granule1' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS four_rows_per_granule1; +DROP TABLE IF EXISTS four_rows_per_granule2; + +SELECT '-----'; + +DROP TABLE IF EXISTS adaptive_granularity_alter1; +DROP TABLE IF EXISTS adaptive_granularity_alter2; + +CREATE TABLE adaptive_granularity_alter1 ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00926/adaptive_granularity_alter', '1') PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100, write_final_mark = 0; + +CREATE TABLE adaptive_granularity_alter2 ( + p Date, + k UInt64, + v1 UInt64, + v2 Int64 +) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_00926/adaptive_granularity_alter', '2') PARTITION BY toYYYYMM(p) ORDER BY k SETTINGS index_granularity_bytes = 110, min_index_granularity_bytes = 100, write_final_mark = 0; + +INSERT INTO adaptive_granularity_alter1 (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +SELECT COUNT(*) FROM adaptive_granularity_alter1; + +SELECT distinct(marks) from system.parts WHERE table = 'adaptive_granularity_alter1' and database=currentDatabase() and active=1; + +SYSTEM SYNC REPLICA adaptive_granularity_alter2; +SELECT 'Replica synced'; + +SELECT COUNT(*) FROM adaptive_granularity_alter2; + +SELECT distinct(marks) from system.parts WHERE table = 'adaptive_granularity_alter2' and database=currentDatabase() and active=1; + +ALTER TABLE adaptive_granularity_alter2 MODIFY COLUMN v1 Int16; + +DETACH TABLE adaptive_granularity_alter2; + +ATTACH TABLE adaptive_granularity_alter2; + +SELECT COUNT(*) FROM adaptive_granularity_alter2; + +SELECT distinct(marks) from system.parts WHERE table = 'adaptive_granularity_alter2' and database=currentDatabase() and active=1; + +SYSTEM SYNC REPLICA adaptive_granularity_alter1; +SELECT 'Replica synced'; + +SELECT COUNT(*) FROM adaptive_granularity_alter1; + +SELECT distinct(marks) from system.parts WHERE table = 'adaptive_granularity_alter1' and database=currentDatabase() and active=1; + +INSERT INTO adaptive_granularity_alter1 (p, k, v1, v2) VALUES ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 5, 3000, 4000), ('2018-05-17', 6, 5000, 6000), ('2018-05-19', 42, 42, 42); + +SELECT COUNT(*) FROM adaptive_granularity_alter1; + +SELECT distinct(marks) from system.parts WHERE table = 'adaptive_granularity_alter1' and database=currentDatabase() and active=1; + +SYSTEM SYNC REPLICA adaptive_granularity_alter2; + +SELECT COUNT(*) FROM adaptive_granularity_alter2; + +SELECT distinct(marks) from system.parts WHERE table = 'adaptive_granularity_alter2' and database=currentDatabase() and active=1; + +ALTER TABLE adaptive_granularity_alter1 MODIFY COLUMN v2 String; + +DETACH TABLE adaptive_granularity_alter1; + +ATTACH TABLE 
adaptive_granularity_alter1; + +INSERT INTO adaptive_granularity_alter1 (p, k, v1, v2) VALUES ('2018-05-15', 100, 1000, 'aaaa'), ('2018-05-16', 101, 3000, 'bbbb'), ('2018-05-17', 102, 5000, 'cccc'), ('2018-05-19', 103, 7000, 'dddd'); + +SELECT sleep(0.7) Format Null; + +OPTIMIZE TABLE adaptive_granularity_alter1 FINAL; +SELECT 'Parts optimized'; + +SELECT k, v2 FROM adaptive_granularity_alter1 WHERE k >= 100 OR k = 42 ORDER BY k; + +SELECT sum(marks) from system.parts WHERE table = 'adaptive_granularity_alter1' and database=currentDatabase() and active=1; + +SYSTEM SYNC REPLICA adaptive_granularity_alter2; +SELECT 'Replica synced'; + +SELECT k, v2 FROM adaptive_granularity_alter2 WHERE k >= 100 OR k = 42 ORDER BY k; + +SELECT sum(marks) from system.parts WHERE table = 'adaptive_granularity_alter2' and database=currentDatabase() and active=1; + +DROP TABLE IF EXISTS adaptive_granularity_alter1; +DROP TABLE IF EXISTS adaptive_granularity_alter2; diff --git a/parser/testdata/00927_asof_join_correct_bt/ast.json b/parser/testdata/00927_asof_join_correct_bt/ast.json new file mode 100644 index 000000000..1436750f6 --- /dev/null +++ b/parser/testdata/00927_asof_join_correct_bt/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery A (children 1)" + }, + { + "explain": " Identifier A" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001322189, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/00927_asof_join_correct_bt/metadata.json b/parser/testdata/00927_asof_join_correct_bt/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00927_asof_join_correct_bt/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00927_asof_join_correct_bt/query.sql b/parser/testdata/00927_asof_join_correct_bt/query.sql new file mode 100644 index 000000000..d796b62d3 --- /dev/null +++ b/parser/testdata/00927_asof_join_correct_bt/query.sql @@ -0,0 +1,32 @@ +DROP TABLE IF EXISTS A; +DROP TABLE IF EXISTS B; + +CREATE TABLE A(k UInt32, t UInt32, a UInt64) ENGINE = MergeTree() ORDER BY (k, t); +INSERT INTO A(k,t,a) VALUES (1,101,1),(1,102,2),(1,103,3),(1,104,4),(1,105,5); + +CREATE TABLE B1(k UInt32, t UInt32, b UInt64) ENGINE = MergeTree() ORDER BY (k, t); +INSERT INTO B1(k,t,b) VALUES (1,102,2), (1,104,4); + +CREATE TABLE B2(t UInt32, k UInt32, b UInt64) ENGINE = MergeTree() ORDER BY (k, t); +INSERT INTO B2(k,t,b) VALUES (1,102,2), (1,104,4); + +CREATE TABLE B3(k UInt32, b UInt64, t UInt32) ENGINE = MergeTree() ORDER BY (k, t); +INSERT INTO B3(k,t,b) VALUES (1,102,2), (1,104,4); + +-- { echoOn } +SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B1 B USING(k,t) ORDER BY (A.k, A.t); +SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B2 B USING(k,t) ORDER BY (A.k, A.t); +SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B3 B USING(k,t) ORDER BY (A.k, A.t); + +SET join_algorithm = 'full_sorting_merge'; +SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B1 B USING(k,t) ORDER BY (A.k, A.t); +SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B2 B USING(k,t) ORDER BY (A.k, A.t); +SELECT A.k, A.t, A.a, B.b, B.t, B.k FROM A ASOF LEFT JOIN B3 B USING(k,t) ORDER BY (A.k, A.t); + +-- { echoOff } + +DROP TABLE B1; +DROP TABLE B2; +DROP TABLE B3; + +DROP TABLE A; diff --git a/parser/testdata/00927_asof_join_long/ast.json b/parser/testdata/00927_asof_join_long/ast.json new file mode 100644 index 000000000..490a2e17e 
--- /dev/null +++ b/parser/testdata/00927_asof_join_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00927_asof_join_long/metadata.json b/parser/testdata/00927_asof_join_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00927_asof_join_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00927_asof_join_long/query.sql b/parser/testdata/00927_asof_join_long/query.sql new file mode 100644 index 000000000..f2c304a1b --- /dev/null +++ b/parser/testdata/00927_asof_join_long/query.sql @@ -0,0 +1,29 @@ +-- Tags: long, no-asan, no-msan, no-tsan + +DROP TABLE IF EXISTS tvs; + +-- to use different algorithms for in subquery +SET enable_analyzer = 1; + +CREATE TABLE tvs(k UInt32, t UInt32, tv UInt64) ENGINE = Memory; +INSERT INTO tvs(k,t,tv) SELECT k, t, t +FROM (SELECT toUInt32(number) AS k FROM numbers(1000)) keys +CROSS JOIN (SELECT toUInt32(number * 3) as t FROM numbers(10000)) tv_times +SETTINGS join_algorithm = 'hash'; + +SELECT SUM(trades.price - tvs.tv) FROM +(SELECT k, t, t as price + FROM (SELECT toUInt32(number) AS k FROM numbers(1000)) keys + CROSS JOIN (SELECT toUInt32(number * 10) AS t FROM numbers(3000)) trade_times + SETTINGS join_algorithm = 'hash') trades +ASOF LEFT JOIN tvs USING(k,t); + +SELECT SUM(trades.price - tvs.tv) FROM +(SELECT k, t, t as price + FROM (SELECT toUInt32(number) AS k FROM numbers(1000)) keys + CROSS JOIN (SELECT toUInt32(number * 10) AS t FROM numbers(3000)) trade_times + SETTINGS join_algorithm = 'hash') trades +ASOF LEFT JOIN tvs USING(k,t) +SETTINGS join_algorithm = 'full_sorting_merge'; + +DROP TABLE tvs; diff --git a/parser/testdata/00927_asof_join_noninclusive/ast.json b/parser/testdata/00927_asof_join_noninclusive/ast.json new file mode 100644 index 000000000..fbf9ffac6 --- /dev/null +++ b/parser/testdata/00927_asof_join_noninclusive/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery A (children 1)" + }, + { + "explain": " Identifier A" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001468212, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/00927_asof_join_noninclusive/metadata.json b/parser/testdata/00927_asof_join_noninclusive/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00927_asof_join_noninclusive/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00927_asof_join_noninclusive/query.sql b/parser/testdata/00927_asof_join_noninclusive/query.sql new file mode 100644 index 000000000..3cc99df44 --- /dev/null +++ b/parser/testdata/00927_asof_join_noninclusive/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS A; +DROP TABLE IF EXISTS B; + +CREATE TABLE A(k UInt32, t DateTime, a Float64) ENGINE = MergeTree() ORDER BY (k, t); +INSERT INTO A(k,t,a) VALUES (1,1,1),(1,2,2),(1,3,3),(1,4,4),(1,5,5); -- multiple joined values +INSERT INTO A(k,t,a) VALUES (2,1,1),(2,2,2),(2,3,3),(2,4,4),(2,5,5); -- one joined value +INSERT INTO A(k,t,a) VALUES (3,1,1),(3,2,2),(3,3,3),(3,4,4),(3,5,5); -- no joined values + +CREATE TABLE B(k UInt32, t DateTime, b Float64) ENGINE = MergeTree() ORDER BY (k, t); +INSERT INTO B(k,t,b) VALUES (1,2,2),(1,4,4); +INSERT INTO B(k,t,b) VALUES (2,3,3); + +SELECT A.k, toString(A.t, 'UTC'), A.a, B.b, toString(B.t, 'UTC'), B.k FROM A ASOF LEFT JOIN B USING(k,t) ORDER BY (A.k, A.t); +SELECT A.k, toString(A.t, 'UTC'), A.a, B.b, toString(B.t, 'UTC'), 
B.k FROM A ASOF INNER JOIN B ON A.k == B.k AND A.t >= B.t ORDER BY (A.k, A.t); +SELECT A.k, toString(A.t, 'UTC'), A.a, B.b, toString(B.t, 'UTC'), B.k FROM A ASOF JOIN B USING(k,t) ORDER BY (A.k, A.t); + +SET join_algorithm = 'full_sorting_merge'; +SELECT A.k, toString(A.t, 'UTC'), A.a, B.b, toString(B.t, 'UTC'), B.k FROM A ASOF LEFT JOIN B USING(k,t) ORDER BY (A.k, A.t); +SELECT A.k, toString(A.t, 'UTC'), A.a, B.b, toString(B.t, 'UTC'), B.k FROM A ASOF INNER JOIN B ON A.k == B.k AND A.t >= B.t ORDER BY (A.k, A.t); +SELECT A.k, toString(A.t, 'UTC'), A.a, B.b, toString(B.t, 'UTC'), B.k FROM A ASOF JOIN B USING(k,t) ORDER BY (A.k, A.t); + +DROP TABLE A; +DROP TABLE B; diff --git a/parser/testdata/00927_asof_joins/ast.json b/parser/testdata/00927_asof_joins/ast.json new file mode 100644 index 000000000..c150cd242 --- /dev/null +++ b/parser/testdata/00927_asof_joins/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery md (children 1)" + }, + { + "explain": " Identifier md" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001391423, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/00927_asof_joins/metadata.json b/parser/testdata/00927_asof_joins/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00927_asof_joins/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00927_asof_joins/query.sql b/parser/testdata/00927_asof_joins/query.sql new file mode 100644 index 000000000..2186e88e0 --- /dev/null +++ b/parser/testdata/00927_asof_joins/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS md; +DROP TABLE IF EXISTS tv; + +CREATE TABLE md(key UInt32, t DateTime, bid Float64, ask Float64) ENGINE = MergeTree() ORDER BY (key, t); +INSERT INTO md(key,t,bid,ask) VALUES (1,20,7,8),(1,5,1,2),(1,10,11,12),(1,15,5,6); +INSERT INTO md(key,t,bid,ask) VALUES (2,20,17,18),(2,5,11,12),(2,10,21,22),(2,15,5,6); + +CREATE TABLE tv(key UInt32, t DateTime, tv Float64) ENGINE = MergeTree() ORDER BY (key, t); +INSERT INTO tv(key,t,tv) VALUES (1,5,1.5),(1,6,1.51),(1,10,11.5),(1,11,11.51),(1,15,5.5),(1,16,5.6),(1,20,7.5); +INSERT INTO tv(key,t,tv) VALUES (2,5,2.5),(2,6,2.51),(2,10,12.5),(2,11,12.51),(2,15,6.5),(2,16,5.6),(2,20,8.5); + +SELECT tv.key, toString(tv.t, 'UTC'), md.bid, tv.tv, md.ask FROM tv ASOF LEFT JOIN md USING(key,t) ORDER BY (tv.key, tv.t) +; + +SELECT '-'; + +SELECT tv.key, toString(tv.t, 'UTC'), md.bid, tv.tv, md.ask FROM tv ASOF LEFT JOIN md USING(key,t) ORDER BY (tv.key, tv.t) +SETTINGS join_algorithm = 'full_sorting_merge'; + +DROP TABLE md; +DROP TABLE tv; diff --git a/parser/testdata/00927_disable_hyperscan/ast.json b/parser/testdata/00927_disable_hyperscan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00927_disable_hyperscan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00927_disable_hyperscan/metadata.json b/parser/testdata/00927_disable_hyperscan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00927_disable_hyperscan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00927_disable_hyperscan/query.sql b/parser/testdata/00927_disable_hyperscan/query.sql new file mode 100644 index 000000000..24ec7a35a --- /dev/null +++ b/parser/testdata/00927_disable_hyperscan/query.sql @@ -0,0 +1,16 @@ +-- Tags: no-fasttest + +SET allow_hyperscan = 1; + +SELECT 
multiMatchAny(arrayJoin(['hello', 'world', 'hellllllllo', 'wororld', 'abc']), ['hel+o', 'w(or)*ld']); +SELECT multiMatchAny(arrayJoin(['hello', 'world', 'hellllllllo', 'wororld', 'abc']), materialize(['hel+o', 'w(or)*ld'])); + +SET allow_hyperscan = 0; + +SELECT multiMatchAny(arrayJoin(['hello', 'world', 'hellllllllo', 'wororld', 'abc']), ['hel+o', 'w(or)*ld']); -- { serverError FUNCTION_NOT_ALLOWED } +SELECT multiMatchAny(arrayJoin(['hello', 'world', 'hellllllllo', 'wororld', 'abc']), materialize(['hel+o', 'w(or)*ld'])); -- { serverError FUNCTION_NOT_ALLOWED } + +SELECT multiMatchAllIndices(arrayJoin(['hello', 'world', 'hellllllllo', 'wororld', 'abc']), ['hel+o', 'w(or)*ld']); -- { serverError FUNCTION_NOT_ALLOWED } +SELECT multiMatchAllIndices(arrayJoin(['hello', 'world', 'hellllllllo', 'wororld', 'abc']), materialize(['hel+o', 'w(or)*ld'])); -- { serverError FUNCTION_NOT_ALLOWED } + +SELECT multiSearchAny(arrayJoin(['hello', 'world', 'hello, world', 'abc']), ['hello', 'world']); diff --git a/parser/testdata/00928_multi_match_constant_constant/ast.json b/parser/testdata/00928_multi_match_constant_constant/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00928_multi_match_constant_constant/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00928_multi_match_constant_constant/metadata.json b/parser/testdata/00928_multi_match_constant_constant/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00928_multi_match_constant_constant/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00928_multi_match_constant_constant/query.sql b/parser/testdata/00928_multi_match_constant_constant/query.sql new file mode 100644 index 000000000..1cfc90927 --- /dev/null +++ b/parser/testdata/00928_multi_match_constant_constant/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest +-- no-fasttest: Requires vectorscan +SELECT multiMatchAny('goodbye', ['^hello[, ]+world$', 'go+d *bye', 'w(or)+ld']); +SELECT multiMatchAnyIndex('goodbye', ['^hello[, ]+world$', 'go+d *bye', 'w(or)+ld']); +SELECT multiSearchAllPositions('hello, world', ['hello', 'world']); diff --git a/parser/testdata/00929_multi_match_edit_distance/ast.json b/parser/testdata/00929_multi_match_edit_distance/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00929_multi_match_edit_distance/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00929_multi_match_edit_distance/metadata.json b/parser/testdata/00929_multi_match_edit_distance/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00929_multi_match_edit_distance/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00929_multi_match_edit_distance/query.sql b/parser/testdata/00929_multi_match_edit_distance/query.sql new file mode 100644 index 000000000..a74a9d716 --- /dev/null +++ b/parser/testdata/00929_multi_match_edit_distance/query.sql @@ -0,0 +1,61 @@ +-- Tags: no-fasttest, use-vectorscan + +SET send_logs_level = 'fatal'; + +SELECT '- const pattern'; + +-- run queries multiple times to test the pattern caching +select multiFuzzyMatchAny('abc', 0, ['a1c']) from system.numbers limit 3; +select multiFuzzyMatchAny('abc', 1, ['a1c']) from system.numbers limit 3; +select multiFuzzyMatchAny('abc', 2, ['a1c']) from system.numbers limit 3; +select multiFuzzyMatchAny('abc', 3, ['a1c']) from system.numbers limit 3; -- { serverError BAD_ARGUMENTS } +select 
multiFuzzyMatchAny('abc', 4, ['a1c']) from system.numbers limit 3; -- { serverError BAD_ARGUMENTS }
+
+select multiFuzzyMatchAny('leftabcright', 1, ['a1c']) from system.numbers limit 3;
+
+select multiFuzzyMatchAny('hello some world', 0, ['^hello.*world$']);
+select multiFuzzyMatchAny('hallo some world', 1, ['^hello.*world$']);
+select multiFuzzyMatchAny('halo some wrld', 2, ['^hello.*world$']);
+select multiFuzzyMatchAny('halo some wrld', 2, ['^hello.*world$', '^halo.*world$']);
+select multiFuzzyMatchAny('halo some wrld', 2, ['^halo.*world$', '^hello.*world$']);
+select multiFuzzyMatchAny('halo some wrld', 3, ['^hello.*world$']);
+select multiFuzzyMatchAny('hello some world', 10, ['^hello.*world$']); -- { serverError BAD_ARGUMENTS }
+select multiFuzzyMatchAny('hello some world', -1, ['^hello.*world$']); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+select multiFuzzyMatchAny('hello some world', 10000000000, ['^hello.*world$']); -- { serverError ILLEGAL_COLUMN }
+select multiFuzzyMatchAny('http://hyperscan_is_nice.de/st', 2, ['http://hyperscan_is_nice.de/(st\\d\\d$|st\\d\\d\\.|st1[0-4]\\d|st150|st\\d$|gl|rz|ch)']);
+select multiFuzzyMatchAny('string', 0, ['zorro$', '^tring', 'in$', 'how.*', 'it{2}', 'works']);
+select multiFuzzyMatchAny('string', 1, ['zorro$', '^tring', 'ip$', 'how.*', 'it{2}', 'works']);
+select multiFuzzyMatchAnyIndex('string', 1, ['zorro$', '^tring', 'ip$', 'how.*', 'it{2}', 'works']);
+select multiFuzzyMatchAnyIndex('halo some wrld', 2, ['^hello.*world$', '^halo.*world$']);
+select multiFuzzyMatchAnyIndex('halo some wrld', 2, ['^halo.*world$', '^hello.*world$']);
+--
+select arraySort(multiFuzzyMatchAllIndices('halo some wrld', 2, ['some random string', '^halo.*world$', '^halo.*world$', '^halo.*world$', '^hallllo.*world$']));
+select multiFuzzyMatchAllIndices('halo some wrld', 2, ['^halllllo.*world$', 'some random string']);
+
+SELECT '- non-const pattern';
+
+select multiFuzzyMatchAny(materialize('abc'), 0, materialize(['a1c'])) from system.numbers limit 3;
+select multiFuzzyMatchAny(materialize('abc'), 1, materialize(['a1c'])) from system.numbers limit 3;
+select multiFuzzyMatchAny(materialize('abc'), 2, materialize(['a1c'])) from system.numbers limit 3;
+select multiFuzzyMatchAny(materialize('abc'), 3, materialize(['a1c'])) from system.numbers limit 3; -- { serverError BAD_ARGUMENTS }
+select multiFuzzyMatchAny(materialize('abc'), 4, materialize(['a1c'])) from system.numbers limit 3; -- { serverError BAD_ARGUMENTS }
+
+select multiFuzzyMatchAny(materialize('leftabcright'), 1, materialize(['a1c']));
+
+select multiFuzzyMatchAny(materialize('hello some world'), 0, materialize(['^hello.*world$']));
+select multiFuzzyMatchAny(materialize('hallo some world'), 1, materialize(['^hello.*world$']));
+select multiFuzzyMatchAny(materialize('halo some wrld'), 2, materialize(['^hello.*world$']));
+select multiFuzzyMatchAny(materialize('halo some wrld'), 2, materialize(['^hello.*world$', '^halo.*world$']));
+select multiFuzzyMatchAny(materialize('halo some wrld'), 2, materialize(['^halo.*world$', '^hello.*world$']));
+select multiFuzzyMatchAny(materialize('halo some wrld'), 3, materialize(['^hello.*world$']));
+select multiFuzzyMatchAny(materialize('hello some world'), 10, materialize(['^hello.*world$'])); -- { serverError BAD_ARGUMENTS }
+select multiFuzzyMatchAny(materialize('hello some world'), -1, materialize(['^hello.*world$'])); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+select multiFuzzyMatchAny(materialize('hello some world'), 10000000000, materialize(['^hello.*world$'])); 
-- { serverError ILLEGAL_COLUMN } +select multiFuzzyMatchAny(materialize('http://hyperscan_is_nice.de/st'), 2, materialize(['http://hyperscan_is_nice.de/(st\\d\\d$|st\\d\\d\\.|st1[0-4]\\d|st150|st\\d$|gl|rz|ch)'])); +select multiFuzzyMatchAny(materialize('string'), 0, materialize(['zorro$', '^tring', 'in$', 'how.*', 'it{2}', 'works'])); +select multiFuzzyMatchAny(materialize('string'), 1, materialize(['zorro$', '^tring', 'ip$', 'how.*', 'it{2}', 'works'])); +select multiFuzzyMatchAnyIndex(materialize('string'), 1, materialize(['zorro$', '^tring', 'ip$', 'how.*', 'it{2}', 'works'])); +select multiFuzzyMatchAnyIndex(materialize('halo some wrld'), 2, materialize(['^hello.*world$', '^halo.*world$'])); +select multiFuzzyMatchAnyIndex(materialize('halo some wrld'), 2, materialize(['^halo.*world$', '^hello.*world$'])); +select arraySort(multiFuzzyMatchAllIndices(materialize('halo some wrld'), 2, materialize(['some random string', '^halo.*world$', '^halo.*world$', '^halo.*world$', '^hallllo.*world$']))); +select multiFuzzyMatchAllIndices(materialize('halo some wrld'), 2, materialize(['^halllllo.*world$', 'some random string'])); diff --git a/parser/testdata/00930_arrayIntersect/ast.json b/parser/testdata/00930_arrayIntersect/ast.json new file mode 100644 index 000000000..75fe136f8 --- /dev/null +++ b/parser/testdata/00930_arrayIntersect/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery array_intersect (children 1)" + }, + { + "explain": " Identifier array_intersect" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001030341, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/00930_arrayIntersect/metadata.json b/parser/testdata/00930_arrayIntersect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00930_arrayIntersect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00930_arrayIntersect/query.sql b/parser/testdata/00930_arrayIntersect/query.sql new file mode 100644 index 000000000..837e13b2c --- /dev/null +++ b/parser/testdata/00930_arrayIntersect/query.sql @@ -0,0 +1,30 @@ +drop table if exists array_intersect; + +create table array_intersect (date Date, arr Array(UInt8)) engine=MergeTree partition by date order by date; + +insert into array_intersect values ('2019-01-01', [1,2,3]); +insert into array_intersect values ('2019-01-01', [1,2]); +insert into array_intersect values ('2019-01-01', [1]); +insert into array_intersect values ('2019-01-01', []); + +select arraySort(arrayIntersect(arr, [1,2])) from array_intersect order by arr; +select arraySort(arrayIntersect(arr, [])) from array_intersect order by arr; +select arraySort(arrayIntersect([], arr)) from array_intersect order by arr; +select arraySort(arrayIntersect([1,2], arr)) from array_intersect order by arr; +select arraySort(arrayIntersect([1,2], [1,2,3,4])) from array_intersect order by arr; +select arraySort(arrayIntersect([], [])) from array_intersect order by arr; + +optimize table array_intersect; + +select arraySort(arrayIntersect(arr, [1,2])) from array_intersect order by arr; +select arraySort(arrayIntersect(arr, [])) from array_intersect order by arr; +select arraySort(arrayIntersect([], arr)) from array_intersect order by arr; +select arraySort(arrayIntersect([1,2], arr)) from array_intersect order by arr; +select arraySort(arrayIntersect([1,2], [1,2,3,4])) from array_intersect order by arr; +select arraySort(arrayIntersect([], [])) from 
array_intersect order by arr; + +drop table if exists array_intersect; + +select '-'; +select arraySort(arrayIntersect([-100], [156])); +select arraySort(arrayIntersect([1], [257])); diff --git a/parser/testdata/00930_max_partitions_per_insert_block/ast.json b/parser/testdata/00930_max_partitions_per_insert_block/ast.json new file mode 100644 index 000000000..a8aa40b56 --- /dev/null +++ b/parser/testdata/00930_max_partitions_per_insert_block/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery partitions (children 1)" + }, + { + "explain": " Identifier partitions" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001083644, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00930_max_partitions_per_insert_block/metadata.json b/parser/testdata/00930_max_partitions_per_insert_block/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00930_max_partitions_per_insert_block/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00930_max_partitions_per_insert_block/query.sql b/parser/testdata/00930_max_partitions_per_insert_block/query.sql new file mode 100644 index 000000000..3d45a3e02 --- /dev/null +++ b/parser/testdata/00930_max_partitions_per_insert_block/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS partitions; +CREATE TABLE partitions (x UInt64) ENGINE = MergeTree ORDER BY x PARTITION BY x; + +INSERT INTO partitions SELECT * FROM system.numbers LIMIT 100; +SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = 'partitions'; +INSERT INTO partitions SELECT * FROM system.numbers LIMIT 100; +SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = 'partitions'; + +SET max_partitions_per_insert_block = 1; + +INSERT INTO partitions SELECT * FROM system.numbers LIMIT 1; +INSERT INTO partitions SELECT * FROM system.numbers LIMIT 2; -- { serverError TOO_MANY_PARTS } + +DROP TABLE partitions; diff --git a/parser/testdata/00931_low_cardinality_nullable_aggregate_function_type/ast.json b/parser/testdata/00931_low_cardinality_nullable_aggregate_function_type/ast.json new file mode 100644 index 000000000..74f7f26b1 --- /dev/null +++ b/parser/testdata/00931_low_cardinality_nullable_aggregate_function_type/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery lc (children 1)" + }, + { + "explain": " Identifier lc" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001249718, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/00931_low_cardinality_nullable_aggregate_function_type/metadata.json b/parser/testdata/00931_low_cardinality_nullable_aggregate_function_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00931_low_cardinality_nullable_aggregate_function_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00931_low_cardinality_nullable_aggregate_function_type/query.sql b/parser/testdata/00931_low_cardinality_nullable_aggregate_function_type/query.sql new file mode 100644 index 000000000..3c4fe033d --- /dev/null +++ b/parser/testdata/00931_low_cardinality_nullable_aggregate_function_type/query.sql @@ -0,0 +1,8 @@ +drop table if exists lc; + +CREATE TABLE lc (`date` Date, `name` LowCardinality(Nullable(String)), `clicks` Nullable(Int32)) ENGINE = MergeTree() ORDER BY date SETTINGS 
index_granularity = 8192; +INSERT INTO lc SELECT '2019-01-01', null, 0 FROM numbers(1000000); +SELECT date, argMax(name, clicks) FROM lc GROUP BY date; + +drop table if exists lc; + diff --git a/parser/testdata/00931_low_cardinality_read_with_empty_array/ast.json b/parser/testdata/00931_low_cardinality_read_with_empty_array/ast.json new file mode 100644 index 000000000..64aebcf44 --- /dev/null +++ b/parser/testdata/00931_low_cardinality_read_with_empty_array/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery lc_00931 (children 1)" + }, + { + "explain": " Identifier lc_00931" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001022496, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00931_low_cardinality_read_with_empty_array/metadata.json b/parser/testdata/00931_low_cardinality_read_with_empty_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00931_low_cardinality_read_with_empty_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00931_low_cardinality_read_with_empty_array/query.sql b/parser/testdata/00931_low_cardinality_read_with_empty_array/query.sql new file mode 100644 index 000000000..257116fe0 --- /dev/null +++ b/parser/testdata/00931_low_cardinality_read_with_empty_array/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS lc_00931; + +CREATE TABLE lc_00931 ( + key UInt64, + value Array(LowCardinality(String))) +ENGINE = MergeTree +ORDER BY key; + +INSERT INTO lc_00931 SELECT number, +if (number < 10000 OR number > 100000, + [toString(number)], + emptyArrayString()) + FROM system.numbers LIMIT 200000; + +SELECT * FROM lc_00931 +WHERE (key < 100 OR key > 50000) + AND NOT has(value, toString(key)) + AND length(value) == 1 +LIMIT 10 +SETTINGS max_block_size = 8192, + max_threads = 1; + +DROP TABLE IF EXISTS lc_00931; diff --git a/parser/testdata/00931_low_cardinality_set_index_in_key_condition/ast.json b/parser/testdata/00931_low_cardinality_set_index_in_key_condition/ast.json new file mode 100644 index 000000000..8ea39c153 --- /dev/null +++ b/parser/testdata/00931_low_cardinality_set_index_in_key_condition/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_in (children 1)" + }, + { + "explain": " Identifier test_in" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001029122, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00931_low_cardinality_set_index_in_key_condition/metadata.json b/parser/testdata/00931_low_cardinality_set_index_in_key_condition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00931_low_cardinality_set_index_in_key_condition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00931_low_cardinality_set_index_in_key_condition/query.sql b/parser/testdata/00931_low_cardinality_set_index_in_key_condition/query.sql new file mode 100644 index 000000000..4a43b4dce --- /dev/null +++ b/parser/testdata/00931_low_cardinality_set_index_in_key_condition/query.sql @@ -0,0 +1,7 @@ +drop table if exists test_in; +create table test_in (a LowCardinality(String)) Engine = MergeTree order by a; + +insert into test_in values ('a'); +select * from test_in where a in ('a'); + +drop table if exists test_in; diff --git a/parser/testdata/00932_array_intersect_bug/ast.json 
b/parser/testdata/00932_array_intersect_bug/ast.json new file mode 100644 index 000000000..ad5f68693 --- /dev/null +++ b/parser/testdata/00932_array_intersect_bug/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arraySort (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayIntersect (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_['a', 'b', 'c']" + }, + { + "explain": " Literal Array_['a', 'a']" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001068454, + "rows_read": 10, + "bytes_read": 409 + } +} diff --git a/parser/testdata/00932_array_intersect_bug/metadata.json b/parser/testdata/00932_array_intersect_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00932_array_intersect_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00932_array_intersect_bug/query.sql b/parser/testdata/00932_array_intersect_bug/query.sql new file mode 100644 index 000000000..fc9c03c6d --- /dev/null +++ b/parser/testdata/00932_array_intersect_bug/query.sql @@ -0,0 +1,9 @@ +SELECT arraySort(arrayIntersect(['a', 'b', 'c'], ['a', 'a'])); +SELECT arraySort(arrayIntersect([1, 1], [2, 2])); +SELECT arraySort(arrayIntersect([1, 1], [1, 2])); +SELECT arraySort(arrayIntersect([1, 1, 1], [3], [2, 2, 2])); +SELECT arraySort(arrayIntersect([1, 2], [1, 2], [2])); +SELECT arraySort(arrayIntersect([1, 1], [2, 1], [2, 2], [1])); +SELECT arraySort(arrayIntersect([])); +SELECT arraySort(arrayIntersect([1, 2, 3])); +SELECT arraySort(arrayIntersect([1, 1], [2, 1], [2, 2], [2, 2, 2])); diff --git a/parser/testdata/00932_geohash_support/ast.json b/parser/testdata/00932_geohash_support/ast.json new file mode 100644 index 000000000..9bc7343c5 --- /dev/null +++ b/parser/testdata/00932_geohash_support/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery geohash_test_data (children 1)" + }, + { + "explain": " Identifier geohash_test_data" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00124198, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/00932_geohash_support/metadata.json b/parser/testdata/00932_geohash_support/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00932_geohash_support/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00932_geohash_support/query.sql b/parser/testdata/00932_geohash_support/query.sql new file mode 100644 index 000000000..42027da91 --- /dev/null +++ b/parser/testdata/00932_geohash_support/query.sql @@ -0,0 +1,70 @@ +drop table if exists geohash_test_data; + +create table geohash_test_data ( + latitude Float64, + longitude Float64, + encoded String +) engine = MergeTree order by (latitude, longitude, encoded); + +-- data obtained from geohash.com +insert into geohash_test_data values (-25.427, -49.315, '6'), (-25.427, -49.315, '6g'), (-25.427, -49.315, '6gk'), (-25.427, -49.315, '6gkz'), (-25.427, -49.315, '6gkzm'), (-25.427, -49.315, '6gkzmg'), (-25.427, -49.315, '6gkzmg1'), (-25.427, -49.315, '6gkzmg1u'), (-25.383, 
-49.266, '6'), (-25.383, -49.266, '6g'), (-25.383, -49.266, '6gk'), (-25.383, -49.266, '6gkz'), (-25.383, -49.266, '6gkzw'), (-25.383, -49.266, '6gkzwg'), (-25.383, -49.266, '6gkzwgj'), (-25.383, -49.266, '6gkzwgjt'), (-25.382708, -49.265506, '6'), (-25.382708, -49.265506, '6g'), (-25.382708, -49.265506, '6gk'), (-25.382708, -49.265506, '6gkz'), (-25.382708, -49.265506, '6gkzw'), (-25.382708, -49.265506, '6gkzwg'), (-25.382708, -49.265506, '6gkzwgj'), (-25.382708, -49.265506, '6gkzwgjz'), (-25.382708, -49.265506, '6gkzwgjzn'), (-25.382708, -49.265506, '6gkzwgjzn8'), (-25.382708, -49.265506, '6gkzwgjzn82'), (-25.382708, -49.265506, '6gkzwgjzn820'), (-0.1, -0.1, '7'), (-0.1, -0.1, '7z'), (-0.1, -0.1, '7zz'), (-0.1, -0.1, '7zzz'), (-0.1, -0.1, '7zzzm'), (-0.1, -0.01, '7'), (-0.1, -0.01, '7z'), (-0.1, -0.01, '7zz'), (-0.1, -0.01, '7zzz'), (-0.1, -0.01, '7zzzr'), (-0.1, -0.01, '7zzzrv'), (-0.1, -0.01, '7zzzrvb'), (-0.1, 0, 'k'), (-0.1, 0, 'kp'), (-0.1, 0, 'kpb'), (-0.1, 0, 'kpbp'), (-0.1, 0, 'kpbp2'), (-0.1, 0.01, 'k'), (-0.1, 0.01, 'kp'), (-0.1, 0.01, 'kpb'), (-0.1, 0.01, 'kpbp'), (-0.1, 0.01, 'kpbp2'), (-0.1, 0.01, 'kpbp2j'), (-0.1, 0.01, 'kpbp2jz'), (-0.1, 0.1, 'k'), (-0.1, 0.1, 'kp'), (-0.1, 0.1, 'kpb'), (-0.1, 0.1, 'kpbp'), (-0.1, 0.1, 'kpbp6'), (-0.01, -0.1, '7'), (-0.01, -0.1, '7z'), (-0.01, -0.1, '7zz'), (-0.01, -0.1, '7zzz'), (-0.01, -0.1, '7zzzv'), (-0.01, -0.1, '7zzzvw'), (-0.01, -0.01, '7'), (-0.01, -0.01, '7z'), (-0.01, -0.01, '7zz'), (-0.01, -0.01, '7zzz'), (-0.01, -0.01, '7zzzz'), (-0.01, -0.01, '7zzzzy'), (-0.01, -0.01, '7zzzzy0'), (-0.01, 0, 'k'), (-0.01, 0, 'kp'), (-0.01, 0, 'kpb'), (-0.01, 0, 'kpbp'), (-0.01, 0, 'kpbpb'), (-0.01, 0, 'kpbpbn'), (-0.01, 0.01, 'k'), (-0.01, 0.01, 'kp'), (-0.01, 0.01, 'kpb'), (-0.01, 0.01, 'kpbp'), (-0.01, 0.01, 'kpbpb'), (-0.01, 0.01, 'kpbpbn'), (-0.01, 0.01, 'kpbpbnp'), (-0.01, 0.1, 'k'), (-0.01, 0.1, 'kp'), (-0.01, 0.1, 'kpb'), (-0.01, 0.1, 'kpbp'), (-0.01, 0.1, 'kpbpf'), (-0.01, 0.1, 'kpbpfq'), (0, -0.1, 'e'), (0, -0.1, 'eb'), (0, -0.1, 'ebp'), (0, -0.1, 'ebpb'), (0, -0.1, 'ebpbj'), (0, -0.01, 'e'), (0, -0.01, 'eb'), (0, -0.01, 'ebp'), (0, -0.01, 'ebpb'), (0, -0.01, 'ebpbp'), (0, -0.01, 'ebpbpb'), (0, -0.01, 'ebpbpb0'), (0, 0, 's'), (0, 0, 's0'), (0, 0, 's00'), (0, 0, 's000'), (0, 0.01, 's'), (0, 0.01, 's0'), (0, 0.01, 's00'), (0, 0.01, 's000'), (0, 0.01, 's0000'), (0, 0.01, 's00000'), (0, 0.01, 's00000p'), (0, 0.1, 's'), (0, 0.1, 's0'), (0, 0.1, 's00'), (0, 0.1, 's000'), (0, 0.1, 's0004'), (0.01, -0.1, 'e'), (0.01, -0.1, 'eb'), (0.01, -0.1, 'ebp'), (0.01, -0.1, 'ebpb'), (0.01, -0.1, 'ebpbj'), (0.01, -0.1, 'ebpbj9'), (0.01, -0.01, 'e'), (0.01, -0.01, 'eb'), (0.01, -0.01, 'ebp'), (0.01, -0.01, 'ebpb'), (0.01, -0.01, 'ebpbp'), (0.01, -0.01, 'ebpbpc'), (0.01, -0.01, 'ebpbpcb'), (0.01, 0, 's'), (0.01, 0, 's0'), (0.01, 0, 's00'), (0.01, 0, 's000'), (0.01, 0, 's0000'), (0.01, 0, 's00001'), (0.01, 0.01, 's'), (0.01, 0.01, 's0'), (0.01, 0.01, 's00'), (0.01, 0.01, 's000'), (0.01, 0.01, 's0000'), (0.01, 0.01, 's00001'), (0.01, 0.01, 's00001z'), (0.01, 0.1, 's'), (0.01, 0.1, 's0'), (0.01, 0.1, 's00'), (0.01, 0.1, 's000'), (0.01, 0.1, 's0004'), (0.01, 0.1, 's00043'), (0.1, -0.1, 'e'), (0.1, -0.1, 'eb'), (0.1, -0.1, 'ebp'), (0.1, -0.1, 'ebpb'), (0.1, -0.1, 'ebpbt'), (0.1, -0.01, 'e'), (0.1, -0.01, 'eb'), (0.1, -0.01, 'ebp'), (0.1, -0.01, 'ebpb'), (0.1, -0.01, 'ebpbx'), (0.1, -0.01, 'ebpbxf'), (0.1, -0.01, 'ebpbxf0'), (0.1, 0, 's'), (0.1, 0, 's0'), (0.1, 0, 's00'), (0.1, 0, 's000'), (0.1, 0, 's0008'), (0.1, 0.01, 's'), (0.1, 0.01, 's0'), (0.1, 0.01, 
's00'), (0.1, 0.01, 's000'), (0.1, 0.01, 's0008'), (0.1, 0.01, 's00084'), (0.1, 0.01, 's00084p'), (0.1, 0.1, 's'), (0.1, 0.1, 's0'), (0.1, 0.1, 's00'), (0.1, 0.1, 's000'), (0.1, 0.1, 's000d'), (7.880886, 98.3640363, 'w'), (7.880886, 98.3640363, 'w1'), (7.880886, 98.3640363, 'w1m'), (7.880886, 98.3640363, 'w1mu'), (7.880886, 98.3640363, 'w1muy'), (7.880886, 98.3640363, 'w1muy6'), (7.880886, 98.3640363, 'w1muy6d'), (7.880886, 98.3640363, 'w1muy6dt'), (7.880886, 98.3640363, 'w1muy6dt2'), (7.880886, 98.3640363, 'w1muy6dt2p'), (7.880886, 98.3640363, 'w1muy6dt2pt'), (7.880886, 98.3640363, 'w1muy6dt2ptk'), (51.523242, -0.07914, 'g'), (51.523242, -0.07914, 'gc'), (51.523242, -0.07914, 'gcp'), (51.523242, -0.07914, 'gcpv'), (51.523242, -0.07914, 'gcpvn'), (51.523242, -0.07914, 'gcpvn5'), (51.523242, -0.07914, 'gcpvn5w'), (51.523242, -0.07914, 'gcpvn5w2'), (51.523242, -0.07914, 'gcpvn5w2e'), (51.523242, -0.07914, 'gcpvn5w2eu'), (51.523242, -0.07914, 'gcpvn5w2euk'), (51.523242, -0.07914, 'gcpvn5w2euky'), (53.923107, 27.606682, 'u'), (53.923107, 27.606682, 'u9'), (53.923107, 27.606682, 'u9e'), (53.923107, 27.606682, 'u9ed'), (53.923107, 27.606682, 'u9edu'), (53.923107, 27.606682, 'u9edu0'), (53.923107, 27.606682, 'u9edu0q'), (53.923107, 27.606682, 'u9edu0qs'), (53.923107, 27.606682, 'u9edu0qsf'), (53.923107, 27.606682, 'u9edu0qsf7'), (53.923107, 27.606682, 'u9edu0qsf7d'), (53.923107, 27.606682, 'u9edu0qsf7dn'); + + +select 'invalid values:'; -- must not crash +select geohashEncode(181.0, 91.0); +select geohashEncode(-181.0, -91.0); +select count(geohashDecode('abcdefghijklmnopqrstuvwxyz')); + +select 'constant values:'; +select geohashEncode(-5.60302734375, 42.593994140625, 0); +select round(geohashDecode('ezs42').1, 5), round(geohashDecode('ezs42').2, 5); + +select 'default precision:'; +select geohashEncode(-5.60302734375, 42.593994140625); + +select 'mixing const and non-const-columns:'; +select geohashEncode(materialize(-5.60302734375), materialize(42.593994140625), 0); +select geohashEncode(materialize(-5.60302734375), materialize(42.593994140625), materialize(0)); +select geohashEncode(-5.60302734375, materialize(42.593994140625), 0); +select geohashEncode(materialize(-5.60302734375), 42.593994140625, 0); +select geohashEncode(-5.60302734375, 42.593994140625, 0); + + +select 'from table (with const precision):'; + +-- here results are strings, so reference may contain values to match for equality. 
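+-- For example (illustrative note, not part of the original test): the reference data above contains +-- the row (latitude -25.427, longitude -49.315, encoded '6gk'), so at precision p = 3 the check below +-- amounts to asserting geohashEncode(-49.315, -25.427, 3) = '6gk' (note the longitude-first argument order).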
+select 1 as p, geohashEncode(longitude, latitude, p) as actual, if(actual = encoded, 'Ok', concat('expected: ', encoded)) from geohash_test_data WHERE length(encoded) = p order by all; +select 2 as p, geohashEncode(longitude, latitude, p) as actual, if(actual = encoded, 'Ok', concat('expected: ', encoded)) from geohash_test_data WHERE length(encoded) = p order by all; +select 3 as p, geohashEncode(longitude, latitude, p) as actual, if(actual = encoded, 'Ok', concat('expected: ', encoded)) from geohash_test_data WHERE length(encoded) = p order by all; +select 4 as p, geohashEncode(longitude, latitude, p) as actual, if(actual = encoded, 'Ok', concat('expected: ', encoded)) from geohash_test_data WHERE length(encoded) = p order by all; +select 5 as p, geohashEncode(longitude, latitude, p) as actual, if(actual = encoded, 'Ok', concat('expected: ', encoded)) from geohash_test_data WHERE length(encoded) = p order by all; +select 6 as p, geohashEncode(longitude, latitude, p) as actual, if(actual = encoded, 'Ok', concat('expected: ', encoded)) from geohash_test_data WHERE length(encoded) = p order by all; +select 7 as p, geohashEncode(longitude, latitude, p) as actual, if(actual = encoded, 'Ok', concat('expected: ', encoded)) from geohash_test_data WHERE length(encoded) = p order by all; +select 8 as p, geohashEncode(longitude, latitude, p) as actual, if(actual = encoded, 'Ok', concat('expected: ', encoded)) from geohash_test_data WHERE length(encoded) = p order by all; +select 9 as p, geohashEncode(longitude, latitude, p) as actual, if(actual = encoded, 'Ok', concat('expected: ', encoded)) from geohash_test_data WHERE length(encoded) = p order by all; +select 10 as p, geohashEncode(longitude, latitude, p) as actual, if(actual = encoded, 'Ok', concat('expected: ', encoded)) from geohash_test_data WHERE length(encoded) = p order by all; +select 11 as p, geohashEncode(longitude, latitude, p) as actual, if(actual = encoded, 'Ok', concat('expected: ', encoded)) from geohash_test_data WHERE length(encoded) = p order by all; +select 12 as p, geohashEncode(longitude, latitude, p) as actual, if(actual = encoded, 'Ok', concat('expected: ', encoded)) from geohash_test_data WHERE length(encoded) = p order by all; + +-- Here results are floats, and hence cannot be compared for equality directly. +-- We select all values that are off by more than a reasonable error bound: +-- each character of the encoded string provides 5 bits of precision (roughly 2.5 each for longitude and latitude), +-- and each bit of precision halves the value range, +-- hence the max error is roughly the initial value range divided by 2 for each of the 2.5 * length(encoded) precision bits. +-- The initial value range is [-90..90] for latitude and [-180..180] for longitude.
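+-- For example (illustrative sketch, not part of the original test): for a geohash of length 5, +-- the bound evaluates to 180 / 2^(2.5 * 5) = 180 / 2^12.5 ≈ 0.031 for latitude and 360 / 2^12.5 ≈ 0.062 for longitude, +-- i.e. select 180 / power(2, 2.5 * 5) as latitude_max_error, 360 / power(2, 2.5 * 5) as longitude_max_error;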
+select 'incorrectly decoded values:'; +select + geohashDecode(encoded) as actual, + 'expected:', encoded, '=>', latitude, longitude, + 'length:', length(encoded), + 'max lat error:', 180 / power(2, 2.5 * length(encoded)) as latitude_max_error, + 'max lon error:', 360 / power(2, 2.5 * length(encoded)) as longitude_max_error, + 'err:', (actual.2 - latitude) as lat_error, (actual.1 - longitude) as lon_error, + 'derr:', abs(lat_error) - latitude_max_error, abs(lon_error) - longitude_max_error +from geohash_test_data +where + abs(lat_error) > latitude_max_error + or + abs(lon_error) > longitude_max_error; + +drop table if exists geohash_test_data; diff --git a/parser/testdata/00933_alter_ttl/ast.json b/parser/testdata/00933_alter_ttl/ast.json new file mode 100644 index 000000000..d855dc94b --- /dev/null +++ b/parser/testdata/00933_alter_ttl/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00118671, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00933_alter_ttl/metadata.json b/parser/testdata/00933_alter_ttl/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00933_alter_ttl/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00933_alter_ttl/query.sql b/parser/testdata/00933_alter_ttl/query.sql new file mode 100644 index 000000000..9ec134756 --- /dev/null +++ b/parser/testdata/00933_alter_ttl/query.sql @@ -0,0 +1,26 @@ +set send_logs_level = 'fatal'; + +drop table if exists ttl; + +create table ttl (d Date, a Int) engine = MergeTree order by a partition by toDayOfMonth(d) settings remove_empty_parts = 0; +alter table ttl modify ttl d + interval 1 day; +show create table ttl; +insert into ttl values (toDateTime('2000-10-10 00:00:00'), 1); +insert into ttl values (toDateTime('2000-10-10 00:00:00'), 2); +insert into ttl values (toDateTime('2100-10-10 00:00:00'), 3); +insert into ttl values (toDateTime('2100-10-10 00:00:00'), 4); +optimize table ttl partition 10 final; + +select * from ttl order by d, a; + +alter table ttl modify ttl a; -- { serverError BAD_TTL_EXPRESSION } + +drop table if exists ttl; + +create table ttl (d Date, a Int) engine = MergeTree order by tuple() partition by toDayOfMonth(d) settings remove_empty_parts = 0; +alter table ttl modify column a Int ttl d + interval 1 day; +desc table ttl; +alter table ttl modify column d Int ttl d + interval 1 day; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +alter table ttl modify column d DateTime ttl d + interval 1 day; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +drop table if exists ttl; diff --git a/parser/testdata/00933_reserved_word/ast.json b/parser/testdata/00933_reserved_word/ast.json new file mode 100644 index 000000000..8b4ef23d7 --- /dev/null +++ b/parser/testdata/00933_reserved_word/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery reserved_word_table (children 1)" + }, + { + "explain": " Identifier reserved_word_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001078836, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/00933_reserved_word/metadata.json b/parser/testdata/00933_reserved_word/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00933_reserved_word/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff 
--git a/parser/testdata/00933_reserved_word/query.sql b/parser/testdata/00933_reserved_word/query.sql new file mode 100644 index 000000000..447a618bf --- /dev/null +++ b/parser/testdata/00933_reserved_word/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS reserved_word_table; +CREATE TABLE reserved_word_table (`index` UInt8) ENGINE = MergeTree ORDER BY `index`; + +DETACH TABLE reserved_word_table; +ATTACH TABLE reserved_word_table; + +DROP TABLE reserved_word_table; diff --git a/parser/testdata/00933_ttl_formatting/ast.json b/parser/testdata/00933_ttl_formatting/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00933_ttl_formatting/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00933_ttl_formatting/metadata.json b/parser/testdata/00933_ttl_formatting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00933_ttl_formatting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00933_ttl_formatting/query.sql b/parser/testdata/00933_ttl_formatting/query.sql new file mode 100644 index 000000000..12105fa7f --- /dev/null +++ b/parser/testdata/00933_ttl_formatting/query.sql @@ -0,0 +1,7 @@ +--- Tests for Github issue 88306 + +CREATE TABLE tab(col Int) ENGINE = MergeTree() ORDER BY tuple() TTL greater(materialize(2), 1); -- { serverError BAD_ARGUMENTS } + +CREATE TABLE tab(col Int TTL (1 AS alias)) ENGINE = Memory; -- { serverError BAD_ARGUMENTS } + +CREATE TABLE tab(col Int TTL (1 AS alias)) ENGINE = ReplacingMergeTree ORDER BY col; -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/00933_ttl_simple/ast.json b/parser/testdata/00933_ttl_simple/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00933_ttl_simple/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00933_ttl_simple/metadata.json b/parser/testdata/00933_ttl_simple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00933_ttl_simple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00933_ttl_simple/query.sql b/parser/testdata/00933_ttl_simple/query.sql new file mode 100644 index 000000000..6a8ed0e17 --- /dev/null +++ b/parser/testdata/00933_ttl_simple/query.sql @@ -0,0 +1,114 @@ +-- Tags: log-engine +-- disable timezone randomization since otherwise TTL may fail at particular datetime, i.e.: +-- +-- SELECT +-- now(), +-- toDate(toTimeZone(now(), 'America/Mazatlan')), +-- today() +-- +-- ┌───────────────now()─┬─toDate(toTimeZone(now(), 'America/Mazatlan'))─┬────today()─┐ +-- │ 2023-07-24 06:24:06 │ 2023-07-23 │ 2023-07-24 │ +-- └─────────────────────┴───────────────────────────────────────────────┴────────────┘ +SET session_timezone = ''; +SET allow_suspicious_ttl_expressions = 1; + +drop table if exists ttl_00933_1; + +-- Column TTL works only with wide parts, because it's very expensive to apply it for compact parts + +create table ttl_00933_1 (d DateTime, a Int ttl d + interval 1 second, b Int ttl d + interval 1 second) engine = MergeTree order by tuple() partition by toMinute(d) settings min_bytes_for_wide_part = 0; +insert into ttl_00933_1 values (now(), 1, 2); +insert into ttl_00933_1 values (now(), 3, 4); +select sleep(1.1) format Null; +optimize table ttl_00933_1 final; +select a, b from ttl_00933_1; + +drop table if exists ttl_00933_1; + +create table ttl_00933_1 (d DateTime, a Int, b Int) + engine = MergeTree order by toDate(d) partition by tuple() ttl d + interval 1 
second + settings remove_empty_parts = 0; +insert into ttl_00933_1 values (now(), 1, 2); +insert into ttl_00933_1 values (now(), 3, 4); +insert into ttl_00933_1 values (now() + 1000, 5, 6); +select sleep(1.1) format Null; +optimize table ttl_00933_1 final; -- check ttl merge for part with both expired and unexpired values +select a, b from ttl_00933_1; + +drop table if exists ttl_00933_1; + +create table ttl_00933_1 (d DateTime, a Int ttl d + interval 1 DAY) engine = MergeTree order by tuple() partition by toDayOfMonth(d) settings min_bytes_for_wide_part = 0; +insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 1); +insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 2); +insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 3); +optimize table ttl_00933_1 final; +select * from ttl_00933_1 order by d; + +drop table if exists ttl_00933_1; + +create table ttl_00933_1 (d DateTime, a Int) + engine = MergeTree order by tuple() partition by tuple() ttl d + interval 1 day + settings remove_empty_parts = 0; +insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 1); +insert into ttl_00933_1 values (toDateTime('2000-10-10 00:00:00'), 2); +insert into ttl_00933_1 values (toDateTime('2100-10-10 00:00:00'), 3); +optimize table ttl_00933_1 final; +select * from ttl_00933_1 order by d; + +drop table if exists ttl_00933_1; + +create table ttl_00933_1 (d Date, a Int) + engine = MergeTree order by a partition by toDayOfMonth(d) ttl d + interval 1 day + settings remove_empty_parts = 0; +insert into ttl_00933_1 values (toDate('2000-10-10'), 1); +insert into ttl_00933_1 values (toDate('2100-10-10'), 2); +optimize table ttl_00933_1 final; +select * from ttl_00933_1 order by d; + +-- const DateTime TTL positive +drop table if exists ttl_00933_1; +create table ttl_00933_1 (b Int, a Int ttl '2000-10-10 00:00:00'::DateTime) +engine = MergeTree order by tuple() partition by tuple() settings min_bytes_for_wide_part = 0; + +show create table ttl_00933_1; +insert into ttl_00933_1 values (1, 1); +optimize table ttl_00933_1 final; +select * from ttl_00933_1; + +-- const DateTime TTL negative +drop table if exists ttl_00933_1; +create table ttl_00933_1 (b Int, a Int ttl '2100-10-10 00:00:00'::DateTime) engine = MergeTree order by tuple() partition by tuple() settings min_bytes_for_wide_part = 0; +show create table ttl_00933_1; +insert into ttl_00933_1 values (1, 1); +optimize table ttl_00933_1 final; +select * from ttl_00933_1; + +-- const Date TTL positive +drop table if exists ttl_00933_1; +create table ttl_00933_1 (b Int, a Int ttl '2000-10-10'::Date) engine = MergeTree order by tuple() partition by tuple() settings min_bytes_for_wide_part = 0; +show create table ttl_00933_1; +insert into ttl_00933_1 values (1, 1); +optimize table ttl_00933_1 final; +select * from ttl_00933_1; + +-- const Date TTL negative +drop table if exists ttl_00933_1; +create table ttl_00933_1 (b Int, a Int ttl '2100-10-10'::Date) engine = MergeTree order by tuple() partition by tuple() settings min_bytes_for_wide_part = 0; +show create table ttl_00933_1; +insert into ttl_00933_1 values (1, 1); +optimize table ttl_00933_1 final; +select * from ttl_00933_1; + +set send_logs_level = 'fatal'; + +drop table if exists ttl_00933_1; + +create table ttl_00933_1 (d DateTime ttl d) engine = MergeTree order by tuple() partition by toSecond(d); -- { serverError ILLEGAL_COLUMN} +create table ttl_00933_1 (d DateTime, a Int ttl d) engine = MergeTree order by a partition by toSecond(d); -- { serverError 
ILLEGAL_COLUMN} +create table ttl_00933_1 (d DateTime, a Int ttl 2 + 2) engine = MergeTree order by tuple() partition by toSecond(d); -- { serverError BAD_TTL_EXPRESSION } +create table ttl_00933_1 (d DateTime, a Int ttl d - d) engine = MergeTree order by tuple() partition by toSecond(d); -- { serverError BAD_TTL_EXPRESSION } + +create table ttl_00933_1 (d DateTime, a Int ttl d + interval 1 day) engine = Log; -- { serverError BAD_ARGUMENTS } +create table ttl_00933_1 (d DateTime, a Int) engine = Log ttl d + interval 1 day; -- { serverError BAD_ARGUMENTS } + +drop table if exists ttl_00933_1; diff --git a/parser/testdata/00933_ttl_with_default/ast.json b/parser/testdata/00933_ttl_with_default/ast.json new file mode 100644 index 000000000..060af8e17 --- /dev/null +++ b/parser/testdata/00933_ttl_with_default/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ttl_00933_2 (children 1)" + }, + { + "explain": " Identifier ttl_00933_2" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00104667, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/00933_ttl_with_default/metadata.json b/parser/testdata/00933_ttl_with_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00933_ttl_with_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00933_ttl_with_default/query.sql b/parser/testdata/00933_ttl_with_default/query.sql new file mode 100644 index 000000000..e6c0a6e70 --- /dev/null +++ b/parser/testdata/00933_ttl_with_default/query.sql @@ -0,0 +1,31 @@ +drop table if exists ttl_00933_2; + +create table ttl_00933_2 (d DateTime, a Int default 111 ttl d + interval 1 DAY) engine = MergeTree order by tuple() partition by toDayOfMonth(d); +insert into ttl_00933_2 values (toDateTime('2000-10-10 00:00:00'), 1); +insert into ttl_00933_2 values (toDateTime('2000-10-10 00:00:00'), 2); +insert into ttl_00933_2 values (toDateTime('2100-10-10 00:00:00'), 3); +insert into ttl_00933_2 values (toDateTime('2100-10-10 00:00:00'), 4); +optimize table ttl_00933_2 final; +select a from ttl_00933_2 order by a; + +drop table if exists ttl_00933_2; + +create table ttl_00933_2 (d DateTime, a Int, b default a * 2 ttl d + interval 1 DAY) engine = MergeTree order by tuple() partition by toDayOfMonth(d); +insert into ttl_00933_2 values (toDateTime('2000-10-10 00:00:00'), 1, 100); +insert into ttl_00933_2 values (toDateTime('2000-10-10 00:00:00'), 2, 200); +insert into ttl_00933_2 values (toDateTime('2100-10-10 00:00:00'), 3, 300); +insert into ttl_00933_2 values (toDateTime('2100-10-10 00:00:00'), 4, 400); +optimize table ttl_00933_2 final; +select a, b from ttl_00933_2 order by a; + +drop table if exists ttl_00933_2; + +create table ttl_00933_2 (d DateTime, a Int, b default 222 ttl d + interval 1 DAY) engine = MergeTree order by tuple() partition by toDayOfMonth(d); +insert into ttl_00933_2 values (toDateTime('2000-10-10 00:00:00'), 1, 5); +insert into ttl_00933_2 values (toDateTime('2000-10-10 00:00:00'), 2, 10); +insert into ttl_00933_2 values (toDateTime('2100-10-10 00:00:00'), 3, 15); +insert into ttl_00933_2 values (toDateTime('2100-10-10 00:00:00'), 4, 20); +optimize table ttl_00933_2 final; +select a, b from ttl_00933_2 order by a; + +drop table if exists ttl_00933_2; diff --git a/parser/testdata/00934_is_valid_utf8/ast.json b/parser/testdata/00934_is_valid_utf8/ast.json new file mode 100644 index 000000000..3aeddc309 --- 
/dev/null +++ b/parser/testdata/00934_is_valid_utf8/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function isValidUTF8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001009949, + "rows_read": 15, + "bytes_read": 581 + } +} diff --git a/parser/testdata/00934_is_valid_utf8/metadata.json b/parser/testdata/00934_is_valid_utf8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00934_is_valid_utf8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00934_is_valid_utf8/query.sql b/parser/testdata/00934_is_valid_utf8/query.sql new file mode 100644 index 000000000..e4075656e --- /dev/null +++ b/parser/testdata/00934_is_valid_utf8/query.sql @@ -0,0 +1,127 @@ +select 1 = isValidUTF8('') from system.numbers limit 10; +select 1 = isValidUTF8('some text') from system.numbers limit 10; +select 1 = isValidUTF8('какой-то текст') from system.numbers limit 10; +select 1 = isValidUTF8('\x00') from system.numbers limit 10; +select 1 = isValidUTF8('\x66') from system.numbers limit 10; +select 1 = isValidUTF8('\x7F') from system.numbers limit 10; +select 1 = isValidUTF8('\x00\x7F') from system.numbers limit 10; +select 1 = isValidUTF8('\x7F\x00') from system.numbers limit 10; +select 1 = isValidUTF8('\xC2\x80') from system.numbers limit 10; +select 1 = isValidUTF8('\xDF\xBF') from system.numbers limit 10; +select 1 = isValidUTF8('\xE0\xA0\x80') from system.numbers limit 10; +select 1 = isValidUTF8('\xE0\xA0\xBF') from system.numbers limit 10; +select 1 = isValidUTF8('\xED\x9F\x80') from system.numbers limit 10; +select 1 = isValidUTF8('\xEF\x80\xBF') from system.numbers limit 10; +select 1 = isValidUTF8('\xF0\x90\xBF\x80') from system.numbers limit 10; +select 1 = isValidUTF8('\xF2\x81\xBE\x99') from system.numbers limit 10; +select 1 = isValidUTF8('\xF4\x8F\x88\xAA') from system.numbers limit 10; + +select 1 = isValidUTF8('a') from system.numbers limit 10; +select 1 = isValidUTF8('\xc3\xb1') from system.numbers limit 10; +select 1 = isValidUTF8('\xe2\x82\xa1') from system.numbers limit 10; +select 1 = isValidUTF8('\xf0\x90\x8c\xbc') from system.numbers limit 10; +select 1 = isValidUTF8('안녕하세요, 세상') from system.numbers limit 10; + +select 0 = isValidUTF8('\xc3\x28') from system.numbers limit 10; +select 0 = isValidUTF8('\xa0\xa1') from system.numbers limit 10; +select 0 = isValidUTF8('\xe2\x28\xa1') from system.numbers limit 10; +select 0 = isValidUTF8('\xe2\x82\x28') from system.numbers limit 10; +select 0 = isValidUTF8('\xf0\x28\x8c\xbc') from system.numbers limit 10; +select 0 = isValidUTF8('\xf0\x90\x28\xbc') from system.numbers limit 10; +select 0 = isValidUTF8('\xf0\x28\x8c\x28') from system.numbers limit 10; 
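+-- The cases below are rejected for different reasons (explanatory note, not part of the original test): +-- \xc0\x9f is an overlong two-byte encoding (bytes C0 and C1 never occur in valid UTF-8), +-- \xf5\xff\xff\xff would start a sequence for a code point above U+10FFFF, +-- \xed\xa0\x81 encodes U+D801, a UTF-16 surrogate, which RFC 3629 forbids in UTF-8, +-- and \xf8\x90\x80\x80\x80 begins a five-byte sequence, which is likewise disallowed.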
+select 0 = isValidUTF8('\xc0\x9f') from system.numbers limit 10; +select 0 = isValidUTF8('\xf5\xff\xff\xff') from system.numbers limit 10; +select 0 = isValidUTF8('\xed\xa0\x81') from system.numbers limit 10; +select 0 = isValidUTF8('\xf8\x90\x80\x80\x80') from system.numbers limit 10; +select 0 = isValidUTF8('12345678901234\xed') from system.numbers limit 10; +select 0 = isValidUTF8('123456789012345\xed') from system.numbers limit 10; +select 0 = isValidUTF8('123456789012345\xed123456789012345\xed') from system.numbers limit 10; +select 0 = isValidUTF8('123456789012345\xf1') from system.numbers limit 10; +select 0 = isValidUTF8('123456789012345\xc2') from system.numbers limit 10; +select 0 = isValidUTF8('\xC2\x7F') from system.numbers limit 10; + +select 0 = isValidUTF8('\x80') from system.numbers limit 10; +select 0 = isValidUTF8('\xBF') from system.numbers limit 10; +select 0 = isValidUTF8('\xC0\x80') from system.numbers limit 10; +select 0 = isValidUTF8('\xC1\x00') from system.numbers limit 10; +select 0 = isValidUTF8('\xC2\x7F') from system.numbers limit 10; +select 0 = isValidUTF8('\xDF\xC0') from system.numbers limit 10; +select 0 = isValidUTF8('\xE0\x9F\x80') from system.numbers limit 10; +select 0 = isValidUTF8('\xE0\xC2\x80') from system.numbers limit 10; +select 0 = isValidUTF8('\xED\xA0\x80') from system.numbers limit 10; +select 0 = isValidUTF8('\xED\x7F\x80') from system.numbers limit 10; +select 0 = isValidUTF8('\xEF\x80\x00') from system.numbers limit 10; +select 0 = isValidUTF8('\xF0\x8F\x80\x80') from system.numbers limit 10; +select 0 = isValidUTF8('\xF0\xEE\x80\x80') from system.numbers limit 10; +select 0 = isValidUTF8('\xF2\x90\x91\x7F') from system.numbers limit 10; +select 0 = isValidUTF8('\xF4\x90\x88\xAA') from system.numbers limit 10; +select 0 = isValidUTF8('\xF4\x00\xBF\xBF') from system.numbers limit 10; +select 0 = isValidUTF8('\x00\x00\x00\x00\x00\xC2\x80\x00\x00\x00\xE1\x80\x80\x00\x00\xC2\xC2\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00') from system.numbers limit 10; +select 0 = isValidUTF8('\x00\x00\x00\x00\x00\xC2\xC2\x80\x00\x00\xE1\x80\x80\x00\x00\x00') from system.numbers limit 10; +select 0 = isValidUTF8('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF1\x80') from system.numbers limit 10; +select 0 = isValidUTF8('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF1') from system.numbers limit 10; +select 0 = isValidUTF8('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF1\x80\x80') from system.numbers limit 10; +select 0 = isValidUTF8('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF1\x80\xC2\x80') from system.numbers limit 10; +select 0 = isValidUTF8('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF0\x80\x80\x80') from system.numbers limit 10; + +select 1 = isValidUTF8(toFixedString('some text', 9)) from system.numbers limit 10; +select 1 = isValidUTF8(toFixedString('какой-то текст', 27)) from system.numbers limit 10; +select 1 = isValidUTF8(toFixedString('\x00', 1)) from system.numbers limit 10; +select 1 = isValidUTF8(toFixedString('\x66', 1)) from system.numbers limit 10; +select 1 = isValidUTF8(toFixedString('\x7F', 1)) from system.numbers limit 10; +select 
1 = isValidUTF8(toFixedString('\x00\x7F', 2)) from system.numbers limit 10; +select 1 = isValidUTF8(toFixedString('\x7F\x00', 2)) from system.numbers limit 10; +select 1 = isValidUTF8(toFixedString('\xC2\x80', 2)) from system.numbers limit 10; +select 1 = isValidUTF8(toFixedString('\xDF\xBF', 2)) from system.numbers limit 10; +select 1 = isValidUTF8(toFixedString('\xE0\xA0\x80', 3)) from system.numbers limit 10; +select 1 = isValidUTF8(toFixedString('\xE0\xA0\xBF', 3)) from system.numbers limit 10; +select 1 = isValidUTF8(toFixedString('\xED\x9F\x80', 3)) from system.numbers limit 10; +select 1 = isValidUTF8(toFixedString('\xEF\x80\xBF', 3)) from system.numbers limit 10; +select 1 = isValidUTF8(toFixedString('\xF0\x90\xBF\x80', 4)) from system.numbers limit 10; +select 1 = isValidUTF8(toFixedString('\xF2\x81\xBE\x99', 4)) from system.numbers limit 10; +select 1 = isValidUTF8(toFixedString('\xF4\x8F\x88\xAA', 4)) from system.numbers limit 10; + +select 0 = isValidUTF8(toFixedString('\x80', 1)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xBF', 1)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xC0\x80', 2)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xC1\x00', 2)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xC2\x7F', 2)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xDF\xC0', 2)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xE0\x9F\x80', 3)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xE0\xC2\x80', 3)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xED\xA0\x80', 3)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xED\x7F\x80', 3)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xEF\x80\x00', 3)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xF0\x8F\x80\x80', 4)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xF0\xEE\x80\x80', 4)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xF2\x90\x91\x7F', 4)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xF4\x90\x88\xAA', 4)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xF4\x00\xBF\xBF', 4)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\x00\x00\x00\x00\x00\xC2\x80\x00\x00\x00\xE1\x80\x80\x00\x00\xC2\xC2\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', 32)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\x00\x00\x00\x00\x00\xC2\xC2\x80\x00\x00\xE1\x80\x80\x00\x00\x00', 16)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF1\x80', 32)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF1', 32)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF1\x80\x80', 33)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF1\x80\xC2\x80', 34)) from system.numbers limit 10; +select 0 = 
isValidUTF8(toFixedString('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF0\x80\x80\x80', 35)) from system.numbers limit 10; + +select 1 = isValidUTF8(toFixedString('a', 1)) from system.numbers limit 10; +select 1 = isValidUTF8(toFixedString('\xc3\xb1', 2)) from system.numbers limit 10; +select 1 = isValidUTF8(toFixedString('\xe2\x82\xa1', 3)) from system.numbers limit 10; +select 1 = isValidUTF8(toFixedString('\xf0\x90\x8c\xbc', 4)) from system.numbers limit 10; + +select 0 = isValidUTF8(toFixedString('\xc3\x28', 2)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xa0\xa1', 2)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xe2\x28\xa1', 3)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xe2\x82\x28', 3)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xf0\x28\x8c\xbc', 4)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xf0\x90\x28\xbc', 4)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xf0\x28\x8c\x28', 4)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xc0\x9f', 2)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xf5\xff\xff\xff', 4)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xed\xa0\x81', 3)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xf8\x90\x80\x80\x80', 5)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('123456789012345\xed', 16)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('123456789012345\xf1', 16)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('123456789012345\xc2', 16)) from system.numbers limit 10; +select 0 = isValidUTF8(toFixedString('\xC2\x7F', 2)) from system.numbers limit 10; diff --git a/parser/testdata/00935_to_iso_week_first_year/ast.json b/parser/testdata/00935_to_iso_week_first_year/ast.json new file mode 100644 index 000000000..bf18cc272 --- /dev/null +++ b/parser/testdata/00935_to_iso_week_first_year/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function plus (alias d) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '1970-01-01'" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function toISOWeek (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier d" + }, + { + "explain": " Function toISOYear (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier d" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_15" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001070195, + "rows_read": 22, + "bytes_read": 859 + } +} diff --git 
a/parser/testdata/00935_to_iso_week_first_year/metadata.json b/parser/testdata/00935_to_iso_week_first_year/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00935_to_iso_week_first_year/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00935_to_iso_week_first_year/query.sql b/parser/testdata/00935_to_iso_week_first_year/query.sql new file mode 100644 index 000000000..eb1c4135b --- /dev/null +++ b/parser/testdata/00935_to_iso_week_first_year/query.sql @@ -0,0 +1,3 @@ +SELECT toDate('1970-01-01') + number AS d, toISOWeek(d), toISOYear(d) FROM numbers(15); +-- Note that 1970-01-01 00:00:00 in Moscow is before the Unix epoch. +SELECT toDateTime(toDate('1970-01-02') + number, 'Asia/Istanbul') AS t, toISOWeek(t), toISOYear(t) FROM numbers(15); diff --git a/parser/testdata/00936_crc_functions/ast.json b/parser/testdata/00936_crc_functions/ast.json new file mode 100644 index 000000000..2def2fda7 --- /dev/null +++ b/parser/testdata/00936_crc_functions/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table1 (children 1)" + }, + { + "explain": " Identifier table1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001139815, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/00936_crc_functions/metadata.json b/parser/testdata/00936_crc_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00936_crc_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00936_crc_functions/query.sql b/parser/testdata/00936_crc_functions/query.sql new file mode 100644 index 000000000..4794062b5 --- /dev/null +++ b/parser/testdata/00936_crc_functions/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS table1; + +CREATE TABLE table1 (str1 String, str2 String) ENGINE = Memory; + +INSERT INTO table1 VALUES('qwerty', 'string'); +INSERT INTO table1 VALUES('qqq', 'aaa'); +INSERT INTO table1 VALUES('aasq', 'xxz'); +INSERT INTO table1 VALUES('zxcqwer', ''); +INSERT INTO table1 VALUES('', ''); + +select CRC32('string'); +select CrC32('string'), crc32('test'); -- We want to test that the function name is case-insensitive +select CRC32(str1) from table1 order by CRC32(str1); +select CRC32(str2) from table1 order by CRC32(str2); +select CRC32(str1), CRC32(str2) from table1 order by CRC32(str1), CRC32(str2); +select str1, str2, CRC32(str1), CRC32(str2) from table1 order by CRC32(str1), CRC32(str2); + +DROP TABLE table1; + +SELECT 'CRC32IEEE()'; +SELECT hex(CRC32IEEE('foo')); +SELECT 'CRC64()'; +SELECT hex(CRC64('foo')); diff --git a/parser/testdata/00936_function_result_with_operator_in/ast.json b/parser/testdata/00936_function_result_with_operator_in/ast.json new file mode 100644 index 000000000..6ca225bbd --- /dev/null +++ b/parser/testdata/00936_function_result_with_operator_in/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001438444, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00936_function_result_with_operator_in/metadata.json b/parser/testdata/00936_function_result_with_operator_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00936_function_result_with_operator_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git
a/parser/testdata/00936_function_result_with_operator_in/query.sql b/parser/testdata/00936_function_result_with_operator_in/query.sql new file mode 100644 index 000000000..859796006 --- /dev/null +++ b/parser/testdata/00936_function_result_with_operator_in/query.sql @@ -0,0 +1,34 @@ +SET force_primary_key = 1; + +DROP TABLE IF EXISTS samples; +CREATE TABLE samples (key UInt32, value UInt32) ENGINE = MergeTree() ORDER BY key PRIMARY KEY key; +INSERT INTO samples VALUES (1, 1)(2, 2)(3, 3)(4, 4)(5, 5); + +-- all entries, verify that the index is used +SELECT count() FROM samples WHERE key IN range(10); + +-- some entries: +SELECT count() FROM samples WHERE key IN arraySlice(range(100), 5, 10); + +-- different type +SELECT count() FROM samples WHERE toUInt64(key) IN range(100); + +SELECT 'empty:'; +-- should be empty +SELECT count() FROM samples WHERE key IN arraySlice(range(100), 10, 10); + +-- not only ints: +SELECT 'a' IN splitByChar('c', 'abcdef'); + +SELECT 'errors:'; +-- non-constant expressions in the right side of IN +SELECT count() FROM samples WHERE 1 IN range(samples.value); -- { serverError UNSUPPORTED_METHOD, 47 } +SELECT count() FROM samples WHERE 1 IN range(rand() % 1000); -- { serverError UNSUPPORTED_METHOD, 36 } + +-- index is not used +SELECT count() FROM samples WHERE value IN range(3); -- { serverError INDEX_NOT_USED } + +-- wrong type
SELECT 123 IN splitByChar('c', 'abcdef'); -- { serverError TYPE_MISMATCH } + +DROP TABLE samples; diff --git a/parser/testdata/00936_substring_utf8_non_const/ast.json b/parser/testdata/00936_substring_utf8_non_const/ast.json new file mode 100644 index 000000000..cbbfb7523 --- /dev/null +++ b/parser/testdata/00936_substring_utf8_non_const/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 2)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Function substringUTF8 (children 1)" + }, + { + "explain": "     ExpressionList (children 3)" + }, + { + "explain": "      Literal 'hello, привет'" + }, + { + "explain": "      Literal UInt64_1" + }, + { + "explain": "      Identifier number" + }, + { + "explain": "   TablesInSelectQuery (children 1)" + }, + { + "explain": "    TablesInSelectQueryElement (children 1)" + }, + { + "explain": "     TableExpression (children 1)" + }, + { + "explain": "      Function numbers (children 1)" + }, + { + "explain": "       ExpressionList (children 1)" + }, + { + "explain": "        Literal UInt64_16" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001385257, + "rows_read": 15, + "bytes_read": 593 + } +} diff --git a/parser/testdata/00936_substring_utf8_non_const/metadata.json b/parser/testdata/00936_substring_utf8_non_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00936_substring_utf8_non_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00936_substring_utf8_non_const/query.sql b/parser/testdata/00936_substring_utf8_non_const/query.sql new file mode 100644 index 000000000..a2b115d77 --- /dev/null +++ b/parser/testdata/00936_substring_utf8_non_const/query.sql @@ -0,0 +1,42 @@ +SELECT substringUTF8('hello, привет', 1, number) FROM numbers(16); +SELECT substringUTF8('hello, привет', number + 1, 3) FROM numbers(16); +SELECT substringUTF8('hello, привет', number + 1, number) FROM numbers(16); +SELECT substringUTF8('hello, привет', -1 -
number, 5) FROM numbers(16); +SELECT substringUTF8('hello, привет', -1 - number) FROM numbers(16); +SELECT substringUTF8('hello, привет', 1 + number) FROM numbers(16); + +SELECT substringUTF8('hello, привет', 1) FROM numbers(3); +SELECT substringUTF8('hello, привет', 5) FROM numbers(3); +SELECT substringUTF8('hello, привет', 1, 10) FROM numbers(3); +SELECT substringUTF8('hello, привет', 5, 5) FROM numbers(3); +SELECT substringUTF8('hello, привет', -5) FROM numbers(3); +SELECT substringUTF8('hello, привет', -10, 5) FROM numbers(3); + +SELECT substringUTF8(materialize('hello, привет'), 1, number) FROM numbers(16); +SELECT substringUTF8(materialize('hello, привет'), number + 1, 3) FROM numbers(16); +SELECT substringUTF8(materialize('hello, привет'), number + 1, number) FROM numbers(16); +SELECT substringUTF8(materialize('hello, привет'), -1 - number, 5) FROM numbers(16); +SELECT substringUTF8(materialize('hello, привет'), -1 - number) FROM numbers(16); +SELECT substringUTF8(materialize('hello, привет'), 1 + number) FROM numbers(16); + +SELECT substringUTF8(materialize('hello, привет'), 1) FROM numbers(3); +SELECT substringUTF8(materialize('hello, привет'), 5) FROM numbers(3); +SELECT substringUTF8(materialize('hello, привет'), 1, 10) FROM numbers(3); +SELECT substringUTF8(materialize('hello, привет'), 5, 5) FROM numbers(3); +SELECT substringUTF8(materialize('hello, привет'), -5) FROM numbers(3); +SELECT substringUTF8(materialize('hello, привет'), -10, 5) FROM numbers(3); + +SELECT DISTINCT substring(toString(range(rand(1) % 50)), rand(2) % 50, rand(3) % 50) = substringUTF8(toString(range(rand(1) % 50)), rand(2) % 50, rand(3) % 50) AS res FROM numbers(1000000); +SELECT DISTINCT substring(toString(range(rand(1) % 50)), rand(2) % 50) = substringUTF8(toString(range(rand(1) % 50)), rand(2) % 50) AS res FROM numbers(1000000); + +-- NOTE: The behaviour of substring and substringUTF8 is inconsistent when negative offset is greater than string size: +-- substring: +-- hello +-- ^-----^ - offset -10, length 7, result: "he" +-- substringUTF8: +-- hello +-- ^-----^ - offset -10, length 7, result: "hello" + +-- SELECT DISTINCT substring(toString(range(rand(1) % 50)), -(rand(2) % 50), rand(3) % 50) = substringUTF8(toString(range(rand(1) % 50)), -(rand(2) % 50), rand(3) % 50) AS res FROM numbers(1000000); + +SELECT DISTINCT substring(toString(range(rand(1) % 50)), -(rand(2) % 50)) = substringUTF8(toString(range(rand(1) % 50)), -(rand(2) % 50)) AS res FROM numbers(1000000); diff --git a/parser/testdata/00937_ipv4_cidr_range/ast.json b/parser/testdata/00937_ipv4_cidr_range/ast.json new file mode 100644 index 000000000..2424db5c4 --- /dev/null +++ b/parser/testdata/00937_ipv4_cidr_range/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'tests'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001518412, + "rows_read": 5, + "bytes_read": 176 + } +} diff --git a/parser/testdata/00937_ipv4_cidr_range/metadata.json b/parser/testdata/00937_ipv4_cidr_range/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00937_ipv4_cidr_range/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00937_ipv4_cidr_range/query.sql 
b/parser/testdata/00937_ipv4_cidr_range/query.sql new file mode 100644 index 000000000..2cc33eb9f --- /dev/null +++ b/parser/testdata/00937_ipv4_cidr_range/query.sql @@ -0,0 +1,26 @@ +SELECT 'tests'; + +DROP TABLE IF EXISTS ipv4_range; +CREATE TABLE ipv4_range(ip IPv4, cidr UInt8) ENGINE = Memory; + +INSERT INTO ipv4_range (ip, cidr) VALUES (toIPv4('192.168.5.2'), 0), (toIPv4('192.168.5.20'), 32), (toIPv4('255.255.255.255'), 16), (toIPv4('192.142.32.2'), 32), (toIPv4('192.172.5.2'), 16), (toIPv4('0.0.0.0'), 8), (toIPv4('255.0.0.0'), 4); + +WITH IPv4CIDRToRange(toIPv4('192.168.0.0'), 8) as ip_range SELECT COUNT(*) FROM ipv4_range WHERE ip BETWEEN tupleElement(ip_range, 1) AND tupleElement(ip_range, 2); + +WITH IPv4CIDRToRange(toIPv4('192.168.0.0'), 13) as ip_range SELECT COUNT(*) FROM ipv4_range WHERE ip BETWEEN tupleElement(ip_range, 1) AND tupleElement(ip_range, 2); + +WITH IPv4CIDRToRange(toIPv4('192.168.0.0'), 16) as ip_range SELECT COUNT(*) FROM ipv4_range WHERE ip BETWEEN tupleElement(ip_range, 1) AND tupleElement(ip_range, 2); + +WITH IPv4CIDRToRange(toIPv4('192.168.0.0'), 0) as ip_range SELECT COUNT(*) FROM ipv4_range WHERE ip BETWEEN tupleElement(ip_range, 1) AND tupleElement(ip_range, 2); + +WITH IPv4CIDRToRange(ip, cidr) as ip_range SELECT ip, cidr, IPv4NumToString(tupleElement(ip_range, 1)), ip_range FROM ipv4_range; + +DROP TABLE ipv4_range; + +SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 0); +SELECT IPv4CIDRToRange(toIPv4('255.255.255.255'), 8); +SELECT IPv4CIDRToRange(toIPv4('192.168.5.2'), 32); +SELECT IPv4CIDRToRange(toIPv4('0.0.0.0'), 8); +SELECT IPv4CIDRToRange(toIPv4('255.0.0.0'), 4); + +SELECT IPv4CIDRToRange(toIPv4('255.0.0.0'), toUInt8(4 + number)) FROM numbers(2); diff --git a/parser/testdata/00938_basename/ast.json b/parser/testdata/00938_basename/ast.json new file mode 100644 index 000000000..363b3705d --- /dev/null +++ b/parser/testdata/00938_basename/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function basename (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '\/usr\/bin\/bash'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001175897, + "rows_read": 7, + "bytes_read": 268 + } +} diff --git a/parser/testdata/00938_basename/metadata.json b/parser/testdata/00938_basename/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00938_basename/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00938_basename/query.sql b/parser/testdata/00938_basename/query.sql new file mode 100644 index 000000000..cea11e9a4 --- /dev/null +++ b/parser/testdata/00938_basename/query.sql @@ -0,0 +1,5 @@ +SELECT basename('/usr/bin/bash'); +SELECT basename('/usr/bin/bash/'); +SELECT basename('bash'); +SELECT basename('C:\\\\Users\\Documents\\test_file'); +SELECT basename(path('http://example.com/folder_1/folder_2/script.php')) diff --git a/parser/testdata/00938_dataset_test/ast.json b/parser/testdata/00938_dataset_test/ast.json new file mode 100644 index 000000000..e727ab7e1 --- /dev/null +++ b/parser/testdata/00938_dataset_test/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 
defaults (children 1)" + }, + { + "explain": " Identifier defaults" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001568839, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00938_dataset_test/metadata.json b/parser/testdata/00938_dataset_test/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00938_dataset_test/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00938_dataset_test/query.sql b/parser/testdata/00938_dataset_test/query.sql new file mode 100644 index 000000000..ec22918a7 --- /dev/null +++ b/parser/testdata/00938_dataset_test/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS defaults; +CREATE TABLE IF NOT EXISTS defaults +( + param1 Float64, + param2 Float64, + target Float64, + predict1 Float64, + predict2 Float64 +) ENGINE = Memory; +insert into defaults values (-3.273, -1.452, 4.267, 20.0, 40.0), (0.121, -0.615, 4.290, 20.0, 40.0), (-1.099, 2.755, -3.060, 20.0, 40.0), (1.090, 2.945, -2.346, 20.0, 40.0), (0.305, 2.179, -1.205, 20.0, 40.0), (-0.925, 0.702, 1.134, 20.0, 40.0), (3.178, -1.316, 7.221, 20.0, 40.0), (-2.756, -0.473, 2.569, 20.0, 40.0), (3.665, 2.303, 0.226, 20.0, 40.0), (1.662, 1.951, -0.070, 20.0, 40.0), (2.869, 0.593, 3.249, 20.0, 40.0), (0.818, -0.593, 4.594, 20.0, 40.0), (-1.917, 0.916, 0.209, 20.0, 40.0), (2.706, 1.523, 1.307, 20.0, 40.0), (0.219, 2.162, -1.214, 20.0, 40.0), (-4.510, 1.376, -2.007, 20.0, 40.0), (4.284, -0.515, 6.173, 20.0, 40.0), (-1.101, 2.810, -3.170, 20.0, 40.0), (-1.810, -1.117, 4.329, 20.0, 40.0), (0.055, 1.115, 0.797, 20.0, 40.0), (-2.178, 2.904, -3.898, 20.0, 40.0), (-3.494, -1.814, 4.882, 20.0, 40.0), (3.027, 0.476, 3.562, 20.0, 40.0), (-1.434, 1.151, -0.018, 20.0, 40.0), (1.180, 0.992, 1.606, 20.0, 40.0), (0.015, 0.971, 1.067, 20.0, 40.0), (-0.511, -0.875, 4.495, 20.0, 40.0), (0.961, 2.348, -1.216, 20.0, 40.0), (-2.279, 0.038, 1.785, 20.0, 40.0), (-1.568, -0.248, 2.712, 20.0, 40.0), (-0.496, 0.366, 2.020, 20.0, 40.0), (1.177, -1.401, 6.390, 20.0, 40.0), (2.882, -1.442, 7.325, 20.0, 40.0), (-1.066, 1.817, -1.167, 20.0, 40.0), (-2.144, 2.791, -3.655, 20.0, 40.0), (-4.370, 2.228, -3.642, 20.0, 40.0), (3.996, 2.775, -0.553, 20.0, 40.0), (0.289, 2.055, -0.965, 20.0, 40.0), (-0.588, -1.601, 5.908, 20.0, 40.0), (-1.801, 0.417, 1.265, 20.0, 40.0), (4.375, -1.499, 8.186, 20.0, 40.0), (-2.618, 0.038, 1.615, 20.0, 40.0), (3.616, -0.833, 6.475, 20.0, 40.0), (-4.045, -1.558, 4.094, 20.0, 40.0), (-3.962, 0.636, -0.253, 20.0, 40.0), (3.505, 2.625, -0.497, 20.0, 40.0), (3.029, -0.523, 5.560, 20.0, 40.0), (-3.520, -0.474, 2.188, 20.0, 40.0), (2.430, -1.469, 7.154, 20.0, 40.0), (1.547, -1.654, 7.082, 20.0, 40.0), (-1.370, 0.575, 1.165, 20.0, 40.0), (-1.869, -1.555, 5.176, 20.0, 40.0), (3.536, 2.841, -0.913, 20.0, 40.0), (-3.810, 1.220, -1.344, 20.0, 40.0), (-1.971, 1.462, -0.910, 20.0, 40.0), (-0.243, 0.167, 2.545, 20.0, 40.0), (-1.403, 2.645, -2.991, 20.0, 40.0), (0.532, -0.114, 3.494, 20.0, 40.0), (-1.678, 0.975, 0.212, 20.0, 40.0), (-0.656, 2.140, -1.609, 20.0, 40.0), (1.743, 2.631, -1.390, 20.0, 40.0), (2.586, 2.943, -1.593, 20.0, 40.0), (-0.512, 2.969, -3.195, 20.0, 40.0), (2.283, -0.100, 4.342, 20.0, 40.0), (-4.293, 0.872, -0.890, 20.0, 40.0), (3.411, 1.300, 2.106, 20.0, 40.0), (-0.281, 2.951, -3.042, 20.0, 40.0), (-4.442, 0.384, 0.012, 20.0, 40.0), (1.194, 1.746, 0.104, 20.0, 40.0), (-1.152, 1.862, -1.300, 20.0, 40.0), (1.362, -1.341, 6.363, 20.0, 40.0), (-4.488, 2.618, -4.481, 20.0, 40.0), (3.419, -0.564, 5.837, 20.0, 40.0), (-3.392, 
0.396, 0.512, 20.0, 40.0), (-1.629, -0.909, 4.003, 20.0, 40.0), (4.447, -1.088, 7.399, 20.0, 40.0), (-1.232, 1.699, -1.014, 20.0, 40.0), (-1.286, -0.609, 3.575, 20.0, 40.0), (2.437, 2.796, -1.374, 20.0, 40.0), (-4.864, 1.989, -3.410, 20.0, 40.0), (-1.716, -1.399, 4.940, 20.0, 40.0), (-3.084, 1.858, -2.259, 20.0, 40.0), (2.828, -0.319, 5.053, 20.0, 40.0), (-1.226, 2.586, -2.786, 20.0, 40.0), (2.456, 0.092, 4.044, 20.0, 40.0), (-0.989, 2.375, -2.245, 20.0, 40.0), (3.268, 0.935, 2.765, 20.0, 40.0), (-4.128, -1.995, 4.927, 20.0, 40.0), (-1.083, 2.197, -1.935, 20.0, 40.0), (-3.471, -1.198, 3.660, 20.0, 40.0), (4.617, -1.136, 7.579, 20.0, 40.0), (2.054, -1.675, 7.378, 20.0, 40.0), (4.106, 2.326, 0.402, 20.0, 40.0), (1.558, 0.310, 3.158, 20.0, 40.0), (0.792, 0.900, 1.596, 20.0, 40.0), (-3.229, 0.300, 0.785, 20.0, 40.0), (3.787, -0.793, 6.479, 20.0, 40.0), (1.786, 2.288, -0.684, 20.0, 40.0), (2.643, 0.223, 3.875, 20.0, 40.0), (-3.592, 2.122, -3.040, 20.0, 40.0), (4.519, -1.760, 8.779, 20.0, 40.0), (3.221, 2.255, 0.101, 20.0, 40.0), (4.151, 1.788, 1.500, 20.0, 40.0), (-1.033, -1.195, 4.874, 20.0, 40.0), (-1.636, -1.037, 4.257, 20.0, 40.0), (-3.548, 1.911, -2.596, 20.0, 40.0), (4.829, -0.293, 6.001, 20.0, 40.0), (-4.684, -1.664, 3.986, 20.0, 40.0), (4.531, -0.503, 6.271, 20.0, 40.0), (-3.503, -1.606, 4.460, 20.0, 40.0), (-2.036, -1.522, 5.027, 20.0, 40.0), (-0.473, -0.617, 3.997, 20.0, 40.0), (-1.554, -1.630, 5.483, 20.0, 40.0), (-3.567, -1.043, 3.302, 20.0, 40.0), (-2.038, 0.579, 0.823, 20.0, 40.0), (-3.040, 0.857, -0.233, 20.0, 40.0), (4.610, 0.562, 4.181, 20.0, 40.0), (-3.323, -1.938, 5.215, 20.0, 40.0), (4.314, 1.720, 1.717, 20.0, 40.0), (-1.220, 0.615, 1.161, 20.0, 40.0), (-2.556, 1.120, -0.519, 20.0, 40.0), (-3.717, -0.108, 1.358, 20.0, 40.0), (4.689, -1.826, 8.996, 20.0, 40.0), (3.452, 0.506, 3.713, 20.0, 40.0), (2.472, 0.612, 3.012, 20.0, 40.0), (3.452, 0.450, 3.826, 20.0, 40.0), (1.207, 2.585, -1.567, 20.0, 40.0), (-4.826, 1.090, -1.593, 20.0, 40.0), (3.116, -1.118, 6.794, 20.0, 40.0), (0.448, 2.732, -2.240, 20.0, 40.0), (-1.096, -0.525, 3.503, 20.0, 40.0), (-4.680, -0.238, 1.137, 20.0, 40.0), (2.552, -1.403, 7.082, 20.0, 40.0), (0.719, 2.997, -2.635, 20.0, 40.0), (0.347, -1.966, 7.105, 20.0, 40.0), (2.958, -0.404, 5.288, 20.0, 40.0), (0.722, -1.950, 7.261, 20.0, 40.0), (-2.851, -0.986, 3.546, 20.0, 40.0), (-4.316, -0.439, 1.721, 20.0, 40.0), (-1.685, -0.201, 2.560, 20.0, 40.0), (1.856, 0.190, 3.549, 20.0, 40.0), (-2.052, 0.206, 1.562, 20.0, 40.0), (-2.504, -0.646, 3.041, 20.0, 40.0), (3.235, 0.882, 2.854, 20.0, 40.0), (-1.366, -1.573, 5.463, 20.0, 40.0), (-3.447, 2.419, -3.562, 20.0, 40.0), (4.155, 2.092, 0.893, 20.0, 40.0), (-0.935, 0.209, 2.116, 20.0, 40.0), (3.117, -1.821, 8.201, 20.0, 40.0), (3.759, 0.577, 3.725, 20.0, 40.0), (-0.938, 2.992, -3.453, 20.0, 40.0), (-0.525, 2.341, -1.945, 20.0, 40.0), (4.540, 2.625, 0.019, 20.0, 40.0), (-2.097, 1.190, -0.429, 20.0, 40.0), (-2.672, 1.983, -2.302, 20.0, 40.0), (-3.038, -1.490, 4.460, 20.0, 40.0), (-0.943, 2.149, -1.770, 20.0, 40.0), (0.739, 1.598, 0.174, 20.0, 40.0), (1.828, 1.853, 0.208, 20.0, 40.0), (4.856, 0.137, 5.153, 20.0, 40.0), (-1.617, 0.468, 1.255, 20.0, 40.0), (-1.972, 2.053, -2.092, 20.0, 40.0), (-4.633, 1.389, -2.094, 20.0, 40.0), (-3.628, -1.156, 3.498, 20.0, 40.0), (3.597, 1.034, 2.731, 20.0, 40.0), (-1.488, -0.002, 2.261, 20.0, 40.0), (0.749, 1.921, -0.468, 20.0, 40.0), (1.304, -1.371, 6.394, 20.0, 40.0), (4.587, 2.936, -0.579, 20.0, 40.0), (-2.241, 1.791, -1.703, 20.0, 40.0), (-2.945, 1.372, -1.216, 20.0, 40.0), (1.375, 
0.395, 2.898, 20.0, 40.0), (-1.281, -0.641, 3.642, 20.0, 40.0), (2.178, 0.895, 2.299, 20.0, 40.0), (3.031, -0.786, 6.087, 20.0, 40.0), (-1.385, -0.375, 3.058, 20.0, 40.0), (4.041, -0.431, 5.882, 20.0, 40.0), (0.480, -0.507, 4.254, 20.0, 40.0), (-3.797, 0.140, 0.822, 20.0, 40.0), (2.355, 2.502, -0.827, 20.0, 40.0), (1.376, -1.583, 6.854, 20.0, 40.0), (0.164, 1.405, 0.273, 20.0, 40.0), (-1.273, 1.471, -0.579, 20.0, 40.0), (0.770, 2.246, -1.107, 20.0, 40.0), (4.552, 2.904, -0.533, 20.0, 40.0), (4.259, -1.772, 8.674, 20.0, 40.0), (-0.309, 1.159, 0.528, 20.0, 40.0), (3.581, 2.700, -0.610, 20.0, 40.0), (-3.202, 0.346, 0.707, 20.0, 40.0), (-1.575, 1.242, -0.271, 20.0, 40.0), (-1.584, -0.493, 3.194, 20.0, 40.0), (-3.778, 0.150, 0.810, 20.0, 40.0), (-4.675, 1.749, -2.835, 20.0, 40.0), (3.567, -0.792, 6.367, 20.0, 40.0), (-0.417, 1.399, -0.006, 20.0, 40.0), (-4.672, 2.007, -3.349, 20.0, 40.0), (-1.034, 0.196, 2.090, 20.0, 40.0), (-3.796, 2.496, -3.890, 20.0, 40.0), (3.532, -0.497, 5.759, 20.0, 40.0), (4.868, -1.359, 8.151, 20.0, 40.0), (-0.769, 0.302, 2.011, 20.0, 40.0), (4.475, 2.612, 0.014, 20.0, 40.0), (-3.532, -0.395, 2.024, 20.0, 40.0), (0.322, 0.675, 1.812, 20.0, 40.0), (-2.028, -1.942, 5.870, 20.0, 40.0), (1.810, -1.244, 6.392, 20.0, 40.0), (-0.783, 1.242, 0.124, 20.0, 40.0), (-4.745, -1.300, 3.227, 20.0, 40.0), (1.902, 1.973, 0.005, 20.0, 40.0), (-3.453, -1.429, 4.132, 20.0, 40.0), (1.559, 0.986, 1.808, 20.0, 40.0), (0.128, 2.754, -2.443, 20.0, 40.0), (2.759, 1.727, 0.926, 20.0, 40.0), (-4.468, 1.690, -2.614, 20.0, 40.0), (-2.368, -1.922, 5.659, 20.0, 40.0), (-2.766, 2.128, -2.640, 20.0, 40.0), (0.967, -1.825, 7.133, 20.0, 40.0), (-2.854, 2.855, -4.136, 20.0, 40.0), (-2.944, 1.875, -2.222, 20.0, 40.0), (-2.632, -0.983, 3.649, 20.0, 40.0), (2.427, 2.239, -0.266, 20.0, 40.0), (-1.726, -0.838, 3.812, 20.0, 40.0), (0.007, -0.903, 4.809, 20.0, 40.0), (-2.013, 1.092, -0.191, 20.0, 40.0), (-0.449, 0.970, 0.836, 20.0, 40.0), (1.396, 0.411, 2.876, 20.0, 40.0), (-1.115, -1.790, 6.023, 20.0, 40.0), (3.748, 1.917, 1.039, 20.0, 40.0), (2.978, 1.043, 2.404, 20.0, 40.0), (-3.969, 2.514, -4.013, 20.0, 40.0), (4.455, -0.050, 5.328, 20.0, 40.0), (-3.065, -0.846, 3.160, 20.0, 40.0), (-1.069, 2.167, -1.869, 20.0, 40.0), (3.016, -1.393, 7.294, 20.0, 40.0), (0.045, -1.928, 6.879, 20.0, 40.0), (-2.555, -0.984, 3.690, 20.0, 40.0), (-1.995, -0.054, 2.111, 20.0, 40.0), (4.600, -0.509, 6.318, 20.0, 40.0), (-1.942, 1.215, -0.402, 20.0, 40.0), (1.262, 2.765, -1.899, 20.0, 40.0), (2.617, -1.106, 6.521, 20.0, 40.0), (1.737, 0.554, 2.761, 20.0, 40.0), (-2.197, 0.632, 0.638, 20.0, 40.0), (4.768, 2.618, 0.147, 20.0, 40.0), (-3.737, -0.939, 3.010, 20.0, 40.0), (-2.623, 0.595, 0.499, 20.0, 40.0), (4.752, -0.340, 6.057, 20.0, 40.0), (2.333, -1.037, 6.240, 20.0, 40.0), (4.234, -1.882, 8.881, 20.0, 40.0), (-3.393, -0.812, 2.927, 20.0, 40.0), (0.885, 1.383, 0.678, 20.0, 40.0), (0.123, 2.937, -2.812, 20.0, 40.0), (2.969, 0.760, 2.964, 20.0, 40.0), (-4.929, 1.251, -1.967, 20.0, 40.0), (1.916, 2.223, -0.488, 20.0, 40.0), (-0.020, -1.740, 6.469, 20.0, 40.0), (0.702, -1.272, 5.895, 20.0, 40.0), (2.496, 2.648, -1.048, 20.0, 40.0), (4.067, -1.475, 7.984, 20.0, 40.0), (-3.717, 1.851, -2.561, 20.0, 40.0), (1.678, -0.624, 5.088, 20.0, 40.0), (1.073, 0.695, 2.146, 20.0, 40.0), (1.842, -0.749, 5.419, 20.0, 40.0), (-3.518, 1.909, -2.578, 20.0, 40.0), (2.229, 1.189, 1.737, 20.0, 40.0), (4.987, 2.893, -0.292, 20.0, 40.0), (-4.809, 1.043, -1.490, 20.0, 40.0), (-0.241, -0.728, 4.334, 20.0, 40.0), (-3.331, 0.590, 0.156, 20.0, 40.0), (-0.455, 2.621, 
-2.470, 20.0, 40.0), (1.492, 1.223, 1.301, 20.0, 40.0), (3.948, 2.841, -0.709, 20.0, 40.0), (0.732, 0.446, 2.475, 20.0, 40.0), (2.400, 2.390, -0.579, 20.0, 40.0), (-2.718, 1.427, -1.213, 20.0, 40.0), (-1.826, 1.451, -0.815, 20.0, 40.0), (1.125, 0.438, 2.686, 20.0, 40.0), (-4.918, 1.880, -3.219, 20.0, 40.0), (3.068, -0.442, 5.418, 20.0, 40.0), (1.982, 1.201, 1.589, 20.0, 40.0), (0.701, -1.709, 6.768, 20.0, 40.0), (-1.496, 2.564, -2.877, 20.0, 40.0), (-3.812, 0.974, -0.853, 20.0, 40.0), (-3.405, 2.018, -2.739, 20.0, 40.0), (2.211, 2.889, -1.674, 20.0, 40.0), (-2.481, 2.931, -4.103, 20.0, 40.0), (-3.721, 2.765, -4.391, 20.0, 40.0), (-1.768, -1.292, 4.699, 20.0, 40.0), (-4.462, 1.058, -1.347, 20.0, 40.0), (-3.516, -1.942, 5.126, 20.0, 40.0), (0.485, 2.420, -1.597, 20.0, 40.0), (-0.492, 0.242, 2.270, 20.0, 40.0), (4.245, 1.689, 1.744, 20.0, 40.0), (2.234, 0.364, 3.389, 20.0, 40.0), (2.629, 2.224, -0.134, 20.0, 40.0), (-4.375, 1.221, -1.630, 20.0, 40.0), (-0.618, 1.374, -0.057, 20.0, 40.0), (-2.580, -1.604, 4.918, 20.0, 40.0), (0.159, 1.104, 0.871, 20.0, 40.0), (-3.597, 0.975, -0.749, 20.0, 40.0); + +DROP TABLE IF EXISTS model; +create table model engine = Memory as select stochasticLinearRegressionState(0.03, 0.00001, 2, 'Momentum')(target, param1, param2) as state from defaults; + +select ans > -67.0 and ans < -66.9 from +(with (select state + state + state from model) as model select evalMLMethod(model, predict1, predict2) as ans from defaults order by all limit 1); + +DROP TABLE defaults; +DROP TABLE model; diff --git a/parser/testdata/00938_ipv6_cidr_range/ast.json b/parser/testdata/00938_ipv6_cidr_range/ast.json new file mode 100644 index 000000000..4eb8ad153 --- /dev/null +++ b/parser/testdata/00938_ipv6_cidr_range/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'check invalid params'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001091834, + "rows_read": 5, + "bytes_read": 191 + } +} diff --git a/parser/testdata/00938_ipv6_cidr_range/metadata.json b/parser/testdata/00938_ipv6_cidr_range/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00938_ipv6_cidr_range/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00938_ipv6_cidr_range/query.sql b/parser/testdata/00938_ipv6_cidr_range/query.sql new file mode 100644 index 000000000..795350565 --- /dev/null +++ b/parser/testdata/00938_ipv6_cidr_range/query.sql @@ -0,0 +1,35 @@ +SELECT 'check invalid params'; +SELECT IPv6CIDRToRange(1, 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT IPv6CIDRToRange('1234', 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT IPv6CIDRToRange(toFixedString('1234', 10), 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT IPv6CIDRToRange(toFixedString('1234', 16), toUInt16(1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 'tests'; + +DROP TABLE IF EXISTS ipv6_range; +CREATE TABLE ipv6_range(ip IPv6, cidr UInt8) ENGINE = Memory; + +INSERT INTO ipv6_range (ip, cidr) VALUES ('2001:0db8:0000:85a3:0000:0000:ac1f:8001', 0), ('2001:0db8:0000:85a3:ffff:ffff:ffff:ffff', 32), ('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff', 16), ('2001:df8:0:85a3::ac1f:8001', 32), ('2001:0db8:85a3:85a3:0000:0000:ac1f:8001', 16), 
('0000:0000:0000:0000:0000:0000:0000:0000', 8), ('ffff:0000:0000:0000:0000:0000:0000:0000', 4); + +WITH IPv6CIDRToRange(IPv6StringToNum('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 32) as ip_range SELECT COUNT(*) FROM ipv6_range WHERE ip BETWEEN tupleElement(ip_range, 1) AND tupleElement(ip_range, 2); + +WITH IPv6CIDRToRange(IPv6StringToNum('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 25) as ip_range SELECT COUNT(*) FROM ipv6_range WHERE ip BETWEEN tupleElement(ip_range, 1) AND tupleElement(ip_range, 2); + +WITH IPv6CIDRToRange(IPv6StringToNum('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 26) as ip_range SELECT COUNT(*) FROM ipv6_range WHERE ip BETWEEN tupleElement(ip_range, 1) AND tupleElement(ip_range, 2); + +WITH IPv6CIDRToRange(IPv6StringToNum('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 64) as ip_range SELECT COUNT(*) FROM ipv6_range WHERE ip BETWEEN tupleElement(ip_range, 1) AND tupleElement(ip_range, 2); + +WITH IPv6CIDRToRange(IPv6StringToNum('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 0) as ip_range SELECT COUNT(*) FROM ipv6_range WHERE ip BETWEEN tupleElement(ip_range, 1) AND tupleElement(ip_range, 2); + +SELECT IPv6NumToString(ip), cidr, IPv6CIDRToRange(ip, cidr) FROM ipv6_range; + +DROP TABLE ipv6_range; + +SELECT IPv6CIDRToRange(IPv6StringToNum('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 0); +SELECT IPv6CIDRToRange(IPv6StringToNum('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 128); +SELECT IPv6CIDRToRange(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'), 64); +SELECT IPv6CIDRToRange(IPv6StringToNum('0000:0000:0000:0000:0000:0000:0000:0000'), 8); +SELECT IPv6CIDRToRange(IPv6StringToNum('ffff:0000:0000:0000:0000:0000:0000:0000'), 4); +SELECT IPv6CIDRToRange(IPv6StringToNum('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 128) = IPv6CIDRToRange(IPv6StringToNum('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), 200) ; + +SELECT IPv6CIDRToRange(IPv6StringToNum('2001:0db8:0000:85a3:0000:0000:ac1f:8001'), toUInt8(128 - number)) FROM numbers(2); diff --git a/parser/testdata/00938_test_retention_function/ast.json b/parser/testdata/00938_test_retention_function/ast.json new file mode 100644 index 000000000..1ab00afa9 --- /dev/null +++ b/parser/testdata/00938_test_retention_function/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery retention_test (children 1)" + }, + { + "explain": " Identifier retention_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001135065, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/00938_test_retention_function/metadata.json b/parser/testdata/00938_test_retention_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00938_test_retention_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00938_test_retention_function/query.sql b/parser/testdata/00938_test_retention_function/query.sql new file mode 100644 index 000000000..450fdcc7f --- /dev/null +++ b/parser/testdata/00938_test_retention_function/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS retention_test; +CREATE TABLE retention_test(date Date, uid Int32)ENGINE = Memory; + +INSERT INTO retention_test SELECT '2018-08-06', number FROM numbers(8); +INSERT INTO retention_test SELECT '2018-08-07', number FROM numbers(6); +INSERT INTO retention_test SELECT '2018-08-08', number FROM numbers(7); + +SELECT uid, retention(date = '2018-08-06', date = '2018-08-07') AS r FROM retention_test WHERE date IN 
('2018-08-06', '2018-08-07') GROUP BY uid ORDER BY uid LIMIT 5; +SELECT '----------'; +SELECT uid, retention(1, date = '2018-08-06', date = '2018-08-07') AS r FROM retention_test WHERE date IN ('2018-08-06', '2018-08-07') GROUP BY uid ORDER BY uid LIMIT 5; +SELECT '----------'; +SELECT uid, retention(uid % 2 = 0, date = '2018-08-06', date = '2018-08-07') AS r FROM retention_test WHERE date IN ('2018-08-06', '2018-08-07') GROUP BY uid ORDER BY uid LIMIT 5; + +DROP TABLE IF EXISTS retention_test; diff --git a/parser/testdata/00939_limit_by_offset/ast.json b/parser/testdata/00939_limit_by_offset/ast.json new file mode 100644 index 000000000..068eec55e --- /dev/null +++ b/parser/testdata/00939_limit_by_offset/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery limit_by (children 1)" + }, + { + "explain": " Identifier limit_by" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001106542, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00939_limit_by_offset/metadata.json b/parser/testdata/00939_limit_by_offset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00939_limit_by_offset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00939_limit_by_offset/query.sql b/parser/testdata/00939_limit_by_offset/query.sql new file mode 100644 index 000000000..c5c520d26 --- /dev/null +++ b/parser/testdata/00939_limit_by_offset/query.sql @@ -0,0 +1,12 @@ +drop table if exists limit_by; + +create table limit_by(id Int, val Int) engine = MergeTree ORDER BY tuple(); + +insert into limit_by values(1, 100), (1, 110), (1, 120), (1, 130), (2, 200), (2, 210), (2, 220), (3, 300); + +select * from limit_by order by id, val limit 2, 2 by id; +select * from limit_by order by id, val limit 2 offset 1 by id; +select * from limit_by order by id, val limit 1, 2 by id limit 3; +select * from limit_by order by id, val limit 1, 2 by id limit 3 offset 1; + +drop table limit_by; diff --git a/parser/testdata/00939_test_null_in/ast.json b/parser/testdata/00939_test_null_in/ast.json new file mode 100644 index 000000000..6c7b59b5d --- /dev/null +++ b/parser/testdata/00939_test_null_in/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nullt (children 1)" + }, + { + "explain": " Identifier nullt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001119702, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/00939_test_null_in/metadata.json b/parser/testdata/00939_test_null_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00939_test_null_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00939_test_null_in/query.sql b/parser/testdata/00939_test_null_in/query.sql new file mode 100644 index 000000000..34dc01f82 --- /dev/null +++ b/parser/testdata/00939_test_null_in/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS nullt; + +CREATE TABLE nullt (c1 Nullable(UInt32), c2 Nullable(String))ENGINE = Log; +INSERT INTO nullt VALUES (1, 'abc'), (2, NULL), (NULL, NULL); + +SELECT c2 = ('abc') FROM nullt; +SELECT c2 IN ('abc') FROM nullt; + +SELECT c2 IN ('abc', NULL) FROM nullt; + +DROP TABLE IF EXISTS nullt; diff --git a/parser/testdata/00940_max_parts_in_total/ast.json b/parser/testdata/00940_max_parts_in_total/ast.json new file mode 100644 index 
000000000..92d1f3029 --- /dev/null +++ b/parser/testdata/00940_max_parts_in_total/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery max_parts_in_total (children 1)" + }, + { + "explain": " Identifier max_parts_in_total" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001174174, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/00940_max_parts_in_total/metadata.json b/parser/testdata/00940_max_parts_in_total/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00940_max_parts_in_total/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00940_max_parts_in_total/query.sql b/parser/testdata/00940_max_parts_in_total/query.sql new file mode 100644 index 000000000..955f593c9 --- /dev/null +++ b/parser/testdata/00940_max_parts_in_total/query.sql @@ -0,0 +1,9 @@ +drop table if exists max_parts_in_total; +create table max_parts_in_total (x UInt64) ENGINE = MergeTree PARTITION BY x ORDER BY x SETTINGS max_parts_in_total = 10; + +SET max_insert_threads = 1; +INSERT INTO max_parts_in_total SELECT number FROM numbers(10); +SELECT 1; +INSERT INTO max_parts_in_total SELECT 123; -- { serverError TOO_MANY_PARTS } + +drop table max_parts_in_total; diff --git a/parser/testdata/00940_order_by_read_in_order/ast.json b/parser/testdata/00940_order_by_read_in_order/ast.json new file mode 100644 index 000000000..fab15fb03 --- /dev/null +++ b/parser/testdata/00940_order_by_read_in_order/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery pk_order (children 1)" + }, + { + "explain": " Identifier pk_order" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001392495, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00940_order_by_read_in_order/metadata.json b/parser/testdata/00940_order_by_read_in_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00940_order_by_read_in_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00940_order_by_read_in_order/query.sql b/parser/testdata/00940_order_by_read_in_order/query.sql new file mode 100644 index 000000000..df872033d --- /dev/null +++ b/parser/testdata/00940_order_by_read_in_order/query.sql @@ -0,0 +1,55 @@ +DROP TABLE IF EXISTS pk_order; + +SET optimize_read_in_order = 1; + +CREATE TABLE pk_order(a UInt64, b UInt64, c UInt64, d UInt64) ENGINE=MergeTree() ORDER BY (a, b); +INSERT INTO pk_order(a, b, c, d) VALUES (1, 1, 101, 1), (1, 2, 102, 1), (1, 3, 103, 1), (1, 4, 104, 1); +INSERT INTO pk_order(a, b, c, d) VALUES (1, 5, 104, 1), (1, 6, 105, 1), (2, 1, 106, 2), (2, 1, 107, 2); + +INSERT INTO pk_order(a, b, c, d) VALUES (2, 2, 107, 2), (2, 3, 108, 2), (2, 4, 109, 2); + +SELECT b FROM pk_order ORDER BY a, b; +SELECT a FROM pk_order ORDER BY a, b; + +SELECT a, b FROM pk_order ORDER BY a, b; +SELECT a, b FROM pk_order ORDER BY a DESC, b; +SELECT a, b FROM pk_order ORDER BY a, b DESC; +SELECT a, b FROM pk_order ORDER BY a DESC, b DESC; +SELECT a FROM pk_order ORDER BY a DESC; + +SELECT a, b, c FROM pk_order ORDER BY a, b, c; +SELECT a, b, c FROM pk_order ORDER BY a DESC, b, c; +SELECT a, b, c FROM pk_order ORDER BY a, b DESC, c; +SELECT a, b, c FROM pk_order ORDER BY a, b, c DESC; +SELECT a, b, c FROM pk_order ORDER BY a DESC, b DESC, c; +SELECT a, b, c FROM pk_order ORDER BY a DESC, b, c 
DESC; +SELECT a, b, c FROM pk_order ORDER BY a, b DESC, c DESC; +SELECT a, b, c FROM pk_order ORDER BY a DESC, b DESC, c DESC; + +DROP TABLE IF EXISTS pk_order; + +CREATE TABLE pk_order (d DateTime, a Int32, b Int32) ENGINE = MergeTree ORDER BY (d, a) + PARTITION BY toDate(d) SETTINGS index_granularity=1; + +INSERT INTO pk_order + SELECT toDateTime('2019-05-05 00:00:00') + INTERVAL number % 10 DAY, number, intHash32(number) from numbers(100); + +set max_block_size = 1; + +-- Currently, checking the number of rows read while reading in PK order is not precise. TODO: fix it. +-- SET max_rows_to_read = 10; + +SELECT d FROM pk_order ORDER BY d LIMIT 5; +SELECT d, b FROM pk_order ORDER BY d, b LIMIT 5; +SELECT d, a FROM pk_order ORDER BY d DESC, a DESC LIMIT 5; +SELECT d, a FROM pk_order ORDER BY d DESC, -a LIMIT 5; +SELECT d, a FROM pk_order ORDER BY d DESC, a DESC LIMIT 5; +SELECT toStartOfHour(d) as d1 FROM pk_order ORDER BY d1 LIMIT 5; + +DROP TABLE pk_order; + +CREATE TABLE pk_order (a Int, b Int) ENGINE = MergeTree ORDER BY (a / b); +INSERT INTO pk_order SELECT number % 10 + 1, number % 6 + 1 from numbers(100); +SELECT * FROM pk_order ORDER BY (a / b), a LIMIT 5; + +DROP TABLE pk_order; diff --git a/parser/testdata/00940_order_by_read_in_order_query_plan/ast.json b/parser/testdata/00940_order_by_read_in_order_query_plan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00940_order_by_read_in_order_query_plan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00940_order_by_read_in_order_query_plan/metadata.json b/parser/testdata/00940_order_by_read_in_order_query_plan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00940_order_by_read_in_order_query_plan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00940_order_by_read_in_order_query_plan/query.sql b/parser/testdata/00940_order_by_read_in_order_query_plan/query.sql new file mode 100644 index 000000000..81f27f3c4 --- /dev/null +++ b/parser/testdata/00940_order_by_read_in_order_query_plan/query.sql @@ -0,0 +1,156 @@ +SET optimize_read_in_order = 1, query_plan_read_in_order = 1, enable_analyzer = 0; + +drop table if exists tab; +drop table if exists tab2; +drop table if exists tab3; +drop table if exists tab4; +drop table if exists tab5; + +create table tab (a UInt32, b UInt32, c UInt32, d UInt32) engine = MergeTree order by ((a + b) * c, sin(a / b)); +insert into tab select number, number, number, number from numbers(5); +insert into tab select number, number, number, number from numbers(5); + +-- { echoOn } + +-- Exact match, single key +select * from tab order by (a + b) * c; +select * from (explain plan actions = 1 select * from tab order by (a + b) * c) where explain like '%sort description%'; + +select * from tab order by (a + b) * c desc; +select * from (explain plan actions = 1 select * from tab order by (a + b) * c desc) where explain like '%sort description%'; + +-- Exact match, full key +select * from tab order by (a + b) * c, sin(a / b); +select * from (explain plan actions = 1 select * from tab order by (a + b) * c, sin(a / b)) where explain like '%sort description%'; + +select * from tab order by (a + b) * c desc, sin(a / b) desc nulls first; +select * from (explain plan actions = 1 select * from tab order by (a + b) * c desc, sin(a / b) desc nulls first) where explain like '%sort description%'; + +-- Exact match, mixed direction +select * from tab order by (a + b) * c desc, sin(a / b);
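+-- Editor's note (illustrative sketch, not part of the original test): read-in-order
+-- matches the ORDER BY clause against a prefix of the table's sorting key.
+-- Assuming a hypothetical table t with sorting key (x, y):
+--   create table t (x UInt32, y UInt32) engine = MergeTree order by (x, y);
+--   select * from t order by x;  -- prefix match: parts can be streamed in key order
+--   select * from t order by y;  -- no prefix match: a full sort is required
+-- Whether the mixed-direction query above can still read in order is exactly what
+-- the following EXPLAIN checks via its sort description.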
+select * from (explain plan actions = 1 select * from tab order by (a + b) * c desc, sin(a / b)) where explain like '%sort description%'; + +select * from tab order by (a + b) * c, sin(a / b) desc; +select * from (explain plan actions = 1 select * from tab order by (a + b) * c, sin(a / b) desc) where explain like '%sort description%'; + +-- Wrong order, full sort +select * from tab order by sin(a / b), (a + b) * c; +select * from (explain plan actions = 1 select * from tab order by sin(a / b), (a + b) * c) where explain ilike '%sort description%'; + +-- Fixed point +select * from tab where (a + b) * c = 8 order by sin(a / b); +select * from (explain plan actions = 1 select * from tab where (a + b) * c = 8 order by sin(a / b)) where explain ilike '%sort description%'; + +select * from tab where d + 1 = 2 order by (d + 1) * 4, (a + b) * c; +select * from (explain plan actions = 1 select * from tab where d + 1 = 2 order by (d + 1) * 4, (a + b) * c) where explain ilike '%sort description%'; + +select * from tab where d + 1 = 3 and (a + b) = 4 and c = 2 order by (d + 1) * 4, sin(a / b); +select * from (explain plan actions = 1 select * from tab where d + 1 = 3 and (a + b) = 4 and c = 2 order by (d + 1) * 4, sin(a / b)) where explain ilike '%sort description%'; + +-- Wrong order with fixed point +select * from tab where (a + b) * c = 8 order by sin(b / a); +select * from (explain plan actions = 1 select * from tab where (a + b) * c = 8 order by sin(b / a)) where explain ilike '%sort description%'; + +-- Monotonicity +select * from tab order by intDiv((a + b) * c, 2); +select * from (explain plan actions = 1 select * from tab order by intDiv((a + b) * c, 2)) where explain like '%sort description%'; + +select * from tab order by intDiv((a + b) * c, 2), sin(a / b); +select * from (explain plan actions = 1 select * from tab order by intDiv((a + b) * c, 2), sin(a / b)) where explain like '%sort description%'; + +-- select * from tab order by (a + b) * c, intDiv(sin(a / b), 2); +select * from (explain plan actions = 1 select * from tab order by (a + b) * c, intDiv(sin(a / b), 2)) where explain like '%sort description%'; + +-- select * from tab order by (a + b) * c desc , intDiv(sin(a / b), 2); +select * from (explain plan actions = 1 select * from tab order by (a + b) * c desc , intDiv(sin(a / b), 2)) where explain like '%sort description%'; + +-- select * from tab order by (a + b) * c, intDiv(sin(a / b), 2) desc; +select * from (explain plan actions = 1 select * from tab order by (a + b) * c, intDiv(sin(a / b), 2) desc) where explain like '%sort description%'; + +-- select * from tab order by (a + b) * c desc, intDiv(sin(a / b), 2) desc; +select * from (explain plan actions = 1 select * from tab order by (a + b) * c desc, intDiv(sin(a / b), 2) desc nulls first) where explain like '%sort description%'; + +-- select * from tab order by (a + b) * c desc, intDiv(sin(a / b), -2); +select * from (explain plan actions = 1 select * from tab order by (a + b) * c desc, intDiv(sin(a / b), -2)) where explain like '%sort description%'; + +-- select * from tab order by (a + b) * c desc, intDiv(intDiv(sin(a / b), -2), -3); +select * from (explain plan actions = 1 select * from tab order by (a + b) * c desc, intDiv(intDiv(sin(a / b), -2), -3)) where explain like '%sort description%'; + +-- select * from tab order by (a + b) * c, intDiv(intDiv(sin(a / b), -2), -3); +select * from (explain plan actions = 1 select * from tab order by (a + b) * c, intDiv(intDiv(sin(a / b), -2), -3)) where explain like '%sort 
description%'; + +-- Aliases +select * from (select *, a + b as x from tab) order by x * c; +select * from (explain plan actions = 1 select * from (select *, a + b as x from tab) order by x * c) where explain like '%sort description%'; + +select * from (select *, a + b as x, a / b as y from tab) order by x * c, sin(y); +select * from (explain plan actions = 1 select * from (select *, a + b as x, a / b as y from tab) order by x * c, sin(y)) where explain like '%sort description%'; + +select * from (select *, a / b as y from (select *, a + b as x from tab)) order by x * c, sin(y); +select * from (explain plan actions = 1 select * from (select *, a / b as y from (select *, a + b as x from tab)) order by x * c, sin(y)) where explain like '%sort description%'; + +-- { echoOff } + +create table tab2 (x DateTime, y UInt32, z UInt32) engine = MergeTree order by (x, y); +insert into tab2 select toDate('2020-02-02') + number, number, number from numbers(4); +insert into tab2 select toDate('2020-02-02') + number, number, number from numbers(4); + +-- { echoOn } + +select * from tab2 order by toTimeZone(toTimezone(x, 'UTC'), 'CET'), intDiv(intDiv(y, -2), -3); +select * from (explain plan actions = 1 select * from tab2 order by toTimeZone(toTimezone(x, 'UTC'), 'CET'), intDiv(intDiv(y, -2), -3)) where explain like '%sort description%'; + +select * from tab2 order by toStartOfDay(x), intDiv(intDiv(y, -2), -3); +select * from (explain plan actions = 1 select * from tab2 order by toStartOfDay(x), intDiv(intDiv(y, -2), -3)) where explain like '%sort description%'; + +-- select * from tab2 where toTimezone(x, 'CET') = '2020-02-03 01:00:00' order by intDiv(intDiv(y, -2), -3); +select * from (explain plan actions = 1 select * from tab2 where toTimezone(x, 'CET') = '2020-02-03 01:00:00' order by intDiv(intDiv(y, -2), -3)) where explain like '%sort description%'; + +-- { echoOff } + +create table tab3 (a UInt32, b UInt32, c UInt32, d UInt32) engine = MergeTree order by ((a + b) * c, sin(a / b)); +insert into tab3 select number, number, number, number from numbers(5); +insert into tab3 select number, number, number, number from numbers(5); + +create table tab4 (a UInt32, b UInt32, c UInt32, d UInt32) engine = MergeTree order by sin(a / b); +insert into tab4 select number, number, number, number from numbers(5); +insert into tab4 select number, number, number, number from numbers(5); + +create table tab5 (a UInt32, b UInt32, c UInt32, d UInt32) engine = MergeTree order by (a + b) * c; +insert into tab5 select number, number, number, number from numbers(5); +insert into tab5 select number, number, number, number from numbers(5); + +-- { echoOn } + +-- Union (not fully supported) +select * from (select * from tab union all select * from tab3) order by (a + b) * c, sin(a / b); +select * from (explain plan actions = 1 select * from (select * from tab union all select * from tab3) order by (a + b) * c, sin(a / b)) where explain like '%sort description%' or explain like '%ReadType%'; + +select * from (select * from tab where (a + b) * c = 8 union all select * from tab3 where (a + b) * c = 18) order by sin(a / b); +select * from (explain plan actions = 1 select * from (select * from tab where (a + b) * c = 8 union all select * from tab3 where (a + b) * c = 18) order by sin(a / b)) where explain like '%sort description%' or explain like '%ReadType%'; + +select * from (select * from tab where (a + b) * c = 8 union all select * from tab4) order by sin(a / b); +select * from (explain plan actions = 1 select * from (select * 
from tab where (a + b) * c = 8 union all select * from tab4) order by sin(a / b)) where explain like '%sort description%' or explain like '%ReadType%'; + +select * from (select * from tab union all select * from tab5) order by (a + b) * c; +select * from (explain plan actions = 1 select * from (select * from tab union all select * from tab5) order by (a + b) * c) where explain like '%sort description%' or explain like '%ReadType%'; + +select * from (select * from tab union all select * from tab5) order by (a + b) * c, sin(a / b); +select * from (explain plan actions = 1 select * from (select * from tab union all select * from tab5) order by (a + b) * c, sin(a / b)) where explain like '%sort description%' or explain like '%ReadType%'; + +-- Union with limit +select * from (select * from tab union all select * from tab5) order by (a + b) * c, sin(a / b) limit 3; +select * from (explain plan actions = 1 select * from (select * from tab union all select * from tab5) order by (a + b) * c, sin(a / b) limit 3) where explain ilike '%sort description%' or explain like '%ReadType%' or explain like '%Limit%'; + +-- In this example, we read in order from tab up to ((a + b) * c, sin(a / b)) and from tab5 up to ((a + b) * c). +-- In the case of tab5, there would be two finish-sorting transforms: ((a + b) * c) -> ((a + b) * c, sin(a / b)) -> ((a + b) * c, sin(a / b), d). +-- It's important that ((a + b) * c) -> ((a + b) * c, sin(a / b)) does not have a LIMIT. We can add LIMIT WITH TIES later, when the sorting algorithm supports it. +-- In the case of tab4, we do a full sort by ((a + b) * c, sin(a / b), d) with LIMIT. We can replace it with a sort by ((a + b) * c, sin(a / b)) and LIMIT WITH TIES, when the sorting algorithm supports it. +select * from (select * from tab union all select * from tab5 union all select * from tab4) order by (a + b) * c, sin(a / b), d limit 3; +select * from (explain plan actions = 1 select * from (select * from tab union all select * from tab5 union all select * from tab4) order by (a + b) * c, sin(a / b), d limit 3) where explain ilike '%sort description%' or explain like '%ReadType%' or explain like '%Limit%'; + +drop table if exists tab; +drop table if exists tab2; +drop table if exists tab3; +drop table if exists tab4; +drop table if exists tab5; diff --git a/parser/testdata/00941_to_custom_week/ast.json b/parser/testdata/00941_to_custom_week/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00941_to_custom_week/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00941_to_custom_week/metadata.json b/parser/testdata/00941_to_custom_week/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00941_to_custom_week/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00941_to_custom_week/query.sql b/parser/testdata/00941_to_custom_week/query.sql new file mode 100644 index 000000000..3281ed23f --- /dev/null +++ b/parser/testdata/00941_to_custom_week/query.sql @@ -0,0 +1,78 @@ +-- Week modes 0..7; the cases below 
refer to the mysql test case +SELECT toWeek(toDate('1998-01-01')), toWeek(toDate('1997-01-01')), toWeek(toDate('1998-01-01'), 1), toWeek(toDate('1997-01-01'), 1); +SELECT toWeek(toDate('1998-12-31')), toWeek(toDate('1997-12-31')), toWeek(toDate('1998-12-31'), 1), toWeek(toDate('1997-12-31'), 1); +SELECT toWeek(toDate('1995-01-01')), toWeek(toDate('1995-01-01'), 1); +SELECT toYearWeek(toDate('1981-12-31'), 1), toYearWeek(toDate('1982-01-01'), 1), toYearWeek(toDate('1982-12-31'), 1), toYearWeek(toDate('1983-01-01'), 1); +SELECT toYearWeek(toDate('1987-01-01'), 1), toYearWeek(toDate('1987-01-01')); + +SELECT toWeek(toDate('2000-01-01'),0) AS w2000, toWeek(toDate('2001-01-01'),0) AS w2001, toWeek(toDate('2002-01-01'),0) AS w2002,toWeek(toDate('2003-01-01'),0) AS w2003, toWeek(toDate('2004-01-01'),0) AS w2004, toWeek(toDate('2005-01-01'),0) AS w2005, toWeek(toDate('2006-01-01'),0) AS w2006; +SELECT toWeek(toDate('2000-01-06'),0) AS w2000, toWeek(toDate('2001-01-06'),0) AS w2001, toWeek(toDate('2002-01-06'),0) AS w2002,toWeek(toDate('2003-01-06'),0) AS w2003, toWeek(toDate('2004-01-06'),0) AS w2004, toWeek(toDate('2005-01-06'),0) AS w2005, toWeek(toDate('2006-01-06'),0) AS w2006; +SELECT toWeek(toDate('2000-01-01'),1) AS w2000, toWeek(toDate('2001-01-01'),1) AS w2001, toWeek(toDate('2002-01-01'),1) AS w2002,toWeek(toDate('2003-01-01'),1) AS w2003, toWeek(toDate('2004-01-01'),1) AS w2004, toWeek(toDate('2005-01-01'),1) AS w2005, toWeek(toDate('2006-01-01'),1) AS w2006; +SELECT toWeek(toDate('2000-01-06'),1) AS w2000, toWeek(toDate('2001-01-06'),1) AS w2001, toWeek(toDate('2002-01-06'),1) AS w2002,toWeek(toDate('2003-01-06'),1) AS w2003, toWeek(toDate('2004-01-06'),1) AS w2004, toWeek(toDate('2005-01-06'),1) AS w2005, toWeek(toDate('2006-01-06'),1) AS w2006; +SELECT toYearWeek(toDate('2000-01-01'),0) AS w2000, toYearWeek(toDate('2001-01-01'),0) AS w2001, toYearWeek(toDate('2002-01-01'),0) AS w2002,toYearWeek(toDate('2003-01-01'),0) AS w2003, toYearWeek(toDate('2004-01-01'),0) AS w2004, toYearWeek(toDate('2005-01-01'),0) AS w2005, toYearWeek(toDate('2006-01-01'),0) AS w2006; +SELECT toYearWeek(toDate('2000-01-06'),0) AS w2000, toYearWeek(toDate('2001-01-06'),0) AS w2001, toYearWeek(toDate('2002-01-06'),0) AS w2002,toYearWeek(toDate('2003-01-06'),0) AS w2003, toYearWeek(toDate('2004-01-06'),0) AS w2004, toYearWeek(toDate('2005-01-06'),0) AS w2005, toYearWeek(toDate('2006-01-06'),0) AS w2006; +SELECT toYearWeek(toDate('2000-01-01'),1) AS w2000, toYearWeek(toDate('2001-01-01'),1) AS w2001, toYearWeek(toDate('2002-01-01'),1) AS w2002,toYearWeek(toDate('2003-01-01'),1) AS w2003, toYearWeek(toDate('2004-01-01'),1) AS w2004, toYearWeek(toDate('2005-01-01'),1) AS w2005, toYearWeek(toDate('2006-01-01'),1) AS w2006; +SELECT toYearWeek(toDate('2000-01-06'),1) AS w2000, toYearWeek(toDate('2001-01-06'),1) AS w2001, toYearWeek(toDate('2002-01-06'),1) AS w2002,toYearWeek(toDate('2003-01-06'),1) AS w2003, toYearWeek(toDate('2004-01-06'),1) AS w2004, toYearWeek(toDate('2005-01-06'),1) AS w2005, toYearWeek(toDate('2006-01-06'),1) AS w2006; +SELECT toWeek(toDate('1998-12-31'),2),toWeek(toDate('1998-12-31'),3), toWeek(toDate('2000-01-01'),2), toWeek(toDate('2000-01-01'),3); +SELECT toWeek(toDate('2000-12-31'),2),toWeek(toDate('2000-12-31'),3); + +SELECT toWeek(toDate('1998-12-31'),0) AS w0, toWeek(toDate('1998-12-31'),1) AS w1, toWeek(toDate('1998-12-31'),2) AS w2, toWeek(toDate('1998-12-31'),3) AS w3, toWeek(toDate('1998-12-31'),4) AS w4, toWeek(toDate('1998-12-31'),5) AS w5, toWeek(toDate('1998-12-31'),6) AS w6, 
toWeek(toDate('1998-12-31'),7) AS w7; +SELECT toWeek(toDate('2000-01-01'),0) AS w0, toWeek(toDate('2000-01-01'),1) AS w1, toWeek(toDate('2000-01-01'),2) AS w2, toWeek(toDate('2000-01-01'),3) AS w3, toWeek(toDate('2000-01-01'),4) AS w4, toWeek(toDate('2000-01-01'),5) AS w5, toWeek(toDate('2000-01-01'),6) AS w6, toWeek(toDate('2000-01-01'),7) AS w7; +SELECT toWeek(toDate('2000-01-06'),0) AS w0, toWeek(toDate('2000-01-06'),1) AS w1, toWeek(toDate('2000-01-06'),2) AS w2, toWeek(toDate('2000-01-06'),3) AS w3, toWeek(toDate('2000-01-06'),4) AS w4, toWeek(toDate('2000-01-06'),5) AS w5, toWeek(toDate('2000-01-06'),6) AS w6, toWeek(toDate('2000-01-06'),7) AS w7; +SELECT toWeek(toDate('2000-12-31'),0) AS w0, toWeek(toDate('2000-12-31'),1) AS w1, toWeek(toDate('2000-12-31'),2) AS w2, toWeek(toDate('2000-12-31'),3) AS w3, toWeek(toDate('2000-12-31'),4) AS w4, toWeek(toDate('2000-12-31'),5) AS w5, toWeek(toDate('2000-12-31'),6) AS w6, toWeek(toDate('2000-12-31'),7) AS w7; +SELECT toWeek(toDate('2001-01-01'),0) AS w0, toWeek(toDate('2001-01-01'),1) AS w1, toWeek(toDate('2001-01-01'),2) AS w2, toWeek(toDate('2001-01-01'),3) AS w3, toWeek(toDate('2001-01-01'),4) AS w4, toWeek(toDate('2001-01-01'),5) AS w5, toWeek(toDate('2001-01-01'),6) AS w6, toWeek(toDate('2001-01-01'),7) AS w7; + +SELECT toYearWeek(toDate('2000-12-31'),0), toYearWeek(toDate('2000-12-31'),1), toYearWeek(toDate('2000-12-31'),2), toYearWeek(toDate('2000-12-31'),3), toYearWeek(toDate('2000-12-31'),4), toYearWeek(toDate('2000-12-31'),5), toYearWeek(toDate('2000-12-31'),6), toYearWeek(toDate('2000-12-31'),7); + +-- week mode 8,9 +SELECT + toDate('2016-12-21') + number AS d, + toWeek(d, 8) AS week8, + toWeek(d, 9) AS week9, + toYearWeek(d, 8) AS yearWeek8, + toYearWeek(d, 9) AS yearWeek9 +FROM numbers(21); + +SELECT toDateTime(toDate('2016-12-22') + number, 'Asia/Istanbul' ) AS d, + toWeek(d, 8, 'Asia/Istanbul') AS week8, + toWeek(d, 9, 'Asia/Istanbul') AS week9, + toYearWeek(d, 8, 'Asia/Istanbul') AS yearWeek8, + toYearWeek(d, 9, 'Asia/Istanbul') AS yearWeek9 +FROM numbers(21); + +-- toStartOfWeek +WITH + toDate('2018-12-25') + number AS d, + toDate32(d) AS d32, + toDateTime(d) AS dt, + toDateTime64(d, 0) AS dt64 +SELECT + dt64, + toStartOfWeek(d) AS wd_sun, + toStartOfWeek(d32) AS wd32_sun, + toStartOfWeek(dt) AS wdt_sun, + toStartOfWeek(dt64) AS wdt64_sun, + toStartOfWeek(d, 1) AS wd_mon, + toStartOfWeek(d32, 1) AS wd32_mon, + toStartOfWeek(dt, 1) AS wdt_mon, + toStartOfWeek(dt64, 1) AS wdt64_mon +FROM numbers(10); + +-- toLastDayOfWeek +WITH + toDate('2018-12-25') + number AS d, + toDate32(d) AS d32, + toDateTime(d) AS dt, + toDateTime64(d, 0) AS dt64 +SELECT + dt64, + toLastDayOfWeek(d) AS wd_sun, + toLastDayOfWeek(d32) AS wd32_sun, + toLastDayOfWeek(dt) AS wdt_sun, + toLastDayOfWeek(dt64) AS wdt64_sun, + toLastDayOfWeek(d, 1) AS wd_mon, + toLastDayOfWeek(d32, 1) AS wd32_mon, + toLastDayOfWeek(dt, 1) AS wdt_mon, + toLastDayOfWeek(dt64, 1) AS wdt64_mon +FROM numbers(10); + diff --git a/parser/testdata/00942_mv_rename_table/ast.json b/parser/testdata/00942_mv_rename_table/ast.json new file mode 100644 index 000000000..887ffab55 --- /dev/null +++ b/parser/testdata/00942_mv_rename_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery src_00942 (children 1)" + }, + { + "explain": " Identifier src_00942" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001348007, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git 
a/parser/testdata/00942_mv_rename_table/metadata.json b/parser/testdata/00942_mv_rename_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00942_mv_rename_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00942_mv_rename_table/query.sql b/parser/testdata/00942_mv_rename_table/query.sql new file mode 100644 index 000000000..c0531d98b --- /dev/null +++ b/parser/testdata/00942_mv_rename_table/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS src_00942; +DROP TABLE IF EXISTS view_table_00942; +DROP TABLE IF EXISTS new_view_table_00942; + +CREATE TABLE src_00942 (x UInt8) ENGINE = Null; + +CREATE MATERIALIZED VIEW view_table_00942 Engine = Memory AS SELECT * FROM src_00942; + +INSERT INTO src_00942 VALUES (1), (2), (3); +SELECT * FROM view_table_00942 ORDER BY x; + +-- Check that we can rename the view and still fetch data from it + +RENAME TABLE view_table_00942 TO new_view_table_00942; +SELECT * FROM new_view_table_00942 ORDER BY x; + +DROP TABLE src_00942; +DROP TABLE IF EXISTS view_table_00942; +DROP TABLE IF EXISTS new_view_table_00942; diff --git a/parser/testdata/00943_mv_rename_without_inner_table/ast.json b/parser/testdata/00943_mv_rename_without_inner_table/ast.json new file mode 100644 index 000000000..9ac1f9316 --- /dev/null +++ b/parser/testdata/00943_mv_rename_without_inner_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery src (children 1)" + }, + { + "explain": " Identifier src" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001431417, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/00943_mv_rename_without_inner_table/metadata.json b/parser/testdata/00943_mv_rename_without_inner_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00943_mv_rename_without_inner_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00943_mv_rename_without_inner_table/query.sql b/parser/testdata/00943_mv_rename_without_inner_table/query.sql new file mode 100644 index 000000000..b7bd2ffd5 --- /dev/null +++ b/parser/testdata/00943_mv_rename_without_inner_table/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS src; +DROP TABLE IF EXISTS dst; +DROP TABLE IF EXISTS original_mv; +DROP TABLE IF EXISTS new_mv; + +CREATE TABLE src (x UInt8) ENGINE = Null; +CREATE TABLE dst (x UInt8) ENGINE = Memory; + +CREATE MATERIALIZED VIEW original_mv TO dst AS SELECT * FROM src; + +INSERT INTO src VALUES (1), (2); +SELECT * FROM original_mv ORDER BY x; + +RENAME TABLE original_mv TO new_mv; + +INSERT INTO src VALUES (3); +SELECT * FROM dst ORDER BY x; + +SELECT * FROM new_mv ORDER BY x; + +DROP TABLE IF EXISTS src; +DROP TABLE IF EXISTS dst; +DROP TABLE IF EXISTS original_mv; +DROP TABLE IF EXISTS new_mv; diff --git a/parser/testdata/00944_minmax_nan/ast.json b/parser/testdata/00944_minmax_nan/ast.json new file mode 100644 index 000000000..3851ebaff --- /dev/null +++ b/parser/testdata/00944_minmax_nan/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001829823, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00944_minmax_nan/metadata.json b/parser/testdata/00944_minmax_nan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/00944_minmax_nan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00944_minmax_nan/query.sql b/parser/testdata/00944_minmax_nan/query.sql new file mode 100644 index 000000000..bb6343162 --- /dev/null +++ b/parser/testdata/00944_minmax_nan/query.sql @@ -0,0 +1,62 @@ +SET parallel_replicas_local_plan = 1; + +-- Test for issue #75523 + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab ( + id UInt64, + col Float, + INDEX col_idx col TYPE minmax +) +ENGINE = MergeTree() +ORDER BY id; -- This is important. We want to have an additional primary index that does not use the column `col`. + +INSERT INTO tab VALUES + (1, 1.0), + (2, inf), + (3, 2.0), + (4, -inf), + (5, 3.0), + (6, nan), + (7, -nan); + +SELECT 'NaN comparison'; +SELECT count() FROM tab WHERE col = nan; +SELECT count() FROM tab WHERE col <> nan; +SELECT count() FROM tab WHERE col = -nan; +SELECT count() FROM tab WHERE col <> -nan; +SELECT count() FROM tab WHERE isNaN(col); +SELECT count() FROM tab WHERE NOT isNaN(col); + +SELECT 'MinMax index should skip all granules for column = NaN comparison'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE col = nan +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; -- Skip the primary index parts and granules. + +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE col = -nan +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; -- Skip the primary index parts and granules. + +SELECT 'MinMax index should use all granules for column <> NaN comparison'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE col <> nan +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; -- Skip the primary index parts and granules. + +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE col <> -nan +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; -- Skip the primary index parts and granules. 
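+-- Editor's note (illustrative, not part of the original test): under IEEE-754
+-- semantics NaN is unordered, so `col = nan` can never match a row, and a minmax
+-- range [min, max] can never contain NaN; isNaN() is the reliable membership test:
+--   SELECT nan = nan;   -- 0: equality with NaN is always false
+--   SELECT isNaN(nan);  -- 1: the dedicated predicate does match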
+ +DROP TABLE tab; diff --git a/parser/testdata/00944_minmax_null/ast.json b/parser/testdata/00944_minmax_null/ast.json new file mode 100644 index 000000000..6e9e6fc7e --- /dev/null +++ b/parser/testdata/00944_minmax_null/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery min_max_with_nullable_string (children 1)" + }, + { + "explain": " Identifier min_max_with_nullable_string" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001673662, + "rows_read": 2, + "bytes_read": 108 + } +} diff --git a/parser/testdata/00944_minmax_null/metadata.json b/parser/testdata/00944_minmax_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00944_minmax_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00944_minmax_null/query.sql b/parser/testdata/00944_minmax_null/query.sql new file mode 100644 index 000000000..71ae2e772 --- /dev/null +++ b/parser/testdata/00944_minmax_null/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS min_max_with_nullable_string; + +CREATE TABLE min_max_with_nullable_string ( + t DateTime, + nullable_str Nullable(String), + INDEX nullable_str_min_max nullable_str TYPE minmax GRANULARITY 1 +) ENGINE = MergeTree ORDER BY (t); + +INSERT INTO min_max_with_nullable_string(t) VALUES (now()) (now()); + +SELECT count() FROM min_max_with_nullable_string WHERE nullable_str = '.'; + +INSERT INTO min_max_with_nullable_string(t, nullable_str) VALUES (now(), '.') (now(), '.'); + +SELECT count() FROM min_max_with_nullable_string WHERE nullable_str = '.'; + +INSERT INTO min_max_with_nullable_string(t, nullable_str) VALUES (now(), NULL) (now(), '.') (now(), NULL) (now(), '.') (now(), NULL); + +SELECT count() FROM min_max_with_nullable_string WHERE nullable_str = '.'; + +SELECT count() FROM min_max_with_nullable_string WHERE nullable_str = ''; + +DROP TABLE min_max_with_nullable_string; diff --git a/parser/testdata/00944_ml_test/ast.json b/parser/testdata/00944_ml_test/ast.json new file mode 100644 index 000000000..0e28441dd --- /dev/null +++ b/parser/testdata/00944_ml_test/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery defaults (children 1)" + }, + { + "explain": " Identifier defaults" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001674774, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00944_ml_test/metadata.json b/parser/testdata/00944_ml_test/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00944_ml_test/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00944_ml_test/query.sql b/parser/testdata/00944_ml_test/query.sql new file mode 100644 index 000000000..657d31222 --- /dev/null +++ b/parser/testdata/00944_ml_test/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS defaults; +CREATE TABLE defaults +( + param1 Float64, + param2 Float64, + target Float64, + predict1 Float64, + predict2 Float64 +) ENGINE = Memory; +insert into defaults values (-3.273, -1.452, 4.267, 20.0, 40.0), (0.121, -0.615, 4.290, 20.0, 40.0); + +DROP TABLE IF EXISTS model; +create table model engine = Memory as select stochasticLinearRegressionState(0.1, 0.0, 2, 'SGD')(target, param1, param2) as state from defaults; + +select ans < -61.374 and ans > -61.375 from +(with (select state from remote('127.0.0.1', currentDatabase(), model)) as model select 
evalMLMethod(model, predict1, predict2) as ans from remote('127.0.0.1', currentDatabase(), defaults)); + +SELECT 0 < ans[1] and ans[1] < 0.15 and 0.95 < ans[2] and ans[2] < 1.0 and 0 < ans[3] and ans[3] < 0.05 FROM +(SELECT stochasticLinearRegression(0.000001, 0.01, 100, 'SGD')(number, rand() % 100, number) AS ans FROM numbers(1000)); + +DROP TABLE model; +DROP TABLE defaults; diff --git a/parser/testdata/00945_bloom_filter_index/ast.json b/parser/testdata/00945_bloom_filter_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00945_bloom_filter_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00945_bloom_filter_index/metadata.json b/parser/testdata/00945_bloom_filter_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00945_bloom_filter_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00945_bloom_filter_index/query.sql b/parser/testdata/00945_bloom_filter_index/query.sql new file mode 100644 index 000000000..871484318 --- /dev/null +++ b/parser/testdata/00945_bloom_filter_index/query.sql @@ -0,0 +1,394 @@ +-- Tags: long + +SET allow_suspicious_low_cardinality_types = 1; +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. +SET parallel_replicas_index_analysis_only_on_coordinator = 0; + +DROP TABLE IF EXISTS single_column_bloom_filter; + +CREATE TABLE single_column_bloom_filter (u64 UInt64, i32 Int32, i64 UInt64, INDEX idx (i32) TYPE bloom_filter GRANULARITY 1) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 6, index_granularity_bytes = '10Mi'; + +INSERT INTO single_column_bloom_filter SELECT number AS u64, number AS i32, number AS i64 FROM system.numbers LIMIT 100; + +SELECT COUNT() FROM single_column_bloom_filter WHERE i32 = 1 SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM single_column_bloom_filter WHERE (i32, i32) = (1, 2) SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM single_column_bloom_filter WHERE (i32, i64) = (1, 1) SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM single_column_bloom_filter WHERE (i64, (i64, i32)) = (1, (1, 1)) SETTINGS max_rows_to_read = 6; + +SELECT COUNT() FROM single_column_bloom_filter WHERE i32 IN (1, 2) SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM single_column_bloom_filter WHERE (i32, i32) IN ((1, 2), (2, 3)) SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM single_column_bloom_filter WHERE (i32, i64) IN ((1, 1), (2, 2)) SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM single_column_bloom_filter WHERE (i64, (i64, i32)) IN ((1, (1, 1)), (2, (2, 2))) SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM single_column_bloom_filter WHERE i32 IN (SELECT arrayJoin([toInt32(1), toInt32(2)])) SETTINGS max_rows_to_read = 7; +SELECT COUNT() FROM single_column_bloom_filter WHERE (i32, i32) IN (SELECT arrayJoin([(toInt32(1), toInt32(2)), (toInt32(2), toInt32(3))])) SETTINGS max_rows_to_read = 7; +SELECT COUNT() FROM single_column_bloom_filter WHERE (i32, i64) IN (SELECT arrayJoin([(toInt32(1), toUInt64(1)), (toInt32(2), toUInt64(2))])) SETTINGS max_rows_to_read = 7; +SELECT COUNT() FROM single_column_bloom_filter WHERE (i64, (i64, i32)) IN (SELECT arrayJoin([(toUInt64(1), (toUInt64(1), toInt32(1))), (toUInt64(2), (toUInt64(2), toInt32(2)))])) SETTINGS 
max_rows_to_read = 7; +WITH (1, 2) AS liter_prepared_set SELECT COUNT() FROM single_column_bloom_filter WHERE i32 IN liter_prepared_set SETTINGS max_rows_to_read = 6; +WITH ((1, 2), (2, 3)) AS liter_prepared_set SELECT COUNT() FROM single_column_bloom_filter WHERE (i32, i32) IN liter_prepared_set SETTINGS max_rows_to_read = 6; +WITH ((1, 1), (2, 2)) AS liter_prepared_set SELECT COUNT() FROM single_column_bloom_filter WHERE (i32, i64) IN liter_prepared_set SETTINGS max_rows_to_read = 6; +WITH ((1, (1, 1)), (2, (2, 2))) AS liter_prepared_set SELECT COUNT() FROM single_column_bloom_filter WHERE (i64, (i64, i32)) IN liter_prepared_set SETTINGS max_rows_to_read = 6; + +-- Check that indexHint() works (but it doesn't work with COUNT()). +SELECT SUM(ignore(*) + 1) FROM single_column_bloom_filter WHERE indexHint(i32 in (3, 15, 50)); + +-- The index doesn't understand expressions like these, but it shouldn't break the query. +SELECT COUNT() FROM single_column_bloom_filter WHERE (i32 = 200) = (i32 = 200); +SELECT SUM(ignore(*) + 1) FROM single_column_bloom_filter WHERE indexHint((i32 = 200) != (i32 = 200)); +SELECT COUNT() FROM single_column_bloom_filter WHERE indexOf([10, 20, 30], i32) != 0; +SELECT COUNT() FROM single_column_bloom_filter WHERE has([100, 200, 300], 200); + +DROP TABLE IF EXISTS single_column_bloom_filter; + + +DROP TABLE IF EXISTS bloom_filter_types_test; + +CREATE TABLE bloom_filter_types_test (order_key UInt64, i8 Int8, i16 Int16, i32 Int32, i64 Int64, u8 UInt8, u16 UInt16, u32 UInt32, u64 UInt64, f32 Float32, f64 Float64, date Date, date_time DateTime('Asia/Istanbul'), str String, fixed_string FixedString(5), dt64 DateTime64(3, 'Asia/Istanbul'), INDEX idx (i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, date, date_time, str, fixed_string, dt64) TYPE bloom_filter GRANULARITY 1) ENGINE = MergeTree() ORDER BY order_key SETTINGS index_granularity = 6, index_granularity_bytes = '10Mi'; +INSERT INTO bloom_filter_types_test SELECT number AS order_key, toInt8(number) AS i8, toInt16(number) AS i16, toInt32(number) AS i32, toInt64(number) AS i64, toUInt8(number) AS u8, toUInt16(number) AS u16, toUInt32(number) AS u32, toUInt64(number) AS u64, toFloat32(number) AS f32, toFloat64(number) AS f64, toDate(number, 'Asia/Istanbul') AS date, toDateTime(number, 'Asia/Istanbul') AS date_time, toString(number) AS str, toFixedString(toString(number), 5) AS fixed_string, toDateTime64(number, 3, 'Asia/Istanbul') as dt64 FROM system.numbers LIMIT 100; + +SELECT COUNT() FROM bloom_filter_types_test WHERE i8 = 1 SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM bloom_filter_types_test WHERE i16 = 1 SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM bloom_filter_types_test WHERE i32 = 1 SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM bloom_filter_types_test WHERE i64 = 1 SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM bloom_filter_types_test WHERE u8 = 1 SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM bloom_filter_types_test WHERE u16 = 1 SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM bloom_filter_types_test WHERE u32 = 1 SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM bloom_filter_types_test WHERE u64 = 1 SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM bloom_filter_types_test WHERE f32 = 1 SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM bloom_filter_types_test WHERE f64 = 1 SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM bloom_filter_types_test WHERE date = '1970-01-02' SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM bloom_filter_types_test WHERE date_time = 
toDateTime('1970-01-01 02:00:01', 'Asia/Istanbul') SETTINGS max_rows_to_read = 6; +SELECT COUNT() FROM bloom_filter_types_test WHERE str = '1' SETTINGS max_rows_to_read = 12; +SELECT COUNT() FROM bloom_filter_types_test WHERE fixed_string = toFixedString('1', 5) SETTINGS max_rows_to_read = 12; +SELECT COUNT() FROM bloom_filter_types_test WHERE dt64 = toDateTime64('1970-01-01 02:00:01', 3, 'Asia/Istanbul') SETTINGS max_rows_to_read = 12; + +SELECT COUNT() FROM bloom_filter_types_test WHERE str IN ( SELECT str FROM bloom_filter_types_test); + +DROP TABLE IF EXISTS bloom_filter_types_test; + +DROP TABLE IF EXISTS bloom_filter_array_types_test; + +CREATE TABLE bloom_filter_array_types_test (order_key Array(UInt64), i8 Array(Int8), i16 Array(Int16), i32 Array(Int32), i64 Array(Int64), u8 Array(UInt8), u16 Array(UInt16), u32 Array(UInt32), u64 Array(UInt64), f32 Array(Float32), f64 Array(Float64), date Array(Date), date_time Array(DateTime('Asia/Istanbul')), str Array(String), fixed_string Array(FixedString(5)), dt64 Array(DateTime64(3, 'Asia/Istanbul')), INDEX idx (i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, date, date_time, str, fixed_string, dt64) TYPE bloom_filter GRANULARITY 1) ENGINE = MergeTree() ORDER BY order_key SETTINGS index_granularity = 6, index_granularity_bytes = '10Mi'; +INSERT INTO bloom_filter_array_types_test SELECT groupArray(number) AS order_key, groupArray(toInt8(number)) AS i8, groupArray(toInt16(number)) AS i16, groupArray(toInt32(number)) AS i32, groupArray(toInt64(number)) AS i64, groupArray(toUInt8(number)) AS u8, groupArray(toUInt16(number)) AS u16, groupArray(toUInt32(number)) AS u32, groupArray(toUInt64(number)) AS u64, groupArray(toFloat32(number)) AS f32, groupArray(toFloat64(number)) AS f64, groupArray(toDate(number, 'Asia/Istanbul')) AS date, groupArray(toDateTime(number, 'Asia/Istanbul')) AS date_time, groupArray(toString(number)) AS str, groupArray(toFixedString(toString(number), 5)) AS fixed_string, groupArray(toDateTime64(number, 3, 'Asia/Istanbul')) as dt64 FROM (SELECT number FROM system.numbers LIMIT 15); +INSERT INTO bloom_filter_array_types_test SELECT groupArray(number) AS order_key, groupArray(toInt8(number)) AS i8, groupArray(toInt16(number)) AS i16, groupArray(toInt32(number)) AS i32, groupArray(toInt64(number)) AS i64, groupArray(toUInt8(number)) AS u8, groupArray(toUInt16(number)) AS u16, groupArray(toUInt32(number)) AS u32, groupArray(toUInt64(number)) AS u64, groupArray(toFloat32(number)) AS f32, groupArray(toFloat64(number)) AS f64, groupArray(toDate(number, 'Asia/Istanbul')) AS date, groupArray(toDateTime(number, 'Asia/Istanbul')) AS date_time, groupArray(toString(number)) AS str, groupArray(toFixedString(toString(number), 5)) AS fixed_string, groupArray(toDateTime64(number, 3, 'Asia/Istanbul')) as dt64 FROM (SELECT number FROM system.numbers WHERE number >= 5 LIMIT 15); +INSERT INTO bloom_filter_array_types_test SELECT groupArray(number) AS order_key, groupArray(toInt8(number)) AS i8, groupArray(toInt16(number)) AS i16, groupArray(toInt32(number)) AS i32, groupArray(toInt64(number)) AS i64, groupArray(toUInt8(number)) AS u8, groupArray(toUInt16(number)) AS u16, groupArray(toUInt32(number)) AS u32, groupArray(toUInt64(number)) AS u64, groupArray(toFloat32(number)) AS f32, groupArray(toFloat64(number)) AS f64, groupArray(toDate(number, 'Asia/Istanbul')) AS date, groupArray(toDateTime(number, 'Asia/Istanbul')) AS date_time, groupArray(toString(number)) AS str, groupArray(toFixedString(toString(number), 5)) AS fixed_string, 
groupArray(toDateTime64(number, 3, 'Asia/Istanbul')) as dt64 FROM (SELECT number FROM system.numbers WHERE number >= 10 LIMIT 15); + +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(i8, 1); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(i16, 1); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(i32, 1); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(i64, 1); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(u8, 1); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(u16, 1); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(u32, 1); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(u64, 1); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(f32, 1); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(f64, 1); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(date, toDate('1970-01-02')); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(date_time, toDateTime('1970-01-01 02:00:01', 'Asia/Istanbul')); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(str, '1'); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(fixed_string, toFixedString('1', 5)); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(dt64, toDateTime64('1970-01-01 02:00:01', 3, 'Asia/Istanbul')); + +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(i8, 5); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(i16, 5); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(i32, 5); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(i64, 5); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(u8, 5); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(u16, 5); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(u32, 5); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(u64, 5); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(f32, 5); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(f64, 5); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(date, toDate('1970-01-06')); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(date_time, toDateTime('1970-01-01 02:00:05', 'Asia/Istanbul')); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(str, '5'); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(fixed_string, toFixedString('5', 5)); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(dt64, toDateTime64('1970-01-01 02:00:05', 3, 'Asia/Istanbul')); + +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(i8, 10); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(i16, 10); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(i32, 10); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(i64, 10); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(u8, 10); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(u16, 10); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(u32, 10); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(u64, 10); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(f32, 10); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(f64, 10); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(date, toDate('1970-01-11')); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(date_time, toDateTime('1970-01-01 02:00:10', 'Asia/Istanbul')); +SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(str, '10'); 
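+-- Editor's aside (illustrative sketch, not from the original test): for Array
+-- columns the bloom_filter index is built over the individual array elements,
+-- which is why element-membership predicates such as has() can prune granules.
+-- A minimal sketch for a hypothetical table t:
+--   CREATE TABLE t (k UInt64, a Array(String), INDEX ix a TYPE bloom_filter GRANULARITY 1)
+--   ENGINE = MergeTree ORDER BY k;
+--   SELECT count() FROM t WHERE has(a, 'x');  -- eligible for granule skipping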
+SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(fixed_string, toFixedString('10', 5));
+SELECT COUNT() FROM bloom_filter_array_types_test WHERE has(dt64, toDateTime64('1970-01-01 02:00:10', 3, 'Asia/Istanbul'));
+
+DROP TABLE IF EXISTS bloom_filter_array_types_test;
+
+DROP TABLE IF EXISTS bloom_filter_null_types_test;
+
+-- Nullable columns: both equality lookups and isNull() go through the index.
+CREATE TABLE bloom_filter_null_types_test (order_key UInt64, i8 Nullable(Int8), i16 Nullable(Int16), i32 Nullable(Int32), i64 Nullable(Int64), u8 Nullable(UInt8), u16 Nullable(UInt16), u32 Nullable(UInt32), u64 Nullable(UInt64), f32 Nullable(Float32), f64 Nullable(Float64), date Nullable(Date), date_time Nullable(DateTime('Asia/Istanbul')), str Nullable(String), fixed_string Nullable(FixedString(5)), dt64 Nullable(DateTime64(3, 'Asia/Istanbul')), INDEX idx (i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, date, date_time, str, fixed_string, dt64) TYPE bloom_filter GRANULARITY 1) ENGINE = MergeTree() ORDER BY order_key SETTINGS index_granularity = 6, index_granularity_bytes = '10Mi';
+INSERT INTO bloom_filter_null_types_test SELECT number AS order_key, toInt8(number) AS i8, toInt16(number) AS i16, toInt32(number) AS i32, toInt64(number) AS i64, toUInt8(number) AS u8, toUInt16(number) AS u16, toUInt32(number) AS u32, toUInt64(number) AS u64, toFloat32(number) AS f32, toFloat64(number) AS f64, toDate(number, 'Asia/Istanbul') AS date, toDateTime(number, 'Asia/Istanbul') AS date_time, toString(number) AS str, toFixedString(toString(number), 5) AS fixed_string, toDateTime64(number, 3, 'Asia/Istanbul') AS dt64 FROM system.numbers LIMIT 100;
+INSERT INTO bloom_filter_null_types_test SELECT 0 AS order_key, NULL AS i8, NULL AS i16, NULL AS i32, NULL AS i64, NULL AS u8, NULL AS u16, NULL AS u32, NULL AS u64, NULL AS f32, NULL AS f64, NULL AS date, NULL AS date_time, NULL AS str, NULL AS fixed_string, NULL AS dt64;
+
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE i8 = 1 SETTINGS max_rows_to_read = 6;
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE i16 = 1 SETTINGS max_rows_to_read = 6;
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE i32 = 1 SETTINGS max_rows_to_read = 6;
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE i64 = 1 SETTINGS max_rows_to_read = 6;
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE u8 = 1 SETTINGS max_rows_to_read = 6;
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE u16 = 1 SETTINGS max_rows_to_read = 6;
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE u32 = 1 SETTINGS max_rows_to_read = 6;
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE u64 = 1 SETTINGS max_rows_to_read = 6;
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE f32 = 1 SETTINGS max_rows_to_read = 6;
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE f64 = 1 SETTINGS max_rows_to_read = 6;
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE date = '1970-01-02' SETTINGS max_rows_to_read = 6;
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE date_time = toDateTime('1970-01-01 02:00:01', 'Asia/Istanbul') SETTINGS max_rows_to_read = 6;
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE str = '1' SETTINGS max_rows_to_read = 12;
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE fixed_string = toFixedString('1', 5) SETTINGS max_rows_to_read = 12;
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE dt64 = toDateTime64('1970-01-01 02:00:01', 3, 'Asia/Istanbul') SETTINGS max_rows_to_read = 12;
+
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE isNull(i8);
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE isNull(i16);
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE isNull(i32);
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE isNull(i64);
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE isNull(u8);
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE isNull(u16);
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE isNull(u32);
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE isNull(u64);
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE isNull(f32);
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE isNull(f64);
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE isNull(date);
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE isNull(date_time);
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE isNull(str);
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE isNull(fixed_string);
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE isNull(dt64);
+
+SELECT COUNT() FROM bloom_filter_null_types_test WHERE str IN ( SELECT str FROM bloom_filter_null_types_test);
+
+DROP TABLE IF EXISTS bloom_filter_null_types_test;
+
+DROP TABLE IF EXISTS bloom_filter_lc_null_types_test;
+
+-- LowCardinality(Nullable(...)) wrappers must not change any of this.
+CREATE TABLE bloom_filter_lc_null_types_test (order_key UInt64, str LowCardinality(Nullable(String)), fixed_string LowCardinality(Nullable(FixedString(5))), INDEX idx (str, fixed_string) TYPE bloom_filter GRANULARITY 1) ENGINE = MergeTree() ORDER BY order_key SETTINGS index_granularity = 6, index_granularity_bytes = '10Mi';
+INSERT INTO bloom_filter_lc_null_types_test SELECT number AS order_key, toString(number) AS str, toFixedString(toString(number), 5) AS fixed_string FROM system.numbers LIMIT 100;
+INSERT INTO bloom_filter_lc_null_types_test SELECT 0 AS order_key, NULL AS str, NULL AS fixed_string;
+
+SELECT COUNT() FROM bloom_filter_lc_null_types_test WHERE str = '1' SETTINGS max_rows_to_read = 12;
+SELECT COUNT() FROM bloom_filter_lc_null_types_test WHERE fixed_string = toFixedString('1', 5) SETTINGS max_rows_to_read = 12;
+
+SELECT COUNT() FROM bloom_filter_lc_null_types_test WHERE isNull(str);
+SELECT COUNT() FROM bloom_filter_lc_null_types_test WHERE isNull(fixed_string);
+
+SELECT COUNT() FROM bloom_filter_lc_null_types_test WHERE str IN ( SELECT str FROM bloom_filter_lc_null_types_test);
+
+DROP TABLE IF EXISTS bloom_filter_lc_null_types_test;
+
+DROP TABLE IF EXISTS bloom_filter_array_lc_null_types_test;
+
+CREATE TABLE bloom_filter_array_lc_null_types_test (
+    order_key Array(LowCardinality(Nullable(UInt64))),
+
+    i8 Array(LowCardinality(Nullable(Int8))),
+    i16 Array(LowCardinality(Nullable(Int16))),
+    i32 Array(LowCardinality(Nullable(Int32))),
+    i64 Array(LowCardinality(Nullable(Int64))),
+    u8 Array(LowCardinality(Nullable(UInt8))),
+    u16 Array(LowCardinality(Nullable(UInt16))),
+    u32 Array(LowCardinality(Nullable(UInt32))),
+    u64 Array(LowCardinality(Nullable(UInt64))),
+    f32 Array(LowCardinality(Nullable(Float32))),
+    f64 Array(LowCardinality(Nullable(Float64))),
+
+    date Array(LowCardinality(Nullable(Date))),
+    date_time Array(LowCardinality(Nullable(DateTime('Asia/Istanbul')))),
+
+    str Array(LowCardinality(Nullable(String))),
+    fixed_string Array(LowCardinality(Nullable(FixedString(5)))),
+    INDEX idx (i8, i16, i32, i64, u8, u16, u32, u64, f32, f64, date, date_time, str, fixed_string)
+    TYPE bloom_filter GRANULARITY 1)
+ENGINE = MergeTree() ORDER BY order_key SETTINGS index_granularity = 6, index_granularity_bytes = '10Mi', allow_nullable_key = 1;
+
+INSERT INTO bloom_filter_array_lc_null_types_test
+SELECT groupArray(number) AS order_key,
+    groupArray(toInt8(number)) AS i8,
+    groupArray(toInt16(number)) AS i16,
+    groupArray(toInt32(number)) AS i32,
+    groupArray(toInt64(number)) AS i64,
+    groupArray(toUInt8(number)) AS u8,
+    groupArray(toUInt16(number)) AS u16,
+    groupArray(toUInt32(number)) AS u32,
+    groupArray(toUInt64(number)) AS u64,
+    groupArray(toFloat32(number)) AS f32,
+    groupArray(toFloat64(number)) AS f64,
+    groupArray(toDate(number, 'Asia/Istanbul')) AS date,
+    groupArray(toDateTime(number, 'Asia/Istanbul')) AS date_time,
+    groupArray(toString(number)) AS str,
+    groupArray(toFixedString(toString(number), 5)) AS fixed_string
+    FROM (SELECT number FROM system.numbers LIMIT 15);
+
+INSERT INTO bloom_filter_array_lc_null_types_test SELECT groupArray(number) AS order_key, groupArray(toInt8(number)) AS i8, groupArray(toInt16(number)) AS i16, groupArray(toInt32(number)) AS i32, groupArray(toInt64(number)) AS i64, groupArray(toUInt8(number)) AS u8, groupArray(toUInt16(number)) AS u16, groupArray(toUInt32(number)) AS u32, groupArray(toUInt64(number)) AS u64, groupArray(toFloat32(number)) AS f32, groupArray(toFloat64(number)) AS f64, groupArray(toDate(number, 'Asia/Istanbul')) AS date, groupArray(toDateTime(number, 'Asia/Istanbul')) AS date_time, groupArray(toString(number)) AS str, groupArray(toFixedString(toString(number), 5)) AS fixed_string FROM (SELECT number FROM system.numbers WHERE number >= 5 LIMIT 15);
+INSERT INTO bloom_filter_array_lc_null_types_test SELECT groupArray(number) AS order_key, groupArray(toInt8(number)) AS i8, groupArray(toInt16(number)) AS i16, groupArray(toInt32(number)) AS i32, groupArray(toInt64(number)) AS i64, groupArray(toUInt8(number)) AS u8, groupArray(toUInt16(number)) AS u16, groupArray(toUInt32(number)) AS u32, groupArray(toUInt64(number)) AS u64, groupArray(toFloat32(number)) AS f32, groupArray(toFloat64(number)) AS f64, groupArray(toDate(number, 'Asia/Istanbul')) AS date, groupArray(toDateTime(number, 'Asia/Istanbul')) AS date_time, groupArray(toString(number)) AS str, groupArray(toFixedString(toString(number), 5)) AS fixed_string FROM (SELECT number FROM system.numbers WHERE number >= 10 LIMIT 15);
+INSERT INTO bloom_filter_array_lc_null_types_test SELECT n AS order_key, n AS i8, n AS i16, n AS i32, n AS i64, n AS u8, n AS u16, n AS u32, n AS u64, n AS f32, n AS f64, n AS date, n AS date_time, n AS str, n AS fixed_string FROM (SELECT [NULL] AS n);
+INSERT INTO bloom_filter_array_lc_null_types_test SELECT [NULL, n] AS order_key, [NULL, toInt8(n)] AS i8, [NULL, toInt16(n)] AS i16, [NULL, toInt32(n)] AS i32, [NULL, toInt64(n)] AS i64, [NULL, toUInt8(n)] AS u8, [NULL, toUInt16(n)] AS u16, [NULL, toUInt32(n)] AS u32, [NULL, toUInt64(n)] AS u64, [NULL, toFloat32(n)] AS f32, [NULL, toFloat64(n)] AS f64, [NULL, toDate(n, 'Asia/Istanbul')] AS date, [NULL, toDateTime(n, 'Asia/Istanbul')] AS date_time, [NULL, toString(n)] AS str, [NULL, toFixedString(toString(n), 5)] AS fixed_string FROM (SELECT 100 as n);
+
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i8, 1);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i16, 1);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i32, 1);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i64, 1);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u8, 1);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u16, 1);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u32, 1);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u64, 1);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(f32, 1);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(f64, 1);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(date, toDate('1970-01-02'));
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(date_time, toDateTime('1970-01-01 02:00:01', 'Asia/Istanbul'));
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(str, '1');
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(fixed_string, toFixedString('1', 5));
+
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i8, 5);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i16, 5);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i32, 5);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i64, 5);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u8, 5);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u16, 5);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u32, 5);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u64, 5);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(f32, 5);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(f64, 5);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(date, toDate('1970-01-06'));
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(date_time, toDateTime('1970-01-01 02:00:05', 'Asia/Istanbul'));
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(str, '5');
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(fixed_string, toFixedString('5', 5));
+
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i8, 10);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i16, 10);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i32, 10);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i64, 10);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u8, 10);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u16, 10);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u32, 10);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u64, 10);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(f32, 10);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(f64, 10);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(date, toDate('1970-01-11'));
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(date_time, toDateTime('1970-01-01 02:00:10', 'Asia/Istanbul'));
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(str, '10');
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(fixed_string, toFixedString('10', 5));
+
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i8, NULL);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i16, NULL);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i32, NULL);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i64, NULL);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u8, NULL);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u16, NULL);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u32, NULL);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u64, NULL);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(f32, NULL);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(f64, NULL);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(date, NULL);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(date_time, NULL);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(str, NULL);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(fixed_string, NULL);
+
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i8, 100);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i16, 100);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i32, 100);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(i64, 100);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u8, 100);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u16, 100);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u32, 100);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(u64, 100);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(f32, 100);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(f64, 100);
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(date, toDate('1970-04-11'));
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(date_time, toDateTime('1970-01-01 02:01:40', 'Asia/Istanbul'));
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(str, '100');
+SELECT COUNT() FROM bloom_filter_array_lc_null_types_test WHERE has(fixed_string, toFixedString('100', 5));
+
+DROP TABLE IF EXISTS bloom_filter_array_lc_null_types_test;
+
+-- Regression checks: array offsets must be read correctly when many granules hold empty arrays.
+DROP TABLE IF EXISTS bloom_filter_array_offsets_lc_str;
+CREATE TABLE bloom_filter_array_offsets_lc_str (order_key int, str Array(LowCardinality(String)), INDEX idx str TYPE bloom_filter(1.) GRANULARITY 1024) ENGINE = MergeTree() ORDER BY order_key SETTINGS index_granularity = 1024, index_granularity_bytes = '10Mi';
+INSERT INTO bloom_filter_array_offsets_lc_str SELECT number AS i, if(i%2, ['value'], []) FROM system.numbers LIMIT 10000;
+SELECT count() FROM bloom_filter_array_offsets_lc_str WHERE has(str, 'value');
+DROP TABLE IF EXISTS bloom_filter_array_offsets_lc_str;
+
+DROP TABLE IF EXISTS bloom_filter_array_offsets_str;
+CREATE TABLE bloom_filter_array_offsets_str (order_key int, str Array(String), INDEX idx str TYPE bloom_filter(1.) GRANULARITY 1024) ENGINE = MergeTree() ORDER BY order_key SETTINGS index_granularity = 1024, index_granularity_bytes = '10Mi';
+INSERT INTO bloom_filter_array_offsets_str SELECT number AS i, if(i%2, ['value'], []) FROM system.numbers LIMIT 10000;
+SELECT count() FROM bloom_filter_array_offsets_str WHERE has(str, 'value');
+DROP TABLE IF EXISTS bloom_filter_array_offsets_str;
+
+DROP TABLE IF EXISTS bloom_filter_array_offsets_i;
+CREATE TABLE bloom_filter_array_offsets_i (order_key int, i Array(int), INDEX idx i TYPE bloom_filter(1.) GRANULARITY 1024) ENGINE = MergeTree() ORDER BY order_key SETTINGS index_granularity = 1024, index_granularity_bytes = '10Mi';
+INSERT INTO bloom_filter_array_offsets_i SELECT number AS i, if(i%2, [99999], []) FROM system.numbers LIMIT 10000;
+SELECT count() FROM bloom_filter_array_offsets_i WHERE has(i, 99999);
+DROP TABLE IF EXISTS bloom_filter_array_offsets_i;
+
+-- indexOf() predicates in every supported comparison form should be able to use the index.
+DROP TABLE IF EXISTS test_bf_indexOf;
+CREATE TABLE test_bf_indexOf ( `id` int, `ary` Array(LowCardinality(Nullable(String))), INDEX idx_ary ary TYPE bloom_filter(0.01) GRANULARITY 1) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity = 1;
+INSERT INTO test_bf_indexOf VALUES (1, ['value1', 'value2']);
+INSERT INTO test_bf_indexOf VALUES (2, ['value3']);
+
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') = 0 ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') = 1 ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value2') = 0 ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value2') = 2 ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value3') = 0 ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value3') = 1 ORDER BY id FORMAT TSV;
+
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') != 0 ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') != 1 ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value2') != 0 ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value2') != 2 ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value3') != 0 ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value3') != 1 ORDER BY id FORMAT TSV;
+
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') = 2 ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value3') = 2 ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') = 1 OR indexOf(ary, 'value3') = 1 ORDER BY id FORMAT TSV;
+
+SELECT id FROM test_bf_indexOf WHERE not(indexOf(ary, 'value1')) ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE not(indexOf(ary, 'value1') == 0) ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE not(indexOf(ary, 'value1') == 1) ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE not(indexOf(ary, 'value1') == 2) ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') in (0) ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') in (1) ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') in (2) ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') not in (0) ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') not in (1) ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') not in (2) ORDER BY id FORMAT TSV;
+
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') > 0 ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE 0 < indexOf(ary, 'value1') ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') >= 0 ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE 0 <= indexOf(ary, 'value1') ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') > 1 ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE 1 < indexOf(ary, 'value1') ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') >= 1 ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE 1 <= indexOf(ary, 'value1') ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') >= 2 ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE 2 <= indexOf(ary, 'value1') ORDER BY id FORMAT TSV;
+
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') = toDecimal32(0, 2) ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE toDecimal128(0, 2) = indexOf(ary, 'value1') ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') = '0' ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE '0' = indexOf(ary, 'value1') ORDER BY id FORMAT TSV;
+
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') > toDecimal32(0, 2) ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') < toDecimal128(1, 2) ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') > '0' ORDER BY id FORMAT TSV;
+SELECT id FROM test_bf_indexOf WHERE indexOf(ary, 'value1') < '1' ORDER BY id FORMAT TSV;
+
+SELECT id, ary[indexOf(ary, 'value1')] FROM test_bf_indexOf WHERE ary[indexOf(ary, 'value1')] = 'value1' ORDER BY id FORMAT TSV;
+SELECT id, ary[indexOf(ary, 'value2')] FROM test_bf_indexOf WHERE ary[indexOf(ary, 'value2')] = 'value2' ORDER BY id FORMAT TSV;
+SELECT id, ary[indexOf(ary, 'value3')] FROM test_bf_indexOf WHERE ary[indexOf(ary, 'value3')] = 'value3' ORDER BY id FORMAT TSV;
+
+DROP TABLE IF EXISTS test_bf_indexOf;
+
+-- Test for bug #65597
+DROP TABLE IF EXISTS test_bf_cast;
+CREATE TABLE test_bf_cast (c Int32, INDEX x1 (c) type bloom_filter) ENGINE = MergeTree ORDER BY c AS SELECT 1;
+SELECT count() FROM test_bf_cast WHERE cast(c = 1 OR c = 9999 AS Bool) SETTINGS use_skip_indexes=0;
+SELECT count() FROM test_bf_cast WHERE cast(c = 1 OR c = 9999 AS Bool) SETTINGS use_skip_indexes=1;
+DROP TABLE test_bf_cast;
diff --git a/parser/testdata/00945_ml_test/ast.json b/parser/testdata/00945_ml_test/ast.json
new file mode 100644
index 000000000..7e8ef7fd6
--- /dev/null
+++ b/parser/testdata/00945_ml_test/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery defaults (children 1)"
+        },
+        {
+            "explain": " Identifier defaults"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001226265,
+        "rows_read": 2,
+        "bytes_read": 68
+    }
+}
diff --git a/parser/testdata/00945_ml_test/metadata.json b/parser/testdata/00945_ml_test/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/00945_ml_test/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/00945_ml_test/query.sql b/parser/testdata/00945_ml_test/query.sql
new file mode 100644
index 000000000..c3a3a51b9
--- /dev/null
+++ b/parser/testdata/00945_ml_test/query.sql
@@ -0,0 +1,21 @@
+DROP TABLE IF EXISTS defaults;
+CREATE TABLE IF NOT EXISTS defaults
+(
+    param1 Float64,
+    param2 Float64,
+    target Float64,
+    predict1 Float64,
+    predict2 Float64
+) ENGINE = Memory;
+insert into defaults values
(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2),(1,2,1,-1,-2),(-1,-2,-1,1,2); +DROP TABLE IF EXISTS model; +create table model engine = Memory as select stochasticLogisticRegressionState(0.1, 0.0, 1.0, 'SGD')(target, param1, param2) as state from defaults; + +select ans < 1.1 and ans > 0.9 from +(with (select state from model) as model select evalMLMethod(model, predict1, predict2) as ans from defaults limit 2); + +select ans > -0.1 and ans < 0.1 from +(with (select state from model) as model select evalMLMethod(model, predict1, predict2) as ans from defaults limit 2); + +DROP TABLE defaults; +DROP TABLE model; diff --git a/parser/testdata/00946_ml_test/ast.json b/parser/testdata/00946_ml_test/ast.json new file mode 100644 index 000000000..859f4b782 --- /dev/null +++ b/parser/testdata/00946_ml_test/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery defaults (children 1)" + }, + { + "explain": " Identifier defaults" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001431434, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00946_ml_test/metadata.json b/parser/testdata/00946_ml_test/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00946_ml_test/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00946_ml_test/query.sql b/parser/testdata/00946_ml_test/query.sql new file mode 100644 index 000000000..a3da3b7e5 --- /dev/null +++ b/parser/testdata/00946_ml_test/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS defaults; +CREATE TABLE IF NOT EXISTS defaults +( + param1 Float64, + param2 Float64, + param3 Float64, + param4 Float64, + param5 Float64, + param6 Float64, + param7 Float64, + target Float64, + predict1 Float64, + predict2 Float64, + predict3 Float64, + predict4 Float64, + predict5 Float64, + predict6 Float64, + predict7 Float64 + +) ENGINE = Memory; +insert into defaults values 
(1.76210664421617,1.7469706406568504,0.7988286239230257,1.0938642223599824,1.167321139201246,1.7648182796261376,0.909111664354187,0.92,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.6276564089897139,-0.06763531281107672,0.7988286239230257,0.5966532121963541,1.167321139201246,0.4551512643242912,0.909111664354187,0.76,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.07046681268810527,-0.5625278455750569,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-1.0056311758200724,0.909111664354187,0.72,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.4531256035702591,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-1.0614543055744028,0.11933920911869125,0.909111664354187,0.8,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.24499761810756004,-0.7274920231630502,-0.952028633990455,-1.3921908284581592,-0.5042604443804907,-0.6530285178541898,-1.0999748867047898,0.65,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(1.1512488252480781,1.2520781078928702,1.674257252879766,1.0938642223599824,-0.5042604443804907,1.244309594057455,0.909111664354187,0.9,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,0.6101272780073337,-0.6698191206144725,0.909111664354187,0.75,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.7685900343659244,-1.057420378339037,-0.952028633990455,-0.3977688081309026,0.6101272780073337,-1.1735372034228724,-1.0999748867047898,0.68,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-1.2921824506242887,-0.8924562007510436,-1.8274572629471952,-1.3921908284581592,-2.175842027962227,-1.0056311758200724,-1.0999748867047898,0.5,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.5403910062799865,0.09732886477691666,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.0018049897967303734,-1.0999748867047898,0.45,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.7149218116994412,-0.2325994903990701,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.3340070654088696,0.909111664354187,0.52,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.889452617118896,0.5922213975408968,0.7988286239230257,0.5966532121963541,1.167321139201246,0.6734291002079332,0.909111664354187,0.84,-0.5940592289464697,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.9767180198286235,0.7571855751288902,0.7988286239230257,0.5966532121963541,1.167321139201246,0.8413351278107333,0.909111664354187,0.78,-0.5940592289464697,-0.7274920231630502,-0.
952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.8558554370756518,0.26229304236491,-0.0766000050337147,0.5966532121963541,-0.5042604443804907,-1.0056311758200724,0.909111664354187,0.62,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(-0.5067938262367422,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,-1.618648166768315,-0.6698191206144725,0.909111664354187,0.61,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(-0.24499761810756004,-0.39756366798706344,-0.0766000050337147,0.09944220203272576,-1.0614543055744028,-0.5019130930116695,-1.0999748867047898,0.54,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(0.016798590021622126,-0.06763531281107672,-0.0766000050337147,0.5966532121963541,-0.5042604443804907,0.16971101739953035,-1.0999748867047898,0.66,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(0.1913293954410769,-0.2325994903990701,-0.0766000050337147,0.5966532121963541,-0.5042604443804907,-1.0056311758200724,0.909111664354187,0.65,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(0.10406399273134952,0.4272572199529034,-0.0766000050337147,0.5966532121963541,-0.5042604443804907,0.3376170450023333,-1.0999748867047898,0.63,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(-1.2049170479145614,-0.8924562007510436,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-0.1661010378060696,-1.0999748867047898,0.62,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(-0.41952842352701486,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,-1.618648166768315,-1.1735372034228724,0.909111664354187,0.64,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(0.7149218116994412,1.087113930304877,0.7988286239230257,-0.3977688081309026,-1.618648166768315,-0.3340070654088696,-1.0999748867047898,0.7,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(0.9767180198286235,1.4170422854808635,1.674257252879766,1.5910752325236108,1.724515000395158,1.5129592382219361,0.909111664354187,0.94,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(1.5003104360869879,1.9119348182448437,1.674257252879766,1.5910752325236108,1.167321139201246,1.848771293427536,0.909111664354187,0.95,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(1.6748412415064426,1.9119348182448437,1.674257252879766,0.5966532121963541,0.052933416813421515,2.016677321030339,0.909111664354187,0.97,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,1.0428223609340956,0.909111664354187),(2.023902852345352,2.076898995832837,1.674257252879766,1.0938642223599824,1.167321139201246,1.680865265824736,0.90911166435
4187,0.94,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(0.4531256035702591,0.26229304236491,1.674257252879766,1.0938642223599824,0.052933416813421515,0.3376170450023333,-1.0999748867047898,0.76,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(-1.6412440614631982,-1.5523129111030172,-0.952028633990455,-1.8894018386217877,-1.0614543055744028,-1.8451613138340752,0.909111664354187,0.44,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(-1.9030402695923805,-2.377133799042984,-1.8274572629471952,-1.3921908284581592,-1.618648166768315,-2.348879396642477,-1.0999748867047898,0.46,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(-0.5940592289464697,-1.3873487335150236,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-2.180973369039677,-1.0999748867047898,0.54,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(-1.4667132560437435,-1.7172770886910105,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-0.8377251482172725,0.909111664354187,0.65,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(0.889452617118896,-0.7274920231630502,-0.0766000050337147,0.5966532121963541,0.6101272780073337,-0.5019130930116695,0.909111664354187,0.74,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(1.8493720469258974,1.7469706406568504,0.7988286239230257,-0.3977688081309026,1.167321139201246,1.3450532106191362,0.909111664354187,0.91,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(2.023902852345352,1.087113930304877,1.674257252879766,0.5966532121963541,0.6101272780073337,1.680865265824736,0.909111664354187,0.9,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(1.2385142279578056,0.7571855751288902,1.674257252879766,0.5966532121963541,1.724515000395158,2.016677321030339,0.909111664354187,0.94,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(0.2785947981508043,0.4272572199529034,1.674257252879766,1.5910752325236108,1.724515000395158,1.0092411554135332,0.909111664354187,0.88,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(-1.553978658753471,-0.2325994903990701,-0.952028633990455,0.5966532121963541,0.6101272780073337,-0.3340070654088696,-1.0999748867047898,0.64,1.6748412415064426,1.9119348182448437,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.7144464713452956,0.909111664354187),(-1.4667132560437435,-0.39756366798706344,-1.8274572629471952,-2.386612848785416,-1.618648166768315,-1.3414432310256739,-1.0999748867047898,0.58,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(-1.117651645204834,-0.39756366798706344,-1.8274572629471952,-0.3977688081309026,-2.175842027962227,-1.8451613138340752,-1.0999748867
047898,0.52,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(-0.8558554370756518,0.09732886477691666,-0.952028633990455,0.5966532121963541,0.052933416813421515,-1.5093492586284738,-1.0999748867047898,0.48,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(-0.7685900343659244,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-1.0056311758200724,0.909111664354187,0.46,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(-0.07046681268810527,-0.39756366798706344,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.6698191206144725,0.909111664354187,0.49,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(-0.3322630208172874,-0.06763531281107672,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-0.1661010378060696,0.909111664354187,0.53,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(1.3257796306675331,1.582006463068857,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.8413351278107333,-1.0999748867047898,0.87,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(0.8021872144091686,0.9221497527168835,1.674257252879766,1.0938642223599824,0.6101272780073337,1.3450532106191362,0.909111664354187,0.91,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(0.4531256035702591,0.4272572199529034,1.674257252879766,1.5910752325236108,0.6101272780073337,0.8413351278107333,0.909111664354187,0.88,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(1.0639834225383509,1.087113930304877,1.674257252879766,0.5966532121963541,1.724515000395158,1.1771471830163363,0.909111664354187,0.86,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(1.9366374496356247,1.9119348182448437,1.674257252879766,1.0938642223599824,0.6101272780073337,1.848771293427536,-1.0999748867047898,0.89,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(0.36586020086053167,0.4272572199529034,-0.0766000050337147,0.09944220203272576,1.724515000395158,0.42157005880373183,0.909111664354187,0.82,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(0.889452617118896,0.5922213975408968,0.7988286239230257,-0.3977688081309026,0.6101272780073337,-0.3340070654088696,0.909111664354187,0.78,0.36586020086053167,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.0989386267649508,0.909111664354187),(-0.3322630208172874,-1.5523129111030172,-0.0766000050337147,-0.8949798182945309,1.167321139201246,-0.5019130930116695,0.909111664354187,0.76,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.41952842352701486,-1.2223845559270303,-0.952028633990455,-1.8894018386217877,0.05
2933416813421515,-1.1735372034228724,0.909111664354187,0.56,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(1.5003104360869879,1.4170422854808635,0.7988286239230257,0.5966532121963541,-0.5042604443804907,-1.0056311758200724,0.909111664354187,0.78,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.6276564089897139,0.7571855751288902,0.7988286239230257,0.5966532121963541,-1.0614543055744028,-0.8377251482172725,0.909111664354187,0.72,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.4531256035702591,0.4272572199529034,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-1.0056311758200724,-1.0999748867047898,0.7,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.2785947981508043,-0.7274920231630502,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-1.5093492586284738,-1.0999748867047898,0.64,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.07046681268810527,-0.8924562007510436,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,-2.013067341436875,-1.0999748867047898,0.64,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-1.6412440614631982,-1.3873487335150236,-0.952028633990455,0.5966532121963541,-1.618648166768315,-1.6772552862312753,-1.0999748867047898,0.46,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-1.4667132560437435,-1.3873487335150236,-1.8274572629471952,-0.3977688081309026,-1.618648166768315,-3.0205035070536796,0.909111664354187,0.36,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.5067938262367422,-0.5625278455750569,-0.952028633990455,-1.3921908284581592,-1.618648166768315,-0.5019130930116695,-1.0999748867047898,0.42,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.681324631656197,-1.2223845559270303,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-0.8377251482172725,-1.0999748867047898,0.48,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.8558554370756518,-1.057420378339037,-0.0766000050337147,0.5966532121963541,-0.5042604443804907,-0.6698191206144725,-1.0999748867047898,0.47,-0.15773221539783266,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-1.117651645204834,-0.39756366798706344,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-0.6698191206144725,0.909111664354187,0.54,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(-0.15773221539783266,-0.06763531281107672,-0.952028633990455,0.5966532121963541,-0.5042604443804907,-0.1661010378060696,0.909111664354187,0.56,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.392190828458159
2,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(0.7149218116994412,0.5922213975408968,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.16971101739953035,-1.0999748867047898,0.52,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(0.7149218116994412,0.7571855751288902,0.7988286239230257,0.09944220203272576,0.052933416813421515,0.5391042781256927,-1.0999748867047898,0.55,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(0.889452617118896,1.087113930304877,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,0.7070103057284927,-1.0999748867047898,0.61,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(-0.07046681268810527,-0.06763531281107672,-0.952028633990455,0.09944220203272576,0.052933416813421515,0.06896740083785215,0.909111664354187,0.57,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(0.10406399273134952,0.26229304236491,-0.0766000050337147,0.09944220203272576,0.6101272780073337,1.0428223609340956,0.909111664354187,0.68,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(0.9767180198286235,1.2520781078928702,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9420787443724145,0.909111664354187,0.78,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(1.3257796306675331,1.7469706406568504,1.674257252879766,1.5910752325236108,1.724515000395158,1.7480276768658578,0.909111664354187,0.94,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(1.6748412415064426,0.7571855751288902,1.674257252879766,1.5910752325236108,1.724515000395158,1.9495149099892173,0.909111664354187,0.96,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(0.36586020086053167,0.5922213975408968,1.674257252879766,1.5910752325236108,1.724515000395158,1.4290062244205346,0.909111664354187,0.93,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(-0.24499761810756004,0.09732886477691666,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.7405915112490521,0.909111664354187,0.84,-1.117651645204834,-1.057420378339037,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898),(-0.24499761810756004,-0.2325994903990701,-0.0766000050337147,-0.3977688081309026,1.724515000395158,0.5055230726051333,-1.0999748867047898,0.74,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(1.0639834225383509,1.087113930304877,-0.952028633990455,-1.3921908284581592,0.6101272780073337,-0.06535742124438843,0.909111664354187,0.72,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(0.889452617118896,0.7571855751288902,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,0.20329222292009272,0.909111664354187,0.74,-1.7285
094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(-1.3794478533340162,-1.3873487335150236,-0.952028633990455,-0.3977688081309026,-1.618648166768315,-0.6362379150939101,-1.0999748867047898,0.64,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(-1.8157748668826532,-2.0472054438669973,-0.952028633990455,-0.3977688081309026,-1.618648166768315,-1.777998902792955,0.909111664354187,0.44,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(-1.990305672302108,-2.377133799042984,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-2.0802297524779956,-1.0999748867047898,0.46,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(-0.41952842352701486,-0.39756366798706344,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,-0.972049970299513,0.909111664354187,0.5,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(2.023902852345352,2.076898995832837,0.7988286239230257,1.5910752325236108,1.724515000395158,1.5129592382219361,0.909111664354187,0.96,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(0.2785947981508043,0.4272572199529034,1.674257252879766,1.5910752325236108,1.167321139201246,1.0428223609340956,0.909111664354187,0.92,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(0.4531256035702591,1.2520781078928702,1.674257252879766,0.5966532121963541,1.167321139201246,1.2778907995780144,0.909111664354187,0.92,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(2.023902852345352,1.2520781078928702,1.674257252879766,1.0938642223599824,1.167321139201246,1.4290062244205346,0.909111664354187,0.94,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(0.1913293954410769,-0.7274920231630502,0.7988286239230257,1.0938642223599824,0.052933416813421515,0.10254860635841155,-1.0999748867047898,0.76,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(-0.15773221539783266,-0.2325994903990701,-0.0766000050337147,1.0938642223599824,0.052933416813421515,-0.30042585988831016,-1.0999748867047898,0.72,-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.9626955331560363,-1.0999748867047898),(0.016798590021622126,-0.06763531281107672,-0.952028633990455,0.09944220203272576,-0.5042604443804907,-0.535494298532232,-1.0999748867047898,0.66,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-0.24499761810756004,0.09732886477691666,-0.0766000050337147,1.0938642223599824,0.052933416813421515,-0.7705627371761508,-1.0999748867047898,0.64,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-0.07046681268810527,0.26229304236491,0.7988286239230
257,1.0938642223599824,0.052933416813421515,0.27045463396121155,0.909111664354187,0.74,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(0.10406399273134952,-0.2325994903990701,-0.952028633990455,0.5966532121963541,0.6101272780073337,-1.139955997902313,0.909111664354187,0.64,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-1.553978658753471,-1.7172770886910105,-0.0766000050337147,1.5910752325236108,0.052933416813421515,-1.5765116696695942,-1.0999748867047898,0.38,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-1.6412440614631982,-1.5523129111030172,-0.952028633990455,0.5966532121963541,-0.5042604443804907,-0.9552593675392334,-1.0999748867047898,0.34,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-1.3794478533340162,-1.7172770886910105,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-1.2071184089434333,0.909111664354187,0.44,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-1.2049170479145614,-1.3873487335150236,-0.0766000050337147,-1.3921908284581592,-1.0614543055744028,-1.5765116696695942,-1.0999748867047898,0.36,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-1.117651645204834,-1.2223845559270303,0.7988286239230257,-1.8894018386217877,-1.0614543055744028,-1.2742808199845537,-1.0999748867047898,0.42,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(-0.9431208397853792,-1.2223845559270303,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-1.0056311758200724,-1.0999748867047898,0.48,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(1.2385142279578056,2.076898995832837,-0.0766000050337147,0.5966532121963541,0.6101272780073337,0.6062666891668145,0.909111664354187,0.86,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(1.3257796306675331,1.9119348182448437,0.7988286239230257,1.5910752325236108,1.167321139201246,1.076403566454655,0.909111664354187,0.9,-2.3393672831410175,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.7444176972723957,-1.0999748867047898),(0.5403910062799865,0.9221497527168835,-0.0766000050337147,0.5966532121963541,0.6101272780073337,0.47194186708457386,0.909111664354187,0.79,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.4531256035702591,-0.06763531281107672,-0.0766000050337147,0.09944220203272576,0.052933416813421515,-0.2332634488471884,0.909111664354187,0.71,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.41952842352701486,-0.39756366798706344,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-0.8041439426967131,-1.0999748867047898,0.64,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.39219082845
81592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.24499761810756004,-0.2325994903990701,-0.952028633990455,0.5966532121963541,0.052933416813421515,-0.585866106813071,-1.0999748867047898,0.62,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.016798590021622126,-0.5625278455750569,-0.952028633990455,1.0938642223599824,0.6101272780073337,-0.2164728460869087,-1.0999748867047898,0.57,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.8021872144091686,0.7571855751288902,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.7573821140093348,0.909111664354187,0.74,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-0.07046681268810527,0.4272572199529034,-0.0766000050337147,0.5966532121963541,1.167321139201246,0.30403583948177093,0.909111664354187,0.69,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,1.167321139201246,0.9756599498929738,0.909111664354187,0.87,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(1.8493720469258974,1.582006463068857,0.7988286239230257,0.09944220203272576,1.167321139201246,1.4457968271808173,0.909111664354187,0.91,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(1.2385142279578056,1.4170422854808635,1.674257252879766,1.5910752325236108,1.724515000395158,1.3114720050985766,0.909111664354187,0.93,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-1.117651645204834,-0.7274920231630502,1.674257252879766,1.5910752325236108,0.6101272780073337,0.06896740083785215,-1.0999748867047898,0.68,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(-1.0303862424951067,0.09732886477691666,1.674257252879766,-0.3977688081309026,-0.5042604443804907,-0.199682243326629,-1.0999748867047898,0.61,-1.2049170479145614,-1.5523129111030172,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.5933022724298738,-1.0999748867047898),(0.36586020086053167,0.26229304236491,0.7988286239230257,0.5966532121963541,0.6101272780073337,0.13612981187897094,0.909111664354187,0.69,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-1.3794478533340162,-0.06763531281107672,-0.0766000050337147,0.09944220203272576,0.052933416813421515,-0.4347506819705508,0.909111664354187,0.62,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(0.2785947981508043,0.4272572199529034,-0.952028633990455,0.5966532121963541,0.052933416813421515,-0.06535742124438843,-1.0999748867047898,0.72,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-0.5067938262367422,-0.39756366798706344,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-0.2500
540516074711,0.909111664354187,0.59,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-0.5940592289464697,-0.2325994903990701,0.7988286239230257,1.0938642223599824,1.167321139201246,0.7405915112490521,0.909111664354187,0.66,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-1.553978658753471,-0.8924562007510436,-0.0766000050337147,0.5966532121963541,0.052933416813421515,0.03538619531728977,-1.0999748867047898,0.56,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-2.3393672831410175,-0.5625278455750569,0.7988286239230257,-1.3921908284581592,-1.0614543055744028,-1.9123237248751956,-1.0999748867047898,0.45,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-1.8157748668826532,-1.3873487335150236,-0.952028633990455,-0.3977688081309026,0.052933416813421515,-2.214554574560236,-1.0999748867047898,0.47,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(0.889452617118896,-0.5625278455750569,1.674257252879766,-0.3977688081309026,0.052933416813421515,0.4047794560434521,0.909111664354187,0.71,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,1.6137028547836172,0.909111664354187,0.94,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(1.5003104360869879,1.9119348182448437,1.674257252879766,1.0938642223599824,1.167321139201246,1.4793780327013768,0.909111664354187,0.94,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-0.5940592289464697,-0.2325994903990701,0.7988286239230257,-1.8894018386217877,-1.0614543055744028,-0.40116947644999135,-1.0999748867047898,0.57,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-0.7685900343659244,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.052933416813421515,-0.6362379150939101,-1.0999748867047898,0.61,-0.5067938262367422,-1.3873487335150236,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-0.2836352571280305,0.909111664354187),(-1.3794478533340162,-0.2325994903990701,0.7988286239230257,-0.8949798182945309,-0.5042604443804907,-0.2164728460869087,-1.0999748867047898,0.57,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.10254860635841155,0.909111664354187,0.64,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(0.5403910062799865,0.9221497527168835,-0.0766000050337147,0.5966532121963541,-0.5042604443804907,1.2107283885368956,0.909111664354187,0.85,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(0.19132939544
10769,0.7571855751288902,-0.0766000050337147,-0.8949798182945309,-1.618648166768315,0.18650162015981303,0.909111664354187,0.78,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(0.8021872144091686,0.7571855751288902,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.8413351278107333,0.909111664354187,0.84,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(1.4130450333772604,1.7469706406568504,1.674257252879766,1.5910752325236108,1.724515000395158,1.2611001968177347,0.909111664354187,0.92,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(1.9366374496356247,1.087113930304877,1.674257252879766,0.5966532121963541,1.167321139201246,1.9495149099892173,0.909111664354187,0.96,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(-1.2049170479145614,-0.39756366798706344,1.674257252879766,1.5910752325236108,1.167321139201246,0.08575800359813185,-1.0999748867047898,0.77,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(-0.681324631656197,-0.39756366798706344,1.674257252879766,0.09944220203272576,0.052933416813421515,-0.06535742124438843,-1.0999748867047898,0.71,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(0.5403910062799865,0.7571855751288902,1.674257252879766,0.5966532121963541,1.167321139201246,0.30403583948177093,-1.0999748867047898,0.79,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(1.4130450333772604,0.9221497527168835,1.674257252879766,0.5966532121963541,0.6101272780073337,1.1435659774957738,0.909111664354187,0.89,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(-0.24499761810756004,0.26229304236491,0.7988286239230257,0.09944220203272576,0.6101272780073337,0.2872452367214912,0.909111664354187,0.82,0.4531256035702591,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4047794560434521,0.909111664354187),(-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,1.5910752325236108,0.6101272780073337,-0.2500540516074711,-1.0999748867047898,0.76,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(-0.07046681268810527,-1.2223845559270303,-0.952028633990455,-1.8894018386217877,-0.5042604443804907,-0.7369815316555913,0.909111664354187,0.71,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(0.8021872144091686,1.4170422854808635,-0.952028633990455,1.0938642223599824,-0.5042604443804907,0.8077539222901738,0.909111664354187,0.8,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(0.10406399273134952,0.26229304236491,-1.8274572629471952,0.09944220203272576,0.052933416813421515,0.8749163333312926,-1.0999748867047898,0.78,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.05293341681342151
5,0.11933920911869125,0.909111664354187),(1.0639834225383509,0.4272572199529034,-0.952028633990455,0.5966532121963541,-0.5042604443804907,0.9252881416121347,0.909111664354187,0.84,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(1.3257796306675331,1.7469706406568504,-0.952028633990455,1.0938642223599824,0.052933416813421515,1.2778907995780144,0.909111664354187,0.9,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(1.2385142279578056,1.2520781078928702,1.674257252879766,0.5966532121963541,0.052933416813421515,1.412215621660255,0.909111664354187,0.92,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(2.023902852345352,2.076898995832837,0.7988286239230257,1.0938642223599824,0.6101272780073337,2.2181645541536983,0.909111664354187,0.97,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(0.7149218116994412,0.7571855751288902,-0.952028633990455,-0.3977688081309026,0.052933416813421515,0.6062666891668145,0.909111664354187,0.8,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(0.2785947981508043,0.9221497527168835,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,0.06896740083785215,0.909111664354187,0.81,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(-0.15773221539783266,-0.39756366798706344,-0.0766000050337147,-1.3921908284581592,-1.0614543055744028,-0.199682243326629,-1.0999748867047898,0.75,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(0.8021872144091686,1.087113930304877,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,0.8581257305710129,0.909111664354187,0.83,0.1913293954410769,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.11933920911869125,0.909111664354187),(1.9366374496356247,1.4170422854808635,0.7988286239230257,0.5966532121963541,0.052933416813421515,2.016677321030339,0.909111664354187,0.96,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-0.5067938262367422,-0.2325994903990701,-0.952028633990455,0.09944220203272576,-0.5042604443804907,-0.5690755040527913,0.909111664354187,0.79,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(1.5003104360869879,1.087113930304877,0.7988286239230257,0.5966532121963541,0.6101272780073337,1.3954250188999753,0.909111664354187,0.93,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(1.3257796306675331,1.4170422854808635,1.674257252879766,1.5910752325236108,1.724515000395158,1.1435659774957738,0.909111664354187,0.94,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(0.36586020086053167,0.7571855751288902,1.674257252879766,1.5910752325236108,1.724515000395158,0.7741727167696144,0.909111664354187,0.86,0.6276564089897139,0.4272572199529034,0.7988286239
230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(0.6276564089897139,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.6101272780073337,0.25366403120093184,-1.0999748867047898,0.79,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(0.8021872144091686,0.09732886477691666,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.4887324698448536,-1.0999748867047898,0.8,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-0.41952842352701486,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,0.15292041463925066,-1.0999748867047898,0.77,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-0.15773221539783266,-0.39756366798706344,-0.0766000050337147,-1.3921908284581592,-1.0614543055744028,-0.4347506819705508,-1.0999748867047898,0.7,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-0.681324631656197,-0.5625278455750569,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.5690755040527913,-1.0999748867047898,0.65,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-0.9431208397853792,-0.2325994903990701,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.7705627371761508,-1.0999748867047898,0.61,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-1.7285094641729257,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.1735372034228724,-1.0999748867047898,0.52,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-0.15773221539783266,-0.7274920231630502,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.2406996144639928,-1.0999748867047898,0.57,0.6276564089897139,0.4272572199529034,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9252881416121347,0.909111664354187),(-1.6412440614631982,-1.3873487335150236,-1.8274572629471952,-1.8894018386217877,-0.5042604443804907,-1.9123237248751956,-1.0999748867047898,0.53,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(0.10406399273134952,0.26229304236491,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.1661010378060696,-1.0999748867047898,0.67,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(0.016798590021622126,-0.39756366798706344,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-0.06535742124438843,-1.0999748867047898,0.68,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.690219702968213,0.909111664354187,0.81,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(0.4531256035702591,0.4272572199529034,1.674257252879766,1.0938642223599824,0.6101272780073337,0.62305729
19270941,-1.0999748867047898,0.78,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(-1.2921824506242887,-0.8924562007510436,-0.0766000050337147,0.09944220203272576,1.724515000395158,-0.45154128473083044,-1.0999748867047898,0.65,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(-0.3322630208172874,-0.8924562007510436,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,-0.5522849012925116,-1.0999748867047898,0.64,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(-2.0775710750118352,-1.7172770886910105,-0.952028633990455,-1.3921908284581592,0.6101272780073337,-1.3414432310256739,0.909111664354187,0.64,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(-0.5067938262367422,-1.3873487335150236,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-1.0392123813406318,-1.0999748867047898,0.65,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(-0.41952842352701486,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-0.9384687647789537,0.909111664354187,0.68,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(1.5003104360869879,1.582006463068857,1.674257252879766,0.5966532121963541,1.167321139201246,0.7909633195298942,0.909111664354187,0.89,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(0.4531256035702591,0.4272572199529034,0.7988286239230257,0.5966532121963541,1.724515000395158,0.8917069360915754,0.909111664354187,0.86,-1.4667132560437435,-1.2223845559270303,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5690755040527913,-1.0999748867047898),(0.5403910062799865,0.9221497527168835,0.7988286239230257,0.5966532121963541,1.167321139201246,1.0596129636943752,0.909111664354187,0.89,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(0.36586020086053167,0.5922213975408968,0.7988286239230257,0.5966532121963541,0.6101272780073337,0.6230572919270941,0.909111664354187,0.87,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(0.2785947981508043,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.052933416813421515,0.4551512643242912,0.909111664354187,0.85,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(1.0639834225383509,1.9119348182448437,0.7988286239230257,1.0938642223599824,1.167321139201246,0.9420787443724145,0.909111664354187,0.9,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(0.1913293954410769,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,-1.0999748867047898,0.82,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-0.681324631656197,0.09732886477691666,-0.07
66000050337147,-0.8949798182945309,-0.5042604443804907,-0.8041439426967131,-1.0999748867047898,0.72,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-0.8558554370756518,-0.8924562007510436,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.5522849012925116,-1.0999748867047898,0.73,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-1.4667132560437435,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-0.7369815316555913,-1.0999748867047898,0.71,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-1.0303862424951067,-0.06763531281107672,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.30042585988831016,-1.0999748867047898,0.71,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-1.553978658753471,-1.2223845559270303,-0.952028633990455,-0.3977688081309026,0.052933416813421515,-1.2071184089434333,-1.0999748867047898,0.68,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-0.24499761810756004,0.4272572199529034,-0.0766000050337147,0.5966532121963541,0.6101272780073337,0.3376170450023333,-1.0999748867047898,0.75,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(-0.07046681268810527,-0.2325994903990701,-0.952028633990455,-0.8949798182945309,0.6101272780073337,-0.46833188749111015,-1.0999748867047898,0.72,2.023902852345352,0.9221497527168835,0.7988286239230257,1.5910752325236108,1.724515000395158,1.915933704468658,0.909111664354187),(0.889452617118896,0.9221497527168835,0.7988286239230257,1.0938642223599824,1.167321139201246,0.8581257305710129,0.909111664354187,0.89,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(0.016798590021622126,-0.06763531281107672,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.13612981187897094,0.909111664354187,0.84,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(1.5875758387967152,1.7469706406568504,1.674257252879766,1.0938642223599824,0.052933416813421515,1.412215621660255,0.909111664354187,0.93,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(1.2385142279578056,1.2520781078928702,1.674257252879766,1.0938642223599824,0.052933416813421515,1.2778907995780144,0.909111664354187,0.93,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(0.6276564089897139,0.7571855751288902,1.674257252879766,1.5910752325236108,1.724515000395158,0.8077539222901738,0.909111664354187,0.88,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(0.6276564089897139,0.5922213975408968,1.674257252879766,1.0938642223599824,0.6101272780073337,0.9420787443724145,0.909111664354187,0.9,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(0.5403910062799865,0.4272572199529034,1.674257252879766,0.59
66532121963541,1.724515000395158,0.6398478946873739,0.909111664354187,0.87,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(0.4531256035702591,1.087113930304877,1.674257252879766,1.0938642223599824,0.6101272780073337,0.572685483646252,0.909111664354187,0.86,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(1.6748412415064426,1.7469706406568504,1.674257252879766,1.0938642223599824,1.724515000395158,1.5633310465027752,0.909111664354187,0.94,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(-0.07046681268810527,0.26229304236491,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.27045463396121155,-1.0999748867047898,0.77,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(-0.8558554370756518,-0.06763531281107672,-0.952028633990455,-0.3977688081309026,0.052933416813421515,-0.1325198322855102,0.909111664354187,0.78,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(-0.9431208397853792,-0.39756366798706344,-0.952028633990455,-0.3977688081309026,-1.0614543055744028,-0.5690755040527913,-1.0999748867047898,0.73,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(-0.5940592289464697,-0.2325994903990701,-0.952028633990455,0.09944220203272576,-1.0614543055744028,-0.45154128473083044,-1.0999748867047898,0.73,1.5875758387967152,1.582006463068857,1.674257252879766,1.5910752325236108,1.724515000395158,2.0502585265508984,0.909111664354187),(-0.5067938262367422,-0.5625278455750569,-0.0766000050337147,1.0938642223599824,1.167321139201246,-0.2836352571280305,-1.0999748867047898,0.7,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-0.3322630208172874,-0.06763531281107672,-0.0766000050337147,0.5966532121963541,1.167321139201246,0.15292041463925066,-1.0999748867047898,0.72,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(0.016798590021622126,-0.7274920231630502,-0.0766000050337147,-0.8949798182945309,-0.5042604443804907,-0.0989386267649508,0.909111664354187,0.73,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-0.15773221539783266,0.4272572199529034,-0.952028633990455,0.09944220203272576,-0.5042604443804907,-0.2332634488471884,0.909111664354187,0.72,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(2.023902852345352,2.076898995832837,1.674257252879766,1.0938642223599824,1.167321139201246,2.2013739513934185,0.909111664354187,0.97,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(1.5003104360869879,2.076898995832837,1.674257252879766,0.5966532121963541,1.724515000395158,2.1342115403522968,0.909111664354187,0.97,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-1.6412440614631982,-0.39756366798
706344,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.0989386267649508,-1.0999748867047898,0.69,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-1.9030402695923805,-1.3873487335150236,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-1.5933022724298738,-1.0999748867047898,0.57,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-0.15773221539783266,-1.3873487335150236,-0.952028633990455,0.09944220203272576,-0.5042604443804907,-1.1903278061831537,-1.0999748867047898,0.63,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-0.5940592289464697,-0.8924562007510436,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.972049970299513,0.909111664354187,0.66,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-1.0303862424951067,-0.2325994903990701,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-0.7369815316555913,-1.0999748867047898,0.64,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(-1.3794478533340162,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.8041439426967131,0.909111664354187,0.68,-1.2921824506242887,-1.057420378339037,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-1.0727935868611929,-1.0999748867047898),(0.7149218116994412,0.09732886477691666,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.7741727167696144,0.909111664354187,0.79,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(0.9767180198286235,0.4272572199529034,0.7988286239230257,1.5910752325236108,0.6101272780073337,0.908497538851855,0.909111664354187,0.82,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(1.8493720469258974,2.076898995832837,0.7988286239230257,1.5910752325236108,1.724515000395158,1.7816088823864173,0.909111664354187,0.95,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(1.4130450333772604,1.9119348182448437,1.674257252879766,1.5910752325236108,1.167321139201246,1.9830961155097766,0.909111664354187,0.96,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(1.2385142279578056,1.582006463068857,0.7988286239230257,1.0938642223599824,1.724515000395158,1.3786344161396955,0.909111664354187,0.94,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(1.1512488252480781,1.4170422854808635,1.674257252879766,1.5910752325236108,1.167321139201246,1.2778907995780144,0.909111664354187,0.93,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(0.4531256035702591,0.7571855751288902,0.7988286239230257,1.0938642223599824,1.167321139201246,1.1099847719752143,0.909111664354187,0.91,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.39219082845
81592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(0.36586020086053167,0.26229304236491,0.7988286239230257,0.5966532121963541,0.6101272780073337,0.8917069360915754,0.909111664354187,0.85,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(0.6276564089897139,0.4272572199529034,0.7988286239230257,-0.3977688081309026,0.052933416813421515,0.6230572919270941,0.909111664354187,0.84,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(-0.41952842352701486,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.052933416813421515,-0.30042585988831016,-1.0999748867047898,0.74,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(-0.3322630208172874,-0.7274920231630502,-0.0766000050337147,0.5966532121963541,0.6101272780073337,0.25366403120093184,-1.0999748867047898,0.76,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(-0.07046681268810527,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.06535742124438843,-1.0999748867047898,0.75,-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8377251482172725,-1.0999748867047898),(0.6276564089897139,0.9221497527168835,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.32082644224205065,-1.0999748867047898,0.76,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-0.7685900343659244,0.26229304236491,-0.952028633990455,-0.3977688081309026,0.6101272780073337,-0.2500540516074711,-1.0999748867047898,0.71,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-1.0303862424951067,-0.39756366798706344,-0.952028633990455,-0.3977688081309026,-1.618648166768315,-0.6194473123336305,-1.0999748867047898,0.67,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-1.8157748668826532,-1.3873487335150236,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-0.9552593675392334,-1.0999748867047898,0.61,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-0.9431208397853792,0.4272572199529034,-0.952028633990455,0.09944220203272576,0.6101272780073337,-0.2500540516074711,-1.0999748867047898,0.63,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-0.41952842352701486,0.4272572199529034,-0.952028633990455,0.09944220203272576,-0.5042604443804907,-0.1157292295252305,-1.0999748867047898,0.64,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(0.10406399273134952,0.7571855751288902,-0.0766000050337147,0.5966532121963541,0.052933416813421515,0.11933920911869125,-1.0999748867047898,0.71,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(0.6276564089897139,0.5922213975408968,0.7988286239230257,-0.3977688081309026,-0.5
042604443804907,0.690219702968213,0.909111664354187,0.82,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-0.3322630208172874,-0.5625278455750569,-0.0766000050337147,0.5966532121963541,1.167321139201246,0.08575800359813185,-1.0999748867047898,0.73,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(0.1913293954410769,-0.2325994903990701,-0.0766000050337147,0.09944220203272576,-1.0614543055744028,-0.45154128473083044,0.909111664354187,0.74,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-0.41952842352701486,-0.06763531281107672,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-0.5522849012925116,-1.0999748867047898,0.69,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(-1.117651645204834,-1.2223845559270303,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-0.8880969564981116,-1.0999748867047898,0.64,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(1.1512488252480781,0.9221497527168835,1.674257252879766,1.5910752325236108,0.6101272780073337,1.1939377857766158,0.909111664354187,0.91,-1.8157748668826532,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.3414432310256739,-1.0999748867047898),(0.8021872144091686,0.5922213975408968,1.674257252879766,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187,0.88,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(0.7149218116994412,0.7571855751288902,0.7988286239230257,0.5966532121963541,1.167321139201246,0.9588693471326941,0.909111664354187,0.85,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(1.0639834225383509,1.087113930304877,1.674257252879766,1.0938642223599824,1.724515000395158,0.9924505526532535,0.909111664354187,0.86,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(-0.5940592289464697,-0.5625278455750569,-0.0766000050337147,-1.3921908284581592,0.052933416813421515,-0.3843788736897117,-1.0999748867047898,0.7,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(-1.553978658753471,-1.2223845559270303,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.1903278061831537,-1.0999748867047898,0.59,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(-1.8157748668826532,-1.057420378339037,-1.8274572629471952,-0.8949798182945309,-0.5042604443804907,-1.5429304641490347,-1.0999748867047898,0.6,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(0.016798590021622126,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-0.753772134415871,-1.0999748867047898,0.65,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(
0.6276564089897139,1.2520781078928702,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.27045463396121155,0.909111664354187,0.7,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(0.7149218116994412,1.087113930304877,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.7405915112490521,0.909111664354187,0.76,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(-0.24499761810756004,-0.06763531281107672,-0.952028633990455,-0.8949798182945309,0.6101272780073337,-0.06535742124438843,-1.0999748867047898,0.63,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(0.9767180198286235,0.4272572199529034,0.7988286239230257,0.5966532121963541,-1.0614543055744028,0.7070103057284927,0.909111664354187,0.81,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(-0.07046681268810527,-0.39756366798706344,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.22008282568037243,-1.0999748867047898,0.72,0.2785947981508043,0.09732886477691666,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.2668446543677508,0.909111664354187),(-0.5067938262367422,-0.5625278455750569,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-0.199682243326629,-1.0999748867047898,0.71,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.4551512643242912,0.909111664354187,0.8,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.36586020086053167,0.5922213975408968,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.3879888532831724,0.909111664354187,0.77,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.2785947981508043,-0.5625278455750569,-0.0766000050337147,-0.3977688081309026,-1.0614543055744028,-0.04856681848410872,0.909111664354187,0.74,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.07046681268810527,-1.3873487335150236,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,0.6734291002079332,-1.0999748867047898,0.7,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.10406399273134952,-1.2223845559270303,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-0.0989386267649508,0.909111664354187,0.71,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(1.5875758387967152,1.2520781078928702,0.7988286239230257,1.0938642223599824,1.167321139201246,1.8151900879069767,0.909111664354187,0.93,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.36586020086053167,1.087113930304877,0.7988286239230257,0.5966532121963541,1.724515000395158,0.8749163333312926,-1.0999748867047898,0.85,-0.24499761810756004,-0.89245620075
10436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.8558554370756518,0.4272572199529034,0.7988286239230257,0.5966532121963541,1.167321139201246,-0.3843788736897117,-1.0999748867047898,0.79,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(-0.681324631656197,-1.3873487335150236,-0.0766000050337147,0.5966532121963541,0.6101272780073337,-0.06535742124438843,-1.0999748867047898,0.76,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.6276564089897139,-1.2223845559270303,-0.0766000050337147,0.5966532121963541,1.724515000395158,0.06896740083785215,0.909111664354187,0.78,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(0.8021872144091686,-0.8924562007510436,0.7988286239230257,1.5910752325236108,1.724515000395158,0.27045463396121155,0.909111664354187,0.77,-0.24499761810756004,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.6026567095733507,-1.0999748867047898),(1.2385142279578056,1.9119348182448437,0.7988286239230257,1.5910752325236108,1.167321139201246,1.244309594057455,0.909111664354187,0.9,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(0.889452617118896,0.09732886477691666,1.674257252879766,1.5910752325236108,0.052933416813421515,0.8917069360915754,0.909111664354187,0.87,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.41952842352701486,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,0.6101272780073337,-0.8545157509775522,-1.0999748867047898,0.71,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.7685900343659244,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,0.6101272780073337,-0.40116947644999135,0.909111664354187,0.7,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(0.6276564089897139,0.5922213975408968,-0.0766000050337147,-0.8949798182945309,-2.175842027962227,0.32082644224205065,0.909111664354187,0.7,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(0.7149218116994412,0.4272572199529034,-0.952028633990455,-0.3977688081309026,-1.0614543055744028,0.27045463396121155,0.909111664354187,0.75,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.3322630208172874,-0.8924562007510436,-0.0766000050337147,-0.8949798182945309,-1.0614543055744028,0.13612981187897094,-1.0999748867047898,0.71,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.41952842352701486,-0.39756366798706344,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.2500540516074711,-1.0999748867047898,0.72,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.24499761810756004,-0.06763531281107672,-0.07660000503
37147,-0.3977688081309026,0.052933416813421515,-0.7201909288953117,0.909111664354187,0.73,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(0.889452617118896,0.9221497527168835,0.7988286239230257,1.0938642223599824,1.724515000395158,0.908497538851855,-1.0999748867047898,0.83,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.7685900343659244,0.09732886477691666,0.7988286239230257,1.0938642223599824,1.724515000395158,-0.4347506819705508,-1.0999748867047898,0.77,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-0.9431208397853792,-0.39756366798706344,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-0.6362379150939101,0.909111664354187,0.72,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-1.553978658753471,-1.8822412662790038,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.2406996144639928,-1.0999748867047898,0.54,0.10406399273134952,-0.2325994903990701,-0.0766000050337147,-1.3921908284581592,-0.5042604443804907,0.08575800359813185,-1.0999748867047898),(-1.990305672302108,-2.0472054438669973,-1.8274572629471952,-1.8894018386217877,-2.175842027962227,-1.610092875190155,-1.0999748867047898,0.49,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(-0.41952842352701486,-1.3873487335150236,-1.8274572629471952,-2.386612848785416,-2.175842027962227,-0.9888405730597928,0.909111664354187,0.52,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(-0.15773221539783266,-1.2223845559270303,-1.8274572629471952,-1.3921908284581592,-1.0614543055744028,-1.0895841896214724,-1.0999748867047898,0.58,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(0.4531256035702591,0.4272572199529034,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.6062666891668145,0.909111664354187,0.78,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(1.0639834225383509,0.9221497527168835,1.674257252879766,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187,0.89,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(0.2785947981508043,-1.057420378339037,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,0.03538619531728977,-1.0999748867047898,0.7,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(-0.7685900343659244,-0.7274920231630502,-0.952028633990455,-0.3977688081309026,0.052933416813421515,-0.1828916405663493,-1.0999748867047898,0.66,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(-1.117651645204834,-0.8924562007510436,-0.952028633990455,-0.3977688081309026,0.6101272780073337,0.22008282568037243,-1.0999748867047898,0.67,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.874916333
3312926,0.909111664354187),(-0.5067938262367422,-0.8924562007510436,-0.0766000050337147,1.0938642223599824,0.6101272780073337,0.06896740083785215,0.909111664354187,0.68,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(0.016798590021622126,0.4272572199529034,-0.0766000050337147,0.5966532121963541,1.167321139201246,0.8581257305710129,0.909111664354187,0.8,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(-0.41952842352701486,-0.2325994903990701,-0.0766000050337147,0.5966532121963541,0.052933416813421515,0.32082644224205065,0.909111664354187,0.81,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(0.36586020086053167,0.5922213975408968,-0.0766000050337147,-0.8949798182945309,-0.5042604443804907,0.5055230726051333,0.909111664354187,0.8,0.8021872144091686,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.8749163333312926,0.909111664354187),(2.023902852345352,0.7571855751288902,0.7988286239230257,1.5910752325236108,1.167321139201246,1.7816088823864173,0.909111664354187,0.94,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(1.2385142279578056,1.4170422854808635,1.674257252879766,0.5966532121963541,0.6101272780073337,1.1099847719752143,0.909111664354187,0.93,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(1.6748412415064426,1.7469706406568504,1.674257252879766,1.0938642223599824,0.6101272780073337,0.9924505526532535,0.909111664354187,0.92,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(0.6276564089897139,1.087113930304877,1.674257252879766,1.5910752325236108,1.167321139201246,0.8077539222901738,0.909111664354187,0.89,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-0.24499761810756004,-0.5625278455750569,0.7988286239230257,1.5910752325236108,1.724515000395158,0.7070103057284927,-1.0999748867047898,0.82,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-0.3322630208172874,0.26229304236491,-0.0766000050337147,0.5966532121963541,0.052933416813421515,0.6734291002079332,-1.0999748867047898,0.79,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-0.8558554370756518,-0.39756366798706344,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-1.5933022724298738,-1.0999748867047898,0.58,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-1.4667132560437435,-0.8924562007510436,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.223909011703713,-1.0999748867047898,0.56,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-1.2921824506242887,-1.3873487335150236,-0.952028633990455,-2.386612848785416,-1.618648166768315,-1.056002984100913,-1.0999748867047898,0.56,0.016798590021622126,-0.562527845
5750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-0.41952842352701486,-1.5523129111030172,-1.8274572629471952,0.09944220203272576,-0.5042604443804907,-0.7034003261350319,0.909111664354187,0.64,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-0.07046681268810527,-1.057420378339037,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-0.46833188749111015,0.909111664354187,0.61,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(0.016798590021622126,-1.2223845559270303,-0.952028633990455,-0.3977688081309026,-1.0614543055744028,-0.04856681848410872,-1.0999748867047898,0.68,0.016798590021622126,-0.5625278455750569,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,0.27045463396121155,-1.0999748867047898),(-0.5940592289464697,-0.06763531281107672,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.11933920911869125,-1.0999748867047898,0.76,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.2785947981508043,2.076898995832837,-0.0766000050337147,0.5966532121963541,1.167321139201246,0.8581257305710129,-1.0999748867047898,0.86,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(1.1512488252480781,1.087113930304877,-0.0766000050337147,1.0938642223599824,1.167321139201246,1.076403566454655,0.909111664354187,0.9,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(-1.0303862424951067,0.7571855751288902,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.08575800359813185,-1.0999748867047898,0.71,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(-0.681324631656197,-0.2325994903990701,-0.952028633990455,-0.8949798182945309,-1.0614543055744028,-1.0056311758200724,-1.0999748867047898,0.62,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.1913293954410769,0.09732886477691666,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,0.27045463396121155,-1.0999748867047898,0.66,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.4531256035702591,-0.39756366798706344,-0.952028633990455,-0.3977688081309026,-0.5042604443804907,-0.2500540516074711,0.909111664354187,0.65,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.5403910062799865,-0.06763531281107672,-0.0766000050337147,0.09944220203272576,0.052933416813421515,-0.08214802400466813,0.909111664354187,0.73,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(-0.3322630208172874,-0.2325994903990701,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-0.2836352571280305,-1.0999748867047898,0.62,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.36586020086053167,0.26229304236491,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.33
76170450023333,0.909111664354187,0.74,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.5403910062799865,0.4272572199529034,-0.0766000050337147,0.5966532121963541,0.052933416813421515,0.8413351278107333,0.909111664354187,0.79,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(0.7149218116994412,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.6101272780073337,0.6734291002079332,0.909111664354187,0.8,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(-0.41952842352701486,0.09732886477691666,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-0.1157292295252305,-1.0999748867047898,0.69,1.0639834225383509,0.5922213975408968,0.7988286239230257,1.0938642223599824,0.6101272780073337,1.0596129636943752,0.909111664354187),(-0.7685900343659244,0.4272572199529034,0.7988286239230257,0.09944220203272576,-0.5042604443804907,0.0018049897967303734,-1.0999748867047898,0.7,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(0.2785947981508043,-0.5625278455750569,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.23687342844065212,0.909111664354187,0.76,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(0.9767180198286235,0.09732886477691666,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.9756599498929738,0.909111664354187,0.84,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(-0.5067938262367422,-0.06763531281107672,0.7988286239230257,1.0938642223599824,1.167321139201246,0.6734291002079332,0.909111664354187,0.78,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(-1.3794478533340162,-1.2223845559270303,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,-0.9384687647789537,-1.0999748867047898,0.67,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(-1.0303862424951067,-0.39756366798706344,-0.952028633990455,-0.3977688081309026,0.6101272780073337,-0.7873533399364304,-1.0999748867047898,0.66,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(-0.7685900343659244,-0.5625278455750569,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-0.8880969564981116,-1.0999748867047898,0.65,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(-1.6412440614631982,-1.057420378339037,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.2406996144639928,-1.0999748867047898,0.54,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(-1.4667132560437435,-1.3873487335150236,-1.8274572629471952,-2.386612848785416,-1.0614543055744028,-0.9888405730597928,-1.0999748867047898,0.58,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(0.6276564089897139,0.5922213975408
968,-0.0766000050337147,-0.8949798182945309,-1.618648166768315,0.3376170450023333,0.909111664354187,0.79,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(0.889452617118896,0.9221497527168835,0.7988286239230257,0.09944220203272576,-0.5042604443804907,0.15292041463925066,0.909111664354187,0.8,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(0.016798590021622126,-0.2325994903990701,-0.0766000050337147,0.5966532121963541,0.052933416813421515,-0.1661010378060696,0.909111664354187,0.75,0.6276564089897139,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.7405915112490521,0.909111664354187),(0.5403910062799865,-0.5625278455750569,-0.0766000050337147,0.5966532121963541,0.6101272780073337,-0.2668446543677508,0.909111664354187,0.73,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-0.24499761810756004,-0.06763531281107672,-0.952028633990455,-0.8949798182945309,0.6101272780073337,-0.5522849012925116,-1.0999748867047898,0.72,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-1.0303862424951067,-0.8924562007510436,-0.952028633990455,-1.3921908284581592,-1.0614543055744028,-0.7034003261350319,-1.0999748867047898,0.62,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-0.15773221539783266,-0.5625278455750569,-0.0766000050337147,-0.3977688081309026,-1.0614543055744028,-0.45154128473083044,-1.0999748867047898,0.67,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(0.8021872144091686,1.4170422854808635,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.908497538851855,0.909111664354187,0.81,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-1.553978658753471,-1.2223845559270303,-0.0766000050337147,-1.3921908284581592,-1.618648166768315,-0.972049970299513,-1.0999748867047898,0.63,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-1.9030402695923805,-1.057420378339037,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.2406996144639928,-1.0999748867047898,0.69,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(0.6276564089897139,0.7571855751288902,0.7988286239230257,0.5966532121963541,0.052933416813421515,0.2872452367214912,0.909111664354187,0.8,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-1.7285094641729257,-1.8822412662790038,-0.952028633990455,-0.8949798182945309,-2.175842027962227,-1.1903278061831537,-1.0999748867047898,0.43,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(0.889452617118896,0.9221497527168835,-0.0766000050337147,0.09944220203272576,-0.5042604443804907,0.10254860635841155,0.909111664354187,0.8,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309
026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-0.5067938262367422,-0.39756366798706344,-0.952028633990455,-0.3977688081309026,-1.618648166768315,-0.8041439426967131,0.909111664354187,0.73,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(-0.7685900343659244,-0.2325994903990701,-0.0766000050337147,0.09944220203272576,-1.0614543055744028,-0.6530285178541898,0.909111664354187,0.75,0.7149218116994412,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,0.8581257305710129,0.909111664354187),(0.1913293954410769,0.09732886477691666,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.0989386267649508,0.909111664354187,0.71,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-0.41952842352701486,-0.06763531281107672,0.7988286239230257,1.0938642223599824,0.6101272780073337,0.08575800359813185,0.909111664354187,0.73,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(0.7149218116994412,0.5922213975408968,0.7988286239230257,0.5966532121963541,1.167321139201246,0.8581257305710129,0.909111664354187,0.83,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(0.1913293954410769,0.4272572199529034,-0.0766000050337147,-0.3977688081309026,-1.0614543055744028,0.32082644224205065,-1.0999748867047898,0.72,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(1.3257796306675331,1.7469706406568504,1.674257252879766,1.5910752325236108,1.724515000395158,1.462587429941097,0.909111664354187,0.94,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(0.5403910062799865,0.09732886477691666,1.674257252879766,0.5966532121963541,0.6101272780073337,0.23687342844065212,0.909111664354187,0.81,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(0.6276564089897139,-0.06763531281107672,1.674257252879766,0.09944220203272576,0.6101272780073337,0.10254860635841155,0.909111664354187,0.81,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-0.41952842352701486,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.2332634488471884,0.909111664354187,0.75,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(0.8021872144091686,0.4272572199529034,-0.0766000050337147,0.09944220203272576,0.052933416813421515,0.27045463396121155,0.909111664354187,0.79,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-0.7685900343659244,-0.2325994903990701,-0.0766000050337147,-0.3977688081309026,-0.5042604443804907,-0.6026567095733507,-1.0999748867047898,0.58,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-1.0303862424951067,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,0.052933416813421515,-0.7873533399364304,-1.0999748867047898,0.59,1.1512488252480781,1.4170422854808635,0.798
8286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-1.9030402695923805,-1.8822412662790038,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-2.1138109579985565,-1.0999748867047898,0.47,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-0.07046681268810527,-1.5523129111030172,-1.8274572629471952,-1.8894018386217877,-1.618648166768315,-1.9626955331560363,-1.0999748867047898,0.49,1.1512488252480781,1.4170422854808635,0.7988286239230257,1.5910752325236108,1.167321139201246,1.4290062244205346,0.909111664354187),(-1.117651645204834,-1.7172770886910105,-0.952028633990455,-1.8894018386217877,-1.618648166768315,-1.610092875190155,-1.0999748867047898,0.47,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-1.553978658753471,-2.2121696214549904,-1.8274572629471952,-2.386612848785416,-2.7330358891561395,-2.1138109579985565,-1.0999748867047898,0.42,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-1.2921824506242887,-1.3873487335150236,-1.8274572629471952,-1.3921908284581592,-1.618648166768315,-2.2649263828410766,-1.0999748867047898,0.57,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-0.3322630208172874,-1.057420378339037,-0.0766000050337147,-0.8949798182945309,-0.5042604443804907,-0.9384687647789537,-1.0999748867047898,0.62,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(0.10406399273134952,-0.06763531281107672,-0.0766000050337147,-0.3977688081309026,0.052933416813421515,-0.5522849012925116,0.909111664354187,0.74,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(0.7149218116994412,0.4272572199529034,0.7988286239230257,0.09944220203272576,0.6101272780073337,0.11933920911869125,0.909111664354187,0.73,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-1.2049170479145614,-1.2223845559270303,-0.952028633990455,-0.3977688081309026,0.052933416813421515,-0.9048875592583913,0.909111664354187,0.64,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-1.4667132560437435,-0.8924562007510436,-0.0766000050337147,0.09944220203272576,-1.0614543055744028,-0.7201909288953117,-1.0999748867047898,0.63,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-1.7285094641729257,-1.5523129111030172,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-1.5597210669093144,-1.0999748867047898,0.59,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(0.016798590021622126,-0.2325994903990701,-0.952028633990455,-1.3921908284581592,0.052933416813421515,-0.8041439426967131,-1.0999748867047898,0.73,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(0.889452617118896,0.26229304236491,-0.0766000050337147,0.
09944220203272576,0.6101272780073337,0.2872452367214912,0.909111664354187,0.79,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-1.3794478533340162,-0.5625278455750569,-0.952028633990455,0.09944220203272576,0.052933416813421515,-1.1903278061831537,0.909111664354187,0.68,-0.41952842352701486,-0.7274920231630502,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.30403583948177093,-1.0999748867047898),(-0.24499761810756004,-0.39756366798706344,-0.952028633990455,-0.8949798182945309,-1.618648166768315,-1.610092875190155,-1.0999748867047898,0.7,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(0.36586020086053167,-0.06763531281107672,-0.952028633990455,-1.3921908284581592,-2.175842027962227,-0.2668446543677508,-1.0999748867047898,0.81,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(0.4531256035702591,0.4272572199529034,-0.0766000050337147,0.5966532121963541,1.724515000395158,0.06896740083785215,0.909111664354187,0.85,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(1.5003104360869879,1.4170422854808635,0.7988286239230257,0.5966532121963541,0.052933416813421515,1.580121649263055,0.909111664354187,0.93,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(1.8493720469258974,1.2520781078928702,1.674257252879766,1.0938642223599824,1.724515000395158,1.0596129636943752,0.909111664354187,0.91,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(-0.9431208397853792,-0.7274920231630502,-0.952028633990455,-0.8949798182945309,-0.5042604443804907,-0.40116947644999135,-1.0999748867047898,0.69,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(-0.3322630208172874,-0.8924562007510436,-0.0766000050337147,0.09944220203272576,0.6101272780073337,0.5055230726051333,0.909111664354187,0.77,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(1.1512488252480781,1.087113930304877,0.7988286239230257,1.0938642223599824,-0.5042604443804907,0.9588693471326941,0.909111664354187,0.86,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(0.2785947981508043,-0.5625278455750569,-0.0766000050337147,0.09944220203272576,1.167321139201246,-0.4347506819705508,0.909111664354187,0.74,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(-0.5067938262367422,-1.5523129111030172,-1.8274572629471952,-2.386612848785416,-1.0614543055744028,-1.9123237248751956,-1.0999748867047898,0.57,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(-1.6412440614631982,-2.5420979766309775,-1.8274572629471952,-1.3921908284581592,-1.618648166768315,-1.2071184089434333,-1.0999748867047898,0.51,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187),(-1.3794478533340162,-1.55231291110301
72,-1.8274572629471952,-1.3921908284581592,-0.5042604443804907,-0.9552593675392334,0.909111664354187,0.67,1.4130450333772604,1.582006463068857,0.7988286239230257,1.5910752325236108,0.6101272780073337,1.7816088823864173,0.909111664354187); +DROP TABLE IF EXISTS model; +create table model engine = Memory as select stochasticLinearRegressionState(0.1, 0.0, 5, 'SGD')(target, param1, param2, param3, param4, param5, param6, param7) as state from defaults; +with (select state from model) as model select round(evalMLMethod(model, predict1, predict2, predict3, predict4, predict5, predict6, predict7), 12) from defaults; + +DROP TABLE defaults; +DROP TABLE model; diff --git a/parser/testdata/00947_ml_test/ast.json b/parser/testdata/00947_ml_test/ast.json new file mode 100644 index 000000000..0ae4c9ddc --- /dev/null +++ b/parser/testdata/00947_ml_test/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery defaults (children 1)" + }, + { + "explain": " Identifier defaults" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001121271, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/00947_ml_test/metadata.json b/parser/testdata/00947_ml_test/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00947_ml_test/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00947_ml_test/query.sql b/parser/testdata/00947_ml_test/query.sql new file mode 100644 index 000000000..72000103a --- /dev/null +++ b/parser/testdata/00947_ml_test/query.sql @@ -0,0 +1,50 @@ +DROP TABLE IF EXISTS defaults; +CREATE TABLE IF NOT EXISTS defaults +( + param1 Float64, + param2 Float64, + target Float64, + predict1 Float64, + predict2 Float64 +) ENGINE = Memory; +insert into defaults values (-3.273, -1.452, 4.267, 20.0, 40.0), (0.121, -0.615, 4.290, 20.0, 40.0), (-1.099, 2.755, -3.060, 20.0, 40.0), (1.090, 2.945, -2.346, 20.0, 40.0), (0.305, 2.179, -1.205, 20.0, 40.0), (-0.925, 0.702, 1.134, 20.0, 40.0), (3.178, -1.316, 7.221, 20.0, 40.0), (-2.756, -0.473, 2.569, 20.0, 40.0), (3.665, 2.303, 0.226, 20.0, 40.0), (1.662, 1.951, -0.070, 20.0, 40.0), (2.869, 0.593, 3.249, 20.0, 40.0), (0.818, -0.593, 4.594, 20.0, 40.0), (-1.917, 0.916, 0.209, 20.0, 40.0), (2.706, 1.523, 1.307, 20.0, 40.0), (0.219, 2.162, -1.214, 20.0, 40.0), (-4.510, 1.376, -2.007, 20.0, 40.0), (4.284, -0.515, 6.173, 20.0, 40.0), (-1.101, 2.810, -3.170, 20.0, 40.0), (-1.810, -1.117, 4.329, 20.0, 40.0), (0.055, 1.115, 0.797, 20.0, 40.0), (-2.178, 2.904, -3.898, 20.0, 40.0), (-3.494, -1.814, 4.882, 20.0, 40.0), (3.027, 0.476, 3.562, 20.0, 40.0), (-1.434, 1.151, -0.018, 20.0, 40.0), (1.180, 0.992, 1.606, 20.0, 40.0), (0.015, 0.971, 1.067, 20.0, 40.0), (-0.511, -0.875, 4.495, 20.0, 40.0), (0.961, 2.348, -1.216, 20.0, 40.0), (-2.279, 0.038, 1.785, 20.0, 40.0), (-1.568, -0.248, 2.712, 20.0, 40.0), (-0.496, 0.366, 2.020, 20.0, 40.0), (1.177, -1.401, 6.390, 20.0, 40.0), (2.882, -1.442, 7.325, 20.0, 40.0), (-1.066, 1.817, -1.167, 20.0, 40.0), (-2.144, 2.791, -3.655, 20.0, 40.0), (-4.370, 2.228, -3.642, 20.0, 40.0), (3.996, 2.775, -0.553, 20.0, 40.0), (0.289, 2.055, -0.965, 20.0, 40.0), (-0.588, -1.601, 5.908, 20.0, 40.0), (-1.801, 0.417, 1.265, 20.0, 40.0), (4.375, -1.499, 8.186, 20.0, 40.0), (-2.618, 0.038, 1.615, 20.0, 40.0), (3.616, -0.833, 6.475, 20.0, 40.0), (-4.045, -1.558, 4.094, 20.0, 40.0), (-3.962, 0.636, -0.253, 20.0, 40.0), (3.505, 2.625, -0.497, 20.0, 40.0), (3.029, -0.523, 5.560, 20.0, 
40.0), (-3.520, -0.474, 2.188, 20.0, 40.0), (2.430, -1.469, 7.154, 20.0, 40.0), (1.547, -1.654, 7.082, 20.0, 40.0), (-1.370, 0.575, 1.165, 20.0, 40.0), (-1.869, -1.555, 5.176, 20.0, 40.0), (3.536, 2.841, -0.913, 20.0, 40.0), (-3.810, 1.220, -1.344, 20.0, 40.0), (-1.971, 1.462, -0.910, 20.0, 40.0), (-0.243, 0.167, 2.545, 20.0, 40.0), (-1.403, 2.645, -2.991, 20.0, 40.0), (0.532, -0.114, 3.494, 20.0, 40.0), (-1.678, 0.975, 0.212, 20.0, 40.0), (-0.656, 2.140, -1.609, 20.0, 40.0), (1.743, 2.631, -1.390, 20.0, 40.0), (2.586, 2.943, -1.593, 20.0, 40.0), (-0.512, 2.969, -3.195, 20.0, 40.0), (2.283, -0.100, 4.342, 20.0, 40.0), (-4.293, 0.872, -0.890, 20.0, 40.0), (3.411, 1.300, 2.106, 20.0, 40.0), (-0.281, 2.951, -3.042, 20.0, 40.0), (-4.442, 0.384, 0.012, 20.0, 40.0), (1.194, 1.746, 0.104, 20.0, 40.0), (-1.152, 1.862, -1.300, 20.0, 40.0), (1.362, -1.341, 6.363, 20.0, 40.0), (-4.488, 2.618, -4.481, 20.0, 40.0), (3.419, -0.564, 5.837, 20.0, 40.0), (-3.392, 0.396, 0.512, 20.0, 40.0), (-1.629, -0.909, 4.003, 20.0, 40.0), (4.447, -1.088, 7.399, 20.0, 40.0), (-1.232, 1.699, -1.014, 20.0, 40.0), (-1.286, -0.609, 3.575, 20.0, 40.0), (2.437, 2.796, -1.374, 20.0, 40.0), (-4.864, 1.989, -3.410, 20.0, 40.0), (-1.716, -1.399, 4.940, 20.0, 40.0), (-3.084, 1.858, -2.259, 20.0, 40.0), (2.828, -0.319, 5.053, 20.0, 40.0), (-1.226, 2.586, -2.786, 20.0, 40.0), (2.456, 0.092, 4.044, 20.0, 40.0), (-0.989, 2.375, -2.245, 20.0, 40.0), (3.268, 0.935, 2.765, 20.0, 40.0), (-4.128, -1.995, 4.927, 20.0, 40.0), (-1.083, 2.197, -1.935, 20.0, 40.0), (-3.471, -1.198, 3.660, 20.0, 40.0), (4.617, -1.136, 7.579, 20.0, 40.0), (2.054, -1.675, 7.378, 20.0, 40.0), (4.106, 2.326, 0.402, 20.0, 40.0), (1.558, 0.310, 3.158, 20.0, 40.0), (0.792, 0.900, 1.596, 20.0, 40.0), (-3.229, 0.300, 0.785, 20.0, 40.0), (3.787, -0.793, 6.479, 20.0, 40.0), (1.786, 2.288, -0.684, 20.0, 40.0), (2.643, 0.223, 3.875, 20.0, 40.0), (-3.592, 2.122, -3.040, 20.0, 40.0), (4.519, -1.760, 8.779, 20.0, 40.0), (3.221, 2.255, 0.101, 20.0, 40.0), (4.151, 1.788, 1.500, 20.0, 40.0), (-1.033, -1.195, 4.874, 20.0, 40.0), (-1.636, -1.037, 4.257, 20.0, 40.0), (-3.548, 1.911, -2.596, 20.0, 40.0), (4.829, -0.293, 6.001, 20.0, 40.0), (-4.684, -1.664, 3.986, 20.0, 40.0), (4.531, -0.503, 6.271, 20.0, 40.0), (-3.503, -1.606, 4.460, 20.0, 40.0), (-2.036, -1.522, 5.027, 20.0, 40.0), (-0.473, -0.617, 3.997, 20.0, 40.0), (-1.554, -1.630, 5.483, 20.0, 40.0), (-3.567, -1.043, 3.302, 20.0, 40.0), (-2.038, 0.579, 0.823, 20.0, 40.0), (-3.040, 0.857, -0.233, 20.0, 40.0), (4.610, 0.562, 4.181, 20.0, 40.0), (-3.323, -1.938, 5.215, 20.0, 40.0), (4.314, 1.720, 1.717, 20.0, 40.0), (-1.220, 0.615, 1.161, 20.0, 40.0), (-2.556, 1.120, -0.519, 20.0, 40.0), (-3.717, -0.108, 1.358, 20.0, 40.0), (4.689, -1.826, 8.996, 20.0, 40.0), (3.452, 0.506, 3.713, 20.0, 40.0), (2.472, 0.612, 3.012, 20.0, 40.0), (3.452, 0.450, 3.826, 20.0, 40.0), (1.207, 2.585, -1.567, 20.0, 40.0), (-4.826, 1.090, -1.593, 20.0, 40.0), (3.116, -1.118, 6.794, 20.0, 40.0), (0.448, 2.732, -2.240, 20.0, 40.0), (-1.096, -0.525, 3.503, 20.0, 40.0), (-4.680, -0.238, 1.137, 20.0, 40.0), (2.552, -1.403, 7.082, 20.0, 40.0), (0.719, 2.997, -2.635, 20.0, 40.0), (0.347, -1.966, 7.105, 20.0, 40.0), (2.958, -0.404, 5.288, 20.0, 40.0), (0.722, -1.950, 7.261, 20.0, 40.0), (-2.851, -0.986, 3.546, 20.0, 40.0), (-4.316, -0.439, 1.721, 20.0, 40.0), (-1.685, -0.201, 2.560, 20.0, 40.0), (1.856, 0.190, 3.549, 20.0, 40.0), (-2.052, 0.206, 1.562, 20.0, 40.0), (-2.504, -0.646, 3.041, 20.0, 40.0), (3.235, 0.882, 2.854, 20.0, 40.0), (-1.366, -1.573, 5.463, 20.0, 
40.0), (-3.447, 2.419, -3.562, 20.0, 40.0), (4.155, 2.092, 0.893, 20.0, 40.0), (-0.935, 0.209, 2.116, 20.0, 40.0), (3.117, -1.821, 8.201, 20.0, 40.0), (3.759, 0.577, 3.725, 20.0, 40.0), (-0.938, 2.992, -3.453, 20.0, 40.0), (-0.525, 2.341, -1.945, 20.0, 40.0), (4.540, 2.625, 0.019, 20.0, 40.0), (-2.097, 1.190, -0.429, 20.0, 40.0), (-2.672, 1.983, -2.302, 20.0, 40.0), (-3.038, -1.490, 4.460, 20.0, 40.0), (-0.943, 2.149, -1.770, 20.0, 40.0), (0.739, 1.598, 0.174, 20.0, 40.0), (1.828, 1.853, 0.208, 20.0, 40.0), (4.856, 0.137, 5.153, 20.0, 40.0), (-1.617, 0.468, 1.255, 20.0, 40.0), (-1.972, 2.053, -2.092, 20.0, 40.0), (-4.633, 1.389, -2.094, 20.0, 40.0), (-3.628, -1.156, 3.498, 20.0, 40.0), (3.597, 1.034, 2.731, 20.0, 40.0), (-1.488, -0.002, 2.261, 20.0, 40.0), (0.749, 1.921, -0.468, 20.0, 40.0), (1.304, -1.371, 6.394, 20.0, 40.0), (4.587, 2.936, -0.579, 20.0, 40.0), (-2.241, 1.791, -1.703, 20.0, 40.0), (-2.945, 1.372, -1.216, 20.0, 40.0), (1.375, 0.395, 2.898, 20.0, 40.0), (-1.281, -0.641, 3.642, 20.0, 40.0), (2.178, 0.895, 2.299, 20.0, 40.0), (3.031, -0.786, 6.087, 20.0, 40.0), (-1.385, -0.375, 3.058, 20.0, 40.0), (4.041, -0.431, 5.882, 20.0, 40.0), (0.480, -0.507, 4.254, 20.0, 40.0), (-3.797, 0.140, 0.822, 20.0, 40.0), (2.355, 2.502, -0.827, 20.0, 40.0), (1.376, -1.583, 6.854, 20.0, 40.0), (0.164, 1.405, 0.273, 20.0, 40.0), (-1.273, 1.471, -0.579, 20.0, 40.0), (0.770, 2.246, -1.107, 20.0, 40.0), (4.552, 2.904, -0.533, 20.0, 40.0), (4.259, -1.772, 8.674, 20.0, 40.0), (-0.309, 1.159, 0.528, 20.0, 40.0), (3.581, 2.700, -0.610, 20.0, 40.0), (-3.202, 0.346, 0.707, 20.0, 40.0), (-1.575, 1.242, -0.271, 20.0, 40.0), (-1.584, -0.493, 3.194, 20.0, 40.0), (-3.778, 0.150, 0.810, 20.0, 40.0), (-4.675, 1.749, -2.835, 20.0, 40.0), (3.567, -0.792, 6.367, 20.0, 40.0), (-0.417, 1.399, -0.006, 20.0, 40.0), (-4.672, 2.007, -3.349, 20.0, 40.0), (-1.034, 0.196, 2.090, 20.0, 40.0), (-3.796, 2.496, -3.890, 20.0, 40.0), (3.532, -0.497, 5.759, 20.0, 40.0), (4.868, -1.359, 8.151, 20.0, 40.0), (-0.769, 0.302, 2.011, 20.0, 40.0), (4.475, 2.612, 0.014, 20.0, 40.0), (-3.532, -0.395, 2.024, 20.0, 40.0), (0.322, 0.675, 1.812, 20.0, 40.0), (-2.028, -1.942, 5.870, 20.0, 40.0), (1.810, -1.244, 6.392, 20.0, 40.0), (-0.783, 1.242, 0.124, 20.0, 40.0), (-4.745, -1.300, 3.227, 20.0, 40.0), (1.902, 1.973, 0.005, 20.0, 40.0), (-3.453, -1.429, 4.132, 20.0, 40.0), (1.559, 0.986, 1.808, 20.0, 40.0), (0.128, 2.754, -2.443, 20.0, 40.0), (2.759, 1.727, 0.926, 20.0, 40.0), (-4.468, 1.690, -2.614, 20.0, 40.0), (-2.368, -1.922, 5.659, 20.0, 40.0), (-2.766, 2.128, -2.640, 20.0, 40.0), (0.967, -1.825, 7.133, 20.0, 40.0), (-2.854, 2.855, -4.136, 20.0, 40.0), (-2.944, 1.875, -2.222, 20.0, 40.0), (-2.632, -0.983, 3.649, 20.0, 40.0), (2.427, 2.239, -0.266, 20.0, 40.0), (-1.726, -0.838, 3.812, 20.0, 40.0), (0.007, -0.903, 4.809, 20.0, 40.0), (-2.013, 1.092, -0.191, 20.0, 40.0), (-0.449, 0.970, 0.836, 20.0, 40.0), (1.396, 0.411, 2.876, 20.0, 40.0), (-1.115, -1.790, 6.023, 20.0, 40.0), (3.748, 1.917, 1.039, 20.0, 40.0), (2.978, 1.043, 2.404, 20.0, 40.0), (-3.969, 2.514, -4.013, 20.0, 40.0), (4.455, -0.050, 5.328, 20.0, 40.0), (-3.065, -0.846, 3.160, 20.0, 40.0), (-1.069, 2.167, -1.869, 20.0, 40.0), (3.016, -1.393, 7.294, 20.0, 40.0), (0.045, -1.928, 6.879, 20.0, 40.0), (-2.555, -0.984, 3.690, 20.0, 40.0), (-1.995, -0.054, 2.111, 20.0, 40.0), (4.600, -0.509, 6.318, 20.0, 40.0), (-1.942, 1.215, -0.402, 20.0, 40.0), (1.262, 2.765, -1.899, 20.0, 40.0), (2.617, -1.106, 6.521, 20.0, 40.0), (1.737, 0.554, 2.761, 20.0, 40.0), (-2.197, 0.632, 0.638, 20.0, 
40.0), (4.768, 2.618, 0.147, 20.0, 40.0), (-3.737, -0.939, 3.010, 20.0, 40.0), (-2.623, 0.595, 0.499, 20.0, 40.0), (4.752, -0.340, 6.057, 20.0, 40.0), (2.333, -1.037, 6.240, 20.0, 40.0), (4.234, -1.882, 8.881, 20.0, 40.0), (-3.393, -0.812, 2.927, 20.0, 40.0), (0.885, 1.383, 0.678, 20.0, 40.0), (0.123, 2.937, -2.812, 20.0, 40.0), (2.969, 0.760, 2.964, 20.0, 40.0), (-4.929, 1.251, -1.967, 20.0, 40.0), (1.916, 2.223, -0.488, 20.0, 40.0), (-0.020, -1.740, 6.469, 20.0, 40.0), (0.702, -1.272, 5.895, 20.0, 40.0), (2.496, 2.648, -1.048, 20.0, 40.0), (4.067, -1.475, 7.984, 20.0, 40.0), (-3.717, 1.851, -2.561, 20.0, 40.0), (1.678, -0.624, 5.088, 20.0, 40.0), (1.073, 0.695, 2.146, 20.0, 40.0), (1.842, -0.749, 5.419, 20.0, 40.0), (-3.518, 1.909, -2.578, 20.0, 40.0), (2.229, 1.189, 1.737, 20.0, 40.0), (4.987, 2.893, -0.292, 20.0, 40.0), (-4.809, 1.043, -1.490, 20.0, 40.0), (-0.241, -0.728, 4.334, 20.0, 40.0), (-3.331, 0.590, 0.156, 20.0, 40.0), (-0.455, 2.621, -2.470, 20.0, 40.0), (1.492, 1.223, 1.301, 20.0, 40.0), (3.948, 2.841, -0.709, 20.0, 40.0), (0.732, 0.446, 2.475, 20.0, 40.0), (2.400, 2.390, -0.579, 20.0, 40.0), (-2.718, 1.427, -1.213, 20.0, 40.0), (-1.826, 1.451, -0.815, 20.0, 40.0), (1.125, 0.438, 2.686, 20.0, 40.0), (-4.918, 1.880, -3.219, 20.0, 40.0), (3.068, -0.442, 5.418, 20.0, 40.0), (1.982, 1.201, 1.589, 20.0, 40.0), (0.701, -1.709, 6.768, 20.0, 40.0), (-1.496, 2.564, -2.877, 20.0, 40.0), (-3.812, 0.974, -0.853, 20.0, 40.0), (-3.405, 2.018, -2.739, 20.0, 40.0), (2.211, 2.889, -1.674, 20.0, 40.0), (-2.481, 2.931, -4.103, 20.0, 40.0), (-3.721, 2.765, -4.391, 20.0, 40.0), (-1.768, -1.292, 4.699, 20.0, 40.0), (-4.462, 1.058, -1.347, 20.0, 40.0), (-3.516, -1.942, 5.126, 20.0, 40.0), (0.485, 2.420, -1.597, 20.0, 40.0), (-0.492, 0.242, 2.270, 20.0, 40.0), (4.245, 1.689, 1.744, 20.0, 40.0), (2.234, 0.364, 3.389, 20.0, 40.0), (2.629, 2.224, -0.134, 20.0, 40.0), (-4.375, 1.221, -1.630, 20.0, 40.0), (-0.618, 1.374, -0.057, 20.0, 40.0), (-2.580, -1.604, 4.918, 20.0, 40.0), (0.159, 1.104, 0.871, 20.0, 40.0), (-3.597, 0.975, -0.749, 20.0, 40.0); + +DROP TABLE IF EXISTS model; +create table model engine = Memory as select stochasticLinearRegressionState(0.03, 0.00001, 2, 'Nesterov')(target, param1, param2) as state from defaults; + +select ans > -67.01 and ans < -66.9 from +(with (select state from model) as model select evalMLMethod(model, predict1, predict2) as ans from defaults limit 1); + +-- Check that returned weights are close to real +select ans > 0.49 and ans < 0.51 from +(select stochasticLinearRegression(0.03, 0.00001, 2, 'Nesterov')(target, param1, param2)[1] as ans from defaults); + +select ans > -2.01 and ans < -1.99 from +(select stochasticLinearRegression(0.03, 0.00001, 2, 'Nesterov')(target, param1, param2)[2] as ans from defaults); + +select ans > 2.99 and ans < 3.01 from +(select stochasticLinearRegression(0.03, 0.00001, 2, 'Nesterov')(target, param1, param2)[3] as ans from defaults); + + +-- Check GROUP BY + +DROP TABLE IF EXISTS grouptest; +CREATE TABLE IF NOT EXISTS grouptest +( + user_id UInt32, + p1 Float64, + p2 Float64, + target Float64 +) ENGINE = Memory; +INSERT INTO grouptest VALUES +(1, 1.732, 3.653, 11.422), (1, 2.150, 2.103, 7.609), (1, 0.061, 3.310, 7.052), (1, 1.030, 3.671, 10.075), (1, 1.879, 0.578, 2.492), (1, 0.922, 2.552, 6.499), (1, 1.145, -0.095, -0.993), (1, 1.920, 0.373, 1.959), (1, 0.458, 0.094, -1.801), (1, -0.118, 3.273, 6.582), (1, 2.667, 1.472, 6.752), (1, -0.387, -0.529, -5.360), (1, 2.219, 1.790, 6.810), (1, -0.754, 2.139, 1.908), (1, -0.446, -0.668, 
-5.896), (1, 1.729, 0.914, 3.199), (1, 2.908, -0.420, 1.556), (1, 1.645, 3.581, 11.034), (1, 0.358, -0.950, -5.136), (1, -0.467, 2.339, 3.084), (1, 3.629, 2.959, 13.135), (1, 2.393, 0.926, 4.563), (1, -0.945, 0.281, -4.047), (1, 3.688, -0.570, 2.667), (1, 3.016, 1.775, 8.356), (1, 2.571, 0.139, 2.559), (1, 2.999, 0.956, 5.866), (1, 1.754, -0.809, -1.920), (1, 3.943, 0.382, 6.030), (1, -0.970, 2.315, 2.004), (1, 1.503, 0.790, 2.376), (1, -0.775, 2.563, 3.139), (1, 1.211, 0.113, -0.240), (1, 3.058, 0.977, 6.048), (1, 2.729, 1.634, 7.360), (1, 0.307, 2.759, 5.893), (1, 3.272, 0.181, 4.089), (1, 1.192, 1.963, 5.273), (1, 0.931, 1.447, 3.203), (1, 3.835, 3.447, 15.011), (1, 0.709, 0.008, -1.559), (1, 3.155, -0.676, 1.283), (1, 2.342, 1.047, 4.824), (1, 2.059, 1.262, 4.903), (1, 2.797, 0.855, 5.159), (1, 0.387, 0.645, -0.292), (1, 1.418, 0.408, 1.060), (1, 2.719, -0.826, -0.039), (1, 2.735, 3.736, 13.678), (1, 0.205, 0.777, -0.260), (1, 3.117, 2.063, 9.424), (1, 0.601, 0.178, -1.263), (1, 0.064, 0.157, -2.401), (1, 3.104, -0.455, 1.842), (1, -0.253, 0.672, -1.490), (1, 2.592, -0.408, 0.961), (1, -0.909, 1.314, -0.878), (1, 0.625, 2.594, 6.031), (1, 2.749, -0.210, 1.869), (1, -0.469, 1.532, 0.657), (1, 1.954, 1.827, 6.388), (1, -0.528, 1.136, -0.647), (1, 0.802, -0.583, -3.146), (1, -0.176, 1.584, 1.400), (1, -0.705, -0.785, -6.766), (1, 1.660, 2.365, 7.416), (1, 2.278, 3.977, 13.485), (1, 2.846, 3.845, 14.229), (1, 3.588, -0.401, 2.974), (1, 3.525, 3.831, 15.542), (1, 0.191, 3.312, 7.318), (1, 2.615, -0.287, 1.370), (1, 2.701, -0.446, 1.064), (1, 2.065, -0.556, -0.538), (1, 2.572, 3.618, 12.997), (1, 3.743, -0.708, 2.362), (1, 3.734, 2.319, 11.425), (1, 3.768, 2.777, 12.866), (1, 3.203, 0.958, 6.280), (1, 1.512, 2.635, 7.927), (1, 2.194, 2.323, 8.356), (1, -0.726, 2.729, 3.735), (1, 0.020, 1.704, 2.152), (1, 2.173, 2.856, 9.912), (1, 3.124, 1.705, 8.364), (1, -0.834, 2.142, 1.759), (1, -0.702, 3.024, 4.666), (1, 1.393, 0.583, 1.535), (1, 2.136, 3.770, 12.581), (1, -0.445, 0.991, -0.917), (1, 0.244, -0.835, -5.016), (1, 2.789, 0.691, 4.652), (1, 0.246, 2.661, 5.475), (1, 3.793, 2.671, 12.601), (1, 1.645, -0.973, -2.627), (1, 2.405, 1.842, 7.336), (1, 3.221, 3.109, 12.769), (1, -0.638, 3.220, 5.385), (1, 1.836, 3.025, 9.748), (1, -0.660, 1.818, 1.133), (1, 0.901, 0.981, 1.744), (1, -0.236, 3.087, 5.789), (1, 1.744, 3.864, 12.078), (1, -0.166, 3.186, 6.226), (1, 3.536, -0.090, 3.803), (1, 3.284, 2.026, 9.648), (1, 1.327, 2.822, 8.119), (1, -0.709, 0.105, -4.104), (1, 0.509, -0.989, -4.949), (1, 0.180, -0.934, -5.440), (1, 3.522, 1.374, 8.168), (1, 1.497, -0.764, -2.297), (1, 1.696, 2.364, 7.482), (1, -0.202, -0.032, -3.500), (1, 3.109, -0.138, 2.804), (1, -0.238, 2.992, 5.501), (1, 1.639, 1.634, 5.181), (1, 1.919, 0.341, 1.859), (1, -0.563, 1.750, 1.124), (1, 0.886, 3.589, 9.539), (1, 3.619, 3.020, 13.299), (1, 1.703, -0.493, -1.073), (1, 2.364, 3.764, 13.022), (1, 1.820, 1.854, 6.201), (1, 1.437, -0.765, -2.421), (1, 1.396, 0.959, 2.668), (1, 2.608, 2.032, 8.312), (1, 0.333, -0.040, -2.455), (1, 3.441, 0.824, 6.355), (1, 1.303, 2.767, 7.908), (1, 1.359, 2.404, 6.932), (1, 0.674, 0.241, -0.930), (1, 2.708, -0.077, 2.183), (1, 3.821, 3.215, 14.287), (1, 3.316, 1.591, 8.404), (1, -0.848, 1.145, -1.259), (1, 3.455, 3.081, 13.153), (1, 2.568, 0.259, 2.914), (1, 2.866, 2.636, 10.642), (1, 2.776, -0.309, 1.626), (1, 2.087, 0.619, 3.031), (1, 1.682, 1.201, 3.967), (1, 3.800, 2.600, 12.399), (1, 3.344, -0.780, 1.347), (1, 1.053, -0.817, -3.346), (1, 0.805, 3.085, 7.865), (1, 0.173, 0.069, -2.449), (1, 
2.018, 1.309, 4.964), (1, 3.713, 3.804, 15.838), (1, 3.805, -0.063, 4.421), (1, 3.587, 2.854, 12.738), (1, 2.426, -0.179, 1.315), (1, 0.535, 0.572, -0.213), (1, -0.558, 0.142, -3.690), (1, -0.875, 2.700, 3.349), (1, 2.405, 3.933, 13.610), (1, 1.633, 1.222, 3.934), (1, 0.049, 2.853, 5.657), (1, 1.146, 0.907, 2.015), (1, 0.300, 0.219, -1.744), (1, 2.226, 2.526, 9.029), (1, 2.545, -0.762, -0.198), (1, 2.553, 3.956, 13.974), (1, -0.898, 2.836, 3.713), (1, 3.796, -0.202, 3.985), (1, -0.810, 2.963, 4.268), (1, 0.511, 2.104, 4.334), (1, 3.527, 3.741, 15.275), (1, -0.921, 3.094, 4.440), (1, 0.856, 3.108, 8.036), (1, 0.815, 0.565, 0.323), (1, 3.717, 0.693, 6.512), (1, 3.052, 3.558, 13.778), (1, 2.942, 3.034, 11.986), (1, 0.765, 3.177, 8.061), (1, 3.175, -0.525, 1.776), (1, 0.309, 1.006, 0.638), (1, 1.922, 0.835, 3.349), (1, 3.678, 3.314, 14.297), (1, 2.840, -0.486, 1.221), (1, 1.195, 3.396, 9.578), (1, -0.157, 3.122, 6.053), (1, 2.404, 1.434, 6.110), (1, 3.108, 2.210, 9.845), (1, 2.289, 1.188, 5.142), (1, -0.319, -0.044, -3.769), (1, -0.625, 3.701, 6.854), (1, 2.269, -0.276, 0.710), (1, 0.777, 1.963, 4.442), (1, 0.411, 1.893, 3.501), (1, 1.173, 0.461, 0.728), (1, 1.767, 3.077, 9.765), (1, 0.853, 3.076, 7.933), (1, -0.013, 3.149, 6.421), (1, 3.841, 1.526, 9.260), (1, -0.950, 0.277, -4.070), (1, -0.644, -0.747, -6.527), (1, -0.923, 1.733, 0.353), (1, 0.044, 3.037, 6.201), (1, 2.074, 2.494, 8.631), (1, 0.016, 0.961, -0.085), (1, -0.780, -0.448, -5.904), (1, 0.170, 1.936, 3.148), (1, -0.420, 3.730, 7.349), (1, -0.630, 1.504, 0.254), (1, -0.006, 0.045, -2.879), (1, 1.101, -0.985, -3.753), (1, 1.618, 0.555, 1.900), (1, -0.336, 1.408, 0.552), (1, 1.086, 3.284, 9.024), (1, -0.815, 2.032, 1.466), (1, 3.144, -0.380, 2.148), (1, 2.326, 2.077, 7.883), (1, -0.571, 0.964, -1.251), (1, 2.416, 1.255, 5.595), (1, 3.964, 1.379, 9.065), (1, 3.897, 1.553, 9.455), (1, 1.806, 2.667, 8.611), (1, 0.323, 3.809, 9.073), (1, 0.501, 3.256, 7.769), (1, -0.679, 3.539, 6.259), (1, 2.825, 3.856, 14.219), (1, 0.288, -0.536, -4.032), (1, 3.009, 0.725, 5.193), (1, -0.763, 1.140, -1.105), (1, 1.124, 3.807, 10.670), (1, 2.478, 0.204, 2.570), (1, 2.825, 2.639, 10.566), (1, 1.878, -0.883, -1.892), (1, 3.380, 2.942, 12.587), (1, 2.202, 1.739, 6.621), (1, -0.711, -0.680, -6.463), (1, -0.266, 1.827, 1.951), (1, -0.846, 1.003, -1.683), (1, 3.201, 0.132, 3.798), (1, 2.797, 0.085, 2.849), (1, 1.632, 3.269, 10.072), (1, 2.410, 2.727, 10.003), (1, -0.624, 0.853, -1.690), (1, 1.314, 3.268, 9.433), (1, -0.395, 0.450, -2.440), (1, 0.992, 3.168, 8.489), (1, 3.355, 2.106, 10.028), (1, 0.509, -0.888, -4.647), (1, 1.007, 0.797, 1.405), (1, 0.045, 0.211, -2.278), (1, -0.911, 1.093, -1.544), (1, 2.409, 0.273, 2.637), (1, 2.640, 3.540, 12.899), (1, 2.668, -0.433, 1.038), (1, -0.014, 0.341, -2.005), (1, -0.525, -0.344, -5.083), (1, 2.278, 3.517, 12.105), (1, 3.712, 0.901, 7.128), (1, -0.689, 2.842, 4.149), (1, -0.467, 1.263, -0.147), (1, 0.963, -0.653, -3.034), (1, 2.559, 2.590, 9.889), (1, 1.566, 1.393, 4.312), (1, -1.000, 1.809, 0.429), (1, -0.297, 3.221, 6.070), (1, 2.199, 3.820, 12.856), (1, 3.096, 3.251, 12.944), (1, 1.479, 1.835, 5.461), (1, 0.276, 0.773, -0.130), (1, 0.607, 1.382, 2.360), (1, 1.169, -0.108, -0.985), (1, 3.429, 0.475, 5.282), (1, 2.626, 0.104, 2.563), (1, 1.156, 3.512, 9.850), (1, 3.947, 0.796, 7.282), (1, -0.462, 2.425, 3.351), (1, 3.957, 0.366, 6.014), (1, 3.763, -0.330, 3.536), (1, 0.667, 3.361, 8.417), (1, -0.583, 0.892, -1.492), (1, -0.505, 1.344, 0.021), (1, -0.474, 2.714, 4.195), (1, 3.455, 0.014, 3.950), (1, 1.016, 1.828, 
4.516), (1, 1.845, 0.193, 1.269), (1, -0.529, 3.930, 7.731), (1, 2.636, 0.045, 2.408), (1, 3.757, -0.918, 1.760), (1, -0.808, 1.160, -1.137), (1, 0.744, 1.435, 2.793), (1, 3.457, 3.566, 14.613), (1, 1.061, 3.140, 8.544), (1, 3.733, 3.368, 14.570), (1, -0.969, 0.879, -2.301), (1, 3.940, 3.136, 14.287), (1, -0.730, 2.107, 1.860), (1, 3.699, 2.820, 12.858), (1, 2.197, -0.636, -0.514), (1, 0.775, -0.979, -4.387), (1, 2.019, 2.828, 9.521), (1, 1.415, 0.113, 0.170), (1, 1.567, 3.410, 10.363), (1, 0.984, -0.960, -3.913), (1, 1.809, 2.487, 8.079), (1, 1.550, 1.130, 3.489), (1, -0.770, 3.027, 4.542), (1, -0.358, 3.326, 6.262), (1, 3.140, 0.096, 3.567), (1, -0.685, 2.213, 2.270), (1, 0.916, 0.692, 0.907), (1, 1.526, 1.159, 3.527), (1, 2.675, -0.568, 0.645), (1, 1.740, 3.019, 9.538), (1, 1.223, 2.088, 5.709), (1, 1.572, -0.125, -0.230), (1, 3.641, 0.362, 5.369), (1, 2.944, 3.897, 14.578), (1, 2.775, 2.461, 9.932), (1, -0.200, 2.492, 4.076), (1, 0.065, 2.055, 3.296), (1, 2.375, -0.639, -0.167), (1, -0.133, 1.138, 0.149), (1, -0.385, 0.163, -3.281), (1, 2.200, 0.863, 3.989), (1, -0.470, 3.492, 6.536), (1, -0.916, -0.547, -6.472), (1, 0.634, 0.927, 1.049), (1, 2.930, 2.655, 10.825), (1, 3.094, 2.802, 11.596), (1, 0.457, 0.539, -0.470), (1, 1.277, 2.229, 6.240), (1, -0.157, 1.270, 0.496), (1, 3.320, 0.640, 5.559), (1, 2.836, 1.067, 5.872), (1, 0.921, -0.716, -3.307), (1, 3.886, 1.487, 9.233), (1, 0.306, -0.142, -2.815), (1, 3.727, -0.410, 3.225), (1, 1.268, -0.801, -2.866), (1, 2.302, 2.493, 9.084), (1, 0.331, 0.373, -1.220), (1, 3.224, -0.857, 0.879), (1, 1.328, 2.786, 8.014), (1, 3.639, 1.601, 9.081), (1, 3.201, -0.484, 1.949), (1, 3.447, -0.734, 1.692), (1, 2.773, -0.143, 2.117), (1, 1.517, -0.493, -1.445), (1, 1.778, -0.428, -0.728), (1, 3.989, 0.099, 5.274), (1, 1.126, 3.985, 11.206), (1, 0.348, 0.756, -0.035), (1, 2.399, 2.576, 9.525), (1, 0.866, 1.800, 4.132), (1, 3.612, 1.598, 9.017), (1, 0.495, 2.239, 4.707), (1, 2.442, 3.712, 13.019), (1, 0.238, -0.844, -5.057), (1, 1.404, 3.095, 9.093), (1, 2.842, 2.044, 8.816), (1, 0.622, 0.322, -0.791), (1, -0.561, 1.242, -0.395), (1, 0.679, 3.822, 9.823), (1, 1.875, 3.526, 11.327), (1, 3.587, 1.050, 7.324), (1, 1.467, 0.588, 1.699), (1, 3.180, 1.571, 8.074), (1, 1.402, 0.430, 1.093), (1, 1.834, 2.209, 7.294), (1, 3.542, -0.259, 3.306), (1, -0.517, 0.174, -3.513), (1, 3.549, 2.210, 10.729), (1, 2.260, 3.393, 11.699), (1, 0.036, 1.893, 2.751), (1, 0.680, 2.815, 6.804), (1, 0.219, 0.368, -1.459), (1, -0.519, 3.987, 7.924), (1, 0.974, 0.761, 1.231), (1, 0.107, 0.620, -0.927), (1, 1.513, 1.910, 5.755), (1, 3.114, 0.894, 5.910), (1, 3.061, 3.052, 12.276), (1, 2.556, 3.779, 13.448), (1, 1.964, 2.692, 9.002), (1, 3.894, -0.032, 4.690), (1, -0.693, 0.910, -1.655), (1, 2.692, 2.908, 11.108), (1, -0.824, 1.190, -1.078), (1, 3.621, 0.918, 6.997), (1, 3.190, 2.442, 10.707), (1, 1.424, -0.546, -1.791), (1, 2.061, -0.427, -0.158), (1, 1.532, 3.158, 9.540), (1, 0.648, 3.557, 8.967), (1, 2.511, 1.665, 7.017), (1, 1.903, -0.168, 0.302), (1, -0.186, -0.718, -5.528), (1, 2.421, 3.896, 13.531), (1, 3.063, 1.841, 8.650), (1, 0.636, 1.699, 3.367), (1, 1.555, 0.688, 2.174), (1, -0.412, 0.454, -2.462), (1, 1.645, 3.207, 9.911), (1, 3.396, 3.766, 15.090), (1, 0.375, -0.256, -3.017), (1, 3.636, 0.732, 6.469), (1, 2.503, 3.133, 11.405), (1, -0.253, 0.693, -1.429), (1, 3.178, 3.110, 12.686), (1, 3.282, -0.725, 1.388), (1, -0.297, 1.222, 0.070), (1, 1.872, 3.211, 10.377), (1, 3.471, 1.446, 8.278), (1, 2.891, 0.197, 3.374), (1, -0.896, 2.198, 1.802), (1, 1.178, -0.717, -2.796), (1, 
0.650, 3.371, 8.412), (1, 0.447, 3.248, 7.637), (1, 1.616, -0.109, -0.097), (1, 1.837, 1.092, 3.951), (1, 0.767, 1.384, 2.684), (1, 3.466, -0.600, 2.133), (1, -0.800, -0.734, -6.802), (1, -0.534, 0.068, -3.865), (1, 3.416, -0.459, 2.455), (1, 0.800, -0.132, -1.795), (1, 2.150, 1.190, 4.869), (1, 0.830, 1.220, 2.319), (1, 2.656, 2.587, 10.072), (1, 0.375, -0.219, -2.906), (1, 0.582, -0.637, -3.749), (1, 0.588, -0.723, -3.992), (1, 3.875, 2.126, 11.127), (1, -0.476, 1.909, 1.775), (1, 0.963, 3.597, 9.716), (1, -0.888, 3.933, 7.021), (1, 1.711, -0.868, -2.184), (1, 3.244, 1.990, 9.460), (1, -0.057, 1.537, 1.497), (1, -0.015, 3.511, 7.504), (1, 0.280, 0.582, -0.695), (1, 2.402, 2.731, 9.998), (1, 2.053, 2.253, 7.865), (1, 1.955, 0.172, 1.424), (1, 3.746, 0.872, 7.107), (1, -0.157, 2.381, 3.829), (1, 3.548, -0.918, 1.340), (1, 2.449, 3.195, 11.482), (1, 1.582, 1.055, 3.329), (1, 1.908, -0.839, -1.700), (1, 2.341, 3.137, 11.091), (1, -0.043, 3.873, 8.532), (1, 0.528, -0.752, -4.198), (1, -0.940, 0.261, -4.098), (1, 2.609, 3.531, 12.812), (1, 2.439, 2.486, 9.336), (1, -0.659, -0.150, -4.768), (1, 2.131, 1.973, 7.181), (1, 0.253, 0.304, -1.583), (1, -0.169, 2.273, 3.480), (1, 1.855, 3.974, 12.631), (1, 0.092, 1.160, 0.666), (1, 3.990, 0.402, 6.187), (1, -0.455, 0.932, -1.113), (1, 2.365, 1.152, 5.185), (1, -0.058, 1.244, 0.618), (1, 0.674, 0.481, -0.209), (1, 3.002, 0.246, 3.743), (1, 1.804, 3.765, 11.902), (1, 3.567, -0.752, 1.876), (1, 0.098, 2.257, 3.968), (1, 0.130, -0.889, -5.409), (1, 0.633, 1.891, 3.940), (1, 0.421, 2.533, 5.440), (1, 2.252, 1.853, 7.063), (1, 3.191, -0.980, 0.443), (1, -0.776, 3.241, 5.171), (1, 0.509, 1.737, 3.229), (1, 3.583, 1.274, 7.986), (1, 1.101, 2.896, 7.891), (1, 3.072, -0.008, 3.120), (1, 2.945, -0.295, 2.006), (1, 3.621, -0.161, 3.760), (1, 1.399, 3.759, 11.075), (1, 3.783, -0.866, 1.968), (1, -0.241, 2.902, 5.225), (1, 1.323, 1.934, 5.449), (1, 1.449, 2.855, 8.464), (1, 0.088, 1.526, 1.753), (1, -1.000, 2.161, 1.485), (1, -0.214, 3.358, 6.647), (1, -0.384, 3.230, 5.921), (1, 3.146, 1.228, 6.975), (1, 1.917, 0.860, 3.415), (1, 1.982, 1.735, 6.167), (1, 1.404, 1.851, 5.360), (1, 2.428, -0.674, -0.166), (1, 2.081, -0.505, -0.352), (1, 0.914, -0.543, -2.802), (1, -0.029, -0.482, -4.506), (1, 0.671, 0.184, -1.105), (1, 1.641, -0.524, -1.292), (1, 1.005, 0.361, 0.094), (1, -0.493, 3.582, 6.760), (2, 3.876, 2.563, 21.500), (2, 0.159, -0.309, 7.986), (2, -0.496, 0.417, 12.998), (2, -0.164, -0.512, 7.092), (2, 0.632, 3.200, 28.571), (2, 3.772, 0.493, 9.188), (2, 2.430, -0.797, 2.789), (2, 3.872, -0.775, 1.475), (2, -0.031, -0.256, 8.495), (2, 2.726, 3.000, 25.271), (2, 1.116, -0.269, 7.269), (2, 0.551, 3.402, 29.860), (2, 0.820, 2.500, 24.179), (2, 1.153, -0.453, 6.131), (2, -0.717, -0.360, 8.556), (2, 0.532, 0.531, 12.654), (2, 2.096, 0.981, 13.791), (2, 0.146, -0.433, 7.259), (2, 1.000, 1.075, 15.452), (2, 2.963, -0.090, 6.495), (2, 1.047, 2.052, 21.267), (2, 0.882, 1.778, 19.785), (2, 1.380, 2.702, 24.832), (2, 1.853, 0.401, 10.554), (2, 2.004, 1.770, 18.618), (2, 3.377, 0.772, 11.253), (2, 1.227, -0.169, 7.759), (2, 0.428, 2.052, 21.885), (2, 0.070, 3.648, 31.816), (2, 0.128, -0.938, 4.244), (2, 2.061, 0.753, 12.454), (2, 1.207, -0.301, 6.989), (2, -0.168, 3.765, 32.757), (2, 3.450, 1.801, 17.353), (2, -0.483, 3.344, 30.547), (2, 1.847, 1.884, 19.455), (2, 3.241, 2.369, 20.975), (2, 0.628, 3.590, 30.912), (2, 2.183, 1.741, 18.263), (2, 0.774, 2.638, 25.057), (2, 3.292, 2.867, 23.912), (2, 0.056, 2.651, 25.850), (2, -0.506, 0.300, 12.308), (2, 0.524, 1.182, 16.570), 
(2, -0.267, 2.563, 25.647), (2, 3.953, -0.334, 4.040), (2, 2.507, 2.319, 21.408), (2, -0.770, 1.017, 16.875), (2, 0.481, 1.591, 19.062), (2, 3.243, 1.060, 13.114), (2, 2.178, -0.325, 5.873), (2, 2.510, 1.235, 14.900), (2, 2.684, 2.370, 21.535), (2, 3.466, 3.656, 28.469), (2, 2.994, 3.960, 30.764), (2, -0.363, 3.592, 31.917), (2, 1.738, 0.074, 8.708), (2, 1.462, 3.727, 30.902), (2, 0.059, 0.180, 11.021), (2, 2.980, 2.317, 20.925), (2, 1.248, 0.965, 14.545), (2, 0.776, -0.229, 7.850), (2, -0.562, 2.839, 27.598), (2, 3.581, 0.244, 7.883), (2, -0.958, 0.901, 16.362), (2, 3.257, 0.364, 8.925), (2, 1.478, 1.718, 18.827), (2, -0.121, -0.436, 7.507), (2, 0.966, 1.444, 17.697), (2, 3.631, 3.463, 27.144), (2, 0.174, -0.663, 5.848), (2, 2.783, 0.124, 7.959), (2, 1.106, -0.936, 3.276), (2, 0.186, -0.942, 4.162), (2, 3.513, 2.456, 21.222), (2, 0.339, 2.316, 23.558), (2, 0.566, 2.515, 24.523), (2, -0.134, 0.746, 14.607), (2, 1.554, 0.106, 9.084), (2, -0.846, 2.748, 27.337), (2, 3.934, 0.564, 9.451), (2, 2.840, -0.966, 1.366), (2, 1.379, 0.307, 10.463), (2, 1.065, -0.780, 4.253), (2, 3.324, 2.145, 19.546), (2, 0.974, -0.543, 5.767), (2, 2.469, 3.976, 31.385), (2, -0.434, 3.689, 32.570), (2, 0.261, 0.481, 12.624), (2, 3.786, 2.605, 21.843), (2, -0.460, -0.536, 7.243), (2, 2.576, 2.880, 24.702), (2, -0.501, 3.551, 31.810), (2, 2.946, 3.263, 26.633), (2, 2.959, -0.813, 2.162), (2, -0.749, 0.490, 13.686), (2, 2.821, 0.335, 9.187), (2, 3.964, 0.272, 7.667), (2, 0.808, -0.700, 4.994), (2, 0.415, 2.183, 22.682), (2, 2.551, 3.785, 30.156), (2, 0.821, 1.120, 15.897), (2, 1.714, 3.019, 26.400), (2, 2.265, 1.950, 19.438), (2, 1.493, 3.317, 28.409), (2, -0.445, 2.282, 24.134), (2, -0.508, 2.508, 25.553), (2, 1.017, -0.621, 5.255), (2, 1.053, 2.246, 22.422), (2, 0.441, 1.637, 19.382), (2, 3.657, 1.246, 13.816), (2, 0.756, 0.808, 14.095), (2, 1.849, 1.599, 17.742), (2, 1.782, -0.000, 8.215), (2, 1.136, 3.940, 32.506), (2, 2.814, 3.288, 26.916), (2, 3.180, 3.198, 26.008), (2, 0.728, -0.054, 8.946), (2, 0.801, 0.775, 13.852), (2, 1.399, -0.546, 5.322), (2, 1.415, 1.753, 19.103), (2, 2.860, 1.796, 17.913), (2, 0.712, 2.902, 26.699), (2, -0.389, 3.093, 28.945), (2, 3.661, 3.666, 28.333), (2, 3.944, 0.996, 12.030), (2, 1.655, 1.385, 16.657), (2, 0.122, -0.662, 5.906), (2, 3.667, 2.763, 22.912), (2, 2.606, 0.630, 11.172), (2, -0.291, 1.492, 19.242), (2, -0.787, 1.223, 18.125), (2, 2.405, 0.325, 9.545), (2, 3.129, -0.412, 4.398), (2, 0.588, 3.964, 33.194), (2, -0.177, 3.636, 31.993), (2, 2.079, 3.280, 27.603), (2, 3.055, 3.958, 30.692), (2, -0.164, 3.188, 29.292), (2, 3.803, 3.151, 25.105), (2, 3.123, -0.891, 1.531), (2, 3.070, -0.824, 1.988), (2, 3.103, -0.931, 1.309), (2, 0.589, 3.353, 29.529), (2, 1.095, 1.973, 20.744), (2, -0.557, 0.370, 12.775), (2, 1.223, 0.307, 10.620), (2, 3.255, -0.768, 2.136), (2, 0.508, 2.157, 22.435), (2, 0.373, 0.319, 11.544), (2, 1.240, 1.736, 19.177), (2, 1.846, 0.970, 13.972), (2, 3.352, -0.534, 3.445), (2, -0.352, -0.290, 8.610), (2, 0.281, 0.193, 10.880), (2, 3.450, -0.059, 6.193), (2, 0.310, 2.575, 25.140), (2, 1.791, 1.127, 14.970), (2, 1.992, 2.347, 22.087), (2, -0.288, 2.881, 27.576), (2, 3.464, 3.664, 28.518), (2, 0.573, 2.789, 26.159), (2, 2.265, 1.583, 17.233), (2, 3.203, 0.730, 11.177), (2, 3.345, 1.368, 14.862), (2, 0.891, 3.690, 31.248), (2, 2.252, -0.311, 5.884), (2, -0.087, 0.804, 14.912), (2, 0.153, 2.510, 24.905), (2, 3.533, -0.965, 0.675), (2, 2.035, 1.953, 19.683), (2, 0.316, 2.448, 24.373), (2, 2.199, 3.858, 30.946), (2, -0.519, 3.647, 32.399), (2, 0.867, 1.961, 20.901), 
(2, 2.739, 2.268, 20.866), (2, 2.462, -0.664, 3.551), (2, 1.372, 3.419, 29.144), (2, -0.628, 2.723, 26.968), (2, 3.989, -0.225, 4.659), (2, 0.166, 3.190, 28.976), (2, 1.681, 2.937, 25.943), (2, 2.979, 2.263, 20.600), (2, 3.896, -0.419, 3.590), (2, 3.861, 2.224, 19.485), (2, -0.087, -0.861, 4.918), (2, 1.182, 1.886, 20.133), (2, 3.622, 2.320, 20.301), (2, 3.560, 0.008, 6.491), (2, 3.082, -0.605, 3.285), (2, 1.777, 1.324, 16.169), (2, 2.269, 2.436, 22.348), (2, 0.019, 3.074, 28.423), (2, -0.560, 3.868, 33.765), (2, 1.568, 2.886, 25.749), (2, 2.045, 0.222, 9.286), (2, 1.391, 0.352, 10.723), (2, 0.172, 1.908, 21.276), (2, 1.173, -0.726, 4.474), (2, 1.642, 2.576, 23.814), (2, 3.346, 1.377, 14.918), (2, 0.120, 0.411, 12.344), (2, 3.913, 0.820, 11.008), (2, 1.054, 3.732, 31.340), (2, 2.284, 0.108, 8.362), (2, 2.266, 0.066, 8.131), (2, 3.204, 1.156, 13.735), (2, 3.243, 2.032, 18.947), (2, 3.052, -0.121, 6.221), (2, 1.131, 2.189, 22.000), (2, 2.958, 0.658, 10.990), (2, 1.717, 3.708, 30.530), (2, 2.417, 2.070, 20.004), (2, 2.175, 0.881, 13.110), (2, 0.333, 3.494, 30.629), (2, 3.598, 3.940, 30.044), (2, 3.683, -0.110, 5.660), (2, 2.555, 1.196, 14.620), (2, 1.511, 0.453, 11.206), (2, 0.903, 1.390, 17.439), (2, -0.897, 3.303, 30.716), (2, 0.245, 2.129, 22.527), (2, 1.370, 2.715, 24.923), (2, 1.822, -0.917, 2.676), (2, 2.690, -0.109, 6.657), (2, 0.206, 1.561, 19.162), (2, 3.905, 2.710, 22.357), (2, -0.438, 3.207, 29.678), (2, 0.898, 3.445, 29.772), (2, 1.838, 2.871, 25.385), (2, 0.116, 1.401, 18.292), (2, -0.408, 2.375, 24.656), (2, 1.681, 3.338, 28.349), (2, 1.177, -0.318, 6.914), (2, 1.004, 0.626, 12.753), (2, 2.840, 2.589, 22.691), (2, 1.258, 3.993, 32.700), (2, 2.016, 3.489, 28.920), (2, -0.728, 0.164, 11.713), (2, 0.193, 1.479, 18.682), (2, 2.647, -0.969, 1.541), (2, 3.837, 2.602, 21.773), (2, 0.541, 0.205, 10.690), (2, 0.026, 2.756, 26.511), (2, 0.924, 0.909, 14.530), (2, 0.974, -0.074, 8.581), (2, 0.081, 0.005, 9.948), (2, 1.331, 2.942, 26.320), (2, 2.498, 3.405, 27.934), (2, 3.741, 1.554, 15.581), (2, 3.502, -0.089, 5.964), (2, 3.069, 1.768, 17.539), (2, 3.115, -0.008, 6.839), (2, 3.237, -0.503, 3.745), (2, 0.768, -0.135, 8.420), (2, 0.410, 3.974, 33.437), (2, 0.238, -0.700, 5.564), (2, 3.619, 0.350, 8.482), (2, 3.563, 3.059, 24.788), (2, 2.916, 3.101, 25.691), (2, 0.144, 3.282, 29.549), (2, 1.288, 2.642, 24.565), (2, -0.859, 0.229, 12.234), (2, 1.507, -0.711, 4.229), (2, -0.634, 2.608, 26.281), (2, 2.054, -0.834, 2.942), (2, 0.453, 1.072, 15.980), (2, 3.914, 1.159, 13.039), (2, 0.254, 1.835, 20.758), (2, 1.577, 0.428, 10.991), (2, 1.990, 3.569, 29.421), (2, 1.584, 1.803, 19.234), (2, 0.835, 3.603, 30.785), (2, 0.900, 3.033, 27.296), (2, 1.180, 0.280, 10.499), (2, 2.400, 2.802, 24.409), (2, 0.924, 2.462, 23.851), (2, 2.138, 0.722, 12.192), (2, -0.253, -0.809, 5.401), (2, 3.570, -0.116, 5.733), (2, 0.201, -0.182, 8.708), (2, 2.457, 0.454, 10.267), (2, -0.053, 0.443, 12.709), (2, 2.108, 2.069, 20.309), (2, -0.964, -0.441, 8.318), (2, 1.802, 0.403, 10.614), (2, 3.704, 3.902, 29.711), (2, 1.904, 2.418, 22.603), (2, 2.965, 3.429, 27.606), (2, -0.801, -0.072, 10.370), (2, 3.009, 0.491, 9.937), (2, 2.781, 1.026, 13.376), (2, -0.421, 0.744, 14.883), (2, 3.639, -0.148, 5.476), (2, 0.584, 2.041, 21.663), (2, 1.547, -0.391, 6.107), (2, -0.204, 0.727, 14.564), (2, 0.372, 0.464, 12.410), (2, 1.185, 1.732, 19.207), (2, 3.574, 0.755, 10.954), (2, 2.164, 1.425, 16.385), (2, 1.895, 1.374, 16.351), (2, 2.352, 2.188, 20.779), (2, 0.187, 0.677, 13.874), (2, -0.589, 3.686, 32.703), (2, 3.081, 0.414, 9.403), (2, 
3.341, 3.246, 26.137), (2, 0.617, -0.201, 8.174), (2, 1.518, 3.833, 31.481), (2, 2.613, -0.350, 5.286), (2, 3.426, 0.751, 11.082), (2, 2.726, 3.586, 28.787), (2, 2.834, -0.219, 5.855), (2, 1.038, 3.607, 30.605), (2, 0.479, 1.226, 16.874), (2, 1.729, 0.297, 10.053), (2, 0.050, 1.815, 20.841), (2, -0.554, 3.538, 31.782), (2, 2.773, 0.973, 13.064), (2, -0.239, 3.425, 30.786), (2, 3.611, 3.700, 28.590), (2, 1.418, 3.625, 30.332), (2, 1.599, 1.626, 18.156), (2, 1.841, 1.518, 17.269), (2, 1.119, 1.996, 20.856), (2, 2.810, 2.293, 20.947), (2, 1.174, 2.062, 21.198), (2, -0.326, -0.279, 8.655), (2, -0.365, 0.816, 15.259), (2, 1.296, -0.095, 8.132), (2, -0.263, 0.511, 13.327), (2, 1.757, 3.012, 26.314), (2, 1.849, 1.065, 14.539), (2, 1.651, 2.244, 21.814), (2, 3.942, 1.026, 12.214), (2, 2.314, 1.944, 19.353), (2, 3.055, -0.002, 6.930), (2, 0.402, 1.350, 17.698), (2, 0.004, 2.288, 23.724), (2, 3.265, 2.962, 24.509), (2, 1.044, -0.684, 4.850), (2, -0.280, 2.278, 23.948), (2, 1.216, 0.726, 13.142), (2, 3.181, 3.518, 27.925), (2, 3.199, -0.124, 6.055), (2, 0.510, -0.622, 5.755), (2, 2.920, 1.067, 13.484), (2, 2.573, 1.844, 18.492), (2, 1.155, 3.505, 29.878), (2, 2.033, 1.756, 18.502), (2, 1.312, 0.114, 9.373), (2, -0.823, 3.339, 30.854), (2, 0.287, 3.891, 33.060), (2, -0.621, -0.210, 9.363), (2, 3.734, 1.574, 15.712), (2, -0.932, 0.772, 15.561), (2, -0.719, 1.604, 20.345), (2, -0.555, 0.773, 15.190), (2, -0.744, 3.934, 34.348), (2, 1.671, -0.425, 5.778), (2, 2.754, 2.690, 23.385), (2, 1.826, 2.185, 21.283), (2, 1.970, 0.021, 8.159), (2, 2.882, 3.494, 28.081), (2, 1.668, -0.030, 8.150), (2, 0.472, 2.184, 22.633), (2, 1.656, 3.393, 28.701), (2, -0.069, 2.331, 24.057), (2, 0.075, 1.341, 17.973), (2, 1.836, 0.565, 11.554), (2, -0.235, 0.520, 13.357), (2, 3.620, 3.169, 25.393), (2, 0.401, -0.062, 9.224), (2, 1.503, 1.667, 18.501), (2, 3.727, 1.149, 13.166), (2, 2.777, -0.081, 6.737), (2, 3.914, -0.234, 4.680), (2, 1.765, 0.750, 12.737), (2, 1.746, 1.818, 19.161), (2, 0.019, 2.819, 26.893), (2, 1.068, 1.917, 20.434), (2, 3.035, 3.158, 25.915), (2, 2.012, 0.724, 12.330), (2, 2.597, 2.264, 20.986), (2, 3.428, 3.239, 26.005), (2, -0.016, -0.529, 6.842), (2, 1.314, 0.735, 13.095), (2, 2.832, -0.567, 3.768), (2, -0.296, 2.641, 26.141), (2, 2.863, 3.889, 30.470), (2, 2.849, 3.997, 31.130), (2, 1.660, 1.813, 19.216), (2, 2.798, 0.977, 13.062), (2, 3.935, 0.549, 9.359), (2, 1.002, 3.557, 30.342), (2, 3.052, 2.207, 20.193), (2, 3.455, 0.458, 9.294), (2, 3.312, 2.138, 19.515), (2, 0.292, 0.058, 10.056), (2, 0.050, -0.211, 8.682), (2, -0.215, 1.108, 16.866), (2, -0.169, 0.647, 14.048), (2, 2.546, 0.876, 12.709), (2, -0.911, -0.209, 9.659), (2, 0.950, 2.894, 26.413), (2, -0.512, -0.167, 9.508), (2, 1.821, -0.747, 3.696), (2, 2.257, 3.945, 31.415), (2, 2.398, -0.586, 4.087), (2, 3.051, 0.815, 11.836), (2, 3.399, 2.131, 19.389), (2, 2.982, 1.549, 16.314), (2, -0.790, -0.329, 8.819), (2, 3.797, 0.327, 8.167), (2, 1.838, 0.290, 9.902), (2, 1.906, 1.782, 18.785), (2, 1.330, -0.208, 7.422), (2, -0.217, 0.854, 15.344), (2, 3.310, 1.582, 16.180), (2, 2.965, 0.917, 12.537), (2, 3.558, -0.164, 5.460), (2, -0.841, 2.060, 23.203), (2, 2.892, 2.621, 22.834), (2, -0.011, -0.198, 8.821), (2, -0.430, 2.999, 28.424), (2, -0.584, 0.894, 15.946), (2, 0.033, 1.310, 17.829), (2, 3.044, 0.410, 9.418), (2, 3.932, 0.295, 7.836), (2, 0.394, 1.315, 17.494), (2, 1.424, -0.167, 7.573), (2, 1.676, 1.118, 15.031), (2, 1.821, 0.714, 12.462), (2, 2.688, 1.497, 16.292), (2, 3.960, 2.344, 20.103), (2, -0.787, -0.161, 9.819), (2, 3.538, 3.651, 28.366), 
(2, -0.338, 0.458, 13.088), (2, -0.146, 3.162, 29.120), (2, 3.124, 3.352, 26.989), (2, -0.189, 3.685, 32.301), (2, 0.396, 1.004, 15.626), (2, -0.171, 2.114, 22.858), (2, 3.736, 0.732, 10.659), (2, 1.259, 2.564, 24.127), (2, -0.263, 2.426, 24.820), (2, 1.558, -0.858, 3.292), (2, 2.882, 1.110, 13.776), (2, 0.039, 1.284, 17.666), (2, 3.074, 2.379, 21.201), (2, -0.523, 0.303, 12.344), (2, 0.363, 1.082, 16.132), (2, 2.925, 2.187, 20.195), (2, 0.595, -0.335, 7.397), (2, 0.062, -0.232, 8.544), (2, 0.877, 2.155, 22.050), (2, -0.256, 2.922, 27.788), (2, 1.813, 3.161, 27.152), (2, 2.177, 2.532, 23.016), (2, -0.051, 0.035, 10.263), (2, 2.688, 3.599, 28.906), (2, 2.539, -0.076, 7.008), (2, 2.563, 1.467, 16.240), (2, -0.755, 2.276, 24.410), (2, 3.092, 0.660, 10.868), (2, 2.403, 2.693, 23.756), (2, -0.170, 2.178, 23.239), (2, 2.672, -0.603, 3.712), (2, -0.077, -0.493, 7.116), (2, 1.997, 1.934, 19.608), (2, 1.913, -0.792, 3.335), (2, 0.171, -0.329, 7.857), (2, 2.488, 0.171, 8.540), (2, -0.514, 0.331, 12.500), (2, -0.201, 2.484, 25.103), (2, 2.436, 0.032, 7.759), (2, -0.094, 2.530, 25.275), (2, 2.186, 2.591, 23.358), (2, 3.171, -0.766, 2.231), (2, 2.410, 0.183, 8.687), (2, -0.699, -0.329, 8.728), (2, 3.285, 2.252, 20.228), (2, 1.928, -0.059, 7.720), (2, 3.460, 0.399, 8.931), (2, 2.542, 0.224, 8.801), (2, 2.902, 2.101, 19.702), (2, 3.808, 2.528, 21.358), (2, 0.330, 0.642, 13.522), (2, -0.088, 1.286, 17.804), (2, 3.025, 2.354, 21.100), (2, 3.306, 2.049, 18.986), (2, 1.477, 1.720, 18.845), (2, 2.676, 3.601, 28.931), (2, 1.577, 0.170, 9.443), (2, 1.362, 3.534, 29.843), (2, 2.616, 3.106, 26.018), (2, 3.773, 0.378, 8.496), (2, -0.125, 2.057, 22.465), (2, 3.174, 1.382, 15.120), (2, 0.844, 2.058, 21.503); + +SELECT ANS[1] > -1.1 AND ANS[1] < -0.9 AND ANS[2] > 5.9 AND ANS[2] < 6.1 AND ANS[3] > 9.9 AND ANS[3] < 10.1 FROM +(SELECT stochasticLinearRegression(0.05, 0, 1, 'SGD')(target, p1, p2) AS ANS FROM grouptest GROUP BY user_id ORDER BY user_id LIMIT 1, 1); + +SELECT ANS[1] > 1.9 AND ANS[1] < 2.1 AND ANS[2] > 2.9 AND ANS[2] < 3.1 AND ANS[3] > -3.1 AND ANS[3] < -2.9 FROM +(SELECT stochasticLinearRegression(0.05, 0, 1, 'SGD')(target, p1, p2) AS ANS FROM grouptest GROUP BY user_id ORDER BY user_id LIMIT 0, 1); + +DROP TABLE defaults; +DROP TABLE model; +DROP TABLE grouptest; diff --git a/parser/testdata/00948_to_valid_utf8/ast.json b/parser/testdata/00948_to_valid_utf8/ast.json new file mode 100644 index 000000000..c4cf6f949 --- /dev/null +++ b/parser/testdata/00948_to_valid_utf8/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toValidUTF8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001317588, + "rows_read": 12, + "bytes_read": 465 + } +} diff --git a/parser/testdata/00948_to_valid_utf8/metadata.json b/parser/testdata/00948_to_valid_utf8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/00948_to_valid_utf8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00948_to_valid_utf8/query.sql b/parser/testdata/00948_to_valid_utf8/query.sql new file mode 100644 index 000000000..d5e5ccc42 --- /dev/null +++ b/parser/testdata/00948_to_valid_utf8/query.sql @@ -0,0 +1,135 @@ +select toValidUTF8('') from system.numbers limit 10; +select toValidUTF8('some text') from system.numbers limit 10; +select toValidUTF8('какой-то текст') from system.numbers limit 10; +select toValidUTF8('\x00') from system.numbers limit 10; +select toValidUTF8('\x66') from system.numbers limit 10; +select toValidUTF8('\x7F') from system.numbers limit 10; +select toValidUTF8('\x00\x7F') from system.numbers limit 10; +select toValidUTF8('\x7F\x00') from system.numbers limit 10; +select toValidUTF8('\xC2\x80') from system.numbers limit 10; +select toValidUTF8('\xDF\xBF') from system.numbers limit 10; +select toValidUTF8('\xE0\xA0\x80') from system.numbers limit 10; +select toValidUTF8('\xE0\xA0\xBF') from system.numbers limit 10; +select toValidUTF8('\xED\x9F\x80') from system.numbers limit 10; +select toValidUTF8('\xEF\x80\xBF') from system.numbers limit 10; +select toValidUTF8('\xF0\x90\xBF\x80') from system.numbers limit 10; +select toValidUTF8('\xF2\x81\xBE\x99') from system.numbers limit 10; +select toValidUTF8('\xF4\x8F\x88\xAA') from system.numbers limit 10; + +select toValidUTF8('a') from system.numbers limit 10; +select toValidUTF8('\xc3\xb1') from system.numbers limit 10; +select toValidUTF8('\xe2\x82\xa1') from system.numbers limit 10; +select toValidUTF8('\xf0\x90\x8c\xbc') from system.numbers limit 10; +select toValidUTF8('안녕하세요, 세상') from system.numbers limit 10; + +select toValidUTF8('\xc3\x28') from system.numbers limit 10; +select toValidUTF8('\xa0\xa1') from system.numbers limit 10; +select toValidUTF8('\xe2\x28\xa1') from system.numbers limit 10; +select toValidUTF8('\xe2\x82\x28') from system.numbers limit 10; +select toValidUTF8('\xf0\x28\x8c\xbc') from system.numbers limit 10; +select toValidUTF8('\xf0\x90\x28\xbc') from system.numbers limit 10; +select toValidUTF8('\xf0\x28\x8c\x28') from system.numbers limit 10; +select toValidUTF8('\xc0\x9f') from system.numbers limit 10; +select toValidUTF8('\xf5\xff\xff\xff') from system.numbers limit 10; +select toValidUTF8('\xed\xa0\x81') from system.numbers limit 10; +select toValidUTF8('\xf8\x90\x80\x80\x80') from system.numbers limit 10; +select toValidUTF8('12345678901234\xed') from system.numbers limit 10; +select toValidUTF8('123456789012345\xed') from system.numbers limit 10; +select toValidUTF8('123456789012345\xed123456789012345\xed') from system.numbers limit 10; +select toValidUTF8('123456789012345\xed\xed\xed\xed\xed\xed\xff\xff\xff\xff\xff\xff123456789012345\xed') from system.numbers limit 10; +select toValidUTF8('123456789012345\xf1') from system.numbers limit 10; +select toValidUTF8('123456789012345\xc2') from system.numbers limit 10; +select toValidUTF8('\xC2\x7F') from system.numbers limit 10; + +select toValidUTF8('\x80') from system.numbers limit 10; +select toValidUTF8('\xBF') from system.numbers limit 10; +select toValidUTF8('\xC0\x80') from system.numbers limit 10; +select toValidUTF8('\xC1\x00') from system.numbers limit 10; +select toValidUTF8('\xC2\x7F') from system.numbers limit 10; +select toValidUTF8('\xDF\xC0') from system.numbers limit 10; +select toValidUTF8('\xE0\x9F\x80') from system.numbers limit 10; +select toValidUTF8('\xE0\xC2\x80') from system.numbers limit 10; +select 
toValidUTF8('\xED\xA0\x80') from system.numbers limit 10; +select toValidUTF8('\xED\x7F\x80') from system.numbers limit 10; +select toValidUTF8('\xEF\x80\x00') from system.numbers limit 10; +select toValidUTF8('\xF0\x8F\x80\x80') from system.numbers limit 10; +select toValidUTF8('\xF0\xEE\x80\x80') from system.numbers limit 10; +select toValidUTF8('\xF2\x90\x91\x7F') from system.numbers limit 10; +select toValidUTF8('\xF4\x90\x88\xAA') from system.numbers limit 10; +select toValidUTF8('\xF4\x00\xBF\xBF') from system.numbers limit 10; +select toValidUTF8('\x00\x00\x00\x00\x00\xC2\x80\x00\x00\x00\xE1\x80\x80\x00\x00\xC2\xC2\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00') from system.numbers limit 10; +select toValidUTF8('\x00\x00\x00\x00\x00\xC2\xC2\x80\x00\x00\xE1\x80\x80\x00\x00\x00') from system.numbers limit 10; +select toValidUTF8('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF1\x80') from system.numbers limit 10; +select toValidUTF8('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF1') from system.numbers limit 10; +select toValidUTF8('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF1\x80\x80') from system.numbers limit 10; +select toValidUTF8('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF1\x80\xC2\x80') from system.numbers limit 10; +select toValidUTF8('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF0\x80\x80\x80') from system.numbers limit 10; + + +select 1 = isValidUTF8(toValidUTF8('')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('some text')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('какой-то текст')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\x00')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\x66')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\x7F')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\x00\x7F')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\x7F\x00')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xC2\x80')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xDF\xBF')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xE0\xA0\x80')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xE0\xA0\xBF')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xED\x9F\x80')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xEF\x80\xBF')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xF0\x90\xBF\x80')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xF2\x81\xBE\x99')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xF4\x8F\x88\xAA')) from system.numbers limit 10; + +select 1 = isValidUTF8(toValidUTF8('a')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xc3\xb1')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xe2\x82\xa1')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xf0\x90\x8c\xbc')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('안녕하세요, 세상')) from system.numbers limit 10; + 
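+-- The byte sequences below are malformed UTF-8; each check asserts that toValidUTF8
+-- repairs them into output that isValidUTF8 accepts, so every row is expected to be 1.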
+select 1 = isValidUTF8(toValidUTF8('\xc3\x28')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xa0\xa1')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xe2\x28\xa1')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xe2\x82\x28')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xf0\x28\x8c\xbc')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xf0\x90\x28\xbc')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xf0\x28\x8c\x28')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xc0\x9f')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xf5\xff\xff\xff')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xed\xa0\x81')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xf8\x90\x80\x80\x80')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('12345678901234\xed')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('123456789012345\xed')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('123456789012345\xed123456789012345\xed')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('123456789012345\xed\xed\xed\xed\xed\xed\xff\xff\xff\xff\xff\xff123456789012345\xed')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('123456789012345\xf1')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('123456789012345\xc2')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xC2\x7F')) from system.numbers limit 10; + +select 1 = isValidUTF8(toValidUTF8('\x80')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xBF')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xC0\x80')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xC1\x00')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xC2\x7F')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xDF\xC0')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xE0\x9F\x80')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xE0\xC2\x80')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xED\xA0\x80')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xED\x7F\x80')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xEF\x80\x00')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xF0\x8F\x80\x80')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xF0\xEE\x80\x80')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xF2\x90\x91\x7F')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xF4\x90\x88\xAA')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\xF4\x00\xBF\xBF')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\x00\x00\x00\x00\x00\xC2\x80\x00\x00\x00\xE1\x80\x80\x00\x00\xC2\xC2\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\x00\x00\x00\x00\x00\xC2\xC2\x80\x00\x00\xE1\x80\x80\x00\x00\x00')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF1\x80')) from system.numbers limit 10; +select 1 = 
isValidUTF8(toValidUTF8('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF1')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF1\x80\x80')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF1\x80\xC2\x80')) from system.numbers limit 10; +select 1 = isValidUTF8(toValidUTF8('\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xF0\x80\x80\x80')) from system.numbers limit 10; + diff --git a/parser/testdata/00948_values_interpreter_template/ast.json b/parser/testdata/00948_values_interpreter_template/ast.json new file mode 100644 index 000000000..4a61e369e --- /dev/null +++ b/parser/testdata/00948_values_interpreter_template/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery type_names (children 1)" + }, + { + "explain": " Identifier type_names" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001278502, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00948_values_interpreter_template/metadata.json b/parser/testdata/00948_values_interpreter_template/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00948_values_interpreter_template/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00948_values_interpreter_template/query.sql b/parser/testdata/00948_values_interpreter_template/query.sql new file mode 100644 index 000000000..df8a83cd0 --- /dev/null +++ b/parser/testdata/00948_values_interpreter_template/query.sql @@ -0,0 +1,45 @@ +DROP TABLE IF EXISTS type_names; +DROP TABLE IF EXISTS values_template; +DROP TABLE IF EXISTS values_template_nullable; +DROP TABLE IF EXISTS values_template_fallback; + +SET input_format_null_as_default = 0; + +CREATE TABLE type_names (n UInt8, s1 String, s2 String, s3 String) ENGINE=Memory; +CREATE TABLE values_template (d Date, s String, u UInt8, i Int64, f Float64, a Array(UInt8)) ENGINE = Memory; +CREATE TABLE values_template_nullable (d Date, s Nullable(String), u Nullable(UInt8), a Array(Nullable(Float32))) ENGINE = Memory; +CREATE TABLE values_template_fallback (n UInt8) ENGINE = Memory; + +SET input_format_values_interpret_expressions = 0; + +-- checks type deduction +INSERT INTO type_names VALUES (1, toTypeName([1, 2]), toTypeName((256, -1, 3.14, 'str', [1, -1])), toTypeName([(1, [256]), (256, [1, 2])])), (2, toTypeName([1, -1]), toTypeName((256, -1, 3, 'str', [1, 2])), toTypeName([(256, []), (1, [])])); + +--(1, lower(replaceAll(_STR_1, 'o', 'a')), _NUM_1 + _NUM_2 + _NUM_3, round(_NUM_4 / _NUM_5), _NUM_6 * CAST(_STR_7, 'Int8'), _ARR_8); +-- _NUM_1: UInt64 -> Int64 -> UInt64 +-- _NUM_4: Int64 -> UInt64 +-- _NUM_5: Float64 -> Int64 +INSERT INTO values_template VALUES ((1), lower(replaceAll('Hella', 'a', 'o')), 1 + 2 + 3, round(-4 * 5.0), nan / CAST('42', 'Int8'), reverse([1, 2, 3])), ((2), lower(replaceAll('Warld', 'a', 'o')), -4 + 5 + 6, round(18446744073709551615 * 1e-19), 1.0 / CAST('0', 'Int8'), reverse([])), ((3), lower(replaceAll('Test', 'a', 'o')), 3 + 2 + 1, round(9223372036854775807 * -1), 6.28 / CAST('2', 'Int8'), reverse([4, 5])), 
((4), lower(replaceAll('Expressians', 'a', 'o')), 6 + 5 + 4, round(1 * -9223372036854775807), 127.0 / CAST('127', 'Int8'), reverse([6, 7, 8, 9, 0])); + +INSERT INTO values_template_nullable VALUES ((1), lower(replaceAll('Hella', 'a', 'o')), 1 + 2 + 3, arraySort(x -> assumeNotNull(x), [null, NULL::Nullable(UInt8)])), ((2), lower(replaceAll('Warld', 'b', 'o')), 4 - 5 + 6, arraySort(x -> assumeNotNull(x), [+1, -1, Null])), ((3), lower(replaceAll('Test', 'c', 'o')), 3 + 2 - 1, arraySort(x -> assumeNotNull(x), [1, nUlL, 3.14])), ((4), lower(replaceAll(null, 'c', 'o')), 6 + 5 - null, arraySort(x -> assumeNotNull(x), [3, 2, 1])); + +INSERT INTO values_template_fallback VALUES (1 + x); -- { error SYNTAX_ERROR } +INSERT INTO values_template_fallback VALUES (abs(functionThatDoesNotExists(42))); -- { error UNKNOWN_FUNCTION } +INSERT INTO values_template_fallback VALUES ([1]); -- { error ILLEGAL_TYPE_OF_ARGUMENT } + +INSERT INTO values_template_fallback VALUES (CAST(1, 'UInt8')), (CAST('2', 'UInt8')); +SET input_format_values_accurate_types_of_literals = 0; + +INSERT INTO type_names VALUES (3, toTypeName([1, 2]), toTypeName((256, -1, 3.14, 'str', [1, -1])), toTypeName([(1, [256]), (256, [1, 2])])), (4, toTypeName([1, -1]), toTypeName((256, -1, 3, 'str', [1, 2])), toTypeName([(256, []), (1, [])])); +SET input_format_values_interpret_expressions = 1; +INSERT INTO values_template_fallback VALUES (1 + 2), (3 + +04), (5 + 6); +INSERT INTO values_template_fallback VALUES (+020), (+030), (+040); + +SELECT * FROM type_names ORDER BY n; +SELECT * FROM values_template ORDER BY d; +SELECT * FROM values_template_nullable ORDER BY d; +SELECT * FROM values_template_fallback ORDER BY n; +DROP TABLE type_names; +DROP TABLE values_template; +DROP TABLE values_template_nullable; +DROP TABLE values_template_fallback; diff --git a/parser/testdata/00949_format/ast.json b/parser/testdata/00949_format/ast.json new file mode 100644 index 000000000..ee7eae352 --- /dev/null +++ b/parser/testdata/00949_format/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00128735, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00949_format/metadata.json b/parser/testdata/00949_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00949_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00949_format/query.sql b/parser/testdata/00949_format/query.sql new file mode 100644 index 000000000..8fd44cc8e --- /dev/null +++ b/parser/testdata/00949_format/query.sql @@ -0,0 +1,228 @@ +SET send_logs_level = 'fatal'; + +select format('Hello {1} World {0}', materialize('first'), materialize('second')) from system.numbers limit 1; +select format('Hello {0} World {1}', materialize('first'), materialize('second')) from system.numbers limit 2; +select format('Hello {1} World {1}', materialize('first'), materialize('second')) from system.numbers limit 3; +select format('Hello {0} World {0}', materialize('first'), 'second') from system.numbers limit 2; +select format('Hellooooooooooooooooooooooooooooooooooo {0} Wooooooooooooooooooooooorld {0} {2}{2}', materialize('fiiiiiiiiiiirst'), 'second', materialize('third')) from system.numbers limit 2; + + +select format('{}', 'first'); +select format('{}{}', 'first', toFixedString('second', 6)); +select format('{{}}', materialize('first'), 'second'); +select 50 = 
length(format((select arrayStringConcat(arrayMap(x ->'{', range(100)))), '')); +select 100 = length(format(concat((select arrayStringConcat(arrayMap(x ->'}', range(100)))), (select arrayStringConcat(arrayMap(x ->'{', range(100))))), '')); + +select format('', 'first'); +select concat('third', 'first', 'second')=format('{2}{0}{1}', 'first', 'second', 'third'); + +select format('{', ''); -- { serverError BAD_ARGUMENTS } +select format('{{}', ''); -- { serverError BAD_ARGUMENTS } +select format('{ {}', ''); -- { serverError BAD_ARGUMENTS } +select format('}', ''); -- { serverError BAD_ARGUMENTS } +select format('{{', ''); +select format('{}}', ''); -- { serverError BAD_ARGUMENTS } +select format('}}', ''); +select format('{2 }', ''); -- { serverError BAD_ARGUMENTS } +select format('{}{}{}{}{}{} }{}', '', '', '', '', '', '', ''); -- { serverError BAD_ARGUMENTS } +select format('{sometext}', ''); -- { serverError BAD_ARGUMENTS } +select format('{\0sometext}', ''); -- { serverError BAD_ARGUMENTS } +select format('{1023}', ''); -- { serverError BAD_ARGUMENTS } +select format('{10000000000000000000000000000000000000000000000000}', ''); -- { serverError BAD_ARGUMENTS } +select format('{} {0}', '', ''); -- { serverError BAD_ARGUMENTS } +select format('{0} {}', '', ''); -- { serverError BAD_ARGUMENTS } +select format('Hello {} World {} {}{}', 'first', 'second', 'third') from system.numbers limit 2; -- { serverError BAD_ARGUMENTS } +select format('Hello {0} World {1} {2}{3}', 'first', 'second', 'third') from system.numbers limit 2; -- { serverError BAD_ARGUMENTS } + +select 50 = length(format((select arrayStringConcat(arrayMap(x ->'{', range(101)))), '')); -- { serverError BAD_ARGUMENTS } + +select format('{}{}{}', materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1)), materialize(toFixedString('c', 1))) == 'abc'; +select format('{}{}{}', materialize(toFixedString('a', 1)), materialize('b'), materialize(toFixedString('c', 1))) == 'abc'; + +select '{{}' == format('{{{}', '{}'); + +select '{ key: fn, value: concat }' == format('{}{}{}{}{}', '{ key: ', toFixedString('fn', 2), ', value: ', 'concat', ' }'); + +select format('{}{}', 'a', 'b') == 'ab'; +select format('{}{}', 'a', materialize('b')) == 'ab'; +select format('{}{}', materialize('a'), 'b') == 'ab'; +select format('{}{}', materialize('a'), materialize('b')) == 'ab'; + +select format('{}{}', 'a', toFixedString('b', 1)) == 'ab'; +select format('{}{}', 'a', materialize(toFixedString('b', 1))) == 'ab'; +select format('{}{}', materialize('a'), toFixedString('b', 1)) == 'ab'; +select format('{}{}', materialize('a'), materialize(toFixedString('b', 1))) == 'ab'; + +select format('{}{}', toFixedString('a', 1), 'b') == 'ab'; +select format('{}{}', toFixedString('a', 1), materialize('b')) == 'ab'; +select format('{}{}', materialize(toFixedString('a', 1)), 'b') == 'ab'; +select format('{}{}', materialize(toFixedString('a', 1)), materialize('b')) == 'ab'; + +select format('{}{}', toFixedString('a', 1), toFixedString('b', 1)) == 'ab'; +select format('{}{}', toFixedString('a', 1), materialize(toFixedString('b', 1))) == 'ab'; +select format('{}{}', materialize(toFixedString('a', 1)), toFixedString('b', 1)) == 'ab'; +select format('{}{}', materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1))) == 'ab'; + +select format('{}{}', 'a', 'b') == 'ab' from system.numbers limit 5; +select format('{}{}', 'a', materialize('b')) == 'ab' from system.numbers limit 5; +select format('{}{}', materialize('a'), 'b') == 'ab' from system.numbers 
limit 5; +select format('{}{}', materialize('a'), materialize('b')) == 'ab' from system.numbers limit 5; + +select format('{}{}', 'a', toFixedString('b', 1)) == 'ab' from system.numbers limit 5; +select format('{}{}', 'a', materialize(toFixedString('b', 1))) == 'ab' from system.numbers limit 5; +select format('{}{}', materialize('a'), toFixedString('b', 1)) == 'ab' from system.numbers limit 5; +select format('{}{}', materialize('a'), materialize(toFixedString('b', 1))) == 'ab' from system.numbers limit 5; + +select format('{}{}', toFixedString('a', 1), 'b') == 'ab' from system.numbers limit 5; +select format('{}{}', toFixedString('a', 1), materialize('b')) == 'ab' from system.numbers limit 5; +select format('{}{}', materialize(toFixedString('a', 1)), 'b') == 'ab' from system.numbers limit 5; +select format('{}{}', materialize(toFixedString('a', 1)), materialize('b')) == 'ab' from system.numbers limit 5; + +select format('{}{}', toFixedString('a', 1), toFixedString('b', 1)) == 'ab' from system.numbers limit 5; +select format('{}{}', toFixedString('a', 1), materialize(toFixedString('b', 1))) == 'ab' from system.numbers limit 5; +select format('{}{}', materialize(toFixedString('a', 1)), toFixedString('b', 1)) == 'ab' from system.numbers limit 5; +select format('{}{}', materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1))) == 'ab' from system.numbers limit 5; + +select format('{}{}{}', 'a', 'b', 'c') == 'abc'; +select format('{}{}{}', 'a', 'b', materialize('c')) == 'abc'; +select format('{}{}{}', 'a', materialize('b'), 'c') == 'abc'; +select format('{}{}{}', 'a', materialize('b'), materialize('c')) == 'abc'; +select format('{}{}{}', materialize('a'), 'b', 'c') == 'abc'; +select format('{}{}{}', materialize('a'), 'b', materialize('c')) == 'abc'; +select format('{}{}{}', materialize('a'), materialize('b'), 'c') == 'abc'; +select format('{}{}{}', materialize('a'), materialize('b'), materialize('c')) == 'abc'; + +select format('{}{}{}', 'a', 'b', toFixedString('c', 1)) == 'abc'; +select format('{}{}{}', 'a', 'b', materialize(toFixedString('c', 1))) == 'abc'; +select format('{}{}{}', 'a', materialize('b'), toFixedString('c', 1)) == 'abc'; +select format('{}{}{}', 'a', materialize('b'), materialize(toFixedString('c', 1))) == 'abc'; +select format('{}{}{}', materialize('a'), 'b', toFixedString('c', 1)) == 'abc'; +select format('{}{}{}', materialize('a'), 'b', materialize(toFixedString('c', 1))) == 'abc'; +select format('{}{}{}', materialize('a'), materialize('b'), toFixedString('c', 1)) == 'abc'; +select format('{}{}{}', materialize('a'), materialize('b'), materialize(toFixedString('c', 1))) == 'abc'; + +select format('{}{}{}', 'a', toFixedString('b', 1), 'c') == 'abc'; +select format('{}{}{}', 'a', toFixedString('b', 1), materialize('c')) == 'abc'; +select format('{}{}{}', 'a', materialize(toFixedString('b', 1)), 'c') == 'abc'; +select format('{}{}{}', 'a', materialize(toFixedString('b', 1)), materialize('c')) == 'abc'; +select format('{}{}{}', materialize('a'), toFixedString('b', 1), 'c') == 'abc'; +select format('{}{}{}', materialize('a'), toFixedString('b', 1), materialize('c')) == 'abc'; +select format('{}{}{}', materialize('a'), materialize(toFixedString('b', 1)), 'c') == 'abc'; +select format('{}{}{}', materialize('a'), materialize(toFixedString('b', 1)), materialize('c')) == 'abc'; + +select format('{}{}{}', 'a', toFixedString('b', 1), toFixedString('c', 1)) == 'abc'; +select format('{}{}{}', 'a', toFixedString('b', 1), materialize(toFixedString('c', 1))) == 'abc'; +select 
format('{}{}{}', 'a', materialize(toFixedString('b', 1)), toFixedString('c', 1)) == 'abc'; +select format('{}{}{}', 'a', materialize(toFixedString('b', 1)), materialize(toFixedString('c', 1))) == 'abc'; +select format('{}{}{}', materialize('a'), toFixedString('b', 1), toFixedString('c', 1)) == 'abc'; +select format('{}{}{}', materialize('a'), toFixedString('b', 1), materialize(toFixedString('c', 1))) == 'abc'; +select format('{}{}{}', materialize('a'), materialize(toFixedString('b', 1)), toFixedString('c', 1)) == 'abc'; +select format('{}{}{}', materialize('a'), materialize(toFixedString('b', 1)), materialize(toFixedString('c', 1))) == 'abc'; + +select format('{}{}{}', toFixedString('a', 1), 'b', 'c') == 'abc'; +select format('{}{}{}', toFixedString('a', 1), 'b', materialize('c')) == 'abc'; +select format('{}{}{}', toFixedString('a', 1), materialize('b'), 'c') == 'abc'; +select format('{}{}{}', toFixedString('a', 1), materialize('b'), materialize('c')) == 'abc'; +select format('{}{}{}', materialize(toFixedString('a', 1)), 'b', 'c') == 'abc'; +select format('{}{}{}', materialize(toFixedString('a', 1)), 'b', materialize('c')) == 'abc'; +select format('{}{}{}', materialize(toFixedString('a', 1)), materialize('b'), 'c') == 'abc'; +select format('{}{}{}', materialize(toFixedString('a', 1)), materialize('b'), materialize('c')) == 'abc'; + +select format('{}{}{}', toFixedString('a', 1), 'b', toFixedString('c', 1)) == 'abc'; +select format('{}{}{}', toFixedString('a', 1), 'b', materialize(toFixedString('c', 1))) == 'abc'; +select format('{}{}{}', toFixedString('a', 1), materialize('b'), toFixedString('c', 1)) == 'abc'; +select format('{}{}{}', toFixedString('a', 1), materialize('b'), materialize(toFixedString('c', 1))) == 'abc'; +select format('{}{}{}', materialize(toFixedString('a', 1)), 'b', toFixedString('c', 1)) == 'abc'; +select format('{}{}{}', materialize(toFixedString('a', 1)), 'b', materialize(toFixedString('c', 1))) == 'abc'; +select format('{}{}{}', materialize(toFixedString('a', 1)), materialize('b'), toFixedString('c', 1)) == 'abc'; +select format('{}{}{}', materialize(toFixedString('a', 1)), materialize('b'), materialize(toFixedString('c', 1))) == 'abc'; + +select format('{}{}{}', toFixedString('a', 1), toFixedString('b', 1), 'c') == 'abc'; +select format('{}{}{}', toFixedString('a', 1), toFixedString('b', 1), materialize('c')) == 'abc'; +select format('{}{}{}', toFixedString('a', 1), materialize(toFixedString('b', 1)), 'c') == 'abc'; +select format('{}{}{}', toFixedString('a', 1), materialize(toFixedString('b', 1)), materialize('c')) == 'abc'; +select format('{}{}{}', materialize(toFixedString('a', 1)), toFixedString('b', 1), 'c') == 'abc'; +select format('{}{}{}', materialize(toFixedString('a', 1)), toFixedString('b', 1), materialize('c')) == 'abc'; +select format('{}{}{}', materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1)), 'c') == 'abc'; +select format('{}{}{}', materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1)), materialize('c')) == 'abc'; + +select format('{}{}{}', toFixedString('a', 1), toFixedString('b', 1), toFixedString('c', 1)) == 'abc'; +select format('{}{}{}', toFixedString('a', 1), toFixedString('b', 1), materialize(toFixedString('c', 1))) == 'abc'; +select format('{}{}{}', toFixedString('a', 1), materialize(toFixedString('b', 1)), toFixedString('c', 1)) == 'abc'; +select format('{}{}{}', toFixedString('a', 1), materialize(toFixedString('b', 1)), materialize(toFixedString('c', 1))) == 'abc'; +select format('{}{}{}', 
materialize(toFixedString('a', 1)), toFixedString('b', 1), toFixedString('c', 1)) == 'abc'; +select format('{}{}{}', materialize(toFixedString('a', 1)), toFixedString('b', 1), materialize(toFixedString('c', 1))) == 'abc'; +select format('{}{}{}', materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1)), toFixedString('c', 1)) == 'abc'; +select format('{}{}{}', materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1)), materialize(toFixedString('c', 1))) == 'abc'; + +select format('{}{}{}', 'a', 'b', 'c') == 'abc' from system.numbers limit 5; +select format('{}{}{}', 'a', 'b', materialize('c')) == 'abc' from system.numbers limit 5; +select format('{}{}{}', 'a', materialize('b'), 'c') == 'abc' from system.numbers limit 5; +select format('{}{}{}', 'a', materialize('b'), materialize('c')) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize('a'), 'b', 'c') == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize('a'), 'b', materialize('c')) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize('a'), materialize('b'), 'c') == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize('a'), materialize('b'), materialize('c')) == 'abc' from system.numbers limit 5; + +select format('{}{}{}', 'a', 'b', toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select format('{}{}{}', 'a', 'b', materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select format('{}{}{}', 'a', materialize('b'), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select format('{}{}{}', 'a', materialize('b'), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize('a'), 'b', toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize('a'), 'b', materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize('a'), materialize('b'), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize('a'), materialize('b'), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; + +select format('{}{}{}', 'a', toFixedString('b', 1), 'c') == 'abc' from system.numbers limit 5; +select format('{}{}{}', 'a', toFixedString('b', 1), materialize('c')) == 'abc' from system.numbers limit 5; +select format('{}{}{}', 'a', materialize(toFixedString('b', 1)), 'c') == 'abc' from system.numbers limit 5; +select format('{}{}{}', 'a', materialize(toFixedString('b', 1)), materialize('c')) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize('a'), toFixedString('b', 1), 'c') == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize('a'), toFixedString('b', 1), materialize('c')) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize('a'), materialize(toFixedString('b', 1)), 'c') == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize('a'), materialize(toFixedString('b', 1)), materialize('c')) == 'abc' from system.numbers limit 5; + +select format('{}{}{}', 'a', toFixedString('b', 1), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select format('{}{}{}', 'a', toFixedString('b', 1), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select format('{}{}{}', 'a', materialize(toFixedString('b', 1)), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select format('{}{}{}', 'a', 
materialize(toFixedString('b', 1)), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize('a'), toFixedString('b', 1), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize('a'), toFixedString('b', 1), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize('a'), materialize(toFixedString('b', 1)), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize('a'), materialize(toFixedString('b', 1)), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; + +select format('{}{}{}', toFixedString('a', 1), 'b', 'c') == 'abc' from system.numbers limit 5; +select format('{}{}{}', toFixedString('a', 1), 'b', materialize('c')) == 'abc' from system.numbers limit 5; +select format('{}{}{}', toFixedString('a', 1), materialize('b'), 'c') == 'abc' from system.numbers limit 5; +select format('{}{}{}', toFixedString('a', 1), materialize('b'), materialize('c')) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize(toFixedString('a', 1)), 'b', 'c') == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize(toFixedString('a', 1)), 'b', materialize('c')) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize(toFixedString('a', 1)), materialize('b'), 'c') == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize(toFixedString('a', 1)), materialize('b'), materialize('c')) == 'abc' from system.numbers limit 5; + +select format('{}{}{}', toFixedString('a', 1), 'b', toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select format('{}{}{}', toFixedString('a', 1), 'b', materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select format('{}{}{}', toFixedString('a', 1), materialize('b'), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select format('{}{}{}', toFixedString('a', 1), materialize('b'), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize(toFixedString('a', 1)), 'b', toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize(toFixedString('a', 1)), 'b', materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize(toFixedString('a', 1)), materialize('b'), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize(toFixedString('a', 1)), materialize('b'), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; + +select format('{}{}{}', toFixedString('a', 1), toFixedString('b', 1), 'c') == 'abc' from system.numbers limit 5; +select format('{}{}{}', toFixedString('a', 1), toFixedString('b', 1), materialize('c')) == 'abc' from system.numbers limit 5; +select format('{}{}{}', toFixedString('a', 1), materialize(toFixedString('b', 1)), 'c') == 'abc' from system.numbers limit 5; +select format('{}{}{}', toFixedString('a', 1), materialize(toFixedString('b', 1)), materialize('c')) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize(toFixedString('a', 1)), toFixedString('b', 1), 'c') == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize(toFixedString('a', 1)), toFixedString('b', 1), materialize('c')) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1)), 'c') == 'abc' 
from system.numbers limit 5; +select format('{}{}{}', materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1)), materialize('c')) == 'abc' from system.numbers limit 5; + +select format('{}{}{}', toFixedString('a', 1), toFixedString('b', 1), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select format('{}{}{}', toFixedString('a', 1), toFixedString('b', 1), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select format('{}{}{}', toFixedString('a', 1), materialize(toFixedString('b', 1)), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select format('{}{}{}', toFixedString('a', 1), materialize(toFixedString('b', 1)), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize(toFixedString('a', 1)), toFixedString('b', 1), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize(toFixedString('a', 1)), toFixedString('b', 1), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1)), toFixedString('c', 1)) == 'abc' from system.numbers limit 5; +select format('{}{}{}', materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1)), materialize(toFixedString('c', 1))) == 'abc' from system.numbers limit 5; diff --git a/parser/testdata/00950_bad_alloc_when_truncate_join_storage/ast.json b/parser/testdata/00950_bad_alloc_when_truncate_join_storage/ast.json new file mode 100644 index 000000000..b854fecaa --- /dev/null +++ b/parser/testdata/00950_bad_alloc_when_truncate_join_storage/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery join_test (children 1)" + }, + { + "explain": " Identifier join_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00166642, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00950_bad_alloc_when_truncate_join_storage/metadata.json b/parser/testdata/00950_bad_alloc_when_truncate_join_storage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00950_bad_alloc_when_truncate_join_storage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00950_bad_alloc_when_truncate_join_storage/query.sql b/parser/testdata/00950_bad_alloc_when_truncate_join_storage/query.sql new file mode 100644 index 000000000..21a68bff3 --- /dev/null +++ b/parser/testdata/00950_bad_alloc_when_truncate_join_storage/query.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS join_test; +CREATE TABLE join_test (number UInt8, value Float32) Engine = Join(ANY, LEFT, number); +TRUNCATE TABLE join_test; +DROP TABLE IF EXISTS join_test; diff --git a/parser/testdata/00950_default_prewhere/ast.json b/parser/testdata/00950_default_prewhere/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00950_default_prewhere/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00950_default_prewhere/metadata.json b/parser/testdata/00950_default_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00950_default_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00950_default_prewhere/query.sql b/parser/testdata/00950_default_prewhere/query.sql new file mode 100644 index 000000000..0561ce9cf --- /dev/null +++ 
b/parser/testdata/00950_default_prewhere/query.sql @@ -0,0 +1,21 @@ + +DROP TABLE IF EXISTS test_generic_events_all; + +CREATE TABLE test_generic_events_all (APIKey UInt8, SessionType UInt8) ENGINE = MergeTree() PARTITION BY APIKey ORDER BY tuple(); +INSERT INTO test_generic_events_all VALUES( 42, 42 ); +ALTER TABLE test_generic_events_all ADD COLUMN OperatingSystem UInt64 DEFAULT 42; +SELECT OperatingSystem FROM test_generic_events_all PREWHERE APIKey = 42 WHERE SessionType = 42; +SELECT * FROM test_generic_events_all PREWHERE APIKey = 42 WHERE SessionType = 42; + +DROP TABLE IF EXISTS test_generic_events_all; + +CREATE TABLE test_generic_events_all (APIKey UInt8, SessionType UInt8) ENGINE = MergeTree() PARTITION BY APIKey ORDER BY tuple(); +INSERT INTO test_generic_events_all VALUES( 42, 42 ); +ALTER TABLE test_generic_events_all ADD COLUMN OperatingSystem UInt64 DEFAULT SessionType+1; +SELECT * FROM test_generic_events_all WHERE APIKey = 42 AND SessionType = 42; +SELECT OperatingSystem FROM test_generic_events_all WHERE APIKey = 42; +SELECT OperatingSystem FROM test_generic_events_all WHERE APIKey = 42 AND SessionType = 42; +SELECT OperatingSystem FROM test_generic_events_all PREWHERE APIKey = 42 WHERE SessionType = 42; +SELECT * FROM test_generic_events_all PREWHERE APIKey = 42 WHERE SessionType = 42; + +DROP TABLE IF EXISTS test_generic_events_all; diff --git a/parser/testdata/00950_dict_get/ast.json b/parser/testdata/00950_dict_get/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00950_dict_get/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00950_dict_get/metadata.json b/parser/testdata/00950_dict_get/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00950_dict_get/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00950_dict_get/query.sql b/parser/testdata/00950_dict_get/query.sql new file mode 100644 index 000000000..28f517a73 --- /dev/null +++ b/parser/testdata/00950_dict_get/query.sql @@ -0,0 +1,316 @@ +-- Tags: no-parallel + +-- Must use `system` database and these tables - they're configured in tests/*_dictionary.xml +use system; +drop table if exists ints; +drop table if exists strings; +drop table if exists decimals; + +create table ints (key UInt64, i8 Int8, i16 Int16, i32 Int32, i64 Int64, u8 UInt8, u16 UInt16, u32 UInt32, u64 UInt64) Engine = Memory; +create table strings (key UInt64, str String) Engine = Memory; +create table decimals (key UInt64, d32 Decimal32(4), d64 Decimal64(6), d128 Decimal128(1)) Engine = Memory; + +insert into ints values (1, 1, 1, 1, 1, 1, 1, 1, 1); +insert into strings values (1, '1'); +insert into decimals values (1, 1, 1, 1); + +select 'dictGet', 'flat_ints' as dict_name, toUInt64(1) as k, + dictGet(dict_name, 'i8', k), + dictGet(dict_name, 'i16', k), + dictGet(dict_name, 'i32', k), + dictGet(dict_name, 'i64', k), + dictGet(dict_name, 'u8', k), + dictGet(dict_name, 'u16', k), + dictGet(dict_name, 'u32', k), + dictGet(dict_name, 'u64', k), + dictGet(dict_name, ('i8', 'i16', 'i32'), k); +select 'dictGetOrDefault', 'flat_ints' as dict_name, toUInt64(1) as k, + dictGetOrDefault(dict_name, 'i8', k, toInt8(42)), + dictGetOrDefault(dict_name, 'i16', k, toInt16(42)), + dictGetOrDefault(dict_name, 'i32', k, toInt32(42)), + dictGetOrDefault(dict_name, 'i64', k, toInt64(42)), + dictGetOrDefault(dict_name, 'u8', k, toUInt8(42)), + dictGetOrDefault(dict_name, 'u16', k, toUInt16(42)), + dictGetOrDefault(dict_name, 'u32', k, 
toUInt32(42)), + dictGetOrDefault(dict_name, 'u64', k, toUInt64(42)), + dictGetOrDefault(dict_name, ('i8', 'i16', 'i32'), k, (toInt8(42), toInt16(42), toInt32(42))); +select 'dictGetOrDefault', 'flat_ints' as dict_name, toUInt64(0) as k, + dictGetOrDefault(dict_name, 'i8', k, toInt8(42)), + dictGetOrDefault(dict_name, 'i16', k, toInt16(42)), + dictGetOrDefault(dict_name, 'i32', k, toInt32(42)), + dictGetOrDefault(dict_name, 'i64', k, toInt64(42)), + dictGetOrDefault(dict_name, 'u8', k, toUInt8(42)), + dictGetOrDefault(dict_name, 'u16', k, toUInt16(42)), + dictGetOrDefault(dict_name, 'u32', k, toUInt32(42)), + dictGetOrDefault(dict_name, 'u64', k, toUInt64(42)), + dictGetOrDefault(dict_name, ('i8', 'i16', 'i32'), k, (toInt8(42), toInt16(42), toInt32(42))); + +select 'dictGet', 'hashed_ints' as dict_name, toUInt64(1) as k, + dictGet(dict_name, 'i8', k), + dictGet(dict_name, 'i16', k), + dictGet(dict_name, 'i32', k), + dictGet(dict_name, 'i64', k), + dictGet(dict_name, 'u8', k), + dictGet(dict_name, 'u16', k), + dictGet(dict_name, 'u32', k), + dictGet(dict_name, 'u64', k), + dictGet(dict_name, ('i8', 'i16', 'i32'), k); +select 'dictGetOrDefault', 'hashed_ints' as dict_name, toUInt64(1) as k, + dictGetOrDefault(dict_name, 'i8', k, toInt8(42)), + dictGetOrDefault(dict_name, 'i16', k, toInt16(42)), + dictGetOrDefault(dict_name, 'i32', k, toInt32(42)), + dictGetOrDefault(dict_name, 'i64', k, toInt64(42)), + dictGetOrDefault(dict_name, 'u8', k, toUInt8(42)), + dictGetOrDefault(dict_name, 'u16', k, toUInt16(42)), + dictGetOrDefault(dict_name, 'u32', k, toUInt32(42)), + dictGetOrDefault(dict_name, 'u64', k, toUInt64(42)), + dictGetOrDefault(dict_name, ('i8', 'i16', 'i32'), k, (toInt8(42), toInt16(42), toInt32(42))); +select 'dictGetOrDefault', 'hashed_ints' as dict_name, toUInt64(0) as k, + dictGetOrDefault(dict_name, 'i8', k, toInt8(42)), + dictGetOrDefault(dict_name, 'i16', k, toInt16(42)), + dictGetOrDefault(dict_name, 'i32', k, toInt32(42)), + dictGetOrDefault(dict_name, 'i64', k, toInt64(42)), + dictGetOrDefault(dict_name, 'u8', k, toUInt8(42)), + dictGetOrDefault(dict_name, 'u16', k, toUInt16(42)), + dictGetOrDefault(dict_name, 'u32', k, toUInt32(42)), + dictGetOrDefault(dict_name, 'u64', k, toUInt64(42)), + dictGetOrDefault(dict_name, ('i8', 'i16', 'i32'), k, (toInt8(42), toInt16(42), toInt32(42))); + +select 'dictGet', 'hashed_sparse_ints' as dict_name, toUInt64(1) as k, + dictGet(dict_name, 'i8', k), + dictGet(dict_name, 'i16', k), + dictGet(dict_name, 'i32', k), + dictGet(dict_name, 'i64', k), + dictGet(dict_name, 'u8', k), + dictGet(dict_name, 'u16', k), + dictGet(dict_name, 'u32', k), + dictGet(dict_name, 'u64', k), + dictGet(dict_name, ('i8', 'i16', 'i32'), k); +select 'dictGetOrDefault', 'hashed_sparse_ints' as dict_name, toUInt64(1) as k, + dictGetOrDefault(dict_name, 'i8', k, toInt8(42)), + dictGetOrDefault(dict_name, 'i16', k, toInt16(42)), + dictGetOrDefault(dict_name, 'i32', k, toInt32(42)), + dictGetOrDefault(dict_name, 'i64', k, toInt64(42)), + dictGetOrDefault(dict_name, 'u8', k, toUInt8(42)), + dictGetOrDefault(dict_name, 'u16', k, toUInt16(42)), + dictGetOrDefault(dict_name, 'u32', k, toUInt32(42)), + dictGetOrDefault(dict_name, 'u64', k, toUInt64(42)), + dictGetOrDefault(dict_name, ('i8', 'i16', 'i32'), k, (toInt8(42), toInt16(42), toInt32(42))); +select 'dictGetOrDefault', 'hashed_sparse_ints' as dict_name, toUInt64(0) as k, + dictGetOrDefault(dict_name, 'i8', k, toInt8(42)), + dictGetOrDefault(dict_name, 'i16', k, toInt16(42)), + dictGetOrDefault(dict_name, 'i32', k, toInt32(42)), + dictGetOrDefault(dict_name, 'i64', k, 
toInt64(42)), + dictGetOrDefault(dict_name, 'u8', k, toUInt8(42)), + dictGetOrDefault(dict_name, 'u16', k, toUInt16(42)), + dictGetOrDefault(dict_name, 'u32', k, toUInt32(42)), + dictGetOrDefault(dict_name, 'u64', k, toUInt64(42)), + dictGetOrDefault(dict_name, ('i8', 'i16', 'i32'), k, (toInt8(42), toInt16(42), toInt32(42))); + +select 'dictGet', 'cache_ints' as dict_name, toUInt64(1) as k, + dictGet(dict_name, 'i8', k), + dictGet(dict_name, 'i16', k), + dictGet(dict_name, 'i32', k), + dictGet(dict_name, 'i64', k), + dictGet(dict_name, 'u8', k), + dictGet(dict_name, 'u16', k), + dictGet(dict_name, 'u32', k), + dictGet(dict_name, 'u64', k), + dictGet(dict_name, ('i8', 'i16', 'i32'), k); +select 'dictGetOrDefault', 'cache_ints' as dict_name, toUInt64(1) as k, + dictGetOrDefault(dict_name, 'i8', k, toInt8(42)), + dictGetOrDefault(dict_name, 'i16', k, toInt16(42)), + dictGetOrDefault(dict_name, 'i32', k, toInt32(42)), + dictGetOrDefault(dict_name, 'i64', k, toInt64(42)), + dictGetOrDefault(dict_name, 'u8', k, toUInt8(42)), + dictGetOrDefault(dict_name, 'u16', k, toUInt16(42)), + dictGetOrDefault(dict_name, 'u32', k, toUInt32(42)), + dictGetOrDefault(dict_name, 'u64', k, toUInt64(42)), + dictGetOrDefault(dict_name, ('i8', 'i16', 'i32'), k, (toInt8(42), toInt16(42), toInt32(42))); +select 'dictGetOrDefault', 'cache_ints' as dict_name, toUInt64(0) as k, + dictGetOrDefault(dict_name, 'i8', k, toInt8(42)), + dictGetOrDefault(dict_name, 'i16', k, toInt16(42)), + dictGetOrDefault(dict_name, 'i32', k, toInt32(42)), + dictGetOrDefault(dict_name, 'i64', k, toInt64(42)), + dictGetOrDefault(dict_name, 'u8', k, toUInt8(42)), + dictGetOrDefault(dict_name, 'u16', k, toUInt16(42)), + dictGetOrDefault(dict_name, 'u32', k, toUInt32(42)), + dictGetOrDefault(dict_name, 'u64', k, toUInt64(42)), + dictGetOrDefault(dict_name, ('i8', 'i16', 'i32'), k, (toInt8(42), toInt16(42), toInt32(42))); + +select 'dictGet', 'complex_hashed_ints' as dict_name, tuple(toUInt64(1)) as k, + dictGet(dict_name, 'i8', k), + dictGet(dict_name, 'i16', k), + dictGet(dict_name, 'i32', k), + dictGet(dict_name, 'i64', k), + dictGet(dict_name, 'u8', k), + dictGet(dict_name, 'u16', k), + dictGet(dict_name, 'u32', k), + dictGet(dict_name, 'u64', k), + dictGet(dict_name, ('i8', 'i16', 'i32'), k); +select 'dictGetOrDefault', 'complex_hashed_ints' as dict_name, tuple(toUInt64(1)) as k, + dictGetOrDefault(dict_name, 'i8', k, toInt8(42)), + dictGetOrDefault(dict_name, 'i16', k, toInt16(42)), + dictGetOrDefault(dict_name, 'i32', k, toInt32(42)), + dictGetOrDefault(dict_name, 'i64', k, toInt64(42)), + dictGetOrDefault(dict_name, 'u8', k, toUInt8(42)), + dictGetOrDefault(dict_name, 'u16', k, toUInt16(42)), + dictGetOrDefault(dict_name, 'u32', k, toUInt32(42)), + dictGetOrDefault(dict_name, 'u64', k, toUInt64(42)), + dictGetOrDefault(dict_name, ('i8', 'i16', 'i32'), k, (toInt8(42), toInt16(42), toInt32(42))); +select 'dictGetOrDefault', 'complex_hashed_ints' as dict_name, tuple(toUInt64(0)) as k, + dictGetOrDefault(dict_name, 'i8', k, toInt8(42)), + dictGetOrDefault(dict_name, 'i16', k, toInt16(42)), + dictGetOrDefault(dict_name, 'i32', k, toInt32(42)), + dictGetOrDefault(dict_name, 'i64', k, toInt64(42)), + dictGetOrDefault(dict_name, 'u8', k, toUInt8(42)), + dictGetOrDefault(dict_name, 'u16', k, toUInt16(42)), + dictGetOrDefault(dict_name, 'u32', k, toUInt32(42)), + dictGetOrDefault(dict_name, 'u64', k, toUInt64(42)), + dictGetOrDefault(dict_name, ('i8', 'i16', 'i32'), k, (toInt8(42), toInt16(42), toInt32(42))); + +select 'dictGet', 
'complex_cache_ints' as dict_name, tuple(toUInt64(1)) as k, + dictGet(dict_name, 'i8', k), + dictGet(dict_name, 'i16', k), + dictGet(dict_name, 'i32', k), + dictGet(dict_name, 'i64', k), + dictGet(dict_name, 'u8', k), + dictGet(dict_name, 'u16', k), + dictGet(dict_name, 'u32', k), + dictGet(dict_name, 'u64', k), + dictGet(dict_name, ('i8', 'i16', 'i32'), k); +select 'dictGetOrDefault', 'complex_cache_ints' as dict_name, tuple(toUInt64(1)) as k, + dictGetOrDefault(dict_name, 'i8', k, toInt8(42)), + dictGetOrDefault(dict_name, 'i16', k, toInt16(42)), + dictGetOrDefault(dict_name, 'i32', k, toInt32(42)), + dictGetOrDefault(dict_name, 'i64', k, toInt64(42)), + dictGetOrDefault(dict_name, 'u8', k, toUInt8(42)), + dictGetOrDefault(dict_name, 'u16', k, toUInt16(42)), + dictGetOrDefault(dict_name, 'u32', k, toUInt32(42)), + dictGetOrDefault(dict_name, 'u64', k, toUInt64(42)), + dictGetOrDefault(dict_name, ('i8', 'i16', 'i32'), k, (toInt8(42), toInt16(42), toInt32(42))); +select 'dictGetOrDefault', 'complex_cache_ints' as dict_name, tuple(toUInt64(0)) as k, + dictGetOrDefault(dict_name, 'i8', k, toInt8(42)), + dictGetOrDefault(dict_name, 'i16', k, toInt16(42)), + dictGetOrDefault(dict_name, 'i32', k, toInt32(42)), + dictGetOrDefault(dict_name, 'i64', k, toInt64(42)), + dictGetOrDefault(dict_name, 'u8', k, toUInt8(42)), + dictGetOrDefault(dict_name, 'u16', k, toUInt16(42)), + dictGetOrDefault(dict_name, 'u32', k, toUInt32(42)), + dictGetOrDefault(dict_name, 'u64', k, toUInt64(42)), + dictGetOrDefault(dict_name, ('i8', 'i16', 'i32'), k, (toInt8(42), toInt16(42), toInt32(42))); + +-- + +select 'dictGet', 'flat_strings' as dict_name, toUInt64(1) as k, dictGet(dict_name, 'str', k), dictGet(dict_name, ('str'), k); +select 'dictGetOrDefault', 'flat_strings' as dict_name, toUInt64(1) as k, dictGetOrDefault(dict_name, 'str', k, '*'), dictGetOrDefault(dict_name, ('str'), k, ('*')); +select 'dictGetOrDefault', 'flat_strings' as dict_name, toUInt64(0) as k, dictGetOrDefault(dict_name, 'str', k, '*'), dictGetOrDefault(dict_name, ('str'), k, ('*')); + +select 'dictGet', 'hashed_strings' as dict_name, toUInt64(1) as k, dictGet(dict_name, 'str', k), dictGet(dict_name, ('str'), k); +select 'dictGetOrDefault', 'hashed_strings' as dict_name, toUInt64(1) as k, dictGetOrDefault(dict_name, 'str', k, '*'), dictGetOrDefault(dict_name, ('str'), k, ('*')); +select 'dictGetOrDefault', 'hashed_strings' as dict_name, toUInt64(0) as k, dictGetOrDefault(dict_name, 'str', k, '*'), dictGetOrDefault(dict_name, ('str'), k, ('*')); + +select 'dictGet', 'cache_strings' as dict_name, toUInt64(1) as k, dictGet(dict_name, 'str', k), dictGet(dict_name, ('str'), k); +select 'dictGetOrDefault', 'cache_strings' as dict_name, toUInt64(1) as k, dictGetOrDefault(dict_name, 'str', k, '*'), dictGetOrDefault(dict_name, ('str'), k, ('*')); +select 'dictGetOrDefault', 'cache_strings' as dict_name, toUInt64(0) as k, dictGetOrDefault(dict_name, 'str', k, '*'), dictGetOrDefault(dict_name, ('str'), k, ('*')); + +select 'dictGet', 'complex_hashed_strings' as dict_name, toUInt64(1) as k, dictGet(dict_name, 'str', tuple(k)), dictGet(dict_name, ('str'), tuple(k)); +select 'dictGetOrDefault', 'complex_hashed_strings' as dict_name, toUInt64(1) as k, dictGetOrDefault(dict_name, 'str', tuple(k), '*'), dictGetOrDefault(dict_name, ('str'), tuple(k), ('*')); +select 'dictGetOrDefault', 'complex_hashed_strings' as dict_name, toUInt64(0) as k, dictGetOrDefault(dict_name, 'str', tuple(k), '*'), dictGetOrDefault(dict_name, ('str'), tuple(k), ('*')); + +select 
'dictGet', 'complex_cache_strings' as dict_name, toUInt64(1) as k, dictGet(dict_name, 'str', tuple(k)), dictGet(dict_name, ('str'), tuple(k)); +select 'dictGetOrDefault', 'complex_cache_strings' as dict_name, toUInt64(1) as k, dictGetOrDefault(dict_name, 'str', tuple(k), '*'), dictGetOrDefault(dict_name, ('str'), tuple(k), ('*')); +select 'dictGetOrDefault', 'complex_cache_strings' as dict_name, toUInt64(0) as k, dictGetOrDefault(dict_name, 'str', tuple(k), '*'), dictGetOrDefault(dict_name, ('str'), tuple(k), ('*')); + +-- + +select 'dictGet', 'flat_decimals' as dict_name, toUInt64(1) as k, + dictGet(dict_name, 'd32', k), + dictGet(dict_name, 'd64', k), + dictGet(dict_name, 'd128', k), + dictGet(dict_name, ('d32', 'd64', 'd128'), k); +select 'dictGetOrDefault', 'flat_decimals' as dict_name, toUInt64(1) as k, + dictGetOrDefault(dict_name, 'd32', k, toDecimal32(42, 4)), + dictGetOrDefault(dict_name, 'd64', k, toDecimal64(42, 6)), + dictGetOrDefault(dict_name, 'd128', k, toDecimal128(42, 1)), + dictGetOrDefault(dict_name, ('d32', 'd64', 'd128'), k, (toDecimal32(42, 4), toDecimal64(42, 6), toDecimal128(42, 1))); +select 'dictGetOrDefault', 'flat_decimals' as dict_name, toUInt64(0) as k, + dictGetOrDefault(dict_name, 'd32', k, toDecimal32(42, 4)), + dictGetOrDefault(dict_name, 'd64', k, toDecimal64(42, 6)), + dictGetOrDefault(dict_name, 'd128', k, toDecimal128(42, 1)), + dictGetOrDefault(dict_name, ('d32', 'd64', 'd128'), k, (toDecimal32(42, 4), toDecimal64(42, 6), toDecimal128(42, 1))); + +select 'dictGet', 'hashed_decimals' as dict_name, toUInt64(1) as k, + dictGet(dict_name, 'd32', k), + dictGet(dict_name, 'd64', k), + dictGet(dict_name, 'd128', k), + dictGet(dict_name, ('d32', 'd64', 'd128'), k); +select 'dictGetOrDefault', 'hashed_decimals' as dict_name, toUInt64(1) as k, + dictGetOrDefault(dict_name, 'd32', k, toDecimal32(42, 4)), + dictGetOrDefault(dict_name, 'd64', k, toDecimal64(42, 6)), + dictGetOrDefault(dict_name, 'd128', k, toDecimal128(42, 1)), + dictGetOrDefault(dict_name, ('d32', 'd64', 'd128'), k, (toDecimal32(42, 4), toDecimal64(42, 6), toDecimal128(42, 1))); +select 'dictGetOrDefault', 'hashed_decimals' as dict_name, toUInt64(0) as k, + dictGetOrDefault(dict_name, 'd32', k, toDecimal32(42, 4)), + dictGetOrDefault(dict_name, 'd64', k, toDecimal64(42, 6)), + dictGetOrDefault(dict_name, 'd128', k, toDecimal128(42, 1)), + dictGetOrDefault(dict_name, ('d32', 'd64', 'd128'), k, (toDecimal32(42, 4), toDecimal64(42, 6), toDecimal128(42, 1))); + +select 'dictGet', 'cache_decimals' as dict_name, toUInt64(1) as k, + dictGet(dict_name, 'd32', k), + dictGet(dict_name, 'd64', k), + dictGet(dict_name, 'd128', k), + dictGet(dict_name, ('d32', 'd64', 'd128'), k); +select 'dictGetOrDefault', 'cache_decimals' as dict_name, toUInt64(1) as k, + dictGetOrDefault(dict_name, 'd32', k, toDecimal32(42, 4)), + dictGetOrDefault(dict_name, 'd64', k, toDecimal64(42, 6)), + dictGetOrDefault(dict_name, 'd128', k, toDecimal128(42, 1)), + dictGetOrDefault(dict_name, ('d32', 'd64', 'd128'), k, (toDecimal32(42, 4), toDecimal64(42, 6), toDecimal128(42, 1))); +select 'dictGetOrDefault', 'cache_decimals' as dict_name, toUInt64(0) as k, + dictGetOrDefault(dict_name, 'd32', k, toDecimal32(42, 4)), + dictGetOrDefault(dict_name, 'd64', k, toDecimal64(42, 6)), + dictGetOrDefault(dict_name, 'd128', k, toDecimal128(42, 1)), + dictGetOrDefault(dict_name, ('d32', 'd64', 'd128'), k, (toDecimal32(42, 4), toDecimal64(42, 6), toDecimal128(42, 1))); + +select 'dictGet', 'complex_hashed_decimals' as dict_name, tuple(toUInt64(1)) 
as k, + dictGet(dict_name, 'd32', k), + dictGet(dict_name, 'd64', k), + dictGet(dict_name, 'd128', k), + dictGet(dict_name, ('d32', 'd64', 'd128'), k); +select 'dictGetOrDefault', 'complex_hashed_decimals' as dict_name, tuple(toUInt64(1)) as k, + dictGetOrDefault(dict_name, 'd32', k, toDecimal32(42, 4)), + dictGetOrDefault(dict_name, 'd64', k, toDecimal64(42, 6)), + dictGetOrDefault(dict_name, 'd128', k, toDecimal128(42, 1)), + dictGetOrDefault(dict_name, ('d32', 'd64', 'd128'), k, (toDecimal32(42, 4), toDecimal64(42, 6), toDecimal128(42, 1))); +select 'dictGetOrDefault', 'complex_hashed_decimals' as dict_name, tuple(toUInt64(0)) as k, + dictGetOrDefault(dict_name, 'd32', k, toDecimal32(42, 4)), + dictGetOrDefault(dict_name, 'd64', k, toDecimal64(42, 6)), + dictGetOrDefault(dict_name, 'd128', k, toDecimal128(42, 1)), + dictGetOrDefault(dict_name, ('d32', 'd64', 'd128'), k, (toDecimal32(42, 4), toDecimal64(42, 6), toDecimal128(42, 1))); + +select 'dictGet', 'complex_cache_decimals' as dict_name, tuple(toUInt64(1)) as k, + dictGet(dict_name, 'd32', k), + dictGet(dict_name, 'd64', k), + dictGet(dict_name, 'd128', k), + dictGet(dict_name, ('d32', 'd64', 'd128'), k); +select 'dictGetOrDefault', 'complex_cache_decimals' as dict_name, tuple(toUInt64(1)) as k, + dictGetOrDefault(dict_name, 'd32', k, toDecimal32(42, 4)), + dictGetOrDefault(dict_name, 'd64', k, toDecimal64(42, 6)), + dictGetOrDefault(dict_name, 'd128', k, toDecimal128(42, 1)), + dictGetOrDefault(dict_name, ('d32', 'd64', 'd128'), k, (toDecimal32(42, 4), toDecimal64(42, 6), toDecimal128(42, 1))); +select 'dictGetOrDefault', 'complex_cache_decimals' as dict_name, tuple(toUInt64(0)) as k, + dictGetOrDefault(dict_name, 'd32', k, toDecimal32(42, 4)), + dictGetOrDefault(dict_name, 'd64', k, toDecimal64(42, 6)), + dictGetOrDefault(dict_name, 'd128', k, toDecimal128(42, 1)), + dictGetOrDefault(dict_name, ('d32', 'd64', 'd128'), k, (toDecimal32(42, 4), toDecimal64(42, 6), toDecimal128(42, 1))); + +-- +-- Keep the tables, so that the dictionaries can be reloaded correctly and +-- SYSTEM RELOAD DICTIONARIES doesn't break. +-- We could also: +-- * drop the dictionaries -- not possible, they are configured in a .xml; +-- * switch dictionaries to DDL syntax so that they can be dropped -- tedious, +-- because there are a couple dozen of them, and also we need to have some +-- .xml dictionaries in tests so that we test backward compatibility with this +-- format; +-- * unload dictionaries -- no command for that. 
+-- diff --git a/parser/testdata/00950_test_double_delta_codec/ast.json b/parser/testdata/00950_test_double_delta_codec/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00950_test_double_delta_codec/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00950_test_double_delta_codec/metadata.json b/parser/testdata/00950_test_double_delta_codec/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00950_test_double_delta_codec/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00950_test_double_delta_codec/query.sql b/parser/testdata/00950_test_double_delta_codec/query.sql new file mode 100644 index 000000000..58cf35b52 --- /dev/null +++ b/parser/testdata/00950_test_double_delta_codec/query.sql @@ -0,0 +1,168 @@ +-- Tags: no-random-merge-tree-settings + +DROP TABLE IF EXISTS codecTest; + +CREATE TABLE codecTest ( + key UInt64, + ref_valueU64 UInt64, + ref_valueU32 UInt32, + ref_valueU16 UInt16, + ref_valueU8 UInt8, + ref_valueI64 Int64, + ref_valueI32 Int32, + ref_valueI16 Int16, + ref_valueI8 Int8, + ref_valueDT DateTime, + ref_valueD Date, + valueU64 UInt64 CODEC(DoubleDelta), + valueU32 UInt32 CODEC(DoubleDelta), + valueU16 UInt16 CODEC(DoubleDelta), + valueU8 UInt8 CODEC(DoubleDelta), + valueI64 Int64 CODEC(DoubleDelta), + valueI32 Int32 CODEC(DoubleDelta), + valueI16 Int16 CODEC(DoubleDelta), + valueI8 Int8 CODEC(DoubleDelta), + valueDT DateTime CODEC(DoubleDelta), + valueD Date CODEC(DoubleDelta) +) Engine = MergeTree ORDER BY key SETTINGS min_bytes_for_wide_part = 0, ratio_of_defaults_for_sparse_serialization = 1; + + +-- checking for overflow +INSERT INTO codecTest (key, ref_valueU64, valueU64, ref_valueI64, valueI64) + VALUES (1, 18446744073709551615, 18446744073709551615, 9223372036854775807, 9223372036854775807), (2, 0, 0, -9223372036854775808, -9223372036854775808), (3, 18446744073709551615, 18446744073709551615, 9223372036854775807, 9223372036854775807); + +-- n^3 covers all double delta storage cases, from small difference between neighbouring values (stride) to big. 
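+-- For illustration (example values chosen here, not taken from the test data): for n = 1, 2, 3, 4
+-- the cubes are 1, 8, 27, 64; the first-order deltas are 7, 19, 37, and the second-order ("double")
+-- deltas actually stored are 12, 18, so the stride between neighbouring values keeps changing and
+-- exercises the codec's variable-width encoding.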
+INSERT INTO codecTest (key, ref_valueU64, valueU64, ref_valueU32, valueU32, ref_valueU16, valueU16, ref_valueU8, valueU8, ref_valueI64, valueI64, ref_valueI32, valueI32, ref_valueI16, valueI16, ref_valueI8, valueI8, ref_valueDT, valueDT, ref_valueD, valueD) + SELECT number as n, n * n * n as v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, toDateTime(v), toDateTime(v), toDate(v), toDate(v) + FROM system.numbers LIMIT 101, 1000; + +-- best case - constant stride +INSERT INTO codecTest (key, ref_valueU64, valueU64, ref_valueU32, valueU32, ref_valueU16, valueU16, ref_valueU8, valueU8, ref_valueI64, valueI64, ref_valueI32, valueI32, ref_valueI16, valueI16, ref_valueI8, valueI8, ref_valueDT, valueDT, ref_valueD, valueD) + SELECT number as n, n as v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, toDateTime(v), toDateTime(v), toDate(v), toDate(v) + FROM system.numbers LIMIT 2001, 1000; + + +-- worst case - random stride +INSERT INTO codecTest (key, ref_valueU64, valueU64, ref_valueU32, valueU32, ref_valueU16, valueU16, ref_valueU8, valueU8, ref_valueI64, valueI64, ref_valueI32, valueI32, ref_valueI16, valueI16, ref_valueI8, valueI8, ref_valueDT, valueDT, ref_valueD, valueD) + SELECT number as n, n + (rand64() - 9223372036854775807)/1000 as v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, toDateTime(v), toDateTime(v), toDate(v), toDate(v) + FROM system.numbers LIMIT 3001, 1000; + + +SELECT 'U64'; +SELECT + key, + ref_valueU64, valueU64, ref_valueU64 - valueU64 as dU64 +FROM codecTest +WHERE + dU64 != 0 +LIMIT 10; + + +SELECT 'U32'; +SELECT + key, + ref_valueU32, valueU32, ref_valueU32 - valueU32 as dU32 +FROM codecTest +WHERE + dU32 != 0 +LIMIT 10; + + +SELECT 'U16'; +SELECT + key, + ref_valueU16, valueU16, ref_valueU16 - valueU16 as dU16 +FROM codecTest +WHERE + dU16 != 0 +LIMIT 10; + + +SELECT 'U8'; +SELECT + key, + ref_valueU8, valueU8, ref_valueU8 - valueU8 as dU8 +FROM codecTest +WHERE + dU8 != 0 +LIMIT 10; + + +SELECT 'I64'; +SELECT + key, + ref_valueI64, valueI64, ref_valueI64 - valueI64 as dI64 +FROM codecTest +WHERE + dI64 != 0 +LIMIT 10; + + +SELECT 'I32'; +SELECT + key, + ref_valueI32, valueI32, ref_valueI32 - valueI32 as dI32 +FROM codecTest +WHERE + dI32 != 0 +LIMIT 10; + + +SELECT 'I16'; +SELECT + key, + ref_valueI16, valueI16, ref_valueI16 - valueI16 as dI16 +FROM codecTest +WHERE + dI16 != 0 +LIMIT 10; + + +SELECT 'I8'; +SELECT + key, + ref_valueI8, valueI8, ref_valueI8 - valueI8 as dI8 +FROM codecTest +WHERE + dI8 != 0 +LIMIT 10; + + +SELECT 'DT'; +SELECT + key, + ref_valueDT, valueDT, ref_valueDT - valueDT as dDT +FROM codecTest +WHERE + dDT != 0 +LIMIT 10; + + +SELECT 'D'; +SELECT + key, + ref_valueD, valueD, ref_valueD - valueD as dD +FROM codecTest +WHERE + dD != 0 +LIMIT 10; + +SELECT 'Compression:'; +SELECT + table, name, type, + compression_codec, + data_uncompressed_bytes u, + data_compressed_bytes c, + round(u/c,3) ratio +FROM system.columns +WHERE + table = 'codecTest' + AND database = currentDatabase() +AND + compression_codec != '' +AND + ratio <= 1 +ORDER BY + table, name, type; + +DROP TABLE IF EXISTS codecTest; diff --git a/parser/testdata/00950_test_double_delta_codec_types/ast.json b/parser/testdata/00950_test_double_delta_codec_types/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00950_test_double_delta_codec_types/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00950_test_double_delta_codec_types/metadata.json b/parser/testdata/00950_test_double_delta_codec_types/metadata.json new file mode 100644 
index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00950_test_double_delta_codec_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00950_test_double_delta_codec_types/query.sql b/parser/testdata/00950_test_double_delta_codec_types/query.sql new file mode 100644 index 000000000..c3b6bbb3e --- /dev/null +++ b/parser/testdata/00950_test_double_delta_codec_types/query.sql @@ -0,0 +1,33 @@ +-- Test for issue #80220 + +DROP TABLE IF EXISTS tab; + +-- Codec DoubleDelta must not be used on FixedString columns + +CREATE TABLE tab(c0 FixedString(9) CODEC(DoubleDelta)) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE tab(c0 FixedString(9) CODEC(DoubleDelta(1))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } + +CREATE TABLE tab(c0 LowCardinality(FixedString(9)) CODEC(DoubleDelta)) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE tab(c0 LowCardinality(FixedString(9)) CODEC(DoubleDelta(2))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } + +-- Combination DoubleDelta & Time is okay + +SET enable_time_time64_type = 1; +CREATE TABLE tab(c0 Time CODEC(DoubleDelta)) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO TABLE tab (c0) VALUES ('100:00:00'); +DROP TABLE tab; + +CREATE TABLE tab(c0 Nullable(Time) CODEC(DoubleDelta)) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO TABLE tab(c0) VALUES ('100:00:00'); +INSERT INTO TABLE tab(c0) VALUES (NULL); +DROP TABLE tab; + +-- LowCardinality(Nullable(Time)) is rejected +CREATE TABLE tab(c0 LowCardinality(Nullable(Time)) CODEC(DoubleDelta)) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE tab(c0 LowCardinality(Nullable(Time)) CODEC(DoubleDelta(2))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } + +-- This statement from issue #80220 is expected to fail: +CREATE TABLE tab(c0 String) ENGINE = MergeTree() ORDER BY tuple(); +CREATE MATERIALIZED VIEW v0 REFRESH AFTER 1 SECOND APPEND TO tab (c0 String CODEC(DoubleDelta(2))) EMPTY AS (SELECT 'a' AS c0); -- { serverError BAD_ARGUMENTS } + +DROP TABLE tab; diff --git a/parser/testdata/00950_test_gorilla_codec/ast.json b/parser/testdata/00950_test_gorilla_codec/ast.json new file mode 100644 index 000000000..793c03108 --- /dev/null +++ b/parser/testdata/00950_test_gorilla_codec/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery codecTest (children 1)" + }, + { + "explain": " Identifier codecTest" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00173849, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00950_test_gorilla_codec/metadata.json b/parser/testdata/00950_test_gorilla_codec/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00950_test_gorilla_codec/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00950_test_gorilla_codec/query.sql b/parser/testdata/00950_test_gorilla_codec/query.sql new file mode 100644 index 000000000..e9582480b --- /dev/null +++ b/parser/testdata/00950_test_gorilla_codec/query.sql @@ -0,0 +1,63 @@ +DROP TABLE IF EXISTS codecTest; + +SET cross_to_inner_join_rewrite = 1; + +CREATE TABLE codecTest ( + key UInt64, + name String, + ref_valueF64 Float64, + ref_valueF32 Float32, + valueF64 Float64 CODEC(Gorilla), + valueF32 Float32 CODEC(Gorilla) +) Engine = MergeTree ORDER BY key; + 
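+-- Illustrative sketch only (commented out, not part of this test): Gorilla
+-- XORs the bit pattern of each value with that of the previous one, so runs of
+-- identical neighbouring values compress best:
+--   SELECT bitXor(reinterpretAsUInt64(e()), reinterpretAsUInt64(e())); -- 0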
+-- best case - same value +INSERT INTO codecTest (key, name, ref_valueF64, valueF64, ref_valueF32, valueF32) + SELECT number AS n, 'e()', e() AS v, v, v, v FROM system.numbers LIMIT 1, 100; + +-- good case - values that grow insignificantly +INSERT INTO codecTest (key, name, ref_valueF64, valueF64, ref_valueF32, valueF32) + SELECT number AS n, 'log2(n)', log2(n) AS v, v, v, v FROM system.numbers LIMIT 101, 100; + +-- bad case - values differ significantly +INSERT INTO codecTest (key, name, ref_valueF64, valueF64, ref_valueF32, valueF32) + SELECT number AS n, 'n*sqrt(n)', n*sqrt(n) AS v, v, v, v FROM system.numbers LIMIT 201, 100; + +-- worst case - almost like random values +INSERT INTO codecTest (key, name, ref_valueF64, valueF64, ref_valueF32, valueF32) + SELECT number AS n, 'sin(n*n*n)*n', sin(n * n * n * n * n) AS v, v, v, v FROM system.numbers LIMIT 301, 100; + + +-- These floating-point values are expected to be BINARY equal, so comparing by-value is Ok here. + +-- Referencing previous row key, value, and case name to simplify debugging. +SELECT 'F64'; +SELECT + c1.key, c1.name, + c1.ref_valueF64, c1.valueF64, c1.ref_valueF64 - c1.valueF64 AS dF64, + 'prev:', + c2.key, c2.ref_valueF64 +FROM + codecTest as c1, codecTest as c2 +WHERE + dF64 != 0 +AND + c2.key = c1.key - 1 +LIMIT 10; + + +SELECT 'F32'; +SELECT + c1.key, c1.name, + c1.ref_valueF32, c1.valueF32, c1.ref_valueF32 - c1.valueF32 AS dF32, + 'prev:', + c2.key, c2.ref_valueF32 +FROM + codecTest as c1, codecTest as c2 +WHERE + dF32 != 0 +AND + c2.key = c1.key - 1 +LIMIT 10; + +DROP TABLE IF EXISTS codecTest; diff --git a/parser/testdata/00951_ngram_search/ast.json b/parser/testdata/00951_ngram_search/ast.json new file mode 100644 index 000000000..391a6c6a8 --- /dev/null +++ b/parser/testdata/00951_ngram_search/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 3)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Function round (children 1)" + }, + { + "explain": "     ExpressionList (children 1)" + }, + { + "explain": "      Function multiply (children 1)" + }, + { + "explain": "       ExpressionList (children 2)" + }, + { + "explain": "        Literal UInt64_1000" + }, + { + "explain": "        Function ngramSearchUTF8 (children 1)" + }, + { + "explain": "         ExpressionList (children 2)" + }, + { + "explain": "          Function materialize (children 1)" + }, + { + "explain": "           ExpressionList (children 1)" + }, + { + "explain": "            Literal ''" + }, + { + "explain": "          Literal ''" + }, + { + "explain": "   TablesInSelectQuery (children 1)" + }, + { + "explain": "    TablesInSelectQueryElement (children 1)" + }, + { + "explain": "     TableExpression (children 1)" + }, + { + "explain": "      TableIdentifier system.numbers" + }, + { + "explain": "   Literal UInt64_5" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.002204031, + "rows_read": 20, + "bytes_read": 807 + } +} diff --git a/parser/testdata/00951_ngram_search/metadata.json b/parser/testdata/00951_ngram_search/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00951_ngram_search/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00951_ngram_search/query.sql b/parser/testdata/00951_ngram_search/query.sql new file mode 100644 index 000000000..77525d860 --- /dev/null +++ 
b/parser/testdata/00951_ngram_search/query.sql @@ -0,0 +1,180 @@ +select round(1000 * ngramSearchUTF8(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramSearchUTF8(materialize('абв'), '')) from system.numbers limit 5; +select round(1000 * ngramSearchUTF8(materialize(''), 'абв')) from system.numbers limit 5; +select round(1000 * ngramSearchUTF8(materialize('абвгдеёжз'), 'абвгдеёжз')) from system.numbers limit 5; +select round(1000 * ngramSearchUTF8(materialize('абвгдеёжз'), 'абвгдеёж')) from system.numbers limit 5; +select round(1000 * ngramSearchUTF8(materialize('абвгдеёжз'), 'гдеёзд')) from system.numbers limit 5; +select round(1000 * ngramSearchUTF8(materialize('абвгдеёжз'), 'ёёёёёёёё')) from system.numbers limit 5; + +select round(1000 * ngramSearchUTF8(materialize(''), materialize('')))=round(1000 * ngramSearchUTF8(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramSearchUTF8(materialize('абв'), materialize('')))=round(1000 * ngramSearchUTF8(materialize('абв'), '')) from system.numbers limit 5; +select round(1000 * ngramSearchUTF8(materialize(''), materialize('абв')))=round(1000 * ngramSearchUTF8(materialize(''), 'абв')) from system.numbers limit 5; +select round(1000 * ngramSearchUTF8(materialize('абвгдеёжз'), materialize('абвгдеёжз')))=round(1000 * ngramSearchUTF8(materialize('абвгдеёжз'), 'абвгдеёжз')) from system.numbers limit 5; +select round(1000 * ngramSearchUTF8(materialize('абвгдеёжз'), materialize('абвгдеёж')))=round(1000 * ngramSearchUTF8(materialize('абвгдеёжз'), 'абвгдеёж')) from system.numbers limit 5; +select round(1000 * ngramSearchUTF8(materialize('абвгдеёжз'), materialize('гдеёзд')))=round(1000 * ngramSearchUTF8(materialize('абвгдеёжз'), 'гдеёзд')) from system.numbers limit 5; +select round(1000 * ngramSearchUTF8(materialize('абвгдеёжз'), materialize('ёёёёёёёё')))=round(1000 * ngramSearchUTF8(materialize('абвгдеёжз'), 'ёёёёёёёё')) from system.numbers limit 5; + +select round(1000 * ngramSearchUTF8('', materialize('')))=round(1000 * ngramSearchUTF8(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramSearchUTF8('абв', materialize('')))=round(1000 * ngramSearchUTF8(materialize('абв'), '')) from system.numbers limit 5; +select round(1000 * ngramSearchUTF8('', materialize('абв')))=round(1000 * ngramSearchUTF8(materialize(''), 'абв')) from system.numbers limit 5; +select round(1000 * ngramSearchUTF8('абвгдеёжз', materialize('абвгдеёжз')))=round(1000 * ngramSearchUTF8(materialize('абвгдеёжз'), 'абвгдеёжз')) from system.numbers limit 5; +select round(1000 * ngramSearchUTF8('абвгдеёжз', materialize('абвгдеёж')))=round(1000 * ngramSearchUTF8(materialize('абвгдеёжз'), 'абвгдеёж')) from system.numbers limit 5; +select round(1000 * ngramSearchUTF8('абвгдеёжз', materialize('гдеёзд')))=round(1000 * ngramSearchUTF8(materialize('абвгдеёжз'), 'гдеёзд')) from system.numbers limit 5; +select round(1000 * ngramSearchUTF8('абвгдеёжз', materialize('ёёёёёёёё')))=round(1000 * ngramSearchUTF8(materialize('абвгдеёжз'), 'ёёёёёёёё')) from system.numbers limit 5; + +select round(1000 * ngramSearchUTF8('', '')); +select round(1000 * ngramSearchUTF8('абв', '')); +select round(1000 * ngramSearchUTF8('', 'абв')); +select round(1000 * ngramSearchUTF8('абвгдеёжз', 'абвгдеёжз')); +select round(1000 * ngramSearchUTF8('абвгдеёжз', 'абвгдеёж')); +select round(1000 * ngramSearchUTF8('абвгдеёжз', 'гдеёзд')); +select round(1000 * ngramSearchUTF8('абвгдеёжз', 'ёёёёёёёё')); + +drop table if exists test_entry_distance; +create table 
test_entry_distance (Title String) engine = Memory; +insert into test_entry_distance values ('привет как дела?... Херсон'), ('привет как дела клип - TUT.BY'), ('привет'), ('пап привет как дела - TUT.BY'), ('привет братан как дела - TUT.BY'), ('http://metric.ru/'), ('http://autometric.ru/'), ('http://top.bigmir.net/'), ('http://metris.ru/'), ('http://metrika.ru/'), (''); + +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchUTF8(Title, Title) as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchUTF8(Title, extract(Title, 'как дела')) as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchUTF8(Title, extract(Title, 'metr')) as distance, Title; + +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchUTF8(Title, 'привет как дела') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchUTF8(Title, 'как привет дела') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchUTF8(Title, 'metrika') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchUTF8(Title, 'metrica') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchUTF8(Title, 'metriks') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchUTF8(Title, 'metrics') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchUTF8(Title, 'bigmir') as distance, Title; + + +select round(1000 * ngramSearchCaseInsensitiveUTF8(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('абв'), '')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitiveUTF8(materialize(''), 'абв')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('абвГДЕёжз'), 'АбвгдЕёжз')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('аБВГдеёЖз'), 'АбвГдеёж')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('абвгдеёжз'), 'гдеёЗД')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('абвгдеёжз'), 'ЁЁЁЁЁЁЁЁ')) from system.numbers limit 5; + +select round(1000 * ngramSearchCaseInsensitiveUTF8(materialize(''),materialize(''))) = round(1000 * ngramSearchCaseInsensitiveUTF8(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('абв'),materialize(''))) = round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('абв'), '')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitiveUTF8(materialize(''), materialize('абв'))) = round(1000 * ngramSearchCaseInsensitiveUTF8(materialize(''), 'абв')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('абвГДЕёжз'), materialize('АбвгдЕёжз'))) = round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('абвГДЕёжз'), 'АбвгдЕёжз')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('аБВГдеёЖз'), materialize('АбвГдеёж'))) = round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('аБВГдеёЖз'), 'АбвГдеёж')) from system.numbers limit 5; +select round(1000 * 
ngramSearchCaseInsensitiveUTF8(materialize('абвгдеёжз'), materialize('гдеёЗД'))) = round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('абвгдеёжз'), 'гдеёЗД')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('абвгдеёжз'), materialize('ЁЁЁЁЁЁЁЁ'))) = round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('абвгдеёжз'), 'ЁЁЁЁЁЁЁЁ')) from system.numbers limit 5; + +select round(1000 * ngramSearchCaseInsensitiveUTF8('', materialize(''))) = round(1000 * ngramSearchCaseInsensitiveUTF8(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitiveUTF8('абв',materialize(''))) = round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('абв'), '')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitiveUTF8('', materialize('абв'))) = round(1000 * ngramSearchCaseInsensitiveUTF8(materialize(''), 'абв')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitiveUTF8('абвГДЕёжз', materialize('АбвгдЕёжз'))) = round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('абвГДЕёжз'), 'АбвгдЕёжз')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitiveUTF8('аБВГдеёЖз', materialize('АбвГдеёж'))) = round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('аБВГдеёЖз'), 'АбвГдеёж')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitiveUTF8('абвгдеёжз', materialize('гдеёЗД'))) = round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('абвгдеёжз'), 'гдеёЗД')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitiveUTF8('абвгдеёжз', materialize('ЁЁЁЁЁЁЁЁ'))) = round(1000 * ngramSearchCaseInsensitiveUTF8(materialize('абвгдеёжз'), 'ЁЁЁЁЁЁЁЁ')) from system.numbers limit 5; + + +select round(1000 * ngramSearchCaseInsensitiveUTF8('', '')); +select round(1000 * ngramSearchCaseInsensitiveUTF8('абв', '')); +select round(1000 * ngramSearchCaseInsensitiveUTF8('', 'абв')); +select round(1000 * ngramSearchCaseInsensitiveUTF8('абвГДЕёжз', 'АбвгдЕЁжз')); +select round(1000 * ngramSearchCaseInsensitiveUTF8('аБВГдеёЖз', 'АбвГдеёж')); +select round(1000 * ngramSearchCaseInsensitiveUTF8('абвгдеёжз', 'гдеёЗД')); +select round(1000 * ngramSearchCaseInsensitiveUTF8('АБВГДеёжз', 'ЁЁЁЁЁЁЁЁ')); + +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, Title) as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, extract(Title, 'как дела')) as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, extract(Title, 'metr')) as distance, Title; + +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, 'ПрИвЕт кАК ДЕЛа') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, 'как ПРИВЕТ дела') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, 'metrika') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, 'Metrika') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, 'mEtrica') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, 
'metriKS') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, 'metrics') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, 'BigMIR') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitiveUTF8(Title, 'приВЕТ КАк ДеЛа КлИп - bigMir.Net') as distance, Title; + + +select round(1000 * ngramSearch(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramSearch(materialize('abc'), '')) from system.numbers limit 5; +select round(1000 * ngramSearch(materialize(''), 'abc')) from system.numbers limit 5; +select round(1000 * ngramSearch(materialize('abcdefgh'), 'abcdefgh')) from system.numbers limit 5; +select round(1000 * ngramSearch(materialize('abcdefgh'), 'abcdefg')) from system.numbers limit 5; +select round(1000 * ngramSearch(materialize('abcdefgh'), 'defgh')) from system.numbers limit 5; +select round(1000 * ngramSearch(materialize('abcdefgh'), 'aaaaaaaa')) from system.numbers limit 5; + +select round(1000 * ngramSearch(materialize(''),materialize('')))=round(1000 * ngramSearch(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramSearch(materialize('abc'),materialize('')))=round(1000 * ngramSearch(materialize('abc'), '')) from system.numbers limit 5; +select round(1000 * ngramSearch(materialize(''), materialize('abc')))=round(1000 * ngramSearch(materialize(''), 'abc')) from system.numbers limit 5; +select round(1000 * ngramSearch(materialize('abcdefgh'), materialize('abcdefgh')))=round(1000 * ngramSearch(materialize('abcdefgh'), 'abcdefgh')) from system.numbers limit 5; +select round(1000 * ngramSearch(materialize('abcdefgh'), materialize('abcdefg')))=round(1000 * ngramSearch(materialize('abcdefgh'), 'abcdefg')) from system.numbers limit 5; +select round(1000 * ngramSearch(materialize('abcdefgh'), materialize('defgh')))=round(1000 * ngramSearch(materialize('abcdefgh'), 'defgh')) from system.numbers limit 5; +select round(1000 * ngramSearch(materialize('abcdefgh'), materialize('aaaaaaaa')))=round(1000 * ngramSearch(materialize('abcdefgh'), 'aaaaaaaa')) from system.numbers limit 5; + +select round(1000 * ngramSearch('',materialize('')))=round(1000 * ngramSearch(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramSearch('abc', materialize('')))=round(1000 * ngramSearch(materialize('abc'), '')) from system.numbers limit 5; +select round(1000 * ngramSearch('', materialize('abc')))=round(1000 * ngramSearch(materialize(''), 'abc')) from system.numbers limit 5; +select round(1000 * ngramSearch('abcdefgh', materialize('abcdefgh')))=round(1000 * ngramSearch(materialize('abcdefgh'), 'abcdefgh')) from system.numbers limit 5; +select round(1000 * ngramSearch('abcdefgh', materialize('abcdefg')))=round(1000 * ngramSearch(materialize('abcdefgh'), 'abcdefg')) from system.numbers limit 5; +select round(1000 * ngramSearch('abcdefgh', materialize('defgh')))=round(1000 * ngramSearch(materialize('abcdefgh'), 'defgh')) from system.numbers limit 5; +select round(1000 * ngramSearch('abcdefgh', materialize('aaaaaaaa')))=round(1000 * ngramSearch(materialize('abcdefgh'), 'aaaaaaaa')) from system.numbers limit 5; + + +select round(1000 * ngramSearch('', '')); +select round(1000 * ngramSearch('abc', '')); +select round(1000 * ngramSearch('', 'abc')); +select round(1000 * ngramSearch('abcdefgh', 'abcdefgh')); +select round(1000 * 
ngramSearch('abcdefgh', 'abcdefg')); +select round(1000 * ngramSearch('abcdefgh', 'defgh')); +select round(1000 * ngramSearch('abcdefghaaaaaaaaaa', 'aaaaaaaa')); + +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearch(Title, 'привет как дела') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearch(Title, 'как привет дела') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearch(Title, 'metrika') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearch(Title, 'metrica') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearch(Title, 'metriks') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearch(Title, 'metrics') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearch(Title, 'bigmir') as distance, Title; + +select round(1000 * ngramSearchCaseInsensitive(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitive(materialize('abc'), '')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitive(materialize(''), 'abc')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitive(materialize('abCdefgH'), 'Abcdefgh')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitive(materialize('abcdefgh'), 'abcdeFG')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitive(materialize('AAAAbcdefgh'), 'defgh')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitive(materialize('ABCdefgH'), 'aaaaaaaa')) from system.numbers limit 5; + +select round(1000 * ngramSearchCaseInsensitive(materialize(''), materialize('')))=round(1000 * ngramSearchCaseInsensitive(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitive(materialize('abc'), materialize('')))=round(1000 * ngramSearchCaseInsensitive(materialize('abc'), '')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitive(materialize(''), materialize('abc')))=round(1000 * ngramSearchCaseInsensitive(materialize(''), 'abc')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitive(materialize('abCdefgH'), materialize('Abcdefgh')))=round(1000 * ngramSearchCaseInsensitive(materialize('abCdefgH'), 'Abcdefgh')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitive(materialize('abcdefgh'), materialize('abcdeFG')))=round(1000 * ngramSearchCaseInsensitive(materialize('abcdefgh'), 'abcdeFG')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitive(materialize('AAAAbcdefgh'), materialize('defgh')))=round(1000 * ngramSearchCaseInsensitive(materialize('AAAAbcdefgh'), 'defgh')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitive(materialize('ABCdefgH'), materialize('aaaaaaaa')))=round(1000 * ngramSearchCaseInsensitive(materialize('ABCdefgH'), 'aaaaaaaa')) from system.numbers limit 5; + +select round(1000 * ngramSearchCaseInsensitive('', materialize('')))=round(1000 * ngramSearchCaseInsensitive(materialize(''), '')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitive('abc', materialize('')))=round(1000 * ngramSearchCaseInsensitive(materialize('abc'), '')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitive('', 
materialize('abc')))=round(1000 * ngramSearchCaseInsensitive(materialize(''), 'abc')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitive('abCdefgH', materialize('Abcdefgh')))=round(1000 * ngramSearchCaseInsensitive(materialize('abCdefgH'), 'Abcdefgh')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitive('abcdefgh', materialize('abcdeFG')))=round(1000 * ngramSearchCaseInsensitive(materialize('abcdefgh'), 'abcdeFG')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitive('AAAAbcdefgh', materialize('defgh')))=round(1000 * ngramSearchCaseInsensitive(materialize('AAAAbcdefgh'), 'defgh')) from system.numbers limit 5; +select round(1000 * ngramSearchCaseInsensitive('ABCdefgH', materialize('aaaaaaaa')))=round(1000 * ngramSearchCaseInsensitive(materialize('ABCdefgH'), 'aaaaaaaa')) from system.numbers limit 5; + +select round(1000 * ngramSearchCaseInsensitive('', '')); +select round(1000 * ngramSearchCaseInsensitive('abc', '')); +select round(1000 * ngramSearchCaseInsensitive('', 'abc')); +select round(1000 * ngramSearchCaseInsensitive('abCdefgH', 'Abcdefgh')); +select round(1000 * ngramSearchCaseInsensitive('abcdefgh', 'abcdeFG')); +select round(1000 * ngramSearchCaseInsensitive('AAAAbcdefgh', 'defgh')); +select round(1000 * ngramSearchCaseInsensitive('ABCdefgHaAaaaAaaaAA', 'aaaaaaaa')); + +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitive(Title, 'ПрИвЕт кАК ДЕЛа') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitive(Title, 'как ПРИВЕТ дела') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitive(Title, 'metrika') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitive(Title, 'Metrika') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitive(Title, 'mEtrica') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitive(Title, 'metriKS') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitive(Title, 'metrics') as distance, Title; +SELECT Title, round(1000 * distance) FROM test_entry_distance ORDER BY ngramSearchCaseInsensitive(Title, 'BigMIR') as distance, Title; + +drop table if exists test_entry_distance; diff --git a/parser/testdata/00952_insert_into_distributed_with_materialized_column/ast.json b/parser/testdata/00952_insert_into_distributed_with_materialized_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00952_insert_into_distributed_with_materialized_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00952_insert_into_distributed_with_materialized_column/metadata.json b/parser/testdata/00952_insert_into_distributed_with_materialized_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00952_insert_into_distributed_with_materialized_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00952_insert_into_distributed_with_materialized_column/query.sql b/parser/testdata/00952_insert_into_distributed_with_materialized_column/query.sql new file mode 100644 index 000000000..fbf24dc2b --- /dev/null +++ 
b/parser/testdata/00952_insert_into_distributed_with_materialized_column/query.sql @@ -0,0 +1,95 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS local_00952; +DROP TABLE IF EXISTS distributed_00952; + +-- +-- insert_allow_materialized_columns=0 +-- +SELECT 'insert_allow_materialized_columns=0'; +SET insert_allow_materialized_columns=0; + +-- +-- distributed_foreground_insert=0 +-- +SELECT 'distributed_foreground_insert=0'; +SET distributed_foreground_insert=0; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE local_00952 (date Date, value Date MATERIALIZED toDate('2017-08-01')) ENGINE = MergeTree(date, date, 8192); +CREATE TABLE distributed_00952 AS local_00952 ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), local_00952, rand()); + +INSERT INTO distributed_00952 VALUES ('2018-08-01'); +SYSTEM FLUSH DISTRIBUTED distributed_00952; + +SELECT * FROM distributed_00952; +SELECT date, value FROM distributed_00952; +SELECT * FROM local_00952; +SELECT date, value FROM local_00952; + +DROP TABLE distributed_00952; +DROP TABLE local_00952; + +-- +-- distributed_foreground_insert=1 +-- +SELECT 'distributed_foreground_insert=1'; +SET distributed_foreground_insert=1; + +CREATE TABLE local_00952 (date Date, value Date MATERIALIZED toDate('2017-08-01')) ENGINE = MergeTree(date, date, 8192); +CREATE TABLE distributed_00952 AS local_00952 ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), local_00952, rand()); + +INSERT INTO distributed_00952 VALUES ('2018-08-01'); + +SELECT * FROM distributed_00952; +SELECT date, value FROM distributed_00952; +SELECT * FROM local_00952; +SELECT date, value FROM local_00952; + +DROP TABLE distributed_00952; +DROP TABLE local_00952; + +-- +-- insert_allow_materialized_columns=1 +-- +SELECT 'insert_allow_materialized_columns=1'; +SET insert_allow_materialized_columns=1; + +-- +-- distributed_foreground_insert=0 +-- +SELECT 'distributed_foreground_insert=0'; +SET distributed_foreground_insert=0; + +CREATE TABLE local_00952 (date Date, value Date MATERIALIZED toDate('2017-08-01')) ENGINE = MergeTree(date, date, 8192); +CREATE TABLE distributed_00952 AS local_00952 ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), local_00952, rand()); + +INSERT INTO distributed_00952 (date, value) VALUES ('2018-08-01', '2019-08-01'); +SYSTEM FLUSH DISTRIBUTED distributed_00952; + +SELECT * FROM distributed_00952; +SELECT date, value FROM distributed_00952; +SELECT * FROM local_00952; +SELECT date, value FROM local_00952; + +DROP TABLE distributed_00952; +DROP TABLE local_00952; + +-- +-- distributed_foreground_insert=1 +-- +SELECT 'distributed_foreground_insert=1'; +SET distributed_foreground_insert=1; + +CREATE TABLE local_00952 (date Date, value Date MATERIALIZED toDate('2017-08-01')) ENGINE = MergeTree(date, date, 8192); +CREATE TABLE distributed_00952 AS local_00952 ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), local_00952, rand()); + +INSERT INTO distributed_00952 (date, value) VALUES ('2018-08-01', '2019-08-01'); + +SELECT * FROM distributed_00952; +SELECT date, value FROM distributed_00952; +SELECT * FROM local_00952; +SELECT date, value FROM local_00952; + +DROP TABLE distributed_00952; +DROP TABLE local_00952; diff --git a/parser/testdata/00952_part_frozen_info/ast.json b/parser/testdata/00952_part_frozen_info/ast.json new file mode 100644 index 000000000..3d7b5bf8a --- /dev/null +++ b/parser/testdata/00952_part_frozen_info/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": 
"String" + } + ], + + "data": + [ + { + "explain": "DropQuery part_info (children 1)" + }, + { + "explain": " Identifier part_info" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001413268, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00952_part_frozen_info/metadata.json b/parser/testdata/00952_part_frozen_info/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00952_part_frozen_info/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00952_part_frozen_info/query.sql b/parser/testdata/00952_part_frozen_info/query.sql new file mode 100644 index 000000000..cbdbf3cc8 --- /dev/null +++ b/parser/testdata/00952_part_frozen_info/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS part_info; +CREATE TABLE part_info (t DateTime) ENGINE = MergeTree PARTITION BY toDate(t) ORDER BY (t); +INSERT INTO part_info VALUES (toDateTime('1970-10-01 00:00:01')), (toDateTime('1970-10-02 00:00:01')), (toDateTime('1970-10-03 00:00:01')); +SELECT name, is_frozen FROM system.parts WHERE `database` = currentDatabase() AND `table` = 'part_info'; +SELECT 'freeze one'; +ALTER TABLE part_info FREEZE PARTITION '1970-10-02'; +SELECT name, is_frozen FROM system.parts WHERE `database` = currentDatabase() AND `table` = 'part_info'; +SELECT 'freeze all'; +ALTER TABLE part_info FREEZE; +SELECT name, is_frozen FROM system.parts WHERE `database` = currentDatabase() AND `table` = 'part_info'; +INSERT INTO part_info VALUES (toDateTime('1970-10-02 00:00:02')); +select * from part_info order by t; +SELECT name, is_frozen FROM system.parts WHERE `database` = currentDatabase() AND `table` = 'part_info'; +DROP TABLE part_info; diff --git a/parser/testdata/00953_moving_functions/ast.json b/parser/testdata/00953_moving_functions/ast.json new file mode 100644 index 000000000..f0ef33499 --- /dev/null +++ b/parser/testdata/00953_moving_functions/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery moving_sum_num (children 1)" + }, + { + "explain": " Identifier moving_sum_num" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001865886, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/00953_moving_functions/metadata.json b/parser/testdata/00953_moving_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00953_moving_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00953_moving_functions/query.sql b/parser/testdata/00953_moving_functions/query.sql new file mode 100644 index 000000000..b7178d636 --- /dev/null +++ b/parser/testdata/00953_moving_functions/query.sql @@ -0,0 +1,48 @@ +DROP TABLE IF EXISTS moving_sum_num; +DROP TABLE IF EXISTS moving_sum_dec; + +CREATE TABLE moving_sum_num ( + k String, + dt DateTime, + v UInt64 +) +ENGINE = MergeTree ORDER BY (k, dt); + +INSERT INTO moving_sum_num + SELECT 'b' k, toDateTime('2001-02-03 00:00:00')+number as dt, number as v + FROM system.numbers + LIMIT 5 + UNION ALL + SELECT 'a' k, toDateTime('2001-02-03 00:00:00')+number as dt, number as v + FROM system.numbers + LIMIT 5; + +INSERT INTO moving_sum_num + SELECT 'b' k, toDateTime('2001-02-03 01:00:00')+number as dt, 5+number as v + FROM system.numbers + LIMIT 5; + +SELECT * FROM moving_sum_num ORDER BY k,dt FORMAT TabSeparatedWithNames; + +-- Result of function 'groupArrayMovingSum' depends on the order of merging +-- aggregate states 
which is implementation-defined in external aggregation. +SET max_bytes_before_external_group_by = 0; +SET max_bytes_ratio_before_external_group_by = 0; + +SELECT k, groupArrayMovingSum(v) FROM (SELECT * FROM moving_sum_num ORDER BY k, dt) GROUP BY k ORDER BY k FORMAT TabSeparatedWithNamesAndTypes; +SELECT k, groupArrayMovingSum(3)(v) FROM (SELECT * FROM moving_sum_num ORDER BY k, dt) GROUP BY k ORDER BY k FORMAT TabSeparatedWithNamesAndTypes; + +SELECT k, groupArrayMovingAvg(v) FROM (SELECT * FROM moving_sum_num ORDER BY k, dt) GROUP BY k ORDER BY k FORMAT TabSeparatedWithNamesAndTypes; +SELECT k, groupArrayMovingAvg(3)(v) FROM (SELECT * FROM moving_sum_num ORDER BY k, dt) GROUP BY k ORDER BY k FORMAT TabSeparatedWithNamesAndTypes; + +CREATE TABLE moving_sum_dec ENGINE = Memory AS + SELECT k, dt, toDecimal64(v, 2) as v + FROM moving_sum_num; + +SELECT k, groupArrayMovingSum(v) FROM (SELECT * FROM moving_sum_dec ORDER BY k, dt) GROUP BY k ORDER BY k FORMAT TabSeparatedWithNamesAndTypes; +SELECT k, groupArrayMovingSum(3)(v) FROM (SELECT * FROM moving_sum_dec ORDER BY k, dt) GROUP BY k ORDER BY k FORMAT TabSeparatedWithNamesAndTypes; + +DROP TABLE moving_sum_dec; +DROP TABLE moving_sum_num; + + diff --git a/parser/testdata/00954_resample_combinator/ast.json b/parser/testdata/00954_resample_combinator/ast.json new file mode 100644 index 000000000..49e3ace1c --- /dev/null +++ b/parser/testdata/00954_resample_combinator/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 1)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Function arrayReduce (children 1)" + }, + { + "explain": "     ExpressionList (children 3)" + }, + { + "explain": "      Literal 'sumResample(1, 7, 1)'" + }, + { + "explain": "      Literal Array_[UInt64_10, UInt64_11, UInt64_12, UInt64_13, UInt64_14, UInt64_15, UInt64_16, UInt64_17, UInt64_18, UInt64_19]" + }, + { + "explain": "      Literal Array_[UInt64_0, UInt64_1, UInt64_2, UInt64_3, UInt64_4, UInt64_5, UInt64_6, UInt64_7, UInt64_8, UInt64_9]" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001578338, + "rows_read": 9, + "bytes_read": 544 + } +} diff --git a/parser/testdata/00954_resample_combinator/metadata.json b/parser/testdata/00954_resample_combinator/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00954_resample_combinator/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00954_resample_combinator/query.sql b/parser/testdata/00954_resample_combinator/query.sql new file mode 100644 index 000000000..6ec36072c --- /dev/null +++ b/parser/testdata/00954_resample_combinator/query.sql @@ -0,0 +1,16 @@ +select arrayReduce('sumResample(1, 7, 1)', [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); +select arrayReduce('sumResample(3, 8, 2)', [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); +select arrayReduce('sumResample(2, 9, 3)', [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); +select arrayReduce('sumResample(2, 9, 3)', [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [-0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); +select arrayReduce('stddevPopResample(1, 7, 1)', [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); +select arrayReduce('stddevPopResample(3, 8, 2)', [10, 11, 12, 13, 14, 15, 16, 17, 
18, 19], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); +select arrayReduce('stddevPopResample(2, 9, 3)', [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); +select arrayReduce('stddevPopResample(2, 9, 3)', [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [-0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); +select arrayReduce('groupArrayResample(1, 7, 1)', [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); +select arrayReduce('groupArrayResample(3, 8, 2)', [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); +select arrayReduce('groupArrayResample(2, 9, 3)', [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); +select arrayReduce('groupArrayResample(2, 9, 3)', [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [-0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); +select arrayReduce('uniqResample(1, 7, 1)', [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); +select arrayReduce('uniqResample(3, 8, 2)', [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); +select arrayReduce('uniqResample(2, 9, 3)', [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); +select arrayReduce('uniqResample(2, 9, 3)', [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], [-0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); diff --git a/parser/testdata/00955_test_final_mark/ast.json b/parser/testdata/00955_test_final_mark/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00955_test_final_mark/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00955_test_final_mark/metadata.json b/parser/testdata/00955_test_final_mark/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00955_test_final_mark/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00955_test_final_mark/query.sql b/parser/testdata/00955_test_final_mark/query.sql new file mode 100644 index 000000000..f29c61eef --- /dev/null +++ b/parser/testdata/00955_test_final_mark/query.sql @@ -0,0 +1,164 @@ +-- Tags: no-random-merge-tree-settings + +SET send_logs_level = 'fatal'; + +DROP TABLE IF EXISTS mt_with_pk; + +CREATE TABLE mt_with_pk ( + d Date DEFAULT '2000-01-01', + x DateTime, + y Array(UInt64), + z UInt64, + n Nested (Age UInt8, Name String), + w Int16 DEFAULT 10 +) ENGINE = MergeTree() +PARTITION BY toYYYYMM(d) ORDER BY (x, z) SETTINGS index_granularity_bytes=10000; -- write_final_mark=1 by default + +SELECT '===test insert==='; + +INSERT INTO mt_with_pk (d, x, y, z, `n.Age`, `n.Name`) VALUES (toDate('2018-10-01'), toDateTime('2018-10-01 12:57:57'), [1, 1, 1], 11, [77], ['Joe']), (toDate('2018-10-01'), toDateTime('2018-10-01 16:57:57'), [2, 2, 2], 12, [88], ['Mark']), (toDate('2018-10-01'), toDateTime('2018-10-01 19:57:57'), [3, 3, 3], 13, [99], ['Robert']); + +SELECT COUNT(*) FROM mt_with_pk WHERE x > toDateTime('2018-10-01 23:57:57'); + +SELECT sum(marks) FROM system.parts WHERE table = 'mt_with_pk' AND database = currentDatabase() AND active=1 AND database = currentDatabase(); + +SELECT '===test merge==='; +INSERT INTO mt_with_pk (d, x, y, z, `n.Age`, `n.Name`) VALUES (toDate('2018-10-01'), toDateTime('2018-10-01 07:57:57'), [4, 4, 4], 14, [111, 222], ['Lui', 'Dave']), (toDate('2018-10-01'), toDateTime('2018-10-01 08:57:57'), [5, 5, 5], 15, [333, 444], ['John', 'Mike']), (toDate('2018-10-01'), toDateTime('2018-10-01 09:57:57'), [6, 6, 6], 16, [555, 666, 777], ['Alex', 'Jim', 'Tom']); + +OPTIMIZE TABLE mt_with_pk FINAL; + +SELECT COUNT(*) FROM mt_with_pk WHERE x > 
toDateTime('2018-10-01 23:57:57'); + +SELECT sum(marks) FROM system.parts WHERE table = 'mt_with_pk' AND database = currentDatabase() AND active=1 AND database = currentDatabase(); + +SELECT '===test alter==='; +ALTER TABLE mt_with_pk MODIFY COLUMN y Array(String); + +INSERT INTO mt_with_pk (d, x, y, z, `n.Age`, `n.Name`) VALUES (toDate('2018-10-01'), toDateTime('2018-10-01 05:57:57'), ['a', 'a', 'a'], 14, [888, 999], ['Jack', 'Elvis']); + +OPTIMIZE TABLE mt_with_pk FINAL; + +SELECT COUNT(*) FROM mt_with_pk WHERE x > toDateTime('2018-10-01 23:57:57'); + +SELECT sum(marks) FROM system.parts WHERE table = 'mt_with_pk' AND database = currentDatabase() AND active=1 AND database = currentDatabase(); + +SELECT '===test mutation==='; +ALTER TABLE mt_with_pk UPDATE w = 0 WHERE 1 SETTINGS mutations_sync = 2; +ALTER TABLE mt_with_pk UPDATE y = ['q', 'q', 'q'] WHERE 1 SETTINGS mutations_sync = 2; + +SELECT sum(w) FROM mt_with_pk; +SELECT distinct(y) FROM mt_with_pk; + +OPTIMIZE TABLE mt_with_pk FINAL; + +SELECT '===test skip_idx==='; +ALTER TABLE mt_with_pk ADD INDEX idx1 z + w TYPE minmax GRANULARITY 1; + +INSERT INTO mt_with_pk (d, x, y, z, `n.Age`, `n.Name`, w) VALUES (toDate('2018-10-01'), toDateTime('2018-10-01 03:57:57'), ['z', 'z', 'z'], 15, [1111, 2222], ['Garry', 'Ron'], 1); + +OPTIMIZE TABLE mt_with_pk FINAL; + +SELECT COUNT(*) FROM mt_with_pk WHERE z + w > 5000; + +SELECT sum(marks) FROM system.parts WHERE table = 'mt_with_pk' AND database = currentDatabase() AND active=1 AND database = currentDatabase(); + +DROP TABLE IF EXISTS mt_with_pk; + +SELECT '===test alter attach==='; +DROP TABLE IF EXISTS alter_attach; + +CREATE TABLE alter_attach (x UInt64, p UInt8) ENGINE = MergeTree ORDER BY tuple() PARTITION BY p SETTINGS index_granularity_bytes=10000, write_final_mark=1; + +INSERT INTO alter_attach VALUES (1, 1), (2, 1), (3, 1); + +ALTER TABLE alter_attach DETACH PARTITION 1; + +ALTER TABLE alter_attach ADD COLUMN s String; +INSERT INTO alter_attach VALUES (4, 2, 'Hello'), (5, 2, 'World'); + +ALTER TABLE alter_attach ATTACH PARTITION 1; +SELECT * FROM alter_attach ORDER BY x; + +ALTER TABLE alter_attach DETACH PARTITION 2; +ALTER TABLE alter_attach DROP COLUMN s; +INSERT INTO alter_attach VALUES (6, 3), (7, 3); + +ALTER TABLE alter_attach ATTACH PARTITION 2; +SELECT * FROM alter_attach ORDER BY x; + +DROP TABLE IF EXISTS alter_attach; +DROP TABLE IF EXISTS mt_with_pk; + +SELECT '===test alter update==='; +DROP TABLE IF EXISTS alter_update_00806; + +CREATE TABLE alter_update_00806 (d Date, e Enum8('foo'=1, 'bar'=2)) Engine = MergeTree PARTITION BY d ORDER BY (d) SETTINGS index_granularity_bytes=10000, write_final_mark=1; + +INSERT INTO alter_update_00806 (d, e) VALUES ('2018-01-01', 'foo'); +INSERT INTO alter_update_00806 (d, e) VALUES ('2018-01-02', 'bar'); + +ALTER TABLE alter_update_00806 UPDATE e = CAST('foo', 'Enum8(\'foo\' = 1, \'bar\' = 2)') WHERE d='2018-01-02' SETTINGS mutations_sync = 2; + +SELECT e FROM alter_update_00806 ORDER BY d; + +DROP TABLE IF EXISTS alter_update_00806; + +SELECT '===test no pk==='; + +DROP TABLE IF EXISTS mt_without_pk; + +CREATE TABLE mt_without_pk ( + d Date DEFAULT '2000-01-01', + x DateTime, + y Array(UInt64), + z UInt64, + n Nested (Age UInt8, Name String), + w Int16 DEFAULT 10 +) ENGINE = MergeTree() +PARTITION BY toYYYYMM(d) ORDER BY tuple() SETTINGS index_granularity_bytes=10000, write_final_mark=1; + +INSERT INTO mt_without_pk (d, x, y, z, `n.Age`, `n.Name`) VALUES (toDate('2018-10-01'), toDateTime('2018-10-01 12:57:57'), [1, 1, 1], 11, [77], 
['Joe']), (toDate('2018-10-01'), toDateTime('2018-10-01 16:57:57'), [2, 2, 2], 12, [88], ['Mark']), (toDate('2018-10-01'), toDateTime('2018-10-01 19:57:57'), [3, 3, 3], 13, [99], ['Robert']); + +SELECT COUNT(*) FROM mt_without_pk WHERE x > toDateTime('2018-10-01 23:57:57'); + +SELECT sum(marks) FROM system.parts WHERE table = 'mt_without_pk' AND active=1 AND database = currentDatabase(); + +INSERT INTO mt_without_pk (d, x, y, z, `n.Age`, `n.Name`) VALUES (toDate('2018-10-01'), toDateTime('2018-10-01 07:57:57'), [4, 4, 4], 14, [111, 222], ['Lui', 'Dave']), (toDate('2018-10-01'), toDateTime('2018-10-01 08:57:57'), [5, 5, 5], 15, [333, 444], ['John', 'Mike']), (toDate('2018-10-01'), toDateTime('2018-10-01 09:57:57'), [6, 6, 6], 16, [555, 666, 777], ['Alex', 'Jim', 'Tom']); + +OPTIMIZE TABLE mt_without_pk FINAL; + +SELECT COUNT(*) FROM mt_without_pk WHERE x > toDateTime('2018-10-01 23:57:57'); + +SELECT sum(marks) FROM system.parts WHERE table = 'mt_without_pk' AND active=1 AND database = currentDatabase(); + +DROP TABLE IF EXISTS mt_without_pk; + +SELECT '===test a lot of marks==='; + +DROP TABLE IF EXISTS mt_with_small_granularity; + +CREATE TABLE mt_with_small_granularity ( + d Date DEFAULT '2000-01-01', + x DateTime, + y Array(UInt64), + z UInt64, + n Nested (Age UInt8, Name String), + w Int16 DEFAULT 10 +) ENGINE = MergeTree() +PARTITION BY toYYYYMM(d) ORDER BY (x, z) SETTINGS index_granularity_bytes=30, min_index_granularity_bytes=20, write_final_mark=1; + +INSERT INTO mt_with_small_granularity (d, x, y, z, `n.Age`, `n.Name`) VALUES (toDate('2018-10-01'), toDateTime('2018-10-01 12:57:57'), [1, 1, 1], 11, [77], ['Joe']), (toDate('2018-10-01'), toDateTime('2018-10-01 16:57:57'), [2, 2, 2], 12, [88], ['Mark']), (toDate('2018-10-01'), toDateTime('2018-10-01 19:57:57'), [3, 3, 3], 13, [99], ['Robert']); + +SELECT COUNT(*) FROM mt_with_small_granularity WHERE x > toDateTime('2018-10-01 23:57:57'); + +SELECT sum(marks) FROM system.parts WHERE table = 'mt_with_small_granularity' AND active=1 AND database = currentDatabase(); + +INSERT INTO mt_with_small_granularity (d, x, y, z, `n.Age`, `n.Name`) VALUES (toDate('2018-10-01'), toDateTime('2018-10-01 07:57:57'), [4, 4, 4], 14, [111, 222], ['Lui', 'Dave']), (toDate('2018-10-01'), toDateTime('2018-10-01 08:57:57'), [5, 5, 5], 15, [333, 444], ['John', 'Mike']), (toDate('2018-10-01'), toDateTime('2018-10-01 09:57:57'), [6, 6, 6], 16, [555, 666, 777], ['Alex', 'Jim', 'Tom']); + +OPTIMIZE TABLE mt_with_small_granularity FINAL; + +SELECT COUNT(*) FROM mt_with_small_granularity WHERE x > toDateTime('2018-10-01 23:57:57'); + +SELECT sum(marks) FROM system.parts WHERE table = 'mt_with_small_granularity' AND active=1 AND database = currentDatabase(); + +DROP TABLE IF EXISTS mt_with_small_granularity; diff --git a/parser/testdata/00956_join_use_nulls_with_array_column/ast.json b/parser/testdata/00956_join_use_nulls_with_array_column/ast.json new file mode 100644 index 000000000..c782a28e8 --- /dev/null +++ b/parser/testdata/00956_join_use_nulls_with_array_column/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00130326, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00956_join_use_nulls_with_array_column/metadata.json b/parser/testdata/00956_join_use_nulls_with_array_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/00956_join_use_nulls_with_array_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00956_join_use_nulls_with_array_column/query.sql b/parser/testdata/00956_join_use_nulls_with_array_column/query.sql new file mode 100644 index 000000000..f70bccd68 --- /dev/null +++ b/parser/testdata/00956_join_use_nulls_with_array_column/query.sql @@ -0,0 +1,3 @@ +SET join_use_nulls = 1; +SELECT number FROM system.numbers SEMI LEFT JOIN (SELECT number, ['test'] FROM system.numbers LIMIT 1) js2 USING (number) LIMIT 1; +SELECT number FROM system.numbers ANY LEFT JOIN (SELECT number, ['test'] FROM system.numbers LIMIT 1) js2 USING (number) LIMIT 1; diff --git a/parser/testdata/00957_coalesce_const_nullable_crash/ast.json b/parser/testdata/00957_coalesce_const_nullable_crash/ast.json new file mode 100644 index 000000000..1adbd161a --- /dev/null +++ b/parser/testdata/00957_coalesce_const_nullable_crash/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function coalesce (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001549765, + "rows_read": 15, + "bytes_read": 591 + } +} diff --git a/parser/testdata/00957_coalesce_const_nullable_crash/metadata.json b/parser/testdata/00957_coalesce_const_nullable_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00957_coalesce_const_nullable_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00957_coalesce_const_nullable_crash/query.sql b/parser/testdata/00957_coalesce_const_nullable_crash/query.sql new file mode 100644 index 000000000..39e148c57 --- /dev/null +++ b/parser/testdata/00957_coalesce_const_nullable_crash/query.sql @@ -0,0 +1,13 @@ +SELECT coalesce(toNullable(1), toNullable(2)) as x, toTypeName(x); +SELECT coalesce(NULL, toNullable(2)) as x, toTypeName(x); +SELECT coalesce(toNullable(1), NULL) as x, toTypeName(x); +SELECT coalesce(NULL, NULL) as x, toTypeName(x); + +SELECT coalesce(toNullable(materialize(1)), toNullable(materialize(2))) as x, toTypeName(x); +SELECT coalesce(NULL, toNullable(materialize(2))) as x, toTypeName(x); +SELECT coalesce(toNullable(materialize(1)), NULL) as x, toTypeName(x); +SELECT coalesce(materialize(NULL), materialize(NULL)) as x, toTypeName(x); + +SELECT coalesce(toLowCardinality(toNullable(1)), toLowCardinality(toNullable(2))) as x, toTypeName(x); +SELECT coalesce(NULL, toLowCardinality(toNullable(2))) as x, toTypeName(x); +SELECT coalesce(toLowCardinality(toNullable(1)), NULL) as x, toTypeName(x); diff --git a/parser/testdata/00957_delta_diff_bug/ast.json b/parser/testdata/00957_delta_diff_bug/ast.json new file mode 100644 index 000000000..f0aba1890 --- /dev/null +++ 
b/parser/testdata/00957_delta_diff_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001463056, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00957_delta_diff_bug/metadata.json b/parser/testdata/00957_delta_diff_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00957_delta_diff_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00957_delta_diff_bug/query.sql b/parser/testdata/00957_delta_diff_bug/query.sql new file mode 100644 index 000000000..86584d3cd --- /dev/null +++ b/parser/testdata/00957_delta_diff_bug/query.sql @@ -0,0 +1,11 @@ +SET allow_suspicious_codecs = 1; + +DROP TABLE IF EXISTS segfault_table; + +CREATE TABLE segfault_table (id UInt16 CODEC(Delta(2))) ENGINE MergeTree() order by tuple(); + +INSERT INTO segfault_table VALUES (1111), (2222); + +SELECT * FROM segfault_table; + +DROP TABLE IF EXISTS segfault_table; diff --git a/parser/testdata/00957_neighbor/ast.json b/parser/testdata/00957_neighbor/ast.json new file mode 100644 index 000000000..2e93b5fe2 --- /dev/null +++ b/parser/testdata/00957_neighbor/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001352728, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00957_neighbor/metadata.json b/parser/testdata/00957_neighbor/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00957_neighbor/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00957_neighbor/query.sql b/parser/testdata/00957_neighbor/query.sql new file mode 100644 index 000000000..ee71b9626 --- /dev/null +++ b/parser/testdata/00957_neighbor/query.sql @@ -0,0 +1,43 @@ +SET allow_deprecated_error_prone_window_functions = 1; +-- no arguments +select neighbor(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +-- single argument +select neighbor(1); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +-- greater than 3 arguments +select neighbor(1,2,3,4); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +-- bad default value +select neighbor(dummy, 1, 'hello'); -- { serverError NO_COMMON_TYPE } +-- types without common supertype (UInt64 and Int8) +select number, neighbor(number, 1, -10) from numbers(3); -- { serverError NO_COMMON_TYPE } +-- nullable offset is not allowed +select number, if(number > 1, number, null) as offset, neighbor(number, offset) from numbers(3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select 'Zero offset'; +select number, neighbor(number, 0) from numbers(3); +select 'Nullable values'; +select if(number > 1, number, null) as value, number as offset, neighbor(value, offset) as neighbor from numbers(3); +select 'Result with different type'; +select toInt32(number) as n, neighbor(n, 1, -10) from numbers(3); +select 'Offset > block'; +select number, neighbor(number, 10) from numbers(3); +select 'Abs(Offset) > block'; +select number, neighbor(number, -10) from numbers(3); +select 'Positive offset'; +select number, neighbor(number, 1) from numbers(3); +select 'Negative offset'; +select number, neighbor(number, -1) from numbers(3); +select 'Positive offset with defaults'; +select number, neighbor(number, 2, number + 10) from numbers(4); +select 'Negative offset 
with defaults'; +select number, neighbor(number, -2, number + 10) from numbers(4); +select 'Positive offset with const defaults'; +select number, neighbor(number, 1, 1000) from numbers(3); +select 'Negative offset with const defaults'; +select number, neighbor(number, -1, 1000) from numbers(3); +select 'Dynamic column and offset, out of bounds'; +select number, number * 2 as offset, neighbor(number, offset, number * 10) from numbers(4); +select 'Dynamic column and offset, negative'; +select number, -number * 2 as offset, neighbor(number, offset, number * 10) from numbers(6); +select 'Dynamic column and offset, without defaults'; +select number, -(number - 2) * 2 as offset, neighbor(number, offset) from numbers(6); +select 'Constant column'; +select number, neighbor(1000, 10) from numbers(3); diff --git a/parser/testdata/00960_eval_ml_method_const/ast.json b/parser/testdata/00960_eval_ml_method_const/ast.json new file mode 100644 index 000000000..d94ab251d --- /dev/null +++ b/parser/testdata/00960_eval_ml_method_const/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Subquery (alias model) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function stochasticLinearRegressionState (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function evalMLMethod (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier model" + }, + { + "explain": " Function toFloat64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function toFloat64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001439231, + "rows_read": 24, + "bytes_read": 977 + } +} diff --git a/parser/testdata/00960_eval_ml_method_const/metadata.json b/parser/testdata/00960_eval_ml_method_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00960_eval_ml_method_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00960_eval_ml_method_const/query.sql b/parser/testdata/00960_eval_ml_method_const/query.sql new file mode 100644 index 000000000..401c83af9 --- /dev/null +++ b/parser/testdata/00960_eval_ml_method_const/query.sql @@ -0,0 +1 @@ +WITH (SELECT stochasticLinearRegressionState(1, 2, 3)) AS model SELECT evalMLMethod(model, toFloat64(1), toFloat64(1)); diff --git a/parser/testdata/00961_check_table/ast.json b/parser/testdata/00961_check_table/ast.json new file mode 100644 index 000000000..94b1e5e43 --- /dev/null +++ b/parser/testdata/00961_check_table/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + 
"statistics": + { + "elapsed": 0.001453106, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00961_check_table/metadata.json b/parser/testdata/00961_check_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00961_check_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00961_check_table/query.sql b/parser/testdata/00961_check_table/query.sql new file mode 100644 index 000000000..fc3c54356 --- /dev/null +++ b/parser/testdata/00961_check_table/query.sql @@ -0,0 +1,44 @@ +SET check_query_single_value_result = 0; +DROP TABLE IF EXISTS mt_table; + +CREATE TABLE mt_table (d Date, key UInt64, data String) ENGINE = MergeTree() PARTITION BY toYYYYMM(d) ORDER BY key; + +CHECK TABLE mt_table SETTINGS max_threads = 1; + +INSERT INTO mt_table VALUES (toDate('2018-01-01'), 1, 'old'); + +INSERT INTO mt_table VALUES (toDate('2019-01-02'), 1, 'Hello'), (toDate('2019-01-02'), 2, 'World'); + +CHECK TABLE mt_table SETTINGS max_threads = 1; + +INSERT INTO mt_table VALUES (toDate('2019-01-02'), 3, 'quick'), (toDate('2019-01-02'), 4, 'brown'); + +SELECT '========'; + +CHECK TABLE mt_table SETTINGS max_threads = 1; + +OPTIMIZE TABLE mt_table FINAL; + +SELECT '========'; + +CHECK TABLE mt_table SETTINGS max_threads = 1; + +SELECT '========'; + +INSERT INTO mt_table VALUES (toDate('2019-02-03'), 5, '!'), (toDate('2019-02-03'), 6, '?'); + +CHECK TABLE mt_table SETTINGS max_threads = 1; + +SELECT '========'; + +INSERT INTO mt_table VALUES (toDate('2019-02-03'), 7, 'jump'), (toDate('2019-02-03'), 8, 'around'); + +OPTIMIZE TABLE mt_table FINAL; + +CHECK TABLE mt_table PARTITION 201902 SETTINGS max_threads = 1; + +SELECT '========'; + +CHECK TABLE mt_table PART '201801_1_1_2'; + +DROP TABLE IF EXISTS mt_table; diff --git a/parser/testdata/00961_checksums_in_system_parts_columns_table/ast.json b/parser/testdata/00961_checksums_in_system_parts_columns_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00961_checksums_in_system_parts_columns_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00961_checksums_in_system_parts_columns_table/metadata.json b/parser/testdata/00961_checksums_in_system_parts_columns_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00961_checksums_in_system_parts_columns_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00961_checksums_in_system_parts_columns_table/query.sql b/parser/testdata/00961_checksums_in_system_parts_columns_table/query.sql new file mode 100644 index 000000000..785703ad7 --- /dev/null +++ b/parser/testdata/00961_checksums_in_system_parts_columns_table/query.sql @@ -0,0 +1,28 @@ +-- Tags: no-random-merge-tree-settings + +DROP TABLE IF EXISTS test_00961; + +CREATE TABLE test_00961 (d Date, a String, b UInt8, x String, y Int8, z UInt32) + ENGINE = MergeTree PARTITION BY d ORDER BY (a, b) + SETTINGS index_granularity = 111, + min_bytes_for_wide_part = 0, + compress_marks = 0, + compress_primary_key = 0, + index_granularity_bytes = '10Mi', + ratio_of_defaults_for_sparse_serialization = 1, + serialization_info_version = 'basic', + replace_long_file_name_to_hash = 0, + auto_statistics_types = ''; + +INSERT INTO test_00961 VALUES ('2000-01-01', 'Hello, world!', 123, 'xxx yyy', -123, 123456789); + +SELECT + name, + table, + hash_of_all_files, + hash_of_uncompressed_files, + uncompressed_hash_of_compressed_files +FROM system.parts 
+WHERE table = 'test_00961' and database = currentDatabase(); + +DROP TABLE test_00961; diff --git a/parser/testdata/00961_visit_param_buffer_underflow/ast.json b/parser/testdata/00961_visit_param_buffer_underflow/ast.json new file mode 100644 index 000000000..eeb73c9c4 --- /dev/null +++ b/parser/testdata/00961_visit_param_buffer_underflow/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function visitParamExtractRaw (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '\"a\":'" + }, + { + "explain": " Literal 'a'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001290653, + "rows_read": 8, + "bytes_read": 296 + } +} diff --git a/parser/testdata/00961_visit_param_buffer_underflow/metadata.json b/parser/testdata/00961_visit_param_buffer_underflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00961_visit_param_buffer_underflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00961_visit_param_buffer_underflow/query.sql b/parser/testdata/00961_visit_param_buffer_underflow/query.sql new file mode 100644 index 000000000..92b7501d7 --- /dev/null +++ b/parser/testdata/00961_visit_param_buffer_underflow/query.sql @@ -0,0 +1 @@ +SELECT visitParamExtractRaw('\"a\":', 'a'); diff --git a/parser/testdata/00962_enumNotExect/ast.json b/parser/testdata/00962_enumNotExect/ast.json new file mode 100644 index 000000000..4f267e001 --- /dev/null +++ b/parser/testdata/00962_enumNotExect/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_enum8 (children 1)" + }, + { + "explain": " Identifier t_enum8" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001371926, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00962_enumNotExect/metadata.json b/parser/testdata/00962_enumNotExect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00962_enumNotExect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00962_enumNotExect/query.sql b/parser/testdata/00962_enumNotExect/query.sql new file mode 100644 index 000000000..3431f238a --- /dev/null +++ b/parser/testdata/00962_enumNotExect/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS t_enum8; +CREATE TABLE t_enum8( x Enum('hello' = 1, 'world' = 2) ) ENGINE = TinyLog; +INSERT INTO t_enum8 Values('hello'),('world'),('hello'); +SELECT * FROM t_enum8; +SELECT CAST(x, 'Int8') FROM t_enum8; +DROP TABLE t_enum8; +DROP TABLE IF EXISTS t_enum16; +CREATE TABLE t_enum16( x Enum('hello' = 1, 'world' = 128) ) ENGINE = TinyLog; +INSERT INTO t_enum16 Values('hello'),('world'),('hello'); +SELECT * FROM t_enum16; +SELECT CAST(x, 'Int16') FROM t_enum16; +DROP TABLE t_enum16; +SELECT toTypeName(CAST('a', 'Enum(\'a\' = 2, \'b\' = 128)')); +SELECT toTypeName(CAST('a', 'Enum(\'a\' = 2, \'b\' = 127)')); diff --git a/parser/testdata/00962_visit_param_various/ast.json b/parser/testdata/00962_visit_param_various/ast.json new file mode 100644 index 000000000..37136384e --- /dev/null +++ b/parser/testdata/00962_visit_param_various/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": 
"explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function visitParamExtractUInt (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '\"a\":123'" + }, + { + "explain": " Literal 'a'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001544707, + "rows_read": 8, + "bytes_read": 300 + } +} diff --git a/parser/testdata/00962_visit_param_various/metadata.json b/parser/testdata/00962_visit_param_various/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00962_visit_param_various/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00962_visit_param_various/query.sql b/parser/testdata/00962_visit_param_various/query.sql new file mode 100644 index 000000000..d65cb88c3 --- /dev/null +++ b/parser/testdata/00962_visit_param_various/query.sql @@ -0,0 +1,5 @@ +SELECT visitParamExtractUInt('"a":123', 'a'); +SELECT visitParamExtractString('"a":"Hello"', 'a'); +SELECT visitParamExtractRaw('"a":Hello}', 'a'); + +SELECT sum(ignore(visitParamExtractRaw(concat('{"a":', reinterpretAsString(rand64())), 'a'))) FROM numbers(1000000); diff --git a/parser/testdata/00963_achimbab/ast.json b/parser/testdata/00963_achimbab/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00963_achimbab/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00963_achimbab/metadata.json b/parser/testdata/00963_achimbab/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00963_achimbab/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00963_achimbab/query.sql b/parser/testdata/00963_achimbab/query.sql new file mode 100644 index 000000000..60fd1911e --- /dev/null +++ b/parser/testdata/00963_achimbab/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-parallel, long + +SET output_format_write_statistics = 0; + +select + sum(cnt) > 0 as total, + k[1], k[2] + from + ( + select + arrayMap( x -> x % 3 ? toNullable(number%5 + x) : null, range(3)) as k, + number % 4 ? 
toNullable( rand() ) : Null as cnt + from system.numbers_mt + where number < 1000000 + limit 1000000 + ) +group by k with totals +order by k[2] +SETTINGS max_threads = 100, max_execution_time = 120 +format JSON; diff --git a/parser/testdata/00963_startsWith_force_primary_key/ast.json b/parser/testdata/00963_startsWith_force_primary_key/ast.json new file mode 100644 index 000000000..94339ac4d --- /dev/null +++ b/parser/testdata/00963_startsWith_force_primary_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_startsWith (children 1)" + }, + { + "explain": " Identifier test_startsWith" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001263212, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/00963_startsWith_force_primary_key/metadata.json b/parser/testdata/00963_startsWith_force_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00963_startsWith_force_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00963_startsWith_force_primary_key/query.sql b/parser/testdata/00963_startsWith_force_primary_key/query.sql new file mode 100644 index 000000000..b3895a93b --- /dev/null +++ b/parser/testdata/00963_startsWith_force_primary_key/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS test_startsWith; +CREATE TABLE test_startsWith (a String) Engine = MergeTree PARTITION BY tuple() ORDER BY a; +INSERT INTO test_startsWith (a) values ('a'), ('abcd'), ('bbb'), (''), ('abc'); +SELECT count() from test_startsWith where startsWith(a, 'a') settings force_primary_key=1; +SELECT count() from test_startsWith where startsWith(a, 'abc') settings force_primary_key=1; +DROP TABLE test_startsWith; diff --git a/parser/testdata/00964_os_thread_priority/ast.json b/parser/testdata/00964_os_thread_priority/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00964_os_thread_priority/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00964_os_thread_priority/metadata.json b/parser/testdata/00964_os_thread_priority/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00964_os_thread_priority/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00964_os_thread_priority/query.sql b/parser/testdata/00964_os_thread_priority/query.sql new file mode 100644 index 000000000..a1ef8c5e0 --- /dev/null +++ b/parser/testdata/00964_os_thread_priority/query.sql @@ -0,0 +1,3 @@ +-- the setting exists and server does not crash +SET os_threads_nice_value_query = 10; +SELECT count() FROM numbers(1000); diff --git a/parser/testdata/00965_shard_unresolvable_addresses/ast.json b/parser/testdata/00965_shard_unresolvable_addresses/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00965_shard_unresolvable_addresses/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00965_shard_unresolvable_addresses/metadata.json b/parser/testdata/00965_shard_unresolvable_addresses/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00965_shard_unresolvable_addresses/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00965_shard_unresolvable_addresses/query.sql b/parser/testdata/00965_shard_unresolvable_addresses/query.sql new file mode 100644 index 000000000..f2afb974a --- /dev/null +++ 
b/parser/testdata/00965_shard_unresolvable_addresses/query.sql @@ -0,0 +1,10 @@ +-- Tags: shard, no-fasttest +-- no-fasttest: Slow timeouts + +SET prefer_localhost_replica = 1; +SET connections_with_failover_max_tries=1; +SET connect_timeout_with_failover_ms=2000; +SET connect_timeout_with_failover_secure_ms=2000; + +SELECT count() FROM remote('127.0.0.1,localhos', system.one); -- { serverError ALL_CONNECTION_TRIES_FAILED } +SELECT count() FROM remote('127.0.0.1|localhos', system.one); diff --git a/parser/testdata/00966_invalid_json_must_not_parse/ast.json b/parser/testdata/00966_invalid_json_must_not_parse/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00966_invalid_json_must_not_parse/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00966_invalid_json_must_not_parse/metadata.json b/parser/testdata/00966_invalid_json_must_not_parse/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00966_invalid_json_must_not_parse/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00966_invalid_json_must_not_parse/query.sql b/parser/testdata/00966_invalid_json_must_not_parse/query.sql new file mode 100644 index 000000000..ca8d6ace6 --- /dev/null +++ b/parser/testdata/00966_invalid_json_must_not_parse/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-fasttest + +SET allow_simdjson=1; + +SELECT JSONLength('"HX-='); +SELECT JSONLength('[9]\0\x42\xD3\x36\xE3'); +SELECT JSONLength(unhex('5B30000E06D7AA5D')); +SELECT JSONLength('{"success"test:"123"}'); +SELECT isValidJSON('{"success"test:"123"}'); + + +SET allow_simdjson=0; + +SELECT JSONLength('"HX-='); +SELECT JSONLength('[9]\0\x42\xD3\x36\xE3'); +SELECT JSONLength(unhex('5B30000E06D7AA5D')); +SELECT JSONLength('{"success"test:"123"}'); +SELECT isValidJSON('{"success"test:"123"}'); diff --git a/parser/testdata/00967_insert_into_distributed_different_types/ast.json b/parser/testdata/00967_insert_into_distributed_different_types/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00967_insert_into_distributed_different_types/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00967_insert_into_distributed_different_types/metadata.json b/parser/testdata/00967_insert_into_distributed_different_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00967_insert_into_distributed_different_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00967_insert_into_distributed_different_types/query.sql b/parser/testdata/00967_insert_into_distributed_different_types/query.sql new file mode 100644 index 000000000..35b8ddf00 --- /dev/null +++ b/parser/testdata/00967_insert_into_distributed_different_types/query.sql @@ -0,0 +1,18 @@ +-- Tags: distributed + +set distributed_foreground_insert=1; + +DROP TABLE IF EXISTS dist_00967; +DROP TABLE IF EXISTS underlying_00967; + +-- To suppress "Structure does not match (...), implicit conversion will be done." 
message +SET send_logs_level='error'; + +CREATE TABLE dist_00967 (key UInt64) Engine=Distributed('test_shard_localhost', currentDatabase(), underlying_00967); +CREATE TABLE underlying_00967 (key Nullable(UInt64)) Engine=TinyLog(); +INSERT INTO dist_00967 SELECT toUInt64(number) FROM system.numbers LIMIT 1; + +SELECT * FROM dist_00967; + +DROP TABLE dist_00967; +DROP TABLE underlying_00967; diff --git a/parser/testdata/00968_file_engine_in_subquery/ast.json b/parser/testdata/00968_file_engine_in_subquery/ast.json new file mode 100644 index 000000000..4265a76df --- /dev/null +++ b/parser/testdata/00968_file_engine_in_subquery/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tableFile_00968 (children 1)" + }, + { + "explain": " Identifier tableFile_00968" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001267468, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/00968_file_engine_in_subquery/metadata.json b/parser/testdata/00968_file_engine_in_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00968_file_engine_in_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00968_file_engine_in_subquery/query.sql b/parser/testdata/00968_file_engine_in_subquery/query.sql new file mode 100644 index 000000000..44df28d57 --- /dev/null +++ b/parser/testdata/00968_file_engine_in_subquery/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS tableFile_00968; +DROP TABLE IF EXISTS tableMergeTree_00968; +CREATE TABLE tableFile_00968(number UInt64) ENGINE = File('TSV'); +CREATE TABLE tableMergeTree_00968(id UInt64) ENGINE = MergeTree() PARTITION BY id ORDER BY id; + +INSERT INTO tableFile_00968 SELECT number FROM system.numbers LIMIT 10; +INSERT INTO tableMergeTree_00968 SELECT number FROM system.numbers LIMIT 100; + +SELECT id FROM tableMergeTree_00968 WHERE id IN (SELECT number FROM tableFile_00968) ORDER BY id; + +DROP TABLE tableFile_00968; +DROP TABLE tableMergeTree_00968; diff --git a/parser/testdata/00968_roundAge/ast.json b/parser/testdata/00968_roundAge/ast.json new file mode 100644 index 000000000..aa25a0662 --- /dev/null +++ b/parser/testdata/00968_roundAge/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function roundAge (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001424376, + "rows_read": 7, + "bytes_read": 261 + } +} diff --git a/parser/testdata/00968_roundAge/metadata.json b/parser/testdata/00968_roundAge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00968_roundAge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00968_roundAge/query.sql b/parser/testdata/00968_roundAge/query.sql new file mode 100644 index 000000000..c8e5a5579 --- /dev/null +++ b/parser/testdata/00968_roundAge/query.sql @@ -0,0 +1,7 @@ +SELECT roundAge(0); +SELECT roundAge(18); +SELECT roundAge(25); +SELECT roundAge(35); +SELECT roundAge(45); +SELECT roundAge(55); +SELECT roundAge(56); \ No newline at end of file diff --git 
a/parser/testdata/00969_columns_clause/ast.json b/parser/testdata/00969_columns_clause/ast.json new file mode 100644 index 000000000..d82368938 --- /dev/null +++ b/parser/testdata/00969_columns_clause/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ColumnsClauseTest (children 1)" + }, + { + "explain": " Identifier ColumnsClauseTest" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001473366, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/00969_columns_clause/metadata.json b/parser/testdata/00969_columns_clause/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00969_columns_clause/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00969_columns_clause/query.sql b/parser/testdata/00969_columns_clause/query.sql new file mode 100644 index 000000000..e6ae59a2f --- /dev/null +++ b/parser/testdata/00969_columns_clause/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS ColumnsClauseTest; + +CREATE TABLE ColumnsClauseTest (product_price Int64, product_weight Int16, amount Int64) Engine=TinyLog; +INSERT INTO ColumnsClauseTest VALUES (100, 10, 324), (120, 8, 23); +SELECT COLUMNS('product.*') from ColumnsClauseTest ORDER BY product_price; + +DROP TABLE ColumnsClauseTest; + +SELECT number, COLUMNS('') FROM numbers(2); +SELECT number, COLUMNS('ber') FROM numbers(2); -- It works for unanchored regular expressions. +SELECT number, COLUMNS('x') FROM numbers(2); +SELECT COLUMNS('') FROM numbers(2); + +SELECT COLUMNS('x') FROM numbers(10) WHERE number > 5; -- { serverError EMPTY_LIST_OF_COLUMNS_QUERIED } + +SELECT * FROM numbers(2) WHERE NOT ignore(); +SELECT * FROM numbers(2) WHERE NOT ignore(*); +SELECT * FROM numbers(2) WHERE NOT ignore(COLUMNS('.+')); +SELECT * FROM numbers(2) WHERE NOT ignore(COLUMNS('x')); +SELECT COLUMNS('n') + COLUMNS('u') FROM system.numbers LIMIT 2; + +SELECT COLUMNS('n') + COLUMNS('u') FROM (SELECT 1 AS a, 2 AS b); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT COLUMNS('a') + COLUMNS('b') FROM (SELECT 1 AS a, 2 AS b); +SELECT COLUMNS('a') + COLUMNS('a') FROM (SELECT 1 AS a, 2 AS b); +SELECT COLUMNS('b') + COLUMNS('b') FROM (SELECT 1 AS a, 2 AS b); +SELECT COLUMNS('a|b') + COLUMNS('b') FROM (SELECT 1 AS a, 2 AS b); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT plus(COLUMNS('^(a|b)$')) FROM (SELECT 1 AS a, 2 AS b); diff --git a/parser/testdata/00969_roundDuration/ast.json b/parser/testdata/00969_roundDuration/ast.json new file mode 100644 index 000000000..53c465b89 --- /dev/null +++ b/parser/testdata/00969_roundDuration/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function roundDuration (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001078251, + "rows_read": 7, + "bytes_read": 266 + } +} diff --git a/parser/testdata/00969_roundDuration/metadata.json b/parser/testdata/00969_roundDuration/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00969_roundDuration/metadata.json @@ -0,0 +1 @@ +{"todo": true} 
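Each fixture's ast.json appears to capture ClickHouse's EXPLAIN AST output for the first statement only, which is why the 00969_roundDuration entry above shows a single tree for SELECT roundDuration(0). As a hedged orientation sketch (not part of the fixture), assuming the bucket set documented for roundDuration -- values below 1 map to 0, everything else rounds down to one of 1, 10, 30, 60, 120, 180, 240, 300, 600, 1200, 1800, 3600, 7200, 18000, 36000 -- the query.sql below probes exactly those edges; a compact equivalent would be:

    -- Hedged sketch: under the documented bucket set, 9 -> 1, 29 -> 10, 59 -> 30, 37000 -> 36000.
    SELECT
        arrayJoin([0, 9, 10, 29, 30, 59, 60, 36999, 37000]) AS seconds,
        roundDuration(seconds) AS bucket;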
diff --git a/parser/testdata/00969_roundDuration/query.sql b/parser/testdata/00969_roundDuration/query.sql new file mode 100644 index 000000000..200ae2b78 --- /dev/null +++ b/parser/testdata/00969_roundDuration/query.sql @@ -0,0 +1,16 @@ +SELECT roundDuration(0); +SELECT roundDuration(10); +SELECT roundDuration(30); +SELECT roundDuration(60); +SELECT roundDuration(120); +SELECT roundDuration(180); +SELECT roundDuration(240); +SELECT roundDuration(300); +SELECT roundDuration(600); +SELECT roundDuration(1200); +SELECT roundDuration(1800); +SELECT roundDuration(3600); +SELECT roundDuration(7200); +SELECT roundDuration(18000); +SELECT roundDuration(36000); +SELECT roundDuration(37000); \ No newline at end of file diff --git a/parser/testdata/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read/ast.json b/parser/testdata/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read/ast.json new file mode 100644 index 000000000..6d12edae4 --- /dev/null +++ b/parser/testdata/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery merge_tree (children 1)" + }, + { + "explain": " Identifier merge_tree" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001173544, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read/metadata.json b/parser/testdata/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read/query.sql b/parser/testdata/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read/query.sql new file mode 100644 index 000000000..a5b148934 --- /dev/null +++ b/parser/testdata/00971_merge_tree_uniform_read_distribution_and_max_rows_to_read/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS merge_tree; +CREATE TABLE merge_tree (x UInt8) ENGINE = MergeTree ORDER BY x; +INSERT INTO merge_tree SELECT 0 FROM numbers(1000000); + +SET max_threads = 4; +SET max_rows_to_read = 1100000; + +SELECT count() FROM merge_tree; +SELECT count() FROM merge_tree; + +SET max_rows_to_read = 900000; + +-- constant ignore will be pruned by part pruner. ignore(*) is used. 
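+-- (Hedged note: with a fully constant predicate the part pruner can answer the query
+-- from metadata without scanning rows, which would stay under max_rows_to_read;
+-- ignore(*) references every column, so all 1000000 rows must be read against the
+-- 900000-row limit, producing the TOO_MANY_ROWS errors below.)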
+SELECT count() FROM merge_tree WHERE not ignore(*); -- { serverError TOO_MANY_ROWS } +SELECT count() FROM merge_tree WHERE not ignore(*); -- { serverError TOO_MANY_ROWS } + +DROP TABLE merge_tree; diff --git a/parser/testdata/00972_desc_table_virtual_columns/ast.json b/parser/testdata/00972_desc_table_virtual_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00972_desc_table_virtual_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00972_desc_table_virtual_columns/metadata.json b/parser/testdata/00972_desc_table_virtual_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00972_desc_table_virtual_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00972_desc_table_virtual_columns/query.sql b/parser/testdata/00972_desc_table_virtual_columns/query.sql new file mode 100644 index 000000000..920025a84 --- /dev/null +++ b/parser/testdata/00972_desc_table_virtual_columns/query.sql @@ -0,0 +1,9 @@ +-- No virtual columns should be output in a DESC TABLE query. + +DROP TABLE IF EXISTS upyachka; +CREATE TABLE upyachka (x UInt64) ENGINE = Memory; + +-- The Merge table has a virtual column `_table` +DESC TABLE merge(currentDatabase(), 'upyachka'); + +DROP TABLE upyachka; diff --git a/parser/testdata/00972_geohashesInBox/ast.json b/parser/testdata/00972_geohashesInBox/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00972_geohashesInBox/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00972_geohashesInBox/metadata.json b/parser/testdata/00972_geohashesInBox/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00972_geohashesInBox/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00972_geohashesInBox/query.sql b/parser/testdata/00972_geohashesInBox/query.sql new file mode 100644 index 000000000..636474e7a --- /dev/null +++ b/parser/testdata/00972_geohashesInBox/query.sql @@ -0,0 +1,72 @@ +-- test data acquired with: https://github.com/sunng87/node-geohash +-- geohash.bboxes(minlat, minlon, maxlat, maxlon, precision) +-- as +-- geohashesInBox(minlon, minlat, maxlon, maxlat, precision) +-- except for cases where the JS version produces results outside of the given region, +-- typically at wrap points: poles, 0-latitude and 0-longitude.
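+-- (Hedged note: each additional geohash character subdivides a cell into 32 sub-cells,
+-- so the number of cells covering a fixed box grows roughly 32x per precision step;
+-- precision is capped at 12, and boxes that would still need too many cells fail with
+-- TOO_LARGE_ARRAY_SIZE, as exercised at the end of this file.)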
+ +SELECT 'center'; +SELECT arraySort(geohashesInBox(-1.0, -1.0, 1.0, 1.0, 3)); +SELECT arraySort(geohashesInBox(-0.1, -0.1, 0.1, 0.1, 5)); +SELECT arraySort(geohashesInBox(-0.01, -0.01, 0.01, 0.01, 5)); + +SELECT 'north pole'; +SELECT arraySort(geohashesInBox(-180.0, 89.0, -179.0, 90.0, 3)); +SELECT arraySort(geohashesInBox(-1.0, 89.0, 0.0, 90.0, 3)); +SELECT arraySort(geohashesInBox(0.0, 89.0, 1.0, 90.0, 3)); +SELECT arraySort(geohashesInBox(179.0, 89.0, 180.0, 90.0, 3)); + +SELECT 'south pole'; +SELECT arraySort(geohashesInBox(-180.0, -90.0, -179.0, -89.0, 3)); +SELECT arraySort(geohashesInBox(-1.0, -90.0, 0.0, -89.0, 3)); +SELECT arraySort(geohashesInBox(0.0, -90.0, 1.0, -89.0, 3)); +SELECT arraySort(geohashesInBox(179.0, -90.0, 180.0, -89.0, 3)); + +SELECT 'wrap point around equator'; +SELECT arraySort(geohashesInBox(179.0, -1.0, 180.0, 0.0, 3)); +SELECT arraySort(geohashesInBox(179.0, 0.0, 180.0, 1.0, 3)); +SELECT arraySort(geohashesInBox(-180.0, -1.0, -179.0, 0.0, 3)); +SELECT arraySort(geohashesInBox(-180.0, 0.0, -179.0, 1.0, 3)); + +SELECT 'arbitrary values in all 4 quarters'; +SELECT arraySort(geohashesInBox(98.36, 7.88, 98.37, 7.89, 6)); +SELECT arraySort(geohashesInBox(53.8, 27.6, 53.9, 27.7, 5)); +SELECT arraySort(geohashesInBox(-49.26, -25.38, -49.25, -25.37, 6)); +SELECT arraySort(geohashesInBox(23.11, -82.37, 23.12, -82.36, 6)); + +SELECT 'small range always produces array of length 1'; +SELECT lon/5 - 180 AS lon1, lat/5 - 90 AS lat1, lon1 AS lon2, lat1 AS lat2, geohashesInBox(lon1, lat1, lon2, lat2, 1) AS g +FROM (SELECT arrayJoin(range(360*5)) AS lon, arrayJoin(range(180*5)) AS lat) WHERE length(g) != 1; + +SELECT lon/5 - 40 AS lon1, lat/5 - 20 AS lat1, lon1 AS lon2, lat1 AS lat2, geohashesInBox(lon1, lat1, lon2, lat2, 12) AS g +FROM (SELECT arrayJoin(range(80*5)) AS lon, arrayJoin(range(10*5)) AS lat) WHERE length(g) != 1; + +SELECT lon/5 - 40 AS lon1, lat/5 - 20 AS lat1, lon1 + 0.0000000001 AS lon2, lat1 + 0.0000000001 AS lat2, geohashesInBox(lon1, lat1, lon2, lat2, 1) AS g +FROM (SELECT arrayJoin(range(80*5)) AS lon, arrayJoin(range(10*5)) AS lat) WHERE length(g) != 1; + +SELECT 'zooming'; +SELECT arraySort(geohashesInBox(20.0, 20.0, 21.0, 21.0, 2)); +SELECT arraySort(geohashesInBox(20.0, 20.0, 21.0, 21.0, 3)); +SELECT arraySort(geohashesInBox(20.0, 20.0, 21.0, 21.0, 4)); +SELECT arraySort(geohashesInBox(20.0, 20.0, 20.25, 20.25, 5)); +SELECT arraySort(geohashesInBox(20.0, 20.0, 20.0625, 20.0625, 6)); +SELECT arraySort(geohashesInBox(20.0, 20.0, 20.01, 20.01, 7)); +SELECT arraySort(geohashesInBox(20.0, 20.0, 20.001, 20.001, 8)); +SELECT arraySort(geohashesInBox(20.0, 20.0, 20.0001, 20.0001, 9)); +SELECT arraySort(geohashesInBox(20.0, 20.0, 20.00001, 20.00001, 10)); +SELECT arraySort(geohashesInBox(20.0, 20.0, 20.000001, 20.000001, 11)); +SELECT arraySort(geohashesInBox(20.0, 20.0, 20.000001, 20.000001, 12)); + + -- precision greater than 12 is truncated to 12, so these two calls would produce same result as above +SELECT arraySort(geohashesInBox(20.0, 20.0, 20.000001, 20.000001, 13)); +SELECT arraySort(geohashesInBox(20.0, 20.0, 20.000001, 20.000001, 14)); + +SELECT 'input values are clamped to -90..90, -180..180 range'; +SELECT length(geohashesInBox(-inf, -inf, inf, inf, 3)); + +SELECT 'errors'; +SELECT geohashesInBox(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } -- not enough arguments +SELECT geohashesInBox(1, 2, 3, 4, 5); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } -- wrong types of arguments +SELECT geohashesInBox(toFloat32(1.0), 2.0, 3.0, 4.0, 5); -- { 
serverError ILLEGAL_TYPE_OF_ARGUMENT } -- all lats and longs should be of the same type +SELECT geohashesInBox(24.48, 40.56, 24.785, 40.81, 12); -- { serverError TOO_LARGE_ARRAY_SIZE } -- too many elements in array + diff --git a/parser/testdata/00973_create_table_as_table_function/ast.json b/parser/testdata/00973_create_table_as_table_function/ast.json new file mode 100644 index 000000000..f6de2aa2c --- /dev/null +++ b/parser/testdata/00973_create_table_as_table_function/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001283414, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/00973_create_table_as_table_function/metadata.json b/parser/testdata/00973_create_table_as_table_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00973_create_table_as_table_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00973_create_table_as_table_function/query.sql b/parser/testdata/00973_create_table_as_table_function/query.sql new file mode 100644 index 000000000..147cb87ff --- /dev/null +++ b/parser/testdata/00973_create_table_as_table_function/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; + +CREATE TABLE t1 AS remote('127.0.0.1', system.one); +SELECT count() FROM t1; + +CREATE TABLE t2 AS remote('127.0.0.1', system.numbers); +SELECT * FROM t2 LIMIT 18; + +CREATE TABLE t3 AS remote('127.0.0.1', numbers(100)); +SELECT * FROM t3 where number > 17 and number < 25; + +CREATE TABLE t4 AS numbers(100); +SELECT count() FROM t4 where number > 74; + +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +DROP TABLE t4; diff --git a/parser/testdata/00973_uniq_non_associativity/ast.json b/parser/testdata/00973_uniq_non_associativity/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00973_uniq_non_associativity/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00973_uniq_non_associativity/metadata.json b/parser/testdata/00973_uniq_non_associativity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00973_uniq_non_associativity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00973_uniq_non_associativity/query.sql b/parser/testdata/00973_uniq_non_associativity/query.sql new file mode 100644 index 000000000..91d62a134 --- /dev/null +++ b/parser/testdata/00973_uniq_non_associativity/query.sql @@ -0,0 +1,133 @@ +/* Aggregate function 'uniq' is intended to be associative and provide deterministic results regardless of the schedule of query execution threads and remote servers in a cluster. + * But due to a subtle bug in the implementation it is not associative in very rare cases. + * In this test we fill the data structure with a specific pattern that reproduces this behaviour. + */ + +DROP TABLE IF EXISTS part_a; +DROP TABLE IF EXISTS part_b; +DROP TABLE IF EXISTS part_c; +DROP TABLE IF EXISTS part_d; + +/* Create values that will resize the hash table to the maximum (131072 cells) and fill it with less than max_fill (65536 cells) + * and occupy cells near the end except the last 10 cells: + * [ ----------- ] + * Pick values that will vanish if the table is rehashed. + */ +CREATE TABLE part_a ENGINE = TinyLog AS SELECT * FROM +( +WITH + number AS k1, + bitXor(k1, bitShiftRight(k1, 33)) AS k2, + k2 * 0xff51afd7ed558ccd AS k3, + bitXor(k3, bitShiftRight(k3, 33)) AS k4, + k4 * 0xc4ceb9fe1a85ec53 AS k5, + bitXor(k5, bitShiftRight(k5, 33)) AS k6, + k6 AS hash, + bitShiftRight(hash, 15) % 0x20000 AS place, + hash % 2 = 0 AS will_remain +SELECT hash, number, place FROM system.numbers WHERE place >= 90000 AND place < 131062 AND NOT will_remain LIMIT 1 BY place LIMIT 41062 +) ORDER BY place; + +/* Create values that will resize the hash table to the maximum (131072 cells) and fill it with less than max_fill (65536 cells), + * but if we use both "a" and "b", it will force a rehash. + * [ ----------- ] + * Pick values that will remain after the rehash. + */ +CREATE TABLE part_b ENGINE = TinyLog AS SELECT * FROM +( +WITH + number AS k1, + bitXor(k1, bitShiftRight(k1, 33)) AS k2, + k2 * 0xff51afd7ed558ccd AS k3, + bitXor(k3, bitShiftRight(k3, 33)) AS k4, + k4 * 0xc4ceb9fe1a85ec53 AS k5, + bitXor(k5, bitShiftRight(k5, 33)) AS k6, + k6 AS hash, + bitShiftRight(hash, 15) % 0x20000 AS place, + hash % 2 = 0 AS will_remain +SELECT hash, number, place FROM system.numbers WHERE place >= 50000 AND place < 90000 AND will_remain LIMIT 1 BY place LIMIT 40000 +) ORDER BY place; + +/* Occupy 10 cells near the end of "a": + * a: [ ----------- ] + * c: [ -- ] + * If we insert "a" then "c", these values will be placed at the end of the hash table due to collision resolution: + * a + c: [ aaaaaaaaaaacc] + */ +CREATE TABLE part_c ENGINE = TinyLog AS SELECT * FROM +( +WITH + number AS k1, + bitXor(k1, bitShiftRight(k1, 33)) AS k2, + k2 * 0xff51afd7ed558ccd AS k3, + bitXor(k3, bitShiftRight(k3, 33)) AS k4, + k4 * 0xc4ceb9fe1a85ec53 AS k5, + bitXor(k5, bitShiftRight(k5, 33)) AS k6, + k6 AS hash, + bitShiftRight(hash, 15) % 0x20000 AS place, + hash % 2 = 0 AS will_remain +SELECT hash, number, place FROM system.numbers WHERE place >= 131052 AND place < 131062 AND will_remain AND hash NOT IN (SELECT hash FROM part_a) LIMIT 1 BY place LIMIT 10 +) ORDER BY place; + +/* Occupy 10 cells at the end of the hash table, after "a": + * a: [ ----------- ] + * d: [ --] + * a + d: [ aaaaaaaaaaadd] + * But if we insert "a" then "c" then "d", these values will be placed at the beginning of the hash table due to collision resolution: + * a+c+d: [dd aaaaaaaaaaacc] + */ +CREATE TABLE part_d ENGINE = TinyLog AS SELECT * FROM +( +WITH + number AS k1, + bitXor(k1, bitShiftRight(k1, 33)) AS k2, + k2 * 0xff51afd7ed558ccd AS k3, + bitXor(k3, bitShiftRight(k3, 33)) AS k4, + k4 * 0xc4ceb9fe1a85ec53 AS k5, + bitXor(k5, bitShiftRight(k5, 33)) AS k6, + k6 AS hash, + bitShiftRight(hash, 15) % 0x20000 AS place, + hash % 2 = 0 AS will_remain +SELECT hash, number, place FROM system.numbers WHERE place >= 131062 AND will_remain LIMIT 1 BY place LIMIT 10 +) ORDER BY place; + +/** What happens if we insert a then c then d then b? + * Insertion of b forces a rehash. + * a will be removed, but c, d, b remain: + * [dd bbbbbbbbbb cc] + * Then we go through the hash table and move elements to better places in the collision resolution chain. + * c will be moved left to its right place: + * [dd bbbbbbbbbb cc ] + * + * And d must be moved as well: + * [ bbbbbbbbbb ccdd] + * But the algorithm was incorrect, so this move did not happen. + * + * If we insert d again, it will be placed twice, because the original d will not be found: + * [dd bbbbbbbbbb ccdd] + * This leads to a slightly higher return value of the "uniq" aggregate function, and it is dependent on insertion order.
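+ *
+ * (A hedged aside: the k1..k6 chain in the CREATE statements above is the 64-bit
+ * MurmurHash3 finalizer -- xor-shift by 33, multiply by 0xff51afd7ed558ccd,
+ * xor-shift, multiply by 0xc4ceb9fe1a85ec53, xor-shift -- and 'place' keeps 17 bits
+ * of the result, one per cell of the 131072-cell (0x20000) table.)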
+ */ + + +SET max_threads = 1; + +/** Results of these two queries must match: */ + +SELECT uniq(number) FROM ( + SELECT * FROM part_a +UNION ALL SELECT * FROM part_c +UNION ALL SELECT * FROM part_d +UNION ALL SELECT * FROM part_b); + +SELECT uniq(number) FROM ( + SELECT * FROM part_a +UNION ALL SELECT * FROM part_c +UNION ALL SELECT * FROM part_d +UNION ALL SELECT * FROM part_b +UNION ALL SELECT * FROM part_d); + + +DROP TABLE part_a; +DROP TABLE part_b; +DROP TABLE part_c; +DROP TABLE part_d; diff --git a/parser/testdata/00974_adaptive_granularity_secondary_index/ast.json b/parser/testdata/00974_adaptive_granularity_secondary_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00974_adaptive_granularity_secondary_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00974_adaptive_granularity_secondary_index/metadata.json b/parser/testdata/00974_adaptive_granularity_secondary_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00974_adaptive_granularity_secondary_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00974_adaptive_granularity_secondary_index/query.sql b/parser/testdata/00974_adaptive_granularity_secondary_index/query.sql new file mode 100644 index 000000000..4ceca29c1 --- /dev/null +++ b/parser/testdata/00974_adaptive_granularity_secondary_index/query.sql @@ -0,0 +1,56 @@ + +DROP TABLE IF EXISTS indexed_table; + +CREATE TABLE indexed_table +( + `tm` DateTime, + `log_message` String, + INDEX log_message log_message TYPE tokenbf_v1(4096, 2, 0) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY (tm) +SETTINGS index_granularity_bytes = 50, min_index_granularity_bytes = 40; + +INSERT INTO indexed_table SELECT toDateTime('2019-05-27 10:00:00') + number % 100, 'h' FROM numbers(1000); + +INSERT INTO indexed_table +SELECT + toDateTime('2019-05-27 10:00:00') + number % 100, + concat('hhhhhhhhhhhhhhhhhhhhhhhhh', 'xxxxxxxxxxxxxxxxxxxxxxxxxxxx', 'yyyyyyyyyyyyyyyyyyyyyyyyyy', toString(rand())) +FROM numbers(1000); + +OPTIMIZE TABLE indexed_table FINAL; + +SELECT COUNT() FROM indexed_table WHERE log_message like '%x%'; + +DROP TABLE IF EXISTS indexed_table; + +DROP TABLE IF EXISTS another_indexed_table; + +CREATE TABLE another_indexed_table +( + `tm` DateTime, + `log_message` String, + INDEX log_message log_message TYPE tokenbf_v1(4096, 2, 0) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY (tm) +SETTINGS index_granularity_bytes = 50, + min_index_granularity_bytes = 40, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0; + + +INSERT INTO another_indexed_table SELECT toDateTime('2019-05-27 10:00:00') + number % 100, 'h' FROM numbers(1000); + +INSERT INTO another_indexed_table +SELECT + toDateTime('2019-05-27 10:00:00') + number % 100, + concat('hhhhhhhhhhhhhhhhhhhhhhhhh', 'xxxxxxxxxxxxxxxxxxxxxxxxxxxx', 'yyyyyyyyyyyyyyyyyyyyyyyyyy', toString(rand())) + FROM numbers(1000); + +OPTIMIZE TABLE another_indexed_table FINAL; + +SELECT COUNT() FROM another_indexed_table WHERE log_message like '%x%'; + +DROP TABLE IF EXISTS another_indexed_table; diff --git a/parser/testdata/00974_bitmapContains_with_primary_key/ast.json b/parser/testdata/00974_bitmapContains_with_primary_key/ast.json new file mode 100644 index 000000000..087811637 --- /dev/null +++ b/parser/testdata/00974_bitmapContains_with_primary_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + 
"data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001154634, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/00974_bitmapContains_with_primary_key/metadata.json b/parser/testdata/00974_bitmapContains_with_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00974_bitmapContains_with_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00974_bitmapContains_with_primary_key/query.sql b/parser/testdata/00974_bitmapContains_with_primary_key/query.sql new file mode 100644 index 000000000..520b4a030 --- /dev/null +++ b/parser/testdata/00974_bitmapContains_with_primary_key/query.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (num UInt64, str String) ENGINE = MergeTree ORDER BY num; +INSERT INTO test (num) VALUES (1), (2), (10), (15), (23); +SELECT count(*) FROM test WHERE bitmapContains(bitmapBuild([1, 5, 7, 9]), toUInt8(num)); +SELECT count(*) FROM test WHERE bitmapContains(bitmapBuild([1, 5, 7, 9]), toUInt16(num)); +SELECT count(*) FROM test WHERE bitmapContains(bitmapBuild([1, 5, 7, 9]), toUInt32(num)); +SELECT count(*) FROM test WHERE bitmapContains(bitmapBuild([1, 5, 7, 9]), toUInt64(num)); +DROP TABLE test; diff --git a/parser/testdata/00974_distributed_join_on/ast.json b/parser/testdata/00974_distributed_join_on/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00974_distributed_join_on/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00974_distributed_join_on/metadata.json b/parser/testdata/00974_distributed_join_on/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00974_distributed_join_on/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00974_distributed_join_on/query.sql b/parser/testdata/00974_distributed_join_on/query.sql new file mode 100644 index 000000000..2138fb090 --- /dev/null +++ b/parser/testdata/00974_distributed_join_on/query.sql @@ -0,0 +1,36 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS source_table1; +DROP TABLE IF EXISTS source_table2; +DROP TABLE IF EXISTS distributed_table1; +DROP TABLE IF EXISTS distributed_table2; + +CREATE TABLE source_table1 (a Int64, b String) ENGINE = Memory; +CREATE TABLE source_table2 (c Int64, d String) ENGINE = Memory; + +INSERT INTO source_table1 VALUES (42, 'qwe'); +INSERT INTO source_table2 VALUES (42, 'qwe'); + +CREATE TABLE distributed_table1 AS source_table1 +ENGINE = Distributed('test_shard_localhost', currentDatabase(), source_table1); + +CREATE TABLE distributed_table2 AS source_table2 +ENGINE = Distributed('test_shard_localhost', currentDatabase(), source_table2); + +SET prefer_localhost_replica = 1; +SELECT 1 FROM distributed_table1 AS t1 GLOBAL JOIN distributed_table2 AS t2 ON t1.a = t2.c LIMIT 1; +SELECT 1 FROM distributed_table1 AS t1 GLOBAL JOIN distributed_table2 AS t2 ON t2.c = t1.a LIMIT 1; +SELECT 1 FROM distributed_table1 AS t1 GLOBAL JOIN distributed_table1 AS t2 ON t1.a = t2.a LIMIT 1; + +SET prefer_localhost_replica = 0; +SELECT 1 FROM distributed_table1 AS t1 GLOBAL JOIN distributed_table2 AS t2 ON t1.a = t2.c LIMIT 1; +SELECT 1 FROM distributed_table1 AS t1 GLOBAL JOIN distributed_table2 AS t2 ON t2.c = t1.a LIMIT 1; +SELECT 1 FROM distributed_table1 AS t1 GLOBAL JOIN distributed_table1 AS t2 ON t1.a = t2.a LIMIT 1; + +SELECT t1.a as t1_a, 
t2.a as t2_a FROM source_table1 AS t1 JOIN source_table1 AS t2 ON t1_a = t2_a LIMIT 1; +SELECT t1.a as t1_a, t2.a as t2_a FROM distributed_table1 AS t1 GLOBAL JOIN distributed_table1 AS t2 ON t1_a = t2_a LIMIT 1; + +DROP TABLE source_table1; +DROP TABLE source_table2; +DROP TABLE distributed_table1; +DROP TABLE distributed_table2; diff --git a/parser/testdata/00974_final_predicate_push_down/ast.json b/parser/testdata/00974_final_predicate_push_down/ast.json new file mode 100644 index 000000000..365488285 --- /dev/null +++ b/parser/testdata/00974_final_predicate_push_down/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_00974 (children 1)" + }, + { + "explain": " Identifier test_00974" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001064839, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/00974_final_predicate_push_down/metadata.json b/parser/testdata/00974_final_predicate_push_down/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00974_final_predicate_push_down/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00974_final_predicate_push_down/query.sql b/parser/testdata/00974_final_predicate_push_down/query.sql new file mode 100644 index 000000000..7a6378692 --- /dev/null +++ b/parser/testdata/00974_final_predicate_push_down/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS test_00974; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE test_00974 +( + date Date, + x Int32, + ver UInt64 +) +ENGINE = ReplacingMergeTree(date, x, 1); + +INSERT INTO test_00974 VALUES ('2019-07-23', 1, 1), ('2019-07-23', 1, 2); +INSERT INTO test_00974 VALUES ('2019-07-23', 2, 1), ('2019-07-23', 2, 2); + +SELECT COUNT() FROM (SELECT * FROM test_00974 FINAL) where x = 1 SETTINGS enable_optimize_predicate_expression_to_final_subquery = 0; +SELECT COUNT() FROM (SELECT * FROM test_00974 FINAL) where x = 1 SETTINGS enable_optimize_predicate_expression_to_final_subquery = 1, max_rows_to_read = 2; + +DROP TABLE test_00974; diff --git a/parser/testdata/00974_fix_join_on/ast.json b/parser/testdata/00974_fix_join_on/ast.json new file mode 100644 index 000000000..9e7a99182 --- /dev/null +++ b/parser/testdata/00974_fix_join_on/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00157429, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/00974_fix_join_on/metadata.json b/parser/testdata/00974_fix_join_on/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00974_fix_join_on/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00974_fix_join_on/query.sql b/parser/testdata/00974_fix_join_on/query.sql new file mode 100644 index 000000000..1b3c7d867 --- /dev/null +++ b/parser/testdata/00974_fix_join_on/query.sql @@ -0,0 +1,79 @@ +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; + +create table t1 (a UInt32, b String) engine = Memory; +create table t2 (c UInt32, d String) engine = Memory; +create table t3 (a UInt32) engine = Memory; + +insert into t1 values (1, 'x'), (2, 'y'), (3, 'z'); +insert into t2 values (2, 'w'), (4, 'y'); +insert into t3 values (3); + +set enable_optimize_predicate_expression = 
0; + +select * from t1 join t2 on a = c; +select * from t1 join t2 on c = a; + +select t1.a, t2.c from t1 join t2 on a = c; +select t1.a, t2.c from t1 join t2 on c = a; +select t1.b, t2.d from t1 join t2 on a = c; +select t1.b, t2.d from t1 join t2 on c = a; + +select a, c from t1 join t2 on a = c; +select a, c from t1 join t2 on c = a; +select b, d from t1 join t2 on a = c; +select b, d from t1 join t2 on c = a; + +select b as a, d as c from t1 join t2 on a = c; +select b as a, d as c from t1 join t2 on c = a; +select b as c, d as a from t1 join t2 on a = c; +select b as c, d as a from t1 join t2 on c = a; + +select t1.a as a, t2.c as c from t1 join t2 on a = c; +select t1.a as a, t2.c as c from t1 join t2 on c = a; +select t1.a as c, t2.c as a from t1 join t2 on a = c; +select t1.a as c, t2.c as a from t1 join t2 on c = a; + +select t1.a as c, t2.c as a from t1 join t2 on t1.a = t2.c; +select t1.a as c, t2.c as a from t1 join t2 on t2.c = t1.a; + +select t1.a, t3.a from t1 join t3 on t1.a = t3.a; +select t1.a as t1_a, t3.a as t3_a from t1 join t3 on t1_a = t3_a; +select table1.a as t1_a, table3.a as t3_a from t1 as table1 join t3 as table3 on t1_a = t3_a; + +set enable_optimize_predicate_expression = 1; + +select * from t1 join t2 on a = c; +select * from t1 join t2 on c = a; + +select t1.a, t2.c from t1 join t2 on a = c; +select t1.a, t2.c from t1 join t2 on c = a; +select t1.b, t2.d from t1 join t2 on a = c; +select t1.b, t2.d from t1 join t2 on c = a; + +select a, c from t1 join t2 on a = c; +select a, c from t1 join t2 on c = a; +select b, d from t1 join t2 on a = c; +select b, d from t1 join t2 on c = a; + +select b as a, d as c from t1 join t2 on a = c; +select b as a, d as c from t1 join t2 on c = a; +select b as c, d as a from t1 join t2 on a = c; +select b as c, d as a from t1 join t2 on c = a; + +select t1.a as a, t2.c as c from t1 join t2 on a = c; +select t1.a as a, t2.c as c from t1 join t2 on c = a; +select t1.a as c, t2.c as a from t1 join t2 on a = c; +select t1.a as c, t2.c as a from t1 join t2 on c = a; + +select t1.a as c, t2.c as a from t1 join t2 on t1.a = t2.c; +select t1.a as c, t2.c as a from t1 join t2 on t2.c = t1.a; + +select t1.a, t3.a from t1 join t3 on t1.a = t3.a; +select t1.a as t1_a, t3.a as t3_a from t1 join t3 on t1_a = t3_a; +select table1.a as t1_a, table3.a as t3_a from t1 as table1 join t3 as table3 on t1_a = t3_a; + +drop table t1; +drop table t2; +drop table t3; diff --git a/parser/testdata/00974_full_outer_join/ast.json b/parser/testdata/00974_full_outer_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00974_full_outer_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00974_full_outer_join/metadata.json b/parser/testdata/00974_full_outer_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00974_full_outer_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00974_full_outer_join/query.sql b/parser/testdata/00974_full_outer_join/query.sql new file mode 100644 index 000000000..2d4ebf976 --- /dev/null +++ b/parser/testdata/00974_full_outer_join/query.sql @@ -0,0 +1,21 @@ +SELECT + q0.dt, + q0.cnt, + q1.cnt2 +FROM +( + SELECT + toDate(addDays(toDate('2015-12-01'), number)) AS dt, + sum(number) AS cnt + FROM numbers(2) + GROUP BY dt +) AS q0 +ALL FULL OUTER JOIN +( + SELECT + toDate(addDays(toDate('2015-12-01'), number)) AS dt, + sum(number) AS cnt2 + FROM numbers(5) + GROUP BY dt +) AS q1 ON q0.dt = 
q1.dt +ORDER BY q1.cnt2; diff --git a/parser/testdata/00974_low_cardinality_cast/ast.json b/parser/testdata/00974_low_cardinality_cast/ast.json new file mode 100644 index 000000000..c171cf26c --- /dev/null +++ b/parser/testdata/00974_low_cardinality_cast/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001198986, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00974_low_cardinality_cast/metadata.json b/parser/testdata/00974_low_cardinality_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00974_low_cardinality_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00974_low_cardinality_cast/query.sql b/parser/testdata/00974_low_cardinality_cast/query.sql new file mode 100644 index 000000000..04a6785f8 --- /dev/null +++ b/parser/testdata/00974_low_cardinality_cast/query.sql @@ -0,0 +1,8 @@ +SET cast_keep_nullable = 0; + +SELECT CAST('Hello' AS LowCardinality(Nullable(String))); +SELECT CAST(Null AS LowCardinality(Nullable(String))); +SELECT CAST(CAST('Hello' AS LowCardinality(Nullable(String))) AS String); +SELECT CAST(CAST(Null AS LowCardinality(Nullable(String))) AS String); -- { serverError CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN } +SELECT CAST(CAST('Hello' AS Nullable(String)) AS String); +SELECT CAST(CAST(Null AS Nullable(String)) AS String); -- { serverError CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN } diff --git a/parser/testdata/00974_query_profiler/ast.json b/parser/testdata/00974_query_profiler/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00974_query_profiler/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00974_query_profiler/metadata.json b/parser/testdata/00974_query_profiler/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00974_query_profiler/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00974_query_profiler/query.sql b/parser/testdata/00974_query_profiler/query.sql new file mode 100644 index 000000000..1c3deff99 --- /dev/null +++ b/parser/testdata/00974_query_profiler/query.sql @@ -0,0 +1,26 @@ +-- Tags: no-tsan, no-asan, no-ubsan, no-msan, no-debug, no-fasttest +-- Tag no-fasttest: Not sure why it fails even in sequential mode. Disabled for now to make some progress. + +SET allow_introspection_functions = 1; +SET trace_profile_events = 0; -- This can inhibit the profiler from working, because it prevents sending samples from different profilers concurrently.
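+-- (Hedged summary: each block below enables a single profiler, runs a tagged marker
+-- query, flushes query_log and trace_log, then looks up the marker's query_id and
+-- asserts that at least one sampled stack symbolizes to the expected frame:
+-- FunctionSleep for the real-time profiler, a Source frame for the CPU-time one.)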
+ +SET query_profiler_cpu_time_period_ns = 0; +SET query_profiler_real_time_period_ns = 100000000; +SET log_queries = 1; +SELECT sleep(0.5), ignore('test real time query profiler'); +SET log_queries = 0; +SYSTEM FLUSH LOGS trace_log, query_log; + +WITH addressToLine(arrayJoin(trace) AS addr) || '#' || demangle(addressToSymbol(addr)) AS symbol +SELECT count() > 0 FROM system.trace_log t WHERE query_id = (SELECT query_id FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE '%test real time query profiler%' AND query NOT LIKE '%system%' ORDER BY event_time DESC LIMIT 1) AND symbol LIKE '%FunctionSleep%'; + +SET query_profiler_real_time_period_ns = 0; +SET query_profiler_cpu_time_period_ns = 1000000; +SET log_queries = 1; +SET max_rows_to_read = 0; +SELECT count(), ignore('test cpu time query profiler') FROM numbers_mt(10000000000); +SET log_queries = 0; +SYSTEM FLUSH LOGS trace_log, query_log; + +WITH addressToLine(arrayJoin(trace) AS addr) || '#' || demangle(addressToSymbol(addr)) AS symbol +SELECT count() > 0 FROM system.trace_log t WHERE query_id = (SELECT query_id FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE '%test cpu time query profiler%' AND query NOT LIKE '%system%' ORDER BY event_time DESC LIMIT 1) AND symbol LIKE '%Source%'; diff --git a/parser/testdata/00975_json_hang/ast.json b/parser/testdata/00975_json_hang/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00975_json_hang/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00975_json_hang/metadata.json b/parser/testdata/00975_json_hang/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00975_json_hang/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00975_json_hang/query.sql b/parser/testdata/00975_json_hang/query.sql new file mode 100644 index 000000000..4c2a1a8ee --- /dev/null +++ b/parser/testdata/00975_json_hang/query.sql @@ -0,0 +1,2 @@ + +SELECT DISTINCT JSONExtractRaw(concat('{"x":', rand() % 2 ? 
'true' : 'false', '}'), 'x') AS res FROM numbers(1000000) ORDER BY res; diff --git a/parser/testdata/00975_move_partition_merge_tree/ast.json b/parser/testdata/00975_move_partition_merge_tree/ast.json new file mode 100644 index 000000000..84d101da6 --- /dev/null +++ b/parser/testdata/00975_move_partition_merge_tree/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_move_partition_src (children 1)" + }, + { + "explain": " Identifier test_move_partition_src" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001370317, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/00975_move_partition_merge_tree/metadata.json b/parser/testdata/00975_move_partition_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00975_move_partition_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00975_move_partition_merge_tree/query.sql b/parser/testdata/00975_move_partition_merge_tree/query.sql new file mode 100644 index 000000000..c17f7c57d --- /dev/null +++ b/parser/testdata/00975_move_partition_merge_tree/query.sql @@ -0,0 +1,30 @@ +DROP TABLE IF EXISTS test_move_partition_src; +DROP TABLE IF EXISTS test_move_partition_dest; + +CREATE TABLE IF NOT EXISTS test_move_partition_src ( + pk UInt8, + val UInt32 +) Engine = MergeTree() + PARTITION BY pk + ORDER BY (pk, val) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +CREATE TABLE IF NOT EXISTS test_move_partition_dest ( + pk UInt8, + val UInt32 +) Engine = MergeTree() + PARTITION BY pk + ORDER BY (pk, val) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO test_move_partition_src SELECT number % 2, number FROM system.numbers LIMIT 10000000; + +SELECT count() FROM test_move_partition_src; +SELECT count() FROM test_move_partition_dest; + +ALTER TABLE test_move_partition_src MOVE PARTITION 1 TO TABLE test_move_partition_dest; +ALTER TABLE test_move_partition_src MOVE PART '0_1_1_0' TO TABLE test_move_partition_dest; -- { clientError SYNTAX_ERROR } + +SELECT count() FROM test_move_partition_src; +SELECT count() FROM test_move_partition_dest; + +DROP TABLE test_move_partition_src; +DROP TABLE test_move_partition_dest; diff --git a/parser/testdata/00975_recursive_materialized_view/ast.json b/parser/testdata/00975_recursive_materialized_view/ast.json new file mode 100644 index 000000000..eca9f2d29 --- /dev/null +++ b/parser/testdata/00975_recursive_materialized_view/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery src (children 1)" + }, + { + "explain": " Identifier src" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001375898, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/00975_recursive_materialized_view/metadata.json b/parser/testdata/00975_recursive_materialized_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00975_recursive_materialized_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00975_recursive_materialized_view/query.sql b/parser/testdata/00975_recursive_materialized_view/query.sql new file mode 100644 index 000000000..cd5330553 --- /dev/null +++ b/parser/testdata/00975_recursive_materialized_view/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS src; +DROP TABLE IF EXISTS dst1; 
+DROP TABLE IF EXISTS src_to_dst1; +DROP TABLE IF EXISTS dst2; + +CREATE TABLE src (x UInt8) ENGINE Memory; +CREATE TABLE dst1 (x UInt8) ENGINE Memory; +CREATE MATERIALIZED VIEW src_to_dst1 TO dst1 AS SELECT x + 1 as x FROM src; +CREATE MATERIALIZED VIEW dst2 ENGINE Memory AS SELECT x + 1 as x FROM dst1; + +INSERT INTO src VALUES (1), (2); +SELECT * FROM dst1 ORDER BY x; +SELECT * FROM dst2 ORDER BY x; + +DROP TABLE src; +DROP TABLE src_to_dst1; +DROP TABLE dst1; +DROP TABLE dst2; diff --git a/parser/testdata/00975_sample_prewhere_distributed/ast.json b/parser/testdata/00975_sample_prewhere_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00975_sample_prewhere_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00975_sample_prewhere_distributed/metadata.json b/parser/testdata/00975_sample_prewhere_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00975_sample_prewhere_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00975_sample_prewhere_distributed/query.sql b/parser/testdata/00975_sample_prewhere_distributed/query.sql new file mode 100644 index 000000000..15e12c4ce --- /dev/null +++ b/parser/testdata/00975_sample_prewhere_distributed/query.sql @@ -0,0 +1,11 @@ +-- Tags: distributed + +create table if not exists sample_prewhere (date Date, id Int32, time Int64) engine = MergeTree partition by date order by (id, time, intHash64(time)) sample by intHash64(time); + +insert into sample_prewhere values ('2019-01-01', 2, toDateTime('2019-07-20 00:00:01')); +insert into sample_prewhere values ('2019-01-01', 1, toDateTime('2019-07-20 00:00:02')); +insert into sample_prewhere values ('2019-01-02', 3, toDateTime('2019-07-20 00:00:03')); + +select id from remote('127.0.0.{1,3}', currentDatabase(), sample_prewhere) SAMPLE 1 where toDateTime(time) = '2019-07-20 00:00:00'; + +drop table sample_prewhere; diff --git a/parser/testdata/00975_values_list/ast.json b/parser/testdata/00975_values_list/ast.json new file mode 100644 index 000000000..fecb76a93 --- /dev/null +++ b/parser/testdata/00975_values_list/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery values_list (children 1)" + }, + { + "explain": " Identifier values_list" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001706747, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/00975_values_list/metadata.json b/parser/testdata/00975_values_list/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00975_values_list/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00975_values_list/query.sql b/parser/testdata/00975_values_list/query.sql new file mode 100644 index 000000000..c1e3a2fbf --- /dev/null +++ b/parser/testdata/00975_values_list/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS values_list; + +SELECT * FROM VALUES('a UInt64, s String', (1, 'one'), (2, 'two'), (3, 'three')); +CREATE TABLE values_list AS VALUES('a UInt64, s String', (1, 'one'), (2, 'two'), (3, 'three')); +SELECT * FROM values_list; + +SELECT subtractYears(date, 1), subtractYears(date_time, 1) FROM VALUES('date Date, date_time DateTime', (toDate('2019-01-01'), toDateTime('2019-01-01 00:00:00'))); + +SELECT * FROM VALUES('s String', ('abra'), ('cadabra'), ('abracadabra')); + +SELECT * FROM VALUES('n UInt64, s 
String, ss String', (1 + 22, '23', toString(23)), (toUInt64('24'), '24', concat('2', '4'))); + +SELECT * FROM VALUES('a Decimal(4, 4), b String, c String', (divide(toDecimal32(5, 3), 3), 'a', 'b')); + +SELECT * FROM VALUES('x Float64', toUInt64(-1)); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT * FROM VALUES('x Float64', NULL); -- { serverError TYPE_MISMATCH } +SELECT * FROM VALUES('x Nullable(Float64)', NULL); + +DROP TABLE values_list; diff --git a/parser/testdata/00976_max_execution_speed/ast.json b/parser/testdata/00976_max_execution_speed/ast.json new file mode 100644 index 000000000..4aa00214a --- /dev/null +++ b/parser/testdata/00976_max_execution_speed/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001134665, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00976_max_execution_speed/metadata.json b/parser/testdata/00976_max_execution_speed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00976_max_execution_speed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00976_max_execution_speed/query.sql b/parser/testdata/00976_max_execution_speed/query.sql new file mode 100644 index 000000000..413747127 --- /dev/null +++ b/parser/testdata/00976_max_execution_speed/query.sql @@ -0,0 +1,2 @@ +SET max_execution_speed = 1, max_execution_time = 3, max_rows_to_read = 0; +SELECT count() FROM system.numbers; -- { serverError TIMEOUT_EXCEEDED } diff --git a/parser/testdata/00976_shard_low_cardinality_achimbab/ast.json b/parser/testdata/00976_shard_low_cardinality_achimbab/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00976_shard_low_cardinality_achimbab/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00976_shard_low_cardinality_achimbab/metadata.json b/parser/testdata/00976_shard_low_cardinality_achimbab/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00976_shard_low_cardinality_achimbab/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00976_shard_low_cardinality_achimbab/query.sql b/parser/testdata/00976_shard_low_cardinality_achimbab/query.sql new file mode 100644 index 000000000..945292c54 --- /dev/null +++ b/parser/testdata/00976_shard_low_cardinality_achimbab/query.sql @@ -0,0 +1,8 @@ +-- Tags: shard + +DROP TABLE IF EXISTS han_1; +CREATE TABLE han_1 (k Int32, date_dt LowCardinality(Nullable(String))) +ENGINE = MergeTree() PARTITION BY k ORDER BY k; +INSERT INTO han_1 values (1, '2019-07-31'); +SELECT k, uniq(date_dt) FROM remote('127.0.0.{1,2}', currentDatabase(), han_1) GROUP BY k; +DROP TABLE IF EXISTS han_1; diff --git a/parser/testdata/00976_system_stop_ttl_merges/ast.json b/parser/testdata/00976_system_stop_ttl_merges/ast.json new file mode 100644 index 000000000..54678810b --- /dev/null +++ b/parser/testdata/00976_system_stop_ttl_merges/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ttl (children 1)" + }, + { + "explain": " Identifier ttl" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001154295, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/00976_system_stop_ttl_merges/metadata.json b/parser/testdata/00976_system_stop_ttl_merges/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00976_system_stop_ttl_merges/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00976_system_stop_ttl_merges/query.sql b/parser/testdata/00976_system_stop_ttl_merges/query.sql new file mode 100644 index 000000000..2ab85d90a --- /dev/null +++ b/parser/testdata/00976_system_stop_ttl_merges/query.sql @@ -0,0 +1,18 @@ +drop table if exists ttl; + +create table ttl (d Date, a Int) engine = MergeTree order by a partition by toDayOfMonth(d) ttl d + interval 1 day; + +system stop ttl merges ttl; + +insert into ttl values (toDateTime('2000-10-10 00:00:00'), 1), (toDateTime('2000-10-10 00:00:00'), 2); +insert into ttl values (toDateTime('2100-10-10 00:00:00'), 3), (toDateTime('2100-10-10 00:00:00'), 4); + +select sleep(1) format Null; -- wait in case a very fast merge happens +optimize table ttl partition 10 final; +select * from ttl order by d, a; + +system start ttl merges ttl; +optimize table ttl partition 10 final; +select * from ttl order by d, a; + +drop table if exists ttl; diff --git a/parser/testdata/00976_ttl_with_old_parts/ast.json b/parser/testdata/00976_ttl_with_old_parts/ast.json new file mode 100644 index 000000000..d8de1d187 --- /dev/null +++ b/parser/testdata/00976_ttl_with_old_parts/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ttl (children 1)" + }, + { + "explain": " Identifier ttl" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001146388, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/00976_ttl_with_old_parts/metadata.json b/parser/testdata/00976_ttl_with_old_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00976_ttl_with_old_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00976_ttl_with_old_parts/query.sql b/parser/testdata/00976_ttl_with_old_parts/query.sql new file mode 100644 index 000000000..084112681 --- /dev/null +++ b/parser/testdata/00976_ttl_with_old_parts/query.sql @@ -0,0 +1,16 @@ +drop table if exists ttl; + +create table ttl (d Date, a Int) engine = MergeTree order by a partition by toDayOfMonth(d) settings remove_empty_parts = 0; +insert into ttl values (toDateTime('2000-10-10 00:00:00'), 1); +insert into ttl values (toDateTime('2000-10-10 00:00:00'), 2); +insert into ttl values (toDateTime('2100-10-10 00:00:00'), 3); +insert into ttl values (toDateTime('2100-10-10 00:00:00'), 4); + +alter table ttl modify ttl d + interval 1 day; + +select sleep(1) format Null; -- wait in case a very fast merge happens +optimize table ttl partition 10 final; + +select * from ttl order by d, a; + +drop table if exists ttl; diff --git a/parser/testdata/00977_int_div/ast.json b/parser/testdata/00977_int_div/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00977_int_div/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00977_int_div/metadata.json b/parser/testdata/00977_int_div/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00977_int_div/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00977_int_div/query.sql b/parser/testdata/00977_int_div/query.sql new file mode 100644 index 000000000..04fafbfcd --- /dev/null +++ b/parser/testdata/00977_int_div/query.sql @@ -0,0 +1,31 @@ +SELECT + sum(ASD) AS asd, + intDiv(toInt64(asd), abs(toInt64(asd))) AS int_div_with_abs, + 
intDiv(toInt64(asd), toInt64(asd)) AS int_div_without_abs +FROM +( + SELECT ASD + FROM + ( + SELECT [-1000, -1000] AS asds + ) + ARRAY JOIN asds AS ASD +); + +SELECT intDivOrZero( CAST(-1000, 'Int64') , CAST(1000, 'UInt64') ); +SELECT intDivOrZero( CAST(-1000, 'Int64') , CAST(1000, 'Int64') ); + +SELECT intDiv(-1, number) FROM numbers(1, 10); +SELECT intDivOrZero(-1, number) FROM numbers(1, 10); +SELECT intDiv(toInt32(number), -1) FROM numbers(1, 10); +SELECT intDivOrZero(toInt32(number), -1) FROM numbers(1, 10); +SELECT intDiv(toInt64(number), -1) FROM numbers(1, 10); +SELECT intDivOrZero(toInt64(number), -1) FROM numbers(1, 10); +SELECT intDiv(number, -number) FROM numbers(1, 10); +SELECT intDivOrZero(number, -number) FROM numbers(1, 10); + +SELECT -1 DIV number FROM numbers(1, 10); +SELECT toInt32(number) DIV -1 FROM numbers(1, 10); +SELECT toInt64(number) DIV -1 FROM numbers(1, 10); +SELECT number DIV -number FROM numbers(1, 10); +SELECT -1 DIV 0; -- { serverError ILLEGAL_DIVISION } diff --git a/parser/testdata/00977_join_use_nulls_denny_crane/ast.json b/parser/testdata/00977_join_use_nulls_denny_crane/ast.json new file mode 100644 index 000000000..c994186ee --- /dev/null +++ b/parser/testdata/00977_join_use_nulls_denny_crane/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001436362, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/00977_join_use_nulls_denny_crane/metadata.json b/parser/testdata/00977_join_use_nulls_denny_crane/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00977_join_use_nulls_denny_crane/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00977_join_use_nulls_denny_crane/query.sql b/parser/testdata/00977_join_use_nulls_denny_crane/query.sql new file mode 100644 index 000000000..286703d1b --- /dev/null +++ b/parser/testdata/00977_join_use_nulls_denny_crane/query.sql @@ -0,0 +1,72 @@ +drop table if exists t; +drop table if exists s; + +create table t(a Int64, b Int64, c String) engine = MergeTree ORDER BY tuple(); +create table s(a Int64, b Int64, c String) engine = MergeTree ORDER BY tuple(); + +insert into t values(1,1,'a'); +insert into s values(2,2,'a'); + +select t.*, s.a, s.b, s.c from t left join s on (s.a = t.a and s.b = t.b); +select t.*, s.a, s.b, s.c from t right join s on (s.a = t.a and s.b = t.b); +select t.*, s.a, s.b, s.c from t left join s on (s.a = t.a and s.b = t.b) SETTINGS join_use_nulls = 1; +select t.*, s.a, s.b, s.c from t right join s on (s.a = t.a and s.b = t.b) SETTINGS join_use_nulls = 1; + +drop table t; +drop table s; + +create table t(a Int64, b Int64, c Nullable(String)) engine = MergeTree ORDER BY tuple(); +create table s(a Int64, b Int64, c Nullable(String)) engine = MergeTree ORDER BY tuple(); + +insert into t values(1,1,'a'); +insert into s values(2,2,'a'); + +select * from t left join s on (s.a = t.a and s.b = t.b); +select * from t right join s on (s.a = t.a and s.b = t.b); +select t.*, s.* from t left join s on (s.a = t.a and s.b = t.b) SETTINGS join_use_nulls = 1; +select t.*, s.* from t right join s on (s.a = t.a and s.b = t.b) SETTINGS join_use_nulls = 1; + +drop table t; +drop table s; + +create table t(a Int64, b Nullable(Int64), c String) engine = MergeTree ORDER BY tuple(); +create table s(a Int64, b Nullable(Int64), c String) 
engine = MergeTree ORDER BY tuple(); + +insert into t values(1,1,'a'); +insert into s values(2,2,'a'); + +select t.*, s.* from t left join s on (s.a = t.a and s.b = t.b); +select t.*, s.* from t right join s on (s.a = t.a and s.b = t.b); +select * from t left join s on (s.a = t.a and s.b = t.b) SETTINGS join_use_nulls = 1; +select * from t right join s on (s.a = t.a and s.b = t.b) SETTINGS join_use_nulls = 1; + +drop table t; +drop table s; + +create table t(a Int64, b Nullable(Int64), c Nullable(String)) engine = MergeTree ORDER BY tuple(); +create table s(a Int64, b Nullable(Int64), c Nullable(String)) engine = MergeTree ORDER BY tuple(); + +insert into t values(1,1,'a'); +insert into s values(2,2,'a'); + +select t.*, s.a, s.b, s.c from t left join s on (s.a = t.a and s.b = t.b); +select t.*, s.a, s.b, s.c from t right join s on (s.a = t.a and s.b = t.b); +select * from t left join s on (s.a = t.a and s.b = t.b) SETTINGS join_use_nulls = 1; +select * from t right join s on (s.a = t.a and s.b = t.b) SETTINGS join_use_nulls = 1; + +drop table t; +drop table s; + +create table t(a Nullable(Int64), b Nullable(Int64), c Nullable(String)) engine = MergeTree ORDER BY tuple(); +create table s(a Nullable(Int64), b Nullable(Int64), c Nullable(String)) engine = MergeTree ORDER BY tuple(); + +insert into t values(1,1,'a'); +insert into s values(2,2,'a'); + +select * from t left join s on (s.a = t.a and s.b = t.b); +select * from t right join s on (s.a = t.a and s.b = t.b); +select t.*, s.a, s.b, s.c from t left join s on (s.a = t.a and s.b = t.b) SETTINGS join_use_nulls = 1; +select t.*, s.a, s.b, s.c from t right join s on (s.a = t.a and s.b = t.b) SETTINGS join_use_nulls = 1; + +drop table t; +drop table s; diff --git a/parser/testdata/00978_ml_math/ast.json b/parser/testdata/00978_ml_math/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00978_ml_math/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00978_ml_math/metadata.json b/parser/testdata/00978_ml_math/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00978_ml_math/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00978_ml_math/query.sql b/parser/testdata/00978_ml_math/query.sql new file mode 100644 index 000000000..8523eb05a --- /dev/null +++ b/parser/testdata/00978_ml_math/query.sql @@ -0,0 +1,4 @@ +SELECT + round(sigmoid(x), 5), round(sigmoid(toFloat32(x)), 5), round(sigmoid(toFloat64(x)), 5), + round(tanh(x), 5), round(TANH(toFloat32(x)), 5), round(TANh(toFloat64(x)), 5) +FROM (SELECT arrayJoin([-1, 0, 1]) AS x); diff --git a/parser/testdata/00978_sum_map_bugfix/ast.json b/parser/testdata/00978_sum_map_bugfix/ast.json new file mode 100644 index 000000000..c38853f37 --- /dev/null +++ b/parser/testdata/00978_sum_map_bugfix/ast.json @@ -0,0 +1,115 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function arrayReduce (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'sumMap'" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Function 
array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier b" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal Array_[UInt64_100, UInt64_100, UInt64_200] (alias a)" + }, + { + "explain": " Literal Array_[UInt64_10, UInt64_20, UInt64_30] (alias b)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 31, + + "statistics": + { + "elapsed": 0.001787539, + "rows_read": 31, + "bytes_read": 1347 + } +} diff --git a/parser/testdata/00978_sum_map_bugfix/metadata.json b/parser/testdata/00978_sum_map_bugfix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00978_sum_map_bugfix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00978_sum_map_bugfix/query.sql b/parser/testdata/00978_sum_map_bugfix/query.sql new file mode 100644 index 000000000..091c56c68 --- /dev/null +++ b/parser/testdata/00978_sum_map_bugfix/query.sql @@ -0,0 +1,7 @@ +select number, arrayReduce( 'sumMap', [a],[b] ) from (select [100,100,200] a,[10,20,30] b, number from numbers(1)); +select number, arrayReduce( 'sumMap', [a],[b] ) from (select materialize([100,100,200]) a,materialize([10,20,30]) b, number from numbers(10)); +select number, arrayReduce( 'sumMap', [a],[b] ) from (select [100,100,200] a,[10,20,30] b, number from numbers(10)); +select number, arrayReduce( 'sum', a) from (select materialize([100,100,200]) a, number from numbers(10)); +select number, arrayReduce( 'max', [a] ) from (select materialize([100,100,200]) a, number from numbers(10)); + +select dumpColumnStructure([a]), arrayReduce('sumMap', [a], [a]) from (select [1, 2] a FROM numbers(2)); diff --git a/parser/testdata/00978_table_function_values_alias/ast.json b/parser/testdata/00978_table_function_values_alias/ast.json new file mode 100644 index 000000000..c382196dd --- /dev/null +++ b/parser/testdata/00978_table_function_values_alias/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier s" + }, + { + "explain": " Identifier z" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function VALUES (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'x UInt64, s String, z ALIAS concat(toString(x), \\': \\', s)'" + }, + { + "explain": " Literal Tuple_(UInt64_1, 'hello')" + }, + { 
+ "explain": " Literal Tuple_(UInt64_2, 'world')" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001281842, + "rows_read": 15, + "bytes_read": 622 + } +} diff --git a/parser/testdata/00978_table_function_values_alias/metadata.json b/parser/testdata/00978_table_function_values_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00978_table_function_values_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00978_table_function_values_alias/query.sql b/parser/testdata/00978_table_function_values_alias/query.sql new file mode 100644 index 000000000..93da57ae2 --- /dev/null +++ b/parser/testdata/00978_table_function_values_alias/query.sql @@ -0,0 +1 @@ +SELECT x, s, z FROM VALUES('x UInt64, s String, z ALIAS concat(toString(x), \': \', s)', (1, 'hello'), (2, 'world')); diff --git a/parser/testdata/00979_quantileExcatExclusive_and_Inclusive/ast.json b/parser/testdata/00979_quantileExcatExclusive_and_Inclusive/ast.json new file mode 100644 index 000000000..f91532d58 --- /dev/null +++ b/parser/testdata/00979_quantileExcatExclusive_and_Inclusive/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery num (children 1)" + }, + { + "explain": " Identifier num" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001574046, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/00979_quantileExcatExclusive_and_Inclusive/metadata.json b/parser/testdata/00979_quantileExcatExclusive_and_Inclusive/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00979_quantileExcatExclusive_and_Inclusive/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00979_quantileExcatExclusive_and_Inclusive/query.sql b/parser/testdata/00979_quantileExcatExclusive_and_Inclusive/query.sql new file mode 100644 index 000000000..99cbcfbd6 --- /dev/null +++ b/parser/testdata/00979_quantileExcatExclusive_and_Inclusive/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS num; +CREATE TABLE num AS numbers(1000); + +SELECT quantilesExactExclusive(0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 0.999)(x) FROM (SELECT number AS x FROM num); +SELECT quantilesExactInclusive(0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 0.999)(x) FROM (SELECT number AS x FROM num); +SELECT quantilesExact(0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 0.999)(x) FROM (SELECT number AS x FROM num); + +SELECT quantileExactExclusive(0.6)(x) FROM (SELECT number AS x FROM num); +SELECT quantileExactInclusive(0.6)(x) FROM (SELECT number AS x FROM num); +SELECT quantileExact(0.6)(x) FROM (SELECT number AS x FROM num); + +DROP TABLE num; diff --git a/parser/testdata/00979_set_index_not/ast.json b/parser/testdata/00979_set_index_not/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00979_set_index_not/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00979_set_index_not/metadata.json b/parser/testdata/00979_set_index_not/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00979_set_index_not/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00979_set_index_not/query.sql b/parser/testdata/00979_set_index_not/query.sql new file mode 100644 index 000000000..13a0b4cbb --- /dev/null +++ b/parser/testdata/00979_set_index_not/query.sql @@ -0,0 +1,17 @@ + +DROP TABLE IF EXISTS set_index_not; + +CREATE TABLE set_index_not +( 
name String, status Enum8('alive' = 0, 'rip' = 1), + INDEX idx_status status TYPE set(2) GRANULARITY 1 +) +ENGINE = MergeTree() ORDER BY name SETTINGS index_granularity = 8192; + +insert into set_index_not values ('Jon','alive'),('Ramsey','rip'); + +select * from set_index_not where status!='rip'; +select * from set_index_not where NOT (status ='rip'); +select * from set_index_not where NOT (status!='rip'); +select * from set_index_not where NOT (NOT (status ='rip')); + +DROP TABLE set_index_not; diff --git a/parser/testdata/00979_toFloat_monotonicity/ast.json b/parser/testdata/00979_toFloat_monotonicity/ast.json new file mode 100644 index 000000000..79504af66 --- /dev/null +++ b/parser/testdata/00979_toFloat_monotonicity/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001251737, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00979_toFloat_monotonicity/metadata.json b/parser/testdata/00979_toFloat_monotonicity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00979_toFloat_monotonicity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00979_toFloat_monotonicity/query.sql b/parser/testdata/00979_toFloat_monotonicity/query.sql new file mode 100644 index 000000000..63d577984 --- /dev/null +++ b/parser/testdata/00979_toFloat_monotonicity/query.sql @@ -0,0 +1,26 @@ +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +DROP TABLE IF EXISTS test1; +DROP TABLE IF EXISTS test2; +DROP TABLE IF EXISTS test3; +CREATE TABLE test1 (n UInt64) ENGINE = MergeTree ORDER BY n SETTINGS index_granularity = 1; +CREATE TABLE test2 (s String) ENGINE = MergeTree ORDER BY s SETTINGS index_granularity = 1; +CREATE TABLE test3 (d Decimal(4, 3)) ENGINE = MergeTree ORDER BY d SETTINGS index_granularity = 1; + +INSERT INTO test1 SELECT * FROM numbers(10000); +-- Set `parallel_replicas_index_analysis_only_on_coordinator = 0` to prevent remote replicas from skipping index analysis in Parallel Replicas. +-- Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. 
+SELECT n FROM test1 WHERE toFloat64(n) = 7777.0 SETTINGS max_rows_to_read = 2, parallel_replicas_index_analysis_only_on_coordinator = 0; +SELECT n FROM test1 WHERE toFloat32(n) = 7777.0 SETTINGS max_rows_to_read = 2, parallel_replicas_index_analysis_only_on_coordinator = 0; + +INSERT INTO test2 SELECT toString(number) FROM numbers(10000); +SELECT s FROM test2 WHERE toFloat64(s) = 7777.0; +SELECT s FROM test2 WHERE toFloat32(s) = 7777.0; + +INSERT INTO test3 SELECT toDecimal64(number, 3) FROM numbers(10000); +SELECT d FROM test3 WHERE toFloat64(d) = 7777.0 SETTINGS max_rows_to_read = 2, parallel_replicas_index_analysis_only_on_coordinator = 0; +SELECT d FROM test3 WHERE toFloat32(d) = 7777.0 SETTINGS max_rows_to_read = 2, parallel_replicas_index_analysis_only_on_coordinator = 0; + +DROP TABLE test1; +DROP TABLE test2; +DROP TABLE test3; diff --git a/parser/testdata/00979_yandex_consistent_hash_fpe/ast.json b/parser/testdata/00979_yandex_consistent_hash_fpe/ast.json new file mode 100644 index 000000000..099744662 --- /dev/null +++ b/parser/testdata/00979_yandex_consistent_hash_fpe/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function kostikConsistentHash (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Literal UInt64_40000" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001406129, + "rows_read": 8, + "bytes_read": 307 + } +} diff --git a/parser/testdata/00979_yandex_consistent_hash_fpe/metadata.json b/parser/testdata/00979_yandex_consistent_hash_fpe/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00979_yandex_consistent_hash_fpe/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00979_yandex_consistent_hash_fpe/query.sql b/parser/testdata/00979_yandex_consistent_hash_fpe/query.sql new file mode 100644 index 000000000..60b25111b --- /dev/null +++ b/parser/testdata/00979_yandex_consistent_hash_fpe/query.sql @@ -0,0 +1 @@ +SELECT kostikConsistentHash(-1, 40000); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/00980_crash_nullable_decimal/ast.json b/parser/testdata/00980_crash_nullable_decimal/ast.json new file mode 100644 index 000000000..ec5ef64de --- /dev/null +++ b/parser/testdata/00980_crash_nullable_decimal/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayReduce (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'median'" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDecimal32OrNull (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1'" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001682907, + "rows_read": 13, + "bytes_read": 509 + } +} diff --git 
a/parser/testdata/00980_crash_nullable_decimal/metadata.json b/parser/testdata/00980_crash_nullable_decimal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00980_crash_nullable_decimal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00980_crash_nullable_decimal/query.sql b/parser/testdata/00980_crash_nullable_decimal/query.sql new file mode 100644 index 000000000..f71214232 --- /dev/null +++ b/parser/testdata/00980_crash_nullable_decimal/query.sql @@ -0,0 +1,8 @@ +select arrayReduce('median', [toDecimal32OrNull('1', 2)]); +select arrayReduce('median', [toDecimal64OrNull('1', 2)]); +select arrayReduce('median', [toDecimal128OrZero('1', 2)]); +select arrayReduce('sum', [toDecimal128OrNull('1', 2)]); + +select arrayReduce('median', [toDecimal128OrNull('1', 2)]); +select arrayReduce('quantile(0.2)', [toDecimal128OrNull('1', 2)]); +select arrayReduce('medianExact', [toDecimal128OrNull('1', 2)]); diff --git a/parser/testdata/00980_full_join_crash_fancyqlx/ast.json b/parser/testdata/00980_full_join_crash_fancyqlx/ast.json new file mode 100644 index 000000000..53b6847f6 --- /dev/null +++ b/parser/testdata/00980_full_join_crash_fancyqlx/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_join (children 1)" + }, + { + "explain": " Identifier test_join" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001240839, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/00980_full_join_crash_fancyqlx/metadata.json b/parser/testdata/00980_full_join_crash_fancyqlx/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00980_full_join_crash_fancyqlx/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00980_full_join_crash_fancyqlx/query.sql b/parser/testdata/00980_full_join_crash_fancyqlx/query.sql new file mode 100644 index 000000000..20209f453 --- /dev/null +++ b/parser/testdata/00980_full_join_crash_fancyqlx/query.sql @@ -0,0 +1,15 @@ +drop table if exists test_join; + +create table test_join (date Date, id Int32, name Nullable(String)) engine = MergeTree partition by date order by id; + +insert into test_join values ('2019-01-01', 1, 'a'); +insert into test_join values ('2019-01-01', 2, 'b'); +insert into test_join values ('2019-01-01', 3, 'c'); +insert into test_join values ('2019-01-01', 1, null); + +SELECT id, date, name FROM (SELECT id, date, name FROM test_join GROUP BY id, name, date) js1 +FULL OUTER JOIN (SELECT id, date, name FROM test_join GROUP BY id, name, date) js2 +USING (id, name, date) +ORDER BY id, name; + +drop table test_join; diff --git a/parser/testdata/00980_merge_alter_settings/ast.json b/parser/testdata/00980_merge_alter_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00980_merge_alter_settings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00980_merge_alter_settings/metadata.json b/parser/testdata/00980_merge_alter_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00980_merge_alter_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00980_merge_alter_settings/query.sql b/parser/testdata/00980_merge_alter_settings/query.sql new file mode 100644 index 000000000..34daa702a --- /dev/null +++ b/parser/testdata/00980_merge_alter_settings/query.sql @@ -0,0 +1,105 @@ +-- 
Tags: no-replicated-database, log-engine +-- Tag no-replicated-database: Unsupported type of ALTER query + +DROP TABLE IF EXISTS log_for_alter; + +CREATE TABLE log_for_alter ( + id UInt64, + Data String +) ENGINE = Log(); + +ALTER TABLE log_for_alter MODIFY SETTING aaa=123; -- { serverError NOT_IMPLEMENTED } + +DROP TABLE IF EXISTS log_for_alter; + +DROP TABLE IF EXISTS table_for_alter; + +CREATE TABLE table_for_alter ( + id UInt64, + Data String +) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity=4096, index_granularity_bytes = '10Mi'; + +ALTER TABLE table_for_alter MODIFY SETTING index_granularity=555; -- { serverError READONLY_SETTING } + +SHOW CREATE TABLE table_for_alter; + +ALTER TABLE table_for_alter MODIFY SETTING parts_to_throw_insert = 1, parts_to_delay_insert = 1; + +SHOW CREATE TABLE table_for_alter; + +INSERT INTO table_for_alter VALUES (1, '1'); +INSERT INTO table_for_alter VALUES (2, '2'); -- { serverError TOO_MANY_PARTS } + +DETACH TABLE table_for_alter; + +ATTACH TABLE table_for_alter; + +INSERT INTO table_for_alter VALUES (2, '2'); -- { serverError TOO_MANY_PARTS } + +ALTER TABLE table_for_alter MODIFY SETTING xxx_yyy=124; -- { serverError UNKNOWN_SETTING } + +ALTER TABLE table_for_alter MODIFY SETTING parts_to_throw_insert = 100, parts_to_delay_insert = 100; + +INSERT INTO table_for_alter VALUES (2, '2'); + +SHOW CREATE TABLE table_for_alter; + +SELECT COUNT() FROM table_for_alter; + +ALTER TABLE table_for_alter MODIFY SETTING check_delay_period=10, check_delay_period=20, check_delay_period=30; + +SHOW CREATE TABLE table_for_alter; + +ALTER TABLE table_for_alter ADD COLUMN Data2 UInt64, MODIFY SETTING check_delay_period=5, check_delay_period=10, check_delay_period=15; + +SHOW CREATE TABLE table_for_alter; + +DROP TABLE IF EXISTS table_for_alter; + + +DROP TABLE IF EXISTS table_for_reset_setting; + +CREATE TABLE table_for_reset_setting ( + id UInt64, + Data String +) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity=4096, index_granularity_bytes = '10Mi'; + +ALTER TABLE table_for_reset_setting MODIFY SETTING index_granularity=555; -- { serverError READONLY_SETTING } + +SHOW CREATE TABLE table_for_reset_setting; + +INSERT INTO table_for_reset_setting VALUES (1, '1'); +INSERT INTO table_for_reset_setting VALUES (2, '2'); + +ALTER TABLE table_for_reset_setting MODIFY SETTING parts_to_throw_insert = 1, parts_to_delay_insert = 1; + +SHOW CREATE TABLE table_for_reset_setting; + +INSERT INTO table_for_reset_setting VALUES (1, '1'); -- { serverError TOO_MANY_PARTS } + +ALTER TABLE table_for_reset_setting RESET SETTING parts_to_delay_insert, parts_to_throw_insert; + +SHOW CREATE TABLE table_for_reset_setting; + +INSERT INTO table_for_reset_setting VALUES (1, '1'); +INSERT INTO table_for_reset_setting VALUES (2, '2'); + +DETACH TABLE table_for_reset_setting; +ATTACH TABLE table_for_reset_setting; + +SHOW CREATE TABLE table_for_reset_setting; + +ALTER TABLE table_for_reset_setting RESET SETTING index_granularity; -- { serverError READONLY_SETTING } + +-- don't execute alter with incorrect setting +ALTER TABLE table_for_reset_setting RESET SETTING merge_with_ttl_timeout, unknown_setting; -- { serverError BAD_ARGUMENTS } + +ALTER TABLE table_for_reset_setting MODIFY SETTING merge_with_ttl_timeout = 300, max_concurrent_queries = 1; + +SHOW CREATE TABLE table_for_reset_setting; + +ALTER TABLE table_for_reset_setting RESET SETTING max_concurrent_queries, merge_with_ttl_timeout; + +SHOW CREATE TABLE table_for_reset_setting; + +DROP TABLE IF EXISTS 
table_for_reset_setting; diff --git a/parser/testdata/00980_shard_aggregation_state_deserialization/ast.json b/parser/testdata/00980_shard_aggregation_state_deserialization/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00980_shard_aggregation_state_deserialization/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00980_shard_aggregation_state_deserialization/metadata.json b/parser/testdata/00980_shard_aggregation_state_deserialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00980_shard_aggregation_state_deserialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00980_shard_aggregation_state_deserialization/query.sql b/parser/testdata/00980_shard_aggregation_state_deserialization/query.sql new file mode 100644 index 000000000..786e8c5d7 --- /dev/null +++ b/parser/testdata/00980_shard_aggregation_state_deserialization/query.sql @@ -0,0 +1,10 @@ +-- Tags: shard + +DROP TABLE IF EXISTS numbers500k; +CREATE VIEW numbers500k AS SELECT number FROM system.numbers LIMIT 500000; + +SET max_query_size = 1073741824; + +SELECT count(*) FROM remote('127.0.0.{2,3}', currentDatabase(), numbers500k) WHERE bitmapContains((SELECT groupBitmapState(number) FROM numbers500k), toUInt32(number)); + +DROP TABLE numbers500k; diff --git a/parser/testdata/00980_skip_unused_shards_without_sharding_key/ast.json b/parser/testdata/00980_skip_unused_shards_without_sharding_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00980_skip_unused_shards_without_sharding_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00980_skip_unused_shards_without_sharding_key/metadata.json b/parser/testdata/00980_skip_unused_shards_without_sharding_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00980_skip_unused_shards_without_sharding_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00980_skip_unused_shards_without_sharding_key/query.sql b/parser/testdata/00980_skip_unused_shards_without_sharding_key/query.sql new file mode 100644 index 000000000..bdabfc7b5 --- /dev/null +++ b/parser/testdata/00980_skip_unused_shards_without_sharding_key/query.sql @@ -0,0 +1,14 @@ +-- Tags: shard + +DROP TABLE IF EXISTS t_local; +DROP TABLE IF EXISTS t_distr; + +CREATE TABLE t_local (a Int) ENGINE = Memory; +CREATE TABLE t_distr (a Int) ENGINE = Distributed(test_shard_localhost, currentDatabase(), 't_local'); + +INSERT INTO t_local VALUES (1), (2); +SET optimize_skip_unused_shards = 1; +SELECT * FROM t_distr WHERE a = 1; + +DROP table t_local; +DROP table t_distr; diff --git a/parser/testdata/00980_zookeeper_merge_tree_alter_settings/ast.json b/parser/testdata/00980_zookeeper_merge_tree_alter_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00980_zookeeper_merge_tree_alter_settings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00980_zookeeper_merge_tree_alter_settings/metadata.json b/parser/testdata/00980_zookeeper_merge_tree_alter_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00980_zookeeper_merge_tree_alter_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00980_zookeeper_merge_tree_alter_settings/query.sql b/parser/testdata/00980_zookeeper_merge_tree_alter_settings/query.sql new file 
mode 100644 index 000000000..b9ba70682 --- /dev/null +++ b/parser/testdata/00980_zookeeper_merge_tree_alter_settings/query.sql @@ -0,0 +1,121 @@ +-- Tags: zookeeper, no-replicated-database, no-shared-merge-tree +-- Tag no-replicated-database: Unsupported type of ALTER query +-- Tag no-shared-merge-tree: for smt works + +DROP TABLE IF EXISTS replicated_table_for_alter1; +DROP TABLE IF EXISTS replicated_table_for_alter2; + +SET replication_alter_partitions_sync = 2; + +CREATE TABLE replicated_table_for_alter1 ( + id UInt64, + Data String +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00980_{database}/replicated_table_for_alter', '1') ORDER BY id; + +CREATE TABLE replicated_table_for_alter2 ( + id UInt64, + Data String +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00980_{database}/replicated_table_for_alter', '2') ORDER BY id; + +SHOW CREATE TABLE replicated_table_for_alter1; + +ALTER TABLE replicated_table_for_alter1 MODIFY SETTING index_granularity = 4096; -- { serverError READONLY_SETTING } + +SHOW CREATE TABLE replicated_table_for_alter1; + +INSERT INTO replicated_table_for_alter2 VALUES (1, '1'), (2, '2'); + +SYSTEM SYNC REPLICA replicated_table_for_alter1; + +ALTER TABLE replicated_table_for_alter1 MODIFY SETTING use_minimalistic_part_header_in_zookeeper = 1; + +INSERT INTO replicated_table_for_alter1 VALUES (3, '3'), (4, '4'); + +SYSTEM SYNC REPLICA replicated_table_for_alter2; + +SELECT COUNT() FROM replicated_table_for_alter1; +SELECT COUNT() FROM replicated_table_for_alter2; + +DETACH TABLE replicated_table_for_alter2; +ATTACH TABLE replicated_table_for_alter2; + +DETACH TABLE replicated_table_for_alter1; +ATTACH TABLE replicated_table_for_alter1; + +SELECT COUNT() FROM replicated_table_for_alter1; +SELECT COUNT() FROM replicated_table_for_alter2; + +ALTER TABLE replicated_table_for_alter2 MODIFY SETTING parts_to_throw_insert = 1, parts_to_delay_insert = 1; +INSERT INTO replicated_table_for_alter2 VALUES (3, '1'), (4, '2'); -- { serverError TOO_MANY_PARTS } + +INSERT INTO replicated_table_for_alter1 VALUES (5, '5'), (6, '6'); + +SYSTEM SYNC REPLICA replicated_table_for_alter2; + +SELECT COUNT() FROM replicated_table_for_alter1; +SELECT COUNT() FROM replicated_table_for_alter2; + +DETACH TABLE replicated_table_for_alter2; +ATTACH TABLE replicated_table_for_alter2; + +DETACH TABLE replicated_table_for_alter1; +ATTACH TABLE replicated_table_for_alter1; + +SHOW CREATE TABLE replicated_table_for_alter1; +SHOW CREATE TABLE replicated_table_for_alter2; + +ALTER TABLE replicated_table_for_alter1 ADD COLUMN Data2 UInt64, MODIFY SETTING check_delay_period=5, check_delay_period=10, check_delay_period=15; + +SHOW CREATE TABLE replicated_table_for_alter1; +SHOW CREATE TABLE replicated_table_for_alter2; + +DROP TABLE IF EXISTS replicated_table_for_alter2; +DROP TABLE IF EXISTS replicated_table_for_alter1; + +DROP TABLE IF EXISTS replicated_table_for_reset_setting1; +DROP TABLE IF EXISTS replicated_table_for_reset_setting2; + +SET replication_alter_partitions_sync = 2; + +CREATE TABLE replicated_table_for_reset_setting1 ( + id UInt64, + Data String +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00980_{database}/replicated_table_for_reset_setting', '1') ORDER BY id; + +CREATE TABLE replicated_table_for_reset_setting2 ( + id UInt64, + Data String +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_00980_{database}/replicated_table_for_reset_setting', '2') ORDER BY id; + +SHOW CREATE TABLE replicated_table_for_reset_setting1; +SHOW CREATE TABLE 
replicated_table_for_reset_setting2; + +ALTER TABLE replicated_table_for_reset_setting1 MODIFY SETTING index_granularity = 4096; -- { serverError READONLY_SETTING } + +SHOW CREATE TABLE replicated_table_for_reset_setting1; + +ALTER TABLE replicated_table_for_reset_setting1 MODIFY SETTING merge_with_ttl_timeout = 100; +ALTER TABLE replicated_table_for_reset_setting2 MODIFY SETTING merge_with_ttl_timeout = 200; + +SHOW CREATE TABLE replicated_table_for_reset_setting1; +SHOW CREATE TABLE replicated_table_for_reset_setting2; + +DETACH TABLE replicated_table_for_reset_setting2; +ATTACH TABLE replicated_table_for_reset_setting2; + +DETACH TABLE replicated_table_for_reset_setting1; +ATTACH TABLE replicated_table_for_reset_setting1; + +SHOW CREATE TABLE replicated_table_for_reset_setting1; +SHOW CREATE TABLE replicated_table_for_reset_setting2; + +-- don't execute alter with incorrect setting +ALTER TABLE replicated_table_for_reset_setting1 RESET SETTING check_delay_period, unknown_setting; -- { serverError BAD_ARGUMENTS } +ALTER TABLE replicated_table_for_reset_setting1 RESET SETTING merge_with_ttl_timeout; +ALTER TABLE replicated_table_for_reset_setting2 RESET SETTING merge_with_ttl_timeout; + +SHOW CREATE TABLE replicated_table_for_reset_setting1; +SHOW CREATE TABLE replicated_table_for_reset_setting2; + +DROP TABLE IF EXISTS replicated_table_for_reset_setting2; +DROP TABLE IF EXISTS replicated_table_for_reset_setting1; diff --git a/parser/testdata/00981_no_virtual_columns/ast.json b/parser/testdata/00981_no_virtual_columns/ast.json new file mode 100644 index 000000000..3b67065f3 --- /dev/null +++ b/parser/testdata/00981_no_virtual_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery merge_a (children 1)" + }, + { + "explain": " Identifier merge_a" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001077256, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/00981_no_virtual_columns/metadata.json b/parser/testdata/00981_no_virtual_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00981_no_virtual_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00981_no_virtual_columns/query.sql b/parser/testdata/00981_no_virtual_columns/query.sql new file mode 100644 index 000000000..b39461545 --- /dev/null +++ b/parser/testdata/00981_no_virtual_columns/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS merge_a; +DROP TABLE IF EXISTS merge_b; +DROP TABLE IF EXISTS merge_ab; + +CREATE TABLE merge_a (x UInt8) ENGINE = StripeLog; +CREATE TABLE merge_b (x UInt8) ENGINE = StripeLog; +CREATE TABLE merge_ab AS merge(currentDatabase(), '^merge_[ab]$'); + +SELECT name FROM system.columns WHERE database = currentDatabase() AND table = 'merge_ab'; + +DROP TABLE merge_a; +DROP TABLE merge_b; +DROP TABLE merge_ab; diff --git a/parser/testdata/00981_topK_topKWeighted_long/ast.json b/parser/testdata/00981_topK_topKWeighted_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00981_topK_topKWeighted_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00981_topK_topKWeighted_long/metadata.json b/parser/testdata/00981_topK_topKWeighted_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00981_topK_topKWeighted_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/00981_topK_topKWeighted_long/query.sql b/parser/testdata/00981_topK_topKWeighted_long/query.sql new file mode 100644 index 000000000..7ee38867b --- /dev/null +++ b/parser/testdata/00981_topK_topKWeighted_long/query.sql @@ -0,0 +1,13 @@ +-- Tags: long + +DROP TABLE IF EXISTS topk; + +CREATE TABLE topk (val1 String, val2 UInt32) ENGINE = MergeTree ORDER BY val1 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO topk WITH number % 7 = 0 AS frequent SELECT toString(frequent ? number % 10 : number), frequent ? 999999999 : number FROM numbers(4000000); + +SELECT arraySort(topK(10)(val1)) FROM topk; +SELECT arraySort(topKWeighted(10)(val1, val2)) FROM topk; +SELECT topKWeighted(10)(toString(number), number) from numbers(3000000); + +DROP TABLE topk; diff --git a/parser/testdata/00982_array_enumerate_uniq_ranked/ast.json b/parser/testdata/00982_array_enumerate_uniq_ranked/ast.json new file mode 100644 index 000000000..ed4e05bb1 --- /dev/null +++ b/parser/testdata/00982_array_enumerate_uniq_ranked/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayEnumerateUniqRanked (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function VALUES (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal 'x Array(Array(String))'" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal Array_[Array_['a'], Array_['a'], Array_['b']]" + }, + { + "explain": " Literal Array_[Array_['a'], Array_['a'], Array_['b']]" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001355791, + "rows_read": 20, + "bytes_read": 873 + } +} diff --git a/parser/testdata/00982_array_enumerate_uniq_ranked/metadata.json b/parser/testdata/00982_array_enumerate_uniq_ranked/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00982_array_enumerate_uniq_ranked/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00982_array_enumerate_uniq_ranked/query.sql b/parser/testdata/00982_array_enumerate_uniq_ranked/query.sql new file mode 100644 index 000000000..46bb03e46 --- /dev/null +++ b/parser/testdata/00982_array_enumerate_uniq_ranked/query.sql @@ -0,0 +1 @@ +SELECT arrayEnumerateUniqRanked(x, 2) FROM VALUES('x Array(Array(String))', ([[]]), ([['a'], ['a'], ['b']]), ([['a'], ['a'], ['b']])); diff --git a/parser/testdata/00982_low_cardinality_setting_in_mv/ast.json b/parser/testdata/00982_low_cardinality_setting_in_mv/ast.json new file mode 100644 index 000000000..0312255bd --- /dev/null +++ b/parser/testdata/00982_low_cardinality_setting_in_mv/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test1 (children 1)" + }, + { + 
"explain": " Identifier test1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001391469, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/00982_low_cardinality_setting_in_mv/metadata.json b/parser/testdata/00982_low_cardinality_setting_in_mv/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00982_low_cardinality_setting_in_mv/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00982_low_cardinality_setting_in_mv/query.sql b/parser/testdata/00982_low_cardinality_setting_in_mv/query.sql new file mode 100644 index 000000000..e545dec90 --- /dev/null +++ b/parser/testdata/00982_low_cardinality_setting_in_mv/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS test1; +DROP TABLE IF EXISTS test2; +DROP TABLE IF EXISTS mat_view; + +CREATE TABLE test1 (a LowCardinality(String)) ENGINE=MergeTree() ORDER BY a; +CREATE TABLE test2 (a UInt64) engine=MergeTree() ORDER BY a; +CREATE MATERIALIZED VIEW test_mv TO test2 AS SELECT toUInt64(a = 'test') AS a FROM test1; + +DROP TABLE test_mv; +DROP TABLE test1; +DROP TABLE test2; diff --git a/parser/testdata/00983_summing_merge_tree_not_an_identifier/ast.json b/parser/testdata/00983_summing_merge_tree_not_an_identifier/ast.json new file mode 100644 index 000000000..83b90ee0b --- /dev/null +++ b/parser/testdata/00983_summing_merge_tree_not_an_identifier/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery xx (children 1)" + }, + { + "explain": " Identifier xx" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000984961, + "rows_read": 2, + "bytes_read": 57 + } +} diff --git a/parser/testdata/00983_summing_merge_tree_not_an_identifier/metadata.json b/parser/testdata/00983_summing_merge_tree_not_an_identifier/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00983_summing_merge_tree_not_an_identifier/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00983_summing_merge_tree_not_an_identifier/query.sql b/parser/testdata/00983_summing_merge_tree_not_an_identifier/query.sql new file mode 100644 index 000000000..091fce9de --- /dev/null +++ b/parser/testdata/00983_summing_merge_tree_not_an_identifier/query.sql @@ -0,0 +1,13 @@ +CREATE TABLE xx +( + `date` Date, + `id` Int64, + `clicks` Int64, + `price` Float64, + `spend` Float64 +) +ENGINE = SummingMergeTree([price, spend]) +PARTITION BY toYYYYMM(date) +ORDER BY id +SAMPLE BY id +SETTINGS index_granularity = 8192; -- { serverError UNEXPECTED_AST_STRUCTURE } diff --git a/parser/testdata/00984_materialized_view_to_columns/ast.json b/parser/testdata/00984_materialized_view_to_columns/ast.json new file mode 100644 index 000000000..a7e00e1ac --- /dev/null +++ b/parser/testdata/00984_materialized_view_to_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test1 (children 1)" + }, + { + "explain": " Identifier test1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001369257, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/00984_materialized_view_to_columns/metadata.json b/parser/testdata/00984_materialized_view_to_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00984_materialized_view_to_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/00984_materialized_view_to_columns/query.sql b/parser/testdata/00984_materialized_view_to_columns/query.sql new file mode 100644 index 000000000..948b32fe2 --- /dev/null +++ b/parser/testdata/00984_materialized_view_to_columns/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS test1; +DROP TABLE IF EXISTS test2; +DROP TABLE IF EXISTS mv; + +CREATE TABLE test1 (a UInt8, b String) ENGINE MergeTree ORDER BY a; +CREATE TABLE test2 (c UInt8, d String) ENGINE MergeTree ORDER BY c; +CREATE MATERIALIZED VIEW mv TO test1 (b String, a UInt8) AS SELECT d AS b, c AS a FROM test2; + +INSERT INTO test2 VALUES (1, 'test'); + +SELECT * FROM test1; + +DROP TABLE test1; +DROP TABLE test2; +DROP TABLE mv; diff --git a/parser/testdata/00985_merge_stack_overflow/ast.json b/parser/testdata/00985_merge_stack_overflow/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00985_merge_stack_overflow/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00985_merge_stack_overflow/metadata.json b/parser/testdata/00985_merge_stack_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00985_merge_stack_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00985_merge_stack_overflow/query.sql b/parser/testdata/00985_merge_stack_overflow/query.sql new file mode 100644 index 000000000..1f114f471 --- /dev/null +++ b/parser/testdata/00985_merge_stack_overflow/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-parallel +-- ^^^^^^^^^^^ otherwise you may hit a TOO_DEEP_RECURSION error when querying system.columns + +DROP TABLE IF EXISTS merge1; +DROP TABLE IF EXISTS merge2; + +CREATE TABLE IF NOT EXISTS merge1 (x UInt64) ENGINE = Merge(currentDatabase(), '^merge\\d$'); +CREATE TABLE IF NOT EXISTS merge2 (x UInt64) ENGINE = Merge(currentDatabase(), '^merge\\d$'); + +SELECT * FROM merge1; -- { serverError TOO_DEEP_RECURSION } +SELECT * FROM merge2; -- { serverError TOO_DEEP_RECURSION } + +DROP TABLE merge1; +DROP TABLE merge2; diff --git a/parser/testdata/00986_materialized_view_stack_overflow/ast.json b/parser/testdata/00986_materialized_view_stack_overflow/ast.json new file mode 100644 index 000000000..d5135ebc8 --- /dev/null +++ b/parser/testdata/00986_materialized_view_stack_overflow/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test1 (children 1)" + }, + { + "explain": " Identifier test1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001288888, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/00986_materialized_view_stack_overflow/metadata.json b/parser/testdata/00986_materialized_view_stack_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00986_materialized_view_stack_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00986_materialized_view_stack_overflow/query.sql b/parser/testdata/00986_materialized_view_stack_overflow/query.sql new file mode 100644 index 000000000..bb95ee6ab --- /dev/null +++ b/parser/testdata/00986_materialized_view_stack_overflow/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS test1; +DROP TABLE IF EXISTS test2; +DROP TABLE IF EXISTS mv1; +DROP TABLE IF EXISTS mv2; + +CREATE TABLE test1 (a UInt8) ENGINE MergeTree ORDER BY a; +CREATE TABLE test2 (a UInt8) ENGINE MergeTree ORDER BY a; + +CREATE MATERIALIZED VIEW mv1 TO test1 AS SELECT a FROM 
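+-- note: the two views form a write cycle; an insert into test1 fires mv2 (which writes to test2), inserts into test2 fire mv1 (which writes back to test1), and so on, hence the TOO_DEEP_RECURSION expected on the INSERT below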
test2; +CREATE MATERIALIZED VIEW mv2 TO test2 AS SELECT a FROM test1; + +insert into test1 values (1); -- { serverError TOO_DEEP_RECURSION } + +DROP TABLE test1; +DROP TABLE test2; +DROP TABLE mv1; +DROP TABLE mv2; diff --git a/parser/testdata/00987_distributed_stack_overflow/ast.json b/parser/testdata/00987_distributed_stack_overflow/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00987_distributed_stack_overflow/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00987_distributed_stack_overflow/metadata.json b/parser/testdata/00987_distributed_stack_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00987_distributed_stack_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00987_distributed_stack_overflow/query.sql b/parser/testdata/00987_distributed_stack_overflow/query.sql new file mode 100644 index 000000000..ba58713fe --- /dev/null +++ b/parser/testdata/00987_distributed_stack_overflow/query.sql @@ -0,0 +1,14 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS distr0; +DROP TABLE IF EXISTS distr1; +DROP TABLE IF EXISTS distr2; + +CREATE TABLE distr (x UInt8) ENGINE = Distributed(test_shard_localhost, currentDatabase(), distr); -- { serverError INFINITE_LOOP } + +CREATE TABLE distr0 (x UInt8) ENGINE = Distributed(test_shard_localhost, '', distr0); -- { serverError INFINITE_LOOP } + +CREATE TABLE distr1 (x UInt8) ENGINE = Distributed(test_shard_localhost, currentDatabase(), distr2); +CREATE TABLE distr2 (x UInt8) ENGINE = Distributed(test_shard_localhost, currentDatabase(), distr1); -- { serverError INFINITE_LOOP } + +DROP TABLE distr1; diff --git a/parser/testdata/00988_constraints_replication_zookeeper_long/ast.json b/parser/testdata/00988_constraints_replication_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00988_constraints_replication_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00988_constraints_replication_zookeeper_long/metadata.json b/parser/testdata/00988_constraints_replication_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00988_constraints_replication_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00988_constraints_replication_zookeeper_long/query.sql b/parser/testdata/00988_constraints_replication_zookeeper_long/query.sql new file mode 100644 index 000000000..0cad1af58 --- /dev/null +++ b/parser/testdata/00988_constraints_replication_zookeeper_long/query.sql @@ -0,0 +1,40 @@ +-- Tags: long, replica + +DROP TABLE IF EXISTS replicated_constraints1; +DROP TABLE IF EXISTS replicated_constraints2; + +CREATE TABLE replicated_constraints1 +( + a UInt32, + b UInt32, + CONSTRAINT a_constraint CHECK a < 10 +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00988/alter_constraints', 'r1') ORDER BY (a); + +CREATE TABLE replicated_constraints2 +( + a UInt32, + b UInt32, + CONSTRAINT a_constraint CHECK a < 10 +) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_00988/alter_constraints', 'r2') ORDER BY (a); + +INSERT INTO replicated_constraints1 VALUES (1, 2); +INSERT INTO replicated_constraints2 VALUES (3, 4); + +SYSTEM SYNC REPLICA replicated_constraints1; +SYSTEM SYNC REPLICA replicated_constraints2; + +INSERT INTO replicated_constraints1 VALUES (10, 10); -- { serverError VIOLATED_CONSTRAINT } + +ALTER 
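+-- alter_sync = 2 makes each ALTER wait until all replicas have applied it, so the INSERTs that follow see a consistent constraint set on both replicas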
TABLE replicated_constraints1 DROP CONSTRAINT a_constraint SETTINGS alter_sync=2; + +INSERT INTO replicated_constraints1 VALUES (10, 10); +INSERT INTO replicated_constraints2 VALUES (10, 10); + +ALTER TABLE replicated_constraints1 ADD CONSTRAINT b_constraint CHECK b > 10 SETTINGS alter_sync=2; +ALTER TABLE replicated_constraints2 ADD CONSTRAINT a_constraint CHECK a < 10 SETTINGS alter_sync=2; + +INSERT INTO replicated_constraints1 VALUES (10, 11); -- { serverError VIOLATED_CONSTRAINT } +INSERT INTO replicated_constraints2 VALUES (9, 10); -- { serverError VIOLATED_CONSTRAINT } + +DROP TABLE replicated_constraints1; +DROP TABLE replicated_constraints2; diff --git a/parser/testdata/00988_expansion_aliases_limit/ast.json b/parser/testdata/00988_expansion_aliases_limit/ast.json new file mode 100644 index 000000000..1a32f1a11 --- /dev/null +++ b/parser/testdata/00988_expansion_aliases_limit/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001369603, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00988_expansion_aliases_limit/metadata.json b/parser/testdata/00988_expansion_aliases_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00988_expansion_aliases_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00988_expansion_aliases_limit/query.sql b/parser/testdata/00988_expansion_aliases_limit/query.sql new file mode 100644 index 000000000..fce55bb68 --- /dev/null +++ b/parser/testdata/00988_expansion_aliases_limit/query.sql @@ -0,0 +1,2 @@ +SET max_expanded_ast_elements = 10000; +SELECT 1 AS a, a + a AS b, b + b AS c, c + c AS d, d + d AS e, e + e AS f, f + f AS g, g + g AS h, h + h AS i, i + i AS j, j + j AS k, k + k AS l, l + l AS m, m + m AS n, n + n AS o, o + o AS p, p + p AS q, q + q AS r, r + r AS s, s + s AS t, t + t AS u, u + u AS v, v + v AS w, w + w AS x, x + x AS y, y + y AS z; -- { serverError BAD_ARGUMENTS, 168 } diff --git a/parser/testdata/00988_parallel_parts_removal/ast.json b/parser/testdata/00988_parallel_parts_removal/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00988_parallel_parts_removal/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00988_parallel_parts_removal/metadata.json b/parser/testdata/00988_parallel_parts_removal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00988_parallel_parts_removal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00988_parallel_parts_removal/query.sql b/parser/testdata/00988_parallel_parts_removal/query.sql new file mode 100644 index 000000000..3e23046d5 --- /dev/null +++ b/parser/testdata/00988_parallel_parts_removal/query.sql @@ -0,0 +1,22 @@ +-- Tags: long, no-object-storage + +DROP TABLE IF EXISTS mt; + +CREATE TABLE mt (x UInt64) ENGINE = MergeTree ORDER BY x + SETTINGS cleanup_delay_period = 1, cleanup_delay_period_random_add = 0, + cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime = 1, parts_to_delay_insert = 100000, parts_to_throw_insert = 100000; + +SYSTEM STOP MERGES mt; + +SET max_block_size = 1, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0, max_insert_delayed_streams_for_parallel_write = 1000, max_execution_time = 600; +INSERT INTO mt SELECT * FROM numbers(1000); +SET max_block_size = 65536; + +SELECT 
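+-- with merges stopped and one-row blocks (max_block_size = 1, block squashing disabled), the INSERT above leaves roughly 1000 tiny parts; once merges restart, old_parts_lifetime = 1 and cleanup_delay_period = 1 let the cleanup threads remove the merged-away parts almost immediately, which is the parallel removal path this test exercises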
count(), sum(x) FROM mt; + +SYSTEM START MERGES mt; +OPTIMIZE TABLE mt FINAL; + +SELECT count(), sum(x) FROM mt; + +DROP TABLE mt; diff --git a/parser/testdata/00989_parallel_parts_loading/ast.json b/parser/testdata/00989_parallel_parts_loading/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00989_parallel_parts_loading/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00989_parallel_parts_loading/metadata.json b/parser/testdata/00989_parallel_parts_loading/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00989_parallel_parts_loading/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00989_parallel_parts_loading/query.sql b/parser/testdata/00989_parallel_parts_loading/query.sql new file mode 100644 index 000000000..fe2ee756d --- /dev/null +++ b/parser/testdata/00989_parallel_parts_loading/query.sql @@ -0,0 +1,21 @@ +-- Tags: no-random-settings, no-random-merge-tree-settings, no-msan, no-tsan, no-asan, no-debug, no-object-storage
+-- a small number of insert threads can make inserts terribly slow, especially with some builds like msan
+DROP TABLE IF EXISTS mt; + +CREATE TABLE mt (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS parts_to_delay_insert = 100000, parts_to_throw_insert = 100000; + +SYSTEM STOP MERGES mt; + +SET max_block_size = 1, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0, max_execution_time = 600; +INSERT INTO mt SELECT * FROM numbers(1000); +SET max_block_size = 65536; + +SELECT count(), sum(x) FROM mt; + +DETACH TABLE mt; +ATTACH TABLE mt; + +SELECT count(), sum(x) FROM mt; + +SYSTEM START MERGES mt; +DROP TABLE mt; diff --git a/parser/testdata/00990_function_current_user/ast.json b/parser/testdata/00990_function_current_user/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00990_function_current_user/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00990_function_current_user/metadata.json b/parser/testdata/00990_function_current_user/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00990_function_current_user/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00990_function_current_user/query.sql b/parser/testdata/00990_function_current_user/query.sql new file mode 100644 index 000000000..729e8b3de --- /dev/null +++ b/parser/testdata/00990_function_current_user/query.sql @@ -0,0 +1,5 @@ +-- Since the actual user name is unknown, we have to perform just smoke tests
+select currentUser() IS NOT NULL; +select length(currentUser()) > 0; +select currentUser() = user(), currentUser() = USER(), current_user() = currentUser(); +select currentUser() = initial_user from system.processes where query like '%$!@#%' AND current_database = currentDatabase(); diff --git a/parser/testdata/00990_hasToken_and_tokenbf/ast.json b/parser/testdata/00990_hasToken_and_tokenbf/ast.json new file mode 100644 index 000000000..fe294734f --- /dev/null +++ b/parser/testdata/00990_hasToken_and_tokenbf/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery bloom_filter (children 1)" + }, + { + "explain": " Identifier bloom_filter" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001284224, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/00990_hasToken_and_tokenbf/metadata.json 
b/parser/testdata/00990_hasToken_and_tokenbf/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00990_hasToken_and_tokenbf/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00990_hasToken_and_tokenbf/query.sql b/parser/testdata/00990_hasToken_and_tokenbf/query.sql new file mode 100644 index 000000000..403b61b75 --- /dev/null +++ b/parser/testdata/00990_hasToken_and_tokenbf/query.sql @@ -0,0 +1,67 @@ +DROP TABLE IF EXISTS bloom_filter; + +CREATE TABLE bloom_filter +( + id UInt64, + s String, + INDEX tok_bf (s, lower(s)) TYPE tokenbf_v1(512, 3, 0) GRANULARITY 1 +) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity = 8, index_granularity_bytes = '10Mi'; + +insert into bloom_filter select number, 'yyy,uuu' from numbers(1024); +insert into bloom_filter select number+2000, 'abc,def,zzz' from numbers(8); +insert into bloom_filter select number+3000, 'yyy,uuu' from numbers(1024); +insert into bloom_filter select number+3000, 'abcdefzzz' from numbers(1024); + +SELECT max(id) FROM bloom_filter WHERE hasToken(s, 'abc,def,zzz'); -- { serverError BAD_ARGUMENTS } +SELECT max(id) FROM bloom_filter WHERE hasTokenCaseInsensitive(s, 'abc,def,zzz'); -- { serverError BAD_ARGUMENTS } + +SELECT max(id) FROM bloom_filter WHERE hasTokenOrNull(s, 'abc,def,zzz'); +SELECT max(id) FROM bloom_filter WHERE hasTokenCaseInsensitiveOrNull(s, 'abc,def,zzz'); + +-- same as table "bloom_filter" but w/o index_granularity_bytes
+drop table if exists bloom_filter2; +create table bloom_filter2 +( + id UInt64, + s String, + index tok_bf3 (s, lower(s)) type tokenbf_v1(512, 3, 0) GRANULARITY 1 +) engine = MergeTree() order by id settings index_granularity = 8; + +insert into bloom_filter2 select number, 'yyy,uuu' from numbers(1024); +insert into bloom_filter2 select number+2000, 'ABC,def,zzz' from numbers(8); +insert into bloom_filter2 select number+3000, 'yyy,uuu' from numbers(1024); +insert into bloom_filter2 select number+3000, 'abcdefzzz' from numbers(1024); + +set max_rows_to_read = 16; + +SELECT max(id) FROM bloom_filter WHERE hasToken(s, 'abc'); +SELECT max(id) FROM bloom_filter WHERE hasTokenOrNull(s, 'abc'); +SELECT max(id) FROM bloom_filter WHERE hasToken(s, 'ABC'); +select max(id) from bloom_filter where hasTokenCaseInsensitive(s, 'ABC'); +select max(id) from bloom_filter where hasTokenCaseInsensitiveOrNull(s, 'ABC'); +SELECT max(id) FROM bloom_filter WHERE hasToken(s, 'def'); +SELECT max(id) FROM bloom_filter WHERE hasToken(s, 'zzz'); +select max(id) from bloom_filter where hasTokenCaseInsensitive(s, 'zZz'); + +select max(id) from bloom_filter2 where hasToken(s, 'ABC'); +select max(id) from bloom_filter2 where hasToken(s, 'abc'); +select max(id) from bloom_filter2 where hasTokenCaseInsensitive(s, 'abc'); +select max(id) from bloom_filter2 where hasTokenCaseInsensitive(s, 'ABC'); + +-- invert result
+-- this does not work as expected, reading more rows than it should
+-- SELECT max(id) FROM bloom_filter WHERE NOT hasToken(s, 'yyy'); + +-- accessing too many rows
+SELECT max(id) FROM bloom_filter WHERE hasToken(s, 'yyy'); -- { serverError TOO_MANY_ROWS } + +-- this syntax is not supported by tokenbf
+SELECT max(id) FROM bloom_filter WHERE hasToken(s, 'zzz') == 1; -- { serverError TOO_MANY_ROWS } + +DROP TABLE bloom_filter; + +-- AST fuzzer crash, issue #54541
+CREATE TABLE tab (row_id UInt32, str String, INDEX idx str TYPE tokenbf_v1(256, 2, 0)) ENGINE = MergeTree ORDER BY row_id; +INSERT INTO tab VALUES (0, 'a'); +SELECT * FROM tab WHERE str == 
'else' AND 1.0; +DROP TABLE tab; diff --git a/parser/testdata/00990_metric_log_table_not_empty/ast.json b/parser/testdata/00990_metric_log_table_not_empty/ast.json new file mode 100644 index 000000000..3b2f1f6e9 --- /dev/null +++ b/parser/testdata/00990_metric_log_table_not_empty/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sleep (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001023362, + "rows_read": 8, + "bytes_read": 282 + } +} diff --git a/parser/testdata/00990_metric_log_table_not_empty/metadata.json b/parser/testdata/00990_metric_log_table_not_empty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00990_metric_log_table_not_empty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00990_metric_log_table_not_empty/query.sql b/parser/testdata/00990_metric_log_table_not_empty/query.sql new file mode 100644 index 000000000..ed613b761 --- /dev/null +++ b/parser/testdata/00990_metric_log_table_not_empty/query.sql @@ -0,0 +1,5 @@ +select sleep(2) format Null; --More than collect_interval_milliseconds + +system flush logs metric_log; + +select count()>0 from system.metric_log diff --git a/parser/testdata/00990_request_splitting/ast.json b/parser/testdata/00990_request_splitting/ast.json new file mode 100644 index 000000000..36b278fee --- /dev/null +++ b/parser/testdata/00990_request_splitting/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function url (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'http:\/\/127.0.0.1:1337\/? HTTP\/1.1\\r\\nTest: test'" + }, + { + "explain": " Identifier CSV" + }, + { + "explain": " Literal 'column1 String'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001285023, + "rows_read": 13, + "bytes_read": 527 + } +} diff --git a/parser/testdata/00990_request_splitting/metadata.json b/parser/testdata/00990_request_splitting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00990_request_splitting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00990_request_splitting/query.sql b/parser/testdata/00990_request_splitting/query.sql new file mode 100644 index 000000000..6a1e3902d --- /dev/null +++ b/parser/testdata/00990_request_splitting/query.sql @@ -0,0 +1 @@ +SELECT * FROM url('http://127.0.0.1:1337/? 
HTTP/1.1\r\nTest: test', CSV, 'column1 String'); -- { serverError POCO_EXCEPTION } diff --git a/parser/testdata/00994_table_function_numbers_mt/ast.json b/parser/testdata/00994_table_function_numbers_mt/ast.json new file mode 100644 index 000000000..875230069 --- /dev/null +++ b/parser/testdata/00994_table_function_numbers_mt/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function min (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function max (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers_mt (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10000000" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001188996, + "rows_read": 19, + "bytes_read": 735 + } +} diff --git a/parser/testdata/00994_table_function_numbers_mt/metadata.json b/parser/testdata/00994_table_function_numbers_mt/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00994_table_function_numbers_mt/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00994_table_function_numbers_mt/query.sql b/parser/testdata/00994_table_function_numbers_mt/query.sql new file mode 100644 index 000000000..f2bbbb438 --- /dev/null +++ b/parser/testdata/00994_table_function_numbers_mt/query.sql @@ -0,0 +1,3 @@ +SELECT min(number), max(number), sum(number) FROM numbers_mt(10000000); +SELECT min(number), max(number), sum(number) FROM numbers(10000000, 5000000); +SELECT min(number), max(number), sum(number) FROM numbers_mt(10000000, 5000000); diff --git a/parser/testdata/00995_optimize_read_in_order_with_aggregation/ast.json b/parser/testdata/00995_optimize_read_in_order_with_aggregation/ast.json new file mode 100644 index 000000000..c8bc8aeff --- /dev/null +++ b/parser/testdata/00995_optimize_read_in_order_with_aggregation/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001221973, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00995_optimize_read_in_order_with_aggregation/metadata.json b/parser/testdata/00995_optimize_read_in_order_with_aggregation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00995_optimize_read_in_order_with_aggregation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00995_optimize_read_in_order_with_aggregation/query.sql b/parser/testdata/00995_optimize_read_in_order_with_aggregation/query.sql new file mode 100644 index 000000000..56a7a8c87 --- /dev/null +++ b/parser/testdata/00995_optimize_read_in_order_with_aggregation/query.sql @@ -0,0 +1,8 @@ +SET 
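+-- optimize_read_in_order (enabled below) lets an ORDER BY on a sorting-key prefix stream rows in key order instead of sorting; here the ORDER BY is on the aggregate alias s, which is unrelated to the key, so the test presumably guards against the optimization being applied where it does not belong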
optimize_read_in_order = 1; +DROP TABLE IF EXISTS order_with_aggr; +CREATE TABLE order_with_aggr(a Int) ENGINE = MergeTree ORDER BY a; + +INSERT INTO order_with_aggr SELECT * FROM numbers(100); +SELECT sum(a) as s FROM order_with_aggr ORDER BY s; + +DROP TABLE order_with_aggr; diff --git a/parser/testdata/00995_order_by_with_fill/ast.json b/parser/testdata/00995_order_by_with_fill/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00995_order_by_with_fill/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00995_order_by_with_fill/metadata.json b/parser/testdata/00995_order_by_with_fill/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00995_order_by_with_fill/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00995_order_by_with_fill/query.sql b/parser/testdata/00995_order_by_with_fill/query.sql new file mode 100644 index 000000000..7140ca748 --- /dev/null +++ b/parser/testdata/00995_order_by_with_fill/query.sql @@ -0,0 +1,39 @@ +--{ echoOn } +DROP TABLE IF EXISTS fill; +CREATE TABLE fill (date Date, val Int, str String) ENGINE = Memory; +INSERT INTO fill VALUES (toDate('2019-05-24'), 13, 'sd0')(toDate('2019-05-10'), 16, 'vp7')(toDate('2019-05-25'), 17, '0ei')(toDate('2019-05-30'), 18, '3kd')(toDate('2019-05-15'), 27, 'enb')(toDate('2019-06-04'), 5, '6az')(toDate('2019-05-23'), 15, '01v')(toDate('2019-05-08'), 28, 'otf')(toDate('2019-05-19'), 20, 'yfh')(toDate('2019-05-07'), 26, '2ke')(toDate('2019-05-07'), 18, 'prh')(toDate('2019-05-09'), 25, '798')(toDate('2019-05-10'), 1, 'myj')(toDate('2019-05-11'), 18, '3s2')(toDate('2019-05-23'), 29, '72y'); + +-- *** table without fill to compare *** +SELECT * FROM fill ORDER BY date, val; + +-- Some useful cases + +SELECT * FROM fill ORDER BY date WITH FILL, val; + +SELECT * FROM fill ORDER BY date WITH FILL FROM toDate('2019-05-01') TO toDate('2019-05-31'), val WITH FILL; + +SELECT * FROM fill ORDER BY date DESC WITH FILL, val WITH FILL FROM 1 TO 6; + +-- Some weird cases + +SELECT * FROM fill ORDER BY date DESC WITH FILL TO toDate('2019-05-01') STEP -2, val DESC WITH FILL FROM 10 TO -5 STEP -3; + +SELECT * FROM fill ORDER BY date WITH FILL TO toDate('2019-06-23') STEP 3, val WITH FILL FROM -10 STEP 2; + +DROP TABLE fill; +CREATE TABLE fill (a UInt32, b Int32) ENGINE = Memory; +INSERT INTO fill VALUES (1, -2), (1, 3), (3, 2), (5, -1), (6, 5), (8, 0); + +-- *** table without fill to compare *** +SELECT * FROM fill ORDER BY a, b; + +SELECT * FROM fill ORDER BY a WITH FILL, b WITH fill; + +SELECT * FROM fill ORDER BY a WITH FILL, b WITH fill TO 6 STEP 2; + +SELECT * FROM fill ORDER BY a WITH FILL STEP -1; -- { serverError INVALID_WITH_FILL_EXPRESSION } +SELECT * FROM fill ORDER BY a WITH FILL FROM 10 TO 1; -- { serverError INVALID_WITH_FILL_EXPRESSION } +SELECT * FROM fill ORDER BY a DESC WITH FILL FROM 1 TO 10; -- { serverError INVALID_WITH_FILL_EXPRESSION } +SELECT * FROM fill ORDER BY a WITH FILL FROM -10 to 10; -- { serverError INVALID_WITH_FILL_EXPRESSION } + +DROP TABLE fill; diff --git a/parser/testdata/00996_limit_with_ties/ast.json b/parser/testdata/00996_limit_with_ties/ast.json new file mode 100644 index 000000000..cca59a83e --- /dev/null +++ b/parser/testdata/00996_limit_with_ties/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ties (children 1)" + }, + { + "explain": " Identifier ties" + } + ], + + "rows": 2, + + 
"statistics": + { + "elapsed": 0.001348825, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/00996_limit_with_ties/metadata.json b/parser/testdata/00996_limit_with_ties/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00996_limit_with_ties/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00996_limit_with_ties/query.sql b/parser/testdata/00996_limit_with_ties/query.sql new file mode 100644 index 000000000..f67c8df05 --- /dev/null +++ b/parser/testdata/00996_limit_with_ties/query.sql @@ -0,0 +1,58 @@ +DROP TABLE IF EXISTS ties; +CREATE TABLE ties (a Int) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO ties VALUES (1), (1), (2), (2), (2), (2) (3), (3); + +SELECT a FROM ties order by a limit 1 with ties; +SELECT '*'; +SELECT a FROM ties order by a limit 3 with ties; +SELECT '*'; +SELECT a FROM ties order by a limit 5 with ties; +SELECT '*'; + +SET max_block_size = 2; +SELECT a FROM ties order by a limit 1, 1 with ties; +SELECT '*'; +SELECT a FROM ties order by a limit 1, 2 with ties; +SELECT '*'; +SELECT a FROM ties order by a limit 2 with ties; +SELECT '*'; +SELECT a FROM ties order by a limit 2, 3 with ties; +SELECT '*'; +SELECT a FROM ties order by a limit 4 with ties; +SELECT '*'; + +SET max_block_size = 3; +SELECT a FROM ties order by a limit 1 with ties; +SELECT '*'; +SELECT a FROM ties order by a limit 2, 3 with ties; +SELECT '*'; +SELECT a FROM ties order by a limit 3, 2 with ties; +SELECT '*'; + +select count() from (select number > 100 from numbers(2000) order by number > 100 limit 1, 7 with ties); --TODO replace "number > 100" with "number > 100 as n" +select count() from (select number, number < 100 from numbers(2000) order by number < 100 desc limit 10 with ties); +SET max_block_size = 5; +select count() from (select number < 100, number from numbers(2000) order by number < 100 desc limit 10 with ties); + +SELECT count() FROM (WITH data AS ( + SELECT * FROM numbers(0, 10) + UNION ALL + SELECT * FROM numbers(10, 10) +) +SELECT number div 10 AS ten, number +FROM data +ORDER BY ten +LIMIT 8,6 WITH TIES); + +SELECT count() FROM (WITH data AS ( + SELECT * FROM numbers(0, 10) + UNION ALL + SELECT * FROM numbers(10, 10) +) +SELECT number div 11 AS eleven, number +FROM data +ORDER BY eleven +LIMIT 8,6 WITH TIES); + +DROP TABLE ties; diff --git a/parser/testdata/00996_neighbor/ast.json b/parser/testdata/00996_neighbor/ast.json new file mode 100644 index 000000000..b5e2c63ee --- /dev/null +++ b/parser/testdata/00996_neighbor/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001287693, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00996_neighbor/metadata.json b/parser/testdata/00996_neighbor/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00996_neighbor/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00996_neighbor/query.sql b/parser/testdata/00996_neighbor/query.sql new file mode 100644 index 000000000..f9cbf69a8 --- /dev/null +++ b/parser/testdata/00996_neighbor/query.sql @@ -0,0 +1,43 @@ +SET allow_deprecated_error_prone_window_functions = 1; +SELECT number, neighbor(toString(number), 0) FROM numbers(10); + +SELECT number, neighbor(toString(number), 5) FROM numbers(10); +SELECT number, neighbor(toString(number), -5) FROM numbers(10); + +SELECT 
number, neighbor(toString(number), 10) FROM numbers(10); +SELECT number, neighbor(toString(number), -10) FROM numbers(10); + +SELECT number, neighbor(toString(number), 15) FROM numbers(10); +SELECT number, neighbor(toString(number), -15) FROM numbers(10); + +SELECT number, neighbor(toString(number), 5, 'Hello') FROM numbers(10); +SELECT number, neighbor(toString(number), -5, 'World') FROM numbers(10); + +SELECT number, neighbor(toString(number), 5, concat('Hello ', toString(number))) FROM numbers(10); +SELECT number, neighbor(toString(number), -5, concat('World ', toString(number))) FROM numbers(10); + + +SELECT number, neighbor('ClickHouse', 0) FROM numbers(10); + +SELECT number, neighbor('ClickHouse', 5) FROM numbers(10); +SELECT number, neighbor('ClickHouse', -5) FROM numbers(10); + +SELECT number, neighbor('ClickHouse', 10) FROM numbers(10); +SELECT number, neighbor('ClickHouse', -10) FROM numbers(10); + +SELECT number, neighbor('ClickHouse', 15) FROM numbers(10); +SELECT number, neighbor('ClickHouse', -15) FROM numbers(10); + +SELECT number, neighbor('ClickHouse', 5, 'Hello') FROM numbers(10); +SELECT number, neighbor('ClickHouse', -5, 'World') FROM numbers(10); + +SELECT number, neighbor('ClickHouse', 5, concat('Hello ', toString(number))) FROM numbers(10); +SELECT number, neighbor('ClickHouse', -5, concat('World ', toString(number))) FROM numbers(10); + + +SELECT number, neighbor(toString(number), number) FROM numbers(10); +SELECT number, neighbor(toString(number), intDiv(number, 2)) FROM numbers(10); + +SELECT number, neighbor('Hello', number) FROM numbers(10); +SELECT number, neighbor('Hello', -3) FROM numbers(10); +SELECT number, neighbor('Hello', -3, 'World') FROM numbers(10); diff --git a/parser/testdata/00997_extract_all_crash_6627/ast.json b/parser/testdata/00997_extract_all_crash_6627/ast.json new file mode 100644 index 000000000..914e3aea1 --- /dev/null +++ b/parser/testdata/00997_extract_all_crash_6627/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function extractAll (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'Mozilla\/5.0 (Windows NT 10.0; WOW64) AppleWebKit\/537.36 (KHTML, like Gecko) Chrome\/75.0.3770.143 YaBrowser\/19.7.2.455 Yowser\/2.5 Safari\/537.36'" + }, + { + "explain": " Literal '[Y][a-zA-Z]{8}\/[1-9]([1-9]+)?(((.?)([0-9]+)?){0,4})?'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001443644, + "rows_read": 8, + "bytes_read": 475 + } +} diff --git a/parser/testdata/00997_extract_all_crash_6627/metadata.json b/parser/testdata/00997_extract_all_crash_6627/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00997_extract_all_crash_6627/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00997_extract_all_crash_6627/query.sql b/parser/testdata/00997_extract_all_crash_6627/query.sql new file mode 100644 index 000000000..06de4ec8a --- /dev/null +++ b/parser/testdata/00997_extract_all_crash_6627/query.sql @@ -0,0 +1 @@ +SELECT extractAll('Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.143 YaBrowser/19.7.2.455 Yowser/2.5 Safari/537.36', '[Y][a-zA-Z]{8}/[1-9]([1-9]+)?(((.?)([0-9]+)?){0,4})?'); diff 
--git a/parser/testdata/00997_set_index_array/ast.json b/parser/testdata/00997_set_index_array/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00997_set_index_array/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00997_set_index_array/metadata.json b/parser/testdata/00997_set_index_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00997_set_index_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00997_set_index_array/query.sql b/parser/testdata/00997_set_index_array/query.sql new file mode 100644 index 000000000..7b600a05b --- /dev/null +++ b/parser/testdata/00997_set_index_array/query.sql @@ -0,0 +1,29 @@ +-- Tags: no-random-merge-tree-settings + +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. +SET parallel_replicas_index_analysis_only_on_coordinator = 0; + +DROP TABLE IF EXISTS set_array; + +CREATE TABLE set_array +( + primary_key String, + index_array Array(UInt64), + INDEX additional_index_array (index_array) TYPE set(10000) GRANULARITY 1 +) ENGINE = MergeTree() +ORDER BY (primary_key); + +INSERT INTO set_array +select + toString(intDiv(number, 100000)) as primary_key, + array(number) as index_array +from system.numbers +limit 1000000; + +OPTIMIZE TABLE set_array FINAL; + +SET max_rows_to_read = 8192; + +select count() from set_array where has(index_array, 333); + +DROP TABLE set_array; diff --git a/parser/testdata/00997_trim/ast.json b/parser/testdata/00997_trim/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/00997_trim/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/00997_trim/metadata.json b/parser/testdata/00997_trim/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00997_trim/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00997_trim/query.sql b/parser/testdata/00997_trim/query.sql new file mode 100644 index 000000000..7519877ec --- /dev/null +++ b/parser/testdata/00997_trim/query.sql @@ -0,0 +1,20 @@ +WITH + '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' AS x, + replaceRegexpAll(x, '.', ' ') AS spaces, + concat(substring(spaces, 1, rand(1) % 62), substring(x, 1, rand(2) % 62), substring(spaces, 1, rand(3) % 62)) AS s, + trimLeft(s) AS sl, + trimRight(s) AS sr, + trimBoth(s) AS t, + replaceRegexpOne(s, '^ +', '') AS slr, + replaceRegexpOne(s, ' +$', '') AS srr, + replaceRegexpOne(s, '^ *(.*?) 
*$', '\\1') AS tr +SELECT + replaceAll(s, ' ', '_'), + replaceAll(sl, ' ', '_'), + replaceAll(slr, ' ', '_'), + replaceAll(sr, ' ', '_'), + replaceAll(srr, ' ', '_'), + replaceAll(t, ' ', '_'), + replaceAll(tr, ' ', '_') +FROM numbers(100000) +WHERE NOT ((sl = slr) AND (sr = srr) AND (t = tr)) diff --git a/parser/testdata/00998_constraints_all_tables/ast.json b/parser/testdata/00998_constraints_all_tables/ast.json new file mode 100644 index 000000000..410018693 --- /dev/null +++ b/parser/testdata/00998_constraints_all_tables/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery constrained (children 1)" + }, + { + "explain": " Identifier constrained" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001352695, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/00998_constraints_all_tables/metadata.json b/parser/testdata/00998_constraints_all_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00998_constraints_all_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00998_constraints_all_tables/query.sql b/parser/testdata/00998_constraints_all_tables/query.sql new file mode 100644 index 000000000..0985e9a4e --- /dev/null +++ b/parser/testdata/00998_constraints_all_tables/query.sql @@ -0,0 +1,53 @@ +DROP TABLE IF EXISTS constrained; +CREATE TABLE constrained (URL String, CONSTRAINT is_censor CHECK domainWithoutWWW(URL) = 'censor.net', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = Null; +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('Hello'), ('test'); -- { serverError VIOLATED_CONSTRAINT } +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), ('https://censor.net/te\xFFst'); -- { serverError VIOLATED_CONSTRAINT } +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), (toValidUTF8('https://censor.net/te\xFFst')); +DROP TABLE constrained; + +CREATE TABLE constrained (URL String, CONSTRAINT is_censor CHECK domainWithoutWWW(URL) = 'censor.net', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = Memory; +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('Hello'), ('test'); -- { serverError VIOLATED_CONSTRAINT } +SELECT count() FROM constrained; +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), ('https://censor.net/te\xFFst'); -- { serverError VIOLATED_CONSTRAINT } +SELECT count() FROM constrained; +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), (toValidUTF8('https://censor.net/te\xFFst')); +SELECT count() FROM constrained; +DROP TABLE constrained; + +CREATE TABLE constrained (URL String, CONSTRAINT is_censor CHECK domainWithoutWWW(URL) = 'censor.net', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = StripeLog; +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('Hello'), ('test'); -- { serverError VIOLATED_CONSTRAINT } +SELECT count() FROM constrained; +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), ('https://censor.net/te\xFFst'); -- { serverError VIOLATED_CONSTRAINT } +SELECT count() FROM constrained; +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), (toValidUTF8('https://censor.net/te\xFFst')); +SELECT count() FROM constrained; 
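+-- the same three INSERTs are repeated for every engine (Null, Memory, StripeLog, TinyLog, Log): CHECK constraints are enforced at INSERT time, before any data reaches the storage engine, so each engine is expected to behave identically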
+DROP TABLE constrained; + +CREATE TABLE constrained (URL String, CONSTRAINT is_censor CHECK domainWithoutWWW(URL) = 'censor.net', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = TinyLog; +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('Hello'), ('test'); -- { serverError VIOLATED_CONSTRAINT } +SELECT count() FROM constrained; +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), ('https://censor.net/te\xFFst'); -- { serverError VIOLATED_CONSTRAINT } +SELECT count() FROM constrained; +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), (toValidUTF8('https://censor.net/te\xFFst')); +SELECT count() FROM constrained; +DROP TABLE constrained; + +CREATE TABLE constrained (URL String, CONSTRAINT is_censor CHECK domainWithoutWWW(URL) = 'censor.net', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = Log; +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('Hello'), ('test'); -- { serverError VIOLATED_CONSTRAINT } +SELECT count() FROM constrained; +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), ('https://censor.net/te\xFFst'); -- { serverError VIOLATED_CONSTRAINT } +SELECT count() FROM constrained; +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('ftp://censor.net/Hello'), (toValidUTF8('https://censor.net/te\xFFst')); +SELECT count() FROM constrained; +DROP TABLE constrained; + + +DROP TABLE IF EXISTS constrained2; +CREATE TABLE constrained (URL String, CONSTRAINT is_censor CHECK domainWithoutWWW(URL) = 'censor.net', CONSTRAINT is_utf8 CHECK isValidUTF8(URL)) ENGINE = Log; +CREATE TABLE constrained2 AS constrained; +SHOW CREATE TABLE constrained; +SHOW CREATE TABLE constrained2; +INSERT INTO constrained VALUES ('https://www.censor.net/?q=upyachka'), ('Hello'), ('test'); -- { serverError VIOLATED_CONSTRAINT } +INSERT INTO constrained2 VALUES ('https://www.censor.net/?q=upyachka'), ('Hello'), ('test'); -- { serverError VIOLATED_CONSTRAINT } +DROP TABLE constrained; +DROP TABLE constrained2; diff --git a/parser/testdata/00999_full_join_dup_keys_crash/ast.json b/parser/testdata/00999_full_join_dup_keys_crash/ast.json new file mode 100644 index 000000000..cbaa31448 --- /dev/null +++ b/parser/testdata/00999_full_join_dup_keys_crash/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001220261, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00999_full_join_dup_keys_crash/metadata.json b/parser/testdata/00999_full_join_dup_keys_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00999_full_join_dup_keys_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00999_full_join_dup_keys_crash/query.sql b/parser/testdata/00999_full_join_dup_keys_crash/query.sql new file mode 100644 index 000000000..cd323a3cc --- /dev/null +++ b/parser/testdata/00999_full_join_dup_keys_crash/query.sql @@ -0,0 +1,63 @@ +SET join_use_nulls = 0; + +SELECT * FROM (SELECT 1 AS a, 2 AS b) AS foo FULL JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.a = bar.b) AND (foo.b = bar.b) ORDER BY foo.a, foo.b, bar.a, bar.b; +SELECT '-'; +SELECT * FROM (SELECT 1 AS a, 2 AS b) AS foo RIGHT JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.a = bar.b) AND (foo.b = bar.b) ORDER BY foo.a, foo.b, bar.a, 
bar.b; +SELECT '-'; + +SELECT * FROM (SELECT 1 AS a, 2 AS b) AS foo FULL JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.b = bar.a) AND (foo.b = bar.b) ORDER BY foo.a, foo.b, bar.a, bar.b; +SELECT '-'; +SELECT * FROM (SELECT 1 AS a, 2 AS b) AS foo RIGHT JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.b = bar.a) AND (foo.b = bar.b) ORDER BY foo.a, foo.b, bar.a, bar.b; +SELECT '-'; + +SELECT foo.a FROM (SELECT 1 AS a, 2 AS b) AS foo FULL JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.a = bar.b) AND (foo.b = bar.b) ORDER BY foo.a; +SELECT '-'; +SELECT foo.a FROM (SELECT 1 AS a, 2 AS b) AS foo RIGHT JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.a = bar.b) AND (foo.b = bar.b) ORDER BY foo.a; +SELECT '-'; + +SELECT foo.a FROM (SELECT 1 AS a, 2 AS b) AS foo FULL JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.b = bar.a) AND (foo.b = bar.b) ORDER BY foo.a; +SELECT '-'; +SELECT foo.a FROM (SELECT 1 AS a, 2 AS b) AS foo RIGHT JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.b = bar.a) AND (foo.b = bar.b) ORDER BY foo.a; +SELECT '-'; + +SELECT bar.a FROM (SELECT 1 AS a, 2 AS b) AS foo FULL JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.a = bar.b) AND (foo.b = bar.b) ORDER BY bar.a; +SELECT '-'; +SELECT bar.a FROM (SELECT 1 AS a, 2 AS b) AS foo RIGHT JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.a = bar.b) AND (foo.b = bar.b) ORDER BY bar.a; +SELECT '-'; + +SELECT bar.a FROM (SELECT 1 AS a, 2 AS b) AS foo FULL JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.b = bar.a) AND (foo.b = bar.b) ORDER BY bar.a; +SELECT '-'; +SELECT bar.a FROM (SELECT 1 AS a, 2 AS b) AS foo RIGHT JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.b = bar.a) AND (foo.b = bar.b) ORDER BY bar.a; +SELECT '-'; + +SET join_use_nulls = 1; + +SELECT * FROM (SELECT 1 AS a, 2 AS b) AS foo FULL JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.a = bar.b) AND (foo.b = bar.b) ORDER BY foo.a, foo.b, bar.a, bar.b; +SELECT '-'; +SELECT * FROM (SELECT 1 AS a, 2 AS b) AS foo RIGHT JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.a = bar.b) AND (foo.b = bar.b) ORDER BY foo.a, foo.b, bar.a, bar.b; +SELECT '-'; + +SELECT * FROM (SELECT 1 AS a, 2 AS b) AS foo FULL JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.b = bar.a) AND (foo.b = bar.b) ORDER BY foo.a, foo.b, bar.a, bar.b; +SELECT '-'; +SELECT * FROM (SELECT 1 AS a, 2 AS b) AS foo RIGHT JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.b = bar.a) AND (foo.b = bar.b) ORDER BY foo.a, foo.b, bar.a, bar.b; +SELECT '-'; + +SELECT foo.a FROM (SELECT 1 AS a, 2 AS b) AS foo FULL JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.a = bar.b) AND (foo.b = bar.b) ORDER BY foo.a; +SELECT '-'; +SELECT foo.a FROM (SELECT 1 AS a, 2 AS b) AS foo RIGHT JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.a = bar.b) AND (foo.b = bar.b) ORDER BY foo.a; +SELECT '-'; + +SELECT foo.a FROM (SELECT 1 AS a, 2 AS b) AS foo FULL JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.b = bar.a) AND (foo.b = bar.b) ORDER BY foo.a; +SELECT '-'; +SELECT foo.a FROM (SELECT 1 AS a, 2 AS b) AS foo RIGHT JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.b = bar.a) AND (foo.b = bar.b) ORDER BY foo.a; +SELECT '-'; + +SELECT bar.a FROM (SELECT 1 AS a, 2 AS b) AS foo FULL JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.a = bar.b) AND (foo.b = bar.b) ORDER BY bar.a; +SELECT '-'; +SELECT bar.a FROM (SELECT 1 AS a, 2 AS b) AS foo RIGHT JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.a = bar.b) AND (foo.b = bar.b) ORDER BY bar.a; +SELECT '-'; + +SELECT bar.a FROM (SELECT 1 AS a, 2 AS b) AS foo FULL JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.b = bar.a) AND (foo.b = bar.b) ORDER BY bar.a; +SELECT '-'; +SELECT bar.a FROM (SELECT 1 AS a, 
2 AS b) AS foo RIGHT JOIN (SELECT 1 AS a, 2 AS b) AS bar ON (foo.b = bar.a) AND (foo.b = bar.b) ORDER BY bar.a; +SELECT '-'; diff --git a/parser/testdata/00999_join_not_nullable_types/ast.json b/parser/testdata/00999_join_not_nullable_types/ast.json new file mode 100644 index 000000000..a13df4ecb --- /dev/null +++ b/parser/testdata/00999_join_not_nullable_types/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001200111, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/00999_join_not_nullable_types/metadata.json b/parser/testdata/00999_join_not_nullable_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00999_join_not_nullable_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00999_join_not_nullable_types/query.sql b/parser/testdata/00999_join_not_nullable_types/query.sql new file mode 100644 index 000000000..523996eca --- /dev/null +++ b/parser/testdata/00999_join_not_nullable_types/query.sql @@ -0,0 +1,34 @@ +SET join_use_nulls = 1; + +SELECT * FROM +( + SELECT number, ['left'] as ar, number AS left_number FROM system.numbers LIMIT 2 +) js1 +FULL JOIN +( + SELECT number, ['right'] as ar, number AS right_number FROM system.numbers LIMIT 1, 2 +) js2 +USING (number) +ORDER BY number; + +SELECT * FROM +( + SELECT ['left'] as ar, number AS left_number FROM system.numbers LIMIT 2 +) js1 +FULL JOIN +( + SELECT ['right'] as ar, number AS right_number FROM system.numbers LIMIT 1, 2 +) js2 +ON left_number = right_number +ORDER BY left_number; + +SELECT * FROM +( + SELECT ['left'] as ar, 42 AS left_number +) js1 +FULL JOIN +( + SELECT ['right'] as ar, 42 AS right_number +) js2 +USING(ar) +ORDER BY left_number; diff --git a/parser/testdata/00999_join_on_expression/ast.json b/parser/testdata/00999_join_on_expression/ast.json new file mode 100644 index 000000000..e94bad06d --- /dev/null +++ b/parser/testdata/00999_join_on_expression/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery X (children 1)" + }, + { + "explain": " Identifier X" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001144492, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/00999_join_on_expression/metadata.json b/parser/testdata/00999_join_on_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00999_join_on_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00999_join_on_expression/query.sql b/parser/testdata/00999_join_on_expression/query.sql new file mode 100644 index 000000000..6e8909b1c --- /dev/null +++ b/parser/testdata/00999_join_on_expression/query.sql @@ -0,0 +1,54 @@ +drop table if exists X; +drop table if exists Y; +create table X (id Int64) Engine = MergeTree ORDER BY tuple(); +create table Y (id Int64) Engine = MergeTree ORDER BY tuple(); + +insert into X (id) values (1); +insert into Y (id) values (2); + +set join_use_nulls = 0; + +select X.id, Y.id from X right join Y on X.id = Y.id order by X.id, Y.id; +select '-'; +select X.id, Y.id from X full join Y on Y.id = X.id order by X.id, Y.id; +select '-'; + +select X.id, Y.id from X right join Y on X.id = (Y.id - 1) order by X.id, Y.id; +select '-'; +select X.id, Y.id from X full join Y on (Y.id - 1) = X.id order 
by X.id, Y.id; +select '-'; + +select X.id, Y.id from X right join Y on (X.id + 1) = Y.id order by X.id, Y.id; +select '-'; +select X.id, Y.id from X full join Y on Y.id = (X.id + 1) order by X.id, Y.id; +select '-'; + +select X.id, Y.id from X right join Y on (X.id + 1) = (Y.id + 1) order by X.id, Y.id; +select '-'; +select X.id, Y.id from X full join Y on (Y.id + 1) = (X.id + 1) order by X.id, Y.id; +select '----'; + +set join_use_nulls = 1; + +select X.id, Y.id from X right join Y on X.id = Y.id order by X.id, Y.id; +select '-'; +select X.id, Y.id from X full join Y on Y.id = X.id order by X.id, Y.id; +select '-'; + +select X.id, Y.id from X right join Y on X.id = (Y.id - 1) order by X.id, Y.id; +select '-'; +select X.id, Y.id from X full join Y on (Y.id - 1) = X.id order by X.id, Y.id; +select '-'; + +select X.id, Y.id from X right join Y on (X.id + 1) = Y.id order by X.id, Y.id; +select '-'; +select X.id, Y.id from X full join Y on Y.id = (X.id + 1) order by X.id, Y.id; +select '-'; + +select X.id, Y.id from X right join Y on (X.id + 1) = (Y.id + 1) order by X.id, Y.id; +select '-'; +select X.id, Y.id from X full join Y on (Y.id + 1) = (X.id + 1) order by X.id, Y.id; +select '-'; + +drop table X; +drop table Y; diff --git a/parser/testdata/00999_nullable_nested_types_4877/ast.json b/parser/testdata/00999_nullable_nested_types_4877/ast.json new file mode 100644 index 000000000..ac3888c28 --- /dev/null +++ b/parser/testdata/00999_nullable_nested_types_4877/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery l (children 1)" + }, + { + "explain": " Identifier l" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001210449, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/00999_nullable_nested_types_4877/metadata.json b/parser/testdata/00999_nullable_nested_types_4877/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00999_nullable_nested_types_4877/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00999_nullable_nested_types_4877/query.sql b/parser/testdata/00999_nullable_nested_types_4877/query.sql new file mode 100644 index 000000000..cebd1721c --- /dev/null +++ b/parser/testdata/00999_nullable_nested_types_4877/query.sql @@ -0,0 +1,41 @@ +DROP TABLE IF EXISTS l; +DROP TABLE IF EXISTS r; + +CREATE TABLE l (a String, b Tuple(String, String)) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE r (a String, c Tuple(String, String)) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO l (a, b) VALUES ('a', ('b', 'c')), ('d', ('e', 'f')); +INSERT INTO r (a, c) VALUES ('a', ('b', 'c')), ('x', ('y', 'z')); + +SET join_use_nulls = 0; +SELECT * from l LEFT JOIN r USING a ORDER BY a; +SELECT a from l RIGHT JOIN r USING a ORDER BY a; +SELECT * from l RIGHT JOIN r USING a ORDER BY a; + +SET join_use_nulls = 1; +SELECT a from l LEFT JOIN r USING a ORDER BY a; +SELECT a from l RIGHT JOIN r USING a ORDER BY a; +SELECT * from l LEFT JOIN r USING a ORDER BY a; +SELECT * from l RIGHT JOIN r USING a ORDER BY a; + +DROP TABLE l; +DROP TABLE r; + +CREATE TABLE l (a String, b String) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE r (a String, c Array(String)) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO l (a, b) VALUES ('a', 'b'), ('d', 'e'); +INSERT INTO r (a, c) VALUES ('a', ['b', 'c']), ('x', ['y', 'z']); + +SET join_use_nulls = 0; +SELECT * from l LEFT JOIN r USING a ORDER BY a; +SELECT * from l RIGHT JOIN 
r USING a ORDER BY a; + +SET join_use_nulls = 1; +SELECT a from l LEFT JOIN r USING a ORDER BY a; +SELECT a from l RIGHT JOIN r USING a ORDER BY a; +SELECT * from l LEFT JOIN r USING a ORDER BY a; +SELECT * from l RIGHT JOIN r USING a ORDER BY a; + +DROP TABLE l; +DROP TABLE r; diff --git a/parser/testdata/00999_settings_no_extra_quotes/ast.json b/parser/testdata/00999_settings_no_extra_quotes/ast.json new file mode 100644 index 000000000..a9a892923 --- /dev/null +++ b/parser/testdata/00999_settings_no_extra_quotes/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function like (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier description" + }, + { + "explain": " Literal '\"%\"'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.settings" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001238797, + "rows_read": 12, + "bytes_read": 470 + } +} diff --git a/parser/testdata/00999_settings_no_extra_quotes/metadata.json b/parser/testdata/00999_settings_no_extra_quotes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00999_settings_no_extra_quotes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00999_settings_no_extra_quotes/query.sql b/parser/testdata/00999_settings_no_extra_quotes/query.sql new file mode 100644 index 000000000..55d9ff278 --- /dev/null +++ b/parser/testdata/00999_settings_no_extra_quotes/query.sql @@ -0,0 +1 @@ +SELECT DISTINCT description LIKE '"%"' FROM system.settings; diff --git a/parser/testdata/00999_test_skip_indices_with_alter_and_merge/ast.json b/parser/testdata/00999_test_skip_indices_with_alter_and_merge/ast.json new file mode 100644 index 000000000..032cb0f09 --- /dev/null +++ b/parser/testdata/00999_test_skip_indices_with_alter_and_merge/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_vertical_merge (children 1)" + }, + { + "explain": " Identifier test_vertical_merge" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001619283, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/00999_test_skip_indices_with_alter_and_merge/metadata.json b/parser/testdata/00999_test_skip_indices_with_alter_and_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/00999_test_skip_indices_with_alter_and_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/00999_test_skip_indices_with_alter_and_merge/query.sql b/parser/testdata/00999_test_skip_indices_with_alter_and_merge/query.sql new file mode 100644 index 000000000..596e0d9cb --- /dev/null +++ b/parser/testdata/00999_test_skip_indices_with_alter_and_merge/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS test_vertical_merge; + +CREATE TABLE test_vertical_merge ( + k UInt64, + val1 UInt64, + val2 UInt64, + INDEX idx1 val1 * val2 TYPE minmax GRANULARITY 1, + INDEX idx2 val1 * k TYPE minmax GRANULARITY 1 +) ENGINE MergeTree() +ORDER BY k 
+SETTINGS vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 1; + +INSERT INTO test_vertical_merge SELECT number, number + 5, number * 12 from numbers(1000); + +SELECT COUNT() from test_vertical_merge WHERE val2 <= 2400; + +OPTIMIZE TABLE test_vertical_merge FINAL; + +SELECT COUNT() from test_vertical_merge WHERE val2 <= 2400; + +DROP TABLE IF EXISTS test_vertical_merge; diff --git a/parser/testdata/01000_bad_size_of_marks_skip_idx/ast.json b/parser/testdata/01000_bad_size_of_marks_skip_idx/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01000_bad_size_of_marks_skip_idx/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01000_bad_size_of_marks_skip_idx/metadata.json b/parser/testdata/01000_bad_size_of_marks_skip_idx/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01000_bad_size_of_marks_skip_idx/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01000_bad_size_of_marks_skip_idx/query.sql b/parser/testdata/01000_bad_size_of_marks_skip_idx/query.sql new file mode 100644 index 000000000..a6006262b --- /dev/null +++ b/parser/testdata/01000_bad_size_of_marks_skip_idx/query.sql @@ -0,0 +1,27 @@ + +DROP TABLE IF EXISTS bad_skip_idx; + +CREATE TABLE bad_skip_idx +( + id UInt64, + value String +) ENGINE MergeTree() +ORDER BY id SETTINGS index_granularity_bytes = 64, min_index_granularity_bytes = 10, vertical_merge_algorithm_min_rows_to_activate = 0, vertical_merge_algorithm_min_columns_to_activate = 0; -- actually vertical merge is not a required condition for this bug, but it makes it easier to reproduce (because we don't recalculate granularities) + +-- 7 rows per granule +INSERT INTO bad_skip_idx SELECT number, concat('x', toString(number)) FROM numbers(1000); + +-- 3 rows per granule +INSERT INTO bad_skip_idx SELECT number, concat('xxxxxxxxxx', toString(number)) FROM numbers(1000,1000); + +SELECT COUNT(*) from bad_skip_idx WHERE value = 'xxxxxxxxxx1015'; -- check no exception + +INSERT INTO bad_skip_idx SELECT number, concat('x', toString(number)) FROM numbers(1000); + +ALTER TABLE bad_skip_idx ADD INDEX idx value TYPE bloom_filter(0.01) GRANULARITY 4; + +OPTIMIZE TABLE bad_skip_idx FINAL; + +SELECT COUNT(*) from bad_skip_idx WHERE value = 'xxxxxxxxxx1015'; -- check no exception + +DROP TABLE IF EXISTS bad_skip_idx; diff --git a/parser/testdata/01000_subquery_requires_alias/ast.json b/parser/testdata/01000_subquery_requires_alias/ast.json new file mode 100644 index 000000000..04ddf9b76 --- /dev/null +++ b/parser/testdata/01000_subquery_requires_alias/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001351679, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01000_subquery_requires_alias/metadata.json b/parser/testdata/01000_subquery_requires_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01000_subquery_requires_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01000_subquery_requires_alias/query.sql b/parser/testdata/01000_subquery_requires_alias/query.sql new file mode 100644 index 000000000..38ba1798d --- /dev/null +++ b/parser/testdata/01000_subquery_requires_alias/query.sql @@ -0,0 +1,20 @@ +SET enable_analyzer = 1; +SET joined_subquery_requires_alias = 1; + +SELECT *
FROM (SELECT 1 as A, 2 as B) X +ALL LEFT JOIN (SELECT 3 as A, 2 as B) Y +USING (B); + +SELECT * FROM (SELECT 1 as A, 2 as B) X +ALL LEFT JOIN (SELECT 3 as A, 2 as B) +USING (B); -- { serverError ALIAS_REQUIRED } + +SELECT * FROM (SELECT 1 as A, 2 as B) +ALL LEFT JOIN (SELECT 3 as A, 2 as B) Y +USING (B); -- { serverError ALIAS_REQUIRED } + +set joined_subquery_requires_alias = 0; + +SELECT * FROM (SELECT 1 as A, 2 as B) +ALL LEFT JOIN (SELECT 3 as A, 2 as B) Y +USING (B); diff --git a/parser/testdata/01001_enums_in_in_section/ast.json b/parser/testdata/01001_enums_in_in_section/ast.json new file mode 100644 index 000000000..6c90c2ddd --- /dev/null +++ b/parser/testdata/01001_enums_in_in_section/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery enums (children 1)" + }, + { + "explain": " Identifier enums" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001674102, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/01001_enums_in_in_section/metadata.json b/parser/testdata/01001_enums_in_in_section/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01001_enums_in_in_section/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01001_enums_in_in_section/query.sql b/parser/testdata/01001_enums_in_in_section/query.sql new file mode 100644 index 000000000..d9932421f --- /dev/null +++ b/parser/testdata/01001_enums_in_in_section/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS enums; +CREATE TABLE enums AS VALUES('x Enum8(\'hello\' = 0, \'world\' = 1, \'foo\' = -1), y String', ('hello', 'find me'), (0, 'and me'), (-1, 'also me'), ('world', 'don\'t find me')); +SELECT y FROM enums WHERE x IN (0, -1); +SELECT y FROM enums WHERE x IN ('hello', -1); +DROP TABLE enums; diff --git a/parser/testdata/01006_ttl_with_default_2/ast.json b/parser/testdata/01006_ttl_with_default_2/ast.json new file mode 100644 index 000000000..673df1019 --- /dev/null +++ b/parser/testdata/01006_ttl_with_default_2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ttl_with_default (children 1)" + }, + { + "explain": " Identifier ttl_with_default" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001244135, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/01006_ttl_with_default_2/metadata.json b/parser/testdata/01006_ttl_with_default_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01006_ttl_with_default_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01006_ttl_with_default_2/query.sql b/parser/testdata/01006_ttl_with_default_2/query.sql new file mode 100644 index 000000000..cbc226352 --- /dev/null +++ b/parser/testdata/01006_ttl_with_default_2/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS ttl_with_default; + +CREATE TABLE ttl_with_default (d DateTime, a Int default 777 ttl d + interval 5 SECOND) ENGINE = MergeTree ORDER BY d; +INSERT INTO ttl_with_default VALUES (now() - 1000, 1) (now() - 1000, 2) (now() + 1000, 3)(now() + 1000, 4); +SELECT sleep(0.7) FORMAT Null; -- wait in case a very fast merge happens +OPTIMIZE TABLE ttl_with_default FINAL; + +-- check that after the second merge the user defaults are still in the column +SELECT sleep(0.7) FORMAT Null; +OPTIMIZE TABLE ttl_with_default FINAL; + +SELECT a FROM ttl_with_default ORDER BY a; + +DROP TABLE
ttl_with_default; diff --git a/parser/testdata/01008_materialized_view_henyihanwobushi/ast.json b/parser/testdata/01008_materialized_view_henyihanwobushi/ast.json new file mode 100644 index 000000000..70f9b28df --- /dev/null +++ b/parser/testdata/01008_materialized_view_henyihanwobushi/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery foo (children 1)" + }, + { + "explain": " Identifier foo" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.0014109, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01008_materialized_view_henyihanwobushi/metadata.json b/parser/testdata/01008_materialized_view_henyihanwobushi/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01008_materialized_view_henyihanwobushi/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01008_materialized_view_henyihanwobushi/query.sql b/parser/testdata/01008_materialized_view_henyihanwobushi/query.sql new file mode 100644 index 000000000..1e91f6daf --- /dev/null +++ b/parser/testdata/01008_materialized_view_henyihanwobushi/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS foo; +DROP TABLE IF EXISTS bar; +DROP TABLE IF EXISTS view_foo_bar; + +set allow_deprecated_syntax_for_merge_tree=1; +create table foo (ddate Date, id Int64, n String) ENGINE = ReplacingMergeTree(ddate, (id), 8192); +create table bar (ddate Date, id Int64, n String, foo_id Int64) ENGINE = ReplacingMergeTree(ddate, (id), 8192); +insert into bar (id, n, foo_id) values (1, 'bar_n_1', 1); +create MATERIALIZED view view_foo_bar ENGINE = ReplacingMergeTree(ddate, (bar_id), 8192) as select ddate, bar_id, bar_n, foo_id, foo_n from (select ddate, id as bar_id, n as bar_n, foo_id from bar) js1 any left join (select id as foo_id, n as foo_n from foo) js2 using foo_id; +insert into bar (id, n, foo_id) values (1, 'bar_n_1', 1); +SELECT * FROM view_foo_bar; + +DROP TABLE foo; +DROP TABLE bar; +DROP TABLE view_foo_bar; diff --git a/parser/testdata/01009_global_array_join_names/ast.json b/parser/testdata/01009_global_array_join_names/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01009_global_array_join_names/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01009_global_array_join_names/metadata.json b/parser/testdata/01009_global_array_join_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01009_global_array_join_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01009_global_array_join_names/query.sql b/parser/testdata/01009_global_array_join_names/query.sql new file mode 100644 index 000000000..26e7c68ed --- /dev/null +++ b/parser/testdata/01009_global_array_join_names/query.sql @@ -0,0 +1,21 @@ +-- Tags: global + +DROP TABLE IF EXISTS test1; +DROP TABLE IF EXISTS test2; + +CREATE TABLE test1 (a UInt8, b Array(DateTime)) ENGINE Memory; +CREATE TABLE test2 as test1 ENGINE Distributed(test_shard_localhost, currentDatabase(), test1); + +INSERT INTO test1 VALUES (1, [1, 2, 3]); + +SELECT 1 +FROM test2 AS test2 +ARRAY JOIN arrayFilter(t -> (t GLOBAL IN + ( + SELECT DISTINCT now() AS `ym:a` + WHERE 1 + )), test2.b) AS test2_b +WHERE 1; + +DROP TABLE test1; +DROP TABLE test2; diff --git a/parser/testdata/01009_insert_select_data_loss/ast.json b/parser/testdata/01009_insert_select_data_loss/ast.json new file mode 100644 index 000000000..beb1eafff 
--- /dev/null +++ b/parser/testdata/01009_insert_select_data_loss/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001002066, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01009_insert_select_data_loss/metadata.json b/parser/testdata/01009_insert_select_data_loss/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01009_insert_select_data_loss/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01009_insert_select_data_loss/query.sql b/parser/testdata/01009_insert_select_data_loss/query.sql new file mode 100644 index 000000000..cbf87c4d0 --- /dev/null +++ b/parser/testdata/01009_insert_select_data_loss/query.sql @@ -0,0 +1,9 @@ +drop table if exists tab; +create table tab (x UInt64) engine = MergeTree order by tuple(); + +insert into tab select n from (SELECT number AS n FROM numbers(20)) nums +semi left join (select number * 10 as n from numbers(2)) js2 using(n) +settings max_block_size = 5; +select * from tab order by x; + +drop table tab; diff --git a/parser/testdata/01009_insert_select_nicelulu/ast.json b/parser/testdata/01009_insert_select_nicelulu/ast.json new file mode 100644 index 000000000..c63668006 --- /dev/null +++ b/parser/testdata/01009_insert_select_nicelulu/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_insert_t1 (children 1)" + }, + { + "explain": " Identifier test_insert_t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001343014, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/01009_insert_select_nicelulu/metadata.json b/parser/testdata/01009_insert_select_nicelulu/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01009_insert_select_nicelulu/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01009_insert_select_nicelulu/query.sql b/parser/testdata/01009_insert_select_nicelulu/query.sql new file mode 100644 index 000000000..3fe7ec04e --- /dev/null +++ b/parser/testdata/01009_insert_select_nicelulu/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS test_insert_t1; +DROP TABLE IF EXISTS test_insert_t2; +DROP TABLE IF EXISTS test_insert_t3; + +CREATE TABLE test_insert_t1 (`dt` Date, `uid` String, `name` String, `city` String) ENGINE = MergeTree PARTITION BY toYYYYMMDD(dt) ORDER BY name SETTINGS index_granularity = 8192; +CREATE TABLE test_insert_t2 (`dt` Date, `uid` String) ENGINE = MergeTree PARTITION BY toYYYYMMDD(dt) ORDER BY uid SETTINGS index_granularity = 8192; +CREATE TABLE test_insert_t3 (`dt` Date, `uid` String, `name` String, `city` String) ENGINE = MergeTree PARTITION BY toYYYYMMDD(dt) ORDER BY name SETTINGS index_granularity = 8192; + +INSERT INTO test_insert_t1 SELECT '2019-09-01',toString(number),toString(rand()),toString(rand()) FROM system.numbers WHERE number > 10 limit 1000000; +INSERT INTO test_insert_t2 SELECT '2019-09-01',toString(number) FROM system.numbers WHERE number >=0 limit 200; +INSERT INTO test_insert_t2 SELECT '2019-09-01',toString(number) FROM system.numbers WHERE number >=100000 limit 200; +INSERT INTO test_insert_t2 SELECT '2019-09-01',toString(number) FROM system.numbers WHERE number >=300000 limit 200; +INSERT INTO test_insert_t2 SELECT 
'2019-09-01',toString(number) FROM system.numbers WHERE number >=500000 limit 200; +INSERT INTO test_insert_t2 SELECT '2019-09-01',toString(number) FROM system.numbers WHERE number >=700000 limit 200; +INSERT INTO test_insert_t2 SELECT '2019-09-01',toString(number) FROM system.numbers WHERE number >=900000 limit 200; + +INSERT INTO test_insert_t3 SELECT '2019-09-01', uid, name, city FROM ( SELECT dt, uid, name, city FROM test_insert_t1 WHERE dt = '2019-09-01') t1 GLOBAL SEMI LEFT JOIN (SELECT uid FROM test_insert_t2 WHERE dt = '2019-09-01') t2 ON t1.uid=t2.uid; + +SELECT count(*) FROM test_insert_t3; + +DROP TABLE test_insert_t1; +DROP TABLE test_insert_t2; +DROP TABLE test_insert_t3; diff --git a/parser/testdata/01010_partial_merge_join/ast.json b/parser/testdata/01010_partial_merge_join/ast.json new file mode 100644 index 000000000..67b57af0d --- /dev/null +++ b/parser/testdata/01010_partial_merge_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001311402, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01010_partial_merge_join/metadata.json b/parser/testdata/01010_partial_merge_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01010_partial_merge_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01010_partial_merge_join/query.sql b/parser/testdata/01010_partial_merge_join/query.sql new file mode 100644 index 000000000..a978437bc --- /dev/null +++ b/parser/testdata/01010_partial_merge_join/query.sql @@ -0,0 +1,164 @@ +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t0 (x UInt32, y UInt64) engine = MergeTree ORDER BY (x,y); +CREATE TABLE t1 (x UInt32, y UInt64) engine = MergeTree ORDER BY (x,y); +CREATE TABLE t2 (x UInt32, y UInt64) engine = MergeTree ORDER BY (x,y); + +INSERT INTO t1 (x, y) VALUES (0, 0); + +SET join_algorithm = 'prefer_partial_merge'; +SET any_join_distinct_right_table_keys = 1; + +SELECT 't join none using'; +SELECT * FROM t1 ANY LEFT JOIN t0 USING (x) ORDER BY x; +SELECT '-'; +SELECT * FROM t1 LEFT JOIN t0 USING (x) ORDER BY x; +SELECT '-'; +SELECT * FROM t1 ANY INNER JOIN t0 USING (x) ORDER BY x; +SELECT '-'; +SELECT * FROM t1 INNER JOIN t0 USING (x) ORDER BY x; +SELECT 't join none on'; +SELECT * FROM t1 ANY LEFT JOIN t0 ON t1.x = t0.x ORDER BY x; +SELECT '-'; +SELECT * FROM t1 LEFT JOIN t0 ON t1.x = t0.x ORDER BY x; +SELECT '-'; +SELECT * FROM t1 ANY INNER JOIN t0 ON t1.x = t0.x ORDER BY x; +SELECT '-'; +SELECT * FROM t1 INNER JOIN t0 ON t1.x = t0.x ORDER BY x; +SELECT 'none join t using'; +SELECT * FROM t0 ANY LEFT JOIN t1 USING (x); +SELECT * FROM t0 LEFT JOIN t1 USING (x); +SELECT * FROM t0 ANY INNER JOIN t1 USING (x); +SELECT * FROM t0 INNER JOIN t1 USING (x); +SELECT 'none join t on'; +SELECT * FROM t0 ANY LEFT JOIN t1 ON t1.x = t0.x; +SELECT * FROM t0 LEFT JOIN t1 ON t1.x = t0.x; +SELECT * FROM t0 ANY INNER JOIN t1 ON t1.x = t0.x; +SELECT * FROM t0 INNER JOIN t1 ON t1.x = t0.x; +SELECT '/none'; + +SET join_use_nulls = 1; + +SELECT 't join none using'; +SELECT * FROM t1 ANY LEFT JOIN t0 USING (x) ORDER BY x; +SELECT '-'; +SELECT * FROM t1 LEFT JOIN t0 USING (x) ORDER BY x; +SELECT '-'; +SELECT * FROM t1 ANY INNER JOIN t0 USING (x) ORDER BY x; +SELECT '-'; +SELECT * FROM t1 INNER JOIN t0 USING (x) ORDER 
BY x; +SELECT 't join none on'; +SELECT * FROM t1 ANY LEFT JOIN t0 ON t1.x = t0.x ORDER BY x; +SELECT '-'; +SELECT * FROM t1 LEFT JOIN t0 ON t1.x = t0.x ORDER BY x; +SELECT '-'; +SELECT * FROM t1 ANY INNER JOIN t0 ON t1.x = t0.x ORDER BY x; +SELECT '-'; +SELECT * FROM t1 INNER JOIN t0 ON t1.x = t0.x ORDER BY x; +SELECT 'none join t using'; +SELECT * FROM t0 ANY LEFT JOIN t1 USING (x); +SELECT * FROM t0 LEFT JOIN t1 USING (x); +SELECT * FROM t0 ANY INNER JOIN t1 USING (x); +SELECT * FROM t0 INNER JOIN t1 USING (x); +SELECT 'none join t on'; +SELECT * FROM t0 ANY LEFT JOIN t1 ON t1.x = t0.x; +SELECT * FROM t0 LEFT JOIN t1 ON t1.x = t0.x; +SELECT * FROM t0 ANY INNER JOIN t1 ON t1.x = t0.x; +SELECT * FROM t0 INNER JOIN t1 ON t1.x = t0.x; +SELECT '/none'; + +INSERT INTO t1 (x, y) VALUES (1, 10) (2, 20); +INSERT INTO t1 (x, y) VALUES (4, 40) (3, 30); + +INSERT INTO t2 (x, y) VALUES (4, 41) (2, 21) (2, 22); +INSERT INTO t2 (x, y) VALUES (0, 0) (5, 50) (4, 42); + +SET join_use_nulls = 0; + +SELECT 'any left'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x,y) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x,y) ORDER BY x; + +SELECT 'all left'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x AND t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x AND toUInt32(intDiv(t1.y,10)) = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x AND toUInt64(t1.x) = intDiv(t2.y,10) ORDER BY x, t2.y; + +SELECT 'any inner'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x,y) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x,y) ORDER BY x; + +SELECT 'all inner'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x AND t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x AND toUInt32(intDiv(t1.y,10)) = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x AND toUInt64(t1.x) = intDiv(t2.y,10) ORDER BY x, t2.y; + +SET join_use_nulls = 1; + +SELECT 'any left'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x,y) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x,y) ORDER BY x; + +SELECT 'all left'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x AND t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x AND toUInt32(intDiv(t1.y,10)) = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x AND toUInt64(t1.x) = intDiv(t2.y,10) 
ORDER BY x, t2.y; + +SELECT 'any inner'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x,y) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x,y) ORDER BY x; + +SELECT 'all inner'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x AND t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x AND toUInt32(intDiv(t1.y,10)) = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x AND toUInt64(t1.x) = intDiv(t2.y,10) ORDER BY x, t2.y; + +DROP TABLE t0; +DROP TABLE t1; +DROP TABLE t2; diff --git a/parser/testdata/01010_partial_merge_join_const_and_lc/ast.json b/parser/testdata/01010_partial_merge_join_const_and_lc/ast.json new file mode 100644 index 000000000..77b85e35e --- /dev/null +++ b/parser/testdata/01010_partial_merge_join_const_and_lc/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001530699, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01010_partial_merge_join_const_and_lc/metadata.json b/parser/testdata/01010_partial_merge_join_const_and_lc/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01010_partial_merge_join_const_and_lc/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01010_partial_merge_join_const_and_lc/query.sql b/parser/testdata/01010_partial_merge_join_const_and_lc/query.sql new file mode 100644 index 000000000..515598971 --- /dev/null +++ b/parser/testdata/01010_partial_merge_join_const_and_lc/query.sql @@ -0,0 +1,23 @@ +SET join_algorithm = 'partial_merge'; + +select s1.x, s2.x from (select 1 as x) s1 left join (select 1 as x) s2 using x; +select * from (select materialize(2) as x) s1 left join (select 2 as x) s2 using x; +select * from (select 3 as x) s1 left join (select materialize(3) as x) s2 using x; +select * from (select toLowCardinality(4) as x) s1 left join (select 4 as x) s2 using x; +select * from (select 5 as x) s1 left join (select toLowCardinality(5) as x) s2 using x; + +SET join_algorithm = 'full_sorting_merge'; + +select s1.x, s2.x from (select 1 as x) s1 left join (select 1 as x) s2 using x; +select * from (select materialize(2) as x) s1 left join (select 2 as x) s2 using x; +select * from (select 3 as x) s1 left join (select materialize(3) as x) s2 using x; +select * from (select toLowCardinality(4) as x) s1 left join (select 4 as x) s2 using x; +select * from (select 5 as x) s1 left join (select toLowCardinality(5) as x) s2 using x; + +SET join_algorithm = 'grace_hash'; + +select s1.x, s2.x from (select 1 as x) s1 left join (select 1 as x) s2 using x; +select * from (select materialize(2) as x) s1 left join (select 2 as x) s2 using x; +select * from (select 3 as x) s1 left join (select materialize(3) as x) s2 using x; +select * from (select toLowCardinality(4) as x) s1 left join (select 4 as x) s2 using x; +select * from (select 5 as x) s1 left join (select toLowCardinality(5) as x) s2 using x; diff --git a/parser/testdata/01010_partial_merge_join_negative/ast.json 
b/parser/testdata/01010_partial_merge_join_negative/ast.json new file mode 100644 index 000000000..f35dfd698 --- /dev/null +++ b/parser/testdata/01010_partial_merge_join_negative/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000951379, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01010_partial_merge_join_negative/metadata.json b/parser/testdata/01010_partial_merge_join_negative/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01010_partial_merge_join_negative/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01010_partial_merge_join_negative/query.sql b/parser/testdata/01010_partial_merge_join_negative/query.sql new file mode 100644 index 000000000..757e1e929 --- /dev/null +++ b/parser/testdata/01010_partial_merge_join_negative/query.sql @@ -0,0 +1,55 @@ +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; + +CREATE TABLE t0 (x UInt32, y UInt64) engine = MergeTree ORDER BY (x,y); +CREATE TABLE t1 (x UInt32, y UInt64) engine = MergeTree ORDER BY (x,y); + +SET join_algorithm = 'partial_merge'; + +SELECT 'all'; + +SELECT * FROM t0 LEFT JOIN t1 ON t1.x = t0.x; +SELECT * FROM t0 INNER JOIN t1 ON t1.x = t0.x; +SELECT * FROM t0 RIGHT JOIN t1 ON t1.x = t0.x; +SELECT * FROM t0 FULL JOIN t1 ON t1.x = t0.x; + +SELECT * FROM t0 LEFT JOIN t1 USING x; +SELECT * FROM t0 INNER JOIN t1 USING x; +SELECT * FROM t0 RIGHT JOIN t1 USING x; +SELECT * FROM t0 FULL JOIN t1 USING x; + +SELECT 'any'; + +SELECT * FROM t0 ANY LEFT JOIN t1 ON t1.x = t0.x; +SELECT * FROM t0 ANY INNER JOIN t1 ON t1.x = t0.x; +SELECT * FROM t0 ANY RIGHT JOIN t1 ON t1.x = t0.x; -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t0 ANY FULL JOIN t1 ON t1.x = t0.x; -- { serverError NOT_IMPLEMENTED } + +SELECT * FROM t0 ANY LEFT JOIN t1 USING (x); +SELECT * FROM t0 ANY INNER JOIN t1 USING (x); +SELECT * FROM t0 ANY RIGHT JOIN t1 USING (x); -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t0 ANY FULL JOIN t1 USING (x); -- { serverError NOT_IMPLEMENTED } + +SELECT 'semi'; + +SELECT * FROM t0 SEMI LEFT JOIN t1 ON t1.x = t0.x; +SELECT * FROM t0 SEMI RIGHT JOIN t1 ON t1.x = t0.x; -- { serverError NOT_IMPLEMENTED } + +SELECT * FROM t0 SEMI LEFT JOIN t1 USING (x); +SELECT * FROM t0 SEMI RIGHT JOIN t1 USING (x); -- { serverError NOT_IMPLEMENTED } + +SELECT 'anti'; + +SELECT * FROM t0 ANTI LEFT JOIN t1 ON t1.x = t0.x; -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t0 ANTI RIGHT JOIN t1 ON t1.x = t0.x; -- { serverError NOT_IMPLEMENTED } + +SELECT * FROM t0 ANTI LEFT JOIN t1 USING (x); -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t0 ANTI RIGHT JOIN t1 USING (x); -- { serverError NOT_IMPLEMENTED } + +SELECT 'asof'; + +SELECT * FROM t0 ASOF LEFT JOIN t1 ON t1.x = t0.x AND t0.y > t1.y; -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t0 ASOF LEFT JOIN t1 USING (x, y); -- { serverError NOT_IMPLEMENTED } + +DROP TABLE t0; +DROP TABLE t1; diff --git a/parser/testdata/01010_pm_join_all_join_bug/ast.json b/parser/testdata/01010_pm_join_all_join_bug/ast.json new file mode 100644 index 000000000..0ce62e5bc --- /dev/null +++ b/parser/testdata/01010_pm_join_all_join_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ints (children 1)" + }, + { + "explain": " 
Identifier ints" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001341239, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01010_pm_join_all_join_bug/metadata.json b/parser/testdata/01010_pm_join_all_join_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01010_pm_join_all_join_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01010_pm_join_all_join_bug/query.sql b/parser/testdata/01010_pm_join_all_join_bug/query.sql new file mode 100644 index 000000000..278aa46a4 --- /dev/null +++ b/parser/testdata/01010_pm_join_all_join_bug/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS ints; +CREATE TABLE ints (i64 Int64, i32 Int32) ENGINE = Memory; + +SET join_algorithm = 'partial_merge'; + +INSERT INTO ints SELECT 1 AS i64, number AS i32 FROM numbers(2); + +SELECT * FROM ints l LEFT JOIN ints r USING i64 ORDER BY l.i32, r.i32; +SELECT '-'; +SELECT * FROM ints l INNER JOIN ints r USING i64 ORDER BY l.i32, r.i32; + +SELECT '-'; +SELECT count() FROM ( SELECT [1], count(1) ) AS t1 ALL RIGHT JOIN ( SELECT number AS s FROM numbers(2) ) AS t2 USING (s); -- { serverError NOT_FOUND_COLUMN_IN_BLOCK, UNKNOWN_IDENTIFIER } + +DROP TABLE ints; diff --git a/parser/testdata/01010_pmj_on_disk/ast.json b/parser/testdata/01010_pmj_on_disk/ast.json new file mode 100644 index 000000000..ec5b6ff69 --- /dev/null +++ b/parser/testdata/01010_pmj_on_disk/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001533257, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01010_pmj_on_disk/metadata.json b/parser/testdata/01010_pmj_on_disk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01010_pmj_on_disk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01010_pmj_on_disk/query.sql b/parser/testdata/01010_pmj_on_disk/query.sql new file mode 100644 index 000000000..fb21a158b --- /dev/null +++ b/parser/testdata/01010_pmj_on_disk/query.sql @@ -0,0 +1,60 @@ +SET join_algorithm = 'hash'; +SET query_plan_join_swap_table=false; + +SELECT n, j FROM (SELECT number as n FROM numbers(4)) nums +ANY LEFT JOIN ( + SELECT number * 2 AS n, number + 10 AS j + FROM numbers(4000) +) js2 +USING n +ORDER BY n; + +SET max_rows_in_join = 1000; + +SELECT n, j FROM (SELECT number AS n FROM numbers(4)) nums +ANY LEFT JOIN ( + SELECT number * 2 AS n, number + 10 AS j + FROM numbers(4000) +) js2 +USING n +ORDER BY n; -- { serverError SET_SIZE_LIMIT_EXCEEDED } + +SET join_algorithm = 'partial_merge'; + +SELECT n, j FROM (SELECT number as n FROM numbers(4)) nums +ANY LEFT JOIN ( + SELECT number * 2 AS n, number + 10 AS j + FROM numbers(4000) +) js2 +USING n +ORDER BY n; + +SET partial_merge_join_optimizations = 1; + +SELECT n, j FROM (SELECT number AS n FROM numbers(4)) nums +ANY LEFT JOIN ( + SELECT number * 2 AS n, number + 10 AS j + FROM numbers(4000) +) js2 +USING n +ORDER BY n; + +SET join_algorithm = 'auto'; + +SELECT n, j FROM (SELECT number AS n FROM numbers(4)) nums +ANY LEFT JOIN ( + SELECT number * 2 AS n, number + 10 AS j + FROM numbers(4000) +) js2 +USING n +ORDER BY n; + +SET max_rows_in_join = '10'; + +SELECT n, j FROM (SELECT number AS n FROM numbers(4)) nums +ANY LEFT JOIN ( + SELECT number * 2 AS n, number + 10 AS j + FROM numbers(4000) +) js2 +USING n +ORDER BY n; diff --git 
a/parser/testdata/01010_pmj_one_row_blocks/ast.json b/parser/testdata/01010_pmj_one_row_blocks/ast.json new file mode 100644 index 000000000..c33cbf7c3 --- /dev/null +++ b/parser/testdata/01010_pmj_one_row_blocks/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001214267, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01010_pmj_one_row_blocks/metadata.json b/parser/testdata/01010_pmj_one_row_blocks/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01010_pmj_one_row_blocks/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01010_pmj_one_row_blocks/query.sql b/parser/testdata/01010_pmj_one_row_blocks/query.sql new file mode 100644 index 000000000..23f468294 --- /dev/null +++ b/parser/testdata/01010_pmj_one_row_blocks/query.sql @@ -0,0 +1,106 @@ +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t0 (x UInt32, y UInt64) engine = MergeTree ORDER BY (x,y); +CREATE TABLE t1 (x UInt32, y UInt64) engine = MergeTree ORDER BY (x,y); +CREATE TABLE t2 (x UInt32, y UInt64) engine = MergeTree ORDER BY (x,y); + +SET join_algorithm = 'prefer_partial_merge'; +SET partial_merge_join_rows_in_right_blocks = 1; +SET any_join_distinct_right_table_keys = 1; + +INSERT INTO t1 (x, y) VALUES (0, 0); +INSERT INTO t1 (x, y) VALUES (1, 10) (2, 20); +INSERT INTO t1 (x, y) VALUES (4, 40) (3, 30); + +INSERT INTO t2 (x, y) VALUES (4, 41) (2, 21) (2, 22); +INSERT INTO t2 (x, y) VALUES (0, 0) (5, 50) (4, 42); + +SET join_use_nulls = 0; + +SELECT 'any left'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x,y) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x,y) ORDER BY x; + +SELECT 'all left'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x AND t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x AND toUInt32(intDiv(t1.y,10)) = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x AND toUInt64(t1.x) = intDiv(t2.y,10) ORDER BY x, t2.y; + +SELECT 'any inner'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x,y) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x,y) ORDER BY x; + +SELECT 'all inner'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x AND t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x AND toUInt32(intDiv(t1.y,10)) = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x AND toUInt64(t1.x) = intDiv(t2.y,10) ORDER BY x, t2.y; + +SET join_use_nulls = 1; + +SELECT 'any left'; +SELECT t1.*, t2.x FROM t1 ANY 
LEFT JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x,y) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x,y) ORDER BY x; + +SELECT 'all left'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x AND t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x AND toUInt32(intDiv(t1.y,10)) = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x AND toUInt64(t1.x) = intDiv(t2.y,10) ORDER BY x, t2.y; + +SELECT 'any inner'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x,y) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x,y) ORDER BY x; + +SELECT 'all inner'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x AND t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x AND toUInt32(intDiv(t1.y,10)) = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x AND toUInt64(t1.x) = intDiv(t2.y,10) ORDER BY x, t2.y; + +DROP TABLE t0; +DROP TABLE t1; +DROP TABLE t2; diff --git a/parser/testdata/01010_pmj_right_table_memory_limits/ast.json b/parser/testdata/01010_pmj_right_table_memory_limits/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01010_pmj_right_table_memory_limits/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01010_pmj_right_table_memory_limits/metadata.json b/parser/testdata/01010_pmj_right_table_memory_limits/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01010_pmj_right_table_memory_limits/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01010_pmj_right_table_memory_limits/query.sql b/parser/testdata/01010_pmj_right_table_memory_limits/query.sql new file mode 100644 index 000000000..b8f2596f3 --- /dev/null +++ b/parser/testdata/01010_pmj_right_table_memory_limits/query.sql @@ -0,0 +1,79 @@ +-- Tags: no-parallel, no-fasttest, no-random-settings + +SET max_bytes_in_join = 0; +SET max_rows_in_join = 0; +SET max_memory_usage = 32000000; +SET join_on_disk_max_files_to_merge = 4; + +SELECT n, j FROM +( + SELECT number * 200000 as n FROM numbers(5) +) nums +ANY LEFT JOIN ( + SELECT number * 2 AS n, number AS j + FROM numbers(1000000) +) js2 +USING n; -- { serverError MEMORY_LIMIT_EXCEEDED } + +SET join_algorithm = 'partial_merge'; +SET default_max_bytes_in_join = 0; + +SELECT n, j FROM +( + SELECT number * 200000 as n FROM numbers(5) +) nums +ANY LEFT JOIN ( + SELECT number * 2 AS n, number AS j + FROM numbers(1000000) +) js2 +USING n; -- { serverError PARAMETER_OUT_OF_BOUND } + +SELECT n, j FROM +( + SELECT number * 200000 as n FROM numbers(5) +) nums +ANY LEFT JOIN ( + SELECT number * 2 AS n, number AS j + FROM numbers(1000000) +) js2 +USING n +SETTINGS max_bytes_in_join = 30000000; -- { serverError MEMORY_LIMIT_EXCEEDED } + +SELECT n, 
j FROM +( + SELECT number * 200000 as n FROM numbers(5) +) nums +ANY LEFT JOIN ( + SELECT number * 2 AS n, number AS j + FROM numbers(1000000) +) js2 +USING n +ORDER BY n +SETTINGS max_bytes_in_join = 10000000; + +SET partial_merge_join_optimizations = 1; + +SELECT n, j FROM +( + SELECT number * 200000 as n FROM numbers(5) +) nums +LEFT JOIN ( + SELECT number * 2 AS n, number AS j + FROM numbers(1000000) +) js2 +USING n +ORDER BY n +SETTINGS max_rows_in_join = 100000; + +SET default_max_bytes_in_join = 10000000; + +SELECT n, j FROM +( + SELECT number * 200000 as n FROM numbers(5) +) nums +JOIN ( + SELECT number * 2 AS n, number AS j + FROM numbers(1000000) +) js2 +USING n +ORDER BY n; diff --git a/parser/testdata/01010_pmj_skip_blocks/ast.json b/parser/testdata/01010_pmj_skip_blocks/ast.json new file mode 100644 index 000000000..8ea35fa87 --- /dev/null +++ b/parser/testdata/01010_pmj_skip_blocks/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001181707, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01010_pmj_skip_blocks/metadata.json b/parser/testdata/01010_pmj_skip_blocks/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01010_pmj_skip_blocks/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01010_pmj_skip_blocks/query.sql b/parser/testdata/01010_pmj_skip_blocks/query.sql new file mode 100644 index 000000000..7815f711d --- /dev/null +++ b/parser/testdata/01010_pmj_skip_blocks/query.sql @@ -0,0 +1,106 @@ +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t0 (x UInt32, y UInt64) engine = MergeTree ORDER BY (x,y); +CREATE TABLE t1 (x UInt32, y UInt64) engine = MergeTree ORDER BY (x,y); +CREATE TABLE t2 (x UInt32, y UInt64) engine = MergeTree ORDER BY (x,y); + +SET join_algorithm = 'prefer_partial_merge'; +SET partial_merge_join_optimizations = 1; +SET any_join_distinct_right_table_keys = 1; + +INSERT INTO t1 (x, y) VALUES (0, 0); +INSERT INTO t1 (x, y) VALUES (1, 10) (2, 20); +INSERT INTO t1 (x, y) VALUES (4, 40) (3, 30); + +INSERT INTO t2 (x, y) VALUES (4, 41) (2, 21) (2, 22); +INSERT INTO t2 (x, y) VALUES (0, 0) (5, 50) (4, 42); + +SET join_use_nulls = 0; + +SELECT 'any left'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x,y) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x,y) ORDER BY x; + +SELECT 'all left'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x AND t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x AND toUInt32(intDiv(t1.y,10)) = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x AND toUInt64(t1.x) = intDiv(t2.y,10) ORDER BY x, t2.y; + +SELECT 'any inner'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x,y) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x) ORDER BY x; +SELECT '-'; 
+SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x,y) ORDER BY x; + +SELECT 'all inner'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x AND t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x AND toUInt32(intDiv(t1.y,10)) = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x AND toUInt64(t1.x) = intDiv(t2.y,10) ORDER BY x, t2.y; + +SET join_use_nulls = 1; + +SELECT 'any left'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x,y) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY LEFT JOIN t2 USING (x,y) ORDER BY x; + +SELECT 'all left'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x AND t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x AND toUInt32(intDiv(t1.y,10)) = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.x = t2.x AND toUInt64(t1.x) = intDiv(t2.y,10) ORDER BY x, t2.y; + +SELECT 'any inner'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x,y) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x) ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.x FROM t1 ANY INNER JOIN t2 USING (x,y) ORDER BY x; + +SELECT 'all inner'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x AND t1.y = t2.y ORDER BY x; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x AND toUInt32(intDiv(t1.y,10)) = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 INNER JOIN t2 ON t1.x = t2.x AND toUInt64(t1.x) = intDiv(t2.y,10) ORDER BY x, t2.y; + +DROP TABLE t0; +DROP TABLE t1; +DROP TABLE t2; diff --git a/parser/testdata/01011_group_uniq_array_memsan/ast.json b/parser/testdata/01011_group_uniq_array_memsan/ast.json new file mode 100644 index 000000000..1e9a1967e --- /dev/null +++ b/parser/testdata/01011_group_uniq_array_memsan/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function groupUniqArray (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier v" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function values (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'id int, v Array(int)'" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " 
Literal UInt64_1" + }, + { + "explain": " Literal Array_[UInt64_2]" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier id" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001360763, + "rows_read": 24, + "bytes_read": 953 + } +} diff --git a/parser/testdata/01011_group_uniq_array_memsan/metadata.json b/parser/testdata/01011_group_uniq_array_memsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01011_group_uniq_array_memsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01011_group_uniq_array_memsan/query.sql b/parser/testdata/01011_group_uniq_array_memsan/query.sql new file mode 100644 index 000000000..b8c16e48c --- /dev/null +++ b/parser/testdata/01011_group_uniq_array_memsan/query.sql @@ -0,0 +1 @@ +select groupUniqArray(v) from values('id int, v Array(int)', (1, [2]), (1, [])) group by id; diff --git a/parser/testdata/01011_test_create_as_skip_indices/ast.json b/parser/testdata/01011_test_create_as_skip_indices/ast.json new file mode 100644 index 000000000..b930667ec --- /dev/null +++ b/parser/testdata/01011_test_create_as_skip_indices/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery foo (children 3)" + }, + { + "explain": " Identifier foo" + }, + { + "explain": " Columns definition (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration key (children 1)" + }, + { + "explain": " DataType int" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Index (children 2)" + }, + { + "explain": " Identifier key" + }, + { + "explain": " Function minmax (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier key" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001444356, + "rows_read": 15, + "bytes_read": 494 + } +} diff --git a/parser/testdata/01011_test_create_as_skip_indices/metadata.json b/parser/testdata/01011_test_create_as_skip_indices/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01011_test_create_as_skip_indices/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01011_test_create_as_skip_indices/query.sql b/parser/testdata/01011_test_create_as_skip_indices/query.sql new file mode 100644 index 000000000..ed2a50f81 --- /dev/null +++ b/parser/testdata/01011_test_create_as_skip_indices/query.sql @@ -0,0 +1,8 @@ +CREATE TABLE foo (key int, INDEX i1 key TYPE minmax GRANULARITY 1) Engine=MergeTree() ORDER BY key; +CREATE TABLE as_foo AS foo; +CREATE TABLE dist (key int, INDEX i1 key TYPE minmax GRANULARITY 1) Engine=Distributed(test_shard_localhost, currentDatabase(), 'foo'); -- { serverError BAD_ARGUMENTS } +CREATE TABLE dist_as_foo Engine=Distributed(test_shard_localhost, currentDatabase(), 'foo') AS foo; + +DROP TABLE foo; +DROP TABLE as_foo; +DROP TABLE dist_as_foo; diff --git a/parser/testdata/01012_reset_running_accumulate/ast.json b/parser/testdata/01012_reset_running_accumulate/ast.json new 
file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01012_reset_running_accumulate/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01012_reset_running_accumulate/metadata.json b/parser/testdata/01012_reset_running_accumulate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01012_reset_running_accumulate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01012_reset_running_accumulate/query.sql b/parser/testdata/01012_reset_running_accumulate/query.sql new file mode 100644 index 000000000..3b2a6c407 --- /dev/null +++ b/parser/testdata/01012_reset_running_accumulate/query.sql @@ -0,0 +1,16 @@ +-- Disable external aggregation because the state is reset for each new block of data in 'runningAccumulate' function. +SET max_bytes_before_external_group_by = 0; +SET max_bytes_ratio_before_external_group_by = 0; +SET allow_deprecated_error_prone_window_functions = 1; + +SELECT grouping, + item, + runningAccumulate(state, grouping) +FROM ( + SELECT number % 6 AS grouping, + number AS item, + sumState(number) AS state + FROM (SELECT number FROM system.numbers LIMIT 30) + GROUP BY grouping, item + ORDER BY grouping, item +); diff --git a/parser/testdata/01012_select_limit_x_0/ast.json b/parser/testdata/01012_select_limit_x_0/ast.json new file mode 100644 index 000000000..bc29def83 --- /dev/null +++ b/parser/testdata/01012_select_limit_x_0/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001283773, + "rows_read": 12, + "bytes_read": 448 + } +} diff --git a/parser/testdata/01012_select_limit_x_0/metadata.json b/parser/testdata/01012_select_limit_x_0/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01012_select_limit_x_0/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01012_select_limit_x_0/query.sql b/parser/testdata/01012_select_limit_x_0/query.sql new file mode 100644 index 000000000..5a0549dea --- /dev/null +++ b/parser/testdata/01012_select_limit_x_0/query.sql @@ -0,0 +1 @@ +SELECT count() FROM system.numbers LIMIT 1, 0; diff --git a/parser/testdata/01012_serialize_array_memory_usage/ast.json b/parser/testdata/01012_serialize_array_memory_usage/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01012_serialize_array_memory_usage/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01012_serialize_array_memory_usage/metadata.json b/parser/testdata/01012_serialize_array_memory_usage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01012_serialize_array_memory_usage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01012_serialize_array_memory_usage/query.sql b/parser/testdata/01012_serialize_array_memory_usage/query.sql new file mode 100644 index 000000000..eaee502f0 --- /dev/null +++ b/parser/testdata/01012_serialize_array_memory_usage/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-replicated-database + +-- serialization of big arrays shouldn't use too much memory +set max_memory_usage = 300000000; +select ignore(x) from (select groupArray(number) x from numbers(3355443)) group by x format Null; diff --git a/parser/testdata/01012_show_tables_limit/ast.json b/parser/testdata/01012_show_tables_limit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01012_show_tables_limit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01012_show_tables_limit/metadata.json b/parser/testdata/01012_show_tables_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01012_show_tables_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01012_show_tables_limit/query.sql b/parser/testdata/01012_show_tables_limit/query.sql new file mode 100644 index 000000000..18a11f66d --- /dev/null +++ b/parser/testdata/01012_show_tables_limit/query.sql @@ -0,0 +1,15 @@ + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.test1 (test UInt8) ENGINE = TinyLog; +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.test2 (test UInt8) ENGINE = TinyLog; +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.test3 (test UInt8) ENGINE = TinyLog; +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.test4 (test UInt8) ENGINE = TinyLog; +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.test5 (test UInt8) ENGINE = TinyLog; +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.test6 (test UInt8) ENGINE = TinyLog; + +SELECT '*** Should show 6: ***'; +SHOW TABLES FROM {CLICKHOUSE_DATABASE:Identifier}; +SELECT '*** Should show 2: ***'; +SHOW TABLES FROM {CLICKHOUSE_DATABASE:Identifier} LIMIT 2; +SELECT '*** Should show 4: ***'; +SHOW TABLES FROM {CLICKHOUSE_DATABASE:Identifier} LIMIT 2 * 2; + diff --git a/parser/testdata/01013_hex_decimal/ast.json b/parser/testdata/01013_hex_decimal/ast.json new file mode 100644 index 000000000..c22067062 --- /dev/null +++ b/parser/testdata/01013_hex_decimal/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function hex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_1" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.0014093, + "rows_read": 10, + "bytes_read": 380 + } +} diff --git a/parser/testdata/01013_hex_decimal/metadata.json b/parser/testdata/01013_hex_decimal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01013_hex_decimal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01013_hex_decimal/query.sql b/parser/testdata/01013_hex_decimal/query.sql new file mode 100644 index 000000000..97bb57fc2 --- /dev/null +++ b/parser/testdata/01013_hex_decimal/query.sql @@ -0,0 +1,8 @@ +SELECT hex(toDecimal32(1.0, 2)); +SELECT 
hex(toDecimal32(1., 2)); +SELECT hex(toDecimal32(0.000578, 6)); +SELECT hex(toDecimal64(-123.978, 3)); +SELECT hex(toDecimal128(99.67, 2)); +SELECT hex(toDecimal32(number, 3)) FROM numbers(200, 2); +SELECT hex(toDecimal64(number, 5)) FROM numbers(202, 2); +SELECT hex(toDecimal128(number, 9)) FROM numbers(120, 2); diff --git a/parser/testdata/01013_hex_float/ast.json b/parser/testdata/01013_hex_float/ast.json new file mode 100644 index 000000000..ce432675a --- /dev/null +++ b/parser/testdata/01013_hex_float/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function hex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_1" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001317225, + "rows_read": 7, + "bytes_read": 257 + } +} diff --git a/parser/testdata/01013_hex_float/metadata.json b/parser/testdata/01013_hex_float/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01013_hex_float/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01013_hex_float/query.sql b/parser/testdata/01013_hex_float/query.sql new file mode 100644 index 000000000..30869529d --- /dev/null +++ b/parser/testdata/01013_hex_float/query.sql @@ -0,0 +1,10 @@ +SELECT hex(1.0); +SELECT hex(101.); +SELECT hex(1e+18); +SELECT hex(1e-20); +SELECT hex(1e+100); +SELECT hex(0.000578); +SELECT hex(-123.978); +SELECT hex(toFloat32(99.67)); +SELECT hex(toFloat32(number)) FROM numbers(200, 2); +SELECT hex(toFloat64(number)) FROM numbers(202, 2); diff --git a/parser/testdata/01013_repeat_function/ast.json b/parser/testdata/01013_repeat_function/ast.json new file mode 100644 index 000000000..01b074c9b --- /dev/null +++ b/parser/testdata/01013_repeat_function/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function repeat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'abc'" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001227627, + "rows_read": 8, + "bytes_read": 287 + } +} diff --git a/parser/testdata/01013_repeat_function/metadata.json b/parser/testdata/01013_repeat_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01013_repeat_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01013_repeat_function/query.sql b/parser/testdata/01013_repeat_function/query.sql new file mode 100644 index 000000000..b29cc032f --- /dev/null +++ b/parser/testdata/01013_repeat_function/query.sql @@ -0,0 +1,26 @@ +SELECT repeat('abc', 10); +DROP TABLE IF EXISTS defaults; +CREATE TABLE defaults +( + strings String, + i8 Int8, + u16 UInt16, + u32 UInt32, + u64 UInt64 +)ENGINE = Memory(); + +INSERT INTO defaults values ('abc', 3, 12, 4, 56) ('sdfgg', -2, 10, 21, 200) ('xywq', -1, 4, 9, 5) ('plkf', 0, 5, 7,77); + +SELECT repeat(strings, i8) FROM defaults; 
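+-- (editorial sketch, not part of the upstream test) repeat concatenates n copies of its string argument, +-- e.g. repeat('abc', 3) = 'abcabcabc' and length(repeat('abc', 10)) = 3 * 10 = 30; for the negative +-- i8 values inserted above, a non-positive count is presumably expected to produce an empty string.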
+SELECT repeat(strings, u16) FROM defaults; +SELECT repeat(strings, u32) from defaults; +SELECT repeat(strings, u64) FROM defaults; +SELECT repeat(strings, 10) FROM defaults; +SELECT repeat('abc', i8) FROM defaults; +SELECT repeat('abc', u16) FROM defaults; +SELECT repeat('abc', u32) FROM defaults; +SELECT repeat('abc', u64) FROM defaults; + +SELECT repeat('Hello, world! ', 3); + +DROP TABLE defaults; diff --git a/parser/testdata/01013_totals_without_aggregation/ast.json b/parser/testdata/01013_totals_without_aggregation/ast.json new file mode 100644 index 000000000..9fcea61bb --- /dev/null +++ b/parser/testdata/01013_totals_without_aggregation/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001263355, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01013_totals_without_aggregation/metadata.json b/parser/testdata/01013_totals_without_aggregation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01013_totals_without_aggregation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01013_totals_without_aggregation/query.sql b/parser/testdata/01013_totals_without_aggregation/query.sql new file mode 100644 index 000000000..08be45754 --- /dev/null +++ b/parser/testdata/01013_totals_without_aggregation/query.sql @@ -0,0 +1,8 @@ +SET enable_analyzer = 1; + +SELECT 11 AS n GROUP BY n WITH TOTALS; +SELECT 12 AS n GROUP BY n WITH ROLLUP; +SELECT 13 AS n GROUP BY n WITH CUBE; +SELECT 1 AS n WITH TOTALS; -- { serverError NOT_IMPLEMENTED } +SELECT 1 AS n WITH ROLLUP; -- { serverError NOT_IMPLEMENTED } +SELECT 1 AS n WITH CUBE; -- { serverError NOT_IMPLEMENTED } diff --git a/parser/testdata/01014_count_of_merges_metrics/ast.json b/parser/testdata/01014_count_of_merges_metrics/ast.json new file mode 100644 index 000000000..6163da253 --- /dev/null +++ b/parser/testdata/01014_count_of_merges_metrics/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery new_table_test (children 1)" + }, + { + "explain": " Identifier new_table_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001403574, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/01014_count_of_merges_metrics/metadata.json b/parser/testdata/01014_count_of_merges_metrics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01014_count_of_merges_metrics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01014_count_of_merges_metrics/query.sql b/parser/testdata/01014_count_of_merges_metrics/query.sql new file mode 100644 index 000000000..85dd8707a --- /dev/null +++ b/parser/testdata/01014_count_of_merges_metrics/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS new_table_test; +DROP TABLE IF EXISTS check_table_test; + +CREATE TABLE new_table_test(name String) ENGINE = MergeTree ORDER BY name; +INSERT INTO new_table_test VALUES ('test'); +CREATE TABLE check_table_test(value1 UInt64, value2 UInt64) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO check_table_test (value1) SELECT value FROM system.events WHERE event = 'Merge'; +OPTIMIZE TABLE new_table_test FINAL; +INSERT INTO check_table_test (value2) SELECT value FROM system.events WHERE event = 'Merge'; +SELECT count() FROM check_table_test WHERE value2 > value1; + + +DROP TABLE 
new_table_test; +DROP TABLE check_table_test; diff --git a/parser/testdata/01014_function_repeat_corner_cases/ast.json b/parser/testdata/01014_function_repeat_corner_cases/ast.json new file mode 100644 index 000000000..d6bb46969 --- /dev/null +++ b/parser/testdata/01014_function_repeat_corner_cases/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function length (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function repeat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'x'" + }, + { + "explain": " Literal UInt64_1000000" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001200025, + "rows_read": 10, + "bytes_read": 378 + } +} diff --git a/parser/testdata/01014_function_repeat_corner_cases/metadata.json b/parser/testdata/01014_function_repeat_corner_cases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01014_function_repeat_corner_cases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01014_function_repeat_corner_cases/query.sql b/parser/testdata/01014_function_repeat_corner_cases/query.sql new file mode 100644 index 000000000..cedd55bce --- /dev/null +++ b/parser/testdata/01014_function_repeat_corner_cases/query.sql @@ -0,0 +1,6 @@ +SELECT length(repeat('x', 1000000)); +SELECT length(repeat('', 1000000)); +SELECT length(repeat('x', 1000001)); -- { serverError TOO_LARGE_STRING_SIZE } +SET max_memory_usage = 100000000; +SELECT length(repeat(repeat('Hello, world!', 1000000), 10)); -- { serverError MEMORY_LIMIT_EXCEEDED } +SELECT repeat(toString(number), number) FROM system.numbers LIMIT 11; diff --git a/parser/testdata/01015_array_split/ast.json b/parser/testdata/01015_array_split/ast.json new file mode 100644 index 000000000..550a13142 --- /dev/null +++ b/parser/testdata/01015_array_split/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arraySplit (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3, UInt64_4, UInt64_5]" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_0, UInt64_0, UInt64_1, UInt64_0]" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001640658, + "rows_read": 15, + "bytes_read": 648 + } +} diff --git a/parser/testdata/01015_array_split/metadata.json b/parser/testdata/01015_array_split/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01015_array_split/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01015_array_split/query.sql b/parser/testdata/01015_array_split/query.sql new file mode 100644 index 000000000..8ae96ba01 --- /dev/null +++ b/parser/testdata/01015_array_split/query.sql @@ -0,0 +1,21 @@ +SELECT arraySplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]); +SELECT arrayReverseSplit((x, y) -> y, [1, 2, 3, 4, 5], [1, 0, 0, 1, 0]); + +SELECT arraySplit(x -> 0, [1, 2, 3, 4, 5]); +SELECT arrayReverseSplit(x -> 0, [1, 2, 3, 4, 5]); +SELECT arraySplit(x -> 1, [1, 2, 3, 4, 5]); +SELECT arrayReverseSplit(x -> 1, [1, 2, 3, 4, 5]); +SELECT arraySplit(x -> x % 2 = 1, [1, 2, 3, 4, 5]); +SELECT arrayReverseSplit(x -> x % 2 = 1, [1, 2, 3, 4, 5]); + +SELECT arraySplit(x -> 0, []); +SELECT arrayReverseSplit(x -> 0, []); +SELECT arraySplit(x -> 1, []); +SELECT arrayReverseSplit(x -> 1, []); +SELECT arraySplit(x -> x, emptyArrayUInt8()); +SELECT arrayReverseSplit(x -> x, emptyArrayUInt8()); + +SELECT arraySplit(x -> x % 2 = 1, [1]); +SELECT arrayReverseSplit(x -> x % 2 = 1, [1]); +SELECT arraySplit(x -> x % 2 = 1, [2]); +SELECT arrayReverseSplit(x -> x % 2 = 1, [2]); diff --git a/parser/testdata/01015_attach_part/ast.json b/parser/testdata/01015_attach_part/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01015_attach_part/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01015_attach_part/metadata.json b/parser/testdata/01015_attach_part/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01015_attach_part/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01015_attach_part/query.sql b/parser/testdata/01015_attach_part/query.sql new file mode 100644 index 000000000..9ff505efd --- /dev/null +++ b/parser/testdata/01015_attach_part/query.sql @@ -0,0 +1,27 @@ + +DROP TABLE IF EXISTS table_01; + +CREATE TABLE table_01 ( + date Date, + n Int32 +) ENGINE = MergeTree() +PARTITION BY date +ORDER BY date; + +INSERT INTO table_01 SELECT toDate('2019-10-01'), number FROM system.numbers LIMIT 1000; + +SELECT COUNT() FROM table_01; + +ALTER TABLE table_01 DETACH PARTITION ID '20191001'; + +SELECT COUNT() FROM table_01; + +ALTER TABLE table_01 ATTACH PART '20191001_1_1_0'; + +SELECT COUNT() FROM table_01; + +ALTER TABLE table_01 DETACH PARTITION ALL; + +SELECT COUNT() FROM table_01; + +DROP TABLE IF EXISTS table_01; diff --git a/parser/testdata/01015_random_constant/ast.json b/parser/testdata/01015_random_constant/ast.json new file mode 100644 index 000000000..dada07cae --- /dev/null +++ b/parser/testdata/01015_random_constant/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function greaterOrEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function randConstant (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001027564, + "rows_read": 9, + "bytes_read": 345 + } +} diff --git a/parser/testdata/01015_random_constant/metadata.json b/parser/testdata/01015_random_constant/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01015_random_constant/metadata.json @@ -0,0 +1 @@ 
+{"todo": true} diff --git a/parser/testdata/01015_random_constant/query.sql b/parser/testdata/01015_random_constant/query.sql new file mode 100644 index 000000000..b25dd9420 --- /dev/null +++ b/parser/testdata/01015_random_constant/query.sql @@ -0,0 +1,3 @@ +select randConstant() >= 0; +select randConstant() % 10 < 10; +select uniqExact(x) from (select randConstant() as x); diff --git a/parser/testdata/01016_index_tuple_field_type/ast.json b/parser/testdata/01016_index_tuple_field_type/ast.json new file mode 100644 index 000000000..3493dcdd8 --- /dev/null +++ b/parser/testdata/01016_index_tuple_field_type/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tuple_01016 (children 1)" + }, + { + "explain": " Identifier tuple_01016" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001129149, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/01016_index_tuple_field_type/metadata.json b/parser/testdata/01016_index_tuple_field_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01016_index_tuple_field_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01016_index_tuple_field_type/query.sql b/parser/testdata/01016_index_tuple_field_type/query.sql new file mode 100644 index 000000000..1c5e6d81a --- /dev/null +++ b/parser/testdata/01016_index_tuple_field_type/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS tuple_01016; + +CREATE TABLE tuple_01016(a Tuple(DateTime, Int32)) ENGINE = MergeTree() ORDER BY a; + +-- repeat a couple of times, because it doesn't always reproduce well +INSERT INTO tuple_01016 VALUES (('2018-01-01 00:00:00', 1)); +SELECT * FROM tuple_01016 WHERE a < tuple(toDateTime('2019-01-01 00:00:00'), 0) format Null; +INSERT INTO tuple_01016 VALUES (('2018-01-01 00:00:00', 1)); +SELECT * FROM tuple_01016 WHERE a < tuple(toDateTime('2019-01-01 00:00:00'), 0) format Null; +INSERT INTO tuple_01016 VALUES (('2018-01-01 00:00:00', 1)); +SELECT * FROM tuple_01016 WHERE a < tuple(toDateTime('2019-01-01 00:00:00'), 0) format Null; +INSERT INTO tuple_01016 VALUES (('2018-01-01 00:00:00', 1)); +SELECT * FROM tuple_01016 WHERE a < tuple(toDateTime('2019-01-01 00:00:00'), 0) format Null; +INSERT INTO tuple_01016 VALUES (('2018-01-01 00:00:00', 1)); +SELECT * FROM tuple_01016 WHERE a < tuple(toDateTime('2019-01-01 00:00:00'), 0) format Null; + +DROP TABLE tuple_01016; diff --git a/parser/testdata/01016_macros/ast.json b/parser/testdata/01016_macros/ast.json new file mode 100644 index 000000000..9f6a5d288 --- /dev/null +++ b/parser/testdata/01016_macros/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.macros" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier macro" + }, + { + "explain": " Literal 'test'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001331825, + "rows_read": 
13, + "bytes_read": 481 + } +} diff --git a/parser/testdata/01016_macros/metadata.json b/parser/testdata/01016_macros/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01016_macros/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01016_macros/query.sql b/parser/testdata/01016_macros/query.sql new file mode 100644 index 000000000..75b32239c --- /dev/null +++ b/parser/testdata/01016_macros/query.sql @@ -0,0 +1,3 @@ +SELECT * FROM system.macros WHERE macro = 'test'; +SELECT getMacro('test'); +select isConstant(getMacro('test')); diff --git a/parser/testdata/01016_null_part_minmax/ast.json b/parser/testdata/01016_null_part_minmax/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01016_null_part_minmax/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01016_null_part_minmax/metadata.json b/parser/testdata/01016_null_part_minmax/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01016_null_part_minmax/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01016_null_part_minmax/query.sql b/parser/testdata/01016_null_part_minmax/query.sql new file mode 100644 index 000000000..3d471a33a --- /dev/null +++ b/parser/testdata/01016_null_part_minmax/query.sql @@ -0,0 +1,5 @@ +-- this test checks that null values are correctly serialized inside minmax index (issue #7113) +drop table if exists null_01016; +create table if not exists null_01016 (x Nullable(String)) engine MergeTree order by ifNull(x, 'order-null') partition by ifNull(x, 'partition-null'); +insert into null_01016 values (null); +drop table null_01016; diff --git a/parser/testdata/01016_simhash_minhash/ast.json b/parser/testdata/01016_simhash_minhash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01016_simhash_minhash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01016_simhash_minhash/metadata.json b/parser/testdata/01016_simhash_minhash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01016_simhash_minhash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01016_simhash_minhash/query.sql b/parser/testdata/01016_simhash_minhash/query.sql new file mode 100644 index 000000000..79abb018d --- /dev/null +++ b/parser/testdata/01016_simhash_minhash/query.sql @@ -0,0 +1,118 @@ +-- Tags: no-cpu-ppc64le +-- Tag no-cpu-ppc64le: Depending on the target platform, CRC32C function returns different hash values. So, should not run on PowerPC. 
Whenever a new test gets added here, same has to be updated in 01016_simhash_minhash_ppc.sql + +SELECT ngramSimHash(''); +SELECT ngramSimHash('what a cute cat.'); +SELECT ngramSimHashCaseInsensitive('what a cute cat.'); +SELECT ngramSimHashUTF8('what a cute cat.'); +SELECT ngramSimHashCaseInsensitiveUTF8('what a cute cat.'); +SELECT wordShingleSimHash('what a cute cat.'); +SELECT wordShingleSimHashCaseInsensitive('what a cute cat.'); +SELECT wordShingleSimHashUTF8('what a cute cat.'); +SELECT wordShingleSimHashCaseInsensitiveUTF8('what a cute cat.'); + +SELECT ngramMinHash(''); +SELECT ngramMinHash('what a cute cat.'); +SELECT ngramMinHashCaseInsensitive('what a cute cat.'); +SELECT ngramMinHashUTF8('what a cute cat.'); +SELECT ngramMinHashCaseInsensitiveUTF8('what a cute cat.'); +SELECT wordShingleMinHash('what a cute cat.'); +SELECT wordShingleMinHashCaseInsensitive('what a cute cat.'); +SELECT wordShingleMinHashUTF8('what a cute cat.'); +SELECT wordShingleMinHashCaseInsensitiveUTF8('what a cute cat.'); + +DROP TABLE IF EXISTS defaults; +CREATE TABLE defaults +( + s String +)ENGINE = Memory(); + +INSERT INTO defaults values ('It is the latest occurrence of the Southeast European haze, the issue that occurs in constant intensity during every wet season. It has mainly been caused by forest fires resulting from illegal slash-and-burn clearing performed on behalf of the palm oil industry in Kazakhstan, principally on the islands, which then spread quickly in the dry season.') ('It is the latest occurrence of the Southeast Asian haze, the issue that occurs in constant intensity during every wet season. It has mainly been caused by forest fires resulting from illegal slash-and-burn clearing performed on behalf of the palm oil industry in Kazakhstan, principally on the islands, which then spread quickly in the dry season.'); + +SELECT ngramSimHash(s) FROM defaults; +SELECT ngramSimHashCaseInsensitive(s) FROM defaults; +SELECT ngramSimHashUTF8(s) FROM defaults; +SELECT ngramSimHashCaseInsensitiveUTF8(s) FROM defaults; +SELECT wordShingleSimHash(s) FROM defaults; +SELECT wordShingleSimHashCaseInsensitive(s) FROM defaults; +SELECT wordShingleSimHashUTF8(s) FROM defaults; +SELECT wordShingleSimHashCaseInsensitiveUTF8(s) FROM defaults; + +SELECT ngramMinHash(s) FROM defaults; +SELECT ngramMinHashCaseInsensitive(s) FROM defaults; +SELECT ngramMinHashUTF8(s) FROM defaults; +SELECT ngramMinHashCaseInsensitiveUTF8(s) FROM defaults; +SELECT wordShingleMinHash(s) FROM defaults; +SELECT wordShingleMinHashCaseInsensitive(s) FROM defaults; +SELECT wordShingleMinHashUTF8(s) FROM defaults; +SELECT wordShingleMinHashCaseInsensitiveUTF8(s) FROM defaults; + +TRUNCATE TABLE defaults; +INSERT INTO defaults SELECT arrayJoin(splitByString('\n\n', +'ClickHouse uses all available hardware to its full potential to process each query as fast as possible. Peak processing performance for a single query stands at more than 2 terabytes per second (after decompression, only used columns). In distributed setup reads are automatically balanced among healthy replicas to avoid increasing latency. +ClickHouse supports multi-master asynchronous replication and can be deployed across multiple datacenters. All nodes are equal, which allows avoiding having single points of failure. Downtime of a single node or the whole datacenter wont affect the systems availability for both reads and writes. +ClickHouse is simple and works out-of-the-box. 
It streamlines all your data processing: ingest all your structured data into the system and it becomes instantly available for building reports. SQL dialect allows expressing the desired result without involving any custom non-standard API that could be found in some alternative systems. + +ClickHouse makes full use of all available hardware to process every request as quickly as possible. Peak performance for a single query is over 2 terabytes per second (only used columns after unpacking). In a distributed setup, reads are automatically balanced across healthy replicas to avoid increased latency. +ClickHouse supports asynchronous multi-master replication and can be deployed across multiple data centers. All nodes are equal to avoid single points of failure. Downtime for one site or the entire data center will not affect the system''s read and write availability. +ClickHouse is simple and works out of the box. It simplifies all the processing of your data: it loads all your structured data into the system, and they immediately become available for building reports. The SQL dialect allows you to express the desired result without resorting to any non-standard APIs that can be found in some alternative systems. + +ClickHouse makes full use of all available hardware to process each request as quickly as possible. Peak performance for a single query is over 2 terabytes per second (used columns only after unpacking). In a distributed setup, reads are automatically balanced across healthy replicas to avoid increased latency. +ClickHouse supports asynchronous multi-master replication and can be deployed across multiple data centers. All nodes are equal to avoid a single point of failure. Downtime for one site or the entire data center will not affect the system''s read / write availability. +ClickHouse is simple and works out of the box. It simplifies all the processing of your data: it loads all your structured data into the system, and they are immediately available for building reports. The SQL dialect allows you to express the desired result without resorting to any of the non-standard APIs found in some alternative systems. + +ClickHouse makes full use of all available hardware to process each request as quickly as possible. Peak performance for a single query is over 2 terabytes per second (using columns only after unpacking). In a distributed setup, reads are automatically balanced across healthy replicas to avoid increased latency. +ClickHouse supports asynchronous multi-master replication and can be deployed across multiple data centers. All nodes are equal to avoid a single point of failure. Downtime for one site or the entire data center will not affect the read / write availability of the system. +ClickHouse is simple and works out of the box. It simplifies all the processing of your data: it loads all of your structured data into the system, and it is immediately available for building reports. The SQL dialect allows you to express the desired result without resorting to any of the non-standard APIs found in some alternative systems. + +ClickHouse makes full use of all available hardware to process each request as quickly as possible. Peak performance for a single query is over 2 terabytes per second (using columns after decompression only). In a distributed setup, reads are automatically balanced across healthy replicas to avoid increased latency. +ClickHouse supports asynchronous multi-master replication and can be deployed across multiple data centers. 
All nodes are equal to avoid a single point of failure. Downtime for one site or the entire data center will not affect the read / write availability of the system. +ClickHouse is simple and works out of the box. It simplifies all processing of your data: it loads all your structured data into the system and immediately becomes available for building reports. The SQL dialect allows you to express the desired result without resorting to any of the non-standard APIs found in some alternative systems. + +ClickHouse makes full use of all available hardware to process each request as quickly as possible. Peak performance for a single query is over 2 terabytes per second (using columns after decompression only). In a distributed setup, reads are automatically balanced across healthy replicas to avoid increased latency. +ClickHouse supports asynchronous multi-master replication and can be deployed across multiple data centers. All nodes are equal to avoid a single point of failure. Downtime for one site or the entire data center will not affect the read / write availability of the system. +ClickHouse is simple and works out of the box. It simplifies all processing of your data: it loads all structured data into the system and immediately becomes available for building reports. The SQL dialect allows you to express the desired result without resorting to any of the non-standard APIs found in some alternative systems.' +)); + +SELECT 'uniqExact', uniqExact(s) FROM defaults; + + +SELECT 'ngramSimHash'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), ngramSimHash(s) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'ngramSimHashCaseInsensitive'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), ngramSimHashCaseInsensitive(s) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'ngramSimHashUTF8'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), ngramSimHashUTF8(s) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'ngramSimHashCaseInsensitiveUTF8'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), ngramSimHashCaseInsensitiveUTF8(s) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'wordShingleSimHash'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), wordShingleSimHash(s, 2) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'wordShingleSimHashCaseInsensitive'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), wordShingleSimHashCaseInsensitive(s, 2) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'wordShingleSimHashUTF8'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), wordShingleSimHashUTF8(s, 2) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'wordShingleSimHashCaseInsensitiveUTF8'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), wordShingleSimHashCaseInsensitiveUTF8(s, 2) as h FROM defaults GROUP BY h ORDER BY h; + +SELECT 'ngramMinHash'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), ngramMinHash(s) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'ngramMinHashCaseInsensitive'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), ngramMinHashCaseInsensitive(s) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'ngramMinHashUTF8'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), ngramMinHashUTF8(s) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'ngramMinHashCaseInsensitiveUTF8'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), ngramMinHashCaseInsensitiveUTF8(s) as h FROM defaults GROUP BY h ORDER 
BY h; +SELECT 'wordShingleMinHash'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), wordShingleMinHash(s, 2, 3) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'wordShingleMinHashCaseInsensitive'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), wordShingleMinHashCaseInsensitive(s, 2, 3) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'wordShingleMinHashUTF8'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), wordShingleMinHashUTF8(s, 2, 3) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'wordShingleMinHashCaseInsensitiveUTF8'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), wordShingleMinHashCaseInsensitiveUTF8(s, 2, 3) as h FROM defaults GROUP BY h ORDER BY h; + +SELECT wordShingleSimHash('foobar', 9223372036854775807); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT wordShingleSimHash('foobar', 1001); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT wordShingleSimHash('foobar', 0); -- { serverError ARGUMENT_OUT_OF_BOUND } + +DROP TABLE defaults; diff --git a/parser/testdata/01016_simhash_minhash_ppc/ast.json b/parser/testdata/01016_simhash_minhash_ppc/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01016_simhash_minhash_ppc/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01016_simhash_minhash_ppc/metadata.json b/parser/testdata/01016_simhash_minhash_ppc/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01016_simhash_minhash_ppc/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01016_simhash_minhash_ppc/query.sql b/parser/testdata/01016_simhash_minhash_ppc/query.sql new file mode 100644 index 000000000..d7f3eeccf --- /dev/null +++ b/parser/testdata/01016_simhash_minhash_ppc/query.sql @@ -0,0 +1,118 @@ +-- Tags: no-cpu-x86_64, no-cpu-aarch64 +-- Tag no-cpu-x86_64 and no-cpu-aarch64: Depending on the target platform, CRC32C function returns different hash values. So, should not run on X86_64 and ARM. Whenever a new test gets added here, same has to be updated in 01016_simhash_minhash.sql + +SELECT ngramSimHash(''); +SELECT ngramSimHash('what a cute cat.'); +SELECT ngramSimHashCaseInsensitive('what a cute cat.'); +SELECT ngramSimHashUTF8('what a cute cat.'); +SELECT ngramSimHashCaseInsensitiveUTF8('what a cute cat.'); +SELECT wordShingleSimHash('what a cute cat.'); +SELECT wordShingleSimHashCaseInsensitive('what a cute cat.'); +SELECT wordShingleSimHashUTF8('what a cute cat.'); +SELECT wordShingleSimHashCaseInsensitiveUTF8('what a cute cat.'); + +SELECT ngramMinHash(''); +SELECT ngramMinHash('what a cute cat.'); +SELECT ngramMinHashCaseInsensitive('what a cute cat.'); +SELECT ngramMinHashUTF8('what a cute cat.'); +SELECT ngramMinHashCaseInsensitiveUTF8('what a cute cat.'); +SELECT wordShingleMinHash('what a cute cat.'); +SELECT wordShingleMinHashCaseInsensitive('what a cute cat.'); +SELECT wordShingleMinHashUTF8('what a cute cat.'); +SELECT wordShingleMinHashCaseInsensitiveUTF8('what a cute cat.'); + +DROP TABLE IF EXISTS defaults; +CREATE TABLE defaults +( + s String +)ENGINE = Memory(); + +INSERT INTO defaults values ('It is the latest occurrence of the Southeast European haze, the issue that occurs in constant intensity during every wet season. 
It has mainly been caused by forest fires resulting from illegal slash-and-burn clearing performed on behalf of the palm oil industry in Kazakhstan, principally on the islands, which then spread quickly in the dry season.') ('It is the latest occurrence of the Southeast Asian haze, the issue that occurs in constant intensity during every wet season. It has mainly been caused by forest fires resulting from illegal slash-and-burn clearing performed on behalf of the palm oil industry in Kazakhstan, principally on the islands, which then spread quickly in the dry season.'); + +SELECT ngramSimHash(s) FROM defaults; +SELECT ngramSimHashCaseInsensitive(s) FROM defaults; +SELECT ngramSimHashUTF8(s) FROM defaults; +SELECT ngramSimHashCaseInsensitiveUTF8(s) FROM defaults; +SELECT wordShingleSimHash(s) FROM defaults; +SELECT wordShingleSimHashCaseInsensitive(s) FROM defaults; +SELECT wordShingleSimHashUTF8(s) FROM defaults; +SELECT wordShingleSimHashCaseInsensitiveUTF8(s) FROM defaults; + +SELECT ngramMinHash(s) FROM defaults; +SELECT ngramMinHashCaseInsensitive(s) FROM defaults; +SELECT ngramMinHashUTF8(s) FROM defaults; +SELECT ngramMinHashCaseInsensitiveUTF8(s) FROM defaults; +SELECT wordShingleMinHash(s) FROM defaults; +SELECT wordShingleMinHashCaseInsensitive(s) FROM defaults; +SELECT wordShingleMinHashUTF8(s) FROM defaults; +SELECT wordShingleMinHashCaseInsensitiveUTF8(s) FROM defaults; + +TRUNCATE TABLE defaults; +INSERT INTO defaults SELECT arrayJoin(splitByString('\n\n', +'ClickHouse uses all available hardware to its full potential to process each query as fast as possible. Peak processing performance for a single query stands at more than 2 terabytes per second (after decompression, only used columns). In distributed setup reads are automatically balanced among healthy replicas to avoid increasing latency. +ClickHouse supports multi-master asynchronous replication and can be deployed across multiple datacenters. All nodes are equal, which allows avoiding having single points of failure. Downtime of a single node or the whole datacenter wont affect the systems availability for both reads and writes. +ClickHouse is simple and works out-of-the-box. It streamlines all your data processing: ingest all your structured data into the system and it becomes instantly available for building reports. SQL dialect allows expressing the desired result without involving any custom non-standard API that could be found in some alternative systems. + +ClickHouse makes full use of all available hardware to process every request as quickly as possible. Peak performance for a single query is over 2 terabytes per second (only used columns after unpacking). In a distributed setup, reads are automatically balanced across healthy replicas to avoid increased latency. +ClickHouse supports asynchronous multi-master replication and can be deployed across multiple data centers. All nodes are equal to avoid single points of failure. Downtime for one site or the entire data center will not affect the system''s read and write availability. +ClickHouse is simple and works out of the box. It simplifies all the processing of your data: it loads all your structured data into the system, and they immediately become available for building reports. The SQL dialect allows you to express the desired result without resorting to any non-standard APIs that can be found in some alternative systems. + +ClickHouse makes full use of all available hardware to process each request as quickly as possible. 
Peak performance for a single query is over 2 terabytes per second (used columns only after unpacking). In a distributed setup, reads are automatically balanced across healthy replicas to avoid increased latency. +ClickHouse supports asynchronous multi-master replication and can be deployed across multiple data centers. All nodes are equal to avoid a single point of failure. Downtime for one site or the entire data center will not affect the system''s read / write availability. +ClickHouse is simple and works out of the box. It simplifies all the processing of your data: it loads all your structured data into the system, and they are immediately available for building reports. The SQL dialect allows you to express the desired result without resorting to any of the non-standard APIs found in some alternative systems. + +ClickHouse makes full use of all available hardware to process each request as quickly as possible. Peak performance for a single query is over 2 terabytes per second (using columns only after unpacking). In a distributed setup, reads are automatically balanced across healthy replicas to avoid increased latency. +ClickHouse supports asynchronous multi-master replication and can be deployed across multiple data centers. All nodes are equal to avoid a single point of failure. Downtime for one site or the entire data center will not affect the read / write availability of the system. +ClickHouse is simple and works out of the box. It simplifies all the processing of your data: it loads all of your structured data into the system, and it is immediately available for building reports. The SQL dialect allows you to express the desired result without resorting to any of the non-standard APIs found in some alternative systems. + +ClickHouse makes full use of all available hardware to process each request as quickly as possible. Peak performance for a single query is over 2 terabytes per second (using columns after decompression only). In a distributed setup, reads are automatically balanced across healthy replicas to avoid increased latency. +ClickHouse supports asynchronous multi-master replication and can be deployed across multiple data centers. All nodes are equal to avoid a single point of failure. Downtime for one site or the entire data center will not affect the read / write availability of the system. +ClickHouse is simple and works out of the box. It simplifies all processing of your data: it loads all your structured data into the system and immediately becomes available for building reports. The SQL dialect allows you to express the desired result without resorting to any of the non-standard APIs found in some alternative systems. + +ClickHouse makes full use of all available hardware to process each request as quickly as possible. Peak performance for a single query is over 2 terabytes per second (using columns after decompression only). In a distributed setup, reads are automatically balanced across healthy replicas to avoid increased latency. +ClickHouse supports asynchronous multi-master replication and can be deployed across multiple data centers. All nodes are equal to avoid a single point of failure. Downtime for one site or the entire data center will not affect the read / write availability of the system. +ClickHouse is simple and works out of the box. It simplifies all processing of your data: it loads all structured data into the system and immediately becomes available for building reports. 
The SQL dialect allows you to express the desired result without resorting to any of the non-standard APIs found in some alternative systems.' +)); + +SELECT 'uniqExact', uniqExact(s) FROM defaults; + + +SELECT 'ngramSimHash'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), ngramSimHash(s) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'ngramSimHashCaseInsensitive'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), ngramSimHashCaseInsensitive(s) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'ngramSimHashUTF8'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), ngramSimHashUTF8(s) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'ngramSimHashCaseInsensitiveUTF8'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), ngramSimHashCaseInsensitiveUTF8(s) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'wordShingleSimHash'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), wordShingleSimHash(s, 2) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'wordShingleSimHashCaseInsensitive'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), wordShingleSimHashCaseInsensitive(s, 2) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'wordShingleSimHashUTF8'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), wordShingleSimHashUTF8(s, 2) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'wordShingleSimHashCaseInsensitiveUTF8'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), wordShingleSimHashCaseInsensitiveUTF8(s, 2) as h FROM defaults GROUP BY h ORDER BY h; + +SELECT 'ngramMinHash'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), ngramMinHash(s) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'ngramMinHashCaseInsensitive'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), ngramMinHashCaseInsensitive(s) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'ngramMinHashUTF8'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), ngramMinHashUTF8(s) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'ngramMinHashCaseInsensitiveUTF8'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), ngramMinHashCaseInsensitiveUTF8(s) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'wordShingleMinHash'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), wordShingleMinHash(s, 2, 3) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'wordShingleMinHashCaseInsensitive'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), wordShingleMinHashCaseInsensitive(s, 2, 3) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'wordShingleMinHashUTF8'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), wordShingleMinHashUTF8(s, 2, 3) as h FROM defaults GROUP BY h ORDER BY h; +SELECT 'wordShingleMinHashCaseInsensitiveUTF8'; +SELECT arrayStringConcat(groupArray(s), '\n:::::::\n'), count(), wordShingleMinHashCaseInsensitiveUTF8(s, 2, 3) as h FROM defaults GROUP BY h ORDER BY h; + +SELECT wordShingleSimHash('foobar', 9223372036854775807); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT wordShingleSimHash('foobar', 1001); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT wordShingleSimHash('foobar', 0); -- { serverError ARGUMENT_OUT_OF_BOUND } + +DROP TABLE defaults; diff --git a/parser/testdata/01016_uniqCombined64/ast.json b/parser/testdata/01016_uniqCombined64/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01016_uniqCombined64/ast.json @@ -0,0 +1 @@ +{"error": true} 
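The two simhash/minhash suites above rely on near-duplicate paragraphs receiving identical shingle hashes, which the GROUP BY h queries then collapse into clusters. A minimal sketch of that clustering idea, assuming the same functions and a hypothetical table docs(s String):

-- rows whose 2-word-shingle simhash collides form one near-duplicate cluster
SELECT count() AS cluster_size, any(s) AS sample, wordShingleSimHash(s, 2) AS h
FROM docs
GROUP BY h
ORDER BY h;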
diff --git a/parser/testdata/01016_uniqCombined64/metadata.json b/parser/testdata/01016_uniqCombined64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01016_uniqCombined64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01016_uniqCombined64/query.sql b/parser/testdata/01016_uniqCombined64/query.sql new file mode 100644 index 000000000..acf813576 --- /dev/null +++ b/parser/testdata/01016_uniqCombined64/query.sql @@ -0,0 +1,9 @@ +-- For small cardinalities the 64-bit hash performs worse, but for 1e10: +-- 4 byte hash: 2.8832809652e10 +-- 8 byte hash: 0.9998568925e10 +-- Since checking with 1e10 values takes too much time (~45 secs), this +-- test just ensures that the results differ (and documents the +-- outcome). + +SELECT uniqCombined(number) FROM numbers(1e7); +SELECT uniqCombined64(number) FROM numbers(1e7); diff --git a/parser/testdata/01017_bithamming_distance/ast.json b/parser/testdata/01017_bithamming_distance/ast.json new file mode 100644 index 000000000..e8506b844 --- /dev/null +++ b/parser/testdata/01017_bithamming_distance/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitHammingDistance (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_5" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001397929, + "rows_read": 8, + "bytes_read": 301 + } +} diff --git a/parser/testdata/01017_bithamming_distance/metadata.json b/parser/testdata/01017_bithamming_distance/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01017_bithamming_distance/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01017_bithamming_distance/query.sql b/parser/testdata/01017_bithamming_distance/query.sql new file mode 100644 index 000000000..11f4f27d9 --- /dev/null +++ b/parser/testdata/01017_bithamming_distance/query.sql @@ -0,0 +1,44 @@ +SELECT bitHammingDistance(1, 5); +SELECT bitHammingDistance(100, 100000); +SELECT bitHammingDistance(-1, 1); + +DROP TABLE IF EXISTS defaults; +CREATE TABLE defaults +( + n1 UInt8, + n2 UInt16, + n3 UInt32, + n4 UInt64 +)ENGINE = Memory(); + +INSERT INTO defaults VALUES (1, 2, 3, 4) (12, 4345, 435, 1233) (45, 675, 32343, 54566) (90, 784, 9034, 778752); + +SELECT bitHammingDistance(4, n1) FROM defaults; +SELECT bitHammingDistance(n2, 100) FROM defaults; +SELECT bitHammingDistance(n3, n4) FROM defaults; + +DROP TABLE defaults; + +DROP TABLE IF EXISTS test_string; + +CREATE TABLE test_string +( + s1 String, + s2 String, + s3 FixedString(10), + s4 FixedString(10), +) ENGINE = Memory; + +INSERT INTO test_string VALUES ('hello', 'hello', 'hello', 'hello') ('hello', 'hellow', 'hello', 'hellow') ('clickhouse', '012345', 'clickhouse', '012345'); + +SELECT bitHammingDistance('hello', 'hello'); +SELECT bitHammingDistance('hello', 'hellow'); +SELECT bitHammingDistance(toFixedString('hello', 6), toFixedString('hellow', 6)); + +SELECT bitHammingDistance(s1, s2) FROM test_string; +SELECT bitHammingDistance(s3, s4) FROM test_string; + +SELECT bitHammingDistance('hello', s2) FROM test_string; +SELECT bitHammingDistance(s4, 
toFixedString('hello', 10)) FROM test_string; + +DROP TABLE test_string; diff --git a/parser/testdata/01017_in_unconvertible_complex_type/ast.json b/parser/testdata/01017_in_unconvertible_complex_type/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01017_in_unconvertible_complex_type/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01017_in_unconvertible_complex_type/metadata.json b/parser/testdata/01017_in_unconvertible_complex_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01017_in_unconvertible_complex_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01017_in_unconvertible_complex_type/query.sql b/parser/testdata/01017_in_unconvertible_complex_type/query.sql new file mode 100644 index 000000000..48eb8ce5c --- /dev/null +++ b/parser/testdata/01017_in_unconvertible_complex_type/query.sql @@ -0,0 +1,13 @@ +-- When left and right element types are compatible, but the particular value +-- on the right is not in the range of the left type, it should be ignored. +select (toUInt8(1)) in (-1); +select (toUInt8(0)) in (-1); +select (toUInt8(255)) in (-1); + +select [toUInt8(1)] in [-1]; +select [toUInt8(0)] in [-1]; +select [toUInt8(255)] in [-1]; + +-- When left and right element types are not compatible, we should get an error. +select (toUInt8(1)) in ('a'); -- { serverError TYPE_MISMATCH } +select [toUInt8(1)] in ['a']; -- { serverError TYPE_MISMATCH } diff --git a/parser/testdata/01017_tuplehamming_distance/ast.json b/parser/testdata/01017_tuplehamming_distance/ast.json new file mode 100644 index 000000000..4bdfcf58c --- /dev/null +++ b/parser/testdata/01017_tuplehamming_distance/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tupleHammingDistance (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_2)" + }, + { + "explain": " Literal Tuple_(UInt64_3, UInt64_4)" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001222795, + "rows_read": 8, + "bytes_read": 339 + } +} diff --git a/parser/testdata/01017_tuplehamming_distance/metadata.json b/parser/testdata/01017_tuplehamming_distance/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01017_tuplehamming_distance/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01017_tuplehamming_distance/query.sql b/parser/testdata/01017_tuplehamming_distance/query.sql new file mode 100644 index 000000000..d0ed1cee0 --- /dev/null +++ b/parser/testdata/01017_tuplehamming_distance/query.sql @@ -0,0 +1,19 @@ +SELECT tupleHammingDistance((1, 2), (3, 4)); +SELECT tupleHammingDistance((120, 243), (120, 434)); +SELECT tupleHammingDistance((-12, 434), (434, 434)); + +DROP TABLE IF EXISTS defaults; +CREATE TABLE defaults +( + t1 Tuple(UInt16, UInt16), + t2 Tuple(UInt32, UInt32), + t3 Tuple(Int64, Int64) +)ENGINE = Memory(); + +INSERT INTO defaults VALUES ((12, 43), (12312, 43453) ,(-10, 32)) ((1, 4), (546, 12345), (546, 12345)) ((90, 9875), (43456, 234203), (1231, -123)) ((87, 987), (545645, 768354634), (9123, 909)); + +SELECT tupleHammingDistance((12, 43), t1) FROM defaults; 
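+-- (editorial note) tupleHammingDistance counts the component positions at which two same-size tuples differ: +-- tupleHammingDistance((1, 2), (3, 4)) = 2 (both positions differ), while +-- tupleHammingDistance((120, 243), (120, 434)) = 1 (only the second position differs).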
+SELECT tupleHammingDistance(t2, (546, 456)) FROM defaults; +SELECT tupleHammingDistance(t2, t3) FROM defaults; + +DROP TABLE defaults; diff --git a/parser/testdata/01017_uniqCombined_memory_usage/ast.json b/parser/testdata/01017_uniqCombined_memory_usage/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01017_uniqCombined_memory_usage/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01017_uniqCombined_memory_usage/metadata.json b/parser/testdata/01017_uniqCombined_memory_usage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01017_uniqCombined_memory_usage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01017_uniqCombined_memory_usage/query.sql b/parser/testdata/01017_uniqCombined_memory_usage/query.sql new file mode 100644 index 000000000..eca370d94 --- /dev/null +++ b/parser/testdata/01017_uniqCombined_memory_usage/query.sql @@ -0,0 +1,59 @@ +-- Tags: no-tsan, no-asan, no-msan, no-replicated-database, no-random-settings +-- Tag no-tsan: Fine thresholds on memory usage +-- Tag no-asan: Fine thresholds on memory usage +-- Tag no-msan: Fine thresholds on memory usage + +-- each uniqCombined state should not use > sizeof(HLL) in memory, +-- sizeof(HLL) is (2^K * 6 / 8) +-- hence max_memory_usage for 100 rows = (96<<10)*100 = 9830400 + +SET use_uncompressed_cache = 0; +SET memory_profiler_step = 1; + +-- HashTable for UInt32 (used until (1<<13) elements), hence 8192 elements +SELECT 'UInt32'; +SET max_memory_usage = 4000000; +SELECT sum(u) FROM (SELECT intDiv(number, 8192) AS k, uniqCombined(number % 8192) u FROM numbers(8192 * 100) GROUP BY k); -- { serverError MEMORY_LIMIT_EXCEEDED } +SET max_memory_usage = 9830400; +SELECT sum(u) FROM (SELECT intDiv(number, 8192) AS k, uniqCombined(number % 8192) u FROM numbers(8192 * 100) GROUP BY k); + +-- HashTable for UInt64 (used until (1<<12) elements), hence 4096 elements +SELECT 'UInt64'; +SET max_memory_usage = 4000000; +SELECT sum(u) FROM (SELECT intDiv(number, 4096) AS k, uniqCombined(reinterpretAsString(number % 4096)) u FROM numbers(4096 * 100) GROUP BY k); -- { serverError MEMORY_LIMIT_EXCEEDED } +SET max_memory_usage = 9830400; + + +SELECT sum(u) FROM (SELECT intDiv(number, 4096) AS k, uniqCombined(reinterpretAsString(number % 4096)) u FROM numbers(4096 * 100) GROUP BY k); + +SELECT 'K=16'; + +-- HashTable for UInt32 (used until (1<<12) elements), hence 4096 elements +SELECT 'UInt32'; +SET max_memory_usage = 2000000; +SELECT sum(u) FROM (SELECT intDiv(number, 4096) AS k, uniqCombined(16)(number % 4096) u FROM numbers(4096 * 100) GROUP BY k); -- { serverError MEMORY_LIMIT_EXCEEDED } +SET max_memory_usage = 5230000; +SELECT sum(u) FROM (SELECT intDiv(number, 4096) AS k, uniqCombined(16)(number % 4096) u FROM numbers(4096 * 100) GROUP BY k); + +-- HashTable for UInt64 (used until (1<<11) elements), hence 2048 elements +SELECT 'UInt64'; +SET max_memory_usage = 2000000; +SELECT sum(u) FROM (SELECT intDiv(number, 2048) AS k, uniqCombined(16)(reinterpretAsString(number % 2048)) u FROM numbers(2048 * 100) GROUP BY k); -- { serverError MEMORY_LIMIT_EXCEEDED } +SET max_memory_usage = 5900000; +SELECT sum(u) FROM (SELECT intDiv(number, 2048) AS k, uniqCombined(16)(reinterpretAsString(number % 2048)) u FROM numbers(2048 * 100) GROUP BY k); + +SELECT 'K=18'; + +-- HashTable for UInt32 (used until (1<<14) elements), hence 16384 elements +SELECT 'UInt32'; +SET max_memory_usage = 8000000; +SELECT sum(u) FROM (SELECT 
intDiv(number, 16384) AS k, uniqCombined(18)(number % 16384) u FROM numbers(16384 * 100) GROUP BY k); -- { serverError MEMORY_LIMIT_EXCEEDED } +SET max_memory_usage = 19660800; +SELECT sum(u) FROM (SELECT intDiv(number, 16384) AS k, uniqCombined(18)(number % 16384) u FROM numbers(16384 * 100) GROUP BY k); + +-- HashTable for UInt64 (used until (1<<13) elements), hence 8192 elements +SELECT 'UInt64'; +SET max_memory_usage = 8000000; +SELECT sum(u) FROM (SELECT intDiv(number, 8192) AS k, uniqCombined(18)(reinterpretAsString(number % 8192)) u FROM numbers(8192 * 100) GROUP BY k); -- { serverError MEMORY_LIMIT_EXCEEDED } +SET max_memory_usage = 19660800; +SELECT sum(u) FROM (SELECT intDiv(number, 8192) AS k, uniqCombined(18)(reinterpretAsString(number % 8192)) u FROM numbers(8192 * 100) GROUP BY k); diff --git a/parser/testdata/01018_Distributed__shard_num/ast.json b/parser/testdata/01018_Distributed__shard_num/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01018_Distributed__shard_num/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01018_Distributed__shard_num/metadata.json b/parser/testdata/01018_Distributed__shard_num/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01018_Distributed__shard_num/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01018_Distributed__shard_num/query.sql b/parser/testdata/01018_Distributed__shard_num/query.sql new file mode 100644 index 000000000..79e357a67 --- /dev/null +++ b/parser/testdata/01018_Distributed__shard_num/query.sql @@ -0,0 +1,97 @@ +-- Tags: shard + +-- make the order static +SET max_threads = 1; + +-- data should be inserted into Distributed table synchronously +SET distributed_foreground_insert = 1; + +DROP TABLE IF EXISTS mem1; +DROP TABLE IF EXISTS mem2; +DROP TABLE IF EXISTS mem3; +DROP TABLE IF EXISTS dist_1; +DROP TABLE IF EXISTS dist_2; +DROP TABLE IF EXISTS dist_3; + +CREATE TABLE mem1 (key Int) Engine=Memory(); +INSERT INTO mem1 VALUES (10); +CREATE TABLE dist_1 AS mem1 Engine=Distributed(test_shard_localhost, currentDatabase(), mem1); +INSERT INTO dist_1 VALUES (20); + +CREATE TABLE mem2 (key Int) Engine=Memory(); +INSERT INTO mem2 VALUES (100); +CREATE TABLE dist_2 AS mem2 Engine=Distributed(test_cluster_two_shards_localhost, currentDatabase(), mem2); + +CREATE TABLE mem3 (key Int, _shard_num String) Engine=Memory(); +INSERT INTO mem3 VALUES (100, 'foo'); +CREATE TABLE dist_3 AS mem3 Engine=Distributed(test_shard_localhost, currentDatabase(), mem3); + +-- { echoOn } + +-- remote(system.one) +SELECT 'remote(system.one)'; +SELECT * FROM remote('127.0.0.1', system.one); +SELECT * FROM remote('127.0.0.{1,2}', system.one); +SELECT _shard_num, * FROM remote('127.0.0.1', system.one); +SELECT _shard_num, * FROM remote('127.0.0.{1,2}', system.one) order by _shard_num; +SELECT _shard_num, * FROM remote('127.0.0.{1,2}', system.one) WHERE _shard_num = 1; + +-- dist_1 using test_shard_localhost +SELECT 'dist_1'; +SELECT _shard_num FROM dist_1 order by _shard_num; + +SELECT _shard_num FROM dist_1 order by _shard_num; +SELECT _shard_num, key FROM dist_1 order by _shard_num; +SELECT key FROM dist_1; + +SELECT _shard_num FROM dist_1 order by _shard_num; +SELECT _shard_num, key FROM dist_1 order by _shard_num, key; +SELECT key FROM dist_1; + +-- dist_2 using test_cluster_two_shards_localhost +SELECT 'dist_2'; +SELECT _shard_num FROM dist_2 order by _shard_num; + +SELECT _shard_num FROM dist_2 order by _shard_num; 
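+-- (editorial note) _shard_num is a virtual column of Distributed tables holding the 1-based number of +-- the shard a row was read from, so dist_2 over test_cluster_two_shards_localhost is expected to +-- return shard numbers 1 and 2 here.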
+SELECT _shard_num, key FROM dist_2 order by _shard_num, key;
+SELECT key FROM dist_2;
+
+-- multiple _shard_num (remote() over a Distributed table)
+SELECT 'remote(Distributed)';
+SELECT _shard_num, key FROM remote('127.0.0.1', currentDatabase(), dist_2) order by _shard_num, key settings serialize_query_plan=0;
+SELECT 'remote(DistributedQueryPlan)';
+-- Distributed over Distributed does not work because _shard_num is not analyzed from the QueryPlan.
+SELECT _shard_num, key FROM remote('127.0.0.1', currentDatabase(), dist_2) order by _shard_num, key settings serialize_query_plan=1, prefer_localhost_replica=0, enable_analyzer=1;
+
+-- JOIN system.clusters
+SELECT 'JOIN system.clusters';
+
+SELECT a._shard_num, a.key, b.host_name, b.host_address IN ('::1', '127.0.0.1'), b.port
+FROM (SELECT *, _shard_num FROM dist_1) a
+JOIN system.clusters b
+ON a._shard_num = b.shard_num
+WHERE b.cluster = 'test_cluster_two_shards_localhost';
+
+SELECT _shard_num, key, b.host_name, b.host_address IN ('::1', '127.0.0.1'), b.port
+FROM dist_1 a
+JOIN system.clusters b
+ON _shard_num = b.shard_num
+WHERE b.cluster = 'test_cluster_two_shards_localhost'
+ORDER BY key
+SETTINGS enable_analyzer = 1;
+
+SELECT 'Rewrite with alias';
+SELECT a._shard_num, key FROM dist_1 a;
+
+-- the same with JOIN, just in case
+SELECT a._shard_num, a.key, b.host_name, b.host_address IN ('::1', '127.0.0.1'), b.port
+FROM dist_1 a
+JOIN system.clusters b
+ON a._shard_num = b.shard_num
+WHERE b.cluster = 'test_cluster_two_shards_localhost'
+ORDER BY key
+SETTINGS enable_analyzer = 1;
+
+SELECT 'dist_3';
+SELECT * FROM dist_3;
+SELECT _shard_num, * FROM dist_3 order by _shard_num;
diff --git a/parser/testdata/01018_ambiguous_column/ast.json b/parser/testdata/01018_ambiguous_column/ast.json
new file mode 100644
index 000000000..b6400f120
--- /dev/null
+++ b/parser/testdata/01018_ambiguous_column/ast.json
@@ -0,0 +1,25 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "Set"
+        }
+    ],
+
+    "rows": 1,
+
+    "statistics":
+    {
+        "elapsed": 0.001235141,
+        "rows_read": 1,
+        "bytes_read": 11
+    }
+}
diff --git a/parser/testdata/01018_ambiguous_column/metadata.json b/parser/testdata/01018_ambiguous_column/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01018_ambiguous_column/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01018_ambiguous_column/query.sql b/parser/testdata/01018_ambiguous_column/query.sql
new file mode 100644
index 000000000..57ed86a2e
--- /dev/null
+++ b/parser/testdata/01018_ambiguous_column/query.sql
@@ -0,0 +1,29 @@
+SET output_format_pretty_color=1;
+SET enable_analyzer = 1;
+
+select * from system.one cross join system.one;
+select * from system.one cross join system.one r;
+select * from system.one l cross join system.one;
+select * from system.one left join system.one using dummy;
+select dummy from system.one left join system.one using dummy;
+
+USE system;
+
+SELECT dummy FROM one AS A JOIN one ON A.dummy = one.dummy;
+SELECT dummy FROM one JOIN one AS A ON A.dummy = one.dummy;
+SELECT dummy FROM one l JOIN one r ON dummy = r.dummy;
+SELECT dummy FROM one l JOIN one r ON one.dummy = r.dummy;
+SELECT dummy FROM one l JOIN one r ON l.dummy = dummy;
+SELECT dummy FROM one l JOIN one r ON l.dummy = one.dummy;
+
+SELECT * from one
+JOIN one A ON one.dummy = A.dummy
+JOIN one B ON one.dummy = B.dummy
+FORMAT PrettyCompact;
+
+SELECT * from one A
+JOIN system.one one ON A.dummy = one.dummy
+JOIN system.one two ON A.dummy = two.dummy
+FORMAT PrettyCompact; + +SELECT one.dummy FROM one AS A JOIN (SELECT 0 AS dummy) B USING dummy; diff --git a/parser/testdata/01018_ddl_dictionaries_create/ast.json b/parser/testdata/01018_ddl_dictionaries_create/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01018_ddl_dictionaries_create/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01018_ddl_dictionaries_create/metadata.json b/parser/testdata/01018_ddl_dictionaries_create/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01018_ddl_dictionaries_create/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01018_ddl_dictionaries_create/query.sql b/parser/testdata/01018_ddl_dictionaries_create/query.sql new file mode 100644 index 000000000..c74ea9b7c --- /dev/null +++ b/parser/testdata/01018_ddl_dictionaries_create/query.sql @@ -0,0 +1,166 @@ +-- Tags: no-parallel, no-fasttest + +SET send_logs_level = 'fatal'; + +DROP DATABASE IF EXISTS memory_db; +DROP DATABASE IF EXISTS db_01018; +DROP DATABASE IF EXISTS database_for_dict_01018; + +CREATE DATABASE database_for_dict_01018; + + +CREATE TABLE database_for_dict_01018.table_for_dict +( + key_column UInt64, + second_column UInt8, + third_column String +) +ENGINE = MergeTree() +ORDER BY key_column; + +INSERT INTO database_for_dict_01018.table_for_dict VALUES (1, 100, 'Hello world'); + +DROP DATABASE IF EXISTS db_01018; + +CREATE DATABASE db_01018; + +SELECT '=DICTIONARY in Ordinary DB'; + +CREATE DICTIONARY db_01018.dict1 +( + key_column UInt64 DEFAULT 0, + second_column UInt8 DEFAULT 1, + third_column String DEFAULT 'qqq' +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' PASSWORD '' DB 'database_for_dict_01018')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(FLAT()); + +SHOW CREATE DICTIONARY db_01018.dict1; + +SHOW DICTIONARIES FROM db_01018 LIKE 'dict1'; + +EXISTS DICTIONARY db_01018.dict1; + +SELECT database, name FROM system.dictionaries WHERE database='db_01018' AND name LIKE 'dict1'; + +SELECT '==DETACH DICTIONARY'; +DETACH DICTIONARY db_01018.dict1; + +SHOW DICTIONARIES FROM db_01018 LIKE 'dict1'; + +EXISTS DICTIONARY db_01018.dict1; + +SELECT database, name FROM system.dictionaries WHERE database='db_01018' AND name LIKE 'dict1'; + +SELECT '==ATTACH DICTIONARY'; +ATTACH DICTIONARY db_01018.dict1; + +SHOW DICTIONARIES FROM db_01018 LIKE 'dict1'; + +EXISTS DICTIONARY db_01018.dict1; + +SELECT database, name FROM system.dictionaries WHERE database='db_01018' AND name LIKE 'dict1'; + +SELECT '==DROP DICTIONARY'; + +DROP DICTIONARY IF EXISTS db_01018.dict1; + +SHOW DICTIONARIES FROM db_01018 LIKE 'dict1'; + +EXISTS DICTIONARY db_01018.dict1; + +SELECT database, name FROM system.dictionaries WHERE database='db_01018' AND name LIKE 'dict1'; + +DROP DATABASE IF EXISTS db_01018; + +DROP DATABASE IF EXISTS memory_db; + +CREATE DATABASE memory_db ENGINE = Memory; + +SELECT '=DICTIONARY in Memory DB'; + +CREATE DICTIONARY memory_db.dict2 +( + key_column UInt64 DEFAULT 0 INJECTIVE, + second_column UInt8 DEFAULT 1 EXPRESSION rand() % 222, + third_column String DEFAULT 'qqq' +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' PASSWORD '' DB 'database_for_dict_01018')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(FLAT()); + +SHOW CREATE DICTIONARY memory_db.dict2; + +SHOW DICTIONARIES FROM memory_db LIKE 'dict2'; + +EXISTS DICTIONARY memory_db.dict2; + +SELECT 
database, name FROM system.dictionaries WHERE database='memory_db' AND name LIKE 'dict2'; + +SELECT '=DICTIONARY in Lazy DB'; + +DROP DATABASE IF EXISTS lazy_db; + +CREATE DATABASE lazy_db ENGINE = Lazy(1); + +CREATE DICTIONARY lazy_db.dict3 +( + key_column UInt64 DEFAULT 0 INJECTIVE, + second_column UInt8 DEFAULT 1 EXPRESSION rand() % 222, + third_column String DEFAULT 'qqq' +) +PRIMARY KEY key_column, second_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' PASSWORD '' DB 'database_for_dict_01018')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(COMPLEX_KEY_HASHED()); --{serverError UNSUPPORTED_METHOD} + +DROP DATABASE IF EXISTS lazy_db; + +SELECT '=DROP DATABASE WITH DICTIONARY'; + +DROP DATABASE IF EXISTS db_01018; + +CREATE DATABASE db_01018; + +CREATE DICTIONARY db_01018.dict4 +( + key_column UInt64 DEFAULT 0, + second_column UInt8 DEFAULT 1, + third_column String DEFAULT 'qqq' +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' PASSWORD '' DB 'database_for_dict_01018')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(FLAT()); + +SHOW DICTIONARIES FROM db_01018; + +DROP DATABASE IF EXISTS db_01018; + +CREATE DATABASE db_01018; + +SHOW DICTIONARIES FROM db_01018; + +CREATE DICTIONARY db_01018.dict4 +( + key_column UInt64 DEFAULT 0, + second_column UInt8 DEFAULT 1, + third_column String DEFAULT 'qqq' +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' PASSWORD '' DB 'database_for_dict_01018')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(FLAT()); + +SHOW DICTIONARIES FROM db_01018; + +DROP DATABASE IF EXISTS db_01018; + +DROP DICTIONARY memory_db.dict2; +DROP TABLE IF EXISTS database_for_dict_01018.table_for_dict; + +DROP DATABASE IF EXISTS database_for_dict_01018; +DROP DATABASE IF EXISTS memory_db; diff --git a/parser/testdata/01018_ddl_dictionaries_select/ast.json b/parser/testdata/01018_ddl_dictionaries_select/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01018_ddl_dictionaries_select/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01018_ddl_dictionaries_select/metadata.json b/parser/testdata/01018_ddl_dictionaries_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01018_ddl_dictionaries_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01018_ddl_dictionaries_select/query.sql b/parser/testdata/01018_ddl_dictionaries_select/query.sql new file mode 100644 index 000000000..4c4bcc440 --- /dev/null +++ b/parser/testdata/01018_ddl_dictionaries_select/query.sql @@ -0,0 +1,144 @@ +-- Tags: no-fasttest + +SET send_logs_level = 'fatal'; +SET check_table_dependencies=0; + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.table_for_dict +( + key_column UInt64, + second_column UInt8, + third_column String, + fourth_column Float64 +) +ENGINE = MergeTree() +ORDER BY key_column; + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_for_dict SELECT number, number % 17, toString(number * number), number / 2.0 from numbers(100); + +CREATE DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict1 +( + key_column UInt64 DEFAULT 0, + second_column UInt8 DEFAULT 1, + third_column String DEFAULT 'qqq', + fourth_column Float64 DEFAULT 42.0 +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' PASSWORD '' DB currentDatabase())) +LIFETIME(MIN 1 MAX 10) +LAYOUT(FLAT()); + +SELECT 
dictGetUInt8({CLICKHOUSE_DATABASE:String} || '.dict1', 'second_column', toUInt64(11)); +SELECT second_column FROM {CLICKHOUSE_DATABASE:Identifier}.dict1 WHERE key_column = 11; +SELECT dictGetString({CLICKHOUSE_DATABASE:String} || '.dict1', 'third_column', toUInt64(12)); +SELECT third_column FROM {CLICKHOUSE_DATABASE:Identifier}.dict1 WHERE key_column = 12; +SELECT dictGetFloat64({CLICKHOUSE_DATABASE:String} || '.dict1', 'fourth_column', toUInt64(14)); +SELECT fourth_column FROM {CLICKHOUSE_DATABASE:Identifier}.dict1 WHERE key_column = 14; + +SELECT count(distinct(dictGetUInt8({CLICKHOUSE_DATABASE:String} || '.dict1', 'second_column', toUInt64(number)))) from numbers(100); + +DETACH DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict1; + +SELECT dictGetUInt8({CLICKHOUSE_DATABASE:String} || '.dict1', 'second_column', toUInt64(11)); -- {serverError BAD_ARGUMENTS} + +ATTACH DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict1; + +SELECT dictGetUInt8({CLICKHOUSE_DATABASE:String} || '.dict1', 'second_column', toUInt64(11)); + +DROP DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict1; + +SELECT dictGetUInt8({CLICKHOUSE_DATABASE:String} || '.dict1', 'second_column', toUInt64(11)); -- {serverError BAD_ARGUMENTS} + +-- SOURCE(CLICKHOUSE(...)) uses default params if not specified +DROP DICTIONARY IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.dict1; + +CREATE DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict1 +( + key_column UInt64 DEFAULT 0, + second_column UInt8 DEFAULT 1, + third_column String DEFAULT 'qqq', + fourth_column Float64 DEFAULT 42.0 +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(TABLE 'table_for_dict' DB currentDatabase())) +LIFETIME(MIN 1 MAX 10) +LAYOUT(FLAT()); + +SELECT dictGetUInt8({CLICKHOUSE_DATABASE:String} || '.dict1', 'second_column', toUInt64(11)); + +SELECT count(distinct(dictGetUInt8({CLICKHOUSE_DATABASE:String} || '.dict1', 'second_column', toUInt64(number)))) from numbers(100); + +DROP DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict1; + +CREATE DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict1 +( + key_column UInt64 DEFAULT 0, + second_column UInt8 DEFAULT 1, + third_column String DEFAULT 'qqq', + fourth_column Float64 DEFAULT 42.0 +) +PRIMARY KEY key_column, third_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' DB currentDatabase())) +LIFETIME(MIN 1 MAX 10) +LAYOUT(COMPLEX_KEY_CACHE(SIZE_IN_CELLS 1)); + +SELECT dictGetUInt8({CLICKHOUSE_DATABASE:String} || '.dict1', 'second_column', tuple(toUInt64(11), '121')); +SELECT dictGetFloat64({CLICKHOUSE_DATABASE:String} || '.dict1', 'fourth_column', tuple(toUInt64(14), '196')); + +DETACH DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict1; + +SELECT dictGetUInt8({CLICKHOUSE_DATABASE:String} || '.dict1', 'second_column', tuple(toUInt64(11), '121')); -- {serverError BAD_ARGUMENTS} + +ATTACH DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict1; + +SELECT dictGetUInt8({CLICKHOUSE_DATABASE:String} || '.dict1', 'second_column', tuple(toUInt64(11), '121')); + +CREATE DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict2 +( + key_column UInt64 DEFAULT 0, + some_column String EXPRESSION toString(fourth_column), + fourth_column Float64 DEFAULT 42.0 +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' DB currentDatabase())) +LIFETIME(MIN 1 MAX 10) +LAYOUT(HASHED()); + +SELECT dictGetString({CLICKHOUSE_DATABASE:String} || '.dict2', 'some_column', toUInt64(12)); + +-- NOTE: database = currentDatabase() is not mandatory +SELECT name, engine FROM 
system.tables WHERE database = {CLICKHOUSE_DATABASE:String} ORDER BY name;
+
+SELECT database, name, type FROM system.dictionaries WHERE database = {CLICKHOUSE_DATABASE:String} ORDER BY name;
+
+-- check that the dictionary will not update (LIFETIME(0) disables updates)
+CREATE DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict3
+(
+    key_column UInt64 DEFAULT 0,
+    some_column String EXPRESSION toString(fourth_column),
+    fourth_column Float64 DEFAULT 42.0
+)
+PRIMARY KEY key_column
+SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' DB currentDatabase()))
+LIFETIME(0)
+LAYOUT(HASHED());
+
+SELECT dictGetString({CLICKHOUSE_DATABASE:String} || '.dict3', 'some_column', toUInt64(12));
+
+-- dictGet with table name (identifier instead of a string literal)
+USE {CLICKHOUSE_DATABASE:Identifier};
+SELECT dictGetString(dict3, 'some_column', toUInt64(12));
+SELECT dictGetString({CLICKHOUSE_DATABASE:Identifier}.dict3, 'some_column', toUInt64(12));
+SELECT dictGetString(default.dict3, 'some_column', toUInt64(12)); -- {serverError BAD_ARGUMENTS}
+SELECT dictGet(dict3, 'some_column', toUInt64(12));
+SELECT dictGet({CLICKHOUSE_DATABASE:Identifier}.dict3, 'some_column', toUInt64(12));
+SELECT dictGet(default.dict3, 'some_column', toUInt64(12)); -- {serverError BAD_ARGUMENTS}
+USE default;
+
+-- alias should be handled correctly
+SELECT {CLICKHOUSE_DATABASE:String} || '.dict3' as n, dictGet(n, 'some_column', toUInt64(12));
+
+DROP TABLE {CLICKHOUSE_DATABASE:Identifier}.table_for_dict;
+
+SYSTEM RELOAD DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict3; -- {serverError UNKNOWN_TABLE}
+
+SELECT dictGetString({CLICKHOUSE_DATABASE:String} || '.dict3', 'some_column', toUInt64(12));
diff --git a/parser/testdata/01018_ddl_dictionaries_special/ast.json b/parser/testdata/01018_ddl_dictionaries_special/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/01018_ddl_dictionaries_special/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/01018_ddl_dictionaries_special/metadata.json b/parser/testdata/01018_ddl_dictionaries_special/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01018_ddl_dictionaries_special/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01018_ddl_dictionaries_special/query.sql b/parser/testdata/01018_ddl_dictionaries_special/query.sql
new file mode 100644
index 000000000..51e1eb2e1
--- /dev/null
+++ b/parser/testdata/01018_ddl_dictionaries_special/query.sql
@@ -0,0 +1,104 @@
+-- Tags: no-fasttest
+
+SET send_logs_level = 'fatal';
+
+SELECT '***date dict***';
+
+CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.date_table
+(
+    CountryID UInt64,
+    StartDate Date,
+    EndDate Date,
+    Tax Float64
+)
+ENGINE = MergeTree()
+ORDER BY CountryID;
+
+INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.date_table VALUES(1, toDate('2019-05-05'), toDate('2019-05-20'), 0.33);
+INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.date_table VALUES(1, toDate('2019-05-21'), toDate('2019-05-30'), 0.42);
+INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.date_table VALUES(2, toDate('2019-05-21'), toDate('2019-05-30'), 0.46);
+
+CREATE DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict1
+(
+    CountryID UInt64,
+    StartDate Date,
+    EndDate Date,
+    Tax Float64
+)
+PRIMARY KEY CountryID
+SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'date_table' DB currentDatabase()))
+LIFETIME(MIN 1 MAX 1000)
+LAYOUT(RANGE_HASHED())
+RANGE(MIN StartDate MAX EndDate);
+
+SELECT dictGetFloat64({CLICKHOUSE_DATABASE:String} || '.dict1', 'Tax', toUInt64(1),
toDate('2019-05-15')); +SELECT dictGetFloat64({CLICKHOUSE_DATABASE:String} || '.dict1', 'Tax', toUInt64(1), toDate('2019-05-29')); +SELECT dictGetFloat64({CLICKHOUSE_DATABASE:String} || '.dict1', 'Tax', toUInt64(2), toDate('2019-05-29')); +SELECT dictGetFloat64({CLICKHOUSE_DATABASE:String} || '.dict1', 'Tax', toUInt64(2), toDate('2019-05-31')); + +SELECT '***datetime dict***'; + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.datetime_table +( + CountryID UInt64, + StartDate DateTime, + EndDate DateTime, + Tax Float64 +) +ENGINE = MergeTree() +ORDER BY CountryID; + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.datetime_table VALUES(1, toDateTime('2019-05-05 00:00:00'), toDateTime('2019-05-20 00:00:00'), 0.33); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.datetime_table VALUES(1, toDateTime('2019-05-21 00:00:00'), toDateTime('2019-05-30 00:00:00'), 0.42); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.datetime_table VALUES(2, toDateTime('2019-05-21 00:00:00'), toDateTime('2019-05-30 00:00:00'), 0.46); + +CREATE DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict2 +( + CountryID UInt64, + StartDate DateTime, + EndDate DateTime, + Tax Float64 +) +PRIMARY KEY CountryID +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'datetime_table' DB currentDatabase())) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(RANGE_HASHED()) +RANGE(MIN StartDate MAX EndDate); + +SELECT dictGetFloat64({CLICKHOUSE_DATABASE:String} || '.dict2', 'Tax', toUInt64(1), toDateTime('2019-05-15 00:00:00')); +SELECT dictGetFloat64({CLICKHOUSE_DATABASE:String} || '.dict2', 'Tax', toUInt64(1), toDateTime('2019-05-29 00:00:00')); +SELECT dictGetFloat64({CLICKHOUSE_DATABASE:String} || '.dict2', 'Tax', toUInt64(2), toDateTime('2019-05-29 00:00:00')); +SELECT dictGetFloat64({CLICKHOUSE_DATABASE:String} || '.dict2', 'Tax', toUInt64(2), toDateTime('2019-05-31 00:00:00')); + +SELECT '***hierarchy dict***'; + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.table_with_hierarchy +( + RegionID UInt64, + ParentRegionID UInt64, + RegionName String +) +ENGINE = MergeTree() +ORDER BY RegionID; + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_with_hierarchy VALUES (3, 2, 'Hamovniki'), (2, 1, 'Moscow'), (1, 10000, 'Russia') (7, 10000, 'Ulan-Ude'); + + +CREATE DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dictionary_with_hierarchy +( + RegionID UInt64, + ParentRegionID UInt64 HIERARCHICAL, + RegionName String +) +PRIMARY KEY RegionID +SOURCE(CLICKHOUSE(host 'localhost' port tcpPort() user 'default' db currentDatabase() table 'table_with_hierarchy')) +LAYOUT(HASHED()) +LIFETIME(MIN 1 MAX 1000); + +SELECT dictGetString({CLICKHOUSE_DATABASE:String} || '.dictionary_with_hierarchy', 'RegionName', toUInt64(2)); +SELECT dictGetHierarchy({CLICKHOUSE_DATABASE:String} || '.dictionary_with_hierarchy', toUInt64(3)); +SELECT dictIsIn({CLICKHOUSE_DATABASE:String} || '.dictionary_with_hierarchy', toUInt64(3), toUInt64(2)); +SELECT dictIsIn({CLICKHOUSE_DATABASE:String} || '.dictionary_with_hierarchy', toUInt64(7), toUInt64(10000)); +SELECT dictIsIn({CLICKHOUSE_DATABASE:String} || '.dictionary_with_hierarchy', toUInt64(1), toUInt64(5)); + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; diff --git a/parser/testdata/01018_dictionaries_from_dictionaries/ast.json b/parser/testdata/01018_dictionaries_from_dictionaries/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01018_dictionaries_from_dictionaries/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/01018_dictionaries_from_dictionaries/metadata.json b/parser/testdata/01018_dictionaries_from_dictionaries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01018_dictionaries_from_dictionaries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01018_dictionaries_from_dictionaries/query.sql b/parser/testdata/01018_dictionaries_from_dictionaries/query.sql new file mode 100644 index 000000000..010aff24c --- /dev/null +++ b/parser/testdata/01018_dictionaries_from_dictionaries/query.sql @@ -0,0 +1,100 @@ +-- Tags: no-parallel + +SET send_logs_level = 'fatal'; + +DROP DATABASE IF EXISTS database_for_dict; + +CREATE DATABASE database_for_dict; + +CREATE TABLE database_for_dict.table_for_dict +( + key_column UInt64, + second_column UInt8, + third_column String, + fourth_column Float64 +) +ENGINE = MergeTree() +ORDER BY key_column; + +INSERT INTO database_for_dict.table_for_dict SELECT number, number % 17, toString(number * number), number / 2.0 from numbers(100); + +CREATE DICTIONARY database_for_dict.dict1 +( + key_column UInt64 DEFAULT 0, + second_column UInt8 DEFAULT 1, + third_column String DEFAULT 'qqq', + fourth_column Float64 DEFAULT 42.0 +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' DB 'database_for_dict')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(FLAT()); + +SELECT count(*) from database_for_dict.dict1; + +CREATE DICTIONARY database_for_dict.dict2 +( + key_column UInt64 DEFAULT 0, + second_column UInt8 DEFAULT 1, + third_column String DEFAULT 'qqq', + fourth_column Float64 DEFAULT 42.0 +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'dict1' DB 'database_for_dict')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(HASHED()); + +SELECT count(*) FROM database_for_dict.dict2; + +INSERT INTO database_for_dict.table_for_dict SELECT number, number % 17, toString(number * number), number / 2.0 from numbers(100, 100); + +SYSTEM RELOAD DICTIONARIES; + +SELECT count(*) from database_for_dict.dict2; +SELECT count(*) from database_for_dict.dict1; + +CREATE DICTIONARY database_for_dict.dict3 +( + key_column UInt64 DEFAULT 0, + second_column UInt8 DEFAULT 1, + third_column String DEFAULT 'qqq', + fourth_column Float64 DEFAULT 42.0 +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'dict2' DB 'database_for_dict')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(HASHED()); + +SELECT count(*) FROM database_for_dict.dict3; + +INSERT INTO database_for_dict.table_for_dict SELECT number, number % 17, toString(number * number), number / 2.0 from numbers(200, 100); + +SYSTEM RELOAD DICTIONARIES; + +SELECT count(*) from database_for_dict.dict3; +SELECT count(*) from database_for_dict.dict2; +SELECT count(*) from database_for_dict.dict1; + + +CREATE DICTIONARY database_for_dict.dict4 +( + key_column UInt64 DEFAULT 0, + second_column UInt8 DEFAULT 1, + third_column String DEFAULT 'qqq', + fourth_column Float64 DEFAULT 42.0 +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'non_existing_table' DB 'database_for_dict')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(HASHED()); + +SELECT count(*) FROM database_for_dict.dict4; -- {serverError UNKNOWN_TABLE} + +SELECT name from system.tables WHERE database = 'database_for_dict' ORDER BY name; +SELECT name from system.dictionaries WHERE database = 'database_for_dict' ORDER BY name; + +DROP DATABASE IF EXISTS database_for_dict; 
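+
+-- Sketch of the dependency chain the failure checks below rely on (assuming the
+-- dictionaries created above): dict3 reads from dict2, dict2 from dict1, and dict1
+-- from table_for_dict, so dropping database_for_dict severs every level at once;
+-- a lookup such as
+--   SELECT dictGetUInt8('database_for_dict.dict1', 'second_column', toUInt64(1));
+-- would now fail just like the count() queries that follow.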
+ +SELECT count(*) from database_for_dict.dict3; --{serverError UNKNOWN_DATABASE} +SELECT count(*) from database_for_dict.dict2; --{serverError UNKNOWN_DATABASE} +SELECT count(*) from database_for_dict.dict1; --{serverError UNKNOWN_DATABASE} diff --git a/parser/testdata/01018_empty_aggregation_filling/ast.json b/parser/testdata/01018_empty_aggregation_filling/ast.json new file mode 100644 index 000000000..a33842bc1 --- /dev/null +++ b/parser/testdata/01018_empty_aggregation_filling/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '--- Int Empty ---'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.000984544, + "rows_read": 5, + "bytes_read": 188 + } +} diff --git a/parser/testdata/01018_empty_aggregation_filling/metadata.json b/parser/testdata/01018_empty_aggregation_filling/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01018_empty_aggregation_filling/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01018_empty_aggregation_filling/query.sql b/parser/testdata/01018_empty_aggregation_filling/query.sql new file mode 100644 index 000000000..443eda3d6 --- /dev/null +++ b/parser/testdata/01018_empty_aggregation_filling/query.sql @@ -0,0 +1,75 @@ +SELECT '--- Int Empty ---'; + +SELECT arrayReduce('avgOrDefault', arrayPopBack([1])); +SELECT arrayReduce('avgOrNull', arrayPopBack([1])); +SELECT arrayReduce('stddevSampOrDefault', arrayPopBack([1])); +SELECT arrayReduce('stddevSampOrNull', arrayPopBack([1])); +SELECT arrayReduce('maxOrDefault', arrayPopBack([1])); +SELECT arrayReduce('maxOrNull', arrayPopBack([1])); + +SELECT avgOrDefaultIf(x, x > 1) FROM (SELECT 1 AS x); +SELECT avgOrNullIf(x, x > 1) FROM (SELECT 1 AS x); +SELECT stddevSampOrDefaultIf(x, x > 1) FROM (SELECT 1 AS x); +SELECT stddevSampOrNullIf(x, x > 1) FROM (SELECT 1 AS x); +SELECT maxOrDefaultIf(x, x > 1) FROM (SELECT 1 AS x); +SELECT maxOrNullIf(x, x > 1) FROM (SELECT 1 AS x); + +SELECT avgOrDefaultIfMerge(state) FROM (SELECT avgOrDefaultIfState(x, x > 1) AS state FROM (SELECT 1 AS x)); +SELECT avgOrNullIfMerge(state) FROM (SELECT avgOrNullIfState(x, x > 1) AS state FROM (SELECT 1 AS x)); +SELECT stddevSampOrDefaultIfMerge(state) FROM (SELECT stddevSampOrDefaultIfState(x, x > 1) AS state FROM (SELECT 1 AS x)); +SELECT stddevSampOrNullIfMerge(state) FROM (SELECT stddevSampOrNullIfState(x, x > 1) AS state FROM (SELECT 1 AS x)); +SELECT maxOrDefaultIfMerge(state) FROM (SELECT maxOrDefaultIfState(x, x > 1) AS state FROM (SELECT 1 AS x)); +SELECT maxOrNullIfMerge(state) FROM (SELECT maxOrNullIfState(x, x > 1) AS state FROM (SELECT 1 AS x)); + +SELECT '--- Int Non-empty ---'; + +SELECT arrayReduce('avgOrDefault', [1]); +SELECT arrayReduce('avgOrNull', [1]); +SELECT arrayReduce('stddevSampOrDefault', [1]); +SELECT arrayReduce('stddevSampOrNull', [1]); +SELECT arrayReduce('maxOrDefault', [1]); +SELECT arrayReduce('maxOrNull', [1]); + +SELECT avgOrDefaultIf(x, x > 0) FROM (SELECT 1 AS x); +SELECT avgOrNullIf(x, x > 0) FROM (SELECT 1 AS x); +SELECT stddevSampOrDefaultIf(x, x > 0) FROM (SELECT 1 AS x); +SELECT stddevSampOrNullIf(x, x > 0) FROM (SELECT 1 AS x); +SELECT maxOrDefaultIf(x, x > 0) FROM (SELECT 1 AS x); +SELECT maxOrNullIf(x, x > 0) FROM (SELECT 
1 AS x); + +SELECT avgOrDefaultIfMerge(state) FROM (SELECT avgOrDefaultIfState(x, x > 0) AS state FROM (SELECT 1 AS x)); +SELECT avgOrNullIfMerge(state) FROM (SELECT avgOrNullIfState(x, x > 0) AS state FROM (SELECT 1 AS x)); +SELECT stddevSampOrDefaultIfMerge(state) FROM (SELECT stddevSampOrDefaultIfState(x, x > 0) AS state FROM (SELECT 1 AS x)); +SELECT stddevSampOrNullIfMerge(state) FROM (SELECT stddevSampOrNullIfState(x, x > 0) AS state FROM (SELECT 1 AS x)); +SELECT maxOrDefaultIfMerge(state) FROM (SELECT maxOrDefaultIfState(x, x > 0) AS state FROM (SELECT 1 AS x)); +SELECT maxOrNullIfMerge(state) FROM (SELECT maxOrNullIfState(x, x > 0) AS state FROM (SELECT 1 AS x)); + +SELECT '--- Other Types Empty ---'; + +SELECT arrayReduce('maxOrDefault', arrayPopBack(['hello'])); +SELECT arrayReduce('maxOrNull', arrayPopBack(['hello'])); + +SELECT arrayReduce('maxOrDefault', arrayPopBack(arrayPopBack([toDateTime('2011-04-05 14:19:19'), null]))); +SELECT arrayReduce('maxOrNull', arrayPopBack(arrayPopBack([toDateTime('2011-04-05 14:19:19'), null]))); + +SELECT arrayReduce('avgOrDefault', arrayPopBack([toDecimal128(-123.45, 2)])); +SELECT arrayReduce('avgOrNull', arrayPopBack([toDecimal128(-123.45, 2)])); +SELECT arrayReduce('stddevSampOrDefault', arrayPopBack([toDecimal128(-123.45, 2)])); +SELECT arrayReduce('stddevSampOrNull', arrayPopBack([toDecimal128(-123.45, 2)])); +SELECT arrayReduce('maxOrDefault', arrayPopBack([toDecimal128(-123.45, 2)])); +SELECT arrayReduce('maxOrNull', arrayPopBack([toDecimal128(-123.45, 2)])); + +SELECT '--- Other Types Non-empty ---'; + +SELECT arrayReduce('maxOrDefault', ['hello']); +SELECT arrayReduce('maxOrNull', ['hello']); + +SELECT arrayReduce('maxOrDefault', [toDateTime('2011-04-05 14:19:19'), null]); +SELECT arrayReduce('maxOrNull', [toDateTime('2011-04-05 14:19:19'), null]); + +SELECT arrayReduce('avgOrDefault', [toDecimal128(-123.45, 2)]); +SELECT arrayReduce('avgOrNull', [toDecimal128(-123.45, 2)]); +SELECT arrayReduce('stddevSampOrDefault', [toDecimal128(-123.45, 2)]); +SELECT arrayReduce('stddevSampOrNull', [toDecimal128(-123.45, 2)]); +SELECT arrayReduce('maxOrDefault', [toDecimal128(-123.45, 2)]); +SELECT arrayReduce('maxOrNull', [toDecimal128(-123.45, 2)]); diff --git a/parser/testdata/01018_ip_dictionary_long/ast.json b/parser/testdata/01018_ip_dictionary_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01018_ip_dictionary_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01018_ip_dictionary_long/metadata.json b/parser/testdata/01018_ip_dictionary_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01018_ip_dictionary_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01018_ip_dictionary_long/query.sql b/parser/testdata/01018_ip_dictionary_long/query.sql new file mode 100644 index 000000000..0fa2d7c94 --- /dev/null +++ b/parser/testdata/01018_ip_dictionary_long/query.sql @@ -0,0 +1,661 @@ +-- Tags: long + +SET send_logs_level = 'fatal'; + +SELECT '***ipv4 trie dict***'; +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie +( + prefix String, + asn UInt32, + cca2 String +) +engine = TinyLog; + +-- numbers reordered to test sorting criteria too +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie +SELECT + '255.255.255.255/' || toString((number + 1) * 13 % 33) AS prefix, + toUInt32((number + 1) * 13 % 33) AS asn, + 'NA' as cca2 +FROM system.numbers LIMIT 33; + +INSERT INTO 
{CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.0.0.2', 1272, 'RU'); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.0.0.0/8', 1270, 'RU'); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('202.79.32.2', 11211, 'NP'); +-- non-unique entries will be squashed into one +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('202.79.32.2', 11211, 'NP'); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('202.79.32.2', 11211, 'NP'); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('202.79.32.2', 11211, 'NP'); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('101.79.55.22', 11212, 'UK'); + +CREATE DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict_ipv4_trie +( + prefix String, + asn UInt32, + cca2 String +) +PRIMARY KEY prefix +SOURCE(CLICKHOUSE(host 'localhost' port tcpPort() user 'default' db currentDatabase() table 'table_ipv4_trie')) +LAYOUT(IP_TRIE()) +LIFETIME(MIN 10 MAX 100) +SETTINGS(dictionary_use_async_executor=1, max_threads=8) +; + +-- fuzzer +SELECT '127.0.0.0/24' = dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'prefixprefixprefixprefix', tuple(IPv4StringToNumOrDefault('127.0.0.0127.0.0.0'))); -- { serverError BAD_ARGUMENTS } + +SELECT 0 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('0.0.0.0'))); +SELECT 1 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('128.0.0.0'))); +SELECT 2 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('192.0.0.0'))); +SELECT 3 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('224.0.0.0'))); +SELECT 4 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('240.0.0.0'))); +SELECT 5 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('248.0.0.0'))); +SELECT 6 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('252.0.0.0'))); +SELECT 7 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('254.0.0.0'))); +SELECT 8 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.0.0.0'))); +SELECT 9 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.128.0.0'))); +SELECT 10 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.192.0.0'))); +SELECT 11 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.224.0.0'))); +SELECT 12 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.240.0.0'))); +SELECT 13 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.248.0.0'))); +SELECT 14 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.252.0.0'))); +SELECT 15 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.254.0.0'))); +SELECT 16 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.255.0.0'))); +SELECT 17 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.255.128.0'))); +SELECT 
18 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.255.192.0'))); +SELECT 19 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.255.224.0'))); +SELECT 20 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.255.240.0'))); +SELECT 21 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.255.248.0'))); +SELECT 22 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.255.252.0'))); +SELECT 23 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.255.254.0'))); +SELECT 24 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.255.255.0'))); +SELECT 25 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.255.255.128'))); +SELECT 26 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.255.255.192'))); +SELECT 27 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.255.255.224'))); +SELECT 28 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.255.255.240'))); +SELECT 29 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.255.255.248'))); +SELECT 30 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.255.255.252'))); +SELECT 31 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.255.255.254'))); +SELECT 32 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('255.255.255.255'))); + +SELECT 'RU' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'cca2', tuple(IPv4StringToNum('127.0.0.1'))); + +SELECT 1270 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('127.0.0.0'))); +SELECT 1270 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('127.0.0.1'))); +SELECT 1272 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('127.0.0.2'))); +SELECT 1270 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('127.0.0.3'))); +SELECT 1270 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('127.0.0.255'))); + +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('127.0.0.0'))); +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('127.0.0.1'))); +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('127.0.0.2'))); +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('127.0.0.3'))); +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('127.0.0.255'))); + +SELECT 11212 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('101.79.55.22'))); +SELECT 11212 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv6StringToNum('::ffff:654f:3716'))); +SELECT 11212 == 
dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv6StringToNum('::ffff:101.79.55.22'))); + +SELECT 11211 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(IPv4StringToNum('202.79.32.2'))); + +-- check that dictionary works with aliased types `IPv4` and `IPv6` +SELECT 11211 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(toIPv4('202.79.32.2'))); +SELECT 11212 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'asn', tuple(toIPv6('::ffff:101.79.55.22'))); + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.table_from_ipv4_trie_dict +( + prefix String, + asn UInt32, + cca2 String +) ENGINE = Dictionary({CLICKHOUSE_DATABASE:Identifier}.dict_ipv4_trie); + +SELECT 1272 == asn AND 'RU' == cca2 FROM {CLICKHOUSE_DATABASE:Identifier}.table_from_ipv4_trie_dict +WHERE prefix == '127.0.0.2/32'; + +SELECT 37 == COUNT(*) FROM {CLICKHOUSE_DATABASE:Identifier}.table_from_ipv4_trie_dict; +SELECT 37 == COUNT(DISTINCT prefix) FROM {CLICKHOUSE_DATABASE:Identifier}.table_from_ipv4_trie_dict; + +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.table_from_ipv4_trie_dict; +DROP DICTIONARY IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.dict_ipv4_trie; +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie; + +SELECT '***ipv4 trie dict mask***'; +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie +( + prefix String, + val UInt32 +) +engine = TinyLog; + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie +SELECT + '255.255.255.255/' || toString(number) AS prefix, + toUInt32(number) AS val +FROM VALUES ('number UInt32', 5, 13, 24, 30); + +CREATE DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict_ipv4_trie +( + prefix String, + val UInt32 +) +PRIMARY KEY prefix +SOURCE(CLICKHOUSE(host 'localhost' port tcpPort() user 'default' db currentDatabase() table 'table_ipv4_trie')) +LAYOUT(IP_TRIE()) +LIFETIME(MIN 10 MAX 100); + +SELECT 0 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('0.0.0.0'))); +SELECT 0 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('128.0.0.0'))); +SELECT 0 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('192.0.0.0'))); +SELECT 0 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('224.0.0.0'))); +SELECT 0 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('240.0.0.0'))); +SELECT 5 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('248.0.0.0'))); +SELECT 5 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('252.0.0.0'))); +SELECT 5 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('254.0.0.0'))); +SELECT 5 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.0.0.0'))); +SELECT 5 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.128.0.0'))); +SELECT 5 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.192.0.0'))); +SELECT 5 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.224.0.0'))); +SELECT 5 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', 
tuple(IPv4StringToNum('255.240.0.0'))); +SELECT 13 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.248.0.0'))); +SELECT 13 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.252.0.0'))); +SELECT 13 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.254.0.0'))); +SELECT 13 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.255.0.0'))); +SELECT 13 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.255.128.0'))); +SELECT 13 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.255.192.0'))); +SELECT 13 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.255.224.0'))); +SELECT 13 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.255.240.0'))); +SELECT 13 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.255.248.0'))); +SELECT 13 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.255.252.0'))); +SELECT 13 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.255.254.0'))); +SELECT 24 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.255.255.0'))); +SELECT 24 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.255.255.128'))); +SELECT 24 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.255.255.192'))); +SELECT 24 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.255.255.224'))); +SELECT 24 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.255.255.240'))); +SELECT 24 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.255.255.248'))); +SELECT 30 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.255.255.252'))); +SELECT 30 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.255.255.254'))); +SELECT 30 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('255.255.255.255'))); + +DROP DICTIONARY IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.dict_ipv4_trie; +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.table_from_ipv4_trie_dict; +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie; + +SELECT '***ipv4 trie dict pt2***'; + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie ( prefix String, val UInt32 ) engine = TinyLog; + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.0.0.0/8', 1); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.0.0.0/16', 2); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.0.0.0/24', 3); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.0.0.1/32', 4); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.0.127.0/32', 5); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.0.128.1/32', 6); +INSERT INTO 
{CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.0.255.0/32', 7); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.0.255.1/32', 8); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.0.255.255/32', 9); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.1.0.0/16', 10); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.1.1.0', 11); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.1.255.0/24', 12); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.254.0.0/15', 13); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.254.0.127', 14); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.255.0.0/16', 15); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.255.128.0/24', 16); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.255.128.1/32', 17); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.255.128.10/32', 18); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.255.128.128/25', 19); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.255.255.128/32', 20); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ipv4_trie VALUES ('127.255.255.255/32', 21); + +CREATE DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict_ipv4_trie ( prefix String, val UInt32 ) +PRIMARY KEY prefix +SOURCE(CLICKHOUSE(host 'localhost' port tcpPort() user 'default' db currentDatabase() table 'table_ipv4_trie')) +LAYOUT(IP_TRIE(ACCESS_TO_KEY_FROM_ATTRIBUTES 1)) +LIFETIME(MIN 10 MAX 100); + +SELECT '127.0.0.0/24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'prefix', tuple(IPv4StringToNum('127.0.0.0'))); +SELECT '127.0.0.1/32' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'prefix', tuple(IPv4StringToNum('127.0.0.1'))); +SELECT '127.0.0.0/24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'prefix', tuple(IPv4StringToNum('127.0.0.127'))); +SELECT '127.0.0.0/16' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'prefix', tuple(IPv4StringToNum('127.0.255.127'))); +SELECT '127.255.0.0/16' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'prefix', tuple(IPv4StringToNum('127.255.127.127'))); +SELECT '127.255.128.0/24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'prefix', tuple(IPv4StringToNum('127.255.128.9'))); +SELECT '127.255.128.0/24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'prefix', tuple(IPv4StringToNum('127.255.128.127'))); +SELECT '127.255.128.10/32' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'prefix', tuple(IPv4StringToNum('127.255.128.10'))); +SELECT '127.255.128.128/25' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'prefix', tuple(IPv4StringToNum('127.255.128.255'))); +SELECT '127.255.255.128/32' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'prefix', tuple(IPv4StringToNum('127.255.255.128'))); + +SELECT 3 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('127.0.0.0'))); +SELECT 4 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('127.0.0.1'))); +SELECT 3 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('127.0.0.127'))); +SELECT 2 == 
dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('127.0.255.127'))); +SELECT 15 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('127.255.127.127'))); +SELECT 16 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('127.255.128.9'))); +SELECT 16 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('127.255.128.127'))); +SELECT 18 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('127.255.128.10'))); +SELECT 19 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('127.255.128.255'))); +SELECT 20 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv4StringToNum('127.255.255.128'))); + +SELECT 3 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv6StringToNum('::ffff:7f00:0'))); +SELECT 4 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv6StringToNum('::ffff:7f00:1'))); +SELECT 3 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv6StringToNum('::ffff:7f00:7f'))); +SELECT 2 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv6StringToNum('::ffff:7f00:ff7f'))); +SELECT 15 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv6StringToNum('::ffff:7fff:7f7f'))); +SELECT 16 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv6StringToNum('::ffff:7fff:8009'))); +SELECT 16 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv6StringToNum('::ffff:7fff:807f'))); +SELECT 18 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv6StringToNum('::ffff:7fff:800a'))); +SELECT 19 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv6StringToNum('::ffff:7fff:80ff'))); +SELECT 20 == dictGetUInt32({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 'val', tuple(IPv6StringToNum('::ffff:7fff:ff80'))); + +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('127.0.0.0'))); +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('127.0.0.1'))); +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('127.0.0.127'))); +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('127.0.255.127'))); +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('127.255.127.127'))); +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('127.255.128.9'))); +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('127.255.128.127'))); +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('127.255.128.10'))); +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('127.255.128.255'))); +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('127.255.255.128'))); + +SELECT 0 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('128.127.127.127'))); +SELECT 0 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', 
tuple(IPv4StringToNum('128.127.127.0'))); +SELECT 0 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('255.127.127.0'))); +SELECT 0 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('255.0.0.0'))); +SELECT 0 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('0.0.0.0'))); +SELECT 0 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ipv4_trie', tuple(IPv4StringToNum('1.1.1.1'))); + +SELECT '***ipv6 trie dict***'; + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.table_ip_trie +( + prefix String, + val String +) +engine = TinyLog; + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ip_trie VALUES ('101.79.55.22', 'JA'), ('127.0.0.1', 'RU'), ('2620:0:870::/48', 'US'), ('2a02:6b8:1::/48', 'UK'), ('2001:db8::/32', 'ZZ'); + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ip_trie +SELECT + 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/' || toString((number + 1) * 13 % 129) AS prefix, + toString((number + 1) * 13 % 129) AS val +FROM system.numbers LIMIT 129; + +CREATE DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict_ip_trie +( + prefix String, + val String +) +PRIMARY KEY prefix +SOURCE(CLICKHOUSE(host 'localhost' port tcpPort() user 'default' db currentDatabase() table 'table_ip_trie')) +LAYOUT(IP_TRIE(ACCESS_TO_KEY_FROM_ATTRIBUTES 1)) +LIFETIME(MIN 10 MAX 100); + +SELECT 'US' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('2620:0:870::'))); +SELECT 'UK' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('2a02:6b8:1::'))); +SELECT 'ZZ' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('2001:db8::'))); +SELECT 'ZZ' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('2001:db8:ffff::'))); + +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', tuple(IPv6StringToNum('2001:db8:ffff::'))); +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', tuple(IPv6StringToNum('2001:db8:ffff:ffff::'))); +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', tuple(IPv6StringToNum('2001:db8:ffff:1::'))); + +SELECT '0' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('654f:3716::'))); + +SELECT 'JA' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('::ffff:654f:3716'))); +SELECT 'JA' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('::ffff:101.79.55.22'))); +SELECT 'JA' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv4StringToNum('101.79.55.22'))); +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', tuple(IPv4StringToNum('127.0.0.1'))); +SELECT 1 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', tuple(IPv6StringToNum('::ffff:127.0.0.1'))); + +SELECT '2620:0:870::/48' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'prefix', tuple(IPv6StringToNum('2620:0:870::'))); +SELECT '2a02:6b8:1::/48' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'prefix', tuple(IPv6StringToNum('2a02:6b8:1::1'))); +SELECT '2001:db8::/32' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'prefix', tuple(IPv6StringToNum('2001:db8::1'))); +SELECT '::ffff:101.79.55.22/128' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'prefix', 
tuple(IPv6StringToNum('::ffff:654f:3716'))); +SELECT '::ffff:101.79.55.22/128' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'prefix', tuple(IPv6StringToNum('::ffff:101.79.55.22'))); + +SELECT '0' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('::0'))); +SELECT '1' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('8000::'))); +SELECT '2' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('c000::'))); +SELECT '3' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('e000::'))); +SELECT '4' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('f000::'))); +SELECT '5' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('f800::'))); +SELECT '6' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('fc00::'))); +SELECT '7' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('fe00::'))); +SELECT '8' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ff00::'))); +SELECT '9' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ff80::'))); +SELECT '10' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffc0::'))); +SELECT '11' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffe0::'))); +SELECT '12' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('fff0::'))); +SELECT '13' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('fff8::'))); +SELECT '14' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('fffc::'))); +SELECT '15' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('fffe::'))); +SELECT '16' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff::'))); +SELECT '17' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:8000::'))); +SELECT '18' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:c000::'))); +SELECT '19' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:e000::'))); +SELECT '20' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:f000::'))); +SELECT '21' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:f800::'))); +SELECT '22' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:fc00::'))); +SELECT '18' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:c000::'))); +SELECT '19' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:e000::'))); +SELECT '20' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:f000::'))); +SELECT '21' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:f800::'))); +SELECT '22' == 
+SELECT '23' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:fe00::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ff00::'))); +SELECT '25' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ff80::'))); +SELECT '26' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffc0::'))); +SELECT '27' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffe0::'))); +SELECT '28' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:fff0::'))); +SELECT '29' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:fff8::'))); +SELECT '30' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:fffc::'))); +SELECT '31' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:fffe::'))); +SELECT '32' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff::'))); +SELECT '33' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:8000::'))); +SELECT '34' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:c000::'))); +SELECT '35' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:e000::'))); +SELECT '36' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:f000::'))); +SELECT '37' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:f800::'))); +SELECT '38' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:fc00::'))); +SELECT '39' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:fe00::'))); +SELECT '40' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ff00::'))); +SELECT '41' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ff80::'))); +SELECT '42' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffc0::'))); +SELECT '43' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffe0::'))); +SELECT '44' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:fff0::'))); +SELECT '45' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:fff8::'))); +SELECT '46' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:fffc::'))); +SELECT '47' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:fffe::'))); +SELECT '48' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val',
tuple(IPv6StringToNum('ffff:ffff:ffff:8000::'))); +SELECT '50' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:c000::'))); +SELECT '51' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:e000::'))); +SELECT '52' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:f000::'))); +SELECT '53' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:f800::'))); +SELECT '54' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:fc00::'))); +SELECT '55' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:fe00::'))); +SELECT '56' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ff00::'))); +SELECT '57' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ff80::'))); +SELECT '58' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffc0::'))); +SELECT '59' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffe0::'))); +SELECT '60' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:fff0::'))); +SELECT '61' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:fff8::'))); +SELECT '62' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:fffc::'))); +SELECT '63' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:fffe::'))); +SELECT '64' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff::'))); +SELECT '65' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:8000::'))); +SELECT '66' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:c000::'))); +SELECT '67' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:e000::'))); +SELECT '68' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:f000::'))); +SELECT '69' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:f800::'))); +SELECT '70' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:fc00::'))); +SELECT '71' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:fe00::'))); +SELECT '72' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ff00::'))); +SELECT '73' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ff80::'))); +SELECT '74' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffc0::'))); +SELECT '75' == 
dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffe0::'))); +SELECT '76' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:fff0::'))); +SELECT '77' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:fff8::'))); +SELECT '78' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:fffc::'))); +SELECT '79' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:fffe::'))); +SELECT '80' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff::'))); +SELECT '81' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:8000::'))); +SELECT '82' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:c000::'))); +SELECT '83' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:e000::'))); +SELECT '84' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:f000::'))); +SELECT '85' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:f800::'))); +SELECT '86' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:fc00::'))); +SELECT '87' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:fe00::'))); +SELECT '88' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ff00::'))); +SELECT '89' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ff80::'))); +SELECT '90' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffc0::'))); +SELECT '91' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffe0::'))); +SELECT '92' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:fff0::'))); +SELECT '93' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:fff8::'))); +SELECT '94' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:fffc::'))); +SELECT '95' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:fffe::'))); +SELECT '96' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff::'))); +SELECT '97' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:8000:0'))); +SELECT '98' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:c000:0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', 
tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:e000:0'))); +SELECT '100' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:f000:0'))); +SELECT '101' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:f800:0'))); +SELECT '102' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:fc00:0'))); +SELECT '103' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:fe00:0'))); +SELECT '104' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ff00:0'))); +SELECT '105' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ff80:0'))); +SELECT '106' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffc0:0'))); +SELECT '107' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffe0:0'))); +SELECT '108' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:fff0:0'))); +SELECT '109' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:fff8:0'))); +SELECT '110' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:fffc:0'))); +SELECT '111' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:fffe:0'))); +SELECT '112' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:0'))); +SELECT '113' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:8000'))); +SELECT '114' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:c000'))); +SELECT '115' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:e000'))); +SELECT '116' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:f000'))); +SELECT '117' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:f800'))); +SELECT '118' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fc00'))); +SELECT '119' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fe00'))); +SELECT '120' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ff00'))); +SELECT '121' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ff80'))); +SELECT '122' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', 
tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffc0'))); +SELECT '123' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffe0'))); +SELECT '124' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0'))); +SELECT '125' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff8'))); +SELECT '126' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffc'))); +SELECT '127' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffe'))); +SELECT '128' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'))); + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.table_from_ip_trie_dict +( + prefix String, + val String +) ENGINE = Dictionary({CLICKHOUSE_DATABASE:Identifier}.dict_ip_trie); + +SELECT MIN(val == 'US') FROM {CLICKHOUSE_DATABASE:Identifier}.table_from_ip_trie_dict +WHERE prefix == '2620:0:870::/48'; + +SELECT 134 == COUNT(*) FROM {CLICKHOUSE_DATABASE:Identifier}.table_from_ip_trie_dict; + +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.table_from_ip_trie_dict; +DROP DICTIONARY IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.dict_ip_trie; +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.table_ip_trie; + +SELECT '***ipv6 trie dict mask***'; + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.table_ip_trie +( + prefix String, + val String +) +engine = TinyLog; + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ip_trie +SELECT + 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/' || toString(number) AS prefix, + toString(number) AS val +FROM VALUES ('number UInt32', 5, 13, 24, 48, 49, 99, 127); + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ip_trie VALUES ('101.79.55.22', 'JA'); + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_ip_trie +SELECT + '255.255.255.255/' || toString(number) AS prefix, + toString(number) AS val +FROM VALUES ('number UInt32', 5, 13, 24, 30); + +CREATE DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict_ip_trie +( + prefix String, + val String +) +PRIMARY KEY prefix +SOURCE(CLICKHOUSE(host 'localhost' port tcpPort() user 'default' db currentDatabase() table 'table_ip_trie')) +LAYOUT(IP_TRIE()) +LIFETIME(MIN 10 MAX 100); + +SELECT 0 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', tuple(IPv6StringToNum('::ffff:1:1'))); + +SELECT '' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('654f:3716::'))); +SELECT 0 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', tuple(IPv6StringToNum('654f:3716::'))); +SELECT 0 == dictHas({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', tuple(IPv6StringToNum('654f:3716:ffff::'))); + +SELECT 'JA' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('::ffff:654f:3716'))); +SELECT 'JA' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('::ffff:101.79.55.22'))); +SELECT 'JA' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv4StringToNum('101.79.55.22'))); + +SELECT '' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('::0'))); +SELECT 
'' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('8000::'))); +SELECT '' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('c000::'))); +SELECT '' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('e000::'))); +SELECT '' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('f000::'))); +SELECT '5' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('f800::'))); +SELECT '5' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('fc00::'))); +SELECT '5' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('fe00::'))); +SELECT '5' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ff00::'))); +SELECT '5' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ff80::'))); +SELECT '5' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffc0::'))); +SELECT '5' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffe0::'))); +SELECT '5' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('fff0::'))); +SELECT '13' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('fff8::'))); +SELECT '13' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('fffc::'))); +SELECT '13' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('fffe::'))); +SELECT '13' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff::'))); +SELECT '13' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:8000::'))); +SELECT '13' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:c000::'))); +SELECT '13' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:e000::'))); +SELECT '13' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:f000::'))); +SELECT '13' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:f800::'))); +SELECT '13' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:fc00::'))); +SELECT '13' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:fe00::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ff00::')));
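+-- (ip_trie lookups resolve to the longest stored prefix covering the key: ffff:ff00:: above +-- has 24 leading one-bits, so the /24 entry wins over the shorter /5 and /13 prefixes.)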
+SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ff80::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffc0::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffe0::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:fff0::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:fff8::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:fffc::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:fffe::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:8000::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:c000::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:e000::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:f000::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:f800::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:fc00::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:fe00::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ff00::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ff80::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffc0::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffe0::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:fff0::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:fff8::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:fffc::'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:fffe::'))); +SELECT '48' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:8000::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:c000::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:e000::'))); +SELECT '49' == 
dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:f000::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:f800::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:fc00::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:fe00::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ff00::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ff80::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffc0::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffe0::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:fff0::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:fff8::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:fffc::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:fffe::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:8000::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:c000::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:e000::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:f000::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:f800::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:fc00::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:fe00::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ff00::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ff80::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffc0::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffe0::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:fff0::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', 
tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:fff8::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:fffc::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:fffe::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:8000::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:c000::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:e000::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:f000::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:f800::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:fc00::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:fe00::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ff00::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ff80::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffc0::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffe0::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:fff0::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:fff8::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:fffc::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:fffe::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff::'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:8000:0'))); +SELECT '49' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:c000:0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:e000:0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:f000:0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', 
tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:f800:0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:fc00:0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:fe00:0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ff00:0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ff80:0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffc0:0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffe0:0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:fff0:0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:fff8:0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:fffc:0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:fffe:0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:8000'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:c000'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:e000'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:f000'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:f800'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fc00'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fe00'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ff00'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ff80'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffc0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffe0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', 
tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff8'))); +SELECT '99' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffc'))); +SELECT '127' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffe'))); +SELECT '127' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv6StringToNum('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'))); + +SELECT '' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv4StringToNum('0.0.0.0'))); +SELECT '' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv4StringToNum('128.0.0.0'))); +SELECT '' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv4StringToNum('240.0.0.0'))); +SELECT '5' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv4StringToNum('248.0.0.0'))); +SELECT '5' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv4StringToNum('252.0.0.0'))); +SELECT '5' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv4StringToNum('255.240.0.0'))); +SELECT '13' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv4StringToNum('255.248.0.0'))); +SELECT '13' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv4StringToNum('255.252.0.0'))); +SELECT '13' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv4StringToNum('255.255.254.0'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv4StringToNum('255.255.255.0'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv4StringToNum('255.255.255.128'))); +SELECT '24' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv4StringToNum('255.255.255.248'))); +SELECT '30' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv4StringToNum('255.255.255.252'))); +SELECT '30' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv4StringToNum('255.255.255.254'))); +SELECT '30' == dictGetString({CLICKHOUSE_DATABASE:String} || '.dict_ip_trie', 'val', tuple(IPv4StringToNum('255.255.255.255'))); + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; diff --git a/parser/testdata/01018_optimize_read_in_order_with_in_subquery/ast.json b/parser/testdata/01018_optimize_read_in_order_with_in_subquery/ast.json new file mode 100644 index 000000000..bac7db7f0 --- /dev/null +++ b/parser/testdata/01018_optimize_read_in_order_with_in_subquery/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001192185, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01018_optimize_read_in_order_with_in_subquery/metadata.json b/parser/testdata/01018_optimize_read_in_order_with_in_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01018_optimize_read_in_order_with_in_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01018_optimize_read_in_order_with_in_subquery/query.sql b/parser/testdata/01018_optimize_read_in_order_with_in_subquery/query.sql new file mode 100644 index 000000000..dcec82461 --- /dev/null +++ b/parser/testdata/01018_optimize_read_in_order_with_in_subquery/query.sql @@ -0,0 +1,11 @@ +SET max_threads = 2; +SET optimize_read_in_order = 1; + +DROP TABLE IF EXISTS TESTTABLE4; +CREATE TABLE TESTTABLE4 (_id UInt64, pt String, l String ) +ENGINE = MergeTree() PARTITION BY (pt) ORDER BY (_id); +INSERT INTO TESTTABLE4 VALUES (0,'1','1'), (1,'0','1'); + +SELECT _id FROM TESTTABLE4 PREWHERE l IN (select '1') ORDER BY _id DESC LIMIT 10; + +DROP TABLE TESTTABLE4; diff --git a/parser/testdata/01019_Buffer_and_max_memory_usage/ast.json b/parser/testdata/01019_Buffer_and_max_memory_usage/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01019_Buffer_and_max_memory_usage/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01019_Buffer_and_max_memory_usage/metadata.json b/parser/testdata/01019_Buffer_and_max_memory_usage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01019_Buffer_and_max_memory_usage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01019_Buffer_and_max_memory_usage/query.sql b/parser/testdata/01019_Buffer_and_max_memory_usage/query.sql new file mode 100644 index 000000000..78700cb98 --- /dev/null +++ b/parser/testdata/01019_Buffer_and_max_memory_usage/query.sql @@ -0,0 +1,50 @@ +-- Tags: no-replicated-database + +DROP TABLE IF EXISTS null_; +DROP TABLE IF EXISTS buffer_; +DROP TABLE IF EXISTS aggregation_; + +-- Each UInt64 is 8 bytes, +-- so 10e6 rows is 80e6 bytes. +-- +-- Use LIMIT max_rows+1 to force flush from the query context, and to avoid +-- flushing from the background thread, since in that case it can steal memory, and +-- the max_memory_usage may be exceeded while squashing other blocks. + + +CREATE TABLE null_ (key UInt64) Engine=Null(); +CREATE TABLE buffer_ (key UInt64) Engine=Buffer(currentDatabase(), null_, + 1, /* num_layers */ + 10e6, /* min_time, placeholder */ + 10e6, /* max_time, placeholder */ + 0, /* min_rows */ + 10e6, /* max_rows */ + 0, /* min_bytes */ + 80e6 /* max_bytes */ +); + +SET max_memory_usage=10e6; +SET max_block_size=100e3; +SET max_insert_threads=1; + +-- Check that max_memory_usage is ignored only on flush and not on squash +SET min_insert_block_size_bytes=9e6; +SET min_insert_block_size_rows=0; +INSERT INTO buffer_ SELECT toUInt64(number) FROM system.numbers LIMIT toUInt64(10e6+1); -- { serverError MEMORY_LIMIT_EXCEEDED } + +OPTIMIZE TABLE buffer_; -- flush just in case + +-- Create a complex aggregation to fail with a 'Memory limit exceeded' error while writing to Buffer(). +-- Converting UInt64 to String is enough to trigger the problem. +CREATE MATERIALIZED VIEW aggregation_ engine=Memory() AS SELECT toString(key) FROM null_; + +-- Check that max_memory_usage is ignored during write from StorageBuffer +SET min_insert_block_size_bytes=0; +SET min_insert_block_size_rows=100e3; +INSERT INTO buffer_ SELECT toUInt64(number) FROM system.numbers LIMIT toUInt64(10e6+1); +-- Check that 10e6 rows have been flushed from the query, not from the background worker.
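+-- (10e6+1 rows are inserted and max_rows is 10e6, so if the flush really happened in the +-- query context, a single leftover row should remain in the buffer at this point.)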
+SELECT count() FROM buffer_; + +DROP TABLE null_; +DROP TABLE buffer_; +DROP TABLE aggregation_; diff --git a/parser/testdata/01019_alter_materialized_view_query/ast.json b/parser/testdata/01019_alter_materialized_view_query/ast.json new file mode 100644 index 000000000..c948e4dee --- /dev/null +++ b/parser/testdata/01019_alter_materialized_view_query/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery src_01019 (children 1)" + }, + { + "explain": " Identifier src_01019" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001274352, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/01019_alter_materialized_view_query/metadata.json b/parser/testdata/01019_alter_materialized_view_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01019_alter_materialized_view_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01019_alter_materialized_view_query/query.sql b/parser/testdata/01019_alter_materialized_view_query/query.sql new file mode 100644 index 000000000..0011f5be5 --- /dev/null +++ b/parser/testdata/01019_alter_materialized_view_query/query.sql @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS src_01019; +DROP TABLE IF EXISTS dest_01019; +DROP TABLE IF EXISTS pipe_01019; + +CREATE TABLE src_01019(v UInt64) ENGINE = Null; +CREATE TABLE dest_01019(v UInt64) Engine = MergeTree() ORDER BY v; + +CREATE MATERIALIZED VIEW pipe_01019 TO dest_01019 AS +SELECT v FROM src_01019; + +INSERT INTO src_01019 VALUES (1), (2), (3); + +SET allow_experimental_alter_materialized_view_structure = 1; + +-- Live alter which changes query logic and adds an extra column. +ALTER TABLE pipe_01019 + MODIFY QUERY + SELECT + v * 2 as v, + 1 as v2 + FROM src_01019; + +INSERT INTO src_01019 VALUES (1), (2), (3); + +SELECT * FROM dest_01019 ORDER BY v; + +ALTER TABLE dest_01019 + ADD COLUMN v2 UInt64; + +INSERT INTO src_01019 VALUES (42); +SELECT * FROM dest_01019 ORDER BY v; + +DROP TABLE src_01019; +DROP TABLE dest_01019; +DROP TABLE pipe_01019; diff --git a/parser/testdata/01019_array_fill/ast.json b/parser/testdata/01019_array_fill/ast.json new file mode 100644 index 000000000..4b9b8736f --- /dev/null +++ b/parser/testdata/01019_array_fill/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayFill (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3, UInt64_11, UInt64_12, UInt64_13, UInt64_4, UInt64_5, UInt64_6, UInt64_14, UInt64_15, UInt64_16]" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001294748, + "rows_read": 13, + "bytes_read": 619 + } +} diff --git a/parser/testdata/01019_array_fill/metadata.json b/parser/testdata/01019_array_fill/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01019_array_fill/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01019_array_fill/query.sql b/parser/testdata/01019_array_fill/query.sql new file mode 100644 index 000000000..af48e8d0b --- /dev/null +++ b/parser/testdata/01019_array_fill/query.sql @@ -0,0 +1,11 @@ +SELECT arrayFill(x -> 0, [1, 2, 3, 11, 12, 13, 4, 5, 6, 14, 15, 16]); +SELECT arrayReverseFill(x -> 0, [1, 2, 3, 11, 12, 13, 4, 5, 6, 14, 15, 16]); +SELECT arrayFill(x -> 1, [1, 2, 3, 11, 12, 13, 4, 5, 6, 14, 15, 16]); +SELECT arrayReverseFill(x -> 1, [1, 2, 3, 11, 12, 13, 4, 5, 6, 14, 15, 16]); + +SELECT arrayFill(x -> x < 10, [1, 2, 3, 11, 12, 13, 4, 5, 6, 14, 15, 16]); +SELECT arrayReverseFill(x -> x < 10, [1, 2, 3, 11, 12, 13, 4, 5, 6, 14, 15, 16]); +SELECT arrayFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, 6, 14, null, null]); +SELECT arrayReverseFill(x -> not isNull(x), [1, null, 3, 11, 12, null, null, 5, 6, 14, null, null]); +SELECT arrayFill((x, y) -> y, [1, 2, 3, 11, 12, 13, 4, 5, 6, 14, 15, 16], [0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0]); +SELECT arrayReverseFill((x, y) -> y, [1, 2, 3, 11, 12, 13, 4, 5, 6, 14, 15, 16], [0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0]); diff --git a/parser/testdata/01019_materialized_view_select_extra_columns/ast.json b/parser/testdata/01019_materialized_view_select_extra_columns/ast.json new file mode 100644 index 000000000..a12b1c908 --- /dev/null +++ b/parser/testdata/01019_materialized_view_select_extra_columns/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00120106, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01019_materialized_view_select_extra_columns/metadata.json b/parser/testdata/01019_materialized_view_select_extra_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01019_materialized_view_select_extra_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01019_materialized_view_select_extra_columns/query.sql b/parser/testdata/01019_materialized_view_select_extra_columns/query.sql new file mode 100644 index 000000000..7feb5ecb8 --- /dev/null +++ b/parser/testdata/01019_materialized_view_select_extra_columns/query.sql @@ -0,0 +1,37 @@ +SET allow_materialized_view_with_bad_select = 1; + +DROP TABLE IF EXISTS mv_extra_columns_dst; +DROP TABLE IF EXISTS mv_extra_columns_src; +DROP TABLE IF EXISTS mv_extra_columns_view; + +CREATE TABLE mv_extra_columns_dst ( + v UInt64 +) ENGINE = MergeTree() + PARTITION BY tuple() + ORDER BY v; + +CREATE TABLE mv_extra_columns_src ( + v1 UInt64, + v2 UInt64 +) ENGINE = Null; + +-- Extra columns are ignored when pushing to the destination table. +-- This test exists to prevent unintended changes to existing behaviour. +-- +-- Although this behaviour might not be ideal, it can be exploited for 0-downtime changes to materialized views. +-- Step 1: Add the new column to the source table. Step 2: Create a new view reading the source column. +-- Step 3: Swap views using `RENAME TABLE`. Step 4: Add the new column to the destination table as well.
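+-- The view below intentionally selects two columns (v, v2) while mv_extra_columns_dst +-- only defines v; the extra column is ignored when rows are pushed to the destination.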
+CREATE MATERIALIZED VIEW mv_extra_columns_view TO mv_extra_columns_dst +AS SELECT + v1 as v, + v2 as v2 +FROM mv_extra_columns_src; + +INSERT INTO mv_extra_columns_src VALUES (0, 0), (1, 1), (2, 2); + +SELECT * FROM mv_extra_columns_dst ORDER by v; +SELECT * FROM mv_extra_columns_view; -- { serverError NOT_FOUND_COLUMN_IN_BLOCK } + +DROP TABLE mv_extra_columns_view; +DROP TABLE mv_extra_columns_src; +DROP TABLE mv_extra_columns_dst; diff --git a/parser/testdata/01020_function_array_compact/ast.json b/parser/testdata/01020_function_array_compact/ast.json new file mode 100644 index 000000000..82eca00d3 --- /dev/null +++ b/parser/testdata/01020_function_array_compact/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayCompact (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_0]" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001242579, + "rows_read": 7, + "bytes_read": 273 + } +} diff --git a/parser/testdata/01020_function_array_compact/metadata.json b/parser/testdata/01020_function_array_compact/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01020_function_array_compact/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01020_function_array_compact/query.sql b/parser/testdata/01020_function_array_compact/query.sql new file mode 100644 index 000000000..29adb007d --- /dev/null +++ b/parser/testdata/01020_function_array_compact/query.sql @@ -0,0 +1,11 @@ +select arrayCompact([0]); +select arrayCompact([1]); +select arrayCompact([2]); +select arrayCompact([1,1]); +select arrayCompact([1,2]); +select arrayCompact([1,1,2]); +select arrayCompact([1,2,1]); +select arrayCompact([2,1,1]); +select arrayCompact([1,2,2,3,3,3,4,4,4,4,5,5,5,5,5]); +SELECT arrayCompact(arrayMap(x->0, [NULL])); +SELECT toString(arrayCompact(arrayMap(x->0, [NULL]))); diff --git a/parser/testdata/01020_function_char/ast.json b/parser/testdata/01020_function_char/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01020_function_char/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01020_function_char/metadata.json b/parser/testdata/01020_function_char/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01020_function_char/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01020_function_char/query.sql b/parser/testdata/01020_function_char/query.sql new file mode 100644 index 000000000..f726b5ed3 --- /dev/null +++ b/parser/testdata/01020_function_char/query.sql @@ -0,0 +1,4 @@ +/* char function */ +SELECT char(65, 66.1, 67.2, 68.3, 97.4, 98.5, 99.6, 100.7, 101.0, 102.0, 103.0); +SELECT char(65 + 256, 66 + 1024, 66 + 1024 + 1); +SELECT char(65, 66 + number, 67 + number) from numbers(3); diff --git a/parser/testdata/01020_having_without_group_by/ast.json b/parser/testdata/01020_having_without_group_by/ast.json new file mode 100644 index 000000000..b11ad0535 --- /dev/null +++ b/parser/testdata/01020_having_without_group_by/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001123234, + "rows_read": 6, + "bytes_read": 204 + } +} diff --git a/parser/testdata/01020_having_without_group_by/metadata.json b/parser/testdata/01020_having_without_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01020_having_without_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01020_having_without_group_by/query.sql b/parser/testdata/01020_having_without_group_by/query.sql new file mode 100644 index 000000000..cf9b59b35 --- /dev/null +++ b/parser/testdata/01020_having_without_group_by/query.sql @@ -0,0 +1 @@ +SELECT 1 HAVING 1; diff --git a/parser/testdata/01021_create_as_select/ast.json b/parser/testdata/01021_create_as_select/ast.json new file mode 100644 index 000000000..1de79fdfe --- /dev/null +++ b/parser/testdata/01021_create_as_select/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery create_as_select_01021 (children 1)" + }, + { + "explain": " Identifier create_as_select_01021" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00111532, + "rows_read": 2, + "bytes_read": 96 + } +} diff --git a/parser/testdata/01021_create_as_select/metadata.json b/parser/testdata/01021_create_as_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01021_create_as_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01021_create_as_select/query.sql b/parser/testdata/01021_create_as_select/query.sql new file mode 100644 index 000000000..684350e18 --- /dev/null +++ b/parser/testdata/01021_create_as_select/query.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS create_as_select_01021; +CREATE TABLE create_as_select_01021 engine=Memory AS (SELECT (1, 1)); +SELECT * FROM create_as_select_01021; +DROP TABLE create_as_select_01021; diff --git a/parser/testdata/01021_only_tuple_columns/ast.json b/parser/testdata/01021_only_tuple_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01021_only_tuple_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01021_only_tuple_columns/metadata.json b/parser/testdata/01021_only_tuple_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01021_only_tuple_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01021_only_tuple_columns/query.sql b/parser/testdata/01021_only_tuple_columns/query.sql new file mode 100644 index 000000000..d8d146f59 --- /dev/null +++ b/parser/testdata/01021_only_tuple_columns/query.sql @@ -0,0 +1,25 @@ + +CREATE TABLE test +( + `x` Tuple(UInt64, UInt64) +) +ENGINE = MergeTree +ORDER BY x; + +INSERT INTO test SELECT (number, number) FROM numbers(1000000); + +SELECT COUNT() FROM test; + +ALTER TABLE test DETACH PARTITION tuple(); + +ALTER TABLE test ATTACH PARTITION tuple(); + +SELECT COUNT() FROM test; + +DETACH TABLE test; + +ATTACH TABLE test; + +SELECT COUNT() FROM test; + +DROP TABLE test; diff --git a/parser/testdata/01021_tuple_parser/ast.json b/parser/testdata/01021_tuple_parser/ast.json new file 
mode 100644 index 000000000..7eb134ec3 --- /dev/null +++ b/parser/testdata/01021_tuple_parser/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001352784, + "rows_read": 12, + "bytes_read": 457 + } +} diff --git a/parser/testdata/01021_tuple_parser/metadata.json b/parser/testdata/01021_tuple_parser/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01021_tuple_parser/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01021_tuple_parser/query.sql b/parser/testdata/01021_tuple_parser/query.sql new file mode 100644 index 000000000..4fc7aa1e3 --- /dev/null +++ b/parser/testdata/01021_tuple_parser/query.sql @@ -0,0 +1,11 @@ +SELECT toTypeName((1,)), (1,); + +EXPLAIN SYNTAX SELECT (1,); + +DROP TABLE IF EXISTS tuple_values; + +CREATE TABLE tuple_values (t Tuple(int)) ENGINE = Memory; + +INSERT INTO tuple_values VALUES ((1)), ((2,)); + +DROP TABLE tuple_values; diff --git a/parser/testdata/01023_materialized_view_query_context/ast.json b/parser/testdata/01023_materialized_view_query_context/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01023_materialized_view_query_context/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01023_materialized_view_query_context/metadata.json b/parser/testdata/01023_materialized_view_query_context/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01023_materialized_view_query_context/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01023_materialized_view_query_context/query.sql b/parser/testdata/01023_materialized_view_query_context/query.sql new file mode 100644 index 000000000..f8c282fed --- /dev/null +++ b/parser/testdata/01023_materialized_view_query_context/query.sql @@ -0,0 +1,56 @@ +-- Tags: no-parallel, no-replicated-database + +-- FIXME: the old analyzer does not check that the db exists; the new one checks it, and the test fails. The test is suppressed for replicated databases.
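+-- The log excerpts below illustrate the difference: the new analyzer resolves the dictionary referenced +-- by dictGetUInt64() while analyzing the CREATE query, and fails with BAD_ARGUMENTS if it does not exist yet.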
+-- without analyzer: +-- 2024.02.22 18:55:00.320120 [ 116105 ] {61f04f21-6d66-4064-926f-20657de2e66c} executeQuery: (from 0.0.0.0:0, user: ) (comment: 01023_materialized_view_query_context.sql) /* ddl_entry=query-0000000009 */ CREATE MATERIALIZED VIEW test_143n70zj.mv UUID '0572ef25-139a-4705-a213-601675435648' TO test_143n70zj.output (`key` UInt64, `val` UInt64) AS SELECT key, dictGetUInt64('dict_in_01023.dict', 'val', key) AS val FROM test_143n70zj.dist_out (stage: Complete) +-- 2024.02.22 18:55:00.321303 [ 116105 ] {61f04f21-6d66-4064-926f-20657de2e66c} DDLWorker(test_143n70zj): Executed query: /* ddl_entry=query-0000000009 */ CREATE MATERIALIZED VIEW test_143n70zj.mv UUID '0572ef25-139a-4705-a213-601675435648' TO test_143n70zj.output (`key` UInt64, `val` UInt64) AS SELECT key, dictGetUInt64('dict_in_01023.dict', 'val', key) AS val FROM test_143n70zj.dist_out +-- +-- with analyzer: +-- 2024.02.22 19:33:36.266538 [ 108818 ] {0e1586f5-8ae0-4065-81b7-1e7d43b85d82} executeQuery: (from 0.0.0.0:0, user: ) (comment: 01023_materialized_view_query_context.sql) /* ddl_entry=query-0000000009 */ CREATE MATERIALIZED VIEW test_devov0ke.mv UUID 'bf3a2bfe-1446-4a02-b760-bae514488c5a' TO test_devov0ke.output (`key` UInt64, `val` UInt64) AS SELECT key, dictGetUInt64('dict_in_01023.dict', 'val', key) AS val FROM test_devov0ke.dist_out (stage: Complete) +-- 2024.02.22 19:33:36.266796 [ 108818 ] {0e1586f5-8ae0-4065-81b7-1e7d43b85d82} Planner: Query SELECT __table1.key AS key, dictGetUInt64('dict_in_01023.dict', 'val', __table1.key) AS val FROM test_devov0ke.dist_out AS __table1 to stage Complete only analyze +-- 2024.02.22 19:33:36.266855 [ 108818 ] {0e1586f5-8ae0-4065-81b7-1e7d43b85d82} Planner: Query SELECT __table1.key AS key, dictGetUInt64('dict_in_01023.dict', 'val', __table1.key) AS val FROM test_devov0ke.dist_out AS __table1 from stage FetchColumns to stage Complete only analyze +-- 2024.02.22 19:33:36.280740 [ 108818 ] {0e1586f5-8ae0-4065-81b7-1e7d43b85d82} executeQuery: Code: 36. DB::Exception: Dictionary (`dict_in_01023.dict`) not found. (BAD_ARGUMENTS) (version 24.2.1.1429 (official build)) (from 0.0.0.0:0) (comment: 01023_materialized_view_query_context.sql) (in query: /* ddl_entry=query-0000000009 */ CREATE MATERIALIZED VIEW test_devov0ke.mv UUID 'bf3a2bfe-1446-4a02-b760-bae514488c5a' TO test_devov0ke.output (`key` UInt64, `val` UInt64) AS SELECT key, dictGetUInt64('dict_in_01023.dict', 'val', key) AS val FROM test_devov0ke.dist_out), Stack trace (when copying this message, always include the lines below): +-- 2024.02.22 19:33:36.280936 [ 108818 ] {0e1586f5-8ae0-4065-81b7-1e7d43b85d82} DDLWorker(test_devov0ke): Query /* ddl_entry=query-0000000009 */ CREATE MATERIALIZED VIEW test_devov0ke.mv UUID 'bf3a2bfe-1446-4a02-b760-bae514488c5a' TO test_devov0ke.output (`key` UInt64, `val` UInt64) AS SELECT key, dictGetUInt64('dict_in_01023.dict', 'val', key) AS val FROM test_devov0ke.dist_out wasn't finished successfully: Code: 36. DB::Exception: Dictionary (`dict_in_01023.dict`) not found. 
(BAD_ARGUMENTS), Stack trace (when copying this message, always include the lines below): + +-- Create dictionary, since dictGet*() uses DB::Context in executeImpl() +-- (To cover scope of the Context in PushingToViews chain) + +set distributed_foreground_insert=1; + +DROP TABLE IF EXISTS mv; +DROP DATABASE IF EXISTS dict_in_01023; +CREATE DATABASE dict_in_01023; + +CREATE TABLE dict_in_01023.input (key UInt64, val UInt64) Engine=Memory(); + +CREATE DICTIONARY dict_in_01023.dict +( + key UInt64 DEFAULT 0, + val UInt64 DEFAULT 1 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'input' PASSWORD '' DB 'dict_in_01023')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(HASHED()); + +CREATE TABLE input (key UInt64) Engine=Distributed(test_shard_localhost, currentDatabase(), buffer_, key); +CREATE TABLE null_ (key UInt64) Engine=Null(); +CREATE TABLE buffer_ (key UInt64) Engine=Buffer(currentDatabase(), dist_out, 1, 0, 0, 0, 0, 0, 0); +CREATE TABLE dist_out (key UInt64) Engine=Distributed(test_shard_localhost, currentDatabase(), null_, key); + +CREATE TABLE output (key UInt64, val UInt64) Engine=Memory(); +CREATE MATERIALIZED VIEW mv TO output AS SELECT key, dictGetUInt64('dict_in_01023.dict', 'val', key) val FROM dist_out; + +INSERT INTO input VALUES (1); + +SELECT count() FROM output; + +DROP TABLE mv; +DROP TABLE output; +DROP TABLE dist_out; +DROP TABLE buffer_; +DROP TABLE null_; +DROP TABLE input; +DROP DICTIONARY dict_in_01023.dict; +DROP TABLE dict_in_01023.input; +DROP DATABASE dict_in_01023; diff --git a/parser/testdata/01024__getScalar/ast.json b/parser/testdata/01024__getScalar/ast.json new file mode 100644 index 000000000..27bf308f9 --- /dev/null +++ b/parser/testdata/01024__getScalar/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery foo (children 3)" + }, + { + "explain": " Identifier foo" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration key (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration macro (children 2)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Function __getScalar (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier key" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Null (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001371443, + "rows_read": 14, + "bytes_read": 496 + } +} diff --git a/parser/testdata/01024__getScalar/metadata.json b/parser/testdata/01024__getScalar/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01024__getScalar/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01024__getScalar/query.sql b/parser/testdata/01024__getScalar/query.sql new file mode 100644 index 000000000..1a47ed67e --- /dev/null +++ b/parser/testdata/01024__getScalar/query.sql @@ -0,0 +1 @@ +CREATE TABLE foo (key String, macro String MATERIALIZED __getScalar(key)) Engine=Null(); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/01025_array_compact_generic/ast.json b/parser/testdata/01025_array_compact_generic/ast.json new file mode 100644 index 000000000..653f24dd9 --- /dev/null +++ b/parser/testdata/01025_array_compact_generic/ast.json @@ -0,0 +1,46 @@ +{ 
+ "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayCompact (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001136261, + "rows_read": 8, + "bytes_read": 305 + } +} diff --git a/parser/testdata/01025_array_compact_generic/metadata.json b/parser/testdata/01025_array_compact_generic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01025_array_compact_generic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01025_array_compact_generic/query.sql b/parser/testdata/01025_array_compact_generic/query.sql new file mode 100644 index 000000000..4446d10e9 --- /dev/null +++ b/parser/testdata/01025_array_compact_generic/query.sql @@ -0,0 +1,11 @@ +SELECT arrayCompact([]); +SELECT arrayCompact([1, 1, nan, nan, 2, 2, 2]); +SELECT arrayCompact([1, 1, nan, nan, -nan, 2, 2, 2]); +SELECT arrayCompact([1, 1, NULL, NULL, 2, 2, 2]); +SELECT arrayCompact([1, 1, NULL, NULL, nan, nan, 2, 2, 2]); +SELECT arrayCompact(['hello', '', '', '', 'world', 'world']); +SELECT arrayCompact([[[]], [[], []], [[], []], [[]]]); +SELECT arrayCompact(arrayMap(x -> toString(intDiv(x, 3)), range(number))) FROM numbers(10); +SELECT arrayCompact(x -> x.2, groupArray((number, intDiv(number, 3) % 3))) FROM numbers(10); +SELECT arrayCompact(x -> x.2, groupArray((toString(number), toString(intDiv(number, 3) % 3)))) FROM numbers(10); +SELECT arrayCompact(x -> x.2, groupArray((toString(number), intDiv(number, 3) % 3))) FROM numbers(10); diff --git a/parser/testdata/01026_char_utf8/ast.json b/parser/testdata/01026_char_utf8/ast.json new file mode 100644 index 000000000..162dc2d8c --- /dev/null +++ b/parser/testdata/01026_char_utf8/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function char (alias hello) (children 1)" + }, + { + "explain": " ExpressionList (children 12)" + }, + { + "explain": " Literal UInt64_208" + }, + { + "explain": " Literal UInt64_191" + }, + { + "explain": " Literal UInt64_209" + }, + { + "explain": " Literal UInt64_128" + }, + { + "explain": " Literal UInt64_208" + }, + { + "explain": " Literal UInt64_184" + }, + { + "explain": " Literal UInt64_208" + }, + { + "explain": " Literal UInt64_178" + }, + { + "explain": " Literal UInt64_208" + }, + { + "explain": " Literal UInt64_181" + }, + { + "explain": " Literal UInt64_209" + }, + { + "explain": " Literal UInt64_130" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.00132463, + "rows_read": 18, + "bytes_read": 626 + } +} diff --git a/parser/testdata/01026_char_utf8/metadata.json b/parser/testdata/01026_char_utf8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01026_char_utf8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01026_char_utf8/query.sql 
b/parser/testdata/01026_char_utf8/query.sql new file mode 100644 index 000000000..7ee92683b --- /dev/null +++ b/parser/testdata/01026_char_utf8/query.sql @@ -0,0 +1,4 @@ +SELECT char(0xD0, 0xBF, 0xD1, 0x80, 0xD0, 0xB8, 0xD0, 0xB2, 0xD0, 0xB5, 0xD1, 0x82) AS hello; +SELECT char(-48,-65,-47,-128,-48,-72,-48,-78,-48,-75,-47,-126) AS hello; +SELECT char(-48, 0xB0 + number,-47,-128,-48,-72,-48,-78,-48,-75,-47,-126) AS hello FROM numbers(16); +SELECT char(0xe4, 0xbd, 0xa0, 0xe5, 0xa5, 0xbd) AS hello; diff --git a/parser/testdata/01029_early_constant_folding/ast.json b/parser/testdata/01029_early_constant_folding/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01029_early_constant_folding/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01029_early_constant_folding/metadata.json b/parser/testdata/01029_early_constant_folding/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01029_early_constant_folding/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01029_early_constant_folding/query.sql b/parser/testdata/01029_early_constant_folding/query.sql new file mode 100644 index 000000000..6336b62e0 --- /dev/null +++ b/parser/testdata/01029_early_constant_folding/query.sql @@ -0,0 +1,13 @@ +-- constant folding + +EXPLAIN SYNTAX SELECT 1 WHERE 1 = 0; + +EXPLAIN SYNTAX SELECT 1 WHERE 1 IN (0, 1, 2); + +EXPLAIN SYNTAX SELECT 1 WHERE 1 IN (0, 2) AND 2 = ((SELECT 2) AS subquery); + +-- no constant folding + +EXPLAIN SYNTAX SELECT 1 WHERE 1 IN ((SELECT arrayJoin([1, 2, 3])) AS subquery); + +EXPLAIN SYNTAX SELECT 1 WHERE NOT ignore(); diff --git a/parser/testdata/01030_concatenate_equal_fixed_strings/ast.json b/parser/testdata/01030_concatenate_equal_fixed_strings/ast.json new file mode 100644 index 000000000..e9a9ffbf8 --- /dev/null +++ b/parser/testdata/01030_concatenate_equal_fixed_strings/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toFixedString (alias a) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'aa'" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001298273, + "rows_read": 12, + "bytes_read": 434 + } +} diff --git a/parser/testdata/01030_concatenate_equal_fixed_strings/metadata.json b/parser/testdata/01030_concatenate_equal_fixed_strings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01030_concatenate_equal_fixed_strings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01030_concatenate_equal_fixed_strings/query.sql b/parser/testdata/01030_concatenate_equal_fixed_strings/query.sql new file mode 100644 index 000000000..9870fddff --- /dev/null +++ b/parser/testdata/01030_concatenate_equal_fixed_strings/query.sql @@ -0,0 +1,3 @@ +SELECT toFixedString('aa' , 2 ) as a, concat(a, a); +SELECT toFixedString('aa' , 2 ) as a, length(concat(a, a)); +SELECT toFixedString('aa' , 2 ) as a, toTypeName(concat(a, a)); 
diff --git a/parser/testdata/01030_final_mark_empty_primary_key/ast.json b/parser/testdata/01030_final_mark_empty_primary_key/ast.json new file mode 100644 index 000000000..90511b665 --- /dev/null +++ b/parser/testdata/01030_final_mark_empty_primary_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery empty_pk (children 1)" + }, + { + "explain": " Identifier empty_pk" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001294183, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01030_final_mark_empty_primary_key/metadata.json b/parser/testdata/01030_final_mark_empty_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01030_final_mark_empty_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01030_final_mark_empty_primary_key/query.sql b/parser/testdata/01030_final_mark_empty_primary_key/query.sql new file mode 100644 index 000000000..7bf2e3e73 --- /dev/null +++ b/parser/testdata/01030_final_mark_empty_primary_key/query.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS empty_pk; +CREATE TABLE empty_pk (x UInt64) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 256, index_granularity_bytes = '10Mi'; + +INSERT INTO empty_pk SELECT number FROM numbers(100000); + +SELECT sum(x) from empty_pk; + +DROP TABLE empty_pk; diff --git a/parser/testdata/01030_incorrect_count_summing_merge_tree/ast.json b/parser/testdata/01030_incorrect_count_summing_merge_tree/ast.json new file mode 100644 index 000000000..c13434e30 --- /dev/null +++ b/parser/testdata/01030_incorrect_count_summing_merge_tree/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001154377, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01030_incorrect_count_summing_merge_tree/metadata.json b/parser/testdata/01030_incorrect_count_summing_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01030_incorrect_count_summing_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01030_incorrect_count_summing_merge_tree/query.sql b/parser/testdata/01030_incorrect_count_summing_merge_tree/query.sql new file mode 100644 index 000000000..90b1660e5 --- /dev/null +++ b/parser/testdata/01030_incorrect_count_summing_merge_tree/query.sql @@ -0,0 +1,125 @@ +SET optimize_on_insert = 0; + +select '-- SummingMergeTree with Nullable column without duplicates.'; + +drop table if exists tst; +create table tst (timestamp DateTime, val Nullable(Int8)) engine SummingMergeTree partition by toYYYYMM(timestamp) ORDER by (timestamp); +insert into tst values ('2018-02-01 00:00:00', 1), ('2018-02-02 00:00:00', 2); + +select * from tst final order by timestamp; + +select '-- 2 2'; +select count() from tst; +select count() from tst final; + +select '-- 2 2'; +select count() from tst where timestamp is not null; +select count() from tst final where timestamp is not null; + +select '-- 2 2'; +select count() from tst where val is not null; +select count() from tst final where val is not null; + +select '-- 2 2 2 2'; +select count() from tst final where timestamp>0; +select count() from tst final prewhere timestamp > 0; +select count() from tst final where timestamp > '2017-01-01 00:00:00'; +select count() 
from tst final prewhere timestamp > '2017-01-01 00:00:00'; + +select '-- 2 2'; +select count() from tst final where val>0; +select count() from tst final prewhere val>0; + +select '-- SummingMergeTree with Nullable column with duplicates'; + +drop table if exists tst; +create table tst (timestamp DateTime, val Nullable(Int8)) engine SummingMergeTree partition by toYYYYMM(timestamp) ORDER by (timestamp); +insert into tst values ('2018-02-01 00:00:00', 1), ('2018-02-02 00:00:00', 2), ('2018-02-01 00:00:00', 3), ('2018-02-02 00:00:00', 4); + +select * from tst final order by timestamp; + +select '-- 4 2'; +select count() from tst; +select count() from tst final; + +select '-- 4 2'; +select count() from tst where timestamp is not null; +select count() from tst final where timestamp is not null; + +select '-- 4 2'; +select count() from tst where val is not null; +select count() from tst final where val is not null; + +select '-- 2 2 2 2'; +select count() from tst final where timestamp>0; +select count() from tst final prewhere timestamp > 0; +select count() from tst final where timestamp > '2017-01-01 00:00:00'; +select count() from tst final prewhere timestamp > '2017-01-01 00:00:00'; + +select '-- 2 2'; +select count() from tst final where val>0; +select count() from tst final prewhere val>0; + +select '-- SummingMergeTree without Nullable column without duplicates.'; + +drop table if exists tst; +create table tst (timestamp DateTime, val Int8) engine SummingMergeTree partition by toYYYYMM(timestamp) ORDER by (timestamp); +insert into tst values ('2018-02-01 00:00:00', 1), ('2018-02-02 00:00:00', 2); + +select * from tst final order by timestamp; + +select '-- 2 2'; +select count() from tst; +select count() from tst final; + +select '-- 2 2 '; +select count() from tst where timestamp is not null; +select count() from tst final where timestamp is not null; + +select '-- 2 2'; +select count() from tst where val is not null; +select count() from tst final where val is not null; + +select '-- 2 2 2 2'; +select count() from tst final where timestamp>0; +select count() from tst final prewhere timestamp > 0; +select count() from tst final where timestamp > '2017-01-01 00:00:00'; +select count() from tst final prewhere timestamp > '2017-01-01 00:00:00'; + +select '-- 2 2'; +select count() from tst final where val>0; +select count() from tst final prewhere val>0; + +drop table tst; + +select '-- SummingMergeTree without Nullable column with duplicates.'; + +drop table if exists tst; +create table tst (timestamp DateTime, val Int8) engine SummingMergeTree partition by toYYYYMM(timestamp) ORDER by (timestamp); +insert into tst values ('2018-02-01 00:00:00', 1), ('2018-02-02 00:00:00', 2), ('2018-02-01 00:00:00', 3), ('2018-02-02 00:00:00', 4); + +select * from tst final order by timestamp; + +select '-- 4 2'; +select count() from tst; +select count() from tst final; + +select '-- 4 2'; +select count() from tst where timestamp is not null; +select count() from tst final where timestamp is not null; + +select '-- 4 2'; +select count() from tst where val is not null; +select count() from tst final where val is not null; + +select '-- 2 2 2 2'; +select count() from tst final where timestamp>0; +select count() from tst final prewhere timestamp > 0; +select count() from tst final where timestamp > '2017-01-01 00:00:00'; +select count() from tst final prewhere timestamp > '2017-01-01 00:00:00'; + +select '-- 2 2'; +select count() from tst final where val>0; +select count() from tst final prewhere val>0; + 
+drop table tst; diff --git a/parser/testdata/01030_storage_set_supports_read/ast.json b/parser/testdata/01030_storage_set_supports_read/ast.json new file mode 100644 index 000000000..dbe3a5402 --- /dev/null +++ b/parser/testdata/01030_storage_set_supports_read/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery userid_test (children 1)" + }, + { + "explain": " Identifier userid_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001131213, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/01030_storage_set_supports_read/metadata.json b/parser/testdata/01030_storage_set_supports_read/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01030_storage_set_supports_read/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01030_storage_set_supports_read/query.sql b/parser/testdata/01030_storage_set_supports_read/query.sql new file mode 100644 index 000000000..e2823b725 --- /dev/null +++ b/parser/testdata/01030_storage_set_supports_read/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS userid_test; + +SET use_index_for_in_with_subqueries = 1; + +CREATE TABLE userid_test (userid UInt64) ENGINE = MergeTree() PARTITION BY (intDiv(userid, 500)) ORDER BY (userid) SETTINGS index_granularity = 8192; + +INSERT INTO userid_test VALUES (1),(2),(3),(4),(5); + +DROP TABLE IF EXISTS userid_set; + +CREATE TABLE userid_set(userid UInt64) ENGINE = Set; + +INSERT INTO userid_set VALUES (1),(2),(3); + +SELECT * FROM userid_test WHERE userid IN (1, 2, 3); + +SELECT * FROM userid_test WHERE toUInt64(1) IN (userid_set); + +SELECT * FROM userid_test WHERE userid IN (userid_set); + +DROP TABLE userid_test; +DROP TABLE userid_set; diff --git a/parser/testdata/01030_storage_url_syntax/ast.json b/parser/testdata/01030_storage_url_syntax/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01030_storage_url_syntax/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01030_storage_url_syntax/metadata.json b/parser/testdata/01030_storage_url_syntax/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01030_storage_url_syntax/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01030_storage_url_syntax/query.sql b/parser/testdata/01030_storage_url_syntax/query.sql new file mode 100644 index 000000000..c8eba5121 --- /dev/null +++ b/parser/testdata/01030_storage_url_syntax/query.sql @@ -0,0 +1,69 @@ +-- Tags: no-fasttest +-- no-fasttest: Timeout for the first query (CANNOT_DETECT_FORMAT) is too slow: https://github.com/ClickHouse/ClickHouse/issues/67939 + +drop table if exists test_table_url_syntax +; +create table test_table_url_syntax (id UInt32) ENGINE = URL('') +; -- { serverError BAD_ARGUMENTS } +create table test_table_url_syntax (id UInt32) ENGINE = URL('','','','') +; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +drop table if exists test_table_url_syntax +; + +drop table if exists test_table_url +; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint') +; -- { serverError CANNOT_DETECT_FORMAT } + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint.json'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'ErrorFormat') +; -- { serverError UNKNOWN_FORMAT } + +create table 
test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'gzip'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'gz'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'deflate'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'brotli'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'lzma'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'zstd'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'lz4'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'bz2'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'snappy'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'none'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'auto'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint.gz', 'JSONEachRow'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint.fr', 'JSONEachRow'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow'); +drop table test_table_url; + +create table test_table_url(id UInt32) ENGINE = URL('http://localhost/endpoint', 'JSONEachRow', 'zip') +; -- { serverError NOT_IMPLEMENTED } + diff --git a/parser/testdata/01031_pmj_new_any_semi_join/ast.json b/parser/testdata/01031_pmj_new_any_semi_join/ast.json new file mode 100644 index 000000000..9ac998b63 --- /dev/null +++ b/parser/testdata/01031_pmj_new_any_semi_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001071447, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01031_pmj_new_any_semi_join/metadata.json b/parser/testdata/01031_pmj_new_any_semi_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01031_pmj_new_any_semi_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01031_pmj_new_any_semi_join/query.sql b/parser/testdata/01031_pmj_new_any_semi_join/query.sql new file mode 100644 index 000000000..a24b066e1 --- /dev/null +++ b/parser/testdata/01031_pmj_new_any_semi_join/query.sql @@ -0,0 +1,45 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (x UInt32, s String) engine = MergeTree ORDER BY tuple(); +CREATE TABLE t2 (x UInt32, s String) engine = MergeTree ORDER BY tuple(); + +INSERT INTO t1 (x, s) VALUES (0, 'a1'), (1, 'a2'), (2, 'a3'), (3, 'a4'), (4, 'a5'); +INSERT INTO t2 (x, s) VALUES (2, 'b1'), (4, 'b2'), (5, 'b4'); + +SET join_algorithm = 'prefer_partial_merge'; +SET join_use_nulls = 0; +SET 
any_join_distinct_right_table_keys = 0; + +SELECT 'any left'; +SELECT t1.*, t2.* FROM t1 ANY LEFT JOIN t2 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'any left (rev)'; +SELECT t1.*, t2.* FROM t2 ANY LEFT JOIN t1 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'any inner'; +SELECT t1.*, t2.* FROM t1 ANY INNER JOIN t2 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'any inner (rev)'; +SELECT t1.*, t2.* FROM t2 ANY INNER JOIN t1 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'any right'; +SELECT t1.*, t2.* FROM t1 ANY RIGHT JOIN t2 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'any right (rev)'; +SELECT t1.*, t2.* FROM t2 ANY RIGHT JOIN t1 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'semi left'; +SELECT t1.*, t2.* FROM t1 SEMI LEFT JOIN t2 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'semi right'; +SELECT t1.*, t2.* FROM t1 SEMI RIGHT JOIN t2 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'anti left'; +SELECT t1.*, t2.* FROM t1 ANTI LEFT JOIN t2 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'anti right'; +SELECT t1.*, t2.* FROM t1 ANTI RIGHT JOIN t2 USING(x) ORDER BY t1.x, t2.x; + +DROP TABLE t1; +DROP TABLE t2; diff --git a/parser/testdata/01031_semi_anti_join/ast.json b/parser/testdata/01031_semi_anti_join/ast.json new file mode 100644 index 000000000..f32a09962 --- /dev/null +++ b/parser/testdata/01031_semi_anti_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001150861, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01031_semi_anti_join/metadata.json b/parser/testdata/01031_semi_anti_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01031_semi_anti_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01031_semi_anti_join/query.sql b/parser/testdata/01031_semi_anti_join/query.sql new file mode 100644 index 000000000..03ed7e30b --- /dev/null +++ b/parser/testdata/01031_semi_anti_join/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (x UInt32, s String) engine = MergeTree ORDER BY tuple(); +CREATE TABLE t2 (x UInt32, s String) engine = MergeTree ORDER BY tuple(); + +INSERT INTO t1 (x, s) VALUES (0, 'a1'), (1, 'a2'), (2, 'a3'), (3, 'a4'), (4, 'a5'), (2, 'a6'); +INSERT INTO t2 (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); + +SET join_use_nulls = 0; + +SELECT 'semi left'; +SELECT t1.*, t2.* FROM t1 SEMI LEFT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +SELECT 'semi right'; +SELECT t1.*, t2.* FROM t1 SEMI RIGHT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +SELECT 'anti left'; +SELECT t1.*, t2.* FROM t1 ANTI LEFT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +SELECT 'anti right'; +SELECT t1.*, t2.* FROM t1 ANTI RIGHT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +DROP TABLE t1; +DROP TABLE t2; diff --git a/parser/testdata/01032_cityHash64_for_UUID/ast.json b/parser/testdata/01032_cityHash64_for_UUID/ast.json new file mode 100644 index 000000000..f400bf41c --- /dev/null +++ b/parser/testdata/01032_cityHash64_for_UUID/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" 
+ }, + { + "explain": " Function cityHash64 (alias uuid) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUUID (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '61f0c404-5cb3-11e7-907b-a6006ad3dba0'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001286582, + "rows_read": 9, + "bytes_read": 392 + } +} diff --git a/parser/testdata/01032_cityHash64_for_UUID/metadata.json b/parser/testdata/01032_cityHash64_for_UUID/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01032_cityHash64_for_UUID/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01032_cityHash64_for_UUID/query.sql b/parser/testdata/01032_cityHash64_for_UUID/query.sql new file mode 100644 index 000000000..76a2389b8 --- /dev/null +++ b/parser/testdata/01032_cityHash64_for_UUID/query.sql @@ -0,0 +1,9 @@ +SELECT cityHash64(toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0')) AS uuid; +DROP TABLE IF EXISTS t_uuid; +CREATE TABLE t_uuid (x UUID) ENGINE=TinyLog; +INSERT INTO t_uuid SELECT generateUUIDv4(); +INSERT INTO t_uuid SELECT generateUUIDv4(); +INSERT INTO t_uuid SELECT generateUUIDv4(); +INSERT INTO t_uuid SELECT generateUUIDv4(); +SELECT (SELECT count() FROM t_uuid WHERE cityHash64(reinterpretAsString(x)) = cityHash64(x) and length(reinterpretAsString(x)) = 16) = (SELECT count() AS c2 FROM t_uuid WHERE length(reinterpretAsString(x)) = 16); +DROP TABLE IF EXISTS t_uuid; diff --git a/parser/testdata/01032_cityHash64_for_decimal/ast.json b/parser/testdata/01032_cityHash64_for_decimal/ast.json new file mode 100644 index 000000000..b55ddd3ca --- /dev/null +++ b/parser/testdata/01032_cityHash64_for_decimal/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function cityHash64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_32" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001320439, + "rows_read": 10, + "bytes_read": 387 + } +} diff --git a/parser/testdata/01032_cityHash64_for_decimal/metadata.json b/parser/testdata/01032_cityHash64_for_decimal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01032_cityHash64_for_decimal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01032_cityHash64_for_decimal/query.sql b/parser/testdata/01032_cityHash64_for_decimal/query.sql new file mode 100644 index 000000000..3b596dd53 --- /dev/null +++ b/parser/testdata/01032_cityHash64_for_decimal/query.sql @@ -0,0 +1,6 @@ +SELECT cityHash64(toDecimal32(32, 2)); +SELECT cityHash64(toDecimal64(64, 5)); +SELECT cityHash64(toDecimal128(128, 24)); +SELECT cityHash64(toDecimal32(number, 3)) from numbers(198, 2); +SELECT cityHash64(toDecimal64(number, 9)) from numbers(297, 2); +SELECT cityHash64(toDecimal128(number, 16)) from numbers(123, 2); diff --git a/parser/testdata/01032_duplicate_column_insert_query/ast.json b/parser/testdata/01032_duplicate_column_insert_query/ast.json new file mode 100644 index 
000000000..a74bce8d9 --- /dev/null +++ b/parser/testdata/01032_duplicate_column_insert_query/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery sometable (children 1)" + }, + { + "explain": " Identifier sometable" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001442756, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/01032_duplicate_column_insert_query/metadata.json b/parser/testdata/01032_duplicate_column_insert_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01032_duplicate_column_insert_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01032_duplicate_column_insert_query/query.sql b/parser/testdata/01032_duplicate_column_insert_query/query.sql new file mode 100644 index 000000000..2fcc846e5 --- /dev/null +++ b/parser/testdata/01032_duplicate_column_insert_query/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS sometable; + +CREATE TABLE sometable ( + date Date, + time Int64, + value UInt64 +) ENGINE=MergeTree() +ORDER BY time; + + +INSERT INTO sometable (date, time, value) VALUES ('2019-11-08', 1573185600, 100); + +SELECT COUNT() from sometable; + +INSERT INTO sometable (date, time, value, time) VALUES ('2019-11-08', 1573185600, 100, 1573185600); -- {serverError DUPLICATE_COLUMN} + +DROP TABLE IF EXISTS sometable; diff --git a/parser/testdata/01033_dictionaries_lifetime/ast.json b/parser/testdata/01033_dictionaries_lifetime/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01033_dictionaries_lifetime/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01033_dictionaries_lifetime/metadata.json b/parser/testdata/01033_dictionaries_lifetime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01033_dictionaries_lifetime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01033_dictionaries_lifetime/query.sql b/parser/testdata/01033_dictionaries_lifetime/query.sql new file mode 100644 index 000000000..e74ac8bde --- /dev/null +++ b/parser/testdata/01033_dictionaries_lifetime/query.sql @@ -0,0 +1,43 @@ + +SET send_logs_level = 'fatal'; + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.table_for_dict +( + key_column UInt64, + second_column UInt8, + third_column String +) +ENGINE = MergeTree() +ORDER BY key_column; + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_for_dict VALUES (1, 100, 'Hello world'); + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE_1:Identifier}; + +CREATE DATABASE {CLICKHOUSE_DATABASE_1:Identifier}; + +CREATE DICTIONARY {CLICKHOUSE_DATABASE_1:Identifier}.dict1 +( + key_column UInt64 DEFAULT 0, + second_column UInt8 DEFAULT 1, + third_column String DEFAULT 'qqq' +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' PASSWORD '' DB currentDatabase())) +LIFETIME(MIN 1 MAX 10) +LAYOUT(FLAT()); + +SELECT 'INITIALIZING DICTIONARY'; + +SELECT dictGetUInt8({CLICKHOUSE_DATABASE_1:String}||'.dict1', 'second_column', toUInt64(100500)); + +SELECT lifetime_min, lifetime_max FROM system.dictionaries WHERE database={CLICKHOUSE_DATABASE_1:String} AND name = 'dict1'; + +DROP DICTIONARY IF EXISTS {CLICKHOUSE_DATABASE_1:Identifier}.dict1; + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE_1:Identifier}; + +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.table_for_dict; + +DROP DATABASE IF 
EXISTS {CLICKHOUSE_DATABASE:Identifier}; + diff --git a/parser/testdata/01033_function_substring/ast.json b/parser/testdata/01033_function_substring/ast.json new file mode 100644 index 000000000..207280fdf --- /dev/null +++ b/parser/testdata/01033_function_substring/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '-- argument validation'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001049255, + "rows_read": 5, + "bytes_read": 193 + } +} diff --git a/parser/testdata/01033_function_substring/metadata.json b/parser/testdata/01033_function_substring/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01033_function_substring/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01033_function_substring/query.sql b/parser/testdata/01033_function_substring/query.sql new file mode 100644 index 000000000..9955700f3 --- /dev/null +++ b/parser/testdata/01033_function_substring/query.sql @@ -0,0 +1,149 @@ +SELECT '-- argument validation'; + +SELECT substring('hello', []); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT substring('hello', 1, []); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT substring(materialize('hello'), -1, -1); +SELECT substring(materialize('hello'), 0); -- { serverError ZERO_ARRAY_OR_TUPLE_INDEX } + +SELECT '-- FixedString arguments'; + +SELECT substring(toFixedString('hello', 16), 1, 8); +SELECT substring(toFixedString(materialize('hello'), 16), 1, 8); +SELECT substring(toFixedString(toString(number), 16), 1, 8) FROM system.numbers LIMIT 10; +SELECT substring(toFixedString(toString(number), 4), 1, 3) FROM system.numbers LIMIT 995, 10; +SELECT substring(toFixedString(toString(number), 4), 1, number % 5) FROM system.numbers LIMIT 995, 10; +SELECT substring(toFixedString(toString(number), 4), 1 + number % 5) FROM system.numbers LIMIT 995, 10; +SELECT substring(toFixedString(toString(number), 4), 1 + number % 5, 1 + number % 3) FROM system.numbers LIMIT 995, 10; + +SELECT '-- Enum arguments'; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab(e8 Enum8('hello' = -5, 'world' = 15), e16 Enum16('shark' = -999, 'eagle' = 9999)) ENGINE MergeTree ORDER BY tuple(); +INSERT INTO TABLE tab VALUES ('hello', 'shark'), ('world', 'eagle'); + +-- positive offsets (slice from left) +SELECT substring(e8, 1), substring (e16, 1) FROM tab; +SELECT substring(e8, 2, 10), substring (e16, 2, 10) FROM tab; +-- negative offsets (slice from right) +SELECT substring(e8, -1), substring (e16, -1) FROM tab; +SELECT substring(e8, -2, 10), substring (e16, -2, 10) FROM tab; +-- zero offset/length +SELECT substring(e8, 1, 0), substring (e16, 1, 0) FROM tab; + +SELECT '-- Constant enums'; +SELECT substring(CAST('foo', 'Enum8(\'foo\' = 1)'), 1, 1), substring(CAST('foo', 'Enum16(\'foo\' = 1111)'), 1, 2); + +DROP TABLE tab; + +SELECT '-- negative offset argument'; + +SELECT substring('abc', number - 5) FROM system.numbers LIMIT 10; +SELECT substring(materialize('abc'), number - 5) FROM system.numbers LIMIT 10; +SELECT substring(toFixedString('abc', 3), number - 5) FROM system.numbers LIMIT 10; +SELECT substring(materialize(toFixedString('abc', 3)), number - 5) FROM system.numbers LIMIT 10; + +SELECT substring('clickhouse', 2, -2); 
+SELECT substring(materialize('clickhouse'), 2, -2); +SELECT substring('clickhouse', materialize(2), -2); +SELECT substring(materialize('clickhouse'), materialize(2), -2); +SELECT substring('clickhouse', 2, materialize(-2)); +SELECT substring(materialize('clickhouse'), 2, materialize(-2)); +SELECT substring('clickhouse', materialize(2), materialize(-2)); +SELECT substring(materialize('clickhouse'), materialize(2), materialize(-2)); + +SELECT '-- negative length argument'; + +SELECT substring('abcdefgh', 2, -2); +SELECT substring('abcdefgh', materialize(2), -2); +SELECT substring('abcdefgh', 2, materialize(-2)); +SELECT substring('abcdefgh', materialize(2), materialize(-2)); + +SELECT substring(cast('abcdefgh' AS FixedString(8)), 2, -2); +SELECT substring(cast('abcdefgh' AS FixedString(8)), materialize(2), -2); +SELECT substring(cast('abcdefgh' AS FixedString(8)), 2, materialize(-2)); +SELECT substring(cast('abcdefgh' AS FixedString(8)), materialize(2), materialize(-2)); + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (s String, l Int8, r Int8) ENGINE = Memory; +INSERT INTO tab VALUES ('abcdefgh', 2, -2), ('12345678', 3, -3); + +SELECT substring(s, 2, -2) FROM tab; +SELECT substring(s, l, -2) FROM tab; +SELECT substring(s, 2, r) FROM tab; +SELECT substring(s, l, r) FROM tab; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (s FixedString(8), l Int8, r Int8) ENGINE = Memory; +INSERT INTO tab VALUES ('abcdefgh', 2, -2), ('12345678', 3, -3); + +SELECT substring(s, 2, -2) FROM tab; +SELECT substring(s, l, -2) FROM tab; +SELECT substring(s, 2, r) FROM tab; +SELECT substring(s, l, r) FROM tab; + +DROP TABLE IF EXISTS tab; + +SELECT '-- negative offset and size'; + +SELECT substring('abcdefgh', -2, -2); +SELECT substring(materialize('abcdefgh'), -2, -2); +SELECT substring(materialize('abcdefgh'), materialize(-2), materialize(-2)); + +SELECT substring('abcdefgh', -2, -1); +SELECT substring(materialize('abcdefgh'), -2, -1); +SELECT substring(materialize('abcdefgh'), materialize(-2), materialize(-1)); + +SELECT substring(cast('abcdefgh' AS FixedString(8)), -2, -2); +SELECT substring(materialize(cast('abcdefgh' AS FixedString(8))), -2, -2); +SELECT substring(materialize(cast('abcdefgh' AS FixedString(8))), materialize(-2), materialize(-2)); + +SELECT substring(cast('abcdefgh' AS FixedString(8)), -2, -1); +SELECT substring(materialize(cast('abcdefgh' AS FixedString(8))), -2, -1); +SELECT substring(materialize(cast('abcdefgh' AS FixedString(8))), materialize(-2), materialize(-1)); + +DROP TABLE IF EXISTS t; +CREATE TABLE t +( + s String, + l Int8, + r Int8 +) ENGINE = Memory; + +INSERT INTO t VALUES ('abcdefgh', -2, -2),('12345678', -3, -3); + +SELECT substring(s, -2, -2) FROM t; +SELECT substring(s, l, -2) FROM t; +SELECT substring(s, -2, r) FROM t; +SELECT substring(s, l, r) FROM t; + +SELECT '-'; +DROP TABLE IF EXISTS t; +CREATE TABLE t( + s FixedString(8), + l Int8, + r Int8 +) engine = Memory; +INSERT INTO t VALUES ('abcdefgh', -2, -2),('12345678', -3, -3); + +SELECT substring(s, -2, -2) FROM t; +SELECT substring(s, l, -2) FROM t; +SELECT substring(s, -2, r) FROM t; +SELECT substring(s, l, r) FROM t; + +DROP table if exists t; + +SELECT '-- UBSAN bug'; + +/** NOTE: The behaviour of substring and substringUTF8 is inconsistent when negative offset is greater than string size: + * substring: + * hello + * ^-----^ - offset -10, length 7, result: "he" + * substringUTF8: + * hello + * ^-----^ - offset -10, length 7, result: "hello" + * This may be subject for change. 
+ */ +SELECT substringUTF8('hello, привет', -9223372036854775808, number) FROM numbers(16) FORMAT Null; + +SELECT '-- Alias'; +SELECT byteSlice('hello', 2, 2); diff --git a/parser/testdata/01033_quota_dcl/ast.json b/parser/testdata/01033_quota_dcl/ast.json new file mode 100644 index 000000000..a96b4280c --- /dev/null +++ b/parser/testdata/01033_quota_dcl/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SHOW CREATE QUOTA query" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001195458, + "rows_read": 1, + "bytes_read": 31 + } +} diff --git a/parser/testdata/01033_quota_dcl/metadata.json b/parser/testdata/01033_quota_dcl/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01033_quota_dcl/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01033_quota_dcl/query.sql b/parser/testdata/01033_quota_dcl/query.sql new file mode 100644 index 000000000..a7796402b --- /dev/null +++ b/parser/testdata/01033_quota_dcl/query.sql @@ -0,0 +1 @@ +SHOW CREATE QUOTA default; diff --git a/parser/testdata/01033_storage_odbc_parsing_exception_check/ast.json b/parser/testdata/01033_storage_odbc_parsing_exception_check/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01033_storage_odbc_parsing_exception_check/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01033_storage_odbc_parsing_exception_check/metadata.json b/parser/testdata/01033_storage_odbc_parsing_exception_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01033_storage_odbc_parsing_exception_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01033_storage_odbc_parsing_exception_check/query.sql b/parser/testdata/01033_storage_odbc_parsing_exception_check/query.sql new file mode 100644 index 000000000..5df291fb7 --- /dev/null +++ b/parser/testdata/01033_storage_odbc_parsing_exception_check/query.sql @@ -0,0 +1,11 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS BannerDict; + +CREATE TABLE BannerDict (`BannerID` UInt64, `CompaignID` UInt64) ENGINE = ODBC('DSN=pgconn;Database=postgres', bannerdict); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +CREATE TABLE BannerDict (`BannerID` UInt64, `CompaignID` UInt64) ENGINE = ODBC('DSN=pgconn;Database=postgres', somedb, bannerdict); + +SHOW CREATE TABLE BannerDict; + +DROP TABLE IF EXISTS BannerDict; diff --git a/parser/testdata/01034_JSONCompactEachRow/ast.json b/parser/testdata/01034_JSONCompactEachRow/ast.json new file mode 100644 index 000000000..886ff5161 --- /dev/null +++ b/parser/testdata/01034_JSONCompactEachRow/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001373616, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01034_JSONCompactEachRow/metadata.json b/parser/testdata/01034_JSONCompactEachRow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01034_JSONCompactEachRow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01034_JSONCompactEachRow/query.sql b/parser/testdata/01034_JSONCompactEachRow/query.sql new file mode 100644 index 000000000..e47e5346c --- /dev/null +++
b/parser/testdata/01034_JSONCompactEachRow/query.sql @@ -0,0 +1,88 @@ +DROP TABLE IF EXISTS test_table; +DROP TABLE IF EXISTS test_table_2; +SET input_format_null_as_default = 0; +SELECT 1; +/* Check JSONCompactEachRow Output */ +CREATE TABLE test_table (value UInt8, name String) ENGINE = MergeTree() ORDER BY value; +INSERT INTO test_table VALUES (1, 'a'), (2, 'b'), (3, 'c'); +SELECT * FROM test_table FORMAT JSONCompactEachRow; +SELECT 2; +/* Check Totals */ +SELECT name, count() AS c FROM test_table GROUP BY name WITH TOTALS ORDER BY name FORMAT JSONCompactEachRow; +SELECT 3; +/* Check JSONCompactEachRowWithNames and JSONCompactEachRowWithNamesAndTypes Output */ +SELECT * FROM test_table FORMAT JSONCompactEachRowWithNamesAndTypes; +SELECT '----------'; +SELECT * FROM test_table FORMAT JSONCompactEachRowWithNames; +SELECT 4; +/* Check Totals */ +SELECT name, count() AS c FROM test_table GROUP BY name WITH TOTALS ORDER BY name FORMAT JSONCompactEachRowWithNamesAndTypes; +DROP TABLE IF EXISTS test_table; +SELECT 5; +/* Check JSONCompactEachRow Input */ +CREATE TABLE test_table (v1 String, v2 UInt8, v3 DEFAULT v2 * 16, v4 UInt8 DEFAULT 8) ENGINE = MergeTree() ORDER BY v2; +INSERT INTO test_table FORMAT JSONCompactEachRow ["first", 1, "2", null] ["second", 2, null, 6]; + +SELECT * FROM test_table FORMAT JSONCompactEachRow; +TRUNCATE TABLE test_table; +SELECT 6; +/* Check input_format_null_as_default = 1 */ +SET input_format_null_as_default = 1; +INSERT INTO test_table FORMAT JSONCompactEachRow ["first", 1, "2", null] ["second", 2, null, 6]; + +SELECT * FROM test_table FORMAT JSONCompactEachRow; +TRUNCATE TABLE test_table; +SELECT 7; +/* Check Nested */ +CREATE TABLE test_table_2 (v1 UInt8, n Nested(id UInt8, name String)) ENGINE = MergeTree() ORDER BY v1; +INSERT INTO test_table_2 FORMAT JSONCompactEachRow [16, [15, 16, null], ["first", "second", "third"]]; + +SELECT * FROM test_table_2 FORMAT JSONCompactEachRow; +TRUNCATE TABLE test_table_2; +SELECT 8; +/* Check JSONCompactEachRowWithNamesAndTypes and JSONCompactEachRowWithNames Input */ +SET input_format_null_as_default = 0; +INSERT INTO test_table FORMAT JSONCompactEachRowWithNamesAndTypes ["v1", "v2", "v3", "v4"]["String","UInt8","UInt16","UInt8"]["first", 1, "2", null]["second", 2, null, 6]; + +INSERT INTO test_table FORMAT JSONCompactEachRowWithNames ["v1", "v2", "v3", "v4"]["first", 1, "2", null]["second", 2, null, 6]; + +SELECT * FROM test_table FORMAT JSONCompactEachRow; +TRUNCATE TABLE test_table; +SELECT 9; +/* Check input_format_null_as_default = 1 */ +SET input_format_null_as_default = 1; +INSERT INTO test_table FORMAT JSONCompactEachRowWithNamesAndTypes ["v1", "v2", "v3", "v4"]["String","UInt8","UInt16","UInt8"]["first", 1, "2", null] ["second", 2, null, 6]; + +INSERT INTO test_table FORMAT JSONCompactEachRowWithNames ["v1", "v2", "v3", "v4"]["first", 1, "2", null] ["second", 2, null, 6]; + +SELECT * FROM test_table FORMAT JSONCompactEachRow; +SELECT 10; +/* Check Header */ +TRUNCATE TABLE test_table; +SET input_format_skip_unknown_fields = 1; +INSERT INTO test_table FORMAT JSONCompactEachRowWithNamesAndTypes ["v1", "v2", "invalid_column"]["String", "UInt8", "UInt8"]["first", 1, 32]["second", 2, "64"]; + +INSERT INTO test_table FORMAT JSONCompactEachRowWithNames ["v1", "v2", "invalid_column"]["first", 1, 32]["second", 2, "64"]; + +SELECT * FROM test_table FORMAT JSONCompactEachRow; +SELECT 11; +TRUNCATE TABLE test_table; +INSERT INTO test_table FORMAT JSONCompactEachRowWithNamesAndTypes ["v4", "v2", "v3"]["UInt8",
"UInt8", "UInt16"][1, 2, 3] + +INSERT INTO test_table FORMAT JSONCompactEachRowWithNames ["v4", "v2", "v3"][1, 2, 3] + +SELECT * FROM test_table FORMAT JSONCompactEachRowWithNamesAndTypes; +SELECT '----------'; +SELECT * FROM test_table FORMAT JSONCompactEachRowWithNames; +SELECT 12; +/* Check Nested */ +INSERT INTO test_table_2 FORMAT JSONCompactEachRowWithNamesAndTypes ["v1", "n.id", "n.name"]["UInt8", "Array(UInt8)", "Array(String)"][16, [15, 16, null], ["first", "second", "third"]]; + +INSERT INTO test_table_2 FORMAT JSONCompactEachRowWithNames ["v1", "n.id", "n.name"][16, [15, 16, null], ["first", "second", "third"]]; + +SELECT * FROM test_table_2 FORMAT JSONCompactEachRowWithNamesAndTypes; +SELECT '----------'; +SELECT * FROM test_table_2 FORMAT JSONCompactEachRowWithNames; + +DROP TABLE IF EXISTS test_table; +DROP TABLE IF EXISTS test_table_2; diff --git a/parser/testdata/01034_order_by_pk_prefix/ast.json b/parser/testdata/01034_order_by_pk_prefix/ast.json new file mode 100644 index 000000000..1d07d8f02 --- /dev/null +++ b/parser/testdata/01034_order_by_pk_prefix/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001590569, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01034_order_by_pk_prefix/metadata.json b/parser/testdata/01034_order_by_pk_prefix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01034_order_by_pk_prefix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01034_order_by_pk_prefix/query.sql b/parser/testdata/01034_order_by_pk_prefix/query.sql new file mode 100644 index 000000000..cffac819f --- /dev/null +++ b/parser/testdata/01034_order_by_pk_prefix/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS test_table; + +CREATE TABLE test_table (n Int32, s String) +ENGINE = MergeTree() PARTITION BY n % 10 ORDER BY n; + +INSERT INTO test_table SELECT number, toString(number) FROM system.numbers LIMIT 100; +INSERT INTO test_table SELECT number, toString(number * number) FROM system.numbers LIMIT 100; +INSERT INTO test_table SELECT number, toString(number * number) FROM system.numbers LIMIT 100; + +SELECT * FROM test_table ORDER BY n, s LIMIT 30; + +DROP TABLE test_table; diff --git a/parser/testdata/01034_prewhere_max_parallel_replicas_distributed/ast.json b/parser/testdata/01034_prewhere_max_parallel_replicas_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01034_prewhere_max_parallel_replicas_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01034_prewhere_max_parallel_replicas_distributed/metadata.json b/parser/testdata/01034_prewhere_max_parallel_replicas_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01034_prewhere_max_parallel_replicas_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01034_prewhere_max_parallel_replicas_distributed/query.sql b/parser/testdata/01034_prewhere_max_parallel_replicas_distributed/query.sql new file mode 100644 index 000000000..566660f67 --- /dev/null +++ b/parser/testdata/01034_prewhere_max_parallel_replicas_distributed/query.sql @@ -0,0 +1,17 @@ +-- Tags: replica, distributed + +drop table if exists test_max_parallel_replicas_lr; + +-- If you 
wonder why the table is named with the "_lr" suffix in this test: +-- no particular reason. It is the name of the table at the customer who provided this test case to us. + +CREATE TABLE test_max_parallel_replicas_lr (timestamp UInt64) ENGINE = MergeTree ORDER BY (intHash32(timestamp)) SAMPLE BY intHash32(timestamp); +INSERT INTO test_max_parallel_replicas_lr select number as timestamp from system.numbers limit 100; + +SET enable_parallel_replicas = 1; +SET parallel_replicas_mode='sampling_key'; +SET max_parallel_replicas = 2; +SET parallel_replicas_for_non_replicated_merge_tree = 1; +select count() FROM remote('127.0.0.{2|3}', currentDatabase(), test_max_parallel_replicas_lr) PREWHERE timestamp > 0; + +drop table test_max_parallel_replicas_lr; diff --git a/parser/testdata/01034_sample_final_distributed/ast.json b/parser/testdata/01034_sample_final_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01034_sample_final_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01034_sample_final_distributed/metadata.json b/parser/testdata/01034_sample_final_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01034_sample_final_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01034_sample_final_distributed/query.sql b/parser/testdata/01034_sample_final_distributed/query.sql new file mode 100644 index 000000000..bbb1b0dcf --- /dev/null +++ b/parser/testdata/01034_sample_final_distributed/query.sql @@ -0,0 +1,24 @@ +-- Tags: distributed + +set enable_parallel_replicas = 1; +set parallel_replicas_mode = 'sampling_key'; +set max_parallel_replicas = 3; +set parallel_replicas_for_non_replicated_merge_tree = 1; + +drop table if exists sample_final; +create table sample_final (CounterID UInt32, EventDate Date, EventTime DateTime, UserID UInt64, Sign Int8) engine = CollapsingMergeTree(Sign) order by (CounterID, EventDate, intHash32(UserID), EventTime) sample by intHash32(UserID) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into sample_final select number / (8192 * 4), toDate('2019-01-01'), toDateTime('2019-01-01 00:00:01') + number, number / (8192 * 2), number % 3 = 1 ?
-1 : 1 from numbers(1000000); + +select 'count'; +select count() from sample_final; +select 'count final'; +select count() from sample_final final; +select 'count sample'; +select count() from sample_final sample 1/2; +select 'count sample final'; +select count() from sample_final final sample 1/2; +select 'count final max_parallel_replicas'; +set max_parallel_replicas=2; +select count() from remote('127.0.0.{2|3}', currentDatabase(), sample_final) final; + +drop table if exists sample_final; diff --git a/parser/testdata/01034_unknown_qualified_column_in_join/ast.json b/parser/testdata/01034_unknown_qualified_column_in_join/ast.json new file mode 100644 index 000000000..39fddd169 --- /dev/null +++ b/parser/testdata/01034_unknown_qualified_column_in_join/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier l.c" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias l) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias a)" + }, + { + "explain": " Literal UInt64_2 (alias b)" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias r) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2 (alias b)" + }, + { + "explain": " Literal UInt64_3 (alias c)" + }, + { + "explain": " TableJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier b" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.00127979, + "rows_read": 27, + "bytes_read": 1129 + } +} diff --git a/parser/testdata/01034_unknown_qualified_column_in_join/metadata.json b/parser/testdata/01034_unknown_qualified_column_in_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01034_unknown_qualified_column_in_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01034_unknown_qualified_column_in_join/query.sql b/parser/testdata/01034_unknown_qualified_column_in_join/query.sql new file mode 100644 index 000000000..de5be4d34 --- /dev/null +++ b/parser/testdata/01034_unknown_qualified_column_in_join/query.sql @@ -0,0 +1,3 @@ +SELECT l.c FROM (SELECT 1 AS a, 2 AS b) AS l join (SELECT 2 AS b, 3 AS c) AS r USING b; -- { serverError UNKNOWN_IDENTIFIER } +SELECT r.a FROM (SELECT 1 AS a, 2 AS b) AS l join (SELECT 2 AS b, 3 AS c) AS r USING b; -- { serverError UNKNOWN_IDENTIFIER } +SELECT l.a, r.c FROM (SELECT 1 AS a, 2 AS b) AS l join (SELECT 2 AS b, 3 AS c) AS r USING b; diff --git a/parser/testdata/01034_with_fill_and_push_down_predicate/ast.json b/parser/testdata/01034_with_fill_and_push_down_predicate/ast.json new file mode 
100644 index 000000000..81626284f --- /dev/null +++ b/parser/testdata/01034_with_fill_and_push_down_predicate/ast.json @@ -0,0 +1,142 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier date_time" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime (alias date_time) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2019-11-14 22:15:00'" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime (alias date_time) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2019-11-15 01:15:00'" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 2)" + }, + { + "explain": " Identifier date_time" + }, + { + "explain": " Literal UInt64_900" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier date_time" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2019-11-15 00:15:00'" + } + ], + + "rows": 40, + + "statistics": + { + "elapsed": 0.00193061, + "rows_read": 40, + "bytes_read": 1840 + } +} diff --git a/parser/testdata/01034_with_fill_and_push_down_predicate/metadata.json b/parser/testdata/01034_with_fill_and_push_down_predicate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01034_with_fill_and_push_down_predicate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01034_with_fill_and_push_down_predicate/query.sql b/parser/testdata/01034_with_fill_and_push_down_predicate/query.sql new file mode 100644 index 000000000..718e8f292 --- /dev/null +++ b/parser/testdata/01034_with_fill_and_push_down_predicate/query.sql @@ -0,0 +1 @@ +SELECT * FROM ( SELECT date_time FROM ( SELECT toDateTime('2019-11-14 22:15:00') AS date_time UNION ALL SELECT toDateTime('2019-11-15 01:15:00') AS date_time ) ORDER BY date_time WITH fill step 900 ) WHERE date_time < toDateTime('2019-11-15 00:15:00') diff --git a/parser/testdata/01035_avg/ast.json b/parser/testdata/01035_avg/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01035_avg/ast.json @@ 
-0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01035_avg/metadata.json b/parser/testdata/01035_avg/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01035_avg/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01035_avg/query.sql b/parser/testdata/01035_avg/query.sql new file mode 100644 index 000000000..0f7baddae --- /dev/null +++ b/parser/testdata/01035_avg/query.sql @@ -0,0 +1,49 @@ +CREATE TABLE IF NOT EXISTS test_01035_avg ( + i8 Int8 DEFAULT i64, + i16 Int16 DEFAULT i64, + i32 Int32 DEFAULT i64, + i64 Int64 DEFAULT if(u64 % 2 = 0, toInt64(u64), toInt64(-u64)), + i128 Int128 DEFAULT i64, + i256 Int256 DEFAULT i64, + + u8 UInt8 DEFAULT u64, + u16 UInt16 DEFAULT u64, + u32 UInt32 DEFAULT u64, + u64 UInt64, + u128 UInt128 DEFAULT u64, + u256 UInt256 DEFAULT u64, + + f32 Float32 DEFAULT u64, + f64 Float64 DEFAULT u64, + + d32 Decimal32(4) DEFAULT toDecimal32(i32 / 1000, 4), + d64 Decimal64(18) DEFAULT toDecimal64(u64 / 1000000, 8), + d128 Decimal128(20) DEFAULT toDecimal128(i128 / 100000, 20), + d256 Decimal256(40) DEFAULT toDecimal256(i256 / 100000, 40) +) ENGINE = MergeTree() ORDER BY i64 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +SELECT avg(i8), avg(i16), avg(i32), avg(i64), avg(i128), avg(i256), + avg(u8), avg(u16), avg(u32), avg(u64), avg(u128), avg(u256), + avg(f32), avg(f64), + avg(d32), avg(d64), avg(d128), avg(d256) FROM test_01035_avg; + +INSERT INTO test_01035_avg (u64) SELECT number FROM system.numbers LIMIT 1000000; + +SELECT avg(i8), avg(i16), avg(i32), avg(i64), avg(i128), avg(i256), + avg(u8), avg(u16), avg(u32), avg(u64), avg(u128), avg(u256), + avg(f32), avg(f64), + avg(d32), avg(d64), avg(d128), avg(d256) FROM test_01035_avg; + +SELECT avg(i8 * i16) FROM test_01035_avg; +SELECT avg(f32 + f64) FROM test_01035_avg; +SELECT avg(d128 - d64) FROM test_01035_avg; + +DROP TABLE IF EXISTS test_01035_avg; + +-- Checks that the internal SUM does not overflow Int8 +SELECT avg(key), avgIf(key, key > 0), avg(key2), avgIf(key2, key2 > 0), avg(key3), avgIf(key3, key3 > 0) +FROM +( + SELECT 1::Int8 as key, Null::Nullable(Int8) AS key2, 1::Nullable(Int8) as key3 + FROM numbers(100000) +) diff --git a/parser/testdata/01035_prewhere_with_alias/ast.json b/parser/testdata/01035_prewhere_with_alias/ast.json new file mode 100644 index 000000000..5326ff062 --- /dev/null +++ b/parser/testdata/01035_prewhere_with_alias/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001164721, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01035_prewhere_with_alias/metadata.json b/parser/testdata/01035_prewhere_with_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01035_prewhere_with_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01035_prewhere_with_alias/query.sql b/parser/testdata/01035_prewhere_with_alias/query.sql new file mode 100644 index 000000000..6ce5350d1 --- /dev/null +++ b/parser/testdata/01035_prewhere_with_alias/query.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (a UInt8, b UInt8, c UInt16 ALIAS a + b) ENGINE = MergeTree ORDER BY a; + +SELECT b FROM test PREWHERE c = 1; + +DROP TABLE test; + +drop table if exists audience_local; +create table 
audience_local +( + Date Date, + AudienceType Enum8('other' = 0, 'client' = 1, 'group' = 2), + UMA UInt64, + APIKey String, + TrialNameID UInt32, + TrialGroupID UInt32, + AppVersion String, + Arch Enum8('other' = 0, 'x32' = 1, 'x64' = 2), + UserID UInt32, + GroupID UInt8, + OSName Enum8('other' = 0, 'Android' = 1, 'iOS' = 2, 'macOS' = 3, 'Windows' = 4, 'Linux' = 5), + Channel Enum8('other' = 0, 'Canary' = 1, 'Dev' = 2, 'Beta' = 3, 'Stable' = 4), + Hits UInt64, + Sum Int64, + Release String alias splitByChar('-', AppVersion)[1] +) +engine = SummingMergeTree +PARTITION BY (toISOYear(Date), toISOWeek(Date)) +ORDER BY (AudienceType, UMA, APIKey, Date, TrialNameID, TrialGroupID, AppVersion, Arch, UserID, GroupID, OSName, Channel) +SETTINGS index_granularity = 8192; + +SELECT DISTINCT UserID +FROM audience_local +PREWHERE Date = toDate('2019-07-25') AND Release = '17.11.0.542'; + +drop table if exists audience_local; diff --git a/parser/testdata/01036_no_superfluous_dict_reload_on_create_database/ast.json b/parser/testdata/01036_no_superfluous_dict_reload_on_create_database/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01036_no_superfluous_dict_reload_on_create_database/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01036_no_superfluous_dict_reload_on_create_database/metadata.json b/parser/testdata/01036_no_superfluous_dict_reload_on_create_database/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01036_no_superfluous_dict_reload_on_create_database/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01036_no_superfluous_dict_reload_on_create_database/query.sql b/parser/testdata/01036_no_superfluous_dict_reload_on_create_database/query.sql new file mode 100644 index 000000000..1334780a5 --- /dev/null +++ b/parser/testdata/01036_no_superfluous_dict_reload_on_create_database/query.sql @@ -0,0 +1,28 @@ +-- Tags: no-parallel +-- Cannot run in parallel with other tests that issue SYSTEM RELOAD DICTIONARIES at the same time.
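+-- Presumably the point of the checks below: query_count in system.dictionaries should stay 0 until the first dictGet, +-- increase only on dictGet or SYSTEM RELOAD DICTIONARY, and remain unchanged when an unrelated database is created.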
+ +CREATE TABLE dict_data (key UInt64, val UInt64) Engine=Memory(); +CREATE DICTIONARY dict +( + key UInt64 DEFAULT 0, + val UInt64 DEFAULT 10 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'dict_data' PASSWORD '' DB currentDatabase())) +LIFETIME(MIN 0 MAX 0) +LAYOUT(FLAT()); + +SELECT query_count FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict'; +SELECT dictGetUInt64('dict', 'val', toUInt64(0)); +SELECT query_count FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict'; + +SELECT 'SYSTEM RELOAD DICTIONARY'; +SYSTEM RELOAD DICTIONARY dict; +SELECT query_count FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict'; +SELECT dictGetUInt64('dict', 'val', toUInt64(0)); +SELECT query_count FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict'; + +SELECT 'CREATE DATABASE'; +DROP DATABASE IF EXISTS empty_db_01036; +CREATE DATABASE IF NOT EXISTS empty_db_01036; +SELECT query_count FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict'; diff --git a/parser/testdata/01036_no_superfluous_dict_reload_on_create_database_2/ast.json b/parser/testdata/01036_no_superfluous_dict_reload_on_create_database_2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01036_no_superfluous_dict_reload_on_create_database_2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01036_no_superfluous_dict_reload_on_create_database_2/metadata.json b/parser/testdata/01036_no_superfluous_dict_reload_on_create_database_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01036_no_superfluous_dict_reload_on_create_database_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01036_no_superfluous_dict_reload_on_create_database_2/query.sql b/parser/testdata/01036_no_superfluous_dict_reload_on_create_database_2/query.sql new file mode 100644 index 000000000..bc84bc068 --- /dev/null +++ b/parser/testdata/01036_no_superfluous_dict_reload_on_create_database_2/query.sql @@ -0,0 +1,35 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS `foo 1234`; +CREATE DATABASE `foo 1234`; + +CREATE TABLE `foo 1234`.dict_data (key UInt64, val UInt64) Engine=Memory(); +CREATE DICTIONARY `foo 1234`.dict +( + key UInt64 DEFAULT 0, + val UInt64 DEFAULT 10 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'dict_data' PASSWORD '' DB 'foo 1234')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(FLAT()); + +SELECT query_count FROM system.dictionaries WHERE database = 'foo 1234' AND name = 'dict'; +SELECT dictGetUInt64('foo 1234.dict', 'val', toUInt64(0)); +SELECT query_count FROM system.dictionaries WHERE database = 'foo 1234' AND name = 'dict'; + +SELECT 'SYSTEM RELOAD DICTIONARY'; +SYSTEM RELOAD DICTIONARY `foo 1234`.dict; +SELECT query_count FROM system.dictionaries WHERE database = 'foo 1234' AND name = 'dict'; +SELECT dictGetUInt64('foo 1234.dict', 'val', toUInt64(0)); +SELECT query_count FROM system.dictionaries WHERE database = 'foo 1234' AND name = 'dict'; + +SELECT 'CREATE DATABASE'; +DROP DATABASE IF EXISTS `foo 123`; +CREATE DATABASE `foo 123`; +SELECT query_count FROM system.dictionaries WHERE database = 'foo 1234' AND name = 'dict'; + +DROP DICTIONARY `foo 1234`.dict; +DROP TABLE `foo 1234`.dict_data; +DROP DATABASE `foo 1234`; +DROP DATABASE `foo 123`; diff --git a/parser/testdata/01036_union_different_columns/ast.json 
b/parser/testdata/01036_union_different_columns/ast.json new file mode 100644 index 000000000..ab27b5cad --- /dev/null +++ b/parser/testdata/01036_union_different_columns/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1 (alias c1)" + }, + { + "explain": " Literal UInt64_2 (alias c2)" + }, + { + "explain": " Literal UInt64_3 (alias c3)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1 (alias c1)" + }, + { + "explain": " Literal UInt64_2 (alias c2)" + }, + { + "explain": " Literal UInt64_3 (alias c3)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias c1)" + }, + { + "explain": " Literal UInt64_2 (alias c2)" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001415647, + "rows_read": 16, + "bytes_read": 605 + } +} diff --git a/parser/testdata/01036_union_different_columns/metadata.json b/parser/testdata/01036_union_different_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01036_union_different_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01036_union_different_columns/query.sql b/parser/testdata/01036_union_different_columns/query.sql new file mode 100644 index 000000000..396b7ac4c --- /dev/null +++ b/parser/testdata/01036_union_different_columns/query.sql @@ -0,0 +1 @@ +select 1 as c1, 2 as c2, 3 as c3 union all (select 1 as c1, 2 as c2, 3 as c3 union all select 1 as c1, 2 as c2) -- { serverError UNION_ALL_RESULT_STRUCTURES_MISMATCH } diff --git a/parser/testdata/01037_zookeeper_check_table_empty_pk/ast.json b/parser/testdata/01037_zookeeper_check_table_empty_pk/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01037_zookeeper_check_table_empty_pk/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01037_zookeeper_check_table_empty_pk/metadata.json b/parser/testdata/01037_zookeeper_check_table_empty_pk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01037_zookeeper_check_table_empty_pk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01037_zookeeper_check_table_empty_pk/query.sql b/parser/testdata/01037_zookeeper_check_table_empty_pk/query.sql new file mode 100644 index 000000000..6e539774e --- /dev/null +++ b/parser/testdata/01037_zookeeper_check_table_empty_pk/query.sql @@ -0,0 +1,25 @@ +-- Tags: zookeeper + +SET insert_keeper_fault_injection_probability=0; -- disable fault injection; part ids are non-deterministic in case of insert retries +SET check_query_single_value_result = 0; +SET send_logs_level = 'fatal'; + +DROP TABLE IF EXISTS mt_without_pk SYNC; + +CREATE TABLE mt_without_pk (SomeField1 Int64, SomeField2 Double) ENGINE = MergeTree() ORDER BY tuple(); + +INSERT INTO mt_without_pk VALUES (1, 2); + +CHECK TABLE mt_without_pk SETTINGS max_threads = 1; + +DROP TABLE IF EXISTS mt_without_pk SYNC; + +DROP TABLE IF EXISTS replicated_mt_without_pk SYNC; + +CREATE TABLE replicated_mt_without_pk (SomeField1 Int64, SomeField2 Double) ENGINE = 
ReplicatedMergeTree('/clickhouse/tables/{database}/test_01037/replicated_mt_without_pk', '1') ORDER BY tuple(); + +INSERT INTO replicated_mt_without_pk VALUES (1, 2); + +CHECK TABLE replicated_mt_without_pk SETTINGS max_threads = 1; + +DROP TABLE IF EXISTS replicated_mt_without_pk SYNC; diff --git a/parser/testdata/01038_array_of_unnamed_tuples/ast.json b/parser/testdata/01038_array_of_unnamed_tuples/ast.json new file mode 100644 index 000000000..806ef0e4c --- /dev/null +++ b/parser/testdata/01038_array_of_unnamed_tuples/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001243254, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01038_array_of_unnamed_tuples/metadata.json b/parser/testdata/01038_array_of_unnamed_tuples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01038_array_of_unnamed_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01038_array_of_unnamed_tuples/query.sql b/parser/testdata/01038_array_of_unnamed_tuples/query.sql new file mode 100644 index 000000000..5da319f1a --- /dev/null +++ b/parser/testdata/01038_array_of_unnamed_tuples/query.sql @@ -0,0 +1,16 @@ +SET send_logs_level = 'fatal'; + +DROP TABLE IF EXISTS array_of_tuples; + +CREATE TABLE array_of_tuples +( + f Array(Tuple(Float64, Float64)), + s Array(Tuple(UInt8, UInt16, UInt32)) +) ENGINE = Memory; + +INSERT INTO array_of_tuples values ([(1, 2), (2, 3), (3, 4)], array(tuple(1, 2, 3), tuple(2, 3, 4))), (array((1.0, 2.0)), [tuple(4, 3, 1)]); + +SELECT f from array_of_tuples; +SELECT s from array_of_tuples; + +DROP TABLE array_of_tuples; diff --git a/parser/testdata/01039_mergetree_exec_time/ast.json b/parser/testdata/01039_mergetree_exec_time/ast.json new file mode 100644 index 000000000..049f85f89 --- /dev/null +++ b/parser/testdata/01039_mergetree_exec_time/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001063114, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01039_mergetree_exec_time/metadata.json b/parser/testdata/01039_mergetree_exec_time/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01039_mergetree_exec_time/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01039_mergetree_exec_time/query.sql b/parser/testdata/01039_mergetree_exec_time/query.sql new file mode 100644 index 000000000..3d522af66 --- /dev/null +++ b/parser/testdata/01039_mergetree_exec_time/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS tab; +create table tab (A Int64) Engine=MergeTree order by tuple() SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0; +insert into tab select cityHash64(number) from numbers(1000); +select sum(sleep(0.1)) from tab settings max_block_size = 1, max_execution_time=1; -- { serverError TIMEOUT_EXCEEDED } +DROP TABLE IF EXISTS tab; diff --git a/parser/testdata/01039_test_setting_parse/ast.json b/parser/testdata/01039_test_setting_parse/ast.json new file mode 100644 index 000000000..38133a569 --- /dev/null +++ b/parser/testdata/01039_test_setting_parse/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + 
"data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001232389, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01039_test_setting_parse/metadata.json b/parser/testdata/01039_test_setting_parse/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01039_test_setting_parse/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01039_test_setting_parse/query.sql b/parser/testdata/01039_test_setting_parse/query.sql new file mode 100644 index 000000000..8f2337fd8 --- /dev/null +++ b/parser/testdata/01039_test_setting_parse/query.sql @@ -0,0 +1,20 @@ +SET max_memory_usage = '1G'; +SELECT value FROM system.settings WHERE name = 'max_memory_usage'; +SET max_memory_usage = '3Gi'; +SELECT value FROM system.settings WHERE name = 'max_memory_usage'; +SET max_memory_usage = '15678k'; +SELECT value FROM system.settings WHERE name = 'max_memory_usage'; +SET max_memory_usage = '12345ki'; +SELECT value FROM system.settings WHERE name = 'max_memory_usage'; +SET max_memory_usage = '15678K'; +SELECT value FROM system.settings WHERE name = 'max_memory_usage'; +SET max_memory_usage = '12345Ki'; +SELECT value FROM system.settings WHERE name = 'max_memory_usage'; +SET max_memory_usage = '12M'; +SELECT value FROM system.settings WHERE name = 'max_memory_usage'; +SET max_memory_usage = '31Mi'; +SELECT value FROM system.settings WHERE name = 'max_memory_usage'; +SET max_memory_usage = '1T'; +SELECT value FROM system.settings WHERE name = 'max_memory_usage'; +SET max_memory_usage = '1Ti'; +SELECT value FROM system.settings WHERE name = 'max_memory_usage'; diff --git a/parser/testdata/01040_distributed_background_insert_batch_inserts/ast.json b/parser/testdata/01040_distributed_background_insert_batch_inserts/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01040_distributed_background_insert_batch_inserts/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01040_distributed_background_insert_batch_inserts/metadata.json b/parser/testdata/01040_distributed_background_insert_batch_inserts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01040_distributed_background_insert_batch_inserts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01040_distributed_background_insert_batch_inserts/query.sql b/parser/testdata/01040_distributed_background_insert_batch_inserts/query.sql new file mode 100644 index 000000000..e82c0f78d --- /dev/null +++ b/parser/testdata/01040_distributed_background_insert_batch_inserts/query.sql @@ -0,0 +1,50 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS test_01040; +DROP TABLE IF EXISTS dist_test_01040; + +CREATE TABLE test_01040 (key UInt64) ENGINE=TinyLog(); +CREATE TABLE dist_test_01040 AS test_01040 Engine=Distributed(test_cluster_two_shards, currentDatabase(), test_01040, key) SETTINGS + background_insert_batch=1, + background_insert_sleep_time_ms=10, + background_insert_max_sleep_time_ms=100; + +-- internal_replication=false +SELECT 'test_cluster_two_shards prefer_localhost_replica=0'; +SET prefer_localhost_replica=0; +INSERT INTO dist_test_01040 SELECT toUInt64(number) FROM numbers(2); +SYSTEM FLUSH DISTRIBUTED dist_test_01040; +SELECT * FROM dist_test_01040 ORDER BY key; +TRUNCATE TABLE test_01040; + +SELECT 'test_cluster_two_shards prefer_localhost_replica=1'; +SET prefer_localhost_replica=1; +INSERT INTO dist_test_01040 SELECT 
toUInt64(number) FROM numbers(2); +SYSTEM FLUSH DISTRIBUTED dist_test_01040; +SELECT * FROM dist_test_01040 ORDER BY key; +TRUNCATE TABLE test_01040; + +DROP TABLE dist_test_01040; + +-- internal_replication=true +CREATE TABLE dist_test_01040 AS test_01040 Engine=Distributed(test_cluster_two_shards_internal_replication, currentDatabase(), test_01040, key) SETTINGS + background_insert_batch=1, + background_insert_sleep_time_ms=10, + background_insert_max_sleep_time_ms=100; +SELECT 'test_cluster_two_shards_internal_replication prefer_localhost_replica=0'; +SET prefer_localhost_replica=0; +INSERT INTO dist_test_01040 SELECT toUInt64(number) FROM numbers(2); +SYSTEM FLUSH DISTRIBUTED dist_test_01040; +SELECT * FROM dist_test_01040 ORDER BY key; +TRUNCATE TABLE test_01040; + +SELECT 'test_cluster_two_shards_internal_replication prefer_localhost_replica=1'; +SET prefer_localhost_replica=1; +INSERT INTO dist_test_01040 SELECT toUInt64(number) FROM numbers(2); +SYSTEM FLUSH DISTRIBUTED dist_test_01040; +SELECT * FROM dist_test_01040 ORDER BY key; +TRUNCATE TABLE test_01040; + + +DROP TABLE dist_test_01040; +DROP TABLE test_01040; diff --git a/parser/testdata/01040_h3_get_resolution/ast.json b/parser/testdata/01040_h3_get_resolution/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01040_h3_get_resolution/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01040_h3_get_resolution/metadata.json b/parser/testdata/01040_h3_get_resolution/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01040_h3_get_resolution/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01040_h3_get_resolution/query.sql b/parser/testdata/01040_h3_get_resolution/query.sql new file mode 100644 index 000000000..cc6cc5779 --- /dev/null +++ b/parser/testdata/01040_h3_get_resolution/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest + +SELECT h3GetResolution(581276613233082367); +SELECT h3GetResolution(621807531097128959); +SELECT h3GetResolution(644325529233966508); diff --git a/parser/testdata/01041_create_dictionary_if_not_exists/ast.json b/parser/testdata/01041_create_dictionary_if_not_exists/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01041_create_dictionary_if_not_exists/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01041_create_dictionary_if_not_exists/metadata.json b/parser/testdata/01041_create_dictionary_if_not_exists/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01041_create_dictionary_if_not_exists/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01041_create_dictionary_if_not_exists/query.sql b/parser/testdata/01041_create_dictionary_if_not_exists/query.sql new file mode 100644 index 000000000..ab0b5a243 --- /dev/null +++ b/parser/testdata/01041_create_dictionary_if_not_exists/query.sql @@ -0,0 +1,38 @@ + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.table_for_dict +( + key_column UInt64, + value Float64 +) +ENGINE = MergeTree() +ORDER BY key_column; + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.table_for_dict VALUES (1, 1.1); + +CREATE DICTIONARY IF NOT EXISTS {CLICKHOUSE_DATABASE:Identifier}.dict_exists +( + key_column UInt64, + value Float64 DEFAULT 77.77 +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' DB currentDatabase())) +LIFETIME(1) +LAYOUT(FLAT()); + +SELECT 
dictGetFloat64({CLICKHOUSE_DATABASE:String} || '.dict_exists', 'value', toUInt64(1)); + + +CREATE DICTIONARY IF NOT EXISTS {CLICKHOUSE_DATABASE:Identifier}.dict_exists +( + key_column UInt64, + value Float64 DEFAULT 77.77 +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' DB currentDatabase())) +LIFETIME(1) +LAYOUT(FLAT()); + +SELECT dictGetFloat64({CLICKHOUSE_DATABASE:String} || '.dict_exists', 'value', toUInt64(1)); + +DROP DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict_exists; +DROP TABLE {CLICKHOUSE_DATABASE:Identifier}.table_for_dict; diff --git a/parser/testdata/01041_h3_is_valid/ast.json b/parser/testdata/01041_h3_is_valid/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01041_h3_is_valid/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01041_h3_is_valid/metadata.json b/parser/testdata/01041_h3_is_valid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01041_h3_is_valid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01041_h3_is_valid/query.sql b/parser/testdata/01041_h3_is_valid/query.sql new file mode 100644 index 000000000..95c1b3f0f --- /dev/null +++ b/parser/testdata/01041_h3_is_valid/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest + +SELECT h3IsValid(581276613233082367); +SELECT h3IsValid(621807531097128959); +SELECT h3IsValid(Cast(0, 'UInt64')); +SELECT h3IsValid(100000000000000000); diff --git a/parser/testdata/01042_check_query_and_last_granule_size/ast.json b/parser/testdata/01042_check_query_and_last_granule_size/ast.json new file mode 100644 index 000000000..1b197f55b --- /dev/null +++ b/parser/testdata/01042_check_query_and_last_granule_size/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001366818, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01042_check_query_and_last_granule_size/metadata.json b/parser/testdata/01042_check_query_and_last_granule_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01042_check_query_and_last_granule_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01042_check_query_and_last_granule_size/query.sql b/parser/testdata/01042_check_query_and_last_granule_size/query.sql new file mode 100644 index 000000000..7b7d17063 --- /dev/null +++ b/parser/testdata/01042_check_query_and_last_granule_size/query.sql @@ -0,0 +1,40 @@ +SET optimize_trivial_insert_select = 1; +SET check_query_single_value_result = 0; + +DROP TABLE IF EXISTS check_query_test; + +CREATE TABLE check_query_test (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0; + +-- Number of rows in last granule should be equal to granularity. +-- Rows in this table are short, so granularity will be 8192.
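+-- For example: 81920 inserted rows / 8192 rows per granule = exactly 10 granules, so the last granule should be full.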
+INSERT INTO check_query_test SELECT number, toString(number) FROM system.numbers LIMIT 81920; + +CHECK TABLE check_query_test SETTINGS max_threads = 1; + +OPTIMIZE TABLE check_query_test; + +CHECK TABLE check_query_test SETTINGS max_threads = 1; + +DROP TABLE IF EXISTS check_query_test; + +DROP TABLE IF EXISTS check_query_test_non_adaptive; + +CREATE TABLE check_query_test_non_adaptive (SomeKey UInt64, SomeValue String) ENGINE = MergeTree() ORDER BY SomeKey SETTINGS index_granularity_bytes = 0, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0; + +INSERT INTO check_query_test_non_adaptive SELECT number, toString(number) FROM system.numbers LIMIT 81920; + +CHECK TABLE check_query_test_non_adaptive SETTINGS max_threads = 1; + +OPTIMIZE TABLE check_query_test_non_adaptive; + +CHECK TABLE check_query_test_non_adaptive SETTINGS max_threads = 1; + +INSERT INTO check_query_test_non_adaptive SELECT number, toString(number) FROM system.numbers LIMIT 77; + +CHECK TABLE check_query_test_non_adaptive SETTINGS max_threads = 1; + +OPTIMIZE TABLE check_query_test_non_adaptive; + +CHECK TABLE check_query_test_non_adaptive SETTINGS max_threads = 1; + +DROP TABLE IF EXISTS check_query_test_non_adaptive; diff --git a/parser/testdata/01042_h3_k_ring/ast.json b/parser/testdata/01042_h3_k_ring/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01042_h3_k_ring/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01042_h3_k_ring/metadata.json b/parser/testdata/01042_h3_k_ring/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01042_h3_k_ring/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01042_h3_k_ring/query.sql b/parser/testdata/01042_h3_k_ring/query.sql new file mode 100644 index 000000000..da4955683 --- /dev/null +++ b/parser/testdata/01042_h3_k_ring/query.sql @@ -0,0 +1,39 @@ +-- Tags: no-fasttest + +SELECT arraySort(h3kRing(581276613233082367, toUInt16(1))); +SELECT h3kRing(581276613233082367, toUInt16(0)); +SELECT h3kRing(581276613233082367, -1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT h3kRing(581276613233082367, toUInt16(-1)); -- { serverError PARAMETER_OUT_OF_BOUND } + +SELECT arraySort(h3kRing(581276613233082367, 1)); +SELECT h3kRing(581276613233082367, 0); +SELECT h3kRing(581276613233082367, -1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +DROP TABLE IF EXISTS h3_indexes; + +-- Test h3 indices and k selected from original test fixture: https://github.com/uber/h3/blob/master/src/apps/testapps + +CREATE TABLE h3_indexes (h3_index UInt64, k UInt16) ENGINE = Memory; + + +INSERT INTO h3_indexes VALUES (579205133326352383,1); +INSERT INTO h3_indexes VALUES (581263419093549055,2); +INSERT INTO h3_indexes VALUES (589753847883235327,3); +INSERT INTO h3_indexes VALUES (594082350283882495,4); +INSERT INTO h3_indexes VALUES (598372386957426687,5); +INSERT INTO h3_indexes VALUES (599542359671177215,6); +INSERT INTO h3_indexes VALUES (604296355086598143,7); +INSERT INTO h3_indexes VALUES (608785214872748031,8); +INSERT INTO h3_indexes VALUES (615732192485572607,9); +INSERT INTO h3_indexes VALUES (617056794467368959,10); +INSERT INTO h3_indexes VALUES (624586477873168383,11); +INSERT INTO h3_indexes VALUES (627882919484481535,12); +INSERT INTO h3_indexes VALUES (634600058503392255,13); +INSERT INTO h3_indexes VALUES (635544851677385791,14); +INSERT INTO h3_indexes VALUES (639763125756281263,15); +INSERT INTO h3_indexes VALUES (644178757620501158,16); + + +SELECT 
arraySort(h3kRing(h3_index, k)) FROM h3_indexes ORDER BY h3_index; + +DROP TABLE h3_indexes; diff --git a/parser/testdata/01043_categorical_iv/ast.json b/parser/testdata/01043_categorical_iv/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01043_categorical_iv/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01043_categorical_iv/metadata.json b/parser/testdata/01043_categorical_iv/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01043_categorical_iv/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01043_categorical_iv/query.sql b/parser/testdata/01043_categorical_iv/query.sql new file mode 100644 index 000000000..049070a69 --- /dev/null +++ b/parser/testdata/01043_categorical_iv/query.sql @@ -0,0 +1,116 @@ +-- trivial + +SELECT + categoricalInformationValue(x.1, x.2) +FROM ( + SELECT + arrayJoin(arrayPopBack([(1, 0)])) as x +); + +SELECT + categoricalInformationValue(x.1, x.2) +FROM ( + SELECT + arrayJoin([(0, 0)]) as x +); + +SELECT + categoricalInformationValue(x.1, x.2) +FROM ( + SELECT + arrayJoin([(1, 0)]) as x +); + +-- single category + +SELECT + arrayMap(x -> x = 0 ? 0 : x, categoricalInformationValue(x.1, x.2)) -- remove negative zeros +FROM ( + SELECT + arrayJoin([(1, 0), (1, 0), (1, 0), (1, 1), (1, 1)]) as x +); + +SELECT + categoricalInformationValue(x.1, x.2) +FROM ( + SELECT + arrayJoin([(0, 0), (0, 1), (1, 0), (1, 1)]) as x +); + +SELECT + categoricalInformationValue(x.1, x.2) +FROM ( + SELECT + arrayJoin([(0, 0), (0, 0), (1, 0), (1, 0)]) as x +); + +SELECT + categoricalInformationValue(x.1, x.2) +FROM ( + SELECT + arrayJoin([(0, 1), (0, 1), (1, 1), (1, 1)]) as x +); + +SELECT + categoricalInformationValue(x.1, x.2) +FROM ( + SELECT + arrayJoin([(0, 0), (0, 1), (1, 1), (1, 1)]) as x +); + +SELECT + categoricalInformationValue(x.1, x.2) +FROM ( + SELECT + arrayJoin([(0, 0), (0, 1), (1, 0), (1, 0)]) as x +); + +SELECT + round(categoricalInformationValue(x.1, x.2)[1], 6), + round((2 / 2 - 2 / 3) * (log(2 / 2) - log(2 / 3)), 6) +FROM ( + SELECT + arrayJoin([(0, 0), (1, 0), (1, 0), (1, 1), (1, 1)]) as x +); + +-- multiple category + +SELECT + categoricalInformationValue(x.1, x.2, x.3) +FROM ( + SELECT + arrayJoin([(1, 0, 0), (1, 0, 0), (1, 0, 1), (0, 1, 0), (0, 1, 0), (0, 1, 1)]) as x +); + +SELECT + round(categoricalInformationValue(x.1, x.2, x.3)[1], 6), + round(categoricalInformationValue(x.1, x.2, x.3)[2], 6), + round((2 / 4 - 1 / 3) * (log(2 / 4) - log(1 / 3)), 6), + round((2 / 4 - 2 / 3) * (log(2 / 4) - log(2 / 3)), 6) +FROM ( + SELECT + arrayJoin([(1, 0, 0), (1, 0, 0), (1, 0, 1), (0, 1, 0), (0, 1, 0), (0, 1, 1), (0, 1, 1)]) as x +); + +-- multiple category, larger data size + +SELECT + categoricalInformationValue(x.1, x.2, x.3) +FROM ( + SELECT + arrayJoin([(1, 0, 0), (1, 0, 0), (1, 0, 1), (0, 1, 0), (0, 1, 0), (0, 1, 1)]) as x + FROM + numbers(1000) +); + +SELECT + round(categoricalInformationValue(x.1, x.2, x.3)[1], 6), + round(categoricalInformationValue(x.1, x.2, x.3)[2], 6), + round((2 / 4 - 1 / 3) * (log(2 / 4) - log(1 / 3)), 6), + round((2 / 4 - 2 / 3) * (log(2 / 4) - log(2 / 3)), 6) +FROM ( + SELECT + arrayJoin([(1, 0, 0), (1, 0, 0), (1, 0, 1), (0, 1, 0), (0, 1, 0), (0, 1, 1), (0, 1, 1)]) as x + FROM + numbers(1000) +); diff --git a/parser/testdata/01043_dictionary_attribute_properties_values/ast.json b/parser/testdata/01043_dictionary_attribute_properties_values/ast.json new file mode 100644 index 000000000..490a2e17e --- 
/dev/null +++ b/parser/testdata/01043_dictionary_attribute_properties_values/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01043_dictionary_attribute_properties_values/metadata.json b/parser/testdata/01043_dictionary_attribute_properties_values/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01043_dictionary_attribute_properties_values/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01043_dictionary_attribute_properties_values/query.sql b/parser/testdata/01043_dictionary_attribute_properties_values/query.sql new file mode 100644 index 000000000..4f0784999 --- /dev/null +++ b/parser/testdata/01043_dictionary_attribute_properties_values/query.sql @@ -0,0 +1,23 @@ + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.dicttbl(key Int64, value_default String, value_expression String) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.dicttbl VALUES (12, 'hello', '55:66:77'); + + +CREATE DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.dict +( + key Int64 DEFAULT -1, + value_default String DEFAULT 'world', + value_expression String DEFAULT 'xxx' EXPRESSION 'toString(127 * 172)' + +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'dicttbl' DB currentDatabase())) +LAYOUT(FLAT()) +LIFETIME(1); + + +SELECT dictGetString({CLICKHOUSE_DATABASE:String} || '.dict', 'value_default', toUInt64(12)); +SELECT dictGetString({CLICKHOUSE_DATABASE:String} || '.dict', 'value_default', toUInt64(14)); + +SELECT dictGetString({CLICKHOUSE_DATABASE:String} || '.dict', 'value_expression', toUInt64(12)); +SELECT dictGetString({CLICKHOUSE_DATABASE:String} || '.dict', 'value_expression', toUInt64(14)); diff --git a/parser/testdata/01043_geo_distance/ast.json b/parser/testdata/01043_geo_distance/ast.json new file mode 100644 index 000000000..5e0ee268a --- /dev/null +++ b/parser/testdata/01043_geo_distance/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000877728, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01043_geo_distance/metadata.json b/parser/testdata/01043_geo_distance/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01043_geo_distance/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01043_geo_distance/query.sql b/parser/testdata/01043_geo_distance/query.sql new file mode 100644 index 000000000..cf877d05b --- /dev/null +++ b/parser/testdata/01043_geo_distance/query.sql @@ -0,0 +1,27 @@ +SET geo_distance_returns_float64_on_float64_arguments = 0; + +SELECT greatCircleDistance(0., 0., 0., 1.); +SELECT greatCircleDistance(0., 89., 0, 90.); + +SELECT geoDistance(0., 0., 0., 1.); +SELECT geoDistance(0., 89., 0., 90.); + +SELECT greatCircleDistance(0., 0., 90., 0.); +SELECT greatCircleDistance(0., 0., 0., 90.); + +SELECT geoDistance(0., 0., 90., 0.); +SELECT geoDistance(0., 0., 0., 90.); + +SET geo_distance_returns_float64_on_float64_arguments = 1; + +SELECT greatCircleDistance(0., 0., 0., 1.); +SELECT greatCircleDistance(0., 89., 0, 90.); + +SELECT geoDistance(0., 0., 0., 1.); +SELECT geoDistance(0., 89., 0., 90.); + +SELECT greatCircleDistance(0., 0., 90., 0.); +SELECT greatCircleDistance(0., 0., 0., 90.); + +SELECT geoDistance(0., 0., 90., 0.); +SELECT geoDistance(0., 0., 0., 90.); diff --git 
a/parser/testdata/01043_h3_edge_length_m/ast.json b/parser/testdata/01043_h3_edge_length_m/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01043_h3_edge_length_m/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01043_h3_edge_length_m/metadata.json b/parser/testdata/01043_h3_edge_length_m/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01043_h3_edge_length_m/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01043_h3_edge_length_m/query.sql b/parser/testdata/01043_h3_edge_length_m/query.sql new file mode 100644 index 000000000..d21f4b17e --- /dev/null +++ b/parser/testdata/01043_h3_edge_length_m/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest + +SELECT h3EdgeLengthM(1); +SELECT h3EdgeLengthM(4); diff --git a/parser/testdata/01044_great_circle_angle/ast.json b/parser/testdata/01044_great_circle_angle/ast.json new file mode 100644 index 000000000..796091dea --- /dev/null +++ b/parser/testdata/01044_great_circle_angle/ast.json @@ -0,0 +1,100 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function minus (alias lat) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_90" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function greatCircleAngle (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Identifier lat" + }, + { + "explain": " Function abs (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier lat" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_180" + } + ], + + "rows": 26, + + "statistics": + { + "elapsed": 0.001104242, + "rows_read": 26, + "bytes_read": 1005 + } +} diff --git a/parser/testdata/01044_great_circle_angle/metadata.json b/parser/testdata/01044_great_circle_angle/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01044_great_circle_angle/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01044_great_circle_angle/query.sql b/parser/testdata/01044_great_circle_angle/query.sql new file mode 100644 index 000000000..0fac783dd --- /dev/null +++ b/parser/testdata/01044_great_circle_angle/query.sql @@ -0,0 +1,3 @@ +WITH number - 90 AS lat SELECT DISTINCT greatCircleAngle(0, 0, 0, lat) = abs(lat) FROM numbers(180); +WITH number - 180 AS lon SELECT lon, round(greatCircleAngle(0, 0, lon, 0) - abs(lon) AS err, 2) FROM numbers(360) WHERE abs(err) > 0.01; +SELECT bar((greatCircleAngle(0, 0, number, number) - number) * 100, 0, 2000, 100) FROM numbers(90); diff --git 
a/parser/testdata/01044_h3_edge_angle/ast.json b/parser/testdata/01044_h3_edge_angle/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01044_h3_edge_angle/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01044_h3_edge_angle/metadata.json b/parser/testdata/01044_h3_edge_angle/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01044_h3_edge_angle/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01044_h3_edge_angle/query.sql b/parser/testdata/01044_h3_edge_angle/query.sql new file mode 100644 index 000000000..6af0911a8 --- /dev/null +++ b/parser/testdata/01044_h3_edge_angle/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest + +SELECT h3EdgeAngle(10); +SELECT h3EdgeLengthM(2) * 180 / pi() / 6371007.180918475 - h3EdgeAngle(2); diff --git a/parser/testdata/01045_array_zip/ast.json b/parser/testdata/01045_array_zip/ast.json new file mode 100644 index 000000000..d3eb266c8 --- /dev/null +++ b/parser/testdata/01045_array_zip/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayZip (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_['a', 'b', 'c']" + }, + { + "explain": " Literal Array_['d', 'e', 'f']" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001118462, + "rows_read": 8, + "bytes_read": 317 + } +} diff --git a/parser/testdata/01045_array_zip/metadata.json b/parser/testdata/01045_array_zip/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01045_array_zip/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01045_array_zip/query.sql b/parser/testdata/01045_array_zip/query.sql new file mode 100644 index 000000000..801df5a32 --- /dev/null +++ b/parser/testdata/01045_array_zip/query.sql @@ -0,0 +1,9 @@ +SELECT arrayZip(['a', 'b', 'c'], ['d', 'e', 'f']); + +SELECT arrayZip(['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']); + +SELECT arrayZip(); + +SELECT arrayZip('a', 'b', 'c'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT arrayZip(['a', 'b', 'c'], ['d', 'e', 'f', 'd']); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } diff --git a/parser/testdata/01045_bloom_filter_null_array/ast.json b/parser/testdata/01045_bloom_filter_null_array/ast.json new file mode 100644 index 000000000..8071c5e36 --- /dev/null +++ b/parser/testdata/01045_bloom_filter_null_array/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery bloom_filter_null_array (children 1)" + }, + { + "explain": " Identifier bloom_filter_null_array" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001310062, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/01045_bloom_filter_null_array/metadata.json b/parser/testdata/01045_bloom_filter_null_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01045_bloom_filter_null_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01045_bloom_filter_null_array/query.sql b/parser/testdata/01045_bloom_filter_null_array/query.sql new file mode 
100644 index 000000000..4a5741b4e --- /dev/null +++ b/parser/testdata/01045_bloom_filter_null_array/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS bloom_filter_null_array; + +CREATE TABLE bloom_filter_null_array (v Array(LowCardinality(Nullable(String))), INDEX idx v TYPE bloom_filter(0.1) GRANULARITY 1) ENGINE = MergeTree() ORDER BY v SETTINGS allow_nullable_key = 1; + +INSERT INTO bloom_filter_null_array VALUES ([]); +INSERT INTO bloom_filter_null_array VALUES (['1', '2']) ([]) ([]); +INSERT INTO bloom_filter_null_array VALUES ([]) ([]) (['2', '3']); + +SELECT COUNT() FROM bloom_filter_null_array; +SELECT COUNT() FROM bloom_filter_null_array WHERE has(v, '1'); +SELECT COUNT() FROM bloom_filter_null_array WHERE has(v, '2'); +SELECT COUNT() FROM bloom_filter_null_array WHERE has(v, '3'); +SELECT COUNT() FROM bloom_filter_null_array WHERE has(v, '4'); + +DROP TABLE IF EXISTS bloom_filter_null_array; diff --git a/parser/testdata/01045_dictionaries_restrictions/ast.json b/parser/testdata/01045_dictionaries_restrictions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01045_dictionaries_restrictions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01045_dictionaries_restrictions/metadata.json b/parser/testdata/01045_dictionaries_restrictions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01045_dictionaries_restrictions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01045_dictionaries_restrictions/query.sql b/parser/testdata/01045_dictionaries_restrictions/query.sql new file mode 100644 index 000000000..702e05071 --- /dev/null +++ b/parser/testdata/01045_dictionaries_restrictions/query.sql @@ -0,0 +1,17 @@ + +CREATE DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.restricted_dict ( + key UInt64, + value String +) +PRIMARY KEY key +SOURCE(EXECUTABLE(COMMAND 'echo -E "1\thello"' FORMAT TabSeparated)) +LIFETIME(MIN 0 MAX 1) +LAYOUT(CACHE(SIZE_IN_CELLS 10)); + +-- because of lazy load we can check only in dictGet query +select dictGetString({CLICKHOUSE_DATABASE:String} || '.restricted_dict', 'value', toUInt64(1)); -- {serverError DICTIONARY_ACCESS_DENIED} + +select 'Ok.'; + +DROP DICTIONARY IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.restricted_dict; + diff --git a/parser/testdata/01046_materialized_view_with_join_over_distributed/ast.json b/parser/testdata/01046_materialized_view_with_join_over_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01046_materialized_view_with_join_over_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01046_materialized_view_with_join_over_distributed/metadata.json b/parser/testdata/01046_materialized_view_with_join_over_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01046_materialized_view_with_join_over_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01046_materialized_view_with_join_over_distributed/query.sql b/parser/testdata/01046_materialized_view_with_join_over_distributed/query.sql new file mode 100644 index 000000000..781a5f34e --- /dev/null +++ b/parser/testdata/01046_materialized_view_with_join_over_distributed/query.sql @@ -0,0 +1,22 @@ +-- Tags: distributed + +-- from https://github.com/ClickHouse/ClickHouse/issues/5142 + +set distributed_foreground_insert = 1; + +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS t_d; +DROP TABLE IF EXISTS 
t_v; +CREATE TABLE t (`A` Int64) ENGINE = MergeTree() ORDER BY tuple(); +CREATE TABLE t_d AS t ENGINE = Distributed(test_shard_localhost, currentDatabase(), t); +CREATE MATERIALIZED VIEW t_v ENGINE = MergeTree() ORDER BY tuple() AS SELECT A FROM t LEFT JOIN ( SELECT toInt64(dummy) AS A FROM system.one ) js2 USING (A); + +INSERT INTO t_d SELECT number FROM numbers(2); +SELECT * FROM t_v ORDER BY A; + +INSERT INTO t SELECT number+2 FROM numbers(2); +SELECT * FROM t_v ORDER BY A; + +DROP TABLE IF EXISTS t_v; +DROP TABLE IF EXISTS t_d; +DROP TABLE IF EXISTS t; diff --git a/parser/testdata/01046_trivial_count_query_distributed/ast.json b/parser/testdata/01046_trivial_count_query_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01046_trivial_count_query_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01046_trivial_count_query_distributed/metadata.json b/parser/testdata/01046_trivial_count_query_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01046_trivial_count_query_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01046_trivial_count_query_distributed/query.sql b/parser/testdata/01046_trivial_count_query_distributed/query.sql new file mode 100644 index 000000000..40dde29d8 --- /dev/null +++ b/parser/testdata/01046_trivial_count_query_distributed/query.sql @@ -0,0 +1,11 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS test_count; + +CREATE TABLE test_count (`pt` Date) ENGINE = MergeTree PARTITION BY pt ORDER BY pt SETTINGS index_granularity = 8192; + +INSERT INTO test_count values ('2019-12-12'); + +SELECT count(1) FROM remote('127.0.0.{1,1,2}', currentDatabase(), test_count); + +DROP TABLE test_count; diff --git a/parser/testdata/01047_no_alias_columns_with_table_aliases/ast.json b/parser/testdata/01047_no_alias_columns_with_table_aliases/ast.json new file mode 100644 index 000000000..32afb7025 --- /dev/null +++ b/parser/testdata/01047_no_alias_columns_with_table_aliases/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery requests (children 1)" + }, + { + "explain": " Identifier requests" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001040094, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01047_no_alias_columns_with_table_aliases/metadata.json b/parser/testdata/01047_no_alias_columns_with_table_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01047_no_alias_columns_with_table_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01047_no_alias_columns_with_table_aliases/query.sql b/parser/testdata/01047_no_alias_columns_with_table_aliases/query.sql new file mode 100644 index 000000000..72b0a12af --- /dev/null +++ b/parser/testdata/01047_no_alias_columns_with_table_aliases/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS requests; +CREATE TABLE requests ( + event_time DateTime, + event_date Date MATERIALIZED toDate(event_time), + event_tm DateTime ALIAS event_time +) ENGINE = MergeTree ORDER BY (event_time); + +INSERT INTO requests (event_time) VALUES ('2010-01-01 00:00:00'); + +select * from requests where event_date > '2000-01-01'; + +select * from requests as t where t.event_date > '2000-01-01'; +select * from requests as "t" where "t".event_date > '2000-01-01'; + +select * from requests as t where 
t.event_tm > toDate('2000-01-01'); +select * from requests as `t` where `t`.event_tm > toDate('2000-01-01'); + +DROP TABLE requests; diff --git a/parser/testdata/01047_nullable_rand/ast.json b/parser/testdata/01047_nullable_rand/ast.json new file mode 100644 index 000000000..21b072c5e --- /dev/null +++ b/parser/testdata/01047_nullable_rand/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function rand (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Literal 'Nullable(UInt8)'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001103867, + "rows_read": 12, + "bytes_read": 478 + } +} diff --git a/parser/testdata/01047_nullable_rand/metadata.json b/parser/testdata/01047_nullable_rand/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01047_nullable_rand/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01047_nullable_rand/query.sql b/parser/testdata/01047_nullable_rand/query.sql new file mode 100644 index 000000000..e5633637d --- /dev/null +++ b/parser/testdata/01047_nullable_rand/query.sql @@ -0,0 +1,13 @@ +select toTypeName(rand(cast(4 as Nullable(UInt8)))); +select toTypeName(randCanonical(CAST(4 as Nullable(UInt8)))); +select toTypeName(randConstant(CAST(4 as Nullable(UInt8)))); +select toTypeName(rand(Null)); +select toTypeName(randCanonical(Null)); +select toTypeName(randConstant(Null)); + +select rand(cast(4 as Nullable(UInt8))) * 0; +select randCanonical(cast(4 as Nullable(UInt8))) * 0; +select randConstant(CAST(4 as Nullable(UInt8))) * 0; +select rand(Null) * 0; +select randCanonical(Null) * 0; +select randConstant(Null) * 0; diff --git a/parser/testdata/01047_simple_aggregate_sizes_of_columns_bug/ast.json b/parser/testdata/01047_simple_aggregate_sizes_of_columns_bug/ast.json new file mode 100644 index 000000000..9ac95ef16 --- /dev/null +++ b/parser/testdata/01047_simple_aggregate_sizes_of_columns_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery column_size_bug (children 1)" + }, + { + "explain": " Identifier column_size_bug" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00112351, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/01047_simple_aggregate_sizes_of_columns_bug/metadata.json b/parser/testdata/01047_simple_aggregate_sizes_of_columns_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01047_simple_aggregate_sizes_of_columns_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01047_simple_aggregate_sizes_of_columns_bug/query.sql b/parser/testdata/01047_simple_aggregate_sizes_of_columns_bug/query.sql new file mode 100644 index 000000000..a2ea13b26 --- /dev/null +++ b/parser/testdata/01047_simple_aggregate_sizes_of_columns_bug/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS column_size_bug; + +CREATE TABLE 
column_size_bug (date_time DateTime, value SimpleAggregateFunction(sum,UInt64)) ENGINE = AggregatingMergeTree PARTITION BY toStartOfInterval(date_time, INTERVAL 1 DAY) ORDER BY (date_time) SETTINGS remove_empty_parts = 0; + +INSERT INTO column_size_bug VALUES(now(),1); +INSERT INTO column_size_bug VALUES(now(),1); + +ALTER TABLE column_size_bug DELETE WHERE value=1; + +-- wait for DELETE +SELECT sleep(1); + +OPTIMIZE TABLE column_size_bug; + +DROP TABLE column_size_bug; diff --git a/parser/testdata/01048_exists_query/ast.json b/parser/testdata/01048_exists_query/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01048_exists_query/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01048_exists_query/metadata.json b/parser/testdata/01048_exists_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01048_exists_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01048_exists_query/query.sql b/parser/testdata/01048_exists_query/query.sql new file mode 100644 index 000000000..8d0077f2c --- /dev/null +++ b/parser/testdata/01048_exists_query/query.sql @@ -0,0 +1,62 @@ +-- Tags: no-parallel + +EXISTS db_01048.t_01048; +EXISTS TABLE db_01048.t_01048; +EXISTS DICTIONARY db_01048.t_01048; + +DROP DATABASE IF EXISTS db_01048; +EXISTS DATABASE db_01048; +CREATE DATABASE db_01048; +EXISTS DATABASE db_01048; + +DROP TABLE IF EXISTS db_01048.t_01048; +EXISTS db_01048.t_01048; +EXISTS TABLE db_01048.t_01048; +EXISTS DICTIONARY db_01048.t_01048; + +CREATE TABLE db_01048.t_01048 (x UInt8) ENGINE = Memory; +EXISTS db_01048.t_01048; +EXISTS TABLE db_01048.t_01048; +EXISTS DICTIONARY db_01048.t_01048; + +DROP TABLE db_01048.t_01048; +EXISTS db_01048.t_01048; +EXISTS TABLE db_01048.t_01048; +EXISTS DICTIONARY db_01048.t_01048; + +DROP DICTIONARY IF EXISTS t_01048; +CREATE TEMPORARY TABLE t_01048 (x UInt8); +EXISTS t_01048; -- Does not work for temporary tables. This may need to be fixed. +EXISTS TABLE t_01048; +EXISTS DICTIONARY t_01048; + +CREATE DICTIONARY db_01048.t_01048 (k UInt64, v String) PRIMARY KEY k LAYOUT(FLAT()) SOURCE(HTTP(URL 'http://example.test/' FORMAT 'TSV')) LIFETIME(1000); +EXISTS db_01048.t_01048; +EXISTS TABLE db_01048.t_01048; -- Dictionaries are tables as well. But not all tables are dictionaries. +EXISTS DICTIONARY db_01048.t_01048; + +-- But dictionary tables cannot be dropped like ordinary tables.
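+-- DROP TABLE on a dictionary is expected to fail; DROP DICTIONARY must be used instead.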
+DROP TABLE db_01048.t_01048; -- { serverError CANNOT_DETACH_DICTIONARY_AS_TABLE } +DROP DICTIONARY db_01048.t_01048; +EXISTS db_01048.t_01048; +EXISTS TABLE db_01048.t_01048; +EXISTS DICTIONARY db_01048.t_01048; + + +CREATE TABLE db_01048.t_01048_2 (x UInt8) ENGINE = Memory; +CREATE VIEW db_01048.v_01048 AS SELECT * FROM db_01048.t_01048_2; +EXISTS VIEW db_01048.v_01048; +EXISTS VIEW db_01048.t_01048_2; +EXISTS VIEW db_01048.v_not_exist; +DROP VIEW db_01048.v_01048; +EXISTS VIEW db_01048.v_01048; +EXISTS VIEW db_01048.t_01048_2; +EXISTS VIEW db_01048.v_not_exist; +EXISTS VIEW db_not_exists.v_not_exist; +DROP TABLE db_01048.t_01048_2; + + +DROP DATABASE db_01048; +EXISTS db_01048.t_01048; +EXISTS TABLE db_01048.t_01048; +EXISTS DICTIONARY db_01048.t_01048; diff --git a/parser/testdata/01049_join_low_card_crash/ast.json b/parser/testdata/01049_join_low_card_crash/ast.json new file mode 100644 index 000000000..1e34ab6be --- /dev/null +++ b/parser/testdata/01049_join_low_card_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery Alpha (children 1)" + }, + { + "explain": " Identifier Alpha" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001230767, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/01049_join_low_card_crash/metadata.json b/parser/testdata/01049_join_low_card_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01049_join_low_card_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01049_join_low_card_crash/query.sql b/parser/testdata/01049_join_low_card_crash/query.sql new file mode 100644 index 000000000..57d162dd8 --- /dev/null +++ b/parser/testdata/01049_join_low_card_crash/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS Alpha; +DROP TABLE IF EXISTS Beta; + +CREATE TABLE Alpha (foo String, bar UInt64) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE Beta (foo LowCardinality(String), baz UInt64) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO Alpha VALUES ('a', 1); +INSERT INTO Beta VALUES ('a', 2), ('b', 3); + +SELECT * FROM Alpha FULL JOIN (SELECT 'b' as foo) js2 USING (foo) ORDER BY foo; +SELECT * FROM Alpha FULL JOIN Beta USING (foo) ORDER BY foo; +SELECT * FROM Alpha FULL JOIN Beta ON Alpha.foo = Beta.foo ORDER BY foo; + +-- https://github.com/ClickHouse/ClickHouse/issues/20315#issuecomment-789579457 +SELECT materialize(js2.k) FROM (SELECT toLowCardinality(number) AS k FROM numbers(1)) AS js1 FULL OUTER JOIN (SELECT number + 7 AS k FROM numbers(1)) AS js2 USING (k) ORDER BY js2.k; + +SET join_use_nulls = 1; + +SELECT * FROM Alpha FULL JOIN (SELECT 'b' as foo) js2 USING (foo) ORDER BY foo; +SELECT * FROM Alpha FULL JOIN Beta USING (foo) ORDER BY foo; +SELECT * FROM Alpha FULL JOIN Beta ON Alpha.foo = Beta.foo ORDER BY foo; +SELECT materialize(js2.k) FROM (SELECT toLowCardinality(number) AS k FROM numbers(1)) AS js1 FULL OUTER JOIN (SELECT number + 7 AS k FROM numbers(1)) AS js2 USING (k) ORDER BY js2.k; + +DROP TABLE Alpha; +DROP TABLE Beta; diff --git a/parser/testdata/01049_window_view_window_functions/ast.json b/parser/testdata/01049_window_view_window_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01049_window_view_window_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01049_window_view_window_functions/metadata.json 
b/parser/testdata/01049_window_view_window_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01049_window_view_window_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01049_window_view_window_functions/query.sql b/parser/testdata/01049_window_view_window_functions/query.sql new file mode 100644 index 000000000..f98e9a80b --- /dev/null +++ b/parser/testdata/01049_window_view_window_functions/query.sql @@ -0,0 +1,51 @@ +-- { echo } +SELECT tumble(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL 1 SECOND, 'US/Samoa'); +SELECT tumble(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL 1 MINUTE, 'US/Samoa'); +SELECT tumble(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' HOUR, 'US/Samoa'); +SELECT tumble(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, 'US/Samoa'); +SELECT tumble(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL 1 WEEK, 'US/Samoa'); +SELECT tumble(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' MONTH, 'US/Samoa'); +SELECT tumble(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' QUARTER, 'US/Samoa'); +SELECT tumble(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' YEAR, 'US/Samoa'); + +SELECT tumble(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, 'US/Samoa'); +SELECT tumbleStart(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, 'US/Samoa'); +SELECT toDateTime(tumbleStart(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, 'US/Samoa'), 'US/Samoa'); +SELECT toDateTime(tumbleStart(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, 'US/Samoa'), 'US/Samoa'); +SELECT tumbleStart(tumble(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, 'US/Samoa')); +SELECT tumbleEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, 'US/Samoa'); +SELECT toDateTime(tumbleEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, 'US/Samoa'), 'US/Samoa'); +SELECT toDateTime(tumbleEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, 'US/Samoa'), 'US/Samoa'); +SELECT tumbleEnd(tumble(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, 'US/Samoa')); + +SELECT hop(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL 1 SECOND, INTERVAL 3 SECOND, 'US/Samoa'); +SELECT hop(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL 1 MINUTE, INTERVAL 3 MINUTE, 'US/Samoa'); +SELECT hop(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL 1 HOUR, INTERVAL 3 HOUR, 'US/Samoa'); +SELECT hop(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL 1 DAY, INTERVAL 3 DAY, 'US/Samoa'); +SELECT hop(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL 1 WEEK, INTERVAL 3 WEEK, 'US/Samoa'); +SELECT hop(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL 1 MONTH, INTERVAL 3 MONTH, 'US/Samoa'); +SELECT hop(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL 1 QUARTER, INTERVAL 3 QUARTER, 'US/Samoa'); +SELECT hop(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL 1 YEAR, INTERVAL 3 YEAR, 'US/Samoa'); + +SELECT hop(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, INTERVAL '3' DAY, 'US/Samoa'); +SELECT hopStart(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, INTERVAL '3' DAY, 'US/Samoa'); +SELECT toDateTime(hopStart(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, INTERVAL '3' DAY, 'US/Samoa'), 'US/Samoa'); +SELECT toDateTime(hopStart(toDateTime('2020-01-09 12:00:01', 
'US/Samoa'), INTERVAL '1' DAY, INTERVAL '3' DAY, 'US/Samoa'), 'US/Samoa'); +SELECT hopStart(hop(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, INTERVAL '3' DAY, 'US/Samoa')); +SELECT hopEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, INTERVAL '3' DAY, 'US/Samoa'); +SELECT toDateTime(hopEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, INTERVAL '3' DAY, 'US/Samoa'), 'US/Samoa'); +SELECT toDateTime(hopEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, INTERVAL '3' DAY, 'US/Samoa'), 'US/Samoa'); +SELECT hopEnd(hop(toDateTime('2019-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' DAY, INTERVAL '3' DAY, 'US/Samoa')); + +SELECT hopStart(tuple()); -- { serverError ILLEGAL_COLUMN } +SELECT hopEnd(tuple()); -- { serverError ILLEGAL_COLUMN } +SELECT tumbleStart(tuple()); -- { serverError ILLEGAL_COLUMN } +SELECT tumbleEnd(tuple()); -- { serverError ILLEGAL_COLUMN } + +SELECT tumbleStart(toUInt32(42)) SETTINGS session_timezone='UTC'; +SELECT tumbleStart((now(), now(), 'meow')); -- { serverError ILLEGAL_COLUMN } +-- Check that it's not LOGICAL_ERROR. +create window view v to nonexist (x Int8) inner engine AggregatingMergeTree order by x as select x from nonexist group by tumble(now()) settings allow_experimental_window_view = 1, allow_experimental_analyzer = 0; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select hopEnd((makeDateTime(null), toDateTime('2025-02-07 17:23:42'))) SETTINGS session_timezone='UTC'; +select hopStart((toDateTime('2025-02-07 17:23:42'), makeDateTime(null))) SETTINGS session_timezone='UTC'; +select hopEnd((toDateTime('2025-02-07 17:23:42'), makeDateTime(null))); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/01049_zookeeper_synchronous_mutations_long/ast.json b/parser/testdata/01049_zookeeper_synchronous_mutations_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01049_zookeeper_synchronous_mutations_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01049_zookeeper_synchronous_mutations_long/metadata.json b/parser/testdata/01049_zookeeper_synchronous_mutations_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01049_zookeeper_synchronous_mutations_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01049_zookeeper_synchronous_mutations_long/query.sql b/parser/testdata/01049_zookeeper_synchronous_mutations_long/query.sql new file mode 100644 index 000000000..2458fe149 --- /dev/null +++ b/parser/testdata/01049_zookeeper_synchronous_mutations_long/query.sql @@ -0,0 +1,45 @@ +-- Tags: long, zookeeper + +DROP TABLE IF EXISTS table_for_synchronous_mutations1; +DROP TABLE IF EXISTS table_for_synchronous_mutations2; + +SELECT 'Replicated'; + +CREATE TABLE table_for_synchronous_mutations1(k UInt32, v1 UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_01049/table_for_synchronous_mutations', '1') ORDER BY k SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +CREATE TABLE table_for_synchronous_mutations2(k UInt32, v1 UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_01049/table_for_synchronous_mutations', '2') ORDER BY k SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO table_for_synchronous_mutations1 select number, number from numbers(100000); + +SYSTEM SYNC REPLICA table_for_synchronous_mutations2; + +ALTER TABLE table_for_synchronous_mutations1 
UPDATE v1 = v1 + 1 WHERE 1 SETTINGS mutations_sync = 2; + +SELECT is_done FROM system.mutations where database = currentDatabase() and table = 'table_for_synchronous_mutations1'; + +-- Another mutation, just to be sure that the previous one finished +ALTER TABLE table_for_synchronous_mutations1 UPDATE v1 = v1 + 1 WHERE 1 SETTINGS mutations_sync = 2; + +SELECT is_done FROM system.mutations where database = currentDatabase() and table = 'table_for_synchronous_mutations1'; + +DROP TABLE IF EXISTS table_for_synchronous_mutations1; +DROP TABLE IF EXISTS table_for_synchronous_mutations2; + +SELECT 'Normal'; + +DROP TABLE IF EXISTS table_for_synchronous_mutations_no_replication; + +CREATE TABLE table_for_synchronous_mutations_no_replication(k UInt32, v1 UInt64) ENGINE MergeTree ORDER BY k SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO table_for_synchronous_mutations_no_replication select number, number from numbers(100000); + +ALTER TABLE table_for_synchronous_mutations_no_replication UPDATE v1 = v1 + 1 WHERE 1 SETTINGS mutations_sync = 2; + +SELECT is_done FROM system.mutations where database = currentDatabase() and table = 'table_for_synchronous_mutations_no_replication'; + +-- Another mutation, just to be sure that the previous one finished +ALTER TABLE table_for_synchronous_mutations_no_replication UPDATE v1 = v1 + 1 WHERE 1 SETTINGS mutations_sync = 2; + +SELECT is_done FROM system.mutations where database = currentDatabase() and table = 'table_for_synchronous_mutations_no_replication'; + +DROP TABLE IF EXISTS table_for_synchronous_mutations_no_replication; diff --git a/parser/testdata/01050_clickhouse_dict_source_with_subquery/ast.json b/parser/testdata/01050_clickhouse_dict_source_with_subquery/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01050_clickhouse_dict_source_with_subquery/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01050_clickhouse_dict_source_with_subquery/metadata.json b/parser/testdata/01050_clickhouse_dict_source_with_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01050_clickhouse_dict_source_with_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01050_clickhouse_dict_source_with_subquery/query.sql b/parser/testdata/01050_clickhouse_dict_source_with_subquery/query.sql new file mode 100644 index 000000000..a790f384f --- /dev/null +++ b/parser/testdata/01050_clickhouse_dict_source_with_subquery/query.sql @@ -0,0 +1,17 @@ + +drop dictionary if exists {CLICKHOUSE_DATABASE:Identifier}.test_dict_01051_d; +drop table if exists {CLICKHOUSE_DATABASE:Identifier}.test_01051_d; +drop table if exists {CLICKHOUSE_DATABASE:Identifier}.test_view_01051_d; + +create table {CLICKHOUSE_DATABASE:Identifier}.test_01051_d (key UInt64, value String) engine = MergeTree order by key; +create view {CLICKHOUSE_DATABASE:Identifier}.test_view_01051_d (key UInt64, value String) as select k2 + 1 as key, v2 || '_x' as value from (select key + 2 as k2, value || '_y' as v2 from test_01051_d); + +insert into {CLICKHOUSE_DATABASE:Identifier}.test_01051_d values (1, 'a'); + +create dictionary {CLICKHOUSE_DATABASE:Identifier}.test_dict_01051_d (key UInt64, value String) primary key key source(clickhouse(host 'localhost' port '9000' user 'default' password '' db currentDatabase() table 'test_view_01051_d')) layout(flat()) lifetime(100500); + +select dictGet({CLICKHOUSE_DATABASE:String} || '.test_dict_01051_d', 'value',
toUInt64(4)); + +drop dictionary if exists {CLICKHOUSE_DATABASE:Identifier}.test_dict_01051_d; +drop table if exists {CLICKHOUSE_DATABASE:Identifier}.test_01051_d; +drop table if exists {CLICKHOUSE_DATABASE:Identifier}.test_view_01051_d; diff --git a/parser/testdata/01050_engine_join_crash/ast.json b/parser/testdata/01050_engine_join_crash/ast.json new file mode 100644 index 000000000..ae3bda2de --- /dev/null +++ b/parser/testdata/01050_engine_join_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery testJoinTable (children 1)" + }, + { + "explain": " Identifier testJoinTable" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001084096, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/01050_engine_join_crash/metadata.json b/parser/testdata/01050_engine_join_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01050_engine_join_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01050_engine_join_crash/query.sql b/parser/testdata/01050_engine_join_crash/query.sql new file mode 100644 index 000000000..db35497df --- /dev/null +++ b/parser/testdata/01050_engine_join_crash/query.sql @@ -0,0 +1,46 @@ +DROP TABLE IF EXISTS testJoinTable; + +SET any_join_distinct_right_table_keys = 1; +SET enable_optimize_predicate_expression = 0; + +CREATE TABLE testJoinTable (number UInt64, data String) ENGINE = Join(ANY, INNER, number) SETTINGS any_join_distinct_right_table_keys = 1; + +INSERT INTO testJoinTable VALUES (1, '1'), (2, '2'), (3, '3'); + +SELECT * FROM (SELECT * FROM numbers(10)) js1 INNER JOIN testJoinTable USING number; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } +SELECT * FROM (SELECT * FROM numbers(10)) js1 INNER JOIN (SELECT * FROM testJoinTable) js2 USING number ORDER BY number; +SELECT * FROM (SELECT * FROM numbers(10)) js1 ANY INNER JOIN testJoinTable USING number ORDER BY number; +SELECT * FROM testJoinTable ORDER BY number; + +DROP TABLE testJoinTable; + +SELECT '-'; + +DROP TABLE IF EXISTS master; +DROP TABLE IF EXISTS transaction; + +CREATE TABLE transaction (id Int32, value Float64, master_id Int32) ENGINE = MergeTree() ORDER BY id; +CREATE TABLE master (id Int32, name String) ENGINE = Join (ANY, LEFT, id) SETTINGS any_join_distinct_right_table_keys = 1; + +INSERT INTO master VALUES (1, 'ONE'); +INSERT INTO transaction VALUES (1, 52.5, 1); + +SELECT tx.id, tx.value, m.name FROM transaction tx ANY LEFT JOIN master m ON m.id = tx.master_id ORDER BY tx.id; + +DROP TABLE master; +DROP TABLE transaction; + +SELECT '-'; + +DROP TABLE IF EXISTS some_join; +DROP TABLE IF EXISTS tbl; + +CREATE TABLE tbl (eventDate Date, id String) ENGINE = MergeTree() PARTITION BY tuple() ORDER BY eventDate; +CREATE TABLE some_join (id String, value String) ENGINE = Join(ANY, LEFT, id) SETTINGS any_join_distinct_right_table_keys = 1; + +SELECT * FROM tbl AS t ANY LEFT JOIN some_join USING (id) ORDER BY id; +SELECT * FROM tbl AS t ANY LEFT JOIN some_join AS d USING (id) ORDER BY id; +-- TODO SELECT t.*, d.* FROM tbl AS t ANY LEFT JOIN some_join AS d USING (id); + +DROP TABLE some_join; +DROP TABLE tbl; diff --git a/parser/testdata/01050_engine_join_view_crash/ast.json b/parser/testdata/01050_engine_join_view_crash/ast.json new file mode 100644 index 000000000..591bd84ee --- /dev/null +++ b/parser/testdata/01050_engine_join_view_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + 
"type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery a (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001083945, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01050_engine_join_view_crash/metadata.json b/parser/testdata/01050_engine_join_view_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01050_engine_join_view_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01050_engine_join_view_crash/query.sql b/parser/testdata/01050_engine_join_view_crash/query.sql new file mode 100644 index 000000000..7da8613cd --- /dev/null +++ b/parser/testdata/01050_engine_join_view_crash/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS a; +DROP TABLE IF EXISTS b; +DROP TABLE IF EXISTS id1; +DROP TABLE IF EXISTS id2; + +CREATE TABLE a(`id1` UInt32, `id2` UInt32, `valA` UInt32) ENGINE = TinyLog; +CREATE TABLE id1(`id1` UInt32, `val1` UInt8) ENGINE = Join(ANY, LEFT, id1); +CREATE TABLE id2(`id2` UInt32, `val2` UInt8) ENGINE = Join(ANY, LEFT, id2); + +INSERT INTO a VALUES (1,1,1)(2,2,2)(3,3,3); +INSERT INTO id1 VALUES (1,1)(2,2)(3,3); +INSERT INTO id2 VALUES (1,1)(2,2)(3,3); + +SELECT * from (SELECT * FROM a ANY LEFT OUTER JOIN id1 USING id1) js1 ANY LEFT OUTER JOIN id2 USING id2; + +create view b as (SELECT * from (SELECT * FROM a ANY LEFT OUTER JOIN id1 USING id1) js1 ANY LEFT OUTER JOIN id2 USING id2); +SELECT '-'; +SELECT * FROM b; + +DROP TABLE a; +DROP TABLE b; +DROP TABLE id1; +DROP TABLE id2; diff --git a/parser/testdata/01050_group_array_sample/ast.json b/parser/testdata/01050_group_array_sample/ast.json new file mode 100644 index 000000000..b2f1415cd --- /dev/null +++ b/parser/testdata/01050_group_array_sample/ast.json @@ -0,0 +1,127 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " Function groupArraySample (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier v" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_123456" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (alias k) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Identifier number (alias v)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1024" + }, + { + "explain": " ExpressionList (children 1)" + 
}, + { + "explain": " Identifier k" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier k" + } + ], + + "rows": 35, + + "statistics": + { + "elapsed": 0.001173794, + "rows_read": 35, + "bytes_read": 1428 + } +} diff --git a/parser/testdata/01050_group_array_sample/metadata.json b/parser/testdata/01050_group_array_sample/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01050_group_array_sample/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01050_group_array_sample/query.sql b/parser/testdata/01050_group_array_sample/query.sql new file mode 100644 index 000000000..58b9abf73 --- /dev/null +++ b/parser/testdata/01050_group_array_sample/query.sql @@ -0,0 +1,4 @@ +select k, groupArraySample(10, 123456)(v) from (select number % 4 as k, number as v from numbers(1024)) group by k order by k; + +-- different seed +select k, groupArraySample(10, 1)(v) from (select number % 4 as k, number as v from numbers(1024)) group by k order by k; diff --git a/parser/testdata/01051_aggregate_function_crash/ast.json b/parser/testdata/01051_aggregate_function_crash/ast.json new file mode 100644 index 000000000..abfa47de0 --- /dev/null +++ b/parser/testdata/01051_aggregate_function_crash/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000951119, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01051_aggregate_function_crash/metadata.json b/parser/testdata/01051_aggregate_function_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01051_aggregate_function_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01051_aggregate_function_crash/query.sql b/parser/testdata/01051_aggregate_function_crash/query.sql new file mode 100644 index 000000000..9d208db3c --- /dev/null +++ b/parser/testdata/01051_aggregate_function_crash/query.sql @@ -0,0 +1,6 @@ +SET allow_deprecated_error_prone_window_functions = 1; + +SELECT runningAccumulate(string_state) +FROM ( + SELECT argMaxState(repeat('a', 48), 1) AS string_state +) diff --git a/parser/testdata/01051_all_join_engine/ast.json b/parser/testdata/01051_all_join_engine/ast.json new file mode 100644 index 000000000..73eab34e3 --- /dev/null +++ b/parser/testdata/01051_all_join_engine/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000936644, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01051_all_join_engine/metadata.json b/parser/testdata/01051_all_join_engine/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01051_all_join_engine/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01051_all_join_engine/query.sql b/parser/testdata/01051_all_join_engine/query.sql new file mode 100644 index 000000000..2a8da8b20 --- /dev/null +++ b/parser/testdata/01051_all_join_engine/query.sql @@ -0,0 +1,90 @@ +DROP TABLE IF EXISTS t1; + +DROP TABLE IF EXISTS left_join; +DROP TABLE IF EXISTS inner_join; +DROP TABLE IF EXISTS right_join; +DROP TABLE IF EXISTS full_join; + +CREATE TABLE t1 (x 
UInt32, str String) engine = Memory; + +CREATE TABLE left_join (x UInt32, s String) engine = Join(ALL, LEFT, x); +CREATE TABLE inner_join (x UInt32, s String) engine = Join(ALL, INNER, x); +CREATE TABLE right_join (x UInt32, s String) engine = Join(ALL, RIGHT, x); +CREATE TABLE full_join (x UInt32, s String) engine = Join(ALL, FULL, x); + +INSERT INTO t1 (x, str) VALUES (0, 'a1'), (1, 'a2'), (2, 'a3'), (3, 'a4'), (4, 'a5'); + +INSERT INTO left_join (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); +INSERT INTO inner_join (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); +INSERT INTO right_join (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); +INSERT INTO full_join (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); + +SET join_use_nulls = 0; + +SELECT 'left'; +SELECT * FROM t1 LEFT JOIN left_join j USING(x) ORDER BY x, str, s; + +SELECT 'inner'; +SELECT * FROM t1 INNER JOIN inner_join j USING(x) ORDER BY x, str, s; + +SELECT 'right'; +SELECT * FROM t1 RIGHT JOIN right_join j USING(x) ORDER BY x, str, s; + +SELECT 'full'; +SELECT * FROM t1 FULL JOIN full_join j USING(x) ORDER BY x, str, s; + +SET join_use_nulls = 1; + +SELECT * FROM t1 LEFT JOIN left_join j USING(x) ORDER BY x, str, s; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } +SELECT * FROM t1 FULL JOIN full_join j USING(x) ORDER BY x, str, s; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } + +SELECT 'inner (join_use_nulls mix)'; +SELECT * FROM t1 INNER JOIN inner_join j USING(x) ORDER BY x, str, s; + +SELECT 'right (join_use_nulls mix)'; +SELECT * FROM t1 RIGHT JOIN right_join j USING(x) ORDER BY x, str, s; + +DROP TABLE left_join; +DROP TABLE inner_join; +DROP TABLE right_join; +DROP TABLE full_join; + +CREATE TABLE left_join (x UInt32, s String) engine = Join(ALL, LEFT, x) SETTINGS join_use_nulls = 1; +CREATE TABLE inner_join (x UInt32, s String) engine = Join(ALL, INNER, x) SETTINGS join_use_nulls = 1; +CREATE TABLE right_join (x UInt32, s String) engine = Join(ALL, RIGHT, x) SETTINGS join_use_nulls = 1; +CREATE TABLE full_join (x UInt32, s String) engine = Join(ALL, FULL, x) SETTINGS join_use_nulls = 1; + +INSERT INTO left_join (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); +INSERT INTO inner_join (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); +INSERT INTO right_join (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); +INSERT INTO full_join (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); + +SELECT 'left (join_use_nulls)'; +SELECT * FROM t1 LEFT JOIN left_join j USING(x) ORDER BY x, str, s; + +SELECT 'inner (join_use_nulls)'; +SELECT * FROM t1 INNER JOIN inner_join j USING(x) ORDER BY x, str, s; + +SELECT 'right (join_use_nulls)'; +SELECT * FROM t1 RIGHT JOIN right_join j USING(x) ORDER BY x, str, s; + +SELECT 'full (join_use_nulls)'; +SELECT * FROM t1 FULL JOIN full_join j USING(x) ORDER BY x, str, s; + +SET join_use_nulls = 0; + +SELECT * FROM t1 LEFT JOIN left_join j USING(x) ORDER BY x, str, s; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } +SELECT * FROM t1 FULL JOIN full_join j USING(x) ORDER BY x, str, s; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } + +SELECT 'inner (join_use_nulls mix2)'; +SELECT * FROM t1 INNER JOIN inner_join j USING(x) ORDER BY x, str, s; + +SELECT 'right (join_use_nulls mix2)'; +SELECT * FROM t1 RIGHT JOIN right_join j USING(x) ORDER BY x, str, s; + +DROP TABLE t1; + 
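+-- Clean up the Join-engine tables created above.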
+DROP TABLE left_join; +DROP TABLE inner_join; +DROP TABLE right_join; +DROP TABLE full_join; diff --git a/parser/testdata/01051_new_any_join_engine/ast.json b/parser/testdata/01051_new_any_join_engine/ast.json new file mode 100644 index 000000000..d563ad762 --- /dev/null +++ b/parser/testdata/01051_new_any_join_engine/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001054874, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01051_new_any_join_engine/metadata.json b/parser/testdata/01051_new_any_join_engine/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01051_new_any_join_engine/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01051_new_any_join_engine/query.sql b/parser/testdata/01051_new_any_join_engine/query.sql new file mode 100644 index 000000000..5282e3a3a --- /dev/null +++ b/parser/testdata/01051_new_any_join_engine/query.sql @@ -0,0 +1,93 @@ +DROP TABLE IF EXISTS t1; + +DROP TABLE IF EXISTS any_left_join; +DROP TABLE IF EXISTS any_inner_join; +DROP TABLE IF EXISTS any_right_join; +DROP TABLE IF EXISTS any_full_join; + +DROP TABLE IF EXISTS semi_left_join; +DROP TABLE IF EXISTS semi_right_join; +DROP TABLE IF EXISTS anti_left_join; +DROP TABLE IF EXISTS anti_right_join; + +CREATE TABLE t1 (x UInt32, str String) engine = MergeTree ORDER BY tuple(); + +CREATE TABLE any_left_join (x UInt32, s String) engine = Join(ANY, LEFT, x); +CREATE TABLE any_inner_join (x UInt32, s String) engine = Join(ANY, INNER, x); +CREATE TABLE any_right_join (x UInt32, s String) engine = Join(ANY, RIGHT, x); + +CREATE TABLE semi_left_join (x UInt32, s String) engine = Join(SEMI, LEFT, x); +CREATE TABLE semi_right_join (x UInt32, s String) engine = Join(SEMI, RIGHT, x); + +CREATE TABLE anti_left_join (x UInt32, s String) engine = Join(ANTI, LEFT, x); +CREATE TABLE anti_right_join (x UInt32, s String) engine = Join(ANTI, RIGHT, x); + +INSERT INTO t1 (x, str) VALUES (0, 'a1'), (1, 'a2'), (2, 'a3'), (3, 'a4'), (4, 'a5'); + +INSERT INTO any_left_join (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); +INSERT INTO any_inner_join (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); +INSERT INTO any_right_join (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); + +INSERT INTO semi_left_join (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); +INSERT INTO semi_right_join (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); +INSERT INTO anti_left_join (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); +INSERT INTO anti_right_join (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); + +SET join_use_nulls = 0; +SET any_join_distinct_right_table_keys = 0; +SET parallel_replicas_local_plan=1; + +SELECT 'any left'; +SELECT * FROM t1 ANY LEFT JOIN any_left_join j USING(x) ORDER BY x, str, s; + +SELECT 'any inner'; +SELECT * FROM t1 ANY INNER JOIN any_inner_join j USING(x) ORDER BY x, str, s; + +SELECT 'any right'; +SELECT * FROM t1 ANY RIGHT JOIN any_right_join j USING(x) ORDER BY x, str, s; + +SELECT 'semi left'; +SELECT * FROM t1 SEMI LEFT JOIN semi_left_join j USING(x) ORDER BY x, str, s; + +SELECT 'semi right'; +SELECT * FROM 
t1 SEMI RIGHT JOIN semi_right_join j USING(x) ORDER BY x, str, s; + +SELECT 'anti left'; +SELECT * FROM t1 ANTI LEFT JOIN anti_left_join j USING(x) ORDER BY x, str, s; + +SELECT 'anti right'; +SELECT * FROM t1 ANTI RIGHT JOIN anti_right_join j USING(x) ORDER BY x, str, s; + +-- run the queries one more time (issue #16991) + +SELECT 'any left'; +SELECT * FROM t1 ANY LEFT JOIN any_left_join j USING(x) ORDER BY x, str, s; + +SELECT 'any inner'; +SELECT * FROM t1 ANY INNER JOIN any_inner_join j USING(x) ORDER BY x, str, s; + +SELECT 'any right'; +SELECT * FROM t1 ANY RIGHT JOIN any_right_join j USING(x) ORDER BY x, str, s; + +SELECT 'semi left'; +SELECT * FROM t1 SEMI LEFT JOIN semi_left_join j USING(x) ORDER BY x, str, s; + +SELECT 'semi right'; +SELECT * FROM t1 SEMI RIGHT JOIN semi_right_join j USING(x) ORDER BY x, str, s; + +SELECT 'anti left'; +SELECT * FROM t1 ANTI LEFT JOIN anti_left_join j USING(x) ORDER BY x, str, s; + +SELECT 'anti right'; +SELECT * FROM t1 ANTI RIGHT JOIN anti_right_join j USING(x) ORDER BY x, str, s; + +DROP TABLE t1; + +DROP TABLE any_left_join; +DROP TABLE any_inner_join; +DROP TABLE any_right_join; + +DROP TABLE semi_left_join; +DROP TABLE semi_right_join; +DROP TABLE anti_left_join; +DROP TABLE anti_right_join; diff --git a/parser/testdata/01051_random_printable_ascii/ast.json b/parser/testdata/01051_random_printable_ascii/ast.json new file mode 100644 index 000000000..ae183a4e8 --- /dev/null +++ b/parser/testdata/01051_random_printable_ascii/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function randomPrintableASCII (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1000" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.000962277, + "rows_read": 9, + "bytes_read": 366 + } +} diff --git a/parser/testdata/01051_random_printable_ascii/metadata.json b/parser/testdata/01051_random_printable_ascii/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01051_random_printable_ascii/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01051_random_printable_ascii/query.sql b/parser/testdata/01051_random_printable_ascii/query.sql new file mode 100644 index 000000000..8c259671b --- /dev/null +++ b/parser/testdata/01051_random_printable_ascii/query.sql @@ -0,0 +1,2 @@ +SELECT toTypeName(randomPrintableASCII(1000)); +SELECT length(randomPrintableASCII(1000)); diff --git a/parser/testdata/01051_same_name_alias_with_joins/ast.json b/parser/testdata/01051_same_name_alias_with_joins/ast.json new file mode 100644 index 000000000..893346fca --- /dev/null +++ b/parser/testdata/01051_same_name_alias_with_joins/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery a (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001037978, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01051_same_name_alias_with_joins/metadata.json b/parser/testdata/01051_same_name_alias_with_joins/metadata.json
new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01051_same_name_alias_with_joins/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01051_same_name_alias_with_joins/query.sql b/parser/testdata/01051_same_name_alias_with_joins/query.sql new file mode 100644 index 000000000..f42eea468 --- /dev/null +++ b/parser/testdata/01051_same_name_alias_with_joins/query.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS a; +DROP TABLE IF EXISTS b; +DROP TABLE IF EXISTS c; + +CREATE TABLE a (x UInt64) ENGINE = Memory; +CREATE TABLE b (x UInt64) ENGINE = Memory; +CREATE TABLE c (x UInt64) ENGINE = Memory; + +SET enable_optimize_predicate_expression = 0; + +SELECT a.x AS x FROM a +LEFT JOIN b ON a.x = b.x +LEFT JOIN c ON a.x = c.x; + +SELECT a.x AS x FROM a +LEFT JOIN b ON a.x = b.x +LEFT JOIN c ON b.x = c.x; + +SELECT b.x AS x FROM a +LEFT JOIN b ON a.x = b.x +LEFT JOIN c ON b.x = c.x; + +SELECT c.x AS x FROM a +LEFT JOIN b ON a.x = b.x +LEFT JOIN c ON b.x = c.x; + +DROP TABLE a; +DROP TABLE b; +DROP TABLE c; diff --git a/parser/testdata/01051_scalar_optimization/ast.json b/parser/testdata/01051_scalar_optimization/ast.json new file mode 100644 index 000000000..e7c883c05 --- /dev/null +++ b/parser/testdata/01051_scalar_optimization/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.00127747, + "rows_read": 20, + "bytes_read": 823 + } +} diff --git a/parser/testdata/01051_scalar_optimization/metadata.json b/parser/testdata/01051_scalar_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01051_scalar_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01051_scalar_optimization/query.sql b/parser/testdata/01051_scalar_optimization/query.sql new file mode 100644 index 000000000..3aa218d22 --- /dev/null +++ b/parser/testdata/01051_scalar_optimization/query.sql @@ -0,0 +1,6 @@ +SELECT (SELECT number FROM numbers(100) ORDER BY number LIMIT 1), + (SELECT number FROM numbers(100) ORDER BY number DESC LIMIT 1); + +SELECT 1 + WHERE 0=(SELECT number FROM numbers(2) ORDER BY number LIMIT 1) + AND 1=(SELECT number FROM numbers(2) ORDER BY number DESC LIMIT 1); diff --git a/parser/testdata/01052_array_reduce_exception/ast.json b/parser/testdata/01052_array_reduce_exception/ast.json new file mode 100644 index 000000000..1743308d2 --- /dev/null 
+++ b/parser/testdata/01052_array_reduce_exception/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayReduce (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'aggThrow(0.0001)'" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001177616, + "rows_read": 18, + "bytes_read": 718 + } +} diff --git a/parser/testdata/01052_array_reduce_exception/metadata.json b/parser/testdata/01052_array_reduce_exception/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01052_array_reduce_exception/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01052_array_reduce_exception/query.sql b/parser/testdata/01052_array_reduce_exception/query.sql new file mode 100644 index 000000000..55dfe8c15 --- /dev/null +++ b/parser/testdata/01052_array_reduce_exception/query.sql @@ -0,0 +1 @@ +SELECT arrayReduce('aggThrow(0.0001)', range(number % 10)) FROM system.numbers FORMAT Null; -- { serverError AGGREGATE_FUNCTION_THROW } diff --git a/parser/testdata/01053_drop_database_mat_view/ast.json b/parser/testdata/01053_drop_database_mat_view/ast.json new file mode 100644 index 000000000..44af576b1 --- /dev/null +++ b/parser/testdata/01053_drop_database_mat_view/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001171057, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01053_drop_database_mat_view/metadata.json b/parser/testdata/01053_drop_database_mat_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01053_drop_database_mat_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01053_drop_database_mat_view/query.sql b/parser/testdata/01053_drop_database_mat_view/query.sql new file mode 100644 index 000000000..6ab31fce6 --- /dev/null +++ b/parser/testdata/01053_drop_database_mat_view/query.sql @@ -0,0 +1,15 @@ +SET send_logs_level = 'fatal'; + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; +set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. 
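+-- The SET send_logs_level = 'fatal' above suppresses that warning in the test output.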
+CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier} ENGINE=Ordinary; -- Different inner table name with Atomic + +set allow_deprecated_syntax_for_merge_tree=1; +create table {CLICKHOUSE_DATABASE:Identifier}.my_table ENGINE = MergeTree(day, (day), 8192) as select today() as day, 'mystring' as str; +show tables from {CLICKHOUSE_DATABASE:Identifier}; +create materialized view {CLICKHOUSE_DATABASE:Identifier}.my_materialized_view ENGINE = MergeTree(day, (day), 8192) as select * from {CLICKHOUSE_DATABASE:Identifier}.my_table; +show tables from {CLICKHOUSE_DATABASE:Identifier}; +select * from {CLICKHOUSE_DATABASE:Identifier}.my_materialized_view; + +DROP DATABASE {CLICKHOUSE_DATABASE:Identifier}; diff --git a/parser/testdata/01053_if_chain_check/ast.json b/parser/testdata/01053_if_chain_check/ast.json new file mode 100644 index 000000000..4aea12a0a --- /dev/null +++ b/parser/testdata/01053_if_chain_check/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001167079, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01053_if_chain_check/metadata.json b/parser/testdata/01053_if_chain_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01053_if_chain_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01053_if_chain_check/query.sql b/parser/testdata/01053_if_chain_check/query.sql new file mode 100644 index 000000000..ee5ccf588 --- /dev/null +++ b/parser/testdata/01053_if_chain_check/query.sql @@ -0,0 +1,3 @@ +SET max_parser_depth = 4000; +SELECT x FROM (SELECT number % 16 = 0 ? nan : (number % 24 = 0 ? NULL : (number % 37 = 0 ? nan : (number % 34 = 0 ? nan : (number % 3 = 0 ? NULL : (number % 68 = 0 ? 42 : (number % 28 = 0 ? nan : (number % 46 = 0 ? nan : (number % 13 = 0 ? nan : (number % 27 = 0 ? NULL : (number % 39 = 0 ? NULL : (number % 27 = 0 ? NULL : (number % 30 = 0 ? NULL : (number % 72 = 0 ? NULL : (number % 36 = 0 ? NULL : (number % 51 = 0 ? NULL : (number % 58 = 0 ? nan : (number % 26 = 0 ? 42 : (number % 13 = 0 ? nan : (number % 12 = 0 ? NULL : (number % 22 = 0 ? nan : (number % 36 = 0 ? NULL : (number % 63 = 0 ? NULL : (number % 27 = 0 ? NULL : (number % 18 = 0 ? NULL : (number % 69 = 0 ? NULL : (number % 76 = 0 ? nan : (number % 42 = 0 ? NULL : (number % 9 = 0 ? NULL : (toFloat64(number)))))))))))))))))))))))))))))) AS x FROM system.numbers LIMIT 1001) ORDER BY x ASC NULLS FIRST; +SELECT x FROM (SELECT number % 22 = 0 ? nan : (number % 56 = 0 ? 42 : (number % 45 = 0 ? NULL : (number % 47 = 0 ? 42 : (number % 39 = 0 ? NULL : (number % 1 = 0 ? nan : (number % 43 = 0 ? nan : (number % 40 = 0 ? nan : (number % 42 = 0 ? NULL : (number % 26 = 0 ? 42 : (number % 41 = 0 ? 42 : (number % 6 = 0 ? NULL : (number % 39 = 0 ? NULL : (number % 34 = 0 ? nan : (number % 74 = 0 ? 42 : (number % 40 = 0 ? nan : (number % 37 = 0 ? nan : (number % 51 = 0 ? NULL : (number % 46 = 0 ? 
nan : (toFloat64(number)))))))))))))))))))) AS x FROM system.numbers LIMIT 1001) ORDER BY x ASC NULLS FIRST; diff --git a/parser/testdata/01054_cache_dictionary_overflow_cell/ast.json b/parser/testdata/01054_cache_dictionary_overflow_cell/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01054_cache_dictionary_overflow_cell/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01054_cache_dictionary_overflow_cell/metadata.json b/parser/testdata/01054_cache_dictionary_overflow_cell/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01054_cache_dictionary_overflow_cell/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01054_cache_dictionary_overflow_cell/query.sql b/parser/testdata/01054_cache_dictionary_overflow_cell/query.sql new file mode 100644 index 000000000..88c590141 --- /dev/null +++ b/parser/testdata/01054_cache_dictionary_overflow_cell/query.sql @@ -0,0 +1,58 @@ +-- Tags: no-parallel + +create database if not exists test_01054_overflow; +drop table if exists test_01054_overflow.ints; + +create table test_01054_overflow.ints (key UInt64, i8 Int8, i16 Int16, i32 Int32, i64 Int64, u8 UInt8, u16 UInt16, u32 UInt32, u64 UInt64) Engine = Memory; + +insert into test_01054_overflow.ints values (1, 1, 1, 1, 1, 1, 1, 1, 1); +insert into test_01054_overflow.ints values (2, 2, 2, 2, 2, 2, 2, 2, 2); +insert into test_01054_overflow.ints values (3, 3, 3, 3, 3, 3, 3, 3, 3); +insert into test_01054_overflow.ints values (4, 4, 4, 4, 4, 4, 4, 4, 4); +insert into test_01054_overflow.ints values (5, 5, 5, 5, 5, 5, 5, 5, 5); +insert into test_01054_overflow.ints values (6, 6, 6, 6, 6, 6, 6, 6, 6); +insert into test_01054_overflow.ints values (7, 7, 7, 7, 7, 7, 7, 7, 7); +insert into test_01054_overflow.ints values (8, 8, 8, 8, 8, 8, 8, 8, 8); +insert into test_01054_overflow.ints values (9, 9, 9, 9, 9, 9, 9, 9, 9); +insert into test_01054_overflow.ints values (10, 10, 10, 10, 10, 10, 10, 10, 10); +insert into test_01054_overflow.ints values (11, 11, 11, 11, 11, 11, 11, 11, 11); +insert into test_01054_overflow.ints values (12, 12, 12, 12, 12, 12, 12, 12, 12); +insert into test_01054_overflow.ints values (13, 13, 13, 13, 13, 13, 13, 13, 13); +insert into test_01054_overflow.ints values (14, 14, 14, 14, 14, 14, 14, 14, 14); +insert into test_01054_overflow.ints values (15, 15, 15, 15, 15, 15, 15, 15, 15); +insert into test_01054_overflow.ints values (16, 16, 16, 16, 16, 16, 16, 16, 16); +insert into test_01054_overflow.ints values (17, 17, 17, 17, 17, 17, 17, 17, 17); +insert into test_01054_overflow.ints values (18, 18, 18, 18, 18, 18, 18, 18, 18); +insert into test_01054_overflow.ints values (19, 19, 19, 19, 19, 19, 19, 19, 19); +insert into test_01054_overflow.ints values (20, 20, 20, 20, 20, 20, 20, 20, 20); + +select +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(1)), +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(2)), +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(3)), +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(4)), +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(5)), +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(6)), +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(7)), +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(8)), +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(9)), +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(10)), +dictGet('one_cell_cache_ints_overflow', 'i8', 
toUInt64(11)), +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(12)), +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(13)), +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(14)), +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(15)), +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(16)), +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(17)), +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(18)), +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(19)), +dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(20)); + +SELECT arrayMap(x -> dictGet('one_cell_cache_ints_overflow', 'i8', toUInt64(x)), array) +FROM +( + SELECT [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] AS array +); + +DROP TABLE if exists test_01054_overflow.ints; +DROP DATABASE test_01054_overflow; diff --git a/parser/testdata/01055_compact_parts/ast.json b/parser/testdata/01055_compact_parts/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01055_compact_parts/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01055_compact_parts/metadata.json b/parser/testdata/01055_compact_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01055_compact_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01055_compact_parts/query.sql b/parser/testdata/01055_compact_parts/query.sql new file mode 100644 index 000000000..e99af7643 --- /dev/null +++ b/parser/testdata/01055_compact_parts/query.sql @@ -0,0 +1,37 @@ +-- Testing basic functionality with compact parts +set mutations_sync = 2; +drop table if exists mt_compact; + +create table mt_compact(a UInt64, b UInt64 DEFAULT a * a, s String, n Nested(x UInt32, y String), lc LowCardinality(String)) +engine = MergeTree +order by a partition by a % 10 +settings index_granularity = 8, +min_bytes_for_wide_part = 0, +min_rows_for_wide_part = 10; + +insert into mt_compact (a, s, n.y, lc) select number, toString((number * 2132214234 + 5434543) % 2133443), ['a', 'b', 'c'], number % 2 ?
'bar' : 'baz' from numbers(90); + +select * from mt_compact order by a limit 10; +select '====================='; + +select distinct part_type from system.parts where database = currentDatabase() and table = 'mt_compact' and active; + +insert into mt_compact (a, s, n.x, lc) select number % 3, toString((number * 75434535 + 645645) % 2133443), [1, 2], toString(number) from numbers(5); + +optimize table mt_compact final; + +select part_type, count() from system.parts where database = currentDatabase() and table = 'mt_compact' and active group by part_type order by part_type; +select * from mt_compact order by a, s limit 10; +select '====================='; + +alter table mt_compact drop column n.y; +alter table mt_compact add column n.y Array(String) DEFAULT ['qwqw'] after n.x; +select * from mt_compact order by a, s limit 10; +select '====================='; + +alter table mt_compact update b = 42 where 1; + +select * from mt_compact where a > 1 order by a, s limit 10; +select '====================='; + +drop table if exists mt_compact; diff --git a/parser/testdata/01055_compact_parts_1/ast.json b/parser/testdata/01055_compact_parts_1/ast.json new file mode 100644 index 000000000..5ee8ab9bf --- /dev/null +++ b/parser/testdata/01055_compact_parts_1/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery mt_compact (children 3)" + }, + { + "explain": " Identifier mt_compact" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration a (children 1)" + }, + { + "explain": " DataType Int" + }, + { + "explain": " ColumnDeclaration s (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 3)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001062659, + "rows_read": 12, + "bytes_read": 399 + } +} diff --git a/parser/testdata/01055_compact_parts_1/metadata.json b/parser/testdata/01055_compact_parts_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01055_compact_parts_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01055_compact_parts_1/query.sql b/parser/testdata/01055_compact_parts_1/query.sql new file mode 100644 index 000000000..72048c59a --- /dev/null +++ b/parser/testdata/01055_compact_parts_1/query.sql @@ -0,0 +1,22 @@ +create table mt_compact (a Int, s String) engine = MergeTree order by a partition by a +settings index_granularity_bytes = 0; +alter table mt_compact modify setting min_rows_for_wide_part = 1000; -- { serverError NOT_IMPLEMENTED } +show create table mt_compact; + +create table mt_compact_2 (a Int, s String) engine = MergeTree order by a partition by a +settings min_rows_for_wide_part = 1000; +insert into mt_compact_2 values (1, 'a'); +alter table mt_compact attach partition 1 from mt_compact_2; -- { serverError BAD_ARGUMENTS } + +drop table mt_compact; +drop table mt_compact_2; + +set send_logs_level = 'error'; +create table mt_compact (a Int, s String) engine = MergeTree order by a partition by a +settings index_granularity_bytes = 0, min_rows_for_wide_part = 1000; + +-- Check that alter of other settings works +alter table mt_compact modify setting parts_to_delay_insert = 300; +alter table mt_compact modify setting min_rows_for_wide_part = 0; 
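+-- Hedged note: the earlier ALTER presumably fails with NOT_IMPLEMENTED because mt_compact was created with non-adaptive granularity (index_granularity_bytes = 0), where switching parts to the compact format is not supported; resetting min_rows_for_wide_part to 0 keeps every part wide, so it is accepted. An illustrative way to inspect the effective engine settings, kept commented out so the reference output is unchanged: +-- SELECT engine_full FROM system.tables WHERE database = currentDatabase() AND name = 'mt_compact';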
+ +show create table mt_compact; diff --git a/parser/testdata/01055_prewhere_bugs/ast.json b/parser/testdata/01055_prewhere_bugs/ast.json new file mode 100644 index 000000000..d0ffcdcca --- /dev/null +++ b/parser/testdata/01055_prewhere_bugs/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_prewhere_default_column (children 1)" + }, + { + "explain": " Identifier test_prewhere_default_column" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001337136, + "rows_read": 2, + "bytes_read": 108 + } +} diff --git a/parser/testdata/01055_prewhere_bugs/metadata.json b/parser/testdata/01055_prewhere_bugs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01055_prewhere_bugs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01055_prewhere_bugs/query.sql b/parser/testdata/01055_prewhere_bugs/query.sql new file mode 100644 index 000000000..3929356ce --- /dev/null +++ b/parser/testdata/01055_prewhere_bugs/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS test_prewhere_default_column; +DROP TABLE IF EXISTS test_prewhere_column_type; + +CREATE TABLE test_prewhere_default_column (APIKey UInt8, SessionType UInt8) ENGINE = MergeTree() PARTITION BY APIKey ORDER BY tuple(); +INSERT INTO test_prewhere_default_column VALUES( 42, 42 ); +ALTER TABLE test_prewhere_default_column ADD COLUMN OperatingSystem UInt64 DEFAULT SessionType+1; + +SELECT OperatingSystem FROM test_prewhere_default_column PREWHERE SessionType = 42; + + +CREATE TABLE test_prewhere_column_type (`a` LowCardinality(String), `x` Nullable(Int32)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO test_prewhere_column_type VALUES ('', 2); + +SELECT a, y FROM test_prewhere_column_type prewhere (x = 2) AS y; +SELECT a, toTypeName(x = 2), toTypeName(x) FROM test_prewhere_column_type where (x = 2) AS y; + +DROP TABLE test_prewhere_default_column; +DROP TABLE test_prewhere_column_type; diff --git a/parser/testdata/01056_create_table_as/ast.json b/parser/testdata/01056_create_table_as/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01056_create_table_as/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01056_create_table_as/metadata.json b/parser/testdata/01056_create_table_as/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01056_create_table_as/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01056_create_table_as/query.sql b/parser/testdata/01056_create_table_as/query.sql new file mode 100644 index 000000000..dbcab489f --- /dev/null +++ b/parser/testdata/01056_create_table_as/query.sql @@ -0,0 +1,50 @@ + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS v; +DROP TABLE IF EXISTS lv; + +CREATE TABLE t1 (key Int) Engine=Memory; +CREATE TABLE t2 AS t1; +DROP TABLE t2; +CREATE TABLE t2 Engine=Memory AS t1; +DROP TABLE t2; +CREATE TABLE t2 AS t1 Engine=Memory; +DROP TABLE t2; +CREATE TABLE t3 AS numbers(10); +DROP TABLE t3; + +-- view +CREATE VIEW v AS SELECT * FROM t1; +CREATE TABLE t3 AS v; -- { serverError INCORRECT_QUERY } +DROP TABLE v; + +-- dictionary +DROP DICTIONARY IF EXISTS dict; +DROP DATABASE if exists {CLICKHOUSE_DATABASE_1:Identifier}; +CREATE DATABASE {CLICKHOUSE_DATABASE_1:Identifier}; +CREATE TABLE {CLICKHOUSE_DATABASE_1:Identifier}.dict_data (key Int, value UInt16) Engine=Memory(); 
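+-- The dictionary below is backed by dict_data in {CLICKHOUSE_DATABASE_1:Identifier}, read back through the server's own TCP port; like the view above, a dictionary is not a plain table, so the later CREATE TABLE t3 AS dict is expected to fail with INCORRECT_QUERY.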
+CREATE DICTIONARY dict +( + `key` UInt64, + `value` UInt16 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE( + HOST '127.0.0.1' PORT tcpPort() + TABLE 'dict_data' DB concat(currentDatabase(), '_1') USER 'default' PASSWORD '')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(SPARSE_HASHED()); +CREATE TABLE t3 AS dict; -- { serverError INCORRECT_QUERY } + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t3; +DROP DICTIONARY dict; +DROP TABLE {CLICKHOUSE_DATABASE_1:Identifier}.dict_data; + +DROP DATABASE {CLICKHOUSE_DATABASE_1:Identifier}; + +CREATE TABLE t1 (x String) ENGINE = Memory AS SELECT 1; +SELECT x, toTypeName(x) FROM t1; +DROP TABLE t1; diff --git a/parser/testdata/01056_create_table_as_with_sorting_clauses/ast.json b/parser/testdata/01056_create_table_as_with_sorting_clauses/ast.json new file mode 100644 index 000000000..835396393 --- /dev/null +++ b/parser/testdata/01056_create_table_as_with_sorting_clauses/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery x (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001104735, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01056_create_table_as_with_sorting_clauses/metadata.json b/parser/testdata/01056_create_table_as_with_sorting_clauses/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01056_create_table_as_with_sorting_clauses/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01056_create_table_as_with_sorting_clauses/query.sql b/parser/testdata/01056_create_table_as_with_sorting_clauses/query.sql new file mode 100644 index 000000000..9ecfdbabd --- /dev/null +++ b/parser/testdata/01056_create_table_as_with_sorting_clauses/query.sql @@ -0,0 +1,58 @@ +DROP TABLE IF EXISTS x; +DROP TABLE IF EXISTS x_as; + +SELECT '-------------- Test copy sorting clauses from source table --------------'; +CREATE TABLE x (`CounterID` UInt32, `EventDate` Date, `UserID` UInt64) ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID); +CREATE TABLE x_as AS x ENGINE = MergeTree SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +SHOW CREATE TABLE x FORMAT TSVRaw; +SELECT '-------------------------------------------------------------------------'; +SHOW CREATE TABLE x_as FORMAT TSVRaw; + +DROP TABLE x; +DROP TABLE x_as; + +SELECT '-------------- Test copy sorting clauses from destination table (source table without the same type clauses) --------------'; +CREATE TABLE x (`CounterID` UInt32, `EventDate` Date, `UserID` UInt64) ENGINE = MergeTree PRIMARY KEY (CounterID, EventDate, intHash32(UserID)); +CREATE TABLE x_as AS x ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +SHOW CREATE TABLE x FORMAT TSVRaw; +SELECT '-------------------------------------------------------------------------'; +SHOW CREATE TABLE x_as FORMAT TSVRaw; + +DROP TABLE x; +DROP TABLE x_as; + +SELECT '-------------- Test copy sorting clauses from destination table (source table with the same type clauses) --------------'; +CREATE TABLE x (`CounterID` UInt32, `EventDate` Date, `UserID` UInt64) ENGINE = MergeTree ORDER BY (CounterID); +CREATE TABLE x_as AS x ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) ORDER BY 
(CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +SHOW CREATE TABLE x FORMAT TSVRaw; +SELECT '-------------------------------------------------------------------------'; +SHOW CREATE TABLE x_as FORMAT TSVRaw; + +DROP TABLE x; +DROP TABLE x_as; + +SELECT '-------------- Test compatibility with allow_deprecated_syntax_for_merge_tree (source table is old syntax) --------------'; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE x (`CounterID` UInt32, `EventDate` Date, `UserID` UInt64) ENGINE = MergeTree(EventDate, intHash32(UserID), (CounterID, EventDate, intHash32(UserID)), 8192); +CREATE TABLE x_as AS x; + +SHOW CREATE TABLE x FORMAT TSVRaw; +SELECT '-------------------------------------------------------------------------'; +SHOW CREATE TABLE x_as FORMAT TSVRaw; + +DROP TABLE x; +DROP TABLE x_as; + +SELECT '-------------- Test compatibility with allow_deprecated_syntax_for_merge_tree (source table is new syntax) --------------'; +CREATE TABLE x (`CounterID` UInt32, `EventDate` Date, `UserID` UInt64) ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID); +CREATE TABLE x_as AS x ENGINE = MergeTree SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +SHOW CREATE TABLE x FORMAT TSVRaw; +SELECT '-------------------------------------------------------------------------'; +SHOW CREATE TABLE x_as FORMAT TSVRaw; + +DROP TABLE x; +DROP TABLE x_as; \ No newline at end of file diff --git a/parser/testdata/01056_negative_with_bloom_filter/ast.json b/parser/testdata/01056_negative_with_bloom_filter/ast.json new file mode 100644 index 000000000..cfbf20b3f --- /dev/null +++ b/parser/testdata/01056_negative_with_bloom_filter/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001205337, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01056_negative_with_bloom_filter/metadata.json b/parser/testdata/01056_negative_with_bloom_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01056_negative_with_bloom_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01056_negative_with_bloom_filter/query.sql b/parser/testdata/01056_negative_with_bloom_filter/query.sql new file mode 100644 index 000000000..4816d865c --- /dev/null +++ b/parser/testdata/01056_negative_with_bloom_filter/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test (`int8` Int8, `int16` Int16, `int32` Int32, `int64` Int64, INDEX idx (`int8`, `int16`, `int32`, `int64`) TYPE bloom_filter(0.01) GRANULARITY 8192 ) ENGINE = MergeTree() ORDER BY `int8`; + +INSERT INTO test VALUES (-1, -1, -1, -1); + +SELECT * FROM test WHERE `int8` = -1; +SELECT * FROM test WHERE `int16` = -1; +SELECT * FROM test WHERE `int32` = -1; +SELECT * FROM test WHERE `int64` = -1; + +DROP TABLE IF EXISTS test; diff --git a/parser/testdata/01056_predicate_optimizer_bugs/ast.json b/parser/testdata/01056_predicate_optimizer_bugs/ast.json new file mode 100644 index 000000000..a0c1a88c6 --- /dev/null +++ b/parser/testdata/01056_predicate_optimizer_bugs/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + 
"data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001155915, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01056_predicate_optimizer_bugs/metadata.json b/parser/testdata/01056_predicate_optimizer_bugs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01056_predicate_optimizer_bugs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01056_predicate_optimizer_bugs/query.sql b/parser/testdata/01056_predicate_optimizer_bugs/query.sql new file mode 100644 index 000000000..07f94c03e --- /dev/null +++ b/parser/testdata/01056_predicate_optimizer_bugs/query.sql @@ -0,0 +1,87 @@ +SET enable_optimize_predicate_expression = 1; +SET joined_subquery_requires_alias = 0; +SET convert_query_to_cnf = 0; +SET allow_deprecated_error_prone_window_functions = 1; + +-- https://github.com/ClickHouse/ClickHouse/issues/3885 +-- https://github.com/ClickHouse/ClickHouse/issues/5485 +EXPLAIN SYNTAX SELECT k, v, d, i FROM (SELECT t.1 AS k, t.2 AS v, runningDifference(v) AS d, runningDifference(cityHash64(t.1)) AS i FROM ( SELECT arrayJoin([('a', 1), ('a', 2), ('a', 3), ('b', 11), ('b', 13), ('b', 15)]) AS t)) WHERE i = 0; +SELECT k, v, d, i FROM (SELECT t.1 AS k, t.2 AS v, runningDifference(v) AS d, runningDifference(cityHash64(t.1)) AS i FROM ( SELECT arrayJoin([('a', 1), ('a', 2), ('a', 3), ('b', 11), ('b', 13), ('b', 15)]) AS t)) WHERE i = 0; + +-- https://github.com/ClickHouse/ClickHouse/issues/5682 +EXPLAIN SYNTAX SELECT co,co2,co3,num FROM ( SELECT co,co2,co3,count() AS num FROM (SELECT dummy+1 AS co,dummy+2 AS co2 ,dummy+3 AS co3) GROUP BY cube (co,co2,co3) ) WHERE co!=0 AND co2 !=2; +SELECT co,co2,co3,num FROM ( SELECT co,co2,co3,count() AS num FROM (SELECT dummy+1 AS co,dummy+2 AS co2 ,dummy+3 AS co3) GROUP BY cube (co,co2,co3) ) WHERE co!=0 AND co2 !=2; + +-- https://github.com/ClickHouse/ClickHouse/issues/6734 +EXPLAIN SYNTAX SELECT name FROM ( SELECT name FROM system.settings ) ANY INNER JOIN ( SELECT name FROM system.settings ) USING (name) WHERE name = 'enable_optimize_predicate_expression'; +SELECT name FROM ( SELECT name FROM system.settings ) ANY INNER JOIN ( SELECT name FROM system.settings ) USING (name) WHERE name = 'enable_optimize_predicate_expression'; + +-- https://github.com/ClickHouse/ClickHouse/issues/6767 +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS view1; + +CREATE TABLE t1 (id UInt32, value1 String ) ENGINE ReplacingMergeTree() ORDER BY id; +CREATE TABLE t2 (id UInt32, value2 String ) ENGINE ReplacingMergeTree() ORDER BY id; +CREATE TABLE t3 (id UInt32, value3 String ) ENGINE ReplacingMergeTree() ORDER BY id; + +INSERT INTO t1 (id, value1) VALUES (1, 'val11'); +INSERT INTO t2 (id, value2) VALUES (1, 'val21'); +INSERT INTO t3 (id, value3) VALUES (1, 'val31'); + +CREATE VIEW IF NOT EXISTS view1 AS SELECT t1.id AS id, t1.value1 AS value1, t2.value2 AS value2, t3.value3 AS value3 FROM t1 LEFT JOIN t2 ON t1.id = t2.id LEFT JOIN t3 ON t1.id = t3.id WHERE t1.id > 0; +SELECT * FROM view1 WHERE id = 1; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS view1; + +-- https://github.com/ClickHouse/ClickHouse/issues/7136 +EXPLAIN SYNTAX SELECT ccc FROM ( SELECT 1 AS ccc UNION ALL SELECT * FROM ( SELECT 2 AS ccc ) ANY INNER JOIN ( SELECT 2 AS ccc ) USING (ccc) ) WHERE ccc > 1; +SELECT ccc FROM ( SELECT 1 AS ccc UNION ALL SELECT * FROM ( SELECT 2 AS ccc ) ANY 
INNER JOIN ( SELECT 2 AS ccc ) USING (ccc) ) WHERE ccc > 1; + +-- https://github.com/ClickHouse/ClickHouse/issues/5674 +-- https://github.com/ClickHouse/ClickHouse/issues/4731 +-- https://github.com/ClickHouse/ClickHouse/issues/4904 +DROP TABLE IF EXISTS A; +DROP TABLE IF EXISTS B; + +CREATE TABLE A (ts DateTime, id String, id_b String) ENGINE = MergeTree PARTITION BY toStartOfHour(ts) ORDER BY (ts,id); +CREATE TABLE B (ts DateTime, id String, id_c String) ENGINE = MergeTree PARTITION BY toStartOfHour(ts) ORDER BY (ts,id); + +EXPLAIN SYNTAX SELECT ts, id, id_b, b.ts, b.id, id_c FROM (SELECT ts, id, id_b FROM A) AS a ALL LEFT JOIN B AS b ON b.id = a.id_b WHERE a.ts <= toDateTime('1970-01-01 03:00:00'); +EXPLAIN SYNTAX SELECT ts AS `--a.ts`, id AS `--a.id`, id_b AS `--a.id_b`, b.ts AS `--b.ts`, b.id AS `--b.id`, id_c AS `--b.id_c` FROM (SELECT ts, id, id_b FROM A) AS a ALL LEFT JOIN B AS b ON `--b.id` = `--a.id_b` WHERE `--a.ts` <= toDateTime('1970-01-01 03:00:00'); + +DROP TABLE IF EXISTS A; +DROP TABLE IF EXISTS B; + +-- https://github.com/ClickHouse/ClickHouse/issues/7802 +DROP TABLE IF EXISTS test; + +CREATE TABLE test ( A Int32, B Int32 ) ENGINE = Memory(); + +INSERT INTO test VALUES(1, 2)(0, 3)(1, 4)(0, 5); + +SELECT B, neighbor(B, 1) AS next_B FROM (SELECT * FROM test ORDER BY B); +SELECT B, neighbor(B, 1) AS next_B FROM (SELECT * FROM test ORDER BY B) WHERE A == 1; +SELECT B, next_B FROM (SELECT A, B, neighbor(B, 1) AS next_B FROM (SELECT * FROM test ORDER BY B)) WHERE A == 1; + +DROP TABLE IF EXISTS test; + +EXPLAIN SYNTAX SELECT * FROM (SELECT * FROM system.one) WHERE arrayMap(x -> x + 1, [dummy]) = [1]; +SELECT * FROM (SELECT * FROM system.one) WHERE arrayMap(x -> x + 1, [dummy]) = [1]; + +EXPLAIN SYNTAX SELECT * FROM (SELECT 1 AS id, 2 AS value) INNER JOIN (SELECT 1 AS id, 3 AS value_1) USING id WHERE arrayMap(x -> x + value + value_1, [1]) = [6]; +SELECT * FROM (SELECT 1 AS id, 2 AS value) INNER JOIN (SELECT 1 AS id, 3 AS value_1) USING id WHERE arrayMap(x -> x + value + value_1, [1]) = [6]; + +-- check order is preserved +EXPLAIN SYNTAX SELECT * FROM system.one HAVING dummy > 0 AND dummy < 0; + +-- from #10613 +SELECT name, count() AS cnt +FROM remote('127.{1,2}', system.settings) +GROUP BY name +HAVING (max(value) > '9') AND (min(changed) = 0) +FORMAT Null; diff --git a/parser/testdata/01060_defaults_all_columns/ast.json b/parser/testdata/01060_defaults_all_columns/ast.json new file mode 100644 index 000000000..4069749de --- /dev/null +++ b/parser/testdata/01060_defaults_all_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery defaults_all_columns (children 1)" + }, + { + "explain": " Identifier defaults_all_columns" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001230696, + "rows_read": 2, + "bytes_read": 92 + } +} diff --git a/parser/testdata/01060_defaults_all_columns/metadata.json b/parser/testdata/01060_defaults_all_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01060_defaults_all_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01060_defaults_all_columns/query.sql b/parser/testdata/01060_defaults_all_columns/query.sql new file mode 100644 index 000000000..74fad7f75 --- /dev/null +++ b/parser/testdata/01060_defaults_all_columns/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS defaults_all_columns; + +CREATE TABLE defaults_all_columns (n UInt8 DEFAULT 42, s String DEFAULT 
concat('test', CAST(n, 'String'))) ENGINE = Memory; + +INSERT INTO defaults_all_columns FORMAT JSONEachRow {"n": 1, "s": "hello"} {}; + +INSERT INTO defaults_all_columns FORMAT JSONEachRow {"n": 2}, {"s": "world"}; + +SELECT * FROM defaults_all_columns ORDER BY n, s; + +DROP TABLE defaults_all_columns; diff --git a/parser/testdata/01060_shutdown_table_after_detach/ast.json b/parser/testdata/01060_shutdown_table_after_detach/ast.json new file mode 100644 index 000000000..c8fc105af --- /dev/null +++ b/parser/testdata/01060_shutdown_table_after_detach/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001392019, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01060_shutdown_table_after_detach/metadata.json b/parser/testdata/01060_shutdown_table_after_detach/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01060_shutdown_table_after_detach/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01060_shutdown_table_after_detach/query.sql b/parser/testdata/01060_shutdown_table_after_detach/query.sql new file mode 100644 index 000000000..a63d58bae --- /dev/null +++ b/parser/testdata/01060_shutdown_table_after_detach/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test Engine = MergeTree ORDER BY number SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi' AS SELECT number, toString(rand()) x from numbers(10000000); + +SELECT count() FROM test; + +ALTER TABLE test DETACH PARTITION tuple(); + +SELECT count() FROM test; + +DETACH TABLE test; +ATTACH TABLE test; + +ALTER TABLE test ATTACH PARTITION tuple(); + +SELECT count() FROM test; + +DROP TABLE test; diff --git a/parser/testdata/01061_alter_codec_with_type/ast.json b/parser/testdata/01061_alter_codec_with_type/ast.json new file mode 100644 index 000000000..d0f4da8ee --- /dev/null +++ b/parser/testdata/01061_alter_codec_with_type/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alter_bug (children 1)" + }, + { + "explain": " Identifier alter_bug" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00128822, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/01061_alter_codec_with_type/metadata.json b/parser/testdata/01061_alter_codec_with_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01061_alter_codec_with_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01061_alter_codec_with_type/query.sql b/parser/testdata/01061_alter_codec_with_type/query.sql new file mode 100644 index 000000000..7f662c746 --- /dev/null +++ b/parser/testdata/01061_alter_codec_with_type/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS alter_bug; + +create table alter_bug ( + epoch UInt64 CODEC(Delta,LZ4), + _time_dec Float64 +) Engine = MergeTree ORDER BY (epoch); + + +SELECT name, type, compression_codec FROM system.columns WHERE table='alter_bug' AND database=currentDatabase(); + +ALTER TABLE alter_bug MODIFY COLUMN epoch DEFAULT toUInt64(_time_dec) CODEC(Delta,LZ4); + +SELECT name, type, default_expression, compression_codec FROM system.columns WHERE table='alter_bug' AND database=currentDatabase(); + +INSERT INTO alter_bug(_time_dec) 
VALUES(1577351080); + +SELECT * FROM alter_bug; + +DROP TABLE IF EXISTS alter_bug; diff --git a/parser/testdata/01062_alter_on_mutataion_zookeeper_long/ast.json b/parser/testdata/01062_alter_on_mutataion_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01062_alter_on_mutataion_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01062_alter_on_mutataion_zookeeper_long/metadata.json b/parser/testdata/01062_alter_on_mutataion_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01062_alter_on_mutataion_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01062_alter_on_mutataion_zookeeper_long/query.sql b/parser/testdata/01062_alter_on_mutataion_zookeeper_long/query.sql new file mode 100644 index 000000000..8443fb785 --- /dev/null +++ b/parser/testdata/01062_alter_on_mutataion_zookeeper_long/query.sql @@ -0,0 +1,75 @@ +-- Tags: long, zookeeper, no-replicated-database, no-shared-merge-tree +-- Tag no-replicated-database: Old syntax is not allowed +-- no-shared-merge-tree: old syntax not allowed + +DROP TABLE IF EXISTS test_alter_on_mutation; + +CREATE TABLE test_alter_on_mutation +( + date Date, + key UInt64, + value String +) +ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_01062/alter_on_mutation', '1') +ORDER BY key PARTITION BY date; + +INSERT INTO test_alter_on_mutation select toDate('2020-01-05'), number, toString(number) from system.numbers limit 100; +INSERT INTO test_alter_on_mutation select toDate('2020-01-06'), number, toString(number) from system.numbers limit 100; +INSERT INTO test_alter_on_mutation select toDate('2020-01-07'), number, toString(number) from system.numbers limit 100; + +SELECT sum(cast(value as UInt64)) from test_alter_on_mutation; + +ALTER TABLE test_alter_on_mutation MODIFY COLUMN value UInt64; + +SELECT sum(value) from test_alter_on_mutation; + +INSERT INTO test_alter_on_mutation select toDate('2020-01-05'), number, toString(number) from system.numbers limit 100, 100; +INSERT INTO test_alter_on_mutation select toDate('2020-01-06'), number, toString(number) from system.numbers limit 100, 100; +INSERT INTO test_alter_on_mutation select toDate('2020-01-07'), number, toString(number) from system.numbers limit 100, 100; + +OPTIMIZE TABLE test_alter_on_mutation FINAL; + +SELECT sum(value) from test_alter_on_mutation; + +ALTER TABLE test_alter_on_mutation MODIFY COLUMN value String; + +SELECT sum(cast(value as UInt64)) from test_alter_on_mutation; + +OPTIMIZE TABLE test_alter_on_mutation FINAL; + +SELECT sum(cast(value as UInt64)) from test_alter_on_mutation; + +ALTER TABLE test_alter_on_mutation ADD COLUMN value1 Float64; + +SELECT sum(value1) from test_alter_on_mutation; + +ALTER TABLE test_alter_on_mutation DROP COLUMN value; + +SELECT sum(value) from test_alter_on_mutation; -- {serverError UNKNOWN_IDENTIFIER} + +ALTER TABLE test_alter_on_mutation ADD COLUMN value String DEFAULT '10'; + +SELECT sum(cast(value as UInt64)) from test_alter_on_mutation; + +OPTIMIZE table test_alter_on_mutation FINAL; + +ALTER TABLE test_alter_on_mutation MODIFY COLUMN value UInt64 DEFAULT 10; + +SELECT sum(value) from test_alter_on_mutation; + +DROP TABLE IF EXISTS test_alter_on_mutation; + +DROP TABLE IF EXISTS nested_alter; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE nested_alter (`d` Date, `k` UInt64, `i32` Int32, `dt` DateTime, `n.ui8` Array(UInt8), `n.s` 
Array(String), `n.d` Array(Date), `s` String DEFAULT '0') ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01062/nested_alter', 'r2', d, k, 8192); + +INSERT INTO nested_alter VALUES ('2015-01-01', 6,38,'2014-07-15 13:26:50',[10,20,30],['asd','qwe','qwe'],['2000-01-01','2000-01-01','2000-01-03'],'100500'); + +SELECT * FROM nested_alter; + +ALTER TABLE nested_alter DROP COLUMN `n.d`; + +SELECT * FROM nested_alter; + +DROP TABLE nested_alter; diff --git a/parser/testdata/01062_pm_all_join_with_block_continuation/ast.json b/parser/testdata/01062_pm_all_join_with_block_continuation/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01062_pm_all_join_with_block_continuation/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01062_pm_all_join_with_block_continuation/metadata.json b/parser/testdata/01062_pm_all_join_with_block_continuation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01062_pm_all_join_with_block_continuation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01062_pm_all_join_with_block_continuation/query.sql b/parser/testdata/01062_pm_all_join_with_block_continuation/query.sql new file mode 100644 index 000000000..4170ae13e --- /dev/null +++ b/parser/testdata/01062_pm_all_join_with_block_continuation/query.sql @@ -0,0 +1,82 @@ +-- Tags: no-asan +-- the test is too slow to pass the flaky check when changed + +SET max_memory_usage = 50000000; +SET join_algorithm = 'partial_merge'; +SET analyzer_compatibility_join_using_top_level_identifier = 1; +SELECT 'defaults'; + +SELECT count(1) FROM ( + SELECT materialize(1) as k, n FROM numbers(10) nums + JOIN (SELECT materialize(1) AS k, number n FROM numbers(1000000)) j + USING k); + +SELECT count(1) FROM ( + SELECT materialize(1) as k, n FROM numbers(1000) nums + JOIN (SELECT materialize(1) AS k, number n FROM numbers(10000)) j + USING k); + +SELECT count(1), uniqExact(n) FROM ( + SELECT materialize(1) as k, n FROM numbers(1000000) nums + JOIN (SELECT materialize(1) AS k, number n FROM numbers(10)) j + USING k); + +SET max_joined_block_size_rows = 0; + +SET query_plan_join_swap_table = 'false'; + +-- Because of optimizations in the analyzer, the following queries started to run without issues. To preserve the essence of the test, both cases are exercised.
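+-- With enable_analyzer = 0, the two joins below are still expected to hit the 50 MB max_memory_usage limit set above; with enable_analyzer = 1, the same queries are expected to complete.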
+SELECT count(1) FROM ( + SELECT materialize(1) as k, n FROM numbers(10) nums + JOIN (SELECT materialize(1) AS k, number n FROM numbers(1000000)) j + USING k) SETTINGS enable_analyzer = 0; -- { serverError MEMORY_LIMIT_EXCEEDED } + +SELECT count(1) FROM ( + SELECT materialize(1) as k, n FROM numbers(1000) nums + JOIN (SELECT materialize(1) AS k, number n FROM numbers(10000)) j + USING k) SETTINGS enable_analyzer = 0; -- { serverError MEMORY_LIMIT_EXCEEDED } + +SELECT count(1) FROM ( + SELECT materialize(1) as k, n FROM numbers(10) nums + JOIN (SELECT materialize(1) AS k, number n FROM numbers(1000000)) j + USING k) SETTINGS enable_analyzer = 1; +SELECT count(1) FROM ( + SELECT materialize(1) as k, n FROM numbers(1000) nums + JOIN (SELECT materialize(1) AS k, number n FROM numbers(10000)) j + USING k) SETTINGS enable_analyzer = 1; + +SELECT 'max_joined_block_size_rows = 2000'; +SET max_joined_block_size_rows = 2000; + +SELECT count(1) FROM ( + SELECT materialize(1) as k, n FROM numbers(10) nums + JOIN (SELECT materialize(1) AS k, number n FROM numbers(1000000)) j + USING k); + +SELECT count(1), uniqExact(n) FROM ( + SELECT materialize(1) as k, n FROM numbers(1000) nums + JOIN (SELECT materialize(1) AS k, number n FROM numbers(10000)) j + USING k); + +SELECT count(1), uniqExact(n) FROM ( + SELECT materialize(1) as k, n FROM numbers(1000000) nums + JOIN (SELECT materialize(1) AS k, number n FROM numbers(10)) j + USING k); + +SELECT 'max_rows_in_join = 1000'; +SET max_rows_in_join = 1000; + +SELECT count(1) FROM ( + SELECT materialize(1) as k, n FROM numbers(10) nums + JOIN (SELECT materialize(1) AS k, number n FROM numbers(1000000)) j + USING k); + +SELECT count(1), uniqExact(n) FROM ( + SELECT materialize(1) as k, n FROM numbers(1000) nums + JOIN (SELECT materialize(1) AS k, number n FROM numbers(10000)) j + USING k); + +SELECT count(1), uniqExact(n) FROM ( + SELECT materialize(1) as k, n FROM numbers(1000000) nums + JOIN (SELECT materialize(1) AS k, number n FROM numbers(10)) j + USING k); diff --git a/parser/testdata/01062_pm_multiple_all_join_same_value/ast.json b/parser/testdata/01062_pm_multiple_all_join_same_value/ast.json new file mode 100644 index 000000000..dc8565e87 --- /dev/null +++ b/parser/testdata/01062_pm_multiple_all_join_same_value/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001699966, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01062_pm_multiple_all_join_same_value/metadata.json b/parser/testdata/01062_pm_multiple_all_join_same_value/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01062_pm_multiple_all_join_same_value/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01062_pm_multiple_all_join_same_value/query.sql b/parser/testdata/01062_pm_multiple_all_join_same_value/query.sql new file mode 100644 index 000000000..048da47de --- /dev/null +++ b/parser/testdata/01062_pm_multiple_all_join_same_value/query.sql @@ -0,0 +1,9 @@ +SET max_memory_usage = 50000000; +SET join_algorithm = 'partial_merge'; + +SELECT count(1) FROM ( + SELECT t2.n FROM numbers(10) t1 + JOIN (SELECT toUInt32(1) AS k, number n FROM numbers(100)) t2 ON toUInt32(t1.number) = t2.k + JOIN (SELECT toUInt32(1) AS k, number n FROM numbers(100)) t3 ON t2.k = t3.k + JOIN (SELECT toUInt32(1) AS k, number n FROM numbers(100)) t4 ON t2.k = t4.k +); diff --git 
a/parser/testdata/01063_create_column_set/ast.json b/parser/testdata/01063_create_column_set/ast.json new file mode 100644 index 000000000..35f3c7238 --- /dev/null +++ b/parser/testdata/01063_create_column_set/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mt (children 1)" + }, + { + "explain": " Identifier mt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001239046, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01063_create_column_set/metadata.json b/parser/testdata/01063_create_column_set/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01063_create_column_set/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01063_create_column_set/query.sql b/parser/testdata/01063_create_column_set/query.sql new file mode 100644 index 000000000..b283fa22f --- /dev/null +++ b/parser/testdata/01063_create_column_set/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS mt; +CREATE TABLE mt (x UInt8, y Date) ENGINE = MergeTree ORDER BY x; + +SELECT count() +FROM mt +ANY LEFT JOIN +( + SELECT 1 AS x +) js2 USING (x) +PREWHERE x IN (1) WHERE y = today(); + +DROP TABLE mt; diff --git a/parser/testdata/01064_arrayROCAUC/ast.json b/parser/testdata/01064_arrayROCAUC/ast.json new file mode 100644 index 000000000..5a0f95654 --- /dev/null +++ b/parser/testdata/01064_arrayROCAUC/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayROCAUC (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[Float64_0.1, Float64_0.4, Float64_0.35, Float64_0.8]" + }, + { + "explain": " Literal Array_[UInt64_0, UInt64_0, UInt64_1, UInt64_1]" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001323021, + "rows_read": 8, + "bytes_read": 383 + } +} diff --git a/parser/testdata/01064_arrayROCAUC/metadata.json b/parser/testdata/01064_arrayROCAUC/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01064_arrayROCAUC/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01064_arrayROCAUC/query.sql b/parser/testdata/01064_arrayROCAUC/query.sql new file mode 100644 index 000000000..077ad4bab --- /dev/null +++ b/parser/testdata/01064_arrayROCAUC/query.sql @@ -0,0 +1,94 @@ +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]); +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], cast([0, 0, 1, 1] as Array(Int8))); +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], cast([-1, -1, 1, 1] as Array(Int8))); +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], cast(['false', 'false', 'true', 'true'] as Array(Enum8('false' = 0, 'true' = 1)))); +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], cast(['false', 'false', 'true', 'true'] as Array(Enum8('false' = -1, 'true' = 1)))); +select arrayROCAUC(cast([10, 40, 35, 80] as Array(UInt8)), [0, 0, 1, 1]); +select arrayROCAUC(cast([10, 40, 35, 80] as Array(UInt16)), [0, 0, 1, 1]); +select arrayROCAUC(cast([10, 40, 35, 80] as Array(UInt32)), [0, 0, 1, 1]); +select arrayROCAUC(cast([10, 40, 35, 80] as Array(UInt64)), [0, 0, 1, 1]); +select arrayROCAUC(cast([-10, -40, -35, -80] as Array(Int8)), [0, 0, 1, 1]); +select 
arrayROCAUC(cast([-10, -40, -35, -80] as Array(Int16)), [0, 0, 1, 1]); +select arrayROCAUC(cast([-10, -40, -35, -80] as Array(Int32)), [0, 0, 1, 1]); +select arrayROCAUC(cast([-10, -40, -35, -80] as Array(Int64)), [0, 0, 1, 1]); +select arrayROCAUC(cast([-0.1, -0.4, -0.35, -0.8] as Array(Float32)) , [0, 0, 1, 1]); +select arrayROCAUC([0, 3, 5, 6, 7.5, 8], [1, 0, 1, 0, 0, 0]); +select arrayROCAUC([0.1, 0.35, 0.4, 0.8], [1, 0, 1, 0]); + +-- passing scale = true +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], true); +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], cast([0, 0, 1, 1] as Array(Int8)), true); +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], cast([-1, -1, 1, 1] as Array(Int8)), true); +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], cast(['false', 'false', 'true', 'true'] as Array(Enum8('false' = 0, 'true' = 1))), true); +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], cast(['false', 'false', 'true', 'true'] as Array(Enum8('false' = -1, 'true' = 1))), true); +select arrayROCAUC(cast([10, 40, 35, 80] as Array(UInt8)), [0, 0, 1, 1], true); +select arrayROCAUC(cast([10, 40, 35, 80] as Array(UInt16)), [0, 0, 1, 1], true); +select arrayROCAUC(cast([10, 40, 35, 80] as Array(UInt32)), [0, 0, 1, 1], true); +select arrayROCAUC(cast([10, 40, 35, 80] as Array(UInt64)), [0, 0, 1, 1], true); +select arrayROCAUC(cast([-10, -40, -35, -80] as Array(Int8)), [0, 0, 1, 1], true); +select arrayROCAUC(cast([-10, -40, -35, -80] as Array(Int16)), [0, 0, 1, 1], true); +select arrayROCAUC(cast([-10, -40, -35, -80] as Array(Int32)), [0, 0, 1, 1], true); +select arrayROCAUC(cast([-10, -40, -35, -80] as Array(Int64)), [0, 0, 1, 1], true); +select arrayROCAUC(cast([-0.1, -0.4, -0.35, -0.8] as Array(Float32)) , [0, 0, 1, 1], true); +select arrayROCAUC([0, 3, 5, 6, 7.5, 8], [1, 0, 1, 0, 0, 0], true); +select arrayROCAUC([0.1, 0.35, 0.4, 0.8], [1, 0, 1, 0], true); + +-- passing scale = false +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], false); +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], cast([0, 0, 1, 1] as Array(Int8)), false); +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], cast([-1, -1, 1, 1] as Array(Int8)), false); +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], cast(['false', 'false', 'true', 'true'] as Array(Enum8('false' = 0, 'true' = 1))), false); +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], cast(['false', 'false', 'true', 'true'] as Array(Enum8('false' = -1, 'true' = 1))), false); +select arrayROCAUC(cast([10, 40, 35, 80] as Array(UInt8)), [0, 0, 1, 1], false); +select arrayROCAUC(cast([10, 40, 35, 80] as Array(UInt16)), [0, 0, 1, 1], false); +select arrayROCAUC(cast([10, 40, 35, 80] as Array(UInt32)), [0, 0, 1, 1], false); +select arrayROCAUC(cast([10, 40, 35, 80] as Array(UInt64)), [0, 0, 1, 1], false); +select arrayROCAUC(cast([-10, -40, -35, -80] as Array(Int8)), [0, 0, 1, 1], false); +select arrayROCAUC(cast([-10, -40, -35, -80] as Array(Int16)), [0, 0, 1, 1], false); +select arrayROCAUC(cast([-10, -40, -35, -80] as Array(Int32)), [0, 0, 1, 1], false); +select arrayROCAUC(cast([-10, -40, -35, -80] as Array(Int64)), [0, 0, 1, 1], false); +select arrayROCAUC(cast([-0.1, -0.4, -0.35, -0.8] as Array(Float32)) , [0, 0, 1, 1], false); +select arrayROCAUC([0, 3, 5, 6, 7.5, 8], [1, 0, 1, 0, 0, 0], false); +select arrayROCAUC([0.1, 0.35, 0.4, 0.8], [1, 0, 1, 0], false); + +-- passing offsets as [0, 0, 0, 0] +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], true, [0, 0, 0, 0]); +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], cast([0, 0, 1, 1] as Array(Int8)), true, [0, 0, 0, 0]); +select arrayROCAUC([0.1, 0.4, 0.35, 
0.8], cast([-1, -1, 1, 1] as Array(Int8)), true, [0, 0, 0, 0]); +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], cast(['false', 'false', 'true', 'true'] as Array(Enum8('false' = 0, 'true' = 1))), true, [0, 0, 0, 0]); +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], cast(['false', 'false', 'true', 'true'] as Array(Enum8('false' = -1, 'true' = 1))), true, [0, 0, 0, 0]); +select arrayROCAUC(cast([10, 40, 35, 80] as Array(UInt8)), [0, 0, 1, 1], true, [0, 0, 0, 0]); +select arrayROCAUC(cast([10, 40, 35, 80] as Array(UInt16)), [0, 0, 1, 1], true, [0, 0, 0, 0]); +select arrayROCAUC(cast([10, 40, 35, 80] as Array(UInt32)), [0, 0, 1, 1], true, [0, 0, 0, 0]); +select arrayROCAUC(cast([10, 40, 35, 80] as Array(UInt64)), [0, 0, 1, 1], true, [0, 0, 0, 0]); +select arrayROCAUC(cast([-10, -40, -35, -80] as Array(Int8)), [0, 0, 1, 1], true, [0, 0, 0, 0]); +select arrayROCAUC(cast([-10, -40, -35, -80] as Array(Int16)), [0, 0, 1, 1], true, [0, 0, 0, 0]); +select arrayROCAUC(cast([-10, -40, -35, -80] as Array(Int32)), [0, 0, 1, 1], true, [0, 0, 0, 0]); +select arrayROCAUC(cast([-10, -40, -35, -80] as Array(Int64)), [0, 0, 1, 1], true, [0, 0, 0, 0]); +select arrayROCAUC(cast([-0.1, -0.4, -0.35, -0.8] as Array(Float32)) , [0, 0, 1, 1], true, [0, 0, 0, 0]); +select arrayROCAUC([0, 3, 5, 6, 7.5, 8], [1, 0, 1, 0, 0, 0], true, [0, 0, 0, 0]); +select arrayROCAUC([0.1, 0.35, 0.4, 0.8], [1, 0, 1, 0], true, [0, 0, 0, 0]); + +-- alias +select arrayAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], false); + +-- negative tests +select arrayROCAUC([0, 0, 1, 1]); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select arrayROCAUC([0.1, 0.35], [0, 0, 1, 1]); -- { serverError BAD_ARGUMENTS } +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], materialize(true)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], [0, 0, 0, 0]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], true, [0, 0, 0, 0], true); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], true, [0, 0, 0, NULL]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], true, ['a', 'b', 'c', 'd']); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], true, [0, 1, 0, 0, 0]); -- { serverError BAD_ARGUMENTS } +select arrayROCAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], true, [-1, 0, 0, 0]); -- { serverError BAD_ARGUMENTS } +select arrayROCAUC(x, y, true, z) from ( + select [1] as x, [0] as y, [0, 0, 0, 0, 0, 0, 0, 0] as z + UNION ALL + select [1] as x, [0] as y, [] as z +); -- { serverError BAD_ARGUMENTS } +select arrayROCAUC(x, y, true, z) from ( + select [1] as x, [0] as y, [0, 0, 0] as z + UNION ALL + select [1] as x, [1] as y, [0, 0, 0, 0, 0] as z +); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/01064_incremental_streaming_from_2_src_with_feedback/ast.json b/parser/testdata/01064_incremental_streaming_from_2_src_with_feedback/ast.json new file mode 100644 index 000000000..f5f304b20 --- /dev/null +++ b/parser/testdata/01064_incremental_streaming_from_2_src_with_feedback/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001206104, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01064_incremental_streaming_from_2_src_with_feedback/metadata.json 
b/parser/testdata/01064_incremental_streaming_from_2_src_with_feedback/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01064_incremental_streaming_from_2_src_with_feedback/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01064_incremental_streaming_from_2_src_with_feedback/query.sql b/parser/testdata/01064_incremental_streaming_from_2_src_with_feedback/query.sql new file mode 100644 index 000000000..f365a4c86 --- /dev/null +++ b/parser/testdata/01064_incremental_streaming_from_2_src_with_feedback/query.sql @@ -0,0 +1,149 @@ +SET joined_subquery_requires_alias = 0; +SET max_threads = 1; +-- This affects the number of rows read and max_rows_to_read. +SET max_bytes_before_external_sort = 0; +SET max_bytes_ratio_before_external_sort = 0; +SET max_bytes_before_external_group_by = 0; +SET max_bytes_ratio_before_external_group_by = 0; + +-- incremental streaming use case, +-- which makes sense only if the data filling order guarantees chronological ordering + +DROP TABLE IF EXISTS target_table; +DROP TABLE IF EXISTS logins; +DROP TABLE IF EXISTS mv_logins2target; +DROP TABLE IF EXISTS checkouts; +DROP TABLE IF EXISTS mv_checkouts2target; + +-- this is the final table, which is filled incrementally from 2 different sources + +CREATE TABLE target_table Engine=SummingMergeTree() ORDER BY id +SETTINGS index_granularity=128, index_granularity_bytes = '10Mi' +AS + SELECT + number as id, + maxState( toDateTime(0, 'UTC') ) as latest_login_time, + maxState( toDateTime(0, 'UTC') ) as latest_checkout_time, + minState( toUInt64(-1) ) as fastest_session, + maxState( toUInt64(0) ) as biggest_inactivity_period +FROM numbers(50000) +GROUP BY id +SETTINGS max_insert_threads=1; + +-- source table #1 + +CREATE TABLE logins ( + id UInt64, + ts DateTime('UTC') +) Engine=MergeTree ORDER BY id; + + +-- and an MV with something like feedback from the target table + +CREATE MATERIALIZED VIEW mv_logins2target TO target_table +AS + SELECT + id, + maxState( ts ) as latest_login_time, + maxState( toDateTime(0, 'UTC') ) as latest_checkout_time, + minState( toUInt64(-1) ) as fastest_session, + if(max(current_latest_checkout_time) > 0, maxState(toUInt64(ts - current_latest_checkout_time)), maxState( toUInt64(0) ) ) as biggest_inactivity_period + FROM logins + LEFT JOIN ( + SELECT + id, + maxMerge(latest_checkout_time) as current_latest_checkout_time + + -- a normal MV sees only the incoming block, but we need something like feedback here, + -- so we join with the target table; the most important thing is that + -- we extract from the target table only the rows affected by this MV, referencing the src table + -- a second time + FROM target_table + WHERE id IN (SELECT id FROM logins) + GROUP BY id + ) USING (id) + GROUP BY id; + + +-- the same for the second pipeline +CREATE TABLE checkouts ( + id UInt64, + ts DateTime('UTC') +) Engine=MergeTree ORDER BY id; + +CREATE MATERIALIZED VIEW mv_checkouts2target TO target_table +AS + SELECT + id, + maxState( toDateTime(0, 'UTC') ) as latest_login_time, + maxState( ts ) as latest_checkout_time, + if(max(current_latest_login_time) > 0, minState( toUInt64(ts - current_latest_login_time)), minState( toUInt64(-1) ) ) as fastest_session, + maxState( toUInt64(0) ) as biggest_inactivity_period + FROM checkouts + LEFT JOIN (SELECT id, maxMerge(latest_login_time) as current_latest_login_time FROM target_table WHERE id IN (SELECT id FROM checkouts) GROUP BY id) USING (id) + GROUP BY id; + +-- This query takes effect only for existing tables, so it must be 
located after CREATE. +SYSTEM STOP MERGES target_table; +SYSTEM STOP MERGES checkouts; +SYSTEM STOP MERGES logins; + +-- feed with some initial values +INSERT INTO logins SELECT number as id, '2000-01-01 08:00:00' from numbers(50000); +INSERT INTO checkouts SELECT number as id, '2000-01-01 10:00:00' from numbers(50000); + +-- ensure that we don't read whole target table during join +-- by this time we should have 3 parts for target_table because of prev inserts +-- and we plan to make two more inserts. With index_granularity=128 and max id=1000 +-- we expect to read not more than: +-- 1000 rows read from numbers(1000) in the INSERT itself +-- 1000 rows in the `IN (SELECT id FROM table)` in the mat views +-- (1000/128) marks per part * (3 + 2) parts * 128 granularity = 5120 rows +-- Total: 7120 +set max_rows_to_read = 7120; + +INSERT INTO logins SELECT number as id, '2000-01-01 11:00:00' from numbers(1000); +INSERT INTO checkouts SELECT number as id, '2000-01-01 11:10:00' from numbers(1000); + +-- by this time we should have 5 parts for target_table because of prev inserts +-- and we plan to make two more inserts. With index_granularity=128 and max id=1 +-- we expect to read not more than: +-- 1 mark per part * (5 + 2) parts * 128 granularity + 1 (numbers(1)) = 897 rows +set max_rows_to_read = 897; + +INSERT INTO logins SELECT number+2 as id, '2001-01-01 11:10:01' from numbers(1); +INSERT INTO checkouts SELECT number+2 as id, '2001-01-01 11:10:02' from numbers(1); + + +set max_rows_to_read = 0; + +select '-- unmerged state'; + +select + id, + finalizeAggregation(latest_login_time) as current_latest_login_time, + finalizeAggregation(latest_checkout_time) as current_latest_checkout_time, + finalizeAggregation(fastest_session) as current_fastest_session, + finalizeAggregation(biggest_inactivity_period) as current_biggest_inactivity_period +from target_table +where id in (1,2) +ORDER BY id, current_latest_login_time, current_latest_checkout_time; + +select '-- merged state'; + +SELECT + id, + maxMerge(latest_login_time) as current_latest_login_time, + maxMerge(latest_checkout_time) as current_latest_checkout_time, + minMerge(fastest_session) as current_fastest_session, + maxMerge(biggest_inactivity_period) as current_biggest_inactivity_period +FROM target_table +where id in (1,2) +GROUP BY id +ORDER BY id; + +DROP TABLE IF EXISTS logins; +DROP TABLE IF EXISTS mv_logins2target; +DROP TABLE IF EXISTS checkouts; +DROP TABLE IF EXISTS mv_checkouts2target; +DROP TABLE target_table; diff --git a/parser/testdata/01064_pm_all_join_const_and_nullable/ast.json b/parser/testdata/01064_pm_all_join_const_and_nullable/ast.json new file mode 100644 index 000000000..c8dcf3c6e --- /dev/null +++ b/parser/testdata/01064_pm_all_join_const_and_nullable/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001162494, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01064_pm_all_join_const_and_nullable/metadata.json b/parser/testdata/01064_pm_all_join_const_and_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01064_pm_all_join_const_and_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01064_pm_all_join_const_and_nullable/query.sql b/parser/testdata/01064_pm_all_join_const_and_nullable/query.sql new file mode 100644 index 000000000..bc5e1d682 --- /dev/null +++ 
b/parser/testdata/01064_pm_all_join_const_and_nullable/query.sql @@ -0,0 +1,87 @@ +SET join_algorithm = 'partial_merge'; + +SELECT count(1), uniqExact(1) FROM ( +SELECT k FROM (SELECT materialize(1) AS k FROM numbers(1)) nums +JOIN (SELECT materialize(1) AS k, number n FROM numbers(100000)) j +USING k); + +SELECT count(1), uniqExact(1) FROM ( +SELECT k FROM (SELECT materialize(1) as k FROM numbers(1)) nums +JOIN (SELECT 1 AS k, number n FROM numbers(100000)) j +USING k); + +SELECT count(1), uniqExact(1) FROM ( +SELECT k FROM (SELECT 1 AS k FROM numbers(1)) nums +JOIN (SELECT materialize(1) AS k, number n FROM numbers(100000)) j +USING k); + +SELECT count(1), uniqExact(1) FROM ( +SELECT k FROM (SELECT 1 as k FROM numbers(1)) nums +JOIN (SELECT 1 AS k, number n FROM numbers(100000)) j +USING k); + +SELECT 'first nullable'; + +SELECT count(1), uniqExact(1) FROM ( +SELECT k FROM (SELECT materialize(toNullable(1)) AS k FROM numbers(1)) nums +JOIN (SELECT materialize(1) AS k, number n FROM numbers(100000)) j +USING k); + +SELECT count(1), uniqExact(1) FROM ( +SELECT k FROM (SELECT materialize(toNullable(1)) as k FROM numbers(1)) nums +JOIN (SELECT 1 AS k, number n FROM numbers(100000)) j +USING k); + +SELECT count(1), uniqExact(1) FROM ( +SELECT k FROM (SELECT toNullable(1) as k FROM numbers(1)) nums +JOIN (SELECT materialize(1) AS k, number n FROM numbers(100000)) j +USING k); + +SELECT count(1), uniqExact(1) FROM ( +SELECT k FROM (SELECT toNullable(1) as k FROM numbers(1)) nums +JOIN (SELECT 1 AS k, number n FROM numbers(100000)) j +USING k); + +SELECT 'second nullable'; + +SELECT count(1), uniqExact(1) FROM ( +SELECT k FROM (SELECT materialize(1) as k FROM numbers(1)) nums +JOIN (SELECT materialize(toNullable(1)) AS k, number n FROM numbers(100000)) j +USING k); + +SELECT count(1), uniqExact(1) FROM ( +SELECT k FROM (SELECT materialize(1) as k FROM numbers(1)) nums +JOIN (SELECT toNullable(1) AS k, number n FROM numbers(100000)) j +USING k); + +SELECT count(1), uniqExact(1) FROM ( +SELECT k FROM (SELECT 1 as k FROM numbers(1)) nums +JOIN (SELECT materialize(toNullable(1)) AS k, number n FROM numbers(100000)) j +USING k); + +SELECT count(1), uniqExact(1) FROM ( +SELECT k FROM (SELECT 1 as k FROM numbers(1)) nums +JOIN (SELECT toNullable(1) AS k, number n FROM numbers(100000)) j +USING k); + +SELECT 'both nullable'; + +SELECT count(1), uniqExact(1) FROM ( +SELECT k FROM (SELECT materialize(toNullable(1)) as k FROM numbers(1)) nums +JOIN (SELECT materialize(toNullable(1)) AS k, number n FROM numbers(100000)) j +USING k); + +SELECT count(1), uniqExact(1) FROM ( +SELECT k FROM (SELECT materialize(toNullable(1)) as k FROM numbers(1)) nums +JOIN (SELECT toNullable(1) AS k, number n FROM numbers(100000)) j +USING k); + +SELECT count(1), uniqExact(1) FROM ( +SELECT k FROM (SELECT toNullable(1) as k FROM numbers(1)) nums +JOIN (SELECT materialize(toNullable(1)) AS k, number n FROM numbers(100000)) j +USING k); + +SELECT count(1), uniqExact(1) FROM ( +SELECT k FROM (SELECT toNullable(1) as k FROM numbers(1)) nums +JOIN (SELECT toNullable(1) AS k, number n FROM numbers(100000)) j +USING k); diff --git a/parser/testdata/01065_array_zip_mixed_const/ast.json b/parser/testdata/01065_array_zip_mixed_const/ast.json new file mode 100644 index 000000000..399effb64 --- /dev/null +++ b/parser/testdata/01065_array_zip_mixed_const/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " 
ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayZip (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_0, UInt64_1]" + }, + { + "explain": " Literal Array_['hello', 'world']" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001358425, + "rows_read": 8, + "bytes_read": 325 + } +} diff --git a/parser/testdata/01065_array_zip_mixed_const/metadata.json b/parser/testdata/01065_array_zip_mixed_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01065_array_zip_mixed_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01065_array_zip_mixed_const/query.sql b/parser/testdata/01065_array_zip_mixed_const/query.sql new file mode 100644 index 000000000..0cd369739 --- /dev/null +++ b/parser/testdata/01065_array_zip_mixed_const/query.sql @@ -0,0 +1,7 @@ +SELECT arrayZip([0, 1], ['hello', 'world']); +SELECT arrayZip(materialize([0, 1]), ['hello', 'world']); +SELECT arrayZip([0, 1], materialize(['hello', 'world'])); +SELECT arrayZip(materialize([0, 1]), materialize(['hello', 'world'])); + +SELECT arrayZip([0, number], [toString(number), 'world']) FROM numbers(10); +SELECT arrayZip([1, number, number * number], [[], [], []]) FROM numbers(10); diff --git a/parser/testdata/01065_if_not_finite/ast.json b/parser/testdata/01065_if_not_finite/ast.json new file mode 100644 index 000000000..3596fbd90 --- /dev/null +++ b/parser/testdata/01065_if_not_finite/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function ifNotFinite (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function divide (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_111" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001301694, + "rows_read": 20, + "bytes_read": 791 + } +} diff --git a/parser/testdata/01065_if_not_finite/metadata.json b/parser/testdata/01065_if_not_finite/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01065_if_not_finite/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01065_if_not_finite/query.sql b/parser/testdata/01065_if_not_finite/query.sql new file mode 100644 index 000000000..8d44644e3 --- /dev/null +++ b/parser/testdata/01065_if_not_finite/query.sql @@ -0,0 +1,11 @@ +SELECT ifNotFinite(round(1 / number, 2), 111) FROM numbers(10); + +SELECT ifNotFinite(1, 2); +SELECT ifNotFinite(-1.0, 2); +SELECT ifNotFinite(nan, 2); +SELECT 
ifNotFinite(-1 / 0, 2); +SELECT ifNotFinite(log(0), NULL); +SELECT ifNotFinite(sqrt(-1), -42); +SELECT ifNotFinite(12345678901234567890, -12345678901234567890); -- { serverError NO_COMMON_TYPE } + +SELECT ifNotFinite(NULL, 1); diff --git a/parser/testdata/01066_bit_count/ast.json b/parser/testdata/01066_bit_count/ast.json new file mode 100644 index 000000000..a850dea55 --- /dev/null +++ b/parser/testdata/01066_bit_count/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitCount (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001428517, + "rows_read": 13, + "bytes_read": 515 + } +} diff --git a/parser/testdata/01066_bit_count/metadata.json b/parser/testdata/01066_bit_count/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01066_bit_count/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01066_bit_count/query.sql b/parser/testdata/01066_bit_count/query.sql new file mode 100644 index 000000000..0b1b2dc82 --- /dev/null +++ b/parser/testdata/01066_bit_count/query.sql @@ -0,0 +1,19 @@ +SELECT bitCount(number) FROM numbers(10); +SELECT avg(bitCount(number)) FROM numbers(256); + +SELECT bitCount(0); +SELECT bitCount(1); +SELECT bitCount(-1); + +SELECT bitCount(toInt64(-1)); +SELECT bitCount(toInt32(-1)); +SELECT bitCount(toInt16(-1)); +SELECT bitCount(toInt8(-1)); + +SELECT x, bitCount(x), hex(reinterpretAsString(x)) FROM VALUES ('x Float64', (1), (-1), (inf)); + +SELECT toFixedString('Hello, world!!!!', 16) AS x, bitCount(x); + +SELECT length(replaceAll(bin('clickhouse cloud'), '0', '')); +SELECT bitCount('clickhouse cloud'); +SELECT length(replaceAll(bin('clickhouse cloud'), '0', '')) = bitCount('clickhouse cloud'); diff --git a/parser/testdata/01067_join_null/ast.json b/parser/testdata/01067_join_null/ast.json new file mode 100644 index 000000000..6db12d58e --- /dev/null +++ b/parser/testdata/01067_join_null/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier id" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001370201, + "rows_read": 5, + "bytes_read": 174 + } +} diff --git a/parser/testdata/01067_join_null/metadata.json b/parser/testdata/01067_join_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01067_join_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01067_join_null/query.sql b/parser/testdata/01067_join_null/query.sql new file mode 100644 index 000000000..a00c34512 --- /dev/null +++ 
b/parser/testdata/01067_join_null/query.sql @@ -0,0 +1,42 @@ +SELECT id +FROM +( + SELECT 1 AS id + UNION ALL + SELECT NULL + UNION ALL + SELECT NULL +) js1 +ALL FULL OUTER JOIN +( + SELECT 1 AS id + UNION ALL + SELECT NULL + UNION ALL + SELECT NULL +) js2 USING (id) +ORDER BY id; + +SELECT '---'; + +SELECT * +FROM +( + SELECT NULL AS x +) js1 +INNER JOIN +( + SELECT NULL AS x +) js2 USING (x); + +SELECT '---'; + +SELECT * +FROM +( + SELECT NULL AS x +) js1 +FULL OUTER JOIN +( + SELECT NULL AS x +) js2 USING (x); diff --git a/parser/testdata/01068_parens/ast.json b/parser/testdata/01068_parens/ast.json new file mode 100644 index 000000000..1a5b833e5 --- /dev/null +++ b/parser/testdata/01068_parens/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001049568, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01068_parens/metadata.json b/parser/testdata/01068_parens/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01068_parens/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01068_parens/query.sql b/parser/testdata/01068_parens/query.sql new file mode 100644 index 000000000..429487605 --- /dev/null +++ b/parser/testdata/01068_parens/query.sql @@ -0,0 +1,2 @@ +SET max_parser_depth = 10000; +((((((((((((((SELECT((((((((((((((((((((((((((((((((1)))))))))))))))))))))))))))))))))))))))))))))); diff --git a/parser/testdata/01069_database_memory/ast.json b/parser/testdata/01069_database_memory/ast.json new file mode 100644 index 000000000..0ea348cd3 --- /dev/null +++ b/parser/testdata/01069_database_memory/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery memory_01069 (children 1)" + }, + { + "explain": " Identifier memory_01069" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001513574, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/01069_database_memory/metadata.json b/parser/testdata/01069_database_memory/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01069_database_memory/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01069_database_memory/query.sql b/parser/testdata/01069_database_memory/query.sql new file mode 100644 index 000000000..5d2fa4ea1 --- /dev/null +++ b/parser/testdata/01069_database_memory/query.sql @@ -0,0 +1,21 @@ +DROP DATABASE IF EXISTS memory_01069; +CREATE DATABASE memory_01069 ENGINE = Memory; +SHOW CREATE DATABASE memory_01069; + +CREATE TABLE memory_01069.mt (n UInt8) ENGINE = MergeTree() ORDER BY n; +CREATE TABLE memory_01069.file (n UInt8) ENGINE = File(CSV); + +INSERT INTO memory_01069.mt VALUES (1), (2); +INSERT INTO memory_01069.file VALUES (3), (4); + +SELECT * FROM memory_01069.mt ORDER BY n; +SELECT * FROM memory_01069.file ORDER BY n; + +DROP TABLE memory_01069.mt; +SELECT * FROM memory_01069.mt ORDER BY n; -- { serverError UNKNOWN_TABLE } +SELECT * FROM memory_01069.file ORDER BY n; + +SHOW CREATE TABLE memory_01069.mt; -- { serverError UNKNOWN_TABLE } +SHOW CREATE TABLE memory_01069.file; + +DROP DATABASE memory_01069; diff --git a/parser/testdata/01069_insert_float_as_nullable_unit8/ast.json b/parser/testdata/01069_insert_float_as_nullable_unit8/ast.json new file mode 100644 index 000000000..accc5d18e --- /dev/null +++ 
b/parser/testdata/01069_insert_float_as_nullable_unit8/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t1 (children 2)" + }, + { + "explain": " Identifier t1" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration a (children 1)" + }, + { + "explain": " DataType Nullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType UInt8" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001557449, + "rows_read": 8, + "bytes_read": 287 + } +} diff --git a/parser/testdata/01069_insert_float_as_nullable_unit8/metadata.json b/parser/testdata/01069_insert_float_as_nullable_unit8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01069_insert_float_as_nullable_unit8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01069_insert_float_as_nullable_unit8/query.sql b/parser/testdata/01069_insert_float_as_nullable_unit8/query.sql new file mode 100644 index 000000000..f1780fa1e --- /dev/null +++ b/parser/testdata/01069_insert_float_as_nullable_unit8/query.sql @@ -0,0 +1,7 @@ +create temporary table t1 (a Nullable(UInt8)); +insert into t1 values (2.4); +select * from t1; + +create temporary table t2 (a UInt8); +insert into t2 values (2.4); +select * from t2; diff --git a/parser/testdata/01069_materialized_view_alter_target_table/ast.json b/parser/testdata/01069_materialized_view_alter_target_table/ast.json new file mode 100644 index 000000000..7677e9a57 --- /dev/null +++ b/parser/testdata/01069_materialized_view_alter_target_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mv (children 1)" + }, + { + "explain": " Identifier mv" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001501316, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01069_materialized_view_alter_target_table/metadata.json b/parser/testdata/01069_materialized_view_alter_target_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01069_materialized_view_alter_target_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01069_materialized_view_alter_target_table/query.sql b/parser/testdata/01069_materialized_view_alter_target_table/query.sql new file mode 100644 index 000000000..b65835ace --- /dev/null +++ b/parser/testdata/01069_materialized_view_alter_target_table/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS mv; +DROP TABLE IF EXISTS mv_source; +DROP TABLE IF EXISTS mv_target; + +CREATE TABLE mv_source (`a` UInt64) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE mv_target (`a` UInt64) ENGINE = MergeTree ORDER BY tuple(); + +CREATE MATERIALIZED VIEW mv TO mv_target AS SELECT * FROM mv_source; + +INSERT INTO mv_source VALUES (1); + +ALTER TABLE mv_target ADD COLUMN b UInt8; +INSERT INTO mv_source VALUES (1),(2),(3); + +SELECT * FROM mv ORDER BY a; +SELECT * FROM mv_target ORDER BY a; + +DROP TABLE mv; +DROP TABLE mv_source; +DROP TABLE mv_target; diff --git a/parser/testdata/01069_materialized_view_alter_target_table_with_default_expression/ast.json b/parser/testdata/01069_materialized_view_alter_target_table_with_default_expression/ast.json new file mode 100644 index 000000000..862bd077e --- /dev/null +++ 
b/parser/testdata/01069_materialized_view_alter_target_table_with_default_expression/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mv (children 1)" + }, + { + "explain": " Identifier mv" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001364414, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01069_materialized_view_alter_target_table_with_default_expression/metadata.json b/parser/testdata/01069_materialized_view_alter_target_table_with_default_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01069_materialized_view_alter_target_table_with_default_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01069_materialized_view_alter_target_table_with_default_expression/query.sql b/parser/testdata/01069_materialized_view_alter_target_table_with_default_expression/query.sql new file mode 100644 index 000000000..da2078de8 --- /dev/null +++ b/parser/testdata/01069_materialized_view_alter_target_table_with_default_expression/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS mv; +DROP TABLE IF EXISTS mv_source; +DROP TABLE IF EXISTS mv_target; + +CREATE TABLE mv_source (`a` UInt64) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE mv_target (`a` UInt64) ENGINE = MergeTree ORDER BY tuple(); + +CREATE MATERIALIZED VIEW mv TO mv_target AS SELECT * FROM mv_source; + +INSERT INTO mv_source VALUES (1); + +ALTER TABLE mv_target ADD COLUMN b UInt8 DEFAULT a + 1; +INSERT INTO mv_source VALUES (1),(2),(3); + +SELECT * FROM mv ORDER BY a; +SELECT * FROM mv_target ORDER BY a; + +DROP TABLE mv; +DROP TABLE mv_source; +DROP TABLE mv_target; diff --git a/parser/testdata/01069_set_in_group_by/ast.json b/parser/testdata/01069_set_in_group_by/ast.json new file mode 100644 index 000000000..abb76324e --- /dev/null +++ b/parser/testdata/01069_set_in_group_by/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery testmt (children 1)" + }, + { + "explain": " Identifier testmt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001149229, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/01069_set_in_group_by/metadata.json b/parser/testdata/01069_set_in_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01069_set_in_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01069_set_in_group_by/query.sql b/parser/testdata/01069_set_in_group_by/query.sql new file mode 100644 index 000000000..5884fd0e1 --- /dev/null +++ b/parser/testdata/01069_set_in_group_by/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS testmt; + +CREATE TABLE testmt (`CounterID` UInt64, `value` String) ENGINE = MergeTree() ORDER BY CounterID; + +INSERT INTO testmt VALUES (1, '1'), (2, '2'); + +SELECT arrayJoin([CounterID NOT IN (2)]) AS counter FROM testmt WHERE CounterID IN (2) GROUP BY counter; + +DROP TABLE testmt; diff --git a/parser/testdata/01070_alter_with_ttl/ast.json b/parser/testdata/01070_alter_with_ttl/ast.json new file mode 100644 index 000000000..36d0eb76a --- /dev/null +++ b/parser/testdata/01070_alter_with_ttl/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alter_ttl (children 1)" + }, + { + "explain": " Identifier alter_ttl" + } + ], 
+ + "rows": 2, + + "statistics": + { + "elapsed": 0.001440556, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/01070_alter_with_ttl/metadata.json b/parser/testdata/01070_alter_with_ttl/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01070_alter_with_ttl/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01070_alter_with_ttl/query.sql b/parser/testdata/01070_alter_with_ttl/query.sql new file mode 100644 index 000000000..44d422cbe --- /dev/null +++ b/parser/testdata/01070_alter_with_ttl/query.sql @@ -0,0 +1,14 @@ +drop table if exists alter_ttl; + +SET allow_suspicious_ttl_expressions = 1; + +create table alter_ttl(i Int) engine = MergeTree order by i ttl toDate('2020-05-05'); +alter table alter_ttl add column s String; +alter table alter_ttl modify column s String ttl toDate('2020-01-01'); +show create table alter_ttl; +drop table alter_ttl; + +create table alter_ttl(d Date, s String) engine = MergeTree order by d ttl d + interval 1 month; +alter table alter_ttl modify column s String ttl d + interval 1 day; +show create table alter_ttl; +drop table alter_ttl; diff --git a/parser/testdata/01070_exception_code_in_query_log_table/ast.json b/parser/testdata/01070_exception_code_in_query_log_table/ast.json new file mode 100644 index 000000000..165d27ad4 --- /dev/null +++ b/parser/testdata/01070_exception_code_in_query_log_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table_for_01070_exception_code_in_query_log_table (children 1)" + }, + { + "explain": " Identifier test_table_for_01070_exception_code_in_query_log_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001345817, + "rows_read": 2, + "bytes_read": 160 + } +} diff --git a/parser/testdata/01070_exception_code_in_query_log_table/metadata.json b/parser/testdata/01070_exception_code_in_query_log_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01070_exception_code_in_query_log_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01070_exception_code_in_query_log_table/query.sql b/parser/testdata/01070_exception_code_in_query_log_table/query.sql new file mode 100644 index 000000000..010da3b6f --- /dev/null +++ b/parser/testdata/01070_exception_code_in_query_log_table/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS test_table_for_01070_exception_code_in_query_log_table; +SELECT * FROM test_table_for_01070_exception_code_in_query_log_table; -- { serverError UNKNOWN_TABLE } +CREATE TABLE test_table_for_01070_exception_code_in_query_log_table (value UInt64) ENGINE=Memory(); +SELECT * FROM test_table_for_01070_exception_code_in_query_log_table; +SYSTEM FLUSH LOGS query_log; +SELECT exception_code FROM system.query_log WHERE current_database = currentDatabase() AND lower(query) LIKE lower('SELECT * FROM test_table_for_01070_exception_code_in_query_log_table%') AND event_date >= yesterday() AND event_time > now() - INTERVAL 5 MINUTE ORDER BY exception_code; +DROP TABLE IF EXISTS test_table_for_01070_exception_code_in_query_log_table; diff --git a/parser/testdata/01070_h3_get_base_cell/ast.json b/parser/testdata/01070_h3_get_base_cell/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01070_h3_get_base_cell/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01070_h3_get_base_cell/metadata.json 
b/parser/testdata/01070_h3_get_base_cell/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01070_h3_get_base_cell/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01070_h3_get_base_cell/query.sql b/parser/testdata/01070_h3_get_base_cell/query.sql new file mode 100644 index 000000000..33389ffcd --- /dev/null +++ b/parser/testdata/01070_h3_get_base_cell/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest + +SELECT h3GetBaseCell(612916788725809151); diff --git a/parser/testdata/01070_h3_hex_area_m2/ast.json b/parser/testdata/01070_h3_hex_area_m2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01070_h3_hex_area_m2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01070_h3_hex_area_m2/metadata.json b/parser/testdata/01070_h3_hex_area_m2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01070_h3_hex_area_m2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01070_h3_hex_area_m2/query.sql b/parser/testdata/01070_h3_hex_area_m2/query.sql new file mode 100644 index 000000000..d9ce5dc45 --- /dev/null +++ b/parser/testdata/01070_h3_hex_area_m2/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest + +SELECT h3HexAreaM2(5); +SELECT h3HexAreaM2(13); diff --git a/parser/testdata/01070_h3_indexes_are_neighbors/ast.json b/parser/testdata/01070_h3_indexes_are_neighbors/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01070_h3_indexes_are_neighbors/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01070_h3_indexes_are_neighbors/metadata.json b/parser/testdata/01070_h3_indexes_are_neighbors/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01070_h3_indexes_are_neighbors/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01070_h3_indexes_are_neighbors/query.sql b/parser/testdata/01070_h3_indexes_are_neighbors/query.sql new file mode 100644 index 000000000..19a3f6ca5 --- /dev/null +++ b/parser/testdata/01070_h3_indexes_are_neighbors/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest + +SELECT h3IndexesAreNeighbors(617420388352917503, 617420388352655359); +SELECT h3IndexesAreNeighbors(617420388351344639, 617420388352655359); +SELECT h3IndexesAreNeighbors(617420388351344639, 617420388351344639); diff --git a/parser/testdata/01070_h3_to_children/ast.json b/parser/testdata/01070_h3_to_children/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01070_h3_to_children/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01070_h3_to_children/metadata.json b/parser/testdata/01070_h3_to_children/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01070_h3_to_children/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01070_h3_to_children/query.sql b/parser/testdata/01070_h3_to_children/query.sql new file mode 100644 index 000000000..ac40d14e1 --- /dev/null +++ b/parser/testdata/01070_h3_to_children/query.sql @@ -0,0 +1,16 @@ +-- Tags: no-fasttest + +SELECT h3ToChildren(599405990164561919, 16); -- { serverError ARGUMENT_OUT_OF_BOUND } + +DROP TABLE IF EXISTS h3_indexes; + +CREATE TABLE h3_indexes (h3_index UInt64, res UInt8) ENGINE = Memory; + +INSERT INTO h3_indexes VALUES (599405990164561919, 3); +INSERT INTO h3_indexes VALUES (599405990164561919, 6); +INSERT INTO h3_indexes 
VALUES (599405990164561919, 8); + + +SELECT arraySort(h3ToChildren(h3_index,res)) FROM h3_indexes ORDER BY res; + +DROP TABLE h3_indexes; diff --git a/parser/testdata/01070_h3_to_parent/ast.json b/parser/testdata/01070_h3_to_parent/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01070_h3_to_parent/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01070_h3_to_parent/metadata.json b/parser/testdata/01070_h3_to_parent/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01070_h3_to_parent/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01070_h3_to_parent/query.sql b/parser/testdata/01070_h3_to_parent/query.sql new file mode 100644 index 000000000..c5cc54298 --- /dev/null +++ b/parser/testdata/01070_h3_to_parent/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest + +SELECT h3ToParent(599405990164561919, 3); +SELECT h3ToParent(599405990164561919, 0); diff --git a/parser/testdata/01070_h3_to_string/ast.json b/parser/testdata/01070_h3_to_string/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01070_h3_to_string/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01070_h3_to_string/metadata.json b/parser/testdata/01070_h3_to_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01070_h3_to_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01070_h3_to_string/query.sql b/parser/testdata/01070_h3_to_string/query.sql new file mode 100644 index 000000000..85724421d --- /dev/null +++ b/parser/testdata/01070_h3_to_string/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest + +SELECT h3ToString(617420388352917503); diff --git a/parser/testdata/01070_materialize_ttl/ast.json b/parser/testdata/01070_materialize_ttl/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01070_materialize_ttl/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01070_materialize_ttl/metadata.json b/parser/testdata/01070_materialize_ttl/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01070_materialize_ttl/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01070_materialize_ttl/query.sql b/parser/testdata/01070_materialize_ttl/query.sql new file mode 100644 index 000000000..a633ce069 --- /dev/null +++ b/parser/testdata/01070_materialize_ttl/query.sql @@ -0,0 +1,66 @@ +-- Tags: no-parallel + +SET allow_suspicious_ttl_expressions = 1; + +drop table if exists ttl; + +create table ttl (d Date, a Int) engine = MergeTree order by a partition by toDayOfMonth(d); +insert into ttl values (toDateTime('2000-10-10 00:00:00'), 1); +insert into ttl values (toDateTime('2000-10-10 00:00:00'), 2); +insert into ttl values (toDateTime('2100-10-10 00:00:00'), 3); +insert into ttl values (toDateTime('2100-10-10 00:00:00'), 4); + +set materialize_ttl_after_modify = 0; + +alter table ttl materialize ttl; -- { serverError INCORRECT_QUERY } + +alter table ttl modify ttl d + interval 1 day; +-- TTL should not be applied +select * from ttl order by a; + +alter table ttl materialize ttl settings mutations_sync=2; +select * from ttl order by a; + +drop table if exists ttl; + +create table ttl (i Int, s String) engine = MergeTree order by i; +insert into ttl values (1, 'a') (2, 'b') (3, 'c') (4, 'd'); + +alter table ttl modify ttl i % 2 = 0 ? 
today() - 10 : toDate('2100-01-01'); +alter table ttl materialize ttl settings mutations_sync=2; +select * from ttl order by i; + +alter table ttl modify ttl toDate('2000-01-01'); +alter table ttl materialize ttl settings mutations_sync=2; +select * from ttl order by i; + +drop table if exists ttl; + +create table ttl (i Int, s String) engine = MergeTree order by i; +insert into ttl values (1, 'a') (2, 'b') (3, 'c') (4, 'd'); + +alter table ttl modify column s String ttl i % 2 = 0 ? today() - 10 : toDate('2100-01-01'); +-- TTL should not be applied +select * from ttl order by i; + +alter table ttl materialize ttl settings mutations_sync=2; +select * from ttl order by i; + +alter table ttl modify column s String ttl toDate('2000-01-01'); +alter table ttl materialize ttl settings mutations_sync=2; +select * from ttl order by i; + +drop table if exists ttl; + +create table ttl (d Date, i Int, s String) engine = MergeTree order by i; +insert into ttl values (toDate('2000-01-02'), 1, 'a') (toDate('2000-01-03'), 2, 'b') (toDate('2080-01-01'), 3, 'c') (toDate('2080-01-03'), 4, 'd'); + +alter table ttl modify ttl i % 3 = 0 ? today() - 10 : toDate('2100-01-01'); +alter table ttl materialize ttl settings mutations_sync=2; +select i, s from ttl order by i; + +alter table ttl modify column s String ttl d + interval 1 month; +alter table ttl materialize ttl settings mutations_sync=2; +select i, s from ttl order by i; + +drop table if exists ttl; diff --git a/parser/testdata/01070_modify_ttl/ast.json b/parser/testdata/01070_modify_ttl/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01070_modify_ttl/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01070_modify_ttl/metadata.json b/parser/testdata/01070_modify_ttl/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01070_modify_ttl/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01070_modify_ttl/query.sql b/parser/testdata/01070_modify_ttl/query.sql new file mode 100644 index 000000000..4ffd59fc8 --- /dev/null +++ b/parser/testdata/01070_modify_ttl/query.sql @@ -0,0 +1,78 @@ +-- Tags: no-parallel + +SET allow_suspicious_ttl_expressions = 1; + +drop table if exists ttl; + +create table ttl (d Date, a Int) engine = MergeTree order by a partition by toDayOfMonth(d); +insert into ttl values (toDateTime('2000-10-10 00:00:00'), 1); +insert into ttl values (toDateTime('2000-10-10 00:00:00'), 2); +insert into ttl values (toDateTime('2100-10-10 00:00:00'), 3); +insert into ttl values (toDateTime('2100-10-10 00:00:00'), 4); + +set mutations_sync = 2; + +alter table ttl modify ttl d + interval 1 day; +select * from ttl order by a; +select '============='; + +drop table if exists ttl; + +create table ttl (i Int, s String) engine = MergeTree order by i; +insert into ttl values (1, 'a') (2, 'b') (3, 'c') (4, 'd'); + +alter table ttl modify ttl i % 2 = 0 ? today() - 10 : toDate('2100-01-01'); +select * from ttl order by i; +select '============='; + +alter table ttl modify ttl toDate('2000-01-01'); +select * from ttl order by i; +select '============='; + +drop table if exists ttl; + +create table ttl (i Int, s String) engine = MergeTree order by i; +insert into ttl values (1, 'a') (2, 'b') (3, 'c') (4, 'd'); + +alter table ttl modify column s String ttl i % 2 = 0 ? 
today() - 10 : toDate('2100-01-01'); +select * from ttl order by i; +select '============='; + +alter table ttl modify column s String ttl toDate('2000-01-01'); +select * from ttl order by i; +select '============='; + +drop table if exists ttl; + +create table ttl (d Date, i Int, s String) engine = MergeTree order by i; +insert into ttl values (toDate('2000-01-02'), 1, 'a') (toDate('2000-01-03'), 2, 'b') (toDate('2080-01-01'), 3, 'c') (toDate('2080-01-03'), 4, 'd'); + +alter table ttl modify ttl i % 3 = 0 ? today() - 10 : toDate('2100-01-01'); +select i, s from ttl order by i; +select '============='; + +alter table ttl modify column s String ttl d + interval 1 month; +select i, s from ttl order by i; +select '============='; + +drop table if exists ttl; + +create table ttl (i Int, s String, t String) engine = MergeTree order by i; +insert into ttl values (1, 'a', 'aa') (2, 'b', 'bb') (3, 'c', 'cc') (4, 'd', 'dd'); + +alter table ttl modify column s String ttl i % 3 = 0 ? today() - 10 : toDate('2100-01-01'), + modify column t String ttl i % 3 = 1 ? today() - 10 : toDate('2100-01-01'); + +select i, s, t from ttl order by i; +-- MATERIALIZE TTL ran only once +select count() from system.mutations where database = currentDatabase() and table = 'ttl' and is_done; +select '============='; + +drop table if exists ttl; + +-- Nothing changed, don't run mutation +create table ttl (i Int, s String ttl toDate('2000-01-02')) engine = MergeTree order by i; +alter table ttl modify column s String ttl toDate('2000-01-02'); +select count() from system.mutations where database = currentDatabase() and table = 'ttl' and is_done; + +drop table if exists ttl; diff --git a/parser/testdata/01070_modify_ttl_recalc_only/ast.json b/parser/testdata/01070_modify_ttl_recalc_only/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01070_modify_ttl_recalc_only/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01070_modify_ttl_recalc_only/metadata.json b/parser/testdata/01070_modify_ttl_recalc_only/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01070_modify_ttl_recalc_only/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01070_modify_ttl_recalc_only/query.sql b/parser/testdata/01070_modify_ttl_recalc_only/query.sql new file mode 100644 index 000000000..2700cc03f --- /dev/null +++ b/parser/testdata/01070_modify_ttl_recalc_only/query.sql @@ -0,0 +1,114 @@ +-- Tags: no-parallel + +set mutations_sync = 2; + +-- system.parts has server default, timezone cannot be randomized +set session_timezone = ''; + +SET allow_suspicious_ttl_expressions = 1; + +drop table if exists ttl; + +create table ttl (d Date, a Int) engine = MergeTree order by a partition by toDayOfMonth(d) +SETTINGS max_number_of_merges_with_ttl_in_pool=0,materialize_ttl_recalculate_only=true; + +insert into ttl values (toDateTime('2000-10-10 00:00:00'), 1); +insert into ttl values (toDateTime('2000-10-10 00:00:00'), 2); +insert into ttl values (toDateTime('2100-10-10 00:00:00'), 3); +insert into ttl values (toDateTime('2100-10-10 00:00:00'), 4); + + +alter table ttl modify ttl d + interval 1 day; +select * from ttl order by a; +select delete_ttl_info_min, delete_ttl_info_max from system.parts where database = currentDatabase() and table = 'ttl' and active > 0 order by name asc; +optimize table ttl final; +select * from ttl order by a; +select '============='; + +drop table if exists ttl; + +create table ttl (i Int, s String) engine 
= MergeTree order by i +SETTINGS max_number_of_merges_with_ttl_in_pool=0,materialize_ttl_recalculate_only=true; + +insert into ttl values (1, 'a') (2, 'b') (3, 'c') (4, 'd'); + +alter table ttl modify ttl i % 2 = 0 ? toDate('2000-01-01') : toDate('2100-01-01'); +select * from ttl order by i; +select delete_ttl_info_min, delete_ttl_info_max from system.parts where database = currentDatabase() and table = 'ttl' and active > 0; +optimize table ttl final; +select * from ttl order by i; +select '============='; + +alter table ttl modify ttl toDate('2000-01-01'); +select * from ttl order by i; +select delete_ttl_info_min, delete_ttl_info_max from system.parts where database = currentDatabase() and table = 'ttl' and active > 0; +optimize table ttl final; +select * from ttl order by i; +select '============='; + +drop table if exists ttl; + +create table ttl (i Int, s String) engine = MergeTree order by i +SETTINGS max_number_of_merges_with_ttl_in_pool=0,materialize_ttl_recalculate_only=true; + +insert into ttl values (1, 'a') (2, 'b') (3, 'c') (4, 'd'); + +alter table ttl modify column s String ttl i % 2 = 0 ? today() - 10 : toDate('2100-01-01'); +select * from ttl order by i; +optimize table ttl final; +select * from ttl order by i; +select '============='; + +alter table ttl modify column s String ttl toDate('2000-01-01'); +select * from ttl order by i; +optimize table ttl final; +select * from ttl order by i; +select '============='; + +drop table if exists ttl; + +create table ttl (d Date, i Int, s String) engine = MergeTree order by i +SETTINGS max_number_of_merges_with_ttl_in_pool=0,materialize_ttl_recalculate_only=true; + +insert into ttl values (toDate('2000-01-02'), 1, 'a') (toDate('2000-01-03'), 2, 'b') (toDate('2080-01-01'), 3, 'c') (toDate('2080-01-03'), 4, 'd'); + +alter table ttl modify ttl i % 3 = 0 ? toDate('2000-01-01') : toDate('2100-01-01'); +select i, s from ttl order by i; +select delete_ttl_info_min, delete_ttl_info_max from system.parts where database = currentDatabase() and table = 'ttl' and active > 0; +optimize table ttl final; +select i, s from ttl order by i; +select '============='; + +alter table ttl modify column s String ttl d + interval 1 month; +select i, s from ttl order by i; +optimize table ttl final; +select i, s from ttl order by i; +select '============='; + +drop table if exists ttl; + +create table ttl (i Int, s String, t String) engine = MergeTree order by i +SETTINGS max_number_of_merges_with_ttl_in_pool=0,materialize_ttl_recalculate_only=true; + +insert into ttl values (1, 'a', 'aa') (2, 'b', 'bb') (3, 'c', 'cc') (4, 'd', 'dd'); + +alter table ttl modify column s String ttl i % 3 = 0 ? today() - 10 : toDate('2100-01-01'), + modify column t String ttl i % 3 = 1 ? 
today() - 10 : toDate('2100-01-01'); + +select i, s, t from ttl order by i; +optimize table ttl final; +select i, s, t from ttl order by i; +-- MATERIALIZE TTL ran only once +select count() from system.mutations where database = currentDatabase() and table = 'ttl' and is_done; +select '============='; + +drop table if exists ttl; + +-- Nothing changed, don't run mutation +create table ttl (i Int, s String ttl toDate('2000-01-02')) engine = MergeTree order by i +SETTINGS max_number_of_merges_with_ttl_in_pool=0,materialize_ttl_recalculate_only=true; + +alter table ttl modify column s String ttl toDate('2000-01-02'); +select count() from system.mutations where database = currentDatabase() and table = 'ttl' and is_done; + +drop table if exists ttl; diff --git a/parser/testdata/01070_mutations_with_dependencies/ast.json b/parser/testdata/01070_mutations_with_dependencies/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01070_mutations_with_dependencies/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01070_mutations_with_dependencies/metadata.json b/parser/testdata/01070_mutations_with_dependencies/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01070_mutations_with_dependencies/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01070_mutations_with_dependencies/query.sql b/parser/testdata/01070_mutations_with_dependencies/query.sql new file mode 100644 index 000000000..4d1cd5430 --- /dev/null +++ b/parser/testdata/01070_mutations_with_dependencies/query.sql @@ -0,0 +1,49 @@ +-- Tags: no-parallel, no-object-storage +-- With s3 policy TTL TO DISK 'default' doesn't work (because we have no default, only 's3') + +drop table if exists ttl; +set mutations_sync = 2; + +-- check that ttl info was updated after mutation. +create table ttl (i Int, a Int, s String) engine = MergeTree order by i; +insert into ttl values (1, 1, 'a') (2, 1, 'b') (3, 1, 'c') (4, 1, 'd'); + +alter table ttl modify ttl a % 2 = 0 ? today() - 10 : toDate('2100-01-01'); +alter table ttl materialize ttl; + +select * from ttl order by i; +alter table ttl update a = 0 where i % 2 = 0; +select * from ttl order by i; + +drop table ttl; + +select '==================='; + +-- check that skip index is updated after column was modified by ttl. +create table ttl (i Int, a Int, s String default 'b' ttl a % 2 = 0 ? today() - 10 : toDate('2100-01-01'), + index ind_s (s) type set(1) granularity 1) engine = MergeTree order by i; +insert into ttl values (1, 1, 'a') (2, 1, 'a') (3, 1, 'a') (4, 1, 'a'); + +select count() from ttl where s = 'a'; + +alter table ttl update a = 0 where i % 2 = 0; + +select count() from ttl where s = 'a'; +select count() from ttl where s = 'b'; + +drop table ttl; + +-- check only that it doesn't throw exceptions. 
+SET allow_suspicious_ttl_expressions = 1; + +create table ttl (i Int, s String) engine = MergeTree order by i ttl toDate('2000-01-01') TO DISK 'default'; +alter table ttl materialize ttl; +drop table ttl; + +create table ttl (a Int, b Int, c Int default 42 ttl d, d Date, index ind (b * c) type minmax granularity 1) +engine = MergeTree order by a; +insert into ttl values (1, 2, 3, '2100-01-01'); +alter table ttl update d = '2000-01-01' where 1; +alter table ttl materialize ttl; +select * from ttl; +drop table ttl; diff --git a/parser/testdata/01070_string_to_h3/ast.json b/parser/testdata/01070_string_to_h3/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01070_string_to_h3/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01070_string_to_h3/metadata.json b/parser/testdata/01070_string_to_h3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01070_string_to_h3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01070_string_to_h3/query.sql b/parser/testdata/01070_string_to_h3/query.sql new file mode 100644 index 000000000..877a3ab94 --- /dev/null +++ b/parser/testdata/01070_string_to_h3/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest + +SELECT stringToH3('89184926cc3ffff'); diff --git a/parser/testdata/01070_template_empty_file/ast.json b/parser/testdata/01070_template_empty_file/ast.json new file mode 100644 index 000000000..829fcc78b --- /dev/null +++ b/parser/testdata/01070_template_empty_file/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Identifier Template" + }, + { + "explain": " Set" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001145141, + "rows_read": 7, + "bytes_read": 217 + } +} diff --git a/parser/testdata/01070_template_empty_file/metadata.json b/parser/testdata/01070_template_empty_file/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01070_template_empty_file/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01070_template_empty_file/query.sql b/parser/testdata/01070_template_empty_file/query.sql new file mode 100644 index 000000000..bbc67584f --- /dev/null +++ b/parser/testdata/01070_template_empty_file/query.sql @@ -0,0 +1,2 @@ +select 1 format Template settings format_template_row='01070_nonexistent_file.txt'; -- { clientError FILE_DOESNT_EXIST } +select 1 format Template settings format_template_row='/dev/null'; -- { clientError INVALID_TEMPLATE_FORMAT } diff --git a/parser/testdata/01070_to_decimal_or_null_exception/ast.json b/parser/testdata/01070_to_decimal_or_null_exception/ast.json new file mode 100644 index 000000000..9cde90e76 --- /dev/null +++ b/parser/testdata/01070_to_decimal_or_null_exception/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList 
(children 2)" + }, + { + "explain": " Literal 'e'" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001494972, + "rows_read": 8, + "bytes_read": 289 + } +} diff --git a/parser/testdata/01070_to_decimal_or_null_exception/metadata.json b/parser/testdata/01070_to_decimal_or_null_exception/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01070_to_decimal_or_null_exception/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01070_to_decimal_or_null_exception/query.sql b/parser/testdata/01070_to_decimal_or_null_exception/query.sql new file mode 100644 index 000000000..7430d7eac --- /dev/null +++ b/parser/testdata/01070_to_decimal_or_null_exception/query.sql @@ -0,0 +1,7 @@ +SELECT toDecimal32('e', 1); -- { serverError CANNOT_PARSE_NUMBER } +SELECT toDecimal64('e', 2); -- { serverError CANNOT_PARSE_NUMBER } +SELECT toDecimal128('e', 3); -- { serverError CANNOT_PARSE_NUMBER } + +SELECT toDecimal32OrNull('e', 1) x, isNull(x); +SELECT toDecimal64OrNull('e', 2) x, isNull(x); +SELECT toDecimal128OrNull('e', 3) x, isNull(x); diff --git a/parser/testdata/01071_force_optimize_skip_unused_shards/ast.json b/parser/testdata/01071_force_optimize_skip_unused_shards/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01071_force_optimize_skip_unused_shards/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01071_force_optimize_skip_unused_shards/metadata.json b/parser/testdata/01071_force_optimize_skip_unused_shards/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01071_force_optimize_skip_unused_shards/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01071_force_optimize_skip_unused_shards/query.sql b/parser/testdata/01071_force_optimize_skip_unused_shards/query.sql new file mode 100644 index 000000000..aa51233b6 --- /dev/null +++ b/parser/testdata/01071_force_optimize_skip_unused_shards/query.sql @@ -0,0 +1,50 @@ +-- Tags: shard + +set optimize_skip_unused_shards=1; + +drop table if exists data_01071; +drop table if exists dist_01071; +drop table if exists data2_01071; +drop table if exists dist2_01071; +drop table if exists dist2_layer_01071; + +create table data_01071 (key Int) Engine=Null(); + +create table dist_01071 as data_01071 Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01071); +set force_optimize_skip_unused_shards=0; +select * from dist_01071; +set force_optimize_skip_unused_shards=1; +select * from dist_01071; +set force_optimize_skip_unused_shards=2; +select * from dist_01071; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } + +drop table if exists dist_01071; +create table dist_01071 as data_01071 Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01071, key%2); +set force_optimize_skip_unused_shards=0; +select * from dist_01071; +set force_optimize_skip_unused_shards=1; +select * from dist_01071; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +set force_optimize_skip_unused_shards=2; +select * from dist_01071; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +drop table if exists dist_01071; + +-- non deterministic function (i.e. 
rand()) +create table dist_01071 as data_01071 Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01071, key + rand()); +set force_optimize_skip_unused_shards=1; +select * from dist_01071 where key = 0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } + +drop table if exists data_01071; +drop table if exists dist_01071; + +-- Distributed on Distributed +set distributed_group_by_no_merge=1; +set force_optimize_skip_unused_shards=2; +create table data2_01071 (key Int, sub_key Int) Engine=Null(); +create table dist2_layer_01071 as data2_01071 Engine=Distributed(test_cluster_two_shards, currentDatabase(), data2_01071, sub_key%2); +create table dist2_01071 as data2_01071 Engine=Distributed(test_cluster_two_shards, currentDatabase(), dist2_layer_01071, key%2); +select * from dist2_01071 where key = 1; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +set force_optimize_skip_unused_shards_nesting=1; +select * from dist2_01071 where key = 1; +drop table if exists data2_01071; +drop table if exists dist2_layer_01071; +drop table if exists dist2_01071; diff --git a/parser/testdata/01071_in_array/ast.json b/parser/testdata/01071_in_array/ast.json new file mode 100644 index 000000000..48ba30229 --- /dev/null +++ b/parser/testdata/01071_in_array/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2]" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2]" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001814348, + "rows_read": 8, + "bytes_read": 321 + } +} diff --git a/parser/testdata/01071_in_array/metadata.json b/parser/testdata/01071_in_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01071_in_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01071_in_array/query.sql b/parser/testdata/01071_in_array/query.sql new file mode 100644 index 000000000..1f2406605 --- /dev/null +++ b/parser/testdata/01071_in_array/query.sql @@ -0,0 +1,8 @@ +select [1, 2] in [1, 2]; +select (1, 2) in (1, 2); +select (1, 2) in [(1, 3), (1, 2)]; +select [1] in [[1], [2, 3]]; +select NULL in NULL; +select ([1], [2]) in ([NULL], [NULL]); +select ([1], [2]) in (([NULL], [NULL]), ([1], [2])); +select ([1], [2]) in [([NULL], [NULL]), ([1], [2])]; diff --git a/parser/testdata/01071_prohibition_secondary_index_with_old_format_merge_tree/ast.json b/parser/testdata/01071_prohibition_secondary_index_with_old_format_merge_tree/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01071_prohibition_secondary_index_with_old_format_merge_tree/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01071_prohibition_secondary_index_with_old_format_merge_tree/metadata.json b/parser/testdata/01071_prohibition_secondary_index_with_old_format_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01071_prohibition_secondary_index_with_old_format_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01071_prohibition_secondary_index_with_old_format_merge_tree/query.sql 
b/parser/testdata/01071_prohibition_secondary_index_with_old_format_merge_tree/query.sql new file mode 100644 index 000000000..e8c40f77b --- /dev/null +++ b/parser/testdata/01071_prohibition_secondary_index_with_old_format_merge_tree/query.sql @@ -0,0 +1,10 @@ + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE old_syntax_01071_test (date Date, id UInt8) ENGINE = MergeTree(date, id, 8192); +ALTER TABLE old_syntax_01071_test ADD INDEX id_minmax id TYPE minmax GRANULARITY 1; -- { serverError BAD_ARGUMENTS } +CREATE TABLE new_syntax_01071_test (date Date, id UInt8) ENGINE = MergeTree() ORDER BY id; +ALTER TABLE new_syntax_01071_test ADD INDEX id_minmax id TYPE minmax GRANULARITY 1; +DETACH TABLE new_syntax_01071_test; +ATTACH TABLE new_syntax_01071_test; +DROP TABLE IF EXISTS old_syntax_01071_test; +DROP TABLE IF EXISTS new_syntax_01071_test; diff --git a/parser/testdata/01072_drop_temporary_table_with_same_name/ast.json b/parser/testdata/01072_drop_temporary_table_with_same_name/ast.json new file mode 100644 index 000000000..9f34c8f8c --- /dev/null +++ b/parser/testdata/01072_drop_temporary_table_with_same_name/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_to_drop (children 1)" + }, + { + "explain": " Identifier table_to_drop" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001294869, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/01072_drop_temporary_table_with_same_name/metadata.json b/parser/testdata/01072_drop_temporary_table_with_same_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01072_drop_temporary_table_with_same_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01072_drop_temporary_table_with_same_name/query.sql b/parser/testdata/01072_drop_temporary_table_with_same_name/query.sql new file mode 100644 index 000000000..d8d796837 --- /dev/null +++ b/parser/testdata/01072_drop_temporary_table_with_same_name/query.sql @@ -0,0 +1,15 @@ +DROP TEMPORARY TABLE IF EXISTS table_to_drop; +DROP TABLE IF EXISTS table_to_drop; + +CREATE TABLE table_to_drop(x Int8) ENGINE=Log; +CREATE TEMPORARY TABLE table_to_drop(x Int8); +DROP TEMPORARY TABLE table_to_drop; +DROP TEMPORARY TABLE table_to_drop; -- { serverError UNKNOWN_TABLE } +DROP TABLE table_to_drop; +DROP TABLE table_to_drop; -- { serverError UNKNOWN_TABLE } + +CREATE TABLE table_to_drop(x Int8) ENGINE=Log; +CREATE TEMPORARY TABLE table_to_drop(x Int8); +DROP TABLE table_to_drop; +DROP TABLE table_to_drop; +DROP TABLE table_to_drop; -- { serverError UNKNOWN_TABLE } diff --git a/parser/testdata/01072_json_each_row_data_in_square_brackets/ast.json b/parser/testdata/01072_json_each_row_data_in_square_brackets/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01072_json_each_row_data_in_square_brackets/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01072_json_each_row_data_in_square_brackets/metadata.json b/parser/testdata/01072_json_each_row_data_in_square_brackets/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01072_json_each_row_data_in_square_brackets/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01072_json_each_row_data_in_square_brackets/query.sql b/parser/testdata/01072_json_each_row_data_in_square_brackets/query.sql new file mode 100644 index 000000000..ae5e86ec3 --- 
/dev/null +++ b/parser/testdata/01072_json_each_row_data_in_square_brackets/query.sql @@ -0,0 +1,15 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS json_square_brackets; +CREATE TABLE json_square_brackets (id UInt32, name String) ENGINE = Memory; + +INSERT INTO json_square_brackets FORMAT JSONEachRow [{"id": 1, "name": "name1"}, {"id": 2, "name": "name2"}]; + +INSERT INTO json_square_brackets FORMAT JSONEachRow[]; + +INSERT INTO json_square_brackets FORMAT JSONEachRow [ ] ; + +INSERT INTO json_square_brackets FORMAT JSONEachRow ; + +SELECT * FROM json_square_brackets ORDER BY id; +DROP TABLE IF EXISTS json_square_brackets; diff --git a/parser/testdata/01072_nullable_jit/ast.json b/parser/testdata/01072_nullable_jit/ast.json new file mode 100644 index 000000000..cce4a55b9 --- /dev/null +++ b/parser/testdata/01072_nullable_jit/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001029213, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01072_nullable_jit/metadata.json b/parser/testdata/01072_nullable_jit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01072_nullable_jit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01072_nullable_jit/query.sql b/parser/testdata/01072_nullable_jit/query.sql new file mode 100644 index 000000000..ae220fda4 --- /dev/null +++ b/parser/testdata/01072_nullable_jit/query.sql @@ -0,0 +1,20 @@ +SET compile_expressions = 1; + +DROP TABLE IF EXISTS foo; + +CREATE TABLE foo ( + id UInt32, + a Float64, + b Float64, + c Float64, + d Float64 +) Engine = MergeTree() + PARTITION BY id + ORDER BY id; + +INSERT INTO foo VALUES (1, 0.5, 0.2, 0.3, 0.8); + +SELECT divide(sum(a) + sum(b), nullIf(sum(c) + sum(d), 0)) FROM foo; +SELECT divide(sum(a) + sum(b), nullIf(sum(c) + sum(d), 0)) FROM foo; + +DROP TABLE foo; diff --git a/parser/testdata/01072_optimize_skip_unused_shards_const_expr_eval/ast.json b/parser/testdata/01072_optimize_skip_unused_shards_const_expr_eval/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01072_optimize_skip_unused_shards_const_expr_eval/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01072_optimize_skip_unused_shards_const_expr_eval/metadata.json b/parser/testdata/01072_optimize_skip_unused_shards_const_expr_eval/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01072_optimize_skip_unused_shards_const_expr_eval/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01072_optimize_skip_unused_shards_const_expr_eval/query.sql b/parser/testdata/01072_optimize_skip_unused_shards_const_expr_eval/query.sql new file mode 100644 index 000000000..77d09f5b1 --- /dev/null +++ b/parser/testdata/01072_optimize_skip_unused_shards_const_expr_eval/query.sql @@ -0,0 +1,58 @@ +-- Tags: shard + +drop table if exists data_01072; +drop table if exists dist_01072; + +set optimize_skip_unused_shards=1; +set force_optimize_skip_unused_shards=1; + +create table data_01072 (key Int, value Int, str String) Engine=Null(); +create table dist_01072 (key Int, value Int, str String) Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01072, key%2); + +select * from dist_01072 where key=0 and length(str)=0; +select * from dist_01072 where key=0 and str=''; +select * from dist_01072 where 
xxHash64(0)==xxHash64(0) and key=0; +select * from dist_01072 where key=toInt32OrZero(toString(xxHash64(0))); +select * from dist_01072 where key=toInt32(xxHash32(0)); +select * from dist_01072 where key=toInt32(toInt32(xxHash32(0))); +select * from dist_01072 where key=toInt32(toInt32(toInt32(xxHash32(0)))); +select * from dist_01072 where key=value; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01072 where key=toInt32(value); -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01072 where key=value settings force_optimize_skip_unused_shards=0; +select * from dist_01072 where key=toInt32(value) settings force_optimize_skip_unused_shards=0; + +drop table dist_01072; +create table dist_01072 (key Int, value Nullable(Int), str String) Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01072, key%2); +select * from dist_01072 where key=toInt32(xxHash32(0)); +select * from dist_01072 where key=value; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01072 where key=toInt32(value); -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01072 where key=value settings force_optimize_skip_unused_shards=0; +select * from dist_01072 where key=toInt32(value) settings force_optimize_skip_unused_shards=0; + +set allow_suspicious_low_cardinality_types=1; + +drop table dist_01072; +create table dist_01072 (key Int, value LowCardinality(Int), str String) Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01072, key%2); +select * from dist_01072 where key=toInt32(xxHash32(0)); +select * from dist_01072 where key=value; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01072 where key=toInt32(value); -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01072 where key=value settings force_optimize_skip_unused_shards=0; +select * from dist_01072 where key=toInt32(value) settings force_optimize_skip_unused_shards=0; + +drop table dist_01072; +create table dist_01072 (key Int, value LowCardinality(Nullable(Int)), str String) Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01072, key%2); +select * from dist_01072 where key=toInt32(xxHash32(0)); +select * from dist_01072 where key=value; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01072 where key=toInt32(value); -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01072 where key=value settings force_optimize_skip_unused_shards=0; +select * from dist_01072 where key=toInt32(value) settings force_optimize_skip_unused_shards=0; + +-- check virtual columns +drop table data_01072; +drop table dist_01072; +create table data_01072 (key Int) Engine=MergeTree() ORDER BY key; +create table dist_01072 (key Int) Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01072, key); +select * from dist_01072 where key=0 and _part='0' settings force_optimize_skip_unused_shards=2; + +drop table data_01072; +drop table dist_01072; diff --git a/parser/testdata/01072_select_constant_limit/ast.json b/parser/testdata/01072_select_constant_limit/ast.json new file mode 100644 index 000000000..35e160c6d --- /dev/null +++ b/parser/testdata/01072_select_constant_limit/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + 
}, + { + "explain": " Literal UInt64_42 (alias foo)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier foo" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001045502, + "rows_read": 9, + "bytes_read": 321 + } +} diff --git a/parser/testdata/01072_select_constant_limit/metadata.json b/parser/testdata/01072_select_constant_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01072_select_constant_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01072_select_constant_limit/query.sql b/parser/testdata/01072_select_constant_limit/query.sql new file mode 100644 index 000000000..eaaa6a0ac --- /dev/null +++ b/parser/testdata/01072_select_constant_limit/query.sql @@ -0,0 +1 @@ +SELECT 42 AS foo ORDER BY foo LIMIT 2 diff --git a/parser/testdata/01073_attach_if_not_exists/ast.json b/parser/testdata/01073_attach_if_not_exists/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01073_attach_if_not_exists/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01073_attach_if_not_exists/metadata.json b/parser/testdata/01073_attach_if_not_exists/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01073_attach_if_not_exists/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01073_attach_if_not_exists/query.sql b/parser/testdata/01073_attach_if_not_exists/query.sql new file mode 100644 index 000000000..1b507bf47 --- /dev/null +++ b/parser/testdata/01073_attach_if_not_exists/query.sql @@ -0,0 +1,8 @@ + +CREATE TABLE aine (a Int) ENGINE = Log; +ATTACH TABLE aine; -- { serverError TABLE_ALREADY_EXISTS } +ATTACH TABLE IF NOT EXISTS aine; +DETACH TABLE aine; +ATTACH TABLE IF NOT EXISTS aine; +EXISTS TABLE aine; +DROP TABLE aine; diff --git a/parser/testdata/01073_bad_alter_partition/ast.json b/parser/testdata/01073_bad_alter_partition/ast.json new file mode 100644 index 000000000..cc7963fb7 --- /dev/null +++ b/parser/testdata/01073_bad_alter_partition/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery merge_tree (children 1)" + }, + { + "explain": " Identifier merge_tree" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001100084, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01073_bad_alter_partition/metadata.json b/parser/testdata/01073_bad_alter_partition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01073_bad_alter_partition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01073_bad_alter_partition/query.sql b/parser/testdata/01073_bad_alter_partition/query.sql new file mode 100644 index 000000000..e179a64f3 --- /dev/null +++ b/parser/testdata/01073_bad_alter_partition/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS merge_tree; +CREATE TABLE merge_tree (d Date) ENGINE = MergeTree ORDER BY d PARTITION BY d; + +INSERT INTO merge_tree VALUES ('2020-01-01'), ('2020-01-02'), ('2020-01-03'), ('2020-01-04'), ('2020-01-05'), ('2020-01-06'); +SELECT 1, * FROM merge_tree ORDER BY d; + +-- ALTER TABLE merge_tree DROP PARTITION 2020-01-02; -- This does not even parse +-- SELECT 2, * FROM merge_tree; + +ALTER TABLE merge_tree DROP PARTITION 20200103; -- unfortunately, this 
works, but not as the user expected. +SELECT 3, * FROM merge_tree ORDER BY d; + +ALTER TABLE merge_tree DROP PARTITION '20200104'; +SELECT 4, * FROM merge_tree ORDER BY d; + +ALTER TABLE merge_tree DROP PARTITION '2020-01-05'; +SELECT 5, * FROM merge_tree ORDER BY d; + +ALTER TABLE merge_tree DROP PARTITION '202001-06'; -- { serverError CANNOT_PARSE_DATE } +SELECT 6, * FROM merge_tree ORDER BY d; + +DROP TABLE merge_tree; diff --git a/parser/testdata/01073_blockSerializedSize/ast.json b/parser/testdata/01073_blockSerializedSize/ast.json new file mode 100644 index 000000000..fcee1aa02 --- /dev/null +++ b/parser/testdata/01073_blockSerializedSize/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'UInt8'" + }, + { + "explain": " Function blockSerializedSize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001375742, + "rows_read": 8, + "bytes_read": 299 + } +} diff --git a/parser/testdata/01073_blockSerializedSize/metadata.json b/parser/testdata/01073_blockSerializedSize/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01073_blockSerializedSize/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01073_blockSerializedSize/query.sql b/parser/testdata/01073_blockSerializedSize/query.sql new file mode 100644 index 000000000..bfe3c9542 --- /dev/null +++ b/parser/testdata/01073_blockSerializedSize/query.sql @@ -0,0 +1,29 @@ +select 'UInt8', blockSerializedSize(0); +select 'Nullable(UInt8)', blockSerializedSize(toNullable(0)); +select 'UInt32', blockSerializedSize(0xdeadbeaf); +select 'UInt64', blockSerializedSize(0xdeadbeafdead); +select 'Nullable(UInt64)', blockSerializedSize(toNullable(0xdeadbeafdead)); + +select ''; +select 'String', blockSerializedSize('foo'); +select 'FixedString(32)', blockSerializedSize(cast('foo', 'FixedString(32)')); + +select ''; +select 'Enum8', blockSerializedSize(cast('a' as Enum8('a' = 1, 'b' = 2))); + +select ''; +select 'Array', blockSerializedSize(['foo']); + +select ''; +select 'uniqCombinedState(100)', blockSerializedSize(uniqCombinedState(number)) from (select number from system.numbers limit 100); +select 'uniqCombinedState(10000)', blockSerializedSize(uniqCombinedState(number)) from (select number from system.numbers limit 10000); +select 'uniqCombinedState(100000)', blockSerializedSize(uniqCombinedState(number)) from (select number from system.numbers limit 100000); +select 'uniqCombinedState(1000000)', blockSerializedSize(uniqCombinedState(number)) from (select number from system.numbers limit 1000000); +select 'uniqCombinedState(10000000)', blockSerializedSize(uniqCombinedState(number)) from (select number from system.numbers limit 10000000); +select 'uniqCombined64State(10000000)', blockSerializedSize(uniqCombined64State(number)) from (select number from system.numbers limit 10000000); + +select ''; +select 'String,UInt8', blockSerializedSize('foo', 1); + +select ''; +select 'Block(UInt32)', blockSerializedSize(number) from numbers(2); diff --git a/parser/testdata/01073_crlf_end_of_line/ast.json b/parser/testdata/01073_crlf_end_of_line/ast.json new file mode 100644 index
000000000..a2dba7aaf --- /dev/null +++ b/parser/testdata/01073_crlf_end_of_line/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_01073_crlf_end_of_line (children 1)" + }, + { + "explain": " Identifier test_01073_crlf_end_of_line" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001449773, + "rows_read": 2, + "bytes_read": 106 + } +} diff --git a/parser/testdata/01073_crlf_end_of_line/metadata.json b/parser/testdata/01073_crlf_end_of_line/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01073_crlf_end_of_line/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01073_crlf_end_of_line/query.sql b/parser/testdata/01073_crlf_end_of_line/query.sql new file mode 100644 index 000000000..3a2fc30ca --- /dev/null +++ b/parser/testdata/01073_crlf_end_of_line/query.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS test_01073_crlf_end_of_line; +CREATE TABLE test_01073_crlf_end_of_line (value UInt8, word String) ENGINE = MergeTree() ORDER BY value; +INSERT INTO test_01073_crlf_end_of_line VALUES (1, 'hello'), (2, 'world'); +SELECT * FROM test_01073_crlf_end_of_line FORMAT CSV SETTINGS output_format_csv_crlf_end_of_line = 1; +SELECT * FROM test_01073_crlf_end_of_line FORMAT CSV SETTINGS output_format_csv_crlf_end_of_line = 0; +SELECT * FROM test_01073_crlf_end_of_line FORMAT TSV SETTINGS output_format_tsv_crlf_end_of_line = 1; +SELECT * FROM test_01073_crlf_end_of_line FORMAT TSV SETTINGS output_format_tsv_crlf_end_of_line = 0; +DROP TABLE IF EXISTS test_01073_crlf_end_of_line; diff --git a/parser/testdata/01073_grant_and_revoke/ast.json b/parser/testdata/01073_grant_and_revoke/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01073_grant_and_revoke/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01073_grant_and_revoke/metadata.json b/parser/testdata/01073_grant_and_revoke/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01073_grant_and_revoke/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01073_grant_and_revoke/query.sql b/parser/testdata/01073_grant_and_revoke/query.sql new file mode 100644 index 000000000..2600a0082 --- /dev/null +++ b/parser/testdata/01073_grant_and_revoke/query.sql @@ -0,0 +1,41 @@ +-- Tags: no-parallel + +DROP USER IF EXISTS test_user_01073; +DROP ROLE IF EXISTS test_role_01073; + +SELECT 'A'; +CREATE USER test_user_01073; +SHOW CREATE USER test_user_01073; + +SELECT 'B'; +SHOW GRANTS FOR test_user_01073; + +SELECT 'C'; +GRANT SELECT ON db1.* TO test_user_01073; +GRANT SELECT ON db2.table TO test_user_01073; +GRANT SELECT(col1) ON db3.table TO test_user_01073; +GRANT SELECT(col1, col2) ON db4.table TO test_user_01073; +GRANT INSERT ON *.* TO test_user_01073; +GRANT DELETE ON *.* TO test_user_01073; +GRANT SELECT(col1) ON *.* TO test_user_01073; -- { clientError SYNTAX_ERROR } +GRANT SELECT(col1) ON db1.* TO test_user_01073; -- { clientError SYNTAX_ERROR } +GRANT INSERT(col1, col2) ON db1.* TO test_user_01073; -- { clientError SYNTAX_ERROR } +SHOW GRANTS FOR test_user_01073; + +SELECT 'D'; +REVOKE SELECT ON db1.* FROM test_user_01073; +REVOKE SELECT ON db2.table FROM test_user_01073; +REVOKE SELECT ON db3.table FROM test_user_01073; +REVOKE SELECT(col2) ON db4.table FROM test_user_01073; +REVOKE INSERT ON *.* FROM test_user_01073; +SHOW GRANTS FOR test_user_01073; + +SELECT 'E'; 
+CREATE ROLE test_role_01073; +GRANT SELECT ON db1.* TO test_role_01073; +REVOKE SELECT(c1, c2, c3, c4, c5) ON db1.table1 FROM test_role_01073; +REVOKE SELECT(c1) ON db1.table2 FROM test_role_01073; +SHOW GRANTS FOR test_role_01073; + +DROP USER test_user_01073; +DROP ROLE test_role_01073; diff --git a/parser/testdata/01073_show_tables_not_like/ast.json b/parser/testdata/01073_show_tables_not_like/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01073_show_tables_not_like/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01073_show_tables_not_like/metadata.json b/parser/testdata/01073_show_tables_not_like/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01073_show_tables_not_like/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01073_show_tables_not_like/query.sql b/parser/testdata/01073_show_tables_not_like/query.sql new file mode 100644 index 000000000..9ff2afe7f --- /dev/null +++ b/parser/testdata/01073_show_tables_not_like/query.sql @@ -0,0 +1,30 @@ + +SHOW TABLES NOT LIKE '%'; + +SHOW TABLES; +SELECT '---'; +CREATE TABLE test1 (x UInt8) ENGINE = Memory; +CREATE TABLE test2 (x UInt8) ENGINE = Memory; + +SHOW TABLES; +SELECT '--'; +SHOW TABLES LIKE 'tes%'; +SELECT '--'; +SHOW TABLES NOT LIKE 'tes%'; +SELECT '--'; +SHOW TABLES LIKE 'tes%1'; +SELECT '--'; +SHOW TABLES NOT LIKE 'tes%2'; + +SELECT '---'; +SHOW TABLES FROM {CLICKHOUSE_DATABASE:Identifier}; +SELECT '--'; +SHOW TABLES FROM {CLICKHOUSE_DATABASE:Identifier} LIKE 'tes%'; +SELECT '--'; +SHOW TABLES FROM {CLICKHOUSE_DATABASE:Identifier} NOT LIKE 'tes%'; +SELECT '--'; +SHOW TABLES FROM {CLICKHOUSE_DATABASE:Identifier} LIKE 'tes%1'; +SELECT '--'; +SHOW TABLES FROM {CLICKHOUSE_DATABASE:Identifier} NOT LIKE 'tes%2'; + +DROP DATABASE {CLICKHOUSE_DATABASE:Identifier}; diff --git a/parser/testdata/01074_h3_range_check/ast.json b/parser/testdata/01074_h3_range_check/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01074_h3_range_check/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01074_h3_range_check/metadata.json b/parser/testdata/01074_h3_range_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01074_h3_range_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01074_h3_range_check/query.sql b/parser/testdata/01074_h3_range_check/query.sql new file mode 100644 index 000000000..3e3f5a332 --- /dev/null +++ b/parser/testdata/01074_h3_range_check/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest + +SELECT h3EdgeLengthM(100); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT h3HexAreaM2(100); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT h3HexAreaKm2(100); -- { serverError ARGUMENT_OUT_OF_BOUND } diff --git a/parser/testdata/01074_partial_revokes/ast.json b/parser/testdata/01074_partial_revokes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01074_partial_revokes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01074_partial_revokes/metadata.json b/parser/testdata/01074_partial_revokes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01074_partial_revokes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01074_partial_revokes/query.sql b/parser/testdata/01074_partial_revokes/query.sql new file mode 100644 index 000000000..53b8ca591 
--- /dev/null +++ b/parser/testdata/01074_partial_revokes/query.sql @@ -0,0 +1,108 @@ +-- Tags: no-parallel + +DROP USER IF EXISTS test_user_01074; +CREATE USER test_user_01074; + +SELECT '--simple 1'; +GRANT SELECT ON *.* TO test_user_01074; +REVOKE SELECT ON db.* FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; + +SELECT '--cleanup'; +REVOKE SELECT ON *.* FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; + +SELECT '--simple 2'; +GRANT SELECT ON db.* TO test_user_01074; +REVOKE SELECT ON db.table FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; + +SELECT '--cleanup'; +REVOKE SELECT ON *.* FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; + +SELECT '--simple 3'; +GRANT SELECT ON db.table TO test_user_01074; +REVOKE SELECT(col1) ON db.table FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; + +SELECT '--cleanup'; +REVOKE SELECT ON *.* FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; + +SELECT '--complex 1'; +GRANT SELECT ON *.* TO test_user_01074; +REVOKE SELECT(col1, col2) ON db.table FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; + +SELECT '--cleanup'; +REVOKE SELECT ON *.* FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; + +SELECT '--complex 2'; +GRANT SELECT ON *.* TO test_user_01074; +REVOKE SELECT ON db.* FROM test_user_01074; +GRANT SELECT ON db.table TO test_user_01074; +REVOKE SELECT(col1) ON db.table FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; +SELECT * FROM system.grants WHERE user_name = 'test_user_01074' SETTINGS output_format_pretty_color=1 FORMAT Pretty; + +SELECT '--cleanup'; +REVOKE SELECT ON *.* FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; + +SELECT '--revoke 1'; +GRANT SELECT ON *.* TO test_user_01074; +REVOKE SELECT ON db.table FROM test_user_01074; +REVOKE SELECT ON db.* FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; + +SELECT '--cleanup'; +REVOKE SELECT ON *.* FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; + +SELECT '--revoke 2'; +GRANT SELECT ON *.* TO test_user_01074; +REVOKE SELECT ON db.table FROM test_user_01074; +GRANT SELECT ON db.* TO test_user_01074; +SHOW GRANTS FOR test_user_01074; + +SELECT '--cleanup'; +REVOKE SELECT ON *.* FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; + +SELECT '--grant option 1'; +GRANT SELECT ON *.* TO test_user_01074 WITH GRANT OPTION; +REVOKE GRANT OPTION FOR SELECT(col1) ON db.table FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; +SELECT * FROM system.grants WHERE user_name = 'test_user_01074' SETTINGS output_format_pretty_color=1 FORMAT Pretty; + +SELECT '--cleanup'; +REVOKE SELECT ON *.* FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; + +SELECT '--grant option 2'; +GRANT SELECT ON *.* TO test_user_01074 WITH GRANT OPTION; +REVOKE SELECT(col1) ON db.table FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; + +SELECT '--cleanup'; +REVOKE SELECT ON *.* FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; + +SELECT '--grant option 3'; +GRANT SELECT ON *.* TO test_user_01074; +REVOKE GRANT OPTION FOR SELECT(col1) ON db.table FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; + +SELECT '--cleanup'; +REVOKE SELECT ON *.* FROM test_user_01074; +SHOW GRANTS FOR test_user_01074; + +SELECT '--grant option 4'; +GRANT SELECT ON *.* TO test_user_01074; +REVOKE SELECT ON db.table FROM test_user_01074; +GRANT SELECT ON db.* TO test_user_01074 WITH GRANT OPTION; +SHOW GRANTS FOR test_user_01074; + +DROP USER test_user_01074; diff --git a/parser/testdata/01075_allowed_client_hosts/ast.json 
b/parser/testdata/01075_allowed_client_hosts/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01075_allowed_client_hosts/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01075_allowed_client_hosts/metadata.json b/parser/testdata/01075_allowed_client_hosts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01075_allowed_client_hosts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01075_allowed_client_hosts/query.sql b/parser/testdata/01075_allowed_client_hosts/query.sql new file mode 100644 index 000000000..8c25d45f4 --- /dev/null +++ b/parser/testdata/01075_allowed_client_hosts/query.sql @@ -0,0 +1,58 @@ +-- Tags: no-fasttest, no-parallel + +DROP USER IF EXISTS test_user_01075, test_user_01075_x, test_user_01075_x@localhost, test_user_01075_x@'192.168.23.15'; + +CREATE USER test_user_01075; +SHOW CREATE USER test_user_01075; + +ALTER USER test_user_01075 HOST ANY; +SHOW CREATE USER test_user_01075; + +ALTER USER test_user_01075 HOST NONE; +SHOW CREATE USER test_user_01075; + +ALTER USER test_user_01075 HOST LOCAL; +SHOW CREATE USER test_user_01075; + +ALTER USER test_user_01075 HOST IP '192.168.23.15'; +SHOW CREATE USER test_user_01075; + +ALTER USER test_user_01075 HOST IP '2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d'; +SHOW CREATE USER test_user_01075; + +ALTER USER test_user_01075 ADD HOST IP '127.0.0.1'; +SHOW CREATE USER test_user_01075; + +ALTER USER test_user_01075 DROP HOST IP '2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d'; +SHOW CREATE USER test_user_01075; + +ALTER USER test_user_01075 DROP HOST NAME 'localhost'; +SHOW CREATE USER test_user_01075; + +ALTER USER test_user_01075 HOST LIKE '@.somesite.com'; +SHOW CREATE USER test_user_01075; + +ALTER USER test_user_01075 HOST REGEXP '.*\.anothersite\.com'; +SHOW CREATE USER test_user_01075; + +ALTER USER test_user_01075 HOST REGEXP '.*\.anothersite\.com', '.*\.anothersite\.org'; +SHOW CREATE USER test_user_01075; + +ALTER USER test_user_01075 HOST REGEXP '.*\.anothersite2\.com', REGEXP '.*\.anothersite2\.org'; +SHOW CREATE USER test_user_01075; + +ALTER USER test_user_01075 HOST REGEXP '.*\.anothersite3\.com' HOST REGEXP '.*\.anothersite3\.org'; +SHOW CREATE USER test_user_01075; + +DROP USER test_user_01075; + +CREATE USER test_user_01075_x@localhost; +SHOW CREATE USER test_user_01075_x@localhost; + +ALTER USER test_user_01075_x@localhost RENAME TO test_user_01075_x@'%'; +SHOW CREATE USER test_user_01075_x; + +ALTER USER test_user_01075_x RENAME TO test_user_01075_x@'192.168.23.15'; +SHOW CREATE USER 'test_user_01075_x@192.168.23.15'; + +DROP USER 'test_user_01075_x@192.168.23.15'; diff --git a/parser/testdata/01075_in_arrays_enmk/ast.json b/parser/testdata/01075_in_arrays_enmk/ast.json new file mode 100644 index 000000000..d87155d81 --- /dev/null +++ b/parser/testdata/01075_in_arrays_enmk/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2]" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001678151, + "rows_read": 8, + 
"bytes_read": 303 + } +} diff --git a/parser/testdata/01075_in_arrays_enmk/metadata.json b/parser/testdata/01075_in_arrays_enmk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01075_in_arrays_enmk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01075_in_arrays_enmk/query.sql b/parser/testdata/01075_in_arrays_enmk/query.sql new file mode 100644 index 000000000..7dfd75932 --- /dev/null +++ b/parser/testdata/01075_in_arrays_enmk/query.sql @@ -0,0 +1,8 @@ +select 1 in [1, 2]; +select 3 in [1, 2]; +select 1 in 1; +select 3 in 1; +select (1) in [1, 2]; +select (3) in [1, 2]; +select [1] in [1, 2]; +select [3] in [1, 2]; diff --git a/parser/testdata/01076_array_join_prewhere_const_folding/ast.json b/parser/testdata/01076_array_join_prewhere_const_folding/ast.json new file mode 100644 index 000000000..dfb69b3a6 --- /dev/null +++ b/parser/testdata/01076_array_join_prewhere_const_folding/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00139283, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01076_array_join_prewhere_const_folding/metadata.json b/parser/testdata/01076_array_join_prewhere_const_folding/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01076_array_join_prewhere_const_folding/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01076_array_join_prewhere_const_folding/query.sql b/parser/testdata/01076_array_join_prewhere_const_folding/query.sql new file mode 100644 index 000000000..a6da51f9b --- /dev/null +++ b/parser/testdata/01076_array_join_prewhere_const_folding/query.sql @@ -0,0 +1,8 @@ +SET log_queries = 1; +SELECT 1 LIMIT 0; +SYSTEM FLUSH LOGS query_log; + +SELECT * FROM system.query_log +PREWHERE ProfileEvents['Query'] > 0 and current_database = currentDatabase() + +LIMIT 0; diff --git a/parser/testdata/01076_predicate_optimizer_with_view/ast.json b/parser/testdata/01076_predicate_optimizer_with_view/ast.json new file mode 100644 index 000000000..e4c3dabef --- /dev/null +++ b/parser/testdata/01076_predicate_optimizer_with_view/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001240786, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01076_predicate_optimizer_with_view/metadata.json b/parser/testdata/01076_predicate_optimizer_with_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01076_predicate_optimizer_with_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01076_predicate_optimizer_with_view/query.sql b/parser/testdata/01076_predicate_optimizer_with_view/query.sql new file mode 100644 index 000000000..6b035e280 --- /dev/null +++ b/parser/testdata/01076_predicate_optimizer_with_view/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS test; +DROP TABLE IF EXISTS test_view; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE test(date Date, id Int8, name String, value Int64) ENGINE = MergeTree(date, (id, date), 8192); +CREATE VIEW test_view AS SELECT * FROM test; + +SET enable_optimize_predicate_expression = 1; + +-- Optimize predicate expression 
with view +EXPLAIN SYNTAX SELECT * FROM test_view WHERE id = 1; +EXPLAIN SYNTAX SELECT * FROM test_view WHERE id = 2; +EXPLAIN SYNTAX SELECT id FROM test_view WHERE id = 1; +EXPLAIN SYNTAX SELECT s.id FROM test_view AS s WHERE s.id = 1; + +SELECT * FROM (SELECT toUInt64(b), sum(id) AS b FROM test) WHERE `toUInt64(sum(id))` = 3; -- { serverError UNKNOWN_IDENTIFIER } + +DROP TABLE IF EXISTS test; +DROP TABLE IF EXISTS test_view; diff --git a/parser/testdata/01076_range_reader_segfault/ast.json b/parser/testdata/01076_range_reader_segfault/ast.json new file mode 100644 index 000000000..a42714007 --- /dev/null +++ b/parser/testdata/01076_range_reader_segfault/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001277585, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01076_range_reader_segfault/metadata.json b/parser/testdata/01076_range_reader_segfault/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01076_range_reader_segfault/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01076_range_reader_segfault/query.sql b/parser/testdata/01076_range_reader_segfault/query.sql new file mode 100644 index 000000000..86a568ff6 --- /dev/null +++ b/parser/testdata/01076_range_reader_segfault/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS t; + +CREATE TABLE t (a Int, b Int, c Int) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t SELECT number, number * 2, number * 3 FROM numbers(100); + +SELECT count() FROM t PREWHERE NOT ignore(a) WHERE b > 0; +SELECT sum(a) FROM t PREWHERE isNotNull(a) WHERE isNotNull(b) AND c > 0; + +DROP TABLE t; diff --git a/parser/testdata/01077_yet_another_prewhere_test/ast.json b/parser/testdata/01077_yet_another_prewhere_test/ast.json new file mode 100644 index 000000000..5ace0e5f7 --- /dev/null +++ b/parser/testdata/01077_yet_another_prewhere_test/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t50 (children 1)" + }, + { + "explain": " Identifier t50" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001419937, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01077_yet_another_prewhere_test/metadata.json b/parser/testdata/01077_yet_another_prewhere_test/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01077_yet_another_prewhere_test/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01077_yet_another_prewhere_test/query.sql b/parser/testdata/01077_yet_another_prewhere_test/query.sql new file mode 100644 index 000000000..eac0f2a5f --- /dev/null +++ b/parser/testdata/01077_yet_another_prewhere_test/query.sql @@ -0,0 +1,12 @@ +drop table if exists t50; + +create table t50 (a Int, b Int, s String) engine = MergeTree order by a settings index_granularity = 50, index_granularity_bytes=1000, min_index_granularity_bytes=500; + +-- some magic to satisfy conditions to run optimizations in MergeTreeRangeReader +insert into t50 select 0, 1, repeat('a', 10000); +insert into t50 select number, multiIf(number < 5, 1, number < 50, 0, number < 55, 1, number < 100, 0, number < 105, 1, 0), '' from numbers(150); +optimize table t50 final; + +select a, b from t50 prewhere b = 1 order by a; + +drop table 
t50; diff --git a/parser/testdata/01078_bloom_filter_operator_not_has/ast.json b/parser/testdata/01078_bloom_filter_operator_not_has/ast.json new file mode 100644 index 000000000..0fe407f42 --- /dev/null +++ b/parser/testdata/01078_bloom_filter_operator_not_has/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery bloom_filter_not_has (children 1)" + }, + { + "explain": " Identifier bloom_filter_not_has" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001225349, + "rows_read": 2, + "bytes_read": 92 + } +} diff --git a/parser/testdata/01078_bloom_filter_operator_not_has/metadata.json b/parser/testdata/01078_bloom_filter_operator_not_has/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01078_bloom_filter_operator_not_has/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01078_bloom_filter_operator_not_has/query.sql b/parser/testdata/01078_bloom_filter_operator_not_has/query.sql new file mode 100644 index 000000000..20eabdb08 --- /dev/null +++ b/parser/testdata/01078_bloom_filter_operator_not_has/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS bloom_filter_not_has; + +CREATE TABLE bloom_filter_not_has (ary Array(LowCardinality(Nullable(String))), d Date, INDEX idx_ary ary TYPE bloom_filter(0.01) GRANULARITY 1024) ENGINE = MergeTree() PARTITION BY d ORDER BY d; + +INSERT INTO bloom_filter_not_has VALUES ([], '2020-02-27') (['o','a'], '2020-02-27') (['e','a','b'], '2020-02-27'); +INSERT INTO bloom_filter_not_has VALUES (['o','a','b','c'], '2020-02-27') (['e','a','b','c','d'], '2020-02-27'); + +SELECT count() FROM bloom_filter_not_has WHERE has(ary, 'a'); +SELECT count() FROM bloom_filter_not_has WHERE NOT has(ary, 'a'); + +SELECT count() FROM bloom_filter_not_has WHERE has(ary, 'b'); +SELECT * FROM bloom_filter_not_has WHERE NOT has(ary, 'b') ORDER BY ary; + +SELECT count() FROM bloom_filter_not_has WHERE has(ary, 'c'); +SELECT * FROM bloom_filter_not_has WHERE NOT has(ary, 'c') ORDER BY ary; + +SELECT count() FROM bloom_filter_not_has WHERE has(ary, 'd'); +SELECT * FROM bloom_filter_not_has WHERE NOT has(ary, 'd') ORDER BY ary; + +SELECT count() FROM bloom_filter_not_has WHERE has(ary, 'f'); +SELECT * FROM bloom_filter_not_has WHERE NOT has(ary, 'f') ORDER BY ary; + +DROP TABLE IF EXISTS bloom_filter_not_has; diff --git a/parser/testdata/01078_merge_tree_read_one_thread/ast.json b/parser/testdata/01078_merge_tree_read_one_thread/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01078_merge_tree_read_one_thread/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01078_merge_tree_read_one_thread/metadata.json b/parser/testdata/01078_merge_tree_read_one_thread/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01078_merge_tree_read_one_thread/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01078_merge_tree_read_one_thread/query.sql b/parser/testdata/01078_merge_tree_read_one_thread/query.sql new file mode 100644 index 000000000..166f44df2 --- /dev/null +++ b/parser/testdata/01078_merge_tree_read_one_thread/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-object-storage +-- Output slightly different plan +drop table if exists t; + +create table t (a Int, b Int) engine = MergeTree order by (a, b) settings index_granularity = 400; + +insert into t select 0, 0 from numbers(50); +insert into t select 0, 1 from 
numbers(350); +insert into t select 1, 2 from numbers(400); +insert into t select 2, 2 from numbers(400); +insert into t select 3, 0 from numbers(100); + +select sleep(1) format Null; -- sleep a bit to wait for possible merges after insert + +set max_threads = 1; +optimize table t final; + +select sum(a) from t where a in (0, 3) and b = 0; + +drop table t; diff --git a/parser/testdata/01079_alter_default_zookeeper_long/ast.json b/parser/testdata/01079_alter_default_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01079_alter_default_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01079_alter_default_zookeeper_long/metadata.json b/parser/testdata/01079_alter_default_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01079_alter_default_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01079_alter_default_zookeeper_long/query.sql b/parser/testdata/01079_alter_default_zookeeper_long/query.sql new file mode 100644 index 000000000..36f5dbb8b --- /dev/null +++ b/parser/testdata/01079_alter_default_zookeeper_long/query.sql @@ -0,0 +1,62 @@ +-- Tags: long, zookeeper + +DROP TABLE IF EXISTS alter_default; + +CREATE TABLE alter_default +( + date Date, + key UInt64 +) +ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_01079/alter_default', '1') +ORDER BY key; + +INSERT INTO alter_default select toDate('2020-01-05'), number from system.numbers limit 100; + +-- Cannot add column without type +ALTER TABLE alter_default ADD COLUMN value DEFAULT '10'; --{serverError BAD_ARGUMENTS} + +ALTER TABLE alter_default ADD COLUMN value String DEFAULT '10'; + +SHOW CREATE TABLE alter_default; + +SELECT sum(cast(value as UInt64)) FROM alter_default; + +ALTER TABLE alter_default MODIFY COLUMN value UInt64; + +SHOW CREATE TABLE alter_default; + +ALTER TABLE alter_default MODIFY COLUMN value UInt64 DEFAULT 10; + +SHOW CREATE TABLE alter_default; + +SELECT sum(value) from alter_default; + +ALTER TABLE alter_default MODIFY COLUMN value DEFAULT 100; + +SHOW CREATE TABLE alter_default; + +ALTER TABLE alter_default MODIFY COLUMN value UInt16 DEFAULT 100; + +SHOW CREATE TABLE alter_default; + +SELECT sum(value) from alter_default; + +ALTER TABLE alter_default MODIFY COLUMN value UInt8 DEFAULT 10; + +SHOW CREATE TABLE alter_default; + +ALTER TABLE alter_default ADD COLUMN bad_column UInt8 DEFAULT 'q'; --{serverError CANNOT_PARSE_TEXT} + +ALTER TABLE alter_default ADD COLUMN better_column UInt8 DEFAULT '1'; + +SHOW CREATE TABLE alter_default; + +ALTER TABLE alter_default ADD COLUMN other_date String DEFAULT '0'; + +ALTER TABLE alter_default MODIFY COLUMN other_date DateTime; --{serverError CANNOT_PARSE_DATETIME} + +ALTER TABLE alter_default MODIFY COLUMN other_date DEFAULT 1; + +SHOW CREATE TABLE alter_default; + +DROP TABLE IF EXISTS alter_default; diff --git a/parser/testdata/01079_bit_operations_using_bitset/ast.json b/parser/testdata/01079_bit_operations_using_bitset/ast.json new file mode 100644 index 000000000..cb656367c --- /dev/null +++ b/parser/testdata/01079_bit_operations_using_bitset/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + 
"explain": " Function IPv6NumToString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitAnd (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function IPv6StringToNum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2001:0db8:85a3:8d3a:b2da:8a2e:0370:7334'" + }, + { + "explain": " Function IPv6StringToNum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'ffff:ffff:ffff:0000:0000:0000:0000:0000'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.00154548, + "rows_read": 19, + "bytes_read": 857 + } +} diff --git a/parser/testdata/01079_bit_operations_using_bitset/metadata.json b/parser/testdata/01079_bit_operations_using_bitset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01079_bit_operations_using_bitset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01079_bit_operations_using_bitset/query.sql b/parser/testdata/01079_bit_operations_using_bitset/query.sql new file mode 100644 index 000000000..1d9683d43 --- /dev/null +++ b/parser/testdata/01079_bit_operations_using_bitset/query.sql @@ -0,0 +1,16 @@ +SELECT IPv6NumToString(bitAnd(IPv6StringToNum('2001:0db8:85a3:8d3a:b2da:8a2e:0370:7334'), IPv6StringToNum('ffff:ffff:ffff:0000:0000:0000:0000:0000'))) FROM system.numbers LIMIT 10; +SELECT IPv6NumToString(bitAnd(materialize(IPv6StringToNum('2001:0db8:85a3:8d3a:b2da:8a2e:0370:7334')), IPv6StringToNum('ffff:ffff:ffff:0000:0000:0000:0000:0000'))) FROM system.numbers LIMIT 10; +SELECT IPv6NumToString(bitAnd(IPv6StringToNum('2001:0db8:85a3:8d3a:b2da:8a2e:0370:7334'), materialize(IPv6StringToNum('ffff:ffff:ffff:0000:0000:0000:0000:0000')))) FROM system.numbers LIMIT 10; +SELECT IPv6NumToString(bitAnd(IPv6StringToNum('2001:0db8:85a3:8d3a:b2da:8a2e:0370:7334'), materialize(IPv6StringToNum('ffff:ffff:ffff:0000:0000:0000:0000:0000')))) FROM system.numbers LIMIT 10; + +SELECT IPv6NumToString(bitOr(IPv6StringToNum('2001:0db8:85a3:8d3a:b2da:8a2e:0370:7334'), IPv6StringToNum('2ff0:0000:0000:0000:0000:0000:0000:0000'))) FROM system.numbers LIMIT 10; +SELECT IPv6NumToString(bitOr(materialize(IPv6StringToNum('2001:0db8:85a3:8d3a:b2da:8a2e:0370:7334')), IPv6StringToNum('2ff0:0000:0000:0000:0000:0000:0000:0000'))) FROM system.numbers LIMIT 10; +SELECT IPv6NumToString(bitOr(IPv6StringToNum('2001:0db8:85a3:8d3a:b2da:8a2e:0370:7334'), materialize(IPv6StringToNum('2ff0:0000:0000:0000:0000:0000:0000:0000')))) FROM system.numbers LIMIT 10; +SELECT IPv6NumToString(bitOr(IPv6StringToNum('2001:0db8:85a3:8d3a:b2da:8a2e:0370:7334'), materialize(IPv6StringToNum('2ff0:0000:0000:0000:0000:0000:0000:0000')))) FROM system.numbers LIMIT 10; + +SELECT IPv6NumToString(bitXor(IPv6StringToNum('2001:0db8:85a3:8d3a:b2da:8a2e:0370:7334'), IPv6StringToNum('fe80::1ff:fe23:4567:890a'))) FROM system.numbers LIMIT 10; +SELECT IPv6NumToString(bitXor(materialize(IPv6StringToNum('2001:0db8:85a3:8d3a:b2da:8a2e:0370:7334')), IPv6StringToNum('fe80::1ff:fe23:4567:890a'))) FROM system.numbers LIMIT 10; +SELECT IPv6NumToString(bitXor(IPv6StringToNum('2001:0db8:85a3:8d3a:b2da:8a2e:0370:7334'), 
materialize(IPv6StringToNum('fe80::1ff:fe23:4567:890a')))) FROM system.numbers LIMIT 10; +SELECT IPv6NumToString(bitXor(IPv6StringToNum('2001:0db8:85a3:8d3a:b2da:8a2e:0370:7334'), materialize(IPv6StringToNum('fe80::1ff:fe23:4567:890a')))) FROM system.numbers LIMIT 10; + +SELECT IPv6NumToString(bitNot(IPv6StringToNum('2001:0db8:85a3:8d3a:b2da:8a2e:0370:7334'))) FROM system.numbers LIMIT 10; diff --git a/parser/testdata/01079_new_range_reader_segfault/ast.json b/parser/testdata/01079_new_range_reader_segfault/ast.json new file mode 100644 index 000000000..fbfeb2eb6 --- /dev/null +++ b/parser/testdata/01079_new_range_reader_segfault/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00118218, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01079_new_range_reader_segfault/metadata.json b/parser/testdata/01079_new_range_reader_segfault/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01079_new_range_reader_segfault/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01079_new_range_reader_segfault/query.sql b/parser/testdata/01079_new_range_reader_segfault/query.sql new file mode 100644 index 000000000..804d4316e --- /dev/null +++ b/parser/testdata/01079_new_range_reader_segfault/query.sql @@ -0,0 +1,11 @@ +drop table if exists t; + +create table t (a Int) engine = MergeTree order by a; + +-- some magic to satisfy conditions to run optimizations in MergeTreeRangeReader +insert into t select number < 20 ? 0 : 1 from numbers(50); +alter table t add column s String default 'foo'; + +select s from t prewhere a != 1 where rowNumberInBlock() % 2 = 0 limit 1; + +drop table t; diff --git a/parser/testdata/01079_order_by_pk/ast.json b/parser/testdata/01079_order_by_pk/ast.json new file mode 100644 index 000000000..45e00bfdb --- /dev/null +++ b/parser/testdata/01079_order_by_pk/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mt_pk (children 1)" + }, + { + "explain": " Identifier mt_pk" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001315492, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/01079_order_by_pk/metadata.json b/parser/testdata/01079_order_by_pk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01079_order_by_pk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01079_order_by_pk/query.sql b/parser/testdata/01079_order_by_pk/query.sql new file mode 100644 index 000000000..b207e7584 --- /dev/null +++ b/parser/testdata/01079_order_by_pk/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS mt_pk; + +CREATE TABLE mt_pk ENGINE = MergeTree PARTITION BY d ORDER BY x SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi' +AS SELECT toDate(number % 32) AS d, number AS x FROM system.numbers LIMIT 1000010; +SELECT x FROM mt_pk ORDER BY x ASC LIMIT 1000000, 1; + +DROP TABLE mt_pk; diff --git a/parser/testdata/01079_reinterpret_as_fixed_string/ast.json b/parser/testdata/01079_reinterpret_as_fixed_string/ast.json new file mode 100644 index 000000000..d25535541 --- /dev/null +++ b/parser/testdata/01079_reinterpret_as_fixed_string/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": 
"String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function reinterpretAsFixedString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3735928559" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001328636, + "rows_read": 9, + "bytes_read": 376 + } +} diff --git a/parser/testdata/01079_reinterpret_as_fixed_string/metadata.json b/parser/testdata/01079_reinterpret_as_fixed_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01079_reinterpret_as_fixed_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01079_reinterpret_as_fixed_string/query.sql b/parser/testdata/01079_reinterpret_as_fixed_string/query.sql new file mode 100644 index 000000000..0571985b8 --- /dev/null +++ b/parser/testdata/01079_reinterpret_as_fixed_string/query.sql @@ -0,0 +1 @@ +select toTypeName(reinterpretAsFixedString(0xdeadbeef)); diff --git a/parser/testdata/01080_check_for_error_incorrect_size_of_nested_column/ast.json b/parser/testdata/01080_check_for_error_incorrect_size_of_nested_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01080_check_for_error_incorrect_size_of_nested_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01080_check_for_error_incorrect_size_of_nested_column/metadata.json b/parser/testdata/01080_check_for_error_incorrect_size_of_nested_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01080_check_for_error_incorrect_size_of_nested_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01080_check_for_error_incorrect_size_of_nested_column/query.sql b/parser/testdata/01080_check_for_error_incorrect_size_of_nested_column/query.sql new file mode 100644 index 000000000..d7b05bb7d --- /dev/null +++ b/parser/testdata/01080_check_for_error_incorrect_size_of_nested_column/query.sql @@ -0,0 +1,33 @@ + +drop table if exists {CLICKHOUSE_DATABASE:Identifier}.test_table_01080; +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.test_table_01080 (dim_key Int64, dim_id String) ENGINE = MergeTree Order by (dim_key); +insert into {CLICKHOUSE_DATABASE:Identifier}.test_table_01080 values(1,'test1'); + +drop DICTIONARY if exists {CLICKHOUSE_DATABASE:Identifier}.test_dict_01080; + +CREATE DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.test_dict_01080 ( dim_key Int64, dim_id String ) +PRIMARY KEY dim_key +source(clickhouse(host 'localhost' port tcpPort() user 'default' password '' db currentDatabase() table 'test_table_01080')) +LIFETIME(MIN 0 MAX 0) LAYOUT(complex_key_hashed()); + +SELECT dictGetString({CLICKHOUSE_DATABASE:String} || '.test_dict_01080', 'dim_id', tuple(toInt64(1))); + +SELECT dictGetString({CLICKHOUSE_DATABASE:String} || '.test_dict_01080', 'dim_id', tuple(toInt64(0))); + +select dictGetString({CLICKHOUSE_DATABASE:String} || '.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(0)) as x); + +select dictGetString({CLICKHOUSE_DATABASE:String} || '.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(1)) as x); + +select dictGetString({CLICKHOUSE_DATABASE:String} || 
'.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(number)) as x from numbers(5)); + +select dictGetString({CLICKHOUSE_DATABASE:String} || '.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(rand64()*0)) as x); + +select dictGetString({CLICKHOUSE_DATABASE:String} || '.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(blockSize()=0)) as x); + +select dictGetString({CLICKHOUSE_DATABASE:String} || '.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(materialize(0))) as x); + +select dictGetString({CLICKHOUSE_DATABASE:String} || '.test_dict_01080', 'dim_id', x) from (select tuple(toInt64(materialize(1))) as x); + + +drop DICTIONARY {CLICKHOUSE_DATABASE:Identifier}.test_dict_01080; +drop table {CLICKHOUSE_DATABASE:Identifier}.test_table_01080; diff --git a/parser/testdata/01080_engine_merge_prewhere_tupleelement_error/ast.json b/parser/testdata/01080_engine_merge_prewhere_tupleelement_error/ast.json new file mode 100644 index 000000000..7ba105e99 --- /dev/null +++ b/parser/testdata/01080_engine_merge_prewhere_tupleelement_error/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery A1 (children 1)" + }, + { + "explain": " Identifier A1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001027207, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01080_engine_merge_prewhere_tupleelement_error/metadata.json b/parser/testdata/01080_engine_merge_prewhere_tupleelement_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01080_engine_merge_prewhere_tupleelement_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01080_engine_merge_prewhere_tupleelement_error/query.sql b/parser/testdata/01080_engine_merge_prewhere_tupleelement_error/query.sql new file mode 100644 index 000000000..3a6c6b36d --- /dev/null +++ b/parser/testdata/01080_engine_merge_prewhere_tupleelement_error/query.sql @@ -0,0 +1,16 @@ +drop table if exists A1; +drop table if exists A_M; +CREATE TABLE A1( a DateTime ) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE A_M as A1 ENGINE = Merge(currentDatabase(), '^A1$'); +insert into A1(a) select now(); + +set optimize_move_to_prewhere=0; + +SELECT tupleElement(arrayJoin([(1, 1)]), 1) FROM A_M PREWHERE tupleElement((1, 1), 1) =1; + +SELECT tupleElement(arrayJoin([(1, 1)]), 1) FROM A_M WHERE tupleElement((1, 1), 1) =1; + +SELECT tupleElement(arrayJoin([(1, 1)]), 1) FROM A1 PREWHERE tupleElement((1, 1), 1) =1; + +drop table A1; +drop table A_M; diff --git a/parser/testdata/01080_join_get_null/ast.json b/parser/testdata/01080_join_get_null/ast.json new file mode 100644 index 000000000..cfedc0ca0 --- /dev/null +++ b/parser/testdata/01080_join_get_null/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_joinGet (children 1)" + }, + { + "explain": " Identifier test_joinGet" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001495223, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/01080_join_get_null/metadata.json b/parser/testdata/01080_join_get_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01080_join_get_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01080_join_get_null/query.sql b/parser/testdata/01080_join_get_null/query.sql new file mode 100644 index 
000000000..9f782452d --- /dev/null +++ b/parser/testdata/01080_join_get_null/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS test_joinGet; + +CREATE TABLE test_joinGet(user_id Nullable(Int32), name String) Engine = Join(ANY, LEFT, user_id); + +INSERT INTO test_joinGet VALUES (2, 'a'), (6, 'b'), (10, 'c'), (null, 'd'); + +SELECT toNullable(toInt32(2)) user_id WHERE joinGet(test_joinGet, 'name', user_id) != ''; + +-- If the JOIN keys are Nullable fields, the rows where at least one of the keys has the value NULL are not joined. +SELECT cast(null AS Nullable(Int32)) user_id WHERE joinGet(test_joinGet, 'name', user_id) != ''; + +DROP TABLE test_joinGet; diff --git a/parser/testdata/01081_PartialSortingTransform_full_column/ast.json b/parser/testdata/01081_PartialSortingTransform_full_column/ast.json new file mode 100644 index 000000000..d6f52d196 --- /dev/null +++ b/parser/testdata/01081_PartialSortingTransform_full_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_01081 (children 1)" + }, + { + "explain": " Identifier test_01081" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001080749, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01081_PartialSortingTransform_full_column/metadata.json b/parser/testdata/01081_PartialSortingTransform_full_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01081_PartialSortingTransform_full_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01081_PartialSortingTransform_full_column/query.sql b/parser/testdata/01081_PartialSortingTransform_full_column/query.sql new file mode 100644 index 000000000..d891d8517 --- /dev/null +++ b/parser/testdata/01081_PartialSortingTransform_full_column/query.sql @@ -0,0 +1,26 @@ +drop table if exists test_01081; + +create table test_01081 (key Int) engine=MergeTree() order by key; +insert into test_01081 select * from system.numbers limit 10; + +select 1 from remote('127.{1,2}', currentDatabase(), test_01081) lhs join system.one as rhs on rhs.dummy = 1 order by 1 +SETTINGS enable_analyzer = 0; -- { serverError INVALID_JOIN_ON_EXPRESSION } + + +select 1 from remote('127.{1,2}', currentDatabase(), test_01081) lhs join system.one as rhs on rhs.dummy = 1 order by 1 +SETTINGS enable_analyzer = 1; + +-- With multiple blocks triggers: +-- +-- Code: 171. DB::Exception: Received from localhost:9000. DB::Exception: Received from 127.2:9000. DB::Exception: Block structure mismatch in function connect between PartialSortingTransform and LazyOutputFormat stream: different columns: +-- _dummy Int Int32(size = 0), 1 UInt8 UInt8(size = 0) +-- _dummy Int Int32(size = 0), 1 UInt8 Const(size = 0, UInt8(size = 1)). 
+ +insert into test_01081 select * from system.numbers limit 10; +select 1 from remote('127.{1,2}', currentDatabase(), test_01081) lhs join system.one as rhs on rhs.dummy = 1 order by 1 +SETTINGS enable_analyzer = 0; -- { serverError INVALID_JOIN_ON_EXPRESSION } + +select 1 from remote('127.{1,2}', currentDatabase(), test_01081) lhs join system.one as rhs on rhs.dummy = 1 order by 1 +SETTINGS enable_analyzer = 1; + +drop table if exists test_01081; diff --git a/parser/testdata/01081_demangle/ast.json b/parser/testdata/01081_demangle/ast.json new file mode 100644 index 000000000..7df75baa0 --- /dev/null +++ b/parser/testdata/01081_demangle/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001229136, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01081_demangle/metadata.json b/parser/testdata/01081_demangle/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01081_demangle/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01081_demangle/query.sql b/parser/testdata/01081_demangle/query.sql new file mode 100644 index 000000000..8fa540c2a --- /dev/null +++ b/parser/testdata/01081_demangle/query.sql @@ -0,0 +1,2 @@ +SET allow_introspection_functions = 1; +SELECT demangle('_ZNKSt3__18functionIFvvEEclEv'); diff --git a/parser/testdata/01081_keywords_formatting/ast.json b/parser/testdata/01081_keywords_formatting/ast.json new file mode 100644 index 000000000..cc92d83be --- /dev/null +++ b/parser/testdata/01081_keywords_formatting/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias interval)" + }, + { + "explain": " Identifier interval" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001311276, + "rows_read": 8, + "bytes_read": 307 + } +} diff --git a/parser/testdata/01081_keywords_formatting/metadata.json b/parser/testdata/01081_keywords_formatting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01081_keywords_formatting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01081_keywords_formatting/query.sql b/parser/testdata/01081_keywords_formatting/query.sql new file mode 100644 index 000000000..6044f383c --- /dev/null +++ b/parser/testdata/01081_keywords_formatting/query.sql @@ -0,0 +1 @@ +SELECT (1 AS `interval`) + `interval`; diff --git a/parser/testdata/01082_bit_test_out_of_bound/ast.json b/parser/testdata/01082_bit_test_out_of_bound/ast.json new file mode 100644 index 000000000..67e028840 --- /dev/null +++ b/parser/testdata/01082_bit_test_out_of_bound/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '-- bitTestAny'" + } + ], + + "rows": 5, + + "statistics": + 
{ + "elapsed": 0.001172411, + "rows_read": 5, + "bytes_read": 184 + } +} diff --git a/parser/testdata/01082_bit_test_out_of_bound/metadata.json b/parser/testdata/01082_bit_test_out_of_bound/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01082_bit_test_out_of_bound/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01082_bit_test_out_of_bound/query.sql b/parser/testdata/01082_bit_test_out_of_bound/query.sql new file mode 100644 index 000000000..e741cb249 --- /dev/null +++ b/parser/testdata/01082_bit_test_out_of_bound/query.sql @@ -0,0 +1,13 @@ +SELECT '-- bitTestAny'; +SELECT number, bitTestAny(toUInt8(1 + 4 + 16 + 64), number) FROM numbers(8); +SELECT number, bitTestAny(toUInt8(1 + 4 + 16 + 64), number) FROM numbers(8, 16); -- { serverError PARAMETER_OUT_OF_BOUND } + +SELECT '-- bitTestAll'; +SELECT number, bitTestAll(toUInt8(1 + 4 + 16 + 64), number) FROM numbers(8); +SELECT number, bitTestAll(toUInt8(1 + 4 + 16 + 64), number) FROM numbers(8, 16); -- { serverError PARAMETER_OUT_OF_BOUND } + +SELECT '-- bitTest'; +SELECT number, bitTest(toUInt8(1 + 4 + 16 + 64), number) FROM numbers(8); +SELECT number, bitTest(toUInt8(1 + 4 + 16 + 64), number) FROM numbers(8, 16); -- { serverError PARAMETER_OUT_OF_BOUND } +SELECT number, bitTest(toUInt16(1 + 4 + 16 + 64 + 256 + 1024 + 4096 + 16384 + 65536), number) FROM numbers(16); +SELECT -number, bitTest(toUInt16(1), -number) FROM numbers(8); -- { serverError PARAMETER_OUT_OF_BOUND } diff --git a/parser/testdata/01083_aggregation_memory_efficient_bug/ast.json b/parser/testdata/01083_aggregation_memory_efficient_bug/ast.json new file mode 100644 index 000000000..65748e74e --- /dev/null +++ b/parser/testdata/01083_aggregation_memory_efficient_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery da_memory_efficient_shard (children 1)" + }, + { + "explain": " Identifier da_memory_efficient_shard" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000992593, + "rows_read": 2, + "bytes_read": 102 + } +} diff --git a/parser/testdata/01083_aggregation_memory_efficient_bug/metadata.json b/parser/testdata/01083_aggregation_memory_efficient_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01083_aggregation_memory_efficient_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01083_aggregation_memory_efficient_bug/query.sql b/parser/testdata/01083_aggregation_memory_efficient_bug/query.sql new file mode 100644 index 000000000..134131cc7 --- /dev/null +++ b/parser/testdata/01083_aggregation_memory_efficient_bug/query.sql @@ -0,0 +1,9 @@ +drop table if exists da_memory_efficient_shard; +create table da_memory_efficient_shard(A Int64, B Int64) Engine=MergeTree order by A partition by B % 2; +insert into da_memory_efficient_shard select number, number from numbers(100000); + +set distributed_aggregation_memory_efficient = 1, group_by_two_level_threshold = 1, group_by_two_level_threshold_bytes=1; + +select sum(a) from (SELECT B, uniqExact(A) a FROM remote('localhost,127.0.0.1', currentDatabase(), da_memory_efficient_shard) GROUP BY B); + +drop table if exists da_memory_efficient_shard; diff --git a/parser/testdata/01083_cross_to_inner_with_in_bug/ast.json b/parser/testdata/01083_cross_to_inner_with_in_bug/ast.json new file mode 100644 index 000000000..5920bcfb7 --- /dev/null +++ 
b/parser/testdata/01083_cross_to_inner_with_in_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ax (children 1)" + }, + { + "explain": " Identifier ax" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001103506, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01083_cross_to_inner_with_in_bug/metadata.json b/parser/testdata/01083_cross_to_inner_with_in_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01083_cross_to_inner_with_in_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01083_cross_to_inner_with_in_bug/query.sql b/parser/testdata/01083_cross_to_inner_with_in_bug/query.sql new file mode 100644 index 000000000..f6d788512 --- /dev/null +++ b/parser/testdata/01083_cross_to_inner_with_in_bug/query.sql @@ -0,0 +1,13 @@ +drop table if exists ax; +drop table if exists bx; + +create table ax (A Int64, B Int64) Engine = Memory; +create table bx (A Int64) Engine = Memory; + +insert into ax values (1, 1), (2, 1); +insert into bx values (2), (4); + +select * from bx, ax where ax.A = bx.A and ax.B in (1,2); + +drop table ax; +drop table bx; diff --git a/parser/testdata/01083_cross_to_inner_with_like/ast.json b/parser/testdata/01083_cross_to_inner_with_like/ast.json new file mode 100644 index 000000000..3dc7a73fe --- /dev/null +++ b/parser/testdata/01083_cross_to_inner_with_like/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001205997, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01083_cross_to_inner_with_like/metadata.json b/parser/testdata/01083_cross_to_inner_with_like/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01083_cross_to_inner_with_like/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01083_cross_to_inner_with_like/query.sql b/parser/testdata/01083_cross_to_inner_with_like/query.sql new file mode 100644 index 000000000..6ec6e8069 --- /dev/null +++ b/parser/testdata/01083_cross_to_inner_with_like/query.sql @@ -0,0 +1,16 @@ +SET convert_query_to_cnf = 0; + +DROP TABLE IF EXISTS n; +DROP TABLE IF EXISTS r; + +CREATE TABLE n (k UInt32) ENGINE = Memory; +CREATE TABLE r (k UInt32, name String) ENGINE = Memory; + +SET enable_optimize_predicate_expression = 0; + +EXPLAIN SYNTAX SELECT * FROM n, r WHERE n.k = r.k AND r.name = 'A'; +EXPLAIN SYNTAX SELECT * FROM n, r WHERE n.k = r.k AND r.name LIKE 'A%'; +EXPLAIN SYNTAX SELECT * FROM n, r WHERE n.k = r.k AND r.name NOT LIKE 'A%'; + +DROP TABLE n; +DROP TABLE r; diff --git a/parser/testdata/01083_expressions_in_engine_arguments/ast.json b/parser/testdata/01083_expressions_in_engine_arguments/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01083_expressions_in_engine_arguments/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01083_expressions_in_engine_arguments/metadata.json b/parser/testdata/01083_expressions_in_engine_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01083_expressions_in_engine_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01083_expressions_in_engine_arguments/query.sql 
b/parser/testdata/01083_expressions_in_engine_arguments/query.sql new file mode 100644 index 000000000..e73a0249a --- /dev/null +++ b/parser/testdata/01083_expressions_in_engine_arguments/query.sql @@ -0,0 +1,100 @@ +-- Tags: no-parallel, no-fasttest + +SET prefer_localhost_replica=1; + +DROP TABLE IF EXISTS file; +DROP TABLE IF EXISTS url; +DROP TABLE IF EXISTS view; +DROP TABLE IF EXISTS buffer; +DROP TABLE IF EXISTS merge; +DROP TABLE IF EXISTS merge_tf; +DROP TABLE IF EXISTS distributed; +DROP TABLE IF EXISTS distributed_tf; +DROP TABLE IF EXISTS rich_syntax; +DROP DICTIONARY IF EXISTS dict; + +CREATE TABLE file (n Int8) ENGINE = File(upper('tsv') || 'WithNames' || 'AndTypes'); +CREATE TABLE buffer (n Int8) ENGINE = Buffer(currentDatabase(), file, 16, 10, 200, 10000, 1000000, 10000000, 1000000000); +CREATE TABLE merge (n Int8) ENGINE = Merge('', lower('DISTRIBUTED')); +CREATE TABLE merge_tf as merge(currentDatabase(), '.*'); +CREATE TABLE distributed (n Int8) ENGINE = Distributed(test_shard_localhost, currentDatabase(), 'fi' || 'le'); +CREATE TABLE distributed_tf as cluster('test' || '_' || 'shard_localhost', '', 'buf' || 'fer'); + +INSERT INTO buffer VALUES (1); +DETACH TABLE buffer; -- trigger flushing +ATTACH TABLE buffer; + +CREATE TABLE url (n UInt64, col String) ENGINE=URL +( + replace + ( + 'https://localhost:' || getServerPort('https_port') || '/?query=' || 'select n, _table from ' || currentDatabase() || '.merge format CSV', ' ', '+' + ), + CSV +); + +CREATE VIEW view AS SELECT toInt64(n) as n FROM (SELECT toString(n) as n from merge WHERE _table != 'qwerty' ORDER BY _table) UNION ALL SELECT * FROM file; + +-- The following line is needed just to disable checking stderr for emptiness +SELECT nonexistentsomething; -- { serverError UNKNOWN_IDENTIFIER } + +CREATE DICTIONARY dict (n UInt64, col String DEFAULT '42') PRIMARY KEY n +SOURCE(CLICKHOUSE(HOST 'localhost' PORT getServerPort('tcp_port_secure') SECURE 1 USER 'default' TABLE 'url')) LIFETIME(1) LAYOUT(CACHE(SIZE_IN_CELLS 1)); + +-- dict --> url --> merge |-> distributed -> file (1) +-- |-> distributed_tf -> buffer -> file (1) + +-- TODO make fuzz test from this +CREATE TABLE rich_syntax as remote +( + 'localhos{x|y|t}', + cluster + ( + 'test' || '_' || 'shard_localhost', + remote + ( + '127.0.0.{1..4}', + if + ( + toString(40 + 2) NOT IN ('hello', dictGetString(currentDatabase() || '.dict', 'col', toUInt64('0001'))), + currentDatabase(), + 'FAIL' + ), + extract('123view456', '[a-z]+') + ) + ) +); + + +SHOW CREATE file; +SHOW CREATE buffer; +SHOW CREATE merge; +SHOW CREATE merge_tf; +SHOW CREATE distributed; +SHOW CREATE distributed_tf; +SHOW CREATE url; +SHOW CREATE rich_syntax; +SHOW CREATE VIEW view; +SHOW CREATE dict; + +INSERT INTO buffer VALUES (1); +-- remote(localhost) --> cluster(test_shard_localhost) |-> remote(127.0.0.1) --> view |-> subquery --> merge |-> distributed --> file (1) +-- | | |-> distributed_tf -> buffer (1) -> file (1) +-- | |-> file (1) +-- |-> remote(127.0.0.2) --> ... 
+SELECT sum(n) from rich_syntax settings enable_parallel_replicas=0; +SELECT sum(n) from rich_syntax settings serialize_query_plan=0; + +-- Clear cache to avoid future errors in the logs +SYSTEM DROP DNS CACHE; + +DROP TABLE file; +DROP DICTIONARY dict; +DROP TABLE url; +DROP TABLE view; +DROP TABLE buffer; +DROP TABLE merge; +DROP TABLE merge_tf; +DROP TABLE distributed; +DROP TABLE distributed_tf; +DROP TABLE rich_syntax; diff --git a/parser/testdata/01083_functional_index_in_mergetree/ast.json b/parser/testdata/01083_functional_index_in_mergetree/ast.json new file mode 100644 index 000000000..a4917ff91 --- /dev/null +++ b/parser/testdata/01083_functional_index_in_mergetree/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001119647, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01083_functional_index_in_mergetree/metadata.json b/parser/testdata/01083_functional_index_in_mergetree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01083_functional_index_in_mergetree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01083_functional_index_in_mergetree/query.sql b/parser/testdata/01083_functional_index_in_mergetree/query.sql new file mode 100644 index 000000000..d0fbf3356 --- /dev/null +++ b/parser/testdata/01083_functional_index_in_mergetree/query.sql @@ -0,0 +1,33 @@ +SET max_threads = 1; + +CREATE TABLE IF NOT EXISTS functional_index_mergetree (x Float64) ENGINE = MergeTree ORDER BY round(x); +INSERT INTO functional_index_mergetree VALUES (7.42)(7.41)(7.51); + +SELECT 'TP1'; +SELECT * FROM functional_index_mergetree WHERE x > 7.42; +SELECT * FROM functional_index_mergetree WHERE x < 7.49; +SELECT * FROM functional_index_mergetree WHERE x < 7.5; + +SELECT * FROM functional_index_mergetree WHERE NOT (NOT x < 7.49); +SELECT * FROM functional_index_mergetree WHERE NOT (NOT x < 7.5); +SELECT * FROM functional_index_mergetree WHERE NOT (NOT x > 7.42); + +SELECT 'TP2'; +SELECT * FROM functional_index_mergetree WHERE NOT x > 7.49; +SELECT * FROM functional_index_mergetree WHERE NOT x < 7.42; +SELECT * FROM functional_index_mergetree WHERE NOT x < 7.41; +SELECT * FROM functional_index_mergetree WHERE NOT x < 7.5; + +SELECT 'TP3'; +SELECT * FROM functional_index_mergetree WHERE x > 7.41 AND x < 7.51; +SELECT * FROM functional_index_mergetree WHERE NOT (x > 7.41 AND x < 7.51); + +SELECT 'TP4'; +SELECT * FROM functional_index_mergetree WHERE NOT x < 7.41 AND NOT x > 7.49; +SELECT * FROM functional_index_mergetree WHERE NOT x < 7.42 AND NOT x > 7.42; +SELECT * FROM functional_index_mergetree WHERE (NOT x < 7.4) AND (NOT x > 7.49); + +SELECT 'TP5'; +SELECT * FROM functional_index_mergetree WHERE NOT or(NOT x, toUInt64(x) AND NOT floor(x) > 6, x >= 7.42 AND round(x) <= 7); + +DROP TABLE functional_index_mergetree; diff --git a/parser/testdata/01083_log_first_column_alias/ast.json b/parser/testdata/01083_log_first_column_alias/ast.json new file mode 100644 index 000000000..89b9c0346 --- /dev/null +++ b/parser/testdata/01083_log_first_column_alias/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_alias (children 1)" + }, + { + "explain": " Identifier test_alias" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001004612, + "rows_read": 2, + "bytes_read": 72 + 
} +} diff --git a/parser/testdata/01083_log_first_column_alias/metadata.json b/parser/testdata/01083_log_first_column_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01083_log_first_column_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01083_log_first_column_alias/query.sql b/parser/testdata/01083_log_first_column_alias/query.sql new file mode 100644 index 000000000..465280299 --- /dev/null +++ b/parser/testdata/01083_log_first_column_alias/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS test_alias; + +CREATE TABLE test_alias (a UInt8 ALIAS b, b UInt8) ENGINE Log; + +SELECT count() FROM test_alias; + +DROP TABLE test_alias; diff --git a/parser/testdata/01083_match_zero_byte/ast.json b/parser/testdata/01083_match_zero_byte/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01083_match_zero_byte/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01083_match_zero_byte/metadata.json b/parser/testdata/01083_match_zero_byte/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01083_match_zero_byte/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01083_match_zero_byte/query.sql b/parser/testdata/01083_match_zero_byte/query.sql new file mode 100644 index 000000000..d39d5f937 --- /dev/null +++ b/parser/testdata/01083_match_zero_byte/query.sql @@ -0,0 +1,19 @@ +-- Tags: no-fasttest +-- no-fasttest: Requires vectorscan +select match('a key="v" ', 'key="(.*?)"'); +select match(materialize('a key="v" '), 'key="(.*?)"'); + +select match('\0 key="v" ', 'key="(.*?)"'); +select match(materialize('\0 key="v" '), 'key="(.*?)"'); + +select multiMatchAny('\0 key="v" ', ['key="(.*?)"']); +select multiMatchAny(materialize('\0 key="v" '), ['key="(.*?)"']); + +select unhex('34') || ' key="v" ' as haystack, length(haystack), extract( haystack, 'key="(.*?)"') as needle; +-- works, result = v + +select unhex('00') || ' key="v" ' as haystack, length(haystack), extract( haystack, 'key="(.*?)"') as needle; +-- before fix: returns nothing (zero byte at the beginning of the haystack) + +select number as char_code, extract( char(char_code) || ' key="v" ' as haystack, 'key="(.*?)"') as needle from numbers(256); +-- all other char codes (except the zero byte) work ok diff --git a/parser/testdata/01084_defaults_on_aliases/ast.json b/parser/testdata/01084_defaults_on_aliases/ast.json new file mode 100644 index 000000000..67542b71c --- /dev/null +++ b/parser/testdata/01084_defaults_on_aliases/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_with_defaults_on_aliases (children 1)" + }, + { + "explain": " Identifier table_with_defaults_on_aliases" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001128068, + "rows_read": 2, + "bytes_read": 112 + } +} diff --git a/parser/testdata/01084_defaults_on_aliases/metadata.json b/parser/testdata/01084_defaults_on_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01084_defaults_on_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01084_defaults_on_aliases/query.sql b/parser/testdata/01084_defaults_on_aliases/query.sql new file mode 100644 index 000000000..2f9d82273 --- /dev/null +++ b/parser/testdata/01084_defaults_on_aliases/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS
table_with_defaults_on_aliases; + +CREATE TABLE table_with_defaults_on_aliases (col1 UInt32, col2 ALIAS col1, col3 DEFAULT col2) Engine = MergeTree() ORDER BY tuple(); + +SYSTEM STOP MERGES table_with_defaults_on_aliases; + +INSERT INTO table_with_defaults_on_aliases (col1) VALUES (1); + +SELECT * FROM table_with_defaults_on_aliases WHERE col1 = 1; + +SELECT col1, col2, col3 FROM table_with_defaults_on_aliases WHERE col1 = 1; + +SELECT col3 FROM table_with_defaults_on_aliases; -- important to check without WHERE + +ALTER TABLE table_with_defaults_on_aliases ADD COLUMN col4 UInt64 DEFAULT col2 * col3; + +INSERT INTO table_with_defaults_on_aliases (col1) VALUES (2); + +SELECT * FROM table_with_defaults_on_aliases WHERE col1 = 2; + +SELECT col1, col2, col3, col4 FROM table_with_defaults_on_aliases WHERE col1 = 2; + +ALTER TABLE table_with_defaults_on_aliases ADD COLUMN col5 UInt64 ALIAS col2 * col4; + +INSERT INTO table_with_defaults_on_aliases (col1) VALUES (3); + +SELECT * FROM table_with_defaults_on_aliases WHERE col1 = 3; + +SELECT col1, col2, col3, col4, col5 FROM table_with_defaults_on_aliases WHERE col1 = 3; + +ALTER TABLE table_with_defaults_on_aliases ADD COLUMN col6 UInt64 MATERIALIZED col2 * col4; + +DROP TABLE IF EXISTS table_with_defaults_on_aliases; diff --git a/parser/testdata/01084_regexp_empty/ast.json b/parser/testdata/01084_regexp_empty/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01084_regexp_empty/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01084_regexp_empty/metadata.json b/parser/testdata/01084_regexp_empty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01084_regexp_empty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01084_regexp_empty/query.sql b/parser/testdata/01084_regexp_empty/query.sql new file mode 100644 index 000000000..5dd060ab5 --- /dev/null +++ b/parser/testdata/01084_regexp_empty/query.sql @@ -0,0 +1,9 @@ + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE_1:Identifier}; +CREATE DATABASE {CLICKHOUSE_DATABASE_1:Identifier}; +USE {CLICKHOUSE_DATABASE_1:Identifier}; +CREATE TABLE t (x UInt8) ENGINE = Memory; + +SELECT * FROM merge('', ''); + +DROP DATABASE {CLICKHOUSE_DATABASE_1:Identifier}; diff --git a/parser/testdata/01085_datetime_arithmetic_preserve_timezone/ast.json b/parser/testdata/01085_datetime_arithmetic_preserve_timezone/ast.json new file mode 100644 index 000000000..a6876cd0b --- /dev/null +++ b/parser/testdata/01085_datetime_arithmetic_preserve_timezone/ast.json @@ -0,0 +1,163 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 8)" + }, + { + "explain": " Function toDateTime (alias t) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2020-01-01 00:00:00'" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " 
Identifier t" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Function toIntervalSecond (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Function toIntervalDay (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function plus (alias dt64) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier dt64" + } + ], + + "rows": 47, + + "statistics": + { + "elapsed": 0.001308458, + "rows_read": 47, + "bytes_read": 1763 + } +} diff --git a/parser/testdata/01085_datetime_arithmetic_preserve_timezone/metadata.json b/parser/testdata/01085_datetime_arithmetic_preserve_timezone/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01085_datetime_arithmetic_preserve_timezone/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01085_datetime_arithmetic_preserve_timezone/query.sql b/parser/testdata/01085_datetime_arithmetic_preserve_timezone/query.sql new file mode 100644 index 000000000..b3057457d --- /dev/null +++ b/parser/testdata/01085_datetime_arithmetic_preserve_timezone/query.sql @@ -0,0 +1 @@ +SELECT toDateTime('2020-01-01 00:00:00', 'UTC') AS t, t + 1, toDate(t) + 1, t + INTERVAL 1 SECOND, t + INTERVAL 1 DAY, toTypeName(t + 1), toDateTime64(t, 3, 'UTC') + 1 AS dt64, toTypeName(dt64); diff --git a/parser/testdata/01085_extract_all_empty/ast.json b/parser/testdata/01085_extract_all_empty/ast.json new file mode 100644 index 000000000..fe0871f4b --- /dev/null +++ b/parser/testdata/01085_extract_all_empty/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function extractAll (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '\\0'" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001025591, + "rows_read": 8, + "bytes_read": 283 + } +} diff --git a/parser/testdata/01085_extract_all_empty/metadata.json b/parser/testdata/01085_extract_all_empty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01085_extract_all_empty/metadata.json @@ -0,0 
+1 @@ +{"todo": true} diff --git a/parser/testdata/01085_extract_all_empty/query.sql b/parser/testdata/01085_extract_all_empty/query.sql new file mode 100644 index 000000000..fdfc9593d --- /dev/null +++ b/parser/testdata/01085_extract_all_empty/query.sql @@ -0,0 +1 @@ +SELECT extractAll('\0', ''); diff --git a/parser/testdata/01085_simdjson_uint64/ast.json b/parser/testdata/01085_simdjson_uint64/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01085_simdjson_uint64/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01085_simdjson_uint64/metadata.json b/parser/testdata/01085_simdjson_uint64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01085_simdjson_uint64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01085_simdjson_uint64/query.sql b/parser/testdata/01085_simdjson_uint64/query.sql new file mode 100644 index 000000000..5fc3bc7db --- /dev/null +++ b/parser/testdata/01085_simdjson_uint64/query.sql @@ -0,0 +1,3 @@ + +WITH '{"a": "hello", "b": 12345678901234567890}' AS json +SELECT JSONExtractRaw(json, 'a'); diff --git a/parser/testdata/01086_modulo_or_zero/ast.json b/parser/testdata/01086_modulo_or_zero/ast.json new file mode 100644 index 000000000..4e69c48c2 --- /dev/null +++ b/parser/testdata/01086_modulo_or_zero/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function moduloOrZero (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001028464, + "rows_read": 11, + "bytes_read": 413 + } +} diff --git a/parser/testdata/01086_modulo_or_zero/metadata.json b/parser/testdata/01086_modulo_or_zero/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01086_modulo_or_zero/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01086_modulo_or_zero/query.sql b/parser/testdata/01086_modulo_or_zero/query.sql new file mode 100644 index 000000000..97068c2ff --- /dev/null +++ b/parser/testdata/01086_modulo_or_zero/query.sql @@ -0,0 +1,5 @@ +select moduloOrZero(0, 0) = 0; +select moduloOrZero(-128, -1) = 0; +select moduloOrZero(-127, -1) = 0; +select moduloOrZero(1, 1) = 0; +select moduloOrZero(5, 3) = 2; diff --git a/parser/testdata/01087_index_set_ubsan/ast.json b/parser/testdata/01087_index_set_ubsan/ast.json new file mode 100644 index 000000000..5bb5afb1f --- /dev/null +++ b/parser/testdata/01087_index_set_ubsan/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001428574, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01087_index_set_ubsan/metadata.json b/parser/testdata/01087_index_set_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01087_index_set_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01087_index_set_ubsan/query.sql b/parser/testdata/01087_index_set_ubsan/query.sql new file mode 100644 index 000000000..e22e58396 --- /dev/null +++ b/parser/testdata/01087_index_set_ubsan/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS t; +create table t (i Int, a Int, s String, index ind_s (s) type set(1) granularity 1) engine = MergeTree order by i; +insert into t values (1, 1, 'a') (2, 1, 'a') (3, 1, 'a') (4, 1, 'a'); +SELECT a, i from t ORDER BY a, i; +DROP TABLE t; diff --git a/parser/testdata/01087_storage_generate/ast.json b/parser/testdata/01087_storage_generate/ast.json new file mode 100644 index 000000000..218dfcc2c --- /dev/null +++ b/parser/testdata/01087_storage_generate/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001184913, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01087_storage_generate/metadata.json b/parser/testdata/01087_storage_generate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01087_storage_generate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01087_storage_generate/query.sql b/parser/testdata/01087_storage_generate/query.sql new file mode 100644 index 000000000..a93207918 --- /dev/null +++ b/parser/testdata/01087_storage_generate/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table(a Array(Int8), d Decimal32(4), c Tuple(DateTime64(3), UUID)) ENGINE = GenerateRandom(); +SELECT COUNT(*) FROM (SELECT * FROM test_table LIMIT 100); + +DROP TABLE IF EXISTS test_table; + +SELECT '-'; + +DROP TABLE IF EXISTS test_table_2; +CREATE TABLE test_table_2(a Array(Int8), d Decimal32(4), c Tuple(DateTime64(3, 'UTC'), UUID)) ENGINE = GenerateRandom(10, 5, 3); + +SELECT * FROM test_table_2 LIMIT 100; + +SELECT '-'; + +DROP TABLE IF EXISTS test_table_2; diff --git a/parser/testdata/01087_table_function_generate/ast.json b/parser/testdata/01087_table_function_generate/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01087_table_function_generate/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01087_table_function_generate/metadata.json b/parser/testdata/01087_table_function_generate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01087_table_function_generate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01087_table_function_generate/query.sql b/parser/testdata/01087_table_function_generate/query.sql new file mode 100644 index 000000000..ff7c3f347 --- /dev/null +++ b/parser/testdata/01087_table_function_generate/query.sql @@ -0,0 +1,199 @@ +SELECT + toTypeName(ui64), toTypeName(i64), + toTypeName(ui32), toTypeName(i32), + toTypeName(ui16), toTypeName(i16), + toTypeName(ui8), toTypeName(i8) +FROM generateRandom('ui64 UInt64, i64 Int64, ui32 UInt32, i32 Int32, ui16 UInt16, i16 Int16, ui8 UInt8, i8 Int8') +LIMIT 1; +SELECT + ui64, i64, + ui32, i32, + ui16, i16, + ui8, i8 +FROM generateRandom('ui64 UInt64, i64 Int64, ui32 UInt32, i32 Int32, ui16 UInt16, i16 Int16, ui8 UInt8, i8 Int8', 1, 10, 10) +LIMIT 10; +SELECT '-'; +SELECT + toTypeName(i) +FROM generateRandom('i 
Enum8(\'hello\' = 1, \'world\' = 5)') +LIMIT 1; +SELECT + i +FROM generateRandom('i Enum8(\'hello\' = 1, \'world\' = 5)', 1, 10, 10) +LIMIT 10; +SELECT '-'; +SELECT + toTypeName(i) +FROM generateRandom('i Array(Nullable(Enum8(\'hello\' = 1, \'world\' = 5)))') +LIMIT 1; +SELECT + i +FROM generateRandom('i Array(Nullable(Enum8(\'hello\' = 1, \'world\' = 5)))', 1, 10, 10) +LIMIT 10; +SELECT '-'; +SELECT + toTypeName(i) +FROM generateRandom('i Nullable(Enum16(\'h\' = 1, \'w\' = 5 , \'o\' = -200))') +LIMIT 1; +SELECT + i +FROM generateRandom('i Nullable(Enum16(\'h\' = 1, \'w\' = 5 , \'o\' = -200))', 1, 10, 10) +LIMIT 10; +SELECT '-'; +SELECT +toTypeName(d), toTypeName(dt), toTypeName(dtm) +FROM generateRandom('d Date, dt DateTime(\'UTC\'), dtm DateTime(\'UTC\')') +LIMIT 1; +SELECT +d, dt, dtm +FROM generateRandom('d Date, dt DateTime(\'UTC\'), dtm DateTime(\'UTC\')', 1, 10, 10) +LIMIT 10; +SELECT '-'; +SELECT +toTypeName(dt64), toTypeName(dts64), toTypeName(dtms64) +FROM generateRandom('dt64 DateTime64(3, \'UTC\'), dts64 DateTime64(6, \'UTC\'), dtms64 DateTime64(6 ,\'UTC\')') +LIMIT 1; +SELECT +dt64, dts64, dtms64 +FROM generateRandom('dt64 DateTime64(3, \'UTC\'), dts64 DateTime64(6, \'UTC\'), dtms64 DateTime64(6 ,\'UTC\')', 1, 10, 10) +LIMIT 10; +SELECT +toTypeName(d32) +FROM generateRandom('d32 Date32') +LIMIT 1; +SELECT +d32 +FROM generateRandom('d32 Date32', 1, 10, 10) +LIMIT 10; +SELECT '-'; +SELECT + toTypeName(f32), toTypeName(f64) +FROM generateRandom('f32 Float32, f64 Float64') +LIMIT 1; +SELECT + f32, f64 +FROM generateRandom('f32 Float32, f64 Float64', 1, 10, 10) +LIMIT 10; +SELECT '-'; +SELECT + toTypeName(d32), toTypeName(d64), toTypeName(d128) +FROM generateRandom('d32 Decimal32(4), d64 Decimal64(8), d128 Decimal128(16)') +LIMIT 1; +SELECT + d32, d64, d128 +FROM generateRandom('d32 Decimal32(4), d64 Decimal64(8), d128 Decimal128(16)', 1, 10, 10) +LIMIT 10; +SELECT '-'; +SELECT + toTypeName(i) +FROM generateRandom('i Tuple(Int32, Int64)') +LIMIT 1; +SELECT + i +FROM generateRandom('i Tuple(Int32, Int64)', 1, 10, 10) +LIMIT 10; +SELECT '-'; +SELECT + toTypeName(i) +FROM generateRandom('i Array(Int8)') +LIMIT 1; +SELECT + i +FROM generateRandom('i Array(Int8)', 1, 10, 10) +LIMIT 10; +SELECT '-'; +SELECT + toTypeName(i) +FROM generateRandom('i Array(Nullable(Int32))') +LIMIT 1; +SELECT + i +FROM generateRandom('i Array(Nullable(Int32))', 1, 10, 10) +LIMIT 10; +SELECT '-'; +SELECT + toTypeName(i) +FROM generateRandom('i Tuple(Int32, Array(Int64))') +LIMIT 1; +SELECT + i +FROM generateRandom('i Tuple(Int32, Array(Int64))', 1, 10, 10) +LIMIT 10; +SELECT '-'; +SELECT + toTypeName(i) +FROM generateRandom('i Nullable(String)', 1) +LIMIT 1; +SELECT + i +FROM generateRandom('i Nullable(String)', 1, 10, 10) +LIMIT 10; +SELECT '-'; +SELECT + toTypeName(i) +FROM generateRandom('i Array(String)') +LIMIT 1; +SELECT + i +FROM generateRandom('i Array(String)', 1, 10, 10) +LIMIT 10; + +SELECT '-'; +SELECT + toTypeName(i) +FROM generateRandom('i UUID') +LIMIT 1; +SELECT + i +FROM generateRandom('i UUID', 1, 10, 10) +LIMIT 10; +SELECT '-'; +SELECT + toTypeName(i) +FROM generateRandom('i Array(Nullable(UUID))') +LIMIT 1; +SELECT + i +FROM generateRandom('i Array(Nullable(UUID))', 1, 10, 10) +LIMIT 10; +SELECT '-'; +SELECT + toTypeName(i) +FROM generateRandom('i FixedString(4)') +LIMIT 1; +SELECT + hex(i) +FROM generateRandom('i FixedString(4)', 1, 10, 10) +LIMIT 10; +SELECT '-'; +SELECT + toTypeName(i) +FROM generateRandom('i String') +LIMIT 1; +SELECT + i +FROM generateRandom('i String', 1, 10, 10)
+LIMIT 10; +SELECT '-'; +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table(a Array(Int8), d Decimal32(4), c Tuple(DateTime64(3, 'UTC'), UUID)) ENGINE=Memory; +INSERT INTO test_table SELECT * FROM generateRandom('a Array(Int8), d Decimal32(4), c Tuple(DateTime64(3, \'UTC\'), UUID)', 1, 10, 2) +LIMIT 10; + +SELECT * FROM test_table ORDER BY a, d, c; + +DROP TABLE IF EXISTS test_table; + +SELECT '-'; + +DROP TABLE IF EXISTS test_table_2; +CREATE TABLE test_table_2(a Array(Int8), b UInt32, c Nullable(String), d Decimal32(4), e Nullable(Enum16('h' = 1, 'w' = 5 , 'o' = -200)), f Float64, g Tuple(Date, DateTime('UTC'), DateTime64(3, 'UTC'), UUID), h FixedString(2)) ENGINE=Memory; +INSERT INTO test_table_2 SELECT * FROM generateRandom('a Array(Int8), b UInt32, c Nullable(String), d Decimal32(4), e Nullable(Enum16(\'h\' = 1, \'w\' = 5 , \'o\' = -200)), f Float64, g Tuple(Date, DateTime(\'UTC\'), DateTime64(3, \'UTC\'), UUID), h FixedString(2)', 10, 5, 3) +LIMIT 10; + +SELECT a, b, c, d, e, f, g, hex(h) FROM test_table_2 ORDER BY a, b, c, d, e, f, g, h; +SELECT '-'; + +DROP TABLE IF EXISTS test_table_2; + +select * from generateRandom('x UInt64', Null, 10, 2) limit 2 format Null; diff --git a/parser/testdata/01088_array_slice_of_aggregate_functions/ast.json b/parser/testdata/01088_array_slice_of_aggregate_functions/ast.json new file mode 100644 index 000000000..a1cfd376b --- /dev/null +++ b/parser/testdata/01088_array_slice_of_aggregate_functions/ast.json @@ -0,0 +1,121 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arraySlice (alias y) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function groupArray (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function uniqState (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier number" + } + ], + + "rows": 33, + + "statistics": + { + "elapsed": 0.001374214, + "rows_read": 33, + "bytes_read": 1415 + } +} diff --git a/parser/testdata/01088_array_slice_of_aggregate_functions/metadata.json 
b/parser/testdata/01088_array_slice_of_aggregate_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01088_array_slice_of_aggregate_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01088_array_slice_of_aggregate_functions/query.sql b/parser/testdata/01088_array_slice_of_aggregate_functions/query.sql new file mode 100644 index 000000000..c8466b570 --- /dev/null +++ b/parser/testdata/01088_array_slice_of_aggregate_functions/query.sql @@ -0,0 +1 @@ +select arraySlice(groupArray(x), 1, 1) as y from (select uniqState(number) as x from numbers(10) group by number order by number); diff --git a/parser/testdata/01089_alter_settings_old_format/ast.json b/parser/testdata/01089_alter_settings_old_format/ast.json new file mode 100644 index 000000000..3c1d71de3 --- /dev/null +++ b/parser/testdata/01089_alter_settings_old_format/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery old_format_mt (children 1)" + }, + { + "explain": " Identifier old_format_mt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001507742, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/01089_alter_settings_old_format/metadata.json b/parser/testdata/01089_alter_settings_old_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01089_alter_settings_old_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01089_alter_settings_old_format/query.sql b/parser/testdata/01089_alter_settings_old_format/query.sql new file mode 100644 index 000000000..daeed522f --- /dev/null +++ b/parser/testdata/01089_alter_settings_old_format/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS old_format_mt; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE old_format_mt ( + event_date Date, + key UInt64, + value1 UInt64, + value2 String +) +ENGINE = MergeTree(event_date, (key, value1), 8192); + +ALTER TABLE old_format_mt MODIFY SETTING enable_mixed_granularity_parts = 1; --{serverError BAD_ARGUMENTS} + +SELECT 1; + +DROP TABLE IF EXISTS old_format_mt; diff --git a/parser/testdata/01090_fixed_string_bit_ops/ast.json b/parser/testdata/01090_fixed_string_bit_ops/ast.json new file mode 100644 index 000000000..8d7f0f906 --- /dev/null +++ b/parser/testdata/01090_fixed_string_bit_ops/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitXor (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toFixedString (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'abc'" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Function toFixedString (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '\\0\u0001\u0002'" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " 
TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001480929, + "rows_read": 22, + "bytes_read": 884 + } +} diff --git a/parser/testdata/01090_fixed_string_bit_ops/metadata.json b/parser/testdata/01090_fixed_string_bit_ops/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01090_fixed_string_bit_ops/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01090_fixed_string_bit_ops/query.sql b/parser/testdata/01090_fixed_string_bit_ops/query.sql new file mode 100644 index 000000000..72ef1c374 --- /dev/null +++ b/parser/testdata/01090_fixed_string_bit_ops/query.sql @@ -0,0 +1,5 @@ +SELECT DISTINCT bitXor(materialize(toFixedString('abc', 3)), toFixedString('\x00\x01\x02', 3)) FROM numbers(10); +SELECT DISTINCT bitXor(materialize(toFixedString('abcdef', 6)), toFixedString('\x00\x01\x02\x03\x04\x05', 6)) FROM numbers(10); + +SELECT DISTINCT bitXor(toFixedString('\x00\x01\x02', 3), materialize(toFixedString('abc', 3))) FROM numbers(10); +SELECT DISTINCT bitXor(toFixedString('\x00\x01\x02\x03\x04\x05', 6), materialize(toFixedString('abcdef', 6))) FROM numbers(10); diff --git a/parser/testdata/01090_zookeeper_mutations_and_insert_quorum_long/ast.json b/parser/testdata/01090_zookeeper_mutations_and_insert_quorum_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01090_zookeeper_mutations_and_insert_quorum_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01090_zookeeper_mutations_and_insert_quorum_long/metadata.json b/parser/testdata/01090_zookeeper_mutations_and_insert_quorum_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01090_zookeeper_mutations_and_insert_quorum_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01090_zookeeper_mutations_and_insert_quorum_long/query.sql b/parser/testdata/01090_zookeeper_mutations_and_insert_quorum_long/query.sql new file mode 100644 index 000000000..67534a461 --- /dev/null +++ b/parser/testdata/01090_zookeeper_mutations_and_insert_quorum_long/query.sql @@ -0,0 +1,23 @@ +-- Tags: long, zookeeper, no-replicated-database +-- Tag no-replicated-database: Fails due to additional replicas or shards + +DROP TABLE IF EXISTS mutations_and_quorum1 SYNC; +DROP TABLE IF EXISTS mutations_and_quorum2 SYNC; + +CREATE TABLE mutations_and_quorum1 (`server_date` Date, `something` String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01090/mutations_and_quorum', '1') PARTITION BY toYYYYMM(server_date) ORDER BY (server_date, something); +CREATE TABLE mutations_and_quorum2 (`server_date` Date, `something` String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01090/mutations_and_quorum', '2') PARTITION BY toYYYYMM(server_date) ORDER BY (server_date, something); + +-- Should not be larger then 600e6 (default timeout in clickhouse-test) +SET insert_quorum=2, insert_quorum_parallel=0, insert_quorum_timeout=300e3; + +INSERT INTO mutations_and_quorum1 VALUES ('2019-01-01', 'test1'), ('2019-02-01', 'test2'), ('2019-03-01', 'test3'), ('2019-04-01', 'test4'), ('2019-05-01', 'test1'), ('2019-06-01', 'test2'), ('2019-07-01', 'test3'), ('2019-08-01', 'test4'), ('2019-09-01', 'test1'), ('2019-10-01', 'test2'), ('2019-11-01', 'test3'), ('2019-12-01', 
'test4'); + +ALTER TABLE mutations_and_quorum1 DELETE WHERE something = 'test1' SETTINGS mutations_sync=2; + +SELECT COUNT() FROM mutations_and_quorum1; +SELECT COUNT() FROM mutations_and_quorum2; + +SELECT COUNT() FROM system.mutations WHERE database = currentDatabase() AND table like 'mutations_and_quorum%' and is_done = 0; + +DROP TABLE IF EXISTS mutations_and_quorum1 SYNC; +DROP TABLE IF EXISTS mutations_and_quorum2 SYNC; diff --git a/parser/testdata/01091_insert_with_default_json/ast.json b/parser/testdata/01091_insert_with_default_json/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01091_insert_with_default_json/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01091_insert_with_default_json/metadata.json b/parser/testdata/01091_insert_with_default_json/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01091_insert_with_default_json/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01091_insert_with_default_json/query.sql b/parser/testdata/01091_insert_with_default_json/query.sql new file mode 100644 index 000000000..40de4eb06 --- /dev/null +++ b/parser/testdata/01091_insert_with_default_json/query.sql @@ -0,0 +1,28 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS table_with_complex_default; + +CREATE TABLE table_with_complex_default (i Int8, n UInt8 DEFAULT 42, s String DEFAULT concat('test', CAST(n, 'String'))) ENGINE=TinyLog; + +INSERT INTO table_with_complex_default FORMAT JSONEachRow {"i":0, "n": 0} + +SELECT * FROM table_with_complex_default; + +DROP TABLE IF EXISTS table_with_complex_default; + +DROP TABLE IF EXISTS test_default_using_alias; + +CREATE TABLE test_default_using_alias +( + what String, + a String DEFAULT concat(c, ' is great'), + b String DEFAULT concat(c, ' is fast'), + c String ALIAS concat(what, 'House') +) +ENGINE = TinyLog; + +INSERT INTO test_default_using_alias(what) VALUES ('Click'); + +SELECT a, b FROM test_default_using_alias; + +DROP TABLE IF EXISTS test_default_using_alias; diff --git a/parser/testdata/01091_num_threads/ast.json b/parser/testdata/01091_num_threads/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01091_num_threads/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01091_num_threads/metadata.json b/parser/testdata/01091_num_threads/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01091_num_threads/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01091_num_threads/query.sql b/parser/testdata/01091_num_threads/query.sql new file mode 100644 index 000000000..61dddc7d2 --- /dev/null +++ b/parser/testdata/01091_num_threads/query.sql @@ -0,0 +1,51 @@ +-- Tags: no-parallel + +set log_queries=1; +set log_query_threads=1; +set max_threads=0; +set use_concurrency_control=0; + +WITH 01091 AS id SELECT 1; +SYSTEM FLUSH LOGS query_log, query_thread_log; + +WITH + ( + SELECT query_id + FROM system.query_log + WHERE current_database = currentDatabase() AND (normalizeQuery(query) like normalizeQuery('WITH 01091 AS id SELECT 1;')) AND (event_date >= (today() - 1)) + ORDER BY event_time DESC + LIMIT 1 + ) AS id +SELECT uniqExact(thread_id) +FROM system.query_thread_log +WHERE (event_date >= (today() - 1)) AND (query_id = id) AND (thread_id != master_thread_id); + +with 01091 as id select sum(number) from numbers(1000000); +SYSTEM FLUSH LOGS query_log, query_thread_log; + +WITH + ( + 
SELECT query_id + FROM system.query_log + WHERE current_database = currentDatabase() AND (normalizeQuery(query) = normalizeQuery('with 01091 as id select sum(number) from numbers(1000000);')) AND (event_date >= (today() - 1)) + ORDER BY event_time DESC + LIMIT 1 + ) AS id +SELECT uniqExact(thread_id) > 2 +FROM system.query_thread_log +WHERE (event_date >= (today() - 1)) AND (query_id = id) AND (thread_id != master_thread_id); + +with 01091 as id select sum(number) from numbers_mt(1000000); +SYSTEM FLUSH LOGS query_log, query_thread_log; + +WITH + ( + SELECT query_id + FROM system.query_log + WHERE current_database = currentDatabase() AND (normalizeQuery(query) = normalizeQuery('with 01091 as id select sum(number) from numbers_mt(1000000);')) AND (event_date >= (today() - 1)) + ORDER BY event_time DESC + LIMIT 1 + ) AS id +SELECT uniqExact(thread_id) > 2 +FROM system.query_thread_log +WHERE (event_date >= (today() - 1)) AND (query_id = id) AND (thread_id != master_thread_id); diff --git a/parser/testdata/01091_query_profiler_does_not_hang/ast.json b/parser/testdata/01091_query_profiler_does_not_hang/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01091_query_profiler_does_not_hang/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01091_query_profiler_does_not_hang/metadata.json b/parser/testdata/01091_query_profiler_does_not_hang/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01091_query_profiler_does_not_hang/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01091_query_profiler_does_not_hang/query.sql b/parser/testdata/01091_query_profiler_does_not_hang/query.sql new file mode 100644 index 000000000..45f1a00ae --- /dev/null +++ b/parser/testdata/01091_query_profiler_does_not_hang/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-tsan, no-asan, no-ubsan, no-msan, no-debug + +SET query_profiler_cpu_time_period_ns = 1, max_rows_to_read = 0; +SELECT count() FROM numbers_mt(1000000000); diff --git a/parser/testdata/01092_memory_profiler/ast.json b/parser/testdata/01092_memory_profiler/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01092_memory_profiler/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01092_memory_profiler/metadata.json b/parser/testdata/01092_memory_profiler/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01092_memory_profiler/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01092_memory_profiler/query.sql b/parser/testdata/01092_memory_profiler/query.sql new file mode 100644 index 000000000..7d4c23754 --- /dev/null +++ b/parser/testdata/01092_memory_profiler/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-tsan, no-asan, no-ubsan, no-msan, no-debug, no-parallel, no-fasttest + +SET allow_introspection_functions = 1; + +SET memory_profiler_step = 1000000; +SET memory_profiler_sample_probability = 1; +SET log_queries = 1; + +SELECT ignore(groupArray(number), 'test memory profiler') FROM numbers(10000000) SETTINGS log_comment = '01092_memory_profiler'; + +SYSTEM FLUSH LOGS trace_log, query_log; +WITH addressToSymbol(arrayJoin(trace)) AS symbol SELECT count() > 0 FROM system.trace_log t WHERE event_date >= yesterday() AND trace_type = 'Memory' AND query_id = (SELECT query_id FROM system.query_log WHERE current_database = currentDatabase() AND event_date >= yesterday() AND query LIKE '%test memory profiler%' AND 
has(used_table_functions, 'numbers') AND log_comment = '01092_memory_profiler' ORDER BY event_time DESC LIMIT 1); +WITH addressToSymbol(arrayJoin(trace)) AS symbol SELECT count() > 0 FROM system.trace_log t WHERE event_date >= yesterday() AND trace_type = 'MemoryPeak' AND query_id = (SELECT query_id FROM system.query_log WHERE current_database = currentDatabase() AND event_date >= yesterday() AND query LIKE '%test memory profiler%' AND has(used_table_functions, 'numbers') AND log_comment = '01092_memory_profiler' ORDER BY event_time DESC LIMIT 1); +WITH addressToSymbol(arrayJoin(trace)) AS symbol SELECT count() > 0 FROM system.trace_log t WHERE event_date >= yesterday() AND trace_type = 'MemorySample' AND query_id = (SELECT query_id FROM system.query_log WHERE current_database = currentDatabase() AND event_date >= yesterday() AND query LIKE '%test memory profiler%' AND has(used_table_functions, 'numbers') AND log_comment = '01092_memory_profiler' ORDER BY event_time DESC LIMIT 1); diff --git a/parser/testdata/01093_cyclic_defaults_filimonov/ast.json b/parser/testdata/01093_cyclic_defaults_filimonov/ast.json new file mode 100644 index 000000000..bb16a7455 --- /dev/null +++ b/parser/testdata/01093_cyclic_defaults_filimonov/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.002023331, + "rows_read": 2, + "bytes_read": 61 + } +} diff --git a/parser/testdata/01093_cyclic_defaults_filimonov/metadata.json b/parser/testdata/01093_cyclic_defaults_filimonov/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01093_cyclic_defaults_filimonov/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01093_cyclic_defaults_filimonov/query.sql b/parser/testdata/01093_cyclic_defaults_filimonov/query.sql new file mode 100644 index 000000000..06010c983 --- /dev/null +++ b/parser/testdata/01093_cyclic_defaults_filimonov/query.sql @@ -0,0 +1,19 @@ +CREATE TABLE test +( + `a0` UInt64 DEFAULT a1 + 1, + `a1` UInt64 DEFAULT a0 + 1, + `a2` UInt64 DEFAULT a3 + a4, + `a3` UInt64 DEFAULT a2 + 1, + `a4` UInt64 ALIAS a3 + 1 +) +ENGINE = Log; -- { serverError CYCLIC_ALIASES } + +CREATE TABLE pythagoras +( + `a` Float64 DEFAULT sqrt((c * c) - (b * b)), + `b` Float64 DEFAULT sqrt((c * c) - (a * a)), + `c` Float64 DEFAULT sqrt((a * a) + (b * b)) +) +ENGINE = Log; -- { serverError CYCLIC_ALIASES } + +-- TODO: It works but should not: CREATE TABLE test (a DEFAULT b, b DEFAULT a) ENGINE = Memory diff --git a/parser/testdata/01095_tpch_like_smoke/ast.json b/parser/testdata/01095_tpch_like_smoke/ast.json new file mode 100644 index 000000000..dc6e9adbd --- /dev/null +++ b/parser/testdata/01095_tpch_like_smoke/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery part (children 1)" + }, + { + "explain": " Identifier part" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00183956, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01095_tpch_like_smoke/metadata.json b/parser/testdata/01095_tpch_like_smoke/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01095_tpch_like_smoke/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01095_tpch_like_smoke/query.sql 
b/parser/testdata/01095_tpch_like_smoke/query.sql new file mode 100644 index 000000000..ba8bc617e --- /dev/null +++ b/parser/testdata/01095_tpch_like_smoke/query.sql @@ -0,0 +1,809 @@ +DROP TABLE IF EXISTS part; +DROP TABLE IF EXISTS supplier; +DROP TABLE IF EXISTS partsupp; +DROP TABLE IF EXISTS customer; +DROP TABLE IF EXISTS orders; +DROP TABLE IF EXISTS lineitem; +DROP TABLE IF EXISTS nation; +DROP TABLE IF EXISTS region; + +SET cross_to_inner_join_rewrite = 1; +SET allow_experimental_correlated_subqueries = 1; + +CREATE TABLE part +( + p_partkey Int32, -- PK + p_name String, -- variable text, size 55 + p_mfgr FixedString(25), + p_brand FixedString(10), + p_type String, -- variable text, size 25 + p_size Int32, -- integer + p_container FixedString(10), + p_retailprice Decimal(18,2), + p_comment String, -- variable text, size 23 + CONSTRAINT pk CHECK p_partkey >= 0, + CONSTRAINT positive CHECK (p_size >= 0 AND p_retailprice >= 0) +) engine = MergeTree ORDER BY (p_partkey); + +CREATE TABLE supplier +( + s_suppkey Int32, -- PK + s_name FixedString(25), + s_address String, -- variable text, size 40 + s_nationkey Int32, -- FK n_nationkey + s_phone FixedString(15), + s_acctbal Decimal(18,2), + s_comment String, -- variable text, size 101 + CONSTRAINT pk CHECK s_suppkey >= 0 +) engine = MergeTree ORDER BY (s_suppkey); + +CREATE TABLE partsupp +( + ps_partkey Int32, -- PK(1), FK p_partkey + ps_suppkey Int32, -- PK(2), FK s_suppkey + ps_availqty Int32, -- integer + ps_supplycost Decimal(18,2), + ps_comment String, -- variable text, size 199 + CONSTRAINT pk CHECK ps_partkey >= 0, + CONSTRAINT c1 CHECK (ps_availqty >= 0 AND ps_supplycost >= 0) +) engine = MergeTree ORDER BY (ps_partkey, ps_suppkey); + +CREATE TABLE customer +( + c_custkey Int32, -- PK + c_name String, -- variable text, size 25 + c_address String, -- variable text, size 40 + c_nationkey Int32, -- FK n_nationkey + c_phone FixedString(15), + c_acctbal Decimal(18,2), + c_mktsegment FixedString(10), + c_comment String, -- variable text, size 117 + CONSTRAINT pk CHECK c_custkey >= 0 +) engine = MergeTree ORDER BY (c_custkey); + +CREATE TABLE orders +( + o_orderkey Int32, -- PK + o_custkey Int32, -- FK c_custkey + o_orderstatus FixedString(1), + o_totalprice Decimal(18,2), + o_orderdate Date, + o_orderpriority FixedString(15), + o_clerk FixedString(15), + o_shippriority Int32, -- integer + o_comment String, -- variable text, size 79 + CONSTRAINT c1 CHECK o_totalprice >= 0 +) engine = MergeTree ORDER BY (o_orderdate, o_orderkey); + +CREATE TABLE lineitem +( + l_orderkey Int32, -- PK(1), FK o_orderkey + l_partkey Int32, -- FK ps_partkey + l_suppkey Int32, -- FK ps_suppkey + l_linenumber Int32, -- PK(2) + l_quantity Decimal(18,2), + l_extendedprice Decimal(18,2), + l_discount Decimal(18,2), + l_tax Decimal(18,2), + l_returnflag FixedString(1), + l_linestatus FixedString(1), + l_shipdate Date, + l_commitdate Date, + l_receiptdate Date, + l_shipinstruct FixedString(25), + l_shipmode FixedString(10), + l_comment String, -- variable text size 44 + CONSTRAINT c1 CHECK (l_quantity >= 0 AND l_extendedprice >= 0 AND l_tax >= 0 AND l_shipdate <= l_receiptdate) +-- CONSTRAINT c2 CHECK (l_discount >= 0 AND l_discount <= 1) +) engine = MergeTree ORDER BY (l_shipdate, l_receiptdate, l_orderkey, l_linenumber); + +CREATE TABLE nation +( + n_nationkey Int32, -- PK + n_name FixedString(25), + n_regionkey Int32, -- FK r_regionkey + n_comment String, -- variable text, size 152 + CONSTRAINT pk CHECK n_nationkey >= 0 +) Engine = MergeTree ORDER BY 
(n_nationkey); + +CREATE TABLE region +( + r_regionkey Int32, -- PK + r_name FixedString(25), + r_comment String, -- variable text, size 152 + CONSTRAINT pk CHECK r_regionkey >= 0 +) engine = MergeTree ORDER BY (r_regionkey); + +select 1; +select + l_returnflag, + l_linestatus, + sum(l_quantity) as sum_qty, + sum(l_extendedprice) as sum_base_price, + sum(l_extendedprice * (1 - l_discount)) as sum_disc_price, + sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge, + avg(l_quantity) as avg_qty, + avg(l_extendedprice) as avg_price, + avg(l_discount) as avg_disc, + count(*) as count_order +from + lineitem +where + l_shipdate <= date '1998-12-01' - interval 90 day +group by + l_returnflag, + l_linestatus +order by + l_returnflag, + l_linestatus; + +select 2; +select + s_acctbal, + s_name, + n_name, + p_partkey, + p_mfgr, + s_address, + s_phone, + s_comment +from + part, + supplier, + partsupp, + nation, + region +where + p_partkey = ps_partkey + and s_suppkey = ps_suppkey + and p_size = 15 + and p_type like '%BRASS' + and s_nationkey = n_nationkey + and n_regionkey = r_regionkey + and r_name = 'EUROPE' + and ps_supplycost = ( + select + min(ps_supplycost) + from + partsupp, + supplier, + nation, + region + where + p_partkey = ps_partkey + and s_suppkey = ps_suppkey + and s_nationkey = n_nationkey + and n_regionkey = r_regionkey + and r_name = 'EUROPE' + ) +order by + s_acctbal desc, + n_name, + s_name, + p_partkey +limit 100 +SETTINGS enable_analyzer=1; + +select 3; +select + l_orderkey, + sum(l_extendedprice * (1 - l_discount)) as revenue, + o_orderdate, + o_shippriority +from + customer, + orders, + lineitem +where + c_mktsegment = 'BUILDING' + and c_custkey = o_custkey + and l_orderkey = o_orderkey + and o_orderdate < date '1995-03-15' + and l_shipdate > date '1995-03-15' +group by + l_orderkey, + o_orderdate, + o_shippriority +order by + revenue desc, + o_orderdate +limit 10; + +select 4; +select + o_orderpriority, + count(*) as order_count +from + orders +where + o_orderdate >= date '1993-07-01' + and o_orderdate < date '1993-07-01' + interval '3' month + and exists ( + select + * + from + lineitem + where + l_orderkey = o_orderkey + and l_commitdate < l_receiptdate + ) +group by + o_orderpriority +order by + o_orderpriority +SETTINGS enable_analyzer=1; + +select 5; +select + n_name, + sum(l_extendedprice * (1 - l_discount)) as revenue +from + customer, + orders, + lineitem, + supplier, + nation, + region +where + c_custkey = o_custkey + and l_orderkey = o_orderkey + and l_suppkey = s_suppkey + and c_nationkey = s_nationkey + and s_nationkey = n_nationkey + and n_regionkey = r_regionkey + and r_name = 'ASIA' + and o_orderdate >= date '1994-01-01' + and o_orderdate < date '1994-01-01' + interval '1' year +group by + n_name +order by + revenue desc; + +select 6; +select + sum(l_extendedprice * l_discount) as revenue +from + lineitem +where + l_shipdate >= date '1994-01-01' + and l_shipdate < date '1994-01-01' + interval '1' year + and l_discount between toDecimal32(0.06, 2) - toDecimal32(0.01, 2) + and toDecimal32(0.06, 2) + toDecimal32(0.01, 2) + and l_quantity < 24; + +select 7; +select + supp_nation, + cust_nation, + l_year, + sum(volume) as revenue +from + ( + select + n1.n_name as supp_nation, + n2.n_name as cust_nation, + extract(year from l_shipdate) as l_year, + l_extendedprice * (1 - l_discount) as volume + from + supplier, + lineitem, + orders, + customer, + nation n1, + nation n2 + where + s_suppkey = l_suppkey + and o_orderkey = l_orderkey + and c_custkey = 
o_custkey + and s_nationkey = n1.n_nationkey + and c_nationkey = n2.n_nationkey + and ( + (n1.n_name = 'FRANCE' and n2.n_name = 'GERMANY') + or (n1.n_name = 'GERMANY' and n2.n_name = 'FRANCE') + ) + and l_shipdate between date '1995-01-01' and date '1996-12-31' + ) as shipping +group by + supp_nation, + cust_nation, + l_year +order by + supp_nation, + cust_nation, + l_year; + +select 8; +select + o_year, + sum(case + when nation = 'BRAZIL' then volume + else 0 + end) / sum(volume) as mkt_share +from + ( + select + extract(year from o_orderdate) as o_year, + l_extendedprice * (1 - l_discount) as volume, + n2.n_name as nation + from + part, + supplier, + lineitem, + orders, + customer, + nation n1, + nation n2, + region + where + p_partkey = l_partkey + and s_suppkey = l_suppkey + and l_orderkey = o_orderkey + and o_custkey = c_custkey + and c_nationkey = n1.n_nationkey + and n1.n_regionkey = r_regionkey + and r_name = 'AMERICA' + and s_nationkey = n2.n_nationkey + and o_orderdate between date '1995-01-01' and date '1996-12-31' + and p_type = 'ECONOMY ANODIZED STEEL' + ) as all_nations +group by + o_year +order by + o_year; + +select 9; +select + nation, + o_year, + sum(amount) as sum_profit +from + ( + select + n_name as nation, + extract(year from o_orderdate) as o_year, + l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount + from + part, + supplier, + lineitem, + partsupp, + orders, + nation + where + s_suppkey = l_suppkey + and ps_suppkey = l_suppkey + and ps_partkey = l_partkey + and p_partkey = l_partkey + and o_orderkey = l_orderkey + and s_nationkey = n_nationkey + and p_name like '%green%' + ) as profit +group by + nation, + o_year +order by + nation, + o_year desc; + +select 10; +select + c_custkey, + c_name, + sum(l_extendedprice * (1 - l_discount)) as revenue, + c_acctbal, + n_name, + c_address, + c_phone, + c_comment +from + customer, + orders, + lineitem, + nation +where + c_custkey = o_custkey + and l_orderkey = o_orderkey + and o_orderdate >= date '1993-10-01' + and o_orderdate < date '1993-10-01' + interval '3' month + and l_returnflag = 'R' + and c_nationkey = n_nationkey +group by + c_custkey, + c_name, + c_acctbal, + c_phone, + n_name, + c_address, + c_comment +order by + revenue desc +limit 20; + +select 11; -- TODO: remove toDecimal() +select + ps_partkey, + sum(ps_supplycost * ps_availqty) as value +from + partsupp, + supplier, + nation +where + ps_suppkey = s_suppkey + and s_nationkey = n_nationkey + and n_name = 'GERMANY' +group by + ps_partkey having + sum(ps_supplycost * ps_availqty) > ( + select + sum(ps_supplycost * ps_availqty) * toDecimal64('0.0100000000', 2) + -- ^^^^^^^^^^^^ + -- The above constant needs to be adjusted according + -- to the scale factor (SF): constant = 0.0001 / SF. 
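+ -- A worked instance of that formula: at SF = 1 the constant would be + -- 0.0001, so the 0.01 used here corresponds to SF = 0.0001 / 0.01 = 0.01.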
+ from + partsupp, + supplier, + nation + where + ps_suppkey = s_suppkey + and s_nationkey = n_nationkey + and n_name = 'GERMANY' + ) +order by + value desc; + +select 12; +select + l_shipmode, + sum(case + when o_orderpriority = '1-URGENT' + or o_orderpriority = '2-HIGH' + then 1 + else 0 + end) as high_line_count, + sum(case + when o_orderpriority <> '1-URGENT' + and o_orderpriority <> '2-HIGH' + then 1 + else 0 + end) as low_line_count +from + orders, + lineitem +where + o_orderkey = l_orderkey + and l_shipmode in ('MAIL', 'SHIP') + and l_commitdate < l_receiptdate + and l_shipdate < l_commitdate + and l_receiptdate >= date '1994-01-01' + and l_receiptdate < date '1994-01-01' + interval '1' year +group by + l_shipmode +order by + l_shipmode; + +select 13; +select + c_count, + count(*) as custdist +from + ( + select + c_custkey, + count(o_orderkey) as c_count + from + customer left outer join orders on + c_custkey = o_custkey + and o_comment not like '%special%requests%' + group by + c_custkey + ) as c_orders +group by + c_count +order by + custdist desc, + c_count desc; + +select 14; +select + toDecimal32(100.00, 2) * sum(case + when p_type like 'PROMO%' + then l_extendedprice * (1 - l_discount) + else 0 + end) / (1 + sum(l_extendedprice * (1 - l_discount))) as promo_revenue +from + lineitem, + part +where + l_partkey = p_partkey + and l_shipdate >= date '1995-09-01' + and l_shipdate < date '1995-09-01' + interval '1' month; + +select 15, 'fail: correlated subquery'; -- TODO: Missing columns: 'total_revenue' +drop view if exists revenue0; +create view revenue0 as + select + l_suppkey, + sum(l_extendedprice * (1 - l_discount)) + from + lineitem + where + l_shipdate >= date '1996-01-01' + and l_shipdate < date '1996-01-01' + interval '3' month + group by + l_suppkey; +select + s_suppkey, + s_name, + s_address, + s_phone, + total_revenue +from + supplier, + revenue0 +where + s_suppkey = supplier_no + and total_revenue = ( + select + max(total_revenue) + from + revenue0 + ) +order by + s_suppkey; -- { serverError UNKNOWN_IDENTIFIER } +drop view revenue0; + +select 16; +select + p_brand, + p_type, + p_size, + count(distinct ps_suppkey) as supplier_cnt +from + partsupp, + part +where + p_partkey = ps_partkey + and p_brand <> 'Brand#45' + and p_type not like 'MEDIUM POLISHED%' + and p_size in (49, 14, 23, 45, 19, 3, 36, 9) + and ps_suppkey not in ( + select + s_suppkey + from + supplier + where + s_comment like '%Customer%Complaints%' + ) +group by + p_brand, + p_type, + p_size +order by + supplier_cnt desc, + p_brand, + p_type, + p_size; + +select 17; +select + sum(l_extendedprice) / 7.0 as avg_yearly +from + lineitem, + part +where + p_partkey = l_partkey + and p_brand = 'Brand#23' + and p_container = 'MED BOX' + and l_quantity < ( + select + 0.2 * avg(l_quantity) + from + lineitem + where + l_partkey = p_partkey + ) +SETTINGS enable_analyzer=1; + +select 18; +select + c_name, + c_custkey, + o_orderkey, + o_orderdate, + o_totalprice, + sum(l_quantity) +from + customer, + orders, + lineitem +where + o_orderkey in ( + select + l_orderkey + from + lineitem + group by + l_orderkey having + sum(l_quantity) > 300 + ) + and c_custkey = o_custkey + and o_orderkey = l_orderkey +group by + c_name, + c_custkey, + o_orderkey, + o_orderdate, + o_totalprice +order by + o_totalprice desc, + o_orderdate +limit 100; + +select 19; +select + sum(l_extendedprice* (1 - l_discount)) as revenue +from + lineitem, + part +where + ( + p_partkey = l_partkey + and p_brand = 'Brand#12' + and p_container in ('SM CASE', 
'SM BOX', 'SM PACK', 'SM PKG') + and l_quantity >= 1 and l_quantity <= 1 + 10 + and p_size between 1 and 5 + and l_shipmode in ('AIR', 'AIR REG') + and l_shipinstruct = 'DELIVER IN PERSON' + ) + or + ( + p_partkey = l_partkey + and p_brand = 'Brand#23' + and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') + and l_quantity >= 10 and l_quantity <= 10 + 10 + and p_size between 1 and 10 + and l_shipmode in ('AIR', 'AIR REG') + and l_shipinstruct = 'DELIVER IN PERSON' + ) + or + ( + p_partkey = l_partkey + and p_brand = 'Brand#34' + and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') + and l_quantity >= 20 and l_quantity <= 20 + 10 + and p_size between 1 and 15 + and l_shipmode in ('AIR', 'AIR REG') + and l_shipinstruct = 'DELIVER IN PERSON' + ); + +select 20; +select + s_name, + s_address +from + supplier, + nation +where + s_suppkey in ( + select + ps_suppkey + from + partsupp + where + ps_partkey in ( + select + p_partkey + from + part + where + p_name like 'forest%' + ) + and ps_availqty > ( + select + 0.5 * sum(l_quantity) + from + lineitem + where + l_partkey = ps_partkey + and l_suppkey = ps_suppkey + and l_shipdate >= date '1994-01-01' + and l_shipdate < date '1994-01-01' + interval '1' year + ) + ) + and s_nationkey = n_nationkey + and n_name = 'CANADA' +order by + s_name +SETTINGS enable_analyzer=1; + +select 21; +select + s_name, + count(*) as numwait +from + supplier, + lineitem l1, + orders, + nation +where + s_suppkey = l1.l_suppkey + and o_orderkey = l1.l_orderkey + and o_orderstatus = 'F' + and l1.l_receiptdate > l1.l_commitdate + and exists ( + select + * + from + lineitem l2 + where + l2.l_orderkey = l1.l_orderkey + and l2.l_suppkey <> l1.l_suppkey + ) + and not exists ( + select + * + from + lineitem l3 + where + l3.l_orderkey = l1.l_orderkey + and l3.l_suppkey <> l1.l_suppkey + and l3.l_receiptdate > l3.l_commitdate + ) + and s_nationkey = n_nationkey + and n_name = 'SAUDI ARABIA' +group by + s_name +order by + numwait desc, + s_name +limit 100 +SETTINGS enable_analyzer=1; + +select 22; +select + cntrycode, + count(*) as numcust, + sum(c_acctbal) as totacctbal +from + ( + select + substring(c_phone from 1 for 2) as cntrycode, + c_acctbal + from + customer + where + substring(c_phone from 1 for 2) in + ('13', '31', '23', '29', '30', '18', '17') + and c_acctbal > ( + select + avg(c_acctbal) + from + customer + where + c_acctbal > 0.00 + and substring(c_phone from 1 for 2) in + ('13', '31', '23', '29', '30', '18', '17') + ) + and not exists ( + select + * + from + orders + where + o_custkey = c_custkey + ) + ) as custsale +group by + cntrycode +order by + cntrycode +SETTINGS enable_analyzer=1; + +DROP TABLE part; +DROP TABLE supplier; +DROP TABLE partsupp; +DROP TABLE customer; +DROP TABLE orders; +DROP TABLE lineitem; +DROP TABLE nation; +DROP TABLE region; diff --git a/parser/testdata/01096_array_reduce_in_ranges/ast.json b/parser/testdata/01096_array_reduce_in_ranges/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01096_array_reduce_in_ranges/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01096_array_reduce_in_ranges/metadata.json b/parser/testdata/01096_array_reduce_in_ranges/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01096_array_reduce_in_ranges/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01096_array_reduce_in_ranges/query.sql b/parser/testdata/01096_array_reduce_in_ranges/query.sql new file mode 
100644 index 000000000..051eaf7ca --- /dev/null +++ b/parser/testdata/01096_array_reduce_in_ranges/query.sql @@ -0,0 +1,32 @@ +SELECT + arrayReduceInRanges( + 'groupArray', + [(1, 3), (2, 3), (3, 3)], + ['a', 'b', 'c', 'd', 'e'] + ); + +SELECT + arrayReduceInRanges( + 'sum', + [ + (-6, 0), (-4, 0), (-2, 0), (0, 0), (2, 0), (4, 0), + (-6, 1), (-4, 1), (-2, 1), (0, 1), (2, 1), (4, 1), + (-6, 2), (-4, 2), (-2, 2), (0, 2), (2, 2), (4, 2), + (-6, 3), (-4, 3), (-2, 3), (0, 3), (2, 3), (4, 3) + ], + [100, 200, 300, 400] + ); + +WITH + arrayMap(x -> x + 1, range(50)) as data +SELECT + arrayReduceInRanges('groupArray', [(a, c), (b, d)], data) = + [arraySlice(data, a, c), arraySlice(data, b, d)] +FROM ( + SELECT + cityHash64(number + 100) % 40 as a, + cityHash64(number + 200) % 60 as b, + cityHash64(number + 300) % 20 as c, + cityHash64(number + 400) % 30 as d + FROM numbers(20) +); diff --git a/parser/testdata/01096_block_serialized_state/ast.json b/parser/testdata/01096_block_serialized_state/ast.json new file mode 100644 index 000000000..c976ed12b --- /dev/null +++ b/parser/testdata/01096_block_serialized_state/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function ignore (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function blockSerializedSize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_1)" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001414243, + "rows_read": 9, + "bytes_read": 376 + } +} diff --git a/parser/testdata/01096_block_serialized_state/metadata.json b/parser/testdata/01096_block_serialized_state/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01096_block_serialized_state/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01096_block_serialized_state/query.sql b/parser/testdata/01096_block_serialized_state/query.sql new file mode 100644 index 000000000..c07642f32 --- /dev/null +++ b/parser/testdata/01096_block_serialized_state/query.sql @@ -0,0 +1 @@ +SELECT ignore(blockSerializedSize((1, 1))); diff --git a/parser/testdata/01096_zeros/ast.json b/parser/testdata/01096_zeros/ast.json new file mode 100644 index 000000000..9c9b64eaf --- /dev/null +++ b/parser/testdata/01096_zeros/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier zero" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.zeros" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Set" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001346812, + "rows_read": 11, + "bytes_read": 395 + } +} diff --git a/parser/testdata/01096_zeros/metadata.json b/parser/testdata/01096_zeros/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01096_zeros/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01096_zeros/query.sql b/parser/testdata/01096_zeros/query.sql new file mode 100644 index 000000000..b0c665494 --- /dev/null +++ b/parser/testdata/01096_zeros/query.sql @@ -0,0 +1,12 @@ +select zero from system.zeros limit 10 settings max_block_size = 3; +select '-'; +select zero from system.zeros_mt limit 10 settings max_block_size = 3, max_threads = 2; +select '-'; +select zero from zeros(10) settings max_block_size = 3; +select '-'; +select zero from zeros_mt(10) settings max_block_size = 3, max_threads=3; +select '-'; +select sum(zero), count() from (select * from system.zeros limit 10000000); +select sum(zero), count() from (select * from system.zeros_mt limit 10000000); +select sum(zero), count() from zeros(10000000); +select sum(zero), count() from zeros_mt(10000000); diff --git a/parser/testdata/01097_cyclic_defaults/ast.json b/parser/testdata/01097_cyclic_defaults/ast.json new file mode 100644 index 000000000..70b05bc8a --- /dev/null +++ b/parser/testdata/01097_cyclic_defaults/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_with_cyclic_defaults (children 1)" + }, + { + "explain": " Identifier table_with_cyclic_defaults" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001453357, + "rows_read": 2, + "bytes_read": 104 + } +} diff --git a/parser/testdata/01097_cyclic_defaults/metadata.json b/parser/testdata/01097_cyclic_defaults/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01097_cyclic_defaults/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01097_cyclic_defaults/query.sql b/parser/testdata/01097_cyclic_defaults/query.sql new file mode 100644 index 000000000..570c93c51 --- /dev/null +++ b/parser/testdata/01097_cyclic_defaults/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS table_with_cyclic_defaults; + +CREATE TABLE table_with_cyclic_defaults (a DEFAULT b, b DEFAULT a) ENGINE = Memory; --{serverError CYCLIC_ALIASES} + +CREATE TABLE table_with_cyclic_defaults (a DEFAULT b + 1, b DEFAULT a * a) ENGINE = Memory; --{serverError CYCLIC_ALIASES} + +CREATE TABLE table_with_cyclic_defaults (a DEFAULT b, b DEFAULT toString(c), c DEFAULT concat(a, '1')) ENGINE = Memory; --{serverError CYCLIC_ALIASES} + +CREATE TABLE table_with_cyclic_defaults (a DEFAULT b, b DEFAULT c, c DEFAULT a * b) ENGINE = Memory; --{serverError CYCLIC_ALIASES} + +CREATE TABLE table_with_cyclic_defaults (a String DEFAULT b, b String DEFAULT a) ENGINE = Memory; --{serverError CYCLIC_ALIASES} + +CREATE TABLE table_with_cyclic_defaults (a String) ENGINE = Memory; + +ALTER TABLE table_with_cyclic_defaults ADD COLUMN c String DEFAULT b, ADD COLUMN b String DEFAULT c; --{serverError CYCLIC_ALIASES} + +ALTER TABLE table_with_cyclic_defaults ADD COLUMN b String DEFAULT a, MODIFY COLUMN a DEFAULT b; --{serverError CYCLIC_ALIASES} + +SELECT 1; + +DROP TABLE IF EXISTS table_with_cyclic_defaults; diff --git a/parser/testdata/01097_one_more_range_reader_test/ast.json b/parser/testdata/01097_one_more_range_reader_test/ast.json new file mode 100644 index 000000000..e93b309f8 --- /dev/null +++ b/parser/testdata/01097_one_more_range_reader_test/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + 
"explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001227425, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01097_one_more_range_reader_test/metadata.json b/parser/testdata/01097_one_more_range_reader_test/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01097_one_more_range_reader_test/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01097_one_more_range_reader_test/query.sql b/parser/testdata/01097_one_more_range_reader_test/query.sql new file mode 100644 index 000000000..53eab0dc5 --- /dev/null +++ b/parser/testdata/01097_one_more_range_reader_test/query.sql @@ -0,0 +1,17 @@ +drop table if exists t; + +create table t (id UInt32, a Int) engine = MergeTree order by id; + +insert into t values (1, 0) (2, 1) (3, 0) (4, 0) (5, 0); +alter table t add column s String default 'foo'; +select s from t prewhere a = 1; + +drop table t; + +create table t (id UInt32, a Int) engine = MergeTree order by id; + +insert into t values (1, 1) (2, 1) (3, 0) (4, 0) (5, 0); +alter table t add column s String default 'foo'; +select s from t prewhere a = 1; + +drop table t; diff --git a/parser/testdata/01097_one_more_range_reader_test_wide_part/ast.json b/parser/testdata/01097_one_more_range_reader_test_wide_part/ast.json new file mode 100644 index 000000000..50871d235 --- /dev/null +++ b/parser/testdata/01097_one_more_range_reader_test_wide_part/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001446343, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01097_one_more_range_reader_test_wide_part/metadata.json b/parser/testdata/01097_one_more_range_reader_test_wide_part/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01097_one_more_range_reader_test_wide_part/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01097_one_more_range_reader_test_wide_part/query.sql b/parser/testdata/01097_one_more_range_reader_test_wide_part/query.sql new file mode 100644 index 000000000..244f58b67 --- /dev/null +++ b/parser/testdata/01097_one_more_range_reader_test_wide_part/query.sql @@ -0,0 +1,17 @@ +drop table if exists t; + +create table t (id UInt32, a Int) engine = MergeTree order by id settings min_bytes_for_wide_part=0; + +insert into t values (1, 0) (2, 1) (3, 0) (4, 0) (5, 0); +alter table t add column s String default 'foo'; +select s from t prewhere a = 1; + +drop table t; + +create table t (id UInt32, a Int) engine = MergeTree order by id settings min_bytes_for_wide_part=0; + +insert into t values (1, 1) (2, 1) (3, 0) (4, 0) (5, 0); +alter table t add column s String default 'foo'; +select s from t prewhere a = 1; + +drop table t; diff --git a/parser/testdata/01097_pre_limit/ast.json b/parser/testdata/01097_pre_limit/ast.json new file mode 100644 index 000000000..19bac39fb --- /dev/null +++ b/parser/testdata/01097_pre_limit/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + 
"explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers_mt" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_1000000" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001497323, + "rows_read": 14, + "bytes_read": 521 + } +} diff --git a/parser/testdata/01097_pre_limit/metadata.json b/parser/testdata/01097_pre_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01097_pre_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01097_pre_limit/query.sql b/parser/testdata/01097_pre_limit/query.sql new file mode 100644 index 000000000..03b9f441d --- /dev/null +++ b/parser/testdata/01097_pre_limit/query.sql @@ -0,0 +1 @@ +SELECT * FROM system.numbers_mt WHERE number = 1000000 LIMIT 1 diff --git a/parser/testdata/01098_sum/ast.json b/parser/testdata/01098_sum/ast.json new file mode 100644 index 000000000..a46d60c9c --- /dev/null +++ b/parser/testdata/01098_sum/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumKahan (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier dummy" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '127.{2,3}'" + }, + { + "explain": " Identifier system.one" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.00124415, + "rows_read": 14, + "bytes_read": 552 + } +} diff --git a/parser/testdata/01098_sum/metadata.json b/parser/testdata/01098_sum/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01098_sum/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01098_sum/query.sql b/parser/testdata/01098_sum/query.sql new file mode 100644 index 000000000..d660cd41f --- /dev/null +++ b/parser/testdata/01098_sum/query.sql @@ -0,0 +1,3 @@ +select sumKahan(dummy) from remote('127.{2,3}', system.one); +select sumWithOverflow(dummy) from remote('127.{2,3}', system.one); +select sum(dummy) from remote('127.{2,3}', system.one); diff --git a/parser/testdata/01099_operators_date_and_timestamp/ast.json b/parser/testdata/01099_operators_date_and_timestamp/ast.json new file mode 100644 index 000000000..6db461c40 --- /dev/null +++ b/parser/testdata/01099_operators_date_and_timestamp/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function toIntervalSecond (children 1)" + }, + { + 
"explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function toIntervalMinute (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function toIntervalHour (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001299059, + "rows_read": 13, + "bytes_read": 507 + } +} diff --git a/parser/testdata/01099_operators_date_and_timestamp/metadata.json b/parser/testdata/01099_operators_date_and_timestamp/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01099_operators_date_and_timestamp/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01099_operators_date_and_timestamp/query.sql b/parser/testdata/01099_operators_date_and_timestamp/query.sql new file mode 100644 index 000000000..6140bad46 --- /dev/null +++ b/parser/testdata/01099_operators_date_and_timestamp/query.sql @@ -0,0 +1,49 @@ +select interval 1 second, interval 1 minute, interval 1 hour; +select interval 1 day, interval 1 week, interval 1 month; +select interval 1 quarter, interval 1 year; + +select date '2001-09-29'; +select (date '2001-09-29' + interval 7 day) x, toTypeName(x); +select (date '2001-10-01' - interval 7 day) x, toTypeName(x); +select (date '2001-09-29' + 7) x, toTypeName(x); +select (date '2001-10-01' - 7) x, toTypeName(x); +select (date '2001-09-29' + interval 1 hour) x, toTypeName(x); +select (date '2001-09-29' - interval 1 hour) x, toTypeName(x); +select (date '2001-10-01' - date '2001-09-28') x, toTypeName(x); +select timestamp '2001-09-28 01:00:00' + interval 23 hour; +select timestamp '2001-09-28 23:00:00' - interval 23 hour; + +SET session_timezone = 'Europe/Amsterdam'; + +select (date '2001-09-29' + interval 12345 second) x, toTypeName(x); +select (date '2001-09-29' + interval 12345 millisecond) x, toTypeName(x); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select (date '2001-09-29' + interval 12345 microsecond) x, toTypeName(x); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select (date '2001-09-29' + interval 12345 nanosecond) x, toTypeName(x); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select (date '2001-09-29' - interval 12345 second) x, toTypeName(x); +select (date '2001-09-29' - interval 12345 millisecond) x, toTypeName(x); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select (date '2001-09-29' - interval 12345 microsecond) x, toTypeName(x); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select (date '2001-09-29' - interval 12345 nanosecond) x, toTypeName(x); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select (toDate32('2001-09-29') + interval 12345 second) x, toTypeName(x); +select (toDate32('2001-09-29') + interval 12345 millisecond) x, toTypeName(x); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select (toDate32('2001-09-29') + interval 12345 microsecond) x, toTypeName(x); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select (toDate32('2001-09-29') + interval 12345 nanosecond) x, toTypeName(x); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select (toDate32('2001-09-29') - interval 12345 second) x, toTypeName(x); +select (toDate32('2001-09-29') - interval 12345 millisecond) x, toTypeName(x); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select (toDate32('2001-09-29') - interval 12345 microsecond) x, toTypeName(x); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select (toDate32('2001-09-29') - 
interval 12345 nanosecond) x, toTypeName(x); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +select (timestamp '2001-12-29 03:00:00' - timestamp '2001-12-27 12:00:00') x, toTypeName(x); + +select -interval 23 hour; +select interval 1 day + interval 1 hour; +select interval '1 day' - interval '1 hour'; + +-- select date '2001-09-28' + time '03:00'; +-- select time '01:00' + interval '3 hours'; +-- select time '05:00' - time '03:00'; +-- select time '05:00' - interval '2 hours'; + +-- select 900 * interval '1 second'; -- interval '00:15:00' +-- select (21 * interval '1 day') x, toTypeName(x); -- interval '21 days' +-- select (double precision '3.5' * interval '1 hour') x, toTypeName(x); -- interval '03:30:00' +-- select (interval '1 hour' / double precision '1.5') x, toTypeName(x); -- interval '00:40:00' diff --git a/parser/testdata/01099_parallel_distributed_insert_select/ast.json b/parser/testdata/01099_parallel_distributed_insert_select/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01099_parallel_distributed_insert_select/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01099_parallel_distributed_insert_select/metadata.json b/parser/testdata/01099_parallel_distributed_insert_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01099_parallel_distributed_insert_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01099_parallel_distributed_insert_select/query.sql b/parser/testdata/01099_parallel_distributed_insert_select/query.sql new file mode 100644 index 000000000..bb7721c5b --- /dev/null +++ b/parser/testdata/01099_parallel_distributed_insert_select/query.sql @@ -0,0 +1,296 @@ +-- Tags: distributed + +-- ConnectionPoolWithFailover: Connection failed at try №1 - is not a problem +SET send_logs_level = 'fatal'; + +SET prefer_localhost_replica = 1; + +DROP TABLE IF EXISTS local_01099_a; +DROP TABLE IF EXISTS local_01099_b; +DROP TABLE IF EXISTS distributed_01099_a; +DROP TABLE IF EXISTS distributed_01099_b; + +SET parallel_distributed_insert_select=1; +SELECT 'parallel_distributed_insert_select=1'; + +-- +-- test_shard_localhost +-- + +SELECT 'test_shard_localhost'; + +CREATE TABLE local_01099_a (number UInt64) ENGINE = Log; +CREATE TABLE local_01099_b (number UInt64) ENGINE = Log; +CREATE TABLE distributed_01099_a AS local_01099_a ENGINE = Distributed('test_shard_localhost', currentDatabase(), local_01099_a, rand()); +CREATE TABLE distributed_01099_b AS local_01099_b ENGINE = Distributed('test_shard_localhost', currentDatabase(), local_01099_b, rand()); + +INSERT INTO local_01099_a SELECT number from system.numbers limit 3; +INSERT INTO distributed_01099_b SELECT * from distributed_01099_a; + +SELECT * FROM distributed_01099_b; + +DROP TABLE local_01099_a; +DROP TABLE local_01099_b; +DROP TABLE distributed_01099_a; +DROP TABLE distributed_01099_b; + +-- +-- test_cluster_two_shards_localhost +-- + +SELECT 'test_cluster_two_shards_localhost'; + +CREATE TABLE local_01099_a (number UInt64) ENGINE = Log; +CREATE TABLE local_01099_b (number UInt64) ENGINE = Log; +CREATE TABLE distributed_01099_a AS local_01099_a ENGINE = Distributed('test_cluster_two_shards_localhost', currentDatabase(), local_01099_a, rand()); +CREATE TABLE distributed_01099_b AS local_01099_b ENGINE = Distributed('test_cluster_two_shards_localhost', currentDatabase(), local_01099_b, rand()); + +INSERT INTO local_01099_a SELECT number from system.numbers limit 3; +INSERT INTO 
distributed_01099_b SELECT * from distributed_01099_a; + +SELECT number, count(number) FROM local_01099_b group by number order by number; + +DROP TABLE local_01099_a; +DROP TABLE local_01099_b; +DROP TABLE distributed_01099_a; +DROP TABLE distributed_01099_b; + +-- +-- test_cluster_two_shards +-- + +SELECT 'test_cluster_two_shards'; + +CREATE TABLE local_01099_a (number UInt64) ENGINE = Log; +CREATE TABLE local_01099_b (number UInt64) ENGINE = Log; +CREATE TABLE distributed_01099_a AS local_01099_a ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), local_01099_a, rand()); +CREATE TABLE distributed_01099_b AS local_01099_b ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), local_01099_b, rand()); + +SYSTEM STOP DISTRIBUTED SENDS distributed_01099_b; +SET prefer_localhost_replica=0; -- to require distributed send for local replica too +INSERT INTO local_01099_a SELECT number from system.numbers limit 3; +INSERT INTO distributed_01099_b SELECT * from distributed_01099_a; +SET prefer_localhost_replica=1; + +-- distributed sends disabled, 0 rows (since parallel_distributed_insert_select=1) +SELECT 'distributed'; +SELECT number, count(number) FROM distributed_01099_b group by number order by number; +SYSTEM FLUSH DISTRIBUTED distributed_01099_b; + +SELECT 'local'; +SELECT number, count(number) FROM local_01099_b group by number order by number; +SELECT 'distributed'; +SELECT number, count(number) FROM distributed_01099_b group by number order by number; + +DROP TABLE local_01099_a; +DROP TABLE local_01099_b; +DROP TABLE distributed_01099_a; +DROP TABLE distributed_01099_b; + +--- test_cluster_1_shard_3_replicas_1_unavailable + +SELECT 'test_cluster_1_shard_3_replicas_1_unavailable'; + +CREATE TABLE local_01099_a (number UInt64) ENGINE = MergeTree() ORDER BY number; +CREATE TABLE local_01099_b (number UInt64) ENGINE = MergeTree() ORDER BY number; +CREATE TABLE distributed_01099_a AS local_01099_a ENGINE = Distributed('test_cluster_1_shard_3_replicas_1_unavailable', currentDatabase(), local_01099_a, rand()); +CREATE TABLE distributed_01099_b AS local_01099_b ENGINE = Distributed('test_cluster_1_shard_3_replicas_1_unavailable', currentDatabase(), local_01099_b, rand()); + +SYSTEM STOP DISTRIBUTED SENDS distributed_01099_b; +SET prefer_localhost_replica=0; -- to require distributed send for local replica too +INSERT INTO local_01099_a SELECT number from system.numbers limit 3; +INSERT INTO distributed_01099_b SELECT * from distributed_01099_a; +SET prefer_localhost_replica=1; + +-- distributed sends disabled, but they are not required, since insert is done into local table. 
+-- (since parallel_distributed_insert_select=2) +SELECT 'distributed'; +SELECT number, count(number) FROM distributed_01099_b group by number order by number; +SELECT 'local'; +SELECT number, count(number) FROM local_01099_b group by number order by number; + +DROP TABLE local_01099_a; +DROP TABLE local_01099_b; +SET send_logs_level='fatal'; +DROP TABLE distributed_01099_a; +DROP TABLE distributed_01099_b; +SET send_logs_level='warning'; + +--- test_cluster_1_shard_3_replicas_1_unavailable with storageCluster + +SELECT 'test_cluster_1_shard_3_replicas_1_unavailable with storageCluster'; + +CREATE TABLE local_01099_b (number UInt64) ENGINE = MergeTree() ORDER BY number; +CREATE TABLE distributed_01099_b AS local_01099_b ENGINE = Distributed('test_cluster_1_shard_3_replicas_1_unavailable', currentDatabase(), local_01099_b, rand()); + +SYSTEM STOP DISTRIBUTED SENDS distributed_01099_b; +SET prefer_localhost_replica=0; -- to require distributed send for local replica too +SET send_logs_level='error'; +INSERT INTO distributed_01099_b SELECT * FROM urlCluster('test_cluster_two_shards', 'http://localhost:8123/?query=select+{1,2,3}+format+TSV', 'TSV', 's String'); +SET send_logs_level='warning'; +SET prefer_localhost_replica=1; + +-- distributed sends disabled, but they are not required, since insert is done into local table. +-- (since parallel_distributed_insert_select=2) +SELECT 'distributed'; +SELECT number, count(number) FROM distributed_01099_b group by number order by number; +SELECT 'local'; +SELECT number, count(number) FROM local_01099_b group by number order by number; + +DROP TABLE local_01099_b; +SET send_logs_level='fatal'; +DROP TABLE distributed_01099_b; +SET send_logs_level='warning'; + +SET parallel_distributed_insert_select=2; +SELECT 'parallel_distributed_insert_select=2'; + +-- +-- test_shard_localhost +-- + +SELECT 'test_shard_localhost'; + +CREATE TABLE local_01099_a (number UInt64) ENGINE = Log; +CREATE TABLE local_01099_b (number UInt64) ENGINE = Log; +CREATE TABLE distributed_01099_a AS local_01099_a ENGINE = Distributed('test_shard_localhost', currentDatabase(), local_01099_a, rand()); +CREATE TABLE distributed_01099_b AS local_01099_b ENGINE = Distributed('test_shard_localhost', currentDatabase(), local_01099_b, rand()); + +INSERT INTO local_01099_a SELECT number from system.numbers limit 3; +INSERT INTO distributed_01099_b SELECT * from distributed_01099_a; + +SELECT * FROM distributed_01099_b; + +DROP TABLE local_01099_a; +DROP TABLE local_01099_b; +DROP TABLE distributed_01099_a; +DROP TABLE distributed_01099_b; + +--- https://github.com/ClickHouse/ClickHouse/issues/78464 +CREATE TABLE local_01099_c (n UInt64) ENGINE = Log; +CREATE TABLE distributed_01099_c AS local_01099_c ENGINE = Distributed('test_shard_localhost', currentDatabase(), local_01099_c, rand()); + +INSERT INTO TABLE FUNCTION clusterAllReplicas('test_shard_localhost', currentDatabase(), 'distributed_01099_c') (n) SELECT number FROM remote('localhost', numbers(5)) tx; + +SELECT * FROM distributed_01099_c; + +DROP TABLE local_01099_c; +DROP TABLE distributed_01099_c; + +-- +-- test_cluster_two_shards_localhost +-- + +SELECT 'test_cluster_two_shards_localhost'; + +-- Log engine will lead to deadlock: +-- DB::Exception: std::system_error: Resource deadlock avoided. +-- So use MergeTree instead. 
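+-- For context (a sketch assuming the documented semantics of the setting): +-- with parallel_distributed_insert_select=2 each shard rewrites +-- INSERT INTO distributed_01099_b SELECT * FROM distributed_01099_a +-- into the purely local +-- INSERT INTO local_01099_b SELECT * FROM local_01099_a +-- which is why the checks below expect no queued distributed sends.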
+CREATE TABLE local_01099_a (number UInt64) ENGINE = MergeTree() ORDER BY number; +CREATE TABLE local_01099_b (number UInt64) ENGINE = MergeTree() ORDER BY number; +CREATE TABLE distributed_01099_a AS local_01099_a ENGINE = Distributed('test_cluster_two_shards_localhost', currentDatabase(), local_01099_a, rand()); +CREATE TABLE distributed_01099_b AS local_01099_b ENGINE = Distributed('test_cluster_two_shards_localhost', currentDatabase(), local_01099_b, rand()); + +INSERT INTO local_01099_a SELECT number from system.numbers limit 3; +INSERT INTO distributed_01099_b SELECT * from distributed_01099_a; + +SELECT number, count(number) FROM local_01099_b group by number order by number; + +DROP TABLE local_01099_a; +DROP TABLE local_01099_b; +DROP TABLE distributed_01099_a; +DROP TABLE distributed_01099_b; + +-- +-- test_cluster_two_shards +-- + +SELECT 'test_cluster_two_shards'; + +CREATE TABLE local_01099_a (number UInt64) ENGINE = MergeTree() ORDER BY number; +CREATE TABLE local_01099_b (number UInt64) ENGINE = MergeTree() ORDER BY number; +CREATE TABLE distributed_01099_a AS local_01099_a ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), local_01099_a, rand()); +CREATE TABLE distributed_01099_b AS local_01099_b ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), local_01099_b, rand()); + +SYSTEM STOP DISTRIBUTED SENDS distributed_01099_b; +SET prefer_localhost_replica=0; -- to require distributed send for local replica too +INSERT INTO local_01099_a SELECT number from system.numbers limit 3; +INSERT INTO distributed_01099_b SELECT * from distributed_01099_a; +SET prefer_localhost_replica=1; + +-- distributed sends disabled, but they are not required, since insert is done into local table. +-- (since parallel_distributed_insert_select=2) +SELECT 'distributed'; +SELECT number, count(number) FROM distributed_01099_b group by number order by number; +SELECT 'local'; +SELECT number, count(number) FROM local_01099_b group by number order by number; + +DROP TABLE local_01099_a; +DROP TABLE local_01099_b; +DROP TABLE distributed_01099_a; +DROP TABLE distributed_01099_b; + +--- test_cluster_1_shard_3_replicas_1_unavailable +SET send_logs_level='error'; + +SELECT 'test_cluster_1_shard_3_replicas_1_unavailable'; + +CREATE TABLE local_01099_a (number UInt64) ENGINE = MergeTree() ORDER BY number; +CREATE TABLE local_01099_b (number UInt64) ENGINE = MergeTree() ORDER BY number; +CREATE TABLE distributed_01099_a AS local_01099_a ENGINE = Distributed('test_cluster_1_shard_3_replicas_1_unavailable', currentDatabase(), local_01099_a, rand()); +CREATE TABLE distributed_01099_b AS local_01099_b ENGINE = Distributed('test_cluster_1_shard_3_replicas_1_unavailable', currentDatabase(), local_01099_b, rand()); + +SYSTEM STOP DISTRIBUTED SENDS distributed_01099_b; +SET prefer_localhost_replica=0; -- to require distributed send for local replica too +INSERT INTO local_01099_a SELECT number from system.numbers limit 3; +INSERT INTO distributed_01099_b SELECT * from distributed_01099_a; +SET prefer_localhost_replica=1; + +-- distributed sends disabled, but they are not required, since insert is done into local table. 
+-- (since parallel_distributed_insert_select=2) +SELECT 'distributed'; +SELECT number, count(number) FROM distributed_01099_b group by number order by number; +SELECT 'local'; +SELECT number, count(number) FROM local_01099_b group by number order by number; + +DROP TABLE local_01099_a; +DROP TABLE local_01099_b; +DROP TABLE distributed_01099_a; +DROP TABLE distributed_01099_b; + +--- test_cluster_1_shard_3_replicas_1_unavailable with storageCluster + +SELECT 'test_cluster_1_shard_3_replicas_1_unavailable with storageCluster'; + +CREATE TABLE local_01099_b (number UInt64) ENGINE = MergeTree() ORDER BY number; +CREATE TABLE distributed_01099_b AS local_01099_b ENGINE = Distributed('test_cluster_1_shard_3_replicas_1_unavailable', currentDatabase(), local_01099_b, rand()); + +SYSTEM STOP DISTRIBUTED SENDS distributed_01099_b; +SET prefer_localhost_replica=0; -- to require distributed send for local replica too +SET send_logs_level='error'; +INSERT INTO distributed_01099_b SELECT * FROM urlCluster('test_cluster_two_shards', 'http://localhost:8123/?query=select+{1,2,3}+format+TSV', 'TSV', 's String'); +SET send_logs_level='warning'; +SET prefer_localhost_replica=1; + +-- distributed sends disabled, but they are not required, since insert is done into local table. +-- (since parallel_distributed_insert_select=2) +SELECT 'distributed'; +SELECT number, count(number) FROM distributed_01099_b group by number order by number; +SELECT 'local'; +SELECT number, count(number) FROM local_01099_b group by number order by number; + +truncate table local_01099_b; + +SET send_logs_level='error'; +INSERT INTO distributed_01099_b with 'http://localhost:8123/?query=' || 'select+{1,2,3}+format+TSV' as url SELECT * FROM urlCluster('test_cluster_two_shards', (select url), 'TSV', 's String'); +SET send_logs_level='warning'; + +SELECT 'distributed'; +SELECT number, count(number) FROM distributed_01099_b group by number order by number; +SELECT 'local'; +SELECT number, count(number) FROM local_01099_b group by number order by number; + +DROP TABLE local_01099_b; +SET send_logs_level='fatal'; +DROP TABLE distributed_01099_b; +SET send_logs_level='warning'; diff --git a/parser/testdata/01100_split_by_string/ast.json b/parser/testdata/01100_split_by_string/ast.json new file mode 100644 index 000000000..dc8ca5d58 --- /dev/null +++ b/parser/testdata/01100_split_by_string/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function splitByString (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'ab'" + }, + { + "explain": " Literal 'cdeabcde'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001473031, + "rows_read": 8, + "bytes_read": 294 + } +} diff --git a/parser/testdata/01100_split_by_string/metadata.json b/parser/testdata/01100_split_by_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01100_split_by_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01100_split_by_string/query.sql b/parser/testdata/01100_split_by_string/query.sql new file mode 100644 index 000000000..5dfe392f8 --- /dev/null +++ b/parser/testdata/01100_split_by_string/query.sql @@ -0,0 +1,8 @@ +select splitByString('ab', 
'cdeabcde'); +select splitByString('ab', 'abcdeabcdeab'); +select splitByString('ab', 'ababab'); +select splitByString('ababab', 'ababab'); +select splitByString('', 'abcde'); +select splitByString(', ', x) from (select arrayJoin(['hello, world', 'gbye, bug']) x); +select splitByString('ab', ''); +select splitByString('', ''); diff --git a/parser/testdata/01101_literal_column_clash/ast.json b/parser/testdata/01101_literal_column_clash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01101_literal_column_clash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01101_literal_column_clash/metadata.json b/parser/testdata/01101_literal_column_clash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01101_literal_column_clash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01101_literal_column_clash/query.sql b/parser/testdata/01101_literal_column_clash/query.sql new file mode 100644 index 000000000..bf8a4308f --- /dev/null +++ b/parser/testdata/01101_literal_column_clash/query.sql @@ -0,0 +1,27 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/9810 +select cast(1 as String) +from (select 1 as iid) as t1 +join (select '1' as sid) as t2 on t2.sid = cast(t1.iid as String); + +-- even simpler cases +select cast(7 as String), * from (select 3 "'String'"); +select cast(7 as String), * from (select number "'String'" FROM numbers(2)); +SELECT concat('xyz', 'abc'), * FROM (SELECT 2 AS "'xyz'"); +with 3 as "1" select 1, "1"; -- { serverError AMBIGUOUS_COLUMN_NAME } + +-- https://github.com/ClickHouse/ClickHouse/issues/9953 +select 1, * from (select 2 x) a left join (select 1, 3 y) b on y = x; +select 1, * from (select 2 x, 1) a right join (select 3 y) b on y = x; +select null, isConstant(null), * from (select 2 x) a left join (select null, 3 y) b on y = x; +select null, isConstant(null), * from (select 2 x, null) a right join (select 3 y) b on y = x; + +-- other cases with joins and constants + +select cast(1, 'UInt8') from (select arrayJoin([1, 2]) as a) t1 left join (select 1 as b) t2 on b = ignore('UInt8') SETTINGS enable_analyzer = 0; -- { serverError INVALID_JOIN_ON_EXPRESSION } +select cast(1, 'UInt8') from (select arrayJoin([1, 2]) as a) t1 left join (select 1 as b) t2 on b = ignore('UInt8') SETTINGS enable_analyzer = 1; + +select isConstant('UInt8'), toFixedString('hello', toUInt8(substring('UInt8', 5, 1))) from (select arrayJoin([1, 2]) as a) t1 left join (select 1 as b) t2 on b = ignore('UInt8') SETTINGS enable_analyzer = 0; -- { serverError INVALID_JOIN_ON_EXPRESSION } +select isConstant('UInt8'), toFixedString('hello', toUInt8(substring('UInt8', 5, 1))) from (select arrayJoin([1, 2]) as a) t1 left join (select 1 as b) t2 on b = ignore('UInt8') SETTINGS enable_analyzer = 1; + +-- https://github.com/ClickHouse/ClickHouse/issues/20624 +select 2 as `toString(x)`, x from (select 1 as x); diff --git a/parser/testdata/01101_prewhere_after_alter/ast.json b/parser/testdata/01101_prewhere_after_alter/ast.json new file mode 100644 index 000000000..dd374c4ba --- /dev/null +++ b/parser/testdata/01101_prewhere_after_alter/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_a (children 1)" + }, + { + "explain": " Identifier test_a" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001137141, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git 
a/parser/testdata/01101_prewhere_after_alter/metadata.json b/parser/testdata/01101_prewhere_after_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01101_prewhere_after_alter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01101_prewhere_after_alter/query.sql b/parser/testdata/01101_prewhere_after_alter/query.sql new file mode 100644 index 000000000..976eb586a --- /dev/null +++ b/parser/testdata/01101_prewhere_after_alter/query.sql @@ -0,0 +1,37 @@ +DROP TABLE IF EXISTS test_a; +DROP TABLE IF EXISTS test_b; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE test_a +( + OldColumn String DEFAULT '', + EventDate Date DEFAULT toDate(EventTime), + EventTime DateTime +) ENGINE = MergeTree(EventDate, EventTime, 8192); + +CREATE TABLE test_b +( + OldColumn String DEFAULT '', + NewColumn String DEFAULT '', + EventDate Date DEFAULT toDate(EventTime), + EventTime DateTime +) ENGINE = MergeTree(EventDate, EventTime, 8192); + +INSERT INTO test_a (OldColumn, EventTime) VALUES('1', now()); + +INSERT INTO test_b (OldColumn, NewColumn, EventTime) VALUES('1', '1a', now()); +INSERT INTO test_b (OldColumn, NewColumn, EventTime) VALUES('2', '2a', now()); + +ALTER TABLE test_a ADD COLUMN NewColumn String DEFAULT '' AFTER OldColumn; + +INSERT INTO test_a (OldColumn, NewColumn, EventTime) VALUES('2', '2a', now()); + +SELECT NewColumn +FROM test_a +INNER JOIN +(SELECT OldColumn, NewColumn FROM test_b) s +Using OldColumn +PREWHERE NewColumn != ''; + +DROP TABLE test_a; +DROP TABLE test_b; diff --git a/parser/testdata/01102_distributed_local_in_bug/ast.json b/parser/testdata/01102_distributed_local_in_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01102_distributed_local_in_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01102_distributed_local_in_bug/metadata.json b/parser/testdata/01102_distributed_local_in_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01102_distributed_local_in_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01102_distributed_local_in_bug/query.sql b/parser/testdata/01102_distributed_local_in_bug/query.sql new file mode 100644 index 000000000..580f519ba --- /dev/null +++ b/parser/testdata/01102_distributed_local_in_bug/query.sql @@ -0,0 +1,26 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS hits; +DROP TABLE IF EXISTS visits; +DROP TABLE IF EXISTS hits_layer; +DROP TABLE IF EXISTS visits_layer; + +CREATE TABLE visits(StartDate Date) ENGINE MergeTree ORDER BY(StartDate); +CREATE TABLE hits(EventDate Date, WatchID UInt8) ENGINE MergeTree ORDER BY(EventDate); + +CREATE TABLE visits_layer(StartDate Date) ENGINE Distributed(test_cluster_two_shards_localhost, currentDatabase(), 'visits'); +CREATE TABLE hits_layer(EventDate Date, WatchID UInt8) ENGINE Distributed(test_cluster_two_shards_localhost, currentDatabase(), 'hits'); + +SET distributed_product_mode = 'local'; + +SELECT 0 FROM hits_layer AS hl +PREWHERE WatchID IN +( + SELECT 0 FROM visits_layer AS vl +) +WHERE 0; + +DROP TABLE hits; +DROP TABLE visits; +DROP TABLE hits_layer; +DROP TABLE visits_layer; diff --git a/parser/testdata/01103_distributed_product_mode_local_column_renames/ast.json b/parser/testdata/01103_distributed_product_mode_local_column_renames/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/01103_distributed_product_mode_local_column_renames/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01103_distributed_product_mode_local_column_renames/metadata.json b/parser/testdata/01103_distributed_product_mode_local_column_renames/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01103_distributed_product_mode_local_column_renames/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01103_distributed_product_mode_local_column_renames/query.sql b/parser/testdata/01103_distributed_product_mode_local_column_renames/query.sql new file mode 100644 index 000000000..db22d3e41 --- /dev/null +++ b/parser/testdata/01103_distributed_product_mode_local_column_renames/query.sql @@ -0,0 +1,89 @@ +-- Tags: distributed, no-parallel + +CREATE DATABASE IF NOT EXISTS test_01103; +USE test_01103; + +DROP TABLE IF EXISTS t1_shard; +DROP TABLE IF EXISTS t2_shard; +DROP TABLE IF EXISTS t1_distr; +DROP TABLE IF EXISTS t2_distr; + +create table t1_shard (id Int32) engine MergeTree order by id; +create table t2_shard (id Int32) engine MergeTree order by id; + +create table t1_distr as t1_shard engine Distributed(test_cluster_two_shards_localhost, test_01103, t1_shard, id); +create table t2_distr as t2_shard engine Distributed(test_cluster_two_shards_localhost, test_01103, t2_shard, id); + +insert into t1_shard values (42); +insert into t2_shard values (42); + +SET distributed_product_mode = 'local'; + +select d0.id +from t1_distr d0 +where d0.id in +( + select d1.id + from t1_distr as d1 + inner join t2_distr as d2 on d1.id = d2.id + where d1.id > 0 + order by d1.id +); + +select t1_distr.id +from t1_distr +where t1_distr.id in +( + select t1_distr.id + from t1_distr as d1 + inner join t2_distr as d2 on t1_distr.id = t2_distr.id + where t1_distr.id > 0 + order by t1_distr.id +); + +select test_01103.t1_distr.id +from test_01103.t1_distr +where test_01103.t1_distr.id in +( + select test_01103.t1_distr.id + from test_01103.t1_distr as d1 + inner join test_01103.t2_distr as d2 on test_01103.t1_distr.id = test_01103.t2_distr.id + where test_01103.t1_distr.id > 0 + order by test_01103.t1_distr.id +); + +select d0.id +from t1_distr d0 +join ( + select d1.id + from t1_distr as d1 + inner join t2_distr as d2 on d1.id = d2.id + where d1.id > 0 + order by d1.id +) s0 using id; + +select t1_distr.id +from t1_distr +join ( + select t1_distr.id + from t1_distr as d1 + inner join t2_distr as d2 on t1_distr.id = t2_distr.id + where t1_distr.id > 0 + order by t1_distr.id +) s0 using id; + +select test_01103.t1_distr.id +from test_01103.t1_distr +join ( + select test_01103.t1_distr.id + from test_01103.t1_distr as d1 + inner join test_01103.t2_distr as d2 on test_01103.t1_distr.id = test_01103.t2_distr.id + where test_01103.t1_distr.id > 0 + order by test_01103.t1_distr.id +) s0 using id; + +DROP TABLE t1_shard; +DROP TABLE t2_shard; +DROP TABLE t1_distr; +DROP TABLE t2_distr; +DROP DATABASE test_01103; diff --git a/parser/testdata/01104_distributed_numbers_test/ast.json b/parser/testdata/01104_distributed_numbers_test/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01104_distributed_numbers_test/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01104_distributed_numbers_test/metadata.json b/parser/testdata/01104_distributed_numbers_test/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01104_distributed_numbers_test/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01104_distributed_numbers_test/query.sql b/parser/testdata/01104_distributed_numbers_test/query.sql new file mode 100644 index 000000000..07237223b --- /dev/null +++ b/parser/testdata/01104_distributed_numbers_test/query.sql @@ -0,0 +1,28 @@ +-- Tags: distributed + +SELECT * +FROM +( + SELECT * + FROM system.numbers + WHERE number = 100 + UNION ALL + SELECT * + FROM system.numbers + WHERE number = 100 +) +LIMIT 2 +SETTINGS max_threads = 1 FORMAT Null; + +DROP TABLE IF EXISTS d_numbers; +CREATE TABLE d_numbers (number UInt32) ENGINE = Distributed(test_cluster_two_shards, system, numbers, rand()); + +SELECT '100' AS number FROM d_numbers AS n WHERE n.number = 100 LIMIT 2; +SELECT '100' AS number FROM d_numbers AS n WHERE n.number = 100 LIMIT 2 SETTINGS max_threads = 1, prefer_localhost_replica=1; +SELECT sum(number) FROM (select * from remote('127.0.0.{1,1,1}', system.numbers) AS n WHERE n.number = 100 LIMIT 3) SETTINGS max_threads = 2, prefer_localhost_replica=1; + +SET distributed_product_mode = 'local'; + +SELECT '100' AS number FROM d_numbers AS n WHERE n.number = 100 LIMIT 2; + +DROP TABLE d_numbers; diff --git a/parser/testdata/01104_distributed_one_test/ast.json b/parser/testdata/01104_distributed_one_test/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01104_distributed_one_test/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01104_distributed_one_test/metadata.json b/parser/testdata/01104_distributed_one_test/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01104_distributed_one_test/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01104_distributed_one_test/query.sql b/parser/testdata/01104_distributed_one_test/query.sql new file mode 100644 index 000000000..fb2c2774c --- /dev/null +++ b/parser/testdata/01104_distributed_one_test/query.sql @@ -0,0 +1,23 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS d_one; +CREATE TABLE d_one (dummy UInt8) ENGINE = Distributed(test_cluster_two_shards, system, one, rand()); + +SELECT 'local_0', toUInt8(1) AS dummy FROM system.one AS o WHERE o.dummy = 0; +SELECT 'local_1', toUInt8(1) AS dummy FROM system.one AS o WHERE o.dummy = 1; + +SELECT 'distributed_0', _shard_num, toUInt8(1) AS dummy FROM d_one AS o WHERE o.dummy = 0 ORDER BY _shard_num; +SELECT 'distributed_1', _shard_num, toUInt8(1) AS dummy FROM d_one AS o WHERE o.dummy = 1 ORDER BY _shard_num; + +SET distributed_product_mode = 'local'; + +SELECT 'local_0', toUInt8(1) AS dummy FROM system.one AS o WHERE o.dummy = 0; +SELECT 'local_1', toUInt8(1) AS dummy FROM system.one AS o WHERE o.dummy = 1; + +SELECT 'distributed_0', _shard_num, toUInt8(1) AS dummy FROM d_one AS o WHERE o.dummy = 0 ORDER BY _shard_num; +SELECT 'distributed_1', _shard_num, toUInt8(1) AS dummy FROM d_one AS o WHERE o.dummy = 1 ORDER BY _shard_num; + +DROP TABLE d_one; + +SELECT 'remote_0', toUInt8(1) AS dummy FROM remote('127.0.0.2', system, one) AS o WHERE o.dummy = 0; +SELECT 'remote_1', toUInt8(1) AS dummy FROM remote('127.0.0.2', system, one) AS o WHERE o.dummy = 1; diff --git a/parser/testdata/01104_fixed_string_like/ast.json b/parser/testdata/01104_fixed_string_like/ast.json new file mode 100644 index 000000000..a297dde2a --- /dev/null +++ b/parser/testdata/01104_fixed_string_like/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": 
"String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function like (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_['hello', 'world']" + }, + { + "explain": " Literal 'Array(FixedString(5))'" + }, + { + "explain": " Literal 'hello'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.00147319, + "rows_read": 13, + "bytes_read": 528 + } +} diff --git a/parser/testdata/01104_fixed_string_like/metadata.json b/parser/testdata/01104_fixed_string_like/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01104_fixed_string_like/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01104_fixed_string_like/query.sql b/parser/testdata/01104_fixed_string_like/query.sql new file mode 100644 index 000000000..fa122cccb --- /dev/null +++ b/parser/testdata/01104_fixed_string_like/query.sql @@ -0,0 +1,46 @@ +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE 'hello'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE 'world'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE 'xyz'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE 'hell'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE 'orld'; + +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%hello%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%world%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%xyz%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%hell%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%orld%'; + +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%hello'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%world'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%xyz'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%hell'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%orld'; + +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE 'hello%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE 'world%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE 'xyz%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE 'hell%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE 'orld%'; + +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%he%o%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%w%ld%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%x%z%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%hell_'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '_orld%'; + +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%he__o%'; +SELECT 
arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%w__ld%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%x%z%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE 'hell_'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '_orld'; + +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE 'helloworld'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%helloworld%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%elloworl%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%ow%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%o%w%'; + +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%o%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%l%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%l%o%'; +SELECT arrayJoin(CAST(['hello', 'world'] AS Array(FixedString(5)))) LIKE '%o%l%'; diff --git a/parser/testdata/01105_string_like/ast.json b/parser/testdata/01105_string_like/ast.json new file mode 100644 index 000000000..8a44d18d5 --- /dev/null +++ b/parser/testdata/01105_string_like/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function like (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['hello', 'world']" + }, + { + "explain": " Literal 'hello'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001243796, + "rows_read": 10, + "bytes_read": 391 + } +} diff --git a/parser/testdata/01105_string_like/metadata.json b/parser/testdata/01105_string_like/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01105_string_like/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01105_string_like/query.sql b/parser/testdata/01105_string_like/query.sql new file mode 100644 index 000000000..dc7ff3018 --- /dev/null +++ b/parser/testdata/01105_string_like/query.sql @@ -0,0 +1,46 @@ +SELECT arrayJoin(['hello', 'world']) LIKE 'hello'; +SELECT arrayJoin(['hello', 'world']) LIKE 'world'; +SELECT arrayJoin(['hello', 'world']) LIKE 'xyz'; +SELECT arrayJoin(['hello', 'world']) LIKE 'hell'; +SELECT arrayJoin(['hello', 'world']) LIKE 'orld'; + +SELECT arrayJoin(['hello', 'world']) LIKE '%hello%'; +SELECT arrayJoin(['hello', 'world']) LIKE '%world%'; +SELECT arrayJoin(['hello', 'world']) LIKE '%xyz%'; +SELECT arrayJoin(['hello', 'world']) LIKE '%hell%'; +SELECT arrayJoin(['hello', 'world']) LIKE '%orld%'; + +SELECT arrayJoin(['hello', 'world']) LIKE '%hello'; +SELECT arrayJoin(['hello', 'world']) LIKE '%world'; +SELECT arrayJoin(['hello', 'world']) LIKE '%xyz'; +SELECT arrayJoin(['hello', 'world']) LIKE '%hell'; +SELECT arrayJoin(['hello', 'world']) LIKE '%orld'; + +SELECT arrayJoin(['hello', 'world']) LIKE 'hello%'; +SELECT arrayJoin(['hello', 'world']) LIKE 'world%'; +SELECT arrayJoin(['hello', 'world']) LIKE 'xyz%'; +SELECT arrayJoin(['hello', 'world']) LIKE 'hell%'; +SELECT 
arrayJoin(['hello', 'world']) LIKE 'orld%'; + +SELECT arrayJoin(['hello', 'world']) LIKE '%he%o%'; +SELECT arrayJoin(['hello', 'world']) LIKE '%w%ld%'; +SELECT arrayJoin(['hello', 'world']) LIKE '%x%z%'; +SELECT arrayJoin(['hello', 'world']) LIKE '%hell_'; +SELECT arrayJoin(['hello', 'world']) LIKE '_orld%'; + +SELECT arrayJoin(['hello', 'world']) LIKE '%he__o%'; +SELECT arrayJoin(['hello', 'world']) LIKE '%w__ld%'; +SELECT arrayJoin(['hello', 'world']) LIKE '%x%z%'; +SELECT arrayJoin(['hello', 'world']) LIKE 'hell_'; +SELECT arrayJoin(['hello', 'world']) LIKE '_orld'; + +SELECT arrayJoin(['hello', 'world']) LIKE 'helloworld'; +SELECT arrayJoin(['hello', 'world']) LIKE '%helloworld%'; +SELECT arrayJoin(['hello', 'world']) LIKE '%elloworl%'; +SELECT arrayJoin(['hello', 'world']) LIKE '%ow%'; +SELECT arrayJoin(['hello', 'world']) LIKE '%o%w%'; + +SELECT arrayJoin(['hello', 'world']) LIKE '%o%'; +SELECT arrayJoin(['hello', 'world']) LIKE '%l%'; +SELECT arrayJoin(['hello', 'world']) LIKE '%l%o%'; +SELECT arrayJoin(['hello', 'world']) LIKE '%o%l%'; diff --git a/parser/testdata/01106_const_fixed_string_like/ast.json b/parser/testdata/01106_const_fixed_string_like/ast.json new file mode 100644 index 000000000..30b476251 --- /dev/null +++ b/parser/testdata/01106_const_fixed_string_like/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function like (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'hello'" + }, + { + "explain": " Literal 'FixedString(5)'" + }, + { + "explain": " Literal 'hello'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.00141514, + "rows_read": 11, + "bytes_read": 409 + } +} diff --git a/parser/testdata/01106_const_fixed_string_like/metadata.json b/parser/testdata/01106_const_fixed_string_like/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01106_const_fixed_string_like/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01106_const_fixed_string_like/query.sql b/parser/testdata/01106_const_fixed_string_like/query.sql new file mode 100644 index 000000000..6f945f5f8 --- /dev/null +++ b/parser/testdata/01106_const_fixed_string_like/query.sql @@ -0,0 +1,46 @@ +SELECT CAST('hello' AS FixedString(5)) LIKE 'hello'; +SELECT CAST('hello' AS FixedString(5)) LIKE 'world'; +SELECT CAST('hello' AS FixedString(5)) LIKE 'xyz'; +SELECT CAST('hello' AS FixedString(5)) LIKE 'hell'; +SELECT CAST('hello' AS FixedString(5)) LIKE 'orld'; + +SELECT CAST('hello' AS FixedString(5)) LIKE '%hello%'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%world%'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%xyz%'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%hell%'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%orld%'; + +SELECT CAST('hello' AS FixedString(5)) LIKE '%hello'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%world'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%xyz'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%hell'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%orld'; + +SELECT CAST('hello' AS FixedString(5)) LIKE 'hello%'; +SELECT CAST('hello' AS FixedString(5)) 
LIKE 'world%'; +SELECT CAST('hello' AS FixedString(5)) LIKE 'xyz%'; +SELECT CAST('hello' AS FixedString(5)) LIKE 'hell%'; +SELECT CAST('hello' AS FixedString(5)) LIKE 'orld%'; + +SELECT CAST('hello' AS FixedString(5)) LIKE '%he%o%'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%w%ld%'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%x%z%'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%hell_'; +SELECT CAST('hello' AS FixedString(5)) LIKE '_orld%'; + +SELECT CAST('hello' AS FixedString(5)) LIKE '%he__o%'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%w__ld%'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%x%z%'; +SELECT CAST('hello' AS FixedString(5)) LIKE 'hell_'; +SELECT CAST('hello' AS FixedString(5)) LIKE '_orld'; + +SELECT CAST('hello' AS FixedString(5)) LIKE 'helloworld'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%helloworld%'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%elloworl%'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%ow%'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%o%w%'; + +SELECT CAST('hello' AS FixedString(5)) LIKE '%o%'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%l%'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%l%o%'; +SELECT CAST('hello' AS FixedString(5)) LIKE '%o%l%'; diff --git a/parser/testdata/01107_join_right_table_totals/ast.json b/parser/testdata/01107_join_right_table_totals/ast.json new file mode 100644 index 000000000..279445579 --- /dev/null +++ b/parser/testdata/01107_join_right_table_totals/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001256729, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01107_join_right_table_totals/metadata.json b/parser/testdata/01107_join_right_table_totals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01107_join_right_table_totals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01107_join_right_table_totals/query.sql b/parser/testdata/01107_join_right_table_totals/query.sql new file mode 100644 index 000000000..7e5492824 --- /dev/null +++ b/parser/testdata/01107_join_right_table_totals/query.sql @@ -0,0 +1,110 @@ +DROP TABLE IF EXISTS t; +CREATE TABLE t (item_id UInt64, price_sold Float32, date Date) ENGINE MergeTree ORDER BY item_id; + +SELECT item_id +FROM (SELECT item_id FROM t GROUP BY item_id WITH TOTALS) l +FULL JOIN (SELECT item_id FROM t GROUP BY item_id WITH TOTALS ORDER BY item_id) r +USING (item_id); + +SELECT id +FROM (SELECT item_id AS id FROM t GROUP BY id WITH TOTALS) l +FULL JOIN (SELECT item_id AS id FROM t GROUP BY id WITH TOTALS ORDER BY item_id) r +USING (id); + +SELECT item_id +FROM (SELECT item_id FROM t GROUP BY item_id WITH TOTALS) l +INNER JOIN (SELECT item_id FROM t GROUP BY item_id WITH TOTALS ORDER BY item_id) r +USING (item_id); + +SELECT id +FROM (SELECT item_id AS id FROM t GROUP BY id WITH TOTALS) l +INNER JOIN (SELECT item_id AS id FROM t GROUP BY id WITH TOTALS) r +USING (id); + +SELECT id, yago, recent +FROM ( + SELECT item_id AS id, SUM(price_sold) AS recent + FROM t WHERE (date BETWEEN '2019-12-16' AND '2020-03-08') + GROUP BY id WITH TOTALS + ORDER BY id +) ll +FULL JOIN +( + SELECT item_id AS id, SUM(price_sold) AS yago + FROM t WHERE (date BETWEEN '2018-12-17' AND '2019-03-10') + GROUP BY id WITH TOTALS + ORDER BY id +) rr +USING (id); + +SELECT id, yago +FROM ( 
SELECT item_id AS id FROM t GROUP BY id ) AS ll +FULL OUTER JOIN ( SELECT item_id AS id, arrayJoin([111, 222, 333, 444]), SUM(price_sold) AS yago FROM t GROUP BY id WITH TOTALS ORDER BY id ) AS rr +USING (id); + +SELECT id, yago +FROM ( SELECT item_id AS id, arrayJoin([111, 222, 333]) FROM t GROUP BY id WITH TOTALS ORDER BY id ) AS ll +FULL OUTER JOIN ( SELECT item_id AS id, SUM(price_sold) AS yago FROM t GROUP BY id ) AS rr +USING (id); + +SELECT id, yago +FROM ( SELECT item_id AS id, arrayJoin(emptyArrayInt32()) FROM t GROUP BY id WITH TOTALS ORDER BY id ) AS ll +FULL OUTER JOIN ( SELECT item_id AS id, SUM(price_sold) AS yago FROM t GROUP BY id ) AS rr +USING (id); + +SELECT id, yago +FROM ( SELECT item_id AS id FROM t GROUP BY id ) AS ll +FULL OUTER JOIN ( SELECT item_id AS id, arrayJoin(emptyArrayInt32()), SUM(price_sold) AS yago FROM t GROUP BY id WITH TOTALS ORDER BY id ) AS rr +USING (id); + +SELECT id, yago +FROM ( SELECT item_id AS id, arrayJoin([111, 222, 333]) FROM t GROUP BY id WITH TOTALS ORDER BY id ) AS ll +FULL OUTER JOIN ( SELECT item_id AS id, arrayJoin([111, 222, 333, 444]), SUM(price_sold) AS yago FROM t GROUP BY id WITH TOTALS ORDER BY id ) AS rr +USING (id); + +INSERT INTO t VALUES (1, 100, '1970-01-01'), (1, 200, '1970-01-02'); + +SELECT '-'; +SELECT * +FROM (SELECT item_id FROM t GROUP BY item_id WITH TOTALS ORDER BY item_id) l +LEFT JOIN (SELECT item_id FROM t ) r +ON l.item_id = r.item_id; + +SELECT '-'; +SELECT * +FROM (SELECT item_id FROM t GROUP BY item_id WITH TOTALS ORDER BY item_id) l +RIGHT JOIN (SELECT item_id FROM t ) r +ON l.item_id = r.item_id; + +SELECT '-'; +SELECT * +FROM (SELECT item_id FROM t) l +LEFT JOIN (SELECT item_id FROM t GROUP BY item_id WITH TOTALS ORDER BY item_id ) r +ON l.item_id = r.item_id; + +SELECT '-'; +SELECT * +FROM (SELECT item_id FROM t) l +RIGHT JOIN (SELECT item_id FROM t GROUP BY item_id WITH TOTALS ORDER BY item_id ) r +ON l.item_id = r.item_id; + +SELECT '-'; +SELECT * +FROM (SELECT item_id FROM t GROUP BY item_id WITH TOTALS ORDER BY item_id) l +LEFT JOIN (SELECT item_id FROM t GROUP BY item_id WITH TOTALS ORDER BY item_id ) r +ON l.item_id = r.item_id; + +SELECT '-'; +SELECT * +FROM (SELECT item_id, 'foo' AS key, 1 AS val FROM t GROUP BY item_id WITH TOTALS ORDER BY item_id) l +LEFT JOIN (SELECT item_id, sum(price_sold) AS val FROM t GROUP BY item_id WITH TOTALS ORDER BY item_id ) r +ON l.item_id = r.item_id; + +SELECT '-'; +SELECT * +FROM (SELECT * FROM t GROUP BY item_id, price_sold, date WITH TOTALS ORDER BY item_id, price_sold, date) l +LEFT JOIN (SELECT * FROM t GROUP BY item_id, price_sold, date WITH TOTALS ORDER BY item_id, price_sold, date ) r +ON l.item_id = r.item_id +ORDER BY ALL; + +DROP TABLE t; diff --git a/parser/testdata/01109_exchange_tables/ast.json b/parser/testdata/01109_exchange_tables/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01109_exchange_tables/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01109_exchange_tables/metadata.json b/parser/testdata/01109_exchange_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01109_exchange_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01109_exchange_tables/query.sql b/parser/testdata/01109_exchange_tables/query.sql new file mode 100644 index 000000000..28f4a16bb --- /dev/null +++ b/parser/testdata/01109_exchange_tables/query.sql @@ -0,0 +1,67 @@ +-- Tags: no-parallel +SET send_logs_level = 'fatal'; + 
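+-- EXCHANGE TABLES, exercised below, atomically swaps two existing tables, so no intermediate name is needed; the RENAME sequence via t1tmp/t2tmp later in this file performs the same swap in two steps.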
+DROP DATABASE IF EXISTS test_01109; +CREATE DATABASE test_01109 ENGINE=Atomic; + +USE test_01109; + +CREATE TABLE t0 ENGINE=MergeTree() ORDER BY tuple() AS SELECT rowNumberInAllBlocks(), * FROM (SELECT toLowCardinality(arrayJoin(['exchange', 'tables']))); +-- NOTE: database = currentDatabase() is not mandatory +CREATE TABLE t1 ENGINE=Log() AS SELECT * FROM system.tables AS t JOIN system.databases AS d ON t.database=d.name; +CREATE TABLE t2 ENGINE=MergeTree() ORDER BY tuple() AS SELECT rowNumberInAllBlocks() + (SELECT count() FROM t0), * FROM (SELECT arrayJoin(['hello', 'world'])); + +EXCHANGE TABLES t1 AND t3; -- { serverError UNKNOWN_TABLE } +EXCHANGE TABLES t4 AND t2; -- { serverError UNKNOWN_TABLE } +RENAME TABLE t0 TO t1; -- { serverError TABLE_ALREADY_EXISTS } +DROP TABLE t1; +RENAME TABLE t0 TO t1; +SELECT * FROM t1; +SELECT * FROM t2; + +EXCHANGE TABLES t1 AND t2; +SELECT * FROM t1; +SELECT * FROM t2; + +RENAME TABLE t1 TO t1tmp, t2 TO t2tmp; +RENAME TABLE t1tmp TO t2, t2tmp TO t1; +SELECT * FROM t1; +SELECT * FROM t2; + +DROP DATABASE IF EXISTS test_01109_other_atomic; +DROP DATABASE IF EXISTS test_01109_ordinary; +CREATE DATABASE test_01109_other_atomic; +set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. +CREATE DATABASE test_01109_ordinary ENGINE=Ordinary; + +CREATE TABLE test_01109_other_atomic.t3 ENGINE=MergeTree() ORDER BY tuple() + AS SELECT rowNumberInAllBlocks() + (SELECT max((*,*).1.1) + 1 FROM (SELECT (*,) FROM t1 UNION ALL SELECT (*,) FROM t2)), * + FROM (SELECT arrayJoin(['another', 'db'])); + +CREATE TABLE test_01109_ordinary.t4 AS t1; + +EXCHANGE TABLES test_01109_other_atomic.t3 AND test_01109_ordinary.t4; -- { serverError NOT_IMPLEMENTED } +EXCHANGE TABLES test_01109_ordinary.t4 AND test_01109_other_atomic.t3; -- { serverError NOT_IMPLEMENTED } +EXCHANGE TABLES test_01109_ordinary.t4 AND test_01109_ordinary.t4; -- { serverError NOT_IMPLEMENTED } + +EXCHANGE TABLES t1 AND test_01109_other_atomic.t3; +EXCHANGE TABLES t2 AND t2; +SELECT * FROM t1; +SELECT * FROM t2; +SELECT * FROM test_01109_other_atomic.t3; +SELECT * FROM test_01109_ordinary.t4; + +DROP DATABASE IF EXISTS test_01109_rename_exists; +CREATE DATABASE test_01109_rename_exists ENGINE=Atomic; +USE test_01109_rename_exists; +CREATE TABLE t0 ENGINE=Log() AS SELECT * FROM system.numbers limit 2; +RENAME TABLE t0_tmp TO t1; -- { serverError UNKNOWN_TABLE } +RENAME TABLE if exists t0_tmp TO t1; +RENAME TABLE if exists t0 TO t1; +SELECT * FROM t1; + +DROP DATABASE test_01109; +DROP DATABASE test_01109_other_atomic; +DROP DATABASE test_01109_ordinary; +DROP DATABASE test_01109_rename_exists; diff --git a/parser/testdata/01109_inflating_cross_join/ast.json b/parser/testdata/01109_inflating_cross_join/ast.json new file mode 100644 index 000000000..7296bac68 --- /dev/null +++ b/parser/testdata/01109_inflating_cross_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001219202, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01109_inflating_cross_join/metadata.json b/parser/testdata/01109_inflating_cross_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01109_inflating_cross_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01109_inflating_cross_join/query.sql 
b/parser/testdata/01109_inflating_cross_join/query.sql new file mode 100644 index 000000000..bf7ef7c8f --- /dev/null +++ b/parser/testdata/01109_inflating_cross_join/query.sql @@ -0,0 +1,7 @@ +SET max_memory_usage = 16000000; + +SET max_joined_block_size_rows = 10000000; +SELECT count(*) FROM numbers(10000) n1 CROSS JOIN numbers(1000) n2; -- { serverError MEMORY_LIMIT_EXCEEDED } + +SET max_joined_block_size_rows = 1000; +SELECT count(*) FROM numbers(10000) n1 CROSS JOIN numbers(1000) n2; diff --git a/parser/testdata/01109_sc0rp10_string_hash_map_zero_bytes/ast.json b/parser/testdata/01109_sc0rp10_string_hash_map_zero_bytes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01109_sc0rp10_string_hash_map_zero_bytes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01109_sc0rp10_string_hash_map_zero_bytes/metadata.json b/parser/testdata/01109_sc0rp10_string_hash_map_zero_bytes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01109_sc0rp10_string_hash_map_zero_bytes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01109_sc0rp10_string_hash_map_zero_bytes/query.sql b/parser/testdata/01109_sc0rp10_string_hash_map_zero_bytes/query.sql new file mode 100644 index 000000000..b7ac6f164 --- /dev/null +++ b/parser/testdata/01109_sc0rp10_string_hash_map_zero_bytes/query.sql @@ -0,0 +1,15 @@ +-- Test that the string hash map works properly with keys containing zero +-- bytes. +-- Keys with no central '1' are mostly duplicates. The unique keys +-- in this group are '', '\0', ..., '\0' repeated 34 times, for a total of 35. All other +-- keys are unique: 18 * 18 * 17 of them, one per length triple with at least one '1'. +select count(*) = 18 * 18 * 17 + 35 +from ( + select key + from ( + with 18 as n + select repeat('\0', number % n) + || repeat('1', intDiv(number, n) % n) + || repeat('\0', intDiv(number, n * n) % n) key + from numbers(18 * 18 * 18)) + group by key); diff --git a/parser/testdata/01110_dictionary_layout_without_arguments/ast.json b/parser/testdata/01110_dictionary_layout_without_arguments/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01110_dictionary_layout_without_arguments/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01110_dictionary_layout_without_arguments/metadata.json b/parser/testdata/01110_dictionary_layout_without_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01110_dictionary_layout_without_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01110_dictionary_layout_without_arguments/query.sql b/parser/testdata/01110_dictionary_layout_without_arguments/query.sql new file mode 100644 index 000000000..ed2f61e26 --- /dev/null +++ b/parser/testdata/01110_dictionary_layout_without_arguments/query.sql @@ -0,0 +1,35 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS db_for_dict; +CREATE DATABASE db_for_dict; + +CREATE TABLE db_for_dict.table_for_dict +( + key1 UInt64, + value String +) +ENGINE = Memory(); + +INSERT INTO db_for_dict.table_for_dict VALUES (1, 'Hello'), (2, 'World'); + +CREATE DICTIONARY db_for_dict.dict_with_hashed_layout +( + key1 UInt64, + value String +) +PRIMARY KEY key1 +LAYOUT(HASHED) +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' DB 'db_for_dict')) +LIFETIME(MIN 1 MAX 10); + +SELECT dictGet('db_for_dict.dict_with_hashed_layout', 'value', toUInt64(2)); + +DETACH DICTIONARY db_for_dict.dict_with_hashed_layout; + 
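+-- The DETACH/ATTACH round trip below checks that the argument-less LAYOUT(HASHED) clause survives metadata serialization; SHOW CREATE DICTIONARY should print it back unchanged.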
+ATTACH DICTIONARY db_for_dict.dict_with_hashed_layout; + +SHOW CREATE DICTIONARY db_for_dict.dict_with_hashed_layout; + +SELECT dictGet('db_for_dict.dict_with_hashed_layout', 'value', toUInt64(1)); + +DROP DATABASE IF EXISTS db_for_dict; diff --git a/parser/testdata/01112_check_table_with_index/ast.json b/parser/testdata/01112_check_table_with_index/ast.json new file mode 100644 index 000000000..0c858482c --- /dev/null +++ b/parser/testdata/01112_check_table_with_index/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001693676, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01112_check_table_with_index/metadata.json b/parser/testdata/01112_check_table_with_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01112_check_table_with_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01112_check_table_with_index/query.sql b/parser/testdata/01112_check_table_with_index/query.sql new file mode 100644 index 000000000..8b5946692 --- /dev/null +++ b/parser/testdata/01112_check_table_with_index/query.sql @@ -0,0 +1,15 @@ +SET check_query_single_value_result = 'false'; + +DROP TABLE IF EXISTS check_table_with_indices; + +CREATE TABLE check_table_with_indices ( + id UInt64, + data String, + INDEX a (id) type minmax GRANULARITY 3 +) ENGINE = MergeTree() ORDER BY id; + +INSERT INTO check_table_with_indices VALUES (0, 'test'), (1, 'test2'); + +CHECK TABLE check_table_with_indices SETTINGS max_threads = 1; + +DROP TABLE check_table_with_indices; diff --git a/parser/testdata/01113_local_dictionary_type_conversion/ast.json b/parser/testdata/01113_local_dictionary_type_conversion/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01113_local_dictionary_type_conversion/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01113_local_dictionary_type_conversion/metadata.json b/parser/testdata/01113_local_dictionary_type_conversion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01113_local_dictionary_type_conversion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01113_local_dictionary_type_conversion/query.sql b/parser/testdata/01113_local_dictionary_type_conversion/query.sql new file mode 100644 index 000000000..1dc727930 --- /dev/null +++ b/parser/testdata/01113_local_dictionary_type_conversion/query.sql @@ -0,0 +1,21 @@ +CREATE TABLE table_for_dict ( + CompanyID String, + OSType Enum('UNKNOWN' = 0, 'WINDOWS' = 1, 'LINUX' = 2, 'ANDROID' = 3, 'MAC' = 4), + SomeID Int32 +) +ENGINE = Memory(); + +INSERT INTO table_for_dict VALUES ('First', 'WINDOWS', 1), ('Second', 'LINUX', 2); + +CREATE DICTIONARY dict_with_conversion +( + CompanyID String DEFAULT '', + OSType String DEFAULT '', + SomeID Int32 DEFAULT 0 +) +PRIMARY KEY CompanyID +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' DB currentDatabase())) +LIFETIME(MIN 1 MAX 20) +LAYOUT(COMPLEX_KEY_HASHED()); + +SELECT * FROM dict_with_conversion ORDER BY CompanyID; diff --git a/parser/testdata/01114_alter_modify_compact_parts/ast.json b/parser/testdata/01114_alter_modify_compact_parts/ast.json new file mode 100644 index 000000000..039cc8918 --- /dev/null +++ b/parser/testdata/01114_alter_modify_compact_parts/ast.json @@ -0,0 +1,28 @@ +{ + "meta": 
+ [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mt_compact (children 1)" + }, + { + "explain": " Identifier mt_compact" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001728093, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01114_alter_modify_compact_parts/metadata.json b/parser/testdata/01114_alter_modify_compact_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01114_alter_modify_compact_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01114_alter_modify_compact_parts/query.sql b/parser/testdata/01114_alter_modify_compact_parts/query.sql new file mode 100644 index 000000000..a5aa12548 --- /dev/null +++ b/parser/testdata/01114_alter_modify_compact_parts/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS mt_compact; + +CREATE TABLE mt_compact (d Date, id UInt32, s String) + ENGINE = MergeTree ORDER BY id PARTITION BY d + SETTINGS min_bytes_for_wide_part = 10000000, index_granularity = 128; + +INSERT INTO mt_compact SELECT toDate('2020-01-05'), number, toString(number) FROM numbers(1000); +INSERT INTO mt_compact SELECT toDate('2020-01-06'), number, toString(number) FROM numbers(1000); +ALTER TABLE mt_compact MODIFY COLUMN s UInt64; +SELECT sum(s) from mt_compact; + +DROP TABLE IF EXISTS mt_compact; diff --git a/parser/testdata/01114_clear_column_compact_parts/ast.json b/parser/testdata/01114_clear_column_compact_parts/ast.json new file mode 100644 index 000000000..170d80902 --- /dev/null +++ b/parser/testdata/01114_clear_column_compact_parts/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery clear_column (children 1)" + }, + { + "explain": " Identifier clear_column" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00166968, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/01114_clear_column_compact_parts/metadata.json b/parser/testdata/01114_clear_column_compact_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01114_clear_column_compact_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01114_clear_column_compact_parts/query.sql b/parser/testdata/01114_clear_column_compact_parts/query.sql new file mode 100644 index 000000000..bdfed06ea --- /dev/null +++ b/parser/testdata/01114_clear_column_compact_parts/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS clear_column; + +CREATE TABLE clear_column(x UInt32, y UInt32) ENGINE MergeTree ORDER BY x PARTITION by x; +INSERT INTO clear_column VALUES (1, 1), (2, 3); + +ALTER TABLE clear_column CLEAR COLUMN y IN PARTITION 1; +SELECT * FROM clear_column ORDER BY x; +ALTER TABLE clear_column CLEAR COLUMN y IN PARTITION 2; +SELECT * FROM clear_column ORDER BY x; + +DROP TABLE clear_column; diff --git a/parser/testdata/01114_materialize_clear_index_compact_parts/ast.json b/parser/testdata/01114_materialize_clear_index_compact_parts/ast.json new file mode 100644 index 000000000..901edbb2e --- /dev/null +++ b/parser/testdata/01114_materialize_clear_index_compact_parts/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001948557, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git 
a/parser/testdata/01114_materialize_clear_index_compact_parts/metadata.json b/parser/testdata/01114_materialize_clear_index_compact_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01114_materialize_clear_index_compact_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01114_materialize_clear_index_compact_parts/query.sql b/parser/testdata/01114_materialize_clear_index_compact_parts/query.sql new file mode 100644 index 000000000..f7e78bd83 --- /dev/null +++ b/parser/testdata/01114_materialize_clear_index_compact_parts/query.sql @@ -0,0 +1,40 @@ +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. +SET parallel_replicas_index_analysis_only_on_coordinator = 0; +SET use_skip_indexes_on_data_read = 0; + +DROP TABLE IF EXISTS minmax_compact; + +CREATE TABLE minmax_compact +( + u64 UInt64, + i64 Int64, + i32 Int32 +) ENGINE = MergeTree() +PARTITION BY i32 +ORDER BY u64 +SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi', min_rows_for_wide_part = 1000000; + +INSERT INTO minmax_compact VALUES (0, 2, 1), (1, 1, 1), (2, 1, 1), (3, 1, 1), (4, 1, 1), (5, 2, 1), (6, 1, 2), (7, 1, 2), (8, 1, 2), (9, 1, 2); + +SET mutations_sync = 1; +ALTER TABLE minmax_compact ADD INDEX idx (i64, u64 * i64) TYPE minmax GRANULARITY 1; + +ALTER TABLE minmax_compact MATERIALIZE INDEX idx IN PARTITION 1; +set max_rows_to_read = 8; +SELECT count() FROM minmax_compact WHERE i64 = 2; + +ALTER TABLE minmax_compact MATERIALIZE INDEX idx IN PARTITION 2; +set max_rows_to_read = 6; +SELECT count() FROM minmax_compact WHERE i64 = 2; + +ALTER TABLE minmax_compact CLEAR INDEX idx IN PARTITION 1; +ALTER TABLE minmax_compact CLEAR INDEX idx IN PARTITION 2; + +SELECT count() FROM minmax_compact WHERE i64 = 2; -- { serverError TOO_MANY_ROWS } + +set max_rows_to_read = 10; +SELECT count() FROM minmax_compact WHERE i64 = 2; + +DROP TABLE minmax_compact; diff --git a/parser/testdata/01114_mysql_database_engine_segfault/ast.json b/parser/testdata/01114_mysql_database_engine_segfault/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01114_mysql_database_engine_segfault/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01114_mysql_database_engine_segfault/metadata.json b/parser/testdata/01114_mysql_database_engine_segfault/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01114_mysql_database_engine_segfault/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01114_mysql_database_engine_segfault/query.sql b/parser/testdata/01114_mysql_database_engine_segfault/query.sql new file mode 100644 index 000000000..783a728e3 --- /dev/null +++ b/parser/testdata/01114_mysql_database_engine_segfault/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest + +DROP DATABASE IF EXISTS conv_main; +CREATE DATABASE conv_main ENGINE = MySQL('127.0.0.1:3456', conv_main, 'metrika', 'password'); -- { serverError CANNOT_CREATE_DATABASE } diff --git a/parser/testdata/01115_join_with_dictionary/ast.json b/parser/testdata/01115_join_with_dictionary/ast.json new file mode 100644 index 000000000..6f12c1764 --- /dev/null +++ b/parser/testdata/01115_join_with_dictionary/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + 
"type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001329995, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01115_join_with_dictionary/metadata.json b/parser/testdata/01115_join_with_dictionary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01115_join_with_dictionary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01115_join_with_dictionary/query.sql b/parser/testdata/01115_join_with_dictionary/query.sql new file mode 100644 index 000000000..83227aa55 --- /dev/null +++ b/parser/testdata/01115_join_with_dictionary/query.sql @@ -0,0 +1,121 @@ +DROP TABLE IF EXISTS t1; + +DROP DICTIONARY IF EXISTS dict_flat; +DROP DICTIONARY IF EXISTS dict_hashed; +DROP DICTIONARY IF EXISTS dict_complex_cache; + +CREATE TABLE t1 (key UInt64, a UInt8, b String, c Float64) ENGINE = MergeTree() ORDER BY key; +INSERT INTO t1 SELECT number, number, toString(number), number from numbers(4); + +CREATE DICTIONARY dict_flat (key UInt64 DEFAULT 0, a UInt8 DEFAULT 42, b String DEFAULT 'x', c Float64 DEFAULT 42.0) +PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE 't1')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(FLAT()); + +CREATE DICTIONARY dict_hashed (key UInt64 DEFAULT 0, a UInt8 DEFAULT 42, b String DEFAULT 'x', c Float64 DEFAULT 42.0) +PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE 't1')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(HASHED()); + +CREATE DICTIONARY dict_complex_cache (key UInt64 DEFAULT 0, a UInt8 DEFAULT 42, b String DEFAULT 'x', c Float64 DEFAULT 42.0) +PRIMARY KEY key, b +SOURCE(CLICKHOUSE(TABLE 't1')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(COMPLEX_KEY_CACHE(SIZE_IN_CELLS 1)); + +SET join_use_nulls = 0; + +SET join_algorithm = 'direct'; + +SELECT 'flat: left on'; +SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 LEFT JOIN dict_flat d ON s1.key = d.key ORDER BY s1.key; +SELECT 'flat: left'; +SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 LEFT JOIN dict_flat d USING(key) ORDER BY key; +SELECT 'flat: any left'; +SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 ANY LEFT JOIN dict_flat d USING(key) ORDER BY key; + +SELECT 'flat: semi left'; +SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 SEMI JOIN dict_flat d USING(key) ORDER BY key; +SELECT 'flat: anti left'; +SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 ANTI JOIN dict_flat d USING(key) ORDER BY key; +SELECT 'flat: inner'; +SELECT * FROM (SELECT number AS key FROM numbers(2)) s1 JOIN dict_flat d USING(key); +SELECT 'flat: inner on'; +SELECT * FROM (SELECT number AS k FROM numbers(100)) s1 JOIN dict_flat d ON k = key ORDER BY k; + +SET join_use_nulls = 1; + +SELECT 'hashed: left on'; +SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 LEFT JOIN dict_hashed d ON s1.key = d.key ORDER BY s1.key; +SELECT 'hashed: left'; +SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 LEFT JOIN dict_hashed d USING(key) ORDER BY key; +SELECT 'hashed: any left'; +SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 ANY LEFT JOIN dict_hashed d USING(key) ORDER BY key; +SELECT 'hashed: semi left'; +SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 SEMI JOIN dict_hashed d USING(key) ORDER BY key; +SELECT 'hashed: anti left'; +SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 ANTI JOIN dict_hashed d USING(key) ORDER BY key; +SELECT 'hashed: inner'; +SELECT * FROM (SELECT number AS key FROM numbers(2)) s1 JOIN 
dict_hashed d USING(key); +SELECT 'hashed: inner on'; +SELECT * FROM (SELECT number AS k FROM numbers(100)) s1 JOIN dict_hashed d ON k = key ORDER BY k; + +SET join_use_nulls = 0; + +-- unsupported cases for dictionary join, falls back to regular join + +SET join_algorithm = 'default'; + +SELECT 'flat: inner or'; +SELECT * FROM (SELECT if(number % 2 = 0, number, number * 1000) AS k FROM numbers(100)) s1 JOIN dict_flat d ON k = key OR k == 1000 * key ORDER BY key; + +SELECT 'flat: any left + any_join_distinct_right_table_keys'; +SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 ANY LEFT JOIN dict_flat d USING(key) ORDER BY key SETTINGS any_join_distinct_right_table_keys = '1'; + +SET join_use_nulls = 1; + +SELECT 'complex_cache (smoke)'; +SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 LEFT JOIN dict_complex_cache d ON s1.key = d.key ORDER BY s1.key; + +SELECT 'not optimized (smoke)'; +SELECT * FROM (SELECT number AS key FROM numbers(2)) s1 RIGHT JOIN dict_flat d USING(key) ORDER BY key; +SELECT '-'; +SELECT * FROM (SELECT number AS key FROM numbers(2)) s1 RIGHT JOIN dict_flat d ON s1.key = d.key ORDER BY d.key; +SELECT '-'; +SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s1 FULL JOIN dict_flat d USING(key) ORDER BY s1.key, d.key; +SELECT '-'; +SELECT * FROM (SELECT number AS key FROM numbers(2)) s1 ANY INNER JOIN dict_flat d USING(key) ORDER BY s1.key; +SELECT '-'; +SELECT * FROM (SELECT number AS key FROM numbers(2)) s1 ANY RIGHT JOIN dict_flat d USING(key) ORDER BY key; +SELECT '-'; +SELECT * FROM (SELECT number AS key FROM numbers(2)) s1 SEMI RIGHT JOIN dict_flat d USING(key) ORDER BY s1.key; +SELECT '-'; +SELECT * FROM (SELECT number AS key FROM numbers(2)) s1 ANTI RIGHT JOIN dict_flat d USING(key) ORDER BY key; + +SET join_use_nulls = 0; + +SELECT 'issue 23002'; + +SET join_algorithm = 'auto'; +SELECT '-'; +SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 LEFT JOIN dict_flat d ON s1.key = d.key ORDER BY s1.key; +SELECT '-'; +SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 ANY LEFT JOIN dict_flat d USING(key) ORDER BY key; +SELECT '-'; +SELECT * FROM (SELECT number AS key FROM numbers(2)) s1 RIGHT JOIN dict_flat d ON s1.key = d.key ORDER BY d.key; + +SET join_algorithm = 'partial_merge'; +SELECT '-'; +SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 LEFT JOIN dict_flat d ON s1.key = d.key ORDER BY s1.key; +SELECT '-'; +SELECT * FROM (SELECT number AS key FROM numbers(5)) s1 ANY LEFT JOIN dict_flat d USING(key) ORDER BY key; +SELECT '-'; +SELECT * FROM (SELECT number AS key FROM numbers(2)) s1 RIGHT JOIN dict_flat d ON s1.key = d.key ORDER BY d.key; + +DROP DICTIONARY dict_flat; +DROP DICTIONARY dict_hashed; +DROP DICTIONARY dict_complex_cache; + +DROP TABLE t1; diff --git a/parser/testdata/01115_prewhere_array_join/ast.json b/parser/testdata/01115_prewhere_array_join/ast.json new file mode 100644 index 000000000..34d8aa882 --- /dev/null +++ b/parser/testdata/01115_prewhere_array_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery prewhere (children 1)" + }, + { + "explain": " Identifier prewhere" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001135546, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01115_prewhere_array_join/metadata.json b/parser/testdata/01115_prewhere_array_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01115_prewhere_array_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01115_prewhere_array_join/query.sql b/parser/testdata/01115_prewhere_array_join/query.sql new file mode 100644 index 000000000..6ff86636d --- /dev/null +++ b/parser/testdata/01115_prewhere_array_join/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS prewhere; + +CREATE TABLE prewhere (light UInt8, heavy String) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO prewhere SELECT 0, randomPrintableASCII(10000) FROM numbers(10000); +SELECT arrayJoin([light]) != 0 AS cond, length(heavy) FROM prewhere WHERE light != 0 AND cond != 0; + +DROP TABLE prewhere; + +DROP TABLE IF EXISTS testtable; +CREATE TABLE testtable (DT Datetime, Label1 String, Value UInt64) ENGINE = MergeTree() PARTITION BY DT ORDER BY Label1; +INSERT INTO testtable (*) Values (now(), 'app', 1); +SELECT arrayJoin([0, 1]) AS arrayIdx FROM testtable WHERE arrayIdx = 0; +DROP TABLE testtable; diff --git a/parser/testdata/01116_asof_join_dolbyzerr/ast.json b/parser/testdata/01116_asof_join_dolbyzerr/ast.json new file mode 100644 index 000000000..70b417ad6 --- /dev/null +++ b/parser/testdata/01116_asof_join_dolbyzerr/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery sessions (children 2)" + }, + { + "explain": " Identifier sessions" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration date (children 1)" + }, + { + "explain": " DataType DateTime" + }, + { + "explain": " ColumnDeclaration visitorId (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration sessionId (children 1)" + }, + { + "explain": " DataType String" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001323594, + "rows_read": 10, + "bytes_read": 377 + } +} diff --git a/parser/testdata/01116_asof_join_dolbyzerr/metadata.json b/parser/testdata/01116_asof_join_dolbyzerr/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01116_asof_join_dolbyzerr/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01116_asof_join_dolbyzerr/query.sql b/parser/testdata/01116_asof_join_dolbyzerr/query.sql new file mode 100644 index 000000000..652cb35cf --- /dev/null +++ b/parser/testdata/01116_asof_join_dolbyzerr/query.sql @@ -0,0 +1,32 @@ +CREATE TEMPORARY TABLE sessions (date DateTime, visitorId String, sessionId String); +CREATE TEMPORARY TABLE orders (date DateTime, visitorId String, orderId String); + +INSERT INTO sessions VALUES ('2018-01-01 00:00:00', 'v1', 's1'), ('2018-01-02 00:00:00', 'v1', 's2'), ('2018-01-03 00:00:00', 'v2', 's3'), ('2018-01-04 00:00:00', 'v1', 's4'), ('2018-01-05 00:00:00', 'v2', 's5'), ('2018-01-06 00:00:00', 'v3', 's6'); +INSERT INTO orders VALUES ('2018-01-03 00:00:00', 'v1', 'o1'), ('2018-01-05 00:00:00', 'v1', 'o2'), ('2018-01-06 00:00:00', 'v2', 'o3'); + +SELECT + visitorId, + orderId, + groupUniqArray(sessionId) +FROM sessions +ASOF INNER JOIN orders ON (sessions.visitorId = orders.visitorId) AND (sessions.date <= orders.date) +GROUP BY + visitorId, + orderId +ORDER BY + visitorId ASC, + orderId ASC; + +SELECT + visitorId, + orderId, + groupUniqArray(sessionId) +FROM sessions +ASOF INNER JOIN orders ON (sessions.visitorId = orders.visitorId) AND (sessions.date <= orders.date) +GROUP BY + visitorId, + orderId +ORDER BY + visitorId ASC, + 
orderId ASC +SETTINGS join_algorithm = 'full_sorting_merge'; diff --git a/parser/testdata/01116_cross_count_asterisks/ast.json b/parser/testdata/01116_cross_count_asterisks/ast.json new file mode 100644 index 000000000..bde64ad5d --- /dev/null +++ b/parser/testdata/01116_cross_count_asterisks/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001290632, + "rows_read": 7, + "bytes_read": 250 + } +} diff --git a/parser/testdata/01116_cross_count_asterisks/metadata.json b/parser/testdata/01116_cross_count_asterisks/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01116_cross_count_asterisks/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01116_cross_count_asterisks/query.sql b/parser/testdata/01116_cross_count_asterisks/query.sql new file mode 100644 index 000000000..aa5adadda --- /dev/null +++ b/parser/testdata/01116_cross_count_asterisks/query.sql @@ -0,0 +1,12 @@ +SELECT count(*) +FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3 +WHERE (n1.number = n2.number) AND (n2.number = n3.number); + +SELECT count(*) c FROM ( + SELECT count(*), count(*) as c + FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3 + WHERE (n1.number = n2.number) AND (n2.number = n3.number) + AND (SELECT count(*) FROM numbers(1)) = 1 +) +WHERE (SELECT count(*) FROM numbers(2)) = 2 +HAVING c IN(SELECT count(*) c FROM numbers(1)); diff --git a/parser/testdata/01117_chain_finalize_bug/ast.json b/parser/testdata/01117_chain_finalize_bug/ast.json new file mode 100644 index 000000000..14559dd33 --- /dev/null +++ b/parser/testdata/01117_chain_finalize_bug/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayJoin (alias index) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier i" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier i" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Identifier number" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.00152334, + "rows_read": 21, + "bytes_read": 851 + } +} diff --git a/parser/testdata/01117_chain_finalize_bug/metadata.json b/parser/testdata/01117_chain_finalize_bug/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01117_chain_finalize_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01117_chain_finalize_bug/query.sql b/parser/testdata/01117_chain_finalize_bug/query.sql new file mode 100644 index 000000000..43b5fc0f4 --- /dev/null +++ b/parser/testdata/01117_chain_finalize_bug/query.sql @@ -0,0 +1,25 @@ +SELECT arrayJoin(arrayMap(i -> (i + 1), range(2))) AS index, number +FROM numbers(2) +GROUP BY number +ORDER BY index, number; + +SET max_bytes_before_external_group_by = 1; +SET max_bytes_ratio_before_external_group_by = 0; + +SELECT arrayJoin(arrayMap(i -> (i + 1), range(2))) AS index, number +FROM numbers(2) +GROUP BY number +ORDER BY index, number; + +SET group_by_two_level_threshold = 2; + +SELECT count() FROM +( + SELECT + arrayJoin(arrayMap(i -> (i + 1), range(2))) AS index, + number + FROM numbers_mt(100000) + GROUP BY number + ORDER BY index ASC + SETTINGS max_block_size = 100000, max_threads = 2 +); diff --git a/parser/testdata/01117_comma_and_others_join_mix/ast.json b/parser/testdata/01117_comma_and_others_join_mix/ast.json new file mode 100644 index 000000000..0ecfe2666 --- /dev/null +++ b/parser/testdata/01117_comma_and_others_join_mix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001162842, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01117_comma_and_others_join_mix/metadata.json b/parser/testdata/01117_comma_and_others_join_mix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01117_comma_and_others_join_mix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01117_comma_and_others_join_mix/query.sql b/parser/testdata/01117_comma_and_others_join_mix/query.sql new file mode 100644 index 000000000..5fe297956 --- /dev/null +++ b/parser/testdata/01117_comma_and_others_join_mix/query.sql @@ -0,0 +1,20 @@ +SET join_use_nulls = 1; + +SELECT * +FROM numbers(2) AS n1 +JOIN numbers(3) AS n2 ON n1.number = n2.number, numbers(4) AS n3 +ORDER BY n1.number, n2.number, n3.number; + +SELECT '-'; + +SELECT * +FROM numbers(3) AS n1, numbers(2) AS n2 +LEFT JOIN numbers(2) AS n3 ON n1.number = n3.number +ORDER BY n1.number, n2.number, n3.number; + +SELECT '-'; + +SELECT * +FROM numbers(2) AS n1, numbers(3) AS n2 +RIGHT JOIN numbers(4) AS n3 ON n2.number = n3.number +ORDER BY n1.number, n2.number, n3.number; diff --git a/parser/testdata/01117_greatest_least_case/ast.json b/parser/testdata/01117_greatest_least_case/ast.json new file mode 100644 index 000000000..61c70d062 --- /dev/null +++ b/parser/testdata/01117_greatest_least_case/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function GREATEST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001495211, + "rows_read": 8, + "bytes_read": 291 + } +} diff --git a/parser/testdata/01117_greatest_least_case/metadata.json b/parser/testdata/01117_greatest_least_case/metadata.json new 
file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01117_greatest_least_case/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01117_greatest_least_case/query.sql b/parser/testdata/01117_greatest_least_case/query.sql new file mode 100644 index 000000000..21bfd240f --- /dev/null +++ b/parser/testdata/01117_greatest_least_case/query.sql @@ -0,0 +1,2 @@ +SELECT GREATEST(1, 2); +SELECT LEAST(1, -1); diff --git a/parser/testdata/01118_is_constant/ast.json b/parser/testdata/01118_is_constant/ast.json new file mode 100644 index 000000000..7131200ec --- /dev/null +++ b/parser/testdata/01118_is_constant/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function isConstant (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001283559, + "rows_read": 7, + "bytes_read": 263 + } +} diff --git a/parser/testdata/01118_is_constant/metadata.json b/parser/testdata/01118_is_constant/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01118_is_constant/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01118_is_constant/query.sql b/parser/testdata/01118_is_constant/query.sql new file mode 100644 index 000000000..9e4121590 --- /dev/null +++ b/parser/testdata/01118_is_constant/query.sql @@ -0,0 +1,10 @@ +select isConstant(1); +select isConstant([1]); +select isConstant(arrayJoin([1])); +SELECT isConstant((SELECT 1)); +SELECT isConstant(x) FROM (SELECT 1 x); +SELECT '---'; +SELECT isConstant(x) FROM (SELECT 1 x UNION ALL SELECT 2); +SELECT '---'; +select isConstant(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select isConstant(1, 2); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/parser/testdata/01119_optimize_trivial_insert_select/ast.json b/parser/testdata/01119_optimize_trivial_insert_select/ast.json new file mode 100644 index 000000000..a63a2f149 --- /dev/null +++ b/parser/testdata/01119_optimize_trivial_insert_select/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.002115065, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01119_optimize_trivial_insert_select/metadata.json b/parser/testdata/01119_optimize_trivial_insert_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01119_optimize_trivial_insert_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01119_optimize_trivial_insert_select/query.sql b/parser/testdata/01119_optimize_trivial_insert_select/query.sql new file mode 100644 index 000000000..2b301d7ac --- /dev/null +++ b/parser/testdata/01119_optimize_trivial_insert_select/query.sql @@ -0,0 +1,17 @@ +drop table if exists t; +create table t(n int, a Int64, s String) engine = MergeTree() order by a; + +set enable_positional_arguments = 0; +set optimize_trivial_insert_select = 1; +set max_rows_to_read = 0; + +-- due to aggregate functions, 
optimize_trivial_insert_select will not be applied +insert into t select 1, sum(number) as c, getSetting('max_threads') from numbers_mt(100000000) settings max_insert_threads=4, max_threads=2; +-- due to GROUP BY, optimize_trivial_insert_select will not be applied +insert into t select 2, sum(number) as c, getSetting('max_threads') from numbers_mt(100000000) group by 1 settings max_insert_threads=4, max_threads=2; +insert into t select 3, sum(number) as c, getSetting('max_threads') from numbers_mt(10000000) group by 3 settings max_insert_threads=4, max_threads=2; +insert into t select 4, sum(number) as c, getSetting('max_threads') as mt from numbers_mt(10000000) group by mt settings max_insert_threads=4, max_threads=2; + +select n, a, s from t order by n; + +drop table t; diff --git a/parser/testdata/01119_weird_user_names/ast.json b/parser/testdata/01119_weird_user_names/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01119_weird_user_names/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01119_weird_user_names/metadata.json b/parser/testdata/01119_weird_user_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01119_weird_user_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01119_weird_user_names/query.sql b/parser/testdata/01119_weird_user_names/query.sql new file mode 100644 index 000000000..0d6f02786 --- /dev/null +++ b/parser/testdata/01119_weird_user_names/query.sql @@ -0,0 +1,33 @@ +-- Tags: no-parallel + +drop user if exists " "; +drop user if exists ' spaces'; +drop user if exists 'spaces '; +drop user if exists " spaces "; +drop user if exists "test 01119"; +drop user if exists "Вася Пупкин"; +drop user if exists "无名氏 "; +drop user if exists "🙈 🙉 🙊"; + +create user " "; +create user ' spaces'; +create user 'spaces '; +create user ` INTERSERVER SECRET `; -- { serverError BAD_ARGUMENTS } +create user ''; -- { clientError SYNTAX_ERROR } +create user 'test 01119'; +alter user `test 01119` rename to " spaces "; +alter user " spaces " rename to ''; -- { clientError SYNTAX_ERROR } +alter user " spaces " rename to " INTERSERVER SECRET "; -- { serverError BAD_ARGUMENTS } +create user "Вася Пупкин"; +create user "无名氏 "; +create user "🙈 🙉 🙊"; + +select length(name), name, '.' 
from system.users where position(name, ' ')!=0 order by name; + +drop user " "; +drop user ' spaces'; +drop user 'spaces '; +drop user " spaces "; +drop user "Вася Пупкин"; +drop user "无名氏 "; +drop user "🙈 🙉 🙊"; diff --git a/parser/testdata/01120_join_constants/ast.json b/parser/testdata/01120_join_constants/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01120_join_constants/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01120_join_constants/metadata.json b/parser/testdata/01120_join_constants/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01120_join_constants/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01120_join_constants/query.sql b/parser/testdata/01120_join_constants/query.sql new file mode 100644 index 000000000..d8c8b5757 --- /dev/null +++ b/parser/testdata/01120_join_constants/query.sql @@ -0,0 +1,35 @@ +SELECT + t1.*, + t2.*, + 'world' AS constant, + isConstant('world') +FROM +( + SELECT + arrayJoin([1, 2]) AS k, + 'hello' +) AS t1 +LEFT JOIN +( + SELECT + arrayJoin([1, 3]) AS k, + 'world' +) AS t2 ON t1.k = t2.k ORDER BY t1.k; + +SELECT + t1.*, + t2.*, + 123 AS constant, + isConstant('world') +FROM +( + SELECT + arrayJoin([1, 2]) AS k, + 321 +) AS t1 +LEFT JOIN +( + SELECT + arrayJoin([1, 3]) AS k, + 123 +) AS t2 ON t1.k = t2.k ORDER BY t1.k; diff --git a/parser/testdata/01121_remote_scalar_subquery/ast.json b/parser/testdata/01121_remote_scalar_subquery/ast.json new file mode 100644 index 000000000..80b4c6c2a --- /dev/null +++ b/parser/testdata/01121_remote_scalar_subquery/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '127.0.0.{1,2}'" + }, + { + "explain": " Identifier system.one" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001332853, + "rows_read": 17, + "bytes_read": 679 + } +} diff --git a/parser/testdata/01121_remote_scalar_subquery/metadata.json b/parser/testdata/01121_remote_scalar_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01121_remote_scalar_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01121_remote_scalar_subquery/query.sql b/parser/testdata/01121_remote_scalar_subquery/query.sql new file mode 100644 index 000000000..2d0c842c5 --- /dev/null +++ b/parser/testdata/01121_remote_scalar_subquery/query.sql @@ -0,0 +1,2 @@ +SELECT (SELECT 1) FROM remote('127.0.0.{1,2}', system.one); +SELECT (SELECT 1) FROM remote('127.0.0.{1,2}'); diff --git a/parser/testdata/01122_totals_rollup_having_block_header/ast.json 
b/parser/testdata/01122_totals_rollup_having_block_header/ast.json new file mode 100644 index 000000000..6ef50f61a --- /dev/null +++ b/parser/testdata/01122_totals_rollup_having_block_header/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery rollup_having (children 1)" + }, + { + "explain": " Identifier rollup_having" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001264408, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/01122_totals_rollup_having_block_header/metadata.json b/parser/testdata/01122_totals_rollup_having_block_header/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01122_totals_rollup_having_block_header/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01122_totals_rollup_having_block_header/query.sql b/parser/testdata/01122_totals_rollup_having_block_header/query.sql new file mode 100644 index 000000000..7f0c29e94 --- /dev/null +++ b/parser/testdata/01122_totals_rollup_having_block_header/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS rollup_having; +CREATE TABLE rollup_having ( + a Nullable(String), + b Nullable(String) +) ENGINE = Memory; + +INSERT INTO rollup_having VALUES (NULL, NULL); +INSERT INTO rollup_having VALUES ('a', NULL); +INSERT INTO rollup_having VALUES ('a', 'b'); + +SELECT a, b, count(*) FROM rollup_having GROUP BY a, b WITH ROLLUP WITH TOTALS HAVING a IS NOT NULL; -- { serverError NOT_IMPLEMENTED } +SELECT a, b, count(*) FROM rollup_having GROUP BY a, b WITH ROLLUP WITH TOTALS HAVING a IS NOT NULL and b IS NOT NULL; -- { serverError NOT_IMPLEMENTED } + +DROP TABLE rollup_having; diff --git a/parser/testdata/01123_parse_date_time_best_effort_even_more/ast.json b/parser/testdata/01123_parse_date_time_best_effort_even_more/ast.json new file mode 100644 index 000000000..2fd2673da --- /dev/null +++ b/parser/testdata/01123_parse_date_time_best_effort_even_more/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTimeZone (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function parseDateTimeBestEffort (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Thu, 18 Aug 2018 07:22:16 GMT'" + }, + { + "explain": " Literal 'UTC'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001435258, + "rows_read": 10, + "bytes_read": 416 + } +} diff --git a/parser/testdata/01123_parse_date_time_best_effort_even_more/metadata.json b/parser/testdata/01123_parse_date_time_best_effort_even_more/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01123_parse_date_time_best_effort_even_more/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01123_parse_date_time_best_effort_even_more/query.sql b/parser/testdata/01123_parse_date_time_best_effort_even_more/query.sql new file mode 100644 index 000000000..a4f6f1734 --- /dev/null +++ b/parser/testdata/01123_parse_date_time_best_effort_even_more/query.sql @@ -0,0 +1,2 @@ +SELECT toTimeZone(parseDateTimeBestEffort('Thu, 18 Aug 2018 07:22:16 GMT'), 'UTC'); +SELECT 
toTimeZone(parseDateTimeBestEffort('Tue, 16 Aug 2018 07:22:16 GMT'), 'UTC'); diff --git a/parser/testdata/01124_view_bad_types/ast.json b/parser/testdata/01124_view_bad_types/ast.json new file mode 100644 index 000000000..df13fe820 --- /dev/null +++ b/parser/testdata/01124_view_bad_types/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery source_table (children 1)" + }, + { + "explain": " Identifier source_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001526256, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/01124_view_bad_types/metadata.json b/parser/testdata/01124_view_bad_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01124_view_bad_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01124_view_bad_types/query.sql b/parser/testdata/01124_view_bad_types/query.sql new file mode 100644 index 000000000..715f7b375 --- /dev/null +++ b/parser/testdata/01124_view_bad_types/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS source_table; +CREATE TABLE source_table (x UInt16) ENGINE = TinyLog; +INSERT INTO source_table SELECT * FROM system.numbers LIMIT 10; + +DROP TABLE IF EXISTS dest_view; +CREATE VIEW dest_view (x UInt64) AS SELECT * FROM source_table; + +SELECT x, any(x) FROM dest_view GROUP BY x ORDER BY x; + +DROP TABLE dest_view; +DROP TABLE source_table; diff --git a/parser/testdata/01125_dict_ddl_cannot_add_column/ast.json b/parser/testdata/01125_dict_ddl_cannot_add_column/ast.json new file mode 100644 index 000000000..bd19ead81 --- /dev/null +++ b/parser/testdata/01125_dict_ddl_cannot_add_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery date_table (children 1)" + }, + { + "explain": " Identifier date_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001417016, + "rows_read": 2, + "bytes_read": 73 + } +} diff --git a/parser/testdata/01125_dict_ddl_cannot_add_column/metadata.json b/parser/testdata/01125_dict_ddl_cannot_add_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01125_dict_ddl_cannot_add_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01125_dict_ddl_cannot_add_column/query.sql b/parser/testdata/01125_dict_ddl_cannot_add_column/query.sql new file mode 100644 index 000000000..6a818d94a --- /dev/null +++ b/parser/testdata/01125_dict_ddl_cannot_add_column/query.sql @@ -0,0 +1,29 @@ +CREATE TABLE date_table +( + id UInt32, + val String, + start Date, + end Date +) Engine = Memory(); + +INSERT INTO date_table VALUES(1, '1', toDate('2019-01-05'), toDate('2020-01-10')); + +CREATE DICTIONARY somedict +( + id UInt32, + val String, + start Date, + end Date +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'date_table' DB currentDatabase())) +LAYOUT(RANGE_HASHED()) +RANGE (MIN start MAX end) +LIFETIME(MIN 300 MAX 360); + +SELECT * from somedict; + +-- No dictionary columns +SELECT 1 FROM somedict; + +SHOW TABLES; diff --git a/parser/testdata/01125_generate_random_qoega/ast.json b/parser/testdata/01125_generate_random_qoega/ast.json new file mode 100644 index 000000000..5b557d099 --- /dev/null +++ b/parser/testdata/01125_generate_random_qoega/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], 
+ + "data": + [ + { + "explain": "DropQuery mass_table_117 (children 1)" + }, + { + "explain": " Identifier mass_table_117" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001251254, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/01125_generate_random_qoega/metadata.json b/parser/testdata/01125_generate_random_qoega/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01125_generate_random_qoega/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01125_generate_random_qoega/query.sql b/parser/testdata/01125_generate_random_qoega/query.sql new file mode 100644 index 000000000..9088e411a --- /dev/null +++ b/parser/testdata/01125_generate_random_qoega/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS mass_table_117; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE mass_table_117 (`dt` Date, `site_id` Int32, `site_key` String) ENGINE = MergeTree(dt, (site_id, site_key, dt), 8192); +INSERT INTO mass_table_117 SELECT * FROM generateRandom('`dt` Date,`site_id` Int32,`site_key` String', 1, 10, 2) LIMIT 100; +SELECT count(), sum(cityHash64(*)) FROM mass_table_117; +DROP TABLE mass_table_117; diff --git a/parser/testdata/01126_month_partitioning_consistent_code/ast.json b/parser/testdata/01126_month_partitioning_consistent_code/ast.json new file mode 100644 index 000000000..f4ab1e414 --- /dev/null +++ b/parser/testdata/01126_month_partitioning_consistent_code/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mt (children 1)" + }, + { + "explain": " Identifier mt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001585625, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01126_month_partitioning_consistent_code/metadata.json b/parser/testdata/01126_month_partitioning_consistent_code/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01126_month_partitioning_consistent_code/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01126_month_partitioning_consistent_code/query.sql b/parser/testdata/01126_month_partitioning_consistent_code/query.sql new file mode 100644 index 000000000..f5f04178d --- /dev/null +++ b/parser/testdata/01126_month_partitioning_consistent_code/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS mt; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE mt (d Date, x UInt8) ENGINE = MergeTree(d, x, 8192); +INSERT INTO mt VALUES (52392, 1), (62677, 2); +DROP TABLE mt; diff --git a/parser/testdata/01127_month_partitioning_consistency_select/ast.json b/parser/testdata/01127_month_partitioning_consistency_select/ast.json new file mode 100644 index 000000000..9ff8f5666 --- /dev/null +++ b/parser/testdata/01127_month_partitioning_consistency_select/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001183783, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01127_month_partitioning_consistency_select/metadata.json b/parser/testdata/01127_month_partitioning_consistency_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01127_month_partitioning_consistency_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01127_month_partitioning_consistency_select/query.sql b/parser/testdata/01127_month_partitioning_consistency_select/query.sql new file mode 100644 index 000000000..78632ab24 --- /dev/null +++ b/parser/testdata/01127_month_partitioning_consistency_select/query.sql @@ -0,0 +1,14 @@ +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE mt (d Date, x String) ENGINE = MergeTree(d, x, 8192); +INSERT INTO mt VALUES ('2106-02-07', 'Hello'), ('1970-01-01', 'World'); + +SELECT 'Q1', * FROM mt WHERE d = '2106-02-07'; +SELECT 'Q2', * FROM mt WHERE d = '1970-01-01'; + +DETACH TABLE mt; +ATTACH TABLE mt; + +SELECT 'Q1', * FROM mt WHERE d = '2106-02-07'; +SELECT 'Q2', * FROM mt WHERE d = '1970-01-01'; + +DROP TABLE mt; diff --git a/parser/testdata/01128_generate_random_nested/ast.json b/parser/testdata/01128_generate_random_nested/ast.json new file mode 100644 index 000000000..1d1124eba --- /dev/null +++ b/parser/testdata/01128_generate_random_nested/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mass_table_312 (children 1)" + }, + { + "explain": " Identifier mass_table_312" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001240587, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/01128_generate_random_nested/metadata.json b/parser/testdata/01128_generate_random_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01128_generate_random_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01128_generate_random_nested/query.sql b/parser/testdata/01128_generate_random_nested/query.sql new file mode 100644 index 000000000..8098db894 --- /dev/null +++ b/parser/testdata/01128_generate_random_nested/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS mass_table_312; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE mass_table_312 (d Date DEFAULT '2000-01-01', x UInt64, n Nested(a String, b String)) ENGINE = MergeTree(d, x, 1); +INSERT INTO mass_table_312 SELECT * FROM generateRandom('`d` Date,`x` UInt64,`n.a` Array(String),`n.b` Array(String)', 1, 10, 2) LIMIT 100; + +SELECT count(), sum(cityHash64(*)) FROM mass_table_312; +SELECT count(), sum(cityHash64(*)) FROM mass_table_312 ARRAY JOIN n; + +DROP TABLE mass_table_312; diff --git a/parser/testdata/01129_dict_get_join_lose_constness/ast.json b/parser/testdata/01129_dict_get_join_lose_constness/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01129_dict_get_join_lose_constness/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01129_dict_get_join_lose_constness/metadata.json b/parser/testdata/01129_dict_get_join_lose_constness/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01129_dict_get_join_lose_constness/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01129_dict_get_join_lose_constness/query.sql b/parser/testdata/01129_dict_get_join_lose_constness/query.sql new file mode 100644 index 000000000..fd3e12f7a --- /dev/null +++ b/parser/testdata/01129_dict_get_join_lose_constness/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-parallel + +DROP DICTIONARY IF EXISTS system.dict1; + +CREATE DICTIONARY IF NOT EXISTS system.dict1 +( + bytes_allocated UInt64, + element_count Int32, + loading_start_time DateTime +) +PRIMARY KEY bytes_allocated +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 
'default' PASSWORD '' TABLE 'dictionaries' DB 'system')) +LIFETIME(0) +LAYOUT(hashed()); + +SELECT join_key, + toTimeZone(dictGetDateTime('system.dict1', 'loading_start_time', toUInt64(dict_key)), 'UTC') AS datetime +FROM (select dictGetInt32('system.dict1', 'element_count', toUInt64(dict_key)) AS join_key, 1 AS dict_key) js1 +LEFT JOIN (SELECT toInt32(2) AS join_key) js2 +USING (join_key) +WHERE now() >= datetime; + +DROP DICTIONARY IF EXISTS system.dict1; diff --git a/parser/testdata/01131_max_rows_to_sort/ast.json b/parser/testdata/01131_max_rows_to_sort/ast.json new file mode 100644 index 000000000..ebbb7cb05 --- /dev/null +++ b/parser/testdata/01131_max_rows_to_sort/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001459568, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01131_max_rows_to_sort/metadata.json b/parser/testdata/01131_max_rows_to_sort/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01131_max_rows_to_sort/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01131_max_rows_to_sort/query.sql b/parser/testdata/01131_max_rows_to_sort/query.sql new file mode 100644 index 000000000..0d6ff643a --- /dev/null +++ b/parser/testdata/01131_max_rows_to_sort/query.sql @@ -0,0 +1,8 @@ +SET max_rows_to_sort = 100; +SELECT * FROM system.numbers ORDER BY number; -- { serverError TOO_MANY_ROWS_OR_BYTES } + +SET sort_overflow_mode = 'break'; +SET max_block_size = 1000; + +set query_plan_remove_redundant_sorting=0; -- to keep sorting in the query below +SELECT count() >= 100 AND count() <= 1000 FROM (SELECT * FROM system.numbers ORDER BY number); diff --git a/parser/testdata/01132_max_rows_to_read/ast.json b/parser/testdata/01132_max_rows_to_read/ast.json new file mode 100644 index 000000000..81c4db947 --- /dev/null +++ b/parser/testdata/01132_max_rows_to_read/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery row_limits_test (children 1)" + }, + { + "explain": " Identifier row_limits_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001395446, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/01132_max_rows_to_read/metadata.json b/parser/testdata/01132_max_rows_to_read/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01132_max_rows_to_read/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01132_max_rows_to_read/query.sql b/parser/testdata/01132_max_rows_to_read/query.sql new file mode 100644 index 000000000..a76f2215e --- /dev/null +++ b/parser/testdata/01132_max_rows_to_read/query.sql @@ -0,0 +1,97 @@ +DROP TABLE IF EXISTS row_limits_test; + +SET max_block_size = 10; +SET max_rows_to_read = 20; +SET read_overflow_mode = 'throw'; + +SELECT count() FROM numbers(30); -- { serverError TOO_MANY_ROWS } +SELECT count() FROM numbers(19); +SELECT count() FROM numbers(20); +SELECT count() FROM numbers(21); -- { serverError TOO_MANY_ROWS } + +-- check early exception if the estimated number of rows is high +SELECT * FROM numbers(30); -- { serverError TOO_MANY_ROWS } + +SET read_overflow_mode = 'break'; + +SELECT count() FROM numbers(19); +SELECT count() FROM numbers(20); +SELECT count() FROM numbers(21); +SELECT count() FROM numbers(29); +SELECT count() FROM 
numbers(30); +SELECT count() FROM numbers(31); + +-- check that partial result is returned even if the estimated number of rows is high +SELECT * FROM numbers(30); + +-- the same for uneven block sizes +SET max_block_size = 11; +SELECT * FROM numbers(30); +SET max_block_size = 9; +SELECT * FROM numbers(30); + +-- When reaching row limits, make sure we don't do a large number of range scans and continue +-- processing all parts when we don't need to. For instance, we create 3 parts below with 10,000 rows each; +-- with a row limit <= 1000, we shouldn't exceed this value when max_threads = 1. +-- (process_part in MergeTreeDataSelectExecutor uses a thread pool the size of max_threads to read data, +-- so we can exceed it slightly if max_threads > 1, but we'll still prevent a lot of scans and part processing) + +DROP TABLE IF EXISTS row_limits_fail_fast; +CREATE TABLE row_limits_fail_fast +( + key UInt64, + value String +) ENGINE = MergeTree() ORDER BY key +SETTINGS index_granularity = 100; + +SET max_rows_to_read = 0; -- so we don't hit row limits when populating data + +-- Insert multiple parts with significant data. Multiple parts are important because row limits +-- are checked per part when determining what ranges need to be read for the query. +INSERT INTO row_limits_fail_fast SELECT number, toString(number) FROM numbers(10000); +INSERT INTO row_limits_fail_fast SELECT number + 10000, toString(number) FROM numbers(10000); +INSERT INTO row_limits_fail_fast SELECT number + 20000, toString(number) FROM numbers(10000); + +-- to keep the number of parts predictable +SYSTEM STOP MERGES row_limits_fail_fast; + +SET max_rows_to_read = 1000; +SET read_overflow_mode = 'throw'; + +-- Should fail fast during PK filtering - the query selects more rows than the limit +SELECT count() FROM row_limits_fail_fast WHERE key < 500000; -- { serverError TOO_MANY_ROWS } +SELECT count() FROM row_limits_fail_fast WHERE key < 500; + +-- Test with specific key ranges +SELECT count() FROM row_limits_fail_fast WHERE key BETWEEN 1000 AND 1500; + +-- Test explicit scan to verify fail-fast during data reading +SET max_rows_to_read = 100; +SELECT * FROM row_limits_fail_fast WHERE key < 200 FORMAT Null; -- { serverError TOO_MANY_ROWS } + +-- Test with selective filter - needs at least 1 granule +SET max_rows_to_read = 150; +SELECT count() FROM row_limits_fail_fast WHERE key IN (1, 2, 3, 4, 5); + +-- Test with max_rows_to_read_leaf +SET max_rows_to_read = 0; +SET max_rows_to_read_leaf = 1000; +SET read_overflow_mode_leaf = 'throw'; +SELECT count() FROM row_limits_fail_fast WHERE key < 500000; -- { serverError TOO_MANY_ROWS } + +-- Reset and check that break mode still works and we fail fast +SET max_rows_to_read = 600; +SET max_rows_to_read_leaf = 0; +SET read_overflow_mode = 'break'; +SELECT count() FROM row_limits_fail_fast WHERE key < 500; + +-- Test fail-fast with multiple threads +SET max_threads = 4; +SET read_overflow_mode = 'throw'; +SET max_rows_to_read = 500; +SELECT count() FROM row_limits_fail_fast WHERE key < 100000; -- { serverError TOO_MANY_ROWS } + +-- But should succeed when the actual filtered result is small +SELECT count() FROM row_limits_fail_fast WHERE key < 400; + +DROP TABLE row_limits_fail_fast; diff --git a/parser/testdata/01134_max_rows_to_group_by/ast.json b/parser/testdata/01134_max_rows_to_group_by/ast.json new file mode 100644 index 000000000..0437edd23 --- /dev/null +++ b/parser/testdata/01134_max_rows_to_group_by/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type":
"String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001569278, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01134_max_rows_to_group_by/metadata.json b/parser/testdata/01134_max_rows_to_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01134_max_rows_to_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01134_max_rows_to_group_by/query.sql b/parser/testdata/01134_max_rows_to_group_by/query.sql new file mode 100644 index 000000000..2828ef9cf --- /dev/null +++ b/parser/testdata/01134_max_rows_to_group_by/query.sql @@ -0,0 +1,21 @@ +SET max_block_size = 1; +SET max_rows_to_group_by = 10; +SET group_by_overflow_mode = 'throw'; + +-- Settings 'max_rows_to_group_by' and 'max_bytes_before_external_group_by' are mutually exclusive. +SET max_bytes_before_external_group_by = 0; +SET max_bytes_ratio_before_external_group_by = 0; + +SELECT 'test1', number FROM system.numbers GROUP BY number; -- { serverError TOO_MANY_ROWS } + +SET group_by_overflow_mode = 'break'; +SELECT 'test2', number FROM system.numbers GROUP BY number ORDER BY number; + +SET max_rows_to_read = 500; +SELECT 'test3', number FROM system.numbers GROUP BY number ORDER BY number; + +SET group_by_overflow_mode = 'any'; +SELECT 'test4', number FROM numbers(1000) GROUP BY number ORDER BY number; -- { serverError TOO_MANY_ROWS } + +SET max_rows_to_read = 1000; +SELECT 'test5', number FROM numbers(1000) GROUP BY number ORDER BY number; diff --git a/parser/testdata/01134_set_overflow_mode/ast.json b/parser/testdata/01134_set_overflow_mode/ast.json new file mode 100644 index 000000000..29002d45a --- /dev/null +++ b/parser/testdata/01134_set_overflow_mode/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001848037, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01134_set_overflow_mode/metadata.json b/parser/testdata/01134_set_overflow_mode/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01134_set_overflow_mode/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01134_set_overflow_mode/query.sql b/parser/testdata/01134_set_overflow_mode/query.sql new file mode 100644 index 000000000..c3cf5ffed --- /dev/null +++ b/parser/testdata/01134_set_overflow_mode/query.sql @@ -0,0 +1,17 @@ +SET max_block_size = 10; +SET max_rows_in_set = 20; +SET set_overflow_mode = 'throw'; + +SELECT arrayJoin([5, 25]) IN (SELECT DISTINCT toUInt8(intDiv(number, 10)) FROM numbers(300)); -- { serverError SET_SIZE_LIMIT_EXCEEDED } +SELECT arrayJoin([5, 25]) IN (SELECT DISTINCT toUInt8(intDiv(number, 10)) FROM numbers(190)); +SELECT arrayJoin([5, 25]) IN (SELECT DISTINCT toUInt8(intDiv(number, 10)) FROM numbers(200)); +SELECT arrayJoin([5, 25]) IN (SELECT DISTINCT toUInt8(intDiv(number, 10)) FROM numbers(210)); -- { serverError SET_SIZE_LIMIT_EXCEEDED } + +SET set_overflow_mode = 'break'; + +SELECT '---'; + +SELECT arrayJoin([5, 25]) IN (SELECT DISTINCT toUInt8(intDiv(number, 10)) FROM numbers(300)); +SELECT arrayJoin([5, 25]) IN (SELECT DISTINCT toUInt8(intDiv(number, 10)) FROM numbers(190)); +SELECT arrayJoin([5, 25]) IN (SELECT DISTINCT toUInt8(intDiv(number, 10)) FROM numbers(200)); +SELECT arrayJoin([5, 25]) IN (SELECT DISTINCT toUInt8(intDiv(number, 
10)) FROM numbers(210)); diff --git a/parser/testdata/01135_default_and_alter_zookeeper/ast.json b/parser/testdata/01135_default_and_alter_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01135_default_and_alter_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01135_default_and_alter_zookeeper/metadata.json b/parser/testdata/01135_default_and_alter_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01135_default_and_alter_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01135_default_and_alter_zookeeper/query.sql b/parser/testdata/01135_default_and_alter_zookeeper/query.sql new file mode 100644 index 000000000..209694263 --- /dev/null +++ b/parser/testdata/01135_default_and_alter_zookeeper/query.sql @@ -0,0 +1,29 @@ +-- Tags: zookeeper + +DROP TABLE IF EXISTS default_table SYNC; + +CREATE TABLE default_table +( + id UInt64, + enum_column Enum8('undefined' = 0, 'fox' = 1, 'index' = 2) +) +ENGINE ReplicatedMergeTree('/clickhouse/{database}/test_01135/default_table', '1') +ORDER BY tuple(); + +INSERT INTO default_table VALUES(1, 'index'), (2, 'fox'); + +ALTER TABLE default_table MODIFY COLUMN enum_column Enum8('undefined' = 0, 'fox' = 1, 'index' = 2) DEFAULT 'undefined'; + +INSERT INTO default_table (id) VALUES(3), (4); + +DETACH TABLE default_table; + +ATTACH TABLE default_table; + +SELECT COUNT() from default_table; + +ALTER TABLE default_table MODIFY COLUMN enum_column Enum8('undefined' = 0, 'fox' = 1, 'index' = 2) DEFAULT 'fox'; + +SHOW CREATE TABLE default_table; + +DROP TABLE IF EXISTS default_table SYNC; diff --git a/parser/testdata/01136_multiple_sets/ast.json b/parser/testdata/01136_multiple_sets/ast.json new file mode 100644 index 000000000..bf53ad922 --- /dev/null +++ b/parser/testdata/01136_multiple_sets/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001406124, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01136_multiple_sets/metadata.json b/parser/testdata/01136_multiple_sets/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01136_multiple_sets/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01136_multiple_sets/query.sql b/parser/testdata/01136_multiple_sets/query.sql new file mode 100644 index 000000000..bd8651495 --- /dev/null +++ b/parser/testdata/01136_multiple_sets/query.sql @@ -0,0 +1,12 @@ +drop table if exists test; + +create table test (project LowCardinality(String)) engine=MergeTree() order by project; +insert into test values ('val1'), ('val2'), ('val3'); + +select sum(project in ('val1', 'val2')) from test; +set force_primary_key = 1; +select sum(project in ('val1', 'val2')) from test where project in ('val1', 'val2'); +select count() from test where project in ('val1', 'val2'); +select project in ('val1', 'val2') from test where project in ('val1', 'val2'); + +drop table test; diff --git a/parser/testdata/01137_order_by_func/ast.json b/parser/testdata/01137_order_by_func/ast.json new file mode 100644 index 000000000..af6879ad1 --- /dev/null +++ b/parser/testdata/01137_order_by_func/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + 
"data": + [ + { + "explain": "DropQuery pk_func (children 1)" + }, + { + "explain": " Identifier pk_func" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.002021508, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/01137_order_by_func/metadata.json b/parser/testdata/01137_order_by_func/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01137_order_by_func/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01137_order_by_func/query.sql b/parser/testdata/01137_order_by_func/query.sql new file mode 100644 index 000000000..536f2d1c6 --- /dev/null +++ b/parser/testdata/01137_order_by_func/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS pk_func; +CREATE TABLE pk_func(d DateTime, ui UInt32) ENGINE = MergeTree ORDER BY toDate(d) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO pk_func SELECT '2020-05-05 01:00:00', number FROM numbers(1000000); +INSERT INTO pk_func SELECT '2020-05-06 01:00:00', number FROM numbers(1000000); +INSERT INTO pk_func SELECT '2020-05-07 01:00:00', number FROM numbers(1000000); + +SELECT * FROM pk_func ORDER BY toDate(d), ui LIMIT 5; + +DROP TABLE pk_func; + +DROP TABLE IF EXISTS nORX; +CREATE TABLE nORX (`A` Int64, `B` Int64, `V` Int64) ENGINE = MergeTree ORDER BY (A, negate(B)) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +INSERT INTO nORX SELECT 111, number, number FROM numbers(10000000); + +SELECT * +FROM nORX +WHERE B >= 1000 +ORDER BY + A ASC, + -B ASC +LIMIT 3 +SETTINGS max_threads = 1; + +DROP TABLE nORX; diff --git a/parser/testdata/01137_order_by_func_final/ast.json b/parser/testdata/01137_order_by_func_final/ast.json new file mode 100644 index 000000000..b85178c74 --- /dev/null +++ b/parser/testdata/01137_order_by_func_final/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery pk_func (children 1)" + }, + { + "explain": " Identifier pk_func" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00119676, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/01137_order_by_func_final/metadata.json b/parser/testdata/01137_order_by_func_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01137_order_by_func_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01137_order_by_func_final/query.sql b/parser/testdata/01137_order_by_func_final/query.sql new file mode 100644 index 000000000..afbced301 --- /dev/null +++ b/parser/testdata/01137_order_by_func_final/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS pk_func; +CREATE TABLE pk_func(d DateTime, ui UInt32) ENGINE = SummingMergeTree ORDER BY toDate(d); + +INSERT INTO pk_func SELECT '2020-05-05 01:00:00', number FROM numbers(100000); +INSERT INTO pk_func SELECT '2020-05-06 01:00:00', number FROM numbers(100000); +INSERT INTO pk_func SELECT '2020-05-07 01:00:00', number FROM numbers(100000); + +SELECT toDate(d), ui FROM pk_func FINAL order by d; + +DROP TABLE pk_func; diff --git a/parser/testdata/01137_sample_final/ast.json b/parser/testdata/01137_sample_final/ast.json new file mode 100644 index 000000000..aa86e084a --- /dev/null +++ b/parser/testdata/01137_sample_final/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } 
+ ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001421795, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01137_sample_final/metadata.json b/parser/testdata/01137_sample_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01137_sample_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01137_sample_final/query.sql b/parser/testdata/01137_sample_final/query.sql new file mode 100644 index 000000000..99fac5147 --- /dev/null +++ b/parser/testdata/01137_sample_final/query.sql @@ -0,0 +1,13 @@ +drop table if exists tab; + +create table tab (x UInt64, v UInt64) engine = ReplacingMergeTree(v) order by (x, sipHash64(x)) sample by sipHash64(x); +insert into tab select number, number from numbers(1000); +select * from tab final sample 1/2 order by x limit 5; + +drop table tab; + +create table tab (x UInt64, v UInt64) engine = ReplacingMergeTree(v) order by (x, sipHash64(x)) sample by sipHash64(x); +insert into tab select number, number from numbers(1000); +select sipHash64(x) from tab sample 1/2 order by x, sipHash64(x) limit 5; + +drop table tab; diff --git a/parser/testdata/01138_join_on_distributed_and_tmp/ast.json b/parser/testdata/01138_join_on_distributed_and_tmp/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01138_join_on_distributed_and_tmp/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01138_join_on_distributed_and_tmp/metadata.json b/parser/testdata/01138_join_on_distributed_and_tmp/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01138_join_on_distributed_and_tmp/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01138_join_on_distributed_and_tmp/query.sql b/parser/testdata/01138_join_on_distributed_and_tmp/query.sql new file mode 100644 index 000000000..b2f909215 --- /dev/null +++ b/parser/testdata/01138_join_on_distributed_and_tmp/query.sql @@ -0,0 +1,18 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS foo_local; +DROP TABLE IF EXISTS foo_distributed; + +CREATE TABLE foo_local (bar UInt64) +ENGINE = MergeTree() +ORDER BY tuple(); + +CREATE TABLE foo_distributed AS foo_local +ENGINE = Distributed('test_cluster_two_shards_localhost', currentDatabase(), foo_local); + +CREATE TEMPORARY TABLE _tmp_baz (qux UInt64); + +SELECT * FROM foo_distributed JOIN _tmp_baz ON foo_distributed.bar = _tmp_baz.qux; + +DROP TABLE foo_local; +DROP TABLE foo_distributed; diff --git a/parser/testdata/01139_asof_join_types/ast.json b/parser/testdata/01139_asof_join_types/ast.json new file mode 100644 index 000000000..b01366c01 --- /dev/null +++ b/parser/testdata/01139_asof_join_types/ast.json @@ -0,0 +1,118 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias t1) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList 
(children 2)" + }, + { + "explain": " Literal UInt64_0 (alias k)" + }, + { + "explain": " Function toInt8 (alias v) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias t2) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0 (alias k)" + }, + { + "explain": " Function toInt8 (alias v) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " TableJoin (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " Identifier v" + } + ], + + "rows": 32, + + "statistics": + { + "elapsed": 0.001588609, + "rows_read": 32, + "bytes_read": 1344 + } +} diff --git a/parser/testdata/01139_asof_join_types/metadata.json b/parser/testdata/01139_asof_join_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01139_asof_join_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01139_asof_join_types/query.sql b/parser/testdata/01139_asof_join_types/query.sql new file mode 100644 index 000000000..1a2308318 --- /dev/null +++ b/parser/testdata/01139_asof_join_types/query.sql @@ -0,0 +1,18 @@ +select * from (select 0 as k, toInt8(1) as v) t1 asof join (select 0 as k, toInt8(0) as v) t2 using(k, v); +select * from (select 0 as k, toInt16(1) as v) t1 asof join (select 0 as k, toInt16(0) as v) t2 using(k, v); +select * from (select 0 as k, toInt32(1) as v) t1 asof join (select 0 as k, toInt32(0) as v) t2 using(k, v); +select * from (select 0 as k, toInt64(1) as v) t1 asof join (select 0 as k, toInt64(0) as v) t2 using(k, v); + +select * from (select 0 as k, toUInt8(1) as v) t1 asof join (select 0 as k, toUInt8(0) as v) t2 using(k, v); +select * from (select 0 as k, toUInt16(1) as v) t1 asof join (select 0 as k, toUInt16(0) as v) t2 using(k, v); +select * from (select 0 as k, toUInt32(1) as v) t1 asof join (select 0 as k, toUInt32(0) as v) t2 using(k, v); +select * from (select 0 as k, toUInt64(1) as v) t1 asof join (select 0 as k, toUInt64(0) as v) t2 using(k, v); + +select * from (select 0 as k, toDecimal32(1, 0) as v) t1 asof join (select 0 as k, toDecimal32(0, 0) as v) t2 using(k, v); +select * from (select 0 as k, toDecimal64(1, 0) as v) t1 asof join (select 0 as k, toDecimal64(0, 0) as v) t2 using(k, v); +select * from (select 0 as k, toDecimal128(1, 0) as v) t1 asof join (select 0 as k, toDecimal128(0, 0) as v) t2 using(k, v); + +select * from (select 0 as k, toDate(0) as v) t1 asof join (select 0 as k, toDate(0) as v) t2 using(k, v); +select * from (select 0 as k, toDateTime(0, 'UTC') as v) t1 asof join (select 0 as k, toDateTime(0, 'UTC') as v) t2 using(k, v); + +select * from (select 0 as k, 'x' as v) t1 asof join (select 0 as k, 'x' as v) t2 using(k, v); -- { serverError BAD_TYPE_OF_FIELD } diff --git a/parser/testdata/01140_select_from_storage_join_fix/ast.json b/parser/testdata/01140_select_from_storage_join_fix/ast.json new file mode 100644 index 000000000..38a8260f3 --- /dev/null +++ b/parser/testdata/01140_select_from_storage_join_fix/ast.json @@ -0,0 +1,28 @@ 
+{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001436769, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01140_select_from_storage_join_fix/metadata.json b/parser/testdata/01140_select_from_storage_join_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01140_select_from_storage_join_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01140_select_from_storage_join_fix/query.sql b/parser/testdata/01140_select_from_storage_join_fix/query.sql new file mode 100644 index 000000000..4e64c90f5 --- /dev/null +++ b/parser/testdata/01140_select_from_storage_join_fix/query.sql @@ -0,0 +1,42 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; + +CREATE TABLE t1 (id String, name String, value UInt32) +ENGINE = Join(ANY, LEFT, id) +SETTINGS join_use_nulls = 1; + +CREATE TABLE t2 (id String, name String, value UInt32) +ENGINE = Join(ANY, LEFT, id) +SETTINGS join_use_nulls = 0; + +CREATE TABLE t3 (id Nullable(String), name String, value UInt32) +ENGINE = Join(ANY, LEFT, id) +SETTINGS join_use_nulls = 1; + +CREATE TABLE t4 (id String, name Nullable(String), value UInt32) +ENGINE = Join(ANY, LEFT, id) +SETTINGS join_use_nulls = 0; + +insert into t1 values('1', 's', 1); +insert into t2 values('2', 's', 2); +insert into t3 values('3', 's', 3); +insert into t4 values('4', 's', 4); + +select *, toTypeName(id), toTypeName(name) from t1; +select *, toTypeName(id), toTypeName(name) from t2; +select *, toTypeName(id), toTypeName(name) from t3; +select *, toTypeName(id), toTypeName(name) from t4; + +SET join_use_nulls = 1; + +select *, toTypeName(id), toTypeName(name) from t1; +select *, toTypeName(id), toTypeName(name) from t2; +select *, toTypeName(id), toTypeName(name) from t3; +select *, toTypeName(id), toTypeName(name) from t4; + +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +DROP TABLE t4; diff --git a/parser/testdata/01141_join_get_negative/ast.json b/parser/testdata/01141_join_get_negative/ast.json new file mode 100644 index 000000000..9b52b3209 --- /dev/null +++ b/parser/testdata/01141_join_get_negative/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001172812, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01141_join_get_negative/metadata.json b/parser/testdata/01141_join_get_negative/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01141_join_get_negative/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01141_join_get_negative/query.sql b/parser/testdata/01141_join_get_negative/query.sql new file mode 100644 index 000000000..86c00ee43 --- /dev/null +++ b/parser/testdata/01141_join_get_negative/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (`s` String, `x` Array(UInt8), `k` UInt64) ENGINE = Join(ANY, LEFT, k); +CREATE TABLE t2 (`s` String, `x` Array(UInt8), `k` UInt64) ENGINE = Join(ANY, INNER, k); + +SELECT joinGet('t1', '', number) FROM numbers(2); -- { serverError NO_SUCH_COLUMN_IN_TABLE } +SELECT joinGet('t2', 
's', number) FROM numbers(2); -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } + +DROP TABLE t1; +DROP TABLE t2; diff --git a/parser/testdata/01142_join_lc_and_nullable_in_key/ast.json b/parser/testdata/01142_join_lc_and_nullable_in_key/ast.json new file mode 100644 index 000000000..59496aac8 --- /dev/null +++ b/parser/testdata/01142_join_lc_and_nullable_in_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001739361, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01142_join_lc_and_nullable_in_key/metadata.json b/parser/testdata/01142_join_lc_and_nullable_in_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01142_join_lc_and_nullable_in_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01142_join_lc_and_nullable_in_key/query.sql b/parser/testdata/01142_join_lc_and_nullable_in_key/query.sql new file mode 100644 index 000000000..33bdd1944 --- /dev/null +++ b/parser/testdata/01142_join_lc_and_nullable_in_key/query.sql @@ -0,0 +1,77 @@ +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS nr; + +CREATE TABLE t (`x` UInt32, `lc` LowCardinality(String)) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE nr (`x` Nullable(UInt32), `lc` Nullable(String)) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t VALUES (1, 'l'); +INSERT INTO nr VALUES (2, NULL); + +SET join_use_nulls = 0; + +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l LEFT JOIN nr AS r USING (x) ORDER BY x; +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l RIGHT JOIN nr AS r USING (x) ORDER BY x; +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l FULL JOIN nr AS r USING (x) ORDER BY x; + +SELECT '-'; + +-- lc should be a supertype of l.lc and r.lc, so expect Nullable(String) +SELECT x, lc, toTypeName(lc), r.lc, toTypeName(r.lc) FROM t AS l LEFT JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 1; +SELECT x, lc, toTypeName(lc), r.lc, toTypeName(r.lc) FROM t AS l RIGHT JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 1; +SELECT x, lc, toTypeName(lc), r.lc, toTypeName(r.lc) FROM t AS l FULL JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 1; + +SELECT '-'; + +-- the old behavior is different +SELECT x, lc, toTypeName(lc), r.lc, toTypeName(r.lc) FROM t AS l LEFT JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 0; +SELECT x, lc, toTypeName(lc), r.lc, toTypeName(r.lc) FROM t AS l RIGHT JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 0; +SELECT x, lc, toTypeName(lc), r.lc, toTypeName(r.lc) FROM t AS l FULL JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 0; + +SELECT '-'; + +SELECT x, lc, materialize(r.lc) y, toTypeName(y) FROM t AS l LEFT JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 1; +SELECT x, lc, materialize(r.lc) y, toTypeName(y) FROM t AS l RIGHT JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 1; +SELECT x, lc, materialize(r.lc) y, toTypeName(y) FROM t AS l FULL JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 1; + +SELECT '-'; + +SELECT x, lc, materialize(r.lc) y, toTypeName(y) FROM t AS l LEFT JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 0; +SELECT x, lc, materialize(r.lc) y, toTypeName(y) FROM t AS l RIGHT JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 0; +SELECT x, lc, materialize(r.lc)
y, toTypeName(y) FROM t AS l FULL JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 0; + +SELECT '-'; + +SELECT x, lc FROM t AS l RIGHT JOIN nr AS r USING (lc) SETTINGS enable_analyzer = 1; + +SELECT '-'; + +SELECT x, lc FROM t AS l RIGHT JOIN nr AS r USING (lc) SETTINGS enable_analyzer = 0; + +SELECT '-'; + +SET join_use_nulls = 1; + +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l LEFT JOIN nr AS r USING (x) ORDER BY x; +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l RIGHT JOIN nr AS r USING (x) ORDER BY x; +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l FULL JOIN nr AS r USING (x) ORDER BY x; + +SELECT '-'; + +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l LEFT JOIN nr AS r USING (lc) ORDER BY x; +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l RIGHT JOIN nr AS r USING (lc) ORDER BY x; +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l FULL JOIN nr AS r USING (lc) ORDER BY x; + +SELECT '-'; + +SELECT x, lc, materialize(r.lc) y, toTypeName(y) FROM t AS l LEFT JOIN nr AS r USING (lc) ORDER BY x; +SELECT x, lc, materialize(r.lc) y, toTypeName(y) FROM t AS l RIGHT JOIN nr AS r USING (lc) ORDER BY x; +SELECT x, lc, materialize(r.lc) y, toTypeName(y) FROM t AS l FULL JOIN nr AS r USING (lc) ORDER BY x; + +SELECT '-'; + +SELECT x, lc FROM t AS l RIGHT JOIN nr AS r USING (lc); + +SELECT '-'; + +DROP TABLE t; +DROP TABLE nr; diff --git a/parser/testdata/01142_merge_join_lc_and_nullable_in_key/ast.json b/parser/testdata/01142_merge_join_lc_and_nullable_in_key/ast.json new file mode 100644 index 000000000..87794d21f --- /dev/null +++ b/parser/testdata/01142_merge_join_lc_and_nullable_in_key/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001417857, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01142_merge_join_lc_and_nullable_in_key/metadata.json b/parser/testdata/01142_merge_join_lc_and_nullable_in_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01142_merge_join_lc_and_nullable_in_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01142_merge_join_lc_and_nullable_in_key/query.sql b/parser/testdata/01142_merge_join_lc_and_nullable_in_key/query.sql new file mode 100644 index 000000000..c1b0c1ed8 --- /dev/null +++ b/parser/testdata/01142_merge_join_lc_and_nullable_in_key/query.sql @@ -0,0 +1,63 @@ +SET join_algorithm = 'partial_merge'; + +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS nr; + +CREATE TABLE t (`x` UInt32, `lc` LowCardinality(String)) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE nr (`x` Nullable(UInt32), `lc` Nullable(String)) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t VALUES (1, 'l'); +INSERT INTO nr VALUES (2, NULL); + +SET join_use_nulls = 0; + +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l LEFT JOIN nr AS r USING (x) ORDER BY x; +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l RIGHT JOIN nr AS r USING (x) ORDER BY x; +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l FULL JOIN nr AS r USING (x) ORDER BY x; + +SELECT '-'; + +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l LEFT JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 1; +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l RIGHT JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 1; +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l FULL JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 1; + +SELECT '-'; + 
+SELECT x, lc, materialize(r.lc) y, toTypeName(y) FROM t AS l LEFT JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 1; +SELECT x, lc, materialize(r.lc) y, toTypeName(y) FROM t AS l RIGHT JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 1; +SELECT x, lc, materialize(r.lc) y, toTypeName(y) FROM t AS l FULL JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 1; + +SELECT '-'; + +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l LEFT JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 0; +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l RIGHT JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 0; +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l FULL JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 0; + +SELECT '-'; + +SELECT x, lc, materialize(r.lc) y, toTypeName(y) FROM t AS l LEFT JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 0; +SELECT x, lc, materialize(r.lc) y, toTypeName(y) FROM t AS l RIGHT JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 0; +SELECT x, lc, materialize(r.lc) y, toTypeName(y) FROM t AS l FULL JOIN nr AS r USING (lc) ORDER BY x SETTINGS enable_analyzer = 0; + +SELECT '-'; + +SET join_use_nulls = 1; + +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l LEFT JOIN nr AS r USING (x) ORDER BY x; +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l RIGHT JOIN nr AS r USING (x) ORDER BY x; +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l FULL JOIN nr AS r USING (x) ORDER BY x; + +SELECT '-'; + +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l LEFT JOIN nr AS r USING (lc) ORDER BY x; +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l RIGHT JOIN nr AS r USING (lc) ORDER BY x; +SELECT x, lc, r.lc, toTypeName(r.lc) FROM t AS l FULL JOIN nr AS r USING (lc) ORDER BY x; + +SELECT '-'; + +SELECT x, lc, materialize(r.lc) y, toTypeName(y) FROM t AS l LEFT JOIN nr AS r USING (lc) ORDER BY x; +SELECT x, lc, materialize(r.lc) y, toTypeName(y) FROM t AS l RIGHT JOIN nr AS r USING (lc) ORDER BY x; +SELECT x, lc, materialize(r.lc) y, toTypeName(y) FROM t AS l FULL JOIN nr AS r USING (lc) ORDER BY x; + +DROP TABLE nr; +DROP TABLE t; diff --git a/parser/testdata/01142_with_ties_and_aliases/ast.json b/parser/testdata/01142_with_ties_and_aliases/ast.json new file mode 100644 index 000000000..c44ed94c1 --- /dev/null +++ b/parser/testdata/01142_with_ties_and_aliases/ast.json @@ -0,0 +1,121 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function intDiv (alias value) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" 
+ }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_20" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier value" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier value" + } + ], + + "rows": 33, + + "statistics": + { + "elapsed": 0.001754522, + "rows_read": 33, + "bytes_read": 1366 + } +} diff --git a/parser/testdata/01142_with_ties_and_aliases/metadata.json b/parser/testdata/01142_with_ties_and_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01142_with_ties_and_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01142_with_ties_and_aliases/query.sql b/parser/testdata/01142_with_ties_and_aliases/query.sql new file mode 100644 index 000000000..de4a9281a --- /dev/null +++ b/parser/testdata/01142_with_ties_and_aliases/query.sql @@ -0,0 +1,12 @@ +select * from (select number, intDiv(number,5) value from numbers(20) order by value limit 3 with ties) ORDER BY number, value; + +drop table if exists wt; +create table wt (a Int, b Int) engine = Memory; +insert into wt select 0, number from numbers(5); + +select 1 from wt order by a limit 3 with ties; +select b from (select b from wt order by a limit 3 with ties) order by b; +select * from (select * from (with a * 2 as c select a, b from wt order by c limit 3 with ties) order by a, b); +select * from (select * from (select a * 2 as c, b from wt order by c limit 3 with ties) order by c, b); + +drop table if exists wt; diff --git a/parser/testdata/01143_trivial_count_with_join/ast.json b/parser/testdata/01143_trivial_count_with_join/ast.json new file mode 100644 index 000000000..1d528c1df --- /dev/null +++ b/parser/testdata/01143_trivial_count_with_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001358206, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01143_trivial_count_with_join/metadata.json b/parser/testdata/01143_trivial_count_with_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01143_trivial_count_with_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01143_trivial_count_with_join/query.sql b/parser/testdata/01143_trivial_count_with_join/query.sql new file mode 100644 index 000000000..d31750e37 --- /dev/null +++ b/parser/testdata/01143_trivial_count_with_join/query.sql @@ -0,0 +1,10 @@ +drop table if exists t; +create table t engine Memory as select * from numbers(2); + +select count(*) from t, numbers(2) r; +select count(*) from t cross join numbers(2) r; +select count() from t cross join numbers(2) r; +select count(t.number) from t cross join numbers(2) r; +select count(r.number) from t cross join numbers(2) r; + +drop table t; diff --git a/parser/testdata/01144_join_rewrite_with_ambiguous_column_and_view/ast.json 
b/parser/testdata/01144_join_rewrite_with_ambiguous_column_and_view/ast.json new file mode 100644 index 000000000..620a72777 --- /dev/null +++ b/parser/testdata/01144_join_rewrite_with_ambiguous_column_and_view/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001215128, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01144_join_rewrite_with_ambiguous_column_and_view/metadata.json b/parser/testdata/01144_join_rewrite_with_ambiguous_column_and_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01144_join_rewrite_with_ambiguous_column_and_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01144_join_rewrite_with_ambiguous_column_and_view/query.sql b/parser/testdata/01144_join_rewrite_with_ambiguous_column_and_view/query.sql new file mode 100644 index 000000000..d73d438d9 --- /dev/null +++ b/parser/testdata/01144_join_rewrite_with_ambiguous_column_and_view/query.sql @@ -0,0 +1,34 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS view1; + +CREATE TABLE t1 (id UInt32, value1 String) ENGINE MergeTree() ORDER BY id; +CREATE TABLE t2 (id UInt32, value2 String) ENGINE MergeTree() ORDER BY id; +CREATE TABLE t3 (id UInt32, value3 String) ENGINE MergeTree() ORDER BY id; + +INSERT INTO t1 (id, value1) VALUES (1, 'val11'); +INSERT INTO t2 (id, value2) VALUES (1, 'val21'); +INSERT INTO t3 (id, value3) VALUES (1, 'val31'); + +SET enable_optimize_predicate_expression = 1; + +SELECT t1.id, t2.id as id, t3.id as value +FROM (select number as id, 42 as value from numbers(4)) t1 +LEFT JOIN (select number as id, 42 as value from numbers(3)) t2 ON t1.id = t2.id +LEFT JOIN (select number as id, 42 as value from numbers(2)) t3 ON t1.id = t3.id +WHERE id > 0 AND value < 42 ORDER BY id; + +CREATE VIEW IF NOT EXISTS view1 AS + SELECT t1.id AS id, t1.value1 AS value1, t2.value2 AS value2, t3.value3 AS value3 + FROM t1 + LEFT JOIN t2 ON t1.id = t2.id + LEFT JOIN t3 ON t1.id = t3.id + WHERE t1.id > 0; + +SELECT * FROM view1 WHERE id = 1 ORDER BY id; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS view1; diff --git a/parser/testdata/01144_multiple_joins_rewriter_v2_and_lambdas/ast.json b/parser/testdata/01144_multiple_joins_rewriter_v2_and_lambdas/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01144_multiple_joins_rewriter_v2_and_lambdas/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01144_multiple_joins_rewriter_v2_and_lambdas/metadata.json b/parser/testdata/01144_multiple_joins_rewriter_v2_and_lambdas/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01144_multiple_joins_rewriter_v2_and_lambdas/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01144_multiple_joins_rewriter_v2_and_lambdas/query.sql b/parser/testdata/01144_multiple_joins_rewriter_v2_and_lambdas/query.sql new file mode 100644 index 000000000..6a5ba042c --- /dev/null +++ b/parser/testdata/01144_multiple_joins_rewriter_v2_and_lambdas/query.sql @@ -0,0 +1,68 @@ +select + arrayMap(x, y -> floor((y - x) / x, 3), l, r) diff_percent, + test, query +from (select [1] l) s1, + (select [2] r) s2, + (select 'test' test, 
'query' query) any_query, + (select 1 ) check_single_query; + +select + arrayMap(x -> floor(x, 4), original_medians_array.medians_by_version[1] as l) l_rounded, + arrayMap(x -> floor(x, 4), original_medians_array.medians_by_version[2] as r) r_rounded, + arrayMap(x, y -> floor((y - x) / x, 3), l, r) diff_percent, + test, query +from (select 1) rd, + (select [[1,2], [3,4]] medians_by_version) original_medians_array, + (select 'test' test, 'query' query) any_query, + (select 1 as A) check_single_query; + +drop table if exists table; +create table table(query String, test String, run UInt32, metrics Array(UInt32), version UInt32) engine Memory; + +select + arrayMap(x -> floor(x, 4), original_medians_array.medians_by_version[1] as l) l_rounded, + arrayMap(x -> floor(x, 4), original_medians_array.medians_by_version[2] as r) r_rounded, + arrayMap(x, y -> floor((y - x) / x, 3), l, r) diff_percent, + arrayMap(x, y -> floor(x / y, 3), threshold, l) threshold_percent, + test, query +from +( + select quantileExactForEach(0.999)(arrayMap(x, y -> abs(x - y), metrics_by_label[1], metrics_by_label[2]) as d) threshold + from + ( + select virtual_run, groupArrayInsertAt(median_metrics, random_label) metrics_by_label + from + ( + select medianExactForEach(metrics) median_metrics, virtual_run, random_label + from + ( + select *, toUInt32(rowNumberInAllBlocks() % 2) random_label + from + ( + select metrics, number virtual_run + from (select metrics, run, version from table) no_query, numbers(1, 100000) nn + order by virtual_run, rand() + ) virtual_runs + ) relabeled + group by virtual_run, random_label + ) virtual_medians + group by virtual_run + ) virtual_medians_array +) rd, +( + select groupArrayInsertAt(median_metrics, version) medians_by_version + from + ( + select medianExactForEach(metrics) median_metrics, version + from table + group by version + ) original_medians +) original_medians_array, +( + select any(test) test, any(query) query from table +) any_query, +( + select throwIf(uniq((test, query))) from table +) check_single_query; + +drop table table; diff --git a/parser/testdata/01144_multiword_data_types/ast.json b/parser/testdata/01144_multiword_data_types/ast.json new file mode 100644 index 000000000..94dd63d50 --- /dev/null +++ b/parser/testdata/01144_multiword_data_types/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery multiword_types (children 1)" + }, + { + "explain": " Identifier multiword_types" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001604154, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/01144_multiword_data_types/metadata.json b/parser/testdata/01144_multiword_data_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01144_multiword_data_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01144_multiword_data_types/query.sql b/parser/testdata/01144_multiword_data_types/query.sql new file mode 100644 index 000000000..56def658a --- /dev/null +++ b/parser/testdata/01144_multiword_data_types/query.sql @@ -0,0 +1,51 @@ +DROP TABLE IF EXISTS multiword_types; +DROP TABLE IF EXISTS unsigned_types; + +CREATE TABLE multiword_types ( + a DOUBLE, + b DOUBLE PRECISION, + c CHAR DEFAULT 'str', + d CHAR VARYING, + e CHAR LARGE OBJECT COMMENT 'comment', + f CHARACTER VARYING(123), + g ChArAcTeR large OBJECT, + h nchar varying (456) default toString(a) comment 'comment', + i NCHAR LARGE 
OBJECT, + j BINARY LARGE OBJECT, + k BINARY VARYING, + l NATIONAL CHAR, + m NATIONAL CHARACTER, + n NATIONAL CHARACTER LARGE OBJECT, + o NATIONAL CHARACTER VARYING, + p NATIONAL CHAR VARYING +) ENGINE=Memory; + +SHOW CREATE TABLE multiword_types; + +INSERT INTO multiword_types(a) VALUES (1); +SELECT toTypeName((*,)) FROM multiword_types SETTINGS enable_named_columns_in_function_tuple = 0; + +CREATE TABLE unsigned_types ( + a TINYINT SIGNED, + b INT1 SIGNED, + c SMALLINT SIGNED, + d INT SIGNED, + e INTEGER SIGNED, + f BIGINT SIGNED, + g TINYINT UNSIGNED, + h INT1 UNSIGNED, + i SMALLINT UNSIGNED, + j INT UNSIGNED, + k INTEGER UNSIGNED, + l BIGINT UNSIGNED +) ENGINE=Memory; + +SHOW CREATE TABLE unsigned_types; + +INSERT INTO unsigned_types(a) VALUES (1); +SELECT toTypeName((*,)) FROM unsigned_types SETTINGS enable_named_columns_in_function_tuple = 0; + +SELECT CAST('42' AS DOUBLE PRECISION), CAST(42, 'NATIONAL CHARACTER VARYING'), CAST(-1 AS tinyint UnSiGnEd), CAST(65535, ' sMaLlInT signed '); + +DROP TABLE multiword_types; +DROP TABLE unsigned_types; diff --git a/parser/testdata/01145_with_fill_const/ast.json b/parser/testdata/01145_with_fill_const/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01145_with_fill_const/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01145_with_fill_const/metadata.json b/parser/testdata/01145_with_fill_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01145_with_fill_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01145_with_fill_const/query.sql b/parser/testdata/01145_with_fill_const/query.sql new file mode 100644 index 000000000..531d202c0 --- /dev/null +++ b/parser/testdata/01145_with_fill_const/query.sql @@ -0,0 +1,6 @@ +WITH toDateTime('2020-06-16 03:00:00') AS date_time +SELECT date_time ORDER BY date_time ASC +WITH FILL + FROM toDateTime('2020-06-16 00:00:00') + TO toDateTime('2020-06-16 10:00:00') + STEP 1800; diff --git a/parser/testdata/01147_partial_merge_full_join/ast.json b/parser/testdata/01147_partial_merge_full_join/ast.json new file mode 100644 index 000000000..4b217f2d2 --- /dev/null +++ b/parser/testdata/01147_partial_merge_full_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001334466, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01147_partial_merge_full_join/metadata.json b/parser/testdata/01147_partial_merge_full_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01147_partial_merge_full_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01147_partial_merge_full_join/query.sql b/parser/testdata/01147_partial_merge_full_join/query.sql new file mode 100644 index 000000000..0d5eb1333 --- /dev/null +++ b/parser/testdata/01147_partial_merge_full_join/query.sql @@ -0,0 +1,131 @@ +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t0 (x UInt32, y UInt64) engine = MergeTree ORDER BY (x,y); +CREATE TABLE t1 (x UInt32, y UInt64) engine = MergeTree ORDER BY (x,y); +CREATE TABLE t2 (x UInt32, y UInt64) engine = MergeTree ORDER BY (x,y); + +INSERT INTO t1 (x, y) VALUES (0, 0); + +SET join_algorithm = 'partial_merge'; + +SELECT 't join none using'; 
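+-- Note: with join_algorithm = 'partial_merge', ANY RIGHT and ANY FULL JOIN are expected to fail with NOT_IMPLEMENTED (per the annotations below), while plain RIGHT and FULL JOIN succeed.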
+SELECT * FROM t1 ANY RIGHT JOIN t0 USING (x) ORDER BY x; -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t1 ANY FULL JOIN t0 USING (x) ORDER BY x; -- { serverError NOT_IMPLEMENTED } +SELECT '-'; +SELECT * FROM t1 RIGHT JOIN t0 USING (x) ORDER BY x; +SELECT '-'; +SELECT * FROM t1 FULL JOIN t0 USING (x) ORDER BY x; +SELECT 't join none on'; +SELECT * FROM t1 ANY RIGHT JOIN t0 ON t1.x = t0.x ORDER BY x; -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t1 ANY FULL JOIN t0 ON t1.x = t0.x ORDER BY x; -- { serverError NOT_IMPLEMENTED } +SELECT '-'; +SELECT * FROM t1 RIGHT JOIN t0 ON t1.x = t0.x ORDER BY x; +SELECT '-'; +SELECT * FROM t1 FULL JOIN t0 ON t1.x = t0.x ORDER BY x; +SELECT 'none join t using'; +SELECT * FROM t0 ANY RIGHT JOIN t1 USING (x); -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t0 ANY FULL JOIN t1 USING (x); -- { serverError NOT_IMPLEMENTED } +SELECT '-'; +SELECT * FROM t0 RIGHT JOIN t1 USING (x); +SELECT '-'; +SELECT * FROM t0 FULL JOIN t1 USING (x); +SELECT 'none join t on'; +SELECT * FROM t0 ANY RIGHT JOIN t1 ON t1.x = t0.x; -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t0 ANY FULL JOIN t1 ON t1.x = t0.x; -- { serverError NOT_IMPLEMENTED } +SELECT '-'; +SELECT * FROM t0 RIGHT JOIN t1 ON t1.x = t0.x; +SELECT '-'; +SELECT * FROM t0 FULL JOIN t1 ON t1.x = t0.x; +SELECT '/none'; + +SET join_use_nulls = 1; + +SELECT 't join none using'; +SELECT * FROM t1 ANY RIGHT JOIN t0 USING (x) ORDER BY x; -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t1 ANY FULL JOIN t0 USING (x) ORDER BY x; -- { serverError NOT_IMPLEMENTED } +SELECT '-'; +SELECT * FROM t1 RIGHT JOIN t0 USING (x) ORDER BY x; +SELECT '-'; +SELECT * FROM t1 FULL JOIN t0 USING (x) ORDER BY x; +SELECT 't join none on'; +SELECT * FROM t1 ANY RIGHT JOIN t0 ON t1.x = t0.x ORDER BY x; -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t1 ANY FULL JOIN t0 ON t1.x = t0.x ORDER BY x; -- { serverError NOT_IMPLEMENTED } +SELECT '-'; +SELECT * FROM t1 RIGHT JOIN t0 ON t1.x = t0.x ORDER BY x; +SELECT '-'; +SELECT * FROM t1 FULL JOIN t0 ON t1.x = t0.x ORDER BY x; +SELECT 'none join t using'; +SELECT * FROM t0 ANY RIGHT JOIN t1 USING (x); -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t0 ANY FULL JOIN t1 USING (x); -- { serverError NOT_IMPLEMENTED } +SELECT '-'; +SELECT * FROM t0 RIGHT JOIN t1 USING (x); +SELECT '-'; +SELECT * FROM t0 FULL JOIN t1 USING (x); +SELECT 'none join t on'; +SELECT * FROM t0 ANY RIGHT JOIN t1 ON t1.x = t0.x; -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t0 ANY FULL JOIN t1 ON t1.x = t0.x; -- { serverError NOT_IMPLEMENTED } +SELECT '-'; +SELECT * FROM t0 RIGHT JOIN t1 ON t1.x = t0.x; +SELECT '-'; +SELECT * FROM t0 FULL JOIN t1 ON t1.x = t0.x; +SELECT '/none'; + +INSERT INTO t1 (x, y) VALUES (1, 10) (2, 20); +INSERT INTO t1 (x, y) VALUES (4, 40) (3, 30); + +INSERT INTO t2 (x, y) VALUES (4, 41) (2, 21) (2, 22); +INSERT INTO t2 (x, y) VALUES (0, 0) (5, 50) (4, 42); + +SET join_use_nulls = 0; + +SELECT 'all right'; +SELECT t1.*, t2.* FROM t1 RIGHT JOIN t2 ON t1.x = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 RIGHT JOIN t2 ON t1.y = t2.y ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 RIGHT JOIN t2 ON t1.x = t2.x AND t1.y = t2.y ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 RIGHT JOIN t2 ON t1.x = t2.x AND toUInt32(intDiv(t1.y,10)) = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 RIGHT JOIN t2 ON t1.x = t2.x AND toUInt64(t1.x) = intDiv(t2.y,10) ORDER BY x, t2.y; + +SELECT 'all full'; +SELECT t1.*, t2.* FROM t1 FULL JOIN t2 ON t1.x = t2.x ORDER 
BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 FULL JOIN t2 ON t1.y = t2.y ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 FULL JOIN t2 ON t1.x = t2.x AND t1.y = t2.y ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 FULL JOIN t2 ON t1.x = t2.x AND toUInt32(intDiv(t1.y,10)) = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 FULL JOIN t2 ON t1.x = t2.x AND toUInt64(t1.x) = intDiv(t2.y,10) ORDER BY x, t2.y; + +SET join_use_nulls = 1; + +SELECT 'all right'; +SELECT t1.*, t2.* FROM t1 RIGHT JOIN t2 ON t1.x = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 RIGHT JOIN t2 ON t1.y = t2.y ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 RIGHT JOIN t2 ON t1.x = t2.x AND t1.y = t2.y ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 RIGHT JOIN t2 ON t1.x = t2.x AND toUInt32(intDiv(t1.y,10)) = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 RIGHT JOIN t2 ON t1.x = t2.x AND toUInt64(t1.x) = intDiv(t2.y,10) ORDER BY x, t2.y; + +SELECT 'all full'; +SELECT t1.*, t2.* FROM t1 FULL JOIN t2 ON t1.x = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 FULL JOIN t2 ON t1.y = t2.y ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 FULL JOIN t2 ON t1.x = t2.x AND t1.y = t2.y ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 FULL JOIN t2 ON t1.x = t2.x AND toUInt32(intDiv(t1.y,10)) = t2.x ORDER BY x, t2.y; +SELECT '-'; +SELECT t1.*, t2.* FROM t1 FULL JOIN t2 ON t1.x = t2.x AND toUInt64(t1.x) = intDiv(t2.y,10) ORDER BY x, t2.y; + +DROP TABLE t0; +DROP TABLE t1; +DROP TABLE t2; diff --git a/parser/testdata/01148_zookeeper_path_macros_unfolding/ast.json b/parser/testdata/01148_zookeeper_path_macros_unfolding/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01148_zookeeper_path_macros_unfolding/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01148_zookeeper_path_macros_unfolding/metadata.json b/parser/testdata/01148_zookeeper_path_macros_unfolding/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01148_zookeeper_path_macros_unfolding/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01148_zookeeper_path_macros_unfolding/query.sql b/parser/testdata/01148_zookeeper_path_macros_unfolding/query.sql new file mode 100644 index 000000000..c689542e4 --- /dev/null +++ b/parser/testdata/01148_zookeeper_path_macros_unfolding/query.sql @@ -0,0 +1,53 @@ +-- Tags: zookeeper, no-replicated-database, no-parallel, no-ordinary-database + +SET send_logs_level = 'fatal'; + +DROP TABLE IF EXISTS rmt; +DROP TABLE IF EXISTS rmt1; +DROP TABLE IF EXISTS rmt2; +DROP TABLE IF EXISTS rmt3; + +SET database_replicated_allow_replicated_engine_arguments=1; + +CREATE TABLE rmt (n UInt64, s String) ENGINE = ReplicatedMergeTree('/clickhouse/test_01148/{shard}/{database}/{table}', '{replica}') ORDER BY n; +SHOW CREATE TABLE rmt; +RENAME TABLE rmt TO rmt1; +DETACH TABLE rmt1; +ATTACH TABLE rmt1; +SHOW CREATE TABLE rmt1; + +CREATE TABLE rmt (n UInt64, s String) ENGINE = ReplicatedMergeTree('{default_path_test}{uuid}', '{default_name_test}') ORDER BY n; -- { serverError BAD_ARGUMENTS } +CREATE TABLE rmt (n UInt64, s String) ENGINE = ReplicatedMergeTree('{default_path_test}test_01148', '{default_name_test}') ORDER BY n; +SHOW CREATE TABLE rmt; +RENAME TABLE rmt TO rmt2; -- { serverError NOT_IMPLEMENTED } +DETACH TABLE rmt; +ATTACH TABLE rmt; +SHOW CREATE TABLE rmt; + +SET distributed_ddl_output_mode='none'; +DROP 
DATABASE IF EXISTS test_01148_atomic; +CREATE DATABASE test_01148_atomic ENGINE=Atomic; +CREATE TABLE test_01148_atomic.rmt2 ON CLUSTER test_shard_localhost (n int, PRIMARY KEY n) ENGINE=ReplicatedMergeTree; +CREATE TABLE test_01148_atomic.rmt3 AS test_01148_atomic.rmt2; -- { serverError BAD_ARGUMENTS } +CREATE TABLE test_01148_atomic.rmt4 ON CLUSTER test_shard_localhost AS test_01148_atomic.rmt2; +SHOW CREATE TABLE test_01148_atomic.rmt2; +RENAME TABLE test_01148_atomic.rmt4 to test_01148_atomic.rmt3; +SHOW CREATE TABLE test_01148_atomic.rmt3; + +DROP DATABASE IF EXISTS test_01148_ordinary; +set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. +CREATE DATABASE test_01148_ordinary ENGINE=Ordinary; +RENAME TABLE test_01148_atomic.rmt3 to test_01148_ordinary.rmt3; -- { serverError NOT_IMPLEMENTED } +DROP DATABASE test_01148_ordinary; +DROP DATABASE test_01148_atomic; + +DROP TABLE rmt; +DROP TABLE rmt1; + +DROP DATABASE IF EXISTS imdb_01148; +CREATE DATABASE imdb_01148 ENGINE = Replicated('/test/databases/imdb_01148', '{shard}', '{replica}'); +CREATE TABLE imdb_01148.movie_directors (`director_id` UInt64, `movie_id` UInt64) ENGINE = ReplicatedMergeTree ORDER BY (director_id, movie_id) SETTINGS index_granularity = 8192; +CREATE TABLE imdb_01148.anything AS imdb_01148.movie_directors; +SHOW CREATE TABLE imdb_01148.anything; +DROP DATABASE imdb_01148; diff --git a/parser/testdata/01149_zookeeper_mutation_stuck_after_replace_partition/ast.json b/parser/testdata/01149_zookeeper_mutation_stuck_after_replace_partition/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01149_zookeeper_mutation_stuck_after_replace_partition/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01149_zookeeper_mutation_stuck_after_replace_partition/metadata.json b/parser/testdata/01149_zookeeper_mutation_stuck_after_replace_partition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01149_zookeeper_mutation_stuck_after_replace_partition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01149_zookeeper_mutation_stuck_after_replace_partition/query.sql b/parser/testdata/01149_zookeeper_mutation_stuck_after_replace_partition/query.sql new file mode 100644 index 000000000..bd11b24d5 --- /dev/null +++ b/parser/testdata/01149_zookeeper_mutation_stuck_after_replace_partition/query.sql @@ -0,0 +1,47 @@ +-- Tags: zookeeper + +SET insert_keeper_fault_injection_probability=0; -- disable fault injection; part ids are non-deterministic in case of insert retries + +set send_logs_level='error'; +drop table if exists mt; +drop table if exists rmt sync; + +create table mt (n UInt64, s String) engine = MergeTree partition by intDiv(n, 10) order by n; +insert into mt values (3, '3'), (4, '4'); + +create table rmt (n UInt64, s String) engine = ReplicatedMergeTree('/clickhouse/test_01149_{database}/rmt', 'r1') partition by intDiv(n, 10) order by n; +insert into rmt values (1, '1'), (2, '2'); + +select * from rmt; +select * from mt; +select table, partition_id, name, rows from system.parts where database=currentDatabase() and table in ('mt', 'rmt') and active=1 order by table, name; + +SET mutations_sync = 1; +alter table rmt update s = 's'||toString(n) where 1; + +select * from rmt; +alter table rmt replace partition '0' from mt; + +system sync replica rmt; + +select table, partition_id, name, rows from system.parts where database=currentDatabase() and table in 
('mt', 'rmt') and active=1 order by table, name; + +alter table rmt drop column s; + +select mutation_id, command, parts_to_do_names, parts_to_do, is_done from system.mutations where database=currentDatabase() and table='rmt'; +select * from rmt; + +drop table rmt sync; + +set replication_alter_partitions_sync=0; +create table rmt (n UInt64, s String) engine = ReplicatedMergeTree('/clickhouse/test_01149_{database}/rmt', 'r1') partition by intDiv(n, 10) order by n; +insert into rmt values (1,'1'), (2, '2'); + +alter table rmt update s = 's'||toString(n) where 1; +alter table rmt drop partition '0'; + +set replication_alter_partitions_sync=1; +alter table rmt drop column s; + +drop table mt; +drop table rmt sync; diff --git a/parser/testdata/01151_storage_merge_filter_tables_by_virtual_column/ast.json b/parser/testdata/01151_storage_merge_filter_tables_by_virtual_column/ast.json new file mode 100644 index 000000000..246ab7ec1 --- /dev/null +++ b/parser/testdata/01151_storage_merge_filter_tables_by_virtual_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery src_table_1 (children 1)" + }, + { + "explain": " Identifier src_table_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001313549, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/01151_storage_merge_filter_tables_by_virtual_column/metadata.json b/parser/testdata/01151_storage_merge_filter_tables_by_virtual_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01151_storage_merge_filter_tables_by_virtual_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01151_storage_merge_filter_tables_by_virtual_column/query.sql b/parser/testdata/01151_storage_merge_filter_tables_by_virtual_column/query.sql new file mode 100644 index 000000000..2a2507256 --- /dev/null +++ b/parser/testdata/01151_storage_merge_filter_tables_by_virtual_column/query.sql @@ -0,0 +1,26 @@ +drop table if exists src_table_1; +drop table if exists src_table_2; +drop table if exists src_table_3; +drop table if exists set; + +create table src_table_1 (n UInt64) engine=Memory as select * from numbers(10); +create table src_table_2 (n UInt64) engine=Log as select number * 10 from numbers(10); +create table src_table_3 (n UInt64) engine=MergeTree order by n as select number * 100 from numbers(10); +create table set (s String) engine=Set as select arrayJoin(['src_table_1', 'src_table_2']); + +create temporary table tmp (s String); +insert into tmp values ('src_table_1'), ('src_table_3'); + +select count(), sum(n) from merge(currentDatabase(), 'src_table'); +-- FIXME #21401 select count(), sum(n) from merge(currentDatabase(), 'src_table') where _table = 'src_table_1' or toInt8(substr(_table, 11, 1)) = 2; +select count(), sum(n) from merge(currentDatabase(), 'src_table') where _table in ('src_table_2', 'src_table_3'); +select count(), sum(n) from merge(currentDatabase(), 'src_table') where _table in ('src_table_2', 'src_table_3') and n % 20 = 0; +select count(), sum(n) from merge(currentDatabase(), 'src_table') where _table in set; +select count(), sum(n) from merge(currentDatabase(), 'src_table') where _table in tmp; +select count(), sum(n) from merge(currentDatabase(), 'src_table') where _table in set and n % 2 = 0; +select count(), sum(n) from merge(currentDatabase(), 'src_table') where n % 2 = 0 and _table in tmp; + +drop table src_table_1; +drop table src_table_2; +drop 
table src_table_3; +drop table set; diff --git a/parser/testdata/01152_cross_replication/ast.json b/parser/testdata/01152_cross_replication/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01152_cross_replication/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01152_cross_replication/metadata.json b/parser/testdata/01152_cross_replication/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01152_cross_replication/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01152_cross_replication/query.sql b/parser/testdata/01152_cross_replication/query.sql new file mode 100644 index 000000000..40d480922 --- /dev/null +++ b/parser/testdata/01152_cross_replication/query.sql @@ -0,0 +1,34 @@ +-- Tags: replica, no-parallel + +DROP DATABASE IF EXISTS shard_0; +DROP DATABASE IF EXISTS shard_1; +SET distributed_ddl_output_mode='none'; +DROP TABLE IF EXISTS demo_loan_01568_dist; + +CREATE DATABASE shard_0; +CREATE DATABASE shard_1; + +CREATE TABLE demo_loan_01568 ON CLUSTER test_cluster_two_shards_different_databases ( `id` Int64 COMMENT 'id', `date_stat` Date COMMENT 'date of stat', `customer_no` String COMMENT 'customer no', `loan_principal` Float64 COMMENT 'loan principal' ) ENGINE=ReplacingMergeTree() ORDER BY id PARTITION BY toYYYYMM(date_stat); -- { serverError NOT_IMPLEMENTED } +SET distributed_ddl_entry_format_version = 2; +CREATE TABLE demo_loan_01568 ON CLUSTER test_cluster_two_shards_different_databases ( `id` Int64 COMMENT 'id', `date_stat` Date COMMENT 'date of stat', `customer_no` String COMMENT 'customer no', `loan_principal` Float64 COMMENT 'loan principal' ) ENGINE=ReplacingMergeTree() ORDER BY id PARTITION BY toYYYYMM(date_stat); -- { serverError INCONSISTENT_CLUSTER_DEFINITION } +SET distributed_ddl_output_mode='throw'; +CREATE TABLE shard_0.demo_loan_01568 ON CLUSTER test_cluster_two_shards_different_databases ( `id` Int64 COMMENT 'id', `date_stat` Date COMMENT 'date of stat', `customer_no` String COMMENT 'customer no', `loan_principal` Float64 COMMENT 'loan principal' ) ENGINE=ReplacingMergeTree() ORDER BY id PARTITION BY toYYYYMM(date_stat); +CREATE TABLE shard_1.demo_loan_01568 ON CLUSTER test_cluster_two_shards_different_databases ( `id` Int64 COMMENT 'id', `date_stat` Date COMMENT 'date of stat', `customer_no` String COMMENT 'customer no', `loan_principal` Float64 COMMENT 'loan principal' ) ENGINE=ReplacingMergeTree() ORDER BY id PARTITION BY toYYYYMM(date_stat); +SET distributed_ddl_output_mode='none'; + +SHOW TABLES FROM shard_0; +SHOW TABLES FROM shard_1; +SHOW CREATE TABLE shard_0.demo_loan_01568; +SHOW CREATE TABLE shard_1.demo_loan_01568; + +CREATE TABLE demo_loan_01568_dist AS shard_0.demo_loan_01568 ENGINE=Distributed('test_cluster_two_shards_different_databases', '', 'demo_loan_01568', id % 2); +INSERT INTO demo_loan_01568_dist VALUES (1, '2021-04-13', 'qwerty', 3.14159), (2, '2021-04-14', 'asdfgh', 2.71828); +SYSTEM FLUSH DISTRIBUTED demo_loan_01568_dist; +SELECT * FROM demo_loan_01568_dist ORDER BY id; + +SELECT * FROM shard_0.demo_loan_01568; +SELECT * FROM shard_1.demo_loan_01568; + +DROP DATABASE shard_0; +DROP DATABASE shard_1; +DROP TABLE demo_loan_01568_dist; diff --git a/parser/testdata/01153_attach_mv_uuid/ast.json b/parser/testdata/01153_attach_mv_uuid/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01153_attach_mv_uuid/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/01153_attach_mv_uuid/metadata.json b/parser/testdata/01153_attach_mv_uuid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01153_attach_mv_uuid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01153_attach_mv_uuid/query.sql b/parser/testdata/01153_attach_mv_uuid/query.sql new file mode 100644 index 000000000..0ef164490 --- /dev/null +++ b/parser/testdata/01153_attach_mv_uuid/query.sql @@ -0,0 +1,46 @@ +-- Tags: no-ordinary-database, no-parallel + +DROP TABLE IF EXISTS src; +DROP TABLE IF EXISTS mv; +DROP TABLE IF EXISTS ".inner_id.e15f3ab5-6cae-4df3-b879-f40deafd82c2"; + +CREATE TABLE src (n UInt64) ENGINE=MergeTree ORDER BY n; +CREATE MATERIALIZED VIEW mv (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n AS SELECT n, n * n AS n2 FROM src; +INSERT INTO src VALUES (1), (2); +SELECT * FROM mv ORDER BY n; +DETACH TABLE mv; +ATTACH TABLE mv; +INSERT INTO src VALUES (3), (4); +SELECT * FROM mv ORDER BY n; +DROP TABLE mv SYNC; + +SET database_replicated_allow_explicit_uuid=3; + +SET show_table_uuid_in_table_create_query_if_not_nil=1; +CREATE TABLE ".inner_id.e15f3ab5-6cae-4df3-b879-f40deafd82c2" (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n; +ATTACH MATERIALIZED VIEW mv UUID 'e15f3ab5-6cae-4df3-b879-f40deafd82c2' (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n AS SELECT n, n * n AS n2 FROM src; +SHOW CREATE TABLE mv; +INSERT INTO src VALUES (1), (2); +SELECT * FROM mv ORDER BY n; +DETACH TABLE mv; +ATTACH TABLE mv; +SHOW CREATE TABLE mv; +INSERT INTO src VALUES (3), (4); +SELECT * FROM mv ORDER BY n; +DROP TABLE mv SYNC; + +CREATE TABLE ".inner_id.e15f3ab5-6cae-4df3-b879-f40deafd82c2" UUID '3bd68e3c-2693-4352-ad66-a66eba9e345e' (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n; +ATTACH MATERIALIZED VIEW mv UUID 'e15f3ab5-6cae-4df3-b879-f40deafd82c2' TO INNER UUID '3bd68e3c-2693-4352-ad66-a66eba9e345e' (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n AS SELECT n, n * n AS n2 FROM src; +SHOW CREATE TABLE mv; +INSERT INTO src VALUES (1), (2); +SELECT * FROM mv ORDER BY n; +DETACH TABLE mv; +ATTACH TABLE mv; +SHOW CREATE TABLE mv; +INSERT INTO src VALUES (3), (4); +SELECT * FROM mv ORDER BY n; +DROP TABLE mv SYNC; + +ATTACH MATERIALIZED VIEW mv UUID '3bd68e3c-2693-4352-ad66-a66eba9e345e' TO INNER UUID '3bd68e3c-2693-4352-ad66-a66eba9e345e' (n Int32, n2 Int64) ENGINE = MergeTree PARTITION BY n % 10 ORDER BY n AS SELECT n, n * n AS n2 FROM src; -- { serverError BAD_ARGUMENTS } + +DROP TABLE src; diff --git a/parser/testdata/01155_rename_move_materialized_view/ast.json b/parser/testdata/01155_rename_move_materialized_view/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01155_rename_move_materialized_view/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01155_rename_move_materialized_view/metadata.json b/parser/testdata/01155_rename_move_materialized_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01155_rename_move_materialized_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01155_rename_move_materialized_view/query.sql b/parser/testdata/01155_rename_move_materialized_view/query.sql new file mode 100644 index 000000000..930e01e40 --- /dev/null +++ b/parser/testdata/01155_rename_move_materialized_view/query.sql @@ -0,0 +1,100 @@ +-- Tags: no-parallel + +SET 
enable_analyzer = 1; +SET send_logs_level = 'fatal'; +SET prefer_localhost_replica = 1; + +DROP DATABASE IF EXISTS test_01155_ordinary; +DROP DATABASE IF EXISTS test_01155_atomic; + +set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. +CREATE DATABASE test_01155_ordinary ENGINE=Ordinary; +CREATE DATABASE test_01155_atomic ENGINE=Atomic; + +USE test_01155_ordinary; +CREATE TABLE src (s String, x String DEFAULT 'a') ENGINE=MergeTree() PARTITION BY tuple() ORDER BY s; +CREATE MATERIALIZED VIEW mv1 (s String, x String DEFAULT 'b') ENGINE=MergeTree() PARTITION BY tuple() ORDER BY s AS SELECT (*,).1 || 'mv1' as s FROM src; +CREATE TABLE dst (s String, x String DEFAULT 'c') ENGINE=MergeTree() PARTITION BY tuple() ORDER BY s; +CREATE MATERIALIZED VIEW mv2 TO dst (s String, x String DEFAULT 'd') AS SELECT (*,).1 || 'mv2' as s FROM src; +CREATE TABLE dist (s String, x String DEFAULT 'asdf') ENGINE=Distributed(test_shard_localhost, test_01155_ordinary, src); +INSERT INTO dist(s) VALUES ('before moving tables'); +SYSTEM FLUSH DISTRIBUTED dist; + +CREATE DICTIONARY dict (s String, x String DEFAULT 'qwerty') PRIMARY KEY s +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'dist' DB 'test_01155_ordinary')) +LIFETIME(MIN 0 MAX 2) LAYOUT(COMPLEX_KEY_CACHE(SIZE_IN_CELLS 123)); + +-- FIXME Cannot convert column `1` because it is non constant in source stream but must be constant in result +SELECT * FROM (SELECT materialize(1), substr(_table, 1, 10) as _table, s FROM merge('test_01155_ordinary', '')) ORDER BY _table, s; +SELECT dictGet('test_01155_ordinary.dict', 'x', 'before moving tables'); + +RENAME DICTIONARY test_01155_ordinary.dict TO test_01155_ordinary.dict1; +SELECT dictGet('test_01155_ordinary.dict1', 'x', 'before moving tables'); +SELECT database, name, uuid FROM system.dictionaries WHERE database='test_01155_ordinary'; +RENAME TABLE test_01155_ordinary.dict1 TO test_01155_ordinary.dict; +SELECT dictGet('test_01155_ordinary.dict', 'x', 'before moving tables'); + +-- Move tables with materialized views from Ordinary to Atomic +SELECT 'ordinary:'; +SHOW TABLES FROM test_01155_ordinary; +RENAME TABLE test_01155_ordinary.mv1 TO test_01155_atomic.mv1; +RENAME TABLE test_01155_ordinary.mv2 TO test_01155_atomic.mv2; +RENAME TABLE test_01155_ordinary.dst TO test_01155_atomic.dst; +RENAME TABLE test_01155_ordinary.src TO test_01155_atomic.src; +SET check_table_dependencies=0; -- Otherwise we'll get error "test_01155_ordinary.dict depends on test_01155_ordinary.dist" in the next line. 
+RENAME TABLE test_01155_ordinary.dist TO test_01155_atomic.dist; +SET check_table_dependencies=1; +RENAME DICTIONARY test_01155_ordinary.dict TO test_01155_atomic.dict; +SELECT 'ordinary after rename:'; +SELECT substr(name, 1, 10) FROM system.tables WHERE database='test_01155_ordinary'; +SELECT 'atomic after rename:'; +SELECT substr(name, 1, 10) FROM system.tables WHERE database='test_01155_atomic'; +DROP DATABASE test_01155_ordinary; +USE default; + +INSERT INTO test_01155_atomic.src(s) VALUES ('after moving tables'); +SELECT materialize(2), substr(_table, 1, 10), s FROM merge('test_01155_atomic', '') ORDER BY _table, s; -- { serverError UNKNOWN_DATABASE } +SELECT dictGet('test_01155_ordinary.dict', 'x', 'after moving tables'); -- { serverError BAD_ARGUMENTS } + +RENAME DATABASE test_01155_atomic TO test_01155_ordinary; +USE test_01155_ordinary; + +INSERT INTO dist(s) VALUES ('after renaming database'); +SYSTEM FLUSH DISTRIBUTED dist; +SELECT * FROM (SELECT materialize(3), substr(_table, 1, 10) as _table, s FROM merge('test_01155_ordinary', '')) ORDER BY _table, s; +SELECT dictGet('test_01155_ordinary.dict', 'x', 'after renaming database'); + +SELECT database, substr(name, 1, 10) FROM system.tables WHERE database like 'test_01155_%'; + +-- Move tables back +SET check_table_dependencies=0; -- Otherwise we'll get error "test_01155_ordinary.dict depends on test_01155_ordinary.dist" in the next line. +RENAME DATABASE test_01155_ordinary TO test_01155_atomic; +SET check_table_dependencies=1; + +set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. +SET send_logs_level='fatal'; +CREATE DATABASE test_01155_ordinary ENGINE=Ordinary; +SET send_logs_level='warning'; +SHOW CREATE DATABASE test_01155_atomic; + +RENAME TABLE test_01155_atomic.mv1 TO test_01155_ordinary.mv1; +RENAME TABLE test_01155_atomic.mv2 TO test_01155_ordinary.mv2; +RENAME TABLE test_01155_atomic.dst TO test_01155_ordinary.dst; +RENAME TABLE test_01155_atomic.src TO test_01155_ordinary.src; +RENAME TABLE test_01155_atomic.dist TO test_01155_ordinary.dist; +RENAME DICTIONARY test_01155_atomic.dict TO test_01155_ordinary.dict; + +INSERT INTO dist(s) VALUES ('after renaming tables'); +SYSTEM FLUSH DISTRIBUTED dist; +SELECT * FROM (SELECT materialize(4), substr(_table, 1, 10) as _table, s FROM merge('test_01155_ordinary', '')) ORDER BY _table, s; +SELECT dictGet('test_01155_ordinary.dict', 'x', 'after renaming tables'); +SELECT database, name, uuid FROM system.dictionaries WHERE database='test_01155_ordinary'; +SELECT 'test_01155_ordinary:'; +SHOW TABLES FROM test_01155_ordinary; +SELECT 'test_01155_atomic:'; +SHOW TABLES FROM test_01155_atomic; + +DROP DATABASE IF EXISTS test_01155_atomic; +DROP DATABASE IF EXISTS test_01155_ordinary; diff --git a/parser/testdata/01157_replace_table/ast.json b/parser/testdata/01157_replace_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01157_replace_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01157_replace_table/metadata.json b/parser/testdata/01157_replace_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01157_replace_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01157_replace_table/query.sql b/parser/testdata/01157_replace_table/query.sql new file mode 100644 index 000000000..3d07c69ac --- /dev/null +++ b/parser/testdata/01157_replace_table/query.sql @@ -0,0 +1,53 @@ +-- Tags: 
no-ordinary-database + +drop table if exists t; +drop table if exists dist; +drop table if exists buf; +drop table if exists join; + +select 'test flush on replace'; +create table t (n UInt64, s String default 's' || toString(n)) engine=Memory; +create table dist (n int) engine=Distributed(test_shard_localhost, currentDatabase(), t); +create table buf (n int) engine=Buffer(currentDatabase(), dist, 1, 10, 100, 10, 100, 1000, 1000); + +system stop distributed sends dist; +insert into buf values (1); +replace table buf (n int) engine=Distributed(test_shard_localhost, currentDatabase(), dist); +replace table dist (n int) engine=Buffer(currentDatabase(), t, 1, 10, 100, 10, 100, 1000, 1000); + +system stop distributed sends buf; +insert into buf values (2); +replace table buf (n int) engine=Buffer(currentDatabase(), dist, 1, 10, 100, 10, 100, 1000, 1000); +replace table dist (n int) engine=Distributed(test_shard_localhost, currentDatabase(), t); + +system stop distributed sends dist; +insert into buf values (3); +replace table buf (n int) engine=Null; +replace table dist (n int) engine=Null; + +select * from t order by n; + +select 'exception on create and fill'; +-- table is not created if select fails +create or replace table join engine=Join(ANY, INNER, n) as select * from t where throwIf(n); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +select count() from system.tables where database=currentDatabase() and name='join'; + +-- table is created and filled +create or replace table join engine=Join(ANY, INNER, n) as select * from t; +select * from numbers(10) as t any join join on t.number=join.n order by n; + +-- table is not replaced if select fails +insert into t(n) values (4); +replace table join engine=Join(ANY, INNER, n) as select * from t where throwIf(n); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +select * from numbers(10) as t any join join on t.number=join.n order by n; + +-- table is replaced +replace table join engine=Join(ANY, INNER, n) as select * from t; +select * from numbers(10) as t any join join on t.number=join.n order by n; + +select name from system.tables where database=currentDatabase() order by name; + +drop table t; +drop table dist; +drop table buf; +drop table join; diff --git a/parser/testdata/01158_zookeeper_log_long/ast.json b/parser/testdata/01158_zookeeper_log_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01158_zookeeper_log_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01158_zookeeper_log_long/metadata.json b/parser/testdata/01158_zookeeper_log_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01158_zookeeper_log_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01158_zookeeper_log_long/query.sql b/parser/testdata/01158_zookeeper_log_long/query.sql new file mode 100644 index 000000000..cbd73630b --- /dev/null +++ b/parser/testdata/01158_zookeeper_log_long/query.sql @@ -0,0 +1,71 @@ +-- Tags: long, zookeeper, no-replicated-database, no-polymorphic-parts, no-random-merge-tree-settings, no-shared-merge-tree, no-async-insert +-- Tag no-replicated-database: Fails due to additional replicas or shards +-- no-shared-merge-tree: depends on structure in zookeeper of replicated merge tree +-- no-async-insert: Test expects new part for each insert + +SET insert_keeper_fault_injection_probability=0; -- disable fault injection; part ids are non-deterministic in case of insert retries + +drop 
table if exists rmt sync; +-- cleanup code will perform extra Exists +-- (so the .reference will not match) +create table rmt (n int) engine=ReplicatedMergeTree('/test/01158/{database}/rmt', '1') + order by n + settings + cleanup_delay_period=86400, + max_cleanup_delay_period=86400, + replicated_can_become_leader=0; +system sync replica rmt; +insert into rmt values (1); +insert into rmt values (1); +system sync replica rmt; +system flush logs zookeeper_log, query_log; + +select 'log'; +select address, type, has_watch, op_num, path, is_ephemeral, is_sequential, version, requests_size, request_idx, error, watch_type, + watch_state, path_created, stat_version, stat_cversion, stat_dataLength, stat_numChildren +from system.zookeeper_log where path like '/test/01158/' || currentDatabase() || '/rmt/log%' and op_num not in (3, 4, 12, 500) +order by xid, type, request_idx; + +select 'parts'; +with now() - interval 1 hour as cutoff_time, +query_ids as +( + select query_id from system.query_log where current_database=currentDatabase() and event_time>=cutoff_time +) +select type, has_watch, op_num, replace(path, toString(serverUUID()), ''), is_ephemeral, is_sequential, if(startsWith(path, '/clickhouse/sessions'), 1, version), requests_size, request_idx, error, watch_type, + watch_state, path_created, stat_version, stat_cversion, stat_dataLength, stat_numChildren +from system.zookeeper_log +where event_time>=cutoff_time and (session_id, xid) in ( + select session_id, xid from system.zookeeper_log where event_time>=cutoff_time + and path='/test/01158/' || currentDatabase() || '/rmt/replicas/1/parts/all_0_0_0' + and (query_id='' or query_id in query_ids) +) +order by xid, type, request_idx; + +select 'blocks'; + +with now() - interval 1 hour as cutoff_time, +query_ids as +( + select query_id from system.query_log where current_database=currentDatabase() and event_time>=cutoff_time +) +select type, has_watch, op_num, path, is_ephemeral, is_sequential, version, requests_size, request_idx, error, watch_type, + watch_state, path_created, stat_version, stat_cversion, stat_dataLength, stat_numChildren +from system.zookeeper_log +where event_time>=cutoff_time and (session_id, xid) in ( + select session_id, xid from system.zookeeper_log where event_time>=cutoff_time + and path like '/test/01158/' || currentDatabase() || '/rmt/blocks/%' + and op_num not in (1, 12, 500) + and (query_id='' or query_id in query_ids) +) +order by xid, type, request_idx; + +drop table rmt sync; + +system flush logs zookeeper_log; +select 'duration_microseconds'; +select count()>0 from system.zookeeper_log where path like '/test/01158/' || currentDatabase() || '/rmt%' and duration_microseconds > 0; + +system flush logs aggregated_zookeeper_log; +select 'aggregated_zookeeper_log'; +select sum(errors[0]) > 0, sum(average_latency) > 0 from system.aggregated_zookeeper_log where parent_path = '/test/01158/' || currentDatabase() || '/rmt' and operation = 'Create'; diff --git a/parser/testdata/01159_combinators_with_parameters/ast.json b/parser/testdata/01159_combinators_with_parameters/ast.json new file mode 100644 index 000000000..1f60724bd --- /dev/null +++ b/parser/testdata/01159_combinators_with_parameters/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + 
"explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function topKArrayState (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.0012498, + "rows_read": 21, + "bytes_read": 872 + } +} diff --git a/parser/testdata/01159_combinators_with_parameters/metadata.json b/parser/testdata/01159_combinators_with_parameters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01159_combinators_with_parameters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01159_combinators_with_parameters/query.sql b/parser/testdata/01159_combinators_with_parameters/query.sql new file mode 100644 index 000000000..8b2dbde64 --- /dev/null +++ b/parser/testdata/01159_combinators_with_parameters/query.sql @@ -0,0 +1,43 @@ +SELECT toTypeName(topKArrayState(10)([toString(number)])) FROM numbers(100); +SELECT toTypeName(topKDistinctState(10)(toString(number))) FROM numbers(100); +SELECT toTypeName(topKForEachState(10)([toString(number)])) FROM numbers(100); +SELECT toTypeName(topKIfState(10)(toString(number), number % 2)) FROM numbers(100); +SELECT toTypeName(topKMergeState(10)(state)) FROM (SELECT topKState(10)(toString(number)) as state FROM numbers(100)); +SELECT toTypeName(topKOrNullState(10)(toString(number))) FROM numbers(100); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toTypeName(topKOrDefaultState(10)(toString(number))) FROM numbers(100); +SELECT toTypeName(topKResampleState(10, 1, 2, 42)(toString(number), number)) FROM numbers(100); +SELECT toTypeName(topKState(10)(toString(number))) FROM numbers(100); +SELECT toTypeName(topKArrayResampleOrDefaultIfState(10, 1, 2, 42)([toString(number)], number, number % 2)) FROM numbers(100); + +CREATE TEMPORARY TABLE t0 AS SELECT quantileArrayState(0.10)([number]) FROM numbers(100); +CREATE TEMPORARY TABLE t1 AS SELECT quantileDistinctState(0.10)(number) FROM numbers(100); +CREATE TEMPORARY TABLE t2 AS SELECT quantileForEachState(0.10)([number]) FROM numbers(100); +CREATE TEMPORARY TABLE t3 AS SELECT quantileIfState(0.10)(number, number % 2) FROM numbers(100); +CREATE TEMPORARY TABLE t4 AS SELECT quantileMergeState(0.10)(state) FROM (SELECT quantileState(0.10)(number) as state FROM numbers(100)); +CREATE TEMPORARY TABLE t5 AS SELECT quantileOrNullState(0.10)(number) FROM numbers(100); +CREATE TEMPORARY TABLE t6 AS SELECT quantileOrDefaultState(0.10)(number) FROM numbers(100); +CREATE TEMPORARY TABLE t7 AS SELECT quantileResampleState(0.10, 1, 2, 42)(number, number) FROM numbers(100); +CREATE TEMPORARY TABLE t8 AS SELECT quantileState(0.10)(number) FROM numbers(100); +CREATE TEMPORARY TABLE t9 AS SELECT quantileArrayResampleOrDefaultIfState(0.10, 1, 2, 42)([number], number, number % 2) 
FROM numbers(100); + +INSERT INTO t0 SELECT quantileArrayState(0.10)([number]) FROM numbers(100); +INSERT INTO t1 SELECT quantileDistinctState(0.10)(number) FROM numbers(100); +INSERT INTO t2 SELECT quantileForEachState(0.10)([number]) FROM numbers(100); +INSERT INTO t3 SELECT quantileIfState(0.10)(number, number % 2) FROM numbers(100); +INSERT INTO t4 SELECT quantileMergeState(0.10)(state) FROM (SELECT quantileState(0.10)(number) as state FROM numbers(100)); +INSERT INTO t5 SELECT quantileOrNullState(0.10)(number) FROM numbers(100); +INSERT INTO t6 SELECT quantileOrDefaultState(0.10)(number) FROM numbers(100); +INSERT INTO t7 SELECT quantileResampleState(0.10, 1, 2, 42)(number, number) FROM numbers(100); +INSERT INTO t8 SELECT quantileState(0.10)(number) FROM numbers(100); +INSERT INTO t9 SELECT quantileArrayResampleOrDefaultIfState(0.10, 1, 2, 42)([number], number, number % 2) FROM numbers(100); + +SELECT round(quantileArrayMerge(0.10)((*,).1)) FROM t0; +SELECT round(quantileDistinctMerge(0.10)((*,).1)) FROM t1; +SELECT arrayMap(x -> round(x), quantileForEachMerge(0.10)((*,).1)) FROM t2; +SELECT round(quantileIfMerge(0.10)((*,).1)) FROM t3; +SELECT round(quantileMerge(0.10)((*,).1)) FROM t4; +SELECT round(quantileOrNullMerge(0.10)((*,).1)) FROM t5; +SELECT round(quantileOrDefaultMerge(0.10)((*,).1)) FROM t6; +SELECT arrayMap(x -> round(x), quantileResampleMerge(0.10, 1, 2, 42)((*,).1)) FROM t7; +SELECT round(quantileMerge(0.10)((*,).1)) FROM t8; +SELECT arrayMap(x -> round(x), quantileArrayResampleOrDefaultIfMerge(0.10, 1, 2, 42)((*,).1)) FROM t9; diff --git a/parser/testdata/01161_information_schema/ast.json b/parser/testdata/01161_information_schema/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01161_information_schema/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01161_information_schema/metadata.json b/parser/testdata/01161_information_schema/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01161_information_schema/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01161_information_schema/query.sql b/parser/testdata/01161_information_schema/query.sql new file mode 100644 index 000000000..056e53141 --- /dev/null +++ b/parser/testdata/01161_information_schema/query.sql @@ -0,0 +1,56 @@ +-- Tags: memory-engine +-- Uppercase/lowercase are okay, mixed case isn't +SHOW TABLES FROM information_schema; +SHOW TABLES FROM INFORMATION_SCHEMA; +SHOW TABLES FROM INFORMATION_schema; -- { serverError UNKNOWN_DATABASE } + +DROP VIEW IF EXISTS v; +DROP TABLE IF EXISTS t; +DROP VIEW IF EXISTS mv; +DROP TABLE IF EXISTS tmp; +DROP TABLE IF EXISTS kcu1; +DROP TABLE IF EXISTS kcu2; + +CREATE TABLE t (n UInt64, f Float32, s String, fs FixedString(42), d Decimal(9, 6)) ENGINE = Memory; +CREATE VIEW v (n Nullable(Int32), f Float64) AS SELECT n, f FROM t; +CREATE MATERIALIZED VIEW mv ENGINE = Null AS SELECT * FROM system.one; +CREATE TEMPORARY TABLE tmp (d Date, dt DateTime, dtms DateTime64(3)); +CREATE TABLE kcu1 (i UInt32, s String) ENGINE MergeTree ORDER BY i; +CREATE TABLE kcu2 (i UInt32, d Date, u UUID) ENGINE MergeTree ORDER BY (u, d); + + +SELECT '-- information_schema.schemata'; +SELECT * FROM information_schema.schemata WHERE schema_name ilike 'information_schema' ORDER BY schema_name; + +SELECT '-- information_schema.tables'; +SELECT * FROM information_schema.tables WHERE table_schema = currentDatabase() AND table_name NOT LIKE '%inner%' ORDER BY table_name; 
+-- SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA=currentDatabase() ORDER BY schema_name; +-- FIXME #28687 + +SELECT '-- information_schema.views'; +SELECT * FROM information_schema.views WHERE table_schema = currentDatabase() ORDER BY table_name; +-- SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE (TABLE_SCHEMA=currentDatabase() OR TABLE_SCHEMA='') AND TABLE_NAME NOT LIKE '%inner%' + +SELECT '-- information_schema.columns'; +SELECT * FROM information_schema.columns WHERE table_schema = currentDatabase() AND table_name NOT LIKE '%inner%' ORDER BY table_name, column_name; + +SELECT '-- information_schema.key_column_usage'; +SELECT * FROM information_schema.key_column_usage WHERE table_schema = currentDatabase() AND table_name = 'kcu1' ORDER BY table_schema, column_name; +SELECT * FROM information_schema.key_column_usage WHERE table_schema = currentDatabase() AND table_name = 'kcu2' ORDER BY table_schema, column_name; + +SELECT '-- information_schema.referential_constraints'; +SELECT * FROM information_schema.referential_constraints; + +SELECT '-- information_schema.statistics'; +SELECT * FROM information_schema.statistics; +-- +-- mixed upper/lowercase schema and table name: +SELECT count() FROM information_schema.TABLES WHERE table_schema = currentDatabase() AND table_name = 't'; +SELECT count() FROM INFORMATION_SCHEMA.tables WHERE table_schema = currentDatabase() AND table_name = 't'; +SELECT count() FROM information_schema.taBLES WHERE table_schema =currentDatabase() AND table_name = 't'; -- { serverError UNKNOWN_TABLE } + +DROP VIEW mv; +DROP VIEW v; +DROP TABLE t; +DROP TABLE kcu1; +DROP TABLE kcu2; diff --git a/parser/testdata/01163_search_case_insensetive_utf8/ast.json b/parser/testdata/01163_search_case_insensetive_utf8/ast.json new file mode 100644 index 000000000..32a679e04 --- /dev/null +++ b/parser/testdata/01163_search_case_insensetive_utf8/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function positionCaseInsensitiveUTF8 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'сссссс'" + }, + { + "explain": " Literal 'Ё'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001474085, + "rows_read": 10, + "bytes_read": 403 + } +} diff --git a/parser/testdata/01163_search_case_insensetive_utf8/metadata.json b/parser/testdata/01163_search_case_insensetive_utf8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01163_search_case_insensetive_utf8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01163_search_case_insensetive_utf8/query.sql b/parser/testdata/01163_search_case_insensetive_utf8/query.sql new file mode 100644 index 000000000..99bdd38ce --- /dev/null +++ b/parser/testdata/01163_search_case_insensetive_utf8/query.sql @@ -0,0 +1,12 @@ +SELECT positionCaseInsensitiveUTF8(materialize('сссссс'), 'Ё'); +SELECT countSubstringsCaseInsensitiveUTF8(materialize('сссссс'), 'ё'); +SELECT positionCaseInsensitiveUTF8(materialize('сссссссс'), 'ё'); +SELECT countSubstringsCaseInsensitiveUTF8(materialize('сссссссс'), 'Ё'); +SELECT 
countSubstringsCaseInsensitiveUTF8(materialize('ссссссссссссссссссс'), 'ёёёёёёё'); +SELECT positionCaseInsensitiveUTF8(materialize('ссссссссссссссссссс'), 'ёЁёЁёЁё'); +SELECT countSubstringsCaseInsensitiveUTF8(materialize('ссссссссссссссссссс'), 'ёЁёЁёЁёЁёЁ'); +SELECT positionCaseInsensitiveUTF8(materialize('ссссссссссссссссссс'), 'ЁЁЁЁЁЁЁЁЁЁ'); +SELECT countSubstringsCaseInsensitiveUTF8(materialize('ссссссссссссссссссс'), 'ёЁёЁёЁёссс'); +SELECT positionCaseInsensitiveUTF8(materialize('ссссссссссссссссссс'), 'ёЁёЁёЁёссс'); +SELECT countSubstringsCaseInsensitiveUTF8(materialize('ссссссссссссссссссс'), 'ЁС'); +SELECT positionCaseInsensitiveUTF8(materialize('ссссссссссссссссссс'), 'ёс'); diff --git a/parser/testdata/01164_alter_memory_database/ast.json b/parser/testdata/01164_alter_memory_database/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01164_alter_memory_database/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01164_alter_memory_database/metadata.json b/parser/testdata/01164_alter_memory_database/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01164_alter_memory_database/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01164_alter_memory_database/query.sql b/parser/testdata/01164_alter_memory_database/query.sql new file mode 100644 index 000000000..0beddbfaa --- /dev/null +++ b/parser/testdata/01164_alter_memory_database/query.sql @@ -0,0 +1,13 @@ +-- Tags: zookeeper, no-parallel, no-shared-merge-tree +-- no-shared-merge-tree: doesn't support databases without UUID + +drop database if exists test_1164_memory; +create database test_1164_memory engine=Memory; +create table test_1164_memory.r1 (n int) engine=ReplicatedMergeTree('/test/01164/{database}/t', '1') order by n; +create table test_1164_memory.r2 (n int) engine=ReplicatedMergeTree('/test/01164/{database}/t', '2') order by n; +alter table test_1164_memory.r1 add column m int; +system sync replica test_1164_memory.r1; +system sync replica test_1164_memory.r2; +show create table test_1164_memory.r1; +show create table test_1164_memory.r1; +drop database test_1164_memory; diff --git a/parser/testdata/01165_lost_part_empty_partition/ast.json b/parser/testdata/01165_lost_part_empty_partition/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01165_lost_part_empty_partition/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01165_lost_part_empty_partition/metadata.json b/parser/testdata/01165_lost_part_empty_partition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01165_lost_part_empty_partition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01165_lost_part_empty_partition/query.sql b/parser/testdata/01165_lost_part_empty_partition/query.sql new file mode 100644 index 000000000..9cbdd577e --- /dev/null +++ b/parser/testdata/01165_lost_part_empty_partition/query.sql @@ -0,0 +1,46 @@ +-- Tags: zookeeper, no-shared-merge-tree +-- no-shared-merge-tree: shared merge tree doesn't lose data parts + +SET max_rows_to_read = 0; -- system.text_log can be really big + +create table rmt1 (d DateTime, n int) engine=ReplicatedMergeTree('/test/01165/{database}/rmt', '1') order by n partition by toYYYYMMDD(d); +create table rmt2 (d DateTime, n int) engine=ReplicatedMergeTree('/test/01165/{database}/rmt', '2') order by n partition by toYYYYMMDD(d); + +system stop replicated sends 
rmt1; +insert into rmt1 values (now(), arrayJoin([1, 2])); -- { error BAD_ARGUMENTS } +insert into rmt1(n) select * from system.numbers limit arrayJoin([1, 2]); -- { serverError BAD_ARGUMENTS, INVALID_LIMIT_EXPRESSION } +insert into rmt1 values (now(), rand()); +drop table rmt1; + +system sync replica rmt2; +select lost_part_count from system.replicas where database = currentDatabase() and table = 'rmt2'; +drop table rmt2; +SYSTEM FLUSH LOGS text_log; +select count() from system.text_log where logger_name like '%' || currentDatabase() || '%' and message ilike '%table with non-zero lost_part_count equal to%'; + + +create table rmt1 (d DateTime, n int) engine=ReplicatedMergeTree('/test/01165/{database}/rmt', '1') order by n partition by tuple(); +create table rmt2 (d DateTime, n int) engine=ReplicatedMergeTree('/test/01165/{database}/rmt', '2') order by n partition by tuple(); + +system stop replicated sends rmt1; +insert into rmt1 values (now(), rand()); +drop table rmt1; + +system sync replica rmt2; +select lost_part_count from system.replicas where database = currentDatabase() and table = 'rmt2'; +drop table rmt2; +SYSTEM FLUSH LOGS text_log; +select count() from system.text_log where logger_name like '%' || currentDatabase() || '%' and message ilike '%table with non-zero lost_part_count equal to%'; + + +create table rmt1 (n UInt8, m Int32, d Date, t DateTime) engine=ReplicatedMergeTree('/test/01165/{database}/rmt', '1') order by n partition by (n, m, d, t); +create table rmt2 (n UInt8, m Int32, d Date, t DateTime) engine=ReplicatedMergeTree('/test/01165/{database}/rmt', '2') order by n partition by (n, m, d, t); + +system stop replicated sends rmt1; +insert into rmt1 values (rand(), rand(), now(), now()); +insert into rmt1 values (rand(), rand(), now(), now()); +insert into rmt1 values (rand(), rand(), now(), now()); +drop table rmt1; + +system sync replica rmt2; +drop table rmt2; diff --git a/parser/testdata/01166_truncate_multiple_partitions/ast.json b/parser/testdata/01166_truncate_multiple_partitions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01166_truncate_multiple_partitions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01166_truncate_multiple_partitions/metadata.json b/parser/testdata/01166_truncate_multiple_partitions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01166_truncate_multiple_partitions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01166_truncate_multiple_partitions/query.sql b/parser/testdata/01166_truncate_multiple_partitions/query.sql new file mode 100644 index 000000000..8f5d3ccc1 --- /dev/null +++ b/parser/testdata/01166_truncate_multiple_partitions/query.sql @@ -0,0 +1,34 @@ +-- Tags: no-shared-catalog +-- no-shared-catalog: standard MergeTree is not supported + +drop table if exists trunc; + +set default_table_engine='ReplicatedMergeTree'; +create table trunc (n int, primary key n) engine=ReplicatedMergeTree('/test/1166/{database}', '1') partition by n % 10; +insert into trunc select * from numbers(20); +select count(), sum(n) from trunc; +alter table trunc detach partition all; +select count(), sum(n) from trunc; +alter table trunc attach partition id '0'; +alter table trunc attach partition id '1'; +alter table trunc attach partition id '2'; +alter table trunc attach partition id '3'; +select count(), sum(n) from trunc; +truncate trunc; +select count(), sum(n) from trunc; +drop table trunc; + +set 
default_table_engine='MergeTree'; +create table trunc (n int, primary key n) partition by n % 10; +insert into trunc select * from numbers(20); +select count(), sum(n) from trunc; +alter table trunc detach partition all; +select count(), sum(n) from trunc; +alter table trunc attach partition id '0'; +alter table trunc attach partition id '1'; +alter table trunc attach partition id '2'; +alter table trunc attach partition id '3'; +select count(), sum(n) from trunc; +truncate trunc; +select count(), sum(n) from trunc; +drop table trunc; \ No newline at end of file diff --git a/parser/testdata/01172_transaction_counters/ast.json b/parser/testdata/01172_transaction_counters/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01172_transaction_counters/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01172_transaction_counters/metadata.json b/parser/testdata/01172_transaction_counters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01172_transaction_counters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01172_transaction_counters/query.sql b/parser/testdata/01172_transaction_counters/query.sql new file mode 100644 index 000000000..0ecff8be0 --- /dev/null +++ b/parser/testdata/01172_transaction_counters/query.sql @@ -0,0 +1,55 @@ +-- Tags: no-ordinary-database, no-encrypted-storage + +drop table if exists txn_counters; + +create table txn_counters (n Int64, creation_tid DEFAULT transactionID()) engine=MergeTree order by n SETTINGS old_parts_lifetime=3600; + +insert into txn_counters(n) values (1); +select transactionID(); + +-- stop background cleanup +system stop merges txn_counters; + +set throw_on_unsupported_query_inside_transaction=0; + +begin transaction; +insert into txn_counters(n) values (2); +select 1, system.parts.name, txn_counters.creation_tid = system.parts.creation_tid from txn_counters join system.parts on txn_counters._part = system.parts.name where database=currentDatabase() and table='txn_counters' order by system.parts.name; +select 2, name, creation_csn, removal_tid, removal_csn from system.parts where database=currentDatabase() and table='txn_counters' order by system.parts.name; +rollback; + +begin transaction; +insert into txn_counters(n) values (3); +select 3, system.parts.name, txn_counters.creation_tid = system.parts.creation_tid from txn_counters join system.parts on txn_counters._part = system.parts.name where database=currentDatabase() and table='txn_counters' order by system.parts.name; +select 4, name, creation_csn, removal_tid, removal_csn from system.parts where database=currentDatabase() and table='txn_counters' order by system.parts.name; +select 5, transactionID().3 == serverUUID(); +commit; + +detach table txn_counters; +attach table txn_counters; + +begin transaction; +insert into txn_counters(n) values (4); +select 6, system.parts.name, txn_counters.creation_tid = system.parts.creation_tid from txn_counters join system.parts on txn_counters._part = system.parts.name where database=currentDatabase() and table='txn_counters' order by system.parts.name; +select 7, name, removal_tid, removal_csn from system.parts where database=currentDatabase() and table='txn_counters' and active order by system.parts.name; +select 8, transactionID().3 == serverUUID(); +commit; + +begin transaction; +insert into txn_counters(n) values (5); +alter table txn_counters drop partition id 'all'; +rollback; + +system flush logs 
transactions_info_log; +select indexOf((select arraySort(groupUniqArray(tid)) from system.transactions_info_log where database=currentDatabase() and table='txn_counters'), tid), + type, + thread_id!=0, + length(query_id)=length(queryID()) or type='Commit' and query_id='', -- ignore fault injection after commit + tid_hash!=0, + csn=0, + part +from system.transactions_info_log +where tid in (select tid from system.transactions_info_log where database=currentDatabase() and table='txn_counters' and not (tid.1=1 and tid.2=1)) +or (database=currentDatabase() and table='txn_counters') order by event_time; + +drop table txn_counters; diff --git a/parser/testdata/01173_transaction_control_queries/ast.json b/parser/testdata/01173_transaction_control_queries/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01173_transaction_control_queries/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01173_transaction_control_queries/metadata.json b/parser/testdata/01173_transaction_control_queries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01173_transaction_control_queries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01173_transaction_control_queries/query.sql b/parser/testdata/01173_transaction_control_queries/query.sql new file mode 100644 index 000000000..c4191fdb9 --- /dev/null +++ b/parser/testdata/01173_transaction_control_queries/query.sql @@ -0,0 +1,103 @@ +-- Tags: no-ordinary-database, no-encrypted-storage + +drop table if exists mt1; +drop table if exists mt2; + +create table mt1 (n Int64) engine=MergeTree order by n; +create table mt2 (n Int64) engine=MergeTree order by n; + +commit; -- { serverError INVALID_TRANSACTION } -- no transaction +rollback; -- { serverError INVALID_TRANSACTION } + +begin transaction; +insert into mt1 values (1); +insert into mt2 values (10); +select 'commit', arraySort(groupArray(n)) from (select n from mt1 union all select * from mt2); +commit; + +begin transaction; +insert into mt1 values (2); +insert into mt2 values (20); +select 'rollback', arraySort(groupArray(n)) from (select n from mt1 union all select * from mt2); +rollback; + +begin transaction; +select 'no nested', arraySort(groupArray(n)) from (select n from mt1 union all select * from mt2); +begin transaction; -- { serverError INVALID_TRANSACTION } +rollback; + +begin transaction; +insert into mt1 values (3); +insert into mt2 values (30); +select 'on exception before start', arraySort(groupArray(n)) from (select n from mt1 union all select * from mt2); +-- rollback on exception before start +select functionThatDoesNotExist(); -- { serverError UNKNOWN_FUNCTION } +-- cannot commit after exception +commit; -- { serverError INVALID_TRANSACTION } -- after 46 +begin transaction; -- { serverError INVALID_TRANSACTION } +rollback; + +begin transaction; +insert into mt1 values (4); +insert into mt2 values (40); +select 'on exception while processing', arraySort(groupArray(n)) from (select n from mt1 union all select * from mt2); +-- rollback on exception while processing +select throwIf(100 < number) from numbers(1000); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +-- cannot commit after exception +commit; -- { serverError INVALID_TRANSACTION } -- after 395 +insert into mt1 values (5); -- { serverError INVALID_TRANSACTION } +insert into mt2 values (50); -- { serverError INVALID_TRANSACTION } +select 1; -- { serverError INVALID_TRANSACTION } +rollback; + +begin 
transaction; +insert into mt1 values (6); +insert into mt2 values (60); +select 'on session close', arraySort(groupArray(n)) from (select n from mt1 union all select * from mt2); +insert into mt1 values ([1]); -- { clientError ILLEGAL_TYPE_OF_ARGUMENT } +-- INSERT failures do not produce client reconnect anymore, so rollback can be done +rollback; + +begin transaction; +insert into mt1 values (7); +insert into mt2 values (70); +select 'commit', arraySort(groupArray(n)) from (select n from mt1 union all select * from mt2); +commit; + +begin transaction; +select 'readonly', arraySort(groupArray(n)) from (select n from mt1 union all select * from mt2); +commit; + +begin transaction; +select 'snapshot', count(), sum(n) from mt1; +set transaction snapshot 1; +select 'snapshot1', count(), sum(n) from mt1; +set transaction snapshot 3; +set throw_on_unsupported_query_inside_transaction=0; +select 'snapshot3', count() = (select count() from system.parts where database=currentDatabase() and table='mt1' and _state in ('Active', 'Outdated')) from mt1; +set throw_on_unsupported_query_inside_transaction=1; +set transaction snapshot 1000000000000000; +select 'snapshot100500', count(), sum(n) from mt1; +set transaction snapshot 5; -- { serverError INVALID_TRANSACTION } +rollback; + +begin transaction; +create table m (n int) engine=Memory; -- { serverError NOT_IMPLEMENTED } +commit; -- { serverError INVALID_TRANSACTION } -- after 48 +rollback; + +create table m (n int) engine=Memory; +begin transaction; +insert into m values (1); -- { serverError NOT_IMPLEMENTED } +select * from m; -- { serverError INVALID_TRANSACTION } +commit; -- { serverError INVALID_TRANSACTION } -- after 48 +rollback; + +begin transaction; +select * from m; -- { serverError NOT_IMPLEMENTED } +commit; -- { serverError INVALID_TRANSACTION } -- after 48 +rollback; + +drop table m; +drop table mt1; +drop table mt2; diff --git a/parser/testdata/01177_group_array_moving/ast.json b/parser/testdata/01177_group_array_moving/ast.json new file mode 100644 index 000000000..2309e2c8a --- /dev/null +++ b/parser/testdata/01177_group_array_moving/ast.json @@ -0,0 +1,115 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 2)" + }, + { + "explain": "   ExpressionList (children 3)" + }, + { + "explain": "    Function groupArrayMovingSum (children 2)" + }, + { + "explain": "     ExpressionList (children 1)" + }, + { + "explain": "      Literal Int64_-9223372036854775808" + }, + { + "explain": "     ExpressionList (children 1)" + }, + { + "explain": "      Literal UInt64_257" + }, + { + "explain": "    Function groupArrayMovingSum (children 2)" + }, + { + "explain": "     ExpressionList (children 1)" + }, + { + "explain": "      Literal UInt64_18446744073709551615" + }, + { + "explain": "     ExpressionList (children 1)" + }, + { + "explain": "      Literal UInt64_1048575" + }, + { + "explain": "    Function groupArrayMovingSum (children 2)" + }, + { + "explain": "     ExpressionList (children 1)" + }, + { + "explain": "      Function multiply (children 1)" + }, + { + "explain": "       ExpressionList (children 2)" + }, + { + "explain": "        Identifier number" + }, + { + "explain": "        Literal UInt64_9223372036854775807" + }, + { + "explain": "     ExpressionList (children 1)" + }, + { + "explain": "      Literal UInt64_9223372036854775807" + }, + { + "explain": "   TablesInSelectQuery (children 1)" + }, + { + "explain": "
TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '127.0.0.{1..2}'" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + } + ], + + "rows": 31, + + "statistics": + { + "elapsed": 0.001399146, + "rows_read": 31, + "bytes_read": 1312 + } +} diff --git a/parser/testdata/01177_group_array_moving/metadata.json b/parser/testdata/01177_group_array_moving/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01177_group_array_moving/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01177_group_array_moving/query.sql b/parser/testdata/01177_group_array_moving/query.sql new file mode 100644 index 000000000..5689cd95f --- /dev/null +++ b/parser/testdata/01177_group_array_moving/query.sql @@ -0,0 +1,4 @@ +SELECT groupArrayMovingSum(257)(-9223372036854775808), groupArrayMovingSum(1048575)(18446744073709551615), groupArrayMovingSum(9223372036854775807)(number * 9223372036854775807) FROM remote('127.0.0.{1..2}', numbers(3)); +SELECT groupArrayMovingAvg(257)(-9223372036854775808), groupArrayMovingAvg(1048575)(18446744073709551615), groupArrayMovingAvg(9223372036854775807)(number * 9223372036854775807) FROM remote('127.0.0.{1..2}', numbers(3)); + +SELECT groupArrayMovingSum(257)(-9223372036854775808), groupArrayMovingSum(1)(10.000100135803223, [NULL, NULL], NULL), groupArrayMovingSum(NULL)(NULL) FROM numbers(1023) FORMAT Null; diff --git a/parser/testdata/01178_int_field_to_decimal/ast.json b/parser/testdata/01178_int_field_to_decimal/ast.json new file mode 100644 index 000000000..4c787523a --- /dev/null +++ b/parser/testdata/01178_int_field_to_decimal/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier d" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function values (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'd Decimal(8, 8)'" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function notIn (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier d" + }, + { + "explain": " Literal Tuple_(Int64_-1, UInt64_0)" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.000996301, + "rows_read": 17, + "bytes_read": 646 + } +} diff --git a/parser/testdata/01178_int_field_to_decimal/metadata.json b/parser/testdata/01178_int_field_to_decimal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01178_int_field_to_decimal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01178_int_field_to_decimal/query.sql b/parser/testdata/01178_int_field_to_decimal/query.sql new file mode 100644 index 000000000..633e8b658 --- /dev/null +++ b/parser/testdata/01178_int_field_to_decimal/query.sql @@ -0,0 +1,10 
@@ +select d from values('d Decimal(8, 8)', 0, 1) where d not in (-1, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +select d from values('d Decimal(8, 8)', 0, 2) where d not in (1, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +select d from values('d Decimal(9, 8)', 0, 3) where d not in (-9223372036854775808, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +select d from values('d Decimal(9, 8)', 0, 4) where d not in (18446744073709551615, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +select d from values('d Decimal(18, 8)', 0, 5) where d not in (-9223372036854775808, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +select d from values('d Decimal(18, 8)', 0, 6) where d not in (18446744073709551615, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +select d from values('d Decimal(26, 8)', 0, 7) where d not in (-9223372036854775808, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +select d from values('d Decimal(27, 8)', 0, 8) where d not in (18446744073709551615, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +select d from values('d Decimal(27, 8)', 0, 9) where d not in (-9223372036854775808, 0); +select d from values('d Decimal(28, 8)', 0, 10) where d not in (18446744073709551615, 0); diff --git a/parser/testdata/01181_db_atomic_drop_on_cluster/ast.json b/parser/testdata/01181_db_atomic_drop_on_cluster/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01181_db_atomic_drop_on_cluster/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01181_db_atomic_drop_on_cluster/metadata.json b/parser/testdata/01181_db_atomic_drop_on_cluster/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01181_db_atomic_drop_on_cluster/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01181_db_atomic_drop_on_cluster/query.sql b/parser/testdata/01181_db_atomic_drop_on_cluster/query.sql new file mode 100644 index 000000000..6edaaa5c6 --- /dev/null +++ b/parser/testdata/01181_db_atomic_drop_on_cluster/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-replicated-database +-- Tag no-replicated-database: ON CLUSTER is not allowed + +DROP TABLE IF EXISTS test_repl ON CLUSTER test_shard_localhost NO DELAY; +CREATE TABLE test_repl ON CLUSTER test_shard_localhost (n UInt64) ENGINE ReplicatedMergeTree('/clickhouse/test_01181/{database}/test_repl','r1') ORDER BY tuple(); +DETACH TABLE test_repl ON CLUSTER test_shard_localhost NO DELAY; +ATTACH TABLE test_repl ON CLUSTER test_shard_localhost; +DROP TABLE test_repl ON CLUSTER test_shard_localhost NO DELAY; diff --git a/parser/testdata/01182_materialized_view_different_structure/ast.json b/parser/testdata/01182_materialized_view_different_structure/ast.json new file mode 100644 index 000000000..da85fb0fc --- /dev/null +++ b/parser/testdata/01182_materialized_view_different_structure/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000977806, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01182_materialized_view_different_structure/metadata.json b/parser/testdata/01182_materialized_view_different_structure/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01182_materialized_view_different_structure/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01182_materialized_view_different_structure/query.sql 
b/parser/testdata/01182_materialized_view_different_structure/query.sql new file mode 100644 index 000000000..77af55815 --- /dev/null +++ b/parser/testdata/01182_materialized_view_different_structure/query.sql @@ -0,0 +1,43 @@ +SET allow_materialized_view_with_bad_select = 1; + +DROP TABLE IF EXISTS test_table; +DROP TABLE IF EXISTS numbers; +DROP TABLE IF EXISTS test_mv; +DROP TABLE IF EXISTS src; +DROP TABLE IF EXISTS dst; +DROP TABLE IF EXISTS mv; +DROP TABLE IF EXISTS dist; + +CREATE TABLE test_table (key UInt32, value Decimal(16, 6)) ENGINE = SummingMergeTree() ORDER BY key; +CREATE TABLE numbers (number UInt64) ENGINE=Memory; + +CREATE MATERIALIZED VIEW test_mv TO test_table (number UInt64, value Decimal(38, 6)) +AS SELECT number, sum(number) AS value FROM (SELECT *, toDecimal64(number, 6) AS val FROM numbers) GROUP BY number; + +INSERT INTO numbers SELECT * FROM numbers(100000); + +SELECT sum(value) FROM test_mv; +SELECT sum(value) FROM (SELECT number, sum(number) AS value FROM (SELECT *, toDecimal64(number, 6) AS val FROM numbers) GROUP BY number); + +CREATE TABLE src (n UInt64, s FixedString(16)) ENGINE=Memory; +CREATE TABLE dst (n UInt8, s String) ENGINE = Memory; +CREATE MATERIALIZED VIEW mv TO dst (n String) AS SELECT * FROM src; +CREATE TABLE dist (n Int128) ENGINE=Distributed(test_cluster_two_shards, currentDatabase(), mv); + +INSERT INTO src SELECT number, toString(number) FROM numbers(1000); +INSERT INTO mv SELECT toString(number + 1000) FROM numbers(1000); -- { serverError TYPE_MISMATCH } +INSERT INTO mv SELECT arrayJoin(['42', 'test']); -- { serverError TYPE_MISMATCH } + +SELECT count(), sum(n), sum(toInt64(s)), max(n), min(n) FROM src; +SELECT count(), sum(n), sum(toInt64(s)), max(n), min(n) FROM dst; +SELECT count(), sum(toInt64(n)), max(n), min(n) FROM mv; +SELECT count(), sum(toInt64(n)), max(n), min(n) FROM dist; -- { serverError CANNOT_CONVERT_TYPE } +SELECT count(), sum(toInt64(n)), max(toUInt32(n)), min(toInt128(n)) FROM dist; + +DROP TABLE test_table; +DROP TABLE numbers; +DROP TABLE test_mv; +DROP TABLE src; +DROP TABLE dst; +DROP TABLE mv; +DROP TABLE dist; diff --git a/parser/testdata/01185_create_or_replace_table/ast.json b/parser/testdata/01185_create_or_replace_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01185_create_or_replace_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01185_create_or_replace_table/metadata.json b/parser/testdata/01185_create_or_replace_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01185_create_or_replace_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01185_create_or_replace_table/query.sql b/parser/testdata/01185_create_or_replace_table/query.sql new file mode 100644 index 000000000..801a775e0 --- /dev/null +++ b/parser/testdata/01185_create_or_replace_table/query.sql @@ -0,0 +1,24 @@ +-- Tags: no-ordinary-database + +drop table if exists t1; + +replace table t1 (n UInt64, s String) engine=MergeTree order by n; -- { serverError UNKNOWN_TABLE } +show tables; +create or replace table t1 (n UInt64, s String) engine=MergeTree order by n; +show tables; +show create table t1; + +insert into t1 values (1, 'test'); +create or replace table t1 (n UInt64, s Nullable(String)) engine=MergeTree order by n; +insert into t1 values (2, null); +show tables; +show create table t1; +select * from t1; + +replace table t1 (n UInt64) engine=MergeTree order by n; +insert into t1 values 
(3); +show tables; +show create table t1; +select * from t1; + +drop table t1; diff --git a/parser/testdata/01186_conversion_to_nullable/ast.json b/parser/testdata/01186_conversion_to_nullable/ast.json new file mode 100644 index 000000000..01468ee86 --- /dev/null +++ b/parser/testdata/01186_conversion_to_nullable/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function values (children 1)" + }, + { + "explain": " ExpressionList (children 6)" + }, + { + "explain": " Literal 'x Nullable(String)'" + }, + { + "explain": " Literal '42'" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal '0'" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal '256'" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001203284, + "rows_read": 18, + "bytes_read": 657 + } +} diff --git a/parser/testdata/01186_conversion_to_nullable/metadata.json b/parser/testdata/01186_conversion_to_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01186_conversion_to_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01186_conversion_to_nullable/query.sql b/parser/testdata/01186_conversion_to_nullable/query.sql new file mode 100644 index 000000000..52f7ee91f --- /dev/null +++ b/parser/testdata/01186_conversion_to_nullable/query.sql @@ -0,0 +1,13 @@ +select toUInt8(x) from values('x Nullable(String)', '42', NULL, '0', '', '256'); +select toInt64(x) from values('x Nullable(String)', '42', NULL, '0', '', '256'); + +select toDate(x) from values('x Nullable(String)', '2020-12-24', NULL, '0000-00-00', '', '9999-01-01'); +select toDateTime(x, 'Asia/Istanbul') from values('x Nullable(String)', '2020-12-24 01:02:03', NULL, '0000-00-00 00:00:00', ''); +select toDateTime64(x, 2, 'Asia/Istanbul') from values('x Nullable(String)', '2020-12-24 01:02:03', NULL, '0000-00-00 00:00:00', ''); +select toUnixTimestamp(x, 'Asia/Istanbul') from values ('x Nullable(String)', '2000-01-01 13:12:12', NULL, ''); + +select toDecimal32(x, 2) from values ('x Nullable(String)', '42', NULL, '3.14159'); +select toDecimal64(x, 8) from values ('x Nullable(String)', '42', NULL, '3.14159'); + +select toString(x) from values ('x Nullable(String)', '42', NULL, 'test'); +select toFixedString(x, 8) from values ('x Nullable(String)', '42', NULL, 'test'); diff --git a/parser/testdata/01188_attach_table_from_path/ast.json b/parser/testdata/01188_attach_table_from_path/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01188_attach_table_from_path/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01188_attach_table_from_path/metadata.json b/parser/testdata/01188_attach_table_from_path/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01188_attach_table_from_path/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01188_attach_table_from_path/query.sql b/parser/testdata/01188_attach_table_from_path/query.sql new file mode 100644 index 000000000..7798b9f99 --- /dev/null +++ b/parser/testdata/01188_attach_table_from_path/query.sql @@ -0,0 +1,28 @@ +-- Tags: no-replicated-database, memory-engine + +drop table if exists test; +drop table if exists file; +drop table if exists mt; + +attach table test from 'some/path' (n UInt8) engine=Memory; -- { serverError NOT_IMPLEMENTED } +attach table test from '/etc/passwd' (s String) engine=File(TSVRaw); -- { serverError PATH_ACCESS_DENIED } +attach table test from '../../../../../../../../../etc/passwd' (s String) engine=File(TSVRaw); -- { serverError PATH_ACCESS_DENIED } +attach table test from 42 (s String) engine=File(TSVRaw); -- { clientError SYNTAX_ERROR } + +insert into table function file('01188_attach/file/data.TSV', 'TSV', 's String, n UInt8') values ('file', 42); +attach table file from '01188_attach/file' (s String, n UInt8) engine=File(TSV); +select * from file; +detach table file; +attach table file; +select * from file; + +attach table mt from '01188_attach/file' (n UInt8, s String) engine=MergeTree order by n; +select * from mt; +insert into mt values (42, 'mt'); +select * from mt; +detach table mt; +attach table mt; +select * from mt; + +drop table file; +drop table mt; diff --git a/parser/testdata/01189_create_as_table_as_table_function/ast.json b/parser/testdata/01189_create_as_table_as_table_function/ast.json new file mode 100644 index 000000000..00720608d --- /dev/null +++ b/parser/testdata/01189_create_as_table_as_table_function/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table2 (children 1)" + }, + { + "explain": " Identifier table2" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00119242, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/01189_create_as_table_as_table_function/metadata.json b/parser/testdata/01189_create_as_table_as_table_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01189_create_as_table_as_table_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01189_create_as_table_as_table_function/query.sql b/parser/testdata/01189_create_as_table_as_table_function/query.sql new file mode 100644 index 000000000..011dcb931 --- /dev/null +++ b/parser/testdata/01189_create_as_table_as_table_function/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS table2; +DROP TABLE IF EXISTS table3; + +CREATE TABLE table2 AS numbers(5); +CREATE TABLE table3 AS table2; + +SHOW CREATE table2; +SHOW CREATE table3; + +SELECT count(), sum(number) FROM table2; +SELECT count(), sum(number) FROM table3; + +DROP TABLE table2; +DROP TABLE table3; diff --git a/parser/testdata/01191_rename_dictionary/ast.json b/parser/testdata/01191_rename_dictionary/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01191_rename_dictionary/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01191_rename_dictionary/metadata.json b/parser/testdata/01191_rename_dictionary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01191_rename_dictionary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01191_rename_dictionary/query.sql b/parser/testdata/01191_rename_dictionary/query.sql new file mode 100644 index 
000000000..98e31f665 --- /dev/null +++ b/parser/testdata/01191_rename_dictionary/query.sql @@ -0,0 +1,48 @@ +-- Tags: no-parallel, memory-engine + +DROP DATABASE IF EXISTS test_01191; +CREATE DATABASE test_01191 ENGINE=Atomic; + +CREATE TABLE test_01191._ (n UInt64, s String) ENGINE = Memory(); +CREATE TABLE test_01191.t (n UInt64, s String) ENGINE = Memory(); + +CREATE DICTIONARY test_01191.dict (n UInt64, s String) +PRIMARY KEY n +LAYOUT(DIRECT()) +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE '_' DB 'test_01191')); + +INSERT INTO test_01191._ VALUES (42, 'test'); + +SELECT name, status FROM system.dictionaries WHERE database='test_01191'; +SELECT name, engine FROM system.tables WHERE database='test_01191' ORDER BY name; + +RENAME DICTIONARY test_01191.table TO test_01191.table1; -- {serverError UNKNOWN_TABLE} +EXCHANGE DICTIONARIES test_01191._ AND test_01191.dict; -- {serverError INFINITE_LOOP} +EXCHANGE TABLES test_01191.t AND test_01191.dict; +SELECT name, status FROM system.dictionaries WHERE database='test_01191'; +SELECT name, engine FROM system.tables WHERE database='test_01191' ORDER BY name; +SELECT dictGet(test_01191.t, 's', toUInt64(42)); +EXCHANGE TABLES test_01191.dict AND test_01191.t; +RENAME DICTIONARY test_01191.t TO test_01191.dict1; -- {serverError INCORRECT_QUERY} +DROP DICTIONARY test_01191.t; -- {serverError INCORRECT_QUERY} +DROP TABLE test_01191.t; + +DROP DATABASE IF EXISTS dummy_db; +CREATE DATABASE dummy_db ENGINE=Atomic; +RENAME DICTIONARY test_01191.dict TO dummy_db.dict1; +RENAME DICTIONARY dummy_db.dict1 TO test_01191.dict; +DROP DATABASE dummy_db; + +RENAME DICTIONARY test_01191.dict TO test_01191.dict1; + +SELECT name, status FROM system.dictionaries WHERE database='test_01191'; +SELECT name, engine FROM system.tables WHERE database='test_01191' ORDER BY name; +SELECT dictGet(test_01191.dict1, 's', toUInt64(42)); + +RENAME DICTIONARY test_01191.dict1 TO test_01191.dict2; + +SELECT name, status FROM system.dictionaries WHERE database='test_01191'; +SELECT name, engine FROM system.tables WHERE database='test_01191' ORDER BY name; +SELECT dictGet(test_01191.dict2, 's', toUInt64(42)); + +DROP DATABASE test_01191; diff --git a/parser/testdata/01197_summing_enum/ast.json b/parser/testdata/01197_summing_enum/ast.json new file mode 100644 index 000000000..33c2180a4 --- /dev/null +++ b/parser/testdata/01197_summing_enum/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery summing (children 1)" + }, + { + "explain": " Identifier summing" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001037701, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/01197_summing_enum/metadata.json b/parser/testdata/01197_summing_enum/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01197_summing_enum/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01197_summing_enum/query.sql b/parser/testdata/01197_summing_enum/query.sql new file mode 100644 index 000000000..c76f43aca --- /dev/null +++ b/parser/testdata/01197_summing_enum/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS summing; + +CREATE TABLE summing (k String, x UInt64, e Enum('hello' = 1, 'world' = 2)) ENGINE = SummingMergeTree ORDER BY k; +INSERT INTO summing SELECT '', 1, e FROM generateRandom('e Enum(\'hello\' = 1, \'world\' = 2)', 1) LIMIT 1000; +INSERT INTO summing SELECT '', 1, e FROM generateRandom('e 
Enum(\'hello\' = 1, \'world\' = 2)', 1) LIMIT 1000; + +OPTIMIZE TABLE summing; +SELECT k, x, e FROM summing; + +DROP TABLE summing; \ No newline at end of file diff --git a/parser/testdata/01198_plus_inf/ast.json b/parser/testdata/01198_plus_inf/ast.json new file mode 100644 index 000000000..cbba14335 --- /dev/null +++ b/parser/testdata/01198_plus_inf/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toFloat64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['+inf', '+Inf', '+INF', '+infinity', '+Infinity']" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001172675, + "rows_read": 9, + "bytes_read": 399 + } +} diff --git a/parser/testdata/01198_plus_inf/metadata.json b/parser/testdata/01198_plus_inf/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01198_plus_inf/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01198_plus_inf/query.sql b/parser/testdata/01198_plus_inf/query.sql new file mode 100644 index 000000000..e06faa2fd --- /dev/null +++ b/parser/testdata/01198_plus_inf/query.sql @@ -0,0 +1,3 @@ +SELECT DISTINCT toFloat64(arrayJoin(['+inf', '+Inf', '+INF', '+infinity', '+Infinity'])); +SELECT DISTINCT toFloat64(arrayJoin(['-inf', '-Inf', '-INF', '-infinity', '-Infinity'])); +SELECT DISTINCT toFloat64(arrayJoin(['inf', 'Inf', 'INF', 'infinity', 'Infinity'])); diff --git a/parser/testdata/01199_url_functions_path_without_schema_yiurule/ast.json b/parser/testdata/01199_url_functions_path_without_schema_yiurule/ast.json new file mode 100644 index 000000000..f051b0e1c --- /dev/null +++ b/parser/testdata/01199_url_functions_path_without_schema_yiurule/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function path (alias Path) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'www.example.com:443\/a\/b\/c'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001274891, + "rows_read": 7, + "bytes_read": 289 + } +} diff --git a/parser/testdata/01199_url_functions_path_without_schema_yiurule/metadata.json b/parser/testdata/01199_url_functions_path_without_schema_yiurule/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01199_url_functions_path_without_schema_yiurule/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01199_url_functions_path_without_schema_yiurule/query.sql b/parser/testdata/01199_url_functions_path_without_schema_yiurule/query.sql new file mode 100644 index 000000000..14b0f4fd8 --- /dev/null +++ b/parser/testdata/01199_url_functions_path_without_schema_yiurule/query.sql @@ -0,0 +1,2 @@ +SELECT path('www.example.com:443/a/b/c') AS Path; +SELECT 
decodeURLComponent(materialize(pathFull('www.example.com/?query=hello%20world+foo%2Bbar'))) AS Path; diff --git a/parser/testdata/01200_mutations_memory_consumption/ast.json b/parser/testdata/01200_mutations_memory_consumption/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01200_mutations_memory_consumption/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01200_mutations_memory_consumption/metadata.json b/parser/testdata/01200_mutations_memory_consumption/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01200_mutations_memory_consumption/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01200_mutations_memory_consumption/query.sql b/parser/testdata/01200_mutations_memory_consumption/query.sql new file mode 100644 index 000000000..faf29eb1f --- /dev/null +++ b/parser/testdata/01200_mutations_memory_consumption/query.sql @@ -0,0 +1,118 @@ +-- Tags: no-debug, no-parallel, long, no-object-storage, no-random-settings, no-random-merge-tree-settings +SET optimize_trivial_insert_select = 1; + +DROP TABLE IF EXISTS table_with_single_pk; + +CREATE TABLE table_with_single_pk +( + key UInt8, + value String +) +ENGINE = MergeTree +ORDER BY key +SETTINGS min_compress_block_size=65536, max_compress_block_size=65536; + +INSERT INTO table_with_single_pk SELECT number, toString(number % 10) FROM numbers(10000000); + +ALTER TABLE table_with_single_pk DELETE WHERE key % 77 = 0 SETTINGS mutations_sync = 1; + +SYSTEM FLUSH LOGS part_log; + +-- Memory usage for all mutations must be almost constant and less than +-- read_bytes +SELECT + arrayDistinct(groupArray(if (read_bytes >= peak_memory_usage, [1], [read_bytes, peak_memory_usage]))) +FROM + system.part_log +WHERE event_type = 'MutatePart' AND table = 'table_with_single_pk' AND database = currentDatabase(); + +DROP TABLE IF EXISTS table_with_single_pk; + +DROP TABLE IF EXISTS table_with_multi_pk; + +CREATE TABLE table_with_multi_pk +( + key1 UInt8, + key2 UInt32, + key3 DateTime64(6, 'UTC'), + value String +) +ENGINE = MergeTree +ORDER BY (key1, key2, key3) +SETTINGS min_compress_block_size=65536, max_compress_block_size=65536; + +INSERT INTO table_with_multi_pk SELECT number % 32, number, toDateTime('2019-10-01 00:00:00'), toString(number % 10) FROM numbers(10000000); + +ALTER TABLE table_with_multi_pk DELETE WHERE key1 % 77 = 0 SETTINGS mutations_sync = 1; + +SYSTEM FLUSH LOGS part_log; + +-- Memory usage for all mutations must be almost constant and less than +-- read_bytes +SELECT + arrayDistinct(groupArray(if (read_bytes >= peak_memory_usage, [1], [read_bytes, peak_memory_usage]))) + FROM + system.part_log + WHERE event_type = 'MutatePart' AND table = 'table_with_multi_pk' AND database = currentDatabase(); + +DROP TABLE IF EXISTS table_with_multi_pk; + + +DROP TABLE IF EXISTS table_with_function_pk; + + +CREATE TABLE table_with_function_pk + ( + key1 UInt8, + key2 UInt32, + key3 DateTime64(6, 'UTC'), + value String + ) +ENGINE = MergeTree +ORDER BY (cast(value as UInt64), key2) +SETTINGS min_compress_block_size=65536, max_compress_block_size=65536; + +INSERT INTO table_with_function_pk SELECT number % 32, number, toDateTime('2019-10-01 00:00:00'), toString(number % 10) FROM numbers(10000000); + +ALTER TABLE table_with_function_pk DELETE WHERE key1 % 77 = 0 SETTINGS mutations_sync = 1; + +SYSTEM FLUSH LOGS part_log; + +-- Memory usage for all mutations must be almost constant and less than +-- read_bytes +SELECT + 
arrayDistinct(groupArray(if (read_bytes >= peak_memory_usage, [1], [read_bytes, peak_memory_usage]))) + FROM + system.part_log + WHERE event_type = 'MutatePart' AND table = 'table_with_function_pk' AND database = currentDatabase(); + +DROP TABLE IF EXISTS table_with_function_pk; + +DROP TABLE IF EXISTS table_without_pk; + +CREATE TABLE table_without_pk +( + key1 UInt8, + key2 UInt32, + key3 DateTime64(6, 'UTC'), + value String +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS min_compress_block_size=65536, max_compress_block_size=65536; + +INSERT INTO table_without_pk SELECT number % 32, number, toDateTime('2019-10-01 00:00:00'), toString(number % 10) FROM numbers(10000000); + +ALTER TABLE table_without_pk DELETE WHERE key1 % 77 = 0 SETTINGS mutations_sync = 1; + +SYSTEM FLUSH LOGS part_log; + +-- Memory usage for all mutations must be almost constant and less than +-- read_bytes +SELECT + arrayDistinct(groupArray(if (read_bytes >= peak_memory_usage, [1], [read_bytes, peak_memory_usage]))) + FROM + system.part_log + WHERE event_type = 'MutatePart' AND table = 'table_without_pk' AND database = currentDatabase(); + +DROP TABLE IF EXISTS table_without_pk; diff --git a/parser/testdata/01201_drop_column_compact_part_replicated_zookeeper_long/ast.json b/parser/testdata/01201_drop_column_compact_part_replicated_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01201_drop_column_compact_part_replicated_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01201_drop_column_compact_part_replicated_zookeeper_long/metadata.json b/parser/testdata/01201_drop_column_compact_part_replicated_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01201_drop_column_compact_part_replicated_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01201_drop_column_compact_part_replicated_zookeeper_long/query.sql b/parser/testdata/01201_drop_column_compact_part_replicated_zookeeper_long/query.sql new file mode 100644 index 000000000..78adbee61 --- /dev/null +++ b/parser/testdata/01201_drop_column_compact_part_replicated_zookeeper_long/query.sql @@ -0,0 +1,31 @@ +-- Tags: long, replica + +-- in case of keeper fault injection on insert, set bigger number of retries because partitions +set insert_keeper_max_retries=100; +set insert_keeper_retry_max_backoff_ms=10; + +-- Testing basic functionality with compact parts +set replication_alter_partitions_sync = 2; +drop table if exists mt_compact; + +create table mt_compact(a UInt64, b UInt64 DEFAULT a * a, s String, n Nested(x UInt32, y String), lc LowCardinality(String)) +engine = ReplicatedMergeTree('/clickhouse/{database}/test_01201/mt_compact_replicated', '1') +order by a partition by a % 10 +settings index_granularity = 8, +min_rows_for_wide_part = 10; + +insert into mt_compact (a, s, n.y, lc) select number, toString((number * 2132214234 + 5434543) % 2133443), ['a', 'b', 'c'], number % 2 ? 
'bar' : 'baz' from numbers(90); + +insert into mt_compact (a, s, n.x, lc) select number % 3, toString((number * 75434535 + 645645) % 2133443), [1, 2], toString(number) from numbers(5); + +alter table mt_compact drop column n.y; +alter table mt_compact add column n.y Array(String) DEFAULT ['qwqw'] after n.x; +select * from mt_compact order by a, s limit 10; +select '====================='; + +alter table mt_compact update b = 42 where 1 SETTINGS mutations_sync = 2; + +select * from mt_compact where a > 1 order by a, s limit 10; +select '====================='; + +drop table if exists mt_compact; diff --git a/parser/testdata/01201_read_single_thread_in_order/ast.json b/parser/testdata/01201_read_single_thread_in_order/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01201_read_single_thread_in_order/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01201_read_single_thread_in_order/metadata.json b/parser/testdata/01201_read_single_thread_in_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01201_read_single_thread_in_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01201_read_single_thread_in_order/query.sql b/parser/testdata/01201_read_single_thread_in_order/query.sql new file mode 100644 index 000000000..63087c044 --- /dev/null +++ b/parser/testdata/01201_read_single_thread_in_order/query.sql @@ -0,0 +1,19 @@ +-- Tags: long, no-msan, no-distributed-cache + +DROP TABLE IF EXISTS t; + +CREATE TABLE t +( + number UInt64 +) +ENGINE = MergeTree +ORDER BY number +SETTINGS index_granularity = 128, ratio_of_defaults_for_sparse_serialization = 1.0, index_granularity_bytes = '10Mi'; + +SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0, max_insert_threads = 1; +INSERT INTO t SELECT number FROM numbers(10000000); + +SET max_threads = 1, max_block_size = 12345; +SELECT arrayDistinct(arrayPopFront(arrayDifference(groupArray(number)))) FROM t; + +DROP TABLE t; diff --git a/parser/testdata/01202_arrayROCAUC_special/ast.json b/parser/testdata/01202_arrayROCAUC_special/ast.json new file mode 100644 index 000000000..9ee65685f --- /dev/null +++ b/parser/testdata/01202_arrayROCAUC_special/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayROCAUC (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001042266, + "rows_read": 10, + "bytes_read": 374 + } +} diff --git a/parser/testdata/01202_arrayROCAUC_special/metadata.json b/parser/testdata/01202_arrayROCAUC_special/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01202_arrayROCAUC_special/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01202_arrayROCAUC_special/query.sql b/parser/testdata/01202_arrayROCAUC_special/query.sql new file mode 100644 index 000000000..169a790bc --- /dev/null +++ b/parser/testdata/01202_arrayROCAUC_special/query.sql @@ -0,0 +1,46 @@ +SELECT 
arrayROCAUC([], []); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayROCAUC([1], [1]); +SELECT arrayROCAUC([1], []); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayROCAUC([], [1]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayROCAUC([1, 2], [3]); -- { serverError BAD_ARGUMENTS } +SELECT arrayROCAUC([1], [2, 3]); -- { serverError BAD_ARGUMENTS } +SELECT arrayROCAUC([1, 1], [1, 1]); +SELECT arrayROCAUC([1, 1], [0, 0]); +SELECT arrayROCAUC([1, 1], [0, 1]); +SELECT arrayROCAUC([0, 1], [0, 1]); +SELECT arrayROCAUC([1, 0], [0, 1]); +SELECT arrayROCAUC([0, 0, 1], [0, 1, 1]); +SELECT arrayROCAUC([0, 1, 1], [0, 1, 1]); +SELECT arrayROCAUC([0, 1, 1], [0, 0, 1]); +SELECT arrayROCAUC([], [], true); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayROCAUC([1], [1], true); +SELECT arrayROCAUC([1], [], true); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayROCAUC([], [1], true); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayROCAUC([1, 2], [3], true); -- { serverError BAD_ARGUMENTS } +SELECT arrayROCAUC([1], [2, 3], true); -- { serverError BAD_ARGUMENTS } +SELECT arrayROCAUC([1, 1], [1, 1], true); +SELECT arrayROCAUC([1, 1], [0, 0], true); +SELECT arrayROCAUC([1, 1], [0, 1], true); +SELECT arrayROCAUC([0, 1], [0, 1], true); +SELECT arrayROCAUC([1, 0], [0, 1], true); +SELECT arrayROCAUC([0, 0, 1], [0, 1, 1], true); +SELECT arrayROCAUC([0, 1, 1], [0, 1, 1], true); +SELECT arrayROCAUC([0, 1, 1], [0, 0, 1], true); +SELECT arrayROCAUC([], [], false); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayROCAUC([1], [1], false); +SELECT arrayROCAUC([1], [], false); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayROCAUC([], [1], false); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayROCAUC([1, 2], [3], false); -- { serverError BAD_ARGUMENTS } +SELECT arrayROCAUC([1], [2, 3], false); -- { serverError BAD_ARGUMENTS } +SELECT arrayROCAUC([1, 1], [1, 1], false); +SELECT arrayROCAUC([1, 1], [0, 0], false); +SELECT arrayROCAUC([1, 1], [0, 1], false); +SELECT arrayROCAUC([0, 1], [0, 1], false); +SELECT arrayROCAUC([1, 0], [0, 1], false); +SELECT arrayROCAUC([0, 0, 1], [0, 1, 1], false); +SELECT arrayROCAUC([0, 1, 1], [0, 1, 1], false); +SELECT arrayROCAUC([0, 1, 1], [0, 0, 1], false); +SELECT arrayROCAUC([0, 1, 1], [0, 0, 1], false, [0, 0, 0], true); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT arrayROCAUC([0, 1, 1]); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT arrayROCAUC([0, 1, 1], [0, 0, 1], 'false'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayROCAUC([0, 1, 1], [0, 0, 1], 4); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/01210_drop_view/ast.json b/parser/testdata/01210_drop_view/ast.json new file mode 100644 index 000000000..32053a31b --- /dev/null +++ b/parser/testdata/01210_drop_view/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery v_01210 (children 1)" + }, + { + "explain": " Identifier v_01210" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001236516, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/01210_drop_view/metadata.json b/parser/testdata/01210_drop_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01210_drop_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01210_drop_view/query.sql b/parser/testdata/01210_drop_view/query.sql new file mode 100644 
index 000000000..eccf4252e --- /dev/null +++ b/parser/testdata/01210_drop_view/query.sql @@ -0,0 +1,9 @@ +DROP VIEW IF EXISTS v_01210; +DROP TABLE IF EXISTS mv_01210; +DROP TABLE IF EXISTS `.inner.mv_01210`; + +CREATE VIEW IF NOT EXISTS v_01210 AS SELECT 1; +CREATE MATERIALIZED VIEW IF NOT EXISTS mv_01210 ENGINE Log AS SELECT 1; + +DROP VIEW v_01210; +DROP VIEW mv_01210; diff --git a/parser/testdata/01211_optimize_skip_unused_shards_type_mismatch/ast.json b/parser/testdata/01211_optimize_skip_unused_shards_type_mismatch/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01211_optimize_skip_unused_shards_type_mismatch/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01211_optimize_skip_unused_shards_type_mismatch/metadata.json b/parser/testdata/01211_optimize_skip_unused_shards_type_mismatch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01211_optimize_skip_unused_shards_type_mismatch/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01211_optimize_skip_unused_shards_type_mismatch/query.sql b/parser/testdata/01211_optimize_skip_unused_shards_type_mismatch/query.sql new file mode 100644 index 000000000..0c9aa078e --- /dev/null +++ b/parser/testdata/01211_optimize_skip_unused_shards_type_mismatch/query.sql @@ -0,0 +1,16 @@ +-- Tags: shard + +set optimize_skip_unused_shards=1; + +drop table if exists data_02000; +drop table if exists dist_02000; + +create table data_02000 (key Int) Engine=Null(); +create table dist_02000 as data_02000 Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_02000, key); + +select * from data_02000 where key = 0xdeadbeafdeadbeaf; +select * from dist_02000 where key = 0xdeadbeafdeadbeaf settings force_optimize_skip_unused_shards=2; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS, CANNOT_CONVERT_TYPE } +select * from dist_02000 where key = 0xdeadbeafdeadbeaf; + +drop table data_02000; +drop table dist_02000; diff --git a/parser/testdata/01212_empty_join_and_totals/ast.json b/parser/testdata/01212_empty_join_and_totals/ast.json new file mode 100644 index 000000000..2f7a6dbfc --- /dev/null +++ b/parser/testdata/01212_empty_join_and_totals/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one (alias t1)" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001183562, + "rows_read": 9, + "bytes_read": 355 + } +} diff --git a/parser/testdata/01212_empty_join_and_totals/metadata.json b/parser/testdata/01212_empty_join_and_totals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01212_empty_join_and_totals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01212_empty_join_and_totals/query.sql b/parser/testdata/01212_empty_join_and_totals/query.sql new file mode 100644 index 000000000..2424e7bba --- /dev/null +++ b/parser/testdata/01212_empty_join_and_totals/query.sql @@ -0,0 +1,11 @@ +select * from system.one t1 +join system.one t2 
+on t1.dummy = t2.dummy +limit 0 +FORMAT TabSeparated; + +select * from system.one t1 +join system.one t2 +on t1.dummy = t2.dummy +where t2.dummy > 0 +FORMAT TabSeparated; diff --git a/parser/testdata/01213_alter_rename_column/ast.json b/parser/testdata/01213_alter_rename_column/ast.json new file mode 100644 index 000000000..bb7e1f1b1 --- /dev/null +++ b/parser/testdata/01213_alter_rename_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_for_rename (children 1)" + }, + { + "explain": " Identifier table_for_rename" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001067459, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/01213_alter_rename_column/metadata.json b/parser/testdata/01213_alter_rename_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01213_alter_rename_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01213_alter_rename_column/query.sql b/parser/testdata/01213_alter_rename_column/query.sql new file mode 100644 index 000000000..03dcf4d98 --- /dev/null +++ b/parser/testdata/01213_alter_rename_column/query.sql @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS table_for_rename; + +CREATE TABLE table_for_rename +( + date Date, + key UInt64, + value1 String, + value2 String, + value3 String +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY key; + +INSERT INTO table_for_rename SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number), toString(number) from numbers(9); + +SELECT value1 FROM table_for_rename WHERE key = 1; + +ALTER TABLE table_for_rename RENAME COLUMN value1 to renamed_value1; + +SELECT renamed_value1 FROM table_for_rename WHERE key = 1; + +SELECT * FROM table_for_rename WHERE key = 1 FORMAT TSVWithNames; + +ALTER TABLE table_for_rename RENAME COLUMN value3 to value2; --{serverError DUPLICATE_COLUMN} +ALTER TABLE table_for_rename RENAME COLUMN value3 TO r1, RENAME COLUMN value3 TO r2; --{serverError BAD_ARGUMENTS} +ALTER TABLE table_for_rename RENAME COLUMN value3 TO r1, RENAME COLUMN r1 TO value1; --{serverError NOT_IMPLEMENTED} + +ALTER TABLE table_for_rename RENAME COLUMN value2 TO renamed_value2, RENAME COLUMN value3 TO renamed_value3; + +SELECT renamed_value2, renamed_value3 FROM table_for_rename WHERE key = 7; + +SELECT * FROM table_for_rename WHERE key = 7 FORMAT TSVWithNames; + +ALTER TABLE table_for_rename RENAME COLUMN value100 to renamed_value100; --{serverError NOT_FOUND_COLUMN_IN_BLOCK} +ALTER TABLE table_for_rename RENAME COLUMN IF EXISTS value100 to renamed_value100; + +DROP TABLE IF EXISTS table_for_rename; diff --git a/parser/testdata/01213_alter_rename_compact_part/ast.json b/parser/testdata/01213_alter_rename_compact_part/ast.json new file mode 100644 index 000000000..ed8cbc514 --- /dev/null +++ b/parser/testdata/01213_alter_rename_compact_part/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_with_compact_parts (children 1)" + }, + { + "explain": " Identifier table_with_compact_parts" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001607191, + "rows_read": 2, + "bytes_read": 100 + } +} diff --git a/parser/testdata/01213_alter_rename_compact_part/metadata.json b/parser/testdata/01213_alter_rename_compact_part/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01213_alter_rename_compact_part/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01213_alter_rename_compact_part/query.sql b/parser/testdata/01213_alter_rename_compact_part/query.sql new file mode 100644 index 000000000..ebf93521d --- /dev/null +++ b/parser/testdata/01213_alter_rename_compact_part/query.sql @@ -0,0 +1,34 @@ +DROP TABLE IF EXISTS table_with_compact_parts; + +CREATE TABLE table_with_compact_parts +( + date Date, + key UInt64, + value1 String, + value2 String, + value3 String +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY key +settings index_granularity = 8, +min_rows_for_wide_part = 10, +min_bytes_for_wide_part = '10G'; + +INSERT INTO table_with_compact_parts SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number), toString(number) from numbers(9); + +SELECT value1 FROM table_with_compact_parts WHERE key = 1; + +ALTER TABLE table_with_compact_parts RENAME COLUMN value1 to renamed_value1; + +SELECT renamed_value1 FROM table_with_compact_parts WHERE key = 1; + +SELECT * FROM table_with_compact_parts WHERE key = 1 FORMAT TSVWithNames; + +ALTER TABLE table_with_compact_parts RENAME COLUMN value2 TO renamed_value2, RENAME COLUMN value3 TO renamed_value3; + +SELECT renamed_value2, renamed_value3 FROM table_with_compact_parts WHERE key = 7; + +SELECT * FROM table_with_compact_parts WHERE key = 7 FORMAT TSVWithNames; + +DROP TABLE IF EXISTS table_with_compact_parts; diff --git a/parser/testdata/01213_alter_rename_nested/ast.json b/parser/testdata/01213_alter_rename_nested/ast.json new file mode 100644 index 000000000..d548ff498 --- /dev/null +++ b/parser/testdata/01213_alter_rename_nested/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_for_rename_nested (children 1)" + }, + { + "explain": " Identifier table_for_rename_nested" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001247185, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/01213_alter_rename_nested/metadata.json b/parser/testdata/01213_alter_rename_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01213_alter_rename_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01213_alter_rename_nested/query.sql b/parser/testdata/01213_alter_rename_nested/query.sql new file mode 100644 index 000000000..cc607e0b4 --- /dev/null +++ b/parser/testdata/01213_alter_rename_nested/query.sql @@ -0,0 +1,42 @@ +DROP TABLE IF EXISTS table_for_rename_nested; +CREATE TABLE table_for_rename_nested +( + date Date, + key UInt64, + n Nested(x UInt32, y String), + value1 Array(Array(LowCardinality(String))) -- column with several files +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY key; + +INSERT INTO table_for_rename_nested (date, key, n.x, n.y, value1) SELECT toDate('2019-10-01'), number, [number + 1, number + 2, number + 3], ['a', 'b', 'c'], [[toString(number)]] FROM numbers(10); + +SELECT n.x FROM table_for_rename_nested WHERE key = 7; +SELECT n.y FROM table_for_rename_nested WHERE key = 7; + +SHOW CREATE TABLE table_for_rename_nested; + +ALTER TABLE table_for_rename_nested RENAME COLUMN n.x TO n.renamed_x; +ALTER TABLE table_for_rename_nested RENAME COLUMN n.y TO n.renamed_y; + +SHOW CREATE TABLE table_for_rename_nested; + +SELECT key, n.renamed_x FROM table_for_rename_nested WHERE key = 7; +SELECT key, n.renamed_y FROM table_for_rename_nested WHERE 
key = 7; + +ALTER TABLE table_for_rename_nested RENAME COLUMN n.renamed_x TO not_nested_x; --{serverError BAD_ARGUMENTS} + +-- Currently not implemented +ALTER TABLE table_for_rename_nested RENAME COLUMN n TO renamed_n; --{serverError NOT_IMPLEMENTED} + +ALTER TABLE table_for_rename_nested RENAME COLUMN value1 TO renamed_value1; + +SELECT renamed_value1 FROM table_for_rename_nested WHERE key = 7; + +SHOW CREATE TABLE table_for_rename_nested; + +SELECT * FROM table_for_rename_nested WHERE key = 7 FORMAT TSVWithNames; + +DROP TABLE IF EXISTS table_for_rename_nested; + diff --git a/parser/testdata/01213_alter_rename_primary_key_zookeeper_long/ast.json b/parser/testdata/01213_alter_rename_primary_key_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01213_alter_rename_primary_key_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01213_alter_rename_primary_key_zookeeper_long/metadata.json b/parser/testdata/01213_alter_rename_primary_key_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01213_alter_rename_primary_key_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01213_alter_rename_primary_key_zookeeper_long/query.sql b/parser/testdata/01213_alter_rename_primary_key_zookeeper_long/query.sql new file mode 100644 index 000000000..373e75466 --- /dev/null +++ b/parser/testdata/01213_alter_rename_primary_key_zookeeper_long/query.sql @@ -0,0 +1,55 @@ +-- Tags: long, zookeeper + +DROP TABLE IF EXISTS table_for_rename_pk; + +CREATE TABLE table_for_rename_pk +( + date Date, + key1 UInt64, + key2 UInt64, + key3 UInt64, + value1 String, + value2 String +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01213/table_for_rename_pk1', '1') +PARTITION BY date +ORDER BY (key1, pow(key2, 2), key3); + +INSERT INTO table_for_rename_pk SELECT toDate('2019-10-01') + number % 3, number, number, number, toString(number), toString(number) from numbers(9); + +SELECT key1, value1 FROM table_for_rename_pk WHERE key1 = 1 AND key2 = 1 AND key3 = 1; + +ALTER TABLE table_for_rename_pk RENAME COLUMN key1 TO renamed_key1; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +ALTER TABLE table_for_rename_pk RENAME COLUMN key3 TO renamed_key3; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +ALTER TABLE table_for_rename_pk RENAME COLUMN key2 TO renamed_key2; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +DROP TABLE IF EXISTS table_for_rename_pk; + +DROP TABLE IF EXISTS table_for_rename_with_primary_key; + +CREATE TABLE table_for_rename_with_primary_key +( + date Date, + key1 UInt64, + key2 UInt64, + key3 UInt64, + value1 String, + value2 String, + INDEX idx (value1) TYPE set(1) GRANULARITY 1 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01213/table_for_rename_pk2', '1') +PARTITION BY date +ORDER BY (key1, key2, key3) +PRIMARY KEY (key1, key2); + +INSERT INTO table_for_rename_with_primary_key SELECT toDate('2019-10-01') + number % 3, number, number, number, toString(number), toString(number) from numbers(9); + +ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN key1 TO renamed_key1; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN key2 TO renamed_key2; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +ALTER TABLE table_for_rename_with_primary_key RENAME COLUMN key3 TO renamed_key3; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +DROP TABLE 
IF EXISTS table_for_rename_with_primary_key; diff --git a/parser/testdata/01213_alter_rename_with_default_zookeeper_long/ast.json b/parser/testdata/01213_alter_rename_with_default_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01213_alter_rename_with_default_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01213_alter_rename_with_default_zookeeper_long/metadata.json b/parser/testdata/01213_alter_rename_with_default_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01213_alter_rename_with_default_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01213_alter_rename_with_default_zookeeper_long/query.sql b/parser/testdata/01213_alter_rename_with_default_zookeeper_long/query.sql new file mode 100644 index 000000000..c5c1f2ebf --- /dev/null +++ b/parser/testdata/01213_alter_rename_with_default_zookeeper_long/query.sql @@ -0,0 +1,65 @@ +-- Tags: long, zookeeper + +DROP TABLE IF EXISTS table_rename_with_default; + +CREATE TABLE table_rename_with_default +( + date Date, + key UInt64, + value1 String, + value2 String DEFAULT concat('Hello ', value1), + value3 String ALIAS concat('Word ', value1) +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY key; + +INSERT INTO table_rename_with_default (date, key, value1) SELECT toDateTime(toDate('2019-10-01') + number % 3, 'Asia/Istanbul'), number, toString(number) from numbers(9); + +SELECT * FROM table_rename_with_default WHERE key = 1 FORMAT TSVWithNames; + +SHOW CREATE TABLE table_rename_with_default; + +ALTER TABLE table_rename_with_default RENAME COLUMN value1 TO renamed_value1; + +SELECT * FROM table_rename_with_default WHERE key = 1 FORMAT TSVWithNames; + +SHOW CREATE TABLE table_rename_with_default; + +SELECT value2 FROM table_rename_with_default WHERE key = 1; +SELECT value3 FROM table_rename_with_default WHERE key = 1; + +DROP TABLE IF EXISTS table_rename_with_default; + +DROP TABLE IF EXISTS table_rename_with_ttl; + +CREATE TABLE table_rename_with_ttl +( + date1 Date, + date2 Date, + value1 String, + value2 String TTL date1 + INTERVAL 500 MONTH +) +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_01213/table_rename_with_ttl', '1') +ORDER BY tuple() +TTL date2 + INTERVAL 500 MONTH; + +INSERT INTO table_rename_with_ttl SELECT toDateTime(toDate('2019-10-01') + number % 3, 'Asia/Istanbul'), toDateTime(toDate('2018-10-01') + number % 3, 'Asia/Istanbul'), toString(number), toString(number) from numbers(9); + +SELECT * FROM table_rename_with_ttl WHERE value1 = '1' FORMAT TSVWithNames; + +SHOW CREATE TABLE table_rename_with_ttl; + +ALTER TABLE table_rename_with_ttl RENAME COLUMN date1 TO renamed_date1; + +SELECT * FROM table_rename_with_ttl WHERE value1 = '1' FORMAT TSVWithNames; + +SHOW CREATE TABLE table_rename_with_ttl; + +ALTER TABLE table_rename_with_ttl RENAME COLUMN date2 TO renamed_date2; + +SELECT * FROM table_rename_with_ttl WHERE value1 = '1' FORMAT TSVWithNames; + +SHOW CREATE TABLE table_rename_with_ttl; + +DROP TABLE IF EXISTS table_rename_with_ttl; diff --git a/parser/testdata/01213_alter_table_rename_nested/ast.json b/parser/testdata/01213_alter_table_rename_nested/ast.json new file mode 100644 index 000000000..12f9d530a --- /dev/null +++ b/parser/testdata/01213_alter_table_rename_nested/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 
table_for_rename_nested (children 1)" + }, + { + "explain": " Identifier table_for_rename_nested" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001049928, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/01213_alter_table_rename_nested/metadata.json b/parser/testdata/01213_alter_table_rename_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01213_alter_table_rename_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01213_alter_table_rename_nested/query.sql b/parser/testdata/01213_alter_table_rename_nested/query.sql new file mode 100644 index 000000000..5efc06581 --- /dev/null +++ b/parser/testdata/01213_alter_table_rename_nested/query.sql @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS table_for_rename_nested; +CREATE TABLE table_for_rename_nested +( + date Date, + key UInt64, + n Nested(x UInt32, y String), + value1 String +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY key; + +INSERT INTO table_for_rename_nested (date, key, n.x, n.y, value1) SELECT toDate('2019-10-01'), number, [number + 1, number + 2, number + 3], ['a', 'b', 'c'], toString(number) FROM numbers(10); + +SELECT n.x FROM table_for_rename_nested WHERE key = 7; +SELECT n.y FROM table_for_rename_nested WHERE key = 7; + +SHOW CREATE TABLE table_for_rename_nested; + +ALTER TABLE table_for_rename_nested RENAME COLUMN n.x TO n.renamed_x; +ALTER TABLE table_for_rename_nested RENAME COLUMN n.y TO n.renamed_y; + +SHOW CREATE TABLE table_for_rename_nested; + +SELECT key, n.renamed_x FROM table_for_rename_nested WHERE key = 7; +SELECT key, n.renamed_y FROM table_for_rename_nested WHERE key = 7; + +ALTER TABLE table_for_rename_nested RENAME COLUMN n.renamed_x TO not_nested_x; --{serverError BAD_ARGUMENTS} + +ALTER TABLE table_for_rename_nested RENAME COLUMN n.renamed_x TO q.renamed_x; --{serverError BAD_ARGUMENTS} + +ALTER TABLE table_for_rename_nested RENAME COLUMN value1 TO q.renamed_x; --{serverError BAD_ARGUMENTS} + +-- Currently not implemented +ALTER TABLE table_for_rename_nested RENAME COLUMN n TO renamed_n; --{serverError NOT_IMPLEMENTED} + +DROP TABLE IF EXISTS table_for_rename_nested; + diff --git a/parser/testdata/01213_optimize_skip_unused_shards_DISTINCT/ast.json b/parser/testdata/01213_optimize_skip_unused_shards_DISTINCT/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01213_optimize_skip_unused_shards_DISTINCT/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01213_optimize_skip_unused_shards_DISTINCT/metadata.json b/parser/testdata/01213_optimize_skip_unused_shards_DISTINCT/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01213_optimize_skip_unused_shards_DISTINCT/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01213_optimize_skip_unused_shards_DISTINCT/query.sql b/parser/testdata/01213_optimize_skip_unused_shards_DISTINCT/query.sql new file mode 100644 index 000000000..e2de2de07 --- /dev/null +++ b/parser/testdata/01213_optimize_skip_unused_shards_DISTINCT/query.sql @@ -0,0 +1,28 @@ +-- Tags: shard + +CREATE TABLE IF NOT EXISTS local_01213 (id Int) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE IF NOT EXISTS dist_01213 AS local_01213 ENGINE = Distributed(test_cluster_two_shards_localhost, currentDatabase(), local_01213, id); + +-- at least two parts +INSERT INTO local_01213 SELECT toString(number) FROM numbers(2); +INSERT INTO local_01213 SELECT toString(number) 
FROM numbers(2); + +-- check that without merge we will have two rows +SELECT 'distributed_group_by_no_merge'; +SELECT DISTINCT id FROM dist_01213 WHERE id = 1 SETTINGS distributed_group_by_no_merge=1; +-- check that with merge there will be only one row +SELECT 'optimize_skip_unused_shards'; +SELECT DISTINCT id FROM dist_01213 WHERE id = 1 SETTINGS optimize_skip_unused_shards=1; +-- check that querying all shards is ok +SELECT 'optimize_skip_unused_shards lack of WHERE (optimize_distributed_group_by_sharding_key=0)'; +SELECT DISTINCT id FROM dist_01213 SETTINGS optimize_skip_unused_shards=1, optimize_distributed_group_by_sharding_key=0; +-- with optimize_distributed_group_by_sharding_key=1 there will be 4 rows, +-- since DISTINCT will be done on each shard separately, and the initiator will +-- not do anything (since we use optimize_skip_unused_shards=1, which must +-- guarantee that the data has been INSERTed according to the sharding key, +-- which is not the case here, since we use one local table). +SELECT 'optimize_skip_unused_shards lack of WHERE (optimize_distributed_group_by_sharding_key=1)'; +SELECT DISTINCT id FROM dist_01213 SETTINGS optimize_skip_unused_shards=1, optimize_distributed_group_by_sharding_key=1; + +DROP TABLE local_01213; +DROP TABLE dist_01213; diff --git a/parser/testdata/01213_point_in_Myanmar/ast.json b/parser/testdata/01213_point_in_Myanmar/ast.json new file mode 100644 index 000000000..3bef6379d --- /dev/null +++ b/parser/testdata/01213_point_in_Myanmar/ast.json @@ -0,0 +1,379 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 1)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Function pointInPolygon (children 1)" + }, + { + "explain": "     ExpressionList (children 2)" + }, + { + "explain": "      Literal Tuple_(Float64_97.66905, Float64_16.5026053)" + }, + { + "explain": "      Function array (children 1)" + }, + { + "explain": "       ExpressionList (children 110)" + }, + { + "explain": "        Literal Tuple_(Float64_97.66905, Float64_16.5026053)" + }, + { + "explain": "        Literal Tuple_(Float64_97.667878, Float64_16.4979175)" + }, + { + "explain": "        Literal Tuple_(Float64_97.661433, Float64_16.4917645)" + }, + { + "explain": "        Literal Tuple_(Float64_97.656745, Float64_16.4859047)" + }, + { + "explain": "        Literal Tuple_(Float64_97.656745, Float64_16.4818029)" + }, + { + "explain": "        Literal Tuple_(Float64_97.658796, Float64_16.4785801)" + }, + { + "explain": "        Literal Tuple_(Float64_97.665535, Float64_16.4753572)" + }, + { + "explain": "        Literal Tuple_(Float64_97.670808, Float64_16.4730135)" + }, + { + "explain": "        Literal Tuple_(Float64_97.676082, Float64_16.4697907)" + }, + { + "explain": "        Literal Tuple_(Float64_97.680477, Float64_16.4677398)" + }, + { + "explain": "        Literal Tuple_(Float64_97.68575, Float64_16.4686189)" + }, + { + "explain": "        Literal Tuple_(Float64_97.689559, Float64_16.4727207)" + }, + { + "explain": "        Literal Tuple_(Float64_97.69454, Float64_16.4744788)" + }, + { + "explain": "        Literal Tuple_(Float64_97.698055, Float64_16.4747718)" + }, + { + "explain": "        Literal Tuple_(Float64_97.702157, Float64_16.4724279)" + }, + { + "explain": "        Literal Tuple_(Float64_97.703036, Float64_16.4683261)" + }, + { + "explain": "        Literal Tuple_(Float64_97.703036, Float64_16.4633453)" + }, + { + "explain": "        Literal Tuple_(Float64_97.702451, Float64_16.4594354)" + }, + { +
"explain": " Literal Tuple_(Float64_97.699533, Float64_16.4539205)" + }, + { + "explain": " Literal Tuple_(Float64_97.699106, Float64_16.4521467)" + }, + { + "explain": " Literal Tuple_(Float64_97.699896, Float64_16.4500714)" + }, + { + "explain": " Literal Tuple_(Float64_97.701852, Float64_16.4474887)" + }, + { + "explain": " Literal Tuple_(Float64_97.701272, Float64_16.4460233)" + }, + { + "explain": " Literal Tuple_(Float64_97.699896, Float64_16.4439216)" + }, + { + "explain": " Literal Tuple_(Float64_97.699857, Float64_16.4425297)" + }, + { + "explain": " Literal Tuple_(Float64_97.700705, Float64_16.4417585)" + }, + { + "explain": " Literal Tuple_(Float64_97.699266, Float64_16.4404319)" + }, + { + "explain": " Literal Tuple_(Float64_97.696817, Float64_16.439585)" + }, + { + "explain": " Literal Tuple_(Float64_97.69468, Float64_16.4391501)" + }, + { + "explain": " Literal Tuple_(Float64_97.690854, Float64_16.439294)" + }, + { + "explain": " Literal Tuple_(Float64_97.686571, Float64_16.4407665)" + }, + { + "explain": " Literal Tuple_(Float64_97.683728, Float64_16.4428458)" + }, + { + "explain": " Literal Tuple_(Float64_97.680647, Float64_16.444719)" + }, + { + "explain": " Literal Tuple_(Float64_97.678369, Float64_16.445322)" + }, + { + "explain": " Literal Tuple_(Float64_97.675195, Float64_16.4448526)" + }, + { + "explain": " Literal Tuple_(Float64_97.672627, Float64_16.4435941)" + }, + { + "explain": " Literal Tuple_(Float64_97.670568, Float64_16.4419727)" + }, + { + "explain": " Literal Tuple_(Float64_97.667276, Float64_16.4410039)" + }, + { + "explain": " Literal Tuple_(Float64_97.666215, Float64_16.439402)" + }, + { + "explain": " Literal Tuple_(Float64_97.66599, Float64_16.43656)" + }, + { + "explain": " Literal Tuple_(Float64_97.664579, Float64_16.435632)" + }, + { + "explain": " Literal Tuple_(Float64_97.66195, Float64_16.4344612)" + }, + { + "explain": " Literal Tuple_(Float64_97.659174, Float64_16.4324549)" + }, + { + "explain": " Literal Tuple_(Float64_97.658693, Float64_16.4290256)" + }, + { + "explain": " Literal Tuple_(Float64_97.659289, Float64_16.4246502)" + }, + { + "explain": " Literal Tuple_(Float64_97.660882, Float64_16.422609)" + }, + { + "explain": " Literal Tuple_(Float64_97.663533, Float64_16.4225057)" + }, + { + "explain": " Literal Tuple_(Float64_97.666402, Float64_16.4210711)" + }, + { + "explain": " Literal Tuple_(Float64_97.67148, Float64_16.4170395)" + }, + { + "explain": " Literal Tuple_(Float64_97.673433, Float64_16.4146478)" + }, + { + "explain": " Literal Tuple_(Float64_97.674184, Float64_16.4124121)" + }, + { + "explain": " Literal Tuple_(Float64_97.6742, Float64_16.4085257)" + }, + { + "explain": " Literal Tuple_(Float64_97.674894, Float64_16.4055148)" + }, + { + "explain": " Literal Tuple_(Float64_97.675906, Float64_16.4019452)" + }, + { + "explain": " Literal Tuple_(Float64_97.675287, Float64_16.3996593)" + }, + { + "explain": " Literal Tuple_(Float64_97.675062, Float64_16.3963334)" + }, + { + "explain": " Literal Tuple_(Float64_97.675798, Float64_16.3936434)" + }, + { + "explain": " Literal Tuple_(Float64_97.675676, Float64_16.3909321)" + }, + { + "explain": " Literal Tuple_(Float64_97.67508, Float64_16.386655)" + }, + { + "explain": " Literal Tuple_(Float64_97.679839, Float64_16.386241)" + }, + { + "explain": " Literal Tuple_(Float64_97.689403, Float64_16.3726191)" + }, + { + "explain": " Literal Tuple_(Float64_97.692011, Float64_16.372909)" + }, + { + "explain": " Literal Tuple_(Float64_97.696359, Float64_16.3679819)" + }, + { + "explain": " 
Literal Tuple_(Float64_97.699866, Float64_16.360968)" + }, + { + "explain": " Literal Tuple_(Float64_97.697233, Float64_16.3609438)" + }, + { + "explain": " Literal Tuple_(Float64_97.693077, Float64_16.3596272)" + }, + { + "explain": " Literal Tuple_(Float64_97.686631, Float64_16.3584552)" + }, + { + "explain": " Literal Tuple_(Float64_97.68165, Float64_16.3558182)" + }, + { + "explain": " Literal Tuple_(Float64_97.674619, Float64_16.3496653)" + }, + { + "explain": " Literal Tuple_(Float64_97.667588, Float64_16.3482003)" + }, + { + "explain": " Literal Tuple_(Float64_97.664072, Float64_16.3502511)" + }, + { + "explain": " Literal Tuple_(Float64_97.659384, Float64_16.3540599)" + }, + { + "explain": " Literal Tuple_(Float64_97.652353, Float64_16.3578686)" + }, + { + "explain": " Literal Tuple_(Float64_97.649716, Float64_16.3625565)" + }, + { + "explain": " Literal Tuple_(Float64_97.650595, Float64_16.3672443)" + }, + { + "explain": " Literal Tuple_(Float64_97.65206, Float64_16.3701742)" + }, + { + "explain": " Literal Tuple_(Float64_97.65206, Float64_16.3733971)" + }, + { + "explain": " Literal Tuple_(Float64_97.651181, Float64_16.3760339)" + }, + { + "explain": " Literal Tuple_(Float64_97.646493, Float64_16.3763268)" + }, + { + "explain": " Literal Tuple_(Float64_97.6462, Float64_16.3801357)" + }, + { + "explain": " Literal Tuple_(Float64_97.646786, Float64_16.3851165)" + }, + { + "explain": " Literal Tuple_(Float64_97.643563, Float64_16.3883393)" + }, + { + "explain": " Literal Tuple_(Float64_97.638583, Float64_16.3889252)" + }, + { + "explain": " Literal Tuple_(Float64_97.636239, Float64_16.392148)" + }, + { + "explain": " Literal Tuple_(Float64_97.630379, Float64_16.3933199)" + }, + { + "explain": " Literal Tuple_(Float64_97.629132, Float64_16.3964903)" + }, + { + "explain": " Literal Tuple_(Float64_97.624347, Float64_16.4056104)" + }, + { + "explain": " Literal Tuple_(Float64_97.615377, Float64_16.4165245)" + }, + { + "explain": " Literal Tuple_(Float64_97.614779, Float64_16.4229534)" + }, + { + "explain": " Literal Tuple_(Float64_97.611938, Float64_16.4335685)" + }, + { + "explain": " Literal Tuple_(Float64_97.613882, Float64_16.4410439)" + }, + { + "explain": " Literal Tuple_(Float64_97.619713, Float64_16.4461272)" + }, + { + "explain": " Literal Tuple_(Float64_97.62375, Float64_16.4542007)" + }, + { + "explain": " Literal Tuple_(Float64_97.62345, Float64_16.4640683)" + }, + { + "explain": " Literal Tuple_(Float64_97.618965, Float64_16.4793181)" + }, + { + "explain": " Literal Tuple_(Float64_97.617321, Float64_16.4884382)" + }, + { + "explain": " Literal Tuple_(Float64_97.617747, Float64_16.4985751)" + }, + { + "explain": " Literal Tuple_(Float64_97.623301, Float64_16.5026416)" + }, + { + "explain": " Literal Tuple_(Float64_97.629303, Float64_16.5016624)" + }, + { + "explain": " Literal Tuple_(Float64_97.63272, Float64_16.4986048)" + }, + { + "explain": " Literal Tuple_(Float64_97.640862, Float64_16.498226)" + }, + { + "explain": " Literal Tuple_(Float64_97.647134, Float64_16.5006382)" + }, + { + "explain": " Literal Tuple_(Float64_97.650873, Float64_16.5051263)" + }, + { + "explain": " Literal Tuple_(Float64_97.654987, Float64_16.5089598)" + }, + { + "explain": " Literal Tuple_(Float64_97.65639, Float64_16.5118583)" + }, + { + "explain": " Literal Tuple_(Float64_97.658166, Float64_16.5160658)" + }, + { + "explain": " Literal Tuple_(Float64_97.660395, Float64_16.5197566)" + }, + { + "explain": " Literal Tuple_(Float64_97.66612, Float64_16.5140318)" + }, + { + "explain": " Literal 
Tuple_(Float64_97.668757, Float64_16.507879)" + }, + { + "explain": " Literal Tuple_(Float64_97.66905, Float64_16.5026053)" + } + ], + + "rows": 119, + + "statistics": + { + "elapsed": 0.002289116, + "rows_read": 119, + "bytes_read": 7941 + } +} diff --git a/parser/testdata/01213_point_in_Myanmar/metadata.json b/parser/testdata/01213_point_in_Myanmar/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01213_point_in_Myanmar/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01213_point_in_Myanmar/query.sql b/parser/testdata/01213_point_in_Myanmar/query.sql new file mode 100644 index 000000000..fe0a3c37e --- /dev/null +++ b/parser/testdata/01213_point_in_Myanmar/query.sql @@ -0,0 +1,2 @@ +SELECT pointInPolygon((97.66905, 16.5026053), [(97.66905, 16.5026053), (97.667878, 16.4979175), (97.661433, 16.4917645), (97.656745, 16.4859047), (97.656745, 16.4818029), (97.658796, 16.4785801), (97.665535, 16.4753572), (97.670808, 16.4730135), (97.676082, 16.4697907), (97.680477, 16.4677398), (97.68575, 16.4686189), (97.689559, 16.4727207), (97.69454, 16.4744788), (97.698055, 16.4747718), (97.702157, 16.4724279), (97.703036, 16.4683261), (97.703036, 16.4633453), (97.702451, 16.4594354), (97.699533, 16.4539205), (97.699106, 16.4521467), (97.699896, 16.4500714), (97.701852, 16.4474887), (97.701272, 16.4460233), (97.699896, 16.4439216), (97.699857, 16.4425297), (97.700705, 16.4417585), (97.699266, 16.4404319), (97.696817, 16.439585), (97.69468, 16.4391501), (97.690854, 16.439294), (97.686571, 16.4407665), (97.683728, 16.4428458), (97.680647, 16.444719), (97.678369, 16.445322), (97.675195, 16.4448526), (97.672627, 16.4435941), (97.670568, 16.4419727), (97.667276, 16.4410039), (97.666215, 16.439402), (97.66599, 16.43656), (97.664579, 16.435632), (97.66195, 16.4344612), (97.659174, 16.4324549), (97.658693, 16.4290256), (97.659289, 16.4246502), (97.660882, 16.422609), (97.663533, 16.4225057), (97.666402, 16.4210711), (97.67148, 16.4170395), (97.673433, 16.4146478), (97.674184, 16.4124121), (97.6742, 16.4085257), (97.674894, 16.4055148), (97.675906, 16.4019452), (97.675287, 16.3996593), (97.675062, 16.3963334), (97.675798, 16.3936434), (97.675676, 16.3909321), (97.67508, 16.386655), (97.679839, 16.386241), (97.689403, 16.3726191), (97.692011, 16.372909), (97.696359, 16.3679819), (97.699866, 16.360968), (97.697233, 16.3609438), (97.693077, 16.3596272), (97.686631, 16.3584552), (97.68165, 16.3558182), (97.674619, 16.3496653), (97.667588, 16.3482003), (97.664072, 16.3502511), (97.659384, 16.3540599), (97.652353, 16.3578686), (97.649716, 16.3625565), (97.650595, 16.3672443), (97.65206, 16.3701742), (97.65206, 16.3733971), (97.651181, 16.3760339), (97.646493, 16.3763268), (97.6462, 16.3801357), (97.646786, 16.3851165), (97.643563, 16.3883393), (97.638583, 16.3889252), (97.636239, 16.392148), (97.630379, 16.3933199), (97.629132, 16.3964903), (97.624347, 16.4056104), (97.615377, 16.4165245), (97.614779, 16.4229534), (97.611938, 16.4335685), (97.613882, 16.4410439), (97.619713, 16.4461272), (97.62375, 16.4542007), (97.62345, 16.4640683), (97.618965, 16.4793181), (97.617321, 16.4884382), (97.617747, 16.4985751), (97.623301, 16.5026416), (97.629303, 16.5016624), (97.63272, 16.4986048), (97.640862, 16.498226), (97.647134, 16.5006382), (97.650873, 16.5051263), (97.654987, 16.5089598), (97.65639, 16.5118583), (97.658166, 16.5160658), (97.660395, 16.5197566), (97.66612, 16.5140318), (97.668757, 16.507879), (97.66905, 16.5026053)]); +SELECT 
pointInPolygon((97.641933, 16.5076538), [(97.66905, 16.5026053), (97.667878, 16.4979175), (97.661433, 16.4917645), (97.656745, 16.4859047), (97.656745, 16.4818029), (97.658796, 16.4785801), (97.665535, 16.4753572), (97.670808, 16.4730135), (97.676082, 16.4697907), (97.680477, 16.4677398), (97.68575, 16.4686189), (97.689559, 16.4727207), (97.69454, 16.4744788), (97.698055, 16.4747718), (97.702157, 16.4724279), (97.703036, 16.4683261), (97.703036, 16.4633453), (97.702451, 16.4594354), (97.699533, 16.4539205), (97.699106, 16.4521467), (97.699896, 16.4500714), (97.701852, 16.4474887), (97.701272, 16.4460233), (97.699896, 16.4439216), (97.699857, 16.4425297), (97.700705, 16.4417585), (97.699266, 16.4404319), (97.696817, 16.439585), (97.69468, 16.4391501), (97.690854, 16.439294), (97.686571, 16.4407665), (97.683728, 16.4428458), (97.680647, 16.444719), (97.678369, 16.445322), (97.675195, 16.4448526), (97.672627, 16.4435941), (97.670568, 16.4419727), (97.667276, 16.4410039), (97.666215, 16.439402), (97.66599, 16.43656), (97.664579, 16.435632), (97.66195, 16.4344612), (97.659174, 16.4324549), (97.658693, 16.4290256), (97.659289, 16.4246502), (97.660882, 16.422609), (97.663533, 16.4225057), (97.666402, 16.4210711), (97.67148, 16.4170395), (97.673433, 16.4146478), (97.674184, 16.4124121), (97.6742, 16.4085257), (97.674894, 16.4055148), (97.675906, 16.4019452), (97.675287, 16.3996593), (97.675062, 16.3963334), (97.675798, 16.3936434), (97.675676, 16.3909321), (97.67508, 16.386655), (97.679839, 16.386241), (97.689403, 16.3726191), (97.692011, 16.372909), (97.696359, 16.3679819), (97.699866, 16.360968), (97.697233, 16.3609438), (97.693077, 16.3596272), (97.686631, 16.3584552), (97.68165, 16.3558182), (97.674619, 16.3496653), (97.667588, 16.3482003), (97.664072, 16.3502511), (97.659384, 16.3540599), (97.652353, 16.3578686), (97.649716, 16.3625565), (97.650595, 16.3672443), (97.65206, 16.3701742), (97.65206, 16.3733971), (97.651181, 16.3760339), (97.646493, 16.3763268), (97.6462, 16.3801357), (97.646786, 16.3851165), (97.643563, 16.3883393), (97.638583, 16.3889252), (97.636239, 16.392148), (97.630379, 16.3933199), (97.629132, 16.3964903), (97.624347, 16.4056104), (97.615377, 16.4165245), (97.614779, 16.4229534), (97.611938, 16.4335685), (97.613882, 16.4410439), (97.619713, 16.4461272), (97.62375, 16.4542007), (97.62345, 16.4640683), (97.618965, 16.4793181), (97.617321, 16.4884382), (97.617747, 16.4985751), (97.623301, 16.5026416), (97.629303, 16.5016624), (97.63272, 16.4986048), (97.640862, 16.498226), (97.647134, 16.5006382), (97.650873, 16.5051263), (97.654987, 16.5089598), (97.65639, 16.5118583), (97.658166, 16.5160658), (97.660395, 16.5197566), (97.66612, 16.5140318), (97.668757, 16.507879), (97.66905, 16.5026053)], [(97.666491, 16.5599384), (97.665077, 16.5589283), (97.662417, 16.5607013), (97.659315, 16.5700096), (97.655104, 16.5821991), (97.654882, 16.5855235), (97.654593, 16.5931971), (97.659381, 16.5957754), (97.669927, 16.5995844), (97.683111, 16.6022215), (97.695123, 16.6028077), (97.704206, 16.5984131), (97.704499, 16.5825917), (97.70007, 16.5731793), (97.698976, 16.572997), (97.697211, 16.5717833), (97.692114, 16.5691237), (97.684358, 16.5691235), (97.675936, 16.567572), (97.66818, 16.5611446), (97.666491, 16.5599384)], [(97.653232, 16.574263), (97.652445, 16.5679244), (97.655949, 16.5683449), (97.659594, 16.5627383), (97.659734, 16.5585335), (97.662257, 16.5550293), (97.660855, 16.5512449), (97.658613, 16.5490023), (97.659173, 16.544517), (97.654407, 16.5408727), (97.641933, 16.5363874), 
(97.63086, 16.5303604), (97.628057, 16.5312014), (97.625954, 16.5415736), (97.63072, 16.5613367), (97.638569, 16.5820811), (97.645017, 16.5892294), (97.649743, 16.5887155), (97.653232, 16.574263)], [(97.625696, 16.5488739), (97.623579, 16.5396268), (97.620589, 16.5423678), (97.616353, 16.5530826), (97.611619, 16.5637974), (97.61112, 16.5725187), (97.613339, 16.5792777), (97.635042, 16.5874696), (97.64152, 16.5981844), (97.643015, 16.605909), (97.645756, 16.6066565), (97.650989, 16.6034172), (97.644012, 16.5984335), (97.64219, 16.5877556), (97.636038, 16.5804926), (97.63252, 16.570307), (97.628314, 16.5603089), (97.625696, 16.5488739)], [(97.607902, 16.3798949), (97.604911, 16.3719709), (97.602519, 16.3749612), (97.601323, 16.3955933), (97.604014, 16.406059), (97.604762, 16.4084511), (97.607896, 16.4081673), (97.609397, 16.397537), (97.609397, 16.3882674), (97.607902, 16.3798949)], [(97.64902, 16.5107163), (97.645437, 16.5073734), (97.641933, 16.5076538), (97.641933, 16.5108776), (97.645717, 16.5160636), (97.651112, 16.5211243), (97.655721, 16.5238328), (97.656392, 16.5184349), (97.654359, 16.515696), (97.64902, 16.5107163)]); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/01214_point_in_Mecca/ast.json b/parser/testdata/01214_point_in_Mecca/ast.json new file mode 100644 index 000000000..ef53b1bd8 --- /dev/null +++ b/parser/testdata/01214_point_in_Mecca/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Outer part of Mecca'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001184562, + "rows_read": 5, + "bytes_read": 190 + } +} diff --git a/parser/testdata/01214_point_in_Mecca/metadata.json b/parser/testdata/01214_point_in_Mecca/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01214_point_in_Mecca/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01214_point_in_Mecca/query.sql b/parser/testdata/01214_point_in_Mecca/query.sql new file mode 100644 index 000000000..b895fc3af --- /dev/null +++ b/parser/testdata/01214_point_in_Mecca/query.sql @@ -0,0 +1,205 @@ +SELECT 'Outer part of Mecca'; +SELECT pointInPolygon((39.840202, 21.451471), [(39.90553, 21.38668),(39.91034, 21.38608),(39.91834, 21.38048),(39.93078, 21.3627),(39.94141, 21.36278),(39.94753, 21.36075),(39.94986, 21.35894),(39.95349, 21.3533),(39.97833, 21.3815),(39.98132, 21.38231),(39.98851, 21.38151),(39.99076, 21.37747),(39.98987, 21.36908),(39.98791, 21.36332),(39.99543, 21.35687),(39.99827, 21.34722),(39.99784, 21.34271),(39.99632, 21.33955),(39.99322, 21.3366),(39.98908, 21.33496),(39.9692, 21.3331),(39.95841, 21.3388),(39.95109, 21.34412),(39.95026, 21.34825),(39.95203, 21.35168),(39.94753, 21.35845),(39.94165, 21.36077),(39.93272, 21.36009),(39.92969, 21.36103),(39.91928, 21.37669),(39.91374, 21.3816),(39.91056, 21.38296),(39.90839, 21.38361),(39.90059, 21.38257),(39.8978, 21.37712),(39.90051, 21.37335),(39.90033, 21.37195),(39.89559, 21.37533),(39.89099, 21.36937),(39.89101, 21.3661),(39.89465, 21.364),(39.92418, 21.35725),(39.92838, 21.35433),(39.94394, 21.33915),(39.96711, 21.32785),(39.97437, 21.32734),(39.99523, 21.33055),(40.01271, 21.3293),(40.01345, 21.3276),(40.00731, 21.32689),(39.99189, 21.32817),(39.97264, 
21.3251),(39.96216, 21.32725),(39.95825, 21.32598),(39.95783, 21.32734),(39.96017, 21.32834),(39.94652, 21.33514),(39.94578, 21.33237),(39.94438, 21.33259),(39.94454, 21.33563),(39.92448, 21.3545),(39.92007, 21.3563),(39.89586, 21.3615),(39.86239, 21.35659),(39.85241, 21.35319),(39.85183, 21.35189),(39.84187, 21.3498),(39.83475, 21.35001),(39.82272, 21.35322),(39.80957, 21.34986),(39.80645, 21.34645),(39.80654, 21.34104),(39.82207, 21.29116),(39.82732, 21.26685),(39.82657, 21.22894),(39.82468, 21.22761),(39.82364, 21.22857),(39.82459, 21.22961),(39.82535, 21.26649),(39.82016, 21.29057),(39.81723, 21.29965),(39.81585, 21.30012),(39.81652, 21.30158),(39.81475, 21.30815),(39.80378, 21.34492),(39.8023, 21.34648),(39.79042, 21.34584),(39.78385, 21.34687),(39.77227, 21.34595),(39.7601, 21.34279),(39.73947, 21.34141),(39.71051, 21.34288),(39.70233, 21.34041),(39.68839, 21.33943),(39.65964, 21.33189),(39.64627, 21.3344),(39.64733, 21.33592),(39.65598, 21.33404),(39.66095, 21.33402),(39.68789, 21.34136),(39.70198, 21.34238),(39.71031, 21.34487),(39.74208, 21.34353),(39.76109, 21.34495),(39.77363, 21.34845),(39.77446, 21.35039),(39.76342, 21.37977),(39.75978, 21.39951),(39.75655, 21.40491),(39.73768, 21.39607),(39.72646, 21.38795),(39.71285, 21.3969),(39.69867, 21.37979),(39.66651, 21.36156),(39.6662, 21.36338),(39.69742, 21.38135),(39.7112, 21.39803),(39.70333, 21.40335),(39.70227, 21.40556),(39.70273, 21.40892),(39.71038, 21.41608),(39.71004, 21.42139),(39.68758, 21.414),(39.68099, 21.41398),(39.63179, 21.4366),(39.62927917729339, 21.43855995858338),(39.629299942421596, 21.44105336136311),(39.63273, 21.43836),(39.65768, 21.42753),(39.67404, 21.419),(39.6815, 21.41592),(39.68534, 21.41555),(39.7182, 21.42582),(39.72915, 21.4318),(39.72926, 21.43473),(39.72198, 21.45071),(39.72058, 21.46018),(39.72262, 21.46776),(39.72871, 21.47851),(39.73639, 21.48854),(39.73607, 21.50077),(39.73921, 21.50608),(39.74358, 21.50869),(39.77204, 21.51334),(39.78965, 21.51773),(39.78925, 21.52186),(39.77895, 21.53768),(39.77335, 21.55878),(39.77409, 21.55998),(39.77529, 21.55924),(39.78151, 21.53691),(39.79101, 21.52282),(39.79216, 21.51796),(39.79392, 21.51725),(39.793, 21.51609),(39.79416, 21.49641),(39.79816, 21.47216),(39.8004, 21.46856),(39.80363, 21.4669),(39.80549, 21.46717),(39.80785, 21.46483),(39.8079, 21.45844),(39.80961, 21.45125),(39.81407, 21.45956),(39.8189, 21.46404),(39.82568, 21.4678),(39.82961, 21.47351),(39.83079, 21.47799),(39.84122, 21.47849),(39.84401, 21.47583),(39.84423, 21.47113),(39.84321, 21.46813),(39.84421, 21.46059),(39.85356, 21.44251),(39.85688, 21.44231),(39.86433, 21.45155),(39.86762, 21.45385),(39.87655, 21.45623),(39.88419, 21.46034),(39.89153, 21.46165),(39.8939, 21.46349),(39.89668, 21.46326),(39.9075, 21.47496),(39.91921, 21.48088),(39.9355, 21.48404),(39.94435, 21.48781),(39.96608, 21.48881),(39.96569, 21.49663),(39.95135, 21.53005),(39.94352, 21.56004),(39.94384, 21.56417),(39.94803, 21.56766),(39.95376, 21.56964),(39.95497, 21.56891),(39.9538, 21.56747),(39.94686, 21.56478),(39.94534, 21.56123),(39.95324, 21.53069),(39.96782, 21.49652),(39.96808, 21.48868),(39.98958, 21.49423),(40.00615, 21.4944),(40.01566, 21.50406),(40.03305, 21.5127),(40.0475, 21.52172),(40.05278, 21.52274),(40.06051, 21.52124),(40.05971, 21.51952),(40.05217, 21.52052),(40.04866, 21.51978),(40.03052, 21.50875),(40.01631, 21.50181),(40.01014, 21.49459),(40.00619, 21.49215),(39.98995, 21.49206),(39.96952, 21.48658),(39.94485, 21.48571),(39.93748, 21.48246),(39.95107, 21.45666),(39.97348, 21.46578),(39.97479, 
21.46523),(39.97424, 21.46392),(39.95217, 21.45495),(39.95444, 21.45202),(39.97071, 21.44272),(39.97127, 21.44141),(39.97007, 21.44065),(39.95381, 21.44976),(39.95007, 21.45407),(39.94121, 21.45146),(39.93089, 21.45021),(39.92173, 21.4449),(39.9164, 21.44366),(39.91152, 21.44104),(39.90446, 21.44019),(39.90416, 21.43717),(39.9067, 21.43268),(39.90657, 21.42875),(39.91121, 21.40898),(39.91566, 21.40698),(39.91675, 21.40517),(39.91627, 21.40045),(39.91407, 21.39734),(39.91949, 21.39132),(39.92673, 21.38963),(39.93267, 21.39089),(39.93373, 21.38995),(39.93279, 21.38889),(39.92676, 21.38762),(39.91905, 21.38931),(39.91251, 21.39595),(39.91173, 21.40041),(39.90949, 21.39663),(39.91172, 21.3928),(39.91031, 21.39269),(39.90798, 21.39493),(39.90668, 21.39219),(39.90882, 21.38887),(39.90768, 21.38803),(39.90505, 21.39084),(39.90417, 21.38841),(39.90553, 21.38668)]); + +SELECT 'Inner parts of Mecca'; +SELECT pointInPolygon((39.840202, 21.451471), [(39.89317, 21.40473),(39.8952, 21.40371),(39.89574, 21.40332),(39.89629, 21.40285),(39.89739, 21.40172),(39.89925, 21.39916),(39.90055, 21.39718),(39.90145, 21.39501),(39.90173, 21.39491),(39.90189, 21.3948),(39.90252, 21.39409),(39.90289, 21.39384),(39.90396, 21.3934),(39.90422, 21.39338),(39.90436, 21.39343),(39.9044, 21.39349),(39.90404, 21.39369),(39.9037, 21.39396),(39.90351, 21.39416),(39.90319, 21.39463),(39.9027, 21.39571),(39.90267, 21.3959),(39.90268, 21.3961),(39.90281, 21.39646),(39.90293, 21.39662),(39.90307, 21.39675),(39.90324, 21.39685),(39.90362, 21.39695),(39.90382, 21.39694),(39.90418, 21.39681),(39.90447, 21.39655),(39.90505, 21.39544),(39.90523, 21.39531),(39.90547, 21.39528),(39.90556, 21.39529),(39.90572, 21.39537),(39.90592, 21.39552),(39.90662, 21.39645),(39.906, 21.39651),(39.9052, 21.39665),(39.90396, 21.39711),(39.90363, 21.39731),(39.9035, 21.39746),(39.90341, 21.39763),(39.90332, 21.39801),(39.90313, 21.39836),(39.90309, 21.39856),(39.90308, 21.39875),(39.90312, 21.39895),(39.90329, 21.39929),(39.90343, 21.39944),(39.90376, 21.39963),(39.90415, 21.39968),(39.90743, 21.39882),(39.90786, 21.39882),(39.90822, 21.39894),(39.9085, 21.39911),(39.90876, 21.39934),(39.9095, 21.40036),(39.90976, 21.40084),(39.90998, 21.40146),(39.91019, 21.40247),(39.90991, 21.40276),(39.90931, 21.40349),(39.90896, 21.40373),(39.90674, 21.40608),(39.90348, 21.40934),(39.9024, 21.41059),(39.90214, 21.4108),(39.90157, 21.41114),(39.90101, 21.41142),(39.90053, 21.41156),(39.90001, 21.41165),(39.89952, 21.41166),(39.89816, 21.41146),(39.89719, 21.4114),(39.8962, 21.41124),(39.89535, 21.41126),(39.89484, 21.41133),(39.89435, 21.41148),(39.89405, 21.41163),(39.89166, 21.4077),(39.89109, 21.40671),(39.89172, 21.4064),(39.89219, 21.40606),(39.89252, 21.40568),(39.89317, 21.40473)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.89411, 21.37812),(39.89305, 21.37935),(39.89229, 21.38007),(39.89129, 21.38085),(39.88637, 21.38398),(39.88501, 21.38505),(39.88428, 21.38581),(39.87665, 21.38344),(39.87669, 21.38209),(39.87721, 21.38049),(39.87864, 21.37771),(39.8796, 21.37629),(39.88162, 21.37426),(39.88637, 21.36994),(39.88657, 21.36988),(39.88706, 21.37),(39.88876, 21.37081),(39.88896, 21.37101),(39.89013, 21.37233),(39.89156, 21.37426),(39.89332, 21.37678),(39.89411, 21.37812)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.87603, 21.38534),(39.88308, 21.38753),(39.88293, 21.38778),(39.88223, 21.38927),(39.88216, 21.3895),(39.88185, 21.39103),(39.88178, 21.39207),(39.88187, 21.39333),(39.8821, 21.39461),(39.88234, 21.39542),(39.88273, 
21.39643),(39.88352, 21.39787),(39.87797, 21.4004),(39.87743, 21.40069),(39.87678, 21.4011),(39.87442, 21.40281),(39.87359, 21.40173),(39.87299, 21.40077),(39.87076, 21.39685),(39.87013, 21.39591),(39.8696, 21.39535),(39.8714, 21.39394),(39.87198, 21.39344),(39.87254, 21.39286),(39.87317, 21.39206),(39.87356, 21.39139),(39.87399, 21.39041),(39.87552, 21.3857),(39.87573, 21.38561),(39.87603, 21.38534)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.8728, 21.40398),(39.86889, 21.40686),(39.86188, 21.41407),(39.8567, 21.41973),(39.85571, 21.42095),(39.85474, 21.4207),(39.85357, 21.42055),(39.8571, 21.4159),(39.8598, 21.4115),(39.86247, 21.40702),(39.86312, 21.40575),(39.86566, 21.39964),(39.86637, 21.39829),(39.86702, 21.39748),(39.86801, 21.39659),(39.86852, 21.39711),(39.86908, 21.39794),(39.87126, 21.40177),(39.87192, 21.40283),(39.8728, 21.40398)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.85537, 21.42292),(39.85579, 21.42569),(39.85581, 21.42611),(39.85559, 21.4266),(39.85335, 21.42856),(39.85159, 21.43039),(39.85001, 21.43114),(39.84921, 21.43092),(39.84693, 21.42992),(39.84558, 21.42953),(39.84158, 21.42902),(39.83488, 21.42846),(39.83661, 21.42511),(39.8371, 21.42353),(39.84406, 21.42267),(39.84527, 21.4226),(39.85309, 21.42252),(39.85438, 21.42267),(39.85537, 21.42292)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.84628, 21.43181),(39.84808, 21.43261),(39.84783, 21.43302),(39.84618, 21.43694),(39.84577, 21.43776),(39.84437, 21.43886),(39.84394, 21.43902),(39.84241, 21.43929),(39.84185, 21.43935),(39.8414, 21.43937),(39.83966, 21.43925),(39.83928, 21.4393),(39.83876, 21.43957),(39.83857, 21.43971),(39.8382, 21.44005),(39.83781, 21.44051),(39.83637, 21.44071),(39.83611, 21.4407),(39.83599, 21.44066),(39.83589, 21.44058),(39.83584, 21.44048),(39.83575, 21.43989),(39.83567, 21.43963),(39.83545, 21.43919),(39.83508, 21.4388),(39.83491, 21.4387),(39.83472, 21.43864),(39.83453, 21.43861),(39.83433, 21.43862),(39.83409, 21.4387),(39.83396, 21.43862),(39.83377, 21.43814),(39.83364, 21.43792),(39.83319, 21.43732),(39.83279, 21.43689),(39.83253, 21.43633),(39.8323, 21.43541),(39.83224, 21.43523),(39.83214, 21.43506),(39.83189, 21.43474),(39.83144, 21.43432),(39.83111, 21.43413),(39.8305, 21.43392),(39.82999, 21.43269),(39.83002, 21.43255),(39.82998, 21.43209),(39.83109, 21.43171),(39.8318, 21.43138),(39.83234, 21.43109),(39.83301, 21.43064),(39.83338, 21.43032),(39.83365, 21.43037),(39.84139, 21.43101),(39.8432, 21.43126),(39.84419, 21.43134),(39.84517, 21.43149),(39.84628, 21.43181)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.83035, 21.42986),(39.8304, 21.42866),(39.83018, 21.42818),(39.83125, 21.42753),(39.83199, 21.42688),(39.83261, 21.42604),(39.83342, 21.42459),(39.83365, 21.42395),(39.83444, 21.42391),(39.83496, 21.4238),(39.83479, 21.42429),(39.83281, 21.42802),(39.83184, 21.42901),(39.83126, 21.4294),(39.83035, 21.42986)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.82819, 21.42907),(39.82814, 21.4304),(39.82488, 21.43059),(39.82445, 21.43066),(39.82337, 21.43101),(39.82191, 21.43183),(39.82049, 21.43295),(39.81955, 21.43393),(39.81912, 21.43421),(39.81892, 21.4343),(39.81862, 21.43437),(39.81829, 21.4344),(39.81774, 21.43434),(39.81632, 21.43374),(39.81706, 21.43236),(39.81724, 21.4319),(39.81748, 21.43143),(39.81812, 21.43039),(39.81845, 21.42992),(39.81919, 21.42907),(39.81933, 21.42884),(39.81964, 21.4281),(39.82006, 21.42826),(39.82103, 21.4285),(39.82135, 21.42873),(39.82154, 21.42879),(39.82284, 21.42907),(39.82412, 
21.42923),(39.8253, 21.42933),(39.82659, 21.42933),(39.8273, 21.42926),(39.82819, 21.42907)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.82779, 21.43244),(39.82826, 21.43377),(39.82849, 21.43431),(39.82776, 21.4346),(39.82626, 21.43537),(39.82591, 21.43559),(39.8245, 21.43597),(39.82298, 21.4366),(39.822, 21.43711),(39.82165, 21.43723),(39.82055, 21.43753),(39.81734, 21.43816),(39.81631, 21.43843),(39.81333, 21.43905),(39.81312, 21.43912),(39.81267, 21.43934),(39.81245, 21.4395),(39.81128, 21.4405),(39.81094, 21.44064),(39.81165, 21.43717),(39.81218, 21.43422),(39.81403, 21.43504),(39.81432, 21.4353),(39.8145, 21.43538),(39.81489, 21.43543),(39.81499, 21.43547),(39.81655, 21.4363),(39.81714, 21.4365),(39.81776, 21.43662),(39.81815, 21.43662),(39.81834, 21.43656),(39.81861, 21.43638),(39.81889, 21.43636),(39.81948, 21.43622),(39.82014, 21.43593),(39.82092, 21.4354),(39.82179, 21.43448),(39.82309, 21.43346),(39.82425, 21.43281),(39.82484, 21.43263),(39.82515, 21.43258),(39.82779, 21.43244)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.8207, 21.42636),(39.82125, 21.42508),(39.82206, 21.42403),(39.82275, 21.42279),(39.82346, 21.42235),(39.82418, 21.42178),(39.82499, 21.42142),(39.8265, 21.42122),(39.83136, 21.42199),(39.83197, 21.42264),(39.83163, 21.4237),(39.8309, 21.425),(39.83051, 21.42553),(39.83031, 21.42574),(39.82948, 21.42628),(39.828, 21.42705),(39.82644, 21.42733),(39.8243, 21.42724),(39.82267, 21.42698),(39.82214, 21.42669),(39.8207, 21.42636)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.82102, 21.42106),(39.82062, 21.42128),(39.82017, 21.42162),(39.8194, 21.42239),(39.81905, 21.4228),(39.81802, 21.42299),(39.8179, 21.42265),(39.81779, 21.42189),(39.8176, 21.42155),(39.81729, 21.42131),(39.81711, 21.42124),(39.81692, 21.4212),(39.81653, 21.42125),(39.81619, 21.42144),(39.81595, 21.42175),(39.81588, 21.42193),(39.81584, 21.42212),(39.81585, 21.42232),(39.81597, 21.42319),(39.81504, 21.42328),(39.81423, 21.42329),(39.80999, 21.42266),(39.80755, 21.42223),(39.80737, 21.42203),(39.80706, 21.42179),(39.80695, 21.42166),(39.80685, 21.42142),(39.80686, 21.42123),(39.80695, 21.42105),(39.80755, 21.42041),(39.80768, 21.42022),(39.80789, 21.41962),(39.80793, 21.41935),(39.80801, 21.41519),(39.80809, 21.41439),(39.80827, 21.41351),(39.80867, 21.41233),(39.80881, 21.41233),(39.80924, 21.41409),(39.80931, 21.41428),(39.80941, 21.41444),(39.80969, 21.41471),(39.81006, 21.41484),(39.81026, 21.41485),(39.81064, 21.41475),(39.81095, 21.41452),(39.81107, 21.41437),(39.8112, 21.414),(39.81121, 21.4138),(39.81118, 21.41361),(39.81088, 21.41239),(39.81311, 21.41248),(39.81908, 21.41249),(39.8197, 21.41244),(39.82006, 21.41251),(39.8204, 21.41251),(39.8209, 21.41367),(39.82144, 21.41481),(39.82169, 21.41562),(39.82182, 21.4166),(39.82176, 21.41785),(39.81945, 21.41827),(39.81851, 21.41855),(39.8182, 21.41879),(39.81809, 21.41895),(39.81801, 21.41913),(39.81796, 21.41951),(39.81807, 21.41989),(39.81831, 21.4202),(39.81865, 21.42039),(39.81884, 21.42043),(39.81903, 21.42044),(39.81994, 21.42021),(39.82154, 21.41992),(39.82154, 21.41997),(39.82102, 21.42106)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.81069, 21.41022),(39.81115, 21.40581),(39.8114, 21.4056),(39.81159, 21.40525),(39.81163, 21.40487),(39.81157, 21.40426),(39.81209, 21.40103),(39.81211, 21.39966),(39.81193, 21.39818),(39.81097, 21.39505),(39.8115, 21.39475),(39.81252, 21.39399),(39.8134, 21.39321),(39.81416, 21.39245),(39.81607, 21.38983),(39.81662, 21.38928),(39.81728, 21.38874),(39.81819, 
21.38814),(39.81928, 21.38768),(39.82014, 21.38742),(39.82059, 21.38733),(39.82164, 21.38725),(39.82287, 21.38723),(39.82462, 21.3871),(39.82443, 21.38782),(39.82441, 21.38855),(39.82457, 21.39047),(39.82464, 21.39192),(39.82479, 21.3973),(39.82485, 21.3976),(39.82514, 21.39842),(39.82525, 21.39865),(39.8258, 21.39947),(39.82635, 21.4),(39.82679, 21.40031),(39.82736, 21.40062),(39.82928, 21.40137),(39.82948, 21.40151),(39.82914, 21.40441),(39.82897, 21.40551),(39.8282, 21.40889),(39.82795, 21.41023),(39.82696, 21.41),(39.82658, 21.40996),(39.82664, 21.40825),(39.82658, 21.40787),(39.82649, 21.40769),(39.82622, 21.40741),(39.82605, 21.40731),(39.82567, 21.40722),(39.82529, 21.40728),(39.82511, 21.40737),(39.82483, 21.40764),(39.82473, 21.40781),(39.82464, 21.40819),(39.82458, 21.41008),(39.8235, 21.41017),(39.82153, 21.41019),(39.82001, 21.40827),(39.81977, 21.40778),(39.81909, 21.40579),(39.81879, 21.40505),(39.81827, 21.40417),(39.81699, 21.40224),(39.8159, 21.4008),(39.81463, 21.39957),(39.8141, 21.39915),(39.81392, 21.39908),(39.81353, 21.39903),(39.81316, 21.39914),(39.81299, 21.39925),(39.81274, 21.39955),(39.81267, 21.39973),(39.81262, 21.40012),(39.81273, 21.40049),(39.81284, 21.40066),(39.81298, 21.4008),(39.81326, 21.40103),(39.81445, 21.40218),(39.81537, 21.40342),(39.81656, 21.40521),(39.81696, 21.40588),(39.81722, 21.40652),(39.81794, 21.4086),(39.81826, 21.40925),(39.81898, 21.41019),(39.81328, 21.41012),(39.81218, 21.41014),(39.81134, 21.41021),(39.81069, 21.41022)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.80999, 21.39018),(39.80983, 21.38939),(39.80816, 21.38343),(39.80609, 21.3751),(39.80867, 21.37472),(39.80969, 21.37875),(39.81147, 21.38151),(39.81202, 21.38214),(39.81339, 21.38345),(39.81628, 21.38612),(39.81677, 21.38646),(39.81575, 21.38713),(39.81478, 21.38799),(39.81395, 21.38898),(39.81265, 21.39076),(39.81228, 21.39111),(39.8121, 21.3912),(39.81181, 21.39121),(39.81033, 21.39058),(39.80999, 21.39018)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.81909, 21.38545),(39.8181, 21.38496),(39.81759, 21.3846),(39.81476, 21.382),(39.81349, 21.38079),(39.81311, 21.38035),(39.81153, 21.37795),(39.81145, 21.37773),(39.81046, 21.37363),(39.81038, 21.37341),(39.81025, 21.37321),(39.80995, 21.37288),(39.80962, 21.3727),(39.80925, 21.37265),(39.80899, 21.37265),(39.80561, 21.37315),(39.80517, 21.37139),(39.80492, 21.37068),(39.80504, 21.37048),(39.8051, 21.37031),(39.80521, 21.36975),(39.8052, 21.36955),(39.80506, 21.36919),(39.80494, 21.36903),(39.80441, 21.36603),(39.80411, 21.36492),(39.80356, 21.36191),(39.80349, 21.36116),(39.80345, 21.35965),(39.80349, 21.35894),(39.80411, 21.35504),(39.80433, 21.35476),(39.80482, 21.35383),(39.80553, 21.35304),(39.8063, 21.35252),(39.80724, 21.35222),(39.80825, 21.35217),(39.80932, 21.35231),(39.81637, 21.35445),(39.81856, 21.35504),(39.82046, 21.35546),(39.82287, 21.35558),(39.82421, 21.35536),(39.82613, 21.35492),(39.83433, 21.35261),(39.83656, 21.35216),(39.83834, 21.35188),(39.83936, 21.35182),(39.84169, 21.3521),(39.84381, 21.35252),(39.84777, 21.35391),(39.84887, 21.35434),(39.84773, 21.35594),(39.84744, 21.35628),(39.84524, 21.35927),(39.83253, 21.37661),(39.8295, 21.38077),(39.82857, 21.38214),(39.82833, 21.38244),(39.82791, 21.38279),(39.82744, 21.38324),(39.82631, 21.3845),(39.82551, 21.38473),(39.82438, 21.38494),(39.82041, 21.38516),(39.81948, 21.38534),(39.81909, 21.38545)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.80215, 21.35462),(39.8015, 21.35868),(39.80145, 21.35959),(39.80149, 
21.36132),(39.80157, 21.36215),(39.80215, 21.36532),(39.80244, 21.36642),(39.80274, 21.36813),(39.79915, 21.3652),(39.79798, 21.36406),(39.79715, 21.36315),(39.79586, 21.36153),(39.79496, 21.36019),(39.79359, 21.35827),(39.79297, 21.35753),(39.79128, 21.35568),(39.79053, 21.35503),(39.78825, 21.35321),(39.78735, 21.35255),(39.78586, 21.35172),(39.78394, 21.35084),(39.78351, 21.35055),(39.78399, 21.34968),(39.78415, 21.34951),(39.78515, 21.34915),(39.78558, 21.34906),(39.78718, 21.34859),(39.78801, 21.34846),(39.78885, 21.34839),(39.78982, 21.34822),(39.79322, 21.3482),(39.79564, 21.34849),(39.79835, 21.34916),(39.79957, 21.34986),(39.80019, 21.35029),(39.80071, 21.35078),(39.80117, 21.35137),(39.80155, 21.35202),(39.80187, 21.35285),(39.80206, 21.35367),(39.80209, 21.35432),(39.80215, 21.35462)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.779, 21.35047),(39.78, 21.35119),(39.78099, 21.35171),(39.78492, 21.35348),(39.78632, 21.35426),(39.78702, 21.35478),(39.78924, 21.35656),(39.78993, 21.35716),(39.79145, 21.35883),(39.79203, 21.35952),(39.79333, 21.36134),(39.79427, 21.36274),(39.79565, 21.36447),(39.79656, 21.36548),(39.79779, 21.36667),(39.80229, 21.37034),(39.80218, 21.37053),(39.80167, 21.37113),(39.80041, 21.37248),(39.79975, 21.3731),(39.79894, 21.37373),(39.79827, 21.37413),(39.79698, 21.37475),(39.79643, 21.37496),(39.79558, 21.37521),(39.79529, 21.37422),(39.79478, 21.3732),(39.79409, 21.37214),(39.79351, 21.37149),(39.793, 21.37101),(39.79201, 21.37035),(39.79122, 21.36997),(39.79048, 21.36972),(39.7891, 21.36947),(39.7887, 21.36944),(39.78832, 21.36949),(39.78814, 21.36957),(39.78784, 21.36982),(39.78767, 21.37017),(39.78763, 21.37037),(39.78718, 21.375),(39.78467, 21.37478),(39.78431, 21.37488),(39.78402, 21.37511),(39.78391, 21.37526),(39.78378, 21.3756),(39.78245, 21.38285),(39.78231, 21.38317),(39.78211, 21.38348),(39.77952, 21.38561),(39.77935, 21.38579),(39.77889, 21.38643),(39.77608, 21.391),(39.77578, 21.39181),(39.77555, 21.39291),(39.77492, 21.39694),(39.76541, 21.3964),(39.76273, 21.39622),(39.76342, 21.3915),(39.76482, 21.38563),(39.76512, 21.38401),(39.76545, 21.38126),(39.76562, 21.38041),(39.76602, 21.37916),(39.76859, 21.37278),(39.76972, 21.36938),(39.77036, 21.36686),(39.77091, 21.36513),(39.77314, 21.35887),(39.77478, 21.35448),(39.77517, 21.35368),(39.77568, 21.35293),(39.77631, 21.35217),(39.77681, 21.35168),(39.77761, 21.3511),(39.77831, 21.35073),(39.779, 21.35047)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.76244, 21.39846),(39.76255, 21.39821),(39.76528, 21.3984),(39.77595, 21.399),(39.77653, 21.39906),(39.78224, 21.39999),(39.78304, 21.40016),(39.78406, 21.40081),(39.78349, 21.40178),(39.78316, 21.40249),(39.78263, 21.40405),(39.78258, 21.40444),(39.78267, 21.40478),(39.78245, 21.40582),(39.78226, 21.407),(39.78167, 21.40978),(39.78069, 21.41362),(39.77994, 21.41671),(39.77776, 21.41693),(39.77732, 21.41693),(39.7769, 21.41689),(39.77624, 21.41674),(39.77593, 21.41661),(39.773, 21.41486),(39.77164, 21.41423),(39.76892, 21.41344),(39.76744, 21.41319),(39.76656, 21.41312),(39.76083, 21.41289),(39.7596, 21.41288),(39.75858, 21.41283),(39.756, 21.4126),(39.75567, 21.41255),(39.75472, 21.41221),(39.75448, 21.41203),(39.7544, 21.41191),(39.75435, 21.41171),(39.75437, 21.41133),(39.75446, 21.41111),(39.75488, 21.41038),(39.75712, 21.40804),(39.75802, 21.40692),(39.75933, 21.40518),(39.7607, 21.40297),(39.76127, 21.40197),(39.76186, 21.40076),(39.76209, 21.40008),(39.76228, 21.39934),(39.76244, 21.39846)]); +SELECT 
pointInPolygon((39.840202, 21.451471), [(39.78571, 21.4023),(39.78613, 21.40226),(39.78735, 21.40325),(39.78684, 21.40459),(39.78685, 21.40595),(39.78943, 21.41476),(39.79045, 21.41882),(39.79071, 21.41927),(39.79203, 21.42035),(39.7921, 21.42071),(39.79178, 21.42117),(39.79074, 21.42159),(39.7902, 21.42203),(39.78993, 21.42249),(39.78974, 21.42357),(39.78774, 21.42363),(39.78577, 21.42328),(39.78521, 21.42357),(39.78486, 21.42441),(39.78493, 21.42495),(39.78537, 21.42543),(39.78674, 21.42583),(39.7884, 21.42607),(39.78817, 21.42683),(39.7881, 21.428),(39.78914, 21.43215),(39.7889, 21.43869),(39.78737, 21.43877),(39.78262, 21.43967),(39.78006, 21.43961),(39.77771, 21.43792),(39.77742, 21.43637),(39.77739, 21.43356),(39.77763, 21.4322),(39.77929, 21.42676),(39.78007, 21.42488),(39.78048, 21.42431),(39.78176, 21.42346),(39.78296, 21.42427),(39.78354, 21.42436),(39.78438, 21.42393),(39.78469, 21.42344),(39.78459, 21.42268),(39.78345, 21.42155),(39.78336, 21.41933),(39.78243, 21.41679),(39.78242, 21.41498),(39.78361, 21.41026),(39.78458, 21.40542),(39.78489, 21.40468),(39.7849, 21.40411),(39.78571, 21.4023)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.77052, 21.43821),(39.76961, 21.43812),(39.76832, 21.4382),(39.76767, 21.43829),(39.76811, 21.43749),(39.76863, 21.4368),(39.76943, 21.43601),(39.77156, 21.43438),(39.77239, 21.43353),(39.77295, 21.43277),(39.77411, 21.43069),(39.7749, 21.42952),(39.77558, 21.42859),(39.77673, 21.42736),(39.77594, 21.42991),(39.77575, 21.4307),(39.77542, 21.43177),(39.77522, 21.43286),(39.77508, 21.43432),(39.77509, 21.43554),(39.77488, 21.43577),(39.77474, 21.43605),(39.77467, 21.43622),(39.77444, 21.43711),(39.77419, 21.43754),(39.77404, 21.43771),(39.77377, 21.43791),(39.77352, 21.43805),(39.77287, 21.43824),(39.77085, 21.43816),(39.77052, 21.43821)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.77683, 21.42438),(39.77605, 21.425),(39.77496, 21.42603),(39.7739, 21.42729),(39.77313, 21.42836),(39.77185, 21.4304),(39.77114, 21.43164),(39.77043, 21.4325),(39.76964, 21.4332),(39.76827, 21.43424),(39.76732, 21.43509),(39.76667, 21.43583),(39.76622, 21.43647),(39.76497, 21.43865),(39.76336, 21.43887),(39.76248, 21.43875),(39.76215, 21.43863),(39.75945, 21.4361),(39.75799, 21.43499),(39.75779, 21.43487),(39.75661, 21.43435),(39.75139, 21.43268),(39.75032, 21.43203),(39.74784, 21.42997),(39.74759, 21.42982),(39.74683, 21.4295),(39.74612, 21.42931),(39.74467, 21.42912),(39.74448, 21.42896),(39.7443, 21.42888),(39.74194, 21.4285),(39.74145, 21.42827),(39.74102, 21.42776),(39.7407, 21.42681),(39.74058, 21.42589),(39.74083, 21.42453),(39.74184, 21.4225),(39.74313, 21.42067),(39.74414, 21.41962),(39.74463, 21.41918),(39.74722, 21.41719),(39.74816, 21.41667),(39.75131, 21.41514),(39.7528, 21.4143),(39.7546, 21.41468),(39.75722, 21.41503),(39.7665, 21.41541),(39.76768, 21.41554),(39.7691, 21.41584),(39.77057, 21.41631),(39.77193, 21.41692),(39.77339, 21.41779),(39.7742, 21.41838),(39.77635, 21.41978),(39.77611, 21.41999),(39.77594, 21.42034),(39.77589, 21.42086),(39.77591, 21.42111),(39.77604, 21.42177),(39.77613, 21.42203),(39.77649, 21.42272),(39.77693, 21.42331),(39.77708, 21.42367),(39.77707, 21.42393),(39.77683, 21.42438)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.73364, 21.42933),(39.73324, 21.42934),(39.72777, 21.42885),(39.72661, 21.42861),(39.72586, 21.42833),(39.71975, 21.42438),(39.71903, 21.424),(39.71198, 21.42198),(39.71237, 21.41843),(39.71241, 21.4169),(39.71237, 21.41582),(39.71224, 21.4153),(39.71207, 21.41499),(39.71028, 
21.41276),(39.70957, 21.412),(39.70914, 21.4116),(39.70794, 21.41071),(39.70769, 21.41057),(39.70642, 21.41012),(39.70575, 21.40943),(39.70451, 21.40799),(39.70442, 21.40779),(39.70426, 21.40579),(39.70437, 21.4053),(39.70468, 21.40483),(39.70531, 21.40433),(39.71243, 21.39961),(39.7206, 21.41007),(39.72131, 21.41077),(39.72202, 21.41134),(39.72276, 21.41188),(39.72424, 21.41269),(39.72578, 21.41323),(39.72663, 21.41346),(39.72741, 21.41357),(39.72886, 21.41366),(39.73014, 21.4136),(39.7346, 21.41323),(39.73624, 21.41339),(39.73895, 21.41313),(39.74098, 21.41303),(39.74174, 21.41319),(39.74348, 21.41408),(39.74416, 21.41454),(39.74445, 21.41478),(39.74463, 21.41504),(39.74467, 21.41523),(39.74465, 21.41565),(39.74447, 21.41608),(39.74416, 21.41649),(39.74239, 21.41799),(39.74121, 21.41922),(39.74065, 21.41993),(39.73973, 21.42137),(39.73894, 21.42307),(39.7385, 21.42385),(39.73799, 21.42463),(39.73672, 21.42624),(39.73364, 21.42933)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.71409, 21.39848),(39.7259, 21.39043),(39.72856, 21.39237),(39.72897, 21.39258),(39.72946, 21.39268),(39.72985, 21.39281),(39.73553, 21.39698),(39.73563, 21.39707),(39.73609, 21.39767),(39.73676, 21.39815),(39.74249, 21.40142),(39.743, 21.40162),(39.74336, 21.40168),(39.7445, 21.40156),(39.74465, 21.40162),(39.74535, 21.40201),(39.74586, 21.40221),(39.74731, 21.4031),(39.75127, 21.4053),(39.75207, 21.40561),(39.75415, 21.40664),(39.75489, 21.40686),(39.75501, 21.40693),(39.75368, 21.40835),(39.75296, 21.4087),(39.75071, 21.40918),(39.74696, 21.41005),(39.74601, 21.41024),(39.74227, 21.41003),(39.74019, 21.41017),(39.73721, 21.41052),(39.73608, 21.41079),(39.73591, 21.41089),(39.73567, 21.41113),(39.72999, 21.4116),(39.72892, 21.41166),(39.72757, 21.41157),(39.72697, 21.41149),(39.72641, 21.41133),(39.7251, 21.41088),(39.72389, 21.41023),(39.72326, 21.40977),(39.72261, 21.40924),(39.72205, 21.40869),(39.71409, 21.39848)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.7457, 21.43127),(39.74624, 21.43142),(39.74667, 21.4316),(39.7491, 21.43362),(39.75045, 21.43445),(39.75067, 21.43455),(39.75586, 21.43621),(39.75688, 21.43666),(39.75811, 21.43759),(39.7606, 21.43995),(39.76102, 21.44029),(39.76115, 21.44038),(39.76188, 21.44066),(39.7621, 21.44072),(39.76323, 21.44087),(39.76349, 21.44087),(39.76572, 21.44057),(39.76592, 21.44058),(39.76625, 21.4405),(39.76848, 21.4402),(39.76965, 21.44012),(39.77019, 21.44018),(39.77471, 21.44129),(39.77662, 21.44142),(39.77705, 21.44257),(39.77753, 21.44343),(39.77792, 21.444),(39.77863, 21.44489),(39.77936, 21.44567),(39.7797, 21.44597),(39.7803, 21.44638),(39.78107, 21.44684),(39.7823, 21.4475),(39.78364, 21.44835),(39.78622, 21.45028),(39.78775, 21.45163),(39.78853, 21.45236),(39.79426, 21.45875),(39.79488, 21.45938),(39.79706, 21.46135),(39.79869, 21.4625),(39.80185, 21.46439),(39.80201, 21.46461),(39.80207, 21.46475),(39.80206, 21.46486),(39.80187, 21.46508),(39.79992, 21.4663),(39.79979, 21.46644),(39.79968, 21.46663),(39.79914, 21.467),(39.79814, 21.46788),(39.79751, 21.46867),(39.79695, 21.46959),(39.79658, 21.47045),(39.79623, 21.47164),(39.7932, 21.48709),(39.79286, 21.48954),(39.79217, 21.49622),(39.79191, 21.49914),(39.79191, 21.50009),(39.79167, 21.502),(39.79039, 21.5155),(39.7749, 21.51163),(39.77208, 21.51099),(39.7654, 21.51023),(39.74443, 21.5064),(39.74326, 21.50603),(39.74213, 21.50544),(39.74139, 21.50491),(39.74068, 21.50419),(39.7399, 21.50329),(39.73911, 21.50203),(39.73868, 21.50103),(39.73842, 21.49995),(39.73826, 21.49836),(39.73919, 
21.49113),(39.73916, 21.48952),(39.73895, 21.48839),(39.73888, 21.48816),(39.73839, 21.4871),(39.73787, 21.4862),(39.73693, 21.48477),(39.73567, 21.48348),(39.73286, 21.48021),(39.73059, 21.47715),(39.72508, 21.46749),(39.72433, 21.46585),(39.72302, 21.46073),(39.72287, 21.45961),(39.72278, 21.45841),(39.72284, 21.45738),(39.72295, 21.45633),(39.72389, 21.45255),(39.72481, 21.45005),(39.73039, 21.43827),(39.7314, 21.43644),(39.73183, 21.43594),(39.73214, 21.43568),(39.73369, 21.43469),(39.73481, 21.43411),(39.73548, 21.43368),(39.73637, 21.433),(39.73709, 21.43238),(39.73785, 21.43181),(39.73821, 21.43158),(39.73856, 21.43144),(39.73898, 21.43133),(39.73951, 21.43126),(39.74295, 21.43112),(39.7457, 21.43127)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.7791, 21.44156),(39.77939, 21.44157),(39.78003, 21.44187),(39.78046, 21.442),(39.78123, 21.44207),(39.78208, 21.44195),(39.78301, 21.44163),(39.78764, 21.44075),(39.78822, 21.44077),(39.78917, 21.44068),(39.79022, 21.44082),(39.7976, 21.44116),(39.7978, 21.44115),(39.79799, 21.4411),(39.79933, 21.44116),(39.79964, 21.44126),(39.80102, 21.44147),(39.80255, 21.44181),(39.80419, 21.4421),(39.80647, 21.44221),(39.80682, 21.44216),(39.80854, 21.4424),(39.8081, 21.44453),(39.80796, 21.44538),(39.8078, 21.44743),(39.80767, 21.44769),(39.80763, 21.44789),(39.80767, 21.44828),(39.80774, 21.44843),(39.80761, 21.45109),(39.80736, 21.4538),(39.80599, 21.45785),(39.8059, 21.45822),(39.80589, 21.45902),(39.80592, 21.45926),(39.80609, 21.45985),(39.80634, 21.46041),(39.80595, 21.46096),(39.8057, 21.46124),(39.8038, 21.46239),(39.80349, 21.46246),(39.80322, 21.46244),(39.79996, 21.46056),(39.79871, 21.45967),(39.79745, 21.45865),(39.79647, 21.45779),(39.79588, 21.4572),(39.78985, 21.45056),(39.78754, 21.44849),(39.7862, 21.44745),(39.78483, 21.44646),(39.78342, 21.44554),(39.78147, 21.44446),(39.78087, 21.44401),(39.77967, 21.44266),(39.7791, 21.44156)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.93436, 21.4816),(39.93241, 21.48133),(39.92917, 21.48071),(39.92795, 21.48063),(39.92391, 21.48004),(39.92213, 21.47963),(39.92017, 21.47891),(39.91245, 21.47545),(39.91099, 21.47469),(39.9098, 21.47394),(39.90843, 21.47286),(39.90708, 21.4716),(39.90445, 21.46837),(39.90205, 21.46552),(39.90002, 21.46344),(39.89957, 21.4628),(39.8993, 21.46223),(39.89803, 21.45836),(39.898, 21.45747),(39.89804, 21.45702),(39.90192, 21.44447),(39.9022, 21.44387),(39.90238, 21.44363),(39.90272, 21.44337),(39.90406, 21.44277),(39.90471, 21.44266),(39.90601, 21.44278),(39.90621, 21.44277),(39.90655, 21.44267),(39.90946, 21.44274),(39.91033, 21.44282),(39.91091, 21.44294),(39.91136, 21.44311),(39.912, 21.44341),(39.91274, 21.44388),(39.91397, 21.44474),(39.9147, 21.44511),(39.91571, 21.44553),(39.91731, 21.44594),(39.9197, 21.44635),(39.92104, 21.44677),(39.92156, 21.44697),(39.92234, 21.44739),(39.92405, 21.44847),(39.92538, 21.44954),(39.92675, 21.45043),(39.92962, 21.4519),(39.93035, 21.45214),(39.93227, 21.45265),(39.93426, 21.45311),(39.93484, 21.45321),(39.93669, 21.45325),(39.9378, 21.4532),(39.93928, 21.45321),(39.94077, 21.45341),(39.94229, 21.45381),(39.94572, 21.45459),(39.94649, 21.45483),(39.94901, 21.45579),(39.93559, 21.4801),(39.93508, 21.48113),(39.93479, 21.48161),(39.93436, 21.4816)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.9044, 21.42901),(39.90248, 21.42675),(39.90195, 21.4262),(39.90111, 21.42499),(39.90059, 21.42408),(39.90038, 21.42359),(39.89991, 21.42209),(39.89946, 21.42086),(39.89889, 21.41972),(39.89911, 21.41951),(39.89926, 
21.41931),(39.89948, 21.41892),(39.89961, 21.41855),(39.89964, 21.41816),(39.89957, 21.41767),(39.89948, 21.41741),(39.89925, 21.417),(39.89899, 21.41667),(39.89887, 21.41624),(39.89879, 21.41607),(39.89867, 21.41591),(39.89852, 21.41578),(39.89835, 21.41569),(39.89816, 21.41562),(39.89777, 21.41561),(39.89741, 21.41575),(39.89725, 21.41587),(39.89712, 21.41602),(39.89703, 21.41619),(39.89696, 21.41645),(39.89537, 21.41379),(39.89576, 21.4136),(39.89645, 21.41343),(39.89707, 21.41339),(39.89798, 21.41346),(39.89948, 21.41366),(39.90026, 21.41364),(39.90105, 21.4135),(39.90182, 21.41325),(39.90257, 21.41288),(39.90323, 21.41248),(39.90385, 21.41197),(39.90475, 21.41093),(39.90476, 21.41108),(39.90482, 21.41126),(39.90492, 21.41143),(39.9052, 21.4117),(39.90538, 21.41179),(39.90589, 21.4119),(39.90633, 21.41211),(39.90744, 21.4129),(39.90772, 21.41318),(39.9078, 21.4133),(39.90791, 21.41361),(39.90793, 21.41377),(39.9079, 21.41405),(39.90795, 21.41444),(39.90814, 21.41478),(39.90724, 21.41976),(39.90704, 21.42054),(39.90545, 21.42577),(39.9044, 21.42901)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.8047, 21.42149),(39.80352, 21.42129),(39.80246, 21.42135),(39.80157, 21.42154),(39.79665, 21.42328),(39.79613, 21.42337),(39.79422, 21.4236),(39.79363, 21.42362),(39.79194, 21.42358),(39.79197, 21.42339),(39.79246, 21.42319),(39.79288, 21.42298),(39.79309, 21.42284),(39.79357, 21.42239),(39.79395, 21.4219),(39.79415, 21.42135),(39.7942, 21.42109),(39.79422, 21.42034),(39.79408, 21.41965),(39.7938, 21.41913),(39.79365, 21.41892),(39.79326, 21.41852),(39.79252, 21.41799),(39.79244, 21.41749),(39.79228, 21.41685),(39.78902, 21.4057),(39.78897, 21.40545),(39.78905, 21.40469),(39.78917, 21.40441),(39.7896, 21.40383),(39.7899, 21.40361),(39.79015, 21.40348),(39.79091, 21.4033),(39.79119, 21.40331),(39.79188, 21.40345),(39.79208, 21.40353),(39.7923, 21.40367),(39.79685, 21.4084),(39.79791, 21.40973),(39.79986, 21.41254),(39.80035, 21.41301),(39.80145, 21.41381),(39.80223, 21.41458),(39.80241, 21.41472),(39.80482, 21.41733),(39.80568, 21.41832),(39.80568, 21.41932),(39.80526, 21.41977),(39.8051, 21.41999),(39.80494, 21.4203),(39.8047, 21.42107),(39.8047, 21.42149)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.79951, 21.40831),(39.79848, 21.40702),(39.79501, 21.40333),(39.79373, 21.40207),(39.79355, 21.40193),(39.79291, 21.40156),(39.79209, 21.40128),(39.79129, 21.40117),(39.7904, 21.40122),(39.78952, 21.40145),(39.78886, 21.40175),(39.78761, 21.40064),(39.78831, 21.39977),(39.78842, 21.39961),(39.7885, 21.39941),(39.78889, 21.39915),(39.78934, 21.39892),(39.78975, 21.39885),(39.79039, 21.39844),(39.7913, 21.39812),(39.79528, 21.39727),(39.79685, 21.39697),(39.79808, 21.39687),(39.80179, 21.39686),(39.80549, 21.39681),(39.80651, 21.39668),(39.80725, 21.39666),(39.80743, 21.3966),(39.80771, 21.39643),(39.80899, 21.39602),(39.80971, 21.39842),(39.80979, 21.39893),(39.80983, 21.39956),(39.80955, 21.39958),(39.80936, 21.39965),(39.8092, 21.39975),(39.80893, 21.40003),(39.8088, 21.4004),(39.80766, 21.40803),(39.80749, 21.40874),(39.8071, 21.4101),(39.80666, 21.41003),(39.80538, 21.40971),(39.8038, 21.40937),(39.80309, 21.40917),(39.79951, 21.40831)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.78528, 21.39922),(39.78493, 21.39899),(39.78591, 21.39763),(39.78619, 21.3968),(39.78824, 21.38477),(39.78896, 21.37718),(39.79196, 21.37747),(39.79386, 21.37747),(39.79431, 21.38367),(39.79466, 21.38521),(39.79527, 21.38685),(39.7963, 21.39301),(39.79642, 21.39481),(39.79092, 
21.39593),(39.78942, 21.39642),(39.78822, 21.39699),(39.78722, 21.3976),(39.78651, 21.3981),(39.78528, 21.39922)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.79842, 21.39469),(39.79828, 21.39273),(39.79723, 21.38643),(39.79656, 21.38457),(39.7963, 21.38345),(39.79584, 21.3772),(39.79782, 21.37657),(39.80001, 21.37542),(39.80108, 21.37459),(39.80295, 21.37269),(39.80319, 21.37258),(39.80748, 21.38942),(39.80783, 21.39052),(39.80783, 21.39119),(39.8077, 21.39161),(39.80752, 21.39189),(39.80687, 21.39255),(39.80579, 21.39415),(39.80519, 21.39458),(39.79842, 21.39469)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.86272, 21.42529),(39.86276, 21.42495),(39.86273, 21.42476),(39.86266, 21.42457),(39.86255, 21.42441),(39.8638, 21.42428),(39.8642, 21.42419),(39.86474, 21.42401),(39.86583, 21.42353),(39.86774, 21.42232),(39.87468, 21.41769),(39.87519, 21.41722),(39.87556, 21.41679),(39.87623, 21.41572),(39.87632, 21.41563),(39.87919, 21.41359),(39.88011, 21.41502),(39.88112, 21.41677),(39.88159, 21.4177),(39.88115, 21.41807),(39.88045, 21.41821),(39.87899, 21.41871),(39.87817, 21.41908),(39.87653, 21.41992),(39.87497, 21.42078),(39.87348, 21.42172),(39.87086, 21.42355),(39.86554, 21.42759),(39.86429, 21.4284),(39.86344, 21.42888),(39.86315, 21.42914),(39.86304, 21.42931),(39.86297, 21.42949),(39.86288, 21.42955),(39.86345, 21.42732),(39.86493, 21.42715),(39.86628, 21.42693),(39.86645, 21.42685),(39.86674, 21.42659),(39.86691, 21.42623),(39.86694, 21.42604),(39.86693, 21.42584),(39.8668, 21.42548),(39.86654, 21.42519),(39.86618, 21.42502),(39.86599, 21.42499),(39.86579, 21.425),(39.86465, 21.42517),(39.86303, 21.42536),(39.86272, 21.42529)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.88137, 21.4133),(39.88234, 21.41345),(39.88299, 21.41336),(39.88369, 21.41308),(39.88421, 21.41275),(39.88498, 21.41196),(39.88634, 21.40971),(39.88889, 21.40731),(39.89212, 21.41262),(39.89182, 21.41282),(39.89033, 21.41422),(39.88928, 21.41434),(39.88824, 21.41479),(39.88626, 21.41669),(39.88579, 21.41696),(39.88471, 21.41737),(39.88379, 21.4176),(39.88287, 21.41579),(39.88137, 21.4133)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.89343, 21.41478),(39.89599, 21.41909),(39.8969, 21.42055),(39.89729, 21.42134),(39.89771, 21.42243),(39.8981, 21.42371),(39.89874, 21.4253),(39.89905, 21.42595),(39.89945, 21.42665),(39.89984, 21.42723),(39.90044, 21.42799),(39.90305, 21.43104),(39.9031, 21.43127),(39.90311, 21.43155),(39.90306, 21.43194),(39.9017, 21.43639),(39.90122, 21.43585),(39.90098, 21.43544),(39.90059, 21.43417),(39.90029, 21.43348),(39.89965, 21.43238),(39.89683, 21.4285),(39.89643, 21.42802),(39.89572, 21.4274),(39.89208, 21.42467),(39.8859, 21.42038),(39.88621, 21.41977),(39.8863, 21.41948),(39.88648, 21.41926),(39.88752, 21.4184),(39.88777, 21.4181),(39.88784, 21.41793),(39.88939, 21.41644),(39.88975, 21.41629),(39.89056, 21.41626),(39.891, 21.41612),(39.89343, 21.41478)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.90063, 21.43987),(39.89683, 21.45214),(39.8956, 21.45625),(39.89544, 21.45648),(39.89523, 21.45671),(39.89504, 21.45685),(39.89446, 21.45714),(39.89298, 21.45776),(39.89025, 21.4587),(39.88889, 21.4589),(39.88829, 21.45893),(39.88695, 21.45888),(39.88613, 21.45869),(39.88496, 21.45834),(39.88205, 21.45719),(39.88115, 21.45672),(39.87854, 21.45483),(39.87734, 21.45423),(39.87619, 21.45386),(39.871, 21.45278),(39.87018, 21.45257),(39.86828, 21.45177),(39.86695, 21.45101),(39.86592, 21.45008),(39.86505, 21.44914),(39.86307, 21.44683),(39.86096, 
21.44427),(39.86036, 21.44351),(39.85993, 21.44287),(39.85876, 21.44092),(39.85863, 21.4405),(39.85853, 21.4385),(39.85845, 21.43789),(39.85841, 21.4377),(39.85799, 21.43648),(39.85816, 21.43625),(39.85861, 21.4358),(39.86082, 21.43328),(39.86139, 21.43272),(39.86152, 21.43255),(39.86213, 21.43226),(39.86471, 21.4308),(39.86495, 21.4305),(39.86503, 21.43028),(39.86671, 21.42921),(39.87205, 21.42516),(39.87456, 21.4234),(39.87601, 21.42249),(39.87745, 21.42169),(39.87968, 21.42059),(39.88, 21.42048),(39.88078, 21.42029),(39.88189, 21.4201),(39.8827, 21.41988),(39.88297, 21.4204),(39.8836, 21.42141),(39.88376, 21.42162),(39.88439, 21.4222),(39.89129, 21.42698),(39.89334, 21.42853),(39.89452, 21.42953),(39.89488, 21.42994),(39.89779, 21.43386),(39.89841, 21.43486),(39.89868, 21.43546),(39.89962, 21.43833),(39.89972, 21.4385),(39.8999, 21.43867),(39.90027, 21.43939),(39.90063, 21.43987)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.89963, 21.38706),(39.8989, 21.38816),(39.8987, 21.38839),(39.89842, 21.38854),(39.89495, 21.38902),(39.8945, 21.38902),(39.89358, 21.38881),(39.89293, 21.38854),(39.88689, 21.38662),(39.88679, 21.38636),(39.88745, 21.38587),(39.89342, 21.38207),(39.89382, 21.38213),(39.89462, 21.38259),(39.89517, 21.38281),(39.8958, 21.38288),(39.89673, 21.3828),(39.89855, 21.38564),(39.89963, 21.38706)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.89348, 21.39296),(39.89286, 21.39348),(39.89194, 21.39401),(39.88552, 21.39692),(39.8847, 21.3955),(39.88413, 21.39366),(39.88401, 21.39185),(39.88434, 21.39012),(39.88475, 21.38944),(39.88506, 21.38913),(39.88593, 21.38845),(39.88615, 21.38848),(39.88648, 21.38874),(39.88738, 21.38915),(39.89272, 21.39085),(39.89371, 21.39128),(39.89392, 21.39143),(39.89398, 21.39158),(39.89408, 21.39224),(39.89348, 21.39296)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.84886, 21.43817),(39.85002, 21.43712),(39.85126, 21.43554),(39.85158, 21.43529),(39.85427, 21.4341),(39.85471, 21.4338),(39.85655, 21.43378),(39.85785, 21.43364),(39.85715, 21.43444),(39.85687, 21.43452),(39.85609, 21.43509),(39.85562, 21.43524),(39.85418, 21.43543),(39.85305, 21.43578),(39.85245, 21.43611),(39.85064, 21.43763),(39.85024, 21.43783),(39.84886, 21.43817)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.85972, 21.41943),(39.86333, 21.41545),(39.8703, 21.40829),(39.87409, 21.40552),(39.87517, 21.40688),(39.87599, 21.40806),(39.87819, 21.41186),(39.87494, 21.41418),(39.87458, 21.41459),(39.87399, 21.41555),(39.87339, 21.41616),(39.86496, 21.42173),(39.86398, 21.42216),(39.86342, 21.42231),(39.86213, 21.42238),(39.85971, 21.42177),(39.86001, 21.42041),(39.85992, 21.41983),(39.85972, 21.41943)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.87571, 21.40434),(39.87841, 21.40243),(39.88448, 21.39962),(39.88786, 21.4056),(39.8874, 21.40591),(39.8848, 21.40843),(39.88417, 21.40934),(39.8834, 21.41072),(39.88317, 21.41098),(39.8828, 21.41129),(39.88232, 21.41144),(39.88197, 21.41139),(39.88098, 21.41104),(39.88003, 21.41105),(39.8777, 21.40702),(39.87679, 21.40571),(39.87571, 21.40434)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.88646, 21.39869),(39.89349, 21.39545),(39.8941, 21.39505),(39.89698, 21.39264),(39.89974, 21.3939),(39.89882, 21.39616),(39.89759, 21.39804),(39.8959, 21.40038),(39.89488, 21.40142),(39.89409, 21.40204),(39.89129, 21.40342),(39.88971, 21.40437),(39.88646, 21.39869)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.95221, 21.34705),(39.95232, 21.34622),(39.95252, 21.3457),(39.95278, 21.34519),(39.95313, 
21.34467),(39.95355, 21.34423),(39.95402, 21.34383),(39.95944, 21.34052),(39.96716, 21.3359),(39.96818, 21.33546),(39.96952, 21.33507),(39.97082, 21.33491),(39.97164, 21.33491),(39.97628, 21.33524),(39.97843, 21.33549),(39.98862, 21.33691),(39.99004, 21.33729),(39.99099, 21.33767),(39.9921, 21.33826),(39.99311, 21.339),(39.99377, 21.33959),(39.99466, 21.34067),(39.99517, 21.3415),(39.9956, 21.34231),(39.99591, 21.34323),(39.99606, 21.34384),(39.99627, 21.34514),(39.99627, 21.34711),(39.99622, 21.34781),(39.99493, 21.3532),(39.99449, 21.35441),(39.99377, 21.35575),(39.99337, 21.3563),(39.99265, 21.35702),(39.99047, 21.35841),(39.98785, 21.36027),(39.98739, 21.36066),(39.98664, 21.36148),(39.98629, 21.36203),(39.98604, 21.36261),(39.98588, 21.36326),(39.98582, 21.36386),(39.98584, 21.36461),(39.98597, 21.36522),(39.98613, 21.36571),(39.98645, 21.36645),(39.98769, 21.36884),(39.98792, 21.36953),(39.98811, 21.37052),(39.98876, 21.37739),(39.98869, 21.37821),(39.98863, 21.37841),(39.98851, 21.37867),(39.98808, 21.37931),(39.98785, 21.37954),(39.98758, 21.37973),(39.98671, 21.38013),(39.98624, 21.38023),(39.98142, 21.38031),(39.9805, 21.38024),(39.97978, 21.38003),(39.97949, 21.37987),(39.97917, 21.37962),(39.97889, 21.37936),(39.97864, 21.37904),(39.9778, 21.37783),(39.97605, 21.37561),(39.96729, 21.36519),(39.96247, 21.36003),(39.95955, 21.35682),(39.95867, 21.35593),(39.95499, 21.3519),(39.95402, 21.35093),(39.95309, 21.34984),(39.95262, 21.34915),(39.95236, 21.34849),(39.95225, 21.34799),(39.95218, 21.34733),(39.95221, 21.34705)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.80959, 21.42471),(39.80961, 21.42502),(39.80967, 21.4252),(39.80976, 21.42538),(39.80988, 21.42553),(39.81003, 21.42565),(39.81021, 21.42574),(39.81039, 21.4258),(39.81059, 21.42582),(39.81079, 21.4258),(39.81097, 21.42574),(39.81115, 21.42565),(39.8113, 21.42553),(39.81142, 21.42538),(39.81157, 21.42502),(39.81422, 21.4254),(39.81499, 21.42539),(39.81683, 21.42523),(39.8172, 21.4259),(39.81788, 21.42678),(39.81735, 21.42785),(39.81708, 21.42816),(39.817, 21.42821),(39.81458, 21.42838),(39.8142, 21.42837),(39.81313, 21.4282),(39.81278, 21.42826),(39.81207, 21.42855),(39.81173, 21.42888),(39.81161, 21.42903),(39.81152, 21.42921),(39.81137, 21.42959),(39.81126, 21.43002),(39.81082, 21.4313),(39.80838, 21.43082),(39.80778, 21.43079),(39.80781, 21.4305),(39.80793, 21.4264),(39.80787, 21.4244),(39.80959, 21.42471)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.80556, 21.43147),(39.80353, 21.43246),(39.80246, 21.43184),(39.80176, 21.43158),(39.80132, 21.43147),(39.80103, 21.43144),(39.80025, 21.43149),(39.79977, 21.43161),(39.79946, 21.43174),(39.79918, 21.43192),(39.79694, 21.43394),(39.79578, 21.43486),(39.79286, 21.43677),(39.79262, 21.43702),(39.79251, 21.43723),(39.79246, 21.43742),(39.79245, 21.43762),(39.79253, 21.43798),(39.79272, 21.43828),(39.79297, 21.43852),(39.79326, 21.43865),(39.79353, 21.43869),(39.79384, 21.43864),(39.79402, 21.43856),(39.79789, 21.43598),(39.80018, 21.4339),(39.80064, 21.43359),(39.80097, 21.43358),(39.80149, 21.43371),(39.80259, 21.43438),(39.80291, 21.4345),(39.8032, 21.43456),(39.80343, 21.43458),(39.80388, 21.43455),(39.8048, 21.43423),(39.80446, 21.43495),(39.80406, 21.43567),(39.80366, 21.43693),(39.80347, 21.43812),(39.80332, 21.43981),(39.80118, 21.43938),(39.79973, 21.43917),(39.79102, 21.43876),(39.79131, 21.43242),(39.79123, 21.43161),(39.79026, 21.42778),(39.79024, 21.42756),(39.79027, 21.42715),(39.79032, 21.42696),(39.79049, 21.42663),(39.79096, 
21.42597),(39.79434, 21.4257),(39.79706, 21.42535),(39.80209, 21.42359),(39.80281, 21.42342),(39.8032, 21.42339),(39.80401, 21.42349),(39.80572, 21.42393),(39.80578, 21.4274),(39.80564, 21.43075),(39.80556, 21.43147)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.80743, 21.43297),(39.8081, 21.43292),(39.81017, 21.43334),(39.80879, 21.44042),(39.80641, 21.44006),(39.80544, 21.44004),(39.80577, 21.43726),(39.80612, 21.43633),(39.80671, 21.43526),(39.80743, 21.43297)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.80978, 21.44779),(39.80994, 21.44566),(39.81007, 21.44487),(39.81053, 21.44265),(39.81116, 21.44264),(39.81154, 21.44256),(39.81225, 21.44226),(39.81248, 21.44211),(39.81366, 21.44109),(39.81384, 21.44099),(39.81679, 21.44037),(39.81781, 21.44011),(39.82104, 21.43947),(39.8222, 21.43916),(39.82287, 21.43891),(39.82379, 21.43843),(39.82508, 21.43789),(39.82657, 21.43749),(39.82684, 21.43737),(39.82727, 21.4371),(39.82865, 21.43639),(39.82907, 21.43623),(39.82934, 21.4363),(39.82961, 21.43628),(39.83008, 21.43695),(39.8317, 21.43866),(39.83196, 21.439),(39.83218, 21.43955),(39.8324, 21.43989),(39.83262, 21.44011),(39.83308, 21.44043),(39.83338, 21.44071),(39.83419, 21.44194),(39.83469, 21.44238),(39.83493, 21.44253),(39.83535, 21.44272),(39.83558, 21.44279),(39.83628, 21.44289),(39.83853, 21.44254),(39.83909, 21.44226),(39.8397, 21.44168),(39.83977, 21.44165),(39.84008, 21.44161),(39.84192, 21.44153),(39.843, 21.44143),(39.84509, 21.4411),(39.84799, 21.44052),(39.84946, 21.44019),(39.84981, 21.44021),(39.84994, 21.44025),(39.85134, 21.44181),(39.85116, 21.44253),(39.85117, 21.44267),(39.85047, 21.4439),(39.85008, 21.44479),(39.84952, 21.44592),(39.84796, 21.44857),(39.84741, 21.44971),(39.84589, 21.45357),(39.84554, 21.45432),(39.84375, 21.45712),(39.84259, 21.45929),(39.84225, 21.46015),(39.84203, 21.46152),(39.84189, 21.46327),(39.8414, 21.46556),(39.84112, 21.46739),(39.84112, 21.46789),(39.84122, 21.46847),(39.84135, 21.46887),(39.84146, 21.4691),(39.84191, 21.4698),(39.84204, 21.4701),(39.84217, 21.47069),(39.84223, 21.47113),(39.84204, 21.47546),(39.84198, 21.47572),(39.84187, 21.47595),(39.84166, 21.47621),(39.84137, 21.4764),(39.84125, 21.47644),(39.84102, 21.4765),(39.84071, 21.47651),(39.83664, 21.47574),(39.836, 21.4757),(39.83537, 21.47573),(39.83436, 21.47593),(39.83347, 21.47628),(39.83274, 21.47666),(39.83259, 21.47668),(39.83234, 21.47663),(39.83211, 21.47648),(39.83203, 21.4764),(39.83197, 21.47626),(39.83172, 21.47361),(39.83147, 21.47277),(39.83127, 21.4723),(39.83087, 21.47161),(39.82925, 21.46941),(39.82728, 21.4666),(39.82667, 21.46592),(39.82647, 21.46575),(39.82613, 21.46553),(39.82048, 21.46267),(39.82011, 21.46245),(39.81967, 21.46209),(39.81931, 21.46169),(39.81868, 21.46072),(39.81798, 21.45998),(39.81588, 21.45849),(39.81553, 21.45819),(39.81515, 21.45776),(39.81493, 21.45736),(39.81256, 21.45195),(39.8115, 21.45014),(39.81057, 21.44875),(39.80978, 21.44779)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.78317, 21.39802),(39.78285, 21.39808),(39.77692, 21.3971),(39.77752, 21.39324),(39.77769, 21.39243),(39.77792, 21.39179),(39.78089, 21.38707),(39.78351, 21.38492),(39.7841, 21.38405),(39.78439, 21.38333),(39.78558, 21.37686),(39.78697, 21.37699),(39.78625, 21.38452),(39.78422, 21.39642),(39.78406, 21.39682),(39.78317, 21.39802)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.78917, 21.37519),(39.78953, 21.37157),(39.79052, 21.37185),(39.79107, 21.37212),(39.79169, 21.37253),(39.7921, 21.37292),(39.79255, 21.37342),(39.79307, 
21.37423),(39.7934, 21.37489),(39.79358, 21.37547),(39.7921, 21.37547),(39.78917, 21.37519)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.87268, 21.3607),(39.87229, 21.37234),(39.87238, 21.37312),(39.8727, 21.37412),(39.87403, 21.3768),(39.87459, 21.37751),(39.87492, 21.3778),(39.87565, 21.3783),(39.87539, 21.37897),(39.87431, 21.38235),(39.87402, 21.38249),(39.87222, 21.38192),(39.87075, 21.38162),(39.86994, 21.38153),(39.86868, 21.38151),(39.86677, 21.38175),(39.86582, 21.382),(39.86468, 21.38241),(39.86373, 21.38286),(39.86273, 21.38347),(39.8619, 21.3841),(39.86106, 21.38489),(39.85782, 21.38829),(39.85728, 21.38809),(39.85678, 21.38807),(39.85608, 21.38827),(39.85576, 21.38852),(39.85553, 21.38885),(39.8554, 21.38933),(39.85538, 21.38957),(39.85547, 21.38999),(39.85567, 21.39042),(39.85495, 21.39109),(39.85387, 21.39231),(39.8531, 21.39292),(39.85269, 21.39312),(39.85256, 21.39313),(39.85225, 21.39306),(39.8512, 21.39193),(39.84972, 21.39016),(39.84959, 21.39004),(39.84879, 21.38937),(39.8481, 21.38893),(39.84746, 21.38863),(39.84605, 21.38825),(39.84136, 21.38785),(39.84054, 21.3877),(39.84015, 21.38753),(39.83982, 21.38734),(39.8394, 21.38701),(39.83693, 21.38443),(39.83665, 21.38418),(39.83518, 21.38321),(39.83492, 21.38309),(39.83413, 21.38287),(39.83345, 21.38279),(39.83236, 21.38277),(39.83156, 21.38286),(39.83051, 21.38312),(39.83637, 21.37503),(39.84749, 21.35985),(39.84785, 21.35942),(39.84912, 21.35812),(39.84948, 21.35783),(39.84985, 21.35763),(39.85045, 21.35745),(39.8545, 21.35696),(39.85519, 21.35698),(39.85738, 21.35754),(39.85971, 21.35848),(39.86135, 21.35892),(39.86264, 21.35917),(39.8703, 21.36038),(39.87268, 21.3607)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.87658, 21.37651),(39.87598, 21.37607),(39.87578, 21.37582),(39.87453, 21.3733),(39.87429, 21.37232),(39.87467, 21.36095),(39.87758, 21.36131),(39.87798, 21.36151),(39.88107, 21.36223),(39.88281, 21.36325),(39.88379, 21.36404),(39.88498, 21.36539),(39.88608, 21.36683),(39.8788, 21.37364),(39.8776, 21.375),(39.87658, 21.37651)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.82364, 21.41981),(39.82367, 21.41954),(39.82416, 21.41928),(39.82507, 21.41862),(39.82572, 21.41854),(39.8261, 21.4186),(39.8258, 21.41927),(39.82445, 21.41949),(39.82364, 21.41981)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.82391, 21.41697),(39.82383, 21.41571),(39.82364, 21.41473),(39.82274, 21.41253),(39.82449, 21.41261),(39.82443, 21.41485),(39.82469, 21.41537),(39.82519, 21.41566),(39.82577, 21.41563),(39.82623, 21.41527),(39.82642, 21.41472),(39.82648, 21.41309),(39.82695, 21.41321),(39.82661, 21.41499),(39.82657, 21.41567),(39.82683, 21.41674),(39.82634, 21.4166),(39.82514, 21.41656),(39.82446, 21.41671),(39.82391, 21.41697)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.82876, 21.41802),(39.83007, 21.41793),(39.83168, 21.41763),(39.83252, 21.41736),(39.8333, 21.41696),(39.83461, 21.41893),(39.83507, 21.41978),(39.83524, 21.42063),(39.8353, 21.42147),(39.83401, 21.42163),(39.83388, 21.42071),(39.83352, 21.41999),(39.83348, 21.41968),(39.83332, 21.41932),(39.83276, 21.41877),(39.8319, 21.41823),(39.83077, 21.41798),(39.82876, 21.41802)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.83509, 21.41604),(39.839, 21.41402),(39.84021, 21.41316),(39.84098, 21.41247),(39.84179, 21.41159),(39.8438, 21.40921),(39.84747, 21.40466),(39.84802, 21.40381),(39.84838, 21.40303),(39.84864, 21.40216),(39.84886, 21.40096),(39.849, 21.39949),(39.84909, 21.39896),(39.84924, 21.39842),(39.84956, 
21.39772),(39.85004, 21.39711),(39.8502, 21.39695),(39.8506, 21.39665),(39.85157, 21.39618),(39.853, 21.39569),(39.85363, 21.39558),(39.85396, 21.39547),(39.8544, 21.39524),(39.8547, 21.39503),(39.85499, 21.39478),(39.85617, 21.39361),(39.85696, 21.39291),(39.85746, 21.39236),(39.8579, 21.39179),(39.85877, 21.39085),(39.86338, 21.39308),(39.86446, 21.39356),(39.86531, 21.39402),(39.86577, 21.39435),(39.86651, 21.39507),(39.86596, 21.3955),(39.86532, 21.39609),(39.86472, 21.39688),(39.86379, 21.39863),(39.86138, 21.40439),(39.86035, 21.4064),(39.85537, 21.41465),(39.85266, 21.41823),(39.85145, 21.41971),(39.85133, 21.4198),(39.85092, 21.41997),(39.84912, 21.42017),(39.84516, 21.42027),(39.84289, 21.42044),(39.83728, 21.4212),(39.83723, 21.42043),(39.83701, 21.4193),(39.83677, 21.41867),(39.83632, 21.41789),(39.83509, 21.41604)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.86021, 21.38932),(39.86365, 21.38583),(39.86459, 21.38516),(39.86624, 21.38442),(39.86768, 21.38403),(39.86962, 21.38393),(39.8717, 21.38431),(39.87299, 21.3847),(39.8729, 21.38575),(39.87303, 21.38632),(39.87188, 21.38979),(39.87157, 21.39051),(39.87068, 21.39174),(39.87013, 21.39226),(39.8681, 21.39384),(39.86709, 21.39285),(39.86632, 21.39229),(39.86021, 21.38932)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.84703, 21.39096),(39.84715, 21.39117),(39.84744, 21.39143),(39.84761, 21.39151),(39.84787, 21.39156),(39.84872, 21.39256),(39.84882, 21.39314),(39.84858, 21.39443),(39.84842, 21.39495),(39.848, 21.39601),(39.84714, 21.39783),(39.84698, 21.39845),(39.84677, 21.40025),(39.84654, 21.40175),(39.84636, 21.40234),(39.84612, 21.40283),(39.84579, 21.40335),(39.84226, 21.4074),(39.8407, 21.40935),(39.83947, 21.41065),(39.83831, 21.41161),(39.8377, 21.41205),(39.83345, 21.41428),(39.83247, 21.41344),(39.83245, 21.41324),(39.83239, 21.41305),(39.8323, 21.41288),(39.83217, 21.41273),(39.83121, 21.41178),(39.83068, 21.41134),(39.83015, 21.41099),(39.83001, 21.41065),(39.83052, 21.40833),(39.83112, 21.40587),(39.8317, 21.40157),(39.83168, 21.40101),(39.83164, 21.40082),(39.83137, 21.40027),(39.83115, 21.39999),(39.83083, 21.39971),(39.83052, 21.39952),(39.8284, 21.39872),(39.82797, 21.39848),(39.82767, 21.39827),(39.82744, 21.39803),(39.82711, 21.39753),(39.827, 21.39726),(39.82693, 21.3969),(39.82679, 21.39172),(39.82659, 21.38808),(39.82672, 21.38766),(39.82705, 21.38702),(39.82746, 21.38644),(39.83156, 21.38522),(39.83244, 21.38503),(39.83282, 21.38502),(39.83372, 21.38517),(39.83421, 21.38535),(39.83473, 21.38565),(39.83527, 21.38603),(39.83533, 21.38622),(39.83555, 21.38655),(39.83588, 21.38676),(39.83607, 21.38681),(39.83738, 21.38827),(39.83788, 21.38875),(39.83886, 21.38944),(39.83905, 21.38954),(39.83962, 21.38978),(39.84022, 21.38998),(39.84135, 21.39018),(39.84179, 21.39021),(39.84192, 21.39029),(39.8423, 21.3904),(39.84269, 21.39035),(39.84282, 21.39029),(39.84546, 21.39049),(39.84593, 21.39055),(39.84632, 21.39064),(39.84672, 21.39079),(39.84703, 21.39096)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.83154, 21.41527),(39.83052, 21.41557),(39.82875, 21.41575),(39.82868, 21.41557),(39.8287, 21.41522),(39.82898, 21.41382),(39.83043, 21.41432),(39.83154, 21.41527)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.88532, 21.36227),(39.89047, 21.36299),(39.88935, 21.36357),(39.889, 21.36359),(39.88711, 21.36335),(39.88654, 21.36324),(39.88587, 21.36295),(39.88576, 21.36273),(39.88532, 21.36227)]); +SELECT pointInPolygon((39.840202, 21.451471), [(39.91229, 21.40316),(39.91302, 
21.40213),(39.91357, 21.40253),(39.91432, 21.40334),(39.91464, 21.40392),(39.91478, 21.40442),(39.91478, 21.40479),(39.91465, 21.40516),(39.91433, 21.40548),(39.914, 21.40563),(39.91331, 21.4056),(39.9128, 21.40534),(39.91261, 21.40516),(39.91238, 21.40474),(39.91229, 21.40316)]); + +SELECT 'Check that the point is not in Mecca with multiple holes'; + +SELECT pointInPolygon((39.840202, 21.451471), + +[(39.90553, 21.38668),(39.91034, 21.38608),(39.91834, 21.38048),(39.93078, 21.3627),(39.94141, 21.36278),(39.94753, 21.36075),(39.94986, 21.35894),(39.95349, 21.3533),(39.97833, 21.3815),(39.98132, 21.38231),(39.98851, 21.38151),(39.99076, 21.37747),(39.98987, 21.36908),(39.98791, 21.36332),(39.99543, 21.35687),(39.99827, 21.34722),(39.99784, 21.34271),(39.99632, 21.33955),(39.99322, 21.3366),(39.98908, 21.33496),(39.9692, 21.3331),(39.95841, 21.3388),(39.95109, 21.34412),(39.95026, 21.34825),(39.95203, 21.35168),(39.94753, 21.35845),(39.94165, 21.36077),(39.93272, 21.36009),(39.92969, 21.36103),(39.91928, 21.37669),(39.91374, 21.3816),(39.91056, 21.38296),(39.90839, 21.38361),(39.90059, 21.38257),(39.8978, 21.37712),(39.90051, 21.37335),(39.90033, 21.37195),(39.89559, 21.37533),(39.89099, 21.36937),(39.89101, 21.3661),(39.89465, 21.364),(39.92418, 21.35725),(39.92838, 21.35433),(39.94394, 21.33915),(39.96711, 21.32785),(39.97437, 21.32734),(39.99523, 21.33055),(40.01271, 21.3293),(40.01345, 21.3276),(40.00731, 21.32689),(39.99189, 21.32817),(39.97264, 21.3251),(39.96216, 21.32725),(39.95825, 21.32598),(39.95783, 21.32734),(39.96017, 21.32834),(39.94652, 21.33514),(39.94578, 21.33237),(39.94438, 21.33259),(39.94454, 21.33563),(39.92448, 21.3545),(39.92007, 21.3563),(39.89586, 21.3615),(39.86239, 21.35659),(39.85241, 21.35319),(39.85183, 21.35189),(39.84187, 21.3498),(39.83475, 21.35001),(39.82272, 21.35322),(39.80957, 21.34986),(39.80645, 21.34645),(39.80654, 21.34104),(39.82207, 21.29116),(39.82732, 21.26685),(39.82657, 21.22894),(39.82468, 21.22761),(39.82364, 21.22857),(39.82459, 21.22961),(39.82535, 21.26649),(39.82016, 21.29057),(39.81723, 21.29965),(39.81585, 21.30012),(39.81652, 21.30158),(39.81475, 21.30815),(39.80378, 21.34492),(39.8023, 21.34648),(39.79042, 21.34584),(39.78385, 21.34687),(39.77227, 21.34595),(39.7601, 21.34279),(39.73947, 21.34141),(39.71051, 21.34288),(39.70233, 21.34041),(39.68839, 21.33943),(39.65964, 21.33189),(39.64627, 21.3344),(39.64733, 21.33592),(39.65598, 21.33404),(39.66095, 21.33402),(39.68789, 21.34136),(39.70198, 21.34238),(39.71031, 21.34487),(39.74208, 21.34353),(39.76109, 21.34495),(39.77363, 21.34845),(39.77446, 21.35039),(39.76342, 21.37977),(39.75978, 21.39951),(39.75655, 21.40491),(39.73768, 21.39607),(39.72646, 21.38795),(39.71285, 21.3969),(39.69867, 21.37979),(39.66651, 21.36156),(39.6662, 21.36338),(39.69742, 21.38135),(39.7112, 21.39803),(39.70333, 21.40335),(39.70227, 21.40556),(39.70273, 21.40892),(39.71038, 21.41608),(39.71004, 21.42139),(39.68758, 21.414),(39.68099, 21.41398),(39.63179, 21.4366),(39.62927917729339, 21.43855995858338),(39.629299942421596, 21.44105336136311),(39.63273, 21.43836),(39.65768, 21.42753),(39.67404, 21.419),(39.6815, 21.41592),(39.68534, 21.41555),(39.7182, 21.42582),(39.72915, 21.4318),(39.72926, 21.43473),(39.72198, 21.45071),(39.72058, 21.46018),(39.72262, 21.46776),(39.72871, 21.47851),(39.73639, 21.48854),(39.73607, 21.50077),(39.73921, 21.50608),(39.74358, 21.50869),(39.77204, 21.51334),(39.78965, 21.51773),(39.78925, 21.52186),(39.77895, 21.53768),(39.77335, 21.55878),(39.77409, 21.55998),(39.77529, 
21.55924),(39.78151, 21.53691),(39.79101, 21.52282),(39.79216, 21.51796),(39.79392, 21.51725),(39.793, 21.51609),(39.79416, 21.49641),(39.79816, 21.47216),(39.8004, 21.46856),(39.80363, 21.4669),(39.80549, 21.46717),(39.80785, 21.46483),(39.8079, 21.45844),(39.80961, 21.45125),(39.81407, 21.45956),(39.8189, 21.46404),(39.82568, 21.4678),(39.82961, 21.47351),(39.83079, 21.47799),(39.84122, 21.47849),(39.84401, 21.47583),(39.84423, 21.47113),(39.84321, 21.46813),(39.84421, 21.46059),(39.85356, 21.44251),(39.85688, 21.44231),(39.86433, 21.45155),(39.86762, 21.45385),(39.87655, 21.45623),(39.88419, 21.46034),(39.89153, 21.46165),(39.8939, 21.46349),(39.89668, 21.46326),(39.9075, 21.47496),(39.91921, 21.48088),(39.9355, 21.48404),(39.94435, 21.48781),(39.96608, 21.48881),(39.96569, 21.49663),(39.95135, 21.53005),(39.94352, 21.56004),(39.94384, 21.56417),(39.94803, 21.56766),(39.95376, 21.56964),(39.95497, 21.56891),(39.9538, 21.56747),(39.94686, 21.56478),(39.94534, 21.56123),(39.95324, 21.53069),(39.96782, 21.49652),(39.96808, 21.48868),(39.98958, 21.49423),(40.00615, 21.4944),(40.01566, 21.50406),(40.03305, 21.5127),(40.0475, 21.52172),(40.05278, 21.52274),(40.06051, 21.52124),(40.05971, 21.51952),(40.05217, 21.52052),(40.04866, 21.51978),(40.03052, 21.50875),(40.01631, 21.50181),(40.01014, 21.49459),(40.00619, 21.49215),(39.98995, 21.49206),(39.96952, 21.48658),(39.94485, 21.48571),(39.93748, 21.48246),(39.95107, 21.45666),(39.97348, 21.46578),(39.97479, 21.46523),(39.97424, 21.46392),(39.95217, 21.45495),(39.95444, 21.45202),(39.97071, 21.44272),(39.97127, 21.44141),(39.97007, 21.44065),(39.95381, 21.44976),(39.95007, 21.45407),(39.94121, 21.45146),(39.93089, 21.45021),(39.92173, 21.4449),(39.9164, 21.44366),(39.91152, 21.44104),(39.90446, 21.44019),(39.90416, 21.43717),(39.9067, 21.43268),(39.90657, 21.42875),(39.91121, 21.40898),(39.91566, 21.40698),(39.91675, 21.40517),(39.91627, 21.40045),(39.91407, 21.39734),(39.91949, 21.39132),(39.92673, 21.38963),(39.93267, 21.39089),(39.93373, 21.38995),(39.93279, 21.38889),(39.92676, 21.38762),(39.91905, 21.38931),(39.91251, 21.39595),(39.91173, 21.40041),(39.90949, 21.39663),(39.91172, 21.3928),(39.91031, 21.39269),(39.90798, 21.39493),(39.90668, 21.39219),(39.90882, 21.38887),(39.90768, 21.38803),(39.90505, 21.39084),(39.90417, 21.38841),(39.90553, 21.38668)], + +[(39.89317, 21.40473),(39.8952, 21.40371),(39.89574, 21.40332),(39.89629, 21.40285),(39.89739, 21.40172),(39.89925, 21.39916),(39.90055, 21.39718),(39.90145, 21.39501),(39.90173, 21.39491),(39.90189, 21.3948),(39.90252, 21.39409),(39.90289, 21.39384),(39.90396, 21.3934),(39.90422, 21.39338),(39.90436, 21.39343),(39.9044, 21.39349),(39.90404, 21.39369),(39.9037, 21.39396),(39.90351, 21.39416),(39.90319, 21.39463),(39.9027, 21.39571),(39.90267, 21.3959),(39.90268, 21.3961),(39.90281, 21.39646),(39.90293, 21.39662),(39.90307, 21.39675),(39.90324, 21.39685),(39.90362, 21.39695),(39.90382, 21.39694),(39.90418, 21.39681),(39.90447, 21.39655),(39.90505, 21.39544),(39.90523, 21.39531),(39.90547, 21.39528),(39.90556, 21.39529),(39.90572, 21.39537),(39.90592, 21.39552),(39.90662, 21.39645),(39.906, 21.39651),(39.9052, 21.39665),(39.90396, 21.39711),(39.90363, 21.39731),(39.9035, 21.39746),(39.90341, 21.39763),(39.90332, 21.39801),(39.90313, 21.39836),(39.90309, 21.39856),(39.90308, 21.39875),(39.90312, 21.39895),(39.90329, 21.39929),(39.90343, 21.39944),(39.90376, 21.39963),(39.90415, 21.39968),(39.90743, 21.39882),(39.90786, 21.39882),(39.90822, 21.39894),(39.9085, 21.39911),(39.90876, 
21.39934),(39.9095, 21.40036),(39.90976, 21.40084),(39.90998, 21.40146),(39.91019, 21.40247),(39.90991, 21.40276),(39.90931, 21.40349),(39.90896, 21.40373),(39.90674, 21.40608),(39.90348, 21.40934),(39.9024, 21.41059),(39.90214, 21.4108),(39.90157, 21.41114),(39.90101, 21.41142),(39.90053, 21.41156),(39.90001, 21.41165),(39.89952, 21.41166),(39.89816, 21.41146),(39.89719, 21.4114),(39.8962, 21.41124),(39.89535, 21.41126),(39.89484, 21.41133),(39.89435, 21.41148),(39.89405, 21.41163),(39.89166, 21.4077),(39.89109, 21.40671),(39.89172, 21.4064),(39.89219, 21.40606),(39.89252, 21.40568),(39.89317, 21.40473)], + +[(39.89411, 21.37812),(39.89305, 21.37935),(39.89229, 21.38007),(39.89129, 21.38085),(39.88637, 21.38398),(39.88501, 21.38505),(39.88428, 21.38581),(39.87665, 21.38344),(39.87669, 21.38209),(39.87721, 21.38049),(39.87864, 21.37771),(39.8796, 21.37629),(39.88162, 21.37426),(39.88637, 21.36994),(39.88657, 21.36988),(39.88706, 21.37),(39.88876, 21.37081),(39.88896, 21.37101),(39.89013, 21.37233),(39.89156, 21.37426),(39.89332, 21.37678),(39.89411, 21.37812)], + +[(39.87603, 21.38534),(39.88308, 21.38753),(39.88293, 21.38778),(39.88223, 21.38927),(39.88216, 21.3895),(39.88185, 21.39103),(39.88178, 21.39207),(39.88187, 21.39333),(39.8821, 21.39461),(39.88234, 21.39542),(39.88273, 21.39643),(39.88352, 21.39787),(39.87797, 21.4004),(39.87743, 21.40069),(39.87678, 21.4011),(39.87442, 21.40281),(39.87359, 21.40173),(39.87299, 21.40077),(39.87076, 21.39685),(39.87013, 21.39591),(39.8696, 21.39535),(39.8714, 21.39394),(39.87198, 21.39344),(39.87254, 21.39286),(39.87317, 21.39206),(39.87356, 21.39139),(39.87399, 21.39041),(39.87552, 21.3857),(39.87573, 21.38561),(39.87603, 21.38534)], + +[(39.8728, 21.40398),(39.86889, 21.40686),(39.86188, 21.41407),(39.8567, 21.41973),(39.85571, 21.42095),(39.85474, 21.4207),(39.85357, 21.42055),(39.8571, 21.4159),(39.8598, 21.4115),(39.86247, 21.40702),(39.86312, 21.40575),(39.86566, 21.39964),(39.86637, 21.39829),(39.86702, 21.39748),(39.86801, 21.39659),(39.86852, 21.39711),(39.86908, 21.39794),(39.87126, 21.40177),(39.87192, 21.40283),(39.8728, 21.40398)], + +[(39.85537, 21.42292),(39.85579, 21.42569),(39.85581, 21.42611),(39.85559, 21.4266),(39.85335, 21.42856),(39.85159, 21.43039),(39.85001, 21.43114),(39.84921, 21.43092),(39.84693, 21.42992),(39.84558, 21.42953),(39.84158, 21.42902),(39.83488, 21.42846),(39.83661, 21.42511),(39.8371, 21.42353),(39.84406, 21.42267),(39.84527, 21.4226),(39.85309, 21.42252),(39.85438, 21.42267),(39.85537, 21.42292)], + +[(39.84628, 21.43181),(39.84808, 21.43261),(39.84783, 21.43302),(39.84618, 21.43694),(39.84577, 21.43776),(39.84437, 21.43886),(39.84394, 21.43902),(39.84241, 21.43929),(39.84185, 21.43935),(39.8414, 21.43937),(39.83966, 21.43925),(39.83928, 21.4393),(39.83876, 21.43957),(39.83857, 21.43971),(39.8382, 21.44005),(39.83781, 21.44051),(39.83637, 21.44071),(39.83611, 21.4407),(39.83599, 21.44066),(39.83589, 21.44058),(39.83584, 21.44048),(39.83575, 21.43989),(39.83567, 21.43963),(39.83545, 21.43919),(39.83508, 21.4388),(39.83491, 21.4387),(39.83472, 21.43864),(39.83453, 21.43861),(39.83433, 21.43862),(39.83409, 21.4387),(39.83396, 21.43862),(39.83377, 21.43814),(39.83364, 21.43792),(39.83319, 21.43732),(39.83279, 21.43689),(39.83253, 21.43633),(39.8323, 21.43541),(39.83224, 21.43523),(39.83214, 21.43506),(39.83189, 21.43474),(39.83144, 21.43432),(39.83111, 21.43413),(39.8305, 21.43392),(39.82999, 21.43269),(39.83002, 21.43255),(39.82998, 21.43209),(39.83109, 21.43171),(39.8318, 21.43138),(39.83234, 
21.43109),(39.83301, 21.43064),(39.83338, 21.43032),(39.83365, 21.43037),(39.84139, 21.43101),(39.8432, 21.43126),(39.84419, 21.43134),(39.84517, 21.43149),(39.84628, 21.43181)], + +[(39.83035, 21.42986),(39.8304, 21.42866),(39.83018, 21.42818),(39.83125, 21.42753),(39.83199, 21.42688),(39.83261, 21.42604),(39.83342, 21.42459),(39.83365, 21.42395),(39.83444, 21.42391),(39.83496, 21.4238),(39.83479, 21.42429),(39.83281, 21.42802),(39.83184, 21.42901),(39.83126, 21.4294),(39.83035, 21.42986)], + +[(39.82819, 21.42907),(39.82814, 21.4304),(39.82488, 21.43059),(39.82445, 21.43066),(39.82337, 21.43101),(39.82191, 21.43183),(39.82049, 21.43295),(39.81955, 21.43393),(39.81912, 21.43421),(39.81892, 21.4343),(39.81862, 21.43437),(39.81829, 21.4344),(39.81774, 21.43434),(39.81632, 21.43374),(39.81706, 21.43236),(39.81724, 21.4319),(39.81748, 21.43143),(39.81812, 21.43039),(39.81845, 21.42992),(39.81919, 21.42907),(39.81933, 21.42884),(39.81964, 21.4281),(39.82006, 21.42826),(39.82103, 21.4285),(39.82135, 21.42873),(39.82154, 21.42879),(39.82284, 21.42907),(39.82412, 21.42923),(39.8253, 21.42933),(39.82659, 21.42933),(39.8273, 21.42926),(39.82819, 21.42907)], + +[(39.82779, 21.43244),(39.82826, 21.43377),(39.82849, 21.43431),(39.82776, 21.4346),(39.82626, 21.43537),(39.82591, 21.43559),(39.8245, 21.43597),(39.82298, 21.4366),(39.822, 21.43711),(39.82165, 21.43723),(39.82055, 21.43753),(39.81734, 21.43816),(39.81631, 21.43843),(39.81333, 21.43905),(39.81312, 21.43912),(39.81267, 21.43934),(39.81245, 21.4395),(39.81128, 21.4405),(39.81094, 21.44064),(39.81165, 21.43717),(39.81218, 21.43422),(39.81403, 21.43504),(39.81432, 21.4353),(39.8145, 21.43538),(39.81489, 21.43543),(39.81499, 21.43547),(39.81655, 21.4363),(39.81714, 21.4365),(39.81776, 21.43662),(39.81815, 21.43662),(39.81834, 21.43656),(39.81861, 21.43638),(39.81889, 21.43636),(39.81948, 21.43622),(39.82014, 21.43593),(39.82092, 21.4354),(39.82179, 21.43448),(39.82309, 21.43346),(39.82425, 21.43281),(39.82484, 21.43263),(39.82515, 21.43258),(39.82779, 21.43244)], + +[(39.8207, 21.42636),(39.82125, 21.42508),(39.82206, 21.42403),(39.82275, 21.42279),(39.82346, 21.42235),(39.82418, 21.42178),(39.82499, 21.42142),(39.8265, 21.42122),(39.83136, 21.42199),(39.83197, 21.42264),(39.83163, 21.4237),(39.8309, 21.425),(39.83051, 21.42553),(39.83031, 21.42574),(39.82948, 21.42628),(39.828, 21.42705),(39.82644, 21.42733),(39.8243, 21.42724),(39.82267, 21.42698),(39.82214, 21.42669),(39.8207, 21.42636)], + +[(39.82102, 21.42106),(39.82062, 21.42128),(39.82017, 21.42162),(39.8194, 21.42239),(39.81905, 21.4228),(39.81802, 21.42299),(39.8179, 21.42265),(39.81779, 21.42189),(39.8176, 21.42155),(39.81729, 21.42131),(39.81711, 21.42124),(39.81692, 21.4212),(39.81653, 21.42125),(39.81619, 21.42144),(39.81595, 21.42175),(39.81588, 21.42193),(39.81584, 21.42212),(39.81585, 21.42232),(39.81597, 21.42319),(39.81504, 21.42328),(39.81423, 21.42329),(39.80999, 21.42266),(39.80755, 21.42223),(39.80737, 21.42203),(39.80706, 21.42179),(39.80695, 21.42166),(39.80685, 21.42142),(39.80686, 21.42123),(39.80695, 21.42105),(39.80755, 21.42041),(39.80768, 21.42022),(39.80789, 21.41962),(39.80793, 21.41935),(39.80801, 21.41519),(39.80809, 21.41439),(39.80827, 21.41351),(39.80867, 21.41233),(39.80881, 21.41233),(39.80924, 21.41409),(39.80931, 21.41428),(39.80941, 21.41444),(39.80969, 21.41471),(39.81006, 21.41484),(39.81026, 21.41485),(39.81064, 21.41475),(39.81095, 21.41452),(39.81107, 21.41437),(39.8112, 21.414),(39.81121, 21.4138),(39.81118, 21.41361),(39.81088, 
21.41239),(39.81311, 21.41248),(39.81908, 21.41249),(39.8197, 21.41244),(39.82006, 21.41251),(39.8204, 21.41251),(39.8209, 21.41367),(39.82144, 21.41481),(39.82169, 21.41562),(39.82182, 21.4166),(39.82176, 21.41785),(39.81945, 21.41827),(39.81851, 21.41855),(39.8182, 21.41879),(39.81809, 21.41895),(39.81801, 21.41913),(39.81796, 21.41951),(39.81807, 21.41989),(39.81831, 21.4202),(39.81865, 21.42039),(39.81884, 21.42043),(39.81903, 21.42044),(39.81994, 21.42021),(39.82154, 21.41992),(39.82154, 21.41997),(39.82102, 21.42106)], + +[(39.81069, 21.41022),(39.81115, 21.40581),(39.8114, 21.4056),(39.81159, 21.40525),(39.81163, 21.40487),(39.81157, 21.40426),(39.81209, 21.40103),(39.81211, 21.39966),(39.81193, 21.39818),(39.81097, 21.39505),(39.8115, 21.39475),(39.81252, 21.39399),(39.8134, 21.39321),(39.81416, 21.39245),(39.81607, 21.38983),(39.81662, 21.38928),(39.81728, 21.38874),(39.81819, 21.38814),(39.81928, 21.38768),(39.82014, 21.38742),(39.82059, 21.38733),(39.82164, 21.38725),(39.82287, 21.38723),(39.82462, 21.3871),(39.82443, 21.38782),(39.82441, 21.38855),(39.82457, 21.39047),(39.82464, 21.39192),(39.82479, 21.3973),(39.82485, 21.3976),(39.82514, 21.39842),(39.82525, 21.39865),(39.8258, 21.39947),(39.82635, 21.4),(39.82679, 21.40031),(39.82736, 21.40062),(39.82928, 21.40137),(39.82948, 21.40151),(39.82914, 21.40441),(39.82897, 21.40551),(39.8282, 21.40889),(39.82795, 21.41023),(39.82696, 21.41),(39.82658, 21.40996),(39.82664, 21.40825),(39.82658, 21.40787),(39.82649, 21.40769),(39.82622, 21.40741),(39.82605, 21.40731),(39.82567, 21.40722),(39.82529, 21.40728),(39.82511, 21.40737),(39.82483, 21.40764),(39.82473, 21.40781),(39.82464, 21.40819),(39.82458, 21.41008),(39.8235, 21.41017),(39.82153, 21.41019),(39.82001, 21.40827),(39.81977, 21.40778),(39.81909, 21.40579),(39.81879, 21.40505),(39.81827, 21.40417),(39.81699, 21.40224),(39.8159, 21.4008),(39.81463, 21.39957),(39.8141, 21.39915),(39.81392, 21.39908),(39.81353, 21.39903),(39.81316, 21.39914),(39.81299, 21.39925),(39.81274, 21.39955),(39.81267, 21.39973),(39.81262, 21.40012),(39.81273, 21.40049),(39.81284, 21.40066),(39.81298, 21.4008),(39.81326, 21.40103),(39.81445, 21.40218),(39.81537, 21.40342),(39.81656, 21.40521),(39.81696, 21.40588),(39.81722, 21.40652),(39.81794, 21.4086),(39.81826, 21.40925),(39.81898, 21.41019),(39.81328, 21.41012),(39.81218, 21.41014),(39.81134, 21.41021),(39.81069, 21.41022)], + +[(39.80999, 21.39018),(39.80983, 21.38939),(39.80816, 21.38343),(39.80609, 21.3751),(39.80867, 21.37472),(39.80969, 21.37875),(39.81147, 21.38151),(39.81202, 21.38214),(39.81339, 21.38345),(39.81628, 21.38612),(39.81677, 21.38646),(39.81575, 21.38713),(39.81478, 21.38799),(39.81395, 21.38898),(39.81265, 21.39076),(39.81228, 21.39111),(39.8121, 21.3912),(39.81181, 21.39121),(39.81033, 21.39058),(39.80999, 21.39018)], + +[(39.81909, 21.38545),(39.8181, 21.38496),(39.81759, 21.3846),(39.81476, 21.382),(39.81349, 21.38079),(39.81311, 21.38035),(39.81153, 21.37795),(39.81145, 21.37773),(39.81046, 21.37363),(39.81038, 21.37341),(39.81025, 21.37321),(39.80995, 21.37288),(39.80962, 21.3727),(39.80925, 21.37265),(39.80899, 21.37265),(39.80561, 21.37315),(39.80517, 21.37139),(39.80492, 21.37068),(39.80504, 21.37048),(39.8051, 21.37031),(39.80521, 21.36975),(39.8052, 21.36955),(39.80506, 21.36919),(39.80494, 21.36903),(39.80441, 21.36603),(39.80411, 21.36492),(39.80356, 21.36191),(39.80349, 21.36116),(39.80345, 21.35965),(39.80349, 21.35894),(39.80411, 21.35504),(39.80433, 21.35476),(39.80482, 21.35383),(39.80553, 21.35304),(39.8063, 
21.35252),(39.80724, 21.35222),(39.80825, 21.35217),(39.80932, 21.35231),(39.81637, 21.35445),(39.81856, 21.35504),(39.82046, 21.35546),(39.82287, 21.35558),(39.82421, 21.35536),(39.82613, 21.35492),(39.83433, 21.35261),(39.83656, 21.35216),(39.83834, 21.35188),(39.83936, 21.35182),(39.84169, 21.3521),(39.84381, 21.35252),(39.84777, 21.35391),(39.84887, 21.35434),(39.84773, 21.35594),(39.84744, 21.35628),(39.84524, 21.35927),(39.83253, 21.37661),(39.8295, 21.38077),(39.82857, 21.38214),(39.82833, 21.38244),(39.82791, 21.38279),(39.82744, 21.38324),(39.82631, 21.3845),(39.82551, 21.38473),(39.82438, 21.38494),(39.82041, 21.38516),(39.81948, 21.38534),(39.81909, 21.38545)], + +[(39.80215, 21.35462),(39.8015, 21.35868),(39.80145, 21.35959),(39.80149, 21.36132),(39.80157, 21.36215),(39.80215, 21.36532),(39.80244, 21.36642),(39.80274, 21.36813),(39.79915, 21.3652),(39.79798, 21.36406),(39.79715, 21.36315),(39.79586, 21.36153),(39.79496, 21.36019),(39.79359, 21.35827),(39.79297, 21.35753),(39.79128, 21.35568),(39.79053, 21.35503),(39.78825, 21.35321),(39.78735, 21.35255),(39.78586, 21.35172),(39.78394, 21.35084),(39.78351, 21.35055),(39.78399, 21.34968),(39.78415, 21.34951),(39.78515, 21.34915),(39.78558, 21.34906),(39.78718, 21.34859),(39.78801, 21.34846),(39.78885, 21.34839),(39.78982, 21.34822),(39.79322, 21.3482),(39.79564, 21.34849),(39.79835, 21.34916),(39.79957, 21.34986),(39.80019, 21.35029),(39.80071, 21.35078),(39.80117, 21.35137),(39.80155, 21.35202),(39.80187, 21.35285),(39.80206, 21.35367),(39.80209, 21.35432),(39.80215, 21.35462)], + +[(39.779, 21.35047),(39.78, 21.35119),(39.78099, 21.35171),(39.78492, 21.35348),(39.78632, 21.35426),(39.78702, 21.35478),(39.78924, 21.35656),(39.78993, 21.35716),(39.79145, 21.35883),(39.79203, 21.35952),(39.79333, 21.36134),(39.79427, 21.36274),(39.79565, 21.36447),(39.79656, 21.36548),(39.79779, 21.36667),(39.80229, 21.37034),(39.80218, 21.37053),(39.80167, 21.37113),(39.80041, 21.37248),(39.79975, 21.3731),(39.79894, 21.37373),(39.79827, 21.37413),(39.79698, 21.37475),(39.79643, 21.37496),(39.79558, 21.37521),(39.79529, 21.37422),(39.79478, 21.3732),(39.79409, 21.37214),(39.79351, 21.37149),(39.793, 21.37101),(39.79201, 21.37035),(39.79122, 21.36997),(39.79048, 21.36972),(39.7891, 21.36947),(39.7887, 21.36944),(39.78832, 21.36949),(39.78814, 21.36957),(39.78784, 21.36982),(39.78767, 21.37017),(39.78763, 21.37037),(39.78718, 21.375),(39.78467, 21.37478),(39.78431, 21.37488),(39.78402, 21.37511),(39.78391, 21.37526),(39.78378, 21.3756),(39.78245, 21.38285),(39.78231, 21.38317),(39.78211, 21.38348),(39.77952, 21.38561),(39.77935, 21.38579),(39.77889, 21.38643),(39.77608, 21.391),(39.77578, 21.39181),(39.77555, 21.39291),(39.77492, 21.39694),(39.76541, 21.3964),(39.76273, 21.39622),(39.76342, 21.3915),(39.76482, 21.38563),(39.76512, 21.38401),(39.76545, 21.38126),(39.76562, 21.38041),(39.76602, 21.37916),(39.76859, 21.37278),(39.76972, 21.36938),(39.77036, 21.36686),(39.77091, 21.36513),(39.77314, 21.35887),(39.77478, 21.35448),(39.77517, 21.35368),(39.77568, 21.35293),(39.77631, 21.35217),(39.77681, 21.35168),(39.77761, 21.3511),(39.77831, 21.35073),(39.779, 21.35047)], + +[(39.76244, 21.39846),(39.76255, 21.39821),(39.76528, 21.3984),(39.77595, 21.399),(39.77653, 21.39906),(39.78224, 21.39999),(39.78304, 21.40016),(39.78406, 21.40081),(39.78349, 21.40178),(39.78316, 21.40249),(39.78263, 21.40405),(39.78258, 21.40444),(39.78267, 21.40478),(39.78245, 21.40582),(39.78226, 21.407),(39.78167, 21.40978),(39.78069, 21.41362),(39.77994, 
21.41671),(39.77776, 21.41693),(39.77732, 21.41693),(39.7769, 21.41689),(39.77624, 21.41674),(39.77593, 21.41661),(39.773, 21.41486),(39.77164, 21.41423),(39.76892, 21.41344),(39.76744, 21.41319),(39.76656, 21.41312),(39.76083, 21.41289),(39.7596, 21.41288),(39.75858, 21.41283),(39.756, 21.4126),(39.75567, 21.41255),(39.75472, 21.41221),(39.75448, 21.41203),(39.7544, 21.41191),(39.75435, 21.41171),(39.75437, 21.41133),(39.75446, 21.41111),(39.75488, 21.41038),(39.75712, 21.40804),(39.75802, 21.40692),(39.75933, 21.40518),(39.7607, 21.40297),(39.76127, 21.40197),(39.76186, 21.40076),(39.76209, 21.40008),(39.76228, 21.39934),(39.76244, 21.39846)], + +[(39.78571, 21.4023),(39.78613, 21.40226),(39.78735, 21.40325),(39.78684, 21.40459),(39.78685, 21.40595),(39.78943, 21.41476),(39.79045, 21.41882),(39.79071, 21.41927),(39.79203, 21.42035),(39.7921, 21.42071),(39.79178, 21.42117),(39.79074, 21.42159),(39.7902, 21.42203),(39.78993, 21.42249),(39.78974, 21.42357),(39.78774, 21.42363),(39.78577, 21.42328),(39.78521, 21.42357),(39.78486, 21.42441),(39.78493, 21.42495),(39.78537, 21.42543),(39.78674, 21.42583),(39.7884, 21.42607),(39.78817, 21.42683),(39.7881, 21.428),(39.78914, 21.43215),(39.7889, 21.43869),(39.78737, 21.43877),(39.78262, 21.43967),(39.78006, 21.43961),(39.77771, 21.43792),(39.77742, 21.43637),(39.77739, 21.43356),(39.77763, 21.4322),(39.77929, 21.42676),(39.78007, 21.42488),(39.78048, 21.42431),(39.78176, 21.42346),(39.78296, 21.42427),(39.78354, 21.42436),(39.78438, 21.42393),(39.78469, 21.42344),(39.78459, 21.42268),(39.78345, 21.42155),(39.78336, 21.41933),(39.78243, 21.41679),(39.78242, 21.41498),(39.78361, 21.41026),(39.78458, 21.40542),(39.78489, 21.40468),(39.7849, 21.40411),(39.78571, 21.4023)], + +[(39.77052, 21.43821),(39.76961, 21.43812),(39.76832, 21.4382),(39.76767, 21.43829),(39.76811, 21.43749),(39.76863, 21.4368),(39.76943, 21.43601),(39.77156, 21.43438),(39.77239, 21.43353),(39.77295, 21.43277),(39.77411, 21.43069),(39.7749, 21.42952),(39.77558, 21.42859),(39.77673, 21.42736),(39.77594, 21.42991),(39.77575, 21.4307),(39.77542, 21.43177),(39.77522, 21.43286),(39.77508, 21.43432),(39.77509, 21.43554),(39.77488, 21.43577),(39.77474, 21.43605),(39.77467, 21.43622),(39.77444, 21.43711),(39.77419, 21.43754),(39.77404, 21.43771),(39.77377, 21.43791),(39.77352, 21.43805),(39.77287, 21.43824),(39.77085, 21.43816),(39.77052, 21.43821)], + +[(39.77683, 21.42438),(39.77605, 21.425),(39.77496, 21.42603),(39.7739, 21.42729),(39.77313, 21.42836),(39.77185, 21.4304),(39.77114, 21.43164),(39.77043, 21.4325),(39.76964, 21.4332),(39.76827, 21.43424),(39.76732, 21.43509),(39.76667, 21.43583),(39.76622, 21.43647),(39.76497, 21.43865),(39.76336, 21.43887),(39.76248, 21.43875),(39.76215, 21.43863),(39.75945, 21.4361),(39.75799, 21.43499),(39.75779, 21.43487),(39.75661, 21.43435),(39.75139, 21.43268),(39.75032, 21.43203),(39.74784, 21.42997),(39.74759, 21.42982),(39.74683, 21.4295),(39.74612, 21.42931),(39.74467, 21.42912),(39.74448, 21.42896),(39.7443, 21.42888),(39.74194, 21.4285),(39.74145, 21.42827),(39.74102, 21.42776),(39.7407, 21.42681),(39.74058, 21.42589),(39.74083, 21.42453),(39.74184, 21.4225),(39.74313, 21.42067),(39.74414, 21.41962),(39.74463, 21.41918),(39.74722, 21.41719),(39.74816, 21.41667),(39.75131, 21.41514),(39.7528, 21.4143),(39.7546, 21.41468),(39.75722, 21.41503),(39.7665, 21.41541),(39.76768, 21.41554),(39.7691, 21.41584),(39.77057, 21.41631),(39.77193, 21.41692),(39.77339, 21.41779),(39.7742, 21.41838),(39.77635, 21.41978),(39.77611, 21.41999),(39.77594, 
21.42034),(39.77589, 21.42086),(39.77591, 21.42111),(39.77604, 21.42177),(39.77613, 21.42203),(39.77649, 21.42272),(39.77693, 21.42331),(39.77708, 21.42367),(39.77707, 21.42393),(39.77683, 21.42438)], + +[(39.73364, 21.42933),(39.73324, 21.42934),(39.72777, 21.42885),(39.72661, 21.42861),(39.72586, 21.42833),(39.71975, 21.42438),(39.71903, 21.424),(39.71198, 21.42198),(39.71237, 21.41843),(39.71241, 21.4169),(39.71237, 21.41582),(39.71224, 21.4153),(39.71207, 21.41499),(39.71028, 21.41276),(39.70957, 21.412),(39.70914, 21.4116),(39.70794, 21.41071),(39.70769, 21.41057),(39.70642, 21.41012),(39.70575, 21.40943),(39.70451, 21.40799),(39.70442, 21.40779),(39.70426, 21.40579),(39.70437, 21.4053),(39.70468, 21.40483),(39.70531, 21.40433),(39.71243, 21.39961),(39.7206, 21.41007),(39.72131, 21.41077),(39.72202, 21.41134),(39.72276, 21.41188),(39.72424, 21.41269),(39.72578, 21.41323),(39.72663, 21.41346),(39.72741, 21.41357),(39.72886, 21.41366),(39.73014, 21.4136),(39.7346, 21.41323),(39.73624, 21.41339),(39.73895, 21.41313),(39.74098, 21.41303),(39.74174, 21.41319),(39.74348, 21.41408),(39.74416, 21.41454),(39.74445, 21.41478),(39.74463, 21.41504),(39.74467, 21.41523),(39.74465, 21.41565),(39.74447, 21.41608),(39.74416, 21.41649),(39.74239, 21.41799),(39.74121, 21.41922),(39.74065, 21.41993),(39.73973, 21.42137),(39.73894, 21.42307),(39.7385, 21.42385),(39.73799, 21.42463),(39.73672, 21.42624),(39.73364, 21.42933)], + +[(39.71409, 21.39848),(39.7259, 21.39043),(39.72856, 21.39237),(39.72897, 21.39258),(39.72946, 21.39268),(39.72985, 21.39281),(39.73553, 21.39698),(39.73563, 21.39707),(39.73609, 21.39767),(39.73676, 21.39815),(39.74249, 21.40142),(39.743, 21.40162),(39.74336, 21.40168),(39.7445, 21.40156),(39.74465, 21.40162),(39.74535, 21.40201),(39.74586, 21.40221),(39.74731, 21.4031),(39.75127, 21.4053),(39.75207, 21.40561),(39.75415, 21.40664),(39.75489, 21.40686),(39.75501, 21.40693),(39.75368, 21.40835),(39.75296, 21.4087),(39.75071, 21.40918),(39.74696, 21.41005),(39.74601, 21.41024),(39.74227, 21.41003),(39.74019, 21.41017),(39.73721, 21.41052),(39.73608, 21.41079),(39.73591, 21.41089),(39.73567, 21.41113),(39.72999, 21.4116),(39.72892, 21.41166),(39.72757, 21.41157),(39.72697, 21.41149),(39.72641, 21.41133),(39.7251, 21.41088),(39.72389, 21.41023),(39.72326, 21.40977),(39.72261, 21.40924),(39.72205, 21.40869),(39.71409, 21.39848)], + +[(39.7457, 21.43127),(39.74624, 21.43142),(39.74667, 21.4316),(39.7491, 21.43362),(39.75045, 21.43445),(39.75067, 21.43455),(39.75586, 21.43621),(39.75688, 21.43666),(39.75811, 21.43759),(39.7606, 21.43995),(39.76102, 21.44029),(39.76115, 21.44038),(39.76188, 21.44066),(39.7621, 21.44072),(39.76323, 21.44087),(39.76349, 21.44087),(39.76572, 21.44057),(39.76592, 21.44058),(39.76625, 21.4405),(39.76848, 21.4402),(39.76965, 21.44012),(39.77019, 21.44018),(39.77471, 21.44129),(39.77662, 21.44142),(39.77705, 21.44257),(39.77753, 21.44343),(39.77792, 21.444),(39.77863, 21.44489),(39.77936, 21.44567),(39.7797, 21.44597),(39.7803, 21.44638),(39.78107, 21.44684),(39.7823, 21.4475),(39.78364, 21.44835),(39.78622, 21.45028),(39.78775, 21.45163),(39.78853, 21.45236),(39.79426, 21.45875),(39.79488, 21.45938),(39.79706, 21.46135),(39.79869, 21.4625),(39.80185, 21.46439),(39.80201, 21.46461),(39.80207, 21.46475),(39.80206, 21.46486),(39.80187, 21.46508),(39.79992, 21.4663),(39.79979, 21.46644),(39.79968, 21.46663),(39.79914, 21.467),(39.79814, 21.46788),(39.79751, 21.46867),(39.79695, 21.46959),(39.79658, 21.47045),(39.79623, 21.47164),(39.7932, 21.48709),(39.79286, 
21.48954),(39.79217, 21.49622),(39.79191, 21.49914),(39.79191, 21.50009),(39.79167, 21.502),(39.79039, 21.5155),(39.7749, 21.51163),(39.77208, 21.51099),(39.7654, 21.51023),(39.74443, 21.5064),(39.74326, 21.50603),(39.74213, 21.50544),(39.74139, 21.50491),(39.74068, 21.50419),(39.7399, 21.50329),(39.73911, 21.50203),(39.73868, 21.50103),(39.73842, 21.49995),(39.73826, 21.49836),(39.73919, 21.49113),(39.73916, 21.48952),(39.73895, 21.48839),(39.73888, 21.48816),(39.73839, 21.4871),(39.73787, 21.4862),(39.73693, 21.48477),(39.73567, 21.48348),(39.73286, 21.48021),(39.73059, 21.47715),(39.72508, 21.46749),(39.72433, 21.46585),(39.72302, 21.46073),(39.72287, 21.45961),(39.72278, 21.45841),(39.72284, 21.45738),(39.72295, 21.45633),(39.72389, 21.45255),(39.72481, 21.45005),(39.73039, 21.43827),(39.7314, 21.43644),(39.73183, 21.43594),(39.73214, 21.43568),(39.73369, 21.43469),(39.73481, 21.43411),(39.73548, 21.43368),(39.73637, 21.433),(39.73709, 21.43238),(39.73785, 21.43181),(39.73821, 21.43158),(39.73856, 21.43144),(39.73898, 21.43133),(39.73951, 21.43126),(39.74295, 21.43112),(39.7457, 21.43127)], + +[(39.7791, 21.44156),(39.77939, 21.44157),(39.78003, 21.44187),(39.78046, 21.442),(39.78123, 21.44207),(39.78208, 21.44195),(39.78301, 21.44163),(39.78764, 21.44075),(39.78822, 21.44077),(39.78917, 21.44068),(39.79022, 21.44082),(39.7976, 21.44116),(39.7978, 21.44115),(39.79799, 21.4411),(39.79933, 21.44116),(39.79964, 21.44126),(39.80102, 21.44147),(39.80255, 21.44181),(39.80419, 21.4421),(39.80647, 21.44221),(39.80682, 21.44216),(39.80854, 21.4424),(39.8081, 21.44453),(39.80796, 21.44538),(39.8078, 21.44743),(39.80767, 21.44769),(39.80763, 21.44789),(39.80767, 21.44828),(39.80774, 21.44843),(39.80761, 21.45109),(39.80736, 21.4538),(39.80599, 21.45785),(39.8059, 21.45822),(39.80589, 21.45902),(39.80592, 21.45926),(39.80609, 21.45985),(39.80634, 21.46041),(39.80595, 21.46096),(39.8057, 21.46124),(39.8038, 21.46239),(39.80349, 21.46246),(39.80322, 21.46244),(39.79996, 21.46056),(39.79871, 21.45967),(39.79745, 21.45865),(39.79647, 21.45779),(39.79588, 21.4572),(39.78985, 21.45056),(39.78754, 21.44849),(39.7862, 21.44745),(39.78483, 21.44646),(39.78342, 21.44554),(39.78147, 21.44446),(39.78087, 21.44401),(39.77967, 21.44266),(39.7791, 21.44156)], + +[(39.93436, 21.4816),(39.93241, 21.48133),(39.92917, 21.48071),(39.92795, 21.48063),(39.92391, 21.48004),(39.92213, 21.47963),(39.92017, 21.47891),(39.91245, 21.47545),(39.91099, 21.47469),(39.9098, 21.47394),(39.90843, 21.47286),(39.90708, 21.4716),(39.90445, 21.46837),(39.90205, 21.46552),(39.90002, 21.46344),(39.89957, 21.4628),(39.8993, 21.46223),(39.89803, 21.45836),(39.898, 21.45747),(39.89804, 21.45702),(39.90192, 21.44447),(39.9022, 21.44387),(39.90238, 21.44363),(39.90272, 21.44337),(39.90406, 21.44277),(39.90471, 21.44266),(39.90601, 21.44278),(39.90621, 21.44277),(39.90655, 21.44267),(39.90946, 21.44274),(39.91033, 21.44282),(39.91091, 21.44294),(39.91136, 21.44311),(39.912, 21.44341),(39.91274, 21.44388),(39.91397, 21.44474),(39.9147, 21.44511),(39.91571, 21.44553),(39.91731, 21.44594),(39.9197, 21.44635),(39.92104, 21.44677),(39.92156, 21.44697),(39.92234, 21.44739),(39.92405, 21.44847),(39.92538, 21.44954),(39.92675, 21.45043),(39.92962, 21.4519),(39.93035, 21.45214),(39.93227, 21.45265),(39.93426, 21.45311),(39.93484, 21.45321),(39.93669, 21.45325),(39.9378, 21.4532),(39.93928, 21.45321),(39.94077, 21.45341),(39.94229, 21.45381),(39.94572, 21.45459),(39.94649, 21.45483),(39.94901, 21.45579),(39.93559, 21.4801),(39.93508, 
21.48113),(39.93479, 21.48161),(39.93436, 21.4816)], + +[(39.9044, 21.42901),(39.90248, 21.42675),(39.90195, 21.4262),(39.90111, 21.42499),(39.90059, 21.42408),(39.90038, 21.42359),(39.89991, 21.42209),(39.89946, 21.42086),(39.89889, 21.41972),(39.89911, 21.41951),(39.89926, 21.41931),(39.89948, 21.41892),(39.89961, 21.41855),(39.89964, 21.41816),(39.89957, 21.41767),(39.89948, 21.41741),(39.89925, 21.417),(39.89899, 21.41667),(39.89887, 21.41624),(39.89879, 21.41607),(39.89867, 21.41591),(39.89852, 21.41578),(39.89835, 21.41569),(39.89816, 21.41562),(39.89777, 21.41561),(39.89741, 21.41575),(39.89725, 21.41587),(39.89712, 21.41602),(39.89703, 21.41619),(39.89696, 21.41645),(39.89537, 21.41379),(39.89576, 21.4136),(39.89645, 21.41343),(39.89707, 21.41339),(39.89798, 21.41346),(39.89948, 21.41366),(39.90026, 21.41364),(39.90105, 21.4135),(39.90182, 21.41325),(39.90257, 21.41288),(39.90323, 21.41248),(39.90385, 21.41197),(39.90475, 21.41093),(39.90476, 21.41108),(39.90482, 21.41126),(39.90492, 21.41143),(39.9052, 21.4117),(39.90538, 21.41179),(39.90589, 21.4119),(39.90633, 21.41211),(39.90744, 21.4129),(39.90772, 21.41318),(39.9078, 21.4133),(39.90791, 21.41361),(39.90793, 21.41377),(39.9079, 21.41405),(39.90795, 21.41444),(39.90814, 21.41478),(39.90724, 21.41976),(39.90704, 21.42054),(39.90545, 21.42577),(39.9044, 21.42901)], + +[(39.8047, 21.42149),(39.80352, 21.42129),(39.80246, 21.42135),(39.80157, 21.42154),(39.79665, 21.42328),(39.79613, 21.42337),(39.79422, 21.4236),(39.79363, 21.42362),(39.79194, 21.42358),(39.79197, 21.42339),(39.79246, 21.42319),(39.79288, 21.42298),(39.79309, 21.42284),(39.79357, 21.42239),(39.79395, 21.4219),(39.79415, 21.42135),(39.7942, 21.42109),(39.79422, 21.42034),(39.79408, 21.41965),(39.7938, 21.41913),(39.79365, 21.41892),(39.79326, 21.41852),(39.79252, 21.41799),(39.79244, 21.41749),(39.79228, 21.41685),(39.78902, 21.4057),(39.78897, 21.40545),(39.78905, 21.40469),(39.78917, 21.40441),(39.7896, 21.40383),(39.7899, 21.40361),(39.79015, 21.40348),(39.79091, 21.4033),(39.79119, 21.40331),(39.79188, 21.40345),(39.79208, 21.40353),(39.7923, 21.40367),(39.79685, 21.4084),(39.79791, 21.40973),(39.79986, 21.41254),(39.80035, 21.41301),(39.80145, 21.41381),(39.80223, 21.41458),(39.80241, 21.41472),(39.80482, 21.41733),(39.80568, 21.41832),(39.80568, 21.41932),(39.80526, 21.41977),(39.8051, 21.41999),(39.80494, 21.4203),(39.8047, 21.42107),(39.8047, 21.42149)], + +[(39.79951, 21.40831),(39.79848, 21.40702),(39.79501, 21.40333),(39.79373, 21.40207),(39.79355, 21.40193),(39.79291, 21.40156),(39.79209, 21.40128),(39.79129, 21.40117),(39.7904, 21.40122),(39.78952, 21.40145),(39.78886, 21.40175),(39.78761, 21.40064),(39.78831, 21.39977),(39.78842, 21.39961),(39.7885, 21.39941),(39.78889, 21.39915),(39.78934, 21.39892),(39.78975, 21.39885),(39.79039, 21.39844),(39.7913, 21.39812),(39.79528, 21.39727),(39.79685, 21.39697),(39.79808, 21.39687),(39.80179, 21.39686),(39.80549, 21.39681),(39.80651, 21.39668),(39.80725, 21.39666),(39.80743, 21.3966),(39.80771, 21.39643),(39.80899, 21.39602),(39.80971, 21.39842),(39.80979, 21.39893),(39.80983, 21.39956),(39.80955, 21.39958),(39.80936, 21.39965),(39.8092, 21.39975),(39.80893, 21.40003),(39.8088, 21.4004),(39.80766, 21.40803),(39.80749, 21.40874),(39.8071, 21.4101),(39.80666, 21.41003),(39.80538, 21.40971),(39.8038, 21.40937),(39.80309, 21.40917),(39.79951, 21.40831)], + +[(39.78528, 21.39922),(39.78493, 21.39899),(39.78591, 21.39763),(39.78619, 21.3968),(39.78824, 21.38477),(39.78896, 21.37718),(39.79196, 21.37747),(39.79386, 
21.37747),(39.79431, 21.38367),(39.79466, 21.38521),(39.79527, 21.38685),(39.7963, 21.39301),(39.79642, 21.39481),(39.79092, 21.39593),(39.78942, 21.39642),(39.78822, 21.39699),(39.78722, 21.3976),(39.78651, 21.3981),(39.78528, 21.39922)], + +[(39.79842, 21.39469),(39.79828, 21.39273),(39.79723, 21.38643),(39.79656, 21.38457),(39.7963, 21.38345),(39.79584, 21.3772),(39.79782, 21.37657),(39.80001, 21.37542),(39.80108, 21.37459),(39.80295, 21.37269),(39.80319, 21.37258),(39.80748, 21.38942),(39.80783, 21.39052),(39.80783, 21.39119),(39.8077, 21.39161),(39.80752, 21.39189),(39.80687, 21.39255),(39.80579, 21.39415),(39.80519, 21.39458),(39.79842, 21.39469)], + +[(39.86272, 21.42529),(39.86276, 21.42495),(39.86273, 21.42476),(39.86266, 21.42457),(39.86255, 21.42441),(39.8638, 21.42428),(39.8642, 21.42419),(39.86474, 21.42401),(39.86583, 21.42353),(39.86774, 21.42232),(39.87468, 21.41769),(39.87519, 21.41722),(39.87556, 21.41679),(39.87623, 21.41572),(39.87632, 21.41563),(39.87919, 21.41359),(39.88011, 21.41502),(39.88112, 21.41677),(39.88159, 21.4177),(39.88115, 21.41807),(39.88045, 21.41821),(39.87899, 21.41871),(39.87817, 21.41908),(39.87653, 21.41992),(39.87497, 21.42078),(39.87348, 21.42172),(39.87086, 21.42355),(39.86554, 21.42759),(39.86429, 21.4284),(39.86344, 21.42888),(39.86315, 21.42914),(39.86304, 21.42931),(39.86297, 21.42949),(39.86288, 21.42955),(39.86345, 21.42732),(39.86493, 21.42715),(39.86628, 21.42693),(39.86645, 21.42685),(39.86674, 21.42659),(39.86691, 21.42623),(39.86694, 21.42604),(39.86693, 21.42584),(39.8668, 21.42548),(39.86654, 21.42519),(39.86618, 21.42502),(39.86599, 21.42499),(39.86579, 21.425),(39.86465, 21.42517),(39.86303, 21.42536),(39.86272, 21.42529)], + +[(39.88137, 21.4133),(39.88234, 21.41345),(39.88299, 21.41336),(39.88369, 21.41308),(39.88421, 21.41275),(39.88498, 21.41196),(39.88634, 21.40971),(39.88889, 21.40731),(39.89212, 21.41262),(39.89182, 21.41282),(39.89033, 21.41422),(39.88928, 21.41434),(39.88824, 21.41479),(39.88626, 21.41669),(39.88579, 21.41696),(39.88471, 21.41737),(39.88379, 21.4176),(39.88287, 21.41579),(39.88137, 21.4133)], + +[(39.89343, 21.41478),(39.89599, 21.41909),(39.8969, 21.42055),(39.89729, 21.42134),(39.89771, 21.42243),(39.8981, 21.42371),(39.89874, 21.4253),(39.89905, 21.42595),(39.89945, 21.42665),(39.89984, 21.42723),(39.90044, 21.42799),(39.90305, 21.43104),(39.9031, 21.43127),(39.90311, 21.43155),(39.90306, 21.43194),(39.9017, 21.43639),(39.90122, 21.43585),(39.90098, 21.43544),(39.90059, 21.43417),(39.90029, 21.43348),(39.89965, 21.43238),(39.89683, 21.4285),(39.89643, 21.42802),(39.89572, 21.4274),(39.89208, 21.42467),(39.8859, 21.42038),(39.88621, 21.41977),(39.8863, 21.41948),(39.88648, 21.41926),(39.88752, 21.4184),(39.88777, 21.4181),(39.88784, 21.41793),(39.88939, 21.41644),(39.88975, 21.41629),(39.89056, 21.41626),(39.891, 21.41612),(39.89343, 21.41478)], + +[(39.90063, 21.43987),(39.89683, 21.45214),(39.8956, 21.45625),(39.89544, 21.45648),(39.89523, 21.45671),(39.89504, 21.45685),(39.89446, 21.45714),(39.89298, 21.45776),(39.89025, 21.4587),(39.88889, 21.4589),(39.88829, 21.45893),(39.88695, 21.45888),(39.88613, 21.45869),(39.88496, 21.45834),(39.88205, 21.45719),(39.88115, 21.45672),(39.87854, 21.45483),(39.87734, 21.45423),(39.87619, 21.45386),(39.871, 21.45278),(39.87018, 21.45257),(39.86828, 21.45177),(39.86695, 21.45101),(39.86592, 21.45008),(39.86505, 21.44914),(39.86307, 21.44683),(39.86096, 21.44427),(39.86036, 21.44351),(39.85993, 21.44287),(39.85876, 21.44092),(39.85863, 21.4405),(39.85853, 
21.4385),(39.85845, 21.43789),(39.85841, 21.4377),(39.85799, 21.43648),(39.85816, 21.43625),(39.85861, 21.4358),(39.86082, 21.43328),(39.86139, 21.43272),(39.86152, 21.43255),(39.86213, 21.43226),(39.86471, 21.4308),(39.86495, 21.4305),(39.86503, 21.43028),(39.86671, 21.42921),(39.87205, 21.42516),(39.87456, 21.4234),(39.87601, 21.42249),(39.87745, 21.42169),(39.87968, 21.42059),(39.88, 21.42048),(39.88078, 21.42029),(39.88189, 21.4201),(39.8827, 21.41988),(39.88297, 21.4204),(39.8836, 21.42141),(39.88376, 21.42162),(39.88439, 21.4222),(39.89129, 21.42698),(39.89334, 21.42853),(39.89452, 21.42953),(39.89488, 21.42994),(39.89779, 21.43386),(39.89841, 21.43486),(39.89868, 21.43546),(39.89962, 21.43833),(39.89972, 21.4385),(39.8999, 21.43867),(39.90027, 21.43939),(39.90063, 21.43987)], + +[(39.89963, 21.38706),(39.8989, 21.38816),(39.8987, 21.38839),(39.89842, 21.38854),(39.89495, 21.38902),(39.8945, 21.38902),(39.89358, 21.38881),(39.89293, 21.38854),(39.88689, 21.38662),(39.88679, 21.38636),(39.88745, 21.38587),(39.89342, 21.38207),(39.89382, 21.38213),(39.89462, 21.38259),(39.89517, 21.38281),(39.8958, 21.38288),(39.89673, 21.3828),(39.89855, 21.38564),(39.89963, 21.38706)], + +[(39.89348, 21.39296),(39.89286, 21.39348),(39.89194, 21.39401),(39.88552, 21.39692),(39.8847, 21.3955),(39.88413, 21.39366),(39.88401, 21.39185),(39.88434, 21.39012),(39.88475, 21.38944),(39.88506, 21.38913),(39.88593, 21.38845),(39.88615, 21.38848),(39.88648, 21.38874),(39.88738, 21.38915),(39.89272, 21.39085),(39.89371, 21.39128),(39.89392, 21.39143),(39.89398, 21.39158),(39.89408, 21.39224),(39.89348, 21.39296)], + +[(39.84886, 21.43817),(39.85002, 21.43712),(39.85126, 21.43554),(39.85158, 21.43529),(39.85427, 21.4341),(39.85471, 21.4338),(39.85655, 21.43378),(39.85785, 21.43364),(39.85715, 21.43444),(39.85687, 21.43452),(39.85609, 21.43509),(39.85562, 21.43524),(39.85418, 21.43543),(39.85305, 21.43578),(39.85245, 21.43611),(39.85064, 21.43763),(39.85024, 21.43783),(39.84886, 21.43817)], + +[(39.85972, 21.41943),(39.86333, 21.41545),(39.8703, 21.40829),(39.87409, 21.40552),(39.87517, 21.40688),(39.87599, 21.40806),(39.87819, 21.41186),(39.87494, 21.41418),(39.87458, 21.41459),(39.87399, 21.41555),(39.87339, 21.41616),(39.86496, 21.42173),(39.86398, 21.42216),(39.86342, 21.42231),(39.86213, 21.42238),(39.85971, 21.42177),(39.86001, 21.42041),(39.85992, 21.41983),(39.85972, 21.41943)], + +[(39.87571, 21.40434),(39.87841, 21.40243),(39.88448, 21.39962),(39.88786, 21.4056),(39.8874, 21.40591),(39.8848, 21.40843),(39.88417, 21.40934),(39.8834, 21.41072),(39.88317, 21.41098),(39.8828, 21.41129),(39.88232, 21.41144),(39.88197, 21.41139),(39.88098, 21.41104),(39.88003, 21.41105),(39.8777, 21.40702),(39.87679, 21.40571),(39.87571, 21.40434)], + +[(39.88646, 21.39869),(39.89349, 21.39545),(39.8941, 21.39505),(39.89698, 21.39264),(39.89974, 21.3939),(39.89882, 21.39616),(39.89759, 21.39804),(39.8959, 21.40038),(39.89488, 21.40142),(39.89409, 21.40204),(39.89129, 21.40342),(39.88971, 21.40437),(39.88646, 21.39869)], + +[(39.95221, 21.34705),(39.95232, 21.34622),(39.95252, 21.3457),(39.95278, 21.34519),(39.95313, 21.34467),(39.95355, 21.34423),(39.95402, 21.34383),(39.95944, 21.34052),(39.96716, 21.3359),(39.96818, 21.33546),(39.96952, 21.33507),(39.97082, 21.33491),(39.97164, 21.33491),(39.97628, 21.33524),(39.97843, 21.33549),(39.98862, 21.33691),(39.99004, 21.33729),(39.99099, 21.33767),(39.9921, 21.33826),(39.99311, 21.339),(39.99377, 21.33959),(39.99466, 21.34067),(39.99517, 21.3415),(39.9956, 21.34231),(39.99591, 
21.34323),(39.99606, 21.34384),(39.99627, 21.34514),(39.99627, 21.34711),(39.99622, 21.34781),(39.99493, 21.3532),(39.99449, 21.35441),(39.99377, 21.35575),(39.99337, 21.3563),(39.99265, 21.35702),(39.99047, 21.35841),(39.98785, 21.36027),(39.98739, 21.36066),(39.98664, 21.36148),(39.98629, 21.36203),(39.98604, 21.36261),(39.98588, 21.36326),(39.98582, 21.36386),(39.98584, 21.36461),(39.98597, 21.36522),(39.98613, 21.36571),(39.98645, 21.36645),(39.98769, 21.36884),(39.98792, 21.36953),(39.98811, 21.37052),(39.98876, 21.37739),(39.98869, 21.37821),(39.98863, 21.37841),(39.98851, 21.37867),(39.98808, 21.37931),(39.98785, 21.37954),(39.98758, 21.37973),(39.98671, 21.38013),(39.98624, 21.38023),(39.98142, 21.38031),(39.9805, 21.38024),(39.97978, 21.38003),(39.97949, 21.37987),(39.97917, 21.37962),(39.97889, 21.37936),(39.97864, 21.37904),(39.9778, 21.37783),(39.97605, 21.37561),(39.96729, 21.36519),(39.96247, 21.36003),(39.95955, 21.35682),(39.95867, 21.35593),(39.95499, 21.3519),(39.95402, 21.35093),(39.95309, 21.34984),(39.95262, 21.34915),(39.95236, 21.34849),(39.95225, 21.34799),(39.95218, 21.34733),(39.95221, 21.34705)], + +[(39.80959, 21.42471),(39.80961, 21.42502),(39.80967, 21.4252),(39.80976, 21.42538),(39.80988, 21.42553),(39.81003, 21.42565),(39.81021, 21.42574),(39.81039, 21.4258),(39.81059, 21.42582),(39.81079, 21.4258),(39.81097, 21.42574),(39.81115, 21.42565),(39.8113, 21.42553),(39.81142, 21.42538),(39.81157, 21.42502),(39.81422, 21.4254),(39.81499, 21.42539),(39.81683, 21.42523),(39.8172, 21.4259),(39.81788, 21.42678),(39.81735, 21.42785),(39.81708, 21.42816),(39.817, 21.42821),(39.81458, 21.42838),(39.8142, 21.42837),(39.81313, 21.4282),(39.81278, 21.42826),(39.81207, 21.42855),(39.81173, 21.42888),(39.81161, 21.42903),(39.81152, 21.42921),(39.81137, 21.42959),(39.81126, 21.43002),(39.81082, 21.4313),(39.80838, 21.43082),(39.80778, 21.43079),(39.80781, 21.4305),(39.80793, 21.4264),(39.80787, 21.4244),(39.80959, 21.42471)], + +[(39.80556, 21.43147),(39.80353, 21.43246),(39.80246, 21.43184),(39.80176, 21.43158),(39.80132, 21.43147),(39.80103, 21.43144),(39.80025, 21.43149),(39.79977, 21.43161),(39.79946, 21.43174),(39.79918, 21.43192),(39.79694, 21.43394),(39.79578, 21.43486),(39.79286, 21.43677),(39.79262, 21.43702),(39.79251, 21.43723),(39.79246, 21.43742),(39.79245, 21.43762),(39.79253, 21.43798),(39.79272, 21.43828),(39.79297, 21.43852),(39.79326, 21.43865),(39.79353, 21.43869),(39.79384, 21.43864),(39.79402, 21.43856),(39.79789, 21.43598),(39.80018, 21.4339),(39.80064, 21.43359),(39.80097, 21.43358),(39.80149, 21.43371),(39.80259, 21.43438),(39.80291, 21.4345),(39.8032, 21.43456),(39.80343, 21.43458),(39.80388, 21.43455),(39.8048, 21.43423),(39.80446, 21.43495),(39.80406, 21.43567),(39.80366, 21.43693),(39.80347, 21.43812),(39.80332, 21.43981),(39.80118, 21.43938),(39.79973, 21.43917),(39.79102, 21.43876),(39.79131, 21.43242),(39.79123, 21.43161),(39.79026, 21.42778),(39.79024, 21.42756),(39.79027, 21.42715),(39.79032, 21.42696),(39.79049, 21.42663),(39.79096, 21.42597),(39.79434, 21.4257),(39.79706, 21.42535),(39.80209, 21.42359),(39.80281, 21.42342),(39.8032, 21.42339),(39.80401, 21.42349),(39.80572, 21.42393),(39.80578, 21.4274),(39.80564, 21.43075),(39.80556, 21.43147)], + +[(39.80743, 21.43297),(39.8081, 21.43292),(39.81017, 21.43334),(39.80879, 21.44042),(39.80641, 21.44006),(39.80544, 21.44004),(39.80577, 21.43726),(39.80612, 21.43633),(39.80671, 21.43526),(39.80743, 21.43297)], + +[(39.80978, 21.44779),(39.80994, 21.44566),(39.81007, 21.44487),(39.81053, 
21.44265),(39.81116, 21.44264),(39.81154, 21.44256),(39.81225, 21.44226),(39.81248, 21.44211),(39.81366, 21.44109),(39.81384, 21.44099),(39.81679, 21.44037),(39.81781, 21.44011),(39.82104, 21.43947),(39.8222, 21.43916),(39.82287, 21.43891),(39.82379, 21.43843),(39.82508, 21.43789),(39.82657, 21.43749),(39.82684, 21.43737),(39.82727, 21.4371),(39.82865, 21.43639),(39.82907, 21.43623),(39.82934, 21.4363),(39.82961, 21.43628),(39.83008, 21.43695),(39.8317, 21.43866),(39.83196, 21.439),(39.83218, 21.43955),(39.8324, 21.43989),(39.83262, 21.44011),(39.83308, 21.44043),(39.83338, 21.44071),(39.83419, 21.44194),(39.83469, 21.44238),(39.83493, 21.44253),(39.83535, 21.44272),(39.83558, 21.44279),(39.83628, 21.44289),(39.83853, 21.44254),(39.83909, 21.44226),(39.8397, 21.44168),(39.83977, 21.44165),(39.84008, 21.44161),(39.84192, 21.44153),(39.843, 21.44143),(39.84509, 21.4411),(39.84799, 21.44052),(39.84946, 21.44019),(39.84981, 21.44021),(39.84994, 21.44025),(39.85134, 21.44181),(39.85116, 21.44253),(39.85117, 21.44267),(39.85047, 21.4439),(39.85008, 21.44479),(39.84952, 21.44592),(39.84796, 21.44857),(39.84741, 21.44971),(39.84589, 21.45357),(39.84554, 21.45432),(39.84375, 21.45712),(39.84259, 21.45929),(39.84225, 21.46015),(39.84203, 21.46152),(39.84189, 21.46327),(39.8414, 21.46556),(39.84112, 21.46739),(39.84112, 21.46789),(39.84122, 21.46847),(39.84135, 21.46887),(39.84146, 21.4691),(39.84191, 21.4698),(39.84204, 21.4701),(39.84217, 21.47069),(39.84223, 21.47113),(39.84204, 21.47546),(39.84198, 21.47572),(39.84187, 21.47595),(39.84166, 21.47621),(39.84137, 21.4764),(39.84125, 21.47644),(39.84102, 21.4765),(39.84071, 21.47651),(39.83664, 21.47574),(39.836, 21.4757),(39.83537, 21.47573),(39.83436, 21.47593),(39.83347, 21.47628),(39.83274, 21.47666),(39.83259, 21.47668),(39.83234, 21.47663),(39.83211, 21.47648),(39.83203, 21.4764),(39.83197, 21.47626),(39.83172, 21.47361),(39.83147, 21.47277),(39.83127, 21.4723),(39.83087, 21.47161),(39.82925, 21.46941),(39.82728, 21.4666),(39.82667, 21.46592),(39.82647, 21.46575),(39.82613, 21.46553),(39.82048, 21.46267),(39.82011, 21.46245),(39.81967, 21.46209),(39.81931, 21.46169),(39.81868, 21.46072),(39.81798, 21.45998),(39.81588, 21.45849),(39.81553, 21.45819),(39.81515, 21.45776),(39.81493, 21.45736),(39.81256, 21.45195),(39.8115, 21.45014),(39.81057, 21.44875),(39.80978, 21.44779)], + +[(39.78317, 21.39802),(39.78285, 21.39808),(39.77692, 21.3971),(39.77752, 21.39324),(39.77769, 21.39243),(39.77792, 21.39179),(39.78089, 21.38707),(39.78351, 21.38492),(39.7841, 21.38405),(39.78439, 21.38333),(39.78558, 21.37686),(39.78697, 21.37699),(39.78625, 21.38452),(39.78422, 21.39642),(39.78406, 21.39682),(39.78317, 21.39802)], + +[(39.78917, 21.37519),(39.78953, 21.37157),(39.79052, 21.37185),(39.79107, 21.37212),(39.79169, 21.37253),(39.7921, 21.37292),(39.79255, 21.37342),(39.79307, 21.37423),(39.7934, 21.37489),(39.79358, 21.37547),(39.7921, 21.37547),(39.78917, 21.37519)], + +[(39.87268, 21.3607),(39.87229, 21.37234),(39.87238, 21.37312),(39.8727, 21.37412),(39.87403, 21.3768),(39.87459, 21.37751),(39.87492, 21.3778),(39.87565, 21.3783),(39.87539, 21.37897),(39.87431, 21.38235),(39.87402, 21.38249),(39.87222, 21.38192),(39.87075, 21.38162),(39.86994, 21.38153),(39.86868, 21.38151),(39.86677, 21.38175),(39.86582, 21.382),(39.86468, 21.38241),(39.86373, 21.38286),(39.86273, 21.38347),(39.8619, 21.3841),(39.86106, 21.38489),(39.85782, 21.38829),(39.85728, 21.38809),(39.85678, 21.38807),(39.85608, 21.38827),(39.85576, 21.38852),(39.85553, 21.38885),(39.8554, 
21.38933),(39.85538, 21.38957),(39.85547, 21.38999),(39.85567, 21.39042),(39.85495, 21.39109),(39.85387, 21.39231),(39.8531, 21.39292),(39.85269, 21.39312),(39.85256, 21.39313),(39.85225, 21.39306),(39.8512, 21.39193),(39.84972, 21.39016),(39.84959, 21.39004),(39.84879, 21.38937),(39.8481, 21.38893),(39.84746, 21.38863),(39.84605, 21.38825),(39.84136, 21.38785),(39.84054, 21.3877),(39.84015, 21.38753),(39.83982, 21.38734),(39.8394, 21.38701),(39.83693, 21.38443),(39.83665, 21.38418),(39.83518, 21.38321),(39.83492, 21.38309),(39.83413, 21.38287),(39.83345, 21.38279),(39.83236, 21.38277),(39.83156, 21.38286),(39.83051, 21.38312),(39.83637, 21.37503),(39.84749, 21.35985),(39.84785, 21.35942),(39.84912, 21.35812),(39.84948, 21.35783),(39.84985, 21.35763),(39.85045, 21.35745),(39.8545, 21.35696),(39.85519, 21.35698),(39.85738, 21.35754),(39.85971, 21.35848),(39.86135, 21.35892),(39.86264, 21.35917),(39.8703, 21.36038),(39.87268, 21.3607)], + +[(39.87658, 21.37651),(39.87598, 21.37607),(39.87578, 21.37582),(39.87453, 21.3733),(39.87429, 21.37232),(39.87467, 21.36095),(39.87758, 21.36131),(39.87798, 21.36151),(39.88107, 21.36223),(39.88281, 21.36325),(39.88379, 21.36404),(39.88498, 21.36539),(39.88608, 21.36683),(39.8788, 21.37364),(39.8776, 21.375),(39.87658, 21.37651)], + +[(39.82364, 21.41981),(39.82367, 21.41954),(39.82416, 21.41928),(39.82507, 21.41862),(39.82572, 21.41854),(39.8261, 21.4186),(39.8258, 21.41927),(39.82445, 21.41949),(39.82364, 21.41981)], + +[(39.82391, 21.41697),(39.82383, 21.41571),(39.82364, 21.41473),(39.82274, 21.41253),(39.82449, 21.41261),(39.82443, 21.41485),(39.82469, 21.41537),(39.82519, 21.41566),(39.82577, 21.41563),(39.82623, 21.41527),(39.82642, 21.41472),(39.82648, 21.41309),(39.82695, 21.41321),(39.82661, 21.41499),(39.82657, 21.41567),(39.82683, 21.41674),(39.82634, 21.4166),(39.82514, 21.41656),(39.82446, 21.41671),(39.82391, 21.41697)], + +[(39.82876, 21.41802),(39.83007, 21.41793),(39.83168, 21.41763),(39.83252, 21.41736),(39.8333, 21.41696),(39.83461, 21.41893),(39.83507, 21.41978),(39.83524, 21.42063),(39.8353, 21.42147),(39.83401, 21.42163),(39.83388, 21.42071),(39.83352, 21.41999),(39.83348, 21.41968),(39.83332, 21.41932),(39.83276, 21.41877),(39.8319, 21.41823),(39.83077, 21.41798),(39.82876, 21.41802)], + +[(39.83509, 21.41604),(39.839, 21.41402),(39.84021, 21.41316),(39.84098, 21.41247),(39.84179, 21.41159),(39.8438, 21.40921),(39.84747, 21.40466),(39.84802, 21.40381),(39.84838, 21.40303),(39.84864, 21.40216),(39.84886, 21.40096),(39.849, 21.39949),(39.84909, 21.39896),(39.84924, 21.39842),(39.84956, 21.39772),(39.85004, 21.39711),(39.8502, 21.39695),(39.8506, 21.39665),(39.85157, 21.39618),(39.853, 21.39569),(39.85363, 21.39558),(39.85396, 21.39547),(39.8544, 21.39524),(39.8547, 21.39503),(39.85499, 21.39478),(39.85617, 21.39361),(39.85696, 21.39291),(39.85746, 21.39236),(39.8579, 21.39179),(39.85877, 21.39085),(39.86338, 21.39308),(39.86446, 21.39356),(39.86531, 21.39402),(39.86577, 21.39435),(39.86651, 21.39507),(39.86596, 21.3955),(39.86532, 21.39609),(39.86472, 21.39688),(39.86379, 21.39863),(39.86138, 21.40439),(39.86035, 21.4064),(39.85537, 21.41465),(39.85266, 21.41823),(39.85145, 21.41971),(39.85133, 21.4198),(39.85092, 21.41997),(39.84912, 21.42017),(39.84516, 21.42027),(39.84289, 21.42044),(39.83728, 21.4212),(39.83723, 21.42043),(39.83701, 21.4193),(39.83677, 21.41867),(39.83632, 21.41789),(39.83509, 21.41604)], + +[(39.86021, 21.38932),(39.86365, 21.38583),(39.86459, 21.38516),(39.86624, 21.38442),(39.86768, 21.38403),(39.86962, 
21.38393),(39.8717, 21.38431),(39.87299, 21.3847),(39.8729, 21.38575),(39.87303, 21.38632),(39.87188, 21.38979),(39.87157, 21.39051),(39.87068, 21.39174),(39.87013, 21.39226),(39.8681, 21.39384),(39.86709, 21.39285),(39.86632, 21.39229),(39.86021, 21.38932)], + +[(39.84703, 21.39096),(39.84715, 21.39117),(39.84744, 21.39143),(39.84761, 21.39151),(39.84787, 21.39156),(39.84872, 21.39256),(39.84882, 21.39314),(39.84858, 21.39443),(39.84842, 21.39495),(39.848, 21.39601),(39.84714, 21.39783),(39.84698, 21.39845),(39.84677, 21.40025),(39.84654, 21.40175),(39.84636, 21.40234),(39.84612, 21.40283),(39.84579, 21.40335),(39.84226, 21.4074),(39.8407, 21.40935),(39.83947, 21.41065),(39.83831, 21.41161),(39.8377, 21.41205),(39.83345, 21.41428),(39.83247, 21.41344),(39.83245, 21.41324),(39.83239, 21.41305),(39.8323, 21.41288),(39.83217, 21.41273),(39.83121, 21.41178),(39.83068, 21.41134),(39.83015, 21.41099),(39.83001, 21.41065),(39.83052, 21.40833),(39.83112, 21.40587),(39.8317, 21.40157),(39.83168, 21.40101),(39.83164, 21.40082),(39.83137, 21.40027),(39.83115, 21.39999),(39.83083, 21.39971),(39.83052, 21.39952),(39.8284, 21.39872),(39.82797, 21.39848),(39.82767, 21.39827),(39.82744, 21.39803),(39.82711, 21.39753),(39.827, 21.39726),(39.82693, 21.3969),(39.82679, 21.39172),(39.82659, 21.38808),(39.82672, 21.38766),(39.82705, 21.38702),(39.82746, 21.38644),(39.83156, 21.38522),(39.83244, 21.38503),(39.83282, 21.38502),(39.83372, 21.38517),(39.83421, 21.38535),(39.83473, 21.38565),(39.83527, 21.38603),(39.83533, 21.38622),(39.83555, 21.38655),(39.83588, 21.38676),(39.83607, 21.38681),(39.83738, 21.38827),(39.83788, 21.38875),(39.83886, 21.38944),(39.83905, 21.38954),(39.83962, 21.38978),(39.84022, 21.38998),(39.84135, 21.39018),(39.84179, 21.39021),(39.84192, 21.39029),(39.8423, 21.3904),(39.84269, 21.39035),(39.84282, 21.39029),(39.84546, 21.39049),(39.84593, 21.39055),(39.84632, 21.39064),(39.84672, 21.39079),(39.84703, 21.39096)], + +[(39.83154, 21.41527),(39.83052, 21.41557),(39.82875, 21.41575),(39.82868, 21.41557),(39.8287, 21.41522),(39.82898, 21.41382),(39.83043, 21.41432),(39.83154, 21.41527)], + +[(39.88532, 21.36227),(39.89047, 21.36299),(39.88935, 21.36357),(39.889, 21.36359),(39.88711, 21.36335),(39.88654, 21.36324),(39.88587, 21.36295),(39.88576, 21.36273),(39.88532, 21.36227)], + +[(39.91229, 21.40316),(39.91302, 21.40213),(39.91357, 21.40253),(39.91432, 21.40334),(39.91464, 21.40392),(39.91478, 21.40442),(39.91478, 21.40479),(39.91465, 21.40516),(39.91433, 21.40548),(39.914, 21.40563),(39.91331, 21.4056),(39.9128, 21.40534),(39.91261, 21.40516),(39.91238, 21.40474),(39.91229, 21.40316)]); + + +SELECT 'Simplified version of previous test'; + +SELECT pointInPolygon((39.840202, 21.451471), + +[(39.90553, 21.38668),(39.91034, 21.38608),(39.91834, 21.38048),(39.93078, 21.3627),(39.94141, 21.36278),(39.94753, 21.36075),(39.94986, 21.35894),(39.95349, 21.3533),(39.97833, 21.3815),(39.98132, 21.38231),(39.98851, 21.38151),(39.99076, 21.37747),(39.98987, 21.36908),(39.98791, 21.36332),(39.99543, 21.35687),(39.99827, 21.34722),(39.99784, 21.34271),(39.99632, 21.33955),(39.99322, 21.3366),(39.98908, 21.33496),(39.9692, 21.3331),(39.95841, 21.3388),(39.95109, 21.34412),(39.95026, 21.34825),(39.95203, 21.35168),(39.94753, 21.35845),(39.94165, 21.36077),(39.93272, 21.36009),(39.92969, 21.36103),(39.91928, 21.37669),(39.91374, 21.3816),(39.91056, 21.38296),(39.90839, 21.38361),(39.90059, 21.38257),(39.8978, 21.37712),(39.90051, 21.37335),(39.90033, 21.37195),(39.89559, 21.37533),(39.89099, 
21.36937),(39.89101, 21.3661),(39.89465, 21.364),(39.92418, 21.35725),(39.92838, 21.35433),(39.94394, 21.33915),(39.96711, 21.32785),(39.97437, 21.32734),(39.99523, 21.33055),(40.01271, 21.3293),(40.01345, 21.3276),(40.00731, 21.32689),(39.99189, 21.32817),(39.97264, 21.3251),(39.96216, 21.32725),(39.95825, 21.32598),(39.95783, 21.32734),(39.96017, 21.32834),(39.94652, 21.33514),(39.94578, 21.33237),(39.94438, 21.33259),(39.94454, 21.33563),(39.92448, 21.3545),(39.92007, 21.3563),(39.89586, 21.3615),(39.86239, 21.35659),(39.85241, 21.35319),(39.85183, 21.35189),(39.84187, 21.3498),(39.83475, 21.35001),(39.82272, 21.35322),(39.80957, 21.34986),(39.80645, 21.34645),(39.80654, 21.34104),(39.82207, 21.29116),(39.82732, 21.26685),(39.82657, 21.22894),(39.82468, 21.22761),(39.82364, 21.22857),(39.82459, 21.22961),(39.82535, 21.26649),(39.82016, 21.29057),(39.81723, 21.29965),(39.81585, 21.30012),(39.81652, 21.30158),(39.81475, 21.30815),(39.80378, 21.34492),(39.8023, 21.34648),(39.79042, 21.34584),(39.78385, 21.34687),(39.77227, 21.34595),(39.7601, 21.34279),(39.73947, 21.34141),(39.71051, 21.34288),(39.70233, 21.34041),(39.68839, 21.33943),(39.65964, 21.33189),(39.64627, 21.3344),(39.64733, 21.33592),(39.65598, 21.33404),(39.66095, 21.33402),(39.68789, 21.34136),(39.70198, 21.34238),(39.71031, 21.34487),(39.74208, 21.34353),(39.76109, 21.34495),(39.77363, 21.34845),(39.77446, 21.35039),(39.76342, 21.37977),(39.75978, 21.39951),(39.75655, 21.40491),(39.73768, 21.39607),(39.72646, 21.38795),(39.71285, 21.3969),(39.69867, 21.37979),(39.66651, 21.36156),(39.6662, 21.36338),(39.69742, 21.38135),(39.7112, 21.39803),(39.70333, 21.40335),(39.70227, 21.40556),(39.70273, 21.40892),(39.71038, 21.41608),(39.71004, 21.42139),(39.68758, 21.414),(39.68099, 21.41398),(39.63179, 21.4366),(39.62927917729339, 21.43855995858338),(39.629299942421596, 21.44105336136311),(39.63273, 21.43836),(39.65768, 21.42753),(39.67404, 21.419),(39.6815, 21.41592),(39.68534, 21.41555),(39.7182, 21.42582),(39.72915, 21.4318),(39.72926, 21.43473),(39.72198, 21.45071),(39.72058, 21.46018),(39.72262, 21.46776),(39.72871, 21.47851),(39.73639, 21.48854),(39.73607, 21.50077),(39.73921, 21.50608),(39.74358, 21.50869),(39.77204, 21.51334),(39.78965, 21.51773),(39.78925, 21.52186),(39.77895, 21.53768),(39.77335, 21.55878),(39.77409, 21.55998),(39.77529, 21.55924),(39.78151, 21.53691),(39.79101, 21.52282),(39.79216, 21.51796),(39.79392, 21.51725),(39.793, 21.51609),(39.79416, 21.49641),(39.79816, 21.47216),(39.8004, 21.46856),(39.80363, 21.4669),(39.80549, 21.46717),(39.80785, 21.46483),(39.8079, 21.45844),(39.80961, 21.45125),(39.81407, 21.45956),(39.8189, 21.46404),(39.82568, 21.4678),(39.82961, 21.47351),(39.83079, 21.47799),(39.84122, 21.47849),(39.84401, 21.47583),(39.84423, 21.47113),(39.84321, 21.46813),(39.84421, 21.46059),(39.85356, 21.44251),(39.85688, 21.44231),(39.86433, 21.45155),(39.86762, 21.45385),(39.87655, 21.45623),(39.88419, 21.46034),(39.89153, 21.46165),(39.8939, 21.46349),(39.89668, 21.46326),(39.9075, 21.47496),(39.91921, 21.48088),(39.9355, 21.48404),(39.94435, 21.48781),(39.96608, 21.48881),(39.96569, 21.49663),(39.95135, 21.53005),(39.94352, 21.56004),(39.94384, 21.56417),(39.94803, 21.56766),(39.95376, 21.56964),(39.95497, 21.56891),(39.9538, 21.56747),(39.94686, 21.56478),(39.94534, 21.56123),(39.95324, 21.53069),(39.96782, 21.49652),(39.96808, 21.48868),(39.98958, 21.49423),(40.00615, 21.4944),(40.01566, 21.50406),(40.03305, 21.5127),(40.0475, 21.52172),(40.05278, 21.52274),(40.06051, 21.52124),(40.05971, 
21.51952),(40.05217, 21.52052),(40.04866, 21.51978),(40.03052, 21.50875),(40.01631, 21.50181),(40.01014, 21.49459),(40.00619, 21.49215),(39.98995, 21.49206),(39.96952, 21.48658),(39.94485, 21.48571),(39.93748, 21.48246),(39.95107, 21.45666),(39.97348, 21.46578),(39.97479, 21.46523),(39.97424, 21.46392),(39.95217, 21.45495),(39.95444, 21.45202),(39.97071, 21.44272),(39.97127, 21.44141),(39.97007, 21.44065),(39.95381, 21.44976),(39.95007, 21.45407),(39.94121, 21.45146),(39.93089, 21.45021),(39.92173, 21.4449),(39.9164, 21.44366),(39.91152, 21.44104),(39.90446, 21.44019),(39.90416, 21.43717),(39.9067, 21.43268),(39.90657, 21.42875),(39.91121, 21.40898),(39.91566, 21.40698),(39.91675, 21.40517),(39.91627, 21.40045),(39.91407, 21.39734),(39.91949, 21.39132),(39.92673, 21.38963),(39.93267, 21.39089),(39.93373, 21.38995),(39.93279, 21.38889),(39.92676, 21.38762),(39.91905, 21.38931),(39.91251, 21.39595),(39.91173, 21.40041),(39.90949, 21.39663),(39.91172, 21.3928),(39.91031, 21.39269),(39.90798, 21.39493),(39.90668, 21.39219),(39.90882, 21.38887),(39.90768, 21.38803),(39.90505, 21.39084),(39.90417, 21.38841),(39.90553, 21.38668)], + +[(39.80978, 21.44779),(39.80994, 21.44566),(39.81007, 21.44487),(39.81053, 21.44265),(39.81116, 21.44264),(39.81154, 21.44256),(39.81225, 21.44226),(39.81248, 21.44211),(39.81366, 21.44109),(39.81384, 21.44099),(39.81679, 21.44037),(39.81781, 21.44011),(39.82104, 21.43947),(39.8222, 21.43916),(39.82287, 21.43891),(39.82379, 21.43843),(39.82508, 21.43789),(39.82657, 21.43749),(39.82684, 21.43737),(39.82727, 21.4371),(39.82865, 21.43639),(39.82907, 21.43623),(39.82934, 21.4363),(39.82961, 21.43628),(39.83008, 21.43695),(39.8317, 21.43866),(39.83196, 21.439),(39.83218, 21.43955),(39.8324, 21.43989),(39.83262, 21.44011),(39.83308, 21.44043),(39.83338, 21.44071),(39.83419, 21.44194),(39.83469, 21.44238),(39.83493, 21.44253),(39.83535, 21.44272),(39.83558, 21.44279),(39.83628, 21.44289),(39.83853, 21.44254),(39.83909, 21.44226),(39.8397, 21.44168),(39.83977, 21.44165),(39.84008, 21.44161),(39.84192, 21.44153),(39.843, 21.44143),(39.84509, 21.4411),(39.84799, 21.44052),(39.84946, 21.44019),(39.84981, 21.44021),(39.84994, 21.44025),(39.85134, 21.44181),(39.85116, 21.44253),(39.85117, 21.44267),(39.85047, 21.4439),(39.85008, 21.44479),(39.84952, 21.44592),(39.84796, 21.44857),(39.84741, 21.44971),(39.84589, 21.45357),(39.84554, 21.45432),(39.84375, 21.45712),(39.84259, 21.45929),(39.84225, 21.46015),(39.84203, 21.46152),(39.84189, 21.46327),(39.8414, 21.46556),(39.84112, 21.46739),(39.84112, 21.46789),(39.84122, 21.46847),(39.84135, 21.46887),(39.84146, 21.4691),(39.84191, 21.4698),(39.84204, 21.4701),(39.84217, 21.47069),(39.84223, 21.47113),(39.84204, 21.47546),(39.84198, 21.47572),(39.84187, 21.47595),(39.84166, 21.47621),(39.84137, 21.4764),(39.84125, 21.47644),(39.84102, 21.4765),(39.84071, 21.47651),(39.83664, 21.47574),(39.836, 21.4757),(39.83537, 21.47573),(39.83436, 21.47593),(39.83347, 21.47628),(39.83274, 21.47666),(39.83259, 21.47668),(39.83234, 21.47663),(39.83211, 21.47648),(39.83203, 21.4764),(39.83197, 21.47626),(39.83172, 21.47361),(39.83147, 21.47277),(39.83127, 21.4723),(39.83087, 21.47161),(39.82925, 21.46941),(39.82728, 21.4666),(39.82667, 21.46592),(39.82647, 21.46575),(39.82613, 21.46553),(39.82048, 21.46267),(39.82011, 21.46245),(39.81967, 21.46209),(39.81931, 21.46169),(39.81868, 21.46072),(39.81798, 21.45998),(39.81588, 21.45849),(39.81553, 21.45819),(39.81515, 21.45776),(39.81493, 21.45736),(39.81256, 21.45195),(39.8115, 21.45014),(39.81057, 
21.44875),(39.80978, 21.44779)] +); + + +SELECT 'Very simplified version of previous test'; + +WITH + [(39.82535, 21.26649), (39.63179, 21.4366), (39.94803, 21.56766)] AS outer, + [(39.84994, 21.44025), (39.82728, 21.4666), (39.82667, 21.46592)] AS inner, + (39.840202, 21.451471) AS point +SELECT + pointInPolygon(point, inner) AS inside_inner, + pointInPolygon(point, outer, inner) AS inside_outer; diff --git a/parser/testdata/01214_test_storage_merge_aliases_with_where/ast.json b/parser/testdata/01214_test_storage_merge_aliases_with_where/ast.json new file mode 100644 index 000000000..fd63ac9f4 --- /dev/null +++ b/parser/testdata/01214_test_storage_merge_aliases_with_where/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tt1 (children 1)" + }, + { + "explain": " Identifier tt1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001247391, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01214_test_storage_merge_aliases_with_where/metadata.json b/parser/testdata/01214_test_storage_merge_aliases_with_where/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01214_test_storage_merge_aliases_with_where/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01214_test_storage_merge_aliases_with_where/query.sql b/parser/testdata/01214_test_storage_merge_aliases_with_where/query.sql new file mode 100644 index 000000000..61c7fe368 --- /dev/null +++ b/parser/testdata/01214_test_storage_merge_aliases_with_where/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS tt1; +DROP TABLE IF EXISTS tt2; +DROP TABLE IF EXISTS tt3; +DROP TABLE IF EXISTS tt4; +DROP TABLE IF EXISTS tt_m; + +CREATE TABLE tt1 (a UInt32, b UInt32 ALIAS a) ENGINE = Memory; +CREATE TABLE tt2 (a UInt32, b UInt32 ALIAS a * 2) ENGINE = Memory; +CREATE TABLE tt3 (a UInt32, b UInt32 ALIAS c, c UInt32) ENGINE = Memory; +CREATE TABLE tt4 (a UInt32, b UInt32 ALIAS 12) ENGINE = Memory; +CREATE TABLE tt_m (a UInt32, b UInt32) ENGINE = Merge(currentDatabase(), 'tt1|tt2|tt3|tt4'); + +INSERT INTO tt1 VALUES (1); +INSERT INTO tt2 VALUES (2); +INSERT INTO tt3(a, c) VALUES (3, 4); +INSERT INTO tt4 VALUES (5); + +-- { echo } +SELECT * FROM tt_m order by a; +SELECT * FROM tt_m WHERE b != 0 order by b, a; +SELECT * FROM tt_m WHERE b != 1 order by b, a; +SELECT * FROM tt_m WHERE b != a * 2 order by b, a; +SELECT * FROM tt_m WHERE b / 2 != a order by b, a; + +SELECT b FROM tt_m WHERE b >= 0 order by b, a; +SELECT b FROM tt_m WHERE b == 12; +SELECT b FROM tt_m ORDER BY b, a; +SELECT b, count() FROM tt_m GROUP BY b order by b; +SELECT b FROM tt_m order by b LIMIT 1 BY b; + +SELECT a FROM tt_m WHERE b = 12; +SELECT max(a) FROM tt_m group by b order by b; +SELECT a FROM tt_m order by b, a; diff --git a/parser/testdata/01220_scalar_optimization_in_alter/ast.json b/parser/testdata/01220_scalar_optimization_in_alter/ast.json new file mode 100644 index 000000000..c28ef607a --- /dev/null +++ b/parser/testdata/01220_scalar_optimization_in_alter/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery cdp_segments (children 1)" + }, + { + "explain": " Identifier cdp_segments" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00152372, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/01220_scalar_optimization_in_alter/metadata.json 
b/parser/testdata/01220_scalar_optimization_in_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01220_scalar_optimization_in_alter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01220_scalar_optimization_in_alter/query.sql b/parser/testdata/01220_scalar_optimization_in_alter/query.sql new file mode 100644 index 000000000..de92b29ce --- /dev/null +++ b/parser/testdata/01220_scalar_optimization_in_alter/query.sql @@ -0,0 +1,9 @@ +drop table if exists cdp_segments; +drop table if exists cdp_customers; + +create table cdp_segments (seg_id String, mid_seqs AggregateFunction(groupBitmap, UInt32)) engine=ReplacingMergeTree() order by (seg_id); +create table cdp_customers (mid String, mid_seq UInt32) engine=ReplacingMergeTree() order by (mid_seq); +alter table cdp_segments update mid_seqs = bitmapOr(mid_seqs, (select groupBitmapState(mid_seq) from cdp_customers where mid in ('6bf3c2ee-2b33-3030-9dc2-25c6c618d141'))) where seg_id = '1234567890'; + +drop table cdp_segments; +drop table cdp_customers; diff --git a/parser/testdata/01221_system_settings/ast.json b/parser/testdata/01221_system_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01221_system_settings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01221_system_settings/metadata.json b/parser/testdata/01221_system_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01221_system_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01221_system_settings/query.sql b/parser/testdata/01221_system_settings/query.sql new file mode 100644 index 000000000..348ea6154 --- /dev/null +++ b/parser/testdata/01221_system_settings/query.sql @@ -0,0 +1,35 @@ +-- Tags: no-object-storage +select * from system.settings where name = 'send_timeout'; +select * from system.merge_tree_settings where name = 'index_granularity'; + +with [ + 'Seconds', + 'Bool', + 'Int64', + 'String', + 'Char', + 'LogsLevel', + 'URI', + 'Float', + 'UInt64', + 'MaxThreads', + 'Milliseconds', + 'JoinStrictness', + 'JoinAlgorithm', + 'OverflowMode', + 'TotalsMode', + 'LoadBalancing', + 'OverflowModeGroupBy', + 'DateTimeInputFormat', + 'DistributedProductMode' +] as types select hasAll(arrayDistinct(groupArray(type)), types) from system.settings; + +with [ + 'Seconds', + 'Bool', + 'Int64', + 'String', + 'Float', + 'UInt64', + 'MaxThreads' +] as types select hasAll(arrayDistinct(groupArray(type)), types) from system.merge_tree_settings; diff --git a/parser/testdata/01222_system_codecs/ast.json b/parser/testdata/01222_system_codecs/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01222_system_codecs/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01222_system_codecs/metadata.json b/parser/testdata/01222_system_codecs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01222_system_codecs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01222_system_codecs/query.sql b/parser/testdata/01222_system_codecs/query.sql new file mode 100644 index 000000000..cba8f0a60 --- /dev/null +++ b/parser/testdata/01222_system_codecs/query.sql @@ -0,0 +1,7 @@ +-- Tags: no-object-storage, no-fasttest, no-cpu-aarch64, no-cpu-s390x +-- no-cpu-aarch64 and no-cpu-s390x because DEFLATE_QPL is x86-only +select * from system.codecs order by all; + 
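+-- Illustrative sketch, not part of this test's expected output: assuming
+-- system.codecs exposes a 'name' column (the system.columns query at the end
+-- of this file lists the actual columns), a single codec could be checked with:
+--   SELECT count() FROM system.codecs WHERE name = 'LZ4';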
+select count() from system.codecs; + +select name from system.columns where table = 'codecs' and database = 'system' diff --git a/parser/testdata/01223_dist_on_dist/ast.json b/parser/testdata/01223_dist_on_dist/ast.json new file mode 100644 index 000000000..a4aa9bbee --- /dev/null +++ b/parser/testdata/01223_dist_on_dist/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery merge_dist_01223 (children 1)" + }, + { + "explain": " Identifier merge_dist_01223" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001302458, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/01223_dist_on_dist/metadata.json b/parser/testdata/01223_dist_on_dist/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01223_dist_on_dist/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01223_dist_on_dist/query.sql b/parser/testdata/01223_dist_on_dist/query.sql new file mode 100644 index 000000000..4cd8ffd7a --- /dev/null +++ b/parser/testdata/01223_dist_on_dist/query.sql @@ -0,0 +1,92 @@ +drop table if exists merge_dist_01223; +drop table if exists dist_01223; +drop table if exists dist_layer_01223; +drop table if exists data_01223; + +create table data_01223 (key Int) Engine=Memory(); +create table dist_layer_01223 as data_01223 Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01223); +create table dist_01223 as data_01223 Engine=Distributed(test_cluster_two_shards, currentDatabase(), dist_layer_01223); + +select * from dist_01223; + +insert into data_01223 select * from numbers(3); + +select 'DISTINCT ORDER BY'; +select distinct * from dist_01223 order by key; +select 'GROUP BY ORDER BY'; +select * from dist_01223 group by key order by key; +select 'GROUP BY ORDER BY LIMIT'; +select * from dist_01223 group by key order by key limit 1; +select 'HAVING'; +select * from dist_01223 having key = 1; +select 'GROUP BY HAVING'; +select * from dist_01223 group by key having key = 1; +select 'ORDER BY'; +select * from dist_01223 order by key; +select 'ORDER BY LIMIT'; +select * from dist_01223 order by key limit 1; +select 'ORDER BY LIMIT BY'; +select * from dist_01223 order by key limit 1 by key; +select 'cluster() ORDER BY'; +select * from cluster(test_cluster_two_shards, currentDatabase(), dist_01223) order by key; +select 'cluster() GROUP BY ORDER BY'; +select * from cluster(test_cluster_two_shards, currentDatabase(), dist_01223) group by key order by key; + +select 'LEFT JOIN'; +select a.key, b.key from (SELECT toInt32(number) key from numbers(2)) a left join (select distinct * from dist_01223) b using key order by b.key; +select 'RIGHT JOIN'; +select a.key, b.key from (SELECT toInt32(number) key from numbers(2)) a right join (select distinct * from dist_01223) b using key order by b.key; + +-- more data for GROUP BY +insert into data_01223 select number%3 from numbers(30); + +-- group_by_two_level_threshold +select 'GROUP BY ORDER BY group_by_two_level_threshold'; +select * from dist_01223 group by key order by key settings +group_by_two_level_threshold=1, +group_by_two_level_threshold_bytes=1; + +-- distributed_aggregation_memory_efficient +select 'GROUP BY ORDER BY distributed_aggregation_memory_efficient'; +select * from dist_01223 group by key order by key settings +distributed_aggregation_memory_efficient=1; + +-- distributed_aggregation_memory_efficient/group_by_two_level_threshold +select 'GROUP BY ORDER BY 
distributed_aggregation_memory_efficient/group_by_two_level_threshold'; +select * from dist_01223 group by key order by key settings +group_by_two_level_threshold=1, +group_by_two_level_threshold_bytes=1, +distributed_aggregation_memory_efficient=1; + +select 'COUNT'; +select count() from dist_01223; +select 'distributed_group_by_no_merge'; +select count() from dist_01223 settings distributed_group_by_no_merge=1; + +drop table dist_01223; +drop table dist_layer_01223; + +-- only one shard in nested +select 'only one shard in nested'; +create table dist_layer_01223 as data_01223 Engine=Distributed(test_shard_localhost, currentDatabase(), data_01223); +create table dist_01223 as data_01223 Engine=Distributed(test_cluster_two_shards, currentDatabase(), dist_layer_01223); +select count() from dist_01223; + +select 'distributed_group_by_no_merge'; +select count() from dist_01223 settings distributed_group_by_no_merge=1; + +-- wrap with merge() +select 'merge()'; +create table merge_dist_01223 as dist_01223 engine=Merge(currentDatabase(), 'dist_01223'); +select count() from merge_dist_01223; +select 'distributed_group_by_no_merge'; +select count() from merge_dist_01223 settings distributed_group_by_no_merge=1; + +-- global in +select 'GLOBAL IN'; +select distinct * from dist_01223 where key global in (select toInt32(1)); + +drop table merge_dist_01223; +drop table dist_01223; +drop table dist_layer_01223; +drop table data_01223; diff --git a/parser/testdata/01224_no_superfluous_dict_reload/ast.json b/parser/testdata/01224_no_superfluous_dict_reload/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01224_no_superfluous_dict_reload/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01224_no_superfluous_dict_reload/metadata.json b/parser/testdata/01224_no_superfluous_dict_reload/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01224_no_superfluous_dict_reload/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01224_no_superfluous_dict_reload/query.sql b/parser/testdata/01224_no_superfluous_dict_reload/query.sql new file mode 100644 index 000000000..71af1ebdb --- /dev/null +++ b/parser/testdata/01224_no_superfluous_dict_reload/query.sql @@ -0,0 +1,44 @@ +-- Tags: no-parallel + +SET send_logs_level = 'fatal'; + +DROP DATABASE IF EXISTS dict_db_01224; +DROP DATABASE IF EXISTS dict_db_01224_dictionary; +set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. 
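+-- Illustrative note, not part of the test itself: the check repeated below,
+--   SELECT status FROM system.dictionaries WHERE database = 'dict_db_01224' AND name = 'dict';
+-- is expected to keep reporting NOT_LOADED, since none of the metadata-only
+-- queries in this file should trigger a superfluous dictionary load.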
+CREATE DATABASE dict_db_01224 ENGINE=Ordinary; -- the internal dictionary name would differ under the Atomic engine +CREATE DATABASE dict_db_01224_dictionary Engine=Dictionary; + +CREATE TABLE dict_db_01224.dict_data (key UInt64, val UInt64) Engine=Memory(); +CREATE DICTIONARY dict_db_01224.dict +( + key UInt64 DEFAULT 0, + val UInt64 DEFAULT 10 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'dict_data' PASSWORD '' DB 'dict_db_01224')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(FLAT()); + +SELECT status FROM system.dictionaries WHERE database = 'dict_db_01224' AND name = 'dict'; + +SELECT * FROM system.tables FORMAT Null; +SELECT status FROM system.dictionaries WHERE database = 'dict_db_01224' AND name = 'dict'; + +SHOW CREATE TABLE dict_db_01224.dict FORMAT TSVRaw; +SELECT status FROM system.dictionaries WHERE database = 'dict_db_01224' AND name = 'dict'; + +SHOW CREATE TABLE dict_db_01224_dictionary.`dict_db_01224.dict` FORMAT TSVRaw; +SELECT status FROM system.dictionaries WHERE database = 'dict_db_01224' AND name = 'dict'; + +SELECT engine, metadata_path LIKE '%metadata/dict\_db\_01224/dict.sql', create_table_query FROM system.tables WHERE database = 'dict_db_01224' AND name = 'dict'; +SELECT status FROM system.dictionaries WHERE database = 'dict_db_01224' AND name = 'dict'; + +SELECT name, type FROM system.columns WHERE database = 'dict_db_01224' AND table = 'dict'; +SELECT status FROM system.dictionaries WHERE database = 'dict_db_01224' AND name = 'dict'; + +DROP DICTIONARY dict_db_01224.dict; +SELECT status FROM system.dictionaries WHERE database = 'dict_db_01224' AND name = 'dict'; + +DROP DATABASE dict_db_01224; +DROP DATABASE dict_db_01224_dictionary; diff --git a/parser/testdata/01225_drop_dictionary_as_table/ast.json b/parser/testdata/01225_drop_dictionary_as_table/ast.json new file mode 100644 index 000000000..e5f93b4e5 --- /dev/null +++ b/parser/testdata/01225_drop_dictionary_as_table/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery dict_data (children 3)" + }, + { + "explain": " Identifier dict_data" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": "  ExpressionList (children 2)" + }, + { + "explain": "   ColumnDeclaration key (children 1)" + }, + { + "explain": "    DataType UInt64" + }, + { + "explain": "   ColumnDeclaration val (children 1)" + }, + { + "explain": "    DataType UInt64" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": "  Function Memory (children 1)" + }, + { + "explain": "   ExpressionList" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001200678, + "rows_read": 11, + "bytes_read": 395 + } +} diff --git a/parser/testdata/01225_drop_dictionary_as_table/metadata.json b/parser/testdata/01225_drop_dictionary_as_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01225_drop_dictionary_as_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01225_drop_dictionary_as_table/query.sql b/parser/testdata/01225_drop_dictionary_as_table/query.sql new file mode 100644 index 000000000..a0cacd8bc --- /dev/null +++ b/parser/testdata/01225_drop_dictionary_as_table/query.sql @@ -0,0 +1,15 @@ +CREATE TABLE dict_data (key UInt64, val UInt64) Engine=Memory(); +CREATE DICTIONARY dict +( + key UInt64 DEFAULT 0, + val UInt64 DEFAULT 10 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'dict_data' 
PASSWORD '' DB currentDatabase())) +LIFETIME(MIN 0 MAX 0) +LAYOUT(FLAT()); + +SYSTEM RELOAD DICTIONARY dict; + +DROP TABLE dict; -- { serverError CANNOT_DETACH_DICTIONARY_AS_TABLE } +DROP DICTIONARY dict; diff --git a/parser/testdata/01225_show_create_table_from_dictionary/ast.json b/parser/testdata/01225_show_create_table_from_dictionary/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01225_show_create_table_from_dictionary/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01225_show_create_table_from_dictionary/metadata.json b/parser/testdata/01225_show_create_table_from_dictionary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01225_show_create_table_from_dictionary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01225_show_create_table_from_dictionary/query.sql b/parser/testdata/01225_show_create_table_from_dictionary/query.sql new file mode 100644 index 000000000..27159528e --- /dev/null +++ b/parser/testdata/01225_show_create_table_from_dictionary/query.sql @@ -0,0 +1,27 @@ +-- Tags: no-parallel + +SET send_logs_level = 'fatal'; + +DROP DATABASE IF EXISTS dict_db_01225; +DROP DATABASE IF EXISTS dict_db_01225_dictionary; +set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. +CREATE DATABASE dict_db_01225 ENGINE=Ordinary; -- the internal dictionary name would differ under the Atomic engine +CREATE DATABASE dict_db_01225_dictionary Engine=Dictionary; + +CREATE TABLE dict_db_01225.dict_data (key UInt64, val UInt64) Engine=Memory(); +CREATE DICTIONARY dict_db_01225.dict +( + key UInt64 DEFAULT 0, + val UInt64 DEFAULT 10 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'dict_data' PASSWORD '' DB 'dict_db_01225')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(FLAT()); + +SHOW CREATE TABLE dict_db_01225_dictionary.`dict_db_01225.dict` FORMAT TSVRaw; +SHOW CREATE TABLE dict_db_01225_dictionary.`dict_db_01225.no_such_dict`; -- { serverError CANNOT_GET_CREATE_DICTIONARY_QUERY } + +DROP DATABASE dict_db_01225; +DROP DATABASE dict_db_01225_dictionary; diff --git a/parser/testdata/01226_dist_on_dist_global_in/ast.json b/parser/testdata/01226_dist_on_dist_global_in/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01226_dist_on_dist_global_in/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01226_dist_on_dist_global_in/metadata.json b/parser/testdata/01226_dist_on_dist_global_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01226_dist_on_dist_global_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01226_dist_on_dist_global_in/query.sql b/parser/testdata/01226_dist_on_dist_global_in/query.sql new file mode 100644 index 000000000..f75e09743 --- /dev/null +++ b/parser/testdata/01226_dist_on_dist_global_in/query.sql @@ -0,0 +1,10 @@ +-- Tags: global + +SELECT 'GLOBAL IN'; +select * from remote('localhost', system.one) where dummy global in (0); +select * from remote('localhost', system.one) where dummy global in system.one; +select * from remote('localhost', system.one) where dummy global in (select 0); +SELECT 'GLOBAL NOT IN'; +select * from remote('localhost', system.one) where dummy global not in (0); +select * from remote('localhost', system.one) where dummy global not in system.one; +select * from remote('localhost', system.one) where dummy global not in 
(select 0); diff --git a/parser/testdata/01227_distributed_global_in_issue_2610/ast.json b/parser/testdata/01227_distributed_global_in_issue_2610/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01227_distributed_global_in_issue_2610/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01227_distributed_global_in_issue_2610/metadata.json b/parser/testdata/01227_distributed_global_in_issue_2610/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01227_distributed_global_in_issue_2610/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01227_distributed_global_in_issue_2610/query.sql b/parser/testdata/01227_distributed_global_in_issue_2610/query.sql new file mode 100644 index 000000000..d8ff90061 --- /dev/null +++ b/parser/testdata/01227_distributed_global_in_issue_2610/query.sql @@ -0,0 +1,10 @@ +-- Tags: distributed + +-- Test from the issue https://github.com/ClickHouse/ClickHouse/issues/2610 +drop table if exists data_01227; +create table data_01227 (key Int) Engine=MergeTree() order by key; +insert into data_01227 select * from numbers(10); +select * from remote('127.1', currentDatabase(), data_01227) prewhere key global in (select key from data_01227 prewhere key = 2); +select * from cluster('test_cluster_two_shards', currentDatabase(), data_01227) prewhere key global in (select key from data_01227 prewhere key = 2); + +drop table data_01227; diff --git a/parser/testdata/01227_distributed_merge_global_in_primary_key/ast.json b/parser/testdata/01227_distributed_merge_global_in_primary_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01227_distributed_merge_global_in_primary_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01227_distributed_merge_global_in_primary_key/metadata.json b/parser/testdata/01227_distributed_merge_global_in_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01227_distributed_merge_global_in_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01227_distributed_merge_global_in_primary_key/query.sql b/parser/testdata/01227_distributed_merge_global_in_primary_key/query.sql new file mode 100644 index 000000000..6b0dd4c87 --- /dev/null +++ b/parser/testdata/01227_distributed_merge_global_in_primary_key/query.sql @@ -0,0 +1,83 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/64211 +DROP TABLE IF EXISTS test_merge; +DROP TABLE IF EXISTS test_merge_distributed; +DROP TABLE IF EXISTS test_distributed_merge; +DROP TABLE IF EXISTS test_distributed; +DROP TABLE IF EXISTS test_local; +CREATE TABLE test_local (name String) +ENGINE = MergeTree +ORDER BY name as select 'x'; + +CREATE TABLE test_distributed as test_local +ENGINE = Distributed(test_shard_localhost, currentDatabase(), test_local); + +CREATE TABLE test_merge as test_local +ENGINE = Merge(currentDatabase(), 'test_local'); + +CREATE TABLE test_merge_distributed as test_local +ENGINE = Distributed(test_shard_localhost, currentDatabase(), test_merge); + +CREATE TABLE test_distributed_merge as test_local +ENGINE = Merge(currentDatabase(), 'test_distributed'); + +SELECT '------------------- Distributed ------------------'; +SELECT count() +FROM test_distributed +WHERE name GLOBAL IN (SELECT name FROM test_distributed); + +SELECT '---------- merge() over distributed --------------'; +SELECT count() +FROM merge(currentDatabase(), 
'test_distributed') +WHERE name GLOBAL IN (SELECT name FROM test_distributed); + +SELECT '---------- merge() over local --------------------'; +SELECT count() +FROM merge(currentDatabase(), 'test_local') +WHERE name GLOBAL IN (SELECT name FROM test_distributed); + +SELECT count() +FROM merge(currentDatabase(), 'test_local') +WHERE name GLOBAL IN (SELECT name FROM merge(currentDatabase(), 'test_local')); + +SELECT count() +FROM merge(currentDatabase(), 'test_local') +WHERE name GLOBAL IN (SELECT name FROM remote('127.0.0.{1,2}', currentDatabase(), test_merge)); + +SELECT '---------- remote() over Merge -------------------'; +SELECT count() +FROM remote('127.0.0.{1,2}', currentDatabase(), test_merge) +WHERE name GLOBAL IN (SELECT name FROM test_distributed); + +SELECT '---------- Distributed over Merge ----------------'; +SELECT count() +FROM test_merge_distributed +WHERE name GLOBAL IN (SELECT name FROM test_merge_distributed); + +SELECT '---------- remote() over Merge -------------------'; +SELECT count() +FROM remote('127.0.0.{1,2}', currentDatabase(), test_merge) +WHERE name GLOBAL IN (SELECT name FROM remote('127.0.0.{1,2}', currentDatabase(), test_merge)); + +SELECT '---------- Merge over Distributed -----------------'; +SELECT count() +FROM test_distributed_merge +WHERE name GLOBAL IN (SELECT name FROM remote('127.0.0.{1,2}', currentDatabase(), test_merge)); + +SELECT count() +FROM test_distributed_merge +WHERE name GLOBAL IN (SELECT name FROM remote('127.0.0.{1,2}', currentDatabase(), test_distributed_merge)); + +SELECT count() +FROM test_distributed_merge +WHERE name GLOBAL IN (SELECT name FROM test_distributed_merge); + +SELECT count() +FROM remote('127.0.0.{1,2}', currentDatabase(), test_distributed_merge) +WHERE name GLOBAL IN (SELECT name FROM remote('127.0.0.{1,2}', currentDatabase(), test_merge)); + + +DROP TABLE test_merge; +DROP TABLE test_merge_distributed; +DROP TABLE test_distributed_merge; +DROP TABLE test_distributed; +DROP TABLE test_local; diff --git a/parser/testdata/01230_join_get_truncate/ast.json b/parser/testdata/01230_join_get_truncate/ast.json new file mode 100644 index 000000000..16ab4b8c2 --- /dev/null +++ b/parser/testdata/01230_join_get_truncate/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery join_test (children 1)" + }, + { + "explain": " Identifier join_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001766726, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/01230_join_get_truncate/metadata.json b/parser/testdata/01230_join_get_truncate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01230_join_get_truncate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01230_join_get_truncate/query.sql b/parser/testdata/01230_join_get_truncate/query.sql new file mode 100644 index 000000000..b9c58fc51 --- /dev/null +++ b/parser/testdata/01230_join_get_truncate/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS join_test; + +CREATE TABLE join_test (id UInt16, num UInt16) engine = Join(ANY, LEFT, id) settings join_any_take_last_row = 1; + +INSERT INTO join_test (id, num) SELECT number, number FROM system.numbers LIMIT 1000; + +SELECT joinGet('join_test', 'num', 500); + +-- joinGet('join_test', 'num', 500) will be 500 and it is fine +-- replace all the values + +INSERT INTO join_test (id, num) SELECT number, number * 2 FROM system.numbers LIMIT 1000; + +SELECT joinGet 
('join_test', 'num', 500); + +-- joinGet('join_test', 'num', 500) will be 1000 and it is fine + +TRUNCATE TABLE join_test; + +INSERT INTO join_test (id, num) SELECT number, number FROM system.numbers LIMIT 1000; + +INSERT INTO join_test (id, num) SELECT number, number * 2 FROM system.numbers LIMIT 1000; + +SELECT joinGet('join_test', 'num', 500); + +-- joinGet('join_test', 'num', 500) will be 1000 and it is not fine +DROP TABLE join_test; diff --git a/parser/testdata/01231_distributed_aggregation_memory_efficient_mix_levels/ast.json b/parser/testdata/01231_distributed_aggregation_memory_efficient_mix_levels/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01231_distributed_aggregation_memory_efficient_mix_levels/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01231_distributed_aggregation_memory_efficient_mix_levels/metadata.json b/parser/testdata/01231_distributed_aggregation_memory_efficient_mix_levels/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01231_distributed_aggregation_memory_efficient_mix_levels/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01231_distributed_aggregation_memory_efficient_mix_levels/query.sql b/parser/testdata/01231_distributed_aggregation_memory_efficient_mix_levels/query.sql new file mode 100644 index 000000000..4d97776de --- /dev/null +++ b/parser/testdata/01231_distributed_aggregation_memory_efficient_mix_levels/query.sql @@ -0,0 +1,35 @@ +-- Tags: distributed, no-parallel + +set send_logs_level = 'error'; + +create database if not exists shard_0; +create database if not exists shard_1; + +drop table if exists shard_0.shard_01231_distributed_aggregation_memory_efficient; +drop table if exists shard_1.shard_01231_distributed_aggregation_memory_efficient; +drop table if exists ma_dist; + +create table shard_0.shard_01231_distributed_aggregation_memory_efficient (x UInt64) engine = MergeTree order by x; +create table shard_1.shard_01231_distributed_aggregation_memory_efficient (x UInt64) engine = MergeTree order by x; + +insert into shard_0.shard_01231_distributed_aggregation_memory_efficient select * from numbers(1); +insert into shard_1.shard_01231_distributed_aggregation_memory_efficient select * from numbers(10); + +create table ma_dist (x UInt64) ENGINE = Distributed(test_cluster_two_shards_different_databases, '', 'shard_01231_distributed_aggregation_memory_efficient'); + +set distributed_aggregation_memory_efficient = 1; +set group_by_two_level_threshold = 2; +set max_bytes_before_external_group_by = 16; +set max_bytes_ratio_before_external_group_by = 0; + +select x, count() from ma_dist group by x order by x; + +select arrayFilter(y -> y = x, [x]) as f from ma_dist order by f; + +drop table if exists shard_0.shard_01231_distributed_aggregation_memory_efficient; +drop table if exists shard_1.shard_01231_distributed_aggregation_memory_efficient; + +drop table ma_dist; + +drop database shard_0; +drop database shard_1; diff --git a/parser/testdata/01231_log_queries_min_type/ast.json b/parser/testdata/01231_log_queries_min_type/ast.json new file mode 100644 index 000000000..65297749b --- /dev/null +++ b/parser/testdata/01231_log_queries_min_type/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001750356, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git 
a/parser/testdata/01231_log_queries_min_type/metadata.json b/parser/testdata/01231_log_queries_min_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01231_log_queries_min_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01231_log_queries_min_type/query.sql b/parser/testdata/01231_log_queries_min_type/query.sql new file mode 100644 index 000000000..f7d46d4a1 --- /dev/null +++ b/parser/testdata/01231_log_queries_min_type/query.sql @@ -0,0 +1,35 @@ +set log_queries=1; + +select '01231_log_queries_min_type/QUERY_START'; +system flush logs query_log; +select count() from system.query_log where current_database = currentDatabase() + and query like 'select \'01231_log_queries_min_type/QUERY_START%' + and event_date >= yesterday(); + +set log_queries_min_type='EXCEPTION_BEFORE_START'; +select '01231_log_queries_min_type/EXCEPTION_BEFORE_START'; +system flush logs query_log; +select count() from system.query_log where current_database = currentDatabase() + and query like 'select \'01231_log_queries_min_type/EXCEPTION_BEFORE_START%' + and event_date >= yesterday(); + +set max_rows_to_read='100K'; +set log_queries_min_type='EXCEPTION_WHILE_PROCESSING'; +select '01231_log_queries_min_type/EXCEPTION_WHILE_PROCESSING', max(number) from system.numbers limit 1e6; -- { serverError TOO_MANY_ROWS } +set max_rows_to_read=0; +system flush logs query_log; +select count() from system.query_log where current_database = currentDatabase() + and query like 'select \'01231_log_queries_min_type/EXCEPTION_WHILE_PROCESSING%' + and event_date >= yesterday() and type = 'ExceptionWhileProcessing'; + +set max_rows_to_read='100K'; +select '01231_log_queries_min_type w/ Settings/EXCEPTION_WHILE_PROCESSING', max(number) from system.numbers limit 1e6; -- { serverError TOO_MANY_ROWS } +system flush logs query_log; +set max_rows_to_read=0; +select count() from system.query_log where + current_database = currentDatabase() and + query like 'select \'01231_log_queries_min_type w/ Settings/EXCEPTION_WHILE_PROCESSING%' and + query not like '%system.query_log%' and + event_date >= yesterday() and + type = 'ExceptionWhileProcessing' and + Settings['max_rows_to_read'] != ''; diff --git a/parser/testdata/01231_markdown_format/ast.json b/parser/testdata/01231_markdown_format/ast.json new file mode 100644 index 000000000..9929ee996 --- /dev/null +++ b/parser/testdata/01231_markdown_format/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery markdown (children 1)" + }, + { + "explain": " Identifier markdown" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001234344, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01231_markdown_format/metadata.json b/parser/testdata/01231_markdown_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01231_markdown_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01231_markdown_format/query.sql b/parser/testdata/01231_markdown_format/query.sql new file mode 100644 index 000000000..cc9ffa109 --- /dev/null +++ b/parser/testdata/01231_markdown_format/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS markdown; +CREATE TABLE markdown (id UInt32, name String, array Array(Int32), nullable Nullable(String), low_cardinality LowCardinality(String), decimal Decimal32(6)) ENGINE = Memory; +INSERT INTO markdown VALUES (1, 'name1', 
[1,2,3], 'Some long string', 'name1', 1.11), (2, 'name2', [4,5,60000], Null, 'Another long string', 222.222222), (30000, 'One more long string', [7,8,9], 'name3', 'name3', 3.33); + +SELECT * FROM markdown FORMAT Markdown; +DROP TABLE IF EXISTS markdown; + + +SET output_format_markdown_escape_special_characters = true; +SELECT '!#$%&(*+,-./:<=>?@[^`{|}~' AS a FORMAT Markdown; +SELECT CAST(1 AS Enum('!#$%&(*+,-./:<=>?@[^`{|}~' = 1)) AS a FORMAT Markdown; +SELECT toFixedString('!#$%&(*+,-./:<=>?@[^`{|}~', 25) AS a FORMAT Markdown; + +SET output_format_markdown_escape_special_characters = false; +SELECT '!#$%&(*+,-./:<=>?@[^`{|}~' AS a FORMAT Markdown; +SELECT CAST(1 AS Enum('!#$%&(*+,-./:<=>?@[^`{|}~' = 1)) AS a FORMAT Markdown; +SELECT toFixedString('!#$%&(*+,-./:<=>?@[^`{|}~', 25) AS a FORMAT Markdown; diff --git a/parser/testdata/01231_operator_null_in/ast.json b/parser/testdata/01231_operator_null_in/ast.json new file mode 100644 index 000000000..460c03425 --- /dev/null +++ b/parser/testdata/01231_operator_null_in/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery null_in (children 1)" + }, + { + "explain": " Identifier null_in" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001376231, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/01231_operator_null_in/metadata.json b/parser/testdata/01231_operator_null_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01231_operator_null_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01231_operator_null_in/query.sql b/parser/testdata/01231_operator_null_in/query.sql new file mode 100644 index 000000000..26f342540 --- /dev/null +++ b/parser/testdata/01231_operator_null_in/query.sql @@ -0,0 +1,144 @@ +DROP TABLE IF EXISTS null_in; +CREATE TABLE null_in (dt DateTime, idx int, i Nullable(int), s Nullable(String)) ENGINE = MergeTree() PARTITION BY dt ORDER BY idx SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO null_in VALUES (1, 1, 1, '1') (2, 2, NULL, NULL) (3, 3, 3, '3') (4, 4, NULL, NULL) (5, 5, 5, '5'); + +SELECT count() == 2 FROM null_in WHERE i in (1, 3, NULL); +SELECT count() == 2 FROM null_in WHERE i in range(4); +SELECT count() == 2 FROM null_in WHERE s in ('1', '3', NULL); +SELECT count() == 2 FROM null_in WHERE i global in (1, 3, NULL); +SELECT count() == 2 FROM null_in WHERE i global in range(4); +SELECT count() == 2 FROM null_in WHERE s global in ('1', '3', NULL); + +SELECT count() == 1 FROM null_in WHERE i not in (1, 3, NULL); +SELECT count() == 1 FROM null_in WHERE i not in range(4); +SELECT count() == 1 FROM null_in WHERE s not in ('1', '3', NULL); +SELECT count() == 1 FROM null_in WHERE i global not in (1, 3, NULL); +SELECT count() == 1 FROM null_in WHERE i global not in range(4); +SELECT count() == 1 FROM null_in WHERE s global not in ('1', '3', NULL); + +SET transform_null_in = 1; + +SELECT count() == 4 FROM null_in WHERE i in (1, 3, NULL); +SELECT count() == 2 FROM null_in WHERE i in range(4); +SELECT count() == 4 FROM null_in WHERE s in ('1', '3', NULL); +SELECT count() == 4 FROM null_in WHERE i global in (1, 3, NULL); +SELECT count() == 2 FROM null_in WHERE i global in range(4); +SELECT count() == 4 FROM null_in WHERE s global in ('1', '3', NULL); + +SELECT count() == 1 FROM null_in WHERE i not in (1, 3, NULL); +SELECT count() == 3 FROM null_in WHERE i not in range(4); +SELECT count() == 1 FROM 
null_in WHERE s not in ('1', '3', NULL); +SELECT count() == 1 FROM null_in WHERE i global not in (1, 3, NULL); +SELECT count() == 3 FROM null_in WHERE i global not in range(4); +SELECT count() == 1 FROM null_in WHERE s global not in ('1', '3', NULL); + +SELECT count() == 3 FROM null_in WHERE i not in (1, 3); +SELECT count() == 3 FROM null_in WHERE i not in range(4); +SELECT count() == 3 FROM null_in WHERE s not in ('1', '3'); +SELECT count() == 3 FROM null_in WHERE i global not in (1, 3); +SELECT count() == 3 FROM null_in WHERE i global not in range(4); +SELECT count() == 3 FROM null_in WHERE s global not in ('1', '3'); + +DROP TABLE IF EXISTS test_set; +CREATE TABLE test_set (i Nullable(int)) ENGINE = Set(); +INSERT INTO test_set VALUES (1), (NULL); + +SET transform_null_in = 0; + +SELECT count() == 1 FROM null_in WHERE i in test_set; +SELECT count() == 2 FROM null_in WHERE i not in test_set; +SELECT count() == 1 FROM null_in WHERE i global in test_set; +SELECT count() == 2 FROM null_in WHERE i global not in test_set; + +SET transform_null_in = 1; + +SELECT count() == 3 FROM null_in WHERE i in test_set; +SELECT count() == 2 FROM null_in WHERE i not in test_set; +SELECT count() == 3 FROM null_in WHERE i global in test_set; +SELECT count() == 2 FROM null_in WHERE i global not in test_set; + +-- Create with transform_null_in +CREATE TABLE test_set2 (i Nullable(int)) ENGINE = Set(); +INSERT INTO test_set2 VALUES (1), (NULL); + +SET transform_null_in = 0; + +SELECT count() == 1 FROM null_in WHERE i in test_set2; +SELECT count() == 2 FROM null_in WHERE i not in test_set2; +SELECT count() == 1 FROM null_in WHERE i global in test_set2; +SELECT count() == 2 FROM null_in WHERE i global not in test_set2; + +SET transform_null_in = 1; + +SELECT count() == 3 FROM null_in WHERE i in test_set2; +SELECT count() == 2 FROM null_in WHERE i not in test_set2; +SELECT count() == 3 FROM null_in WHERE i global in test_set2; +SELECT count() == 2 FROM null_in WHERE i global not in test_set2; + +DROP TABLE IF EXISTS test_set; +DROP TABLE IF EXISTS null_in; + + +DROP TABLE IF EXISTS null_in_subquery; +CREATE TABLE null_in_subquery (dt DateTime, idx int, i Nullable(UInt64)) ENGINE = MergeTree() PARTITION BY dt ORDER BY idx SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +INSERT INTO null_in_subquery SELECT number % 3, number, number FROM system.numbers LIMIT 99999; + +SELECT count() == 33333 FROM null_in_subquery WHERE i in (SELECT i FROM null_in_subquery WHERE dt = 0); +SELECT count() == 66666 FROM null_in_subquery WHERE i not in (SELECT i FROM null_in_subquery WHERE dt = 1); +SELECT count() == 33333 FROM null_in_subquery WHERE i global in (SELECT i FROM null_in_subquery WHERE dt = 2); +SELECT count() == 66666 FROM null_in_subquery WHERE i global not in (SELECT i FROM null_in_subquery WHERE dt = 0); + +-- For index column +SELECT count() == 33333 FROM null_in_subquery WHERE idx in (SELECT idx FROM null_in_subquery WHERE dt = 0); +SELECT count() == 66666 FROM null_in_subquery WHERE idx not in (SELECT idx FROM null_in_subquery WHERE dt = 1); +SELECT count() == 33333 FROM null_in_subquery WHERE idx global in (SELECT idx FROM null_in_subquery WHERE dt = 2); +SELECT count() == 66666 FROM null_in_subquery WHERE idx global not in (SELECT idx FROM null_in_subquery WHERE dt = 0); + +INSERT INTO null_in_subquery VALUES (0, 123456780, NULL); +INSERT INTO null_in_subquery VALUES (1, 123456781, NULL); + +SELECT count() == 33335 FROM null_in_subquery WHERE i in (SELECT i FROM null_in_subquery WHERE dt = 0); 
+SELECT count() == 66666 FROM null_in_subquery WHERE i not in (SELECT i FROM null_in_subquery WHERE dt = 1); +SELECT count() == 33333 FROM null_in_subquery WHERE i in (SELECT i FROM null_in_subquery WHERE dt = 2); +SELECT count() == 66668 FROM null_in_subquery WHERE i not in (SELECT i FROM null_in_subquery WHERE dt = 2); +SELECT count() == 33335 FROM null_in_subquery WHERE i global in (SELECT i FROM null_in_subquery WHERE dt = 0); +SELECT count() == 66666 FROM null_in_subquery WHERE i global not in (SELECT i FROM null_in_subquery WHERE dt = 1); +SELECT count() == 33333 FROM null_in_subquery WHERE i global in (SELECT i FROM null_in_subquery WHERE dt = 2); +SELECT count() == 66668 FROM null_in_subquery WHERE i global not in (SELECT i FROM null_in_subquery WHERE dt = 2); + +DROP TABLE IF EXISTS null_in_subquery; + + +DROP TABLE IF EXISTS null_in_tuple; +CREATE TABLE null_in_tuple (dt DateTime, idx int, t Tuple(Nullable(UInt64), Nullable(String))) ENGINE = MergeTree() PARTITION BY dt ORDER BY idx SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +INSERT INTO null_in_tuple VALUES (1, 1, (1, '1')) (2, 2, (2, NULL)) (3, 3, (NULL, '3')) (4, 4, (NULL, NULL)); + +SET transform_null_in = 0; + +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1')] FROM null_in_tuple WHERE t in ((1, '1'), (NULL, NULL)); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3'), (NULL, NULL)] FROM null_in_tuple WHERE t not in ((1, '1'), (NULL, NULL)); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1')] FROM null_in_tuple WHERE t global in ((1, '1'), (NULL, NULL)); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3'), (NULL, NULL)] FROM null_in_tuple WHERE t global not in ((1, '1'), (NULL, NULL)); + +SET transform_null_in = 1; + +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1'), (NULL, NULL)] FROM null_in_tuple WHERE t in ((1, '1'), (NULL, NULL)); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3')] FROM null_in_tuple WHERE t not in ((1, '1'), (NULL, NULL)); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1'), (NULL, NULL)] FROM null_in_tuple WHERE t global in ((1, '1'), (NULL, NULL)); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3')] FROM null_in_tuple WHERE t global not in ((1, '1'), (NULL, NULL)); + +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1')] FROM null_in_tuple WHERE t in ((1, '1'), (1, NULL)); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1')] FROM null_in_tuple WHERE t in ((1, '1'), (NULL, '1')); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1'), (2, NULL)] FROM null_in_tuple WHERE t in ((1, '1'), (NULL, '1'), (2, NULL)); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1'), (NULL, '3')] FROM null_in_tuple WHERE t in ((1, '1'), (1, NULL), (NULL, '3')); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(1, '1'), (2, NULL), (NULL, '3'), (NULL, NULL)] FROM null_in_tuple WHERE t in ((1, '1'), (1, NULL), (2, NULL), (NULL, '3'), (NULL, NULL)); + +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3'), (NULL, NULL)] FROM null_in_tuple WHERE t not in ((1, '1'), (1, NULL)); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, '3'), (NULL, NULL)] FROM null_in_tuple WHERE t not in ((1, '1'), (NULL, '1')); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [(NULL, '3'), (NULL, NULL)] FROM null_in_tuple WHERE t not in ((1, '1'), (NULL, '1'), (2, NULL)); +SELECT 
arraySort(x -> (x.1, x.2), groupArray(t)) == [(2, NULL), (NULL, NULL)] FROM null_in_tuple WHERE t not in ((1, '1'), (1, NULL), (NULL, '3')); +SELECT arraySort(x -> (x.1, x.2), groupArray(t)) == [] FROM null_in_tuple WHERE t not in ((1, '1'), (1, NULL), (2, NULL), (NULL, '3'), (NULL, NULL)); + +DROP TABLE IF EXISTS null_in_tuple; +DROP TABLE test_set2; diff --git a/parser/testdata/01232_extremes/ast.json b/parser/testdata/01232_extremes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01232_extremes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01232_extremes/metadata.json b/parser/testdata/01232_extremes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01232_extremes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01232_extremes/query.sql b/parser/testdata/01232_extremes/query.sql new file mode 100644 index 000000000..c0a9c8cb6 --- /dev/null +++ b/parser/testdata/01232_extremes/query.sql @@ -0,0 +1,58 @@ +-- Tags: no-parallel + +set send_logs_level = 'error'; +set extremes = 1; + +select * from remote('127.0.0.1', numbers(2)); +select '-'; +select * from remote('127.0.0.{1,1}', numbers(2)); +select '-'; +select * from remote('127.0.0.{1,2}', numbers(2)); +select '-'; +select * from remote('127.0.0.{2,2}', numbers(2)); +select '-'; +select * from remote('127.0.0.2', numbers(2)); +select '------'; + +select * from (select * from numbers(2) union all select * from numbers(3) union all select * from numbers(1)) order by number; +select '-'; +select * from (select * from numbers(1) union all select * from numbers(2) union all select * from numbers(3)) order by number; +select '-'; +select * from (select * from numbers(3) union all select * from numbers(1) union all select * from numbers(2)) order by number; +select '------'; + +create database if not exists shard_0; +create database if not exists shard_1; + +drop table if exists shard_0.num_01232; +drop table if exists shard_0.num2_01232; +drop table if exists shard_1.num_01232; +drop table if exists shard_1.num2_01232; +drop table if exists distr; +drop table if exists distr2; + +create table shard_0.num_01232 (number UInt64) engine = MergeTree order by number; +create table shard_1.num_01232 (number UInt64) engine = MergeTree order by number; +insert into shard_0.num_01232 select number from numbers(2); +insert into shard_1.num_01232 select number from numbers(3); +create table distr (number UInt64) engine = Distributed(test_cluster_two_shards_different_databases, '', num_01232); + +create table shard_0.num2_01232 (number UInt64) engine = MergeTree order by number; +create table shard_1.num2_01232 (number UInt64) engine = MergeTree order by number; +insert into shard_0.num2_01232 select number from numbers(3); +insert into shard_1.num2_01232 select number from numbers(2); +create table distr2 (number UInt64) engine = Distributed(test_cluster_two_shards_different_databases, '', num2_01232); + +select * from distr order by number; +select '-'; +select * from distr2 order by number; + +drop table if exists shard_0.num_01232; +drop table if exists shard_0.num2_01232; +drop table if exists shard_1.num_01232; +drop table if exists shard_1.num2_01232; +drop table if exists distr; +drop table if exists distr2; + +drop database shard_0; +drop database shard_1; diff --git a/parser/testdata/01232_untuple/ast.json b/parser/testdata/01232_untuple/ast.json new file mode 100644 index 000000000..c805f162b --- /dev/null 
+++ b/parser/testdata/01232_untuple/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001659197, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01232_untuple/metadata.json b/parser/testdata/01232_untuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01232_untuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01232_untuple/query.sql b/parser/testdata/01232_untuple/query.sql new file mode 100644 index 000000000..e2049e0a4 --- /dev/null +++ b/parser/testdata/01232_untuple/query.sql @@ -0,0 +1,13 @@ +SET enable_analyzer = 1; +SET enable_named_columns_in_function_tuple = 1; + +select untuple((* except (b),)) from (select 1 a, 2 b, 3 c); +select 'hello', untuple((* except (b),)), 'world' from (select 1 a, 2 b, 3 c); +select argMax(untuple(x)) from (select (number, number + 1) as x from numbers(10)); +select argMax(untuple(x)), min(x) from (select (number, number + 1) as x from numbers(10)) having tuple(untuple(min(x))).1 != 42; + +drop table if exists kv; +create table kv (key int, v1 int, v2 int, v3 int, v4 int, v5 int) engine MergeTree order by key; +insert into kv values (1, 10, 20, 10, 20, 30), (2, 11, 20, 10, 20, 30), (1, 18, 20, 10, 20, 30), (1, 20, 20, 10, 20, 30), (3, 70, 20, 10, 20, 30), (4, 10, 20, 10, 20, 30), (1, 10, 20, 10, 20, 30), (5, 10, 20, 10, 20, 30), (1, 10, 20, 10, 20, 30), (8, 30, 20, 10, 20, 30), (1, 10, 20, 10, 20, 30), (6, 10, 20, 10, 20, 30), (1, 10, 20, 10, 20, 30), (7, 18, 20, 10, 20, 30), (1, 10, 20, 10, 20, 30), (7, 10, 20, 10, 20, 30), (1, 10, 20, 10, 20, 30), (8, 10, 20, 10, 20, 30), (1, 10, 20, 10, 20, 30); +select key, untuple(argMax((* except (key),), v1)) from kv group by key order by key format TSVWithNames; +drop table if exists kv; diff --git a/parser/testdata/01234_to_string_monotonic/ast.json b/parser/testdata/01234_to_string_monotonic/ast.json new file mode 100644 index 000000000..8bc8c7ab3 --- /dev/null +++ b/parser/testdata/01234_to_string_monotonic/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001476331, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01234_to_string_monotonic/metadata.json b/parser/testdata/01234_to_string_monotonic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01234_to_string_monotonic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01234_to_string_monotonic/query.sql b/parser/testdata/01234_to_string_monotonic/query.sql new file mode 100644 index 000000000..88a1c5614 --- /dev/null +++ b/parser/testdata/01234_to_string_monotonic/query.sql @@ -0,0 +1,19 @@ +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +DROP TABLE IF EXISTS test1; +DROP TABLE IF EXISTS test2; + +CREATE TABLE test1 (s String) ENGINE = MergeTree ORDER BY s SETTINGS index_granularity = 1; +CREATE TABLE test2 (s LowCardinality(String)) ENGINE = MergeTree ORDER BY s SETTINGS index_granularity = 1; + +INSERT INTO test1 SELECT toString(number) FROM numbers(10000); +INSERT INTO test2 SELECT toString(number) FROM numbers(10000); + +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. 
Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. +SET parallel_replicas_index_analysis_only_on_coordinator = 0; + +SELECT s FROM test1 WHERE toString(s) = '1234' SETTINGS max_rows_to_read = 2; +SELECT s FROM test2 WHERE toString(s) = '1234' SETTINGS max_rows_to_read = 2; + +DROP TABLE test1; +DROP TABLE test2; diff --git a/parser/testdata/01236_graphite_mt/ast.json b/parser/testdata/01236_graphite_mt/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01236_graphite_mt/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01236_graphite_mt/metadata.json b/parser/testdata/01236_graphite_mt/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01236_graphite_mt/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01236_graphite_mt/query.sql b/parser/testdata/01236_graphite_mt/query.sql new file mode 100644 index 000000000..0ec905fa0 --- /dev/null +++ b/parser/testdata/01236_graphite_mt/query.sql @@ -0,0 +1,41 @@ + +-- Use DateTime('UTC') to have a common rollup window +drop table if exists test_graphite; +create table test_graphite (key UInt32, Path String, Time DateTime('UTC'), Value Float64, Version UInt32, col UInt64) + engine = GraphiteMergeTree('graphite_rollup') order by key settings index_granularity=10; + +SET joined_subquery_requires_alias = 0; + +INSERT into test_graphite +WITH dates AS + ( + SELECT toStartOfDay(toDateTime(now('UTC'), 'UTC')) as today, + today - INTERVAL 3 day as older_date + ) + -- Newer than 2 days are kept in windows of 600 seconds + select 1 AS key, 'sum_1' AS s, today - number * 60 - 30, number, 1, number from dates, numbers(300) union all + select 2, 'sum_1', today - number * 60 - 30, number, 1, number from dates, numbers(300) union all + select 1, 'sum_2', today - number * 60 - 30, number, 1, number from dates, numbers(300) union all + select 2, 'sum_2', today - number * 60 - 30, number, 1, number from dates, numbers(300) union all + select 1, 'max_1', today - number * 60 - 30, number, 1, number from dates, numbers(300) union all + select 2, 'max_1', today - number * 60 - 30, number, 1, number from dates, numbers(300) union all + select 1, 'max_2', today - number * 60 - 30, number, 1, number from dates, numbers(300) union all + select 2, 'max_2', today - number * 60 - 30, number, 1, number from dates, numbers(300) union all + + -- Older than 2 days use 6000 second windows + select 1 AS key, 'sum_1' AS s, older_date - number * 60 - 30, number, 1, number from dates, numbers(1200) union all + select 2, 'sum_1', older_date - number * 60 - 30, number, 1, number from dates, numbers(1200) union all + select 1, 'sum_2', older_date - number * 60 - 30, number, 1, number from dates, numbers(1200) union all + select 2, 'sum_2', older_date - number * 60 - 30, number, 1, number from dates, numbers(1200) union all + select 1, 'max_1', older_date - number * 60 - 30, number, 1, number from dates, numbers(1200) union all + select 2, 'max_1', older_date - number * 60 - 30, number, 1, number from dates, numbers(1200) union all + select 1, 'max_2', older_date - number * 60 - 30, number, 1, number from dates, numbers(1200) union all + select 2, 'max_2', older_date - number * 60 - 30, number, 1, number from dates, numbers(1200); + +select key, Path, Value, Version, col from test_graphite final order by key, Path, Time desc; + +optimize table test_graphite final; + +select key, Path, Value, Version, col from test_graphite 
order by key, Path, Time desc; + +drop table test_graphite; diff --git a/parser/testdata/01240_join_get_or_null/ast.json b/parser/testdata/01240_join_get_or_null/ast.json new file mode 100644 index 000000000..d08794bee --- /dev/null +++ b/parser/testdata/01240_join_get_or_null/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery join_test (children 1)" + }, + { + "explain": " Identifier join_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00119669, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/01240_join_get_or_null/metadata.json b/parser/testdata/01240_join_get_or_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01240_join_get_or_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01240_join_get_or_null/query.sql b/parser/testdata/01240_join_get_or_null/query.sql new file mode 100644 index 000000000..eb81860bf --- /dev/null +++ b/parser/testdata/01240_join_get_or_null/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS join_test; + +CREATE TABLE join_test (id UInt16, num UInt16) engine = Join(ANY, LEFT, id); +SELECT joinGetOrNull('join_test', 'num', 500); +DROP TABLE join_test; + +CREATE TABLE join_test (id UInt16, num Nullable(UInt16)) engine = Join(ANY, LEFT, id); +SELECT joinGetOrNull('join_test', 'num', 500); +DROP TABLE join_test; + +CREATE TABLE join_test (id UInt16, num Array(UInt16)) engine = Join(ANY, LEFT, id); +SELECT joinGetOrNull('join_test', 'num', 500); +DROP TABLE join_test; + +drop table if exists test; +create table test (x Date, y String) engine Join(ANY, LEFT, x); +insert into test values ('2017-04-01', '1396-01-12') ,('2017-04-02', '1396-01-13'); + +WITH + A as (SELECT rowNumberInAllBlocks() R, addDays(toDate('2017-04-01'), R) TVV from numbers(5)), + B as (SELECT rowNumberInAllBlocks() R, toDateTime(NULL) TVV from numbers(1)) +SELECT + joinGetOrNull('test', 'y', toDate(A.TVV) ) TV1 +from A LEFT JOIN B USING (R) order by TV1; diff --git a/parser/testdata/01244_optimize_distributed_group_by_sharding_key/ast.json b/parser/testdata/01244_optimize_distributed_group_by_sharding_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01244_optimize_distributed_group_by_sharding_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01244_optimize_distributed_group_by_sharding_key/metadata.json b/parser/testdata/01244_optimize_distributed_group_by_sharding_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01244_optimize_distributed_group_by_sharding_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01244_optimize_distributed_group_by_sharding_key/query.sql b/parser/testdata/01244_optimize_distributed_group_by_sharding_key/query.sql new file mode 100644 index 000000000..c239efd14 --- /dev/null +++ b/parser/testdata/01244_optimize_distributed_group_by_sharding_key/query.sql @@ -0,0 +1,131 @@ +-- Tags: distributed + +-- TODO: correct testing with real unique shards + +set optimize_distributed_group_by_sharding_key=1; + +-- Some queries in this test require sorting after aggregation. 
+set max_bytes_before_external_group_by = 0; +set max_bytes_ratio_before_external_group_by = 0; + +drop table if exists dist_01247; +drop table if exists data_01247; + +create table data_01247 as system.numbers engine=Memory(); +insert into data_01247 select * from system.numbers limit 2; +create table dist_01247 as data_01247 engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01247, number); +-- since data is not inserted via distributed it will have duplicates +-- (and this is how we ensure that this optimization will work) + +set max_distributed_connections=1; +set prefer_localhost_replica=0; +set enable_positional_arguments=0; + +select '-'; +select * from dist_01247; + +select 'optimize_skip_unused_shards'; +set optimize_skip_unused_shards=1; +select * from dist_01247; + +select 'GROUP BY number'; +select count(), * from dist_01247 group by number; +select 'GROUP BY number distributed_group_by_no_merge'; +select count(), * from dist_01247 group by number settings distributed_group_by_no_merge=1; + +-- dumb, but should work, since "GROUP BY 1" is optimized out +select 'GROUP BY number, 1'; +select count(), * from dist_01247 group by number, 1; +select 'GROUP BY 1'; +select count(), min(number) from dist_01247 group by 1; + +select 'GROUP BY number ORDER BY number DESC'; +select count(), * from dist_01247 group by number order by number desc; + +select 'GROUP BY toString(number)'; +select count(), any(number) from dist_01247 group by toString(number); + +select 'GROUP BY number%2'; +select count(), any(number) from dist_01247 group by number%2; + +select 'countDistinct'; +select count(DISTINCT number) from dist_01247; + +select 'countDistinct GROUP BY number'; +select count(DISTINCT number) from dist_01247 group by number; + +select 'DISTINCT'; +select DISTINCT number from dist_01247; + +select 'HAVING'; +select count() cnt, * from dist_01247 group by number having cnt == 2; + +select 'HAVING LIMIT'; +select count() cnt, * from dist_01247 group by number having cnt == 1 limit 1; + +select 'LIMIT'; +select count(), * from dist_01247 group by number limit 1; +select 'LIMIT OFFSET'; +select count(), * from dist_01247 group by number limit 1 offset 1; +select 'OFFSET distributed_push_down_limit=0'; +select count(), * from dist_01247 group by number offset 1 settings distributed_push_down_limit=0; +select 'OFFSET distributed_push_down_limit=1'; +select count(), * from dist_01247 group by number order by count(), number offset 1 settings distributed_push_down_limit=1; +-- this will emulate different data for different shards +select 'WHERE LIMIT OFFSET'; +select count(), * from dist_01247 where number = _shard_num-1 group by number order by number limit 1 offset 1; + +select 'LIMIT BY 1'; +select count(), * from dist_01247 group by number order by number limit 1 by number; + +select 'GROUP BY (Distributed-over-Distributed)'; +select count(), * from cluster(test_cluster_two_shards, currentDatabase(), dist_01247) group by number order by number; +select 'GROUP BY (Distributed-over-Distributed) distributed_group_by_no_merge'; +select count(), * from cluster(test_cluster_two_shards, currentDatabase(), dist_01247) group by number order by number settings distributed_group_by_no_merge=1; + +select 'GROUP BY (extremes)'; +select count(), * from dist_01247 group by number settings extremes=1; + +select 'LIMIT (extremes)'; +select count(), * from dist_01247 group by number limit 1 settings extremes=1; + +select 'GROUP BY WITH TOTALS'; +select count(), * from dist_01247 group by number
with totals; +select 'GROUP BY WITH ROLLUP'; +select count(), * from dist_01247 group by number with rollup; +select 'GROUP BY WITH CUBE'; +select count(), * from dist_01247 group by number with cube; + +select 'GROUP BY WITH TOTALS ORDER BY'; +select count(), * from dist_01247 group by number with totals order by number; + +select 'GROUP BY WITH TOTALS ORDER BY LIMIT'; +select count(), * from dist_01247 group by number with totals order by number limit 1; + +select 'GROUP BY WITH TOTALS LIMIT'; +select count(), * from dist_01247 group by number with totals limit 1; + +-- GROUP BY (compound) +select 'GROUP BY (compound)'; +drop table if exists dist_01247; +drop table if exists data_01247; +create table data_01247 engine=Memory() as select number key, 0 value from numbers(2); +create table dist_01247 as data_01247 engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01247, key); +select 'GROUP BY sharding_key, ...'; +select * from dist_01247 group by key, value; +select 'GROUP BY ..., sharding_key'; +select * from dist_01247 group by value, key; + +-- sharding_key (compound) +select 'sharding_key (compound)'; +select k1, k2, sum(v) from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v), cityHash64(k1, k2)) group by k1, k2; -- optimization applied +select k1, any(k2), sum(v) from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v), cityHash64(k1, k2)) group by k1; -- optimization is not applied +select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v), cityHash64(k1, k2)); -- optimization applied +select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v), cityHash64(k1, k2)); -- optimization is not applied + +-- window functions +select 'window functions'; +select key, sum(sum(value)) over (rows unbounded preceding) from dist_01247 group by key; + +drop table dist_01247; +drop table data_01247; diff --git a/parser/testdata/01245_distributed_group_by_no_merge_with-extremes_and_totals/ast.json b/parser/testdata/01245_distributed_group_by_no_merge_with-extremes_and_totals/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01245_distributed_group_by_no_merge_with-extremes_and_totals/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01245_distributed_group_by_no_merge_with-extremes_and_totals/metadata.json b/parser/testdata/01245_distributed_group_by_no_merge_with-extremes_and_totals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01245_distributed_group_by_no_merge_with-extremes_and_totals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01245_distributed_group_by_no_merge_with-extremes_and_totals/query.sql b/parser/testdata/01245_distributed_group_by_no_merge_with-extremes_and_totals/query.sql new file mode 100644 index 000000000..3f7632baf --- /dev/null +++ b/parser/testdata/01245_distributed_group_by_no_merge_with-extremes_and_totals/query.sql @@ -0,0 +1,108 @@ +-- Tags: distributed + +SELECT sum(number) FROM (SELECT * FROM remote('127.0.0.{1,2}', system.numbers) LIMIT 5 SETTINGS distributed_group_by_no_merge = 1); +SELECT sum(number) FROM (SELECT * FROM remote('127.0.0.{1,2}', system.numbers) LIMIT 5 SETTINGS distributed_group_by_no_merge = 1) with totals; + +SELECT 'distributed_group_by_no_merge = 0, extremes = 0'; +SET distributed_group_by_no_merge = 0, extremes = 0; +SELECT sum(number) FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 5); +SELECT '-'; +SELECT sum(number) FROM (SELECT *
FROM remote('127.0.0.2', system.numbers) LIMIT 5); +SELECT '-'; +SELECT sum(number) FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 5) with totals; +SELECT '-'; +SELECT sum(number) FROM (SELECT * FROM remote('127.0.0.2', system.numbers) LIMIT 5) with totals; +SELECT '------'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.1', numbers(5)) with totals); +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.2', numbers(5)) with totals); +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.{1,2}', numbers(5)) with totals); +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.{2,3}', numbers(5)) with totals); +SELECT '------'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.1', numbers(5))) with totals; +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.2', numbers(5))) with totals; +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.{1,2}', numbers(5))) with totals; +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.{2,3}', numbers(5))) with totals; + +SELECT 'distributed_group_by_no_merge = 1, extremes = 0'; +SET distributed_group_by_no_merge = 1, extremes = 0; +SELECT sum(number) FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 5); +SELECT '-'; +SELECT sum(number) FROM (SELECT * FROM remote('127.0.0.2', system.numbers) LIMIT 5); +SELECT '-'; +SELECT sum(number) FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 5) with totals; +SELECT '-'; +SELECT sum(number) FROM (SELECT * FROM remote('127.0.0.2', system.numbers) LIMIT 5) with totals; +SELECT '------'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.1', numbers(5)) with totals); +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.2', numbers(5)) with totals); +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.{1,2}', numbers(5)) with totals); +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.{2,3}', numbers(5)) with totals); +SELECT '------'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.1', numbers(5))) with totals; +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.2', numbers(5))) with totals; +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.{1,2}', numbers(5))) with totals; +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.{2,3}', numbers(5))) with totals; + +SELECT 'distributed_group_by_no_merge = 0, extremes = 1'; +SET distributed_group_by_no_merge = 0, extremes = 1; +SELECT sum(number) FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 5); +SELECT '-'; +SELECT sum(number) FROM (SELECT * FROM remote('127.0.0.2', system.numbers) LIMIT 5); +SELECT '-'; +SELECT sum(number) FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 5) with totals; +SELECT '-'; +SELECT sum(number) FROM (SELECT * FROM remote('127.0.0.2', system.numbers) LIMIT 5) with totals; +SELECT '------'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.1', numbers(5)) with totals); +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.2', numbers(5)) with totals); +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.{1,2}', numbers(5)) with totals); +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.{2,3}', numbers(5)) with 
totals); +SELECT '------'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.1', numbers(5))) with totals; +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.2', numbers(5))) with totals; +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.{1,2}', numbers(5))) with totals; +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.{2,3}', numbers(5))) with totals; + +SELECT 'distributed_group_by_no_merge = 1, extremes = 1'; +SET distributed_group_by_no_merge = 1, extremes = 1; +SELECT sum(number) FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 5); +SELECT '-'; +SELECT sum(number) FROM (SELECT * FROM remote('127.0.0.2', system.numbers) LIMIT 5); +SELECT '-'; +SELECT sum(number) FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 5) with totals; +SELECT '-'; +SELECT sum(number) FROM (SELECT * FROM remote('127.0.0.2', system.numbers) LIMIT 5) with totals; +SELECT '------'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.1', numbers(5)) with totals); +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.2', numbers(5)) with totals); +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.{1,2}', numbers(5)) with totals); +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.{2,3}', numbers(5)) with totals); +SELECT '------'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.1', numbers(5))) with totals; +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.2', numbers(5))) with totals; +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.{1,2}', numbers(5))) with totals; +SELECT '-'; +SELECT sum(s) FROM (SELECT sum(number) as s FROM remote('127.0.0.{2,3}', numbers(5))) with totals; diff --git a/parser/testdata/01245_limit_infinite_sources/ast.json b/parser/testdata/01245_limit_infinite_sources/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01245_limit_infinite_sources/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01245_limit_infinite_sources/metadata.json b/parser/testdata/01245_limit_infinite_sources/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01245_limit_infinite_sources/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01245_limit_infinite_sources/query.sql b/parser/testdata/01245_limit_infinite_sources/query.sql new file mode 100644 index 000000000..bd4c65e98 --- /dev/null +++ b/parser/testdata/01245_limit_infinite_sources/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-asan, no-tsan, no-msan, no-ubsan + +SELECT number +FROM +( + SELECT zero AS number + FROM remote('127.0.0.2', system.zeros) + UNION ALL + SELECT number + sleep(0.5) + FROM system.numbers +) +WHERE number = 1 +LIMIT 1 +SETTINGS max_rows_to_read = 0; diff --git a/parser/testdata/01246_extractAllGroupsHorizontal/ast.json b/parser/testdata/01246_extractAllGroupsHorizontal/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01246_extractAllGroupsHorizontal/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01246_extractAllGroupsHorizontal/metadata.json b/parser/testdata/01246_extractAllGroupsHorizontal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01246_extractAllGroupsHorizontal/metadata.json @@ -0,0 +1 @@ +{"todo": 
true} diff --git a/parser/testdata/01246_extractAllGroupsHorizontal/query.sql b/parser/testdata/01246_extractAllGroupsHorizontal/query.sql new file mode 100644 index 000000000..baa39ca30 --- /dev/null +++ b/parser/testdata/01246_extractAllGroupsHorizontal/query.sql @@ -0,0 +1,53 @@ +-- error cases +SELECT extractAllGroupsHorizontal(); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} not enough arguments +SELECT extractAllGroupsHorizontal('hello'); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} not enough arguments +SELECT extractAllGroupsHorizontal('hello', 123); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} invalid argument type +SELECT extractAllGroupsHorizontal(123, 'world'); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} invalid argument type +SELECT extractAllGroupsHorizontal('hello world', '((('); --{serverError CANNOT_COMPILE_REGEXP} invalid regexp +SELECT extractAllGroupsHorizontal('hello world', materialize('\\w+')); --{serverError ILLEGAL_COLUMN} non-const needle +SELECT extractAllGroupsHorizontal('hello world', '\\w+'); -- { serverError BAD_ARGUMENTS } 0 groups +SELECT extractAllGroupsHorizontal('hello world', '(\\w+)') SETTINGS regexp_max_matches_per_row = 0; -- { serverError TOO_LARGE_ARRAY_SIZE } too many groups matched per row +SELECT extractAllGroupsHorizontal('hello world', '(\\w+)') SETTINGS regexp_max_matches_per_row = 1; -- { serverError TOO_LARGE_ARRAY_SIZE } too many groups matched per row + +SELECT extractAllGroupsHorizontal('hello world', '(\\w+)') SETTINGS regexp_max_matches_per_row = 1000000 FORMAT Null; -- users can now set a limit bigger than the previous 1000 matches per row + +SELECT '1 group, multiple matches, String and FixedString'; +SELECT extractAllGroupsHorizontal('hello world', '(\\w+)'); +SELECT extractAllGroupsHorizontal('hello world', CAST('(\\w+)' as FixedString(5))); +SELECT extractAllGroupsHorizontal(CAST('hello world' AS FixedString(12)), '(\\w+)'); +SELECT extractAllGroupsHorizontal(CAST('hello world' AS FixedString(12)), CAST('(\\w+)' as FixedString(5))); +SELECT extractAllGroupsHorizontal(materialize(CAST('hello world' AS FixedString(12))), '(\\w+)'); +SELECT extractAllGroupsHorizontal(materialize(CAST('hello world' AS FixedString(12))), CAST('(\\w+)' as FixedString(5))); + +SELECT 'multiple groups, multiple matches'; +SELECT extractAllGroupsHorizontal('abc=111, def=222, ghi=333 "jkl mno"="444 foo bar"', '("[^"]+"|\\w+)=("[^"]+"|\\w+)'); + +SELECT 'big match'; +SELECT + length(haystack), length(matches), length(matches[1]), arrayMap((x) -> length(x), matches[1]) +FROM ( + SELECT + repeat('abcdefghijklmnopqrstuvwxyz', number * 10) AS haystack, + extractAllGroupsHorizontal(haystack, '(abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz)') AS matches + FROM numbers(3) +); + +SELECT 'lots of matches'; +SELECT + length(haystack), length(matches), length(matches[1]), arrayReduce('sum', arrayMap((x) -> length(x), matches[1])) +FROM ( + SELECT + repeat('abcdefghijklmnopqrstuvwxyz', number * 10) AS haystack, + extractAllGroupsHorizontal(haystack, '(\\w)') AS matches + FROM numbers(3) +); + +SELECT 'lots of groups'; +SELECT + length(haystack), length(matches), length(matches[1]), arrayMap((x) -> length(x), matches[1]) +FROM ( + SELECT + repeat('abcdefghijklmnopqrstuvwxyz', number * 10) AS haystack, + extractAllGroupsHorizontal(haystack, repeat('(\\w)', 100)) AS matches + FROM numbers(3) +); diff --git a/parser/testdata/01246_extractAllGroupsVertical/ast.json
b/parser/testdata/01246_extractAllGroupsVertical/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01246_extractAllGroupsVertical/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01246_extractAllGroupsVertical/metadata.json b/parser/testdata/01246_extractAllGroupsVertical/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01246_extractAllGroupsVertical/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01246_extractAllGroupsVertical/query.sql b/parser/testdata/01246_extractAllGroupsVertical/query.sql new file mode 100644 index 000000000..749980275 --- /dev/null +++ b/parser/testdata/01246_extractAllGroupsVertical/query.sql @@ -0,0 +1,49 @@ +-- error cases +SELECT extractAllGroupsVertical(); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} not enough arguments +SELECT extractAllGroupsVertical('hello'); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} not enough arguments +SELECT extractAllGroupsVertical('hello', 123); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} invalid argument type +SELECT extractAllGroupsVertical(123, 'world'); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} invalid argument type +SELECT extractAllGroupsVertical('hello world', '((('); --{serverError CANNOT_COMPILE_REGEXP} invalid regexp +SELECT extractAllGroupsVertical('hello world', materialize('\\w+')); --{serverError ILLEGAL_COLUMN} non-const needle +SELECT extractAllGroupsVertical('hello world', '\\w+'); -- { serverError BAD_ARGUMENTS } 0 groups + +SELECT '1 group, multiple matches, String and FixedString'; +SELECT extractAllGroupsVertical('hello world', '(\\w+)'); +SELECT extractAllGroupsVertical('hello world', CAST('(\\w+)' as FixedString(5))); +SELECT extractAllGroupsVertical(CAST('hello world' AS FixedString(12)), '(\\w+)'); +SELECT extractAllGroupsVertical(CAST('hello world' AS FixedString(12)), CAST('(\\w+)' as FixedString(5))); +SELECT extractAllGroupsVertical(materialize(CAST('hello world' AS FixedString(12))), '(\\w+)'); +SELECT extractAllGroupsVertical(materialize(CAST('hello world' AS FixedString(12))), CAST('(\\w+)' as FixedString(5))); + +SELECT 'multiple groups, multiple matches'; +SELECT extractAllGroupsVertical('abc=111, def=222, ghi=333 "jkl mno"="444 foo bar"', '("[^"]+"|\\w+)=("[^"]+"|\\w+)'); + +SELECT 'big match'; +SELECT + length(haystack), length(matches[1]), length(matches), arrayMap((x) -> length(x), arrayMap(x -> x[1], matches)) +FROM ( + SELECT + repeat('abcdefghijklmnopqrstuvwxyz', number * 10) AS haystack, + extractAllGroupsVertical(haystack, '(abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz)') AS matches + FROM numbers(3) +); + +SELECT 'lots of matches'; +SELECT + length(haystack), length(matches[1]), length(matches), arrayReduce('sum', arrayMap((x) -> length(x), arrayMap(x -> x[1], matches))) +FROM ( + SELECT + repeat('abcdefghijklmnopqrstuvwxyz', number * 10) AS haystack, + extractAllGroupsVertical(haystack, '(\\w)') AS matches + FROM numbers(3) +); + +SELECT 'lots of groups'; +SELECT + length(haystack), length(matches[1]), length(matches), arrayMap((x) -> length(x), arrayMap(x -> x[1], matches)) +FROM ( + SELECT + repeat('abcdefghijklmnopqrstuvwxyz', number * 10) AS haystack, + extractAllGroupsVertical(haystack, repeat('(\\w)', 100)) AS matches + FROM numbers(3) +); diff --git a/parser/testdata/01246_finalize_aggregation_race/ast.json
b/parser/testdata/01246_finalize_aggregation_race/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01246_finalize_aggregation_race/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01246_finalize_aggregation_race/metadata.json b/parser/testdata/01246_finalize_aggregation_race/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01246_finalize_aggregation_race/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01246_finalize_aggregation_race/query.sql b/parser/testdata/01246_finalize_aggregation_race/query.sql new file mode 100644 index 000000000..e63f7fb09 --- /dev/null +++ b/parser/testdata/01246_finalize_aggregation_race/query.sql @@ -0,0 +1,26 @@ +-- Tags: race + +drop table if exists test_quantile; +create table test_quantile (x AggregateFunction(quantileTiming(0.2), UInt64)) engine = Memory; +insert into test_quantile select medianTimingState(.2)(number) from (select * from numbers(1000) order by number desc); +select y from ( +select finalizeAggregation(x) as y from test_quantile union all +select finalizeAggregation(x) as y from test_quantile union all +select finalizeAggregation(x) as y from test_quantile union all +select finalizeAggregation(x) as y from test_quantile union all +select finalizeAggregation(x) as y from test_quantile union all +select finalizeAggregation(x) as y from test_quantile union all +select finalizeAggregation(x) as y from test_quantile union all +select finalizeAggregation(x) as y from test_quantile union all +select finalizeAggregation(x) as y from test_quantile union all +select finalizeAggregation(x) as y from test_quantile union all +select finalizeAggregation(x) as y from test_quantile union all +select finalizeAggregation(x) as y from test_quantile union all +select finalizeAggregation(x) as y from test_quantile union all +select finalizeAggregation(x) as y from test_quantile union all +select finalizeAggregation(x) as y from test_quantile union all +select finalizeAggregation(x) as y from test_quantile union all +select finalizeAggregation(x) as y from test_quantile union all +select finalizeAggregation(x) as y from test_quantile) +order by y; +drop table test_quantile; diff --git a/parser/testdata/01246_least_greatest_generic/ast.json b/parser/testdata/01246_least_greatest_generic/ast.json new file mode 100644 index 000000000..4f7e30b65 --- /dev/null +++ b/parser/testdata/01246_least_greatest_generic/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function least (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'hello'" + }, + { + "explain": " Literal 'world'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001002197, + "rows_read": 8, + "bytes_read": 286 + } +} diff --git a/parser/testdata/01246_least_greatest_generic/metadata.json b/parser/testdata/01246_least_greatest_generic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01246_least_greatest_generic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01246_least_greatest_generic/query.sql b/parser/testdata/01246_least_greatest_generic/query.sql 
new file mode 100644 index 000000000..2744531ea --- /dev/null +++ b/parser/testdata/01246_least_greatest_generic/query.sql @@ -0,0 +1,36 @@ +SELECT least('hello', 'world'); +SELECT greatest('hello', 'world'); +SELECT least('hello', 'world', ''); +SELECT greatest('hello', 'world', 'z'); + +SELECT least('hello'); +SELECT greatest('world'); + +SELECT least(1, inf, nan); +SELECT least(1, inf, nan, NULL); +SELECT greatest(1, inf, nan, NULL); +SELECT greatest(1, inf, nan); +SELECT greatest(1, inf); + +SELECT least(0., -0.); +SELECT least(toNullable(123), 456); + +SELECT LEAST(-1, 18446744073709551615) x, toTypeName(x); +-- This can be improved +SELECT LEAST(-1., 18446744073709551615); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT LEAST(-1., 18446744073709551615.); +SELECT greatest(-1, 1, 4294967295); + +SELECT greatest([], ['hello'], ['world']); + +SELECT least([[[], []]], [[[]]], [[[]], [[]]]); +SELECT greatest([[[], []]], [[[]]], [[[]], [[]]]); + +SELECT least([], [NULL]); +SELECT greatest([], [NULL]); + +SELECT LEAST([NULL], [0]); +SELECT GREATEST([NULL], [0]); + +SELECT Greatest(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/parser/testdata/01247_least_greatest_filimonov/ast.json b/parser/testdata/01247_least_greatest_filimonov/ast.json new file mode 100644 index 000000000..347d7e98a --- /dev/null +++ b/parser/testdata/01247_least_greatest_filimonov/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function GREATEST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.000993166, + "rows_read": 8, + "bytes_read": 291 + } +} diff --git a/parser/testdata/01247_least_greatest_filimonov/metadata.json b/parser/testdata/01247_least_greatest_filimonov/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01247_least_greatest_filimonov/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01247_least_greatest_filimonov/query.sql b/parser/testdata/01247_least_greatest_filimonov/query.sql new file mode 100644 index 000000000..b845d65dc --- /dev/null +++ b/parser/testdata/01247_least_greatest_filimonov/query.sql @@ -0,0 +1,3 @@ +SELECT GREATEST(2,0); +SELECT GREATEST(34.0,3.0,5.0,767.0); +SELECT GREATEST('B','A','C'); diff --git a/parser/testdata/01247_optimize_distributed_group_by_sharding_key_dist_on_dist/ast.json b/parser/testdata/01247_optimize_distributed_group_by_sharding_key_dist_on_dist/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01247_optimize_distributed_group_by_sharding_key_dist_on_dist/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01247_optimize_distributed_group_by_sharding_key_dist_on_dist/metadata.json b/parser/testdata/01247_optimize_distributed_group_by_sharding_key_dist_on_dist/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01247_optimize_distributed_group_by_sharding_key_dist_on_dist/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01247_optimize_distributed_group_by_sharding_key_dist_on_dist/query.sql 
b/parser/testdata/01247_optimize_distributed_group_by_sharding_key_dist_on_dist/query.sql new file mode 100644 index 000000000..05d8b36a4 --- /dev/null +++ b/parser/testdata/01247_optimize_distributed_group_by_sharding_key_dist_on_dist/query.sql @@ -0,0 +1,43 @@ +-- Tags: distributed + +-- TODO: proper testing with truly unique shards + +set optimize_distributed_group_by_sharding_key=1; + +drop table if exists dist_01247; +drop table if exists dist_layer_01247; +drop table if exists data_01247; + +create table data_01247 as system.numbers engine=Memory(); +-- Since the data is not inserted via the Distributed table, it will contain duplicates +-- (and this is how we verify that the optimization works) +insert into data_01247 select * from system.numbers limit 2; + +set max_distributed_connections=1; +set optimize_skip_unused_shards=1; + +select 'Distributed(number)-over-Distributed(number)'; +create table dist_layer_01247 as data_01247 engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01247, number); +create table dist_01247 as data_01247 engine=Distributed(test_cluster_two_shards, currentDatabase(), dist_layer_01247, number); +select count(), * from dist_01247 group by number order by number limit 1 settings prefer_localhost_replica=1; +select '-'; +-- Currently, the sharding-key optimization is not supported for Distributed-over-Distributed with a serialized query plan. +select count(), * from dist_01247 group by number order by number limit 1 settings prefer_localhost_replica=0, serialize_query_plan=1, enable_analyzer=1; +drop table if exists dist_01247; +drop table if exists dist_layer_01247; + +select 'Distributed(rand)-over-Distributed(number)'; +create table dist_layer_01247 as data_01247 engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01247, number); +create table dist_01247 as data_01247 engine=Distributed(test_cluster_two_shards, currentDatabase(), dist_layer_01247, rand()); +select count(), * from dist_01247 group by number order by number limit 1; +drop table if exists dist_01247; +drop table if exists dist_layer_01247; + +select 'Distributed(rand)-over-Distributed(rand)'; +create table dist_layer_01247 as data_01247 engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01247, rand()); +create table dist_01247 as data_01247 engine=Distributed(test_cluster_two_shards, currentDatabase(), dist_layer_01247, number); +select count(), * from dist_01247 group by number order by number limit 1; + +drop table dist_01247; +drop table dist_layer_01247; +drop table data_01247; diff --git a/parser/testdata/01247_some_msan_crashs_from_22517/ast.json b/parser/testdata/01247_some_msan_crashs_from_22517/ast.json new file mode 100644 index 000000000..c15497472 --- /dev/null +++ b/parser/testdata/01247_some_msan_crashs_from_22517/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 2)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Identifier a" + }, + { + "explain": "   TablesInSelectQuery (children 1)" + }, + { + "explain": "    TablesInSelectQueryElement (children 1)" + }, + { + "explain": "     TableExpression (children 1)" + }, + { + "explain": "      Subquery (children 1)" + }, + { + "explain": "       SelectWithUnionQuery (children 1)" + }, + { + "explain": "        ExpressionList (children 1)" + }, + { + "explain": "         SelectQuery (children 1)" + }, + { +
"explain": " ExpressionList (children 2)" + }, + { + "explain": " Function ignore (alias a) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Identifier a (alias b)" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001088912, + "rows_read": 22, + "bytes_read": 954 + } +} diff --git a/parser/testdata/01247_some_msan_crashs_from_22517/metadata.json b/parser/testdata/01247_some_msan_crashs_from_22517/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01247_some_msan_crashs_from_22517/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01247_some_msan_crashs_from_22517/query.sql b/parser/testdata/01247_some_msan_crashs_from_22517/query.sql new file mode 100644 index 000000000..973ec67ba --- /dev/null +++ b/parser/testdata/01247_some_msan_crashs_from_22517/query.sql @@ -0,0 +1,3 @@ +SELECT a FROM (SELECT ignore((SELECT 1)) AS a, a AS b); + +SELECT x FROM (SELECT dummy AS x, plus(ignore(ignore(ignore(ignore('-922337203.6854775808', ignore(NULL)), ArrLen = 256, ignore(100, Arr.C3, ignore(NULL), (SELECT 10.000100135803223, count(*) FROM system.time_zones) > NULL)))), dummy, 65535) AS dummy ORDER BY ignore(-2) ASC, identity(x) DESC NULLS FIRST) FORMAT Null; -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/01248_least_greatest_mixed_const/ast.json b/parser/testdata/01248_least_greatest_mixed_const/ast.json new file mode 100644 index 000000000..36c2b29c1 --- /dev/null +++ b/parser/testdata/01248_least_greatest_mixed_const/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function least (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_6" + }, + { + "explain": " Function greatest (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_6" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001319347, + "rows_read": 20, + "bytes_read": 745 + } +} diff --git a/parser/testdata/01248_least_greatest_mixed_const/metadata.json b/parser/testdata/01248_least_greatest_mixed_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01248_least_greatest_mixed_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01248_least_greatest_mixed_const/query.sql b/parser/testdata/01248_least_greatest_mixed_const/query.sql new 
file mode 100644 index 000000000..3fcf20623 --- /dev/null +++ b/parser/testdata/01248_least_greatest_mixed_const/query.sql @@ -0,0 +1 @@ +SELECT least(4, number, 6), greatest(4, number, 6) FROM numbers(10); diff --git a/parser/testdata/01249_bad_arguments_for_bloom_filter/ast.json b/parser/testdata/01249_bad_arguments_for_bloom_filter/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01249_bad_arguments_for_bloom_filter/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01249_bad_arguments_for_bloom_filter/metadata.json b/parser/testdata/01249_bad_arguments_for_bloom_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01249_bad_arguments_for_bloom_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01249_bad_arguments_for_bloom_filter/query.sql b/parser/testdata/01249_bad_arguments_for_bloom_filter/query.sql new file mode 100644 index 000000000..afb387d67 --- /dev/null +++ b/parser/testdata/01249_bad_arguments_for_bloom_filter/query.sql @@ -0,0 +1,27 @@ +-- Tags: no-parallel + +SET send_logs_level = 'fatal'; + +DROP DATABASE IF EXISTS test_01249; +set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. +CREATE DATABASE test_01249 ENGINE=Ordinary; -- Full ATTACH requires UUID with Atomic +USE test_01249; + +CREATE TABLE bloom_filter_idx_good(`u64` UInt64, `i32` Int32, `f64` Float64, `d` Decimal(10, 2), `s` String, `e` Enum8('a' = 1, 'b' = 2, 'c' = 3), `dt` Date, INDEX bloom_filter_a i32 TYPE bloom_filter(0, 1) GRANULARITY 1) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +CREATE TABLE bloom_filter_idx_good(`u64` UInt64, `i32` Int32, `f64` Float64, `d` Decimal(10, 2), `s` String, `e` Enum8('a' = 1, 'b' = 2, 'c' = 3), `dt` Date, INDEX bloom_filter_a i32 TYPE bloom_filter(-0.1) GRANULARITY 1) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192; -- { serverError BAD_ARGUMENTS } +CREATE TABLE bloom_filter_idx_good(`u64` UInt64, `i32` Int32, `f64` Float64, `d` Decimal(10, 2), `s` String, `e` Enum8('a' = 1, 'b' = 2, 'c' = 3), `dt` Date, INDEX bloom_filter_a i32 TYPE bloom_filter(1.01) GRANULARITY 1) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192; -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS bloom_filter_idx_good; +ATTACH TABLE bloom_filter_idx_good(`u64` UInt64, `i32` Int32, `f64` Float64, `d` Decimal(10, 2), `s` String, `e` Enum8('a' = 1, 'b' = 2, 'c' = 3), `dt` Date, INDEX bloom_filter_a i32 TYPE bloom_filter(0., 1.) 
GRANULARITY 1) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192; +SHOW CREATE TABLE bloom_filter_idx_good; + +DROP TABLE IF EXISTS bloom_filter_idx_good; +ATTACH TABLE bloom_filter_idx_good(`u64` UInt64, `i32` Int32, `f64` Float64, `d` Decimal(10, 2), `s` String, `e` Enum8('a' = 1, 'b' = 2, 'c' = 3), `dt` Date, INDEX bloom_filter_a i32 TYPE bloom_filter(-0.1) GRANULARITY 1) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192; +SHOW CREATE TABLE bloom_filter_idx_good; + +DROP TABLE IF EXISTS bloom_filter_idx_good; +ATTACH TABLE bloom_filter_idx_good(`u64` UInt64, `i32` Int32, `f64` Float64, `d` Decimal(10, 2), `s` String, `e` Enum8('a' = 1, 'b' = 2, 'c' = 3), `dt` Date, INDEX bloom_filter_a i32 TYPE bloom_filter(1.01) GRANULARITY 1) ENGINE = MergeTree() ORDER BY u64 SETTINGS index_granularity = 8192; +SHOW CREATE TABLE bloom_filter_idx_good; + +DROP DATABASE test_01249; diff --git a/parser/testdata/01250_fixed_string_comparison/ast.json b/parser/testdata/01250_fixed_string_comparison/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01250_fixed_string_comparison/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01250_fixed_string_comparison/metadata.json b/parser/testdata/01250_fixed_string_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01250_fixed_string_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01250_fixed_string_comparison/query.sql b/parser/testdata/01250_fixed_string_comparison/query.sql new file mode 100644 index 000000000..d574fd082 --- /dev/null +++ b/parser/testdata/01250_fixed_string_comparison/query.sql @@ -0,0 +1,45 @@ +WITH 'abb' AS b, 'abc' AS c, 'abd' AS d, toFixedString(b, 5) AS bf, toFixedString(c, 5) AS cf, toFixedString(d, 5) AS df +SELECT + b = b, b > b, b < b, + b = c, b > c, b < c, + b = d, b > d, b < d, + b = bf, b > bf, b < bf, + b = cf, b > cf, b < cf, + b = df, b > df, b < df, + + c = b, c > b, c < b, + c = c, c > c, c < c, + c = d, c > d, c < d, + c = bf, c > bf, c < bf, + c = cf, c > cf, c < cf, + c = df, c > df, c < df, + + d = b, d > b, d < b, + d = c, d > c, d < c, + d = d, d > d, d < d, + d = bf, d > bf, d < bf, + d = cf, d > cf, d < cf, + d = df, d > df, d < df, + + bf = b, bf > b, bf < b, + bf = c, bf > c, bf < c, + bf = d, bf > d, bf < d, + bf = bf, bf > bf, bf < bf, + bf = cf, bf > cf, bf < cf, + bf = df, bf > df, bf < df, + + cf = b, cf > b, cf < b, + cf = c, cf > c, cf < c, + cf = d, cf > d, cf < d, + cf = bf, cf > bf, cf < bf, + cf = cf, cf > cf, cf < cf, + cf = df, cf > df, cf < df, + + df = b, df > b, df < b, + df = c, df > c, df < c, + df = d, df > d, df < d, + df = bf, df > bf, df < bf, + df = cf, df > cf, df < cf, + df = df, df > df, df < df + +FORMAT Vertical; diff --git a/parser/testdata/01251_dict_is_in_infinite_loop/ast.json b/parser/testdata/01251_dict_is_in_infinite_loop/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01251_dict_is_in_infinite_loop/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01251_dict_is_in_infinite_loop/metadata.json b/parser/testdata/01251_dict_is_in_infinite_loop/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01251_dict_is_in_infinite_loop/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01251_dict_is_in_infinite_loop/query.sql 
b/parser/testdata/01251_dict_is_in_infinite_loop/query.sql new file mode 100644 index 000000000..4b59ca4d7 --- /dev/null +++ b/parser/testdata/01251_dict_is_in_infinite_loop/query.sql @@ -0,0 +1,99 @@ +-- Tags: no-parallel, no-fasttest + +DROP DATABASE IF EXISTS database_for_dict; +CREATE DATABASE database_for_dict; + +DROP TABLE IF EXISTS database_for_dict.dict_source; +CREATE TABLE database_for_dict.dict_source (id UInt64, parent_id UInt64, value String) ENGINE = Memory; +INSERT INTO database_for_dict.dict_source VALUES (1, 0, 'hello'), (2, 1, 'world'), (3, 2, 'upyachka'), (11, 22, 'a'), (22, 11, 'b'); + +DROP DICTIONARY IF EXISTS database_for_dict.dictionary_with_hierarchy; + +CREATE DICTIONARY database_for_dict.dictionary_with_hierarchy +( + id UInt64, parent_id UInt64 HIERARCHICAL, value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(host 'localhost' port tcpPort() user 'default' db 'database_for_dict' table 'dict_source')) +LAYOUT(HASHED()) +LIFETIME(MIN 1 MAX 1); + +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', toUInt64(2), toUInt64(1)); + +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', toUInt64(22), toUInt64(11)); +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', materialize(toUInt64(22)), toUInt64(11)); +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', toUInt64(11), materialize(toUInt64(22))); +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', materialize(toUInt64(22)), materialize(toUInt64(11))); + +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', toUInt64(22), toUInt64(111)); +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', materialize(toUInt64(22)), toUInt64(111)); +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', toUInt64(11), materialize(toUInt64(222))); +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', materialize(toUInt64(22)), materialize(toUInt64(111))); + +SELECT dictGetHierarchy('database_for_dict.dictionary_with_hierarchy', toUInt64(11)); +SELECT dictGetHierarchy('database_for_dict.dictionary_with_hierarchy', toUInt64(22)); +SELECT dictGetHierarchy('database_for_dict.dictionary_with_hierarchy', materialize(toUInt64(11))); +SELECT dictGetHierarchy('database_for_dict.dictionary_with_hierarchy', materialize(toUInt64(22))); + + +DROP DICTIONARY IF EXISTS database_for_dict.dictionary_with_hierarchy; + +CREATE DICTIONARY database_for_dict.dictionary_with_hierarchy +( + id UInt64, parent_id UInt64 HIERARCHICAL, value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(host 'localhost' port tcpPort() user 'default' db 'database_for_dict' table 'dict_source')) +LAYOUT(FLAT()) +LIFETIME(MIN 1 MAX 1); + +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', toUInt64(2), toUInt64(1)); + +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', toUInt64(22), toUInt64(11)); +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', materialize(toUInt64(22)), toUInt64(11)); +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', toUInt64(11), materialize(toUInt64(22))); +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', materialize(toUInt64(22)), materialize(toUInt64(11))); + +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', toUInt64(22), toUInt64(111)); +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', materialize(toUInt64(22)), toUInt64(111)); +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', toUInt64(11), materialize(toUInt64(222))); +SELECT 
dictIsIn('database_for_dict.dictionary_with_hierarchy', materialize(toUInt64(22)), materialize(toUInt64(111))); + +SELECT dictGetHierarchy('database_for_dict.dictionary_with_hierarchy', toUInt64(11)); +SELECT dictGetHierarchy('database_for_dict.dictionary_with_hierarchy', toUInt64(22)); +SELECT dictGetHierarchy('database_for_dict.dictionary_with_hierarchy', materialize(toUInt64(11))); +SELECT dictGetHierarchy('database_for_dict.dictionary_with_hierarchy', materialize(toUInt64(22))); + + +DROP DICTIONARY IF EXISTS database_for_dict.dictionary_with_hierarchy; + +CREATE DICTIONARY database_for_dict.dictionary_with_hierarchy +( + id UInt64, parent_id UInt64 HIERARCHICAL, value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(host 'localhost' port tcpPort() user 'default' db 'database_for_dict' table 'dict_source')) +LAYOUT(CACHE(SIZE_IN_CELLS 10)) +LIFETIME(MIN 1 MAX 1); + +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', toUInt64(2), toUInt64(1)); + +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', toUInt64(22), toUInt64(11)); +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', materialize(toUInt64(22)), toUInt64(11)); +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', toUInt64(11), materialize(toUInt64(22))); +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', materialize(toUInt64(22)), materialize(toUInt64(11))); + +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', toUInt64(22), toUInt64(111)); +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', materialize(toUInt64(22)), toUInt64(111)); +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', toUInt64(11), materialize(toUInt64(222))); +SELECT dictIsIn('database_for_dict.dictionary_with_hierarchy', materialize(toUInt64(22)), materialize(toUInt64(111))); + +SELECT dictGetHierarchy('database_for_dict.dictionary_with_hierarchy', toUInt64(11)); +SELECT dictGetHierarchy('database_for_dict.dictionary_with_hierarchy', toUInt64(22)); +SELECT dictGetHierarchy('database_for_dict.dictionary_with_hierarchy', materialize(toUInt64(11))); +SELECT dictGetHierarchy('database_for_dict.dictionary_with_hierarchy', materialize(toUInt64(22))); + + +DROP DICTIONARY database_for_dict.dictionary_with_hierarchy; +DROP TABLE database_for_dict.dict_source; +DROP DATABASE database_for_dict; diff --git a/parser/testdata/01251_string_comparison/ast.json b/parser/testdata/01251_string_comparison/ast.json new file mode 100644 index 000000000..073eb38b2 --- /dev/null +++ b/parser/testdata/01251_string_comparison/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function isConstant (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal 'b'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.00146075, + "rows_read": 10, + "bytes_read": 371 + } +} diff --git a/parser/testdata/01251_string_comparison/metadata.json b/parser/testdata/01251_string_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01251_string_comparison/metadata.json @@ -0,0 +1 @@ 
+{"todo": true} diff --git a/parser/testdata/01251_string_comparison/query.sql b/parser/testdata/01251_string_comparison/query.sql new file mode 100644 index 000000000..b2923ba43 --- /dev/null +++ b/parser/testdata/01251_string_comparison/query.sql @@ -0,0 +1 @@ +SELECT isConstant('a' = 'b'); diff --git a/parser/testdata/01252_weird_time_zone/ast.json b/parser/testdata/01252_weird_time_zone/ast.json new file mode 100644 index 000000000..43f99bd4d --- /dev/null +++ b/parser/testdata/01252_weird_time_zone/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal 'Pacific\/Kiritimati'" + }, + { + "explain": " Function toDateTime (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2020-01-02 03:04:05'" + }, + { + "explain": " Literal 'Pacific\/Kiritimati'" + }, + { + "explain": " Function toStartOfDay (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function toHour (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001257948, + "rows_read": 15, + "bytes_read": 586 + } +} diff --git a/parser/testdata/01252_weird_time_zone/metadata.json b/parser/testdata/01252_weird_time_zone/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01252_weird_time_zone/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01252_weird_time_zone/query.sql b/parser/testdata/01252_weird_time_zone/query.sql new file mode 100644 index 000000000..c4919ca4f --- /dev/null +++ b/parser/testdata/01252_weird_time_zone/query.sql @@ -0,0 +1,15 @@ +SELECT 'Pacific/Kiritimati', toDateTime('2020-01-02 03:04:05', 'Pacific/Kiritimati') AS x, toStartOfDay(x), toHour(x); +SELECT 'Africa/El_Aaiun', toDateTime('2020-01-02 03:04:05', 'Africa/El_Aaiun') AS x, toStartOfDay(x), toHour(x); +SELECT 'Asia/Pyongyang', toDateTime('2020-01-02 03:04:05', 'Asia/Pyongyang') AS x, toStartOfDay(x), toHour(x); +SELECT 'Pacific/Kwajalein', toDateTime('2020-01-02 03:04:05', 'Pacific/Kwajalein') AS x, toStartOfDay(x), toHour(x); +SELECT 'Pacific/Apia', toDateTime('2020-01-02 03:04:05', 'Pacific/Apia') AS x, toStartOfDay(x), toHour(x); +SELECT 'Pacific/Enderbury', toDateTime('2020-01-02 03:04:05', 'Pacific/Enderbury') AS x, toStartOfDay(x), toHour(x); +SELECT 'Pacific/Fakaofo', toDateTime('2020-01-02 03:04:05', 'Pacific/Fakaofo') AS x, toStartOfDay(x), toHour(x); + +SELECT 'Pacific/Kiritimati', rand() as r, toHour(toDateTime(r, 'Pacific/Kiritimati') AS t) AS h, t, toTypeName(t) FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; +SELECT 'Africa/El_Aaiun', rand() as r, toHour(toDateTime(r, 'Africa/El_Aaiun') AS t) AS h, t, toTypeName(t) FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; +SELECT 'Asia/Pyongyang', rand() as r, toHour(toDateTime(r, 'Asia/Pyongyang') AS t) AS h, t, toTypeName(t) FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; +SELECT 'Pacific/Kwajalein', rand() as r, toHour(toDateTime(r, 'Pacific/Kwajalein') AS t) AS h, t, toTypeName(t) FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY 
h LIMIT 1 BY h; +SELECT 'Pacific/Apia', rand() as r, toHour(toDateTime(r, 'Pacific/Apia') AS t) AS h, t, toTypeName(t) FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; +SELECT 'Pacific/Enderbury', rand() as r, toHour(toDateTime(r, 'Pacific/Enderbury') AS t) AS h, t, toTypeName(t) FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; +SELECT 'Pacific/Fakaofo', rand() as r, toHour(toDateTime(r, 'Pacific/Fakaofo') AS t) AS h, t, toTypeName(t) FROM numbers(1000000) WHERE h < 0 OR h > 23 ORDER BY h LIMIT 1 BY h; diff --git a/parser/testdata/01253_subquery_in_aggregate_function_JustStranger/ast.json b/parser/testdata/01253_subquery_in_aggregate_function_JustStranger/ast.json new file mode 100644 index 000000000..c0f72aad7 --- /dev/null +++ b/parser/testdata/01253_subquery_in_aggregate_function_JustStranger/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001158106, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01253_subquery_in_aggregate_function_JustStranger/metadata.json b/parser/testdata/01253_subquery_in_aggregate_function_JustStranger/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01253_subquery_in_aggregate_function_JustStranger/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01253_subquery_in_aggregate_function_JustStranger/query.sql b/parser/testdata/01253_subquery_in_aggregate_function_JustStranger/query.sql new file mode 100644 index 000000000..24d17f112 --- /dev/null +++ b/parser/testdata/01253_subquery_in_aggregate_function_JustStranger/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS test_table; +DROP TABLE IF EXISTS test_table_sharded; + +set allow_deprecated_syntax_for_merge_tree=1; +create table + test_table_sharded( + date Date, + text String, + hash UInt64 + ) +engine=MergeTree(date, (hash, date), 8192); + +create table test_table as test_table_sharded +engine=Distributed(test_cluster_two_shards, currentDatabase(), test_table_sharded, hash); + +SET distributed_product_mode = 'local'; +SET distributed_foreground_insert = 1; + +INSERT INTO test_table VALUES ('2020-04-20', 'Hello', 123); + +SELECT + text, + uniqExactIf(hash, hash IN ( + SELECT DISTINCT + hash + FROM test_table AS t1 + )) as counter +FROM test_table AS t2 +GROUP BY text +ORDER BY counter, text; + +DROP TABLE test_table; +DROP TABLE test_table_sharded; diff --git a/parser/testdata/01254_array_of_unnamed_tuples/ast.json b/parser/testdata/01254_array_of_unnamed_tuples/ast.json new file mode 100644 index 000000000..f802d556d --- /dev/null +++ b/parser/testdata/01254_array_of_unnamed_tuples/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mass_table_457 (children 1)" + }, + { + "explain": " Identifier mass_table_457" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000904143, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/01254_array_of_unnamed_tuples/metadata.json b/parser/testdata/01254_array_of_unnamed_tuples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01254_array_of_unnamed_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01254_array_of_unnamed_tuples/query.sql b/parser/testdata/01254_array_of_unnamed_tuples/query.sql new file mode 100644 index 000000000..3660d6662 --- /dev/null +++ b/parser/testdata/01254_array_of_unnamed_tuples/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS mass_table_457; +CREATE TABLE mass_table_457 (key Array(Tuple(Float64, Float64)), name String, value UInt64) ENGINE = Memory; +INSERT INTO mass_table_457 SELECT * FROM generateRandom('`key` Array(Tuple(Float64, Float64)),`name` String,`value` UInt64', 1, 10, 2) LIMIT 10; +SELECT * FROM mass_table_457; +DROP TABLE mass_table_457; diff --git a/parser/testdata/01254_dict_create_without_db/ast.json b/parser/testdata/01254_dict_create_without_db/ast.json new file mode 100644 index 000000000..b68fc13c4 --- /dev/null +++ b/parser/testdata/01254_dict_create_without_db/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery dict_data (children 3)" + }, + { + "explain": " Identifier dict_data" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration key (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " ColumnDeclaration val (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001156797, + "rows_read": 11, + "bytes_read": 395 + } +} diff --git a/parser/testdata/01254_dict_create_without_db/metadata.json b/parser/testdata/01254_dict_create_without_db/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01254_dict_create_without_db/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01254_dict_create_without_db/query.sql b/parser/testdata/01254_dict_create_without_db/query.sql new file mode 100644 index 000000000..2d4da5af9 --- /dev/null +++ b/parser/testdata/01254_dict_create_without_db/query.sql @@ -0,0 +1,16 @@ +CREATE TABLE dict_data (key UInt64, val UInt64) Engine=Memory(); +CREATE DICTIONARY dict +( + key UInt64 DEFAULT 0, + val UInt64 DEFAULT 10 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'dict_data' PASSWORD '' DB currentDatabase())) +LIFETIME(MIN 0 MAX 0) +LAYOUT(FLAT()); + +SELECT query_count, status FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict'; +SYSTEM RELOAD DICTIONARY dict; +SELECT query_count, status FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict'; +SELECT dictGetUInt64('dict', 'val', toUInt64(0)); +SELECT query_count, status FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict'; diff --git a/parser/testdata/01254_dict_load_after_detach_attach/ast.json b/parser/testdata/01254_dict_load_after_detach_attach/ast.json new file mode 100644 index 000000000..ae83491c5 --- /dev/null +++ b/parser/testdata/01254_dict_load_after_detach_attach/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery dict_data (children 3)" + }, + { + "explain": " Identifier dict_data" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration key (children 
1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " ColumnDeclaration val (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001114798, + "rows_read": 11, + "bytes_read": 395 + } +} diff --git a/parser/testdata/01254_dict_load_after_detach_attach/metadata.json b/parser/testdata/01254_dict_load_after_detach_attach/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01254_dict_load_after_detach_attach/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01254_dict_load_after_detach_attach/query.sql b/parser/testdata/01254_dict_load_after_detach_attach/query.sql new file mode 100644 index 000000000..ef9e940df --- /dev/null +++ b/parser/testdata/01254_dict_load_after_detach_attach/query.sql @@ -0,0 +1,19 @@ +CREATE TABLE dict_data (key UInt64, val UInt64) Engine=Memory(); +CREATE DICTIONARY dict +( + key UInt64 DEFAULT 0, + val UInt64 DEFAULT 10 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'dict_data' PASSWORD '' DB currentDatabase())) +LIFETIME(MIN 0 MAX 0) +LAYOUT(FLAT()); + +DETACH DATABASE {CLICKHOUSE_DATABASE:Identifier}; +ATTACH DATABASE {CLICKHOUSE_DATABASE:Identifier}; + +SELECT query_count, status FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict'; +SYSTEM RELOAD DICTIONARY dict; +SELECT query_count, status FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict'; +SELECT dictGetUInt64('dict', 'val', toUInt64(0)); +SELECT query_count, status FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict'; diff --git a/parser/testdata/01255_geo_types_livace/ast.json b/parser/testdata/01255_geo_types_livace/ast.json new file mode 100644 index 000000000..f4c409bb7 --- /dev/null +++ b/parser/testdata/01255_geo_types_livace/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tutorial (children 1)" + }, + { + "explain": " Identifier tutorial" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00103143, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01255_geo_types_livace/metadata.json b/parser/testdata/01255_geo_types_livace/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01255_geo_types_livace/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01255_geo_types_livace/query.sql b/parser/testdata/01255_geo_types_livace/query.sql new file mode 100644 index 000000000..0838f0fa2 --- /dev/null +++ b/parser/testdata/01255_geo_types_livace/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS tutorial; +create table tutorial ( inner_poly Array(Tuple(Int32, Int32)), outer_poly Array(Tuple(Int32, Int32)) ) engine = Log(); + +SELECT * FROM tutorial; + +INSERT INTO tutorial VALUES ([(123, 456), (789, 234)], [(567, 890)]), ([], [(11, 22), (33, 44), (55, 66)]); +SELECT * FROM tutorial; + +DROP TABLE tutorial; diff --git a/parser/testdata/01256_misspell_layout_name_podshumok/ast.json b/parser/testdata/01256_misspell_layout_name_podshumok/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01256_misspell_layout_name_podshumok/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/01256_misspell_layout_name_podshumok/metadata.json b/parser/testdata/01256_misspell_layout_name_podshumok/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01256_misspell_layout_name_podshumok/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01256_misspell_layout_name_podshumok/query.sql b/parser/testdata/01256_misspell_layout_name_podshumok/query.sql new file mode 100644 index 000000000..28945e3b1 --- /dev/null +++ b/parser/testdata/01256_misspell_layout_name_podshumok/query.sql @@ -0,0 +1,9 @@ +CREATE DICTIONARY testip +( + `network` String, + `test_field` String +) +PRIMARY KEY network +SOURCE(FILE(PATH '/tmp/test.csv' FORMAT CSVWithNames)) +LIFETIME(MIN 0 MAX 300) +LAYOUT(IPTRIE()); -- { serverError UNKNOWN_ELEMENT_IN_CONFIG } diff --git a/parser/testdata/01256_negative_generate_random/ast.json b/parser/testdata/01256_negative_generate_random/ast.json new file mode 100644 index 000000000..9c298af79 --- /dev/null +++ b/parser/testdata/01256_negative_generate_random/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function generateRandom (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal 'i8'" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001107181, + "rows_read": 14, + "bytes_read": 522 + } +} diff --git a/parser/testdata/01256_negative_generate_random/metadata.json b/parser/testdata/01256_negative_generate_random/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01256_negative_generate_random/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01256_negative_generate_random/query.sql b/parser/testdata/01256_negative_generate_random/query.sql new file mode 100644 index 000000000..cbfae490a --- /dev/null +++ b/parser/testdata/01256_negative_generate_random/query.sql @@ -0,0 +1,4 @@ +SELECT * FROM generateRandom('i8', 1, 10, 10); -- { serverError SYNTAX_ERROR } +SELECT * FROM generateRandom; -- { serverError UNKNOWN_TABLE } +SELECT * FROM generateRandom('i8 UInt8', 1, 10, 10, 10, 10); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT * FROM generateRandom('', 1, 10, 10); -- { serverError SYNTAX_ERROR } diff --git a/parser/testdata/01257_dictionary_mismatch_types/ast.json b/parser/testdata/01257_dictionary_mismatch_types/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01257_dictionary_mismatch_types/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01257_dictionary_mismatch_types/metadata.json b/parser/testdata/01257_dictionary_mismatch_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01257_dictionary_mismatch_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01257_dictionary_mismatch_types/query.sql b/parser/testdata/01257_dictionary_mismatch_types/query.sql new file mode 100644 index 000000000..91849c10c --- /dev/null +++ b/parser/testdata/01257_dictionary_mismatch_types/query.sql @@ -0,0 +1,112 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS test_dict_db; +CREATE DATABASE test_dict_db; + +set check_table_dependencies=0; + +CREATE TABLE test_dict_db.table1 +( + `col1` String, + `col2` Int16, + `col3` String, + `col4` Int32, + `col5` String, + `col6` Nullable(Float64), + `col7` Nullable(Float64), + `col8` Nullable(DateTime('UTC')), + `col9` Nullable(String), + `col10` Nullable(String), + `col11` Nullable(String), + `col12` Nullable(String), + `col13` Nullable(Int32), + `col14` Nullable(DateTime('UTC')), + `col15` Nullable(DateTime('UTC')), + `col16` Nullable(DateTime('UTC')), + `col17` Nullable(DateTime('UTC')), + `col18` Nullable(DateTime('UTC')), + `col19` Nullable(DateTime('UTC')), + `col20` Nullable(String) +) +ENGINE = MergeTree +ORDER BY (col1, col2, col3, col4, col5); + +INSERT INTO test_dict_db.table1 VALUES ('id1',1,'20200127-1',701,'20200127-1-01',0,300,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:37:59',NULL,'2020-02-04 11:35:14','2020-02-08 05:32:04',NULL,NULL,'12345'),('id1',1,'20200127-1',701,'20200127-1-01',0,300,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:37:59',NULL,'2020-02-04 11:35:14','2020-02-08 05:32:04',NULL,NULL,'12345'),('id1',1,'20200127-1',702,'20200127-1-02',0,300,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:37:59',NULL,'2020-02-04 11:35:14','2020-02-08 05:32:04',NULL,NULL,'12345'),('id1',1,'20200127-1',703,'20200127-1-03',0,300,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:37:59',NULL,'2020-02-04 11:35:14','2020-02-08 05:32:04',NULL,NULL,'12345'),('id1',1,'20200127-1',704,'20200127-1-04',0,300,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:37:59',NULL,'2020-02-04 11:35:14','2020-02-08 05:32:04',NULL,NULL,'12345'),('id1',1,'20200127-1',705,'20200127-1-05',0,300,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:37:59',NULL,'2020-02-04 11:35:14','2020-02-08 05:32:04',NULL,NULL,'12345'),('id1',1,'20200202-1',711,'20200202-1-01',0,200,NULL,'C2','Hello','C40',NULL,1,'2020-02-03 11:07:57',NULL,NULL,NULL,'2020-02-03 11:09:23',NULL,NULL),('id1',1,'20200202-2',712,'20200202-2-01',0,0,NULL,'C3','bye','R40',NULL,1,'2020-02-03 14:13:10',NULL,'2020-02-03 16:11:31','2020-02-07 05:32:05','2020-02-07 11:18:15','2020-02-07 11:18:16','123455'),('id1',1,'20200202-2',713,'20200202-2-02',0,0,NULL,'C3','bye','R40',NULL,1,'2020-02-03 14:13:10',NULL,'2020-02-03 16:11:31','2020-02-07 05:32:05','2020-02-07 11:18:15','2020-02-07 11:18:16','123455'),('id1',2,'20200128-1',701,'20200128-1-01',0,0,NULL,'N1','Hi','N40',NULL,2,'2020-02-03 17:07:27',NULL,'2020-02-05 13:33:55','2020-02-13 05:32:04',NULL,NULL,'A123755'),('id1',2,'20200131-1',701,'20200131-1-01',0,0,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 13:07:17',NULL,'2020-02-04 13:47:55','2020-02-12 05:32:04',NULL,NULL,'A123485'),('id1',2,'20200201-1',701,'20200201-1-01',0,0,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 21:07:37',NULL,'2020-02-05 13:40:51','2020-02-13 05:32:04',NULL,NULL,'A123455'),('id1',2,'20200202-1',711,'20200202-1-01',0,0,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 02:06:54',NULL,'2020-02-04 13:36:45','2020-02-12 05:32:04',NULL,NULL,'A123459'),('id1',2,'20200202-1',712,'20200202-1-02',0,0,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 02:06:54',NULL,'2020-02-04 13:36:45','2020-02-12 05:32:04',NULL,NULL,'A123429'),('id2',1,'20200131-1',401,'20200131-1-01',0,210,'2020-02-16 
05:22:04','N1','Hi','N40',NULL,1,'2020-02-03 10:11:00',NULL,'2020-02-05 17:30:05','2020-02-09 05:32:05',NULL,NULL,'454545'),('id2',1,'20200131-1',402,'20200131-1-02',0,210,'2020-02-16 05:22:04','N1','Hi','N40',NULL,1,'2020-02-03 10:11:00',NULL,'2020-02-05 17:30:05','2020-02-09 05:32:05',NULL,NULL,'454545'),('id2',1,'20200131-1',403,'20200131-1-03',0,270,'2020-02-16 05:22:04','N1','Hi','N40',NULL,1,'2020-02-03 10:11:00',NULL,'2020-02-05 17:30:05','2020-02-09 05:32:05',NULL,NULL,'454545'),('id2',1,'20200131-1',404,'20200131-1-04',0,270,'2020-02-16 05:22:04','N1','Hi','N40',NULL,1,'2020-02-03 10:11:00',NULL,'2020-02-05 17:30:05','2020-02-09 05:32:05',NULL,NULL,'454545'),('id2',1,'20200131-1',405,'20200131-1-05',0,380,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:11:00',NULL,'2020-02-11 16:52:58','2020-02-15 05:32:04',NULL,NULL,'6892144935823'),('id2',1,'20200131-1',406,'20200131-1-06',0,380,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:11:00',NULL,'2020-02-11 16:52:58','2020-02-15 05:32:04',NULL,NULL,'6892144935823'),('id2',1,'20200131-1',407,'20200131-1-07',0,280,NULL,'C2','Hello','C40',NULL,1,'2020-02-03 10:11:00',NULL,NULL,NULL,'2020-02-04 11:01:21',NULL,NULL),('id2',1,'20200131-1',408,'20200131-1-08',0,0,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:11:00',NULL,'2020-02-05 17:30:05','2020-02-09 05:32:04',NULL,NULL,'454545'),('id2',1,'20200201-1',401,'20200201-1-01',0,190,'2020-02-16 05:22:05','N1','Hi','N40',NULL,1,'2020-02-03 12:06:17',NULL,'2020-02-05 17:30:30','2020-02-09 05:32:03',NULL,NULL,'90071'),('id2',1,'20200201-1',402,'20200201-1-01',0,160,'2020-02-14 05:22:13','N1','Hi','N40',NULL,1,'2020-02-03 06:21:05',NULL,'2020-02-03 17:42:35','2020-02-07 05:32:04',NULL,NULL,'96575'),('id2',1,'20200201-1',403,'20200201-1-02',0,230,'2020-02-14 05:22:13','N1','Hi','N40',NULL,1,'2020-02-03 06:21:05',NULL,'2020-02-03 17:42:35','2020-02-07 05:32:04',NULL,NULL,'96575'),('id2',1,'20200202-1',404,'20200202-1-01',0,130,'2020-02-14 05:22:14','N1','Hi','N40',NULL,1,'2020-02-03 14:00:39',NULL,'2020-02-03 17:42:45','2020-02-07 05:32:04',NULL,NULL,'96850'),('id3',1,'20200130-1',391,'20200130-1-01',0,300,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:26:46',NULL,'2020-02-05 15:33:01','2020-02-08 05:32:05',NULL,NULL,'27243'),('id3',1,'20200130-1',392,'20200130-1-02',0,300,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:26:46',NULL,'2020-02-10 16:16:11','2020-02-13 05:32:06',NULL,NULL,'92512'),('id3',1,'20200131-1',393,'20200131-1-01',0,0,NULL,'C2','Hello','C40',NULL,1,'2020-02-03 10:24:38',NULL,NULL,NULL,'2020-02-05 14:04:40',NULL,NULL),('id3',1,'20200131-2',391,'20200131-1-01',0,0,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:22:08',NULL,'2020-02-06 14:27:06','2020-02-09 05:32:04',NULL,NULL,'46433'),('id3',1,'20200131-2',392,'20200131-1-02',0,0,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:22:08',NULL,'2020-02-06 14:27:06','2020-02-09 05:32:02',NULL,NULL,'46433'); + +CREATE DICTIONARY test_dict_db.table1_dict +( + col1 String, + col2 Int16, + col3 String, + col4 Int32, + col5 String, + col6 Float64, + col7 Float64, + col8 DateTime('UTC'), + col9 String, + col10 String, + col11 String, + col12 String, + col13 Int32, + col14 DateTime('UTC'), + col15 DateTime('UTC'), + col16 DateTime('UTC'), + col17 DateTime('UTC'), + col18 DateTime('UTC'), + col19 DateTime('UTC'), + col20 String +) +PRIMARY KEY col1,col2,col3,col4,col5 +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() DB test_dict_db TABLE table1 USER 'default')) +LIFETIME(MIN 0 MAX 0) LAYOUT(COMPLEX_KEY_HASHED()); + +SELECT + dictGet('test_dict_db.table1_dict', 'col6', (col1, 
col2, col3, col4, col5)), + dictGet('test_dict_db.table1_dict', 'col7', (col1, col2, col3, col4, col5)), + dictGet('test_dict_db.table1_dict', 'col8', (col1, col2, col3, col4, col5)), + dictGet('test_dict_db.table1_dict', 'col9', (col1, col2, col3, col4, col5)) +FROM test_dict_db.table1 +WHERE dictHas('test_dict_db.table1_dict', (col1, col2, col3, col4, col5)); -- { serverError CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN } + +DROP TABLE test_dict_db.table1; +CREATE TABLE test_dict_db.table1 +( + `col1` String, + `col2` Int16, + `col3` String, + `col4` Int32, + `col5` String, + `col6` Float64, + `col7` Float64, + `col8` DateTime('UTC'), + `col9` String, + `col10` String, + `col11` String, + `col12` String, + `col13` Int32, + `col14` DateTime('UTC'), + `col15` DateTime('UTC'), + `col16` DateTime('UTC'), + `col17` DateTime('UTC'), + `col18` DateTime('UTC'), + `col19` DateTime('UTC'), + `col20` String +) +ENGINE = MergeTree +ORDER BY (col1, col2, col3, col4, col5); + +SET input_format_null_as_default = 1; +INSERT INTO test_dict_db.table1 VALUES ('id1',1,'20200127-1',701,'20200127-1-01',0,300,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:37:59',NULL,'2020-02-04 11:35:14','2020-02-08 05:32:04',NULL,NULL,'12345'),('id1',1,'20200127-1',701,'20200127-1-01',0,300,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:37:59',NULL,'2020-02-04 11:35:14','2020-02-08 05:32:04',NULL,NULL,'12345'),('id1',1,'20200127-1',702,'20200127-1-02',0,300,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:37:59',NULL,'2020-02-04 11:35:14','2020-02-08 05:32:04',NULL,NULL,'12345'),('id1',1,'20200127-1',703,'20200127-1-03',0,300,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:37:59',NULL,'2020-02-04 11:35:14','2020-02-08 05:32:04',NULL,NULL,'12345'),('id1',1,'20200127-1',704,'20200127-1-04',0,300,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:37:59',NULL,'2020-02-04 11:35:14','2020-02-08 05:32:04',NULL,NULL,'12345'),('id1',1,'20200127-1',705,'20200127-1-05',0,300,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:37:59',NULL,'2020-02-04 11:35:14','2020-02-08 05:32:04',NULL,NULL,'12345'),('id1',1,'20200202-1',711,'20200202-1-01',0,200,NULL,'C2','Hello','C40',NULL,1,'2020-02-03 11:07:57',NULL,NULL,NULL,'2020-02-03 11:09:23',NULL,NULL),('id1',1,'20200202-2',712,'20200202-2-01',0,0,NULL,'C3','bye','R40',NULL,1,'2020-02-03 14:13:10',NULL,'2020-02-03 16:11:31','2020-02-07 05:32:05','2020-02-07 11:18:15','2020-02-07 11:18:16','123455'),('id1',1,'20200202-2',713,'20200202-2-02',0,0,NULL,'C3','bye','R40',NULL,1,'2020-02-03 14:13:10',NULL,'2020-02-03 16:11:31','2020-02-07 05:32:05','2020-02-07 11:18:15','2020-02-07 11:18:16','123455'),('id1',2,'20200128-1',701,'20200128-1-01',0,0,NULL,'N1','Hi','N40',NULL,2,'2020-02-03 17:07:27',NULL,'2020-02-05 13:33:55','2020-02-13 05:32:04',NULL,NULL,'A123755'),('id1',2,'20200131-1',701,'20200131-1-01',0,0,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 13:07:17',NULL,'2020-02-04 13:47:55','2020-02-12 05:32:04',NULL,NULL,'A123485'),('id1',2,'20200201-1',701,'20200201-1-01',0,0,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 21:07:37',NULL,'2020-02-05 13:40:51','2020-02-13 05:32:04',NULL,NULL,'A123455'),('id1',2,'20200202-1',711,'20200202-1-01',0,0,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 02:06:54',NULL,'2020-02-04 13:36:45','2020-02-12 05:32:04',NULL,NULL,'A123459'),('id1',2,'20200202-1',712,'20200202-1-02',0,0,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 02:06:54',NULL,'2020-02-04 13:36:45','2020-02-12 05:32:04',NULL,NULL,'A123429'),('id2',1,'20200131-1',401,'20200131-1-01',0,210,'2020-02-16 05:22:04','N1','Hi','N40',NULL,1,'2020-02-03 
10:11:00',NULL,'2020-02-05 17:30:05','2020-02-09 05:32:05',NULL,NULL,'454545'),('id2',1,'20200131-1',402,'20200131-1-02',0,210,'2020-02-16 05:22:04','N1','Hi','N40',NULL,1,'2020-02-03 10:11:00',NULL,'2020-02-05 17:30:05','2020-02-09 05:32:05',NULL,NULL,'454545'),('id2',1,'20200131-1',403,'20200131-1-03',0,270,'2020-02-16 05:22:04','N1','Hi','N40',NULL,1,'2020-02-03 10:11:00',NULL,'2020-02-05 17:30:05','2020-02-09 05:32:05',NULL,NULL,'454545'),('id2',1,'20200131-1',404,'20200131-1-04',0,270,'2020-02-16 05:22:04','N1','Hi','N40',NULL,1,'2020-02-03 10:11:00',NULL,'2020-02-05 17:30:05','2020-02-09 05:32:05',NULL,NULL,'454545'),('id2',1,'20200131-1',405,'20200131-1-05',0,380,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:11:00',NULL,'2020-02-11 16:52:58','2020-02-15 05:32:04',NULL,NULL,'6892144935823'),('id2',1,'20200131-1',406,'20200131-1-06',0,380,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:11:00',NULL,'2020-02-11 16:52:58','2020-02-15 05:32:04',NULL,NULL,'6892144935823'),('id2',1,'20200131-1',407,'20200131-1-07',0,280,NULL,'C2','Hello','C40',NULL,1,'2020-02-03 10:11:00',NULL,NULL,NULL,'2020-02-04 11:01:21',NULL,NULL),('id2',1,'20200131-1',408,'20200131-1-08',0,0,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:11:00',NULL,'2020-02-05 17:30:05','2020-02-09 05:32:04',NULL,NULL,'454545'),('id2',1,'20200201-1',401,'20200201-1-01',0,190,'2020-02-16 05:22:05','N1','Hi','N40',NULL,1,'2020-02-03 12:06:17',NULL,'2020-02-05 17:30:30','2020-02-09 05:32:03',NULL,NULL,'90071'),('id2',1,'20200201-1',402,'20200201-1-01',0,160,'2020-02-14 05:22:13','N1','Hi','N40',NULL,1,'2020-02-03 06:21:05',NULL,'2020-02-03 17:42:35','2020-02-07 05:32:04',NULL,NULL,'96575'),('id2',1,'20200201-1',403,'20200201-1-02',0,230,'2020-02-14 05:22:13','N1','Hi','N40',NULL,1,'2020-02-03 06:21:05',NULL,'2020-02-03 17:42:35','2020-02-07 05:32:04',NULL,NULL,'96575'),('id2',1,'20200202-1',404,'20200202-1-01',0,130,'2020-02-14 05:22:14','N1','Hi','N40',NULL,1,'2020-02-03 14:00:39',NULL,'2020-02-03 17:42:45','2020-02-07 05:32:04',NULL,NULL,'96850'),('id3',1,'20200130-1',391,'20200130-1-01',0,300,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:26:46',NULL,'2020-02-05 15:33:01','2020-02-08 05:32:05',NULL,NULL,'27243'),('id3',1,'20200130-1',392,'20200130-1-02',0,300,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:26:46',NULL,'2020-02-10 16:16:11','2020-02-13 05:32:06',NULL,NULL,'92512'),('id3',1,'20200131-1',393,'20200131-1-01',0,0,NULL,'C2','Hello','C40',NULL,1,'2020-02-03 10:24:38',NULL,NULL,NULL,'2020-02-05 14:04:40',NULL,NULL),('id3',1,'20200131-2',391,'20200131-1-01',0,0,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:22:08',NULL,'2020-02-06 14:27:06','2020-02-09 05:32:04',NULL,NULL,'46433'),('id3',1,'20200131-2',392,'20200131-1-02',0,0,NULL,'N1','Hi','N40',NULL,1,'2020-02-03 10:22:08',NULL,'2020-02-06 14:27:06','2020-02-09 05:32:02',NULL,NULL,'46433'); + +SYSTEM RELOAD DICTIONARY test_dict_db.table1_dict; + +SELECT + dictGet('test_dict_db.table1_dict', 'col6', (col1, col2, col3, col4, col5)), + dictGet('test_dict_db.table1_dict', 'col7', (col1, col2, col3, col4, col5)), + dictGet('test_dict_db.table1_dict', 'col8', (col1, col2, col3, col4, col5)), + dictGet('test_dict_db.table1_dict', 'col9', (col1, col2, col3, col4, col5)) +FROM test_dict_db.table1 +WHERE dictHas('test_dict_db.table1_dict', (col1, col2, col3, col4, col5)) +ORDER BY col1, col2, col3, col4, col5, col14, col17; + +DROP DATABASE IF EXISTS test_dict_db; diff --git a/parser/testdata/01258_wrong_cast_filimonov/ast.json b/parser/testdata/01258_wrong_cast_filimonov/ast.json new file mode 100644 index 
000000000..0d845ff0c --- /dev/null +++ b/parser/testdata/01258_wrong_cast_filimonov/ast.json @@ -0,0 +1,133 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery x (children 3)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration id (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " ColumnDeclaration t (children 2)" + }, + { + "explain": " DataType AggregateFunction (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier argMax" + }, + { + "explain": " DataType Enum8 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'Male'" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'Female'" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Function arrayReduce (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'argMaxState'" + }, + { + "explain": " Literal Array_['cast(-1, \\'Enum8(\\'\\' = -1, \\'Male\\' = 1, \\'Female\\' = 2)']" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUInt64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier id" + } + ], + + "rows": 37, + + "statistics": + { + "elapsed": 0.001408918, + "rows_read": 37, + "bytes_read": 1461 + } +} diff --git a/parser/testdata/01258_wrong_cast_filimonov/metadata.json b/parser/testdata/01258_wrong_cast_filimonov/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01258_wrong_cast_filimonov/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01258_wrong_cast_filimonov/query.sql b/parser/testdata/01258_wrong_cast_filimonov/query.sql new file mode 100644 index 000000000..4817a12cd --- /dev/null +++ b/parser/testdata/01258_wrong_cast_filimonov/query.sql @@ -0,0 +1 @@ +create table x( id UInt64, t AggregateFunction(argMax, Enum8('' = -1, 'Male' = 1, 'Female' = 2), UInt64) DEFAULT arrayReduce('argMaxState', ['cast(-1, \'Enum8(\'\' = -1, \'Male\' = 1, \'Female\' = 2)'], [toUInt64(0)]) ) Engine=MergeTree ORDER BY id; -- { serverError CANNOT_CONVERT_TYPE } diff --git a/parser/testdata/01259_combinator_distinct/ast.json b/parser/testdata/01259_combinator_distinct/ast.json new file mode 100644 index 000000000..edea91a42 --- /dev/null +++ b/parser/testdata/01259_combinator_distinct/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " 
ExpressionList (children 1)" + }, + { + "explain": " Function sumDistinct (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers_mt (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100000" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001342799, + "rows_read": 13, + "bytes_read": 525 + } +} diff --git a/parser/testdata/01259_combinator_distinct/metadata.json b/parser/testdata/01259_combinator_distinct/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01259_combinator_distinct/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01259_combinator_distinct/query.sql b/parser/testdata/01259_combinator_distinct/query.sql new file mode 100644 index 000000000..543538be0 --- /dev/null +++ b/parser/testdata/01259_combinator_distinct/query.sql @@ -0,0 +1,14 @@ +SELECT sum(DISTINCT number) FROM numbers_mt(100000); +SELECT sum(DISTINCT number % 13) FROM numbers_mt(100000); +SELECT arraySort(groupArray(DISTINCT number % 13)) FROM numbers_mt(100000); +SELECT finalizeAggregation(countState(DISTINCT toString(number % 20))) FROM numbers_mt(100000); +SELECT round(corrStable(DISTINCT x, y), 5) FROM (SELECT number % 10 AS x, number % 5 AS y FROM numbers(1000)); +SELECT round(corrStable(x, y), 5) FROM (SELECT DISTINCT number % 10 AS x, number % 5 AS y FROM numbers(1000)); + +SELECT sum(DISTINCT y) FROM (SELECT number % 5 AS x, number % 15 AS y FROM numbers(1000)) GROUP BY x ORDER BY x; + +SELECT countIf(DISTINCT number % 10, number % 5 = 2) FROM numbers(10000); +EXPLAIN SYNTAX SELECT countIf(DISTINCT number % 10, number % 5 = 2) FROM numbers(10000); + +SELECT sumIf(DISTINCT number % 10, number % 5 = 2) FROM numbers(10000); +EXPLAIN SYNTAX SELECT sumIf(DISTINCT number % 10, number % 5 = 2) FROM numbers(10000); diff --git a/parser/testdata/01259_combinator_distinct_distributed/ast.json b/parser/testdata/01259_combinator_distinct_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01259_combinator_distinct_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01259_combinator_distinct_distributed/metadata.json b/parser/testdata/01259_combinator_distinct_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01259_combinator_distinct_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01259_combinator_distinct_distributed/query.sql b/parser/testdata/01259_combinator_distinct_distributed/query.sql new file mode 100644 index 000000000..59cfd85ed --- /dev/null +++ b/parser/testdata/01259_combinator_distinct_distributed/query.sql @@ -0,0 +1,15 @@ +-- Tags: distributed + +SET distributed_aggregation_memory_efficient = 1; + +SELECT sum(DISTINCT number % 13) FROM remote('127.0.0.{1,2}', numbers_mt(100000)); +SELECT arraySort(groupArray(DISTINCT number % 13)) FROM remote('127.0.0.{1,2}', numbers_mt(100000)); +SELECT finalizeAggregation(countState(DISTINCT toString(number % 20))) FROM remote('127.0.0.{1,2}', numbers_mt(100000)); +SELECT round(corrStable(DISTINCT x, y), 5) FROM (SELECT number % 10 AS x, number % 5 AS y FROM remote('127.0.0.{1,2}', 
numbers(1000))); + +SET distributed_aggregation_memory_efficient = 0; + +SELECT sum(DISTINCT number % 13) FROM remote('127.0.0.{1,2}', numbers_mt(100000)); +SELECT arraySort(groupArray(DISTINCT number % 13)) FROM remote('127.0.0.{1,2}', numbers_mt(100000)); +SELECT finalizeAggregation(countState(DISTINCT toString(number % 20))) FROM remote('127.0.0.{1,2}', numbers_mt(100000)); +SELECT round(corrStable(DISTINCT x, y), 5) FROM (SELECT number % 10 AS x, number % 5 AS y FROM remote('127.0.0.{1,2}', numbers(1000))); diff --git a/parser/testdata/01259_datetime64_ubsan/ast.json b/parser/testdata/01259_datetime64_ubsan/ast.json new file mode 100644 index 000000000..b9aecf6a2 --- /dev/null +++ b/parser/testdata/01259_datetime64_ubsan/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function now64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.0011475, + "rows_read": 7, + "bytes_read": 259 + } +} diff --git a/parser/testdata/01259_datetime64_ubsan/metadata.json b/parser/testdata/01259_datetime64_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01259_datetime64_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01259_datetime64_ubsan/query.sql b/parser/testdata/01259_datetime64_ubsan/query.sql new file mode 100644 index 000000000..be8e5dd72 --- /dev/null +++ b/parser/testdata/01259_datetime64_ubsan/query.sql @@ -0,0 +1,2 @@ +select now64(10); -- { serverError ARGUMENT_OUT_OF_BOUND } +select length(toString(now64(9))); diff --git a/parser/testdata/01259_dictionary_custom_settings_ddl/ast.json b/parser/testdata/01259_dictionary_custom_settings_ddl/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01259_dictionary_custom_settings_ddl/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01259_dictionary_custom_settings_ddl/metadata.json b/parser/testdata/01259_dictionary_custom_settings_ddl/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01259_dictionary_custom_settings_ddl/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01259_dictionary_custom_settings_ddl/query.sql b/parser/testdata/01259_dictionary_custom_settings_ddl/query.sql new file mode 100644 index 000000000..be56806f8 --- /dev/null +++ b/parser/testdata/01259_dictionary_custom_settings_ddl/query.sql @@ -0,0 +1,39 @@ +-- Tags: no-fasttest + +CREATE TABLE table_for_dict +( + key_column UInt64, + second_column UInt64, + third_column String +) +ENGINE = MergeTree() +ORDER BY key_column; + +INSERT INTO table_for_dict VALUES (100500, 10000000, 'Hello world'); + +DROP DATABASE IF EXISTS ordinary_db; + +CREATE DATABASE ordinary_db; + +DROP DICTIONARY IF EXISTS ordinary_db.dict1; + +CREATE DICTIONARY ordinary_db.dict1 +( + key_column UInt64 DEFAULT 0, + second_column UInt64 DEFAULT 1, + third_column String DEFAULT 'qqq' +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' PASSWORD '' DB currentDatabase())) +LIFETIME(MIN 1 MAX 10) +LAYOUT(FLAT()) 
SETTINGS(max_result_bytes=1); + +SELECT 'INITIALIZING DICTIONARY'; + +SELECT dictGetUInt64('ordinary_db.dict1', 'second_column', toUInt64(100500)); -- { serverError TOO_MANY_ROWS_OR_BYTES } + +SELECT 'END'; + +DROP DATABASE IF EXISTS ordinary_db; + +DROP TABLE IF EXISTS table_for_dict; diff --git a/parser/testdata/01260_ubsan_decimal_parse/ast.json b/parser/testdata/01260_ubsan_decimal_parse/ast.json new file mode 100644 index 000000000..22850c349 --- /dev/null +++ b/parser/testdata/01260_ubsan_decimal_parse/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDecimal32OrZero (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Int64_-7174046" + }, + { + "explain": " Literal 'String'" + }, + { + "explain": " Literal UInt64_6" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.00113122, + "rows_read": 11, + "bytes_read": 422 + } +} diff --git a/parser/testdata/01260_ubsan_decimal_parse/metadata.json b/parser/testdata/01260_ubsan_decimal_parse/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01260_ubsan_decimal_parse/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01260_ubsan_decimal_parse/query.sql b/parser/testdata/01260_ubsan_decimal_parse/query.sql new file mode 100644 index 000000000..2c7cda512 --- /dev/null +++ b/parser/testdata/01260_ubsan_decimal_parse/query.sql @@ -0,0 +1 @@ +SELECT toDecimal32OrZero(CAST(-7174046, 'String'), 6); diff --git a/parser/testdata/01262_fractional_timezone_near_start_of_epoch/ast.json b/parser/testdata/01262_fractional_timezone_near_start_of_epoch/ast.json new file mode 100644 index 000000000..0206a1c16 --- /dev/null +++ b/parser/testdata/01262_fractional_timezone_near_start_of_epoch/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_10000" + }, + { + "explain": " Literal 'Asia\/Calcutta'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001220828, + "rows_read": 8, + "bytes_read": 304 + } +} diff --git a/parser/testdata/01262_fractional_timezone_near_start_of_epoch/metadata.json b/parser/testdata/01262_fractional_timezone_near_start_of_epoch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01262_fractional_timezone_near_start_of_epoch/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01262_fractional_timezone_near_start_of_epoch/query.sql b/parser/testdata/01262_fractional_timezone_near_start_of_epoch/query.sql new file mode 100644 index 000000000..62ecc000a --- /dev/null +++ b/parser/testdata/01262_fractional_timezone_near_start_of_epoch/query.sql @@ -0,0 +1,3 @@ +SELECT toDateTime(10000, 'Asia/Calcutta'); +SELECT 
toMinute(toDateTime(10000, 'Asia/Calcutta')); +SELECT toStartOfHour(toDateTime(10000, 'Asia/Calcutta')); diff --git a/parser/testdata/01262_low_cardinality_remove/ast.json b/parser/testdata/01262_low_cardinality_remove/ast.json new file mode 100644 index 000000000..b157a527c --- /dev/null +++ b/parser/testdata/01262_low_cardinality_remove/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery testView (children 1)" + }, + { + "explain": " Identifier testView" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001268584, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01262_low_cardinality_remove/metadata.json b/parser/testdata/01262_low_cardinality_remove/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01262_low_cardinality_remove/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01262_low_cardinality_remove/query.sql b/parser/testdata/01262_low_cardinality_remove/query.sql new file mode 100644 index 000000000..e03b895ff --- /dev/null +++ b/parser/testdata/01262_low_cardinality_remove/query.sql @@ -0,0 +1,42 @@ +DROP TABLE IF EXISTS testView; +DROP TABLE IF EXISTS testTable; + +CREATE TABLE IF NOT EXISTS testTable ( + A LowCardinality(String), -- like voter + B Int64 +) ENGINE MergeTree() +ORDER BY (A); + +INSERT INTO testTable VALUES ('A', 1),('B',2),('C',3); + +CREATE VIEW testView AS +SELECT + A as ALow, -- like account + B +FROM + testTable; + +SELECT CAST(ALow, 'String') AS AStr +FROM testView +GROUP BY AStr ORDER BY AStr; + +DROP TABLE testTable; + +CREATE TABLE IF NOT EXISTS testTable ( + A String, -- like voter + B Int64 +) ENGINE MergeTree() +ORDER BY (A); + +SELECT CAST(ALow, 'String') AS AStr +FROM testView +GROUP BY AStr ORDER BY AStr; + +INSERT INTO testTable VALUES ('A', 1),('B',2),('C',3); + +SELECT CAST(ALow, 'String') AS AStr +FROM testView +GROUP BY AStr ORDER BY AStr; + +DROP TABLE IF EXISTS testView; +DROP TABLE IF EXISTS testTable; diff --git a/parser/testdata/01263_type_conversion_nvartolomei/ast.json b/parser/testdata/01263_type_conversion_nvartolomei/ast.json new file mode 100644 index 000000000..4ad0951d3 --- /dev/null +++ b/parser/testdata/01263_type_conversion_nvartolomei/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery m (children 1)" + }, + { + "explain": " Identifier m" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001317891, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01263_type_conversion_nvartolomei/metadata.json b/parser/testdata/01263_type_conversion_nvartolomei/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01263_type_conversion_nvartolomei/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01263_type_conversion_nvartolomei/query.sql b/parser/testdata/01263_type_conversion_nvartolomei/query.sql new file mode 100644 index 000000000..10bf8be79 --- /dev/null +++ b/parser/testdata/01263_type_conversion_nvartolomei/query.sql @@ -0,0 +1,50 @@ +DROP TABLE IF EXISTS m; +DROP TABLE IF EXISTS d; + +CREATE TABLE m +( + `v` UInt8 +) +ENGINE = MergeTree() +PARTITION BY tuple() +ORDER BY v; + +CREATE TABLE d +( + `v` UInt16 +) +ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), m, rand()); + +INSERT INTO m VALUES (123); +SELECT * FROM d; + + +DROP TABLE m; 
+DROP TABLE d; + + +CREATE TABLE m +( + `v` Enum8('a' = 1, 'b' = 2) +) +ENGINE = MergeTree() +PARTITION BY tuple() +ORDER BY v; + +CREATE TABLE d +( + `v` Enum8('a' = 1) +) +ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), m, rand()); + +INSERT INTO m VALUES ('a'); +SELECT * FROM d; + +SELECT '---'; + +INSERT INTO m VALUES ('b'); +SELECT toString(v) FROM (SELECT v FROM d ORDER BY v) FORMAT Null; -- { serverError UNKNOWN_ELEMENT_OF_ENUM} + + +DROP TABLE m; +DROP TABLE d; diff --git a/parser/testdata/01264_nested_baloo_bear/ast.json b/parser/testdata/01264_nested_baloo_bear/ast.json new file mode 100644 index 000000000..1247862b6 --- /dev/null +++ b/parser/testdata/01264_nested_baloo_bear/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery LOG_T (children 1)" + }, + { + "explain": " Identifier LOG_T" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00147534, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/01264_nested_baloo_bear/metadata.json b/parser/testdata/01264_nested_baloo_bear/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01264_nested_baloo_bear/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01264_nested_baloo_bear/query.sql b/parser/testdata/01264_nested_baloo_bear/query.sql new file mode 100644 index 000000000..ae9b02866 --- /dev/null +++ b/parser/testdata/01264_nested_baloo_bear/query.sql @@ -0,0 +1,39 @@ +DROP TABLE IF EXISTS LOG_T; + +CREATE TABLE LOG_T +( + `fingerprint` UInt64, + `fields` Nested( + name LowCardinality(String), + value String) +) +ENGINE = MergeTree +ORDER BY fingerprint; + +SELECT + fields.name, + fields.value +FROM +( + SELECT + fields.name, + fields.value + FROM LOG_T +) +WHERE has(['node'], fields.value[indexOf(fields.name, 'ProcessName')]); + +INSERT INTO LOG_T VALUES (123, ['Hello', 'ProcessName'], ['World', 'node']); + +SELECT + fields.name, + fields.value +FROM +( + SELECT + fields.name, + fields.value + FROM LOG_T +) +WHERE has(['node'], fields.value[indexOf(fields.name, 'ProcessName')]); + +DROP TABLE LOG_T; diff --git a/parser/testdata/01265_datetime_string_comparison_felix_mueller/ast.json b/parser/testdata/01265_datetime_string_comparison_felix_mueller/ast.json new file mode 100644 index 000000000..c02e6b310 --- /dev/null +++ b/parser/testdata/01265_datetime_string_comparison_felix_mueller/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tztest (children 1)" + }, + { + "explain": " Identifier tztest" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001378507, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/01265_datetime_string_comparison_felix_mueller/metadata.json b/parser/testdata/01265_datetime_string_comparison_felix_mueller/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01265_datetime_string_comparison_felix_mueller/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01265_datetime_string_comparison_felix_mueller/query.sql b/parser/testdata/01265_datetime_string_comparison_felix_mueller/query.sql new file mode 100644 index 000000000..41cc5b532 --- /dev/null +++ b/parser/testdata/01265_datetime_string_comparison_felix_mueller/query.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS tztest; + +CREATE TABLE tztest +( + timeBerlin 
DateTime('Europe/Berlin'), + timeLA DateTime('America/Los_Angeles') +) +ENGINE = Memory; + +INSERT INTO tztest (timeBerlin, timeLA) VALUES ('2019-05-06 12:00:00', '2019-05-06 12:00:00'); + +SELECT + toUnixTimestamp(timeBerlin), + toUnixTimestamp(timeLA) +FROM tztest; + +SELECT 1 +FROM tztest +WHERE timeBerlin = '2019-05-06 12:00:00'; + +SELECT 1 +FROM tztest +WHERE timeLA = '2019-05-06 12:00:00'; + +SELECT 1 +FROM tztest +WHERE '2019-05-06 12:00:00' = timeBerlin; + +DROP TABLE tztest; diff --git a/parser/testdata/01266_default_prewhere_reqq/ast.json b/parser/testdata/01266_default_prewhere_reqq/ast.json new file mode 100644 index 000000000..0658fe3a6 --- /dev/null +++ b/parser/testdata/01266_default_prewhere_reqq/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001237857, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01266_default_prewhere_reqq/metadata.json b/parser/testdata/01266_default_prewhere_reqq/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01266_default_prewhere_reqq/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01266_default_prewhere_reqq/query.sql b/parser/testdata/01266_default_prewhere_reqq/query.sql new file mode 100644 index 000000000..a192a7e78 --- /dev/null +++ b/parser/testdata/01266_default_prewhere_reqq/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS t1; + +CREATE TABLE t1 +( + date Date, + s1 String, + s2 String +) ENGINE = MergeTree() PARTITION BY toYYYYMMDD(date) ORDER BY (date, s1) +SETTINGS index_granularity = 8192; + +insert into t1 (date, s1,s2) values(today()-1,'aaa','bbb'); +alter table t1 add column s3 String DEFAULT concat(s2,'_',s1); +insert into t1 (date, s1,s2) values(today(),'aaa2','bbb2'); +select ignore(date), s3 from t1 where s2='bbb'; + +DROP TABLE t1; diff --git a/parser/testdata/01267_alter_default_key_columns_zookeeper_long/ast.json b/parser/testdata/01267_alter_default_key_columns_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01267_alter_default_key_columns_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01267_alter_default_key_columns_zookeeper_long/metadata.json b/parser/testdata/01267_alter_default_key_columns_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01267_alter_default_key_columns_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01267_alter_default_key_columns_zookeeper_long/query.sql b/parser/testdata/01267_alter_default_key_columns_zookeeper_long/query.sql new file mode 100644 index 000000000..11d774e36 --- /dev/null +++ b/parser/testdata/01267_alter_default_key_columns_zookeeper_long/query.sql @@ -0,0 +1,29 @@ +-- Tags: long, zookeeper + +DROP TABLE IF EXISTS test_alter; +CREATE TABLE test_alter (x Date, s String) ENGINE = MergeTree ORDER BY s PARTITION BY x; +ALTER TABLE test_alter MODIFY COLUMN s DEFAULT 'Hello'; +ALTER TABLE test_alter MODIFY COLUMN x DEFAULT '2000-01-01'; +DESCRIBE TABLE test_alter; +DROP TABLE test_alter; + +DROP TABLE IF EXISTS test_alter_r1; +DROP TABLE IF EXISTS test_alter_r2; + +CREATE TABLE test_alter_r1 (x Date, s String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01267/alter', 
'r1') ORDER BY s PARTITION BY x; +CREATE TABLE test_alter_r2 (x Date, s String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01267/alter', 'r2') ORDER BY s PARTITION BY x; + +ALTER TABLE test_alter_r1 MODIFY COLUMN s DEFAULT 'Hello' SETTINGS replication_alter_partitions_sync = 2; +ALTER TABLE test_alter_r2 MODIFY COLUMN x DEFAULT '2000-01-01' SETTINGS replication_alter_partitions_sync = 2; + +DESCRIBE TABLE test_alter_r1; +DESCRIBE TABLE test_alter_r2; + +SYSTEM RESTART REPLICA test_alter_r1; +SYSTEM RESTART REPLICA test_alter_r2; + +DESCRIBE TABLE test_alter_r1; +DESCRIBE TABLE test_alter_r2; + +DROP TABLE test_alter_r1; +DROP TABLE test_alter_r2; diff --git a/parser/testdata/01268_DateTime64_in_WHERE/ast.json b/parser/testdata/01268_DateTime64_in_WHERE/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01268_DateTime64_in_WHERE/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01268_DateTime64_in_WHERE/metadata.json b/parser/testdata/01268_DateTime64_in_WHERE/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01268_DateTime64_in_WHERE/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01268_DateTime64_in_WHERE/query.sql b/parser/testdata/01268_DateTime64_in_WHERE/query.sql new file mode 100644 index 000000000..113d4226c --- /dev/null +++ b/parser/testdata/01268_DateTime64_in_WHERE/query.sql @@ -0,0 +1,35 @@ +-- Error cases: +-- non-const string column +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT DT64 = materialize(S); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT materialize(S) = toDateTime64(S, 3); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT * WHERE DT64 = materialize(S); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT * WHERE materialize(S) = DT64; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT * WHERE toDateTime64(123.345, 3) == 'ABCD'; -- {serverError CANNOT_PARSE_DATETIME} -- invalid DateTime64 string +SELECT * WHERE toDateTime64(123.345, 3) == '2020-02-05 14:34:12.33333333333333333333333333333333333333333333333333333333'; + +SELECT 'in SELECT'; +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT DT64 = S; +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT S = DT64; +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT materialize(DT64) = S; +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT S = materialize(DT64); + +SELECT 'in WHERE'; +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT * WHERE DT64 = S; +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT * WHERE S = DT64; +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT * WHERE materialize(DT64) = S; +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT * WHERE S = materialize(DT64); + +SELECT 'other operators'; +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT * WHERE DT64 <= S; +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT * WHERE DT64 >= S; +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT * WHERE S <= DT64; +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT * WHERE S >= DT64; + +-- empty results +WITH 
'2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT * WHERE DT64 < S; +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT * WHERE DT64 > S; +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT * WHERE DT64 != S; +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT * WHERE S < DT64; +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT * WHERE S > DT64; +WITH '2020-02-05 14:34:12.333' as S, toDateTime64(S, 3) as DT64 SELECT * WHERE S != DT64; diff --git a/parser/testdata/01268_data_numeric_parameters/ast.json b/parser/testdata/01268_data_numeric_parameters/ast.json new file mode 100644 index 000000000..bceb85412 --- /dev/null +++ b/parser/testdata/01268_data_numeric_parameters/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ints (children 1)" + }, + { + "explain": " Identifier ints" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000951471, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01268_data_numeric_parameters/metadata.json b/parser/testdata/01268_data_numeric_parameters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01268_data_numeric_parameters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01268_data_numeric_parameters/query.sql b/parser/testdata/01268_data_numeric_parameters/query.sql new file mode 100644 index 000000000..3450fef9a --- /dev/null +++ b/parser/testdata/01268_data_numeric_parameters/query.sql @@ -0,0 +1,46 @@ +DROP TABLE IF EXISTS ints; +DROP TABLE IF EXISTS floats; +DROP TABLE IF EXISTS strings; + +CREATE TABLE ints ( + a TINYINT, + b TINYINT(8), + c SMALLINT, + d SMALLINT(16), + e INT, + f INT(32), + g BIGINT, + h BIGINT(64) +) engine=Memory; + +INSERT INTO ints VALUES (1, 8, 11, 16, 21, 32, 41, 64); + +SELECT toTypeName(a), toTypeName(b), toTypeName(c), toTypeName(d), toTypeName(e), toTypeName(f), toTypeName(g), toTypeName(h) FROM ints; + +CREATE TABLE floats ( + a FLOAT, + b FLOAT(12), + c FLOAT(15, 22), + d DOUBLE, + e DOUBLE(12), + f DOUBLE(4, 18) + +) engine=Memory; + +INSERT INTO floats VALUES (1.1, 1.2, 1.3, 41.1, 41.1, 42.1); + +SELECT toTypeName(a), toTypeName(b), toTypeName(c), toTypeName(d), toTypeName(e), toTypeName(f) FROM floats; + + +CREATE TABLE strings ( + a VARCHAR, + b VARCHAR(11) +) engine=Memory; + +INSERT INTO strings VALUES ('test', 'string'); + +SELECT toTypeName(a), toTypeName(b) FROM strings; + +DROP TABLE floats; +DROP TABLE ints; +DROP TABLE strings; diff --git a/parser/testdata/01268_dictionary_direct_layout/ast.json b/parser/testdata/01268_dictionary_direct_layout/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01268_dictionary_direct_layout/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01268_dictionary_direct_layout/metadata.json b/parser/testdata/01268_dictionary_direct_layout/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01268_dictionary_direct_layout/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01268_dictionary_direct_layout/query.sql b/parser/testdata/01268_dictionary_direct_layout/query.sql new file mode 100644 index 000000000..66313528a --- /dev/null +++ b/parser/testdata/01268_dictionary_direct_layout/query.sql @@ -0,0 +1,133 @@ +-- Tags: no-parallel, no-fasttest + +DROP DATABASE IF EXISTS 
database_for_dict_01268; + +CREATE DATABASE database_for_dict_01268; + +DROP TABLE IF EXISTS database_for_dict_01268.table_for_dict1; +DROP TABLE IF EXISTS database_for_dict_01268.table_for_dict2; +DROP TABLE IF EXISTS database_for_dict_01268.table_for_dict3; + +CREATE TABLE database_for_dict_01268.table_for_dict1 +( + key_column UInt64, + second_column UInt64, + third_column String +) +ENGINE = MergeTree() +ORDER BY key_column; + +INSERT INTO database_for_dict_01268.table_for_dict1 VALUES (100500, 10000000, 'Hello world'); + +CREATE TABLE database_for_dict_01268.table_for_dict2 +( + region_id UInt64, + parent_region UInt64, + region_name String +) +ENGINE = MergeTree() +ORDER BY region_id; + +INSERT INTO database_for_dict_01268.table_for_dict2 VALUES (1, 0, 'Russia'); +INSERT INTO database_for_dict_01268.table_for_dict2 VALUES (2, 1, 'Moscow'); +INSERT INTO database_for_dict_01268.table_for_dict2 VALUES (3, 2, 'Center'); +INSERT INTO database_for_dict_01268.table_for_dict2 VALUES (4, 0, 'Great Britain'); +INSERT INTO database_for_dict_01268.table_for_dict2 VALUES (5, 4, 'London'); + +CREATE TABLE database_for_dict_01268.table_for_dict3 +( + region_id UInt64, + parent_region Float32, + region_name String +) +ENGINE = MergeTree() +ORDER BY region_id; + +INSERT INTO database_for_dict_01268.table_for_dict3 VALUES (1, 0.5, 'Russia'); +INSERT INTO database_for_dict_01268.table_for_dict3 VALUES (2, 1.6, 'Moscow'); +INSERT INTO database_for_dict_01268.table_for_dict3 VALUES (3, 2.3, 'Center'); +INSERT INTO database_for_dict_01268.table_for_dict3 VALUES (4, 0.2, 'Great Britain'); +INSERT INTO database_for_dict_01268.table_for_dict3 VALUES (5, 4.9, 'London'); + +DROP DATABASE IF EXISTS db_01268; + +CREATE DATABASE db_01268; + +DROP DICTIONARY IF EXISTS db_01268.dict1; +DROP DICTIONARY IF EXISTS db_01268.dict2; +DROP DICTIONARY IF EXISTS db_01268.dict3; + +CREATE DICTIONARY db_01268.dict1 +( + key_column UInt64 DEFAULT 0, + second_column UInt64 DEFAULT 1, + third_column String DEFAULT 'qqq' +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict1' PASSWORD '' DB 'database_for_dict_01268')) +LAYOUT(DIRECT()) SETTINGS(max_result_bytes=1); + +CREATE DICTIONARY db_01268.dict2 +( + region_id UInt64 DEFAULT 0, + parent_region UInt64 DEFAULT 0 HIERARCHICAL, + region_name String DEFAULT '' +) +PRIMARY KEY region_id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict2' PASSWORD '' DB 'database_for_dict_01268')) +LAYOUT(DIRECT()) SETTINGS(dictionary_use_async_executor=1, max_threads=8); + +CREATE DICTIONARY db_01268.dict3 +( + region_id UInt64 DEFAULT 0, + parent_region Float32 DEFAULT 0, + region_name String DEFAULT '' +) +PRIMARY KEY region_id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict3' PASSWORD '' DB 'database_for_dict_01268')) +LAYOUT(DIRECT()); + +SELECT 'INITIALIZING DICTIONARY'; + +SELECT dictGetHierarchy('db_01268.dict2', toUInt64(3)); +SELECT dictHas('db_01268.dict2', toUInt64(3)); +SELECT dictHas('db_01268.dict2', toUInt64(45)); +SELECT dictIsIn('db_01268.dict2', toUInt64(3), toUInt64(1)); +SELECT dictIsIn('db_01268.dict2', toUInt64(1), toUInt64(3)); +SELECT dictGetUInt64('db_01268.dict2', 'parent_region', toUInt64(3)); +SELECT dictGetUInt64('db_01268.dict2', 'parent_region', toUInt64(99)); +SELECT dictGetFloat32('db_01268.dict3', 'parent_region', toUInt64(3)); +SELECT dictGetFloat32('db_01268.dict3', 'parent_region', toUInt64(2)); +SELECT 
dictGetFloat32('db_01268.dict3', 'parent_region', toUInt64(1)); +SELECT dictGetString('db_01268.dict2', 'region_name', toUInt64(5)); +SELECT dictGetString('db_01268.dict2', 'region_name', toUInt64(4)); +SELECT dictGetStringOrDefault('db_01268.dict2', 'region_name', toUInt64(100), 'NONE'); + +SELECT number + 1, dictGetStringOrDefault('db_01268.dict2', 'region_name', toUInt64(number + 1), 'NONE') chars FROM numbers(10); +SELECT number + 1, dictGetFloat32OrDefault('db_01268.dict3', 'parent_region', toUInt64(number + 1), toFloat32(0)) chars FROM numbers(10); +SELECT dictGetStringOrDefault('db_01268.dict2', 'region_name', toUInt64(1), 'NONE'); +SELECT dictGetStringOrDefault('db_01268.dict2', 'region_name', toUInt64(2), 'NONE'); +SELECT dictGetStringOrDefault('db_01268.dict2', 'region_name', toUInt64(3), 'NONE'); +SELECT dictGetStringOrDefault('db_01268.dict2', 'region_name', toUInt64(4), 'NONE'); +SELECT dictGetStringOrDefault('db_01268.dict2', 'region_name', toUInt64(5), 'NONE'); +SELECT dictGetStringOrDefault('db_01268.dict2', 'region_name', toUInt64(6), 'NONE'); +SELECT dictGetStringOrDefault('db_01268.dict2', 'region_name', toUInt64(7), 'NONE'); +SELECT dictGetStringOrDefault('db_01268.dict2', 'region_name', toUInt64(8), 'NONE'); +SELECT dictGetStringOrDefault('db_01268.dict2', 'region_name', toUInt64(9), 'NONE'); +SELECT dictGetStringOrDefault('db_01268.dict2', 'region_name', toUInt64(10), 'NONE'); + +SELECT dictGetUInt64('db_01268.dict1', 'second_column', toUInt64(100500)); -- { serverError TOO_MANY_ROWS_OR_BYTES } + +SELECT 'END'; + +DROP DICTIONARY IF EXISTS db_01268.dict1; +DROP DICTIONARY IF EXISTS db_01268.dict2; +DROP DICTIONARY IF EXISTS db_01268.dict3; + +DROP DATABASE IF EXISTS db_01268; + +DROP TABLE IF EXISTS database_for_dict_01268.table_for_dict1; +DROP TABLE IF EXISTS database_for_dict_01268.table_for_dict2; +DROP TABLE IF EXISTS database_for_dict_01268.table_for_dict3; + +DROP DATABASE IF EXISTS database_for_dict_01268; diff --git a/parser/testdata/01268_mergine_sorted_limit/ast.json b/parser/testdata/01268_mergine_sorted_limit/ast.json new file mode 100644 index 000000000..c261ec826 --- /dev/null +++ b/parser/testdata/01268_mergine_sorted_limit/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00111811, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01268_mergine_sorted_limit/metadata.json b/parser/testdata/01268_mergine_sorted_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01268_mergine_sorted_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01268_mergine_sorted_limit/query.sql b/parser/testdata/01268_mergine_sorted_limit/query.sql new file mode 100644 index 000000000..49d8161bf --- /dev/null +++ b/parser/testdata/01268_mergine_sorted_limit/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab (x UInt32, y UInt32) ENGINE = MergeTree() ORDER BY x; + +INSERT INTO tab VALUES (1,1),(1,2),(1,3),(1,4),(1,5); + +INSERT INTO tab VALUES (2,6),(2,7),(2,8),(2,9),(2,0); + +SELECT * FROM tab ORDER BY x LIMIT 3 SETTINGS optimize_read_in_order=1; +SELECT * FROM tab ORDER BY x LIMIT 4 SETTINGS optimize_read_in_order=1; + +DROP TABLE IF EXISTS tab; diff --git a/parser/testdata/01268_mv_scalars/ast.json b/parser/testdata/01268_mv_scalars/ast.json new 
file mode 100644 index 000000000..1c7bbbd2c --- /dev/null +++ b/parser/testdata/01268_mv_scalars/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery dest_table_mv (children 1)" + }, + { + "explain": " Identifier dest_table_mv" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00131625, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/01268_mv_scalars/metadata.json b/parser/testdata/01268_mv_scalars/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01268_mv_scalars/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01268_mv_scalars/query.sql b/parser/testdata/01268_mv_scalars/query.sql new file mode 100644 index 000000000..d48a56b80 --- /dev/null +++ b/parser/testdata/01268_mv_scalars/query.sql @@ -0,0 +1,46 @@ +DROP TABLE IF EXISTS dest_table_mv; +DROP TABLE IF EXISTS left_table; +DROP TABLE IF EXISTS right_table; +DROP TABLE IF EXISTS dest_table; +DROP TABLE IF EXISTS src_table; +DROP VIEW IF EXISTS dst_mv; +DROP VIEW IF EXISTS dst_mv_1; +DROP VIEW IF EXISTS dst_mv_2; + + +create table src_table Engine=Memory as system.numbers; +CREATE MATERIALIZED VIEW dst_mv_1 Engine=Memory as select *, (SELECT count() FROM src_table) AS cnt FROM src_table; +insert into src_table select 1 from numbers(3); +insert into src_table select 2 from numbers(2); +insert into src_table select 3 from numbers(1); +select * from dst_mv_1 order by number; + + +CREATE TABLE dest_table (`Date` Date, `Id` UInt64, `Units` Float32) ENGINE = Memory; +create table left_table as dest_table; +create table right_table as dest_table; +insert into right_table select toDate('2020-01-01') + number, number, number / 2 from numbers(10); + +CREATE MATERIALIZED VIEW dest_table_mv TO dest_table as select Date, Id, Units FROM (SELECT * FROM left_table) AS t1 INNER JOIN (WITH (SELECT DISTINCT Date FROM left_table LIMIT 1) AS dt SELECT * FROM right_table WHERE Date = dt) AS t2 USING (Date, Id); + +insert into left_table select toDate('2020-01-01'), 0, number * 2 from numbers(3); +select 'the rows get inserted'; +select * from dest_table order by Date, Id, Units; + +insert into left_table select toDate('2020-01-01'), 5, number * 2 from numbers(3); +select 'no new rows'; +select * from dest_table order by Date, Id, Units; + +truncate table left_table; +insert into left_table select toDate('2020-01-01') + 5, 5, number * 2 from numbers(3); +select 'the rows get inserted'; +select * from dest_table order by Date, Id, Units; + +drop table dest_table_mv; +drop table left_table; +drop table right_table; +drop table dest_table; +drop table src_table; +drop view if exists dst_mv; +drop view if exists dst_mv_1; +drop view if exists dst_mv_2; diff --git a/parser/testdata/01268_shard_avgweighted/ast.json b/parser/testdata/01268_shard_avgweighted/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01268_shard_avgweighted/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01268_shard_avgweighted/metadata.json b/parser/testdata/01268_shard_avgweighted/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01268_shard_avgweighted/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01268_shard_avgweighted/query.sql b/parser/testdata/01268_shard_avgweighted/query.sql new file mode 100644 index 000000000..3ddbf108c --- /dev/null +++ 
b/parser/testdata/01268_shard_avgweighted/query.sql @@ -0,0 +1,11 @@ +-- Tags: shard + +CREATE TABLE dummy(foo Int64) ENGINE = Memory(); +INSERT INTO dummy VALUES (1); +SELECT avgWeighted(100., .1) FROM remote('127.0.0.{2,3}', currentDatabase(), dummy); +SELECT avgWeighted(10, 100) FROM remote('127.0.0.{2,3}', currentDatabase(), dummy); +SELECT avgWeighted(0, 1) FROM remote('127.0.0.{2,3}', currentDatabase(), dummy); +SELECT avgWeighted(0., 0.) FROM remote('127.0.0.{2,3}', currentDatabase(), dummy); +SELECT avgWeighted(1., 0.) FROM remote('127.0.0.{2,3}', currentDatabase(), dummy); +SELECT avgWeighted(toInt8(100), -1) FROM remote('127.0.0.{2,3}', currentDatabase(), dummy); +DROP TABLE dummy; diff --git a/parser/testdata/01269_alias_type_differs/ast.json b/parser/testdata/01269_alias_type_differs/ast.json new file mode 100644 index 000000000..91d9602fd --- /dev/null +++ b/parser/testdata/01269_alias_type_differs/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_01269 (children 1)" + }, + { + "explain": " Identifier data_01269" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001172477, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01269_alias_type_differs/metadata.json b/parser/testdata/01269_alias_type_differs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01269_alias_type_differs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01269_alias_type_differs/query.sql b/parser/testdata/01269_alias_type_differs/query.sql new file mode 100644 index 000000000..b78e46f62 --- /dev/null +++ b/parser/testdata/01269_alias_type_differs/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS data_01269; +CREATE TABLE data_01269 +( + key Int32, + value Nullable(Int32), + alias UInt8 ALIAS value>0 +) +ENGINE = MergeTree() +ORDER BY key; +INSERT INTO data_01269 VALUES (1, 0); + +-- after PR#10441 +SELECT toTypeName(alias) FROM data_01269; +SELECT any(alias) FROM data_01269; + +-- even without PR#10441 +ALTER TABLE data_01269 DROP COLUMN alias; +ALTER TABLE data_01269 ADD COLUMN alias UInt8 ALIAS value>0; +SELECT toTypeName(alias) FROM data_01269; +SELECT any(alias) FROM data_01269; + +DROP TABLE data_01269; diff --git a/parser/testdata/01269_create_with_null/ast.json b/parser/testdata/01269_create_with_null/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01269_create_with_null/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01269_create_with_null/metadata.json b/parser/testdata/01269_create_with_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01269_create_with_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01269_create_with_null/query.sql b/parser/testdata/01269_create_with_null/query.sql new file mode 100644 index 000000000..30b7fc224 --- /dev/null +++ b/parser/testdata/01269_create_with_null/query.sql @@ -0,0 +1,65 @@ +-- Tags: no-replicated-database + +DROP TABLE IF EXISTS data_null; +DROP TABLE IF EXISTS set_null; +DROP TABLE IF EXISTS cannot_be_nullable; + +SET data_type_default_nullable='false'; + +CREATE TABLE data_null ( + a INT NULL, + b INT NOT NULL, + c Nullable(INT), + d INT +) engine=Memory(); + + +INSERT INTO data_null VALUES (NULL, 2, NULL, 4); + +SELECT toTypeName(a), toTypeName(b), toTypeName(c), toTypeName(d) FROM data_null; + +SHOW CREATE 
TABLE data_null; + +CREATE TABLE data_null_error ( + a Nullable(INT) NULL, + b INT NOT NULL, + c Nullable(INT) +) engine=Memory(); --{serverError ILLEGAL_SYNTAX_FOR_DATA_TYPE} + + +CREATE TABLE data_null_error ( + a INT NULL, + b Nullable(INT) NOT NULL, + c Nullable(INT) +) engine=Memory(); --{serverError ILLEGAL_SYNTAX_FOR_DATA_TYPE} + +SET data_type_default_nullable='true'; + +CREATE TABLE set_null ( + a INT NULL, + b INT NOT NULL, + c Nullable(INT), + d INT, + f DEFAULT 1 +) engine=Memory(); + + +INSERT INTO set_null VALUES (NULL, 2, NULL, NULL, NULL); + +SELECT toTypeName(a), toTypeName(b), toTypeName(c), toTypeName(d), toTypeName(f) FROM set_null; + +SHOW CREATE TABLE set_null; +DETACH TABLE set_null; +ATTACH TABLE set_null; +SHOW CREATE TABLE set_null; + +CREATE TABLE cannot_be_nullable (n Int8, a Array(UInt8)) ENGINE=Memory; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +CREATE TABLE cannot_be_nullable (n Int8, a Array(UInt8) NOT NULL) ENGINE=Memory; +SHOW CREATE TABLE cannot_be_nullable; +DETACH TABLE cannot_be_nullable; +ATTACH TABLE cannot_be_nullable; +SHOW CREATE TABLE cannot_be_nullable; + +DROP TABLE data_null; +DROP TABLE set_null; +DROP TABLE cannot_be_nullable; diff --git a/parser/testdata/01269_toStartOfSecond/ast.json b/parser/testdata/01269_toStartOfSecond/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01269_toStartOfSecond/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01269_toStartOfSecond/metadata.json b/parser/testdata/01269_toStartOfSecond/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01269_toStartOfSecond/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01269_toStartOfSecond/query.sql b/parser/testdata/01269_toStartOfSecond/query.sql new file mode 100644 index 000000000..6ebfde0aa --- /dev/null +++ b/parser/testdata/01269_toStartOfSecond/query.sql @@ -0,0 +1,13 @@ +-- Error cases +SELECT toStartOfSecond('123'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toStartOfSecond(now()); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toStartOfSecond(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT toStartOfSecond(now64(), 123); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +WITH toDateTime64('2019-09-16 19:20:11', 3, 'Asia/Istanbul') AS dt64 SELECT toStartOfSecond(dt64, 'UTC') AS res, toTypeName(res); +WITH toDateTime64('2019-09-16 19:20:11', 0, 'UTC') AS dt64 SELECT toStartOfSecond(dt64) AS res, toTypeName(res); +WITH toDateTime64('2019-09-16 19:20:11.123', 3, 'UTC') AS dt64 SELECT toStartOfSecond(dt64) AS res, toTypeName(res); +WITH toDateTime64('2019-09-16 19:20:11.123', 9, 'UTC') AS dt64 SELECT toStartOfSecond(dt64) AS res, toTypeName(res); + +SELECT 'non-const column'; +WITH toDateTime64('2019-09-16 19:20:11.123', 3, 'UTC') AS dt64 SELECT toStartOfSecond(materialize(dt64)) AS res, toTypeName(res); diff --git a/parser/testdata/01270_optimize_skip_unused_shards_low_cardinality/ast.json b/parser/testdata/01270_optimize_skip_unused_shards_low_cardinality/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01270_optimize_skip_unused_shards_low_cardinality/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01270_optimize_skip_unused_shards_low_cardinality/metadata.json b/parser/testdata/01270_optimize_skip_unused_shards_low_cardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01270_optimize_skip_unused_shards_low_cardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01270_optimize_skip_unused_shards_low_cardinality/query.sql b/parser/testdata/01270_optimize_skip_unused_shards_low_cardinality/query.sql new file mode 100644 index 000000000..120561486 --- /dev/null +++ b/parser/testdata/01270_optimize_skip_unused_shards_low_cardinality/query.sql @@ -0,0 +1,15 @@ +-- Tags: shard + +set optimize_skip_unused_shards=1; +set force_optimize_skip_unused_shards=2; +set allow_suspicious_low_cardinality_types=1; + +drop table if exists data_01270; +drop table if exists dist_01270; + +create table data_01270 (key LowCardinality(Int)) Engine=Null(); +create table dist_01270 as data_01270 Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01270, key); +select * from dist_01270 where key = 1; + +drop table data_01270; +drop table dist_01270; diff --git a/parser/testdata/01271_optimize_arithmetic_operations_in_aggr_func_long/ast.json b/parser/testdata/01271_optimize_arithmetic_operations_in_aggr_func_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01271_optimize_arithmetic_operations_in_aggr_func_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01271_optimize_arithmetic_operations_in_aggr_func_long/metadata.json b/parser/testdata/01271_optimize_arithmetic_operations_in_aggr_func_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01271_optimize_arithmetic_operations_in_aggr_func_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01271_optimize_arithmetic_operations_in_aggr_func_long/query.sql b/parser/testdata/01271_optimize_arithmetic_operations_in_aggr_func_long/query.sql new file mode 100644 index 000000000..de16993a5 --- /dev/null +++ b/parser/testdata/01271_optimize_arithmetic_operations_in_aggr_func_long/query.sql @@ -0,0 +1,185 @@ +-- Tags: long + +SET enable_analyzer = 1; +SET optimize_arithmetic_operations_in_aggregate_functions = 1; + +EXPLAIN SYNTAX SELECT sum(n + 1), sum(1 + n), sum(n - 1), sum(1 - n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT sum(n * 2), sum(2 * n), sum(n / 2), sum(1 / n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT min(n + 1), min(1 + n), min(n - 1), min(1 - n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT min(n * 2), min(2 * n), min(n / 2), min(1 / n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT max(n + 1), max(1 + n), max(n - 1), max(1 - n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT max(n * 2), max(2 * n), max(n / 2), max(1 / n) FROM (SELECT number n FROM numbers(10)); + +EXPLAIN SYNTAX SELECT sum(n + -1), sum(-1 + n), sum(n - -1), sum(-1 - n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT sum(n * -2), sum(-2 * n), sum(n / -2), sum(-1 / n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT min(n + -1), min(-1 + n), min(n - -1), min(-1 - n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT min(n * -2), min(-2 * n), min(n / -2), min(-1 / n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT max(n + -1), max(-1 + n), max(n - -1), max(-1 - n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT max(n * -2), max(-2 * n), max(n / -2), max(-1 / n) FROM (SELECT number n FROM numbers(10)); + +EXPLAIN SYNTAX SELECT sum(abs(2) + 1), sum(abs(2) + n), sum(n - abs(2)), sum(1 - 
abs(2)) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT sum(abs(2) * 2), sum(abs(2) * n), sum(n / abs(2)), sum(1 / abs(2)) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT min(abs(2) + 1), min(abs(2) + n), min(n - abs(2)), min(1 - abs(2)) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT min(abs(2) * 2), min(abs(2) * n), min(n / abs(2)), min(1 / abs(2)) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT max(abs(2) + 1), max(abs(2) + n), max(n - abs(2)), max(1 - abs(2)) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT max(abs(2) * 2), max(abs(2) * n), max(n / abs(2)), max(1 / abs(2)) FROM (SELECT number n FROM numbers(10)); + +EXPLAIN SYNTAX SELECT sum(abs(n) + 1), sum(abs(n) + n), sum(n - abs(n)), sum(1 - abs(n)) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT sum(abs(n) * 2), sum(abs(n) * n), sum(n / abs(n)), sum(1 / abs(n)) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT min(abs(n) + 1), min(abs(n) + n), min(n - abs(n)), min(1 - abs(n)) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT min(abs(n) * 2), min(abs(n) * n), min(n / abs(n)), min(1 / abs(n)) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT max(abs(n) + 1), max(abs(n) + n), max(n - abs(n)), max(1 - abs(n)) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT max(abs(n) * 2), max(abs(n) * n), max(n / abs(n)), max(1 / abs(n)) FROM (SELECT number n FROM numbers(10)); + +EXPLAIN SYNTAX SELECT sum(n*n + 1), sum(1 + n*n), sum(n*n - 1), sum(1 - n*n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT sum(n*n * 2), sum(2 * n*n), sum(n*n / 2), sum(1 / n*n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT min(n*n + 1), min(1 + n*n), min(n*n - 1), min(1 - n*n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT min(n*n * 2), min(2 * n*n), min(n*n / 2), min(1 / n*n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT max(n*n + 1), max(1 + n*n), max(n*n - 1), max(1 - n*n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT max(n*n * 2), max(2 * n*n), max(n*n / 2), max(1 / n*n) FROM (SELECT number n FROM numbers(10)); + +EXPLAIN SYNTAX SELECT sum(1 + n + 1), sum(1 + 1 + n), sum(1 + n - 1), sum(1 + 1 - n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT sum(1 + n * 2), sum(1 + 2 * n), sum(1 + n / 2), sum(1 + 1 / n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT min(1 + n + 1), min(1 + 1 + n), min(1 + n - 1), min(1 + 1 - n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT min(1 + n * 2), min(1 + 2 * n), min(1 + n / 2), min(1 + 1 / n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT max(1 + n + 1), max(1 + 1 + n), max(1 + n - 1), max(1 + 1 - n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT max(1 + n * 2), max(1 + 2 * n), max(1 + n / 2), max(1 + 1 / n) FROM (SELECT number n FROM numbers(10)); + +EXPLAIN SYNTAX SELECT sum(n + -1 + -1), sum(-1 + n + -1), sum(n - -1 + -1), sum(-1 - n + -1) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT sum(n * -2 * -1), sum(-2 * n * -1), sum(n / -2 / -1), sum(-1 / n / -1) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT min(n + -1 + -1), min(-1 + n + -1), min(n - -1 + -1), min(-1 - n + -1) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT min(n * -2 * -1), min(-2 * n * -1), min(n / -2 / -1), min(-1 / n / -1) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT max(n + -1 + -1), 
max(-1 + n + -1), max(n - -1 + -1), max(-1 - n + -1) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT max(n * -2 * -1), max(-2 * n * -1), max(n / -2 / -1), max(-1 / n / -1) FROM (SELECT number n FROM numbers(10)); + +EXPLAIN SYNTAX SELECT sum(n + 1) + sum(1 + n) + sum(n - 1) + sum(1 - n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT sum(n * 2) + sum(2 * n) + sum(n / 2) + sum(1 / n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT min(n + 1) + min(1 + n) + min(n - 1) + min(1 - n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT min(n * 2) + min(2 * n) + min(n / 2) + min(1 / n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT max(n + 1) + max(1 + n) + max(n - 1) + max(1 - n) FROM (SELECT number n FROM numbers(10)); +EXPLAIN SYNTAX SELECT max(n * 2) + max(2 * n) + max(n / 2) + max(1 / n) FROM (SELECT number n FROM numbers(10)); + + +SELECT sum(n + 1), sum(1 + n), sum(n - 1), sum(1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT sum(n * 2), sum(2 * n), sum(n / 2), sum(1 / n) FROM (SELECT number n FROM numbers(10)); +SELECT min(n + 1), min(1 + n), min(n - 1), min(1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT min(n * 2), min(2 * n), min(n / 2), min(1 / n) FROM (SELECT number n FROM numbers(10)); +SELECT max(n + 1), max(1 + n), max(n - 1), max(1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT max(n * 2), max(2 * n), max(n / 2), max(1 / n) FROM (SELECT number n FROM numbers(10)); + +SELECT sum(n + -1), sum(-1 + n), sum(n - -1), sum(-1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT sum(n * -2), sum(-2 * n), sum(n / -2), sum(-1 / n) FROM (SELECT number n FROM numbers(10)); +SELECT min(n + -1), min(-1 + n), min(n - -1), min(-1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT min(n * -2), min(-2 * n), min(n / -2), min(-1 / n) FROM (SELECT number n FROM numbers(10)); +SELECT max(n + -1), max(-1 + n), max(n - -1), max(-1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT max(n * -2), max(-2 * n), max(n / -2), max(-1 / n) FROM (SELECT number n FROM numbers(10)); + +SELECT sum(abs(2) + 1), sum(abs(2) + n), sum(n - abs(2)), sum(1 - abs(2)) FROM (SELECT number n FROM numbers(10)); +SELECT sum(abs(2) * 2), sum(abs(2) * n), sum(n / abs(2)), sum(1 / abs(2)) FROM (SELECT number n FROM numbers(10)); +SELECT min(abs(2) + 1), min(abs(2) + n), min(n - abs(2)), min(1 - abs(2)) FROM (SELECT number n FROM numbers(10)); +SELECT min(abs(2) * 2), min(abs(2) * n), min(n / abs(2)), min(1 / abs(2)) FROM (SELECT number n FROM numbers(10)); +SELECT max(abs(2) + 1), max(abs(2) + n), max(n - abs(2)), max(1 - abs(2)) FROM (SELECT number n FROM numbers(10)); +SELECT max(abs(2) * 2), max(abs(2) * n), max(n / abs(2)), max(1 / abs(2)) FROM (SELECT number n FROM numbers(10)); + +SELECT sum(abs(n) + 1), sum(abs(n) + n), sum(n - abs(n)), sum(1 - abs(n)) FROM (SELECT number n FROM numbers(10)); +SELECT sum(abs(n) * 2), sum(abs(n) * n), sum(n / abs(n)), sum(1 / abs(n)) FROM (SELECT number n FROM numbers(10)); +SELECT min(abs(n) + 1), min(abs(n) + n), min(n - abs(n)), min(1 - abs(n)) FROM (SELECT number n FROM numbers(10)); +SELECT min(abs(n) * 2), min(abs(n) * n), min(n / abs(n)), min(1 / abs(n)) FROM (SELECT number n FROM numbers(10)); +SELECT max(abs(n) + 1), max(abs(n) + n), max(n - abs(n)), max(1 - abs(n)) FROM (SELECT number n FROM numbers(10)); +SELECT max(abs(n) * 2), max(abs(n) * n), max(n / abs(n)), max(1 / abs(n)) FROM (SELECT number n FROM numbers(10)); + +SELECT sum(n*n + 1), sum(1 + n*n), sum(n*n - 1), sum(1 - 
n*n) FROM (SELECT number n FROM numbers(10)); +SELECT sum(n*n * 2), sum(2 * n*n), sum(n*n / 2), sum(1 / n*n) FROM (SELECT number n FROM numbers(10)); +SELECT min(n*n + 1), min(1 + n*n), min(n*n - 1), min(1 - n*n) FROM (SELECT number n FROM numbers(10)); +SELECT min(n*n * 2), min(2 * n*n), min(n*n / 2), min(1 / n*n) FROM (SELECT number n FROM numbers(10)); +SELECT max(n*n + 1), max(1 + n*n), max(n*n - 1), max(1 - n*n) FROM (SELECT number n FROM numbers(10)); +SELECT max(n*n * 2), max(2 * n*n), max(n*n / 2), max(1 / n*n) FROM (SELECT number n FROM numbers(10)); + +SELECT sum(1 + n + 1), sum(1 + 1 + n), sum(1 + n - 1), sum(1 + 1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT sum(1 + n * 2), sum(1 + 2 * n), sum(1 + n / 2), sum(1 + 1 / n) FROM (SELECT number n FROM numbers(10)); +SELECT min(1 + n + 1), min(1 + 1 + n), min(1 + n - 1), min(1 + 1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT min(1 + n * 2), min(1 + 2 * n), min(1 + n / 2), min(1 + 1 / n) FROM (SELECT number n FROM numbers(10)); +SELECT max(1 + n + 1), max(1 + 1 + n), max(1 + n - 1), max(1 + 1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT max(1 + n * 2), max(1 + 2 * n), max(1 + n / 2), max(1 + 1 / n) FROM (SELECT number n FROM numbers(10)); + +SELECT sum(n + -1 + -1), sum(-1 + n + -1), sum(n - -1 + -1), sum(-1 - n + -1) FROM (SELECT number n FROM numbers(10)); +SELECT sum(n * -2 * -1), sum(-2 * n * -1), sum(n / -2 / -1), sum(-1 / n / -1) FROM (SELECT number n FROM numbers(10)); +SELECT min(n + -1 + -1), min(-1 + n + -1), min(n - -1 + -1), min(-1 - n + -1) FROM (SELECT number n FROM numbers(10)); +SELECT min(n * -2 * -1), min(-2 * n * -1), min(n / -2 / -1), min(-1 / n / -1) FROM (SELECT number n FROM numbers(10)); +SELECT max(n + -1 + -1), max(-1 + n + -1), max(n - -1 + -1), max(-1 - n + -1) FROM (SELECT number n FROM numbers(10)); +SELECT max(n * -2 * -1), max(-2 * n * -1), max(n / -2 / -1), max(-1 / n / -1) FROM (SELECT number n FROM numbers(10)); + +SELECT sum(n + 1) + sum(1 + n) + sum(n - 1) + sum(1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT sum(n * 2) + sum(2 * n) + sum(n / 2) + sum(1 / n) FROM (SELECT number n FROM numbers(10)); +SELECT min(n + 1) + min(1 + n) + min(n - 1) + min(1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT min(n * 2) + min(2 * n) + min(n / 2) + min(1 / n) FROM (SELECT number n FROM numbers(10)); +SELECT max(n + 1) + max(1 + n) + max(n - 1) + max(1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT max(n * 2) + max(2 * n) + max(n / 2) + max(1 / n) FROM (SELECT number n FROM numbers(10)); + + +SELECT sum(number * -3) + min(2 * number * -3) - max(-1 * -2 * number * -3) FROM numbers(100); +SELECT max(log(2) * number) FROM numbers(100); +SELECT round(max(log(2) * 3 * sin(0.3) * number * 4)) FROM numbers(100); + +SET optimize_arithmetic_operations_in_aggregate_functions = 0; + +SELECT sum(n + 1), sum(1 + n), sum(n - 1), sum(1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT sum(n * 2), sum(2 * n), sum(n / 2), sum(1 / n) FROM (SELECT number n FROM numbers(10)); +SELECT min(n + 1), min(1 + n), min(n - 1), min(1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT min(n * 2), min(2 * n), min(n / 2), min(1 / n) FROM (SELECT number n FROM numbers(10)); +SELECT max(n + 1), max(1 + n), max(n - 1), max(1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT max(n * 2), max(2 * n), max(n / 2), max(1 / n) FROM (SELECT number n FROM numbers(10)); + +SELECT sum(n + -1), sum(-1 + n), sum(n - -1), sum(-1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT sum(n * -2), sum(-2 
* n), sum(n / -2), sum(-1 / n) FROM (SELECT number n FROM numbers(10)); +SELECT min(n + -1), min(-1 + n), min(n - -1), min(-1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT min(n * -2), min(-2 * n), min(n / -2), min(-1 / n) FROM (SELECT number n FROM numbers(10)); +SELECT max(n + -1), max(-1 + n), max(n - -1), max(-1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT max(n * -2), max(-2 * n), max(n / -2), max(-1 / n) FROM (SELECT number n FROM numbers(10)); + +SELECT sum(abs(2) + 1), sum(abs(2) + n), sum(n - abs(2)), sum(1 - abs(2)) FROM (SELECT number n FROM numbers(10)); +SELECT sum(abs(2) * 2), sum(abs(2) * n), sum(n / abs(2)), sum(1 / abs(2)) FROM (SELECT number n FROM numbers(10)); +SELECT min(abs(2) + 1), min(abs(2) + n), min(n - abs(2)), min(1 - abs(2)) FROM (SELECT number n FROM numbers(10)); +SELECT min(abs(2) * 2), min(abs(2) * n), min(n / abs(2)), min(1 / abs(2)) FROM (SELECT number n FROM numbers(10)); +SELECT max(abs(2) + 1), max(abs(2) + n), max(n - abs(2)), max(1 - abs(2)) FROM (SELECT number n FROM numbers(10)); +SELECT max(abs(2) * 2), max(abs(2) * n), max(n / abs(2)), max(1 / abs(2)) FROM (SELECT number n FROM numbers(10)); + +SELECT sum(abs(n) + 1), sum(abs(n) + n), sum(n - abs(n)), sum(1 - abs(n)) FROM (SELECT number n FROM numbers(10)); +SELECT sum(abs(n) * 2), sum(abs(n) * n), sum(n / abs(n)), sum(1 / abs(n)) FROM (SELECT number n FROM numbers(10)); +SELECT min(abs(n) + 1), min(abs(n) + n), min(n - abs(n)), min(1 - abs(n)) FROM (SELECT number n FROM numbers(10)); +SELECT min(abs(n) * 2), min(abs(n) * n), min(n / abs(n)), min(1 / abs(n)) FROM (SELECT number n FROM numbers(10)); +SELECT max(abs(n) + 1), max(abs(n) + n), max(n - abs(n)), max(1 - abs(n)) FROM (SELECT number n FROM numbers(10)); +SELECT max(abs(n) * 2), max(abs(n) * n), max(n / abs(n)), max(1 / abs(n)) FROM (SELECT number n FROM numbers(10)); + +SELECT sum(n*n + 1), sum(1 + n*n), sum(n*n - 1), sum(1 - n*n) FROM (SELECT number n FROM numbers(10)); +SELECT sum(n*n * 2), sum(2 * n*n), sum(n*n / 2), sum(1 / n*n) FROM (SELECT number n FROM numbers(10)); +SELECT min(n*n + 1), min(1 + n*n), min(n*n - 1), min(1 - n*n) FROM (SELECT number n FROM numbers(10)); +SELECT min(n*n * 2), min(2 * n*n), min(n*n / 2), min(1 / n*n) FROM (SELECT number n FROM numbers(10)); +SELECT max(n*n + 1), max(1 + n*n), max(n*n - 1), max(1 - n*n) FROM (SELECT number n FROM numbers(10)); +SELECT max(n*n * 2), max(2 * n*n), max(n*n / 2), max(1 / n*n) FROM (SELECT number n FROM numbers(10)); + +SELECT sum(1 + n + 1), sum(1 + 1 + n), sum(1 + n - 1), sum(1 + 1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT sum(1 + n * 2), sum(1 + 2 * n), sum(1 + n / 2), sum(1 + 1 / n) FROM (SELECT number n FROM numbers(10)); +SELECT min(1 + n + 1), min(1 + 1 + n), min(1 + n - 1), min(1 + 1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT min(1 + n * 2), min(1 + 2 * n), min(1 + n / 2), min(1 + 1 / n) FROM (SELECT number n FROM numbers(10)); +SELECT max(1 + n + 1), max(1 + 1 + n), max(1 + n - 1), max(1 + 1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT max(1 + n * 2), max(1 + 2 * n), max(1 + n / 2), max(1 + 1 / n) FROM (SELECT number n FROM numbers(10)); + +SELECT sum(n + -1 + -1), sum(-1 + n + -1), sum(n - -1 + -1), sum(-1 - n + -1) FROM (SELECT number n FROM numbers(10)); +SELECT sum(n * -2 * -1), sum(-2 * n * -1), sum(n / -2 / -1), sum(-1 / n / -1) FROM (SELECT number n FROM numbers(10)); +SELECT min(n + -1 + -1), min(-1 + n + -1), min(n - -1 + -1), min(-1 - n + -1) FROM (SELECT number n FROM numbers(10)); +SELECT min(n * -2 * 
-1), min(-2 * n * -1), min(n / -2 / -1), min(-1 / n / -1) FROM (SELECT number n FROM numbers(10)); +SELECT max(n + -1 + -1), max(-1 + n + -1), max(n - -1 + -1), max(-1 - n + -1) FROM (SELECT number n FROM numbers(10)); +SELECT max(n * -2 * -1), max(-2 * n * -1), max(n / -2 / -1), max(-1 / n / -1) FROM (SELECT number n FROM numbers(10)); + +SELECT sum(n + 1) + sum(1 + n) + sum(n - 1) + sum(1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT sum(n * 2) + sum(2 * n) + sum(n / 2) + sum(1 / n) FROM (SELECT number n FROM numbers(10)); +SELECT min(n + 1) + min(1 + n) + min(n - 1) + min(1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT min(n * 2) + min(2 * n) + min(n / 2) + min(1 / n) FROM (SELECT number n FROM numbers(10)); +SELECT max(n + 1) + max(1 + n) + max(n - 1) + max(1 - n) FROM (SELECT number n FROM numbers(10)); +SELECT max(n * 2) + max(2 * n) + max(n / 2) + max(1 / n) FROM (SELECT number n FROM numbers(10)); + + +SELECT sum(number * -3) + min(2 * number * -3) - max(-1 * -2 * number * -3) FROM numbers(100); +SELECT max(log(2) * number) FROM numbers(100); +SELECT round(max(log(2) * 3 * sin(0.3) * number * 4)) FROM numbers(100); diff --git a/parser/testdata/01271_optimize_arithmetic_operations_in_aggr_func_with_alias/ast.json b/parser/testdata/01271_optimize_arithmetic_operations_in_aggr_func_with_alias/ast.json new file mode 100644 index 000000000..c76a91d0d --- /dev/null +++ b/parser/testdata/01271_optimize_arithmetic_operations_in_aggr_func_with_alias/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001625881, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01271_optimize_arithmetic_operations_in_aggr_func_with_alias/metadata.json b/parser/testdata/01271_optimize_arithmetic_operations_in_aggr_func_with_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01271_optimize_arithmetic_operations_in_aggr_func_with_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01271_optimize_arithmetic_operations_in_aggr_func_with_alias/query.sql b/parser/testdata/01271_optimize_arithmetic_operations_in_aggr_func_with_alias/query.sql new file mode 100644 index 000000000..7c27994ac --- /dev/null +++ b/parser/testdata/01271_optimize_arithmetic_operations_in_aggr_func_with_alias/query.sql @@ -0,0 +1,11 @@ +set optimize_arithmetic_operations_in_aggregate_functions = 1; +SET convert_query_to_cnf = 0; + +explain syntax select min((n as a) + (1 as b)) c from (select number n from numbers(10)) where a > 0 and b > 0 having c > 0; +select min((n as a) + (1 as b)) c from (select number n from numbers(10)) where a > 0 and b > 0 having c > 0; + +explain syntax select min((n + 1) as a) c from (select number n from numbers(10)) where a > 0 having c > 0; +select min((n + 1) as a) c from (select number n from numbers(10)) where a > 0 having c > 0; + +explain syntax select min(n + 1) as c from (select number n from numbers(10)) having c > 0; +select min(n + 1) c from (select number n from numbers(10)) having c > 0; diff --git a/parser/testdata/01271_show_privileges/ast.json b/parser/testdata/01271_show_privileges/ast.json new file mode 100644 index 000000000..835374137 --- /dev/null +++ b/parser/testdata/01271_show_privileges/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"ShowPrivilegesQuery" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001139246, + "rows_read": 1, + "bytes_read": 27 + } +} diff --git a/parser/testdata/01271_show_privileges/metadata.json b/parser/testdata/01271_show_privileges/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01271_show_privileges/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01271_show_privileges/query.sql b/parser/testdata/01271_show_privileges/query.sql new file mode 100644 index 000000000..e3210a7ae --- /dev/null +++ b/parser/testdata/01271_show_privileges/query.sql @@ -0,0 +1 @@ +SHOW PRIVILEGES; diff --git a/parser/testdata/01272_offset_without_limit/ast.json b/parser/testdata/01272_offset_without_limit/ast.json new file mode 100644 index 000000000..c4d9a9848 --- /dev/null +++ b/parser/testdata/01272_offset_without_limit/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery offset_without_limit (children 1)" + }, + { + "explain": " Identifier offset_without_limit" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001219041, + "rows_read": 2, + "bytes_read": 92 + } +} diff --git a/parser/testdata/01272_offset_without_limit/metadata.json b/parser/testdata/01272_offset_without_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01272_offset_without_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01272_offset_without_limit/query.sql b/parser/testdata/01272_offset_without_limit/query.sql new file mode 100644 index 000000000..769808b2e --- /dev/null +++ b/parser/testdata/01272_offset_without_limit/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS offset_without_limit; + +CREATE TABLE offset_without_limit ( + value UInt32 +) Engine = MergeTree() + PRIMARY KEY value + ORDER BY value; + +INSERT INTO offset_without_limit SELECT * FROM system.numbers LIMIT 50; + +SELECT value FROM offset_without_limit ORDER BY value OFFSET 5; + +DROP TABLE offset_without_limit; diff --git a/parser/testdata/01272_suspicious_codecs/ast.json b/parser/testdata/01272_suspicious_codecs/ast.json new file mode 100644 index 000000000..d395b35da --- /dev/null +++ b/parser/testdata/01272_suspicious_codecs/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery codecs (children 1)" + }, + { + "explain": " Identifier codecs" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001277686, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/01272_suspicious_codecs/metadata.json b/parser/testdata/01272_suspicious_codecs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01272_suspicious_codecs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01272_suspicious_codecs/query.sql b/parser/testdata/01272_suspicious_codecs/query.sql new file mode 100644 index 000000000..1c1d7b58d --- /dev/null +++ b/parser/testdata/01272_suspicious_codecs/query.sql @@ -0,0 +1,134 @@ +DROP TABLE IF EXISTS codecs; + +CREATE TABLE codecs +( + a UInt8 CODEC(LZ4), + b UInt16 CODEC(ZSTD), + c Float32 CODEC(Gorilla), + d UInt8 CODEC(Delta, LZ4), + e Float64 CODEC(Gorilla, ZSTD), + f UInt32 CODEC(Delta, Delta, T64), + g DateTime CODEC(DoubleDelta), + h DateTime64 CODEC(DoubleDelta, LZ4), + i String CODEC(NONE) +) ENGINE = MergeTree ORDER BY 
tuple(); + +DROP TABLE codecs; + +-- test what should not work + +CREATE TABLE codecs (a UInt8 CODEC(NONE, NONE)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE codecs (a UInt8 CODEC(NONE, LZ4)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE codecs (a UInt8 CODEC(LZ4, NONE)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE codecs (a UInt8 CODEC(LZ4, LZ4)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE codecs (a UInt8 CODEC(LZ4, ZSTD)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE codecs (a UInt8 CODEC(Delta)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE codecs (a UInt8 CODEC(Delta, Delta)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE codecs (a UInt8 CODEC(LZ4, Delta)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE codecs (a UInt8 CODEC(Gorilla)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE codecs (a FixedString(2) CODEC(Gorilla)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE codecs (a Decimal(15,5) CODEC(Gorilla)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE codecs (a Float64 CODEC(Delta, Gorilla)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE codecs (a Float32 CODEC(DoubleDelta, FPC)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } + +-- test that sanity check is not performed in ATTACH query + +DROP TABLE IF EXISTS codecs1; +DROP TABLE IF EXISTS codecs2; +DROP TABLE IF EXISTS codecs3; +DROP TABLE IF EXISTS codecs4; +DROP TABLE IF EXISTS codecs5; +DROP TABLE IF EXISTS codecs6; +DROP TABLE IF EXISTS codecs7; +DROP TABLE IF EXISTS codecs8; +DROP TABLE IF EXISTS codecs9; +DROP TABLE IF EXISTS codecs10; +DROP TABLE IF EXISTS codecs11; + +SET allow_suspicious_codecs = 1; + +CREATE TABLE codecs1 (a UInt8 CODEC(NONE, NONE)) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE codecs2 (a UInt8 CODEC(NONE, LZ4)) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE codecs3 (a UInt8 CODEC(LZ4, NONE)) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE codecs4 (a UInt8 CODEC(LZ4, LZ4)) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE codecs5 (a UInt8 CODEC(LZ4, ZSTD)) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE codecs6 (a UInt8 CODEC(Delta)) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE codecs7 (a UInt8 CODEC(Delta, Delta)) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE codecs8 (a UInt8 CODEC(LZ4, Delta)) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE codecs9 (a UInt8 CODEC(Gorilla)) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE codecs10 (a FixedString(2) CODEC(Gorilla)) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE codecs11 (a Decimal(15,5) CODEC(Gorilla)) ENGINE = MergeTree ORDER BY tuple(); + +SET allow_suspicious_codecs = 0; + +SHOW CREATE TABLE codecs1; +SHOW CREATE TABLE codecs2; +SHOW CREATE TABLE codecs3; +SHOW CREATE TABLE codecs4; +SHOW CREATE TABLE codecs5; +SHOW CREATE TABLE codecs6; +SHOW CREATE TABLE codecs7; +SHOW CREATE TABLE codecs8; +SHOW CREATE TABLE codecs9; +SHOW CREATE TABLE codecs10; +SHOW CREATE TABLE codecs11; + +DETACH TABLE codecs1; +DETACH TABLE codecs2; +DETACH TABLE codecs3; +DETACH TABLE codecs4; +DETACH TABLE codecs5; +DETACH TABLE codecs6; +DETACH TABLE codecs7; +DETACH TABLE 
codecs8; +DETACH TABLE codecs9; +DETACH TABLE codecs10; +DETACH TABLE codecs11; + +ATTACH TABLE codecs1; +ATTACH TABLE codecs2; +ATTACH TABLE codecs3; +ATTACH TABLE codecs4; +ATTACH TABLE codecs5; +ATTACH TABLE codecs6; +ATTACH TABLE codecs7; +ATTACH TABLE codecs8; +ATTACH TABLE codecs9; +ATTACH TABLE codecs10; +ATTACH TABLE codecs11; + +SHOW CREATE TABLE codecs1; +SHOW CREATE TABLE codecs2; +SHOW CREATE TABLE codecs3; +SHOW CREATE TABLE codecs4; +SHOW CREATE TABLE codecs5; +SHOW CREATE TABLE codecs6; +SHOW CREATE TABLE codecs7; +SHOW CREATE TABLE codecs8; +SHOW CREATE TABLE codecs9; +SHOW CREATE TABLE codecs10; +SHOW CREATE TABLE codecs11; + +SELECT * FROM codecs1; +SELECT * FROM codecs2; +SELECT * FROM codecs3; +SELECT * FROM codecs4; +SELECT * FROM codecs5; +SELECT * FROM codecs6; +SELECT * FROM codecs7; +SELECT * FROM codecs8; +SELECT * FROM codecs9; +SELECT * FROM codecs10; +SELECT * FROM codecs11; + +DROP TABLE codecs1; +DROP TABLE codecs2; +DROP TABLE codecs3; +DROP TABLE codecs4; +DROP TABLE codecs5; +DROP TABLE codecs6; +DROP TABLE codecs7; +DROP TABLE codecs8; +DROP TABLE codecs9; +DROP TABLE codecs10; +DROP TABLE codecs11; diff --git a/parser/testdata/01272_totals_and_filter_bug/ast.json b/parser/testdata/01272_totals_and_filter_bug/ast.json new file mode 100644 index 000000000..349f969ec --- /dev/null +++ b/parser/testdata/01272_totals_and_filter_bug/ast.json @@ -0,0 +1,109 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Set" + } + ], + + "rows": 29, + + "statistics": + { + "elapsed": 0.001576097, + "rows_read": 29, + "bytes_read": 1159 + } +} diff --git a/parser/testdata/01272_totals_and_filter_bug/metadata.json b/parser/testdata/01272_totals_and_filter_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01272_totals_and_filter_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01272_totals_and_filter_bug/query.sql b/parser/testdata/01272_totals_and_filter_bug/query.sql new file mode 100644 index 000000000..11992ab05 --- /dev/null 
+++ b/parser/testdata/01272_totals_and_filter_bug/query.sql @@ -0,0 +1,37 @@ +select * from (select number, count() from numbers(2) group by number with totals) where number > 0 settings enable_optimize_predicate_expression=0; + +select '-'; + +CREATE TABLE foo (server_date Date, dimension_1 String, metric_1 UInt32) ENGINE = MergeTree() PARTITION BY toYYYYMM(server_date) ORDER BY (server_date); +CREATE TABLE bar (server_date Date, dimension_1 String, metric_2 UInt32) ENGINE = MergeTree() PARTITION BY toYYYYMM(server_date) ORDER BY (server_date); + +INSERT INTO foo VALUES ('2020-01-01', 'test1', 10), ('2020-01-01', 'test2', 20); +INSERT INTO bar VALUES ('2020-01-01', 'test2', 30), ('2020-01-01', 'test3', 40); + +SELECT + dimension_1, + sum_metric_1, + sum_metric_2 +FROM +( + SELECT + dimension_1, + sum(metric_1) AS sum_metric_1 + FROM foo + GROUP BY dimension_1 + WITH TOTALS +) AS subquery_1 +ALL FULL OUTER JOIN +( + SELECT + dimension_1, + sum(metric_2) AS sum_metric_2 + FROM bar + GROUP BY dimension_1 + WITH TOTALS +) AS subquery_2 USING (dimension_1) +WHERE sum_metric_2 < 20 +ORDER BY dimension_1 ASC; + +DROP TABLE foo; +DROP TABLE bar; diff --git a/parser/testdata/01273_extractGroups/ast.json b/parser/testdata/01273_extractGroups/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01273_extractGroups/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01273_extractGroups/metadata.json b/parser/testdata/01273_extractGroups/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01273_extractGroups/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01273_extractGroups/query.sql b/parser/testdata/01273_extractGroups/query.sql new file mode 100644 index 000000000..f060b1d42 --- /dev/null +++ b/parser/testdata/01273_extractGroups/query.sql @@ -0,0 +1,51 @@ +-- error cases +SELECT extractGroups(); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} not enough arguments +SELECT extractGroups('hello'); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} not enough arguments +SELECT extractGroups('hello', 123); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} invalid argument type +SELECT extractGroups(123, 'world'); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} invalid argument type +SELECT extractGroups('hello world', '((('); --{serverError CANNOT_COMPILE_REGEXP} invalid re +SELECT extractGroups('hello world', materialize('\\w+')); --{serverError ILLEGAL_COLUMN} non-const needle + +SELECT '0 groups, zero matches'; +SELECT extractGroups('hello world', '\\w+'); -- { serverError BAD_ARGUMENTS } + +SELECT '1 group, multiple matches, String and FixedString'; +SELECT extractGroups('hello world', '(\\w+) (\\w+)'); +SELECT extractGroups('hello world', CAST('(\\w+) (\\w+)' as FixedString(11))); +SELECT extractGroups(CAST('hello world' AS FixedString(12)), '(\\w+) (\\w+)'); +SELECT extractGroups(CAST('hello world' AS FixedString(12)), CAST('(\\w+) (\\w+)' as FixedString(11))); +SELECT extractGroups(materialize(CAST('hello world' AS FixedString(12))), '(\\w+) (\\w+)'); +SELECT extractGroups(materialize(CAST('hello world' AS FixedString(12))), CAST('(\\w+) (\\w+)' as FixedString(11))); + +SELECT 'multiple matches'; +SELECT extractGroups('abc=111, def=222, ghi=333 "jkl mno"="444 foo bar"', '("[^"]+"|\\w+)=("[^"]+"|\\w+)'); + +SELECT 'big match'; +SELECT + length(haystack), length(matches), arrayMap((x) -> length(x), matches) +FROM ( + SELECT + repeat('abcdefghijklmnopqrstuvwxyz', number * 10) AS haystack, 
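+ -- the single capture group on the next line is the 26-letter alphabet written
+ -- out six times (156 characters), so each matching row extracts one deliberately large group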
+ extractGroups(haystack, '(abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz)') AS matches + FROM numbers(3) +); + +SELECT 'lots of matches'; +SELECT + length(haystack), length(matches), arrayReduce('sum', arrayMap((x) -> length(x), matches)) +FROM ( + SELECT + repeat('abcdefghijklmnopqrstuvwxyz', number * 10) AS haystack, + extractGroups(haystack, '(\\w)') AS matches + FROM numbers(3) +); + +SELECT 'lots of groups'; +SELECT + length(haystack), length(matches), arrayMap((x) -> length(x), matches) +FROM ( + SELECT + repeat('abcdefghijklmnopqrstuvwxyz', number * 10) AS haystack, + extractGroups(haystack, repeat('(\\w)', 100)) AS matches + FROM numbers(3) +); diff --git a/parser/testdata/01273_h3EdgeAngle_range_check/ast.json b/parser/testdata/01273_h3EdgeAngle_range_check/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01273_h3EdgeAngle_range_check/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01273_h3EdgeAngle_range_check/metadata.json b/parser/testdata/01273_h3EdgeAngle_range_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01273_h3EdgeAngle_range_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01273_h3EdgeAngle_range_check/query.sql b/parser/testdata/01273_h3EdgeAngle_range_check/query.sql new file mode 100644 index 000000000..2c5e27f6c --- /dev/null +++ b/parser/testdata/01273_h3EdgeAngle_range_check/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest + +SELECT h3EdgeAngle(100); -- { serverError ARGUMENT_OUT_OF_BOUND } diff --git a/parser/testdata/01273_lc_fixed_string_field/ast.json b/parser/testdata/01273_lc_fixed_string_field/ast.json new file mode 100644 index 000000000..845d55aa0 --- /dev/null +++ b/parser/testdata/01273_lc_fixed_string_field/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001260331, + "rows_read": 2, + "bytes_read": 55 + } +} diff --git a/parser/testdata/01273_lc_fixed_string_field/metadata.json b/parser/testdata/01273_lc_fixed_string_field/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01273_lc_fixed_string_field/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01273_lc_fixed_string_field/query.sql b/parser/testdata/01273_lc_fixed_string_field/query.sql new file mode 100644 index 000000000..11f93e918 --- /dev/null +++ b/parser/testdata/01273_lc_fixed_string_field/query.sql @@ -0,0 +1,17 @@ +CREATE TABLE t +( + `d` Date, + `s` LowCardinality(FixedString(3)), + `c` UInt32 +) +ENGINE = SummingMergeTree() +PARTITION BY d +ORDER BY (d, s); + +INSERT INTO t (d, s, c) VALUES ('2020-01-01', 'ABC', 1); +INSERT INTO t (d, s, c) VALUES ('2020-01-01', 'ABC', 2); + +OPTIMIZE TABLE t; +SELECT * FROM t; + +DROP TABLE t; diff --git a/parser/testdata/01274_alter_rename_column_distributed/ast.json b/parser/testdata/01274_alter_rename_column_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01274_alter_rename_column_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01274_alter_rename_column_distributed/metadata.json 
b/parser/testdata/01274_alter_rename_column_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01274_alter_rename_column_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01274_alter_rename_column_distributed/query.sql b/parser/testdata/01274_alter_rename_column_distributed/query.sql new file mode 100644 index 000000000..a1e9998af --- /dev/null +++ b/parser/testdata/01274_alter_rename_column_distributed/query.sql @@ -0,0 +1,21 @@ +-- Tags: distributed + +set distributed_foreground_insert = 1; + +DROP TABLE IF EXISTS visits; +DROP TABLE IF EXISTS visits_dist; + +CREATE TABLE visits(StartDate Date, Name String) ENGINE MergeTree ORDER BY(StartDate); +CREATE TABLE visits_dist AS visits ENGINE Distributed(test_cluster_two_shards_localhost, currentDatabase(), 'visits', rand()); + +INSERT INTO visits_dist (StartDate, Name) VALUES ('2020-01-01', 'hello'); +INSERT INTO visits_dist (StartDate, Name) VALUES ('2020-01-02', 'hello2'); + +ALTER TABLE visits RENAME COLUMN Name TO Name2; +ALTER TABLE visits_dist RENAME COLUMN Name TO Name2; + +SELECT * FROM visits_dist ORDER BY StartDate, Name2; + +DROP TABLE visits; +DROP TABLE visits_dist; + diff --git a/parser/testdata/01275_alter_rename_column_default_expr/ast.json b/parser/testdata/01275_alter_rename_column_default_expr/ast.json new file mode 100644 index 000000000..130290bc9 --- /dev/null +++ b/parser/testdata/01275_alter_rename_column_default_expr/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_for_rename (children 1)" + }, + { + "explain": " Identifier table_for_rename" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001131772, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/01275_alter_rename_column_default_expr/metadata.json b/parser/testdata/01275_alter_rename_column_default_expr/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01275_alter_rename_column_default_expr/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01275_alter_rename_column_default_expr/query.sql b/parser/testdata/01275_alter_rename_column_default_expr/query.sql new file mode 100644 index 000000000..21106d200 --- /dev/null +++ b/parser/testdata/01275_alter_rename_column_default_expr/query.sql @@ -0,0 +1,34 @@ +DROP TABLE IF EXISTS table_for_rename; + +CREATE TABLE table_for_rename +( + date Date, + key UInt64, + value1 String, + value2 String, + value3 String DEFAULT concat(value1, ' + ', value2) +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY key; + +INSERT INTO table_for_rename (date, key, value1, value2) SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number + 1) from numbers(9); +SELECT * FROM table_for_rename ORDER BY key; + +ALTER TABLE table_for_rename RENAME COLUMN value1 TO value4; +ALTER TABLE table_for_rename RENAME COLUMN value2 TO value5; +SHOW CREATE TABLE table_for_rename; +SELECT * FROM table_for_rename ORDER BY key; + +INSERT INTO table_for_rename (date, key, value4, value5) SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number + 1) from numbers(10, 10); +SELECT * FROM table_for_rename ORDER BY key; + +ALTER TABLE table_for_rename RENAME COLUMN value4 TO value1; +ALTER TABLE table_for_rename RENAME COLUMN value5 TO value2; +SHOW CREATE TABLE table_for_rename; +SELECT * FROM table_for_rename ORDER BY 
key; + +INSERT INTO table_for_rename (date, key, value1, value2) SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number + 1) from numbers(20,10); +SELECT * FROM table_for_rename ORDER BY key; + +DROP TABLE IF EXISTS table_for_rename; diff --git a/parser/testdata/01275_extract_groups_check/ast.json b/parser/testdata/01275_extract_groups_check/ast.json new file mode 100644 index 000000000..9e7302b15 --- /dev/null +++ b/parser/testdata/01275_extract_groups_check/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function extractGroups (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'hello'" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001032404, + "rows_read": 8, + "bytes_read": 289 + } +} diff --git a/parser/testdata/01275_extract_groups_check/metadata.json b/parser/testdata/01275_extract_groups_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01275_extract_groups_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01275_extract_groups_check/query.sql b/parser/testdata/01275_extract_groups_check/query.sql new file mode 100644 index 000000000..b1fe1a136 --- /dev/null +++ b/parser/testdata/01275_extract_groups_check/query.sql @@ -0,0 +1,14 @@ +SELECT extractGroups('hello', ''); -- { serverError BAD_ARGUMENTS } +SELECT extractAllGroups('hello', ''); -- { serverError BAD_ARGUMENTS } + +SELECT extractGroups('hello', ' '); -- { serverError BAD_ARGUMENTS } +SELECT extractAllGroups('hello', ' '); -- { serverError BAD_ARGUMENTS } + +SELECT extractGroups('hello', '\0'); -- { serverError BAD_ARGUMENTS } +SELECT extractAllGroups('hello', '\0'); -- { serverError BAD_ARGUMENTS } + +SELECT extractGroups('hello', 'world'); -- { serverError BAD_ARGUMENTS } +SELECT extractAllGroups('hello', 'world'); -- { serverError BAD_ARGUMENTS } + +SELECT extractGroups('hello', 'hello|world'); -- { serverError BAD_ARGUMENTS } +SELECT extractAllGroups('hello', 'hello|world'); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/01276_alter_rename_column_materialized_expr/ast.json b/parser/testdata/01276_alter_rename_column_materialized_expr/ast.json new file mode 100644 index 000000000..92ccc13a9 --- /dev/null +++ b/parser/testdata/01276_alter_rename_column_materialized_expr/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_for_rename (children 1)" + }, + { + "explain": " Identifier table_for_rename" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001297023, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/01276_alter_rename_column_materialized_expr/metadata.json b/parser/testdata/01276_alter_rename_column_materialized_expr/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01276_alter_rename_column_materialized_expr/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01276_alter_rename_column_materialized_expr/query.sql b/parser/testdata/01276_alter_rename_column_materialized_expr/query.sql new file mode 100644 index 
000000000..9089c52ed --- /dev/null +++ b/parser/testdata/01276_alter_rename_column_materialized_expr/query.sql @@ -0,0 +1,37 @@ +DROP TABLE IF EXISTS table_for_rename; + +CREATE TABLE table_for_rename +( + date Date, + key UInt64, + value1 String, + value2 String, + value3 String MATERIALIZED concat(value1, ' + ', value2) +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY key; + +INSERT INTO table_for_rename (date, key, value1, value2) SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number + 1) from numbers(9); +SELECT * FROM table_for_rename ORDER BY key; + +ALTER TABLE table_for_rename RENAME COLUMN value1 TO value4; +ALTER TABLE table_for_rename RENAME COLUMN value2 TO value5; +SHOW CREATE TABLE table_for_rename; +SELECT * FROM table_for_rename ORDER BY key; + +SELECT '-- insert after rename --'; +INSERT INTO table_for_rename (date, key, value4, value5) SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number + 1) from numbers(10, 10); +SELECT * FROM table_for_rename ORDER BY key; + +SELECT '-- rename columns back --'; +ALTER TABLE table_for_rename RENAME COLUMN value4 TO value1; +ALTER TABLE table_for_rename RENAME COLUMN value5 TO value2; +SHOW CREATE TABLE table_for_rename; +SELECT * FROM table_for_rename ORDER BY key; + +SELECT '-- insert after rename column --'; +INSERT INTO table_for_rename (date, key, value1, value2) SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number + 1) from numbers(20,10); +SELECT * FROM table_for_rename ORDER BY key; + +DROP TABLE IF EXISTS table_for_rename; diff --git a/parser/testdata/01276_random_string/ast.json b/parser/testdata/01276_random_string/ast.json new file mode 100644 index 000000000..0e022ce92 --- /dev/null +++ b/parser/testdata/01276_random_string/ast.json @@ -0,0 +1,181 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier c" + }, + { + "explain": " Literal UInt64_30000" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayJoin (alias byte) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function reinterpretAsUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function substring (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function randomString (children 1)" + }, + { + 
"explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Function count (alias c) (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100000" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier byte" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier byte" + } + ], + + "rows": 53, + + "statistics": + { + "elapsed": 0.001642674, + "rows_read": 53, + "bytes_read": 2478 + } +} diff --git a/parser/testdata/01276_random_string/metadata.json b/parser/testdata/01276_random_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01276_random_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01276_random_string/query.sql b/parser/testdata/01276_random_string/query.sql new file mode 100644 index 000000000..5a86d88b2 --- /dev/null +++ b/parser/testdata/01276_random_string/query.sql @@ -0,0 +1 @@ +SELECT DISTINCT c > 30000 FROM (SELECT arrayJoin(arrayMap(x -> reinterpretAsUInt8(substring(randomString(100), x + 1, 1)), range(100))) AS byte, count() AS c FROM numbers(100000) GROUP BY byte ORDER BY byte); diff --git a/parser/testdata/01276_system_licenses/ast.json b/parser/testdata/01276_system_licenses/ast.json new file mode 100644 index 000000000..3c08278fb --- /dev/null +++ b/parser/testdata/01276_system_licenses/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.licenses" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001386233, + "rows_read": 13, + "bytes_read": 511 + } +} diff --git a/parser/testdata/01276_system_licenses/metadata.json b/parser/testdata/01276_system_licenses/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01276_system_licenses/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01276_system_licenses/query.sql b/parser/testdata/01276_system_licenses/query.sql new file mode 100644 index 000000000..b6db61a20 --- /dev/null +++ 
b/parser/testdata/01276_system_licenses/query.sql @@ -0,0 +1,2 @@ +SELECT count() > 10 FROM system.licenses; +SELECT library_name, license_type, license_path FROM system.licenses WHERE library_name = 'abseil-cpp'; diff --git a/parser/testdata/01277_alter_rename_column_constraint/ast.json b/parser/testdata/01277_alter_rename_column_constraint/ast.json new file mode 100644 index 000000000..5222134ed --- /dev/null +++ b/parser/testdata/01277_alter_rename_column_constraint/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_for_rename (children 1)" + }, + { + "explain": " Identifier table_for_rename" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001124362, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/01277_alter_rename_column_constraint/metadata.json b/parser/testdata/01277_alter_rename_column_constraint/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01277_alter_rename_column_constraint/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01277_alter_rename_column_constraint/query.sql b/parser/testdata/01277_alter_rename_column_constraint/query.sql new file mode 100644 index 000000000..76c1a3589 --- /dev/null +++ b/parser/testdata/01277_alter_rename_column_constraint/query.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS table_for_rename; + +CREATE TABLE table_for_rename +( + date Date, + key UInt64, + value1 String, + value2 String, + value3 String, + CONSTRAINT cs_value1 CHECK toInt64(value1) < toInt64(value2), + CONSTRAINT cs_value2 CHECK toInt64(value2) < toInt64(value3) +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY key; + +INSERT INTO table_for_rename SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number + 1), toString(number + 2) from numbers(9); +INSERT INTO table_for_rename SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number + 1), toString(number) from numbers(9); --{serverError VIOLATED_CONSTRAINT} + +SELECT * FROM table_for_rename ORDER BY key; + +ALTER TABLE table_for_rename RENAME COLUMN value1 TO value4; +ALTER TABLE table_for_rename RENAME COLUMN value2 TO value5; +SHOW CREATE TABLE table_for_rename; +SELECT * FROM table_for_rename ORDER BY key; + +SELECT '-- insert after rename --'; +INSERT INTO table_for_rename SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number + 1), toString(number + 2) from numbers(10, 10); +INSERT INTO table_for_rename SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number + 1), toString(number) from numbers(10, 10); --{serverError VIOLATED_CONSTRAINT} +SELECT * FROM table_for_rename ORDER BY key; + +SELECT '-- rename columns back --'; +ALTER TABLE table_for_rename RENAME COLUMN value4 TO value1; +ALTER TABLE table_for_rename RENAME COLUMN value5 TO value2; +SHOW CREATE TABLE table_for_rename; +SELECT * FROM table_for_rename ORDER BY key; + +SELECT '-- insert after rename column --'; +INSERT INTO table_for_rename SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number + 1), toString(number + 2) from numbers(20,10); +INSERT INTO table_for_rename SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number), toString(number + 2) from numbers(20, 10); --{serverError VIOLATED_CONSTRAINT} +SELECT * FROM table_for_rename ORDER BY key; + +DROP TABLE IF EXISTS table_for_rename; diff --git 
a/parser/testdata/01277_alter_rename_column_constraint_zookeeper_long/ast.json b/parser/testdata/01277_alter_rename_column_constraint_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01277_alter_rename_column_constraint_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01277_alter_rename_column_constraint_zookeeper_long/metadata.json b/parser/testdata/01277_alter_rename_column_constraint_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01277_alter_rename_column_constraint_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01277_alter_rename_column_constraint_zookeeper_long/query.sql b/parser/testdata/01277_alter_rename_column_constraint_zookeeper_long/query.sql new file mode 100644 index 000000000..ce8d87f9a --- /dev/null +++ b/parser/testdata/01277_alter_rename_column_constraint_zookeeper_long/query.sql @@ -0,0 +1,45 @@ +-- Tags: long, zookeeper + +DROP TABLE IF EXISTS table_for_rename1; + +CREATE TABLE table_for_rename1 +( + date Date, + key UInt64, + value1 String, + value2 String, + value3 String, + CONSTRAINT cs_value1 CHECK toInt64(value1) < toInt64(value2), + CONSTRAINT cs_value2 CHECK toInt64(value2) < toInt64(value3) +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01277/test_for_rename', '1') +PARTITION BY date +ORDER BY key; + +INSERT INTO table_for_rename1 SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number + 1), toString(number + 2) from numbers(9); +INSERT INTO table_for_rename1 SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number + 1), toString(number) from numbers(9); --{serverError VIOLATED_CONSTRAINT} + +SELECT * FROM table_for_rename1 ORDER BY key; + +ALTER TABLE table_for_rename1 RENAME COLUMN value1 TO value4; +ALTER TABLE table_for_rename1 RENAME COLUMN value2 TO value5; +SHOW CREATE TABLE table_for_rename1; +SELECT * FROM table_for_rename1 ORDER BY key; + +SELECT '-- insert after rename --'; +INSERT INTO table_for_rename1 SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number + 1), toString(number + 2) from numbers(10, 10); +INSERT INTO table_for_rename1 SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number + 1), toString(number) from numbers(10, 10); --{serverError VIOLATED_CONSTRAINT} +SELECT * FROM table_for_rename1 ORDER BY key; + +SELECT '-- rename columns back --'; +ALTER TABLE table_for_rename1 RENAME COLUMN value4 TO value1; +ALTER TABLE table_for_rename1 RENAME COLUMN value5 TO value2; +SHOW CREATE TABLE table_for_rename1; +SELECT * FROM table_for_rename1 ORDER BY key; + +SELECT '-- insert after rename column --'; +INSERT INTO table_for_rename1 SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number + 1), toString(number + 2) from numbers(20,10); +INSERT INTO table_for_rename1 SELECT toDate('2019-10-01') + number % 3, number, toString(number), toString(number), toString(number + 2) from numbers(20, 10); --{serverError VIOLATED_CONSTRAINT} +SELECT * FROM table_for_rename1 ORDER BY key; + +DROP TABLE IF EXISTS table_for_rename1; diff --git a/parser/testdata/01277_buffer_column_order/ast.json b/parser/testdata/01277_buffer_column_order/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01277_buffer_column_order/ast.json @@ -0,0 +1 @@ +{"error": true} diff
--git a/parser/testdata/01277_buffer_column_order/metadata.json b/parser/testdata/01277_buffer_column_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01277_buffer_column_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01277_buffer_column_order/query.sql b/parser/testdata/01277_buffer_column_order/query.sql new file mode 100644 index 000000000..a7dbccc44 --- /dev/null +++ b/parser/testdata/01277_buffer_column_order/query.sql @@ -0,0 +1,60 @@ +-- Check for Block::sortColumns(), can be done using Buffer. + +drop table if exists out_01277; +drop table if exists in_01277; +drop table if exists buffer_01277; +drop table if exists mv_01277_1; +drop table if exists mv_01277_2; + +create table out_01277 +( + k1 Int, + k2 Int, + a1 Int, + a2 Int, + b1 Int, + b2 Int, + c Int +) Engine=Null(); + +create table buffer_01277 as out_01277 Engine=Buffer(currentDatabase(), out_01277, 1, + 86400, 86400, + 1e5, 1e6, + 10e6, 100e6); +create table in_01277 as out_01277 Engine=Null(); + +-- differs in order of fields in SELECT clause +create materialized view mv_01277_1 to buffer_01277 as select k1, k2, a1, a2, b1, b2, c from in_01277; +create materialized view mv_01277_2 to buffer_01277 as select a1, a2, k1, k2, b1, b2, c from in_01277; + +-- column order is ignored, just for humans +insert into mv_01277_1 select + number k1, + number k2, + number a1, + number a2, + number b1, + number b2, + number c +from numbers(1); + +-- with wrong order in Block::sortColumns() triggers: +-- +-- Code: 171. DB::Exception: Received from localhost:9000. DB::Exception: Block structure mismatch in Buffer stream: different names of columns: +-- c Int32 Int32(size = 1), b2 Int32 Int32(size = 1), a2 Int32 Int32(size = 1), a1 Int32 Int32(size = 1), k2 Int32 Int32(size = 1), b1 Int32 Int32(size = 1), k1 Int32 Int32(size = 1) +-- c Int32 Int32(size = 1), b2 Int32 Int32(size = 1), k2 Int32 Int32(size = 1), a1 Int32 Int32(size = 1), b1 Int32 Int32(size = 1), k1 Int32 Int32(size = 1), a2 Int32 Int32(size = 1). 
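+-- Orientation note, stated as an assumption rather than a guarantee: if Buffer
+-- normalizes every incoming block via Block::sortColumns() by column name, both
+-- materialized views above reduce to the same layout (a1, a2, b1, b2, c, k1, k2)
+-- and the insert below must succeed. A probe of that layout, kept commented out
+-- so the test output stays unchanged:
+-- select groupArray(name) from system.columns where database = currentDatabase() and table = 'buffer_01277';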
+insert into mv_01277_2 select + number a1, + number a2, + number k1, + number k2, + number b1, + number b2, + number c +from numbers(1); + +drop table mv_01277_1; +drop table mv_01277_2; +drop table buffer_01277; +drop table out_01277; +drop table in_01277; diff --git a/parser/testdata/01277_convert_field_to_type_logical_error/ast.json b/parser/testdata/01277_convert_field_to_type_logical_error/ast.json new file mode 100644 index 000000000..4cf58b072 --- /dev/null +++ b/parser/testdata/01277_convert_field_to_type_logical_error/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 1)" + }, + { + "explain": "   ExpressionList (children 3)" + }, + { + "explain": "    Literal Int64_-2487" + }, + { + "explain": "    Function globalNullIn (children 1)" + }, + { + "explain": "     ExpressionList (children 2)" + }, + { + "explain": "      Function toIntervalMinute (children 1)" + }, + { + "explain": "       ExpressionList (children 1)" + }, + { + "explain": "        Literal Int64_-88074" + }, + { + "explain": "      Literal 'qEkek..'" + }, + { + "explain": "    Literal Array_[Float64_-27.537293]" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.0011708, + "rows_read": 12, + "bytes_read": 473 + } +} diff --git a/parser/testdata/01277_convert_field_to_type_logical_error/metadata.json b/parser/testdata/01277_convert_field_to_type_logical_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01277_convert_field_to_type_logical_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01277_convert_field_to_type_logical_error/query.sql b/parser/testdata/01277_convert_field_to_type_logical_error/query.sql new file mode 100644 index 000000000..f4443135b --- /dev/null +++ b/parser/testdata/01277_convert_field_to_type_logical_error/query.sql @@ -0,0 +1 @@ +SELECT -2487, globalNullIn(toIntervalMinute(-88074), 'qEkek..'), [-27.537293]; -- { serverError TYPE_MISMATCH } diff --git a/parser/testdata/01277_fromUnixTimestamp64/ast.json b/parser/testdata/01277_fromUnixTimestamp64/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01277_fromUnixTimestamp64/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01277_fromUnixTimestamp64/metadata.json b/parser/testdata/01277_fromUnixTimestamp64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01277_fromUnixTimestamp64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01277_fromUnixTimestamp64/query.sql b/parser/testdata/01277_fromUnixTimestamp64/query.sql new file mode 100644 index 000000000..62655d1cc --- /dev/null +++ b/parser/testdata/01277_fromUnixTimestamp64/query.sql @@ -0,0 +1,81 @@ +-- Error cases +SELECT fromUnixTimestamp64Second(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT fromUnixTimestamp64Milli(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT fromUnixTimestamp64Micro(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT fromUnixTimestamp64Nano(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +SELECT fromUnixTimestamp64Second('abc'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT fromUnixTimestamp64Milli('abc'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT fromUnixTimestamp64Micro('abc'); -- {serverError
ILLEGAL_TYPE_OF_ARGUMENT} +SELECT fromUnixTimestamp64Nano('abc'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT fromUnixTimestamp64Second('abc', 123); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT fromUnixTimestamp64Milli('abc', 123); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT fromUnixTimestamp64Micro('abc', 123); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT fromUnixTimestamp64Nano('abc', 123); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT 'const column'; +WITH + CAST(1234567891011 AS Int64) AS i64, + 'UTC' AS tz +SELECT + tz, + i64, + fromUnixTimestamp64Second(i64, tz), + fromUnixTimestamp64Milli(i64, tz), + fromUnixTimestamp64Micro(i64, tz), + fromUnixTimestamp64Nano(i64, tz) as dt64, + toTypeName(dt64); + +WITH + CAST(1234567891011 AS Int64) AS i64, + 'Asia/Makassar' AS tz +SELECT + tz, + i64, + fromUnixTimestamp64Second(i64, tz), + fromUnixTimestamp64Milli(i64, tz), + fromUnixTimestamp64Micro(i64, tz), + fromUnixTimestamp64Nano(i64, tz) as dt64, + toTypeName(dt64); + +SELECT 'non-const column'; +WITH + CAST(1234567891011 AS Int64) AS i64, + 'UTC' AS tz +SELECT + i64, + fromUnixTimestamp64Second(i64, tz), + fromUnixTimestamp64Milli(i64, tz), + fromUnixTimestamp64Micro(i64, tz), + fromUnixTimestamp64Nano(i64, tz) as dt64; + +SELECT 'upper range bound'; +WITH + 10413688942 AS timestamp, + CAST(10413688942 AS Int64) AS second, + CAST(10413688942123 AS Int64) AS milli, + CAST(10413688942123456 AS Int64) AS micro, + CAST(10413688942123456789 AS Int64) AS nano, + 'UTC' AS tz +SELECT + timestamp, + fromUnixTimestamp64Second(second, tz), + fromUnixTimestamp64Milli(milli, tz), + fromUnixTimestamp64Micro(micro, tz), + fromUnixTimestamp64Nano(nano, tz); + +SELECT 'lower range bound'; +WITH + -2208985199 AS timestamp, + CAST(-2208985199 AS Int64) AS second, + CAST(-2208985199123 AS Int64) AS milli, + CAST(-2208985199123456 AS Int64) AS micro, + CAST(-2208985199123456789 AS Int64) AS nano, + 'UTC' AS tz +SELECT + timestamp, + fromUnixTimestamp64Second(second, tz), + fromUnixTimestamp64Milli(milli, tz), + fromUnixTimestamp64Micro(micro, tz), + fromUnixTimestamp64Nano(nano, tz); diff --git a/parser/testdata/01277_large_tuples/ast.json b/parser/testdata/01277_large_tuples/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01277_large_tuples/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01277_large_tuples/metadata.json b/parser/testdata/01277_large_tuples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01277_large_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01277_large_tuples/query.sql b/parser/testdata/01277_large_tuples/query.sql new file mode 100644 index 000000000..74fd05749 --- /dev/null +++ b/parser/testdata/01277_large_tuples/query.sql @@ -0,0 +1,19 @@ +WITH + ( + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155,
156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, + 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 294, 295, 296, 297, 298, 299, 300, + 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392, 393, 394, 395, 396, 397, 398, 399, 400, + 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425, 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487, 488, 489, 490, 491, 492, 493, 494, 495, 496, 497, 498, 499, 500, + 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583, 584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, + 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692, 693, 694, 695, 696, 697, 698, 699, 700, + 701, 702, 703, 704, 705, 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, 716, 717, 718, 719, 720, 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, 731, 732, 733, 734, 735, 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, 746, 747, 748, 749, 750, 751, 752, 753, 754, 755, 756, 757, 758, 759, 760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, 781, 782, 783, 784, 785, 786, 787, 788, 789, 790, 791, 792, 793, 794, 795, 796, 797, 798, 799, 800, + 801, 802, 803, 804, 805, 806, 807, 808, 809, 810, 811, 812, 813, 814, 815, 816, 817, 818, 819, 820, 821, 822, 823, 824, 825, 826, 827, 828, 829, 830, 831, 832, 833, 834, 835, 836, 837, 838, 839, 840, 841, 842, 843, 844, 845, 846, 847, 848, 849, 850, 851, 852, 853, 854, 855, 856, 857, 858, 859, 860, 861, 862, 863, 
864, 865, 866, 867, 868, 869, 870, 871, 872, 873, 874, 875, 876, 877, 878, 879, 880, 881, 882, 883, 884, 885, 886, 887, 888, 889, 890, 891, 892, 893, 894, 895, 896, 897, 898, 899, 900, + 901, 902, 903, 904, 905, 906, 907, 908, 909, 910, 911, 912, 913, 914, 915, 916, 917, 918, 919, 920, 921, 922, 923, 924, 925, 926, 927, 928, 929, 930, 931, 932, 933, 934, 935, 936, 937, 938, 939, 940, 941, 942, 943, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 961, 962, 963, 964, 965, 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979, 980, 981, 982, 983, 984, 985, 986, 987, 988, 989, 990, 991, 992, 993, 994, 995, 996, 997, 998, 999, 1000 + ) AS tuple +SELECT + tuple.1, + tuple.300, + tuple.500, + tuple.700, + tuple.1000; diff --git a/parser/testdata/01277_random_fixed_string/ast.json b/parser/testdata/01277_random_fixed_string/ast.json new file mode 100644 index 000000000..f27daa2ce --- /dev/null +++ b/parser/testdata/01277_random_fixed_string/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function randomFixedString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'string'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001071226, + "rows_read": 7, + "bytes_read": 270 + } +} diff --git a/parser/testdata/01277_random_fixed_string/metadata.json b/parser/testdata/01277_random_fixed_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01277_random_fixed_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01277_random_fixed_string/query.sql b/parser/testdata/01277_random_fixed_string/query.sql new file mode 100644 index 000000000..d21ba5142 --- /dev/null +++ b/parser/testdata/01277_random_fixed_string/query.sql @@ -0,0 +1,5 @@ +SELECT randomFixedString('string'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT randomFixedString(0); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT randomFixedString(rand() % 10); -- { serverError ILLEGAL_COLUMN } +SELECT toTypeName(randomFixedString(10)); +SELECT DISTINCT c > 30000 FROM (SELECT arrayJoin(arrayMap(x -> reinterpretAsUInt8(substring(randomFixedString(100), x + 1, 1)), range(100))) AS byte, count() AS c FROM numbers(100000) GROUP BY byte ORDER BY byte); diff --git a/parser/testdata/01277_toUnixTimestamp64/ast.json b/parser/testdata/01277_toUnixTimestamp64/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01277_toUnixTimestamp64/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01277_toUnixTimestamp64/metadata.json b/parser/testdata/01277_toUnixTimestamp64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01277_toUnixTimestamp64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01277_toUnixTimestamp64/query.sql b/parser/testdata/01277_toUnixTimestamp64/query.sql new file mode 100644 index 000000000..5ca385d5f --- /dev/null +++ b/parser/testdata/01277_toUnixTimestamp64/query.sql @@ -0,0 +1,35 @@ +-- Error cases +SELECT toUnixTimestamp64Second(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT toUnixTimestamp64Milli(); -- {serverError 
NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT toUnixTimestamp64Micro(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT toUnixTimestamp64Nano(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +SELECT toUnixTimestamp64Second('abc'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toUnixTimestamp64Milli('abc'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toUnixTimestamp64Micro('abc'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toUnixTimestamp64Nano('abc'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT toUnixTimestamp64Second('abc', 123); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT toUnixTimestamp64Milli('abc', 123); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT toUnixTimestamp64Micro('abc', 123); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT toUnixTimestamp64Nano('abc', 123); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +SELECT 'const column'; +WITH toDateTime64('2019-09-16 19:20:12.345678910', 3, 'Asia/Istanbul') AS dt64 +SELECT dt64, toUnixTimestamp64Second(dt64), toUnixTimestamp64Milli(dt64), toUnixTimestamp64Micro(dt64), toUnixTimestamp64Nano(dt64); + +WITH toDateTime64('2019-09-16 19:20:12.345678910', 6, 'Asia/Istanbul') AS dt64 +SELECT dt64, toUnixTimestamp64Second(dt64), toUnixTimestamp64Milli(dt64), toUnixTimestamp64Micro(dt64), toUnixTimestamp64Nano(dt64); + +WITH toDateTime64('2019-09-16 19:20:12.345678910', 9, 'Asia/Istanbul') AS dt64 +SELECT dt64, toUnixTimestamp64Second(dt64), toUnixTimestamp64Milli(dt64), toUnixTimestamp64Micro(dt64), toUnixTimestamp64Nano(dt64); + +SELECT 'non-const column'; +WITH toDateTime64('2019-09-16 19:20:12.345678910', 3, 'Asia/Istanbul') AS x +SELECT materialize(x) as dt64, toUnixTimestamp64Second(dt64), toUnixTimestamp64Milli(dt64), toUnixTimestamp64Micro(dt64), toUnixTimestamp64Nano(dt64); + +WITH toDateTime64('2019-09-16 19:20:12.345678910', 6, 'Asia/Istanbul') AS x +SELECT materialize(x) as dt64, toUnixTimestamp64Second(dt64), toUnixTimestamp64Milli(dt64), toUnixTimestamp64Micro(dt64), toUnixTimestamp64Nano(dt64); + +WITH toDateTime64('2019-09-16 19:20:12.345678910', 9, 'Asia/Istanbul') AS x +SELECT materialize(x) as dt64, toUnixTimestamp64Second(dt64), toUnixTimestamp64Milli(dt64), toUnixTimestamp64Micro(dt64), toUnixTimestamp64Nano(dt64); diff --git a/parser/testdata/01277_unixTimestamp64_compatibility/ast.json b/parser/testdata/01277_unixTimestamp64_compatibility/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01277_unixTimestamp64_compatibility/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01277_unixTimestamp64_compatibility/metadata.json b/parser/testdata/01277_unixTimestamp64_compatibility/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01277_unixTimestamp64_compatibility/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01277_unixTimestamp64_compatibility/query.sql b/parser/testdata/01277_unixTimestamp64_compatibility/query.sql new file mode 100644 index 000000000..7d8d0b879 --- /dev/null +++ b/parser/testdata/01277_unixTimestamp64_compatibility/query.sql @@ -0,0 +1,64 @@ +WITH + toDateTime64('2019-09-16 19:20:12.345678910', 3) AS dt64 +SELECT + dt64, + fromUnixTimestamp64Milli(toUnixTimestamp64Milli(dt64)), + fromUnixTimestamp64Micro(toUnixTimestamp64Micro(dt64)), + fromUnixTimestamp64Nano(toUnixTimestamp64Nano(dt64)); + +WITH + toDateTime64('2019-09-16 19:20:12.345678910', 6) AS dt64 +SELECT + dt64, + 
fromUnixTimestamp64Milli(toUnixTimestamp64Milli(dt64)), + fromUnixTimestamp64Micro(toUnixTimestamp64Micro(dt64)), + fromUnixTimestamp64Nano(toUnixTimestamp64Nano(dt64)); + +WITH + toDateTime64('2019-09-16 19:20:12.345678910', 9) AS dt64 +SELECT + dt64, + fromUnixTimestamp64Milli(toUnixTimestamp64Milli(dt64)), + fromUnixTimestamp64Micro(toUnixTimestamp64Micro(dt64)), + fromUnixTimestamp64Nano(toUnixTimestamp64Nano(dt64)); + +SELECT 'with explicit timezone'; +WITH + 'UTC' as timezone, + toDateTime64('2019-09-16 19:20:12.345678910', 3, timezone) AS dt64 +SELECT + dt64, + fromUnixTimestamp64Milli(toUnixTimestamp64Milli(dt64), timezone), + fromUnixTimestamp64Micro(toUnixTimestamp64Micro(dt64), timezone), + fromUnixTimestamp64Nano(toUnixTimestamp64Nano(dt64), timezone) AS v, + toTypeName(v); + +WITH + 'Asia/Makassar' as timezone, + toDateTime64('2019-09-16 19:20:12.345678910', 3, timezone) AS dt64 +SELECT + dt64, + fromUnixTimestamp64Milli(toUnixTimestamp64Milli(dt64), timezone), + fromUnixTimestamp64Micro(toUnixTimestamp64Micro(dt64), timezone), + fromUnixTimestamp64Nano(toUnixTimestamp64Nano(dt64), timezone) AS v, + toTypeName(v); + + +WITH + CAST(1234567891011 AS Int64) AS val +SELECT + val, + toUnixTimestamp64Milli(fromUnixTimestamp64Milli(val)), + toUnixTimestamp64Micro(fromUnixTimestamp64Micro(val)), + toUnixTimestamp64Nano(fromUnixTimestamp64Nano(val)); + +SELECT 'with explicit timezone'; +WITH + 'UTC' as timezone, + CAST(1234567891011 AS Int64) AS val +SELECT + val, + toUnixTimestamp64Milli(fromUnixTimestamp64Milli(val, timezone)), + toUnixTimestamp64Micro(fromUnixTimestamp64Micro(val, timezone)), + toUnixTimestamp64Nano(fromUnixTimestamp64Nano(val, timezone)) AS v, + toTypeName(v); \ No newline at end of file diff --git a/parser/testdata/01278_alter_rename_combination/ast.json b/parser/testdata/01278_alter_rename_combination/ast.json new file mode 100644 index 000000000..21f53c7cd --- /dev/null +++ b/parser/testdata/01278_alter_rename_combination/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery rename_table (children 1)" + }, + { + "explain": " Identifier rename_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001321256, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/01278_alter_rename_combination/metadata.json b/parser/testdata/01278_alter_rename_combination/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01278_alter_rename_combination/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01278_alter_rename_combination/query.sql b/parser/testdata/01278_alter_rename_combination/query.sql new file mode 100644 index 000000000..51322f5d8 --- /dev/null +++ b/parser/testdata/01278_alter_rename_combination/query.sql @@ -0,0 +1,55 @@ +DROP TABLE IF EXISTS rename_table; + +CREATE TABLE rename_table (key Int32, value1 Int32, value2 Int32) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part=0; + +INSERT INTO rename_table VALUES (1, 2, 3); + +-- replace one with other +ALTER TABLE rename_table RENAME COLUMN value1 TO old_value1, RENAME COLUMN value2 TO value1; + +SHOW CREATE TABLE rename_table; + +SELECT * FROM rename_table FORMAT TSVWithNames; + +INSERT INTO rename_table VALUES (4, 5, 6); + +-- rename all columns simultaneously +ALTER TABLE rename_table RENAME COLUMN old_value1 TO v1, RENAME COLUMN value1 TO v2, RENAME COLUMN key to k; + +SHOW CREATE TABLE rename_table; + 
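+-- key, old_value1 and value1 are now k, v1 and v2; check that the data survives the combined rename.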
+SELECT * FROM rename_table ORDER BY k FORMAT TSVWithNames; + +DROP TABLE IF EXISTS rename_table; + +SELECT '---polymorphic---'; + +DROP TABLE IF EXISTS rename_table_polymorphic; + +CREATE TABLE rename_table_polymorphic ( + key Int32, + value1 Int32, + value2 Int32 +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS min_rows_for_wide_part = 10000; + +INSERT INTO rename_table_polymorphic VALUES (1, 2, 3); + +ALTER TABLE rename_table_polymorphic RENAME COLUMN value1 TO old_value1, RENAME COLUMN value2 TO value1; + +SHOW CREATE TABLE rename_table_polymorphic; + +SELECT * FROM rename_table_polymorphic FORMAT TSVWithNames; + +INSERT INTO rename_table_polymorphic VALUES (4, 5, 6); + +-- rename all columns simultaneously +ALTER TABLE rename_table_polymorphic RENAME COLUMN old_value1 TO v1, RENAME COLUMN value1 TO v2, RENAME COLUMN key to k; + +SHOW CREATE TABLE rename_table_polymorphic; + +SELECT * FROM rename_table_polymorphic ORDER BY k FORMAT TSVWithNames; + +DROP TABLE IF EXISTS rename_table_polymorphic; diff --git a/parser/testdata/01278_random_string_utf8/ast.json b/parser/testdata/01278_random_string_utf8/ast.json new file mode 100644 index 000000000..7ef758225 --- /dev/null +++ b/parser/testdata/01278_random_string_utf8/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function randomStringUTF8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'string'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001025068, + "rows_read": 7, + "bytes_read": 269 + } +} diff --git a/parser/testdata/01278_random_string_utf8/metadata.json b/parser/testdata/01278_random_string_utf8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01278_random_string_utf8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01278_random_string_utf8/query.sql b/parser/testdata/01278_random_string_utf8/query.sql new file mode 100644 index 000000000..c0149dc6f --- /dev/null +++ b/parser/testdata/01278_random_string_utf8/query.sql @@ -0,0 +1,5 @@ +SELECT randomStringUTF8('string'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT lengthUTF8(randomStringUTF8(100)); +SELECT toTypeName(randomStringUTF8(10)); +SELECT isValidUTF8(randomStringUTF8(100000)); +SELECT randomStringUTF8(0); diff --git a/parser/testdata/01278_variance_nonnegative/ast.json b/parser/testdata/01278_variance_nonnegative/ast.json new file mode 100644 index 000000000..3b69862ed --- /dev/null +++ b/parser/testdata/01278_variance_nonnegative/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function varSamp (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_0.1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + 
"explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1000000" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001080734, + "rows_read": 13, + "bytes_read": 521 + } +} diff --git a/parser/testdata/01278_variance_nonnegative/metadata.json b/parser/testdata/01278_variance_nonnegative/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01278_variance_nonnegative/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01278_variance_nonnegative/query.sql b/parser/testdata/01278_variance_nonnegative/query.sql new file mode 100644 index 000000000..aa676d8b2 --- /dev/null +++ b/parser/testdata/01278_variance_nonnegative/query.sql @@ -0,0 +1,9 @@ +SELECT varSamp(0.1) FROM numbers(1000000); +SELECT varPop(0.1) FROM numbers(1000000); +SELECT stddevSamp(0.1) FROM numbers(1000000); +SELECT stddevPop(0.1) FROM numbers(1000000); + +SELECT varSampStable(0.1) FROM numbers(1000000); +SELECT varPopStable(0.1) FROM numbers(1000000); +SELECT stddevSampStable(0.1) FROM numbers(1000000); +SELECT stddevPopStable(0.1) FROM numbers(1000000); diff --git a/parser/testdata/01279_dist_group_by/ast.json b/parser/testdata/01279_dist_group_by/ast.json new file mode 100644 index 000000000..48524f88a --- /dev/null +++ b/parser/testdata/01279_dist_group_by/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_01279 (children 1)" + }, + { + "explain": " Identifier data_01279" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001470515, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01279_dist_group_by/metadata.json b/parser/testdata/01279_dist_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01279_dist_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01279_dist_group_by/query.sql b/parser/testdata/01279_dist_group_by/query.sql new file mode 100644 index 000000000..331efd4b6 --- /dev/null +++ b/parser/testdata/01279_dist_group_by/query.sql @@ -0,0 +1,11 @@ +drop table if exists data_01279; + +create table data_01279 (key String) Engine=TinyLog(); +insert into data_01279 select reinterpretAsString(number) from numbers(100000); + +set max_rows_to_group_by=10; +set group_by_overflow_mode='any'; +set group_by_two_level_threshold=100; +select * from data_01279 group by key format Null; + +drop table data_01279; diff --git a/parser/testdata/01280_min_map_max_map/ast.json b/parser/testdata/01280_min_map_max_map/ast.json new file mode 100644 index 000000000..c4ac56df8 --- /dev/null +++ b/parser/testdata/01280_min_map_max_map/ast.json @@ -0,0 +1,127 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function minMap (alias m) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toInt32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, 
+ { + "explain": " Literal UInt64_10" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier m" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_100" + } + ], + + "rows": 35, + + "statistics": + { + "elapsed": 0.00128737, + "rows_read": 35, + "bytes_read": 1407 + } +} diff --git a/parser/testdata/01280_min_map_max_map/metadata.json b/parser/testdata/01280_min_map_max_map/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01280_min_map_max_map/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01280_min_map_max_map/query.sql b/parser/testdata/01280_min_map_max_map/query.sql new file mode 100644 index 000000000..9bc8c320d --- /dev/null +++ b/parser/testdata/01280_min_map_max_map/query.sql @@ -0,0 +1,46 @@ +select minMap([toInt32(number % 10), number % 10 + 1], [number, 1]) as m, toTypeName(m) from numbers(1, 100); +select minMap([1], [toInt32(number) - 50]) from numbers(1, 100); +select minMap([cast(1, 'Decimal(10, 2)')], [cast(toInt32(number) - 50, 'Decimal(10, 2)')]) from numbers(1, 100); + +select maxMap([toInt32(number % 10), number % 10 + 1], [number, 1]) as m, toTypeName(m) from numbers(1, 100); +select maxMap([1], [toInt32(number) - 50]) from numbers(1, 100); +select maxMap([cast(1, 'Decimal(10, 2)')], [cast(toInt32(number) - 50, 'Decimal(10, 2)')]) from numbers(1, 100); + +-- check different types for minMap +select minMap(val, cnt) from values ('val Array(UUID), cnt Array(UUID)', + (['01234567-89ab-cdef-0123-456789abcdef'], ['01111111-89ab-cdef-0123-456789abcdef']), + (['01234567-89ab-cdef-0123-456789abcdef'], ['02222222-89ab-cdef-0123-456789abcdef'])); +select minMap(val, cnt) from values ('val Array(String), cnt Array(String)', (['1'], ['1']), (['1'], ['2'])); +select minMap(val, cnt) from values ('val Array(FixedString(1)), cnt Array(FixedString(1))', (['1'], ['1']), (['1'], ['2'])); +select minMap(val, cnt) from values ('val Array(UInt64), cnt Array(UInt64)', ([1], [1]), ([1], [2])); +select minMap(val, cnt) from values ('val Array(Float64), cnt Array(Int8)', ([1], [1]), ([1], [2])); +select minMap(val, cnt) from values ('val Array(Date), cnt Array(Int16)', ([1], [1]), ([1], [2])); +select minMap(val, cnt) from values ('val Array(DateTime(\'Asia/Istanbul\')), cnt Array(Int32)', ([1], [1]), ([1], [2])); +select minMap(val, cnt) from values ('val Array(Decimal(10, 2)), cnt Array(Int16)', (['1.01'], [1]), (['1.01'], [2])); +select minMap(val, cnt) from values ('val Array(Enum16(\'a\'=1)), cnt Array(Int16)', (['a'], [1]), (['a'], [2])); + +-- check different types for maxMap +select maxMap(val, 
cnt) from values ('val Array(UUID), cnt Array(UUID)', + (['01234567-89ab-cdef-0123-456789abcdef'], ['01111111-89ab-cdef-0123-456789abcdef']), + (['01234567-89ab-cdef-0123-456789abcdef'], ['02222222-89ab-cdef-0123-456789abcdef'])); +select maxMap(val, cnt) from values ('val Array(String), cnt Array(String)', (['1'], ['1']), (['1'], ['2'])); +select maxMap(val, cnt) from values ('val Array(FixedString(1)), cnt Array(FixedString(1))', (['1'], ['1']), (['1'], ['2'])); +select maxMap(val, cnt) from values ('val Array(UInt64), cnt Array(UInt64)', ([1], [1]), ([1], [2])); +select maxMap(val, cnt) from values ('val Array(Float64), cnt Array(Int8)', ([1], [1]), ([1], [2])); +select maxMap(val, cnt) from values ('val Array(Date), cnt Array(Int16)', ([1], [1]), ([1], [2])); +select maxMap(val, cnt) from values ('val Array(DateTime(\'Asia/Istanbul\')), cnt Array(Int32)', ([1], [1]), ([1], [2])); +select maxMap(val, cnt) from values ('val Array(Decimal(10, 2)), cnt Array(Int16)', (['1.01'], [1]), (['1.01'], [2])); +select maxMap(val, cnt) from values ('val Array(Enum16(\'a\'=1)), cnt Array(Int16)', (['a'], [1]), (['a'], [2])); + +-- bugfix, minMap and maxMap should not remove values with zero and empty strings but this behavior should not affect sumMap +select minMap(val, cnt) from values ('val Array(UInt64), cnt Array(UInt64)', ([1], [0]), ([2], [0])); +select maxMap(val, cnt) from values ('val Array(UInt64), cnt Array(UInt64)', ([1], [0]), ([2], [0])); +select minMap(val, cnt) from values ('val Array(String), cnt Array(String)', (['A'], ['']), (['B'], [''])); +select maxMap(val, cnt) from values ('val Array(String), cnt Array(String)', (['A'], ['']), (['B'], [''])); +select sumMap(val, cnt) from values ('val Array(UInt64), cnt Array(UInt64)', ([1], [0]), ([2], [0])); + +-- check working with arrays and tuples as values +select minMap([1, 1, 1], [[1, 2], [1], [1, 2, 3]]); +select maxMap([1, 1, 1], [[1, 2], [1], [1, 2, 3]]); +select minMap([1, 1, 1], [(1, 2), (1, 1), (1, 3)]); +select maxMap([1, 1, 1], [(1, 2), (1, 1), (1, 3)]); diff --git a/parser/testdata/01280_null_in/ast.json b/parser/testdata/01280_null_in/ast.json new file mode 100644 index 000000000..66e5540c6 --- /dev/null +++ b/parser/testdata/01280_null_in/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001391083, + "rows_read": 11, + "bytes_read": 410 + } +} diff --git a/parser/testdata/01280_null_in/metadata.json b/parser/testdata/01280_null_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01280_null_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01280_null_in/query.sql b/parser/testdata/01280_null_in/query.sql new file mode 100644 index 000000000..76fe4db67 --- /dev/null +++ b/parser/testdata/01280_null_in/query.sql @@ -0,0 +1,9 @@ +SELECT count(in(NULL, [])); +SELECT count(notIn(NULL, [])); 
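+-- Unlike in/notIn above, the nullIn/notNullIn variants below compare NULLs as ordinary values.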
+SELECT count(nullIn(NULL, [])); +SELECT count(notNullIn(NULL, [])); + +SELECT count(in(NULL, tuple(NULL))); +SELECT count(notIn(NULL, tuple(NULL))); +SELECT count(nullIn(NULL, tuple(NULL))); +SELECT count(notNullIn(NULL, tuple(NULL))); diff --git a/parser/testdata/01280_opencl_bitonic_order_by/ast.json b/parser/testdata/01280_opencl_bitonic_order_by/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01280_opencl_bitonic_order_by/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01280_opencl_bitonic_order_by/metadata.json b/parser/testdata/01280_opencl_bitonic_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01280_opencl_bitonic_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01280_opencl_bitonic_order_by/query.sql b/parser/testdata/01280_opencl_bitonic_order_by/query.sql new file mode 100644 index 000000000..ed32a3d37 --- /dev/null +++ b/parser/testdata/01280_opencl_bitonic_order_by/query.sql @@ -0,0 +1,10 @@ +-- TODO: set special_sort = 'opencl_bitonic'; + +select toUInt8(number * 2) as x from numbers(42) order by x desc; +select toInt8(number * 2) as x from numbers(42) order by x desc; +select toUInt16(number * 2) as x from numbers(42) order by x desc; +select toInt16(number * 2) as x from numbers(42) order by x desc; +select toUInt32(number * 2) as x from numbers(42) order by x desc; +select toInt32(number * 2) as x from numbers(42) order by x desc; +select toUInt64(number * 2) as x from numbers(42) order by x desc; +select toInt64(number * 2) as x from numbers(42) order by x desc; diff --git a/parser/testdata/01280_ttl_where_group_by_negative/ast.json b/parser/testdata/01280_ttl_where_group_by_negative/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01280_ttl_where_group_by_negative/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01280_ttl_where_group_by_negative/metadata.json b/parser/testdata/01280_ttl_where_group_by_negative/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01280_ttl_where_group_by_negative/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01280_ttl_where_group_by_negative/query.sql b/parser/testdata/01280_ttl_where_group_by_negative/query.sql new file mode 100644 index 000000000..83c7465e7 --- /dev/null +++ b/parser/testdata/01280_ttl_where_group_by_negative/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-parallel + +create table ttl_01280_error (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by x set y = max(y); -- { serverError BAD_TTL_EXPRESSION} +create table ttl_01280_error (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by b set y = max(y); -- { serverError BAD_TTL_EXPRESSION} +create table ttl_01280_error (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a, b, x set y = max(y); -- { serverError BAD_TTL_EXPRESSION} +create table ttl_01280_error (a Int, b Int, x Int64, y Int64, d DateTime) engine = MergeTree order by (a, b) ttl d + interval 1 second group by a, b set y = max(y), y = max(y); -- { serverError BAD_TTL_EXPRESSION} diff --git a/parser/testdata/01280_unicode_whitespaces_lexer/ast.json b/parser/testdata/01280_unicode_whitespaces_lexer/ast.json new file mode 100644 index 
000000000..5d7f11792 --- /dev/null +++ b/parser/testdata/01280_unicode_whitespaces_lexer/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001077063, + "rows_read": 5, + "bytes_read": 177 + } +} diff --git a/parser/testdata/01280_unicode_whitespaces_lexer/metadata.json b/parser/testdata/01280_unicode_whitespaces_lexer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01280_unicode_whitespaces_lexer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01280_unicode_whitespaces_lexer/query.sql b/parser/testdata/01280_unicode_whitespaces_lexer/query.sql new file mode 100644 index 000000000..e3292b509 --- /dev/null +++ b/parser/testdata/01280_unicode_whitespaces_lexer/query.sql @@ -0,0 +1,3 @@ +SELECT1; +SELECT 2; +…   
SELECT
  1 ᠎​+‌‍2⁠; diff --git a/parser/testdata/01281_alter_rename_and_other_renames/ast.json b/parser/testdata/01281_alter_rename_and_other_renames/ast.json new file mode 100644 index 000000000..009c46e62 --- /dev/null +++ b/parser/testdata/01281_alter_rename_and_other_renames/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery rename_table_multiple (children 1)" + }, + { + "explain": " Identifier rename_table_multiple" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001278979, + "rows_read": 2, + "bytes_read": 94 + } +} diff --git a/parser/testdata/01281_alter_rename_and_other_renames/metadata.json b/parser/testdata/01281_alter_rename_and_other_renames/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01281_alter_rename_and_other_renames/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01281_alter_rename_and_other_renames/query.sql b/parser/testdata/01281_alter_rename_and_other_renames/query.sql new file mode 100644 index 000000000..43c477fb6 --- /dev/null +++ b/parser/testdata/01281_alter_rename_and_other_renames/query.sql @@ -0,0 +1,67 @@ +DROP TABLE IF EXISTS rename_table_multiple; + +CREATE TABLE rename_table_multiple (key Int32, value1 String, value2 Int32) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part=0; + +INSERT INTO rename_table_multiple VALUES (1, 2, 3); + +ALTER TABLE rename_table_multiple RENAME COLUMN value1 TO value1_string, MODIFY COLUMN value1_string String; --{serverError NOT_IMPLEMENTED} +ALTER TABLE rename_table_multiple MODIFY COLUMN value1 String, RENAME COLUMN value1 to value1_string; --{serverError NOT_IMPLEMENTED} + +ALTER TABLE rename_table_multiple RENAME COLUMN value1 TO value1_string; +ALTER TABLE rename_table_multiple MODIFY COLUMN value1_string String; + +SHOW CREATE TABLE rename_table_multiple; + +SELECT * FROM rename_table_multiple FORMAT TSVWithNames; + +INSERT INTO rename_table_multiple VALUES (4, '5', 6); + +ALTER TABLE rename_table_multiple RENAME COLUMN value2 TO value2_old, ADD COLUMN value2 Int64 DEFAULT 7; + +SHOW CREATE TABLE rename_table_multiple; + +SELECT * FROM rename_table_multiple ORDER BY key FORMAT TSVWithNames; + +INSERT INTO rename_table_multiple VALUES (7, '8', 9, 10); + +ALTER TABLE rename_table_multiple DROP COLUMN value2_old, RENAME COLUMN value2 TO value2_old; + +SHOW CREATE TABLE rename_table_multiple; + +SELECT * FROM rename_table_multiple ORDER BY key FORMAT TSVWithNames; + +DROP TABLE IF EXISTS rename_table_multiple; + +DROP TABLE IF EXISTS rename_table_multiple_compact; + +CREATE TABLE rename_table_multiple_compact (key Int32, value1 String, value2 Int32) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_rows_for_wide_part = 100000; + +INSERT INTO rename_table_multiple_compact VALUES (1, 2, 3); + +ALTER TABLE rename_table_multiple_compact RENAME COLUMN value1 TO value1_string, MODIFY COLUMN value1_string String; --{serverError NOT_IMPLEMENTED} +ALTER TABLE rename_table_multiple_compact MODIFY COLUMN value1 String, RENAME COLUMN value1 to value1_string; --{serverError NOT_IMPLEMENTED} + +ALTER TABLE rename_table_multiple_compact RENAME COLUMN value1 TO value1_string; +ALTER TABLE rename_table_multiple_compact MODIFY COLUMN value1_string String; + +SHOW CREATE TABLE rename_table_multiple_compact; + +SELECT * FROM rename_table_multiple_compact FORMAT TSVWithNames; + +INSERT INTO rename_table_multiple_compact VALUES (4, '5', 6); + +ALTER TABLE 
rename_table_multiple_compact RENAME COLUMN value2 TO value2_old, ADD COLUMN value2 Int64 DEFAULT 7; + +SHOW CREATE TABLE rename_table_multiple_compact; + +SELECT * FROM rename_table_multiple_compact ORDER BY key FORMAT TSVWithNames; + +INSERT INTO rename_table_multiple_compact VALUES (7, '8', 9, 10); + +ALTER TABLE rename_table_multiple_compact DROP COLUMN value2_old, RENAME COLUMN value2 TO value2_old; + +SHOW CREATE TABLE rename_table_multiple_compact; + +SELECT * FROM rename_table_multiple_compact ORDER BY key FORMAT TSVWithNames; + +DROP TABLE IF EXISTS rename_table_multiple_compact; diff --git a/parser/testdata/01281_join_with_prewhere_fix/ast.json b/parser/testdata/01281_join_with_prewhere_fix/ast.json new file mode 100644 index 000000000..82df03df2 --- /dev/null +++ b/parser/testdata/01281_join_with_prewhere_fix/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001028067, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01281_join_with_prewhere_fix/metadata.json b/parser/testdata/01281_join_with_prewhere_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01281_join_with_prewhere_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01281_join_with_prewhere_fix/query.sql b/parser/testdata/01281_join_with_prewhere_fix/query.sql new file mode 100644 index 000000000..3d772c7ad --- /dev/null +++ b/parser/testdata/01281_join_with_prewhere_fix/query.sql @@ -0,0 +1,21 @@ +drop table if exists t; + +create table t (x UInt8, id UInt8) ENGINE = MergeTree() order by (id); +insert into t values (1, 1); + +set enable_optimize_predicate_expression = 0; + +select 1 from t as l join t as r on l.id = r.id prewhere l.x; +select 2 from t as l join t as r on l.id = r.id where r.x; +select 3 from t as l join t as r on l.id = r.id prewhere l.x where r.x; +select 4 from t as l join t as r using id prewhere l.x where r.x; + +select 5 from t as l join t as r on l.id = r.id where l.x and r.x; +select 6 from t as l join t as r using id where l.x and r.x; + +set optimize_move_to_prewhere = 0; + +select 7 from t as l join t as r on l.id = r.id where l.x and r.x; +select 8 from t as l join t as r using id where l.x and r.x; + +drop table t; diff --git a/parser/testdata/01281_parseDateTime64BestEffort/ast.json b/parser/testdata/01281_parseDateTime64BestEffort/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01281_parseDateTime64BestEffort/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01281_parseDateTime64BestEffort/metadata.json b/parser/testdata/01281_parseDateTime64BestEffort/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01281_parseDateTime64BestEffort/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01281_parseDateTime64BestEffort/query.sql b/parser/testdata/01281_parseDateTime64BestEffort/query.sql new file mode 100644 index 000000000..37c1a54fe --- /dev/null +++ b/parser/testdata/01281_parseDateTime64BestEffort/query.sql @@ -0,0 +1,38 @@ +-- Error cases +SELECT parseDateTime64BestEffort(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT parseDateTime64BestEffort(123); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT parseDateTime64BestEffort('foo'); -- 
{serverError CANNOT_PARSE_DATETIME} + +SELECT parseDateTime64BestEffort('2020-05-14T03:37:03.253184Z', 'bar'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} -- invalid scale parameter +SELECT parseDateTime64BestEffort('2020-05-14T03:37:03.253184Z', 3, 4); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} -- invalid timezone parameter +SELECT parseDateTime64BestEffort('2020-05-14T03:37:03.253184Z', 3, 'baz'); -- {serverError BAD_ARGUMENTS} -- unknown timezone + +SELECT parseDateTime64BestEffort('2020-05-14T03:37:03.253184Z', materialize(3), 4); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT, 44} -- non-const precision +SELECT parseDateTime64BestEffort('2020-05-14T03:37:03.253184Z', 3, materialize('UTC')); -- {serverError ILLEGAL_COLUMN} -- non-const timezone + +SELECT parseDateTime64BestEffort('2020-05-14T03:37:03.253184012345678910111213141516171819Z', 3, 'UTC'); -- {serverError CANNOT_PARSE_TEXT} + +SELECT 'orNull'; +SELECT parseDateTime64BestEffortOrNull('2020-05-14T03:37:03.253184Z', 3, 'UTC'); +SELECT parseDateTime64BestEffortOrNull('foo', 3, 'UTC'); + +SELECT 'orZero'; +SELECT parseDateTime64BestEffortOrZero('2020-05-14T03:37:03.253184Z', 3, 'UTC'); +SELECT parseDateTime64BestEffortOrZero('bar', 3, 'UTC'); + +SELECT 'non-const'; +SELECT parseDateTime64BestEffort(materialize('2020-05-14T03:37:03.253184Z'), 3, 'UTC'); + +SELECT 'Timezones'; +SELECT parseDateTime64BestEffort('2020-05-14T03:37:03.253184Z', 3, 'UTC'); +SELECT parseDateTime64BestEffort('2020-05-14T03:37:03.253184Z', 3, 'Europe/Minsk'); + +SELECT 'Formats'; +SELECT parseDateTime64BestEffort('2020-05-14T03:37:03.253184', 3, 'UTC'); +SELECT parseDateTime64BestEffort('2020-05-14T03:37:03', 3, 'UTC'); +SELECT parseDateTime64BestEffort('2020-05-14 03:37:03', 3, 'UTC'); + +SELECT 'Unix Timestamp with Milliseconds'; +SELECT parseDateTime64BestEffort('1640649600123', 3, 'UTC'); +SELECT parseDateTime64BestEffort('1640649600123', 1, 'UTC'); +SELECT parseDateTime64BestEffort('1640649600123', 6, 'UTC'); diff --git a/parser/testdata/01281_sum_nullable/ast.json b/parser/testdata/01281_sum_nullable/ast.json new file mode 100644 index 000000000..3d417493f --- /dev/null +++ b/parser/testdata/01281_sum_nullable/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumKahan (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toFloat64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001417459, + "rows_read": 15, + "bytes_read": 604 + } +} diff --git a/parser/testdata/01281_sum_nullable/metadata.json b/parser/testdata/01281_sum_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01281_sum_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01281_sum_nullable/query.sql 
b/parser/testdata/01281_sum_nullable/query.sql new file mode 100644 index 000000000..35d593da7 --- /dev/null +++ b/parser/testdata/01281_sum_nullable/query.sql @@ -0,0 +1,6 @@ +SELECT sumKahan(toFloat64(number)) FROM numbers(10); +SELECT sumKahan(toNullable(toFloat64(number))) FROM numbers(10); +SELECT sum(toNullable(number)) FROM numbers(10); +SELECT sum(x) FROM (SELECT 1 AS x UNION ALL SELECT NULL); +SELECT sum(number) FROM numbers(10); +SELECT sum(number < 1000 ? NULL : number) FROM numbers(10); diff --git a/parser/testdata/01281_unsucceeded_insert_select_queries_counter/ast.json b/parser/testdata/01281_unsucceeded_insert_select_queries_counter/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01281_unsucceeded_insert_select_queries_counter/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01281_unsucceeded_insert_select_queries_counter/metadata.json b/parser/testdata/01281_unsucceeded_insert_select_queries_counter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01281_unsucceeded_insert_select_queries_counter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01281_unsucceeded_insert_select_queries_counter/query.sql b/parser/testdata/01281_unsucceeded_insert_select_queries_counter/query.sql new file mode 100644 index 000000000..72ea0d1f4 --- /dev/null +++ b/parser/testdata/01281_unsucceeded_insert_select_queries_counter/query.sql @@ -0,0 +1,16 @@ +-- Tags: no-parallel, no-fasttest + +DROP TABLE IF EXISTS to_insert; +CREATE TABLE to_insert (value UInt64) ENGINE = Memory(); + +INSERT INTO table_that_do_not_exists VALUES (42); -- { serverError UNKNOWN_TABLE } +INSERT INTO to_insert SELECT throwIf(1); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +SELECT * FROM table_that_do_not_exists; -- { serverError UNKNOWN_TABLE } +SELECT throwIf(1); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } + +SYSTEM FLUSH LOGS query_log; + +SELECT normalizeQuery(query), type, ProfileEvents['FailedSelectQuery'], ProfileEvents['FailedInsertQuery'] +FROM system.query_log +WHERE current_database = currentDatabase() AND query_kind IN ('Select', 'Insert') AND event_date >= yesterday() AND type != 'QueryStart' +ORDER BY event_time_microseconds; diff --git a/parser/testdata/01282_system_parts_ttl_info/ast.json b/parser/testdata/01282_system_parts_ttl_info/ast.json new file mode 100644 index 000000000..80430d031 --- /dev/null +++ b/parser/testdata/01282_system_parts_ttl_info/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ttl (children 1)" + }, + { + "explain": " Identifier ttl" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000975322, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01282_system_parts_ttl_info/metadata.json b/parser/testdata/01282_system_parts_ttl_info/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01282_system_parts_ttl_info/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01282_system_parts_ttl_info/query.sql b/parser/testdata/01282_system_parts_ttl_info/query.sql new file mode 100644 index 000000000..ede5350dd --- /dev/null +++ b/parser/testdata/01282_system_parts_ttl_info/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS ttl; +CREATE TABLE ttl (d DateTime) ENGINE = MergeTree ORDER BY tuple() TTL d + INTERVAL 10 DAY SETTINGS remove_empty_parts=0; 
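+-- Stop merges so that each of the following INSERTs produces its own part with independent TTL info.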
+SYSTEM STOP MERGES ttl; +INSERT INTO ttl VALUES ('2000-01-01 01:02:03'), ('2000-02-03 04:05:06'); +SELECT rows, delete_ttl_info_min, delete_ttl_info_max, move_ttl_info.expression, move_ttl_info.min, move_ttl_info.max FROM system.parts WHERE database = currentDatabase() AND table = 'ttl'; +SYSTEM START MERGES ttl; +OPTIMIZE TABLE ttl FINAL; +SELECT rows, toTimeZone(delete_ttl_info_min, 'UTC'), toTimeZone(delete_ttl_info_max, 'UTC'), move_ttl_info.expression, move_ttl_info.min, move_ttl_info.max FROM system.parts WHERE database = currentDatabase() AND table = 'ttl' AND active; +DROP TABLE ttl; diff --git a/parser/testdata/01283_max_threads_simple_query_optimization/ast.json b/parser/testdata/01283_max_threads_simple_query_optimization/ast.json new file mode 100644 index 000000000..934d9848e --- /dev/null +++ b/parser/testdata/01283_max_threads_simple_query_optimization/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_01283 (children 1)" + }, + { + "explain": " Identifier data_01283" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001437387, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01283_max_threads_simple_query_optimization/metadata.json b/parser/testdata/01283_max_threads_simple_query_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01283_max_threads_simple_query_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01283_max_threads_simple_query_optimization/query.sql b/parser/testdata/01283_max_threads_simple_query_optimization/query.sql new file mode 100644 index 000000000..73eddf6be --- /dev/null +++ b/parser/testdata/01283_max_threads_simple_query_optimization/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS data_01283; + +set allow_asynchronous_read_from_io_pool_for_merge_tree = 0; +set remote_filesystem_read_method = 'read'; +set local_filesystem_read_method = 'pread'; +set load_marks_asynchronously = 0; + +CREATE TABLE data_01283 engine=MergeTree() +ORDER BY key +PARTITION BY key +AS SELECT number key FROM numbers(10); + +SET log_queries=1; +SELECT * FROM data_01283 LIMIT 1 FORMAT Null; +SET log_queries=0; +SYSTEM FLUSH LOGS query_log; + +-- 1 for PullingAsyncPipelineExecutor::pull +SELECT + throwIf(count() != 1, 'no query was logged'), + throwIf(length(thread_ids) > 2, 'too many threads used') +FROM system.query_log +WHERE current_database = currentDatabase() AND type = 'QueryFinish' AND query LIKE '%data_01283 LIMIT 1%' +GROUP BY thread_ids +FORMAT Null; + +DROP TABLE data_01283; diff --git a/parser/testdata/01283_strict_resize_bug/ast.json b/parser/testdata/01283_strict_resize_bug/ast.json new file mode 100644 index 000000000..eae8f39c4 --- /dev/null +++ b/parser/testdata/01283_strict_resize_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery num_10m (children 1)" + }, + { + "explain": " Identifier num_10m" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001161001, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/01283_strict_resize_bug/metadata.json b/parser/testdata/01283_strict_resize_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01283_strict_resize_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01283_strict_resize_bug/query.sql 
b/parser/testdata/01283_strict_resize_bug/query.sql new file mode 100644 index 000000000..ee1aef05c --- /dev/null +++ b/parser/testdata/01283_strict_resize_bug/query.sql @@ -0,0 +1,7 @@ +drop table if exists num_10m; +create table num_10m (number UInt64) engine = MergeTree order by tuple(); +insert into num_10m select * from numbers(10000000); + +select * from (select sum(number) from num_10m union all select sum(number) from num_10m) limit 1 settings max_block_size = 1024; + +drop table if exists num_10m; diff --git a/parser/testdata/01284_escape_sequences_php_mysql_style/ast.json b/parser/testdata/01284_escape_sequences_php_mysql_style/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01284_escape_sequences_php_mysql_style/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01284_escape_sequences_php_mysql_style/metadata.json b/parser/testdata/01284_escape_sequences_php_mysql_style/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01284_escape_sequences_php_mysql_style/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01284_escape_sequences_php_mysql_style/query.sql b/parser/testdata/01284_escape_sequences_php_mysql_style/query.sql new file mode 100644 index 000000000..5d24e2009 --- /dev/null +++ b/parser/testdata/01284_escape_sequences_php_mysql_style/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-fasttest + +SELECT 'a\_\c\l\i\c\k\h\o\u\s\e', 'a\\_\\c\\l\\i\\c\\k\\h\\o\\u\\s\\e'; +select 'aXb' like 'a_b', 'aXb' like 'a\_b', 'a_b' like 'a\_b', 'a_b' like 'a\\_b'; +SELECT match('Hello', '\w+'), match('Hello', '\\w+'), match('Hello', '\\\w+'), match('Hello', '\w\+'), match('Hello', 'w+'); + +SELECT match('Hello', '\He\l\l\o'); -- { serverError CANNOT_COMPILE_REGEXP } +SELECT match('Hello', '\H\e\l\l\o'); -- { serverError CANNOT_COMPILE_REGEXP } diff --git a/parser/testdata/01284_fuzz_bits/ast.json b/parser/testdata/01284_fuzz_bits/ast.json new file mode 100644 index 000000000..2a1888d01 --- /dev/null +++ b/parser/testdata/01284_fuzz_bits/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function fuzzBits (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'string'" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.000989377, + "rows_read": 10, + "bytes_read": 379 + } +} diff --git a/parser/testdata/01284_fuzz_bits/metadata.json b/parser/testdata/01284_fuzz_bits/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01284_fuzz_bits/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01284_fuzz_bits/query.sql b/parser/testdata/01284_fuzz_bits/query.sql new file mode 100644 index 000000000..1055d2aa5 --- /dev/null +++ b/parser/testdata/01284_fuzz_bits/query.sql @@ -0,0 +1,31 @@ +SELECT fuzzBits(toString('string'), 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT fuzzBits('string', -1.0); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT fuzzBits('', 0.3); +SELECT length(fuzzBits(randomString(100), 0.5)); 
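+-- The output is random, so the remaining checks only inspect the result type and bit-flip statistics.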
+SELECT toTypeName(fuzzBits(randomString(100), 0.5)); +SELECT toTypeName(fuzzBits(toFixedString('abacaba', 10), 0.9)); + +SELECT + ( + 0.29 * 8 * 10000 < sum + AND sum < 0.31 * 8 * 10000 + ) AS res +FROM + ( + SELECT + arraySum( + id -> bitCount( + reinterpretAsUInt8( + substring( + fuzzBits( + materialize(arrayStringConcat(arrayMap(x -> toString('\0'), range(10000)))), + 0.3 + ), + id + 1, + 1 + ) + ) + ), + range(10000) + ) as sum + ) diff --git a/parser/testdata/01284_view_and_extremes_bug/ast.json b/parser/testdata/01284_view_and_extremes_bug/ast.json new file mode 100644 index 000000000..fb8d18e4b --- /dev/null +++ b/parser/testdata/01284_view_and_extremes_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery view_bug_const (children 1)" + }, + { + "explain": " Identifier view_bug_const" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001401245, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/01284_view_and_extremes_bug/metadata.json b/parser/testdata/01284_view_and_extremes_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01284_view_and_extremes_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01284_view_and_extremes_bug/query.sql b/parser/testdata/01284_view_and_extremes_bug/query.sql new file mode 100644 index 000000000..c444441a2 --- /dev/null +++ b/parser/testdata/01284_view_and_extremes_bug/query.sql @@ -0,0 +1,4 @@ +drop table if exists view_bug_const; +CREATE VIEW view_bug_const AS SELECT 'World' AS hello FROM (SELECT number FROM system.numbers LIMIT 1) AS n1 JOIN (SELECT number FROM system.numbers LIMIT 1) AS n2 USING (number); +select * from view_bug_const; +drop table if exists view_bug_const; diff --git a/parser/testdata/01285_data_skip_index_over_aggregation/ast.json b/parser/testdata/01285_data_skip_index_over_aggregation/ast.json new file mode 100644 index 000000000..11667d305 --- /dev/null +++ b/parser/testdata/01285_data_skip_index_over_aggregation/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001536109, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01285_data_skip_index_over_aggregation/metadata.json b/parser/testdata/01285_data_skip_index_over_aggregation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01285_data_skip_index_over_aggregation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01285_data_skip_index_over_aggregation/query.sql b/parser/testdata/01285_data_skip_index_over_aggregation/query.sql new file mode 100644 index 000000000..bac113edb --- /dev/null +++ b/parser/testdata/01285_data_skip_index_over_aggregation/query.sql @@ -0,0 +1,38 @@ +SET optimize_on_insert = 0; + +DROP TABLE IF EXISTS data_01285; + +SET max_threads=1; + + +CREATE TABLE data_01285 ( + key Int, + value SimpleAggregateFunction(max, Nullable(Int)), + INDEX value_idx assumeNotNull(value) TYPE minmax GRANULARITY 1 +) +ENGINE=AggregatingMergeTree() +ORDER BY key; + +SELECT 'INSERT'; +INSERT INTO data_01285 SELECT 1, number FROM numbers(2); +SELECT * FROM data_01285; +SELECT * FROM data_01285 WHERE assumeNotNull(value) = 1; +SELECT 'INSERT'; +INSERT INTO data_01285 SELECT 1, number FROM numbers(4); +SELECT * FROM data_01285 ORDER BY ALL; 
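+-- The minmax skip index is defined over assumeNotNull(value), so these filters exercise it directly.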
+SELECT * FROM data_01285 WHERE assumeNotNull(value) = 1 ORDER BY ALL; +SELECT * FROM data_01285 WHERE assumeNotNull(value) = 3 ORDER BY ALL; +SELECT 'OPTIMIZE'; +OPTIMIZE TABLE data_01285 FINAL; +SELECT * FROM data_01285; +-- before the fix value_idx contains one range {0, 0} +-- and hence cannot find these records. +SELECT * FROM data_01285 WHERE assumeNotNull(value) = 3; +-- one more time just in case +SELECT 'OPTIMIZE'; +OPTIMIZE TABLE data_01285 FINAL; +SELECT * FROM data_01285; +-- and this passes even without the fix. +SELECT * FROM data_01285 WHERE assumeNotNull(value) = 3; + +DROP TABLE data_01285; diff --git a/parser/testdata/01285_date_datetime_key_condition/ast.json b/parser/testdata/01285_date_datetime_key_condition/ast.json new file mode 100644 index 000000000..23209a65e --- /dev/null +++ b/parser/testdata/01285_date_datetime_key_condition/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery date_datetime_key_condition (children 1)" + }, + { + "explain": " Identifier date_datetime_key_condition" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001573989, + "rows_read": 2, + "bytes_read": 106 + } +} diff --git a/parser/testdata/01285_date_datetime_key_condition/metadata.json b/parser/testdata/01285_date_datetime_key_condition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01285_date_datetime_key_condition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01285_date_datetime_key_condition/query.sql b/parser/testdata/01285_date_datetime_key_condition/query.sql new file mode 100644 index 000000000..fe1454cd7 --- /dev/null +++ b/parser/testdata/01285_date_datetime_key_condition/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS date_datetime_key_condition; + +CREATE TABLE date_datetime_key_condition (dt DateTime) ENGINE = MergeTree() ORDER BY dt; +INSERT INTO date_datetime_key_condition VALUES ('2020-01-01 00:00:00'), ('2020-01-01 10:00:00'), ('2020-01-02 00:00:00'); + +-- partial +SELECT groupArray(dt) from date_datetime_key_condition WHERE dt > toDate('2020-01-01') AND dt < toDate('2020-01-02'); +SELECT groupArray(dt) from date_datetime_key_condition WHERE dt >= toDate('2020-01-02'); +SELECT groupArray(dt) from date_datetime_key_condition WHERE dt < toDate('2020-01-02'); + +-- inside +SELECT groupArray(dt) from date_datetime_key_condition WHERE dt > toDate('2019-01-02'); +SELECT groupArray(dt) from date_datetime_key_condition WHERE dt < toDate('2021-01-02'); +SELECT groupArray(dt) from date_datetime_key_condition WHERE dt >= toDate('2019-01-02') AND dt < toDate('2021-01-02'); +SELECT groupArray(dt) from date_datetime_key_condition WHERE dt > toDate('2019-01-02') OR dt <= toDate('2021-01-02'); + +-- outside +SELECT groupArray(dt) from date_datetime_key_condition WHERE dt < toDate('2019-01-02'); +SELECT groupArray(dt) from date_datetime_key_condition WHERE dt > toDate('2021-01-02'); +SELECT groupArray(dt) from date_datetime_key_condition WHERE dt < toDate('2019-01-02') OR dt > toDate('2021-01-02'); + +DROP TABLE date_datetime_key_condition; diff --git a/parser/testdata/01286_constraints_on_default/ast.json b/parser/testdata/01286_constraints_on_default/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01286_constraints_on_default/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01286_constraints_on_default/metadata.json 
b/parser/testdata/01286_constraints_on_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01286_constraints_on_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01286_constraints_on_default/query.sql b/parser/testdata/01286_constraints_on_default/query.sql new file mode 100644 index 000000000..8d9319b15 --- /dev/null +++ b/parser/testdata/01286_constraints_on_default/query.sql @@ -0,0 +1,40 @@ +-- Tags: no-async-insert +-- - no-async-insert -- due to INSERT is performed in background the connection is preserved, and last CREATE TEMPORARY TABLE will fail with TABLE_ALREADY_EXISTS + +DROP TABLE IF EXISTS default_constraints; +CREATE TABLE default_constraints +( + x UInt8, + y UInt8 DEFAULT x + 1, + CONSTRAINT c CHECK y < 5 +) ENGINE = Memory; + +INSERT INTO default_constraints (x) SELECT number FROM system.numbers LIMIT 5; -- { serverError VIOLATED_CONSTRAINT } +INSERT INTO default_constraints (x) VALUES (0),(1),(2),(3),(4); -- { serverError VIOLATED_CONSTRAINT } + +SELECT y, throwIf(NOT y < 5) FROM default_constraints; +SELECT count() FROM default_constraints; + +DROP TABLE default_constraints; + + +CREATE TEMPORARY TABLE default_constraints +( + x UInt8, + y UInt8 DEFAULT x + 1, + CONSTRAINT c CHECK y < 5 +); + +INSERT INTO default_constraints (x) SELECT number FROM system.numbers LIMIT 5; -- { serverError VIOLATED_CONSTRAINT } +INSERT INTO default_constraints (x) VALUES (0),(1),(2),(3),(4); -- { serverError VIOLATED_CONSTRAINT } + +-- On the previous INSERT the connection got terminated +CREATE TEMPORARY TABLE default_constraints +( + x UInt8, + y UInt8 DEFAULT x + 1, + CONSTRAINT c CHECK y < 5 +); + +SELECT y, throwIf(NOT y < 5) FROM default_constraints; +SELECT count() FROM default_constraints; diff --git a/parser/testdata/01287_max_execution_speed/ast.json b/parser/testdata/01287_max_execution_speed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01287_max_execution_speed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01287_max_execution_speed/metadata.json b/parser/testdata/01287_max_execution_speed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01287_max_execution_speed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01287_max_execution_speed/query.sql b/parser/testdata/01287_max_execution_speed/query.sql new file mode 100644 index 000000000..8fdd16e0a --- /dev/null +++ b/parser/testdata/01287_max_execution_speed/query.sql @@ -0,0 +1,50 @@ +-- Tags: no-fasttest, no-debug, no-tsan, no-msan, no-asan + +SET max_rows_to_read=0; +SET max_bytes_to_read=0; + +SET min_execution_speed = 100000000000, timeout_before_checking_execution_speed = 0; +SELECT count() FROM system.numbers; -- { serverError TOO_SLOW } +SET min_execution_speed = 0; +SELECT 'Ok (1)'; + +SET min_execution_speed_bytes = 800000000000, timeout_before_checking_execution_speed = 0; +SELECT count() FROM system.numbers; -- { serverError TOO_SLOW } +SET min_execution_speed_bytes = 0; +SELECT 'Ok (2)'; + +SET max_execution_time = 600; +SET max_execution_speed = 1000000; +SET max_block_size = 100; + +CREATE TEMPORARY TABLE times (t DateTime); + +INSERT INTO times SELECT now(); +SELECT count() FROM numbers(2000000); +INSERT INTO times SELECT now(); + +SELECT max(t) - min(t) >= 1 FROM times; +SELECT 'Ok (3)'; +SET max_execution_speed = 0; + +SET max_execution_speed_bytes = 8000000; +TRUNCATE TABLE times; + 
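+-- Editorial sketch of the arithmetic behind the next check (not part of the original test): +-- numbers(2000000) yields 2,000,000 UInt64 rows, about 2000000 * 8 = 16,000,000 bytes; +-- at max_execution_speed_bytes = 8,000,000 bytes/sec the scan should take at least ~2 seconds, +-- hence max(t) - min(t) >= 1 below.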
+INSERT INTO times SELECT now(); +SELECT count() FROM numbers(2000000); +INSERT INTO times SELECT now(); + +SELECT max(t) - min(t) >= 1 FROM times; +SELECT 'Ok (4)'; +SET max_execution_speed_bytes = 0; + +-- Note that 'min_execution_speed' does not count sleeping due to throttling +-- with 'max_execution_speed' and similar limits like 'priority' and 'max_network_bandwidth' + +-- Note: I have to disable this part of the test because it actually can work slower under sanitizers, +-- with debug builds and in the presence of random system hiccups in our CI environment. + +--SET max_execution_speed = 1000000, min_execution_speed = 2000000; +-- And this query will work despite the fact that the above settings look contradictory. +--SELECT count() FROM numbers(1000000); +--SELECT 'Ok (5)'; diff --git a/parser/testdata/01288_shard_max_network_bandwidth/ast.json b/parser/testdata/01288_shard_max_network_bandwidth/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01288_shard_max_network_bandwidth/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01288_shard_max_network_bandwidth/metadata.json b/parser/testdata/01288_shard_max_network_bandwidth/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01288_shard_max_network_bandwidth/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01288_shard_max_network_bandwidth/query.sql b/parser/testdata/01288_shard_max_network_bandwidth/query.sql new file mode 100644 index 000000000..d2daf48a1 --- /dev/null +++ b/parser/testdata/01288_shard_max_network_bandwidth/query.sql @@ -0,0 +1,17 @@ +-- Tags: shard + +-- Limit to 100 KB/sec +SET max_network_bandwidth = 100000; + +-- Lower max_block_size, so we can start throttling sooner. Otherwise the query will be executed too quickly. +SET max_block_size = 100; + +CREATE TEMPORARY TABLE times (t DateTime); + +-- rand64 is incompressible data. Each number will take 8 bytes of bandwidth. +-- This query should execute in no less than 1.6 seconds if throttled.
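+-- Editorial sketch of the 1.6-second figure (not part of the original test): each of the two +-- remote shards streams 20000 rows * 8 bytes = 160,000 bytes of rand64() data, and at +-- max_network_bandwidth = 100000 bytes/sec that is at least 1.6 seconds per connection, +-- assuming the shards transfer in parallel.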
+INSERT INTO times SELECT now(); +SELECT sum(ignore(*)) FROM (SELECT rand64() FROM remote('127.0.0.{2,3}', numbers(20000))); +INSERT INTO times SELECT now(); + +SELECT max(t) - min(t) >= 1 FROM times; diff --git a/parser/testdata/01289_min_execution_speed_not_too_early/ast.json b/parser/testdata/01289_min_execution_speed_not_too_early/ast.json new file mode 100644 index 000000000..6e50e39fb --- /dev/null +++ b/parser/testdata/01289_min_execution_speed_not_too_early/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ES (children 1)" + }, + { + "explain": " Identifier ES" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00115935, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01289_min_execution_speed_not_too_early/metadata.json b/parser/testdata/01289_min_execution_speed_not_too_early/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01289_min_execution_speed_not_too_early/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01289_min_execution_speed_not_too_early/query.sql b/parser/testdata/01289_min_execution_speed_not_too_early/query.sql new file mode 100644 index 000000000..1abe9bf8c --- /dev/null +++ b/parser/testdata/01289_min_execution_speed_not_too_early/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS ES; + +create table ES(A String) Engine=MergeTree order by tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into ES select toString(number) from numbers(10000000); + +SET max_execution_time = 100, + timeout_before_checking_execution_speed = 100, + max_execution_speed = 1000000, + max_threads = 1, + max_block_size = 1000000; + +-- An exception about execution speed is not thrown by these queries.
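+-- Editorial note on why no exception is expected (an assumption about the intent, not part of +-- the original test): timeout_before_checking_execution_speed = 100 delays the execution-speed +-- estimation by 100 seconds, and every LIMIT query below finishes far sooner, so neither +-- max_execution_time nor max_execution_speed can produce a TOO_SLOW-style error here.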
+SELECT * FROM ES LIMIT 1 format Null; +SELECT * FROM ES LIMIT 10 format Null; +SELECT * FROM ES LIMIT 100 format Null; +SELECT * FROM ES LIMIT 1000 format Null; +SELECT * FROM ES LIMIT 10000 format Null; +SELECT * FROM ES LIMIT 100000 format Null; +SELECT * FROM ES LIMIT 1000000 format Null; + +DROP TABLE ES; diff --git a/parser/testdata/01290_empty_array_index_analysis/ast.json b/parser/testdata/01290_empty_array_index_analysis/ast.json new file mode 100644 index 000000000..6b22bd2f9 --- /dev/null +++ b/parser/testdata/01290_empty_array_index_analysis/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery count_lc_test (children 1)" + }, + { + "explain": " Identifier count_lc_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00112336, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/01290_empty_array_index_analysis/metadata.json b/parser/testdata/01290_empty_array_index_analysis/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01290_empty_array_index_analysis/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01290_empty_array_index_analysis/query.sql b/parser/testdata/01290_empty_array_index_analysis/query.sql new file mode 100644 index 000000000..b1b606794 --- /dev/null +++ b/parser/testdata/01290_empty_array_index_analysis/query.sql @@ -0,0 +1,66 @@ +drop table if exists count_lc_test; + +CREATE TABLE count_lc_test +( + `s` LowCardinality(String), + `arr` Array(LowCardinality(String)), + `num` UInt64 +) +ENGINE = MergeTree +ORDER BY (s, arr); + +INSERT INTO count_lc_test(num, arr) VALUES (1,[]),(2,['a']),(3,['a','b','c']),(4,['aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa']); + +SELECT '--- notEmpty'; +select * from count_lc_test where notEmpty(arr); +SELECT '--- empty'; +select * from count_lc_test where empty(arr); +SELECT '--- = []'; +select * from count_lc_test where arr = []; +SELECT '--- != []'; +select * from count_lc_test where arr != []; +SELECT '--- > []'; +select * from count_lc_test where arr > []; +SELECT '--- < []'; +select * from count_lc_test where arr < []; +SELECT '--- >= []'; +select * from count_lc_test where arr >= []; +SELECT '--- <= []'; +select * from count_lc_test where arr <= []; +SELECT '---'; + +DROP TABLE count_lc_test; + + +drop table if exists count_lc_test; + +CREATE TABLE count_lc_test +( + `s` LowCardinality(String), + `arr` Array(String), + `num` UInt64 +) +ENGINE = MergeTree +ORDER BY (s, arr); + +INSERT INTO count_lc_test(num, arr) VALUES (1,[]),(2,['a']),(3,['a','b','c']),(4,['aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa']); + +SELECT '--- notEmpty'; +select * from count_lc_test where notEmpty(arr); +SELECT '--- empty'; +select * from count_lc_test where empty(arr); +SELECT '--- = []'; +select * from count_lc_test where arr = []; +SELECT '--- != []'; +select * from count_lc_test where arr != []; +SELECT '--- > []'; +select * from count_lc_test where arr > []; +SELECT '--- < []'; +select * from count_lc_test where arr < []; +SELECT '--- >= []'; +select * from count_lc_test where arr >= []; +SELECT '--- <= []'; +select * from count_lc_test where arr <= []; +SELECT '---'; + +DROP TABLE count_lc_test; diff --git a/parser/testdata/01290_max_execution_speed_distributed/ast.json b/parser/testdata/01290_max_execution_speed_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01290_max_execution_speed_distributed/ast.json @@ 
-0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01290_max_execution_speed_distributed/metadata.json b/parser/testdata/01290_max_execution_speed_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01290_max_execution_speed_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01290_max_execution_speed_distributed/query.sql b/parser/testdata/01290_max_execution_speed_distributed/query.sql new file mode 100644 index 000000000..7c7442dfa --- /dev/null +++ b/parser/testdata/01290_max_execution_speed_distributed/query.sql @@ -0,0 +1,37 @@ +-- Tags: distributed + +SET log_queries=1; + +DROP TABLE IF EXISTS times; +CREATE TEMPORARY TABLE times (t DateTime); + +INSERT INTO times SELECT now(); + +SELECT count('special query for 01290_max_execution_speed_distributed') +FROM +( + SELECT + sleep(0.001), -- sleep for each block is needed to make sure the query cannot finish too fast, + -- i.e. before the first timer tick that might take up to 10ms on some platforms including ARM. + -- the timer's resolution is important because we use `CLOCK_MONOTONIC_COARSE` in `ReadProgressCallback`. + -- At the same time, `sleep` uses a more accurate timer, so we won't spend more than 100ms in total sleeping. + number + FROM remote('127.0.0.{2,3}', numbers(100000)) +) +SETTINGS max_execution_speed = 100000, timeout_before_checking_execution_speed = 0, max_block_size = 1000; + +INSERT INTO times SELECT now(); + +SELECT max(t) - min(t) >= 1 FROM times; + +-- Check that the query was also throttled on "remote" servers. +SYSTEM FLUSH LOGS query_log; +SELECT DISTINCT query_duration_ms >= 500 +FROM system.query_log +WHERE + current_database = currentDatabase() AND + event_date >= yesterday() AND + event_time >= now() - INTERVAL '5 MINUTES' AND -- time limit for tests not marked `long` is 3 minutes, 5 should be more than enough + query LIKE '%special query for 01290_max_execution_speed_distributed%' AND + query NOT LIKE '%system.query_log%' AND + type = 2; diff --git a/parser/testdata/01291_aggregation_in_order/ast.json b/parser/testdata/01291_aggregation_in_order/ast.json new file mode 100644 index 000000000..dcc195bce --- /dev/null +++ b/parser/testdata/01291_aggregation_in_order/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery pk_order (children 1)" + }, + { + "explain": " Identifier pk_order" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001283949, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01291_aggregation_in_order/metadata.json b/parser/testdata/01291_aggregation_in_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01291_aggregation_in_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01291_aggregation_in_order/query.sql b/parser/testdata/01291_aggregation_in_order/query.sql new file mode 100644 index 000000000..e93eadc33 --- /dev/null +++ b/parser/testdata/01291_aggregation_in_order/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS pk_order; + +SET optimize_aggregation_in_order = 1; + +CREATE TABLE pk_order(a UInt64, b UInt64, c UInt64, d UInt64) ENGINE=MergeTree() ORDER BY (a, b); +INSERT INTO pk_order(a, b, c, d) VALUES (1, 1, 101, 1), (1, 2, 102, 1), (1, 3, 103, 1), (1, 4, 104, 1); +INSERT INTO pk_order(a, b, c, d) VALUES (1, 5, 104, 1), (1, 6, 105, 1), (2, 1, 106, 2), (2, 1, 107, 2); +INSERT INTO pk_order(a, b, c, d)
VALUES (2, 2, 107, 2), (2, 3, 108, 2), (2, 4, 109, 2); + +-- With aggregation in order, the output order after GROUP BY is deterministic + +SELECT a, b FROM pk_order GROUP BY a, b ORDER BY a, b; +SELECT a FROM pk_order GROUP BY a ORDER BY a; + +SELECT a, b, sum(c), avg(d) FROM pk_order GROUP BY a, b ORDER BY a, b; +SELECT a, sum(c), avg(d) FROM pk_order GROUP BY a ORDER BY a; +SELECT -a, sum(c), avg(d) FROM pk_order GROUP BY -a ORDER BY -a; + +DROP TABLE IF EXISTS pk_order; + +CREATE TABLE pk_order (d DateTime, a Int32, b Int32) ENGINE = MergeTree ORDER BY (d, a) + PARTITION BY toDate(d) SETTINGS index_granularity=1; + +INSERT INTO pk_order + SELECT toDateTime('2019-05-05 00:00:00') + INTERVAL number % 10 DAY, number, intHash32(number) from numbers(100); + +set max_block_size = 1; + +SELECT d, max(b) FROM pk_order GROUP BY d, a ORDER BY d, a LIMIT 5; +SELECT toString(d), avg(a) FROM pk_order GROUP BY toString(d) ORDER BY toString(d) LIMIT 5; +SELECT toStartOfHour(d) as d1, min(a), max(b) FROM pk_order GROUP BY d1 ORDER BY d1 LIMIT 5; + +DROP TABLE pk_order; diff --git a/parser/testdata/01291_distributed_low_cardinality_memory_efficient/ast.json b/parser/testdata/01291_distributed_low_cardinality_memory_efficient/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01291_distributed_low_cardinality_memory_efficient/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01291_distributed_low_cardinality_memory_efficient/metadata.json b/parser/testdata/01291_distributed_low_cardinality_memory_efficient/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01291_distributed_low_cardinality_memory_efficient/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01291_distributed_low_cardinality_memory_efficient/query.sql b/parser/testdata/01291_distributed_low_cardinality_memory_efficient/query.sql new file mode 100644 index 000000000..a0d7ed1d2 --- /dev/null +++ b/parser/testdata/01291_distributed_low_cardinality_memory_efficient/query.sql @@ -0,0 +1,19 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS data; +DROP TABLE IF EXISTS dist; + +create table data (key String) Engine=Memory(); +create table dist (key LowCardinality(String)) engine=Distributed(test_cluster_two_shards, currentDatabase(), data); +insert into data values ('foo'); + +set distributed_aggregation_memory_efficient=1; + +-- There is an obscure bug in a rare corner case.
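+-- Editorial note (an assumption about the corner case, not part of the original test): the +-- Distributed table declares key as LowCardinality(String) while the underlying table stores a +-- plain String, so memory-efficient merging must convert the block types arriving from the shards; +-- the settings below disable external GROUP BY so that this conversion path stays in memory.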
+set max_bytes_before_external_group_by = 0; +set max_bytes_ratio_before_external_group_by = 0; + +select * from dist group by key; + +DROP TABLE data; +DROP TABLE dist; diff --git a/parser/testdata/01291_geo_types/ast.json b/parser/testdata/01291_geo_types/ast.json new file mode 100644 index 000000000..b01098a83 --- /dev/null +++ b/parser/testdata/01291_geo_types/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery geo (children 1)" + }, + { + "explain": " Identifier geo" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00132383, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01291_geo_types/metadata.json b/parser/testdata/01291_geo_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01291_geo_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01291_geo_types/query.sql b/parser/testdata/01291_geo_types/query.sql new file mode 100644 index 000000000..4038c5456 --- /dev/null +++ b/parser/testdata/01291_geo_types/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS geo; + +CREATE TABLE geo (a Point, b Ring, c Polygon, d MultiPolygon) ENGINE=Memory(); + +INSERT INTO geo VALUES((0, 0), [(0, 0), (10, 0), (10, 10), (0, 10)], [[(20, 20), (50, 20), (50, 50), (20, 50)], [(30, 30), (50, 50), (50, 30)]], [[[(0, 0), (10, 0), (10, 10), (0, 10)]], [[(20, 20), (50, 20), (50, 50), (20, 50)],[(30, 30), (50, 50), (50, 30)]]]); + +SELECT * from geo; + +DROP TABLE geo; diff --git a/parser/testdata/01291_unsupported_conversion_from_decimal/ast.json b/parser/testdata/01291_unsupported_conversion_from_decimal/ast.json new file mode 100644 index 000000000..6130ea749 --- /dev/null +++ b/parser/testdata/01291_unsupported_conversion_from_decimal/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toIntervalSecond (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function now64 (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001416345, + "rows_read": 8, + "bytes_read": 309 + } +} diff --git a/parser/testdata/01291_unsupported_conversion_from_decimal/metadata.json b/parser/testdata/01291_unsupported_conversion_from_decimal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01291_unsupported_conversion_from_decimal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01291_unsupported_conversion_from_decimal/query.sql b/parser/testdata/01291_unsupported_conversion_from_decimal/query.sql new file mode 100644 index 000000000..e5948465d --- /dev/null +++ b/parser/testdata/01291_unsupported_conversion_from_decimal/query.sql @@ -0,0 +1,5 @@ +SELECT toIntervalSecond(now64()); -- { serverError CANNOT_CONVERT_TYPE } +SELECT CAST(now64() AS IntervalSecond); -- { serverError CANNOT_CONVERT_TYPE } + +SELECT toIntervalSecond(now64()); -- { serverError CANNOT_CONVERT_TYPE } +SELECT CAST(now64() AS IntervalSecond); -- { serverError CANNOT_CONVERT_TYPE } diff --git a/parser/testdata/01292_create_user/ast.json b/parser/testdata/01292_create_user/ast.json new file mode 
100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01292_create_user/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01292_create_user/metadata.json b/parser/testdata/01292_create_user/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01292_create_user/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01292_create_user/query.sql b/parser/testdata/01292_create_user/query.sql new file mode 100644 index 000000000..bbd7d17e9 --- /dev/null +++ b/parser/testdata/01292_create_user/query.sql @@ -0,0 +1,267 @@ +-- Tags: no-fasttest, no-parallel + +DROP USER IF EXISTS u1_01292, u2_01292, u3_01292, u4_01292, u5_01292, u6_01292, u7_01292, u8_01292, u9_01292; +DROP USER IF EXISTS u10_01292, u11_01292, u12_01292, u13_01292, u14_01292, u15_01292, u16_01292; +DROP USER IF EXISTS u2_01292_renamed; +DROP USER IF EXISTS u1_01292@'%', 'u2_01292@%.myhost.com', u3_01292@'192.168.%.%', 'u4_01292@::1', u5_01292@'65:ff0c::/96'; +DROP USER IF EXISTS u5_01292@'%.host.com', u6_01292@'%.host.com', u7_01292@'%.host.com', u8_01292@'%.otherhost.com'; +DROP ROLE IF EXISTS r1_01292, r2_01292; + +SELECT '-- default'; +CREATE USER u1_01292; +SHOW CREATE USER u1_01292; + +SELECT '-- same as default'; +CREATE USER u2_01292 NOT IDENTIFIED HOST ANY SETTINGS NONE DEFAULT ROLE ALL; +CREATE USER u3_01292 DEFAULT ROLE ALL IDENTIFIED WITH no_password SETTINGS NONE HOST ANY; +SHOW CREATE USER u2_01292; +SHOW CREATE USER u3_01292; + +SELECT '-- rename'; +ALTER USER u2_01292 RENAME TO 'u2_01292_renamed'; +SHOW CREATE USER u2_01292; -- { serverError UNKNOWN_USER } -- User not found +SHOW CREATE USER u2_01292_renamed; +DROP USER u1_01292, u2_01292_renamed, u3_01292; + +SELECT '-- authentication'; +CREATE USER u1_01292 NOT IDENTIFIED; +CREATE USER u2_01292 IDENTIFIED WITH plaintext_password BY 'qwe123'; +CREATE USER u3_01292 IDENTIFIED BY 'qwe123'; +CREATE USER u4_01292 IDENTIFIED WITH sha256_password BY 'qwe123'; +CREATE USER u5_01292 IDENTIFIED WITH sha256_hash BY '18138372FAD4B94533CD4881F03DC6C69296DD897234E0CEE83F727E2E6B1F63'; +CREATE USER u6_01292 IDENTIFIED WITH double_sha1_password BY 'qwe123'; +CREATE USER u7_01292 IDENTIFIED WITH double_sha1_hash BY '8DCDD69CE7D121DE8013062AEAEB2A148910D50E'; +CREATE USER u8_01292 IDENTIFIED WITH bcrypt_password BY 'qwe123'; +CREATE USER u9_01292 IDENTIFIED WITH bcrypt_hash BY '$2a$12$rz5iy2LhuwBezsM88ZzWiemOVUeJ94xHTzwAlLMDhTzwUxOHaY64q'; +SHOW CREATE USER u1_01292; +SHOW CREATE USER u2_01292; +SHOW CREATE USER u3_01292; +SHOW CREATE USER u4_01292; +SHOW CREATE USER u5_01292; +SHOW CREATE USER u6_01292; +SHOW CREATE USER u7_01292; +SHOW CREATE USER u8_01292; +SHOW CREATE USER u9_01292; +ALTER USER u1_01292 IDENTIFIED BY '123qwe'; +ALTER USER u2_01292 IDENTIFIED BY '123qwe'; +ALTER USER u3_01292 IDENTIFIED BY '123qwe'; +ALTER USER u4_01292 IDENTIFIED WITH plaintext_password BY '123qwe'; +ALTER USER u5_01292 NOT IDENTIFIED; +SHOW CREATE USER u1_01292; +SHOW CREATE USER u2_01292; +SHOW CREATE USER u3_01292; +SHOW CREATE USER u4_01292; +SHOW CREATE USER u5_01292; +DROP USER u1_01292, u2_01292, u3_01292, u4_01292, u5_01292, u6_01292, u7_01292, u8_01292, u9_01292; + +SELECT '-- host'; +CREATE USER u1_01292 HOST ANY; +CREATE USER u2_01292 HOST NONE; +CREATE USER u3_01292 HOST LOCAL; +CREATE USER u4_01292 HOST NAME 'myhost.com'; +CREATE USER u5_01292 HOST NAME 'myhost.com', LOCAL; +CREATE USER u6_01292 HOST LOCAL, NAME 'myhost.com'; +CREATE USER u7_01292 HOST REGEXP '.*\\.myhost\\.com'; 
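+-- Editorial sketch of the matching rules (per common ClickHouse semantics; not part of the +-- original test): HOST NAME matches the client host name exactly, HOST REGEXP applies a regular +-- expression to the resolved host name, HOST LIKE uses SQL LIKE patterns, and HOST IP accepts +-- single addresses or CIDR subnets such as '192.168.0.0/16'.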
+CREATE USER u8_01292 HOST LIKE '%'; +CREATE USER u9_01292 HOST LIKE '%.myhost.com'; +CREATE USER u10_01292 HOST LIKE '%.myhost.com', '%.myhost2.com'; +CREATE USER u11_01292 HOST IP '127.0.0.1'; +CREATE USER u12_01292 HOST IP '192.168.1.1'; +CREATE USER u13_01292 HOST IP '192.168.0.0/16'; +CREATE USER u14_01292 HOST IP '::1'; +CREATE USER u15_01292 HOST IP '2001:0db8:11a3:09d7:1f34:8a2e:07a0:765d'; +CREATE USER u16_01292 HOST IP '65:ff0c::/96', '::1'; +SHOW CREATE USER u1_01292; +SHOW CREATE USER u2_01292; +SHOW CREATE USER u3_01292; +SHOW CREATE USER u4_01292; +SHOW CREATE USER u5_01292; +SHOW CREATE USER u6_01292; +SHOW CREATE USER u7_01292; +SHOW CREATE USER u8_01292; +SHOW CREATE USER u9_01292; +SHOW CREATE USER u10_01292; +SHOW CREATE USER u11_01292; +SHOW CREATE USER u12_01292; +SHOW CREATE USER u13_01292; +SHOW CREATE USER u14_01292; +SHOW CREATE USER u15_01292; +SHOW CREATE USER u16_01292; +ALTER USER u1_01292 HOST NONE; +ALTER USER u2_01292 HOST NAME 'myhost.com'; +ALTER USER u3_01292 ADD HOST NAME 'myhost.com'; +ALTER USER u4_01292 DROP HOST NAME 'myhost.com'; +SHOW CREATE USER u1_01292; +SHOW CREATE USER u2_01292; +SHOW CREATE USER u3_01292; +SHOW CREATE USER u4_01292; +DROP USER u1_01292, u2_01292, u3_01292, u4_01292, u5_01292, u6_01292, u7_01292, u8_01292, u9_01292; +DROP USER u10_01292, u11_01292, u12_01292, u13_01292, u14_01292, u15_01292, u16_01292; + +SELECT '-- host after @'; +CREATE USER u1_01292@'%'; +CREATE USER u2_01292@'%.myhost.com'; +CREATE USER u3_01292@'192.168.%.%'; +CREATE USER u4_01292@'::1'; +CREATE USER u5_01292@'65:ff0c::/96'; +SHOW CREATE USER u1_01292@'%'; +SHOW CREATE USER u1_01292; +SHOW CREATE USER u2_01292@'%.myhost.com'; +SHOW CREATE USER 'u2_01292@%.myhost.com'; +SHOW CREATE USER u3_01292@'192.168.%.%'; +SHOW CREATE USER 'u3_01292@192.168.%.%'; +SHOW CREATE USER u4_01292@'::1'; +SHOW CREATE USER 'u4_01292@::1'; +SHOW CREATE USER u5_01292@'65:ff0c::/96'; +SHOW CREATE USER 'u5_01292@65:ff0c::/96'; +ALTER USER u1_01292@'%' HOST LOCAL; +ALTER USER u2_01292@'%.myhost.com' HOST ANY; +SHOW CREATE USER u1_01292@'%'; +SHOW CREATE USER u2_01292@'%.myhost.com'; +DROP USER u1_01292@'%', 'u2_01292@%.myhost.com', u3_01292@'192.168.%.%', 'u4_01292@::1', u5_01292@'65:ff0c::/96'; + +SELECT '-- settings'; +CREATE USER u1_01292 SETTINGS NONE; +CREATE USER u2_01292 SETTINGS PROFILE 'default'; +CREATE USER u3_01292 SETTINGS max_memory_usage=5000000; +CREATE USER u4_01292 SETTINGS max_memory_usage MIN=5000000; +CREATE USER u5_01292 SETTINGS max_memory_usage MAX=5000000; +CREATE USER u6_01292 SETTINGS max_memory_usage CONST; +CREATE USER u7_01292 SETTINGS max_memory_usage WRITABLE; +CREATE USER u8_01292 SETTINGS max_memory_usage=5000000 MIN 4000000 MAX 6000000 CONST; +CREATE USER u9_01292 SETTINGS PROFILE 'default', max_memory_usage=5000000 WRITABLE; +SHOW CREATE USER u1_01292; +SHOW CREATE USER u2_01292; +SHOW CREATE USER u3_01292; +SHOW CREATE USER u4_01292; +SHOW CREATE USER u5_01292; +SHOW CREATE USER u6_01292; +SHOW CREATE USER u7_01292; +SHOW CREATE USER u8_01292; +SHOW CREATE USER u9_01292; +ALTER USER u1_01292 SETTINGS readonly=1; +ALTER USER u2_01292 SETTINGS readonly=1; +ALTER USER u3_01292 SETTINGS NONE; +SHOW CREATE USER u1_01292; +SHOW CREATE USER u2_01292; +SHOW CREATE USER u3_01292; +DROP USER u1_01292, u2_01292, u3_01292, u4_01292, u5_01292, u6_01292, u7_01292, u8_01292, u9_01292; + +SELECT '-- default role'; +CREATE ROLE r1_01292, r2_01292; +CREATE USER u1_01292 DEFAULT ROLE ALL; +CREATE USER u2_01292 DEFAULT ROLE NONE; +CREATE USER u3_01292 DEFAULT ROLE 
r1_01292; +CREATE USER u4_01292 DEFAULT ROLE r1_01292, r2_01292; +CREATE USER u5_01292 DEFAULT ROLE ALL EXCEPT r2_01292; +CREATE USER u6_01292 DEFAULT ROLE ALL EXCEPT r1_01292, r2_01292; +SHOW CREATE USER u1_01292; +SHOW CREATE USER u2_01292; +SHOW CREATE USER u3_01292; +SHOW CREATE USER u4_01292; +SHOW CREATE USER u5_01292; +SHOW CREATE USER u6_01292; +GRANT r1_01292, r2_01292 TO u1_01292, u2_01292, u3_01292, u4_01292, u5_01292, u6_01292; +ALTER USER u1_01292 DEFAULT ROLE r1_01292; +ALTER USER u2_01292 DEFAULT ROLE ALL EXCEPT r2_01292; +SET DEFAULT ROLE r2_01292 TO u3_01292; +SET DEFAULT ROLE ALL TO u4_01292; +SET DEFAULT ROLE ALL EXCEPT r1_01292 TO u5_01292; +SET DEFAULT ROLE NONE TO u6_01292; +SHOW CREATE USER u1_01292; +SHOW CREATE USER u2_01292; +SHOW CREATE USER u3_01292; +SHOW CREATE USER u4_01292; +SHOW CREATE USER u5_01292; +SHOW CREATE USER u6_01292; +DROP USER u1_01292, u2_01292, u3_01292, u4_01292, u5_01292, u6_01292; + +SELECT '-- complex'; +CREATE USER u1_01292 IDENTIFIED WITH plaintext_password BY 'qwe123' HOST LOCAL SETTINGS readonly=1; +SHOW CREATE USER u1_01292; +ALTER USER u1_01292 NOT IDENTIFIED HOST LIKE '%.%.myhost.com' DEFAULT ROLE NONE SETTINGS PROFILE 'default'; +SHOW CREATE USER u1_01292; +DROP USER u1_01292; + +SELECT '-- if not exists'; +CREATE USER u1_01292; +GRANT r1_01292 TO u1_01292; +SHOW CREATE USER u1_01292; +SHOW GRANTS FOR u1_01292; +SELECT '-- if not exists-part2'; +CREATE USER IF NOT EXISTS u1_01292; +GRANT r2_01292 TO u1_01292; +SHOW CREATE USER u1_01292; +SHOW GRANTS FOR u1_01292; +SELECT '-- or replace'; +CREATE USER OR REPLACE u1_01292; +SHOW CREATE USER u1_01292; +SHOW GRANTS FOR u1_01292; +CREATE USER IF NOT EXISTS u2_01292; +SHOW CREATE USER u2_01292; +DROP USER u1_01292, u2_01292; + +SELECT '-- multiple users in one command'; +CREATE USER u1_01292, u2_01292 DEFAULT ROLE NONE; +CREATE USER u3_01292, u4_01292 HOST LIKE '%.%.myhost.com'; +CREATE USER u5_01292@'%.host.com', u6_01292@'%.host.com'; +CREATE USER u7_01292@'%.host.com', u8_01292@'%.otherhost.com'; +SHOW CREATE USER u1_01292, u2_01292, u3_01292, u4_01292, u5_01292@'%.host.com', u6_01292@'%.host.com'; +SHOW CREATE USER u7_01292@'%.host.com', u8_01292@'%.otherhost.com'; +ALTER USER u1_01292, u2_01292 SETTINGS readonly=1; +GRANT r1_01292, r2_01292 TO u2_01292, u3_01292, u4_01292; +SET DEFAULT ROLE r1_01292, r2_01292 TO u2_01292, u3_01292, u4_01292; +SHOW CREATE USER u1_01292, u2_01292, u3_01292, u4_01292; +DROP USER u1_01292, u2_01292, u3_01292, u4_01292, u5_01292@'%.host.com', u6_01292@'%.host.com'; +DROP USER u7_01292@'%.host.com', u8_01292@'%.otherhost.com'; + +SELECT '-- system.users'; +CREATE USER u1_01292 IDENTIFIED WITH plaintext_password BY 'qwe123' HOST LOCAL; +CREATE USER u2_01292 NOT IDENTIFIED HOST LIKE '%.%.myhost.com' DEFAULT ROLE NONE; +CREATE USER u3_01292 IDENTIFIED BY 'qwe123' HOST IP '192.168.0.0/16', '192.169.1.1', '::1' DEFAULT ROLE r1_01292; +CREATE USER u4_01292 IDENTIFIED WITH double_sha1_password BY 'qwe123' HOST ANY DEFAULT ROLE ALL EXCEPT r1_01292; +SELECT name, storage, auth_type, auth_params, host_ip, host_names, host_names_regexp, host_names_like, default_roles_all, default_roles_list, default_roles_except FROM system.users WHERE name LIKE 'u%\_01292' ORDER BY name; +DROP USER u1_01292, u2_01292, u3_01292, u4_01292; + +SELECT '-- system.settings_profile_elements'; +CREATE USER u1_01292 SETTINGS readonly=1; +CREATE USER u2_01292 SETTINGS PROFILE 'default'; +CREATE USER u3_01292 SETTINGS max_memory_usage=5000000 MIN 4000000 MAX 6000000 WRITABLE; +CREATE USER 
u4_01292 SETTINGS PROFILE 'default', max_memory_usage=5000000, readonly=1; +CREATE USER u5_01292 SETTINGS NONE; +SELECT * FROM system.settings_profile_elements WHERE user_name LIKE 'u%\_01292' ORDER BY user_name, index; +DROP USER u1_01292, u2_01292, u3_01292, u4_01292, u5_01292; + +DROP ROLE r1_01292, r2_01292; + +SELECT '-- multiple authentication methods'; +CREATE USER u1_01292 IDENTIFIED WITH plaintext_password by 'qwe123', kerberos REALM 'qwerty10', bcrypt_password by '123qwe', ldap SERVER 'abc'; +SELECT name, auth_type, auth_params FROM system.users WHERE name = 'u1_01292' ORDER BY name; +DROP USER u1_01292; + +SELECT '-- no passwords or hashes in query_log'; +SYSTEM FLUSH LOGS query_log; +SELECT query +FROM system.query_log +WHERE + query NOT LIKE '%query_log%' AND event_date >= yesterday() AND current_database = currentDatabase() AND + (query LIKE '%qwe123%' OR query LIKE '%123qwe%' OR + query LIKE '%18138372FAD4B94533CD4881F03DC6C69296DD897234E0CEE83F727E2E6B1F63%' OR + query LIKE '%8DCDD69CE7D121DE8013062AEAEB2A148910D50E%' OR + query like '%$2a$12$rz5iy2LhuwBezsM88ZzWiemOVUeJ94xHTzwAlLMDhTzwUxOHaY64q%'); + +SELECT '-- query parameters'; +SET param_u1_01292="u1_01292"; +SET param_u2_01292="u2_01292"; +CREATE USER '{u1_01292:Identifier}'; -- { clientError BAD_ARGUMENTS } +CREATE USER {u1_01292:Identifier}; +CREATE USER '{u1_01292:Identifier}@192.168.%.%', '{u2_01292:Identifier}@192.168.%.%'; -- { clientError BAD_ARGUMENTS } +CREATE USER {u1_01292:Identifier}@'192.168.%.%', {u2_01292:Identifier}@'192.168.%.%'; +SHOW CREATE USER u1_01292; +SHOW CREATE USER u1_01292@'192.168.%.%'; +SHOW CREATE USER u2_01292@'192.168.%.%'; +DROP USER u1_01292, u1_01292@'192.168.%.%', u2_01292@'192.168.%.%'; + +SELECT '-- creating user identified with JWT'; +CREATE USER user1 IDENTIFIED WITH jwt BY '1'; -- { clientError BAD_ARGUMENTS } +CREATE USER user1 IDENTIFIED WITH jwt; -- { clientError BAD_ARGUMENTS } diff --git a/parser/testdata/01292_optimize_data_skip_idx_order_by_expr/ast.json b/parser/testdata/01292_optimize_data_skip_idx_order_by_expr/ast.json new file mode 100644 index 000000000..e7ebb3d4d --- /dev/null +++ b/parser/testdata/01292_optimize_data_skip_idx_order_by_expr/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_01292 (children 1)" + }, + { + "explain": " Identifier data_01292" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001336868, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01292_optimize_data_skip_idx_order_by_expr/metadata.json b/parser/testdata/01292_optimize_data_skip_idx_order_by_expr/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01292_optimize_data_skip_idx_order_by_expr/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01292_optimize_data_skip_idx_order_by_expr/query.sql b/parser/testdata/01292_optimize_data_skip_idx_order_by_expr/query.sql new file mode 100644 index 000000000..cf823be1f --- /dev/null +++ b/parser/testdata/01292_optimize_data_skip_idx_order_by_expr/query.sql @@ -0,0 +1,14 @@ +drop table if exists data_01292; + +create table data_01292 ( + key Int, + index key_idx (key) type minmax granularity 1 +) Engine=MergeTree() ORDER BY (key+0); + +insert into data_01292 values (1); + +optimize table data_01292 final; + +select * from data_01292 where key > 0; + +drop table if exists data_01292; diff --git a/parser/testdata/01292_quantile_array_bug/ast.json 
b/parser/testdata/01292_quantile_array_bug/ast.json new file mode 100644 index 000000000..cc8c9f084 --- /dev/null +++ b/parser/testdata/01292_quantile_array_bug/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantilesExactWeightedArray (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_0.5" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001511925, + "rows_read": 14, + "bytes_read": 555 + } +} diff --git a/parser/testdata/01292_quantile_array_bug/metadata.json b/parser/testdata/01292_quantile_array_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01292_quantile_array_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01292_quantile_array_bug/query.sql b/parser/testdata/01292_quantile_array_bug/query.sql new file mode 100644 index 000000000..ecb1028d5 --- /dev/null +++ b/parser/testdata/01292_quantile_array_bug/query.sql @@ -0,0 +1 @@ +select quantilesExactWeightedArray(0.5)(range(10), range(10)) diff --git a/parser/testdata/01293_create_role/ast.json b/parser/testdata/01293_create_role/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01293_create_role/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01293_create_role/metadata.json b/parser/testdata/01293_create_role/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01293_create_role/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01293_create_role/query.sql b/parser/testdata/01293_create_role/query.sql new file mode 100644 index 000000000..4b656ffb1 --- /dev/null +++ b/parser/testdata/01293_create_role/query.sql @@ -0,0 +1,76 @@ +-- Tags: no-parallel + +DROP ROLE IF EXISTS r1_01293, r2_01293, r3_01293, r4_01293, r5_01293, r6_01293, r7_01293, r8_01293, r9_01293; +DROP ROLE IF EXISTS r2_01293_renamed; +DROP ROLE IF EXISTS r1_01293@'%', 'r2_01293@%.myhost.com'; + +SELECT '-- default'; +CREATE ROLE r1_01293; +SHOW CREATE ROLE r1_01293; + +SELECT '-- same as default'; +CREATE ROLE r2_01293 SETTINGS NONE; +SHOW CREATE ROLE r2_01293; + +SELECT '-- rename'; +ALTER ROLE r2_01293 RENAME TO 'r2_01293_renamed'; +SHOW CREATE ROLE r2_01293; -- { serverError UNKNOWN_ROLE } -- Role not found +SHOW CREATE ROLE r2_01293_renamed; +DROP ROLE r1_01293, r2_01293_renamed; + +SELECT '-- host after @'; +CREATE ROLE r1_01293@'%'; +CREATE ROLE r2_01293@'%.myhost.com'; +SHOW CREATE ROLE r1_01293@'%'; +SHOW CREATE ROLE r1_01293; +SHOW CREATE ROLE r2_01293@'%.myhost.com'; +SHOW CREATE ROLE 'r2_01293@%.myhost.com'; +DROP ROLE r1_01293@'%', 'r2_01293@%.myhost.com'; + +SELECT '-- settings'; +CREATE ROLE r1_01293 SETTINGS NONE; +CREATE ROLE r2_01293 SETTINGS PROFILE 'default'; +CREATE ROLE r3_01293 SETTINGS max_memory_usage=5000000; +CREATE ROLE 
r4_01293 SETTINGS max_memory_usage MIN=5000000; +CREATE ROLE r5_01293 SETTINGS max_memory_usage MAX=5000000; +CREATE ROLE r6_01293 SETTINGS max_memory_usage CONST; +CREATE ROLE r7_01293 SETTINGS max_memory_usage WRITABLE; +CREATE ROLE r8_01293 SETTINGS max_memory_usage=5000000 MIN 4000000 MAX 6000000 CONST; +CREATE ROLE r9_01293 SETTINGS PROFILE 'default', max_memory_usage=5000000 WRITABLE; +SHOW CREATE ROLE r1_01293; +SHOW CREATE ROLE r2_01293; +SHOW CREATE ROLE r3_01293; +SHOW CREATE ROLE r4_01293; +SHOW CREATE ROLE r5_01293; +SHOW CREATE ROLE r6_01293; +SHOW CREATE ROLE r7_01293; +SHOW CREATE ROLE r8_01293; +SHOW CREATE ROLE r9_01293; +ALTER ROLE r1_01293 SETTINGS readonly=1; +ALTER ROLE r2_01293 SETTINGS readonly=1; +ALTER ROLE r3_01293 SETTINGS NONE; +SHOW CREATE ROLE r1_01293; +SHOW CREATE ROLE r2_01293; +SHOW CREATE ROLE r3_01293; +DROP ROLE r1_01293, r2_01293, r3_01293, r4_01293, r5_01293, r6_01293, r7_01293, r8_01293, r9_01293; + +SELECT '-- multiple roles in one command'; +CREATE ROLE r1_01293, r2_01293; +SHOW CREATE ROLE r1_01293, r2_01293; +ALTER ROLE r1_01293, r2_01293 SETTINGS readonly=1; +SHOW CREATE ROLE r1_01293, r2_01293; +DROP ROLE r1_01293, r2_01293; + +SELECT '-- system.roles'; +CREATE ROLE r1_01293; +SELECT name, storage from system.roles WHERE name='r1_01293'; +DROP ROLE r1_01293; + +SELECT '-- system.settings_profile_elements'; +CREATE ROLE r1_01293 SETTINGS readonly=1; +CREATE ROLE r2_01293 SETTINGS PROFILE 'default'; +CREATE ROLE r3_01293 SETTINGS max_memory_usage=5000000 MIN 4000000 MAX 6000000 WRITABLE; +CREATE ROLE r4_01293 SETTINGS PROFILE 'default', max_memory_usage=5000000, readonly=1; +CREATE ROLE r5_01293 SETTINGS NONE; +SELECT * FROM system.settings_profile_elements WHERE role_name LIKE 'r%\_01293' ORDER BY role_name, index; +DROP ROLE r1_01293, r2_01293, r3_01293, r4_01293, r5_01293; diff --git a/parser/testdata/01293_external_sorting_limit_bug/ast.json b/parser/testdata/01293_external_sorting_limit_bug/ast.json new file mode 100644 index 000000000..576772a69 --- /dev/null +++ b/parser/testdata/01293_external_sorting_limit_bug/ast.json @@ -0,0 +1,100 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 6)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_999990" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Literal UInt64_65535" + }, + { + "explain": " Set" + }, + { + "explain": " Identifier Null" + } + ], + + 
"rows": 26, + + "statistics": + { + "elapsed": 0.001794836, + "rows_read": 26, + "bytes_read": 1012 + } +} diff --git a/parser/testdata/01293_external_sorting_limit_bug/metadata.json b/parser/testdata/01293_external_sorting_limit_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01293_external_sorting_limit_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01293_external_sorting_limit_bug/query.sql b/parser/testdata/01293_external_sorting_limit_bug/query.sql new file mode 100644 index 000000000..ef64db316 --- /dev/null +++ b/parser/testdata/01293_external_sorting_limit_bug/query.sql @@ -0,0 +1 @@ +SELECT number FROM (SELECT number FROM system.numbers LIMIT 999990) ORDER BY number ASC LIMIT 100, 65535 SETTINGS max_bytes_before_external_sort = 1000000, max_bytes_ratio_before_external_sort = 0 format Null diff --git a/parser/testdata/01293_pretty_max_value_width/ast.json b/parser/testdata/01293_pretty_max_value_width/ast.json new file mode 100644 index 000000000..9ec432110 --- /dev/null +++ b/parser/testdata/01293_pretty_max_value_width/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001442708, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01293_pretty_max_value_width/metadata.json b/parser/testdata/01293_pretty_max_value_width/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01293_pretty_max_value_width/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01293_pretty_max_value_width/query.sql b/parser/testdata/01293_pretty_max_value_width/query.sql new file mode 100644 index 000000000..f1dc0cd19 --- /dev/null +++ b/parser/testdata/01293_pretty_max_value_width/query.sql @@ -0,0 +1,45 @@ +SET output_format_pretty_color = 1, output_format_pretty_max_value_width_apply_for_single_value = 1, output_format_pretty_row_numbers = 0; +SET output_format_pretty_display_footer_column_names=0; +SELECT 'привет' AS x, 'мир' AS y FORMAT Pretty; + +SET output_format_pretty_max_value_width = 5; +SELECT 'привет' AS x, 'мир' AS y FORMAT Pretty; +SELECT 'привет' AS x, 'мир' AS y FORMAT PrettyCompact; +SELECT 'привет' AS x, 'мир' AS y FORMAT PrettySpace; + +SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT Pretty; +SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT PrettyCompact; +SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT PrettySpace; + +SET output_format_pretty_max_value_width = 6; + +SELECT 'привет' AS x, 'мир' AS y FORMAT Pretty; +SELECT 'привет' AS x, 'мир' AS y FORMAT PrettyCompact; +SELECT 'привет' AS x, 'мир' AS y FORMAT PrettySpace; + +SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT Pretty; +SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT PrettyCompact; +SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT PrettySpace; + +SET output_format_pretty_max_value_width = 1; + +SELECT 'привет' AS x, 'мир' AS y FORMAT Pretty; +SELECT 'привет' AS x, 'мир' AS y FORMAT PrettyCompact; +SELECT 'привет' AS x, 'мир' AS y FORMAT PrettySpace; + +SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT Pretty; +SELECT * FROM VALUES('x String, y 
String', ('привет', 'мир'), ('мир', 'привет')) FORMAT PrettyCompact; +SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT PrettySpace; + +SET output_format_pretty_max_value_width = 0; + +SELECT 'привет' AS x, 'мир' AS y FORMAT Pretty; +SELECT 'привет' AS x, 'мир' AS y FORMAT PrettyCompact; +SELECT 'привет' AS x, 'мир' AS y FORMAT PrettySpace; + +SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT Pretty; +SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT PrettyCompact; +SELECT * FROM VALUES('x String, y String', ('привет', 'мир'), ('мир', 'привет')) FORMAT PrettySpace; + +SET output_format_pretty_color = 0; +SELECT 'привет' AS x, 'мир' AS y FORMAT Pretty; diff --git a/parser/testdata/01293_show_settings/ast.json b/parser/testdata/01293_show_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01293_show_settings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01293_show_settings/metadata.json b/parser/testdata/01293_show_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01293_show_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01293_show_settings/query.sql b/parser/testdata/01293_show_settings/query.sql new file mode 100644 index 000000000..3e55ffb58 --- /dev/null +++ b/parser/testdata/01293_show_settings/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-random-settings + +show settings like 'send_timeout'; +SHOW SETTINGS ILIKE '%CONNECT_timeout%'; +SHOW CHANGED SETTINGS ILIKE '%MEMORY%'; diff --git a/parser/testdata/01293_system_distribution_queue/ast.json b/parser/testdata/01293_system_distribution_queue/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01293_system_distribution_queue/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01293_system_distribution_queue/metadata.json b/parser/testdata/01293_system_distribution_queue/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01293_system_distribution_queue/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01293_system_distribution_queue/query.sql b/parser/testdata/01293_system_distribution_queue/query.sql new file mode 100644 index 000000000..f14c0d64a --- /dev/null +++ b/parser/testdata/01293_system_distribution_queue/query.sql @@ -0,0 +1,27 @@ +-- Tags: no-parallel +set prefer_localhost_replica = 1; + +drop table if exists null_01293; +drop table if exists dist_01293; + +create table null_01293 (key Int) engine=Null(); +create table dist_01293 as null_01293 engine=Distributed(test_cluster_two_shards, currentDatabase(), null_01293, key); + +-- no rows, since no active monitor +select * from system.distribution_queue where database = currentDatabase(); + +select 'INSERT'; +system stop distributed sends dist_01293; +insert into dist_01293 select * from numbers(10); +select is_blocked, error_count, data_files, data_compressed_bytes>100, broken_data_files, broken_data_compressed_bytes from system.distribution_queue where database = currentDatabase(); +system flush distributed dist_01293; + +select 'FLUSH'; +select is_blocked, error_count, data_files, data_compressed_bytes, broken_data_files, broken_data_compressed_bytes from system.distribution_queue where database = currentDatabase(); + +select 'UNBLOCK'; +system start distributed sends dist_01293; 
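+-- Editorial note (sketch, not part of the original test): START DISTRIBUTED SENDS re-enables the +-- background queue worker, so is_blocked should read 0 in the SELECT below, assuming the pending +-- files were already flushed by the earlier SYSTEM FLUSH DISTRIBUTED.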
+select is_blocked, error_count, data_files, data_compressed_bytes, broken_data_files, broken_data_compressed_bytes from system.distribution_queue where database = currentDatabase(); + +drop table null_01293; +drop table dist_01293; diff --git a/parser/testdata/01294_create_settings_profile/ast.json b/parser/testdata/01294_create_settings_profile/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01294_create_settings_profile/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01294_create_settings_profile/metadata.json b/parser/testdata/01294_create_settings_profile/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01294_create_settings_profile/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01294_create_settings_profile/query.sql b/parser/testdata/01294_create_settings_profile/query.sql new file mode 100644 index 000000000..2a7fadad8 --- /dev/null +++ b/parser/testdata/01294_create_settings_profile/query.sql @@ -0,0 +1,122 @@ +-- Tags: no-parallel + +DROP SETTINGS PROFILE IF EXISTS s1_01294, s2_01294, s3_01294, s4_01294, s5_01294, s6_01294, s7_01294, s8_01294, s9_01294, s10_01294; +DROP SETTINGS PROFILE IF EXISTS s2_01294_renamed; +DROP USER IF EXISTS u1_01294; +DROP ROLE IF EXISTS r1_01294; + +SELECT '-- default'; +CREATE SETTINGS PROFILE s1_01294; +SHOW CREATE SETTINGS PROFILE s1_01294; + +SELECT '-- same as default'; +CREATE SETTINGS PROFILE s2_01294 SETTINGS NONE TO NONE; +CREATE PROFILE s3_01294; +SHOW CREATE PROFILE s2_01294; +SHOW CREATE SETTINGS PROFILE s3_01294; + +SELECT '-- rename'; +ALTER SETTINGS PROFILE s2_01294 RENAME TO 's2_01294_renamed'; +SHOW CREATE SETTINGS PROFILE s2_01294; -- { serverError THERE_IS_NO_PROFILE } -- Profile not found +SHOW CREATE SETTINGS PROFILE s2_01294_renamed; +DROP SETTINGS PROFILE s1_01294, s2_01294_renamed, s3_01294; + +SELECT '-- settings'; +CREATE PROFILE s1_01294 SETTINGS NONE; +CREATE PROFILE s2_01294 SETTINGS INHERIT 'default'; +CREATE PROFILE s3_01294 SETTINGS max_memory_usage=5000000; +CREATE PROFILE s4_01294 SETTINGS max_memory_usage MIN=5000000; +CREATE PROFILE s5_01294 SETTINGS max_memory_usage MAX=5000000; +CREATE PROFILE s6_01294 SETTINGS max_memory_usage CONST; +CREATE PROFILE s7_01294 SETTINGS max_memory_usage WRITABLE; +CREATE PROFILE s8_01294 SETTINGS max_memory_usage=5000000 MIN 4000000 MAX 6000000 CONST; +CREATE PROFILE s9_01294 SETTINGS INHERIT 'default', max_memory_usage=5000000 WRITABLE; +CREATE PROFILE s10_01294 SETTINGS INHERIT s1_01294, s3_01294, INHERIT default, readonly=0, max_memory_usage MAX 6000000; +SHOW CREATE PROFILE s1_01294; +SHOW CREATE PROFILE s2_01294; +SHOW CREATE PROFILE s3_01294; +SHOW CREATE PROFILE s4_01294; +SHOW CREATE PROFILE s5_01294; +SHOW CREATE PROFILE s6_01294; +SHOW CREATE PROFILE s7_01294; +SHOW CREATE PROFILE s8_01294; +SHOW CREATE PROFILE s9_01294; +SHOW CREATE PROFILE s10_01294; +ALTER PROFILE s1_01294 SETTINGS readonly=0; +ALTER PROFILE s2_01294 SETTINGS readonly=1; +ALTER PROFILE s3_01294 SETTINGS NONE; +SHOW CREATE PROFILE s1_01294; +SHOW CREATE PROFILE s2_01294; +SHOW CREATE PROFILE s3_01294; +DROP PROFILE s1_01294, s2_01294, s3_01294, s4_01294, s5_01294, s6_01294, s7_01294, s8_01294, s9_01294, s10_01294; + +SELECT '-- to roles'; +CREATE ROLE r1_01294; +CREATE USER u1_01294; +CREATE PROFILE s1_01294 TO NONE; +CREATE PROFILE s2_01294 TO ALL; +CREATE PROFILE s3_01294 TO r1_01294; +CREATE PROFILE s4_01294 TO u1_01294; +CREATE PROFILE s5_01294 TO r1_01294, 
u1_01294; +CREATE PROFILE s6_01294 TO ALL EXCEPT r1_01294; +CREATE PROFILE s7_01294 TO ALL EXCEPT r1_01294, u1_01294; +SHOW CREATE PROFILE s1_01294; +SHOW CREATE PROFILE s2_01294; +SHOW CREATE PROFILE s3_01294; +SHOW CREATE PROFILE s4_01294; +SHOW CREATE PROFILE s5_01294; +SHOW CREATE PROFILE s6_01294; +SHOW CREATE PROFILE s7_01294; +ALTER PROFILE s1_01294 TO u1_01294; +ALTER PROFILE s2_01294 TO NONE; +SHOW CREATE PROFILE s1_01294; +SHOW CREATE PROFILE s2_01294; +DROP PROFILE s1_01294, s2_01294, s3_01294, s4_01294, s5_01294, s6_01294, s7_01294; + +SELECT '-- complex'; +CREATE SETTINGS PROFILE s1_01294 SETTINGS readonly=0 TO r1_01294; +SHOW CREATE SETTINGS PROFILE s1_01294; +ALTER SETTINGS PROFILE s1_01294 SETTINGS INHERIT 'default' TO NONE; +SHOW CREATE SETTINGS PROFILE s1_01294; +DROP SETTINGS PROFILE s1_01294; + +SELECT '-- multiple profiles in one command'; +CREATE PROFILE s1_01294, s2_01294 SETTINGS max_memory_usage=5000000; +CREATE PROFILE s3_01294, s4_01294 TO ALL; +SHOW CREATE PROFILE s1_01294, s2_01294, s3_01294, s4_01294; +ALTER PROFILE s1_01294, s2_01294 SETTINGS max_memory_usage=6000000; +SHOW CREATE PROFILE s1_01294, s2_01294, s3_01294, s4_01294; +ALTER PROFILE s2_01294, s3_01294, s4_01294 TO r1_01294; +SHOW CREATE PROFILE s1_01294, s2_01294, s3_01294, s4_01294; +DROP PROFILE s1_01294, s2_01294, s3_01294, s4_01294; + +SELECT '-- readonly ambiguity'; +CREATE PROFILE s1_01294 SETTINGS readonly=1; +CREATE PROFILE s2_01294 SETTINGS readonly readonly; +CREATE PROFILE s3_01294 SETTINGS profile readonly; +CREATE PROFILE s4_01294 SETTINGS profile readonly, readonly; +CREATE PROFILE s5_01294 SETTINGS profile readonly, readonly=1; +CREATE PROFILE s6_01294 SETTINGS profile readonly, readonly readonly; +SHOW CREATE PROFILE s1_01294; +SHOW CREATE PROFILE s2_01294; +SHOW CREATE PROFILE s3_01294; +SHOW CREATE PROFILE s4_01294; +SHOW CREATE PROFILE s5_01294; +SHOW CREATE PROFILE s6_01294; +DROP PROFILE s1_01294, s2_01294, s3_01294, s4_01294, s5_01294, s6_01294; + +SELECT '-- system.settings_profiles'; +CREATE PROFILE s1_01294; +CREATE PROFILE s2_01294 SETTINGS readonly=0 TO r1_01294;; +CREATE PROFILE s3_01294 SETTINGS max_memory_usage=5000000 MIN 4000000 MAX 6000000 CONST TO r1_01294; +CREATE PROFILE s4_01294 SETTINGS max_memory_usage=5000000 TO r1_01294; +CREATE PROFILE s5_01294 SETTINGS INHERIT default, readonly=0, max_memory_usage MAX 6000000 WRITABLE TO u1_01294; +CREATE PROFILE s6_01294 TO ALL EXCEPT u1_01294, r1_01294; +SELECT name, storage, num_elements, apply_to_all, apply_to_list, apply_to_except FROM system.settings_profiles WHERE name LIKE 's%\_01294' ORDER BY name; + +SELECT '-- system.settings_profile_elements'; +SELECT * FROM system.settings_profile_elements WHERE profile_name LIKE 's%\_01294' ORDER BY profile_name, index; +DROP PROFILE s1_01294, s2_01294, s3_01294, s4_01294, s5_01294, s6_01294; + +DROP ROLE r1_01294; +DROP USER u1_01294; diff --git a/parser/testdata/01294_system_distributed_on_cluster/ast.json b/parser/testdata/01294_system_distributed_on_cluster/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01294_system_distributed_on_cluster/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01294_system_distributed_on_cluster/metadata.json b/parser/testdata/01294_system_distributed_on_cluster/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01294_system_distributed_on_cluster/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01294_system_distributed_on_cluster/query.sql b/parser/testdata/01294_system_distributed_on_cluster/query.sql new file mode 100644 index 000000000..5c2636d7a --- /dev/null +++ b/parser/testdata/01294_system_distributed_on_cluster/query.sql @@ -0,0 +1,22 @@ +-- Tags: distributed, no-parallel + +-- just a smoke test + +-- quirk: ON CLUSTER does not use currentDatabase() +drop database if exists db_01294; +create database db_01294; +set distributed_ddl_output_mode='throw'; + +drop table if exists db_01294.dist_01294; +create table db_01294.dist_01294 as system.one engine=Distributed(test_shard_localhost, system, one); +-- flush +system flush distributed db_01294.dist_01294; +system flush distributed on cluster test_shard_localhost db_01294.dist_01294; +-- stop +system stop distributed sends db_01294.dist_01294; +system stop distributed sends on cluster test_shard_localhost db_01294.dist_01294; +-- start +system start distributed sends db_01294.dist_01294; +system start distributed sends on cluster test_shard_localhost db_01294.dist_01294; + +drop database db_01294; diff --git a/parser/testdata/01295_aggregation_bug_11413/ast.json b/parser/testdata/01295_aggregation_bug_11413/ast.json new file mode 100644 index 000000000..885ae3a00 --- /dev/null +++ b/parser/testdata/01295_aggregation_bug_11413/ast.json @@ -0,0 +1,115 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '127.0.0.{1,2}'" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_99" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function argMax (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 31, + + "statistics": + { + "elapsed": 0.001386735, + "rows_read": 31, + "bytes_read": 1204 + } +} diff --git a/parser/testdata/01295_aggregation_bug_11413/metadata.json b/parser/testdata/01295_aggregation_bug_11413/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01295_aggregation_bug_11413/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01295_aggregation_bug_11413/query.sql
b/parser/testdata/01295_aggregation_bug_11413/query.sql new file mode 100644 index 000000000..ec43be9ea --- /dev/null +++ b/parser/testdata/01295_aggregation_bug_11413/query.sql @@ -0,0 +1 @@ +SELECT 1 FROM remote('127.0.0.{1,2}', numbers(99)) GROUP BY materialize(1) HAVING count() > 0 AND argMax(1, tuple(0)) diff --git a/parser/testdata/01295_create_row_policy/ast.json b/parser/testdata/01295_create_row_policy/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01295_create_row_policy/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01295_create_row_policy/metadata.json b/parser/testdata/01295_create_row_policy/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01295_create_row_policy/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01295_create_row_policy/query.sql b/parser/testdata/01295_create_row_policy/query.sql new file mode 100644 index 000000000..e09c2c174 --- /dev/null +++ b/parser/testdata/01295_create_row_policy/query.sql @@ -0,0 +1,81 @@ +-- Tags: no-parallel + +DROP ROW POLICY IF EXISTS p1_01295, p2_01295, p3_01295, p4_01295, p5_01295, p6_01295, p7_01295, p8_01295, p9_01295, p10_01295 ON db.table; +DROP ROW POLICY IF EXISTS p2_01295_renamed ON db.table; +DROP ROW POLICY IF EXISTS p3_01295 ON db.table, db2.table2; +DROP ROW POLICY IF EXISTS p4_01295 ON db.table, p5_01295 ON db2.table2; +DROP USER IF EXISTS u1_01295; +DROP ROLE IF EXISTS r1_01295; + +SELECT '-- default'; +CREATE ROW POLICY p1_01295 ON db.table; +SHOW CREATE ROW POLICY p1_01295 ON db.table; + +SELECT '-- same as default'; +CREATE ROW POLICY p2_01295 ON db.table USING NONE TO NONE; +CREATE POLICY p3_01295 ON db.table; +SHOW CREATE POLICY p2_01295 ON db.table; +SHOW CREATE ROW POLICY p3_01295 ON db.table; + +SELECT '-- rename'; +ALTER ROW POLICY p2_01295 ON db.table RENAME TO 'p2_01295_renamed'; +SHOW CREATE ROW POLICY p2_01295 ON db.table; -- { serverError UNKNOWN_ROW_POLICY } -- Policy not found +SHOW CREATE ROW POLICY p2_01295_renamed ON db.table; +DROP ROW POLICY p1_01295, p2_01295_renamed, p3_01295 ON db.table; + +SELECT '-- filter'; +CREATE ROW POLICY p1_01295 ON db.table USING ad; +CREATE ROW POLICY p2_01295 ON db.table USING id=currentUser() AS RESTRICTIVE; +CREATE ROW POLICY p3_01295 ON db.table USING 1 AS PERMISSIVE; +SHOW CREATE POLICY p1_01295 ON db.table; +SHOW CREATE POLICY p2_01295 ON db.table; +SHOW CREATE POLICY p3_01295 ON db.table; +ALTER ROW POLICY p1_01295 ON db.table FOR SELECT USING 0 AS RESTRICTIVE; +SHOW CREATE POLICY p1_01295 ON db.table; +DROP ROW POLICY p1_01295, p2_01295, p3_01295 ON db.table; + +SELECT '-- to roles'; +CREATE ROLE r1_01295; +CREATE USER u1_01295; +CREATE POLICY p1_01295 ON db.table TO NONE; +CREATE POLICY p2_01295 ON db.table TO ALL; +CREATE POLICY p3_01295 ON db.table TO r1_01295; +CREATE POLICY p4_01295 ON db.table TO u1_01295; +CREATE POLICY p5_01295 ON db.table TO r1_01295, u1_01295; +CREATE POLICY p6_01295 ON db.table TO ALL EXCEPT r1_01295; +CREATE POLICY p7_01295 ON db.table TO ALL EXCEPT r1_01295, u1_01295; +SHOW CREATE POLICY p1_01295 ON db.table; +SHOW CREATE POLICY p2_01295 ON db.table; +SHOW CREATE POLICY p3_01295 ON db.table; +SHOW CREATE POLICY p4_01295 ON db.table; +SHOW CREATE POLICY p5_01295 ON db.table; +SHOW CREATE POLICY p6_01295 ON db.table; +SHOW CREATE POLICY p7_01295 ON db.table; +ALTER POLICY p1_01295 ON db.table TO u1_01295; +ALTER POLICY p2_01295 ON db.table TO NONE; +SHOW CREATE POLICY p1_01295 ON db.table; 
+SHOW CREATE POLICY p2_01295 ON db.table; +DROP POLICY p1_01295, p2_01295, p3_01295, p4_01295, p5_01295, p6_01295, p7_01295 ON db.table; + +SELECT '-- multiple policies in one command'; +CREATE ROW POLICY p1_01295, p2_01295 ON db.table USING 1; +CREATE ROW POLICY p3_01295 ON db.table, db2.table2 TO u1_01295; +CREATE ROW POLICY p4_01295 ON db.table, p5_01295 ON db2.table2 USING a=b; +SHOW CREATE POLICY p1_01295, p2_01295 ON db.table; +SHOW CREATE POLICY p3_01295 ON db.table, db2.table2; +SHOW CREATE POLICY p4_01295 ON db.table, p5_01295 ON db2.table2; +ALTER POLICY p1_01295, p2_01295 ON db.table TO ALL; +SHOW CREATE POLICY p1_01295, p2_01295 ON db.table; +DROP POLICY p1_01295, p2_01295 ON db.table; +DROP POLICY p3_01295 ON db.table, db2.table2; +DROP POLICY p4_01295 ON db.table, p5_01295 ON db2.table2; + +SELECT '-- system.row_policies'; +CREATE ROW POLICY p1_01295 ON db.table USING ad; +CREATE ROW POLICY p2_01295 ON db.table USING id=currentUser() AS RESTRICTIVE TO u1_01295; +CREATE ROW POLICY p3_01295 ON db.table USING 1 AS PERMISSIVE TO ALL EXCEPT r1_01295; +SELECT name, short_name, database, table, storage, select_filter, is_restrictive, apply_to_all, apply_to_list, apply_to_except from system.row_policies WHERE short_name LIKE 'p%\_01295' ORDER BY name; +DROP ROW POLICY p1_01295, p2_01295, p3_01295 ON db.table; + +DROP ROLE r1_01295; +DROP USER u1_01295; diff --git a/parser/testdata/01296_codecs_bad_arguments/ast.json b/parser/testdata/01296_codecs_bad_arguments/ast.json new file mode 100644 index 000000000..bb5688b0c --- /dev/null +++ b/parser/testdata/01296_codecs_bad_arguments/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery delta_table (children 1)" + }, + { + "explain": " Identifier delta_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001461173, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/01296_codecs_bad_arguments/metadata.json b/parser/testdata/01296_codecs_bad_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01296_codecs_bad_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01296_codecs_bad_arguments/query.sql b/parser/testdata/01296_codecs_bad_arguments/query.sql new file mode 100644 index 000000000..a1d22123b --- /dev/null +++ b/parser/testdata/01296_codecs_bad_arguments/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS delta_table; +DROP TABLE IF EXISTS zstd_table; +DROP TABLE IF EXISTS lz4_table; + +CREATE TABLE delta_table (`id` UInt64 CODEC(Delta(tuple()))) ENGINE = MergeTree() ORDER BY tuple(); --{serverError ILLEGAL_CODEC_PARAMETER} +CREATE TABLE zstd_table (`id` UInt64 CODEC(ZSTD(tuple()))) ENGINE = MergeTree() ORDER BY tuple(); --{serverError ILLEGAL_CODEC_PARAMETER} +CREATE TABLE lz4_table (`id` UInt64 CODEC(LZ4HC(tuple()))) ENGINE = MergeTree() ORDER BY tuple(); --{serverError ILLEGAL_CODEC_PARAMETER} + +CREATE TABLE lz4_table (`id` UInt64 CODEC(LZ4(tuple()))) ENGINE = MergeTree() ORDER BY tuple(); --{serverError DATA_TYPE_CANNOT_HAVE_ARGUMENTS} + +SELECT 1; + +DROP TABLE IF EXISTS delta_table; +DROP TABLE IF EXISTS zstd_table; +DROP TABLE IF EXISTS lz4_table; diff --git a/parser/testdata/01296_create_row_policy_in_current_database/ast.json b/parser/testdata/01296_create_row_policy_in_current_database/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/01296_create_row_policy_in_current_database/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01296_create_row_policy_in_current_database/metadata.json b/parser/testdata/01296_create_row_policy_in_current_database/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01296_create_row_policy_in_current_database/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01296_create_row_policy_in_current_database/query.sql b/parser/testdata/01296_create_row_policy_in_current_database/query.sql new file mode 100644 index 000000000..a05a92455 --- /dev/null +++ b/parser/testdata/01296_create_row_policy_in_current_database/query.sql @@ -0,0 +1,53 @@ +-- Tags: no-parallel + +DROP ROW POLICY IF EXISTS p1_01296, p2_01296, p3_01296, p4_01296, p5_01296 ON db_01296.table; +DROP ROW POLICY IF EXISTS p3_01296, p5_01296 ON db_01296.table2; +DROP DATABASE IF EXISTS db_01296; +DROP USER IF EXISTS u1_01296; + +CREATE DATABASE db_01296; +USE db_01296; + +SELECT '-- one policy'; +CREATE POLICY p1_01296 ON table; +SHOW CREATE POLICY p1_01296 ON db_01296.table; +SHOW CREATE POLICY p1_01296 ON table; +ALTER POLICY p1_01296 ON table USING 1; +SHOW CREATE POLICY p1_01296 ON db_01296.table; +SHOW CREATE POLICY p1_01296 ON table; +DROP POLICY p1_01296 ON table; +DROP POLICY p1_01296 ON db_01296.table; -- { serverError UNKNOWN_ROW_POLICY } -- Policy not found + +SELECT '-- multiple policies'; +CREATE ROW POLICY p1_01296, p2_01296 ON table USING 1; +CREATE USER u1_01296; +CREATE ROW POLICY p3_01296 ON table, table2 TO u1_01296; +CREATE ROW POLICY p4_01296 ON table, p5_01296 ON table2 USING a=b; +SHOW CREATE POLICY p1_01296 ON table; +SHOW CREATE POLICY p2_01296 ON table; +SHOW CREATE POLICY p3_01296 ON table; +SHOW CREATE POLICY p3_01296 ON table2; +SHOW CREATE POLICY p4_01296 ON table; +SHOW CREATE POLICY p5_01296 ON table2; +SHOW CREATE POLICY p1_01296 ON db_01296.table; +SHOW CREATE POLICY p2_01296 ON db_01296.table; +SHOW CREATE POLICY p3_01296 ON db_01296.table; +SHOW CREATE POLICY p3_01296 ON db_01296.table2; +SHOW CREATE POLICY p4_01296 ON db_01296.table; +SHOW CREATE POLICY p5_01296 ON db_01296.table2; +ALTER POLICY p1_01296, p2_01296 ON table TO ALL; +SHOW CREATE POLICY p1_01296 ON table; +SHOW CREATE POLICY p2_01296 ON table; +DROP POLICY p1_01296, p2_01296 ON table; +DROP POLICY p3_01296 ON table, table2; +DROP POLICY p4_01296 ON table, p5_01296 ON table2; +DROP POLICY p1_01296 ON db_01296.table; -- { serverError UNKNOWN_ROW_POLICY } -- Policy not found +DROP POLICY p2_01296 ON db_01296.table; -- { serverError UNKNOWN_ROW_POLICY } -- Policy not found +DROP POLICY p3_01296 ON db_01296.table; -- { serverError UNKNOWN_ROW_POLICY } -- Policy not found +DROP POLICY p3_01296 ON db_01296.table2; -- { serverError UNKNOWN_ROW_POLICY } -- Policy not found +DROP POLICY p4_01296 ON db_01296.table; -- { serverError UNKNOWN_ROW_POLICY } -- Policy not found +DROP POLICY p5_01296 ON db_01296.table2; -- { serverError UNKNOWN_ROW_POLICY } -- Policy not found + +USE default; +DROP DATABASE db_01296; +DROP USER u1_01296; diff --git a/parser/testdata/01296_pipeline_stuck/ast.json b/parser/testdata/01296_pipeline_stuck/ast.json new file mode 100644 index 000000000..ec72e4f20 --- /dev/null +++ b/parser/testdata/01296_pipeline_stuck/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_01295 (children 1)" + }, + { + "explain": " Identifier 
data_01295" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001272107, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01296_pipeline_stuck/metadata.json b/parser/testdata/01296_pipeline_stuck/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01296_pipeline_stuck/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01296_pipeline_stuck/query.sql b/parser/testdata/01296_pipeline_stuck/query.sql new file mode 100644 index 000000000..2a23e6a9b --- /dev/null +++ b/parser/testdata/01296_pipeline_stuck/query.sql @@ -0,0 +1,20 @@ +drop table if exists data_01295; +create table data_01295 (key Int) Engine=AggregatingMergeTree() order by key; + +insert into data_01295 values (1); +select * from data_01295; + +select 'INSERT SELECT'; +insert into data_01295 select * from data_01295; -- no stuck for now +select * from data_01295; + +select 'INSERT SELECT max_threads'; +insert into data_01295 select * from data_01295 final settings max_threads=2; -- stuck with multiple threads +select * from data_01295; + +select 'INSERT SELECT max_insert_threads max_threads'; +set max_insert_threads=2; +insert into data_01295 select * from data_01295 final settings max_threads=2; -- no stuck for now +select * from data_01295; + +drop table data_01295; diff --git a/parser/testdata/01297_alter_distributed/ast.json b/parser/testdata/01297_alter_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01297_alter_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01297_alter_distributed/metadata.json b/parser/testdata/01297_alter_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01297_alter_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01297_alter_distributed/query.sql b/parser/testdata/01297_alter_distributed/query.sql new file mode 100644 index 000000000..a68e137bf --- /dev/null +++ b/parser/testdata/01297_alter_distributed/query.sql @@ -0,0 +1,31 @@ +-- Tags: distributed + +drop table if exists merge_distributed; +drop table if exists merge_distributed1; + +set allow_deprecated_syntax_for_merge_tree=1; +create table merge_distributed1 ( CounterID UInt32, StartDate Date, Sign Int8, VisitID UInt64, UserID UInt64, StartTime DateTime, ClickLogID UInt64) ENGINE = CollapsingMergeTree(StartDate, intHash32(UserID), tuple(CounterID, StartDate, intHash32(UserID), VisitID, ClickLogID), 8192, Sign); +insert into merge_distributed1 values (1, '2013-09-19', 1, 0, 2, '2013-09-19 12:43:06', 3); + +create table merge_distributed ( CounterID UInt32, StartDate Date, Sign Int8, VisitID UInt64, UserID UInt64, StartTime DateTime, ClickLogID UInt64) ENGINE = Distributed(test_shard_localhost, currentDatabase(), merge_distributed1); + +alter table merge_distributed1 add column dummy String after CounterID; +alter table merge_distributed add column dummy String after CounterID; + +describe table merge_distributed; +show create table merge_distributed; + +insert into merge_distributed1 values (1, 'Hello, Alter Table!','2013-09-19', 1, 0, 2, '2013-09-19 12:43:06', 3); +select CounterID, dummy from merge_distributed where dummy <> '' limit 10; + +alter table merge_distributed drop column dummy; + +describe table merge_distributed; +show create table merge_distributed; + +--error: should fall, because there is no `dummy1` column +alter table merge_distributed add column 
dummy1 String after CounterID; +select CounterID, dummy1 from merge_distributed where dummy1 <> '' limit 10; -- { serverError UNKNOWN_IDENTIFIER } + +drop table merge_distributed; +drop table merge_distributed1; diff --git a/parser/testdata/01297_create_quota/ast.json b/parser/testdata/01297_create_quota/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01297_create_quota/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01297_create_quota/metadata.json b/parser/testdata/01297_create_quota/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01297_create_quota/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01297_create_quota/query.sql b/parser/testdata/01297_create_quota/query.sql new file mode 100644 index 000000000..ab84cbe86 --- /dev/null +++ b/parser/testdata/01297_create_quota/query.sql @@ -0,0 +1,249 @@ +-- Tags: no-parallel + +DROP QUOTA IF EXISTS q1_01297, q2_01297, q3_01297, q4_01297, q5_01297, q6_01297, q7_01297, q8_01297, q9_01297, +q10_01297, q11_01297, q12_01297, q13_01297, q14_01297, q15_01297, q16_01297, q17_01297,q18_01297; + +DROP QUOTA IF EXISTS q2_01297_renamed; +DROP USER IF EXISTS u1_01297; +DROP ROLE IF EXISTS r1_01297; + +SELECT '-- default'; +CREATE QUOTA q1_01297; +SHOW CREATE QUOTA q1_01297; + +SELECT '-- same as default'; +CREATE QUOTA q2_01297 TO NONE; +CREATE QUOTA q3_01297 FOR INTERVAL 1 HOUR NO LIMITS NOT KEYED TO NONE; +CREATE QUOTA q4_01297 KEYED BY none FOR 1 hour NO LIMITS; +SHOW CREATE QUOTA q2_01297; +SHOW CREATE QUOTA q3_01297; +SHOW CREATE QUOTA q4_01297; + +SELECT '-- rename'; +ALTER QUOTA q2_01297 RENAME TO 'q2_01297_renamed'; +SHOW CREATE QUOTA q2_01297; -- { serverError UNKNOWN_QUOTA } -- Policy not found +SHOW CREATE QUOTA q2_01297_renamed; +DROP QUOTA q1_01297, q2_01297_renamed, q3_01297, q4_01297; + +SELECT '-- key'; +CREATE QUOTA q1_01297 NOT KEYED; +CREATE QUOTA q2_01297 KEY BY user_name; +CREATE QUOTA q3_01297 KEY BY ip_address; +CREATE QUOTA q4_01297 KEY BY client_key; +CREATE QUOTA q5_01297 KEY BY client_key, user_name; +CREATE QUOTA q6_01297 KEY BY client_key, ip_address; +CREATE QUOTA q7_01297 KEYED BY 'none'; +CREATE QUOTA q8_01297 KEYED BY 'user name'; +CREATE QUOTA q9_01297 KEYED BY 'IP_ADDRESS'; +CREATE QUOTA q10_01297 KEYED BY CLIENT_KEY; +CREATE QUOTA q11_01297 KEYED BY 'client key or user name'; +CREATE QUOTA q12_01297 KEYED BY 'client key or ip address'; +SHOW CREATE QUOTA q1_01297; +SHOW CREATE QUOTA q2_01297; +SHOW CREATE QUOTA q3_01297; +SHOW CREATE QUOTA q4_01297; +SHOW CREATE QUOTA q5_01297; +SHOW CREATE QUOTA q6_01297; +SHOW CREATE QUOTA q7_01297; +SHOW CREATE QUOTA q8_01297; +SHOW CREATE QUOTA q9_01297; +SHOW CREATE QUOTA q10_01297; +SHOW CREATE QUOTA q11_01297; +SHOW CREATE QUOTA q12_01297; +ALTER QUOTA q1_01297 KEY BY user_name; +ALTER QUOTA q2_01297 KEY BY client_key, user_name; +ALTER QUOTA q3_01297 NOT KEYED; +SHOW CREATE QUOTA q1_01297; +SHOW CREATE QUOTA q2_01297; +SHOW CREATE QUOTA q3_01297; +DROP QUOTA q1_01297, q2_01297, q3_01297, q4_01297, q5_01297, q6_01297, q7_01297, q8_01297, q9_01297, q10_01297, q11_01297, q12_01297; + +SELECT '-- intervals'; +CREATE QUOTA q1_01297 FOR INTERVAL 5 DAY MAX ERRORS = 3; +CREATE QUOTA q2_01297 FOR INTERVAL 30 minute MAX ERRORS 4; +CREATE QUOTA q3_01297 FOR 1 HOUR errors MAX 5; +CREATE QUOTA q4_01297 FOR 2000 SECOND errors MAX 5; +CREATE QUOTA q5_01297 FOR RANDOMIZED INTERVAL 1 YEAR MAX errors = 11, MAX queries = 100; +CREATE QUOTA q6_01297 FOR 2 
MONTH MAX errors = 11, queries = 100, result_rows = 1000, result_bytes = 10000, read_rows = 1001, read_bytes = 10001, execution_time=2.5; +CREATE QUOTA q7_01297 FOR 1 QUARTER MAX errors 11, queries 100; +CREATE QUOTA q8_01297 FOR 0.5 year ERRORS MAX 11, QUERIES MAX 100, FOR 2 MONTH RESULT ROWS MAX 1002; +SHOW CREATE QUOTA q1_01297; +SHOW CREATE QUOTA q2_01297; +SHOW CREATE QUOTA q3_01297; +SHOW CREATE QUOTA q4_01297; +SHOW CREATE QUOTA q5_01297; +SHOW CREATE QUOTA q6_01297; +SHOW CREATE QUOTA q7_01297; +SHOW CREATE QUOTA q8_01297; +ALTER QUOTA q1_01297 FOR INTERVAL 5 DAY NO LIMITS; +ALTER QUOTA q2_01297 FOR INTERVAL 30 MINUTE TRACKING ONLY; +ALTER QUOTA q3_01297 FOR INTERVAL 2 HOUR MAX errors = 10, FOR INTERVAL 1 HOUR MAX queries = 70; +ALTER QUOTA q4_01297 FOR RANDOMIZED INTERVAL 2000 SECOND errors MAX 5; +ALTER QUOTA q5_01297 FOR 1 YEAR MAX errors = 111; +SHOW CREATE QUOTA q1_01297; +SHOW CREATE QUOTA q2_01297; +SHOW CREATE QUOTA q3_01297; +SHOW CREATE QUOTA q4_01297; +SHOW CREATE QUOTA q5_01297; +DROP QUOTA q1_01297, q2_01297, q3_01297, q4_01297, q5_01297, q6_01297, q7_01297, q8_01297; + +SELECT '-- to roles'; +CREATE ROLE r1_01297; +CREATE USER u1_01297; +CREATE QUOTA q1_01297 TO NONE; +CREATE QUOTA q2_01297 TO ALL; +CREATE QUOTA q3_01297 TO r1_01297; +CREATE QUOTA q4_01297 TO u1_01297; +CREATE QUOTA q5_01297 TO r1_01297, u1_01297; +CREATE QUOTA q6_01297 TO ALL EXCEPT r1_01297; +CREATE QUOTA q7_01297 TO ALL EXCEPT r1_01297, u1_01297; +SHOW CREATE QUOTA q1_01297; +SHOW CREATE QUOTA q2_01297; +SHOW CREATE QUOTA q3_01297; +SHOW CREATE QUOTA q4_01297; +SHOW CREATE QUOTA q5_01297; +SHOW CREATE QUOTA q6_01297; +SHOW CREATE QUOTA q7_01297; +ALTER QUOTA q1_01297 TO u1_01297; +ALTER QUOTA q2_01297 TO NONE; +SHOW CREATE QUOTA q1_01297; +SHOW CREATE QUOTA q2_01297; +DROP QUOTA q1_01297, q2_01297, q3_01297, q4_01297, q5_01297, q6_01297, q7_01297; + +SELECT '-- multiple quotas in one command'; +CREATE QUOTA q1_01297, q2_01297 FOR 1 day MAX errors=5; +SHOW CREATE QUOTA q1_01297, q2_01297; +ALTER QUOTA q1_01297, q2_01297 FOR 1 day TRACKING ONLY TO r1_01297; +SHOW CREATE QUOTA q1_01297, q2_01297; +DROP QUOTA q1_01297, q2_01297; + +SELECT '-- system.quotas'; +CREATE QUOTA q1_01297 KEYED BY user_name TO r1_01297; +CREATE QUOTA q2_01297 FOR 2 MONTH MAX errors = 11, queries = 100, result_rows = 1000, result_bytes = 10000, read_rows = 1001, read_bytes = 10001, execution_time=2.5 TO r1_01297, u1_01297; +CREATE QUOTA q3_01297 KEYED BY client_key, user_name FOR 0.5 YEAR ERRORS MAX 11, QUERIES MAX 100, FOR 2 MONTH RESULT ROWS MAX 1002; +CREATE QUOTA q4_01297 FOR 1 WEEK TRACKING ONLY TO ALL EXCEPT u1_01297; +SELECT name, storage, keys, durations, apply_to_all, apply_to_list, apply_to_except FROM system.quotas WHERE name LIKE 'q%\_01297' ORDER BY name; + +SELECT '-- system.quota_limits'; +SELECT * FROM system.quota_limits WHERE quota_name LIKE 'q%\_01297' ORDER BY quota_name, duration; +DROP QUOTA q1_01297, q2_01297, q3_01297, q4_01297; + +SELECT '-- query_selects query_inserts'; +CREATE QUOTA q1_01297 KEYED BY user_name FOR INTERVAL 1 minute MAX query_selects = 1 TO r1_01297; +CREATE QUOTA q2_01297 KEYED BY user_name FOR INTERVAL 1 minute MAX query_inserts = 1 TO r1_01297; +SHOW CREATE QUOTA q1_01297; +SHOW CREATE QUOTA q2_01297; +DROP QUOTA q1_01297, q2_01297; + +DROP ROLE r1_01297; +DROP USER u1_01297; + +SELECT '-- size suffix'; +SELECT '-- functional test'; +CREATE QUOTA q1_01297 FOR INTERVAL 1 MINUTE MAX query_selects = '12K'; +CREATE QUOTA q2_01297 FOR INTERVAL 1 MINUTE MAX query_selects = '12Ki'; +CREATE 
QUOTA q3_01297 FOR INTERVAL 1 MINUTE MAX query_selects = '12M'; +CREATE QUOTA q4_01297 FOR INTERVAL 1 MINUTE MAX query_selects = '12Mi'; +CREATE QUOTA q5_01297 FOR INTERVAL 1 MINUTE MAX query_selects = '12G'; +CREATE QUOTA q6_01297 FOR INTERVAL 1 MINUTE MAX query_selects = '12Gi'; +CREATE QUOTA q7_01297 FOR INTERVAL 1 MINUTE MAX query_selects = '12T'; +CREATE QUOTA q8_01297 FOR INTERVAL 1 MINUTE MAX query_selects = '12Ti'; +CREATE QUOTA q9_01297 FOR INTERVAL 1 MINUTE MAX execution_time = '12K'; +CREATE QUOTA q10_01297 FOR INTERVAL 1 MINUTE MAX execution_time = '12Ki'; +CREATE QUOTA q11_01297 FOR INTERVAL 1 MINUTE MAX execution_time = '12M'; +CREATE QUOTA q12_01297 FOR INTERVAL 1 MINUTE MAX execution_time = '12Mi'; +CREATE QUOTA q13_01297 FOR INTERVAL 1 MINUTE MAX execution_time = '12G'; +CREATE QUOTA q14_01297 FOR INTERVAL 1 MINUTE MAX execution_time = '12Gi'; +CREATE QUOTA q15_01297 FOR INTERVAL 1 MINUTE MAX query_selects = 1.5; +CREATE QUOTA q16_01297 FOR INTERVAL 1 MINUTE MAX execution_time = 1.5; +CREATE QUOTA q17_01297 FOR INTERVAL 1 MINUTE MAX query_selects = '1.5'; -- { clientError CANNOT_PARSE_INPUT_ASSERTION_FAILED } +CREATE QUOTA q18_01297 FOR INTERVAL 1 MINUTE MAX execution_time = '1.5'; -- { clientError CANNOT_PARSE_INPUT_ASSERTION_FAILED } +SHOW CREATE QUOTA q1_01297; +SHOW CREATE QUOTA q2_01297; +SHOW CREATE QUOTA q3_01297; +SHOW CREATE QUOTA q4_01297; +SHOW CREATE QUOTA q5_01297; +SHOW CREATE QUOTA q6_01297; +SHOW CREATE QUOTA q7_01297; +SHOW CREATE QUOTA q8_01297; +SHOW CREATE QUOTA q9_01297; +SHOW CREATE QUOTA q10_01297; +SHOW CREATE QUOTA q11_01297; +SHOW CREATE QUOTA q12_01297; +SHOW CREATE QUOTA q13_01297; +SHOW CREATE QUOTA q14_01297; +SHOW CREATE QUOTA q15_01297; +SHOW CREATE QUOTA q16_01297; +DROP QUOTA IF EXISTS q1_01297; +DROP QUOTA IF EXISTS q2_01297; +DROP QUOTA IF EXISTS q3_01297; +DROP QUOTA IF EXISTS q4_01297; +DROP QUOTA IF EXISTS q5_01297; +DROP QUOTA IF EXISTS q6_01297; +DROP QUOTA IF EXISTS q7_01297; +DROP QUOTA IF EXISTS q8_01297; +DROP QUOTA IF EXISTS q9_01297; +DROP QUOTA IF EXISTS q10_01297; +DROP QUOTA IF EXISTS q11_01297; +DROP QUOTA IF EXISTS q12_01297; +DROP QUOTA IF EXISTS q13_01297; +DROP QUOTA IF EXISTS q14_01297; +DROP QUOTA IF EXISTS q15_01297; +DROP QUOTA IF EXISTS q16_01297; +SELECT '-- overflow test'; +CREATE QUOTA q1_01297 FOR INTERVAL 1 MINUTE MAX query_selects = '18446744073709551615'; +CREATE QUOTA q2_01297 FOR INTERVAL 1 MINUTE MAX execution_time = '18446744073'; +SHOW CREATE QUOTA q1_01297; +SHOW CREATE QUOTA q2_01297; +DROP QUOTA IF EXISTS q1_01297; +DROP QUOTA IF EXISTS q2_01297; +SELECT '-- zero test'; +CREATE QUOTA q1_01297 FOR INTERVAL 1 MINUTE MAX query_selects = '0'; +CREATE QUOTA q2_01297 FOR INTERVAL 1 MINUTE MAX execution_time = '0'; +SHOW CREATE QUOTA q1_01297; +SHOW CREATE QUOTA q2_01297; +DROP QUOTA IF EXISTS q1_01297; +DROP QUOTA IF EXISTS q2_01297; +SELECT '-- underflow test'; +CREATE QUOTA q1_01297 FOR INTERVAL 1 MINUTE MAX query_selects = '-1'; -- { clientError CANNOT_PARSE_NUMBER } +CREATE QUOTA q2_01297 FOR INTERVAL 1 MINUTE MAX execution_time = '-1'; -- { clientError CANNOT_PARSE_NUMBER } +SELECT '-- syntax test'; +CREATE QUOTA q1_01297 FOR INTERVAL 1 MINUTE MAX query_selects = ' 12 '; +CREATE QUOTA q2_01297 FOR INTERVAL 1 MINUTE MAX execution_time = ' 12 '; +CREATE QUOTA q3_01297 FOR INTERVAL 1 MINUTE MAX query_selects = ' 12k '; +CREATE QUOTA q4_01297 FOR INTERVAL 1 MINUTE MAX execution_time = ' 12k '; +CREATE QUOTA q5_01297 FOR INTERVAL 1 MINUTE MAX execution_time = ' 00 '; +CREATE QUOTA q6_01297 FOR INTERVAL 1 
MINUTE MAX execution_time = ' 00 '; +CREATE QUOTA q7_01297 FOR INTERVAL 1 MINUTE MAX execution_time = ' 00k '; +CREATE QUOTA q8_01297 FOR INTERVAL 1 MINUTE MAX execution_time = ' 00k '; +CREATE QUOTA q9_01297 FOR INTERVAL 1 MINUTE MAX execution_time = ' 00123k '; +CREATE QUOTA q10_01297 FOR INTERVAL 1 MINUTE MAX execution_time = ' 00123k '; +SHOW CREATE QUOTA q1_01297; +SHOW CREATE QUOTA q2_01297; +SHOW CREATE QUOTA q3_01297; +SHOW CREATE QUOTA q4_01297; +SHOW CREATE QUOTA q5_01297; +SHOW CREATE QUOTA q6_01297; +SHOW CREATE QUOTA q7_01297; +SHOW CREATE QUOTA q8_01297; +SHOW CREATE QUOTA q9_01297; +SHOW CREATE QUOTA q10_01297; +DROP QUOTA IF EXISTS q1_01297; +DROP QUOTA IF EXISTS q2_01297; +DROP QUOTA IF EXISTS q3_01297; +DROP QUOTA IF EXISTS q4_01297; +DROP QUOTA IF EXISTS q5_01297; +DROP QUOTA IF EXISTS q6_01297; +DROP QUOTA IF EXISTS q7_01297; +DROP QUOTA IF EXISTS q8_01297; +DROP QUOTA IF EXISTS q9_01297; +DROP QUOTA IF EXISTS q10_01297; +SELECT '-- bad syntax test'; +CREATE QUOTA q1_01297 FOR INTERVAL 1 MINUTE MAX query_selects = '1 1'; -- { clientError CANNOT_PARSE_INPUT_ASSERTION_FAILED } +CREATE QUOTA q2_01297 FOR INTERVAL 1 MINUTE MAX execution_time = '1 1'; -- { clientError CANNOT_PARSE_INPUT_ASSERTION_FAILED } +CREATE QUOTA q3_01297 FOR INTERVAL 1 MINUTE MAX query_selects = '1K 1'; -- { clientError CANNOT_PARSE_INPUT_ASSERTION_FAILED } +CREATE QUOTA q4_01297 FOR INTERVAL 1 MINUTE MAX execution_time = '1K 1'; -- { clientError CANNOT_PARSE_INPUT_ASSERTION_FAILED } +CREATE QUOTA q5_01297 FOR INTERVAL 1 MINUTE MAX query_selects = '1K1'; -- { clientError CANNOT_PARSE_INPUT_ASSERTION_FAILED } +CREATE QUOTA q6_01297 FOR INTERVAL 1 MINUTE MAX execution_time = '1K1'; -- { clientError CANNOT_PARSE_INPUT_ASSERTION_FAILED } +CREATE QUOTA q7_01297 FOR INTERVAL 1 MINUTE MAX query_selects = 'foo'; -- { clientError CANNOT_PARSE_INPUT_ASSERTION_FAILED } +CREATE QUOTA q8_01297 FOR INTERVAL 1 MINUTE MAX execution_time = 'bar'; -- { clientError CANNOT_PARSE_INPUT_ASSERTION_FAILED } diff --git a/parser/testdata/01298_alter_merge/ast.json b/parser/testdata/01298_alter_merge/ast.json new file mode 100644 index 000000000..0b13acaf4 --- /dev/null +++ b/parser/testdata/01298_alter_merge/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery merge (children 1)" + }, + { + "explain": " Identifier merge" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001343056, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/01298_alter_merge/metadata.json b/parser/testdata/01298_alter_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01298_alter_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01298_alter_merge/query.sql b/parser/testdata/01298_alter_merge/query.sql new file mode 100644 index 000000000..24547086e --- /dev/null +++ b/parser/testdata/01298_alter_merge/query.sql @@ -0,0 +1,37 @@ +drop table if exists merge; +drop table if exists merge1; +drop table if exists merge2; + +set allow_deprecated_syntax_for_merge_tree=1; +create table merge1 ( CounterID UInt32, StartDate Date, Sign Int8, VisitID UInt64, UserID UInt64, StartTime DateTime, ClickLogID UInt64) ENGINE = CollapsingMergeTree(StartDate, intHash32(UserID), tuple(CounterID, StartDate, intHash32(UserID), VisitID, ClickLogID), 8192, Sign); +insert into merge1 values (1, '2013-09-19', 1, 0, 2, '2013-09-19 12:43:06', 3); + +create table merge2 ( CounterID 
UInt32, StartDate Date, Sign Int8, VisitID UInt64, UserID UInt64, StartTime DateTime, ClickLogID UInt64) ENGINE = CollapsingMergeTree(StartDate, intHash32(UserID), tuple(CounterID, StartDate, intHash32(UserID), VisitID, ClickLogID), 8192, Sign); +insert into merge2 values (2, '2013-09-19', 1, 0, 2, '2013-09-19 12:43:06', 3); + +create table merge ( CounterID UInt32, StartDate Date, Sign Int8, VisitID UInt64, UserID UInt64, StartTime DateTime, ClickLogID UInt64) ENGINE = Merge(currentDatabase(), 'merge\[0-9\]'); + +alter table merge1 add column dummy String after CounterID; +alter table merge2 add column dummy String after CounterID; +alter table merge add column dummy String after CounterID; + +describe table merge; +show create table merge; + +insert into merge1 values (1, 'Hello, Alter Table!','2013-09-19', 1, 0, 2, '2013-09-19 12:43:06', 3); + +select CounterID, dummy from merge where dummy <> '' limit 10; + + +alter table merge drop column dummy; + +describe table merge; +show create table merge; + +--error: must fail correctly in the alter +alter table merge add column dummy1 String after CounterID; +select CounterID, dummy1 from merge where dummy1 <> '' limit 10; + +drop table merge; +drop table merge1; +drop table merge2; diff --git a/parser/testdata/01299_alter_merge_tree/ast.json b/parser/testdata/01299_alter_merge_tree/ast.json new file mode 100644 index 000000000..946216f16 --- /dev/null +++ b/parser/testdata/01299_alter_merge_tree/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery merge_tree (children 1)" + }, + { + "explain": " Identifier merge_tree" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001518014, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01299_alter_merge_tree/metadata.json b/parser/testdata/01299_alter_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01299_alter_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01299_alter_merge_tree/query.sql b/parser/testdata/01299_alter_merge_tree/query.sql new file mode 100644 index 000000000..1fa354040 --- /dev/null +++ b/parser/testdata/01299_alter_merge_tree/query.sql @@ -0,0 +1,18 @@ +drop table if exists merge_tree; + +set allow_deprecated_syntax_for_merge_tree=1; +create table merge_tree ( CounterID UInt32, StartDate Date, Sign Int8, VisitID UInt64, UserID UInt64, StartTime DateTime, ClickLogID UInt64) ENGINE = CollapsingMergeTree(StartDate, intHash32(UserID), tuple(CounterID, StartDate, intHash32(UserID), VisitID, ClickLogID), 8192, Sign); + +insert into merge_tree values (1, '2013-09-19', 1, 0, 2, '2013-09-19 12:43:06', 3); +alter table merge_tree add column dummy String after CounterID; +describe table merge_tree; + +insert into merge_tree values (1, 'Hello, Alter Table!','2013-09-19', 1, 0, 2, '2013-09-19 12:43:06', 3); + +select CounterID, dummy from merge_tree where dummy <> '' limit 10; + +alter table merge_tree drop column dummy; + +describe table merge_tree; + +drop table merge_tree; diff --git a/parser/testdata/01300_group_by_other_keys/ast.json b/parser/testdata/01300_group_by_other_keys/ast.json new file mode 100644 index 000000000..74fbbf640 --- /dev/null +++ b/parser/testdata/01300_group_by_other_keys/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { +
"elapsed": 0.001281209, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01300_group_by_other_keys/metadata.json b/parser/testdata/01300_group_by_other_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01300_group_by_other_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01300_group_by_other_keys/query.sql b/parser/testdata/01300_group_by_other_keys/query.sql new file mode 100644 index 000000000..0e37ef55a --- /dev/null +++ b/parser/testdata/01300_group_by_other_keys/query.sql @@ -0,0 +1,31 @@ +set max_block_size = 65505; + +set optimize_group_by_function_keys = 1; + +SELECT round(max(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY number % 2, number % 3, (number % 2 + number % 3) % 2 ORDER BY k; +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) ORDER BY k; +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3 ORDER BY k; +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 ORDER BY k; +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (number % 2) % 3, number % 2 ORDER BY k; + + +EXPLAIN SYNTAX SELECT max(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 2, number % 3, (number % 2 + number % 3) % 2 ORDER BY k; +EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) ORDER BY k; +EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3 ORDER BY k; +EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 ORDER BY k; +EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) % 3, number % 2 ORDER BY k; + +set optimize_group_by_function_keys = 0; + +SELECT round(max(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY number % 2, number % 3, (number % 2 + number % 3) % 2 ORDER BY k; +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) ORDER BY k; +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3 ORDER BY k; +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 ORDER BY k; +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (number % 2) % 3, number % 2 ORDER BY k; + +EXPLAIN SYNTAX SELECT max(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 2, number % 3, (number % 2 + number % 3) % 2 ORDER BY k; +EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) ORDER BY k; +EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3 ORDER BY k; +EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 ORDER BY k; +EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) % 3, number % 2 ORDER BY k; +-- TODO - test with similar variables of different tables (collision) diff --git a/parser/testdata/01300_group_by_other_keys_having/ast.json 
b/parser/testdata/01300_group_by_other_keys_having/ast.json new file mode 100644 index 000000000..cf3743025 --- /dev/null +++ b/parser/testdata/01300_group_by_other_keys_having/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001319496, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01300_group_by_other_keys_having/metadata.json b/parser/testdata/01300_group_by_other_keys_having/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01300_group_by_other_keys_having/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01300_group_by_other_keys_having/query.sql b/parser/testdata/01300_group_by_other_keys_having/query.sql new file mode 100644 index 000000000..cc15587d0 --- /dev/null +++ b/parser/testdata/01300_group_by_other_keys_having/query.sql @@ -0,0 +1,36 @@ +set optimize_group_by_function_keys = 1; +set optimize_syntax_fuse_functions = 0; +set enable_analyzer = 1; + +-- { echoOn } +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 HAVING avg(log(2) * number) > 3465735.3 ORDER BY k; +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 HAVING avg(log(2) * number) > 3465735.3 ORDER BY k SETTINGS enable_analyzer=1; + +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k; +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k SETTINGS enable_analyzer=1; + +SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k; +SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k SETTINGS enable_analyzer=1; + +-- { echoOff } + +EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 HAVING avg(log(2) * number) > 3465735.3 ORDER BY k; +EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 HAVING avg(log(2) * number) > 3465735.3 ORDER BY k SETTINGS enable_analyzer=0; +EXPLAIN QUERY TREE run_passes=1 SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 HAVING avg(log(2) * number) > 3465735.3 ORDER BY k; +EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k; +EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k SETTINGS enable_analyzer=0; +EXPLAIN QUERY TREE run_passes=1 SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k; +EXPLAIN SYNTAX SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 5, 
((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k; +EXPLAIN SYNTAX SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k SETTINGS enable_analyzer=0; +EXPLAIN QUERY TREE run_passes=1 SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k; + +set optimize_group_by_function_keys = 0; + +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 HAVING avg(log(2) * number) > 3465735.3 ORDER BY k; +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k; +SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k; + +EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 HAVING avg(log(2) * number) > 3465735.3 ORDER BY k; +EXPLAIN QUERY TREE run_passes=1 SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY (number % 2) * (number % 3), number % 3, number % 2 HAVING avg(log(2) * number) > 3465735.3 ORDER BY k; +EXPLAIN SYNTAX SELECT avg(log(2) * number) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k; +EXPLAIN SYNTAX SELECT (number % 5) * (number % 5) AS k FROM numbers(10000000) GROUP BY number % 5, ((number % 5) * (number % 5)) HAVING ((number % 5) * (number % 5)) < 5 ORDER BY k; diff --git a/parser/testdata/01300_polygon_convex_hull/ast.json b/parser/testdata/01300_polygon_convex_hull/ast.json new file mode 100644 index 000000000..e8a9d94bc --- /dev/null +++ b/parser/testdata/01300_polygon_convex_hull/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function polygonConvexHullCartesian (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal Tuple_(Float64_0, Float64_0)" + }, + { + "explain": " Literal Tuple_(Float64_0, Float64_5)" + }, + { + "explain": " Literal Tuple_(Float64_5, Float64_5)" + }, + { + "explain": " Literal Tuple_(Float64_5, Float64_0)" + }, + { + "explain": " Literal Tuple_(Float64_2, Float64_3)" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001461472, + "rows_read": 17, + "bytes_read": 790 + } +} diff --git a/parser/testdata/01300_polygon_convex_hull/metadata.json b/parser/testdata/01300_polygon_convex_hull/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01300_polygon_convex_hull/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01300_polygon_convex_hull/query.sql b/parser/testdata/01300_polygon_convex_hull/query.sql new file mode 100644 index 000000000..4a4aa66bb --- /dev/null +++ b/parser/testdata/01300_polygon_convex_hull/query.sql @@ -0,0 +1 @@ +select polygonConvexHullCartesian([[[(0., 0.), (0., 5.), (5., 5.), (5., 0.), (2., 3.)]]]); diff --git a/parser/testdata/01300_read_wkt/ast.json b/parser/testdata/01300_read_wkt/ast.json new file mode 100644 index 000000000..af137278e --- /dev/null +++ b/parser/testdata/01300_read_wkt/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function readWKTPoint (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'POINT(0 0)'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001208339, + "rows_read": 7, + "bytes_read": 269 + } +} diff --git a/parser/testdata/01300_read_wkt/metadata.json b/parser/testdata/01300_read_wkt/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01300_read_wkt/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01300_read_wkt/query.sql b/parser/testdata/01300_read_wkt/query.sql new file mode 100644 index 000000000..1995c5153 --- /dev/null +++ b/parser/testdata/01300_read_wkt/query.sql @@ -0,0 +1,30 @@ +SELECT readWKTPoint('POINT(0 0)'); +SELECT readWKTPolygon('POLYGON((1 0,10 0,10 10,0 10,1 0))'); +SELECT readWKTPolygon('POLYGON((0 0,10 0,10 10,0 10,0 0),(4 4,5 4,5 5,4 5,4 4))'); +SELECT readWKTMultiPolygon('MULTIPOLYGON(((2 0,10 0,10 10,0 10,2 0),(4 4,5 4,5 5,4 5,4 4)),((-10 -10,-10 -9,-9 10,-10 -10)))'); + +DROP TABLE IF EXISTS geo; +CREATE TABLE geo (s String, id Int) engine=Memory(); +INSERT INTO geo VALUES ('POINT(0 0)', 1); +INSERT INTO geo VALUES ('POINT(1 0)', 2); +INSERT INTO geo VALUES ('POINT(2 0)', 3); +SELECT readWKTPoint(s) FROM geo ORDER BY id; + +DROP TABLE IF EXISTS geo; +CREATE TABLE geo (s String, id Int) engine=Memory(); +INSERT INTO geo VALUES ('POLYGON((1 0,10 0,10 10,0 10,1 0))', 1); +INSERT INTO geo VALUES ('POLYGON((0 0,10 0,10 10,0 10,0 0))', 2); +INSERT INTO geo VALUES ('POLYGON((2 0,10 0,10 10,0 10,2 0))', 3); +INSERT INTO geo VALUES ('POLYGON((0 0,10 0,10 10,0 10,0 0),(4 4,5 4,5 5,4 5,4 4))', 4); +INSERT INTO geo VALUES ('POLYGON((2 0,10 0,10 10,0 10,2 0),(4 4,5 4,5 5,4 5,4 4))', 5); +INSERT INTO geo VALUES ('POLYGON((1 0,10 0,10 10,0 10,1 0),(4 4,5 4,5 5,4 5,4 4))', 6); +SELECT readWKTPolygon(s) FROM geo ORDER BY id; + +DROP TABLE IF EXISTS geo; +CREATE TABLE geo (s String, id Int) engine=Memory(); +INSERT INTO geo VALUES ('MULTIPOLYGON(((1 0,10 0,10 10,0 10,1 0),(4 4,5 4,5 5,4 5,4 4)),((-10 -10,-10 -9,-9 10,-10 -10)))', 1); +INSERT INTO geo VALUES ('MULTIPOLYGON(((0 0,10 0,10 10,0 10,0 0),(4 4,5 4,5 5,4 5,4 4)),((-10 -10,-10 -9,-9 10,-10 -10)))', 2); +INSERT INTO geo VALUES ('MULTIPOLYGON(((2 0,10 0,10 10,0 10,2 0),(4 4,5 4,5 5,4 5,4 4)),((-10 -10,-10 -9,-9 10,-10 -10)))', 3); +SELECT readWKTMultiPolygon(s) FROM geo ORDER BY id; + +DROP TABLE geo; diff --git a/parser/testdata/01300_svg/ast.json b/parser/testdata/01300_svg/ast.json new file mode 100644 index 000000000..301e39d2f --- /dev/null +++ b/parser/testdata/01300_svg/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + 
"type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function SVG (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Tuple_(Float64_0, Float64_0)" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001218153, + "rows_read": 7, + "bytes_read": 276 + } +} diff --git a/parser/testdata/01300_svg/metadata.json b/parser/testdata/01300_svg/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01300_svg/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01300_svg/query.sql b/parser/testdata/01300_svg/query.sql new file mode 100644 index 000000000..cf794f219 --- /dev/null +++ b/parser/testdata/01300_svg/query.sql @@ -0,0 +1,50 @@ +SELECT SVG((0., 0.)); +SELECT SVG([(0., 0.), (10, 0), (10, 10), (0, 10)]); +SELECT SVG([[(0., 0.), (10, 0), (10, 10), (0, 10)], [(4., 4.), (5, 4), (5, 5), (4, 5)]]); +SELECT SVG([[[(0., 0.), (10, 0), (10, 10), (0, 10)], [(4., 4.), (5, 4), (5, 5), (4, 5)]], [[(-10., -10.), (-10, -9), (-9, 10)]]]); +SELECT SVG((0., 0.), 'b'); +SELECT SVG([(0., 0.), (10, 0), (10, 10), (0, 10)], 'b'); +SELECT SVG([[(0., 0.), (10, 0), (10, 10), (0, 10)], [(4., 4.), (5, 4), (5, 5), (4, 5)]], 'b'); +SELECT SVG([[[(0., 0.), (10, 0), (10, 10), (0, 10)], [(4., 4.), (5, 4), (5, 5), (4, 5)]], [[(-10., -10.), (-10, -9), (-9, 10)]]], 'b'); + +DROP TABLE IF EXISTS geo; +CREATE TABLE geo (p Tuple(Float64, Float64), s String, id Int) engine=Memory(); +INSERT INTO geo VALUES ((0., 0.), 'b', 1); +INSERT INTO geo VALUES ((1., 0.), 'c', 2); +INSERT INTO geo VALUES ((2., 0.), 'd', 3); +SELECT SVG(p) FROM geo ORDER BY id; +SELECT SVG(p, 'b') FROM geo ORDER BY id; +SELECT SVG((0., 0.), s) FROM geo ORDER BY id; +SELECT SVG(p, s) FROM geo ORDER BY id; + +DROP TABLE IF EXISTS geo; +CREATE TABLE geo (p Array(Tuple(Float64, Float64)), s String, id Int) engine=Memory(); +INSERT INTO geo VALUES ([(0., 0.), (10, 0), (10, 10), (0, 10)], 'b', 1); +INSERT INTO geo VALUES ([(1., 0.), (10, 0), (10, 10), (0, 10)], 'c', 2); +INSERT INTO geo VALUES ([(2., 0.), (10, 0), (10, 10), (0, 10)], 'd', 3); +SELECT SVG(p) FROM geo ORDER BY id; +SELECT SVG(p, 'b') FROM geo ORDER BY id; +SELECT SVG([(0., 0.), (10, 0), (10, 10), (0, 10)], s) FROM geo ORDER BY id; +SELECT SVG(p, s) FROM geo ORDER BY id; + +DROP TABLE IF EXISTS geo; +CREATE TABLE geo (p Array(Array(Tuple(Float64, Float64))), s String, id Int) engine=Memory(); +INSERT INTO geo VALUES ([[(0., 0.), (10, 0), (10, 10), (0, 10)], [(4, 4), (5, 4), (5, 5), (4, 5)]], 'b', 1); +INSERT INTO geo VALUES ([[(1., 0.), (10, 0), (10, 10), (0, 10)], [(4, 4), (5, 4), (5, 5), (4, 5)]], 'c', 2); +INSERT INTO geo VALUES ([[(2., 0.), (10, 0), (10, 10), (0, 10)], [(4, 4), (5, 4), (5, 5), (4, 5)]], 'd', 3); +SELECT SVG(p) FROM geo ORDER BY id; +SELECT SVG(p, 'b') FROM geo ORDER BY id; +SELECT SVG([[(0., 0.), (10, 0), (10, 10), (0, 10)], [(4., 4.), (5, 4), (5, 5), (4, 5)]], s) FROM geo ORDER BY id; +SELECT SVG(p, s) FROM geo ORDER BY id; + +DROP TABLE IF EXISTS geo; +CREATE TABLE geo (p Array(Array(Array(Tuple(Float64, Float64)))), s String, id Int) engine=Memory(); +INSERT INTO geo VALUES ([[[(0., 0.), (10, 0), (10, 10), (0, 10)], [(4., 4.), (5, 4), (5, 5), (4, 5)]], [[(-10., -10.), (-10, -9), (-9, 10)]]], 'b', 1); +INSERT INTO geo VALUES ([[[(1., 0.), (10, 0), 
(10, 10), (0, 10)], [(4., 4.), (5, 4), (5, 5), (4, 5)]], [[(-10., -10.), (-10, -9), (-9, 10)]]], 'c', 2); +INSERT INTO geo VALUES ([[[(2., 0.), (10, 0), (10, 10), (0, 10)], [(4., 4.), (5, 4), (5, 5), (4, 5)]], [[(-10., -10.), (-10, -9), (-9, 10)]]], 'd', 3); +SELECT SVG(p) FROM geo ORDER BY id; +SELECT SVG(p, 'b') FROM geo ORDER BY id; +SELECT SVG([[[(0., 0.), (10, 0), (10, 10), (0, 10)], [(4., 4.), (5, 4), (5, 5), (4, 5)]], [[(-10., -10.), (-10, -9), (-9, 10)]]], s) FROM geo ORDER BY id; +SELECT SVG(p, s) FROM geo ORDER BY id; + +DROP TABLE geo; diff --git a/parser/testdata/01300_wkt/ast.json b/parser/testdata/01300_wkt/ast.json new file mode 100644 index 000000000..346f088dc --- /dev/null +++ b/parser/testdata/01300_wkt/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function wkt (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Tuple_(Float64_0, Float64_0)" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001199898, + "rows_read": 7, + "bytes_read": 276 + } +} diff --git a/parser/testdata/01300_wkt/metadata.json b/parser/testdata/01300_wkt/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01300_wkt/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01300_wkt/query.sql b/parser/testdata/01300_wkt/query.sql new file mode 100644 index 000000000..00063d0a6 --- /dev/null +++ b/parser/testdata/01300_wkt/query.sql @@ -0,0 +1,34 @@ +SELECT wkt((0., 0.)); +SELECT wkt([(0., 0.), (10., 0.), (10., 10.), (0., 10.)]); +SELECT wkt([[(0., 0.), (10., 0.), (10., 10.), (0., 10.)], [(4., 4.), (5., 4.), (5., 5.), (4., 5.)]]); +SELECT wkt([[[(0., 0.), (10., 0.), (10., 10.), (0., 10.)], [(4., 4.), (5., 4.), (5., 5.), (4., 5.)]], [[(-10., -10.), (-10., -9.), (-9., 10.)]]]); + +DROP TABLE IF EXISTS geo; +CREATE TABLE geo (p Tuple(Float64, Float64), id Int) engine=Memory(); +INSERT INTO geo VALUES ((0, 0), 1); +INSERT INTO geo VALUES ((1, 0), 2); +INSERT INTO geo VALUES ((2, 0), 3); +SELECT wkt(p) FROM geo ORDER BY id; + +DROP TABLE IF EXISTS geo; +CREATE TABLE geo (p Array(Tuple(Float64, Float64)), id Int) engine=Memory(); +INSERT INTO geo VALUES ([(0, 0), (10, 0), (10, 10), (0, 10)], 1); +INSERT INTO geo VALUES ([(1, 0), (10, 0), (10, 10), (0, 10)], 2); +INSERT INTO geo VALUES ([(2, 0), (10, 0), (10, 10), (0, 10)], 3); +SELECT wkt(p) FROM geo ORDER BY id; + +DROP TABLE IF EXISTS geo; +CREATE TABLE geo (p Array(Array(Tuple(Float64, Float64))), id Int) engine=Memory(); +INSERT INTO geo VALUES ([[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (5, 4), (5, 5), (4, 5)]], 1); +INSERT INTO geo VALUES ([[(1, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (5, 4), (5, 5), (4, 5)]], 2); +INSERT INTO geo VALUES ([[(2, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (5, 4), (5, 5), (4, 5)]], 3); +SELECT wkt(p) FROM geo ORDER BY id; + +DROP TABLE IF EXISTS geo; +CREATE TABLE geo (p Array(Array(Array(Tuple(Float64, Float64)))), id Int) engine=Memory(); +INSERT INTO geo VALUES ([[[(0, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (5, 4), (5, 5), (4, 5)]], [[(-10, -10), (-10, -9), (-9, 10)]]], 1); +INSERT INTO geo VALUES ([[[(1, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (5, 4), (5, 5), (4, 5)]], [[(-10, -10), (-10, -9), 
(-9, 10)]]], 2); +INSERT INTO geo VALUES ([[[(2, 0), (10, 0), (10, 10), (0, 10)], [(4, 4), (5, 4), (5, 5), (4, 5)]], [[(-10, -10), (-10, -9), (-9, 10)]]], 3); +SELECT wkt(p) FROM geo ORDER BY id; + +DROP TABLE geo; diff --git a/parser/testdata/01301_polygons_within/ast.json b/parser/testdata/01301_polygons_within/ast.json new file mode 100644 index 000000000..951490e48 --- /dev/null +++ b/parser/testdata/01301_polygons_within/ast.json @@ -0,0 +1,115 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function polygonsWithinCartesian (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 8)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_0)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_3)" + }, + { + "explain": " Literal Tuple_(UInt64_1, Float64_2.9)" + }, + { + "explain": " Literal Tuple_(UInt64_2, Float64_2.6)" + }, + { + "explain": " Literal Tuple_(Float64_2.6, UInt64_2)" + }, + { + "explain": " Literal Tuple_(Float64_2.9, UInt64_1)" + }, + { + "explain": " Literal Tuple_(UInt64_3, UInt64_0)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_0)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_1)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_4)" + }, + { + "explain": " Literal Tuple_(Float64_4, Float64_4)" + }, + { + "explain": " Literal Tuple_(Float64_4, Float64_1)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_1)" + } + ], + + "rows": 31, + + "statistics": + { + "elapsed": 0.001625949, + "rows_read": 31, + "bytes_read": 1492 + } +} diff --git a/parser/testdata/01301_polygons_within/metadata.json b/parser/testdata/01301_polygons_within/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01301_polygons_within/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01301_polygons_within/query.sql b/parser/testdata/01301_polygons_within/query.sql new file mode 100644 index 000000000..901c7909a --- /dev/null +++ b/parser/testdata/01301_polygons_within/query.sql @@ -0,0 +1,15 @@ +select polygonsWithinCartesian([[[(0, 0),(0, 3),(1, 2.9),(2, 2.6),(2.6, 2),(2.9, 1),(3, 0),(0, 0)]]], [[[(1., 1.),(1., 4.),(4., 4.),(4., 1.),(1., 1.)]]]); +select polygonsWithinCartesian([[[(2., 2.), (2., 3.), (3., 3.), (3., 2.)]]], [[[(1., 1.),(1., 4.),(4., 4.),(4., 1.),(1., 1.)]]]); + +select polygonsWithinSpherical([[[(4.3613577, 50.8651821), (4.349556, 50.8535879), (4.3602419, 50.8435626), (4.3830299, 50.8428851), (4.3904543, 50.8564867), (4.3613148, 50.8651279)]]], [[[(4.346693, 50.858306), (4.367945, 50.852455), (4.366227, 50.840809), (4.344961, 50.833264), (4.338074, 50.848677), (4.346693, 
50.858306)]]]); +select polygonsWithinSpherical([[[(4.3501568, 50.8518269), (4.3444920, 50.8439961), (4.3565941, 50.8443213), (4.3501568, 50.8518269)]]], [[[(4.3679450, 50.8524550),(4.3466930, 50.8583060),(4.3380740, 50.8486770),(4.3449610, 50.8332640),(4.3662270, 50.8408090),(4.3679450, 50.8524550)]]]); + +select '-------- MultiPolygon with Polygon'; +select polygonsWithinSpherical([[(29.453587685533865,59.779570356240356),(29.393139070478895,52.276266797422124),(40.636581470703206,59.38168915000267),(41.21084331372543,59.103467777099866),(29.786055068336193,52.146627480315004),(31.23682182965546,52.16517054781818),(41.69443223416517,58.85424941916091),(42.51048853740727,58.47703162291134),(32.59691566839227,52.22075341251539),(34.289476889931414,52.22075341251539),(43.02430176537451,58.07974369546071),(43.02430176537451,57.25537683364851),(35.468224883503325,52.2022335126388),(37.16078610504247,52.23926559241349),(43.02430176537451,56.26136189644947),(43.02430176537451,55.326904361850836),(38.33953409861437,52.16517054781818),(40.09254393520848,52.16517054781818),(44.4146199116388,55.3097062225408),(44.47506852669377,59.80998197603594),(39.72985224487867,59.931351417569715),(30.23941968124846,53.67744677450975),(30.20919537372098,54.63314259659509),(38.73245009647167,59.94649146557819),(37.2816833351524,59.97675082987618),(30.23941968124846,55.2752875586599),(30.33009260383092,56.19415599955667),(36.28428118674541,59.96162460231375),(34.863738732953635,59.97675082987618),(30.178971066193498,56.97640788219866),(30.178971066193498,57.91957806959033),(33.65476643185424,59.94649146557819),(32.32489690064491,59.94649146557819),(30.481214141468342,58.85424941916091),(30.571887064050795,59.99187015036608),(29.453587685533865,59.779570356240356)]], [[[(33.473420586689336,58.85424941916091),(32.23422397806246,58.492830557036),(32.173775363007486,58.03176922751564),(31.508840597402823,57.499784781503735),(31.750635057622702,56.86092686957355),(31.508840597402823,55.941082594334574),(32.20399967053497,55.515591939372456),(31.84130798020516,54.998862226280465),(31.418167674820367,54.422670886434275),(32.47601843828233,53.83826377018255),(32.08310244042503,53.408048308050866),(33.171177511414484,52.82758702113742),(34.77306581037117,52.91880107773494),(34.77306581037117,53.784726518357985),(34.108131044766516,54.17574726780569),(35.07530888564602,54.59813930694554),(34.25925258240394,54.96417435716029),(35.01486027059106,55.361278263643584),(33.50364489421682,55.37845402950552),(32.7480372060297,55.90721384574556),(35.67979503619571,55.68634475630185),(32.83871012861215,56.311688992608396),(34.591719965206266,56.29492065473883),(35.7100193437232,56.311688992608396),(33.83611227701915,56.695333481003644),(32.95960735872209,56.9434497616887),(36.072711034053015,57.091531913901434),(33.171177511414484,57.33702717078384),(36.193608264162954,57.499784781503735),(33.23162612646945,57.77481561306047),(36.43540272438284,58.04776787540811),(33.62454212432676,58.27099811968307),(36.344729801800376,58.54018474404165),(33.83611227701915,58.68186423448108),(34.74284150284369,59.565911441555244),(33.473420586689336,58.85424941916091)]], 
[[(34.65216858026123,58.91672306881671),(37.19101041256995,58.68186423448108),(36.01226241899805,58.28688958537609),(37.16078610504247,58.04776787540811),(35.74024365125068,57.79092907387934),(37.009664567405046,57.499784781503735),(35.77046795877817,57.25537683364851),(36.979440259877556,57.07510745541089),(34.22902827487645,56.794777197297435),(36.7074214921302,56.210968525786996),(34.712617195316206,56.10998276812964),(36.55629995449277,55.63519693782703),(35.13575750070099,55.53270067649592),(36.43540272438284,55.34409504165558),(34.83351442542614,55.01619492319591),(35.61934642114075,54.49294870011772),(34.89396304048112,54.12264226523038),(35.37755196092087,53.046178687628185),(37.43280487278982,52.95523300597458),(35.92158949641559,53.80257986695776),(36.91899164482259,53.856094327816805),(36.01226241899805,54.75541714463799),(37.765272255592166,55.189110239786885),(36.828318722240134,55.44708256557195),(38.03729102333953,55.652253637168315),(36.64697287707522,55.941082594334574),(38.21863686850443,56.05939028508024),(36.37495410932787,56.64551287174558),(38.30930979108689,56.992876013526654),(37.16078610504247,57.25537683364851),(38.127963945921984,57.516020773674256),(37.43280487278982,57.710289827306724),(38.33953409861437,57.935626886818994),(37.40258056526235,58.31865112960426),(38.58132855883426,58.744648733419496),(37.31190764267989,59.02578062465136),(34.65216858026123,58.91672306881671)]], [[(38.52087994377928,59.11898412389468),(39.54850639971376,58.713270635642914),(38.369758406141855,58.28688958537609),(38.85334732658162,58.06375936407028),(38.33953409861437,57.710289827306724),(38.73245009647167,57.48354156434209),(38.21863686850443,57.271721400459285),(38.97424455669155,56.87744603722649),(37.463029180317314,56.5623320541159),(38.94402024916407,56.05939028508024),(38.18841256097694,55.856355210835915),(38.490655636251795,55.53270067649592),(37.795496563119656,55.39562234093384),(38.30930979108689,55.154587013355666),(36.7074214921302,54.65063295250911),(37.31190764267989,53.92734063371401),(36.979440259877556,53.58783775557231),(37.855945178174615,52.91880107773497),(39.57873070724124,52.69956490610895),(38.33953409861437,53.281741738901104),(40.00187101262603,53.35396273604752),(39.54850639971376,53.58783775557231),(40.24366547284591,53.58783775557231),(39.97164670509855,53.98069568468355),(40.60635716317572,54.03398248547225),(40.39478701048334,54.44025165268903),(39.54850639971376,54.56310590284329),(39.54850639971376,54.87732350170489),(40.39478701048334,54.87732350170489),(40.39478701048334,55.24083903654295),(39.82052516746112,55.2752875586599),(39.760076552406154,55.75443792473942),(40.57613285564824,55.78844000174894),(40.425011318010824,56.19415599955667),(39.82052516746112,56.07626182891758),(39.79030085993364,56.41214455508424),(40.48545993306579,56.495655446714636),(40.33433839542836,56.95993246553937),(39.79030085993364,56.992876013526654),(39.72985224487867,57.46729112028032),(40.33433839542836,57.46729112028032),(40.24366547284591,58.04776787540811),(39.63917932229622,58.04776787540811),(39.63917932229622,58.382088724871295),(40.33433839542836,58.382088724871295),(40.45523562553831,58.9011152358548),(38.52087994377928,59.11898412389468)]]]) format TSV; + +select '-------- MultiPolygon with Polygon with Holes'; +select 
polygonsWithinSpherical([[[(33.473420586689336,58.85424941916091),(32.23422397806246,58.492830557036),(32.173775363007486,58.03176922751564),(31.508840597402823,57.499784781503735),(31.750635057622702,56.86092686957355),(31.508840597402823,55.941082594334574),(32.20399967053497,55.515591939372456),(31.84130798020516,54.998862226280465),(31.418167674820367,54.422670886434275),(32.47601843828233,53.83826377018255),(32.08310244042503,53.408048308050866),(33.171177511414484,52.82758702113742),(34.77306581037117,52.91880107773494),(34.77306581037117,53.784726518357985),(34.108131044766516,54.17574726780569),(35.07530888564602,54.59813930694554),(34.25925258240394,54.96417435716029),(35.01486027059106,55.361278263643584),(33.50364489421682,55.37845402950552),(32.7480372060297,55.90721384574556),(35.67979503619571,55.68634475630185),(32.83871012861215,56.311688992608396),(34.591719965206266,56.29492065473883),(35.7100193437232,56.311688992608396),(33.83611227701915,56.695333481003644),(32.95960735872209,56.9434497616887),(36.072711034053015,57.091531913901434),(33.171177511414484,57.33702717078384),(36.193608264162954,57.499784781503735),(33.23162612646945,57.77481561306047),(36.43540272438284,58.04776787540811),(33.62454212432676,58.27099811968307),(36.344729801800376,58.54018474404165),(33.83611227701915,58.68186423448108),(34.74284150284369,59.565911441555244),(33.473420586689336,58.85424941916091)]], [[(34.65216858026123,58.91672306881671),(37.19101041256995,58.68186423448108),(36.01226241899805,58.28688958537609),(37.16078610504247,58.04776787540811),(35.74024365125068,57.79092907387934),(37.009664567405046,57.499784781503735),(35.77046795877817,57.25537683364851),(36.979440259877556,57.07510745541089),(34.22902827487645,56.794777197297435),(36.7074214921302,56.210968525786996),(34.712617195316206,56.10998276812964),(36.55629995449277,55.63519693782703),(35.13575750070099,55.53270067649592),(36.43540272438284,55.34409504165558),(34.83351442542614,55.01619492319591),(35.61934642114075,54.49294870011772),(34.89396304048112,54.12264226523038),(35.37755196092087,53.046178687628185),(37.43280487278982,52.95523300597458),(35.92158949641559,53.80257986695776),(36.91899164482259,53.856094327816805),(36.01226241899805,54.75541714463799),(37.765272255592166,55.189110239786885),(36.828318722240134,55.44708256557195),(38.03729102333953,55.652253637168315),(36.64697287707522,55.941082594334574),(38.21863686850443,56.05939028508024),(36.37495410932787,56.64551287174558),(38.30930979108689,56.992876013526654),(37.16078610504247,57.25537683364851),(38.127963945921984,57.516020773674256),(37.43280487278982,57.710289827306724),(38.33953409861437,57.935626886818994),(37.40258056526235,58.31865112960426),(38.58132855883426,58.744648733419496),(37.31190764267989,59.02578062465136),(34.65216858026123,58.91672306881671)]], 
[[(38.52087994377928,59.11898412389468),(39.54850639971376,58.713270635642914),(38.369758406141855,58.28688958537609),(38.85334732658162,58.06375936407028),(38.33953409861437,57.710289827306724),(38.73245009647167,57.48354156434209),(38.21863686850443,57.271721400459285),(38.97424455669155,56.87744603722649),(37.463029180317314,56.5623320541159),(38.94402024916407,56.05939028508024),(38.18841256097694,55.856355210835915),(38.490655636251795,55.53270067649592),(37.795496563119656,55.39562234093384),(38.30930979108689,55.154587013355666),(36.7074214921302,54.65063295250911),(37.31190764267989,53.92734063371401),(36.979440259877556,53.58783775557231),(37.855945178174615,52.91880107773497),(39.57873070724124,52.69956490610895),(38.33953409861437,53.281741738901104),(40.00187101262603,53.35396273604752),(39.54850639971376,53.58783775557231),(40.24366547284591,53.58783775557231),(39.97164670509855,53.98069568468355),(40.60635716317572,54.03398248547225),(40.39478701048334,54.44025165268903),(39.54850639971376,54.56310590284329),(39.54850639971376,54.87732350170489),(40.39478701048334,54.87732350170489),(40.39478701048334,55.24083903654295),(39.82052516746112,55.2752875586599),(39.760076552406154,55.75443792473942),(40.57613285564824,55.78844000174894),(40.425011318010824,56.19415599955667),(39.82052516746112,56.07626182891758),(39.79030085993364,56.41214455508424),(40.48545993306579,56.495655446714636),(40.33433839542836,56.95993246553937),(39.79030085993364,56.992876013526654),(39.72985224487867,57.46729112028032),(40.33433839542836,57.46729112028032),(40.24366547284591,58.04776787540811),(39.63917932229622,58.04776787540811),(39.63917932229622,58.382088724871295),(40.33433839542836,58.382088724871295),(40.45523562553831,58.9011152358548),(38.52087994377928,59.11898412389468)]]], [[(24.367675781249993,61.45977057029751),(19.577636718749993,58.67693767258692),(19.577636718749993,57.492213666700735),(19.445800781249996,55.87531083569678),(19.445800781249996,54.085173420886775),(17.468261718749996,53.014783245859235),(20.017089843749993,51.563412328675895),(21.203613281249993,50.205033264943324),(26.125488281249993,50.40151532278236),(27.22412109374999,48.980216985374994),(32.80517578124999,49.525208341974405),(35.26611328124999,48.74894534343292),(36.93603515624999,49.66762782262194),(42.56103515625,48.77791275550183),(43.92333984374999,49.8096315635631),(47.17529296875,49.152969656170455),(49.28466796875,50.54136296522162),(48.05419921875,51.17934297928929),(51.39404296875,52.48278022207825),(50.64697265625,53.014783245859235),(52.88818359375,53.93021986394004),(51.65771484374999,54.29088164657006),(52.66845703125,55.825973254619015),(50.25146484375,56.145549500679095),(51.92138671875,57.914847767009206),(49.15283203125,58.17070248348605),(49.59228515625,60.086762746260064),(47.043457031249986,59.88893689676584),(43.57177734375,61.37567331572748),(42.64892578125,60.630101766266705),(36.89208984374999,62.000904713685856),(36.01318359374999,61.143235250840576),(31.398925781249993,62.02152819100766),(30.563964843749996,61.05828537037917),(26.872558593749993,61.71070595883174),(26.652832031249993,61.10078883158897),(24.367675781249993,61.45977057029751)], 
[(24.455566406249993,59.42272750081452),(21.203613281249993,58.49369382056807),(21.335449218749993,56.89700392127261),(21.599121093749993,55.92458580482949),(25.202636718749993,55.998380955359636),(28.850097656249993,57.06463027327854),(27.09228515625,57.844750992890994),(28.806152343749996,59.17592824927138),(26.257324218749993,59.17592824927138),(24.455566406249993,59.42272750081452)], [(35.13427734375,59.84481485969107),(31.970214843749993,58.97266715450152),(33.20068359374999,56.776808316568406),(36.67236328125,56.41390137600675),(39.08935546874999,57.25528054528888),(42.69287109374999,58.03137242177638),(40.89111328124999,59.26588062825809),(37.28759765625,58.722598828043374),(37.11181640624999,59.66774058164964),(35.13427734375,59.84481485969107)], [(29.157714843749993,55.75184939173528),(22.565917968749993,55.128649068488784),(22.565917968749993,53.54030739150019),(22.038574218749996,51.48138289610097),(26.257324218749993,51.42661449707484),(30.124511718749993,50.54136296522162),(32.18994140624999,51.17934297928929),(30.124511718749993,53.173119202640635),(35.09033203124999,53.173119202640635),(33.11279296875,54.085173420886775),(29.597167968749993,55.50374985927513),(29.157714843749993,55.75184939173528)], [(42.82470703125,56.58369172128337),(36.584472656249986,55.329144408405085),(37.99072265625,53.592504809039355),(34.95849609374999,51.48138289610097),(36.54052734374999,50.40151532278236),(39.66064453124999,50.289339253291786),(39.79248046875,52.13348804077148),(41.77001953125,50.68079714532166),(44.49462890624999,51.97134580885171),(47.30712890624999,52.509534770327264),(44.05517578125,53.54030739150019),(46.60400390625,53.696706475303245),(47.61474609375,55.40406982700608),(45.37353515625,55.40406982700608),(42.82470703125,56.58369172128337)]]) format TSV; + +select '-------- Polygon with Polygon with Holes'; +select polygonsWithinSpherical([[(29.453587685533865,59.779570356240356),(29.393139070478895,52.276266797422124),(40.636581470703206,59.38168915000267),(41.21084331372543,59.103467777099866),(29.786055068336193,52.146627480315004),(31.23682182965546,52.16517054781818),(41.69443223416517,58.85424941916091),(42.51048853740727,58.47703162291134),(32.59691566839227,52.22075341251539),(34.289476889931414,52.22075341251539),(43.02430176537451,58.07974369546071),(43.02430176537451,57.25537683364851),(35.468224883503325,52.2022335126388),(37.16078610504247,52.23926559241349),(43.02430176537451,56.26136189644947),(43.02430176537451,55.326904361850836),(38.33953409861437,52.16517054781818),(40.09254393520848,52.16517054781818),(44.4146199116388,55.3097062225408),(44.47506852669377,59.80998197603594),(39.72985224487867,59.931351417569715),(30.23941968124846,53.67744677450975),(30.20919537372098,54.63314259659509),(38.73245009647167,59.94649146557819),(37.2816833351524,59.97675082987618),(30.23941968124846,55.2752875586599),(30.33009260383092,56.19415599955667),(36.28428118674541,59.96162460231375),(34.863738732953635,59.97675082987618),(30.178971066193498,56.97640788219866),(30.178971066193498,57.91957806959033),(33.65476643185424,59.94649146557819),(32.32489690064491,59.94649146557819),(30.481214141468342,58.85424941916091),(30.571887064050795,59.99187015036608),(29.453587685533865,59.779570356240356)]], 
[[(24.367675781249993,61.45977057029751),(19.577636718749993,58.67693767258692),(19.577636718749993,57.492213666700735),(19.445800781249996,55.87531083569678),(19.445800781249996,54.085173420886775),(17.468261718749996,53.014783245859235),(20.017089843749993,51.563412328675895),(21.203613281249993,50.205033264943324),(26.125488281249993,50.40151532278236),(27.22412109374999,48.980216985374994),(32.80517578124999,49.525208341974405),(35.26611328124999,48.74894534343292),(36.93603515624999,49.66762782262194),(42.56103515625,48.77791275550183),(43.92333984374999,49.8096315635631),(47.17529296875,49.152969656170455),(49.28466796875,50.54136296522162),(48.05419921875,51.17934297928929),(51.39404296875,52.48278022207825),(50.64697265625,53.014783245859235),(52.88818359375,53.93021986394004),(51.65771484374999,54.29088164657006),(52.66845703125,55.825973254619015),(50.25146484375,56.145549500679095),(51.92138671875,57.914847767009206),(49.15283203125,58.17070248348605),(49.59228515625,60.086762746260064),(47.043457031249986,59.88893689676584),(43.57177734375,61.37567331572748),(42.64892578125,60.630101766266705),(36.89208984374999,62.000904713685856),(36.01318359374999,61.143235250840576),(31.398925781249993,62.02152819100766),(30.563964843749996,61.05828537037917),(26.872558593749993,61.71070595883174),(26.652832031249993,61.10078883158897),(24.367675781249993,61.45977057029751)], [(24.455566406249993,59.42272750081452),(21.203613281249993,58.49369382056807),(21.335449218749993,56.89700392127261),(21.599121093749993,55.92458580482949),(25.202636718749993,55.998380955359636),(28.850097656249993,57.06463027327854),(27.09228515625,57.844750992890994),(28.806152343749996,59.17592824927138),(26.257324218749993,59.17592824927138),(24.455566406249993,59.42272750081452)], [(35.13427734375,59.84481485969107),(31.970214843749993,58.97266715450152),(33.20068359374999,56.776808316568406),(36.67236328125,56.41390137600675),(39.08935546874999,57.25528054528888),(42.69287109374999,58.03137242177638),(40.89111328124999,59.26588062825809),(37.28759765625,58.722598828043374),(37.11181640624999,59.66774058164964),(35.13427734375,59.84481485969107)], [(29.157714843749993,55.75184939173528),(22.565917968749993,55.128649068488784),(22.565917968749993,53.54030739150019),(22.038574218749996,51.48138289610097),(26.257324218749993,51.42661449707484),(30.124511718749993,50.54136296522162),(32.18994140624999,51.17934297928929),(30.124511718749993,53.173119202640635),(35.09033203124999,53.173119202640635),(33.11279296875,54.085173420886775),(29.597167968749993,55.50374985927513),(29.157714843749993,55.75184939173528)], [(42.82470703125,56.58369172128337),(36.584472656249986,55.329144408405085),(37.99072265625,53.592504809039355),(34.95849609374999,51.48138289610097),(36.54052734374999,50.40151532278236),(39.66064453124999,50.289339253291786),(39.79248046875,52.13348804077148),(41.77001953125,50.68079714532166),(44.49462890624999,51.97134580885171),(47.30712890624999,52.509534770327264),(44.05517578125,53.54030739150019),(46.60400390625,53.696706475303245),(47.61474609375,55.40406982700608),(45.37353515625,55.40406982700608),(42.82470703125,56.58369172128337)]]) format TSV; + diff --git a/parser/testdata/01302_polygons_distance/ast.json b/parser/testdata/01302_polygons_distance/ast.json new file mode 100644 index 000000000..e1feef4db --- /dev/null +++ b/parser/testdata/01302_polygons_distance/ast.json @@ -0,0 +1,115 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function polygonsDistanceCartesian (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 8)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_0)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_3)" + }, + { + "explain": " Literal Tuple_(UInt64_1, Float64_2.9)" + }, + { + "explain": " Literal Tuple_(UInt64_2, Float64_2.6)" + }, + { + "explain": " Literal Tuple_(Float64_2.6, UInt64_2)" + }, + { + "explain": " Literal Tuple_(Float64_2.9, UInt64_1)" + }, + { + "explain": " Literal Tuple_(UInt64_3, UInt64_0)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_0)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_1)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_4)" + }, + { + "explain": " Literal Tuple_(Float64_4, Float64_4)" + }, + { + "explain": " Literal Tuple_(Float64_4, Float64_1)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_1)" + } + ], + + "rows": 31, + + "statistics": + { + "elapsed": 0.001356576, + "rows_read": 31, + "bytes_read": 1494 + } +} diff --git a/parser/testdata/01302_polygons_distance/metadata.json b/parser/testdata/01302_polygons_distance/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01302_polygons_distance/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01302_polygons_distance/query.sql b/parser/testdata/01302_polygons_distance/query.sql new file mode 100644 index 000000000..a69b5017a --- /dev/null +++ b/parser/testdata/01302_polygons_distance/query.sql @@ -0,0 +1,10 @@ +select polygonsDistanceCartesian([[[(0, 0),(0, 3),(1, 2.9),(2, 2.6),(2.6, 2),(2.9, 1),(3, 0),(0, 0)]]], [[[(1., 1.),(1., 4.),(4., 4.),(4., 1.),(1., 1.)]]]); +select polygonsDistanceCartesian([[[(0, 0), (0, 0.1), (0.1, 0.1), (0.1, 0)]]], [[[(1., 1.),(1., 4.),(4., 4.),(4., 1.),(1., 1.)]]]); +select polygonsDistanceSpherical([[[(23.725750, 37.971536)]]], [[[(4.3826169, 50.8119483)]]]); + +drop table if exists polygon_01302; +create table polygon_01302 (x Array(Array(Array(Tuple(Float64, Float64)))), y Array(Array(Array(Tuple(Float64, Float64))))) engine=Memory(); +insert into polygon_01302 values ([[[(23.725750, 37.971536)]]], [[[(4.3826169, 50.8119483)]]]); +select polygonsDistanceSpherical(x, y) from polygon_01302; + +drop table polygon_01302; diff --git a/parser/testdata/01303_polygons_equals/ast.json b/parser/testdata/01303_polygons_equals/ast.json new file mode 100644 index 000000000..58bda818f --- /dev/null +++ b/parser/testdata/01303_polygons_equals/ast.json @@ -0,0 +1,115 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + 
"explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function polygonsEqualsCartesian (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 8)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_0)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_3)" + }, + { + "explain": " Literal Tuple_(UInt64_1, Float64_2.9)" + }, + { + "explain": " Literal Tuple_(UInt64_2, Float64_2.6)" + }, + { + "explain": " Literal Tuple_(Float64_2.6, UInt64_2)" + }, + { + "explain": " Literal Tuple_(Float64_2.9, UInt64_1)" + }, + { + "explain": " Literal Tuple_(UInt64_3, UInt64_0)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_0)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_1)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_4)" + }, + { + "explain": " Literal Tuple_(Float64_4, Float64_4)" + }, + { + "explain": " Literal Tuple_(Float64_4, Float64_1)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_1)" + } + ], + + "rows": 31, + + "statistics": + { + "elapsed": 0.001136759, + "rows_read": 31, + "bytes_read": 1492 + } +} diff --git a/parser/testdata/01303_polygons_equals/metadata.json b/parser/testdata/01303_polygons_equals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01303_polygons_equals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01303_polygons_equals/query.sql b/parser/testdata/01303_polygons_equals/query.sql new file mode 100644 index 000000000..42f1bd469 --- /dev/null +++ b/parser/testdata/01303_polygons_equals/query.sql @@ -0,0 +1,2 @@ +select polygonsEqualsCartesian([[[(0, 0),(0, 3),(1, 2.9),(2, 2.6),(2.6, 2),(2.9, 1),(3, 0),(0, 0)]]], [[[(1., 1.),(1., 4.),(4., 4.),(4., 1.),(1., 1.)]]]); +select polygonsEqualsCartesian([[[(1., 1.),(1., 4.),(4., 4.),(4., 1.)]]], [[[(1., 1.),(1., 4.),(4., 4.),(4., 1.),(1., 1.)]]]); diff --git a/parser/testdata/01304_polygons_sym_difference/ast.json b/parser/testdata/01304_polygons_sym_difference/ast.json new file mode 100644 index 000000000..518996451 --- /dev/null +++ b/parser/testdata/01304_polygons_sym_difference/ast.json @@ -0,0 +1,115 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function polygonsSymDifferenceCartesian (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " 
Function array (children 1)" + }, + { + "explain": " ExpressionList (children 8)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_0)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_3)" + }, + { + "explain": " Literal Tuple_(UInt64_1, Float64_2.9)" + }, + { + "explain": " Literal Tuple_(UInt64_2, Float64_2.6)" + }, + { + "explain": " Literal Tuple_(Float64_2.6, UInt64_2)" + }, + { + "explain": " Literal Tuple_(Float64_2.9, UInt64_1)" + }, + { + "explain": " Literal Tuple_(UInt64_3, UInt64_0)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_0)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_1)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_4)" + }, + { + "explain": " Literal Tuple_(Float64_4, Float64_4)" + }, + { + "explain": " Literal Tuple_(Float64_4, Float64_1)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_1)" + } + ], + + "rows": 31, + + "statistics": + { + "elapsed": 0.001430705, + "rows_read": 31, + "bytes_read": 1499 + } +} diff --git a/parser/testdata/01304_polygons_sym_difference/metadata.json b/parser/testdata/01304_polygons_sym_difference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01304_polygons_sym_difference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01304_polygons_sym_difference/query.sql b/parser/testdata/01304_polygons_sym_difference/query.sql new file mode 100644 index 000000000..d0e022e14 --- /dev/null +++ b/parser/testdata/01304_polygons_sym_difference/query.sql @@ -0,0 +1,15 @@ +select polygonsSymDifferenceCartesian([[[(0, 0),(0, 3),(1, 2.9),(2, 2.6),(2.6, 2),(2.9, 1),(3, 0),(0, 0)]]], [[[(1., 1.),(1., 4.),(4., 4.),(4., 1.),(1., 1.)]]]); + +-- Google "draw wkt online" + +select '-------- MultiPolygon with Polygon'; +select wkt(polygonsSymDifferenceSpherical([[(29.453587685533865,59.779570356240356),(29.393139070478895,52.276266797422124),(40.636581470703206,59.38168915000267),(41.21084331372543,59.103467777099866),(29.786055068336193,52.146627480315004),(31.23682182965546,52.16517054781818),(41.69443223416517,58.85424941916091),(42.51048853740727,58.47703162291134),(32.59691566839227,52.22075341251539),(34.289476889931414,52.22075341251539),(43.02430176537451,58.07974369546071),(43.02430176537451,57.25537683364851),(35.468224883503325,52.2022335126388),(37.16078610504247,52.23926559241349),(43.02430176537451,56.26136189644947),(43.02430176537451,55.326904361850836),(38.33953409861437,52.16517054781818),(40.09254393520848,52.16517054781818),(44.4146199116388,55.3097062225408),(44.47506852669377,59.80998197603594),(39.72985224487867,59.931351417569715),(30.23941968124846,53.67744677450975),(30.20919537372098,54.63314259659509),(38.73245009647167,59.94649146557819),(37.2816833351524,59.97675082987618),(30.23941968124846,55.2752875586599),(30.33009260383092,56.19415599955667),(36.28428118674541,59.96162460231375),(34.863738732953635,59.97675082987618),(30.178971066193498,56.97640788219866),(30.178971066193498,57.91957806959033),(33.65476643185424,59.94649146557819),(32.32489690064491,59.94649146557819),(30.481214141468342,58.85424941916091),(30.571887064050795,59.99187015036608),(29.453587685533865,59.779570356240356)]], 
[[[(33.473420586689336,58.85424941916091),(32.23422397806246,58.492830557036),(32.173775363007486,58.03176922751564),(31.508840597402823,57.499784781503735),(31.750635057622702,56.86092686957355),(31.508840597402823,55.941082594334574),(32.20399967053497,55.515591939372456),(31.84130798020516,54.998862226280465),(31.418167674820367,54.422670886434275),(32.47601843828233,53.83826377018255),(32.08310244042503,53.408048308050866),(33.171177511414484,52.82758702113742),(34.77306581037117,52.91880107773494),(34.77306581037117,53.784726518357985),(34.108131044766516,54.17574726780569),(35.07530888564602,54.59813930694554),(34.25925258240394,54.96417435716029),(35.01486027059106,55.361278263643584),(33.50364489421682,55.37845402950552),(32.7480372060297,55.90721384574556),(35.67979503619571,55.68634475630185),(32.83871012861215,56.311688992608396),(34.591719965206266,56.29492065473883),(35.7100193437232,56.311688992608396),(33.83611227701915,56.695333481003644),(32.95960735872209,56.9434497616887),(36.072711034053015,57.091531913901434),(33.171177511414484,57.33702717078384),(36.193608264162954,57.499784781503735),(33.23162612646945,57.77481561306047),(36.43540272438284,58.04776787540811),(33.62454212432676,58.27099811968307),(36.344729801800376,58.54018474404165),(33.83611227701915,58.68186423448108),(34.74284150284369,59.565911441555244),(33.473420586689336,58.85424941916091)]], [[(34.65216858026123,58.91672306881671),(37.19101041256995,58.68186423448108),(36.01226241899805,58.28688958537609),(37.16078610504247,58.04776787540811),(35.74024365125068,57.79092907387934),(37.009664567405046,57.499784781503735),(35.77046795877817,57.25537683364851),(36.979440259877556,57.07510745541089),(34.22902827487645,56.794777197297435),(36.7074214921302,56.210968525786996),(34.712617195316206,56.10998276812964),(36.55629995449277,55.63519693782703),(35.13575750070099,55.53270067649592),(36.43540272438284,55.34409504165558),(34.83351442542614,55.01619492319591),(35.61934642114075,54.49294870011772),(34.89396304048112,54.12264226523038),(35.37755196092087,53.046178687628185),(37.43280487278982,52.95523300597458),(35.92158949641559,53.80257986695776),(36.91899164482259,53.856094327816805),(36.01226241899805,54.75541714463799),(37.765272255592166,55.189110239786885),(36.828318722240134,55.44708256557195),(38.03729102333953,55.652253637168315),(36.64697287707522,55.941082594334574),(38.21863686850443,56.05939028508024),(36.37495410932787,56.64551287174558),(38.30930979108689,56.992876013526654),(37.16078610504247,57.25537683364851),(38.127963945921984,57.516020773674256),(37.43280487278982,57.710289827306724),(38.33953409861437,57.935626886818994),(37.40258056526235,58.31865112960426),(38.58132855883426,58.744648733419496),(37.31190764267989,59.02578062465136),(34.65216858026123,58.91672306881671)]], 
[[(38.52087994377928,59.11898412389468),(39.54850639971376,58.713270635642914),(38.369758406141855,58.28688958537609),(38.85334732658162,58.06375936407028),(38.33953409861437,57.710289827306724),(38.73245009647167,57.48354156434209),(38.21863686850443,57.271721400459285),(38.97424455669155,56.87744603722649),(37.463029180317314,56.5623320541159),(38.94402024916407,56.05939028508024),(38.18841256097694,55.856355210835915),(38.490655636251795,55.53270067649592),(37.795496563119656,55.39562234093384),(38.30930979108689,55.154587013355666),(36.7074214921302,54.65063295250911),(37.31190764267989,53.92734063371401),(36.979440259877556,53.58783775557231),(37.855945178174615,52.91880107773497),(39.57873070724124,52.69956490610895),(38.33953409861437,53.281741738901104),(40.00187101262603,53.35396273604752),(39.54850639971376,53.58783775557231),(40.24366547284591,53.58783775557231),(39.97164670509855,53.98069568468355),(40.60635716317572,54.03398248547225),(40.39478701048334,54.44025165268903),(39.54850639971376,54.56310590284329),(39.54850639971376,54.87732350170489),(40.39478701048334,54.87732350170489),(40.39478701048334,55.24083903654295),(39.82052516746112,55.2752875586599),(39.760076552406154,55.75443792473942),(40.57613285564824,55.78844000174894),(40.425011318010824,56.19415599955667),(39.82052516746112,56.07626182891758),(39.79030085993364,56.41214455508424),(40.48545993306579,56.495655446714636),(40.33433839542836,56.95993246553937),(39.79030085993364,56.992876013526654),(39.72985224487867,57.46729112028032),(40.33433839542836,57.46729112028032),(40.24366547284591,58.04776787540811),(39.63917932229622,58.04776787540811),(39.63917932229622,58.382088724871295),(40.33433839542836,58.382088724871295),(40.45523562553831,58.9011152358548),(38.52087994377928,59.11898412389468)]]])) format Null; +SELECT arrayDistinct(arraySort(arrayMap((x, y) -> (round(x, 3), round(y, 3)), arrayFlatten(polygonsSymDifferenceSpherical([[[(10., 10.), (10., 40.), (40., 40.), (40., 10.), (10., 10.)]], [[(-10., -10.), (-10., -40.), (-40., -40.), (-40., -10.), (-10., -10.)]]], [[[(-20., -20.), (-20., 20.), (20., 20.), (20., -20.), (-20., -20.)]]]))))); + +select '-------- MultiPolygon with Polygon with Holes'; +select 
wkt(polygonsSymDifferenceSpherical([[[(33.473420586689336,58.85424941916091),(32.23422397806246,58.492830557036),(32.173775363007486,58.03176922751564),(31.508840597402823,57.499784781503735),(31.750635057622702,56.86092686957355),(31.508840597402823,55.941082594334574),(32.20399967053497,55.515591939372456),(31.84130798020516,54.998862226280465),(31.418167674820367,54.422670886434275),(32.47601843828233,53.83826377018255),(32.08310244042503,53.408048308050866),(33.171177511414484,52.82758702113742),(34.77306581037117,52.91880107773494),(34.77306581037117,53.784726518357985),(34.108131044766516,54.17574726780569),(35.07530888564602,54.59813930694554),(34.25925258240394,54.96417435716029),(35.01486027059106,55.361278263643584),(33.50364489421682,55.37845402950552),(32.7480372060297,55.90721384574556),(35.67979503619571,55.68634475630185),(32.83871012861215,56.311688992608396),(34.591719965206266,56.29492065473883),(35.7100193437232,56.311688992608396),(33.83611227701915,56.695333481003644),(32.95960735872209,56.9434497616887),(36.072711034053015,57.091531913901434),(33.171177511414484,57.33702717078384),(36.193608264162954,57.499784781503735),(33.23162612646945,57.77481561306047),(36.43540272438284,58.04776787540811),(33.62454212432676,58.27099811968307),(36.344729801800376,58.54018474404165),(33.83611227701915,58.68186423448108),(34.74284150284369,59.565911441555244),(33.473420586689336,58.85424941916091)]], [[(34.65216858026123,58.91672306881671),(37.19101041256995,58.68186423448108),(36.01226241899805,58.28688958537609),(37.16078610504247,58.04776787540811),(35.74024365125068,57.79092907387934),(37.009664567405046,57.499784781503735),(35.77046795877817,57.25537683364851),(36.979440259877556,57.07510745541089),(34.22902827487645,56.794777197297435),(36.7074214921302,56.210968525786996),(34.712617195316206,56.10998276812964),(36.55629995449277,55.63519693782703),(35.13575750070099,55.53270067649592),(36.43540272438284,55.34409504165558),(34.83351442542614,55.01619492319591),(35.61934642114075,54.49294870011772),(34.89396304048112,54.12264226523038),(35.37755196092087,53.046178687628185),(37.43280487278982,52.95523300597458),(35.92158949641559,53.80257986695776),(36.91899164482259,53.856094327816805),(36.01226241899805,54.75541714463799),(37.765272255592166,55.189110239786885),(36.828318722240134,55.44708256557195),(38.03729102333953,55.652253637168315),(36.64697287707522,55.941082594334574),(38.21863686850443,56.05939028508024),(36.37495410932787,56.64551287174558),(38.30930979108689,56.992876013526654),(37.16078610504247,57.25537683364851),(38.127963945921984,57.516020773674256),(37.43280487278982,57.710289827306724),(38.33953409861437,57.935626886818994),(37.40258056526235,58.31865112960426),(38.58132855883426,58.744648733419496),(37.31190764267989,59.02578062465136),(34.65216858026123,58.91672306881671)]], 
[[(38.52087994377928,59.11898412389468),(39.54850639971376,58.713270635642914),(38.369758406141855,58.28688958537609),(38.85334732658162,58.06375936407028),(38.33953409861437,57.710289827306724),(38.73245009647167,57.48354156434209),(38.21863686850443,57.271721400459285),(38.97424455669155,56.87744603722649),(37.463029180317314,56.5623320541159),(38.94402024916407,56.05939028508024),(38.18841256097694,55.856355210835915),(38.490655636251795,55.53270067649592),(37.795496563119656,55.39562234093384),(38.30930979108689,55.154587013355666),(36.7074214921302,54.65063295250911),(37.31190764267989,53.92734063371401),(36.979440259877556,53.58783775557231),(37.855945178174615,52.91880107773497),(39.57873070724124,52.69956490610895),(38.33953409861437,53.281741738901104),(40.00187101262603,53.35396273604752),(39.54850639971376,53.58783775557231),(40.24366547284591,53.58783775557231),(39.97164670509855,53.98069568468355),(40.60635716317572,54.03398248547225),(40.39478701048334,54.44025165268903),(39.54850639971376,54.56310590284329),(39.54850639971376,54.87732350170489),(40.39478701048334,54.87732350170489),(40.39478701048334,55.24083903654295),(39.82052516746112,55.2752875586599),(39.760076552406154,55.75443792473942),(40.57613285564824,55.78844000174894),(40.425011318010824,56.19415599955667),(39.82052516746112,56.07626182891758),(39.79030085993364,56.41214455508424),(40.48545993306579,56.495655446714636),(40.33433839542836,56.95993246553937),(39.79030085993364,56.992876013526654),(39.72985224487867,57.46729112028032),(40.33433839542836,57.46729112028032),(40.24366547284591,58.04776787540811),(39.63917932229622,58.04776787540811),(39.63917932229622,58.382088724871295),(40.33433839542836,58.382088724871295),(40.45523562553831,58.9011152358548),(38.52087994377928,59.11898412389468)]]], [[(24.367675781249993,61.45977057029751),(19.577636718749993,58.67693767258692),(19.577636718749993,57.492213666700735),(19.445800781249996,55.87531083569678),(19.445800781249996,54.085173420886775),(17.468261718749996,53.014783245859235),(20.017089843749993,51.563412328675895),(21.203613281249993,50.205033264943324),(26.125488281249993,50.40151532278236),(27.22412109374999,48.980216985374994),(32.80517578124999,49.525208341974405),(35.26611328124999,48.74894534343292),(36.93603515624999,49.66762782262194),(42.56103515625,48.77791275550183),(43.92333984374999,49.8096315635631),(47.17529296875,49.152969656170455),(49.28466796875,50.54136296522162),(48.05419921875,51.17934297928929),(51.39404296875,52.48278022207825),(50.64697265625,53.014783245859235),(52.88818359375,53.93021986394004),(51.65771484374999,54.29088164657006),(52.66845703125,55.825973254619015),(50.25146484375,56.145549500679095),(51.92138671875,57.914847767009206),(49.15283203125,58.17070248348605),(49.59228515625,60.086762746260064),(47.043457031249986,59.88893689676584),(43.57177734375,61.37567331572748),(42.64892578125,60.630101766266705),(36.89208984374999,62.000904713685856),(36.01318359374999,61.143235250840576),(31.398925781249993,62.02152819100766),(30.563964843749996,61.05828537037917),(26.872558593749993,61.71070595883174),(26.652832031249993,61.10078883158897),(24.367675781249993,61.45977057029751)], 
[(24.455566406249993,59.42272750081452),(21.203613281249993,58.49369382056807),(21.335449218749993,56.89700392127261),(21.599121093749993,55.92458580482949),(25.202636718749993,55.998380955359636),(28.850097656249993,57.06463027327854),(27.09228515625,57.844750992890994),(28.806152343749996,59.17592824927138),(26.257324218749993,59.17592824927138),(24.455566406249993,59.42272750081452)], [(35.13427734375,59.84481485969107),(31.970214843749993,58.97266715450152),(33.20068359374999,56.776808316568406),(36.67236328125,56.41390137600675),(39.08935546874999,57.25528054528888),(42.69287109374999,58.03137242177638),(40.89111328124999,59.26588062825809),(37.28759765625,58.722598828043374),(37.11181640624999,59.66774058164964),(35.13427734375,59.84481485969107)], [(29.157714843749993,55.75184939173528),(22.565917968749993,55.128649068488784),(22.565917968749993,53.54030739150019),(22.038574218749996,51.48138289610097),(26.257324218749993,51.42661449707484),(30.124511718749993,50.54136296522162),(32.18994140624999,51.17934297928929),(30.124511718749993,53.173119202640635),(35.09033203124999,53.173119202640635),(33.11279296875,54.085173420886775),(29.597167968749993,55.50374985927513),(29.157714843749993,55.75184939173528)], [(42.82470703125,56.58369172128337),(36.584472656249986,55.329144408405085),(37.99072265625,53.592504809039355),(34.95849609374999,51.48138289610097),(36.54052734374999,50.40151532278236),(39.66064453124999,50.289339253291786),(39.79248046875,52.13348804077148),(41.77001953125,50.68079714532166),(44.49462890624999,51.97134580885171),(47.30712890624999,52.509534770327264),(44.05517578125,53.54030739150019),(46.60400390625,53.696706475303245),(47.61474609375,55.40406982700608),(45.37353515625,55.40406982700608),(42.82470703125,56.58369172128337)]])) format Null; +SELECT arrayDistinct(arraySort(arrayMap((x, y) -> (round(x, 3), round(y, 3)), arrayFlatten(polygonsSymDifferenceSpherical([[(50.,50.),(50.,-50.),(-50.,-50.),(-50.,50.),(50.,50.)],[(10.,10.),(10.,40.),(40.,40.),(40.,10.),(10.,10.)],[(-10.,-10.),(-10.,-40.),(-40.,-40.),(-40.,-10.),(-10.,-10.)]], [[[(-20.,-20.),(-20.,20.),(20.,20.),(20.,-20.),(-20.,-20.)]]]))))); + +select '-------- Polygon with Polygon with Holes'; +select 
wkt(polygonsSymDifferenceSpherical([[(29.453587685533865,59.779570356240356),(29.393139070478895,52.276266797422124),(40.636581470703206,59.38168915000267),(41.21084331372543,59.103467777099866),(29.786055068336193,52.146627480315004),(31.23682182965546,52.16517054781818),(41.69443223416517,58.85424941916091),(42.51048853740727,58.47703162291134),(32.59691566839227,52.22075341251539),(34.289476889931414,52.22075341251539),(43.02430176537451,58.07974369546071),(43.02430176537451,57.25537683364851),(35.468224883503325,52.2022335126388),(37.16078610504247,52.23926559241349),(43.02430176537451,56.26136189644947),(43.02430176537451,55.326904361850836),(38.33953409861437,52.16517054781818),(40.09254393520848,52.16517054781818),(44.4146199116388,55.3097062225408),(44.47506852669377,59.80998197603594),(39.72985224487867,59.931351417569715),(30.23941968124846,53.67744677450975),(30.20919537372098,54.63314259659509),(38.73245009647167,59.94649146557819),(37.2816833351524,59.97675082987618),(30.23941968124846,55.2752875586599),(30.33009260383092,56.19415599955667),(36.28428118674541,59.96162460231375),(34.863738732953635,59.97675082987618),(30.178971066193498,56.97640788219866),(30.178971066193498,57.91957806959033),(33.65476643185424,59.94649146557819),(32.32489690064491,59.94649146557819),(30.481214141468342,58.85424941916091),(30.571887064050795,59.99187015036608),(29.453587685533865,59.779570356240356)]], [[(24.367675781249993,61.45977057029751),(19.577636718749993,58.67693767258692),(19.577636718749993,57.492213666700735),(19.445800781249996,55.87531083569678),(19.445800781249996,54.085173420886775),(17.468261718749996,53.014783245859235),(20.017089843749993,51.563412328675895),(21.203613281249993,50.205033264943324),(26.125488281249993,50.40151532278236),(27.22412109374999,48.980216985374994),(32.80517578124999,49.525208341974405),(35.26611328124999,48.74894534343292),(36.93603515624999,49.66762782262194),(42.56103515625,48.77791275550183),(43.92333984374999,49.8096315635631),(47.17529296875,49.152969656170455),(49.28466796875,50.54136296522162),(48.05419921875,51.17934297928929),(51.39404296875,52.48278022207825),(50.64697265625,53.014783245859235),(52.88818359375,53.93021986394004),(51.65771484374999,54.29088164657006),(52.66845703125,55.825973254619015),(50.25146484375,56.145549500679095),(51.92138671875,57.914847767009206),(49.15283203125,58.17070248348605),(49.59228515625,60.086762746260064),(47.043457031249986,59.88893689676584),(43.57177734375,61.37567331572748),(42.64892578125,60.630101766266705),(36.89208984374999,62.000904713685856),(36.01318359374999,61.143235250840576),(31.398925781249993,62.02152819100766),(30.563964843749996,61.05828537037917),(26.872558593749993,61.71070595883174),(26.652832031249993,61.10078883158897),(24.367675781249993,61.45977057029751)], [(24.455566406249993,59.42272750081452),(21.203613281249993,58.49369382056807),(21.335449218749993,56.89700392127261),(21.599121093749993,55.92458580482949),(25.202636718749993,55.998380955359636),(28.850097656249993,57.06463027327854),(27.09228515625,57.844750992890994),(28.806152343749996,59.17592824927138),(26.257324218749993,59.17592824927138),(24.455566406249993,59.42272750081452)], 
[(35.13427734375,59.84481485969107),(31.970214843749993,58.97266715450152),(33.20068359374999,56.776808316568406),(36.67236328125,56.41390137600675),(39.08935546874999,57.25528054528888),(42.69287109374999,58.03137242177638),(40.89111328124999,59.26588062825809),(37.28759765625,58.722598828043374),(37.11181640624999,59.66774058164964),(35.13427734375,59.84481485969107)], [(29.157714843749993,55.75184939173528),(22.565917968749993,55.128649068488784),(22.565917968749993,53.54030739150019),(22.038574218749996,51.48138289610097),(26.257324218749993,51.42661449707484),(30.124511718749993,50.54136296522162),(32.18994140624999,51.17934297928929),(30.124511718749993,53.173119202640635),(35.09033203124999,53.173119202640635),(33.11279296875,54.085173420886775),(29.597167968749993,55.50374985927513),(29.157714843749993,55.75184939173528)], [(42.82470703125,56.58369172128337),(36.584472656249986,55.329144408405085),(37.99072265625,53.592504809039355),(34.95849609374999,51.48138289610097),(36.54052734374999,50.40151532278236),(39.66064453124999,50.289339253291786),(39.79248046875,52.13348804077148),(41.77001953125,50.68079714532166),(44.49462890624999,51.97134580885171),(47.30712890624999,52.509534770327264),(44.05517578125,53.54030739150019),(46.60400390625,53.696706475303245),(47.61474609375,55.40406982700608),(45.37353515625,55.40406982700608),(42.82470703125,56.58369172128337)]])) format Null; +SELECT arrayDistinct(arraySort(arrayMap((x, y) -> (round(x, 3), round(y, 3)), arrayFlatten(polygonsSymDifferenceSpherical([[(50., 50.), (50., -50.), (-50., -50.), (-50., 50.), (50., 50.)], [(10., 10.), (10., 40.), (40., 40.), (40., 10.), (10., 10.)], [(-10., -10.), (-10., -40.), (-40., -40.), (-40., -10.), (-10., -10.)]], [[(-20., -20.), (-20., 20.), (20., 20.), (20., -20.), (-20., -20.)]]))))); diff --git a/parser/testdata/01305_array_join_prewhere_in_subquery/ast.json b/parser/testdata/01305_array_join_prewhere_in_subquery/ast.json new file mode 100644 index 000000000..382e543e5 --- /dev/null +++ b/parser/testdata/01305_array_join_prewhere_in_subquery/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery h (children 1)" + }, + { + "explain": " Identifier h" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001188598, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01305_array_join_prewhere_in_subquery/metadata.json b/parser/testdata/01305_array_join_prewhere_in_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01305_array_join_prewhere_in_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01305_array_join_prewhere_in_subquery/query.sql b/parser/testdata/01305_array_join_prewhere_in_subquery/query.sql new file mode 100644 index 000000000..535dee5eb --- /dev/null +++ b/parser/testdata/01305_array_join_prewhere_in_subquery/query.sql @@ -0,0 +1,5 @@ +drop table if exists h; +create table h (EventDate Date, CounterID UInt64, WatchID UInt64) engine = MergeTree order by (CounterID, EventDate); +insert into h values ('2020-06-10', 16671268, 1); +SELECT count() from h ARRAY JOIN [1] AS a PREWHERE WatchID IN (SELECT toUInt64(1)) WHERE (EventDate = '2020-06-10') AND (CounterID = 16671268); +drop table if exists h; diff --git a/parser/testdata/01305_buffer_final_bug/ast.json b/parser/testdata/01305_buffer_final_bug/ast.json new file mode 100644 index 000000000..8e616065b --- /dev/null +++ 
b/parser/testdata/01305_buffer_final_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001236371, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01305_buffer_final_bug/metadata.json b/parser/testdata/01305_buffer_final_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01305_buffer_final_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01305_buffer_final_bug/query.sql b/parser/testdata/01305_buffer_final_bug/query.sql new file mode 100644 index 000000000..8d1586932 --- /dev/null +++ b/parser/testdata/01305_buffer_final_bug/query.sql @@ -0,0 +1,11 @@ +drop table if exists t; +drop table if exists t_buf; + +create table t (x UInt64) engine = MergeTree order by (x, intHash64(x)) sample by intHash64(x); +insert into t select number from numbers(10000); +create table t_buf as t engine = Buffer(currentDatabase(), 't', 16, 20, 100, 100000, 10000000, 50000000, 250000000); +insert into t_buf values (1); +select count() from t_buf sample 1/2 format Null; + +drop table if exists t_buf; +drop table if exists t; diff --git a/parser/testdata/01305_nullable-prewhere_bug/ast.json b/parser/testdata/01305_nullable-prewhere_bug/ast.json new file mode 100644 index 000000000..7d0a0f896 --- /dev/null +++ b/parser/testdata/01305_nullable-prewhere_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data (children 1)" + }, + { + "explain": " Identifier data" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001147574, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01305_nullable-prewhere_bug/metadata.json b/parser/testdata/01305_nullable-prewhere_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01305_nullable-prewhere_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01305_nullable-prewhere_bug/query.sql b/parser/testdata/01305_nullable-prewhere_bug/query.sql new file mode 100644 index 000000000..35d376266 --- /dev/null +++ b/parser/testdata/01305_nullable-prewhere_bug/query.sql @@ -0,0 +1,5 @@ +drop table if exists data; +CREATE TABLE data (ts DateTime, field String, num_field Nullable(Float64)) ENGINE = MergeTree() PARTITION BY ts ORDER BY ts; +insert into data values(toDateTime('2020-05-14 02:08:00'),'some_field_value',7.); +SELECT field, countIf(num_field > 6.0) FROM data PREWHERE (num_field>6.0) GROUP BY field; +drop table if exists data; diff --git a/parser/testdata/01305_polygons_union/ast.json b/parser/testdata/01305_polygons_union/ast.json new file mode 100644 index 000000000..abd74de99 --- /dev/null +++ b/parser/testdata/01305_polygons_union/ast.json @@ -0,0 +1,115 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function polygonsUnionCartesian (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { 
+ "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 8)" + }, + { + "explain": " Literal Tuple_(Float64_0, Float64_0)" + }, + { + "explain": " Literal Tuple_(Float64_0, Float64_3)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_2.9)" + }, + { + "explain": " Literal Tuple_(Float64_2, Float64_2.6)" + }, + { + "explain": " Literal Tuple_(Float64_2.6, Float64_2)" + }, + { + "explain": " Literal Tuple_(Float64_2.9, UInt64_1)" + }, + { + "explain": " Literal Tuple_(Float64_3, Float64_0)" + }, + { + "explain": " Literal Tuple_(Float64_0, Float64_0)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_1)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_4)" + }, + { + "explain": " Literal Tuple_(Float64_4, Float64_4)" + }, + { + "explain": " Literal Tuple_(Float64_4, Float64_1)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_1)" + } + ], + + "rows": 31, + + "statistics": + { + "elapsed": 0.001152436, + "rows_read": 31, + "bytes_read": 1502 + } +} diff --git a/parser/testdata/01305_polygons_union/metadata.json b/parser/testdata/01305_polygons_union/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01305_polygons_union/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01305_polygons_union/query.sql b/parser/testdata/01305_polygons_union/query.sql new file mode 100644 index 000000000..50c96c325 --- /dev/null +++ b/parser/testdata/01305_polygons_union/query.sql @@ -0,0 +1,18 @@ +select polygonsUnionCartesian([[[(0., 0.),(0., 3.),(1., 2.9),(2., 2.6),(2.6, 2.),(2.9, 1),(3., 0.),(0., 0.)]]], [[[(1., 1.),(1., 4.),(4., 4.),(4., 1.),(1., 1.)]]]); + +SELECT arrayMap(a -> arrayMap(b -> arrayMap(c -> (round(c.1, 6), round(c.2, 6)), b), a), polygonsUnionCartesian([[[(2., 100.0000991821289), (0., 3.), (1., 2.9), (2., 2.6), (2.6, 2.), (2.9, 1), (3., 0.), (100.0000991821289, 2.)]]], [[[(1., 1.), (1000.0001220703125, nan), (4., 4.), (4., 1.), (1., 1.)]]])); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +select arrayMap(a -> arrayMap(b -> arrayMap(c -> (round(c.1, 6), round(c.2, 6)), b), a), polygonsUnionSpherical([[[(4.3613577, 50.8651821), (4.349556, 50.8535879), (4.3602419, 50.8435626), (4.3830299, 50.8428851), (4.3904543, 50.8564867), (4.3613148, 50.8651279)]]], [[[(4.346693, 50.858306), (4.367945, 50.852455), (4.366227, 50.840809), (4.344961, 50.833264), (4.338074, 50.848677), (4.346693, 50.858306)]]])); + +select '-------- MultiPolygon with Polygon'; +select wkt(arrayMap(a -> arrayMap(b -> arrayMap(c -> (round(c.1, 6), round(c.2, 6)), b), a), 
+polygonsUnionSpherical([[(29.453587685533865,59.779570356240356),(29.393139070478895,52.276266797422124),(40.636581470703206,59.38168915000267),(41.21084331372543,59.103467777099866),(29.786055068336193,52.146627480315004),(31.23682182965546,52.16517054781818),(41.69443223416517,58.85424941916091),(42.51048853740727,58.47703162291134),(32.59691566839227,52.22075341251539),(34.289476889931414,52.22075341251539),(43.02430176537451,58.07974369546071),(43.02430176537451,57.25537683364851),(35.468224883503325,52.2022335126388),(37.16078610504247,52.23926559241349),(43.02430176537451,56.26136189644947),(43.02430176537451,55.326904361850836),(38.33953409861437,52.16517054781818),(40.09254393520848,52.16517054781818),(44.4146199116388,55.3097062225408),(44.47506852669377,59.80998197603594),(39.72985224487867,59.931351417569715),(30.23941968124846,53.67744677450975),(30.20919537372098,54.63314259659509),(38.73245009647167,59.94649146557819),(37.2816833351524,59.97675082987618),(30.23941968124846,55.2752875586599),(30.33009260383092,56.19415599955667),(36.28428118674541,59.96162460231375),(34.863738732953635,59.97675082987618),(30.178971066193498,56.97640788219866),(30.178971066193498,57.91957806959033),(33.65476643185424,59.94649146557819),(32.32489690064491,59.94649146557819),(30.481214141468342,58.85424941916091),(30.571887064050795,59.99187015036608),(29.453587685533865,59.779570356240356)]], [[[(33.473420586689336,58.85424941916091),(32.23422397806246,58.492830557036),(32.173775363007486,58.03176922751564),(31.508840597402823,57.499784781503735),(31.750635057622702,56.86092686957355),(31.508840597402823,55.941082594334574),(32.20399967053497,55.515591939372456),(31.84130798020516,54.998862226280465),(31.418167674820367,54.422670886434275),(32.47601843828233,53.83826377018255),(32.08310244042503,53.408048308050866),(33.171177511414484,52.82758702113742),(34.77306581037117,52.91880107773494),(34.77306581037117,53.784726518357985),(34.108131044766516,54.17574726780569),(35.07530888564602,54.59813930694554),(34.25925258240394,54.96417435716029),(35.01486027059106,55.361278263643584),(33.50364489421682,55.37845402950552),(32.7480372060297,55.90721384574556),(35.67979503619571,55.68634475630185),(32.83871012861215,56.311688992608396),(34.591719965206266,56.29492065473883),(35.7100193437232,56.311688992608396),(33.83611227701915,56.695333481003644),(32.95960735872209,56.9434497616887),(36.072711034053015,57.091531913901434),(33.171177511414484,57.33702717078384),(36.193608264162954,57.499784781503735),(33.23162612646945,57.77481561306047),(36.43540272438284,58.04776787540811),(33.62454212432676,58.27099811968307),(36.344729801800376,58.54018474404165),(33.83611227701915,58.68186423448108),(34.74284150284369,59.565911441555244),(33.473420586689336,58.85424941916091)]], 
[[(34.65216858026123,58.91672306881671),(37.19101041256995,58.68186423448108),(36.01226241899805,58.28688958537609),(37.16078610504247,58.04776787540811),(35.74024365125068,57.79092907387934),(37.009664567405046,57.499784781503735),(35.77046795877817,57.25537683364851),(36.979440259877556,57.07510745541089),(34.22902827487645,56.794777197297435),(36.7074214921302,56.210968525786996),(34.712617195316206,56.10998276812964),(36.55629995449277,55.63519693782703),(35.13575750070099,55.53270067649592),(36.43540272438284,55.34409504165558),(34.83351442542614,55.01619492319591),(35.61934642114075,54.49294870011772),(34.89396304048112,54.12264226523038),(35.37755196092087,53.046178687628185),(37.43280487278982,52.95523300597458),(35.92158949641559,53.80257986695776),(36.91899164482259,53.856094327816805),(36.01226241899805,54.75541714463799),(37.765272255592166,55.189110239786885),(36.828318722240134,55.44708256557195),(38.03729102333953,55.652253637168315),(36.64697287707522,55.941082594334574),(38.21863686850443,56.05939028508024),(36.37495410932787,56.64551287174558),(38.30930979108689,56.992876013526654),(37.16078610504247,57.25537683364851),(38.127963945921984,57.516020773674256),(37.43280487278982,57.710289827306724),(38.33953409861437,57.935626886818994),(37.40258056526235,58.31865112960426),(38.58132855883426,58.744648733419496),(37.31190764267989,59.02578062465136),(34.65216858026123,58.91672306881671)]], [[(38.52087994377928,59.11898412389468),(39.54850639971376,58.713270635642914),(38.369758406141855,58.28688958537609),(38.85334732658162,58.06375936407028),(38.33953409861437,57.710289827306724),(38.73245009647167,57.48354156434209),(38.21863686850443,57.271721400459285),(38.97424455669155,56.87744603722649),(37.463029180317314,56.5623320541159),(38.94402024916407,56.05939028508024),(38.18841256097694,55.856355210835915),(38.490655636251795,55.53270067649592),(37.795496563119656,55.39562234093384),(38.30930979108689,55.154587013355666),(36.7074214921302,54.65063295250911),(37.31190764267989,53.92734063371401),(36.979440259877556,53.58783775557231),(37.855945178174615,52.91880107773497),(39.57873070724124,52.69956490610895),(38.33953409861437,53.281741738901104),(40.00187101262603,53.35396273604752),(39.54850639971376,53.58783775557231),(40.24366547284591,53.58783775557231),(39.97164670509855,53.98069568468355),(40.60635716317572,54.03398248547225),(40.39478701048334,54.44025165268903),(39.54850639971376,54.56310590284329),(39.54850639971376,54.87732350170489),(40.39478701048334,54.87732350170489),(40.39478701048334,55.24083903654295),(39.82052516746112,55.2752875586599),(39.760076552406154,55.75443792473942),(40.57613285564824,55.78844000174894),(40.425011318010824,56.19415599955667),(39.82052516746112,56.07626182891758),(39.79030085993364,56.41214455508424),(40.48545993306579,56.495655446714636),(40.33433839542836,56.95993246553937),(39.79030085993364,56.992876013526654),(39.72985224487867,57.46729112028032),(40.33433839542836,57.46729112028032),(40.24366547284591,58.04776787540811),(39.63917932229622,58.04776787540811),(39.63917932229622,58.382088724871295),(40.33433839542836,58.382088724871295),(40.45523562553831,58.9011152358548),(38.52087994377928,59.11898412389468)]]]))) format TSV; + +select '-------- MultiPolygon with Polygon with Holes'; +select wkt(arrayMap(a -> arrayMap(b -> arrayMap(c -> (round(c.1, 6), round(c.2, 6)), b), a), 
+polygonsUnionSpherical([[[(33.473420586689336,58.85424941916091),(32.23422397806246,58.492830557036),(32.173775363007486,58.03176922751564),(31.508840597402823,57.499784781503735),(31.750635057622702,56.86092686957355),(31.508840597402823,55.941082594334574),(32.20399967053497,55.515591939372456),(31.84130798020516,54.998862226280465),(31.418167674820367,54.422670886434275),(32.47601843828233,53.83826377018255),(32.08310244042503,53.408048308050866),(33.171177511414484,52.82758702113742),(34.77306581037117,52.91880107773494),(34.77306581037117,53.784726518357985),(34.108131044766516,54.17574726780569),(35.07530888564602,54.59813930694554),(34.25925258240394,54.96417435716029),(35.01486027059106,55.361278263643584),(33.50364489421682,55.37845402950552),(32.7480372060297,55.90721384574556),(35.67979503619571,55.68634475630185),(32.83871012861215,56.311688992608396),(34.591719965206266,56.29492065473883),(35.7100193437232,56.311688992608396),(33.83611227701915,56.695333481003644),(32.95960735872209,56.9434497616887),(36.072711034053015,57.091531913901434),(33.171177511414484,57.33702717078384),(36.193608264162954,57.499784781503735),(33.23162612646945,57.77481561306047),(36.43540272438284,58.04776787540811),(33.62454212432676,58.27099811968307),(36.344729801800376,58.54018474404165),(33.83611227701915,58.68186423448108),(34.74284150284369,59.565911441555244),(33.473420586689336,58.85424941916091)]], [[(34.65216858026123,58.91672306881671),(37.19101041256995,58.68186423448108),(36.01226241899805,58.28688958537609),(37.16078610504247,58.04776787540811),(35.74024365125068,57.79092907387934),(37.009664567405046,57.499784781503735),(35.77046795877817,57.25537683364851),(36.979440259877556,57.07510745541089),(34.22902827487645,56.794777197297435),(36.7074214921302,56.210968525786996),(34.712617195316206,56.10998276812964),(36.55629995449277,55.63519693782703),(35.13575750070099,55.53270067649592),(36.43540272438284,55.34409504165558),(34.83351442542614,55.01619492319591),(35.61934642114075,54.49294870011772),(34.89396304048112,54.12264226523038),(35.37755196092087,53.046178687628185),(37.43280487278982,52.95523300597458),(35.92158949641559,53.80257986695776),(36.91899164482259,53.856094327816805),(36.01226241899805,54.75541714463799),(37.765272255592166,55.189110239786885),(36.828318722240134,55.44708256557195),(38.03729102333953,55.652253637168315),(36.64697287707522,55.941082594334574),(38.21863686850443,56.05939028508024),(36.37495410932787,56.64551287174558),(38.30930979108689,56.992876013526654),(37.16078610504247,57.25537683364851),(38.127963945921984,57.516020773674256),(37.43280487278982,57.710289827306724),(38.33953409861437,57.935626886818994),(37.40258056526235,58.31865112960426),(38.58132855883426,58.744648733419496),(37.31190764267989,59.02578062465136),(34.65216858026123,58.91672306881671)]], 
[[(38.52087994377928,59.11898412389468),(39.54850639971376,58.713270635642914),(38.369758406141855,58.28688958537609),(38.85334732658162,58.06375936407028),(38.33953409861437,57.710289827306724),(38.73245009647167,57.48354156434209),(38.21863686850443,57.271721400459285),(38.97424455669155,56.87744603722649),(37.463029180317314,56.5623320541159),(38.94402024916407,56.05939028508024),(38.18841256097694,55.856355210835915),(38.490655636251795,55.53270067649592),(37.795496563119656,55.39562234093384),(38.30930979108689,55.154587013355666),(36.7074214921302,54.65063295250911),(37.31190764267989,53.92734063371401),(36.979440259877556,53.58783775557231),(37.855945178174615,52.91880107773497),(39.57873070724124,52.69956490610895),(38.33953409861437,53.281741738901104),(40.00187101262603,53.35396273604752),(39.54850639971376,53.58783775557231),(40.24366547284591,53.58783775557231),(39.97164670509855,53.98069568468355),(40.60635716317572,54.03398248547225),(40.39478701048334,54.44025165268903),(39.54850639971376,54.56310590284329),(39.54850639971376,54.87732350170489),(40.39478701048334,54.87732350170489),(40.39478701048334,55.24083903654295),(39.82052516746112,55.2752875586599),(39.760076552406154,55.75443792473942),(40.57613285564824,55.78844000174894),(40.425011318010824,56.19415599955667),(39.82052516746112,56.07626182891758),(39.79030085993364,56.41214455508424),(40.48545993306579,56.495655446714636),(40.33433839542836,56.95993246553937),(39.79030085993364,56.992876013526654),(39.72985224487867,57.46729112028032),(40.33433839542836,57.46729112028032),(40.24366547284591,58.04776787540811),(39.63917932229622,58.04776787540811),(39.63917932229622,58.382088724871295),(40.33433839542836,58.382088724871295),(40.45523562553831,58.9011152358548),(38.52087994377928,59.11898412389468)]]], [[(24.367675781249993,61.45977057029751),(19.577636718749993,58.67693767258692),(19.577636718749993,57.492213666700735),(19.445800781249996,55.87531083569678),(19.445800781249996,54.085173420886775),(17.468261718749996,53.014783245859235),(20.017089843749993,51.563412328675895),(21.203613281249993,50.205033264943324),(26.125488281249993,50.40151532278236),(27.22412109374999,48.980216985374994),(32.80517578124999,49.525208341974405),(35.26611328124999,48.74894534343292),(36.93603515624999,49.66762782262194),(42.56103515625,48.77791275550183),(43.92333984374999,49.8096315635631),(47.17529296875,49.152969656170455),(49.28466796875,50.54136296522162),(48.05419921875,51.17934297928929),(51.39404296875,52.48278022207825),(50.64697265625,53.014783245859235),(52.88818359375,53.93021986394004),(51.65771484374999,54.29088164657006),(52.66845703125,55.825973254619015),(50.25146484375,56.145549500679095),(51.92138671875,57.914847767009206),(49.15283203125,58.17070248348605),(49.59228515625,60.086762746260064),(47.043457031249986,59.88893689676584),(43.57177734375,61.37567331572748),(42.64892578125,60.630101766266705),(36.89208984374999,62.000904713685856),(36.01318359374999,61.143235250840576),(31.398925781249993,62.02152819100766),(30.563964843749996,61.05828537037917),(26.872558593749993,61.71070595883174),(26.652832031249993,61.10078883158897),(24.367675781249993,61.45977057029751)], 
[(24.455566406249993,59.42272750081452),(21.203613281249993,58.49369382056807),(21.335449218749993,56.89700392127261),(21.599121093749993,55.92458580482949),(25.202636718749993,55.998380955359636),(28.850097656249993,57.06463027327854),(27.09228515625,57.844750992890994),(28.806152343749996,59.17592824927138),(26.257324218749993,59.17592824927138),(24.455566406249993,59.42272750081452)], [(35.13427734375,59.84481485969107),(31.970214843749993,58.97266715450152),(33.20068359374999,56.776808316568406),(36.67236328125,56.41390137600675),(39.08935546874999,57.25528054528888),(42.69287109374999,58.03137242177638),(40.89111328124999,59.26588062825809),(37.28759765625,58.722598828043374),(37.11181640624999,59.66774058164964),(35.13427734375,59.84481485969107)], [(29.157714843749993,55.75184939173528),(22.565917968749993,55.128649068488784),(22.565917968749993,53.54030739150019),(22.038574218749996,51.48138289610097),(26.257324218749993,51.42661449707484),(30.124511718749993,50.54136296522162),(32.18994140624999,51.17934297928929),(30.124511718749993,53.173119202640635),(35.09033203124999,53.173119202640635),(33.11279296875,54.085173420886775),(29.597167968749993,55.50374985927513),(29.157714843749993,55.75184939173528)], [(42.82470703125,56.58369172128337),(36.584472656249986,55.329144408405085),(37.99072265625,53.592504809039355),(34.95849609374999,51.48138289610097),(36.54052734374999,50.40151532278236),(39.66064453124999,50.289339253291786),(39.79248046875,52.13348804077148),(41.77001953125,50.68079714532166),(44.49462890624999,51.97134580885171),(47.30712890624999,52.509534770327264),(44.05517578125,53.54030739150019),(46.60400390625,53.696706475303245),(47.61474609375,55.40406982700608),(45.37353515625,55.40406982700608),(42.82470703125,56.58369172128337)]]))) format TSV; + +select '-------- Polygon with Polygon with Holes'; +select wkt(arrayMap(a -> arrayMap(b -> arrayMap(c -> (round(c.1, 6), round(c.2, 6)), b), a), +polygonsUnionSpherical([[(29.453587685533865,59.779570356240356),(29.393139070478895,52.276266797422124),(40.636581470703206,59.38168915000267),(41.21084331372543,59.103467777099866),(29.786055068336193,52.146627480315004),(31.23682182965546,52.16517054781818),(41.69443223416517,58.85424941916091),(42.51048853740727,58.47703162291134),(32.59691566839227,52.22075341251539),(34.289476889931414,52.22075341251539),(43.02430176537451,58.07974369546071),(43.02430176537451,57.25537683364851),(35.468224883503325,52.2022335126388),(37.16078610504247,52.23926559241349),(43.02430176537451,56.26136189644947),(43.02430176537451,55.326904361850836),(38.33953409861437,52.16517054781818),(40.09254393520848,52.16517054781818),(44.4146199116388,55.3097062225408),(44.47506852669377,59.80998197603594),(39.72985224487867,59.931351417569715),(30.23941968124846,53.67744677450975),(30.20919537372098,54.63314259659509),(38.73245009647167,59.94649146557819),(37.2816833351524,59.97675082987618),(30.23941968124846,55.2752875586599),(30.33009260383092,56.19415599955667),(36.28428118674541,59.96162460231375),(34.863738732953635,59.97675082987618),(30.178971066193498,56.97640788219866),(30.178971066193498,57.91957806959033),(33.65476643185424,59.94649146557819),(32.32489690064491,59.94649146557819),(30.481214141468342,58.85424941916091),(30.571887064050795,59.99187015036608),(29.453587685533865,59.779570356240356)]], 
[[(24.367675781249993,61.45977057029751),(19.577636718749993,58.67693767258692),(19.577636718749993,57.492213666700735),(19.445800781249996,55.87531083569678),(19.445800781249996,54.085173420886775),(17.468261718749996,53.014783245859235),(20.017089843749993,51.563412328675895),(21.203613281249993,50.205033264943324),(26.125488281249993,50.40151532278236),(27.22412109374999,48.980216985374994),(32.80517578124999,49.525208341974405),(35.26611328124999,48.74894534343292),(36.93603515624999,49.66762782262194),(42.56103515625,48.77791275550183),(43.92333984374999,49.8096315635631),(47.17529296875,49.152969656170455),(49.28466796875,50.54136296522162),(48.05419921875,51.17934297928929),(51.39404296875,52.48278022207825),(50.64697265625,53.014783245859235),(52.88818359375,53.93021986394004),(51.65771484374999,54.29088164657006),(52.66845703125,55.825973254619015),(50.25146484375,56.145549500679095),(51.92138671875,57.914847767009206),(49.15283203125,58.17070248348605),(49.59228515625,60.086762746260064),(47.043457031249986,59.88893689676584),(43.57177734375,61.37567331572748),(42.64892578125,60.630101766266705),(36.89208984374999,62.000904713685856),(36.01318359374999,61.143235250840576),(31.398925781249993,62.02152819100766),(30.563964843749996,61.05828537037917),(26.872558593749993,61.71070595883174),(26.652832031249993,61.10078883158897),(24.367675781249993,61.45977057029751)], [(24.455566406249993,59.42272750081452),(21.203613281249993,58.49369382056807),(21.335449218749993,56.89700392127261),(21.599121093749993,55.92458580482949),(25.202636718749993,55.998380955359636),(28.850097656249993,57.06463027327854),(27.09228515625,57.844750992890994),(28.806152343749996,59.17592824927138),(26.257324218749993,59.17592824927138),(24.455566406249993,59.42272750081452)], [(35.13427734375,59.84481485969107),(31.970214843749993,58.97266715450152),(33.20068359374999,56.776808316568406),(36.67236328125,56.41390137600675),(39.08935546874999,57.25528054528888),(42.69287109374999,58.03137242177638),(40.89111328124999,59.26588062825809),(37.28759765625,58.722598828043374),(37.11181640624999,59.66774058164964),(35.13427734375,59.84481485969107)], [(29.157714843749993,55.75184939173528),(22.565917968749993,55.128649068488784),(22.565917968749993,53.54030739150019),(22.038574218749996,51.48138289610097),(26.257324218749993,51.42661449707484),(30.124511718749993,50.54136296522162),(32.18994140624999,51.17934297928929),(30.124511718749993,53.173119202640635),(35.09033203124999,53.173119202640635),(33.11279296875,54.085173420886775),(29.597167968749993,55.50374985927513),(29.157714843749993,55.75184939173528)], [(42.82470703125,56.58369172128337),(36.584472656249986,55.329144408405085),(37.99072265625,53.592504809039355),(34.95849609374999,51.48138289610097),(36.54052734374999,50.40151532278236),(39.66064453124999,50.289339253291786),(39.79248046875,52.13348804077148),(41.77001953125,50.68079714532166),(44.49462890624999,51.97134580885171),(47.30712890624999,52.509534770327264),(44.05517578125,53.54030739150019),(46.60400390625,53.696706475303245),(47.61474609375,55.40406982700608),(45.37353515625,55.40406982700608),(42.82470703125,56.58369172128337)]]))) format TSV; + diff --git a/parser/testdata/01306_polygons_intersection/ast.json b/parser/testdata/01306_polygons_intersection/ast.json new file mode 100644 index 000000000..5fedd3f94 --- /dev/null +++ b/parser/testdata/01306_polygons_intersection/ast.json @@ -0,0 +1,115 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + 
"explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function polygonsIntersectionCartesian (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 8)" + }, + { + "explain": " Literal Tuple_(Float64_0, Float64_0)" + }, + { + "explain": " Literal Tuple_(Float64_0, Float64_3)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_2.9)" + }, + { + "explain": " Literal Tuple_(Float64_2, Float64_2.6)" + }, + { + "explain": " Literal Tuple_(Float64_2.6, Float64_2)" + }, + { + "explain": " Literal Tuple_(Float64_2.9, Float64_1)" + }, + { + "explain": " Literal Tuple_(Float64_3, Float64_0)" + }, + { + "explain": " Literal Tuple_(Float64_0, Float64_0)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_1)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_4)" + }, + { + "explain": " Literal Tuple_(Float64_4, Float64_4)" + }, + { + "explain": " Literal Tuple_(Float64_4, Float64_1)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_1)" + } + ], + + "rows": 31, + + "statistics": + { + "elapsed": 0.001470462, + "rows_read": 31, + "bytes_read": 1510 + } +} diff --git a/parser/testdata/01306_polygons_intersection/metadata.json b/parser/testdata/01306_polygons_intersection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01306_polygons_intersection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01306_polygons_intersection/query.sql b/parser/testdata/01306_polygons_intersection/query.sql new file mode 100644 index 000000000..5bfba6124 --- /dev/null +++ b/parser/testdata/01306_polygons_intersection/query.sql @@ -0,0 +1,17 @@ +select polygonsIntersectionCartesian([[[(0., 0.),(0., 3.),(1., 2.9),(2., 2.6),(2.6, 2.),(2.9, 1.),(3., 0.),(0., 0.)]]], [[[(1., 1.),(1., 4.),(4., 4.),(4., 1.),(1., 1.)]]]); +select polygonsIntersectionCartesian([[[(0., 0.),(0., 3.),(1., 2.9),(2., 2.6),(2.6, 2.),(2.9, 1.),(3., 0.),(0., 0.)]]], [[[(3., 3.),(3., 4.),(4., 4.),(4., 3.),(3., 3.)]]]); + +select arrayMap(a -> arrayMap(b -> arrayMap(c -> (round(c.1, 6), round(c.2, 6)), b), a), polygonsIntersectionSpherical([[[(4.346693, 50.858306), (4.367945, 50.852455), (4.366227, 50.840809), (4.344961, 50.833264), (4.338074, 50.848677), (4.346693, 50.858306)]]], [[[(25.0010, 136.9987), (17.7500, 142.5000), (11.3733, 142.5917)]]])); +select arrayMap(a -> arrayMap(b -> arrayMap(c -> (round(c.1, 6), round(c.2, 6)), b), a),polygonsIntersectionSpherical([[[(4.3613577, 50.8651821), (4.349556, 50.8535879), (4.3602419, 50.8435626), (4.3830299, 50.8428851), (4.3904543, 50.8564867), (4.3613148, 50.8651279)]]], [[[(4.346693, 50.858306), (4.367945, 50.852455), (4.366227, 50.840809), (4.344961, 50.833264), (4.338074, 50.848677), (4.346693, 
50.858306)]]])); + +select '-------- MultiPolygon with Polygon'; +select wkt(arrayMap(a -> arrayMap(b -> arrayMap(c -> (round(c.1, 6), round(c.2, 6)), b), a), +polygonsIntersectionSpherical([[(29.453587685533865,59.779570356240356),(29.393139070478895,52.276266797422124),(40.636581470703206,59.38168915000267),(41.21084331372543,59.103467777099866),(29.786055068336193,52.146627480315004),(31.23682182965546,52.16517054781818),(41.69443223416517,58.85424941916091),(42.51048853740727,58.47703162291134),(32.59691566839227,52.22075341251539),(34.289476889931414,52.22075341251539),(43.02430176537451,58.07974369546071),(43.02430176537451,57.25537683364851),(35.468224883503325,52.2022335126388),(37.16078610504247,52.23926559241349),(43.02430176537451,56.26136189644947),(43.02430176537451,55.326904361850836),(38.33953409861437,52.16517054781818),(40.09254393520848,52.16517054781818),(44.4146199116388,55.3097062225408),(44.47506852669377,59.80998197603594),(39.72985224487867,59.931351417569715),(30.23941968124846,53.67744677450975),(30.20919537372098,54.63314259659509),(38.73245009647167,59.94649146557819),(37.2816833351524,59.97675082987618),(30.23941968124846,55.2752875586599),(30.33009260383092,56.19415599955667),(36.28428118674541,59.96162460231375),(34.863738732953635,59.97675082987618),(30.178971066193498,56.97640788219866),(30.178971066193498,57.91957806959033),(33.65476643185424,59.94649146557819),(32.32489690064491,59.94649146557819),(30.481214141468342,58.85424941916091),(30.571887064050795,59.99187015036608),(29.453587685533865,59.779570356240356)]], [[[(33.473420586689336,58.85424941916091),(32.23422397806246,58.492830557036),(32.173775363007486,58.03176922751564),(31.508840597402823,57.499784781503735),(31.750635057622702,56.86092686957355),(31.508840597402823,55.941082594334574),(32.20399967053497,55.515591939372456),(31.84130798020516,54.998862226280465),(31.418167674820367,54.422670886434275),(32.47601843828233,53.83826377018255),(32.08310244042503,53.408048308050866),(33.171177511414484,52.82758702113742),(34.77306581037117,52.91880107773494),(34.77306581037117,53.784726518357985),(34.108131044766516,54.17574726780569),(35.07530888564602,54.59813930694554),(34.25925258240394,54.96417435716029),(35.01486027059106,55.361278263643584),(33.50364489421682,55.37845402950552),(32.7480372060297,55.90721384574556),(35.67979503619571,55.68634475630185),(32.83871012861215,56.311688992608396),(34.591719965206266,56.29492065473883),(35.7100193437232,56.311688992608396),(33.83611227701915,56.695333481003644),(32.95960735872209,56.9434497616887),(36.072711034053015,57.091531913901434),(33.171177511414484,57.33702717078384),(36.193608264162954,57.499784781503735),(33.23162612646945,57.77481561306047),(36.43540272438284,58.04776787540811),(33.62454212432676,58.27099811968307),(36.344729801800376,58.54018474404165),(33.83611227701915,58.68186423448108),(34.74284150284369,59.565911441555244),(33.473420586689336,58.85424941916091)]], 
[[(34.65216858026123,58.91672306881671),(37.19101041256995,58.68186423448108),(36.01226241899805,58.28688958537609),(37.16078610504247,58.04776787540811),(35.74024365125068,57.79092907387934),(37.009664567405046,57.499784781503735),(35.77046795877817,57.25537683364851),(36.979440259877556,57.07510745541089),(34.22902827487645,56.794777197297435),(36.7074214921302,56.210968525786996),(34.712617195316206,56.10998276812964),(36.55629995449277,55.63519693782703),(35.13575750070099,55.53270067649592),(36.43540272438284,55.34409504165558),(34.83351442542614,55.01619492319591),(35.61934642114075,54.49294870011772),(34.89396304048112,54.12264226523038),(35.37755196092087,53.046178687628185),(37.43280487278982,52.95523300597458),(35.92158949641559,53.80257986695776),(36.91899164482259,53.856094327816805),(36.01226241899805,54.75541714463799),(37.765272255592166,55.189110239786885),(36.828318722240134,55.44708256557195),(38.03729102333953,55.652253637168315),(36.64697287707522,55.941082594334574),(38.21863686850443,56.05939028508024),(36.37495410932787,56.64551287174558),(38.30930979108689,56.992876013526654),(37.16078610504247,57.25537683364851),(38.127963945921984,57.516020773674256),(37.43280487278982,57.710289827306724),(38.33953409861437,57.935626886818994),(37.40258056526235,58.31865112960426),(38.58132855883426,58.744648733419496),(37.31190764267989,59.02578062465136),(34.65216858026123,58.91672306881671)]], [[(38.52087994377928,59.11898412389468),(39.54850639971376,58.713270635642914),(38.369758406141855,58.28688958537609),(38.85334732658162,58.06375936407028),(38.33953409861437,57.710289827306724),(38.73245009647167,57.48354156434209),(38.21863686850443,57.271721400459285),(38.97424455669155,56.87744603722649),(37.463029180317314,56.5623320541159),(38.94402024916407,56.05939028508024),(38.18841256097694,55.856355210835915),(38.490655636251795,55.53270067649592),(37.795496563119656,55.39562234093384),(38.30930979108689,55.154587013355666),(36.7074214921302,54.65063295250911),(37.31190764267989,53.92734063371401),(36.979440259877556,53.58783775557231),(37.855945178174615,52.91880107773497),(39.57873070724124,52.69956490610895),(38.33953409861437,53.281741738901104),(40.00187101262603,53.35396273604752),(39.54850639971376,53.58783775557231),(40.24366547284591,53.58783775557231),(39.97164670509855,53.98069568468355),(40.60635716317572,54.03398248547225),(40.39478701048334,54.44025165268903),(39.54850639971376,54.56310590284329),(39.54850639971376,54.87732350170489),(40.39478701048334,54.87732350170489),(40.39478701048334,55.24083903654295),(39.82052516746112,55.2752875586599),(39.760076552406154,55.75443792473942),(40.57613285564824,55.78844000174894),(40.425011318010824,56.19415599955667),(39.82052516746112,56.07626182891758),(39.79030085993364,56.41214455508424),(40.48545993306579,56.495655446714636),(40.33433839542836,56.95993246553937),(39.79030085993364,56.992876013526654),(39.72985224487867,57.46729112028032),(40.33433839542836,57.46729112028032),(40.24366547284591,58.04776787540811),(39.63917932229622,58.04776787540811),(39.63917932229622,58.382088724871295),(40.33433839542836,58.382088724871295),(40.45523562553831,58.9011152358548),(38.52087994377928,59.11898412389468)]]]))) format TSV; + +select '-------- MultiPolygon with Polygon with Holes'; +select wkt(arrayMap(a -> arrayMap(b -> arrayMap(c -> (round(c.1, 6), round(c.2, 6)), b), a), 
+polygonsIntersectionSpherical([[[(33.473420586689336,58.85424941916091),(32.23422397806246,58.492830557036),(32.173775363007486,58.03176922751564),(31.508840597402823,57.499784781503735),(31.750635057622702,56.86092686957355),(31.508840597402823,55.941082594334574),(32.20399967053497,55.515591939372456),(31.84130798020516,54.998862226280465),(31.418167674820367,54.422670886434275),(32.47601843828233,53.83826377018255),(32.08310244042503,53.408048308050866),(33.171177511414484,52.82758702113742),(34.77306581037117,52.91880107773494),(34.77306581037117,53.784726518357985),(34.108131044766516,54.17574726780569),(35.07530888564602,54.59813930694554),(34.25925258240394,54.96417435716029),(35.01486027059106,55.361278263643584),(33.50364489421682,55.37845402950552),(32.7480372060297,55.90721384574556),(35.67979503619571,55.68634475630185),(32.83871012861215,56.311688992608396),(34.591719965206266,56.29492065473883),(35.7100193437232,56.311688992608396),(33.83611227701915,56.695333481003644),(32.95960735872209,56.9434497616887),(36.072711034053015,57.091531913901434),(33.171177511414484,57.33702717078384),(36.193608264162954,57.499784781503735),(33.23162612646945,57.77481561306047),(36.43540272438284,58.04776787540811),(33.62454212432676,58.27099811968307),(36.344729801800376,58.54018474404165),(33.83611227701915,58.68186423448108),(34.74284150284369,59.565911441555244),(33.473420586689336,58.85424941916091)]], [[(34.65216858026123,58.91672306881671),(37.19101041256995,58.68186423448108),(36.01226241899805,58.28688958537609),(37.16078610504247,58.04776787540811),(35.74024365125068,57.79092907387934),(37.009664567405046,57.499784781503735),(35.77046795877817,57.25537683364851),(36.979440259877556,57.07510745541089),(34.22902827487645,56.794777197297435),(36.7074214921302,56.210968525786996),(34.712617195316206,56.10998276812964),(36.55629995449277,55.63519693782703),(35.13575750070099,55.53270067649592),(36.43540272438284,55.34409504165558),(34.83351442542614,55.01619492319591),(35.61934642114075,54.49294870011772),(34.89396304048112,54.12264226523038),(35.37755196092087,53.046178687628185),(37.43280487278982,52.95523300597458),(35.92158949641559,53.80257986695776),(36.91899164482259,53.856094327816805),(36.01226241899805,54.75541714463799),(37.765272255592166,55.189110239786885),(36.828318722240134,55.44708256557195),(38.03729102333953,55.652253637168315),(36.64697287707522,55.941082594334574),(38.21863686850443,56.05939028508024),(36.37495410932787,56.64551287174558),(38.30930979108689,56.992876013526654),(37.16078610504247,57.25537683364851),(38.127963945921984,57.516020773674256),(37.43280487278982,57.710289827306724),(38.33953409861437,57.935626886818994),(37.40258056526235,58.31865112960426),(38.58132855883426,58.744648733419496),(37.31190764267989,59.02578062465136),(34.65216858026123,58.91672306881671)]], 
[[(38.52087994377928,59.11898412389468),(39.54850639971376,58.713270635642914),(38.369758406141855,58.28688958537609),(38.85334732658162,58.06375936407028),(38.33953409861437,57.710289827306724),(38.73245009647167,57.48354156434209),(38.21863686850443,57.271721400459285),(38.97424455669155,56.87744603722649),(37.463029180317314,56.5623320541159),(38.94402024916407,56.05939028508024),(38.18841256097694,55.856355210835915),(38.490655636251795,55.53270067649592),(37.795496563119656,55.39562234093384),(38.30930979108689,55.154587013355666),(36.7074214921302,54.65063295250911),(37.31190764267989,53.92734063371401),(36.979440259877556,53.58783775557231),(37.855945178174615,52.91880107773497),(39.57873070724124,52.69956490610895),(38.33953409861437,53.281741738901104),(40.00187101262603,53.35396273604752),(39.54850639971376,53.58783775557231),(40.24366547284591,53.58783775557231),(39.97164670509855,53.98069568468355),(40.60635716317572,54.03398248547225),(40.39478701048334,54.44025165268903),(39.54850639971376,54.56310590284329),(39.54850639971376,54.87732350170489),(40.39478701048334,54.87732350170489),(40.39478701048334,55.24083903654295),(39.82052516746112,55.2752875586599),(39.760076552406154,55.75443792473942),(40.57613285564824,55.78844000174894),(40.425011318010824,56.19415599955667),(39.82052516746112,56.07626182891758),(39.79030085993364,56.41214455508424),(40.48545993306579,56.495655446714636),(40.33433839542836,56.95993246553937),(39.79030085993364,56.992876013526654),(39.72985224487867,57.46729112028032),(40.33433839542836,57.46729112028032),(40.24366547284591,58.04776787540811),(39.63917932229622,58.04776787540811),(39.63917932229622,58.382088724871295),(40.33433839542836,58.382088724871295),(40.45523562553831,58.9011152358548),(38.52087994377928,59.11898412389468)]]], [[(24.367675781249993,61.45977057029751),(19.577636718749993,58.67693767258692),(19.577636718749993,57.492213666700735),(19.445800781249996,55.87531083569678),(19.445800781249996,54.085173420886775),(17.468261718749996,53.014783245859235),(20.017089843749993,51.563412328675895),(21.203613281249993,50.205033264943324),(26.125488281249993,50.40151532278236),(27.22412109374999,48.980216985374994),(32.80517578124999,49.525208341974405),(35.26611328124999,48.74894534343292),(36.93603515624999,49.66762782262194),(42.56103515625,48.77791275550183),(43.92333984374999,49.8096315635631),(47.17529296875,49.152969656170455),(49.28466796875,50.54136296522162),(48.05419921875,51.17934297928929),(51.39404296875,52.48278022207825),(50.64697265625,53.014783245859235),(52.88818359375,53.93021986394004),(51.65771484374999,54.29088164657006),(52.66845703125,55.825973254619015),(50.25146484375,56.145549500679095),(51.92138671875,57.914847767009206),(49.15283203125,58.17070248348605),(49.59228515625,60.086762746260064),(47.043457031249986,59.88893689676584),(43.57177734375,61.37567331572748),(42.64892578125,60.630101766266705),(36.89208984374999,62.000904713685856),(36.01318359374999,61.143235250840576),(31.398925781249993,62.02152819100766),(30.563964843749996,61.05828537037917),(26.872558593749993,61.71070595883174),(26.652832031249993,61.10078883158897),(24.367675781249993,61.45977057029751)], 
[(24.455566406249993,59.42272750081452),(21.203613281249993,58.49369382056807),(21.335449218749993,56.89700392127261),(21.599121093749993,55.92458580482949),(25.202636718749993,55.998380955359636),(28.850097656249993,57.06463027327854),(27.09228515625,57.844750992890994),(28.806152343749996,59.17592824927138),(26.257324218749993,59.17592824927138),(24.455566406249993,59.42272750081452)], [(35.13427734375,59.84481485969107),(31.970214843749993,58.97266715450152),(33.20068359374999,56.776808316568406),(36.67236328125,56.41390137600675),(39.08935546874999,57.25528054528888),(42.69287109374999,58.03137242177638),(40.89111328124999,59.26588062825809),(37.28759765625,58.722598828043374),(37.11181640624999,59.66774058164964),(35.13427734375,59.84481485969107)], [(29.157714843749993,55.75184939173528),(22.565917968749993,55.128649068488784),(22.565917968749993,53.54030739150019),(22.038574218749996,51.48138289610097),(26.257324218749993,51.42661449707484),(30.124511718749993,50.54136296522162),(32.18994140624999,51.17934297928929),(30.124511718749993,53.173119202640635),(35.09033203124999,53.173119202640635),(33.11279296875,54.085173420886775),(29.597167968749993,55.50374985927513),(29.157714843749993,55.75184939173528)], [(42.82470703125,56.58369172128337),(36.584472656249986,55.329144408405085),(37.99072265625,53.592504809039355),(34.95849609374999,51.48138289610097),(36.54052734374999,50.40151532278236),(39.66064453124999,50.289339253291786),(39.79248046875,52.13348804077148),(41.77001953125,50.68079714532166),(44.49462890624999,51.97134580885171),(47.30712890624999,52.509534770327264),(44.05517578125,53.54030739150019),(46.60400390625,53.696706475303245),(47.61474609375,55.40406982700608),(45.37353515625,55.40406982700608),(42.82470703125,56.58369172128337)]]))) format TSV; + +select '-------- Polygon with Polygon with Holes'; +select wkt(arrayMap(a -> arrayMap(b -> arrayMap(c -> (round(c.1, 6), round(c.2, 6)), b), a), +polygonsIntersectionSpherical([[(29.453587685533865,59.779570356240356),(29.393139070478895,52.276266797422124),(40.636581470703206,59.38168915000267),(41.21084331372543,59.103467777099866),(29.786055068336193,52.146627480315004),(31.23682182965546,52.16517054781818),(41.69443223416517,58.85424941916091),(42.51048853740727,58.47703162291134),(32.59691566839227,52.22075341251539),(34.289476889931414,52.22075341251539),(43.02430176537451,58.07974369546071),(43.02430176537451,57.25537683364851),(35.468224883503325,52.2022335126388),(37.16078610504247,52.23926559241349),(43.02430176537451,56.26136189644947),(43.02430176537451,55.326904361850836),(38.33953409861437,52.16517054781818),(40.09254393520848,52.16517054781818),(44.4146199116388,55.3097062225408),(44.47506852669377,59.80998197603594),(39.72985224487867,59.931351417569715),(30.23941968124846,53.67744677450975),(30.20919537372098,54.63314259659509),(38.73245009647167,59.94649146557819),(37.2816833351524,59.97675082987618),(30.23941968124846,55.2752875586599),(30.33009260383092,56.19415599955667),(36.28428118674541,59.96162460231375),(34.863738732953635,59.97675082987618),(30.178971066193498,56.97640788219866),(30.178971066193498,57.91957806959033),(33.65476643185424,59.94649146557819),(32.32489690064491,59.94649146557819),(30.481214141468342,58.85424941916091),(30.571887064050795,59.99187015036608),(29.453587685533865,59.779570356240356)]], 
[[(24.367675781249993,61.45977057029751),(19.577636718749993,58.67693767258692),(19.577636718749993,57.492213666700735),(19.445800781249996,55.87531083569678),(19.445800781249996,54.085173420886775),(17.468261718749996,53.014783245859235),(20.017089843749993,51.563412328675895),(21.203613281249993,50.205033264943324),(26.125488281249993,50.40151532278236),(27.22412109374999,48.980216985374994),(32.80517578124999,49.525208341974405),(35.26611328124999,48.74894534343292),(36.93603515624999,49.66762782262194),(42.56103515625,48.77791275550183),(43.92333984374999,49.8096315635631),(47.17529296875,49.152969656170455),(49.28466796875,50.54136296522162),(48.05419921875,51.17934297928929),(51.39404296875,52.48278022207825),(50.64697265625,53.014783245859235),(52.88818359375,53.93021986394004),(51.65771484374999,54.29088164657006),(52.66845703125,55.825973254619015),(50.25146484375,56.145549500679095),(51.92138671875,57.914847767009206),(49.15283203125,58.17070248348605),(49.59228515625,60.086762746260064),(47.043457031249986,59.88893689676584),(43.57177734375,61.37567331572748),(42.64892578125,60.630101766266705),(36.89208984374999,62.000904713685856),(36.01318359374999,61.143235250840576),(31.398925781249993,62.02152819100766),(30.563964843749996,61.05828537037917),(26.872558593749993,61.71070595883174),(26.652832031249993,61.10078883158897),(24.367675781249993,61.45977057029751)], [(24.455566406249993,59.42272750081452),(21.203613281249993,58.49369382056807),(21.335449218749993,56.89700392127261),(21.599121093749993,55.92458580482949),(25.202636718749993,55.998380955359636),(28.850097656249993,57.06463027327854),(27.09228515625,57.844750992890994),(28.806152343749996,59.17592824927138),(26.257324218749993,59.17592824927138),(24.455566406249993,59.42272750081452)], [(35.13427734375,59.84481485969107),(31.970214843749993,58.97266715450152),(33.20068359374999,56.776808316568406),(36.67236328125,56.41390137600675),(39.08935546874999,57.25528054528888),(42.69287109374999,58.03137242177638),(40.89111328124999,59.26588062825809),(37.28759765625,58.722598828043374),(37.11181640624999,59.66774058164964),(35.13427734375,59.84481485969107)], [(29.157714843749993,55.75184939173528),(22.565917968749993,55.128649068488784),(22.565917968749993,53.54030739150019),(22.038574218749996,51.48138289610097),(26.257324218749993,51.42661449707484),(30.124511718749993,50.54136296522162),(32.18994140624999,51.17934297928929),(30.124511718749993,53.173119202640635),(35.09033203124999,53.173119202640635),(33.11279296875,54.085173420886775),(29.597167968749993,55.50374985927513),(29.157714843749993,55.75184939173528)], [(42.82470703125,56.58369172128337),(36.584472656249986,55.329144408405085),(37.99072265625,53.592504809039355),(34.95849609374999,51.48138289610097),(36.54052734374999,50.40151532278236),(39.66064453124999,50.289339253291786),(39.79248046875,52.13348804077148),(41.77001953125,50.68079714532166),(44.49462890624999,51.97134580885171),(47.30712890624999,52.509534770327264),(44.05517578125,53.54030739150019),(46.60400390625,53.696706475303245),(47.61474609375,55.40406982700608),(45.37353515625,55.40406982700608),(42.82470703125,56.58369172128337)]]))) format TSV; diff --git a/parser/testdata/01307_bloom_filter_index_string_multi_granulas/ast.json b/parser/testdata/01307_bloom_filter_index_string_multi_granulas/ast.json new file mode 100644 index 000000000..2d74b0ef2 --- /dev/null +++ b/parser/testdata/01307_bloom_filter_index_string_multi_granulas/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": 
"explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_01307 (children 1)" + }, + { + "explain": " Identifier test_01307" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001062144, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01307_bloom_filter_index_string_multi_granulas/metadata.json b/parser/testdata/01307_bloom_filter_index_string_multi_granulas/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01307_bloom_filter_index_string_multi_granulas/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01307_bloom_filter_index_string_multi_granulas/query.sql b/parser/testdata/01307_bloom_filter_index_string_multi_granulas/query.sql new file mode 100644 index 000000000..cfb1f45c1 --- /dev/null +++ b/parser/testdata/01307_bloom_filter_index_string_multi_granulas/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS test_01307; + +CREATE TABLE test_01307 (id UInt64, val String, INDEX ind val TYPE bloom_filter() GRANULARITY 1) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi'; +INSERT INTO test_01307 (id, val) select number as id, toString(number) as val from numbers(4); +SELECT count() FROM test_01307 WHERE identity(val) = '2'; +SELECT count() FROM test_01307 WHERE val = '2'; +OPTIMIZE TABLE test_01307 FINAL; +SELECT count() FROM test_01307 WHERE identity(val) = '2'; +SELECT count() FROM test_01307 WHERE val = '2'; + +DROP TABLE test_01307; diff --git a/parser/testdata/01307_polygon_perimeter/ast.json b/parser/testdata/01307_polygon_perimeter/ast.json new file mode 100644 index 000000000..966fde2c3 --- /dev/null +++ b/parser/testdata/01307_polygon_perimeter/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function polygonPerimeterCartesian (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal Tuple_(Float64_0, Float64_0)" + }, + { + "explain": " Literal Tuple_(Float64_0, Float64_5)" + }, + { + "explain": " Literal Tuple_(Float64_5, Float64_5)" + }, + { + "explain": " Literal Tuple_(Float64_5, Float64_0)" + }, + { + "explain": " Literal Tuple_(Float64_0, Float64_0)" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001399603, + "rows_read": 17, + "bytes_read": 789 + } +} diff --git a/parser/testdata/01307_polygon_perimeter/metadata.json b/parser/testdata/01307_polygon_perimeter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01307_polygon_perimeter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01307_polygon_perimeter/query.sql b/parser/testdata/01307_polygon_perimeter/query.sql new file mode 100644 index 000000000..18f5b3858 --- /dev/null +++ b/parser/testdata/01307_polygon_perimeter/query.sql @@ -0,0 +1 @@ +select polygonPerimeterCartesian([[[(0., 0.), (0., 5.), (5., 5.), (5., 0.), 
(0., 0.)]]]); diff --git a/parser/testdata/01308_polygon_area/ast.json b/parser/testdata/01308_polygon_area/ast.json new file mode 100644 index 000000000..054084c62 --- /dev/null +++ b/parser/testdata/01308_polygon_area/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function polygonAreaCartesian (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal Tuple_(Float64_0, Float64_0)" + }, + { + "explain": " Literal Tuple_(Float64_0, Float64_5)" + }, + { + "explain": " Literal Tuple_(Float64_5, Float64_5)" + }, + { + "explain": " Literal Tuple_(Float64_5, Float64_0)" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001546649, + "rows_read": 16, + "bytes_read": 728 + } +} diff --git a/parser/testdata/01308_polygon_area/metadata.json b/parser/testdata/01308_polygon_area/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01308_polygon_area/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01308_polygon_area/query.sql b/parser/testdata/01308_polygon_area/query.sql new file mode 100644 index 000000000..26f026ae9 --- /dev/null +++ b/parser/testdata/01308_polygon_area/query.sql @@ -0,0 +1,3 @@ +select polygonAreaCartesian([[[(0., 0.), (0., 5.), (5., 5.), (5., 0.)]]]); +select round(polygonAreaSpherical([[[(4.346693, 50.858306), (4.367945, 50.852455), (4.366227, 50.840809), (4.344961, 50.833264), (4.338074, 50.848677), (4.346693, 50.858306)]]]), 14); +SELECT polygonAreaCartesian([]); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/01308_row_policy_and_trivial_count_query/ast.json b/parser/testdata/01308_row_policy_and_trivial_count_query/ast.json new file mode 100644 index 000000000..67e15cf05 --- /dev/null +++ b/parser/testdata/01308_row_policy_and_trivial_count_query/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001143512, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01308_row_policy_and_trivial_count_query/metadata.json b/parser/testdata/01308_row_policy_and_trivial_count_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01308_row_policy_and_trivial_count_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01308_row_policy_and_trivial_count_query/query.sql b/parser/testdata/01308_row_policy_and_trivial_count_query/query.sql new file mode 100644 index 000000000..81bd2ad97 --- /dev/null +++ b/parser/testdata/01308_row_policy_and_trivial_count_query/query.sql @@ -0,0 +1,15 @@ +SET optimize_move_to_prewhere = 1; + +DROP TABLE IF EXISTS t; + +CREATE TABLE t (x UInt8) ENGINE = MergeTree ORDER BY x; +INSERT INTO t VALUES (1), (2), (3); + +SELECT count() FROM t; +DROP ROW POLICY IF EXISTS filter ON t; +CREATE 
ROW POLICY filter ON t USING (x % 2 = 1) TO ALL; +SELECT count() FROM t; +DROP ROW POLICY filter ON t; +SELECT count() FROM t; + +DROP TABLE t; diff --git a/parser/testdata/01310_enum_comparison/ast.json b/parser/testdata/01310_enum_comparison/ast.json new file mode 100644 index 000000000..469747880 --- /dev/null +++ b/parser/testdata/01310_enum_comparison/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery enum (children 2)" + }, + { + "explain": " Identifier enum" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType Enum (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'hello'" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'world'" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001393846, + "rows_read": 15, + "bytes_read": 553 + } +} diff --git a/parser/testdata/01310_enum_comparison/metadata.json b/parser/testdata/01310_enum_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01310_enum_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01310_enum_comparison/query.sql b/parser/testdata/01310_enum_comparison/query.sql new file mode 100644 index 000000000..300ef8f37 --- /dev/null +++ b/parser/testdata/01310_enum_comparison/query.sql @@ -0,0 +1,9 @@ +CREATE TEMPORARY TABLE enum (x Enum('hello' = 1, 'world' = 2)); +INSERT INTO enum VALUES ('hello'); + +SELECT count() FROM enum WHERE x = 'hello'; +SELECT count() FROM enum WHERE x = 'world'; +SELECT count() FROM enum WHERE x = 'xyz'; + +SET validate_enum_literals_in_operators = 1; +SELECT count() FROM enum WHERE x = 'xyz'; -- { serverError UNKNOWN_ELEMENT_OF_ENUM } diff --git a/parser/testdata/01311_comparison_with_constant_string/ast.json b/parser/testdata/01311_comparison_with_constant_string/ast.json new file mode 100644 index 000000000..ba60eb0d1 --- /dev/null +++ b/parser/testdata/01311_comparison_with_constant_string/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal '1'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001410439, + "rows_read": 14, + "bytes_read": 537 + } +} diff --git a/parser/testdata/01311_comparison_with_constant_string/metadata.json 
b/parser/testdata/01311_comparison_with_constant_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01311_comparison_with_constant_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01311_comparison_with_constant_string/query.sql b/parser/testdata/01311_comparison_with_constant_string/query.sql new file mode 100644 index 000000000..5760c09d6 --- /dev/null +++ b/parser/testdata/01311_comparison_with_constant_string/query.sql @@ -0,0 +1,33 @@ +SELECT number = '1' FROM numbers(3); +SELECT '---'; +SELECT '1' != number FROM numbers(3); +SELECT '---'; +SELECT '1' > number FROM numbers(3); +SELECT '---'; +SELECT 1 = '257'; +SELECT '---'; +SELECT 1 IN (1.23, '1', 2); +SELECT 1 IN (1.23, '2', 2); +SELECT '---'; + +-- it should work but it doesn't. +SELECT 1 = '1.0'; -- { serverError TYPE_MISMATCH } +SELECT '---'; + +SELECT 1 = '257'; +SELECT '---'; +SELECT 1 != '257'; +SELECT '---'; +SELECT 1 < '257'; -- this is wrong for now +SELECT '---'; +SELECT 1 > '257'; +SELECT '---'; +SELECT 1 <= '257'; -- this is wrong for now +SELECT '---'; +SELECT 1 >= '257'; +SELECT '---'; + +SELECT toDateTime('2020-06-13 01:02:03') = '2020-06-13T01:02:03'; +SELECT '---'; + +SELECT 0 = ''; -- { serverError ATTEMPT_TO_READ_AFTER_EOF } diff --git a/parser/testdata/01312_case_insensitive_regexp/ast.json b/parser/testdata/01312_case_insensitive_regexp/ast.json new file mode 100644 index 000000000..6c92b17ce --- /dev/null +++ b/parser/testdata/01312_case_insensitive_regexp/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function match (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'Too late'" + }, + { + "explain": " Literal 'Too late'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001552185, + "rows_read": 8, + "bytes_read": 292 + } +} diff --git a/parser/testdata/01312_case_insensitive_regexp/metadata.json b/parser/testdata/01312_case_insensitive_regexp/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01312_case_insensitive_regexp/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01312_case_insensitive_regexp/query.sql b/parser/testdata/01312_case_insensitive_regexp/query.sql new file mode 100644 index 000000000..ca1398959 --- /dev/null +++ b/parser/testdata/01312_case_insensitive_regexp/query.sql @@ -0,0 +1,8 @@ +SELECT match('Too late', 'Too late'); +select match('Too late', '(?i)Too late'); +select match('Too late', '(?i)too late'); +select match('Too late', '(?i:too late)'); +select match('Too late', '(?i)to{2} late'); +select match('Too late', '(?i)to(?)o late'); +select match('Too late', '(?i)to+ late'); +select match('Too late', '(?i)to(?:o|o) late'); diff --git a/parser/testdata/01312_comparison_with_constant_string_in_index_analysis/ast.json b/parser/testdata/01312_comparison_with_constant_string_in_index_analysis/ast.json new file mode 100644 index 000000000..8be2471f7 --- /dev/null +++ b/parser/testdata/01312_comparison_with_constant_string_in_index_analysis/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + 
"rows": 1, + + "statistics": + { + "elapsed": 0.001218795, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01312_comparison_with_constant_string_in_index_analysis/metadata.json b/parser/testdata/01312_comparison_with_constant_string_in_index_analysis/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01312_comparison_with_constant_string_in_index_analysis/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01312_comparison_with_constant_string_in_index_analysis/query.sql b/parser/testdata/01312_comparison_with_constant_string_in_index_analysis/query.sql new file mode 100644 index 000000000..2288d6380 --- /dev/null +++ b/parser/testdata/01312_comparison_with_constant_string_in_index_analysis/query.sql @@ -0,0 +1,38 @@ +SET optimize_trivial_insert_select = 1; +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. +SET parallel_replicas_index_analysis_only_on_coordinator = 0; + +DROP TABLE IF EXISTS test; +CREATE TABLE test (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS index_granularity = 1000, index_granularity_bytes = '10Mi'; +INSERT INTO test SELECT * FROM numbers(1000000); +OPTIMIZE TABLE test; + +SET max_rows_to_read = 2000; +SELECT count() FROM test WHERE x = 100000; +SET max_rows_to_read = 1000000; +SELECT count() FROM test WHERE x != 100000; +SET max_rows_to_read = 101000; +SELECT count() FROM test WHERE x < 100000; +SET max_rows_to_read = 900000; +SELECT count() FROM test WHERE x > 100000; +SET max_rows_to_read = 101000; +SELECT count() FROM test WHERE x <= 100000; +SET max_rows_to_read = 901000; +SELECT count() FROM test WHERE x >= 100000; + +SET max_rows_to_read = 2000; +SELECT count() FROM test WHERE x = '100000'; +SET max_rows_to_read = 1000000; +SELECT count() FROM test WHERE x != '100000'; +SET max_rows_to_read = 101000; +SELECT count() FROM test WHERE x < '100000'; +SET max_rows_to_read = 900000; +SELECT count() FROM test WHERE x > '100000'; +SET max_rows_to_read = 101000; +SELECT count() FROM test WHERE x <= '100000'; +SET max_rows_to_read = 901000; +SELECT count() FROM test WHERE x >= '100000'; + +DROP TABLE test; diff --git a/parser/testdata/01313_parse_date_time_best_effort_null_zero/ast.json b/parser/testdata/01313_parse_date_time_best_effort_null_zero/ast.json new file mode 100644 index 000000000..c06ed6538 --- /dev/null +++ b/parser/testdata/01313_parse_date_time_best_effort_null_zero/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function parseDateTimeBestEffort (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001301574, + "rows_read": 7, + "bytes_read": 277 + } +} diff --git a/parser/testdata/01313_parse_date_time_best_effort_null_zero/metadata.json b/parser/testdata/01313_parse_date_time_best_effort_null_zero/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01313_parse_date_time_best_effort_null_zero/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01313_parse_date_time_best_effort_null_zero/query.sql b/parser/testdata/01313_parse_date_time_best_effort_null_zero/query.sql new file mode 100644 index 000000000..2ffa05155 --- /dev/null +++ b/parser/testdata/01313_parse_date_time_best_effort_null_zero/query.sql @@ -0,0 +1,12 @@ +SELECT parseDateTimeBestEffort(''); -- { serverError CANNOT_PARSE_DATETIME } +SELECT parseDateTimeBestEffortOrNull(''); +SELECT parseDateTimeBestEffortOrZero('', 'UTC'); + +SELECT parseDateTime64BestEffort(''); -- { serverError CANNOT_PARSE_DATETIME } +SELECT parseDateTime64BestEffortOrNull(''); +SELECT parseDateTime64BestEffortOrZero('', 0, 'UTC'); + +SET date_time_input_format = 'best_effort'; +SELECT toDateTime(''); -- { serverError CANNOT_PARSE_DATETIME } +SELECT toDateTimeOrNull(''); +SELECT toDateTimeOrZero('', 'UTC'); diff --git a/parser/testdata/01314_position_in_system_columns/ast.json b/parser/testdata/01314_position_in_system_columns/ast.json new file mode 100644 index 000000000..bb2c7b4f6 --- /dev/null +++ b/parser/testdata/01314_position_in_system_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001462661, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01314_position_in_system_columns/metadata.json b/parser/testdata/01314_position_in_system_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01314_position_in_system_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01314_position_in_system_columns/query.sql b/parser/testdata/01314_position_in_system_columns/query.sql new file mode 100644 index 000000000..7bb0f3b5a --- /dev/null +++ b/parser/testdata/01314_position_in_system_columns/query.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (x UInt8, y String, z Array(String)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO test (x) VALUES (1); + +SELECT name, type, position FROM system.columns WHERE database = currentDatabase() AND table = 'test'; +SELECT column, type, column_position FROM system.parts_columns WHERE database = currentDatabase() AND table = 'test'; + +DROP TABLE test; diff --git a/parser/testdata/01315_count_distinct_return_not_nullable/ast.json b/parser/testdata/01315_count_distinct_return_not_nullable/ast.json new file mode 100644 index 000000000..8deb36f4c --- /dev/null +++ b/parser/testdata/01315_count_distinct_return_not_nullable/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function uniq (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function greaterOrEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Identifier number" + }, + { + 
"explain": " Literal NULL" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001858106, + "rows_read": 20, + "bytes_read": 788 + } +} diff --git a/parser/testdata/01315_count_distinct_return_not_nullable/metadata.json b/parser/testdata/01315_count_distinct_return_not_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01315_count_distinct_return_not_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01315_count_distinct_return_not_nullable/query.sql b/parser/testdata/01315_count_distinct_return_not_nullable/query.sql new file mode 100644 index 000000000..0558d2cfd --- /dev/null +++ b/parser/testdata/01315_count_distinct_return_not_nullable/query.sql @@ -0,0 +1,19 @@ +SELECT uniq(number >= 10 ? number : NULL) FROM numbers(10); +SELECT uniqExact(number >= 10 ? number : NULL) FROM numbers(10); +SELECT count(DISTINCT number >= 10 ? number : NULL) FROM numbers(10); + +SELECT uniq(number >= 5 ? number : NULL) FROM numbers(10); +SELECT uniqExact(number >= 5 ? number : NULL) FROM numbers(10); +SELECT count(DISTINCT number >= 5 ? number : NULL) FROM numbers(10); + +SELECT '---'; +SELECT count(NULL); +SELECT uniq(NULL); +SELECT count(DISTINCT NULL); + +SELECT '---'; +SELECT avg(NULL); +SELECT sum(NULL); +SELECT corr(NULL, NULL); +SELECT corr(1, NULL); +SELECT corr(NULL, 1); diff --git a/parser/testdata/01318_alter_add_column_exists/ast.json b/parser/testdata/01318_alter_add_column_exists/ast.json new file mode 100644 index 000000000..1b8754439 --- /dev/null +++ b/parser/testdata/01318_alter_add_column_exists/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery add_table (children 1)" + }, + { + "explain": " Identifier add_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001632089, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/01318_alter_add_column_exists/metadata.json b/parser/testdata/01318_alter_add_column_exists/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01318_alter_add_column_exists/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01318_alter_add_column_exists/query.sql b/parser/testdata/01318_alter_add_column_exists/query.sql new file mode 100644 index 000000000..5bfa07cd4 --- /dev/null +++ b/parser/testdata/01318_alter_add_column_exists/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS add_table; + +CREATE TABLE add_table +( + key UInt64, + value1 String +) +ENGINE = MergeTree() +ORDER BY key; + +SHOW CREATE TABLE add_table; + +ALTER TABLE add_table ADD COLUMN IF NOT EXISTS value1 UInt64; + +SHOW CREATE TABLE add_table; + +ALTER TABLE add_table ADD COLUMN IF NOT EXISTS key String, ADD COLUMN IF NOT EXISTS value1 UInt64; + +SHOW CREATE TABLE add_table; + +ALTER TABLE add_table ADD COLUMN IF NOT EXISTS value1 UInt64, ADD COLUMN IF NOT EXISTS value2 UInt64; + +SHOW CREATE TABLE add_table; + +ALTER TABLE add_table ADD COLUMN value3 UInt64, ADD COLUMN IF NOT EXISTS value3 UInt32; --{serverError ILLEGAL_COLUMN} + +DROP TABLE IF EXISTS add_table; diff --git 
a/parser/testdata/01318_decrypt/ast.json b/parser/testdata/01318_decrypt/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01318_decrypt/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01318_decrypt/metadata.json b/parser/testdata/01318_decrypt/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01318_decrypt/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01318_decrypt/query.sql b/parser/testdata/01318_decrypt/query.sql new file mode 100644 index 000000000..a41da46d3 --- /dev/null +++ b/parser/testdata/01318_decrypt/query.sql @@ -0,0 +1,146 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on OpenSSL + +--- aes_decrypt_mysql(string, key, block_mode[, init_vector, AAD]) +-- The MySQL-compatible encryption, only ecb, cbc and ofb modes are supported, +-- just like for MySQL +-- https://dev.mysql.com/doc/refman/8.0/en/encryption-functions.html#function_aes-encrypt +-- https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_block_encryption_mode +-- Please note that for keys that exceed mode-specific length, keys are folded in a MySQL-specific way, +-- meaning that the whole key is used, but effective key length is still determined by mode. +-- when the key doesn't exceed the default mode length, the encryption result equals that of AES_encrypt() + +----------------------------------------------------------------------------------------- +-- error cases +----------------------------------------------------------------------------------------- +SELECT aes_decrypt_mysql(); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} not enough arguments +SELECT aes_decrypt_mysql('aes-128-ecb'); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} not enough arguments +SELECT aes_decrypt_mysql('aes-128-ecb', 'text'); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} not enough arguments + +-- Mode +SELECT aes_decrypt_mysql(789, 'text', 'key'); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} bad mode type +SELECT aes_decrypt_mysql('blah blah blah', 'text', 'key'); -- {serverError BAD_ARGUMENTS} garbage mode value +SELECT aes_decrypt_mysql('des-ede3-ecb', 'text', 'key'); -- {serverError BAD_ARGUMENTS} bad mode value of valid cipher name +SELECT aes_decrypt_mysql('aes-128-gcm', 'text', 'key'); -- {serverError BAD_ARGUMENTS} mode is not supported by _mysql-functions + +SELECT decrypt(789, 'text', 'key'); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} bad mode type +SELECT decrypt('blah blah blah', 'text', 'key'); -- {serverError BAD_ARGUMENTS} garbage mode value +SELECT decrypt('des-ede3-ecb', 'text', 'key'); -- {serverError BAD_ARGUMENTS} bad mode value of valid cipher name + + +-- Key +SELECT aes_decrypt_mysql('aes-128-ecb', 'text', 456); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} bad key type +SELECT aes_decrypt_mysql('aes-128-ecb', 'text', 'key'); -- {serverError BAD_ARGUMENTS} key is too short + +SELECT decrypt('aes-128-ecb', 'text'); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} key is missing +SELECT decrypt('aes-128-ecb', 'text', 456); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} bad key type +SELECT decrypt('aes-128-ecb', 'text', 'key'); -- {serverError BAD_ARGUMENTS} key is too short +SELECT decrypt('aes-128-ecb', 'text', 'keykeykeykeykeykeykeykeykeykeykeykey'); -- {serverError BAD_ARGUMENTS} key is too long + +-- IV +SELECT aes_decrypt_mysql('aes-128-ecb', 'text', 'key', 1011); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} bad IV type 6 +SELECT aes_decrypt_mysql('aes-128-ecb', 'text', 
'key', 'iv'); --{serverError BAD_ARGUMENTS} IV is too short 4 + +SELECT decrypt('aes-128-cbc', 'text', 'keykeykeykeykeyk', 1011); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} bad IV type 1 +SELECT decrypt('aes-128-cbc', 'text', 'keykeykeykeykeyk', 'iviviviviviviviviviviviviviviviviviviviviv'); --{serverError BAD_ARGUMENTS} IV is too long 3 +SELECT decrypt('aes-128-cbc', 'text', 'keykeykeykeykeyk', 'iv'); --{serverError BAD_ARGUMENTS} IV is too short 2 + +--AAD +SELECT aes_decrypt_mysql('aes-128-ecb', 'text', 'key', 'IV', 1213); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} too many arguments + +SELECT decrypt('aes-128-ecb', 'text', 'key', 'IV', 1213); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} bad AAD type +SELECT decrypt('aes-128-gcm', 'text', 'key', 'IV', 1213); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} bad AAD type + +-- Invalid ciphertext should cause an error or produce garbage +SELECT ignore(decrypt('aes-128-ecb', 'hello there', '1111111111111111')); -- {serverError OPENSSL_ERROR} 1 +SELECT ignore(decrypt('aes-128-cbc', 'hello there', '1111111111111111')); -- {serverError OPENSSL_ERROR} 2 +SELECT ignore(decrypt('aes-128-ofb', 'hello there', '1111111111111111')); -- GIGO +SELECT ignore(decrypt('aes-128-ctr', 'hello there', '1111111111111111')); -- GIGO +SELECT decrypt('aes-128-ctr', '', '1111111111111111') == ''; + + +----------------------------------------------------------------------------------------- +-- Validate against predefined ciphertext,plaintext,key and IV for MySQL compatibility mode +----------------------------------------------------------------------------------------- +CREATE TABLE encryption_test +( + input String, + key String DEFAULT unhex('fb9958e2e897ef3fdb49067b51a24af645b3626eed2f9ea1dc7fd4dd71b7e38f9a68db2a3184f952382c783785f9d77bf923577108a88adaacae5c141b1576b0'), + iv String DEFAULT unhex('8CA3554377DFF8A369BC50A89780DD85'), + key32 String DEFAULT substring(key, 1, 32), + key24 String DEFAULT substring(key, 1, 24), + key16 String DEFAULT substring(key, 1, 16) +) Engine = Memory; + +INSERT INTO encryption_test (input) +VALUES (''), ('text'), ('What Is ClickHouse? 
ClickHouse is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).'); + + +SELECT 'MySQL-compatitable mode, with key folding, no length checks, etc.'; +SELECT 'aes-128-cbc' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test; +SELECT 'aes-192-cbc' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test; +SELECT 'aes-256-cbc' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test; + +SELECT 'aes-128-ecb' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test; +SELECT 'aes-192-ecb' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test; +SELECT 'aes-256-ecb' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test; + +SELECT 'aes-128-ofb' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test; +SELECT 'aes-192-ofb' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test; +SELECT 'aes-256-ofb' as mode, aes_decrypt_mysql(mode, aes_encrypt_mysql(mode, input, key, iv), key, iv) == input FROM encryption_test; + +SELECT 'Strict mode without key folding and proper key and iv lengths checks.'; +SELECT 'aes-128-cbc' as mode, decrypt(mode, encrypt(mode, input, key16, iv), key16, iv) == input FROM encryption_test; +SELECT 'aes-192-cbc' as mode, decrypt(mode, encrypt(mode, input, key24, iv), key24, iv) == input FROM encryption_test; +SELECT 'aes-256-cbc' as mode, decrypt(mode, encrypt(mode, input, key32, iv), key32, iv) == input FROM encryption_test; + +SELECT 'aes-128-ctr' as mode, decrypt(mode, encrypt(mode, input, key16, iv), key16, iv) == input FROM encryption_test; +SELECT 'aes-192-ctr' as mode, decrypt(mode, encrypt(mode, input, key24, iv), key24, iv) == input FROM encryption_test; +SELECT 'aes-256-ctr' as mode, decrypt(mode, encrypt(mode, input, key32, iv), key32, iv) == input FROM encryption_test; + +SELECT 'aes-128-ecb' as mode, decrypt(mode, encrypt(mode, input, key16), key16) == input FROM encryption_test; +SELECT 'aes-192-ecb' as mode, decrypt(mode, encrypt(mode, input, key24), key24) == input FROM encryption_test; +SELECT 'aes-256-ecb' as mode, decrypt(mode, encrypt(mode, input, key32), key32) == input FROM encryption_test; + +SELECT 'aes-128-ofb' as mode, decrypt(mode, encrypt(mode, input, key16, iv), key16, iv) == input FROM encryption_test; +SELECT 'aes-192-ofb' as mode, decrypt(mode, encrypt(mode, input, key24, iv), key24, iv) == input FROM encryption_test; +SELECT 'aes-256-ofb' as mode, decrypt(mode, encrypt(mode, input, key32, iv), key32, iv) == input FROM encryption_test; + +SELECT 'GCM mode with IV'; +SELECT 'aes-128-gcm' as mode, decrypt(mode, encrypt(mode, input, key16, iv), key16, iv) == input FROM encryption_test; +SELECT 'aes-192-gcm' as mode, decrypt(mode, encrypt(mode, input, key24, iv), key24, iv) == input FROM encryption_test; +SELECT 'aes-256-gcm' as mode, decrypt(mode, encrypt(mode, input, key32, iv), key32, iv) == input FROM encryption_test; + +SELECT 'GCM mode with IV and AAD'; +SELECT 'aes-128-gcm' as mode, decrypt(mode, encrypt(mode, input, key16, iv, 'AAD'), key16, iv, 'AAD') == input FROM encryption_test; +SELECT 'aes-192-gcm' as mode, decrypt(mode, 
encrypt(mode, input, key24, iv, 'AAD'), key24, iv, 'AAD') == input FROM encryption_test; +SELECT 'aes-256-gcm' as mode, decrypt(mode, encrypt(mode, input, key32, iv, 'AAD'), key32, iv, 'AAD') == input FROM encryption_test; + + +-- based on https://github.com/openssl/openssl/blob/master/demos/evp/aesgcm.c#L20 +WITH + unhex('eebc1f57487f51921c0465665f8ae6d1658bb26de6f8a069a3520293a572078f') as key, + unhex('67ba0510262ae487d737ee6298f77e0c') as tag, + unhex('99aa3e68ed8173a0eed06684') as iv, + unhex('f56e87055bc32d0eeb31b2eacc2bf2a5') as plaintext, + unhex('4d23c3cec334b49bdb370c437fec78de') as aad, + unhex('f7264413a84c0e7cd536867eb9f21736') as ciphertext +SELECT + hex(decrypt('aes-256-gcm', concat(ciphertext, tag), key, iv, aad)) as plaintext_actual, + plaintext_actual = hex(plaintext); + +-- tryDecrypt +CREATE TABLE decrypt_null ( + dt DateTime, + user_id UInt32, + encrypted String, + iv String +) ENGINE = Memory; + +INSERT INTO decrypt_null VALUES ('2022-08-02 00:00:00', 1, encrypt('aes-256-gcm', 'value1', 'keykeykeykeykeykeykeykeykeykey01', 'iv1'), 'iv1'), ('2022-09-02 00:00:00', 2, encrypt('aes-256-gcm', 'value2', 'keykeykeykeykeykeykeykeykeykey02', 'iv2'), 'iv2'), ('2022-09-02 00:00:01', 3, encrypt('aes-256-gcm', 'value3', 'keykeykeykeykeykeykeykeykeykey03', 'iv3'), 'iv3'); + +SELECT dt, user_id FROM decrypt_null WHERE (user_id > 0) AND (decrypt('aes-256-gcm', encrypted, 'keykeykeykeykeykeykeykeykeykey02', iv) = 'value2'); --{serverError OPENSSL_ERROR} +SELECT dt, user_id FROM decrypt_null WHERE (user_id > 0) AND (tryDecrypt('aes-256-gcm', encrypted, 'keykeykeykeykeykeykeykeykeykey02', iv) = 'value2'); +SELECT dt, user_id, (tryDecrypt('aes-256-gcm', encrypted, 'keykeykeykeykeykeykeykeykeykey02', iv)) as value FROM decrypt_null ORDER BY user_id; + +DROP TABLE encryption_test; diff --git a/parser/testdata/01318_encrypt/ast.json b/parser/testdata/01318_encrypt/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01318_encrypt/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01318_encrypt/metadata.json b/parser/testdata/01318_encrypt/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01318_encrypt/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01318_encrypt/query.sql b/parser/testdata/01318_encrypt/query.sql new file mode 100644 index 000000000..548d36756 --- /dev/null +++ b/parser/testdata/01318_encrypt/query.sql @@ -0,0 +1,129 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on OpenSSL + +--- aes_encrypt_mysql(string, key, block_mode[, init_vector, AAD]) +-- The MySQL-compatible encryption, only ecb, cbc and ofb modes are supported, +-- just like for MySQL +-- https://dev.mysql.com/doc/refman/8.0/en/encryption-functions.html#function_aes-encrypt +-- https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html#sysvar_block_encryption_mode +-- Please note that for keys that exceed mode-specific length, keys are folded in a MySQL-specific way, +-- meaning that the whole key is used, but effective key length is still determined by mode. 
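+-- For example (a sketch, assuming MySQL's usual folding: key bytes are XOR-ed onto a zeroed, mode-length buffer), +-- a doubled 16-byte key folds to sixteen zero bytes for aes-128, so the following comparison would be expected to return 1: +-- SELECT aes_encrypt_mysql('aes-128-ecb', 'text', repeat('0123456789abcdef', 2)) = aes_encrypt_mysql('aes-128-ecb', 'text', unhex(repeat('00', 16)));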
+-- when the key doesn't exceed the default mode length, the encryption result equals that of AES_encrypt() + +----------------------------------------------------------------------------------------- +-- error cases +----------------------------------------------------------------------------------------- +SELECT aes_encrypt_mysql(); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} not enough arguments +SELECT aes_encrypt_mysql('aes-128-ecb'); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} not enough arguments +SELECT aes_encrypt_mysql('aes-128-ecb', 'text'); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} not enough arguments + +-- Mode +SELECT aes_encrypt_mysql(789, 'text', 'key'); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} bad mode type +SELECT aes_encrypt_mysql('blah blah blah', 'text', 'key'); -- {serverError BAD_ARGUMENTS} garbage mode value +SELECT aes_encrypt_mysql('des-ede3-ecb', 'text', 'key'); -- {serverError BAD_ARGUMENTS} bad mode value of valid cipher name +SELECT aes_encrypt_mysql('aes-128-gcm', 'text', 'key'); -- {serverError BAD_ARGUMENTS} mode is not supported by _mysql-functions + +SELECT encrypt(789, 'text', 'key'); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} bad mode type +SELECT encrypt('blah blah blah', 'text', 'key'); -- {serverError BAD_ARGUMENTS} garbage mode value +SELECT encrypt('des-ede3-ecb', 'text', 'key'); -- {serverError BAD_ARGUMENTS} bad mode value of valid cipher name + + +-- Key +SELECT aes_encrypt_mysql('aes-128-ecb', 'text', 456); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} bad key type +SELECT aes_encrypt_mysql('aes-128-ecb', 'text', 'key'); -- {serverError BAD_ARGUMENTS} key is too short + +SELECT encrypt('aes-128-ecb', 'text'); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} key is missing +SELECT encrypt('aes-128-ecb', 'text', 456); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} bad key type +SELECT encrypt('aes-128-ecb', 'text', 'key'); -- {serverError BAD_ARGUMENTS} key is too short +SELECT encrypt('aes-128-ecb', 'text', 'keykeykeykeykeykeykeykeykeykeykeykey'); -- {serverError BAD_ARGUMENTS} key is too long + +-- IV +SELECT aes_encrypt_mysql('aes-128-ecb', 'text', 'key', 1011); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} bad IV type 6 +SELECT aes_encrypt_mysql('aes-128-ecb', 'text', 'key', 'iv'); --{serverError BAD_ARGUMENTS} IV is too short 4 + +SELECT encrypt('aes-128-cbc', 'text', 'keykeykeykeykeyk', 1011); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} bad IV type 1 +SELECT encrypt('aes-128-cbc', 'text', 'keykeykeykeykeyk', 'iviviviviviviviviviviviviviviviviviviviviv'); --{serverError BAD_ARGUMENTS} IV is too long 3 +SELECT encrypt('aes-128-cbc', 'text', 'keykeykeykeykeyk', 'iv'); --{serverError BAD_ARGUMENTS} IV is too short 2 + +--AAD +SELECT aes_encrypt_mysql('aes-128-ecb', 'text', 'key', 'IV', 1213); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} too many arguments + +SELECT encrypt('aes-128-ecb', 'text', 'key', 'IV', 1213); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} bad AAD type +SELECT encrypt('aes-128-gcm', 'text', 'key', 'IV', 1213); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} bad AAD type + +----------------------------------------------------------------------------------------- +-- Validate against predefined ciphertext,plaintext,key and IV for MySQL compatibility mode +----------------------------------------------------------------------------------------- +CREATE TABLE encryption_test +( + input String, + key String DEFAULT unhex('fb9958e2e897ef3fdb49067b51a24af645b3626eed2f9ea1dc7fd4dd71b7e38f9a68db2a3184f952382c783785f9d77bf923577108a88adaacae5c141b1576b0'), + 
iv String DEFAULT unhex('8CA3554377DFF8A369BC50A89780DD85'), + key32 String DEFAULT substring(key, 1, 32), + key24 String DEFAULT substring(key, 1, 24), + key16 String DEFAULT substring(key, 1, 16) +) Engine = Memory; + +INSERT INTO encryption_test (input) +VALUES (''), ('text'), ('What Is ClickHouse? ClickHouse is a column-oriented database management system (DBMS) for online analytical processing of queries (OLAP).'); + + +SELECT 'MySQL-compatitable mode, with key folding, no length checks, etc.'; +SELECT 'aes-128-cbc' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test; +SELECT 'aes-192-cbc' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test; +SELECT 'aes-256-cbc' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test; + +SELECT 'aes-128-ecb' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test; +SELECT 'aes-192-ecb' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test; +SELECT 'aes-256-ecb' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test; + +SELECT 'aes-128-ofb' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test; +SELECT 'aes-192-ofb' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test; +SELECT 'aes-256-ofb' as mode, hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test; + + +SELECT 'Strict mode without key folding and proper key and iv lengths checks.'; +SELECT 'aes-128-cbc' as mode, hex(encrypt(mode, input, key16, iv)) FROM encryption_test; +SELECT 'aes-192-cbc' as mode, hex(encrypt(mode, input, key24, iv)) FROM encryption_test; +SELECT 'aes-256-cbc' as mode, hex(encrypt(mode, input, key32, iv)) FROM encryption_test; + +SELECT 'aes-128-ctr' as mode, hex(encrypt(mode, input, key16, iv)) FROM encryption_test; +SELECT 'aes-192-ctr' as mode, hex(encrypt(mode, input, key24, iv)) FROM encryption_test; +SELECT 'aes-256-ctr' as mode, hex(encrypt(mode, input, key32, iv)) FROM encryption_test; + +SELECT 'aes-128-ecb' as mode, hex(encrypt(mode, input, key16)) FROM encryption_test; +SELECT 'aes-192-ecb' as mode, hex(encrypt(mode, input, key24)) FROM encryption_test; +SELECT 'aes-256-ecb' as mode, hex(encrypt(mode, input, key32)) FROM encryption_test; + +SELECT 'aes-128-ofb' as mode, hex(encrypt(mode, input, key16, iv)) FROM encryption_test; +SELECT 'aes-192-ofb' as mode, hex(encrypt(mode, input, key24, iv)) FROM encryption_test; +SELECT 'aes-256-ofb' as mode, hex(encrypt(mode, input, key32, iv)) FROM encryption_test; + +SELECT 'GCM mode with IV'; +SELECT 'aes-128-gcm' as mode, hex(encrypt(mode, input, key16, iv)) FROM encryption_test; +SELECT 'aes-192-gcm' as mode, hex(encrypt(mode, input, key24, iv)) FROM encryption_test; +SELECT 'aes-256-gcm' as mode, hex(encrypt(mode, input, key32, iv)) FROM encryption_test; + +SELECT 'GCM mode with IV and AAD'; +SELECT 'aes-128-gcm' as mode, hex(encrypt(mode, input, key16, iv, 'AAD')) FROM encryption_test; +SELECT 'aes-192-gcm' as mode, hex(encrypt(mode, input, key24, iv, 'AAD')) FROM encryption_test; +SELECT 'aes-256-gcm' as mode, hex(encrypt(mode, input, key32, iv, 'AAD')) FROM encryption_test; + +SELECT 'Nullable and LowCardinality'; +WITH CAST(NULL as Nullable(String)) as input, 'aes-256-ofb' as mode SELECT toTypeName(input), hex(aes_encrypt_mysql(mode, input, key32,iv)) FROM encryption_test LIMIT 1; +WITH CAST('text' as Nullable(String)) as input, 'aes-256-ofb' as mode SELECT toTypeName(input), hex(aes_encrypt_mysql(mode, input, key32, iv)) 
FROM encryption_test LIMIT 1; +WITH CAST('text' as LowCardinality(String)) as input, 'aes-256-ofb' as mode SELECT toTypeName(input), hex(aes_encrypt_mysql(mode, input, key32, iv)) FROM encryption_test LIMIT 1; + +-- based on https://github.com/openssl/openssl/blob/master/demos/evp/aesgcm.c#L20 +WITH + unhex('eebc1f57487f51921c0465665f8ae6d1658bb26de6f8a069a3520293a572078f') as key, + unhex('67ba0510262ae487d737ee6298f77e0c') as tag, + unhex('99aa3e68ed8173a0eed06684') as iv, + unhex('f56e87055bc32d0eeb31b2eacc2bf2a5') as plaintext, + unhex('4d23c3cec334b49bdb370c437fec78de') as aad, + unhex('f7264413a84c0e7cd536867eb9f21736') as ciphertext +SELECT + hex(encrypt('aes-256-gcm', plaintext, key, iv, aad)) as ciphertext_actual, + ciphertext_actual = concat(hex(ciphertext), hex(tag)); + +DROP TABLE encryption_test; diff --git a/parser/testdata/01318_map_add_map_subtract/ast.json b/parser/testdata/01318_map_add_map_subtract/ast.json new file mode 100644 index 000000000..d748d6282 --- /dev/null +++ b/parser/testdata/01318_map_add_map_subtract/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001387519, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01318_map_add_map_subtract/metadata.json b/parser/testdata/01318_map_add_map_subtract/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01318_map_add_map_subtract/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01318_map_add_map_subtract/query.sql b/parser/testdata/01318_map_add_map_subtract/query.sql new file mode 100644 index 000000000..83f20b9e9 --- /dev/null +++ b/parser/testdata/01318_map_add_map_subtract/query.sql @@ -0,0 +1,45 @@ +drop table if exists tab; +create table tab engine=Memory() as (select ([1, number], [toInt32(2),2]) as map from numbers(1, 10)); + +-- mapAdd +select mapAdd([1], [1]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select mapAdd(([1], [1])); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select mapAdd(([1], [1]), map) from tab; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select mapAdd(([toUInt64(1)], [1]), map) from tab; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select mapAdd(([toUInt64(1), 2], [toInt32(1)]), map) from tab; -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +select mapAdd(([toUInt64(1)], [toInt32(1)]), map) from tab; +select mapAdd(cast(map, 'Tuple(Array(UInt8), Array(UInt8))'), ([1], [1]), ([2],[2]) ) from tab; + +-- cleanup +drop table tab; + +-- check types +select mapAdd(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) as res, toTypeName(res); +select mapAdd(([toUInt16(1), 2], [toUInt16(1), 1]), ([toUInt16(1), 2], [toUInt16(1), 1])) as res, toTypeName(res); +select mapAdd(([toUInt32(1), 2], [toUInt32(1), 1]), ([toUInt32(1), 2], [toUInt32(1), 1])) as res, toTypeName(res); +select mapAdd(([toUInt64(1), 2], [toUInt64(1), 1]), ([toUInt64(1), 2], [toUInt64(1), 1])) as res, toTypeName(res); + +select mapAdd(([toInt8(1), 2], [toInt8(1), 1]), ([toInt8(1), 2], [toInt8(1), 1])) as res, toTypeName(res); +select mapAdd(([toInt16(1), 2], [toInt16(1), 1]), ([toInt16(1), 2], [toInt16(1), 1])) as res, toTypeName(res); +select mapAdd(([toInt32(1), 2], [toInt32(1), 1]), ([toInt32(1), 2], [toInt32(1), 1])) as res, toTypeName(res); +select mapAdd(([toInt64(1), 2], [toInt64(1), 1]), ([toInt64(1), 2], 
[toInt64(1), 1])) as res, toTypeName(res); + +select mapAdd(([1, 2], [toFloat32(1.1), 1]), ([1, 2], [2.2, 1])) as res, toTypeName(res); +select mapAdd(([1, 2], [toFloat64(1.1), 1]), ([1, 2], [2.2, 1])) as res, toTypeName(res); +select mapAdd(([toFloat32(1), 2], [toFloat64(1.1), 1]), ([toFloat32(1), 2], [2.2, 1])) as res, toTypeName(res); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select mapAdd(([1, 2], [toFloat64(1.1), 1]), ([1, 2], [1, 1])) as res, toTypeName(res); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select mapAdd((['a', 'b'], [1, 1]), ([key], [1])) from values('key String', ('b'), ('c'), ('d')); +select mapAdd((cast(['a', 'b'], 'Array(FixedString(1))'), [1, 1]), ([key], [1])) as res, toTypeName(res) from values('key FixedString(1)', ('b'), ('c'), ('d')); +select mapAdd((cast(['a', 'b'], 'Array(LowCardinality(String))'), [1, 1]), ([key], [1])) from values('key String', ('b'), ('c'), ('d')); +select mapAdd((key, val), (key, val)) as res, toTypeName(res) from values ('key Array(Enum16(\'a\'=1, \'b\'=2)), val Array(Int16)', (['a'], [1]), (['b'], [1])); +select mapAdd((key, val), (key, val)) as res, toTypeName(res) from values ('key Array(Enum8(\'a\'=1, \'b\'=2)), val Array(Int16)', (['a'], [1]), (['b'], [1])); +select mapAdd((key, val), (key, val)) as res, toTypeName(res) from values ('key Array(UUID), val Array(Int32)', (['00000000-89ab-cdef-0123-456789abcdef'], [1]), (['11111111-89ab-cdef-0123-456789abcdef'], [2])); + +-- mapSubtract, same rules as mapAdd +select mapSubtract(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [1, 1])) as res, toTypeName(res); +select mapSubtract(([toUInt8(1), 2], [1, 1]), ([toUInt8(1), 2], [2, 2])) as res, toTypeName(res); -- overflow +select mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt16(2), 2])) as res, toTypeName(res); +select mapSubtract(([1, 2], [toFloat32(1.1), 1]), ([1, 2], [2.2, 1])) as res, toTypeName(res); +select mapSubtract(([toUInt8(1), 2], [toInt32(1), 1]), ([toUInt8(1), 2], [toInt16(2), 2])) as res, toTypeName(res); +select mapSubtract(([toUInt8(3)], [toInt32(1)]), ([toUInt8(1), 2], [toInt32(2), 2])) as res, toTypeName(res); diff --git a/parser/testdata/01318_map_add_map_subtract_on_map_type/ast.json b/parser/testdata/01318_map_add_map_subtract_on_map_type/ast.json new file mode 100644 index 000000000..66a75f631 --- /dev/null +++ b/parser/testdata/01318_map_add_map_subtract_on_map_type/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001134037, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01318_map_add_map_subtract_on_map_type/metadata.json b/parser/testdata/01318_map_add_map_subtract_on_map_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01318_map_add_map_subtract_on_map_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01318_map_add_map_subtract_on_map_type/query.sql b/parser/testdata/01318_map_add_map_subtract_on_map_type/query.sql new file mode 100644 index 000000000..6dcccda5d --- /dev/null +++ b/parser/testdata/01318_map_add_map_subtract_on_map_type/query.sql @@ -0,0 +1,47 @@ +drop table if exists tab; +create table tab engine=Memory() as (select map(1, toInt32(2), number, 2) as m from numbers(1, 10)); + +-- mapAdd +select mapAdd(map(1, 1)); -- { serverError 
NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select mapAdd(map(1, 1), m) from tab; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +select mapAdd(map(toUInt64(1), toInt32(1)), m) from tab; +select mapAdd(cast(m, 'Map(UInt8, UInt8)'), map(1, 1), map(2,2)) from tab; + +-- cleanup +drop table tab; + +-- check types +select mapAdd(map(toUInt8(1), 1, 2, 1), map(toUInt8(1), 1, 2, 1)) as res, toTypeName(res); +select mapAdd(map(toUInt16(1), toUInt16(1), 2, 1), map(toUInt16(1), toUInt16(1), 2, 1)) as res, toTypeName(res); +select mapAdd(map(toUInt32(1), toUInt32(1), 2, 1), map(toUInt32(1), toUInt32(1), 2, 1)) as res, toTypeName(res); +select mapAdd(map(toUInt64(1), toUInt64(1), 2, 1), map(toUInt64(1), toUInt64(1), 2, 1)) as res, toTypeName(res); +select mapAdd(map(toUInt128(1), toUInt128(1), 2, 1), map(toUInt128(1), toUInt128(1), 2, 1)) as res, toTypeName(res); +select mapAdd(map(toUInt256(1), toUInt256(1), 2, 1), map(toUInt256(1), toUInt256(1), 2, 1)) as res, toTypeName(res); + +select mapAdd(map(toInt8(1), 1, 2, 1), map(toInt8(1), 1, 2, 1)) as res, toTypeName(res); +select mapAdd(map(toInt16(1), toInt16(1), 2, 1), map(toInt16(1), toInt16(1), 2, 1)) as res, toTypeName(res); +select mapAdd(map(toInt32(1), toInt32(1), 2, 1), map(toInt32(1), toInt32(1), 2, 1)) as res, toTypeName(res); +select mapAdd(map(toInt64(1), toInt64(1), 2, 1), map(toInt64(1), toInt64(1), 2, 1)) as res, toTypeName(res); +select mapAdd(map(toInt128(1), toInt128(1), 2, 1), map(toInt128(1), toInt128(1), 2, 1)) as res, toTypeName(res); +select mapAdd(map(toInt256(1), toInt256(1), 2, 1), map(toInt256(1), toInt256(1), 2, 1)) as res, toTypeName(res); + +select mapAdd(map(1, toFloat32(1.1), 2, 1), map(1, 2.2, 2, 1)) as res, toTypeName(res); +select mapAdd(map(1.0, toFloat32(1.1), 2.0, 1), map(1.0, 2.2, 2.0, 1)) as res, toTypeName(res); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select mapAdd(map(toLowCardinality('ab'), toFloat32(1.1), toLowCardinality('cd'), 1), map(toLowCardinality('ab'), 2.2, toLowCardinality('cd'), 1)) as res, toTypeName(res); +select mapAdd(map(1, toFloat64(1.1), 2, 1), map(1, 2.2, 2, 1)) as res, toTypeName(res); +select mapAdd(map(1, toFloat64(1.1), 2, 1), map(1, 1, 2, 1)) as res, toTypeName(res); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select mapAdd(map('a', 1, 'b', 1), map(key, 1)) from values('key String', ('b'), ('c'), ('d')); +select mapAdd(map(cast('a', 'FixedString(1)'), 1, 'b', 1), map(key, 1)) as res, toTypeName(res) from values('key String', ('b'), ('c'), ('d')); +select mapAdd(map(cast('a', 'LowCardinality(String)'), 1, 'b', 1), map(key, 1)) from values('key String', ('b'), ('c'), ('d')); +select mapAdd(map(key, val), map(key, val)) as res, toTypeName(res) from values ('key Enum16(\'a\'=1, \'b\'=2), val Int16', ('a', 1), ('b', 1)); +select mapAdd(map(key, val), map(key, val)) as res, toTypeName(res) from values ('key Enum8(\'a\'=1, \'b\'=2), val Int16', ('a', 1), ('b', 1)); +select mapAdd(map(key, val), map(key, val)) as res, toTypeName(res) from values ('key UUID, val Int32', ('00000000-89ab-cdef-0123-456789abcdef', 1), ('11111111-89ab-cdef-0123-456789abcdef', 2)); + +-- mapSubtract, same rules as mapAdd +select mapSubtract(map(toUInt8(1), 1, 2, 1), map(toUInt8(1), 1, 2, 1)) as res, toTypeName(res); +select mapSubtract(map(toUInt8(1), 1, 2, 1), map(toUInt8(1), 2, 2, 2)) as res, toTypeName(res); -- overflow +select mapSubtract(map(toUInt8(1), toInt32(1), 2, 1), map(toUInt8(1), toInt16(2), 2, 2)) as res, toTypeName(res); +select mapSubtract(map(1, toFloat32(1.1), 2, 1), map(1, 2.2, 2, 1)) as res, 
toTypeName(res); +select mapSubtract(map(toUInt8(1), toInt32(1), 2, 1), map(toUInt8(1), toInt16(2), 2, 2)) as res, toTypeName(res); +select mapSubtract(map(toUInt8(3), toInt32(1)), map(toUInt8(1), toInt32(2), 2, 2)) as res, toTypeName(res); diff --git a/parser/testdata/01318_map_populate_series/ast.json b/parser/testdata/01318_map_populate_series/ast.json new file mode 100644 index 000000000..2619525e5 --- /dev/null +++ b/parser/testdata/01318_map_populate_series/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery map_test (children 1)" + }, + { + "explain": " Identifier map_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001578836, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01318_map_populate_series/metadata.json b/parser/testdata/01318_map_populate_series/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01318_map_populate_series/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01318_map_populate_series/query.sql b/parser/testdata/01318_map_populate_series/query.sql new file mode 100644 index 000000000..351ea87dc --- /dev/null +++ b/parser/testdata/01318_map_populate_series/query.sql @@ -0,0 +1,36 @@ +drop table if exists map_test; +create table map_test engine=TinyLog() as (select (number + 1) as n, ([1, number], [1,2]) as map from numbers(1, 5)); + +select mapPopulateSeries(map.1, map.2) from map_test; +select mapPopulateSeries(map.1, map.2, toUInt64(3)) from map_test; +select mapPopulateSeries(map.1, map.2, toUInt64(10)) from map_test; +select mapPopulateSeries(map.1, map.2, 10) from map_test; +select mapPopulateSeries(map.1, map.2, n) from map_test; +select mapPopulateSeries(map.1, [11,22]) from map_test; +select mapPopulateSeries([3, 4], map.2) from map_test; +select mapPopulateSeries([toUInt64(3), 4], map.2, n) from map_test; + +drop table map_test; + +select mapPopulateSeries([toUInt8(1), 2], [toUInt8(1), 1]) as res, toTypeName(res); +select mapPopulateSeries([toUInt16(1), 2], [toUInt16(1), 1]) as res, toTypeName(res); +select mapPopulateSeries([toUInt32(1), 2], [toUInt32(1), 1]) as res, toTypeName(res); +select mapPopulateSeries([toUInt64(1), 2], [toUInt64(1), 1]) as res, toTypeName(res); + +select mapPopulateSeries([toInt8(1), 2], [toInt8(1), 1]) as res, toTypeName(res); +select mapPopulateSeries([toInt16(1), 2], [toInt16(1), 1]) as res, toTypeName(res); +select mapPopulateSeries([toInt32(1), 2], [toInt32(1), 1]) as res, toTypeName(res); +select mapPopulateSeries([toInt64(1), 2], [toInt64(1), 1]) as res, toTypeName(res); + +select mapPopulateSeries([toInt8(-10), 2], [toInt8(1), 1]) as res, toTypeName(res); +select mapPopulateSeries([toInt16(-10), 2], [toInt16(1), 1]) as res, toTypeName(res); +select mapPopulateSeries([toInt32(-10), 2], [toInt32(1), 1]) as res, toTypeName(res); +select mapPopulateSeries([toInt64(-10), 2], [toInt64(1), 1]) as res, toTypeName(res); +select mapPopulateSeries([toInt64(-10), 2], [toInt64(1), 1], toInt64(-5)) as res, toTypeName(res); + +-- empty +select mapPopulateSeries(cast([], 'Array(UInt8)'), cast([], 'Array(UInt8)'), 5); + +select mapPopulateSeries(['1', '2'], [1, 1]) as res, toTypeName(res); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select mapPopulateSeries([1, 2, 3], [1, 1]) as res, toTypeName(res); -- { serverError BAD_ARGUMENTS } +select mapPopulateSeries([1, 2], [1, 1, 1]) as res, toTypeName(res); -- { serverError BAD_ARGUMENTS } diff 
--git a/parser/testdata/01318_parallel_final_stuck/ast.json b/parser/testdata/01318_parallel_final_stuck/ast.json new file mode 100644 index 000000000..7522bc4ab --- /dev/null +++ b/parser/testdata/01318_parallel_final_stuck/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery final_bug (children 1)" + }, + { + "explain": " Identifier final_bug" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001607467, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/01318_parallel_final_stuck/metadata.json b/parser/testdata/01318_parallel_final_stuck/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01318_parallel_final_stuck/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01318_parallel_final_stuck/query.sql b/parser/testdata/01318_parallel_final_stuck/query.sql new file mode 100644 index 000000000..1a54a9e0a --- /dev/null +++ b/parser/testdata/01318_parallel_final_stuck/query.sql @@ -0,0 +1,6 @@ +drop table if exists final_bug; +create table final_bug (x UInt64, y UInt8) engine = ReplacingMergeTree(y) order by x settings index_granularity = 8; +insert into final_bug select number % 10, 1 from numbers(1000); +insert into final_bug select number % 10, 1 from numbers(1000); +select x from final_bug final order by x settings max_threads=2, max_final_threads=2, max_block_size=8 format Null; +drop table if exists final_bug; \ No newline at end of file diff --git a/parser/testdata/01319_manual_write_to_replicas_long/ast.json b/parser/testdata/01319_manual_write_to_replicas_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01319_manual_write_to_replicas_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01319_manual_write_to_replicas_long/metadata.json b/parser/testdata/01319_manual_write_to_replicas_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01319_manual_write_to_replicas_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01319_manual_write_to_replicas_long/query.sql b/parser/testdata/01319_manual_write_to_replicas_long/query.sql new file mode 100644 index 000000000..8c9223992 --- /dev/null +++ b/parser/testdata/01319_manual_write_to_replicas_long/query.sql @@ -0,0 +1,31 @@ +-- Tags: long, replica, no-shared-merge-tree +-- no-shared-merge-tree: not possible to stop replicated sends + +DROP TABLE IF EXISTS r1; +DROP TABLE IF EXISTS r2; + +CREATE TABLE r1 (x String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/r', 'r1') ORDER BY x; +CREATE TABLE r2 (x String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/r', 'r2') ORDER BY x; + +SYSTEM STOP REPLICATED SENDS r1; +SYSTEM STOP REPLICATED SENDS r2; + +INSERT INTO r1 VALUES ('Hello, world'); +SELECT * FROM r1; +SELECT * FROM r2; +INSERT INTO r2 VALUES ('Hello, world'); +SELECT '---'; +SELECT * FROM r1; +SELECT * FROM r2; + +SYSTEM START REPLICATED SENDS r1; +SYSTEM START REPLICATED SENDS r2; +SYSTEM SYNC REPLICA r1; +SYSTEM SYNC REPLICA r2; + +SELECT '---'; +SELECT * FROM r1; +SELECT * FROM r2; + +DROP TABLE r1; +DROP TABLE r2; diff --git a/parser/testdata/01319_mv_constants_bug/ast.json b/parser/testdata/01319_mv_constants_bug/ast.json new file mode 100644 index 000000000..4052cf25c --- /dev/null +++ b/parser/testdata/01319_mv_constants_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + 
"name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery distributed_table_merged (children 1)" + }, + { + "explain": " Identifier distributed_table_merged" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00148002, + "rows_read": 2, + "bytes_read": 100 + } +} diff --git a/parser/testdata/01319_mv_constants_bug/metadata.json b/parser/testdata/01319_mv_constants_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01319_mv_constants_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01319_mv_constants_bug/query.sql b/parser/testdata/01319_mv_constants_bug/query.sql new file mode 100644 index 000000000..4abb9d61b --- /dev/null +++ b/parser/testdata/01319_mv_constants_bug/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS distributed_table_merged; +DROP TABLE IF EXISTS distributed_table_1; +DROP TABLE IF EXISTS distributed_table_2; +DROP TABLE IF EXISTS local_table_1; +DROP TABLE IF EXISTS local_table_2; +DROP TABLE IF EXISTS local_table_merged; + +CREATE TABLE local_table_1 (id String) ENGINE = MergeTree ORDER BY (id); +CREATE TABLE local_table_2(id String) ENGINE = MergeTree ORDER BY (id); + +CREATE TABLE local_table_merged (id String) ENGINE = Merge('default', 'local_table_1|local_table_2'); + +CREATE TABLE distributed_table_1 (id String) ENGINE = Distributed(test_shard_localhost, default, local_table_1); +CREATE TABLE distributed_table_2 (id String) ENGINE = Distributed(test_shard_localhost, default, local_table_2); + +CREATE TABLE distributed_table_merged (id String) ENGINE = Merge('default', 'distributed_table_1|distributed_table_2'); + +SELECT 1 FROM distributed_table_merged; + +DROP TABLE IF EXISTS distributed_table_merged; +DROP TABLE IF EXISTS distributed_table_1; +DROP TABLE IF EXISTS distributed_table_2; +DROP TABLE IF EXISTS local_table_1; +DROP TABLE IF EXISTS local_table_2; +DROP TABLE local_table_merged; diff --git a/parser/testdata/01319_optimize_skip_unused_shards_nesting/ast.json b/parser/testdata/01319_optimize_skip_unused_shards_nesting/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01319_optimize_skip_unused_shards_nesting/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01319_optimize_skip_unused_shards_nesting/metadata.json b/parser/testdata/01319_optimize_skip_unused_shards_nesting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01319_optimize_skip_unused_shards_nesting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01319_optimize_skip_unused_shards_nesting/query.sql b/parser/testdata/01319_optimize_skip_unused_shards_nesting/query.sql new file mode 100644 index 000000000..09e535e6a --- /dev/null +++ b/parser/testdata/01319_optimize_skip_unused_shards_nesting/query.sql @@ -0,0 +1,28 @@ +-- Tags: shard + +drop table if exists data_01319; +drop table if exists dist_01319; +drop table if exists dist_layer_01319; + +create table data_01319 (key Int, sub_key Int) Engine=Null(); + +create table dist_layer_01319 as data_01319 Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01319, sub_key); +-- test_unavailable_shard here to check that optimize_skip_unused_shards always +-- remove some nodes from the cluster for the first nesting level +create table dist_01319 as data_01319 Engine=Distributed(test_unavailable_shard, currentDatabase(), dist_layer_01319, key+1); + +set optimize_skip_unused_shards=1; 
+set force_optimize_skip_unused_shards=1; + +set force_optimize_skip_unused_shards_nesting=2; +set optimize_skip_unused_shards_nesting=2; +select * from dist_01319 where key = 1; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +set force_optimize_skip_unused_shards_nesting=1; +select * from dist_01319 where key = 1; +set force_optimize_skip_unused_shards_nesting=2; +set optimize_skip_unused_shards_nesting=1; +select * from dist_01319 where key = 1; + +drop table data_01319; +drop table dist_01319; +drop table dist_layer_01319; diff --git a/parser/testdata/01319_query_formatting_in_server_log/ast.json b/parser/testdata/01319_query_formatting_in_server_log/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01319_query_formatting_in_server_log/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01319_query_formatting_in_server_log/metadata.json b/parser/testdata/01319_query_formatting_in_server_log/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01319_query_formatting_in_server_log/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01319_query_formatting_in_server_log/query.sql b/parser/testdata/01319_query_formatting_in_server_log/query.sql new file mode 100644 index 000000000..814ee1468 --- /dev/null +++ b/parser/testdata/01319_query_formatting_in_server_log/query.sql @@ -0,0 +1,7 @@ +SeLeCt 'ab +cd' /* hello */ -- world +, 1; + +SET max_rows_to_read = 0; -- system.text_log can be really big +SYSTEM FLUSH LOGS text_log; +SELECT extract(message, 'SeL.+?;') FROM system.text_log WHERE event_date >= yesterday() AND message LIKE '%SeLeCt \'ab\n%' and logger_name = 'executeQuery' ORDER BY event_time DESC LIMIT 1 FORMAT TSVRaw; diff --git a/parser/testdata/01320_optimize_skip_unused_shards_no_non_deterministic/ast.json b/parser/testdata/01320_optimize_skip_unused_shards_no_non_deterministic/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01320_optimize_skip_unused_shards_no_non_deterministic/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01320_optimize_skip_unused_shards_no_non_deterministic/metadata.json b/parser/testdata/01320_optimize_skip_unused_shards_no_non_deterministic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01320_optimize_skip_unused_shards_no_non_deterministic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01320_optimize_skip_unused_shards_no_non_deterministic/query.sql b/parser/testdata/01320_optimize_skip_unused_shards_no_non_deterministic/query.sql new file mode 100644 index 000000000..778192d36 --- /dev/null +++ b/parser/testdata/01320_optimize_skip_unused_shards_no_non_deterministic/query.sql @@ -0,0 +1,15 @@ +-- Tags: shard + +drop table if exists data_01320; +drop table if exists dist_01320; + +create table data_01320 (key Int) Engine=Null(); +-- non deterministic function (i.e. 
rand()) +create table dist_01320 as data_01320 Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01320, key + rand()); + +set optimize_skip_unused_shards=1; +set force_optimize_skip_unused_shards=1; +select * from dist_01320 where key = 0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } + +drop table data_01320; +drop table dist_01320; diff --git a/parser/testdata/01321_aggregate_functions_of_group_by_keys/ast.json b/parser/testdata/01321_aggregate_functions_of_group_by_keys/ast.json new file mode 100644 index 000000000..50dfff415 --- /dev/null +++ b/parser/testdata/01321_aggregate_functions_of_group_by_keys/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001690037, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01321_aggregate_functions_of_group_by_keys/metadata.json b/parser/testdata/01321_aggregate_functions_of_group_by_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01321_aggregate_functions_of_group_by_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01321_aggregate_functions_of_group_by_keys/query.sql b/parser/testdata/01321_aggregate_functions_of_group_by_keys/query.sql new file mode 100644 index 000000000..3f08936e6 --- /dev/null +++ b/parser/testdata/01321_aggregate_functions_of_group_by_keys/query.sql @@ -0,0 +1,24 @@ +set optimize_aggregators_of_group_by_keys = 1; + +SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +SELECT max((number % 5) * (number % 7)) AS a FROM numbers(10000000) GROUP BY number % 7, number % 5 ORDER BY a; +SELECT foo FROM (SELECT anyLast(number) AS foo FROM numbers(1) GROUP BY number); +SELECT anyLast(number) FROM numbers(1) GROUP BY number; + +EXPLAIN SYNTAX SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +EXPLAIN SYNTAX SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +EXPLAIN SYNTAX SELECT max((number % 5) * (number % 7)) AS a FROM numbers(10000000) GROUP BY number % 7, number % 5 ORDER BY a; +EXPLAIN SYNTAX SELECT foo FROM (SELECT anyLast(number) AS foo FROM numbers(1) GROUP BY number); + +set optimize_aggregators_of_group_by_keys = 0; + +SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +SELECT max((number % 5) * (number % 7)) AS a FROM numbers(10000000) GROUP BY number % 7, number % 5 ORDER BY a; +SELECT foo FROM (SELECT anyLast(number) AS foo FROM numbers(1) GROUP BY number); + +EXPLAIN SYNTAX SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +EXPLAIN SYNTAX SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +EXPLAIN SYNTAX SELECT max((number % 5) * (number % 7)) AS a FROM numbers(10000000) GROUP BY number % 7, number % 5 ORDER BY a; +EXPLAIN SYNTAX SELECT foo FROM (SELECT anyLast(number) AS foo FROM numbers(1) 
GROUP BY number); diff --git a/parser/testdata/01321_monotonous_functions_in_order_by_bug/ast.json b/parser/testdata/01321_monotonous_functions_in_order_by_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01321_monotonous_functions_in_order_by_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01321_monotonous_functions_in_order_by_bug/metadata.json b/parser/testdata/01321_monotonous_functions_in_order_by_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01321_monotonous_functions_in_order_by_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01321_monotonous_functions_in_order_by_bug/query.sql b/parser/testdata/01321_monotonous_functions_in_order_by_bug/query.sql new file mode 100644 index 000000000..4aa52fe6a --- /dev/null +++ b/parser/testdata/01321_monotonous_functions_in_order_by_bug/query.sql @@ -0,0 +1,7 @@ +SELECT + toStartOfHour(c1) AS _c1, + c2 +FROM values((toDateTime('2020-01-01 01:01:01'), 999), (toDateTime('2020-01-01 01:01:59'), 1)) +ORDER BY + _c1 ASC, + c2 ASC diff --git a/parser/testdata/01322_cast_keep_nullable/ast.json b/parser/testdata/01322_cast_keep_nullable/ast.json new file mode 100644 index 000000000..70eeb1ab2 --- /dev/null +++ b/parser/testdata/01322_cast_keep_nullable/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001691037, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01322_cast_keep_nullable/metadata.json b/parser/testdata/01322_cast_keep_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01322_cast_keep_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01322_cast_keep_nullable/query.sql b/parser/testdata/01322_cast_keep_nullable/query.sql new file mode 100644 index 000000000..2f6fd80c3 --- /dev/null +++ b/parser/testdata/01322_cast_keep_nullable/query.sql @@ -0,0 +1,18 @@ +SET cast_keep_nullable = 0; + +SELECT CAST(toNullable(toInt32(0)) AS Int32) as x, toTypeName(x); +SELECT CAST(toNullable(toInt8(0)) AS Int32) as x, toTypeName(x); + +SET cast_keep_nullable = 1; + +SELECT CAST(toNullable(toInt32(1)) AS Int32) as x, toTypeName(x); +SELECT CAST(toNullable(toInt8(1)) AS Int32) as x, toTypeName(x); + +SELECT CAST(toNullable(toFloat32(2)), 'Float32') as x, toTypeName(x); +SELECT CAST(toNullable(toFloat32(2)), 'UInt8') as x, toTypeName(x); + +SELECT CAST(if(1 = 1, toNullable(toInt8(3)), NULL) AS Int32) as x, toTypeName(x); +SELECT CAST(if(1 = 0, toNullable(toInt8(3)), NULL) AS Int32) as x, toTypeName(x); + +SELECT CAST(a, 'Int32') as x, toTypeName(x) FROM (SELECT materialize(CAST(42, 'Nullable(UInt8)')) AS a); +SELECT CAST(a, 'Int32') as x, toTypeName(x) FROM (SELECT materialize(CAST(NULL, 'Nullable(UInt8)')) AS a); diff --git a/parser/testdata/01323_add_scalars_in_time/ast.json b/parser/testdata/01323_add_scalars_in_time/ast.json new file mode 100644 index 000000000..7f7574a9b --- /dev/null +++ b/parser/testdata/01323_add_scalars_in_time/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001235447, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01323_add_scalars_in_time/metadata.json 
b/parser/testdata/01323_add_scalars_in_time/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01323_add_scalars_in_time/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01323_add_scalars_in_time/query.sql b/parser/testdata/01323_add_scalars_in_time/query.sql new file mode 100644 index 000000000..c337cd86f --- /dev/null +++ b/parser/testdata/01323_add_scalars_in_time/query.sql @@ -0,0 +1,74 @@ +SET optimize_on_insert = 0; + +DROP TABLE IF EXISTS tags; + +CREATE TABLE tags ( + id String, + seqs Array(UInt8), + create_time DateTime DEFAULT now() +) engine=ReplacingMergeTree() +ORDER BY (id); + +INSERT INTO tags(id, seqs) VALUES ('id1', [1,2,3]), ('id2', [0,2,3]), ('id1', [1,3]); + +WITH + (SELECT [0, 1, 2, 3]) AS arr1 +SELECT arraySort(arrayIntersect(argMax(seqs, create_time), arr1)) AS common, id +FROM tags +WHERE id LIKE 'id%' +GROUP BY id +ORDER BY id; + +DROP TABLE tags; + + +-- https://github.com/ClickHouse/ClickHouse/issues/15294 + +drop table if exists TestTable; + +create table TestTable (column String, start DateTime, end DateTime) engine MergeTree order by start; + +insert into TestTable (column, start, end) values('test', toDateTime('2020-07-20 09:00:00'), toDateTime('2020-07-20 20:00:00')),('test1', toDateTime('2020-07-20 09:00:00'), toDateTime('2020-07-20 20:00:00')),('test2', toDateTime('2020-07-20 09:00:00'), toDateTime('2020-07-20 20:00:00')); + +SELECT column, +(SELECT d from (select [1, 2, 3, 4] as d)) as d +FROM TestTable +where column == 'test' +GROUP BY column; + +drop table TestTable; + +-- https://github.com/ClickHouse/ClickHouse/issues/11407 + +drop table if exists aaa; +drop table if exists bbb; + +CREATE TABLE aaa ( + id UInt16, + data String +) +ENGINE = MergeTree() +PARTITION BY tuple() +ORDER BY id; + +INSERT INTO aaa VALUES (1, 'sef'),(2, 'fre'),(3, 'jhg'); + +CREATE TABLE bbb ( + id UInt16, + data String +) +ENGINE = MergeTree() +PARTITION BY tuple() +ORDER BY id; + +INSERT INTO bbb VALUES (2, 'fre'), (3, 'jhg'); + +with (select groupArray(id) from bbb) as ids +select * + from aaa + where has(ids, id) +order by id; + + +drop table aaa; +drop table bbb; diff --git a/parser/testdata/01323_bad_arg_in_arithmetic_operations/ast.json b/parser/testdata/01323_bad_arg_in_arithmetic_operations/ast.json new file mode 100644 index 000000000..0542509a6 --- /dev/null +++ b/parser/testdata/01323_bad_arg_in_arithmetic_operations/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001603697, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01323_bad_arg_in_arithmetic_operations/metadata.json b/parser/testdata/01323_bad_arg_in_arithmetic_operations/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01323_bad_arg_in_arithmetic_operations/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01323_bad_arg_in_arithmetic_operations/query.sql b/parser/testdata/01323_bad_arg_in_arithmetic_operations/query.sql new file mode 100644 index 000000000..f362979b1 --- /dev/null +++ b/parser/testdata/01323_bad_arg_in_arithmetic_operations/query.sql @@ -0,0 +1,15 @@ +SET optimize_arithmetic_operations_in_aggregate_functions = 1; + +SELECT max(multiply(1)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT min(multiply(2));-- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT 
sum(multiply(3)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT max(plus(1)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT min(plus(2)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT sum(plus(3)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT max(multiply()); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT min(multiply(1, 2 ,3)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT sum(plus() + multiply()); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT sum(plus(multiply(42, 3), multiply(42))); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/parser/testdata/01323_if_with_nulls/ast.json b/parser/testdata/01323_if_with_nulls/ast.json new file mode 100644 index 000000000..ee22c11c3 --- /dev/null +++ b/parser/testdata/01323_if_with_nulls/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function if (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001604441, + "rows_read": 19, + "bytes_read": 730 + } +} diff --git a/parser/testdata/01323_if_with_nulls/metadata.json b/parser/testdata/01323_if_with_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01323_if_with_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01323_if_with_nulls/query.sql b/parser/testdata/01323_if_with_nulls/query.sql new file mode 100644 index 000000000..6a4df79d7 --- /dev/null +++ b/parser/testdata/01323_if_with_nulls/query.sql @@ -0,0 +1,55 @@ +SELECT if(1 = 0, toNullable(toUInt8(0)), NULL) AS x, toTypeName(x); +SELECT if(1 = 1, toNullable(toUInt8(0)), NULL) AS x, toTypeName(x); +SELECT if(1 = 1, NULL, toNullable(toUInt8(0))) AS x, toTypeName(x); +SELECT if(1 = 0, NULL, toNullable(toUInt8(0))) AS x, toTypeName(x); + +SELECT if(toUInt8(0), NULL, toNullable(toUInt8(0))) AS x, if(x = 0, 'ok', 'fail'); +SELECT if(toUInt8(1), NULL, toNullable(toUInt8(0))) AS x, if(x = 0, 'fail', 'ok'); +SELECT if(toUInt8(1), toNullable(toUInt8(0)), NULL) AS x, if(x = 0, 'ok', 'fail'); +SELECT if(toUInt8(0), toNullable(toUInt8(0)), NULL) AS x, if(x = 0, 'fail', 'ok'); + +SELECT if(x = 0, 'ok', 'fail') FROM (SELECT toNullable(toUInt8(0)) AS x); +SELECT if(x = 0, 'fail', 'ok') FROM (SELECT CAST(NULL, 'Nullable(UInt8)') AS x); +SELECT if(x = 0, 'fail', 'ok') FROM (SELECT materialize(CAST(NULL, 'Nullable(UInt8)')) AS x); + +SELECT if(x = 0, 'ok', 'fail') FROM (SELECT if(toUInt8(1), toNullable(toUInt8(0)), NULL) AS x); +SELECT if(x = 0, 'fail', 'ok') FROM 
(SELECT if(toUInt8(0), toNullable(toUInt8(0)), NULL) AS x); + +SELECT if(x = 0, 'ok', 'fail') FROM (SELECT if(toUInt8(0), NULL, toNullable(toUInt8(0))) AS x); +SELECT if(x = 0, 'fail', 'ok') FROM (SELECT if(toUInt8(1), NULL, toNullable(toUInt8(0))) AS x); + +SELECT toTypeName(x), x, isNull(x), if(x = 0, 'fail', 'ok'), if(x = 1, 'fail', 'ok'), if(x >= 0, 'fail', 'ok') +FROM (SELECT CAST(NULL, 'Nullable(UInt8)') AS x); + +SELECT toTypeName(x), x, isNull(x), if(x = 0, 'fail', 'ok'), if(x = 1, 'fail', 'ok'), if(x >= 0, 'fail', 'ok') +FROM (SELECT materialize(CAST(NULL, 'Nullable(UInt8)')) AS x); + +SELECT toTypeName(x), x, isNull(x), if(x = 0, 'fail', 'ok'), if(x = 1, 'fail', 'ok'), if(x >= 0, 'fail', 'ok') +FROM (SELECT if(1 = 0, toNullable(toUInt8(0)), NULL) AS x); + +SELECT toTypeName(x), x, isNull(x), if(x = 0, 'fail', 'ok'), if(x = 1, 'fail', 'ok'), if(x >= 0, 'fail', 'ok') +FROM (SELECT materialize(if(1 = 0, toNullable(toUInt8(0)), NULL)) AS x); + +SET join_use_nulls = 1; + +SELECT b_num, isNull(b_num), toTypeName(b_num), b_num = 0, if(b_num = 0, 'fail', 'ok') +FROM (SELECT 1 k, toInt8(1) a_num) AS x +LEFT JOIN (SELECT 2 k, toInt8(1) b_num) AS y +USING (k); + +-- test case from https://github.com/ClickHouse/ClickHouse/issues/7347 +DROP TABLE IF EXISTS test_nullable_float_issue7347; +CREATE TABLE test_nullable_float_issue7347 (ne UInt64,test Nullable(Float64)) ENGINE = MergeTree() PRIMARY KEY (ne) ORDER BY (ne); +INSERT INTO test_nullable_float_issue7347 VALUES (1,NULL); + +SELECT test, toTypeName(test), IF(test = 0, 1, 0) FROM test_nullable_float_issue7347; + +WITH materialize(CAST(NULL, 'Nullable(Float64)')) AS test SELECT test, toTypeName(test), IF(test = 0, 1, 0); + +DROP TABLE test_nullable_float_issue7347; + +-- test case from https://github.com/ClickHouse/ClickHouse/issues/10846 + +SELECT if(isFinite(toUInt64OrZero(toNullable('123'))), 1, 0); + +SELECT if(materialize(isFinite(toUInt64OrZero(toNullable('123')))), 1, 0); diff --git a/parser/testdata/01323_redundant_functions_in_order_by/ast.json b/parser/testdata/01323_redundant_functions_in_order_by/ast.json new file mode 100644 index 000000000..dd8d3e01d --- /dev/null +++ b/parser/testdata/01323_redundant_functions_in_order_by/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001434076, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01323_redundant_functions_in_order_by/metadata.json b/parser/testdata/01323_redundant_functions_in_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01323_redundant_functions_in_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01323_redundant_functions_in_order_by/query.sql b/parser/testdata/01323_redundant_functions_in_order_by/query.sql new file mode 100644 index 000000000..fb1eed166 --- /dev/null +++ b/parser/testdata/01323_redundant_functions_in_order_by/query.sql @@ -0,0 +1,60 @@ +SET single_join_prefer_left_table = 0; + +DROP TABLE IF EXISTS test; + +CREATE TABLE test (key UInt64, a UInt8, b String, c Float64) ENGINE = MergeTree() ORDER BY key; +INSERT INTO test SELECT number, number, toString(number), number from numbers(4); + +set optimize_redundant_functions_in_order_by = 1; + +SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(x)); +SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, 
exp(x)) SETTINGS enable_analyzer=1; +SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(exp(x))); +SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(exp(x))) SETTINGS enable_analyzer=1; +SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY exp(x), x); +SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY exp(x), x) SETTINGS enable_analyzer=1; +SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s FULL JOIN test t USING(key) ORDER BY s.key, t.key; +SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s FULL JOIN test t USING(key) ORDER BY s.key, t.key SETTINGS enable_analyzer=1; +SELECT key, a FROM test ORDER BY key, a, exp(key + a); +SELECT key, a FROM test ORDER BY key, a, exp(key + a) SETTINGS enable_analyzer=1; +SELECT key, a FROM test ORDER BY key, exp(key + a); +SELECT key, a FROM test ORDER BY key, exp(key + a) SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(x)); +EXPLAIN QUERY TREE run_passes=1 SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(x)) settings enable_analyzer=1; +EXPLAIN SYNTAX SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(exp(x))); +EXPLAIN QUERY TREE run_passes=1 SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(exp(x))) settings enable_analyzer=1; +EXPLAIN SYNTAX SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY exp(x), x); +EXPLAIN QUERY TREE run_passes=1 SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY exp(x), x) settings enable_analyzer=1; +EXPLAIN SYNTAX SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s FULL JOIN test t USING(key) ORDER BY s.key, t.key; +EXPLAIN QUERY TREE run_passes=1 SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s FULL JOIN test t USING(key) ORDER BY s.key, t.key settings enable_analyzer=1; +EXPLAIN SYNTAX SELECT key, a FROM test ORDER BY key, a, exp(key + a); +EXPLAIN QUERY TREE run_passes=1 SELECT key, a FROM test ORDER BY key, a, exp(key + a) settings enable_analyzer=1; +EXPLAIN SYNTAX SELECT key, a FROM test ORDER BY key, exp(key + a); +EXPLAIN QUERY TREE run_passes=1 SELECT key, a FROM test ORDER BY key, exp(key + a) settings enable_analyzer=1; +EXPLAIN QUERY TREE run_passes=1 SELECT key FROM test GROUP BY key ORDER BY avg(a), key settings enable_analyzer=1; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +CREATE TABLE t1 (id UInt64) ENGINE = MergeTree() ORDER BY id; +CREATE TABLE t2 (id UInt64) ENGINE = MergeTree() ORDER BY id; + +EXPLAIN QUERY TREE run_passes=1 SELECT * FROM t1 INNER JOIN t2 ON t1.id = t2.id ORDER BY t1.id, t2.id settings enable_analyzer=1; + +set optimize_redundant_functions_in_order_by = 0; + +SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(x)); +SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(exp(x))); +SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY exp(x), x); +SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s FULL JOIN test t USING(key) ORDER BY s.key, t.key; +SELECT key, a FROM test ORDER BY key, a, exp(key + a); +SELECT key, a FROM test ORDER BY key, exp(key + a); +EXPLAIN SYNTAX SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(x)); +EXPLAIN SYNTAX SELECT groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY x, exp(exp(x))); +EXPLAIN SYNTAX SELECT 
groupArray(x) from (SELECT number as x FROM numbers(3) ORDER BY exp(x), x); +EXPLAIN SYNTAX SELECT * FROM (SELECT number + 2 AS key FROM numbers(4)) s FULL JOIN test t USING(key) ORDER BY s.key, t.key; +EXPLAIN SYNTAX SELECT key, a FROM test ORDER BY key, a, exp(key + a); +EXPLAIN SYNTAX SELECT key, a FROM test ORDER BY key, exp(key + a); + +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE test; diff --git a/parser/testdata/01323_too_many_threads_bug/ast.json b/parser/testdata/01323_too_many_threads_bug/ast.json new file mode 100644 index 000000000..bf2b7c667 --- /dev/null +++ b/parser/testdata/01323_too_many_threads_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_01323_many_parts (children 1)" + }, + { + "explain": " Identifier table_01323_many_parts" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00155205, + "rows_read": 2, + "bytes_read": 96 + } +} diff --git a/parser/testdata/01323_too_many_threads_bug/metadata.json b/parser/testdata/01323_too_many_threads_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01323_too_many_threads_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01323_too_many_threads_bug/query.sql b/parser/testdata/01323_too_many_threads_bug/query.sql new file mode 100644 index 000000000..c938fdbc6 --- /dev/null +++ b/parser/testdata/01323_too_many_threads_bug/query.sql @@ -0,0 +1,24 @@ +drop table if exists table_01323_many_parts; + +set remote_filesystem_read_method = 'read'; +set local_filesystem_read_method = 'pread'; +set load_marks_asynchronously = 0; +set allow_asynchronous_read_from_io_pool_for_merge_tree = 0; + +create table table_01323_many_parts (x UInt64) engine = MergeTree order by x partition by x % 100; +set max_partitions_per_insert_block = 100; +insert into table_01323_many_parts select number from numbers(100000); + +set max_threads = 16; +set log_queries = 1; +select x from table_01323_many_parts limit 10 format Null; + +system flush logs query_log; +select peak_threads_usage <= 4 from system.query_log where current_database = currentDatabase() AND event_date >= today() - 1 and query ilike '%select x from table_01323_many_parts%' and query not like '%system.query_log%' and type = 'QueryFinish' order by query_start_time desc limit 1; + +select x from table_01323_many_parts order by x limit 10 format Null; + +system flush logs query_log; +select peak_threads_usage <= 36 from system.query_log where current_database = currentDatabase() AND event_date >= today() - 1 and query ilike '%select x from table_01323_many_parts order by x%' and query not like '%system.query_log%' and type = 'QueryFinish' order by query_start_time desc limit 1; + +drop table if exists table_01323_many_parts; diff --git a/parser/testdata/01324_if_transform_strings_to_enum/ast.json b/parser/testdata/01324_if_transform_strings_to_enum/ast.json new file mode 100644 index 000000000..6f424cbcf --- /dev/null +++ b/parser/testdata/01324_if_transform_strings_to_enum/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001316437, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01324_if_transform_strings_to_enum/metadata.json b/parser/testdata/01324_if_transform_strings_to_enum/metadata.json new file mode 100644 index 000000000..ef120d978 --- 
/dev/null +++ b/parser/testdata/01324_if_transform_strings_to_enum/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01324_if_transform_strings_to_enum/query.sql b/parser/testdata/01324_if_transform_strings_to_enum/query.sql new file mode 100644 index 000000000..ee2f48a53 --- /dev/null +++ b/parser/testdata/01324_if_transform_strings_to_enum/query.sql @@ -0,0 +1,13 @@ +set optimize_if_transform_strings_to_enum = 1; + +SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; +SELECT number > 5 ? 'censor.net' : 'google' FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT number > 5 ? 'censor.net' : 'google' FROM system.numbers LIMIT 10; + +set optimize_if_transform_strings_to_enum = 0; + +SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; +SELECT number > 5 ? 'censor.net' : 'google' FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT number > 5 ? 'censor.net' : 'google' FROM system.numbers LIMIT 10; diff --git a/parser/testdata/01324_insert_tsv_raw/ast.json b/parser/testdata/01324_insert_tsv_raw/ast.json new file mode 100644 index 000000000..22cdae020 --- /dev/null +++ b/parser/testdata/01324_insert_tsv_raw/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tsv_raw (children 1)" + }, + { + "explain": " Identifier tsv_raw" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001112892, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/01324_insert_tsv_raw/metadata.json b/parser/testdata/01324_insert_tsv_raw/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01324_insert_tsv_raw/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01324_insert_tsv_raw/query.sql b/parser/testdata/01324_insert_tsv_raw/query.sql new file mode 100644 index 000000000..4827f3459 --- /dev/null +++ b/parser/testdata/01324_insert_tsv_raw/query.sql @@ -0,0 +1,7 @@ +drop table if exists tsv_raw; +create table tsv_raw (strval String, intval Int64, b1 String, b2 String, b3 String, b4 String) engine = Memory; +insert into tsv_raw format TSVRaw "a 1 \ \\ "\"" "\\"" + +select * from tsv_raw format TSVRaw; +select * from tsv_raw format JSONCompactEachRow; +drop table tsv_raw; diff --git a/parser/testdata/01324_settings_documentation/ast.json b/parser/testdata/01324_settings_documentation/ast.json new file mode 100644 index 000000000..6c2dde351 --- /dev/null +++ b/parser/testdata/01324_settings_documentation/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Settings description should start with capital letter'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.00104858, + "rows_read": 5, + "bytes_read": 224 + } +} diff --git a/parser/testdata/01324_settings_documentation/metadata.json b/parser/testdata/01324_settings_documentation/metadata.json new file 
mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01324_settings_documentation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01324_settings_documentation/query.sql b/parser/testdata/01324_settings_documentation/query.sql new file mode 100644 index 000000000..15736f3bc --- /dev/null +++ b/parser/testdata/01324_settings_documentation/query.sql @@ -0,0 +1,2 @@ +SELECT 'Settings description should start with capital letter'; +SELECT name, description FROM system.settings WHERE substring(description, 1, 1) != upper(substring(description, 1, 1)); diff --git a/parser/testdata/01325_freeze_mutation_stuck/ast.json b/parser/testdata/01325_freeze_mutation_stuck/ast.json new file mode 100644 index 000000000..37724f988 --- /dev/null +++ b/parser/testdata/01325_freeze_mutation_stuck/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mt (children 1)" + }, + { + "explain": " Identifier mt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001156993, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01325_freeze_mutation_stuck/metadata.json b/parser/testdata/01325_freeze_mutation_stuck/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01325_freeze_mutation_stuck/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01325_freeze_mutation_stuck/query.sql b/parser/testdata/01325_freeze_mutation_stuck/query.sql new file mode 100644 index 000000000..ce2f760cd --- /dev/null +++ b/parser/testdata/01325_freeze_mutation_stuck/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS mt; +CREATE TABLE mt (x String, y UInt64, INDEX idx (y) TYPE minmax GRANULARITY 1) ENGINE = MergeTree ORDER BY y; +INSERT INTO mt VALUES ('Hello, world', 1); + +SELECT * FROM mt; +ALTER TABLE mt FREEZE; +SELECT * FROM mt; + +SET mutations_sync = 1; +ALTER TABLE mt UPDATE x = 'Goodbye' WHERE y = 1; +SELECT * FROM mt; + +DROP TABLE mt; diff --git a/parser/testdata/01326_build_id/ast.json b/parser/testdata/01326_build_id/ast.json new file mode 100644 index 000000000..3965c09ff --- /dev/null +++ b/parser/testdata/01326_build_id/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function greaterOrEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function length (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function buildId (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal UInt64_16" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001433511, + "rows_read": 11, + "bytes_read": 429 + } +} diff --git a/parser/testdata/01326_build_id/metadata.json b/parser/testdata/01326_build_id/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01326_build_id/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01326_build_id/query.sql b/parser/testdata/01326_build_id/query.sql new file mode 100644 index 000000000..4451ec360 --- /dev/null +++ b/parser/testdata/01326_build_id/query.sql @@ -0,0 +1 @@ +SELECT length(buildId()) >= 16; diff 
--git a/parser/testdata/01326_fixed_string_comparison_denny_crane/ast.json b/parser/testdata/01326_fixed_string_comparison_denny_crane/ast.json new file mode 100644 index 000000000..6aa60e189 --- /dev/null +++ b/parser/testdata/01326_fixed_string_comparison_denny_crane/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function greater (alias r) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toFixedString (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function unhex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '202005295555'" + }, + { + "explain": " Literal UInt64_15" + }, + { + "explain": " Function unhex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '20200529'" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001167228, + "rows_read": 15, + "bytes_read": 608 + } +} diff --git a/parser/testdata/01326_fixed_string_comparison_denny_crane/metadata.json b/parser/testdata/01326_fixed_string_comparison_denny_crane/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01326_fixed_string_comparison_denny_crane/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01326_fixed_string_comparison_denny_crane/query.sql b/parser/testdata/01326_fixed_string_comparison_denny_crane/query.sql new file mode 100644 index 000000000..c26e78e53 --- /dev/null +++ b/parser/testdata/01326_fixed_string_comparison_denny_crane/query.sql @@ -0,0 +1,2 @@ +select toFixedString(unhex('202005295555'), 15) > unhex('20200529') r; +select materialize(toFixedString(unhex('202005295555'), 15)) > unhex('20200529') r; diff --git a/parser/testdata/01326_hostname_alias/ast.json b/parser/testdata/01326_hostname_alias/ast.json new file mode 100644 index 000000000..536a62112 --- /dev/null +++ b/parser/testdata/01326_hostname_alias/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function hostname (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function hostName (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.00122753, + "rows_read": 10, + "bytes_read": 375 + } +} diff --git a/parser/testdata/01326_hostname_alias/metadata.json b/parser/testdata/01326_hostname_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01326_hostname_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01326_hostname_alias/query.sql b/parser/testdata/01326_hostname_alias/query.sql new file mode 100644 index 000000000..4e53a557e --- /dev/null +++ b/parser/testdata/01326_hostname_alias/query.sql @@ -0,0 +1 @@ +SELECT hostname() = 
hostName(); diff --git a/parser/testdata/01327_decimal_cut_extra_digits_after_point/ast.json b/parser/testdata/01327_decimal_cut_extra_digits_after_point/ast.json new file mode 100644 index 000000000..9b7f68a44 --- /dev/null +++ b/parser/testdata/01327_decimal_cut_extra_digits_after_point/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1.1'" + }, + { + "explain": " Literal 'Decimal(10, 5)'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001574937, + "rows_read": 8, + "bytes_read": 292 + } +} diff --git a/parser/testdata/01327_decimal_cut_extra_digits_after_point/metadata.json b/parser/testdata/01327_decimal_cut_extra_digits_after_point/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01327_decimal_cut_extra_digits_after_point/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01327_decimal_cut_extra_digits_after_point/query.sql b/parser/testdata/01327_decimal_cut_extra_digits_after_point/query.sql new file mode 100644 index 000000000..df171b183 --- /dev/null +++ b/parser/testdata/01327_decimal_cut_extra_digits_after_point/query.sql @@ -0,0 +1,24 @@ +SELECT CAST('1.1' AS Decimal(10, 5)); +SELECT CAST('1.12345' AS Decimal(10, 5)); +SELECT CAST('1.123451' AS Decimal(10, 5)); +SELECT CAST('1.1234511111' AS Decimal(10, 5)); +SELECT CAST('1.12345111111' AS Decimal(10, 5)); +SELECT CAST('1.12345111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111' AS Decimal(10, 5)); +SELECT CAST('12345.1' AS Decimal(10, 5)); + +-- Actually our decimal can contain more than 10 digits for free. +SELECT CAST('123456789123.1' AS Decimal(10, 5)); +SELECT CAST('1234567891234.1' AS Decimal(10, 5)); +SELECT CAST('1234567891234.12345111' AS Decimal(10, 5)); +-- But it's just Decimal64, so there is the limit. +SELECT CAST('12345678912345.1' AS Decimal(10, 5)); -- { serverError ARGUMENT_OUT_OF_BOUND } + +-- The rounding may work in unexpected way: this is just integer rounding. +-- We can improve it but here is the current behaviour: +SELECT CAST('1.123455' AS Decimal(10, 5)); +SELECT CAST('1.123456' AS Decimal(10, 5)); +SELECT CAST('1.123445' AS Decimal(10, 5)); -- Check if suddenly banker's rounding will be implemented. 
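+ +-- Editor's sketch (annotation, not part of the upstream test): "integer rounding" means the scaled +-- value is rounded as a plain integer, with no banker's (round-half-even) rule; assuming round-half-up, +-- the cast above could be approximated like this (hypothetical comparison query, sketch only): +-- SELECT floor(toFloat64('1.123455') * 100000 + 0.5) / 100000 AS approx;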
+ +CREATE TEMPORARY TABLE test (x Decimal(10, 5)); +INSERT INTO test VALUES (1.12345111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111); +SELECT * FROM test; diff --git a/parser/testdata/01328_bad_peephole_optimization/ast.json b/parser/testdata/01328_bad_peephole_optimization/ast.json new file mode 100644 index 000000000..5fd8d043a --- /dev/null +++ b/parser/testdata/01328_bad_peephole_optimization/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function max (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Identifier b" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias a)" + }, + { + "explain": " Literal UInt64_2 (alias b)" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001538867, + "rows_read": 20, + "bytes_read": 801 + } +} diff --git a/parser/testdata/01328_bad_peephole_optimization/metadata.json b/parser/testdata/01328_bad_peephole_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01328_bad_peephole_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01328_bad_peephole_optimization/query.sql b/parser/testdata/01328_bad_peephole_optimization/query.sql new file mode 100644 index 000000000..ff07a8037 --- /dev/null +++ b/parser/testdata/01328_bad_peephole_optimization/query.sql @@ -0,0 +1 @@ +select max(a + b) from (SELECT 1 AS a, 2 AS b); diff --git a/parser/testdata/01329_compare_tuple_string_constant/ast.json b/parser/testdata/01329_compare_tuple_string_constant/ast.json new file mode 100644 index 000000000..f89c84934 --- /dev/null +++ b/parser/testdata/01329_compare_tuple_string_constant/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001294439, + "rows_read": 10, + "bytes_read": 366 + } +} diff --git a/parser/testdata/01329_compare_tuple_string_constant/metadata.json b/parser/testdata/01329_compare_tuple_string_constant/metadata.json new file mode 100644 
index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01329_compare_tuple_string_constant/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01329_compare_tuple_string_constant/query.sql b/parser/testdata/01329_compare_tuple_string_constant/query.sql new file mode 100644 index 000000000..c56ffdd20 --- /dev/null +++ b/parser/testdata/01329_compare_tuple_string_constant/query.sql @@ -0,0 +1,4 @@ +SELECT tuple(1) < ''; -- { serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED } +SELECT tuple(1) < materialize(''); -- { serverError NO_COMMON_TYPE } +SELECT (1, 2) < '(1,3)'; +SELECT (1, 2) < '(1, 1)'; diff --git a/parser/testdata/01330_array_join_in_higher_order_function/ast.json b/parser/testdata/01330_array_join_in_higher_order_function/ast.json new file mode 100644 index 000000000..11bba66ad --- /dev/null +++ b/parser/testdata/01330_array_join_in_higher_order_function/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2]" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001460937, + "rows_read": 18, + "bytes_read": 730 + } +} diff --git a/parser/testdata/01330_array_join_in_higher_order_function/metadata.json b/parser/testdata/01330_array_join_in_higher_order_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01330_array_join_in_higher_order_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01330_array_join_in_higher_order_function/query.sql b/parser/testdata/01330_array_join_in_higher_order_function/query.sql new file mode 100644 index 000000000..7ac8945d0 --- /dev/null +++ b/parser/testdata/01330_array_join_in_higher_order_function/query.sql @@ -0,0 +1 @@ +SELECT arrayMap(x -> arrayJoin([x, 1]), [1, 2]); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/01332_join_type_syntax_position/ast.json b/parser/testdata/01332_join_type_syntax_position/ast.json new file mode 100644 index 000000000..bbe4a9344 --- /dev/null +++ b/parser/testdata/01332_join_type_syntax_position/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, 
+ { + "explain": " Function numbers (alias t1) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (alias t2) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TableJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001066776, + "rows_read": 19, + "bytes_read": 760 + } +} diff --git a/parser/testdata/01332_join_type_syntax_position/metadata.json b/parser/testdata/01332_join_type_syntax_position/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01332_join_type_syntax_position/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01332_join_type_syntax_position/query.sql b/parser/testdata/01332_join_type_syntax_position/query.sql new file mode 100644 index 000000000..b6eef50b1 --- /dev/null +++ b/parser/testdata/01332_join_type_syntax_position/query.sql @@ -0,0 +1,31 @@ +select * from numbers(1) t1 left outer join numbers(1) t2 using number; +select * from numbers(1) t1 right outer join numbers(1) t2 using number; + +select * from numbers(1) t1 left any join numbers(1) t2 using number; +select * from numbers(1) t1 right any join numbers(1) t2 using number; + +select * from numbers(1) t1 left semi join numbers(1) t2 using number; +select * from numbers(1) t1 right semi join numbers(1) t2 using number; + +select * from numbers(1) t1 left anti join numbers(1) t2 using number; +select * from numbers(1) t1 right anti join numbers(1) t2 using number; + +select * from numbers(1) t1 asof join numbers(1) t2 using number; -- { serverError NOT_IMPLEMENTED } +select * from numbers(1) t1 left asof join numbers(1) t2 using number; -- { serverError NOT_IMPLEMENTED } + +-- legacy + +select * from numbers(1) t1 all left join numbers(1) t2 using number; +select * from numbers(1) t1 all right join numbers(1) t2 using number; + +select * from numbers(1) t1 any left join numbers(1) t2 using number; +select * from numbers(1) t1 any right join numbers(1) t2 using number; + +select * from numbers(1) t1 semi left join numbers(1) t2 using number; +select * from numbers(1) t1 semi right join numbers(1) t2 using number; + +select * from numbers(1) t1 anti left join numbers(1) t2 using number; +select * from numbers(1) t1 anti right join numbers(1) t2 using number; + +select * from numbers(1) t1 asof join numbers(1) t2 using number; -- { serverError NOT_IMPLEMENTED } +select * from numbers(1) t1 asof left join numbers(1) t2 using number; -- { serverError NOT_IMPLEMENTED } diff --git a/parser/testdata/01333_select_abc_asterisk/ast.json b/parser/testdata/01333_select_abc_asterisk/ast.json new file mode 100644 index 000000000..e371b85f2 --- /dev/null +++ b/parser/testdata/01333_select_abc_asterisk/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.0010856, + "rows_read": 5, + 
"bytes_read": 169 + } +} diff --git a/parser/testdata/01333_select_abc_asterisk/metadata.json b/parser/testdata/01333_select_abc_asterisk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01333_select_abc_asterisk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01333_select_abc_asterisk/query.sql b/parser/testdata/01333_select_abc_asterisk/query.sql new file mode 100644 index 000000000..78bf2eaff --- /dev/null +++ b/parser/testdata/01333_select_abc_asterisk/query.sql @@ -0,0 +1,6 @@ +select *; + +--error: should be failed for abc.*; +select abc.*; --{serverError UNKNOWN_IDENTIFIER} +select *, abc.*; --{serverError UNKNOWN_IDENTIFIER} +select abc.*, *; --{serverError UNKNOWN_IDENTIFIER} diff --git a/parser/testdata/01337_mysql_global_variables/ast.json b/parser/testdata/01337_mysql_global_variables/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01337_mysql_global_variables/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01337_mysql_global_variables/metadata.json b/parser/testdata/01337_mysql_global_variables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01337_mysql_global_variables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01337_mysql_global_variables/query.sql b/parser/testdata/01337_mysql_global_variables/query.sql new file mode 100644 index 000000000..d76baa85b --- /dev/null +++ b/parser/testdata/01337_mysql_global_variables/query.sql @@ -0,0 +1,9 @@ +-- Tags: global, no-fasttest + +SELECT @@test; +SELECT @@max_allowed_packet FORMAT CSVWithNames; +SELECT @@MAX_ALLOWED_PACKET FORMAT CSVWithNames; +SELECT @@max_allowed_packet, number FROM system.numbers LIMIT 3 FORMAT CSVWithNames; +SELECT @@session.auto_increment_increment FORMAT CSVWithNames; +SELECT @@session.auto_increment_increment AS auto_increment_increment FORMAT CSVWithNames; +SELECT @@Version FORMAT CSVWithNames; diff --git a/parser/testdata/01338_sha256_fixedstring/ast.json b/parser/testdata/01338_sha256_fixedstring/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01338_sha256_fixedstring/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01338_sha256_fixedstring/metadata.json b/parser/testdata/01338_sha256_fixedstring/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01338_sha256_fixedstring/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01338_sha256_fixedstring/query.sql b/parser/testdata/01338_sha256_fixedstring/query.sql new file mode 100644 index 000000000..bf66b03ec --- /dev/null +++ b/parser/testdata/01338_sha256_fixedstring/query.sql @@ -0,0 +1,16 @@ +-- Tags: no-fasttest + +SELECT hex(SHA256('')); +SELECT hex(SHA256('abc')); + +DROP TABLE IF EXISTS defaults; +CREATE TABLE defaults +( + s FixedString(20) +)ENGINE = Memory(); + +INSERT INTO defaults SELECT s FROM generateRandom('s FixedString(20)', 1, 1, 1) LIMIT 20; + +SELECT hex(SHA256(s)) FROM defaults; + +DROP TABLE defaults; diff --git a/parser/testdata/01338_uuid_without_separator/ast.json b/parser/testdata/01338_uuid_without_separator/ast.json new file mode 100644 index 000000000..9fda979f7 --- /dev/null +++ b/parser/testdata/01338_uuid_without_separator/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 
1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUUID (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '417ddc5de5564d2795dda34d84e46a50'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001061685, + "rows_read": 7, + "bytes_read": 285 + } +} diff --git a/parser/testdata/01338_uuid_without_separator/metadata.json b/parser/testdata/01338_uuid_without_separator/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01338_uuid_without_separator/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01338_uuid_without_separator/query.sql b/parser/testdata/01338_uuid_without_separator/query.sql new file mode 100644 index 000000000..efbf4bc28 --- /dev/null +++ b/parser/testdata/01338_uuid_without_separator/query.sql @@ -0,0 +1,11 @@ +SELECT toUUID('417ddc5de5564d2795dda34d84e46a50'); +SELECT toUUID('417ddc5d-e556-4d27-95dd-a34d84e46a50'); + +DROP TABLE IF EXISTS t_uuid; +CREATE TABLE t_uuid (x UInt8, y UUID, z String) ENGINE = TinyLog; + +INSERT INTO t_uuid VALUES (1, '417ddc5de5564d2795dda34d84e46a50', 'Example 1'); +INSERT INTO t_uuid VALUES (2, '417ddc5d-e556-4d27-95dd-a34d84e46a51', 'Example 2'); + +SELECT * FROM t_uuid ORDER BY x ASC; +DROP TABLE IF EXISTS t_uuid; diff --git a/parser/testdata/01340_datetime64_fpe/ast.json b/parser/testdata/01340_datetime64_fpe/ast.json new file mode 100644 index 000000000..a322d318d --- /dev/null +++ b/parser/testdata/01340_datetime64_fpe/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime64 (alias dt64) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2019-09-16 19:20:12.3456789102019-09-16 19:20:12.345678910'" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier dt64" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001109524, + "rows_read": 10, + "bytes_read": 425 + } +} diff --git a/parser/testdata/01340_datetime64_fpe/metadata.json b/parser/testdata/01340_datetime64_fpe/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01340_datetime64_fpe/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01340_datetime64_fpe/query.sql b/parser/testdata/01340_datetime64_fpe/query.sql new file mode 100644 index 000000000..3ceb465cb --- /dev/null +++ b/parser/testdata/01340_datetime64_fpe/query.sql @@ -0,0 +1,71 @@ +WITH toDateTime64('2019-09-16 19:20:12.3456789102019-09-16 19:20:12.345678910', 0) AS dt64 SELECT dt64; -- { serverError CANNOT_PARSE_TEXT } + +SELECT toDateTime64('2011-11-11 11:11:11.1234567890123456789', 0); +SELECT toDateTime64('2011-11-11 11:11:11.-12345678901234567890', 0); -- { serverError CANNOT_PARSE_TEXT } + + +SELECT toDateTime64('2011-11-11 11:11:11.1', 0); +SELECT toDateTime64('2011-11-11 11:11:11.11', 0); +SELECT toDateTime64('2011-11-11 11:11:11.111', 0); +SELECT toDateTime64('2011-11-11 11:11:11.1111', 0); +SELECT toDateTime64('2011-11-11 11:11:11.11111', 0); 
+SELECT toDateTime64('2011-11-11 11:11:11.111111', 0); +SELECT toDateTime64('2011-11-11 11:11:11.1111111', 0); +SELECT toDateTime64('2011-11-11 11:11:11.11111111', 0); +SELECT toDateTime64('2011-11-11 11:11:11.111111111', 0); +SELECT toDateTime64('2011-11-11 11:11:11.1111111111', 0); +SELECT toDateTime64('2011-11-11 11:11:11.11111111111', 0); +SELECT toDateTime64('2011-11-11 11:11:11.111111111111', 0); +SELECT toDateTime64('2011-11-11 11:11:11.1111111111111', 0); +SELECT toDateTime64('2011-11-11 11:11:11.11111111111111', 0); +SELECT toDateTime64('2011-11-11 11:11:11.111111111111111', 0); +SELECT toDateTime64('2011-11-11 11:11:11.1111111111111111', 0); +SELECT toDateTime64('2011-11-11 11:11:11.11111111111111111', 0); +SELECT toDateTime64('2011-11-11 11:11:11.111111111111111111', 0); +SELECT toDateTime64('2011-11-11 11:11:11.1111111111111111111', 0); +SELECT toDateTime64('2011-11-11 11:11:11.11111111111111111111', 0); +SELECT toDateTime64('2011-11-11 11:11:11.111111111111111111111', 0); + +SELECT toDateTime64('2011-11-11 11:11:11.-1', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-11', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-111', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-1111', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-11111', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-111111', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-1111111', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-11111111', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-111111111', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-1111111111', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-11111111111', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-111111111111', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-1111111111111', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-11111111111111', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-111111111111111', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-1111111111111111', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-11111111111111111', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-111111111111111111', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-1111111111111111111', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-11111111111111111111', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.-111111111111111111111', 0); -- { serverError CANNOT_PARSE_TEXT } + +SELECT toDateTime64('2011-11-11 11:11:11.+1', 0); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.++11', 10); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT toDateTime64('2011-11-11 11:11:11.+111', 3); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.+++1111', 5); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.+11111', 7); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 
11:11:11.+++++111111', 2); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.+1111111', 1); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.++++++11111111', 8); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.+111111111', 9); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.+++++++1111111111', 6); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.+11111111111', 4); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDateTime64('2011-11-11 11:11:11.++++++++111111111111', 11); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT toDateTime64('2011-11-11 11:11:11.+1111111111111', 15); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT toDateTime64('2011-11-11 11:11:11.+++++++++11111111111111', 13); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT toDateTime64('2011-11-11 11:11:11.+111111111111111', 12); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT toDateTime64('2011-11-11 11:11:11.++++++++++1111111111111111', 16); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT toDateTime64('2011-11-11 11:11:11.+11111111111111111', 14); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT toDateTime64('2011-11-11 11:11:11.+++++++++++111111111111111111', 15); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT toDateTime64('2011-11-11 11:11:11.+1111111111111111111', 17); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT toDateTime64('2011-11-11 11:11:11.++++++++++++11111111111111111111', 19); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT toDateTime64('2011-11-11 11:11:11.+111111111111111111111', 18); -- { serverError ARGUMENT_OUT_OF_BOUND } diff --git a/parser/testdata/01341_datetime64_wrong_supertype/ast.json b/parser/testdata/01341_datetime64_wrong_supertype/ast.json new file mode 100644 index 000000000..8d45d3432 --- /dev/null +++ b/parser/testdata/01341_datetime64_wrong_supertype/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2000-01-01 01:01:01.123'" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2000-01-01 01:01:01.123456'" + }, + { + "explain": " Literal UInt64_6" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.00094568, + "rows_read": 14, + "bytes_read": 573 + } +} diff --git a/parser/testdata/01341_datetime64_wrong_supertype/metadata.json b/parser/testdata/01341_datetime64_wrong_supertype/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01341_datetime64_wrong_supertype/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01341_datetime64_wrong_supertype/query.sql b/parser/testdata/01341_datetime64_wrong_supertype/query.sql new file mode 100644 index 000000000..39acaf93e --- /dev/null +++ b/parser/testdata/01341_datetime64_wrong_supertype/query.sql @@ -0,0 +1 @@ +SELECT [toDateTime64('2000-01-01 01:01:01.123', 3), 
toDateTime64('2000-01-01 01:01:01.123456', 6)]; diff --git a/parser/testdata/01343_min_bytes_to_use_mmap_io/ast.json b/parser/testdata/01343_min_bytes_to_use_mmap_io/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01343_min_bytes_to_use_mmap_io/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01343_min_bytes_to_use_mmap_io/metadata.json b/parser/testdata/01343_min_bytes_to_use_mmap_io/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01343_min_bytes_to_use_mmap_io/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01343_min_bytes_to_use_mmap_io/query.sql b/parser/testdata/01343_min_bytes_to_use_mmap_io/query.sql new file mode 100644 index 000000000..e188d64c0 --- /dev/null +++ b/parser/testdata/01343_min_bytes_to_use_mmap_io/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-object-storage +DROP TABLE IF EXISTS test_01343; +CREATE TABLE test_01343 (x String) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, prewarm_mark_cache = 0, serialization_info_version = 'basic'; +INSERT INTO test_01343 VALUES ('Hello, world'); + +SET local_filesystem_read_method = 'mmap', min_bytes_to_use_mmap_io = 1; +SELECT * FROM test_01343; + +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['CreatedReadBufferMMap'] AS value FROM system.query_log WHERE current_database = currentDatabase() AND event_date >= yesterday() AND event_time >= now() - 300 AND query LIKE 'SELECT * FROM test_01343%' AND type = 2 ORDER BY event_time DESC LIMIT 1; + +DROP TABLE test_01343; diff --git a/parser/testdata/01344_alter_enum_partition_key/ast.json b/parser/testdata/01344_alter_enum_partition_key/ast.json new file mode 100644 index 000000000..3c4d59757 --- /dev/null +++ b/parser/testdata/01344_alter_enum_partition_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001179597, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01344_alter_enum_partition_key/metadata.json b/parser/testdata/01344_alter_enum_partition_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01344_alter_enum_partition_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01344_alter_enum_partition_key/query.sql b/parser/testdata/01344_alter_enum_partition_key/query.sql new file mode 100644 index 000000000..05dce6bec --- /dev/null +++ b/parser/testdata/01344_alter_enum_partition_key/query.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (x Enum('hello' = 1, 'world' = 2), y String) ENGINE = MergeTree PARTITION BY x ORDER BY y; +INSERT INTO test VALUES ('hello', 'test'); + +SELECT * FROM test; +SELECT name, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test' AND active ORDER BY partition; + +ALTER TABLE test MODIFY COLUMN x Enum('hello' = 1, 'world' = 2, 'goodbye' = 3); +INSERT INTO test VALUES ('goodbye', 'test'); +OPTIMIZE TABLE test FINAL; +SELECT * FROM test ORDER BY x; +SELECT name, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test' AND active ORDER BY partition; + +ALTER TABLE test MODIFY COLUMN x Enum('hello' = 1, 'world' = 2); -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE test MODIFY 
COLUMN x Enum('hello' = 1, 'world' = 2, 'test' = 3); +ALTER TABLE test MODIFY COLUMN x Enum('hello' = 1, 'world' = 2, 'goodbye' = 4); -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +ALTER TABLE test MODIFY COLUMN x Int8; +INSERT INTO test VALUES (111, 'abc'); +OPTIMIZE TABLE test FINAL; +SELECT * FROM test ORDER BY x; +SELECT name, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test' AND active ORDER BY partition; + +ALTER TABLE test MODIFY COLUMN x Enum8('' = 1); -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE test MODIFY COLUMN x Enum16('' = 1); -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +ALTER TABLE test MODIFY COLUMN x UInt64; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE test MODIFY COLUMN x String; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE test MODIFY COLUMN x Nullable(Int64); -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +ALTER TABLE test RENAME COLUMN x TO z; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE test RENAME COLUMN y TO z; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE test DROP COLUMN x; -- { serverError UNKNOWN_IDENTIFIER } +ALTER TABLE test DROP COLUMN y; -- { serverError UNKNOWN_IDENTIFIER } + +DROP TABLE test; diff --git a/parser/testdata/01344_min_bytes_to_use_mmap_io_index/ast.json b/parser/testdata/01344_min_bytes_to_use_mmap_io_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01344_min_bytes_to_use_mmap_io_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01344_min_bytes_to_use_mmap_io_index/metadata.json b/parser/testdata/01344_min_bytes_to_use_mmap_io_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01344_min_bytes_to_use_mmap_io_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01344_min_bytes_to_use_mmap_io_index/query.sql b/parser/testdata/01344_min_bytes_to_use_mmap_io_index/query.sql new file mode 100644 index 000000000..199493a90 --- /dev/null +++ b/parser/testdata/01344_min_bytes_to_use_mmap_io_index/query.sql @@ -0,0 +1,13 @@ +-- Tags: no-object-storage +DROP TABLE IF EXISTS test_01344; +CREATE TABLE test_01344 (x String, INDEX idx (x) TYPE set(10) GRANULARITY 1) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, prewarm_mark_cache = 0, serialization_info_version = 'basic'; +INSERT INTO test_01344 VALUES ('Hello, world'); + +SET local_filesystem_read_method = 'mmap', min_bytes_to_use_mmap_io = 1; +SELECT * FROM test_01344 WHERE x = 'Hello, world'; + +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['CreatedReadBufferMMap'] as value FROM system.query_log + WHERE current_database = currentDatabase() AND event_date >= yesterday() AND query LIKE 'SELECT * FROM test_01344 WHERE x = ''Hello, world''%' AND type = 2 ORDER BY event_time DESC LIMIT 1; + +DROP TABLE test_01344; diff --git a/parser/testdata/01345_array_join_LittleMaverick/ast.json b/parser/testdata/01345_array_join_LittleMaverick/ast.json new file mode 100644 index 000000000..3185aa581 --- /dev/null +++ b/parser/testdata/01345_array_join_LittleMaverick/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001351821, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git 
a/parser/testdata/01345_array_join_LittleMaverick/metadata.json b/parser/testdata/01345_array_join_LittleMaverick/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01345_array_join_LittleMaverick/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01345_array_join_LittleMaverick/query.sql b/parser/testdata/01345_array_join_LittleMaverick/query.sql new file mode 100644 index 000000000..ba6842886 --- /dev/null +++ b/parser/testdata/01345_array_join_LittleMaverick/query.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + `id` Nullable(String), + `status` Nullable(Enum8('NEW' = 0, 'CANCEL' = 1)), + `nested.nestedType` Array(Nullable(String)), + `partition` Date +) ENGINE = MergeTree() PARTITION BY partition +ORDER BY + partition SETTINGS index_granularity = 8192; + +INSERT INTO test VALUES ('1', 'NEW', array('a', 'b'), now()); + +SELECT + status, + count() AS all +FROM test ARRAY JOIN nested as nestedJoined +WHERE (status IN ( + SELECT status + FROM test ARRAY JOIN nested as nestedJoined + GROUP BY status + ORDER BY count() DESC + LIMIT 10)) AND (id IN ('1', '2')) +GROUP BY CUBE(status) +LIMIT 100; + +DROP TABLE test; diff --git a/parser/testdata/01345_index_date_vs_datetime/ast.json b/parser/testdata/01345_index_date_vs_datetime/ast.json new file mode 100644 index 000000000..fd4cdae03 --- /dev/null +++ b/parser/testdata/01345_index_date_vs_datetime/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery index (children 1)" + }, + { + "explain": " Identifier index" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001178335, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/01345_index_date_vs_datetime/metadata.json b/parser/testdata/01345_index_date_vs_datetime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01345_index_date_vs_datetime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01345_index_date_vs_datetime/query.sql b/parser/testdata/01345_index_date_vs_datetime/query.sql new file mode 100644 index 000000000..fefe14ded --- /dev/null +++ b/parser/testdata/01345_index_date_vs_datetime/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS index; +CREATE TABLE index (d Date) ENGINE = MergeTree ORDER BY d; +INSERT INTO index VALUES ('2020-04-07'); +SELECT * FROM index WHERE d > toDateTime('2020-04-06 23:59:59'); +SELECT * FROM index WHERE identity(d > toDateTime('2020-04-06 23:59:59')); +DROP TABLE index; diff --git a/parser/testdata/01346_alter_enum_partition_key_replicated_zookeeper_long/ast.json b/parser/testdata/01346_alter_enum_partition_key_replicated_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01346_alter_enum_partition_key_replicated_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01346_alter_enum_partition_key_replicated_zookeeper_long/metadata.json b/parser/testdata/01346_alter_enum_partition_key_replicated_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01346_alter_enum_partition_key_replicated_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01346_alter_enum_partition_key_replicated_zookeeper_long/query.sql b/parser/testdata/01346_alter_enum_partition_key_replicated_zookeeper_long/query.sql new file mode 
100644 index 000000000..8a0b7af63 --- /dev/null +++ b/parser/testdata/01346_alter_enum_partition_key_replicated_zookeeper_long/query.sql @@ -0,0 +1,59 @@ +-- Tags: long, replica + +SET insert_keeper_fault_injection_probability=0; -- disable fault injection; part ids are non-deterministic in case of insert retries +SET replication_alter_partitions_sync=2; + +DROP TABLE IF EXISTS test SYNC; +DROP TABLE IF EXISTS test2 SYNC; + +CREATE TABLE test (x Enum('hello' = 1, 'world' = 2), y String) ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_01346/table', 'r1') PARTITION BY x ORDER BY y; +CREATE TABLE test2 (x Enum('hello' = 1, 'world' = 2), y String) ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_01346/table', 'r2') PARTITION BY x ORDER BY y; +INSERT INTO test VALUES ('hello', 'test'); + +SELECT * FROM test; +SYSTEM SYNC REPLICA test2; +SELECT * FROM test2; +SELECT min_block_number, max_block_number, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test' AND active ORDER BY partition; +SELECT min_block_number, max_block_number, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test2' AND active ORDER BY partition; + +ALTER TABLE test MODIFY COLUMN x Enum('hello' = 1, 'world' = 2, 'goodbye' = 3); +INSERT INTO test VALUES ('goodbye', 'test'); +OPTIMIZE TABLE test FINAL; +SYSTEM SYNC REPLICA test2; + +SELECT * FROM test ORDER BY x; +SYSTEM SYNC REPLICA test2; +SELECT * FROM test2 ORDER BY x; +SELECT min_block_number, max_block_number, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test' AND active ORDER BY partition; +SELECT min_block_number, max_block_number, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test2' AND active ORDER BY partition; + +ALTER TABLE test MODIFY COLUMN x Enum('hello' = 1, 'world' = 2); -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE test MODIFY COLUMN x Enum('hello' = 1, 'world' = 2, 'test' = 3); + +ALTER TABLE test MODIFY COLUMN x Enum('hello' = 1, 'world' = 2, 'goodbye' = 4); -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +ALTER TABLE test MODIFY COLUMN x Int8; +INSERT INTO test VALUES (111, 'abc'); +OPTIMIZE TABLE test FINAL; +SYSTEM SYNC REPLICA test2; + +SELECT * FROM test ORDER BY x; +SYSTEM SYNC REPLICA test2; +SELECT * FROM test2 ORDER BY x; +SELECT min_block_number, max_block_number, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test' AND active ORDER BY partition; +SELECT min_block_number, max_block_number, partition, partition_id FROM system.parts WHERE database = currentDatabase() AND table = 'test2' AND active ORDER BY partition; + +ALTER TABLE test MODIFY COLUMN x Enum8('' = 1); -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE test MODIFY COLUMN x Enum16('' = 1); -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +ALTER TABLE test MODIFY COLUMN x UInt64; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE test MODIFY COLUMN x String; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE test MODIFY COLUMN x Nullable(Int64); -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +ALTER TABLE test RENAME COLUMN x TO z; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE test RENAME COLUMN y TO z; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE test DROP COLUMN x; -- { serverError UNKNOWN_IDENTIFIER } +ALTER TABLE test DROP COLUMN y; -- { serverError UNKNOWN_IDENTIFIER } + 
+DROP TABLE test SYNC; +DROP TABLE test2 SYNC; diff --git a/parser/testdata/01346_array_join_mrxotey/ast.json b/parser/testdata/01346_array_join_mrxotey/ast.json new file mode 100644 index 000000000..2cc00baad --- /dev/null +++ b/parser/testdata/01346_array_join_mrxotey/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001308031, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01346_array_join_mrxotey/metadata.json b/parser/testdata/01346_array_join_mrxotey/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01346_array_join_mrxotey/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01346_array_join_mrxotey/query.sql b/parser/testdata/01346_array_join_mrxotey/query.sql new file mode 100644 index 000000000..b57b7fadc --- /dev/null +++ b/parser/testdata/01346_array_join_mrxotey/query.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test ( + a Date, + b UInt32, + c UInt64, + p Nested ( + at1 String, + at2 String + ) +) ENGINE = MergeTree() +PARTITION BY a +ORDER BY b +SETTINGS index_granularity = 8192; + +INSERT INTO test (a, b, c, p.at1, p.at2) +VALUES (now(), 1, 2, ['foo', 'bar'], ['baz', 'qux']); + +SELECT b +FROM test +ARRAY JOIN p +WHERE + b = 1 + AND c IN ( + SELECT c FROM test + ); + +DROP TABLE test; diff --git a/parser/testdata/01347_partition_date_vs_datetime/ast.json b/parser/testdata/01347_partition_date_vs_datetime/ast.json new file mode 100644 index 000000000..3c67e9a9c --- /dev/null +++ b/parser/testdata/01347_partition_date_vs_datetime/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_datetime (children 1)" + }, + { + "explain": " Identifier test_datetime" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001322968, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/01347_partition_date_vs_datetime/metadata.json b/parser/testdata/01347_partition_date_vs_datetime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01347_partition_date_vs_datetime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01347_partition_date_vs_datetime/query.sql b/parser/testdata/01347_partition_date_vs_datetime/query.sql new file mode 100644 index 000000000..8cf6144f5 --- /dev/null +++ b/parser/testdata/01347_partition_date_vs_datetime/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS test_datetime; +CREATE TABLE test_datetime (time DateTime) ENGINE=MergeTree PARTITION BY time ORDER BY time; +INSERT INTO test_datetime (time) VALUES (toDate(18012)); +SELECT * FROM test_datetime WHERE time=toDate(18012); +DROP TABLE test_datetime; diff --git a/parser/testdata/01349_mutation_datetime_key/ast.json b/parser/testdata/01349_mutation_datetime_key/ast.json new file mode 100644 index 000000000..0f2deb30e --- /dev/null +++ b/parser/testdata/01349_mutation_datetime_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery cdp_orders (children 1)" + }, + { + "explain": " Identifier cdp_orders" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00138837, + "rows_read": 2, + "bytes_read": 72 + } +}
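Review note: each ast.json fixture in this series holds ClickHouse's EXPLAIN AST output wrapped in the server's JSON envelope (meta, data, rows, statistics), and it appears to cover only the first statement of the paired query.sql; that is why every script that opens with DROP TABLE IF EXISTS records the same two-row DropQuery shape. A minimal reproduction of the fixture above, assuming the harness simply runs EXPLAIN AST against that leading statement (the per-run statistics fields are presumably not compared):

    EXPLAIN AST DROP TABLE IF EXISTS cdp_orders;
    -- DropQuery cdp_orders (children 1)
    --  Identifier cdp_orders

diff --git 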
a/parser/testdata/01349_mutation_datetime_key/metadata.json b/parser/testdata/01349_mutation_datetime_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01349_mutation_datetime_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01349_mutation_datetime_key/query.sql b/parser/testdata/01349_mutation_datetime_key/query.sql new file mode 100644 index 000000000..8fea6560a --- /dev/null +++ b/parser/testdata/01349_mutation_datetime_key/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS cdp_orders; + +CREATE TABLE cdp_orders +( + `order_id` String, + `order_status` String, + `order_time` DateTime +) +ENGINE = ReplacingMergeTree() +PARTITION BY toYYYYMMDD(order_time) +ORDER BY (order_time, order_id) +SETTINGS index_granularity = 8192; + +INSERT INTO cdp_orders VALUES ('hello', 'world', '2020-01-02 03:04:05'); + +SELECT * FROM cdp_orders; +SET mutations_sync = 1; +ALTER TABLE cdp_orders DELETE WHERE order_time >= '2019-12-03 00:00:00'; +SELECT * FROM cdp_orders; + +DROP TABLE cdp_orders; diff --git a/parser/testdata/01350_intdiv_nontrivial_fpe/ast.json b/parser/testdata/01350_intdiv_nontrivial_fpe/ast.json new file mode 100644 index 000000000..ac4623e4a --- /dev/null +++ b/parser/testdata/01350_intdiv_nontrivial_fpe/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function intDiv (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Int64_-9223372036854775808" + }, + { + "explain": " Literal UInt64_255" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001331905, + "rows_read": 8, + "bytes_read": 309 + } +} diff --git a/parser/testdata/01350_intdiv_nontrivial_fpe/metadata.json b/parser/testdata/01350_intdiv_nontrivial_fpe/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01350_intdiv_nontrivial_fpe/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01350_intdiv_nontrivial_fpe/query.sql b/parser/testdata/01350_intdiv_nontrivial_fpe/query.sql new file mode 100644 index 000000000..9c5523e6f --- /dev/null +++ b/parser/testdata/01350_intdiv_nontrivial_fpe/query.sql @@ -0,0 +1,5 @@ +select intDiv(-9223372036854775808, 255); +select intDiv(-9223372036854775808, 65535); +select intDiv(-9223372036854775808, 4294967295); +select intDiv(-9223372036854775808, 18446744073709551615); -- { serverError ILLEGAL_DIVISION } +select intDiv(-9223372036854775808, -1); -- { serverError ILLEGAL_DIVISION }
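Review note: the -- { serverError NAME } trailers are expectations for ClickHouse's functional-test runner: the statement is expected to parse and reach execution, then fail with the named error. They are not parse failures, which is why 01350_intdiv_nontrivial_fpe above still produces a normal ast.json for its leading statement. The contrast, taken from that script:

    -- the first statement runs to completion; the last is expected to fail only at execution time:
    select intDiv(-9223372036854775808, 255);
    select intDiv(-9223372036854775808, -1); -- { serverError ILLEGAL_DIVISION }

By the same logic, fixtures written as {"error": true} appear to mark statements this parser itself cannot yet handle; note that metadata.json is uniformly {"todo": true} across these testdata directories, for failing and passing cases alike.

diff --git a/parser/testdata/01351_geohash_assert/ast.json b/parser/testdata/01351_geohash_assert/ast.json new file mode 100644 index 000000000..c5035741d --- /dev/null +++ b/parser/testdata/01351_geohash_assert/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arraySort (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function geohashesInBox (children 1)" + }, + { + "explain": " ExpressionList 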
(children 5)" + }, + { + "explain": " Literal Float64_-1" + }, + { + "explain": " Literal Float64_-1" + }, + { + "explain": " Literal Float64_1" + }, + { + "explain": " Literal Float64_inf" + }, + { + "explain": " Literal UInt64_3" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001231914, + "rows_read": 13, + "bytes_read": 492 + } +} diff --git a/parser/testdata/01351_geohash_assert/metadata.json b/parser/testdata/01351_geohash_assert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01351_geohash_assert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01351_geohash_assert/query.sql b/parser/testdata/01351_geohash_assert/query.sql new file mode 100644 index 000000000..d364407fa --- /dev/null +++ b/parser/testdata/01351_geohash_assert/query.sql @@ -0,0 +1 @@ +SELECT arraySort(geohashesInBox(-1., -1., 1., inf, 3)); diff --git a/parser/testdata/01351_parse_date_time_best_effort_us/ast.json b/parser/testdata/01351_parse_date_time_best_effort_us/ast.json new file mode 100644 index 000000000..f73b48cc9 --- /dev/null +++ b/parser/testdata/01351_parse_date_time_best_effort_us/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001110155, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01351_parse_date_time_best_effort_us/metadata.json b/parser/testdata/01351_parse_date_time_best_effort_us/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01351_parse_date_time_best_effort_us/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01351_parse_date_time_best_effort_us/query.sql b/parser/testdata/01351_parse_date_time_best_effort_us/query.sql new file mode 100644 index 000000000..f83d5d963 --- /dev/null +++ b/parser/testdata/01351_parse_date_time_best_effort_us/query.sql @@ -0,0 +1,56 @@ +SET output_format_pretty_display_footer_column_names=0; +SELECT 'parseDateTimeBestEffortUS'; + +SELECT + s, + parseDateTimeBestEffortUS(s, 'UTC') AS a +FROM +( + SELECT arrayJoin([ +'1970/01/02 010203Z', +'01-02-2001 UTC', +'10.23.1990', +'01-02-2017 03:04:05+1', +'01/02/2017 03:04:05+300', +'01.02.2017 03:04:05GMT', +'01-02-2017 03:04:05 MSD', +'01-02-2017 11:04:05 AM', +'01-02-2017 11:04:05 PM', +'01-02-2017 12:04:05 AM', +'01-02-2017 12:04:05 PM', +'01.02.17 03:04:05 MSD Feb', +'01/02/2017 03:04:05 MSK', +'12/13/2019', +'13/12/2019', +'03/04/2019' +]) AS s) +FORMAT PrettySpaceNoEscapes; + +SELECT 'parseDateTimeBestEffortUSOrZero', 'parseDateTimeBestEffortUSOrNull'; +SELECT + s, + parseDateTimeBestEffortUSOrZero(s, 'UTC') AS a, + parseDateTimeBestEffortUSOrNull(s, 'UTC') AS b +FROM +( + SELECT arrayJoin([ +'1970/01/02 010203Z', +'01-02-2001 UTC', +'10.23.1990', +'01-02-2017 03:04:05+1', +'01/02/2017 03:04:05+300', +'01.02.2017 03:04:05GMT', +'01-02-2017 03:04:05 MSD', +'01-02-2017 11:04:05 AM', +'01-02-2017 11:04:05 PM', +'01-02-2017 12:04:05 AM', +'01-02-2017 12:04:05 PM', +'01.02.17 03:04:05 MSD Feb', +'01/02/2017 03:04:05 MSK', +'12/13/2019', +'13/12/2019', +'03/04/2019', +'', +'xyz' +]) AS s) +FORMAT PrettySpaceNoEscapes; diff --git a/parser/testdata/01352_add_datetime_bad_get/ast.json b/parser/testdata/01352_add_datetime_bad_get/ast.json new file mode 100644 index 000000000..920ca92d4 --- /dev/null +++ b/parser/testdata/01352_add_datetime_bad_get/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + 
"name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function addMonths (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2017-11-05 08:07:47'" + }, + { + "explain": " Literal Float64_1" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001310428, + "rows_read": 12, + "bytes_read": 491 + } +} diff --git a/parser/testdata/01352_add_datetime_bad_get/metadata.json b/parser/testdata/01352_add_datetime_bad_get/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01352_add_datetime_bad_get/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01352_add_datetime_bad_get/query.sql b/parser/testdata/01352_add_datetime_bad_get/query.sql new file mode 100644 index 000000000..31fb2b96b --- /dev/null +++ b/parser/testdata/01352_add_datetime_bad_get/query.sql @@ -0,0 +1 @@ +SELECT addMonths(materialize(toDateTime('2017-11-05 08:07:47')), 1.); diff --git a/parser/testdata/01352_generate_random_overflow/ast.json b/parser/testdata/01352_generate_random_overflow/ast.json new file mode 100644 index 000000000..9753e7561 --- /dev/null +++ b/parser/testdata/01352_generate_random_overflow/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier i" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function generateRandom (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal 'i Array(Nullable(Enum8(\\'hello\\' = 1, \\'world\\' = 5)))'" + }, + { + "explain": " Literal UInt64_1025" + }, + { + "explain": " Literal UInt64_65535" + }, + { + "explain": " Literal UInt64_9223372036854775807" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001174159, + "rows_read": 15, + "bytes_read": 629 + } +} diff --git a/parser/testdata/01352_generate_random_overflow/metadata.json b/parser/testdata/01352_generate_random_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01352_generate_random_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01352_generate_random_overflow/query.sql b/parser/testdata/01352_generate_random_overflow/query.sql new file mode 100644 index 000000000..69180b6b1 --- /dev/null +++ b/parser/testdata/01352_generate_random_overflow/query.sql @@ -0,0 +1 @@ +SELECT i FROM generateRandom('i Array(Nullable(Enum8(\'hello\' = 1, \'world\' = 5)))', 1025, 65535, 9223372036854775807) LIMIT 10; -- { serverError TOO_LARGE_ARRAY_SIZE } diff --git a/parser/testdata/01353_low_cardinality_join_types/ast.json 
b/parser/testdata/01353_low_cardinality_join_types/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01353_low_cardinality_join_types/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01353_low_cardinality_join_types/metadata.json b/parser/testdata/01353_low_cardinality_join_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01353_low_cardinality_join_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01353_low_cardinality_join_types/query.sql b/parser/testdata/01353_low_cardinality_join_types/query.sql new file mode 100644 index 000000000..2aa42f33f --- /dev/null +++ b/parser/testdata/01353_low_cardinality_join_types/query.sql @@ -0,0 +1,115 @@ +SET enable_analyzer = 0; +set join_algorithm = 'hash'; + +select '-'; +select toTypeName(materialize(js1.k)), toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select toLowCardinality(number) k, toString(number) s from numbers(2)) as js1 +join (select number+1 k, toLowCardinality(toString(number+1)) s from numbers(2)) as js2 +using k order by js1.k, js2.k; + +select '-'; +select toTypeName(materialize(js1.k)), toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select number k, toLowCardinality(toString(number)) s from numbers(2)) as js1 +join (select toLowCardinality(number+1) k, toString(number+1) s from numbers(2)) as js2 +using k order by js1.k, js2.k; + +select '-'; +select toTypeName(materialize(js1.k)), toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select toLowCardinality(number) k, toLowCardinality(toString(number)) s from numbers(2)) as js1 +join (select toLowCardinality(number+1) k, toLowCardinality(toString(number+1)) s from numbers(2)) as js2 +using k order by js1.k, js2.k; + +select '-'; +select toTypeName(materialize(js1.k)), toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select toLowCardinality(number) k, toString(number) s from numbers(2)) as js1 +full join (select number+1 k, toLowCardinality(toString(number+1)) s from numbers(2)) as js2 +using k order by js1.k, js2.k; + +select '-'; +select toTypeName(materialize(js1.k)), toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select number k, toLowCardinality(toString(number)) s from numbers(2)) as js1 +full join (select toLowCardinality(number+1) k, toString(number+1) s from numbers(2)) as js2 +using k order by js1.k, js2.k; + +select '-'; +select toTypeName(materialize(js1.k)), toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select toLowCardinality(number) k, toLowCardinality(toString(number)) s from numbers(2)) as js1 +full join (select toLowCardinality(number+1) k, toLowCardinality(toString(number+1)) s from numbers(2)) as js2 +using k order by js1.k, js2.k; + +set join_algorithm = 'partial_merge'; + +select '-'; +select toTypeName(materialize(js1.k)), toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select toLowCardinality(number) k, toString(number) s from numbers(2)) as js1 +join (select number+1 k, toLowCardinality(toString(number+1)) s from numbers(2)) as js2 +using k order by js1.k, js2.k; + +select '-'; +select toTypeName(materialize(js1.k)), 
toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select number k, toLowCardinality(toString(number)) s from numbers(2)) as js1 +join (select toLowCardinality(number+1) k, toString(number+1) s from numbers(2)) as js2 +using k order by js1.k, js2.k; + +select '-'; +select toTypeName(materialize(js1.k)), toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select toLowCardinality(number) k, toLowCardinality(toString(number)) s from numbers(2)) as js1 +join (select toLowCardinality(number+1) k, toLowCardinality(toString(number+1)) s from numbers(2)) as js2 +using k order by js1.k, js2.k; + +select '-'; +select toTypeName(materialize(js1.k)), toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select toLowCardinality(number) k, toString(number) s from numbers(2)) as js1 +full join (select number+1 k, toLowCardinality(toString(number+1)) s from numbers(2)) as js2 +using k order by js1.k, js2.k; + +select '-'; +select toTypeName(materialize(js1.k)), toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select number k, toLowCardinality(toString(number)) s from numbers(2)) as js1 +full join (select toLowCardinality(number+1) k, toString(number+1) s from numbers(2)) as js2 +using k order by js1.k, js2.k; + +select '-'; +select toTypeName(materialize(js1.k)), toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select toLowCardinality(number) k, toLowCardinality(toString(number)) s from numbers(2)) as js1 +full join (select toLowCardinality(number+1) k, toLowCardinality(toString(number+1)) s from numbers(2)) as js2 +using k order by js1.k, js2.k; + +SET enable_analyzer = 1; +set join_algorithm = 'hash'; + +select '-'; +select toTypeName(materialize(js1.k)), toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select toLowCardinality(number) k, toString(number) s from numbers(2)) as js1 +join (select number+1 k, toLowCardinality(toString(number+1)) s from numbers(2)) as js2 +using k order by js1.k, js2.k; + +select '-'; +select toTypeName(materialize(js1.k)), toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select number k, toLowCardinality(toString(number)) s from numbers(2)) as js1 +join (select toLowCardinality(number+1) k, toString(number+1) s from numbers(2)) as js2 +using k order by js1.k, js2.k; + +select '-'; +select toTypeName(materialize(js1.k)), toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select toLowCardinality(number) k, toLowCardinality(toString(number)) s from numbers(2)) as js1 +join (select toLowCardinality(number+1) k, toLowCardinality(toString(number+1)) s from numbers(2)) as js2 +using k order by js1.k, js2.k; + +select '-'; +select toTypeName(materialize(js1.k)), toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select toLowCardinality(number) k, toString(number) s from numbers(2)) as js1 +full join (select number+1 k, toLowCardinality(toString(number+1)) s from numbers(2)) as js2 +using k order by js1.k, js2.k; + +select '-'; +select toTypeName(materialize(js1.k)), toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select number k, toLowCardinality(toString(number)) s from 
numbers(2)) as js1 +full join (select toLowCardinality(number+1) k, toString(number+1) s from numbers(2)) as js2 +using k order by js1.k, js2.k; + +select '-'; +select toTypeName(materialize(js1.k)), toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select toLowCardinality(number) k, toLowCardinality(toString(number)) s from numbers(2)) as js1 +full join (select toLowCardinality(number+1) k, toLowCardinality(toString(number+1)) s from numbers(2)) as js2 +using k order by js1.k, js2.k; diff --git a/parser/testdata/01353_neighbor_overflow/ast.json b/parser/testdata/01353_neighbor_overflow/ast.json new file mode 100644 index 000000000..d58221414 --- /dev/null +++ b/parser/testdata/01353_neighbor_overflow/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001315068, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01353_neighbor_overflow/metadata.json b/parser/testdata/01353_neighbor_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01353_neighbor_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01353_neighbor_overflow/query.sql b/parser/testdata/01353_neighbor_overflow/query.sql new file mode 100644 index 000000000..c55f5401d --- /dev/null +++ b/parser/testdata/01353_neighbor_overflow/query.sql @@ -0,0 +1,3 @@ +SET allow_deprecated_error_prone_window_functions = 1; +SELECT neighbor(toString(number), -9223372036854775808) FROM numbers(100); -- { serverError ARGUMENT_OUT_OF_BOUND } +WITH neighbor(toString(number), toInt64(rand64())) AS x SELECT * FROM system.numbers WHERE NOT ignore(x); -- { serverError ARGUMENT_OUT_OF_BOUND } diff --git a/parser/testdata/01353_nullable_tuple/ast.json b/parser/testdata/01353_nullable_tuple/ast.json new file mode 100644 index 000000000..8e5b61afb --- /dev/null +++ b/parser/testdata/01353_nullable_tuple/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'single argument'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001089536, + "rows_read": 5, + "bytes_read": 186 + } +} diff --git a/parser/testdata/01353_nullable_tuple/metadata.json b/parser/testdata/01353_nullable_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01353_nullable_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01353_nullable_tuple/query.sql b/parser/testdata/01353_nullable_tuple/query.sql new file mode 100644 index 000000000..f757e2c42 --- /dev/null +++ b/parser/testdata/01353_nullable_tuple/query.sql @@ -0,0 +1,107 @@ +select 'single argument'; +select tuple(number) = tuple(number) from numbers(1); +select tuple(number) = tuple(number + 1) from numbers(1); +select tuple(toNullable(number)) = tuple(number) from numbers(1); +select tuple(toNullable(number)) = tuple(number + 1) from numbers(1); +select tuple(toNullable(number)) = tuple(toNullable(number)) from numbers(1); +select tuple(toNullable(number)) = tuple(toNullable(number + 1)) from numbers(1); +select '- 1'; +select 
tuple(toNullable(number)) < tuple(number + 1) from numbers(1); +select tuple(number) < tuple(toNullable(number + 1)) from numbers(1); +select tuple(toNullable(number)) < tuple(toNullable(number + 1)) from numbers(1); + +select tuple(toNullable(number)) > tuple(number + 1) from numbers(1); +select tuple(number) > tuple(toNullable(number + 1)) from numbers(1); +select tuple(toNullable(number)) > tuple(toNullable(number + 1)) from numbers(1); + +select tuple(toNullable(number + 1)) < tuple(number) from numbers(1); +select tuple(number + 1) < tuple(toNullable(number)) from numbers(1); +select tuple(toNullable(number + 1)) < tuple(toNullable(number + 1)) from numbers(1); + +select tuple(toNullable(number + 1)) > tuple(number) from numbers(1); +select tuple(number + 1) > tuple(toNullable(number)) from numbers(1); +select tuple(toNullable(number + 1)) > tuple(toNullable(number)) from numbers(1); + +select '- 2'; +select tuple(toNullable(number)) <= tuple(number + 1) from numbers(1); +select tuple(number) <= tuple(toNullable(number + 1)) from numbers(1); +select tuple(toNullable(number)) <= tuple(toNullable(number + 1)) from numbers(1); + +select tuple(toNullable(number)) >= tuple(number + 1) from numbers(1); +select tuple(number) > tuple(toNullable(number + 1)) from numbers(1); +select tuple(toNullable(number)) >= tuple(toNullable(number + 1)) from numbers(1); + +select tuple(toNullable(number + 1)) <= tuple(number) from numbers(1); +select tuple(number + 1) <= tuple(toNullable(number)) from numbers(1); +select tuple(toNullable(number + 1)) <= tuple(toNullable(number + 1)) from numbers(1); + +select tuple(toNullable(number + 1)) >= tuple(number) from numbers(1); +select tuple(number + 1) >= tuple(toNullable(number)) from numbers(1); +select tuple(toNullable(number + 1)) >= tuple(toNullable(number)) from numbers(1); + +select '- 3'; +select tuple(toNullable(number)) <= tuple(number) from numbers(1); +select tuple(number) <= tuple(toNullable(number)) from numbers(1); +select tuple(toNullable(number)) <= tuple(toNullable(number)) from numbers(1); + +select tuple(toNullable(number)) >= tuple(number) from numbers(1); +select tuple(number) >= tuple(toNullable(number)) from numbers(1); +select tuple(toNullable(number)) >= tuple(toNullable(number)) from numbers(1); + +select '- 4'; +select tuple(number) = tuple(materialize(toUInt64OrNull(''))) from numbers(1); +select tuple(materialize(toUInt64OrNull(''))) = tuple(materialize(toUInt64OrNull(''))) from numbers(1); +select tuple(number) <= tuple(materialize(toUInt64OrNull(''))) from numbers(1); +select tuple(materialize(toUInt64OrNull(''))) <= tuple(materialize(toUInt64OrNull(''))) from numbers(1); +select tuple(number) >= tuple(materialize(toUInt64OrNull(''))) from numbers(1); +select tuple(materialize(toUInt64OrNull(''))) >= tuple(materialize(toUInt64OrNull(''))) from numbers(1); + +select 'two arguments'; +select tuple(toNullable(number), number) = tuple(number, number) from numbers(1); +select tuple(toNullable(number), toNullable(number)) = tuple(number, number) from numbers(1); +select tuple(toNullable(number), toNullable(number)) = tuple(toNullable(number), number) from numbers(1); +select tuple(toNullable(number), toNullable(number)) = tuple(toNullable(number), toNullable(number)) from numbers(1); +select tuple(number, toNullable(number)) = tuple(toNullable(number), toNullable(number)) from numbers(1); +select tuple(number, toNullable(number)) = tuple(toNullable(number), number) from numbers(1); + +select '- 1'; +select tuple(toNullable(number), 
number) < tuple(number, number) from numbers(1); +select tuple(toNullable(number), toNullable(number)) < tuple(number, number) from numbers(1); +select tuple(toNullable(number), toNullable(number)) < tuple(toNullable(number), number) from numbers(1); +select tuple(toNullable(number), toNullable(number)) < tuple(toNullable(number), toNullable(number)) from numbers(1); +select tuple(number, toNullable(number)) < tuple(toNullable(number), toNullable(number)) from numbers(1); +select tuple(number, toNullable(number)) < tuple(toNullable(number), number) from numbers(1); + +select '- 2'; +select tuple(toNullable(number), number) < tuple(number, number + 1) from numbers(1); +select tuple(toNullable(number), toNullable(number)) < tuple(number, number + 1) from numbers(1); +select tuple(toNullable(number), toNullable(number)) < tuple(toNullable(number + 1), number) from numbers(1); +select tuple(toNullable(number), toNullable(number)) < tuple(toNullable(number + 1), toNullable(number)) from numbers(1); +select tuple(number, toNullable(number)) < tuple(toNullable(number), toNullable(number + 1)) from numbers(1); +select tuple(number, toNullable(number)) < tuple(toNullable(number), number + 1) from numbers(1); + +select '- 3'; +select tuple(materialize(toUInt64OrNull('')), number) = tuple(number, number) from numbers(1); +select tuple(materialize(toUInt64OrNull('')), number) = tuple(number, toUInt64OrNull('')) from numbers(1); +select tuple(materialize(toUInt64OrNull('')), toUInt64OrNull('')) = tuple(toUInt64OrNull(''), toUInt64OrNull('')) from numbers(1); +select tuple(number, materialize(toUInt64OrNull(''))) < tuple(number, number) from numbers(1); +select tuple(number, materialize(toUInt64OrNull(''))) <= tuple(number, number) from numbers(1); +select tuple(number, materialize(toUInt64OrNull(''))) < tuple(number + 1, number) from numbers(1); +select tuple(number, materialize(toUInt64OrNull(''))) > tuple(number, number) from numbers(1); +select tuple(number, materialize(toUInt64OrNull(''))) >= tuple(number, number) from numbers(1); +select tuple(number, materialize(toUInt64OrNull(''))) > tuple(number + 1, number) from numbers(1); + +select 'many arguments'; +select tuple(toNullable(number), number, number) = tuple(number, number, number) from numbers(1); +select tuple(toNullable(number), materialize('a'), number) = tuple(number, materialize('a'), number) from numbers(1); +select tuple(toNullable(number), materialize('a'), number) = tuple(number, materialize('a'), number + 1) from numbers(1); +select tuple(toNullable(number), number, number) < tuple(number, number, number) from numbers(1); +select tuple(toNullable(number), number, number) <= tuple(number, number, number) from numbers(1); +select tuple(toNullable(number), materialize('a'), number) < tuple(number, materialize('a'), number) from numbers(1); +select tuple(toNullable(number), materialize('a'), number) < tuple(number, materialize('a'), number + 1) from numbers(1); +select tuple(toNullable(number), number, materialize(toUInt64OrNull(''))) = tuple(number, number, number) from numbers(1); +select tuple(toNullable(number), materialize('a'), materialize(toUInt64OrNull(''))) = tuple(number, materialize('a'), number) from numbers(1); +select tuple(toNullable(number), materialize('a'), materialize(toUInt64OrNull(''))) = tuple(number, materialize('a'), number + 1) from numbers(1); +select tuple(toNullable(number), number, materialize(toUInt64OrNull(''))) <= tuple(number, number, number) from numbers(1); +select tuple(toNullable(number), 
materialize('a'), materialize(toUInt64OrNull(''))) <= tuple(number, materialize('a'), number) from numbers(1); +select tuple(toNullable(number), materialize('a'), materialize(toUInt64OrNull(''))) <= tuple(number, materialize('a'), number + 1) from numbers(1); diff --git a/parser/testdata/01353_topk_enum/ast.json b/parser/testdata/01353_topk_enum/ast.json new file mode 100644 index 000000000..c2c36ac17 --- /dev/null +++ b/parser/testdata/01353_topk_enum/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sqrt (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Literal 'Enum(\\'\\' = 0, \\'hello\\' = 1, \\'world\\' = 2, \\'test\\' = 3)'" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function topK (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1000" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.001697208, + "rows_read": 27, + "bytes_read": 1117 + } +} diff --git a/parser/testdata/01353_topk_enum/metadata.json b/parser/testdata/01353_topk_enum/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01353_topk_enum/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01353_topk_enum/query.sql b/parser/testdata/01353_topk_enum/query.sql new file mode 100644 index 000000000..ba048401b --- /dev/null +++ b/parser/testdata/01353_topk_enum/query.sql @@ -0,0 +1 @@ +WITH CAST(round(sqrt(number)) % 4 AS Enum('' = 0, 'hello' = 1, 'world' = 2, 'test' = 3)) AS x SELECT topK(10)(x) FROM numbers(1000); diff --git a/parser/testdata/01354_order_by_tuple_collate_const/ast.json b/parser/testdata/01354_order_by_tuple_collate_const/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01354_order_by_tuple_collate_const/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01354_order_by_tuple_collate_const/metadata.json b/parser/testdata/01354_order_by_tuple_collate_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01354_order_by_tuple_collate_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01354_order_by_tuple_collate_const/query.sql b/parser/testdata/01354_order_by_tuple_collate_const/query.sql new file mode 100644 index 000000000..132bbb74a --- /dev/null +++ 
b/parser/testdata/01354_order_by_tuple_collate_const/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest + +SELECT number FROM numbers(11) ORDER BY arrayJoin(['а', 'я', '\0�', '', 'Я', '']) ASC, toString(number) ASC, 'y' ASC COLLATE 'el'; diff --git a/parser/testdata/01354_tuple_low_cardinality_array_mapped_bug/ast.json b/parser/testdata/01354_tuple_low_cardinality_array_mapped_bug/ast.json new file mode 100644 index 000000000..59ffa4aff --- /dev/null +++ b/parser/testdata/01354_tuple_low_cardinality_array_mapped_bug/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayExists (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'pattern'" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'Array(Tuple(LowCardinality(String), UInt8))'" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.001264633, + "rows_read": 27, + "bytes_read": 1120 + } +} diff --git a/parser/testdata/01354_tuple_low_cardinality_array_mapped_bug/metadata.json b/parser/testdata/01354_tuple_low_cardinality_array_mapped_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01354_tuple_low_cardinality_array_mapped_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01354_tuple_low_cardinality_array_mapped_bug/query.sql b/parser/testdata/01354_tuple_low_cardinality_array_mapped_bug/query.sql new file mode 100644 index 000000000..80a1a7c46 --- /dev/null +++ b/parser/testdata/01354_tuple_low_cardinality_array_mapped_bug/query.sql @@ -0,0 +1,8 @@ +SELECT arrayExists(x -> ((x.1) = 'pattern'), cast([tuple('a', 1)] as Array(Tuple(LowCardinality(String), UInt8)))); + +DROP TABLE IF EXISTS table; +CREATE TABLE table (id Int32, values Array(Tuple(LowCardinality(String), Int32)), date Date) ENGINE MergeTree() PARTITION BY toYYYYMM(date) ORDER BY (id, date); + +SELECT count(*) FROM table WHERE (arrayExists(x -> ((x.1) = toLowCardinality('pattern')), values) = 1); + +DROP TABLE IF EXISTS table; diff --git a/parser/testdata/01355_alter_column_with_order/ast.json b/parser/testdata/01355_alter_column_with_order/ast.json new file mode 100644 index 000000000..7955cbcc5 --- /dev/null +++ b/parser/testdata/01355_alter_column_with_order/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + 
"type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alter_01355 (children 1)" + }, + { + "explain": " Identifier alter_01355" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001390693, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/01355_alter_column_with_order/metadata.json b/parser/testdata/01355_alter_column_with_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01355_alter_column_with_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01355_alter_column_with_order/query.sql b/parser/testdata/01355_alter_column_with_order/query.sql new file mode 100644 index 000000000..405157fd8 --- /dev/null +++ b/parser/testdata/01355_alter_column_with_order/query.sql @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS alter_01355; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE alter_01355 (CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32) ENGINE = MergeTree(StartDate, intHash32(UserID), (CounterID, StartDate, intHash32(UserID), VisitID), 8192); + +ALTER TABLE alter_01355 ADD COLUMN Added1 UInt32 FIRST; + +ALTER TABLE alter_01355 ADD COLUMN Added2 UInt32 AFTER NestedColumn; + +ALTER TABLE alter_01355 ADD COLUMN Added3 UInt32 AFTER ToDrop; + +DESC alter_01355; +DETACH TABLE alter_01355; +ATTACH TABLE alter_01355; +DESC alter_01355; + +ALTER TABLE alter_01355 MODIFY COLUMN Added2 UInt32 FIRST; + +ALTER TABLE alter_01355 MODIFY COLUMN Added3 UInt32 AFTER CounterID; + +DESC alter_01355; +DETACH TABLE alter_01355; +ATTACH TABLE alter_01355; +DESC alter_01355; + +DROP TABLE IF EXISTS alter_01355; diff --git a/parser/testdata/01355_defaultValueOfArgumentType_bug/ast.json b/parser/testdata/01355_defaultValueOfArgumentType_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01355_defaultValueOfArgumentType_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01355_defaultValueOfArgumentType_bug/metadata.json b/parser/testdata/01355_defaultValueOfArgumentType_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01355_defaultValueOfArgumentType_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01355_defaultValueOfArgumentType_bug/query.sql b/parser/testdata/01355_defaultValueOfArgumentType_bug/query.sql new file mode 100644 index 000000000..2313cb686 --- /dev/null +++ b/parser/testdata/01355_defaultValueOfArgumentType_bug/query.sql @@ -0,0 +1,4 @@ +SELECT + materialize(toLowCardinality('')) AS lc, + toTypeName(lc) +WHERE lc = defaultValueOfArgumentType(lc) diff --git a/parser/testdata/01355_if_fixed_string/ast.json b/parser/testdata/01355_if_fixed_string/ast.json new file mode 100644 index 000000000..47550b953 --- /dev/null +++ b/parser/testdata/01355_if_fixed_string/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001128376, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01355_if_fixed_string/metadata.json b/parser/testdata/01355_if_fixed_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01355_if_fixed_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01355_if_fixed_string/query.sql 
b/parser/testdata/01355_if_fixed_string/query.sql new file mode 100644 index 000000000..0d45b797a --- /dev/null +++ b/parser/testdata/01355_if_fixed_string/query.sql @@ -0,0 +1,7 @@ +SET optimize_multiif_to_if = 0; + +SELECT if(number % 2, toFixedString(toString(number), 2), toFixedString(toString(-number), 5)) AS x, toTypeName(x) FROM system.numbers LIMIT 10; +SELECT if(number % 2, toFixedString(toString(number), 2), toFixedString(toString(-number), 2)) AS x, toTypeName(x) FROM system.numbers LIMIT 10; + +SELECT multiIf(number % 2, toFixedString(toString(number), 2), toFixedString(toString(-number), 5)) AS x, toTypeName(x) FROM system.numbers LIMIT 10; +SELECT multiIf(number % 2, toFixedString(toString(number), 2), toFixedString(toString(-number), 2)) AS x, toTypeName(x) FROM system.numbers LIMIT 10; diff --git a/parser/testdata/01355_ilike/ast.json b/parser/testdata/01355_ilike/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01355_ilike/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01355_ilike/metadata.json b/parser/testdata/01355_ilike/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01355_ilike/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01355_ilike/query.sql b/parser/testdata/01355_ilike/query.sql new file mode 100644 index 000000000..1ceb878a5 --- /dev/null +++ b/parser/testdata/01355_ilike/query.sql @@ -0,0 +1,59 @@ +-- Tags: no-fasttest + +SELECT 'Hello' ILIKE ''; +SELECT 'Hello' ILIKE '%'; +SELECT 'Hello' ILIKE '%%'; +SELECT 'Hello' ILIKE '%%%'; +SELECT 'Hello' ILIKE '%_%'; +SELECT 'Hello' ILIKE '_'; +SELECT 'Hello' ILIKE '_%'; +SELECT 'Hello' ILIKE '%_'; + +SELECT 'Hello' ILIKE 'H%o'; +SELECT 'hello' ILIKE 'H%o'; +SELECT 'hello' ILIKE 'h%o'; +SELECT 'Hello' ILIKE 'h%o'; + +SELECT 'Hello' NOT ILIKE 'H%o'; +SELECT 'hello' NOT ILIKE 'H%o'; +SELECT 'hello' NOT ILIKE 'h%o'; +SELECT 'Hello' NOT ILIKE 'h%o'; + +SELECT 'OHello' ILIKE '%lhell%'; +SELECT 'Ohello' ILIKE '%hell%'; +SELECT 'hEllo' ILIKE '%HEL%'; + +SELECT 'OHello' NOT ILIKE '%lhell%'; +SELECT 'Ohello' NOT ILIKE '%hell%'; +SELECT 'hEllo' NOT ILIKE '%HEL%'; + +SELECT materialize('prepre_f') ILIKE '%pre_f%'; + +SELECT 'abcdef' ILIKE '%aBc%def%'; +SELECT 'ABCDDEF' ILIKE '%abc%def%'; +SELECT 'Abc\nDef' ILIKE '%abc%def%'; +SELECT 'abc\ntdef' ILIKE '%abc%def%'; +SELECT 'abct\ndef' ILIKE '%abc%dEf%'; +SELECT 'abc\n\ndeF' ILIKE '%abc%def%'; +SELECT 'abc\n\ntdef' ILIKE '%abc%deF%'; +SELECT 'Abc\nt\ndef' ILIKE '%abc%def%'; +SELECT 'abct\n\ndef' ILIKE '%abc%def%'; +SELECT 'ab\ndef' ILIKE '%Abc%def%'; +SELECT 'aBc\nef' ILIKE '%ABC%DEF%'; + +SELECT CAST('hello' AS FixedString(5)) ILIKE '%he%o%'; + +SELECT 'ёЁё' ILIKE 'Ё%Ё'; +SELECT 'ощщЁё' ILIKE 'Щ%Ё'; +SELECT 'ощЩЁё' ILIKE '%Щ%Ё'; + +SELECT 'Щущпандер' ILIKE '%щп%е%'; +SELECT 'Щущпандер' ILIKE '%щП%е%'; +SELECT 'ощщЁё' ILIKE '%щ%'; +SELECT 'ощЩЁё' ILIKE '%ё%'; + +SHOW TABLES NOT ILIKE '%'; +CREATE TABLE test1 (x UInt8) ENGINE = Memory; +CREATE TABLE test2 (x UInt8) ENGINE = Memory; +SHOW TABLES ILIKE 'tES%'; +SHOW TABLES NOT ILIKE 'TeS%'; diff --git a/parser/testdata/01356_initialize_aggregation/ast.json b/parser/testdata/01356_initialize_aggregation/ast.json new file mode 100644 index 000000000..9318d006d --- /dev/null +++ b/parser/testdata/01356_initialize_aggregation/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + 
"explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function uniqMerge (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier state" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function initializeAggregation (alias state) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'uniqState'" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10000" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.001376818, + "rows_read": 27, + "bytes_read": 1194 + } +} diff --git a/parser/testdata/01356_initialize_aggregation/metadata.json b/parser/testdata/01356_initialize_aggregation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01356_initialize_aggregation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01356_initialize_aggregation/query.sql b/parser/testdata/01356_initialize_aggregation/query.sql new file mode 100644 index 000000000..539b27bbd --- /dev/null +++ b/parser/testdata/01356_initialize_aggregation/query.sql @@ -0,0 +1,5 @@ +SELECT uniqMerge(state) FROM (SELECT initializeAggregation('uniqState', number % 3) AS state FROM system.numbers LIMIT 10000); +SELECT topKWeightedMerge(10)(state) FROM (SELECT initializeAggregation('topKWeightedState(10)', number, number) AS state FROM system.numbers LIMIT 1000); +SELECT topKWeightedMerge(10)(state) FROM (SELECT initializeAggregation('topKWeightedState(10)', 1, number) AS state FROM system.numbers LIMIT 1000); +-- Use different weights to ensure the same ordering. 
Otherwise, with identical weights, the result depends on implementation details (and in debug mode is random) +SELECT topKWeightedMerge(10)(state) FROM (SELECT initializeAggregation('topKWeightedState(10)', number, number) AS state FROM system.numbers LIMIT 1000); diff --git a/parser/testdata/01356_state_resample/ast.json b/parser/testdata/01356_state_resample/ast.json new file mode 100644 index 000000000..a77ef2816 --- /dev/null +++ b/parser/testdata/01356_state_resample/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumResample (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_20" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_20" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_200" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001398002, + "rows_read": 21, + "bytes_read": 800 + } +} diff --git a/parser/testdata/01356_state_resample/metadata.json b/parser/testdata/01356_state_resample/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01356_state_resample/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01356_state_resample/query.sql b/parser/testdata/01356_state_resample/query.sql new file mode 100644 index 000000000..a3fb4d59a --- /dev/null +++ b/parser/testdata/01356_state_resample/query.sql @@ -0,0 +1,14 @@ +select sumResample(0, 20, 1)(number, number % 20) from numbers(200); +select arrayMap(x -> finalizeAggregation(x), state) from (select sumStateResample(0, 20, 1)(number, number % 20) as state from numbers(200)); +select arrayMap(x -> finalizeAggregation(x), state) from +( + select sumStateResample(0,20,1)(number, number%20) as state from numbers(200) group by number % 3 order by number % 3 +); + +select groupArrayResample(0, 20, 1)(number, number % 20) from numbers(50); +select arrayMap(x -> finalizeAggregation(x), state) from (select groupArrayStateResample(0, 20, 1)(number, number % 20) state from numbers(50)); + +select arrayMap(x -> finalizeAggregation(x), state) from +( + select sumStateResample(0, 20, 1)(number, number % 20) as state from remote('127.0.0.{1,2}', numbers(200)) +); diff --git a/parser/testdata/01356_view_threads/ast.json b/parser/testdata/01356_view_threads/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01356_view_threads/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01356_view_threads/metadata.json b/parser/testdata/01356_view_threads/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01356_view_threads/metadata.json 
@@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01356_view_threads/query.sql b/parser/testdata/01356_view_threads/query.sql new file mode 100644 index 000000000..de8141d3f --- /dev/null +++ b/parser/testdata/01356_view_threads/query.sql @@ -0,0 +1,15 @@ +-- Tags: no-parallel, no-fasttest +-- no-parallel: it checks the number of threads, which can be lowered in presence of other queries + +drop table if exists table_01356_view_threads; + +create view table_01356_view_threads as select number % 10 as g, sum(number) as s from numbers_mt(1000000) group by g; + +set log_queries = 1; +set max_threads = 16; +select g % 2 as gg, sum(s) from table_01356_view_threads group by gg order by gg; + +system flush logs query_log; +select length(thread_ids) >= 1 from system.query_log where current_database = currentDatabase() AND event_date >= today() - 1 and lower(query) like '%select g % 2 as gg, sum(s) from table_01356_view_threads group by gg order by gg%' and type = 'QueryFinish' order by query_start_time desc limit 1; + +drop table if exists table_01356_view_threads; diff --git a/parser/testdata/01356_wrong_filter-type_bug/ast.json b/parser/testdata/01356_wrong_filter-type_bug/ast.json new file mode 100644 index 000000000..81d97d982 --- /dev/null +++ b/parser/testdata/01356_wrong_filter-type_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001482584, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01356_wrong_filter-type_bug/metadata.json b/parser/testdata/01356_wrong_filter-type_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01356_wrong_filter-type_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01356_wrong_filter-type_bug/query.sql b/parser/testdata/01356_wrong_filter-type_bug/query.sql new file mode 100644 index 000000000..a9b79da0f --- /dev/null +++ b/parser/testdata/01356_wrong_filter-type_bug/query.sql @@ -0,0 +1,9 @@ +drop table if exists t0; + +CREATE TABLE t0 (`c0` String, `c1` Int32 CODEC(NONE), `c2` Int32) ENGINE = MergeTree() ORDER BY tuple(); +insert into t0 values ('a', 1, 2); + +SELECT t0.c2, t0.c1, t0.c0 FROM t0 PREWHERE t0.c0 ORDER BY ((t0.c2)>=(t0.c1)), (((- (((t0.c0)>(t0.c0))))) IS NULL) FORMAT TabSeparatedWithNamesAndTypes; -- {serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER} +SELECT t0.c2, t0.c1, t0.c0 FROM t0 WHERE t0.c0 ORDER BY ((t0.c2)>=(t0.c1)), (((- (((t0.c0)>(t0.c0))))) IS NULL) FORMAT TabSeparatedWithNamesAndTypes settings optimize_move_to_prewhere=0; -- {serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER} + +drop table if exists t0; diff --git a/parser/testdata/01357_result_rows/ast.json b/parser/testdata/01357_result_rows/ast.json new file mode 100644 index 000000000..101c7f485 --- /dev/null +++ b/parser/testdata/01357_result_rows/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001276403, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01357_result_rows/metadata.json b/parser/testdata/01357_result_rows/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01357_result_rows/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01357_result_rows/query.sql b/parser/testdata/01357_result_rows/query.sql new file mode 100644 index 000000000..510e2b7e7 --- /dev/null +++ b/parser/testdata/01357_result_rows/query.sql @@ -0,0 +1,5 @@ +set log_queries = 1; +select count() > 0 from system.settings; + +system flush logs query_log; +select result_rows, result_bytes >= 8 from system.query_log where current_database = currentDatabase() AND event_date >= today() - 1 and lower(query) like '%select count() > 0 from system.settings%' and type = 'QueryFinish' order by query_start_time desc limit 1; diff --git a/parser/testdata/01357_version_collapsing_attach_detach_zookeeper/ast.json b/parser/testdata/01357_version_collapsing_attach_detach_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01357_version_collapsing_attach_detach_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01357_version_collapsing_attach_detach_zookeeper/metadata.json b/parser/testdata/01357_version_collapsing_attach_detach_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01357_version_collapsing_attach_detach_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01357_version_collapsing_attach_detach_zookeeper/query.sql b/parser/testdata/01357_version_collapsing_attach_detach_zookeeper/query.sql new file mode 100644 index 000000000..8dcb9319c --- /dev/null +++ b/parser/testdata/01357_version_collapsing_attach_detach_zookeeper/query.sql @@ -0,0 +1,28 @@ +-- Tags: zookeeper, no-random-merge-tree-settings + +DROP TABLE IF EXISTS versioned_collapsing_table; + +CREATE TABLE versioned_collapsing_table( + d Date, + key1 UInt64, + key2 UInt32, + value String, + sign Int8, + version UInt16 +) +ENGINE = ReplicatedVersionedCollapsingMergeTree('/clickhouse/{database}/versioned_collapsing_table/{shard}', '{replica}', sign, version) +PARTITION BY d +ORDER BY (key1, key2); + +INSERT INTO versioned_collapsing_table VALUES (toDate('2019-10-10'), 1, 1, 'Hello', -1, 1); + +SELECT value FROM system.zookeeper WHERE path = '/clickhouse/' || currentDatabase() || '/versioned_collapsing_table/s1' and name = 'metadata'; + +SELECT COUNT() FROM versioned_collapsing_table; + +DETACH TABLE versioned_collapsing_table; +ATTACH TABLE versioned_collapsing_table; + +SELECT COUNT() FROM versioned_collapsing_table; + +DROP TABLE IF EXISTS versioned_collapsing_table; diff --git a/parser/testdata/01358_constexpr_constraint/ast.json b/parser/testdata/01358_constexpr_constraint/ast.json new file mode 100644 index 000000000..c38c8c3fe --- /dev/null +++ b/parser/testdata/01358_constexpr_constraint/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery constrained (children 1)" + }, + { + "explain": " Identifier constrained" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001229067, + "rows_read": 2, + "bytes_read": 75 + } +} diff --git a/parser/testdata/01358_constexpr_constraint/metadata.json b/parser/testdata/01358_constexpr_constraint/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01358_constexpr_constraint/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01358_constexpr_constraint/query.sql b/parser/testdata/01358_constexpr_constraint/query.sql new file mode 100644 index 000000000..280fd6bdc --- /dev/null +++ 
b/parser/testdata/01358_constexpr_constraint/query.sql @@ -0,0 +1,12 @@ +CREATE TEMPORARY TABLE constrained +( + `URL` String, + CONSTRAINT identity CHECK domainWithoutWWW(URL) = domainWithoutWWW(URL), + CONSTRAINT is_utf8 CHECK isValidUTF8(URL) +); + +insert into constrained values ('a'); + +DROP TEMPORARY TABLE constrained; +CREATE TEMPORARY TABLE constrained (x UInt8, CONSTRAINT bogus CHECK 0); +INSERT INTO constrained VALUES (1); -- { serverError VIOLATED_CONSTRAINT } diff --git a/parser/testdata/01358_mutation_delete_null_rows/ast.json b/parser/testdata/01358_mutation_delete_null_rows/ast.json new file mode 100644 index 000000000..fb2967f82 --- /dev/null +++ b/parser/testdata/01358_mutation_delete_null_rows/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '--------'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001147456, + "rows_read": 5, + "bytes_read": 179 + } +} diff --git a/parser/testdata/01358_mutation_delete_null_rows/metadata.json b/parser/testdata/01358_mutation_delete_null_rows/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01358_mutation_delete_null_rows/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01358_mutation_delete_null_rows/query.sql b/parser/testdata/01358_mutation_delete_null_rows/query.sql new file mode 100644 index 000000000..e8aabf1aa --- /dev/null +++ b/parser/testdata/01358_mutation_delete_null_rows/query.sql @@ -0,0 +1,26 @@ +select '--------'; +SELECT arrayJoin([0, 1, 3, NULL]) AS x, x = 0, if(x = 0, 'x=0', 'x<>0') ORDER BY x; + +select '--------'; +drop table if exists mutation_delete_null_rows; + +CREATE TABLE mutation_delete_null_rows +( + `EventDate` Date, + `CounterID` Nullable(String), + `UserID` Nullable(UInt32) +) +ENGINE = MergeTree() +ORDER BY EventDate; + +INSERT INTO mutation_delete_null_rows VALUES ('2020-01-01', '', 2)('2020-01-02', 'aaa', 0); +INSERT INTO mutation_delete_null_rows VALUES ('2020-01-03', '', 2)('2020-01-04', '', 2)('2020-01-05', NULL, 2)('2020-01-06', 'aaa', 0)('2020-01-07', 'aaa', 0)('2020-01-08', 'aaa', NULL); + +SELECT *,UserID = 0 as UserIDEquals0, if(UserID = 0, 'delete', 'leave') as verdict FROM mutation_delete_null_rows ORDER BY EventDate; + +ALTER TABLE mutation_delete_null_rows DELETE WHERE UserID = 0 SETTINGS mutations_sync=1; + +select '--------'; +SELECT * FROM mutation_delete_null_rows ORDER BY EventDate; + +drop table mutation_delete_null_rows; diff --git a/parser/testdata/01358_union_threads_bug/ast.json b/parser/testdata/01358_union_threads_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01358_union_threads_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01358_union_threads_bug/metadata.json b/parser/testdata/01358_union_threads_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01358_union_threads_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01358_union_threads_bug/query.sql b/parser/testdata/01358_union_threads_bug/query.sql new file mode 100644 index 000000000..c3ccc2ac7 --- /dev/null +++ b/parser/testdata/01358_union_threads_bug/query.sql @@ -0,0 +1,10 @@ +-- 
Tags: no-parallel, no-fasttest +-- no-parallel: it checks the number of threads, which can be lowered in the presence of other queries + +set log_queries = 1; +set max_threads = 16; + +SELECT count() FROM (SELECT number FROM numbers_mt(1000000) ORDER BY number DESC LIMIT 100 UNION ALL SELECT number FROM numbers_mt(1000000) ORDER BY number DESC LIMIT 100 UNION ALL SELECT number FROM numbers_mt(1000000) ORDER BY number DESC LIMIT 100); + +system flush logs query_log; +select length(thread_ids) >= 1 from system.query_log where current_database = currentDatabase() AND event_date >= today() - 1 and query like '%SELECT count() FROM (SELECT number FROM numbers_mt(1000000) ORDER BY number DESC LIMIT 100 UNION ALL SELECT number FROM numbers_mt(1000000) ORDER BY number DESC LIMIT 100 UNION ALL SELECT number FROM numbers_mt(1000000) ORDER BY number DESC LIMIT 100)%' and type = 'QueryFinish' order by query_start_time desc limit 1; diff --git a/parser/testdata/01359_codeql/ast.json b/parser/testdata/01359_codeql/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01359_codeql/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01359_codeql/metadata.json b/parser/testdata/01359_codeql/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01359_codeql/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01359_codeql/query.sql b/parser/testdata/01359_codeql/query.sql new file mode 100644 index 000000000..9f68661ee --- /dev/null +++ b/parser/testdata/01359_codeql/query.sql @@ -0,0 +1,2 @@ +-- In previous ClickHouse versions, the multiplication was done in the wrong type, leading to overflow. +SELECT round(avgWeighted(x, y)) FROM (SELECT 0xFFFFFFFF AS x, 1000000000 AS y UNION ALL SELECT 1 AS x, 1 AS y); diff --git a/parser/testdata/01359_geodistance_loop/ast.json b/parser/testdata/01359_geodistance_loop/ast.json new file mode 100644 index 000000000..a069b5310 --- /dev/null +++ b/parser/testdata/01359_geodistance_loop/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 1)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Function geoDistance (children 1)" + }, + { + "explain": "     ExpressionList (children 4)" + }, + { + "explain": "      Literal Float64_0" + }, + { + "explain": "      Literal Float64_0" + }, + { + "explain": "      Literal Float64_-inf" + }, + { + "explain": "      Literal Float64_1" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001201953, + "rows_read": 10, + "bytes_read": 361 + } +} diff --git a/parser/testdata/01359_geodistance_loop/metadata.json b/parser/testdata/01359_geodistance_loop/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01359_geodistance_loop/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01359_geodistance_loop/query.sql b/parser/testdata/01359_geodistance_loop/query.sql new file mode 100644 index 000000000..4c555a253 --- /dev/null +++ b/parser/testdata/01359_geodistance_loop/query.sql @@ -0,0 +1 @@ +SELECT geoDistance(0., 0., -inf, 1.); diff --git a/parser/testdata/01360_division_overflow/ast.json b/parser/testdata/01360_division_overflow/ast.json new file mode 100644 index 000000000..86e44cf6c --- /dev/null +++
b/parser/testdata/01360_division_overflow/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function intDiv (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toInt32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_4294967296" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.00107649, + "rows_read": 12, + "bytes_read": 480 + } +} diff --git a/parser/testdata/01360_division_overflow/metadata.json b/parser/testdata/01360_division_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01360_division_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01360_division_overflow/query.sql b/parser/testdata/01360_division_overflow/query.sql new file mode 100644 index 000000000..75601a365 --- /dev/null +++ b/parser/testdata/01360_division_overflow/query.sql @@ -0,0 +1,5 @@ +select intDiv(materialize(toInt32(1)), 0x100000000); +select intDiv(materialize(toInt32(1)), -0x100000000); +select intDiv(materialize(toInt32(1)), -9223372036854775808); +select materialize(toInt32(1)) % -9223372036854775808; +select value % -9223372036854775808 from (select toInt32(arrayJoin([3, 5])) value); diff --git a/parser/testdata/01360_materialized_view_with_join_on_query_log/ast.json b/parser/testdata/01360_materialized_view_with_join_on_query_log/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01360_materialized_view_with_join_on_query_log/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01360_materialized_view_with_join_on_query_log/metadata.json b/parser/testdata/01360_materialized_view_with_join_on_query_log/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01360_materialized_view_with_join_on_query_log/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01360_materialized_view_with_join_on_query_log/query.sql b/parser/testdata/01360_materialized_view_with_join_on_query_log/query.sql new file mode 100644 index 000000000..64da74718 --- /dev/null +++ b/parser/testdata/01360_materialized_view_with_join_on_query_log/query.sql @@ -0,0 +1,61 @@ +-- Tags: no-parallel, memory-engine +-- no-parallel: Slows down query_log + +DROP TABLE IF EXISTS slow_log; +DROP TABLE IF EXISTS expected_times; + +CREATE TABLE expected_times (QUERY_GROUP_ID String, max_query_duration_ms UInt64) Engine=Memory; +INSERT INTO expected_times VALUES('main_dashboard_top_query', 500), ('main_dashboard_bottom_query', 500); + +SET log_queries=1; +SELECT 1; +SYSTEM FLUSH LOGS query_log; + + +-- NOTE: can be rewritten using log_queries_min_query_duration_ms + +CREATE MATERIALIZED VIEW slow_log Engine=Memory AS +( + SELECT * FROM + ( + SELECT + extract(query,'/\\*\\s*QUERY_GROUP_ID:(.*?)\\s*\\*/') as QUERY_GROUP_ID, + * + FROM system.query_log + WHERE type<>1 and event_date >= yesterday() + ) as ql + INNER JOIN expected_times USING (QUERY_GROUP_ID) + WHERE query_duration_ms > 
max_query_duration_ms +); + +SELECT 1 /* QUERY_GROUP_ID:main_dashboard_top_query */; +SELECT 1 /* QUERY_GROUP_ID:main_dashboard_bottom_query */; + +SELECT 1 WHERE not ignore(sleep(0.520)) /* QUERY_GROUP_ID:main_dashboard_top_query */; +SELECT 1 WHERE not ignore(sleep(0.520)) /* QUERY_GROUP_ID:main_dashboard_bottom_query */; + +SET log_queries=0; +SYSTEM FLUSH LOGS query_log; + +SELECT '=== system.query_log ==='; + +SELECT + extract(query,'/\\*\\s*QUERY_GROUP_ID:(.*?)\\s*\\*/') as QUERY_GROUP_ID, + count() +FROM system.query_log +WHERE current_database = currentDatabase() AND type<>1 and event_date >= yesterday() and QUERY_GROUP_ID<>'' +GROUP BY QUERY_GROUP_ID +ORDER BY QUERY_GROUP_ID; + +SELECT '=== slowlog ==='; + +SELECT + QUERY_GROUP_ID, + count() +FROM slow_log +WHERE current_database = currentDatabase() +GROUP BY QUERY_GROUP_ID +ORDER BY QUERY_GROUP_ID; + +DROP TABLE slow_log; +DROP TABLE expected_times; diff --git a/parser/testdata/01361_buffer_table_flush_with_materialized_view/ast.json b/parser/testdata/01361_buffer_table_flush_with_materialized_view/ast.json new file mode 100644 index 000000000..bdbd48259 --- /dev/null +++ b/parser/testdata/01361_buffer_table_flush_with_materialized_view/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1_01361 (children 1)" + }, + { + "explain": " Identifier t1_01361" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000962487, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01361_buffer_table_flush_with_materialized_view/metadata.json b/parser/testdata/01361_buffer_table_flush_with_materialized_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01361_buffer_table_flush_with_materialized_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01361_buffer_table_flush_with_materialized_view/query.sql b/parser/testdata/01361_buffer_table_flush_with_materialized_view/query.sql new file mode 100644 index 000000000..424c38d55 --- /dev/null +++ b/parser/testdata/01361_buffer_table_flush_with_materialized_view/query.sql @@ -0,0 +1,37 @@ +DROP TABLE IF EXISTS t1_01361; +DROP TABLE IF EXISTS t2_01361; +DROP TABLE IF EXISTS mv1_01361; +DROP TABLE IF EXISTS b1_01361; + +CREATE TABLE t1_01361 ( + i UInt32, + time DateTime +) ENGINE = MergeTree() +PARTITION BY time +ORDER BY time; + +CREATE TABLE t2_01361 ( + i UInt32, + time DateTime +) ENGINE = MergeTree() +PARTITION BY time +ORDER BY time; + +CREATE MATERIALIZED VIEW mv1_01361 +TO t2_01361 +AS SELECT * FROM (SELECT * FROM t1_01361); + +CREATE TABLE b1_01361 AS t1_01361 +ENGINE = Buffer(currentDatabase(), t1_01361, 1, 0, 0, 1, 1, 1, 1); + +INSERT INTO b1_01361 VALUES (1, now()); +INSERT INTO b1_01361 VALUES (2, now()); + +SELECT count() FROM b1_01361; +SELECT count() FROM t1_01361; +SELECT count() FROM t2_01361; + +DROP TABLE IF EXISTS t1_01361; +DROP TABLE IF EXISTS t2_01361; +DROP TABLE IF EXISTS mv1_01361; +DROP TABLE IF EXISTS b1_01361; diff --git a/parser/testdata/01362_year_of_ISO8601_week_modificators_for_formatDateTime/ast.json b/parser/testdata/01362_year_of_ISO8601_week_modificators_for_formatDateTime/ast.json new file mode 100644 index 000000000..d98f877d0 --- /dev/null +++ b/parser/testdata/01362_year_of_ISO8601_week_modificators_for_formatDateTime/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery 
(children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function formatDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2010-01-01'" + }, + { + "explain": " Literal '%G'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001423815, + "rows_read": 10, + "bytes_read": 383 + } +} diff --git a/parser/testdata/01362_year_of_ISO8601_week_modificators_for_formatDateTime/metadata.json b/parser/testdata/01362_year_of_ISO8601_week_modificators_for_formatDateTime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01362_year_of_ISO8601_week_modificators_for_formatDateTime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01362_year_of_ISO8601_week_modificators_for_formatDateTime/query.sql b/parser/testdata/01362_year_of_ISO8601_week_modificators_for_formatDateTime/query.sql new file mode 100644 index 000000000..0a7cc047c --- /dev/null +++ b/parser/testdata/01362_year_of_ISO8601_week_modificators_for_formatDateTime/query.sql @@ -0,0 +1,10 @@ +SELECT formatDateTime(toDate('2010-01-01'), '%G'); -- Friday (first day of the year) attributed to week 53 of the previous year (2009) +SELECT formatDateTime(toDate('2010-01-01'), '%g'); +SELECT formatDateTime(toDate('2010-01-03'), '%G'); -- Sunday, last day attributed to week 53 of the previous year (2009) +SELECT formatDateTime(toDate('2010-01-03'), '%g'); +SELECT formatDateTime(toDate('2010-01-04'), '%G'); -- Monday, first day in the year attributed to week 01 of the current year (2010) +SELECT formatDateTime(toDate('2010-01-04'), '%g'); +SELECT formatDateTime(toDate('2018-12-31'), '%G'); -- Monday (last day of the year) attributed to 01 week of next year (2019) +SELECT formatDateTime(toDate('2018-12-31'), '%g'); +SELECT formatDateTime(toDate('2019-01-01'), '%G'); -- Tuesday (first day of the year) attributed to 01 week of this year (2019) +SELECT formatDateTime(toDate('2019-01-01'), '%g'); diff --git a/parser/testdata/01372_remote_table_function_empty_table/ast.json b/parser/testdata/01372_remote_table_function_empty_table/ast.json new file mode 100644 index 000000000..86b267b8e --- /dev/null +++ b/parser/testdata/01372_remote_table_function_empty_table/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '127..2'" + }, + { + "explain": " Literal 'a.'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.000850349, + "rows_read": 12, + "bytes_read": 448 + } +} diff --git a/parser/testdata/01372_remote_table_function_empty_table/metadata.json b/parser/testdata/01372_remote_table_function_empty_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- 
/dev/null +++ b/parser/testdata/01372_remote_table_function_empty_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01372_remote_table_function_empty_table/query.sql b/parser/testdata/01372_remote_table_function_empty_table/query.sql new file mode 100644 index 000000000..b2ae15e6e --- /dev/null +++ b/parser/testdata/01372_remote_table_function_empty_table/query.sql @@ -0,0 +1,4 @@ +SELECT * FROM remote('127..2', 'a.'); -- { serverError SYNTAX_ERROR } + +-- Clear cache to avoid future errors in the logs +SYSTEM DROP DNS CACHE diff --git a/parser/testdata/01372_wrong_order_by_removal/ast.json b/parser/testdata/01372_wrong_order_by_removal/ast.json new file mode 100644 index 000000000..6134a1035 --- /dev/null +++ b/parser/testdata/01372_wrong_order_by_removal/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery moving_sum_num (children 1)" + }, + { + "explain": " Identifier moving_sum_num" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001205051, + "rows_read": 2, + "bytes_read": 81 + } +} diff --git a/parser/testdata/01372_wrong_order_by_removal/metadata.json b/parser/testdata/01372_wrong_order_by_removal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01372_wrong_order_by_removal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01372_wrong_order_by_removal/query.sql b/parser/testdata/01372_wrong_order_by_removal/query.sql new file mode 100644 index 000000000..3ca5b63f3 --- /dev/null +++ b/parser/testdata/01372_wrong_order_by_removal/query.sql @@ -0,0 +1,9 @@ +CREATE TEMPORARY TABLE moving_sum_num +( + `k` String, + `dt` DateTime, + `v` UInt64 +); + +-- ORDER BY from subquery shall not be removed. 
+EXPLAIN SYNTAX SELECT k, groupArrayMovingSum(v) FROM (SELECT * FROM moving_sum_num ORDER BY k, dt) GROUP BY k ORDER BY k; diff --git a/parser/testdata/01373_is_zero_or_null/ast.json b/parser/testdata/01373_is_zero_or_null/ast.json new file mode 100644 index 000000000..6f96d51fd --- /dev/null +++ b/parser/testdata/01373_is_zero_or_null/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function not (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function isZeroOrNull (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3, NULL]" + }, + { + "explain": " Literal UInt64_3" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001733585, + "rows_read": 24, + "bytes_read": 1026 + } +} diff --git a/parser/testdata/01373_is_zero_or_null/metadata.json b/parser/testdata/01373_is_zero_or_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01373_is_zero_or_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01373_is_zero_or_null/query.sql b/parser/testdata/01373_is_zero_or_null/query.sql new file mode 100644 index 000000000..dcb4f9649 --- /dev/null +++ b/parser/testdata/01373_is_zero_or_null/query.sql @@ -0,0 +1,33 @@ +SELECT NOT x, isZeroOrNull(x) FROM (SELECT arrayJoin([1, 2, 3, NULL]) = 3 AS x); +SELECT '---'; +SELECT NOT x, isZeroOrNull(x) FROM (SELECT arrayJoin([1, 2, 3]) = 3 AS x); +SELECT '---'; +CREATE TEMPORARY TABLE test (x String NULL); +INSERT INTO test VALUES ('hello'), ('world'), ('xyz'), (NULL); + +SELECT * FROM test WHERE x != 'xyz'; +SELECT '---'; +SELECT * FROM test WHERE NOT x = 'xyz'; +SELECT '---'; +SELECT * FROM test WHERE isZeroOrNull(x = 'xyz'); +SELECT '---'; + +SELECT count() FROM +( + SELECT * FROM test WHERE x != 'xyz' + UNION ALL + SELECT * FROM test WHERE NOT x != 'xyz' +); + +SELECT '---'; + +SELECT count() FROM +( + SELECT * FROM test WHERE x != 'xyz' + UNION ALL + SELECT * FROM test WHERE isZeroOrNull(x != 'xyz') +); + +SELECT '---'; + +select isZeroOrNull(Null); diff --git a/parser/testdata/01373_summing_merge_tree_exclude_partition_key/ast.json b/parser/testdata/01373_summing_merge_tree_exclude_partition_key/ast.json new file mode 100644 index 000000000..f7255ee1b --- /dev/null +++ b/parser/testdata/01373_summing_merge_tree_exclude_partition_key/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + 
"data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001297557, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01373_summing_merge_tree_exclude_partition_key/metadata.json b/parser/testdata/01373_summing_merge_tree_exclude_partition_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01373_summing_merge_tree_exclude_partition_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01373_summing_merge_tree_exclude_partition_key/query.sql b/parser/testdata/01373_summing_merge_tree_exclude_partition_key/query.sql new file mode 100644 index 000000000..baff9df89 --- /dev/null +++ b/parser/testdata/01373_summing_merge_tree_exclude_partition_key/query.sql @@ -0,0 +1,36 @@ +SET optimize_on_insert = 0; + +DROP TABLE IF EXISTS tt_01373; + +CREATE TABLE tt_01373 +(a Int64, d Int64, val Int64) +ENGINE = SummingMergeTree PARTITION BY (a) ORDER BY (d) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +SYSTEM STOP MERGES tt_01373; + +INSERT INTO tt_01373 SELECT number%13, number%17, 1 from numbers(1000000); + +SELECT '---'; +SELECT count(*) FROM tt_01373; + +SELECT '---'; +SELECT count(*) FROM tt_01373 FINAL; + +SELECT '---'; +SELECT a, count() FROM tt_01373 FINAL GROUP BY a ORDER BY a; + +SYSTEM START MERGES tt_01373; + +OPTIMIZE TABLE tt_01373 FINAL; +SELECT '---'; +SELECT a, count() FROM tt_01373 GROUP BY a ORDER BY a; + +DROP TABLE IF EXISTS tt_01373; + +CREATE TABLE tt_01373_expr (a Int64, d Int64, val Int64) ENGINE = SummingMergeTree PARTITION BY (a % 2) ORDER BY (d + 0); +INSERT INTO tt_01373_expr VALUES (1, 1, 1), (1, 1, 2), (2, 1, 3); + +SELECT '---'; +SELECT *, _partition_id FROM tt_01373_expr ORDER BY (a, d, val); + +DROP TABLE IF EXISTS tt_01373_expr; diff --git a/parser/testdata/01373_summing_merge_tree_explicit_columns_definition/ast.json b/parser/testdata/01373_summing_merge_tree_explicit_columns_definition/ast.json new file mode 100644 index 000000000..118b22e51 --- /dev/null +++ b/parser/testdata/01373_summing_merge_tree_explicit_columns_definition/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tt_error_1373 (children 1)" + }, + { + "explain": " Identifier tt_error_1373" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001303296, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/01373_summing_merge_tree_explicit_columns_definition/metadata.json b/parser/testdata/01373_summing_merge_tree_explicit_columns_definition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01373_summing_merge_tree_explicit_columns_definition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01373_summing_merge_tree_explicit_columns_definition/query.sql b/parser/testdata/01373_summing_merge_tree_explicit_columns_definition/query.sql new file mode 100644 index 000000000..406b91502 --- /dev/null +++ b/parser/testdata/01373_summing_merge_tree_explicit_columns_definition/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS tt_error_1373; + +CREATE TABLE tt_error_1373 +( a Int64, d Int64, val Int64 ) +ENGINE = SummingMergeTree((a, val)) PARTITION BY (a) ORDER BY (d); -- { serverError BAD_ARGUMENTS } + +CREATE TABLE tt_error_1373 +( a Int64, d Int64, val Int64 ) +ENGINE = SummingMergeTree((a, val)) PARTITION BY (a % 5) ORDER BY (d); -- { serverError BAD_ARGUMENTS } + 
+CREATE TABLE tt_error_1373 +( a Int64, d Int64, val Int64 ) + ENGINE = SummingMergeTree((d, val)) PARTITION BY (a) ORDER BY (d); -- { serverError BAD_ARGUMENTS } + +CREATE TABLE tt_error_1373 +( a Int64, d Int64, val Int64 ) + ENGINE = SummingMergeTree((d, val)) PARTITION BY (a) ORDER BY (d % 5); -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS tt_error_1373; \ No newline at end of file diff --git a/parser/testdata/01374_if_nullable_filimonov/ast.json b/parser/testdata/01374_if_nullable_filimonov/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01374_if_nullable_filimonov/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01374_if_nullable_filimonov/metadata.json b/parser/testdata/01374_if_nullable_filimonov/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01374_if_nullable_filimonov/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01374_if_nullable_filimonov/query.sql b/parser/testdata/01374_if_nullable_filimonov/query.sql new file mode 100644 index 000000000..0fadfb85f --- /dev/null +++ b/parser/testdata/01374_if_nullable_filimonov/query.sql @@ -0,0 +1,9 @@ +SELECT + UserID, + UserID = 0, + if(UserID = 0, 'delete', 'leave') +FROM VALUES('UserID Nullable(UInt8)', (2), (0), (NULL)); + +SELECT '---'; + +SELECT arrayJoin([0, 1, 3, NULL]) AS x, x = 0, if(x = 0, 'Definitely x = 0', 'We cannot say that x = 0'); diff --git a/parser/testdata/01375_GROUP_BY_injective_elimination_dictGet_BAD_ARGUMENTS/ast.json b/parser/testdata/01375_GROUP_BY_injective_elimination_dictGet_BAD_ARGUMENTS/ast.json new file mode 100644 index 000000000..2d59a9c31 --- /dev/null +++ b/parser/testdata/01375_GROUP_BY_injective_elimination_dictGet_BAD_ARGUMENTS/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function dictGetString (alias country) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'default'" + }, + { + "explain": " Literal '.countryId'" + }, + { + "explain": " Literal 'country'" + }, + { + "explain": " Function toUInt64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier country" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001293269, + "rows_read": 22, + "bytes_read": 875 + } +} diff --git a/parser/testdata/01375_GROUP_BY_injective_elimination_dictGet_BAD_ARGUMENTS/metadata.json b/parser/testdata/01375_GROUP_BY_injective_elimination_dictGet_BAD_ARGUMENTS/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01375_GROUP_BY_injective_elimination_dictGet_BAD_ARGUMENTS/metadata.json @@ -0,0 +1 @@ 
+{"todo": true} diff --git a/parser/testdata/01375_GROUP_BY_injective_elimination_dictGet_BAD_ARGUMENTS/query.sql b/parser/testdata/01375_GROUP_BY_injective_elimination_dictGet_BAD_ARGUMENTS/query.sql new file mode 100644 index 000000000..df228d4e8 --- /dev/null +++ b/parser/testdata/01375_GROUP_BY_injective_elimination_dictGet_BAD_ARGUMENTS/query.sql @@ -0,0 +1 @@ +SELECT dictGetString(concat('default', '.countryId'), 'country', toUInt64(number)) AS country FROM numbers(2) GROUP BY country; -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/01375_compact_parts_codecs/ast.json b/parser/testdata/01375_compact_parts_codecs/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01375_compact_parts_codecs/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01375_compact_parts_codecs/metadata.json b/parser/testdata/01375_compact_parts_codecs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01375_compact_parts_codecs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01375_compact_parts_codecs/query.sql b/parser/testdata/01375_compact_parts_codecs/query.sql new file mode 100644 index 000000000..43929203a --- /dev/null +++ b/parser/testdata/01375_compact_parts_codecs/query.sql @@ -0,0 +1,54 @@ +-- Tags: no-parallel, no-random-merge-tree-settings + +DROP TABLE IF EXISTS codecs; + +CREATE TABLE codecs (id UInt32, val UInt32, s String) + ENGINE = MergeTree ORDER BY id + SETTINGS min_rows_for_wide_part = 10000, ratio_of_defaults_for_sparse_serialization = 1, serialization_info_version = 'basic'; +INSERT INTO codecs SELECT number, number, toString(number) FROM numbers(1000); +SELECT sum(data_compressed_bytes), sum(data_uncompressed_bytes) + FROM system.parts + WHERE table = 'codecs' AND database = currentDatabase(); + +SELECT sum(id), sum(val), max(s) FROM codecs; + +DETACH TABLE codecs; +ATTACH table codecs; + +SELECT sum(id), sum(val), max(s) FROM codecs; + +DROP TABLE codecs; + +CREATE TABLE codecs (id UInt32 CODEC(NONE), val UInt32 CODEC(NONE), s String CODEC(NONE)) + ENGINE = MergeTree ORDER BY id + SETTINGS min_rows_for_wide_part = 10000, ratio_of_defaults_for_sparse_serialization = 1, serialization_info_version = 'basic'; +INSERT INTO codecs SELECT number, number, toString(number) FROM numbers(1000); +SELECT sum(data_compressed_bytes), sum(data_uncompressed_bytes) + FROM system.parts + WHERE table = 'codecs' AND database = currentDatabase(); + +SELECT sum(id), sum(val), max(s) FROM codecs; + +DETACH TABLE codecs; +ATTACH table codecs; + +SELECT sum(id), sum(val), max(s) FROM codecs; + +DROP TABLE codecs; + +CREATE TABLE codecs (id UInt32, val UInt32 CODEC(Delta, ZSTD), s String CODEC(ZSTD)) + ENGINE = MergeTree ORDER BY id + SETTINGS min_rows_for_wide_part = 10000, ratio_of_defaults_for_sparse_serialization = 1, serialization_info_version = 'basic'; +INSERT INTO codecs SELECT number, number, toString(number) FROM numbers(1000); +SELECT sum(data_compressed_bytes), sum(data_uncompressed_bytes) + FROM system.parts + WHERE table = 'codecs' AND database = currentDatabase(); + +SELECT sum(id), sum(val), max(s) FROM codecs; + +DETACH TABLE codecs; +ATTACH table codecs; + +SELECT sum(id), sum(val), max(s) FROM codecs; + +DROP TABLE codecs; diff --git a/parser/testdata/01375_null_issue_3767/ast.json b/parser/testdata/01375_null_issue_3767/ast.json new file mode 100644 index 000000000..c413986ad --- /dev/null +++ b/parser/testdata/01375_null_issue_3767/ast.json @@ -0,0 
+1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery null_issue_3767 (children 1)" + }, + { + "explain": " Identifier null_issue_3767" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001200176, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/01375_null_issue_3767/metadata.json b/parser/testdata/01375_null_issue_3767/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01375_null_issue_3767/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01375_null_issue_3767/query.sql b/parser/testdata/01375_null_issue_3767/query.sql new file mode 100644 index 000000000..88b18e001 --- /dev/null +++ b/parser/testdata/01375_null_issue_3767/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS null_issue_3767; + +CREATE TABLE null_issue_3767 (value Nullable(String)) ENGINE=Memory; + +INSERT INTO null_issue_3767 (value) VALUES ('A String'), (NULL); + +SELECT value FROM null_issue_3767 WHERE value NOT IN ('A String'); + +DROP TABLE null_issue_3767; diff --git a/parser/testdata/01375_storage_file_write_prefix_csv_with_names/ast.json b/parser/testdata/01375_storage_file_write_prefix_csv_with_names/ast.json new file mode 100644 index 000000000..2ea8ae116 --- /dev/null +++ b/parser/testdata/01375_storage_file_write_prefix_csv_with_names/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tmp_01375 (children 1)" + }, + { + "explain": " Identifier tmp_01375" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001594794, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/01375_storage_file_write_prefix_csv_with_names/metadata.json b/parser/testdata/01375_storage_file_write_prefix_csv_with_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01375_storage_file_write_prefix_csv_with_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01375_storage_file_write_prefix_csv_with_names/query.sql b/parser/testdata/01375_storage_file_write_prefix_csv_with_names/query.sql new file mode 100644 index 000000000..b335db03f --- /dev/null +++ b/parser/testdata/01375_storage_file_write_prefix_csv_with_names/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS tmp_01375; +DROP TABLE IF EXISTS table_csv_01375; + +CREATE TABLE tmp_01375 (n UInt32, s String) ENGINE = Memory; +CREATE TABLE table_csv_01375 AS tmp_01375 ENGINE = File(CSVWithNames); + +INSERT INTO table_csv_01375 SELECT number as n, toString(n) as s FROM numbers(10); +INSERT INTO table_csv_01375 SELECT number as n, toString(n) as s FROM numbers(10); +INSERT INTO table_csv_01375 SELECT number as n, toString(n) as s FROM numbers(10); + +SELECT * FROM table_csv_01375; + +DROP TABLE IF EXISTS tmp_01375; +DROP TABLE IF EXISTS table_csv_01375; diff --git a/parser/testdata/01375_storage_file_write_prefix_tsv_with_names/ast.json b/parser/testdata/01375_storage_file_write_prefix_tsv_with_names/ast.json new file mode 100644 index 000000000..be7472b85 --- /dev/null +++ b/parser/testdata/01375_storage_file_write_prefix_tsv_with_names/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tmp_01375 (children 1)" + }, + { + "explain": " Identifier tmp_01375" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001547443, + "rows_read": 2, + 
"bytes_read": 70 + } +} diff --git a/parser/testdata/01375_storage_file_write_prefix_tsv_with_names/metadata.json b/parser/testdata/01375_storage_file_write_prefix_tsv_with_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01375_storage_file_write_prefix_tsv_with_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01375_storage_file_write_prefix_tsv_with_names/query.sql b/parser/testdata/01375_storage_file_write_prefix_tsv_with_names/query.sql new file mode 100644 index 000000000..55a97eb6e --- /dev/null +++ b/parser/testdata/01375_storage_file_write_prefix_tsv_with_names/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS tmp_01375; +DROP TABLE IF EXISTS table_tsv_01375; + +CREATE TABLE tmp_01375 (n UInt32, s String) ENGINE = Memory; +CREATE TABLE table_tsv_01375 AS tmp_01375 ENGINE = File(TSVWithNames); + +INSERT INTO table_tsv_01375 SELECT number as n, toString(n) as s FROM numbers(10); +INSERT INTO table_tsv_01375 SELECT number as n, toString(n) as s FROM numbers(10); +INSERT INTO table_tsv_01375 SELECT number as n, toString(n) as s FROM numbers(10); + +SELECT * FROM table_tsv_01375; + +DROP TABLE IF EXISTS tmp_01375; +DROP TABLE IF EXISTS table_tsv_01375; diff --git a/parser/testdata/01376_GROUP_BY_injective_elimination_dictGet/ast.json b/parser/testdata/01376_GROUP_BY_injective_elimination_dictGet/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01376_GROUP_BY_injective_elimination_dictGet/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01376_GROUP_BY_injective_elimination_dictGet/metadata.json b/parser/testdata/01376_GROUP_BY_injective_elimination_dictGet/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01376_GROUP_BY_injective_elimination_dictGet/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01376_GROUP_BY_injective_elimination_dictGet/query.sql b/parser/testdata/01376_GROUP_BY_injective_elimination_dictGet/query.sql new file mode 100644 index 000000000..08ca9ed3c --- /dev/null +++ b/parser/testdata/01376_GROUP_BY_injective_elimination_dictGet/query.sql @@ -0,0 +1,45 @@ +-- Tags: no-parallel + +-- https://github.com/ClickHouse/ClickHouse/issues/11469 +SELECT dictGet('default.countryId', 'country', toUInt64(number)) AS country FROM numbers(2) GROUP BY country; -- { serverError BAD_ARGUMENTS } + + +-- with real dictionary +DROP TABLE IF EXISTS dictdb_01376.table_for_dict; +DROP DICTIONARY IF EXISTS dictdb_01376.dict_exists; +DROP DATABASE IF EXISTS dictdb_01376; + +CREATE DATABASE dictdb_01376; + +CREATE TABLE dictdb_01376.table_for_dict +( + key_column UInt64, + value Float64 +) +ENGINE = Memory(); + +INSERT INTO dictdb_01376.table_for_dict VALUES (1, 1.1); + +CREATE DICTIONARY IF NOT EXISTS dictdb_01376.dict_exists +( + key_column UInt64, + value Float64 DEFAULT 77.77 INJECTIVE +) +PRIMARY KEY key_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' DB 'dictdb_01376')) +LIFETIME(1) +LAYOUT(FLAT()); + +SELECT dictGet('dictdb_01376.dict_exists', 'value', toUInt64(1)) as val FROM numbers(2) GROUP BY val; + +EXPLAIN SYNTAX SELECT dictGet('dictdb_01376.dict_exists', 'value', toUInt64(1)) as val FROM numbers(2) GROUP BY val; + +EXPLAIN QUERY TREE +SELECT dictGet('dictdb_01376.dict_exists', 'value', number) as val +FROM numbers(2) +GROUP BY val +SETTINGS enable_analyzer = 1; + +DROP DICTIONARY dictdb_01376.dict_exists; +DROP TABLE 
dictdb_01376.table_for_dict; +DROP DATABASE dictdb_01376; diff --git a/parser/testdata/01376_array_fill_empty/ast.json b/parser/testdata/01376_array_fill_empty/ast.json new file mode 100644 index 000000000..4c65e35de --- /dev/null +++ b/parser/testdata/01376_array_fill_empty/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayFill (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001374171, + "rows_read": 17, + "bytes_read": 654 + } +} diff --git a/parser/testdata/01376_array_fill_empty/metadata.json b/parser/testdata/01376_array_fill_empty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01376_array_fill_empty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01376_array_fill_empty/query.sql b/parser/testdata/01376_array_fill_empty/query.sql new file mode 100644 index 000000000..38eea8a51 --- /dev/null +++ b/parser/testdata/01376_array_fill_empty/query.sql @@ -0,0 +1,4 @@ +SELECT arrayFill(x -> (x < 10), []); +SELECT arrayFill(x -> (x < 10), emptyArrayUInt8()); +SELECT arrayFill(x -> 1, []); +SELECT arrayFill(x -> 0, []); diff --git a/parser/testdata/01376_null_logical/ast.json b/parser/testdata/01376_null_logical/ast.json new file mode 100644 index 000000000..edd02b5fb --- /dev/null +++ b/parser/testdata/01376_null_logical/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function or (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001173907, + "rows_read": 8, + "bytes_read": 281 + } +} diff --git a/parser/testdata/01376_null_logical/metadata.json b/parser/testdata/01376_null_logical/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01376_null_logical/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01376_null_logical/query.sql b/parser/testdata/01376_null_logical/query.sql new file mode 100644 index 000000000..d8c7a3224 --- /dev/null +++ b/parser/testdata/01376_null_logical/query.sql @@ -0,0 +1,11 @@ +SELECT NULL OR 1; +SELECT materialize(NULL) OR materialize(1); + +SELECT NULL AND 0; +SELECT materialize(NULL) AND materialize(0); + +SELECT NULL OR 0; +SELECT materialize(NULL) OR materialize(0); + 
+SELECT NULL AND 1; +SELECT materialize(NULL) AND materialize(1); diff --git a/parser/testdata/01377_supertype_low_cardinality/ast.json b/parser/testdata/01377_supertype_low_cardinality/ast.json new file mode 100644 index 000000000..576a78857 --- /dev/null +++ b/parser/testdata/01377_supertype_low_cardinality/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'hello'" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'hello'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001622423, + "rows_read": 10, + "bytes_read": 367 + } +} diff --git a/parser/testdata/01377_supertype_low_cardinality/metadata.json b/parser/testdata/01377_supertype_low_cardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01377_supertype_low_cardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01377_supertype_low_cardinality/query.sql b/parser/testdata/01377_supertype_low_cardinality/query.sql new file mode 100644 index 000000000..9e7ed1f98 --- /dev/null +++ b/parser/testdata/01377_supertype_low_cardinality/query.sql @@ -0,0 +1,71 @@ +SELECT 'hello' UNION ALL SELECT toLowCardinality('hello'); +SELECT toTypeName(x) FROM (SELECT 'hello' AS x UNION ALL SELECT toLowCardinality('hello')); + +SELECT '---'; + +create temporary table t1(a String); +create temporary table t2(a LowCardinality(String)); +select a from t1 union all select a from t2; + +SELECT '---'; + +CREATE TEMPORARY TABLE a (x String); +CREATE TEMPORARY TABLE b (x LowCardinality(String)); +CREATE TEMPORARY TABLE c (x Nullable(String)); +CREATE TEMPORARY TABLE d (x LowCardinality(Nullable(String))); + +INSERT INTO a VALUES ('hello'); +INSERT INTO b VALUES ('hello'); +INSERT INTO c VALUES ('hello'); +INSERT INTO d VALUES ('hello'); + +SELECT x FROM a; +SELECT x FROM b; +SELECT x FROM c; +SELECT x FROM d; + +SELECT '---'; + +SELECT x FROM a UNION ALL SELECT x FROM b; +SELECT '-'; +SELECT x FROM a UNION ALL SELECT x FROM c; +SELECT '-'; +SELECT x FROM a UNION ALL SELECT x FROM d; +SELECT '-'; +SELECT x FROM b UNION ALL SELECT x FROM a; +SELECT '-'; +SELECT x FROM b UNION ALL SELECT x FROM c; +SELECT '-'; +SELECT x FROM b UNION ALL SELECT x FROM d; +SELECT '-'; +SELECT x FROM c UNION ALL SELECT x FROM a; +SELECT '-'; +SELECT x FROM c UNION ALL SELECT x FROM b; +SELECT '-'; +SELECT x FROM c UNION ALL SELECT x FROM d; +SELECT '-'; +SELECT x FROM d UNION ALL SELECT x FROM a; +SELECT '-'; +SELECT x FROM d UNION ALL SELECT x FROM c; +SELECT '-'; +SELECT x FROM d UNION ALL SELECT x FROM b; + +SELECT '---'; + +SELECT x FROM b UNION ALL SELECT x FROM c UNION ALL SELECT x FROM d; +SELECT '-'; +SELECT x FROM a UNION ALL SELECT x FROM c UNION ALL SELECT x FROM d; +SELECT '-'; +SELECT x FROM a UNION ALL SELECT x FROM b UNION ALL SELECT x FROM d; +SELECT '-'; +SELECT x FROM a UNION ALL SELECT x FROM b UNION ALL SELECT x FROM c; + +SELECT '---'; + +SELECT x FROM a UNION ALL SELECT x FROM b UNION ALL SELECT x FROM c UNION ALL SELECT x FROM d; + +SELECT '---'; + +SELECT [CAST('abc' AS 
LowCardinality(String)), CAST('def' AS Nullable(String))]; +SELECT [CAST('abc' AS LowCardinality(String)), CAST('def' AS FixedString(3))]; +SELECT [CAST('abc' AS LowCardinality(String)), CAST('def' AS LowCardinality(FixedString(3)))]; diff --git a/parser/testdata/01378_alter_rename_with_ttl_zookeeper/ast.json b/parser/testdata/01378_alter_rename_with_ttl_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01378_alter_rename_with_ttl_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01378_alter_rename_with_ttl_zookeeper/metadata.json b/parser/testdata/01378_alter_rename_with_ttl_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01378_alter_rename_with_ttl_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01378_alter_rename_with_ttl_zookeeper/query.sql b/parser/testdata/01378_alter_rename_with_ttl_zookeeper/query.sql new file mode 100644 index 000000000..b6982910a --- /dev/null +++ b/parser/testdata/01378_alter_rename_with_ttl_zookeeper/query.sql @@ -0,0 +1,28 @@ +-- Tags: zookeeper + +DROP TABLE IF EXISTS table_rename_with_ttl; + +CREATE TABLE table_rename_with_ttl +( + date1 Date, + value1 String +) +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test/table_rename_with_ttl_01378', '1') +ORDER BY tuple(); + +INSERT INTO table_rename_with_ttl SELECT toDate('2018-10-01') + number % 3, toString(number) from numbers(9); + +SELECT count() FROM table_rename_with_ttl; + +SET materialize_ttl_after_modify = 0; +ALTER TABLE table_rename_with_ttl MODIFY TTL date1 + INTERVAL 1 MONTH; + +SELECT count() FROM table_rename_with_ttl; + +ALTER TABLE table_rename_with_ttl RENAME COLUMN date1 TO renamed_date1; + +ALTER TABLE table_rename_with_ttl materialize TTL settings mutations_sync=2; + +SELECT count() FROM table_rename_with_ttl; + +DROP TABLE IF EXISTS table_rename_with_ttl; diff --git a/parser/testdata/01379_with_fill_several_columns/ast.json b/parser/testdata/01379_with_fill_several_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01379_with_fill_several_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01379_with_fill_several_columns/metadata.json b/parser/testdata/01379_with_fill_several_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01379_with_fill_several_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01379_with_fill_several_columns/query.sql b/parser/testdata/01379_with_fill_several_columns/query.sql new file mode 100644 index 000000000..6bdf7d41b --- /dev/null +++ b/parser/testdata/01379_with_fill_several_columns/query.sql @@ -0,0 +1,21 @@ +SELECT + toDate(toDateTime((number * 10) * 86400, 'Asia/Istanbul')) AS d1, + toDate(toDateTime(number * 86400, 'Asia/Istanbul')) AS d2, + 'original' AS source +FROM numbers(10) +WHERE (number % 3) = 1 +ORDER BY + d2 WITH FILL, + d1 WITH FILL STEP 5; + +SELECT '==============='; + +SELECT + toDate(toDateTime((number * 10) * 86400, 'Asia/Istanbul')) AS d1, + toDate(toDateTime(number * 86400, 'Asia/Istanbul')) AS d2, + 'original' AS source +FROM numbers(10) +WHERE (number % 3) = 1 +ORDER BY + d1 WITH FILL STEP 5, + d2 WITH FILL; diff --git a/parser/testdata/01380_coded_delta_exception_code/ast.json b/parser/testdata/01380_coded_delta_exception_code/ast.json new file mode 100644 index 000000000..87b98bc97 --- /dev/null +++ 
b/parser/testdata/01380_coded_delta_exception_code/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery delta_codec_synthetic (children 3)" + }, + { + "explain": " Identifier delta_codec_synthetic" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration id (children 2)" + }, + { + "explain": " DataType Decimal (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_38" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Function CODEC (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function Delta" + }, + { + "explain": " Function ZSTD (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_22" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001209356, + "rows_read": 20, + "bytes_read": 749 + } +} diff --git a/parser/testdata/01380_coded_delta_exception_code/metadata.json b/parser/testdata/01380_coded_delta_exception_code/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01380_coded_delta_exception_code/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01380_coded_delta_exception_code/query.sql b/parser/testdata/01380_coded_delta_exception_code/query.sql new file mode 100644 index 000000000..5312a23c1 --- /dev/null +++ b/parser/testdata/01380_coded_delta_exception_code/query.sql @@ -0,0 +1,6 @@ +CREATE TABLE delta_codec_synthetic (`id` Decimal(38, 10) CODEC(Delta, ZSTD(22))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE delta_codec_synthetic (`id` Decimal(38, 10) CODEC(DoubleDelta, ZSTD(22))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE delta_codec_synthetic (`id` Decimal(38, 10) CODEC(Gorilla, ZSTD(22))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } + +CREATE TABLE delta_codec_synthetic (`id` UInt64 CODEC(DoubleDelta(3), ZSTD(22))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_CODEC_PARAMETER } +CREATE TABLE delta_codec_synthetic (`id` UInt64 CODEC(Gorilla('hello, world'), ZSTD(22))) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_CODEC_PARAMETER } diff --git a/parser/testdata/01380_nullable_state/ast.json b/parser/testdata/01380_nullable_state/ast.json new file mode 100644 index 000000000..2ff64173a --- /dev/null +++ b/parser/testdata/01380_nullable_state/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function hex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function uniqState (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + 
}, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001939197, + "rows_read": 13, + "bytes_read": 535 + } +} diff --git a/parser/testdata/01380_nullable_state/metadata.json b/parser/testdata/01380_nullable_state/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01380_nullable_state/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01380_nullable_state/query.sql b/parser/testdata/01380_nullable_state/query.sql new file mode 100644 index 000000000..6841a6ce6 --- /dev/null +++ b/parser/testdata/01380_nullable_state/query.sql @@ -0,0 +1,26 @@ +SELECT hex(toString(uniqState(toNullable(1)))) WITH TOTALS; +SELECT '---'; +SELECT hex(toString(uniqState(x))) FROM (SELECT toNullable(1) AS x) WITH TOTALS; +SELECT '---'; +SELECT DISTINCT hex(toString(uniqState(x))) FROM (SELECT materialize(1) AS k, toNullable(1) AS x FROM numbers(1)) GROUP BY k WITH TOTALS ORDER BY k; +SELECT '---'; +SELECT DISTINCT hex(toString(uniqState(x))) FROM (SELECT materialize(1) AS k, toNullable(1) AS x FROM numbers(10)) GROUP BY k WITH TOTALS ORDER BY k; +SELECT '---'; +SELECT DISTINCT hex(toString(uniqState(x))) FROM (SELECT intDiv(number, 3) AS k, toNullable(1) AS x FROM numbers(10)) GROUP BY k WITH TOTALS ORDER BY k; +SELECT '---'; +SELECT DISTINCT hex(toString(uniqState(x))) FROM (SELECT intDiv(number, 3) AS k, toNullable(1) AS x FROM system.numbers LIMIT 100000) GROUP BY k WITH TOTALS ORDER BY k; +SELECT '---'; +SELECT DISTINCT arrayUniq(finalizeAggregation(groupArrayState(x))) FROM (SELECT intDiv(number, 3) AS k, toNullable(1) AS x FROM system.numbers LIMIT 100000) GROUP BY k WITH TOTALS ORDER BY k; +SELECT '---'; +SELECT k, finalizeAggregation(uniqState(x)) FROM (SELECT intDiv(number, 3) AS k, toNullable(1) AS x FROM system.numbers LIMIT 100000) GROUP BY k WITH TOTALS ORDER BY k LIMIT 5; +SELECT '---'; +SELECT k, finalizeAggregation(uniqState(x)) FROM (WITH toNullable(number = 3 ? 3 : 1) AS d SELECT intDiv(number, 3) AS k, number % d AS x FROM system.numbers LIMIT 100000) GROUP BY k WITH TOTALS ORDER BY k LIMIT 5; +SELECT '---'; +SELECT k, finalizeAggregation(quantilesTimingState(0.5)(x)) FROM (WITH toNullable(number = 3 ? 
3 : 1) AS d SELECT intDiv(number, 3) AS k, number % d AS x FROM system.numbers LIMIT 100000) GROUP BY k WITH TOTALS ORDER BY k LIMIT 5; +SELECT '---'; +SELECT k, finalizeAggregation(quantilesTimingState(0.5)(x)) FROM (SELECT intDiv(number, if(number = 9223372036854775807, -2, if(number = 3, number = if(number = 1, NULL, 3), 1)) AS d) AS k, number % d AS x FROM system.numbers LIMIT 100000) GROUP BY k WITH TOTALS ORDER BY k ASC LIMIT 5; +SELECT '---'; +SELECT DISTINCT hex(toString(uniqState(x))) FROM (SELECT materialize(1) AS k, toNullable(1) AS x FROM numbers(1)) GROUP BY k WITH ROLLUP ORDER BY k; +SELECT '---'; +SELECT DISTINCT hex(toString(uniqState(x))) FROM (SELECT materialize(1) AS k, toNullable(1) AS x FROM numbers(1)) GROUP BY k WITH CUBE ORDER BY k; +SELECT '---'; diff --git a/parser/testdata/01381_for_each_with_states/ast.json b/parser/testdata/01381_for_each_with_states/ast.json new file mode 100644 index 000000000..4961aee2f --- /dev/null +++ b/parser/testdata/01381_for_each_with_states/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function hex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function uniqStateForEach (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, NULL]" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001668955, + "rows_read": 11, + "bytes_read": 458 + } +} diff --git a/parser/testdata/01381_for_each_with_states/metadata.json b/parser/testdata/01381_for_each_with_states/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01381_for_each_with_states/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01381_for_each_with_states/query.sql b/parser/testdata/01381_for_each_with_states/query.sql new file mode 100644 index 000000000..7286ef2cb --- /dev/null +++ b/parser/testdata/01381_for_each_with_states/query.sql @@ -0,0 +1,9 @@ +SELECT hex(toString(uniqStateForEach([1, NULL]))); +SELECT hex(toString(uniqStateForEachState([1, NULL]))); +SELECT arrayMap(x -> hex(toString(x)), finalizeAggregation(uniqStateForEachState([1, NULL]))); +SELECT arrayMap(x -> finalizeAggregation(x), finalizeAggregation(uniqStateForEachState([1, NULL]))); + +SELECT hex(toString(uniqStateForEach([1, NULL]))) WITH TOTALS; +SELECT hex(toString(uniqStateForEachState([1, NULL]))) WITH TOTALS; +SELECT arrayMap(x -> hex(toString(x)), finalizeAggregation(uniqStateForEachState([1, NULL]))) WITH TOTALS; +SELECT arrayMap(x -> finalizeAggregation(x), finalizeAggregation(uniqStateForEachState([1, NULL]))) WITH TOTALS; diff --git a/parser/testdata/01383_remote_ambiguous_column_shard/ast.json b/parser/testdata/01383_remote_ambiguous_column_shard/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01383_remote_ambiguous_column_shard/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01383_remote_ambiguous_column_shard/metadata.json b/parser/testdata/01383_remote_ambiguous_column_shard/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01383_remote_ambiguous_column_shard/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01383_remote_ambiguous_column_shard/query.sql b/parser/testdata/01383_remote_ambiguous_column_shard/query.sql new file mode 100644 index 000000000..b4c018cac --- /dev/null +++ b/parser/testdata/01383_remote_ambiguous_column_shard/query.sql @@ -0,0 +1,13 @@ +-- Tags: shard, no-parallel + +DROP DATABASE IF EXISTS test_01383; +CREATE DATABASE test_01383; + +create table test_01383.fact (id1 Int64, id2 Int64, value Int64) ENGINE = MergeTree() ORDER BY id1; +create table test_01383.dimension (id1 Int64, name String) ENGINE = MergeTree() ORDER BY id1; +insert into test_01383.fact values (1,2,10),(2,2,10),(3,3,10),(4,3,10); +insert into test_01383.dimension values (1,'name_1'),(2,'name_1'),(3,'name_3'),(4, 'name_4'); + +SELECT f.id1 AS ID, d.name AS Name, sum(f.value) FROM remote('127.0.0.{1,2,3}', test_01383.fact) AS f LEFT JOIN test_01383.dimension AS d ON f.id1 = d.id1 WHERE f.id1 = f.id2 GROUP BY ID, Name ORDER BY ID; + +DROP DATABASE test_01383; diff --git a/parser/testdata/01384_bloom_filter_bad_arguments/ast.json b/parser/testdata/01384_bloom_filter_bad_arguments/ast.json new file mode 100644 index 000000000..2f51d45d0 --- /dev/null +++ b/parser/testdata/01384_bloom_filter_bad_arguments/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001372524, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01384_bloom_filter_bad_arguments/metadata.json b/parser/testdata/01384_bloom_filter_bad_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01384_bloom_filter_bad_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01384_bloom_filter_bad_arguments/query.sql b/parser/testdata/01384_bloom_filter_bad_arguments/query.sql new file mode 100644 index 000000000..42379418e --- /dev/null +++ b/parser/testdata/01384_bloom_filter_bad_arguments/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS test; + +create table test (a String, index a a type tokenbf_v1(0, 2, 0) granularity 1) engine MergeTree order by a; -- { serverError BAD_ARGUMENTS } +create table test (a String, index a a type tokenbf_v1(2, 0, 0) granularity 1) engine MergeTree order by a; -- { serverError BAD_ARGUMENTS } +create table test (a String, index a a type tokenbf_v1(0, 1, 1) granularity 1) engine MergeTree order by a; -- { serverError BAD_ARGUMENTS } +create table test (a String, index a a type tokenbf_v1(1, 0, 1) granularity 1) engine MergeTree order by a; -- { serverError BAD_ARGUMENTS } + +create table test (a String, index a a type tokenbf_v1(0.1, 2, 0) granularity 1) engine MergeTree order by a; -- { serverError BAD_ARGUMENTS } +create table test (a String, index a a type tokenbf_v1(-1, 2, 0) granularity 1) engine MergeTree order by a; -- { serverError BAD_ARGUMENTS } +create table test (a String, index a a type tokenbf_v1(0xFFFFFFFF, 2, 0) granularity 1) engine MergeTree order by a; -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/01385_not_function/ast.json b/parser/testdata/01385_not_function/ast.json new file mode 100644 index 000000000..c838e6d1d --- /dev/null +++ b/parser/testdata/01385_not_function/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": 
"String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function notEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function not (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.00140836, + "rows_read": 10, + "bytes_read": 375 + } +} diff --git a/parser/testdata/01385_not_function/metadata.json b/parser/testdata/01385_not_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01385_not_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01385_not_function/query.sql b/parser/testdata/01385_not_function/query.sql new file mode 100644 index 000000000..77e69b14b --- /dev/null +++ b/parser/testdata/01385_not_function/query.sql @@ -0,0 +1,2 @@ +SELECT 1 != NOT (1); +SELECT 1 WHERE 10 != NOT ( NOT 10); diff --git a/parser/testdata/01386_negative_float_constant_key_condition/ast.json b/parser/testdata/01386_negative_float_constant_key_condition/ast.json new file mode 100644 index 000000000..386b63afb --- /dev/null +++ b/parser/testdata/01386_negative_float_constant_key_condition/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001334606, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01386_negative_float_constant_key_condition/metadata.json b/parser/testdata/01386_negative_float_constant_key_condition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01386_negative_float_constant_key_condition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01386_negative_float_constant_key_condition/query.sql b/parser/testdata/01386_negative_float_constant_key_condition/query.sql new file mode 100644 index 000000000..819b664bb --- /dev/null +++ b/parser/testdata/01386_negative_float_constant_key_condition/query.sql @@ -0,0 +1,21 @@ +SET convert_query_to_cnf = 0; + +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 +( + `c0` Int32, + `c1` Int32 CODEC(NONE) +) +ENGINE = MergeTree() +ORDER BY tuple() +SETTINGS index_granularity = 8192; + +INSERT INTO t0 VALUES (0, 0); + +SELECT t0.c1 FROM t0 WHERE NOT (t0.c1 OR (t0.c0 AND -1524532316)); +SELECT t0.c1 FROM t0 WHERE NOT (t0.c1 OR (t0.c0 AND -1.0)); -- { serverError CANNOT_CONVERT_TYPE } +SELECT t0.c1 FROM t0 WHERE NOT (t0.c1 OR (t0.c0 AND inf)); +SELECT t0.c1 FROM t0 WHERE NOT (t0.c1 OR (t0.c0 AND nan)); + +DROP TABLE t0; diff --git a/parser/testdata/01387_clear_column_default_depends/ast.json b/parser/testdata/01387_clear_column_default_depends/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01387_clear_column_default_depends/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01387_clear_column_default_depends/metadata.json b/parser/testdata/01387_clear_column_default_depends/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01387_clear_column_default_depends/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01387_clear_column_default_depends/query.sql b/parser/testdata/01387_clear_column_default_depends/query.sql new file mode 100644 index 000000000..30208b8d3 --- /dev/null +++ b/parser/testdata/01387_clear_column_default_depends/query.sql @@ -0,0 +1,37 @@ +-- It's OK to CLEAR a column when there are columns with default expressions depending on it. +-- But it's not OK to DROP such a column. + +DROP TABLE IF EXISTS test; +CREATE TABLE test (x UInt8, y UInt8 DEFAULT x + 1) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO test (x) VALUES (1), (2), (3); +SELECT * FROM test ORDER BY x, y; +ALTER TABLE test CLEAR COLUMN x; +SELECT * FROM test ORDER BY x, y; +ALTER TABLE test DROP COLUMN x; -- { serverError ILLEGAL_COLUMN } +DROP TABLE test; + +DROP TABLE IF EXISTS test; +CREATE TABLE test (x UInt8, y UInt8 MATERIALIZED x + 1) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO test (x) VALUES (1), (2), (3); +SELECT x, y FROM test ORDER BY x, y; +ALTER TABLE test CLEAR COLUMN x; +SELECT x, y FROM test ORDER BY x, y; +ALTER TABLE test DROP COLUMN x; -- { serverError ILLEGAL_COLUMN } +DROP TABLE test; + +DROP TABLE IF EXISTS test; +CREATE TABLE test (x UInt8, y UInt8 ALIAS x + 1, z String DEFAULT 'Hello') ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO test (x) VALUES (1), (2), (3); +SELECT x, y FROM test ORDER BY x, y; +ALTER TABLE test CLEAR COLUMN x; +SELECT x, y FROM test ORDER BY x, y; +ALTER TABLE test DROP COLUMN x; -- { serverError ILLEGAL_COLUMN } +DROP TABLE test; + + +-- The original report from Mikhail Petrov +DROP TABLE IF EXISTS Test; +set allow_deprecated_syntax_for_merge_tree=1; +create table Test (impression_id String,impression_id_compressed FixedString(16) DEFAULT UUIDStringToNum(substring(impression_id, 1, 36)), impression_id_hashed UInt16 DEFAULT reinterpretAsUInt16(impression_id_compressed), event_date Date ) ENGINE = MergeTree(event_date, impression_id_hashed, (event_date, impression_id_hashed), 8192); +alter table Test clear column impression_id in partition '202001'; +DROP TABLE Test; diff --git a/parser/testdata/01388_clear_all_columns/ast.json b/parser/testdata/01388_clear_all_columns/ast.json new file mode 100644 index 000000000..420aba45f --- /dev/null +++ b/parser/testdata/01388_clear_all_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001461382, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01388_clear_all_columns/metadata.json b/parser/testdata/01388_clear_all_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01388_clear_all_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01388_clear_all_columns/query.sql b/parser/testdata/01388_clear_all_columns/query.sql new file mode 100644 index 000000000..07b4fb3de --- /dev/null +++ b/parser/testdata/01388_clear_all_columns/query.sql @@ -0,0 +1,34 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (x UInt8) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO test (x) VALUES (1), (2), (3); +ALTER TABLE test CLEAR COLUMN x; --{serverError BAD_ARGUMENTS} +DROP TABLE test; + +DROP TABLE IF EXISTS test; + +CREATE TABLE test (x UInt8, y UInt8) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO test (x, y) VALUES (1, 1), (2, 2), (3, 3); + +ALTER TABLE test CLEAR COLUMN x; + +ALTER TABLE test CLEAR 
COLUMN x IN PARTITION ''; --{serverError INVALID_PARTITION_VALUE} +ALTER TABLE test CLEAR COLUMN x IN PARTITION 'asdasd'; --{serverError INVALID_PARTITION_VALUE} +ALTER TABLE test CLEAR COLUMN x IN PARTITION '123'; --{serverError INVALID_PARTITION_VALUE} + +ALTER TABLE test CLEAR COLUMN y; --{serverError BAD_ARGUMENTS} + +ALTER TABLE test ADD COLUMN z String DEFAULT 'Hello'; + +-- y is the only real column in the table +ALTER TABLE test CLEAR COLUMN y; --{serverError BAD_ARGUMENTS} +ALTER TABLE test CLEAR COLUMN x; +ALTER TABLE test CLEAR COLUMN z; + +INSERT INTO test (x, y, z) VALUES (1, 1, 'a'), (2, 2, 'b'), (3, 3, 'c'); + +ALTER TABLE test CLEAR COLUMN z; +ALTER TABLE test CLEAR COLUMN x; + +SELECT * FROM test ORDER BY y; + +DROP TABLE IF EXISTS test; diff --git a/parser/testdata/01388_multi_if_optimization/ast.json b/parser/testdata/01388_multi_if_optimization/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01388_multi_if_optimization/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01388_multi_if_optimization/metadata.json b/parser/testdata/01388_multi_if_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01388_multi_if_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01388_multi_if_optimization/query.sql b/parser/testdata/01388_multi_if_optimization/query.sql new file mode 100644 index 000000000..0ad6df9fd --- /dev/null +++ b/parser/testdata/01388_multi_if_optimization/query.sql @@ -0,0 +1,8 @@ +-- If you are reading this test, please note that as of now this setting does not provide benefits in most cases. +SET optimize_if_chain_to_multiif = 0; +EXPLAIN SYNTAX SELECT number = 1 ? 'hello' : (number = 2 ? 'world' : 'xyz') FROM numbers(10); +SET optimize_if_chain_to_multiif = 1; +EXPLAIN SYNTAX SELECT number = 1 ? 'hello' : (number = 2 ? 
'world' : 'xyz') FROM numbers(10); + +-- fuzzed +SELECT now64(if(Null, NULL, if(Null, nan, toFloat64(number))), Null) FROM numbers(2); diff --git a/parser/testdata/01389_filter_by_virtual_columns/ast.json b/parser/testdata/01389_filter_by_virtual_columns/ast.json new file mode 100644 index 000000000..9736428e1 --- /dev/null +++ b/parser/testdata/01389_filter_by_virtual_columns/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.parts" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier table" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier database" + }, + { + "explain": " Function currentDatabase (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001530954, + "rows_read": 21, + "bytes_read": 805 + } +} diff --git a/parser/testdata/01389_filter_by_virtual_columns/metadata.json b/parser/testdata/01389_filter_by_virtual_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01389_filter_by_virtual_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01389_filter_by_virtual_columns/query.sql b/parser/testdata/01389_filter_by_virtual_columns/query.sql new file mode 100644 index 000000000..43ce3ad40 --- /dev/null +++ b/parser/testdata/01389_filter_by_virtual_columns/query.sql @@ -0,0 +1,2 @@ +SELECT count() FROM system.parts WHERE table = NULL AND database = currentDatabase(); +SELECT DISTINCT marks FROM system.parts WHERE (table = NULL) AND (database = currentDatabase()) AND (active = 1); diff --git a/parser/testdata/01390_check_table_codec/ast.json b/parser/testdata/01390_check_table_codec/ast.json new file mode 100644 index 000000000..55a99a370 --- /dev/null +++ b/parser/testdata/01390_check_table_codec/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001440645, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01390_check_table_codec/metadata.json b/parser/testdata/01390_check_table_codec/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01390_check_table_codec/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01390_check_table_codec/query.sql b/parser/testdata/01390_check_table_codec/query.sql new file mode 100644 index 000000000..83a18b4f1 --- /dev/null +++ b/parser/testdata/01390_check_table_codec/query.sql @@ -0,0 +1,15 @@ +SET check_query_single_value_result = 0; + +DROP TABLE IF EXISTS check_codec; + +CREATE 
TABLE check_codec(a Int, b Int CODEC(Delta, ZSTD)) ENGINE = MergeTree ORDER BY a SETTINGS min_bytes_for_wide_part = 0; +INSERT INTO check_codec SELECT number, number * 2 FROM numbers(1000); +CHECK TABLE check_codec SETTINGS max_threads = 1; + +DROP TABLE check_codec; + +CREATE TABLE check_codec(a Int, b Int CODEC(Delta, ZSTD)) ENGINE = MergeTree ORDER BY a SETTINGS min_bytes_for_wide_part = '10M'; +INSERT INTO check_codec SELECT number, number * 2 FROM numbers(1000); +CHECK TABLE check_codec SETTINGS max_threads = 1; + +DROP TABLE check_codec; diff --git a/parser/testdata/01390_remove_injective_in_uniq/ast.json b/parser/testdata/01390_remove_injective_in_uniq/ast.json new file mode 100644 index 000000000..93f675948 --- /dev/null +++ b/parser/testdata/01390_remove_injective_in_uniq/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001104517, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01390_remove_injective_in_uniq/metadata.json b/parser/testdata/01390_remove_injective_in_uniq/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01390_remove_injective_in_uniq/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01390_remove_injective_in_uniq/query.sql b/parser/testdata/01390_remove_injective_in_uniq/query.sql new file mode 100644 index 000000000..7753a8fae --- /dev/null +++ b/parser/testdata/01390_remove_injective_in_uniq/query.sql @@ -0,0 +1,46 @@ +set optimize_injective_functions_inside_uniq = 1; + +EXPLAIN SYNTAX select uniq(x), uniqExact(x), uniqHLL12(x), uniqCombined(x), uniqCombined64(x) +from (select number % 2 as x from numbers(10)); + +EXPLAIN SYNTAX select uniq(x + y), uniqExact(x + y), uniqHLL12(x + y), uniqCombined(x + y), uniqCombined64(x + y) +from (select number % 2 as x, number % 3 y from numbers(10)); + +EXPLAIN SYNTAX select uniq(-x), uniqExact(-x), uniqHLL12(-x), uniqCombined(-x), uniqCombined64(-x) +from (select number % 2 as x from numbers(10)); + +EXPLAIN SYNTAX select uniq(bitNot(x)), uniqExact(bitNot(x)), uniqHLL12(bitNot(x)), uniqCombined(bitNot(x)), uniqCombined64(bitNot(x)) +from (select number % 2 as x from numbers(10)); + +EXPLAIN SYNTAX select uniq(bitNot(-x)), uniqExact(bitNot(-x)), uniqHLL12(bitNot(-x)), uniqCombined(bitNot(-x)), uniqCombined64(bitNot(-x)) +from (select number % 2 as x from numbers(10)); + +EXPLAIN SYNTAX select uniq(-bitNot(-x)), uniqExact(-bitNot(-x)), uniqHLL12(-bitNot(-x)), uniqCombined(-bitNot(-x)), uniqCombined64(-bitNot(-x)) +from (select number % 2 as x from numbers(10)); + +EXPLAIN SYNTAX select count(distinct -bitNot(-x)) from (select number % 2 as x from numbers(10)); +EXPLAIN SYNTAX select uniq(concatAssumeInjective('x', 'y')) from numbers(10); + + +set optimize_injective_functions_inside_uniq = 0; + +EXPLAIN SYNTAX select uniq(x), uniqExact(x), uniqHLL12(x), uniqCombined(x), uniqCombined64(x) +from (select number % 2 as x from numbers(10)); + +EXPLAIN SYNTAX select uniq(x + y), uniqExact(x + y), uniqHLL12(x + y), uniqCombined(x + y), uniqCombined64(x + y) +from (select number % 2 as x, number % 3 y from numbers(10)); + +EXPLAIN SYNTAX select uniq(-x), uniqExact(-x), uniqHLL12(-x), uniqCombined(-x), uniqCombined64(-x) +from (select number % 2 as x from numbers(10)); + +EXPLAIN SYNTAX select uniq(bitNot(x)), uniqExact(bitNot(x)), uniqHLL12(bitNot(x)), uniqCombined(bitNot(x)), 
uniqCombined64(bitNot(x)) +from (select number % 2 as x from numbers(10)); + +EXPLAIN SYNTAX select uniq(bitNot(-x)), uniqExact(bitNot(-x)), uniqHLL12(bitNot(-x)), uniqCombined(bitNot(-x)), uniqCombined64(bitNot(-x)) +from (select number % 2 as x from numbers(10)); + +EXPLAIN SYNTAX select uniq(-bitNot(-x)), uniqExact(-bitNot(-x)), uniqHLL12(-bitNot(-x)), uniqCombined(-bitNot(-x)), uniqCombined64(-bitNot(-x)) +from (select number % 2 as x from numbers(10)); + +EXPLAIN SYNTAX select count(distinct -bitNot(-x)) from (select number % 2 as x from numbers(10)); +EXPLAIN SYNTAX select uniq(concatAssumeInjective('x', 'y')) from numbers(10); diff --git a/parser/testdata/01391_join_on_dict_crash/ast.json b/parser/testdata/01391_join_on_dict_crash/ast.json new file mode 100644 index 000000000..28eb5523d --- /dev/null +++ b/parser/testdata/01391_join_on_dict_crash/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 3)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration click_city_id (children 1)" + }, + { + "explain": " DataType UInt32" + }, + { + "explain": " ColumnDeclaration click_country_id (children 1)" + }, + { + "explain": " DataType UInt32" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001329086, + "rows_read": 10, + "bytes_read": 364 + } +} diff --git a/parser/testdata/01391_join_on_dict_crash/metadata.json b/parser/testdata/01391_join_on_dict_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01391_join_on_dict_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01391_join_on_dict_crash/query.sql b/parser/testdata/01391_join_on_dict_crash/query.sql new file mode 100644 index 000000000..e056e1475 --- /dev/null +++ b/parser/testdata/01391_join_on_dict_crash/query.sql @@ -0,0 +1,14 @@ +CREATE TABLE t (click_city_id UInt32, click_country_id UInt32) Engine = Memory; +CREATE TABLE d_src (id UInt64, country_id UInt8, name String) Engine = Memory; + +INSERT INTO t VALUES (0, 0); +INSERT INTO d_src VALUES (0, 0, 'n'); + +CREATE DICTIONARY d (id UInt32, country_id UInt8, name String) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' DB currentDatabase() table 'd_src')) +LIFETIME(MIN 1 MAX 1) +LAYOUT(HASHED()); + +SELECT click_country_id FROM t AS cc LEFT JOIN d ON toUInt32(d.id) = cc.click_city_id; +SELECT click_country_id FROM t AS cc LEFT JOIN d ON d.country_id < 99 AND d.id = cc.click_city_id; diff --git a/parser/testdata/01391_limit_overflow/ast.json b/parser/testdata/01391_limit_overflow/ast.json new file mode 100644 index 000000000..c2f9556cb --- /dev/null +++ b/parser/testdata/01391_limit_overflow/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 5)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + 
{ + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_9223372036854775807" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001287378, + "rows_read": 16, + "bytes_read": 610 + } +} diff --git a/parser/testdata/01391_limit_overflow/metadata.json b/parser/testdata/01391_limit_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01391_limit_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01391_limit_overflow/query.sql b/parser/testdata/01391_limit_overflow/query.sql new file mode 100644 index 000000000..b06622799 --- /dev/null +++ b/parser/testdata/01391_limit_overflow/query.sql @@ -0,0 +1,12 @@ +SELECT number FROM numbers(10) ORDER BY number ASC LIMIT 2, 9223372036854775807 WITH TIES; + +SELECT '---'; + +CREATE TEMPORARY TABLE a (a UInt64); +INSERT INTO TABLE a SELECT number FROM system.numbers LIMIT 10; + +SELECT a +FROM a +GROUP BY a +ORDER BY a ASC +LIMIT 5, 18446744073709551615; diff --git a/parser/testdata/01392_column_resolve/ast.json b/parser/testdata/01392_column_resolve/ast.json new file mode 100644 index 000000000..ccd986bfa --- /dev/null +++ b/parser/testdata/01392_column_resolve/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery tableConversion (children 3)" + }, + { + "explain": " Identifier tableConversion" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration conversionId (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration value (children 1)" + }, + { + "explain": " DataType Nullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType Double" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Log (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.00102632, + "rows_read": 13, + "bytes_read": 499 + } +} diff --git a/parser/testdata/01392_column_resolve/metadata.json b/parser/testdata/01392_column_resolve/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01392_column_resolve/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01392_column_resolve/query.sql b/parser/testdata/01392_column_resolve/query.sql new file mode 100644 index 000000000..90a7d9b16 --- /dev/null +++ b/parser/testdata/01392_column_resolve/query.sql @@ -0,0 +1,25 @@ +CREATE TABLE tableConversion (conversionId String, value Nullable(Double)) ENGINE = Log(); +CREATE TABLE tableClick (clickId String, conversionId String, value Nullable(Double)) ENGINE = Log(); +CREATE TABLE leftjoin (id String) ENGINE = Log(); + +INSERT INTO tableConversion(conversionId, value) VALUES ('Conversion 1', 1); +INSERT INTO tableClick(clickId, conversionId, value) VALUES ('Click 1', 'Conversion 1', 14); +INSERT INTO tableClick(clickId, conversionId, value) VALUES ('Click 2', 'Conversion 1', 15); +INSERT INTO tableClick(clickId, conversionId, value) VALUES ('Click 3', 'Conversion 
1', 16); + +SELECT + conversion.conversionId AS myConversionId, + click.clickId AS myClickId, + click.myValue AS myValue +FROM ( + SELECT conversionId, value as myValue + FROM tableConversion +) AS conversion +INNER JOIN ( + SELECT clickId, conversionId, value as myValue + FROM tableClick +) AS click ON click.conversionId = conversion.conversionId +LEFT JOIN ( + SELECT * FROM leftjoin +) AS dummy ON (dummy.id = conversion.conversionId) +ORDER BY myValue; diff --git a/parser/testdata/01396_low_cardinality_fixed_string_default/ast.json b/parser/testdata/01396_low_cardinality_fixed_string_default/ast.json new file mode 100644 index 000000000..34f50a302 --- /dev/null +++ b/parser/testdata/01396_low_cardinality_fixed_string_default/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00136448, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01396_low_cardinality_fixed_string_default/metadata.json b/parser/testdata/01396_low_cardinality_fixed_string_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01396_low_cardinality_fixed_string_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01396_low_cardinality_fixed_string_default/query.sql b/parser/testdata/01396_low_cardinality_fixed_string_default/query.sql new file mode 100644 index 000000000..f0c89cdf8 --- /dev/null +++ b/parser/testdata/01396_low_cardinality_fixed_string_default/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + id UInt32, + code LowCardinality(FixedString(2)) DEFAULT '--' +) ENGINE = MergeTree() PARTITION BY id ORDER BY id; + +INSERT INTO test FORMAT CSV 1,RU + +INSERT INTO test FORMAT CSV 1, + +SELECT * FROM test ORDER BY code; +OPTIMIZE TABLE test; +SELECT * FROM test ORDER BY code; + +DROP TABLE test; diff --git a/parser/testdata/01396_negative_datetime_saturate_to_zero/ast.json b/parser/testdata/01396_negative_datetime_saturate_to_zero/ast.json new file mode 100644 index 000000000..5d8e14b5c --- /dev/null +++ b/parser/testdata/01396_negative_datetime_saturate_to_zero/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toTimeZone (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function now (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal 'Asia\/Istanbul'" + }, + { + "explain": " Literal '1970-01-01 00:00:00'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001711765, + "rows_read": 12, + "bytes_read": 472 + } +} diff --git a/parser/testdata/01396_negative_datetime_saturate_to_zero/metadata.json b/parser/testdata/01396_negative_datetime_saturate_to_zero/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01396_negative_datetime_saturate_to_zero/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01396_negative_datetime_saturate_to_zero/query.sql b/parser/testdata/01396_negative_datetime_saturate_to_zero/query.sql new file mode 100644 index 000000000..e52c2d3dd --- /dev/null +++ b/parser/testdata/01396_negative_datetime_saturate_to_zero/query.sql @@ -0,0 +1 @@ +SELECT toTimeZone(now(), 'Asia/Istanbul') > '1970-01-01 00:00:00'; diff --git a/parser/testdata/01397_in_bad_arguments/ast.json b/parser/testdata/01397_in_bad_arguments/ast.json new file mode 100644 index 000000000..760a6d1d1 --- /dev/null +++ b/parser/testdata/01397_in_bad_arguments/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_1, UInt64_1, UInt64_1)" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001239341, + "rows_read": 7, + "bytes_read": 293 + } +} diff --git a/parser/testdata/01397_in_bad_arguments/metadata.json b/parser/testdata/01397_in_bad_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01397_in_bad_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01397_in_bad_arguments/query.sql b/parser/testdata/01397_in_bad_arguments/query.sql new file mode 100644 index 000000000..a861ffa8f --- /dev/null +++ b/parser/testdata/01397_in_bad_arguments/query.sql @@ -0,0 +1,4 @@ +select in((1, 1, 1, 1)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select in(1); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select in(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select in(1, 2, 3); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/parser/testdata/01398_in_tuple_func/ast.json b/parser/testdata/01398_in_tuple_func/ast.json new file mode 100644 index 000000000..8b8a4c454 --- /dev/null +++ b/parser/testdata/01398_in_tuple_func/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Set" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001903297, + "rows_read": 15, + "bytes_read": 512 + } +} diff --git a/parser/testdata/01398_in_tuple_func/metadata.json b/parser/testdata/01398_in_tuple_func/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01398_in_tuple_func/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01398_in_tuple_func/query.sql b/parser/testdata/01398_in_tuple_func/query.sql new file mode 100644 index 000000000..1cd5e0cf1 
--- /dev/null +++ b/parser/testdata/01398_in_tuple_func/query.sql @@ -0,0 +1,16 @@ +select 1 in tuple(1, 2, 3, 4, 5) settings max_temporary_columns = 2; +select (1, 2) in tuple(tuple(1, 2), tuple(3, 4), tuple(5, 6), tuple(7, 8), tuple(9, 10)) settings max_temporary_columns = 4; + +select 1 in array(1, 2, 3, 4, 5) settings max_temporary_columns = 3; +select (1, 2) in array(tuple(1, 2), tuple(3, 4), tuple(5, 6), tuple(7, 8), tuple(9, 10)) settings max_temporary_columns = 4; + +select (1, 2) in tuple(1, 2); +select (1, 2) in array((1, 3), (1, 2)); +select [1] in array([1], [2, 3]); +select ([1], [2]) in tuple([NULL], [NULL]); +select ([1], [2]) in tuple(([NULL], [NULL]), ([1], [2])); + +select 4 in plus(2, 2); +select (1, 'a') in tuple((1, 'a'), (2, 'b'), (3, 'c')); +select (1, 'a') in tuple((2, 'b'), (3, 'c'), (4, 'd')); +select (1, (2, 'foo')) in tuple((1, (3, 'b')), (1, (2, 'foo'))); diff --git a/parser/testdata/01400_join_get_with_multi_keys/ast.json b/parser/testdata/01400_join_get_with_multi_keys/ast.json new file mode 100644 index 000000000..696502cef --- /dev/null +++ b/parser/testdata/01400_join_get_with_multi_keys/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_joinGet (children 1)" + }, + { + "explain": " Identifier test_joinGet" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001281321, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/01400_join_get_with_multi_keys/metadata.json b/parser/testdata/01400_join_get_with_multi_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01400_join_get_with_multi_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01400_join_get_with_multi_keys/query.sql b/parser/testdata/01400_join_get_with_multi_keys/query.sql new file mode 100644 index 000000000..8f83e1c15 --- /dev/null +++ b/parser/testdata/01400_join_get_with_multi_keys/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS test_joinGet; + +CREATE TABLE test_joinGet(a String, b String, c Float64) ENGINE = Join(any, left, a, b); + +INSERT INTO test_joinGet VALUES ('ab', '1', 0.1), ('ab', '2', 0.2), ('cd', '3', 0.3); + +SELECT joinGet(test_joinGet, 'c', 'ab', '1'); + +CREATE TABLE test_lc(a LowCardinality(String), b LowCardinality(String), c Float64) ENGINE = Join(any, left, a, b); + +INSERT INTO test_lc VALUES ('ab', '1', 0.1), ('ab', '2', 0.2), ('cd', '3', 0.3); + +SELECT joinGet(test_lc, 'c', 'ab', '1'); + +DROP TABLE test_joinGet; +DROP TABLE test_lc; diff --git a/parser/testdata/01402_cast_nullable_string_to_enum/ast.json b/parser/testdata/01402_cast_nullable_string_to_enum/ast.json new file mode 100644 index 000000000..77d6d9167 --- /dev/null +++ b/parser/testdata/01402_cast_nullable_string_to_enum/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001350655, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01402_cast_nullable_string_to_enum/metadata.json b/parser/testdata/01402_cast_nullable_string_to_enum/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01402_cast_nullable_string_to_enum/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01402_cast_nullable_string_to_enum/query.sql b/parser/testdata/01402_cast_nullable_string_to_enum/query.sql new 
file mode 100644 index 000000000..cf4d57e43 --- /dev/null +++ b/parser/testdata/01402_cast_nullable_string_to_enum/query.sql @@ -0,0 +1,13 @@ +SET cast_keep_nullable = 0; + +-- https://github.com/ClickHouse/ClickHouse/issues/5818#issuecomment-619628445 +SELECT CAST(CAST(NULL AS Nullable(String)) AS Nullable(Enum8('Hello' = 1))); +SELECT CAST(CAST(NULL AS Nullable(FixedString(1))) AS Nullable(Enum8('Hello' = 1))); + +-- empty string still not acceptable +SELECT CAST(CAST('' AS Nullable(String)) AS Nullable(Enum8('Hello' = 1))); -- { serverError UNKNOWN_ELEMENT_OF_ENUM } +SELECT CAST(CAST('' AS Nullable(FixedString(1))) AS Nullable(Enum8('Hello' = 1))); -- { serverError UNKNOWN_ELEMENT_OF_ENUM } + +-- non-Nullable Enum() still not acceptable +SELECT CAST(CAST(NULL AS Nullable(String)) AS Enum8('Hello' = 1)); -- { serverError CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN } +SELECT CAST(CAST(NULL AS Nullable(FixedString(1))) AS Enum8('Hello' = 1)); -- { serverError CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN } diff --git a/parser/testdata/01403_datetime64_constant_arg/ast.json b/parser/testdata/01403_datetime64_constant_arg/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01403_datetime64_constant_arg/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01403_datetime64_constant_arg/metadata.json b/parser/testdata/01403_datetime64_constant_arg/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01403_datetime64_constant_arg/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01403_datetime64_constant_arg/query.sql b/parser/testdata/01403_datetime64_constant_arg/query.sql new file mode 100644 index 000000000..b47a7a315 --- /dev/null +++ b/parser/testdata/01403_datetime64_constant_arg/query.sql @@ -0,0 +1,5 @@ +-- regression for "DB::Exception: Size of filter doesn't match size of column.." +SELECT toDateTime(fromUnixTimestamp64Micro(toInt64(0)), 'UTC') as ts FROM numbers_mt(2) WHERE ts + 1 = ts; + +-- regression for "Invalid number of rows in Chunk column UInt32: expected 2, got 1." +SELECT toDateTime(fromUnixTimestamp64Micro(toInt64(0)), 'UTC') ts FROM numbers(2); diff --git a/parser/testdata/01404_roundUpToPowerOfTwoOrZero_safety/ast.json b/parser/testdata/01404_roundUpToPowerOfTwoOrZero_safety/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01404_roundUpToPowerOfTwoOrZero_safety/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01404_roundUpToPowerOfTwoOrZero_safety/metadata.json b/parser/testdata/01404_roundUpToPowerOfTwoOrZero_safety/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01404_roundUpToPowerOfTwoOrZero_safety/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01404_roundUpToPowerOfTwoOrZero_safety/query.sql b/parser/testdata/01404_roundUpToPowerOfTwoOrZero_safety/query.sql new file mode 100644 index 000000000..a60be1fa3 --- /dev/null +++ b/parser/testdata/01404_roundUpToPowerOfTwoOrZero_safety/query.sql @@ -0,0 +1,4 @@ +-- repeat() with this length and this number of rows will allocate a huge enough region (MSB set), +-- which will cause roundUpToPowerOfTwoOrZero() to return 0 for such an allocation (before the fix), +-- and later repeat() will try to use this memory and will get SIGSEGV. 
+SELECT repeat('0.0001048576', number * (number * (number * 255))) FROM numbers(65535); -- { serverError TOO_LARGE_STRING_SIZE } diff --git a/parser/testdata/01407_lambda_arrayJoin/ast.json b/parser/testdata/01407_lambda_arrayJoin/ast.json new file mode 100644 index 000000000..6e14a3e3a --- /dev/null +++ b/parser/testdata/01407_lambda_arrayJoin/ast.json @@ -0,0 +1,100 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayFilter (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal Array_[NULL]" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 26, + + "statistics": + { + "elapsed": 0.001190665, + "rows_read": 26, + "bytes_read": 1060 + } +} diff --git a/parser/testdata/01407_lambda_arrayJoin/metadata.json b/parser/testdata/01407_lambda_arrayJoin/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01407_lambda_arrayJoin/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01407_lambda_arrayJoin/query.sql b/parser/testdata/01407_lambda_arrayJoin/query.sql new file mode 100644 index 000000000..050bacb78 --- /dev/null +++ b/parser/testdata/01407_lambda_arrayJoin/query.sql @@ -0,0 +1,5 @@ +SELECT arrayFilter((a) -> ((a, arrayJoin([])) IN (Null, [Null])), []); +SELECT arrayFilter((a) -> ((a, arrayJoin([[]])) IN (Null, [Null])), []); + +SELECT * FROM system.one ARRAY JOIN arrayFilter((a) -> ((a, arrayJoin([])) IN (NULL)), []) AS arr_x; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT * FROM numbers(1) LEFT ARRAY JOIN arrayFilter((x_0, x_1) -> (arrayJoin([]) IN (NULL)), [], []) AS arr_x; diff --git a/parser/testdata/01408_range_overflow/ast.json b/parser/testdata/01408_range_overflow/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01408_range_overflow/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01408_range_overflow/metadata.json b/parser/testdata/01408_range_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01408_range_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01408_range_overflow/query.sql b/parser/testdata/01408_range_overflow/query.sql new file mode 100644 index 000000000..7c1b1d7a9 --- /dev/null +++ b/parser/testdata/01408_range_overflow/query.sql @@ 
-0,0 +1,13 @@ +-- executeGeneric() +SELECT range(1025, 1048576 + 9223372036854775807, 9223372036854775807); +SELECT range(1025, 1048576 + (9223372036854775807 AS i), i); +SELECT range(1025, 18446744073709551615, 1); -- { serverError ARGUMENT_OUT_OF_BOUND } + +-- executeConstStep() +SELECT range(number, 1048576 + 9223372036854775807, 9223372036854775807) FROM system.numbers LIMIT 1 OFFSET 1025; + +-- executeConstStartStep() +SELECT range(1025, number + 9223372036854775807, 9223372036854775807) FROM system.numbers LIMIT 1 OFFSET 1048576; + +-- executeConstStart() +SELECT range(1025, 1048576 + 9223372036854775807, number + 9223372036854775807) FROM system.numbers LIMIT 1; diff --git a/parser/testdata/01409_topK_merge/ast.json b/parser/testdata/01409_topK_merge/ast.json new file mode 100644 index 000000000..6df819c35 --- /dev/null +++ b/parser/testdata/01409_topK_merge/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_01409 (children 1)" + }, + { + "explain": " Identifier data_01409" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00129564, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01409_topK_merge/metadata.json b/parser/testdata/01409_topK_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01409_topK_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01409_topK_merge/query.sql b/parser/testdata/01409_topK_merge/query.sql new file mode 100644 index 000000000..d76593515 --- /dev/null +++ b/parser/testdata/01409_topK_merge/query.sql @@ -0,0 +1,15 @@ +drop table if exists data_01409; +create table data_01409 engine=Memory as select * from numbers(20); + +-- easier to check merging via distributed tables +-- but can be done via topKMerge(topKState()) as well + +select 'AggregateFunctionTopK'; +select length(topK(20)(number)) from remote('127.{1,1}', currentDatabase(), data_01409); +select length(topKWeighted(20)(number, 1)) from remote('127.{1,1}', currentDatabase(), data_01409); + +select 'AggregateFunctionTopKGenericData'; +select length(topK(20)((number, ''))) from remote('127.{1,1}', currentDatabase(), data_01409); +select length(topKWeighted(20)((number, ''), 1)) from remote('127.{1,1}', currentDatabase(), data_01409); + +drop table data_01409; diff --git a/parser/testdata/01410_full_join_and_null_predicates/ast.json b/parser/testdata/01410_full_join_and_null_predicates/ast.json new file mode 100644 index 000000000..76980a791 --- /dev/null +++ b/parser/testdata/01410_full_join_and_null_predicates/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery l (children 1)" + }, + { + "explain": " Identifier l" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001101747, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01410_full_join_and_null_predicates/metadata.json b/parser/testdata/01410_full_join_and_null_predicates/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01410_full_join_and_null_predicates/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01410_full_join_and_null_predicates/query.sql b/parser/testdata/01410_full_join_and_null_predicates/query.sql new file mode 100644 index 000000000..f7d5fa67b --- /dev/null +++ b/parser/testdata/01410_full_join_and_null_predicates/query.sql @@ -0,0 
+1,58 @@ +drop table if EXISTS l; +drop table if EXISTS r; + +CREATE TABLE l (luid Nullable(Int16), name String) +ENGINE=MergeTree order by luid settings allow_nullable_key=1 as +select * from VALUES ((1231, 'John'),(6666, 'Ksenia'),(Null, '---')); + +CREATE TABLE r (ruid Nullable(Int16), name String) +ENGINE=MergeTree order by ruid settings allow_nullable_key=1 as +select * from VALUES ((1231, 'John'),(1232, 'Johny')); + +select 'select 1'; +SELECT * FROM l full outer join r on l.luid = r.ruid +where luid is null + and ruid is not null; + +select 'select 2'; +select * from ( +SELECT * FROM l full outer join r on l.luid = r.ruid) + where luid is null + and ruid is not null; + +select 'select 3'; +select * from ( +SELECT * FROM l full outer join r on l.luid = r.ruid +limit 100000000) + where luid is null + and ruid is not null; + +drop table l; +drop table r; + +CREATE TABLE l (luid Nullable(Int16), name String) ENGINE=MergeTree order by tuple() as +select * from VALUES ((1231, 'John'),(6666, 'Ksenia'),(Null, '---')); + +CREATE TABLE r (ruid Nullable(Int16), name String) ENGINE=MergeTree order by tuple() as +select * from VALUES ((1231, 'John'),(1232, 'Johny')); + +select 'select 4'; +SELECT * FROM l full outer join r on l.luid = r.ruid +where luid is null + and ruid is not null; + +select 'select 5'; +select * from ( +SELECT * FROM l full outer join r on l.luid = r.ruid) + where luid is null + and ruid is not null; + +select 'select 6'; +select * from ( +SELECT * FROM l full outer join r on l.luid = r.ruid +limit 100000000) + where luid is null + and ruid is not null; + +drop table l; +drop table r; diff --git a/parser/testdata/01410_nullable_key_and_index/ast.json b/parser/testdata/01410_nullable_key_and_index/ast.json new file mode 100644 index 000000000..d06ba598c --- /dev/null +++ b/parser/testdata/01410_nullable_key_and_index/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nullable_key (children 1)" + }, + { + "explain": " Identifier nullable_key" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00125814, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/01410_nullable_key_and_index/metadata.json b/parser/testdata/01410_nullable_key_and_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01410_nullable_key_and_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01410_nullable_key_and_index/query.sql b/parser/testdata/01410_nullable_key_and_index/query.sql new file mode 100644 index 000000000..d38fb3e08 --- /dev/null +++ b/parser/testdata/01410_nullable_key_and_index/query.sql @@ -0,0 +1,77 @@ +DROP TABLE IF EXISTS nullable_key; +DROP TABLE IF EXISTS nullable_key_without_final_mark; +DROP TABLE IF EXISTS nullable_minmax_index; + +SET max_threads = 1; +SET optimize_read_in_order = 0; + +CREATE TABLE nullable_key (k Nullable(int), v int) ENGINE MergeTree ORDER BY k SETTINGS allow_nullable_key = 1, index_granularity = 1; + +INSERT INTO nullable_key SELECT number * 2, number * 3 FROM numbers(10); +INSERT INTO nullable_key SELECT NULL, -number FROM numbers(3); + +SELECT * FROM nullable_key ORDER BY k, v; + +SET force_primary_key = 1; +SET max_rows_to_read = 3; +SELECT * FROM nullable_key WHERE k IS NULL; +SET max_rows_to_read = 10; +SELECT * FROM nullable_key WHERE k IS NOT NULL; +SET max_rows_to_read = 5; +SELECT * FROM nullable_key WHERE k > 10; +SELECT * FROM nullable_key WHERE k < 10; 
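+ +-- Note on the max_rows_to_read bounds above: with index_granularity = 1 and force_primary_key = 1, each mark covers a single row, so every predicate here must be answered from the primary index alone; the NULL keys arrive in their own part (the second INSERT), which is presumably why the k IS NULL scan fits in 3 rows before the OPTIMIZE below.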
+ +OPTIMIZE TABLE nullable_key FINAL; + +SET max_rows_to_read = 4; -- one additional left mark needs to be read +SELECT * FROM nullable_key WHERE k IS NULL; +SET max_rows_to_read = 10; +SELECT * FROM nullable_key WHERE k IS NOT NULL; + +-- Nullable in set and with transform_null_in = 1 +SET max_rows_to_read = 3; +SELECT * FROM nullable_key WHERE k IN (10, 20) SETTINGS transform_null_in = 1; +SET max_rows_to_read = 5; +SELECT * FROM nullable_key WHERE k IN (3, NULL) SETTINGS transform_null_in = 1; + +CREATE TABLE nullable_key_without_final_mark (s Nullable(String)) ENGINE MergeTree ORDER BY s SETTINGS allow_nullable_key = 1, write_final_mark = 0; +INSERT INTO nullable_key_without_final_mark VALUES ('123'), (NULL); +SET max_rows_to_read = 0; +SELECT * FROM nullable_key_without_final_mark WHERE s IS NULL; +SELECT * FROM nullable_key_without_final_mark WHERE s IS NOT NULL; + +CREATE TABLE nullable_minmax_index (k int, v Nullable(int), INDEX v_minmax v TYPE minmax GRANULARITY 4) ENGINE MergeTree ORDER BY k SETTINGS index_granularity = 1; + +INSERT INTO nullable_minmax_index VALUES (1, 3), (2, 7), (3, 4), (2, NULL); -- [3, +Inf] +INSERT INTO nullable_minmax_index VALUES (1, 1), (2, 2), (3, 2), (2, 1); -- [1, 2] +INSERT INTO nullable_minmax_index VALUES (2, NULL), (3, NULL); -- [+Inf, +Inf] + +SET force_primary_key = 0; +SELECT * FROM nullable_minmax_index ORDER BY k, v; +SET max_rows_to_read = 6; +SELECT * FROM nullable_minmax_index WHERE v IS NULL; +SET max_rows_to_read = 8; +SELECT * FROM nullable_minmax_index WHERE v IS NOT NULL; +SET max_rows_to_read = 6; +SELECT * FROM nullable_minmax_index WHERE v > 2; +SET max_rows_to_read = 4; +SELECT * FROM nullable_minmax_index WHERE v <= 2; + +DROP TABLE nullable_key; +DROP TABLE nullable_key_without_final_mark; +DROP TABLE nullable_minmax_index; + +DROP TABLE IF EXISTS xxxx_null; +CREATE TABLE xxxx_null (`ts` Nullable(DateTime)) ENGINE = MergeTree ORDER BY toStartOfHour(ts) SETTINGS allow_nullable_key = 1; +INSERT INTO xxxx_null SELECT '2021-11-11 00:00:00'; +SELECT * FROM xxxx_null WHERE ts > '2021-10-11 00:00:00'; +DROP TABLE xxxx_null; + +-- nullable keys are forbidden when `allow_nullable_key = 0` +CREATE TABLE invalid_null (id Nullable(String)) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE invalid_lc_null (id LowCardinality(Nullable(String))) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE invalid_array_null (id Array(Nullable(String))) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE invalid_tuple_null (id Tuple(Nullable(String), UInt8)) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE invalid_map_null (id Map(UInt8, Nullable(String))) ENGINE = MergeTree ORDER BY id; -- { serverError ILLEGAL_COLUMN } +CREATE TABLE invalid_simple_agg_state_null (id SimpleAggregateFunction(sum, Nullable(UInt64))) ENGINE = MergeTree ORDER BY id; -- { serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY } +-- AggregateFunctions are not comparable and cannot be used in key expressions. No need to test it. 
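+ +-- Illustrative counterpart to the error cases above (hypothetical table name, not referenced elsewhere): once allow_nullable_key = 1 is set, the same key type is accepted. Both statements are output-neutral. +CREATE TABLE valid_null (id Nullable(String)) ENGINE = MergeTree ORDER BY id SETTINGS allow_nullable_key = 1; +DROP TABLE valid_null;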
diff --git a/parser/testdata/01410_nullable_key_and_index_negate_cond/ast.json b/parser/testdata/01410_nullable_key_and_index_negate_cond/ast.json new file mode 100644 index 000000000..93f6a1b9b --- /dev/null +++ b/parser/testdata/01410_nullable_key_and_index_negate_cond/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_23634 (children 1)" + }, + { + "explain": " Identifier test_23634" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001483287, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01410_nullable_key_and_index_negate_cond/metadata.json b/parser/testdata/01410_nullable_key_and_index_negate_cond/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01410_nullable_key_and_index_negate_cond/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01410_nullable_key_and_index_negate_cond/query.sql b/parser/testdata/01410_nullable_key_and_index_negate_cond/query.sql new file mode 100644 index 000000000..0a1e34645 --- /dev/null +++ b/parser/testdata/01410_nullable_key_and_index_negate_cond/query.sql @@ -0,0 +1,40 @@ +drop table if exists test_23634; + +set force_primary_key=1; + +CREATE TABLE test_23634 (id Nullable(String), s Nullable(String), s1 Nullable(String)) +ENGINE = MergeTree() ORDER BY (id,s) SETTINGS allow_nullable_key = 1; + +INSERT into test_23634 values ('s','s','s'), (null,'s1','s1'), (null,null,'s2'), (null,null,null); + +select '---Q1---'; +select * from test_23634 where id !=''; + +select '---Q2---'; +select * from test_23634 where id !='' and s != ''; + +select '---Q3---'; +select * from test_23634 where id !='' and s != '' and s1 != ''; + +set force_primary_key=0; + +select '---Q4---'; +select * from test_23634 where (id, s, s1) != ('', '', '') order by id, s1, s1; + +select '---Q5---'; +select * from test_23634 where (id, s, s1) = ('', '', '') order by id, s1, s1; + +select '---Q6---'; +select * from test_23634 where (id, s, s1) = ('', '', 's2') order by id, s1, s1; + +select '---Q7---'; +select * from test_23634 where (id, s, s1) = ('', 's1', 's1') order by id, s1, s1; + +select '---Q8---'; +select * from test_23634 where (id, s, s1) = ('s', 's', 's') order by id, s1, s1; + +select '---Q9---'; +select * from test_23634 where (id, s, s1) = (null::Nullable(String), null::Nullable(String), null::Nullable(String)) order by id, s1, s1; + +drop table test_23634; + diff --git a/parser/testdata/01411_from_unixtime/ast.json b/parser/testdata/01411_from_unixtime/ast.json new file mode 100644 index 000000000..608c7271e --- /dev/null +++ b/parser/testdata/01411_from_unixtime/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function formatDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function FROM_UNIXTIME (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_123" + }, + { + "explain": " Literal '%Y-%m-%d %R:%S'" + }, + { + "explain": " Literal 'UTC'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001200503, + "rows_read": 11, + "bytes_read": 427 + } +} diff --git 
a/parser/testdata/01411_from_unixtime/metadata.json b/parser/testdata/01411_from_unixtime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01411_from_unixtime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01411_from_unixtime/query.sql b/parser/testdata/01411_from_unixtime/query.sql new file mode 100644 index 000000000..3578433e1 --- /dev/null +++ b/parser/testdata/01411_from_unixtime/query.sql @@ -0,0 +1,37 @@ +SELECT formatDateTime(FROM_UNIXTIME(123), '%Y-%m-%d %R:%S', 'UTC'); +SELECT formatDateTime(FROM_UNIXTIME(123456789), '%Y-%m-%d %R:%S', 'UTC'); +SELECT formatDateTime(FROM_UNIXTIME(6457477432), '%Y-%m-%d %R:%S', 'UTC'); +SELECT FROM_UNIXTIME(5345345, '%C', 'UTC'); +SELECT FROM_UNIXTIME(645123, '%H', 'UTC'); +SELECT FROM_UNIXTIME(1232456, '%Y-%m-%d', 'UTC'); +SELECT FROM_UNIXTIME(1234356, '%Y-%m-%d %R:%S', 'UTC'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%a'), FROM_UNIXTIME(toDate32('2018-01-02'), '%a'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%b'), FROM_UNIXTIME(toDate32('2018-01-02'), '%b'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%c'), FROM_UNIXTIME(toDate32('2018-01-02'), '%c'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%C'), FROM_UNIXTIME(toDate32('2018-01-02'), '%C'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%d'), FROM_UNIXTIME(toDate32('2018-01-02'), '%d'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%D'), FROM_UNIXTIME(toDate32('2018-01-02'), '%D'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%e'), FROM_UNIXTIME(toDate32('2018-01-02'), '%e'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%F'), FROM_UNIXTIME(toDate32('2018-01-02'), '%F'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%h'), FROM_UNIXTIME(toDate32('2018-01-02'), '%h'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%H'), FROM_UNIXTIME(toDate32('2018-01-02'), '%H'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 02:33:44'), '%H'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%i'), FROM_UNIXTIME(toDate32('2018-01-02'), '%i'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%I'), FROM_UNIXTIME(toDate32('2018-01-02'), '%I'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 11:33:44'), '%I'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 00:33:44'), '%I'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-01 00:33:44'), '%j'), FROM_UNIXTIME(toDate32('2018-01-01'), '%j'); +SELECT FROM_UNIXTIME(toDateTime('2000-12-31 00:33:44'), '%j'), FROM_UNIXTIME(toDate32('2000-12-31'), '%j'); +SELECT FROM_UNIXTIME(toDateTime('2000-12-31 00:33:44'), '%k'), FROM_UNIXTIME(toDate32('2000-12-31'), '%k'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%m'), FROM_UNIXTIME(toDate32('2018-01-02'), '%m'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%M'), FROM_UNIXTIME(toDate32('2018-01-02'), '%M'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%n'), FROM_UNIXTIME(toDate32('2018-01-02'), '%n'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 00:33:44'), '%p'), FROM_UNIXTIME(toDate32('2018-01-02'), '%p'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 11:33:44'), '%p'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 12:33:44'), '%p'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%r'), FROM_UNIXTIME(toDate32('2018-01-02'), '%r'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%R'), FROM_UNIXTIME(toDate32('2018-01-02'), '%R'); +SELECT 
FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%S'), FROM_UNIXTIME(toDate32('2018-01-02'), '%S'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%t'), FROM_UNIXTIME(toDate32('2018-01-02'), '%t'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%T'), FROM_UNIXTIME(toDate32('2018-01-02'), '%T'); +SELECT FROM_UNIXTIME(toDateTime('2018-01-02 22:33:44'), '%W'), FROM_UNIXTIME(toDate32('2018-01-02'), '%W'); diff --git a/parser/testdata/01411_xor_itai_shirav/ast.json b/parser/testdata/01411_xor_itai_shirav/ast.json new file mode 100644 index 000000000..f78029466 --- /dev/null +++ b/parser/testdata/01411_xor_itai_shirav/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function xor (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.00132754, + "rows_read": 8, + "bytes_read": 286 + } +} diff --git a/parser/testdata/01411_xor_itai_shirav/metadata.json b/parser/testdata/01411_xor_itai_shirav/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01411_xor_itai_shirav/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01411_xor_itai_shirav/query.sql b/parser/testdata/01411_xor_itai_shirav/query.sql new file mode 100644 index 000000000..fdd2801f8 --- /dev/null +++ b/parser/testdata/01411_xor_itai_shirav/query.sql @@ -0,0 +1 @@ +SELECT xor(1, 0); diff --git a/parser/testdata/01412_group_array_moving_shard/ast.json b/parser/testdata/01412_group_array_moving_shard/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01412_group_array_moving_shard/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01412_group_array_moving_shard/metadata.json b/parser/testdata/01412_group_array_moving_shard/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01412_group_array_moving_shard/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01412_group_array_moving_shard/query.sql b/parser/testdata/01412_group_array_moving_shard/query.sql new file mode 100644 index 000000000..642619dc1 --- /dev/null +++ b/parser/testdata/01412_group_array_moving_shard/query.sql @@ -0,0 +1,35 @@ +-- Tags: shard + +SELECT groupArrayMovingSum(10)(1) FROM remote('127.0.0.{1,2}', numbers(100)); +SELECT groupArrayMovingAvg(10)(1) FROM remote('127.0.0.{1,2}', numbers(100)); + +SELECT groupArrayMovingSum(256)(-1) FROM numbers(300); +SELECT groupArrayMovingSum(256)(-1) FROM remote('127.0.0.{1,2}', numbers(200)); +SELECT groupArrayMovingAvg(256)(1) FROM numbers(300); + +SELECT groupArrayMovingSum(256)(toDecimal32(100000000, 1)) FROM numbers(300); +SELECT groupArrayMovingSum(256)(toDecimal64(-1, 1)) FROM numbers(300); +SELECT groupArrayMovingAvg(256)(toDecimal128(-1, 1)) FROM numbers(300); + + +SELECT groupArrayMovingSum(10)(number) FROM numbers(100); +SELECT groupArrayMovingSum(10)(1) FROM numbers(100); +SELECT groupArrayMovingSum(0)(1) FROM numbers(100); -- { serverError BAD_ARGUMENTS } +SELECT groupArrayMovingSum(0.)(1) FROM numbers(100); -- { serverError BAD_ARGUMENTS } +SELECT 
groupArrayMovingSum(0.1)(1) FROM numbers(100); -- { serverError BAD_ARGUMENTS } +SELECT groupArrayMovingSum(0.1)(1) FROM remote('127.0.0.{1,2}', numbers(100)); -- { serverError BAD_ARGUMENTS } +SELECT groupArrayMovingSum(256)(1) FROM remote('127.0.0.{1,2}', numbers(100)); +SELECT groupArrayMovingSum(256)(1) FROM remote('127.0.0.{1,2}', numbers(1000)); +SELECT toTypeName(groupArrayMovingSum(256)(-1)) FROM remote('127.0.0.{1,2}', numbers(1000)); +SELECT groupArrayMovingSum(256)(toDecimal32(1, 9)) FROM numbers(300); +SELECT groupArrayMovingSum(256)(toDecimal32(1000000000, 1)) FROM numbers(300); -- { serverError DECIMAL_OVERFLOW } +SELECT groupArrayMovingSum(256)(toDecimal32(100000000, 1)) FROM numbers(300); +SELECT groupArrayMovingSum(256)(toDecimal32(1, 1)) FROM numbers(300); + +SELECT groupArrayMovingAvg(256)(1) FROM remote('127.0.0.{1,2}', numbers(1000)); +SELECT groupArrayMovingAvg(256)(-1) FROM numbers(300); +SELECT arrayMap(x -> round(x, 4), groupArrayMovingAvg(256)(1)) FROM numbers(300); +SELECT groupArrayMovingAvg(256)(toDecimal32(1, 9)) FROM numbers(300); +SELECT toTypeName(groupArrayMovingAvg(256)(toDecimal32(1, 9))) FROM numbers(300); +SELECT groupArrayMovingAvg(100)(toDecimal32(1, 9)) FROM numbers(300); + diff --git a/parser/testdata/01412_mod_float/ast.json b/parser/testdata/01412_mod_float/ast.json new file mode 100644 index 000000000..6e0c82479 --- /dev/null +++ b/parser/testdata/01412_mod_float/ast.json @@ -0,0 +1,115 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_8.5 (alias a)" + }, + { + "explain": " Literal Float64_2.5 (alias b)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Identifier b" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function negate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Identifier b" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Function negate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier b" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function negate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Function negate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier b" + } + ], + + "rows": 31, + + "statistics": + { + "elapsed": 0.001459912, + "rows_read": 31, + "bytes_read": 1141 + } +} diff --git a/parser/testdata/01412_mod_float/metadata.json b/parser/testdata/01412_mod_float/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01412_mod_float/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01412_mod_float/query.sql b/parser/testdata/01412_mod_float/query.sql new file 
mode 100644 index 000000000..f2a5f2fce --- /dev/null +++ b/parser/testdata/01412_mod_float/query.sql @@ -0,0 +1,7 @@ +WITH 8.5 AS a, 2.5 AS b SELECT a % b, -a % b, a % -b, -a % -b; +WITH 10.125 AS a, 2.5 AS b SELECT a % b, -a % b, a % -b, -a % -b; +WITH 8.5 AS a, 2.5 AS b SELECT mod(a, b), MOD(-a, b), modulo(a, -b), moduloOrZero(-a, -b); +WITH 8.5 AS a, 2.5 AS b SELECT a MOD b, -a MOD b, a MOD -b, -a MOD -b; +WITH 10.125 AS a, 2.5 AS b SELECT a MOD b, -a MOD b, a MOD -b, -a MOD -b; +SELECT 3.5 % 0; +SELECT 3.5 MOD 0; diff --git a/parser/testdata/01412_optimize_deduplicate_bug/ast.json b/parser/testdata/01412_optimize_deduplicate_bug/ast.json new file mode 100644 index 000000000..cfd2547e9 --- /dev/null +++ b/parser/testdata/01412_optimize_deduplicate_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tesd_dedupl (children 1)" + }, + { + "explain": " Identifier tesd_dedupl" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001076686, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/01412_optimize_deduplicate_bug/metadata.json b/parser/testdata/01412_optimize_deduplicate_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01412_optimize_deduplicate_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01412_optimize_deduplicate_bug/query.sql b/parser/testdata/01412_optimize_deduplicate_bug/query.sql new file mode 100644 index 000000000..b75b31243 --- /dev/null +++ b/parser/testdata/01412_optimize_deduplicate_bug/query.sql @@ -0,0 +1,10 @@ +drop table if exists tesd_dedupl; + +create table tesd_dedupl (x UInt32, y UInt32) engine = MergeTree order by x; +insert into tesd_dedupl values (1, 1); +insert into tesd_dedupl values (1, 1); + +OPTIMIZE TABLE tesd_dedupl DEDUPLICATE; +select * from tesd_dedupl; + +drop table if exists tesd_dedupl; diff --git a/parser/testdata/01412_row_from_totals/ast.json b/parser/testdata/01412_row_from_totals/ast.json new file mode 100644 index 000000000..be0d97b6b --- /dev/null +++ b/parser/testdata/01412_row_from_totals/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tracking_events_tmp (children 1)" + }, + { + "explain": " Identifier tracking_events_tmp" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001737267, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/01412_row_from_totals/metadata.json b/parser/testdata/01412_row_from_totals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01412_row_from_totals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01412_row_from_totals/query.sql b/parser/testdata/01412_row_from_totals/query.sql new file mode 100644 index 000000000..63c2a1113 --- /dev/null +++ b/parser/testdata/01412_row_from_totals/query.sql @@ -0,0 +1,34 @@ +DROP TABLE IF EXISTS tracking_events_tmp; +DROP TABLE IF EXISTS open_events_tmp; + +CREATE TABLE tracking_events_tmp (`APIKey` UInt32, `EventDate` Date) ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) ORDER BY (APIKey, EventDate); +CREATE TABLE open_events_tmp (`APIKey` UInt32, `EventDate` Date) ENGINE = MergeTree PARTITION BY toMonday(EventDate) ORDER BY (APIKey, EventDate); + +insert into open_events_tmp select 2, '2020-07-10' from numbers(32); +insert into open_events_tmp select 2, '2020-07-11' from 
numbers(31); + +insert into tracking_events_tmp select 2, '2020-07-10' from numbers(1881); +insert into tracking_events_tmp select 2, '2020-07-11' from numbers(1623); + +SELECT EventDate +FROM +( + SELECT EventDate + FROM tracking_events_tmp AS t1 + WHERE (EventDate >= toDate('2020-07-10')) AND (EventDate <= toDate('2020-07-11')) AND (APIKey = 2) + GROUP BY EventDate +) +FULL OUTER JOIN +( + SELECT EventDate + FROM remote('127.0.0.{1,3}', currentDatabase(), open_events_tmp) AS t2 + WHERE (EventDate >= toDate('2020-07-10')) AND (EventDate <= toDate('2020-07-11')) AND (APIKey = 2) + GROUP BY EventDate + WITH TOTALS +) USING EventDate +ORDER BY EventDate +settings totals_mode = 'after_having_auto', group_by_overflow_mode = 'any', max_rows_to_group_by = 10000000, joined_subquery_requires_alias=0; + + +DROP TABLE IF EXISTS tracking_events_tmp; +DROP TABLE IF EXISTS open_events_tmp; diff --git a/parser/testdata/01413_allow_non_metadata_alters/ast.json b/parser/testdata/01413_allow_non_metadata_alters/ast.json new file mode 100644 index 000000000..15825384a --- /dev/null +++ b/parser/testdata/01413_allow_non_metadata_alters/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery non_metadata_alters (children 1)" + }, + { + "explain": " Identifier non_metadata_alters" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001586261, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/01413_allow_non_metadata_alters/metadata.json b/parser/testdata/01413_allow_non_metadata_alters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01413_allow_non_metadata_alters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01413_allow_non_metadata_alters/query.sql b/parser/testdata/01413_allow_non_metadata_alters/query.sql new file mode 100644 index 000000000..86b113533 --- /dev/null +++ b/parser/testdata/01413_allow_non_metadata_alters/query.sql @@ -0,0 +1,49 @@ +DROP TABLE IF EXISTS non_metadata_alters; + +CREATE TABLE non_metadata_alters ( + key UInt64, + value1 String, + value2 Enum8('Hello' = 1, 'World' = 2), + value3 UInt16, + value4 DateTime, + value5 Date +) +ENGINE = MergeTree() +ORDER BY tuple(); + + +SET allow_non_metadata_alters = 0; + +ALTER TABLE non_metadata_alters MODIFY COLUMN value3 UInt64; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +ALTER TABLE non_metadata_alters MODIFY COLUMN value1 UInt32; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +ALTER TABLE non_metadata_alters MODIFY COLUMN value4 Date; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +ALTER TABLE non_metadata_alters DROP COLUMN value4; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +ALTER TABLE non_metadata_alters MODIFY COLUMN value2 Enum8('x' = 5, 'y' = 6); --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +ALTER TABLE non_metadata_alters RENAME COLUMN value4 TO renamed_value4; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +ALTER TABLE non_metadata_alters MODIFY COLUMN value3 UInt16 TTL value5 + INTERVAL 5 DAY; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +SET materialize_ttl_after_modify = 0; + +ALTER TABLE non_metadata_alters MODIFY COLUMN value3 UInt16 TTL value5 + INTERVAL 5 DAY; + +SHOW CREATE TABLE non_metadata_alters; + +ALTER TABLE non_metadata_alters MODIFY COLUMN value1 String DEFAULT 'X'; + +ALTER TABLE non_metadata_alters MODIFY COLUMN value2 Enum8('Hello' = 1, 'World' = 2, '!' 
= 3); + +ALTER TABLE non_metadata_alters MODIFY COLUMN value3 Date; + +ALTER TABLE non_metadata_alters MODIFY COLUMN value4 UInt32; + +ALTER TABLE non_metadata_alters ADD COLUMN value6 Decimal(3, 3); + +SHOW CREATE TABLE non_metadata_alters; + +DROP TABLE IF EXISTS non_metadata_alters; diff --git a/parser/testdata/01413_alter_update_supertype/ast.json b/parser/testdata/01413_alter_update_supertype/ast.json new file mode 100644 index 000000000..2593d8b1c --- /dev/null +++ b/parser/testdata/01413_alter_update_supertype/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001574665, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01413_alter_update_supertype/metadata.json b/parser/testdata/01413_alter_update_supertype/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01413_alter_update_supertype/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01413_alter_update_supertype/query.sql b/parser/testdata/01413_alter_update_supertype/query.sql new file mode 100644 index 000000000..9003d3f34 --- /dev/null +++ b/parser/testdata/01413_alter_update_supertype/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS t; +CREATE TABLE t (x UInt64) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t SELECT number FROM numbers(10); + +SELECT * FROM t; + +SET mutations_sync = 1; +ALTER TABLE t UPDATE x = x - 1 WHERE x % 2 = 1; + +SELECT '---'; +SELECT * FROM t; + +DROP TABLE t; diff --git a/parser/testdata/01413_if_array_uuid/ast.json b/parser/testdata/01413_if_array_uuid/ast.json new file mode 100644 index 000000000..170d2e9cd --- /dev/null +++ b/parser/testdata/01413_if_array_uuid/ast.json @@ -0,0 +1,109 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUUID (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '00000000-e1fe-11e9-bb8f-853d60c00749'" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUUID (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '11111111-e1fe-11e9-bb8f-853d60c00749'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_5" + } + ], + 
+ "rows": 29, + + "statistics": + { + "elapsed": 0.001691042, + "rows_read": 29, + "bytes_read": 1220 + } +} diff --git a/parser/testdata/01413_if_array_uuid/metadata.json b/parser/testdata/01413_if_array_uuid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01413_if_array_uuid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01413_if_array_uuid/query.sql b/parser/testdata/01413_if_array_uuid/query.sql new file mode 100644 index 000000000..6b8fa387c --- /dev/null +++ b/parser/testdata/01413_if_array_uuid/query.sql @@ -0,0 +1 @@ +SELECT if(number % 2 = 0, [toUUID('00000000-e1fe-11e9-bb8f-853d60c00749')], [toUUID('11111111-e1fe-11e9-bb8f-853d60c00749')]) FROM numbers(5); diff --git a/parser/testdata/01413_rows_events/ast.json b/parser/testdata/01413_rows_events/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01413_rows_events/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01413_rows_events/metadata.json b/parser/testdata/01413_rows_events/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01413_rows_events/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01413_rows_events/query.sql b/parser/testdata/01413_rows_events/query.sql new file mode 100644 index 000000000..beb1525cc --- /dev/null +++ b/parser/testdata/01413_rows_events/query.sql @@ -0,0 +1,32 @@ +-- Tags: no-async-insert +-- The correct profile event appears in the secondary query with query_kind: AsyncInsertFlush + +DROP TABLE IF EXISTS rows_events_test; +CREATE TABLE rows_events_test (k UInt32, v UInt32) ENGINE = MergeTree ORDER BY k; + +INSERT INTO /* test 01413, query 1 */ rows_events_test VALUES (1,1); +SYSTEM FLUSH LOGS query_log; + +SELECT written_rows FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE 'INSERT INTO /* test 01413, query 1 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1; + +SELECT ProfileEvents['InsertedRows'] as value FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE 'INSERT INTO /* test 01413, query 1 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1; + + +INSERT INTO /* test 01413, query 2 */ rows_events_test VALUES (2,2), (3,3); +SYSTEM FLUSH LOGS query_log; + +SELECT written_rows FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE 'INSERT INTO /* test 01413, query 2 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1; + +SELECT ProfileEvents['InsertedRows'] as value FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE 'INSERT INTO /* test 01413, query 2 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1; + + +SELECT * FROM /* test 01413, query 3 */ rows_events_test WHERE v = 2; +SYSTEM FLUSH LOGS query_log; + +SELECT read_rows FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE 'SELECT * FROM /* test 01413, query 3 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 1; + + +SELECT ProfileEvents['SelectedRows'] as value FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE 'SELECT * FROM /* test 01413, query 3 */ rows_events_test%' AND type = 2 AND event_date >= yesterday() ORDER BY event_time DESC LIMIT 
1; + + +DROP TABLE rows_events_test; diff --git a/parser/testdata/01413_truncate_without_table_keyword/ast.json b/parser/testdata/01413_truncate_without_table_keyword/ast.json new file mode 100644 index 000000000..39d9289a4 --- /dev/null +++ b/parser/testdata/01413_truncate_without_table_keyword/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery truncate_test (children 1)" + }, + { + "explain": " Identifier truncate_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001469462, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/01413_truncate_without_table_keyword/metadata.json b/parser/testdata/01413_truncate_without_table_keyword/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01413_truncate_without_table_keyword/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01413_truncate_without_table_keyword/query.sql b/parser/testdata/01413_truncate_without_table_keyword/query.sql new file mode 100644 index 000000000..c6819f77e --- /dev/null +++ b/parser/testdata/01413_truncate_without_table_keyword/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS truncate_test; + +CREATE TABLE truncate_test(uint8 UInt8) ENGINE = Log; + +INSERT INTO truncate_test VALUES(1), (2), (3); + +SELECT * FROM truncate_test ORDER BY uint8; + +TRUNCATE truncate_test; + +SELECT * FROM truncate_test ORDER BY uint8; + +DROP TABLE truncate_test; diff --git a/parser/testdata/01414_bloom_filter_index_with_const_column/ast.json b/parser/testdata/01414_bloom_filter_index_with_const_column/ast.json new file mode 100644 index 000000000..230147a80 --- /dev/null +++ b/parser/testdata/01414_bloom_filter_index_with_const_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_bloom_filter_index (children 1)" + }, + { + "explain": " Identifier test_bloom_filter_index" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001129007, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/01414_bloom_filter_index_with_const_column/metadata.json b/parser/testdata/01414_bloom_filter_index_with_const_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01414_bloom_filter_index_with_const_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01414_bloom_filter_index_with_const_column/query.sql b/parser/testdata/01414_bloom_filter_index_with_const_column/query.sql new file mode 100644 index 000000000..388398a8f --- /dev/null +++ b/parser/testdata/01414_bloom_filter_index_with_const_column/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS test_bloom_filter_index; + +CREATE TABLE test_bloom_filter_index(`WatchID` UInt64, `JavaEnable` UInt8, `Title` String, `GoodEvent` Int16, `EventTime` DateTime, `EventDate` Date, `CounterID` UInt32, `ClientIP` UInt32, `ClientIP6` FixedString(16), `RegionID` UInt32, `UserID` UInt64, `CounterClass` Int8, `OS` UInt8, `UserAgent` UInt8, `URL` String, `Referer` String, `URLDomain` String, `RefererDomain` String, `Refresh` UInt8, `IsRobot` UInt8, `RefererCategories` Array(UInt16), `URLCategories` Array(UInt16), `URLRegions` Array(UInt32), `RefererRegions` Array(UInt32), `ResolutionWidth` UInt16, `ResolutionHeight` UInt16, `ResolutionDepth` UInt8, `FlashMajor` UInt8, `FlashMinor` UInt8, `FlashMinor2` String, `NetMajor` UInt8, 
`NetMinor` UInt8, `UserAgentMajor` UInt16, `UserAgentMinor` FixedString(2), `CookieEnable` UInt8, `JavascriptEnable` UInt8, `IsMobile` UInt8, `MobilePhone` UInt8, `MobilePhoneModel` String, `Params` String, `IPNetworkID` UInt32, `TraficSourceID` Int8, `SearchEngineID` UInt16, `SearchPhrase` String, `AdvEngineID` UInt8, `IsArtifical` UInt8, `WindowClientWidth` UInt16, `WindowClientHeight` UInt16, `ClientTimeZone` Int16, `ClientEventTime` DateTime, `SilverlightVersion1` UInt8, `SilverlightVersion2` UInt8, `SilverlightVersion3` UInt32, `SilverlightVersion4` UInt16, `PageCharset` String, `CodeVersion` UInt32, `IsLink` UInt8, `IsDownload` UInt8, `IsNotBounce` UInt8, `FUniqID` UInt64, `HID` UInt32, `IsOldCounter` UInt8, `IsEvent` UInt8, `IsParameter` UInt8, `DontCountHits` UInt8, `WithHash` UInt8, `HitColor` FixedString(1), `UTCEventTime` DateTime, `Age` UInt8, `Sex` UInt8, `Income` UInt8, `Interests` UInt16, `Robotness` UInt8, `GeneralInterests` Array(UInt16), `RemoteIP` UInt32, `RemoteIP6` FixedString(16), `WindowName` Int32, `OpenerName` Int32, `HistoryLength` Int16, `BrowserLanguage` FixedString(2), `BrowserCountry` FixedString(2), `SocialNetwork` String, `SocialAction` String, `HTTPError` UInt16, `SendTiming` Int32, `DNSTiming` Int32, `ConnectTiming` Int32, `ResponseStartTiming` Int32, `ResponseEndTiming` Int32, `FetchTiming` Int32, `RedirectTiming` Int32, `DOMInteractiveTiming` Int32, `DOMContentLoadedTiming` Int32, `DOMCompleteTiming` Int32, `LoadEventStartTiming` Int32, `LoadEventEndTiming` Int32, `NSToDOMContentLoadedTiming` Int32, `FirstPaintTiming` Int32, `RedirectCount` Int8, `SocialSourceNetworkID` UInt8, `SocialSourcePage` String, `ParamPrice` Int64, `ParamOrderID` String, `ParamCurrency` FixedString(3), `ParamCurrencyID` UInt16, `GoalsReached` Array(UInt32), `OpenstatServiceName` String, `OpenstatCampaignID` String, `OpenstatAdID` String, `OpenstatSourceID` String, `UTMSource` String, `UTMMedium` String, `UTMCampaign` String, `UTMContent` String, `UTMTerm` String, `FromTag` String, `HasGCLID` UInt8, `RefererHash` UInt64, `URLHash` UInt64, `CLID` UInt32, `YCLID` UInt64, `ShareService` String, `ShareURL` String, `ShareTitle` String, `ParsedParams.Key1` Array(String), `ParsedParams.Key2` Array(String), `ParsedParams.Key3` Array(String), `ParsedParams.Key4` Array(String), `ParsedParams.Key5` Array(String), `ParsedParams.ValueDouble` Array(Float64), `IslandID` FixedString(16), `RequestNum` UInt32, `RequestTry` UInt8, INDEX test1 RegionID TYPE bloom_filter GRANULARITY 8129) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SAMPLE BY intHash32(UserID) SETTINGS index_granularity = 8192; + +SELECT UserID FROM test_bloom_filter_index WHERE (CounterID, EventTime) IN (SELECT toUInt32(25703952), toDateTime('2014-03-19 23:59:58')); + +DROP TABLE IF EXISTS test_bloom_filter_index; + +CREATE TABLE test_bloom_filter_index(`uint8` UInt8, `uint16` UInt16, `index_column` UInt64, INDEX test1 `index_column` TYPE bloom_filter GRANULARITY 1) ENGINE = MergeTree() PARTITION BY tuple() ORDER BY tuple(); + +INSERT INTO test_bloom_filter_index SELECT number, number, number FROM numbers(10000); + +SELECT * FROM test_bloom_filter_index WHERE (`uint16`, `index_column`) IN (SELECT toUInt16(2), toUInt64(2)); + +DROP TABLE IF EXISTS test_bloom_filter_index; diff --git a/parser/testdata/01414_freeze_does_not_prevent_alters/ast.json b/parser/testdata/01414_freeze_does_not_prevent_alters/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/01414_freeze_does_not_prevent_alters/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01414_freeze_does_not_prevent_alters/metadata.json b/parser/testdata/01414_freeze_does_not_prevent_alters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01414_freeze_does_not_prevent_alters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01414_freeze_does_not_prevent_alters/query.sql b/parser/testdata/01414_freeze_does_not_prevent_alters/query.sql new file mode 100644 index 000000000..ad2010224 --- /dev/null +++ b/parser/testdata/01414_freeze_does_not_prevent_alters/query.sql @@ -0,0 +1,35 @@ +-- In previous ClickHouse versions, parts were not 100% immutable and FREEZE could prevent subsequent ALTERs. +-- It's no longer the case. Let's prove it. + +DROP TABLE IF EXISTS t; +CREATE TABLE t (k UInt64, s String) ENGINE = MergeTree ORDER BY k; +INSERT INTO t VALUES (1, 'hello'), (2, 'world'); + +SELECT * FROM t; +SELECT name, is_frozen FROM system.parts WHERE database = currentDatabase() AND table = 't'; + +SELECT '---'; +ALTER TABLE t FREEZE; +SELECT name, is_frozen FROM system.parts WHERE database = currentDatabase() AND table = 't'; + +SELECT '---'; +SET mutations_sync = 1; +ALTER TABLE t UPDATE s = 'goodbye' WHERE k = 1; +SELECT * FROM t; +SELECT name, is_frozen FROM system.parts WHERE database = currentDatabase() AND table = 't'; + +SELECT '---'; +ALTER TABLE t MODIFY COLUMN s Enum('goodbye' = 1, 'world' = 2); +SELECT * FROM t; +SELECT name, is_frozen FROM system.parts WHERE database = currentDatabase() AND table = 't'; + +SELECT '---'; +ALTER TABLE t FREEZE; +SELECT name, is_frozen FROM system.parts WHERE database = currentDatabase() AND table = 't'; + +SELECT '---'; +ALTER TABLE t MODIFY COLUMN s Enum('hello' = 1, 'world' = 2); +SELECT * FROM t; +SELECT name, is_frozen FROM system.parts WHERE database = currentDatabase() AND table = 't'; + +DROP TABLE t; diff --git a/parser/testdata/01414_low_cardinality_nullable/ast.json b/parser/testdata/01414_low_cardinality_nullable/ast.json new file mode 100644 index 000000000..083c813ee --- /dev/null +++ b/parser/testdata/01414_low_cardinality_nullable/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001499363, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01414_low_cardinality_nullable/metadata.json b/parser/testdata/01414_low_cardinality_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01414_low_cardinality_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01414_low_cardinality_nullable/query.sql b/parser/testdata/01414_low_cardinality_nullable/query.sql new file mode 100644 index 000000000..cd5111faf --- /dev/null +++ b/parser/testdata/01414_low_cardinality_nullable/query.sql @@ -0,0 +1,224 @@ +SET allow_suspicious_low_cardinality_types=1; + +DROP TABLE IF EXISTS lc_nullable; + +CREATE TABLE lc_nullable ( + order_key Array(LowCardinality(Nullable(UInt64))), + + i8 Array(LowCardinality(Nullable(Int8))), + i16 Array(LowCardinality(Nullable(Int16))), + i32 Array(LowCardinality(Nullable(Int32))), + i64 Array(LowCardinality(Nullable(Int64))), + u8 Array(LowCardinality(Nullable(UInt8))), + u16 Array(LowCardinality(Nullable(UInt16))), + u32 Array(LowCardinality(Nullable(UInt32))), + u64
Array(LowCardinality(Nullable(UInt64))), + f32 Array(LowCardinality(Nullable(Float32))), + f64 Array(LowCardinality(Nullable(Float64))), + + date Array(LowCardinality(Nullable(Date))), + date_time Array(LowCardinality(Nullable(DateTime('Asia/Istanbul')))), + + str Array(LowCardinality(Nullable(String))), + fixed_string Array(LowCardinality(Nullable(FixedString(5)))) +) ENGINE = MergeTree() ORDER BY order_key SETTINGS allow_nullable_key = 1; + +INSERT INTO lc_nullable SELECT + groupArray(number) AS order_key, + groupArray(toInt8(number)) AS i8, + groupArray(toInt16(number)) AS i16, + groupArray(toInt32(number)) AS i32, + groupArray(toInt64(number)) AS i64, + groupArray(toUInt8(number)) AS u8, + groupArray(toUInt16(number)) AS u16, + groupArray(toUInt32(number)) AS u32, + groupArray(toUInt64(number)) AS u64, + groupArray(toFloat32(number)) AS f32, + groupArray(toFloat64(number)) AS f64, + groupArray(toDate(number, 'Asia/Istanbul')) AS date, + groupArray(toDateTime(number, 'Asia/Istanbul')) AS date_time, + groupArray(toString(number)) AS str, + groupArray(toFixedString(toString(number), 5)) AS fixed_string + FROM (SELECT number FROM system.numbers LIMIT 15); + +INSERT INTO lc_nullable SELECT + groupArray(num) AS order_key, + groupArray(toInt8(num)) AS i8, + groupArray(toInt16(num)) AS i16, + groupArray(toInt32(num)) AS i32, + groupArray(toInt64(num)) AS i64, + groupArray(toUInt8(num)) AS u8, + groupArray(toUInt16(num)) AS u16, + groupArray(toUInt32(num)) AS u32, + groupArray(toUInt64(num)) AS u64, + groupArray(toFloat32(num)) AS f32, + groupArray(toFloat64(num)) AS f64, + groupArray(toDate(num, 'Asia/Istanbul')) AS date, + groupArray(toDateTime(num, 'Asia/Istanbul')) AS date_time, + groupArray(toString(num)) AS str, + groupArray(toFixedString(toString(num), 5)) AS fixed_string + FROM (SELECT negate(number) as num FROM system.numbers LIMIT 15); + +INSERT INTO lc_nullable SELECT + groupArray(number) AS order_key, + groupArray(toInt8(number)) AS i8, + groupArray(toInt16(number)) AS i16, + groupArray(toInt32(number)) AS i32, + groupArray(toInt64(number)) AS i64, + groupArray(toUInt8(number)) AS u8, + groupArray(toUInt16(number)) AS u16, + groupArray(toUInt32(number)) AS u32, + groupArray(toUInt64(number)) AS u64, + groupArray(toFloat32(number)) AS f32, + groupArray(toFloat64(number)) AS f64, + groupArray(toDate(number, 'Asia/Istanbul')) AS date, + groupArray(toDateTime(number, 'Asia/Istanbul')) AS date_time, + groupArray(toString(number)) AS str, + groupArray(toFixedString(toString(number), 5)) AS fixed_string + FROM (SELECT number FROM system.numbers WHERE number >= 5 LIMIT 15); + +INSERT INTO lc_nullable SELECT + groupArray(number) AS order_key, + groupArray(toInt8(number)) AS i8, + groupArray(toInt16(number)) AS i16, + groupArray(toInt32(number)) AS i32, + groupArray(toInt64(number)) AS i64, + groupArray(toUInt8(number)) AS u8, + groupArray(toUInt16(number)) AS u16, + groupArray(toUInt32(number)) AS u32, + groupArray(toUInt64(number)) AS u64, + groupArray(toFloat32(number)) AS f32, + groupArray(toFloat64(number)) AS f64, + groupArray(toDate(number, 'Asia/Istanbul')) AS date, + groupArray(toDateTime(number, 'Asia/Istanbul')) AS date_time, + groupArray(toString(number)) AS str, + groupArray(toFixedString(toString(number), 5)) AS fixed_string + FROM (SELECT number FROM system.numbers WHERE number >= 10 LIMIT 15); + +INSERT INTO lc_nullable SELECT + n AS order_key, + n AS i8, + n AS i16, + n AS i32, + n AS i64, + n AS u8, + n AS u16, + n AS u32, + n AS u64, + n AS f32, + n AS f64, + n AS date, + n 
AS date_time, + n AS str, + n AS fixed_string + FROM (SELECT [NULL] AS n); + +INSERT INTO lc_nullable SELECT + [NULL, n] AS order_key, + [NULL, toInt8(n)] AS i8, + [NULL, toInt16(n)] AS i16, + [NULL, toInt32(n)] AS i32, + [NULL, toInt64(n)] AS i64, + [NULL, toUInt8(n)] AS u8, + [NULL, toUInt16(n)] AS u16, + [NULL, toUInt32(n)] AS u32, + [NULL, toUInt64(n)] AS u64, + [NULL, toFloat32(n)] AS f32, + [NULL, toFloat64(n)] AS f64, + [NULL, toDate(n, 'Asia/Istanbul')] AS date, + [NULL, toDateTime(n, 'Asia/Istanbul')] AS date_time, + [NULL, toString(n)] AS str, + [NULL, toFixedString(toString(n), 5)] AS fixed_string + FROM (SELECT 100 as n); + +SELECT count() FROM lc_nullable WHERE has(i8, 1); +SELECT count() FROM lc_nullable WHERE has(i16, 1); +SELECT count() FROM lc_nullable WHERE has(i32, 1); +SELECT count() FROM lc_nullable WHERE has(i64, 1); +SELECT count() FROM lc_nullable WHERE has(u8, 1); +SELECT count() FROM lc_nullable WHERE has(u16, 1); +SELECT count() FROM lc_nullable WHERE has(u32, 1); +SELECT count() FROM lc_nullable WHERE has(u64, 1); +SELECT count() FROM lc_nullable WHERE has(f32, 1); +SELECT count() FROM lc_nullable WHERE has(f64, 1); +SELECT count() FROM lc_nullable WHERE has(date, toDate('1970-01-02')); +SELECT count() FROM lc_nullable WHERE has(date_time, toDateTime('1970-01-01 02:00:01', 'Asia/Istanbul')); +SELECT count() FROM lc_nullable WHERE has(str, '1'); +SELECT count() FROM lc_nullable WHERE has(fixed_string, toFixedString('1', 5)); + +SELECT count() FROM lc_nullable WHERE has(i8, -1); +SELECT count() FROM lc_nullable WHERE has(i16, -1); +SELECT count() FROM lc_nullable WHERE has(i32, -1); +SELECT count() FROM lc_nullable WHERE has(i64, -1); +SELECT count() FROM lc_nullable WHERE has(u8, -1); +SELECT count() FROM lc_nullable WHERE has(u16, -1); +SELECT count() FROM lc_nullable WHERE has(u32, -1); +SELECT count() FROM lc_nullable WHERE has(u64, -1); +SELECT count() FROM lc_nullable WHERE has(f32, -1); +SELECT count() FROM lc_nullable WHERE has(f64, -1); +SELECT count() FROM lc_nullable WHERE has(str, '-1'); +SELECT count() FROM lc_nullable WHERE has(fixed_string, toFixedString('-1', 5)); + +SELECT count() FROM lc_nullable WHERE has(i8, 5); +SELECT count() FROM lc_nullable WHERE has(i16, 5); +SELECT count() FROM lc_nullable WHERE has(i32, 5); +SELECT count() FROM lc_nullable WHERE has(i64, 5); +SELECT count() FROM lc_nullable WHERE has(u8, 5); +SELECT count() FROM lc_nullable WHERE has(u16, 5); +SELECT count() FROM lc_nullable WHERE has(u32, 5); +SELECT count() FROM lc_nullable WHERE has(u64, 5); +SELECT count() FROM lc_nullable WHERE has(f32, 5); +SELECT count() FROM lc_nullable WHERE has(f64, 5); +SELECT count() FROM lc_nullable WHERE has(date, toDate('1970-01-06')); +SELECT count() FROM lc_nullable WHERE has(date_time, toDateTime('1970-01-01 02:00:05', 'Asia/Istanbul')); +SELECT count() FROM lc_nullable WHERE has(str, '5'); +SELECT count() FROM lc_nullable WHERE has(fixed_string, toFixedString('5', 5)); + +SELECT count() FROM lc_nullable WHERE has(i8, 10); +SELECT count() FROM lc_nullable WHERE has(i16, 10); +SELECT count() FROM lc_nullable WHERE has(i32, 10); +SELECT count() FROM lc_nullable WHERE has(i64, 10); +SELECT count() FROM lc_nullable WHERE has(u8, 10); +SELECT count() FROM lc_nullable WHERE has(u16, 10); +SELECT count() FROM lc_nullable WHERE has(u32, 10); +SELECT count() FROM lc_nullable WHERE has(u64, 10); +SELECT count() FROM lc_nullable WHERE has(f32, 10); +SELECT count() FROM lc_nullable WHERE has(f64, 10); +SELECT count() FROM lc_nullable WHERE 
has(date, toDate('1970-01-11')); +SELECT count() FROM lc_nullable WHERE has(date_time, toDateTime('1970-01-01 02:00:10', 'Asia/Istanbul')); +SELECT count() FROM lc_nullable WHERE has(str, '10'); +SELECT count() FROM lc_nullable WHERE has(fixed_string, toFixedString('10', 5)); + +SELECT count() FROM lc_nullable WHERE has(i8, NULL); +SELECT count() FROM lc_nullable WHERE has(i16, NULL); +SELECT count() FROM lc_nullable WHERE has(i32, NULL); +SELECT count() FROM lc_nullable WHERE has(i64, NULL); +SELECT count() FROM lc_nullable WHERE has(u8, NULL); +SELECT count() FROM lc_nullable WHERE has(u16, NULL); +SELECT count() FROM lc_nullable WHERE has(u32, NULL); +SELECT count() FROM lc_nullable WHERE has(u64, NULL); +SELECT count() FROM lc_nullable WHERE has(f32, NULL); +SELECT count() FROM lc_nullable WHERE has(f64, NULL); +SELECT count() FROM lc_nullable WHERE has(date, NULL); +SELECT count() FROM lc_nullable WHERE has(date_time, NULL); +SELECT count() FROM lc_nullable WHERE has(str, NULL); +SELECT count() FROM lc_nullable WHERE has(fixed_string, NULL); + +SELECT count() FROM lc_nullable WHERE has(i8, 100); +SELECT count() FROM lc_nullable WHERE has(i16, 100); +SELECT count() FROM lc_nullable WHERE has(i32, 100); +SELECT count() FROM lc_nullable WHERE has(i64, 100); +SELECT count() FROM lc_nullable WHERE has(u8, 100); +SELECT count() FROM lc_nullable WHERE has(u16, 100); +SELECT count() FROM lc_nullable WHERE has(u32, 100); +SELECT count() FROM lc_nullable WHERE has(u64, 100); +SELECT count() FROM lc_nullable WHERE has(f32, 100); +SELECT count() FROM lc_nullable WHERE has(f64, 100); +SELECT count() FROM lc_nullable WHERE has(date, toDate('1970-04-11')); +SELECT count() FROM lc_nullable WHERE has(date_time, toDateTime('1970-01-01 02:01:40', 'Asia/Istanbul')); +SELECT count() FROM lc_nullable WHERE has(str, '100'); +SELECT count() FROM lc_nullable WHERE has(fixed_string, toFixedString('100', 5)); + +SELECT count() FROM lc_nullable WHERE has(date, toDate(has(u64, 1), '1970-01\002')); + +DROP TABLE IF EXISTS lc_nullable; diff --git a/parser/testdata/01414_mutations_and_errors/ast.json b/parser/testdata/01414_mutations_and_errors/ast.json new file mode 100644 index 000000000..fe532cc7b --- /dev/null +++ b/parser/testdata/01414_mutations_and_errors/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mutation_table (children 1)" + }, + { + "explain": " Identifier mutation_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001355982, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/01414_mutations_and_errors/metadata.json b/parser/testdata/01414_mutations_and_errors/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01414_mutations_and_errors/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01414_mutations_and_errors/query.sql b/parser/testdata/01414_mutations_and_errors/query.sql new file mode 100644 index 000000000..b9eabb43e --- /dev/null +++ b/parser/testdata/01414_mutations_and_errors/query.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS mutation_table; + +CREATE TABLE mutation_table +( + date Date, + key UInt64, + value String +) +ENGINE = MergeTree() +PARTITION BY date +ORDER BY tuple(); + +INSERT INTO mutation_table SELECT toDate('2019-10-01'), number, '42' FROM numbers(100); + +INSERT INTO mutation_table SELECT toDate('2019-10-02'), number, 'Hello' FROM numbers(100); + +SELECT distinct(value) FROM 
mutation_table ORDER BY value; + +ALTER TABLE mutation_table MODIFY COLUMN value UInt64 SETTINGS mutations_sync = 2; --{serverError UNFINISHED} + +SELECT distinct(value) FROM mutation_table ORDER BY value; --{serverError CANNOT_PARSE_TEXT} + +KILL MUTATION where table = 'mutation_table' and database = currentDatabase(); + +ALTER TABLE mutation_table MODIFY COLUMN value String SETTINGS mutations_sync = 2; + +SELECT distinct(value) FROM mutation_table ORDER BY value; + +DROP TABLE IF EXISTS mutation_table; diff --git a/parser/testdata/01414_push_predicate_when_contains_with_clause/ast.json b/parser/testdata/01414_push_predicate_when_contains_with_clause/ast.json new file mode 100644 index 000000000..6c4a6d306 --- /dev/null +++ b/parser/testdata/01414_push_predicate_when_contains_with_clause/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery numbers_indexed (children 1)" + }, + { + "explain": " Identifier numbers_indexed" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001213146, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/01414_push_predicate_when_contains_with_clause/metadata.json b/parser/testdata/01414_push_predicate_when_contains_with_clause/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01414_push_predicate_when_contains_with_clause/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01414_push_predicate_when_contains_with_clause/query.sql b/parser/testdata/01414_push_predicate_when_contains_with_clause/query.sql new file mode 100644 index 000000000..cf3307205 --- /dev/null +++ b/parser/testdata/01414_push_predicate_when_contains_with_clause/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS numbers_indexed; +DROP TABLE IF EXISTS squares; + +CREATE TABLE numbers_indexed Engine=MergeTree ORDER BY number PARTITION BY bitShiftRight(number,8) SETTINGS index_granularity=8 AS SELECT * FROM numbers(16384); + +CREATE VIEW squares AS WITH number*2 AS square_number SELECT number, square_number FROM numbers_indexed; + +SET max_rows_to_read=8, read_overflow_mode='throw'; + +WITH number * 2 AS square_number SELECT number, square_number FROM numbers_indexed WHERE number = 999; + +SELECT * FROM squares WHERE number = 999; + +EXPLAIN SYNTAX SELECT number, square_number FROM ( WITH number * 2 AS square_number SELECT number, square_number FROM numbers_indexed) AS squares WHERE number = 999; + +DROP TABLE IF EXISTS squares; +DROP TABLE IF EXISTS numbers_indexed; diff --git a/parser/testdata/01415_inconsistent_merge_tree_settings/ast.json b/parser/testdata/01415_inconsistent_merge_tree_settings/ast.json new file mode 100644 index 000000000..640a587e1 --- /dev/null +++ b/parser/testdata/01415_inconsistent_merge_tree_settings/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001182733, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01415_inconsistent_merge_tree_settings/metadata.json b/parser/testdata/01415_inconsistent_merge_tree_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01415_inconsistent_merge_tree_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01415_inconsistent_merge_tree_settings/query.sql b/parser/testdata/01415_inconsistent_merge_tree_settings/query.sql new file mode 100644 index 000000000..2ce0575c4 --- /dev/null +++ b/parser/testdata/01415_inconsistent_merge_tree_settings/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS t; + +SET mutations_sync = 1; +CREATE TABLE t (x UInt8, s String) ENGINE = MergeTree ORDER BY x SETTINGS number_of_free_entries_in_pool_to_execute_mutation = 15; + +INSERT INTO t VALUES (1, 'hello'); +SELECT * FROM t; + +ALTER TABLE t UPDATE s = 'world' WHERE x = 1; +SELECT * FROM t; + +DROP TABLE t; diff --git a/parser/testdata/01415_overlimiting_threads_for_repica_bug/ast.json b/parser/testdata/01415_overlimiting_threads_for_repica_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01415_overlimiting_threads_for_repica_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01415_overlimiting_threads_for_repica_bug/metadata.json b/parser/testdata/01415_overlimiting_threads_for_repica_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01415_overlimiting_threads_for_repica_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01415_overlimiting_threads_for_repica_bug/query.sql b/parser/testdata/01415_overlimiting_threads_for_repica_bug/query.sql new file mode 100644 index 000000000..1a600085e --- /dev/null +++ b/parser/testdata/01415_overlimiting_threads_for_repica_bug/query.sql @@ -0,0 +1,11 @@ +-- Tags: no-parallel, no-fasttest +-- no-parallel: it checks the number of threads, which can be lowered in presence of other queries + +set log_queries = 1; +set max_threads = 16; +set prefer_localhost_replica = 1; + +select sum(number) from remote('127.0.0.{1|2}', numbers_mt(1000000)) group by number % 2 order by number % 2; + +system flush logs query_log; +select length(thread_ids) >= 1 from system.query_log where current_database = currentDatabase() and event_date >= today() - 1 and lower(query) like '%select sum(number) from remote(_127.0.0.{1|2}_, numbers_mt(1000000)) group by number %' and type = 'QueryFinish' order by query_start_time desc limit 1; diff --git a/parser/testdata/01415_table_function_view/ast.json b/parser/testdata/01415_table_function_view/ast.json new file mode 100644 index 000000000..96b034805 --- /dev/null +++ b/parser/testdata/01415_table_function_view/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function view (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001297485, + "rows_read": 15, + "bytes_read": 603 + } +} diff --git a/parser/testdata/01415_table_function_view/metadata.json 
b/parser/testdata/01415_table_function_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01415_table_function_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01415_table_function_view/query.sql b/parser/testdata/01415_table_function_view/query.sql new file mode 100644 index 000000000..0beeb64c0 --- /dev/null +++ b/parser/testdata/01415_table_function_view/query.sql @@ -0,0 +1,5 @@ +SELECT * FROM view(SELECT 1); +SELECT * FROM remote('127.0.0.1', view(SELECT 1)); + +EXPLAIN SYNTAX SELECT * FROM view(SELECT 1); +EXPLAIN SYNTAX SELECT * FROM remote('127.0.0.1', view(SELECT 1)); diff --git a/parser/testdata/01416_clear_column_pk/ast.json b/parser/testdata/01416_clear_column_pk/ast.json new file mode 100644 index 000000000..8beff9ab7 --- /dev/null +++ b/parser/testdata/01416_clear_column_pk/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_with_pk_clear (children 1)" + }, + { + "explain": " Identifier table_with_pk_clear" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00100595, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/01416_clear_column_pk/metadata.json b/parser/testdata/01416_clear_column_pk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01416_clear_column_pk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01416_clear_column_pk/query.sql b/parser/testdata/01416_clear_column_pk/query.sql new file mode 100644 index 000000000..794fb702b --- /dev/null +++ b/parser/testdata/01416_clear_column_pk/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS table_with_pk_clear; + +CREATE TABLE table_with_pk_clear( + key1 UInt64, + key2 String, + value1 String, + value2 String +) +ENGINE = MergeTree() +ORDER by (key1, key2); + +INSERT INTO table_with_pk_clear SELECT number, number * number, toString(number), toString(number * number) FROM numbers(1000); + +ALTER TABLE table_with_pk_clear CLEAR COLUMN key1 IN PARTITION tuple(); --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +SELECT count(distinct key1) FROM table_with_pk_clear; + +ALTER TABLE table_with_pk_clear CLEAR COLUMN key2 IN PARTITION tuple(); --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +SELECT count(distinct key2) FROM table_with_pk_clear; + +DROP TABLE IF EXISTS table_with_pk_clear; diff --git a/parser/testdata/01416_join_totals_header_bug/ast.json b/parser/testdata/01416_join_totals_header_bug/ast.json new file mode 100644 index 000000000..97fbfecea --- /dev/null +++ b/parser/testdata/01416_join_totals_header_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tableCommon (children 1)" + }, + { + "explain": " Identifier tableCommon" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001465284, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/01416_join_totals_header_bug/metadata.json b/parser/testdata/01416_join_totals_header_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01416_join_totals_header_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01416_join_totals_header_bug/query.sql b/parser/testdata/01416_join_totals_header_bug/query.sql new file mode 100644 index 000000000..089a1d4b7 --- /dev/null +++ 
b/parser/testdata/01416_join_totals_header_bug/query.sql @@ -0,0 +1,63 @@ +DROP TABLE IF EXISTS tableCommon; +DROP TABLE IF EXISTS tableTrees; +DROP TABLE IF EXISTS tableFlowers; + +CREATE TABLE tableCommon (`key` FixedString(15), `value` Nullable(Int8)) ENGINE = Log(); +CREATE TABLE tableTrees (`key` FixedString(15), `name` Nullable(Int8), `name2` Nullable(Int8)) ENGINE = Log(); +CREATE TABLE tableFlowers (`key` FixedString(15), `name` Nullable(Int8)) ENGINE = Log(); + +SELECT * FROM ( + SELECT common.key, common.value, trees.name, trees.name2 + FROM ( + SELECT * + FROM tableCommon + ) as common + INNER JOIN ( + SELECT * + FROM tableTrees + ) trees ON (common.key = trees.key) +) +UNION ALL +( + SELECT common.key, common.value, + null as name, null as name2 + + FROM ( + SELECT * + FROM tableCommon + ) as common + INNER JOIN ( + SELECT * + FROM tableFlowers + ) flowers ON (common.key = flowers.key) +); + +SELECT * FROM ( + SELECT common.key, common.value, trees.name, trees.name2 + FROM ( + SELECT * + FROM tableCommon + ) as common + INNER JOIN ( + SELECT * + FROM tableTrees + ) trees ON (common.key = trees.key) +) +UNION ALL +( + SELECT common.key, common.value, + flowers.name, null as name2 + + FROM ( + SELECT * + FROM tableCommon + ) as common + INNER JOIN ( + SELECT * + FROM tableFlowers + ) flowers ON (common.key = flowers.key) +); + +DROP TABLE IF EXISTS tableCommon; +DROP TABLE IF EXISTS tableTrees; +DROP TABLE IF EXISTS tableFlowers; diff --git a/parser/testdata/01417_update_permutation_crash/ast.json b/parser/testdata/01417_update_permutation_crash/ast.json new file mode 100644 index 000000000..a4147bb36 --- /dev/null +++ b/parser/testdata/01417_update_permutation_crash/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (alias t) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers_mt (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1000001" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.002047893, + "rows_read": 21, + "bytes_read": 788 + } +} diff --git a/parser/testdata/01417_update_permutation_crash/metadata.json b/parser/testdata/01417_update_permutation_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01417_update_permutation_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01417_update_permutation_crash/query.sql b/parser/testdata/01417_update_permutation_crash/query.sql new file mode 100644 index 000000000..f59237817 --- /dev/null +++ 
b/parser/testdata/01417_update_permutation_crash/query.sql @@ -0,0 +1 @@ +select tuple(1, 1, number) as t from numbers_mt(1000001) order by t, number limit 1; diff --git a/parser/testdata/01418_custom_settings/ast.json b/parser/testdata/01418_custom_settings/ast.json new file mode 100644 index 000000000..63bd81947 --- /dev/null +++ b/parser/testdata/01418_custom_settings/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DROP SETTINGS PROFILE query" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001186789, + "rows_read": 1, + "bytes_read": 35 + } +} diff --git a/parser/testdata/01418_custom_settings/metadata.json b/parser/testdata/01418_custom_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01418_custom_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01418_custom_settings/query.sql b/parser/testdata/01418_custom_settings/query.sql new file mode 100644 index 000000000..f121dba05 --- /dev/null +++ b/parser/testdata/01418_custom_settings/query.sql @@ -0,0 +1,63 @@ +DROP SETTINGS PROFILE IF EXISTS s1_01418, s2_01418; + +SELECT '--- assigning ---'; +SET custom_a = 5; +SET custom_b = -177; +SET custom_c = 98.11; +SET custom_d = 'abc def'; +SELECT getSetting('custom_a') as v, toTypeName(v); +SELECT getSetting('custom_b') as v, toTypeName(v); +SELECT getSetting('custom_c') as v, toTypeName(v); +SELECT getSetting('custom_d') as v, toTypeName(v); +SELECT name, value FROM system.settings WHERE name LIKE 'custom_%' ORDER BY name; + +SELECT '--- modifying ---'; +SET custom_a = 'changed'; +SET custom_b = NULL; +SET custom_c = 50000; +SET custom_d = 1.11; +SELECT getSetting('custom_a') as v, toTypeName(v); +SELECT getSetting('custom_b') as v, toTypeName(v); +SELECT getSetting('custom_c') as v, toTypeName(v); +SELECT getSetting('custom_d') as v, toTypeName(v); +SELECT name, value FROM system.settings WHERE name LIKE 'custom_%' ORDER BY name; + +SELECT '--- undefined setting ---'; +SELECT getSetting('custom_e') as v, toTypeName(v); -- { serverError UNKNOWN_SETTING } -- Setting not found. +SET custom_e = 404; +SELECT getSetting('custom_e') as v, toTypeName(v); + +SELECT '--- wrong prefix ---'; +SET invalid_custom = 8; -- { serverError UNKNOWN_SETTING } -- Setting is neither a builtin nor started with one of the registered prefixes for user-defined settings. + +SELECT '--- using query context ---'; +SELECT getSetting('custom_e') as v, toTypeName(v) SETTINGS custom_e = -0.333; +SELECT name, value FROM system.settings WHERE name = 'custom_e' SETTINGS custom_e = -0.333; +SELECT getSetting('custom_e') as v, toTypeName(v); +SELECT name, value FROM system.settings WHERE name = 'custom_e'; + +SELECT getSetting('custom_f') as v, toTypeName(v) SETTINGS custom_f = 'word'; +SELECT name, value FROM system.settings WHERE name = 'custom_f' SETTINGS custom_f = 'word'; +SELECT getSetting('custom_f') as v, toTypeName(v); -- { serverError UNKNOWN_SETTING } -- Setting not found. 
+SELECT COUNT() FROM system.settings WHERE name = 'custom_f'; + +SELECT '--- compound identifier ---'; +SET custom_compound.identifier.v1 = 'test'; +SELECT getSetting('custom_compound.identifier.v1') as v, toTypeName(v); +SELECT name, value FROM system.settings WHERE name = 'custom_compound.identifier.v1'; + +CREATE SETTINGS PROFILE s1_01418 SETTINGS custom_compound.identifier.v2 = 100; +SHOW CREATE SETTINGS PROFILE s1_01418; +DROP SETTINGS PROFILE s1_01418; + +SELECT '--- null type ---'; +SELECT getSetting('custom_null') as v, toTypeName(v) SETTINGS custom_null = NULL; +SELECT name, value FROM system.settings WHERE name = 'custom_null' SETTINGS custom_null = NULL; + +SET custom_null = NULL; +SELECT getSetting('custom_null') as v, toTypeName(v); +SELECT name, value FROM system.settings WHERE name = 'custom_null'; + +CREATE SETTINGS PROFILE s2_01418 SETTINGS custom_null = NULL; +SHOW CREATE SETTINGS PROFILE s2_01418; +DROP SETTINGS PROFILE s2_01418; diff --git a/parser/testdata/01418_index_analysis_bug/ast.json b/parser/testdata/01418_index_analysis_bug/ast.json new file mode 100644 index 000000000..2d4af7471 --- /dev/null +++ b/parser/testdata/01418_index_analysis_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mytable_local (children 1)" + }, + { + "explain": " Identifier mytable_local" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00140643, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/01418_index_analysis_bug/metadata.json b/parser/testdata/01418_index_analysis_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01418_index_analysis_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01418_index_analysis_bug/query.sql b/parser/testdata/01418_index_analysis_bug/query.sql new file mode 100644 index 000000000..aae76b63b --- /dev/null +++ b/parser/testdata/01418_index_analysis_bug/query.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS mytable_local; + +CREATE TABLE mytable_local ( + created DateTime, + eventday Date, + user_id UInt32 +) +ENGINE = MergeTree() +PARTITION BY toYYYYMM(eventday) +ORDER BY (eventday, user_id); + +INSERT INTO mytable_local SELECT + toDateTime('2020-06-01 00:00:00') + toIntervalMinute(number) AS created, + toDate(created) AS eventday, + if((number % 100) > 50, 742522, number % 32141) AS user_id +FROM numbers(100000); + +SELECT + eventday, + count(*) +FROM mytable_local +WHERE (toYYYYMM(eventday) = 202007) AND (user_id = 742522) AND (eventday >= '2020-07-03') AND (eventday <= '2020-07-25') +GROUP BY eventday +ORDER BY eventday; + +DROP TABLE mytable_local; +DROP TABLE IF EXISTS table_float; + +CREATE TABLE table_float +( + f Float64, + u UInt32 +) +ENGINE = MergeTree +ORDER BY (f, u); + +INSERT INTO table_float VALUES (1.2, 1) (1.3, 2) (1.4, 3) (1.5, 4); + +SELECT count() +FROM table_float +WHERE (toUInt64(f) = 1) AND (f >= 1.3) AND (f <= 1.4) AND (u > 0); + +DROP TABLE table_float; diff --git a/parser/testdata/01418_query_scope_constants_and_remote/ast.json b/parser/testdata/01418_query_scope_constants_and_remote/ast.json new file mode 100644 index 000000000..1f350d8ca --- /dev/null +++ b/parser/testdata/01418_query_scope_constants_and_remote/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, 
+ { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function greaterOrEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier c" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function randConstant (alias c) (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '127.0.0.{1,2}'" + }, + { + "explain": " Function numbers_mt (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.002025664, + "rows_read": 27, + "bytes_read": 1185 + } +} diff --git a/parser/testdata/01418_query_scope_constants_and_remote/metadata.json b/parser/testdata/01418_query_scope_constants_and_remote/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01418_query_scope_constants_and_remote/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01418_query_scope_constants_and_remote/query.sql b/parser/testdata/01418_query_scope_constants_and_remote/query.sql new file mode 100644 index 000000000..fcdf2f789 --- /dev/null +++ b/parser/testdata/01418_query_scope_constants_and_remote/query.sql @@ -0,0 +1,3 @@ +select c >= 0 from (SELECT randConstant() as c FROM remote('127.0.0.{1,2}', numbers_mt(1))); +select c >= 0 from (SELECT randConstant() as c FROM remote('127.0.0.{3,2}', numbers_mt(1))); +select c >= 0 from (SELECT randConstant() as c FROM remote('127.0.0.1', numbers_mt(1))); \ No newline at end of file diff --git a/parser/testdata/01419_materialize_null/ast.json b/parser/testdata/01419_materialize_null/ast.json new file mode 100644 index 000000000..e3c0ac05c --- /dev/null +++ b/parser/testdata/01419_materialize_null/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function isConstant (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal NULL" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001455659, + "rows_read": 7, + "bytes_read": 259 + } +} diff --git a/parser/testdata/01419_materialize_null/metadata.json b/parser/testdata/01419_materialize_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01419_materialize_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01419_materialize_null/query.sql 
b/parser/testdata/01419_materialize_null/query.sql new file mode 100644 index 000000000..5eb5f0a51 --- /dev/null +++ b/parser/testdata/01419_materialize_null/query.sql @@ -0,0 +1,2 @@ +SELECT isConstant(NULL); +SELECT isConstant(materialize(NULL)); diff --git a/parser/testdata/01419_merge_tree_settings_sanity_check/ast.json b/parser/testdata/01419_merge_tree_settings_sanity_check/ast.json new file mode 100644 index 000000000..9f927b876 --- /dev/null +++ b/parser/testdata/01419_merge_tree_settings_sanity_check/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mytable_local (children 1)" + }, + { + "explain": " Identifier mytable_local" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001296933, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/01419_merge_tree_settings_sanity_check/metadata.json b/parser/testdata/01419_merge_tree_settings_sanity_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01419_merge_tree_settings_sanity_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01419_merge_tree_settings_sanity_check/query.sql b/parser/testdata/01419_merge_tree_settings_sanity_check/query.sql new file mode 100644 index 000000000..915c9dae5 --- /dev/null +++ b/parser/testdata/01419_merge_tree_settings_sanity_check/query.sql @@ -0,0 +1,48 @@ +DROP TABLE IF EXISTS mytable_local; + +CREATE TABLE mytable_local +( + created DateTime, + eventday Date, + user_id UInt32 +) +ENGINE = MergeTree() +PARTITION BY toYYYYMM(eventday) +ORDER BY (eventday, user_id) +SETTINGS number_of_free_entries_in_pool_to_execute_mutation = 100; -- { serverError BAD_ARGUMENTS } + +CREATE TABLE mytable_local +( + created DateTime, + eventday Date, + user_id UInt32 +) +ENGINE = MergeTree() +PARTITION BY toYYYYMM(eventday) +ORDER BY (eventday, user_id) +SETTINGS number_of_free_entries_in_pool_to_lower_max_size_of_merge = 100; -- { serverError BAD_ARGUMENTS } + +CREATE TABLE mytable_local +( + created DateTime, + eventday Date, + user_id UInt32 +) +ENGINE = MergeTree() +PARTITION BY toYYYYMM(eventday) +ORDER BY (eventday, user_id) +SETTINGS number_of_free_entries_in_pool_to_execute_optimize_entire_partition = 100; -- { serverError BAD_ARGUMENTS } + +CREATE TABLE mytable_local +( + created DateTime, + eventday Date, + user_id UInt32 +) +ENGINE = MergeTree() +PARTITION BY toYYYYMM(eventday) +ORDER BY (eventday, user_id); + +ALTER TABLE mytable_local MODIFY SETTING number_of_free_entries_in_pool_to_execute_mutation = 100; -- { serverError BAD_ARGUMENTS } + +DROP TABLE mytable_local; diff --git a/parser/testdata/01419_skip_index_compact_parts/ast.json b/parser/testdata/01419_skip_index_compact_parts/ast.json new file mode 100644 index 000000000..4b45cbd4f --- /dev/null +++ b/parser/testdata/01419_skip_index_compact_parts/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery index_compact (children 1)" + }, + { + "explain": " Identifier index_compact" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00132683, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/01419_skip_index_compact_parts/metadata.json b/parser/testdata/01419_skip_index_compact_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01419_skip_index_compact_parts/metadata.json @@ -0,0 +1 @@ 
+{"todo": true} diff --git a/parser/testdata/01419_skip_index_compact_parts/query.sql b/parser/testdata/01419_skip_index_compact_parts/query.sql new file mode 100644 index 000000000..580fcf7f4 --- /dev/null +++ b/parser/testdata/01419_skip_index_compact_parts/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS index_compact; + +CREATE TABLE index_compact(a UInt32, b UInt32, index i1 b type minmax granularity 1) + ENGINE = MergeTree ORDER BY a + SETTINGS min_rows_for_wide_part = 1000, index_granularity = 128, merge_max_block_size = 100; + +INSERT INTO index_compact SELECT number, toString(number) FROM numbers(100); +INSERT INTO index_compact SELECT number, toString(number) FROM numbers(30); + +OPTIMIZE TABLE index_compact FINAL; + +SELECT count() FROM index_compact WHERE b < 10; + +DROP TABLE index_compact; diff --git a/parser/testdata/01420_logical_functions_materialized_null/ast.json b/parser/testdata/01420_logical_functions_materialized_null/ast.json new file mode 100644 index 000000000..72f54c1fa --- /dev/null +++ b/parser/testdata/01420_logical_functions_materialized_null/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001208737, + "rows_read": 8, + "bytes_read": 282 + } +} diff --git a/parser/testdata/01420_logical_functions_materialized_null/metadata.json b/parser/testdata/01420_logical_functions_materialized_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01420_logical_functions_materialized_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01420_logical_functions_materialized_null/query.sql b/parser/testdata/01420_logical_functions_materialized_null/query.sql new file mode 100644 index 000000000..7c61295ca --- /dev/null +++ b/parser/testdata/01420_logical_functions_materialized_null/query.sql @@ -0,0 +1,8 @@ +SELECT NULL AND 1; +SELECT NULL OR 1; +SELECT materialize(NULL) AND 1; +SELECT materialize(NULL) OR 1; +SELECT arrayJoin([NULL]) AND 1; +SELECT arrayJoin([NULL]) OR 1; + +SELECT isConstant(arrayJoin([NULL]) AND 1); diff --git a/parser/testdata/01421_array_nullable_element_nullable_index/ast.json b/parser/testdata/01421_array_nullable_element_nullable_index/ast.json new file mode 100644 index 000000000..d63e48a7d --- /dev/null +++ b/parser/testdata/01421_array_nullable_element_nullable_index/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function arrayElement (alias y) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" 
+ }, + { + "explain": " Identifier x" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001177528, + "rows_read": 15, + "bytes_read": 600 + } +} diff --git a/parser/testdata/01421_array_nullable_element_nullable_index/metadata.json b/parser/testdata/01421_array_nullable_element_nullable_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01421_array_nullable_element_nullable_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01421_array_nullable_element_nullable_index/query.sql b/parser/testdata/01421_array_nullable_element_nullable_index/query.sql new file mode 100644 index 000000000..5cf8bc8fe --- /dev/null +++ b/parser/testdata/01421_array_nullable_element_nullable_index/query.sql @@ -0,0 +1,4 @@ +SELECT [toNullable(1)] AS x, x[toNullable(1)] AS y; +SELECT materialize([toNullable(1)]) AS x, x[toNullable(1)] AS y; +SELECT [toNullable(1)] AS x, x[materialize(toNullable(1))] AS y; +SELECT materialize([toNullable(1)]) AS x, x[materialize(toNullable(1))] AS y; diff --git a/parser/testdata/01421_assert_in_in/ast.json b/parser/testdata/01421_assert_in_in/ast.json new file mode 100644 index 000000000..3ee82dc1a --- /dev/null +++ b/parser/testdata/01421_assert_in_in/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(UInt64_1, Tuple_(UInt64_2, UInt64_3))" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.000897212, + "rows_read": 17, + "bytes_read": 707 + } +} diff --git a/parser/testdata/01421_assert_in_in/metadata.json b/parser/testdata/01421_assert_in_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01421_assert_in_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01421_assert_in_in/query.sql b/parser/testdata/01421_assert_in_in/query.sql new file mode 100644 index 000000000..22fd2f072 --- /dev/null +++ b/parser/testdata/01421_assert_in_in/query.sql @@ -0,0 +1 @@ +SELECT (1, 2) IN ((1, (2, 3)), (1 + 1, 1)); -- { serverError TYPE_MISMATCH } diff --git a/parser/testdata/01422_array_nullable_element_nullable_index/ast.json b/parser/testdata/01422_array_nullable_element_nullable_index/ast.json new file mode 100644 index 000000000..dc0e0c053 --- /dev/null +++ b/parser/testdata/01422_array_nullable_element_nullable_index/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery 
(children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_1, NULL]" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001449767, + "rows_read": 10, + "bytes_read": 399 + } +} diff --git a/parser/testdata/01422_array_nullable_element_nullable_index/metadata.json b/parser/testdata/01422_array_nullable_element_nullable_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01422_array_nullable_element_nullable_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01422_array_nullable_element_nullable_index/query.sql b/parser/testdata/01422_array_nullable_element_nullable_index/query.sql new file mode 100644 index 000000000..82def86ea --- /dev/null +++ b/parser/testdata/01422_array_nullable_element_nullable_index/query.sql @@ -0,0 +1,3 @@ +SELECT [1, NULL][toNullable(1)]; +SELECT [toNullable(1)][toNullable(1)]; +SELECT [NULL][toNullable(1)]; diff --git a/parser/testdata/01422_map_skip_null/ast.json b/parser/testdata/01422_map_skip_null/ast.json new file mode 100644 index 000000000..6b79443a3 --- /dev/null +++ b/parser/testdata/01422_map_skip_null/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function minMap (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_1]" + }, + { + "explain": " Literal Array_[NULL]" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_1]" + }, + { + "explain": " Literal Array_[NULL]" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001143215, + "rows_read": 18, + "bytes_read": 753 + } +} diff --git a/parser/testdata/01422_map_skip_null/metadata.json b/parser/testdata/01422_map_skip_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01422_map_skip_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01422_map_skip_null/query.sql b/parser/testdata/01422_map_skip_null/query.sql new file mode 100644 index 000000000..bc632cb03 --- /dev/null +++ b/parser/testdata/01422_map_skip_null/query.sql @@ -0,0 +1,9 @@ +select minMap(arrayJoin([([1], [null]), ([1], [null])])); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select maxMap(arrayJoin([([1], [null]), ([1], [null])])); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select sumMap(arrayJoin([([1], [null]), ([1], [null])])); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select 
sumMapWithOverflow(arrayJoin([([1], [null]), ([1], [null])])); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +select minMap(arrayJoin([([1, 2], [null, 11]), ([1, 2], [null, 22])])); +select maxMap(arrayJoin([([1, 2], [null, 11]), ([1, 2], [null, 22])])); +select sumMap(arrayJoin([([1, 2], [null, 11]), ([1, 2], [null, 22])])); +select sumMapWithOverflow(arrayJoin([([1, 2], [null, 11]), ([1, 2], [null, 22])])); diff --git a/parser/testdata/01423_if_nullable_cond/ast.json b/parser/testdata/01423_if_nullable_cond/ast.json new file mode 100644 index 000000000..8fcbae8f1 --- /dev/null +++ b/parser/testdata/01423_if_nullable_cond/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function if (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal 'Nullable(UInt8)'" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal 'Nullable(UInt8)'" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function dumpColumnStructure (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001194216, + "rows_read": 24, + "bytes_read": 920 + } +} diff --git a/parser/testdata/01423_if_nullable_cond/metadata.json b/parser/testdata/01423_if_nullable_cond/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01423_if_nullable_cond/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01423_if_nullable_cond/query.sql b/parser/testdata/01423_if_nullable_cond/query.sql new file mode 100644 index 000000000..9c56e9dbe --- /dev/null +++ b/parser/testdata/01423_if_nullable_cond/query.sql @@ -0,0 +1 @@ +SELECT CAST(null, 'Nullable(UInt8)') = 1 ? 
CAST(null, 'Nullable(UInt8)') : -1 AS x, toTypeName(x), dumpColumnStructure(x); diff --git a/parser/testdata/01424_parse_date_time_bad_date/ast.json b/parser/testdata/01424_parse_date_time_bad_date/ast.json new file mode 100644 index 000000000..bccbc8fe4 --- /dev/null +++ b/parser/testdata/01424_parse_date_time_bad_date/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function parseDateTime64BestEffort (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2.55'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.00102402, + "rows_read": 7, + "bytes_read": 276 + } +} diff --git a/parser/testdata/01424_parse_date_time_bad_date/metadata.json b/parser/testdata/01424_parse_date_time_bad_date/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01424_parse_date_time_bad_date/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01424_parse_date_time_bad_date/query.sql b/parser/testdata/01424_parse_date_time_bad_date/query.sql new file mode 100644 index 000000000..897a20830 --- /dev/null +++ b/parser/testdata/01424_parse_date_time_bad_date/query.sql @@ -0,0 +1,2 @@ +select parseDateTime64BestEffort('2.55'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime64BestEffortOrNull('2.55'); diff --git a/parser/testdata/01425_decimal_parse_big_negative_exponent/ast.json b/parser/testdata/01425_decimal_parse_big_negative_exponent/ast.json new file mode 100644 index 000000000..241ee1f3e --- /dev/null +++ b/parser/testdata/01425_decimal_parse_big_negative_exponent/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '-1E9-1E9-1E9-1E9' (alias x)" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001155517, + "rows_read": 9, + "bytes_read": 338 + } +} diff --git a/parser/testdata/01425_decimal_parse_big_negative_exponent/metadata.json b/parser/testdata/01425_decimal_parse_big_negative_exponent/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01425_decimal_parse_big_negative_exponent/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01425_decimal_parse_big_negative_exponent/query.sql b/parser/testdata/01425_decimal_parse_big_negative_exponent/query.sql new file mode 100644 index 000000000..085d015be --- /dev/null +++ b/parser/testdata/01425_decimal_parse_big_negative_exponent/query.sql @@ -0,0 +1,10 @@ +SELECT '-1E9-1E9-1E9-1E9' AS x, toDecimal32(x, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '-1E9' AS x, toDecimal32(x, 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '1E-9' AS x, toDecimal32(x, 0); +SELECT '1E-8' AS x, toDecimal32(x, 0); +SELECT '1E-7' AS x, toDecimal32(x, 0); +SELECT '1e-7' AS x, toDecimal32(x, 
0); +SELECT '1E-9' AS x, toDecimal32(x, 9); +SELECT '1E-9' AS x, toDecimal32(x, 10); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '1E-10' AS x, toDecimal32(x, 10); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT '1E-10' AS x, toDecimal32(x, 9); diff --git a/parser/testdata/01425_default_value_of_type_name/ast.json b/parser/testdata/01425_default_value_of_type_name/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01425_default_value_of_type_name/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01425_default_value_of_type_name/metadata.json b/parser/testdata/01425_default_value_of_type_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01425_default_value_of_type_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01425_default_value_of_type_name/query.sql b/parser/testdata/01425_default_value_of_type_name/query.sql new file mode 100644 index 000000000..c783d005a --- /dev/null +++ b/parser/testdata/01425_default_value_of_type_name/query.sql @@ -0,0 +1,8 @@ +SELECT + defaultValueOfTypeName('Int64'), + defaultValueOfTypeName('String'), + defaultValueOfTypeName('UUID'), + defaultValueOfTypeName('IPv4'), + defaultValueOfTypeName('IPv6'), + defaultValueOfTypeName('Decimal128(3)'), + defaultValueOfTypeName('Tuple(Date, DateTime(\'UTC\'), Array(Array(String)), Nullable(UInt8))'); diff --git a/parser/testdata/01426_geohash_constants/ast.json b/parser/testdata/01426_geohash_constants/ast.json new file mode 100644 index 000000000..42da1ffe8 --- /dev/null +++ b/parser/testdata/01426_geohash_constants/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function geohashesInBox (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal Float64_1" + }, + { + "explain": " Literal Float64_2" + }, + { + "explain": " Literal Float64_3" + }, + { + "explain": " Literal Float64_4" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001346114, + "rows_read": 11, + "bytes_read": 391 + } +} diff --git a/parser/testdata/01426_geohash_constants/metadata.json b/parser/testdata/01426_geohash_constants/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01426_geohash_constants/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01426_geohash_constants/query.sql b/parser/testdata/01426_geohash_constants/query.sql new file mode 100644 index 000000000..4836ed6b2 --- /dev/null +++ b/parser/testdata/01426_geohash_constants/query.sql @@ -0,0 +1,6 @@ +SELECT geohashesInBox(1., 2., 3., 4., 1); +SELECT geohashesInBox(materialize(1.), 2., 3., 4., 2); +SELECT geohashesInBox(1., materialize(2.), 3., 4., 3); +SELECT geohashesInBox(1., 2., materialize(3.), 4., 1); +SELECT geohashesInBox(1., 2., 3., materialize(4.), 2); +SELECT geohashesInBox(1., 2., 3., 4., materialize(3)); diff --git a/parser/testdata/01427_pk_and_expression_with_different_type/ast.json b/parser/testdata/01427_pk_and_expression_with_different_type/ast.json new file mode 100644 index 000000000..70206bbaa --- /dev/null +++ 
b/parser/testdata/01427_pk_and_expression_with_different_type/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery pk (children 1)" + }, + { + "explain": " Identifier pk" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001603978, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01427_pk_and_expression_with_different_type/metadata.json b/parser/testdata/01427_pk_and_expression_with_different_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01427_pk_and_expression_with_different_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01427_pk_and_expression_with_different_type/query.sql b/parser/testdata/01427_pk_and_expression_with_different_type/query.sql new file mode 100644 index 000000000..d905eac4c --- /dev/null +++ b/parser/testdata/01427_pk_and_expression_with_different_type/query.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS pk; +CREATE TABLE pk (x DateTime) ENGINE = MergeTree ORDER BY toStartOfMinute(x) SETTINGS index_granularity = 1; +SELECT * FROM pk WHERE x >= toDateTime(120) AND x <= toDateTime(NULL); +DROP TABLE pk; diff --git a/parser/testdata/01428_h3_range_check/ast.json b/parser/testdata/01428_h3_range_check/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01428_h3_range_check/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01428_h3_range_check/metadata.json b/parser/testdata/01428_h3_range_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01428_h3_range_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01428_h3_range_check/query.sql b/parser/testdata/01428_h3_range_check/query.sql new file mode 100644 index 000000000..c60663898 --- /dev/null +++ b/parser/testdata/01428_h3_range_check/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest + +SELECT h3ToChildren(599405990164561919, 100); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT h3ToParent(599405990164561919, 100); -- { serverError ARGUMENT_OUT_OF_BOUND } diff --git a/parser/testdata/01428_hash_set_nan_key/ast.json b/parser/testdata/01428_hash_set_nan_key/ast.json new file mode 100644 index 000000000..3de39e8ea --- /dev/null +++ b/parser/testdata/01428_hash_set_nan_key/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function uniqExact (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_nan" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1000" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001561111, + "rows_read": 13, + "bytes_read": 520 + } +} diff --git a/parser/testdata/01428_hash_set_nan_key/metadata.json b/parser/testdata/01428_hash_set_nan_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01428_hash_set_nan_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01428_hash_set_nan_key/query.sql b/parser/testdata/01428_hash_set_nan_key/query.sql new file mode 100644 index 000000000..837cd56a4 --- /dev/null +++ b/parser/testdata/01428_hash_set_nan_key/query.sql @@ -0,0 +1,10 @@ +SELECT uniqExact(nan) FROM numbers(1000); +SELECT uniqExact(number + nan) FROM numbers(1000); +SELECT sumDistinct(number + nan) FROM numbers(1000); +SELECT DISTINCT number + nan FROM numbers(1000); + +SELECT topKWeightedMerge(1)(initializeAggregation('topKWeightedState(1)', nan, arrayJoin(range(10)))); + +select number + nan k from numbers(256) group by k; + +SELECT uniqExact(reinterpretAsFloat64(reinterpretAsFixedString(reinterpretAsUInt64(reinterpretAsFixedString(nan)) + number))) FROM numbers(10); diff --git a/parser/testdata/01428_nullable_asof_join/ast.json b/parser/testdata/01428_nullable_asof_join/ast.json new file mode 100644 index 000000000..714af8631 --- /dev/null +++ b/parser/testdata/01428_nullable_asof_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001158966, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01428_nullable_asof_join/metadata.json b/parser/testdata/01428_nullable_asof_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01428_nullable_asof_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01428_nullable_asof_join/query.sql b/parser/testdata/01428_nullable_asof_join/query.sql new file mode 100644 index 000000000..41f6ba8a0 --- /dev/null +++ b/parser/testdata/01428_nullable_asof_join/query.sql @@ -0,0 +1,134 @@ +SET join_use_nulls = 1; + +select 'left asof using'; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toUInt8(number) as dt FROM numbers(3)) a +ASOF LEFT JOIN (SELECT 1 as pk, 2 as dt) b +USING(pk, dt) +ORDER BY a.dt; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toNullable(toUInt8(number)) as dt FROM numbers(3)) a +ASOF LEFT JOIN (SELECT 1 as pk, 2 as dt) b +USING(pk, dt) +ORDER BY a.dt; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toUInt8(number) as dt FROM numbers(3)) a +ASOF LEFT JOIN (SELECT 1 as pk, toNullable(0) as dt) b +USING(pk, dt) +ORDER BY a.dt SETTINGS enable_analyzer = 0; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toUInt8(number) as dt FROM numbers(3)) a +ASOF LEFT JOIN (SELECT 1 as pk, toNullable(0) as dt) b +USING(pk, dt) +ORDER BY a.dt SETTINGS enable_analyzer = 1; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toNullable(toUInt8(number)) as dt FROM numbers(3)) a +ASOF LEFT JOIN (SELECT 1 as pk, toNullable(0) as dt) b +USING(pk, dt) +ORDER BY a.dt; + +select 'left asof on'; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), 
toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toUInt8(number) as dt FROM numbers(3)) a +ASOF LEFT JOIN (SELECT 1 as pk, 2 as dt) b +ON a.pk = b.pk AND a.dt >= b.dt +ORDER BY a.dt; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toNullable(toUInt8(number)) as dt FROM numbers(3)) a +ASOF LEFT JOIN (SELECT 1 as pk, 2 as dt) b +ON a.pk = b.pk AND a.dt >= b.dt +ORDER BY a.dt; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toUInt8(number) as dt FROM numbers(3)) a +ASOF LEFT JOIN (SELECT 1 as pk, toNullable(0) as dt) b +ON a.pk = b.pk AND a.dt >= b.dt +ORDER BY a.dt; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toNullable(toUInt8(number)) as dt FROM numbers(3)) a +ASOF LEFT JOIN (SELECT 1 as pk, toNullable(0) as dt) b +ON a.dt >= b.dt AND a.pk = b.pk +ORDER BY a.dt; + +select 'asof using'; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toUInt8(number) as dt FROM numbers(3)) a +ASOF JOIN (SELECT 1 as pk, 2 as dt) b +USING(pk, dt) +ORDER BY a.dt; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toNullable(toUInt8(number)) as dt FROM numbers(3)) a +ASOF JOIN (SELECT 1 as pk, 2 as dt) b +USING(pk, dt) +ORDER BY a.dt SETTINGS enable_analyzer = 0; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toNullable(toUInt8(number)) as dt FROM numbers(3)) a +ASOF JOIN (SELECT 1 as pk, 2 as dt) b +USING(pk, dt) +ORDER BY a.dt SETTINGS enable_analyzer = 1; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toUInt8(number) as dt FROM numbers(3)) a +ASOF JOIN (SELECT 1 as pk, toNullable(0) as dt) b +USING(pk, dt) +ORDER BY a.dt SETTINGS enable_analyzer = 0; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toUInt8(number) as dt FROM numbers(3)) a +ASOF JOIN (SELECT 1 as pk, toNullable(0) as dt) b +USING(pk, dt) +ORDER BY a.dt SETTINGS enable_analyzer = 1; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toNullable(toUInt8(number)) as dt FROM numbers(3)) a +ASOF JOIN (SELECT 1 as pk, toNullable(0) as dt) b +USING(pk, dt) +ORDER BY a.dt; + +select 'asof on'; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toUInt8(number) as dt FROM numbers(3)) a +ASOF JOIN (SELECT 1 as pk, 2 as dt) b +ON a.pk = b.pk AND a.dt >= b.dt +ORDER BY a.dt; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), 
toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toNullable(toUInt8(number)) as dt FROM numbers(3)) a +ASOF JOIN (SELECT 1 as pk, 2 as dt) b +ON a.pk = b.pk AND a.dt >= b.dt +ORDER BY a.dt; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toUInt8(number) as dt FROM numbers(3)) a +ASOF JOIN (SELECT 1 as pk, toNullable(0) as dt) b +ON a.pk = b.pk AND a.dt >= b.dt +ORDER BY a.dt; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toNullable(toUInt8(number)) as dt FROM numbers(3)) a +ASOF JOIN (SELECT 1 as pk, toNullable(0) as dt) b +ON a.pk = b.pk AND a.dt >= b.dt +ORDER BY a.dt; + +SELECT a.pk, b.pk, a.dt, b.dt, toTypeName(a.pk), toTypeName(b.pk), toTypeName(materialize(a.dt)), toTypeName(materialize(b.dt)) +FROM (SELECT toUInt8(number) > 0 as pk, toNullable(toUInt8(number)) as dt FROM numbers(3)) a +ASOF JOIN (SELECT 1 as pk, toNullable(0) as dt) b +ON a.dt >= b.dt AND a.pk = b.pk +ORDER BY a.dt; + +SELECT * +FROM (SELECT NULL AS y, 1 AS x, '2020-01-01 10:10:10' :: DateTime64 AS t) AS t1 +ASOF LEFT JOIN (SELECT NULL AS y, 1 AS x, '2020-01-01 10:10:10' :: DateTime64 AS t) AS t2 +ON t1.t <= t2.t AND t1.x == t2.x FORMAT Null; diff --git a/parser/testdata/01429_join_on_error_messages/ast.json b/parser/testdata/01429_join_on_error_messages/ast.json new file mode 100644 index 000000000..6dca18ae0 --- /dev/null +++ b/parser/testdata/01429_join_on_error_messages/ast.json @@ -0,0 +1,100 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias A) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias a)" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias B) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias b)" + }, + { + "explain": " TableJoin (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 26, + + "statistics": + { + "elapsed": 0.001570879, + "rows_read": 26, + "bytes_read": 1085 + } +} diff --git a/parser/testdata/01429_join_on_error_messages/metadata.json b/parser/testdata/01429_join_on_error_messages/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01429_join_on_error_messages/metadata.json @@ -0,0 +1 @@ +{"todo": true} 
diff --git a/parser/testdata/01429_join_on_error_messages/query.sql b/parser/testdata/01429_join_on_error_messages/query.sql new file mode 100644 index 000000000..66bcc7111 --- /dev/null +++ b/parser/testdata/01429_join_on_error_messages/query.sql @@ -0,0 +1,10 @@ +SELECT 1 FROM (select 1 a) A JOIN (select 1 b) B ON equals(a); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH, 62 } +SELECT 1 FROM (select 1 a) A JOIN (select 1 b) B ON less(a); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH, 62 } + +SET join_algorithm = 'partial_merge'; +SELECT 1 FROM (select 1 a) A JOIN (select 1 b, 1 c) B ON a = b OR a = c; -- { serverError NOT_IMPLEMENTED } +-- works for a = b OR a = b because of equivalent disjunct optimization + +SET join_algorithm = 'grace_hash'; +SELECT 1 FROM (select 1 a) A JOIN (select 1 b, 1 c) B ON a = b OR a = c; -- { serverError NOT_IMPLEMENTED } +-- works for a = b OR a = b because of equivalent disjunct optimization diff --git a/parser/testdata/01430_fix_any_rewrite_aliases/ast.json b/parser/testdata/01430_fix_any_rewrite_aliases/ast.json new file mode 100644 index 000000000..1d500ea30 --- /dev/null +++ b/parser/testdata/01430_fix_any_rewrite_aliases/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function any (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function if (alias a_) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Identifier a_" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias x)" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.00160022, + "rows_read": 24, + "bytes_read": 952 + } +} diff --git a/parser/testdata/01430_fix_any_rewrite_aliases/metadata.json b/parser/testdata/01430_fix_any_rewrite_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01430_fix_any_rewrite_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01430_fix_any_rewrite_aliases/query.sql b/parser/testdata/01430_fix_any_rewrite_aliases/query.sql new file mode 100644 index 000000000..0a5a94ba7 --- /dev/null +++ b/parser/testdata/01430_fix_any_rewrite_aliases/query.sql @@ -0,0 +1 @@ +SELECT any(if(if(x, 1, 2) AS a_, a_, 0)) FROM (SELECT 1 AS x); diff --git a/parser/testdata/01430_modify_sample_by_zookeeper_long/ast.json b/parser/testdata/01430_modify_sample_by_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01430_modify_sample_by_zookeeper_long/ast.json @@ -0,0 +1 @@ 
+{"error": true} diff --git a/parser/testdata/01430_modify_sample_by_zookeeper_long/metadata.json b/parser/testdata/01430_modify_sample_by_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01430_modify_sample_by_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01430_modify_sample_by_zookeeper_long/query.sql b/parser/testdata/01430_modify_sample_by_zookeeper_long/query.sql new file mode 100644 index 000000000..b0e51f5df --- /dev/null +++ b/parser/testdata/01430_modify_sample_by_zookeeper_long/query.sql @@ -0,0 +1,49 @@ +-- Tags: long, zookeeper + +DROP TABLE IF EXISTS modify_sample; + +SET min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +SET max_block_size = 10; + +CREATE TABLE modify_sample (d Date DEFAULT '2000-01-01', x UInt8) ENGINE = MergeTree PARTITION BY d ORDER BY x; +INSERT INTO modify_sample (x) SELECT toUInt8(number) AS x FROM system.numbers LIMIT 256; + +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM modify_sample SAMPLE 0.1; -- { serverError SAMPLING_NOT_SUPPORTED } + +ALTER TABLE modify_sample MODIFY SAMPLE BY x; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM modify_sample SAMPLE 0.1; + +CREATE TABLE modify_sample_replicated (d Date DEFAULT '2000-01-01', x UInt8, y UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01430', 'modify_sample') PARTITION BY d ORDER BY (x, y); + +INSERT INTO modify_sample_replicated (x, y) SELECT toUInt8(number) AS x, toUInt64(number) as y FROM system.numbers LIMIT 256; + +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM modify_sample_replicated SAMPLE 0.1; -- { serverError SAMPLING_NOT_SUPPORTED } + +ALTER TABLE modify_sample_replicated MODIFY SAMPLE BY x; +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM modify_sample_replicated SAMPLE 0.1; + +DETACH TABLE modify_sample_replicated; +ATTACH TABLE modify_sample_replicated; + +SELECT count(), min(x), max(x), sum(x), uniqExact(x) FROM modify_sample_replicated SAMPLE 0.1; + +ALTER TABLE modify_sample_replicated MODIFY SAMPLE BY d; -- { serverError BAD_ARGUMENTS } +ALTER TABLE modify_sample_replicated MODIFY SAMPLE BY y; + +SELECT count(), min(y), max(y), sum(y), uniqExact(y) FROM modify_sample_replicated SAMPLE 0.1; + +DETACH TABLE modify_sample_replicated; +ATTACH TABLE modify_sample_replicated; + +SELECT count(), min(y), max(y), sum(y), uniqExact(y) FROM modify_sample_replicated SAMPLE 0.1; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE modify_sample_old (d Date DEFAULT '2000-01-01', x UInt8, y UInt64) ENGINE = MergeTree(d, (x, y), 8192); + +ALTER TABLE modify_sample_old MODIFY SAMPLE BY x; -- { serverError BAD_ARGUMENTS } + +DROP TABLE modify_sample; + +DROP TABLE modify_sample_replicated; + +DROP TABLE modify_sample_old; diff --git a/parser/testdata/01430_moving_sum_empty_state/ast.json b/parser/testdata/01430_moving_sum_empty_state/ast.json new file mode 100644 index 000000000..7fc159812 --- /dev/null +++ b/parser/testdata/01430_moving_sum_empty_state/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function groupArrayMovingSum (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + 
"explain": " Literal UInt64_0" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '127.0.0.{1,2}'" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001599723, + "rows_read": 18, + "bytes_read": 724 + } +} diff --git a/parser/testdata/01430_moving_sum_empty_state/metadata.json b/parser/testdata/01430_moving_sum_empty_state/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01430_moving_sum_empty_state/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01430_moving_sum_empty_state/query.sql b/parser/testdata/01430_moving_sum_empty_state/query.sql new file mode 100644 index 000000000..fb2356fe9 --- /dev/null +++ b/parser/testdata/01430_moving_sum_empty_state/query.sql @@ -0,0 +1 @@ +SELECT groupArrayMovingSum(10)(0) FROM remote('127.0.0.{1,2}', numbers(0)) diff --git a/parser/testdata/01431_finish_sorting_with_consts/ast.json b/parser/testdata/01431_finish_sorting_with_consts/ast.json new file mode 100644 index 000000000..09635d0ee --- /dev/null +++ b/parser/testdata/01431_finish_sorting_with_consts/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery pk_func (children 1)" + }, + { + "explain": " Identifier pk_func" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001682912, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/01431_finish_sorting_with_consts/metadata.json b/parser/testdata/01431_finish_sorting_with_consts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01431_finish_sorting_with_consts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01431_finish_sorting_with_consts/query.sql b/parser/testdata/01431_finish_sorting_with_consts/query.sql new file mode 100644 index 000000000..8071f4f6c --- /dev/null +++ b/parser/testdata/01431_finish_sorting_with_consts/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS pk_func; + +CREATE TABLE pk_func (`d` DateTime, `ui` UInt32 ) ENGINE = MergeTree ORDER BY toDate(d); +INSERT INTO pk_func SELECT '2020-05-05 01:00:00', number FROM numbers(1000); +SELECT 1, * FROM pk_func ORDER BY toDate(d) ASC, ui ASC LIMIT 3; + +DROP TABLE IF EXISTS pk_func; diff --git a/parser/testdata/01431_utf8_ubsan/ast.json b/parser/testdata/01431_utf8_ubsan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01431_utf8_ubsan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01431_utf8_ubsan/metadata.json b/parser/testdata/01431_utf8_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01431_utf8_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01431_utf8_ubsan/query.sql b/parser/testdata/01431_utf8_ubsan/query.sql new file mode 100644 index 000000000..3a28e0238 --- /dev/null +++ b/parser/testdata/01431_utf8_ubsan/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest +-- 
no-fasttest: upper/lowerUTF8 use ICU + +SELECT hex(lowerUTF8('\xFF')); +SELECT hex(upperUTF8('\xFF')); diff --git a/parser/testdata/01432_parse_date_time_best_effort_timestamp/ast.json b/parser/testdata/01432_parse_date_time_best_effort_timestamp/ast.json new file mode 100644 index 000000000..c87d13ad8 --- /dev/null +++ b/parser/testdata/01432_parse_date_time_best_effort_timestamp/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function parseDateTimeBestEffort (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1596752940'" + }, + { + "explain": " Literal 'Asia\/Istanbul'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001452853, + "rows_read": 8, + "bytes_read": 317 + } +} diff --git a/parser/testdata/01432_parse_date_time_best_effort_timestamp/metadata.json b/parser/testdata/01432_parse_date_time_best_effort_timestamp/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01432_parse_date_time_best_effort_timestamp/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01432_parse_date_time_best_effort_timestamp/query.sql b/parser/testdata/01432_parse_date_time_best_effort_timestamp/query.sql new file mode 100644 index 000000000..58759c858 --- /dev/null +++ b/parser/testdata/01432_parse_date_time_best_effort_timestamp/query.sql @@ -0,0 +1,3 @@ +SELECT parseDateTimeBestEffort('1596752940', 'Asia/Istanbul'); +SELECT parseDateTimeBestEffort('100000000', 'Asia/Istanbul'); +SELECT parseDateTimeBestEffort('20200807', 'Asia/Istanbul'); diff --git a/parser/testdata/01433_hex_float/ast.json b/parser/testdata/01433_hex_float/ast.json new file mode 100644 index 000000000..4026fb1f4 --- /dev/null +++ b/parser/testdata/01433_hex_float/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType Float64" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001249077, + "rows_read": 6, + "bytes_read": 203 + } +} diff --git a/parser/testdata/01433_hex_float/metadata.json b/parser/testdata/01433_hex_float/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01433_hex_float/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01433_hex_float/query.sql b/parser/testdata/01433_hex_float/query.sql new file mode 100644 index 000000000..124a1ab07 --- /dev/null +++ b/parser/testdata/01433_hex_float/query.sql @@ -0,0 +1,4 @@ +CREATE TEMPORARY TABLE t (x Float64); +INSERT INTO t VALUES (0x1.f7ced916872b0p-4); +SELECT * FROM t; +SELECT x = 0x1.f7ced916872b0p-4 FROM t; diff --git a/parser/testdata/01434_netloc_fuzz/ast.json b/parser/testdata/01434_netloc_fuzz/ast.json new file mode 100644 index 000000000..3669d67aa --- /dev/null +++ b/parser/testdata/01434_netloc_fuzz/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function netloc (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '<\\'[%UzO'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001507661, + "rows_read": 7, + "bytes_read": 261 + } +} diff --git a/parser/testdata/01434_netloc_fuzz/metadata.json b/parser/testdata/01434_netloc_fuzz/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01434_netloc_fuzz/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01434_netloc_fuzz/query.sql b/parser/testdata/01434_netloc_fuzz/query.sql new file mode 100644 index 000000000..a409add31 --- /dev/null +++ b/parser/testdata/01434_netloc_fuzz/query.sql @@ -0,0 +1 @@ +SELECT netloc('<\'[%UzO'); diff --git a/parser/testdata/01435_lcm_overflow/ast.json b/parser/testdata/01435_lcm_overflow/ast.json new file mode 100644 index 000000000..fec51bae3 --- /dev/null +++ b/parser/testdata/01435_lcm_overflow/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function lcm (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_15" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001631819, + "rows_read": 8, + "bytes_read": 288 + } +} diff --git a/parser/testdata/01435_lcm_overflow/metadata.json b/parser/testdata/01435_lcm_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01435_lcm_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01435_lcm_overflow/query.sql b/parser/testdata/01435_lcm_overflow/query.sql new file mode 100644 index 000000000..0b02a0c6b --- /dev/null +++ b/parser/testdata/01435_lcm_overflow/query.sql @@ -0,0 +1,10 @@ +SELECT lcm(15, 10); +SELECT lcm(-15, 10); +SELECT lcm(15, -10); +SELECT lcm(-15, -10); + +-- Implementation specific result on overflow: +SELECT ignore(lcm(256, 9223372036854775807)); +SELECT ignore(lcm(256, -9223372036854775807)); +SELECT ignore(lcm(-256, 9223372036854775807)); -- { serverError DECIMAL_OVERFLOW } +SELECT ignore(lcm(-256, -9223372036854775807)); diff --git a/parser/testdata/01436_storage_merge_with_join_push_down/ast.json b/parser/testdata/01436_storage_merge_with_join_push_down/ast.json new file mode 100644 index 000000000..ef81b8751 --- /dev/null +++ b/parser/testdata/01436_storage_merge_with_join_push_down/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test1 (children 1)" + }, + { + "explain": " Identifier test1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001188645, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/01436_storage_merge_with_join_push_down/metadata.json b/parser/testdata/01436_storage_merge_with_join_push_down/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01436_storage_merge_with_join_push_down/metadata.json @@ 
-0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01436_storage_merge_with_join_push_down/query.sql b/parser/testdata/01436_storage_merge_with_join_push_down/query.sql new file mode 100644 index 000000000..a3c598c6d --- /dev/null +++ b/parser/testdata/01436_storage_merge_with_join_push_down/query.sql @@ -0,0 +1,30 @@ +DROP TABLE IF EXISTS test1; +DROP TABLE IF EXISTS test1_distributed; +DROP TABLE IF EXISTS test_merge; + +SET enable_optimize_predicate_expression = 1; + +CREATE TABLE test1 (id Int64, name String) ENGINE MergeTree PARTITION BY (id) ORDER BY (id); +CREATE TABLE test1_distributed AS test1 ENGINE = Distributed(test_cluster_two_shards_localhost, default, test1); +CREATE TABLE test_merge AS test1 ENGINE = Merge('default', 'test1_distributed'); + +SELECT count() FROM test_merge +JOIN (SELECT 'anystring' AS name) AS n +USING name +WHERE id = 1; + +DROP TABLE test1; +DROP TABLE test_merge; + + +CREATE TABLE test1 (id Int64, name String) ENGINE MergeTree PARTITION BY (id) ORDER BY (id); +CREATE TABLE test_merge AS test1 ENGINE = Merge('default', 'test1'); + +SELECT count() FROM test_merge +JOIN (SELECT 'anystring' AS name) AS n +USING name +WHERE id = 1; + +DROP TABLE test1; +DROP TABLE test_merge; +DROP TABLE test1_distributed; diff --git a/parser/testdata/01440_big_int_arithm/ast.json b/parser/testdata/01440_big_int_arithm/ast.json new file mode 100644 index 000000000..26d2baa3c --- /dev/null +++ b/parser/testdata/01440_big_int_arithm/ast.json @@ -0,0 +1,100 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function plus (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toInt128 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Function toInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function plus (alias y) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toInt256 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Function toInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier y" + } + ], + + "rows": 26, + + "statistics": + { + "elapsed": 0.001694493, + "rows_read": 26, + "bytes_read": 1013 + } +} diff --git a/parser/testdata/01440_big_int_arithm/metadata.json b/parser/testdata/01440_big_int_arithm/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01440_big_int_arithm/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01440_big_int_arithm/query.sql b/parser/testdata/01440_big_int_arithm/query.sql new file mode 100644 index 000000000..3eec2b3cc --- /dev/null +++ b/parser/testdata/01440_big_int_arithm/query.sql @@ -0,0 +1,73 @@ 
+select (toInt128(-1) + toInt8(1)) x, (toInt256(-1) + toInt8(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) + toInt16(1)) x, (toInt256(-1) + toInt16(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) + toInt32(1)) x, (toInt256(-1) + toInt32(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) + toInt64(1)) x, (toInt256(-1) + toInt64(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) + toUInt8(1)) x, (toInt256(-1) + toUInt8(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) + toUInt16(1)) x, (toInt256(-1) + toUInt16(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) + toUInt32(1)) x, (toInt256(-1) + toUInt32(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) + toUInt64(1)) x, (toInt256(-1) + toUInt64(1)) y, toTypeName(x), toTypeName(y); + +select (toInt128(-1) + toInt128(1)) x, (toInt256(-1) + toInt128(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) + toInt256(1)) x, (toInt256(-1) + toInt256(1)) y, toTypeName(x), toTypeName(y); +--select (toInt128(-1) + toUInt128(1)) x, (toInt256(-1) + toUInt128(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) + toUInt256(1)) x, (toInt256(-1) + toUInt256(1)) y, toTypeName(x), toTypeName(y); + + +select (toInt128(-1) - toInt8(1)) x, (toInt256(-1) - toInt8(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) - toInt16(1)) x, (toInt256(-1) - toInt16(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) - toInt32(1)) x, (toInt256(-1) - toInt32(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) - toInt64(1)) x, (toInt256(-1) - toInt64(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) - toUInt8(1)) x, (toInt256(-1) - toUInt8(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) - toUInt16(1)) x, (toInt256(-1) - toUInt16(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) - toUInt32(1)) x, (toInt256(-1) - toUInt32(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) - toUInt64(1)) x, (toInt256(-1) - toUInt64(1)) y, toTypeName(x), toTypeName(y); + +select (toInt128(-1) - toInt128(1)) x, (toInt256(-1) - toInt128(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) - toInt256(1)) x, (toInt256(-1) - toInt256(1)) y, toTypeName(x), toTypeName(y); +--select (toInt128(-1) - toUInt128(1)) x, (toInt256(-1) - toUInt128(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) - toUInt256(1)) x, (toInt256(-1) - toUInt256(1)) y, toTypeName(x), toTypeName(y); + + +select (toInt128(-1) * toInt8(1)) x, (toInt256(-1) * toInt8(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) * toInt16(1)) x, (toInt256(-1) * toInt16(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) * toInt32(1)) x, (toInt256(-1) * toInt32(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) * toInt64(1)) x, (toInt256(-1) * toInt64(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) * toUInt8(1)) x, (toInt256(-1) * toUInt8(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) * toUInt16(1)) x, (toInt256(-1) * toUInt16(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) * toUInt32(1)) x, (toInt256(-1) * toUInt32(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) * toUInt64(1)) x, (toInt256(-1) * toUInt64(1)) y, toTypeName(x), toTypeName(y); + +select (toInt128(-1) * toInt128(1)) x, (toInt256(-1) * toInt128(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) * toInt256(1)) x, (toInt256(-1) * toInt256(1)) y, toTypeName(x), toTypeName(y); +--select (toInt128(-1) * toUInt128(1)) x, (toInt256(-1) * toUInt128(1)) y, toTypeName(x), 
toTypeName(y); +select (toInt128(-1) * toUInt256(1)) x, (toInt256(-1) * toUInt256(1)) y, toTypeName(x), toTypeName(y); + + +select intDiv(toInt128(-1), toInt8(-1)) x, intDiv(toInt256(-1), toInt8(-1)) y, toTypeName(x), toTypeName(y); +select intDiv(toInt128(-1), toInt16(-1)) x, intDiv(toInt256(-1), toInt16(-1)) y, toTypeName(x), toTypeName(y); +select intDiv(toInt128(-1), toInt32(-1)) x, intDiv(toInt256(-1), toInt32(-1)) y, toTypeName(x), toTypeName(y); +select intDiv(toInt128(-1), toInt64(-1)) x, intDiv(toInt256(-1), toInt64(-1)) y, toTypeName(x), toTypeName(y); +select intDiv(toInt128(-1), toUInt8(1)) x, intDiv(toInt256(-1), toUInt8(1)) y, toTypeName(x), toTypeName(y); +select intDiv(toInt128(-1), toUInt16(1)) x, intDiv(toInt256(-1), toUInt16(1)) y, toTypeName(x), toTypeName(y); +select intDiv(toInt128(-1), toUInt32(1)) x, intDiv(toInt256(-1), toUInt32(1)) y, toTypeName(x), toTypeName(y); +select intDiv(toInt128(-1), toUInt64(1)) x, intDiv(toInt256(-1), toUInt64(1)) y, toTypeName(x), toTypeName(y); + +select intDiv(toInt128(-1), toInt128(-1)) x, intDiv(toInt256(-1), toInt128(-1)) y, toTypeName(x), toTypeName(y); +select intDiv(toInt128(-1), toInt256(-1)) x, intDiv(toInt256(-1), toInt256(-1)) y, toTypeName(x), toTypeName(y); +--select intDiv(toInt128(-1), toUInt128(1)) x, intDiv(toInt256(-1), toUInt128(1)) y, toTypeName(x), toTypeName(y); +select intDiv(toInt128(-1), toUInt256(1)) x, intDiv(toInt256(-1), toUInt256(1)) y, toTypeName(x), toTypeName(y); + + +select (toInt128(-1) / toInt8(-1)) x, (toInt256(-1) / toInt8(-1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) / toInt16(-1)) x, (toInt256(-1) / toInt16(-1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) / toInt32(-1)) x, (toInt256(-1) / toInt32(-1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) / toInt64(-1)) x, (toInt256(-1) / toInt64(-1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) / toUInt8(1)) x, (toInt256(-1) / toUInt8(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) / toUInt16(1)) x, (toInt256(-1) / toUInt16(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) / toUInt32(1)) x, (toInt256(-1) / toUInt32(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) / toUInt64(1)) x, (toInt256(-1) / toUInt64(1)) y, toTypeName(x), toTypeName(y); + +select (toInt128(-1) / toInt128(-1)) x, (toInt256(-1) / toInt128(-1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) / toInt256(-1)) x, (toInt256(-1) / toInt256(-1)) y, toTypeName(x), toTypeName(y); +--select (toInt128(-1) / toUInt128(1)) x, (toInt256(-1) / toUInt128(1)) y, toTypeName(x), toTypeName(y); +select (toInt128(-1) / toUInt256(1)) x, (toInt256(-1) / toUInt256(1)) y, toTypeName(x), toTypeName(y); diff --git a/parser/testdata/01440_big_int_exotic_casts/ast.json b/parser/testdata/01440_big_int_exotic_casts/ast.json new file mode 100644 index 000000000..f2cb8ee13 --- /dev/null +++ b/parser/testdata/01440_big_int_exotic_casts/ast.json @@ -0,0 +1,136 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Function multiply (alias y) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toUInt32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function multiply (children 1)" + }, + 
{ + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function toDecimal64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function toDecimal128 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Literal UInt64_6" + }, + { + "explain": " Function toDecimal256 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Literal UInt64_7" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers_mt (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier number" + } + ], + + "rows": 38, + + "statistics": + { + "elapsed": 0.002426246, + "rows_read": 38, + "bytes_read": 1447 + } +} diff --git a/parser/testdata/01440_big_int_exotic_casts/metadata.json b/parser/testdata/01440_big_int_exotic_casts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01440_big_int_exotic_casts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01440_big_int_exotic_casts/query.sql b/parser/testdata/01440_big_int_exotic_casts/query.sql new file mode 100644 index 000000000..f411af897 --- /dev/null +++ b/parser/testdata/01440_big_int_exotic_casts/query.sql @@ -0,0 +1,46 @@ +SELECT toUInt32(number * number) * number y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number; +SELECT toUInt64(number * number) * number y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number; +SELECT toUInt256(number * number) * number y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number; +SELECT toInt32(number * number) * number y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number; +SELECT toInt64(number * number) * number y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number; +SELECT toInt128(number * number) * number y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number; +SELECT toInt256(number * number) * number y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number; +SELECT toFloat32(number * number) * number y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number; +SELECT toFloat64(number * number) * number y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number; + +SELECT toUInt32(number 
* number) * -1 y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number; +SELECT toUInt64(number * number) * -1 y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number; +SELECT toUInt256(number * number) * -1 y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number; +SELECT toInt32(number * number) * -1 y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number; +SELECT toInt64(number * number) * -1 y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number; +SELECT toInt128(number * number) * -1 y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number; +SELECT toInt256(number * number) * -1 y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number; +SELECT toFloat32(number * number) * -1 y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number; +SELECT toFloat64(number * number) * -1 y, toDecimal32(y, 1), toDecimal64(y, 2), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers_mt(10) ORDER BY number; + +SELECT toUInt32(number * -1) * number y, toInt128(y), toInt256(y), toUInt256(y) FROM numbers_mt(10) ORDER BY number; +SELECT toUInt64(number * -1) * number y, toInt128(y), toInt256(y), toUInt256(y) FROM numbers_mt(10) ORDER BY number; +SELECT toUInt256(number * -1) * number y, toInt128(y), toInt256(y), toUInt256(y) FROM numbers_mt(10) ORDER BY number; +SELECT toInt32(number * -1) * number y, toInt128(y), toInt256(y), toUInt256(y) FROM numbers_mt(10) ORDER BY number; +SELECT toInt64(number * -1) * number y, toInt128(y), toInt256(y), toUInt256(y) FROM numbers_mt(10) ORDER BY number; +SELECT toInt128(number * -1) * number y, toInt128(y), toInt256(y), toUInt256(y) FROM numbers_mt(10) ORDER BY number; +SELECT toInt256(number * -1) * number y, toInt128(y), toInt256(y), toUInt256(y) FROM numbers_mt(10) ORDER BY number; +SELECT toFloat32(number * -1) * number y, toInt128(y), toInt256(y), toUInt256(y) FROM numbers_mt(10) ORDER BY number; +SELECT toFloat64(number * -1) * number y, toInt128(y), toInt256(y), toUInt256(y) FROM numbers_mt(10) ORDER BY number; + +SELECT number y, toInt128(number) - y, toInt256(number) - y, toUInt256(number) - y FROM numbers_mt(10) ORDER BY number; +SELECT -number y, toInt128(number) + y, toInt256(number) + y, toUInt256(number) + y FROM numbers_mt(10) ORDER BY number; + + +DROP TABLE IF EXISTS t; +CREATE TABLE t (x UInt64, i256 Int256, u256 UInt256, d256 Decimal256(2)) ENGINE = Memory; + +INSERT INTO t SELECT number * number * number AS x, x AS i256, x AS u256, x AS d256 FROM numbers(10000); + +SELECT sum(x), sum(i256), sum(u256), sum(d256) FROM t; + +INSERT INTO t SELECT -number * number * number AS x, x AS i256, x AS u256, x AS d256 FROM numbers(10000); + +SELECT sum(x), sum(i256), sum(u256), sum(d256) FROM t; + +DROP TABLE t; diff --git a/parser/testdata/01440_big_int_least_greatest/ast.json b/parser/testdata/01440_big_int_least_greatest/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01440_big_int_least_greatest/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01440_big_int_least_greatest/metadata.json 
b/parser/testdata/01440_big_int_least_greatest/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01440_big_int_least_greatest/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01440_big_int_least_greatest/query.sql b/parser/testdata/01440_big_int_least_greatest/query.sql new file mode 100644 index 000000000..e7153842c --- /dev/null +++ b/parser/testdata/01440_big_int_least_greatest/query.sql @@ -0,0 +1,34 @@ +SELECT least(toInt8(127), toInt128(0)) x, least(toInt8(127), toInt128(128)) x2, + least(toInt8(-128), toInt128(0)) x3, least(toInt8(-128), toInt128(-129)) x4, + greatest(toInt8(127), toInt128(0)) y, greatest(toInt8(127), toInt128(128)) y2, + greatest(toInt8(-128), toInt128(0)) y3, greatest(toInt8(-128), toInt128(-129)) y4, + toTypeName(x), toTypeName(y); + +SELECT least(toInt8(127), toInt256(0)) x, least(toInt8(127), toInt256(128)) x2, + least(toInt8(-128), toInt256(0)) x3, least(toInt8(-128), toInt256(-129)) x4, + greatest(toInt8(127), toInt256(0)) y, greatest(toInt8(127), toInt256(128)) y2, + greatest(toInt8(-128), toInt256(0)) y3, greatest(toInt8(-128), toInt256(-129)) y4, + toTypeName(x), toTypeName(y); + +SELECT least(toInt64(9223372036854775807), toInt128(0)) x, least(toInt64(9223372036854775807), toInt128('9223372036854775808')) x2, + least(toInt64(-9223372036854775808), toInt128(0)) x3, least(toInt64(-9223372036854775808), toInt128('-9223372036854775809')) x4, + greatest(toInt64(9223372036854775807), toInt128(0)) y, greatest(toInt64(9223372036854775807), toInt128('9223372036854775808')) y2, + greatest(toInt64(-9223372036854775808), toInt128(0)) y3, greatest(toInt64(-9223372036854775808), toInt128('-9223372036854775809')) y4, + toTypeName(x), toTypeName(y); + +SELECT least(toInt64(9223372036854775807), toInt256(0)) x, least(toInt64(9223372036854775807), toInt256('9223372036854775808')) x2, + least(toInt64(-9223372036854775808), toInt256(0)) x3, least(toInt64(-9223372036854775808), toInt256('-9223372036854775809')) x4, + greatest(toInt64(9223372036854775807), toInt256(0)) y, greatest(toInt64(9223372036854775807), toInt256('9223372036854775808')) y2, + greatest(toInt64(-9223372036854775808), toInt256(0)) y3, greatest(toInt64(-9223372036854775808), toInt256('-9223372036854775809')) y4, + toTypeName(x), toTypeName(y); + +SELECT least(toUInt8(255), toUInt256(0)) x, least(toUInt8(255), toUInt256(256)) x2, + greatest(toUInt8(255), toUInt256(0)) y, greatest(toUInt8(255), toUInt256(256)) y2, + toTypeName(x), toTypeName(y); + +SELECT least(toUInt64('18446744073709551615'), toUInt256(0)) x, least(toUInt64('18446744073709551615'), toUInt256('18446744073709551616')) x2, + greatest(toUInt64('18446744073709551615'), toUInt256(0)) y, greatest(toUInt64('18446744073709551615'), toUInt256('18446744073709551616')) y2, + toTypeName(x), toTypeName(y); + +SELECT least(toUInt32(0), toInt256(0)), greatest(toInt32(0), toUInt256(0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT least(toInt32(0), toUInt256(0)), greatest(toInt32(0), toUInt256(0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/01440_big_int_shift/ast.json b/parser/testdata/01440_big_int_shift/ast.json new file mode 100644 index 000000000..c5c9cee46 --- /dev/null +++ b/parser/testdata/01440_big_int_shift/ast.json @@ -0,0 +1,109 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + 
"explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function bitShiftLeft (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toInt128 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function bitShiftRight (alias y) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_127" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier number" + } + ], + + "rows": 29, + + "statistics": + { + "elapsed": 0.001604217, + "rows_read": 29, + "bytes_read": 1129 + } +} diff --git a/parser/testdata/01440_big_int_shift/metadata.json b/parser/testdata/01440_big_int_shift/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01440_big_int_shift/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01440_big_int_shift/query.sql b/parser/testdata/01440_big_int_shift/query.sql new file mode 100644 index 000000000..e24ae1ba9 --- /dev/null +++ b/parser/testdata/01440_big_int_shift/query.sql @@ -0,0 +1,3 @@ +SELECT bitShiftLeft(toInt128(1), number) x, bitShiftRight(x, number) y, toTypeName(x), toTypeName(y) FROM numbers(127) ORDER BY number; +SELECT bitShiftLeft(toInt256(1), number) x, bitShiftRight(x, number) y, toTypeName(x), toTypeName(y) FROM numbers(255) ORDER BY number; +SELECT bitShiftLeft(toUInt256(1), number) x, bitShiftRight(x, number) y, toTypeName(x), toTypeName(y) FROM numbers(256) ORDER BY number; diff --git a/parser/testdata/01440_to_date_monotonicity/ast.json b/parser/testdata/01440_to_date_monotonicity/ast.json new file mode 100644 index 000000000..fc261fdf0 --- /dev/null +++ b/parser/testdata/01440_to_date_monotonicity/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tdm (children 1)" + }, + { + "explain": " Identifier tdm" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001537029, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01440_to_date_monotonicity/metadata.json b/parser/testdata/01440_to_date_monotonicity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01440_to_date_monotonicity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01440_to_date_monotonicity/query.sql b/parser/testdata/01440_to_date_monotonicity/query.sql new file mode 100644 index 000000000..709f30fa5 --- /dev/null +++ b/parser/testdata/01440_to_date_monotonicity/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS tdm; +DROP TABLE IF 
EXISTS tdm2; +CREATE TABLE tdm (x DateTime('Asia/Istanbul')) ENGINE = MergeTree ORDER BY x SETTINGS write_final_mark = 0; +INSERT INTO tdm VALUES (now()); +SELECT count(x) FROM tdm WHERE toDate(x) < toDate(now(), 'Asia/Istanbul') SETTINGS max_rows_to_read = 1; + +SELECT toDate(-1), toDate(10000000000000, 'Asia/Istanbul'), toDate(100), toDate(65536, 'UTC'), toDate(65535, 'Asia/Istanbul'); +SELECT toDateTime(-1, 'Asia/Istanbul'), toDateTime(10000000000000, 'Asia/Istanbul'), toDateTime(1000, 'Asia/Istanbul'); + +CREATE TABLE tdm2 (timestamp UInt32) ENGINE = MergeTree ORDER BY timestamp SETTINGS index_granularity = 1; + +INSERT INTO tdm2 VALUES (toUnixTimestamp('2000-01-01 13:12:12')), (toUnixTimestamp('2000-01-01 14:12:12')), (toUnixTimestamp('2000-01-01 15:12:12')); + +SET max_rows_to_read = 1; + +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. +SET parallel_replicas_index_analysis_only_on_coordinator = 0; + +SELECT toDateTime(timestamp) FROM tdm2 WHERE toHour(toDateTime(timestamp)) = 13; + +DROP TABLE tdm; +DROP TABLE tdm2; diff --git a/parser/testdata/01441_array_combinator/ast.json b/parser/testdata/01441_array_combinator/ast.json new file mode 100644 index 000000000..279840ae7 --- /dev/null +++ b/parser/testdata/01441_array_combinator/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (alias k) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Function sumArray (alias v) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function emptyArrayUInt8 (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier k" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001783345, + "rows_read": 23, + "bytes_read": 891 + } +} diff --git a/parser/testdata/01441_array_combinator/metadata.json b/parser/testdata/01441_array_combinator/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01441_array_combinator/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01441_array_combinator/query.sql b/parser/testdata/01441_array_combinator/query.sql new file mode 100644 index 000000000..75a511f84 --- /dev/null +++ b/parser/testdata/01441_array_combinator/query.sql @@ -0,0 +1 @@ +SELECT number % 100 AS k, sumArray(emptyArrayUInt8()) AS v FROM numbers(10) GROUP BY k ORDER BY k; diff --git a/parser/testdata/01441_low_cardinality_array_index/ast.json b/parser/testdata/01441_low_cardinality_array_index/ast.json 
new file mode 100644 index 000000000..83f3ef573 --- /dev/null +++ b/parser/testdata/01441_low_cardinality_array_index/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001404785, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01441_low_cardinality_array_index/metadata.json b/parser/testdata/01441_low_cardinality_array_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01441_low_cardinality_array_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01441_low_cardinality_array_index/query.sql b/parser/testdata/01441_low_cardinality_array_index/query.sql new file mode 100644 index 000000000..b5e14c957 --- /dev/null +++ b/parser/testdata/01441_low_cardinality_array_index/query.sql @@ -0,0 +1,49 @@ +SET allow_suspicious_low_cardinality_types=1; + +DROP TABLE IF EXISTS t_01411; + +CREATE TABLE t_01411( + str LowCardinality(String), + arr Array(LowCardinality(String)) default [str] +) ENGINE = MergeTree() +ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO t_01411 (str) SELECT concat('asdf', toString(number % 10000)) FROM numbers(1000000); + +SELECT count() FROM t_01411 WHERE str = 'asdf337'; +SELECT count() FROM t_01411 WHERE arr[1] = 'asdf337'; +SELECT count() FROM t_01411 WHERE has(arr, 'asdf337'); +SELECT count() FROM t_01411 WHERE indexOf(arr, 'asdf337') > 0; + +SELECT count() FROM t_01411 WHERE arr[1] = str; +SELECT count() FROM t_01411 WHERE has(arr, str); +SELECT count() FROM t_01411 WHERE indexOf(arr, str) > 0; + +DROP TABLE IF EXISTS t_01411; +DROP TABLE IF EXISTS t_01411_num; + +CREATE TABLE t_01411_num( + num UInt8, + arr Array(LowCardinality(Int64)) default [num] +) ENGINE = MergeTree() +ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO t_01411_num (num) SELECT number % 1000 FROM numbers(1000000); + +SELECT count() FROM t_01411_num WHERE num = 42; +SELECT count() FROM t_01411_num WHERE arr[1] = 42; +SELECT count() FROM t_01411_num WHERE has(arr, 42); +SELECT count() FROM t_01411_num WHERE indexOf(arr, 42) > 0; + +SELECT count() FROM t_01411_num WHERE arr[1] = num; +SELECT count() FROM t_01411_num WHERE has(arr, num); +SELECT count() FROM t_01411_num WHERE indexOf(arr, num) > 0; +SELECT count() FROM t_01411_num WHERE indexOf(arr, num % 337) > 0; + +-- Checking Arr(String) and LC(String) +SELECT indexOf(['a', 'b', 'c'], toLowCardinality('a')); + +-- Checking Arr(Nullable(String)) and LC(String) +SELECT indexOf(['a', 'b', NULL], toLowCardinality('a')); + +DROP TABLE IF EXISTS t_01411_num; diff --git a/parser/testdata/01442_date_time_with_params/ast.json b/parser/testdata/01442_date_time_with_params/ast.json new file mode 100644 index 000000000..97a5a2ca1 --- /dev/null +++ b/parser/testdata/01442_date_time_with_params/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000987735, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01442_date_time_with_params/metadata.json b/parser/testdata/01442_date_time_with_params/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01442_date_time_with_params/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01442_date_time_with_params/query.sql b/parser/testdata/01442_date_time_with_params/query.sql new file mode 100644 index 000000000..4dc1f7914 --- /dev/null +++ b/parser/testdata/01442_date_time_with_params/query.sql @@ -0,0 +1,117 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test (a DateTime, b DateTime(), c DateTime(2), d DateTime('Asia/Istanbul'), e DateTime(3, 'Asia/Istanbul'), f DateTime32, g DateTime32('Asia/Istanbul'), h DateTime(0)) ENGINE = MergeTree ORDER BY a; + +INSERT INTO test VALUES('2020-01-01 00:00:00', '2020-01-01 00:01:00', '2020-01-01 00:02:00.11', '2020-01-01 00:03:00', '2020-01-01 00:04:00.22', '2020-01-01 00:05:00', '2020-01-01 00:06:00', '2020-01-01 00:06:00'); + +SELECT a, toTypeName(a), b, toTypeName(b), c, toTypeName(c), d, toTypeName(d), e, toTypeName(e), f, toTypeName(f), g, toTypeName(g), h, toTypeName(h) FROM test; + +SELECT toDateTime('2020-01-01 00:00:00') AS a, toTypeName(a), toDateTime('2020-01-01 00:02:00.11', 2) AS b, toTypeName(b), toDateTime('2020-01-01 00:03:00', 'Asia/Istanbul') AS c, toTypeName(c), toDateTime('2020-01-01 00:04:00.22', 3, 'Asia/Istanbul') AS d, toTypeName(d), toDateTime('2020-01-01 00:05:00', 0) AS e, toTypeName(e); + +SELECT CAST('2020-01-01 00:00:00', 'DateTime') AS a, toTypeName(a), CAST('2020-01-01 00:02:00.11', 'DateTime(2)') AS b, toTypeName(b), CAST('2020-01-01 00:03:00', 'DateTime(\'Asia/Istanbul\')') AS c, toTypeName(c), CAST('2020-01-01 00:04:00.22', 'DateTime(3, \'Asia/Istanbul\')') AS d, toTypeName(d), CAST('2020-01-01 00:05:00', 'DateTime(0)') AS e, toTypeName(e); + +SELECT toDateTime32('2020-01-01 00:00:00') AS a, toTypeName(a); + +SELECT 'parseDateTimeBestEffort'; +SELECT parseDateTimeBestEffort('', 3) AS a, toTypeName(a); -- {serverError CANNOT_PARSE_DATETIME} +SELECT parseDateTimeBestEffort('2020-05-14T03:37:03', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffort('2020-05-14 03:37:03', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffort('2020-05-14 11:37:03 AM', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffort('2020-05-14 11:37:03 PM', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffort('2020-05-14 12:37:03 AM', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffort('2020-05-14 12:37:03 PM', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffort('2020-05-14T03:37:03.253184', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffort('2020-05-14T03:37:03.253184Z', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffort('2020-05-14T03:37:03.253184Z', 3, 'Europe/Minsk') AS a, toTypeName(a); +SELECT parseDateTimeBestEffort(materialize('2020-05-14T03:37:03.253184Z'), 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffort('1640649600123', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffort('1640649600123', 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffort('Dec 15, 2021') AS a, toTypeName(a); +SELECT parseDateTimeBestEffort('Dec 15, 2021', 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffort('Dec 15, 2021', 3, 'UTC') AS a, toTypeName(a); + +SELECT 'parseDateTimeBestEffortOrNull'; +SELECT parseDateTimeBestEffortOrNull('', 3) AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrNull('2020-05-14T03:37:03', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrNull('2020-05-14 03:37:03', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrNull('2020-05-14 11:37:03 AM', 3, 
'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrNull('2020-05-14 11:37:03 PM', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrNull('2020-05-14 12:37:03 AM', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrNull('2020-05-14 12:37:03 PM', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrNull('2020-05-14T03:37:03.253184', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrNull('2020-05-14T03:37:03.253184Z', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrNull('2020-05-14T03:37:03.253184Z', 3, 'Europe/Minsk') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrNull(materialize('2020-05-14T03:37:03.253184Z'), 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrNull('1640649600123', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrNull('1640649600123', 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrNull('Dec 15, 2021') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrNull('Dec 15, 2021', 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrNull('Dec 15, 2021', 3, 'UTC') AS a, toTypeName(a); + +SELECT 'parseDateTimeBestEffortOrZero'; +SELECT parseDateTimeBestEffortOrZero('', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrZero('2020-05-14T03:37:03', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrZero('2020-05-14 03:37:03', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrZero('2020-05-14 11:37:03 AM', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrZero('2020-05-14 11:37:03 PM', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrZero('2020-05-14 12:37:03 AM', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrZero('2020-05-14 12:37:03 PM', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrZero('2020-05-14T03:37:03.253184', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrZero('2020-05-14T03:37:03.253184Z', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrZero('2020-05-14T03:37:03.253184Z', 3, 'Europe/Minsk') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrZero(materialize('2020-05-14T03:37:03.253184Z'), 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrZero('1640649600123', 3, 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrZero('1640649600123', 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrZero('Dec 15, 2021') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrZero('Dec 15, 2021', 'UTC') AS a, toTypeName(a); +SELECT parseDateTimeBestEffortOrZero('Dec 15, 2021', 3, 'UTC') AS a, toTypeName(a); + +SELECT 'parseDateTime32BestEffort'; +SELECT parseDateTime32BestEffort('') AS a, toTypeName(a); -- {serverError CANNOT_PARSE_DATETIME} +SELECT parseDateTime32BestEffort('2020-05-14T03:37:03', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffort('2020-05-14 03:37:03', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffort('2020-05-14 11:37:03 AM', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffort('2020-05-14 11:37:03 PM', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffort('2020-05-14 12:37:03 AM', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffort('2020-05-14 12:37:03 PM', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffort('2020-05-14T03:37:03.253184', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffort('2020-05-14T03:37:03.253184Z', 'UTC') AS a, toTypeName(a); +SELECT 
parseDateTime32BestEffort('2020-05-14T03:37:03.253184Z', 'Europe/Minsk') AS a, toTypeName(a); +SELECT parseDateTime32BestEffort(materialize('2020-05-14T03:37:03.253184Z'), 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffort('1640649600123', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffort('Dec 15, 2021') AS a, toTypeName(a); +SELECT parseDateTime32BestEffort('Dec 15, 2021', 'UTC') AS a, toTypeName(a); + +SELECT 'parseDateTime32BestEffortOrNull'; +SELECT parseDateTime32BestEffortOrNull('') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrNull('2020-05-14T03:37:03', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrNull('2020-05-14 03:37:03', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrNull('2020-05-14 11:37:03 AM', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrNull('2020-05-14 11:37:03 PM', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrNull('2020-05-14 12:37:03 AM', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrNull('2020-05-14 12:37:03 PM', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrNull('2020-05-14T03:37:03.253184', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrNull('2020-05-14T03:37:03.253184Z', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrNull('2020-05-14T03:37:03.253184Z', 'Europe/Minsk') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrNull(materialize('2020-05-14T03:37:03.253184Z'), 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrNull('1640649600123', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrNull('Dec 15, 2021') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrNull('Dec 15, 2021', 'UTC') AS a, toTypeName(a); + +SELECT 'parseDateTime32BestEffortOrZero'; +SELECT parseDateTime32BestEffortOrZero('', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrZero('2020-05-14T03:37:03', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrZero('2020-05-14 03:37:03', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrZero('2020-05-14 11:37:03 AM', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrZero('2020-05-14 11:37:03 PM', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrZero('2020-05-14 12:37:03 AM', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrZero('2020-05-14 12:37:03 PM', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrZero('2020-05-14T03:37:03.253184', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrZero('2020-05-14T03:37:03.253184Z', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrZero('2020-05-14T03:37:03.253184Z', 'Europe/Minsk') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrZero(materialize('2020-05-14T03:37:03.253184Z'), 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrZero('1640649600123', 'UTC') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrZero('Dec 15, 2021') AS a, toTypeName(a); +SELECT parseDateTime32BestEffortOrZero('Dec 15, 2021', 'UTC') AS a, toTypeName(a); + +DROP TABLE IF EXISTS test; diff --git a/parser/testdata/01442_h3kring_range_check/ast.json b/parser/testdata/01442_h3kring_range_check/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01442_h3kring_range_check/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01442_h3kring_range_check/metadata.json b/parser/testdata/01442_h3kring_range_check/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01442_h3kring_range_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01442_h3kring_range_check/query.sql b/parser/testdata/01442_h3kring_range_check/query.sql new file mode 100644 index 000000000..644cb2563 --- /dev/null +++ b/parser/testdata/01442_h3kring_range_check/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest + +SELECT h3kRing(581276613233082367, 65535); -- { serverError PARAMETER_OUT_OF_BOUND } +SELECT h3kRing(581276613233082367, -1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT length(h3kRing(111111111111, 1000)); +SELECT h3kRing(581276613233082367, nan); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/01447_json_strings/ast.json b/parser/testdata/01447_json_strings/ast.json new file mode 100644 index 000000000..f31bf750b --- /dev/null +++ b/parser/testdata/01447_json_strings/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001169762, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01447_json_strings/metadata.json b/parser/testdata/01447_json_strings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01447_json_strings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01447_json_strings/query.sql b/parser/testdata/01447_json_strings/query.sql new file mode 100644 index 000000000..45fc4a56d --- /dev/null +++ b/parser/testdata/01447_json_strings/query.sql @@ -0,0 +1,10 @@ +SET output_format_write_statistics = 0; + +SELECT + 1, + 'a', + [1, 2, 3], + (1, 'a'), + null, + nan +FORMAT JSONStrings; diff --git a/parser/testdata/01448_json_compact_strings_each_row/ast.json b/parser/testdata/01448_json_compact_strings_each_row/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01448_json_compact_strings_each_row/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01448_json_compact_strings_each_row/metadata.json b/parser/testdata/01448_json_compact_strings_each_row/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01448_json_compact_strings_each_row/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01448_json_compact_strings_each_row/query.sql b/parser/testdata/01448_json_compact_strings_each_row/query.sql new file mode 100644 index 000000000..52f0eba8b --- /dev/null +++ b/parser/testdata/01448_json_compact_strings_each_row/query.sql @@ -0,0 +1,90 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS test_table; +DROP TABLE IF EXISTS test_table_2; +SET input_format_null_as_default = 0; +SELECT 1; +/* Check JSONCompactStringsEachRow Output */ +CREATE TABLE test_table (value UInt8, name String) ENGINE = MergeTree() ORDER BY value; +INSERT INTO test_table VALUES (1, 'a'), (2, 'b'), (3, 'c'); +SELECT * FROM test_table FORMAT JSONCompactStringsEachRow; +SELECT 2; +/* Check Totals */ +SELECT name, count() AS c FROM test_table GROUP BY name WITH TOTALS ORDER BY name FORMAT JSONCompactStringsEachRow; +SELECT 3; +/* Check JSONCompactStringsEachRowWithNames and JSONCompactStringsEachRowWithNamesAndTypes Output */ +SELECT * FROM test_table FORMAT JSONCompactStringsEachRowWithNamesAndTypes; +SELECT '----------'; +SELECT * FROM test_table FORMAT JSONCompactStringsEachRowWithNames; +SELECT 4; +/* Check Totals */ +SELECT name, 
count() AS c FROM test_table GROUP BY name WITH TOTALS ORDER BY name FORMAT JSONCompactStringsEachRowWithNamesAndTypes; +DROP TABLE IF EXISTS test_table; +SELECT 5; +/* Check JSONCompactStringsEachRow Input */ +CREATE TABLE test_table (v1 String, v2 UInt8, v3 DEFAULT v2 * 16, v4 UInt8 DEFAULT 8) ENGINE = MergeTree() ORDER BY v2; +INSERT INTO test_table FORMAT JSONCompactStringsEachRow ["first", "1", "2", "3"] ["second", "2", "3", "6"]; + +SELECT * FROM test_table FORMAT JSONCompactStringsEachRow; +TRUNCATE TABLE test_table; +SELECT 6; +/* Check input_format_null_as_default = 1 */ +SET input_format_null_as_default = 1; +INSERT INTO test_table FORMAT JSONCompactStringsEachRow ["first", "1", "2", "ᴺᵁᴸᴸ"] ["second", "2", "null", "6"]; + +SELECT * FROM test_table FORMAT JSONCompactStringsEachRow; +TRUNCATE TABLE test_table; +SELECT 7; +/* Check Nested */ +CREATE TABLE test_table_2 (v1 UInt8, n Nested(id UInt8, name String)) ENGINE = MergeTree() ORDER BY v1; +INSERT INTO test_table_2 FORMAT JSONCompactStringsEachRow ["16", "[15, 16, 17]", "['first', 'second', 'third']"]; + +SELECT * FROM test_table_2 FORMAT JSONCompactStringsEachRow; +TRUNCATE TABLE test_table_2; +SELECT 8; +/* Check JSONCompactStringsEachRowWithNames and JSONCompactStringsEachRowWithNamesAndTypes Input */ +SET input_format_null_as_default = 0; +INSERT INTO test_table FORMAT JSONCompactStringsEachRowWithNamesAndTypes ["v1", "v2", "v3", "v4"]["String","UInt8","UInt16","UInt8"]["first", "1", "2", "3"]["second", "2", "3", "6"]; + +INSERT INTO test_table FORMAT JSONCompactStringsEachRowWithNames ["v1", "v2", "v3", "v4"]["first", "1", "2", "3"]["second", "2", "3", "6"]; + +SELECT * FROM test_table FORMAT JSONCompactStringsEachRow; +TRUNCATE TABLE test_table; +SELECT 9; +/* Check input_format_null_as_default = 1 */ +SET input_format_null_as_default = 1; +INSERT INTO test_table FORMAT JSONCompactStringsEachRowWithNamesAndTypes ["v1", "v2", "v3", "v4"]["String","UInt8","UInt16","UInt8"]["first", "1", "2", "null"] ["second", "2", "null", "6"]; + +INSERT INTO test_table FORMAT JSONCompactStringsEachRowWithNames ["v1", "v2", "v3", "v4"]["first", "1", "2", "null"] ["second", "2", "null", "6"]; + +SELECT * FROM test_table FORMAT JSONCompactStringsEachRow; +SELECT 10; +/* Check Header */ +TRUNCATE TABLE test_table; +SET input_format_skip_unknown_fields = 1; +INSERT INTO test_table FORMAT JSONCompactStringsEachRowWithNamesAndTypes ["v1", "v2", "invalid_column"]["String", "UInt8", "UInt8"]["first", "1", "32"]["second", "2", "64"]; + +INSERT INTO test_table FORMAT JSONCompactStringsEachRowWithNames ["v1", "v2", "invalid_column"]["first", "1", "32"]["second", "2", "64"]; + +SELECT * FROM test_table FORMAT JSONCompactStringsEachRow; +SELECT 11; +TRUNCATE TABLE test_table; +INSERT INTO test_table FORMAT JSONCompactStringsEachRowWithNamesAndTypes ["v4", "v2", "v3"]["UInt8", "UInt8", "UInt16"]["1", "2", "3"] + +INSERT INTO test_table FORMAT JSONCompactStringsEachRowWithNames ["v4", "v2", "v3"]["1", "2", "3"] + +SELECT * FROM test_table FORMAT JSONCompactStringsEachRowWithNamesAndTypes; +SELECT '---------'; +SELECT * FROM test_table FORMAT JSONCompactStringsEachRowWithNames; +SELECT 12; +/* Check Nested */ +INSERT INTO test_table_2 FORMAT JSONCompactStringsEachRowWithNamesAndTypes ["v1", "n.id", "n.name"]["UInt8", "Array(UInt8)", "Array(String)"]["16", "[15, 16, 17]", "['first', 'second', 'third']"]; + +INSERT INTO test_table_2 FORMAT JSONCompactStringsEachRowWithNames ["v1", "n.id", "n.name"]["16", "[15, 16, 17]", "['first', 'second', 'third']"]; + 
+SELECT * FROM test_table_2 FORMAT JSONCompactStringsEachRowWithNamesAndTypes; +SELECT '---------'; +SELECT * FROM test_table_2 FORMAT JSONCompactStringsEachRowWithNames; + +DROP TABLE IF EXISTS test_table; +DROP TABLE IF EXISTS test_table_2; diff --git a/parser/testdata/01449_json_compact_strings/ast.json b/parser/testdata/01449_json_compact_strings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01449_json_compact_strings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01449_json_compact_strings/metadata.json b/parser/testdata/01449_json_compact_strings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01449_json_compact_strings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01449_json_compact_strings/query.sql b/parser/testdata/01449_json_compact_strings/query.sql new file mode 100644 index 000000000..d0a9d72af --- /dev/null +++ b/parser/testdata/01449_json_compact_strings/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-fasttest + +SET output_format_write_statistics = 0; + +SELECT + 1, + 'a', + [1, 2, 3], + (1, 'a'), + null, + nan +FORMAT JSONCompactStrings; diff --git a/parser/testdata/01450_set_null_const/ast.json b/parser/testdata/01450_set_null_const/ast.json new file mode 100644 index 000000000..ff6e8f99b --- /dev/null +++ b/parser/testdata/01450_set_null_const/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_mtree (children 1)" + }, + { + "explain": " Identifier test_mtree" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000999686, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01450_set_null_const/metadata.json b/parser/testdata/01450_set_null_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01450_set_null_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01450_set_null_const/query.sql b/parser/testdata/01450_set_null_const/query.sql new file mode 100644 index 000000000..c47176a88 --- /dev/null +++ b/parser/testdata/01450_set_null_const/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS test_mtree; + +CREATE TABLE test_mtree (`x` String, INDEX idx x TYPE set(10) GRANULARITY 1) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO test_mtree VALUES ('Hello, world'); +SELECT count() FROM test_mtree WHERE x = NULL; + +DROP TABLE test_mtree; diff --git a/parser/testdata/01451_detach_drop_part/ast.json b/parser/testdata/01451_detach_drop_part/ast.json new file mode 100644 index 000000000..7fad24c6d --- /dev/null +++ b/parser/testdata/01451_detach_drop_part/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mt_01451 (children 1)" + }, + { + "explain": " Identifier mt_01451" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000936728, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01451_detach_drop_part/metadata.json b/parser/testdata/01451_detach_drop_part/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01451_detach_drop_part/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01451_detach_drop_part/query.sql b/parser/testdata/01451_detach_drop_part/query.sql new file mode 100644 index 000000000..87188cebf --- /dev/null +++ 
b/parser/testdata/01451_detach_drop_part/query.sql @@ -0,0 +1,44 @@ +DROP TABLE IF EXISTS mt_01451; + +CREATE TABLE mt_01451 (v UInt8) ENGINE = MergeTree() order by tuple() SETTINGS old_parts_lifetime=0; +SYSTEM STOP MERGES mt_01451; + +INSERT INTO mt_01451 VALUES (0); +INSERT INTO mt_01451 VALUES (1); +INSERT INTO mt_01451 VALUES (2); + +SELECT v FROM mt_01451 ORDER BY v; + +ALTER TABLE mt_01451 DETACH PART 'all_100_100_0'; -- { serverError NO_SUCH_DATA_PART } + +ALTER TABLE mt_01451 DETACH PART 'all_2_2_0'; + +SELECT v FROM mt_01451 ORDER BY v; + +SELECT name FROM system.detached_parts WHERE table = 'mt_01451' AND database = currentDatabase(); + +ALTER TABLE mt_01451 ATTACH PART 'all_2_2_0'; + +SELECT v FROM mt_01451 ORDER BY v; + +SELECT name FROM system.detached_parts WHERE table = 'mt_01451' AND database = currentDatabase(); + +SELECT '-- drop part --'; + +ALTER TABLE mt_01451 DROP PART 'all_4_4_0'; + +ALTER TABLE mt_01451 ATTACH PART 'all_4_4_0'; -- { serverError BAD_DATA_PART_NAME } + +SELECT v FROM mt_01451 ORDER BY v; + +SELECT name FROM system.parts WHERE table = 'mt_01451' AND active AND database = currentDatabase(); + +SELECT '-- resume merges --'; +SYSTEM START MERGES mt_01451; +OPTIMIZE TABLE mt_01451 FINAL; + +SELECT v FROM mt_01451 ORDER BY v; + +SELECT name FROM system.parts WHERE table = 'mt_01451' AND active AND database = currentDatabase(); + +DROP TABLE mt_01451; diff --git a/parser/testdata/01451_normalize_query/ast.json b/parser/testdata/01451_normalize_query/ast.json new file mode 100644 index 000000000..4b08d14a8 --- /dev/null +++ b/parser/testdata/01451_normalize_query/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function normalizeQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'SELECT 1'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001270608, + "rows_read": 7, + "bytes_read": 269 + } +} diff --git a/parser/testdata/01451_normalize_query/metadata.json b/parser/testdata/01451_normalize_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01451_normalize_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01451_normalize_query/query.sql b/parser/testdata/01451_normalize_query/query.sql new file mode 100644 index 000000000..14c74d950 --- /dev/null +++ b/parser/testdata/01451_normalize_query/query.sql @@ -0,0 +1,45 @@ +SELECT normalizeQuery('SELECT 1'); +SELECT normalizeQuery('SELECT 1'); +SELECT normalizeQuery('SELECT 1, 1, 1'); +SELECT normalizeQuery('SELECT 1, 1, 1, /* Hwllo */'); +SELECT normalizeQuery('SELECT 1, 1, 1, /* Hello */'); +SELECT normalizeQuery('SELECT 1, 1, 1, /* Hello */ \'abc\''); +SELECT normalizeQuery('SELECT 1, 1, 1, /* Hello */ \'abc\' WHERE 1'); +SELECT normalizeQuery('SELECT 1, 1, 1, /* Hello */ \'abc\' WHERE 1 = 1'); +SELECT normalizeQuery('SELECT 1, 1, 1, /* Hello */ \'abc\' WHERE 1 = 1 AND (x, y)'); +SELECT normalizeQuery('SELECT 1, 1, 1, /* Hello */ \'abc\' WHERE 1 = 1 AND (1, y)'); +SELECT normalizeQuery('[1, 2, 3]'); +SELECT normalizeQuery('[1, 2, 3, x]'); +SELECT normalizeQuery('SELECT 1, 1, 1, /* Hello */ \'abc\' WHERE 1 = 1 AND (1, y) LIMIT 1, 1'); +SELECT normalizeQuery('SELECT 1 AS 
`xyz`'); +SELECT normalizeQuery('SELECT 1 AS `xyz1`'); +SELECT normalizeQuery('SELECT 1 AS `xyz11`'); +SELECT normalizeQuery('SELECT 1 AS xyz111'); +SELECT normalizeQuery('SELECT 1 AS xyz1'); +SELECT normalizeQuery('SELECT 1 AS xyz11'); +SELECT normalizeQuery('SELECT 1 xyz11'); +SELECT normalizeQuery('SELECT 1, xyz11'); +SELECT normalizeQuery('SELECT 1, ''xyz11'''); +SELECT normalizeQuery('SELECT $doc$VALUE$doc$ xyz11'); +SELECT normalizeQuery('SELECT $doc$VALUE$doc$, xyz11'); +SELECT normalizeQuery('SELECT $doc$VALUE$doc$, ''xyz11'''); +SELECT normalizeQuery('1 - 2'); +SELECT normalizeQuery('1, -2, 3'); +SELECT normalizeQuery('-1, -2, 3'); +SELECT normalizeQuery('1 - 2, 3, 4'); +SELECT normalizeQuery('f(-2, 3)'); +SELECT normalizeQuery('[-1, 2, 3]'); +SELECT normalizeQuery('1 + 2'); +SELECT normalizeQuery('1, +2, 3'); +SELECT normalizeQuery('-1, +2, 3'); +SELECT normalizeQuery('1 + 2, 3, 4'); +SELECT normalizeQuery('f(+2, 3)'); +SELECT normalizeQuery('[+1, 2, 3]'); +SELECT normalizeQuery('1, 2, 3 + 4'); +SELECT normalizeQuery('1, 2 - 3, 4'); +SELECT normalizeQuery('f(+2, 3), 1'); +SELECT normalizeQuery('[+1, 2, 3] - 1'); +SELECT normalizeQuery('-1, 1 - [+1, 2, 3] - 1'); +SELECT normalizeQuery('(+1, 2, 3) - 1'); +SELECT normalizeQuery('-1, 1 - (+1, 2, 3) - 1'); +SELECT normalizeQuery('(+1, 2, -3)'); diff --git a/parser/testdata/01451_replicated_detach_drop_and_quorum_long/ast.json b/parser/testdata/01451_replicated_detach_drop_and_quorum_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01451_replicated_detach_drop_and_quorum_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01451_replicated_detach_drop_and_quorum_long/metadata.json b/parser/testdata/01451_replicated_detach_drop_and_quorum_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01451_replicated_detach_drop_and_quorum_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01451_replicated_detach_drop_and_quorum_long/query.sql b/parser/testdata/01451_replicated_detach_drop_and_quorum_long/query.sql new file mode 100644 index 000000000..651be4896 --- /dev/null +++ b/parser/testdata/01451_replicated_detach_drop_and_quorum_long/query.sql @@ -0,0 +1,52 @@ +-- Tags: long, replica, no-replicated-database, no-shared-merge-tree +-- no-shared-merge-tree: depends on max_replicated_merges_in_queue + +SET replication_alter_partitions_sync = 2; + + +DROP TABLE IF EXISTS replica1; +DROP TABLE IF EXISTS replica2; + +CREATE TABLE replica1 (v UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/01451/quorum', 'r1') order by tuple() settings max_replicated_merges_in_queue = 0; +CREATE TABLE replica2 (v UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/01451/quorum', 'r2') order by tuple() settings max_replicated_merges_in_queue = 0; + +INSERT INTO replica1 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (0); + +SYSTEM SYNC REPLICA replica2; + +SELECT name FROM system.parts WHERE table = 'replica2' and database = currentDatabase() and active = 1; + +ALTER TABLE replica2 DETACH PART 'all_0_0_0'; + +SELECT * FROM replica1; + +SELECT * FROM replica2; + +-- drop of empty partition works +ALTER TABLE replica2 DROP PARTITION ID 'all'; + +SET insert_quorum = 2, insert_quorum_parallel = 0; + +INSERT INTO replica2 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (1); + +SYSTEM SYNC REPLICA replica2; + +ALTER TABLE replica1 DETACH PART 'all_2_2_0'; 
--{serverError NOT_IMPLEMENTED} + +SELECT name FROM system.parts WHERE table = 'replica1' and database = currentDatabase() and active = 1 ORDER BY name; + +SELECT COUNT() FROM replica1; + +SET insert_quorum_parallel=1; + +INSERT INTO replica2 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (2); + +-- should work, parallel quorum nodes exist only during insert +ALTER TABLE replica1 DROP PART 'all_3_3_0'; + +SELECT name FROM system.parts WHERE table = 'replica1' and database = currentDatabase() and active = 1 ORDER BY name; + +SELECT COUNT() FROM replica1; + +DROP TABLE IF EXISTS replica1; +DROP TABLE IF EXISTS replica2; diff --git a/parser/testdata/01451_replicated_detach_drop_part_long/ast.json b/parser/testdata/01451_replicated_detach_drop_part_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01451_replicated_detach_drop_part_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01451_replicated_detach_drop_part_long/metadata.json b/parser/testdata/01451_replicated_detach_drop_part_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01451_replicated_detach_drop_part_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01451_replicated_detach_drop_part_long/query.sql b/parser/testdata/01451_replicated_detach_drop_part_long/query.sql new file mode 100644 index 000000000..b6b784775 --- /dev/null +++ b/parser/testdata/01451_replicated_detach_drop_part_long/query.sql @@ -0,0 +1,53 @@ +-- Tags: long, replica, no-replicated-database, no-shared-merge-tree +-- Tag no-replicated-database: Fails due to additional replicas or shards +-- no-shared-merge-tree: depends on max_replicated_merges_in_queue + +SET replication_alter_partitions_sync = 2; + +DROP TABLE IF EXISTS replica1 SYNC; +DROP TABLE IF EXISTS replica2 SYNC; + +CREATE TABLE replica1 (v UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/'||currentDatabase()||'test/01451/attach', 'r1') order by tuple() settings max_replicated_merges_in_queue = 0; +CREATE TABLE replica2 (v UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/'||currentDatabase()||'test/01451/attach', 'r2') order by tuple() settings max_replicated_merges_in_queue = 0; + +INSERT INTO replica1 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (0); +INSERT INTO replica1 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (1); +INSERT INTO replica1 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (2); + +ALTER TABLE replica1 DETACH PART 'all_100_100_0'; -- { serverError NO_SUCH_DATA_PART } + +SELECT v FROM replica1 ORDER BY v; + +SYSTEM SYNC REPLICA replica2; +ALTER TABLE replica2 DETACH PART 'all_1_1_0'; + +SELECT v FROM replica1 ORDER BY v; + +SELECT name FROM system.detached_parts WHERE table = 'replica2' AND database = currentDatabase(); + +ALTER TABLE replica2 ATTACH PART 'all_1_1_0' SETTINGS insert_keeper_fault_injection_probability=0; + +SYSTEM SYNC REPLICA replica1; +SELECT v FROM replica1 ORDER BY v; + +SELECT name FROM system.detached_parts WHERE table = 'replica2' AND database = currentDatabase(); + +SELECT '-- drop part --'; + +ALTER TABLE replica1 DROP PART 'all_3_3_0'; + +ALTER TABLE replica1 ATTACH PART 'all_3_3_0'; -- { serverError BAD_DATA_PART_NAME } + +SELECT v FROM replica1 ORDER BY v; + +SELECT '-- resume merges --'; + +ALTER TABLE replica1 MODIFY SETTING max_replicated_merges_in_queue = 1; +OPTIMIZE TABLE replica1 FINAL; + +SELECT v FROM replica1
ORDER BY v; + +SELECT name FROM system.parts WHERE table = 'replica2' AND active AND database = currentDatabase(); + +DROP TABLE replica1 SYNC; +DROP TABLE replica2 SYNC; diff --git a/parser/testdata/01452_normalized_query_hash/ast.json b/parser/testdata/01452_normalized_query_hash/ast.json new file mode 100644 index 000000000..9728719c3 --- /dev/null +++ b/parser/testdata/01452_normalized_query_hash/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function normalizedQueryHash (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'SELECT 1'" + }, + { + "explain": " Function normalizedQueryHash (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'SELECT 2'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001176912, + "rows_read": 12, + "bytes_read": 491 + } +} diff --git a/parser/testdata/01452_normalized_query_hash/metadata.json b/parser/testdata/01452_normalized_query_hash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01452_normalized_query_hash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01452_normalized_query_hash/query.sql b/parser/testdata/01452_normalized_query_hash/query.sql new file mode 100644 index 000000000..0ae95b529 --- /dev/null +++ b/parser/testdata/01452_normalized_query_hash/query.sql @@ -0,0 +1,11 @@ +SELECT normalizedQueryHash('SELECT 1') = normalizedQueryHash('SELECT 2'); +SELECT normalizedQueryHash('SELECT 1') != normalizedQueryHash('SELECT 1, 1, 1'); +SELECT normalizedQueryHash('SELECT 1, 1, 1, /* Hello */ \'abc\'') = normalizedQueryHash('SELECT 2, 3'); +SELECT normalizedQueryHash('[1, 2, 3]') = normalizedQueryHash('[1, ''x'']'); +SELECT normalizedQueryHash('[1, 2, 3, x]') != normalizedQueryHash('[1, x]'); +SELECT normalizedQueryHash('SELECT 1 AS `xyz`') != normalizedQueryHash('SELECT 1 AS `abc`'); +SELECT normalizedQueryHash('SELECT 1 AS xyz111') = normalizedQueryHash('SELECT 2 AS xyz234'); +SELECT normalizedQueryHash('SELECT $doc$VALUE$doc$ AS `xyz`') != normalizedQueryHash('SELECT $doc$VALUE$doc$ AS `abc`'); +SELECT normalizedQueryHash('SELECT $doc$VALUE$doc$ AS xyz111') = normalizedQueryHash('SELECT $doc$VALUE$doc$ AS xyz234'); + + diff --git a/parser/testdata/01453_fixsed_string_sort/ast.json b/parser/testdata/01453_fixsed_string_sort/ast.json new file mode 100644 index 000000000..9ffb554df --- /dev/null +++ b/parser/testdata/01453_fixsed_string_sort/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery badFixedStringSort (children 1)" + }, + { + "explain": " Identifier badFixedStringSort" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001304994, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/01453_fixsed_string_sort/metadata.json b/parser/testdata/01453_fixsed_string_sort/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01453_fixsed_string_sort/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01453_fixsed_string_sort/query.sql b/parser/testdata/01453_fixsed_string_sort/query.sql new file mode 100644 index 000000000..9fd27d6dc --- /dev/null +++ b/parser/testdata/01453_fixsed_string_sort/query.sql @@ -0,0 +1,22 @@ +drop table if exists badFixedStringSort; +CREATE TABLE IF NOT EXISTS badFixedStringSort (uuid5_old FixedString(16), subitem String) engine=MergeTree order by tuple(); + +INSERT INTO badFixedStringSort values (UUIDStringToNum('999e1140-66ef-5610-9c3a-b3fb33e0fda9'), '1'); +INSERT INTO badFixedStringSort values (UUIDStringToNum('999e1140-66ef-5610-9c3a-b3fb33e0fda9'), '2'); +INSERT INTO badFixedStringSort values (UUIDStringToNum('999e1140-66ef-5610-9c3a-b3fb33e0fda9'), '1'); +INSERT INTO badFixedStringSort values (UUIDStringToNum('999e1140-66ef-5610-9c3a-b3fb33e0fda9'), '2'); + +INSERT INTO badFixedStringSort values (UUIDStringToNum('8ad8fc5e-a49e-544c-98e6-1140afd79f80'), '2'); +INSERT INTO badFixedStringSort values (UUIDStringToNum('8ad8fc5e-a49e-544c-98e6-1140afd79f80'), '1'); +INSERT INTO badFixedStringSort values (UUIDStringToNum('8ad8fc5e-a49e-544c-98e6-1140afd79f80'), '2'); +INSERT INTO badFixedStringSort values (UUIDStringToNum('8ad8fc5e-a49e-544c-98e6-1140afd79f80'), '1'); + +INSERT INTO badFixedStringSort values (UUIDStringToNum('999e1140-66ef-5610-9c3a-b3fb33e0fda9'), '1'); +INSERT INTO badFixedStringSort values (UUIDStringToNum('999e1140-66ef-5610-9c3a-b3fb33e0fda9'), '2'); +INSERT INTO badFixedStringSort values (UUIDStringToNum('999e1140-66ef-5610-9c3a-b3fb33e0fda9'), '1'); +INSERT INTO badFixedStringSort values (UUIDStringToNum('999e1140-66ef-5610-9c3a-b3fb33e0fda9'), '2'); + +optimize table badFixedStringSort final; +select hex(uuid5_old), subitem from badFixedStringSort ORDER BY uuid5_old, subitem; + +drop table if exists badFixedStringSort; diff --git a/parser/testdata/01453_normalize_query_alias_uuid/ast.json b/parser/testdata/01453_normalize_query_alias_uuid/ast.json new file mode 100644 index 000000000..5a8bbc281 --- /dev/null +++ b/parser/testdata/01453_normalize_query_alias_uuid/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function normalizeQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'SELECT 1 AS `aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee`'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001647162, + "rows_read": 7, + "bytes_read": 311 + } +} diff --git a/parser/testdata/01453_normalize_query_alias_uuid/metadata.json b/parser/testdata/01453_normalize_query_alias_uuid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01453_normalize_query_alias_uuid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01453_normalize_query_alias_uuid/query.sql b/parser/testdata/01453_normalize_query_alias_uuid/query.sql new file mode 100644 index 000000000..4d3e095d2 --- /dev/null +++ b/parser/testdata/01453_normalize_query_alias_uuid/query.sql @@ -0,0 +1,3 @@ +SELECT normalizeQuery('SELECT 1 AS `aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee`'), + normalizedQueryHash('SELECT 1 AS `aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee`') + = normalizedQueryHash('SELECT 2 AS `aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeef`'); diff --git 
a/parser/testdata/01455_default_compression/ast.json b/parser/testdata/01455_default_compression/ast.json new file mode 100644 index 000000000..cae160763 --- /dev/null +++ b/parser/testdata/01455_default_compression/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery compress_table (children 1)" + }, + { + "explain": " Identifier compress_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001646142, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/01455_default_compression/metadata.json b/parser/testdata/01455_default_compression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01455_default_compression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01455_default_compression/query.sql b/parser/testdata/01455_default_compression/query.sql new file mode 100644 index 000000000..099e419bd --- /dev/null +++ b/parser/testdata/01455_default_compression/query.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS compress_table; + +CREATE TABLE compress_table +( + key UInt64, + value1 String CODEC(Default), + value2 UInt64 CODEC(Delta, Default), + value3 String CODEC(ZSTD(10)) +) +ENGINE = MergeTree() +ORDER BY key; + +INSERT INTO compress_table VALUES(1, '1', '1', '1'); + +SELECT * FROM compress_table; + +ALTER TABLE compress_table MODIFY COLUMN value3 CODEC(Default); + +INSERT INTO compress_table VALUES(2, '2', '2', '2'); + +SELECT * FROM compress_table ORDER BY key; + +DESCRIBE TABLE compress_table; + +SHOW CREATE TABLE compress_table; + +ALTER TABLE compress_table MODIFY COLUMN value2 CODEC(Default(5)); --{serverError BAD_ARGUMENTS} + +DROP TABLE IF EXISTS compress_table; diff --git a/parser/testdata/01455_nullable_type_with_if_agg_combinator/ast.json b/parser/testdata/01455_nullable_type_with_if_agg_combinator/ast.json new file mode 100644 index 000000000..069daae72 --- /dev/null +++ b/parser/testdata/01455_nullable_type_with_if_agg_combinator/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001052875, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01455_nullable_type_with_if_agg_combinator/metadata.json b/parser/testdata/01455_nullable_type_with_if_agg_combinator/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01455_nullable_type_with_if_agg_combinator/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01455_nullable_type_with_if_agg_combinator/query.sql b/parser/testdata/01455_nullable_type_with_if_agg_combinator/query.sql new file mode 100644 index 000000000..0e951af73 --- /dev/null +++ b/parser/testdata/01455_nullable_type_with_if_agg_combinator/query.sql @@ -0,0 +1,8 @@ +SET cast_keep_nullable = 0; + +-- Value nullable +SELECT anyIf(CAST(number, 'Nullable(UInt8)'), number = 3) AS a, toTypeName(a) FROM numbers(2); +-- Value and condition nullable +SELECT anyIf(number, number = 3) AS a, toTypeName(a) FROM (SELECT CAST(number, 'Nullable(UInt8)') AS number FROM numbers(2)); +-- Condition nullable +SELECT anyIf(CAST(number, 'UInt8'), number = 3) AS a, toTypeName(a) FROM (SELECT CAST(number, 'Nullable(UInt8)') AS number FROM numbers(2)); diff --git a/parser/testdata/01455_optimize_trivial_insert_select/ast.json 
b/parser/testdata/01455_optimize_trivial_insert_select/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01455_optimize_trivial_insert_select/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01455_optimize_trivial_insert_select/metadata.json b/parser/testdata/01455_optimize_trivial_insert_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01455_optimize_trivial_insert_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01455_optimize_trivial_insert_select/query.sql b/parser/testdata/01455_optimize_trivial_insert_select/query.sql new file mode 100644 index 000000000..30f088a6c --- /dev/null +++ b/parser/testdata/01455_optimize_trivial_insert_select/query.sql @@ -0,0 +1,12 @@ +-- Tags: log-engine +SET max_insert_threads = 1, max_threads = 100, min_insert_block_size_rows = 1048576, max_block_size = 65536; +SET allow_deprecated_error_prone_window_functions = 1; +DROP TABLE IF EXISTS t; +CREATE TABLE t (x UInt64) ENGINE = StripeLog; +-- For trivial INSERT SELECT, max_threads is lowered to max_insert_threads and max_block_size is changed to min_insert_block_size_rows. +SET optimize_trivial_insert_select = 1; +INSERT INTO t SELECT * FROM numbers_mt(1000000); +SET max_threads = 1; +-- If data was inserted by more threads, we will probably see data out of order. +SELECT DISTINCT blockSize(), runningDifference(x) FROM t; +DROP TABLE t; diff --git a/parser/testdata/01455_rank_correlation_spearman/ast.json b/parser/testdata/01455_rank_correlation_spearman/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01455_rank_correlation_spearman/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01455_rank_correlation_spearman/metadata.json b/parser/testdata/01455_rank_correlation_spearman/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01455_rank_correlation_spearman/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01455_rank_correlation_spearman/query.sql b/parser/testdata/01455_rank_correlation_spearman/query.sql new file mode 100644 index 000000000..b792d709b --- /dev/null +++ b/parser/testdata/01455_rank_correlation_spearman/query.sql @@ -0,0 +1,31 @@ +-- Tags: no-parallel + +CREATE DATABASE IF NOT EXISTS db_01455_rank_correlation; +USE db_01455_rank_correlation; +DROP TABLE IF EXISTS moons; +DROP TABLE IF EXISTS circles; + +SELECT '1'; +SELECT rankCorr(number, number) FROM numbers(100); + +SELECT '-1'; +SELECT rankCorr(number, -1 * CAST(number AS Int64)) FROM numbers(100); + +SELECT '-0.037'; +SELECT roundBankers(rankCorr(exp(number), sin(number)), 3) FROM numbers(100); + +CREATE TABLE moons(a Float64, b Float64) Engine=Memory(); +INSERT INTO moons VALUES (1.230365,1.291454), (1.93851,0.6499), (1.574085,0.744109), (1.416457,1.41872), (1.90165,1.298199), (2.023844,1.142459), (1.828602,0.636404), (1.568649,1.157387), (1.968863,1.160039), (1.790198,0.860815), (1.238993,0.252486), (1.690338,0.573545), (1.678741,0.739649), (1.363346,0.514698), (1.924442,0.484331), (0.849071,0.585017), (1.859407,1.098124), (1.657176,1.314958), (1.085181,0.761741), (1.184481,0.639135), (1.59856,0.688384), (1.304818,1.212579), (1.913821,0.663551), (1.872619,0.510627), (1.29273,0.795267), (1.767669,0.892397), (1.790311,1.21813), (1.621893,1.229768), (1.525505,0.752643), (1.513535,1.016012), (1.120456,1.427238), (1.71505,0.716654), (1.394756,0.733629), 
(1.746027,1.422821), (1.5376,1.387397), (1.358968,0.575393), (1.941569,0.572639), (1.904995,0.966926), (1.967455,0.436449), (2.045535,0.582434), (1.365599,0.446582), (2.035874,0.468542), (1.419283,0.739308), (1.718267,0.895579), (1.285871,1.014628), (2.010657,1.631207), (1.78226,0.576882), (1.78274,0.727585), (1.454934,1.285701), (1.657208,0.581418); +SELECT '-0.108'; +SELECT roundBankers(rankCorr(a, b), 3) from moons; + +CREATE TABLE circles(a Float64, b Float64) Engine=Memory(); +INSERT INTO circles VALUES (1.20848,0.505643), (1.577706,1.726383), (1.945215,1.638926), (0.493616,0.792443), (0.827802,1.41133), (1.012179,1.654582), (1.815329,0.254426), (-0.068102,1.456476), (1.235432,1.565291), (1.269633,1.857153), (0.687433,1.24911), (0.131356,1.610389), (1.991372,0.204134), (1.678587,1.456911), (0.501133,0.68513), (0.924535,0.541514), (0.574115,0.340542), (-0.013384,1.17037), (0.917257,1.799431), (1.364786,0.396457), (1.931339,1.093935), (0.575076,0.427512), (2.084798,1.752707), (0.694029,0.257422), (-0.003821,0.160859), (0.037966,0.217695), (1.986527,1.249144), (1.864518,0.521483), (0.038928,0.175741), (1.855737,1.678827), (0.779503,0.963619), (0.035384,0.238397), (0.136108,0.128737), (0.0581,1.093712), (-0.012542,0.713137), (1.53441,0.447265), (0.198885,1.232961), (1.66781,0.259156), (1.478017,1.256315), (1.148358,1.659979), (0.340698,0.76793), (0.376184,0.578202), (0.251495,1.765917), (1.836389,1.75769), (1.573166,1.753943), (0.448309,0.965337), (1.704437,1.138451), (1.93234,1.723736), (1.412218,0.603027), (1.978789,0.938132); +SELECT '0.286'; +SELECT roundBankers(rankCorr(a, b), 3) from circles; + +DROP TABLE IF EXISTS moons; +DROP TABLE IF EXISTS circles; +DROP DATABASE IF EXISTS db_01455_rank_correlation; + + diff --git a/parser/testdata/01455_shard_leaf_max_rows_bytes_to_read/ast.json b/parser/testdata/01455_shard_leaf_max_rows_bytes_to_read/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01455_shard_leaf_max_rows_bytes_to_read/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01455_shard_leaf_max_rows_bytes_to_read/metadata.json b/parser/testdata/01455_shard_leaf_max_rows_bytes_to_read/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01455_shard_leaf_max_rows_bytes_to_read/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01455_shard_leaf_max_rows_bytes_to_read/query.sql b/parser/testdata/01455_shard_leaf_max_rows_bytes_to_read/query.sql new file mode 100644 index 000000000..ea07b545a --- /dev/null +++ b/parser/testdata/01455_shard_leaf_max_rows_bytes_to_read/query.sql @@ -0,0 +1,39 @@ +-- Tags: shard + +-- Leaf limits are unreliable with prefer_localhost_replica=1: +-- in that case the initial query and the query on the local node (against the +-- underlying table) share the same counters, so if the query on the remote +-- node finishes before the local one, the local node will already have read +-- some rows and the leaf limit will fail.
+SET prefer_localhost_replica=0; + +SELECT count() FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 100) SETTINGS max_rows_to_read_leaf=1; -- { serverError TOO_MANY_ROWS } +SELECT count() FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 100) SETTINGS max_bytes_to_read_leaf=1; -- { serverError TOO_MANY_BYTES } +SELECT count() FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 100) SETTINGS max_rows_to_read_leaf=100; +SELECT count() FROM (SELECT * FROM remote('127.0.0.1', system.numbers) LIMIT 100) SETTINGS max_bytes_to_read_leaf=1000; + +SELECT count() FROM (SELECT * FROM remote('127.0.0.2', system.numbers) LIMIT 100) SETTINGS max_rows_to_read_leaf=1; -- { serverError TOO_MANY_ROWS } +SELECT count() FROM (SELECT * FROM remote('127.0.0.2', system.numbers) LIMIT 100) SETTINGS max_bytes_to_read_leaf=1; -- { serverError TOO_MANY_BYTES } +SELECT count() FROM (SELECT * FROM remote('127.0.0.2', system.numbers) LIMIT 100) SETTINGS max_rows_to_read_leaf=100; +SELECT count() FROM (SELECT * FROM remote('127.0.0.2', system.numbers) LIMIT 100) SETTINGS max_bytes_to_read_leaf=1000; + +DROP TABLE IF EXISTS test_local; +DROP TABLE IF EXISTS test_distributed; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE test_local (date Date, value UInt32) ENGINE = MergeTree(date, date, 8192); +CREATE TABLE test_distributed AS test_local ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), test_local, rand()); + +INSERT INTO test_local SELECT '2000-08-01', number as value from numbers(50000); + +SELECT count() FROM (SELECT * FROM test_distributed) SETTINGS max_rows_to_read_leaf = 40000; -- { serverError TOO_MANY_ROWS } +SELECT count() FROM (SELECT * FROM test_distributed) SETTINGS max_bytes_to_read_leaf = 40000; -- { serverError TOO_MANY_BYTES } + +SELECT count() FROM (SELECT * FROM test_distributed) SETTINGS max_rows_to_read = 60000; -- { serverError TOO_MANY_ROWS } +SELECT count() FROM (SELECT * FROM test_distributed) SETTINGS max_rows_to_read_leaf = 60000; + +SELECT count() FROM (SELECT * FROM test_distributed) SETTINGS max_bytes_to_read = 100000; -- { serverError TOO_MANY_BYTES } +SELECT count() FROM (SELECT * FROM test_distributed) SETTINGS max_bytes_to_read_leaf = 100000; + +DROP TABLE IF EXISTS test_local; +DROP TABLE IF EXISTS test_distributed; diff --git a/parser/testdata/01455_time_zones/ast.json b/parser/testdata/01455_time_zones/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01455_time_zones/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01455_time_zones/metadata.json b/parser/testdata/01455_time_zones/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01455_time_zones/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01455_time_zones/query.sql b/parser/testdata/01455_time_zones/query.sql new file mode 100644 index 000000000..67d19a446 --- /dev/null +++ b/parser/testdata/01455_time_zones/query.sql @@ -0,0 +1,10 @@ + +-- There are currently 594 timezones, coming from the time zone database embedded in the ClickHouse binary. +-- Refer to contrib/cctz-cmake/CMakeLists.txt for the complete list. The count may change, but we expect at least 500 timezones.
+-- SELECT count(*) +-- FROM system.time_zones +-- +-- ┌─count()─┐ +-- │ 594 │ +-- └─────────┘ +SELECT if ((SELECT count(*) FROM system.time_zones) > 500, 'ok', 'fail'); diff --git a/parser/testdata/01456_ast_optimizations_over_distributed/ast.json b/parser/testdata/01456_ast_optimizations_over_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01456_ast_optimizations_over_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01456_ast_optimizations_over_distributed/metadata.json b/parser/testdata/01456_ast_optimizations_over_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01456_ast_optimizations_over_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01456_ast_optimizations_over_distributed/query.sql b/parser/testdata/01456_ast_optimizations_over_distributed/query.sql new file mode 100644 index 000000000..91044859c --- /dev/null +++ b/parser/testdata/01456_ast_optimizations_over_distributed/query.sql @@ -0,0 +1,27 @@ +-- Tags: distributed + +SET optimize_injective_functions_inside_uniq = 1; +SET optimize_arithmetic_operations_in_aggregate_functions = 1; +SET optimize_if_transform_strings_to_enum = 1; + +SELECT uniq(bitNot(number)) FROM numbers(1); +SELECT sum(number + 1) FROM numbers(1); +SELECT transform(number, [1, 2], ['google', 'censor.net'], 'other') FROM numbers(1); +SELECT number > 0 ? 'censor.net' : 'google' FROM numbers(1); + + +DROP TABLE IF EXISTS local_table; +DROP TABLE IF EXISTS dist; + +CREATE TABLE local_table (number UInt64) ENGINE = Memory; +CREATE TABLE dist AS local_table ENGINE = Distributed(test_cluster_two_shards_localhost, currentDatabase(), local_table); + +INSERT INTO local_table SELECT number FROM numbers(1); + +SELECT uniq(bitNot(number)) FROM dist; +SELECT sum(number + 1) FROM dist; +SELECT transform(number, [1, 2], ['google', 'censor.net'], 'other') FROM dist; +SELECT number > 0 ? 
'censor.net' : 'google' FROM dist; + +DROP TABLE local_table; +DROP TABLE dist; diff --git a/parser/testdata/01456_low_cardinality_sorting_bugfix/ast.json b/parser/testdata/01456_low_cardinality_sorting_bugfix/ast.json new file mode 100644 index 000000000..fb94e327b --- /dev/null +++ b/parser/testdata/01456_low_cardinality_sorting_bugfix/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery order_test1 (children 1)" + }, + { + "explain": " Identifier order_test1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001125787, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/01456_low_cardinality_sorting_bugfix/metadata.json b/parser/testdata/01456_low_cardinality_sorting_bugfix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01456_low_cardinality_sorting_bugfix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01456_low_cardinality_sorting_bugfix/query.sql b/parser/testdata/01456_low_cardinality_sorting_bugfix/query.sql new file mode 100644 index 000000000..06412d4b0 --- /dev/null +++ b/parser/testdata/01456_low_cardinality_sorting_bugfix/query.sql @@ -0,0 +1,43 @@ +drop table if exists order_test1; + +create table order_test1 +( + timestamp DateTime64(3), + color LowCardinality(String) +) engine = MergeTree() ORDER BY tuple(); + +insert into order_test1 values ('2020-08-21 18:46:08.000','red')('2020-08-21 18:46:08.000','green'); +insert into order_test1 values ('2020-08-21 18:46:07.000','red')('2020-08-21 18:46:07.000','green'); +insert into order_test1 values ('2020-08-21 18:46:06.000','red')('2020-08-21 18:46:06.000','green'); + +SELECT color, toDateTime(timestamp) AS second +FROM order_test1 +GROUP BY color, second +ORDER BY color ASC, second DESC; + +select ''; +select ''; + +SELECT color, timestamp +FROM order_test1 +GROUP BY color, timestamp +ORDER BY color ASC, timestamp DESC; + +select ''; +select '------cast to String----'; +select ''; + +SELECT cast(color,'String') color, toDateTime(timestamp) AS second +FROM order_test1 +GROUP BY color, second +ORDER BY color ASC, second DESC; + +select ''; +select ''; + +SELECT cast(color,'String') color, timestamp +FROM order_test1 +GROUP BY color, timestamp +ORDER BY color ASC, timestamp DESC; + +DROP TABLE order_test1; diff --git a/parser/testdata/01456_min_negative_decimal_formatting/ast.json b/parser/testdata/01456_min_negative_decimal_formatting/ast.json new file mode 100644 index 000000000..02efd9661 --- /dev/null +++ b/parser/testdata/01456_min_negative_decimal_formatting/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDecimal64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Int64_-9223372036854775808" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001241073, + "rows_read": 8, + "bytes_read": 312 + } +} diff --git a/parser/testdata/01456_min_negative_decimal_formatting/metadata.json b/parser/testdata/01456_min_negative_decimal_formatting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01456_min_negative_decimal_formatting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01456_min_negative_decimal_formatting/query.sql b/parser/testdata/01456_min_negative_decimal_formatting/query.sql new file mode 100644 index 000000000..f785ec365 --- /dev/null +++ b/parser/testdata/01456_min_negative_decimal_formatting/query.sql @@ -0,0 +1 @@ +select toDecimal64(-9223372036854775808, 0); diff --git a/parser/testdata/01456_modify_column_type_via_add_drop_update/ast.json b/parser/testdata/01456_modify_column_type_via_add_drop_update/ast.json new file mode 100644 index 000000000..dd19bc524 --- /dev/null +++ b/parser/testdata/01456_modify_column_type_via_add_drop_update/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tbl (children 1)" + }, + { + "explain": " Identifier tbl" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001217854, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01456_modify_column_type_via_add_drop_update/metadata.json b/parser/testdata/01456_modify_column_type_via_add_drop_update/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01456_modify_column_type_via_add_drop_update/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01456_modify_column_type_via_add_drop_update/query.sql b/parser/testdata/01456_modify_column_type_via_add_drop_update/query.sql new file mode 100644 index 000000000..a2e4804f1 --- /dev/null +++ b/parser/testdata/01456_modify_column_type_via_add_drop_update/query.sql @@ -0,0 +1,54 @@ +DROP TABLE IF EXISTS tbl; +CREATE TABLE tbl(a String, b UInt32, c Float64, d Int64, e UInt8) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO tbl SELECT number, number * 2, number * 3, number * 4, number * 5 FROM system.numbers LIMIT 10; + +SET mutations_sync = 1; + +-- Change the types of columns by adding a temporary column, updating it, and dropping the original column. +-- The alters should be executed in sequential order. +ALTER TABLE tbl ADD COLUMN xi Int64; +ALTER TABLE tbl UPDATE xi = a WHERE 1; +ALTER TABLE tbl DROP COLUMN a; +ALTER TABLE tbl ADD COLUMN a Int64; +ALTER TABLE tbl UPDATE a = xi WHERE 1; +ALTER TABLE tbl DROP COLUMN xi; + +ALTER TABLE tbl ADD COLUMN xi String; +ALTER TABLE tbl UPDATE xi = b WHERE 1; +ALTER TABLE tbl DROP COLUMN b; +ALTER TABLE tbl ADD COLUMN b String; +ALTER TABLE tbl UPDATE b = xi WHERE 1; +ALTER TABLE tbl DROP COLUMN xi; + +ALTER TABLE tbl ADD COLUMN xi UInt8; +ALTER TABLE tbl UPDATE xi = c WHERE 1; +ALTER TABLE tbl DROP COLUMN c; +ALTER TABLE tbl ADD COLUMN c UInt8; +ALTER TABLE tbl UPDATE c = xi WHERE 1; +ALTER TABLE tbl DROP COLUMN xi; + +ALTER TABLE tbl ADD COLUMN xi Float64; +ALTER TABLE tbl UPDATE xi = d WHERE 1; +ALTER TABLE tbl DROP COLUMN d; +ALTER TABLE tbl ADD COLUMN d Float64; +ALTER TABLE tbl UPDATE d = xi WHERE 1; +ALTER TABLE tbl DROP COLUMN xi; + +ALTER TABLE tbl ADD COLUMN xi UInt32; +ALTER TABLE tbl UPDATE xi = e WHERE 1; +ALTER TABLE tbl DROP COLUMN e; +ALTER TABLE tbl ADD COLUMN e UInt32; +ALTER TABLE tbl UPDATE e = xi WHERE 1; +ALTER TABLE tbl DROP COLUMN xi; + +SELECT * FROM tbl FORMAT TabSeparatedWithNamesAndTypes; + +DROP TABLE tbl; + +-- Do the same thing again but with MODIFY COLUMN.
+CREATE TABLE tbl(a String, b UInt32, c Float64, d Int64, e UInt8) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO tbl SELECT number, number * 2, number * 3, number * 4, number * 5 FROM system.numbers LIMIT 10; +ALTER TABLE tbl MODIFY COLUMN a Int64, MODIFY COLUMN b String, MODIFY COLUMN c UInt8, MODIFY COLUMN d Float64, MODIFY COLUMN e UInt32; +SELECT * FROM tbl FORMAT TabSeparatedWithNamesAndTypes; + +DROP TABLE tbl; diff --git a/parser/testdata/01457_compile_expressions_fuzzer/ast.json b/parser/testdata/01457_compile_expressions_fuzzer/ast.json new file mode 100644 index 000000000..dde38222c --- /dev/null +++ b/parser/testdata/01457_compile_expressions_fuzzer/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001404736, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01457_compile_expressions_fuzzer/metadata.json b/parser/testdata/01457_compile_expressions_fuzzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01457_compile_expressions_fuzzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01457_compile_expressions_fuzzer/query.sql b/parser/testdata/01457_compile_expressions_fuzzer/query.sql new file mode 100644 index 000000000..923ecf5d9 --- /dev/null +++ b/parser/testdata/01457_compile_expressions_fuzzer/query.sql @@ -0,0 +1,2 @@ +SET compile_expressions = 1; +SELECT GREATEST(2,0); diff --git a/parser/testdata/01457_create_as_table_function_structure/ast.json b/parser/testdata/01457_create_as_table_function_structure/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01457_create_as_table_function_structure/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01457_create_as_table_function_structure/metadata.json b/parser/testdata/01457_create_as_table_function_structure/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01457_create_as_table_function_structure/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01457_create_as_table_function_structure/query.sql b/parser/testdata/01457_create_as_table_function_structure/query.sql new file mode 100644 index 000000000..bc677698d --- /dev/null +++ b/parser/testdata/01457_create_as_table_function_structure/query.sql @@ -0,0 +1,42 @@ +-- Tags: no-parallel + +SET prefer_localhost_replica = 1; + +DROP DATABASE IF EXISTS test_01457; + +CREATE DATABASE test_01457; + +CREATE TABLE tmp (n Int8) ENGINE=Memory; + +CREATE TABLE test_01457.tf_remote AS remote('localhost', currentDatabase(), 'tmp'); +SHOW CREATE TABLE test_01457.tf_remote; +CREATE TABLE test_01457.tf_remote_explicit_structure (n UInt64) AS remote('localhost', currentDatabase(), 'tmp'); +SHOW CREATE TABLE test_01457.tf_remote_explicit_structure; +CREATE TABLE test_01457.tf_numbers (number String) AS numbers(1); +SHOW CREATE TABLE test_01457.tf_numbers; +CREATE TABLE test_01457.tf_merge AS merge(currentDatabase(), 'tmp'); +SHOW CREATE TABLE test_01457.tf_merge; + +DROP TABLE tmp; + +DETACH DATABASE test_01457; +ATTACH DATABASE test_01457; + +-- To suppress "Structure does not match (...), implicit conversion will be done." 
message +SET send_logs_level='error'; + +CREATE TABLE tmp (n Int8) ENGINE=Memory; +INSERT INTO test_01457.tf_remote_explicit_structure VALUES ('42'); +SELECT * FROM tmp; +TRUNCATE TABLE tmp; +INSERT INTO test_01457.tf_remote VALUES (0); + +SELECT (*,).1 AS c, toTypeName(c) FROM tmp; +SELECT (*,).1 AS c, toTypeName(c) FROM test_01457.tf_remote; +SELECT (*,).1 AS c, toTypeName(c) FROM test_01457.tf_remote_explicit_structure; +SELECT (*,).1 AS c, toTypeName(c) FROM test_01457.tf_numbers; +SELECT (*,).1 AS c, toTypeName(c) FROM test_01457.tf_merge; + +DROP DATABASE test_01457; + +DROP TABLE tmp; diff --git a/parser/testdata/01457_int256_hashing/ast.json b/parser/testdata/01457_int256_hashing/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01457_int256_hashing/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01457_int256_hashing/metadata.json b/parser/testdata/01457_int256_hashing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01457_int256_hashing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01457_int256_hashing/query.sql b/parser/testdata/01457_int256_hashing/query.sql new file mode 100644 index 000000000..510d25f6b --- /dev/null +++ b/parser/testdata/01457_int256_hashing/query.sql @@ -0,0 +1,43 @@ +-- Tags: no-fasttest + +SET joined_subquery_requires_alias = 0; + +SELECT toUInt256(123) IN (NULL); +SELECT toUInt256(123) AS k GROUP BY k; +SELECT k FROM (SELECT toUInt256(123) AS k FROM system.one) INNER JOIN (SELECT toUInt256(123) AS k) t USING k; +SELECT arrayEnumerateUniq([toUInt256(123), toUInt256(456), toUInt256(123)]); + +SELECT toInt256(123) IN (NULL); +SELECT toInt256(123) AS k GROUP BY k; +SELECT k FROM (SELECT toInt256(123) AS k FROM system.one) INNER JOIN (SELECT toInt256(123) AS k) t USING k; +SELECT arrayEnumerateUniq([toInt256(123), toInt256(456), toInt256(123)]); + +-- SELECT toUInt128(123) IN (NULL); +-- SELECT toUInt128(123) AS k GROUP BY k; +-- SELECT toUInt128(123) AS k FROM system.one INNER JOIN (SELECT toUInt128(123) AS k) t USING k; +-- SELECT arrayEnumerateUniq([toUInt128(123), toUInt128(456), toUInt128(123)]); + +SELECT toInt128(123) IN (NULL); +SELECT toInt128(123) AS k GROUP BY k; +SELECT k FROM (SELECT toInt128(123) AS k FROM system.one) INNER JOIN (SELECT toInt128(123) AS k) t USING k; +SELECT arrayEnumerateUniq([toInt128(123), toInt128(456), toInt128(123)]); + +SELECT toNullable(toUInt256(321)) IN (NULL); +SELECT toNullable(toUInt256(321)) AS k GROUP BY k; +SELECT k FROM (SELECT toNullable(toUInt256(321)) AS k FROM system.one) INNER JOIN (SELECT toUInt256(321) AS k) t USING k; +SELECT arrayEnumerateUniq([toNullable(toUInt256(321)), toNullable(toUInt256(456)), toNullable(toUInt256(321))]); + +SELECT toNullable(toInt256(321)) IN (NULL); +SELECT toNullable(toInt256(321)) AS k GROUP BY k; +SELECT k FROM (SELECT toNullable(toInt256(321)) AS k FROM system.one) INNER JOIN (SELECT toInt256(321) AS k) t USING k; +SELECT arrayEnumerateUniq([toNullable(toInt256(321)), toNullable(toInt256(456)), toNullable(toInt256(321))]); + +-- SELECT toNullable(toUInt128(321)) IN (NULL); +-- SELECT toNullable(toUInt128(321)) AS k GROUP BY k; +-- SELECT toNullable(toUInt128(321)) AS k FROM system.one INNER JOIN (SELECT toUInt128(321) AS k) t USING k; +-- SELECT arrayEnumerateUniq([toNullable(toUInt128(321)), toNullable(toUInt128(456)), toNullable(toUInt128(321))]); + +SELECT toNullable(toInt128(321)) IN (NULL); +SELECT toNullable(toInt128(321)) AS k 
GROUP BY k; +SELECT k FROM (SELECT toNullable(toInt128(321)) AS k FROM system.one) INNER JOIN (SELECT toInt128(321) AS k) t USING k; +SELECT arrayEnumerateUniq([toNullable(toInt128(321)), toNullable(toInt128(456)), toNullable(toInt128(321))]); diff --git a/parser/testdata/01457_min_index_granularity_bytes_setting/ast.json b/parser/testdata/01457_min_index_granularity_bytes_setting/ast.json new file mode 100644 index 000000000..cedd60229 --- /dev/null +++ b/parser/testdata/01457_min_index_granularity_bytes_setting/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery invalid_min_index_granularity_bytes_setting (children 1)" + }, + { + "explain": " Identifier invalid_min_index_granularity_bytes_setting" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001899778, + "rows_read": 2, + "bytes_read": 138 + } +} diff --git a/parser/testdata/01457_min_index_granularity_bytes_setting/metadata.json b/parser/testdata/01457_min_index_granularity_bytes_setting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01457_min_index_granularity_bytes_setting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01457_min_index_granularity_bytes_setting/query.sql b/parser/testdata/01457_min_index_granularity_bytes_setting/query.sql new file mode 100644 index 000000000..4f5fcccd1 --- /dev/null +++ b/parser/testdata/01457_min_index_granularity_bytes_setting/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS invalid_min_index_granularity_bytes_setting; + +CREATE TABLE invalid_min_index_granularity_bytes_setting +( + id UInt64, + value String +) ENGINE MergeTree() +ORDER BY id SETTINGS index_granularity_bytes = 1, min_index_granularity_bytes = 1024; -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS valid_min_index_granularity_bytes_setting; + +CREATE TABLE valid_min_index_granularity_bytes_setting +( + id UInt64, + value String +) ENGINE MergeTree() +ORDER BY id SETTINGS index_granularity_bytes = 2024, min_index_granularity_bytes = 1024; + +INSERT INTO valid_min_index_granularity_bytes_setting SELECT number, concat('xxxxxxxxxx', toString(number)) FROM numbers(1000,1000); + +SELECT COUNT(*) from valid_min_index_granularity_bytes_setting WHERE value = 'xxxxxxxxxx1015'; + +DROP TABLE IF EXISTS valid_min_index_granularity_bytes_setting; diff --git a/parser/testdata/01457_order_by_limit/ast.json b/parser/testdata/01457_order_by_limit/ast.json new file mode 100644 index 000000000..a3eca75e3 --- /dev/null +++ b/parser/testdata/01457_order_by_limit/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery order_by_another (children 1)" + }, + { + "explain": " Identifier order_by_another" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001602259, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/01457_order_by_limit/metadata.json b/parser/testdata/01457_order_by_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01457_order_by_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01457_order_by_limit/query.sql b/parser/testdata/01457_order_by_limit/query.sql new file mode 100644 index 000000000..514aaeac4 --- /dev/null +++ b/parser/testdata/01457_order_by_limit/query.sql @@ -0,0 +1,30 @@ +drop table if exists order_by_another; + +create table order_by_another (a 
Nullable(UInt64), b UInt64) Engine = MergeTree order by tuple(); +insert into order_by_another values (1, 8), (1, 7), (1, 6), (1, 5), (1, 4), (1, 3), (1, 2), (1, 1); + +select 'asc nulls last, asc'; +select a, b from order_by_another order by a asc nulls last, b asc limit 4; + +select 'asc nulls first, asc'; +select a, b from order_by_another order by a asc nulls first, b asc limit 4; + +select 'desc nulls last, asc'; +select a, b from order_by_another order by a desc nulls last, b asc limit 4; + +select 'desc nulls first, asc'; +select a, b from order_by_another order by a desc nulls first, b asc limit 4; + +select 'asc nulls last, desc'; +select a, b from order_by_another order by a asc nulls last, b desc limit 4; + +select 'asc nulls first, desc'; +select a, b from order_by_another order by a asc nulls first, b desc limit 4; + +select 'desc nulls last, desc'; +select a, b from order_by_another order by a desc nulls last, b desc limit 4; + +select 'desc nulls first, desc'; +select a, b from order_by_another order by a desc nulls first, b desc limit 4; + +drop table if exists order_by_another; \ No newline at end of file diff --git a/parser/testdata/01457_order_by_nulls_first/ast.json b/parser/testdata/01457_order_by_nulls_first/ast.json new file mode 100644 index 000000000..698adef49 --- /dev/null +++ b/parser/testdata/01457_order_by_nulls_first/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery order_by_nulls_first (children 1)" + }, + { + "explain": " Identifier order_by_nulls_first" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001685377, + "rows_read": 2, + "bytes_read": 92 + } +} diff --git a/parser/testdata/01457_order_by_nulls_first/metadata.json b/parser/testdata/01457_order_by_nulls_first/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01457_order_by_nulls_first/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01457_order_by_nulls_first/query.sql b/parser/testdata/01457_order_by_nulls_first/query.sql new file mode 100644 index 000000000..100c87fbe --- /dev/null +++ b/parser/testdata/01457_order_by_nulls_first/query.sql @@ -0,0 +1,96 @@ +drop table if exists order_by_nulls_first; + +CREATE TABLE order_by_nulls_first +(diff Nullable(Int16), traf UInt64) +ENGINE = MergeTree ORDER BY tuple(); + +insert into order_by_nulls_first values (NULL,1),(NULL,0),(NULL,0),(NULL,0),(NULL,0),(NULL,0),(28,0),(0,0); + +SELECT + diff, + traf +FROM order_by_nulls_first +order by diff desc NULLS FIRST, traf +limit 1, 4; + +select '--- DESC NULLS FIRST, ASC'; + +SELECT + diff, + traf +FROM order_by_nulls_first +ORDER BY + diff DESC NULLS FIRST, + traf ASC; + +select '--- DESC NULLS LAST, ASC'; + +SELECT + diff, + traf +FROM order_by_nulls_first +ORDER BY + diff DESC NULLS LAST, + traf ASC; + +select '--- ASC NULLS FIRST, ASC'; + +SELECT + diff, + traf +FROM order_by_nulls_first +ORDER BY + diff ASC NULLS FIRST, + traf ASC; + +select '--- ASC NULLS LAST, ASC'; + +SELECT + diff, + traf +FROM order_by_nulls_first +ORDER BY + diff ASC NULLS LAST, + traf ASC; + +select '--- DESC NULLS FIRST, DESC'; + +SELECT + diff, + traf +FROM order_by_nulls_first +ORDER BY + diff DESC NULLS FIRST, + traf DESC; + +select '--- DESC NULLS LAST, DESC'; + +SELECT + diff, + traf +FROM order_by_nulls_first +ORDER BY + diff DESC NULLS LAST, + traf DESC; + +select '--- ASC NULLS FIRST, DESC'; + +SELECT + diff, + traf +FROM order_by_nulls_first +ORDER BY 
+ diff ASC NULLS FIRST, + traf DESC; + +select '--- ASC NULLS LAST, DESC'; + +SELECT + diff, + traf +FROM order_by_nulls_first +ORDER BY + diff ASC NULLS LAST, + traf DESC; + +drop table if exists order_by_nulls_first; \ No newline at end of file diff --git a/parser/testdata/01458_count_digits/ast.json b/parser/testdata/01458_count_digits/ast.json new file mode 100644 index 000000000..42398949f --- /dev/null +++ b/parser/testdata/01458_count_digits/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function countDigits (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function countDigits (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function countDigits (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_4.2" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001291395, + "rows_read": 22, + "bytes_read": 867 + } +} diff --git a/parser/testdata/01458_count_digits/metadata.json b/parser/testdata/01458_count_digits/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01458_count_digits/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01458_count_digits/query.sql b/parser/testdata/01458_count_digits/query.sql new file mode 100644 index 000000000..4a95af38d --- /dev/null +++ b/parser/testdata/01458_count_digits/query.sql @@ -0,0 +1,30 @@ +SELECT countDigits(toDecimal32(0, 0)), countDigits(toDecimal32(42, 0)), countDigits(toDecimal32(4.2, 1)), + countDigits(toDecimal64(0, 0)), countDigits(toDecimal64(42, 0)), countDigits(toDecimal64(4.2, 2)), + countDigits(toDecimal128(0, 0)), countDigits(toDecimal128(42, 0)), countDigits(toDecimal128(4.2, 3)); + +SELECT countDigits(materialize(toDecimal32(4.2, 1))), + countDigits(materialize(toDecimal64(4.2, 2))), + countDigits(materialize(toDecimal128(4.2, 3))); + +SELECT countDigits(toDecimal32(1, 9)), countDigits(toDecimal32(-1, 9)), + countDigits(toDecimal64(1, 18)), countDigits(toDecimal64(-1, 18)), + countDigits(toDecimal128(1, 38)), countDigits(toDecimal128(-1, 38)); + +SELECT countDigits(toInt8(42)), countDigits(toInt8(-42)), countDigits(toUInt8(42)), + countDigits(toInt16(42)), countDigits(toInt16(-42)), countDigits(toUInt16(42)), + countDigits(toInt32(42)), countDigits(toInt32(-42)), countDigits(toUInt32(42)), + countDigits(toInt64(42)), countDigits(toInt64(-42)), countDigits(toUInt64(42)); + +SELECT countDigits(toInt8(0)), countDigits(toInt8(0)), countDigits(toUInt8(0)), + countDigits(toInt16(0)), countDigits(toInt16(0)), countDigits(toUInt16(0)), + countDigits(toInt32(0)), countDigits(toInt32(0)), 
countDigits(toUInt32(0)), + countDigits(toInt64(0)), countDigits(toInt64(0)), countDigits(toUInt64(0)); + +SELECT countDigits(toInt8(127)), countDigits(toInt8(-128)), countDigits(toUInt8(255)), + countDigits(toInt16(32767)), countDigits(toInt16(-32768)), countDigits(toUInt16(65535)), + countDigits(toInt32(2147483647)), countDigits(toInt32(-2147483648)), countDigits(toUInt32(4294967295)), + countDigits(toInt64(9223372036854775807)), countDigits(toInt64(-9223372036854775808)), countDigits(toUInt64(18446744073709551615)); + +SELECT countDigits(toNullable(toDecimal32(4.2, 1))), countDigits(materialize(toNullable(toDecimal32(4.2, 2)))), + countDigits(toNullable(toDecimal64(4.2, 3))), countDigits(materialize(toNullable(toDecimal64(4.2, 4)))), + countDigits(toNullable(toDecimal128(4.2, 5))), countDigits(materialize(toNullable(toDecimal128(4.2, 6)))); diff --git a/parser/testdata/01458_is_decimal_overflow/ast.json b/parser/testdata/01458_is_decimal_overflow/ast.json new file mode 100644 index 000000000..a0bcb9f4a --- /dev/null +++ b/parser/testdata/01458_is_decimal_overflow/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function isDecimalOverflow (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001337575, + "rows_read": 11, + "bytes_read": 423 + } +} diff --git a/parser/testdata/01458_is_decimal_overflow/metadata.json b/parser/testdata/01458_is_decimal_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01458_is_decimal_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01458_is_decimal_overflow/query.sql b/parser/testdata/01458_is_decimal_overflow/query.sql new file mode 100644 index 000000000..abf96e714 --- /dev/null +++ b/parser/testdata/01458_is_decimal_overflow/query.sql @@ -0,0 +1,100 @@ +SELECT isDecimalOverflow(toDecimal32(0, 0), 0), + isDecimalOverflow(toDecimal64(0, 0), 0), + isDecimalOverflow(toDecimal128(0, 0), 0); + +SELECT isDecimalOverflow(toDecimal32(1000000000, 0), 9), + isDecimalOverflow(toDecimal32(1000000000, 0)), + isDecimalOverflow(toDecimal32(-1000000000, 0), 9), + isDecimalOverflow(toDecimal32(-1000000000, 0)); +SELECT isDecimalOverflow(toDecimal32(999999999, 0), 9), + isDecimalOverflow(toDecimal32(999999999, 0)), + isDecimalOverflow(toDecimal32(-999999999, 0), 9), + isDecimalOverflow(toDecimal32(-999999999, 0)); +SELECT isDecimalOverflow(toDecimal32(999999999, 0), 8), + isDecimalOverflow(toDecimal32(10, 0), 1), + isDecimalOverflow(toDecimal32(1, 0), 0), + isDecimalOverflow(toDecimal32(-999999999, 0), 8), + isDecimalOverflow(toDecimal32(-10, 0), 1), + isDecimalOverflow(toDecimal32(-1, 0), 0); + +SELECT isDecimalOverflow(materialize(toDecimal32(1000000000, 0)), 9), + isDecimalOverflow(materialize(toDecimal32(1000000000, 0))), + isDecimalOverflow(materialize(toDecimal32(-1000000000, 0)), 9), + isDecimalOverflow(materialize(toDecimal32(-1000000000, 0))); +SELECT 
isDecimalOverflow(materialize(toDecimal32(999999999, 0)), 9), + isDecimalOverflow(materialize(toDecimal32(999999999, 0))), + isDecimalOverflow(materialize(toDecimal32(-999999999, 0)), 9), + isDecimalOverflow(materialize(toDecimal32(-999999999, 0))); +SELECT isDecimalOverflow(materialize(toDecimal32(999999999, 0)), 8), + isDecimalOverflow(materialize(toDecimal32(10, 0)), 1), + isDecimalOverflow(materialize(toDecimal32(1, 0)), 0), + isDecimalOverflow(materialize(toDecimal32(-999999999, 0)), 8), + isDecimalOverflow(materialize(toDecimal32(-10, 0)), 1), + isDecimalOverflow(materialize(toDecimal32(-1, 0)), 0); + +SELECT isDecimalOverflow(toDecimal64(1000000000000000000, 0), 18), + isDecimalOverflow(toDecimal64(1000000000000000000, 0)), + isDecimalOverflow(toDecimal64(-1000000000000000000, 0), 18), + isDecimalOverflow(toDecimal64(-1000000000000000000, 0)); +SELECT isDecimalOverflow(toDecimal64(999999999999999999, 0), 18), + isDecimalOverflow(toDecimal64(999999999999999999, 0)), + isDecimalOverflow(toDecimal64(-999999999999999999, 0), 18), + isDecimalOverflow(toDecimal64(-999999999999999999, 0)); +SELECT isDecimalOverflow(toDecimal64(999999999999999999, 0), 17), + isDecimalOverflow(toDecimal64(10, 0), 1), + isDecimalOverflow(toDecimal64(1, 0), 0), + isDecimalOverflow(toDecimal64(-999999999999999999, 0), 17), + isDecimalOverflow(toDecimal64(-10, 0), 1), + isDecimalOverflow(toDecimal64(-1, 0), 0); + +SELECT isDecimalOverflow(materialize(toDecimal64(1000000000000000000, 0)), 18), + isDecimalOverflow(materialize(toDecimal64(1000000000000000000, 0))), + isDecimalOverflow(materialize(toDecimal64(-1000000000000000000, 0)), 18), + isDecimalOverflow(materialize(toDecimal64(-1000000000000000000, 0))); +SELECT isDecimalOverflow(materialize(toDecimal64(999999999999999999, 0)), 18), + isDecimalOverflow(materialize(toDecimal64(999999999999999999, 0))), + isDecimalOverflow(materialize(toDecimal64(-999999999999999999, 0)), 18), + isDecimalOverflow(materialize(toDecimal64(-999999999999999999, 0))); +SELECT isDecimalOverflow(materialize(toDecimal64(999999999999999999, 0)), 17), + isDecimalOverflow(materialize(toDecimal64(10, 0)), 1), + isDecimalOverflow(materialize(toDecimal64(1, 0)), 0), + isDecimalOverflow(materialize(toDecimal64(-999999999999999999, 0)), 17), + isDecimalOverflow(materialize(toDecimal64(-10, 0)), 1), + isDecimalOverflow(materialize(toDecimal64(-1, 0)), 0); + +SELECT isDecimalOverflow(toDecimal128('99999999999999999999999999999999999999', 0) + 1, 38), + isDecimalOverflow(toDecimal128('99999999999999999999999999999999999999', 0) + 1), + isDecimalOverflow(toDecimal128('-99999999999999999999999999999999999999', 0) - 1, 38), + isDecimalOverflow(toDecimal128('-99999999999999999999999999999999999999', 0) - 1); +SELECT isDecimalOverflow(toDecimal128('99999999999999999999999999999999999999', 0), 38), + isDecimalOverflow(toDecimal128('99999999999999999999999999999999999999', 0)), + isDecimalOverflow(toDecimal128('-99999999999999999999999999999999999999', 0), 38), + isDecimalOverflow(toDecimal128('-99999999999999999999999999999999999999', 0)); +SELECT isDecimalOverflow(toDecimal128('99999999999999999999999999999999999999', 0), 37), + isDecimalOverflow(toDecimal128('10', 0), 1), + isDecimalOverflow(toDecimal128('1', 0), 0), + isDecimalOverflow(toDecimal128('-99999999999999999999999999999999999999', 0), 37), + isDecimalOverflow(toDecimal128('-10', 0), 1), + isDecimalOverflow(toDecimal128('-1', 0), 0); + +SELECT isDecimalOverflow(materialize(toDecimal128('99999999999999999999999999999999999999', 0)) + 1, 38), 
+ isDecimalOverflow(materialize(toDecimal128('99999999999999999999999999999999999999', 0)) + 1), + isDecimalOverflow(materialize(toDecimal128('-99999999999999999999999999999999999999', 0)) - 1, 38), + isDecimalOverflow(materialize(toDecimal128('-99999999999999999999999999999999999999', 0)) - 1); +SELECT isDecimalOverflow(materialize(toDecimal128('99999999999999999999999999999999999999', 0)), 38), + isDecimalOverflow(materialize(toDecimal128('99999999999999999999999999999999999999', 0))), + isDecimalOverflow(materialize(toDecimal128('-99999999999999999999999999999999999999', 0)), 38), + isDecimalOverflow(materialize(toDecimal128('-99999999999999999999999999999999999999', 0))); +SELECT isDecimalOverflow(materialize(toDecimal128('99999999999999999999999999999999999999', 0)), 37), + isDecimalOverflow(materialize(toDecimal128('10', 0)), 1), + isDecimalOverflow(materialize(toDecimal128('1', 0)), 0), + isDecimalOverflow(materialize(toDecimal128('-99999999999999999999999999999999999999', 0)), 37), + isDecimalOverflow(materialize(toDecimal128('-10', 0)), 1), + isDecimalOverflow(materialize(toDecimal128('-1', 0)), 0); + +SELECT isDecimalOverflow(toNullable(toDecimal32(42, 0)), 1), + isDecimalOverflow(materialize(toNullable(toDecimal32(42, 0))), 2), + isDecimalOverflow(toNullable(toDecimal64(42, 0)), 1), + isDecimalOverflow(materialize(toNullable(toDecimal64(42, 0))), 2), + isDecimalOverflow(toNullable(toDecimal128(42, 0)), 1), + isDecimalOverflow(materialize(toNullable(toDecimal128(42, 0))), 2); diff --git a/parser/testdata/01458_named_tuple_millin/ast.json b/parser/testdata/01458_named_tuple_millin/ast.json new file mode 100644 index 000000000..8ded8804d --- /dev/null +++ b/parser/testdata/01458_named_tuple_millin/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tuple (children 1)" + }, + { + "explain": " Identifier tuple" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001057839, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/01458_named_tuple_millin/metadata.json b/parser/testdata/01458_named_tuple_millin/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01458_named_tuple_millin/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01458_named_tuple_millin/query.sql b/parser/testdata/01458_named_tuple_millin/query.sql new file mode 100644 index 000000000..05e7cf899 --- /dev/null +++ b/parser/testdata/01458_named_tuple_millin/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS tuple; + +CREATE TABLE tuple +( + `j` Tuple(a Int8, b String) +) +ENGINE = Memory; + +SHOW CREATE TABLE tuple; +DESC tuple; +DROP TABLE tuple; + +CREATE TABLE tuple ENGINE = Memory AS SELECT CAST((1, 'Test'), 'Tuple(a Int8, b String)') AS j; + +SHOW CREATE TABLE tuple; +DESC tuple; +DROP TABLE tuple; diff --git a/parser/testdata/01459_decimal_casts/ast.json b/parser/testdata/01459_decimal_casts/ast.json new file mode 100644 index 000000000..e0d4c0108 --- /dev/null +++ b/parser/testdata/01459_decimal_casts/ast.json @@ -0,0 +1,109 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Function toUInt32 (alias y) (children 1)" + }, + { + "explain": " ExpressionList (children 
1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function toDecimal64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Function toDecimal128 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Literal UInt64_6" + }, + { + "explain": " Function toDecimal256 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Literal UInt64_7" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 29, + + "statistics": + { + "elapsed": 0.001286131, + "rows_read": 29, + "bytes_read": 1090 + } +} diff --git a/parser/testdata/01459_decimal_casts/metadata.json b/parser/testdata/01459_decimal_casts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01459_decimal_casts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01459_decimal_casts/query.sql b/parser/testdata/01459_decimal_casts/query.sql new file mode 100644 index 000000000..f262ef201 --- /dev/null +++ b/parser/testdata/01459_decimal_casts/query.sql @@ -0,0 +1,16 @@ +SELECT toUInt32(number) y, toDecimal32(y, 1), toDecimal64(y, 5), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers(1); +SELECT toInt32(number) y, toDecimal32(y, 1), toDecimal64(y, 5), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers(1, 1); +SELECT toInt64(number) y, toDecimal32(y, 1), toDecimal64(y, 5), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers(2, 1); +SELECT toUInt64(number) y, toDecimal32(y, 1), toDecimal64(y, 5), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers(3, 1); +SELECT toInt128(number) y, toDecimal32(y, 1), toDecimal64(y, 5), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers(4, 1); +SELECT toInt256(number) y, toDecimal32(y, 1), toDecimal64(y, 5), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers(5, 1); +SELECT toUInt256(number) y, toDecimal32(y, 1), toDecimal64(y, 5), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers(6, 1); +SELECT toFloat32(number) y, toDecimal32(y, 1), toDecimal64(y, 5), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers(7, 1); +SELECT toFloat64(number) y, toDecimal32(y, 1), toDecimal64(y, 5), toDecimal128(y, 6), toDecimal256(y, 7) FROM numbers(8, 1); + +SELECT toInt32(toDecimal32(number, 1)), toInt64(toDecimal32(number, 1)), toInt128(toDecimal32(number, 1)) FROM numbers(9, 1); +SELECT toInt32(toDecimal64(number, 2)), toInt64(toDecimal64(number, 2)), toInt128(toDecimal64(number, 2)) FROM numbers(10, 1); +SELECT toInt32(toDecimal128(number, 3)), toInt64(toDecimal128(number, 3)), toInt128(toDecimal128(number, 3)) FROM numbers(11, 1); +SELECT toFloat32(toDecimal32(number, 1)), toFloat32(toDecimal64(number, 2)), toFloat32(toDecimal128(number, 3)) FROM numbers(12, 1); +SELECT toFloat64(toDecimal32(number, 1)), toFloat64(toDecimal64(number, 2)), toFloat64(toDecimal128(number, 3)) FROM numbers(13, 1); +SELECT 
toInt256(toDecimal32(number, 1)), toInt256(toDecimal64(number, 2)), toInt256(toDecimal128(number, 3)) FROM numbers(14, 1); diff --git a/parser/testdata/01459_default_value_of_argument_type_nullptr_dereference/ast.json b/parser/testdata/01459_default_value_of_argument_type_nullptr_dereference/ast.json new file mode 100644 index 000000000..4b1344de7 --- /dev/null +++ b/parser/testdata/01459_default_value_of_argument_type_nullptr_dereference/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function defaultValueOfTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function FQDN (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.00113181, + "rows_read": 8, + "bytes_read": 314 + } +} diff --git a/parser/testdata/01459_default_value_of_argument_type_nullptr_dereference/metadata.json b/parser/testdata/01459_default_value_of_argument_type_nullptr_dereference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01459_default_value_of_argument_type_nullptr_dereference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01459_default_value_of_argument_type_nullptr_dereference/query.sql b/parser/testdata/01459_default_value_of_argument_type_nullptr_dereference/query.sql new file mode 100644 index 000000000..ab5c3e4a3 --- /dev/null +++ b/parser/testdata/01459_default_value_of_argument_type_nullptr_dereference/query.sql @@ -0,0 +1 @@ +SELECT defaultValueOfTypeName(FQDN()); -- { serverError ILLEGAL_COLUMN } diff --git a/parser/testdata/01460_allow_dollar_and_number_in_identifier/ast.json b/parser/testdata/01460_allow_dollar_and_number_in_identifier/ast.json new file mode 100644 index 000000000..29883e92f --- /dev/null +++ b/parser/testdata/01460_allow_dollar_and_number_in_identifier/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias $alias$name$)" + }, + { + "explain": " Identifier TSVWithNames" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001468206, + "rows_read": 6, + "bytes_read": 230 + } +} diff --git a/parser/testdata/01460_allow_dollar_and_number_in_identifier/metadata.json b/parser/testdata/01460_allow_dollar_and_number_in_identifier/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01460_allow_dollar_and_number_in_identifier/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01460_allow_dollar_and_number_in_identifier/query.sql b/parser/testdata/01460_allow_dollar_and_number_in_identifier/query.sql new file mode 100644 index 000000000..5a3aa9968 --- /dev/null +++ b/parser/testdata/01460_allow_dollar_and_number_in_identifier/query.sql @@ -0,0 +1,2 @@ +SELECT 1 AS $alias$name$ FORMAT TSVWithNames; +SELECT 1 AS 1alias1name1 FORMAT TSVWithNames; diff --git a/parser/testdata/01460_mark_inclusion_search_crash/ast.json 
b/parser/testdata/01460_mark_inclusion_search_crash/ast.json new file mode 100644 index 000000000..2586c77b2 --- /dev/null +++ b/parser/testdata/01460_mark_inclusion_search_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery pk (children 1)" + }, + { + "explain": " Identifier pk" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001460094, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01460_mark_inclusion_search_crash/metadata.json b/parser/testdata/01460_mark_inclusion_search_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01460_mark_inclusion_search_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01460_mark_inclusion_search_crash/query.sql b/parser/testdata/01460_mark_inclusion_search_crash/query.sql new file mode 100644 index 000000000..a6dba8d0c --- /dev/null +++ b/parser/testdata/01460_mark_inclusion_search_crash/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS pk; + +CREATE TABLE pk (d Date DEFAULT '2000-01-01', x DateTime, y UInt64, z UInt64) ENGINE = MergeTree() PARTITION BY d ORDER BY (toStartOfMinute(x), y, z) SETTINGS index_granularity_bytes=19, min_index_granularity_bytes=1, write_final_mark = 0; -- one row per granule + +INSERT INTO pk (x, y, z) VALUES (1, 11, 1235), (2, 11, 4395), (3, 22, 3545), (4, 22, 6984), (5, 33, 4596), (61, 11, 4563), (62, 11, 4578), (63, 11, 3572), (64, 22, 5786), (65, 22, 5786), (66, 22, 2791), (67, 22, 2791), (121, 33, 2791), (122, 33, 2791), (123, 33, 1235), (124, 44, 4935), (125, 44, 4578), (126, 55, 5786), (127, 55, 2791), (128, 55, 1235); + +SET max_block_size = 1; +SET max_rows_to_read = 5; + +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures.
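+-- As a hypothetical illustration (values taken from this test): with
+-- max_rows_to_read = 5, a replica that skipped index analysis would scan all
+-- 20 inserted rows of pk and fail with a TOO_MANY_ROWS-style error, instead of
+-- pruning the empty range in the SELECT below down to 0 rows.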
+SET parallel_replicas_index_analysis_only_on_coordinator = 0; + +SELECT toUInt32(x), y, z FROM pk WHERE (x >= toDateTime(100000)) AND (x <= toDateTime(90000)); + +DROP TABLE IF EXISTS pk; diff --git a/parser/testdata/01461_alter_table_function/ast.json b/parser/testdata/01461_alter_table_function/ast.json new file mode 100644 index 000000000..90befe643 --- /dev/null +++ b/parser/testdata/01461_alter_table_function/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_from_remote (children 1)" + }, + { + "explain": " Identifier table_from_remote" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.0010409, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/01461_alter_table_function/metadata.json b/parser/testdata/01461_alter_table_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01461_alter_table_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01461_alter_table_function/query.sql b/parser/testdata/01461_alter_table_function/query.sql new file mode 100644 index 000000000..95f488c37 --- /dev/null +++ b/parser/testdata/01461_alter_table_function/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS table_from_remote; +DROP TABLE IF EXISTS table_from_select; +DROP TABLE IF EXISTS table_from_numbers; + +CREATE TABLE table_from_remote AS remote('localhost', 'system', 'numbers'); + +SHOW CREATE TABLE table_from_remote; + +ALTER TABLE table_from_remote ADD COLUMN col UInt8; + +SHOW CREATE TABLE table_from_remote; + +CREATE TABLE table_from_numbers AS numbers(1000); + +SHOW CREATE TABLE table_from_numbers; + +ALTER TABLE table_from_numbers ADD COLUMN col UInt8; --{serverError NOT_IMPLEMENTED} + +SHOW CREATE TABLE table_from_numbers; + +CREATE TABLE table_from_select ENGINE = MergeTree() ORDER BY tuple() AS SELECT number from system.numbers LIMIT 1; + +SHOW CREATE TABLE table_from_select; + +ALTER TABLE table_from_select ADD COLUMN col UInt8; + +SHOW CREATE TABLE table_from_select; + +DROP TABLE IF EXISTS table_from_remote; +DROP TABLE IF EXISTS table_from_select; +DROP TABLE IF EXISTS table_from_numbers; diff --git a/parser/testdata/01461_query_start_time_microseconds/ast.json b/parser/testdata/01461_query_start_time_microseconds/ast.json new file mode 100644 index 000000000..65c947508 --- /dev/null +++ b/parser/testdata/01461_query_start_time_microseconds/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001495021, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01461_query_start_time_microseconds/metadata.json b/parser/testdata/01461_query_start_time_microseconds/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01461_query_start_time_microseconds/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01461_query_start_time_microseconds/query.sql b/parser/testdata/01461_query_start_time_microseconds/query.sql new file mode 100644 index 000000000..b410ce07b --- /dev/null +++ b/parser/testdata/01461_query_start_time_microseconds/query.sql @@ -0,0 +1,49 @@ +SET log_queries = 1; +SELECT '01461_query_log_query_start_time_milliseconds_test'; +SYSTEM FLUSH LOGS query_log; +-- assumes that the query_start_time field is already accurate. 
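+-- The WITH clause below reads both start-time columns of the previous SELECT
+-- from query_log and treats them as consistent when they differ by 0 seconds
+-- after truncating the microsecond value with toDateTime. A minimal standalone
+-- sketch of the same comparison (illustrative only; now64 at scale 6 stands in
+-- for query_start_time_microseconds):
+-- SELECT if(dateDiff('second', toDateTime(now64(6)), toDateTime(now())) = 0, 'ok', 'fail');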
+WITH ( + ( + SELECT query_start_time_microseconds + FROM system.query_log + WHERE current_database = currentDatabase() + AND query like 'SELECT \'01461_query%' + AND event_date >= yesterday() + ORDER BY query_start_time DESC + LIMIT 1 + ) AS time_with_microseconds, + ( + SELECT query_start_time + FROM system.query_log + WHERE current_database = currentDatabase() + AND query like 'SELECT \'01461_query%' + AND event_date >= yesterday() + ORDER BY query_start_time DESC + LIMIT 1 + ) AS t) +SELECT if(dateDiff('second', toDateTime(time_with_microseconds), toDateTime(t)) = 0, 'ok', 'fail'); -- + +SET log_query_threads = 1; +SELECT '01461_query_thread_log_query_start_time_milliseconds_test'; +SYSTEM FLUSH LOGS query_thread_log; +-- assumes that the query_start_time field is already accurate. +WITH ( + ( + SELECT query_start_time_microseconds + FROM system.query_thread_log + WHERE current_database = currentDatabase() + AND query like 'SELECT \'01461_query%' + AND event_date >= yesterday() + ORDER BY query_start_time DESC + LIMIT 1 + ) AS time_with_microseconds, + ( + SELECT query_start_time + FROM system.query_thread_log + WHERE current_database = currentDatabase() + AND query like 'SELECT \'01461_query%' + AND event_date >= yesterday() + ORDER BY query_start_time DESC + LIMIT 1 + ) AS t) +SELECT if(dateDiff('second', toDateTime(time_with_microseconds), toDateTime(t)) = 0, 'ok', 'fail'); -- diff --git a/parser/testdata/01462_test_codec_on_alias/ast.json b/parser/testdata/01462_test_codec_on_alias/ast.json new file mode 100644 index 000000000..e1617e4cb --- /dev/null +++ b/parser/testdata/01462_test_codec_on_alias/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery compression_codec_on_alias (children 1)" + }, + { + "explain": " Identifier compression_codec_on_alias" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001066811, + "rows_read": 2, + "bytes_read": 104 + } +} diff --git a/parser/testdata/01462_test_codec_on_alias/metadata.json b/parser/testdata/01462_test_codec_on_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01462_test_codec_on_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01462_test_codec_on_alias/query.sql b/parser/testdata/01462_test_codec_on_alias/query.sql new file mode 100644 index 000000000..b09dfa50b --- /dev/null +++ b/parser/testdata/01462_test_codec_on_alias/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS compression_codec_on_alias; + +select 'create table compression_codec_on_alias with CODEC on ALIAS type'; + +CREATE TABLE compression_codec_on_alias ( + `c0` ALIAS c1 CODEC(ZSTD), + c1 UInt64 +) ENGINE = MergeTree() PARTITION BY c0 ORDER BY c1; -- { serverError BAD_ARGUMENTS } + +select 'create table compression_codec_on_alias with proper CODEC'; + +CREATE TABLE compression_codec_on_alias ( + c0 UInt64 CODEC(ZSTD), + c1 UInt64 +) ENGINE = MergeTree() PARTITION BY c0 ORDER BY c1; -- success + +select 'alter table compression_codec_on_alias add column (ALIAS type) with CODEC'; + +ALTER TABLE compression_codec_on_alias ADD COLUMN `c3` ALIAS c2 CODEC(ZSTD) AFTER c2; -- { serverError BAD_ARGUMENTS } + +select 'alter table compression_codec_on_alias add column (NOT ALIAS type) with CODEC'; + +ALTER TABLE compression_codec_on_alias ADD COLUMN c2 UInt64 CODEC(ZSTD) AFTER c1; -- success + +DROP TABLE IF EXISTS compression_codec_on_alias; diff --git 
a/parser/testdata/01463_resample_overflow/ast.json b/parser/testdata/01463_resample_overflow/ast.json new file mode 100644 index 000000000..97f110385 --- /dev/null +++ b/parser/testdata/01463_resample_overflow/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function groupArrayResample (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function toInt64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal Int64_-9223372036854775808" + }, + { + "explain": " Literal UInt64_9223372036854775807" + }, + { + "explain": " Literal UInt64_9223372036854775807" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_7" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001237909, + "rows_read": 20, + "bytes_read": 826 + } +} diff --git a/parser/testdata/01463_resample_overflow/metadata.json b/parser/testdata/01463_resample_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01463_resample_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01463_resample_overflow/query.sql b/parser/testdata/01463_resample_overflow/query.sql new file mode 100644 index 000000000..872f46628 --- /dev/null +++ b/parser/testdata/01463_resample_overflow/query.sql @@ -0,0 +1 @@ +select groupArrayResample(-9223372036854775808, 9223372036854775807, 9223372036854775807)(number, toInt64(number)) FROM numbers(7); -- { serverError ARGUMENT_OUT_OF_BOUND } diff --git a/parser/testdata/01465_ttl_recompression/ast.json b/parser/testdata/01465_ttl_recompression/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01465_ttl_recompression/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01465_ttl_recompression/metadata.json b/parser/testdata/01465_ttl_recompression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01465_ttl_recompression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01465_ttl_recompression/query.sql b/parser/testdata/01465_ttl_recompression/query.sql new file mode 100644 index 000000000..c1e09e64d --- /dev/null +++ b/parser/testdata/01465_ttl_recompression/query.sql @@ -0,0 +1,80 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS recompression_table; + +CREATE TABLE recompression_table +( + dt DateTime, + key UInt64, + value String + +) ENGINE MergeTree() +ORDER BY tuple() +PARTITION BY key +TTL dt + INTERVAL 1 MONTH RECOMPRESS CODEC(ZSTD(17)), dt + INTERVAL 1 YEAR RECOMPRESS CODEC(LZ4HC(10)) +SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, min_bytes_for_full_part_storage = 0; + +SHOW CREATE TABLE recompression_table; + +SYSTEM STOP TTL MERGES recompression_table; + +INSERT INTO recompression_table SELECT 
now(), 1, toString(number) from numbers(1000); + +INSERT INTO recompression_table SELECT now() - INTERVAL 2 MONTH, 2, toString(number) from numbers(1000, 1000); + +INSERT INTO recompression_table SELECT now() - INTERVAL 2 YEAR, 3, toString(number) from numbers(2000, 1000); + +SELECT COUNT() FROM recompression_table; + +SELECT substring(name, 1, length(name) - 2), default_compression_codec FROM system.parts WHERE table = 'recompression_table' and active = 1 and database = currentDatabase() ORDER BY name; + +OPTIMIZE TABLE recompression_table FINAL; + +-- the merge level and mutation in the part name are not important +SELECT substring(name, 1, length(name) - 2), default_compression_codec FROM system.parts WHERE table = 'recompression_table' and active = 1 and database = currentDatabase() ORDER BY name; + +ALTER TABLE recompression_table MODIFY TTL dt + INTERVAL 1 DAY RECOMPRESS CODEC(ZSTD(12)) SETTINGS mutations_sync = 2; + +SHOW CREATE TABLE recompression_table; + +SELECT substring(name, 1, length(name) - 4), default_compression_codec FROM system.parts WHERE table = 'recompression_table' and active = 1 and database = currentDatabase() ORDER BY name; + +SYSTEM START TTL MERGES recompression_table; +-- Additional merge can happen here +OPTIMIZE TABLE recompression_table FINAL; + +-- the merge level and mutation in the part name are not important +SELECT substring(name, 1, length(name) - 4), default_compression_codec FROM system.parts WHERE table = 'recompression_table' and active = 1 and database = currentDatabase() ORDER BY name; + +SELECT substring(name, 1, length(name) - 4), recompression_ttl_info.expression FROM system.parts WHERE table = 'recompression_table' and active = 1 and database = currentDatabase() ORDER BY name; + +DROP TABLE IF EXISTS recompression_table; + +CREATE TABLE recompression_table_compact +( + dt DateTime, + key UInt64, + value String + +) ENGINE MergeTree() +ORDER BY tuple() +PARTITION BY key +TTL dt + INTERVAL 1 MONTH RECOMPRESS CODEC(ZSTD(17)), dt + INTERVAL 1 YEAR RECOMPRESS CODEC(LZ4HC(10)) +SETTINGS min_rows_for_wide_part = 10000, min_bytes_for_full_part_storage = 0; + +SYSTEM STOP TTL MERGES recompression_table_compact; + +INSERT INTO recompression_table_compact SELECT now(), 1, toString(number) from numbers(1000); + +INSERT INTO recompression_table_compact SELECT now() - INTERVAL 2 MONTH, 2, toString(number) from numbers(1000, 1000); + +INSERT INTO recompression_table_compact SELECT now() - INTERVAL 2 YEAR, 3, toString(number) from numbers(2000, 1000); + +SELECT substring(name, 1, length(name) - 2), default_compression_codec FROM system.parts WHERE table = 'recompression_table_compact' and active = 1 and database = currentDatabase() ORDER BY name; + +ALTER TABLE recompression_table_compact MODIFY TTL dt + INTERVAL 1 MONTH RECOMPRESS CODEC(ZSTD(12)) SETTINGS mutations_sync = 2; -- the mutation affects all columns, so the codec changes + +-- the merge level and mutation in the part name are not important +SELECT substring(name, 1, length(name) - 4), default_compression_codec FROM system.parts WHERE table = 'recompression_table_compact' and active = 1 and database = currentDatabase() ORDER BY name; + +DROP TABLE recompression_table_compact; diff --git a/parser/testdata/01470_columns_transformers/ast.json b/parser/testdata/01470_columns_transformers/ast.json new file mode 100644 index 000000000..3079ddb44 --- /dev/null +++ b/parser/testdata/01470_columns_transformers/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"DropQuery columns_transformers (children 1)" + }, + { + "explain": " Identifier columns_transformers" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001603351, + "rows_read": 2, + "bytes_read": 92 + } +} diff --git a/parser/testdata/01470_columns_transformers/metadata.json b/parser/testdata/01470_columns_transformers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01470_columns_transformers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01470_columns_transformers/query.sql b/parser/testdata/01470_columns_transformers/query.sql new file mode 100644 index 000000000..021582dc0 --- /dev/null +++ b/parser/testdata/01470_columns_transformers/query.sql @@ -0,0 +1,59 @@ +DROP TABLE IF EXISTS columns_transformers; + +CREATE TABLE columns_transformers (i Int64, j Int16, k Int64) Engine=TinyLog; +INSERT INTO columns_transformers VALUES (100, 10, 324), (120, 8, 23); + +SELECT * APPLY(sum) from columns_transformers; +SELECT * APPLY sum from columns_transformers; +SELECT columns_transformers.* APPLY(avg) from columns_transformers; +SELECT a.* APPLY(toDate) APPLY(any) from columns_transformers a; +SELECT COLUMNS('[jk]') APPLY(toString) APPLY(length) from columns_transformers; + +SELECT * EXCEPT(i) APPLY(sum) from columns_transformers; +SELECT columns_transformers.* EXCEPT(j) APPLY(avg) from columns_transformers; +-- EXCEPT after APPLY will not match anything +SELECT a.* APPLY(toDate) EXCEPT(i, j) APPLY(any) from columns_transformers a; + +SELECT * EXCEPT STRICT i from columns_transformers; +SELECT * EXCEPT STRICT (i, j) from columns_transformers; +SELECT * EXCEPT STRICT i, j1 from columns_transformers; -- { serverError UNKNOWN_IDENTIFIER } +SELECT * EXCEPT STRICT(i, j1) from columns_transformers; -- { serverError NO_SUCH_COLUMN_IN_TABLE , BAD_ARGUMENTS } +SELECT * REPLACE STRICT i + 1 AS i from columns_transformers; +SELECT * REPLACE STRICT(i + 1 AS col) from columns_transformers; -- { serverError NO_SUCH_COLUMN_IN_TABLE, BAD_ARGUMENTS } +SELECT * REPLACE(i + 1 AS i) APPLY(sum) from columns_transformers; +SELECT columns_transformers.* REPLACE(j + 2 AS j, i + 1 AS i) APPLY(avg) from columns_transformers; +SELECT columns_transformers.* REPLACE(j + 1 AS j, j + 2 AS j) APPLY(avg) from columns_transformers; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- REPLACE after APPLY will not match anything +SELECT a.* APPLY(toDate) REPLACE(i + 1 AS i) APPLY(any) from columns_transformers a; +SELECT a.* APPLY(toDate) REPLACE STRICT(i + 1 AS i) APPLY(any) from columns_transformers a; -- { serverError NO_SUCH_COLUMN_IN_TABLE, BAD_ARGUMENTS } + +EXPLAIN SYNTAX SELECT * APPLY(sum) from columns_transformers; +EXPLAIN SYNTAX SELECT columns_transformers.* APPLY(avg) from columns_transformers; +EXPLAIN SYNTAX SELECT a.* APPLY(toDate) APPLY(any) from columns_transformers a; +EXPLAIN SYNTAX SELECT COLUMNS('[jk]') APPLY(toString) APPLY(length) from columns_transformers; +EXPLAIN SYNTAX SELECT * EXCEPT(i) APPLY(sum) from columns_transformers; +EXPLAIN SYNTAX SELECT columns_transformers.* EXCEPT(j) APPLY(avg) from columns_transformers; +EXPLAIN SYNTAX SELECT a.* APPLY(toDate) EXCEPT(i, j) APPLY(any) from columns_transformers a; +EXPLAIN SYNTAX SELECT * REPLACE(i + 1 AS i) APPLY(sum) from columns_transformers; +EXPLAIN AST SELECT * REPLACE(i + 1 AS i) APPLY(sum) from columns_transformers; +EXPLAIN SYNTAX SELECT sum(i + 1 AS m) from columns_transformers; +EXPLAIN AST SELECT sum(i + 1 AS m) from columns_transformers; +EXPLAIN SYNTAX SELECT 
columns_transformers.* REPLACE(j + 2 AS j, i + 1 AS i) APPLY(avg) from columns_transformers; +EXPLAIN SYNTAX SELECT a.* APPLY(toDate) REPLACE(i + 1 AS i) APPLY(any) from columns_transformers a; + +-- Multiple REPLACE in a row +EXPLAIN SYNTAX SELECT * REPLACE(i + 1 AS i) REPLACE(i + 1 AS i) from columns_transformers; + +-- Explicit column list +SELECT COLUMNS(i, j, k) APPLY(sum) from columns_transformers; +EXPLAIN SYNTAX SELECT COLUMNS(i, j, k) APPLY(sum) from columns_transformers; + +-- Multiple column matchers and transformers +SELECT i, j, COLUMNS(i, j, k) APPLY(toFloat64), COLUMNS(i, j) EXCEPT (i) from columns_transformers; +EXPLAIN SYNTAX SELECT i, j, COLUMNS(i, j, k) APPLY(toFloat64), COLUMNS(i, j) EXCEPT (i) from columns_transformers; + +-- APPLY with parameterized function +SELECT COLUMNS(i, j, k) APPLY(quantiles(0.5)) from columns_transformers; +EXPLAIN SYNTAX SELECT COLUMNS(i, j, k) APPLY(quantiles(0.5)) from columns_transformers; + +DROP TABLE columns_transformers; diff --git a/parser/testdata/01470_columns_transformers2/ast.json b/parser/testdata/01470_columns_transformers2/ast.json new file mode 100644 index 000000000..f31f679b2 --- /dev/null +++ b/parser/testdata/01470_columns_transformers2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery columns_transformers (children 1)" + }, + { + "explain": " Identifier columns_transformers" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001384668, + "rows_read": 2, + "bytes_read": 92 + } +} diff --git a/parser/testdata/01470_columns_transformers2/metadata.json b/parser/testdata/01470_columns_transformers2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01470_columns_transformers2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01470_columns_transformers2/query.sql b/parser/testdata/01470_columns_transformers2/query.sql new file mode 100644 index 000000000..88513d023 --- /dev/null +++ b/parser/testdata/01470_columns_transformers2/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS columns_transformers; + +CREATE TABLE columns_transformers (i int, j int, k int, a_bytes int, b_bytes int, c_bytes int) Engine=TinyLog; +INSERT INTO columns_transformers VALUES (100, 10, 324, 120, 8, 23); +SELECT * EXCEPT 'bytes', COLUMNS('bytes') APPLY formatReadableSize FROM columns_transformers; + +DROP TABLE IF EXISTS columns_transformers; + +SELECT * APPLY x->argMax(x, number) FROM numbers(1); +EXPLAIN SYNTAX SELECT * APPLY x->argMax(x, number) FROM numbers(1); diff --git a/parser/testdata/01470_explain/ast.json b/parser/testdata/01470_explain/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01470_explain/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01470_explain/metadata.json b/parser/testdata/01470_explain/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01470_explain/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01470_explain/query.sql b/parser/testdata/01470_explain/query.sql new file mode 100644 index 000000000..8fd145e7f --- /dev/null +++ b/parser/testdata/01470_explain/query.sql @@ -0,0 +1,6 @@ +-- +-- regressions +-- + +-- SIGSEGV regression due to QueryPlan lifetime +EXPLAIN PIPELINE graph=1 SELECT * FROM remote('127.{1,2}', system.one) FORMAT Null; diff --git a/parser/testdata/01470_show_databases_like/ast.json 
b/parser/testdata/01470_show_databases_like/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01470_show_databases_like/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01470_show_databases_like/metadata.json b/parser/testdata/01470_show_databases_like/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01470_show_databases_like/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01470_show_databases_like/query.sql b/parser/testdata/01470_show_databases_like/query.sql new file mode 100644 index 000000000..33fee27ac --- /dev/null +++ b/parser/testdata/01470_show_databases_like/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-parallel + +create database if not exists test_01470; +show databases like '%01470'; +drop database test_01470; diff --git a/parser/testdata/01470_test_insert_select_asterisk/ast.json b/parser/testdata/01470_test_insert_select_asterisk/ast.json new file mode 100644 index 000000000..ce5ce805c --- /dev/null +++ b/parser/testdata/01470_test_insert_select_asterisk/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery insert_select_dst (children 1)" + }, + { + "explain": " Identifier insert_select_dst" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001555192, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/01470_test_insert_select_asterisk/metadata.json b/parser/testdata/01470_test_insert_select_asterisk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01470_test_insert_select_asterisk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01470_test_insert_select_asterisk/query.sql b/parser/testdata/01470_test_insert_select_asterisk/query.sql new file mode 100644 index 000000000..815ebd761 --- /dev/null +++ b/parser/testdata/01470_test_insert_select_asterisk/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS insert_select_dst; +DROP TABLE IF EXISTS insert_select_src; + +CREATE TABLE insert_select_dst (i int, middle_a int, middle_b int, j int) ENGINE = Log; + +CREATE TABLE insert_select_src (i int, j int) ENGINE = Log; + +INSERT INTO insert_select_src VALUES (1, 2), (3, 4); + +INSERT INTO insert_select_dst(* EXCEPT (middle_a, middle_b)) SELECT * FROM insert_select_src; +INSERT INTO insert_select_dst(insert_select_dst.* EXCEPT (middle_a, middle_b)) SELECT * FROM insert_select_src; +INSERT INTO insert_select_dst(COLUMNS('.*') EXCEPT (middle_a, middle_b)) SELECT * FROM insert_select_src; +INSERT INTO insert_select_dst(insert_select_src.* EXCEPT (middle_a, middle_b)) SELECT * FROM insert_select_src; -- { serverError UNKNOWN_IDENTIFIER } + +SELECT * FROM insert_select_dst; + +DROP TABLE IF EXISTS insert_select_dst; +DROP TABLE IF EXISTS insert_select_src; diff --git a/parser/testdata/01471_calculate_ttl_during_merge/ast.json b/parser/testdata/01471_calculate_ttl_during_merge/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01471_calculate_ttl_during_merge/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01471_calculate_ttl_during_merge/metadata.json b/parser/testdata/01471_calculate_ttl_during_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01471_calculate_ttl_during_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01471_calculate_ttl_during_merge/query.sql b/parser/testdata/01471_calculate_ttl_during_merge/query.sql new file mode 100644 index 000000000..ea8aa1814 --- /dev/null +++ b/parser/testdata/01471_calculate_ttl_during_merge/query.sql @@ -0,0 +1,39 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS table_for_ttl; + +CREATE TABLE table_for_ttl( + d DateTime, + key UInt64, + value String) +ENGINE = MergeTree() +ORDER BY tuple() +PARTITION BY key; + +INSERT INTO table_for_ttl SELECT now() - INTERVAL 2 YEAR, 1, toString(number) from numbers(1000); + +INSERT INTO table_for_ttl SELECT now() - INTERVAL 2 DAY, 3, toString(number) from numbers(2000, 1000); + +INSERT INTO table_for_ttl SELECT now(), 4, toString(number) from numbers(3000, 1000); + +SELECT count() FROM table_for_ttl; + +ALTER TABLE table_for_ttl MODIFY TTL d + INTERVAL 1 YEAR SETTINGS materialize_ttl_after_modify = 0; + +SELECT count() FROM table_for_ttl; + +OPTIMIZE TABLE table_for_ttl FINAL; + +SELECT count() FROM table_for_ttl; + +ALTER TABLE table_for_ttl MODIFY COLUMN value String TTL d + INTERVAL 1 DAY SETTINGS materialize_ttl_after_modify = 0; + +SELECT count(distinct value) FROM table_for_ttl; + +OPTIMIZE TABLE table_for_ttl FINAL; + +SELECT count(distinct value) FROM table_for_ttl; + +OPTIMIZE TABLE table_for_ttl FINAL; -- Just check in the logs that it doesn't run with force again + +DROP TABLE IF EXISTS table_for_ttl; diff --git a/parser/testdata/01471_limit_by_format/ast.json b/parser/testdata/01471_limit_by_format/ast.json new file mode 100644 index 000000000..2d6bd8df1 --- /dev/null +++ b/parser/testdata/01471_limit_by_format/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Explain EXPLAIN SYNTAX (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 5)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001100606, + "rows_read": 14, + "bytes_read": 512 + } +} diff --git a/parser/testdata/01471_limit_by_format/metadata.json b/parser/testdata/01471_limit_by_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01471_limit_by_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01471_limit_by_format/query.sql b/parser/testdata/01471_limit_by_format/query.sql new file mode 100644 index 000000000..a58099a22 --- /dev/null +++ b/parser/testdata/01471_limit_by_format/query.sql @@ -0,0 +1,2 @@ +EXPLAIN SYNTAX SELECT * FROM system.one LIMIT 1 BY * LIMIT 1; +EXPLAIN SYNTAX SELECT * FROM system.one LIMIT 1 BY 0+dummy, 0-dummy LIMIT 1; diff --git a/parser/testdata/01471_top_k_range_check/ast.json b/parser/testdata/01471_top_k_range_check/ast.json new file mode 100644 index 000000000..2776fc2c1 --- /dev/null +++ b/parser/testdata/01471_top_k_range_check/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + 
{ + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function length (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function topKWeighted (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_1025" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal Int64_-9223372036854775808" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001436329, + "rows_read": 17, + "bytes_read": 690 + } +} diff --git a/parser/testdata/01471_top_k_range_check/metadata.json b/parser/testdata/01471_top_k_range_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01471_top_k_range_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01471_top_k_range_check/query.sql b/parser/testdata/01471_top_k_range_check/query.sql new file mode 100644 index 000000000..ea4990c32 --- /dev/null +++ b/parser/testdata/01471_top_k_range_check/query.sql @@ -0,0 +1 @@ +SELECT length(topKWeighted(2, -9223372036854775808)(number, 1025)) FROM system.numbers; -- { serverError ARGUMENT_OUT_OF_BOUND } diff --git a/parser/testdata/01471_with_format/ast.json b/parser/testdata/01471_with_format/ast.json new file mode 100644 index 000000000..c7ff6327d --- /dev/null +++ b/parser/testdata/01471_with_format/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Explain EXPLAIN SYNTAX (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001360113, + "rows_read": 8, + "bytes_read": 293 + } +} diff --git a/parser/testdata/01471_with_format/metadata.json b/parser/testdata/01471_with_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01471_with_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01471_with_format/query.sql b/parser/testdata/01471_with_format/query.sql new file mode 100644 index 000000000..60f6fe413 --- /dev/null +++ b/parser/testdata/01471_with_format/query.sql @@ -0,0 +1,2 @@ +EXPLAIN SYNTAX WITH 1 SELECT 1; +EXPLAIN SYNTAX WITH 1, 2 SELECT 1; diff --git a/parser/testdata/01472_many_rows_in_totals/ast.json b/parser/testdata/01472_many_rows_in_totals/ast.json new file mode 100644 index 000000000..d353924fe --- /dev/null +++ b/parser/testdata/01472_many_rows_in_totals/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001786925, + "rows_read": 1, + "bytes_read": 11 + } +} diff 
--git a/parser/testdata/01472_many_rows_in_totals/metadata.json b/parser/testdata/01472_many_rows_in_totals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01472_many_rows_in_totals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01472_many_rows_in_totals/query.sql b/parser/testdata/01472_many_rows_in_totals/query.sql new file mode 100644 index 000000000..df12b87a6 --- /dev/null +++ b/parser/testdata/01472_many_rows_in_totals/query.sql @@ -0,0 +1,15 @@ +set output_format_pretty_color=1; + +-- Disable external aggregation because it may produce several blocks instead of one. +set max_bytes_before_external_group_by = 0; +set max_bytes_ratio_before_external_group_by = 0; +set output_format_write_statistics = 0; + +select g, s from (select g, sum(number) as s from numbers(4) group by bitAnd(number, 1) as g with totals order by g) array join [1, 2] as a format Pretty; +select '--'; + +select g, s from (select g, sum(number) as s from numbers(4) group by bitAnd(number, 1) as g with totals order by g) array join [1, 2] as a format TSV; +select '--'; + +select g, s from (select g, sum(number) as s from numbers(4) group by bitAnd(number, 1) as g with totals order by g) array join [1, 2] as a format JSON; +select '--'; diff --git a/parser/testdata/01472_toBoundsOfInterval_disallow_empty_tz_field/ast.json b/parser/testdata/01472_toBoundsOfInterval_disallow_empty_tz_field/ast.json new file mode 100644 index 000000000..ad9a6fb60 --- /dev/null +++ b/parser/testdata/01472_toBoundsOfInterval_disallow_empty_tz_field/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toStartOfDay (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2017-12-31 00:00:00'" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001003395, + "rows_read": 11, + "bytes_read": 421 + } +} diff --git a/parser/testdata/01472_toBoundsOfInterval_disallow_empty_tz_field/metadata.json b/parser/testdata/01472_toBoundsOfInterval_disallow_empty_tz_field/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01472_toBoundsOfInterval_disallow_empty_tz_field/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01472_toBoundsOfInterval_disallow_empty_tz_field/query.sql b/parser/testdata/01472_toBoundsOfInterval_disallow_empty_tz_field/query.sql new file mode 100644 index 000000000..78f7c5b21 --- /dev/null +++ b/parser/testdata/01472_toBoundsOfInterval_disallow_empty_tz_field/query.sql @@ -0,0 +1,42 @@ +SELECT toStartOfDay(toDateTime('2017-12-31 00:00:00', 'UTC'), ''); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toStartOfDay(toDateTime('2017-12-31 03:45:00', 'UTC'), 'UTC'); -- success + +SELECT toMonday(toDateTime('2017-12-31 00:00:00', 'UTC'), ''); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toMonday(toDateTime('2017-12-31 00:00:00', 'UTC'), 'UTC'); -- success + +SELECT toStartOfWeek(toDateTime('2017-12-31 00:00:00', 'UTC'), 0, ''); -- {serverError 
ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toStartOfWeek(toDateTime('2017-12-31 00:00:00', 'UTC'), 0, 'UTC'); -- success + +SELECT toStartOfWeek(toDateTime('2017-12-31 00:00:00', 'UTC'), 1, ''); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toStartOfWeek(toDateTime('2017-12-31 00:00:00', 'UTC'), 1, 'UTC'); -- success + +SELECT toLastDayOfWeek(toDateTime('2017-12-31 00:00:00', 'UTC'), 0, ''); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toLastDayOfWeek(toDateTime('2017-12-31 00:00:00', 'UTC'), 0, 'UTC'); -- success + +SELECT toLastDayOfWeek(toDateTime('2017-12-31 00:00:00', 'UTC'), 1, ''); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toLastDayOfWeek(toDateTime('2017-12-31 00:00:00', 'UTC'), 1, 'UTC'); -- success + +SELECT toStartOfMonth(toDateTime('2017-12-31 00:00:00', 'UTC'), ''); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toStartOfMonth(toDateTime('2017-12-31 00:00:00', 'UTC'), 'UTC'); -- success + +SELECT toStartOfQuarter(toDateTime('2017-12-31 00:00:00', 'UTC'), ''); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toStartOfQuarter(toDateTime('2017-12-31 00:00:00', 'UTC'), 'UTC'); -- success + +SELECT toStartOfYear(toDateTime('2017-12-31 00:00:00', 'UTC'), ''); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toStartOfYear(toDateTime('2017-12-31 00:00:00', 'UTC'), 'UTC'); -- success + +SELECT toStartOfTenMinutes(toDateTime('2017-12-31 00:00:00', 'UTC'), ''); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toStartOfTenMinutes(toDateTime('2017-12-31 05:12:30', 'UTC'), 'UTC'); -- success + +SELECT toStartOfFifteenMinutes(toDateTime('2017-12-31 00:00:00', 'UTC'), ''); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toStartOfFifteenMinutes(toDateTime('2017-12-31 01:17:00', 'UTC'), 'UTC'); -- success + +SELECT toStartOfHour(toDateTime('2017-12-31 00:00:00', 'UTC'), ''); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toStartOfHour(toDateTime('2017-12-31 01:59:00', 'UTC'), 'UTC'); -- success + +SELECT toStartOfMinute(toDateTime('2017-12-31 00:00:00', 'UTC'), ''); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toStartOfMinute(toDateTime('2017-12-31 00:01:30', 'UTC'), 'UTC'); -- success + +-- special case - allow empty time_zone when using functions like today(), yesterday() etc. +SELECT toStartOfDay(today()) FORMAT Null; -- success +SELECT toStartOfDay(yesterday()) FORMAT Null; -- success diff --git a/parser/testdata/01473_event_time_microseconds/ast.json b/parser/testdata/01473_event_time_microseconds/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01473_event_time_microseconds/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01473_event_time_microseconds/metadata.json b/parser/testdata/01473_event_time_microseconds/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01473_event_time_microseconds/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01473_event_time_microseconds/query.sql b/parser/testdata/01473_event_time_microseconds/query.sql new file mode 100644 index 000000000..a72e10da9 --- /dev/null +++ b/parser/testdata/01473_event_time_microseconds/query.sql @@ -0,0 +1,53 @@ +-- Tags: no-tsan, no-asan, no-ubsan, no-msan, no-debug + +-- This file contains tests for the event_time_microseconds field for various tables. +-- Note: Only event_time_microseconds for asynchronous_metric_log table is tested via +-- an integration test as those metrics take 60s by default to be updated. 
+-- Refer: tests/integration/test_asynchronous_metric_log_table. + +SET log_queries = 1; +SET log_query_threads = 1; +SET query_profiler_real_time_period_ns = 100000000; +-- a long enough query to trigger the query profiler and to record trace log +SELECT sleep(2) FORMAT Null; +SET query_profiler_real_time_period_ns = 1000000000; +SYSTEM FLUSH LOGS metric_log, trace_log, query_log, query_thread_log; + +SELECT '01473_metric_log_table_event_start_time_microseconds_test'; +-- query assumes that the event_time field is accurate. +WITH ( + SELECT event_time_microseconds, event_time + FROM system.metric_log + ORDER BY event_time DESC + LIMIT 1 + ) AS time +SELECT if(dateDiff('second', toDateTime(time.1), toDateTime(time.2)) = 0, 'ok', toString(time)); + +SELECT '01473_trace_log_table_event_start_time_microseconds_test'; +WITH ( + SELECT event_time_microseconds, event_time + FROM system.trace_log + ORDER BY event_time DESC + LIMIT 1 + ) AS time +SELECT if(dateDiff('second', toDateTime(time.1), toDateTime(time.2)) = 0, 'ok', toString(time)); + +SELECT '01473_query_log_table_event_start_time_microseconds_test'; +WITH ( + SELECT event_time_microseconds, event_time + FROM system.query_log + WHERE current_database = currentDatabase() + ORDER BY event_time DESC + LIMIT 1 + ) AS time +SELECT if(dateDiff('second', toDateTime(time.1), toDateTime(time.2)) = 0, 'ok', toString(time)); + +SELECT '01473_query_thread_log_table_event_start_time_microseconds_test'; +WITH ( + SELECT event_time_microseconds, event_time + FROM system.query_thread_log + WHERE current_database = currentDatabase() + ORDER BY event_time DESC + LIMIT 1 + ) AS time +SELECT if(dateDiff('second', toDateTime(time.1), toDateTime(time.2)) = 0, 'ok', toString(time)); diff --git a/parser/testdata/01473_system_events_zeroes/ast.json b/parser/testdata/01473_system_events_zeroes/ast.json new file mode 100644 index 000000000..ccd4f82b2 --- /dev/null +++ b/parser/testdata/01473_system_events_zeroes/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001376352, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01473_system_events_zeroes/metadata.json b/parser/testdata/01473_system_events_zeroes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01473_system_events_zeroes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01473_system_events_zeroes/query.sql b/parser/testdata/01473_system_events_zeroes/query.sql new file mode 100644 index 000000000..62478c525 --- /dev/null +++ b/parser/testdata/01473_system_events_zeroes/query.sql @@ -0,0 +1,3 @@ +SET system_events_show_zero_values = 1; +SELECT value FROM system.events WHERE event == 'PerfAlignmentFaults'; +SET system_events_show_zero_values = 0; diff --git a/parser/testdata/01474_bad_global_join/ast.json b/parser/testdata/01474_bad_global_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01474_bad_global_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01474_bad_global_join/metadata.json b/parser/testdata/01474_bad_global_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01474_bad_global_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01474_bad_global_join/query.sql b/parser/testdata/01474_bad_global_join/query.sql 
new file mode 100644 index 000000000..a6f28f7d3 --- /dev/null +++ b/parser/testdata/01474_bad_global_join/query.sql @@ -0,0 +1,17 @@ +-- Tags: global + +DROP TABLE IF EXISTS local_table; +DROP TABLE IF EXISTS dist_table; + +CREATE TABLE local_table (id UInt64, val String) ENGINE = Memory; + +INSERT INTO local_table SELECT number AS id, toString(number) AS val FROM numbers(100); + +CREATE TABLE dist_table AS local_table +ENGINE = Distributed('test_cluster_two_shards_localhost', currentDatabase(), local_table); + +SELECT uniq(d.val) FROM dist_table AS d GLOBAL LEFT JOIN numbers(100) AS t USING id; -- { serverError UNKNOWN_IDENTIFIER, 284 } +SELECT uniq(d.val) FROM dist_table AS d GLOBAL LEFT JOIN local_table AS t USING id; + +DROP TABLE local_table; +DROP TABLE dist_table; diff --git a/parser/testdata/01474_decimal_scale_bug/ast.json b/parser/testdata/01474_decimal_scale_bug/ast.json new file mode 100644 index 000000000..c2c3eb31e --- /dev/null +++ b/parser/testdata/01474_decimal_scale_bug/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function multiply (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.00160828, + "rows_read": 17, + "bytes_read": 657 + } +} diff --git a/parser/testdata/01474_decimal_scale_bug/metadata.json b/parser/testdata/01474_decimal_scale_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01474_decimal_scale_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01474_decimal_scale_bug/query.sql b/parser/testdata/01474_decimal_scale_bug/query.sql new file mode 100644 index 000000000..0fdeb3fb0 --- /dev/null +++ b/parser/testdata/01474_decimal_scale_bug/query.sql @@ -0,0 +1,20 @@ +SELECT toDecimal32(1, 2) * toDecimal32(1, 1) x, toTypeName(x); +SELECT toDecimal32(1, 1) * toDecimal32(1, 2) x, toTypeName(x); +SELECT toDecimal32(1, 3) * toDecimal64(1, 1) x, toTypeName(x); +SELECT toDecimal32(1, 1) * toDecimal64(1, 3) x, toTypeName(x); +SELECT toDecimal32(1, 2) * toDecimal128(1, 3) x, toTypeName(x); +SELECT toDecimal32(1, 3) * toDecimal128(1, 2) x, toTypeName(x); + +SELECT toDecimal64(1, 2) * toDecimal32(1, 1) x, toTypeName(x); +SELECT toDecimal64(1, 1) * toDecimal32(1, 2) x, toTypeName(x); +SELECT toDecimal64(1, 3) * toDecimal64(1, 1) x, toTypeName(x); +SELECT toDecimal64(1, 1) * toDecimal64(1, 3) x, toTypeName(x); +SELECT toDecimal64(1, 2) * toDecimal128(1, 3) x, toTypeName(x); +SELECT toDecimal64(1, 3) * toDecimal128(1, 2) x, toTypeName(x); + +SELECT toDecimal128(1, 2) * toDecimal32(1, 1) x, toTypeName(x); +SELECT toDecimal128(1, 1) * toDecimal32(1, 2) x, toTypeName(x); +SELECT 
toDecimal128(1, 3) * toDecimal64(1, 1) x, toTypeName(x); +SELECT toDecimal128(1, 1) * toDecimal64(1, 3) x, toTypeName(x); +SELECT toDecimal128(1, 2) * toDecimal128(1, 3) x, toTypeName(x); +SELECT toDecimal128(1, 3) * toDecimal128(1, 2) x, toTypeName(x); diff --git a/parser/testdata/01474_executable_dictionary/ast.json b/parser/testdata/01474_executable_dictionary/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01474_executable_dictionary/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01474_executable_dictionary/metadata.json b/parser/testdata/01474_executable_dictionary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01474_executable_dictionary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01474_executable_dictionary/query.sql b/parser/testdata/01474_executable_dictionary/query.sql new file mode 100644 index 000000000..6937a0992 --- /dev/null +++ b/parser/testdata/01474_executable_dictionary/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-tsan, no-parallel +-- Tag no-tsan: informational stderr from sanitizer at start + +SELECT number, dictGet('executable_complex', 'a', (number, number)) AS a, dictGet('executable_complex', 'b', (number, number)) AS b FROM numbers(1000000) WHERE number = 999999; +SELECT number, dictGet('executable_complex_direct', 'a', (number, number)) AS a, dictGet('executable_complex_direct', 'b', (number, number)) AS b FROM numbers(1000000) WHERE number = 999999; +SELECT number, dictGet('executable_simple', 'a', number) AS a, dictGet('executable_simple', 'b', number) AS b FROM numbers(1000000) WHERE number = 999999; diff --git a/parser/testdata/01475_fix_bigint_shift/ast.json b/parser/testdata/01475_fix_bigint_shift/ast.json new file mode 100644 index 000000000..5a3360f6e --- /dev/null +++ b/parser/testdata/01475_fix_bigint_shift/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitShiftLeft (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toInt64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-2" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001582968, + "rows_read": 10, + "bytes_read": 382 + } +} diff --git a/parser/testdata/01475_fix_bigint_shift/metadata.json b/parser/testdata/01475_fix_bigint_shift/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01475_fix_bigint_shift/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01475_fix_bigint_shift/query.sql b/parser/testdata/01475_fix_bigint_shift/query.sql new file mode 100644 index 000000000..d16cdeca8 --- /dev/null +++ b/parser/testdata/01475_fix_bigint_shift/query.sql @@ -0,0 +1,2 @@ +SELECT bitShiftLeft(toInt64(-2), 1); +SELECT bitShiftLeft(toInt256(-2), 1); diff --git a/parser/testdata/01475_mutation_with_if/ast.json b/parser/testdata/01475_mutation_with_if/ast.json new file mode 100644 index 000000000..af1f06ae0 --- /dev/null +++ b/parser/testdata/01475_mutation_with_if/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": 
"String" + } + ], + + "data": + [ + { + "explain": "DropQuery mutation_table (children 1)" + }, + { + "explain": " Identifier mutation_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001131363, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/01475_mutation_with_if/metadata.json b/parser/testdata/01475_mutation_with_if/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01475_mutation_with_if/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01475_mutation_with_if/query.sql b/parser/testdata/01475_mutation_with_if/query.sql new file mode 100644 index 000000000..c25d208e9 --- /dev/null +++ b/parser/testdata/01475_mutation_with_if/query.sql @@ -0,0 +1,54 @@ +DROP TABLE IF EXISTS mutation_table; +CREATE TABLE mutation_table ( + id int, + price Nullable(Int32) +) +ENGINE = MergeTree() +PARTITION BY id +ORDER BY id; + +INSERT INTO mutation_table (id, price) VALUES (1, 100); + +ALTER TABLE mutation_table UPDATE price = 150 WHERE id = 1 SETTINGS mutations_sync = 2; + +SELECT * FROM mutation_table; + +DROP TABLE IF EXISTS mutation_table; + + + +create table mutation_table ( dt Nullable(Date), name Nullable(String)) +engine MergeTree order by tuple(); + +insert into mutation_table (name, dt) values ('car', '2020-02-28'); +insert into mutation_table (name, dt) values ('dog', '2020-03-28'); + +select * from mutation_table order by dt, name; + +alter table mutation_table update dt = toDateOrNull('2020-08-02') +where name = 'car' SETTINGS mutations_sync = 2; + +select * from mutation_table order by dt, name; + +insert into mutation_table (name, dt) values ('car', Null); +insert into mutation_table (name, dt) values ('cat', Null); + +alter table mutation_table update dt = toDateOrNull('2020-08-03') +where name = 'car' and dt is null SETTINGS mutations_sync = 2; + +select * from mutation_table order by dt, name; + +alter table mutation_table update dt = toDateOrNull('2020-08-04') +where name = 'car' or dt is null SETTINGS mutations_sync = 2; + +select * from mutation_table order by dt, name; + +insert into mutation_table (name, dt) values (Null, '2020-08-05'); + +alter table mutation_table update dt = Null +where name is not null SETTINGS mutations_sync = 2; + +select * from mutation_table order by dt, name; + + +DROP TABLE IF EXISTS mutation_table; diff --git a/parser/testdata/01475_read_subcolumns/ast.json b/parser/testdata/01475_read_subcolumns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01475_read_subcolumns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01475_read_subcolumns/metadata.json b/parser/testdata/01475_read_subcolumns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01475_read_subcolumns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01475_read_subcolumns/query.sql b/parser/testdata/01475_read_subcolumns/query.sql new file mode 100644 index 000000000..70964e484 --- /dev/null +++ b/parser/testdata/01475_read_subcolumns/query.sql @@ -0,0 +1,71 @@ +-- Tags: no-object-storage, no-random-settings, no-parallel + +SET use_uncompressed_cache = 0; + +SELECT '====array===='; +DROP TABLE IF EXISTS t_arr; +CREATE TABLE t_arr (a Array(UInt32)) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0; +INSERT INTO t_arr VALUES ([1]) ([]) ([1, 2, 3]) ([1, 2]); + +SYSTEM DROP MARK CACHE; +SELECT a.size0 FROM t_arr; + +SYSTEM FLUSH LOGS 
query_log; +SELECT ProfileEvents['FileOpen'] +FROM system.query_log +WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT a.size0 FROM %t_arr%')) + AND current_database = currentDatabase(); + +SELECT '====tuple===='; +DROP TABLE IF EXISTS t_tup; +CREATE TABLE t_tup (t Tuple(s String, u UInt32)) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, serialization_info_version = 'basic'; +INSERT INTO t_tup VALUES (('foo', 1)) (('bar', 2)) (('baz', 42)); + +SYSTEM DROP MARK CACHE; +SELECT t.s FROM t_tup; + +SYSTEM DROP MARK CACHE; +SELECT t.u FROM t_tup; + +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['FileOpen'] +FROM system.query_log +WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT t._ FROM %t_tup%')) + AND current_database = currentDatabase(); + +SELECT '====nullable===='; +DROP TABLE IF EXISTS t_nul; +CREATE TABLE t_nul (n Nullable(UInt32)) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0; +INSERT INTO t_nul VALUES (1) (NULL) (2) (NULL); + +SYSTEM DROP MARK CACHE; +SELECT n.null FROM t_nul; + +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['FileOpen'] +FROM system.query_log +WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT n.null FROM %t_nul%')) + AND current_database = currentDatabase(); + +SELECT '====map===='; +DROP TABLE IF EXISTS t_map; +CREATE TABLE t_map (m Map(String, UInt32)) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0, serialization_info_version = 'basic'; +INSERT INTO t_map VALUES (map('a', 1, 'b', 2)) (map('a', 3, 'c', 4)), (map('b', 5, 'c', 6)); + +--- will read 4 files: keys.bin, keys.mrk2, size0.bin, size0.mrk2 +SYSTEM DROP MARK CACHE; +SELECT m.keys FROM t_map; + +SYSTEM DROP MARK CACHE; +SELECT m.values FROM t_map; + +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['FileOpen'] +FROM system.query_log +WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT m.% FROM %t_map%')) + AND current_database = currentDatabase(); + +DROP TABLE t_arr; +DROP TABLE t_nul; +DROP TABLE t_tup; +DROP TABLE t_map; diff --git a/parser/testdata/01475_read_subcolumns_2/ast.json b/parser/testdata/01475_read_subcolumns_2/ast.json new file mode 100644 index 000000000..a9034f0c8 --- /dev/null +++ b/parser/testdata/01475_read_subcolumns_2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery subcolumns (children 1)" + }, + { + "explain": " Identifier subcolumns" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001148219, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01475_read_subcolumns_2/metadata.json b/parser/testdata/01475_read_subcolumns_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01475_read_subcolumns_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01475_read_subcolumns_2/query.sql b/parser/testdata/01475_read_subcolumns_2/query.sql new file mode 100644 index 000000000..e827d6c36 --- /dev/null +++ b/parser/testdata/01475_read_subcolumns_2/query.sql @@ -0,0 +1,51 @@ +DROP TABLE IF EXISTS subcolumns; + +CREATE TABLE subcolumns +( + t Tuple + ( + a Array(Nullable(UInt32)), + u UInt32, + s Nullable(String) + ), + arr Array(Nullable(String)), + arr2 Array(Array(Nullable(String))), + lc LowCardinality(String), + nested Nested(col1 String, col2 Nullable(UInt32)) +) +ENGINE = MergeTree order by tuple() SETTINGS min_bytes_for_wide_part = '10M'; + +INSERT 
INTO subcolumns VALUES (([1, NULL], 2, 'a'), ['foo', NULL, 'bar'], [['123'], ['456', '789']], 'qqqq', ['zzz', 'xxx'], [42, 43]); +SELECT * FROM subcolumns; +SELECT t.a, t.u, t.s, nested.col1, nested.col2, lc FROM subcolumns; +SELECT t.a.size0, t.a.null, t.u, t.s, t.s.null FROM subcolumns; +SELECT sumArray(arr.null), sum(arr.size0) FROM subcolumns; +SELECT arr2, arr2.size0, arr2.size1, arr2.null FROM subcolumns; +-- SELECT nested.col1, nested.col2, nested.col1.size0, nested.col2.size0, nested.col2.null FROM subcolumns; + +DROP TABLE IF EXISTS subcolumns; + +CREATE TABLE subcolumns +( + t Tuple + ( + a Array(Nullable(UInt32)), + u UInt32, + s Nullable(String) + ), + arr Array(Nullable(String)), + arr2 Array(Array(Nullable(String))), + lc LowCardinality(String), + nested Nested(col1 String, col2 Nullable(UInt32)) +) +ENGINE = MergeTree order by tuple() SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO subcolumns VALUES (([1, NULL], 2, 'a'), ['foo', NULL, 'bar'], [['123'], ['456', '789']], 'qqqq', ['zzz', 'xxx'], [42, 43]); +SELECT * FROM subcolumns; +SELECT t.a, t.u, t.s, nested.col1, nested.col2, lc FROM subcolumns; +SELECT t.a.size0, t.a.null, t.u, t.s, t.s.null FROM subcolumns; +SELECT sumArray(arr.null), sum(arr.size0) FROM subcolumns; +SELECT arr2, arr2.size0, arr2.size1, arr2.null FROM subcolumns; +-- SELECT nested.col1, nested.col2, nested.size0, nested.size0, nested.col2.null FROM subcolumns; + +DROP TABLE subcolumns; diff --git a/parser/testdata/01475_read_subcolumns_3/ast.json b/parser/testdata/01475_read_subcolumns_3/ast.json new file mode 100644 index 000000000..14a078050 --- /dev/null +++ b/parser/testdata/01475_read_subcolumns_3/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery null_subcolumns (children 1)" + }, + { + "explain": " Identifier null_subcolumns" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001509525, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/01475_read_subcolumns_3/metadata.json b/parser/testdata/01475_read_subcolumns_3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01475_read_subcolumns_3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01475_read_subcolumns_3/query.sql b/parser/testdata/01475_read_subcolumns_3/query.sql new file mode 100644 index 000000000..a55b94fa2 --- /dev/null +++ b/parser/testdata/01475_read_subcolumns_3/query.sql @@ -0,0 +1,40 @@ +DROP TABLE IF EXISTS null_subcolumns; + +SELECT 'Nullable'; +CREATE TABLE null_subcolumns (id UInt32, n Nullable(String)) ENGINE = MergeTree ORDER BY id; + +INSERT INTO null_subcolumns VALUES (1, 'foo') (2, NULL) (3, NULL) (4, 'abc'); + +SELECT count() FROM null_subcolumns WHERE n.null; +SELECT count() FROM null_subcolumns PREWHERE n.null; + +-- Check that subcolumns will be available after restart.
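+-- Note: DETACH removes the table object from the server while keeping its data and metadata on disk; ATTACH re-creates it from that metadata, so the n.null subcolumn must survive the round trip.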
+DETACH TABLE null_subcolumns; +ATTACH TABLE null_subcolumns; + +SELECT count() FROM null_subcolumns WHERE n.null; +SELECT count() FROM null_subcolumns PREWHERE n.null; + +DROP TABLE null_subcolumns; +DROP TABLE IF EXISTS map_subcolumns; + +SELECT 'Map'; +CREATE TABLE map_subcolumns (id UInt32, m Map(String, UInt32)) ENGINE = MergeTree ORDER BY id; +INSERT INTO map_subcolumns VALUES (1, map('a', 1, 'b', 2)) (2, map('a', 3, 'c', 4)), (3, map('b', 5, 'c', 6, 'd', 7)); + +SELECT count() FROM map_subcolumns WHERE has(m.keys, 'a'); +SELECT count() FROM map_subcolumns PREWHERE has(m.keys, 'b'); + +SELECT count() FROM map_subcolumns WHERE arrayMax(m.values) > 3; +SELECT count() FROM map_subcolumns PREWHERE arrayMax(m.values) > 3; + +DETACH TABLE map_subcolumns; +ATTACH TABLE map_subcolumns; + +SELECT count() FROM map_subcolumns WHERE has(m.keys, 'a'); +SELECT count() FROM map_subcolumns PREWHERE has(m.keys, 'b'); + +SELECT id, m.size0 FROM map_subcolumns; +SELECT count() FROM map_subcolumns WHERE m.size0 > 2; + +DROP TABLE map_subcolumns; diff --git a/parser/testdata/01476_right_full_join_switch/ast.json b/parser/testdata/01476_right_full_join_switch/ast.json new file mode 100644 index 000000000..556a42553 --- /dev/null +++ b/parser/testdata/01476_right_full_join_switch/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001303134, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01476_right_full_join_switch/metadata.json b/parser/testdata/01476_right_full_join_switch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01476_right_full_join_switch/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01476_right_full_join_switch/query.sql b/parser/testdata/01476_right_full_join_switch/query.sql new file mode 100644 index 000000000..d8156ae5e --- /dev/null +++ b/parser/testdata/01476_right_full_join_switch/query.sql @@ -0,0 +1,45 @@ +SET join_algorithm = 'auto'; +SET max_bytes_in_join = 100; + +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS nr; + +CREATE TABLE t (`x` UInt32, `s` LowCardinality(String)) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE nr (`x` Nullable(UInt32), `s` Nullable(String)) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t VALUES (1, 'l'); +INSERT INTO nr VALUES (2, NULL); + + +SET join_use_nulls = 0; + +SET enable_analyzer = 1; + +-- x is supertype for `t.x` and `nr.x` from left and right since `x` is inside `USING`. +SELECT x, l.s, r.s, toTypeName(l.s), toTypeName(r.s) FROM t AS l LEFT JOIN nr AS r USING (x) ORDER BY t.x; +SELECT x, l.s, r.s, toTypeName(l.s), toTypeName(r.s) FROM t AS l RIGHT JOIN nr AS r USING (x) ORDER BY t.x; +SELECT x, l.s, r.s, toTypeName(l.s), toTypeName(r.s) FROM t AS l FULL JOIN nr AS r USING (x) ORDER BY t.x; + +SELECT '-'; + +SELECT t.x, l.s, r.s, toTypeName(l.s), toTypeName(r.s) FROM nr AS l LEFT JOIN t AS r USING (x) ORDER BY t.x; +SELECT t.x, l.s, r.s, toTypeName(l.s), toTypeName(r.s) FROM nr AS l RIGHT JOIN t AS r USING (x) ORDER BY t.x; +SELECT t.x, l.s, r.s, toTypeName(l.s), toTypeName(r.s) FROM nr AS l FULL JOIN t AS r USING (x) ORDER BY t.x; + +SELECT '-'; + +SET enable_analyzer = 0; + +-- t.x is supertype for `x` from left and right since `x` is inside `USING`.
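+-- E.g. t.x is UInt32 and nr.x is Nullable(UInt32) here, so the USING column should resolve to the common supertype Nullable(UInt32).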
+SELECT t.x, l.s, r.s, toTypeName(l.s), toTypeName(r.s) FROM t AS l LEFT JOIN nr AS r USING (x) ORDER BY t.x; +SELECT t.x, l.s, r.s, toTypeName(l.s), toTypeName(r.s) FROM t AS l RIGHT JOIN nr AS r USING (x) ORDER BY t.x; +SELECT t.x, l.s, r.s, toTypeName(l.s), toTypeName(r.s) FROM t AS l FULL JOIN nr AS r USING (x) ORDER BY t.x; + +SELECT '-'; + +SELECT t.x, l.s, r.s, toTypeName(l.s), toTypeName(r.s) FROM nr AS l LEFT JOIN t AS r USING (x) ORDER BY t.x; +SELECT t.x, l.s, r.s, toTypeName(l.s), toTypeName(r.s) FROM nr AS l RIGHT JOIN t AS r USING (x) ORDER BY t.x; +SELECT t.x, l.s, r.s, toTypeName(l.s), toTypeName(r.s) FROM nr AS l FULL JOIN t AS r USING (x) ORDER BY t.x; + +DROP TABLE t; +DROP TABLE nr; diff --git a/parser/testdata/01478_not_equi-join_on/ast.json b/parser/testdata/01478_not_equi-join_on/ast.json new file mode 100644 index 000000000..1ea81934e --- /dev/null +++ b/parser/testdata/01478_not_equi-join_on/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias foo) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL (alias a)" + }, + { + "explain": " Literal UInt64_1 (alias b)" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001601326, + "rows_read": 15, + "bytes_read": 614 + } +} diff --git a/parser/testdata/01478_not_equi-join_on/metadata.json b/parser/testdata/01478_not_equi-join_on/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01478_not_equi-join_on/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01478_not_equi-join_on/query.sql b/parser/testdata/01478_not_equi-join_on/query.sql new file mode 100644 index 000000000..b1f88d6ad --- /dev/null +++ b/parser/testdata/01478_not_equi-join_on/query.sql @@ -0,0 +1,7 @@ +SELECT * FROM (SELECT NULL AS a, 1 AS b) AS foo +LEFT JOIN (SELECT 1024 AS b) AS bar +ON 1 = foo.b; -- { serverError INVALID_JOIN_ON_EXPRESSION } + +SELECT * FROM (SELECT NULL AS a, 1 AS b) AS foo +RIGHT JOIN (SELECT 1024 AS b) AS bar +ON 1 = bar.b; -- { serverError INVALID_JOIN_ON_EXPRESSION } diff --git a/parser/testdata/01479_cross_join_9855/ast.json b/parser/testdata/01479_cross_join_9855/ast.json new file mode 100644 index 000000000..9857aa341 --- /dev/null +++ b/parser/testdata/01479_cross_join_9855/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001115058, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01479_cross_join_9855/metadata.json b/parser/testdata/01479_cross_join_9855/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01479_cross_join_9855/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01479_cross_join_9855/query.sql b/parser/testdata/01479_cross_join_9855/query.sql new file mode 100644 index 000000000..19cd0ab18 --- /dev/null +++ b/parser/testdata/01479_cross_join_9855/query.sql @@ -0,0 +1,9 @@ +SET cross_to_inner_join_rewrite = 1; + +SELECT count() +FROM numbers(4) AS n1, numbers(3) AS n2 +WHERE n1.number > (select avg(n.number) from numbers(3) n) SETTINGS enable_analyzer=0; + +SELECT count() +FROM numbers(4) AS n1, numbers(3) AS n2, numbers(6) AS n3 +WHERE n1.number > (select avg(n.number) from numbers(3) n) SETTINGS enable_analyzer=0; diff --git a/parser/testdata/01480_binary_operator_monotonicity/ast.json b/parser/testdata/01480_binary_operator_monotonicity/ast.json new file mode 100644 index 000000000..82c0ba6a6 --- /dev/null +++ b/parser/testdata/01480_binary_operator_monotonicity/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery binary_op_mono1 (children 1)" + }, + { + "explain": " Identifier binary_op_mono1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001220691, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/01480_binary_operator_monotonicity/metadata.json b/parser/testdata/01480_binary_operator_monotonicity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01480_binary_operator_monotonicity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01480_binary_operator_monotonicity/query.sql b/parser/testdata/01480_binary_operator_monotonicity/query.sql new file mode 100644 index 000000000..6f6c01a73 --- /dev/null +++ b/parser/testdata/01480_binary_operator_monotonicity/query.sql @@ -0,0 +1,59 @@ +DROP TABLE IF EXISTS binary_op_mono1; +DROP TABLE IF EXISTS binary_op_mono2; +DROP TABLE IF EXISTS binary_op_mono3; +DROP TABLE IF EXISTS binary_op_mono4; +DROP TABLE IF EXISTS binary_op_mono5; +DROP TABLE IF EXISTS binary_op_mono6; +DROP TABLE IF EXISTS binary_op_mono7; +DROP TABLE IF EXISTS binary_op_mono8; + +CREATE TABLE binary_op_mono1(i int, j int) ENGINE MergeTree PARTITION BY toDate(i / 1000) ORDER BY j; +CREATE TABLE binary_op_mono2(i int, j int) ENGINE MergeTree PARTITION BY 1000 / i ORDER BY j settings allow_floating_point_partition_key=true; +CREATE TABLE binary_op_mono3(i int, j int) ENGINE MergeTree PARTITION BY i + 1000 ORDER BY j; +CREATE TABLE binary_op_mono4(i int, j int) ENGINE MergeTree PARTITION BY 1000 + i ORDER BY j; +CREATE TABLE binary_op_mono5(i int, j int) ENGINE MergeTree PARTITION BY i - 1000 ORDER BY j; +CREATE TABLE binary_op_mono6(i int, j int) ENGINE MergeTree PARTITION BY 1000 - i ORDER BY j; +CREATE TABLE binary_op_mono7(i int, j int) ENGINE MergeTree PARTITION BY i / 1000.0 ORDER BY j settings allow_floating_point_partition_key=true; +CREATE TABLE binary_op_mono8(i int, j int) ENGINE MergeTree PARTITION BY 1000.0 / i ORDER BY j settings allow_floating_point_partition_key=true; + +INSERT INTO binary_op_mono1 VALUES (toUnixTimestamp('2020-09-01 00:00:00') * 1000, 1), (toUnixTimestamp('2020-09-01 00:00:00') * 1000, 2); +INSERT INTO binary_op_mono2 VALUES (1, 1), (10000, 2); +INSERT INTO binary_op_mono3 VALUES (1, 1), (10000, 2); +INSERT INTO binary_op_mono4 VALUES (1, 1), (10000, 2); +INSERT INTO binary_op_mono5 VALUES (1, 1), (10000, 2); +INSERT INTO binary_op_mono6 VALUES (1, 1), (10000, 2); +INSERT INTO binary_op_mono7 VALUES (1, 1), (10000, 2); +INSERT INTO binary_op_mono8 VALUES (1, 1), (10000, 2); + +SET
max_rows_to_read = 1; +SELECT count() FROM binary_op_mono1 WHERE toDate(i / 1000) = '2020-09-02'; +SELECT count() FROM binary_op_mono2 WHERE 1000 / i = 100; +SELECT count() FROM binary_op_mono3 WHERE i + 1000 = 500; +SELECT count() FROM binary_op_mono4 WHERE 1000 + i = 500; +SELECT count() FROM binary_op_mono5 WHERE i - 1000 = 1234; +SELECT count() FROM binary_op_mono6 WHERE 1000 - i = 1234; +SELECT count() FROM binary_op_mono7 WHERE i / 1000.0 = 22.3; +SELECT count() FROM binary_op_mono8 WHERE 1000.0 / i = 33.4; + +DROP TABLE IF EXISTS binary_op_mono1; +DROP TABLE IF EXISTS binary_op_mono2; +DROP TABLE IF EXISTS binary_op_mono3; +DROP TABLE IF EXISTS binary_op_mono4; +DROP TABLE IF EXISTS binary_op_mono5; +DROP TABLE IF EXISTS binary_op_mono6; +DROP TABLE IF EXISTS binary_op_mono7; +DROP TABLE IF EXISTS binary_op_mono8; + +drop table if exists x; +create table x (i int, j int) engine MergeTree order by i / 10 settings index_granularity = 1; + +insert into x values (10, 1), (20, 2), (30, 3), (40, 4); + +set max_rows_to_read = 3; + +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. +set parallel_replicas_index_analysis_only_on_coordinator = 0; + +select * from x where i > 30; -- converted to i / 10 >= 3, thus needs to read 3 granules. + +drop table x; diff --git a/parser/testdata/01481_join_with_materialized/ast.json b/parser/testdata/01481_join_with_materialized/ast.json new file mode 100644 index 000000000..53fcb67eb --- /dev/null +++ b/parser/testdata/01481_join_with_materialized/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001222319, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01481_join_with_materialized/metadata.json b/parser/testdata/01481_join_with_materialized/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01481_join_with_materialized/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01481_join_with_materialized/query.sql b/parser/testdata/01481_join_with_materialized/query.sql new file mode 100644 index 000000000..833b483dc --- /dev/null +++ b/parser/testdata/01481_join_with_materialized/query.sql @@ -0,0 +1,21 @@ +drop table if exists t1; +drop table if exists t2; + +create table t1 +( + col UInt64, + x UInt64 MATERIALIZED col + 1 +) Engine = MergeTree order by tuple(); + +create table t2 +( + x UInt64 +) Engine = MergeTree order by tuple(); + +insert into t1 values (1),(2),(3),(4),(5); +insert into t2 values (1),(2),(3),(4),(5); + +SELECT COUNT() FROM t1 INNER JOIN t2 USING x; + +drop table t1; +drop table t2; diff --git a/parser/testdata/01482_move_to_prewhere_and_cast/ast.json b/parser/testdata/01482_move_to_prewhere_and_cast/ast.json new file mode 100644 index 000000000..c31306e6d --- /dev/null +++ b/parser/testdata/01482_move_to_prewhere_and_cast/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery APPLICATION (children 1)" + }, + { + "explain": " Identifier APPLICATION" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00112724, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/01482_move_to_prewhere_and_cast/metadata.json 
b/parser/testdata/01482_move_to_prewhere_and_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01482_move_to_prewhere_and_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01482_move_to_prewhere_and_cast/query.sql b/parser/testdata/01482_move_to_prewhere_and_cast/query.sql new file mode 100644 index 000000000..282363dcd --- /dev/null +++ b/parser/testdata/01482_move_to_prewhere_and_cast/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS APPLICATION; +DROP TABLE IF EXISTS DATABASE_IO; + +CREATE TABLE APPLICATION ( + `Name` LowCardinality(String), + `Base` LowCardinality(String) +) ENGINE = Memory(); + +insert into table APPLICATION values ('ApplicationA', 'BaseA'), ('ApplicationB', 'BaseB') , ('ApplicationC', 'BaseC'); + +CREATE TABLE DATABASE_IO ( + `Application` LowCardinality(String), + `Base` LowCardinality(String), + `Date` DateTime, + `Ios` UInt32 ) +ENGINE = MergeTree() +ORDER BY Date; + +insert into table DATABASE_IO values ('AppA', 'BaseA', '2020-01-01 00:00:00', 1000); + +SELECT `APPLICATION`.`Name` AS `App`, + CAST(CAST(`DATABASE_IO`.`Date` AS DATE) AS DATE) AS `date` +FROM `DATABASE_IO` +INNER +JOIN `APPLICATION` ON (`DATABASE_IO`.`Base` = `APPLICATION`.`Base`) +WHERE ( + CAST(CAST(`DATABASE_IO`.`Date` AS DATE) AS TIMESTAMP) >= toDateTime('2020-01-01 00:00:00') +); + +DROP TABLE APPLICATION; +DROP TABLE DATABASE_IO; diff --git a/parser/testdata/01483_merge_table_join_and_group_by/ast.json b/parser/testdata/01483_merge_table_join_and_group_by/ast.json new file mode 100644 index 000000000..ff0a41923 --- /dev/null +++ b/parser/testdata/01483_merge_table_join_and_group_by/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery a (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001027598, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01483_merge_table_join_and_group_by/metadata.json b/parser/testdata/01483_merge_table_join_and_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01483_merge_table_join_and_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01483_merge_table_join_and_group_by/query.sql b/parser/testdata/01483_merge_table_join_and_group_by/query.sql new file mode 100644 index 000000000..68b4e7d40 --- /dev/null +++ b/parser/testdata/01483_merge_table_join_and_group_by/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS a; +DROP TABLE IF EXISTS b; +DROP TABLE IF EXISTS m; + +CREATE TABLE a (key UInt32) ENGINE = MergeTree ORDER BY key; +CREATE TABLE b (key UInt32, ID UInt32) ENGINE = MergeTree ORDER BY key; +CREATE TABLE m (key UInt32) ENGINE = Merge(currentDatabase(), 'a'); + +INSERT INTO a VALUES (0); +INSERT INTO b VALUES (0, 1); + +SELECT * FROM m INNER JOIN b USING(key); +SELECT * FROM a INNER JOIN b USING(key) GROUP BY ID, key; +SELECT * FROM m INNER JOIN b USING(key) WHERE ID = 1; +SELECT * FROM m INNER JOIN b USING(key) GROUP BY ID, key; +SELECT ID FROM m INNER JOIN b USING(key) GROUP BY ID; +SELECT * FROM m INNER JOIN b USING(key) WHERE ID = 1 HAVING ID = 1 ORDER BY ID; +SELECT * FROM m INNER JOIN b USING(key) WHERE ID = 1 GROUP BY ID, key HAVING ID = 1 ORDER BY ID; + +SELECT sum(b.ID), sum(m.key) FROM m FULL JOIN b ON (m.key == b.key) GROUP BY key; +SELECT sum(b.ID + m.key) FROM m FULL JOIN b ON (m.key == b.key) GROUP BY key; + +DROP 
TABLE IF EXISTS a; +DROP TABLE IF EXISTS b; +DROP TABLE IF EXISTS m; diff --git a/parser/testdata/01485_256_bit_multiply/ast.json b/parser/testdata/01485_256_bit_multiply/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01485_256_bit_multiply/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01485_256_bit_multiply/metadata.json b/parser/testdata/01485_256_bit_multiply/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01485_256_bit_multiply/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01485_256_bit_multiply/query.sql b/parser/testdata/01485_256_bit_multiply/query.sql new file mode 100644 index 000000000..a4e99d519 --- /dev/null +++ b/parser/testdata/01485_256_bit_multiply/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-random-settings, no-asan, no-msan, no-tsan, no-ubsan, no-debug + +SET max_rows_to_read = '100M'; + +select count() from +( + select toInt128(number) * number x, toInt256(number) * number y from numbers_mt(100000000) where x != y +); diff --git a/parser/testdata/01486_json_array_output/ast.json b/parser/testdata/01486_json_array_output/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01486_json_array_output/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01486_json_array_output/metadata.json b/parser/testdata/01486_json_array_output/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01486_json_array_output/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01486_json_array_output/query.sql b/parser/testdata/01486_json_array_output/query.sql new file mode 100644 index 000000000..a93180088 --- /dev/null +++ b/parser/testdata/01486_json_array_output/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest + +set output_format_json_array_of_rows = 1; +select number a, number * 2 b from numbers(3) format JSONEachRow; +select * from numbers(1) format JSONEachRow; +select * from numbers(1) where null format JSONEachRow; diff --git a/parser/testdata/01490_nullable_string_to_enum/ast.json b/parser/testdata/01490_nullable_string_to_enum/ast.json new file mode 100644 index 000000000..29b5c081f --- /dev/null +++ b/parser/testdata/01490_nullable_string_to_enum/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_enum (children 1)" + }, + { + "explain": " Identifier t_enum" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00112912, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/01490_nullable_string_to_enum/metadata.json b/parser/testdata/01490_nullable_string_to_enum/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01490_nullable_string_to_enum/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01490_nullable_string_to_enum/query.sql b/parser/testdata/01490_nullable_string_to_enum/query.sql new file mode 100644 index 000000000..e0624af4a --- /dev/null +++ b/parser/testdata/01490_nullable_string_to_enum/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS t_enum; +DROP TABLE IF EXISTS t_source; + +CREATE TABLE t_enum(x Enum8('hello' = 1, 'world' = 2)) ENGINE = TinyLog; +CREATE TABLE t_source(x Nullable(String)) ENGINE = TinyLog; + +INSERT INTO t_source (x) VALUES ('hello'); +INSERT INTO t_enum(x) SELECT x from t_source WHERE x in ('hello', 'world'); 
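+-- Note: the IN filter above drops NULLs and any string that is not among the Enum8 values, so the Nullable(String) -> Enum8 conversion cannot fail on an unknown value.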
+SELECT * FROM t_enum; + +DROP TABLE IF EXISTS t_enum; +DROP TABLE IF EXISTS t_source; diff --git a/parser/testdata/01491_nested_multiline_comments/ast.json b/parser/testdata/01491_nested_multiline_comments/ast.json new file mode 100644 index 000000000..cf597f08a --- /dev/null +++ b/parser/testdata/01491_nested_multiline_comments/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001403508, + "rows_read": 5, + "bytes_read": 177 + } +} diff --git a/parser/testdata/01491_nested_multiline_comments/metadata.json b/parser/testdata/01491_nested_multiline_comments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01491_nested_multiline_comments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01491_nested_multiline_comments/query.sql b/parser/testdata/01491_nested_multiline_comments/query.sql new file mode 100644 index 000000000..4c6f76347 --- /dev/null +++ b/parser/testdata/01491_nested_multiline_comments/query.sql @@ -0,0 +1,3 @@ +SELECT /*/**/*/ 1; +SELECT /*a/*b*/c*/ 1; +SELECT /*ab/*cd*/ef*/ 1; diff --git a/parser/testdata/01492_array_join_crash_13829/ast.json b/parser/testdata/01492_array_join_crash_13829/ast.json new file mode 100644 index 000000000..872a0455e --- /dev/null +++ b/parser/testdata/01492_array_join_crash_13829/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function countEqual (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[NULL, NULL, NULL]" + }, + { + "explain": " Literal NULL (alias x)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_255, UInt64_1025, NULL, NULL]" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_2, UInt64_1048576, NULL, NULL]" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.001698168, + "rows_read": 27, + "bytes_read": 1214 + } +} diff --git a/parser/testdata/01492_array_join_crash_13829/metadata.json b/parser/testdata/01492_array_join_crash_13829/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01492_array_join_crash_13829/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01492_array_join_crash_13829/query.sql b/parser/testdata/01492_array_join_crash_13829/query.sql new file mode 100644 index 000000000..9e11c3b69 --- /dev/null +++ b/parser/testdata/01492_array_join_crash_13829/query.sql @@ -0,0 +1 @@ +SELECT NULL = countEqual(materialize([arrayJoin([NULL, NULL, NULL]), NULL AS x, arrayJoin([255, 1025, NULL, NULL]), arrayJoin([2, 1048576, NULL, NULL])]), materialize(x)) format Null; diff --git a/parser/testdata/01492_format_readable_quantity/ast.json b/parser/testdata/01492_format_readable_quantity/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01492_format_readable_quantity/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01492_format_readable_quantity/metadata.json b/parser/testdata/01492_format_readable_quantity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01492_format_readable_quantity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01492_format_readable_quantity/query.sql b/parser/testdata/01492_format_readable_quantity/query.sql new file mode 100644 index 000000000..93aa570cc --- /dev/null +++ b/parser/testdata/01492_format_readable_quantity/query.sql @@ -0,0 +1,4 @@ +WITH round(exp(number), 6) AS x, toUInt64(x) AS y, toInt32(min2(x, 2147483647)) AS z +SELECT formatReadableQuantity(x), formatReadableQuantity(y), formatReadableQuantity(z) +FROM system.numbers +LIMIT 45; diff --git a/parser/testdata/01493_alter_remove_no_property_zookeeper_long/ast.json b/parser/testdata/01493_alter_remove_no_property_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01493_alter_remove_no_property_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01493_alter_remove_no_property_zookeeper_long/metadata.json b/parser/testdata/01493_alter_remove_no_property_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01493_alter_remove_no_property_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01493_alter_remove_no_property_zookeeper_long/query.sql b/parser/testdata/01493_alter_remove_no_property_zookeeper_long/query.sql new file mode 100644 index 000000000..a00e10b61 --- /dev/null +++ b/parser/testdata/01493_alter_remove_no_property_zookeeper_long/query.sql @@ -0,0 +1,53 @@ +-- Tags: long, zookeeper + +DROP TABLE IF EXISTS no_prop_table; + +CREATE TABLE no_prop_table +( + some_column UInt64 +) +ENGINE MergeTree() +ORDER BY tuple(); + +SHOW CREATE TABLE no_prop_table; + +-- removing a property that was never set is an error +ALTER TABLE no_prop_table MODIFY COLUMN some_column REMOVE DEFAULT; --{serverError BAD_ARGUMENTS} +ALTER TABLE no_prop_table MODIFY COLUMN some_column REMOVE MATERIALIZED; --{serverError BAD_ARGUMENTS} +ALTER TABLE no_prop_table MODIFY COLUMN some_column REMOVE ALIAS; --{serverError BAD_ARGUMENTS} +ALTER TABLE no_prop_table MODIFY COLUMN some_column REMOVE CODEC; --{serverError BAD_ARGUMENTS} +ALTER TABLE no_prop_table MODIFY COLUMN some_column REMOVE COMMENT; --{serverError BAD_ARGUMENTS} +ALTER TABLE no_prop_table MODIFY COLUMN some_column REMOVE TTL; --{serverError BAD_ARGUMENTS} + +ALTER TABLE no_prop_table REMOVE TTL; --{serverError BAD_ARGUMENTS} + +SHOW CREATE TABLE no_prop_table; + +DROP TABLE IF
EXISTS no_prop_table; + +DROP TABLE IF EXISTS r_no_prop_table; + +CREATE TABLE r_no_prop_table +( + some_column UInt64 +) +ENGINE ReplicatedMergeTree('/clickhouse/{database}/test/01493_r_no_prop_table', '1') +ORDER BY tuple(); + +SHOW CREATE TABLE r_no_prop_table; + +ALTER TABLE r_no_prop_table MODIFY COLUMN some_column REMOVE DEFAULT; --{serverError BAD_ARGUMENTS} +ALTER TABLE r_no_prop_table MODIFY COLUMN some_column REMOVE MATERIALIZED; --{serverError BAD_ARGUMENTS} +ALTER TABLE r_no_prop_table MODIFY COLUMN some_column REMOVE ALIAS; --{serverError BAD_ARGUMENTS} +ALTER TABLE r_no_prop_table MODIFY COLUMN some_column REMOVE CODEC; --{serverError BAD_ARGUMENTS} +ALTER TABLE r_no_prop_table MODIFY COLUMN some_column REMOVE COMMENT; --{serverError BAD_ARGUMENTS} +ALTER TABLE r_no_prop_table MODIFY COLUMN some_column REMOVE TTL; --{serverError BAD_ARGUMENTS} + +ALTER TABLE r_no_prop_table REMOVE TTL; --{serverError BAD_ARGUMENTS} + +SHOW CREATE TABLE r_no_prop_table; + +ALTER TABLE r_no_prop_table MODIFY COLUMN some_column REMOVE ttl; --{serverError BAD_ARGUMENTS} +ALTER TABLE r_no_prop_table remove TTL; --{serverError BAD_ARGUMENTS} + +DROP TABLE IF EXISTS r_no_prop_table; diff --git a/parser/testdata/01493_alter_remove_properties/ast.json b/parser/testdata/01493_alter_remove_properties/ast.json new file mode 100644 index 000000000..743ebe761 --- /dev/null +++ b/parser/testdata/01493_alter_remove_properties/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery prop_table (children 1)" + }, + { + "explain": " Identifier prop_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001424097, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01493_alter_remove_properties/metadata.json b/parser/testdata/01493_alter_remove_properties/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01493_alter_remove_properties/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01493_alter_remove_properties/query.sql b/parser/testdata/01493_alter_remove_properties/query.sql new file mode 100644 index 000000000..01213ccd8 --- /dev/null +++ b/parser/testdata/01493_alter_remove_properties/query.sql @@ -0,0 +1,72 @@ +DROP TABLE IF EXISTS prop_table; + +CREATE TABLE prop_table +( + column_default UInt64 DEFAULT 42, + column_materialized UInt64 MATERIALIZED column_default * 42, + column_alias UInt64 ALIAS column_default + 1, + column_codec String CODEC(ZSTD(10)), + column_comment Date COMMENT 'Some comment', + column_ttl UInt64 TTL column_comment + INTERVAL 1 MONTH +) +ENGINE MergeTree() +ORDER BY tuple() +TTL column_comment + INTERVAL 2 MONTH; + +SHOW CREATE TABLE prop_table; + +SYSTEM STOP TTL MERGES prop_table; + +INSERT INTO prop_table (column_codec, column_comment, column_ttl) VALUES ('str', toDate('2019-10-01'), 1); + +SELECT column_default, column_materialized, column_alias, column_codec, column_comment, column_ttl FROM prop_table; + +ALTER TABLE prop_table MODIFY COLUMN column_comment REMOVE COMMENT; + +SHOW CREATE TABLE prop_table; + +ALTER TABLE prop_table MODIFY COLUMN column_codec REMOVE CODEC; + +SHOW CREATE TABLE prop_table; + +ALTER TABLE prop_table MODIFY COLUMN column_alias REMOVE ALIAS; + +SELECT column_default, column_materialized, column_alias, column_codec, column_comment, column_ttl FROM prop_table; + +SHOW CREATE TABLE prop_table; + +INSERT INTO prop_table (column_alias, column_codec, column_comment, 
column_ttl) VALUES (33, 'trs', toDate('2020-01-01'), 2); + +SELECT column_default, column_materialized, column_alias, column_codec, column_comment, column_ttl FROM prop_table ORDER BY column_ttl; + +ALTER TABLE prop_table MODIFY COLUMN column_materialized REMOVE MATERIALIZED; + +SHOW CREATE TABLE prop_table; + +INSERT INTO prop_table (column_materialized, column_alias, column_codec, column_comment, column_ttl) VALUES (11, 44, 'rts', toDate('2020-02-01'), 3); + +SELECT column_default, column_materialized, column_alias, column_codec, column_comment, column_ttl FROM prop_table ORDER BY column_ttl; + +ALTER TABLE prop_table MODIFY COLUMN column_default REMOVE DEFAULT; + +SHOW CREATE TABLE prop_table; + +INSERT INTO prop_table (column_materialized, column_alias, column_codec, column_comment, column_ttl) VALUES (22, 55, 'tsr', toDate('2020-03-01'), 4); + +SELECT column_default, column_materialized, column_alias, column_codec, column_comment, column_ttl FROM prop_table ORDER BY column_ttl; + +ALTER TABLE prop_table REMOVE TTL; + +SHOW CREATE TABLE prop_table; + +ALTER TABLE prop_table MODIFY COLUMN column_ttl REMOVE TTL; + +SHOW CREATE TABLE prop_table; + +SYSTEM START TTL MERGES prop_table; + +OPTIMIZE TABLE prop_table FINAL; + +SELECT COUNT() FROM prop_table; + +DROP TABLE IF EXISTS prop_table; diff --git a/parser/testdata/01493_alter_remove_properties_zookeeper/ast.json b/parser/testdata/01493_alter_remove_properties_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01493_alter_remove_properties_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01493_alter_remove_properties_zookeeper/metadata.json b/parser/testdata/01493_alter_remove_properties_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01493_alter_remove_properties_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01493_alter_remove_properties_zookeeper/query.sql b/parser/testdata/01493_alter_remove_properties_zookeeper/query.sql new file mode 100644 index 000000000..362da3ac3 --- /dev/null +++ b/parser/testdata/01493_alter_remove_properties_zookeeper/query.sql @@ -0,0 +1,94 @@ +-- Tags: zookeeper + +DROP TABLE IF EXISTS r_prop_table1; +DROP TABLE IF EXISTS r_prop_table2; + +SET replication_alter_partitions_sync = 2; + +CREATE TABLE r_prop_table1 +( + column_default UInt64 DEFAULT 42, + column_codec String CODEC(ZSTD(10)), + column_comment Date COMMENT 'Some comment', + column_ttl UInt64 TTL column_comment + INTERVAL 1 MONTH +) +ENGINE ReplicatedMergeTree('/clickhouse/{database}/test_01493/r_prop_table', '1') +ORDER BY tuple() +TTL column_comment + INTERVAL 2 MONTH; + +CREATE TABLE r_prop_table2 +( + column_default UInt64 DEFAULT 42, + column_codec String CODEC(ZSTD(10)), + column_comment Date COMMENT 'Some comment', + column_ttl UInt64 TTL column_comment + INTERVAL 1 MONTH +) +ENGINE ReplicatedMergeTree('/clickhouse/{database}/test_01493/r_prop_table', '2') +ORDER BY tuple() +TTL column_comment + INTERVAL 2 MONTH; + +SHOW CREATE TABLE r_prop_table1; +SHOW CREATE TABLE r_prop_table2; + +INSERT INTO r_prop_table1 (column_codec, column_comment, column_ttl) VALUES ('str', toDate('2100-01-01'), 1); + +SYSTEM SYNC REPLICA r_prop_table2; + +SELECT '====== remove column comment ======'; +ALTER TABLE r_prop_table1 MODIFY COLUMN column_comment REMOVE COMMENT; + +SHOW CREATE TABLE r_prop_table1; +SHOW CREATE TABLE r_prop_table2; + +DETACH TABLE r_prop_table1; +ATTACH TABLE 
r_prop_table1; + +SELECT '====== remove column codec ======'; +ALTER TABLE r_prop_table2 MODIFY COLUMN column_codec REMOVE CODEC; + +SHOW CREATE TABLE r_prop_table1; +SHOW CREATE TABLE r_prop_table2; + +SELECT '====== remove column default ======'; +ALTER TABLE r_prop_table2 MODIFY COLUMN column_default REMOVE DEFAULT; + +INSERT INTO r_prop_table1 (column_codec, column_comment, column_ttl) VALUES ('tsr', now(), 2); + +SYSTEM SYNC REPLICA r_prop_table2; + +SELECT column_default, column_codec, column_ttl FROM r_prop_table1 ORDER BY column_ttl; + +DETACH TABLE r_prop_table2; +ATTACH TABLE r_prop_table2; + +SHOW CREATE TABLE r_prop_table1; +SHOW CREATE TABLE r_prop_table2; + +SELECT '====== remove column TTL ======'; +ALTER TABLE r_prop_table2 MODIFY COLUMN column_ttl REMOVE TTL; + +SHOW CREATE TABLE r_prop_table1; +SHOW CREATE TABLE r_prop_table2; + +SELECT '====== remove table TTL ======'; +ALTER TABLE r_prop_table1 REMOVE TTL; + +INSERT INTO r_prop_table1 (column_codec, column_comment, column_ttl) VALUES ('rts', now() - INTERVAL 1 YEAR, 3); + +SYSTEM SYNC REPLICA r_prop_table2; + +DETACH TABLE r_prop_table2; +ATTACH TABLE r_prop_table2; + +SHOW CREATE TABLE r_prop_table1; +SHOW CREATE TABLE r_prop_table2; + +OPTIMIZE TABLE r_prop_table2 FINAL; + +SYSTEM SYNC REPLICA r_prop_table1; + +SELECT COUNT() FROM r_prop_table1; +SELECT COUNT() FROM r_prop_table2; + +DROP TABLE IF EXISTS r_prop_table1; +DROP TABLE IF EXISTS r_prop_table2; diff --git a/parser/testdata/01493_alter_remove_wrong_default/ast.json b/parser/testdata/01493_alter_remove_wrong_default/ast.json new file mode 100644 index 000000000..b43342eae --- /dev/null +++ b/parser/testdata/01493_alter_remove_wrong_default/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery default_table (children 1)" + }, + { + "explain": " Identifier default_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001351555, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/01493_alter_remove_wrong_default/metadata.json b/parser/testdata/01493_alter_remove_wrong_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01493_alter_remove_wrong_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01493_alter_remove_wrong_default/query.sql b/parser/testdata/01493_alter_remove_wrong_default/query.sql new file mode 100644 index 000000000..3cd8e9839 --- /dev/null +++ b/parser/testdata/01493_alter_remove_wrong_default/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS default_table; + +CREATE TABLE default_table ( + key UInt64 DEFAULT 42, + value1 UInt64 MATERIALIZED key * key, + value2 ALIAS value1 * key +) +ENGINE = MergeTree() +ORDER BY tuple(); + +ALTER TABLE default_table MODIFY COLUMN key REMOVE MATERIALIZED; --{serverError BAD_ARGUMENTS} +ALTER TABLE default_table MODIFY COLUMN key REMOVE ALIAS; --{serverError BAD_ARGUMENTS} + +ALTER TABLE default_table MODIFY COLUMN value1 REMOVE DEFAULT; --{serverError BAD_ARGUMENTS} +ALTER TABLE default_table MODIFY COLUMN value1 REMOVE ALIAS; --{serverError BAD_ARGUMENTS} + +ALTER TABLE default_table MODIFY COLUMN value2 REMOVE DEFAULT; --{serverError BAD_ARGUMENTS} +ALTER TABLE default_table MODIFY COLUMN value2 REMOVE MATERIALIZED; --{serverError BAD_ARGUMENTS} + +SHOW CREATE TABLE default_table; + +DROP TABLE IF EXISTS default_table; diff --git a/parser/testdata/01493_storage_set_persistency/ast.json 
b/parser/testdata/01493_storage_set_persistency/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01493_storage_set_persistency/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01493_storage_set_persistency/metadata.json b/parser/testdata/01493_storage_set_persistency/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01493_storage_set_persistency/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01493_storage_set_persistency/query.sql b/parser/testdata/01493_storage_set_persistency/query.sql new file mode 100644 index 000000000..558e92fbf --- /dev/null +++ b/parser/testdata/01493_storage_set_persistency/query.sql @@ -0,0 +1,35 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS set; +DROP TABLE IF EXISTS number; + +CREATE TABLE number (number UInt64) ENGINE = Memory(); +INSERT INTO number values (1); + +SELECT '----- Default Settings -----'; +CREATE TABLE set (val UInt64) ENGINE = Set(); +INSERT INTO set VALUES (1); +DETACH TABLE set; +ATTACH TABLE set; +SELECT number FROM number WHERE number IN set LIMIT 1; + +DROP TABLE set; + +SELECT '----- Settings persistent=1 -----'; +CREATE TABLE set (val UInt64) ENGINE = Set() SETTINGS persistent=1; +INSERT INTO set VALUES (1); +DETACH TABLE set; +ATTACH TABLE set; +SELECT number FROM number WHERE number IN set LIMIT 1; + +DROP TABLE set; + +SELECT '----- Settings persistent=0 -----'; +CREATE TABLE set (val UInt64) ENGINE = Set() SETTINGS persistent=0; +INSERT INTO set VALUES (1); +DETACH TABLE set; +ATTACH TABLE set; +SELECT number FROM number WHERE number IN set LIMIT 1; + +DROP TABLE set; +DROP TABLE number; diff --git a/parser/testdata/01493_table_function_null/ast.json b/parser/testdata/01493_table_function_null/ast.json new file mode 100644 index 000000000..9433a5a4a --- /dev/null +++ b/parser/testdata/01493_table_function_null/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "InsertQuery (children 2)" + }, + { + "explain": " Function null (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'number UInt64'" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers_mt (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10000" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001381391, + "rows_read": 15, + "bytes_read": 579 + } +} diff --git a/parser/testdata/01493_table_function_null/metadata.json b/parser/testdata/01493_table_function_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01493_table_function_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01493_table_function_null/query.sql b/parser/testdata/01493_table_function_null/query.sql new file mode 100644 index 000000000..e6b3e652b --- /dev/null +++ b/parser/testdata/01493_table_function_null/query.sql @@ -0,0 +1 @@ +INSERT INTO function null('number UInt64') SELECT * FROM 
numbers_mt(10000); diff --git a/parser/testdata/01494_storage_join_persistency/ast.json b/parser/testdata/01494_storage_join_persistency/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01494_storage_join_persistency/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01494_storage_join_persistency/metadata.json b/parser/testdata/01494_storage_join_persistency/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01494_storage_join_persistency/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01494_storage_join_persistency/query.sql b/parser/testdata/01494_storage_join_persistency/query.sql new file mode 100644 index 000000000..9ea7196aa --- /dev/null +++ b/parser/testdata/01494_storage_join_persistency/query.sql @@ -0,0 +1,30 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS join; + +SELECT '----- Default Settings -----'; +CREATE TABLE join (k UInt64, s String) ENGINE = Join(ANY, LEFT, k); +INSERT INTO join VALUES (1,21); +DETACH TABLE join; +ATTACH TABLE join; +SELECT * from join; + +DROP TABLE join; + +SELECT '----- Settings persistent=1 -----'; +CREATE TABLE join (k UInt64, s String) ENGINE = Join(ANY, LEFT, k) SETTINGS persistent=1; +INSERT INTO join VALUES (1,21); +DETACH TABLE join; +ATTACH TABLE join; +SELECT * from join; + +DROP TABLE join; + +SELECT '----- Settings persistent=0 -----'; +CREATE TABLE join (k UInt64, s String) ENGINE = Join(ANY, LEFT, k) SETTINGS persistent=0; +INSERT INTO join VALUES (1,21); +DETACH TABLE join; +ATTACH TABLE join; +SELECT * from join; + +DROP TABLE join; diff --git a/parser/testdata/01495_subqueries_in_with_statement/ast.json b/parser/testdata/01495_subqueries_in_with_statement/ast.json new file mode 100644 index 000000000..d8c022734 --- /dev/null +++ b/parser/testdata/01495_subqueries_in_with_statement/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test1 (children 1)" + }, + { + "explain": " Identifier test1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001333971, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/01495_subqueries_in_with_statement/metadata.json b/parser/testdata/01495_subqueries_in_with_statement/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01495_subqueries_in_with_statement/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01495_subqueries_in_with_statement/query.sql b/parser/testdata/01495_subqueries_in_with_statement/query.sql new file mode 100644 index 000000000..819346be1 --- /dev/null +++ b/parser/testdata/01495_subqueries_in_with_statement/query.sql @@ -0,0 +1,79 @@ +DROP TABLE IF EXISTS test1; + +CREATE TABLE test1(i int, j int) ENGINE Log; + +INSERT INTO test1 VALUES (1, 2), (3, 4); + +WITH test1 AS (SELECT * FROM numbers(5)) SELECT * FROM test1; +WITH test1 AS (SELECT i + 1, j + 1 FROM test1) SELECT * FROM test1; +WITH test1 AS (SELECT i + 1, j + 1 FROM test1) SELECT * FROM (SELECT * FROM test1); +SELECT * FROM (WITH test1 AS (SELECT toInt32(*) i FROM numbers(5)) SELECT * FROM test1) l ANY INNER JOIN test1 r on (l.i == r.i); +WITH test1 AS (SELECT i + 1, j + 1 FROM test1) SELECT toInt64(4) i, toInt64(5) j FROM numbers(3) WHERE (i, j) IN test1; + +DROP TABLE IF EXISTS test1; + +select '---------------------------'; + +set empty_result_for_aggregation_by_empty_set = 0; + +WITH test1 AS (SELECT 
number-1 as n FROM numbers(42)) +SELECT max(n+1)+1 z FROM test1; + +WITH test1 AS (SELECT number-1 as n FROM numbers(42)) +SELECT max(n+1)+1 z FROM test1 join test1 x using n having z - 1 = (select min(n-1)+41 from test1) + 2; + +WITH test1 AS (SELECT number-1 as n FROM numbers(4442) order by n limit 100) +SELECT max(n) FROM test1 where n=422; + +WITH test1 AS (SELECT number-1 as n FROM numbers(4442) order by n limit 100) +SELECT max(n) FROM test1 where n=42; + +drop table if exists with_test ; +create table with_test engine=Memory as select cast(number-1 as Nullable(Int64)) n from numbers(10000); + +WITH test1 AS (SELECT n FROM with_test where n <= 40) +SELECT max(n+1)+1 z FROM test1 join test1 x using (n) having max(n+1)+1 - 1 = (select min(n-1)+41 from test1) + 2; + +WITH test1 AS (SELECT n FROM with_test where n <= 40) +SELECT max(n+1)+1 z FROM test1 join test1 x using (n) having z - 1 = (select min(n-1)+41 from test1) + 2; + +WITH test1 AS (SELECT n FROM with_test order by n limit 100) +SELECT max(n) FROM test1 where n=422; + +WITH test1 AS (SELECT n FROM with_test order by n limit 100) +SELECT max(n) FROM test1 where n=42; + +WITH test1 AS (SELECT n FROM with_test where n = 42 order by n limit 100) +SELECT max(n) FROM test1 where n=42; + +WITH test1 AS (SELECT n FROM with_test where n = 42 or 1=1 order by n limit 100) +SELECT max(n) FROM test1 where n=42; + +WITH test1 AS (SELECT n, null as b FROM with_test where n = 42 or b is null order by n limit 100) +SELECT max(n) FROM test1 where n=42; + +WITH test1 AS (SELECT n, null b FROM with_test where b is null) +SELECT max(n) FROM test1 where n=42; + +WITH test1 AS (SELECT n, null b FROM with_test where b is null or 1=1) +SELECT max(n) FROM test1 where n=45; + +WITH test1 AS (SELECT n, null b FROM with_test where b is null and n = 42) +SELECT max(n) FROM test1 where n=45; + +WITH test1 AS (SELECT n, null b FROM with_test where 1=1 and n = 42 order by n) +SELECT max(n) FROM test1 where n=45; + +WITH test1 AS (SELECT n, null b, n+1 m FROM with_test where 1=0 or n = 42 order by n limit 4) +SELECT max(n) m FROM test1 where test1.m=43 having max(n)=42; + +WITH test1 AS (SELECT n, null b, n+1 m FROM with_test where n = 42 order by n limit 4) +SELECT max(n) m FROM test1 where b is null and test1.m=43 having m=42 limit 4; + +with + test1 as (select n, null b, n+1 m from with_test where n = 42 order by n limit 4), + test2 as (select n + 1 as x, n - 1 as y from test1), + test3 as (select x * y as z from test2) +select z + 1 as q from test3; + +drop table with_test ; diff --git a/parser/testdata/01495_subqueries_in_with_statement_2/ast.json b/parser/testdata/01495_subqueries_in_with_statement_2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01495_subqueries_in_with_statement_2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01495_subqueries_in_with_statement_2/metadata.json b/parser/testdata/01495_subqueries_in_with_statement_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01495_subqueries_in_with_statement_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01495_subqueries_in_with_statement_2/query.sql b/parser/testdata/01495_subqueries_in_with_statement_2/query.sql new file mode 100644 index 000000000..43dff687d --- /dev/null +++ b/parser/testdata/01495_subqueries_in_with_statement_2/query.sql @@ -0,0 +1,47 @@ + +WITH +x AS (SELECT number AS a FROM numbers(10)), +y AS (SELECT number AS a FROM numbers(5)) 
+SELECT * FROM x WHERE a in (SELECT a FROM y) +ORDER BY a; + +WITH +x AS (SELECT number AS a FROM numbers(10)), +y AS (SELECT number AS a FROM numbers(5)) +SELECT * FROM x left JOIN y USING a +ORDER BY a; + +WITH +x AS (SELECT number AS a FROM numbers(10)), +y AS (SELECT number AS a FROM numbers(5)) +SELECT * FROM x JOIN y USING a +ORDER BY x.a; + +WITH +x AS (SELECT number AS a FROM numbers(10)), +y AS (SELECT number AS a FROM numbers(5)), +z AS (SELECT toUInt64(1) b) +SELECT * FROM x JOIN y USING a WHERE a in (SELECT * FROM z); + +WITH +x AS (SELECT number AS a FROM numbers(10)), +y AS (SELECT number AS a FROM numbers(5)), +z AS (SELECT * FROM x WHERE a % 2), +w AS (SELECT * FROM y WHERE a > 0) +SELECT * FROM x JOIN y USING a WHERE a in (SELECT * FROM z) +ORDER BY x.a; + +WITH +x AS (SELECT number AS a FROM numbers(10)), +y AS (SELECT number AS a FROM numbers(5)), +z AS (SELECT * FROM x WHERE a % 2), +w AS (SELECT * FROM y WHERE a > 0) +SELECT max(a) FROM x JOIN y USING a WHERE a in (SELECT * FROM z) AND a > (SELECT min(a) FROM w); + +WITH +x AS (SELECT number AS a FROM numbers(10)), +y AS (SELECT number AS a FROM numbers(5)), +z AS (SELECT * FROM x WHERE a % 2), +w AS (SELECT * FROM y WHERE a > 0) +SELECT a FROM x JOIN y USING a WHERE a in (SELECT * FROM z) AND a <= (SELECT max(a) FROM w) +ORDER BY x.a; diff --git a/parser/testdata/01495_subqueries_in_with_statement_3/ast.json b/parser/testdata/01495_subqueries_in_with_statement_3/ast.json new file mode 100644 index 000000000..432b5a41c --- /dev/null +++ b/parser/testdata/01495_subqueries_in_with_statement_3/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery cte1 (children 1)" + }, + { + "explain": " Identifier cte1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001368598, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01495_subqueries_in_with_statement_3/metadata.json b/parser/testdata/01495_subqueries_in_with_statement_3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01495_subqueries_in_with_statement_3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01495_subqueries_in_with_statement_3/query.sql b/parser/testdata/01495_subqueries_in_with_statement_3/query.sql new file mode 100644 index 000000000..36cba596d --- /dev/null +++ b/parser/testdata/01495_subqueries_in_with_statement_3/query.sql @@ -0,0 +1,82 @@ +DROP TABLE IF EXISTS cte1; +DROP TABLE IF EXISTS cte2; + +CREATE TABLE cte1(a Int64) ENGINE=Memory; +CREATE TABLE cte2(a Int64) ENGINE=Memory; + +INSERT INTO cte1 SELECT * FROM numbers(10000); +INSERT INTO cte2 SELECT * FROM numbers(5000); + +WITH +x AS (SELECT * FROM cte1), +y AS (SELECT * FROM cte2), +z AS (SELECT * FROM x WHERE a % 2 = 1), +w AS (SELECT * FROM y WHERE a > 333) +SELECT max(a) +FROM x JOIN y USING (a) +WHERE a in (SELECT * FROM z) AND a <= (SELECT max(a) FROM w); + +WITH +x AS (SELECT * FROM cte1), +y AS (SELECT * FROM cte2), +z AS (SELECT * FROM x WHERE a % 3 = 1), +w AS (SELECT * FROM y WHERE a > 333 AND a < 1000) +SELECT count(a) +FROM x left JOIN y USING (a) +WHERE a in (SELECT * FROM z) AND a <= (SELECT max(a) FROM w); + +WITH +x AS (SELECT * FROM cte1), +y AS (SELECT * FROM cte2), +z AS (SELECT * FROM x WHERE a % 3 = 1), +w AS (SELECT * FROM y WHERE a > 333 AND a < 1000) +SELECT count(a) +FROM x left JOIN y USING (a) +WHERE a in (SELECT * FROM z); + +WITH +x AS (SELECT a-4000 a FROM cte1 WHERE cte1.a >700), 
+y AS (SELECT * FROM cte2), +z AS (SELECT * FROM x WHERE a % 3 = 1), +w AS (SELECT * FROM y WHERE a > 333 AND a < 1000) +SELECT count(*) +FROM x left JOIN y USING (a) +WHERE a in (SELECT * FROM z); + +WITH +x AS (SELECT a-4000 a FROM cte1 WHERE cte1.a >700), +y AS (SELECT * FROM cte2), +z AS (SELECT * FROM x WHERE a % 3 = 1), +w AS (SELECT * FROM y WHERE a > 333 AND a < 1000) +SELECT max(a), min(a), count(*) +FROM x +WHERE a in (SELECT * FROM z) AND a <100; + +WITH +x AS (SELECT a-4000 a FROM cte1 WHERE cte1.a >700), +y AS (SELECT * FROM cte2), +z AS (SELECT * FROM x WHERE a % 3 = 1), +w AS (SELECT * FROM y WHERE a > 333 AND a < 1000) +SELECT max(a), min(a), count(*) FROM x +WHERE a <100; + +WITH +x AS (SELECT a-4000 a FROM cte1 AS t WHERE cte1.a >700), +y AS (SELECT * FROM cte2), +z AS (SELECT * FROM x WHERE a % 3 = 1), +w AS (SELECT * FROM y WHERE a > 333 AND a < 1000) +SELECT max(a), min(a), count(*) +FROM y +WHERE a <100; + +WITH +x AS (SELECT a-4000 a FROM cte1 t WHERE t.a >700), +y AS (SELECT x.a a FROM x left JOIN cte1 USING (a)), +z AS (SELECT * FROM x WHERE a % 3 = 1), +w AS (SELECT * FROM y WHERE a > 333 AND a < 1000) +SELECT max(a), min(a), count(*) +FROM y +WHERE a <100; + +DROP TABLE cte1; +DROP TABLE cte2; diff --git a/parser/testdata/01495_subqueries_in_with_statement_4/ast.json b/parser/testdata/01495_subqueries_in_with_statement_4/ast.json new file mode 100644 index 000000000..fbc29485d --- /dev/null +++ b/parser/testdata/01495_subqueries_in_with_statement_4/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " WithElement (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier it.number" + }, + { + "explain": " Identifier i.number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier it (alias i)" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.00152984, + "rows_read": 24, + "bytes_read": 984 + } +} diff --git a/parser/testdata/01495_subqueries_in_with_statement_4/metadata.json b/parser/testdata/01495_subqueries_in_with_statement_4/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01495_subqueries_in_with_statement_4/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01495_subqueries_in_with_statement_4/query.sql b/parser/testdata/01495_subqueries_in_with_statement_4/query.sql new file mode 100644 index 000000000..73bfcf32c --- /dev/null +++ 
b/parser/testdata/01495_subqueries_in_with_statement_4/query.sql @@ -0,0 +1,2 @@ +with it as ( select * from numbers(1) ) select it.number, i.number from it as i; +explain syntax with it as ( select * from numbers(1) ) select it.number, i.number from it as i; diff --git a/parser/testdata/01496_signedness_conversion_monotonicity/ast.json b/parser/testdata/01496_signedness_conversion_monotonicity/ast.json new file mode 100644 index 000000000..3b34e17d6 --- /dev/null +++ b/parser/testdata/01496_signedness_conversion_monotonicity/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test1 (children 1)" + }, + { + "explain": " Identifier test1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001228803, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/01496_signedness_conversion_monotonicity/metadata.json b/parser/testdata/01496_signedness_conversion_monotonicity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01496_signedness_conversion_monotonicity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01496_signedness_conversion_monotonicity/query.sql b/parser/testdata/01496_signedness_conversion_monotonicity/query.sql new file mode 100644 index 000000000..5c87ba3c5 --- /dev/null +++ b/parser/testdata/01496_signedness_conversion_monotonicity/query.sql @@ -0,0 +1,9 @@ +drop table if exists test1; + +create table test1 (i Int64) engine MergeTree order by i; + +insert into test1 values (53), (1777), (53284); + +select count() from test1 where toInt16(i) = 1777; + +drop table if exists test1; diff --git a/parser/testdata/01497_alias_on_default_array/ast.json b/parser/testdata/01497_alias_on_default_array/ast.json new file mode 100644 index 000000000..bea6da9a7 --- /dev/null +++ b/parser/testdata/01497_alias_on_default_array/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_new_col (children 1)" + }, + { + "explain": " Identifier test_new_col" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00111194, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/01497_alias_on_default_array/metadata.json b/parser/testdata/01497_alias_on_default_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01497_alias_on_default_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01497_alias_on_default_array/query.sql b/parser/testdata/01497_alias_on_default_array/query.sql new file mode 100644 index 000000000..c0c26b05e --- /dev/null +++ b/parser/testdata/01497_alias_on_default_array/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS test_new_col; + +CREATE TABLE test_new_col +( + `_csv` String, + `csv_as_array` Array(String) ALIAS splitByChar(';',_csv), + `csv_col1` String DEFAULT csv_as_array[1], + `csv_col2` String DEFAULT csv_as_array[2] +) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO test_new_col (_csv) VALUES ('a1;b1;c1;d1'), ('a2;b2;c2;d2'), ('a3;b3;c3;d3'); + +SELECT csv_col1, csv_col2 FROM test_new_col ORDER BY csv_col1; + +ALTER TABLE test_new_col ADD COLUMN `csv_col3` String DEFAULT csv_as_array[3]; + +SELECT csv_col3 FROM test_new_col ORDER BY csv_col3; + +DROP TABLE IF EXISTS test_new_col; diff --git a/parser/testdata/01497_extract_all_groups_empty_match/ast.json 
b/parser/testdata/01497_extract_all_groups_empty_match/ast.json new file mode 100644 index 000000000..7b074b8da --- /dev/null +++ b/parser/testdata/01497_extract_all_groups_empty_match/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function extractAllGroupsVertical (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '@#$%^&*'" + }, + { + "explain": " Literal '(\\\\w*)'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001008213, + "rows_read": 8, + "bytes_read": 308 + } +} diff --git a/parser/testdata/01497_extract_all_groups_empty_match/metadata.json b/parser/testdata/01497_extract_all_groups_empty_match/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01497_extract_all_groups_empty_match/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01497_extract_all_groups_empty_match/query.sql b/parser/testdata/01497_extract_all_groups_empty_match/query.sql new file mode 100644 index 000000000..1c4dafd9e --- /dev/null +++ b/parser/testdata/01497_extract_all_groups_empty_match/query.sql @@ -0,0 +1,2 @@ +SELECT extractAllGroupsVertical('@#$%^&*', '(\w*)'); +SELECT extractAllGroupsHorizontal('@#$%^&*', '(\w*)'); diff --git a/parser/testdata/01497_mutation_support_for_storage_memory/ast.json b/parser/testdata/01497_mutation_support_for_storage_memory/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01497_mutation_support_for_storage_memory/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01497_mutation_support_for_storage_memory/metadata.json b/parser/testdata/01497_mutation_support_for_storage_memory/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01497_mutation_support_for_storage_memory/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01497_mutation_support_for_storage_memory/query.sql b/parser/testdata/01497_mutation_support_for_storage_memory/query.sql new file mode 100644 index 000000000..a6aab4432 --- /dev/null +++ b/parser/testdata/01497_mutation_support_for_storage_memory/query.sql @@ -0,0 +1,25 @@ +-- Tags: memory-engine +DROP TABLE IF EXISTS defaults; +CREATE TABLE defaults +( + n Int32, + s String +)ENGINE = Memory(); + +INSERT INTO defaults VALUES(1, '1') (2, '2') (3, '3') (4, '4') (5, '5'); + +SELECT * FROM defaults; + +ALTER TABLE defaults UPDATE n = 100 WHERE s = '1'; + +SELECT * FROM defaults; + +SELECT count(*) FROM defaults; + +ALTER TABLE defaults DELETE WHERE n = 100; + +SELECT * FROM defaults; + +SELECT count(*) FROM defaults; + +DROP TABLE defaults; diff --git a/parser/testdata/01497_now_support_timezone/ast.json b/parser/testdata/01497_now_support_timezone/ast.json new file mode 100644 index 000000000..7d3a6c640 --- /dev/null +++ b/parser/testdata/01497_now_support_timezone/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function 
in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function minus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toHour (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function now (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Asia\/Shanghai'" + }, + { + "explain": " Function toHour (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function now (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " Literal Tuple_(UInt64_8, Int64_-16)" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001249336, + "rows_read": 19, + "bytes_read": 787 + } +} diff --git a/parser/testdata/01497_now_support_timezone/metadata.json b/parser/testdata/01497_now_support_timezone/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01497_now_support_timezone/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01497_now_support_timezone/query.sql b/parser/testdata/01497_now_support_timezone/query.sql new file mode 100644 index 000000000..b1e9bad58 --- /dev/null +++ b/parser/testdata/01497_now_support_timezone/query.sql @@ -0,0 +1 @@ +SELECT (toHour(now('Asia/Shanghai')) - toHour(now('UTC'))) IN (8, -16); diff --git a/parser/testdata/01498_alter_column_storage_memory/ast.json b/parser/testdata/01498_alter_column_storage_memory/ast.json new file mode 100644 index 000000000..0c6ccac26 --- /dev/null +++ b/parser/testdata/01498_alter_column_storage_memory/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery defaults (children 1)" + }, + { + "explain": " Identifier defaults" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001129138, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01498_alter_column_storage_memory/metadata.json b/parser/testdata/01498_alter_column_storage_memory/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01498_alter_column_storage_memory/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01498_alter_column_storage_memory/query.sql b/parser/testdata/01498_alter_column_storage_memory/query.sql new file mode 100644 index 000000000..5e213d5be --- /dev/null +++ b/parser/testdata/01498_alter_column_storage_memory/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS defaults; +CREATE TABLE defaults +( + n Int32, + s String +)ENGINE = Memory(); + +ALTER TABLE defaults ADD COLUMN m Int8; +ALTER TABLE defaults DROP COLUMN n; + +DESC TABLE defaults; + +DROP TABLE defaults; diff --git a/parser/testdata/01499_json_named_tuples/ast.json b/parser/testdata/01499_json_named_tuples/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01499_json_named_tuples/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01499_json_named_tuples/metadata.json b/parser/testdata/01499_json_named_tuples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01499_json_named_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01499_json_named_tuples/query.sql b/parser/testdata/01499_json_named_tuples/query.sql new file mode 100644 index 
000000000..947455f03 --- /dev/null +++ b/parser/testdata/01499_json_named_tuples/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-fasttest + +create table named_tuples engine File(JSONEachRow) + settings output_format_json_named_tuples_as_objects = 1 + as select cast(tuple(number, number * 2), 'Tuple(a int, b int)') c + from numbers(3); + +select * from named_tuples format JSONEachRow settings output_format_json_named_tuples_as_objects = 1; + +drop table named_tuples diff --git a/parser/testdata/01499_log_deadlock/ast.json b/parser/testdata/01499_log_deadlock/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01499_log_deadlock/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01499_log_deadlock/metadata.json b/parser/testdata/01499_log_deadlock/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01499_log_deadlock/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01499_log_deadlock/query.sql b/parser/testdata/01499_log_deadlock/query.sql new file mode 100644 index 000000000..f361297e0 --- /dev/null +++ b/parser/testdata/01499_log_deadlock/query.sql @@ -0,0 +1,28 @@ +-- Tags: deadlock + +DROP TABLE IF EXISTS t; +CREATE TABLE t (x UInt8) ENGINE = TinyLog; + +INSERT INTO t VALUES (1), (2), (3); +INSERT INTO t SELECT * FROM t; +SELECT count() FROM t; + +DROP TABLE t; + + +CREATE TABLE t (x UInt8) ENGINE = Log; + +INSERT INTO t VALUES (1), (2), (3); +INSERT INTO t SELECT * FROM t; +SELECT count() FROM t; + +DROP TABLE t; + + +CREATE TABLE t (x UInt8) ENGINE = StripeLog; + +INSERT INTO t VALUES (1), (2), (3); +INSERT INTO t SELECT * FROM t; +SELECT count() FROM t; + +DROP TABLE t; diff --git a/parser/testdata/01501_cache_dictionary_all_fields/ast.json b/parser/testdata/01501_cache_dictionary_all_fields/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01501_cache_dictionary_all_fields/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01501_cache_dictionary_all_fields/metadata.json b/parser/testdata/01501_cache_dictionary_all_fields/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01501_cache_dictionary_all_fields/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01501_cache_dictionary_all_fields/query.sql b/parser/testdata/01501_cache_dictionary_all_fields/query.sql new file mode 100644 index 000000000..1ef173d5f --- /dev/null +++ b/parser/testdata/01501_cache_dictionary_all_fields/query.sql @@ -0,0 +1,118 @@ +-- Tags: no-parallel + +drop database if exists db_01501; +create database db_01501; + +CREATE TABLE db_01501.table_cache_dict( +KeyField UInt64, +UInt8_ UInt8, +UInt16_ UInt16, +UInt32_ UInt32, +UInt64_ UInt64, +Int8_ Int8, +Int16_ Int16, +Int32_ Int32, +Int64_ Int64, +UUID_ UUID, +Date_ Date, +DateTime_ DateTime, +String_ String, +Float32_ Float32, +Float64_ Float64, +Decimal32_ Decimal32(5), +Decimal64_ Decimal64(15), +Decimal128_ Decimal128(35), +ParentKeyField UInt64) +ENGINE = MergeTree() ORDER BY KeyField; + + +CREATE DICTIONARY IF NOT EXISTS db_01501.cache_dict ( + KeyField UInt64 DEFAULT 9999999, + UInt8_ UInt8 DEFAULT 55, + UInt16_ UInt16 DEFAULT 65535, + UInt32_ UInt32 DEFAULT 4294967295, + UInt64_ UInt64 DEFAULT 18446744073709551615, + Int8_ Int8 DEFAULT -128, + Int16_ Int16 DEFAULT -32768, + Int32_ Int32 DEFAULT -2147483648, + Int64_ Int64 DEFAULT -9223372036854775808, + UUID_ UUID DEFAULT 
'550e8400-0000-0000-0000-000000000000', + Date_ Date DEFAULT '2018-12-30', + DateTime_ DateTime DEFAULT '2018-12-30 00:00:00', + String_ String DEFAULT 'hi', + Float32_ Float32 DEFAULT 111.11, + Float64_ Float64 DEFAULT 222.11, + Decimal32_ Decimal32(5) DEFAULT 333.11, + Decimal64_ Decimal64(15) DEFAULT 444.11, + Decimal128_ Decimal128(35) DEFAULT 555.11, + ParentKeyField UInt64 DEFAULT 444) +PRIMARY KEY KeyField +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_cache_dict' DB 'db_01501')) +LIFETIME(5) LAYOUT(CACHE(SIZE_IN_CELLS 20)); + + +INSERT INTO db_01501.table_cache_dict VALUES (1, 2, 3, 4, 5, -1, -2, -3, -4, '550e8400-e29b-41d4-a716-446655440003', '1973-06-28', '1985-02-28 23:43:25', 'clickhouse', 22.543, 3332154213.4, toDecimal32('1e-5', 5), toDecimal64('1e-15', 15), toDecimal128('1e-35', 35), 0); +INSERT INTO db_01501.table_cache_dict VALUES (2, 22, 33, 44, 55, -11, -22, -33, -44, 'cb307805-44f0-49e7-9ae9-9954c543be46', '1978-06-28', '1986-02-28 23:42:25', 'hello', 21.543, 3111154213.9, toDecimal32('2e-5', 5), toDecimal64('2e-15', 15), toDecimal128('2e-35', 35), 1); +INSERT INTO db_01501.table_cache_dict VALUES (3, 222, 333, 444, 555, -111, -222, -333, -444, 'de7f7ec3-f851-4f8c-afe5-c977cb8cea8d', '1982-06-28', '1999-02-28 23:42:25', 'dbms', 13.334, 3222187213.1, toDecimal32('3e-5', 5), toDecimal64('3e-15', 15), toDecimal128('3e-35', 35), 1); +INSERT INTO db_01501.table_cache_dict VALUES (4, 2222, 3333, 4444, 5555, -1111, -2222, -3333, -4444, '4bd3829f-0669-43b7-b884-a8e034a68224', '1987-06-28', '2000-02-28 23:42:25', 'MergeTree', 52.001, 3237554213.5, toDecimal32('4e-5', 5), toDecimal64('4e-15', 15), toDecimal128('4e-35', 35), 1); +INSERT INTO db_01501.table_cache_dict VALUES (5, 22222, 33333, 44444, 55555, -11111, -22222, -33333, -44444, 'ff99a408-78bb-4939-93cc-65e657e347c6', '1991-06-28', '2007-02-28 23:42:25', 'dictionary', 33.333, 3222193713.7, toDecimal32('5e-5', 5), toDecimal64('5e-15', 15), toDecimal128('5e-35', 35), 1); + + +SELECT arrayDistinct(groupArray(dictGetUInt8('db_01501.cache_dict', 'UInt8_', toUInt64(number)))) from numbers(10); +system reload dictionaries; +SELECT arrayDistinct(groupArray(dictGetUInt16('db_01501.cache_dict', 'UInt16_', toUInt64(number)))) from numbers(10); +system reload dictionaries; +SELECT arrayDistinct(groupArray(dictGetUInt32('db_01501.cache_dict', 'UInt32_', toUInt64(number)))) from numbers(10); +system reload dictionaries; +SELECT arrayDistinct(groupArray(dictGetUInt64('db_01501.cache_dict', 'UInt64_', toUInt64(number)))) from numbers(10); +system reload dictionaries; +SELECT arrayDistinct(groupArray(dictGetInt8('db_01501.cache_dict', 'Int8_', toUInt64(number)))) from numbers(10); +system reload dictionaries; +SELECT arrayDistinct(groupArray(dictGetInt16('db_01501.cache_dict', 'Int16_', toUInt64(number)))) from numbers(10); +system reload dictionaries; +SELECT arrayDistinct(groupArray(dictGetInt32('db_01501.cache_dict', 'Int32_', toUInt64(number)))) from numbers(10); +system reload dictionaries; +SELECT arrayDistinct(groupArray(dictGetInt64('db_01501.cache_dict', 'Int64_', toUInt64(number)))) from numbers(10); +system reload dictionaries; +SELECT arrayDistinct(groupArray(dictGetFloat32('db_01501.cache_dict', 'Float32_', toUInt64(number)))) from numbers(10); +system reload dictionaries; +SELECT arrayDistinct(groupArray(dictGetFloat64('db_01501.cache_dict', 'Float64_', toUInt64(number)))) from numbers(10); +system reload dictionaries; +SELECT arrayDistinct(groupArray(dictGet('db_01501.cache_dict', 
'Decimal32_', toUInt64(number)))) from numbers(10); +system reload dictionaries; +SELECT arrayDistinct(groupArray(dictGet('db_01501.cache_dict', 'Decimal64_', toUInt64(number)))) from numbers(10); +system reload dictionaries; +SELECT arrayDistinct(groupArray(dictGet('db_01501.cache_dict', 'Decimal128_', toUInt64(number)))) from numbers(10); +system reload dictionaries; +SELECT arrayDistinct(groupArray(dictGetString('db_01501.cache_dict', 'String_', toUInt64(number)))) from numbers(10); + + + +SELECT arrayDistinct(groupArray(dictGetUInt8('db_01501.cache_dict', 'UInt8_', toUInt64(number)))) from numbers(10); +SELECT arrayDistinct(groupArray(dictGetUInt16('db_01501.cache_dict', 'UInt16_', toUInt64(number)))) from numbers(10); +SELECT arrayDistinct(groupArray(dictGetUInt32('db_01501.cache_dict', 'UInt32_', toUInt64(number)))) from numbers(10); +SELECT arrayDistinct(groupArray(dictGetUInt64('db_01501.cache_dict', 'UInt64_', toUInt64(number)))) from numbers(10); +SELECT arrayDistinct(groupArray(dictGetInt8('db_01501.cache_dict', 'Int8_', toUInt64(number)))) from numbers(10); +SELECT arrayDistinct(groupArray(dictGetInt16('db_01501.cache_dict', 'Int16_', toUInt64(number)))) from numbers(10); +SELECT arrayDistinct(groupArray(dictGetInt32('db_01501.cache_dict', 'Int32_', toUInt64(number)))) from numbers(10); +SELECT arrayDistinct(groupArray(dictGetInt64('db_01501.cache_dict', 'Int64_', toUInt64(number)))) from numbers(10); +SELECT arrayDistinct(groupArray(dictGetFloat32('db_01501.cache_dict', 'Float32_', toUInt64(number)))) from numbers(10); +SELECT arrayDistinct(groupArray(dictGetFloat64('db_01501.cache_dict', 'Float64_', toUInt64(number)))) from numbers(10); +SELECT arrayDistinct(groupArray(dictGet('db_01501.cache_dict', 'Decimal32_', toUInt64(number)))) from numbers(10); +SELECT arrayDistinct(groupArray(dictGet('db_01501.cache_dict', 'Decimal64_', toUInt64(number)))) from numbers(10); +SELECT arrayDistinct(groupArray(dictGet('db_01501.cache_dict', 'Decimal128_', toUInt64(number)))) from numbers(10); +SELECT arrayDistinct(groupArray(dictGetString('db_01501.cache_dict', 'String_', toUInt64(number)))) from numbers(10); + + +system reload dictionaries; + + +SELECT groupArray(dictHas('db_01501.cache_dict', toUInt64(number))) from numbers(10); +SELECT groupArray(dictHas('db_01501.cache_dict', toUInt64(number))) from numbers(10); +SELECT groupArray(dictHas('db_01501.cache_dict', toUInt64(number))) from numbers(10); +SELECT groupArray(dictHas('db_01501.cache_dict', toUInt64(number))) from numbers(10); +SELECT groupArray(dictHas('db_01501.cache_dict', toUInt64(number))) from numbers(10); + +drop dictionary db_01501.cache_dict; +drop table db_01501.table_cache_dict; +drop database if exists db_01501; diff --git a/parser/testdata/01502_bar_overflow/ast.json b/parser/testdata/01502_bar_overflow/ast.json new file mode 100644 index 000000000..6396ff8cd --- /dev/null +++ b/parser/testdata/01502_bar_overflow/ast.json @@ -0,0 +1,106 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bar (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function minus (children 1)" + }, + { + "explain": " 
ExpressionList (children 2)" + }, + { + "explain": " Function greatCircleAngle (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal Int64_-9223372036854775808" + }, + { + "explain": " Literal UInt64_1023" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1048575" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 28, + + "statistics": + { + "elapsed": 0.001145693, + "rows_read": 28, + "bytes_read": 1111 + } +} diff --git a/parser/testdata/01502_bar_overflow/metadata.json b/parser/testdata/01502_bar_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01502_bar_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01502_bar_overflow/query.sql b/parser/testdata/01502_bar_overflow/query.sql new file mode 100644 index 000000000..4829b487f --- /dev/null +++ b/parser/testdata/01502_bar_overflow/query.sql @@ -0,0 +1 @@ +SELECT bar((greatCircleAngle(100, -1, number, number) - number) * 2, -9223372036854775808, 1023, 100) FROM numbers(1048575) FORMAT Null; diff --git a/parser/testdata/01503_fixed_string_primary_key/ast.json b/parser/testdata/01503_fixed_string_primary_key/ast.json new file mode 100644 index 000000000..7276e322f --- /dev/null +++ b/parser/testdata/01503_fixed_string_primary_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000972976, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01503_fixed_string_primary_key/metadata.json b/parser/testdata/01503_fixed_string_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01503_fixed_string_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01503_fixed_string_primary_key/query.sql b/parser/testdata/01503_fixed_string_primary_key/query.sql new file mode 100644 index 000000000..09576777c --- /dev/null +++ b/parser/testdata/01503_fixed_string_primary_key/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test(key FixedString(10)) ENGINE=MergeTree() PARTITION BY tuple() ORDER BY (key); +INSERT INTO test SELECT toString(intDiv(number, 8)) FROM numbers(100); +SELECT count() FROM test WHERE key = '1'; + +DROP TABLE IF EXISTS test; diff --git a/parser/testdata/01503_if_const_optimization/ast.json b/parser/testdata/01503_if_const_optimization/ast.json new file mode 100644 index 000000000..2e0564588 --- /dev/null +++ b/parser/testdata/01503_if_const_optimization/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " 
SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal 'Nullable(UInt8)'" + }, + { + "explain": " Literal '2.55'" + }, + { + "explain": " Literal NULL" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001453163, + "rows_read": 12, + "bytes_read": 440 + } +} diff --git a/parser/testdata/01503_if_const_optimization/metadata.json b/parser/testdata/01503_if_const_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01503_if_const_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01503_if_const_optimization/query.sql b/parser/testdata/01503_if_const_optimization/query.sql new file mode 100644 index 000000000..a64be6bc8 --- /dev/null +++ b/parser/testdata/01503_if_const_optimization/query.sql @@ -0,0 +1 @@ +SELECT if(CAST(NULL AS Nullable(UInt8)), '2.55', NULL) AS x; diff --git a/parser/testdata/01504_compression_multiple_streams/ast.json b/parser/testdata/01504_compression_multiple_streams/ast.json new file mode 100644 index 000000000..d41b37c95 --- /dev/null +++ b/parser/testdata/01504_compression_multiple_streams/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery columns_with_multiple_streams (children 1)" + }, + { + "explain": " Identifier columns_with_multiple_streams" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001696026, + "rows_read": 2, + "bytes_read": 110 + } +} diff --git a/parser/testdata/01504_compression_multiple_streams/metadata.json b/parser/testdata/01504_compression_multiple_streams/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01504_compression_multiple_streams/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01504_compression_multiple_streams/query.sql b/parser/testdata/01504_compression_multiple_streams/query.sql new file mode 100644 index 000000000..c456d4c40 --- /dev/null +++ b/parser/testdata/01504_compression_multiple_streams/query.sql @@ -0,0 +1,116 @@ +DROP TABLE IF EXISTS columns_with_multiple_streams; + +SET mutations_sync = 2; + +CREATE TABLE columns_with_multiple_streams ( + field0 Nullable(Int64) CODEC(Delta(2), LZ4), + field1 Nullable(Int64) CODEC(Delta, LZ4), + field2 Array(Array(Int64)) CODEC(Delta, LZ4), + field3 Tuple(UInt32, Array(UInt64)) CODEC(T64, Default) +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0; + +INSERT INTO columns_with_multiple_streams VALUES(1, 1, [[1]], tuple(1, [1])); + +SELECT * FROM columns_with_multiple_streams; + +DETACH TABLE columns_with_multiple_streams; +ATTACH TABLE columns_with_multiple_streams; + +SELECT * FROM columns_with_multiple_streams; + +ALTER TABLE columns_with_multiple_streams MODIFY COLUMN field1 Nullable(UInt8); + +INSERT INTO columns_with_multiple_streams VALUES(2, 2, [[2]], tuple(2, [2])); + +SHOW CREATE TABLE columns_with_multiple_streams; + +SELECT * FROM columns_with_multiple_streams ORDER BY field0; + +ALTER TABLE columns_with_multiple_streams MODIFY COLUMN field3 CODEC(Delta, Default); + +SHOW CREATE TABLE columns_with_multiple_streams; + +INSERT INTO 
columns_with_multiple_streams VALUES(3, 3, [[3]], tuple(3, [3])); + +OPTIMIZE TABLE columns_with_multiple_streams FINAL; + +SELECT * FROM columns_with_multiple_streams ORDER BY field0; + +DROP TABLE IF EXISTS columns_with_multiple_streams; + +DROP TABLE IF EXISTS columns_with_multiple_streams_compact; + +CREATE TABLE columns_with_multiple_streams_compact ( + field0 Nullable(Int64) CODEC(Delta(2), LZ4), + field1 Nullable(Int64) CODEC(Delta, LZ4), + field2 Array(Array(Int64)) CODEC(Delta, LZ4), + field3 Tuple(UInt32, Array(UInt64)) CODEC(Delta, Default) +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS min_rows_for_wide_part = 100000, min_bytes_for_wide_part = 100000; + +INSERT INTO columns_with_multiple_streams_compact VALUES(1, 1, [[1]], tuple(1, [1])); + +SELECT * FROM columns_with_multiple_streams_compact; + +DETACH TABLE columns_with_multiple_streams_compact; +ATTACH TABLE columns_with_multiple_streams_compact; + +SELECT * FROM columns_with_multiple_streams_compact; + +ALTER TABLE columns_with_multiple_streams_compact MODIFY COLUMN field1 Nullable(UInt8); + +INSERT INTO columns_with_multiple_streams_compact VALUES(2, 2, [[2]], tuple(2, [2])); + +SHOW CREATE TABLE columns_with_multiple_streams_compact; + +SELECT * FROM columns_with_multiple_streams_compact ORDER BY field0; + +ALTER TABLE columns_with_multiple_streams_compact MODIFY COLUMN field3 CODEC(Delta, Default); + +SELECT * FROM columns_with_multiple_streams_compact ORDER BY field0; + +SHOW CREATE TABLE columns_with_multiple_streams_compact; + +INSERT INTO columns_with_multiple_streams_compact VALUES(3, 3, [[3]], tuple(3, [3])); + +SELECT * FROM columns_with_multiple_streams_compact ORDER BY field0; + +DROP TABLE IF EXISTS columns_with_multiple_streams_compact; + +DROP TABLE IF EXISTS columns_with_multiple_streams_bad_case; + +-- validation still works: nonsensical codecs are rejected +CREATE TABLE columns_with_multiple_streams_bad_case ( + field0 Nullable(String) CODEC(Delta, LZ4) +) +ENGINE = MergeTree +ORDER BY tuple(); --{serverError BAD_ARGUMENTS} + +CREATE TABLE columns_with_multiple_streams_bad_case ( + field0 Tuple(Array(UInt64), String) CODEC(T64, LZ4) +) +ENGINE = MergeTree +ORDER BY tuple(); --{serverError ILLEGAL_SYNTAX_FOR_CODEC_TYPE} + +SET allow_suspicious_codecs = 1; + +CREATE TABLE columns_with_multiple_streams_bad_case ( + field0 Nullable(UInt64) CODEC(Delta) +) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO columns_with_multiple_streams_bad_case VALUES(1), (2); + +INSERT INTO columns_with_multiple_streams_bad_case VALUES(3); + +OPTIMIZE TABLE columns_with_multiple_streams_bad_case FINAL; + +SELECT * FROM columns_with_multiple_streams_bad_case ORDER BY field0; + +DROP TABLE IF EXISTS columns_with_multiple_streams_bad_case; diff --git a/parser/testdata/01504_rocksdb/ast.json b/parser/testdata/01504_rocksdb/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01504_rocksdb/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01504_rocksdb/metadata.json b/parser/testdata/01504_rocksdb/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01504_rocksdb/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01504_rocksdb/query.sql b/parser/testdata/01504_rocksdb/query.sql new file mode 100644 index 000000000..868c60192 --- /dev/null +++ b/parser/testdata/01504_rocksdb/query.sql @@ -0,0 +1,50 @@ +-- Tags: no-ordinary-database, no-fasttest, use-rocksdb +-- Tag no-ordinary-database: Sometimes cannot lock 
file, most likely due to concurrent or adjacent tests; we don't care how this behaves in the Ordinary database +-- Tag no-fasttest: In fasttest, ENABLE_LIBRARIES=0, so rocksdb engine is not enabled by default + +DROP TABLE IF EXISTS 01504_test; + +CREATE TABLE 01504_test (key String, value UInt32) Engine=EmbeddedRocksDB; -- { serverError BAD_ARGUMENTS } +CREATE TABLE 01504_test (key String, value UInt32) Engine=EmbeddedRocksDB PRIMARY KEY(key2); -- { serverError UNKNOWN_IDENTIFIER } +CREATE TABLE 01504_test (key String, value UInt32) Engine=EmbeddedRocksDB PRIMARY KEY(key, value); -- { serverError BAD_ARGUMENTS } +CREATE TABLE 01504_test (key Tuple(String, UInt32), value UInt64) Engine=EmbeddedRocksDB PRIMARY KEY(key); + +DROP TABLE IF EXISTS 01504_test; +CREATE TABLE 01504_test (key String, value UInt32) Engine=EmbeddedRocksDB PRIMARY KEY(key); + +INSERT INTO 01504_test SELECT '1_1', number FROM numbers(10000); +SELECT COUNT(1) == 1 FROM 01504_test; + +INSERT INTO 01504_test SELECT concat(toString(number), '_1'), number FROM numbers(10000); +SELECT COUNT(1) == 10000 FROM 01504_test; +SELECT uniqExact(key) == 32 FROM (SELECT * FROM 01504_test LIMIT 32 SETTINGS max_block_size = 1); +SELECT SUM(value) == 1 + 99 + 900 FROM 01504_test WHERE key IN ('1_1', '99_1', '900_1'); + + +DROP TABLE IF EXISTS 01504_test; +DROP TABLE IF EXISTS 01504_test_memory; + +CREATE TABLE 01504_test (k UInt32, value UInt64, dummy Tuple(UInt32, Float64), bm AggregateFunction(groupBitmap, UInt64)) Engine=EmbeddedRocksDB PRIMARY KEY(k); +CREATE TABLE 01504_test_memory AS 01504_test Engine = Memory; + +INSERT INTO 01504_test SELECT number % 77 AS k, SUM(number) AS value, (1, 1.2), bitmapBuild(groupArray(number)) FROM numbers(10000000) group by k; + +INSERT INTO 01504_test_memory SELECT number % 77 AS k, SUM(number) AS value, (1, 1.2), bitmapBuild(groupArray(number)) FROM numbers(10000000) group by k; + + +SELECT A.a = B.a, A.b = B.b, A.c = B.c, A.d = B.d, A.e = B.e FROM ( SELECT 0 AS a, groupBitmapMerge(bm) AS b , SUM(k) AS c, SUM(value) AS d, SUM(dummy.1) AS e FROM 01504_test) A ANY LEFT JOIN (SELECT 0 AS a, groupBitmapMerge(bm) AS b , SUM(k) AS c, SUM(value) AS d, SUM(dummy.1) AS e FROM 01504_test_memory) B USING a ORDER BY a; + +CREATE TEMPORARY TABLE keys AS SELECT * FROM system.numbers LIMIT 1 OFFSET 4; + +SET max_rows_to_read = 2; +SELECT dummy == (1,1.2) FROM 01504_test WHERE k IN (1, 3) OR k IN (1) OR k IN (3, 1) OR k IN [1] OR k IN [1, 3] ; +SELECT k == 4 FROM 01504_test WHERE k = 4 OR k IN [4] OR k in (4, 10000001, 10000002) AND value > 0; +SELECT k == 4 FROM 01504_test WHERE k IN (SELECT toUInt32(number) FROM keys WHERE number = 4); +SELECT k, value FROM 01504_test WHERE k = 0 OR value > 0; -- { serverError TOO_MANY_ROWS } +SELECT k, value FROM 01504_test WHERE k = 0 AND k IN (1, 3) OR k > 8; -- { serverError TOO_MANY_ROWS } + +TRUNCATE TABLE 01504_test; +SELECT 0 == COUNT(1) FROM 01504_test; + +DROP TABLE IF EXISTS 01504_test; +DROP TABLE IF EXISTS 01504_test_memory; diff --git a/parser/testdata/01504_view_type_conversion/ast.json b/parser/testdata/01504_view_type_conversion/ast.json new file mode 100644 index 000000000..24f9a9b7f --- /dev/null +++ b/parser/testdata/01504_view_type_conversion/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery testv (children 1)" + }, + { + "explain": " Identifier testv" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00137271, + "rows_read": 2, + "bytes_read": 62 + } +} diff
--git a/parser/testdata/01504_view_type_conversion/metadata.json b/parser/testdata/01504_view_type_conversion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01504_view_type_conversion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01504_view_type_conversion/query.sql b/parser/testdata/01504_view_type_conversion/query.sql new file mode 100644 index 000000000..0133ecaf4 --- /dev/null +++ b/parser/testdata/01504_view_type_conversion/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS testv; + +create view testv(a UInt32) as select number a from numbers(10); +select groupArray(a) from testv; + +DROP TABLE testv; + +create view testv(a String) as select number a from numbers(10); +select groupArray(a) from testv; + +DROP TABLE testv; diff --git a/parser/testdata/01505_distributed_local_type_conversion_enum/ast.json b/parser/testdata/01505_distributed_local_type_conversion_enum/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01505_distributed_local_type_conversion_enum/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01505_distributed_local_type_conversion_enum/metadata.json b/parser/testdata/01505_distributed_local_type_conversion_enum/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01505_distributed_local_type_conversion_enum/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01505_distributed_local_type_conversion_enum/query.sql b/parser/testdata/01505_distributed_local_type_conversion_enum/query.sql new file mode 100644 index 000000000..85c07a959 --- /dev/null +++ b/parser/testdata/01505_distributed_local_type_conversion_enum/query.sql @@ -0,0 +1,25 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS d; + +CREATE TABLE t (x Enum8('abc' = 0, 'def' = 1, 'ghi' = 2)) ENGINE = TinyLog; +INSERT INTO t VALUES (0), (1), (2); +SELECT * FROM t; + +SELECT '---'; +CREATE TABLE d (x Enum8('abc' = 0, 'def' = 1, 'xyz' = 2)) ENGINE = Distributed(test_shard_localhost, currentDatabase(), t); +SELECT * FROM d; +DROP TABLE d; + +SELECT '---'; +CREATE TABLE d (x Enum8('abc' = 0, 'def' = 1, 'xyz' = 2)) ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), t); +SELECT * FROM d; +DROP TABLE d; + +SELECT '---'; +CREATE TABLE d (x Enum8('abc' = 0, 'def' = 1, 'xyz' = 2)) ENGINE = Distributed(test_cluster_two_shards_localhost, currentDatabase(), t); +SELECT * FROM d; +DROP TABLE d; + +DROP TABLE t; diff --git a/parser/testdata/01505_log_distributed_deadlock/ast.json b/parser/testdata/01505_log_distributed_deadlock/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01505_log_distributed_deadlock/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01505_log_distributed_deadlock/metadata.json b/parser/testdata/01505_log_distributed_deadlock/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01505_log_distributed_deadlock/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01505_log_distributed_deadlock/query.sql b/parser/testdata/01505_log_distributed_deadlock/query.sql new file mode 100644 index 000000000..d0cbc501a --- /dev/null +++ b/parser/testdata/01505_log_distributed_deadlock/query.sql @@ -0,0 +1,14 @@ +-- Tags: deadlock, distributed + +DROP TABLE IF EXISTS t_local; +DROP TABLE IF EXISTS t_dist; + +create table t_local(a int) engine Log; +create table 
t_dist (a int) engine Distributed(test_shard_localhost, currentDatabase(), 't_local', cityHash64(a)); + +set distributed_foreground_insert = 1; + +insert into t_dist values (1); + +DROP TABLE t_local; +DROP TABLE t_dist; diff --git a/parser/testdata/01505_trivial_count_with_partition_predicate/ast.json b/parser/testdata/01505_trivial_count_with_partition_predicate/ast.json new file mode 100644 index 000000000..4d5159970 --- /dev/null +++ b/parser/testdata/01505_trivial_count_with_partition_predicate/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test1 (children 1)" + }, + { + "explain": " Identifier test1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001217725, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/01505_trivial_count_with_partition_predicate/metadata.json b/parser/testdata/01505_trivial_count_with_partition_predicate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01505_trivial_count_with_partition_predicate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01505_trivial_count_with_partition_predicate/query.sql b/parser/testdata/01505_trivial_count_with_partition_predicate/query.sql new file mode 100644 index 000000000..496fe26ad --- /dev/null +++ b/parser/testdata/01505_trivial_count_with_partition_predicate/query.sql @@ -0,0 +1,56 @@ +drop table if exists test1; +drop table if exists test_tuple; +drop table if exists test_two_args; + +create table test1(p DateTime, k int) engine MergeTree partition by toDate(p) order by k settings index_granularity = 1; +insert into test1 values ('2020-09-01 00:01:02', 1), ('2020-09-01 20:01:03', 2), ('2020-09-02 00:01:03', 3); + +set max_rows_to_read = 1; +set optimize_use_implicit_projections = 1; +-- non-optimized +select count() from test1 settings max_parallel_replicas = 3; +-- optimized (toYear is monotonic and we provide the partition expr as is) +select count() from test1 where toYear(toDate(p)) = 1999; +-- non-optimized (toDate(DateTime) is always monotonic, but we cannot relax the predicates to do a trivial count()) +select count() from test1 where p > toDateTime('2020-09-01 10:00:00'); -- { serverError TOO_MANY_ROWS } +-- optimized (partition expr wrapped with non-monotonic functions) +select count() FROM test1 where toDate(p) = '2020-09-01' and sipHash64(toString(toDate(p))) % 2 = 1; +select count() FROM test1 where toDate(p) = '2020-09-01' and sipHash64(toString(toDate(p))) % 2 = 0; +-- non-optimized (some predicate depends on non-partition_expr columns) +select count() FROM test1 where toDate(p) = '2020-09-01' and k = 2; -- { serverError TOO_MANY_ROWS } +-- optimized +select count() from test1 where toDate(p) > '2020-09-01'; +-- non-optimized +select count() from test1 where toDate(p) >= '2020-09-01' and p <= '2020-09-01 00:00:00'; + +create table test_tuple(p DateTime, i int, j int) engine MergeTree partition by (toDate(p), i) order by j settings index_granularity = 1; + +insert into test_tuple values ('2020-09-01 00:01:02', 1, 2), ('2020-09-01 00:01:03', 2, 3), ('2020-09-02 00:01:03', 3, 4); + +-- optimized +select count() from test_tuple where toDate(p) > '2020-09-01'; +-- optimized +select count() from test_tuple where toDate(p) > '2020-09-01' and i = 1; +-- optimized +select count() from test_tuple where i > 2; +-- optimized +select count() from test_tuple where i < 1; +-- non-optimized +select count() from test_tuple
array join [p,p] as c where toDate(p) = '2020-09-01'; -- { serverError TOO_MANY_ROWS } +select count() from test_tuple array join [1,2] as c where toDate(p) = '2020-09-01' settings max_rows_to_read = 4; +-- non-optimized +select count() from test_tuple array join [1,2,3] as c where toDate(p) = '2020-09-01'; -- { serverError TOO_MANY_ROWS } +select count() from test_tuple array join [1,2,3] as c where toDate(p) = '2020-09-01' settings max_rows_to_read = 6; + +create table test_two_args(i int, j int, k int) engine MergeTree partition by i + j order by k settings index_granularity = 1; + +insert into test_two_args values (1, 2, 3), (2, 1, 3), (0, 3, 4); + +-- optimized +select count() from test_two_args where i + j = 3; +-- non-optimized +select count() from test_two_args where i = 1; -- { serverError TOO_MANY_ROWS } + +drop table test1; +drop table test_tuple; +drop table test_two_args; diff --git a/parser/testdata/01506_buffer_table_alter_block_structure/ast.json b/parser/testdata/01506_buffer_table_alter_block_structure/ast.json new file mode 100644 index 000000000..fd2281b06 --- /dev/null +++ b/parser/testdata/01506_buffer_table_alter_block_structure/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery buf_dest (children 1)" + }, + { + "explain": " Identifier buf_dest" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001235569, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01506_buffer_table_alter_block_structure/metadata.json b/parser/testdata/01506_buffer_table_alter_block_structure/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01506_buffer_table_alter_block_structure/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01506_buffer_table_alter_block_structure/query.sql b/parser/testdata/01506_buffer_table_alter_block_structure/query.sql new file mode 100644 index 000000000..cba7d84fa --- /dev/null +++ b/parser/testdata/01506_buffer_table_alter_block_structure/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS buf_dest; +DROP TABLE IF EXISTS buf; + +CREATE TABLE buf_dest (timestamp DateTime) +ENGINE = MergeTree PARTITION BY toYYYYMMDD(timestamp) +ORDER BY (timestamp); + +CREATE TABLE buf (timestamp DateTime) Engine = Buffer(currentDatabase(), buf_dest, 16, 3, 20, 2000000, 20000000, 100000000, 300000000);; + +INSERT INTO buf (timestamp) VALUES (toDateTime('2020-01-01 00:05:00')); + +ALTER TABLE buf_dest ADD COLUMN s String; +ALTER TABLE buf ADD COLUMN s String; + +SELECT * FROM buf; + +INSERT INTO buf (timestamp, s) VALUES (toDateTime('2020-01-01 00:06:00'), 'hello'); + +SELECT * FROM buf ORDER BY timestamp; + +DROP TABLE IF EXISTS buf; +DROP TABLE IF EXISTS buf_dest; diff --git a/parser/testdata/01506_buffer_table_alter_block_structure_2/ast.json b/parser/testdata/01506_buffer_table_alter_block_structure_2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01506_buffer_table_alter_block_structure_2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01506_buffer_table_alter_block_structure_2/metadata.json b/parser/testdata/01506_buffer_table_alter_block_structure_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01506_buffer_table_alter_block_structure_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01506_buffer_table_alter_block_structure_2/query.sql 
b/parser/testdata/01506_buffer_table_alter_block_structure_2/query.sql new file mode 100644 index 000000000..0595e67f2 --- /dev/null +++ b/parser/testdata/01506_buffer_table_alter_block_structure_2/query.sql @@ -0,0 +1,26 @@ +-- Tags: no-random-settings + +DROP TABLE IF EXISTS buf_dest; +DROP TABLE IF EXISTS buf; + +CREATE TABLE buf_dest (timestamp DateTime) +ENGINE = MergeTree PARTITION BY toYYYYMMDD(timestamp) +ORDER BY (timestamp); + +CREATE TABLE buf (timestamp DateTime) Engine = Buffer(currentDatabase(), buf_dest, 16, 86400, 86400, 2000000, 20000000, 100000000, 300000000);; + +INSERT INTO buf (timestamp) VALUES (toDateTime('2020-01-01 00:05:00')); + +OPTIMIZE TABLE buf; + +ALTER TABLE buf_dest ADD COLUMN s String; +ALTER TABLE buf ADD COLUMN s String; + +SELECT * FROM buf; + +INSERT INTO buf (timestamp, s) VALUES (toDateTime('2020-01-01 00:06:00'), 'hello'); + +SELECT * FROM buf ORDER BY timestamp; + +DROP TABLE IF EXISTS buf; +DROP TABLE IF EXISTS buf_dest; diff --git a/parser/testdata/01506_ttl_same_with_order_by/ast.json b/parser/testdata/01506_ttl_same_with_order_by/ast.json new file mode 100644 index 000000000..bba5137de --- /dev/null +++ b/parser/testdata/01506_ttl_same_with_order_by/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery derived_metrics_local (children 1)" + }, + { + "explain": " Identifier derived_metrics_local" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001183745, + "rows_read": 2, + "bytes_read": 94 + } +} diff --git a/parser/testdata/01506_ttl_same_with_order_by/metadata.json b/parser/testdata/01506_ttl_same_with_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01506_ttl_same_with_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01506_ttl_same_with_order_by/query.sql b/parser/testdata/01506_ttl_same_with_order_by/query.sql new file mode 100644 index 000000000..bf0ff7c67 --- /dev/null +++ b/parser/testdata/01506_ttl_same_with_order_by/query.sql @@ -0,0 +1,86 @@ +DROP TABLE IF EXISTS derived_metrics_local; + +CREATE TABLE derived_metrics_local +( + timestamp DateTime, + bytes UInt64 +) +ENGINE=SummingMergeTree() +PARTITION BY toYYYYMMDD(timestamp) +ORDER BY (toStartOfHour(timestamp), timestamp) +TTL toStartOfHour(timestamp) + INTERVAL 1 HOUR GROUP BY toStartOfHour(timestamp) +SET bytes=max(bytes); + +SYSTEM STOP MERGES derived_metrics_local; +INSERT INTO derived_metrics_local values('2020-01-01 00:00:00', 1); +INSERT INTO derived_metrics_local values('2020-01-01 00:01:00', 3); +INSERT INTO derived_metrics_local values('2020-01-01 00:02:00', 2); + +SYSTEM START MERGES derived_metrics_local; +OPTIMIZE TABLE derived_metrics_local FINAL; +SELECT * FROM derived_metrics_local; + +DROP TABLE derived_metrics_local; + +CREATE TABLE derived_metrics_local +( + timestamp DateTime, + timestamp_h DateTime materialized toStartOfHour(timestamp), + bytes UInt64 +) +ENGINE=SummingMergeTree() +PARTITION BY toYYYYMMDD(timestamp) +ORDER BY (timestamp_h, timestamp) +TTL toStartOfHour(timestamp) + INTERVAL 1 HOUR GROUP BY timestamp_h +SET bytes=max(bytes), timestamp = toStartOfHour(any(timestamp)); + +SYSTEM STOP MERGES derived_metrics_local; +INSERT INTO derived_metrics_local values('2020-01-01 00:01:00', 111); +INSERT INTO derived_metrics_local values('2020-01-01 00:19:22', 22); +INSERT INTO derived_metrics_local values('2020-01-01 00:59:02', 1); + +SYSTEM START MERGES 
derived_metrics_local; +OPTIMIZE TABLE derived_metrics_local FINAL; +SELECT timestamp, timestamp_h, bytes FROM derived_metrics_local; + +DROP TABLE IF EXISTS derived_metrics_local; + +CREATE TABLE derived_metrics_local +( + timestamp DateTime, + bytes UInt64 TTL toStartOfHour(timestamp) + INTERVAL 1 HOUR +) +ENGINE=MergeTree() +ORDER BY (toStartOfHour(timestamp), timestamp) +SETTINGS min_bytes_for_wide_part = 0; + +SYSTEM STOP MERGES derived_metrics_local; +INSERT INTO derived_metrics_local values('2020-01-01 00:01:00', 111) ('2020-01-01 00:19:22', 22) ('2100-01-01 00:19:22', 1); + +SYSTEM START MERGES derived_metrics_local; +OPTIMIZE TABLE derived_metrics_local FINAL; +SELECT sum(bytes) FROM derived_metrics_local; + +DROP TABLE IF EXISTS derived_metrics_local; + +CREATE TABLE derived_metrics_local +( + timestamp DateTime, + bytes UInt64 +) +ENGINE=MergeTree() +PARTITION BY toYYYYMMDD(timestamp) +ORDER BY (toStartOfHour(timestamp), timestamp) +TTL toStartOfHour(timestamp) + INTERVAL 1 HOUR +SETTINGS min_bytes_for_wide_part = 0; + +SYSTEM STOP MERGES derived_metrics_local; +INSERT INTO derived_metrics_local values('2020-01-01 00:01:00', 111); +INSERT INTO derived_metrics_local values('2020-01-01 00:19:22', 22); +INSERT INTO derived_metrics_local values('2020-01-01 00:59:02', 1); + +SYSTEM START MERGES derived_metrics_local; +OPTIMIZE TABLE derived_metrics_local FINAL; +SELECT count() FROM derived_metrics_local; + +DROP TABLE IF EXISTS derived_metrics_local; diff --git a/parser/testdata/01507_multiversion_storage_for_storagememory/ast.json b/parser/testdata/01507_multiversion_storage_for_storagememory/ast.json new file mode 100644 index 000000000..7cb103c54 --- /dev/null +++ b/parser/testdata/01507_multiversion_storage_for_storagememory/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery defaults (children 1)" + }, + { + "explain": " Identifier defaults" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001445393, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01507_multiversion_storage_for_storagememory/metadata.json b/parser/testdata/01507_multiversion_storage_for_storagememory/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01507_multiversion_storage_for_storagememory/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01507_multiversion_storage_for_storagememory/query.sql b/parser/testdata/01507_multiversion_storage_for_storagememory/query.sql new file mode 100644 index 000000000..fec9105cc --- /dev/null +++ b/parser/testdata/01507_multiversion_storage_for_storagememory/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS defaults; +CREATE TABLE defaults +( + n Int32 +)ENGINE = Memory(); + +INSERT INTO defaults SELECT * FROM numbers(10); + +SELECT * FROM defaults; + +TRUNCATE defaults; + +SELECT * FROM defaults; + +DROP TABLE defaults; diff --git a/parser/testdata/01507_transform_null_in/ast.json b/parser/testdata/01507_transform_null_in/ast.json new file mode 100644 index 000000000..5b60bb4da --- /dev/null +++ b/parser/testdata/01507_transform_null_in/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001510057, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01507_transform_null_in/metadata.json 
b/parser/testdata/01507_transform_null_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01507_transform_null_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01507_transform_null_in/query.sql b/parser/testdata/01507_transform_null_in/query.sql new file mode 100644 index 000000000..2377ccf43 --- /dev/null +++ b/parser/testdata/01507_transform_null_in/query.sql @@ -0,0 +1,10 @@ +SET transform_null_in = 1; + +SELECT NULL IN NULL; +SELECT 1 IN NULL; +SELECT 1 IN (1, NULL); +SELECT 1 IN tuple(1, NULL); +SELECT (1, 2) IN (1, NULL); +SELECT (1, 2) IN tuple(1, NULL); +SELECT (1, 2) IN ((1, NULL), (1, 2)); +SELECT (1, NULL) IN (1, NULL); diff --git a/parser/testdata/01508_explain_header/ast.json b/parser/testdata/01508_explain_header/ast.json new file mode 100644 index 000000000..aff330cb1 --- /dev/null +++ b/parser/testdata/01508_explain_header/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001010933, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01508_explain_header/metadata.json b/parser/testdata/01508_explain_header/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01508_explain_header/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01508_explain_header/query.sql b/parser/testdata/01508_explain_header/query.sql new file mode 100644 index 000000000..03452e4bd --- /dev/null +++ b/parser/testdata/01508_explain_header/query.sql @@ -0,0 +1,3 @@ +SET enable_analyzer = 1; + +explain header = 1 select 1 as x; diff --git a/parser/testdata/01509_output_format_pretty_row_numbers/ast.json b/parser/testdata/01509_output_format_pretty_row_numbers/ast.json new file mode 100644 index 000000000..2aaf0287c --- /dev/null +++ b/parser/testdata/01509_output_format_pretty_row_numbers/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001505813, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01509_output_format_pretty_row_numbers/metadata.json b/parser/testdata/01509_output_format_pretty_row_numbers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01509_output_format_pretty_row_numbers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01509_output_format_pretty_row_numbers/query.sql b/parser/testdata/01509_output_format_pretty_row_numbers/query.sql new file mode 100644 index 000000000..b0e03c1bd --- /dev/null +++ b/parser/testdata/01509_output_format_pretty_row_numbers/query.sql @@ -0,0 +1,38 @@ +SET output_format_pretty_color=1; +SET output_format_pretty_row_numbers=0; +SET output_format_pretty_display_footer_column_names=0; +SET output_format_pretty_squash_consecutive_ms = 0; + +SELECT * FROM numbers(10) FORMAT Pretty; +SELECT * FROM numbers(10) FORMAT PrettyCompact; +SELECT * FROM numbers(10) FORMAT PrettyCompactMonoBlock; +SELECT * FROM numbers(10) FORMAT PrettyNoEscapes; +SELECT * FROM numbers(10) FORMAT PrettyCompactNoEscapes; +SELECT * FROM numbers(10) FORMAT PrettySpaceNoEscapes; +SELECT * FROM numbers(10) FORMAT PrettySpace; +SET output_format_pretty_row_numbers=1; +SELECT * FROM numbers(10) FORMAT Pretty; +SELECT * FROM numbers(10) FORMAT 
PrettyCompact; +SELECT * FROM numbers(10) FORMAT PrettyCompactMonoBlock; +SELECT * FROM numbers(10) FORMAT PrettyNoEscapes; +SELECT * FROM numbers(10) FORMAT PrettyCompactNoEscapes; +SELECT * FROM numbers(10) FORMAT PrettySpaceNoEscapes; +SELECT * FROM numbers(10) FORMAT PrettySpace; + +SET max_block_size=1; + +SELECT * FROM (SELECT 1 AS a UNION ALL SELECT 2 as a) ORDER BY a FORMAT Pretty; +SELECT * FROM (SELECT 1 AS a UNION ALL SELECT 2 as a) ORDER BY a FORMAT PrettyCompact; +SELECT * FROM (SELECT 1 AS a UNION ALL SELECT 2 as a) ORDER BY a FORMAT PrettyCompactMonoBlock; +SELECT * FROM (SELECT 1 AS a UNION ALL SELECT 2 as a) ORDER BY a FORMAT PrettyNoEscapes; +SELECT * FROM (SELECT 1 AS a UNION ALL SELECT 2 as a) ORDER BY a FORMAT PrettyCompactNoEscapes; +SELECT * FROM (SELECT 1 AS a UNION ALL SELECT 2 as a) ORDER BY a FORMAT PrettySpace; +SELECT * FROM (SELECT 1 AS a UNION ALL SELECT 2 as a) ORDER BY a FORMAT PrettySpaceNoEscapes; + +SELECT * FROM numbers(10) ORDER BY number FORMAT Pretty; +SELECT * FROM numbers(10) ORDER BY number FORMAT PrettyCompact; +SELECT * FROM numbers(10) ORDER BY number FORMAT PrettyCompactMonoBlock; +SELECT * FROM numbers(10) ORDER BY number FORMAT PrettyNoEscapes; +SELECT * FROM numbers(10) ORDER BY number FORMAT PrettyCompactNoEscapes; +SELECT * FROM numbers(10) ORDER BY number FORMAT PrettySpace; +SELECT * FROM numbers(10) ORDER BY number FORMAT PrettySpaceNoEscapes; diff --git a/parser/testdata/01509_parallel_quorum_insert_no_replicas_long/ast.json b/parser/testdata/01509_parallel_quorum_insert_no_replicas_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01509_parallel_quorum_insert_no_replicas_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01509_parallel_quorum_insert_no_replicas_long/metadata.json b/parser/testdata/01509_parallel_quorum_insert_no_replicas_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01509_parallel_quorum_insert_no_replicas_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01509_parallel_quorum_insert_no_replicas_long/query.sql b/parser/testdata/01509_parallel_quorum_insert_no_replicas_long/query.sql new file mode 100644 index 000000000..19cf5786e --- /dev/null +++ b/parser/testdata/01509_parallel_quorum_insert_no_replicas_long/query.sql @@ -0,0 +1,104 @@ +-- Tags: long, replica, no-replicated-database, no-shared-merge-tree +-- Tag no-replicated-database: Fails due to additional replicas or shards +-- no-shared-merge-tree: depends on tricks with quorum inserts for replicated MergeTree + +DROP TABLE IF EXISTS r1 SYNC; +DROP TABLE IF EXISTS r2 SYNC; + +CREATE TABLE r1 ( + key UInt64, value String +) +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/01509_parallel_quorum_insert_no_replicas', '1') +ORDER BY tuple(); + +CREATE TABLE r2 ( + key UInt64, value String +) +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/01509_parallel_quorum_insert_no_replicas', '2') +ORDER BY tuple(); + +SET insert_quorum_parallel=1; + +SET insert_quorum=3; +INSERT INTO r1 VALUES(1, '1'); --{serverError TOO_FEW_LIVE_REPLICAS} + +-- retry should still fail despite insert_deduplicate being enabled +INSERT INTO r1 VALUES(1, '1'); --{serverError TOO_FEW_LIVE_REPLICAS} +INSERT INTO r1 VALUES(1, '1'); --{serverError TOO_FEW_LIVE_REPLICAS} + +SELECT 'insert to two replicas works'; +SET insert_quorum=2, insert_quorum_parallel=1; + +INSERT INTO r1 VALUES(1, '1'); + +SELECT COUNT() FROM r1; +SELECT COUNT() FROM r2; +
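Every fixture directory added in this patch follows the same three-file layout: query.sql with the statements, ast.json with the expected EXPLAIN AST response (or {"error": true}), and metadata.json, currently {"todo": true} for cases the parser does not handle yet. A minimal sketch of how a table-driven Go test might walk these fixtures and honor the todo marker; the test name and harness structure here are illustrative assumptions, not the repo's actual API:

```go
package parser_test

import (
	"encoding/json"
	"os"
	"path/filepath"
	"testing"
)

// metadata mirrors the metadata.json files added in this patch.
type metadata struct {
	Todo bool `json:"todo"`
}

func TestFixtures(t *testing.T) {
	dirs, err := filepath.Glob(filepath.Join("testdata", "*"))
	if err != nil {
		t.Fatal(err)
	}
	for _, dir := range dirs {
		dir := dir
		t.Run(filepath.Base(dir), func(t *testing.T) {
			raw, err := os.ReadFile(filepath.Join(dir, "metadata.json"))
			if err != nil {
				t.Fatal(err)
			}
			var md metadata
			if err := json.Unmarshal(raw, &md); err != nil {
				t.Fatal(err)
			}
			if md.Todo {
				t.Skip("marked todo in metadata.json")
			}
			// query.sql holds the statements; a real harness would parse them
			// here and compare the resulting AST against ast.json.
			if _, err := os.ReadFile(filepath.Join(dir, "query.sql")); err != nil {
				t.Fatal(err)
			}
		})
	}
}
```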
+DETACH TABLE r2; + +INSERT INTO r1 VALUES(2, '2'); --{serverError TOO_FEW_LIVE_REPLICAS} + +-- retry should fail despite insert_deduplicate being enabled +INSERT INTO r1 VALUES(2, '2'); --{serverError TOO_FEW_LIVE_REPLICAS} +INSERT INTO r1 VALUES(2, '2'); --{serverError TOO_FEW_LIVE_REPLICAS} + +SET insert_quorum=1, insert_quorum_parallel=1; +SELECT 'insert to single replica works'; +INSERT INTO r1 VALUES(2, '2'); + +ATTACH TABLE r2; + +INSERT INTO r2 VALUES(2, '2'); + +SYSTEM SYNC REPLICA r2; + +SET insert_quorum=2, insert_quorum_parallel=1; + +INSERT INTO r1 VALUES(3, '3'); + +SELECT COUNT() FROM r1; +SELECT COUNT() FROM r2; + +SELECT 'deduplication works'; +INSERT INTO r2 VALUES(3, '3'); + +-- still works if we relax quorum +SET insert_quorum=1, insert_quorum_parallel=1; +INSERT INTO r2 VALUES(3, '3'); +INSERT INTO r1 VALUES(3, '3'); +-- will start failing if we increase quorum +SET insert_quorum=3, insert_quorum_parallel=1; +INSERT INTO r1 VALUES(3, '3'); --{serverError TOO_FEW_LIVE_REPLICAS} +-- works again when quorum=2 +SET insert_quorum=2, insert_quorum_parallel=1; +INSERT INTO r2 VALUES(3, '3'); + +SELECT COUNT() FROM r1; +SELECT COUNT() FROM r2; + +SYSTEM STOP FETCHES r2; + +SET insert_quorum_timeout=0; + +INSERT INTO r1 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (4, '4'); -- { serverError UNKNOWN_STATUS_OF_INSERT } + +-- retry should fail despite insert_deduplicate being enabled +INSERT INTO r1 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (4, '4'); -- { serverError UNKNOWN_STATUS_OF_INSERT } +INSERT INTO r1 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (4, '4'); -- { serverError UNKNOWN_STATUS_OF_INSERT } +SELECT * FROM r2 WHERE key=4; + +SYSTEM START FETCHES r2; + +SET insert_quorum_timeout=6000000; + +-- now retry should be successful +INSERT INTO r1 VALUES (4, '4'); + +SYSTEM SYNC REPLICA r2; + +SELECT 'insert happened'; +SELECT COUNT() FROM r1; +SELECT COUNT() FROM r2; + +DROP TABLE IF EXISTS r1 SYNC; +DROP TABLE IF EXISTS r2 SYNC; diff --git a/parser/testdata/01511_alter_version_versioned_collapsing_merge_tree/ast.json b/parser/testdata/01511_alter_version_versioned_collapsing_merge_tree/ast.json new file mode 100644 index 000000000..d8b6ddbf9 --- /dev/null +++ b/parser/testdata/01511_alter_version_versioned_collapsing_merge_tree/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_with_version (children 1)" + }, + { + "explain": " Identifier table_with_version" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001385562, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/01511_alter_version_versioned_collapsing_merge_tree/metadata.json b/parser/testdata/01511_alter_version_versioned_collapsing_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01511_alter_version_versioned_collapsing_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01511_alter_version_versioned_collapsing_merge_tree/query.sql b/parser/testdata/01511_alter_version_versioned_collapsing_merge_tree/query.sql new file mode 100644 index 000000000..87995abb9 --- /dev/null +++ b/parser/testdata/01511_alter_version_versioned_collapsing_merge_tree/query.sql @@ -0,0 +1,46 @@ +DROP TABLE IF EXISTS table_with_version; + +CREATE TABLE table_with_version +( + key UInt64, + value String, + version UInt8, + sign Int8 +) +ENGINE VersionedCollapsingMergeTree(sign,
version) +ORDER BY key; + +INSERT INTO table_with_version VALUES (1, '1', 1, -1); +INSERT INTO table_with_version VALUES (2, '2', 2, -1); + +SELECT * FROM table_with_version ORDER BY key; + +SHOW CREATE TABLE table_with_version; + +ALTER TABLE table_with_version MODIFY COLUMN version UInt32; + +SELECT * FROM table_with_version ORDER BY key; + +SHOW CREATE TABLE table_with_version; + +INSERT INTO TABLE table_with_version VALUES(1, '1', 1, 1); +INSERT INTO TABLE table_with_version VALUES(1, '1', 2, 1); + +SELECT * FROM table_with_version FINAL ORDER BY key; + +INSERT INTO TABLE table_with_version VALUES(3, '3', 65555, 1); + +SELECT * FROM table_with_version FINAL ORDER BY key; + +INSERT INTO TABLE table_with_version VALUES(3, '3', 65555, -1); + +SELECT * FROM table_with_version FINAL ORDER BY key; + +ALTER TABLE table_with_version MODIFY COLUMN version String; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} +ALTER TABLE table_with_version MODIFY COLUMN version Int64; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} +ALTER TABLE table_with_version MODIFY COLUMN version UInt16; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} +ALTER TABLE table_with_version MODIFY COLUMN version Float64; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} +ALTER TABLE table_with_version MODIFY COLUMN version Date; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} +ALTER TABLE table_with_version MODIFY COLUMN version DateTime; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +DROP TABLE IF EXISTS table_with_version; diff --git a/parser/testdata/01511_alter_version_versioned_collapsing_merge_tree_zookeeper/ast.json b/parser/testdata/01511_alter_version_versioned_collapsing_merge_tree_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01511_alter_version_versioned_collapsing_merge_tree_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01511_alter_version_versioned_collapsing_merge_tree_zookeeper/metadata.json b/parser/testdata/01511_alter_version_versioned_collapsing_merge_tree_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01511_alter_version_versioned_collapsing_merge_tree_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01511_alter_version_versioned_collapsing_merge_tree_zookeeper/query.sql b/parser/testdata/01511_alter_version_versioned_collapsing_merge_tree_zookeeper/query.sql new file mode 100644 index 000000000..aff4e1005 --- /dev/null +++ b/parser/testdata/01511_alter_version_versioned_collapsing_merge_tree_zookeeper/query.sql @@ -0,0 +1,66 @@ +-- Tags: zookeeper + +DROP TABLE IF EXISTS table_with_version_replicated_1; +DROP TABLE IF EXISTS table_with_version_replicated_2; + +CREATE TABLE table_with_version_replicated_1 +( + key UInt64, + value String, + version UInt8, + sign Int8 +) +ENGINE ReplicatedVersionedCollapsingMergeTree('/clickhouse/' || currentDatabase() || '/test_01511/{shard}/t', '1_{replica}', sign, version) +ORDER BY key; + +CREATE TABLE table_with_version_replicated_2 +( + key UInt64, + value String, + version UInt8, + sign Int8 +) +ENGINE ReplicatedVersionedCollapsingMergeTree('/clickhouse/' || currentDatabase() || '/test_01511/{shard}/t', '2_{replica}', sign, version) +ORDER BY key; + +INSERT INTO table_with_version_replicated_1 VALUES (1, '1', 1, -1); +INSERT INTO table_with_version_replicated_1 VALUES (2, '2', 2, -1); + +SELECT * FROM table_with_version_replicated_1 ORDER BY key; + +SHOW CREATE TABLE table_with_version_replicated_1; + 
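The ast.json files store ClickHouse's own EXPLAIN AST response in its JSON output format: each element of data carries one line of the tree in its explain field, with leading spaces encoding depth. A hedged sketch of decoding such a file back into the plain multi-line dump; the struct names here are illustrative, not part of the repo:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"strings"
)

// astFile mirrors the subset of the ClickHouse JSON response used by these fixtures.
// Files containing only {"error": true} mark queries whose EXPLAIN AST failed.
type astFile struct {
	Data []struct {
		Explain string `json:"explain"`
	} `json:"data"`
	Error bool `json:"error"`
}

func main() {
	raw, err := os.ReadFile("parser/testdata/01511_alter_version_versioned_collapsing_merge_tree/ast.json")
	if err != nil {
		panic(err)
	}
	var f astFile
	if err := json.Unmarshal(raw, &f); err != nil {
		panic(err)
	}
	if f.Error {
		fmt.Println("fixture expects the query to be rejected")
		return
	}
	lines := make([]string, 0, len(f.Data))
	for _, row := range f.Data {
		lines = append(lines, row.Explain)
	}
	// Reassembled, this fixture prints:
	// DropQuery table_with_version (children 1)
	//  Identifier table_with_version
	fmt.Println(strings.Join(lines, "\n"))
}
```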
+ALTER TABLE table_with_version_replicated_1 MODIFY COLUMN version UInt32 SETTINGS replication_alter_partitions_sync=2; + +SELECT * FROM table_with_version_replicated_1 ORDER BY key; + +SHOW CREATE TABLE table_with_version_replicated_1; + +INSERT INTO TABLE table_with_version_replicated_1 VALUES(1, '1', 1, 1); +INSERT INTO TABLE table_with_version_replicated_1 VALUES(1, '1', 2, 1); + +SELECT * FROM table_with_version_replicated_1 FINAL ORDER BY key; + +INSERT INTO TABLE table_with_version_replicated_1 VALUES(3, '3', 65555, 1); + +SELECT * FROM table_with_version_replicated_1 FINAL ORDER BY key; + +INSERT INTO TABLE table_with_version_replicated_1 VALUES(3, '3', 65555, -1); + +SYSTEM SYNC REPLICA table_with_version_replicated_2; + +DETACH TABLE table_with_version_replicated_1; +DETACH TABLE table_with_version_replicated_2; +ATTACH TABLE table_with_version_replicated_2; +ATTACH TABLE table_with_version_replicated_1; + +SELECT * FROM table_with_version_replicated_1 FINAL ORDER BY key; + +SYSTEM SYNC REPLICA table_with_version_replicated_2; + +SHOW CREATE TABLE table_with_version_replicated_2; + +SELECT * FROM table_with_version_replicated_2 FINAL ORDER BY key; + +DROP TABLE IF EXISTS table_with_version_replicated_1; +DROP TABLE IF EXISTS table_with_version_replicated_2; diff --git a/parser/testdata/01511_different_expression_with_same_alias/ast.json b/parser/testdata/01511_different_expression_with_same_alias/ast.json new file mode 100644 index 000000000..d7876e777 --- /dev/null +++ b/parser/testdata/01511_different_expression_with_same_alias/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery repro_hits (children 1)" + }, + { + "explain": " Identifier repro_hits" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00171662, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01511_different_expression_with_same_alias/metadata.json b/parser/testdata/01511_different_expression_with_same_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01511_different_expression_with_same_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01511_different_expression_with_same_alias/query.sql b/parser/testdata/01511_different_expression_with_same_alias/query.sql new file mode 100644 index 000000000..9d5d186b8 --- /dev/null +++ b/parser/testdata/01511_different_expression_with_same_alias/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS repro_hits; + +CREATE TABLE repro_hits ( date Date, metric Float64) ENGINE = MergeTree() ORDER BY date; + +-- From https://github.com/ClickHouse/ClickHouse/issues/12513#issue-657202535 +SELECT date as period, 1 as having_check, min(date) as period_start, addDays(max(date), 1) as period_end, dateDiff('second', period_start, period_end) as total_duration, sum(metric) as metric_ FROM repro_hits GROUP BY period HAVING having_check != -1; + +SELECT min(number) as min_number FROM numbers(10) GROUP BY number HAVING 1 ORDER BY min_number; + +DROP TABLE IF EXISTS repro_hits; diff --git a/parser/testdata/01511_format_readable_timedelta/ast.json b/parser/testdata/01511_format_readable_timedelta/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01511_format_readable_timedelta/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01511_format_readable_timedelta/metadata.json b/parser/testdata/01511_format_readable_timedelta/metadata.json new 
file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01511_format_readable_timedelta/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01511_format_readable_timedelta/query.sql b/parser/testdata/01511_format_readable_timedelta/query.sql new file mode 100644 index 000000000..ee099b031 --- /dev/null +++ b/parser/testdata/01511_format_readable_timedelta/query.sql @@ -0,0 +1,19 @@ +SELECT + arrayJoin([1, 60, 60*60, 60*60*24, 60*60*24*30, 60*60*24*365]) AS elapsed, + formatReadableTimeDelta(elapsed*5.5) AS time_delta; +SELECT + 'minutes' AS maximum_unit, + arrayJoin([1, 60, 60*60, 60*60*24, 60*60*24*30, 60*60*24*365]) AS elapsed, + formatReadableTimeDelta(elapsed*5.5, maximum_unit) AS time_delta; +SELECT + 'hours' AS maximum_unit, + arrayJoin([1, 60, 60*60, 60*60*24, 60*60*24*30, 60*60*24*365]) AS elapsed, + formatReadableTimeDelta(elapsed*5.5, maximum_unit) AS time_delta; +SELECT + 'days' AS maximum_unit, + arrayJoin([1, 60, 60*60, 60*60*24, 60*60*24*30, 60*60*24*365]) AS elapsed, + formatReadableTimeDelta(elapsed*5.5, maximum_unit) AS time_delta; +SELECT + 'months' AS maximum_unit, + arrayJoin([1, 60, 60*60, 60*60*24, 60*60*24*30, 60*60*24*365]) AS elapsed, + formatReadableTimeDelta(elapsed*5.5, maximum_unit) AS time_delta; diff --git a/parser/testdata/01511_prewhere_with_virtuals/ast.json b/parser/testdata/01511_prewhere_with_virtuals/ast.json new file mode 100644 index 000000000..0330dc6a7 --- /dev/null +++ b/parser/testdata/01511_prewhere_with_virtuals/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_not_found_column_nothing (children 1)" + }, + { + "explain": " Identifier test_not_found_column_nothing" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001656581, + "rows_read": 2, + "bytes_read": 110 + } +} diff --git a/parser/testdata/01511_prewhere_with_virtuals/metadata.json b/parser/testdata/01511_prewhere_with_virtuals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01511_prewhere_with_virtuals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01511_prewhere_with_virtuals/query.sql b/parser/testdata/01511_prewhere_with_virtuals/query.sql new file mode 100644 index 000000000..43f003fc8 --- /dev/null +++ b/parser/testdata/01511_prewhere_with_virtuals/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS test_not_found_column_nothing; + +CREATE TABLE test_not_found_column_nothing +( + col001 UInt8, + col002 UInt8 +) Engine=MergeTree ORDER BY tuple() PARTITION BY col001 % 3; + +INSERT INTO test_not_found_column_nothing(col001) SELECT number FROM numbers(11); + +SELECT _part, count() FROM test_not_found_column_nothing PREWHERE col001 % 3 != 0 GROUP BY _part ORDER BY _part; +SELECT _part FROM test_not_found_column_nothing PREWHERE col001 = 0; + +DROP TABLE test_not_found_column_nothing; diff --git a/parser/testdata/01512_create_replicate_merge_tree_one_arg/ast.json b/parser/testdata/01512_create_replicate_merge_tree_one_arg/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01512_create_replicate_merge_tree_one_arg/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01512_create_replicate_merge_tree_one_arg/metadata.json b/parser/testdata/01512_create_replicate_merge_tree_one_arg/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01512_create_replicate_merge_tree_one_arg/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01512_create_replicate_merge_tree_one_arg/query.sql b/parser/testdata/01512_create_replicate_merge_tree_one_arg/query.sql new file mode 100644 index 000000000..77da5a182 --- /dev/null +++ b/parser/testdata/01512_create_replicate_merge_tree_one_arg/query.sql @@ -0,0 +1,5 @@ +-- Tags: replica + +CREATE TABLE mt (v UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01497/mt') + ORDER BY tuple() -- { serverError BAD_ARGUMENTS } + diff --git a/parser/testdata/01513_count_without_select_sequence_consistency_zookeeper_long/ast.json b/parser/testdata/01513_count_without_select_sequence_consistency_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01513_count_without_select_sequence_consistency_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01513_count_without_select_sequence_consistency_zookeeper_long/metadata.json b/parser/testdata/01513_count_without_select_sequence_consistency_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01513_count_without_select_sequence_consistency_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01513_count_without_select_sequence_consistency_zookeeper_long/query.sql b/parser/testdata/01513_count_without_select_sequence_consistency_zookeeper_long/query.sql new file mode 100644 index 000000000..f692deb84 --- /dev/null +++ b/parser/testdata/01513_count_without_select_sequence_consistency_zookeeper_long/query.sql @@ -0,0 +1,39 @@ +-- Tags: long, zookeeper, no-shared-merge-tree +-- Tag no-shared-merge-tree: no quorum support + +SET send_logs_level = 'fatal'; + +DROP TABLE IF EXISTS quorum1 SYNC; +DROP TABLE IF EXISTS quorum2 SYNC; +DROP TABLE IF EXISTS quorum3 SYNC; + +CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_01513/sequence_consistency', '1') ORDER BY x PARTITION BY y; +CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_01513/sequence_consistency', '2') ORDER BY x PARTITION BY y; +CREATE TABLE quorum3(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_01513/sequence_consistency', '3') ORDER BY x PARTITION BY y; + +INSERT INTO quorum1 VALUES (1, '1990-11-15'); +INSERT INTO quorum1 VALUES (2, '1990-11-15'); +INSERT INTO quorum1 VALUES (3, '2020-12-16'); + +SYSTEM SYNC REPLICA quorum2; +SYSTEM SYNC REPLICA quorum3; + +SET select_sequential_consistency=0; +SET optimize_trivial_count_query=1; +SET insert_quorum=2, insert_quorum_parallel=0; + +SYSTEM STOP FETCHES quorum1; + +INSERT INTO quorum2 VALUES (4, toDate('2020-12-16')); + +SYSTEM SYNC REPLICA quorum3; + +-- Should read local committed parts instead of throwing error code 289 (DB::Exception: Replica doesn't have part 20201216_1_1_0 which was successfully written to quorum of other replicas); the count() below, after the sketch, should succeed.
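This fixture also shows the `-- Tags:` header (long, zookeeper, no-shared-merge-tree) that ClickHouse's test runner uses for scheduling; a consumer of this corpus can recover those tags with a few lines of Go. A sketch, assuming the header always sits on the first line when present (the function name is hypothetical):

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// readTags returns the comma-separated tag list from a fixture's first line,
// e.g. "-- Tags: long, zookeeper, no-shared-merge-tree".
func readTags(path string) ([]string, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	sc := bufio.NewScanner(f)
	if !sc.Scan() {
		return nil, sc.Err()
	}
	const prefix = "-- Tags:"
	line := strings.TrimSpace(sc.Text())
	if !strings.HasPrefix(line, prefix) {
		return nil, nil // untagged fixture
	}
	var tags []string
	for _, t := range strings.Split(strings.TrimPrefix(line, prefix), ",") {
		tags = append(tags, strings.TrimSpace(t))
	}
	return tags, nil
}

func main() {
	tags, err := readTags("parser/testdata/01513_count_without_select_sequence_consistency_zookeeper_long/query.sql")
	if err != nil {
		panic(err)
	}
	fmt.Println(tags) // [long zookeeper no-shared-merge-tree]
}
```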
+SELECT count() FROM quorum1; + +SELECT count() FROM quorum2; +SELECT count() FROM quorum3; + +DROP TABLE quorum1 SYNC; +DROP TABLE quorum2 SYNC; +DROP TABLE quorum3 SYNC; diff --git a/parser/testdata/01513_defaults_on_defaults_no_column/ast.json b/parser/testdata/01513_defaults_on_defaults_no_column/ast.json new file mode 100644 index 000000000..cdda995ba --- /dev/null +++ b/parser/testdata/01513_defaults_on_defaults_no_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery defaults_on_defaults (children 1)" + }, + { + "explain": " Identifier defaults_on_defaults" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001055663, + "rows_read": 2, + "bytes_read": 92 + } +} diff --git a/parser/testdata/01513_defaults_on_defaults_no_column/metadata.json b/parser/testdata/01513_defaults_on_defaults_no_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01513_defaults_on_defaults_no_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01513_defaults_on_defaults_no_column/query.sql b/parser/testdata/01513_defaults_on_defaults_no_column/query.sql new file mode 100644 index 000000000..83025b6cc --- /dev/null +++ b/parser/testdata/01513_defaults_on_defaults_no_column/query.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS defaults_on_defaults; +CREATE TABLE defaults_on_defaults ( + key UInt64 +) +ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO defaults_on_defaults values (1); + +ALTER TABLE defaults_on_defaults ADD COLUMN `Arr.C1` Array(UInt32) DEFAULT emptyArrayUInt32(); + +ALTER TABLE defaults_on_defaults ADD COLUMN `Arr.C2` Array(UInt32) DEFAULT arrayResize(emptyArrayUInt32(), length(`Arr.C1`)); + +ALTER TABLE defaults_on_defaults ADD COLUMN `Arr.C3` Array(UInt32) ALIAS arrayResize(emptyArrayUInt32(), length(`Arr.C2`)); + +SELECT 1 from defaults_on_defaults where length(`Arr.C2`) = 0; + +SELECT 1 from defaults_on_defaults where length(`Arr.C3`) = 0; + +ALTER TABLE defaults_on_defaults ADD COLUMN `Arr.C4` Array(UInt32) DEFAULT arrayResize(emptyArrayUInt32(), length(`Arr.C3`)); + +SELECT 1 from defaults_on_defaults where length(`Arr.C4`) = 0; + +ALTER TABLE defaults_on_defaults ADD COLUMN `ArrLen` UInt64 DEFAULT length(`Arr.C4`); + +SELECT 1 from defaults_on_defaults where ArrLen = 0; + +SELECT * from defaults_on_defaults where ArrLen = 0; + +SHOW CREATE TABLE defaults_on_defaults; + +OPTIMIZE TABLE defaults_on_defaults FINAL; + +SELECT 1 from defaults_on_defaults where length(`Arr.C4`) = 0; + +DROP TABLE IF EXISTS defaults_on_defaults; diff --git a/parser/testdata/01513_ilike_like_cache/ast.json b/parser/testdata/01513_ilike_like_cache/ast.json new file mode 100644 index 000000000..453586b03 --- /dev/null +++ b/parser/testdata/01513_ilike_like_cache/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function like (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'hello'" + }, + { + "explain": " Literal 'hell%'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001132782, + "rows_read": 8, + "bytes_read": 285 + } +} diff --git 
a/parser/testdata/01513_ilike_like_cache/metadata.json b/parser/testdata/01513_ilike_like_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01513_ilike_like_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01513_ilike_like_cache/query.sql b/parser/testdata/01513_ilike_like_cache/query.sql new file mode 100644 index 000000000..dc7617713 --- /dev/null +++ b/parser/testdata/01513_ilike_like_cache/query.sql @@ -0,0 +1,5 @@ +SELECT 'hello' like 'hell%'; +SELECT 'HELLO' ilike 'hell%'; + +SELECT 'world' ilike 'Wo%Ld'; +SELECT 'world' like 'Wo%Ld'; diff --git a/parser/testdata/01513_optimize_aggregation_in_order_memory_long/ast.json b/parser/testdata/01513_optimize_aggregation_in_order_memory_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01513_optimize_aggregation_in_order_memory_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01513_optimize_aggregation_in_order_memory_long/metadata.json b/parser/testdata/01513_optimize_aggregation_in_order_memory_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01513_optimize_aggregation_in_order_memory_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01513_optimize_aggregation_in_order_memory_long/query.sql b/parser/testdata/01513_optimize_aggregation_in_order_memory_long/query.sql new file mode 100644 index 000000000..39cf6a4b9 --- /dev/null +++ b/parser/testdata/01513_optimize_aggregation_in_order_memory_long/query.sql @@ -0,0 +1,23 @@ +-- Tags: long, no-random-merge-tree-settings +--- FIXME no-random-merge-tree-settings requires investigation + +drop table if exists data_01513; +create table data_01513 (key String) engine=MergeTree() order by key; +-- 10e3 groups, 200 rows each +insert into data_01513 select number%10e3 from numbers(2e6); +-- reduce number of parts to 1 +optimize table data_01513 final; + +-- this is enough to trigger a non-reusable Chunk in Arena. +set max_memory_usage='500M'; +set max_threads=1; +set max_block_size=500; +set max_bytes_before_external_group_by=0; +set max_bytes_ratio_before_external_group_by=0; + +select key, groupArray(repeat('a', 200)), count() from data_01513 group by key format Null settings optimize_aggregation_in_order=0; -- { serverError MEMORY_LIMIT_EXCEEDED } +select key, groupArray(repeat('a', 200)), count() from data_01513 group by key format Null settings optimize_aggregation_in_order=1; +-- for WITH TOTALS, previous groups should be kept.
+select key, groupArray(repeat('a', 200)), count() from data_01513 group by key with totals format Null settings optimize_aggregation_in_order=1; -- { serverError MEMORY_LIMIT_EXCEEDED } + +drop table data_01513; diff --git a/parser/testdata/01514_empty_buffer_different_types/ast.json b/parser/testdata/01514_empty_buffer_different_types/ast.json new file mode 100644 index 000000000..9172dd604 --- /dev/null +++ b/parser/testdata/01514_empty_buffer_different_types/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001099442, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01514_empty_buffer_different_types/metadata.json b/parser/testdata/01514_empty_buffer_different_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01514_empty_buffer_different_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01514_empty_buffer_different_types/query.sql b/parser/testdata/01514_empty_buffer_different_types/query.sql new file mode 100644 index 000000000..a1debbf7e --- /dev/null +++ b/parser/testdata/01514_empty_buffer_different_types/query.sql @@ -0,0 +1,13 @@ +set send_logs_level = 'error'; + +DROP TABLE IF EXISTS merge_tree_table1; +CREATE TABLE merge_tree_table1 (`s` LowCardinality(String), x UInt32) ENGINE = MergeTree ORDER BY x settings index_granularity = 1; +CREATE TABLE buffer_table1 ( `s` String , x UInt32) ENGINE = Buffer(currentDatabase(), 'merge_tree_table1', 16, 10, 60, 10, 1000, 1048576, 2097152); +SELECT s FROM buffer_table1; + +insert into merge_tree_table1 values ('a', 1); +select s from buffer_table1 where x = 1; +select s from buffer_table1 where x = 2; + +DROP TABLE IF EXISTS merge_tree_table1; +DROP TABLE buffer_table1; diff --git a/parser/testdata/01514_input_format_csv_enum_as_number_setting/ast.json b/parser/testdata/01514_input_format_csv_enum_as_number_setting/ast.json new file mode 100644 index 000000000..ce71c72f9 --- /dev/null +++ b/parser/testdata/01514_input_format_csv_enum_as_number_setting/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_with_enum_column_for_csv_insert (children 1)" + }, + { + "explain": " Identifier table_with_enum_column_for_csv_insert" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001121476, + "rows_read": 2, + "bytes_read": 126 + } +} diff --git a/parser/testdata/01514_input_format_csv_enum_as_number_setting/metadata.json b/parser/testdata/01514_input_format_csv_enum_as_number_setting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01514_input_format_csv_enum_as_number_setting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01514_input_format_csv_enum_as_number_setting/query.sql b/parser/testdata/01514_input_format_csv_enum_as_number_setting/query.sql new file mode 100644 index 000000000..9e1783b7b --- /dev/null +++ b/parser/testdata/01514_input_format_csv_enum_as_number_setting/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS table_with_enum_column_for_csv_insert; + +CREATE TABLE table_with_enum_column_for_csv_insert ( + Id Int32, + Value Enum('ef' = 1, 'es' = 2) +) ENGINE=Memory(); + +SET input_format_csv_enum_as_number = 1; + +INSERT INTO table_with_enum_column_for_csv_insert FORMAT CSV 102,2 + +SELECT * FROM 
table_with_enum_column_for_csv_insert; + +SET input_format_csv_enum_as_number = 0; + +DROP TABLE IF EXISTS table_with_enum_column_for_csv_insert; diff --git a/parser/testdata/01514_input_format_json_enum_as_number/ast.json b/parser/testdata/01514_input_format_json_enum_as_number/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01514_input_format_json_enum_as_number/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01514_input_format_json_enum_as_number/metadata.json b/parser/testdata/01514_input_format_json_enum_as_number/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01514_input_format_json_enum_as_number/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01514_input_format_json_enum_as_number/query.sql b/parser/testdata/01514_input_format_json_enum_as_number/query.sql new file mode 100644 index 000000000..a5044cd68 --- /dev/null +++ b/parser/testdata/01514_input_format_json_enum_as_number/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS table_with_enum_column_for_json_insert; + +CREATE TABLE table_with_enum_column_for_json_insert ( + Id Int32, + Value Enum('ef' = 1, 'es' = 2) +) ENGINE=Memory(); + +INSERT INTO table_with_enum_column_for_json_insert FORMAT JSONEachRow {"Id":102,"Value":2} + +SELECT * FROM table_with_enum_column_for_json_insert; + +DROP TABLE IF EXISTS table_with_enum_column_for_json_insert; diff --git a/parser/testdata/01514_input_format_tsv_enum_as_number_setting/ast.json b/parser/testdata/01514_input_format_tsv_enum_as_number_setting/ast.json new file mode 100644 index 000000000..0f29e3cee --- /dev/null +++ b/parser/testdata/01514_input_format_tsv_enum_as_number_setting/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_with_enum_column_for_tsv_insert (children 1)" + }, + { + "explain": " Identifier table_with_enum_column_for_tsv_insert" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001296544, + "rows_read": 2, + "bytes_read": 126 + } +} diff --git a/parser/testdata/01514_input_format_tsv_enum_as_number_setting/metadata.json b/parser/testdata/01514_input_format_tsv_enum_as_number_setting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01514_input_format_tsv_enum_as_number_setting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01514_input_format_tsv_enum_as_number_setting/query.sql b/parser/testdata/01514_input_format_tsv_enum_as_number_setting/query.sql new file mode 100644 index 000000000..5ad94eeb2 --- /dev/null +++ b/parser/testdata/01514_input_format_tsv_enum_as_number_setting/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS table_with_enum_column_for_tsv_insert; + +CREATE TABLE table_with_enum_column_for_tsv_insert ( + Id Int32, + Value Enum('ef' = 1, 'es' = 2) +) ENGINE=Memory(); + +SET input_format_tsv_enum_as_number = 1; + +INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TSV 102 2 + +INSERT INTO table_with_enum_column_for_tsv_insert FORMAT TabSeparatedRaw 103 1 + +SELECT * FROM table_with_enum_column_for_tsv_insert ORDER BY Id; + +SET input_format_tsv_enum_as_number = 0; + +DROP TABLE IF EXISTS table_with_enum_column_for_tsv_insert; diff --git a/parser/testdata/01514_parallel_formatting/ast.json b/parser/testdata/01514_parallel_formatting/ast.json new file mode 100644 index 000000000..f4c437f79 --- /dev/null +++ 
b/parser/testdata/01514_parallel_formatting/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tsv (children 1)" + }, + { + "explain": " Identifier tsv" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001028921, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01514_parallel_formatting/metadata.json b/parser/testdata/01514_parallel_formatting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01514_parallel_formatting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01514_parallel_formatting/query.sql b/parser/testdata/01514_parallel_formatting/query.sql new file mode 100644 index 000000000..a2d50a4d7 --- /dev/null +++ b/parser/testdata/01514_parallel_formatting/query.sql @@ -0,0 +1,23 @@ +drop table if exists tsv; +set output_format_parallel_formatting=1; +set max_read_buffer_size=1048576; +set max_block_size=65505; + +create table tsv(a int, b int default 7) engine File(TSV); + +insert into tsv(a) select number from numbers(10000000); +select '10000000'; +select count() from tsv; + + +insert into tsv(a) select number from numbers(10000000); +select '20000000'; +select count() from tsv; + + +insert into tsv(a) select number from numbers(10000000); +select '30000000'; +select count() from tsv; + + +drop table tsv; diff --git a/parser/testdata/01514_tid_function/ast.json b/parser/testdata/01514_tid_function/ast.json new file mode 100644 index 000000000..e0f5111e4 --- /dev/null +++ b/parser/testdata/01514_tid_function/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tid (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001143938, + "rows_read": 7, + "bytes_read": 237 + } +} diff --git a/parser/testdata/01514_tid_function/metadata.json b/parser/testdata/01514_tid_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01514_tid_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01514_tid_function/query.sql b/parser/testdata/01514_tid_function/query.sql new file mode 100644 index 000000000..3415eff4b --- /dev/null +++ b/parser/testdata/01514_tid_function/query.sql @@ -0,0 +1 @@ +SELECT tid() FORMAT Null diff --git a/parser/testdata/01515_force_data_skipping_indices/ast.json b/parser/testdata/01515_force_data_skipping_indices/ast.json new file mode 100644 index 000000000..21cafa9de --- /dev/null +++ b/parser/testdata/01515_force_data_skipping_indices/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_01515 (children 1)" + }, + { + "explain": " Identifier data_01515" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001032686, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01515_force_data_skipping_indices/metadata.json b/parser/testdata/01515_force_data_skipping_indices/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01515_force_data_skipping_indices/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01515_force_data_skipping_indices/query.sql b/parser/testdata/01515_force_data_skipping_indices/query.sql new file mode 100644 index 000000000..d504e1c7d --- /dev/null +++ b/parser/testdata/01515_force_data_skipping_indices/query.sql @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS data_01515; +CREATE TABLE data_01515 +( + key Int, + d1 Int, + d1_null Nullable(Int), + INDEX d1_idx d1 TYPE minmax GRANULARITY 1, + INDEX d1_null_idx assumeNotNull(d1_null) TYPE minmax GRANULARITY 1 +) +Engine=MergeTree() +ORDER BY key; + +INSERT INTO data_01515 VALUES (1, 2, 3); + +SELECT * FROM data_01515; +SELECT * FROM data_01515 SETTINGS force_data_skipping_indices=''; -- { serverError CANNOT_PARSE_TEXT } +SELECT * FROM data_01515 SETTINGS force_data_skipping_indices='d1_idx'; -- { serverError INDEX_NOT_USED } +SELECT * FROM data_01515 SETTINGS force_data_skipping_indices='d1_null_idx'; -- { serverError INDEX_NOT_USED } + +SELECT * FROM data_01515 WHERE d1 = 0 SETTINGS force_data_skipping_indices='d1_idx'; +SELECT * FROM data_01515 WHERE d1 = 0 SETTINGS force_data_skipping_indices='`d1_idx`'; +SELECT * FROM data_01515 WHERE d1 = 0 SETTINGS force_data_skipping_indices=' d1_idx '; +SELECT * FROM data_01515 WHERE d1 = 0 SETTINGS force_data_skipping_indices=' d1_idx '; +SELECT * FROM data_01515 WHERE d1 = 0 SETTINGS force_data_skipping_indices='d1_idx,d1_null_idx'; -- { serverError INDEX_NOT_USED } +SELECT * FROM data_01515 WHERE d1 = 0 SETTINGS force_data_skipping_indices='d1_null_idx,d1_idx'; -- { serverError INDEX_NOT_USED } +SELECT * FROM data_01515 WHERE d1 = 0 SETTINGS force_data_skipping_indices='d1_null_idx,d1_idx,,'; -- { serverError INDEX_NOT_USED } +SELECT * FROM data_01515 WHERE d1 = 0 SETTINGS force_data_skipping_indices=' d1_null_idx,d1_idx'; -- { serverError INDEX_NOT_USED } +SELECT * FROM data_01515 WHERE d1 = 0 SETTINGS force_data_skipping_indices=' `d1_null_idx`,d1_idx'; -- { serverError INDEX_NOT_USED } +SELECT * FROM data_01515 WHERE d1 = 0 SETTINGS force_data_skipping_indices='d1_null_idx'; -- { serverError INDEX_NOT_USED } +SELECT * FROM data_01515 WHERE d1 = 0 SETTINGS force_data_skipping_indices=' d1_null_idx '; -- { serverError INDEX_NOT_USED } + +SELECT * FROM data_01515 WHERE d1_null = 0 SETTINGS force_data_skipping_indices='d1_null_idx'; -- { serverError INDEX_NOT_USED } +SELECT * FROM data_01515 WHERE assumeNotNull(d1_null) = 0 SETTINGS force_data_skipping_indices='d1_null_idx'; + +DROP TABLE data_01515; diff --git a/parser/testdata/01515_mv_and_array_join_optimisation_bag/ast.json b/parser/testdata/01515_mv_and_array_join_optimisation_bag/ast.json new file mode 100644 index 000000000..1581e378f --- /dev/null +++ b/parser/testdata/01515_mv_and_array_join_optimisation_bag/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery visits (children 1)" + }, + { + "explain": " Identifier visits" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001412058, + "rows_read": 2, + "bytes_read": 65 + } +} diff --git a/parser/testdata/01515_mv_and_array_join_optimisation_bag/metadata.json b/parser/testdata/01515_mv_and_array_join_optimisation_bag/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01515_mv_and_array_join_optimisation_bag/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01515_mv_and_array_join_optimisation_bag/query.sql b/parser/testdata/01515_mv_and_array_join_optimisation_bag/query.sql new file mode 100644 index 000000000..a071d5c58 --- /dev/null +++ b/parser/testdata/01515_mv_and_array_join_optimisation_bag/query.sql @@ -0,0 +1,51 @@ +CREATE TABLE visits +( + `CounterID` UInt32, + `StartDate` Date, + `StartTime` DateTime, + `GoalsID` Array(UInt32), + `Sign` Int8 +) +ENGINE = Null; + +CREATE TABLE goal +( + `CounterID` UInt32, + `StartDate` Date, + `GoalID` UInt32, + `Visits` AggregateFunction(sumIf, Int8, UInt8), + `GoalReaches` AggregateFunction(sum, Int8) +) ENGINE = AggregatingMergeTree PARTITION BY toStartOfMonth(StartDate) ORDER BY (CounterID, StartDate, GoalID) SETTINGS index_granularity = 256, index_granularity_bytes = '10Mi'; + +CREATE MATERIALIZED VIEW goal_view TO goal +( + `CounterID` UInt32, + `StartDate` Date, + `GoalID` UInt32, + `Visits` AggregateFunction(sumIf, Int8, UInt8), + `GoalReaches` AggregateFunction(sum, Int8) +) AS +SELECT + CounterID, + StartDate, + GoalID, + sumIfState(Sign, _uniq = 1) AS Visits, + sumState(Sign) AS GoalReaches +FROM visits +ARRAY JOIN + GoalsID AS GoalID, + arrayEnumerateUniq(GoalsID) AS _uniq +GROUP BY + CounterID, + StartDate, + GoalID +ORDER BY + CounterID ASC, + StartDate ASC, + GoalID ASC; + +INSERT INTO visits (`CounterID`,`StartDate`,`StartTime`,`Sign`,`GoalsID`) VALUES (1, toDate('2000-01-01'), toDateTime(toDate('2000-01-01')), 1, [1]); + +DROP TABLE goal; +DROP TABLE goal_view; +DROP TABLE visits; diff --git a/parser/testdata/01515_with_global_and_with_propagation/ast.json b/parser/testdata/01515_with_global_and_with_propagation/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01515_with_global_and_with_propagation/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01515_with_global_and_with_propagation/metadata.json b/parser/testdata/01515_with_global_and_with_propagation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01515_with_global_and_with_propagation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01515_with_global_and_with_propagation/query.sql b/parser/testdata/01515_with_global_and_with_propagation/query.sql new file mode 100644 index 000000000..31151fbc8 --- /dev/null +++ b/parser/testdata/01515_with_global_and_with_propagation/query.sql @@ -0,0 +1,17 @@ +-- Tags: global + +SET enable_global_with_statement = 1; + +with 1 as x select x; +with 1 as x select * from (select x); +with 1 as x select *, x from (with 2 as x select x as y); +with 1 as x select x union all select x; +select x from (with 1 as x select x union all with 2 as x select x) order by x; +with 5 as q1, x as (select number+100 as b, number as a from numbers(10) where number > q1) select * from x; + +explain syntax with 1 as x select x; +explain syntax with 1 as x select * from (select x); +explain syntax with 1 as x select *, x from (with 2 as x select x as y); +explain syntax with 1 as x select x union all select x; +explain syntax with 1 as x select x union all with 2 as x select x; +explain syntax with 5 as q1, x as (select number + 100 as b, number as a from numbers(10) where number > q1) select * from x; diff --git a/parser/testdata/01516_create_table_primary_key/ast.json b/parser/testdata/01516_create_table_primary_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01516_create_table_primary_key/ast.json @@ -0,0 +1 @@ 
+{"error": true} diff --git a/parser/testdata/01516_create_table_primary_key/metadata.json b/parser/testdata/01516_create_table_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01516_create_table_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01516_create_table_primary_key/query.sql b/parser/testdata/01516_create_table_primary_key/query.sql new file mode 100644 index 000000000..1f5f80a8a --- /dev/null +++ b/parser/testdata/01516_create_table_primary_key/query.sql @@ -0,0 +1,51 @@ +-- Tags: no-parallel + +SET send_logs_level = 'fatal'; + +DROP DATABASE IF EXISTS test_01516; +set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. +CREATE DATABASE test_01516 ENGINE=Ordinary; -- Full ATTACH requires UUID with Atomic +USE test_01516; + +DROP TABLE IF EXISTS primary_key_test; + +CREATE TABLE primary_key_test(v Int32, PRIMARY KEY(v)) ENGINE=ReplacingMergeTree ORDER BY v; +INSERT INTO primary_key_test VALUES (1), (1), (1); +DETACH TABLE primary_key_test; +ATTACH TABLE primary_key_test(v Int32, PRIMARY KEY(v)) ENGINE=ReplacingMergeTree ORDER BY v; +SELECT * FROM primary_key_test FINAL; +DROP TABLE primary_key_test; + +CREATE TABLE primary_key_test(v Int32) ENGINE=ReplacingMergeTree ORDER BY v PRIMARY KEY(v); +INSERT INTO primary_key_test VALUES (1), (1), (1); +DETACH TABLE primary_key_test; +ATTACH TABLE primary_key_test(v Int32) ENGINE=ReplacingMergeTree ORDER BY v PRIMARY KEY(v); +SELECT * FROM primary_key_test FINAL; +DROP TABLE primary_key_test; + +CREATE TABLE primary_key_test(v1 Int32, v2 Int32, PRIMARY KEY(v1, v2)) ENGINE=ReplacingMergeTree ORDER BY (v1, v2); +INSERT INTO primary_key_test VALUES (1, 1), (1, 1), (1, 1); +DETACH TABLE primary_key_test; +ATTACH TABLE primary_key_test(v1 Int32, v2 Int32, PRIMARY KEY(v1, v2)) ENGINE=ReplacingMergeTree ORDER BY (v1, v2); +SELECT * FROM primary_key_test FINAL; +DROP TABLE primary_key_test; + +CREATE TABLE primary_key_test(v1 Int32, v2 Int32) ENGINE=ReplacingMergeTree ORDER BY (v1, v2) PRIMARY KEY(v1, v2); +INSERT INTO primary_key_test VALUES (1, 1), (1, 1), (1, 1); +DETACH TABLE primary_key_test; +ATTACH TABLE primary_key_test(v1 Int32, v2 Int32) ENGINE=ReplacingMergeTree ORDER BY (v1, v2) PRIMARY KEY(v1, v2); +SELECT * FROM primary_key_test FINAL; +DROP TABLE primary_key_test; + +CREATE TABLE primary_key_test(v1 Int64, v2 Int32, v3 String, PRIMARY KEY(v1, gcd(v1, v2))) ENGINE=ReplacingMergeTree ORDER BY v1; -- { serverError BAD_ARGUMENTS } + +CREATE TABLE primary_key_test(v1 Int64, v2 Int32, v3 String, PRIMARY KEY(v1, gcd(v1, v2))) ENGINE=ReplacingMergeTree ORDER BY (v1, gcd(v1, v2)); + +INSERT INTO primary_key_test VALUES(7, 14, 'hello'), (2, 2, 'world'), (7, 14, 'duplicate'); + +SELECT v1, v2 FROM primary_key_test FINAL ORDER BY v1, v2; + +DROP TABLE primary_key_test; + +DROP DATABASE test_01516; diff --git a/parser/testdata/01516_date_time_output_format/ast.json b/parser/testdata/01516_date_time_output_format/ast.json new file mode 100644 index 000000000..3ed6334bb --- /dev/null +++ b/parser/testdata/01516_date_time_output_format/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_datetime (children 1)" + }, + { + "explain": " Identifier test_datetime" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001159387, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git 
a/parser/testdata/01516_date_time_output_format/metadata.json b/parser/testdata/01516_date_time_output_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01516_date_time_output_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01516_date_time_output_format/query.sql b/parser/testdata/01516_date_time_output_format/query.sql new file mode 100644 index 000000000..3c99d1bb8 --- /dev/null +++ b/parser/testdata/01516_date_time_output_format/query.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS test_datetime; + +CREATE TABLE test_datetime(timestamp DateTime('Asia/Istanbul')) ENGINE=Log; + +INSERT INTO test_datetime VALUES ('2020-10-15 00:00:00'); + +SET date_time_output_format = 'simple'; +SELECT timestamp FROM test_datetime; +SELECT formatDateTime(toDateTime('2020-10-15 00:00:00', 'Asia/Istanbul'), '%Y-%m-%d %R:%S') as formatted_simple FROM test_datetime; + +SET date_time_output_format = 'iso'; +SELECT timestamp FROM test_datetime; +SELECT formatDateTime(toDateTime('2020-10-15 00:00:00', 'Asia/Istanbul'), '%Y-%m-%dT%R:%SZ', 'UTC') as formatted_iso FROM test_datetime; + +SET date_time_output_format = 'unix_timestamp'; +SELECT timestamp FROM test_datetime; +SELECT toUnixTimestamp(timestamp) FROM test_datetime; + +SET date_time_output_format = 'simple'; +DROP TABLE test_datetime; + +CREATE TABLE test_datetime(timestamp DateTime64(3, 'Asia/Istanbul')) Engine=Log; + +INSERT INTO test_datetime VALUES ('2020-10-15 00:00:00'), (1602709200123); + +SET date_time_output_format = 'simple'; +SELECT timestamp FROM test_datetime; + +SET date_time_output_format = 'iso'; +SELECT timestamp FROM test_datetime; + +SET date_time_output_format = 'unix_timestamp'; +SELECT timestamp FROM test_datetime; + +SET date_time_output_format = 'simple'; +DROP TABLE test_datetime; diff --git a/parser/testdata/01517_drop_mv_with_inner_table/ast.json b/parser/testdata/01517_drop_mv_with_inner_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01517_drop_mv_with_inner_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01517_drop_mv_with_inner_table/metadata.json b/parser/testdata/01517_drop_mv_with_inner_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01517_drop_mv_with_inner_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01517_drop_mv_with_inner_table/query.sql b/parser/testdata/01517_drop_mv_with_inner_table/query.sql new file mode 100644 index 000000000..167625629 --- /dev/null +++ b/parser/testdata/01517_drop_mv_with_inner_table/query.sql @@ -0,0 +1,53 @@ +-- Tags: no-parallel + +SET send_logs_level = 'fatal'; + +-- +-- Atomic no SYNC +-- (should go first to check that thread for DROP TABLE does not hang) +-- +drop database if exists db_01517_atomic; +create database db_01517_atomic Engine=Atomic; + +create table db_01517_atomic.source (key Int) engine=Null; +create materialized view db_01517_atomic.mv engine=Null as select * from db_01517_atomic.source; + +drop table db_01517_atomic.mv; +-- ensure that the inner table has been removed after the sync drop +drop table db_01517_atomic.source sync; +show tables from db_01517_atomic; + +-- +-- Atomic +-- +drop database if exists db_01517_atomic_sync; +create database db_01517_atomic_sync Engine=Atomic; + +create table db_01517_atomic_sync.source (key Int) engine=Null; +create materialized view db_01517_atomic_sync.mv engine=Null as select * from 
db_01517_atomic_sync.source; + +-- drops it and hangs with Atomic engine, due to recursive DROP +drop table db_01517_atomic_sync.mv sync; +show tables from db_01517_atomic_sync; + +-- +-- Ordinary +-- +drop database if exists db_01517_ordinary; +set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. +create database db_01517_ordinary Engine=Ordinary; + +create table db_01517_ordinary.source (key Int) engine=Null; +create materialized view db_01517_ordinary.mv engine=Null as select * from db_01517_ordinary.source; + +-- drops it and hangs with Atomic engine, due to recursive DROP +drop table db_01517_ordinary.mv sync; +show tables from db_01517_ordinary; + +drop table db_01517_atomic_sync.source; +drop table db_01517_ordinary.source; + +drop database db_01517_atomic; +drop database db_01517_atomic_sync; +drop database db_01517_ordinary; diff --git a/parser/testdata/01517_select_final_distributed/ast.json b/parser/testdata/01517_select_final_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01517_select_final_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01517_select_final_distributed/metadata.json b/parser/testdata/01517_select_final_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01517_select_final_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01517_select_final_distributed/query.sql b/parser/testdata/01517_select_final_distributed/query.sql new file mode 100644 index 000000000..d2c6e63a8 --- /dev/null +++ b/parser/testdata/01517_select_final_distributed/query.sql @@ -0,0 +1,20 @@ +-- Tags: distributed + +SET enable_parallel_replicas = 0; + +DROP TABLE IF EXISTS test5346; + +CREATE TABLE test5346 (`Id` String, `Timestamp` DateTime, `updated` DateTime) +ENGINE = ReplacingMergeTree(updated) PARTITION BY tuple() ORDER BY (Timestamp, Id); + +INSERT INTO test5346 VALUES('1',toDateTime('2020-01-01 00:00:00'),toDateTime('2020-01-01 00:00:00')); + +SELECT Id, Timestamp +FROM remote('localhost,127.0.0.1,127.0.0.2',currentDatabase(),'test5346') FINAL +ORDER BY Timestamp; + +SELECT Id, Timestamp +FROM remote('localhost,127.0.0.1,127.0.0.2',currentDatabase(),'test5346') FINAL +ORDER BY identity(Timestamp); + +DROP TABLE test5346; diff --git a/parser/testdata/01518_cast_nullable_virtual_system_column/ast.json b/parser/testdata/01518_cast_nullable_virtual_system_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01518_cast_nullable_virtual_system_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01518_cast_nullable_virtual_system_column/metadata.json b/parser/testdata/01518_cast_nullable_virtual_system_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01518_cast_nullable_virtual_system_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01518_cast_nullable_virtual_system_column/query.sql b/parser/testdata/01518_cast_nullable_virtual_system_column/query.sql new file mode 100644 index 000000000..9f4ab03e9 --- /dev/null +++ b/parser/testdata/01518_cast_nullable_virtual_system_column/query.sql @@ -0,0 +1,8 @@ +-- NOTE: database = currentDatabase() is not mandatory + +SELECT database FROM system.tables WHERE database LIKE '%' format Null; +SELECT database AS db FROM system.tables WHERE db LIKE '%' format Null; 
+SELECT CAST(database, 'String') AS db FROM system.tables WHERE db LIKE '%' format Null; +SELECT CAST('a string', 'Nullable(String)') AS str WHERE str LIKE '%' format Null; +SELECT CAST(database, 'Nullable(String)') AS ndb FROM system.tables WHERE ndb LIKE '%' format Null; +SELECT 'all tests passed'; diff --git a/parser/testdata/01518_filtering_aliased_materialized_column/ast.json b/parser/testdata/01518_filtering_aliased_materialized_column/ast.json new file mode 100644 index 000000000..68c2c0f5f --- /dev/null +++ b/parser/testdata/01518_filtering_aliased_materialized_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery logs (children 1)" + }, + { + "explain": " Identifier logs" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00101451, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01518_filtering_aliased_materialized_column/metadata.json b/parser/testdata/01518_filtering_aliased_materialized_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01518_filtering_aliased_materialized_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01518_filtering_aliased_materialized_column/query.sql b/parser/testdata/01518_filtering_aliased_materialized_column/query.sql new file mode 100644 index 000000000..eab5e66aa --- /dev/null +++ b/parser/testdata/01518_filtering_aliased_materialized_column/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS logs; + +CREATE TABLE logs( + date_visited DateTime, + date Date MATERIALIZED toDate(date_visited) +) ENGINE = MergeTree() ORDER BY tuple(); + +SELECT count() FROM logs AS plogs WHERE plogs.date = '2019-11-20'; + +INSERT INTO logs VALUES('2019-11-20 00:00:00'); + +SELECT count() FROM logs AS plogs WHERE plogs.date = '2019-11-20'; + +DROP TABLE logs; diff --git a/parser/testdata/01518_nullable_aggregate_states1/ast.json b/parser/testdata/01518_nullable_aggregate_states1/ast.json new file mode 100644 index 000000000..e88829ee6 --- /dev/null +++ b/parser/testdata/01518_nullable_aggregate_states1/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 7)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Function max (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Function min (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Function avg (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Function any (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001224464, + "rows_read": 24, + "bytes_read": 835 + } +} 
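Note on the 01518_nullable_aggregate_states* fixtures that follow: each plain aggregate (count, min, max, avg, sum, any) is paired with its -State/-Merge combinator form over Nullable inputs. A -State call produces an intermediate AggregateFunction value, and the matching -Merge call finalizes it, so both query shapes must agree. A minimal sketch of the pattern, as a standalone illustration rather than part of the fixtures:

select avgMerge(s) from (select avgState(toNullable(number)) as s from numbers(10));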
diff --git a/parser/testdata/01518_nullable_aggregate_states1/metadata.json b/parser/testdata/01518_nullable_aggregate_states1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01518_nullable_aggregate_states1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01518_nullable_aggregate_states1/query.sql b/parser/testdata/01518_nullable_aggregate_states1/query.sql new file mode 100644 index 000000000..5626369b0 --- /dev/null +++ b/parser/testdata/01518_nullable_aggregate_states1/query.sql @@ -0,0 +1,17 @@ +select count(), count(a), max(a), min(a), avg(a), sum(a), any(a) +from (select cast(Null,'Nullable(Float64)') a); + +select countMerge(cnts), countMerge(cntsa), maxMerge(maxs), minMerge(mins), avgMerge(avgs), sumMerge(sums), anyMerge(anys) from ( +select countState() cnts, countState(a) cntsa, maxState(a) maxs, minState(a) mins, avgState(a) avgs, sumState(a) sums, anyState(a) anys +from (select cast(Null,'Nullable(Float64)') a)); + + +select '--- empty resultset ---'; + + +select count(), count(a), max(a), min(a), avg(a), sum(a), any(a) +from (select cast(1,'Nullable(Float64)') a) where a =0; + +select countMerge(cnts), countMerge(cntsa), maxMerge(maxs), minMerge(mins), avgMerge(avgs), sumMerge(sums), anyMerge(anys) from ( +select countState() cnts, countState(a) cntsa, maxState(a) maxs, minState(a) mins, avgState(a) avgs, sumState(a) sums, anyState(a) anys +from (select cast(1,'Nullable(Float64)') a) where a =0 ); diff --git a/parser/testdata/01518_nullable_aggregate_states2/ast.json b/parser/testdata/01518_nullable_aggregate_states2/ast.json new file mode 100644 index 000000000..3805b76a9 --- /dev/null +++ b/parser/testdata/01518_nullable_aggregate_states2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery testNullableStates (children 1)" + }, + { + "explain": " Identifier testNullableStates" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001255356, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/01518_nullable_aggregate_states2/metadata.json b/parser/testdata/01518_nullable_aggregate_states2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01518_nullable_aggregate_states2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01518_nullable_aggregate_states2/query.sql b/parser/testdata/01518_nullable_aggregate_states2/query.sql new file mode 100644 index 000000000..11de58282 --- /dev/null +++ b/parser/testdata/01518_nullable_aggregate_states2/query.sql @@ -0,0 +1,411 @@ +DROP TABLE IF EXISTS testNullableStates; +DROP TABLE IF EXISTS testNullableStatesAgg; + +CREATE TABLE testNullableStates ( + ts DateTime, + id String, + string Nullable(String), + float64 Nullable(Float64), + float32 Nullable(Float32), + decimal325 Nullable(Decimal32(5)), + date Nullable(Date), + datetime Nullable(DateTime), + datetime64 Nullable(DateTime64), + int64 Nullable(Int64), + int32 Nullable(Int32), + int16 Nullable(Int16), + int8 Nullable(Int8)) +ENGINE=MergeTree PARTITION BY toStartOfDay(ts) ORDER BY id; + +INSERT INTO testNullableStates SELECT + toDateTime('2020-01-01 00:00:00') + number AS ts, + toString(number % 999) AS id, + toString(number) AS string, + number / 333 AS float64, + number / 333 AS float32, + number / 333 AS decimal325, + toDate(ts), + ts, + ts, + number, + toInt32(number), + toInt16(number), + toInt8(number) +FROM 
numbers(100000); + +INSERT INTO testNullableStates SELECT + toDateTime('2020-01-01 00:00:00') + number AS ts, + toString(number % 999 - 5) AS id, + NULL AS string, + NULL AS float64, + NULL AS float32, + NULL AS decimal325, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL +FROM numbers(500); + + +CREATE TABLE testNullableStatesAgg +( + `ts` DateTime, + `id` String, + `stringMin` AggregateFunction(min, Nullable(String)), + `stringMax` AggregateFunction(max, Nullable(String)), + `float64Min` AggregateFunction(min, Nullable(Float64)), + `float64Max` AggregateFunction(max, Nullable(Float64)), + `float64Avg` AggregateFunction(avg, Nullable(Float64)), + `float64Sum` AggregateFunction(sum, Nullable(Float64)), + `float32Min` AggregateFunction(min, Nullable(Float32)), + `float32Max` AggregateFunction(max, Nullable(Float32)), + `float32Avg` AggregateFunction(avg, Nullable(Float32)), + `float32Sum` AggregateFunction(sum, Nullable(Float32)), + `decimal325Min` AggregateFunction(min, Nullable(Decimal32(5))), + `decimal325Max` AggregateFunction(max, Nullable(Decimal32(5))), + `decimal325Avg` AggregateFunction(avg, Nullable(Decimal32(5))), + `decimal325Sum` AggregateFunction(sum, Nullable(Decimal32(5))), + `dateMin` AggregateFunction(min, Nullable(Date)), + `dateMax` AggregateFunction(max, Nullable(Date)), + `datetimeMin` AggregateFunction(min, Nullable(DateTime)), + `datetimeMax` AggregateFunction(max, Nullable(DateTime)), + `datetime64Min` AggregateFunction(min, Nullable(datetime64)), + `datetime64Max` AggregateFunction(max, Nullable(datetime64)), + `int64Min` AggregateFunction(min, Nullable(Int64)), + `int64Max` AggregateFunction(max, Nullable(Int64)), + `int64Avg` AggregateFunction(avg, Nullable(Int64)), + `int64Sum` AggregateFunction(sum, Nullable(Int64)), + `int32Min` AggregateFunction(min, Nullable(Int32)), + `int32Max` AggregateFunction(max, Nullable(Int32)), + `int32Avg` AggregateFunction(avg, Nullable(Int32)), + `int32Sum` AggregateFunction(sum, Nullable(Int32)), + `int16Min` AggregateFunction(min, Nullable(Int16)), + `int16Max` AggregateFunction(max, Nullable(Int16)), + `int16Avg` AggregateFunction(avg, Nullable(Int16)), + `int16Sum` AggregateFunction(sum, Nullable(Int16)), + `int8Min` AggregateFunction(min, Nullable(Int8)), + `int8Max` AggregateFunction(max, Nullable(Int8)), + `int8Avg` AggregateFunction(avg, Nullable(Int8)), + `int8Sum` AggregateFunction(sum, Nullable(Int8)) +) +ENGINE = AggregatingMergeTree() +PARTITION BY toStartOfDay(ts) +ORDER BY id; + + + + +insert into testNullableStatesAgg +select + ts DateTime, + id String, + minState(string) stringMin, + maxState(string) stringMax, + minState(float64) float64Min, + maxState(float64) float64Max, + avgState(float64) float64Avg, + sumState(float64) float64Sum, + minState(float32) float32Min, + maxState(float32) float32Max, + avgState(float32) float32Avg, + sumState(float32) float32Sum, + minState(decimal325) decimal325Min, + maxState(decimal325) decimal325Max, + avgState(decimal325) decimal325Avg, + sumState(decimal325) decimal325Sum, + minState(date) dateMin, + maxState(date) dateMax, + minState(datetime) datetimeMin, + maxState(datetime) datetimeMax, + minState(datetime64) datetime64Min, + maxState(datetime64) datetime64Max, + minState(int64) int64Min, + maxState(int64) int64Max, + avgState(int64) int64Avg, + sumState(int64) int64Sum, + minState(int32) int32Min, + maxState(int32) int32Max, + avgState(int32) int32Avg, + sumState(int32) int32Sum, + minState(int16) int16Min, + maxState(int16) int16Max, + avgState(int16) int16Avg, + 
sumState(int16) int16Sum, + minState(int8) int8Min, + maxState(int8) int8Max, + avgState(int8) int8Avg, + sumState(int8) int8Sum +from testNullableStates +group by ts, id; + +OPTIMIZE TABLE testNullableStatesAgg FINAL; + +select count() from testNullableStates; + +select count() from testNullableStatesAgg; + +select ' ---- select without states ---- '; + +SELECT id, count(), + min(string), + max(string), + floor(min(float64),5), + floor(max(float64),5), + floor(avg(float64),5), + floor(sum(float64),5), + floor(min(float32),5), + floor(max(float32),5), + floor(avg(float32),5), + floor(sum(float32),5), + min(decimal325), + max(decimal325), + avg(decimal325), + sum(decimal325), + min(date), + max(date), + min(datetime), + max(datetime), + min(datetime64), + max(datetime64), + min(int64), + max(int64), + avg(int64), + sum(int64), + min(int32), + max(int32), + avg(int32), + sum(int32), + min(int16), + max(int16), + avg(int16), + sum(int16), + min(int8), + max(int8), + avg(int8), + sum(int8) +FROM testNullableStates +GROUP BY id +ORDER BY id ASC; + +select ' ---- select with states ---- '; + +SELECT id, count(), + minMerge(stringMin), + maxMerge(stringMax), + floor(minMerge(float64Min),5), + floor(maxMerge(float64Max),5), + floor(avgMerge(float64Avg),5), + floor(sumMerge(float64Sum),5), + floor(minMerge(float32Min),5), + floor(maxMerge(float32Max),5), + floor(avgMerge(float32Avg),5), + floor(sumMerge(float32Sum),5), + minMerge(decimal325Min), + maxMerge(decimal325Max), + avgMerge(decimal325Avg), + sumMerge(decimal325Sum), + minMerge(dateMin), + maxMerge(dateMax), + minMerge(datetimeMin), + maxMerge(datetimeMax), + minMerge(datetime64Min), + maxMerge(datetime64Max), + minMerge(int64Min), + maxMerge(int64Max), + avgMerge(int64Avg), + sumMerge(int64Sum), + minMerge(int32Min), + maxMerge(int32Max), + avgMerge(int32Avg), + sumMerge(int32Sum), + minMerge(int16Min), + maxMerge(int16Max), + avgMerge(int16Avg), + sumMerge(int16Sum), + minMerge(int8Min), + maxMerge(int8Max), + avgMerge(int8Avg), + sumMerge(int8Sum) +FROM testNullableStatesAgg +GROUP BY id +ORDER BY id ASC; + + +select ' ---- select row with nulls without states ---- '; + +SELECT id, count(), + min(string), + max(string), + floor(min(float64),5), + floor(max(float64),5), + floor(avg(float64),5), + floor(sum(float64),5), + floor(min(float32),5), + floor(max(float32),5), + floor(avg(float32),5), + floor(sum(float32),5), + min(decimal325), + max(decimal325), + avg(decimal325), + sum(decimal325), + min(date), + max(date), + min(datetime), + max(datetime), + min(datetime64), + max(datetime64), + min(int64), + max(int64), + avg(int64), + sum(int64), + min(int32), + max(int32), + avg(int32), + sum(int32), + min(int16), + max(int16), + avg(int16), + sum(int16), + min(int8), + max(int8), + avg(int8), + sum(int8) +FROM testNullableStates +WHERE id = '-2' +GROUP BY id +ORDER BY id ASC; + +select ' ---- select row with nulls with states ---- '; + +SELECT id, count(), + minMerge(stringMin), + maxMerge(stringMax), + floor(minMerge(float64Min),5), + floor(maxMerge(float64Max),5), + floor(avgMerge(float64Avg),5), + floor(sumMerge(float64Sum),5), + floor(minMerge(float32Min),5), + floor(maxMerge(float32Max),5), + floor(avgMerge(float32Avg),5), + floor(sumMerge(float32Sum),5), + minMerge(decimal325Min), + maxMerge(decimal325Max), + avgMerge(decimal325Avg), + sumMerge(decimal325Sum), + minMerge(dateMin), + maxMerge(dateMax), + minMerge(datetimeMin), + maxMerge(datetimeMax), + minMerge(datetime64Min), + maxMerge(datetime64Max), + minMerge(int64Min), + 
maxMerge(int64Max), + avgMerge(int64Avg), + sumMerge(int64Sum), + minMerge(int32Min), + maxMerge(int32Max), + avgMerge(int32Avg), + sumMerge(int32Sum), + minMerge(int16Min), + maxMerge(int16Max), + avgMerge(int16Avg), + sumMerge(int16Sum), + minMerge(int8Min), + maxMerge(int8Max), + avgMerge(int8Avg), + sumMerge(int8Sum) +FROM testNullableStatesAgg +WHERE id = '-2' +GROUP BY id +ORDER BY id ASC; + + +select ' ---- select no rows without states ---- '; + +SELECT count(), + min(string), + max(string), + floor(min(float64),5), + floor(max(float64),5), + floor(avg(float64),5), + floor(sum(float64),5), + floor(min(float32),5), + floor(max(float32),5), + floor(avg(float32),5), + floor(sum(float32),5), + min(decimal325), + max(decimal325), + avg(decimal325), + sum(decimal325), + min(date), + max(date), + min(datetime), + max(datetime), + min(datetime64), + max(datetime64), + min(int64), + max(int64), + avg(int64), + sum(int64), + min(int32), + max(int32), + avg(int32), + sum(int32), + min(int16), + max(int16), + avg(int16), + sum(int16), + min(int8), + max(int8), + avg(int8), + sum(int8) +FROM testNullableStates +WHERE id = '-22'; + +select ' ---- select no rows with states ---- '; + +SELECT count(), + minMerge(stringMin), + maxMerge(stringMax), + floor(minMerge(float64Min),5), + floor(maxMerge(float64Max),5), + floor(avgMerge(float64Avg),5), + floor(sumMerge(float64Sum),5), + floor(minMerge(float32Min),5), + floor(maxMerge(float32Max),5), + floor(avgMerge(float32Avg),5), + floor(sumMerge(float32Sum),5), + minMerge(decimal325Min), + maxMerge(decimal325Max), + avgMerge(decimal325Avg), + sumMerge(decimal325Sum), + minMerge(dateMin), + maxMerge(dateMax), + minMerge(datetimeMin), + maxMerge(datetimeMax), + minMerge(datetime64Min), + maxMerge(datetime64Max), + minMerge(int64Min), + maxMerge(int64Max), + avgMerge(int64Avg), + sumMerge(int64Sum), + minMerge(int32Min), + maxMerge(int32Max), + avgMerge(int32Avg), + sumMerge(int32Sum), + minMerge(int16Min), + maxMerge(int16Max), + avgMerge(int16Avg), + sumMerge(int16Sum), + minMerge(int8Min), + maxMerge(int8Max), + avgMerge(int8Avg), + sumMerge(int8Sum) +FROM testNullableStatesAgg +WHERE id = '-22'; + +DROP TABLE testNullableStates; +DROP TABLE testNullableStatesAgg; diff --git a/parser/testdata/01518_select_in_null/ast.json b/parser/testdata/01518_select_in_null/ast.json new file mode 100644 index 000000000..b66ea8a8f --- /dev/null +++ b/parser/testdata/01518_select_in_null/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001330433, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01518_select_in_null/metadata.json b/parser/testdata/01518_select_in_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01518_select_in_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01518_select_in_null/query.sql b/parser/testdata/01518_select_in_null/query.sql new file mode 100644 index 000000000..97061f6cd --- /dev/null +++ b/parser/testdata/01518_select_in_null/query.sql @@ -0,0 +1,90 @@ +DROP TABLE IF EXISTS t1; + +CREATE TABLE t1 (`cA` String, `c1` String) ENGINE = MergeTree ORDER BY (cA, c1); + +insert into t1 select 'AAAAAAAAAAA', 'BBBBBB'; + +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 
'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); +select count() from t1 where c1 in (select 'BBBBBB' union all select null); + + +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 
'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); +select count() from t1 where cast(c1 as Nullable(String)) in (select 'BBBBBB' union all select null); + +DROP TABLE t1; 
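The repetition in the block above is deliberate: the fixture re-evaluates a single IN-with-NULL predicate many times, presumably to shake out nondeterminism in how the right-hand set is built and reused across identical queries. With ClickHouse's default transform_null_in = 0, the NULL row in the subquery never matches, so every count() is expected to stay at 1. A minimal standalone form of the check, illustrative and not part of the fixture:

select count() from (select 'BBBBBB' as c1) where c1 in (select 'BBBBBB' union all select null);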
diff --git a/parser/testdata/01519_topK_distributed_parametrized/ast.json b/parser/testdata/01519_topK_distributed_parametrized/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01519_topK_distributed_parametrized/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01519_topK_distributed_parametrized/metadata.json b/parser/testdata/01519_topK_distributed_parametrized/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01519_topK_distributed_parametrized/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01519_topK_distributed_parametrized/query.sql b/parser/testdata/01519_topK_distributed_parametrized/query.sql new file mode 100644 index 000000000..0e35d270f --- /dev/null +++ b/parser/testdata/01519_topK_distributed_parametrized/query.sql @@ -0,0 +1,18 @@ +-- Tags: distributed + +CREATE TABLE IF NOT EXISTS topXtest(A Int64) ENGINE = Memory; +INSERT INTO topXtest SELECT number FROM numbers(100); +INSERT INTO topXtest SELECT number FROM numbers(30); +INSERT INTO topXtest SELECT number FROM numbers(10); + +SELECT length(topK(30)(A)) FROM topXtest; +SELECT length(topK(30)(A)) FROM remote('localhost,127.0.0.1', currentDatabase(), topXtest); + +SELECT length(topK(A)) FROM topXtest; +SELECT length(topK(A)) FROM remote('localhost,127.0.0.1', currentDatabase(), topXtest); + +SELECT length(topK(3)(A)) FROM topXtest; +SELECT length(topK(3)(A)) FROM remote('localhost,127.0.0.1', currentDatabase(), topXtest); + +DROP TABLE topXtest; + diff --git a/parser/testdata/01521_alter_enum_and_reverse_read/ast.json b/parser/testdata/01521_alter_enum_and_reverse_read/ast.json new file mode 100644 index 000000000..9f538671a --- /dev/null +++ b/parser/testdata/01521_alter_enum_and_reverse_read/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery enum_test (children 1)" + }, + { + "explain": " Identifier enum_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001442525, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/01521_alter_enum_and_reverse_read/metadata.json b/parser/testdata/01521_alter_enum_and_reverse_read/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01521_alter_enum_and_reverse_read/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01521_alter_enum_and_reverse_read/query.sql b/parser/testdata/01521_alter_enum_and_reverse_read/query.sql new file mode 100644 index 000000000..b5391517c --- /dev/null +++ b/parser/testdata/01521_alter_enum_and_reverse_read/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS enum_test; + +CREATE TABLE enum_test(timestamp DateTime, host String, e Enum8('IU' = 1, 'WS' = 2)) Engine = MergeTree PARTITION BY toDate(timestamp) ORDER BY (timestamp, host); + +INSERT INTO enum_test SELECT '2020-10-09 00:00:00', 'h1', 'WS' FROM numbers(1); + +ALTER TABLE enum_test MODIFY COLUMN e Enum8('IU' = 1, 'WS' = 2, 'PS' = 3); + +INSERT INTO enum_test SELECT '2020-10-09 00:00:00', 'h1', 'PS' from numbers(1); + +SELECT * FROM enum_test ORDER BY timestamp, e desc SETTINGS optimize_read_in_order=1; + +DROP TABLE IF EXISTS enum_test; diff --git a/parser/testdata/01521_distributed_query_hang/ast.json b/parser/testdata/01521_distributed_query_hang/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01521_distributed_query_hang/ast.json @@ 
-0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01521_distributed_query_hang/metadata.json b/parser/testdata/01521_distributed_query_hang/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01521_distributed_query_hang/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01521_distributed_query_hang/query.sql b/parser/testdata/01521_distributed_query_hang/query.sql new file mode 100644 index 000000000..9ec303947 --- /dev/null +++ b/parser/testdata/01521_distributed_query_hang/query.sql @@ -0,0 +1,5 @@ +-- Tags: distributed + +-- regression for endless loop with connections_with_failover_max_tries=0 +set connections_with_failover_max_tries=0; +select * from remote('127.2', system.one); diff --git a/parser/testdata/01521_format_readable_time_delta2/ast.json b/parser/testdata/01521_format_readable_time_delta2/ast.json new file mode 100644 index 000000000..7d2723484 --- /dev/null +++ b/parser/testdata/01521_format_readable_time_delta2/ast.json @@ -0,0 +1,112 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function formatReadableTimeDelta (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function negate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_60" + }, + { + "explain": " Literal UInt64_3600" + }, + { + "explain": " Literal UInt64_86400" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_30.5" + }, + { + "explain": " Literal UInt64_86400" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_365" + }, + { + "explain": " Literal UInt64_86400" + } + ], + + "rows": 30, + + "statistics": + { + "elapsed": 0.001630073, + "rows_read": 30, + "bytes_read": 1324 + } +} diff --git a/parser/testdata/01521_format_readable_time_delta2/metadata.json b/parser/testdata/01521_format_readable_time_delta2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01521_format_readable_time_delta2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01521_format_readable_time_delta2/query.sql b/parser/testdata/01521_format_readable_time_delta2/query.sql new file mode 100644 index 000000000..b27dfc6a6 --- /dev/null +++ b/parser/testdata/01521_format_readable_time_delta2/query.sql @@ -0,0 +1,24 @@ +SELECT formatReadableTimeDelta(-(1 + 60 + 3600 + 86400 + 30.5 * 86400 + 365 * 86400)); +SELECT formatReadableTimeDelta(-(1 + 60 + 3600 + 86400 + 30.5 * 86400 + 365 * 86400), 
'years'); +SELECT formatReadableTimeDelta(-(1 + 60 + 3600 + 86400 + 30.5 * 86400 + 365 * 86400), 'months'); +SELECT formatReadableTimeDelta(-(1 + 60 + 3600 + 86400 + 30.5 * 86400 + 365 * 86400), 'days'); +SELECT formatReadableTimeDelta(-(1 + 60 + 3600 + 86400 + 30.5 * 86400 + 365 * 86400), 'hours'); +SELECT formatReadableTimeDelta(-(1 + 60 + 3600 + 86400 + 30.5 * 86400 + 365 * 86400), 'minutes'); +SELECT formatReadableTimeDelta(-(1 + 60 + 3600 + 86400 + 30.5 * 86400 + 365 * 86400), 'seconds'); +SELECT formatReadableTimeDelta(-(1 + 60 + 3600 + 86400 + 30.5 * 86400 + 365 * 86400), 'second'); -- { serverError BAD_ARGUMENTS } + +SELECT formatReadableTimeDelta(-(60 + 3600 + 86400 + 30.5 * 86400 + 365 * 86400)); +SELECT formatReadableTimeDelta(-(1 + 3600 + 86400 + 30.5 * 86400 + 365 * 86400)); +SELECT formatReadableTimeDelta(-(1 + 60 + 86400 + 30.5 * 86400 + 365 * 86400)); +SELECT formatReadableTimeDelta(-(1 + 60 + 3600 + 30.5 * 86400 + 365 * 86400)); +SELECT formatReadableTimeDelta(-(1 + 60 + 3600 + 86400 + 365 * 86400)); +SELECT formatReadableTimeDelta(-(1 + 60 + 3600 + 86400 + 30.5 * 86400)); + +SELECT formatReadableTimeDelta(1e100); +SELECT formatReadableTimeDelta(1e100, 'months'); +SELECT formatReadableTimeDelta(1e100, 'days'); +SELECT formatReadableTimeDelta(1e100, 'hours'); +SELECT formatReadableTimeDelta(1e100, 'minutes'); +SELECT formatReadableTimeDelta(1e100, 'seconds'); + +SELECT formatReadableTimeDelta(0x1000000000000000); diff --git a/parser/testdata/01521_global_in_prewhere_15792/ast.json b/parser/testdata/01521_global_in_prewhere_15792/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01521_global_in_prewhere_15792/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01521_global_in_prewhere_15792/metadata.json b/parser/testdata/01521_global_in_prewhere_15792/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01521_global_in_prewhere_15792/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01521_global_in_prewhere_15792/query.sql b/parser/testdata/01521_global_in_prewhere_15792/query.sql new file mode 100644 index 000000000..fe58710a9 --- /dev/null +++ b/parser/testdata/01521_global_in_prewhere_15792/query.sql @@ -0,0 +1,14 @@ +-- Tags: global + +drop table if exists xp; +drop table if exists xp_d; + +create table xp(A Date, B Int64, S String) Engine=MergeTree partition by toYYYYMM(A) order by B; +insert into xp select '2020-01-01', number , '' from numbers(100000); + +create table xp_d as xp Engine=Distributed(test_shard_localhost, currentDatabase(), xp); + +select count() from xp_d prewhere toYYYYMM(A) global in (select toYYYYMM(min(A)) from xp_d) where B > -1; + +drop table if exists xp; +drop table if exists xp_d; diff --git a/parser/testdata/01521_max_length_alias/ast.json b/parser/testdata/01521_max_length_alias/ast.json new file mode 100644 index 000000000..754d6347e --- /dev/null +++ b/parser/testdata/01521_max_length_alias/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery max_length_alias_14053 (children 1)" + }, + { + "explain": " Identifier max_length_alias_14053" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001239495, + "rows_read": 2, + "bytes_read": 96 + } +} diff --git a/parser/testdata/01521_max_length_alias/metadata.json b/parser/testdata/01521_max_length_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- 
/dev/null +++ b/parser/testdata/01521_max_length_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01521_max_length_alias/query.sql b/parser/testdata/01521_max_length_alias/query.sql new file mode 100644 index 000000000..277d579bd --- /dev/null +++ b/parser/testdata/01521_max_length_alias/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS max_length_alias_14053; + +CREATE TABLE max_length_alias_14053 +(`a` Date,`b` UInt16,`c.d` Array(Date),`dcount` UInt16 ALIAS length(c.d)) +ENGINE = MergeTree PARTITION BY toMonday(a) ORDER BY (a, b) +SETTINGS index_granularity = 8192; + +INSERT INTO max_length_alias_14053 VALUES ('2020-10-06',7367,['2020-10-06','2020-10-06','2020-10-06','2020-10-06','2020-10-06']),('2020-10-06',7367,['2020-10-06','2020-10-06','2020-10-06']),('2020-10-06',7367,['2020-10-06','2020-10-06']),('2020-10-07',7367,['2020-10-07','2020-10-07','2020-10-07','2020-10-07','2020-10-07']),('2020-10-08',7367,['2020-10-08','2020-10-08','2020-10-08','2020-10-08']),('2020-10-11',7367,['2020-10-11','2020-10-11','2020-10-11','2020-10-11','2020-10-11','2020-10-11','2020-10-11','2020-10-11']),('2020-10-11',7367,['2020-10-11']),('2020-08-26',7367,['2020-08-26','2020-08-26']),('2020-08-28',7367,['2020-08-28','2020-08-28','2020-08-28']),('2020-08-29',7367,['2020-08-29']),('2020-09-22',7367,['2020-09-22','2020-09-22','2020-09-22','2020-09-22','2020-09-22','2020-09-22','2020-09-22']); + +SELECT count(), min(length(c.d)) AS minExpr, min(dcount) AS minAlias, + max(length(c.d)) AS maxExpr, max(dcount) AS maxAlias, b +FROM max_length_alias_14053 GROUP BY b; + +DROP TABLE max_length_alias_14053; diff --git a/parser/testdata/01522_validate_alter_default/ast.json b/parser/testdata/01522_validate_alter_default/ast.json new file mode 100644 index 000000000..69021a18e --- /dev/null +++ b/parser/testdata/01522_validate_alter_default/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table2 (children 1)" + }, + { + "explain": " Identifier table2" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001487675, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/01522_validate_alter_default/metadata.json b/parser/testdata/01522_validate_alter_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01522_validate_alter_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01522_validate_alter_default/query.sql b/parser/testdata/01522_validate_alter_default/query.sql new file mode 100644 index 000000000..c4db2b91a --- /dev/null +++ b/parser/testdata/01522_validate_alter_default/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS table2; +CREATE TABLE table2 +( + EventDate Date, + Id Int32, + Value Int32 +) +Engine = MergeTree() +PARTITION BY toYYYYMM(EventDate) +ORDER BY Id; + +ALTER TABLE table2 MODIFY COLUMN `Value` DEFAULT 'some_string'; --{serverError CANNOT_PARSE_TEXT} + +ALTER TABLE table2 ADD COLUMN `Value2` DEFAULT 'some_string'; --{serverError BAD_ARGUMENTS} + +DROP TABLE IF EXISTS table2; diff --git a/parser/testdata/01523_date_time_compare_with_date_literal/ast.json b/parser/testdata/01523_date_time_compare_with_date_literal/ast.json new file mode 100644 index 000000000..4d69915fe --- /dev/null +++ b/parser/testdata/01523_date_time_compare_with_date_literal/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001349473, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01523_date_time_compare_with_date_literal/metadata.json b/parser/testdata/01523_date_time_compare_with_date_literal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01523_date_time_compare_with_date_literal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01523_date_time_compare_with_date_literal/query.sql b/parser/testdata/01523_date_time_compare_with_date_literal/query.sql new file mode 100644 index 000000000..5a3aa23bc --- /dev/null +++ b/parser/testdata/01523_date_time_compare_with_date_literal/query.sql @@ -0,0 +1,70 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test(timestamp DateTime) ENGINE = MergeTree ORDER BY timestamp; + +INSERT INTO test VALUES ('2020-10-15 00:00:00'); +INSERT INTO test VALUES ('2020-10-15 12:00:00'); +INSERT INTO test VALUES ('2020-10-16 00:00:00'); + +SELECT 'DateTime'; +SELECT * FROM test WHERE timestamp != '2020-10-15' ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE timestamp == '2020-10-15' ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE timestamp > '2020-10-15' ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE timestamp >= '2020-10-15' ORDER by timestamp; +SELECT ''; +SELECT * FROM test WHERE timestamp < '2020-10-16' ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE timestamp <= '2020-10-16' ORDER BY timestamp; +SELECT ''; +SELECT ''; +SELECT * FROM test WHERE '2020-10-15' != timestamp ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE '2020-10-15' == timestamp ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE '2020-10-15' < timestamp ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE '2020-10-15' <= timestamp ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE '2020-10-16' > timestamp ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE '2020-10-16' >= timestamp ORDER BY timestamp; +SELECT ''; + +DROP TABLE test; +CREATE TABLE test(timestamp DateTime64) ENGINE = MergeTree ORDER BY timestamp; + +INSERT INTO test VALUES ('2020-10-15 00:00:00'); +INSERT INTO test VALUES ('2020-10-15 12:00:00'); +INSERT INTO test VALUES ('2020-10-16 00:00:00'); + +SELECT 'DateTime64'; +SELECT * FROM test WHERE timestamp != '2020-10-15' ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE timestamp == '2020-10-15' ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE timestamp > '2020-10-15' ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE timestamp >= '2020-10-15' ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE timestamp < '2020-10-16' ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE timestamp <= '2020-10-16' ORDER BY timestamp; +SELECT ''; +SELECT ''; +SELECT * FROM test WHERE '2020-10-15' != timestamp ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE '2020-10-15' == timestamp ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE '2020-10-15' < timestamp ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE '2020-10-15' <= timestamp ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE '2020-10-16' > timestamp ORDER BY timestamp; +SELECT ''; +SELECT * FROM test WHERE '2020-10-16' >= timestamp ORDER BY timestamp; +SELECT ''; + +DROP TABLE test; diff --git a/parser/testdata/01523_interval_operator_support_string_literal/ast.json 
b/parser/testdata/01523_interval_operator_support_string_literal/ast.json new file mode 100644 index 000000000..e0dc89079 --- /dev/null +++ b/parser/testdata/01523_interval_operator_support_string_literal/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toIntervalYear (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001539652, + "rows_read": 7, + "bytes_read": 267 + } +} diff --git a/parser/testdata/01523_interval_operator_support_string_literal/metadata.json b/parser/testdata/01523_interval_operator_support_string_literal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01523_interval_operator_support_string_literal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01523_interval_operator_support_string_literal/query.sql b/parser/testdata/01523_interval_operator_support_string_literal/query.sql new file mode 100644 index 000000000..2af2ba499 --- /dev/null +++ b/parser/testdata/01523_interval_operator_support_string_literal/query.sql @@ -0,0 +1,25 @@ +SELECT INTERVAL 2 year; +SELECT INTERVAL '2' year; +SELECT INTERVAL '2 year'; +SELECT INTERVAL 2 month; +SELECT INTERVAL '2' month; +SELECT INTERVAL '2 month'; +SELECT INTERVAL 2 week; +SELECT INTERVAL '2' week; +SELECT INTERVAL '2 week'; +SELECT INTERVAL 2 day; +SELECT INTERVAL '2' day; +SELECT INTERVAL '2 day'; +SELECT INTERVAL 2 hour; +SELECT INTERVAL '2' hour; +SELECT INTERVAL '2 hour'; +SELECT INTERVAL 2 minute; +SELECT INTERVAL '2' minute; +SELECT INTERVAL '2 minute'; +SELECT INTERVAL '2' AS n minute; +SELECT DATE_ADD(hour, '1', toDateTime(1234567890, 'UTC')); +SELECT DATE_ADD(hour, 1, toDateTime(1234567890, 'UTC')); +SELECT DATE_ADD(hour, (SELECT 1), toDateTime(1234567890, 'UTC')); +SELECT DATE_ADD(toDateTime(1234567890, 'UTC'), INTERVAL 2 day); +SELECT DATE_ADD(toDateTime(1234567890, 'UTC'), INTERVAL '2 day'); +SELECT DATE_ADD(toDateTime(1234567890, 'UTC'), INTERVAL '2' day); diff --git a/parser/testdata/01524_do_not_merge_across_partitions_select_final/ast.json b/parser/testdata/01524_do_not_merge_across_partitions_select_final/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01524_do_not_merge_across_partitions_select_final/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01524_do_not_merge_across_partitions_select_final/metadata.json b/parser/testdata/01524_do_not_merge_across_partitions_select_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01524_do_not_merge_across_partitions_select_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01524_do_not_merge_across_partitions_select_final/query.sql b/parser/testdata/01524_do_not_merge_across_partitions_select_final/query.sql new file mode 100644 index 000000000..9aeda5824 --- /dev/null +++ b/parser/testdata/01524_do_not_merge_across_partitions_select_final/query.sql @@ -0,0 +1,37 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS select_final; + +SET allow_asynchronous_read_from_io_pool_for_merge_tree = 0; +SET do_not_merge_across_partitions_select_final 
= 1; +SET max_threads = 16; + +CREATE TABLE select_final (t DateTime, x Int32, string String) ENGINE = ReplacingMergeTree() PARTITION BY toYYYYMM(t) ORDER BY (x, t) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO select_final SELECT toDate('2000-01-01'), number, '' FROM numbers(2); +INSERT INTO select_final SELECT toDate('2000-01-01'), number + 1, '' FROM numbers(2); + +INSERT INTO select_final SELECT toDate('2020-01-01'), number, '' FROM numbers(2); +INSERT INTO select_final SELECT toDate('2020-01-01'), number + 1, '' FROM numbers(2); + +SELECT * FROM select_final FINAL ORDER BY x, t; + +TRUNCATE TABLE select_final; + +INSERT INTO select_final SELECT toDate('2000-01-01'), number, '' FROM numbers(2); +INSERT INTO select_final SELECT toDate('2000-01-01'), number, 'updated' FROM numbers(2); + +OPTIMIZE TABLE select_final FINAL; + +INSERT INTO select_final SELECT toDate('2020-01-01'), number, '' FROM numbers(2); +INSERT INTO select_final SELECT toDate('2020-01-01'), number, 'updated' FROM numbers(2); + +SELECT max(x) FROM select_final FINAL where string = 'updated'; + +TRUNCATE TABLE select_final; + +INSERT INTO select_final SELECT toDate('2000-01-01'), number, '' FROM numbers(500000); +OPTIMIZE TABLE select_final FINAL; +SELECT max(x) FROM select_final FINAL; + +DROP TABLE select_final; diff --git a/parser/testdata/01525_select_with_offset_fetch_clause/ast.json b/parser/testdata/01525_select_with_offset_fetch_clause/ast.json new file mode 100644 index 000000000..f251e2b1e --- /dev/null +++ b/parser/testdata/01525_select_with_offset_fetch_clause/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 5)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_3" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001688342, + "rows_read": 16, + "bytes_read": 592 + } +} diff --git a/parser/testdata/01525_select_with_offset_fetch_clause/metadata.json b/parser/testdata/01525_select_with_offset_fetch_clause/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01525_select_with_offset_fetch_clause/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01525_select_with_offset_fetch_clause/query.sql b/parser/testdata/01525_select_with_offset_fetch_clause/query.sql new file mode 100644 index 000000000..d02a2af66 --- /dev/null +++ b/parser/testdata/01525_select_with_offset_fetch_clause/query.sql @@ -0,0 +1,8 @@ +SELECT number FROM numbers(10) ORDER BY number DESC OFFSET 2 ROWS FETCH NEXT 3 ROWS WITH TIES; + +DROP TABLE IF EXISTS test_fetch; +CREATE TABLE test_fetch(a Int32, b Int32) Engine = Memory; +INSERT INTO test_fetch VALUES(1, 1), (2, 1), (3, 4), (3, 3), (5, 4), (0, 6), (5, 7); +SELECT * FROM 
(SELECT * FROM test_fetch ORDER BY a, b OFFSET 1 ROW FETCH FIRST 3 ROWS ONLY) ORDER BY a, b; +SELECT * FROM (SELECT * FROM test_fetch ORDER BY a OFFSET 1 ROW FETCH FIRST 3 ROWS WITH TIES) ORDER BY a, b; +DROP TABLE test_fetch; diff --git a/parser/testdata/01526_alter_add_and_modify_order_zookeeper/ast.json b/parser/testdata/01526_alter_add_and_modify_order_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01526_alter_add_and_modify_order_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01526_alter_add_and_modify_order_zookeeper/metadata.json b/parser/testdata/01526_alter_add_and_modify_order_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01526_alter_add_and_modify_order_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01526_alter_add_and_modify_order_zookeeper/query.sql b/parser/testdata/01526_alter_add_and_modify_order_zookeeper/query.sql new file mode 100644 index 000000000..24f43a89c --- /dev/null +++ b/parser/testdata/01526_alter_add_and_modify_order_zookeeper/query.sql @@ -0,0 +1,54 @@ +-- Tags: zookeeper + +DROP TABLE IF EXISTS table_for_alter; + +SET replication_alter_partitions_sync = 2; + +CREATE TABLE table_for_alter +( + `d` Date, + `a` String, + `b` UInt8, + `x` String, + `y` Int8, + `version` UInt64, + `sign` Int8 DEFAULT 1 +) +ENGINE = ReplicatedVersionedCollapsingMergeTree('/clickhouse/tables/{database}/01526_alter_add/t1', '1', sign, version) +PARTITION BY y +ORDER BY d +SETTINGS index_granularity = 8192; + +INSERT INTO table_for_alter VALUES(toDate('2019-10-01'), 'a', 1, 'aa', 1, 1, 1); + +DETACH TABLE table_for_alter; + +ATTACH TABLE table_for_alter; + + +SELECT * FROM table_for_alter; + +ALTER TABLE table_for_alter ADD COLUMN order UInt32, MODIFY ORDER BY (d, order); + + +DETACH TABLE table_for_alter; + +ATTACH TABLE table_for_alter; + +SELECT * FROM table_for_alter; + +SHOW CREATE TABLE table_for_alter; + +ALTER TABLE table_for_alter ADD COLUMN datum UInt32, MODIFY ORDER BY (d, order, datum); + +INSERT INTO table_for_alter VALUES(toDate('2019-10-02'), 'b', 2, 'bb', 2, 2, 2, 1, 2); + +SELECT * FROM table_for_alter ORDER BY d; + +SHOW CREATE TABLE table_for_alter; + +DETACH TABLE table_for_alter; + +ATTACH TABLE table_for_alter; + +DROP TABLE IF EXISTS table_for_alter; diff --git a/parser/testdata/01526_complex_key_dict_direct_layout/ast.json b/parser/testdata/01526_complex_key_dict_direct_layout/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01526_complex_key_dict_direct_layout/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01526_complex_key_dict_direct_layout/metadata.json b/parser/testdata/01526_complex_key_dict_direct_layout/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01526_complex_key_dict_direct_layout/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01526_complex_key_dict_direct_layout/query.sql b/parser/testdata/01526_complex_key_dict_direct_layout/query.sql new file mode 100644 index 000000000..f6065516a --- /dev/null +++ b/parser/testdata/01526_complex_key_dict_direct_layout/query.sql @@ -0,0 +1,34 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS db_01526; + +CREATE DATABASE db_01526; + + +CREATE TABLE db_01526.table_for_dict1 +( + key_column UInt64, + second_column UInt64, + third_column String +) +ENGINE = MergeTree() +ORDER BY 
(key_column, second_column); + +INSERT INTO db_01526.table_for_dict1 VALUES (1, 2, 'aaa'), (1, 3, 'bbb'), (2, 3, 'ccc'); + +CREATE DICTIONARY db_01526.dict1 +( + key_column UInt64 DEFAULT 0, + second_column UInt64 DEFAULT 0, + third_column String DEFAULT 'qqq' +) +PRIMARY KEY key_column, second_column +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict1' PASSWORD '' DB 'db_01526')) +LAYOUT(COMPLEX_KEY_DIRECT()); + +SELECT dictGet('db_01526.dict1', 'third_column', (number, number + 1)) FROM numbers(4); +SELECT dictHas('db_01526.dict1', (toUInt64(1), toUInt64(3))); + +DROP DICTIONARY db_01526.dict1; +DROP TABLE db_01526.table_for_dict1; +DROP DATABASE db_01526; diff --git a/parser/testdata/01527_bad_aggregation_in_lambda/ast.json b/parser/testdata/01527_bad_aggregation_in_lambda/ast.json new file mode 100644 index 000000000..16896e907 --- /dev/null +++ b/parser/testdata/01527_bad_aggregation_in_lambda/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001082316, + "rows_read": 20, + "bytes_read": 789 + } +} diff --git a/parser/testdata/01527_bad_aggregation_in_lambda/metadata.json b/parser/testdata/01527_bad_aggregation_in_lambda/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01527_bad_aggregation_in_lambda/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01527_bad_aggregation_in_lambda/query.sql b/parser/testdata/01527_bad_aggregation_in_lambda/query.sql new file mode 100644 index 000000000..c1b86fdfc --- /dev/null +++ b/parser/testdata/01527_bad_aggregation_in_lambda/query.sql @@ -0,0 +1 @@ +SELECT arrayMap(x -> x * sum(x), range(10)); -- { serverError NOT_FOUND_COLUMN_IN_BLOCK, 47 } diff --git a/parser/testdata/01527_dist_sharding_key_dictGet_reload/ast.json b/parser/testdata/01527_dist_sharding_key_dictGet_reload/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01527_dist_sharding_key_dictGet_reload/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01527_dist_sharding_key_dictGet_reload/metadata.json b/parser/testdata/01527_dist_sharding_key_dictGet_reload/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01527_dist_sharding_key_dictGet_reload/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01527_dist_sharding_key_dictGet_reload/query.sql b/parser/testdata/01527_dist_sharding_key_dictGet_reload/query.sql new file mode 100644 index 000000000..03f4f758e --- /dev/null +++ b/parser/testdata/01527_dist_sharding_key_dictGet_reload/query.sql @@ -0,0 +1,35 @@ +-- Tags: shard, no-parallel + +set allow_nondeterministic_optimize_skip_unused_shards=1; +set optimize_skip_unused_shards=1; +set force_optimize_skip_unused_shards=2; +set check_table_dependencies=0; + +drop database if exists db_01527_ranges; +drop table if exists dist_01527; +drop table if exists data_01527; + +create database db_01527_ranges; + +create table data_01527 engine=Memory() as select toUInt64(number) key from numbers(2); +create table dist_01527 as data_01527 engine=Distributed('test_cluster_two_shards', currentDatabase(), data_01527, dictGetUInt64('db_01527_ranges.dict', 'shard', key)); + +create table db_01527_ranges.data engine=Memory() as select number key, number shard from numbers(100); +create dictionary db_01527_ranges.dict (key UInt64, shard UInt64) primary key key source(clickhouse(host '127.0.0.1' port tcpPort() table 'data' db 'db_01527_ranges' user 'default' password '')) lifetime(0) layout(hashed()); +system reload dictionary db_01527_ranges.dict; + +select _shard_num from dist_01527 where key=0; +select _shard_num from dist_01527 where key=1; + +drop table db_01527_ranges.data sync; +create table db_01527_ranges.data engine=Memory() as select number key, number+1 shard from numbers(100); +system reload dictionary db_01527_ranges.dict; + +select _shard_num from dist_01527 where key=0; +select _shard_num from dist_01527 where key=1; + +drop table data_01527; +drop table dist_01527; +drop table db_01527_ranges.data; +drop dictionary db_01527_ranges.dict; +drop database db_01527_ranges; diff --git a/parser/testdata/01527_materialized_view_stack_overflow/ast.json b/parser/testdata/01527_materialized_view_stack_overflow/ast.json new file mode 100644 index 000000000..db0a2c30b --- /dev/null +++ b/parser/testdata/01527_materialized_view_stack_overflow/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001211065, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01527_materialized_view_stack_overflow/metadata.json b/parser/testdata/01527_materialized_view_stack_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01527_materialized_view_stack_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01527_materialized_view_stack_overflow/query.sql b/parser/testdata/01527_materialized_view_stack_overflow/query.sql new file mode 100644 index 000000000..66bba99ca --- /dev/null +++ b/parser/testdata/01527_materialized_view_stack_overflow/query.sql @@ -0,0 +1,30 @@ +SET allow_materialized_view_with_bad_select = 1; + +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS v; + +CREATE TABLE t (c String) ENGINE = Memory; + +CREATE MATERIALIZED VIEW v to v AS SELECT c FROM t; -- { serverError BAD_ARGUMENTS } +CREATE MATERIALIZED VIEW v to t AS SELECT * FROM v; -- { serverError UNKNOWN_TABLE } + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS v1; +DROP TABLE IF EXISTS v2; + +CREATE TABLE t1 (c String) ENGINE = Memory; +CREATE TABLE t2 (c String) ENGINE = Memory; + +CREATE MATERIALIZED VIEW v1 to t1 AS SELECT * FROM t2; +CREATE MATERIALIZED 
VIEW v2 to t2 AS SELECT * FROM t1; + +INSERT INTO t1 VALUES ('Hello'); -- { serverError TOO_DEEP_RECURSION } +INSERT INTO t2 VALUES ('World'); -- { serverError TOO_DEEP_RECURSION } + +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS v; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS v1; +DROP TABLE IF EXISTS v2; diff --git a/parser/testdata/01528_allow_nondeterministic_optimize_skip_unused_shards/ast.json b/parser/testdata/01528_allow_nondeterministic_optimize_skip_unused_shards/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01528_allow_nondeterministic_optimize_skip_unused_shards/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01528_allow_nondeterministic_optimize_skip_unused_shards/metadata.json b/parser/testdata/01528_allow_nondeterministic_optimize_skip_unused_shards/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01528_allow_nondeterministic_optimize_skip_unused_shards/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01528_allow_nondeterministic_optimize_skip_unused_shards/query.sql b/parser/testdata/01528_allow_nondeterministic_optimize_skip_unused_shards/query.sql new file mode 100644 index 000000000..534c6b44a --- /dev/null +++ b/parser/testdata/01528_allow_nondeterministic_optimize_skip_unused_shards/query.sql @@ -0,0 +1,11 @@ +-- Tags: shard + +drop table if exists dist_01528; +create table dist_01528 as system.one engine=Distributed('test_cluster_two_shards', system, one, rand()+dummy); + +set optimize_skip_unused_shards=1; +set force_optimize_skip_unused_shards=1; +select * from dist_01528 where dummy = 2; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01528 where dummy = 2 settings allow_nondeterministic_optimize_skip_unused_shards=1; + +drop table dist_01528; diff --git a/parser/testdata/01528_setting_aggregate_functions_null_for_empty/ast.json b/parser/testdata/01528_setting_aggregate_functions_null_for_empty/ast.json new file mode 100644 index 000000000..0641312fe --- /dev/null +++ b/parser/testdata/01528_setting_aggregate_functions_null_for_empty/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery defaults (children 1)" + }, + { + "explain": " Identifier defaults" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001367139, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01528_setting_aggregate_functions_null_for_empty/metadata.json b/parser/testdata/01528_setting_aggregate_functions_null_for_empty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01528_setting_aggregate_functions_null_for_empty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01528_setting_aggregate_functions_null_for_empty/query.sql b/parser/testdata/01528_setting_aggregate_functions_null_for_empty/query.sql new file mode 100644 index 000000000..b57a492e3 --- /dev/null +++ b/parser/testdata/01528_setting_aggregate_functions_null_for_empty/query.sql @@ -0,0 +1,39 @@ +DROP TABLE IF EXISTS defaults; + +CREATE TABLE defaults +( + n Int8 +)ENGINE = Memory(); + +SELECT sum(n) FROM defaults; +SELECT sumOrNull(n) FROM defaults; +SELECT count(n) FROM defaults; +SELECT countOrNull(n) FROM defaults; + +SET aggregate_functions_null_for_empty=1; + +SELECT sum(n) FROM defaults; +SELECT sumOrNull(n) FROM defaults; +SELECT count(n) FROM defaults; 
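+-- Per the setting's documented behavior, aggregate_functions_null_for_empty=1 rewrites plain aggregates with the -OrNull combinator, so the sum() and count() queries above also return NULL on the empty table.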
+SELECT countOrNull(n) FROM defaults; + +INSERT INTO defaults SELECT * FROM numbers(10); + +SET aggregate_functions_null_for_empty=0; + +SELECT sum(n) FROM defaults; +SELECT sumOrNull(n) FROM defaults; +SELECT count(n) FROM defaults; +SELECT countOrNull(n) FROM defaults; + +SET aggregate_functions_null_for_empty=1; + +SELECT sum(n) FROM defaults; +SELECT sumOrNull(n) FROM defaults; +SELECT count(n) FROM defaults; +SELECT countOrNull(n) FROM defaults; + + +EXPLAIN SYNTAX SELECT sumIf(1, number > 0) FROM numbers(10) WHERE 0; + +DROP TABLE defaults; diff --git a/parser/testdata/01528_to_uuid_or_null_or_zero/ast.json b/parser/testdata/01528_to_uuid_or_null_or_zero/ast.json new file mode 100644 index 000000000..2e01525b9 --- /dev/null +++ b/parser/testdata/01528_to_uuid_or_null_or_zero/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery to_uuid_test (children 1)" + }, + { + "explain": " Identifier to_uuid_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001163141, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/01528_to_uuid_or_null_or_zero/metadata.json b/parser/testdata/01528_to_uuid_or_null_or_zero/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01528_to_uuid_or_null_or_zero/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01528_to_uuid_or_null_or_zero/query.sql b/parser/testdata/01528_to_uuid_or_null_or_zero/query.sql new file mode 100644 index 000000000..2b9b6c30c --- /dev/null +++ b/parser/testdata/01528_to_uuid_or_null_or_zero/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS to_uuid_test; + +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0'); +SELECT toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0T'); --{serverError CANNOT_PARSE_TEXT} +SELECT toUUIDOrNull('61f0c404-5cb3-11e7-907b-a6006ad3dba0T'); +SELECT toUUIDOrZero('59f0c404-5cb3-11e7-907b-a6006ad3dba0T'); + +CREATE TABLE to_uuid_test (value String) ENGINE = TinyLog(); + +INSERT INTO to_uuid_test VALUES ('61f0c404-5cb3-11e7-907b-a6006ad3dba0'); +SELECT toUUID(value) FROM to_uuid_test; + +INSERT INTO to_uuid_test VALUES ('61f0c404-5cb3-11e7-907b-a6006ad3dba0T'); +-- If the Memory engine is replaced by MergeTree, this query returns a result for the first row +-- but throws an error while processing the second row. +-- ORDER BY ALL ensures consistent results between engines. 
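+-- toUUID throws on the malformed value inserted above, while toUUIDOrNull returns NULL and toUUIDOrZero returns the all-zero UUID instead, so the two queries after the failing one succeed.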
+SELECT toUUID(value) FROM to_uuid_test ORDER BY ALL; -- {serverError CANNOT_PARSE_TEXT} +SELECT toUUIDOrNull(value) FROM to_uuid_test ORDER BY ALL; +SELECT toUUIDOrZero(value) FROM to_uuid_test ORDER BY ALL; + +DROP TABLE to_uuid_test; + diff --git a/parser/testdata/01529_union_distinct_and_setting_union_default_mode/ast.json b/parser/testdata/01529_union_distinct_and_setting_union_default_mode/ast.json new file mode 100644 index 000000000..37b1d0e2a --- /dev/null +++ b/parser/testdata/01529_union_distinct_and_setting_union_default_mode/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001471682, + "rows_read": 5, + "bytes_read": 177 + } +} diff --git a/parser/testdata/01529_union_distinct_and_setting_union_default_mode/metadata.json b/parser/testdata/01529_union_distinct_and_setting_union_default_mode/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01529_union_distinct_and_setting_union_default_mode/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01529_union_distinct_and_setting_union_default_mode/query.sql b/parser/testdata/01529_union_distinct_and_setting_union_default_mode/query.sql new file mode 100644 index 000000000..6f2fe847f --- /dev/null +++ b/parser/testdata/01529_union_distinct_and_setting_union_default_mode/query.sql @@ -0,0 +1,61 @@ +SELECT 1; + +SET union_default_mode='DISTINCT'; + +(((((((SELECT 1) UNION SELECT 1) UNION SELECT 1) UNION SELECT 1) UNION SELECT 1) UNION SELECT 1) UNION SELECT 1) UNION SELECT 1; + +(((((((SELECT 1) UNION ALL SELECT 1) UNION ALL SELECT 1) UNION ALL SELECT 1) UNION ALL SELECT 1) UNION ALL SELECT 1) UNION ALL SELECT 1) UNION ALL SELECT 1; + +SELECT 'a' UNION ALL SELECT 'a' UNION ALL SELECT 'a' UNION SELECT 'a'; + +SELECT 'a' UNION ALL (SELECT 'a' UNION ALL SELECT 'a' UNION SELECT 'a'); + +SELECT 'a' UNION SELECT 'a' UNION SELECT 'a' UNION ALL SELECT'a'; + +SELECT 1 UNION SELECT 1 UNION SELECT 1 UNION ALL SELECT 1; + +SELECT 'all' UNION SELECT 'all' UNION ALL SELECT 'all'; + +SELECT 1 UNION SELECT 1 UNION SELECT 1 UNION DISTINCT SELECT 1; + +SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION SELECT 1 UNION ALL SELECT 1; + +SELECT 1 UNION (SELECT 1 UNION ALL SELECT 1 UNION SELECT 1 UNION DISTINCT SELECT 1 UNION ALL SELECT 1); + +SELECT 1 UNION ALL (SELECT 1 UNION ALL SELECT 1 UNION SELECT 1 UNION DISTINCT SELECT 1 UNION ALL SELECT 1); + +SELECT 1 UNION (SELECT 1 UNION (SELECT 1 UNION (SELECT 1 UNION (SELECT 1 UNION (SELECT 1 UNION (SELECT 1 UNION (SELECT 1))))))); + +SELECT 1 UNION ALL (SELECT 1 UNION ALL (SELECT 1 UNION ALL (SELECT 1 UNION ALL (SELECT 1 UNION ALL (SELECT 1 UNION ALL (SELECT 1 UNION ALL (SELECT 1 UNION ALL SELECT 1))))))); + +SELECT * FROM (SELECT 1 UNION ALL (SELECT 1 UNION SELECT 1 UNION ALL (SELECT 1 UNION ALL SELECT 1))); + +SET union_default_mode='ALL'; + +(((((((SELECT 1) UNION SELECT 1) UNION SELECT 1) UNION SELECT 1) UNION SELECT 1) UNION SELECT 1) UNION SELECT 1) UNION SELECT 1; + +(((((((SELECT 1) UNION ALL SELECT 1) UNION ALL SELECT 1) UNION ALL SELECT 1) UNION ALL SELECT 1) UNION ALL SELECT 1) UNION ALL SELECT 1) UNION ALL SELECT 1; + +SELECT 'a' UNION ALL SELECT 'a' UNION ALL 
SELECT 'a' UNION SELECT 'a'; + +SELECT 'a' UNION ALL (SELECT 'a' UNION ALL SELECT 'a' UNION SELECT 'a'); + +SELECT 'a' UNION SELECT 'a' UNION SELECT 'a' UNION ALL SELECT'a'; + +SELECT 1 UNION SELECT 1 UNION SELECT 1 UNION ALL SELECT 1; + +SELECT 'all' UNION SELECT 'all' UNION ALL SELECT 'all'; + +SELECT 1 UNION SELECT 1 UNION SELECT 1 UNION DISTINCT SELECT 1; + +SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1 UNION SELECT 1 UNION ALL SELECT 1; + +SELECT 1 UNION (SELECT 1 UNION ALL SELECT 1 UNION SELECT 1 UNION DISTINCT SELECT 1 UNION ALL SELECT 1); + +SELECT 1 UNION ALL (SELECT 1 UNION ALL SELECT 1 UNION SELECT 1 UNION DISTINCT SELECT 1 UNION ALL SELECT 1); + +SELECT 1 UNION (SELECT 1 UNION (SELECT 1 UNION (SELECT 1 UNION (SELECT 1 UNION (SELECT 1 UNION (SELECT 1 UNION (SELECT 1))))))); + +SELECT 1 UNION ALL (SELECT 1 UNION ALL (SELECT 1 UNION ALL (SELECT 1 UNION ALL (SELECT 1 UNION ALL (SELECT 1 UNION ALL(SELECT 1 UNION ALL (SELECT 1 UNION ALL SELECT 1))))))); + +SELECT * FROM (SELECT 1 UNION ALL (SELECT 1 UNION SELECT 1 UNION ALL (SELECT 1 UNION ALL SELECT 1))); diff --git a/parser/testdata/01530_drop_database_atomic_sync/ast.json b/parser/testdata/01530_drop_database_atomic_sync/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01530_drop_database_atomic_sync/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01530_drop_database_atomic_sync/metadata.json b/parser/testdata/01530_drop_database_atomic_sync/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01530_drop_database_atomic_sync/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01530_drop_database_atomic_sync/query.sql b/parser/testdata/01530_drop_database_atomic_sync/query.sql new file mode 100644 index 000000000..6cc0eac43 --- /dev/null +++ b/parser/testdata/01530_drop_database_atomic_sync/query.sql @@ -0,0 +1,37 @@ +-- Tags: no-parallel +-- Tag no-parallel: creates database + +drop database if exists db_01530_atomic sync; + +create database db_01530_atomic Engine=Atomic; +create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/db_01530_atomic/data', 'test') order by key; +drop database db_01530_atomic sync; + +create database db_01530_atomic Engine=Atomic; +create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/db_01530_atomic/data', 'test') order by key; +drop database db_01530_atomic sync; + + +set database_atomic_wait_for_drop_and_detach_synchronously=1; + +create database db_01530_atomic Engine=Atomic; +create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/db_01530_atomic/data', 'test') order by key; +drop database db_01530_atomic; + +create database db_01530_atomic Engine=Atomic; +create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/db_01530_atomic/data', 'test') order by key; +drop database db_01530_atomic; + + +set database_atomic_wait_for_drop_and_detach_synchronously=0; + +create database db_01530_atomic Engine=Atomic; +create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/db_01530_atomic/data', 'test') order by key; +drop database db_01530_atomic; + +create database db_01530_atomic Engine=Atomic; +create table db_01530_atomic.data (key Int) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/db_01530_atomic/data', 'test') order by key; -- { 
serverError REPLICA_ALREADY_EXISTS } + +set database_atomic_wait_for_drop_and_detach_synchronously=1; + +drop database db_01530_atomic sync; diff --git a/parser/testdata/01531_query_log_query_comment/ast.json b/parser/testdata/01531_query_log_query_comment/ast.json new file mode 100644 index 000000000..c72568ef2 --- /dev/null +++ b/parser/testdata/01531_query_log_query_comment/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000944408, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01531_query_log_query_comment/metadata.json b/parser/testdata/01531_query_log_query_comment/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01531_query_log_query_comment/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01531_query_log_query_comment/query.sql b/parser/testdata/01531_query_log_query_comment/query.sql new file mode 100644 index 000000000..eebaa6171 --- /dev/null +++ b/parser/testdata/01531_query_log_query_comment/query.sql @@ -0,0 +1,20 @@ +set log_queries=1; +set log_queries_min_type='QUERY_FINISH'; + +set enable_global_with_statement=0; +select /* test=01531, enable_global_with_statement=0 */ 2; +system flush logs query_log; +select count() from system.query_log +where event_date >= yesterday() + and query like 'select /* test=01531, enable_global_with_statement=0 */ 2%' + and current_database = currentDatabase() + ; + +set enable_global_with_statement=1; +select /* test=01531, enable_global_with_statement=1 */ 2; +system flush logs query_log; +select count() from system.query_log +where event_date >= yesterday() + and query like 'select /* test=01531, enable_global_with_statement=1 */ 2%' + and current_database = currentDatabase() + ; diff --git a/parser/testdata/01532_collate_in_low_cardinality/ast.json b/parser/testdata/01532_collate_in_low_cardinality/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01532_collate_in_low_cardinality/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01532_collate_in_low_cardinality/metadata.json b/parser/testdata/01532_collate_in_low_cardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01532_collate_in_low_cardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01532_collate_in_low_cardinality/query.sql b/parser/testdata/01532_collate_in_low_cardinality/query.sql new file mode 100644 index 000000000..ace362362 --- /dev/null +++ b/parser/testdata/01532_collate_in_low_cardinality/query.sql @@ -0,0 +1,35 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS test_collate; +DROP TABLE IF EXISTS test_collate_null; + +CREATE TABLE test_collate (x UInt32, s LowCardinality(String)) ENGINE=Memory(); +CREATE TABLE test_collate_null (x UInt32, s LowCardinality(Nullable(String))) ENGINE=Memory(); + +INSERT INTO test_collate VALUES (1, 'Ё'), (1, 'ё'), (1, 'а'), (2, 'А'), (2, 'я'), (2, 'Я'); +INSERT INTO test_collate_null VALUES (1, 'Ё'), (1, 'ё'), (1, 'а'), (2, 'А'), (2, 'я'), (2, 'Я'), (1, null), (2, null); + + +SELECT 'Order by without collate'; +SELECT * FROM test_collate ORDER BY s; +SELECT 'Order by with collate'; +SELECT * FROM test_collate ORDER BY s COLLATE 'ru'; + +SELECT 'Order by tuple without collate'; +SELECT * FROM test_collate ORDER BY x, s; +SELECT 'Order by tuple with 
collate'; +SELECT * FROM test_collate_null ORDER BY x, s COLLATE 'ru'; + +SELECT 'Order by without collate'; +SELECT * FROM test_collate_null ORDER BY s; +SELECT 'Order by with collate'; +SELECT * FROM test_collate_null ORDER BY s COLLATE 'ru'; + +SELECT 'Order by tuple without collate'; +SELECT * FROM test_collate_null ORDER BY x, s; +SELECT 'Order by tuple with collate'; +SELECT * FROM test_collate_null ORDER BY x, s COLLATE 'ru'; + + +DROP TABLE test_collate; +DROP TABLE test_collate_null; diff --git a/parser/testdata/01532_execute_merges_on_single_replica_long/ast.json b/parser/testdata/01532_execute_merges_on_single_replica_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01532_execute_merges_on_single_replica_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01532_execute_merges_on_single_replica_long/metadata.json b/parser/testdata/01532_execute_merges_on_single_replica_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01532_execute_merges_on_single_replica_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01532_execute_merges_on_single_replica_long/query.sql b/parser/testdata/01532_execute_merges_on_single_replica_long/query.sql new file mode 100644 index 000000000..d84a29a4e --- /dev/null +++ b/parser/testdata/01532_execute_merges_on_single_replica_long/query.sql @@ -0,0 +1,133 @@ +-- Tags: long, replica, no-replicated-database, no-parallel, no-object-storage +-- Tag no-replicated-database: Fails due to additional replicas or shards +-- Tag no-parallel: static zk path + +DROP TABLE IF EXISTS execute_on_single_replica_r1 SYNC; +DROP TABLE IF EXISTS execute_on_single_replica_r2 SYNC; + +/* this test requires a fixed zookeeper path, so we cannot use ReplicatedMergeTree({database}) */ +CREATE TABLE execute_on_single_replica_r1 (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_01532/execute_on_single_replica', 'r1') ORDER BY tuple() SETTINGS execute_merges_on_single_replica_time_threshold=10; +CREATE TABLE execute_on_single_replica_r2 (x UInt64) ENGINE=ReplicatedMergeTree('/clickhouse/tables/test_01532/execute_on_single_replica', 'r2') ORDER BY tuple() SETTINGS execute_merges_on_single_replica_time_threshold=10; + +INSERT INTO execute_on_single_replica_r1 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (1); +SYSTEM SYNC REPLICA execute_on_single_replica_r2; + +SET optimize_throw_if_noop=1; + +SELECT '****************************'; +SELECT '*** emulate normal feature operation - merges are distributed between replicas'; + +/* all_0_0_1 - will be merged by r1, and downloaded by r2 */ +OPTIMIZE TABLE execute_on_single_replica_r1 FINAL; +SYSTEM SYNC REPLICA execute_on_single_replica_r2; + +/* all_0_0_2 - will be merged by r1, and downloaded by r2 */ +OPTIMIZE TABLE execute_on_single_replica_r2 FINAL; +SYSTEM SYNC REPLICA execute_on_single_replica_r1; + +/* all_0_0_3 - will be merged by r2, and downloaded by r1 */ +OPTIMIZE TABLE execute_on_single_replica_r1 FINAL; +SYSTEM SYNC REPLICA execute_on_single_replica_r2; + +/* all_0_0_4 - will be merged by r2, and downloaded by r1 */ +OPTIMIZE TABLE execute_on_single_replica_r2 FINAL; +SYSTEM SYNC REPLICA execute_on_single_replica_r1; + +SELECT '****************************'; +SELECT '*** emulate execute_merges_on_single_replica_time_threshold timeout'; + +SYSTEM STOP REPLICATION QUEUES execute_on_single_replica_r2; + +/* all_0_0_5 - should be merged by r2, but its
replication queue is stopped, so r1 does the merge */ +OPTIMIZE TABLE execute_on_single_replica_r1 FINAL SETTINGS replication_alter_partitions_sync=0; + +/* if we checked immediately, we could find the log entry still unchecked */ +SET function_sleep_max_microseconds_per_block = 10000000; +SELECT * FROM numbers(4) where sleepEachRow(1); + +SELECT '****************************'; +SELECT '*** timeout not exceeded, r1 waits for r2'; + +/* we can now check that r1 waits for r2 */ +SELECT + table, + type, + new_part_name, + num_postponed > 0 AS has_postpones, + postpone_reason +FROM system.replication_queue +WHERE table LIKE 'execute\\_on\\_single\\_replica\\_r%' +AND database = currentDatabase() +ORDER BY table +FORMAT Vertical; + +/* by now execute_merges_on_single_replica_time_threshold has been exceeded */ +SELECT * FROM numbers(10) where sleepEachRow(1); + +SELECT '****************************'; +SELECT '*** timeout exceeded, r1 failed to get the merged part from r2 and did the merge on its own'; + +SELECT + table, + type, + new_part_name, + num_postponed > 0 AS has_postpones, + postpone_reason +FROM system.replication_queue +WHERE table LIKE 'execute\\_on\\_single\\_replica\\_r%' +AND database = currentDatabase() +ORDER BY table +FORMAT Vertical; + +SYSTEM START REPLICATION QUEUES execute_on_single_replica_r2; +SYSTEM SYNC REPLICA execute_on_single_replica_r2; + +SELECT '****************************'; +SELECT '*** queue unfreeze'; + +SELECT + table, + type, + new_part_name, + num_postponed > 0 AS has_postpones, + postpone_reason +FROM system.replication_queue +WHERE table LIKE 'execute\\_on\\_single\\_replica\\_r%' +AND database = currentDatabase() +ORDER BY table +FORMAT Vertical; + +SELECT '****************************'; +SELECT '*** disable the feature'; + +ALTER TABLE execute_on_single_replica_r1 MODIFY SETTING execute_merges_on_single_replica_time_threshold=0; +ALTER TABLE execute_on_single_replica_r2 MODIFY SETTING execute_merges_on_single_replica_time_threshold=0; + +SET replication_alter_partitions_sync=2; +/* all_0_0_6 - we disabled the feature, both replicas will merge */ +OPTIMIZE TABLE execute_on_single_replica_r2 FINAL; +/* all_0_0_7 - same */ +OPTIMIZE TABLE execute_on_single_replica_r1 FINAL; + +SYSTEM SYNC REPLICA execute_on_single_replica_r1; +SYSTEM SYNC REPLICA execute_on_single_replica_r2; + +SYSTEM FLUSH LOGS part_log; + +SELECT '****************************'; +SELECT '*** part_log'; +SELECT + part_name, + arraySort(groupArrayIf(table, event_type = 'MergeParts')) AS mergers, + arraySort(groupArrayIf(table, event_type = 'DownloadPart')) AS fetchers +FROM system.part_log +WHERE (event_time > (now() - 120)) + AND (table LIKE 'execute\\_on\\_single\\_replica\\_r%') + AND (part_name NOT LIKE '%\\_0') + AND (database = currentDatabase()) +GROUP BY part_name +ORDER BY part_name +FORMAT Vertical; + +DROP TABLE execute_on_single_replica_r1 SYNC; +DROP TABLE execute_on_single_replica_r2 SYNC; diff --git a/parser/testdata/01532_having_with_totals/ast.json b/parser/testdata/01532_having_with_totals/ast.json new file mode 100644 index 000000000..9c127db56 --- /dev/null +++ b/parser/testdata/01532_having_with_totals/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery local_t (children 1)" + }, + { + "explain": " Identifier local_t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001196551, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/01532_having_with_totals/metadata.json
b/parser/testdata/01532_having_with_totals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01532_having_with_totals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01532_having_with_totals/query.sql b/parser/testdata/01532_having_with_totals/query.sql new file mode 100644 index 000000000..290799c13 --- /dev/null +++ b/parser/testdata/01532_having_with_totals/query.sql @@ -0,0 +1,45 @@ +drop table if exists local_t; +create table local_t engine Log as select 1 a; + +SELECT '127.0.0.{1,2}'; +SELECT * +FROM +( + SELECT a + FROM remote('127.0.0.{1,2}', currentDatabase(), local_t) + GROUP BY a + WITH TOTALS +) +WHERE a IN +( + SELECT 1 +); + +SELECT '127.0.0.1'; +SELECT * +FROM +( + SELECT a + FROM remote('127.0.0.1', currentDatabase(), local_t) + GROUP BY a + WITH TOTALS +) +WHERE a IN +( + SELECT 1 +); + +SELECT 'with explicit having'; +SELECT + a, + count() +FROM remote('127.0.0.{1,2}', currentDatabase(), local_t) +GROUP BY a + WITH TOTALS +HAVING a IN +( + SELECT 1 +); + + +drop table if exists local_t; \ No newline at end of file diff --git a/parser/testdata/01532_min_max_with_modifiers/ast.json b/parser/testdata/01532_min_max_with_modifiers/ast.json new file mode 100644 index 000000000..2317a2c14 --- /dev/null +++ b/parser/testdata/01532_min_max_with_modifiers/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'totals'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001052014, + "rows_read": 5, + "bytes_read": 177 + } +} diff --git a/parser/testdata/01532_min_max_with_modifiers/metadata.json b/parser/testdata/01532_min_max_with_modifiers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01532_min_max_with_modifiers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01532_min_max_with_modifiers/query.sql b/parser/testdata/01532_min_max_with_modifiers/query.sql new file mode 100644 index 000000000..364b110d8 --- /dev/null +++ b/parser/testdata/01532_min_max_with_modifiers/query.sql @@ -0,0 +1,18 @@ +SELECT 'totals'; +SELECT number % 3 + 1 AS n, min(n), max(n) FROM numbers(100) GROUP BY n WITH TOTALS; +SELECT 'rollup'; +SELECT number % 3 + 1 AS n, min(n), max(n) FROM numbers(100) GROUP BY n WITH ROLLUP; +SELECT 'cube'; +SELECT number % 3 + 1 AS n, min(n), max(n) FROM numbers(100) GROUP BY n WITH CUBE; +SELECT '======='; + +SELECT + x, + min(x) AS lower, + max(x) + 1 AS upper, + upper - lower AS range +FROM +( + SELECT arrayJoin([1, 2]) AS x +) +GROUP BY x WITH ROLLUP; diff --git a/parser/testdata/01532_primary_key_without_order_by_zookeeper/ast.json b/parser/testdata/01532_primary_key_without_order_by_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01532_primary_key_without_order_by_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01532_primary_key_without_order_by_zookeeper/metadata.json b/parser/testdata/01532_primary_key_without_order_by_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01532_primary_key_without_order_by_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01532_primary_key_without_order_by_zookeeper/query.sql b/parser/testdata/01532_primary_key_without_order_by_zookeeper/query.sql new file mode 100644 index 000000000..b55b48713 --- /dev/null +++ b/parser/testdata/01532_primary_key_without_order_by_zookeeper/query.sql @@ -0,0 +1,102 @@ +-- Tags: zookeeper + +DROP TABLE IF EXISTS merge_tree_pk SYNC; + +CREATE TABLE merge_tree_pk +( + key UInt64, + value String +) +ENGINE = ReplacingMergeTree() +PRIMARY KEY key; + +SHOW CREATE TABLE merge_tree_pk; + +INSERT INTO merge_tree_pk VALUES (1, 'a'); +INSERT INTO merge_tree_pk VALUES (2, 'b'); + +SELECT * FROM merge_tree_pk ORDER BY key, value; + +INSERT INTO merge_tree_pk VALUES (1, 'c'); + +DETACH TABLE merge_tree_pk; +ATTACH TABLE merge_tree_pk; + +SELECT * FROM merge_tree_pk FINAL ORDER BY key, value; + +DROP TABLE IF EXISTS merge_tree_pk SYNC; + +DROP TABLE IF EXISTS merge_tree_pk_sql SYNC; + +CREATE TABLE merge_tree_pk_sql +( + key UInt64, + value String, + PRIMARY KEY (key) +) +ENGINE = ReplacingMergeTree(); + +SHOW CREATE TABLE merge_tree_pk_sql; + +INSERT INTO merge_tree_pk_sql VALUES (1, 'a'); +INSERT INTO merge_tree_pk_sql VALUES (2, 'b'); + +SELECT * FROM merge_tree_pk_sql ORDER BY key, value; + +INSERT INTO merge_tree_pk_sql VALUES (1, 'c'); + +DETACH TABLE merge_tree_pk_sql; +ATTACH TABLE merge_tree_pk_sql; + +SELECT * FROM merge_tree_pk_sql FINAL ORDER BY key, value; + +ALTER TABLE merge_tree_pk_sql ADD COLUMN key2 UInt64, MODIFY ORDER BY (key, key2); + +INSERT INTO merge_tree_pk_sql VALUES (2, 'd', 555); + +INSERT INTO merge_tree_pk_sql VALUES (2, 'e', 555); + +SELECT * FROM merge_tree_pk_sql FINAL ORDER BY key, value; + +SHOW CREATE TABLE merge_tree_pk_sql; + +DROP TABLE IF EXISTS merge_tree_pk_sql SYNC; + +DROP TABLE IF EXISTS replicated_merge_tree_pk_sql SYNC; + +CREATE TABLE replicated_merge_tree_pk_sql +( + key UInt64, + value String, + PRIMARY KEY (key) +) +ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{database}/01532_primary_key_without', 'r1'); + +SHOW CREATE TABLE replicated_merge_tree_pk_sql; + +INSERT INTO replicated_merge_tree_pk_sql VALUES (1, 'a'); +INSERT INTO replicated_merge_tree_pk_sql VALUES (2, 'b'); + +SELECT * FROM replicated_merge_tree_pk_sql ORDER BY key, value; + +INSERT INTO replicated_merge_tree_pk_sql VALUES (1, 'c'); + +DETACH TABLE replicated_merge_tree_pk_sql; +ATTACH TABLE replicated_merge_tree_pk_sql; + +SELECT * FROM replicated_merge_tree_pk_sql FINAL ORDER BY key, value; + +ALTER TABLE replicated_merge_tree_pk_sql ADD COLUMN key2 UInt64, MODIFY ORDER BY (key, key2); + +INSERT INTO replicated_merge_tree_pk_sql VALUES (2, 'd', 555); + +INSERT INTO replicated_merge_tree_pk_sql VALUES (2, 'e', 555); + +SELECT * FROM replicated_merge_tree_pk_sql FINAL ORDER BY key, value; + +DETACH TABLE replicated_merge_tree_pk_sql; +ATTACH TABLE replicated_merge_tree_pk_sql; + +SHOW CREATE TABLE replicated_merge_tree_pk_sql; + +DROP TABLE IF EXISTS replicated_merge_tree_pk_sql SYNC; diff --git a/parser/testdata/01532_tuple_with_name_type/ast.json b/parser/testdata/01532_tuple_with_name_type/ast.json new file mode 100644 index 000000000..7f5221066 --- /dev/null +++ b/parser/testdata/01532_tuple_with_name_type/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_01532_1 (children 1)" + }, + { + "explain": " Identifier test_01532_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001270041, + "rows_read": 2, + "bytes_read": 76 + } +} diff 
--git a/parser/testdata/01532_tuple_with_name_type/metadata.json b/parser/testdata/01532_tuple_with_name_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01532_tuple_with_name_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01532_tuple_with_name_type/query.sql b/parser/testdata/01532_tuple_with_name_type/query.sql new file mode 100644 index 000000000..fbc052d3c --- /dev/null +++ b/parser/testdata/01532_tuple_with_name_type/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS test_01532_1; +DROP TABLE IF EXISTS test_01532_2; +DROP TABLE IF EXISTS test_01532_3; +DROP TABLE IF EXISTS test_01532_4; + +CREATE TABLE test_01532_1 (a Tuple(key String, value String)) ENGINE Memory(); +DESCRIBE TABLE test_01532_1; + +CREATE TABLE test_01532_2 (a Tuple(Tuple(key String, value String))) ENGINE Memory(); +DESCRIBE TABLE test_01532_2; + +CREATE TABLE test_01532_3 (a Array(Tuple(key String, value String))) ENGINE Memory(); +DESCRIBE TABLE test_01532_3; + +CREATE TABLE test_01532_4 (a Tuple(UInt8, Tuple(key String, value String))) ENGINE Memory(); +DESCRIBE TABLE test_01532_4; + +DROP TABLE test_01532_1; +DROP TABLE test_01532_2; +DROP TABLE test_01532_3; +DROP TABLE test_01532_4; diff --git a/parser/testdata/01533_collate_in_nullable/ast.json b/parser/testdata/01533_collate_in_nullable/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01533_collate_in_nullable/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01533_collate_in_nullable/metadata.json b/parser/testdata/01533_collate_in_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01533_collate_in_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01533_collate_in_nullable/query.sql b/parser/testdata/01533_collate_in_nullable/query.sql new file mode 100644 index 000000000..9664a8efd --- /dev/null +++ b/parser/testdata/01533_collate_in_nullable/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS test_collate; + +CREATE TABLE test_collate (x UInt32, s Nullable(String)) ENGINE=Memory(); + +INSERT INTO test_collate VALUES (1, 'Ё'), (1, 'ё'), (1, 'а'), (1, null), (2, 'А'), (2, 'я'), (2, 'Я'), (2, null); + +SELECT 'Order by without collate'; +SELECT * FROM test_collate ORDER BY s, x; +SELECT 'Order by with collate'; +SELECT * FROM test_collate ORDER BY s COLLATE 'ru', x; + +SELECT 'Order by tuple without collate'; +SELECT * FROM test_collate ORDER BY x, s; +SELECT 'Order by tuple with collate'; +SELECT * FROM test_collate ORDER BY x, s COLLATE 'ru'; + +DROP TABLE test_collate; + diff --git a/parser/testdata/01533_distinct_depends_on_max_threads/ast.json b/parser/testdata/01533_distinct_depends_on_max_threads/ast.json new file mode 100644 index 000000000..42324714e --- /dev/null +++ b/parser/testdata/01533_distinct_depends_on_max_threads/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery bug_13492 (children 1)" + }, + { + "explain": " Identifier bug_13492" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001371806, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/01533_distinct_depends_on_max_threads/metadata.json b/parser/testdata/01533_distinct_depends_on_max_threads/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01533_distinct_depends_on_max_threads/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01533_distinct_depends_on_max_threads/query.sql b/parser/testdata/01533_distinct_depends_on_max_threads/query.sql new file mode 100644 index 000000000..4f32576e8 --- /dev/null +++ b/parser/testdata/01533_distinct_depends_on_max_threads/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS bug_13492; + +CREATE TABLE bug_13492 (`d` DateTime) ENGINE = MergeTree +PARTITION BY toYYYYMMDD(d) ORDER BY tuple(); + +INSERT INTO bug_13492 SELECT addDays(now(), number) FROM numbers(100); + +SET max_threads = 5; + +SELECT DISTINCT 1 FROM bug_13492, numbers(1) n; + +SET max_threads = 2; + +SELECT DISTINCT 1 FROM bug_13492, numbers(1) n; + +DROP TABLE bug_13492; diff --git a/parser/testdata/01533_distinct_nullable_uuid/ast.json b/parser/testdata/01533_distinct_nullable_uuid/ast.json new file mode 100644 index 000000000..65a0767f1 --- /dev/null +++ b/parser/testdata/01533_distinct_nullable_uuid/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery bug_14144 (children 1)" + }, + { + "explain": " Identifier bug_14144" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001189671, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/01533_distinct_nullable_uuid/metadata.json b/parser/testdata/01533_distinct_nullable_uuid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01533_distinct_nullable_uuid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01533_distinct_nullable_uuid/query.sql b/parser/testdata/01533_distinct_nullable_uuid/query.sql new file mode 100644 index 000000000..926739d3f --- /dev/null +++ b/parser/testdata/01533_distinct_nullable_uuid/query.sql @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS bug_14144; + +CREATE TABLE bug_14144 +( meta_source_req_uuid Nullable(UUID), + a Int64, + meta_source_type String +) +ENGINE = MergeTree +ORDER BY a; + +INSERT INTO bug_14144 SELECT cast(toUUID('442d3ff4-842a-45bb-8b02-b616122c0dc6'), 'Nullable(UUID)'), number, 'missing' FROM numbers(1000); + +INSERT INTO bug_14144 SELECT cast(toUUIDOrZero('2fc89389-4728-4b30-9e51-b5bc3ad215f6'), 'Nullable(UUID)'), number, 'missing' FROM numbers(1000); + +INSERT INTO bug_14144 SELECT cast(toUUIDOrNull('05fe40cb-1d0c-45b0-8e60-8e311c2463f1'), 'Nullable(UUID)'), number, 'missing' FROM numbers(1000); + +SELECT DISTINCT meta_source_req_uuid +FROM bug_14144 +WHERE meta_source_type = 'missing' +ORDER BY meta_source_req_uuid ASC; + +TRUNCATE TABLE bug_14144; + +INSERT INTO bug_14144 SELECT generateUUIDv4(), number, 'missing' FROM numbers(10000); + +SELECT COUNT() FROM ( + SELECT DISTINCT meta_source_req_uuid + FROM bug_14144 + WHERE meta_source_type = 'missing' + ORDER BY meta_source_req_uuid ASC + LIMIT 100000 +); + +DROP TABLE bug_14144; + + + + diff --git a/parser/testdata/01533_multiple_nested/ast.json b/parser/testdata/01533_multiple_nested/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01533_multiple_nested/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01533_multiple_nested/metadata.json b/parser/testdata/01533_multiple_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01533_multiple_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01533_multiple_nested/query.sql 
b/parser/testdata/01533_multiple_nested/query.sql new file mode 100644 index 000000000..f8c5d7731 --- /dev/null +++ b/parser/testdata/01533_multiple_nested/query.sql @@ -0,0 +1,72 @@ +-- Tags: no-object-storage, no-random-merge-tree-settings, no-parallel +-- no-s3 because read FileOpen metric +DROP TABLE IF EXISTS nested; + +SET flatten_nested = 0; +SET use_uncompressed_cache = 0; +SET local_filesystem_read_method='pread'; + +CREATE TABLE nested +( + col1 Nested(a UInt32, s String), + col2 Nested(a UInt32, n Nested(s String, b UInt32)), + col3 Nested(n1 Nested(a UInt32, b UInt32), n2 Nested(s String, t String)) +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO nested VALUES ([(1, 'q'), (2, 'w'), (3, 'e')], [(4, [('a', 5), ('s', 6), ('d', 7)])], [([(8, 9), (10, 11)], [('z', 'x'), ('c', 'v')])]); +INSERT INTO nested VALUES ([(12, 'qq')], [(4, []), (5, [('b', 6), ('n', 7)])], [([], []), ([(44, 55), (66, 77)], [])]); + +OPTIMIZE TABLE nested FINAL; + +SELECT 'all'; +SELECT * FROM nested; +SELECT 'col1'; +SELECT col1.a, col1.s FROM nested; +SELECT 'col2'; +SELECT col2.a, col2.n, col2.n.s, col2.n.b FROM nested; +SELECT 'col3'; +SELECT col3.n1, col3.n2, col3.n1.a, col3.n1.b, col3.n2.s, col3.n2.t FROM nested; + +SELECT 'read files'; + +SYSTEM DROP MARK CACHE; +SELECT col1.a FROM nested FORMAT Null; + +-- 4 files: (col1.size0, col1.a) x2 +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['FileOpen'] - ProfileEvents['CreatedReadBufferDirectIOFailed'] +FROM system.query_log +WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT col1.a FROM %nested%')) + AND event_date >= yesterday() AND current_database = currentDatabase(); + +SYSTEM DROP MARK CACHE; +SELECT col3.n2.s FROM nested FORMAT Null; + +-- 6 files: (col3.size0, col3.n2.size1, col3.n2.s) x2 +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['FileOpen'] - ProfileEvents['CreatedReadBufferDirectIOFailed'] +FROM system.query_log +WHERE (type = 'QueryFinish') AND (lower(query) LIKE lower('SELECT col3.n2.s FROM %nested%')) + AND event_date >= yesterday() AND current_database = currentDatabase(); + +DROP TABLE nested; + +CREATE TABLE nested +( + id UInt32, + col1 Nested(a UInt32, n Nested(s String, b UInt32)) +) +ENGINE = MergeTree +ORDER BY id +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO nested SELECT number, arrayMap(x -> (x, arrayMap(y -> (toString(y * x), y + x), range(number % 17))), range(number % 19)) FROM numbers(100000); +SELECT id % 10, sum(length(col1)), sumArray(arrayMap(x -> length(x), col1.n.b)) FROM nested GROUP BY id % 10; + +SELECT arraySum(col1.a), arrayMap(x -> x * x * 2, col1.a) FROM nested ORDER BY id LIMIT 5; +SELECT untuple(arrayJoin(arrayJoin(col1.n))) FROM nested ORDER BY id LIMIT 10 OFFSET 10; + +DROP TABLE nested; diff --git a/parser/testdata/01533_optimize_skip_merged_partitions/ast.json b/parser/testdata/01533_optimize_skip_merged_partitions/ast.json new file mode 100644 index 000000000..85e3a5bfc --- /dev/null +++ b/parser/testdata/01533_optimize_skip_merged_partitions/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery optimize_final (children 1)" + }, + { + "explain": " Identifier optimize_final" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001438318, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/01533_optimize_skip_merged_partitions/metadata.json b/parser/testdata/01533_optimize_skip_merged_partitions/metadata.json new 
file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01533_optimize_skip_merged_partitions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01533_optimize_skip_merged_partitions/query.sql b/parser/testdata/01533_optimize_skip_merged_partitions/query.sql new file mode 100644 index 000000000..3e9e5cda2 --- /dev/null +++ b/parser/testdata/01533_optimize_skip_merged_partitions/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS optimize_final; + +SET optimize_skip_merged_partitions=1; + +CREATE TABLE optimize_final(t DateTime, x Int32) ENGINE = MergeTree() PARTITION BY toYYYYMM(t) ORDER BY x; + +INSERT INTO optimize_final SELECT toDate('2020-01-01'), number FROM numbers(5); +INSERT INTO optimize_final SELECT toDate('2020-01-01'), number + 5 FROM numbers(5); + +OPTIMIZE TABLE optimize_final FINAL; + +INSERT INTO optimize_final SELECT toDate('2000-01-01'), number FROM numbers(5); +INSERT INTO optimize_final SELECT toDate('2000-01-01'), number + 5 FROM numbers(5); + +OPTIMIZE TABLE optimize_final FINAL; + +SELECT table, partition, active, level from system.parts where table = 'optimize_final' and database = currentDatabase() and active = 1; + +DROP TABLE optimize_final; + diff --git a/parser/testdata/01533_quantile_deterministic_assert/ast.json b/parser/testdata/01533_quantile_deterministic_assert/ast.json new file mode 100644 index 000000000..a31e31024 --- /dev/null +++ b/parser/testdata/01533_quantile_deterministic_assert/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantileDeterministic (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function sipHash64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '127.0.0.{1,2}'" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_8193" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001510997, + "rows_read": 19, + "bytes_read": 779 + } +} diff --git a/parser/testdata/01533_quantile_deterministic_assert/metadata.json b/parser/testdata/01533_quantile_deterministic_assert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01533_quantile_deterministic_assert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01533_quantile_deterministic_assert/query.sql b/parser/testdata/01533_quantile_deterministic_assert/query.sql new file mode 100644 index 000000000..c75e5dd50 --- /dev/null +++ b/parser/testdata/01533_quantile_deterministic_assert/query.sql @@ -0,0 +1 @@ +SELECT quantileDeterministic(number, sipHash64(number)) FROM remote('127.0.0.{1,2}', numbers(8193)); diff --git a/parser/testdata/01533_sum_if_nullable_bug/ast.json b/parser/testdata/01533_sum_if_nullable_bug/ast.json new file 
mode 100644 index 000000000..65f7c2106 --- /dev/null +++ b/parser/testdata/01533_sum_if_nullable_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery T (children 1)" + }, + { + "explain": " Identifier T" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001194075, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01533_sum_if_nullable_bug/metadata.json b/parser/testdata/01533_sum_if_nullable_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01533_sum_if_nullable_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01533_sum_if_nullable_bug/query.sql b/parser/testdata/01533_sum_if_nullable_bug/query.sql new file mode 100644 index 000000000..6b110748f --- /dev/null +++ b/parser/testdata/01533_sum_if_nullable_bug/query.sql @@ -0,0 +1,10 @@ +drop table if exists T; +create table T(a Nullable(Int64)) engine = Memory(); +insert into T values (1), (2), (3), (4), (5); +select sumIf(42, (a % 2) = 0) from T; +select sumIf(42, (a % 2) = 0) from remote('127.0.0.{1,2}', currentDatabase(), T); +select sumIf(42, toNullable(1)) from T; +select sumIf(42, toNullable(1)) from remote('127.0.0.{1,2}', currentDatabase(), T); +-- select sumIf(42, toNullable(toInt64(1))) from T; +-- select sumIf(42, toNullable(toInt64(1))) from remote('127.0.0.{1,2}', currentDatabase(), T); +drop table if exists T; diff --git a/parser/testdata/01534_lambda_array_join/ast.json b/parser/testdata/01534_lambda_array_join/ast.json new file mode 100644 index 000000000..524d7c3d0 --- /dev/null +++ b/parser/testdata/01534_lambda_array_join/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1]" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal Array_[UInt64_1]" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001060087, + "rows_read": 23, + "bytes_read": 917 + } +} diff --git a/parser/testdata/01534_lambda_array_join/metadata.json b/parser/testdata/01534_lambda_array_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01534_lambda_array_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01534_lambda_array_join/query.sql b/parser/testdata/01534_lambda_array_join/query.sql new file mode 100644 index 000000000..092c569b2 --- /dev/null +++ 
b/parser/testdata/01534_lambda_array_join/query.sql @@ -0,0 +1,12 @@ +SELECT arrayMap(x -> concat(x, concat(arrayJoin([1]), x, NULL), ''), [1]); +SELECT arrayMap(x -> arrayJoin([1]), [1, 2]); + +SELECT + arrayJoin(arrayMap(x -> reinterpretAsUInt8(substring(randomString(range(randomString(1048577), NULL), arrayJoin(arrayMap(x -> reinterpretAsUInt8(substring(randomString(range(NULL), 65537), 255)), range(1))), substring(randomString(NULL), x + 7), '257'), 1025)), range(7))) AS byte, + count() AS c + FROM numbers(10) + GROUP BY + arrayMap(x -> reinterpretAsUInt8(substring(randomString(randomString(range(randomString(255), NULL)), NULL), NULL)), range(3)), + randomString(range(randomString(1048577), NULL), NULL), + byte + ORDER BY byte ASC; diff --git a/parser/testdata/01535_decimal_round_scale_overflow_check/ast.json b/parser/testdata/01535_decimal_round_scale_overflow_check/ast.json new file mode 100644 index 000000000..6b02fddf2 --- /dev/null +++ b/parser/testdata/01535_decimal_round_scale_overflow_check/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal Int64_-9223372036854775806" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.00143498, + "rows_read": 11, + "bytes_read": 429 + } +} diff --git a/parser/testdata/01535_decimal_round_scale_overflow_check/metadata.json b/parser/testdata/01535_decimal_round_scale_overflow_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01535_decimal_round_scale_overflow_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01535_decimal_round_scale_overflow_check/query.sql b/parser/testdata/01535_decimal_round_scale_overflow_check/query.sql new file mode 100644 index 000000000..d81a23f64 --- /dev/null +++ b/parser/testdata/01535_decimal_round_scale_overflow_check/query.sql @@ -0,0 +1 @@ +SELECT round(toDecimal32(1, 0), -9223372036854775806); -- { serverError ARGUMENT_OUT_OF_BOUND } diff --git a/parser/testdata/01536_fuzz_cast/ast.json b/parser/testdata/01536_fuzz_cast/ast.json new file mode 100644 index 000000000..47c89a1d1 --- /dev/null +++ b/parser/testdata/01536_fuzz_cast/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001178355, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01536_fuzz_cast/metadata.json b/parser/testdata/01536_fuzz_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01536_fuzz_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01536_fuzz_cast/query.sql b/parser/testdata/01536_fuzz_cast/query.sql new file mode 100644 index 000000000..7fcdf9993 --- /dev/null +++ b/parser/testdata/01536_fuzz_cast/query.sql @@ -0,0 +1,2 @@ +SET cast_keep_nullable = 0; +SELECT CAST(arrayJoin([NULL, '', '', NULL, '', NULL, 
'01.02.2017 03:04\005GMT', '', NULL, '01/02/2017 03:04:05 MSK01/02/\0017 03:04:05 MSK', '', NULL, '03/04/201903/04/201903/04/\001903/04/2019']), 'Enum8(\'a\' = 1, \'b\' = 2)') AS x; -- { serverError CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN } diff --git a/parser/testdata/01537_fuzz_count_equal/ast.json b/parser/testdata/01537_fuzz_count_equal/ast.json new file mode 100644 index 000000000..05ee6fbf9 --- /dev/null +++ b/parser/testdata/01537_fuzz_count_equal/ast.json @@ -0,0 +1,100 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function countEqual (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[NULL, NULL, NULL]" + }, + { + "explain": " Literal NULL (alias x)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_255, UInt64_1025, NULL, NULL]" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_2, UInt64_1048576, NULL, NULL]" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 26, + + "statistics": + { + "elapsed": 0.00200637, + "rows_read": 26, + "bytes_read": 1190 + } +} diff --git a/parser/testdata/01537_fuzz_count_equal/metadata.json b/parser/testdata/01537_fuzz_count_equal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01537_fuzz_count_equal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01537_fuzz_count_equal/query.sql b/parser/testdata/01537_fuzz_count_equal/query.sql new file mode 100644 index 000000000..fde3fe191 --- /dev/null +++ b/parser/testdata/01537_fuzz_count_equal/query.sql @@ -0,0 +1 @@ +SELECT DISTINCT NULL = countEqual(materialize([arrayJoin([NULL, NULL, NULL]), NULL AS x, arrayJoin([255, 1025, NULL, NULL]), arrayJoin([2, 1048576, NULL, NULL])]), materialize(x)); diff --git a/parser/testdata/01538_fuzz_aggregate/ast.json b/parser/testdata/01538_fuzz_aggregate/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01538_fuzz_aggregate/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01538_fuzz_aggregate/metadata.json b/parser/testdata/01538_fuzz_aggregate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01538_fuzz_aggregate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01538_fuzz_aggregate/query.sql b/parser/testdata/01538_fuzz_aggregate/query.sql new file mode 100644 index 000000000..bfd027af9 --- /dev/null +++ b/parser/testdata/01538_fuzz_aggregate/query.sql 
@@ -0,0 +1,10 @@ +SELECT + count(), + sum(ns) +FROM +( + SELECT intDiv(number, NULL) AS k + FROM system.numbers_mt + GROUP BY k +) +ARRAY JOIN ns; -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/01540_verbatim_partition_pruning/ast.json b/parser/testdata/01540_verbatim_partition_pruning/ast.json new file mode 100644 index 000000000..98f06ee85 --- /dev/null +++ b/parser/testdata/01540_verbatim_partition_pruning/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery xy (children 1)" + }, + { + "explain": " Identifier xy" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001196475, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01540_verbatim_partition_pruning/metadata.json b/parser/testdata/01540_verbatim_partition_pruning/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01540_verbatim_partition_pruning/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01540_verbatim_partition_pruning/query.sql b/parser/testdata/01540_verbatim_partition_pruning/query.sql new file mode 100644 index 000000000..4779bce70 --- /dev/null +++ b/parser/testdata/01540_verbatim_partition_pruning/query.sql @@ -0,0 +1,49 @@ +drop table if exists xy; + +create table xy(x int, y int) engine MergeTree partition by intHash64(x) % 2 order by y settings index_granularity = 1; + +-- intHash64(0) % 2 = 0 +-- intHash64(2) % 2 = 1 +-- intHash64(8) % 2 = 0 +-- intHash64(9) % 2 = 1 +insert into xy values (0, 2), (2, 3), (8, 4), (9, 5); + +-- Now we have two partitions: 0 and 1, each of which contains 2 values. +-- minmax index for the first partition is 0 <= x <= 8 +-- minmax index for the second partition is 2 <= x <= 9 + +SET max_rows_to_read = 2; + +select * from xy where intHash64(x) % 2 = intHash64(2) % 2; + +-- Equality is another special operator that can be treated as an always monotonic indicator for deterministic functions. +-- minmax index is not enough. 
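+-- Note (explanatory comment, not part of the upstream test): since intHash64 is deterministic, the equality below lets the partition key intHash64(x) % 2 be evaluated verbatim on the constant 8, so only the matching partition (2 rows) is read and max_rows_to_read = 2 still holds.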
+select * from xy where x = 8; + +drop table if exists xy; + +-- Test if we provide enough columns to generate a partition value +drop table if exists xyz; +create table xyz(x int, y int, z int) engine MergeTree partition by if(toUInt8(x), y, z) order by x settings index_granularity = 1; +insert into xyz values (1, 2, 3); +select * from xyz where y = 2; +drop table if exists xyz; + +-- Test if we obey strict rules when facing NOT conditions +drop table if exists test; +create table test(d Date, k Int64, s String) Engine=MergeTree partition by (toYYYYMM(d),k) order by (d, k); + +insert into test values ('2020-01-01', 1, ''); +insert into test values ('2020-01-02', 1, ''); + +select * from test where d != '2020-01-01'; +drop table test; + +-- Test if single value partition pruning works correctly for Date = String +drop table if exists myTable; +CREATE TABLE myTable (myDay Date, myOrder Int32, someData String) ENGINE = ReplacingMergeTree PARTITION BY floor(toYYYYMMDD(myDay), -1) ORDER BY (myOrder); +INSERT INTO myTable (myDay, myOrder) VALUES ('2021-01-01', 1); +INSERT INTO myTable (myDay, myOrder) VALUES ('2021-01-02', 2); -- This row should be returned +INSERT INTO myTable (myDay, myOrder) VALUES ('2021-01-03', 3); +SELECT * FROM myTable mt WHERE myDay = '2021-01-02'; +drop table myTable; diff --git a/parser/testdata/01542_collate_in_array/ast.json b/parser/testdata/01542_collate_in_array/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01542_collate_in_array/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01542_collate_in_array/metadata.json b/parser/testdata/01542_collate_in_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01542_collate_in_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01542_collate_in_array/query.sql b/parser/testdata/01542_collate_in_array/query.sql new file mode 100644 index 000000000..1e748bd54 --- /dev/null +++ b/parser/testdata/01542_collate_in_array/query.sql @@ -0,0 +1,36 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS collate_test1; +DROP TABLE IF EXISTS collate_test2; +DROP TABLE IF EXISTS collate_test3; + +CREATE TABLE collate_test1 (x UInt32, s Array(String)) ENGINE=Memory(); +CREATE TABLE collate_test2 (x UInt32, s Array(LowCardinality(Nullable(String)))) ENGINE=Memory(); +CREATE TABLE collate_test3 (x UInt32, s Array(Array(String))) ENGINE=Memory(); + +INSERT INTO collate_test1 VALUES (1, ['Ё']), (1, ['ё']), (1, ['а']), (2, ['А']), (2, ['я', 'а']), (2, ['Я']), (1, ['ё','а']), (1, ['ё', 'я']), (2, ['ё', 'а', 'а']); +INSERT INTO collate_test2 VALUES (1, ['Ё']), (1, ['ё']), (1, ['а']), (2, ['А']), (2, ['я']), (2, [null, 'Я']), (1, ['ё','а']), (1, ['ё', null, 'я']), (2, ['ё', 'а', 'а', null]); +INSERT INTO collate_test3 VALUES (1, [['а', 'я'], ['а', 'ё']]), (1, [['а', 'Ё'], ['ё', 'я']]), (2, [['ё']]), (2, [['а', 'а'], ['я', 'ё']]); + +SELECT * FROM collate_test1 ORDER BY s COLLATE 'ru'; +SELECT ''; + +SELECT * FROM collate_test1 ORDER BY x, s COLLATE 'ru'; +SELECT ''; + +SELECT * FROM collate_test2 ORDER BY s COLLATE 'ru'; +SELECT ''; + +SELECT * FROM collate_test2 ORDER BY x, s COLLATE 'ru'; +SELECT ''; + +SELECT * FROM collate_test3 ORDER BY s COLLATE 'ru'; +SELECT ''; + +SELECT * FROM collate_test3 ORDER BY x, s COLLATE 'ru'; +SELECT ''; + +DROP TABLE collate_test1; +DROP TABLE collate_test2; +DROP TABLE collate_test3; + diff --git a/parser/testdata/01543_collate_in_tuple/ast.json
b/parser/testdata/01543_collate_in_tuple/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01543_collate_in_tuple/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01543_collate_in_tuple/metadata.json b/parser/testdata/01543_collate_in_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01543_collate_in_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01543_collate_in_tuple/query.sql b/parser/testdata/01543_collate_in_tuple/query.sql new file mode 100644 index 000000000..e50b5e522 --- /dev/null +++ b/parser/testdata/01543_collate_in_tuple/query.sql @@ -0,0 +1,36 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS collate_test1; +DROP TABLE IF EXISTS collate_test2; +DROP TABLE IF EXISTS collate_test3; + +CREATE TABLE collate_test1 (x UInt32, s Tuple(UInt32, String)) ENGINE=Memory(); +CREATE TABLE collate_test2 (x UInt32, s Tuple(UInt32, LowCardinality(Nullable(String)))) ENGINE=Memory(); +CREATE TABLE collate_test3 (x UInt32, s Tuple(UInt32, Tuple(UInt32, Array(String)))) ENGINE=Memory(); + +INSERT INTO collate_test1 VALUES (1, (1, 'Ё')), (1, (1, 'ё')), (1, (1, 'а')), (2, (2, 'А')), (2, (1, 'я')), (2, (2, 'Я')), (1, (2,'а')), (1, (3, 'я')); +INSERT INTO collate_test2 VALUES (1, (1, 'Ё')), (1, (1, 'ё')), (1, (1, 'а')), (2, (2, 'А')), (2, (1, 'я')), (2, (2, 'Я')), (1, (2, null)), (1, (3, 'я')), (1, (1, null)), (2, (2, null)); +INSERT INTO collate_test3 VALUES (1, (1, (1, ['Ё']))), (1, (2, (1, ['ё']))), (1, (1, (2, ['а']))), (2, (1, (1, ['А']))), (2, (2, (1, ['я']))), (2, (1, (1, ['Я']))), (1, (2, (1, ['ё','а']))), (1, (1, (2, ['ё', 'я']))), (2, (1, (1, ['ё', 'а', 'а']))); + +SELECT * FROM collate_test1 ORDER BY s COLLATE 'ru', x; +SELECT ''; + +SELECT * FROM collate_test1 ORDER BY x, s COLLATE 'ru'; +SELECT ''; + +SELECT * FROM collate_test2 ORDER BY s COLLATE 'ru', x; +SELECT ''; + +SELECT * FROM collate_test2 ORDER BY x, s COLLATE 'ru'; +SELECT ''; + +SELECT * FROM collate_test3 ORDER BY s COLLATE 'ru', x; +SELECT ''; + +SELECT * FROM collate_test3 ORDER BY x, s COLLATE 'ru'; +SELECT ''; + +DROP TABLE collate_test1; +DROP TABLE collate_test2; +DROP TABLE collate_test3; + diff --git a/parser/testdata/01543_parse_datetime_besteffort_or_null_empty_string/ast.json b/parser/testdata/01543_parse_datetime_besteffort_or_null_empty_string/ast.json new file mode 100644 index 000000000..ac892a2c7 --- /dev/null +++ b/parser/testdata/01543_parse_datetime_besteffort_or_null_empty_string/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function parseDateTimeBestEffortOrNull (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2010-01-01'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.00108457, + "rows_read": 7, + "bytes_read": 286 + } +} diff --git a/parser/testdata/01543_parse_datetime_besteffort_or_null_empty_string/metadata.json b/parser/testdata/01543_parse_datetime_besteffort_or_null_empty_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01543_parse_datetime_besteffort_or_null_empty_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01543_parse_datetime_besteffort_or_null_empty_string/query.sql b/parser/testdata/01543_parse_datetime_besteffort_or_null_empty_string/query.sql new file mode 100644 index 000000000..709802896 --- /dev/null +++ b/parser/testdata/01543_parse_datetime_besteffort_or_null_empty_string/query.sql @@ -0,0 +1,12 @@ +SELECT parseDateTimeBestEffortOrNull('2010-01-01'); +SELECT parseDateTimeBestEffortOrNull('2010-01-01 01:01:01'); +SELECT parseDateTimeBestEffortOrNull('2020-01-01 11:01:01 am'); +SELECT parseDateTimeBestEffortOrNull('2020-01-01 11:01:01 pm'); +SELECT parseDateTimeBestEffortOrNull('2020-01-01 12:01:01 am'); +SELECT parseDateTimeBestEffortOrNull('2020-01-01 12:01:01 pm'); +SELECT parseDateTimeBestEffortOrNull('2000-01-01 01:01:01'); +SELECT parseDateTimeBestEffortOrNull('20100'); +SELECT parseDateTimeBestEffortOrNull('0100:0100:0000'); +SELECT parseDateTimeBestEffortOrNull('x'); +SELECT parseDateTimeBestEffortOrNull(''); +SELECT parseDateTimeBestEffortOrNull(' '); diff --git a/parser/testdata/01543_toModifiedJulianDay/ast.json b/parser/testdata/01543_toModifiedJulianDay/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01543_toModifiedJulianDay/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01543_toModifiedJulianDay/metadata.json b/parser/testdata/01543_toModifiedJulianDay/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01543_toModifiedJulianDay/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01543_toModifiedJulianDay/query.sql b/parser/testdata/01543_toModifiedJulianDay/query.sql new file mode 100644 index 000000000..47303e0a8 --- /dev/null +++ b/parser/testdata/01543_toModifiedJulianDay/query.sql @@ -0,0 +1,38 @@ +-- +SELECT 'Invocation with constant'; + +SELECT toModifiedJulianDay('1858-11-16'); +SELECT toModifiedJulianDay('1858-11-17'); +SELECT toModifiedJulianDay('2020-11-01'); +SELECT toModifiedJulianDay(NULL); +SELECT toModifiedJulianDay('unparsable'); -- { serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED } +SELECT toModifiedJulianDay('1999-02-29'); -- { serverError CANNOT_PARSE_DATE } +SELECT toModifiedJulianDay('1999-13-32'); -- { serverError CANNOT_PARSE_DATE } + +SELECT 'or null'; +SELECT toModifiedJulianDayOrNull('2020-11-01'); +SELECT toModifiedJulianDayOrNull('unparsable'); +SELECT toModifiedJulianDayOrNull('1999-02-29'); +SELECT toModifiedJulianDayOrNull('1999-13-32'); + +-- +SELECT 'Invocation with String column'; + +DROP TABLE IF EXISTS toModifiedJulianDay_test; +CREATE TABLE toModifiedJulianDay_test (d String) ENGINE = Memory; + +INSERT INTO toModifiedJulianDay_test VALUES ('1858-11-16'), ('1858-11-17'), ('2020-11-01'); +SELECT toModifiedJulianDay(d) FROM toModifiedJulianDay_test; + +DROP TABLE toModifiedJulianDay_test; + +-- +SELECT 'Invocation with FixedString column'; + +DROP TABLE IF EXISTS toModifiedJulianDay_test; +CREATE TABLE toModifiedJulianDay_test (d FixedString(10)) ENGINE = Memory; + +INSERT INTO toModifiedJulianDay_test VALUES ('1858-11-16'), ('1858-11-17'), ('2020-11-01'); +SELECT toModifiedJulianDay(d) FROM toModifiedJulianDay_test; + +DROP TABLE toModifiedJulianDay_test; diff --git a/parser/testdata/01544_errorCodeToName/ast.json b/parser/testdata/01544_errorCodeToName/ast.json new file mode 100644 index 000000000..0c48187f0 --- /dev/null +++ b/parser/testdata/01544_errorCodeToName/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + 
"explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function errorCodeToName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUInt32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-1" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001357949, + "rows_read": 9, + "bytes_read": 356 + } +} diff --git a/parser/testdata/01544_errorCodeToName/metadata.json b/parser/testdata/01544_errorCodeToName/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01544_errorCodeToName/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01544_errorCodeToName/query.sql b/parser/testdata/01544_errorCodeToName/query.sql new file mode 100644 index 000000000..40814ec8a --- /dev/null +++ b/parser/testdata/01544_errorCodeToName/query.sql @@ -0,0 +1,6 @@ +SELECT errorCodeToName(toUInt32(-1)); +SELECT errorCodeToName(-1); +SELECT errorCodeToName(950); /* gap in error codes */ +SELECT errorCodeToName(0); +SELECT errorCodeToName(1); +SELECT errorCodeToName(1004); diff --git a/parser/testdata/01544_fromModifiedJulianDay/ast.json b/parser/testdata/01544_fromModifiedJulianDay/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01544_fromModifiedJulianDay/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01544_fromModifiedJulianDay/metadata.json b/parser/testdata/01544_fromModifiedJulianDay/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01544_fromModifiedJulianDay/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01544_fromModifiedJulianDay/query.sql b/parser/testdata/01544_fromModifiedJulianDay/query.sql new file mode 100644 index 000000000..6f0f08c36 --- /dev/null +++ b/parser/testdata/01544_fromModifiedJulianDay/query.sql @@ -0,0 +1,29 @@ +-- +SELECT 'Invocation with constant'; + +SELECT fromModifiedJulianDay(-1); +SELECT fromModifiedJulianDay(0); +SELECT fromModifiedJulianDay(59154); +SELECT fromModifiedJulianDay(NULL); +SELECT fromModifiedJulianDay(CAST(NULL, 'Nullable(Int64)')); +SELECT fromModifiedJulianDay(-678942); -- { serverError CANNOT_FORMAT_DATETIME } +SELECT fromModifiedJulianDay(-678941); +SELECT fromModifiedJulianDay(2973483); +SELECT fromModifiedJulianDay(2973484); -- { serverError CANNOT_FORMAT_DATETIME } + +SELECT 'or null'; +SELECT fromModifiedJulianDayOrNull(59154); +SELECT fromModifiedJulianDayOrNull(NULL); +SELECT fromModifiedJulianDayOrNull(-678942); +SELECT fromModifiedJulianDayOrNull(2973484); + +-- +SELECT 'Invocation with Int32 column'; + +DROP TABLE IF EXISTS fromModifiedJulianDay_test; +CREATE TABLE fromModifiedJulianDay_test (d Int32) ENGINE = Memory; + +INSERT INTO fromModifiedJulianDay_test VALUES (-1), (0), (59154); +SELECT fromModifiedJulianDay(d) FROM fromModifiedJulianDay_test; + +DROP TABLE fromModifiedJulianDay_test; diff --git a/parser/testdata/01545_url_file_format_settings/ast.json b/parser/testdata/01545_url_file_format_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01545_url_file_format_settings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01545_url_file_format_settings/metadata.json 
b/parser/testdata/01545_url_file_format_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01545_url_file_format_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01545_url_file_format_settings/query.sql b/parser/testdata/01545_url_file_format_settings/query.sql new file mode 100644 index 000000000..8563f242c --- /dev/null +++ b/parser/testdata/01545_url_file_format_settings/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-parallel + +set use_hive_partitioning=0; -- required because of "?query=select" + +create table file_delim(a int, b int) engine File(CSV, '01545_url_file_format_settings.csv') settings format_csv_delimiter = '|'; + +truncate table file_delim; + +insert into file_delim select 1, 2; + +-- select 1, 2 format CSV settings format_csv_delimiter='/'; +create table url_delim(a int, b int) engine URL('http://127.0.0.1:8123/?query=select%201%2C%202%20format%20CSV%20settings%20format_csv_delimiter%3D%27/%27%3B%0A', CSV) settings format_csv_delimiter = '/'; + +select * from file_delim; + +select * from url_delim; + +select * from file('01545_url_file_format_settings.csv', CSV, 'a int, b int') settings format_csv_delimiter = '|'; + +select * from url('http://127.0.0.1:8123/?query=select%201%2C%202%20format%20CSV%20settings%20format_csv_delimiter%3D%27/%27%3B%0A', CSV, 'a int, b int') settings format_csv_delimiter = '/'; + +drop table file_delim; +drop table url_delim; diff --git a/parser/testdata/01546_log_queries_min_query_duration_ms/ast.json b/parser/testdata/01546_log_queries_min_query_duration_ms/ast.json new file mode 100644 index 000000000..5129f7024 --- /dev/null +++ b/parser/testdata/01546_log_queries_min_query_duration_ms/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001168619, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01546_log_queries_min_query_duration_ms/metadata.json b/parser/testdata/01546_log_queries_min_query_duration_ms/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01546_log_queries_min_query_duration_ms/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01546_log_queries_min_query_duration_ms/query.sql b/parser/testdata/01546_log_queries_min_query_duration_ms/query.sql new file mode 100644 index 000000000..80b247a9c --- /dev/null +++ b/parser/testdata/01546_log_queries_min_query_duration_ms/query.sql @@ -0,0 +1,46 @@ +set log_queries_min_query_duration_ms=300000; +set log_query_threads=1; +set log_queries=1; + +-- +-- fast -- no logging +-- +select '01546_log_queries_min_query_duration_ms-fast' format Null; +system flush logs query_log, query_thread_log; + +-- No logging, since the query is fast enough. 
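+-- Hence both count() queries below are expected to return 0 from query_log and query_thread_log.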
+select count() +from system.query_log +where + query like 'select \'01546_log_queries_min_query_duration_ms-fast%' + and current_database = currentDatabase() + and event_date >= yesterday(); +select count() +from system.query_thread_log +where + query like 'select \'01546_log_queries_min_query_duration_ms-fast%' + and current_database = currentDatabase() + and event_date >= yesterday(); + +-- +-- slow -- query logged +-- +set log_queries_min_query_duration_ms=300; +select '01546_log_queries_min_query_duration_ms-slow', sleep(0.4) format Null; +system flush logs query_log, query_thread_log; + +-- With the limit on minimum execution time, "query start" and "exception before start" events are not logged, only query finish. +select count() +from system.query_log +where + query like 'select \'01546_log_queries_min_query_duration_ms-slow%' + and current_database = currentDatabase() + and event_date >= yesterday(); +-- There are at least two threads involved in a simple query +-- (one thread just waits for another, sigh) +select if(count() == 2, 'OK', 'Fail: ' || toString(count())) +from system.query_thread_log +where + query like 'select \'01546_log_queries_min_query_duration_ms-slow%' + and current_database = currentDatabase() + and event_date >= yesterday(); diff --git a/parser/testdata/01547_query_log_current_database/ast.json b/parser/testdata/01547_query_log_current_database/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01547_query_log_current_database/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01547_query_log_current_database/metadata.json b/parser/testdata/01547_query_log_current_database/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01547_query_log_current_database/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01547_query_log_current_database/query.sql b/parser/testdata/01547_query_log_current_database/query.sql new file mode 100644 index 000000000..7d6f5d392 --- /dev/null +++ b/parser/testdata/01547_query_log_current_database/query.sql @@ -0,0 +1,35 @@ +-- +-- This is a cleaner approach for writing a test that relies on system.query_log/query_thread_log. +-- +-- It uses the current database, and since clickhouse-test will generate a random +-- database for each run, you can run the test multiple times without worrying about +-- overlaps.
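+-- (currentDatabase() in the WHERE clauses below resolves to that per-run database, which is what isolates this run's log entries.)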
+-- +-- There is still event_date/event_time filter for better performance +-- (even though this is not relevant for runs on CI) +-- + +set log_query_threads=1; +set log_queries_min_type='QUERY_FINISH'; +set log_queries=1; +select '01547_query_log_current_database' from system.one format Null; +set log_queries=0; +set log_query_threads=0; + +system flush logs query_log, query_thread_log; + +select count() +from system.query_log +where + query like 'select \'01547_query_log_current_database%' + and current_database = currentDatabase() + and event_date >= yesterday(); + +-- at least two threads for processing +-- (but one just waits for another, sigh) +select count() == 2 +from system.query_thread_log +where + query like 'select \'01547\_query\_log\_current\_database%' + and current_database = currentDatabase() + and event_date >= yesterday() diff --git a/parser/testdata/01548_lzy305/ast.json b/parser/testdata/01548_lzy305/ast.json new file mode 100644 index 000000000..6b9f0e630 --- /dev/null +++ b/parser/testdata/01548_lzy305/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery fct_rt_dc_shop_sku_vender_day (children 1)" + }, + { + "explain": " Identifier fct_rt_dc_shop_sku_vender_day" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00121074, + "rows_read": 2, + "bytes_read": 110 + } +} diff --git a/parser/testdata/01548_lzy305/metadata.json b/parser/testdata/01548_lzy305/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01548_lzy305/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01548_lzy305/query.sql b/parser/testdata/01548_lzy305/query.sql new file mode 100644 index 000000000..e2929b1bb --- /dev/null +++ b/parser/testdata/01548_lzy305/query.sql @@ -0,0 +1,148 @@ +DROP TABLE IF EXISTS fct_rt_dc_shop_sku_vender_day; + +create table fct_rt_dc_shop_sku_vender_day +( + stat_year UInt16, + stat_month UInt32, + stat_day Date, + out_buid UInt8, + out_shop_id String, + in_shop_id LowCardinality(String), + datasource UInt8, + venderid String, + categorytreeid UInt8, + categoryid String, + goodsid LowCardinality(String), + logistics UInt8, + buntype UInt8, + dctype UInt8, + shopformid UInt8, + rt_qty Decimal(18,4), + rt_cost Decimal(18,4), + rt_taxcost Decimal(18,4), + rt_boxes Decimal(18,4), + rt_shops Nullable(String), + rt_drygood_qty Decimal(18,4), + rt_drygood_cost Decimal(18,4), + rt_drygood_boxes Decimal(18,4), + rt_drygood_shops LowCardinality(Nullable(String)), + rt_fresh_qty Decimal(18,4), + rt_fresh_cost Decimal(18,4), + rt_fresh_shops LowCardinality(Nullable(String)), + rt_supshop_cost Decimal(18,4), + rt_supshop_qty Decimal(18,4), + rt_supshop_boxes Decimal(18,4), + rt_supshop_shops LowCardinality(Nullable(String)), + rt_smallshop_cost Decimal(18,4), + rt_smallshop_qty Decimal(18,4), + rt_smallshop_boxes Decimal(18,4), + rt_smallshop_shops LowCardinality(Nullable(String)), + rt_dc_cost Decimal(18,4), + rt_dc_qty Decimal(18,4), + rt_dc_boxes Decimal(18,4), + rt_dc_shops LowCardinality(Nullable(String)), + rt_drygood_supshop_cost Decimal(18,4), + rt_drygood_supshop_qty Decimal(18,4), + rt_drygood_supshop_boxes Decimal(18,4), + rt_drygood_supshop_shops LowCardinality(Nullable(String)), + rt_drygood_smallshop_cost Decimal(18,4), + rt_drygood_smallshop_qty Decimal(18,4), + rt_drygood_smallshop_boxes Decimal(18,4), + rt_drygood_smallshop_shops LowCardinality(Nullable(String)), + rt_drygood_dc_cost Decimal(18,4), + 
rt_drygood_dc_qty Decimal(18,4), + rt_drygood_dc_boxes Decimal(18,4), + rt_drygood_dc_shops LowCardinality(Nullable(String)), + rt_fresh_supshop_cost Decimal(18,4), + rt_fresh_supshop_qty Decimal(18,4), + rt_fresh_supshop_shops LowCardinality(Nullable(String)), + rt_fresh_smallshop_cost Decimal(18,4), + rt_fresh_smallshop_qty Decimal(18,4), + rt_fresh_smallshop_shops LowCardinality(Nullable(String)), + rt_fresh_dc_cost Decimal(18,4), + rt_fresh_dc_qty Decimal(18,4), + rt_fresh_dc_shops LowCardinality(Nullable(String)), + stat_day_num String default formatDateTime(stat_day, '%F') +) +engine = MergeTree PARTITION BY toYYYYMM(stat_day) ORDER BY (stat_day, out_shop_id) SETTINGS index_granularity = 8192 +; + + +select stat_year, + stat_month, + out_buid, + out_shop_id, + in_shop_id, + datasource, + venderid, + categorytreeid, + categoryid, + goodsid, + logistics, + buntype, + dctype, + shopformid, + sum(rt_qty), + sum(rt_cost), + sum(rt_taxcost), + sum(rt_boxes), + max(rt_shops), + sum(rt_drygood_qty), + sum(rt_drygood_cost), + sum(rt_drygood_boxes), + max(rt_drygood_shops), + sum(rt_fresh_qty), + sum(rt_fresh_cost), + max(rt_fresh_shops), + sum(rt_supshop_cost), + sum(rt_supshop_qty), + sum(rt_supshop_boxes), + max(rt_supshop_shops), + sum(rt_smallshop_cost), + sum(rt_smallshop_qty), + sum(rt_smallshop_boxes), + max(rt_smallshop_shops), + sum(rt_dc_cost), + sum(rt_dc_qty), + sum(rt_dc_boxes), + max(rt_dc_shops), + sum(rt_drygood_supshop_cost), + sum(rt_drygood_supshop_qty), + sum(rt_drygood_supshop_boxes), + max(rt_drygood_supshop_shops), + sum(rt_drygood_smallshop_cost), + sum(rt_drygood_smallshop_qty), + sum(rt_drygood_smallshop_boxes), + max(rt_drygood_smallshop_shops), + sum(rt_drygood_dc_cost), + sum(rt_drygood_dc_qty), + sum(rt_drygood_dc_boxes), + max(rt_drygood_dc_shops), + sum(rt_fresh_supshop_cost), + sum(rt_fresh_supshop_qty), + max(rt_fresh_supshop_shops), + sum(rt_fresh_smallshop_cost), + sum(rt_fresh_smallshop_qty), + max(rt_fresh_smallshop_shops), + sum(rt_fresh_dc_cost), + sum(rt_fresh_dc_qty), + max(rt_fresh_dc_shops) +from fct_rt_dc_shop_sku_vender_day frdssvd +where stat_day >= toDate('2016-01-01') + and stat_day < addMonths(toDate('2016-01-01'), 1) +group by stat_year, + stat_month, + out_buid, + out_shop_id, + in_shop_id, + datasource, + venderid, + categorytreeid, + categoryid, + goodsid, + logistics, + buntype, + dctype, + shopformid; + +DROP TABLE fct_rt_dc_shop_sku_vender_day; diff --git a/parser/testdata/01548_uncomparable_columns_in_keys/ast.json b/parser/testdata/01548_uncomparable_columns_in_keys/ast.json new file mode 100644 index 000000000..de38c15d3 --- /dev/null +++ b/parser/testdata/01548_uncomparable_columns_in_keys/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery uncomparable_keys (children 1)" + }, + { + "explain": " Identifier uncomparable_keys" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001397724, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/01548_uncomparable_columns_in_keys/metadata.json b/parser/testdata/01548_uncomparable_columns_in_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01548_uncomparable_columns_in_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01548_uncomparable_columns_in_keys/query.sql b/parser/testdata/01548_uncomparable_columns_in_keys/query.sql new file mode 100644 index 000000000..6b8d1c010 --- /dev/null +++ 
b/parser/testdata/01548_uncomparable_columns_in_keys/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS uncomparable_keys; + +CREATE TABLE foo (id UInt64, key AggregateFunction(max, UInt64)) ENGINE MergeTree ORDER BY key; --{serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} + +CREATE TABLE foo (id UInt64, key AggregateFunction(max, UInt64)) ENGINE MergeTree PARTITION BY key; --{serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} + +CREATE TABLE foo (id UInt64, key AggregateFunction(max, UInt64)) ENGINE MergeTree ORDER BY (key) SAMPLE BY key; --{serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} + +DROP TABLE IF EXISTS uncomparable_keys; diff --git a/parser/testdata/01548_with_totals_having/ast.json b/parser/testdata/01548_with_totals_having/ast.json new file mode 100644 index 000000000..f805a20c8 --- /dev/null +++ b/parser/testdata/01548_with_totals_having/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function lessOrEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001423765, + "rows_read": 22, + "bytes_read": 840 + } +} diff --git a/parser/testdata/01548_with_totals_having/metadata.json b/parser/testdata/01548_with_totals_having/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01548_with_totals_having/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01548_with_totals_having/query.sql b/parser/testdata/01548_with_totals_having/query.sql new file mode 100644 index 000000000..a4ee7468e --- /dev/null +++ b/parser/testdata/01548_with_totals_having/query.sql @@ -0,0 +1,2 @@ +SELECT * FROM numbers(4) GROUP BY number WITH TOTALS HAVING sum(number) <= arrayJoin([]); -- { serverError ILLEGAL_COLUMN, 59 } +SELECT * FROM numbers(4) GROUP BY number WITH TOTALS HAVING sum(number) <= arrayJoin([3, 2, 1, 0]) ORDER BY number; -- { serverError ILLEGAL_COLUMN } diff --git a/parser/testdata/01549_low_cardinality_materialized_view/ast.json b/parser/testdata/01549_low_cardinality_materialized_view/ast.json new file mode 100644 index 000000000..9b4c282c4 --- /dev/null +++ b/parser/testdata/01549_low_cardinality_materialized_view/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery HASH_MV (children 1)" + }, + { + "explain": " Identifier HASH_MV" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 
0.001202363, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/01549_low_cardinality_materialized_view/metadata.json b/parser/testdata/01549_low_cardinality_materialized_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01549_low_cardinality_materialized_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01549_low_cardinality_materialized_view/query.sql b/parser/testdata/01549_low_cardinality_materialized_view/query.sql new file mode 100644 index 000000000..a522748b0 --- /dev/null +++ b/parser/testdata/01549_low_cardinality_materialized_view/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS HASH_MV; +DROP TABLE IF EXISTS HASH_TEST_INSERT; + +CREATE TABLE HASH_TEST_INSERT (STR_VAL String) ENGINE = Null; +CREATE MATERIALIZED VIEW HASH_MV (HASH_VAL UInt64, STR_VAL LowCardinality(String)) ENGINE = ReplacingMergeTree ORDER BY HASH_VAL AS SELECT xxHash64(STR_VAL) AS HASH_VAL, toLowCardinality(STR_VAL) AS STR_VAL FROM HASH_TEST_INSERT; +INSERT INTO HASH_TEST_INSERT VALUES ('test'); + +SELECT * FROM HASH_MV; +DESC (SELECT * FROM HASH_MV); + +DROP TABLE HASH_MV; +DROP TABLE HASH_TEST_INSERT; diff --git a/parser/testdata/01549_low_cardinality_mv_fuzz/ast.json b/parser/testdata/01549_low_cardinality_mv_fuzz/ast.json new file mode 100644 index 000000000..b623129de --- /dev/null +++ b/parser/testdata/01549_low_cardinality_mv_fuzz/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001020578, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01549_low_cardinality_mv_fuzz/metadata.json b/parser/testdata/01549_low_cardinality_mv_fuzz/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01549_low_cardinality_mv_fuzz/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01549_low_cardinality_mv_fuzz/query.sql b/parser/testdata/01549_low_cardinality_mv_fuzz/query.sql new file mode 100644 index 000000000..32a6fccc8 --- /dev/null +++ b/parser/testdata/01549_low_cardinality_mv_fuzz/query.sql @@ -0,0 +1,7 @@ +set extremes = '1'; +DROP TABLE IF EXISTS HASH_TEST_INSERT; +DROP TABLE IF EXISTS HASH_MV; +CREATE TABLE HASH_TEST_INSERT (`STR_VAL` String) ENGINE = Null; +CREATE MATERIALIZED VIEW HASH_MV (`HASH_VAL` UInt64, `STR_VAL` LowCardinality(String)) ENGINE = ReplacingMergeTree ORDER BY HASH_VAL AS SELECT xxHash64(STR_VAL) AS HASH_VAL, toLowCardinality(STR_VAL) AS STR_VAL FROM HASH_TEST_INSERT; +INSERT INTO HASH_TEST_INSERT VALUES ('a'); + diff --git a/parser/testdata/01550_create_map_type/ast.json b/parser/testdata/01550_create_map_type/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01550_create_map_type/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01550_create_map_type/metadata.json b/parser/testdata/01550_create_map_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01550_create_map_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01550_create_map_type/query.sql b/parser/testdata/01550_create_map_type/query.sql new file mode 100644 index 000000000..8dfc1bfbd --- /dev/null +++ b/parser/testdata/01550_create_map_type/query.sql @@ -0,0 +1,78 @@ +-- String type +drop table if exists table_map; +create table table_map (a Map(String, String)) 
engine = Memory; +insert into table_map values ({'name':'zhangsan', 'gender':'male'}), ({'name':'lisi', 'gender':'female'}); +select a['name'] from table_map; +drop table if exists table_map; + + +drop table if exists table_map; +create table table_map (a Map(String, UInt64)) engine = MergeTree() order by a SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into table_map select map('key1', number, 'key2', number * 2) from numbers(1111, 3); +select a['key1'], a['key2'] from table_map; +drop table if exists table_map; + +-- MergeTree Engine +drop table if exists table_map; +create table table_map (a Map(String, String), b String) engine = MergeTree() order by a SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into table_map values ({'name':'zhangsan', 'gender':'male'}, 'name'), ({'name':'lisi', 'gender':'female'}, 'gender'); +select a[b] from table_map; +select b from table_map where a = map('name','lisi', 'gender', 'female'); +drop table if exists table_map; + +-- Big Integer type + +create table table_map (d DATE, m Map(Int8, UInt256)) ENGINE = MergeTree() order by d SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into table_map values ('2020-01-01', map(1, 0, 2, 1)); +select * from table_map; +drop table table_map; + +-- Integer type + +create table table_map (d DATE, m Map(Int8, Int8)) ENGINE = MergeTree() order by d SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into table_map values ('2020-01-01', map(1, 0, 2, -1)); +select * from table_map; +drop table table_map; + +-- Unsigned Int type +drop table if exists table_map; +create table table_map(a Map(UInt8, UInt64), b UInt8) Engine = MergeTree() order by b SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into table_map select map(number, number+5), number from numbers(1111,4); +select a[b] from table_map; +drop table if exists table_map; + + +-- Array Type +drop table if exists table_map; +create table table_map(a Map(String, Array(UInt8))) Engine = MergeTree() order by a SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into table_map values(map('k1', [1,2,3], 'k2', [4,5,6])), (map('k0', [], 'k1', [100,20,90])); +insert into table_map select map('k1', [number, number + 2, number * 2]) from numbers(6); +insert into table_map select map('k2', [number, number + 2, number * 2]) from numbers(6); +select a['k1'] as col1 from table_map order by col1; +drop table if exists table_map; + +SELECT CAST(([1, 2, 3], ['1', '2', 'foo']), 'Map(UInt8, String)') AS map, map[1]; + +CREATE TABLE table_map (n UInt32, m Map(String, Int)) +ENGINE = MergeTree ORDER BY n SETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192, index_granularity_bytes = '10Mi'; + +-- conversion from Tuple(Array(K), Array(V)) +INSERT INTO table_map SELECT number, (arrayMap(x -> toString(x), range(number % 10 + 2)), range(number % 10 + 2)) FROM numbers(100000); +-- conversion from Array(Tuple(K, V)) +INSERT INTO table_map SELECT number, arrayMap(x -> (toString(x), x), range(number % 10 + 2)) FROM numbers(100000); +SELECT sum(m['1']), sum(m['7']), sum(m['100']) FROM table_map; + +DROP TABLE IF EXISTS table_map; + +CREATE TABLE table_map (n UInt32, m Map(String, Int)) +ENGINE = MergeTree ORDER BY n SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +-- conversion from Tuple(Array(K), Array(V)) +INSERT INTO table_map SELECT number, (arrayMap(x -> toString(x), range(number % 10 + 2)),
range(number % 10 + 2)) FROM numbers(100000); +-- conversion from Array(Tuple(K, V)) +INSERT INTO table_map SELECT number, arrayMap(x -> (toString(x), x), range(number % 10 + 2)) FROM numbers(100000); +SELECT sum(m['1']), sum(m['7']), sum(m['100']) FROM table_map; + +DROP TABLE IF EXISTS table_map; + +SELECT CAST(([2, 1, 1023], ['', '']), 'Map(UInt8, String)') AS map, map[10] -- { serverError TYPE_MISMATCH } diff --git a/parser/testdata/01550_mutation_subquery/ast.json b/parser/testdata/01550_mutation_subquery/ast.json new file mode 100644 index 000000000..dc4e76503 --- /dev/null +++ b/parser/testdata/01550_mutation_subquery/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001458785, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01550_mutation_subquery/metadata.json b/parser/testdata/01550_mutation_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01550_mutation_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01550_mutation_subquery/query.sql b/parser/testdata/01550_mutation_subquery/query.sql new file mode 100644 index 000000000..811c5eb4d --- /dev/null +++ b/parser/testdata/01550_mutation_subquery/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS t; + +CREATE TABLE t(`id` String, `dealer_id` String) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity = 8192; +insert into t(id, dealer_id) values('1','2'); +SELECT * FROM t; +SET mutations_sync = 1; +ALTER TABLE t DELETE WHERE id in (select id from t as tmp); +SELECT '---'; +SELECT * FROM t; + +DROP TABLE t; diff --git a/parser/testdata/01550_type_map_formats/ast.json b/parser/testdata/01550_type_map_formats/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01550_type_map_formats/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01550_type_map_formats/metadata.json b/parser/testdata/01550_type_map_formats/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01550_type_map_formats/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01550_type_map_formats/query.sql b/parser/testdata/01550_type_map_formats/query.sql new file mode 100644 index 000000000..eafa3ac79 --- /dev/null +++ b/parser/testdata/01550_type_map_formats/query.sql @@ -0,0 +1,22 @@ +-- Tags: log-engine +SET output_format_write_statistics = 0; +SET output_format_json_pretty_print = 0; + +DROP TABLE IF EXISTS map_formats; +CREATE TABLE map_formats (m Map(String, UInt32), m1 Map(String, Date), m2 Map(String, Array(UInt32))) ENGINE = Log; + +INSERT INTO map_formats VALUES(map('k1', 1, 'k2', 2, 'k3', 3), map('k1', toDate('2020-05-05')), map('k1', [], 'k2', [7, 8])); +INSERT INTO map_formats VALUES(map('k1', 10, 'k3', 30), map('k2', toDate('2020-06-06')), map()); + +SELECT 'JSON'; +SELECT * FROM map_formats ORDER BY m['k1'] FORMAT JSON; +SELECT 'JSONEachRow'; +SELECT * FROM map_formats ORDER BY m['k1'] FORMAT JSONEachRow; +SELECT 'CSV'; +SELECT * FROM map_formats ORDER BY m['k1'] FORMAT CSV; +SELECT 'TSV'; +SELECT * FROM map_formats ORDER BY m['k1'] FORMAT TSV; +SELECT 'TSKV'; +SELECT * FROM map_formats ORDER BY m['k1'] FORMAT TSKV; + +DROP TABLE map_formats; diff --git a/parser/testdata/01551_context_uaf/ast.json
b/parser/testdata/01551_context_uaf/ast.json new file mode 100644 index 000000000..e66cb2bf6 --- /dev/null +++ b/parser/testdata/01551_context_uaf/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery f (children 1)" + }, + { + "explain": " Identifier f" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001460242, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01551_context_uaf/metadata.json b/parser/testdata/01551_context_uaf/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01551_context_uaf/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01551_context_uaf/query.sql b/parser/testdata/01551_context_uaf/query.sql new file mode 100644 index 000000000..03a6c1c49 --- /dev/null +++ b/parser/testdata/01551_context_uaf/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS f; +DROP TABLE IF EXISTS v; + +create table f(s String) engine File(TSV, '/dev/null'); +create view v as (select * from f); +select * from v; -- was failing a long time ago +select * from merge('', 'f'); -- was failing a long time ago + +DROP TABLE f; +DROP TABLE v; diff --git a/parser/testdata/01551_mergetree_read_in_order_spread/ast.json b/parser/testdata/01551_mergetree_read_in_order_spread/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01551_mergetree_read_in_order_spread/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01551_mergetree_read_in_order_spread/metadata.json b/parser/testdata/01551_mergetree_read_in_order_spread/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01551_mergetree_read_in_order_spread/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01551_mergetree_read_in_order_spread/query.sql b/parser/testdata/01551_mergetree_read_in_order_spread/query.sql new file mode 100644 index 000000000..b5ece0819 --- /dev/null +++ b/parser/testdata/01551_mergetree_read_in_order_spread/query.sql @@ -0,0 +1,21 @@ +-- Tags: no-object-storage, no-random-merge-tree-settings + +DROP TABLE IF EXISTS data_01551; + +CREATE TABLE data_01551 +( + key UInt32 +) engine=AggregatingMergeTree() +PARTITION BY key%2 +ORDER BY (key, key/2) +SETTINGS index_granularity=10, index_granularity_bytes='10Mi'; + +SET optimize_trivial_insert_select = 1; +INSERT INTO data_01551 SELECT number FROM numbers(100000); +SET max_threads=3; +SET merge_tree_min_rows_for_concurrent_read=10000; +SET optimize_aggregation_in_order=1; +SET read_in_order_two_level_merge_threshold=1; +EXPLAIN PIPELINE SELECT key FROM data_01551 GROUP BY key, key/2; + +DROP TABLE data_01551; diff --git a/parser/testdata/01552_alter_name_collision/ast.json b/parser/testdata/01552_alter_name_collision/ast.json new file mode 100644 index 000000000..ba53dc209 --- /dev/null +++ b/parser/testdata/01552_alter_name_collision/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00127481, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01552_alter_name_collision/metadata.json b/parser/testdata/01552_alter_name_collision/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++
b/parser/testdata/01552_alter_name_collision/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01552_alter_name_collision/query.sql b/parser/testdata/01552_alter_name_collision/query.sql new file mode 100644 index 000000000..dc717f107 --- /dev/null +++ b/parser/testdata/01552_alter_name_collision/query.sql @@ -0,0 +1,3 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test(test String DEFAULT 'test', test_tmp Int DEFAULT 1)ENGINE = Memory; +DROP TABLE test; diff --git a/parser/testdata/01552_dict_fixedstring/ast.json b/parser/testdata/01552_dict_fixedstring/ast.json new file mode 100644 index 000000000..d4c4bfa13 --- /dev/null +++ b/parser/testdata/01552_dict_fixedstring/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery src (children 1)" + }, + { + "explain": " Identifier src" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00120363, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01552_dict_fixedstring/metadata.json b/parser/testdata/01552_dict_fixedstring/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01552_dict_fixedstring/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01552_dict_fixedstring/query.sql b/parser/testdata/01552_dict_fixedstring/query.sql new file mode 100644 index 000000000..0b19c9980 --- /dev/null +++ b/parser/testdata/01552_dict_fixedstring/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS src; + +CREATE TABLE src (k UInt64, s FixedString(11)) ENGINE = Memory; +INSERT INTO src VALUES (1, 'Hello\0World'); + +DROP DICTIONARY IF EXISTS dict; +CREATE DICTIONARY dict +( + k UInt64, + s String +) +PRIMARY KEY k +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER default TABLE 'src')) +LAYOUT(FLAT) +LIFETIME(MIN 10 MAX 10); + +SELECT dictGet(currentDatabase() || '.dict', 's', number) FROM numbers(2); + +DROP DICTIONARY dict; +DROP TABLE src; diff --git a/parser/testdata/01552_impl_aggfunc_cloneresize/ast.json b/parser/testdata/01552_impl_aggfunc_cloneresize/ast.json new file mode 100644 index 000000000..670a3878d --- /dev/null +++ b/parser/testdata/01552_impl_aggfunc_cloneresize/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_bm (children 1)" + }, + { + "explain": " Identifier test_bm" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001100115, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/01552_impl_aggfunc_cloneresize/metadata.json b/parser/testdata/01552_impl_aggfunc_cloneresize/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01552_impl_aggfunc_cloneresize/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01552_impl_aggfunc_cloneresize/query.sql b/parser/testdata/01552_impl_aggfunc_cloneresize/query.sql new file mode 100644 index 000000000..849a4378e --- /dev/null +++ b/parser/testdata/01552_impl_aggfunc_cloneresize/query.sql @@ -0,0 +1,57 @@ +drop table if EXISTS test_bm; + +drop table if EXISTS test_bm_join; + +create table test_bm( + dim UInt64, + id UInt64 ) +ENGINE = MergeTree() +ORDER BY( dim, id ) +SETTINGS index_granularity = 8192; + +create table test_bm_join( + dim UInt64, + id UInt64 ) +ENGINE = MergeTree() +ORDER BY(dim,id) +SETTINGS index_granularity = 8192; + +insert into test_bm VALUES (1,1),(2,2),(3,3),(4,4); + +select + dim , + 
sum(idnum) +from + test_bm_join +right join( + select + dim, + bitmapOrCardinality(ids,ids2) as idnum + from + ( + select + dim, + groupBitmapState(toUInt64(id)) as ids + FROM + test_bm + where + dim >2 + group by + dim ) A all + right join ( + select + dim, + groupBitmapState(toUInt64(id)) as ids2 + FROM + test_bm + where + dim < 2 + group by + dim ) B + using(dim) ) C +using(dim) +group by dim; + +drop table test_bm; + +drop table test_bm_join; diff --git a/parser/testdata/01553_datetime64_comparison/ast.json b/parser/testdata/01553_datetime64_comparison/ast.json new file mode 100644 index 000000000..f4903cbd5 --- /dev/null +++ b/parser/testdata/01553_datetime64_comparison/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery datetime64_cmp (children 1)" + }, + { + "explain": " Identifier datetime64_cmp" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001507591, + "rows_read": 2, + "bytes_read": 81 + } +} diff --git a/parser/testdata/01553_datetime64_comparison/metadata.json b/parser/testdata/01553_datetime64_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01553_datetime64_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01553_datetime64_comparison/query.sql b/parser/testdata/01553_datetime64_comparison/query.sql new file mode 100644 index 000000000..bc7f9a570 --- /dev/null +++ b/parser/testdata/01553_datetime64_comparison/query.sql @@ -0,0 +1,21 @@ +CREATE TABLE datetime64_cmp +( + dt6 DateTime64(6, 'UTC'), + dt3 DateTime64(3, 'UTC') +) ENGINE = Memory; + +INSERT INTO datetime64_cmp +VALUES ('2019-09-16 19:20:33.123000', '2019-09-16 19:20:33.123'), ('2019-09-16 19:20:33.123456', '2015-05-18 07:40:29.123'), ('2015-05-18 07:40:29.123456', '2019-09-16 19:20:33.123'); + +-- Compare equal and unequal values of different precision/scale +SELECT + dt6, dt3, + dt6 > dt3, + dt6 >= dt3, + dt6 = dt3, + dt6 <= dt3, + dt6 < dt3, + dt6 != dt3 +FROM datetime64_cmp +ORDER BY + dt6, dt3; diff --git a/parser/testdata/01553_settings_early_apply/ast.json b/parser/testdata/01553_settings_early_apply/ast.json new file mode 100644 index 000000000..603f48ef1 --- /dev/null +++ b/parser/testdata/01553_settings_early_apply/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001180085, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01553_settings_early_apply/metadata.json b/parser/testdata/01553_settings_early_apply/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01553_settings_early_apply/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01553_settings_early_apply/query.sql b/parser/testdata/01553_settings_early_apply/query.sql new file mode 100644 index 000000000..821e09f66 --- /dev/null +++ b/parser/testdata/01553_settings_early_apply/query.sql @@ -0,0 +1,15 @@ +set output_format_pretty_display_footer_column_names=0; +set output_format_write_statistics=0; + +select * from numbers(100) settings max_result_rows = 1; -- { serverError TOO_MANY_ROWS_OR_BYTES } +select * from numbers(100) FORMAT JSON settings max_result_rows = 1; -- { serverError TOO_MANY_ROWS_OR_BYTES } +select * from numbers(100) FORMAT TSVWithNamesAndTypes settings max_result_rows = 1; -- { serverError
TOO_MANY_ROWS_OR_BYTES } +select * from numbers(100) FORMAT CSVWithNamesAndTypes settings max_result_rows = 1; -- { serverError TOO_MANY_ROWS_OR_BYTES } +select * from numbers(100) FORMAT JSONCompactEachRowWithNamesAndTypes settings max_result_rows = 1; -- { serverError TOO_MANY_ROWS_OR_BYTES } +select * from numbers(100) FORMAT XML settings max_result_rows = 1; -- { serverError TOO_MANY_ROWS_OR_BYTES } + +SET max_result_rows = 1; +select * from numbers(10); -- { serverError TOO_MANY_ROWS_OR_BYTES } +select * from numbers(10) SETTINGS result_overflow_mode = 'break', max_block_size = 1 FORMAT PrettySpaceNoEscapes; +select * from numbers(10) settings max_result_rows = 10; +select * from numbers(10) FORMAT JSONCompact settings max_result_rows = 10, output_format_write_statistics = 0; diff --git a/parser/testdata/01554_bloom_filter_index_big_integer_uuid/ast.json b/parser/testdata/01554_bloom_filter_index_big_integer_uuid/ast.json new file mode 100644 index 000000000..1c9d9388c --- /dev/null +++ b/parser/testdata/01554_bloom_filter_index_big_integer_uuid/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery 01154_test (children 3)" + }, + { + "explain": " Identifier 01154_test" + }, + { + "explain": " Columns definition (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType Int128" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Index (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function bloom_filter (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_0.01" + }, + { + "explain": " Storage definition (children 3)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Set" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001319607, + "rows_read": 17, + "bytes_read": 571 + } +} diff --git a/parser/testdata/01554_bloom_filter_index_big_integer_uuid/metadata.json b/parser/testdata/01554_bloom_filter_index_big_integer_uuid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01554_bloom_filter_index_big_integer_uuid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01554_bloom_filter_index_big_integer_uuid/query.sql b/parser/testdata/01554_bloom_filter_index_big_integer_uuid/query.sql new file mode 100644 index 000000000..f82fe39f4 --- /dev/null +++ b/parser/testdata/01554_bloom_filter_index_big_integer_uuid/query.sql @@ -0,0 +1,23 @@ +CREATE TABLE 01154_test (x Int128, INDEX ix_x x TYPE bloom_filter(0.01) GRANULARITY 1) ENGINE = MergeTree() ORDER BY x SETTINGS index_granularity=8192; +INSERT INTO 01154_test VALUES (1), (2), (3); +SELECT x FROM 01154_test WHERE x = 1; +SELECT x FROM 01154_test WHERE x IN (1, 2); +DROP TABLE 01154_test; + +CREATE TABLE 01154_test (x Int256, INDEX ix_x x TYPE bloom_filter(0.01) GRANULARITY 1) ENGINE = MergeTree() ORDER BY x SETTINGS index_granularity=8192; +INSERT INTO 01154_test VALUES (1), (2), (3); +SELECT x FROM 01154_test WHERE x = 1; +SELECT x FROM 01154_test WHERE x IN (1, 2); +DROP TABLE 01154_test; + +CREATE TABLE 01154_test (x UInt256, INDEX ix_x x TYPE bloom_filter(0.01) GRANULARITY 1) ENGINE = MergeTree() ORDER BY x SETTINGS index_granularity=8192; +INSERT INTO 01154_test 
VALUES (1), (2), (3); +SELECT x FROM 01154_test WHERE x = 1; +SELECT x FROM 01154_test WHERE x IN (1, 2); +DROP TABLE 01154_test; + +CREATE TABLE 01154_test (x UUID, INDEX ix_x x TYPE bloom_filter(0.01) GRANULARITY 1) ENGINE = MergeTree() ORDER BY x SETTINGS index_granularity=8192; +INSERT INTO 01154_test VALUES (toUUID('00000000-0000-0000-0000-000000000001')), (toUUID('00000000-0000-0000-0000-000000000002')), (toUUID('00000000-0000-0000-0000-000000000003')); +SELECT x FROM 01154_test WHERE x = toUUID('00000000-0000-0000-0000-000000000001'); +SELECT x FROM 01154_test WHERE x IN (toUUID('00000000-0000-0000-0000-000000000001'), toUUID('00000000-0000-0000-0000-000000000002')); +DROP TABLE 01154_test; diff --git a/parser/testdata/01554_interpreter_integer_float/ast.json b/parser/testdata/01554_interpreter_integer_float/ast.json new file mode 100644 index 000000000..2afa5bed4 --- /dev/null +++ b/parser/testdata/01554_interpreter_integer_float/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function reinterpretAsFloat32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_123456" + }, + { + "explain": " Literal 'UInt32'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001737097, + "rows_read": 10, + "bytes_read": 394 + } +} diff --git a/parser/testdata/01554_interpreter_integer_float/metadata.json b/parser/testdata/01554_interpreter_integer_float/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01554_interpreter_integer_float/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01554_interpreter_integer_float/query.sql b/parser/testdata/01554_interpreter_integer_float/query.sql new file mode 100644 index 000000000..69b00fc68 --- /dev/null +++ b/parser/testdata/01554_interpreter_integer_float/query.sql @@ -0,0 +1,8 @@ +SELECT reinterpretAsFloat32(CAST(123456 AS UInt32)); +SELECT reinterpretAsUInt32(CAST(1.23456 AS Float32)); +SELECT reinterpretAsFloat32(CAST(123456 AS Int32)); +SELECT reinterpretAsInt32(CAST(1.23456 AS Float32)); +SELECT reinterpretAsFloat64(CAST(123456 AS UInt64)); +SELECT reinterpretAsUInt64(CAST(1.23456 AS Float64)); +SELECT reinterpretAsFloat64(CAST(123456 AS Int64)); +SELECT reinterpretAsInt64(CAST(1.23456 AS Float64)); diff --git a/parser/testdata/01555_or_fill/ast.json b/parser/testdata/01555_or_fill/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01555_or_fill/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01555_or_fill/metadata.json b/parser/testdata/01555_or_fill/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01555_or_fill/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01555_or_fill/query.sql b/parser/testdata/01555_or_fill/query.sql new file mode 100644 index 000000000..a2da07d0a --- /dev/null +++ b/parser/testdata/01555_or_fill/query.sql @@ -0,0 +1,22 @@ +SELECT + count(), + countOrNull(), + sum(x), + sumOrNull(x) +FROM +( + SELECT number AS x + FROM numbers(10) + WHERE number > 10 +); + +SELECT + 
count(), + countOrNull(), + sum(x), + sumOrNull(x) +FROM +( + SELECT 1 AS x + WHERE 0 +); diff --git a/parser/testdata/01555_system_distribution_queue_mask/ast.json b/parser/testdata/01555_system_distribution_queue_mask/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01555_system_distribution_queue_mask/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01555_system_distribution_queue_mask/metadata.json b/parser/testdata/01555_system_distribution_queue_mask/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01555_system_distribution_queue_mask/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01555_system_distribution_queue_mask/query.sql b/parser/testdata/01555_system_distribution_queue_mask/query.sql new file mode 100644 index 000000000..3c14eccb9 --- /dev/null +++ b/parser/testdata/01555_system_distribution_queue_mask/query.sql @@ -0,0 +1,50 @@ + +-- force data path with the user/pass in it +set use_compact_format_in_distributed_parts_names=0; +-- use async send even for localhost +set prefer_localhost_replica=0; + +drop table if exists dist_01555; +drop table if exists data_01555; +create table data_01555 (key Int) Engine=Null(); + +-- +-- masked flush only +-- +SELECT 'masked flush only'; +create table dist_01555 (key Int) Engine=Distributed(test_cluster_with_incorrect_pw, currentDatabase(), data_01555, key); +system stop distributed sends dist_01555; + +insert into dist_01555 values (1)(2); +-- since test_cluster_with_incorrect_pw contains an incorrect password, ignore the error +system flush distributed dist_01555; -- { serverError AUTHENTICATION_FAILED } +select length(splitByChar('*', data_path)), replaceRegexpOne(data_path, '^.*/([^/]*)/' , '\\1'), extract(last_exception, 'AUTHENTICATION_FAILED'), dateDiff('s', last_exception_time, now()) < 3600 from system.distribution_queue where database = currentDatabase() and table = 'dist_01555' format CSV; + +drop table dist_01555; + +-- +-- masked +-- +SELECT 'masked'; +create table dist_01555 (key Int) Engine=Distributed(test_cluster_with_incorrect_pw, currentDatabase(), data_01555, key); + +insert into dist_01555 values (1)(2); +-- since test_cluster_with_incorrect_pw contains an incorrect password, ignore the error +system flush distributed dist_01555; -- { serverError AUTHENTICATION_FAILED } +select length(splitByChar('*', data_path)), replaceRegexpOne(data_path, '^.*/([^/]*)/' , '\\1'), extract(last_exception, 'AUTHENTICATION_FAILED'), dateDiff('s', last_exception_time, now()) < 3600 from system.distribution_queue where database = currentDatabase() and table = 'dist_01555' format CSV; + +drop table dist_01555; + +-- +-- no masking +-- +SELECT 'no masking'; +create table dist_01555 (key Int) Engine=Distributed(test_shard_localhost, currentDatabase(), data_01555, key); + +insert into dist_01555 values (1)(2); +system flush distributed dist_01555; +select length(splitByChar('*', data_path)), replaceRegexpOne(data_path, '^.*/([^/]*)/' , '\\1') from system.distribution_queue where database = currentDatabase() and table = 'dist_01555' format CSV; + +-- cleanup +drop table dist_01555; +drop table data_01555; diff --git a/parser/testdata/01556_accurate_cast_or_null/ast.json b/parser/testdata/01556_accurate_cast_or_null/ast.json new file mode 100644 index 000000000..858680cac --- /dev/null +++ b/parser/testdata/01556_accurate_cast_or_null/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + }
+ ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function accurateCastOrNull (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Literal 'UInt8'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001320581, + "rows_read": 8, + "bytes_read": 300 + } +} diff --git a/parser/testdata/01556_accurate_cast_or_null/metadata.json b/parser/testdata/01556_accurate_cast_or_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01556_accurate_cast_or_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01556_accurate_cast_or_null/query.sql b/parser/testdata/01556_accurate_cast_or_null/query.sql new file mode 100644 index 000000000..15ac71dea --- /dev/null +++ b/parser/testdata/01556_accurate_cast_or_null/query.sql @@ -0,0 +1,75 @@ +SELECT accurateCastOrNull(-1, 'UInt8'); +SELECT accurateCastOrNull(5, 'UInt8'); +SELECT accurateCastOrNull(257, 'UInt8'); +SELECT accurateCastOrNull(-1, 'UInt16'); +SELECT accurateCastOrNull(5, 'UInt16'); +SELECT accurateCastOrNull(65536, 'UInt16'); +SELECT accurateCastOrNull(-1, 'UInt32'); +SELECT accurateCastOrNull(5, 'UInt32'); +SELECT accurateCastOrNull(4294967296, 'UInt32'); +SELECT accurateCastOrNull(-1, 'UInt64'); +SELECT accurateCastOrNull(5, 'UInt64'); +SELECT accurateCastOrNull(-1, 'UInt256'); +SELECT accurateCastOrNull(5, 'UInt256'); + +SELECT accurateCastOrNull(-129, 'Int8'); +SELECT accurateCastOrNull(5, 'Int8'); +SELECT accurateCastOrNull(128, 'Int8'); + +SELECT accurateCastOrNull(10, 'Decimal32(9)'); +SELECT accurateCastOrNull(1, 'Decimal32(9)'); +SELECT accurateCastOrNull(-10, 'Decimal32(9)'); + +SELECT accurateCastOrNull('123', 'FixedString(2)'); + +SELECT accurateCastOrNull(inf, 'Int64'); +SELECT accurateCastOrNull(inf, 'Int128'); +SELECT accurateCastOrNull(inf, 'Int256'); +SELECT accurateCastOrNull(nan, 'Int64'); +SELECT accurateCastOrNull(nan, 'Int128'); +SELECT accurateCastOrNull(nan, 'Int256'); + +SELECT accurateCastOrNull(inf, 'UInt64'); +SELECT accurateCastOrNull(inf, 'UInt256'); +SELECT accurateCastOrNull(nan, 'UInt64'); +SELECT accurateCastOrNull(nan, 'UInt256'); + +SELECT accurateCastOrNull(number + 127, 'Int8') AS x FROM numbers (2) ORDER BY x; + +SELECT accurateCastOrNull(-1, 'DateTime'); +SELECT accurateCastOrNull(5000000000, 'DateTime'); +SELECT accurateCastOrNull('1xxx', 'DateTime'); +SELECT toString(accurateCastOrNull('2023-05-30 14:38:20', 'DateTime'), timezone()); +SELECT toString(accurateCastOrNull(19, 'DateTime'), 'UTC'); +SELECT toString(accurateCastOrNull(70000, 'DateTime'), 'UTC'); +-- need fixed timezone in these two lines +SELECT toString(accurateCastOrNull('1965-05-30 14:38:20', 'DateTime'), timezone()) SETTINGS session_timezone = 'UTC'; +SELECT toString(accurateCastOrNull('2223-05-30 14:38:20', 'DateTime'), timezone()) SETTINGS session_timezone = 'UTC'; + +SELECT accurateCastOrNull(-1, 'Date'); +SELECT accurateCastOrNull(5000000000, 'Date'); +SELECT accurateCastOrNull('1xxx', 'Date'); +SELECT accurateCastOrNull('2023-05-30', 'Date'); +SELECT accurateCastOrNull('2180-01-01', 'Date'); +SELECT accurateCastOrNull(19, 'Date'); + +select accurateCastOrNull('test', 'Bool'); +select accurateCastOrNull('truex', 'Bool'); +select accurateCastOrNull('xfalse', 'Bool'); +select 
accurateCastOrNull('true', 'Bool'); +select accurateCastOrNull('false', 'Bool'); +select accurateCastOrNull('1', 'Bool'); +select accurateCastOrNull('0', 'Bool'); +select accurateCastOrNull(1, 'Bool'); +select accurateCastOrNull(0, 'Bool'); + +select accurateCastOrNull('test', 'IPv4'); +select accurateCastOrNull('2001:db8::1', 'IPv4'); +select accurateCastOrNull('::ffff:192.0.2.1', 'IPv4'); +select accurateCastOrNull('192.0.2.1', 'IPv4'); +select accurateCastOrNull('192.0.2.1x', 'IPv4'); + +select accurateCastOrNull('test', 'IPv6'); +select accurateCastOrNull('192.0.2.1', 'IPv6'); +select accurateCastOrNull('2001:db8::1', 'IPv6'); +select accurateCastOrNull('2001:db8::1x', 'IPv6'); diff --git a/parser/testdata/01556_explain_select_with_union_query/ast.json b/parser/testdata/01556_explain_select_with_union_query/ast.json new file mode 100644 index 000000000..e9afdbaa6 --- /dev/null +++ b/parser/testdata/01556_explain_select_with_union_query/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001607453, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01556_explain_select_with_union_query/metadata.json b/parser/testdata/01556_explain_select_with_union_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01556_explain_select_with_union_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01556_explain_select_with_union_query/query.sql b/parser/testdata/01556_explain_select_with_union_query/query.sql new file mode 100644 index 000000000..d8278e188 --- /dev/null +++ b/parser/testdata/01556_explain_select_with_union_query/query.sql @@ -0,0 +1,34 @@ +SET enable_analyzer = 1; +SET union_default_mode = 'DISTINCT'; + +set enable_global_with_statement = 1; + +EXPLAIN SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1; +EXPLAIN (SELECT 1 UNION ALL SELECT 1) UNION ALL SELECT 1; +EXPLAIN SELECT 1 UNION (SELECT 1 UNION ALL SELECT 1); + +EXPLAIN SELECT 1 UNION SELECT 1 UNION DISTINCT SELECT 1; +EXPLAIN (SELECT 1 UNION DISTINCT SELECT 1) UNION DISTINCT SELECT 1; +EXPLAIN SELECT 1 UNION DISTINCT (SELECT 1 UNION SELECT 1); + +EXPLAIN (SELECT 1 UNION ALL (SELECT 1 UNION ALL (SELECT 1 UNION ALL SELECT 1 UNION SELECT 1))) UNION ALL (((SELECT 1) UNION (SELECT 1 UNION ALL (SELECT 1 UNION ALL (SELECT 1 UNION SELECT 1 ) UNION DISTINCT SELECT 1)))); + +EXPLAIN (((((((((((((((SELECT 1 UNION ALL SELECT 1) UNION SELECT 1)))))))))))))); +EXPLAIN (((((((((((((((((((((((((((((SELECT 1 UNION SELECT 1))))))))))))))))))))))))))))); +EXPLAIN (((((((((((((((((((((((((((((SELECT 1 UNION SELECT 1))))))))))))))))))))))))))))); + +SET union_default_mode='ALL'; + +EXPLAIN SELECT 1 UNION ALL SELECT 1 UNION ALL SELECT 1; +EXPLAIN (SELECT 1 UNION ALL SELECT 1) UNION ALL SELECT 1; +EXPLAIN SELECT 1 UNION (SELECT 1 UNION ALL SELECT 1); + +EXPLAIN SELECT 1 UNION SELECT 1 UNION DISTINCT SELECT 1; +EXPLAIN (SELECT 1 UNION DISTINCT SELECT 1) UNION DISTINCT SELECT 1; +EXPLAIN SELECT 1 UNION DISTINCT (SELECT 1 UNION SELECT 1); + +EXPLAIN (SELECT 1 UNION ALL (SELECT 1 UNION ALL (SELECT 1 UNION ALL SELECT 1 UNION SELECT 1))) UNION ALL (((SELECT 1) UNION (SELECT 1 UNION ALL (SELECT 1 UNION ALL (SELECT 1 UNION SELECT 1 ) UNION DISTINCT SELECT 1)))); + +EXPLAIN (((((((((((((((SELECT 1 UNION ALL SELECT 1) UNION SELECT 1)))))))))))))); +EXPLAIN (((((((((((((((((((((((((((((SELECT 1 UNION SELECT 
1))))))))))))))))))))))))))))); +EXPLAIN (((((((((((((((((((((((((((((SELECT 1 UNION SELECT 1))))))))))))))))))))))))))))); diff --git a/parser/testdata/01556_if_null/ast.json b/parser/testdata/01556_if_null/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01556_if_null/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01556_if_null/metadata.json b/parser/testdata/01556_if_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01556_if_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01556_if_null/query.sql b/parser/testdata/01556_if_null/query.sql new file mode 100644 index 000000000..1952b4b76 --- /dev/null +++ b/parser/testdata/01556_if_null/query.sql @@ -0,0 +1,5 @@ +SELECT + sumMapIf([1], [1], nullIf(number, 3) > 0) AS col1, + countIf(1, nullIf(number, 3) > 0) AS col2, + sumIf(1, nullIf(number, 3) > 0) AS col3 +FROM numbers(1, 5); diff --git a/parser/testdata/01557_max_parallel_replicas_no_sample/ast.json b/parser/testdata/01557_max_parallel_replicas_no_sample/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01557_max_parallel_replicas_no_sample/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01557_max_parallel_replicas_no_sample/metadata.json b/parser/testdata/01557_max_parallel_replicas_no_sample/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01557_max_parallel_replicas_no_sample/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01557_max_parallel_replicas_no_sample/query.sql b/parser/testdata/01557_max_parallel_replicas_no_sample/query.sql new file mode 100644 index 000000000..3704119c9 --- /dev/null +++ b/parser/testdata/01557_max_parallel_replicas_no_sample/query.sql @@ -0,0 +1,29 @@ +-- Tags: replica + +SET enable_parallel_replicas=1; +SET max_parallel_replicas=3; +SET parallel_replicas_mode='sampling_key'; +SET parallel_replicas_for_non_replicated_merge_tree = 1; + +DROP TABLE IF EXISTS t; +CREATE TABLE t (x String) ENGINE = MergeTree ORDER BY x; +INSERT INTO t VALUES ('Hello'); + +SET max_parallel_replicas = 3; +SELECT * FROM remote('127.0.0.{2|3|4}', currentDatabase(), t); + +DROP TABLE t; + +CREATE TABLE t (x String) ENGINE = MergeTree ORDER BY cityHash64(x) SAMPLE BY cityHash64(x); +INSERT INTO t SELECT toString(number) FROM numbers(1000); + +SET max_parallel_replicas = 1; +SELECT count() FROM remote('127.0.0.{2|3|4}', currentDatabase(), t); + +SET max_parallel_replicas = 2; +SELECT count() FROM remote('127.0.0.{2|3|4}', currentDatabase(), t); + +SET max_parallel_replicas = 3; +SELECT count() FROM remote('127.0.0.{2|3|4}', currentDatabase(), t); + +DROP TABLE t; diff --git a/parser/testdata/01558_enum_as_num_in_tsv_csv_input/ast.json b/parser/testdata/01558_enum_as_num_in_tsv_csv_input/ast.json new file mode 100644 index 000000000..81191d369 --- /dev/null +++ b/parser/testdata/01558_enum_as_num_in_tsv_csv_input/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery enum_as_num (children 1)" + }, + { + "explain": " Identifier enum_as_num" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001385695, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/01558_enum_as_num_in_tsv_csv_input/metadata.json b/parser/testdata/01558_enum_as_num_in_tsv_csv_input/metadata.json new file mode 100644 
index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01558_enum_as_num_in_tsv_csv_input/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01558_enum_as_num_in_tsv_csv_input/query.sql b/parser/testdata/01558_enum_as_num_in_tsv_csv_input/query.sql new file mode 100644 index 000000000..4dccfda4b --- /dev/null +++ b/parser/testdata/01558_enum_as_num_in_tsv_csv_input/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS enum_as_num; + +CREATE TABLE enum_as_num ( + Id Int32, + Value Enum('a' = 1, '3' = 2, 'b' = 3) +) ENGINE=Memory(); + +INSERT INTO enum_as_num FORMAT TSV 1 1 + +INSERT INTO enum_as_num FORMAT TSV 2 2 + +INSERT INTO enum_as_num FORMAT TSV 3 3 + +INSERT INTO enum_as_num FORMAT TSV 4 a + +INSERT INTO enum_as_num FORMAT TSV 5 b + +INSERT INTO enum_as_num FORMAT CSV 6,1 + +INSERT INTO enum_as_num FORMAT CSV 7,2 + +INSERT INTO enum_as_num FORMAT CSV 8,3 + +INSERT INTO enum_as_num FORMAT CSV 9,a + +INSERT INTO enum_as_num FORMAT CSV 10,b + +SELECT * FROM enum_as_num ORDER BY Id; + + +DROP TABLE IF EXISTS enum_as_num; diff --git a/parser/testdata/01558_transform_null_in/ast.json b/parser/testdata/01558_transform_null_in/ast.json new file mode 100644 index 000000000..8195fe793 --- /dev/null +++ b/parser/testdata/01558_transform_null_in/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001785468, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01558_transform_null_in/metadata.json b/parser/testdata/01558_transform_null_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01558_transform_null_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01558_transform_null_in/query.sql b/parser/testdata/01558_transform_null_in/query.sql new file mode 100644 index 000000000..0036c08a6 --- /dev/null +++ b/parser/testdata/01558_transform_null_in/query.sql @@ -0,0 +1,31 @@ +SET transform_null_in = 1; + +DROP TABLE IF EXISTS null_in_1; +CREATE TABLE null_in_1 (u UInt32, n Nullable(UInt32)) ENGINE = Memory; +INSERT INTO null_in_1 VALUES (1, NULL), (2, 2), (3, NULL), (4, 4), (5, NULL); + +SELECT count() FROM null_in_1 WHERE n IN (1, 2, NULL); +SELECT count() FROM null_in_1 WHERE u IN (1, 2, NULL); +SELECT count() FROM null_in_1 WHERE (u, n) IN ((1, 2), (1, NULL), (2, 2)); +SELECT count() FROM null_in_1 WHERE (u, n) IN ((NULL, NULL), (2, 2), (NULL, 2)); +SELECT count() FROM null_in_1 WHERE (u, n) IN (42, NULL); +SELECT count() FROM null_in_1 WHERE (u, n) NOT IN ((3, NULL), (5, NULL)); + +SELECT '=============='; +DROP TABLE IF EXISTS null_in_1; + +CREATE TABLE null_in_1 (a Nullable(UInt32), b Nullable(UInt32)) ENGINE = Memory; +INSERT INTO null_in_1 VALUES (1, NULL) (0, NULL) (NULL, NULL) (NULL, 1) (NULL, 0) (0, 0) (1, 1); + +SELECT count() FROM null_in_1 WHERE (a, b) IN (1, NULL); +SELECT count() FROM null_in_1 WHERE (a, b) IN (0, NULL); +SELECT count() FROM null_in_1 WHERE (a, b) IN (42, NULL); +SELECT count() FROM null_in_1 WHERE (a, b) IN (NULL, 0); +SELECT count() FROM null_in_1 WHERE (a, b) IN (NULL, 1); +SELECT count() FROM null_in_1 WHERE (a, b) IN (NULL, 42); +SELECT count() FROM null_in_1 WHERE (a, b) IN (NULL, NULL); +SELECT count() FROM null_in_1 WHERE (a, b) IN (0, 0); +SELECT count() FROM null_in_1 WHERE (a, b) IN (1, 1); +SELECT count() FROM null_in_1 WHERE (a, b) IN (1, 42); + +DROP TABLE IF EXISTS null_in_1; diff --git 
a/parser/testdata/01558_ttest/ast.json b/parser/testdata/01558_ttest/ast.json new file mode 100644 index 000000000..a3060340f --- /dev/null +++ b/parser/testdata/01558_ttest/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery welch_ttest (children 1)" + }, + { + "explain": " Identifier welch_ttest" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001239764, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/01558_ttest/metadata.json b/parser/testdata/01558_ttest/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01558_ttest/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01558_ttest/query.sql b/parser/testdata/01558_ttest/query.sql new file mode 100644 index 000000000..9bcbdd21e --- /dev/null +++ b/parser/testdata/01558_ttest/query.sql @@ -0,0 +1,72 @@ +DROP TABLE IF EXISTS welch_ttest; +CREATE TABLE welch_ttest (left Float64, right UInt8) ENGINE = Memory; +INSERT INTO welch_ttest VALUES (27.5, 0), (21.0, 0), (19.0, 0), (23.6, 0), (17.0, 0), (17.9, 0), (16.9, 0), (20.1, 0), (21.9, 0), (22.6, 0), (23.1, 0), (19.6, 0), (19.0, 0), (21.7, 0), (21.4, 0), (27.1, 1), (22.0, 1), (20.8, 1), (23.4, 1), (23.4, 1), (23.5, 1), (25.8, 1), (22.0, 1), (24.8, 1), (20.2, 1), (21.9, 1), (22.1, 1), (22.9, 1), (20.5, 1), (24.4, 1); + +SELECT '0.021378001462867'; +SELECT roundBankers(welchTTest(left, right).2, 6) from welch_ttest; +DROP TABLE IF EXISTS welch_ttest; + + +CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; +INSERT INTO welch_ttest VALUES (30.02, 0), (29.99, 0), (30.11, 0), (29.97, 0), (30.01, 0), (29.99, 0), (29.89, 1), (29.93, 1), (29.72, 1), (29.98, 1), (30.02, 1), (29.98, 1); +SELECT '0.090773324285671'; +SELECT roundBankers(welchTTest(left, right).2, 6) from welch_ttest; +DROP TABLE IF EXISTS welch_ttest; + +CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; +INSERT INTO welch_ttest VALUES (0.010268, 0), (0.000167, 0), (0.000167, 0), (0.159258, 1), (0.136278, 1), (0.122389, 1); +SELECT '0.00339907162713746'; +SELECT roundBankers(welchTTest(left, right).2, 6) from welch_ttest; +DROP TABLE IF EXISTS welch_ttest; + +CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; +INSERT INTO welch_ttest VALUES (14.72789, 0), (9.61661, 0), (13.57615, 0), (3.98392, 0), (11.98889, 0), (10.99422, 0), (5.44792, 0), (20.29346, 0), (7.05926, 0), (9.22732, 0), (12.06847, 0), (13.52612, 0), (8.24597, 0), (9.35245, 0), (10.12297, 0), (15.80624, 0), (13.68613, 0), (10.72729, 0), (5.62078, 0), (6.12229, 0), (6.03801, 0), (8.95585, 0), (24.04613, 0), (9.04757, 0), (2.68263, 0), (15.43935, 0), (2.89423, 0), (4.01423, 0), (4.30568, 0), (11.99948, 0), (8.40574, 0), (10.86642, 0), (9.4266, 0), (-8.12752, 0), (7.91634, 0), (7.3967, 0), (2.26431, 0), (14.20118, 0), (6.68233, 0), (15.46221, 0), (7.88467, 0), (11.20011, 0), (8.92027, 0), (10.27926, 0), (5.14395, 0), (5.62178, 0), (12.84383, 0), (9.98009, 0), (-0.69789, 0), (11.41386, 0), (7.76863, 0), (7.21743, 0), (1.81176, 0), (9.43762, 0), (19.22117, 0), (2.97128, 0), (14.32851, 0), (7.54959, 0), (3.81545, 0), (10.1281, 0), (2.48596, 0), (10.0461, 0), (3.59714, 0), (9.73522, 0), (18.8077, 0), (3.15148, 0), (12.26062, 0), (5.66707, 0), (6.58623, 0), (17.30902, 0), (9.91391, 0), (5.36946, 0), (15.73637, 0), (16.96281, 0), (11.54063, 0), (18.37358, 0), (11.38255, 0), (10.53256, 0), (8.08833, 0), (16.27556, 0), (2.42969, 0), 
(9.56127, 0), (7.32998, 0), (9.19511, 0), (9.66903, 0), (4.15029, 0), (8.83511, 0), (14.60617, 0), (14.06143, 0), (5.39556, 0), (10.11871, 0), (10.56619, 0), (14.4462, 0), (10.42106, 0), (7.75551, 0), (11.00418, 0), (4.47226, 0), (16.35461, 0), (18.55174, 0), (11.82044, 0), (7.39454, 0), (11.27767, 0), (6.83827, 0), (7.76858, 0), (15.97614, 0), (14.53781, 0), (12.99546, 0), (16.91151, 0), (9.65012, 0), (14.25487, 0), (14.03618, 0), (2.57382, 0), (2.50779, 0), (14.24787, 0), (13.34666, 0), (7.31102, 0), (10.22981, 0), (17.4435, 0), (21.2074, 0), (6.64191, 0), (18.7086, 0), (14.78686, 0), (9.85287, 0), (4.48263, 0), (14.17469, 0), (14.4342, 0), (19.2481, 0), (3.47165, 0), (8.28712, 0), (8.81657, 0), (0.92319, 0), (20.41106, 0), (6.76127, 0), (22.00242, 0), (8.66129, 0), (10.9929, 0), (17.95494, 0), (17.20996, 0), (12.18888, 0), (12.14257, 0), (15.81243, 0), (4.43362, 0), (1.17567, 0), (15.60881, 0), (9.34833, 0), (6.33513, 0), (-0.83095, 0), (12.43268, 0), (6.63207, 0), (11.96877, 0), (14.81029, 0), (21.84876, 0), (3.75896, 0), (6.91307, 0), (13.73015, 0), (8.63753, 0), (15.71679, 0), (1.74565, 0), (9.16895, 0), (5.70685, 0), (5.00117, 0), (13.06888, 0), (7.51204, 0), (15.34885, 0), (5.20264, 0), (8.59043, 0), (6.45619, 0), (14.61979, 0), (11.7075, 0), (14.04901, 0), (4.20525, 0), (15.1733, 0), (3.12934, 0), (8.08049, 0), (15.41273, 0), (16.90751, 0), (5.86893, 0), (7.1086, 0), (4.418, 0), (12.0614, 0), (7.07887, 0), (3.61585, 0), (11.73001, 0), (10.80449, 0), (8.40311, 0), (9.91276, 0), (16.4164, 0), (5.25034, 0), (15.20283, 0), (10.42909, 0), (9.53888, 0), (14.68939, 0), (6.60007, 0), (18.31058, 0), (7.01885, 0), (18.71631, 0), (10.50002, 0), (10.7517, 0), (4.23224, 0), (2.28924, 0), (8.56059, 0), (8.25095, 0), (9.15673, 0), (13.28409, 0), (8.4513, 0), (2.83911, 0), (2.79676, 0), (9.11055, 0), (7.18529, 0), (-4.1258, 0), (5.28306, 0), (6.82757, 0), (10.89035, 0), (5.24822, 0), (11.935, 0), (6.45675, 0), (10.18088, 0), (4.9932, 0), (18.09939, 0), (8.11738, 0), (5.37883, 0), (10.50339, 0), (16.64093, 0), (14.77263, 0), (13.71385, 0), (6.98746, 0), (10.74635, 0), (5.49432, 0), (13.46078, 0), (10.67565, 0), (9.0291, 0), (11.51417, 0), (13.07118, 0), (9.5049, 0), (8.50611, 0), (6.47606, 0), (13.06526, 0), (19.08658, 0), (9.49741, 0), (10.60865, 0), (2.28996, 0), (8.12846, 0), (5.62241, 0), (4.07712, 0), (17.98526, 0), (9.466, 0), (11.38904, 0), (5.91826, 0), (1.52059, 0), (18.79161, 0), (18.20669, 0), (-1.67829, 0), (18.01586, 0), (16.31577, 0), (7.88281, 0), (8.46179, 0), (10.31113, 0), (14.88377, 0), (1.31835, 0), (2.53176, 0), (9.48625, 0), (3.97936, 0), (11.52319, 0), (13.24178, 0), (7.58739, 0), (10.00959, 0), (9.73361, 0), (8.35716, 0), (1.65491, 0), (11.11521, 0), (6.08355, 0), (10.04582, 0), (11.58237, 0), (16.40249, 0), (1.9691, 0), (13.22776, 0), (2.67059, 0), (9.83651, 0), (2.12539, 0), (9.27114, 0), (9.0699, 0), (2.78179, 0), (12.49311, 0), (12.97662, 0), (15.06359, 0), (16.91565, 0), (5.92011, 0), (5.81304, 0), (8.46425, 0), (9.48705, 0), (4.68191, 0), (5.70028, 0), (-0.78798, 0), (10.03442, 0), (15.45433, 0), (9.43845, 0), (3.05825, 0), (6.92126, 0), (14.05905, 0), (19.71579, 0), (15.0131, 0), (4.50386, 0), (1.31061, 0), (10.81197, 0), (14.32942, 0), (9.26469, 0), (7.27679, 0), (22.69295, 0), (12.03763, 0), (7.34876, 0), (16.60689, 0), (7.48786, 0), (15.78602, 0), (17.21048, 0), (13.93482, 0), (9.69911, 0), (12.24315, 0), (10.58131, 0), (19.57006, 0), (9.8856, 0), (11.70302, 0), (7.89864, 0), (12.24831, 0), (16.93707, 0), (9.65467, 0), (4.221, 0), (15.45229, 0), (12.83088, 0), 
(7.58313, 0), (12.895, 0), (10.02471, 0), (13.36059, 0), (5.07864, 0), (9.72017, 0), (11.05809, 0), (15.28528, 0), (13.99834, 0), (19.26989, 0), (9.41846, 0), (11.65425, 0), (8.49638, 0), (6.38592, 0), (-4.69837, 0), (12.22061, 0), (9.41331, 0), (13.2075, 0), (12.97005, 0), (11.44352, 0), (9.79805, 0), (6.93116, 0), (10.07691, 0), (22.05892, 0), (7.80353, 0), (-2.17276, 0), (0.61509, 0), (8.35842, 0), (17.77108, 0), (14.70841, 0), (1.27992, 0), (15.62699, 0), (9.32914, 0), (15.41866, 0), (10.82009, 0), (3.29902, 0), (9.21998, 0), (7.93845, 0), (10.33344, 0), (12.06399, 0), (5.5308, 0), (8.38727, 0), (18.11104, 0), (8.86565, 0), (19.41825, 0), (9.52376, 0), (3.94552, 0), (9.37587, 0), (15.44954, 0), (15.90527, 0), (13.18927, 0), (7.01646, 0), (9.06005, 0), (9.06431, 0), (5.76006, 0), (9.18705, 0), (-3.48446, 0), (15.89817, 0), (12.94719, 0), (23.69426, 0), (17.47755, 0), (15.61528, 0), (0.54832, 0), (14.32916, 0), (9.55305, 0), (13.79891, 0), (0.82544, 0), (13.34875, 0), (9.07614, 0), (5.19621, 0), (2.1451, 0), (9.87726, 0), (8.45439, 0), (-1.41842, 0), (7.93598, 0), (11.23151, 0), (17.84458, 0), (7.02237, 0), (10.7842, 0), (4.42832, 0), (4.45044, 0), (1.50938, 0), (21.21651, 0), (6.2097, 0), (6.84354, 0), (18.53804, 0), (12.01072, 0), (4.8345, 0), (20.41587, 0), (14.48353, 0), (8.71116, 0), (12.42818, 0), (14.89244, 0), (8.03033, 0), (5.25917, 0), (2.30092, 0), (10.22504, 0), (15.37573, 0), (7.13666, 0), (4.45018, 0), (10.18405, 0), (3.91025, 0), (14.52304, 0), (13.14771, 0), (11.99219, 0), (9.21345, 0), (8.85106, 0), (12.91887, 0), (15.62308, 0), (11.88034, 0), (15.12097, 0), (11.58168, 0), (16.83051, 0), (5.25405, 0), (2.19976, 0), (4.56716, 0), (16.46053, 0), (5.61995, 0), (8.67704, 0), (5.62789, 0), (9.84815, 0), (13.05834, 0), (11.74205, 0), (3.88393, 0), (16.15321, 0), (4.83925, 0), (13.00334, 0), (4.4028, 0), (4.35794, 0), (4.47478, 0), (2.38713, 0), (4.25235, 0), (10.87509, 0), (9.82411, 0), (13.61518, 0), (10.25507, 0), (4.0335, 0), (10.69881, 0), (5.70321, 0), (6.96244, 0), (9.35874, 0), (6.28076, 0), (8.29015, 0), (6.88653, 0), (7.70687, 0), (8.2001, 0), (6.73415, 0), (3.82052, 0), (3.94469, 0), (15.82384, 0), (2.54004, 0), (10.74876, 0), (12.60517, 0), (17.7024, 0), (4.6722, 0), (13.67341, 0), (6.4565, 0), (12.95699, 0), (4.56912, 0), (5.58464, 0), (4.0638, 0), (13.05559, 0), (5.38269, 0), (0.16354, 0), (7.23962, 0), (7.38577, 0), (8.50951, 0), (13.72574, 0), (17.80421, 0), (3.01135, 0), (8.02608, 0), (14.23847, 0), (-8.65656, 1), (22.98234, 1), (23.80821, 1), (13.33939, 1), (-4.05537, 1), (23.5155, 1), (-6.45272, 1), (17.7903, 1), (11.463, 1), (5.28021, 1), (8.39157, 1), (6.02464, 1), (14.43732, 1), (15.76584, 1), (1.54391, 1), (1.24897, 1), (27.1507, 1), (7.71091, 1), (15.71846, 1), (32.97808, 1), (-1.79334, 1), (-9.23439, 1), (11.27838, 1), (0.72703, 1), (18.51557, 1), (9.16619, 1), (17.29624, 1), (-1.30208, 1), (-3.48018, 1), (10.12082, 1), (-8.01318, 1), (-14.22264, 1), (16.58174, 1), (-0.55975, 1), (5.61449, 1), (1.44626, 1), (7.89158, 1), (1.13369, 1), (-0.82609, 1), (12.23365, 1), (12.45443, 1), (14.46915, 1), (13.72627, 1), (18.41459, 1), (29.66702, 1), (1.51619, 1), (10.40078, 1), (3.33266, 1), (6.12036, 1), (11.86553, 1), (6.59422, 1), (22.0948, 1), (1.79623, 1), (14.29513, 1), (19.69162, 1), (-7.98033, 1), (5.48433, 1), (-2.28474, 1), (9.91876, 1), (10.64097, 1), (0.22523, 1), (17.01773, 1), (22.37388, 1), (14.04215, 1), (23.1244, 1), (18.96958, 1), (8.42663, 1), (3.7165, 1), (14.29366, 1), (23.50886, 1), (26.33722, 1), (26.72396, 1), (13.26287, 1), (12.97607, 1), 
(17.41838, 1), (8.63875, 1), (17.08943, 1), (23.15356, 1), (-4.4965, 1), (7.58895, 1), (26.04074, 1), (6.84245, 1), (20.56287, 1), (3.84735, 1), (-2.76304, 1), (13.1615, 1), (8.21954, 1), (-3.49943, 1), (22.12419, 1), (7.08323, 1), (16.12937, 1), (-0.32672, 1), (16.5942, 1), (7.68977, 1), (11.39484, 1), (-5.11987, 1), (20.87404, 1), (8.01007, 1), (3.26497, 1), (5.61253, 1), (20.69182, 1), (0.0296, 1), (21.904, 1), (22.46572, 1), (3.63685, 1), (-5.10846, 1), (14.86389, 1), (5.47188, 1), (18.44095, 1), (16.71368, 1), (6.36704, 1), (8.82663, 1), (14.6727, 1), (7.98383, 1), (2.65568, 1), (21.45827, 1), (11.77948, 1), (4.71979, 1), (3.17951, 1), (13.90226, 1), (15.50578, 1), (10.8026, 1), (16.91369, 1), (9.90552, 1), (13.87322, 1), (4.12366, 1), (-3.78985, 1), (1.7599, 1), (3.43715, 1), (-3.45246, 1), (23.64571, 1), (-4.96877, 1), (3.93514, 1), (1.49914, 1), (12.71519, 1), (5.11521, 1), (4.79872, 1), (20.89391, 1), (5.363, 1), (8.02765, 1), (14.30804, 1), (11.49002, 1), (14.25281, 1), (7.6573, 1), (15.49686, 1), (3.29327, 1), (2.27236, 1), (12.58104, 1), (19.19128, 1), (15.25901, 1), (6.5221, 1), (10.10965, 1), (12.75249, 1), (16.50977, 1), (-8.6697, 1), (8.28553, 1), (1.44315, 1), (4.65869, 1), (0.98149, 1), (0.16623, 1), (17.66332, 1), (4.35346, 1), (6.52742, 1), (-1.06631, 1), (-5.28454, 1), (14.25583, 1), (8.74058, 1), (1.89553, 1), (-0.92959, 1), (10.30289, 1), (-6.3744, 1), (-8.1706, 1), (10.95369, 1), (4.94384, 1), (28.40568, 1), (3.7004, 1), (2.52363, 1), (4.07997, 1), (7.8849, 1), (17.95409, 1), (16.67021, 1), (11.34377, 1), (-0.07446, 1), (22.00223, 1), (3.31778, 1), (18.50719, 1), (-3.58655, 1), (6.5394, 1), (12.40459, 1), (16.59866, 1), (7.54176, 1), (-1.51044, 1), (12.69758, 1), (2.9842, 1), (2.49187, 1), (2.04113, 1), (-2.46544, 1), (15.18368, 1), (-0.04058, 1), (-0.4127, 1), (10.5526, 1), (12.03982, 1), (12.10923, 1), (11.54954, 1), (-1.18613, 1), (11.30984, 1), (23.54105, 1), (10.67321, 1), (24.09196, 1), (7.5008, 1), (12.52233, 1), (4.30673, 1), (9.35793, 1), (4.44472, 1), (-7.00679, 1), (8.56241, 1), (23.73891, 1), (15.62708, 1), (16.09205, 1), (12.52074, 1), (14.58927, 1), (-4.80187, 1), (8.47964, 1), (7.75477, 1), (12.6893, 1), (7.14147, 1), (12.12654, 1), (12.32334, 1), (7.98909, 1), (3.26652, 1), (20.53684, 1), (32.3369, 1), (19.74911, 1), (-4.62897, 1), (8.26483, 1), (20.88451, 1), (-2.12982, 1), (25.61459, 1), (5.32091, 1), (-4.1196, 1), (7.57937, 1), (21.15847, 1), (6.46355, 1), (7.74846, 1), (19.62636, 1), (28.34629, 1), (26.73919, 1), (20.40427, 1), (3.03378, 1), (10.2537, 1), (7.47745, 1), (10.79184, 1), (3.91962, 1), (19.97973, 1), (18.87711, 1), (12.56157, 1), (11.46033, 1), (3.78661, 1), (-9.45748, 1), (12.06033, 1), (-0.74615, 1), (13.2815, 1), (24.78052, 1), (5.83337, 1), (17.4111, 1), (19.70331, 1), (11.78446, 1), (-1.366, 1), (1.37458, 1), (16.31483, 1), (32.63464, 1), (-3.79736, 1), (19.17984, 1), (-0.27705, 1), (-3.69456, 1), (28.38058, 1), (-1.36876, 1), (-25.63301, 1), (3.58644, 1), (-6.85667, 1), (13.42225, 1), (12.04671, 1), (28.99468, 1), (7.87662, 1), (2.61119, 1), (-3.56022, 1), (1.50022, 1), (14.55836, 1), (9.35831, 1), (16.9366, 1), (29.23126, 1), (15.31386, 1), (13.46112, 1), (7.39667, 1), (11.15599, 1), (9.80499, 1), (22.64923, 1), (8.67693, 1), (18.67335, 1), (-3.19127, 1), (22.94716, 1), (17.86834, 1), (16.98267, 1), (15.91653, 1), (11.79718, 1), (18.50208, 1), (8.90755, 1), (10.44843, 1), (4.67433, 1), (6.82287, 1), (10.82228, 1), (-4.18631, 1), (20.3872, 1), (11.84735, 1), (21.25376, 1), (10.55032, 1), (12.19023, 1), (0.63369, 1), (7.92381, 
1), (17.90933, 1), (15.30781, 1), (10.01877, 1), (0.88744, 1), (22.20967, 1), (-4.23117, 1), (21.50819, 1), (11.27421, 1), (-16.23179, 1), (33.43085, 1), (5.15093, 1), (1.34505, 1), (6.027, 1), (-10.43035, 1), (27.45998, 1), (19.24886, 1), (-4.44761, 1), (5.453, 1), (12.73758, 1), (11.2897, 1), (31.032, 1), (7.39168, 1), (11.95245, 1), (26.279, 1), (-1.0255, 1), (10.36675, 1), (11.58439, 1), (27.8405, 1), (13.1707, 1), (31.39133, 1), (27.08301, 1), (-2.14368, 1), (4.08476, 1), (21.5573, 1), (16.69822, 1), (7.69955, 1), (8.32793, 1), (6.49235, 1), (-7.3284, 1), (10.58264, 1), (-6.17006, 1), (34.55782, 1), (10.93221, 1), (44.24299, 1), (14.6224, 1), (-7.42798, 1), (15.52351, 1), (11.33982, 1), (10.46716, 1), (13.0986, 1), (-4.25988, 1), (9.55316, 1), (0.75489, 1), (25.99212, 1), (-0.81401, 1), (3.49551, 1), (22.99402, 1), (10.99628, 1), (23.70223, 1), (2.71482, 1), (22.82309, 1), (31.25686, 1), (4.86318, 1), (-1.06476, 1), (15.10298, 1), (-0.61015, 1), (17.81246, 1), (-1.55788, 1), (18.09709, 1), (9.11271, 1), (9.94682, 1), (-7.33194, 1), (-4.67293, 1), (21.81717, 1), (7.16318, 1), (13.25649, 1), (13.88776, 1), (4.95793, 1), (17.65303, 1), (14.47382, 1), (13.19373, 1), (31.86093, 1), (5.73161, 1), (10.96492, 1), (6.97951, 1), (1.75136, 1), (10.96144, 1), (15.08137, 1), (9.95311, 1), (7.07729, 1), (3.08148, 1), (22.37954, 1), (8.51951, 1), (2.88746, 1), (26.73509, 1), (-2.88939, 1), (-2.82367, 1), (-0.35783, 1), (14.22076, 1), (11.50295, 1), (7.10171, 1), (8.28488, 1), (0.54178, 1), (13.8022, 1), (15.62157, 1), (10.79173, 1), (28.18946, 1), (30.43524, 1), (2.54914, 1), (9.89421, 1), (13.08631, 1), (4.68761, 1), (5.61516, 1), (22.88072, 1), (7.4735, 1), (11.27382, 1), (2.39559, 1), (-3.31889, 1), (9.61957, 1), (23.01381, 1), (-1.23467, 1), (9.07691, 1), (15.78056, 1), (12.28421, 1), (9.44888, 1), (13.16928, 1), (4.33357, 1), (2.21737, 1), (33.17833, 1), (13.25407, 1), (-2.47961, 1), (6.41401, 1), (18.8439, 1), (-4.63375, 1), (-8.2909, 1), (12.18221, 1), (-2.95356, 1), (19.61659, 1), (12.45056, 1), (-4.17198, 1), (21.9641, 1), (11.96416, 1), (12.74573, 1), (10.47873, 1), (12.73295, 1), (11.31373, 1), (9.9827, 1), (5.87138, 1), (4.24372, 1), (-23.72256, 1), (28.41337, 1), (4.88103, 1), (3.61902, 1), (8.93586, 1), (16.40759, 1), (27.84494, 1), (5.6001, 1), (14.51379, 1), (13.5576, 1), (12.92213, 1), (3.90686, 1), (17.07104, 1), (15.84268, 1), (17.38777, 1), (16.54766, 1), (5.94487, 1), (17.02804, 1), (7.66386, 1), (10.43088, 1), (6.16059, 1), (20.46178, 1), (20.02888, 1), (20.95949, 1), (6.50808, 1), (7.22366, 1), (8.06659, 1), (16.08241, 1), (13.83514, 1), (-0.33454, 1), (12.98848, 1), (12.99024, 1); +SELECT '-0.5028215369186904', '0.6152361677168877'; +SELECT roundBankers(welchTTest(left, right).1, 6) as t_stat, roundBankers(welchTTest(left, right).2, 6) as p_value from welch_ttest; +DROP TABLE IF EXISTS welch_ttest; + + +CREATE TABLE welch_ttest (left Float64, right Float64) ENGINE = Memory; +INSERT INTO welch_ttest VALUES (4.82025, 0), (6.13896, 0), (15.20277, 0), (14.15351, 0), (7.21338, 0), (8.55506, 0), (13.80816, 0), (11.28411, 0), (7.4612, 0), (7.43759, 0), (12.9832, 0), (-5.74783, 0), (12.47114, 0), (15.14223, 0), (3.40603, 0), (9.27323, 0), (7.88547, 0), (8.56456, 0), (4.59731, 0), (7.91213, 0), (7.33894, 0), (21.74811, 0), (11.92111, 0), (0.18828, 0), (10.47314, 0), (20.37396, 0), (11.04991, 0), (13.30083, 0), (14.28065, 0), (2.86942, 0), (24.96072, 0), (14.20164, 0), (18.28769, 0), (10.50949, 0), (9.22273, 0), (11.77608, 0), (8.56872, 0), (13.74535, 0), (11.65209, 0), (12.51894, 0), 
(17.76256, 0), (13.52122, 0), (8.70796, 0), (6.04749, 0), (16.33064, 0), (8.35636, 0), (14.03496, 0), (11.05834, 0), (14.49261, 0), (2.59383, 0), (8.01022, 0), (4.05458, 0), (13.26384, 0), (14.62058, 0), (10.52489, 0), (8.46357, 0), (6.4147, 0), (9.70071, 0), (12.47581, 0), (4.38333, 0), (17.54172, 0), (10.12109, 0), (7.73186, 0), (14.0279, 0), (11.6621, 0), (17.47045, 0), (15.50223, 0), (15.46034, 0), (13.39964, 0), (14.98025, 0), (15.87912, 0), (17.67374, 0), (9.64073, 0), (12.84904, 0), (7.70278, 0), (13.03156, 0), (9.04512, 0), (15.97014, 0), (8.96389, 0), (11.48009, 0), (9.71153, 0), (13.00084, 0), (12.39803, 0), (13.08188, 0), (5.82244, 0), (10.81871, 0), (8.2539, 0), (7.52114, 0), (9.11488, 0), (8.37482, 0), (14.48652, 0), (11.42152, 0), (16.03111, 0), (13.14057, 0), (-2.26351, 0), (15.50394, 0), (14.88603, 0), (13.37257, 0), (11.84026, 0), (7.66558, 0), (6.24584, 0), (3.6312, 0), (2.7018, 0), (5.63656, 0), (5.82643, 0), (10.06745, 0), (-0.5831, 0), (14.84202, 0), (9.5524, 0), (19.71713, 0), (14.23109, 0), (8.69105, 0), (5.33742, 0), (7.30372, 0), (7.93342, 0), (15.20884, 0), (7.53839, 0), (13.45311, 0), (11.04473, 0), (10.76673, 0), (15.44145, 0), (14.06596, 0), (9.14873, 0), (12.88372, 0), (8.74994, 0), (10.53263, 0), (16.16694, 0), (8.37197, 0), (3.43739, 0), (4.72799, 0), (9.08802, 0), (11.2531, 0), (5.16115, 0), (10.20895, 0), (18.70884, 0), (15.88924, 0), (3.38758, 0), (6.46449, 0), (10.21088, 0), (14.08458, 0), (15.74508, 0), (19.31896, 0), (13.19641, 0), (11.95409, 0), (10.70718, 0), (1.05245, 0), (10.04772, 0), (17.01369, 0), (10.2286, 0), (19.58323, 0), (7.02892, 0), (4.16866, 0), (8.94326, 0), (4.99854, 0), (8.88352, 0), (18.65422, 0), (17.32328, 0), (9.33492, 0), (14.94788, 0), (8.05863, 0), (14.6737, 0), (10.93801, 0), (0.54036, 0), (-0.34242, 0), (5.89076, 0), (3.15189, 0), (1.94421, 0), (6.38698, 0), (10.50654, 0), (8.95362, 0), (6.23711, 0), (11.75359, 0), (12.42155, 0), (-1.55472, 0), (4.6688, 0), (10.48087, 0), (11.74615, 0), (9.26822, 0), (7.55517, 0), (12.76005, 0), (16.47102, 0), (11.31297, 0), (14.37437, 0), (2.38799, 0), (6.44577, 0), (5.07471, 0), (11.55123, 0), (7.76795, 0), (10.60116, 0), (14.40885, 0), (11.58158, 0), (8.81648, 0), (12.92299, 0), (11.26939, 0), (17.95014, 0), (2.95002, 0), (17.41959, 0), (11.12455, 0), (8.78541, 0), (14.36413, 0), (12.98554, 0), (12.58505, 0), (15.49789, 0), (11.70999, 0), (0.65596, 0), (11.08202, 0), (14.75752, 0), (6.84385, 0), (9.27245, 0), (13.78243, 0), (17.4863, 0), (4.01777, 0), (11.82861, 0), (13.86551, 0), (6.16591, 0), (8.71589, 0), (16.77195, 0), (17.23243, 0), (-2.12941, 0), (5.66629, 0), (12.45153, 0), (1.63971, 0), (13.84031, 0), (4.6144, 0), (5.26169, 0), (9.27769, 0), (9.14288, 0), (9.71953, 0), (9.38446, 0), (1.64788, 0), (11.72922, 0), (13.68926, 0), (9.42952, 0), (12.05574, 0), (9.09148, 0), (5.32273, 0), (20.25258, 0), (10.14599, 0), (10.82156, 0), (5.75736, 0), (7.13567, 0), (9.29746, 0), (5.1618, 0), (10.076, 0), (21.65669, 0), (13.35486, 0), (6.79957, 0), (8.76243, 0), (14.59294, 0), (16.90609, 0), (10.50337, 0), (-0.07923, 0), (13.51648, 0), (12.0676, 0), (0.86482, 0), (9.03563, 0), (5.38751, 0), (17.16866, 0), (2.78702, 0), (11.15548, 0), (12.30843, 0), (8.04897, 0), (9.95814, 0), (11.29308, 0), (14.13032, 0), (21.05877, 0), (3.57386, 0), (7.96631, 0), (3.30484, 0), (18.61856, 0), (16.35184, 0), (7.65236, 0), (18.02895, 0), (9.79458, 0), (16.7274, 0), (8.84453, 0), (13.05709, 0), (10.91447, 0), (8.40171, 0), (16.95211, 0), (11.82194, 0), (19.87978, 0), (12.88455, 0), (-0.00947, 0), (12.28109, 0), 
(6.96462, 0), (13.75282, 0), (14.39141, 0), (11.07193, 0), (12.88039, 0), (11.38253, 0), (21.02707, 0), (7.51955, 0), (6.31984, 0), (15.6543, 0), (14.80315, 0), (8.38024, 0), (21.7516, 0), (14.31336, 0), (15.04703, 0), (5.73787, 0), (13.16911, 0), (12.40695, 0), (9.88968, 0), (8.46703, 0), (8.70637, 0), (8.03551, 0), (5.9757, 0), (12.22951, 0), (3.14736, 0), (10.51266, 0), (18.593, 0), (10.82213, 0), (7.14216, 0), (6.81154, 0), (-0.6486, 0), (20.56136, 0), (11.35367, 0), (11.38205, 0), (17.14, 0), (14.91215, 0), (15.50207, 0), (5.93162, 0), (3.74869, 0), (14.11532, 0), (7.38954, 0), (5.45764, 0), (18.33733, 0), (9.91923, 0), (2.38991, 0), (14.16756, 0), (2.39791, 0), (6.92586, 0), (5.32474, 0), (2.28812, 0), (5.71718, 0), (5.84197, 0), (2.76206, 0), (19.05928, 0), (11.51788, 0), (6.56648, 0), (3.35735, 0), (7.55948, 0), (19.99908, 0), (13.00634, 0), (18.36886, 0), (11.14675, 0), (16.72931, 0), (12.50106, 0), (6.00605, 0), (23.06653, 0), (5.39694, 0), (9.53167, 0), (12.76944, 0), (7.20604, 0), (13.25391, 0), (13.7341, 0), (10.85292, 0), (-7.75835, 0), (10.29728, 0), (13.70099, 0), (10.17959, 0), (9.98399, 0), (12.69389, 0), (-0.28848, 0), (-2.18319, 0), (13.36378, 0), (10.09232, 0), (5.49489, 0), (5.46156, 0), (0.94225, 0), (12.79205, 0), (10.09593, 0), (6.06218, 0), (0.89463, 0), (11.88986, 0), (10.79733, 0), (1.51371, 0), (2.20967, 0), (15.45732, 0), (16.5262, 0), (5.99724, 0), (8.3613, 0), (15.68183, 0), (15.32117, 0), (14.15674, 0), (6.64553, 0), (4.20777, 0), (-0.10521, 0), (-0.88169, 0), (1.85913, 0), (9.73673, 0), (0.30926, 0), (6.17559, 0), (11.76602, 0), (5.68385, 0), (14.57088, 0), (12.81509, 0), (9.85682, 0), (12.06376, 0), (6.08874, 0), (11.63921, 0), (14.86722, 0), (10.41035, 0), (2.93794, 0), (12.21841, 0), (0.23804, 0), (3.14845, 0), (7.29748, 0), (3.06134, 0), (13.77684, 0), (16.21992, 0), (5.33511, 0), (9.68959, 0), (9.44169, 0), (18.08012, 0), (4.04224, 0), (8.77918, 0), (10.18324, 0), (9.38914, 0), (11.76995, 0), (14.19963, 0), (6.88817, 0), (16.56123, 0), (15.39885, 0), (5.21241, 0), (4.44408, 0), (17.87587, 0), (12.53337, 0), (13.60916, 0), (6.60104, 0), (7.35453, 0), (18.61572, 0), (6.10437, 0), (13.08682, 0), (12.15404, 0), (4.90789, 0), (2.13353, 0), (12.49593, 0), (11.93056, 0), (13.29408, 0), (5.70038, 0), (8.40271, 0), (5.19456, 0), (-5.51028, 0), (14.0329, 0), (10.38365, 0), (6.56812, 0), (4.21129, 0), (9.7157, 0), (9.88553, 0), (13.45346, 0), (4.97752, 0), (12.77595, 0), (8.56465, 0), (4.27703, 0), (18.12502, 0), (12.45735, 0), (12.42912, 0), (12.08125, 0), (10.85779, 0), (4.36013, 0), (11.85062, 0), (8.47776, 0), (9.60822, 0), (11.3069, 0), (14.25525, 0), (1.55168, 0), (14.57782, 0), (7.84786, 0), (9.87774, 0), (14.75575, 0), (3.68774, 0), (9.37667, 0), (20.28676, 0), (12.10027, 0), (8.01819, 0), (18.78158, 0), (20.85402, 0), (18.98069, 0), (16.1429, 0), (9.24047, 0), (14.12487, 0), (10.18841, 0), (-3.04478, 0), (5.7552, 0), (9.30376, 0), (11.42837, 0), (6.02364, 0), (8.86984, 0), (10.91177, 0), (10.04418, 0), (18.10774, 0), (7.49384, 0), (9.11556, 0), (9.7051, 0), (5.23268, 0), (9.04647, 0), (8.81547, 0), (2.65098, 0), (-2.69857, 1), (15.80943, 1), (7.31555, 1), (3.96517, 1), (4.77809, 1), (9.6472, 1), (-26.41717, 1), (-10.85635, 1), (-1.4376, 1), (-0.96308, 1), (2.84315, 1), (5.79467, 1), (-3.06091, 1), (-14.62902, 1), (22.08022, 1), (-2.11982, 1), (-4.84824, 1), (-10.50447, 1), (2.4891, 1), (9.90324, 1), (-22.66866, 1), (-0.97103, 1), (-16.57608, 1), (-3.78749, 1), (25.84511, 1), (5.30797, 1), (-18.19466, 1), (11.72708, 1), (0.2891, 1), (-9.83474, 1), 
(6.69942, 1), (18.09604, 1), (18.52651, 1), (1.38201, 1), (7.64615, 1), (17.66598, 1), (-2.44141, 1), (-9.01598, 1), (27.69142, 1), (4.06946, 1), (-15.0077, 1), (-10.49648, 1), (-4.88322, 1), (-25.09805, 1), (-4.64024, 1), (20.94434, 1), (24.12126, 1), (-14.10962, 1), (10.6512, 1), (14.50687, 1), (-19.88081, 1), (-11.55271, 1), (13.16921, 1), (16.63864, 1), (-24.08114, 1), (-9.09949, 1), (-10.54702, 1), (0.20813, 1), (8.19066, 1), (-2.70523, 1), (-0.23954, 1), (7.19398, 1), (-7.1618, 1), (-7.44322, 1), (-17.92031, 1), (-1.58146, 1), (9.18338, 1), (3.25838, 1), (-14.30234, 1), (1.84695, 1), (31.13794, 1), (-0.85067, 1), (19.02787, 1), (-3.09594, 1), (13.45584, 1), (-5.48104, 1), (-22.74928, 1), (-8.03697, 1), (17.31143, 1), (-16.65231, 1), (-18.58713, 1), (-16.52641, 1), (14.95261, 1), (12.56762, 1), (15.00188, 1), (1.85858, 1), (2.1926, 1), (-2.4095, 1), (21.56873, 1), (3.35509, 1), (-4.98672, 1), (35.08603, 1), (-10.01602, 1), (-3.85153, 1), (-6.81974, 1), (19.56525, 1), (-9.35488, 1), (0.24268, 1), (-3.51488, 1), (-0.37066, 1), (24.20888, 1), (-11.73537, 1), (0.01282, 1), (0.03963, 1), (-9.65589, 1), (-0.37429, 1), (5.61255, 1), (0.49984, 1), (-10.15066, 1), (-14.54314, 1), (16.56889, 1), (-7.73873, 1), (-3.76422, 1), (1.40722, 1), (2.28818, 1), (-13.12643, 1), (5.17082, 1), (4.79089, 1), (-17.42643, 1), (8.72548, 1), (-3.70285, 1), (16.77893, 1), (13.382, 1), (19.98418, 1), (0.00483, 1), (-4.75951, 1), (2.35391, 1), (21.65809, 1), (-9.2714, 1), (-18.38253, 1), (7.23097, 1), (14.97927, 1), (-4.02197, 1), (-29.8189, 1), (-12.8554, 1), (-7.60124, 1), (-14.90158, 1), (-3.31486, 1), (31.38144, 1), (-8.61288, 1), (15.31895, 1), (-10.19488, 1), (13.796, 1), (-0.32912, 1), (-0.0684, 1), (-30.06834, 1), (24.93912, 1), (-3.26506, 1), (-8.29751, 1), (-5.39189, 1), (-25.08603, 1), (-1.45318, 1), (16.72724, 1), (-3.38467, 1), (-26.00478, 1), (7.28369, 1), (16.96226, 1), (16.5858, 1), (10.46583, 1), (3.84345, 1), (-2.99382, 1), (1.42078, 1), (-11.0123, 1), (2.09909, 1), (1.21064, 1), (15.36079, 1), (-21.61349, 1), (22.7726, 1), (10.50512, 1), (-6.95825, 1), (9.20036, 1), (15.66902, 1), (3.28098, 1), (-9.05692, 1), (0.32882, 1), (-1.64934, 1), (-4.81406, 1), (-5.06006, 1), (19.97493, 1), (2.88646, 1), (-0.34552, 1), (7.55186, 1), (-22.96115, 1), (31.29166, 1), (6.18798, 1), (-2.52715, 1), (-11.58799, 1), (14.13596, 1), (13.45069, 1), (12.15179, 1), (3.44491, 1), (-8.78006, 1), (18.32087, 1), (11.91757, 1), (-2.00179, 1), (10.88411, 1), (9.09327, 1), (6.62484, 1), (8.87178, 1), (11.52254, 1), (-14.15988, 1), (-17.19515, 1), (14.03089, 1), (-2.4095, 1), (-16.83575, 1), (2.71469, 1), (4.84351, 1), (-1.17651, 1), (-3.37529, 1), (-19.92137, 1), (4.48952, 1), (-12.4906, 1), (-5.65277, 1), (8.50819, 1), (-19.61261, 1), (12.54156, 1), (11.06784, 1), (-12.59285, 1), (3.43683, 1), (-3.00325, 1), (12.49082, 1), (7.20955, 1), (17.6547, 1), (15.8619, 1), (24.3048, 1), (-8.05434, 1), (-6.06901, 1), (-15.69515, 1), (-11.13917, 1), (-3.90757, 1), (-2.57038, 1), (5.14065, 1), (17.8497, 1), (-8.64665, 1), (-18.68331, 1), (5.8567, 1), (-20.93884, 1), (4.40583, 1), (14.35985, 1), (4.18134, 1), (4.3635, 1), (9.35428, 1), (2.8908, 1), (16.01017, 1), (-1.48499, 1), (-9.97949, 1), (1.03055, 1), (-2.79697, 1), (6.85977, 1), (4.73213, 1), (2.7815, 1), (-2.46866, 1), (18.39425, 1), (-0.80378, 1), (-0.22982, 1), (-16.11608, 1), (3.0862, 1), (3.20779, 1), (10.50146, 1), (-0.21305, 1), (11.21012, 1), (-0.99825, 1), (18.39633, 1), (-3.39003, 1), (-0.64411, 1), (-1.39932, 1), (15.45319, 1), (-0.66044, 1), (-15.2223, 1), (-34.39907, 
1), (-3.57836, 1), (16.82828, 1), (1.66624, 1), (15.43475, 1), (8.17776, 1), (5.50486, 1), (10.43082, 1), (-6.63332, 1), (2.28008, 1), (16.37203, 1), (5.16313, 1), (-8.85281, 1), (13.26692, 1), (-7.46842, 1), (8.43091, 1), (-13.18172, 1), (-0.72401, 1), (22.3881, 1), (10.65448, 1), (2.81289, 1), (10.92405, 1), (-8.95358, 1), (19.80653, 1), (-12.86527, 1), (5.38826, 1), (-6.83501, 1), (-15.7647, 1), (-27.67412, 1), (8.6499, 1), (-4.89542, 1), (16.76167, 1), (12.84284, 1), (-17.27324, 1), (-4.18726, 1), (-14.62366, 1), (-5.49863, 1), (-16.22846, 1), (10.60329, 1), (6.46781, 1), (1.70458, 1), (10.77448, 1), (0.8463, 1), (13.0482, 1), (-4.36264, 1), (3.22647, 1), (2.38828, 1), (6.7946, 1), (-0.25254, 1), (1.2497, 1), (1.6544, 1), (4.1019, 1), (11.27839, 1), (-5.04127, 1), (18.11674, 1), (0.51231, 1), (-0.51029, 1), (13.52556, 1), (16.10171, 1), (5.68197, 1), (-2.85904, 1), (-8.89167, 1), (6.24489, 1), (10.85319, 1), (-0.39816, 1), (3.87079, 1), (-3.1867, 1), (1.55322, 1), (16.86779, 1), (-14.60321, 1), (-1.81952, 1), (-3.11624, 1), (1.24193, 1), (10.18179, 1), (4.69796, 1), (0.69032, 1), (11.7723, 1), (7.62896, 1), (9.89741, 1), (9.11484, 1), (-3.84676, 1), (-0.4777, 1), (0.95958, 1), (-7.95056, 1), (-10.97474, 1), (-6.54861, 1), (34.74933, 1), (27.39463, 1), (4.18299, 1), (6.02476, 1), (-1.99397, 1), (1.26478, 1), (23.37106, 1), (10.49682, 1), (-11.04354, 1), (-12.22284, 1), (-9.87635, 1), (28.90511, 1), (6.77613, 1), (0.55352, 1), (0.37031, 1), (7.1418, 1), (3.24897, 1), (-1.60918, 1), (3.1675, 1), (-17.97072, 1), (-5.61743, 1), (14.1422, 1), (14.87695, 1), (-4.65961, 1), (-0.99174, 1), (-2.96623, 1), (-9.02263, 1), (-17.2088, 1), (2.78608, 1), (6.74239, 1), (4.8524, 1), (7.46731, 1), (1.04894, 1), (-12.8023, 1), (-17.18188, 1), (-5.08801, 1), (22.13942, 1), (-0.36384, 1), (17.80564, 1), (7.67504, 1), (1.59779, 1), (4.10942, 1), (0.61074, 1), (-14.40767, 1), (10.59906, 1), (16.57017, 1), (-15.17526, 1), (-6.98549, 1), (-0.64548, 1), (3.23756, 1), (14.65504, 1), (4.583, 1), (12.72378, 1), (5.26547, 1), (0.81781, 1), (9.38273, 1), (10.37636, 1), (10.70325, 1), (-0.83043, 1), (-7.53149, 1), (-9.09147, 1), (-19.51381, 1), (-28.44508, 1), (6.44392, 1), (11.10201, 1), (-2.86184, 1), (8.30673, 1), (8.8797, 1), (10.68053, 1), (15.62919, 1), (8.00579, 1), (6.4651, 1), (-4.50029, 1), (18.04514, 1), (11.12996, 1), (-5.14007, 1), (9.43857, 1), (3.13476, 1), (4.9772, 1), (-17.45782, 1), (0.05552, 1), (-1.90283, 1), (2.67908, 1), (-2.62243, 1), (-3.22767, 1), (-8.70222, 1), (-23.11605, 1), (21.6757, 1), (12.70076, 1), (4.4322, 1), (11.69344, 1), (9.18052, 1), (-2.2549, 1), (-2.15615, 1), (20.29765, 1), (-0.29536, 1), (15.50109, 1), (8.79187, 1), (5.11533, 1), (-20.44436, 1), (-3.00909, 1), (-4.48291, 1), (21.84462, 1), (1.94225, 1), (-2.81908, 1), (17.19418, 1), (-9.33528, 1), (-0.17346, 1), (0.03958, 1), (-35.17786, 1), (8.36887, 1), (-9.02292, 1), (-10.98804, 1), (0.29335, 1), (4.29634, 1), (3.87718, 1), (-9.08532, 1), (7.13922, 1), (-7.62463, 1), (-10.5666, 1), (4.68165, 1), (-3.30172, 1), (13.04852, 1), (13.45616, 1), (2.41043, 1), (-0.36501, 1), (-15.67383, 1), (17.92217, 1), (8.42106, 1), (3.22063, 1), (-7.31753, 1), (21.99596, 1), (-36.8273, 1), (-20.46391, 1), (5.74179, 1), (-15.83178, 1), (14.90454, 1), (-8.84645, 1), (3.72036, 1), (4.6877, 1), (16.35418, 1), (3.15441, 1), (2.39907, 1), (-17.58664, 1), (-13.18269, 1); +SELECT '14.971190998235835', '5.898143508382202e-44'; +SELECT roundBankers(welchTTest(left, right).1, 6) as t_stat, roundBankers(welchTTest(left, right).2, 6) as p_value from 
welch_ttest; +SELECT roundBankers(welchTTest(0.95)(left, right).3, 6) as t_stat, roundBankers(welchTTest(0.95)(left, right).4, 6) as p_value from welch_ttest; +DROP TABLE IF EXISTS welch_ttest; + + +DROP TABLE IF EXISTS student_ttest; + +/*Check t-stat and p-value and compare them with the scipy.stats implementation + First: a=1, sigma (not sigma^2)=5, size=500 + Second: a=1, sigma = 5, size = 500 */ +CREATE TABLE student_ttest (left Float64, right Float64) ENGINE = Memory; +INSERT INTO student_ttest VALUES (0.88854, 0), (-5.76966, 0), (6.76618, 0), (3.55546, 0), (-9.76948, 0), (4.92323, 0), (-0.36352, 0), (0.97018, 0), (4.61656, 0), (-6.78292, 0), (4.02008, 0), (12.41838, 0), (5.14417, 0), (3.86836, 0), (-1.26199, 0), (12.44106, 0), (3.28349, 0), (1.77261, 0), (-8.94748, 0), (-1.01449, 0), (-1.26377, 0), (6.79682, 0), (6.32333, 0), (-8.21214, 0), (-1.68565, 0), (9.7557, 0), (3.66694, 0), (1.39967, 0), (-5.52035, 0), (-10.95601, 0), (0.93877, 0), (1.45933, 0), (-5.40551, 0), (-0.83857, 0), (8.50794, 0), (-6.68686, 0), (5.03099, 0), (1.56251, 0), (4.17381, 0), (-2.92644, 0), (5.11068, 0), (2.09617, 0), (11.7787, 0), (6.50336, 0), (0.62098, 0), (-7.97121, 0), (3.81902, 0), (0.33151, 0), (10.68584, 0), (0.56007, 0), (-7.38621, 0), (5.05882, 0), (2.34616, 0), (11.3806, 0), (5.95276, 0), (-3.01429, 0), (5.98169, 0), (0.96985, 0), (-1.15932, 0), (2.11547, 0), (2.49668, 0), (-12.49569, 0), (-4.94667, 0), (-3.64215, 0), (-8.35595, 0), (3.211, 0), (2.33805, 0), (2.38608, 0), (-3.2862, 0), (-0.80454, 0), (-0.53483, 0), (10.66445, 0), (-0.37619, 0), (0.48246, 0), (7.41919, 0), (0.42414, 0), (-2.32335, 0), (-0.70223, 0), (-5.9332, 0), (-1.20561, 0), (3.39865, 0), (9.61739, 0), (-0.78651, 0), (-4.00256, 0), (-7.99646, 0), (8.72923, 0), (0.71859, 0), (-1.62726, 0), (5.11234, 0), (-0.95625, 0), (-3.75573, 0), (1.03141, 0), (-3.33588, 0), (1.51804, 0), (-3.30935, 0), (-1.97507, 0), (4.06456, 0), (3.27195, 0), (-7.81761, 0), (-3.81785, 0), (-4.18311, 0), (-11.33313, 0), (-0.25221, 0), (7.2514, 0), (5.30301, 0), (2.46762, 0), (4.22716, 0), (0.33916, 0), (9.7638, 0), (-7.58684, 0), (-4.09888, 0), (4.26617, 0), (-0.56744, 0), (4.65125, 0), (-1.30301, 0), (4.53771, 0), (9.96929, 0), (3.72939, 0), (-2.29818, 0), (3.09417, 0), (0.82251, 0), (5.29975, 0), (2.8685, 0), (-5.73321, 0), (-1.85651, 0), (-1.07984, 0), (9.78342, 0), (-13.49652, 0), (3.68791, 0), (1.9998, 0), (1.11674, 0), (9.43869, 0), (4.07029, 0), (5.32715, 0), (7.16504, 0), (6.66096, 0), (-5.7111, 0), (-0.38575, 0), (4.49165, 0), (-3.36489, 0), (7.71814, 0), (-1.58966, 0), (-1.61063, 0), (-0.91602, 0), (0.73459, 0), (-3.24463, 0), (6.3947, 0), (-2.77845, 0), (4.45899, 0), (-8.84186, 0), (2.62276, 0), (1.774, 0), (4.3692, 0), (0.05942, 0), (-1.44042, 0), (-2.53594, 0), (-2.24752, 0), (4.98874, 0), (4.05434, 0), (-2.56483, 0), (-6.79286, 0), (-2.06165, 0), (-0.26056, 0), (1.89567, 0), (-3.15145, 0), (-7.31321, 0), (0.28936, 0), (-0.63111, 0), (0.22611, 0), (-9.3377, 0), (-5.76638, 0), (3.87306, 0), (6.7011, 0), (9.03915, 0), (-1.21835, 0), (0.82892, 0), (2.80656, 0), (-1.34746, 0), (-1.99912, 0), (0.6036, 0), (-3.46117, 0), (5.23732, 0), (-1.86702, 0), (-5.86115, 0), (6.48523, 0), (-7.40158, 0), (-1.38913, 0), (4.94613, 0), (-2.07818, 0), (2.39808, 0), (4.89238, 0), (4.39481, 0), (5.20425, 0), (13.62598, 0), (-2.86293, 0), (-3.62396, 0), (-4.28695, 0), (4.66425, 0), (2.20871, 0), (1.60382, 0), (-9.87024, 0), (-7.37302, 0), (-4.17814, 0), (2.5148, 0), (3.21708, 0), (-11.48089, 0), (1.19821, 0), (-0.07436, 0), (-1.10652, 0), (4.03395, 0), (-4.35883, 0),
(2.04013, 0), (0.52264, 0), (8.14004, 0), (-8.86949, 0), (-0.35807, 0), (-10.71113, 0), (-2.13755, 0), (0.50715, 0), (6.30826, 0), (2.37527, 0), (0.20872, 0), (-5.85729, 0), (-4.97217, 0), (-9.78434, 0), (-1.53277, 0), (0.14827, 0), (-1.053, 0), (1.74558, 0), (11.17194, 0), (9.35487, 0), (-9.17209, 0), (10.41814, 0), (7.41206, 0), (3.71775, 0), (-2.04674, 0), (6.18037, 0), (5.6383, 0), (-0.90058, 0), (-1.27073, 0), (-2.3473, 0), (-8.44271, 0), (2.75551, 0), (-1.15521, 0), (4.08722, 0), (-1.70399, 0), (7.24114, 0), (-8.43976, 0), (-1.53052, 0), (-0.00526, 0), (-4.04813, 0), (-2.84299, 0), (-5.201, 0), (7.75774, 0), (-2.85791, 0), (-3.86071, 0), (-1.80029, 0), (-5.26015, 0), (-3.158, 0), (7.71014, 0), (-4.84866, 0), (-8.38785, 0), (7.67021, 0), (4.96521, 0), (-0.40919, 0), (-3.25711, 0), (3.07685, 0), (2.89376, 0), (-10.47331, 0), (-3.48942, 0), (1.13906, 0), (-8.57454, 0), (-3.38963, 0), (-2.3195, 0), (-1.60694, 0), (-5.57406, 0), (-0.93075, 0), (-11.76579, 0), (10.68283, 0), (8.74324, 0), (7.66409, 0), (4.76715, 0), (0.44539, 0), (-1.35941, 0), (4.18849, 0), (-6.17097, 0), (0.27977, 0), (-1.45006, 0), (-4.81694, 0), (-3.0297, 0), (0.02145, 0), (2.46883, 0), (9.60317, 0), (-9.93898, 0), (1.05549, 0), (5.55366, 0), (-3.80722, 0), (-4.18851, 0), (1.00351, 0), (3.11385, 0), (-5.17623, 0), (-3.18396, 0), (-6.65302, 0), (-0.50832, 0), (-4.04375, 0), (4.52707, 0), (6.63124, 0), (-3.72082, 0), (5.79825, 0), (-2.0158, 0), (-2.78369, 0), (-1.91821, 0), (6.31714, 0), (-1.80869, 0), (8.55586, 0), (2.40826, 0), (-8.46361, 0), (5.04452, 0), (-0.84665, 0), (2.30903, 0), (-3.71837, 0), (-0.69419, 0), (3.6733, 0), (-1.96098, 0), (2.36747, 0), (-12.03622, 0), (4.38481, 0), (2.93955, 0), (2.16804, 0), (-0.08218, 0), (-3.97934, 0), (-7.43985, 0), (0.91666, 0), (7.23432, 0), (-6.13303, 0), (-10.23217, 0), (-6.21681, 0), (-0.80934, 0), (0.17914, 0), (2.13338, 0), (6.97656, 0), (6.90455, 0), (6.25943, 0), (-6.04019, 0), (-7.30909, 0), (1.4589, 0), (12.00208, 0), (2.22457, 0), (-2.45912, 0), (-6.92213, 0), (4.05547, 0), (0.04709, 0), (-7.70952, 0), (-1.47883, 0), (1.3701, 0), (-4.92928, 0), (-2.75872, 0), (-0.09178, 0), (2.62642, 0), (-1.14623, 0), (2.76609, 0), (4.94404, 0), (-7.01764, 0), (-10.91568, 0), (-2.49738, 0), (0.73576, 0), (2.25436, 0), (-1.72956, 0), (2.41054, 0), (5.72149, 0), (-6.41371, 0), (3.38217, 0), (1.24133, 0), (10.03634, 0), (-2.37303, 0), (-1.35543, 0), (-1.4387, 0), (-4.0976, 0), (-0.82501, 0), (-1.93498, 0), (5.59955, 0), (5.46656, 0), (2.43568, 0), (-0.23926, 0), (-4.9945, 0), (-4.96655, 0), (-0.59258, 0), (2.02497, 0), (0.67583, 0), (3.16522, 0), (-1.9673, 0), (-6.75319, 0), (-6.69723, 0), (0.81148, 0), (4.44531, 0), (-4.43522, 0), (-5.28602, 0), (-3.58829, 0), (-7.97395, 0), (-2.84891, 0), (-3.95112, 0), (3.54945, 0), (12.12376, 0), (-3.12347, 0), (3.65209, 0), (9.34031, 0), (-0.26348, 0), (-5.23968, 0), (2.22336, 0), (-10.70405, 0), (-4.41319, 0), (-5.94912, 0), (1.8147, 0), (7.69287, 0), (9.46125, 0), (4.72497, 0), (-0.57565, 0), (-1.12303, 0), (2.90272, 0), (-4.4584, 0), (4.28819, 0), (11.64512, 0), (-1.80395, 0), (2.51605, 0), (-3.18439, 0), (-0.70213, 0), (-7.68383, 0), (-8.32268, 0), (-8.71115, 0), (9.96933, 0), (0.95675, 0), (3.35114, 0), (-2.66008, 0), (7.75456, 0), (0.73568, 0), (0.3483, 0), (-1.09203, 0), (-7.76963, 0), (5.81902, 0), (-3.41424, 0), (-0.39209, 0), (4.67608, 0), (0.68753, 0), (5.17179, 0), (4.98983, 0), (-0.12659, 0), (3.25267, 0), (1.50184, 0), (2.94507, 0), (-0.42333, 0), (-3.66227, 0), (8.90812, 0), (4.74411, 0), (2.22018, 0), (-2.07976, 0), (4.8711, 0), 
(0.5023, 0), (6.31569, 0), (-4.36903, 0), (3.82146, 0), (-6.99477, 0), (3.61225, 0), (14.69335, 0), (0.58368, 0), (4.65341, 0), (-3.14272, 0), (2.67048, 0), (4.64963, 0), (-2.70828, 0), (1.42923, 0), (5.84498, 0), (-4.76568, 0), (0.19907, 0), (1.67486, 0), (5.32145, 0), (-8.03477, 0), (3.46776, 0), (4.66374, 0), (-5.37394, 0), (5.39045, 0), (-1.44756, 0), (-1.64419, 0), (3.39699, 0), (-2.94659, 0), (-2.38437, 0), (-0.23958, 0), (6.88389, 0), (-2.7172, 0), (-1.53419, 0), (7.38841, 0), (-5.44178, 0), (-0.89287, 0), (2.93546, 0), (-0.26901, 0), (-4.70044, 0), (2.25846, 0), (-9.28813, 0), (6.04268, 0), (4.41693, 0), (1.75714, 0), (-2.90702, 1), (3.61651, 1), (4.27458, 1), (4.82133, 1), (9.59483, 1), (1.00424, 1), (2.04147, 1), (-3.58214, 1), (6.59543, 1), (-1.00532, 1), (-3.59794, 1), (-2.82434, 1), (-3.13194, 1), (9.90977, 1), (0.523, 1), (4.62779, 1), (-2.56872, 1), (2.25807, 1), (1.04044, 1), (-2.35744, 1), (10.81531, 1), (-9.68469, 1), (3.80885, 1), (12.70435, 1), (-6.01112, 1), (1.89065, 1), (5.08892, 1), (3.45254, 1), (11.58151, 1), (0.85035, 1), (8.38397, 1), (1.17169, 1), (4.74621, 1), (-1.66614, 1), (4.2414, 1), (1.68765, 1), (1.85223, 1), (9.10111, 1), (-2.38085, 1), (-14.79595, 1), (-3.8938, 1), (-3.41864, 1), (-3.15282, 1), (-0.56684, 1), (12.87997, 1), (6.89115, 1), (12.921, 1), (-7.94908, 1), (2.45687, 1), (2.14957, 1), (7.55081, 1), (-3.71534, 1), (-2.41064, 1), (-0.80734, 1), (-4.75651, 1), (2.05241, 1), (-5.44523, 1), (-2.75054, 1), (-13.00131, 1), (-2.74451, 1), (-1.39004, 1), (-3.02854, 1), (7.65112, 1), (1.1245, 1), (6.74117, 1), (-0.75777, 1), (8.93451, 1), (-8.85559, 1), (-0.36405, 1), (4.02742, 1), (6.88718, 1), (-1.05124, 1), (3.04085, 1), (3.32368, 1), (1.147, 1), (3.41554, 1), (-3.47851, 1), (-0.47684, 1), (-0.55605, 1), (-0.17006, 1), (2.26218, 1), (12.45494, 1), (-1.84097, 1), (1.64934, 1), (-7.07496, 1), (-9.99462, 1), (6.09954, 1), (-1.05319, 1), (3.04757, 1), (0.93899, 1), (-4.63243, 1), (-7.43322, 1), (-7.298, 1), (-6.59016, 1), (-6.11649, 1), (0.56682, 1), (2.00661, 1), (-2.79814, 1), (2.84482, 1), (3.65348, 1), (-4.22807, 1), (-4.54336, 1), (-3.63343, 1), (2.96878, 1), (6.11661, 1), (-1.70919, 1), (-4.71133, 1), (6.09652, 1), (-6.83454, 1), (0.18006, 1), (1.51676, 1), (-5.31646, 1), (-3.21215, 1), (-5.07599, 1), (-2.36591, 1), (3.55724, 1), (4.8904, 1), (-3.22586, 1), (-1.74928, 1), (5.73458, 1), (1.41188, 1), (2.86255, 1), (2.90179, 1), (-2.19949, 1), (1.72727, 1), (1.76939, 1), (-0.12848, 1), (-0.52, 1), (3.48333, 1), (7.8262, 1), (0.09099, 1), (7.77017, 1), (9.49484, 1), (1.42825, 1), (1.99624, 1), (4.00419, 1), (1.07925, 1), (-0.09987, 1), (-5.48733, 1), (-1.83517, 1), (2.38059, 1), (1.42075, 1), (-1.11968, 1), (-6.46035, 1), (7.66576, 1), (4.6307, 1), (5.55989, 1), (3.16684, 1), (5.07671, 1), (-10.20566, 1), (-4.73386, 1), (1.28353, 1), (6.75679, 1), (12.09895, 1), (7.0049, 1), (7.16156, 1), (-0.64311, 1), (-0.66747, 1), (3.99996, 1), (9.07298, 1), (-4.60971, 1), (0.70744, 1), (2.56774, 1), (9.32424, 1), (3.95087, 1), (7.11372, 1), (-0.89284, 1), (8.6155, 1), (-0.14141, 1), (-4.86319, 1), (-6.95801, 1), (4.44883, 1), (4.6156, 1), (-2.3579, 1), (-5.1186, 1), (8.12819, 1), (2.78392, 1), (-4.30221, 1), (-1.47506, 1), (6.8598, 1), (0.47636, 1), (0.95383, 1), (7.79779, 1), (-2.61767, 1), (-10.5087, 1), (-2.74299, 1), (3.87369, 1), (-1.07093, 1), (4.98864, 1), (-7.50772, 1), (6.41316, 1), (1.39061, 1), (-3.1747, 1), (-2.13621, 1), (-0.02203, 1), (0.89025, 1), (-5.87746, 1), (3.60026, 1), (-0.23178, 1), (-2.1897, 1), (-5.85101, 1), (-1.6053, 1), (3.6184, 1), 
(-8.53795, 1), (-0.35987, 1), (2.15301, 1), (-6.60692, 1), (9.54341, 1), (1.11511, 1), (2.94025, 1), (12.05657, 1), (3.75156, 1), (7.95597, 1), (-0.99449, 1), (0.90597, 1), (-7.90627, 1), (3.50863, 1), (-1.47493, 1), (4.11671, 1), (10.06325, 1), (-1.06059, 1), (-1.37737, 1), (-0.42542, 1), (-3.90267, 1), (9.35037, 1), (-7.91219, 1), (-4.69945, 1), (3.63776, 1), (3.46492, 1), (2.84518, 1), (-3.04301, 1), (8.82764, 1), (7.80134, 1), (7.87755, 1), (7.01035, 1), (2.43271, 1), (11.36418, 1), (-6.92659, 1), (5.95541, 1), (3.59436, 1), (5.18429, 1), (4.20225, 1), (0.5029, 1), (4.03074, 1), (5.23152, 1), (10.65409, 1), (-0.69845, 1), (11.70096, 1), (5.80692, 1), (-8.1819, 1), (4.31485, 1), (5.7227, 1), (5.67398, 1), (-1.75826, 1), (7.54164, 1), (-1.79026, 1), (-1.7395, 1), (5.65042, 1), (0.38765, 1), (-4.64719, 1), (-10.22048, 1), (-2.05447, 1), (-2.43441, 1), (-5.38551, 1), (5.47764, 1), (8.26637, 1), (-3.6421, 1), (-11.66269, 1), (3.972, 1), (5.46642, 1), (-3.72304, 1), (5.75251, 1), (5.12841, 1), (0.59067, 1), (5.21138, 1), (-4.58702, 1), (-8.737, 1), (-2.12737, 1), (0.22888, 1), (-1.46448, 1), (2.40311, 1), (-5.21814, 1), (13.94749, 1), (-2.77448, 1), (-3.7867, 1), (3.4954, 1), (3.12586, 1), (-7.01485, 1), (-3.20727, 1), (6.31415, 1), (2.37521, 1), (8.13787, 1), (2.15956, 1), (-0.40842, 1), (-7.27283, 1), (4.27575, 1), (-2.89126, 1), (6.84344, 1), (7.0869, 1), (-5.18837, 1), (2.67648, 1), (-6.57021, 1), (0.60429, 1), (-1.04921, 1), (7.12873, 1), (1.68973, 1), (-2.58404, 1), (-3.83114, 1), (-7.26546, 1), (-5.07153, 1), (-0.80395, 1), (2.09455, 1), (4.33374, 1), (8.54335, 1), (0.80566, 1), (-8.38085, 1), (7.54812, 1), (8.78007, 1), (1.5857, 1), (8.43855, 1), (-1.90846, 1), (-1.2434, 1), (7.16172, 1), (-3.44129, 1), (-6.37542, 1), (-4.99486, 1), (4.99033, 1), (-1.83734, 1), (-2.83289, 1), (-4.13997, 1), (1.40163, 1), (8.57867, 1), (-1.87639, 1), (3.41667, 1), (6.31762, 1), (1.58473, 1), (1.63625, 1), (-6.93618, 1), (3.58046, 1), (-6.8097, 1), (4.69978, 1), (-1.72912, 1), (5.29491, 1), (-1.63062, 1), (5.83818, 1), (17.0769, 1), (4.54301, 1), (-1.33801, 1), (5.64339, 1), (1.26913, 1), (-1.01553, 1), (4.8316, 1), (3.08635, 1), (-2.27738, 1), (-1.13761, 1), (10.08698, 1), (5.33827, 1), (2.84345, 1), (-1.51132, 1), (13.46078, 1), (8.58965, 1), (-2.36683, 1), (-1.8217, 1), (1.96981, 1), (2.31718, 1), (3.66493, 1), (1.93104, 1), (5.20332, 1), (3.20519, 1), (3.34631, 1), (7.0087, 1), (-7.96126, 1), (-0.62182, 1), (-4.65227, 1), (10.6572, 1), (4.50891, 1), (9.74298, 1), (3.85707, 1), (6.41144, 1), (1.48649, 1), (2.28076, 1), (2.75342, 1), (-5.40401, 1), (7.11389, 1), (5.74368, 1), (6.78345, 1), (3.83773, 1), (0.70959, 1), (0.57434, 1), (1.5888, 1), (3.94889, 1), (5.8234, 1), (7.78366, 1), (9.08354, 1), (-7.99182, 1), (-2.77033, 1), (-10.29342, 1), (1.76251, 1), (2.09266, 1), (4.20614, 1), (-3.63064, 1), (-2.17794, 1), (-2.66225, 1), (-2.74707, 1), (-1.93431, 1), (1.38629, 1), (4.12816, 1), (-1.58902, 1), (-5.08864, 1), (-2.30491, 1), (2.64605, 1), (1.16158, 1), (2.63534, 1), (1.4956, 1), (-4.60768, 1), (0.60771, 1), (3.29549, 1), (-1.42592, 1), (0.8883, 1), (-1.10612, 1), (-2.57296, 1), (5.88085, 1), (7.40745, 1), (13.48116, 1), (5.53539, 1), (-1.46014, 1), (3.73304, 1), (3.5435, 1), (-3.89151, 1), (4.16265, 1), (2.32663, 1), (5.31735, 1), (6.33485, 1), (2.1339, 1), (0.82708, 1), (-2.95155, 1), (-6.76019, 1), (-4.20179, 1), (8.78354, 1), (1.41863, 1), (7.65689, 1), (-6.52601, 1), (-4.4426, 1), (-4.49483, 1), (-3.91479, 1), (-2.84562, 1), (2.58974, 1), (2.24424, 1), (-4.65846, 1), (8.4062, 1), (8.20262, 
1), (-8.63752, 1), (4.97966, 1), (-0.35563, 1), (-4.72116, 1), (-2.95997, 1), (2.73959, 1), (-0.23956, 1), (10.13915, 1), (11.83775, 1), (-2.50332, 1), (-0.58181, 1), (-7.62836, 1), (2.26478, 1), (-3.50179, 1), (-2.08023, 1), (4.07256, 1), (-1.40826, 1), (-2.33644, 1), (3.00197, 1), (4.23668, 1), (-2.24647, 1), (1.0445, 1), (-0.31901, 1), (8.62657, 1), (3.92817, 1), (0.08462, 1), (10.15884, 1), (0.4113, 1), (4.45847, 1), (5.82941, 1), (6.59202, 1), (-3.73441, 1), (-5.86969, 1), (-4.56543, 1), (-1.32636, 1), (-0.17884, 1), (-3.56181, 1), (-0.66932, 1), (6.87538, 1), (0.73527, 1), (-0.24177, 1), (-0.8657, 1), (-0.22977, 1), (1.02095, 1), (6.16311, 1), (-5.68027, 1), (-3.7619, 1), (4.22959, 1), (-1.5249, 1); +SELECT '-2.610898982580138', '0.00916587538237954'; +SELECT roundBankers(studentTTest(left, right).1, 6) as t_stat, roundBankers(studentTTest(left, right).2, 6) as p_value from student_ttest; +DROP TABLE IF EXISTS student_ttest; + +/*Check t-stat and p-value and compare them with the scipy.stats implementation + First: a=1, sigma (not sigma^2)=5, size=500 + Second: a=10, sigma = 5, size = 500 (so the expected t is roughly (1-10)/(5*sqrt(2/500)) ≈ -28.5, consistent with the reference value below) */ +CREATE TABLE student_ttest (left Float64, right Float64) ENGINE = Memory; + +INSERT INTO student_ttest VALUES (4.52546, 0), (8.69444, 1), (3.73628, 0), (3.81414, 1), (-0.39478, 0), (12.38442, 1), (5.15633, 0), (8.9738, 1), (0.50539, 0), (9.19594, 1), (-5.34036, 0), (7.21009, 1), (0.19336, 0), (4.97743, 1), (8.35729, 0), (4.94756, 1), (6.95818, 0), (19.80911, 1), (-2.93812, 0), (13.75358, 1), (8.30807, 0), (16.56373, 1), (-3.3517, 0), (9.72882, 1), (4.16279, 0), (4.64509, 1), (-3.17231, 0), (17.76854, 1), (1.93545, 0), (4.80693, 1), (11.06606, 0), (8.79505, 1), (-4.22678, 0), (10.88868, 1), (-1.99975, 0), (6.21932, 1), (-4.51178, 0), (15.11614, 1), (-4.50711, 0), (13.24703, 1), (1.89786, 0), (14.76476, 1), (-6.19638, 0), (-0.6117, 1), (-3.70188, 0), (17.48993, 1), (5.01334, 0), (12.11847, 1), (1.79036, 0), (4.87439, 1), (2.14435, 0), (18.56479, 1), (3.0282, 0), (1.23712, 1), (2.35528, 0), (5.41596, 1), (-12.18535, 0), (4.54994, 1), (5.59709, 0), (11.37668, 1), (-12.92336, 0), (9.5982, 1), (-0.04281, 0), (6.59822, 1), (-0.16923, 0), (1.16703, 1), (0.88924, 0), (8.88418, 1), (-4.68414, 0), (10.95047, 1), (8.01099, 0), (5.52787, 1), (2.61686, 0), (-1.11647, 1), (-2.76895, 0), (14.49946, 1), (3.32165, 0), (3.27585, 1), (-0.85135, 0), (-0.42025, 1), (1.21368, 0), (6.37906, 1), (4.38673, 0), (2.5242, 1), (6.20964, 0), (8.1405, 1), (-1.23172, 0), (6.46732, 1), (4.65516, 0), (9.89332, 1), (-1.87143, 0), (10.4374, 1), (0.86429, 0), (-1.06465, 1), (2.51184, 0), (6.84902, 1), (-1.88822, 0), (10.96576, 1), (-1.61802, 0), (7.83319, 1), (1.93653, 0), (14.39823, 1), (-3.66631, 0), (7.02594, 1), (-1.05294, 0), (13.46629, 1), (-10.74718, 0), (10.39531, 1), (16.49295, 0), (11.27348, 1), (-7.65494, 0), (9.32187, 1), (-3.39303, 0), (12.32667, 1), (-4.89418, 0), (8.98905, 1), (3.2521, 0), (9.54757, 1), (0.05831, 0), (5.98325, 1), (-3.00409, 0), (3.47248, 1), (5.76702, 0), (9.26966, 1), (2.67674, 0), (5.77816, 1), (10.52623, 0), (6.32966, 1), (-0.54501, 0), (9.49313, 1), (-4.89835, 0), (6.21337, 1), (3.52457, 0), (10.00242, 1), (-0.0451, 0), (6.25167, 1), (-6.61226, 0), (15.64671, 1), (9.02391, 0), (2.78968, 1), (5.52571, 0), (6.55442, 1), (4.54352, 0), (3.68819, 1), (-3.8394, 0), (9.55934, 1), (-7.75295, 0), (4.166, 1), (5.91167, 0), (12.32471, 1), (1.38897, 0), (7.10969, 1), (6.24166, 0), (16.31723, 1), (5.58536, 0), (12.99482, 1), (4.7591, 0), (10.11585, 1), (-2.58336, 0), (10.29455, 1), (-1.91263, 0), (18.27524, 1),
(3.31575, 0), (12.84435, 1), (5.3507, 0), (13.11954, 1), (-15.22081, 0), (12.84147, 1), (-0.84775, 0), (15.55658, 1), (-4.538, 0), (11.45329, 1), (6.71177, 0), (7.50912, 1), (0.52882, 0), (8.56226, 1), (2.0242, 0), (8.63104, 1), (5.69146, 0), (15.68026, 1), (4.63328, 0), (21.6361, 1), (0.22984, 0), (6.23925, 1), (-2.84052, 0), (8.65714, 1), (7.91867, 0), (9.9423, 1), (1.11001, 0), (12.28213, 1), (-0.11251, 0), (3.11279, 1), (-0.20905, 0), (13.58128, 1), (0.03287, 0), (16.51407, 1), (-1.59397, 0), (16.60476, 1), (-5.39405, 0), (12.02022, 1), (-7.1233, 0), (12.11035, 1), (4.51517, 0), (9.47832, 1), (-0.70967, 0), (6.40742, 1), (5.67299, 0), (8.87252, 1), (-0.33835, 0), (15.14265, 1), (-1.83047, 0), (2.23572, 1), (-0.62877, 0), (11.57144, 1), (-7.23148, 0), (18.87737, 1), (0.1802, 0), (12.1833, 1), (11.73325, 0), (11.17519, 1), (2.17603, 0), (16.80422, 1), (-0.11683, 0), (6.81423, 1), (-1.29102, 0), (12.12546, 1), (-0.23201, 0), (8.06153, 1), (-6.8643, 0), (10.97228, 1), (-6.85153, 0), (7.30596, 1), (-4.77163, 0), (15.44026, 1), (6.11721, 0), (8.00993, 1), (5.96406, 0), (12.60196, 1), (3.59135, 0), (13.96832, 1), (-0.60095, 0), (14.03207, 1), (3.11163, 0), (4.53758, 1), (-0.18831, 0), (8.08297, 1), (0.67657, 0), (4.90451, 1), (-3.16117, 0), (8.14253, 1), (0.26957, 0), (19.88605, 1), (2.18653, 0), (13.85254, 1), (-5.94611, 0), (23.01839, 1), (-4.39352, 0), (6.02084, 1), (-3.71525, 0), (9.60319, 1), (5.11103, 0), (1.90511, 1), (1.33998, 0), (10.35237, 1), (1.01629, 0), (16.27082, 1), (-3.36917, 0), (12.52379, 1), (-3.99661, 0), (11.37435, 1), (8.19336, 0), (13.61823, 1), (2.89168, 0), (15.77622, 1), (-11.10373, 0), (15.17254, 1), (11.68005, 0), (6.711, 1), (3.08282, 0), (4.74205, 1), (-6.81506, 0), (10.09812, 1), (-2.34587, 0), (6.61722, 1), (-2.68725, 0), (10.34164, 1), (0.3577, 0), (8.96602, 1), (-3.05682, 0), (12.32157, 1), (9.08062, 0), (11.75711, 1), (-0.77913, 0), (13.49499, 1), (10.35215, 0), (8.57713, 1), (6.82565, 0), (11.50313, 1), (-1.24674, 0), (1.13097, 1), (5.18822, 0), (7.83205, 1), (-3.70743, 0), (5.77957, 1), (1.40319, 0), (15.5519, 1), (5.89432, 0), (10.82676, 1), (1.43152, 0), (11.51218, 1), (6.70638, 0), (9.29779, 1), (9.76613, 0), (9.77021, 1), (4.27604, 0), (9.94114, 1), (-2.63141, 0), (15.54513, 1), (-7.8133, 0), (19.10736, 1), (-0.06668, 0), (15.04205, 1), (1.05391, 0), (9.03114, 1), (4.41797, 0), (24.0104, 1), (0.09337, 0), (9.94205, 1), (6.16075, 0), (2.5925, 1), (7.49413, 0), (8.82726, 1), (-3.52872, 0), (10.0209, 1), (-2.17126, 0), (8.1635, 1), (-3.87605, 0), (4.24074, 1), (3.26607, 0), (7.67291, 1), (-3.28045, 0), (5.21642, 1), (2.1429, 0), (11.2808, 1), (1.53386, 0), (6.88172, 1), (0.21169, 0), (5.98743, 1), (-0.63674, 0), (17.97249, 1), (5.84893, 0), (6.46323, 1), (-0.63498, 0), (15.37416, 1), (8.29526, 0), (2.89957, 1), (-1.08358, 0), (17.13044, 1), (-2.306, 0), (11.06355, 1), (2.86991, 0), (3.09625, 1), (-0.76074, 0), (-2.33019, 1), (5.49191, 0), (7.42675, 1), (1.82883, 0), (15.06792, 1), (-3.70497, 0), (8.81116, 1), (-0.53232, 0), (19.17446, 1), (-11.49722, 0), (18.77181, 1), (3.44877, 0), (14.06443, 1), (-1.8596, 0), (12.81241, 1), (-10.34851, 0), (2.72299, 1), (1.13093, 0), (18.67739, 1), (-10.93389, 0), (11.63275, 1), (-3.39703, 0), (2.23891, 1), (0.19749, 0), (13.01195, 1), (-3.68389, 0), (7.43402, 1), (-4.67863, 0), (8.14599, 1), (10.78916, 0), (16.65328, 1), (0.37675, 0), (1.362, 1), (3.98094, 0), (3.87957, 1), (-3.64775, 0), (11.16134, 1), (-4.8443, 0), (6.25357, 1), (1.102, 0), (4.21945, 1), (8.72112, 0), (12.50047, 1), (-1.47361, 0), (6.45486, 1), 
(6.24183, 0), (18.99924, 1), (6.83569, 0), (18.09508, 1), (-3.11684, 0), (13.59528, 1), (4.91306, 0), (3.39681, 1), (-0.03628, 0), (13.33157, 1), (5.1282, 0), (5.8945, 1), (-2.38558, 0), (5.61212, 1), (2.33351, 0), (8.41149, 1), (-0.97191, 0), (13.78608, 1), (-0.05588, 0), (6.08609, 1), (-4.70019, 0), (12.76962, 1), (-5.12371, 0), (3.26206, 1), (0.65606, 0), (0.25528, 1), (-0.11574, 0), (11.9083, 1), (4.4238, 0), (4.35071, 1), (6.93399, 0), (11.19855, 1), (3.68712, 0), (13.87404, 1), (-0.01187, 0), (6.87986, 1), (1.8332, 0), (8.32566, 1), (5.81322, 0), (22.51334, 1), (-4.04709, 0), (2.5226, 1), (-8.26397, 0), (16.84498, 1), (-2.11273, 0), (6.26108, 1), (5.28396, 0), (13.84824, 1), (0.73054, 0), (6.03262, 1), (6.43559, 0), (14.12668, 1), (4.35565, 0), (16.01939, 1), (-1.05545, 0), (8.19237, 1), (5.00087, 0), (18.01595, 1), (-2.72239, 0), (9.45609, 1), (7.32313, 0), (6.90459, 1), (2.11548, 0), (12.83115, 1), (-3.40953, 0), (10.603, 1), (6.97051, 0), (13.70439, 1), (-0.45567, 0), (6.1633, 1), (1.31699, 0), (4.1151, 1), (-1.49871, 0), (8.20499, 1), (7.14772, 0), (11.67903, 1), (0.79277, 0), (7.30851, 1), (6.9698, 0), (6.50941, 1), (2.08733, 0), (7.3949, 1), (-3.55962, 0), (12.80075, 1), (0.75601, 0), (5.62043, 1), (1.21, 0), (18.2542, 1), (-2.17877, 0), (17.9393, 1), (1.83206, 0), (16.4569, 1), (5.72463, 0), (8.78811, 1), (7.42257, 0), (4.85949, 1), (0.97829, 0), (-3.36394, 1), (7.54238, 0), (5.38683, 1), (9.91081, 0), (12.26083, 1), (-4.61743, 0), (10.27907, 1), (-4.40799, 0), (11.5144, 1), (9.99854, 0), (11.57335, 1), (8.53725, 0), (1.94203, 1), (3.2905, 0), (7.78228, 1), (0.38634, 0), (11.79385, 1), (-2.53374, 0), (10.18415, 1), (4.94758, 0), (14.67613, 1), (4.79624, 0), (4.70301, 1), (5.57664, 0), (12.72151, 1), (-6.44871, 0), (-3.35508, 1), (3.34431, 0), (17.63775, 1), (0.14209, 0), (2.53883, 1), (10.88431, 0), (14.01483, 1), (0.31846, 0), (12.4387, 1), (-0.54703, 0), (11.15408, 1), (-4.67791, 0), (7.74882, 1), (-5.68011, 0), (13.60956, 1), (-4.93362, 0), (7.81991, 1), (1.2271, 0), (10.90969, 1), (5.27512, 0), (8.19828, 1), (-3.84611, 0), (-1.18523, 1), (6.81706, 0), (0.5916, 1), (10.33033, 0), (0.35805, 1), (5.13979, 0), (12.98364, 1), (3.66534, 0), (11.38628, 1), (-2.07219, 0), (13.94644, 1), (10.65442, 0), (2.03781, 1), (-3.31751, 0), (10.74447, 1), (-1.82011, 0), (12.35656, 1), (-0.39886, 0), (7.08701, 1), (1.77052, 0), (2.69871, 1), (1.29049, 0), (19.66653, 1), (7.92344, 0), (7.88636, 1), (-2.92595, 0), (10.36916, 1), (-2.67107, 0), (1.632, 1), (5.64708, 0), (11.86081, 1), (0.34639, 0), (13.47602, 1), (-3.04356, 0), (6.60204, 1), (3.98828, 0), (7.01303, 1), (-1.36695, 0), (20.19992, 1), (-8.48462, 0), (18.88249, 1), (-4.04669, 0), (11.34367, 1), (9.84561, 0), (12.97305, 1), (-6.1537, 0), (9.5776, 1), (0.82433, 0), (17.91364, 1), (1.92449, 0), (18.3247, 1), (2.51288, 0), (9.9211, 1), (0.40965, 0), (7.14257, 1), (2.89183, 0), (6.59133, 1), (3.84347, 0), (12.35274, 1), (0.66829, 0), (10.57523, 1), (-3.45094, 0), (12.12859, 1), (1.3544, 0), (9.47177, 1), (-9.85456, 0), (0.60659, 1), (5.25689, 0), (4.72996, 1), (-5.26018, 0), (4.51121, 1), (-6.16912, 0), (13.28893, 1), (-1.77163, 0), (8.09014, 1), (3.96687, 0), (8.02511, 1), (0.70893, 0), (13.85406, 1), (-5.45342, 0), (1.75412, 1), (-3.89706, 0), (6.00641, 1), (3.11868, 0), (6.35554, 1), (4.41714, 0), (7.11293, 1), (7.64841, 0), (8.30442, 1), (0.00489, 0), (12.63024, 1), (3.2263, 0), (12.38966, 1), (-5.33042, 0), (7.6801, 1), (2.52189, 0), (11.33744, 1), (-7.40308, 0), (4.67713, 1), (0.67891, 0), (7.62276, 1), (2.49343, 0), (2.14478, 1), 
(5.43133, 0), (15.32988, 1), (-0.67541, 0), (1.52299, 1), (-0.60299, 0), (17.00017, 1), (-6.32903, 0), (8.29701, 1), (-3.44336, 0), (10.92961, 1), (-0.23963, 0), (6.78449, 1), (6.94686, 0), (7.02698, 1), (6.59442, 0), (11.51719, 1), (-4.18532, 0), (9.97926, 1), (-1.8228, 0), (7.44251, 1), (-0.29443, 0), (7.58541, 1), (2.99821, 0), (4.76058, 1), (2.51942, 0), (12.88959, 1), (-3.49176, 0), (9.974, 1), (-0.57979, 0), (17.03689, 1), (8.69471, 0), (11.14554, 1), (-1.19427, 0), (11.7392, 1), (-3.17119, 0), (11.50029, 1), (-2.99566, 0), (19.41759, 1), (-3.34493, 0), (9.65127, 1), (-2.33826, 0), (9.87673, 1), (-5.04164, 0), (14.13485, 1), (-0.48214, 0), (9.78034, 1), (7.45097, 0), (1.57826, 1), (3.04787, 0), (3.72091, 1), (2.92632, 0), (9.4054, 1), (1.39694, 0), (23.22816, 1), (4.38686, 0), (-0.12571, 1), (3.25753, 0), (6.97343, 1), (7.14218, 0), (10.09049, 1), (-4.04341, 0), (11.78393, 1), (-9.19352, 0), (3.01909, 1), (2.78473, 0), (16.09448, 1), (0.33331, 0), (6.25485, 1), (9.89238, 0), (7.13164, 1), (6.00566, 0), (7.75879, 1), (-1.7511, 0), (9.56834, 1), (4.77815, 0), (6.14824, 1), (5.07457, 0), (13.53454, 1), (2.56132, 0), (8.26364, 1), (2.38317, 0), (8.7095, 1), (-1.63486, 0), (10.61607, 1), (-1.46871, 0), (10.64418, 1), (-5.8681, 0), (23.9106, 1), (-2.96227, 0), (11.38978, 1), (-1.90638, 0), (11.4383, 1), (-13.3052, 0), (18.41498, 1), (-2.14705, 0), (3.70959, 1), (-9.62069, 0), (19.95918, 1), (2.29313, 0), (9.53847, 1), (0.22162, 0), (14.04957, 1), (-1.83956, 0), (13.70151, 1), (4.1853, 0), (5.45046, 1), (6.05965, 0), (10.95061, 1), (-0.23737, 0), (9.55156, 1), (6.07452, 0), (17.92345, 1), (4.34629, 0), (6.23976, 1), (4.02922, 0), (8.71029, 1), (3.62622, 0), (13.58736, 1), (-3.95825, 0), (8.78527, 1), (-1.63412, 0), (11.14213, 1), (-1.25727, 0), (12.23717, 1), (5.06323, 0), (16.44557, 1), (-0.66176, 0), (0.47144, 1), (2.36606, 0), (9.7198, 1), (-5.77792, 0), (13.50981, 1), (4.535, 0), (14.27806, 1), (1.02031, 0), (13.50793, 1), (4.49345, 0), (7.47381, 1), (-4.99791, 0), (11.07844, 1), (2.46716, 0), (9.89844, 1), (3.65471, 0), (21.48548, 1), (11.2283, 0), (6.92085, 1), (6.69743, 0), (4.44074, 1), (-5.60375, 0), (19.98074, 1), (0.28683, 0), (7.92826, 1), (-0.85737, 0), (16.6313, 1), (4.26726, 0), (17.17618, 1), (-3.4322, 0), (13.80807, 1), (-2.07039, 0), (5.37083, 1), (-2.26798, 0), (9.73962, 1), (-0.99818, 0), (10.66273, 1), (0.41335, 0), (8.90639, 1), (5.18124, 0), (12.24596, 1), (-5.01858, 0), (16.89203, 1), (2.05561, 0), (12.69184, 1), (-0.12117, 0), (15.59077, 1), (0.99471, 0), (6.94287, 1), (6.89979, 0), (-0.1801, 1), (-4.18527, 0), (3.25318, 1), (-6.35104, 0), (8.08804, 1), (3.89734, 0), (13.78384, 1), (-1.979, 0), (0.46434, 1), (3.15404, 0), (7.78224, 1), (3.52672, 0), (9.10987, 1), (2.48372, 0), (-0.89391, 1), (-6.13089, 0), (14.3696, 1), (2.2968, 0), (3.01763, 1), (-2.74324, 0), (8.03559, 1), (-0.12876, 0), (7.24609, 1), (-1.51135, 0), (11.86271, 1), (-3.92434, 0), (6.28196, 1), (-1.71254, 0), (8.9725, 1), (-1.25878, 0), (14.46114, 1), (2.03021, 0), (9.50216, 1), (4.31726, 0), (16.30413, 1), (-3.02908, 0), (1.02795, 1), (9.7093, 0), (1.88717, 1), (-3.36284, 0), (9.80106, 1), (6.70938, 0), (4.53487, 1), (0.42762, 0), (16.34543, 1), (5.04726, 0), (7.71098, 1), (2.78386, 0), (2.74639, 1), (6.83022, 0), (6.51875, 1), (-3.02109, 0), (10.42308, 1), (-0.65382, 0), (13.57901, 1), (-15.58675, 0), (0.52784, 1), (5.89746, 0), (4.4708, 1), (-4.11598, 0), (6.39619, 1), (-1.37208, 0), (14.57666, 1), (10.08082, 0), (2.71602, 1), (5.35686, 0), (12.53905, 1), (1.93331, 0), (11.4292, 1), (10.47444, 
0), (12.44641, 1), (-2.36872, 0), (14.50894, 1), (6.50752, 0), (17.64374, 1), (2.54603, 0), (11.03218, 1), (-0.4332, 0), (9.82789, 1), (5.26572, 0), (10.11104, 1), (2.09016, 0), (2.16137, 1), (1.15513, 0), (10.24054, 1), (14.95941, 0), (12.86909, 1), (-3.85505, 0), (15.22845, 1), (-2.36239, 0), (5.05411, 1), (1.64338, 0), (10.84836, 1), (-4.25074, 0), (11.15717, 1), (7.29744, 0), (0.91782, 1), (-1.18964, 0), (13.29961, 1), (5.60612, 0), (15.11314, 1), (-3.77011, 0), (11.54004, 1), (6.67642, 0), (-0.94238, 1), (-0.06862, 0), (19.32581, 1), (5.60514, 0), (10.20744, 1), (3.7341, 0), (6.54857, 1), (9.59001, 0), (8.69108, 1), (3.30093, 0), (8.2296, 1), (-2.75658, 0), (8.4474, 1), (4.71994, 0), (6.81178, 1), (0.74699, 0), (5.99415, 1), (2.91095, 0), (13.99336, 1), (-7.36829, 0), (8.7469, 1), (-5.29487, 0), (8.62349, 1), (3.31079, 0), (1.84212, 1), (1.06974, 0), (4.4762, 1), (-1.18424, 0), (9.25421, 1), (-7.415, 0), (10.44229, 1), (3.40595, 0), (12.21649, 1), (-7.63085, 0), (10.45968, 1), (1.13336, 0), (15.34722, 1), (-0.0096, 0), (5.50868, 1), (0.8928, 0), (10.93609, 1), (-0.5943, 0), (2.78631, 1), (7.48306, 0), (11.86145, 1), (10.11943, 0), (18.67385, 1), (5.60459, 0), (10.64051, 1), (4.00189, 0), (12.75565, 1), (2.35823, 0), (6.63666, 1), (0.33475, 0), (12.19343, 1), (3.47072, 0), (9.08636, 1), (-6.68867, 0), (11.67256, 1), (3.31031, 0), (20.31392, 1), (2.17159, 0), (11.66443, 1); +SELECT '-28.740781574102936', '7.667329672103986e-133'; +SELECT roundBankers(studentTTest(left, right).1, 6) as t_stat, roundBankers(studentTTest(left, right).2, 6) as p_value from student_ttest; +SELECT roundBankers(studentTTest(0.95)(left, right).3, 6) as t_stat, roundBankers(studentTTest(0.95)(left, right).4, 6) as p_value from student_ttest; +DROP TABLE IF EXISTS student_ttest; + +/* One-sample t-test against population mean 75 */ +DROP TABLE IF EXISTS onesample_ttest; +CREATE TABLE onesample_ttest (value Float64) ENGINE = Memory; + +INSERT INTO onesample_ttest VALUES +(83.96056984), (76.34082839), (85.77226246), (96.27635828), (75.1901595), (75.19035652), +(96.95055379), (87.20921675), (72.36630737), (84.51072052), (72.43898769), (72.41124296), +(80.90354726), (55.04063706), (57.30098601), (71.25254965), (65.84602656), (81.77096799), +(67.10371109), (61.05235558), (95.58778523), (75.29068439), (78.81033846), (60.90302177), +(71.46740731), (79.33107108), (64.18807707), (82.50837622), (70.79233572), (74.499675); + +SELECT '0.376423', '0.709342'; +SELECT roundBankers(studentTTestOneSample(value, 75).1, 6) as t_stat, roundBankers(studentTTestOneSample(value, 75).2, 6) as p_value from onesample_ttest; +DROP TABLE IF EXISTS onesample_ttest; diff --git a/parser/testdata/01559_aggregate_null_for_empty_fix/ast.json b/parser/testdata/01559_aggregate_null_for_empty_fix/ast.json new file mode 100644 index 000000000..ae85bd1be --- /dev/null +++ b/parser/testdata/01559_aggregate_null_for_empty_fix/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 1)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Function MAX (children 1)" + }, + { + "explain": "     ExpressionList (children 1)" + }, + { + "explain": "      Identifier aggr" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001494782, + "rows_read": 7, + "bytes_read": 255 + } +}
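
The reference pairs hard-coded in the t-test fixtures above ('-2.610898982580138' / '0.00916587538237954' for the equal-means Student test, '-28.740781574102936' / '7.667329672103986e-133' for the shifted-means test, and '0.376423' / '0.709342' for the one-sample test) come from scipy, as the SQL comments say. A minimal cross-check sketch, assuming scipy is installed; the group0/group1/values lists are illustrative stand-ins holding only the first few fixture values, where a real check would load the full columns from the INSERT statements:

    from scipy import stats

    # First few (left, right) pairs from the second student_ttest fixture,
    # split by the 0/1 group column; a real check uses all 500 values per group.
    group0 = [4.52546, 3.73628, -0.39478]
    group1 = [8.69444, 3.81414, 12.38442]

    # studentTTest(left, right): two-sample t-test with pooled variance.
    t_stat, p_value = stats.ttest_ind(group0, group1, equal_var=True)

    # welchTTest(left, right): Welch's t-test, variances not pooled.
    t_stat, p_value = stats.ttest_ind(group0, group1, equal_var=False)

    # studentTTestOneSample(value, 75): one-sample t-test against mean 75;
    # values are the first few entries of the onesample_ttest fixture.
    values = [83.96056984, 76.34082839, 85.77226246]
    t_stat, p_value = stats.ttest_1samp(values, popmean=75)

diff --git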
a/parser/testdata/01559_aggregate_null_for_empty_fix/metadata.json b/parser/testdata/01559_aggregate_null_for_empty_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01559_aggregate_null_for_empty_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01559_aggregate_null_for_empty_fix/query.sql b/parser/testdata/01559_aggregate_null_for_empty_fix/query.sql new file mode 100644 index 000000000..3434a0490 --- /dev/null +++ b/parser/testdata/01559_aggregate_null_for_empty_fix/query.sql @@ -0,0 +1,91 @@ +SELECT MAX(aggr) +FROM +( + SELECT MAX(-1) AS aggr + FROM system.one + WHERE NOT 1 + UNION ALL + SELECT MAX(-1) AS aggr + FROM system.one + WHERE 1 + +); +SELECT MaX(aggr) +FROM +( + SELECT mAX(-1) AS aggr + FROM system.one + WHERE NOT 1 + UNION ALL + SELECT MAx(-1) AS aggr + FROM system.one + WHERE 1 +); +SELECT MaX(aggr) +FROM +( + SELECT mAX(-1) AS aggr + FROM system.one + WHERE NOT 1 + UNION ALL + SELECT max(-1) AS aggr + FROM system.one + WHERE 1 +); +SELECT MaX(aggr) +FROM +( + SELECT mAX(-1) AS aggr + FROM system.one + WHERE NOT 1 + UNION ALL + SELECT max(-1) AS aggr + FROM system.one + WHERE not 1 +); +SET aggregate_functions_null_for_empty=1; +SELECT MAX(aggr) +FROM +( + SELECT MAX(-1) AS aggr + FROM system.one + WHERE NOT 1 + UNION ALL + SELECT MAX(-1) AS aggr + FROM system.one + WHERE 1 + +); +SELECT MaX(aggr) +FROM +( + SELECT mAX(-1) AS aggr + FROM system.one + WHERE NOT 1 + UNION ALL + SELECT MAx(-1) AS aggr + FROM system.one + WHERE 1 +); +SELECT MaX(aggr) +FROM +( + SELECT mAX(-1) AS aggr + FROM system.one + WHERE NOT 1 + UNION ALL + SELECT max(-1) AS aggr + FROM system.one + WHERE 1 +); +SELECT MaX(aggr) +FROM +( + SELECT mAX(-1) AS aggr + FROM system.one + WHERE NOT 1 + UNION ALL + SELECT max(-1) AS aggr + FROM system.one + WHERE not 1 +); diff --git a/parser/testdata/01559_misplaced_codec_diagnostics/ast.json b/parser/testdata/01559_misplaced_codec_diagnostics/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01559_misplaced_codec_diagnostics/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01559_misplaced_codec_diagnostics/metadata.json b/parser/testdata/01559_misplaced_codec_diagnostics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01559_misplaced_codec_diagnostics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01559_misplaced_codec_diagnostics/query.sql b/parser/testdata/01559_misplaced_codec_diagnostics/query.sql new file mode 100644 index 000000000..ab1cfc89b --- /dev/null +++ b/parser/testdata/01559_misplaced_codec_diagnostics/query.sql @@ -0,0 +1 @@ +CREATE TABLE t (c CODEC(NONE)) ENGINE = Memory -- { clientError SYNTAX_ERROR } \ No newline at end of file diff --git a/parser/testdata/01560_DateTime_and_DateTime64_comparision/ast.json b/parser/testdata/01560_DateTime_and_DateTime64_comparision/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01560_DateTime_and_DateTime64_comparision/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01560_DateTime_and_DateTime64_comparision/metadata.json b/parser/testdata/01560_DateTime_and_DateTime64_comparision/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01560_DateTime_and_DateTime64_comparision/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01560_DateTime_and_DateTime64_comparision/query.sql b/parser/testdata/01560_DateTime_and_DateTime64_comparision/query.sql new file mode 100644 index 000000000..2c3ae4186 --- /dev/null +++ b/parser/testdata/01560_DateTime_and_DateTime64_comparision/query.sql @@ -0,0 +1,43 @@ +SELECT + n, + toTypeName(dt64) AS dt64_typename, + + '<', + dt64 < dt, + toDateTime(dt64) < dt, + dt64 < toDateTime64(dt, 1, 'UTC'), + + '<=', + dt64 <= dt, + toDateTime(dt64) <= dt, + dt64 <= toDateTime64(dt, 1, 'UTC'), + + '=', + dt64 = dt, + toDateTime(dt64) = dt, + dt64 = toDateTime64(dt, 1, 'UTC'), + + '>=', + dt64 >= dt, + toDateTime(dt64) >= dt, + dt64 >= toDateTime64(dt, 1, 'UTC'), + + '>', + dt64 > dt, + toDateTime(dt64) > dt, + dt64 > toDateTime64(dt, 1, 'UTC'), + + '!=', + dt64 != dt, + toDateTime(dt64) != dt, + dt64 != toDateTime64(dt, 1, 'UTC') +FROM +( + WITH toDateTime('2015-05-18 07:40:11') as value + SELECT + number - 1 as n, + toDateTime64(value, 1, 'UTC') AS dt64, + value - n as dt + FROM system.numbers + LIMIT 3 +) diff --git a/parser/testdata/01560_cancel_agg_func_combinator_native_name_constraint/ast.json b/parser/testdata/01560_cancel_agg_func_combinator_native_name_constraint/ast.json new file mode 100644 index 000000000..4e4dd1dd5 --- /dev/null +++ b/parser/testdata/01560_cancel_agg_func_combinator_native_name_constraint/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function Sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001423465, + "rows_read": 7, + "bytes_read": 256 + } +} diff --git a/parser/testdata/01560_cancel_agg_func_combinator_native_name_constraint/metadata.json b/parser/testdata/01560_cancel_agg_func_combinator_native_name_constraint/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01560_cancel_agg_func_combinator_native_name_constraint/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01560_cancel_agg_func_combinator_native_name_constraint/query.sql b/parser/testdata/01560_cancel_agg_func_combinator_native_name_constraint/query.sql new file mode 100644 index 000000000..096ba6639 --- /dev/null +++ b/parser/testdata/01560_cancel_agg_func_combinator_native_name_constraint/query.sql @@ -0,0 +1,5 @@ +SELECT Sum(1); +SELECT SumOrNull(1); +SELECT SUMOrNull(1); +SELECT SUMOrNullIf(1, 1); +SELECT SUMOrNullIf(1, 0); diff --git a/parser/testdata/01560_crash_in_agg_empty_arglist/ast.json b/parser/testdata/01560_crash_in_agg_empty_arglist/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01560_crash_in_agg_empty_arglist/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01560_crash_in_agg_empty_arglist/metadata.json b/parser/testdata/01560_crash_in_agg_empty_arglist/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01560_crash_in_agg_empty_arglist/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01560_crash_in_agg_empty_arglist/query.sql b/parser/testdata/01560_crash_in_agg_empty_arglist/query.sql new file mode 100644 index 000000000..f80a3056f --- /dev/null 
+++ b/parser/testdata/01560_crash_in_agg_empty_arglist/query.sql @@ -0,0 +1,5 @@ +-- make sure the system.query_log table is created +SELECT 1; +SYSTEM FLUSH LOGS query_log; + +SELECT any() as t, substring(query, 1, 70) AS query, avg(memory_usage) usage, count() count FROM system.query_log WHERE current_database = currentDatabase() AND event_date >= toDate(1604295323) AND event_time >= toDateTime(1604295323) AND type in (1,2,3,4) and initial_user in ('') and('all' = 'all' or(positionCaseInsensitive(query, 'all') = 1)) GROUP BY query ORDER BY usage desc LIMIT 5; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/parser/testdata/01560_mann_whitney/ast.json b/parser/testdata/01560_mann_whitney/ast.json new file mode 100644 index 000000000..44de8b046 --- /dev/null +++ b/parser/testdata/01560_mann_whitney/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mann_whitney_test (children 1)" + }, + { + "explain": " Identifier mann_whitney_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001596275, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/01560_mann_whitney/metadata.json b/parser/testdata/01560_mann_whitney/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01560_mann_whitney/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01560_mann_whitney/query.sql b/parser/testdata/01560_mann_whitney/query.sql new file mode 100644 index 000000000..6e1ac5534 --- /dev/null +++ b/parser/testdata/01560_mann_whitney/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS mann_whitney_test; +CREATE TABLE mann_whitney_test (left Float64, right UInt8) ENGINE = Memory; +INSERT INTO mann_whitney_test VALUES (310, 0), (195, 0), (530, 0), (155, 0), (530, 0), (245, 0), (385, 0), (450, 0), (465, 0), (545, 0), (170, 0), (180, 0), (125, 0), (180, 0), (230, 0), (75, 0), (430, 0), (480, 0), (495, 0), (295, 0), (116, 1), (171, 1), (176, 1), (421, 1), (111, 1), (326, 1), (481, 1), (111, 1), (346, 1), (441, 1), (261, 1), (411, 1), (206, 1), (521, 1), (456, 1), (446, 1), (296, 1), (51, 1), (426, 1), (261, 1); +SELECT mannWhitneyUTest(left, right) from mann_whitney_test; +SELECT '223.0', '0.5426959774289482'; +WITH mannWhitneyUTest(left, right) AS pair SELECT roundBankers(pair.1, 16) as t_stat, roundBankers(pair.2, 16) as p_value from mann_whitney_test; +WITH mannWhitneyUTest('two-sided', 1)(left, right) as pair SELECT roundBankers(pair.1, 16) as t_stat, roundBankers(pair.2, 16) as p_value from mann_whitney_test; +WITH mannWhitneyUTest('two-sided')(left, right) as pair SELECT roundBankers(pair.1, 16) as t_stat, roundBankers(pair.2, 16) as p_value from mann_whitney_test; +DROP TABLE IF EXISTS mann_whitney_test; diff --git a/parser/testdata/01560_merge_distributed_join/ast.json b/parser/testdata/01560_merge_distributed_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01560_merge_distributed_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01560_merge_distributed_join/metadata.json b/parser/testdata/01560_merge_distributed_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01560_merge_distributed_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01560_merge_distributed_join/query.sql b/parser/testdata/01560_merge_distributed_join/query.sql new file mode 100644 index 
000000000..40e89135d --- /dev/null +++ b/parser/testdata/01560_merge_distributed_join/query.sql @@ -0,0 +1,21 @@ +-- Tags: distributed + +-- test from https://github.com/ClickHouse/ClickHouse/issues/11755#issuecomment-700850254 +DROP TABLE IF EXISTS cat_hist; +DROP TABLE IF EXISTS prod_hist; +DROP TABLE IF EXISTS products_l; +DROP TABLE IF EXISTS products; + +CREATE TABLE cat_hist (categoryId UUID, categoryName String) ENGINE Memory; +CREATE TABLE prod_hist (categoryId UUID, productId UUID) ENGINE = MergeTree ORDER BY productId; + +CREATE TABLE products_l AS prod_hist ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), prod_hist); +CREATE TABLE products as prod_hist ENGINE = Merge(currentDatabase(), '^products_'); + +SELECT * FROM products AS p LEFT JOIN cat_hist AS c USING (categoryId); +SELECT * FROM products AS p GLOBAL LEFT JOIN cat_hist AS c USING (categoryId); + +DROP TABLE cat_hist; +DROP TABLE prod_hist; +DROP TABLE products_l; +DROP TABLE products; diff --git a/parser/testdata/01560_monotonicity_check_multiple_args_bug/ast.json b/parser/testdata/01560_monotonicity_check_multiple_args_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01560_monotonicity_check_multiple_args_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01560_monotonicity_check_multiple_args_bug/metadata.json b/parser/testdata/01560_monotonicity_check_multiple_args_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01560_monotonicity_check_multiple_args_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01560_monotonicity_check_multiple_args_bug/query.sql b/parser/testdata/01560_monotonicity_check_multiple_args_bug/query.sql new file mode 100644 index 000000000..b475a5bdd --- /dev/null +++ b/parser/testdata/01560_monotonicity_check_multiple_args_bug/query.sql @@ -0,0 +1,17 @@ +WITH arrayJoin(range(2)) AS delta +SELECT + toDate(time) + toIntervalDay(delta) AS dt +FROM +( + SELECT toDateTime('2020.11.12 19:02:04') AS time +) +ORDER BY dt ASC; + +WITH arrayJoin([0, 1]) AS delta +SELECT + toDate(time) + toIntervalDay(delta) AS dt +FROM +( + SELECT toDateTime('2020.11.12 19:02:04') AS time +) +ORDER BY dt ASC; diff --git a/parser/testdata/01560_optimize_on_insert_long/ast.json b/parser/testdata/01560_optimize_on_insert_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01560_optimize_on_insert_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01560_optimize_on_insert_long/metadata.json b/parser/testdata/01560_optimize_on_insert_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01560_optimize_on_insert_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01560_optimize_on_insert_long/query.sql b/parser/testdata/01560_optimize_on_insert_long/query.sql new file mode 100644 index 000000000..5570bb762 --- /dev/null +++ b/parser/testdata/01560_optimize_on_insert_long/query.sql @@ -0,0 +1,44 @@ +-- Tags: long + +SELECT 'Replacing Merge Tree'; +DROP TABLE IF EXISTS replacing_merge_tree; +CREATE TABLE replacing_merge_tree (key UInt32, date Datetime) ENGINE=ReplacingMergeTree() PARTITION BY date ORDER BY key; +INSERT INTO replacing_merge_tree VALUES (1, '2020-01-01'), (2, '2020-01-02'), (1, '2020-01-01'), (2, '2020-01-02'); +SELECT * FROM replacing_merge_tree ORDER BY key; +DROP TABLE replacing_merge_tree; + +SELECT 
'Collapsing Merge Tree'; +DROP TABLE IF EXISTS collapsing_merge_tree; +CREATE TABLE collapsing_merge_tree (key UInt32, sign Int8, date Datetime) ENGINE=CollapsingMergeTree(sign) PARTITION BY date ORDER BY key; +INSERT INTO collapsing_merge_tree VALUES (1, 1, '2020-01-01'), (2, 1, '2020-01-02'), (1, -1, '2020-01-01'), (2, -1, '2020-01-02'), (1, 1, '2020-01-01'); +SELECT * FROM collapsing_merge_tree ORDER BY key; +DROP TABLE collapsing_merge_tree; + +SELECT 'Versioned Collapsing Merge Tree'; +DROP TABLE IF EXISTS versioned_collapsing_merge_tree; +CREATE TABLE versioned_collapsing_merge_tree (key UInt32, sign Int8, version Int32, date Datetime) ENGINE=VersionedCollapsingMergeTree(sign, version) PARTITION BY date ORDER BY (key, version); +INSERT INTO versioned_collapsing_merge_tree VALUES (1, 1, 1, '2020-01-01'), (1, -1, 1, '2020-01-01'), (1, 1, 2, '2020-01-01'); +SELECT * FROM versioned_collapsing_merge_tree ORDER BY key; +DROP TABLE versioned_collapsing_merge_tree; + +SELECT 'Summing Merge Tree'; +DROP TABLE IF EXISTS summing_merge_tree; +CREATE TABLE summing_merge_tree (key UInt32, val UInt32, date Datetime) ENGINE=SummingMergeTree(val) PARTITION BY date ORDER BY key; +INSERT INTO summing_merge_tree VALUES (1, 1, '2020-01-01'), (2, 1, '2020-01-02'), (1, 5, '2020-01-01'), (2, 5, '2020-01-02'); +SELECT * FROM summing_merge_tree ORDER BY key; +DROP TABLE summing_merge_tree; + +SELECT 'Aggregating Merge Tree'; +DROP TABLE IF EXISTS aggregating_merge_tree; +CREATE TABLE aggregating_merge_tree (key UInt32, val SimpleAggregateFunction(max, UInt32), date Datetime) ENGINE=AggregatingMergeTree() PARTITION BY date ORDER BY key; +INSERT INTO aggregating_merge_tree VALUES (1, 1, '2020-01-01'), (2, 1, '2020-01-02'), (1, 5, '2020-01-01'), (2, 5, '2020-01-02'); +SELECT * FROM aggregating_merge_tree ORDER BY key; +DROP TABLE aggregating_merge_tree; + +SELECT 'Check creating empty parts'; +DROP TABLE IF EXISTS empty; +CREATE TABLE empty (key UInt32, val UInt32, date Datetime) ENGINE=SummingMergeTree(val) PARTITION BY date ORDER BY key; +INSERT INTO empty VALUES (1, 1, '2020-01-01'), (1, 1, '2020-01-01'), (1, -2, '2020-01-01'); +SELECT * FROM empty ORDER BY key; +SELECT table, partition, active FROM system.parts where table = 'empty' and active = 1 and database = currentDatabase(); +DROP TABLE empty; diff --git a/parser/testdata/01560_optimize_on_insert_zookeeper/ast.json b/parser/testdata/01560_optimize_on_insert_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01560_optimize_on_insert_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01560_optimize_on_insert_zookeeper/metadata.json b/parser/testdata/01560_optimize_on_insert_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01560_optimize_on_insert_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01560_optimize_on_insert_zookeeper/query.sql b/parser/testdata/01560_optimize_on_insert_zookeeper/query.sql new file mode 100644 index 000000000..217138c0d --- /dev/null +++ b/parser/testdata/01560_optimize_on_insert_zookeeper/query.sql @@ -0,0 +1,38 @@ +-- Tags: zookeeper + +DROP TABLE IF EXISTS empty1; +DROP TABLE IF EXISTS empty2; + +SELECT 'Check creating empty parts'; + +CREATE TABLE empty1 (key UInt32, val UInt32, date Datetime) +ENGINE=ReplicatedSummingMergeTree('/clickhouse/tables/{database}/test_01560_optimize_on_insert', '1', val) +PARTITION BY date ORDER BY key; + +CREATE TABLE 
empty2 (key UInt32, val UInt32, date Datetime) +ENGINE=ReplicatedSummingMergeTree('/clickhouse/tables/{database}/test_01560_optimize_on_insert', '2', val) +PARTITION BY date ORDER BY key; + +INSERT INTO empty2 VALUES (1, 1, '2020-01-01'), (1, 1, '2020-01-01'), (1, -2, '2020-01-01'); + +SYSTEM SYNC REPLICA empty1; + +SELECT * FROM empty1 ORDER BY key; +SELECT * FROM empty2 ORDER BY key; + +SELECT table, partition, active FROM system.parts where table = 'empty1' and database=currentDatabase() and active = 1; +SELECT table, partition, active FROM system.parts where table = 'empty2' and database=currentDatabase() and active = 1; + +DETACH table empty1; +DETACH table empty2; +ATTACH table empty1; +ATTACH table empty2; + +SELECT * FROM empty1 ORDER BY key; +SELECT * FROM empty2 ORDER BY key; + +SELECT table, partition, active FROM system.parts where table = 'empty1' and database=currentDatabase() and active = 1; +SELECT table, partition, active FROM system.parts where table = 'empty2' and database=currentDatabase() and active = 1; + +DROP TABLE IF EXISTS empty1; +DROP TABLE IF EXISTS empty2; diff --git a/parser/testdata/01561_Date_and_DateTime64_comparision/ast.json b/parser/testdata/01561_Date_and_DateTime64_comparision/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01561_Date_and_DateTime64_comparision/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01561_Date_and_DateTime64_comparision/metadata.json b/parser/testdata/01561_Date_and_DateTime64_comparision/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01561_Date_and_DateTime64_comparision/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01561_Date_and_DateTime64_comparision/query.sql b/parser/testdata/01561_Date_and_DateTime64_comparision/query.sql new file mode 100644 index 000000000..a61bcff4d --- /dev/null +++ b/parser/testdata/01561_Date_and_DateTime64_comparision/query.sql @@ -0,0 +1,43 @@ +SELECT + n, + toTypeName(dt64) AS dt64_typename, + + '<', + dt64 < d, + toDate(dt64) < d, + dt64 < toDateTime64(d, 1, 'UTC'), + + '<=', + dt64 <= d, + toDate(dt64) <= d, + dt64 <= toDateTime64(d, 1, 'UTC'), + + '=', + dt64 = d, + toDate(dt64) = d, + dt64 = toDateTime64(d, 1, 'UTC'), + + '>=', + dt64 >= d, + toDate(dt64) >= d, + dt64 >= toDateTime64(d, 1, 'UTC'), + + '>', + dt64 > d, + toDate(dt64) > d, + dt64 > toDateTime64(d, 1, 'UTC'), + + '!=', + dt64 != d, + toDate(dt64) != d, + dt64 != toDateTime64(d, 1, 'UTC') +FROM +( + WITH toDateTime('2019-09-16 19:20:11') as val + SELECT + number - 1 as n, + toDateTime64(val, 1, 'UTC') AS dt64, + toDate(val, 'UTC') - n as d + FROM system.numbers + LIMIT 3 +) diff --git a/parser/testdata/01561_aggregate_functions_of_key_with_join/ast.json b/parser/testdata/01561_aggregate_functions_of_key_with_join/ast.json new file mode 100644 index 000000000..35456eed7 --- /dev/null +++ b/parser/testdata/01561_aggregate_functions_of_key_with_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001570257, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01561_aggregate_functions_of_key_with_join/metadata.json b/parser/testdata/01561_aggregate_functions_of_key_with_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01561_aggregate_functions_of_key_with_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01561_aggregate_functions_of_key_with_join/query.sql b/parser/testdata/01561_aggregate_functions_of_key_with_join/query.sql new file mode 100644 index 000000000..66047fcc1 --- /dev/null +++ b/parser/testdata/01561_aggregate_functions_of_key_with_join/query.sql @@ -0,0 +1,5 @@ +SET optimize_aggregators_of_group_by_keys = 1; +SELECT source.key, max(target.key) FROM (SELECT 1 key, 'x' name) source +INNER JOIN (SELECT 2 key, 'x' name) target +ON source.name = target.name +GROUP BY source.key; diff --git a/parser/testdata/01562_agg_null_for_empty_ahead/ast.json b/parser/testdata/01562_agg_null_for_empty_ahead/ast.json new file mode 100644 index 000000000..e9b079121 --- /dev/null +++ b/parser/testdata/01562_agg_null_for_empty_ahead/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumMerge (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier s" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumState (alias s) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.00192536, + "rows_read": 24, + "bytes_read": 1042 + } +} diff --git a/parser/testdata/01562_agg_null_for_empty_ahead/metadata.json b/parser/testdata/01562_agg_null_for_empty_ahead/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01562_agg_null_for_empty_ahead/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01562_agg_null_for_empty_ahead/query.sql b/parser/testdata/01562_agg_null_for_empty_ahead/query.sql new file mode 100644 index 000000000..834204fed --- /dev/null +++ b/parser/testdata/01562_agg_null_for_empty_ahead/query.sql @@ -0,0 +1,36 @@ +SELECT sumMerge(s) FROM (SELECT sumState(number) s FROM numbers(0)); +SELECT sumMerge(s) FROM (SELECT sumState(number) s FROM numbers(1)); + +SELECT sumMerge(s) FROM (SELECT sumMergeState(n) s FROM (SELECT sumState(number) n FROM numbers(0))); +SELECT sumMerge(s) FROM (SELECT sumMergeState(n) s FROM (SELECT sumState(number) n FROM numbers(1))); + +SELECT sumIf(1, 0); + +SELECT sumIf(1, 1); + +-- should return Null even if we don't set aggregate_functions_null_for_empty +SELECT sumIfOrNull(1, 0); +SELECT sumOrNullIf(1, 0); + +SELECT nullIf(1, 0); + +SELECT nullIf(1, 1); + +SET
aggregate_functions_null_for_empty=1; + +SELECT sumMerge(s) FROM (SELECT sumState(number) s FROM numbers(0)); +SELECT sumMerge(s) FROM (SELECT sumState(number) s FROM numbers(1)); + +SELECT sumMerge(s) FROM (SELECT sumMergeState(n) s FROM (SELECT sumState(number) n FROM numbers(0))); +SELECT sumMerge(s) FROM (SELECT sumMergeState(n) s FROM (SELECT sumState(number) n FROM numbers(1))); + +SELECT sumIf(1, 0); + +SELECT sumIf(1, 1); + +SELECT sumIfOrNull(1, 0); +SELECT sumOrNullIf(1, 0); + +SELECT nullIf(1, 0); + +SELECT nullIf(1, 1); diff --git a/parser/testdata/01564_test_hint_woes/ast.json b/parser/testdata/01564_test_hint_woes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01564_test_hint_woes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01564_test_hint_woes/metadata.json b/parser/testdata/01564_test_hint_woes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01564_test_hint_woes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01564_test_hint_woes/query.sql b/parser/testdata/01564_test_hint_woes/query.sql new file mode 100644 index 000000000..c4d4d56d0 --- /dev/null +++ b/parser/testdata/01564_test_hint_woes/query.sql @@ -0,0 +1,54 @@ +-- { echo } +create table values_01564( + a int, + constraint c1 check a < 10) engine Memory; + +-- client error hint after broken insert values +insert into values_01564 values ('f'); -- { error CANNOT_PARSE_TEXT } + +insert into values_01564 values ('f'); -- { error CANNOT_PARSE_TEXT } +select 1; + +insert into values_01564 values ('f'); -- { error CANNOT_PARSE_TEXT } +select nonexistent column; -- { serverError UNKNOWN_IDENTIFIER } + +-- syntax error hint after broken insert values +insert into values_01564 this is bad syntax values ('f'); -- { clientError SYNTAX_ERROR } + +insert into values_01564 this is bad syntax values ('f'); -- { clientError SYNTAX_ERROR } +select 1; + +insert into values_01564 this is bad syntax values ('f'); -- { clientError SYNTAX_ERROR } +select nonexistent column; -- { serverError UNKNOWN_IDENTIFIER } + +-- server error hint after broken insert values (violated constraint) +insert into values_01564 values (11); -- { serverError VIOLATED_CONSTRAINT } + +insert into values_01564 values (11); -- { serverError VIOLATED_CONSTRAINT } +select 1; + +insert into values_01564 values (11); -- { serverError VIOLATED_CONSTRAINT } +select nonexistent column; -- { serverError UNKNOWN_IDENTIFIER } + +-- query after values on the same line +insert into values_01564 values (1); select 1; + +-- even this works (not sure why we need it lol) +-- insert into values_01564 values (11) /*{ serverError VIOLATED_CONSTRAINT }*/; select 1; + +-- syntax error, where the last token we can parse is long before the semicolon. 
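+-- Editorial note, an assumption about the clickhouse-test hint syntax rather than upstream text: +-- the { clientError X } hints expect the failure to be detected client-side (e.g. in the parser), +-- { serverError X } expects the server to return that error code, and plain { error X } appears to +-- accept the error wherever it is detected. The statements below exercise the client-side case: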
+select this is too many words for an alias; -- { clientError SYNTAX_ERROR } +OPTIMIZE TABLE values_01564 DEDUPLICATE BY; -- { clientError SYNTAX_ERROR } +OPTIMIZE TABLE values_01564 DEDUPLICATE BY a EXCEPT a; -- { clientError SYNTAX_ERROR } +select 'a' || distinct one || 'c' from system.one; -- { clientError SYNTAX_ERROR } + +-- a failing insert and then a normal insert (https://github.com/ClickHouse/ClickHouse/issues/19353) +CREATE TABLE t0 (c0 String, c1 Int32) ENGINE = Memory() ; +INSERT INTO t0(c0, c1) VALUES ("1",1) ; -- { error UNKNOWN_IDENTIFIER } +INSERT INTO t0(c0, c1) VALUES ('1', 1) ; + +-- the return code must be zero after the final query has failed with the expected error +insert into values_01564 values (11); -- { serverError VIOLATED_CONSTRAINT } + +drop table t0; +drop table values_01564; diff --git a/parser/testdata/01566_negate_formatting/ast.json b/parser/testdata/01566_negate_formatting/ast.json new file mode 100644 index 000000000..24d92361f --- /dev/null +++ b/parser/testdata/01566_negate_formatting/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001424614, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01566_negate_formatting/metadata.json b/parser/testdata/01566_negate_formatting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01566_negate_formatting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01566_negate_formatting/query.sql b/parser/testdata/01566_negate_formatting/query.sql new file mode 100644 index 000000000..a60168f33 --- /dev/null +++ b/parser/testdata/01566_negate_formatting/query.sql @@ -0,0 +1,7 @@ +set enable_analyzer = 1; +-- { echo } +explain syntax select negate(1), negate(-1), - -1, -(-1), (-1) in (-1); +explain syntax select negate(1.), negate(-1.), - -1., -(-1.), (-1.) in (-1.); +explain syntax select negate(-9223372036854775808), -(-9223372036854775808), - -9223372036854775808; +explain syntax select negate(0), negate(-0), - -0, -(-0), (-0) in (-0); +explain syntax select negate(0.), negate(-0.), - -0., -(-0.), (-0.)
in (-0.); diff --git a/parser/testdata/01567_system_processes_current_database/ast.json b/parser/testdata/01567_system_processes_current_database/ast.json new file mode 100644 index 000000000..a53ecf30d --- /dev/null +++ b/parser/testdata/01567_system_processes_current_database/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.processes" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier current_database" + }, + { + "explain": " Function currentDatabase (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001781417, + "rows_read": 16, + "bytes_read": 627 + } +} diff --git a/parser/testdata/01567_system_processes_current_database/metadata.json b/parser/testdata/01567_system_processes_current_database/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01567_system_processes_current_database/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01567_system_processes_current_database/query.sql b/parser/testdata/01567_system_processes_current_database/query.sql new file mode 100644 index 000000000..406120d74 --- /dev/null +++ b/parser/testdata/01567_system_processes_current_database/query.sql @@ -0,0 +1 @@ +select count(*) from system.processes where current_database = currentDatabase(); diff --git a/parser/testdata/01568_window_functions_distributed/ast.json b/parser/testdata/01568_window_functions_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01568_window_functions_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01568_window_functions_distributed/metadata.json b/parser/testdata/01568_window_functions_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01568_window_functions_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01568_window_functions_distributed/query.sql b/parser/testdata/01568_window_functions_distributed/query.sql new file mode 100644 index 000000000..45bb1ad39 --- /dev/null +++ b/parser/testdata/01568_window_functions_distributed/query.sql @@ -0,0 +1,36 @@ +-- Tags: distributed + +-- { echo } +select row_number() over (order by dummy) as x from (select * from remote('127.0.0.{1,2}', system, one)) order by x; + +select row_number() over (order by dummy) as x from remote('127.0.0.{1,2}', system, one) order by x; + +select max(identity(dummy + 1)) over () as x from remote('127.0.0.{1,2}', system, one) order by x; + +drop table if exists t_01568; + +create table t_01568 engine Memory as +select intDiv(number, 3) p, modulo(number, 3) o, number +from numbers(9); + +select sum(number) over w as x, max(number) over w as y from t_01568 window w as (partition by p) 
order by x, y; + +select sum(number) over w, max(number) over w from t_01568 window w as (partition by p) order by p; + +select sum(number) over w as x, max(number) over w as y from remote('127.0.0.{1,2}', '', t_01568) window w as (partition by p) order by x, y; + +select sum(number) over w as x, max(number) over w as y from remote('127.0.0.{1,2}', '', t_01568) window w as (partition by p) order by x, y SETTINGS max_threads = 1; + +select distinct sum(number) over w as x, max(number) over w as y from remote('127.0.0.{1,2}', '', t_01568) window w as (partition by p) order by x, y; + +-- window functions + aggregation w/shards +select groupArray(groupArray(number)) over (rows unbounded preceding) as x from remote('127.0.0.{1,2}', '', t_01568) group by mod(number, 3) order by x; +select groupArray(groupArray(number)) over (rows unbounded preceding) as x from remote('127.0.0.{1,2}', '', t_01568) group by mod(number, 3) order by x settings distributed_group_by_no_merge=1; +select groupArray(groupArray(number)) over (rows unbounded preceding) as x from remote('127.0.0.{1,2}', '', t_01568) group by mod(number, 3) order by x settings distributed_group_by_no_merge=2; -- { serverError NOT_IMPLEMENTED } + +-- proper ORDER BY w/window functions +select p, o, count() over (partition by p) +from remote('127.0.0.{1,2}', '', t_01568) +order by p, o; + +drop table t_01568; diff --git a/parser/testdata/01570_aggregator_combinator_simple_state/ast.json b/parser/testdata/01570_aggregator_combinator_simple_state/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01570_aggregator_combinator_simple_state/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01570_aggregator_combinator_simple_state/metadata.json b/parser/testdata/01570_aggregator_combinator_simple_state/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01570_aggregator_combinator_simple_state/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01570_aggregator_combinator_simple_state/query.sql b/parser/testdata/01570_aggregator_combinator_simple_state/query.sql new file mode 100644 index 000000000..7417b8643 --- /dev/null +++ b/parser/testdata/01570_aggregator_combinator_simple_state/query.sql @@ -0,0 +1,18 @@ +-- { echo } +with anySimpleState(number) as c select toTypeName(c), c from numbers(1); +with anyLastSimpleState(number) as c select toTypeName(c), c from numbers(1); +with minSimpleState(number) as c select toTypeName(c), c from numbers(1); +with maxSimpleState(number) as c select toTypeName(c), c from numbers(1); +with sumSimpleState(number) as c select toTypeName(c), c from numbers(1); +with sumWithOverflowSimpleState(number) as c select toTypeName(c), c from numbers(1); +with groupBitAndSimpleState(number) as c select toTypeName(c), c from numbers(1); +with groupBitOrSimpleState(number) as c select toTypeName(c), c from numbers(1); +with groupBitXorSimpleState(number) as c select toTypeName(c), c from numbers(1); +with sumMapSimpleState(([number], [number])) as c select toTypeName(c), c from numbers(1); +with minMapSimpleState(([number], [number])) as c select toTypeName(c), c from numbers(1); +with maxMapSimpleState(([number], [number])) as c select toTypeName(c), c from numbers(1); +with groupArrayArraySimpleState([number]) as c select toTypeName(c), c from numbers(1); +with groupUniqArrayArraySimpleState([number]) as c select toTypeName(c), c from numbers(1); + +-- non-SimpleAggregateFunction +with 
countSimpleState(number) as c select toTypeName(c), c from numbers(1); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/01571_window_functions/ast.json b/parser/testdata/01571_window_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01571_window_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01571_window_functions/metadata.json b/parser/testdata/01571_window_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01571_window_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01571_window_functions/query.sql b/parser/testdata/01571_window_functions/query.sql new file mode 100644 index 000000000..dfe9c4376 --- /dev/null +++ b/parser/testdata/01571_window_functions/query.sql @@ -0,0 +1,44 @@ +-- { echo } +-- Another test for window functions because the other one is too long. + +-- some craziness with a mix of materialized and unmaterialized const columns +-- after merging sorted transform, that used to break the peer group detection in +-- the window transform. +CREATE TABLE order_by_const +( + `a` UInt64, + `b` UInt64, + `c` UInt64, + `d` UInt64 +) +ENGINE = MergeTree +ORDER BY (a, b) +SETTINGS index_granularity = 8192; + +truncate table order_by_const; +system stop merges order_by_const; +INSERT INTO order_by_const(a, b, c, d) VALUES (1, 1, 101, 1), (1, 2, 102, 1), (1, 3, 103, 1), (1, 4, 104, 1); +INSERT INTO order_by_const(a, b, c, d) VALUES (1, 5, 104, 1), (1, 6, 105, 1), (2, 1, 106, 2), (2, 1, 107, 2); +INSERT INTO order_by_const(a, b, c, d) VALUES (2, 2, 107, 2), (2, 3, 108, 2), (2, 4, 109, 2); + +-- output 1 sorted stream +SELECT row_number() OVER (order by 1, a) FROM order_by_const SETTINGS query_plan_enable_multithreading_after_window_functions=0; + +drop table order_by_const; + +-- expressions in window frame +select count() over (rows between 1 + 1 preceding and 1 + 1 following) from numbers(10); + +-- signed and unsigned in offset do not cause logical error +select count() over (rows between 2 following and 1 + -1 following) FROM numbers(10); -- { serverError BAD_ARGUMENTS } + +-- default arguments of lagInFrame can be a subtype of the argument +select number, + lagInFrame(toNullable(number), 2, null) over w, + lagInFrame(number, 2, 1) over w +from numbers(10) +window w as (order by number) +; + +-- the case when current_row goes past the partition end at the block end +select number, row_number() over (partition by number rows between unbounded preceding and 1 preceding) from numbers(4) settings max_block_size = 2; diff --git a/parser/testdata/01575_disable_detach_table_of_dictionary/ast.json b/parser/testdata/01575_disable_detach_table_of_dictionary/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01575_disable_detach_table_of_dictionary/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01575_disable_detach_table_of_dictionary/metadata.json b/parser/testdata/01575_disable_detach_table_of_dictionary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01575_disable_detach_table_of_dictionary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01575_disable_detach_table_of_dictionary/query.sql b/parser/testdata/01575_disable_detach_table_of_dictionary/query.sql new file mode 100644 index 000000000..60bf817fc --- /dev/null +++ 
b/parser/testdata/01575_disable_detach_table_of_dictionary/query.sql @@ -0,0 +1,28 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS database_for_dict; + +CREATE DATABASE database_for_dict; + +CREATE TABLE database_for_dict.table_for_dict (k UInt64, v UInt8) ENGINE = MergeTree ORDER BY k; + +DROP DICTIONARY IF EXISTS database_for_dict.dict1; + +CREATE DICTIONARY database_for_dict.dict1 (k UInt64 DEFAULT 0, v UInt8 DEFAULT 1) PRIMARY KEY k +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_for_dict' PASSWORD '' DB 'database_for_dict')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(FLAT()); + +DETACH TABLE database_for_dict.dict1; -- { serverError CANNOT_DETACH_DICTIONARY_AS_TABLE } + +DETACH DICTIONARY database_for_dict.dict1; + +ATTACH TABLE database_for_dict.dict1; -- { serverError INCORRECT_QUERY } + +ATTACH DICTIONARY database_for_dict.dict1; + +DROP DICTIONARY database_for_dict.dict1; + +DROP TABLE database_for_dict.table_for_dict; + +DROP DATABASE IF EXISTS database_for_dict; diff --git a/parser/testdata/01576_alias_column_rewrite/ast.json b/parser/testdata/01576_alias_column_rewrite/ast.json new file mode 100644 index 000000000..f13d94791 --- /dev/null +++ b/parser/testdata/01576_alias_column_rewrite/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001565438, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01576_alias_column_rewrite/metadata.json b/parser/testdata/01576_alias_column_rewrite/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01576_alias_column_rewrite/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01576_alias_column_rewrite/query.sql b/parser/testdata/01576_alias_column_rewrite/query.sql new file mode 100644 index 000000000..4c27950fc --- /dev/null +++ b/parser/testdata/01576_alias_column_rewrite/query.sql @@ -0,0 +1,137 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + `timestamp` DateTime, + `value` UInt64, + `day` Date ALIAS toDate(timestamp), + `day1` Date ALIAS day + 1, + `day2` Date ALIAS day1 + 1, + `time` DateTime ALIAS timestamp +) +ENGINE = MergeTree +PARTITION BY toYYYYMMDD(timestamp) +ORDER BY timestamp SETTINGS index_granularity = 1; + + +INSERT INTO test_table(timestamp, value) SELECT toDateTime('2020-01-01 12:00:00'), 1 FROM numbers(10); +INSERT INTO test_table(timestamp, value) SELECT toDateTime('2020-01-02 12:00:00'), 1 FROM numbers(10); +INSERT INTO test_table(timestamp, value) SELECT toDateTime('2020-01-03 12:00:00'), 1 FROM numbers(10); + +set optimize_respect_aliases = 1; +SELECT 'test-partition-prune'; + +SELECT COUNT() = 10 FROM test_table WHERE day = '2020-01-01' SETTINGS max_rows_to_read = 10; +SELECT t = '2020-01-03' FROM (SELECT day AS t FROM test_table WHERE t = '2020-01-03' GROUP BY t SETTINGS max_rows_to_read = 10); +SELECT COUNT() = 10 FROM test_table WHERE day = '2020-01-01' UNION ALL SELECT 1 FROM numbers(1) SETTINGS max_rows_to_read = 11; +SELECT COUNT() = 0 FROM (SELECT toDate('2019-01-01') AS day, day AS t FROM test_table PREWHERE t = '2020-01-03' WHERE t = '2020-01-03' GROUP BY t ); + +SELECT 'test-join'; +SELECT day = '2020-01-03' +FROM +( + SELECT toDate('2020-01-03') AS day + FROM numbers(1) +) AS a +INNER JOIN +( + SELECT day + FROM test_table + WHERE day = '2020-01-03' + GROUP 
BY day +) AS b ON a.day = b.day SETTINGS max_rows_to_read = 11; + +SELECT day = '2020-01-01' +FROM +( + SELECT day + FROM test_table + WHERE day = '2020-01-01' + GROUP BY day +) AS a +INNER JOIN +( + SELECT toDate('2020-01-01') AS day + FROM numbers(1) +) AS b ON a.day = b.day SETTINGS max_rows_to_read = 11; + + +SELECT 'alias2alias'; +SELECT COUNT() = 10 FROM test_table WHERE day1 = '2020-01-02' SETTINGS max_rows_to_read = 10; +SELECT t = '2020-01-03' FROM (SELECT day1 AS t FROM test_table WHERE t = '2020-01-03' GROUP BY t SETTINGS max_rows_to_read = 10); +SELECT t = '2020-01-03' FROM (SELECT day2 AS t FROM test_table WHERE t = '2020-01-03' GROUP BY t SETTINGS max_rows_to_read = 10); +SELECT COUNT() = 10 FROM test_table WHERE day1 = '2020-01-03' UNION ALL SELECT 1 FROM numbers(1) SETTINGS max_rows_to_read = 11; +SELECT COUNT() = 0 FROM (SELECT toDate('2019-01-01') AS day1, day1 AS t FROM test_table PREWHERE t = '2020-01-03' WHERE t = '2020-01-03' GROUP BY t ); +SELECT day1 = '2020-01-04' FROM test_table PREWHERE day1 = '2020-01-04' WHERE day1 = '2020-01-04' GROUP BY day1 SETTINGS max_rows_to_read = 10; + + +ALTER TABLE test_table add column array Array(UInt8) default [1, 2, 3]; +ALTER TABLE test_table add column struct.key Array(UInt8) default [2, 4, 6], add column struct.value Array(UInt8) alias array; + + +SELECT 'array-join'; +set max_rows_to_read = 10; +SELECT count() == 10 FROM test_table WHERE day = '2020-01-01'; +SELECT sum(struct.key) == 30, sum(struct.value) == 30 FROM (SELECT struct.key, struct.value FROM test_table array join struct WHERE day = '2020-01-01'); + + +SELECT 'lambda'; +-- lambda parameters in filter should not be rewritten +SELECT count() == 10 FROM test_table WHERE arrayMap((day) -> day + 1, [1,2,3]) [1] = 2 AND day = '2020-01-03'; + +set max_rows_to_read = 0; + +SELECT 'optimize_read_in_order'; +EXPLAIN description = 0 SELECT day AS s FROM test_table ORDER BY s LIMIT 1 SETTINGS optimize_read_in_order = 0; +EXPLAIN description = 0 SELECT day AS s FROM test_table ORDER BY s LIMIT 1 SETTINGS optimize_read_in_order = 1; +EXPLAIN description = 0 SELECT toDate(timestamp) AS s FROM test_table ORDER BY toDate(timestamp) LIMIT 1 SETTINGS optimize_read_in_order = 1; + + +SELECT 'optimize_aggregation_in_order'; +EXPLAIN description = 0 SELECT day, count() AS s FROM test_table GROUP BY day SETTINGS optimize_aggregation_in_order = 0; +EXPLAIN description = 0 SELECT day, count() AS s FROM test_table GROUP BY day SETTINGS optimize_aggregation_in_order = 1; +EXPLAIN description = 0 SELECT toDate(timestamp), count() AS s FROM test_table GROUP BY toDate(timestamp) SETTINGS optimize_aggregation_in_order = 1; + +DROP TABLE test_table; + + +SELECT 'second-index'; +DROP TABLE IF EXISTS test_index; +CREATE TABLE test_index +( + `key_string` String, + `key_uint32` ALIAS toUInt32(key_string), + INDEX idx toUInt32(key_string) TYPE set(0) GRANULARITY 1 +) +ENGINE = MergeTree +PARTITION BY tuple() +PRIMARY KEY tuple() +ORDER BY key_string SETTINGS index_granularity = 1; + +INSERT INTO test_index SELECT * FROM numbers(10); +set max_rows_to_read = 1; +SELECT COUNT() == 1 FROM test_index WHERE key_uint32 = 1; +SELECT COUNT() == 1 FROM test_index WHERE toUInt32(key_string) = 1; +DROP TABLE IF EXISTS test_index; + + +-- check that an alias column can be used to match projections +drop table if exists pd; +drop table if exists pl; +create table pd (dt DateTime, i int, dt_m DateTime alias toStartOfMinute(dt)) engine Distributed(test_shard_localhost, currentDatabase(), 'pl'); +create table pl (dt DateTime, i
int, projection p (select sum(i) group by toStartOfMinute(dt))) engine MergeTree order by dt; + +insert into pl values ('2020-10-24', 1); + +set max_rows_to_read = 2; +select sum(i) from pd group by dt_m settings optimize_use_projections = 1, force_optimize_projection = 1; + +drop table pd; +drop table pl; + +drop table if exists t; + +create temporary table t (x UInt64, y alias x); +insert into t values (1); +select sum(x), sum(y) from t; + +drop table t; diff --git a/parser/testdata/01576_if_null_external_aggregation/ast.json b/parser/testdata/01576_if_null_external_aggregation/ast.json new file mode 100644 index 000000000..f0b8ff706 --- /dev/null +++ b/parser/testdata/01576_if_null_external_aggregation/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001431523, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01576_if_null_external_aggregation/metadata.json b/parser/testdata/01576_if_null_external_aggregation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01576_if_null_external_aggregation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01576_if_null_external_aggregation/query.sql b/parser/testdata/01576_if_null_external_aggregation/query.sql new file mode 100644 index 000000000..73ab5fee5 --- /dev/null +++ b/parser/testdata/01576_if_null_external_aggregation/query.sql @@ -0,0 +1,8 @@ +SET max_bytes_before_external_group_by = 200000000; +SET max_bytes_ratio_before_external_group_by = 0; + +SET max_memory_usage = 1500000000; +SET max_threads = 12; + +SELECT bitAnd(number, toUInt64(pow(2, 20) - 1)) as k, argMaxIf(k, number % 2 = 0 ? 
number : Null, number > 42), uniq(number) AS u FROM numbers(1000000) GROUP BY k format Null; + diff --git a/parser/testdata/01579_date_datetime_index_comparison/ast.json b/parser/testdata/01579_date_datetime_index_comparison/ast.json new file mode 100644 index 000000000..c745b8fc0 --- /dev/null +++ b/parser/testdata/01579_date_datetime_index_comparison/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_index (children 1)" + }, + { + "explain": " Identifier test_index" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001615636, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01579_date_datetime_index_comparison/metadata.json b/parser/testdata/01579_date_datetime_index_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01579_date_datetime_index_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01579_date_datetime_index_comparison/query.sql b/parser/testdata/01579_date_datetime_index_comparison/query.sql new file mode 100644 index 000000000..c1ba86b01 --- /dev/null +++ b/parser/testdata/01579_date_datetime_index_comparison/query.sql @@ -0,0 +1,16 @@ +drop table if exists test_index; + +create table test_index(date Date) engine MergeTree partition by toYYYYMM(date) order by date; + +insert into test_index values('2020-10-30'); + +select 1 from test_index where date < toDateTime('2020-10-30 06:00:00'); + +drop table if exists test_index; + +select toTypeName([-1, toUInt32(1)]); +-- We don't promote to wide integers +select toTypeName([-1, toUInt64(1)]); -- { serverError NO_COMMON_TYPE } +select toTypeName([-1, toInt128(1)]); +select toTypeName([toInt64(-1), toInt128(1)]); +select toTypeName([toUInt64(1), toUInt256(1)]); diff --git a/parser/testdata/01580_column_const_comparision/ast.json b/parser/testdata/01580_column_const_comparision/ast.json new file mode 100644 index 000000000..b7779e1be --- /dev/null +++ b/parser/testdata/01580_column_const_comparision/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '1111' (alias name)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers_mt" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier name" + }, + { + "explain": " Literal UInt64_10000" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001256156, + "rows_read": 14, + "bytes_read": 530 + } +} diff --git a/parser/testdata/01580_column_const_comparision/metadata.json b/parser/testdata/01580_column_const_comparision/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01580_column_const_comparision/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01580_column_const_comparision/query.sql b/parser/testdata/01580_column_const_comparision/query.sql new file mode 100644 index 000000000..2eecd9d9d --- 
/dev/null +++ b/parser/testdata/01580_column_const_comparision/query.sql @@ -0,0 +1 @@ +select '1111' as name from system.numbers_mt order by name limit 10000 format Null; diff --git a/parser/testdata/01581_deduplicate_by_columns_local/ast.json b/parser/testdata/01581_deduplicate_by_columns_local/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01581_deduplicate_by_columns_local/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01581_deduplicate_by_columns_local/metadata.json b/parser/testdata/01581_deduplicate_by_columns_local/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01581_deduplicate_by_columns_local/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01581_deduplicate_by_columns_local/query.sql b/parser/testdata/01581_deduplicate_by_columns_local/query.sql new file mode 100644 index 000000000..510282036 --- /dev/null +++ b/parser/testdata/01581_deduplicate_by_columns_local/query.sql @@ -0,0 +1,126 @@ +--- See also tests/queries/0_stateless/01581_deduplicate_by_columns_replicated.sql + +--- local case + +-- Just in case previous test runs left some stuff behind. +DROP TABLE IF EXISTS source_data; + +CREATE TABLE source_data ( + pk Int32, sk Int32, val UInt32, partition_key UInt32 DEFAULT 1, + PRIMARY KEY (pk) +) ENGINE=MergeTree +ORDER BY (pk, sk); + +INSERT INTO source_data (pk, sk, val) VALUES (0, 0, 0), (0, 0, 0), (1, 1, 2), (1, 1, 3); + +SELECT 'TOTAL rows', count() FROM source_data; + +DROP TABLE IF EXISTS full_duplicates; +-- table with duplicates on MATERIALIZED columns +CREATE TABLE full_duplicates ( + pk Int32, sk Int32, val UInt32, partition_key UInt32, mat UInt32 MATERIALIZED 12345, alias UInt32 ALIAS 2, + PRIMARY KEY (pk) +) ENGINE=MergeTree +PARTITION BY (partition_key + 1) -- ensure that a column used in an expression is properly handled when deduplicating. See [1] below. +ORDER BY (pk, toString(sk * 10)); -- silly order key to ensure that a key column is checked even when it is part of an expression. See [1] below.
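+ +-- Editorial sketch (not part of the upstream test): per [1], every column referenced by the +-- primary key, sorting key, or partition expression must stay in the DEDUPLICATE BY list, even +-- when the key is an expression such as toString(sk * 10); e.g. a list like the following is +-- accepted, while dropping any of pk/sk/partition_key fails in the ERROR cases checked below: +-- OPTIMIZE TABLE full_duplicates FINAL DEDUPLICATE BY pk, sk, val, mat, partition_key;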
+ +-- ERROR cases +OPTIMIZE TABLE full_duplicates DEDUPLICATE BY pk, sk, val, mat, alias; -- { serverError NO_SUCH_COLUMN_IN_TABLE } -- alias column is present +OPTIMIZE TABLE full_duplicates DEDUPLICATE BY sk, val; -- { serverError THERE_IS_NO_COLUMN } -- primary key column is missing +OPTIMIZE TABLE full_duplicates DEDUPLICATE BY * EXCEPT(pk, sk, val, mat, alias, partition_key); -- { serverError EMPTY_LIST_OF_COLUMNS_QUERIED } -- list is empty +OPTIMIZE TABLE full_duplicates DEDUPLICATE BY * EXCEPT(pk); -- { serverError THERE_IS_NO_COLUMN } -- primary key column is missing [1] +OPTIMIZE TABLE full_duplicates DEDUPLICATE BY * EXCEPT(sk); -- { serverError THERE_IS_NO_COLUMN } -- sorting key column is missing [1] +OPTIMIZE TABLE full_duplicates DEDUPLICATE BY * EXCEPT(partition_key); -- { serverError THERE_IS_NO_COLUMN } -- partitioning column is missing [1] + +OPTIMIZE TABLE full_duplicates DEDUPLICATE BY; -- { clientError SYNTAX_ERROR } -- empty list is a syntax error +OPTIMIZE TABLE partial_duplicates DEDUPLICATE BY pk,sk,val,mat EXCEPT mat; -- { clientError SYNTAX_ERROR } -- invalid syntax +OPTIMIZE TABLE partial_duplicates DEDUPLICATE BY pk APPLY(pk + 1); -- { clientError SYNTAX_ERROR } -- APPLY column transformer is not supported +OPTIMIZE TABLE partial_duplicates DEDUPLICATE BY pk REPLACE(pk + 1); -- { clientError SYNTAX_ERROR } -- REPLACE column transformer is not supported + +-- Valid cases +-- NOTE: here and below we need FINAL to force deduplication in such a small set of data in only 1 part. + +SELECT 'OLD DEDUPLICATE'; +INSERT INTO full_duplicates SELECT * FROM source_data; +OPTIMIZE TABLE full_duplicates FINAL DEDUPLICATE; +SELECT * FROM full_duplicates; +TRUNCATE full_duplicates; + +SELECT 'DEDUPLICATE BY *'; +INSERT INTO full_duplicates SELECT * FROM source_data; +OPTIMIZE TABLE full_duplicates FINAL DEDUPLICATE BY *; +SELECT * FROM full_duplicates; +TRUNCATE full_duplicates; + +SELECT 'DEDUPLICATE BY * EXCEPT mat'; +INSERT INTO full_duplicates SELECT * FROM source_data; +OPTIMIZE TABLE full_duplicates FINAL DEDUPLICATE BY * EXCEPT mat; +SELECT * FROM full_duplicates; +TRUNCATE full_duplicates; + +SELECT 'DEDUPLICATE BY pk,sk,val,mat,partition_key'; +INSERT INTO full_duplicates SELECT * FROM source_data; +OPTIMIZE TABLE full_duplicates FINAL DEDUPLICATE BY pk,sk,val,mat,partition_key; +SELECT * FROM full_duplicates; +TRUNCATE full_duplicates; + +--DROP TABLE full_duplicates; + +-- Now on to the partial duplicates, where the MATERIALIZED column always has a unique value. +DROP TABLE IF EXISTS partial_duplicates; +CREATE TABLE partial_duplicates ( + pk Int32, sk Int32, val UInt32, partition_key UInt32 DEFAULT 1, mat UInt32 MATERIALIZED rand(), alias UInt32 ALIAS 2, + PRIMARY KEY (pk) +) ENGINE=MergeTree +ORDER BY (pk, sk); + +SELECT 'Can not remove full duplicates'; + +-- should not remove anything +SELECT 'OLD DEDUPLICATE'; +INSERT INTO partial_duplicates SELECT * FROM source_data; +OPTIMIZE TABLE partial_duplicates FINAL DEDUPLICATE; +SELECT count() FROM partial_duplicates; +TRUNCATE partial_duplicates; + +SELECT 'DEDUPLICATE BY pk,sk,val,mat'; +INSERT INTO partial_duplicates SELECT * FROM source_data; +OPTIMIZE TABLE partial_duplicates FINAL DEDUPLICATE BY pk,sk,val,mat; +SELECT count() FROM partial_duplicates; +TRUNCATE partial_duplicates; + +SELECT 'Remove partial duplicates'; + +SELECT 'DEDUPLICATE BY *'; -- all except MATERIALIZED columns, hence will reduce the number of rows.
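+-- Editorial note (inferred from the comments above, not upstream text): mat is MATERIALIZED rand(), +-- so every inserted row gets its own mat value; any BY list that includes mat therefore sees no +-- duplicates, while lists that skip mat, like the BY * just below, can still collapse rows that +-- agree on the listed columns: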
+INSERT INTO partial_duplicates SELECT * FROM source_data; +OPTIMIZE TABLE partial_duplicates FINAL DEDUPLICATE BY *; +SELECT count() FROM partial_duplicates; +TRUNCATE partial_duplicates; + +SELECT 'DEDUPLICATE BY * EXCEPT mat'; +INSERT INTO partial_duplicates SELECT * FROM source_data; +OPTIMIZE TABLE partial_duplicates FINAL DEDUPLICATE BY * EXCEPT mat; +SELECT * FROM partial_duplicates; +TRUNCATE partial_duplicates; + +SELECT 'DEDUPLICATE BY COLUMNS("*") EXCEPT mat'; +INSERT INTO partial_duplicates SELECT * FROM source_data; +OPTIMIZE TABLE partial_duplicates FINAL DEDUPLICATE BY COLUMNS('.*') EXCEPT mat; +SELECT * FROM partial_duplicates; +TRUNCATE partial_duplicates; + +SELECT 'DEDUPLICATE BY pk,sk'; +INSERT INTO partial_duplicates SELECT * FROM source_data; +OPTIMIZE TABLE partial_duplicates FINAL DEDUPLICATE BY pk,sk; +SELECT * FROM partial_duplicates; +TRUNCATE partial_duplicates; + +SELECT 'DEDUPLICATE BY COLUMNS(".*k")'; +INSERT INTO partial_duplicates SELECT * FROM source_data; +OPTIMIZE TABLE partial_duplicates FINAL DEDUPLICATE BY COLUMNS('.*k'); +SELECT * FROM partial_duplicates; +TRUNCATE partial_duplicates; + +DROP TABLE full_duplicates; +DROP TABLE partial_duplicates; +DROP TABLE source_data; diff --git a/parser/testdata/01581_deduplicate_by_columns_replicated_long/ast.json b/parser/testdata/01581_deduplicate_by_columns_replicated_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01581_deduplicate_by_columns_replicated_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01581_deduplicate_by_columns_replicated_long/metadata.json b/parser/testdata/01581_deduplicate_by_columns_replicated_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01581_deduplicate_by_columns_replicated_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01581_deduplicate_by_columns_replicated_long/query.sql b/parser/testdata/01581_deduplicate_by_columns_replicated_long/query.sql new file mode 100644 index 000000000..87eec36a5 --- /dev/null +++ b/parser/testdata/01581_deduplicate_by_columns_replicated_long/query.sql @@ -0,0 +1,55 @@ +-- Tags: long, replica + +--- See also tests/queries/0_stateless/01581_deduplicate_by_columns_local.sql + +--- replicated case + +-- Just in case previous test runs left some stuff behind.
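+-- Editorial note (an assumption about ReplicatedMergeTree, not upstream text): the SYNC modifier on +-- the DROPs below makes the drop wait until the replica's ZooKeeper metadata is removed, so a re-run +-- cannot collide with the fixed '/clickhouse/tables/{database}/test_01581/...' path that both +-- replicas register under.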
+DROP TABLE IF EXISTS replicated_deduplicate_by_columns_r1 SYNC; +DROP TABLE IF EXISTS replicated_deduplicate_by_columns_r2 SYNC; + +SET replication_alter_partitions_sync = 2; + +-- IRL insert_replica_id would be filled from the hostname +CREATE TABLE IF NOT EXISTS replicated_deduplicate_by_columns_r1 ( + id Int32, val UInt32, unique_value UInt64 MATERIALIZED rowNumberInBlock() +) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_01581/replicated_deduplicate', 'r1') ORDER BY id; + +CREATE TABLE IF NOT EXISTS replicated_deduplicate_by_columns_r2 ( + id Int32, val UInt32, unique_value UInt64 MATERIALIZED rowNumberInBlock() +) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_01581/replicated_deduplicate', 'r2') ORDER BY id; + + +-- insert some data, 2 records: (3, 1003), (4, 1004) are duplicated and differ in unique_value / insert_replica_id +-- (1, 1001), (5, 2005) have full duplicates +INSERT INTO replicated_deduplicate_by_columns_r1 VALUES (1, 1001), (1, 1001), (2, 1002), (3, 1003), (4, 1004), (1, 2001), (9, 1002); +INSERT INTO replicated_deduplicate_by_columns_r2 VALUES (1, 1001), (2, 2002), (3, 1003), (4, 1004), (5, 2005), (5, 2005); + +-- make sure that all data is present on all replicas +SYSTEM SYNC REPLICA replicated_deduplicate_by_columns_r2; +SYSTEM SYNC REPLICA replicated_deduplicate_by_columns_r1; + +SELECT 'check that we have a data'; +SELECT 'r1', id, val, count(), uniqExact(unique_value) FROM replicated_deduplicate_by_columns_r1 GROUP BY id, val ORDER BY id, val; +SELECT 'r2', id, val, count(), uniqExact(unique_value) FROM replicated_deduplicate_by_columns_r2 GROUP BY id, val ORDER BY id, val; + + +-- NOTE: here and below we need FINAL to force deduplication in such a small set of data in only 1 part. +-- that should remove full duplicates +OPTIMIZE TABLE replicated_deduplicate_by_columns_r1 FINAL DEDUPLICATE; + +SELECT 'after old OPTIMIZE DEDUPLICATE'; +SELECT 'r1', id, val, count(), uniqExact(unique_value) FROM replicated_deduplicate_by_columns_r1 GROUP BY id, val ORDER BY id, val; +SELECT 'r2', id, val, count(), uniqExact(unique_value) FROM replicated_deduplicate_by_columns_r2 GROUP BY id, val ORDER BY id, val; + +OPTIMIZE TABLE replicated_deduplicate_by_columns_r1 FINAL DEDUPLICATE BY id, val; +OPTIMIZE TABLE replicated_deduplicate_by_columns_r1 FINAL DEDUPLICATE BY COLUMNS('[id, val]'); +OPTIMIZE TABLE replicated_deduplicate_by_columns_r1 FINAL DEDUPLICATE BY COLUMNS('[i]') EXCEPT(unique_value); + +SELECT 'check data again after multiple deduplications with new syntax'; +SELECT 'r1', id, val, count(), uniqExact(unique_value) FROM replicated_deduplicate_by_columns_r1 GROUP BY id, val ORDER BY id, val; +SELECT 'r2', id, val, count(), uniqExact(unique_value) FROM replicated_deduplicate_by_columns_r2 GROUP BY id, val ORDER BY id, val; + +-- cleanup the mess +DROP TABLE replicated_deduplicate_by_columns_r1; +DROP TABLE replicated_deduplicate_by_columns_r2; diff --git a/parser/testdata/01581_to_int_inf_nan/ast.json b/parser/testdata/01581_to_int_inf_nan/ast.json new file mode 100644 index 000000000..f9ab1dafc --- /dev/null +++ b/parser/testdata/01581_to_int_inf_nan/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toInt64 (children 1)" + }, + {
"explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_inf" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001238687, + "rows_read": 7, + "bytes_read": 263 + } +} diff --git a/parser/testdata/01581_to_int_inf_nan/metadata.json b/parser/testdata/01581_to_int_inf_nan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01581_to_int_inf_nan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01581_to_int_inf_nan/query.sql b/parser/testdata/01581_to_int_inf_nan/query.sql new file mode 100644 index 000000000..04679f239 --- /dev/null +++ b/parser/testdata/01581_to_int_inf_nan/query.sql @@ -0,0 +1,10 @@ +SELECT toInt64(inf); -- { serverError CANNOT_CONVERT_TYPE } +SELECT toInt128(inf); -- { serverError CANNOT_CONVERT_TYPE } +SELECT toInt256(inf); -- { serverError CANNOT_CONVERT_TYPE } +SELECT toInt64(nan); -- { serverError CANNOT_CONVERT_TYPE } +SELECT toInt128(nan); -- { serverError CANNOT_CONVERT_TYPE } +SELECT toInt256(nan); -- { serverError CANNOT_CONVERT_TYPE } +SELECT toUInt64(inf); -- { serverError CANNOT_CONVERT_TYPE } +SELECT toUInt256(inf); -- { serverError CANNOT_CONVERT_TYPE } +SELECT toUInt64(nan); -- { serverError CANNOT_CONVERT_TYPE } +SELECT toUInt256(nan); -- { serverError CANNOT_CONVERT_TYPE } diff --git a/parser/testdata/01582_any_join_supertype/ast.json b/parser/testdata/01582_any_join_supertype/ast.json new file mode 100644 index 000000000..f207bc05e --- /dev/null +++ b/parser/testdata/01582_any_join_supertype/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery foo (children 1)" + }, + { + "explain": " Identifier foo" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001178464, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01582_any_join_supertype/metadata.json b/parser/testdata/01582_any_join_supertype/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01582_any_join_supertype/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01582_any_join_supertype/query.sql b/parser/testdata/01582_any_join_supertype/query.sql new file mode 100644 index 000000000..9cd7b4397 --- /dev/null +++ b/parser/testdata/01582_any_join_supertype/query.sql @@ -0,0 +1,37 @@ +DROP TABLE IF EXISTS foo; +DROP TABLE IF EXISTS bar; + +CREATE TABLE foo (server_date Date, server_time Datetime('Asia/Istanbul'), dimension_1 String) ENGINE = MergeTree() PARTITION BY toYYYYMM(server_date) ORDER BY (server_date); +CREATE TABLE bar (server_date Date, dimension_1 String) ENGINE = MergeTree() PARTITION BY toYYYYMM(server_date) ORDER BY (server_date); + +INSERT INTO foo VALUES ('2020-01-01', '2020-01-01 12:00:00', 'test1'), ('2020-01-01', '2020-01-01 13:00:00', 'test2'); +INSERT INTO bar VALUES ('2020-01-01', 'test2'), ('2020-01-01', 'test3'); + +SET optimize_move_to_prewhere = 1; +SET any_join_distinct_right_table_keys = 0; + +SELECT count() +FROM foo ANY INNER JOIN bar USING (dimension_1) +WHERE (foo.server_date <= '2020-11-07') AND (toDate(foo.server_time, 'Asia/Yekaterinburg') <= '2020-11-07'); + +SELECT toDateTime(foo.server_time, 'UTC') +FROM foo +ANY INNER JOIN bar USING (dimension_1) +WHERE toDate(foo.server_time, 'UTC') <= toDate('2020-04-30'); + +SELECT toDateTime(foo.server_time, 'UTC') FROM foo +SEMI JOIN bar USING (dimension_1) WHERE toDate(foo.server_time, 'UTC') <= toDate('2020-04-30'); + +SET 
any_join_distinct_right_table_keys = 1; + +SELECT count() +FROM foo ANY INNER JOIN bar USING (dimension_1) +WHERE (foo.server_date <= '2020-11-07') AND (toDate(foo.server_time, 'Asia/Yekaterinburg') <= '2020-11-07'); + +SELECT toDateTime(foo.server_time, 'UTC') +FROM foo +ANY INNER JOIN bar USING (dimension_1) +WHERE toDate(foo.server_time, 'UTC') <= toDate('2020-04-30'); + +DROP TABLE foo; +DROP TABLE bar; diff --git a/parser/testdata/01582_deterministic_function_with_predicate/ast.json b/parser/testdata/01582_deterministic_function_with_predicate/ast.json new file mode 100644 index 000000000..20e5ae86c --- /dev/null +++ b/parser/testdata/01582_deterministic_function_with_predicate/ast.json @@ -0,0 +1,142 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Explain EXPLAIN SYNTAX (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1000000" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function rand64 (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_0.01" + }, + { + "explain": " Literal Float64_18446744073709552000" + } + ], + + "rows": 40, + + "statistics": + { + "elapsed": 0.001871356, + "rows_read": 40, + "bytes_read": 1874 + } +} diff --git a/parser/testdata/01582_deterministic_function_with_predicate/metadata.json b/parser/testdata/01582_deterministic_function_with_predicate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01582_deterministic_function_with_predicate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01582_deterministic_function_with_predicate/query.sql b/parser/testdata/01582_deterministic_function_with_predicate/query.sql new file mode 
100644 index 000000000..9f64c292a --- /dev/null +++ b/parser/testdata/01582_deterministic_function_with_predicate/query.sql @@ -0,0 +1 @@ +EXPLAIN SYNTAX SELECT count(*) FROM ( SELECT number FROM ( SELECT number FROM numbers(1000000) ) WHERE rand64() < (0.01 * 18446744073709552000.)); diff --git a/parser/testdata/01582_distinct_subquery_groupby/ast.json b/parser/testdata/01582_distinct_subquery_groupby/ast.json new file mode 100644 index 000000000..aff42a6d6 --- /dev/null +++ b/parser/testdata/01582_distinct_subquery_groupby/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00128691, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01582_distinct_subquery_groupby/metadata.json b/parser/testdata/01582_distinct_subquery_groupby/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01582_distinct_subquery_groupby/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01582_distinct_subquery_groupby/query.sql b/parser/testdata/01582_distinct_subquery_groupby/query.sql new file mode 100644 index 000000000..7251dc428 --- /dev/null +++ b/parser/testdata/01582_distinct_subquery_groupby/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS d; + +CREATE TABLE t (a String, b Int) ENGINE = TinyLog; +INSERT INTO t VALUES ('a', 0), ('a', 1), ('b', 0); +SELECT * FROM t; + +SELECT '---'; +CREATE TABLE d (a String, b Int) ENGINE = Distributed(test_shard_localhost, currentDatabase(), t); +SELECT DISTINCT b FROM (SELECT a, b FROM d GROUP BY a, b) order by b; +DROP TABLE d; + +SELECT '---'; +CREATE TABLE d (a String, b Int) ENGINE = Distributed(test_cluster_two_shards_localhost, currentDatabase(), t); +SELECT DISTINCT b FROM (SELECT a, b FROM d GROUP BY a, b) order by b; +DROP TABLE d; + +DROP TABLE t; diff --git a/parser/testdata/01582_move_to_prewhere_compact_parts/ast.json b/parser/testdata/01582_move_to_prewhere_compact_parts/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01582_move_to_prewhere_compact_parts/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01582_move_to_prewhere_compact_parts/metadata.json b/parser/testdata/01582_move_to_prewhere_compact_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01582_move_to_prewhere_compact_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01582_move_to_prewhere_compact_parts/query.sql b/parser/testdata/01582_move_to_prewhere_compact_parts/query.sql new file mode 100644 index 000000000..9a4f8d1f7 --- /dev/null +++ b/parser/testdata/01582_move_to_prewhere_compact_parts/query.sql @@ -0,0 +1,21 @@ +-- Tags: no-random-merge-tree-settings + +SET optimize_move_to_prewhere = 1; +SET convert_query_to_cnf = 0; + +DROP TABLE IF EXISTS prewhere_move; +CREATE TABLE prewhere_move (x Int, y String) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO prewhere_move SELECT number, toString(number) FROM numbers(1000); + +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move WHERE x > 100) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter%'; + +DROP TABLE prewhere_move; + +CREATE TABLE prewhere_move (x1 Int, x2 Int, x3 Int, x4 String CODEC(NONE)) ENGINE = MergeTree 
ORDER BY tuple(); +INSERT INTO prewhere_move SELECT number, number, number, repeat('a', 1024) FROM numbers(1000); + +-- Not all conditions moved +SET move_all_conditions_to_prewhere = 0; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_String', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move WHERE x1 > 100 AND x2 > 100 AND x3 > 100 AND x4 > '100') WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter%'; + +DROP TABLE prewhere_move; diff --git a/parser/testdata/01583_const_column_in_set_index/ast.json b/parser/testdata/01583_const_column_in_set_index/ast.json new file mode 100644 index 000000000..63bbaae1e --- /dev/null +++ b/parser/testdata/01583_const_column_in_set_index/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery insub (children 1)" + }, + { + "explain": " Identifier insub" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001469015, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/01583_const_column_in_set_index/metadata.json b/parser/testdata/01583_const_column_in_set_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01583_const_column_in_set_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01583_const_column_in_set_index/query.sql b/parser/testdata/01583_const_column_in_set_index/query.sql new file mode 100644 index 000000000..b781efb0f --- /dev/null +++ b/parser/testdata/01583_const_column_in_set_index/query.sql @@ -0,0 +1,9 @@ +drop table if exists insub; + +create table insub (i int, j int) engine MergeTree order by i settings index_granularity = 1; +insert into insub select number a, a + 2 from numbers(10); + +SET max_rows_to_read = 12; -- 10 from numbers + 2 from table +select * from insub where i in (select toInt32(3) from numbers(10)); + +drop table if exists insub; diff --git a/parser/testdata/01584_distributed_buffer_cannot_find_column/ast.json b/parser/testdata/01584_distributed_buffer_cannot_find_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01584_distributed_buffer_cannot_find_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01584_distributed_buffer_cannot_find_column/metadata.json b/parser/testdata/01584_distributed_buffer_cannot_find_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01584_distributed_buffer_cannot_find_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01584_distributed_buffer_cannot_find_column/query.sql b/parser/testdata/01584_distributed_buffer_cannot_find_column/query.sql new file mode 100644 index 000000000..648eb14f5 --- /dev/null +++ b/parser/testdata/01584_distributed_buffer_cannot_find_column/query.sql @@ -0,0 +1,27 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS realtimedrep; +DROP TABLE IF EXISTS realtimedistributed; +DROP TABLE IF EXISTS realtimebuff; + +CREATE TABLE realtimedrep(amount Int64,transID String,userID String,appID String,appName String,transType String,orderSource String,nau String,fau String,transactionType String,supplier String,fMerchant String,bankConnCode String,reqDate DateTime) ENGINE = MergeTree PARTITION BY toDate(reqDate) ORDER BY transID SETTINGS index_granularity = 8192; +CREATE TABLE realtimedistributed(amount Int64,transID String,userID String,appID String,appName String,transType String,orderSource String,nau String,fau 
String,transactionType String,supplier String,fMerchant String,bankConnCode String,reqDate DateTime) ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), realtimedrep, rand()); +CREATE TABLE realtimebuff(amount Int64,transID String,userID String,appID String,appName String,transType String,orderSource String,nau String,fau String,transactionType String,supplier String,fMerchant String,bankConnCode String,reqDate DateTime) ENGINE = Buffer(currentDatabase(), 'realtimedistributed', 16, 3600, 36000, 10000, 1000000, 10000000, 100000000); + +insert into realtimebuff (amount,transID,userID,appID,appName,transType,orderSource,nau,fau,transactionType,supplier,fMerchant,bankConnCode,reqDate) values (100, '200312000295032','200223000028708','14', 'Data','1', '20','1', '0','123','abc', '1234a','ZPVBIDV', 1598256583); + +-- Data is written to the buffer table but has not been written to the Distributed table +select sum(amount) = 100 from realtimebuff; + +OPTIMIZE TABLE realtimebuff; +-- Data has been flushed from the Buffer table to the Distributed table and may have been sent to 0, 1 or 2 shards. +-- Both shards reside on localhost in the same table. +select sum(amount) IN (0, 100, 200) from realtimebuff; + +-- Data has been sent to all shards. +SYSTEM FLUSH DISTRIBUTED realtimedistributed; +select sum(amount) = 200 from realtimebuff; + +DROP TABLE realtimedrep; +DROP TABLE realtimedistributed; +DROP TABLE realtimebuff; diff --git a/parser/testdata/01585_fuzz_bits_with_bugfix/ast.json b/parser/testdata/01585_fuzz_bits_with_bugfix/ast.json new file mode 100644 index 000000000..d1dee8a8b --- /dev/null +++ b/parser/testdata/01585_fuzz_bits_with_bugfix/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 2)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Function toTypeName (children 1)" + }, + { + "explain": "     ExpressionList (children 1)" + }, + { + "explain": "      Function fuzzBits (children 1)" + }, + { + "explain": "       ExpressionList (children 2)" + }, + { + "explain": "        Literal 'stringstring'" + }, + { + "explain": "        Literal Float64_0.5" + }, + { + "explain": "   TablesInSelectQuery (children 1)" + }, + { + "explain": "    TablesInSelectQueryElement (children 1)" + }, + { + "explain": "     TableExpression (children 1)" + }, + { + "explain": "      Function numbers (children 1)" + }, + { + "explain": "       ExpressionList (children 1)" + }, + { + "explain": "        Literal UInt64_3" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001645178, + "rows_read": 16, + "bytes_read": 644 + } +} diff --git a/parser/testdata/01585_fuzz_bits_with_bugfix/metadata.json b/parser/testdata/01585_fuzz_bits_with_bugfix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01585_fuzz_bits_with_bugfix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01585_fuzz_bits_with_bugfix/query.sql b/parser/testdata/01585_fuzz_bits_with_bugfix/query.sql new file mode 100644 index 000000000..d41f18e20 --- /dev/null +++ b/parser/testdata/01585_fuzz_bits_with_bugfix/query.sql @@ -0,0 +1,3 @@ +SELECT toTypeName(fuzzBits('stringstring', 0.5)) from numbers(3); + +SELECT toTypeName(fuzzBits('stringstring', 0.5)) from ( SELECT 1 AS x UNION ALL SELECT NULL ) group by x diff --git a/parser/testdata/01585_use_index_for_global_in/ast.json
b/parser/testdata/01585_use_index_for_global_in/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01585_use_index_for_global_in/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01585_use_index_for_global_in/metadata.json b/parser/testdata/01585_use_index_for_global_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01585_use_index_for_global_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01585_use_index_for_global_in/query.sql b/parser/testdata/01585_use_index_for_global_in/query.sql new file mode 100644 index 000000000..3b0ca726d --- /dev/null +++ b/parser/testdata/01585_use_index_for_global_in/query.sql @@ -0,0 +1,22 @@ +-- Tags: global + +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +drop table if exists xp; +drop table if exists xp_d; + +create table xp(i UInt64, j UInt64) engine MergeTree order by i settings index_granularity = 1; +create table xp_d as xp engine Distributed(test_shard_localhost, currentDatabase(), xp); + +insert into xp select number, number + 2 from numbers(10); + +set max_rows_to_read = 4; -- 2 from numbers, 2 from tables +select * from xp where i in (select * from numbers(2)); +select * from xp where i global in (select * from numbers(2)); +select * from xp_d where i in (select * from numbers(2)); + +set max_rows_to_read = 6; -- 2 from numbers, 2 from GLOBAL temp table (pushed from numbers), 2 from local xp +select * from xp_d where i global in (select * from numbers(2)); + +drop table if exists xp; +drop table if exists xp_d; diff --git a/parser/testdata/01585_use_index_for_global_in_with_null/ast.json b/parser/testdata/01585_use_index_for_global_in_with_null/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01585_use_index_for_global_in_with_null/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01585_use_index_for_global_in_with_null/metadata.json b/parser/testdata/01585_use_index_for_global_in_with_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01585_use_index_for_global_in_with_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01585_use_index_for_global_in_with_null/query.sql b/parser/testdata/01585_use_index_for_global_in_with_null/query.sql new file mode 100644 index 000000000..d4147a445 --- /dev/null +++ b/parser/testdata/01585_use_index_for_global_in_with_null/query.sql @@ -0,0 +1,44 @@ +-- Tags: global + +drop table if exists xp; +drop table if exists xp_d; + +create table xp(i Nullable(UInt64), j UInt64) engine MergeTree order by i settings index_granularity = 1, allow_nullable_key = 1; +create table xp_d as xp engine Distributed(test_shard_localhost, currentDatabase(), xp); + +insert into xp select number, number + 2 from numbers(10); +insert into xp select null, 100; + +optimize table xp final; + +set max_rows_to_read = 2; +select * from xp where i in [0, 1]; +select * from xp where i global in [0, 1]; +select * from xp_d where i in [0, 1]; +select * from xp_d where i global in [0, 1]; + +set max_rows_to_read = 4; -- 2 in the subquery, 2 in the query itself +select * from xp where i in (select * from numbers(2)); +select * from xp where i global in (select * from numbers(2)); +select * from xp_d where i in (select * from numbers(2)); + +set max_rows_to_read = 6; -- 2 subquery, 2 from global temp table (GLOBAL 
IN), 2 from local xp table +select * from xp_d where i global in (select * from numbers(2)); + +set transform_null_in = 1; +set max_rows_to_read = 4; -- 2 in the subquery, 2 in the query itself +select * from xp where i in (select * from numbers(2)); +select * from xp where i global in (select * from numbers(2)); +select * from xp_d where i in (select * from numbers(2)); + +set max_rows_to_read = 6; -- 2 subquery, 2 from global temp table (GLOBAL IN), 2 from local xp table +select * from xp_d where i global in (select * from numbers(2)); + +set max_rows_to_read = 0; -- No rows should be read +select * from xp where i in (null); +select * from xp where i global in (null); +select * from xp_d where i in (null); +select * from xp_d where i global in (null); + +drop table if exists xp; +drop table if exists xp_d; diff --git a/parser/testdata/01586_columns_pruning/ast.json b/parser/testdata/01586_columns_pruning/ast.json new file mode 100644 index 000000000..30118fc95 --- /dev/null +++ b/parser/testdata/01586_columns_pruning/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001407084, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01586_columns_pruning/metadata.json b/parser/testdata/01586_columns_pruning/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01586_columns_pruning/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01586_columns_pruning/query.sql b/parser/testdata/01586_columns_pruning/query.sql new file mode 100644 index 000000000..8ed7beb07 --- /dev/null +++ b/parser/testdata/01586_columns_pruning/query.sql @@ -0,0 +1,6 @@ +SET max_memory_usage = 10000000000; + +-- Unneeded column is removed from subquery. 
+SELECT count() FROM (SELECT number, groupArray(repeat(toString(number), 1000000)) FROM numbers(1000000) GROUP BY number); +-- Unneeded column cannot be removed from subquery and the query runs out of memory +SELECT count() FROM (SELECT number, groupArray(repeat(toString(number), 1000000)) AS agg FROM numbers(1000000) GROUP BY number HAVING notEmpty(agg)); -- { serverError MEMORY_LIMIT_EXCEEDED } diff --git a/parser/testdata/01586_replicated_mutations_empty_partition/ast.json b/parser/testdata/01586_replicated_mutations_empty_partition/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01586_replicated_mutations_empty_partition/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01586_replicated_mutations_empty_partition/metadata.json b/parser/testdata/01586_replicated_mutations_empty_partition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01586_replicated_mutations_empty_partition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01586_replicated_mutations_empty_partition/query.sql b/parser/testdata/01586_replicated_mutations_empty_partition/query.sql new file mode 100644 index 000000000..c4a3c939c --- /dev/null +++ b/parser/testdata/01586_replicated_mutations_empty_partition/query.sql @@ -0,0 +1,35 @@ +-- Tags: replica + +DROP TABLE IF EXISTS replicated_mutations_empty_partitions SYNC; + +CREATE TABLE replicated_mutations_empty_partitions +( + key UInt64, + value String +) +ENGINE = ReplicatedMergeTree('/clickhouse/test/'||currentDatabase()||'/01586_replicated_mutations_empty_partitions/{shard}', '{replica}') +ORDER BY key +PARTITION by key; + +-- insert_keeper* settings are adjusted since several actual inserts are happening behind one statement due to partitioning i.e.
inserts in different partitions +INSERT INTO replicated_mutations_empty_partitions SETTINGS insert_keeper_fault_injection_probability=0 SELECT number, toString(number) FROM numbers(10); + +SELECT count(distinct value) FROM replicated_mutations_empty_partitions; + +SELECT count() FROM system.zookeeper WHERE path = '/clickhouse/test/'||currentDatabase()||'/01586_replicated_mutations_empty_partitions/'||getMacro('shard')||'/block_numbers'; + +ALTER TABLE replicated_mutations_empty_partitions DROP PARTITION '3'; +ALTER TABLE replicated_mutations_empty_partitions DROP PARTITION '4'; +ALTER TABLE replicated_mutations_empty_partitions DROP PARTITION '5'; +ALTER TABLE replicated_mutations_empty_partitions DROP PARTITION '9'; + +-- still ten records +SELECT count() FROM system.zookeeper WHERE path = '/clickhouse/test/'||currentDatabase()||'/01586_replicated_mutations_empty_partitions/'||getMacro('shard')||'/block_numbers'; + +ALTER TABLE replicated_mutations_empty_partitions MODIFY COLUMN value UInt64 SETTINGS replication_alter_partitions_sync=2; + +SELECT sum(value) FROM replicated_mutations_empty_partitions; + +SHOW CREATE TABLE replicated_mutations_empty_partitions; + +DROP TABLE IF EXISTS replicated_mutations_empty_partitions SYNC; diff --git a/parser/testdata/01586_storage_join_low_cardinality_key/ast.json b/parser/testdata/01586_storage_join_low_cardinality_key/ast.json new file mode 100644 index 000000000..eeaa01f3b --- /dev/null +++ b/parser/testdata/01586_storage_join_low_cardinality_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery low_card (children 1)" + }, + { + "explain": " Identifier low_card" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00125572, + "rows_read": 2, + "bytes_read": 69 + } +} diff --git a/parser/testdata/01586_storage_join_low_cardinality_key/metadata.json b/parser/testdata/01586_storage_join_low_cardinality_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01586_storage_join_low_cardinality_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01586_storage_join_low_cardinality_key/query.sql b/parser/testdata/01586_storage_join_low_cardinality_key/query.sql new file mode 100644 index 000000000..28507e25f --- /dev/null +++ b/parser/testdata/01586_storage_join_low_cardinality_key/query.sql @@ -0,0 +1,13 @@ +CREATE TABLE low_card +( + `lc` LowCardinality(String) +) +ENGINE = Join(ANY, LEFT, lc); + +INSERT INTO low_card VALUES ( '1' ); + +SELECT * FROM low_card; +SELECT * FROM low_card WHERE lc = '1'; +SELECT CAST(lc AS String) FROM low_card; + +DROP TABLE low_card; diff --git a/parser/testdata/01590_countSubstrings/ast.json b/parser/testdata/01590_countSubstrings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01590_countSubstrings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01590_countSubstrings/metadata.json b/parser/testdata/01590_countSubstrings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01590_countSubstrings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01590_countSubstrings/query.sql b/parser/testdata/01590_countSubstrings/query.sql new file mode 100644 index 000000000..5ec4f412d --- /dev/null +++ b/parser/testdata/01590_countSubstrings/query.sql @@ -0,0 +1,146 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + +-- 
+-- countSubstrings +-- +select ''; +select '# countSubstrings'; + +select ''; +select 'CountSubstringsImpl::constantConstant'; +select 'CountSubstringsImpl::constantConstantScalar'; + +select 'empty'; +select countSubstrings('', '.'); +select countSubstrings('', ''); +select countSubstrings('.', ''); +select countSubstrings(toString(number), '') from numbers(1); +select countSubstrings('', toString(number)) from numbers(1); +select countSubstrings('aaa', materialize('')); +select countSubstrings(materialize('aaa'), ''); +select countSubstrings(materialize('aaa'), materialize('')); + +select 'char'; +select countSubstrings('foobar.com', '.'); +select countSubstrings('www.foobar.com', '.'); +select countSubstrings('.foobar.com.', '.'); + +select 'word'; +select countSubstrings('foobar.com', 'com'); +select countSubstrings('com.foobar', 'com'); +select countSubstrings('foo.com.bar', 'com'); +select countSubstrings('com.foobar.com', 'com'); +select countSubstrings('com.foo.com.bar.com', 'com'); + +select 'intersect'; +select countSubstrings('aaaa', 'aa'); + +select ''; +select 'CountSubstringsImpl::vectorVector'; +select countSubstrings(toString(number), toString(number)) from numbers(1); +select countSubstrings(concat(toString(number), '000111'), toString(number)) from numbers(1); +select countSubstrings(concat(toString(number), '000111001'), toString(number)) from numbers(1); +select 'intersect', countSubstrings(concat(toString(number), '0000000'), '00') from numbers(1) format CSV; + +select ''; +select 'CountSubstringsImpl::constantVector'; +select countSubstrings('100', toString(number)) from numbers(3); +select countSubstrings('0100', toString(number)) from numbers(1); +select countSubstrings('010000', toString(number)) from numbers(1); +select 'intersect', countSubstrings('00000000', repeat(toString(number), 2)) from numbers(1) format CSV; + +select ''; +select 'CountSubstringsImpl::vectorConstant'; +select countSubstrings(toString(number), '1') from system.numbers limit 3 offset 9; +select countSubstrings(concat(toString(number), '000111'), '1') from numbers(1); +select countSubstrings(concat(toString(number), '000111001'), '1') from numbers(1); +select 'intersect', countSubstrings(repeat(toString(number), 8), '00') from numbers(1) format CSV; + +-- +-- countSubstringsCaseInsensitive +-- +select ''; +select '# countSubstringsCaseInsensitive'; + +select ''; +select 'CountSubstringsImpl::constantConstant'; +select 'CountSubstringsImpl::constantConstantScalar'; + +select 'char'; +select countSubstringsCaseInsensitive('aba', 'B'); +select countSubstringsCaseInsensitive('bab', 'B'); +select countSubstringsCaseInsensitive('BaBaB', 'b'); + +select 'word'; +select countSubstringsCaseInsensitive('foobar.com', 'COM'); +select countSubstringsCaseInsensitive('com.foobar', 'COM'); +select countSubstringsCaseInsensitive('foo.com.bar', 'COM'); +select countSubstringsCaseInsensitive('com.foobar.com', 'COM'); +select countSubstringsCaseInsensitive('com.foo.com.bar.com', 'COM'); + +select 'intersect'; +select countSubstringsCaseInsensitive('aaaa', 'AA'); + +select ''; +select 'CountSubstringsImpl::vectorVector'; +select countSubstringsCaseInsensitive(upper(char(number)), lower(char(number))) from numbers(100) where number = 0x41; -- A +select countSubstringsCaseInsensitive(concat(toString(number), 'aaa111'), char(number)) from numbers(100) where number = 0x41; +select countSubstringsCaseInsensitive(concat(toString(number), 'aaa111aa1'), char(number)) from numbers(100) where number = 0x41; + +select ''; 
+select 'CountSubstringsImpl::constantVector'; +select countSubstringsCaseInsensitive('aab', char(number)) from numbers(100) where number >= 0x41 and number <= 0x43; -- A..C +select countSubstringsCaseInsensitive('abaa', char(number)) from numbers(100) where number = 0x41; +select countSubstringsCaseInsensitive('abaaaa', char(number)) from numbers(100) where number = 0x41; + +select ''; +select 'CountSubstringsImpl::vectorConstant'; +select countSubstringsCaseInsensitive(char(number), 'a') from numbers(100) where number >= 0x41 and number <= 0x43; + +-- +-- countSubstringsCaseInsensitiveUTF8 +-- +select ''; +select '# countSubstringsCaseInsensitiveUTF8'; + +select ''; +select 'CountSubstringsImpl::constantConstant'; +select 'CountSubstringsImpl::constantConstantScalar'; + +select 'char'; +select countSubstringsCaseInsensitiveUTF8('фуу', 'Ф'); +select countSubstringsCaseInsensitiveUTF8('ФуФ', 'ф'); +select countSubstringsCaseInsensitiveUTF8('ФуФуФ', 'ф'); + +select 'word'; +select countSubstringsCaseInsensitiveUTF8('подстрока.рф', 'РФ'); +select countSubstringsCaseInsensitiveUTF8('рф.подстрока', 'рф'); +select countSubstringsCaseInsensitiveUTF8('подстрока.рф.подстрока', 'РФ'); +select countSubstringsCaseInsensitiveUTF8('рф.подстрока.рф', 'рф'); +select countSubstringsCaseInsensitiveUTF8('рф.подстрока.рф.подстрока.рф', 'РФ'); + +select 'intersect'; +select countSubstringsCaseInsensitiveUTF8('яяяя', 'ЯЯ'); + +select ''; +select 'CountSubstringsImpl::vectorVector'; +-- can't use any char, since this will not make valid UTF8 +-- for the haystack we use number as-is, for needle we just add dependency from number to go to vectorVector code +select countSubstringsCaseInsensitiveUTF8(upperUTF8(concat(char(number), 'я')), lowerUTF8(concat(substringUTF8(char(number), 2), 'Я'))) from numbers(100) where number = 0x41; -- A +select countSubstringsCaseInsensitiveUTF8(concat(toString(number), 'ЯЯЯ111'), concat(substringUTF8(char(number), 2), 'я')) from numbers(100) where number = 0x41; -- A +select countSubstringsCaseInsensitiveUTF8(concat(toString(number), 'яяя111яя1'), concat(substringUTF8(char(number), 2), 'Я')) from numbers(100) where number = 0x41; -- A +select 'intersect', countSubstringsCaseInsensitiveUTF8(concat(toString(number), 'яяяяяяяя'), concat(substringUTF8(char(number), 2), 'Яя')) from numbers(100) where number = 0x41 format CSV; -- A + +select ''; +select 'CountSubstringsImpl::constantVector'; +select countSubstringsCaseInsensitiveUTF8('ЯЯb', concat(substringUTF8(char(number), 2), 'я')) from numbers(100) where number = 0x41; -- A +select countSubstringsCaseInsensitiveUTF8('ЯbЯЯ', concat(substringUTF8(char(number), 2), 'я')) from numbers(100) where number = 0x41; -- A +select countSubstringsCaseInsensitiveUTF8('ЯbЯЯЯЯ', concat(substringUTF8(char(number), 2), 'я')) from numbers(100) where number = 0x41; -- A +select 'intersect', countSubstringsCaseInsensitiveUTF8('ЯЯЯЯЯЯЯЯ', concat(substringUTF8(char(number), 2), 'Яя')) from numbers(100) where number = 0x41 format CSV; -- A + +select ''; +select 'CountSubstringsImpl::vectorConstant'; +select countSubstringsCaseInsensitiveUTF8(concat(char(number), 'я'), 'Я') from numbers(100) where number = 0x41; -- A +select countSubstringsCaseInsensitiveUTF8(concat(char(number), 'б'), 'Я') from numbers(100) where number = 0x41; -- A +select 'intersect', countSubstringsCaseInsensitiveUTF8(concat(char(number), repeat('я', 8)), 'яЯ') from numbers(100) where number = 0x41 format CSV; -- A diff --git a/parser/testdata/01591_window_functions/ast.json 
b/parser/testdata/01591_window_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01591_window_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01591_window_functions/metadata.json b/parser/testdata/01591_window_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01591_window_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01591_window_functions/query.sql b/parser/testdata/01591_window_functions/query.sql new file mode 100644 index 000000000..5a73c2bed --- /dev/null +++ b/parser/testdata/01591_window_functions/query.sql @@ -0,0 +1,555 @@ +-- Tags: long + +SET enable_analyzer = 1; + +-- Too slow +SET max_bytes_before_external_sort = 0; +SET max_bytes_ratio_before_external_sort = 0; +SET max_bytes_before_external_group_by = 0; +SET max_bytes_ratio_before_external_group_by = 0; + +-- { echo } + +-- just something basic +select number, count() over (partition by intDiv(number, 3) order by number rows unbounded preceding) from numbers(10); + +-- proper calculation across blocks +select number, max(number) over (partition by intDiv(number, 3) order by number desc rows unbounded preceding) from numbers(10) settings max_block_size = 2; + +-- not a window function +select number, abs(number) over (partition by toString(intDiv(number, 3)) rows unbounded preceding) from numbers(10); -- { serverError UNKNOWN_AGGREGATE_FUNCTION } + +-- no partition by +select number, avg(number) over (order by number rows unbounded preceding) from numbers(10); + +-- no order by +select number, quantileExact(number) over (partition by intDiv(number, 3) AS value order by number rows unbounded preceding) from numbers(10); + +-- can add an alias after window spec +select number, quantileExact(number) over (partition by intDiv(number, 3) AS value order by number rows unbounded preceding) q from numbers(10); + +-- now we should be able to compute expressions with window functions +select number, q * 10, quantileExact(number) over (partition by intDiv(number, 3) order by number rows unbounded preceding) q from numbers(10) order by number; + +-- must work in WHERE if you wrap it in a subquery +select * from (select count(*) over (rows unbounded preceding) c from numbers(3)) where c > 0; + +-- should work in ORDER BY +select number, max(number) over (partition by intDiv(number, 3) order by number desc rows unbounded preceding) m from numbers(10) order by m desc, number; + +-- also works in ORDER BY if you wrap it in a subquery +select * from (select count(*) over (rows unbounded preceding) c from numbers(3)) order by c; + +-- Example with window function only in ORDER BY. Here we make a rank of all +-- numbers sorted descending, and then sort by this rank descending, and must get +-- the ascending order. +select * from (select * from numbers(5) order by rand()) order by count() over (order by number desc rows unbounded preceding) desc; + +-- Aggregate functions as window function arguments. This query is semantically +-- the same as the above one, only we replace `number` with +-- `any(number) group by number` and so on. 
+select * from (select * from numbers(5) order by rand()) group by number order by sum(any(number + 1)) over (order by min(number) desc rows unbounded preceding) desc; +-- some more simple cases w/aggregate functions +select sum(any(number)) over (rows unbounded preceding) from numbers(1); +select sum(any(number) + 1) over (rows unbounded preceding) from numbers(1); +select sum(any(number + 1)) over (rows unbounded preceding) from numbers(1); + +-- different windows +-- an explain test would also be helpful, but it's too immature now and I don't +-- want to change the reference all the time +select number, max(number) over (partition by intDiv(number, 3) order by number desc rows unbounded preceding), count(number) over (partition by intDiv(number, 5) order by number rows unbounded preceding) as m from numbers(31) order by number settings max_block_size = 2; + +-- two functions over the same window +-- an explain test would also be helpful, but it's too immature now and I don't +-- want to change the reference all the time +select number, max(number) over (partition by intDiv(number, 3) order by number desc rows unbounded preceding), count(number) over (partition by intDiv(number, 3) order by number desc rows unbounded preceding) as m from numbers(7) order by number settings max_block_size = 2; + +-- check that we can work with constant columns +select median(x) over (partition by x) from (select 1 x); + +-- an empty window definition is valid as well +select groupArray(number) over (rows unbounded preceding) from numbers(3); +select groupArray(number) over () from numbers(3); + +-- This one tests that we properly process the window function arguments. +-- Seen errors like 'column `1` not found' from count(1). +select count(1) over (rows unbounded preceding), max(number + 1) over () from numbers(3); + +-- Should work in DISTINCT +select distinct sum(0) over (rows unbounded preceding) from numbers(2); +select distinct any(number) over (rows unbounded preceding) from numbers(2); + +-- Various kinds of aliases are properly substituted into various parts of window +-- function definition. +with number + 1 as x select intDiv(number, 3) as y, sum(x + y) over (partition by y order by x rows unbounded preceding) from numbers(7); + +-- WINDOW clause +select 1 window w1 as (); + +select sum(number) over w1, sum(number) over w2 +from numbers(10) +window + w1 as (rows unbounded preceding), + w2 as (partition by intDiv(number, 3) as value order by number rows unbounded preceding) +; + +-- FIXME both functions should use the same window, but they don't. Add an +-- EXPLAIN test for this. +select + sum(number) over w1, + sum(number) over (partition by intDiv(number, 3) as value order by number rows unbounded preceding) +from numbers(10) +window + w1 as (partition by intDiv(number, 3) rows unbounded preceding) +; + +-- RANGE frame +-- It's the default +select sum(number) over () from numbers(3); + +-- Try some mutually prime sizes of partition, group and block, for the number +-- of rows that is their least common multiple + 1, so that we see all the +-- interesting corner cases.
+select number, intDiv(number, 3) p, mod(number, 2) o, count(number) over w as c +from numbers(31) +window w as (partition by p order by o, number range unbounded preceding) +order by number +settings max_block_size = 5 +; + +select number, intDiv(number, 5) p, mod(number, 3) o, count(number) over w as c +from numbers(31) +window w as (partition by p order by o, number range unbounded preceding) +order by number +settings max_block_size = 2 +; + +select number, intDiv(number, 5) p, mod(number, 2) o, count(number) over w as c +from numbers(31) +window w as (partition by p order by o, number range unbounded preceding) +order by number +settings max_block_size = 3 +; + +select number, intDiv(number, 3) p, mod(number, 5) o, count(number) over w as c +from numbers(31) +window w as (partition by p order by o, number range unbounded preceding) +order by number +settings max_block_size = 2 +; + +select number, intDiv(number, 2) p, mod(number, 5) o, count(number) over w as c +from numbers(31) +window w as (partition by p order by o, number range unbounded preceding) +order by number +settings max_block_size = 3 +; + +select number, intDiv(number, 2) p, mod(number, 3) o, count(number) over w as c +from numbers(31) +window w as (partition by p order by o range unbounded preceding) +order by number +settings max_block_size = 5 +; + +-- A case where the partition end is in the current block, and the frame end +-- is triggered by the partition end. +select min(number) over (partition by p) from (select number, intDiv(number, 3) p from numbers(10)); + +-- UNBOUNDED FOLLOWING frame end +select + min(number) over wa, min(number) over wo, + max(number) over wa, max(number) over wo +from + (select number, intDiv(number, 3) p, mod(number, 5) o + from numbers(31)) +window + wa as (partition by p order by o + range between unbounded preceding and unbounded following), + wo as (partition by p order by o + rows between unbounded preceding and unbounded following) +settings max_block_size = 2; + +-- ROWS offset frame start +select number, p, + count(*) over (partition by p order by number + rows between 1 preceding and unbounded following), + count(*) over (partition by p order by number + rows between current row and unbounded following), + count(*) over (partition by p order by number + rows between 1 following and unbounded following) +from (select number, intDiv(number, 5) p from numbers(31)) +order by p, number +settings max_block_size = 2; + +-- ROWS offset frame start and end +select number, p, + count(*) over (partition by p order by number + rows between 2 preceding and 2 following) +from (select number, intDiv(number, 7) p from numbers(71)) +order by p, number +settings max_block_size = 2; + +SELECT count(*) OVER (ROWS BETWEEN 2 PRECEDING AND CURRENT ROW) FROM numbers(4); + +-- frame boundaries that run into the partition end +select + count() over (partition by intDiv(number, 3) + rows between 100 following and unbounded following), + count() over (partition by intDiv(number, 3) + rows between current row and 100 following) +from numbers(10); + +-- seen a use-after-free under MSan in this query once +SELECT number, max(number) OVER (PARTITION BY intDiv(number, 7) ORDER BY number ASC NULLS LAST ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) FROM numbers(1024) SETTINGS max_block_size = 2 FORMAT Null; + +-- a corner case +select count() over (); + +-- RANGE CURRENT ROW frame start +select number, p, o, + count(*) over (partition by p order by o + range between current row and unbounded following) +from
(select number, intDiv(number, 5) p, mod(number, 3) o + from numbers(31)) +order by p, o, number +settings max_block_size = 2; + +select + count(*) over (rows between current row and current row), + count(*) over (range between current row and current row) +from numbers(3); + +-- RANGE OFFSET +-- a basic RANGE OFFSET frame +select x, min(x) over w, max(x) over w, count(x) over w from ( + select toUInt8(number) x from numbers(11)) +window w as (order by x asc range between 1 preceding and 2 following) +order by x; + +-- overflow conditions +select x, min(x) over w, max(x) over w, count(x) over w +from ( + select toUInt8(if(mod(number, 2), + toInt64(255 - intDiv(number, 2)), + toInt64(intDiv(number, 2)))) x + from numbers(10) +) +window w as (order by x range between 1 preceding and 2 following) +order by x; + +select x, min(x) over w, max(x) over w, count(x) over w +from ( + select toInt8(multiIf( + mod(number, 3) == 0, toInt64(intDiv(number, 3)), + mod(number, 3) == 1, toInt64(127 - intDiv(number, 3)), + toInt64(-128 + intDiv(number, 3)))) x + from numbers(15) +) +window w as (order by x range between 1 preceding and 2 following) +order by x; + +-- We need large offsets to trigger overflow to positive direction, or +-- else the frame end runs into partition end w/o overflow and doesn't move +-- after that. The frame from this query is equivalent to the entire partition. +select x, min(x) over w, max(x) over w, count(x) over w +from ( + select toUInt8(if(mod(number, 2), + toInt64(255 - intDiv(number, 2)), + toInt64(intDiv(number, 2)))) x + from numbers(10) +) +window w as (order by x range between 255 preceding and 255 following) +order by x; + +-- RANGE OFFSET ORDER BY DESC +select x, min(x) over w, max(x) over w, count(x) over w from ( + select toUInt8(number) x from numbers(11)) t +window w as (order by x desc range between 1 preceding and 2 following) +order by x +settings max_block_size = 1; + +select x, min(x) over w, max(x) over w, count(x) over w from ( + select toUInt8(number) x from numbers(11)) t +window w as (order by x desc range between 1 preceding and unbounded following) +order by x +settings max_block_size = 2; + +select x, min(x) over w, max(x) over w, count(x) over w from ( + select toUInt8(number) x from numbers(11)) t +window w as (order by x desc range between unbounded preceding and 2 following) +order by x +settings max_block_size = 3; + +select x, min(x) over w, max(x) over w, count(x) over w from ( + select toUInt8(number) x from numbers(11)) t +window w as (order by x desc range between unbounded preceding and 2 preceding) +order by x +settings max_block_size = 4; + + +-- Check that we put windows in such an order that we can reuse the sort. +-- First, check that at least the result is correct when we have many windows +-- with different sort order. +select + number, + count(*) over (partition by p order by number), + count(*) over (partition by p order by number, o), + count(*) over (), + count(*) over (order by number), + count(*) over (order by o), + count(*) over (order by o, number), + count(*) over (order by number, o), + count(*) over (partition by p order by o, number), + count(*) over (partition by p), + count(*) over (partition by p order by o), + count(*) over (partition by p, o order by number) +from + (select number, intDiv(number, 3) p, mod(number, 5) o + from numbers(16)) t +order by number +; + +-- The EXPLAIN for the above query would be difficult to understand, so check some +-- simple cases instead. 
+explain select + count(*) over (partition by p), + count(*) over (), + count(*) over (partition by p order by o) +from + (select number, intDiv(number, 3) p, mod(number, 5) o + from numbers(16)) t +; + +explain select + count(*) over (order by o, number), + count(*) over (order by number) +from + (select number, intDiv(number, 3) p, mod(number, 5) o + from numbers(16)) t +; + +-- A test case for the sort comparator found by fuzzer. +SELECT + max(number) OVER (ORDER BY number DESC NULLS FIRST), + max(number) OVER (ORDER BY number ASC NULLS FIRST) +FROM numbers(2) +; + +-- optimize_read_in_order conflicts with sorting for window functions, check that +-- it is disabled. +drop table if exists window_mt; +create table window_mt engine MergeTree order by number + as select number, mod(number, 3) p from numbers(100); + +select number, count(*) over (partition by p) + from window_mt order by number limit 10 settings optimize_read_in_order = 0; + +select number, count(*) over (partition by p) + from window_mt order by number limit 10 settings optimize_read_in_order = 1; + +drop table window_mt; + +-- some true window functions -- rank and friends +select number, p, o, + count(*) over w, + rank() over w, + dense_rank() over w, + row_number() over w +from (select number, intDiv(number, 5) p, mod(number, 3) o + from numbers(31) order by o, number) t +window w as (partition by p order by o, number) +order by p, o, number +settings max_block_size = 2; + +-- our replacement for lag/lead +select + anyOrNull(number) + over (order by number rows between 1 preceding and 1 preceding), + anyOrNull(number) + over (order by number rows between 1 following and 1 following) +from numbers(5); + +-- variants of lag/lead that respect the frame +select number, p, pp, + lagInFrame(number) over w as lag1, + lagInFrame(number, number - pp) over w as lag2, + lagInFrame(number, number - pp, number * 11) over w as lag, + leadInFrame(number, number - pp, number * 11) over w as lead +from (select number, intDiv(number, 5) p, p * 5 pp from numbers(16)) +window w as (partition by p order by number + rows between unbounded preceding and unbounded following) +order by number +settings max_block_size = 3; +; + +-- careful with auto-application of Null combinator +select lagInFrame(toNullable(1)) over (); +select lagInFrameOrNull(1) over (); -- { serverError BAD_ARGUMENTS } +-- this is the same as `select max(Null::Nullable(Nothing))` +select intDiv(1, NULL) x, toTypeName(x), max(x) over (); +-- to make lagInFrame return null for out-of-frame rows, cast the argument to +-- Nullable; otherwise, it returns default values. 
+SELECT + number, + lagInFrame(toNullable(number), 1) OVER w, + lagInFrame(toNullable(number), 2) OVER w, + lagInFrame(number, 1) OVER w, + lagInFrame(number, 2) OVER w +FROM numbers(4) +WINDOW w AS (ORDER BY number ASC) +; + +-- case-insensitive SQL-standard synonyms for any and anyLast +select + number, + fIrSt_VaLue(number) over w, + lAsT_vAlUe(number) over w +from numbers(10) +window w as (order by number range between 1 preceding and 1 following) +order by number +; + +-- nth_value without specific frame range given +select + number, + nth_value(number, 1) over w as firstValue, + nth_value(number, 2) over w as secondValue, + nth_value(number, 3) over w as thirdValue, + nth_value(number, 4) over w as fourthValue +from numbers(10) +window w as (order by number) +order by number +; + +-- nth_value with frame range specified +select + number, + nth_value(number, 1) over w as firstValue, + nth_value(number, 2) over w as secondValue, + nth_value(number, 3) over w as thirdValue, + nth_value(number, 4) over w as fourthValue +from numbers(10) +window w as (order by number range between 1 preceding and 1 following) +order by number +; + +-- to make nth_value return null for out-of-frame rows, cast the argument to +-- Nullable; otherwise, it returns default values. +SELECT + number, + nth_value(toNullable(number), 1) OVER w as firstValue, + nth_value(toNullable(number), 3) OVER w as thirdValue +FROM numbers(5) +WINDOW w AS (ORDER BY number ASC) +; + +-- nth_value UBsan +SELECT nth_value(1, -1) OVER (); -- { serverError BAD_ARGUMENTS } +SELECT nth_value(1, 0) OVER (); -- { serverError BAD_ARGUMENTS } +SELECT nth_value(1, /* INT64_MAX+1 */ 0x7fffffffffffffff+1) OVER (); -- { serverError BAD_ARGUMENTS } +SELECT nth_value(1, /* INT64_MAX */ 0x7fffffffffffffff) OVER (); +SELECT nth_value(1, 1) OVER (); + +-- lagInFrame UBsan +SELECT lagInFrame(1, -1) OVER (); -- { serverError BAD_ARGUMENTS } +SELECT lagInFrame(1, 0) OVER (); +SELECT lagInFrame(1, /* INT64_MAX+1 */ 0x7fffffffffffffff+1) OVER (); -- { serverError BAD_ARGUMENTS } +SELECT lagInFrame(1, /* INT64_MAX */ 0x7fffffffffffffff) OVER (); +SELECT lagInFrame(1, 1) OVER (); + +-- leadInFrame UBsan +SELECT leadInFrame(1, -1) OVER (); -- { serverError BAD_ARGUMENTS } +SELECT leadInFrame(1, 0) OVER (); +SELECT leadInFrame(1, /* INT64_MAX+1 */ 0x7fffffffffffffff+1) OVER (); -- { serverError BAD_ARGUMENTS } +SELECT leadInFrame(1, /* INT64_MAX */ 0x7fffffffffffffff) OVER (); +SELECT leadInFrame(1, 1) OVER (); + +-- nth_value Msan +SELECT nth_value(1, '') OVER (); -- { serverError BAD_ARGUMENTS } + +-- lagInFrame Msan +SELECT lagInFrame(1, '') OVER (); -- { serverError BAD_ARGUMENTS } + +-- leadInFrame Msan +SELECT leadInFrame(1, '') OVER (); -- { serverError BAD_ARGUMENTS } + +-- In this case, we had a problem with PartialSortingTransform returning zero-row +-- chunks for input chunks w/o columns.
+select count() over () from numbers(4) where number < 2; + +-- floating point RANGE frame +select + count(*) over (order by toFloat32(number) range 5 preceding), + count(*) over (order by toFloat64(number) range 5 preceding), + count(*) over (order by toFloat32(number) range between current row and 5 following), + count(*) over (order by toFloat64(number) range between current row and 5 following) +from numbers(7) +; + +-- negative offsets should not be allowed +select count() over (order by toInt64(number) range between -1 preceding and unbounded following) from numbers(1); -- { serverError BAD_ARGUMENTS } +select count() over (order by toInt64(number) range between -1 following and unbounded following) from numbers(1); -- { serverError BAD_ARGUMENTS } +select count() over (order by toInt64(number) range between unbounded preceding and -1 preceding) from numbers(1); -- { serverError BAD_ARGUMENTS } +select count() over (order by toInt64(number) range between unbounded preceding and -1 following) from numbers(1); -- { serverError BAD_ARGUMENTS } + +-- a test with aggregate function that allocates memory in arena +select sum(a[length(a)]) +from ( + select groupArray(number) over (partition by modulo(number, 11) + order by modulo(number, 1111), number) a + from numbers_mt(10000) +) settings max_block_size = 7; + +-- a test with aggregate function which is -state type +select bitmapCardinality(bs) +from + ( + select groupBitmapMergeState(bm) over (order by k asc rows between unbounded preceding and current row) as bs + from + ( + select + groupBitmapState(number) as bm, k + from + ( + select + number, + number % 3 as k + from numbers(3) + ) + group by k + ) + ); + +-- -INT_MIN row offset that can lead to problems with negation, found when fuzzing +-- under UBSan. Should be limited to at most INT_MAX. +select count() over (rows between 2147483648 preceding and 2147493648 following) from numbers(2); -- { serverError BAD_ARGUMENTS } + +-- Somehow in this case WindowTransform gets empty input chunks not marked as +-- input end, and then two (!) empty input chunks marked as input end. Whatever. +select count() over () from (select 1 a) l inner join (select 2 a) r using a; +-- This case works as expected, one empty input chunk marked as input end. +select count() over () where null; + +-- Inheriting another window. 
+select number, count() over (w1 rows unbounded preceding) from numbers(10) +window + w0 as (partition by intDiv(number, 5) as p), + w1 as (w0 order by mod(number, 3) as o, number) +order by p, o, number +; + +-- can't redefine PARTITION BY +select count() over (w partition by number) from numbers(1) window w as (partition by intDiv(number, 5)); -- { serverError BAD_ARGUMENTS } + +-- can't redefine existing ORDER BY +select count() over (w order by number) from numbers(1) window w as (partition by intDiv(number, 5) order by mod(number, 3)); -- { serverError BAD_ARGUMENTS } + +-- parent window can't have frame +select count() over (w range unbounded preceding) from numbers(1) window w as (partition by intDiv(number, 5) order by mod(number, 3) rows unbounded preceding); -- { serverError BAD_ARGUMENTS } + +-- looks weird but probably should work -- this is a window that inherits and changes nothing +select count() over (w) from numbers(1) window w as (); + +-- nonexistent parent window +select count() over (w2 rows unbounded preceding); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/01592_length_map/ast.json b/parser/testdata/01592_length_map/ast.json new file mode 100644 index 000000000..8b6cc3121 --- /dev/null +++ b/parser/testdata/01592_length_map/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function length (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function map (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_4" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.00137191, + "rows_read": 12, + "bytes_read": 438 + } +} diff --git a/parser/testdata/01592_length_map/metadata.json b/parser/testdata/01592_length_map/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01592_length_map/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01592_length_map/query.sql b/parser/testdata/01592_length_map/query.sql new file mode 100644 index 000000000..bdec53689 --- /dev/null +++ b/parser/testdata/01592_length_map/query.sql @@ -0,0 +1,6 @@ +select length(map(1,2,3,4)); +select length(map()); +select empty(map(1,2,3,4)); +select empty(map()); +select notEmpty(map(1,2,3,4)); +select notEmpty(map()); diff --git a/parser/testdata/01592_long_window_functions1/ast.json b/parser/testdata/01592_long_window_functions1/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01592_long_window_functions1/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01592_long_window_functions1/metadata.json b/parser/testdata/01592_long_window_functions1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01592_long_window_functions1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01592_long_window_functions1/query.sql b/parser/testdata/01592_long_window_functions1/query.sql new file mode 100644 index 000000000..d0af32caa --- /dev/null +++ 
b/parser/testdata/01592_long_window_functions1/query.sql @@ -0,0 +1,41 @@ +-- Tags: long + +-- test became more than an order of magnitude slower with max_bytes_before_external_sort=1 +set max_bytes_before_external_sort = 0; +set max_bytes_ratio_before_external_sort = 0; + +drop table if exists stack; + +set max_insert_threads = 4; + +create table stack(item_id Int64, brand_id Int64, rack_id Int64, dt DateTime, expiration_dt DateTime, quantity UInt64) +Engine = MergeTree +partition by toYYYYMM(dt) +order by (brand_id, toStartOfHour(dt)) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into stack +select number%99991, number%11, number%1111, toDateTime('2020-01-01 00:00:00')+number/100, + toDateTime('2020-02-01 00:00:00')+number/10, intDiv(number,100)+1 +from numbers_mt(1000000); + +select '---- arrays ----'; + +select cityHash64( toString( groupArray (tuple(*) ) )) from ( + select brand_id, rack_id, arrayJoin(arraySlice(arraySort(groupArray(quantity)),1,2)) quantity + from stack + group by brand_id, rack_id + order by brand_id, rack_id, quantity +) t; + + +select '---- window f ----'; + +select cityHash64( toString( groupArray (tuple(*) ) )) from ( + select brand_id, rack_id, quantity from + ( select brand_id, rack_id, quantity, row_number() over (partition by brand_id, rack_id order by quantity) rn + from stack ) as t0 + where rn <= 2 + order by brand_id, rack_id, quantity +) t; + +drop table if exists stack; diff --git a/parser/testdata/01592_toUnixTimestamp_Date/ast.json b/parser/testdata/01592_toUnixTimestamp_Date/ast.json new file mode 100644 index 000000000..8571bd91d --- /dev/null +++ b/parser/testdata/01592_toUnixTimestamp_Date/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUnixTimestamp (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function makeDate (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_2023" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001351316, + "rows_read": 11, + "bytes_read": 424 + } +} diff --git a/parser/testdata/01592_toUnixTimestamp_Date/metadata.json b/parser/testdata/01592_toUnixTimestamp_Date/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01592_toUnixTimestamp_Date/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01592_toUnixTimestamp_Date/query.sql b/parser/testdata/01592_toUnixTimestamp_Date/query.sql new file mode 100644 index 000000000..f2ba18a3b --- /dev/null +++ b/parser/testdata/01592_toUnixTimestamp_Date/query.sql @@ -0,0 +1,4 @@ +select toUnixTimestamp(makeDate(2023, 5, 10)); +select toUnixTimestamp(makeDate32(2023, 5, 10)); +select toUnixTimestamp(makeDate(2023, 5, 10), 'Pacific/Auckland'); +select toUnixTimestamp(makeDate32(2023, 5, 10), 'Pacific/Auckland'); \ No newline at end of file diff --git a/parser/testdata/01592_window_functions/ast.json b/parser/testdata/01592_window_functions/ast.json new file mode 100644 index 000000000..164256c4f --- /dev/null +++ b/parser/testdata/01592_window_functions/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + 
[ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery product_groups (children 1)" + }, + { + "explain": " Identifier product_groups" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.0012722, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/01592_window_functions/metadata.json b/parser/testdata/01592_window_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01592_window_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01592_window_functions/query.sql b/parser/testdata/01592_window_functions/query.sql new file mode 100644 index 000000000..c6bb23bc7 --- /dev/null +++ b/parser/testdata/01592_window_functions/query.sql @@ -0,0 +1,107 @@ +drop table if exists product_groups; +drop table if exists products; + +CREATE TABLE product_groups ( + group_id Int64, + group_name String +) Engine = Memory; + + +CREATE TABLE products ( + product_id Int64, + product_name String, + price DECIMAL(11, 2), + group_id Int64 +) Engine = Memory; + +INSERT INTO product_groups VALUES (1, 'Smartphone'),(2, 'Laptop'),(3, 'Tablet'); + +INSERT INTO products (product_id,product_name, group_id,price) VALUES (1, 'Microsoft Lumia', 1, 200), (2, 'HTC One', 1, 400), (3, 'Nexus', 1, 500), (4, 'iPhone', 1, 900),(5, 'HP Elite', 2, 1200),(6, 'Lenovo Thinkpad', 2, 700),(7, 'Sony VAIO', 2, 700),(8, 'Dell Vostro', 2, 800),(9, 'iPad', 3, 700),(10, 'Kindle Fire', 3, 150),(11, 'Samsung Galaxy Tab', 3, 200); + +select '---- Q1 ----'; + +SELECT + product_name, + price, + group_name, + AVG(price) OVER (PARTITION BY group_name) +FROM products INNER JOIN product_groups USING (group_id) +order by group_name, product_name, price; + +select '---- Q2 ----'; + +SELECT + product_name, + group_name, + price, + rank() OVER (PARTITION BY group_name ORDER BY price) rank +FROM products INNER JOIN product_groups USING (group_id) +order by group_name, rank, price, product_name; + +select '---- Q3 ----'; +SELECT + product_name, + group_name, + price, + row_number() OVER (PARTITION BY group_name ORDER BY price desc, product_name asc) rn +FROM products INNER JOIN product_groups USING (group_id) +ORDER BY group_name, rn; + +select '---- Q4 ----'; +SELECT * +FROM +( + SELECT + product_name, + group_name, + price, + min(price) OVER (PARTITION BY group_name) AS min_price, + dense_rank() OVER (PARTITION BY group_name ORDER BY price ASC) AS r + FROM products + INNER JOIN product_groups USING (group_id) +) AS t +WHERE min_price > 160 +ORDER BY + group_name ASC, + r ASC, + product_name ASC; + +select '---- Q5 ----'; +SELECT + product_name, + group_name, + price, + FIRST_VALUE (price) OVER (PARTITION BY group_name ORDER BY product_name desc) AS price_per_group_per_alphab +FROM products INNER JOIN product_groups USING (group_id) +order by group_name, product_name desc; + +select '---- Q6 ----'; +SELECT + product_name, + group_name, + price, + LAST_VALUE (price) OVER (PARTITION BY group_name ORDER BY + price RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING + ) AS highest_price_per_group +FROM + products +INNER JOIN product_groups USING (group_id) +order by group_name, product_name; + +select '---- Q7 ----'; +select product_name, price, group_name, round(avg0), round(avg1) +from ( +SELECT + product_name, + price, + group_name, + avg(price) OVER (PARTITION BY group_name ORDER BY price) avg0, + avg(price) OVER (PARTITION BY group_name ORDER BY + price RANGE BETWEEN UNBOUNDED PRECEDING AND 
UNBOUNDED FOLLOWING) avg1 +FROM products INNER JOIN product_groups USING (group_id)) t +order by group_name, product_name, price; + +drop table product_groups; +drop table products; + + diff --git a/parser/testdata/01593_functions_in_order_by/ast.json b/parser/testdata/01593_functions_in_order_by/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01593_functions_in_order_by/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01593_functions_in_order_by/metadata.json b/parser/testdata/01593_functions_in_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01593_functions_in_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01593_functions_in_order_by/query.sql b/parser/testdata/01593_functions_in_order_by/query.sql new file mode 100644 index 000000000..5aa6aef9e --- /dev/null +++ b/parser/testdata/01593_functions_in_order_by/query.sql @@ -0,0 +1,11 @@ +EXPLAIN SYNTAX +SELECT msg, toDateTime(intDiv(ms, 1000)) AS time +FROM +( + SELECT + 'hello' AS msg, + toUInt64(t) * 1000 AS ms + FROM generateRandom('t DateTime') + LIMIT 10 +) +ORDER BY msg, time; diff --git a/parser/testdata/01593_insert_settings/ast.json b/parser/testdata/01593_insert_settings/ast.json new file mode 100644 index 000000000..7af190389 --- /dev/null +++ b/parser/testdata/01593_insert_settings/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_01593 (children 1)" + }, + { + "explain": " Identifier data_01593" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001401834, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01593_insert_settings/metadata.json b/parser/testdata/01593_insert_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01593_insert_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01593_insert_settings/query.sql b/parser/testdata/01593_insert_settings/query.sql new file mode 100644 index 000000000..6493ecf64 --- /dev/null +++ b/parser/testdata/01593_insert_settings/query.sql @@ -0,0 +1,11 @@ +drop table if exists data_01593; +create table data_01593 (key Int) engine=MergeTree() order by key partition by key; + +insert into data_01593 select * from numbers_mt(10); +insert into data_01593 select * from numbers_mt(10) settings max_partitions_per_insert_block=1; -- { serverError TOO_MANY_PARTS } +-- throw_on_max_partitions_per_insert_block=false means we'll just log that the limit was reached rather than throw +insert into data_01593 select * from numbers_mt(10) settings max_partitions_per_insert_block=1, throw_on_max_partitions_per_insert_block=false; +-- settings for INSERT are preferred +insert into data_01593 settings max_partitions_per_insert_block=100 select * from numbers_mt(10) settings max_partitions_per_insert_block=1; + +drop table data_01593; diff --git a/parser/testdata/01594_storage_join_uuid/ast.json b/parser/testdata/01594_storage_join_uuid/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01594_storage_join_uuid/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01594_storage_join_uuid/metadata.json b/parser/testdata/01594_storage_join_uuid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01594_storage_join_uuid/metadata.json @@ -0,0 +1
@@ +{"todo": true} diff --git a/parser/testdata/01594_storage_join_uuid/query.sql b/parser/testdata/01594_storage_join_uuid/query.sql new file mode 100644 index 000000000..9236e9d0c --- /dev/null +++ b/parser/testdata/01594_storage_join_uuid/query.sql @@ -0,0 +1,29 @@ +-- the test from simPod, https://github.com/ClickHouse/ClickHouse/issues/5608 + +DROP TABLE IF EXISTS joint; -- the table name from the original issue. +DROP TABLE IF EXISTS t; + +CREATE TABLE IF NOT EXISTS joint +( + id UUID, + value LowCardinality(String) +) +ENGINE = Join (ANY, LEFT, id); + +CREATE TABLE IF NOT EXISTS t +( + id UUID, + d DateTime +) +ENGINE = MergeTree +PARTITION BY toDate(d) +ORDER BY id; + +insert into joint VALUES ('00000000-0000-0000-0000-000000000000', 'yo'); +insert into t VALUES ('00000000-0000-0000-0000-000000000000', now()); + +SELECT id FROM t +ANY LEFT JOIN joint ON t.id = joint.id; + +DROP TABLE joint; +DROP TABLE t; diff --git a/parser/testdata/01595_countMatches/ast.json b/parser/testdata/01595_countMatches/ast.json new file mode 100644 index 000000000..c3ad8ecba --- /dev/null +++ b/parser/testdata/01595_countMatches/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00093212, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01595_countMatches/metadata.json b/parser/testdata/01595_countMatches/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01595_countMatches/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01595_countMatches/query.sql b/parser/testdata/01595_countMatches/query.sql new file mode 100644 index 000000000..797b8c99d --- /dev/null +++ b/parser/testdata/01595_countMatches/query.sql @@ -0,0 +1,53 @@ +SET count_matches_stop_at_empty_match = 0; + +select 'basic'; +select countMatches('', 'foo'); +select countMatches('foo', ''); +-- simply stop if zero bytes was processed +select countMatches('foo', '[f]{0}'); +-- but this is ok +select countMatches('foo', '[f]{0}foo'); + +select 'case sensitive'; +select countMatches('foobarfoo', 'foo'); +select countMatches('foobarfoo', 'foo.*'); +select countMatches('oooo', 'oo'); +select countMatches(concat(toString(number), 'foofoo'), 'foo') from numbers(2); +select countMatches('foobarbazfoobarbaz', 'foo(bar)(?:baz|)'); +select countMatches('foo.com bar.com baz.com bam.com', '([^. ]+)\.([^. ]+)'); +select countMatches('foo.com@foo.com bar.com@foo.com baz.com@foo.com bam.com@foo.com', '([^. ]+)\.([^. ]+)@([^. ]+)\.([^. ]+)'); +select countMatches(materialize('foobarfoo'), 'foo'); + +select 'case insensitive'; +select countMatchesCaseInsensitive('foobarfoo', 'FOo'); +select countMatchesCaseInsensitive('foobarfoo', 'FOo.*'); +select countMatchesCaseInsensitive('oooo', 'Oo'); +select countMatchesCaseInsensitive(concat(toString(number), 'Foofoo'), 'foo') from numbers(2); +select countMatchesCaseInsensitive('foOBarBAZfoobarbaz', 'foo(bar)(?:baz|)'); +select countMatchesCaseInsensitive('foo.com BAR.COM baz.com bam.com', '([^. ]+)\.([^. ]+)'); +select countMatchesCaseInsensitive('foo.com@foo.com bar.com@foo.com BAZ.com@foo.com bam.com@foo.com', '([^. ]+)\.([^. ]+)@([^. ]+)\.([^. 
]+)'); +select countMatchesCaseInsensitive(materialize('foobarfoo'), 'FOo'); + +select 'errors'; +select countMatches(1, 'foo') from numbers(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select countMatches('foobarfoo', toString(number)) from numbers(1); -- { serverError ILLEGAL_COLUMN } +select countMatches('foo', materialize('foo')); -- { serverError ILLEGAL_COLUMN } + +select 'FixedString'; +select countMatches(toFixedString('foobarfoo', 9), 'foo'); +select countMatches(materialize(toFixedString('foobarfoo', 9)), 'foo'); + +select 'Pattern could match zero-bytes'; +select countMatches(' foo bar ', '[a-zA-Z]*'); +select countMatches(toFixedString(' foo bar ', 12), '[a-zA-Z]*'); +select countMatches(materialize(toFixedString(' foo bar ', 12)), '[a-zA-Z]*'); + +select 'Legacy behavior: stop at empty match'; +SET count_matches_stop_at_empty_match = 1; +select countMatches('foo bar ', '[a-zA-Z]*'); +select countMatches(' foo bar ', '[a-zA-Z]*'); +select countMatches(toFixedString('foo bar ', 12), '[a-zA-Z]*'); +select countMatches(toFixedString(' foo bar ', 12), '[a-zA-Z]*'); +select countMatches(materialize(toFixedString('foo bar ', 12)), '[a-zA-Z]*'); +select countMatches(materialize(toFixedString(' foo bar ', 12)), '[a-zA-Z]*'); +SET count_matches_stop_at_empty_match = 0; diff --git a/parser/testdata/01596_full_join_chertus/ast.json b/parser/testdata/01596_full_join_chertus/ast.json new file mode 100644 index 000000000..33444e7ee --- /dev/null +++ b/parser/testdata/01596_full_join_chertus/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier js1.k" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier js2.k" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier js1.s" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier js2.s" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001103349, + "rows_read": 24, + "bytes_read": 969 + } +} diff --git a/parser/testdata/01596_full_join_chertus/metadata.json b/parser/testdata/01596_full_join_chertus/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01596_full_join_chertus/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01596_full_join_chertus/query.sql b/parser/testdata/01596_full_join_chertus/query.sql new file mode 100644 index 000000000..32911abb7 --- /dev/null +++ b/parser/testdata/01596_full_join_chertus/query.sql @@ -0,0 +1,9 @@ +select 
toTypeName(materialize(js1.k)), toTypeName(materialize(js2.k)), toTypeName(materialize(js1.s)), toTypeName(materialize(js2.s)) +from (select number k, toLowCardinality(toString(number)) s from numbers(2)) as js1 +full join (select toLowCardinality(number+1) k, toString(number+1) s from numbers(2)) as js2 +ON js1.k = js2.k order by js1.k, js2.k; + +select toTypeName(js1.k), toTypeName(js2.k), toTypeName(js1.s), toTypeName(js2.s) +from (select number k, toLowCardinality(toString(number)) s from numbers(2)) as js1 +full join (select toLowCardinality(number+1) k, toString(number+1) s from numbers(2)) as js2 +ON js1.k = js2.k order by js1.k, js2.k; diff --git a/parser/testdata/01596_null_as_default_nullable/ast.json b/parser/testdata/01596_null_as_default_nullable/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01596_null_as_default_nullable/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01596_null_as_default_nullable/metadata.json b/parser/testdata/01596_null_as_default_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01596_null_as_default_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01596_null_as_default_nullable/query.sql b/parser/testdata/01596_null_as_default_nullable/query.sql new file mode 100644 index 000000000..bf0650727 --- /dev/null +++ b/parser/testdata/01596_null_as_default_nullable/query.sql @@ -0,0 +1,6 @@ +-- Check that "null as default" applies only if type is not Nullable. + +SET input_format_null_as_default = 1; +CREATE TEMPORARY TABLE t (x Nullable(String) DEFAULT 'Hello', y String DEFAULT 'World'); +INSERT INTO t VALUES (NULL, NULL); +SELECT * FROM t; diff --git a/parser/testdata/01596_setting_limit_offset/ast.json b/parser/testdata/01596_setting_limit_offset/ast.json new file mode 100644 index 000000000..3c881c4c4 --- /dev/null +++ b/parser/testdata/01596_setting_limit_offset/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001247221, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01596_setting_limit_offset/metadata.json b/parser/testdata/01596_setting_limit_offset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01596_setting_limit_offset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01596_setting_limit_offset/query.sql b/parser/testdata/01596_setting_limit_offset/query.sql new file mode 100644 index 000000000..005578e65 --- /dev/null +++ b/parser/testdata/01596_setting_limit_offset/query.sql @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (i UInt64) Engine = MergeTree() order by i; +INSERT INTO test SELECT number FROM numbers(100); +INSERT INTO test SELECT number FROM numbers(10,100); +OPTIMIZE TABLE test FINAL; + +-- Only set limit +SET limit = 5; +SELECT * FROM test ORDER BY i; -- 5 rows +SELECT * FROM test ORDER BY i OFFSET 20; -- 5 rows +SELECT * FROM (SELECT i FROM test ORDER BY i LIMIT 10 OFFSET 50) TMP ORDER BY i; -- 5 rows +SELECT * FROM test ORDER BY i LIMIT 4 OFFSET 192; -- 4 rows +SELECT * FROM test ORDER BY i LIMIT 10 OFFSET 195; -- 5 rows +SELECT * FROM test ORDER BY i LIMIT 2*2 OFFSET 192; + +-- Only set offset +SET limit = 0; +SET offset = 195; +SELECT * FROM test ORDER BY 
i; -- 5 rows +SELECT * FROM test ORDER BY i OFFSET 20; -- no result +SELECT * FROM test ORDER BY i LIMIT 100; -- no result +SET offset = 10; +SELECT * FROM test ORDER BY i LIMIT 20 OFFSET 100; -- 10 rows +SELECT * FROM test ORDER BY i LIMIT 11 OFFSET 100; -- 1 rows +SELECT * FROM test ORDER BY i LIMIT 20 OFFSET 10*10; +SELECT * FROM test ORDER BY i LIMIT 4*5 OFFSET 10*10; + +-- offset and limit together +SET limit = 10; +SELECT * FROM test ORDER BY i LIMIT 50 OFFSET 50; -- 10 rows +SELECT * FROM test ORDER BY i LIMIT 50 OFFSET 190; -- 0 rows +SELECT * FROM test ORDER BY i LIMIT 50 OFFSET 185; -- 5 rows +SELECT * FROM test ORDER BY i LIMIT 18 OFFSET 5; -- 8 rows + +DROP TABLE test; diff --git a/parser/testdata/01598_memory_limit_zeros/ast.json b/parser/testdata/01598_memory_limit_zeros/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01598_memory_limit_zeros/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01598_memory_limit_zeros/metadata.json b/parser/testdata/01598_memory_limit_zeros/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01598_memory_limit_zeros/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01598_memory_limit_zeros/query.sql b/parser/testdata/01598_memory_limit_zeros/query.sql new file mode 100644 index 000000000..45e34c7c8 --- /dev/null +++ b/parser/testdata/01598_memory_limit_zeros/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-parallel, no-fasttest, no-random-settings + +SET max_memory_usage = 1, max_untracked_memory = 1000000, max_threads=40; +select 'test', count(*) from zeros_mt(1000000) where not ignore(zero); -- { serverError MEMORY_LIMIT_EXCEEDED } diff --git a/parser/testdata/01600_encode_XML/ast.json b/parser/testdata/01600_encode_XML/ast.json new file mode 100644 index 000000000..bbdf6cfb8 --- /dev/null +++ b/parser/testdata/01600_encode_XML/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function encodeXMLComponent (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Hello, \"world\"!'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001496642, + "rows_read": 7, + "bytes_read": 280 + } +} diff --git a/parser/testdata/01600_encode_XML/metadata.json b/parser/testdata/01600_encode_XML/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01600_encode_XML/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01600_encode_XML/query.sql b/parser/testdata/01600_encode_XML/query.sql new file mode 100644 index 000000000..af3e2ce85 --- /dev/null +++ b/parser/testdata/01600_encode_XML/query.sql @@ -0,0 +1,4 @@ +SELECT encodeXMLComponent('Hello, "world"!'); +SELECT encodeXMLComponent('<123>'); +SELECT encodeXMLComponent('&clickhouse'); +SELECT encodeXMLComponent('\'foo\''); \ No newline at end of file diff --git a/parser/testdata/01600_min_max_compress_block_size/ast.json b/parser/testdata/01600_min_max_compress_block_size/ast.json new file mode 100644 index 000000000..91b776505 --- /dev/null +++ b/parser/testdata/01600_min_max_compress_block_size/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + 
} + ], + + "data": + [ + { + "explain": "DropQuery ms (children 1)" + }, + { + "explain": " Identifier ms" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001532975, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01600_min_max_compress_block_size/metadata.json b/parser/testdata/01600_min_max_compress_block_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01600_min_max_compress_block_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01600_min_max_compress_block_size/query.sql b/parser/testdata/01600_min_max_compress_block_size/query.sql new file mode 100644 index 000000000..747f0b736 --- /dev/null +++ b/parser/testdata/01600_min_max_compress_block_size/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS ms; + +CREATE TABLE ms (n Int32) ENGINE = MergeTree() ORDER BY n SETTINGS min_compress_block_size = 1024, max_compress_block_size = 10240; + +INSERT INTO ms SELECT * FROM numbers(1000); + +SELECT COUNT(*) FROM ms; + +DROP TABLE ms; diff --git a/parser/testdata/01600_multiple_left_join_with_aliases/ast.json b/parser/testdata/01600_multiple_left_join_with_aliases/ast.json new file mode 100644 index 000000000..0fb404353 --- /dev/null +++ b/parser/testdata/01600_multiple_left_join_with_aliases/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery base (children 1)" + }, + { + "explain": " Identifier base" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001344116, + "rows_read": 2, + "bytes_read": 61 + } +} diff --git a/parser/testdata/01600_multiple_left_join_with_aliases/metadata.json b/parser/testdata/01600_multiple_left_join_with_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01600_multiple_left_join_with_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01600_multiple_left_join_with_aliases/query.sql b/parser/testdata/01600_multiple_left_join_with_aliases/query.sql new file mode 100644 index 000000000..2945622fb --- /dev/null +++ b/parser/testdata/01600_multiple_left_join_with_aliases/query.sql @@ -0,0 +1,47 @@ +CREATE TABLE base +( +`id` UInt64, +`id2` UInt64, +`d` UInt64, +`value` UInt64 +) +ENGINE=MergeTree() +PARTITION BY d +ORDER BY (id,id2,d); + +CREATE TABLE derived1 +( + `id1` UInt64, + `d1` UInt64, + `value1` UInt64 +) +ENGINE = MergeTree() +PARTITION BY d1 +ORDER BY (id1, d1) +; + +CREATE TABLE derived2 +( + `id2` UInt64, + `d2` UInt64, + `value2` UInt64 +) +ENGINE = MergeTree() +PARTITION BY d2 +ORDER BY (id2, d2) +; + +select +base.id as `base.id`, +derived2.value2 as `derived2.value2`, +derived1.value1 as `derived1.value1` +from base as base +left join derived2 as derived2 on base.id2 = derived2.id2 +left join derived1 as derived1 on base.id = derived1.id1; + + +SELECT + base.id AS `base.id`, + derived1.value1 AS `derived1.value1` +FROM base AS base +LEFT JOIN derived1 AS derived1 ON base.id = derived1.id1; diff --git a/parser/testdata/01600_remerge_sort_lowered_memory_bytes_ratio/ast.json b/parser/testdata/01600_remerge_sort_lowered_memory_bytes_ratio/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01600_remerge_sort_lowered_memory_bytes_ratio/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01600_remerge_sort_lowered_memory_bytes_ratio/metadata.json 
b/parser/testdata/01600_remerge_sort_lowered_memory_bytes_ratio/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01600_remerge_sort_lowered_memory_bytes_ratio/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01600_remerge_sort_lowered_memory_bytes_ratio/query.sql b/parser/testdata/01600_remerge_sort_lowered_memory_bytes_ratio/query.sql new file mode 100644 index 000000000..3e4bf124a --- /dev/null +++ b/parser/testdata/01600_remerge_sort_lowered_memory_bytes_ratio/query.sql @@ -0,0 +1,31 @@ +-- Tags: no-random-settings + +-- Check remerge_sort_lowered_memory_bytes_ratio setting + +set max_memory_usage='200Mi'; +-- enter remerge once limit*2 is reached +set max_bytes_before_remerge_sort='10Mi'; +-- more blocks +set max_block_size=40960; + +-- remerge_sort_lowered_memory_bytes_ratio default 2, slightly not enough +-- MergeSortingTransform: Re-merging intermediate ORDER BY data (20 blocks with 819200 rows) to save memory consumption +-- MergeSortingTransform: Memory usage is lowered from 186.25 MiB to 95.00 MiB +-- MergeSortingTransform: Re-merging is not useful (memory usage was not lowered by remerge_sort_lowered_memory_bytes_ratio=2.0) +select number k, repeat(toString(number), 11) v1, repeat(toString(number), 12) v2 from numbers(3e6) order by v1, v2 limit 400e3 format Null; -- { serverError MEMORY_LIMIT_EXCEEDED } +select number k, repeat(toString(number), 11) v1, repeat(toString(number), 12) v2 from numbers(3e6) order by v1, v2 limit 400e3 settings remerge_sort_lowered_memory_bytes_ratio=2. format Null; -- { serverError MEMORY_LIMIT_EXCEEDED } + +-- remerge_sort_lowered_memory_bytes_ratio 1.9 is good (need at least 1.91/0.98=1.94) +-- MergeSortingTransform: Re-merging intermediate ORDER BY data (20 blocks with 819200 rows) to save memory consumption +-- MergeSortingTransform: Memory usage is lowered from 186.25 MiB to 95.00 MiB +-- MergeSortingTransform: Re-merging intermediate ORDER BY data (20 blocks with 809600 rows) to save memory consumption +-- MergeSortingTransform: Memory usage is lowered from 188.13 MiB to 95.00 MiB +-- MergeSortingTransform: Re-merging intermediate ORDER BY data (20 blocks with 809600 rows) to save memory consumption +-- MergeSortingTransform: Memory usage is lowered from 188.13 MiB to 95.00 MiB +-- MergeSortingTransform: Re-merging intermediate ORDER BY data (20 blocks with 809600 rows) to save memory consumption +-- MergeSortingTransform: Memory usage is lowered from 188.13 MiB to 95.00 MiB +-- MergeSortingTransform: Re-merging intermediate ORDER BY data (20 blocks with 809600 rows) to save memory consumption +-- MergeSortingTransform: Memory usage is lowered from 188.13 MiB to 95.00 MiB +-- MergeSortingTransform: Re-merging intermediate ORDER BY data (20 blocks with 809600 rows) to save memory consumption +-- MergeSortingTransform: Memory usage is lowered from 188.13 MiB to 95.00 MiB +select number k, repeat(toString(number), 11) v1, repeat(toString(number), 12) v2 from numbers(3e6) order by k limit 400e3 settings remerge_sort_lowered_memory_bytes_ratio=1.9 format Null; diff --git a/parser/testdata/01600_select_in_different_types/ast.json b/parser/testdata/01600_select_in_different_types/ast.json new file mode 100644 index 000000000..719abfa08 --- /dev/null +++ b/parser/testdata/01600_select_in_different_types/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + 
"explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001331044, + "rows_read": 13, + "bytes_read": 502 + } +} diff --git a/parser/testdata/01600_select_in_different_types/metadata.json b/parser/testdata/01600_select_in_different_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01600_select_in_different_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01600_select_in_different_types/query.sql b/parser/testdata/01600_select_in_different_types/query.sql new file mode 100644 index 000000000..a9eb6ed2a --- /dev/null +++ b/parser/testdata/01600_select_in_different_types/query.sql @@ -0,0 +1,35 @@ +SELECT 1 IN (SELECT 1); +SELECT -1 IN (SELECT 1); + +DROP TABLE IF EXISTS select_in_test; + +CREATE TABLE select_in_test(value UInt8) ENGINE=TinyLog; +INSERT INTO select_in_test VALUES (1), (2), (3); + +SELECT value FROM select_in_test WHERE value IN (-1); +SELECT value FROM select_in_test WHERE value IN (SELECT -1); + +SELECT value FROM select_in_test WHERE value IN (1); +SELECT value FROM select_in_test WHERE value IN (SELECT 1); + +DROP TABLE select_in_test; + +CREATE TABLE select_in_test(value Int8) ENGINE=TinyLog; +INSERT INTO select_in_test VALUES (-1), (2), (3); + +SELECT value FROM select_in_test WHERE value IN (1); +SELECT value FROM select_in_test WHERE value IN (SELECT 1); + +SELECT value FROM select_in_test WHERE value IN (2); +SELECT value FROM select_in_test WHERE value IN (SELECT 2); + +DROP TABLE select_in_test; + +SELECT 1 IN (1); +SELECT '1' IN (SELECT 1); + +SELECT 1 IN (SELECT 1) SETTINGS transform_null_in = 1; +SELECT 1 IN (SELECT 'a') SETTINGS transform_null_in = 1; +SELECT 'a' IN (SELECT 1) SETTINGS transform_null_in = 1; -- { serverError CANNOT_PARSE_TEXT } +SELECT 1 IN (SELECT -1) SETTINGS transform_null_in = 1; +SELECT -1 IN (SELECT 1) SETTINGS transform_null_in = 1; -- { serverError CANNOT_CONVERT_TYPE } diff --git a/parser/testdata/01601_accurate_cast/ast.json b/parser/testdata/01601_accurate_cast/ast.json new file mode 100644 index 000000000..8c100f56f --- /dev/null +++ b/parser/testdata/01601_accurate_cast/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function accurateCast (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Literal 'UInt8'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001432485, + "rows_read": 8, + "bytes_read": 294 + } +} diff --git a/parser/testdata/01601_accurate_cast/metadata.json b/parser/testdata/01601_accurate_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- 
/dev/null +++ b/parser/testdata/01601_accurate_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01601_accurate_cast/query.sql b/parser/testdata/01601_accurate_cast/query.sql new file mode 100644 index 000000000..3d418b5a3 --- /dev/null +++ b/parser/testdata/01601_accurate_cast/query.sql @@ -0,0 +1,75 @@ +SELECT accurateCast(-1, 'UInt8'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(5, 'UInt8'); +SELECT accurateCast(257, 'UInt8'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(-1, 'UInt16'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(5, 'UInt16'); +SELECT accurateCast(65536, 'UInt16'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(-1, 'UInt32'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(5, 'UInt32'); +SELECT accurateCast(4294967296, 'UInt32'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(-1, 'UInt64'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(5, 'UInt64'); +SELECT accurateCast(-1, 'UInt256'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(5, 'UInt256'); + +SELECT accurateCast(-129, 'Int8'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(5, 'Int8'); +SELECT accurateCast(128, 'Int8'); -- { serverError CANNOT_CONVERT_TYPE } + +SELECT accurateCast('-1', 'UInt8'); -- { serverError CANNOT_PARSE_TEXT } +SELECT accurateCast('5', 'UInt8'); +SELECT accurateCast('257', 'UInt8'); -- { serverError CANNOT_PARSE_TEXT } +SELECT accurateCast('-1', 'UInt16'); -- { serverError CANNOT_PARSE_TEXT } +SELECT accurateCast('5', 'UInt16'); +SELECT accurateCast('65536', 'UInt16'); -- { serverError CANNOT_PARSE_TEXT } +SELECT accurateCast('-1', 'UInt32'); -- { serverError CANNOT_PARSE_TEXT } +SELECT accurateCast('5', 'UInt32'); +SELECT accurateCast('4294967296', 'UInt32'); -- { serverError CANNOT_PARSE_TEXT } +SELECT accurateCast('-1', 'UInt64'); -- { serverError CANNOT_PARSE_TEXT } +SELECT accurateCast('5', 'UInt64'); +SELECT accurateCast('-129', 'Int8'); -- { serverError CANNOT_PARSE_TEXT } +SELECT accurateCast('5', 'Int8'); +SELECT accurateCast('128', 'Int8'); -- { serverError CANNOT_PARSE_TEXT } + +SELECT accurateCast(10, 'Decimal32(9)'); -- { serverError DECIMAL_OVERFLOW } +SELECT accurateCast(1, 'Decimal32(9)'); +SELECT accurateCast(-10, 'Decimal32(9)'); -- { serverError DECIMAL_OVERFLOW } + +SELECT accurateCast('123', 'FixedString(2)'); -- { serverError TOO_LARGE_STRING_SIZE } +SELECT accurateCast('12', 'FixedString(2)'); + +SELECT accurateCast(-1, 'DateTime'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(0xFFFFFFFF + 1, 'DateTime'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast('1xxx', 'DateTime'); -- { serverError CANNOT_PARSE_DATETIME } +SELECT accurateCast('2023-05-30 14:38:20', 'DateTime'); +SELECT toString(accurateCast(19, 'DateTime'), 'UTC'); + +SELECT accurateCast(-1, 'Date'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(0xFFFFFFFF + 1, 'Date'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast('1xxx', 'Date'); -- { serverError CANNOT_PARSE_DATE } +SELECT accurateCast('2023-05-30', 'Date'); +SELECT accurateCast(19, 'Date'); + +select accurateCast('test', 'Nullable(Bool)'); -- { serverError CANNOT_PARSE_BOOL } +select accurateCast('test', 'Bool'); -- { serverError CANNOT_PARSE_BOOL } +select accurateCast('truex', 'Bool'); -- { serverError CANNOT_PARSE_BOOL } +select accurateCast('xfalse', 'Bool'); -- { serverError CANNOT_PARSE_BOOL } +select accurateCast('true', 'Bool'); +select 
accurateCast('false', 'Bool'); +select accurateCast('1', 'Bool'); +select accurateCast('0', 'Bool'); +select accurateCast(1, 'Bool'); +select accurateCast(0, 'Bool'); + +select accurateCast('test', 'Nullable(IPv4)'); +select accurateCast('test', 'IPv4'); -- { serverError CANNOT_PARSE_IPV4 } +select accurateCast('2001:db8::1', 'IPv4'); -- { serverError CANNOT_PARSE_IPV4 } +select accurateCast('::ffff:192.0.2.1', 'IPv4'); -- { serverError CANNOT_PARSE_IPV4 } +select accurateCast('192.0.2.1', 'IPv4'); +select accurateCast('192.0.2.1x', 'IPv4'); -- { serverError CANNOT_PARSE_IPV4 } + +select accurateCast('test', 'Nullable(IPv6)'); +select accurateCast('test', 'IPv6'); -- { serverError CANNOT_PARSE_IPV6 } +select accurateCast('192.0.2.1', 'IPv6'); +select accurateCast('2001:db8::1', 'IPv6'); +select accurateCast('2001:db8::1x', 'IPv6'); -- { serverError CANNOT_PARSE_IPV6 } diff --git a/parser/testdata/01601_detach_permanently/ast.json b/parser/testdata/01601_detach_permanently/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01601_detach_permanently/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01601_detach_permanently/metadata.json b/parser/testdata/01601_detach_permanently/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01601_detach_permanently/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01601_detach_permanently/query.sql b/parser/testdata/01601_detach_permanently/query.sql new file mode 100644 index 000000000..41a5146fd --- /dev/null +++ b/parser/testdata/01601_detach_permanently/query.sql @@ -0,0 +1,217 @@ +-- Tags: no-parallel, log-engine + +SET send_logs_level = 'fatal'; + +SELECT 'database atomic tests'; + +DROP DATABASE IF EXISTS test1601_detach_permanently_atomic; +CREATE DATABASE test1601_detach_permanently_atomic Engine=Atomic; + +create table test1601_detach_permanently_atomic.test_name_reuse (number UInt64) engine=MergeTree order by tuple(); + +INSERT INTO test1601_detach_permanently_atomic.test_name_reuse SELECT * FROM numbers(100); + +DETACH table test1601_detach_permanently_atomic.test_name_reuse PERMANENTLY; + +SELECT 'can not create table with same name as detached permanently'; +create table test1601_detach_permanently_atomic.test_name_reuse (number UInt64) engine=MergeTree order by tuple(); -- { serverError TABLE_ALREADY_EXISTS } + +SELECT 'can not detach twice'; +DETACH table test1601_detach_permanently_atomic.test_name_reuse PERMANENTLY; -- { serverError UNKNOWN_TABLE } +DETACH table test1601_detach_permanently_atomic.test_name_reuse; -- { serverError UNKNOWN_TABLE } + +SELECT 'can not drop detached'; +drop table test1601_detach_permanently_atomic.test_name_reuse; -- { serverError UNKNOWN_TABLE } + +create table test1601_detach_permanently_atomic.test_name_rename_attempt (number UInt64) engine=MergeTree order by tuple(); + +SELECT 'can not replace with the other table'; +RENAME TABLE test1601_detach_permanently_atomic.test_name_rename_attempt TO test1601_detach_permanently_atomic.test_name_reuse; -- { serverError TABLE_ALREADY_EXISTS } +EXCHANGE TABLES test1601_detach_permanently_atomic.test_name_rename_attempt AND test1601_detach_permanently_atomic.test_name_reuse; -- { serverError UNKNOWN_TABLE } + +SELECT 'can still show the create statement'; +SHOW CREATE TABLE test1601_detach_permanently_atomic.test_name_reuse FORMAT Vertical; + +SELECT 'can not attach with bad uuid'; +-- STD_EXCEPTION occurred when running a flaky test; the
table directory's access right was removed. Refer `DatabaseCatalog::maybeRemoveDirectory`. +ATTACH TABLE test1601_detach_permanently_atomic.test_name_reuse UUID '00000000-0000-0000-0000-000000001601' (`number` UInt64 ) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192 ; -- { serverError TABLE_ALREADY_EXISTS, STD_EXCEPTION } + +SELECT 'can attach with short syntax'; +ATTACH TABLE test1601_detach_permanently_atomic.test_name_reuse; + +SELECT count() FROM test1601_detach_permanently_atomic.test_name_reuse; + +DETACH table test1601_detach_permanently_atomic.test_name_reuse; + +SELECT 'can not detach permanently the table which is already detached (temporary)'; +DETACH table test1601_detach_permanently_atomic.test_name_reuse PERMANENTLY; -- { serverError UNKNOWN_TABLE } + +DETACH DATABASE test1601_detach_permanently_atomic; +ATTACH DATABASE test1601_detach_permanently_atomic; + +SELECT count() FROM test1601_detach_permanently_atomic.test_name_reuse; + +SELECT 'After database reattachement the table is back (it was detached temporary)'; +SELECT 'And we can detach it permanently'; +DETACH table test1601_detach_permanently_atomic.test_name_reuse PERMANENTLY; + +DETACH DATABASE test1601_detach_permanently_atomic; +ATTACH DATABASE test1601_detach_permanently_atomic; + +SELECT 'After database reattachement the table is still absent (it was detached permamently)'; +SELECT 'And we can not detach it permanently'; +DETACH table test1601_detach_permanently_atomic.test_name_reuse PERMANENTLY; -- { serverError UNKNOWN_TABLE } + +SELECT 'But we can attach it back'; +ATTACH TABLE test1601_detach_permanently_atomic.test_name_reuse; + +SELECT 'And detach permanently again to check how database drop will behave'; +DETACH table test1601_detach_permanently_atomic.test_name_reuse PERMANENTLY; + +SELECT 'DROP database'; +DROP DATABASE test1601_detach_permanently_atomic SYNC; + +SELECT '-----------------------'; +SELECT 'database ordinary tests'; + +DROP DATABASE IF EXISTS test1601_detach_permanently_ordinary; +set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. 
+CREATE DATABASE test1601_detach_permanently_ordinary Engine=Ordinary; + +create table test1601_detach_permanently_ordinary.test_name_reuse (number UInt64) engine=MergeTree order by tuple(); + +INSERT INTO test1601_detach_permanently_ordinary.test_name_reuse SELECT * FROM numbers(100); + +DETACH table test1601_detach_permanently_ordinary.test_name_reuse PERMANENTLY; + +SELECT 'can not create table with same name as detached permanently'; +create table test1601_detach_permanently_ordinary.test_name_reuse (number UInt64) engine=MergeTree order by tuple(); -- { serverError TABLE_ALREADY_EXISTS } + +SELECT 'can not detach twice'; +DETACH table test1601_detach_permanently_ordinary.test_name_reuse PERMANENTLY; -- { serverError UNKNOWN_TABLE } +DETACH table test1601_detach_permanently_ordinary.test_name_reuse; -- { serverError UNKNOWN_TABLE } + +SELECT 'can not drop detached'; +drop table test1601_detach_permanently_ordinary.test_name_reuse; -- { serverError UNKNOWN_TABLE } + +create table test1601_detach_permanently_ordinary.test_name_rename_attempt (number UInt64) engine=MergeTree order by tuple(); + +SELECT 'can not replace with the other table'; +RENAME TABLE test1601_detach_permanently_ordinary.test_name_rename_attempt TO test1601_detach_permanently_ordinary.test_name_reuse; -- { serverError TABLE_ALREADY_EXISTS } + +SELECT 'can still show the create statement'; +SHOW CREATE TABLE test1601_detach_permanently_ordinary.test_name_reuse FORMAT Vertical; + +SELECT 'can attach with full syntax'; +ATTACH TABLE test1601_detach_permanently_ordinary.test_name_reuse (`number` UInt64 ) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192; +DETACH table test1601_detach_permanently_ordinary.test_name_reuse PERMANENTLY; + +SELECT 'can attach with short syntax'; +ATTACH TABLE test1601_detach_permanently_ordinary.test_name_reuse; + +DETACH table test1601_detach_permanently_ordinary.test_name_reuse; + +SELECT 'can not detach permanently the table which is already detached (temporary)'; +DETACH table test1601_detach_permanently_ordinary.test_name_reuse PERMANENTLY; -- { serverError UNKNOWN_TABLE } + +DETACH DATABASE test1601_detach_permanently_ordinary; +ATTACH DATABASE test1601_detach_permanently_ordinary; + +SELECT 'After database reattachement the table is back (it was detached temporary)'; +SELECT 'And we can detach it permanently'; +DETACH table test1601_detach_permanently_ordinary.test_name_reuse PERMANENTLY; + +DETACH DATABASE test1601_detach_permanently_ordinary; +ATTACH DATABASE test1601_detach_permanently_ordinary; + +SELECT 'After database reattachement the table is still absent (it was detached permamently)'; +SELECT 'And we can not detach it permanently'; +DETACH table test1601_detach_permanently_ordinary.test_name_reuse PERMANENTLY; -- { serverError UNKNOWN_TABLE } + +SELECT 'But we can attach it back'; +ATTACH TABLE test1601_detach_permanently_ordinary.test_name_reuse; + +SELECT 'And detach permanently again to check how database drop will behave'; +DETACH table test1601_detach_permanently_ordinary.test_name_reuse PERMANENTLY; + +SELECT 'DROP database - Directory not empty error, but database detached'; +DROP DATABASE test1601_detach_permanently_ordinary; -- { serverError DATABASE_NOT_EMPTY } + +ATTACH TABLE test1601_detach_permanently_ordinary.test_name_reuse; +DROP TABLE test1601_detach_permanently_ordinary.test_name_reuse; + +SELECT 'DROP database - now success'; +DROP DATABASE test1601_detach_permanently_ordinary; + + +SELECT '-----------------------'; +SELECT 'database lazy 
tests'; + +DROP DATABASE IF EXISTS test1601_detach_permanently_lazy; +CREATE DATABASE test1601_detach_permanently_lazy Engine=Lazy(10); + +create table test1601_detach_permanently_lazy.test_name_reuse (number UInt64) engine=Log; + +INSERT INTO test1601_detach_permanently_lazy.test_name_reuse SELECT * FROM numbers(100); + +DETACH table test1601_detach_permanently_lazy.test_name_reuse PERMANENTLY; + +SELECT 'can not create table with same name as detached permanently'; +create table test1601_detach_permanently_lazy.test_name_reuse (number UInt64) engine=Log; -- { serverError TABLE_ALREADY_EXISTS } + +SELECT 'can not detach twice'; +DETACH table test1601_detach_permanently_lazy.test_name_reuse PERMANENTLY; -- { serverError UNKNOWN_TABLE } +DETACH table test1601_detach_permanently_lazy.test_name_reuse; -- { serverError UNKNOWN_TABLE } + +SELECT 'can not drop detached'; +drop table test1601_detach_permanently_lazy.test_name_reuse; -- { serverError UNKNOWN_TABLE } + +create table test1601_detach_permanently_lazy.test_name_rename_attempt (number UInt64) engine=Log; + +SELECT 'can not replace with the other table'; +RENAME TABLE test1601_detach_permanently_lazy.test_name_rename_attempt TO test1601_detach_permanently_lazy.test_name_reuse; -- { serverError TABLE_ALREADY_EXISTS } + +SELECT 'can still show the create statement'; +SHOW CREATE TABLE test1601_detach_permanently_lazy.test_name_reuse FORMAT Vertical; + +SELECT 'can attach with full syntax'; +ATTACH TABLE test1601_detach_permanently_lazy.test_name_reuse (`number` UInt64 ) ENGINE = Log; +DETACH table test1601_detach_permanently_lazy.test_name_reuse PERMANENTLY; + +SELECT 'can attach with short syntax'; +ATTACH TABLE test1601_detach_permanently_lazy.test_name_reuse; + +DETACH table test1601_detach_permanently_lazy.test_name_reuse; + +SELECT 'can not detach permanently the table which is already detached (temporary)'; +DETACH table test1601_detach_permanently_lazy.test_name_reuse PERMANENTLY; -- { serverError UNKNOWN_TABLE } + +DETACH DATABASE test1601_detach_permanently_lazy; +ATTACH DATABASE test1601_detach_permanently_lazy; + +SELECT 'After database reattachement the table is back (it was detached temporary)'; +SELECT 'And we can detach it permanently'; +DETACH table test1601_detach_permanently_lazy.test_name_reuse PERMANENTLY; + +DETACH DATABASE test1601_detach_permanently_lazy; +ATTACH DATABASE test1601_detach_permanently_lazy; + +SELECT 'After database reattachement the table is still absent (it was detached permamently)'; +SELECT 'And we can not detach it permanently'; +DETACH table test1601_detach_permanently_lazy.test_name_reuse PERMANENTLY; -- { serverError UNKNOWN_TABLE } + +SELECT 'But we can attach it back'; +ATTACH TABLE test1601_detach_permanently_lazy.test_name_reuse; + +SELECT 'And detach permanently again to check how database drop will behave'; +DETACH table test1601_detach_permanently_lazy.test_name_reuse PERMANENTLY; + +SELECT 'DROP database - Directory not empty error, but database deteched'; +DROP DATABASE test1601_detach_permanently_lazy; -- { serverError DATABASE_NOT_EMPTY } + +ATTACH TABLE test1601_detach_permanently_lazy.test_name_reuse; +DROP TABLE test1601_detach_permanently_lazy.test_name_reuse; + +SELECT 'DROP database - now success'; +DROP DATABASE test1601_detach_permanently_lazy; diff --git a/parser/testdata/01602_array_aggregation/ast.json b/parser/testdata/01602_array_aggregation/ast.json new file mode 100644 index 000000000..64fc5b936 --- /dev/null +++ b/parser/testdata/01602_array_aggregation/ast.json @@ 
-0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'Array min '" + }, + { + "explain": " Function arrayMin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 6)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Literal UInt64_6" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.00099064, + "rows_read": 15, + "bytes_read": 538 + } +} diff --git a/parser/testdata/01602_array_aggregation/metadata.json b/parser/testdata/01602_array_aggregation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01602_array_aggregation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01602_array_aggregation/query.sql b/parser/testdata/01602_array_aggregation/query.sql new file mode 100644 index 000000000..d8be9eb82 --- /dev/null +++ b/parser/testdata/01602_array_aggregation/query.sql @@ -0,0 +1,71 @@ +SELECT 'Array min ', (arrayMin(array(1,2,3,4,5,6))); +SELECT 'Array max ', (arrayMax(array(1,2,3,4,5,6))); +SELECT 'Array sum ', (arraySum(array(1,2,3,4,5,6))); +SELECT 'Array avg ', (arrayAvg(array(1,2,3,4,5,6))); + +SELECT 'Array min :'; +SELECT arrayMin([[3], [1], [2]]); + +SELECT 'Array max :'; +SELECT arrayMax([[3], [1], [2]]); + +DROP TABLE IF EXISTS test_aggregation; +CREATE TABLE test_aggregation (x Array(Int)) ENGINE=TinyLog; + +INSERT INTO test_aggregation VALUES ([1,2,3,4,5,6]), ([]), ([1,2,3]); + +SELECT 'Table array int min'; +SELECT arrayMin(x) FROM test_aggregation; +SELECT 'Table array int max'; +SELECT arrayMax(x) FROM test_aggregation; +SELECT 'Table array int sum'; +SELECT arraySum(x) FROM test_aggregation; +SELECT 'Table array int avg'; +SELECT arrayAvg(x) FROM test_aggregation; + +DROP TABLE test_aggregation; + +CREATE TABLE test_aggregation (x Array(Decimal64(8))) ENGINE=TinyLog; + +INSERT INTO test_aggregation VALUES ([1,2,3,4,5,6]), ([]), ([1,2,3]); + +SELECT 'Table array decimal min'; +SELECT arrayMin(x) FROM test_aggregation; +SELECT 'Table array decimal max'; +SELECT arrayMax(x) FROM test_aggregation; +SELECT 'Table array decimal sum'; +SELECT arraySum(x) FROM test_aggregation; +SELECT 'Table array decimal avg'; +SELECT arrayAvg(x) FROM test_aggregation; + +DROP TABLE test_aggregation; + +WITH ['2023-04-05 00:25:23', '2023-04-05 00:25:24']::Array(DateTime) AS dt SELECT arrayMax(dt), arrayMin(dt), arrayDifference(dt); +WITH ['2023-04-05 00:25:23.123', '2023-04-05 00:25:24.124']::Array(DateTime64(3)) AS dt SELECT arrayMax(dt), arrayMin(dt), arrayDifference(dt); +WITH ['2023-04-05', '2023-04-06']::Array(Date) AS d SELECT arrayMax(d), arrayMin(d), arrayDifference(d); +WITH ['2023-04-05', '2023-04-06']::Array(Date32) AS d SELECT arrayMax(d), arrayMin(d), arrayDifference(d); + +SELECT 'Types of aggregation result array min'; +SELECT toTypeName(arrayMin([toInt8(0)])), toTypeName(arrayMin([toInt16(0)])), toTypeName(arrayMin([toInt32(0)])), toTypeName(arrayMin([toInt64(0)])); +SELECT toTypeName(arrayMin([toUInt8(0)])), 
toTypeName(arrayMin([toUInt16(0)])), toTypeName(arrayMin([toUInt32(0)])), toTypeName(arrayMin([toUInt64(0)])); +SELECT toTypeName(arrayMin([toInt128(0)])), toTypeName(arrayMin([toInt256(0)])), toTypeName(arrayMin([toUInt256(0)])); +SELECT toTypeName(arrayMin([toFloat32(0)])), toTypeName(arrayMin([toFloat64(0)])); +SELECT toTypeName(arrayMin([toDecimal32(0, 8)])), toTypeName(arrayMin([toDecimal64(0, 8)])), toTypeName(arrayMin([toDecimal128(0, 8)])); +SELECT 'Types of aggregation result array max'; +SELECT toTypeName(arrayMax([toInt8(0)])), toTypeName(arrayMax([toInt16(0)])), toTypeName(arrayMax([toInt32(0)])), toTypeName(arrayMax([toInt64(0)])); +SELECT toTypeName(arrayMax([toUInt8(0)])), toTypeName(arrayMax([toUInt16(0)])), toTypeName(arrayMax([toUInt32(0)])), toTypeName(arrayMax([toUInt64(0)])); +SELECT toTypeName(arrayMax([toInt128(0)])), toTypeName(arrayMax([toInt256(0)])), toTypeName(arrayMax([toUInt256(0)])); +SELECT toTypeName(arrayMax([toFloat32(0)])), toTypeName(arrayMax([toFloat64(0)])); +SELECT toTypeName(arrayMax([toDecimal32(0, 8)])), toTypeName(arrayMax([toDecimal64(0, 8)])), toTypeName(arrayMax([toDecimal128(0, 8)])); +SELECT 'Types of aggregation result array summ'; +SELECT toTypeName(arraySum([toInt8(0)])), toTypeName(arraySum([toInt16(0)])), toTypeName(arraySum([toInt32(0)])), toTypeName(arraySum([toInt64(0)])); +SELECT toTypeName(arraySum([toUInt8(0)])), toTypeName(arraySum([toUInt16(0)])), toTypeName(arraySum([toUInt32(0)])), toTypeName(arraySum([toUInt64(0)])); +SELECT toTypeName(arraySum([toInt128(0)])), toTypeName(arraySum([toInt256(0)])), toTypeName(arraySum([toUInt256(0)])); +SELECT toTypeName(arraySum([toFloat32(0)])), toTypeName(arraySum([toFloat64(0)])); +SELECT toTypeName(arraySum([toDecimal32(0, 8)])), toTypeName(arraySum([toDecimal64(0, 8)])), toTypeName(arraySum([toDecimal128(0, 8)])); +SELECT 'Types of aggregation result array avg'; +SELECT toTypeName(arrayAvg([toInt8(0)])), toTypeName(arrayAvg([toInt16(0)])), toTypeName(arrayAvg([toInt32(0)])), toTypeName(arrayAvg([toInt64(0)])); +SELECT toTypeName(arrayAvg([toUInt8(0)])), toTypeName(arrayAvg([toUInt16(0)])), toTypeName(arrayAvg([toUInt32(0)])), toTypeName(arrayAvg([toUInt64(0)])); +SELECT toTypeName(arrayAvg([toInt128(0)])), toTypeName(arrayAvg([toInt256(0)])), toTypeName(arrayAvg([toUInt256(0)])); +SELECT toTypeName(arrayAvg([toFloat32(0)])), toTypeName(arrayAvg([toFloat64(0)])); +SELECT toTypeName(arrayAvg([toDecimal32(0, 8)])), toTypeName(arrayAvg([toDecimal64(0, 8)])), toTypeName(arrayAvg([toDecimal128(0, 8)])); diff --git a/parser/testdata/01602_insert_into_table_function_cluster/ast.json b/parser/testdata/01602_insert_into_table_function_cluster/ast.json new file mode 100644 index 000000000..b0982fbad --- /dev/null +++ b/parser/testdata/01602_insert_into_table_function_cluster/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery x (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001785872, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01602_insert_into_table_function_cluster/metadata.json b/parser/testdata/01602_insert_into_table_function_cluster/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01602_insert_into_table_function_cluster/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01602_insert_into_table_function_cluster/query.sql 
b/parser/testdata/01602_insert_into_table_function_cluster/query.sql new file mode 100644 index 000000000..7c3e5608e --- /dev/null +++ b/parser/testdata/01602_insert_into_table_function_cluster/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS x; +DROP TABLE IF EXISTS y; + +CREATE TABLE x AS system.numbers ENGINE = MergeTree ORDER BY number; +CREATE TABLE y AS system.numbers ENGINE = MergeTree ORDER BY number; + +-- Just one shard, sharding key isn't necessary +INSERT INTO FUNCTION cluster('test_shard_localhost', currentDatabase(), x) SELECT * FROM numbers(10); +INSERT INTO FUNCTION cluster('test_shard_localhost', currentDatabase(), x, rand()) SELECT * FROM numbers(10); + +-- More than one shard, sharding key is necessary +INSERT INTO FUNCTION cluster('test_cluster_two_shards_localhost', currentDatabase(), x) SELECT * FROM numbers(10); --{ serverError STORAGE_REQUIRES_PARAMETER } +INSERT INTO FUNCTION cluster('test_cluster_two_shards_localhost', currentDatabase(), x, rand()) SELECT * FROM numbers(10); + +INSERT INTO FUNCTION remote('127.0.0.{1,2}', currentDatabase(), y, 'default') SELECT * FROM numbers(10); -- { serverError STORAGE_REQUIRES_PARAMETER } +INSERT INTO FUNCTION remote('127.0.0.{1,2}', currentDatabase(), y, 'default', rand()) SELECT * FROM numbers(10); + +SELECT * FROM x ORDER BY number; + +SELECT * FROM remote('127.0.0.{1,2}', currentDatabase(), y) ORDER BY number; + +DROP TABLE x; +DROP TABLE y; diff --git a/parser/testdata/01602_modified_julian_day_msan/ast.json b/parser/testdata/01602_modified_julian_day_msan/ast.json new file mode 100644 index 000000000..02a4cdadb --- /dev/null +++ b/parser/testdata/01602_modified_julian_day_msan/ast.json @@ -0,0 +1,97 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tryBase64Decode (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Subquery (alias n) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function countSubstrings (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toModifiedJulianDayOrNull (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '\\0'" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Subquery (alias srocpnuv) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function regionIn (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'l. 
'" + } + ], + + "rows": 25, + + "statistics": + { + "elapsed": 0.001072004, + "rows_read": 25, + "bytes_read": 1105 + } +} diff --git a/parser/testdata/01602_modified_julian_day_msan/metadata.json b/parser/testdata/01602_modified_julian_day_msan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01602_modified_julian_day_msan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01602_modified_julian_day_msan/query.sql b/parser/testdata/01602_modified_julian_day_msan/query.sql new file mode 100644 index 000000000..829229824 --- /dev/null +++ b/parser/testdata/01602_modified_julian_day_msan/query.sql @@ -0,0 +1,4 @@ +SELECT tryBase64Decode(( SELECT countSubstrings(toModifiedJulianDayOrNull('\0'), '') ) AS n, ( SELECT regionIn('l. ') ) AS srocpnuv); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT countSubstrings(toModifiedJulianDayOrNull('\0'), ''); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT countSubstrings(toInt32OrNull('123qwe123'), ''); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT 'Ok.'; diff --git a/parser/testdata/01602_runningConcurrency/ast.json b/parser/testdata/01602_runningConcurrency/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01602_runningConcurrency/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01602_runningConcurrency/metadata.json b/parser/testdata/01602_runningConcurrency/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01602_runningConcurrency/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01602_runningConcurrency/query.sql b/parser/testdata/01602_runningConcurrency/query.sql new file mode 100644 index 000000000..0a08f116d --- /dev/null +++ b/parser/testdata/01602_runningConcurrency/query.sql @@ -0,0 +1,51 @@ +-- +SELECT 'Invocation with Date columns'; + +DROP TABLE IF EXISTS runningConcurrency_test; +CREATE TABLE runningConcurrency_test(begin Date, end Date) ENGINE = Memory; + +INSERT INTO runningConcurrency_test VALUES ('2020-12-01', '2020-12-10'), ('2020-12-02', '2020-12-10'), ('2020-12-03', '2020-12-12'), ('2020-12-10', '2020-12-12'), ('2020-12-13', '2020-12-20'); +SELECT runningConcurrency(begin, end) FROM runningConcurrency_test; + +DROP TABLE runningConcurrency_test; + +-- +SELECT 'Invocation with DateTime'; + +DROP TABLE IF EXISTS runningConcurrency_test; +CREATE TABLE runningConcurrency_test(begin DateTime, end DateTime) ENGINE = Memory; + +INSERT INTO runningConcurrency_test VALUES ('2020-12-01 00:00:00', '2020-12-01 00:59:59'), ('2020-12-01 00:30:00', '2020-12-01 00:59:59'), ('2020-12-01 00:40:00', '2020-12-01 01:30:30'), ('2020-12-01 01:10:00', '2020-12-01 01:30:30'), ('2020-12-01 01:50:00', '2020-12-01 01:59:59'); +SELECT runningConcurrency(begin, end) FROM runningConcurrency_test; + +DROP TABLE runningConcurrency_test; + +-- +SELECT 'Invocation with DateTime64'; + +DROP TABLE IF EXISTS runningConcurrency_test; +CREATE TABLE runningConcurrency_test(begin DateTime64(3), end DateTime64(3)) ENGINE = Memory; + +INSERT INTO runningConcurrency_test VALUES ('2020-12-01 00:00:00.000', '2020-12-01 00:00:00.100'), ('2020-12-01 00:00:00.010', '2020-12-01 00:00:00.100'), ('2020-12-01 00:00:00.020', '2020-12-01 00:00:00.200'), ('2020-12-01 00:00:00.150', '2020-12-01 00:00:00.200'), ('2020-12-01 00:00:00.250', '2020-12-01 00:00:00.300'); +SELECT runningConcurrency(begin, end) FROM runningConcurrency_test; + +DROP TABLE runningConcurrency_test; + 
+-- +SELECT 'Erroneous cases'; + +-- Constant columns are currently not supported. +SELECT runningConcurrency(toDate(arrayJoin([1, 2])), toDate('2000-01-01')); -- { serverError ILLEGAL_COLUMN } + +-- Unsupported data types +SELECT runningConcurrency('strings are', 'not supported'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT runningConcurrency(NULL, NULL); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT runningConcurrency(CAST(NULL, 'Nullable(DateTime)'), CAST(NULL, 'Nullable(DateTime)')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- Mismatching data types +SELECT runningConcurrency(toDate('2000-01-01'), toDateTime('2000-01-01 00:00:00')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- begin > end +SELECT runningConcurrency(toDate('2000-01-02'), toDate('2000-01-01')); -- { serverError INCORRECT_DATA } + + diff --git a/parser/testdata/01602_show_create_view/ast.json b/parser/testdata/01602_show_create_view/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01602_show_create_view/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01602_show_create_view/metadata.json b/parser/testdata/01602_show_create_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01602_show_create_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01602_show_create_view/query.sql b/parser/testdata/01602_show_create_view/query.sql new file mode 100644 index 000000000..a1b412fb3 --- /dev/null +++ b/parser/testdata/01602_show_create_view/query.sql @@ -0,0 +1,47 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS test_1602; + +CREATE DATABASE test_1602; + +CREATE TABLE test_1602.tbl (`EventDate` DateTime, `CounterID` UInt32, `UserID` UInt32) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SETTINGS index_granularity = 8192; + +CREATE VIEW test_1602.v AS SELECT * FROM test_1602.tbl; + +CREATE VIEW test_1602.DATABASE AS SELECT * FROM test_1602.tbl; + +CREATE VIEW test_1602.DICTIONARY AS SELECT * FROM test_1602.tbl; + +CREATE VIEW test_1602.TABLE AS SELECT * FROM test_1602.tbl; + +CREATE MATERIALIZED VIEW test_1602.vv (`EventDate` DateTime, `CounterID` UInt32, `UserID` UInt32) ENGINE = MergeTree() PARTITION BY toYYYYMM(EventDate) ORDER BY (CounterID, EventDate, intHash32(UserID)) SETTINGS index_granularity = 8192 AS SELECT * FROM test_1602.tbl; + +CREATE VIEW test_1602.VIEW AS SELECT * FROM test_1602.tbl; + +SHOW CREATE VIEW test_1602.v; + +SHOW CREATE VIEW test_1602.vv; + +SHOW CREATE VIEW test_1602.not_exist_view; -- { serverError CANNOT_GET_CREATE_TABLE_QUERY } + +SHOW CREATE VIEW test_1602.tbl; -- { serverError BAD_ARGUMENTS } + +SHOW CREATE TEMPORARY VIEW; -- { clientError SYNTAX_ERROR } + +SHOW CREATE VIEW; -- { clientError SYNTAX_ERROR } + +SHOW CREATE DATABASE; -- { clientError SYNTAX_ERROR } + +SHOW CREATE DICTIONARY; -- { clientError SYNTAX_ERROR } + +SHOW CREATE TABLE; -- { clientError SYNTAX_ERROR } + +SHOW CREATE test_1602.VIEW; + +SHOW CREATE test_1602.DATABASE; + +SHOW CREATE test_1602.DICTIONARY; + +SHOW CREATE test_1602.TABLE; + +DROP DATABASE IF EXISTS test_1602; diff --git a/parser/testdata/01602_temporary_table_in_system_tables/ast.json b/parser/testdata/01602_temporary_table_in_system_tables/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01602_temporary_table_in_system_tables/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/01602_temporary_table_in_system_tables/metadata.json b/parser/testdata/01602_temporary_table_in_system_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01602_temporary_table_in_system_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01602_temporary_table_in_system_tables/query.sql b/parser/testdata/01602_temporary_table_in_system_tables/query.sql new file mode 100644 index 000000000..ffc8d57a7 --- /dev/null +++ b/parser/testdata/01602_temporary_table_in_system_tables/query.sql @@ -0,0 +1,19 @@ +-- Tags: memory-engine +DROP TEMPORARY TABLE IF EXISTS test_01602a; +DROP TEMPORARY TABLE IF EXISTS test_01602b; + +CREATE TEMPORARY TABLE test_01602a(x UInt32); +CREATE TEMPORARY TABLE test_01602b(y Float64, z String); + +SELECT database, name, create_table_query, engine, engine_full, is_temporary FROM system.tables WHERE name LIKE 'test_01602%' ORDER BY name; +SELECT * FROM system.columns WHERE table LIKE 'test_01602%' ORDER BY table, name; + +SHOW CREATE TEMPORARY TABLE test_01602a; +SHOW CREATE TEMPORARY TABLE test_01602b; + +SELECT COUNT() FROM system.databases WHERE name='_temporary_and_external_tables'; +SELECT COUNT() FROM system.tables WHERE database='_temporary_and_external_tables'; +SELECT COUNT() FROM system.columns WHERE database='_temporary_and_external_tables'; + +DROP TEMPORARY TABLE test_01602a; +DROP TEMPORARY TABLE test_01602b; diff --git a/parser/testdata/01603_decimal_mult_float/ast.json b/parser/testdata/01603_decimal_mult_float/ast.json new file mode 100644 index 000000000..e9c2a93fb --- /dev/null +++ b/parser/testdata/01603_decimal_mult_float/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001502311, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01603_decimal_mult_float/metadata.json b/parser/testdata/01603_decimal_mult_float/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01603_decimal_mult_float/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01603_decimal_mult_float/query.sql b/parser/testdata/01603_decimal_mult_float/query.sql new file mode 100644 index 000000000..1a4652df2 --- /dev/null +++ b/parser/testdata/01603_decimal_mult_float/query.sql @@ -0,0 +1,29 @@ +SET optimize_arithmetic_operations_in_aggregate_functions = 0; + +SELECT round(toDecimal32(2, 2) * 1.2, 6); +SELECT round(toDecimal64(0.5, 2) * 20.33, 6); +SELECT round(0.00001 * toDecimal32(12, 2), 6); +SELECT round(30.033 * toDecimal32(5, 1), 6); + +CREATE TABLE IF NOT EXISTS test01603 ( + f64 Float64, + d Decimal64(3) DEFAULT toDecimal32(f64, 3), + f32 Float32 DEFAULT f64 +) ENGINE=MergeTree() ORDER BY f32; + +INSERT INTO test01603(f64) SELECT 1 / (number + 1) FROM system.numbers LIMIT 1000; + +SELECT round(sum(d * 1.1), 6) FROM test01603; +SELECT round(sum(8.01 * d), 6) FROM test01603; + +SELECT round(sum(f64 * toDecimal64(80, 2)), 6) FROM test01603; +SELECT round(sum(toDecimal64(40, 2) * f32), 6) FROM test01603; +SELECT round(sum(f64 * toDecimal64(0.1, 2)), 6) FROM test01603; +SELECT round(sum(toDecimal64(0.3, 2) * f32), 6) FROM test01603; + +SELECT round(sum(f64 * d), 6) FROM test01603; +SELECT round(sum(d * f64), 6) FROM test01603; +SELECT round(sum(f32 * d), 6) FROM test01603; +SELECT round(sum(d * f32), 6) FROM test01603; + +DROP TABLE IF EXISTS test01603; 
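-- Editor's aside (illustrative, not part of the original test): mixed
-- Decimal/Float arithmetic in ClickHouse is carried out in Float64, which is
-- why every check above wraps the product in round(..., 6) instead of
-- comparing exact decimal values. The assumed result type can be inspected
-- directly:
SELECT toTypeName(toDecimal32(2, 2) * 1.2) AS product_type; -- expected: Float64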
diff --git a/parser/testdata/01603_insert_select_too_many_parts/ast.json b/parser/testdata/01603_insert_select_too_many_parts/ast.json new file mode 100644 index 000000000..b2d2019f2 --- /dev/null +++ b/parser/testdata/01603_insert_select_too_many_parts/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery too_many_parts (children 1)" + }, + { + "explain": " Identifier too_many_parts" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001554319, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/01603_insert_select_too_many_parts/metadata.json b/parser/testdata/01603_insert_select_too_many_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01603_insert_select_too_many_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01603_insert_select_too_many_parts/query.sql b/parser/testdata/01603_insert_select_too_many_parts/query.sql new file mode 100644 index 000000000..276e3e015 --- /dev/null +++ b/parser/testdata/01603_insert_select_too_many_parts/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS too_many_parts; +CREATE TABLE too_many_parts (x UInt64) ENGINE = MergeTree ORDER BY tuple() SETTINGS parts_to_delay_insert = 5, parts_to_throw_insert = 5; + +SYSTEM STOP MERGES too_many_parts; +SET max_block_size = 1, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +-- Avoid concurrent parts check to avoid flakiness +SET max_threads=1, max_insert_threads=1; + +-- exception is not thrown if threshold is exceeded when multi-block INSERT is already started. +-- Single thread is used as different threads check it separately https://github.com/ClickHouse/ClickHouse/issues/61158 +INSERT INTO too_many_parts SELECT * FROM numbers(10) SETTINGS max_insert_threads=1; +SELECT count() FROM too_many_parts; + +-- exception is thrown if threshold is exceeded on new INSERT. 
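-- Editor's aside (illustrative, not part of the original test): with
-- max_block_size = 1 and merges stopped, the first INSERT above left one part
-- per row, so the table already exceeds parts_to_throw_insert = 5; that is why
-- the fresh INSERT below must fail. The active part count can be checked with:
SELECT count() AS active_parts FROM system.parts
WHERE database = currentDatabase() AND table = 'too_many_parts' AND active;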
+INSERT INTO too_many_parts SELECT * FROM numbers(10); -- { serverError TOO_MANY_PARTS } + +DROP TABLE too_many_parts; diff --git a/parser/testdata/01603_read_with_backoff_bug/ast.json b/parser/testdata/01603_read_with_backoff_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01603_read_with_backoff_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01603_read_with_backoff_bug/metadata.json b/parser/testdata/01603_read_with_backoff_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01603_read_with_backoff_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01603_read_with_backoff_bug/query.sql b/parser/testdata/01603_read_with_backoff_bug/query.sql new file mode 100644 index 000000000..212a18b57 --- /dev/null +++ b/parser/testdata/01603_read_with_backoff_bug/query.sql @@ -0,0 +1,22 @@ +-- Tags: long, no-tsan, no-msan, no-distributed-cache +-- Too long for TSan and MSan + +set enable_filesystem_cache=0; +set enable_filesystem_cache_on_write_operations=0; +set max_rows_to_read = '30M'; + +drop table if exists t; + +create table t (x UInt64, s String) engine = MergeTree order by x SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +INSERT INTO t SELECT + number, + if(number < (8129 * 1024), arrayStringConcat(arrayMap(x -> toString(x), range(number % 128)), ' '), '') +FROM numbers_mt((8129 * 1024) * 3) settings max_insert_threads=8, max_rows_to_read=0, max_memory_usage='10Gi'; + +-- optimize table t final; + +select count(), sum(length(s)) from t settings max_threads = 3, read_backoff_min_latency_ms = 1, read_backoff_max_throughput = 1000000000, read_backoff_min_interval_between_events_ms = 1, read_backoff_min_events = 1, read_backoff_min_concurrency = 1; +select count(), sum(length(s)) from t settings max_threads = 3, read_backoff_min_latency_ms = 1, read_backoff_max_throughput = 1000000000, read_backoff_min_interval_between_events_ms = 1, read_backoff_min_events = 1, read_backoff_min_concurrency = 1; +select count(), sum(length(s)) from t settings max_threads = 3, read_backoff_min_latency_ms = 1, read_backoff_max_throughput = 1000000000, read_backoff_min_interval_between_events_ms = 1, read_backoff_min_events = 1, read_backoff_min_concurrency = 1; + +drop table if exists t; diff --git a/parser/testdata/01603_remove_column_ttl/ast.json b/parser/testdata/01603_remove_column_ttl/ast.json new file mode 100644 index 000000000..05bceba59 --- /dev/null +++ b/parser/testdata/01603_remove_column_ttl/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_with_column_ttl (children 1)" + }, + { + "explain": " Identifier table_with_column_ttl" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00155359, + "rows_read": 2, + "bytes_read": 94 + } +} diff --git a/parser/testdata/01603_remove_column_ttl/metadata.json b/parser/testdata/01603_remove_column_ttl/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01603_remove_column_ttl/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01603_remove_column_ttl/query.sql b/parser/testdata/01603_remove_column_ttl/query.sql new file mode 100644 index 000000000..556d968d2 --- /dev/null +++ b/parser/testdata/01603_remove_column_ttl/query.sql @@ -0,0 +1,30 @@ +DROP TABLE IF EXISTS table_with_column_ttl; +CREATE TABLE table_with_column_ttl +( + 
EventTime DateTime, + UserID UInt64, + Age UInt8 TTL EventTime + INTERVAL 3 MONTH +) +ENGINE MergeTree() +ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0; -- column TTL doesn't work for compact parts + +INSERT INTO table_with_column_ttl VALUES (now(), 1, 32); + +INSERT INTO table_with_column_ttl VALUES (now() - INTERVAL 4 MONTH, 2, 45); + +OPTIMIZE TABLE table_with_column_ttl FINAL; + +SELECT UserID, Age FROM table_with_column_ttl ORDER BY UserID; + +ALTER TABLE table_with_column_ttl MODIFY COLUMN Age REMOVE TTL; + +SHOW CREATE TABLE table_with_column_ttl; + +INSERT INTO table_with_column_ttl VALUES (now() - INTERVAL 10 MONTH, 3, 27); + +OPTIMIZE TABLE table_with_column_ttl FINAL; + +SELECT UserID, Age FROM table_with_column_ttl ORDER BY UserID; + +DROP TABLE table_with_column_ttl; diff --git a/parser/testdata/01603_rename_overwrite_bug/ast.json b/parser/testdata/01603_rename_overwrite_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01603_rename_overwrite_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01603_rename_overwrite_bug/metadata.json b/parser/testdata/01603_rename_overwrite_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01603_rename_overwrite_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01603_rename_overwrite_bug/query.sql b/parser/testdata/01603_rename_overwrite_bug/query.sql new file mode 100644 index 000000000..7fc7b30cd --- /dev/null +++ b/parser/testdata/01603_rename_overwrite_bug/query.sql @@ -0,0 +1,28 @@ +-- Tags: no-parallel, memory-engine, log-engine + +SET send_logs_level = 'fatal'; + +DROP database IF EXISTS test_1603_rename_bug_ordinary; +set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. +create database test_1603_rename_bug_ordinary engine=Ordinary; +create table test_1603_rename_bug_ordinary.foo engine=Memory as select * from numbers(100); +create table test_1603_rename_bug_ordinary.bar engine=Log as select * from numbers(200); +detach table test_1603_rename_bug_ordinary.foo; +rename table test_1603_rename_bug_ordinary.bar to test_1603_rename_bug_ordinary.foo; -- { serverError TABLE_ALREADY_EXISTS } +attach table test_1603_rename_bug_ordinary.foo; +SELECT count() from test_1603_rename_bug_ordinary.foo; +SELECT count() from test_1603_rename_bug_ordinary.bar; +DROP DATABASE test_1603_rename_bug_ordinary; + +-- was not broken, adding just in case. 
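-- Editor's aside: under the Atomic database engine, RENAME is an atomic
-- metadata swap, but renaming onto a name that is still occupied (here by a
-- detached table) must fail with TABLE_ALREADY_EXISTS exactly as in the
-- Ordinary case above; the block below verifies that parity.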
+DROP database IF EXISTS test_1603_rename_bug_atomic; +create database test_1603_rename_bug_atomic engine=Atomic; +create table test_1603_rename_bug_atomic.foo engine=Memory as select * from numbers(100); +create table test_1603_rename_bug_atomic.bar engine=Log as select * from numbers(200); +detach table test_1603_rename_bug_atomic.foo; +rename table test_1603_rename_bug_atomic.bar to test_1603_rename_bug_atomic.foo; -- { serverError TABLE_ALREADY_EXISTS } +attach table test_1603_rename_bug_atomic.foo; +SELECT count() from test_1603_rename_bug_atomic.foo; +SELECT count() from test_1603_rename_bug_atomic.bar; +DROP DATABASE test_1603_rename_bug_atomic; diff --git a/parser/testdata/01604_explain_ast_of_nonselect_query/ast.json b/parser/testdata/01604_explain_ast_of_nonselect_query/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01604_explain_ast_of_nonselect_query/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01604_explain_ast_of_nonselect_query/metadata.json b/parser/testdata/01604_explain_ast_of_nonselect_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01604_explain_ast_of_nonselect_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01604_explain_ast_of_nonselect_query/query.sql b/parser/testdata/01604_explain_ast_of_nonselect_query/query.sql new file mode 100644 index 000000000..a70785cce --- /dev/null +++ b/parser/testdata/01604_explain_ast_of_nonselect_query/query.sql @@ -0,0 +1,3 @@ +explain ast; -- { clientError SYNTAX_ERROR } +explain ast alter table t1 delete where date = today(); +explain ast create function double AS (n) -> 2*n; diff --git a/parser/testdata/01605_adaptive_granularity_block_borders/ast.json b/parser/testdata/01605_adaptive_granularity_block_borders/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01605_adaptive_granularity_block_borders/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01605_adaptive_granularity_block_borders/metadata.json b/parser/testdata/01605_adaptive_granularity_block_borders/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01605_adaptive_granularity_block_borders/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01605_adaptive_granularity_block_borders/query.sql b/parser/testdata/01605_adaptive_granularity_block_borders/query.sql new file mode 100644 index 000000000..b65d0a3e7 --- /dev/null +++ b/parser/testdata/01605_adaptive_granularity_block_borders/query.sql @@ -0,0 +1,42 @@ +-- Tags: long, no-random-merge-tree-settings, no-random-settings, no-tsan, no-debug, no-object-storage, no-distributed-cache +-- no-tsan: too slow +-- no-object-storage: for remote tables we use a thread pool even when reading with one stream, so memory consumption is higher + +SET use_uncompressed_cache = 0; +SET allow_prefetched_read_pool_for_remote_filesystem=0; + +DROP TABLE IF EXISTS adaptive_table; + +-- If the granularity of consecutive blocks differs a lot, then adaptive +-- granularity will adjust the amount of marks correctly. +-- Data for the test was derived empirically; it's quite hard to get good parameters.
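-- Editor's aside (worked numbers, not part of the original test): with
-- index_granularity_bytes = 1048576 a granule is capped at roughly 1 MiB, so
-- the ~100 KiB rows inserted below force granules of about ten rows, while the
-- 1-byte rows keep far larger granules; that spread is what exercises the mark
-- computation. The assumed arithmetic:
SELECT intDiv(1048576, 102400) AS approx_rows_per_granule_for_wide_rows; -- 10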
+ +CREATE TABLE adaptive_table( + key UInt64, + value String +) ENGINE MergeTree() +ORDER BY key +SETTINGS index_granularity_bytes = 1048576, +min_bytes_for_wide_part = 0, +min_rows_for_wide_part = 0, +enable_vertical_merge_algorithm = 0; + +SET max_block_size=900; + +-- There are about 900 marks for our settings. +SET optimize_trivial_insert_select = 1; +INSERT INTO adaptive_table SELECT number, if(number > 700, randomPrintableASCII(102400), randomPrintableASCII(1)) FROM numbers(10000); + +OPTIMIZE TABLE adaptive_table FINAL; + +SELECT marks FROM system.parts WHERE table = 'adaptive_table' and database=currentDatabase() and active; + +SET enable_filesystem_cache = 0; + +-- If we have computed granularity incorrectly, then we will exceed this limit. +SET max_memory_usage='30M'; +SET max_threads = 1; + +SELECT max(length(value)) FROM adaptive_table; + +DROP TABLE IF EXISTS adaptive_table; diff --git a/parser/testdata/01605_dictinct_two_level/ast.json b/parser/testdata/01605_dictinct_two_level/ast.json new file mode 100644 index 000000000..f1e8ddb34 --- /dev/null +++ b/parser/testdata/01605_dictinct_two_level/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00194437, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01605_dictinct_two_level/metadata.json b/parser/testdata/01605_dictinct_two_level/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01605_dictinct_two_level/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01605_dictinct_two_level/query.sql b/parser/testdata/01605_dictinct_two_level/query.sql new file mode 100644 index 000000000..f4353b84c --- /dev/null +++ b/parser/testdata/01605_dictinct_two_level/query.sql @@ -0,0 +1,25 @@ +SET group_by_two_level_threshold_bytes = 1; +SET group_by_two_level_threshold = 1; + +SELECT groupArray(DISTINCT toString(number % 10)) FROM numbers_mt(50000) + GROUP BY number ORDER BY number LIMIT 10 + SETTINGS max_threads = 2, max_block_size = 2000; + +DROP TABLE IF EXISTS distinct_two_level; + +CREATE TABLE distinct_two_level ( + time DateTime64(3), + domain String, + subdomain String +) ENGINE = MergeTree ORDER BY time; + +INSERT INTO distinct_two_level SELECT 1546300800000, 'test.com', concat('foo', toString(number % 10000)) from numbers(10000); +INSERT INTO distinct_two_level SELECT 1546300800000, concat('test.com', toString(number / 10000)) , concat('foo', toString(number % 10000)) from numbers(10000); + +SELECT + domain, arrayUniq(groupArraySample(5, 11111)(DISTINCT subdomain)) AS example_subdomains +FROM distinct_two_level +GROUP BY domain ORDER BY domain, example_subdomains +LIMIT 10; + +DROP TABLE IF EXISTS distinct_two_level; diff --git a/parser/testdata/01605_drop_settings_profile_while_assigned/ast.json b/parser/testdata/01605_drop_settings_profile_while_assigned/ast.json new file mode 100644 index 000000000..6a763b060 --- /dev/null +++ b/parser/testdata/01605_drop_settings_profile_while_assigned/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateUserQuery" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001099865, + "rows_read": 1, + "bytes_read": 23 + } +} diff --git a/parser/testdata/01605_drop_settings_profile_while_assigned/metadata.json 
b/parser/testdata/01605_drop_settings_profile_while_assigned/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01605_drop_settings_profile_while_assigned/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01605_drop_settings_profile_while_assigned/query.sql b/parser/testdata/01605_drop_settings_profile_while_assigned/query.sql new file mode 100644 index 000000000..c9205d7fd --- /dev/null +++ b/parser/testdata/01605_drop_settings_profile_while_assigned/query.sql @@ -0,0 +1,8 @@ +CREATE USER OR REPLACE 'test_01605'; +CREATE SETTINGS PROFILE OR REPLACE 'test_01605'; +ALTER USER 'test_01605' SETTINGS PROFILE 'test_01605'; +SELECT * FROM system.settings_profile_elements WHERE user_name='test_01605' OR profile_name='test_01605'; +DROP SETTINGS PROFILE 'test_01605'; +SELECT 'PROFILE DROPPED'; +SELECT * FROM system.settings_profile_elements WHERE user_name='test_01605' OR profile_name='test_01605'; +DROP USER 'test_01605'; diff --git a/parser/testdata/01605_key_condition_enum_int/ast.json b/parser/testdata/01605_key_condition_enum_int/ast.json new file mode 100644 index 000000000..2f22419bc --- /dev/null +++ b/parser/testdata/01605_key_condition_enum_int/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery enum (children 1)" + }, + { + "explain": " Identifier enum" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001356197, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01605_key_condition_enum_int/metadata.json b/parser/testdata/01605_key_condition_enum_int/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01605_key_condition_enum_int/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01605_key_condition_enum_int/query.sql b/parser/testdata/01605_key_condition_enum_int/query.sql new file mode 100644 index 000000000..e6e43d8e8 --- /dev/null +++ b/parser/testdata/01605_key_condition_enum_int/query.sql @@ -0,0 +1,4 @@ +drop table if exists enum; +create table enum engine MergeTree order by enum as select cast(1, 'Enum8(\'zero\'=0, \'one\'=1)') AS enum; +select * from enum where enum = 1; +drop table if exists enum; diff --git a/parser/testdata/01605_skip_idx_compact_parts/ast.json b/parser/testdata/01605_skip_idx_compact_parts/ast.json new file mode 100644 index 000000000..85e40d254 --- /dev/null +++ b/parser/testdata/01605_skip_idx_compact_parts/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery skip_idx_comp_parts (children 1)" + }, + { + "explain": " Identifier skip_idx_comp_parts" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001687182, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/01605_skip_idx_compact_parts/metadata.json b/parser/testdata/01605_skip_idx_compact_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01605_skip_idx_compact_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01605_skip_idx_compact_parts/query.sql b/parser/testdata/01605_skip_idx_compact_parts/query.sql new file mode 100644 index 000000000..9d44550c0 --- /dev/null +++ b/parser/testdata/01605_skip_idx_compact_parts/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS skip_idx_comp_parts; +CREATE TABLE skip_idx_comp_parts (a Int, b Int, index b_idx b TYPE 
minmax GRANULARITY 4) + ENGINE = MergeTree ORDER BY a + SETTINGS index_granularity=256, index_granularity_bytes = '10Mi', merge_max_block_size=100; + +SYSTEM STOP MERGES skip_idx_comp_parts; + +INSERT INTO skip_idx_comp_parts SELECT number, number FROM numbers(200); +INSERT INTO skip_idx_comp_parts SELECT number, number FROM numbers(200); +INSERT INTO skip_idx_comp_parts SELECT number, number FROM numbers(200); +INSERT INTO skip_idx_comp_parts SELECT number, number FROM numbers(200); + +SYSTEM START MERGES skip_idx_comp_parts; +OPTIMIZE TABLE skip_idx_comp_parts FINAL; + +SELECT count() FROM skip_idx_comp_parts WHERE b > 100; + +DROP TABLE skip_idx_comp_parts; diff --git a/parser/testdata/01606_merge_from_wide_to_compact/ast.json b/parser/testdata/01606_merge_from_wide_to_compact/ast.json new file mode 100644 index 000000000..25f2a9c16 --- /dev/null +++ b/parser/testdata/01606_merge_from_wide_to_compact/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery wide_to_comp (children 1)" + }, + { + "explain": " Identifier wide_to_comp" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001226368, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/01606_merge_from_wide_to_compact/metadata.json b/parser/testdata/01606_merge_from_wide_to_compact/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01606_merge_from_wide_to_compact/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01606_merge_from_wide_to_compact/query.sql b/parser/testdata/01606_merge_from_wide_to_compact/query.sql new file mode 100644 index 000000000..5bdafbef1 --- /dev/null +++ b/parser/testdata/01606_merge_from_wide_to_compact/query.sql @@ -0,0 +1,39 @@ +DROP TABLE IF EXISTS wide_to_comp; + +CREATE TABLE wide_to_comp (a Int, b Int, c Int) + ENGINE = MergeTree ORDER BY a + settings vertical_merge_algorithm_min_rows_to_activate = 1, + vertical_merge_algorithm_min_columns_to_activate = 1, + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 0, + merge_max_block_size = 8192, + index_granularity = 8192, index_granularity_bytes = '10Mi'; + +SYSTEM STOP merges wide_to_comp; + +INSERT INTO wide_to_comp SELECT number, number, number FROM numbers(100000); +INSERT INTO wide_to_comp SELECT number, number, number FROM numbers(100000); +INSERT INTO wide_to_comp SELECT number, number, number FROM numbers(100000); + +SELECT name, part_type FROM system.parts WHERE table = 'wide_to_comp' AND database = currentDatabase() AND active ORDER BY name; + +ALTER TABLE wide_to_comp MODIFY setting min_rows_for_wide_part = 10000000; +SYSTEM START merges wide_to_comp; +OPTIMIZE TABLE wide_to_comp FINAL; + +SELECT name, part_type FROM system.parts WHERE table = 'wide_to_comp' AND database = currentDatabase() AND active ORDER BY name; +SELECT count() FROM wide_to_comp WHERE not ignore(*); + +SYSTEM STOP merges wide_to_comp; +INSERT INTO wide_to_comp SELECT number, number, number FROM numbers(100000); + +SELECT name, part_type FROM system.parts WHERE table = 'wide_to_comp' AND database = currentDatabase() AND active ORDER BY name; + +ALTER TABLE wide_to_comp MODIFY setting min_rows_for_wide_part = 10000000; +SYSTEM START merges wide_to_comp; +OPTIMIZE TABLE wide_to_comp FINAL; + +SELECT name, part_type FROM system.parts WHERE table = 'wide_to_comp' AND database = currentDatabase() AND active ORDER BY name; +SELECT count() FROM wide_to_comp WHERE not ignore(*); + +DROP TABLE 
wide_to_comp; diff --git a/parser/testdata/01611_constant_folding_subqueries/ast.json b/parser/testdata/01611_constant_folding_subqueries/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01611_constant_folding_subqueries/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01611_constant_folding_subqueries/metadata.json b/parser/testdata/01611_constant_folding_subqueries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01611_constant_folding_subqueries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01611_constant_folding_subqueries/query.sql b/parser/testdata/01611_constant_folding_subqueries/query.sql new file mode 100644 index 000000000..b30fb43f6 --- /dev/null +++ b/parser/testdata/01611_constant_folding_subqueries/query.sql @@ -0,0 +1,8 @@ +-- { echo } +SELECT * FROM (SELECT (SELECT * FROM system.numbers LIMIT 1 OFFSET 1) AS n, toUInt64(10 / n)) FORMAT CSV; +SELECT (SELECT * FROM system.numbers LIMIT 1 OFFSET 1) AS n, toUInt64(10 / n) FORMAT CSV; +EXPLAIN SYNTAX SELECT (SELECT * FROM system.numbers LIMIT 1 OFFSET 1) AS n, toUInt64(10 / n); +SELECT * FROM (WITH (SELECT * FROM system.numbers LIMIT 1 OFFSET 1) AS n, toUInt64(10 / n) as q SELECT * FROM system.one WHERE q > 0); + +SELECT * FROM (SELECT (SELECT '\d[a-z]') AS n, extractAll('5abc', assumeNotNull(n))) FORMAT CSV; +EXPLAIN SYNTAX SELECT (SELECT * FROM system.numbers LIMIT 1 OFFSET 1) AS n, toUInt64(10 / n); diff --git a/parser/testdata/01611_string_to_low_cardinality_key_alter/ast.json b/parser/testdata/01611_string_to_low_cardinality_key_alter/ast.json new file mode 100644 index 000000000..c32c57031 --- /dev/null +++ b/parser/testdata/01611_string_to_low_cardinality_key_alter/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_with_lc_key (children 1)" + }, + { + "explain": " Identifier table_with_lc_key" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001718771, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/01611_string_to_low_cardinality_key_alter/metadata.json b/parser/testdata/01611_string_to_low_cardinality_key_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01611_string_to_low_cardinality_key_alter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01611_string_to_low_cardinality_key_alter/query.sql b/parser/testdata/01611_string_to_low_cardinality_key_alter/query.sql new file mode 100644 index 000000000..3b03c82e2 --- /dev/null +++ b/parser/testdata/01611_string_to_low_cardinality_key_alter/query.sql @@ -0,0 +1,67 @@ +DROP TABLE IF EXISTS table_with_lc_key; + +CREATE TABLE table_with_lc_key +( + enum_key Enum8('x' = 2, 'y' = 1), + lc_key LowCardinality(String), + value String +) +ENGINE MergeTree() +ORDER BY (enum_key, lc_key); + +INSERT INTO table_with_lc_key VALUES(1, 'hello', 'world'); + +ALTER TABLE table_with_lc_key MODIFY COLUMN lc_key String; + +SHOW CREATE TABLE table_with_lc_key; + +DETACH TABLE table_with_lc_key; +ATTACH TABLE table_with_lc_key; + +SELECT * FROM table_with_lc_key WHERE enum_key > 0 and lc_key like 'h%'; + +ALTER TABLE table_with_lc_key MODIFY COLUMN enum_key Enum('x' = 2, 'y' = 1, 'z' = 3); +ALTER TABLE table_with_lc_key MODIFY COLUMN enum_key Enum16('x' = 2, 'y' = 1, 'z' = 3); --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} +SHOW CREATE TABLE 
table_with_lc_key; + +DETACH TABLE table_with_lc_key; +ATTACH TABLE table_with_lc_key; + +SELECT * FROM table_with_lc_key WHERE enum_key > 0 and lc_key like 'h%'; + +ALTER TABLE table_with_lc_key MODIFY COLUMN enum_key Int8; + +SHOW CREATE TABLE table_with_lc_key; + +DETACH TABLE table_with_lc_key; +ATTACH TABLE table_with_lc_key; + +SELECT * FROM table_with_lc_key WHERE enum_key > 0 and lc_key like 'h%'; + +DROP TABLE IF EXISTS table_with_lc_key; + + +DROP TABLE IF EXISTS table_with_string_key; +CREATE TABLE table_with_string_key +( + int_key Int8, + str_key String, + value String +) +ENGINE MergeTree() +ORDER BY (int_key, str_key); + +INSERT INTO table_with_string_key VALUES(1, 'hello', 'world'); + +ALTER TABLE table_with_string_key MODIFY COLUMN str_key LowCardinality(String); + +SHOW CREATE TABLE table_with_string_key; + +DETACH TABLE table_with_string_key; +ATTACH TABLE table_with_string_key; + +SELECT * FROM table_with_string_key WHERE int_key > 0 and str_key like 'h%'; + +ALTER TABLE table_with_string_key MODIFY COLUMN int_key Enum8('y' = 1, 'x' = 2); --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +DROP TABLE IF EXISTS table_with_string_key; diff --git a/parser/testdata/01614_with_fill_with_limit/ast.json b/parser/testdata/01614_with_fill_with_limit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01614_with_fill_with_limit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01614_with_fill_with_limit/metadata.json b/parser/testdata/01614_with_fill_with_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01614_with_fill_with_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01614_with_fill_with_limit/query.sql b/parser/testdata/01614_with_fill_with_limit/query.sql new file mode 100644 index 000000000..119117af2 --- /dev/null +++ b/parser/testdata/01614_with_fill_with_limit/query.sql @@ -0,0 +1,15 @@ +SELECT + toFloat32(number % 10) AS n, + 'original' AS source +FROM numbers(10) +WHERE (number % 3) = 1 +ORDER BY n ASC WITH FILL STEP 1 +LIMIT 2; + +SELECT + toFloat32(number % 10) AS n, + 'original' AS source +FROM numbers(10) +WHERE (number % 3) = 1 +ORDER BY n ASC WITH FILL STEP 1 +LIMIT 2 WITH TIES; diff --git a/parser/testdata/01615_random_one_shard_insertion/ast.json b/parser/testdata/01615_random_one_shard_insertion/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01615_random_one_shard_insertion/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01615_random_one_shard_insertion/metadata.json b/parser/testdata/01615_random_one_shard_insertion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01615_random_one_shard_insertion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01615_random_one_shard_insertion/query.sql b/parser/testdata/01615_random_one_shard_insertion/query.sql new file mode 100644 index 000000000..9188adf5c --- /dev/null +++ b/parser/testdata/01615_random_one_shard_insertion/query.sql @@ -0,0 +1,28 @@ +-- Tags: shard, no-parallel + +create database if not exists shard_0; +create database if not exists shard_1; +drop table if exists shard_0.tbl; +drop table if exists shard_1.tbl; +drop table if exists distr; + +create table shard_0.tbl (number UInt64) engine = MergeTree order by number; +create table shard_1.tbl (number UInt64) engine = MergeTree order by number; +create table distr (number 
UInt64) engine = Distributed(test_cluster_two_shards_different_databases, '', tbl); + +set distributed_foreground_insert = 1; +set insert_distributed_one_random_shard = 1; +set max_block_size = 1; +set max_insert_block_size = 1; +set min_insert_block_size_rows = 1; +insert into distr select number from numbers(100); + +select count() != 0 from shard_0.tbl; +select count() != 0 from shard_1.tbl; +select * from distr order by number LIMIT 20; + +drop table if exists shard_0.tbl; +drop table if exists shard_1.tbl; +drop database shard_0; +drop database shard_1; +drop table distr; diff --git a/parser/testdata/01615_two_args_function_index_fix/ast.json b/parser/testdata/01615_two_args_function_index_fix/ast.json new file mode 100644 index 000000000..4d47ded06 --- /dev/null +++ b/parser/testdata/01615_two_args_function_index_fix/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery bad_date_time (children 1)" + }, + { + "explain": " Identifier bad_date_time" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001081925, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/01615_two_args_function_index_fix/metadata.json b/parser/testdata/01615_two_args_function_index_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01615_two_args_function_index_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01615_two_args_function_index_fix/query.sql b/parser/testdata/01615_two_args_function_index_fix/query.sql new file mode 100644 index 000000000..6128bdfcd --- /dev/null +++ b/parser/testdata/01615_two_args_function_index_fix/query.sql @@ -0,0 +1,10 @@ +drop table if exists bad_date_time; + +create table bad_date_time (time Datetime('Asia/Istanbul'), count UInt16) Engine = MergeTree() ORDER BY (time); + +insert into bad_date_time values('2020-12-20 20:59:52', 1), ('2020-12-20 21:59:52', 1), ('2020-12-20 01:59:52', 1); + +-- primary key analysis was wrong in previous versions and did not take the timezone argument into account, so an empty result was given. 
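-- Editor's aside (illustrative, not part of the original test): the
-- two-argument toDate is timezone-sensitive, which is exactly what the old
-- key-condition analysis ignored. For the last row inserted above, the UTC
-- calendar day falls one day before the Istanbul one (Istanbul is UTC+3):
SELECT
    toDate(toDateTime('2020-12-20 01:59:52', 'Asia/Istanbul'), 'UTC') AS utc_day,             -- 2020-12-19
    toDate(toDateTime('2020-12-20 01:59:52', 'Asia/Istanbul'), 'Asia/Istanbul') AS local_day;  -- 2020-12-20
-- Hence the filter toDate(time, 'UTC') = '2020-12-19' in the query below matches.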
+select toDate(time, 'UTC') dt, min(toDateTime(time, 'UTC')), max(toDateTime(time, 'UTC')), sum(count) from bad_date_time where toDate(time, 'UTC') = '2020-12-19' group by dt; + +drop table if exists bad_date_time; diff --git a/parser/testdata/01616_untuple_access_field/ast.json b/parser/testdata/01616_untuple_access_field/ast.json new file mode 100644 index 000000000..5aece0b31 --- /dev/null +++ b/parser/testdata/01616_untuple_access_field/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function untuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_2)" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001307773, + "rows_read": 16, + "bytes_read": 666 + } +} diff --git a/parser/testdata/01616_untuple_access_field/metadata.json b/parser/testdata/01616_untuple_access_field/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01616_untuple_access_field/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01616_untuple_access_field/query.sql b/parser/testdata/01616_untuple_access_field/query.sql new file mode 100644 index 000000000..82cdf80c8 --- /dev/null +++ b/parser/testdata/01616_untuple_access_field/query.sql @@ -0,0 +1 @@ +select * from (select untuple((1,2))); diff --git a/parser/testdata/01620_fix_simple_state_arg_type/ast.json b/parser/testdata/01620_fix_simple_state_arg_type/ast.json new file mode 100644 index 000000000..8575d4065 --- /dev/null +++ b/parser/testdata/01620_fix_simple_state_arg_type/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ay (children 1)" + }, + { + "explain": " Identifier ay" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001274471, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01620_fix_simple_state_arg_type/metadata.json b/parser/testdata/01620_fix_simple_state_arg_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01620_fix_simple_state_arg_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01620_fix_simple_state_arg_type/query.sql b/parser/testdata/01620_fix_simple_state_arg_type/query.sql new file mode 100644 index 000000000..e66168f5e --- /dev/null +++ b/parser/testdata/01620_fix_simple_state_arg_type/query.sql @@ -0,0 +1,13 @@ +drop table if exists ay; + +create table ay engine AggregatingMergeTree order by i as select 1 i, sumSimpleState(10) group by i; +insert into ay values(40, 60); +insert into ay values(40, 50); +insert into ay values(20, 30); +optimize table ay; +select * from ay; +insert into ay values(20, 30), (40, 10); +optimize 
table ay; +select * from ay; + +drop table if exists ay; diff --git a/parser/testdata/01621_bar_nan_arguments/ast.json b/parser/testdata/01621_bar_nan_arguments/ast.json new file mode 100644 index 000000000..45e6287ac --- /dev/null +++ b/parser/testdata/01621_bar_nan_arguments/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bar (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function minus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function greatCircleAngle (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal UInt64_65537" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_65535" + }, + { + "explain": " Literal UInt64_1048576" + }, + { + "explain": " Literal UInt64_1048577" + }, + { + "explain": " Literal Float64_nan" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001382625, + "rows_read": 21, + "bytes_read": 824 + } +} diff --git a/parser/testdata/01621_bar_nan_arguments/metadata.json b/parser/testdata/01621_bar_nan_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01621_bar_nan_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01621_bar_nan_arguments/query.sql b/parser/testdata/01621_bar_nan_arguments/query.sql new file mode 100644 index 000000000..3862b0cd5 --- /dev/null +++ b/parser/testdata/01621_bar_nan_arguments/query.sql @@ -0,0 +1,2 @@ +SELECT bar((greatCircleAngle(65537, 2, 1, 1) - 1) * 65535, 1048576, 1048577, nan); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select bar(1,1,1,nan); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/01621_decode_XML/ast.json b/parser/testdata/01621_decode_XML/ast.json new file mode 100644 index 000000000..33798cf37 --- /dev/null +++ b/parser/testdata/01621_decode_XML/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function decodeXMLComponent (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Hello, "world"!'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001635438, + "rows_read": 7, + "bytes_read": 290 + } +} diff --git a/parser/testdata/01621_decode_XML/metadata.json b/parser/testdata/01621_decode_XML/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01621_decode_XML/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01621_decode_XML/query.sql b/parser/testdata/01621_decode_XML/query.sql new file mode 100644 index 000000000..b111520db --- /dev/null +++ b/parser/testdata/01621_decode_XML/query.sql @@ 
-0,0 +1,20 @@ +SELECT decodeXMLComponent('Hello, "world"!'); +SELECT decodeXMLComponent('<123>'); +SELECT decodeXMLComponent('&clickhouse'); +SELECT decodeXMLComponent(''foo''); +SELECT decodeXMLComponent('Hello, && world'); +SELECT decodeXMLComponent('Hello, &;& world'); +SELECT decodeXMLComponent('Hello, &a;& world'); +SELECT decodeXMLComponent('Hello, <t;& world'); +SELECT decodeXMLComponent('Hello, <t& world'); +SELECT decodeXMLComponent('Hello, &t;& world'); + +--decode numeric entities + +SELECT decodeXMLComponent(' !"#$%&'()*+,-./012'); +SELECT decodeXMLComponent(')*+,-./0123456789:;<'); +SELECT decodeXMLComponent('=>?@ABCDEFGHIJKLMNOP'); +SELECT decodeXMLComponent('为'); +SELECT decodeXMLComponent('为'); +SELECT decodeXMLComponent('�'123'); +SELECT decodeXMLComponent('ЦЦЮЮЫㄱ'); \ No newline at end of file diff --git a/parser/testdata/01621_sort_after_join_pipeline_stuck/ast.json b/parser/testdata/01621_sort_after_join_pipeline_stuck/ast.json new file mode 100644 index 000000000..39d79ca9a --- /dev/null +++ b/parser/testdata/01621_sort_after_join_pipeline_stuck/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001605932, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01621_sort_after_join_pipeline_stuck/metadata.json b/parser/testdata/01621_sort_after_join_pipeline_stuck/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01621_sort_after_join_pipeline_stuck/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01621_sort_after_join_pipeline_stuck/query.sql b/parser/testdata/01621_sort_after_join_pipeline_stuck/query.sql new file mode 100644 index 000000000..e2f061f82 --- /dev/null +++ b/parser/testdata/01621_sort_after_join_pipeline_stuck/query.sql @@ -0,0 +1,2 @@ +SET enable_positional_arguments = 0; +SELECT k FROM (SELECT NULL, nullIf(number, 3) AS k, '1048575', (65536, -9223372036854775808), toString(number) AS a FROM system.numbers LIMIT 1048577) AS js1 ANY RIGHT JOIN (SELECT 1.000100016593933, nullIf(number, NULL) AS k, toString(number) AS b FROM system.numbers LIMIT 2, 255) AS js2 USING (k) ORDER BY 257 ASC NULLS LAST FORMAT Null; diff --git a/parser/testdata/01621_summap_check_types/ast.json b/parser/testdata/01621_summap_check_types/ast.json new file mode 100644 index 000000000..098a9fe0c --- /dev/null +++ b/parser/testdata/01621_summap_check_types/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function initializeAggregation (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal 'sumMap'" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2]" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2]" + }, + { + "explain": " Literal Array_[UInt64_1, NULL]" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001580535, + "rows_read": 10, + "bytes_read": 414 + } +} diff --git a/parser/testdata/01621_summap_check_types/metadata.json b/parser/testdata/01621_summap_check_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01621_summap_check_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01621_summap_check_types/query.sql b/parser/testdata/01621_summap_check_types/query.sql new file mode 100644 index 000000000..b46ae2d30 --- /dev/null +++ b/parser/testdata/01621_summap_check_types/query.sql @@ -0,0 +1,5 @@ +select initializeAggregation('sumMap', [1, 2], [1, 2], [1, null]); + +CREATE TEMPORARY TABLE sum_map_overflow (events Array(UInt8), counts Array(UInt8)); +INSERT INTO sum_map_overflow VALUES ([1], [255]), ([1], [2]); +SELECT [NULL], sumMapWithOverflow(events, [NULL], [[(NULL)]], counts) FROM sum_map_overflow; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/01622_byte_size/ast.json b/parser/testdata/01622_byte_size/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01622_byte_size/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01622_byte_size/metadata.json b/parser/testdata/01622_byte_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01622_byte_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01622_byte_size/query.sql b/parser/testdata/01622_byte_size/query.sql new file mode 100644 index 000000000..f73011f41 --- /dev/null +++ b/parser/testdata/01622_byte_size/query.sql @@ -0,0 +1,203 @@ +-- +-- byteSize +-- +select ''; +select '# byteSize'; + +-- numbers #0 -- +select ''; +select 'byteSize for numbers #0'; +drop table if exists test_byte_size_number0; +create table test_byte_size_number0 +( + key Int32, + u8 UInt8, + u16 UInt16, + u32 UInt32, + u64 UInt64, + u256 UInt256, + i8 Int8, + i16 Int16, + i32 Int32, + i64 Int64, + i128 Int128, + i256 Int256, + f32 Float32, + f64 Float64 +) engine MergeTree order by key; + +insert into test_byte_size_number0 values(1, 8, 16, 32, 64, 256, -8, -16, -32, -64, -128, -256, 32.32, 64.64); +insert into test_byte_size_number0 values(2, 8, 16, 32, 64, 256, -8, -16, -32, -64, -128, -256, 32.32, 64.64); + +select key, toTypeName(u8), byteSize(u8), toTypeName(u16), byteSize(u16), toTypeName(u32), byteSize(u32), toTypeName(u64), byteSize(u64), toTypeName(u256), byteSize(u256) from test_byte_size_number0 order by key; +select key, toTypeName(i8), byteSize(i8), toTypeName(i16), byteSize(i16), toTypeName(i32), byteSize(i32), toTypeName(i64), byteSize(i64), toTypeName(i128), byteSize(i128), toTypeName(u256), byteSize(u256) from test_byte_size_number0 order by key; +select key, toTypeName(f32), byteSize(f32), toTypeName(f64), byteSize(f64) from test_byte_size_number0 order by key; + +drop table if exists test_byte_size_number0; + + +-- numbers #1 -- +select ''; +select 'byteSize for numbers #1'; +drop table if exists test_byte_size_number1; +create table test_byte_size_number1 +( + key Int32, + date Date, + dt DateTime, + dt64 DateTime64(3), + en8 Enum8('a'=1, 'b'=2, 'c'=3, 'd'=4), + en16 Enum16('c'=100, 'l'=101, 'i'=102, 'ck'=103, 'h'=104, 'o'=105, 'u'=106, 's'=107, 'e'=108), + dec32 Decimal32(4), + dec64 Decimal64(8), + dec128 Decimal128(16), + dec256 Decimal256(16), + uuid UUID +) engine MergeTree order by key; + +insert into test_byte_size_number1 values(1, '2020-01-01', '2020-01-01 01:02:03', '2020-02-02 01:02:03', 'a', 'ck', 32.32, 64.64, 128.128, 256.256, generateUUIDv4()); +insert into test_byte_size_number1 values(2, '2020-01-01', '2020-01-01 01:02:03', '2020-02-02 01:02:03', 'a', 'ck', 32.32, 64.64, 128.128, 256.256, generateUUIDv4()); + +select key, 
byteSize(*), toTypeName(date), byteSize(date), toTypeName(dt), byteSize(dt), toTypeName(dt64), byteSize(dt64), toTypeName(uuid), byteSize(uuid) from test_byte_size_number1 order by key; + +drop table if exists test_byte_size_number1; + + +-- constant numbers -- +select ''; +select 'byteSize for constants'; +select 0x1, byteSize(0x1), 0x100, byteSize(0x100), 0x10000, byteSize(0x10000), 0x100000000, byteSize(0x100000000), 0.5, byteSize(0.5), 1e-10, byteSize(1e-10); +select toDate('2020-01-01'), byteSize(toDate('2020-01-01')), toDateTime('2020-01-01 01:02:03'), byteSize(toDateTime('2020-01-01 01:02:03')), toDateTime64('2020-01-01 01:02:03',3), byteSize(toDateTime64('2020-01-01 01:02:03',3)); +select toTypeName(generateUUIDv4()), byteSize(generateUUIDv4()); + + +-- strings -- +select ''; +select 'byteSize for strings'; +drop table if exists test_byte_size_string; +create table test_byte_size_string +( + key Int32, + str1 String, + str2 String, + fstr1 FixedString(8), + fstr2 FixedString(8) +) engine MergeTree order by key; + +insert into test_byte_size_string values(1, '', 'a', '', 'abcde'); +insert into test_byte_size_string values(2, 'abced', '', 'abcde', ''); + +select key, byteSize(*), str1, byteSize(str1), str2, byteSize(str2), fstr1, byteSize(fstr1), fstr2, byteSize(fstr2) from test_byte_size_string order by key; +select 'constants: ', '', byteSize(''), 'a', byteSize('a'), 'abcde', byteSize('abcde'); + +drop table if exists test_byte_size_string; + + +-- simple arrays -- +drop table if exists test_byte_size_array; +create table test_byte_size_array +( + key Int32, + uints8 Array(UInt8), + ints8 Array(Int8), + ints32 Array(Int32), + floats32 Array(Float32), + decs32 Array(Decimal32(4)), + dates Array(Date), + uuids Array(UUID) +) engine MergeTree order by key; + +insert into test_byte_size_array values(1, [], [], [], [], [], [], []); +insert into test_byte_size_array values(2, [1], [-1], [256], [1.1], [1.1], ['2020-01-01'], ['61f0c404-5cb3-11e7-907b-a6006ad3dba0']); +insert into test_byte_size_array values(3, [1,1], [-1,-1], [256,256], [1.1,1.1], [1.1,1.1], ['2020-01-01','2020-01-01'], ['61f0c404-5cb3-11e7-907b-a6006ad3dba0','61f0c404-5cb3-11e7-907b-a6006ad3dba0']); +insert into test_byte_size_array values(4, [1,1,1], [-1,-1,-1], [256,256,256], [1.1,1.1,1.1], [1.1,1.1,1.1], ['2020-01-01','2020-01-01','2020-01-01'], ['61f0c404-5cb3-11e7-907b-a6006ad3dba0','61f0c404-5cb3-11e7-907b-a6006ad3dba0','61f0c404-5cb3-11e7-907b-a6006ad3dba0']); + +select ''; +select 'byteSize for simple array'; +select key, byteSize(*), uints8, byteSize(uints8), ints8, byteSize(ints8), ints32, byteSize(ints32), floats32, byteSize(floats32), decs32, byteSize(decs32), dates, byteSize(dates), uuids, byteSize(uuids) from test_byte_size_array order by key; + +select 'constants:', [], byteSize([]), [1,1], byteSize([1,1]), [-1,-1], byteSize([-1,-1]), toTypeName([256,256]), byteSize([256,256]), toTypeName([1.1,1.1]), byteSize([1.1,1.1]); +select 'constants:', [toDecimal32(1.1,4),toDecimal32(1.1,4)], byteSize([toDecimal32(1.1,4),toDecimal32(1.1,4)]), [toDate('2020-01-01'),toDate('2020-01-01')], byteSize([toDate('2020-01-01'),toDate('2020-01-01')]); +select 'constants:', [toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0'),toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0')], byteSize([toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0'),toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0')]); + +drop table if exists test_byte_size_array; + + +-- complex arrays -- +drop table if exists test_byte_size_complex_array; +create table 
test_byte_size_complex_array +( + key Int32, + ints Array(Int32), + int_ints Array(Array(Int32)), + strs Array(String), + str_strs Array(Array(String)) +) engine MergeTree order by key; + +insert into test_byte_size_complex_array values(1, [], [[]], [], [[]]); +insert into test_byte_size_complex_array values(2, [1,2], [[], [1,2]], [''], [[], ['']]); +insert into test_byte_size_complex_array values(3, [0,256], [[], [1,2], [0,256]], ['','a'], [[], [''], ['','a']]); +insert into test_byte_size_complex_array values(4, [256,65536], [[], [1,2], [0,256], [256,65536]], ['','a','abced'], [[], [''], ['','a'], ['','a','abced']]); + +select ''; +select 'byteSize for int array of arrays'; +select key, byteSize(*), ints, byteSize(ints), int_ints, byteSize(int_ints) from test_byte_size_complex_array order by key; +select 'constants:', [[], [1,2], [0,0x10000]],toTypeName([[], [1,2], [0,0x10000]]), byteSize([[], [1,2], [0,0x10000]]); + +select ''; +select 'byteSize for string array of arrays'; +-- select key, byteSize(*), strs, byteSize(strs), str_strs, byteSize(str_strs) from test_byte_size_complex_array order by key; +select key, byteSize(*), strs, byteSize(strs), str_strs, byteSize(str_strs) from test_byte_size_complex_array order by key; +select 'constants:', [[], [''], ['','a']], byteSize([[], [''], ['','a']]); + +drop table if exists test_byte_size_complex_array; + + +-- others -- +drop table if exists test_byte_size_other; +create table test_byte_size_other +( + key Int32, + opt_int32 Nullable(Int32), + opt_str Nullable(String), + tuple Tuple(Int32, Nullable(String)), + strings LowCardinality(String) +) engine MergeTree order by key; + +insert into test_byte_size_other values(1, NULL, NULL, tuple(1, NULL), ''); +insert into test_byte_size_other values(2, 1, 'a', tuple(1, 'a'), 'a'); +insert into test_byte_size_other values(3, 256, 'abcde', tuple(256, 'abcde'), 'abcde'); + +select ''; +select 'byteSize for others: Nullable, Tuple, LowCardinality'; +select key, byteSize(*), opt_int32, byteSize(opt_int32), opt_str, byteSize(opt_str), tuple, byteSize(tuple), strings, byteSize(strings) from test_byte_size_other order by key; +select 'constants:', NULL, byteSize(NULL), tuple(0x10000, NULL), byteSize(tuple(0x10000, NULL)), tuple(0x10000, toNullable('a')), byteSize(tuple(0x10000, toNullable('a'))); +select 'constants:', toLowCardinality('abced'),toTypeName(toLowCardinality('abced')), byteSize(toLowCardinality('abced')); + +drop table if exists test_byte_size_other; + + +-- more complex fields -- +drop table if exists test_byte_size_more_complex; +create table test_byte_size_more_complex +( + key Int32, + complex1 Array(Tuple(Nullable(FixedString(4)), Array(Tuple(Nullable(String), String)))) +) engine MergeTree order by key; + +insert into test_byte_size_more_complex values(1, []); +insert into test_byte_size_more_complex values(2, [tuple(NULL, [])]); +insert into test_byte_size_more_complex values(3, [tuple('a', [])]); +insert into test_byte_size_more_complex values(4, [tuple('a', [tuple(NULL, 'a')])]); +insert into test_byte_size_more_complex values(5, [tuple('a', [tuple(NULL, 'a'), tuple(NULL, 'a')])]); +insert into test_byte_size_more_complex values(6, [tuple(NULL, []), tuple('a', []), tuple('a', [tuple(NULL, 'a')]), tuple('a', [tuple(NULL, 'a'), tuple(NULL, 'a')])]); + +select ''; +select 'byteSize for complex fields'; +select key, byteSize(*), complex1, byteSize(complex1) from test_byte_size_more_complex order by key; +select 'constants:', tuple(NULL, []), byteSize(tuple(NULL, [])), 
tuple(toNullable(toFixedString('a',4)), []), byteSize(tuple(toNullable(toFixedString('a',4)), [])), tuple(toNullable(toFixedString('a',4)), [tuple(NULL, 'a')]), byteSize(tuple(toNullable(toFixedString('a',4)), [tuple(NULL, 'a')])), tuple(toFixedString('a',4), [tuple(NULL, 'a'), tuple(NULL, 'a')]), byteSize(tuple(toNullable(toFixedString('a',4)), [tuple(NULL, 'a'), tuple(NULL, 'a')])); +select 'constants:', [tuple(NULL, []), tuple(toNullable(toFixedString('a',4)), []), tuple(toNullable(toFixedString('a',4)), [tuple(NULL, 'a')]), tuple(toNullable(toFixedString('a',4)), [tuple(NULL, 'a'), tuple(NULL, 'a')])]; +select 'constants:', toTypeName([tuple(NULL, []), tuple(toNullable(toFixedString('a',4)), []), tuple(toNullable(toFixedString('a',4)), [tuple(NULL, 'a')]), tuple(toNullable(toFixedString('a',4)), [tuple(NULL, 'a'), tuple(NULL, 'a')])]); +select 'constants:', byteSize([tuple(NULL, []), tuple(toNullable(toFixedString('a',4)), []), tuple(toNullable(toFixedString('a',4)), [tuple(NULL, 'a')]), tuple(toNullable(toFixedString('a',4)), [tuple(NULL, 'a'), tuple(NULL, 'a')])]); + +drop table if exists test_byte_size_more_complex; diff --git a/parser/testdata/01622_codec_zstd_long/ast.json b/parser/testdata/01622_codec_zstd_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01622_codec_zstd_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01622_codec_zstd_long/metadata.json b/parser/testdata/01622_codec_zstd_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01622_codec_zstd_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01622_codec_zstd_long/query.sql b/parser/testdata/01622_codec_zstd_long/query.sql new file mode 100644 index 000000000..c42cc1d8d --- /dev/null +++ b/parser/testdata/01622_codec_zstd_long/query.sql @@ -0,0 +1,31 @@ +-- Tags: long + +DROP TABLE IF EXISTS zstd_1_00; +DROP TABLE IF EXISTS zstd_1_24; +DROP TABLE IF EXISTS zstd_9_00; +DROP TABLE IF EXISTS zstd_9_24; +DROP TABLE IF EXISTS words; + +CREATE TABLE words(i Int, word String) ENGINE = Memory; +INSERT INTO words SELECT * FROM generateRandom('i Int, word String',1,10) LIMIT 1 BY i LIMIT 10000; + +CREATE TABLE zstd_1_00(n Int, b String CODEC(ZSTD(1))) ENGINE = MergeTree ORDER BY n; +CREATE TABLE zstd_1_24(n Int, b String CODEC(ZSTD(1,24))) ENGINE = MergeTree ORDER BY n; +CREATE TABLE zstd_9_00(n Int, b String CODEC(ZSTD(9))) ENGINE = MergeTree ORDER BY n; +CREATE TABLE zstd_9_24(n Int, b String CODEC(ZSTD(9,24))) ENGINE = MergeTree ORDER BY n; + +INSERT INTO zstd_1_00 SELECT * FROM words; +INSERT INTO zstd_1_24 SELECT * FROM words; +INSERT INTO zstd_9_00 SELECT * FROM words; +INSERT INTO zstd_9_24 SELECT * FROM words; + +SELECT COUNT(n) FROM zstd_1_00 LEFT JOIN words ON i == n WHERE b == word; +SELECT COUNT(n) FROM zstd_1_24 LEFT JOIN words ON i == n WHERE b == word; +SELECT COUNT(n) FROM zstd_9_00 LEFT JOIN words ON i == n WHERE b == word; +SELECT COUNT(n) FROM zstd_9_24 LEFT JOIN words ON i == n WHERE b == word; + +DROP TABLE zstd_1_00; +DROP TABLE zstd_1_24; +DROP TABLE zstd_9_00; +DROP TABLE zstd_9_24; +DROP TABLE words; diff --git a/parser/testdata/01622_constraints_simple_optimization/ast.json b/parser/testdata/01622_constraints_simple_optimization/ast.json new file mode 100644 index 000000000..48707c6c2 --- /dev/null +++ b/parser/testdata/01622_constraints_simple_optimization/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], 
+ + "data": + [ + { + "explain": "DropQuery constraint_test_assumption (children 1)" + }, + { + "explain": " Identifier constraint_test_assumption" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001374778, + "rows_read": 2, + "bytes_read": 104 + } +} diff --git a/parser/testdata/01622_constraints_simple_optimization/metadata.json b/parser/testdata/01622_constraints_simple_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01622_constraints_simple_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01622_constraints_simple_optimization/query.sql b/parser/testdata/01622_constraints_simple_optimization/query.sql new file mode 100644 index 000000000..e549467de --- /dev/null +++ b/parser/testdata/01622_constraints_simple_optimization/query.sql @@ -0,0 +1,110 @@ +DROP TABLE IF EXISTS constraint_test_assumption; +DROP TABLE IF EXISTS constraint_test_transitivity; +DROP TABLE IF EXISTS constraint_test_transitivity2; +DROP TABLE IF EXISTS constraint_test_transitivity3; +DROP TABLE IF EXISTS constraint_test_constants_repl; +DROP TABLE IF EXISTS constraint_test_constants; + +SET convert_query_to_cnf = 1; +SET optimize_using_constraints = 1; +SET optimize_move_to_prewhere = 1; +SET optimize_substitute_columns = 1; +SET optimize_append_index = 1; + +CREATE TABLE constraint_test_assumption (URL String, a Int32, CONSTRAINT c1 ASSUME domainWithoutWWW(URL) = 'bigmir.net', CONSTRAINT c2 ASSUME URL > 'zzz' AND startsWith(URL, 'test') = True) ENGINE = TinyLog; + +--- Add wrong rows in order to check optimization +INSERT INTO constraint_test_assumption (URL, a) VALUES ('1', 1); +INSERT INTO constraint_test_assumption (URL, a) VALUES ('2', 2); +INSERT INTO constraint_test_assumption (URL, a) VALUES ('bigmir.net', 3); +INSERT INTO constraint_test_assumption (URL, a) VALUES ('3', 4); + +SELECT count() FROM constraint_test_assumption WHERE domainWithoutWWW(URL) = 'bigmir.net'; --- assumption -> 4 +SELECT count() FROM constraint_test_assumption WHERE NOT (domainWithoutWWW(URL) = 'bigmir.net'); --- assumption -> 0 +SELECT count() FROM constraint_test_assumption WHERE domainWithoutWWW(URL) != 'bigmir.net'; --- assumption -> 0 +SELECT count() FROM constraint_test_assumption WHERE domainWithoutWWW(URL) = 'nothing'; --- not optimized -> 0 + +SELECT count() FROM constraint_test_assumption WHERE (domainWithoutWWW(URL) = 'bigmir.net' AND URL > 'zzz'); ---> assumption -> 4 +SELECT count() FROM constraint_test_assumption WHERE (domainWithoutWWW(URL) = 'bigmir.net' AND NOT URL <= 'zzz'); ---> assumption -> 4 +SELECT count() FROM constraint_test_assumption WHERE (domainWithoutWWW(URL) = 'bigmir.net' AND URL > 'zzz') OR (a = 10 AND a + 5 < 100); ---> assumption -> 4 +SELECT count() FROM constraint_test_assumption WHERE (domainWithoutWWW(URL) = 'bigmir.net' AND URL = '111'); ---> assumption & no assumption -> 0 +SELECT count() FROM constraint_test_assumption WHERE (startsWith(URL, 'test') = True); ---> assumption -> 4 + +DROP TABLE constraint_test_assumption; + +CREATE TABLE constraint_test_transitivity (a Int64, b Int64, c Int64, d Int32, CONSTRAINT c1 ASSUME a = b AND c = d, CONSTRAINT c2 ASSUME b = c) ENGINE = TinyLog; + +INSERT INTO constraint_test_transitivity (a, b, c, d) VALUES (1, 2, 3, 4); + +SELECT count() FROM constraint_test_transitivity WHERE a = d; ---> assumption -> 1 + +DROP TABLE constraint_test_transitivity; + +CREATE TABLE constraint_test_strong_connectivity (a String, b String, c String, d String, 
CONSTRAINT c1 ASSUME a <= b AND b <= c AND c <= d AND d <= a) ENGINE = TinyLog; + +INSERT INTO constraint_test_strong_connectivity (a, b, c, d) VALUES ('1', '2', '3', '4'); + +SELECT count() FROM constraint_test_strong_connectivity WHERE a = d; ---> assumption -> 1 +SELECT count() FROM constraint_test_strong_connectivity WHERE a = c AND b = d; ---> assumption -> 1 +SELECT count() FROM constraint_test_strong_connectivity WHERE a < c OR b < d; ---> assumption -> 0 +SELECT count() FROM constraint_test_strong_connectivity WHERE a <= c OR b <= d; ---> assumption -> 1 + +DROP TABLE constraint_test_strong_connectivity; + +CREATE TABLE constraint_test_transitivity2 (a String, b String, c String, d String, CONSTRAINT c1 ASSUME a > b AND b >= c AND c > d AND a >= d) ENGINE = TinyLog; + +INSERT INTO constraint_test_transitivity2 (a, b, c, d) VALUES ('1', '2', '3', '4'); + +SELECT count() FROM constraint_test_transitivity2 WHERE a > d; ---> assumption -> 1 +SELECT count() FROM constraint_test_transitivity2 WHERE a >= d; ---> assumption -> 1 +SELECT count() FROM constraint_test_transitivity2 WHERE d < a; ---> assumption -> 1 +SELECT count() FROM constraint_test_transitivity2 WHERE a < d; ---> assumption -> 0 +SELECT count() FROM constraint_test_transitivity2 WHERE a = d; ---> assumption -> 0 +SELECT count() FROM constraint_test_transitivity2 WHERE a != d; ---> assumption -> 1 + +DROP TABLE constraint_test_transitivity2; + +CREATE TABLE constraint_test_transitivity3 (a Int64, b Int64, c Int64, CONSTRAINT c1 ASSUME b > 10 AND 1 > a) ENGINE = TinyLog; + +INSERT INTO constraint_test_transitivity3 (a, b, c) VALUES (4, 0, 2); + +SELECT count() FROM constraint_test_transitivity3 WHERE a < b; ---> assumption -> 1 +SELECT count() FROM constraint_test_transitivity3 WHERE b >= a; ---> assumption -> 1 + +DROP TABLE constraint_test_transitivity3; + +CREATE TABLE constraint_test_constants_repl (a Int64, b Int64, c Int64, d Int64, CONSTRAINT c1 ASSUME a - b = 10 AND c + d = 20) ENGINE = TinyLog; + +INSERT INTO constraint_test_constants_repl (a, b, c, d) VALUES (1, 2, 3, 4); + +SELECT count() FROM constraint_test_constants_repl WHERE a - b = 10; ---> assumption -> 1 +SELECT count() FROM constraint_test_constants_repl WHERE a - b < 0; ---> assumption -> 0 +SELECT count() FROM constraint_test_constants_repl WHERE a - b = c + d; ---> assumption -> 0 +SELECT count() FROM constraint_test_constants_repl WHERE (a - b) * 2 = c + d; ---> assumption -> 1 + +DROP TABLE constraint_test_constants_repl; + +CREATE TABLE constraint_test_constants (a Int64, b Int64, c Int64, CONSTRAINT c1 ASSUME b > 10 AND a >= 10) ENGINE = TinyLog; + +INSERT INTO constraint_test_constants (a, b, c) VALUES (0, 0, 0); + +SELECT count() FROM constraint_test_constants WHERE 9 < b; ---> assumption -> 1 +SELECT count() FROM constraint_test_constants WHERE 11 < b; ---> assumption -> 0 +SELECT count() FROM constraint_test_constants WHERE 10 <= b; ---> assumption -> 1 +SELECT count() FROM constraint_test_constants WHERE 9 < a; ---> assumption -> 1 +SELECT count() FROM constraint_test_constants WHERE 10 < a; ---> assumption -> 0 +SELECT count() FROM constraint_test_constants WHERE 10 <= a; ---> assumption -> 1 +SELECT count() FROM constraint_test_constants WHERE 9 <= a; ---> assumption -> 1 +SELECT count() FROM constraint_test_constants WHERE 11 <= a; ---> assumption -> 0 + +-- A AND NOT A +EXPLAIN SYNTAX SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100); +-- EXPLAIN QUERY TREE SELECT 
count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100); ---> the order of the generated checks is not consistent +EXPLAIN SYNTAX SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100) AND (NOT b > 100 OR c > 100); +EXPLAIN QUERY TREE SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100) AND (NOT b > 100 OR c > 100) SETTINGS enable_analyzer = 1; +EXPLAIN SYNTAX SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100) AND (NOT b > 100 OR c > 100) AND (c > 100); +EXPLAIN QUERY TREE SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100) AND (NOT b > 100 OR c > 100) AND (c > 100) SETTINGS enable_analyzer = 1; +EXPLAIN SYNTAX SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100) AND (NOT b > 100 OR c > 100) AND (c <= 100); +EXPLAIN QUERY TREE SELECT count() FROM constraint_test_constants WHERE (a > 100 OR b > 100 OR c > 100) AND (a <= 100 OR b > 100 OR c > 100) AND (NOT b > 100 OR c > 100) AND (c <= 100) SETTINGS enable_analyzer = 1; + +DROP TABLE constraint_test_constants; diff --git a/parser/testdata/01622_constraints_where_optimization/ast.json b/parser/testdata/01622_constraints_where_optimization/ast.json new file mode 100644 index 000000000..46f148097 --- /dev/null +++ b/parser/testdata/01622_constraints_where_optimization/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001169336, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01622_constraints_where_optimization/metadata.json b/parser/testdata/01622_constraints_where_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01622_constraints_where_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01622_constraints_where_optimization/query.sql b/parser/testdata/01622_constraints_where_optimization/query.sql new file mode 100644 index 000000000..63803ec8c --- /dev/null +++ b/parser/testdata/01622_constraints_where_optimization/query.sql @@ -0,0 +1,31 @@ +SET convert_query_to_cnf = 1; +SET optimize_using_constraints = 1; +SET optimize_append_index = 0; + +DROP TABLE IF EXISTS t_constraints_where; + +CREATE TABLE t_constraints_where(a UInt32, b UInt32, CONSTRAINT c1 ASSUME b >= 5, CONSTRAINT c2 ASSUME b <= 10) ENGINE = Memory; + +INSERT INTO t_constraints_where VALUES (1, 7); + +EXPLAIN SYNTAX SELECT count() FROM t_constraints_where WHERE b > 15; -- assumption -> 0 +EXPLAIN QUERY TREE SELECT count() FROM t_constraints_where WHERE b > 15 SETTINGS enable_analyzer = 1; -- assumption -> 0 +EXPLAIN SYNTAX SELECT count() FROM t_constraints_where WHERE b = 20; -- assumption -> 0 +EXPLAIN QUERY TREE SELECT count() FROM t_constraints_where WHERE b = 20 SETTINGS enable_analyzer = 1; -- assumption -> 0 +EXPLAIN SYNTAX SELECT count() FROM t_constraints_where WHERE b < 2; -- assumption -> 0 +EXPLAIN QUERY TREE SELECT count() FROM t_constraints_where WHERE b < 2 SETTINGS enable_analyzer = 1; -- assumption -> 0 +EXPLAIN SYNTAX SELECT count() FROM t_constraints_where WHERE b > 20 OR b < 8; -- assumption -> remove (b > 20)
+EXPLAIN QUERY TREE SELECT count() FROM t_constraints_where WHERE b > 20 OR b < 8 SETTINGS enable_analyzer = 1; -- assumption -> remove (b > 20) +EXPLAIN SYNTAX SELECT count() FROM t_constraints_where PREWHERE b > 20 OR b < 8; -- assumption -> remove (b > 20) +EXPLAIN QUERY TREE SELECT count() FROM t_constraints_where PREWHERE b > 20 OR b < 8 SETTINGS enable_analyzer = 1; -- assumption -> remove (b > 20) + +DROP TABLE t_constraints_where; + +CREATE TABLE t_constraints_where(a UInt32, b UInt32, CONSTRAINT c1 ASSUME b < 10) ENGINE = Memory; + +INSERT INTO t_constraints_where VALUES (1, 7); + +EXPLAIN SYNTAX SELECT count() FROM t_constraints_where WHERE b = 1 OR b < 18 OR b > 5; -- assumption -> (b < 20) -> 0; +EXPLAIN QUERY TREE SELECT count() FROM t_constraints_where WHERE b = 1 OR b < 18 OR b > 5 SETTINGS enable_analyzer = 1; -- assumption -> (b < 20) -> 0; + +DROP TABLE t_constraints_where; diff --git a/parser/testdata/01622_defaults_for_file_engine/ast.json b/parser/testdata/01622_defaults_for_file_engine/ast.json new file mode 100644 index 000000000..806af8ba5 --- /dev/null +++ b/parser/testdata/01622_defaults_for_file_engine/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "InsertQuery (children 1)" + }, + { + "explain": " Function file (children 1)" + }, + { + "explain": "  ExpressionList (children 3)" + }, + { + "explain": "   Identifier data1622.json" + }, + { + "explain": "   Identifier TSV" + }, + { + "explain": "   Identifier value String" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001168101, + "rows_read": 6, + "bytes_read": 200 + } +} diff --git a/parser/testdata/01622_defaults_for_file_engine/metadata.json b/parser/testdata/01622_defaults_for_file_engine/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01622_defaults_for_file_engine/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01622_defaults_for_file_engine/query.sql b/parser/testdata/01622_defaults_for_file_engine/query.sql new file mode 100644 index 000000000..203486fe7 --- /dev/null +++ b/parser/testdata/01622_defaults_for_file_engine/query.sql @@ -0,0 +1,7 @@ +insert into table function file("data1622.json", "TSV", "value String") VALUES ('{"a":1}'); +drop table if exists json; +create table json(a int, b int default 7, c default a + b) engine File(JSONEachRow, 'data1622.json'); +set input_format_defaults_for_omitted_fields = 1; +select * from json; +truncate table json; +drop table if exists json; diff --git a/parser/testdata/01622_multiple_ttls/ast.json b/parser/testdata/01622_multiple_ttls/ast.json new file mode 100644 index 000000000..b716add3e --- /dev/null +++ b/parser/testdata/01622_multiple_ttls/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 1)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Literal 'TTL WHERE'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001721574, + "rows_read": 5, + "bytes_read": 180 + } +} diff --git a/parser/testdata/01622_multiple_ttls/metadata.json b/parser/testdata/01622_multiple_ttls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01622_multiple_ttls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git
a/parser/testdata/01622_multiple_ttls/query.sql b/parser/testdata/01622_multiple_ttls/query.sql new file mode 100644 index 000000000..aa2eeb575 --- /dev/null +++ b/parser/testdata/01622_multiple_ttls/query.sql @@ -0,0 +1,44 @@ +SELECT 'TTL WHERE'; +DROP TABLE IF EXISTS ttl_where; + +CREATE TABLE ttl_where +( + `d` Date, + `i` UInt32 +) +ENGINE = MergeTree +ORDER BY tuple() +TTL d + toIntervalYear(10) DELETE WHERE i % 3 = 0, + d + toIntervalYear(40) DELETE WHERE i % 3 = 1; + +-- This test will fail at 2040-10-10 + +INSERT INTO ttl_where SELECT toDate('2000-10-10'), number FROM numbers(10); +INSERT INTO ttl_where SELECT toDate('1970-10-10'), number FROM numbers(10); +OPTIMIZE TABLE ttl_where FINAL; + +SELECT * FROM ttl_where ORDER BY d, i; + +DROP TABLE ttl_where; + +SELECT 'TTL GROUP BY'; +DROP TABLE IF EXISTS ttl_group_by; + +CREATE TABLE ttl_group_by +( + `d` Date, + `i` UInt32, + `v` UInt64 +) +ENGINE = MergeTree +ORDER BY (toStartOfMonth(d), i % 10) +TTL d + toIntervalYear(10) GROUP BY toStartOfMonth(d), i % 10 SET d = any(toStartOfMonth(d)), i = any(i % 10), v = sum(v), + d + toIntervalYear(40) GROUP BY toStartOfMonth(d) SET d = any(toStartOfMonth(d)), v = sum(v); + +INSERT INTO ttl_group_by SELECT toDate('2000-10-10'), number, number FROM numbers(100); +INSERT INTO ttl_group_by SELECT toDate('1970-10-10'), number, number FROM numbers(100); +OPTIMIZE TABLE ttl_group_by FINAL; + +SELECT * FROM ttl_group_by ORDER BY d, i; + +DROP TABLE ttl_group_by; diff --git a/parser/testdata/01623_byte_size_const/ast.json b/parser/testdata/01623_byte_size_const/ast.json new file mode 100644 index 000000000..fb9760177 --- /dev/null +++ b/parser/testdata/01623_byte_size_const/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function byteSize (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_123" + }, + { + "explain": " Literal Float64_456.7" + }, + { + "explain": " Function isConstant (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001459365, + "rows_read": 11, + "bytes_read": 418 + } +} diff --git a/parser/testdata/01623_byte_size_const/metadata.json b/parser/testdata/01623_byte_size_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01623_byte_size_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01623_byte_size_const/query.sql b/parser/testdata/01623_byte_size_const/query.sql new file mode 100644 index 000000000..584f67be8 --- /dev/null +++ b/parser/testdata/01623_byte_size_const/query.sql @@ -0,0 +1,3 @@ +SELECT byteSize(123, 456.7) AS x, isConstant(x); +SELECT byteSize(number, number + 1) AS x, isConstant(x) FROM numbers(2); +SELECT byteSize(); diff --git a/parser/testdata/01623_constraints_column_swap/ast.json b/parser/testdata/01623_constraints_column_swap/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01623_constraints_column_swap/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01623_constraints_column_swap/metadata.json 
b/parser/testdata/01623_constraints_column_swap/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01623_constraints_column_swap/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01623_constraints_column_swap/query.sql b/parser/testdata/01623_constraints_column_swap/query.sql new file mode 100644 index 000000000..5b5a1754a --- /dev/null +++ b/parser/testdata/01623_constraints_column_swap/query.sql @@ -0,0 +1,65 @@ +-- Tags: no-random-merge-tree-settings + +SET convert_query_to_cnf = 1; +SET optimize_using_constraints = 1; +SET optimize_move_to_prewhere = 1; +SET optimize_substitute_columns = 1; +SET optimize_append_index = 1; +SET optimize_trivial_insert_select = 1; + +DROP TABLE IF EXISTS column_swap_test_test; + +CREATE TABLE column_swap_test_test (i Int64, a String, b UInt64, CONSTRAINT c1 ASSUME b = cityHash64(a)) +ENGINE = MergeTree() ORDER BY i +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO column_swap_test_test VALUES (1, 'cat', 1), (2, 'dog', 2); +INSERT INTO column_swap_test_test SELECT number AS i, format('test {} kek {}', toString(number), toString(number + 10)) AS a, 1 AS b FROM system.numbers LIMIT 1000000; + +EXPLAIN SYNTAX SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE cityHash64(a) = 1; +EXPLAIN QUERY TREE SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE cityHash64(a) = 1 SETTINGS enable_analyzer = 1; +EXPLAIN SYNTAX SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test PREWHERE cityHash64(a) = 1; +EXPLAIN QUERY TREE SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test PREWHERE cityHash64(a) = 1 SETTINGS enable_analyzer = 1; +EXPLAIN SYNTAX SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE cityHash64(a) = 0; +EXPLAIN QUERY TREE SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE cityHash64(a) = 0 SETTINGS enable_analyzer = 1; +EXPLAIN SYNTAX SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE b = 0; +EXPLAIN QUERY TREE SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE b = 0 SETTINGS enable_analyzer = 1; +EXPLAIN SYNTAX SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE b = 1; +EXPLAIN QUERY TREE SELECT cityHash64(a) + 10, b + 3 FROM column_swap_test_test WHERE b = 1 SETTINGS enable_analyzer = 1; + +EXPLAIN SYNTAX SELECT cityHash64(a) + 10 FROM column_swap_test_test WHERE cityHash64(a) = 0; +EXPLAIN QUERY TREE SELECT cityHash64(a) + 10 FROM column_swap_test_test WHERE cityHash64(a) = 0 SETTINGS enable_analyzer = 1; +EXPLAIN SYNTAX SELECT cityHash64(a) + 10, a FROM column_swap_test_test WHERE cityHash64(a) = 0; +EXPLAIN QUERY TREE SELECT cityHash64(a) + 10, a FROM column_swap_test_test WHERE cityHash64(a) = 0 SETTINGS enable_analyzer = 1; +EXPLAIN SYNTAX SELECT b + 10, a FROM column_swap_test_test WHERE b = 0; +EXPLAIN QUERY TREE SELECT b + 10, a FROM column_swap_test_test WHERE b = 0 SETTINGS enable_analyzer = 1; + +DROP TABLE column_swap_test_test; + +CREATE TABLE column_swap_test_test (i Int64, a String, b String, CONSTRAINT c1 ASSUME a = substring(reverse(b), 1, 1)) +ENGINE = MergeTree() ORDER BY i +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO column_swap_test_test SELECT number AS i, toString(number) AS a, format('test {} kek {}', toString(number), toString(number + 10)) b FROM system.numbers LIMIT 1000000; + +EXPLAIN SYNTAX SELECT substring(reverse(b), 1, 1), a FROM column_swap_test_test WHERE a = 'c'; +EXPLAIN QUERY TREE SELECT substring(reverse(b), 1, 1), a FROM 
column_swap_test_test WHERE a = 'c' SETTINGS enable_analyzer = 1; +EXPLAIN SYNTAX SELECT substring(reverse(b), 1, 1), a FROM column_swap_test_test WHERE substring(reverse(b), 1, 1) = 'c'; +EXPLAIN QUERY TREE SELECT substring(reverse(b), 1, 1), a FROM column_swap_test_test WHERE substring(reverse(b), 1, 1) = 'c' SETTINGS enable_analyzer = 1; +EXPLAIN SYNTAX SELECT substring(reverse(b), 1, 1) AS t1, a AS t2 FROM column_swap_test_test WHERE substring(reverse(b), 1, 1) = 'c'; +EXPLAIN QUERY TREE SELECT substring(reverse(b), 1, 1) AS t1, a AS t2 FROM column_swap_test_test WHERE substring(reverse(b), 1, 1) = 'c' SETTINGS enable_analyzer = 1; +EXPLAIN SYNTAX SELECT substring(reverse(b), 1, 1) FROM column_swap_test_test WHERE substring(reverse(b), 1, 1) = 'c'; +EXPLAIN QUERY TREE SELECT substring(reverse(b), 1, 1) FROM column_swap_test_test WHERE substring(reverse(b), 1, 1) = 'c' SETTINGS enable_analyzer = 1; + +DROP TABLE column_swap_test_test; + +DROP TABLE IF EXISTS t_bad_constraint; + +CREATE TABLE t_bad_constraint(a UInt32, s String, CONSTRAINT c1 ASSUME a = toUInt32(s)) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_bad_constraint SELECT number, randomPrintableASCII(100) FROM numbers(10000); + +EXPLAIN SYNTAX SELECT a FROM t_bad_constraint; +EXPLAIN QUERY TREE SELECT a FROM t_bad_constraint SETTINGS enable_analyzer = 1; + +DROP TABLE t_bad_constraint; diff --git a/parser/testdata/01625_constraints_index_append/ast.json b/parser/testdata/01625_constraints_index_append/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01625_constraints_index_append/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01625_constraints_index_append/metadata.json b/parser/testdata/01625_constraints_index_append/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01625_constraints_index_append/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01625_constraints_index_append/query.sql b/parser/testdata/01625_constraints_index_append/query.sql new file mode 100644 index 000000000..1ae896fc4 --- /dev/null +++ b/parser/testdata/01625_constraints_index_append/query.sql @@ -0,0 +1,31 @@ +-- Tags: no-parallel + +-- CNF optimization uses QueryNodeHash to order conditions. 
We need fixed database.table.column identifier name to stabilize result +DROP DATABASE IF EXISTS db_memory_01625; +CREATE DATABASE db_memory_01625 ENGINE = Memory; +USE db_memory_01625; + +DROP TABLE IF EXISTS index_append_test_test; + +CREATE TABLE index_append_test_test (i Int64, a UInt32, b UInt64, CONSTRAINT c1 ASSUME i <= 2 * b AND i + 40 > a) ENGINE = MergeTree() ORDER BY i; + +INSERT INTO index_append_test_test VALUES (1, 10, 1), (2, 20, 2); + +SET convert_query_to_cnf = 1; +SET optimize_using_constraints = 1; +SET optimize_move_to_prewhere = 1; +SET optimize_substitute_columns = 1; +SET optimize_append_index = 1; + +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE a = 0) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' SETTINGS enable_analyzer=0; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE a < 0) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' SETTINGS enable_analyzer=0; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE a >= 0) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' SETTINGS enable_analyzer=0; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE 2 * b < 100) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' SETTINGS enable_analyzer=0; + +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE a = 0) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' SETTINGS enable_analyzer=1; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE a < 0) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' SETTINGS enable_analyzer=1; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE a >= 0) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' SETTINGS enable_analyzer=1; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT i FROM index_append_test_test WHERE 2 * b < 100) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' SETTINGS enable_analyzer=1; + +DROP TABLE index_append_test_test; +DROP DATABASE db_memory_01625; diff --git a/parser/testdata/01626_cnf_test/ast.json b/parser/testdata/01626_cnf_test/ast.json new file mode 100644 index 000000000..3afc900ca --- /dev/null +++ b/parser/testdata/01626_cnf_test/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00142471, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01626_cnf_test/metadata.json b/parser/testdata/01626_cnf_test/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01626_cnf_test/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01626_cnf_test/query.sql b/parser/testdata/01626_cnf_test/query.sql new file mode 100644 index 000000000..8db732bc2 --- /dev/null +++ b/parser/testdata/01626_cnf_test/query.sql @@ -0,0 +1,18 @@ +SET convert_query_to_cnf = 1; + +DROP TABLE IF EXISTS cnf_test; + +CREATE TABLE cnf_test (i Int64) ENGINE 
= MergeTree() ORDER BY i; + +EXPLAIN SYNTAX SELECT i FROM cnf_test WHERE NOT ((i > 1) OR (i > 2)); +EXPLAIN SYNTAX SELECT i FROM cnf_test WHERE NOT ((i > 1) AND (i > 2)); + +EXPLAIN SYNTAX SELECT i FROM cnf_test WHERE ((i > 1) AND (i > 2)) OR ((i > 3) AND (i > 4)) OR ((i > 5) AND (i > 6)); + +EXPLAIN SYNTAX SELECT i FROM cnf_test WHERE NOT (((i > 1) OR (i > 2)) AND ((i > 3) OR (i > 4)) AND ((i > 5) OR (i > 6))); + +EXPLAIN SYNTAX SELECT i FROM cnf_test WHERE ((i > 1) AND (i > 2) AND (i > 7)) OR ((i > 3) AND (i > 4) AND (i > 8)) OR ((i > 5) AND (i > 6)); + +EXPLAIN SYNTAX SELECT i FROM cnf_test WHERE ((i > 1) OR (i > 2) OR (i > 7)) AND ((i > 3) OR (i > 4) OR (i > 8)) AND NOT ((i > 5) OR (i > 6)); + +DROP TABLE cnf_test; diff --git a/parser/testdata/01630_disallow_floating_point_as_partition_key/ast.json b/parser/testdata/01630_disallow_floating_point_as_partition_key/ast.json new file mode 100644 index 000000000..becd10bcb --- /dev/null +++ b/parser/testdata/01630_disallow_floating_point_as_partition_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001415508, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01630_disallow_floating_point_as_partition_key/metadata.json b/parser/testdata/01630_disallow_floating_point_as_partition_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01630_disallow_floating_point_as_partition_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01630_disallow_floating_point_as_partition_key/query.sql b/parser/testdata/01630_disallow_floating_point_as_partition_key/query.sql new file mode 100644 index 000000000..96ba60a33 --- /dev/null +++ b/parser/testdata/01630_disallow_floating_point_as_partition_key/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (a Float32, b int) Engine = MergeTree() ORDER BY tuple() PARTITION BY a; -- { serverError BAD_ARGUMENTS } +CREATE TABLE test (a Float32, b int) Engine = MergeTree() ORDER BY tuple() PARTITION BY a settings allow_floating_point_partition_key=true; +DROP TABLE IF EXISTS test; +CREATE TABLE test (a Float32, b int, c String, d Float64) Engine = MergeTree() ORDER BY tuple() PARTITION BY (b, c, d) settings allow_floating_point_partition_key=false; -- { serverError BAD_ARGUMENTS } +CREATE TABLE test (a Float32, b int, c String, d Float64) Engine = MergeTree() ORDER BY tuple() PARTITION BY (b, c, d) settings allow_floating_point_partition_key=true; +DROP TABLE IF EXISTS test; diff --git a/parser/testdata/01630_simple_aggregate_all_functions_in_aggregating_merge_tree/ast.json b/parser/testdata/01630_simple_aggregate_all_functions_in_aggregating_merge_tree/ast.json new file mode 100644 index 000000000..6ff00b548 --- /dev/null +++ b/parser/testdata/01630_simple_aggregate_all_functions_in_aggregating_merge_tree/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery simple_agf_summing_mt (children 1)" + }, + { + "explain": " Identifier simple_agf_summing_mt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001290657, + "rows_read": 2, + "bytes_read": 94 + } +} diff --git a/parser/testdata/01630_simple_aggregate_all_functions_in_aggregating_merge_tree/metadata.json 
b/parser/testdata/01630_simple_aggregate_all_functions_in_aggregating_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01630_simple_aggregate_all_functions_in_aggregating_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01630_simple_aggregate_all_functions_in_aggregating_merge_tree/query.sql b/parser/testdata/01630_simple_aggregate_all_functions_in_aggregating_merge_tree/query.sql new file mode 100644 index 000000000..a3140075e --- /dev/null +++ b/parser/testdata/01630_simple_aggregate_all_functions_in_aggregating_merge_tree/query.sql @@ -0,0 +1,219 @@ +DROP TABLE IF EXISTS simple_agf_summing_mt; + +CREATE TABLE simple_agf_summing_mt +( + a Int64, + min_aggreg AggregateFunction(min, UInt64), + min_simple SimpleAggregateFunction(min, UInt64), + max_aggreg AggregateFunction(max, UInt64), + max_simple SimpleAggregateFunction(max, UInt64), + sum_aggreg AggregateFunction(sum, UInt64), + sum_simple SimpleAggregateFunction(sum, UInt64), + sumov_aggreg AggregateFunction(sumWithOverflow, UInt64), + sumov_simple SimpleAggregateFunction(sumWithOverflow, UInt64), + gbitand_aggreg AggregateFunction(groupBitAnd, UInt64), + gbitand_simple SimpleAggregateFunction(groupBitAnd, UInt64), + gbitor_aggreg AggregateFunction(groupBitOr, UInt64), + gbitor_simple SimpleAggregateFunction(groupBitOr, UInt64), + gbitxor_aggreg AggregateFunction(groupBitXor, UInt64), + gbitxor_simple SimpleAggregateFunction(groupBitXor, UInt64), + gra_aggreg AggregateFunction(groupArrayArray, Array(UInt64)), + gra_simple SimpleAggregateFunction(groupArrayArray, Array(UInt64)), + grp_aggreg AggregateFunction(groupUniqArrayArray, Array(UInt64)), + grp_simple SimpleAggregateFunction(groupUniqArrayArray, Array(UInt64)), + aggreg_map AggregateFunction(sumMap, Tuple(Array(String), Array(UInt64))), + simple_map SimpleAggregateFunction(sumMap, Tuple(Array(String), Array(UInt64))), + aggreg_map_min AggregateFunction(minMap, Tuple(Array(String), Array(UInt64))), + simple_map_min SimpleAggregateFunction(minMap, Tuple(Array(String), Array(UInt64))), + aggreg_map_max AggregateFunction(maxMap, Tuple(Array(String), Array(UInt64))), + simple_map_max SimpleAggregateFunction(maxMap, Tuple(Array(String), Array(UInt64))) +) +ENGINE = SummingMergeTree +ORDER BY a; + +INSERT INTO simple_agf_summing_mt SELECT + number % 51 AS a, + minState(number), + min(number), + maxState(number), + max(number), + sumState(number), + sum(number), + sumWithOverflowState(number), + sumWithOverflow(number), + groupBitAndState(number + 111111111), + groupBitAnd(number + 111111111), + groupBitOrState(number + 111111111), + groupBitOr(number + 111111111), + groupBitXorState(number + 111111111), + groupBitXor(number + 111111111), + groupArrayArrayState([toUInt64(number % 1000)]), + groupArrayArray([toUInt64(number % 1000)]), + groupUniqArrayArrayState([toUInt64(number % 500)]), + groupUniqArrayArray([toUInt64(number % 500)]), + sumMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + sumMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + minMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + minMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + maxMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + maxMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))) +FROM 
numbers(10000) +GROUP BY a; + +INSERT INTO simple_agf_summing_mt SELECT + number % 1151 AS a, + minState(number), + min(number), + maxState(number), + max(number), + sumState(number), + sum(number), + sumWithOverflowState(number), + sumWithOverflow(number), + groupBitAndState(number + 111111111), + groupBitAnd(number + 111111111), + groupBitOrState(number + 111111111), + groupBitOr(number + 111111111), + groupBitXorState(number + 111111111), + groupBitXor(number + 111111111), + groupArrayArrayState([toUInt64(number % 1000)]), + groupArrayArray([toUInt64(number % 1000)]), + groupUniqArrayArrayState([toUInt64(number % 500)]), + groupUniqArrayArray([toUInt64(number % 500)]), + sumMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + sumMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + minMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + minMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + maxMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + maxMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))) +FROM numbers(10000) +GROUP BY a; + +OPTIMIZE TABLE simple_agf_summing_mt FINAL; + +SELECT cityHash64(groupArray(cityHash64(*))) FROM ( + SELECT + a % 31 AS g, + minMerge(min_aggreg) AS minagg, + min(min_simple) AS mins, + minagg = mins AS M, + maxMerge(max_aggreg) AS maxagg, + max(max_simple) AS maxs, + maxagg = maxs AS MX, + sumMerge(sum_aggreg) AS sumagg, + sum(sum_simple) AS sums, + sumagg = sums AS S, + sumWithOverflowMerge(sumov_aggreg) AS sumaggov, + sumWithOverflow(sumov_simple) AS sumsov, + sumaggov = sumsov AS SO, + groupBitAndMerge(gbitand_aggreg) AS gbitandaggreg, + groupBitAnd(gbitand_simple) AS gbitandsimple, + gbitandaggreg = gbitandsimple AS BIT_AND, + groupBitOrMerge(gbitor_aggreg) AS gbitoraggreg, + groupBitOr(gbitor_simple) AS gbitorsimple, + gbitoraggreg = gbitorsimple AS BIT_OR, + groupBitXorMerge(gbitxor_aggreg) AS gbitxoraggreg, + groupBitXor(gbitxor_simple) AS gbitxorsimple, + gbitxoraggreg = gbitxorsimple AS BITXOR, + arraySort(groupArrayArrayMerge(gra_aggreg)) AS graa, + arraySort(groupArrayArray(gra_simple)) AS gras, + graa = gras AS GAA, + arraySort(groupUniqArrayArrayMerge(grp_aggreg)) AS gra, + arraySort(groupUniqArrayArray(grp_simple)) AS grs, + gra = grs AS T, + sumMapMerge(aggreg_map) AS smmapagg, + sumMap(simple_map) AS smmaps, + smmapagg = smmaps AS SM, + minMapMerge(aggreg_map_min) AS minmapapagg, + minMap(simple_map_min) AS minmaps, + minmapapagg = minmaps AS SMIN, + maxMapMerge(aggreg_map_max) AS maxmapapagg, + maxMap(simple_map_max) AS maxmaps, + maxmapapagg = maxmaps AS SMAX + FROM simple_agf_summing_mt + GROUP BY g + ORDER BY g +); + +SELECT '---mutation---'; + +ALTER TABLE simple_agf_summing_mt + DELETE WHERE (a % 3) = 0 +SETTINGS mutations_sync = 1; + +INSERT INTO simple_agf_summing_mt SELECT + number % 11151 AS a, + minState(number), + min(number), + maxState(number), + max(number), + sumState(number), + sum(number), + sumWithOverflowState(number), + sumWithOverflow(number), + groupBitAndState((number % 3) + 111111110), + groupBitAnd((number % 3) + 111111110), + groupBitOrState(number + 111111111), + groupBitOr(number + 111111111), + groupBitXorState(number + 111111111), + groupBitXor(number + 111111111), + groupArrayArrayState([toUInt64(number % 100)]), + groupArrayArray([toUInt64(number % 100)]), + 
groupUniqArrayArrayState([toUInt64(number % 50)]), + groupUniqArrayArray([toUInt64(number % 50)]), + sumMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + sumMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + minMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + minMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + maxMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + maxMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))) +FROM numbers(10000) +GROUP BY a; + +OPTIMIZE TABLE simple_agf_summing_mt FINAL; + +SELECT cityHash64(groupArray(cityHash64(*))) FROM ( + SELECT + a % 31 AS g, + minMerge(min_aggreg) AS minagg, + min(min_simple) AS mins, + minagg = mins AS M, + maxMerge(max_aggreg) AS maxagg, + max(max_simple) AS maxs, + maxagg = maxs AS MX, + sumMerge(sum_aggreg) AS sumagg, + sum(sum_simple) AS sums, + sumagg = sums AS S, + sumWithOverflowMerge(sumov_aggreg) AS sumaggov, + sumWithOverflow(sumov_simple) AS sumsov, + sumaggov = sumsov AS SO, + groupBitAndMerge(gbitand_aggreg) AS gbitandaggreg, + groupBitAnd(gbitand_simple) AS gbitandsimple, + gbitandaggreg = gbitandsimple AS BIT_AND, + groupBitOrMerge(gbitor_aggreg) AS gbitoraggreg, + groupBitOr(gbitor_simple) AS gbitorsimple, + gbitoraggreg = gbitorsimple AS BIT_OR, + groupBitXorMerge(gbitxor_aggreg) AS gbitxoraggreg, + groupBitXor(gbitxor_simple) AS gbitxorsimple, + gbitxoraggreg = gbitxorsimple AS BITXOR, + arraySort(groupArrayArrayMerge(gra_aggreg)) AS graa, + arraySort(groupArrayArray(gra_simple)) AS gras, + graa = gras AS GAA, + arraySort(groupUniqArrayArrayMerge(grp_aggreg)) AS gra, + arraySort(groupUniqArrayArray(grp_simple)) AS grs, + gra = grs AS T, + sumMapMerge(aggreg_map) AS smmapagg, + sumMap(simple_map) AS smmaps, + smmapagg = smmaps AS SM, + minMapMerge(aggreg_map_min) AS minmapapagg, + minMap(simple_map_min) AS minmaps, + minmapapagg = minmaps AS SMIN, + maxMapMerge(aggreg_map_max) AS maxmapapagg, + maxMap(simple_map_max) AS maxmaps, + maxmapapagg = maxmaps AS SMAX + FROM simple_agf_summing_mt + GROUP BY g + ORDER BY g +); + +DROP TABLE simple_agf_summing_mt; diff --git a/parser/testdata/01630_simple_aggregate_all_functions_in_summing_merge_tree/ast.json b/parser/testdata/01630_simple_aggregate_all_functions_in_summing_merge_tree/ast.json new file mode 100644 index 000000000..a661e876f --- /dev/null +++ b/parser/testdata/01630_simple_aggregate_all_functions_in_summing_merge_tree/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery simple_agf_aggregating_mt (children 1)" + }, + { + "explain": " Identifier simple_agf_aggregating_mt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001609205, + "rows_read": 2, + "bytes_read": 102 + } +} diff --git a/parser/testdata/01630_simple_aggregate_all_functions_in_summing_merge_tree/metadata.json b/parser/testdata/01630_simple_aggregate_all_functions_in_summing_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01630_simple_aggregate_all_functions_in_summing_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01630_simple_aggregate_all_functions_in_summing_merge_tree/query.sql b/parser/testdata/01630_simple_aggregate_all_functions_in_summing_merge_tree/query.sql new file 
mode 100644 index 000000000..3cd4cc6ef --- /dev/null +++ b/parser/testdata/01630_simple_aggregate_all_functions_in_summing_merge_tree/query.sql @@ -0,0 +1,219 @@ +DROP TABLE IF EXISTS simple_agf_aggregating_mt; + +CREATE TABLE simple_agf_aggregating_mt +( + a Int64, + min_aggreg AggregateFunction(min, UInt64), + min_simple SimpleAggregateFunction(min, UInt64), + max_aggreg AggregateFunction(max, UInt64), + max_simple SimpleAggregateFunction(max, UInt64), + sum_aggreg AggregateFunction(sum, UInt64), + sum_simple SimpleAggregateFunction(sum, UInt64), + sumov_aggreg AggregateFunction(sumWithOverflow, UInt64), + sumov_simple SimpleAggregateFunction(sumWithOverflow, UInt64), + gbitand_aggreg AggregateFunction(groupBitAnd, UInt64), + gbitand_simple SimpleAggregateFunction(groupBitAnd, UInt64), + gbitor_aggreg AggregateFunction(groupBitOr, UInt64), + gbitor_simple SimpleAggregateFunction(groupBitOr, UInt64), + gbitxor_aggreg AggregateFunction(groupBitXor, UInt64), + gbitxor_simple SimpleAggregateFunction(groupBitXor, UInt64), + gra_aggreg AggregateFunction(groupArrayArray, Array(UInt64)), + gra_simple SimpleAggregateFunction(groupArrayArray, Array(UInt64)), + grp_aggreg AggregateFunction(groupUniqArrayArray, Array(UInt64)), + grp_simple SimpleAggregateFunction(groupUniqArrayArray, Array(UInt64)), + aggreg_map AggregateFunction(sumMap, Tuple(Array(String), Array(UInt64))), + simple_map SimpleAggregateFunction(sumMap, Tuple(Array(String), Array(UInt64))), + aggreg_map_min AggregateFunction(minMap, Tuple(Array(String), Array(UInt64))), + simple_map_min SimpleAggregateFunction(minMap, Tuple(Array(String), Array(UInt64))), + aggreg_map_max AggregateFunction(maxMap, Tuple(Array(String), Array(UInt64))), + simple_map_max SimpleAggregateFunction(maxMap, Tuple(Array(String), Array(UInt64))) +) +ENGINE = AggregatingMergeTree +ORDER BY a; + +INSERT INTO simple_agf_aggregating_mt SELECT + number % 51 AS a, + minState(number), + min(number), + maxState(number), + max(number), + sumState(number), + sum(number), + sumWithOverflowState(number), + sumWithOverflow(number), + groupBitAndState(number + 111111111), + groupBitAnd(number + 111111111), + groupBitOrState(number + 111111111), + groupBitOr(number + 111111111), + groupBitXorState(number + 111111111), + groupBitXor(number + 111111111), + groupArrayArrayState([toUInt64(number % 1000)]), + groupArrayArray([toUInt64(number % 1000)]), + groupUniqArrayArrayState([toUInt64(number % 500)]), + groupUniqArrayArray([toUInt64(number % 500)]), + sumMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + sumMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + minMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + minMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + maxMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + maxMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))) +FROM numbers(10000) +GROUP BY a; + +INSERT INTO simple_agf_aggregating_mt SELECT + number % 1151 AS a, + minState(number), + min(number), + maxState(number), + max(number), + sumState(number), + sum(number), + sumWithOverflowState(number), + sumWithOverflow(number), + groupBitAndState(number + 111111111), + groupBitAnd(number + 111111111), + groupBitOrState(number + 111111111), + groupBitOr(number + 111111111), + groupBitXorState(number + 111111111), + groupBitXor(number + 
111111111), + groupArrayArrayState([toUInt64(number % 1000)]), + groupArrayArray([toUInt64(number % 1000)]), + groupUniqArrayArrayState([toUInt64(number % 500)]), + groupUniqArrayArray([toUInt64(number % 500)]), + sumMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + sumMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + minMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + minMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + maxMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + maxMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))) +FROM numbers(10000) +GROUP BY a; + +OPTIMIZE TABLE simple_agf_aggregating_mt FINAL; + +SELECT cityHash64(groupArray(cityHash64(*))) FROM ( + SELECT + a % 31 AS g, + minMerge(min_aggreg) AS minagg, + min(min_simple) AS mins, + minagg = mins AS M, + maxMerge(max_aggreg) AS maxagg, + max(max_simple) AS maxs, + maxagg = maxs AS MX, + sumMerge(sum_aggreg) AS sumagg, + sum(sum_simple) AS sums, + sumagg = sums AS S, + sumWithOverflowMerge(sumov_aggreg) AS sumaggov, + sumWithOverflow(sumov_simple) AS sumsov, + sumaggov = sumsov AS SO, + groupBitAndMerge(gbitand_aggreg) AS gbitandaggreg, + groupBitAnd(gbitand_simple) AS gbitandsimple, + gbitandaggreg = gbitandsimple AS BIT_AND, + groupBitOrMerge(gbitor_aggreg) AS gbitoraggreg, + groupBitOr(gbitor_simple) AS gbitorsimple, + gbitoraggreg = gbitorsimple AS BIT_OR, + groupBitXorMerge(gbitxor_aggreg) AS gbitxoraggreg, + groupBitXor(gbitxor_simple) AS gbitxorsimple, + gbitxoraggreg = gbitxorsimple AS BITXOR, + arraySort(groupArrayArrayMerge(gra_aggreg)) AS graa, + arraySort(groupArrayArray(gra_simple)) AS gras, + graa = gras AS GAA, + arraySort(groupUniqArrayArrayMerge(grp_aggreg)) AS gra, + arraySort(groupUniqArrayArray(grp_simple)) AS grs, + gra = grs AS T, + sumMapMerge(aggreg_map) AS smmapagg, + sumMap(simple_map) AS smmaps, + smmapagg = smmaps AS SM, + minMapMerge(aggreg_map_min) AS minmapapagg, + minMap(simple_map_min) AS minmaps, + minmapapagg = minmaps AS SMIN, + maxMapMerge(aggreg_map_max) AS maxmapapagg, + maxMap(simple_map_max) AS maxmaps, + maxmapapagg = maxmaps AS SMAX + FROM simple_agf_aggregating_mt + GROUP BY g + ORDER BY g +); + +SELECT '---mutation---'; + +ALTER TABLE simple_agf_aggregating_mt + DELETE WHERE (a % 3) = 0 +SETTINGS mutations_sync = 1; + +INSERT INTO simple_agf_aggregating_mt SELECT + number % 11151 AS a, + minState(number), + min(number), + maxState(number), + max(number), + sumState(number), + sum(number), + sumWithOverflowState(number), + sumWithOverflow(number), + groupBitAndState((number % 3) + 111111110), + groupBitAnd((number % 3) + 111111110), + groupBitOrState(number + 111111111), + groupBitOr(number + 111111111), + groupBitXorState(number + 111111111), + groupBitXor(number + 111111111), + groupArrayArrayState([toUInt64(number % 100)]), + groupArrayArray([toUInt64(number % 100)]), + groupUniqArrayArrayState([toUInt64(number % 50)]), + groupUniqArrayArray([toUInt64(number % 50)]), + sumMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + sumMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + minMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + minMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + 
maxMapState((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))), + maxMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))) +FROM numbers(10000) +GROUP BY a; + +OPTIMIZE TABLE simple_agf_aggregating_mt FINAL; + +SELECT cityHash64(groupArray(cityHash64(*))) FROM ( +SELECT + a % 31 AS g, + minMerge(min_aggreg) AS minagg, + min(min_simple) AS mins, + minagg = mins AS M, + maxMerge(max_aggreg) AS maxagg, + max(max_simple) AS maxs, + maxagg = maxs AS MX, + sumMerge(sum_aggreg) AS sumagg, + sum(sum_simple) AS sums, + sumagg = sums AS S, + sumWithOverflowMerge(sumov_aggreg) AS sumaggov, + sumWithOverflow(sumov_simple) AS sumsov, + sumaggov = sumsov AS SO, + groupBitAndMerge(gbitand_aggreg) AS gbitandaggreg, + groupBitAnd(gbitand_simple) AS gbitandsimple, + gbitandaggreg = gbitandsimple AS BIT_AND, + groupBitOrMerge(gbitor_aggreg) AS gbitoraggreg, + groupBitOr(gbitor_simple) AS gbitorsimple, + gbitoraggreg = gbitorsimple AS BIT_OR, + groupBitXorMerge(gbitxor_aggreg) AS gbitxoraggreg, + groupBitXor(gbitxor_simple) AS gbitxorsimple, + gbitxoraggreg = gbitxorsimple AS BITXOR, + arraySort(groupArrayArrayMerge(gra_aggreg)) AS graa, + arraySort(groupArrayArray(gra_simple)) AS gras, + graa = gras AS GAA, + arraySort(groupUniqArrayArrayMerge(grp_aggreg)) AS gra, + arraySort(groupUniqArrayArray(grp_simple)) AS grs, + gra = grs AS T, + sumMapMerge(aggreg_map) AS smmapagg, + sumMap(simple_map) AS smmaps, + smmapagg = smmaps AS SM, + minMapMerge(aggreg_map_min) AS minmapapagg, + minMap(simple_map_min) AS minmaps, + minmapapagg = minmaps AS SMIN, + maxMapMerge(aggreg_map_max) AS maxmapapagg, + maxMap(simple_map_max) AS maxmaps, + maxmapapagg = maxmaps AS SMAX + FROM simple_agf_aggregating_mt + GROUP BY g + ORDER BY g +); + +DROP TABLE simple_agf_aggregating_mt; diff --git a/parser/testdata/01630_simple_aggregate_function_in_summing_merge_tree/ast.json b/parser/testdata/01630_simple_aggregate_function_in_summing_merge_tree/ast.json new file mode 100644 index 000000000..af0d3ae6c --- /dev/null +++ b/parser/testdata/01630_simple_aggregate_function_in_summing_merge_tree/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_smt (children 1)" + }, + { + "explain": " Identifier test_smt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001686889, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01630_simple_aggregate_function_in_summing_merge_tree/metadata.json b/parser/testdata/01630_simple_aggregate_function_in_summing_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01630_simple_aggregate_function_in_summing_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01630_simple_aggregate_function_in_summing_merge_tree/query.sql b/parser/testdata/01630_simple_aggregate_function_in_summing_merge_tree/query.sql new file mode 100644 index 000000000..498c5d93a --- /dev/null +++ b/parser/testdata/01630_simple_aggregate_function_in_summing_merge_tree/query.sql @@ -0,0 +1,23 @@ +drop table if exists test_smt; + +create table test_smt (id UInt32, sMap SimpleAggregateFunction(sumMap, Tuple(Array(UInt8), Array(Int64))), aMap AggregateFunction(sumMap, Tuple(Array(UInt8), Array(Int64)))) engine SummingMergeTree partition by tuple() order by id; + +insert into test_smt select id, sumMap(k), sumMapState(k) from (select 2 as id, arrayJoin([([0], [1]), ([0, 25], 
[-1, toInt64(1)])]) as k) group by id, rowNumberInAllBlocks();
+
+select sumMap(sMap), sumMapMerge(aMap) from test_smt;
+
+drop table if exists test_smt;
+
+drop table if exists simple_agf_summing_mt;
+
+create table simple_agf_summing_mt (a Int64, grp_aggreg AggregateFunction(groupUniqArrayArray, Array(UInt64)), grp_simple SimpleAggregateFunction(groupUniqArrayArray, Array(UInt64))) engine = SummingMergeTree() order by a;
+
+insert into simple_agf_summing_mt select 1 a, groupUniqArrayArrayState([toUInt64(number)]), groupUniqArrayArray([toUInt64(number)]) from numbers(1) group by a;
+
+insert into simple_agf_summing_mt select 1 a, groupUniqArrayArrayState([toUInt64(number)]), groupUniqArrayArray([toUInt64(number)]) from numbers(2) group by a;
+
+optimize table simple_agf_summing_mt final;
+
+SELECT arraySort(groupUniqArrayArrayMerge(grp_aggreg)) gra , arraySort(groupUniqArrayArray(grp_simple)) grs FROM simple_agf_summing_mt group by a;
+
+drop table if exists simple_agf_summing_mt;
diff --git a/parser/testdata/01631_date_overflow_as_partition_key/ast.json b/parser/testdata/01631_date_overflow_as_partition_key/ast.json
new file mode 100644
index 000000000..a83c1af24
--- /dev/null
+++ b/parser/testdata/01631_date_overflow_as_partition_key/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery dt_overflow (children 1)"
+        },
+        {
+            "explain": " Identifier dt_overflow"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001314411,
+        "rows_read": 2,
+        "bytes_read": 74
+    }
+}
diff --git a/parser/testdata/01631_date_overflow_as_partition_key/metadata.json b/parser/testdata/01631_date_overflow_as_partition_key/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01631_date_overflow_as_partition_key/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01631_date_overflow_as_partition_key/query.sql b/parser/testdata/01631_date_overflow_as_partition_key/query.sql
new file mode 100644
index 000000000..9a8d37084
--- /dev/null
+++ b/parser/testdata/01631_date_overflow_as_partition_key/query.sql
@@ -0,0 +1,11 @@
+drop table if exists dt_overflow;
+
+create table dt_overflow(d Date, i int) engine MergeTree partition by d order by i;
+
+insert into dt_overflow values('2106-11-11', 1);
+
+insert into dt_overflow values('2106-11-12', 1);
+
+select * from dt_overflow ORDER BY d;
+
+drop table if exists dt_overflow;
diff --git a/parser/testdata/01632_group_array_msan/ast.json b/parser/testdata/01632_group_array_msan/ast.json
new file mode 100644
index 000000000..7c9a89742
--- /dev/null
+++ b/parser/testdata/01632_group_array_msan/ast.json
@@ -0,0 +1,127 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 2)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 2)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Function groupArrayMerge (children 2)"
+        },
+        {
+            "explain": "     ExpressionList (children 1)"
+        },
+        {
+            "explain": "      Function multiply (children 1)"
+        },
+        {
+            "explain": "       ExpressionList (children 2)"
+        },
+        {
+            "explain": "        Identifier y"
+        },
+        {
+            "explain": "        Literal UInt64_1048576"
+        },
+        {
+            "explain": "     ExpressionList (children 1)"
+        },
+        {
+            "explain": "      Literal UInt64_1048577"
+        },
+        {
+            "explain": "   TablesInSelectQuery (children 1)"
+        },
+        {
+            "explain": "    TablesInSelectQueryElement (children 1)"
+        },
+        {
+            "explain": "     TableExpression (children 1)"
+        },
+        {
+            "explain": "      Subquery (children 1)"
+        },
+        {
+            "explain": "       SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": "        ExpressionList (children 1)"
+        },
+        {
+            "explain": "         SelectQuery (children 2)"
+        },
+        {
+            "explain": "          ExpressionList (children 1)"
+        },
+        {
+            "explain": "           Function groupArrayState (alias y) (children 2)"
+        },
+        {
+            "explain": "            ExpressionList (children 1)"
+        },
+        {
+            "explain": "             Identifier x"
+        },
+        {
+            "explain": "            ExpressionList (children 1)"
+        },
+        {
+            "explain": "             Literal UInt64_9223372036854775807"
+        },
+        {
+            "explain": "          TablesInSelectQuery (children 1)"
+        },
+        {
+            "explain": "           TablesInSelectQueryElement (children 1)"
+        },
+        {
+            "explain": "            TableExpression (children 1)"
+        },
+        {
+            "explain": "             Subquery (children 1)"
+        },
+        {
+            "explain": "              SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": "               ExpressionList (children 1)"
+        },
+        {
+            "explain": "                SelectQuery (children 1)"
+        },
+        {
+            "explain": "                 ExpressionList (children 1)"
+        },
+        {
+            "explain": "                  Literal UInt64_1048576 (alias x)"
+        },
+        {
+            "explain": " Identifier Null"
+        }
+    ],
+
+    "rows": 35,
+
+    "statistics":
+    {
+        "elapsed": 0.001441585,
+        "rows_read": 35,
+        "bytes_read": 1546
+    }
+}
diff --git a/parser/testdata/01632_group_array_msan/metadata.json b/parser/testdata/01632_group_array_msan/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01632_group_array_msan/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01632_group_array_msan/query.sql b/parser/testdata/01632_group_array_msan/query.sql
new file mode 100644
index 000000000..033d3754a
--- /dev/null
+++ b/parser/testdata/01632_group_array_msan/query.sql
@@ -0,0 +1,4 @@
+SELECT groupArrayMerge(1048577)(y * 1048576) FROM (SELECT groupArrayState(9223372036854775807)(x) AS y FROM (SELECT 1048576 AS x)) FORMAT Null; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT groupArrayMerge(1048577)(y * 1048576) FROM (SELECT groupArrayState(1048577)(x) AS y FROM (SELECT 1048576 AS x)) FORMAT Null;
+SELECT groupArrayMerge(9223372036854775807)(y * 1048576) FROM (SELECT groupArrayState(9223372036854775807)(x) AS y FROM (SELECT 1048576 AS x)) FORMAT Null;
+SELECT quantileResampleMerge(0.5, 257, 65536, 1)(tuple(*).1) FROM (SELECT quantileResampleState(0.10, 1, 2, 42)(number, number) FROM numbers(100)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
diff --git a/parser/testdata/01632_max_partitions_to_read/ast.json b/parser/testdata/01632_max_partitions_to_read/ast.json
new file mode 100644
index 000000000..4f8c7370b
--- /dev/null
+++ b/parser/testdata/01632_max_partitions_to_read/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery p (children 1)"
+        },
+        {
+            "explain": " Identifier p"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001289074,
+        "rows_read": 2,
+        "bytes_read": 54
+    }
+}
diff --git a/parser/testdata/01632_max_partitions_to_read/metadata.json b/parser/testdata/01632_max_partitions_to_read/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01632_max_partitions_to_read/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01632_max_partitions_to_read/query.sql b/parser/testdata/01632_max_partitions_to_read/query.sql
new file mode 100644
index 000000000..c8b2347b1
--- /dev/null
+++ b/parser/testdata/01632_max_partitions_to_read/query.sql
@@ -0,0 +1,17 @@
+drop table if exists p;
+
+create table p(d Date, i int, j int) engine MergeTree partition by d order by i settings max_partitions_to_read = 1;
+
+insert into p values ('2021-01-01', 1, 2), ('2021-01-02', 4, 5);
+
+select * from p order by i; -- { serverError TOO_MANY_PARTITIONS }
+
+select * from p order by i settings max_partitions_to_read = 2;
+
+select * from p order by i settings max_partitions_to_read = 0; -- unlimited
+
+alter table p modify setting max_partitions_to_read = 2;
+
+select * from p order by i;
+
+drop table if exists p;
diff --git a/parser/testdata/01632_nullable_string_type_convert_to_decimal_type/ast.json b/parser/testdata/01632_nullable_string_type_convert_to_decimal_type/ast.json
new file mode 100644
index 000000000..504dd82b2
--- /dev/null
+++ b/parser/testdata/01632_nullable_string_type_convert_to_decimal_type/ast.json
@@ -0,0 +1,52 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 1)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Function CAST (children 1)"
+        },
+        {
+            "explain": "     ExpressionList (children 2)"
+        },
+        {
+            "explain": "      Function arrayJoin (children 1)"
+        },
+        {
+            "explain": "       ExpressionList (children 1)"
+        },
+        {
+            "explain": "        Literal Array_['42.1', NULL]"
+        },
+        {
+            "explain": "      Literal 'Nullable(Decimal(10, 2))'"
+        }
+    ],
+
+    "rows": 10,
+
+    "statistics":
+    {
+        "elapsed": 0.001115795,
+        "rows_read": 10,
+        "bytes_read": 406
+    }
+}
diff --git a/parser/testdata/01632_nullable_string_type_convert_to_decimal_type/metadata.json b/parser/testdata/01632_nullable_string_type_convert_to_decimal_type/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01632_nullable_string_type_convert_to_decimal_type/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01632_nullable_string_type_convert_to_decimal_type/query.sql b/parser/testdata/01632_nullable_string_type_convert_to_decimal_type/query.sql
new file mode 100644
index 000000000..b8fa08edb
--- /dev/null
+++ b/parser/testdata/01632_nullable_string_type_convert_to_decimal_type/query.sql
@@ -0,0 +1,2 @@
+SELECT CAST(arrayJoin(['42.1', NULL]) AS Nullable(Decimal(10,2)));
+
diff --git a/parser/testdata/01632_select_all_syntax/ast.json b/parser/testdata/01632_select_all_syntax/ast.json
new file mode 100644
index 000000000..8227f946d
--- /dev/null
+++ b/parser/testdata/01632_select_all_syntax/ast.json
@@ -0,0 +1,37 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 1)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Literal 'a'"
+        }
+    ],
+
+    "rows": 5,
+
+    "statistics":
+    {
+        "elapsed": 0.001334604,
+        "rows_read": 5,
+        "bytes_read": 172
+    }
+}
diff --git a/parser/testdata/01632_select_all_syntax/metadata.json b/parser/testdata/01632_select_all_syntax/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01632_select_all_syntax/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01632_select_all_syntax/query.sql b/parser/testdata/01632_select_all_syntax/query.sql
new file mode 100644
index 000000000..f5e96a5cb
--- /dev/null
+++ b/parser/testdata/01632_select_all_syntax/query.sql
@@ -0,0 +1,25 @@
+SELECT ALL 'a';
+SELECT DISTINCT 'a';
+SELECT ALL * FROM (SELECT 1 UNION ALL SELECT 1);
+SELECT DISTINCT * FROM (SELECT 2 UNION ALL SELECT 2);
+
+SELECT sum(number) FROM numbers(10);
+SELECT sum(ALL number) FROM numbers(10);
+SELECT sum(DISTINCT number) FROM numbers(10);
+
+SELECT sum(ALL x) FROM (SELECT 1 x UNION ALL SELECT 1);
+SELECT sum(DISTINCT x) FROM (SELECT 1 x UNION ALL SELECT 1);
+
+SELECT sum(ALL) FROM (SELECT 1 AS ALL);
+
+SELECT sum(DISTINCT) FROM (SELECT 1 AS DISTINCT);
+
+SELECT repeat('a', ALL) FROM (SELECT number AS ALL FROM numbers(10));
+
+SELECT repeat('a', DISTINCT) FROM (SELECT number AS DISTINCT FROM numbers(10));
+
+SELECT repeat(ALL, 5) FROM (SELECT 'a' AS ALL);
+
+SELECT repeat(DISTINCT, 5) FROM (SELECT 'a' AS DISTINCT);
+
+SELECT repeat(ALL, DISTINCT) FROM (SELECT 'a' AS ALL, 5 AS DISTINCT);
diff --git a/parser/testdata/01633_limit_fuzz/ast.json b/parser/testdata/01633_limit_fuzz/ast.json
new file mode 100644
index 000000000..5d5f612df
--- /dev/null
+++ b/parser/testdata/01633_limit_fuzz/ast.json
@@ -0,0 +1,82 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 2)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 5)"
+        },
+        {
+            "explain": "   ExpressionList (children 2)"
+        },
+        {
+            "explain": "    Identifier number"
+        },
+        {
+            "explain": "    Literal UInt64_1 (alias k)"
+        },
+        {
+            "explain": "   TablesInSelectQuery (children 1)"
+        },
+        {
+            "explain": "    TablesInSelectQueryElement (children 1)"
+        },
+        {
+            "explain": "     TableExpression (children 1)"
+        },
+        {
+            "explain": "      Function numbers (children 1)"
+        },
+        {
+            "explain": "       ExpressionList (children 1)"
+        },
+        {
+            "explain": "        Literal UInt64_100000"
+        },
+        {
+            "explain": "   ExpressionList (children 2)"
+        },
+        {
+            "explain": "    OrderByElement (children 1)"
+        },
+        {
+            "explain": "     Identifier k"
+        },
+        {
+            "explain": "    OrderByElement (children 1)"
+        },
+        {
+            "explain": "     Identifier number"
+        },
+        {
+            "explain": "   Literal UInt64_1025"
+        },
+        {
+            "explain": "   Literal UInt64_1023"
+        },
+        {
+            "explain": " Identifier Values"
+        }
+    ],
+
+    "rows": 20,
+
+    "statistics":
+    {
+        "elapsed": 0.001622923,
+        "rows_read": 20,
+        "bytes_read": 730
+    }
+}
diff --git a/parser/testdata/01633_limit_fuzz/metadata.json b/parser/testdata/01633_limit_fuzz/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01633_limit_fuzz/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01633_limit_fuzz/query.sql b/parser/testdata/01633_limit_fuzz/query.sql
new file mode 100644
index 000000000..3e11513e1
--- /dev/null
+++ b/parser/testdata/01633_limit_fuzz/query.sql
@@ -0,0 +1 @@
+SELECT number, 1 AS k FROM numbers(100000) ORDER BY k, number LIMIT 1025, 1023 FORMAT Values;
diff --git a/parser/testdata/01634_sum_map_nulls/ast.json b/parser/testdata/01634_sum_map_nulls/ast.json
new file mode 100644
index 000000000..2f4ef42f6
--- /dev/null
+++ b/parser/testdata/01634_sum_map_nulls/ast.json
@@ -0,0 +1,52 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 1)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Function initializeAggregation (children 1)"
+        },
+        {
+            "explain": "     ExpressionList (children 4)"
+        },
+        {
+            "explain": "      Literal 'sumMap'"
+        },
+        {
+            "explain": "      Literal Array_[UInt64_1, UInt64_2, UInt64_1]"
+        },
+        {
+            "explain": "      Literal Array_[UInt64_1, UInt64_1, UInt64_1]"
+        },
+        {
+            "explain": "      Literal Array_[Int64_-1, NULL, UInt64_10]"
+        }
+    ],
+
+    "rows": 10,
+
+    "statistics":
+    {
+        "elapsed": 0.00128833,
+        "rows_read": 10,
+        "bytes_read": 445
+    }
+}
diff --git a/parser/testdata/01634_sum_map_nulls/metadata.json b/parser/testdata/01634_sum_map_nulls/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01634_sum_map_nulls/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01634_sum_map_nulls/query.sql b/parser/testdata/01634_sum_map_nulls/query.sql
new file mode 100644
index 000000000..149b946ec
--- /dev/null
+++ b/parser/testdata/01634_sum_map_nulls/query.sql
@@ -0,0 +1,5 @@
+SELECT initializeAggregation('sumMap', [1, 2, 1], [1, 1, 1], [-1, null, 10]);
+SELECT initializeAggregation('sumMap', [1, 2, 1], [1, 1, 1], [-1, null, null]);
+SELECT initializeAggregation('sumMap', [1, 2, 1], [1, 1, 1], [null, null, null]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT initializeAggregation('sumMap', [1, 2, 1], [1, 1, 1], [-1, 10, 10]);
+SELECT initializeAggregation('sumMap', [1, 2, 1], [1, 1, 1], [-1, 10, null]);
diff --git a/parser/testdata/01634_summap_nullable/ast.json b/parser/testdata/01634_summap_nullable/ast.json
new file mode 100644
index 000000000..81869d431
--- /dev/null
+++ b/parser/testdata/01634_summap_nullable/ast.json
@@ -0,0 +1,46 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 1)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Function sumMap (children 1)"
+        },
+        {
+            "explain": "     ExpressionList (children 2)"
+        },
+        {
+            "explain": "      Literal Array_['a', 'b']"
+        },
+        {
+            "explain": "      Literal Array_[UInt64_1, NULL]"
+        }
+    ],
+
+    "rows": 8,
+
+    "statistics":
+    {
+        "elapsed": 0.00138313,
+        "rows_read": 8,
+        "bytes_read": 311
+    }
+}
diff --git a/parser/testdata/01634_summap_nullable/metadata.json b/parser/testdata/01634_summap_nullable/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01634_summap_nullable/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01634_summap_nullable/query.sql b/parser/testdata/01634_summap_nullable/query.sql
new file mode 100644
index 000000000..226da645e
--- /dev/null
+++ b/parser/testdata/01634_summap_nullable/query.sql
@@ -0,0 +1,2 @@
+SELECT sumMap(['a', 'b'], [1, NULL]);
+SELECT sumMap(['a', 'b'], [1, toNullable(0)]);
diff --git a/parser/testdata/01634_uuid_fuzz/ast.json b/parser/testdata/01634_uuid_fuzz/ast.json
new file mode 100644
index 000000000..f3c4924cb
--- /dev/null
+++ b/parser/testdata/01634_uuid_fuzz/ast.json
@@ -0,0 +1,43 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 1)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Function toUUID (children 1)"
+        },
+        {
+            "explain": "     ExpressionList (children 1)"
+        },
+        {
+            "explain": "      Literal Float64_-1.1"
+        }
+    ],
+
+    "rows": 7,
+
+    "statistics":
+    {
+        "elapsed": 0.001421283,
+        "rows_read": 7,
+        "bytes_read": 263
+    }
+}
diff --git a/parser/testdata/01634_uuid_fuzz/metadata.json b/parser/testdata/01634_uuid_fuzz/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01634_uuid_fuzz/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01634_uuid_fuzz/query.sql b/parser/testdata/01634_uuid_fuzz/query.sql
new file mode 100644
index 000000000..2ffde0fd4
--- /dev/null
+++ b/parser/testdata/01634_uuid_fuzz/query.sql
@@ -0,0 +1 @@
+SELECT toUUID(-1.1); -- { serverError NOT_IMPLEMENTED }
diff --git a/parser/testdata/01635_nullable_fuzz/ast.json b/parser/testdata/01635_nullable_fuzz/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/01635_nullable_fuzz/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/01635_nullable_fuzz/metadata.json b/parser/testdata/01635_nullable_fuzz/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01635_nullable_fuzz/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01635_nullable_fuzz/query.sql b/parser/testdata/01635_nullable_fuzz/query.sql
new file mode 100644
index 000000000..d45cf179a
--- /dev/null
+++ b/parser/testdata/01635_nullable_fuzz/query.sql
@@ -0,0 +1,21 @@
+SELECT
+    'Nul\0able\0String)Nul\0\0ble(String)Nul\0able(String)Nul\0able(String)',
+    NULL AND 2,
+    '',
+    number,
+    NULL AS k
+FROM
+(
+    SELECT
+        materialize(NULL) OR materialize(-9223372036854775808),
+        number
+    FROM system.numbers
+    LIMIT 1000000
+)
+ORDER BY
+    k ASC,
+    number ASC,
+    k ASC
+LIMIT 1023, 1023
+SETTINGS max_bytes_before_external_sort = 1000000, max_bytes_ratio_before_external_sort = 0
+FORMAT Null;
diff --git a/parser/testdata/01635_sum_map_fuzz/ast.json b/parser/testdata/01635_sum_map_fuzz/ast.json
new file mode 100644
index 000000000..c1e91d2ff
--- /dev/null
+++ b/parser/testdata/01635_sum_map_fuzz/ast.json
@@ -0,0 +1,85 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 2)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Function finalizeAggregation (children 1)"
+        },
+        {
+            "explain": "     ExpressionList (children 1)"
+        },
+        {
+            "explain": "      Asterisk"
+        },
+        {
+            "explain": "   TablesInSelectQuery (children 1)"
+        },
+        {
+            "explain": "    TablesInSelectQueryElement (children 1)"
+        },
+        {
+            "explain": "     TableExpression (children 1)"
+        },
+        {
+            "explain": "      Subquery (children 1)"
+        },
+        {
+            "explain": "       SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": "        ExpressionList (children 1)"
+        },
+        {
+            "explain": "         SelectQuery (children 1)"
+        },
+        {
+            "explain": "          ExpressionList (children 1)"
+        },
+        {
+            "explain": "           Function initializeAggregation (children 1)"
+        },
+        {
+            "explain": "            ExpressionList (children 4)"
+        },
+        {
+            "explain": "             Literal 'sumMapState'"
+        },
+        {
+            "explain": "             Literal Array_[UInt64_1, UInt64_2]"
+        },
+        {
+            "explain": "             Literal Array_[UInt64_1, UInt64_2]"
+        },
+        {
+            "explain": "             Literal Array_[UInt64_1, NULL]"
+        }
+    ],
+
+    "rows": 21,
+
+    "statistics":
+    {
+        "elapsed": 0.001467689,
+        "rows_read": 21,
+        "bytes_read": 923
+    }
+}
diff --git a/parser/testdata/01635_sum_map_fuzz/metadata.json b/parser/testdata/01635_sum_map_fuzz/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01635_sum_map_fuzz/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01635_sum_map_fuzz/query.sql b/parser/testdata/01635_sum_map_fuzz/query.sql
new file mode 100644
index 000000000..853eb66cb
--- /dev/null
+++ b/parser/testdata/01635_sum_map_fuzz/query.sql
@@ -0,0 +1,6 @@
+SELECT finalizeAggregation(*) FROM (select initializeAggregation('sumMapState', [1, 2], [1, 2], [1, null]));
+
+DROP TABLE IF EXISTS sum_map_overflow;
+CREATE TABLE sum_map_overflow(events Array(UInt8), counts Array(UInt8)) ENGINE = Log;
+SELECT [NULL], sumMapWithOverflow(events, [NULL], [[(NULL)]], counts) FROM sum_map_overflow; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+DROP TABLE sum_map_overflow;
diff --git a/parser/testdata/01636_nullable_fuzz2/ast.json b/parser/testdata/01636_nullable_fuzz2/ast.json
new file mode 100644
index 000000000..501cc58c4
--- /dev/null
+++ b/parser/testdata/01636_nullable_fuzz2/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery open_events_tmp (children 1)"
+        },
+        {
+            "explain": " Identifier open_events_tmp"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001613935,
+        "rows_read": 2,
+        "bytes_read": 82
+    }
+}
diff --git a/parser/testdata/01636_nullable_fuzz2/metadata.json b/parser/testdata/01636_nullable_fuzz2/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01636_nullable_fuzz2/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01636_nullable_fuzz2/query.sql b/parser/testdata/01636_nullable_fuzz2/query.sql
new file mode 100644
index 000000000..49ee7626b
--- /dev/null
+++ b/parser/testdata/01636_nullable_fuzz2/query.sql
@@ -0,0 +1,35 @@
+DROP TABLE IF EXISTS open_events_tmp;
+DROP TABLE IF EXISTS tracking_events_tmp;
+
+CREATE TABLE open_events_tmp (`APIKey` UInt32, `EventDate` Date) ENGINE = MergeTree PARTITION BY toMonday(EventDate) ORDER BY (APIKey, EventDate);
+CREATE TABLE tracking_events_tmp (`APIKey` UInt32, `EventDate` Date) ENGINE = MergeTree PARTITION BY toYYYYMM(EventDate) ORDER BY (APIKey, EventDate);
+
+insert into open_events_tmp select 2, '2020-07-10' from numbers(32);
+insert into open_events_tmp select 2, '2020-07-11' from numbers(31);
+insert into open_events_tmp select 2, '2020-07-12' from numbers(30);
+
+insert into tracking_events_tmp select 2, '2020-07-09' from numbers(1555);
+insert into tracking_events_tmp select 2, '2020-07-10' from numbers(1881);
+insert into tracking_events_tmp select 2, '2020-07-11' from numbers(1623);
+
+SELECT EventDate
+FROM
+(
+    SELECT EventDate
+    FROM tracking_events_tmp AS t1
+    WHERE (EventDate >= toDate('2020-07-09')) AND (EventDate <= toDate('2020-07-11')) AND (APIKey = 2)
+    GROUP BY EventDate
+)
+FULL OUTER JOIN
+(
+    SELECT EventDate
+    FROM remote('127.0.0.{1,3}', currentDatabase(), open_events_tmp) AS t2
+    WHERE (EventDate <= toDate('2020-07-12')) AND (APIKey = 2)
+    GROUP BY EventDate
+    WITH TOTALS
+) USING (EventDate)
+ORDER BY EventDate ASC
+SETTINGS totals_mode = 'after_having_auto', group_by_overflow_mode = 'any', max_rows_to_group_by = 10000000, joined_subquery_requires_alias = 0;
+
+DROP TABLE open_events_tmp;
+DROP TABLE tracking_events_tmp;
diff --git a/parser/testdata/01637_nullable_fuzz3/ast.json b/parser/testdata/01637_nullable_fuzz3/ast.json
new file mode 100644
index 000000000..c27890a0e
--- /dev/null
+++ b/parser/testdata/01637_nullable_fuzz3/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery t (children 1)"
+        },
+        {
+            "explain": " Identifier t"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001183407,
+        "rows_read": 2,
+        "bytes_read": 54
+    }
+}
diff --git a/parser/testdata/01637_nullable_fuzz3/metadata.json b/parser/testdata/01637_nullable_fuzz3/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01637_nullable_fuzz3/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01637_nullable_fuzz3/query.sql b/parser/testdata/01637_nullable_fuzz3/query.sql
new file mode 100644
index 000000000..6cfd0fc7d
--- /dev/null
+++ b/parser/testdata/01637_nullable_fuzz3/query.sql
@@ -0,0 +1,6 @@
+DROP TABLE IF EXISTS t;
+CREATE TABLE t (`item_id` UInt64, `price_sold` Float32, `date` Date) ENGINE = MergeTree ORDER BY item_id;
+SELECT item_id FROM (SELECT item_id FROM t GROUP BY item_id WITH TOTALS) AS l FULL OUTER JOIN (SELECT item_id FROM t GROUP BY item_id WITH TOTALS) AS r USING (item_id);
+SELECT item_id FROM (SELECT item_id FROM t GROUP BY item_id WITH TOTALS) AS l FULL OUTER JOIN (SELECT item_id FROM t GROUP BY item_id WITH TOTALS) AS r USING (item_id) SETTINGS join_use_nulls = '1';
+SELECT * FROM (SELECT item_id, sum(price_sold) as price_sold FROM t GROUP BY item_id WITH TOTALS) AS l FULL OUTER JOIN (SELECT item_id, sum(price_sold) as price_sold FROM t GROUP BY item_id WITH TOTALS) AS r USING (item_id) SETTINGS join_use_nulls = '1';
+DROP TABLE t;
diff --git a/parser/testdata/01638_div_mod_ambiguities/ast.json b/parser/testdata/01638_div_mod_ambiguities/ast.json
new file mode 100644
index 000000000..59342ce07
--- /dev/null
+++ b/parser/testdata/01638_div_mod_ambiguities/ast.json
@@ -0,0 +1,67 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 2)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 2)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Identifier DIV (alias MOD)"
+        },
+        {
+            "explain": "   TablesInSelectQuery (children 1)"
+        },
+        {
+            "explain": "    TablesInSelectQueryElement (children 1)"
+        },
+        {
+            "explain": "     TableExpression (children 1)"
+        },
+        {
+            "explain": "      Subquery (children 1)"
+        },
+        {
+            "explain": "       SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": "        ExpressionList (children 1)"
+        },
+        {
+            "explain": "         SelectQuery (children 1)"
+        },
+        {
+            "explain": "          ExpressionList (children 1)"
+        },
+        {
+            "explain": "           Literal UInt64_1 (alias DIV)"
+        },
+        {
+            "explain": " Identifier TSVWithNames"
+        }
+    ],
+
+    "rows": 15,
+
+    "statistics":
+    {
+        "elapsed": 0.001742116,
+        "rows_read": 15,
+        "bytes_read": 613
+    }
+}
diff --git a/parser/testdata/01638_div_mod_ambiguities/metadata.json b/parser/testdata/01638_div_mod_ambiguities/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01638_div_mod_ambiguities/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01638_div_mod_ambiguities/query.sql b/parser/testdata/01638_div_mod_ambiguities/query.sql
new file mode 100644
index 000000000..5c011e7e9
--- /dev/null
+++ b/parser/testdata/01638_div_mod_ambiguities/query.sql
@@ -0,0 +1,9 @@
+SELECT DIV AS MOD FROM (SELECT 1 `DIV`) FORMAT TSVWithNames;
+SELECT DIV `MOD` FROM (SELECT 1 `DIV`) FORMAT TSVWithNames;
+SELECT DIV MOD 1 FROM (SELECT 1 `DIV`) FORMAT TSVWithNames;
+SELECT 1 DIV `MOD` `DIV` FROM (SELECT 1 `MOD`) FORMAT TSVWithNames;
+
+SELECT DIV AS mod FROM (SELECT 1 `DIV`) FORMAT TSVWithNames;
+SELECT div `MOD` FROM (SELECT 1 `div`) FORMAT TSVWithNames;
+SELECT DIV mod 1 FROM (SELECT 1 `DIV`) FORMAT TSVWithNames;
+SELECT 1 div `mod` `div` FROM (SELECT 1 `mod`) FORMAT TSVWithNames;
diff --git a/parser/testdata/01639_distributed_sync_insert_zero_rows/ast.json b/parser/testdata/01639_distributed_sync_insert_zero_rows/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/01639_distributed_sync_insert_zero_rows/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/01639_distributed_sync_insert_zero_rows/metadata.json b/parser/testdata/01639_distributed_sync_insert_zero_rows/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01639_distributed_sync_insert_zero_rows/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01639_distributed_sync_insert_zero_rows/query.sql b/parser/testdata/01639_distributed_sync_insert_zero_rows/query.sql
new file mode 100644
index 000000000..0d0a58e9e
--- /dev/null
+++ b/parser/testdata/01639_distributed_sync_insert_zero_rows/query.sql
@@ -0,0 +1,26 @@
+-- Tags: distributed
+
+DROP TABLE IF EXISTS local;
+DROP TABLE IF EXISTS distributed;
+
+CREATE TABLE local (x UInt8) ENGINE = Memory;
+CREATE TABLE distributed AS local ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), local, x);
+
+SET distributed_foreground_insert = 1;
+
+INSERT INTO distributed SELECT number FROM numbers(256) WHERE number % 2 = 0;
+SELECT count() FROM local;
+SELECT count() FROM distributed;
+
+TRUNCATE TABLE local;
+INSERT INTO distributed SELECT number FROM numbers(256) WHERE number % 2 = 1;
+SELECT count() FROM local;
+SELECT count() FROM distributed;
+
+TRUNCATE TABLE local;
+INSERT INTO distributed SELECT number FROM numbers(256) WHERE number < 128;
+SELECT count() FROM local;
+SELECT count() FROM distributed;
+
+DROP TABLE local;
+DROP TABLE distributed;
diff --git a/parser/testdata/01640_distributed_async_insert_compression/ast.json b/parser/testdata/01640_distributed_async_insert_compression/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/01640_distributed_async_insert_compression/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/01640_distributed_async_insert_compression/metadata.json b/parser/testdata/01640_distributed_async_insert_compression/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01640_distributed_async_insert_compression/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01640_distributed_async_insert_compression/query.sql b/parser/testdata/01640_distributed_async_insert_compression/query.sql
new file mode 100644
index 000000000..85fe9ae87
--- /dev/null
+++ b/parser/testdata/01640_distributed_async_insert_compression/query.sql
@@ -0,0 +1,18 @@
+-- Tags: distributed
+
+DROP TABLE IF EXISTS local;
+DROP TABLE IF EXISTS distributed;
+
+CREATE TABLE local (x UInt8) ENGINE = Memory;
+CREATE TABLE distributed AS local ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), local, x);
+
+SET distributed_foreground_insert = 0, network_compression_method = 'zstd';
+
+INSERT INTO distributed SELECT number FROM numbers(256);
+SYSTEM FLUSH DISTRIBUTED distributed;
+
+SELECT count() FROM local;
+SELECT count() FROM distributed;
+
+DROP TABLE local;
+DROP TABLE distributed;
diff --git a/parser/testdata/01640_marks_corruption_regression/ast.json b/parser/testdata/01640_marks_corruption_regression/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/01640_marks_corruption_regression/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/01640_marks_corruption_regression/metadata.json b/parser/testdata/01640_marks_corruption_regression/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01640_marks_corruption_regression/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01640_marks_corruption_regression/query.sql b/parser/testdata/01640_marks_corruption_regression/query.sql
new file mode 100644
index 000000000..c75c26f31
--- /dev/null
+++ b/parser/testdata/01640_marks_corruption_regression/query.sql
@@ -0,0 +1,59 @@
+-- Tags: no-random-merge-tree-settings
+
+DROP TABLE IF EXISTS adaptive_table;
+
+CREATE TABLE adaptive_table(
+    key UInt64,
+    value String
+) ENGINE MergeTree()
+ORDER BY key
+SETTINGS
+    index_granularity_bytes=1048576,
+    min_bytes_for_wide_part=0,
+    old_parts_lifetime=0,
+    index_granularity=8192
+;
+
+-- This triggers adjustment of the granules that was introduced in PR#17120
+INSERT INTO adaptive_table SELECT number, randomPrintableASCII(if(number BETWEEN 8192-30 AND 8192, 102400, 1)) FROM system.numbers LIMIT 16384;
+-- This creates the following marks:
+--
+-- $ check-marks /path/to/db/adaptive_table/all_*/key.{mrk2,bin}
+-- Mark 0, points to 0, 0, has rows after 8192, decompressed size 72808.
" String, + "\";alert(123);t=\"" String, + "';alert(123);t='" String, + "JavaSCript:alert(123)" String, + ";alert(123);" String, + "src=JaVaSCript:prompt(132)" String, + "\"><\\x3Cscript>javascript:alert(1)" String, + "'`\"><\\x00script>javascript:alert(1)" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "ABC
DEF" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "test" String, + "`\"'>" String, + "`\"'>" String, + "`\"'>" String, + "`\"'>" String, + "`\"'>" String, + "`\"'>" String, + "`\"'>" String, + "`\"'>" String, + "`\"'>" String, + "`\"'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "\"`'>" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "" String, + "XXX" String, + "javascript:alert(1)\"` `>" String, + "" String, + "" String, + "<a href=http://foo.bar/#x=`y></a><img alt=\"`><img src=x:x onerror=javascript:alert(1)></a>\">" String, + "<!--[if]><script>javascript:alert(1)</script -->" String, + "<!--[if<img src=x onerror=javascript:alert(1)//]> -->" String, + "<script src=\"/\\%(jscript)s\"></script>" String, + "<script src=\"\\\\%(jscript)s\"></script>" String, + "<IMG \"\"\"><SCRIPT>alert(\"XSS\")</SCRIPT>\">" String, + "<IMG SRC=javascript:alert(String.fromCharCode(88,83,83))>" String, + "<IMG SRC=# onmouseover=\"alert('xxs')\">" String, + "<IMG SRC= onmouseover=\"alert('xxs')\">" String, + "<IMG onmouseover=\"alert('xxs')\">" String, + "<IMG SRC=javascript:alert('XSS')>" String, + "<IMG SRC=javascript:alert('XSS')>" String, + "<IMG SRC=javascript:alert('XSS')>" String, + "<IMG SRC=\"jav ascript:alert('XSS');\">" String, + "<IMG SRC=\"jav ascript:alert('XSS');\">" String, + "<IMG SRC=\"jav ascript:alert('XSS');\">" String, + "<IMG SRC=\"jav ascript:alert('XSS');\">" String, + "perl -e 'print \"<IMG SRC=java\\0script:alert(\\\"XSS\\\")>\";' > out" String, + "<IMG SRC=\"  javascript:alert('XSS');\">" String, + "<SCRIPT/XSS SRC=\"http://ha.ckers.org/xss.js\"></SCRIPT>" String, + "<BODY onload!#$%&()*~+-_.,:;?@[/|\\]^`=alert(\"XSS\")>" 
String, + "<SCRIPT/SRC=\"http://ha.ckers.org/xss.js\"></SCRIPT>" String, + "<<SCRIPT>alert(\"XSS\");//<</SCRIPT>" String, + "<SCRIPT SRC=http://ha.ckers.org/xss.js?< B >" String, + "<SCRIPT SRC=//ha.ckers.org/.j>" String, + "<IMG SRC=\"javascript:alert('XSS')\"" String, + "<iframe src=http://ha.ckers.org/scriptlet.html <" String, + "\\\";alert('XSS');//" String, + "<u oncopy=alert()> Copy me</u>" String, + "<i onwheel=alert(1)> Scroll over me </i>" String, + "<plaintext>" String, + "http://a/%%30%30" String, + "</textarea><script>alert(123)</script>" String, + "1;DROP TABLE users" String, + "1'; DROP TABLE users-- 1" String, + "' OR 1=1 -- 1" String, + "' OR '1'='1" String, + "'; EXEC sp_MSForEachTable 'DROP TABLE ?'; --" String, + " " String, + "%" String, + "_" String, + "--" String, + "--version" String, + "--help" String, + "$USER" String, + "/dev/null; touch /tmp/blns.fail ; echo" String, + "`touch /tmp/blns.fail`" String, + "$(touch /tmp/blns.fail)" String, + "@{[system \"touch /tmp/blns.fail\"]}" String, + "eval(\"puts 'hello world'\")" String, + "System(\"ls -al /\")" String, + "`ls -al /`" String, + "Kernel.exec(\"ls -al /\")" String, + "Kernel.exit(1)" String, + "%x('ls -al /')" String, + "<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><!DOCTYPE foo [ <!ELEMENT foo ANY ><!ENTITY xxe SYSTEM \"file:///etc/passwd\" >]><foo>&xxe;</foo>" String, + "$HOME" String, + "$ENV{'HOME'}" String, + "%d" String, + "%s%s%s%s%s" String, + "{0}" String, + "%*.*s" String, + "%@" String, + "%n" String, + "File:///" String, + "../../../../../../../../../../../etc/passwd%00" String, + "../../../../../../../../../../../etc/hosts" String, + "() { 0; }; touch /tmp/blns.shellshock1.fail;" String, + "() { _; } >_[$($())] { touch /tmp/blns.shellshock2.fail; }" String, + "<<< %s(un='%s') = %u" String, + "+++ATH0" String, + "CON" String, + "PRN" String, + "AUX" String, + "CLOCK$" String, + "NUL" String, + "A:" String, + "ZZ:" String, + "COM1" String, + "LPT1" String, + "LPT2" String, + "LPT3" String, + "COM2" String, + "COM3" String, + "COM4" String, + "DCC SEND STARTKEYLOGGER 0 0 0" String, + "Scunthorpe General Hospital" String, + "Penistone Community Church" String, + "Lightwater Country Park" String, + "Jimmy Clitheroe" String, + "Horniman Museum" String, + "shitake mushrooms" String, + "RomansInSussex.co.uk" String, + "http://www.cum.qc.ca/" String, + "Craig Cockburn, Software Specialist" String, + "Linda Callahan" String, + "Dr. Herman I. Libshitz" String, + "magna cum laude" String, + "Super Bowl XXX" String, + "medieval erection of parapets" String, + "evaluate" String, + "mocha" String, + "expression" String, + "Arsenal canal" String, + "classic" String, + "Tyson Gay" String, + "Dick Van Dyke" String, + "basement" String, + "If you're reading this, you've been in a coma for almost 20 years now. We're trying a new technique. We don't know where this message will end up in your dream, but we hope it works. Please wake up, we miss you." String, + "Roses are \u001b[0;31mred\u001b[0m, violets are \u001b[0;34mblue. Hope you enjoy terminal hue" String, + "But now...\u001b[20Cfor my greatest trick...\u001b[8m" String, + "The quic\b\b\b\b\b\bk brown fo\u0007\u0007\u0007\u0007\u0007\u0007\u0007\u0007\u0007\u0007\u0007x... 
[Beeeep]" String, + "Powerلُلُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ冗" String, + "🏳0🌈️" String, + "జ్ఞ‌ా" String, + "گچپژ" String, + "{% print 'x' * 64 * 1024**3 %}" String, + "{{ \"\".__class__.__mro__[2].__subclasses__()[40](\"/etc/passwd\").read() }}" String +) ENGINE = MergeTree ORDER BY "{{ \"\".__class__.__mro__[2].__subclasses__()[40](\"/etc/passwd\").read() }}" SETTINGS min_bytes_for_wide_part = '100G', replace_long_file_name_to_hash = 1; + +INSERT INTO test ("0") SELECT 'Hello, world!'; +SELECT count() FROM test; + +DETACH TABLE test; +ATTACH TABLE test; + +SELECT count() FROM test; + +INSERT INTO test ("1") VALUES ('Hello, world!'); +SELECT count() FROM test; + +DROP TABLE IF EXISTS test_r1 SYNC; +DROP TABLE IF EXISTS test_r2 SYNC; + +CREATE TABLE test_r1 AS test ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_01666', 'r1') ORDER BY "\\" SETTINGS min_bytes_for_wide_part = '100G', replace_long_file_name_to_hash = 1; +INSERT INTO test_r1 SELECT * FROM test; +CREATE TABLE test_r2 AS test ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_01666', 'r2') ORDER BY "\\" SETTINGS min_bytes_for_wide_part = '100G', replace_long_file_name_to_hash = 1; + +SYSTEM SYNC REPLICA test_r2 STRICT; + +SELECT count() FROM test_r1; +SELECT count() FROM test_r2; + +DROP TABLE test_r1; +DROP TABLE test_r2; + +SELECT groupArray(name) FROM system.columns WHERE database = currentDatabase() AND table = 'test'; + +DROP TABLE test; + +SELECT 'undefined','undef','null','NULL','(null)','nil','NIL','true','false','True','False','TRUE','FALSE','None','hasOwnProperty','then','\\','\\\\','0','1','1.00','$1.00','1/2','1E2','1E02','1E+02','-1','-1.00','-$1.00','-1/2','-1E2','-1E02','-1E+02','1/0','0/0','-2147483648/-1','-9223372036854775808/-1','-0','-0.0','+0','+0.0','0.00','0..0','.','0.0.0','0,00','0,,0',',','0,0,0','0.0/0','1.0/0.0','0.0/0.0','1,0/0,0','0,0/0,0','--1','-','-.','-,','999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999','NaN','Infinity','-Infinity','INF','1#INF','-1#IND','1#QNAN','1#SNAN','1#IND','0x0','0xffffffff','0xffffffffffffffff','0xabad1dea','123456789012345678901234567890123456789','1,000.00','1 000.00','1\'000.00','1,000,000.00','1 000 000.00','1\'000\'000.00','1.000,00','1 000,00','1\'000,00','1.000.000,00','1 000 000,00','1\'000\'000,00','01000','08','09','2.2250738585072011e-308',',./;\'[]\\-=','<>?:"{}|_+','!@#$%^&*()`~','\\u0001\\u0002\\u0003\\u0004\\u0005\\u0006\\u0007\b\\u000e\\u000f\\u0010\\u0011\\u0012\\u0013\\u0014\\u0015\\u0016\\u0017\\u0018\\u0019\\u001a\\u001b\\u001c\\u001d\\u001e\\u001f','€‚ƒ„†‡ˆ‰Š‹ŒŽ‘’“”•–—˜™š›œžŸ','\t\\u000b\f …             ​

   ','­؀؁؂؃؄؅؜۝܏᠎​‌‍‎‏‪‫‬‭‮⁠⁡⁢⁣⁤⁦⁧⁨⁩𑂽𛲠𛲡𛲢𛲣𝅳𝅴𝅵𝅶𝅷𝅸𝅹𝅺󠀁󠀠󠀡󠀢󠀣󠀤󠀥󠀦󠀧󠀨󠀩󠀪󠀫󠀬󠀭󠀮󠀯󠀰󠀱󠀲󠀳󠀴󠀵󠀶󠀷󠀸󠀹󠀺󠀻󠀼󠀽󠀾󠀿󠁀󠁁󠁂󠁃󠁄󠁅󠁆󠁇󠁈󠁉󠁊󠁋󠁌󠁍󠁎󠁏󠁐󠁑󠁒󠁓󠁔󠁕󠁖󠁗󠁘󠁙󠁚󠁛󠁜󠁝󠁞󠁟󠁠󠁡󠁢󠁣󠁤󠁥󠁦󠁧󠁨󠁩󠁪󠁫󠁬󠁭󠁮󠁯󠁰󠁱󠁲󠁳󠁴󠁵󠁶󠁷󠁸󠁹󠁺󠁻󠁼󠁽󠁾󠁿','','￾','Ω≈ç√∫˜µ≤≥÷','åß∂ƒ©˙∆˚¬…æ','œ∑´®†¥¨ˆøπ“‘','¡™£¢∞§¶•ªº–≠','¸˛Ç◊ı˜Â¯˘¿','ÅÍÎÏ˝ÓÔÒÚÆ☃','Œ„´‰ˇÁ¨ˆØ∏”’','`⁄€‹›fifl‡°·‚—±','⅛⅜⅝⅞','ЁЂЃЄЅІЇЈЉЊЋЌЍЎЏАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюя','٠١٢٣٤٥٦٧٨٩','⁰⁴⁵','₀₁₂','⁰⁴⁵₀₁₂','ด้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็ ด้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็ ด้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็','\'','"','\'\'','""','\'"\'','"\'\'\'\'"\'"','"\'"\'"\'\'\'\'"','<foo val=“bar” />','<foo val=”bar“ />','<foo val=`bar\' />','田中さんにあげて下さい','パーティーへ行かないか','和製漢語','部落格','사회과학원 어학연구소','찦차를 타고 온 펲시맨과 쑛다리 똠방각하','社會科學院語學研究所','울란바토르','𠜎𠜱𠝹𠱓𠱸𠲖𠳏','𐐜 𐐔𐐇𐐝𐐀𐐡𐐇𐐓 𐐙𐐊𐐡𐐝𐐓/𐐝𐐇𐐗𐐊𐐤𐐔 𐐒𐐋𐐗 𐐒𐐌 𐐜 𐐡𐐀𐐖𐐇𐐤𐐓𐐝 𐐱𐑂 𐑄 𐐔𐐇𐐝𐐀𐐡𐐇𐐓 𐐏𐐆𐐅𐐤𐐆𐐚𐐊𐐡𐐝𐐆𐐓𐐆','表ポあA鷗ŒéB逍Üߪąñ丂㐀𠀀','Ⱥ','Ⱦ','ヽ༼ຈل͜ຈ༽ノ ヽ༼ຈل͜ຈ༽ノ','(。◕ ∀ ◕。)','`ィ(´∀`∩','__ロ(,_,*)','・( ̄∀ ̄)・:*:','゚・✿ヾ╲(。◕‿◕。)╱✿・゚',',。・:*:・゜’( ☻ ω ☻ )。・:*:・゜’','(╯°□°)╯︵ ┻━┻)','(ノಥ益ಥ)ノ ┻━┻','┬─┬ノ( º _ ºノ)','( ͡° ͜ʖ ͡°)','¯\\_(ツ)_/¯','😍','👩🏽','👨‍🦰 👨🏿‍🦰 👨‍🦱 👨🏿‍🦱 🦹🏿‍♂️','👾 🙇 💁 🙅 🙆 🙋 🙎 🙍','🐵 🙈 🙉 🙊','❤️ 💔 💌 💕 💞 💓 💗 💖 💘 💝 💟 💜 💛 💚 💙','✋🏿 💪🏿 👐🏿 🙌🏿 👏🏿 🙏🏿','👨‍👩‍👦 👨‍👩‍👧‍👦 👨‍👨‍👦 👩‍👩‍👧 👨‍👦 👨‍👧‍👦 👩‍👦 👩‍👧‍👦','🚾 🆒 🆓 🆕 🆖 🆗 🆙 🏧','0️⃣ 1️⃣ 2️⃣ 3️⃣ 4️⃣ 5️⃣ 6️⃣ 7️⃣ 8️⃣ 9️⃣ 🔟','🇺🇸🇷🇺🇸 🇦🇫🇦🇲🇸','🇺🇸🇷🇺🇸🇦🇫🇦🇲','🇺🇸🇷🇺🇸🇦','123','١٢٣','ثم نفس سقطت وبالتحديد،, جزيرتي باستخدام أن دنو. إذ هنا؟ الستار وتنصيب كان. أهّل ايطاليا، بريطانيا-فرنسا قد أخذ. سليمان، إتفاقية بين ما , يذكر الحدود أي بعد, معاملة بولندا، الإطلاق عل إيو.','בְּרֵאשִׁית, בָּרָא אֱלֹהִים, אֵת הַשָּׁמַיִם, וְאֵת הָאָרֶץ','הָיְתָהtestالصفحات التّحول','﷽','ﷺ','مُنَاقَشَةُ سُبُلِ اِسْتِخْدَامِ اللُّغَةِ فِي النُّظُمِ الْقَائِمَةِ وَفِيم يَخُصَّ التَّطْبِيقَاتُ الْحاسُوبِيَّةُ، ','᚛ᚄᚓᚐᚋᚒᚄ ᚑᚄᚂᚑᚏᚅ᚜‪‪‪','‪‪᚛                 ᚜‪','‪‪test‪','‫test‫','
test
','test⁠test‫','⁦test⁧','Ṱ̺̺̕o͞ ̷i̲̬͇̪͙n̝̗͕v̟̜̘̦͟o̶̙̰̠kè͚̮̺̪̹̱̤ ̖t̝͕̳̣̻̪͞h̼͓̲̦̳̘̲e͇̣̰̦̬͎ ̢̼̻̱̘h͚͎͙̜̣̲ͅi̦̲̣̰̤v̻͍e̺̭̳̪̰-m̢iͅn̖̺̞̲̯̰d̵̼̟͙̩̼̘̳ ̞̥̱̳̭r̛̗̘e͙p͠r̼̞̻̭̗e̺̠̣͟s̘͇̳͍̝͉e͉̥̯̞̲͚̬͜ǹ̬͎͎̟̖͇̤t͍̬̤͓̼̭͘ͅi̪̱n͠g̴͉ ͏͉ͅc̬̟h͡a̫̻̯͘o̫̟̖͍̙̝͉s̗̦̲.̨̹͈̣','̡͓̞ͅI̗̘̦͝n͇͇͙v̮̫ok̲̫̙͈i̖͙̭̹̠̞n̡̻̮̣̺g̲͈͙̭͙̬͎ ̰t͔̦h̞̲e̢̤ ͍̬̲͖f̴̘͕̣è͖ẹ̥̩l͖͔͚i͓͚̦͠n͖͍̗͓̳̮g͍ ̨o͚̪͡f̘̣̬ ̖̘͖̟͙̮c҉͔̫͖͓͇͖ͅh̵̤̣͚͔á̗̼͕ͅo̼̣̥s̱͈̺̖̦̻͢.̛̖̞̠̫̰','̗̺͖̹̯͓Ṯ̤͍̥͇͈h̲́e͏͓̼̗̙̼̣͔ ͇̜̱̠͓͍ͅN͕͠e̗̱z̘̝̜̺͙p̤̺̹͍̯͚e̠̻̠͜r̨̤͍̺̖͔̖̖d̠̟̭̬̝͟i̦͖̩͓͔̤a̠̗̬͉̙n͚͜ ̻̞̰͚ͅh̵͉i̳̞v̢͇ḙ͎͟-҉̭̩̼͔m̤̭̫i͕͇̝̦n̗͙ḍ̟ ̯̲͕͞ǫ̟̯̰̲͙̻̝f ̪̰̰̗̖̭̘͘c̦͍̲̞͍̩̙ḥ͚a̮͎̟̙͜ơ̩̹͎s̤.̝̝ ҉Z̡̖̜͖̰̣͉̜a͖̰͙̬͡l̲̫̳͍̩g̡̟̼̱͚̞̬ͅo̗͜.̟','̦H̬̤̗̤͝e͜ ̜̥̝̻͍̟́w̕h̖̯͓o̝͙̖͎̱̮ ҉̺̙̞̟͈W̷̼̭a̺̪͍į͈͕̭͙̯̜t̶̼̮s̘͙͖̕ ̠̫̠B̻͍͙͉̳ͅe̵h̵̬͇̫͙i̹͓̳̳̮͎̫̕n͟d̴̪̜̖ ̰͉̩͇͙̲͞ͅT͖̼͓̪͢h͏͓̮̻e̬̝̟ͅ ̤̹̝W͙̞̝͔͇͝ͅa͏͓͔̹̼̣l̴͔̰̤̟͔ḽ̫.͕','Z̮̞̠͙͔ͅḀ̗̞͈̻̗Ḷ͙͎̯̹̞͓G̻O̭̗̮','˙ɐnbᴉlɐ ɐuƃɐɯ ǝɹolop ʇǝ ǝɹoqɐl ʇn ʇunpᴉpᴉɔuᴉ ɹodɯǝʇ poɯsnᴉǝ op pǝs \'ʇᴉlǝ ƃuᴉɔsᴉdᴉpɐ ɹnʇǝʇɔǝsuoɔ \'ʇǝɯɐ ʇᴉs ɹolop ɯnsdᴉ ɯǝɹo˥','00˙Ɩ$-','The quick brown fox jumps over the lazy dog','𝐓𝐡𝐞 𝐪𝐮𝐢𝐜𝐤 𝐛𝐫𝐨𝐰𝐧 𝐟𝐨𝐱 𝐣𝐮𝐦𝐩𝐬 𝐨𝐯𝐞𝐫 𝐭𝐡𝐞 𝐥𝐚𝐳𝐲 𝐝𝐨𝐠','𝕿𝖍𝖊 𝖖𝖚𝖎𝖈𝖐 𝖇𝖗𝖔𝖜𝖓 𝖋𝖔𝖝 𝖏𝖚𝖒𝖕𝖘 𝖔𝖛𝖊𝖗 𝖙𝖍𝖊 𝖑𝖆𝖟𝖞 𝖉𝖔𝖌','𝑻𝒉𝒆 𝒒𝒖𝒊𝒄𝒌 𝒃𝒓𝒐𝒘𝒏 𝒇𝒐𝒙 𝒋𝒖𝒎𝒑𝒔 𝒐𝒗𝒆𝒓 𝒕𝒉𝒆 𝒍𝒂𝒛𝒚 𝒅𝒐𝒈','𝓣𝓱𝓮 𝓺𝓾𝓲𝓬𝓴 𝓫𝓻𝓸𝔀𝓷 𝓯𝓸𝔁 𝓳𝓾𝓶𝓹𝓼 𝓸𝓿𝓮𝓻 𝓽𝓱𝓮 𝓵𝓪𝔃𝔂 𝓭𝓸𝓰','𝕋𝕙𝕖 𝕢𝕦𝕚𝕔𝕜 𝕓𝕣𝕠𝕨𝕟 𝕗𝕠𝕩 𝕛𝕦𝕞𝕡𝕤 𝕠𝕧𝕖𝕣 𝕥𝕙𝕖 𝕝𝕒𝕫𝕪 𝕕𝕠𝕘','𝚃𝚑𝚎 𝚚𝚞𝚒𝚌𝚔 𝚋𝚛𝚘𝚠𝚗 𝚏𝚘𝚡 𝚓𝚞𝚖𝚙𝚜 𝚘𝚟𝚎𝚛 𝚝𝚑𝚎 𝚕𝚊𝚣𝚢 𝚍𝚘𝚐','⒯⒣⒠ ⒬⒰⒤⒞⒦ ⒝⒭⒪⒲⒩ ⒡⒪⒳ ⒥⒰⒨⒫⒮ ⒪⒱⒠⒭ ⒯⒣⒠ ⒧⒜⒵⒴ ⒟⒪⒢','<script>alert(123)</script>','<script>alert('123');</script>','<img src=x onerror=alert(123) />','<svg><script>123<1>alert(123)</script>','"><script>alert(123)</script>','\'><script>alert(123)</script>','><script>alert(123)</script>','</script><script>alert(123)</script>','< / script >< script >alert(123)< / script >',' onfocus=JaVaSCript:alert(123) autofocus','" onfocus=JaVaSCript:alert(123) autofocus','\' onfocus=JaVaSCript:alert(123) autofocus','<script>alert(123)</script>','<sc<script>ript>alert(123)</sc</script>ript>','--><script>alert(123)</script>','";alert(123);t="','\';alert(123);t=\'','JavaSCript:alert(123)',';alert(123);','src=JaVaSCript:prompt(132)','"><script>alert(123);</script x="','\'><script>alert(123);</script x=\'','><script>alert(123);</script x=','" autofocus onkeyup="javascript:alert(123)','\' autofocus onkeyup=\'javascript:alert(123)','<script\\x20type="text/javascript">javascript:alert(1);</script>','<script\\x3Etype="text/javascript">javascript:alert(1);</script>','<script\\x0Dtype="text/javascript">javascript:alert(1);</script>','<script\\x09type="text/javascript">javascript:alert(1);</script>','<script\\x0Ctype="text/javascript">javascript:alert(1);</script>','<script\\x2Ftype="text/javascript">javascript:alert(1);</script>','<script\\x0Atype="text/javascript">javascript:alert(1);</script>','\'`"><\\x3Cscript>javascript:alert(1)</script>','\'`"><\\x00script>javascript:alert(1)</script>','ABC<div style="x\\x3Aexpression(javascript:alert(1)">DEF','ABC<div style="x:expression\\x5C(javascript:alert(1)">DEF','ABC<div style="x:expression\\x00(javascript:alert(1)">DEF','ABC<div style="x:exp\\x00ression(javascript:alert(1)">DEF','ABC<div style="x:exp\\x5Cression(javascript:alert(1)">DEF','ABC<div style="x:\\x0Aexpression(javascript:alert(1)">DEF','ABC<div style="x:\\x09expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE3\\x80\\x80expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x84expression(javascript:alert(1)">DEF','ABC<div style="x:\\xC2\\xA0expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x80expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x8Aexpression(javascript:alert(1)">DEF','ABC<div 
style="x:\\x0Dexpression(javascript:alert(1)">DEF','ABC<div style="x:\\x0Cexpression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x87expression(javascript:alert(1)">DEF','ABC<div style="x:\\xEF\\xBB\\xBFexpression(javascript:alert(1)">DEF','ABC<div style="x:\\x20expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x88expression(javascript:alert(1)">DEF','ABC<div style="x:\\x00expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x8Bexpression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x86expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x85expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x82expression(javascript:alert(1)">DEF','ABC<div style="x:\\x0Bexpression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x81expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x83expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x89expression(javascript:alert(1)">DEF','<a href="\\x0Bjavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x0Fjavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xC2\\xA0javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x05javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE1\\xA0\\x8Ejavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x18javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x11javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x88javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x89javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x80javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x17javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x03javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x0Ejavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x1Ajavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x00javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x10javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x82javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x20javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x13javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x09javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x8Ajavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x14javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x19javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\xAFjavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x1Fjavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x81javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x1Djavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x87javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x07javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE1\\x9A\\x80javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x83javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x04javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x01javascript:javascript:alert(1)" 
id="fuzzelement1">test</a>','<a href="\\x08javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x84javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x86javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE3\\x80\\x80javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x12javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x0Djavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x0Ajavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x0Cjavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x15javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\xA8javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x16javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x02javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x1Bjavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x06javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\xA9javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x85javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x1Ejavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x81\\x9Fjavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x1Cjavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="javascript\\x00:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="javascript\\x3A:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="javascript\\x09:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="javascript\\x0D:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="javascript\\x0A:javascript:alert(1)" id="fuzzelement1">test</a>','`"\'><img src=xxx:x \\x0Aonerror=javascript:alert(1)>','`"\'><img src=xxx:x \\x22onerror=javascript:alert(1)>','`"\'><img src=xxx:x \\x0Bonerror=javascript:alert(1)>','`"\'><img src=xxx:x \\x0Donerror=javascript:alert(1)>','`"\'><img src=xxx:x \\x2Fonerror=javascript:alert(1)>','`"\'><img src=xxx:x \\x09onerror=javascript:alert(1)>','`"\'><img src=xxx:x \\x0Conerror=javascript:alert(1)>','`"\'><img src=xxx:x \\x00onerror=javascript:alert(1)>','`"\'><img src=xxx:x \\x27onerror=javascript:alert(1)>','`"\'><img src=xxx:x 
\\x20onerror=javascript:alert(1)>','"`\'><script>\\x3Bjavascript:alert(1)</script>','"`\'><script>\\x0Djavascript:alert(1)</script>','"`\'><script>\\xEF\\xBB\\xBFjavascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x81javascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x84javascript:alert(1)</script>','"`\'><script>\\xE3\\x80\\x80javascript:alert(1)</script>','"`\'><script>\\x09javascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x89javascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x85javascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x88javascript:alert(1)</script>','"`\'><script>\\x00javascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\xA8javascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x8Ajavascript:alert(1)</script>','"`\'><script>\\xE1\\x9A\\x80javascript:alert(1)</script>','"`\'><script>\\x0Cjavascript:alert(1)</script>','"`\'><script>\\x2Bjavascript:alert(1)</script>','"`\'><script>\\xF0\\x90\\x96\\x9Ajavascript:alert(1)</script>','"`\'><script>-javascript:alert(1)</script>','"`\'><script>\\x0Ajavascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\xAFjavascript:alert(1)</script>','"`\'><script>\\x7Ejavascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x87javascript:alert(1)</script>','"`\'><script>\\xE2\\x81\\x9Fjavascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\xA9javascript:alert(1)</script>','"`\'><script>\\xC2\\x85javascript:alert(1)</script>','"`\'><script>\\xEF\\xBF\\xAEjavascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x83javascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x8Bjavascript:alert(1)</script>','"`\'><script>\\xEF\\xBF\\xBEjavascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x80javascript:alert(1)</script>','"`\'><script>\\x21javascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x82javascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x86javascript:alert(1)</script>','"`\'><script>\\xE1\\xA0\\x8Ejavascript:alert(1)</script>','"`\'><script>\\x0Bjavascript:alert(1)</script>','"`\'><script>\\x20javascript:alert(1)</script>','"`\'><script>\\xC2\\xA0javascript:alert(1)</script>','<img \\x00src=x onerror="alert(1)">','<img \\x47src=x onerror="javascript:alert(1)">','<img \\x11src=x onerror="javascript:alert(1)">','<img \\x12src=x onerror="javascript:alert(1)">','<img\\x47src=x onerror="javascript:alert(1)">','<img\\x10src=x onerror="javascript:alert(1)">','<img\\x13src=x onerror="javascript:alert(1)">','<img\\x32src=x onerror="javascript:alert(1)">','<img\\x11src=x onerror="javascript:alert(1)">','<img \\x34src=x onerror="javascript:alert(1)">','<img \\x39src=x onerror="javascript:alert(1)">','<img \\x00src=x onerror="javascript:alert(1)">','<img src\\x09=x onerror="javascript:alert(1)">','<img src\\x10=x onerror="javascript:alert(1)">','<img src\\x13=x onerror="javascript:alert(1)">','<img src\\x32=x onerror="javascript:alert(1)">','<img src\\x12=x onerror="javascript:alert(1)">','<img src\\x11=x onerror="javascript:alert(1)">','<img src\\x00=x onerror="javascript:alert(1)">','<img src\\x47=x onerror="javascript:alert(1)">','<img src=x\\x09onerror="javascript:alert(1)">','<img src=x\\x10onerror="javascript:alert(1)">','<img src=x\\x11onerror="javascript:alert(1)">','<img src=x\\x12onerror="javascript:alert(1)">','<img src=x\\x13onerror="javascript:alert(1)">','<img[a][b][c]src[d]=x[e]onerror=[f]"alert(1)">','<img src=x onerror=\\x09"javascript:alert(1)">','<img src=x onerror=\\x10"javascript:alert(1)">','<img src=x onerror=\\x11"javascript:alert(1)">','<img src=x onerror=\\x12"javascript:alert(1)">','<img src=x 
onerror=\\x32"javascript:alert(1)">','<img src=x onerror=\\x00"javascript:alert(1)">','<a href=java script:javascript:alert(1)>XXX</a>','<img src="x` `<script>javascript:alert(1)</script>"` `>','<img src onerror /" \'"= alt=javascript:alert(1)//">','<title onpropertychange=javascript:alert(1)>','<a href=http://foo.bar/#x=`y></a><img alt="`><img src=x:x onerror=javascript:alert(1)></a>">','<!--[if]><script>javascript:alert(1)</script -->','<!--[if<img src=x onerror=javascript:alert(1)//]> -->','<script src="/\\%(jscript)s"></script>','<script src="\\\\%(jscript)s"></script>','<IMG """><SCRIPT>alert("XSS")</SCRIPT>">','<IMG SRC=javascript:alert(String.fromCharCode(88,83,83))>','<IMG SRC=# onmouseover="alert(\'xxs\')">','<IMG SRC= onmouseover="alert(\'xxs\')">','<IMG onmouseover="alert(\'xxs\')">','<IMG SRC=javascript:alert('XSS')>','<IMG SRC=javascript:alert('XSS')>','<IMG SRC=javascript:alert('XSS')>','<IMG SRC="jav ascript:alert(\'XSS\');">','<IMG SRC="jav ascript:alert(\'XSS\');">','<IMG SRC="jav ascript:alert(\'XSS\');">','<IMG SRC="jav ascript:alert(\'XSS\');">','perl -e \'print "<IMG SRC=java\\0script:alert(\\"XSS\\")>";\' > out','<IMG SRC="  javascript:alert(\'XSS\');">','<SCRIPT/XSS SRC="http://ha.ckers.org/xss.js"></SCRIPT>','<BODY onload!#$%&()*~+-_.,:;?@[/|\\]^`=alert("XSS")>','<SCRIPT/SRC="http://ha.ckers.org/xss.js"></SCRIPT>','<<SCRIPT>alert("XSS");//<</SCRIPT>','<SCRIPT SRC=http://ha.ckers.org/xss.js?< B >','<SCRIPT SRC=//ha.ckers.org/.j>','<IMG SRC="javascript:alert(\'XSS\')"','<iframe src=http://ha.ckers.org/scriptlet.html <','\\";alert(\'XSS\');//','<u oncopy=alert()> Copy me</u>','<i onwheel=alert(1)> Scroll over me </i>','<plaintext>','http://a/%%30%30','</textarea><script>alert(123)</script>','1;DROP TABLE users','1\'; DROP TABLE users-- 1','\' OR 1=1 -- 1','\' OR \'1\'=\'1','\'; EXEC sp_MSForEachTable \'DROP TABLE ?\'; --',' ','%','_','--','--version','--help','$USER','/dev/null; touch /tmp/blns.fail ; echo','`touch /tmp/blns.fail`','$(touch /tmp/blns.fail)','@{[system "touch /tmp/blns.fail"]}','eval("puts \'hello world\'")','System("ls -al /")','`ls -al /`','Kernel.exec("ls -al /")','Kernel.exit(1)','%x(\'ls -al /\')','<?xml version="1.0" encoding="ISO-8859-1"?><!DOCTYPE foo [ <!ELEMENT foo ANY ><!ENTITY xxe SYSTEM "file:///etc/passwd" >]><foo>&xxe;</foo>','$HOME','$ENV{\'HOME\'}','%d','%s%s%s%s%s','{0}','%*.*s','%@','%n','File:///','../../../../../../../../../../../etc/passwd%00','../../../../../../../../../../../etc/hosts','() { 0; }; touch /tmp/blns.shellshock1.fail;','() { _; } >_[$($())] { touch /tmp/blns.shellshock2.fail; }','<<< %s(un=\'%s\') = %u','+++ATH0','CON','PRN','AUX','CLOCK$','NUL','A:','ZZ:','COM1','LPT1','LPT2','LPT3','COM2','COM3','COM4','DCC SEND STARTKEYLOGGER 0 0 0','Scunthorpe General Hospital','Penistone Community Church','Lightwater Country Park','Jimmy Clitheroe','Horniman Museum','shitake mushrooms','RomansInSussex.co.uk','http://www.cum.qc.ca/','Craig Cockburn, Software Specialist','Linda Callahan','Dr. Herman I. Libshitz','magna cum laude','Super Bowl XXX','medieval erection of parapets','evaluate','mocha','expression','Arsenal canal','classic','Tyson Gay','Dick Van Dyke','basement','If you\'re reading this, you\'ve been in a coma for almost 20 years now. We\'re trying a new technique. We don\'t know where this message will end up in your dream, but we hope it works. Please wake up, we miss you.','Roses are \\u001b[0;31mred\\u001b[0m, violets are \\u001b[0;34mblue. 
Hope you enjoy terminal hue','But now...\\u001b[20Cfor my greatest trick...\\u001b[8m','The quic\b\b\b\b\b\bk brown fo\\u0007\\u0007\\u0007\\u0007\\u0007\\u0007\\u0007\\u0007\\u0007\\u0007\\u0007x... [Beeeep]','Powerلُلُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ冗','🏳0🌈️','జ్ఞ‌ా','گچپژ','{% print \'x\' * 64 * 1024**3 %}','{{ "".__class__.__mro__[2].__subclasses__()[40]("/etc/passwd").read() }}' FORMAT PrettyCompactNoEscapes; + +SELECT JSONExtract( + '{"x": [\n "", \n "undefined", \n "undef", \n "null", \n "NULL", \n "(null)", \n "nil", \n "NIL", \n "true", \n "false", \n "True", \n "False", \n "TRUE", \n "FALSE", \n "None", \n "hasOwnProperty", \n "then", \n "\\\\", \n "\\\\\\\\", \n "0", \n "1", \n "1.00", \n "$1.00", \n "1/2", \n "1E2", \n "1E02", \n "1E+02", \n "-1", \n "-1.00", \n "-$1.00", \n "-1/2", \n "-1E2", \n "-1E02", \n "-1E+02", \n "1/0", \n "0/0", \n "-2147483648/-1", \n "-9223372036854775808/-1", \n "-0", \n "-0.0", \n "+0", \n "+0.0", \n "0.00", \n "0..0", \n ".", \n "0.0.0", \n "0,00", \n "0,,0", \n ",", \n "0,0,0", \n "0.0/0", \n "1.0/0.0", \n "0.0/0.0", \n "1,0/0,0", \n "0,0/0,0", \n "--1", \n "-", \n "-.", \n "-,", \n "999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999", \n "NaN", \n "Infinity", \n "-Infinity", \n "INF", \n "1#INF", \n "-1#IND", \n "1#QNAN", \n "1#SNAN", \n "1#IND", \n "0x0", \n "0xffffffff", \n "0xffffffffffffffff", \n "0xabad1dea", \n "123456789012345678901234567890123456789", \n "1,000.00", \n "1 000.00", \n "1\'000.00", \n "1,000,000.00", \n "1 000 000.00", \n "1\'000\'000.00", \n "1.000,00", \n "1 000,00", \n "1\'000,00", \n "1.000.000,00", \n "1 000 000,00", \n "1\'000\'000,00", \n "01000", \n "08", \n "09", \n "2.2250738585072011e-308", \n ",./;\'[]\\\\-=", \n "<>?:\\"{}|_+", \n "!@#$%^&*()`~", \n "\\u0001\\u0002\\u0003\\u0004\\u0005\\u0006\\u0007\\b\\u000e\\u000f\\u0010\\u0011\\u0012\\u0013\\u0014\\u0015\\u0016\\u0017\\u0018\\u0019\\u001a\\u001b\\u001c\\u001d\\u001e\\u001f", \n "€‚ƒ„†‡ˆ‰Š‹ŒŽ‘’“”•–—˜™š›œžŸ", \n "\\t\\u000b\\f …             ​

   ", \n "­؀؁؂؃؄؅؜۝܏᠎​‌‍‎‏‪‫‬‭‮⁠⁡⁢⁣⁤⁦⁧⁨⁩𑂽𛲠𛲡𛲢𛲣𝅳𝅴𝅵𝅶𝅷𝅸𝅹𝅺󠀁󠀠󠀡󠀢󠀣󠀤󠀥󠀦󠀧󠀨󠀩󠀪󠀫󠀬󠀭󠀮󠀯󠀰󠀱󠀲󠀳󠀴󠀵󠀶󠀷󠀸󠀹󠀺󠀻󠀼󠀽󠀾󠀿󠁀󠁁󠁂󠁃󠁄󠁅󠁆󠁇󠁈󠁉󠁊󠁋󠁌󠁍󠁎󠁏󠁐󠁑󠁒󠁓󠁔󠁕󠁖󠁗󠁘󠁙󠁚󠁛󠁜󠁝󠁞󠁟󠁠󠁡󠁢󠁣󠁤󠁥󠁦󠁧󠁨󠁩󠁪󠁫󠁬󠁭󠁮󠁯󠁰󠁱󠁲󠁳󠁴󠁵󠁶󠁷󠁸󠁹󠁺󠁻󠁼󠁽󠁾󠁿", \n "", \n "￾", \n "Ω≈ç√∫˜µ≤≥÷", \n "åß∂ƒ©˙∆˚¬…æ", \n "œ∑´®†¥¨ˆøπ“‘", \n "¡™£¢∞§¶•ªº–≠", \n "¸˛Ç◊ı˜Â¯˘¿", \n "ÅÍÎÏ˝ÓÔÒÚÆ☃", \n "Œ„´‰ˇÁ¨ˆØ∏”’", \n "`⁄€‹›fifl‡°·‚—±", \n "⅛⅜⅝⅞", \n "ЁЂЃЄЅІЇЈЉЊЋЌЍЎЏАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюя", \n "٠١٢٣٤٥٦٧٨٩", \n "⁰⁴⁵", \n "₀₁₂", \n "⁰⁴⁵₀₁₂", \n "ด้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็ ด้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็ ด้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็", \n "\'", \n "\\"", \n "\'\'", \n "\\"\\"", \n "\'\\"\'", \n "\\"\'\'\'\'\\"\'\\"", \n "\\"\'\\"\'\\"\'\'\'\'\\"", \n "<foo val=“bar” />", \n "<foo val=“bar” />", \n "<foo val=”bar“ />", \n "<foo val=`bar\' />", \n "田中さんにあげて下さい", \n "パーティーへ行かないか", \n "和製漢語", \n "部落格", \n "사회과학원 어학연구소", \n "찦차를 타고 온 펲시맨과 쑛다리 똠방각하", \n "社會科學院語學研究所", \n "울란바토르", \n "𠜎𠜱𠝹𠱓𠱸𠲖𠳏", \n "𐐜 𐐔𐐇𐐝𐐀𐐡𐐇𐐓 𐐙𐐊𐐡𐐝𐐓/𐐝𐐇𐐗𐐊𐐤𐐔 𐐒𐐋𐐗 𐐒𐐌 𐐜 𐐡𐐀𐐖𐐇𐐤𐐓𐐝 𐐱𐑂 𐑄 𐐔𐐇𐐝𐐀𐐡𐐇𐐓 𐐏𐐆𐐅𐐤𐐆𐐚𐐊𐐡𐐝𐐆𐐓𐐆",\n "表ポあA鷗ŒéB逍Üߪąñ丂㐀𠀀", \n "Ⱥ", \n "Ⱦ", \n "ヽ༼ຈل͜ຈ༽ノ ヽ༼ຈل͜ຈ༽ノ", \n "(。◕ ∀ ◕。)", \n "`ィ(´∀`∩", \n "__ロ(,_,*)", \n "・( ̄∀ ̄)・:*:", \n "゚・✿ヾ╲(。◕‿◕。)╱✿・゚", \n ",。・:*:・゜’( ☻ ω ☻ )。・:*:・゜’", \n "(╯°□°)╯︵ ┻━┻)", \n "(ノಥ益ಥ)ノ ┻━┻", \n "┬─┬ノ( º _ ºノ)", \n "( ͡° ͜ʖ ͡°)", \n "¯\\\\_(ツ)_/¯", \n "😍", \n "👩🏽", \n "👨‍🦰 👨🏿‍🦰 👨‍🦱 👨🏿‍🦱 🦹🏿‍♂️", \n "👾 🙇 💁 🙅 🙆 🙋 🙎 🙍", \n "🐵 🙈 🙉 🙊", \n "❤️ 💔 💌 💕 💞 💓 💗 💖 💘 💝 💟 💜 💛 💚 💙", \n "✋🏿 💪🏿 👐🏿 🙌🏿 👏🏿 🙏🏿", \n "👨‍👩‍👦 👨‍👩‍👧‍👦 👨‍👨‍👦 👩‍👩‍👧 👨‍👦 👨‍👧‍👦 👩‍👦 👩‍👧‍👦", \n "🚾 🆒 🆓 🆕 🆖 🆗 🆙 🏧", \n "0️⃣ 1️⃣ 2️⃣ 3️⃣ 4️⃣ 5️⃣ 6️⃣ 7️⃣ 8️⃣ 9️⃣ 🔟", \n "🇺🇸🇷🇺🇸 🇦🇫🇦🇲🇸", \n "🇺🇸🇷🇺🇸🇦🇫🇦🇲", \n "🇺🇸🇷🇺🇸🇦", \n "123", \n "١٢٣", \n "ثم نفس سقطت وبالتحديد،, جزيرتي باستخدام أن دنو. إذ هنا؟ الستار وتنصيب كان. أهّل ايطاليا، بريطانيا-فرنسا قد أخذ. سليمان، إتفاقية بين ما, يذكر الحدود أي بعد, معاملة بولندا، الإطلاق عل إيو.", \n "בְּרֵאשִׁית, בָּרָא אֱלֹהִים, אֵת הַשָּׁמַיִם, וְאֵת הָאָרֶץ", \n "הָיְתָהtestالصفحات التّحول", \n "﷽", \n "ﷺ",\n "مُنَاقَشَةُ سُبُلِ اِسْتِخْدَامِ اللُّغَةِ فِي النُّظُمِ الْقَائِمَةِ وَفِيم يَخُصَّ التَّطْبِيقَاتُ الْحاسُوبِيَّةُ، ", \n "᚛ᚄᚓᚐᚋᚒᚄ ᚑᚄᚂᚑᚏᚅ᚜‪‪‪", \n "‪‪᚛                 ᚜‪",\n "‪‪test‪", \n "‫test‫", \n "
test
", \n "test⁠test‫", \n "⁦test⁧", \n "Ṱ̺̺̕o͞ ̷i̲̬͇̪͙n̝̗͕v̟̜̘̦͟o̶̙̰̠kè͚̮̺̪̹̱̤ ̖t̝͕̳̣̻̪͞h̼͓̲̦̳̘̲e͇̣̰̦̬͎ ̢̼̻̱̘h͚͎͙̜̣̲ͅi̦̲̣̰̤v̻͍e̺̭̳̪̰-m̢iͅn̖̺̞̲̯̰d̵̼̟͙̩̼̘̳ ̞̥̱̳̭r̛̗̘e͙p͠r̼̞̻̭̗e̺̠̣͟s̘͇̳͍̝͉e͉̥̯̞̲͚̬͜ǹ̬͎͎̟̖͇̤t͍̬̤͓̼̭͘ͅi̪̱n͠g̴͉ ͏͉ͅc̬̟h͡a̫̻̯͘o̫̟̖͍̙̝͉s̗̦̲.̨̹͈̣", \n "̡͓̞ͅI̗̘̦͝n͇͇͙v̮̫ok̲̫̙͈i̖͙̭̹̠̞n̡̻̮̣̺g̲͈͙̭͙̬͎ ̰t͔̦h̞̲e̢̤ ͍̬̲͖f̴̘͕̣è͖ẹ̥̩l͖͔͚i͓͚̦͠n͖͍̗͓̳̮g͍ ̨o͚̪͡f̘̣̬ ̖̘͖̟͙̮c҉͔̫͖͓͇͖ͅh̵̤̣͚͔á̗̼͕ͅo̼̣̥s̱͈̺̖̦̻͢.̛̖̞̠̫̰", \n "̗̺͖̹̯͓Ṯ̤͍̥͇͈h̲́e͏͓̼̗̙̼̣͔ ͇̜̱̠͓͍ͅN͕͠e̗̱z̘̝̜̺͙p̤̺̹͍̯͚e̠̻̠͜r̨̤͍̺̖͔̖̖d̠̟̭̬̝͟i̦͖̩͓͔̤a̠̗̬͉̙n͚͜ ̻̞̰͚ͅh̵͉i̳̞v̢͇ḙ͎͟-҉̭̩̼͔m̤̭̫i͕͇̝̦n̗͙ḍ̟ ̯̲͕͞ǫ̟̯̰̲͙̻̝f ̪̰̰̗̖̭̘͘c̦͍̲̞͍̩̙ḥ͚a̮͎̟̙͜ơ̩̹͎s̤.̝̝ ҉Z̡̖̜͖̰̣͉̜a͖̰͙̬͡l̲̫̳͍̩g̡̟̼̱͚̞̬ͅo̗͜.̟", \n "̦H̬̤̗̤͝e͜ ̜̥̝̻͍̟́w̕h̖̯͓o̝͙̖͎̱̮ ҉̺̙̞̟͈W̷̼̭a̺̪͍į͈͕̭͙̯̜t̶̼̮s̘͙͖̕ ̠̫̠B̻͍͙͉̳ͅe̵h̵̬͇̫͙i̹͓̳̳̮͎̫̕n͟d̴̪̜̖ ̰͉̩͇͙̲͞ͅT͖̼͓̪͢h͏͓̮̻e̬̝̟ͅ ̤̹̝W͙̞̝͔͇͝ͅa͏͓͔̹̼̣l̴͔̰̤̟͔ḽ̫.͕", \n "Z̮̞̠͙͔ͅḀ̗̞͈̻̗Ḷ͙͎̯̹̞͓G̻O̭̗̮", \n "˙ɐnbᴉlɐ ɐuƃɐɯ ǝɹolop ʇǝ ǝɹoqɐl ʇn ʇunpᴉpᴉɔuᴉ ɹodɯǝʇ poɯsnᴉǝ op pǝs \'ʇᴉlǝ ƃuᴉɔsᴉdᴉpɐ ɹnʇǝʇɔǝsuoɔ \'ʇǝɯɐ ʇᴉs ɹolop ɯnsdᴉ ɯǝɹo˥", \n "00˙Ɩ$-", \n "The quick brown fox jumps over the lazy dog", \n "𝐓𝐡𝐞 𝐪𝐮𝐢𝐜𝐤 𝐛𝐫𝐨𝐰𝐧 𝐟𝐨𝐱 𝐣𝐮𝐦𝐩𝐬 𝐨𝐯𝐞𝐫 𝐭𝐡𝐞 𝐥𝐚𝐳𝐲 𝐝𝐨𝐠", \n "𝕿𝖍𝖊 𝖖𝖚𝖎𝖈𝖐 𝖇𝖗𝖔𝖜𝖓 𝖋𝖔𝖝 𝖏𝖚𝖒𝖕𝖘 𝖔𝖛𝖊𝖗 𝖙𝖍𝖊 𝖑𝖆𝖟𝖞 𝖉𝖔𝖌", \n "𝑻𝒉𝒆 𝒒𝒖𝒊𝒄𝒌 𝒃𝒓𝒐𝒘𝒏 𝒇𝒐𝒙 𝒋𝒖𝒎𝒑𝒔 𝒐𝒗𝒆𝒓 𝒕𝒉𝒆 𝒍𝒂𝒛𝒚 𝒅𝒐𝒈", \n "𝓣𝓱𝓮 𝓺𝓾𝓲𝓬𝓴 𝓫𝓻𝓸𝔀𝓷 𝓯𝓸𝔁 𝓳𝓾𝓶𝓹𝓼 𝓸𝓿𝓮𝓻 𝓽𝓱𝓮 𝓵𝓪𝔃𝔂 𝓭𝓸𝓰", \n "𝕋𝕙𝕖 𝕢𝕦𝕚𝕔𝕜 𝕓𝕣𝕠𝕨𝕟 𝕗𝕠𝕩 𝕛𝕦𝕞𝕡𝕤 𝕠𝕧𝕖𝕣 𝕥𝕙𝕖 𝕝𝕒𝕫𝕪 𝕕𝕠𝕘", \n "𝚃𝚑𝚎 𝚚𝚞𝚒𝚌𝚔 𝚋𝚛𝚘𝚠𝚗 𝚏𝚘𝚡 𝚓𝚞𝚖𝚙𝚜 𝚘𝚟𝚎𝚛 𝚝𝚑𝚎 𝚕𝚊𝚣𝚢 𝚍𝚘𝚐", \n "⒯⒣⒠ ⒬⒰⒤⒞⒦ ⒝⒭⒪⒲⒩ ⒡⒪⒳ ⒥⒰⒨⒫⒮ ⒪⒱⒠⒭ ⒯⒣⒠ ⒧⒜⒵⒴ ⒟⒪⒢", \n "<script>alert(123)</script>", \n "<script>alert('123');</script>", \n "<img src=x onerror=alert(123) />", \n "<svg><script>123<1>alert(123)</script>", \n "\\"><script>alert(123)</script>", \n "\'><script>alert(123)</script>", \n "><script>alert(123)</script>", \n "</script><script>alert(123)</script>", \n "< / script >< script >alert(123)< / script >", \n " onfocus=JaVaSCript:alert(123) autofocus", \n "\\" onfocus=JaVaSCript:alert(123) autofocus", \n "\' onfocus=JaVaSCript:alert(123) autofocus", \n "<script>alert(123)</script>", \n "<sc<script>ript>alert(123)</sc</script>ript>", \n "--><script>alert(123)</script>", \n "\\";alert(123);t=\\"", \n "\';alert(123);t=\'", \n "JavaSCript:alert(123)", \n ";alert(123);", \n "src=JaVaSCript:prompt(132)", \n "\\"><script>alert(123);</script x=\\"", \n "\'><script>alert(123);</script x=\'", \n "><script>alert(123);</script x=", \n "\\" autofocus onkeyup=\\"javascript:alert(123)", \n "\' autofocus onkeyup=\'javascript:alert(123)", \n "<script\\\\x20type=\\"text/javascript\\">javascript:alert(1);</script>", \n "<script\\\\x3Etype=\\"text/javascript\\">javascript:alert(1);</script>", \n "<script\\\\x0Dtype=\\"text/javascript\\">javascript:alert(1);</script>", \n "<script\\\\x09type=\\"text/javascript\\">javascript:alert(1);</script>", \n "<script\\\\x0Ctype=\\"text/javascript\\">javascript:alert(1);</script>", \n "<script\\\\x2Ftype=\\"text/javascript\\">javascript:alert(1);</script>", \n "<script\\\\x0Atype=\\"text/javascript\\">javascript:alert(1);</script>", \n "\'`\\"><\\\\x3Cscript>javascript:alert(1)</script>", \n "\'`\\"><\\\\x00script>javascript:alert(1)</script>", \n "ABC<div style=\\"x\\\\x3Aexpression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:expression\\\\x5C(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:expression\\\\x00(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:exp\\\\x00ression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:exp\\\\x5Cression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\x0Aexpression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\x09expression(javascript:alert(1)\\">DEF", \n "ABC<div 
style=\\"x:\\\\xE3\\\\x80\\\\x80expression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\xE2\\\\x80\\\\x84expression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\xC2\\\\xA0expression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\xE2\\\\x80\\\\x80expression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\xE2\\\\x80\\\\x8Aexpression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\x0Dexpression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\x0Cexpression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\xE2\\\\x80\\\\x87expression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\xEF\\\\xBB\\\\xBFexpression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\x20expression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\xE2\\\\x80\\\\x88expression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\x00expression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\xE2\\\\x80\\\\x8Bexpression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\xE2\\\\x80\\\\x86expression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\xE2\\\\x80\\\\x85expression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\xE2\\\\x80\\\\x82expression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\x0Bexpression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\xE2\\\\x80\\\\x81expression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\xE2\\\\x80\\\\x83expression(javascript:alert(1)\\">DEF", \n "ABC<div style=\\"x:\\\\xE2\\\\x80\\\\x89expression(javascript:alert(1)\\">DEF", \n "<a href=\\"\\\\x0Bjavascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x0Fjavascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xC2\\\\xA0javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x05javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xE1\\\\xA0\\\\x8Ejavascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x18javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x11javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xE2\\\\x80\\\\x88javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xE2\\\\x80\\\\x89javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xE2\\\\x80\\\\x80javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x17javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x03javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x0Ejavascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x1Ajavascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x00javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x10javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xE2\\\\x80\\\\x82javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x20javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x13javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x09javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xE2\\\\x80\\\\x8Ajavascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n 
"<a href=\\"\\\\x14javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x19javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xE2\\\\x80\\\\xAFjavascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x1Fjavascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xE2\\\\x80\\\\x81javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x1Djavascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xE2\\\\x80\\\\x87javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x07javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xE1\\\\x9A\\\\x80javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xE2\\\\x80\\\\x83javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x04javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x01javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x08javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xE2\\\\x80\\\\x84javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xE2\\\\x80\\\\x86javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xE3\\\\x80\\\\x80javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x12javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x0Djavascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x0Ajavascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x0Cjavascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x15javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xE2\\\\x80\\\\xA8javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x16javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x02javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x1Bjavascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x06javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xE2\\\\x80\\\\xA9javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xE2\\\\x80\\\\x85javascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x1Ejavascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\xE2\\\\x81\\\\x9Fjavascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"\\\\x1Cjavascript:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"javascript\\\\x00:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"javascript\\\\x3A:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"javascript\\\\x09:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"javascript\\\\x0D:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "<a href=\\"javascript\\\\x0A:javascript:alert(1)\\" id=\\"fuzzelement1\\">test</a>", \n "`\\"\'><img src=xxx:x \\\\x0Aonerror=javascript:alert(1)>", \n "`\\"\'><img src=xxx:x \\\\x22onerror=javascript:alert(1)>", \n "`\\"\'><img src=xxx:x 
\\\\x0Bonerror=javascript:alert(1)>", \n "`\\"\'><img src=xxx:x \\\\x0Donerror=javascript:alert(1)>", \n "`\\"\'><img src=xxx:x \\\\x2Fonerror=javascript:alert(1)>", \n "`\\"\'><img src=xxx:x \\\\x09onerror=javascript:alert(1)>", \n "`\\"\'><img src=xxx:x \\\\x0Conerror=javascript:alert(1)>", \n "`\\"\'><img src=xxx:x \\\\x00onerror=javascript:alert(1)>", \n "`\\"\'><img src=xxx:x \\\\x27onerror=javascript:alert(1)>", \n "`\\"\'><img src=xxx:x \\\\x20onerror=javascript:alert(1)>", \n "\\"`\'><script>\\\\x3Bjavascript:alert(1)</script>", \n "\\"`\'><script>\\\\x0Djavascript:alert(1)</script>", \n "\\"`\'><script>\\\\xEF\\\\xBB\\\\xBFjavascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE2\\\\x80\\\\x81javascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE2\\\\x80\\\\x84javascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE3\\\\x80\\\\x80javascript:alert(1)</script>", \n "\\"`\'><script>\\\\x09javascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE2\\\\x80\\\\x89javascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE2\\\\x80\\\\x85javascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE2\\\\x80\\\\x88javascript:alert(1)</script>", \n "\\"`\'><script>\\\\x00javascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE2\\\\x80\\\\xA8javascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE2\\\\x80\\\\x8Ajavascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE1\\\\x9A\\\\x80javascript:alert(1)</script>", \n "\\"`\'><script>\\\\x0Cjavascript:alert(1)</script>", \n "\\"`\'><script>\\\\x2Bjavascript:alert(1)</script>", \n "\\"`\'><script>\\\\xF0\\\\x90\\\\x96\\\\x9Ajavascript:alert(1)</script>", \n "\\"`\'><script>-javascript:alert(1)</script>", \n "\\"`\'><script>\\\\x0Ajavascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE2\\\\x80\\\\xAFjavascript:alert(1)</script>", \n "\\"`\'><script>\\\\x7Ejavascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE2\\\\x80\\\\x87javascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE2\\\\x81\\\\x9Fjavascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE2\\\\x80\\\\xA9javascript:alert(1)</script>", \n "\\"`\'><script>\\\\xC2\\\\x85javascript:alert(1)</script>", \n "\\"`\'><script>\\\\xEF\\\\xBF\\\\xAEjavascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE2\\\\x80\\\\x83javascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE2\\\\x80\\\\x8Bjavascript:alert(1)</script>", \n "\\"`\'><script>\\\\xEF\\\\xBF\\\\xBEjavascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE2\\\\x80\\\\x80javascript:alert(1)</script>", \n "\\"`\'><script>\\\\x21javascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE2\\\\x80\\\\x82javascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE2\\\\x80\\\\x86javascript:alert(1)</script>", \n "\\"`\'><script>\\\\xE1\\\\xA0\\\\x8Ejavascript:alert(1)</script>", \n "\\"`\'><script>\\\\x0Bjavascript:alert(1)</script>", \n "\\"`\'><script>\\\\x20javascript:alert(1)</script>", \n "\\"`\'><script>\\\\xC2\\\\xA0javascript:alert(1)</script>", \n "<img \\\\x00src=x onerror=\\"alert(1)\\">", \n "<img \\\\x47src=x onerror=\\"javascript:alert(1)\\">", \n "<img \\\\x11src=x onerror=\\"javascript:alert(1)\\">", \n "<img \\\\x12src=x onerror=\\"javascript:alert(1)\\">", \n "<img\\\\x47src=x onerror=\\"javascript:alert(1)\\">", \n "<img\\\\x10src=x onerror=\\"javascript:alert(1)\\">", \n "<img\\\\x13src=x onerror=\\"javascript:alert(1)\\">", \n "<img\\\\x32src=x onerror=\\"javascript:alert(1)\\">", \n "<img\\\\x47src=x onerror=\\"javascript:alert(1)\\">", \n "<img\\\\x11src=x onerror=\\"javascript:alert(1)\\">", \n "<img \\\\x47src=x 
onerror=\\"javascript:alert(1)\\">", \n "<img \\\\x34src=x onerror=\\"javascript:alert(1)\\">", \n "<img \\\\x39src=x onerror=\\"javascript:alert(1)\\">", \n "<img \\\\x00src=x onerror=\\"javascript:alert(1)\\">", \n "<img src\\\\x09=x onerror=\\"javascript:alert(1)\\">", \n "<img src\\\\x10=x onerror=\\"javascript:alert(1)\\">", \n "<img src\\\\x13=x onerror=\\"javascript:alert(1)\\">", \n "<img src\\\\x32=x onerror=\\"javascript:alert(1)\\">", \n "<img src\\\\x12=x onerror=\\"javascript:alert(1)\\">", \n "<img src\\\\x11=x onerror=\\"javascript:alert(1)\\">", \n "<img src\\\\x00=x onerror=\\"javascript:alert(1)\\">", \n "<img src\\\\x47=x onerror=\\"javascript:alert(1)\\">", \n "<img src=x\\\\x09onerror=\\"javascript:alert(1)\\">", \n "<img src=x\\\\x10onerror=\\"javascript:alert(1)\\">", \n "<img src=x\\\\x11onerror=\\"javascript:alert(1)\\">", \n "<img src=x\\\\x12onerror=\\"javascript:alert(1)\\">", \n "<img src=x\\\\x13onerror=\\"javascript:alert(1)\\">", \n "<img[a][b][c]src[d]=x[e]onerror=[f]\\"alert(1)\\">", \n "<img src=x onerror=\\\\x09\\"javascript:alert(1)\\">", \n "<img src=x onerror=\\\\x10\\"javascript:alert(1)\\">", \n "<img src=x onerror=\\\\x11\\"javascript:alert(1)\\">", \n "<img src=x onerror=\\\\x12\\"javascript:alert(1)\\">", \n "<img src=x onerror=\\\\x32\\"javascript:alert(1)\\">", \n "<img src=x onerror=\\\\x00\\"javascript:alert(1)\\">", \n "<a href=java script:javascript:alert(1)>XXX</a>", \n "<img src=\\"x` `<script>javascript:alert(1)</script>\\"` `>", \n "<img src onerror /\\" \'\\"= alt=javascript:alert(1)//\\">", \n "<title onpropertychange=javascript:alert(1)>", \n "<a href=http://foo.bar/#x=`y></a><img alt=\\"`><img src=x:x onerror=javascript:alert(1)></a>\\">", \n "<!--[if]><script>javascript:alert(1)</script -->", \n "<!--[if<img src=x onerror=javascript:alert(1)//]> -->", \n "<script src=\\"/\\\\%(jscript)s\\"></script>", \n "<script src=\\"\\\\\\\\%(jscript)s\\"></script>", \n "<IMG \\"\\"\\"><SCRIPT>alert(\\"XSS\\")</SCRIPT>\\">", \n "<IMG SRC=javascript:alert(String.fromCharCode(88,83,83))>", \n "<IMG SRC=# onmouseover=\\"alert(\'xxs\')\\">", \n "<IMG SRC= onmouseover=\\"alert(\'xxs\')\\">", \n "<IMG onmouseover=\\"alert(\'xxs\')\\">", \n "<IMG SRC=javascript:alert('XSS')>", \n "<IMG SRC=javascript:alert('XSS')>", \n "<IMG SRC=javascript:alert('XSS')>", \n "<IMG SRC=\\"jav ascript:alert(\'XSS\');\\">", \n "<IMG SRC=\\"jav ascript:alert(\'XSS\');\\">", \n "<IMG SRC=\\"jav ascript:alert(\'XSS\');\\">", \n "<IMG SRC=\\"jav ascript:alert(\'XSS\');\\">", \n "perl -e \'print \\"<IMG SRC=java\\\\0script:alert(\\\\\\"XSS\\\\\\")>\\";\' > out", \n "<IMG SRC=\\"  javascript:alert(\'XSS\');\\">", \n "<SCRIPT/XSS SRC=\\"http://ha.ckers.org/xss.js\\"></SCRIPT>", \n "<BODY onload!#$%&()*~+-_.,:;?@[/|\\\\]^`=alert(\\"XSS\\")>", \n "<SCRIPT/SRC=\\"http://ha.ckers.org/xss.js\\"></SCRIPT>", \n "<<SCRIPT>alert(\\"XSS\\");//<</SCRIPT>", \n "<SCRIPT SRC=http://ha.ckers.org/xss.js?< B >", \n "<SCRIPT SRC=//ha.ckers.org/.j>", \n "<IMG SRC=\\"javascript:alert(\'XSS\')\\"", \n "<iframe src=http://ha.ckers.org/scriptlet.html <", \n "\\\\\\";alert(\'XSS\');//", \n "<u oncopy=alert()> Copy me</u>", \n "<i onwheel=alert(1)> Scroll over me </i>", \n "<plaintext>", \n "http://a/%%30%30", \n "</textarea><script>alert(123)</script>", \n "1;DROP TABLE users", \n "1\'; DROP TABLE users-- 1", \n "\' OR 1=1 -- 1", \n "\' OR \'1\'=\'1", \n "\'; EXEC sp_MSForEachTable \'DROP TABLE ?\'; --",\n " ", \n "%", \n "_", \n "-", \n "--", \n "--version", \n "--help", \n "$USER", \n "/dev/null; 
touch /tmp/blns.fail ; echo", \n "`touch /tmp/blns.fail`", \n "$(touch /tmp/blns.fail)", \n "@{[system \\"touch /tmp/blns.fail\\"]}", \n "eval(\\"puts \'hello world\'\\")", \n "System(\\"ls -al /\\")", \n "`ls -al /`", \n "Kernel.exec(\\"ls -al /\\")", \n "Kernel.exit(1)", \n "%x(\'ls -al /\')", \n "<?xml version=\\"1.0\\" encoding=\\"ISO-8859-1\\"?><!DOCTYPE foo [ <!ELEMENT foo ANY ><!ENTITY xxe SYSTEM \\"file:///etc/passwd\\" >]><foo>&xxe;</foo>", \n "$HOME", \n "$ENV{\'HOME\'}", \n "%d", \n "%s%s%s%s%s", \n "{0}", \n "%*.*s", \n "%@", \n "%n", \n "File:///", \n "../../../../../../../../../../../etc/passwd%00", \n "../../../../../../../../../../../etc/hosts", \n "() { 0; }; touch /tmp/blns.shellshock1.fail;", \n "() { _; } >_[$($())] { touch /tmp/blns.shellshock2.fail; }", \n "<<< %s(un=\'%s\') = %u", \n "+++ATH0", \n "CON", \n "PRN", \n "AUX", \n "CLOCK$", \n "NUL", \n "A:", \n "ZZ:", \n "COM1", \n "LPT1", \n "LPT2", \n "LPT3", \n "COM2", \n "COM3", \n "COM4", \n "DCC SEND STARTKEYLOGGER 0 0 0", \n "Scunthorpe General Hospital", \n "Penistone Community Church", \n "Lightwater Country Park", \n "Jimmy Clitheroe", \n "Horniman Museum", \n "shitake mushrooms", \n "RomansInSussex.co.uk", \n "http://www.cum.qc.ca/", \n "Craig Cockburn, Software Specialist", \n "Linda Callahan", \n "Dr. Herman I. Libshitz", \n "magna cum laude", \n "Super Bowl XXX", \n "medieval erection of parapets", \n "evaluate", \n "mocha", \n "expression", \n "Arsenal canal", \n "classic", \n "Tyson Gay", \n "Dick Van Dyke", \n "basement", \n "If you\'re reading this, you\'ve been in a coma for almost 20 years now. We\'re trying a new technique. We don\'t know where this message will end up in your dream, but we hope it works. Please wake up, we miss you.", \n "Roses are \\u001b[0;31mred\\u001b[0m, violets are \\u001b[0;34mblue. Hope you enjoy terminal hue", \n "But now...\\u001b[20Cfor my greatest trick...\\u001b[8m", \n "The quic\\b\\b\\b\\b\\b\\bk brown fo\\u0007\\u0007\\u0007\\u0007\\u0007\\u0007\\u0007\\u0007\\u0007\\u0007\\u0007x... [Beeeep]", \n "Powerلُلُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ冗", \n "🏳0🌈️",\n "జ్ఞ‌ా",\n "گچپژ",\n "{% print \'x\' * 64 * 1024**3 %}",\n "{{ \\"\\".__class__.__mro__[2].__subclasses__()[40](\\"/etc/passwd\\").read() }}"\n]\n}', + 'x', + 'Array(String)') FORMAT Values; + +SELECT '','undefined','undef','null','NULL','(null)','nil','NIL','true','false','True','False','TRUE','FALSE','None','hasOwnProperty','then','\\','\\\\','0','1','1.00','$1.00','1/2','1E2','1E02','1E+02','-1','-1.00','-$1.00','-1/2','-1E2','-1E02','-1E+02','1/0','0/0','-2147483648/-1','-9223372036854775808/-1','-0','-0.0','+0','+0.0','0.00','0..0','.','0.0.0','0,00','0,,0',',','0,0,0','0.0/0','1.0/0.0','0.0/0.0','1,0/0,0','0,0/0,0','--1','-','-.','-,','999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999','NaN','Infinity','-Infinity','INF','1#INF','-1#IND','1#QNAN','1#SNAN','1#IND','0x0','0xffffffff','0xffffffffffffffff','0xabad1dea','123456789012345678901234567890123456789','1,000.00','1 000.00','1\'000.00','1,000,000.00','1 000 000.00','1\'000\'000.00','1.000,00','1 000,00','1\'000,00','1.000.000,00','1 000 000,00','1\'000\'000,00','01000','08','09','2.2250738585072011e-308',',./;\'[]\\-=','<>?:"{}|_+','!@#$%^&*()`~','\b','€‚ƒ„†‡ˆ‰Š‹ŒŽ‘’“”•–—˜™š›œžŸ','\t \f …             ​

   ','­؀؁؂؃؄؅؜۝܏᠎​‌‍‎‏‪‫‬‭‮⁠⁡⁢⁣⁤⁦⁧⁨⁩𑂽𛲠𛲡𛲢𛲣𝅳𝅴𝅵𝅶𝅷𝅸𝅹𝅺󠀁󠀠󠀡󠀢󠀣󠀤󠀥󠀦󠀧󠀨󠀩󠀪󠀫󠀬󠀭󠀮󠀯󠀰󠀱󠀲󠀳󠀴󠀵󠀶󠀷󠀸󠀹󠀺󠀻󠀼󠀽󠀾󠀿󠁀󠁁󠁂󠁃󠁄󠁅󠁆󠁇󠁈󠁉󠁊󠁋󠁌󠁍󠁎󠁏󠁐󠁑󠁒󠁓󠁔󠁕󠁖󠁗󠁘󠁙󠁚󠁛󠁜󠁝󠁞󠁟󠁠󠁡󠁢󠁣󠁤󠁥󠁦󠁧󠁨󠁩󠁪󠁫󠁬󠁭󠁮󠁯󠁰󠁱󠁲󠁳󠁴󠁵󠁶󠁷󠁸󠁹󠁺󠁻󠁼󠁽󠁾󠁿','','￾','Ω≈ç√∫˜µ≤≥÷','åß∂ƒ©˙∆˚¬…æ','œ∑´®†¥¨ˆøπ“‘','¡™£¢∞§¶•ªº–≠','¸˛Ç◊ı˜Â¯˘¿','ÅÍÎÏ˝ÓÔÒÚÆ☃','Œ„´‰ˇÁ¨ˆØ∏”’','`⁄€‹›fifl‡°·‚—±','⅛⅜⅝⅞','ЁЂЃЄЅІЇЈЉЊЋЌЍЎЏАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюя','٠١٢٣٤٥٦٧٨٩','⁰⁴⁵','₀₁₂','⁰⁴⁵₀₁₂','ด้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็ ด้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็ ด้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็็้้้้้้้้็็็็็้้้้้็็็็','\'','"','\'\'','""','\'"\'','"\'\'\'\'"\'"','"\'"\'"\'\'\'\'"','<foo val=“bar” />','<foo val=“bar” />','<foo val=”bar“ />','<foo val=`bar\' />','田中さんにあげて下さい','パーティーへ行かないか','和製漢語','部落格','사회과학원 어학연구소','찦차를 타고 온 펲시맨과 쑛다리 똠방각하','社會科學院語學研究所','울란바토르','𠜎𠜱𠝹𠱓𠱸𠲖𠳏','𐐜 𐐔𐐇𐐝𐐀𐐡𐐇𐐓 𐐙𐐊𐐡𐐝𐐓/𐐝𐐇𐐗𐐊𐐤𐐔 𐐒𐐋𐐗 𐐒𐐌 𐐜 𐐡𐐀𐐖𐐇𐐤𐐓𐐝 𐐱𐑂 𐑄 𐐔𐐇𐐝𐐀𐐡𐐇𐐓 𐐏𐐆𐐅𐐤𐐆𐐚𐐊𐐡𐐝𐐆𐐓𐐆','表ポあA鷗ŒéB逍Üߪąñ丂㐀𠀀','Ⱥ','Ⱦ','ヽ༼ຈل͜ຈ༽ノ ヽ༼ຈل͜ຈ༽ノ','(。◕ ∀ ◕。)','`ィ(´∀`∩','__ロ(,_,*)','・( ̄∀ ̄)・:*:','゚・✿ヾ╲(。◕‿◕。)╱✿・゚',',。・:*:・゜’( ☻ ω ☻ )。・:*:・゜’','(╯°□°)╯︵ ┻━┻)','(ノಥ益ಥ)ノ ┻━┻','┬─┬ノ( º _ ºノ)','( ͡° ͜ʖ ͡°)','¯\\_(ツ)_/¯','😍','👩🏽','👨‍🦰 👨🏿‍🦰 👨‍🦱 👨🏿‍🦱 🦹🏿‍♂️','👾 🙇 💁 🙅 🙆 🙋 🙎 🙍','🐵 🙈 🙉 🙊','❤️ 💔 💌 💕 💞 💓 💗 💖 💘 💝 💟 💜 💛 💚 💙','✋🏿 💪🏿 👐🏿 🙌🏿 👏🏿 🙏🏿','👨‍👩‍👦 👨‍👩‍👧‍👦 👨‍👨‍👦 👩‍👩‍👧 👨‍👦 👨‍👧‍👦 👩‍👦 👩‍👧‍👦','🚾 🆒 🆓 🆕 🆖 🆗 🆙 🏧','0️⃣ 1️⃣ 2️⃣ 3️⃣ 4️⃣ 5️⃣ 6️⃣ 7️⃣ 8️⃣ 9️⃣ 🔟','🇺🇸🇷🇺🇸 🇦🇫🇦🇲🇸','🇺🇸🇷🇺🇸🇦🇫🇦🇲','🇺🇸🇷🇺🇸🇦','123','١٢٣','ثم نفس سقطت وبالتحديد،, جزيرتي باستخدام أن دنو. إذ هنا؟ الستار وتنصيب كان. أهّل ايطاليا، بريطانيا-فرنسا قد أخذ. سليمان، إتفاقية بين ما, يذكر الحدود أي بعد, معاملة بولندا، الإطلاق عل إيو.','בְּרֵאשִׁית, בָּרָא אֱלֹהִים, אֵת הַשָּׁמַיִם, וְאֵת הָאָרֶץ','הָיְתָהtestالصفحات التّحول','﷽','ﷺ','مُنَاقَشَةُ سُبُلِ اِسْتِخْدَامِ اللُّغَةِ فِي النُّظُمِ الْقَائِمَةِ وَفِيم يَخُصَّ التَّطْبِيقَاتُ الْحاسُوبِيَّةُ، ','᚛ᚄᚓᚐᚋᚒᚄ ᚑᚄᚂᚑᚏᚅ᚜‪‪‪','‪‪᚛                 ᚜‪','‪‪test‪','‫test‫','
test
','test⁠test‫','⁦test⁧','Ṱ̺̺̕o͞ ̷i̲̬͇̪͙n̝̗͕v̟̜̘̦͟o̶̙̰̠kè͚̮̺̪̹̱̤ ̖t̝͕̳̣̻̪͞h̼͓̲̦̳̘̲e͇̣̰̦̬͎ ̢̼̻̱̘h͚͎͙̜̣̲ͅi̦̲̣̰̤v̻͍e̺̭̳̪̰-m̢iͅn̖̺̞̲̯̰d̵̼̟͙̩̼̘̳ ̞̥̱̳̭r̛̗̘e͙p͠r̼̞̻̭̗e̺̠̣͟s̘͇̳͍̝͉e͉̥̯̞̲͚̬͜ǹ̬͎͎̟̖͇̤t͍̬̤͓̼̭͘ͅi̪̱n͠g̴͉ ͏͉ͅc̬̟h͡a̫̻̯͘o̫̟̖͍̙̝͉s̗̦̲.̨̹͈̣','̡͓̞ͅI̗̘̦͝n͇͇͙v̮̫ok̲̫̙͈i̖͙̭̹̠̞n̡̻̮̣̺g̲͈͙̭͙̬͎ ̰t͔̦h̞̲e̢̤ ͍̬̲͖f̴̘͕̣è͖ẹ̥̩l͖͔͚i͓͚̦͠n͖͍̗͓̳̮g͍ ̨o͚̪͡f̘̣̬ ̖̘͖̟͙̮c҉͔̫͖͓͇͖ͅh̵̤̣͚͔á̗̼͕ͅo̼̣̥s̱͈̺̖̦̻͢.̛̖̞̠̫̰','̗̺͖̹̯͓Ṯ̤͍̥͇͈h̲́e͏͓̼̗̙̼̣͔ ͇̜̱̠͓͍ͅN͕͠e̗̱z̘̝̜̺͙p̤̺̹͍̯͚e̠̻̠͜r̨̤͍̺̖͔̖̖d̠̟̭̬̝͟i̦͖̩͓͔̤a̠̗̬͉̙n͚͜ ̻̞̰͚ͅh̵͉i̳̞v̢͇ḙ͎͟-҉̭̩̼͔m̤̭̫i͕͇̝̦n̗͙ḍ̟ ̯̲͕͞ǫ̟̯̰̲͙̻̝f ̪̰̰̗̖̭̘͘c̦͍̲̞͍̩̙ḥ͚a̮͎̟̙͜ơ̩̹͎s̤.̝̝ ҉Z̡̖̜͖̰̣͉̜a͖̰͙̬͡l̲̫̳͍̩g̡̟̼̱͚̞̬ͅo̗͜.̟','̦H̬̤̗̤͝e͜ ̜̥̝̻͍̟́w̕h̖̯͓o̝͙̖͎̱̮ ҉̺̙̞̟͈W̷̼̭a̺̪͍į͈͕̭͙̯̜t̶̼̮s̘͙͖̕ ̠̫̠B̻͍͙͉̳ͅe̵h̵̬͇̫͙i̹͓̳̳̮͎̫̕n͟d̴̪̜̖ ̰͉̩͇͙̲͞ͅT͖̼͓̪͢h͏͓̮̻e̬̝̟ͅ ̤̹̝W͙̞̝͔͇͝ͅa͏͓͔̹̼̣l̴͔̰̤̟͔ḽ̫.͕','Z̮̞̠͙͔ͅḀ̗̞͈̻̗Ḷ͙͎̯̹̞͓G̻O̭̗̮','˙ɐnbᴉlɐ ɐuƃɐɯ ǝɹolop ʇǝ ǝɹoqɐl ʇn ʇunpᴉpᴉɔuᴉ ɹodɯǝʇ poɯsnᴉǝ op pǝs \'ʇᴉlǝ ƃuᴉɔsᴉdᴉpɐ ɹnʇǝʇɔǝsuoɔ \'ʇǝɯɐ ʇᴉs ɹolop ɯnsdᴉ ɯǝɹo˥','00˙Ɩ$-','The quick brown fox jumps over the lazy dog','𝐓𝐡𝐞 𝐪𝐮𝐢𝐜𝐤 𝐛𝐫𝐨𝐰𝐧 𝐟𝐨𝐱 𝐣𝐮𝐦𝐩𝐬 𝐨𝐯𝐞𝐫 𝐭𝐡𝐞 𝐥𝐚𝐳𝐲 𝐝𝐨𝐠','𝕿𝖍𝖊 𝖖𝖚𝖎𝖈𝖐 𝖇𝖗𝖔𝖜𝖓 𝖋𝖔𝖝 𝖏𝖚𝖒𝖕𝖘 𝖔𝖛𝖊𝖗 𝖙𝖍𝖊 𝖑𝖆𝖟𝖞 𝖉𝖔𝖌','𝑻𝒉𝒆 𝒒𝒖𝒊𝒄𝒌 𝒃𝒓𝒐𝒘𝒏 𝒇𝒐𝒙 𝒋𝒖𝒎𝒑𝒔 𝒐𝒗𝒆𝒓 𝒕𝒉𝒆 𝒍𝒂𝒛𝒚 𝒅𝒐𝒈','𝓣𝓱𝓮 𝓺𝓾𝓲𝓬𝓴 𝓫𝓻𝓸𝔀𝓷 𝓯𝓸𝔁 𝓳𝓾𝓶𝓹𝓼 𝓸𝓿𝓮𝓻 𝓽𝓱𝓮 𝓵𝓪𝔃𝔂 𝓭𝓸𝓰','𝕋𝕙𝕖 𝕢𝕦𝕚𝕔𝕜 𝕓𝕣𝕠𝕨𝕟 𝕗𝕠𝕩 𝕛𝕦𝕞𝕡𝕤 𝕠𝕧𝕖𝕣 𝕥𝕙𝕖 𝕝𝕒𝕫𝕪 𝕕𝕠𝕘','𝚃𝚑𝚎 𝚚𝚞𝚒𝚌𝚔 𝚋𝚛𝚘𝚠𝚗 𝚏𝚘𝚡 𝚓𝚞𝚖𝚙𝚜 𝚘𝚟𝚎𝚛 𝚝𝚑𝚎 𝚕𝚊𝚣𝚢 𝚍𝚘𝚐','⒯⒣⒠ ⒬⒰⒤⒞⒦ ⒝⒭⒪⒲⒩ ⒡⒪⒳ ⒥⒰⒨⒫⒮ ⒪⒱⒠⒭ ⒯⒣⒠ ⒧⒜⒵⒴ ⒟⒪⒢','<script>alert(123)</script>','<script>alert('123');</script>','<img src=x onerror=alert(123) />','<svg><script>123<1>alert(123)</script>','"><script>alert(123)</script>','\'><script>alert(123)</script>','><script>alert(123)</script>','</script><script>alert(123)</script>','< / script >< script >alert(123)< / script >',' onfocus=JaVaSCript:alert(123) autofocus','" onfocus=JaVaSCript:alert(123) autofocus','\' onfocus=JaVaSCript:alert(123) autofocus','<script>alert(123)</script>','<sc<script>ript>alert(123)</sc</script>ript>','--><script>alert(123)</script>','";alert(123);t="','\';alert(123);t=\'','JavaSCript:alert(123)',';alert(123);','src=JaVaSCript:prompt(132)','"><script>alert(123);</script x="','\'><script>alert(123);</script x=\'','><script>alert(123);</script x=','" autofocus onkeyup="javascript:alert(123)','\' autofocus onkeyup=\'javascript:alert(123)','<script\\x20type="text/javascript">javascript:alert(1);</script>','<script\\x3Etype="text/javascript">javascript:alert(1);</script>','<script\\x0Dtype="text/javascript">javascript:alert(1);</script>','<script\\x09type="text/javascript">javascript:alert(1);</script>','<script\\x0Ctype="text/javascript">javascript:alert(1);</script>','<script\\x2Ftype="text/javascript">javascript:alert(1);</script>','<script\\x0Atype="text/javascript">javascript:alert(1);</script>','\'`"><\\x3Cscript>javascript:alert(1)</script>','\'`"><\\x00script>javascript:alert(1)</script>','ABC<div style="x\\x3Aexpression(javascript:alert(1)">DEF','ABC<div style="x:expression\\x5C(javascript:alert(1)">DEF','ABC<div style="x:expression\\x00(javascript:alert(1)">DEF','ABC<div style="x:exp\\x00ression(javascript:alert(1)">DEF','ABC<div style="x:exp\\x5Cression(javascript:alert(1)">DEF','ABC<div style="x:\\x0Aexpression(javascript:alert(1)">DEF','ABC<div style="x:\\x09expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE3\\x80\\x80expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x84expression(javascript:alert(1)">DEF','ABC<div style="x:\\xC2\\xA0expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x80expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x8Aexpression(javascript:alert(1)">DEF','ABC<div 
style="x:\\x0Dexpression(javascript:alert(1)">DEF','ABC<div style="x:\\x0Cexpression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x87expression(javascript:alert(1)">DEF','ABC<div style="x:\\xEF\\xBB\\xBFexpression(javascript:alert(1)">DEF','ABC<div style="x:\\x20expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x88expression(javascript:alert(1)">DEF','ABC<div style="x:\\x00expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x8Bexpression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x86expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x85expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x82expression(javascript:alert(1)">DEF','ABC<div style="x:\\x0Bexpression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x81expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x83expression(javascript:alert(1)">DEF','ABC<div style="x:\\xE2\\x80\\x89expression(javascript:alert(1)">DEF','<a href="\\x0Bjavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x0Fjavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xC2\\xA0javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x05javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE1\\xA0\\x8Ejavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x18javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x11javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x88javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x89javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x80javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x17javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x03javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x0Ejavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x1Ajavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x00javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x10javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x82javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x20javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x13javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x09javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x8Ajavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x14javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x19javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\xAFjavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x1Fjavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x81javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x1Djavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x87javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x07javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE1\\x9A\\x80javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x83javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x04javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x01javascript:javascript:alert(1)" 
id="fuzzelement1">test</a>','<a href="\\x08javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x84javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x86javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE3\\x80\\x80javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x12javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x0Djavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x0Ajavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x0Cjavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x15javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\xA8javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x16javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x02javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x1Bjavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x06javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\xA9javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x80\\x85javascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x1Ejavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\xE2\\x81\\x9Fjavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="\\x1Cjavascript:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="javascript\\x00:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="javascript\\x3A:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="javascript\\x09:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="javascript\\x0D:javascript:alert(1)" id="fuzzelement1">test</a>','<a href="javascript\\x0A:javascript:alert(1)" id="fuzzelement1">test</a>','`"\'><img src=xxx:x \\x0Aonerror=javascript:alert(1)>','`"\'><img src=xxx:x \\x22onerror=javascript:alert(1)>','`"\'><img src=xxx:x \\x0Bonerror=javascript:alert(1)>','`"\'><img src=xxx:x \\x0Donerror=javascript:alert(1)>','`"\'><img src=xxx:x \\x2Fonerror=javascript:alert(1)>','`"\'><img src=xxx:x \\x09onerror=javascript:alert(1)>','`"\'><img src=xxx:x \\x0Conerror=javascript:alert(1)>','`"\'><img src=xxx:x \\x00onerror=javascript:alert(1)>','`"\'><img src=xxx:x \\x27onerror=javascript:alert(1)>','`"\'><img src=xxx:x 
\\x20onerror=javascript:alert(1)>','"`\'><script>\\x3Bjavascript:alert(1)</script>','"`\'><script>\\x0Djavascript:alert(1)</script>','"`\'><script>\\xEF\\xBB\\xBFjavascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x81javascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x84javascript:alert(1)</script>','"`\'><script>\\xE3\\x80\\x80javascript:alert(1)</script>','"`\'><script>\\x09javascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x89javascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x85javascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x88javascript:alert(1)</script>','"`\'><script>\\x00javascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\xA8javascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x8Ajavascript:alert(1)</script>','"`\'><script>\\xE1\\x9A\\x80javascript:alert(1)</script>','"`\'><script>\\x0Cjavascript:alert(1)</script>','"`\'><script>\\x2Bjavascript:alert(1)</script>','"`\'><script>\\xF0\\x90\\x96\\x9Ajavascript:alert(1)</script>','"`\'><script>-javascript:alert(1)</script>','"`\'><script>\\x0Ajavascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\xAFjavascript:alert(1)</script>','"`\'><script>\\x7Ejavascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x87javascript:alert(1)</script>','"`\'><script>\\xE2\\x81\\x9Fjavascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\xA9javascript:alert(1)</script>','"`\'><script>\\xC2\\x85javascript:alert(1)</script>','"`\'><script>\\xEF\\xBF\\xAEjavascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x83javascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x8Bjavascript:alert(1)</script>','"`\'><script>\\xEF\\xBF\\xBEjavascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x80javascript:alert(1)</script>','"`\'><script>\\x21javascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x82javascript:alert(1)</script>','"`\'><script>\\xE2\\x80\\x86javascript:alert(1)</script>','"`\'><script>\\xE1\\xA0\\x8Ejavascript:alert(1)</script>','"`\'><script>\\x0Bjavascript:alert(1)</script>','"`\'><script>\\x20javascript:alert(1)</script>','"`\'><script>\\xC2\\xA0javascript:alert(1)</script>','<img \\x00src=x onerror="alert(1)">','<img \\x47src=x onerror="javascript:alert(1)">','<img \\x11src=x onerror="javascript:alert(1)">','<img \\x12src=x onerror="javascript:alert(1)">','<img\\x47src=x onerror="javascript:alert(1)">','<img\\x10src=x onerror="javascript:alert(1)">','<img\\x13src=x onerror="javascript:alert(1)">','<img\\x32src=x onerror="javascript:alert(1)">','<img\\x47src=x onerror="javascript:alert(1)">','<img\\x11src=x onerror="javascript:alert(1)">','<img \\x47src=x onerror="javascript:alert(1)">','<img \\x34src=x onerror="javascript:alert(1)">','<img \\x39src=x onerror="javascript:alert(1)">','<img \\x00src=x onerror="javascript:alert(1)">','<img src\\x09=x onerror="javascript:alert(1)">','<img src\\x10=x onerror="javascript:alert(1)">','<img src\\x13=x onerror="javascript:alert(1)">','<img src\\x32=x onerror="javascript:alert(1)">','<img src\\x12=x onerror="javascript:alert(1)">','<img src\\x11=x onerror="javascript:alert(1)">','<img src\\x00=x onerror="javascript:alert(1)">','<img src\\x47=x onerror="javascript:alert(1)">','<img src=x\\x09onerror="javascript:alert(1)">','<img src=x\\x10onerror="javascript:alert(1)">','<img src=x\\x11onerror="javascript:alert(1)">','<img src=x\\x12onerror="javascript:alert(1)">','<img src=x\\x13onerror="javascript:alert(1)">','<img[a][b][c]src[d]=x[e]onerror=[f]"alert(1)">','<img src=x onerror=\\x09"javascript:alert(1)">','<img src=x onerror=\\x10"javascript:alert(1)">','<img src=x 
onerror=\\x11"javascript:alert(1)">','<img src=x onerror=\\x12"javascript:alert(1)">','<img src=x onerror=\\x32"javascript:alert(1)">','<img src=x onerror=\\x00"javascript:alert(1)">','<a href=java script:javascript:alert(1)>XXX</a>','<img src="x` `<script>javascript:alert(1)</script>"` `>','<img src onerror /" \'"= alt=javascript:alert(1)//">','<title onpropertychange=javascript:alert(1)>','<a href=http://foo.bar/#x=`y></a><img alt="`><img src=x:x onerror=javascript:alert(1)></a>">','<!--[if]><script>javascript:alert(1)</script -->','<!--[if<img src=x onerror=javascript:alert(1)//]> -->','<script src="/\\%(jscript)s"></script>','<script src="\\\\%(jscript)s"></script>','<IMG """><SCRIPT>alert("XSS")</SCRIPT>">','<IMG SRC=javascript:alert(String.fromCharCode(88,83,83))>','<IMG SRC=# onmouseover="alert(\'xxs\')">','<IMG SRC= onmouseover="alert(\'xxs\')">','<IMG onmouseover="alert(\'xxs\')">','<IMG SRC=javascript:alert('XSS')>','<IMG SRC=javascript:alert('XSS')>','<IMG SRC=javascript:alert('XSS')>','<IMG SRC="jav ascript:alert(\'XSS\');">','<IMG SRC="jav ascript:alert(\'XSS\');">','<IMG SRC="jav ascript:alert(\'XSS\');">','<IMG SRC="jav ascript:alert(\'XSS\');">','perl -e \'print "<IMG SRC=java\\0script:alert(\\"XSS\\")>";\' > out','<IMG SRC="  javascript:alert(\'XSS\');">','<SCRIPT/XSS SRC="http://ha.ckers.org/xss.js"></SCRIPT>','<BODY onload!#$%&()*~+-_.,:;?@[/|\\]^`=alert("XSS")>','<SCRIPT/SRC="http://ha.ckers.org/xss.js"></SCRIPT>','<<SCRIPT>alert("XSS");//<</SCRIPT>','<SCRIPT SRC=http://ha.ckers.org/xss.js?< B >','<SCRIPT SRC=//ha.ckers.org/.j>','<IMG SRC="javascript:alert(\'XSS\')"','<iframe src=http://ha.ckers.org/scriptlet.html <','\\";alert(\'XSS\');//','<u oncopy=alert()> Copy me</u>','<i onwheel=alert(1)> Scroll over me </i>','<plaintext>','http://a/%%30%30','</textarea><script>alert(123)</script>','1;DROP TABLE users','1\'; DROP TABLE users-- 1','\' OR 1=1 -- 1','\' OR \'1\'=\'1','\'; EXEC sp_MSForEachTable \'DROP TABLE ?\'; --',' ','%','_','-','--','--version','--help','$USER','/dev/null; touch /tmp/blns.fail ; echo','`touch /tmp/blns.fail`','$(touch /tmp/blns.fail)','@{[system "touch /tmp/blns.fail"]}','eval("puts \'hello world\'")','System("ls -al /")','`ls -al /`','Kernel.exec("ls -al /")','Kernel.exit(1)','%x(\'ls -al /\')','<?xml version="1.0" encoding="ISO-8859-1"?><!DOCTYPE foo [ <!ELEMENT foo ANY ><!ENTITY xxe SYSTEM "file:///etc/passwd" >]><foo>&xxe;</foo>','$HOME','$ENV{\'HOME\'}','%d','%s%s%s%s%s','{0}','%*.*s','%@','%n','File:///','../../../../../../../../../../../etc/passwd%00','../../../../../../../../../../../etc/hosts','() { 0; }; touch /tmp/blns.shellshock1.fail;','() { _; } >_[$($())] { touch /tmp/blns.shellshock2.fail; }','<<< %s(un=\'%s\') = %u','+++ATH0','CON','PRN','AUX','CLOCK$','NUL','A:','ZZ:','COM1','LPT1','LPT2','LPT3','COM2','COM3','COM4','DCC SEND STARTKEYLOGGER 0 0 0','Scunthorpe General Hospital','Penistone Community Church','Lightwater Country Park','Jimmy Clitheroe','Horniman Museum','shitake mushrooms','RomansInSussex.co.uk','http://www.cum.qc.ca/','Craig Cockburn, Software Specialist','Linda Callahan','Dr. Herman I. Libshitz','magna cum laude','Super Bowl XXX','medieval erection of parapets','evaluate','mocha','expression','Arsenal canal','classic','Tyson Gay','Dick Van Dyke','basement','If you\'re reading this, you\'ve been in a coma for almost 20 years now. We\'re trying a new technique. We don\'t know where this message will end up in your dream, but we hope it works. Please wake up, we miss you.','Roses are red, violets are blue. 
Hope you enjoy terminal hue','But now...for my greatest trick...','The quic\b\b\b\b\b\bk brown fox... [Beeeep]','Powerلُلُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ冗','🏳0🌈️','జ్ఞ‌ా','گچپژ','{% print \'x\' * 64 * 1024**3 %}','{{ "".__class__.__mro__[2].__subclasses__()[40]("/etc/passwd").read() }}'; diff --git a/parser/testdata/01666_date_lut_buffer_overflow/ast.json b/parser/testdata/01666_date_lut_buffer_overflow/ast.json new file mode 100644 index 000000000..f67ff85d2 --- /dev/null +++ b/parser/testdata/01666_date_lut_buffer_overflow/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2105-12-31'" + }, + { + "explain": " Function toIntervalMonth (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_25000" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001239082, + "rows_read": 18, + "bytes_read": 707 + } +} diff --git a/parser/testdata/01666_date_lut_buffer_overflow/metadata.json b/parser/testdata/01666_date_lut_buffer_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01666_date_lut_buffer_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01666_date_lut_buffer_overflow/query.sql b/parser/testdata/01666_date_lut_buffer_overflow/query.sql new file mode 100644 index 000000000..f1cf610ff --- /dev/null +++ b/parser/testdata/01666_date_lut_buffer_overflow/query.sql @@ -0,0 +1 @@ +SELECT toDate('2105-12-31') + INTERVAL number MONTH FROM system.numbers LIMIT 25000 FORMAT Null; diff --git a/parser/testdata/01666_gcd_ubsan/ast.json b/parser/testdata/01666_gcd_ubsan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01666_gcd_ubsan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01666_gcd_ubsan/metadata.json b/parser/testdata/01666_gcd_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01666_gcd_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01666_gcd_ubsan/query.sql b/parser/testdata/01666_gcd_ubsan/query.sql new file mode 100644 index 000000000..bd7023caa --- /dev/null +++ b/parser/testdata/01666_gcd_ubsan/query.sql @@ -0,0 +1,11 @@ +-- { echo } +SELECT gcd(9223372036854775807, -9223372036854775808); -- { serverError DECIMAL_OVERFLOW } +SELECT gcd(9223372036854775808, -9223372036854775807); -- { serverError DECIMAL_OVERFLOW } +SELECT gcd(-9223372036854775808, 9223372036854775807); -- { serverError DECIMAL_OVERFLOW } +SELECT gcd(-9223372036854775807, 9223372036854775808); -- { serverError DECIMAL_OVERFLOW } +SELECT gcd(9223372036854775808, -1); -- { serverError DECIMAL_OVERFLOW } +SELECT 
lcm(-170141183460469231731687303715884105728, -170141183460469231731687303715884105728); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT lcm(toInt128(-170141183460469231731687303715884105728), toInt128(-170141183460469231731687303715884105728)); +SELECT lcm(toInt128(-170141183460469231731687303715884105720), toInt128(-170141183460469231731687303715884105720)); +SELECT lcm(toInt128('-170141183460469231731687303715884105720'), toInt128('-170141183460469231731687303715884105720')); +SELECT gcd(-9223372036854775806, -9223372036854775806); diff --git a/parser/testdata/01666_great_circle_distance_ubsan/ast.json b/parser/testdata/01666_great_circle_distance_ubsan/ast.json new file mode 100644 index 000000000..3c4975ab3 --- /dev/null +++ b/parser/testdata/01666_great_circle_distance_ubsan/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function greatCircleAngle (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal Int64_-9223372036854775808" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001286494, + "rows_read": 17, + "bytes_read": 655 + } +} diff --git a/parser/testdata/01666_great_circle_distance_ubsan/metadata.json b/parser/testdata/01666_great_circle_distance_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01666_great_circle_distance_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01666_great_circle_distance_ubsan/query.sql b/parser/testdata/01666_great_circle_distance_ubsan/query.sql new file mode 100644 index 000000000..a24e4747e --- /dev/null +++ b/parser/testdata/01666_great_circle_distance_ubsan/query.sql @@ -0,0 +1 @@ +SELECT greatCircleAngle(0, -9223372036854775808, number, number) FROM numbers(3) FORMAT Null; diff --git a/parser/testdata/01666_lcm_ubsan/ast.json b/parser/testdata/01666_lcm_ubsan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01666_lcm_ubsan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01666_lcm_ubsan/metadata.json b/parser/testdata/01666_lcm_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01666_lcm_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01666_lcm_ubsan/query.sql b/parser/testdata/01666_lcm_ubsan/query.sql new file mode 100644 index 000000000..cd3eba362 --- /dev/null +++ b/parser/testdata/01666_lcm_ubsan/query.sql @@ -0,0 +1,11 @@ +-- { echo } +SELECT lcm(9223372036854775807, -9223372036854775808); -- { serverError DECIMAL_OVERFLOW } +SELECT lcm(9223372036854775808, -9223372036854775807); -- { serverError DECIMAL_OVERFLOW } +SELECT lcm(-9223372036854775808, 9223372036854775807); 
-- { serverError DECIMAL_OVERFLOW } +SELECT lcm(-9223372036854775807, 9223372036854775808); -- { serverError DECIMAL_OVERFLOW } +SELECT lcm(9223372036854775808, -1); -- { serverError DECIMAL_OVERFLOW } +SELECT lcm(-170141183460469231731687303715884105728, -170141183460469231731687303715884105728); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT lcm(toInt128(-170141183460469231731687303715884105728), toInt128(-170141183460469231731687303715884105728)); +SELECT lcm(toInt128(-170141183460469231731687303715884105720), toInt128(-170141183460469231731687303715884105720)); +SELECT lcm(toInt128('-170141183460469231731687303715884105720'), toInt128('-170141183460469231731687303715884105720')); +SELECT lcm(-9223372036854775806, -9223372036854775806); diff --git a/parser/testdata/01667_aes_args_check/ast.json b/parser/testdata/01667_aes_args_check/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01667_aes_args_check/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01667_aes_args_check/metadata.json b/parser/testdata/01667_aes_args_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01667_aes_args_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01667_aes_args_check/query.sql b/parser/testdata/01667_aes_args_check/query.sql new file mode 100644 index 000000000..71273558d --- /dev/null +++ b/parser/testdata/01667_aes_args_check/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on OpenSSL + +SELECT encrypt('aes-128-ecb', [1, -1, 0, NULL], 'text'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/01668_avg_weighted_ubsan/ast.json b/parser/testdata/01668_avg_weighted_ubsan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01668_avg_weighted_ubsan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01668_avg_weighted_ubsan/metadata.json b/parser/testdata/01668_avg_weighted_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01668_avg_weighted_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01668_avg_weighted_ubsan/query.sql b/parser/testdata/01668_avg_weighted_ubsan/query.sql new file mode 100644 index 000000000..b4dd6a6bd --- /dev/null +++ b/parser/testdata/01668_avg_weighted_ubsan/query.sql @@ -0,0 +1,5 @@ +-- https://github.com/ClickHouse/ClickHouse/pull/19475 +-- The result of this test is not important, but be aware that it is wrong, since the computation might overflow depending on which +underlying type is used. 
The expected result should be 10: +-- https://www.wolframalpha.com/input?i=%281023+*+1000000000.0+%2B+10+*+-9223372036854775808.0%29+%2F+%281000000000.0+%2B+-9223372036854775808.0%29 +SELECT round(avgWeighted(x, y)) FROM (SELECT 1023 AS x, 1000000000 AS y UNION ALL SELECT 10 AS x, -9223372036854775808 AS y); diff --git a/parser/testdata/01668_test_toMonth_mysql_dialect/ast.json b/parser/testdata/01668_test_toMonth_mysql_dialect/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01668_test_toMonth_mysql_dialect/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01668_test_toMonth_mysql_dialect/metadata.json b/parser/testdata/01668_test_toMonth_mysql_dialect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01668_test_toMonth_mysql_dialect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01668_test_toMonth_mysql_dialect/query.sql b/parser/testdata/01668_test_toMonth_mysql_dialect/query.sql new file mode 100644 index 000000000..c6b0cb84f --- /dev/null +++ b/parser/testdata/01668_test_toMonth_mysql_dialect/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest + +SELECT MONTH(toDateTime('2016-06-15 23:00:00')); diff --git a/parser/testdata/01669_columns_declaration_serde_long/ast.json b/parser/testdata/01669_columns_declaration_serde_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01669_columns_declaration_serde_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01669_columns_declaration_serde_long/metadata.json b/parser/testdata/01669_columns_declaration_serde_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01669_columns_declaration_serde_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01669_columns_declaration_serde_long/query.sql b/parser/testdata/01669_columns_declaration_serde_long/query.sql new file mode 100644 index 000000000..4a45c0c9e --- /dev/null +++ b/parser/testdata/01669_columns_declaration_serde_long/query.sql @@ -0,0 +1,42 @@ +-- Tags: long + +CREATE TEMPORARY TABLE test ("\\" String DEFAULT '\r\n\t\\' || ' +'); + +INSERT INTO test VALUES ('Hello, world!'); +INSERT INTO test ("\\") VALUES ('Hello, world!'); + +SELECT * FROM test; + +DROP TEMPORARY TABLE test; +DROP TABLE IF EXISTS test; + +CREATE TABLE test (x UInt64, "\\" String DEFAULT '\r\n\t\\' || ' +') ENGINE = MergeTree ORDER BY x; + +INSERT INTO test (x) VALUES (1); + +SELECT * FROM test; + +DROP TABLE test; + +DROP TABLE IF EXISTS test_r1; +DROP TABLE IF EXISTS test_r2; + +CREATE TABLE test_r1 (x UInt64, "\\" String DEFAULT '\r\n\t\\' || ' +') ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_01669', 'r1') ORDER BY "\\" settings ratio_of_defaults_for_sparse_serialization = 1.0; + +INSERT INTO test_r1 ("\\") VALUES ('\\'); + +CREATE TABLE test_r2 (x UInt64, "\\" String DEFAULT '\r\n\t\\' || ' +') ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_01669', 'r2') ORDER BY "\\" settings ratio_of_defaults_for_sparse_serialization = 1.0; + +SYSTEM SYNC REPLICA test_r2; + +SELECT '---'; +SELECT * FROM test_r1; +SELECT '---'; +SELECT * FROM test_r2; + +DROP TABLE test_r1; +DROP TABLE test_r2; diff --git a/parser/testdata/01669_join_or_duplicates/ast.json b/parser/testdata/01669_join_or_duplicates/ast.json new file mode 100644 index 000000000..74b6346cb --- /dev/null +++ b/parser/testdata/01669_join_or_duplicates/ast.json @@ -0,0 +1,148 
@@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1 left'" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias t1) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias x)" + }, + { + "explain": " Literal UInt64_2 (alias y)" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias t2) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias xx)" + }, + { + "explain": " Literal UInt64_2 (alias yy)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TableJoin (children 1)" + }, + { + "explain": " Function or (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier xx" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Identifier yy" + } + ], + + "rows": 42, + + "statistics": + { + "elapsed": 0.001146654, + "rows_read": 42, + "bytes_read": 1759 + } +} diff --git a/parser/testdata/01669_join_or_duplicates/metadata.json b/parser/testdata/01669_join_or_duplicates/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01669_join_or_duplicates/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01669_join_or_duplicates/query.sql b/parser/testdata/01669_join_or_duplicates/query.sql new file mode 100644 index 000000000..7495ecd1d --- /dev/null +++ b/parser/testdata/01669_join_or_duplicates/query.sql @@ -0,0 +1,23 @@ +select '1 left', * from (select 1 as x, 2 as y) t1 left join (select 1 as xx, 2 as yy from numbers(1)) t2 on x = xx or y = yy; + +select '5 left', * from (select 1 as x, 2 as y) t1 left join (select 1 as xx, 2 as yy from numbers(5)) t2 on x = xx or y = yy; + +select '15 left', * from (select 1 as x, 2 as y) t1 left join (select 1 as xx, 2 as yy from numbers(15)) t2 on x = xx or y = yy; + +select '16 left', * from (select 1 as x, 2 as y) t1 left join (select 1 as xx, 2 as yy from numbers(16)) t2 on x = xx or y = yy; + +select '17 left', * from (select 1 as x, 2 as y) t1 left join (select 1 as xx, 2 as yy from numbers(17)) 
t2 on x = xx or y = yy; + +select '17 any left', * from (select 1 as x, 2 as y) t1 any left join (select 1 as xx, 2 as yy from numbers(17)) t2 on x = xx or y = yy; + +select '17 right', * from (select 1 as x, 2 as y) t1 right join (select 1 as xx, 2 as yy from numbers(17)) t2 on x = xx or y = yy; + +select '17 any right', * from (select 1 as x, 2 as y) t1 any right join (select 1 as xx, 2 as yy from numbers(17)) t2 on x = xx or y = yy; + +select '17 full', * from (select 1 as x, 2 as y) t1 full join (select 1 as xx, 2 as yy from numbers(17)) t2 on x = xx or y = yy; + +select count(1) from (select * from (select 1 as x, 2 as y) t1 left join (select 1 as xx, 2 as yy from numbers(555)) t2 on x = xx or y = yy); + +select * from (select 'a' as a, number as c from numbers(2)) as t1 join (select 'a' as a, number as c from numbers(2)) as t2 on t1.c = t2.c or t1.a = t2.a order by t1.c, t2.c; + +select * from (select 'a' as a, number as c from numbers(2)) as t1 join (select 'a' as a, number as c from numbers(2)) as t2 on t1.a = t2.a or t1.c = t2.c order by t1.c, t2.c; diff --git a/parser/testdata/01669_test_toYear_mysql_dialect/ast.json b/parser/testdata/01669_test_toYear_mysql_dialect/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01669_test_toYear_mysql_dialect/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01669_test_toYear_mysql_dialect/metadata.json b/parser/testdata/01669_test_toYear_mysql_dialect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01669_test_toYear_mysql_dialect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01669_test_toYear_mysql_dialect/query.sql b/parser/testdata/01669_test_toYear_mysql_dialect/query.sql new file mode 100644 index 000000000..31088b5dd --- /dev/null +++ b/parser/testdata/01669_test_toYear_mysql_dialect/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest + +SELECT YEAR(toDateTime('2016-06-15 23:00:00')); diff --git a/parser/testdata/01670_dictionary_create_key_expression/ast.json b/parser/testdata/01670_dictionary_create_key_expression/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01670_dictionary_create_key_expression/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01670_dictionary_create_key_expression/metadata.json b/parser/testdata/01670_dictionary_create_key_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01670_dictionary_create_key_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01670_dictionary_create_key_expression/query.sql b/parser/testdata/01670_dictionary_create_key_expression/query.sql new file mode 100644 index 000000000..97c04ce44 --- /dev/null +++ b/parser/testdata/01670_dictionary_create_key_expression/query.sql @@ -0,0 +1,42 @@ +-- Tags: no-parallel + +CREATE DATABASE database_dictionary_test_key_expression; + +CREATE TABLE database_dictionary_test_key_expression.test_for_dictionary (value String) ENGINE=TinyLog; +INSERT INTO database_dictionary_test_key_expression.test_for_dictionary VALUES ('Test1'), ('Test2'), ('Test3'); + +SELECT 'Simple'; + +CREATE DICTIONARY database_dictionary_test_key_expression.test_query_log_dictionary_simple +( + `value_id` UInt64 EXPRESSION cityHash64(value), + `value` String +) +PRIMARY KEY value_id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'test_for_dictionary' DB 
'database_dictionary_test_key_expression')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(HASHED()); + +SELECT * FROM database_dictionary_test_key_expression.test_query_log_dictionary_simple ORDER BY value_id; + +DROP DICTIONARY IF EXISTS database_dictionary_test_key_expression.test_query_log_dictionary_simple; + +SELECT 'Complex'; + +CREATE DICTIONARY database_dictionary_test_key_expression.test_query_log_dictionary_complex +( + `value_id` UInt64 EXPRESSION cityHash64(value), + `value_length` UInt64 EXPRESSION length(value), + `value` String +) +PRIMARY KEY value_id, value_length +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'test_for_dictionary' DB 'database_dictionary_test_key_expression')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(COMPLEX_KEY_HASHED()); + +SELECT * FROM database_dictionary_test_key_expression.test_query_log_dictionary_complex ORDER BY value_id; + +DROP DICTIONARY IF EXISTS database_dictionary_test_key_expression.test_query_log_dictionary_complex; + +DROP TABLE IF EXISTS database_dictionary_test_key_expression.test_for_dictionary; +DROP DATABASE IF EXISTS database_dictionary_test_key_expression; diff --git a/parser/testdata/01670_distributed_bytes_to_throw_insert/ast.json b/parser/testdata/01670_distributed_bytes_to_throw_insert/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01670_distributed_bytes_to_throw_insert/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01670_distributed_bytes_to_throw_insert/metadata.json b/parser/testdata/01670_distributed_bytes_to_throw_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01670_distributed_bytes_to_throw_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01670_distributed_bytes_to_throw_insert/query.sql b/parser/testdata/01670_distributed_bytes_to_throw_insert/query.sql new file mode 100644 index 000000000..7e32f6ab4 --- /dev/null +++ b/parser/testdata/01670_distributed_bytes_to_throw_insert/query.sql @@ -0,0 +1,16 @@ +-- Tags: distributed + +drop table if exists dist_01670; +drop table if exists data_01670; + +create table data_01670 (key Int) engine=Null(); +create table dist_01670 (key Int) engine=Distributed(test_shard_localhost, currentDatabase(), data_01670) settings bytes_to_throw_insert=1; +system stop distributed sends dist_01670; +-- first batch is always OK, since there are no pending bytes yet +insert into dist_01670 select * from numbers(1) settings prefer_localhost_replica=0; +-- second will fail, because of bytes_to_throw_insert=1 +-- (previous block definitely takes more, since it has a header) +insert into dist_01670 select * from numbers(1) settings prefer_localhost_replica=0; -- { serverError DISTRIBUTED_TOO_MANY_PENDING_BYTES } +system flush distributed dist_01670; +drop table dist_01670; +drop table data_01670; diff --git a/parser/testdata/01670_log_comment/ast.json b/parser/testdata/01670_log_comment/ast.json new file mode 100644 index 000000000..a40e96f1d --- /dev/null +++ b/parser/testdata/01670_log_comment/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001203796, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01670_log_comment/metadata.json b/parser/testdata/01670_log_comment/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++
b/parser/testdata/01670_log_comment/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01670_log_comment/query.sql b/parser/testdata/01670_log_comment/query.sql new file mode 100644 index 000000000..cb974693b --- /dev/null +++ b/parser/testdata/01670_log_comment/query.sql @@ -0,0 +1,5 @@ +SET log_comment = 'log_comment test', log_queries = 1; +SELECT 1; +SYSTEM FLUSH LOGS query_log; +SELECT type, query FROM system.query_log WHERE current_database = currentDatabase() AND log_comment = 'log_comment test' AND query LIKE 'SELECT 1%' AND event_date >= yesterday() AND type = 1 ORDER BY event_time_microseconds DESC LIMIT 1; +SELECT type, query FROM system.query_log WHERE current_database = currentDatabase() AND log_comment = 'log_comment test' AND query LIKE 'SELECT 1%' AND event_date >= yesterday() AND type = 2 ORDER BY event_time_microseconds DESC LIMIT 1; diff --git a/parser/testdata/01670_neighbor_lc_bug/ast.json b/parser/testdata/01670_neighbor_lc_bug/ast.json new file mode 100644 index 000000000..2cbd3051b --- /dev/null +++ b/parser/testdata/01670_neighbor_lc_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001396035, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01670_neighbor_lc_bug/metadata.json b/parser/testdata/01670_neighbor_lc_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01670_neighbor_lc_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01670_neighbor_lc_bug/query.sql b/parser/testdata/01670_neighbor_lc_bug/query.sql new file mode 100644 index 000000000..599a1f490 --- /dev/null +++ b/parser/testdata/01670_neighbor_lc_bug/query.sql @@ -0,0 +1,49 @@ +SET allow_deprecated_error_prone_window_functions = 1; +SET output_format_pretty_row_numbers = 0; + +SELECT + neighbor(n, -2) AS int, + neighbor(s, -2) AS str, + neighbor(lcs, -2) AS lowCstr +FROM +( + SELECT + number % 5 AS n, + toString(n) AS s, + CAST(s, 'LowCardinality(String)') AS lcs + FROM numbers(10) +); + +drop table if exists neighbor_test; + +CREATE TABLE neighbor_test +( + `rowNr` UInt8, + `val_string` String, + `val_low` LowCardinality(String) +) +ENGINE = MergeTree +PARTITION BY tuple() +ORDER BY rowNr; + +INSERT INTO neighbor_test VALUES (1, 'String 1', 'String 1'), (2, 'String 1', 'String 1'), (3, 'String 2', 'String 2'); + +SELECT + rowNr, + val_string, + neighbor(val_string, -1) AS str_m1, + neighbor(val_string, 1) AS str_p1, + val_low, + neighbor(val_low, -1) AS low_m1, + neighbor(val_low, 1) AS low_p1 +FROM +( + SELECT * + FROM neighbor_test + ORDER BY val_string, rowNr +) +ORDER BY rowNr, val_string, str_m1, str_p1, val_low, low_m1, low_p1 +SETTINGS output_format_pretty_color=1 +format PrettyCompact; + +drop table if exists neighbor_test; diff --git a/parser/testdata/01670_sign_function/ast.json b/parser/testdata/01670_sign_function/ast.json new file mode 100644 index 000000000..e3869353a --- /dev/null +++ b/parser/testdata/01670_sign_function/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sign (children 1)" + }, + { + "explain": " 
ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001232764, + "rows_read": 7, + "bytes_read": 257 + } +} diff --git a/parser/testdata/01670_sign_function/metadata.json b/parser/testdata/01670_sign_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01670_sign_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01670_sign_function/query.sql b/parser/testdata/01670_sign_function/query.sql new file mode 100644 index 000000000..af5559a08 --- /dev/null +++ b/parser/testdata/01670_sign_function/query.sql @@ -0,0 +1,32 @@ +SELECT sign(0); +SELECT sign(1); +SELECT sign(-1); + +DROP TABLE IF EXISTS test; + +CREATE TABLE test( + n1 Int32, + n2 UInt32, + n3 Float32, + n4 Float64, + n5 Decimal32(5) +) ENGINE = Memory; + +INSERT INTO test VALUES (1, 2, -0.0001, 1.5, 0.5) (-2, 0, 2.5, -4, -5) (4, 5, 5, 0, 7); + +SELECT 'sign(Int32)'; +SELECT sign(n1) FROM test; + +SELECT 'sign(UInt32)'; +SELECT sign(n2) FROM test; + +SELECT 'sign(Float32)'; +SELECT sign(n3) FROM test; + +SELECT 'sign(Float64)'; +SELECT sign(n4) FROM test; + +SELECT 'sign(Decimal32(5))'; +SELECT sign(n5) FROM test; + +DROP TABLE test; diff --git a/parser/testdata/01670_test_repeat_mysql_dialect/ast.json b/parser/testdata/01670_test_repeat_mysql_dialect/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01670_test_repeat_mysql_dialect/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01670_test_repeat_mysql_dialect/metadata.json b/parser/testdata/01670_test_repeat_mysql_dialect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01670_test_repeat_mysql_dialect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01670_test_repeat_mysql_dialect/query.sql b/parser/testdata/01670_test_repeat_mysql_dialect/query.sql new file mode 100644 index 000000000..87cdc00fe --- /dev/null +++ b/parser/testdata/01670_test_repeat_mysql_dialect/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest + +SELECT REPEAT('Test', 3); diff --git a/parser/testdata/01671_aggregate_function_group_bitmap_data/ast.json b/parser/testdata/01671_aggregate_function_group_bitmap_data/ast.json new file mode 100644 index 000000000..f99d7e4d5 --- /dev/null +++ b/parser/testdata/01671_aggregate_function_group_bitmap_data/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.0011953, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01671_aggregate_function_group_bitmap_data/metadata.json b/parser/testdata/01671_aggregate_function_group_bitmap_data/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01671_aggregate_function_group_bitmap_data/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01671_aggregate_function_group_bitmap_data/query.sql b/parser/testdata/01671_aggregate_function_group_bitmap_data/query.sql new file mode 100644 index 000000000..a04f40058 --- /dev/null +++ b/parser/testdata/01671_aggregate_function_group_bitmap_data/query.sql @@ -0,0 +1,58 @@ +SET group_by_two_level_threshold = 10000; + +CREATE TABLE group_bitmap_data_test +( + `pickup_date` Date, + `city_id` UInt32, + `uid` UInt32 +) +ENGINE = Memory; + +INSERT INTO group_bitmap_data_test SELECT 
+ '2019-01-01', + 1, + number +FROM numbers(1, 50); + +INSERT INTO group_bitmap_data_test SELECT + '2019-01-02', + 1, + number +FROM numbers(11, 60); + +INSERT INTO group_bitmap_data_test SELECT + '2019-01-03', + 2, + number +FROM numbers(1, 10); + +SELECT + bitmapCardinality(day_today) AS today_users, + bitmapCardinality(day_before) AS before_users, + bitmapCardinality(bitmapOr(day_today, day_before)) AS ll_users, + bitmapCardinality(bitmapAnd(day_today, day_before)) AS old_users, + bitmapCardinality(bitmapAndnot(day_today, day_before)) AS new_users, + bitmapCardinality(bitmapXor(day_today, day_before)) AS diff_users +FROM +( + SELECT + city_id, + groupBitmapState(uid) AS day_today + FROM group_bitmap_data_test + WHERE pickup_date = '2019-01-02' + GROUP BY + uid, + city_id +) AS js1 +ALL LEFT JOIN +( + SELECT + city_id, + groupBitmapState(uid) AS day_before + FROM group_bitmap_data_test + WHERE pickup_date = '2019-01-01' + GROUP BY city_id +) AS js2 USING (city_id) +ORDER BY today_users, before_users, ll_users, old_users, new_users, diff_users; + +DROP TABLE IF EXISTS group_bitmap_data_test; diff --git a/parser/testdata/01671_merge_join_and_constants/ast.json b/parser/testdata/01671_merge_join_and_constants/ast.json new file mode 100644 index 000000000..10479d037 --- /dev/null +++ b/parser/testdata/01671_merge_join_and_constants/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001311785, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01671_merge_join_and_constants/metadata.json b/parser/testdata/01671_merge_join_and_constants/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01671_merge_join_and_constants/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01671_merge_join_and_constants/query.sql b/parser/testdata/01671_merge_join_and_constants/query.sql new file mode 100644 index 000000000..a2153bf00 --- /dev/null +++ b/parser/testdata/01671_merge_join_and_constants/query.sql @@ -0,0 +1,20 @@ +SET output_format_pretty_color=1; +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS table1; +DROP TABLE IF EXISTS table2; + +CREATE TABLE table1(a String, b Date) ENGINE MergeTree order by a; +CREATE TABLE table2(c String, a String, d Date) ENGINE MergeTree order by c; + +INSERT INTO table1 VALUES ('a', '2018-01-01') ('b', '2018-01-01') ('c', '2018-01-01'); +INSERT INTO table2 VALUES ('D', 'd', '2018-01-01') ('B', 'b', '2018-01-01') ('C', 'c', '2018-01-01'); + +set join_algorithm = 'partial_merge'; + +SELECT * FROM table1 AS t1 ALL LEFT JOIN (SELECT *, '0.10', c, d AS b FROM table2) AS t2 USING (a, b) ORDER BY d, t1.a ASC FORMAT PrettyCompact settings max_rows_in_join = 1; + +SELECT pow('0.0000000257', NULL), pow(pow(NULL, NULL), NULL) - NULL, (val + NULL) = (rval * 0), * FROM (SELECT (val + 256) = (NULL * NULL), toLowCardinality(toNullable(dummy)) AS val FROM system.one) AS s1 ANY LEFT JOIN (SELECT toLowCardinality(dummy) AS rval FROM system.one) AS s2 ON (val + 0) = (rval * 255) settings max_rows_in_join = 1; + +DROP TABLE IF EXISTS table1; +DROP TABLE IF EXISTS table2; diff --git a/parser/testdata/01671_test_toQuarter_mysql_dialect/ast.json b/parser/testdata/01671_test_toQuarter_mysql_dialect/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01671_test_toQuarter_mysql_dialect/ast.json @@ -0,0 +1 @@ +{"error": true} 
diff --git a/parser/testdata/01671_test_toQuarter_mysql_dialect/metadata.json b/parser/testdata/01671_test_toQuarter_mysql_dialect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01671_test_toQuarter_mysql_dialect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01671_test_toQuarter_mysql_dialect/query.sql b/parser/testdata/01671_test_toQuarter_mysql_dialect/query.sql new file mode 100644 index 000000000..9c9965c8f --- /dev/null +++ b/parser/testdata/01671_test_toQuarter_mysql_dialect/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest + +SELECT QUARTER(toDateTime('2016-06-15 23:00:00')); diff --git a/parser/testdata/01672_actions_dag_merge_crash/ast.json b/parser/testdata/01672_actions_dag_merge_crash/ast.json new file mode 100644 index 000000000..0a9d9a2d6 --- /dev/null +++ b/parser/testdata/01672_actions_dag_merge_crash/ast.json @@ -0,0 +1,244 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 6)" + }, + { + "explain": " Literal Array_[NULL, '25.6', '-0.02', NULL]" + }, + { + "explain": " Literal Array_[NULL]" + }, + { + "explain": " Literal UInt64_1024" + }, + { + "explain": " Literal Array_[NULL, '10485.76', NULL, NULL]" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal '-922337203.6854775808'" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal Array_[NULL]" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 6)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function multiIf (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_1023" + }, + { + "explain": " Literal Float64_-inf" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal '-1'" + }, + { + "explain": " Function multiIf (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal 
NULL" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal Array_[NULL, NULL]" + }, + { + "explain": " Function multiIf (alias s) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal UInt64_65536" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal ''" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1024" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 74, + + "statistics": + { + "elapsed": 0.002042095, + "rows_read": 74, + "bytes_read": 3230 + } +} diff --git a/parser/testdata/01672_actions_dag_merge_crash/metadata.json b/parser/testdata/01672_actions_dag_merge_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01672_actions_dag_merge_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01672_actions_dag_merge_crash/query.sql b/parser/testdata/01672_actions_dag_merge_crash/query.sql new file mode 100644 index 000000000..b3b5b3bcd --- /dev/null +++ b/parser/testdata/01672_actions_dag_merge_crash/query.sql @@ -0,0 +1 @@ +SELECT [NULL, '25.6', '-0.02', NULL], [NULL], 1024, [NULL, '10485.76', NULL, NULL], [NULL, '-922337203.6854775808', toNullable(NULL)], [NULL] FROM (SELECT [multiIf((number % 1023) = -inf, toString(number), NULL)], NULL, '-1', multiIf((number % NULL) = NULL, toString(number), ''), [NULL, NULL], multiIf((number % NULL) = 65536, toString(number), '') AS s FROM system.numbers) LIMIT 1024 format Null diff --git a/parser/testdata/01672_test_toSecond_mysql_dialect/ast.json b/parser/testdata/01672_test_toSecond_mysql_dialect/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01672_test_toSecond_mysql_dialect/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01672_test_toSecond_mysql_dialect/metadata.json b/parser/testdata/01672_test_toSecond_mysql_dialect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01672_test_toSecond_mysql_dialect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01672_test_toSecond_mysql_dialect/query.sql b/parser/testdata/01672_test_toSecond_mysql_dialect/query.sql new file mode 100644 index 000000000..3438b5984 --- /dev/null +++ b/parser/testdata/01672_test_toSecond_mysql_dialect/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest + +SELECT SECOND(toDateTime('2016-06-15 23:00:00')); diff --git a/parser/testdata/01673_test_toMinute_mysql_dialect/ast.json b/parser/testdata/01673_test_toMinute_mysql_dialect/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01673_test_toMinute_mysql_dialect/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/01673_test_toMinute_mysql_dialect/metadata.json b/parser/testdata/01673_test_toMinute_mysql_dialect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01673_test_toMinute_mysql_dialect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01673_test_toMinute_mysql_dialect/query.sql b/parser/testdata/01673_test_toMinute_mysql_dialect/query.sql new file mode 100644 index 000000000..a4f1427e0 --- /dev/null +++ b/parser/testdata/01673_test_toMinute_mysql_dialect/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest + +SELECT MINUTE(toDateTime('2016-06-15 23:00:00')); diff --git a/parser/testdata/01674_executable_dictionary_implicit_key/ast.json b/parser/testdata/01674_executable_dictionary_implicit_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01674_executable_dictionary_implicit_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01674_executable_dictionary_implicit_key/metadata.json b/parser/testdata/01674_executable_dictionary_implicit_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01674_executable_dictionary_implicit_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01674_executable_dictionary_implicit_key/query.sql b/parser/testdata/01674_executable_dictionary_implicit_key/query.sql new file mode 100644 index 000000000..caa3e9760 --- /dev/null +++ b/parser/testdata/01674_executable_dictionary_implicit_key/query.sql @@ -0,0 +1,7 @@ +-- Tags: no-parallel + +SELECT dictGet('simple_executable_cache_dictionary_no_implicit_key', 'value', toUInt64(1)); +SELECT dictGet('simple_executable_cache_dictionary_implicit_key', 'value', toUInt64(1)); + +SELECT dictGet('complex_executable_cache_dictionary_no_implicit_key', 'value', (toUInt64(1), 'FirstKey')); +SELECT dictGet('complex_executable_cache_dictionary_implicit_key', 'value', (toUInt64(1), 'FirstKey')); diff --git a/parser/testdata/01674_filter_by_uint8/ast.json b/parser/testdata/01674_filter_by_uint8/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01674_filter_by_uint8/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01674_filter_by_uint8/metadata.json b/parser/testdata/01674_filter_by_uint8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01674_filter_by_uint8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01674_filter_by_uint8/query.sql b/parser/testdata/01674_filter_by_uint8/query.sql new file mode 100644 index 000000000..27538497f --- /dev/null +++ b/parser/testdata/01674_filter_by_uint8/query.sql @@ -0,0 +1,15 @@ +-- ORDER BY is there to trigger a comparison on uninitialized memory after bad filtering.
+SELECT ignore(number) FROM numbers(256) ORDER BY arrayFilter(x -> materialize(255), materialize([257])) LIMIT 1; +SELECT ignore(number) FROM numbers(256) ORDER BY arrayFilter(x -> materialize(255), materialize(['257'])) LIMIT 1; + +SELECT count() FROM numbers(256) WHERE toUInt8(number); + +DROP TABLE IF EXISTS t_filter; +CREATE TABLE t_filter(s String, a Array(FixedString(3)), u UInt64, f UInt8) +ENGINE = MergeTree ORDER BY u; + +INSERT INTO t_filter SELECT toString(number), ['foo', 'bar'], number, toUInt8(number) FROM numbers(1000); +SELECT * FROM t_filter WHERE f ORDER BY u LIMIT 5; +SELECT * FROM t_filter WHERE f != 0 ORDER BY u LIMIT 5; + +DROP TABLE IF EXISTS t_filter; diff --git a/parser/testdata/01674_htm_xml_coarse_parse/ast.json b/parser/testdata/01674_htm_xml_coarse_parse/ast.json new file mode 100644 index 000000000..e3ca96b4e --- /dev/null +++ b/parser/testdata/01674_htm_xml_coarse_parse/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function extractTextFromHTML (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '<script>Here is script.<\/script>'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001480084, + "rows_read": 7, + "bytes_read": 298 + } +} diff --git a/parser/testdata/01674_htm_xml_coarse_parse/metadata.json b/parser/testdata/01674_htm_xml_coarse_parse/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01674_htm_xml_coarse_parse/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01674_htm_xml_coarse_parse/query.sql b/parser/testdata/01674_htm_xml_coarse_parse/query.sql new file mode 100644 index 000000000..fd1292eb3 --- /dev/null +++ b/parser/testdata/01674_htm_xml_coarse_parse/query.sql @@ -0,0 +1,16 @@ +SELECT extractTextFromHTML('<script>Here is script.</script>'); +SELECT extractTextFromHTML('<style>Here is style.</style>'); +SELECT extractTextFromHTML('<![CDATA[Here is CDTATA.]]>'); +SELECT extractTextFromHTML('This is a white space test.'); +SELECT extractTextFromHTML('This is a complex test. 
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"\n "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"><html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"><![CDATA[<script type="text/javascript">Hello, world</script> ]]><hello />world<![CDATA[ <style> ]]> hello</style>\n<script><![CDATA[</script>]]>hello</script>\n</html>'); + +DROP TABLE IF EXISTS defaults; +CREATE TABLE defaults +( + stringColumn String +) ENGINE = Memory(); + +INSERT INTO defaults values ('<common tag>hello, world<tag>'), ('<script desc=content> some content </script>'), ('<![CDATA[hello, world]]>'), ('white space collapse'); + +SELECT extractTextFromHTML(stringColumn) FROM defaults; +DROP table defaults; diff --git a/parser/testdata/01674_unicode_asan/ast.json b/parser/testdata/01674_unicode_asan/ast.json new file mode 100644 index 000000000..cbb10463b --- /dev/null +++ b/parser/testdata/01674_unicode_asan/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function positionCaseInsensitiveUTF8 (alias res) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'иголка.ру'" + }, + { + "explain": " Literal 'иголка.р�\\0'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001287523, + "rows_read": 8, + "bytes_read": 345 + } +} diff --git a/parser/testdata/01674_unicode_asan/metadata.json b/parser/testdata/01674_unicode_asan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01674_unicode_asan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01674_unicode_asan/query.sql b/parser/testdata/01674_unicode_asan/query.sql new file mode 100644 index 000000000..85c210235 --- /dev/null +++ b/parser/testdata/01674_unicode_asan/query.sql @@ -0,0 +1,2 @@ +SELECT positionCaseInsensitiveUTF8('иголка.ру', 'иголка.р\0') AS res; +SELECT sum(ignore(positionCaseInsensitiveUTF8('иголка.ру', randomString(rand() % 2)))) FROM numbers(1000000); diff --git a/parser/testdata/01674_where_prewhere_array_crash/ast.json b/parser/testdata/01674_where_prewhere_array_crash/ast.json new file mode 100644 index 000000000..bb7409b84 --- /dev/null +++ b/parser/testdata/01674_where_prewhere_array_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001230101, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01674_where_prewhere_array_crash/metadata.json b/parser/testdata/01674_where_prewhere_array_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01674_where_prewhere_array_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01674_where_prewhere_array_crash/query.sql b/parser/testdata/01674_where_prewhere_array_crash/query.sql new file mode 100644 index 000000000..2611eedff --- /dev/null +++ b/parser/testdata/01674_where_prewhere_array_crash/query.sql @@ -0,0 +1,5 @@ +drop table if exists tab; +create table tab (x UInt64, `arr.a` Array(UInt64), `arr.b` Array(UInt64)) engine = MergeTree order by x; +select x from tab 
array join arr prewhere x != 0 where arr; -- { serverError UNKNOWN_IDENTIFIER, 59 } +select x from tab array join arr prewhere arr where x != 0; -- { serverError UNKNOWN_IDENTIFIER, 59 } +drop table if exists tab; diff --git a/parser/testdata/01676_dictget_in_default_expression/ast.json b/parser/testdata/01676_dictget_in_default_expression/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01676_dictget_in_default_expression/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01676_dictget_in_default_expression/metadata.json b/parser/testdata/01676_dictget_in_default_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01676_dictget_in_default_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01676_dictget_in_default_expression/query.sql b/parser/testdata/01676_dictget_in_default_expression/query.sql new file mode 100644 index 000000000..db23ae191 --- /dev/null +++ b/parser/testdata/01676_dictget_in_default_expression/query.sql @@ -0,0 +1,34 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS test_01676 SYNC; + +CREATE DATABASE test_01676; + +CREATE TABLE test_01676.dict_data (key UInt64, value UInt64) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO test_01676.dict_data VALUES (2,20), (3,30), (4,40), (5,50); + +CREATE DICTIONARY test_01676.dict (key UInt64, value UInt64) PRIMARY KEY key SOURCE(CLICKHOUSE(DB 'test_01676' TABLE 'dict_data' HOST '127.0.0.1' PORT tcpPort())) LIFETIME(0) LAYOUT(HASHED()); + +CREATE TABLE test_01676.table (x UInt64, y UInt64 DEFAULT dictGet('test_01676.dict', 'value', x)) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO test_01676.table (x) VALUES (2); +INSERT INTO test_01676.table VALUES (toUInt64(3), toUInt64(15)); + +SELECT * FROM test_01676.table ORDER BY x; + +SELECT 'status:'; +SELECT status FROM system.dictionaries WHERE database='test_01676' AND name='dict'; + +DETACH DATABASE test_01676; +ATTACH DATABASE test_01676; + +SELECT 'status_after_detach_and_attach:'; +-- It may not be loaded, or may not even have finished attaching, in the case of asynchronous table loading.
+SELECT COALESCE((SELECT status FROM system.dictionaries WHERE database='test_01676' AND name='dict')::Nullable(String), 'NOT_LOADED'); + +INSERT INTO test_01676.table (x) VALUES (toInt64(4)); +SELECT * FROM test_01676.table ORDER BY x; + +SELECT 'status:'; +SELECT status FROM system.dictionaries WHERE database='test_01676' AND name='dict'; + +DROP DATABASE test_01676; diff --git a/parser/testdata/01676_range_hashed_dictionary/ast.json b/parser/testdata/01676_range_hashed_dictionary/ast.json new file mode 100644 index 000000000..a1fcdc49e --- /dev/null +++ b/parser/testdata/01676_range_hashed_dictionary/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery date_table (children 1)" + }, + { + "explain": " Identifier date_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001548171, + "rows_read": 2, + "bytes_read": 73 + } +} diff --git a/parser/testdata/01676_range_hashed_dictionary/metadata.json b/parser/testdata/01676_range_hashed_dictionary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01676_range_hashed_dictionary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01676_range_hashed_dictionary/query.sql b/parser/testdata/01676_range_hashed_dictionary/query.sql new file mode 100644 index 000000000..ba2a9eba8 --- /dev/null +++ b/parser/testdata/01676_range_hashed_dictionary/query.sql @@ -0,0 +1,106 @@ +CREATE TABLE date_table +( + CountryID UInt64, + StartDate Date, + EndDate Date, + Tax Float64 +) +ENGINE = MergeTree() +ORDER BY CountryID; + +INSERT INTO date_table VALUES(1, toDate('2019-05-05'), toDate('2019-05-20'), 0.33); +INSERT INTO date_table VALUES(1, toDate('2019-05-21'), toDate('2019-05-30'), 0.42); +INSERT INTO date_table VALUES(2, toDate('2019-05-21'), toDate('2019-05-30'), 0.46); + +CREATE DICTIONARY range_dictionary +( + CountryID UInt64, + StartDate Date, + EndDate Date, + Tax Float64 DEFAULT 0.2 +) +PRIMARY KEY CountryID +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'date_table' DB currentDatabase())) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(RANGE_HASHED()) +RANGE(MIN StartDate MAX EndDate) +SETTINGS(dictionary_use_async_executor=1, max_threads=8) +; + +SELECT 'Dictionary not nullable'; +SELECT 'dictGet'; +SELECT dictGet('range_dictionary', 'Tax', toUInt64(1), toDate('2019-05-15')); +SELECT dictGet('range_dictionary', 'Tax', toUInt64(1), toDate('2019-05-29')); +SELECT dictGet('range_dictionary', 'Tax', toUInt64(2), toDate('2019-05-29')); +SELECT dictGet('range_dictionary', 'Tax', toUInt64(2), toDate('2019-05-31')); +SELECT dictGetOrDefault('range_dictionary', 'Tax', toUInt64(2), toDate('2019-05-31'), 0.4); +SELECT 'dictHas'; +SELECT dictHas('range_dictionary', toUInt64(1), toDate('2019-05-15')); +SELECT dictHas('range_dictionary', toUInt64(1), toDate('2019-05-29')); +SELECT dictHas('range_dictionary', toUInt64(2), toDate('2019-05-29')); +SELECT dictHas('range_dictionary', toUInt64(2), toDate('2019-05-31')); +SELECT 'select columns from dictionary'; +SELECT 'allColumns'; +SELECT * FROM range_dictionary ORDER BY CountryID, StartDate, EndDate; +SELECT 'noColumns'; +SELECT 1 FROM range_dictionary ORDER BY CountryID, StartDate, EndDate; +SELECT 'onlySpecificColumns'; +SELECT CountryID, StartDate, Tax FROM range_dictionary ORDER BY CountryID, StartDate, EndDate; +SELECT 'onlySpecificColumn'; +SELECT Tax FROM range_dictionary ORDER BY CountryID, StartDate, EndDate; + +DROP DICTIONARY 
range_dictionary; +DROP TABLE date_table; + +CREATE TABLE date_table +( + CountryID UInt64, + StartDate Date, + EndDate Date, + Tax Nullable(Float64) +) +ENGINE = MergeTree() +ORDER BY CountryID; + +INSERT INTO date_table VALUES(1, toDate('2019-05-05'), toDate('2019-05-20'), 0.33); +INSERT INTO date_table VALUES(1, toDate('2019-05-21'), toDate('2019-05-30'), 0.42); +INSERT INTO date_table VALUES(2, toDate('2019-05-21'), toDate('2019-05-30'), NULL); + +CREATE DICTIONARY range_dictionary_nullable +( + CountryID UInt64, + StartDate Date, + EndDate Date, + Tax Nullable(Float64) DEFAULT 0.2 +) +PRIMARY KEY CountryID +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'date_table' DB currentDatabase())) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(RANGE_HASHED()) +RANGE(MIN StartDate MAX EndDate); + +SELECT 'Dictionary nullable'; +SELECT 'dictGet'; +SELECT dictGet('range_dictionary_nullable', 'Tax', toUInt64(1), toDate('2019-05-15')); +SELECT dictGet('range_dictionary_nullable', 'Tax', toUInt64(1), toDate('2019-05-29')); +SELECT dictGet('range_dictionary_nullable', 'Tax', toUInt64(2), toDate('2019-05-29')); +SELECT dictGet('range_dictionary_nullable', 'Tax', toUInt64(2), toDate('2019-05-31')); +SELECT dictGetOrDefault('range_dictionary_nullable', 'Tax', toUInt64(2), toDate('2019-05-31'), 0.4); +SELECT 'dictHas'; +SELECT dictHas('range_dictionary_nullable', toUInt64(1), toDate('2019-05-15')); +SELECT dictHas('range_dictionary_nullable', toUInt64(1), toDate('2019-05-29')); +SELECT dictHas('range_dictionary_nullable', toUInt64(2), toDate('2019-05-29')); +SELECT dictHas('range_dictionary_nullable', toUInt64(2), toDate('2019-05-31')); +SELECT 'select columns from dictionary'; +SELECT 'allColumns'; +SELECT * FROM range_dictionary_nullable ORDER BY CountryID, StartDate, EndDate; +SELECT 'noColumns'; +SELECT 1 FROM range_dictionary_nullable ORDER BY CountryID, StartDate, EndDate; +SELECT 'onlySpecificColumns'; +SELECT CountryID, StartDate, Tax FROM range_dictionary_nullable ORDER BY CountryID, StartDate, EndDate; +SELECT 'onlySpecificColumn'; +SELECT Tax FROM range_dictionary_nullable ORDER BY CountryID, StartDate, EndDate; + +DROP DICTIONARY range_dictionary_nullable; +DROP TABLE date_table; + diff --git a/parser/testdata/01676_reinterpret_as/ast.json b/parser/testdata/01676_reinterpret_as/ast.json new file mode 100644 index 000000000..32f77aafa --- /dev/null +++ b/parser/testdata/01676_reinterpret_as/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Into String'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001714053, + "rows_read": 5, + "bytes_read": 182 + } +} diff --git a/parser/testdata/01676_reinterpret_as/metadata.json b/parser/testdata/01676_reinterpret_as/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01676_reinterpret_as/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01676_reinterpret_as/query.sql b/parser/testdata/01676_reinterpret_as/query.sql new file mode 100644 index 000000000..aa9f901c9 --- /dev/null +++ b/parser/testdata/01676_reinterpret_as/query.sql @@ -0,0 +1,42 @@ +SELECT 'Into String'; +SELECT reinterpret(49, 'String'); +SELECT 'Into FixedString'; +SELECT reinterpret(49, 
'FixedString(1)'); +SELECT reinterpret(49, 'FixedString(2)'); +SELECT reinterpret(49, 'FixedString(3)'); +SELECT reinterpret(49, 'FixedString(4)'); +SELECT reinterpretAsFixedString(49); +SELECT 'Into Numeric Representable'; +SELECT 'Integer and Integer types'; +SELECT reinterpret(257, 'UInt8'), reinterpretAsUInt8(257); +SELECT reinterpret(257, 'Int8'), reinterpretAsInt8(257); +SELECT reinterpret(257, 'UInt16'), reinterpretAsUInt16(257); +SELECT reinterpret(257, 'Int16'), reinterpretAsInt16(257); +SELECT reinterpret(257, 'UInt32'), reinterpretAsUInt32(257); +SELECT reinterpret(257, 'Int32'), reinterpretAsInt32(257); +SELECT reinterpret(257, 'UInt64'), reinterpretAsUInt64(257); +SELECT reinterpret(257, 'Int64'), reinterpretAsInt64(257); +SELECT reinterpret(257, 'Int128'), reinterpretAsInt128(257); +SELECT reinterpret(257, 'UInt256'), reinterpretAsUInt256(257); +SELECT reinterpret(257, 'Int256'), reinterpretAsInt256(257); +SELECT 'Integer and Float types'; +SELECT reinterpret(toFloat32(0.2), 'UInt32'), reinterpretAsUInt32(toFloat32(0.2)); +SELECT reinterpret(toFloat64(0.2), 'UInt64'), reinterpretAsUInt64(toFloat64(0.2)); +SELECT reinterpretAsFloat32(a), reinterpretAsUInt32(toFloat32(0.2)) as a; +SELECT reinterpretAsFloat64(a), reinterpretAsUInt64(toFloat64(0.2)) as a; +SELECT 'Integer and String types'; +SELECT reinterpret(a, 'String'), reinterpretAsString(a), reinterpretAsUInt8('1') as a; +SELECT reinterpret(a, 'String'), reinterpretAsString(a), reinterpretAsUInt8('11') as a; +SELECT reinterpret(a, 'String'), reinterpretAsString(a), reinterpretAsUInt16('11') as a; +SELECT 'Dates'; +SELECT reinterpret(0, 'Date'), reinterpret('', 'Date'); +SELECT reinterpret(0, 'DateTime(''Asia/Istanbul'')'), reinterpret('', 'DateTime(''Asia/Istanbul'')'); +SELECT reinterpret(0, 'DateTime64(3, ''Asia/Istanbul'')'), reinterpret('', 'DateTime64(3, ''Asia/Istanbul'')'); +SELECT 'Decimals'; +SELECT reinterpret(toDecimal32(5, 2), 'Decimal32(2)'), reinterpret('1', 'Decimal32(2)'); +SELECT reinterpret(toDecimal64(5, 2), 'Decimal64(2)'), reinterpret('1', 'Decimal64(2)'); +SELECT reinterpret(toDecimal128(5, 2), 'Decimal128(2)'), reinterpret('1', 'Decimal128(2)'); +SELECT reinterpret(toDecimal256(5, 2), 'Decimal256(2)'), reinterpret('1', 'Decimal256(2)'); +SELECT reinterpret(toDateTime64(0, 0), 'Decimal64(2)'); +SELECT 'ReinterpretErrors'; +SELECT reinterpret('123', 'FixedString(1)'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} diff --git a/parser/testdata/01676_round_int_ubsan/ast.json b/parser/testdata/01676_round_int_ubsan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01676_round_int_ubsan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01676_round_int_ubsan/metadata.json b/parser/testdata/01676_round_int_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01676_round_int_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01676_round_int_ubsan/query.sql b/parser/testdata/01676_round_int_ubsan/query.sql new file mode 100644 index 000000000..45aa5706a --- /dev/null +++ b/parser/testdata/01676_round_int_ubsan/query.sql @@ -0,0 +1,6 @@ +-- Overflow during integer rounding is implementation-specific behaviour. +-- This test lets us notice if the implementation changes. +-- Changing the implementation-specific behaviour is OK +-- and should not be treated as an incompatibility (simply update the test result then).
+ +SELECT round(-9223372036854775808, -2); diff --git a/parser/testdata/01677_array_enumerate_bug/ast.json b/parser/testdata/01677_array_enumerate_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01677_array_enumerate_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01677_array_enumerate_bug/metadata.json b/parser/testdata/01677_array_enumerate_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01677_array_enumerate_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01677_array_enumerate_bug/query.sql b/parser/testdata/01677_array_enumerate_bug/query.sql new file mode 100644 index 000000000..0db0c51fe --- /dev/null +++ b/parser/testdata/01677_array_enumerate_bug/query.sql @@ -0,0 +1,13 @@ +-- there was a bug - missing check of the total size of keys for the case with hash table with 128bit key. + +SELECT arrayEnumerateUniq(arrayEnumerateUniq([toInt256(10), toInt256(100), toInt256(2)]), [toInt256(123), toInt256(1023), toInt256(123)]); + +SELECT arrayEnumerateUniq( + [111111, 222222, 333333], + [444444, 555555, 666666], + [111111, 222222, 333333], + [444444, 555555, 666666], + [111111, 222222, 333333], + [444444, 555555, 666666], + [111111, 222222, 333333], + [444444, 555555, 666666]); diff --git a/parser/testdata/01677_bit_float/ast.json b/parser/testdata/01677_bit_float/ast.json new file mode 100644 index 000000000..45a46e80b --- /dev/null +++ b/parser/testdata/01677_bit_float/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitAnd (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal Float64_inf" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001348582, + "rows_read": 8, + "bytes_read": 292 + } +} diff --git a/parser/testdata/01677_bit_float/metadata.json b/parser/testdata/01677_bit_float/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01677_bit_float/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01677_bit_float/query.sql b/parser/testdata/01677_bit_float/query.sql new file mode 100644 index 000000000..d0ad8f2d9 --- /dev/null +++ b/parser/testdata/01677_bit_float/query.sql @@ -0,0 +1,9 @@ +SELECT bitAnd(0, inf); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT bitXor(0, inf); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT bitOr(0, inf); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT bitTest(inf, 0); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT bitTest(0, inf); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT bitRotateLeft(inf, 0); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT bitRotateRight(inf, 0); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT bitShiftLeft(inf, 0); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT bitShiftRight(inf, 0); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/01678_great_circle_angle/ast.json b/parser/testdata/01678_great_circle_angle/ast.json new file mode 100644 index 000000000..1398684f9 --- /dev/null +++ b/parser/testdata/01678_great_circle_angle/ast.json @@ -0,0 +1,25 @@ +{ + 
"meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001228399, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01678_great_circle_angle/metadata.json b/parser/testdata/01678_great_circle_angle/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01678_great_circle_angle/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01678_great_circle_angle/query.sql b/parser/testdata/01678_great_circle_angle/query.sql new file mode 100644 index 000000000..595622822 --- /dev/null +++ b/parser/testdata/01678_great_circle_angle/query.sql @@ -0,0 +1,17 @@ +SET geo_distance_returns_float64_on_float64_arguments = 0; + +SELECT round(greatCircleAngle(0, 45, 0.1, 45.1), 4); +SELECT round(greatCircleAngle(0, 45, 1, 45), 4); +SELECT round(greatCircleAngle(0, 45, 1, 45.1), 4); + +SELECT round(greatCircleDistance(0, 0, 0, 90), 4); +SELECT round(greatCircleDistance(0, 0, 90, 0), 4); + +SET geo_distance_returns_float64_on_float64_arguments = 1; + +SELECT round(greatCircleAngle(0, 45, 0.1, 45.1), 4); +SELECT round(greatCircleAngle(0, 45, 1, 45), 4); +SELECT round(greatCircleAngle(0, 45, 1, 45.1), 4); + +SELECT round(greatCircleDistance(0, 0, 0, 90), 4); +SELECT round(greatCircleDistance(0, 0, 90, 0), 4); diff --git a/parser/testdata/01679_format_readable_time_delta_inf/ast.json b/parser/testdata/01679_format_readable_time_delta_inf/ast.json new file mode 100644 index 000000000..15bf36e2d --- /dev/null +++ b/parser/testdata/01679_format_readable_time_delta_inf/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function formatReadableTimeDelta (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[Float64_inf, Float64_-inf, Float64_nan]" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001417045, + "rows_read": 9, + "bytes_read": 403 + } +} diff --git a/parser/testdata/01679_format_readable_time_delta_inf/metadata.json b/parser/testdata/01679_format_readable_time_delta_inf/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01679_format_readable_time_delta_inf/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01679_format_readable_time_delta_inf/query.sql b/parser/testdata/01679_format_readable_time_delta_inf/query.sql new file mode 100644 index 000000000..ac92dec2b --- /dev/null +++ b/parser/testdata/01679_format_readable_time_delta_inf/query.sql @@ -0,0 +1 @@ +SELECT formatReadableTimeDelta(arrayJoin([inf, -inf, nan])); diff --git a/parser/testdata/01680_date_time_add_ubsan/ast.json b/parser/testdata/01680_date_time_add_ubsan/ast.json new file mode 100644 index 000000000..7edc7d5f2 --- /dev/null +++ b/parser/testdata/01680_date_time_add_ubsan/ast.json @@ -0,0 +1,151 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " 
SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier result" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toStartOfFifteenMinutes (alias result) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toStartOfFifteenMinutes (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_1000.0001220703125" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_65536" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_9223372036854775807" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1048576" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier result" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 43, + + "statistics": + { + "elapsed": 0.001449157, + "rows_read": 43, + "bytes_read": 2065 + } +} diff --git a/parser/testdata/01680_date_time_add_ubsan/metadata.json b/parser/testdata/01680_date_time_add_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01680_date_time_add_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01680_date_time_add_ubsan/query.sql b/parser/testdata/01680_date_time_add_ubsan/query.sql new file mode 100644 index 000000000..6570621e4 --- /dev/null +++ b/parser/testdata/01680_date_time_add_ubsan/query.sql @@ -0,0 +1,3 @@ +SELECT DISTINCT result FROM (SELECT toStartOfFifteenMinutes(toDateTime(toStartOfFifteenMinutes(toDateTime(1000.0001220703125) + (number * 65536))) + (number * 9223372036854775807)) AS result FROM system.numbers LIMIT 1048576) ORDER BY result DESC NULLS FIRST FORMAT Null; -- { serverError DECIMAL_OVERFLOW } +SELECT DISTINCT result FROM (SELECT toStartOfFifteenMinutes(toDateTime(toStartOfFifteenMinutes(toDateTime(1000.0001220703125) + (number * 65536))) + toInt64(number * 9223372036854775807)) AS result FROM system.numbers LIMIT 1048576) ORDER BY result DESC NULLS FIRST FORMAT Null; +SELECT round(round(round(round(round(100)), round(round(round(round(NULL), round(65535)), toTypeName(now() + 
9223372036854775807) LIKE 'DateTime%DateTime%DateTime%DateTime%', round(-2)), 255), round(NULL)))); diff --git a/parser/testdata/01680_predicate_pushdown_union_distinct_subquery/ast.json b/parser/testdata/01680_predicate_pushdown_union_distinct_subquery/ast.json new file mode 100644 index 000000000..5244d19bf --- /dev/null +++ b/parser/testdata/01680_predicate_pushdown_union_distinct_subquery/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2000 (alias d_year)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2000 (alias d_year)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier d_year" + }, + { + "explain": " Literal UInt64_2002" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001263293, + "rows_read": 22, + "bytes_read": 894 + } +} diff --git a/parser/testdata/01680_predicate_pushdown_union_distinct_subquery/metadata.json b/parser/testdata/01680_predicate_pushdown_union_distinct_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01680_predicate_pushdown_union_distinct_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01680_predicate_pushdown_union_distinct_subquery/query.sql b/parser/testdata/01680_predicate_pushdown_union_distinct_subquery/query.sql new file mode 100644 index 000000000..181e7109c --- /dev/null +++ b/parser/testdata/01680_predicate_pushdown_union_distinct_subquery/query.sql @@ -0,0 +1 @@ +SELECT count() FROM (SELECT 2000 AS d_year UNION DISTINCT SELECT 2000 AS d_year) WHERE d_year = 2002 diff --git a/parser/testdata/01681_arg_min_max_if_fix/ast.json b/parser/testdata/01681_arg_min_max_if_fix/ast.json new file mode 100644 index 000000000..5593b7a6d --- /dev/null +++ b/parser/testdata/01681_arg_min_max_if_fix/ast.json @@ -0,0 +1,178 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function bitAnd (alias k) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function toUInt64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function minus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function pow (children 1)" + }, + { + "explain": 
" ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_257" + }, + { + "explain": " Literal UInt64_20" + }, + { + "explain": " Literal UInt64_1048576" + }, + { + "explain": " Function argMaxIf (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_255" + }, + { + "explain": " Literal UInt64_256" + }, + { + "explain": " Function toInt256 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_65535" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Function uniq (alias u) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toInt256 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Int64_-2" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Identifier k" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 52, + + "statistics": + { + "elapsed": 0.001655831, + "rows_read": 52, + "bytes_read": 2033 + } +} diff --git a/parser/testdata/01681_arg_min_max_if_fix/metadata.json b/parser/testdata/01681_arg_min_max_if_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01681_arg_min_max_if_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01681_arg_min_max_if_fix/query.sql b/parser/testdata/01681_arg_min_max_if_fix/query.sql new file mode 100644 index 000000000..5edd52e08 --- /dev/null +++ b/parser/testdata/01681_arg_min_max_if_fix/query.sql @@ -0,0 +1 @@ +SELECT bitAnd(number, toUInt64(pow(257, 20) - 1048576)) AS k, argMaxIf(k, if((number % 255) = 256, toInt256(65535), number), number > 42), uniq(number) AS u FROM numbers(2) GROUP BY toInt256(-2, NULL), k FORMAT Null diff --git a/parser/testdata/01681_bloom_filter_nullable_column/ast.json b/parser/testdata/01681_bloom_filter_nullable_column/ast.json new file mode 100644 index 000000000..7bb2afdd2 --- /dev/null +++ b/parser/testdata/01681_bloom_filter_nullable_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery bloom_filter_nullable_index (children 1)" + }, + { + "explain": " Identifier bloom_filter_nullable_index" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001852941, + "rows_read": 2, + "bytes_read": 106 + } +} diff --git a/parser/testdata/01681_bloom_filter_nullable_column/metadata.json 
b/parser/testdata/01681_bloom_filter_nullable_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01681_bloom_filter_nullable_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01681_bloom_filter_nullable_column/query.sql b/parser/testdata/01681_bloom_filter_nullable_column/query.sql new file mode 100644 index 000000000..3b9af3d27 --- /dev/null +++ b/parser/testdata/01681_bloom_filter_nullable_column/query.sql @@ -0,0 +1,51 @@ +DROP TABLE IF EXISTS bloom_filter_nullable_index; +CREATE TABLE bloom_filter_nullable_index + ( + order_key UInt64, + str Nullable(String), + + INDEX idx (str) TYPE bloom_filter GRANULARITY 1 + ) + ENGINE = MergeTree() + ORDER BY order_key SETTINGS index_granularity = 6, index_granularity_bytes = '10Mi'; + +INSERT INTO bloom_filter_nullable_index VALUES (1, 'test'); +INSERT INTO bloom_filter_nullable_index VALUES (2, 'test2'); + +SELECT 'NullableTuple with transform_null_in=0'; +SELECT * FROM bloom_filter_nullable_index WHERE str IN + (SELECT '1048576', str FROM bloom_filter_nullable_index) SETTINGS transform_null_in = 0; +SELECT * FROM bloom_filter_nullable_index WHERE str IN + (SELECT '1048576', str FROM bloom_filter_nullable_index) SETTINGS transform_null_in = 0; + +SELECT 'NullableTuple with transform_null_in=1'; + +SELECT * FROM bloom_filter_nullable_index WHERE str IN + (SELECT '1048576', str FROM bloom_filter_nullable_index) SETTINGS transform_null_in = 1; -- { serverError NUMBER_OF_COLUMNS_DOESNT_MATCH } + +SELECT * FROM bloom_filter_nullable_index WHERE str IN + (SELECT '1048576', str FROM bloom_filter_nullable_index) SETTINGS transform_null_in = 1; -- { serverError NUMBER_OF_COLUMNS_DOESNT_MATCH } + + +SELECT 'NullableColumnFromCast with transform_null_in=0'; +SELECT * FROM bloom_filter_nullable_index WHERE str IN + (SELECT cast('test', 'Nullable(String)')) SETTINGS transform_null_in = 0; + +SELECT 'NullableColumnFromCast with transform_null_in=1'; +SELECT * FROM bloom_filter_nullable_index WHERE str IN + (SELECT cast('test', 'Nullable(String)')) SETTINGS transform_null_in = 1; + +DROP TABLE IF EXISTS nullable_string_value; +CREATE TABLE nullable_string_value (value Nullable(String)) ENGINE=TinyLog; +INSERT INTO nullable_string_value VALUES ('test'); + +SELECT 'NullableColumnFromTable with transform_null_in=0'; +SELECT * FROM bloom_filter_nullable_index WHERE str IN + (SELECT value FROM nullable_string_value) SETTINGS transform_null_in = 0; + +SELECT 'NullableColumnFromTable with transform_null_in=1'; +SELECT * FROM bloom_filter_nullable_index WHERE str IN + (SELECT value FROM nullable_string_value) SETTINGS transform_null_in = 1; + +DROP TABLE nullable_string_value; +DROP TABLE bloom_filter_nullable_index; diff --git a/parser/testdata/01681_cache_dictionary_simple_key/ast.json b/parser/testdata/01681_cache_dictionary_simple_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01681_cache_dictionary_simple_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01681_cache_dictionary_simple_key/metadata.json b/parser/testdata/01681_cache_dictionary_simple_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01681_cache_dictionary_simple_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01681_cache_dictionary_simple_key/query.sql b/parser/testdata/01681_cache_dictionary_simple_key/query.sql new file mode 100644 index 000000000..9ba8a6de7 --- 
/dev/null +++ b/parser/testdata/01681_cache_dictionary_simple_key/query.sql @@ -0,0 +1,127 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 01681_database_for_cache_dictionary; +CREATE DATABASE 01681_database_for_cache_dictionary; + +CREATE TABLE 01681_database_for_cache_dictionary.simple_key_simple_attributes_source_table +( + id UInt64, + value_first String, + value_second String +) +ENGINE = TinyLog; + +INSERT INTO 01681_database_for_cache_dictionary.simple_key_simple_attributes_source_table VALUES(0, 'value_0', 'value_second_0'); +INSERT INTO 01681_database_for_cache_dictionary.simple_key_simple_attributes_source_table VALUES(1, 'value_1', 'value_second_1'); +INSERT INTO 01681_database_for_cache_dictionary.simple_key_simple_attributes_source_table VALUES(2, 'value_2', 'value_second_2'); + +CREATE DICTIONARY 01681_database_for_cache_dictionary.cache_dictionary_simple_key_simple_attributes +( + id UInt64, + value_first String DEFAULT 'value_first_default', + value_second String DEFAULT 'value_second_default' +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'simple_key_simple_attributes_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(CACHE(SIZE_IN_CELLS 10)) +SETTINGS(dictionary_use_async_executor=1, max_threads=8) +; + +SELECT 'Dictionary cache_dictionary_simple_key_simple_attributes'; +SELECT 'dictGet existing value'; +SELECT dictGet('01681_database_for_cache_dictionary.cache_dictionary_simple_key_simple_attributes', 'value_first', number) as value_first, + dictGet('01681_database_for_cache_dictionary.cache_dictionary_simple_key_simple_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGet with non existing value'; +SELECT dictGet('01681_database_for_cache_dictionary.cache_dictionary_simple_key_simple_attributes', 'value_first', number) as value_first, + dictGet('01681_database_for_cache_dictionary.cache_dictionary_simple_key_simple_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictGetOrDefault existing value'; +SELECT dictGetOrDefault('01681_database_for_cache_dictionary.cache_dictionary_simple_key_simple_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01681_database_for_cache_dictionary.cache_dictionary_simple_key_simple_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGetOrDefault non existing value'; +SELECT dictGetOrDefault('01681_database_for_cache_dictionary.cache_dictionary_simple_key_simple_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01681_database_for_cache_dictionary.cache_dictionary_simple_key_simple_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictHas'; +SELECT dictHas('01681_database_for_cache_dictionary.cache_dictionary_simple_key_simple_attributes', number) FROM system.numbers LIMIT 4; +SELECT 'select all values as input stream'; +SELECT * FROM 01681_database_for_cache_dictionary.cache_dictionary_simple_key_simple_attributes ORDER BY id; + +DROP DICTIONARY 01681_database_for_cache_dictionary.cache_dictionary_simple_key_simple_attributes; +DROP TABLE 01681_database_for_cache_dictionary.simple_key_simple_attributes_source_table; + +CREATE TABLE 01681_database_for_cache_dictionary.simple_key_complex_attributes_source_table +( + id UInt64, + value_first String, + value_second Nullable(String) +) +ENGINE = TinyLog; + 
+INSERT INTO 01681_database_for_cache_dictionary.simple_key_complex_attributes_source_table VALUES(0, 'value_0', 'value_second_0'); +INSERT INTO 01681_database_for_cache_dictionary.simple_key_complex_attributes_source_table VALUES(1, 'value_1', NULL); +INSERT INTO 01681_database_for_cache_dictionary.simple_key_complex_attributes_source_table VALUES(2, 'value_2', 'value_second_2'); + +CREATE DICTIONARY 01681_database_for_cache_dictionary.cache_dictionary_simple_key_complex_attributes +( + id UInt64, + value_first String DEFAULT 'value_first_default', + value_second Nullable(String) DEFAULT 'value_second_default' +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'simple_key_complex_attributes_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(CACHE(SIZE_IN_CELLS 10)); + +SELECT 'Dictionary cache_dictionary_simple_key_complex_attributes'; +SELECT 'dictGet existing value'; +SELECT dictGet('01681_database_for_cache_dictionary.cache_dictionary_simple_key_complex_attributes', 'value_first', number) as value_first, + dictGet('01681_database_for_cache_dictionary.cache_dictionary_simple_key_complex_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGet with non existing value'; +SELECT dictGet('01681_database_for_cache_dictionary.cache_dictionary_simple_key_complex_attributes', 'value_first', number) as value_first, + dictGet('01681_database_for_cache_dictionary.cache_dictionary_simple_key_complex_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictGetOrDefault existing value'; +SELECT dictGetOrDefault('01681_database_for_cache_dictionary.cache_dictionary_simple_key_complex_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01681_database_for_cache_dictionary.cache_dictionary_simple_key_complex_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGetOrDefault non existing value'; +SELECT dictGetOrDefault('01681_database_for_cache_dictionary.cache_dictionary_simple_key_complex_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01681_database_for_cache_dictionary.cache_dictionary_simple_key_complex_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictHas'; +SELECT dictHas('01681_database_for_cache_dictionary.cache_dictionary_simple_key_complex_attributes', number) FROM system.numbers LIMIT 4; +SELECT 'select all values as input stream'; +SELECT * FROM 01681_database_for_cache_dictionary.cache_dictionary_simple_key_complex_attributes ORDER BY id; + +DROP DICTIONARY 01681_database_for_cache_dictionary.cache_dictionary_simple_key_complex_attributes; +DROP TABLE 01681_database_for_cache_dictionary.simple_key_complex_attributes_source_table; + +CREATE TABLE 01681_database_for_cache_dictionary.simple_key_hierarchy_table +( + id UInt64, + parent_id UInt64 +) ENGINE = TinyLog(); + +INSERT INTO 01681_database_for_cache_dictionary.simple_key_hierarchy_table VALUES (1, 0); +INSERT INTO 01681_database_for_cache_dictionary.simple_key_hierarchy_table VALUES (2, 1); +INSERT INTO 01681_database_for_cache_dictionary.simple_key_hierarchy_table VALUES (3, 1); +INSERT INTO 01681_database_for_cache_dictionary.simple_key_hierarchy_table VALUES (4, 2); + +CREATE DICTIONARY 01681_database_for_cache_dictionary.cache_dictionary_simple_key_hierarchy +( + id UInt64, + parent_id UInt64 HIERARCHICAL 
+) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'simple_key_hierarchy_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(CACHE(SIZE_IN_CELLS 10)); + +SELECT 'Dictionary cache_dictionary_simple_key_hierarchy'; +SELECT 'dictGet'; +SELECT dictGet('01681_database_for_cache_dictionary.cache_dictionary_simple_key_hierarchy', 'parent_id', number) FROM system.numbers LIMIT 5; +SELECT 'dictGetHierarchy'; +SELECT dictGetHierarchy('01681_database_for_cache_dictionary.cache_dictionary_simple_key_hierarchy', toUInt64(1)); +SELECT dictGetHierarchy('01681_database_for_cache_dictionary.cache_dictionary_simple_key_hierarchy', toUInt64(4)); + +DROP DICTIONARY 01681_database_for_cache_dictionary.cache_dictionary_simple_key_hierarchy; +DROP TABLE 01681_database_for_cache_dictionary.simple_key_hierarchy_table; + +DROP DATABASE 01681_database_for_cache_dictionary; diff --git a/parser/testdata/01682_cache_dictionary_complex_key/ast.json b/parser/testdata/01682_cache_dictionary_complex_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01682_cache_dictionary_complex_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01682_cache_dictionary_complex_key/metadata.json b/parser/testdata/01682_cache_dictionary_complex_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01682_cache_dictionary_complex_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01682_cache_dictionary_complex_key/query.sql b/parser/testdata/01682_cache_dictionary_complex_key/query.sql new file mode 100644 index 000000000..dbbbf8ad4 --- /dev/null +++ b/parser/testdata/01682_cache_dictionary_complex_key/query.sql @@ -0,0 +1,99 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 01682_database_for_cache_dictionary; +CREATE DATABASE 01682_database_for_cache_dictionary; + +CREATE TABLE 01682_database_for_cache_dictionary.complex_key_simple_attributes_source_table +( + id UInt64, + id_key String, + value_first String, + value_second String +) +ENGINE = TinyLog; + +INSERT INTO 01682_database_for_cache_dictionary.complex_key_simple_attributes_source_table VALUES(0, 'id_key_0', 'value_0', 'value_second_0'); +INSERT INTO 01682_database_for_cache_dictionary.complex_key_simple_attributes_source_table VALUES(1, 'id_key_1', 'value_1', 'value_second_1'); +INSERT INTO 01682_database_for_cache_dictionary.complex_key_simple_attributes_source_table VALUES(2, 'id_key_2', 'value_2', 'value_second_2'); + +CREATE DICTIONARY 01682_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes +( + id UInt64, + id_key String, + value_first String DEFAULT 'value_first_default', + value_second String DEFAULT 'value_second_default' +) +PRIMARY KEY id, id_key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'complex_key_simple_attributes_source_table' DB '01682_database_for_cache_dictionary')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(COMPLEX_KEY_CACHE(SIZE_IN_CELLS 10)); + +SELECT 'Dictionary cache_dictionary_complex_key_simple_attributes'; +SELECT 'dictGet existing value'; +SELECT dictGet('01682_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, + dictGet('01682_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGet with 
non existing value'; +SELECT dictGet('01682_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, + dictGet('01682_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictGetOrDefault existing value'; +SELECT dictGetOrDefault('01682_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, + dictGetOrDefault('01682_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGetOrDefault non existing value'; +SELECT dictGetOrDefault('01682_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, + dictGetOrDefault('01682_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictHas'; +SELECT dictHas('01682_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes', (number, concat('id_key_', toString(number)))) FROM system.numbers LIMIT 4; +SELECT 'select all values as input stream'; +SELECT * FROM 01682_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes ORDER BY id; + +DROP DICTIONARY 01682_database_for_cache_dictionary.cache_dictionary_complex_key_simple_attributes; +DROP TABLE 01682_database_for_cache_dictionary.complex_key_simple_attributes_source_table; + +CREATE TABLE 01682_database_for_cache_dictionary.complex_key_complex_attributes_source_table +( + id UInt64, + id_key String, + value_first String, + value_second Nullable(String) +) +ENGINE = TinyLog; + +INSERT INTO 01682_database_for_cache_dictionary.complex_key_complex_attributes_source_table VALUES(0, 'id_key_0', 'value_0', 'value_second_0'); +INSERT INTO 01682_database_for_cache_dictionary.complex_key_complex_attributes_source_table VALUES(1, 'id_key_1', 'value_1', NULL); +INSERT INTO 01682_database_for_cache_dictionary.complex_key_complex_attributes_source_table VALUES(2, 'id_key_2', 'value_2', 'value_second_2'); + +CREATE DICTIONARY 01682_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes +( + id UInt64, + id_key String, + + value_first String DEFAULT 'value_first_default', + value_second Nullable(String) DEFAULT 'value_second_default' +) +PRIMARY KEY id, id_key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'complex_key_complex_attributes_source_table' DB '01682_database_for_cache_dictionary')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(COMPLEX_KEY_CACHE(SIZE_IN_CELLS 10)); + +SELECT 'Dictionary cache_dictionary_complex_key_complex_attributes'; +SELECT 'dictGet existing value'; +SELECT dictGet('01682_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, + dictGet('01682_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM 
system.numbers LIMIT 3; +SELECT 'dictGet with non existing value'; +SELECT dictGet('01682_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, + dictGet('01682_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictGetOrDefault existing value'; +SELECT dictGetOrDefault('01682_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, + dictGetOrDefault('01682_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGetOrDefault non existing value'; +SELECT dictGetOrDefault('01682_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, + dictGetOrDefault('01682_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictHas'; +SELECT dictHas('01682_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes', (number, concat('id_key_', toString(number)))) FROM system.numbers LIMIT 4; +SELECT 'select all values as input stream'; +SELECT * FROM 01682_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes ORDER BY id; + +DROP DICTIONARY 01682_database_for_cache_dictionary.cache_dictionary_complex_key_complex_attributes; +DROP TABLE 01682_database_for_cache_dictionary.complex_key_complex_attributes_source_table; + +DROP DATABASE 01682_database_for_cache_dictionary; diff --git a/parser/testdata/01682_gather_utils_ubsan/ast.json b/parser/testdata/01682_gather_utils_ubsan/ast.json new file mode 100644 index 000000000..9c5b1b914 --- /dev/null +++ b/parser/testdata/01682_gather_utils_ubsan/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayResize (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3]" + }, + { + "explain": " Literal Int64_-9223372036854775808" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001396909, + "rows_read": 8, + "bytes_read": 340 + } +} diff --git a/parser/testdata/01682_gather_utils_ubsan/metadata.json b/parser/testdata/01682_gather_utils_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01682_gather_utils_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01682_gather_utils_ubsan/query.sql b/parser/testdata/01682_gather_utils_ubsan/query.sql new file mode 100644 index 000000000..d1a0e5dcc --- /dev/null +++ b/parser/testdata/01682_gather_utils_ubsan/query.sql @@ -0,0 +1 @@ +SELECT arrayResize([1, 2, 3], -9223372036854775808); -- { serverError 
TOO_LARGE_ARRAY_SIZE } diff --git a/parser/testdata/01683_codec_encrypted/ast.json b/parser/testdata/01683_codec_encrypted/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01683_codec_encrypted/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01683_codec_encrypted/metadata.json b/parser/testdata/01683_codec_encrypted/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01683_codec_encrypted/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01683_codec_encrypted/query.sql b/parser/testdata/01683_codec_encrypted/query.sql new file mode 100644 index 000000000..d3ba1bb5e --- /dev/null +++ b/parser/testdata/01683_codec_encrypted/query.sql @@ -0,0 +1,17 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on OpenSSL + +DROP TABLE IF EXISTS encryption_test; +CREATE TABLE encryption_test (i Int, s String Codec(AES_128_GCM_SIV)) ENGINE = MergeTree ORDER BY i; + +INSERT INTO encryption_test VALUES (1, 'Some plaintext'); +SELECT * FROM encryption_test; + +DROP TABLE encryption_test; + +CREATE TABLE encryption_test (i Int, s String Codec(AES_256_GCM_SIV)) ENGINE = MergeTree ORDER BY i; + +INSERT INTO encryption_test VALUES (1, 'Some plaintext'); +SELECT * FROM encryption_test; + +DROP TABLE encryption_test; diff --git a/parser/testdata/01683_dist_INSERT_block_structure_mismatch/ast.json b/parser/testdata/01683_dist_INSERT_block_structure_mismatch/ast.json new file mode 100644 index 000000000..03bb73036 --- /dev/null +++ b/parser/testdata/01683_dist_INSERT_block_structure_mismatch/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tmp_01683 (children 1)" + }, + { + "explain": " Identifier tmp_01683" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001624726, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/01683_dist_INSERT_block_structure_mismatch/metadata.json b/parser/testdata/01683_dist_INSERT_block_structure_mismatch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01683_dist_INSERT_block_structure_mismatch/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01683_dist_INSERT_block_structure_mismatch/query.sql b/parser/testdata/01683_dist_INSERT_block_structure_mismatch/query.sql new file mode 100644 index 000000000..9c584bda1 --- /dev/null +++ b/parser/testdata/01683_dist_INSERT_block_structure_mismatch/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS tmp_01683; +DROP TABLE IF EXISTS dist_01683; + +SET prefer_localhost_replica=0; +-- To suppress "Structure does not match (remote: n Int8 Int8(size = 0), local: n UInt64 UInt64(size = 1)), implicit conversion will be done." 
+SET send_logs_level='error'; + +CREATE TABLE tmp_01683 (n Int8) ENGINE=Memory; +CREATE TABLE dist_01683 (n UInt64) Engine=Distributed(test_cluster_two_shards, currentDatabase(), tmp_01683, n); + +SET distributed_foreground_insert=1; +INSERT INTO dist_01683 VALUES (1),(2); + +SET distributed_foreground_insert=0; +INSERT INTO dist_01683 VALUES (1),(2); +SYSTEM FLUSH DISTRIBUTED dist_01683; + +-- TODO: cover distributed_background_insert_batch=1 + +SELECT * FROM tmp_01683 ORDER BY n; + +DROP TABLE tmp_01683; +DROP TABLE dist_01683; diff --git a/parser/testdata/01683_flat_dictionary/ast.json b/parser/testdata/01683_flat_dictionary/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01683_flat_dictionary/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01683_flat_dictionary/metadata.json b/parser/testdata/01683_flat_dictionary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01683_flat_dictionary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01683_flat_dictionary/query.sql b/parser/testdata/01683_flat_dictionary/query.sql new file mode 100644 index 000000000..f65695219 --- /dev/null +++ b/parser/testdata/01683_flat_dictionary/query.sql @@ -0,0 +1,121 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 01681_database_for_flat_dictionary; +CREATE DATABASE 01681_database_for_flat_dictionary; + +CREATE TABLE 01681_database_for_flat_dictionary.simple_key_simple_attributes_source_table +( + id UInt64, + value_first String, + value_second String +) +ENGINE = TinyLog; + +INSERT INTO 01681_database_for_flat_dictionary.simple_key_simple_attributes_source_table VALUES(0, 'value_0', 'value_second_0'); +INSERT INTO 01681_database_for_flat_dictionary.simple_key_simple_attributes_source_table VALUES(1, 'value_1', 'value_second_1'); +INSERT INTO 01681_database_for_flat_dictionary.simple_key_simple_attributes_source_table VALUES(2, 'value_2', 'value_second_2'); + +CREATE DICTIONARY 01681_database_for_flat_dictionary.flat_dictionary_simple_key_simple_attributes +( + id UInt64, + value_first String DEFAULT 'value_first_default', + value_second String DEFAULT 'value_second_default' +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'simple_key_simple_attributes_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(FLAT()); + +SELECT 'Dictionary flat_dictionary_simple_key_simple_attributes'; +SELECT 'dictGet existing value'; +SELECT dictGet('01681_database_for_flat_dictionary.flat_dictionary_simple_key_simple_attributes', 'value_first', number) as value_first, + dictGet('01681_database_for_flat_dictionary.flat_dictionary_simple_key_simple_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGet with non existing value'; +SELECT dictGet('01681_database_for_flat_dictionary.flat_dictionary_simple_key_simple_attributes', 'value_first', number) as value_first, + dictGet('01681_database_for_flat_dictionary.flat_dictionary_simple_key_simple_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictGetOrDefault existing value'; +SELECT dictGetOrDefault('01681_database_for_flat_dictionary.flat_dictionary_simple_key_simple_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01681_database_for_flat_dictionary.flat_dictionary_simple_key_simple_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers 
LIMIT 3; +SELECT 'dictGetOrDefault non existing value'; +SELECT dictGetOrDefault('01681_database_for_flat_dictionary.flat_dictionary_simple_key_simple_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01681_database_for_flat_dictionary.flat_dictionary_simple_key_simple_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictHas'; +SELECT dictHas('01681_database_for_flat_dictionary.flat_dictionary_simple_key_simple_attributes', number) FROM system.numbers LIMIT 4; + +DROP DICTIONARY 01681_database_for_flat_dictionary.flat_dictionary_simple_key_simple_attributes; +DROP TABLE 01681_database_for_flat_dictionary.simple_key_simple_attributes_source_table; + +CREATE TABLE 01681_database_for_flat_dictionary.simple_key_complex_attributes_source_table +( + id UInt64, + value_first String, + value_second Nullable(String) +) +ENGINE = TinyLog; + +INSERT INTO 01681_database_for_flat_dictionary.simple_key_complex_attributes_source_table VALUES(0, 'value_0', 'value_second_0'); +INSERT INTO 01681_database_for_flat_dictionary.simple_key_complex_attributes_source_table VALUES(1, 'value_1', NULL); +INSERT INTO 01681_database_for_flat_dictionary.simple_key_complex_attributes_source_table VALUES(2, 'value_2', 'value_second_2'); + +CREATE DICTIONARY 01681_database_for_flat_dictionary.flat_dictionary_simple_key_complex_attributes +( + id UInt64, + value_first String DEFAULT 'value_first_default', + value_second Nullable(String) DEFAULT 'value_second_default' +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'simple_key_complex_attributes_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(FLAT()); + +SELECT 'Dictionary flat_dictionary_simple_key_complex_attributes'; +SELECT 'dictGet existing value'; +SELECT dictGet('01681_database_for_flat_dictionary.flat_dictionary_simple_key_complex_attributes', 'value_first', number) as value_first, + dictGet('01681_database_for_flat_dictionary.flat_dictionary_simple_key_complex_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGet with non existing value'; +SELECT dictGet('01681_database_for_flat_dictionary.flat_dictionary_simple_key_complex_attributes', 'value_first', number) as value_first, + dictGet('01681_database_for_flat_dictionary.flat_dictionary_simple_key_complex_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictGetOrDefault existing value'; +SELECT dictGetOrDefault('01681_database_for_flat_dictionary.flat_dictionary_simple_key_complex_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01681_database_for_flat_dictionary.flat_dictionary_simple_key_complex_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGetOrDefault non existing value'; +SELECT dictGetOrDefault('01681_database_for_flat_dictionary.flat_dictionary_simple_key_complex_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01681_database_for_flat_dictionary.flat_dictionary_simple_key_complex_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictHas'; +SELECT dictHas('01681_database_for_flat_dictionary.flat_dictionary_simple_key_complex_attributes', number) FROM system.numbers LIMIT 4; + +DROP DICTIONARY 
01681_database_for_flat_dictionary.flat_dictionary_simple_key_complex_attributes; +DROP TABLE 01681_database_for_flat_dictionary.simple_key_complex_attributes_source_table; + +CREATE TABLE 01681_database_for_flat_dictionary.simple_key_hierarchy_table +( + id UInt64, + parent_id UInt64 +) ENGINE = TinyLog(); + +INSERT INTO 01681_database_for_flat_dictionary.simple_key_hierarchy_table VALUES (1, 0); +INSERT INTO 01681_database_for_flat_dictionary.simple_key_hierarchy_table VALUES (2, 1); +INSERT INTO 01681_database_for_flat_dictionary.simple_key_hierarchy_table VALUES (3, 1); +INSERT INTO 01681_database_for_flat_dictionary.simple_key_hierarchy_table VALUES (4, 2); + +CREATE DICTIONARY 01681_database_for_flat_dictionary.flat_dictionary_simple_key_hierarchy +( + id UInt64, + parent_id UInt64 HIERARCHICAL +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'simple_key_hierarchy_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(FLAT()); + +SELECT 'Dictionary flat_dictionary_simple_key_hierarchy'; +SELECT 'dictGet'; +SELECT dictGet('01681_database_for_flat_dictionary.flat_dictionary_simple_key_hierarchy', 'parent_id', number) FROM system.numbers LIMIT 5; +SELECT 'dictGetHierarchy'; +SELECT dictGetHierarchy('01681_database_for_flat_dictionary.flat_dictionary_simple_key_hierarchy', toUInt64(1)); +SELECT dictGetHierarchy('01681_database_for_flat_dictionary.flat_dictionary_simple_key_hierarchy', toUInt64(4)); + +DROP DICTIONARY 01681_database_for_flat_dictionary.flat_dictionary_simple_key_hierarchy; +DROP TABLE 01681_database_for_flat_dictionary.simple_key_hierarchy_table; + +DROP DATABASE 01681_database_for_flat_dictionary; diff --git a/parser/testdata/01683_intdiv_ubsan/ast.json b/parser/testdata/01683_intdiv_ubsan/ast.json new file mode 100644 index 000000000..acf7ff813 --- /dev/null +++ b/parser/testdata/01683_intdiv_ubsan/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function intDiv (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal Float64_nan" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001117567, + "rows_read": 14, + "bytes_read": 546 + } +} diff --git a/parser/testdata/01683_intdiv_ubsan/metadata.json b/parser/testdata/01683_intdiv_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01683_intdiv_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01683_intdiv_ubsan/query.sql b/parser/testdata/01683_intdiv_ubsan/query.sql new file mode 100644 index 000000000..11a6645e6 --- /dev/null +++ b/parser/testdata/01683_intdiv_ubsan/query.sql @@ -0,0 +1 @@ +SELECT DISTINCT intDiv(number, nan) FROM numbers(10); -- { serverError ILLEGAL_DIVISION } diff --git a/parser/testdata/01684_geohash_ubsan/ast.json b/parser/testdata/01684_geohash_ubsan/ast.json new file mode 100644 
index 000000000..413b99d1f --- /dev/null +++ b/parser/testdata/01684_geohash_ubsan/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function geohashesInBox (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal Float64_100.0000991821289" + }, + { + "explain": " Literal Float64_100.0000991821289" + }, + { + "explain": " Literal Float64_1000.0001220703125" + }, + { + "explain": " Literal Float64_1000.0001220703125" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001600032, + "rows_read": 11, + "bytes_read": 457 + } +} diff --git a/parser/testdata/01684_geohash_ubsan/metadata.json b/parser/testdata/01684_geohash_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01684_geohash_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01684_geohash_ubsan/query.sql b/parser/testdata/01684_geohash_ubsan/query.sql new file mode 100644 index 000000000..e7eb9c526 --- /dev/null +++ b/parser/testdata/01684_geohash_ubsan/query.sql @@ -0,0 +1 @@ +SELECT geohashesInBox(100.0000991821289, 100.0000991821289, 1000.0001220703125, 1000.0001220703125, 0); diff --git a/parser/testdata/01684_insert_specify_shard_id/ast.json b/parser/testdata/01684_insert_specify_shard_id/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01684_insert_specify_shard_id/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01684_insert_specify_shard_id/metadata.json b/parser/testdata/01684_insert_specify_shard_id/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01684_insert_specify_shard_id/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01684_insert_specify_shard_id/query.sql b/parser/testdata/01684_insert_specify_shard_id/query.sql new file mode 100644 index 000000000..dc33f8c5c --- /dev/null +++ b/parser/testdata/01684_insert_specify_shard_id/query.sql @@ -0,0 +1,39 @@ +-- Tags: shard + +DROP TABLE IF EXISTS x; +DROP TABLE IF EXISTS x_dist; +DROP TABLE IF EXISTS y; +DROP TABLE IF EXISTS y_dist; + +CREATE TABLE x AS system.numbers ENGINE = MergeTree ORDER BY number; +CREATE TABLE y AS system.numbers ENGINE = MergeTree ORDER BY number; + +CREATE TABLE x_dist as x ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), x); +CREATE TABLE y_dist as y ENGINE = Distributed('test_cluster_two_shards_localhost', currentDatabase(), y); + +-- insert into first shard +INSERT INTO x_dist SELECT * FROM numbers(10) settings insert_shard_id = 1; +INSERT INTO y_dist SELECT * FROM numbers(10) settings insert_shard_id = 1; + +SELECT * FROM x_dist ORDER by number; +SELECT * FROM y_dist ORDER by number; + +-- insert into second shard +INSERT INTO x_dist SELECT * FROM numbers(10, 10) settings insert_shard_id = 2; +INSERT INTO y_dist SELECT * FROM numbers(10, 10) settings insert_shard_id = 2; + +SELECT * FROM x_dist ORDER by number; +SELECT * FROM y_dist ORDER by number; + +-- no sharding key +INSERT INTO x_dist SELECT * FROM numbers(10); -- { serverError STORAGE_REQUIRES_PARAMETER } +INSERT INTO y_dist SELECT * FROM numbers(10); -- { serverError 
STORAGE_REQUIRES_PARAMETER } + +-- invalid shard id +INSERT INTO x_dist SELECT * FROM numbers(10) settings insert_shard_id = 3; -- { serverError INVALID_SHARD_ID } +INSERT INTO y_dist SELECT * FROM numbers(10) settings insert_shard_id = 3; -- { serverError INVALID_SHARD_ID } + +DROP TABLE x; +DROP TABLE x_dist; +DROP TABLE y; +DROP TABLE y_dist; diff --git a/parser/testdata/01685_json_extract_double_as_float/ast.json b/parser/testdata/01685_json_extract_double_as_float/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01685_json_extract_double_as_float/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01685_json_extract_double_as_float/metadata.json b/parser/testdata/01685_json_extract_double_as_float/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01685_json_extract_double_as_float/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01685_json_extract_double_as_float/query.sql b/parser/testdata/01685_json_extract_double_as_float/query.sql new file mode 100644 index 000000000..0f9827a27 --- /dev/null +++ b/parser/testdata/01685_json_extract_double_as_float/query.sql @@ -0,0 +1,25 @@ + +WITH '{ "v":1.1}' AS raw +SELECT + JSONExtract(raw, 'v', 'float') AS float32_1, + JSONExtract(raw, 'v', 'Float32') AS float32_2, + JSONExtractFloat(raw, 'v') AS float64_1, + JSONExtract(raw, 'v', 'double') AS float64_2; + +WITH '{ "v":1E-2}' AS raw +SELECT + JSONExtract(raw, 'v', 'float') AS float32_1, + JSONExtract(raw, 'v', 'Float32') AS float32_2, + JSONExtractFloat(raw, 'v') AS float64_1, + JSONExtract(raw, 'v', 'double') AS float64_2; + +SELECT JSONExtract('{"v":1.1}', 'v', 'UInt64'); +SELECT JSONExtract('{"v":1.1}', 'v', 'Nullable(UInt64)'); + +SELECT JSONExtract('{"v":-1e300}', 'v', 'Float64'); +SELECT JSONExtract('{"v":-1e300}', 'v', 'Float32'); + +SELECT JSONExtract('{"v":-1e300}', 'v', 'UInt64'); +SELECT JSONExtract('{"v":-1e300}', 'v', 'Int64'); +SELECT JSONExtract('{"v":-1e300}', 'v', 'UInt8'); +SELECT JSONExtract('{"v":-1e300}', 'v', 'Int8'); diff --git a/parser/testdata/01686_rocksdb/ast.json b/parser/testdata/01686_rocksdb/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01686_rocksdb/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01686_rocksdb/metadata.json b/parser/testdata/01686_rocksdb/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01686_rocksdb/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01686_rocksdb/query.sql b/parser/testdata/01686_rocksdb/query.sql new file mode 100644 index 000000000..6893d3b9e --- /dev/null +++ b/parser/testdata/01686_rocksdb/query.sql @@ -0,0 +1,32 @@ +-- Tags: no-ordinary-database, no-fasttest, use-rocksdb +-- Tag no-ordinary-database: Sometimes cannot lock file most likely due to concurrent or adjacent tests, but we don't care how it works in Ordinary database +-- Tag no-fasttest: In fasttest, ENABLE_LIBRARIES=0, so rocksdb engine is not enabled by default + +DROP TABLE IF EXISTS 01686_test; + +CREATE TABLE 01686_test (key UInt64, value String) Engine=EmbeddedRocksDB PRIMARY KEY(key) SETTINGS optimize_for_bulk_insert = 0; + +SELECT value FROM system.rocksdb WHERE database = currentDatabase() and table = '01686_test' and name = 'number.keys.written'; +INSERT INTO 01686_test SELECT number, format('Hello, world ({})', toString(number)) FROM numbers(10000); +SELECT value FROM system.rocksdb 
WHERE database = currentDatabase() and table = '01686_test' and name = 'number.keys.written'; + +SELECT * FROM 01686_test WHERE key = 123; +SELECT '--'; +SELECT * FROM 01686_test WHERE key = -123; +SELECT '--'; +SELECT * FROM 01686_test WHERE key = 123 OR key = 4567 ORDER BY key; +SELECT '--'; +SELECT * FROM 01686_test WHERE key = NULL; +SELECT '--'; +SELECT * FROM 01686_test WHERE key = NULL OR key = 0; +SELECT '--'; +SELECT * FROM 01686_test WHERE key IN (123, 456, -123) ORDER BY key; +SELECT '--'; +SELECT * FROM 01686_test WHERE key = 'Hello'; -- { serverError TYPE_MISMATCH } + +DETACH TABLE 01686_test SYNC; +ATTACH TABLE 01686_test; + +SELECT * FROM 01686_test WHERE key IN (99, 999, 9999, -123) ORDER BY key; + +DROP TABLE IF EXISTS 01686_test; diff --git a/parser/testdata/01690_quantilesTiming_ubsan/ast.json b/parser/testdata/01690_quantilesTiming_ubsan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01690_quantilesTiming_ubsan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01690_quantilesTiming_ubsan/metadata.json b/parser/testdata/01690_quantilesTiming_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01690_quantilesTiming_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01690_quantilesTiming_ubsan/query.sql b/parser/testdata/01690_quantilesTiming_ubsan/query.sql new file mode 100644 index 000000000..b2a5ab61e --- /dev/null +++ b/parser/testdata/01690_quantilesTiming_ubsan/query.sql @@ -0,0 +1,31 @@ +-- NOTE: that due to overflows it may give different result before +-- quantilesTimingWeighted() had been converted to double: +-- +-- Before: +-- +-- SELECT quantilesTimingWeighted(1)(number, 9223372036854775807) +-- FROM numbers(2) +-- +-- ┌─quantilesTimingWeighted(1)(number, 9223372036854775807)─┐ +-- │ [1] │ +-- └─────────────────────────────────────────────────────────┘ +-- +-- After: +-- +-- SELECT quantilesTimingWeighted(1)(number, 9223372036854775807) +-- FROM numbers(2) +-- +-- ┌─quantilesTimingWeighted(1)(number, 9223372036854775807)─┐ +-- │ [0] │ +-- └─────────────────────────────────────────────────────────┘ + +SELECT quantilesTimingWeighted(0.1)(number, 9223372036854775807) FROM numbers(2); + +-- same UB, but in the inner loop +SELECT quantilesTimingWeighted(0, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 0.999, 1)(number, 9223372036854775807) +FROM +( + SELECT number + FROM system.numbers + LIMIT 100 +); diff --git a/parser/testdata/01691_DateTime64_clamp/ast.json b/parser/testdata/01691_DateTime64_clamp/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01691_DateTime64_clamp/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01691_DateTime64_clamp/metadata.json b/parser/testdata/01691_DateTime64_clamp/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01691_DateTime64_clamp/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01691_DateTime64_clamp/query.sql b/parser/testdata/01691_DateTime64_clamp/query.sql new file mode 100644 index 000000000..b7077aff1 --- /dev/null +++ b/parser/testdata/01691_DateTime64_clamp/query.sql @@ -0,0 +1,17 @@ +-- { echo } +-- These values are within the extended range of DateTime64 [1925-01-01, 2284-01-01) +SELECT toTimeZone(toDateTime(-2, 2), 'Asia/Istanbul'); +SELECT toDateTime64(-2, 2, 'Asia/Istanbul'); +SELECT CAST(-1 AS DateTime64(0, 
'Asia/Istanbul')); +SELECT CAST('2020-01-01 00:00:00.3' AS DateTime64(0, 'Asia/Istanbul')); +SELECT toDateTime64(bitShiftLeft(toUInt64(1), 33), 2, 'Asia/Istanbul') FORMAT Null; +SELECT toTimeZone(toDateTime(-2., 2), 'Asia/Istanbul'); +SELECT toDateTime64(-2., 2, 'Asia/Istanbul'); +SELECT toDateTime64(toFloat32(bitShiftLeft(toUInt64(1),33)), 2, 'Asia/Istanbul'); +SELECT toDateTime64(toFloat64(bitShiftLeft(toUInt64(1),33)), 2, 'Asia/Istanbul') FORMAT Null; + +-- These are outside of the extended range and hence clamped +SELECT toDateTime64(-1 * bitShiftLeft(toUInt64(1), 35), 2, 'Asia/Istanbul'); +SELECT CAST(-1 * bitShiftLeft(toUInt64(1), 35) AS DateTime64(3, 'Asia/Istanbul')); +SELECT CAST(bitShiftLeft(toUInt64(1), 35) AS DateTime64(3, 'Asia/Istanbul')); +SELECT toDateTime64(bitShiftLeft(toUInt64(1), 35), 2, 'Asia/Istanbul'); diff --git a/parser/testdata/01692_DateTime64_from_DateTime/ast.json b/parser/testdata/01692_DateTime64_from_DateTime/ast.json new file mode 100644 index 000000000..ef7913de1 --- /dev/null +++ b/parser/testdata/01692_DateTime64_from_DateTime/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 1)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Function toDateTime64 (children 1)" + }, + { + "explain": "     ExpressionList (children 2)" + }, + { + "explain": "      Function toDateTime (children 1)" + }, + { + "explain": "       ExpressionList (children 2)" + }, + { + "explain": "        Literal UInt64_1" + }, + { + "explain": "        Literal 'Asia\/Istanbul'" + }, + { + "explain": "      Literal UInt64_2" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001277739, + "rows_read": 11, + "bytes_read": 424 + } +} diff --git a/parser/testdata/01692_DateTime64_from_DateTime/metadata.json b/parser/testdata/01692_DateTime64_from_DateTime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01692_DateTime64_from_DateTime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01692_DateTime64_from_DateTime/query.sql b/parser/testdata/01692_DateTime64_from_DateTime/query.sql new file mode 100644 index 000000000..c08062a45 --- /dev/null +++ b/parser/testdata/01692_DateTime64_from_DateTime/query.sql @@ -0,0 +1,7 @@ +select toDateTime64(toDateTime(1, 'Asia/Istanbul'), 2); +select toDateTime64(toDate(1), 2) FORMAT Null; -- Unknown timezone +select toDateTime64(toDateTime(1), 2) FORMAT Null; -- Unknown timezone +select toDateTime64(toDateTime(1), 2, 'Asia/Istanbul'); +select toDateTime64(toDate(1), 2, 'Asia/Istanbul'); +select toDateTime64(toDateTime(1), 2, 'GMT'); +select toDateTime64(toDate(1), 2, 'GMT'); diff --git a/parser/testdata/01698_fix_toMinute/ast.json b/parser/testdata/01698_fix_toMinute/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01698_fix_toMinute/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01698_fix_toMinute/metadata.json b/parser/testdata/01698_fix_toMinute/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01698_fix_toMinute/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01698_fix_toMinute/query.sql b/parser/testdata/01698_fix_toMinute/query.sql new file mode 100644 index 000000000..4d11efa90 --- /dev/null +++
b/parser/testdata/01698_fix_toMinute/query.sql @@ -0,0 +1,16 @@ +/* toDateTime, toString, and any other function that calls toMinute() hits this bug. The tests below verify toDateTime and toString. */ +SELECT 'Check the bug causing situation: the special Australia/Lord_Howe time zone. toDateTime and toString functions are all tested at once'; +SELECT toUnixTimestamp(x) as tt, (toDateTime('2019-04-07 01:00:00', 'Australia/Lord_Howe') + INTERVAL number * 600 SECOND) AS x, toString(x) as xx FROM numbers(20); + +/* The batch part: the test period is four whole days. */ +SELECT '4 days test in batch comparing with manually computation result for Asia/Istanbul whose timezone epoc is of whole hour:'; +SELECT toUnixTimestamp(x) as tt, (toDateTime('1981-04-01 00:00:00', 'Asia/Istanbul') + INTERVAL number * 600 SECOND) AS x, timezoneOffset(x) as res,(toDateTime(toString(x), 'UTC') - x ) AS calc FROM numbers(576) where res != calc; +SELECT toUnixTimestamp(x) as tt, (toDateTime('1981-09-30 00:00:00', 'Asia/Istanbul') + INTERVAL number * 600 SECOND) AS x, timezoneOffset(x) as res,(toDateTime(toString(x), 'UTC') - x ) AS calc FROM numbers(576) where res != calc; + +SELECT '4 days test in batch comparing with manually computation result for Asia/Tehran whose timezone epoc is of half hour:'; +SELECT toUnixTimestamp(x) as tt, (toDateTime('2020-03-21 00:00:00', 'Asia/Tehran') + INTERVAL number * 600 SECOND) AS x, timezoneOffset(x) as res,(toDateTime(toString(x), 'UTC') - x ) AS calc FROM numbers(576) where res != calc; +SELECT toUnixTimestamp(x) as tt, (toDateTime('2020-09-20 00:00:00', 'Asia/Tehran') + INTERVAL number * 600 SECOND) AS x, timezoneOffset(x) as res,(toDateTime(toString(x), 'UTC') - x ) AS calc FROM numbers(576) where res != calc; + +SELECT '4 days test in batch comparing with manually computation result for Australia/Lord_Howe whose timezone epoc is of half hour and also its DST offset is half hour:'; +SELECT toUnixTimestamp(x) as tt, (toDateTime('2020-10-04 01:40:00', 'Australia/Lord_Howe') + INTERVAL number * 600 SECOND) AS x, timezoneOffset(x) as res,(toDateTime(toString(x), 'UTC') - x ) AS calc FROM numbers(576) where res != calc; +SELECT toUnixTimestamp(x) as tt, (toDateTime('2019-04-07 01:00:00', 'Australia/Lord_Howe') + INTERVAL number * 600 SECOND) AS x, timezoneOffset(x) as res,(toDateTime(toString(x), 'UTC') - x ) AS calc FROM numbers(576) where res != calc; diff --git a/parser/testdata/01698_map_populate_overflow/ast.json b/parser/testdata/01698_map_populate_overflow/ast.json new file mode 100644 index 000000000..5537af76c --- /dev/null +++ b/parser/testdata/01698_map_populate_overflow/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 1)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Function mapPopulateSeries (children 1)" + }, + { + "explain": "     ExpressionList (children 3)" + }, + { + "explain": "      Literal Array_[UInt64_18446744073709551615]" + }, + { + "explain": "      Literal Array_[UInt64_0]" + }, + { + "explain": "      Literal UInt64_18446744073709551615" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001530885, + "rows_read": 9, + "bytes_read": 384 + } +} diff --git a/parser/testdata/01698_map_populate_overflow/metadata.json b/parser/testdata/01698_map_populate_overflow/metadata.json new file mode 100644
index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01698_map_populate_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01698_map_populate_overflow/query.sql b/parser/testdata/01698_map_populate_overflow/query.sql new file mode 100644 index 000000000..e1f09d4ed --- /dev/null +++ b/parser/testdata/01698_map_populate_overflow/query.sql @@ -0,0 +1,2 @@ +SELECT mapPopulateSeries([0xFFFFFFFFFFFFFFFF], [0], 0xFFFFFFFFFFFFFFFF); +SELECT mapPopulateSeries([toUInt64(1)], [1], 0xFFFFFFFFFFFFFFFF); -- { serverError TOO_LARGE_ARRAY_SIZE } diff --git a/parser/testdata/01699_timezoneOffset/ast.json b/parser/testdata/01699_timezoneOffset/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01699_timezoneOffset/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01699_timezoneOffset/metadata.json b/parser/testdata/01699_timezoneOffset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01699_timezoneOffset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01699_timezoneOffset/query.sql b/parser/testdata/01699_timezoneOffset/query.sql new file mode 100644 index 000000000..8a664521a --- /dev/null +++ b/parser/testdata/01699_timezoneOffset/query.sql @@ -0,0 +1,44 @@ + +/* Test the DST (daylight saving time) offset changing boundary */ +SELECT 'DST boundary test for Europe/Moscow:'; +SELECT number,(toDateTime('1981-04-01 22:40:00', 'Europe/Moscow') + INTERVAL number * 600 SECOND) AS k, timezoneOffset(k) AS t, toUnixTimestamp(k) as s FROM numbers(4); +SELECT number,(toDateTime('1981-09-30 23:00:00', 'Europe/Moscow') + INTERVAL number * 600 SECOND) AS k, timezoneOffset(k) AS t, toUnixTimestamp(k) as s FROM numbers(18); + +SELECT 'DST boundary test for Asia/Tehran:'; +SELECT number,(toDateTime('2020-03-21 22:40:00', 'Asia/Tehran') + INTERVAL number * 600 SECOND) AS k, timezoneOffset(k) AS t, toUnixTimestamp(k) as s FROM numbers(4); +SELECT number,(toDateTime('2020-09-20 23:00:00', 'Asia/Tehran') + INTERVAL number * 600 SECOND) AS k, timezoneOffset(k) AS t, toUnixTimestamp(k) as s FROM numbers(18); + +SELECT 'DST boundary test for Australia/Lord_Howe. This is a special timezone whose DST offset is 30 minutes and whose timezone epoch also lies at a half hour'; +SELECT timezoneOffset(toDateTime('2018-08-21 22:20:00', 'Australia/Lord_Howe')); +SELECT timezoneOffset(toDateTime('2018-02-21 22:20:00', 'Australia/Lord_Howe')); + +SELECT 'DST boundary test for Australia/Lord_Howe:'; +SELECT number,(toDateTime('2020-10-04 01:40:00', 'Australia/Lord_Howe') + INTERVAL number * 600 SECOND) AS k, timezoneOffset(k) AS t, toUnixTimestamp(k) as s FROM numbers(4); +SELECT number,(toDateTime('2019-04-07 01:00:00', 'Australia/Lord_Howe') + INTERVAL number * 600 SECOND) AS k, timezoneOffset(k) AS t, toUnixTimestamp(k) as s FROM numbers(18); + + +/* The Batch Part. 
The test period is 4 whole days. */ +SELECT '4 days test in batch comparing with manual computation results for Europe/Moscow:'; +SELECT toUnixTimestamp(x) as tt, (toDateTime('1981-04-01 00:00:00', 'Europe/Moscow') + INTERVAL number * 600 SECOND) AS x, timezoneOffset(x) as res,(toDateTime(toString(x), 'UTC') - x ) AS calc FROM numbers(576) where res != calc; +SELECT toUnixTimestamp(x) as tt, (toDateTime('1981-09-30 00:00:00', 'Europe/Moscow') + INTERVAL number * 600 SECOND) AS x, timezoneOffset(x) as res,(toDateTime(toString(x), 'UTC') - x ) AS calc FROM numbers(576) where res != calc; + +SELECT '4 days test in batch comparing with manual computation results for Asia/Tehran:'; +SELECT toUnixTimestamp(x) as tt, (toDateTime('2020-03-21 00:00:00', 'Asia/Tehran') + INTERVAL number * 600 SECOND) AS x, timezoneOffset(x) as res,(toDateTime(toString(x), 'UTC') - x ) AS calc FROM numbers(576) where res != calc; +SELECT toUnixTimestamp(x) as tt, (toDateTime('2020-09-20 00:00:00', 'Asia/Tehran') + INTERVAL number * 600 SECOND) AS x, timezoneOffset(x) as res,(toDateTime(toString(x), 'UTC') - x ) AS calc FROM numbers(576) where res != calc; + +SELECT '4 days test in batch comparing with manual computation results for Australia/Lord_Howe'; +SELECT toUnixTimestamp(x) as tt, (toDateTime('2020-10-04 01:40:00', 'Australia/Lord_Howe') + INTERVAL number * 600 SECOND) AS x, timezoneOffset(x) as res,(toDateTime(toString(x), 'UTC') - x ) AS calc FROM numbers(18) where res != calc; +SELECT toUnixTimestamp(x) as tt, (toDateTime('2019-04-07 01:00:00', 'Australia/Lord_Howe') + INTERVAL number * 600 SECOND) AS x, timezoneOffset(x) as res,(toDateTime(toString(x), 'UTC') - x ) AS calc FROM numbers(18) where res != calc; + + +/* Find all the years that observed DST during the given period */ + +SELECT 'Moscow DST Years:'; +SELECT number, (toDateTime('1970-06-01 00:00:00', 'Europe/Moscow') + INTERVAL number YEAR) AS DST_Y, timezoneOffset(DST_Y) AS t FROM numbers(51) where t != 10800; +SELECT 'Moscow DST Years with permanent DST from 2011-2014:'; +SELECT min((toDateTime('2011-01-01 00:00:00', 'Europe/Moscow') + INTERVAL number DAY) as day) as start, max(day) as end, count(1), concat(toString(toYear(day)),'_',toString(timezoneOffset(day)))as DST from numbers(365*4+1) group by DST order by start; + +SELECT 'Tehran DST Years:'; +SELECT number, (toDateTime('1970-06-01 00:00:00', 'Asia/Tehran') + INTERVAL number YEAR) AS DST_Y, timezoneOffset(DST_Y) AS t FROM numbers(51) where t != 12600; +SELECT 'Shanghai DST Years:'; +SELECT number, (toDateTime('1970-08-01 00:00:00', 'Asia/Shanghai') + INTERVAL number YEAR) AS DST_Y, timezoneOffset(DST_Y) AS t FROM numbers(51) where t != 28800; diff --git a/parser/testdata/01700_deltasum/ast.json b/parser/testdata/01700_deltasum/ast.json new file mode 100644 index 000000000..43921322d --- /dev/null +++ b/parser/testdata/01700_deltasum/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function deltaSum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3]" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 
0.001096557, + "rows_read": 9, + "bytes_read": 378 + } +} diff --git a/parser/testdata/01700_deltasum/metadata.json b/parser/testdata/01700_deltasum/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01700_deltasum/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01700_deltasum/query.sql b/parser/testdata/01700_deltasum/query.sql new file mode 100644 index 000000000..49d2398ec --- /dev/null +++ b/parser/testdata/01700_deltasum/query.sql @@ -0,0 +1,44 @@ +select deltaSum(arrayJoin([1, 2, 3])); +select deltaSum(arrayJoin([1, 2, 3, 0, 3, 4])); +select deltaSum(arrayJoin([1, 2, 3, 0, 3, 4, 2, 3])); +select deltaSum(arrayJoin([1, 2, 3, 0, 3, 3, 3, 3, 3, 4, 2, 3])); +select deltaSum(arrayJoin([1, 2, 3, 0, 0, 0, 0, 3, 3, 3, 3, 3, 4, 2, 3])); +select deltaSumMerge(rows) as delta_sum from +( + select * from + ( + select 1 as x, deltaSumState(arrayJoin([0, 1])) as rows + union all + select 2, deltaSumState(arrayJoin([4, 5])) as rows + ) order by x +) order by delta_sum; +select deltaSumMerge(rows) as delta_sum from +( + select * from + ( + select 1 as x, deltaSumState(arrayJoin([4, 5])) as rows + union all + select 2, deltaSumState(arrayJoin([0, 1])) as rows + ) order by x +) order by delta_sum; +select deltaSum(arrayJoin([2.25, 3, 4.5])); +select deltaSumMerge(rows) as delta_sum from +( + select * from + ( + select 1 as x, deltaSumState(arrayJoin([0.1, 0.3, 0.5])) as rows + union all + select 2, deltaSumState(arrayJoin([4.1, 5.1, 6.6])) as rows + ) order by x +) order by delta_sum; +select deltaSumMerge(rows) as delta_sum from +( + select * from + ( + select 1 as x, deltaSumState(arrayJoin([3, 5])) as rows + union all + select 2, deltaSumState(arrayJoin([1, 2])) as rows + union all + select 3, deltaSumState(arrayJoin([4, 6])) as rows + ) order by x +) order by delta_sum; diff --git a/parser/testdata/01700_mod_negative_type_promotion/ast.json b/parser/testdata/01700_mod_negative_type_promotion/ast.json new file mode 100644 index 000000000..761297a48 --- /dev/null +++ b/parser/testdata/01700_mod_negative_type_promotion/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (alias k) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toInt32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-199" + }, + { + "explain": " Literal UInt64_200" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier k" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001086441, + "rows_read": 13, + "bytes_read": 500 + } +} diff --git a/parser/testdata/01700_mod_negative_type_promotion/metadata.json b/parser/testdata/01700_mod_negative_type_promotion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01700_mod_negative_type_promotion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01700_mod_negative_type_promotion/query.sql b/parser/testdata/01700_mod_negative_type_promotion/query.sql new file mode 100644 index 000000000..93bb70716 --- /dev/null +++ 
b/parser/testdata/01700_mod_negative_type_promotion/query.sql @@ -0,0 +1,12 @@ +SELECT toInt32(-199) % 200 as k, toTypeName(k); +SELECT toInt32(-199) % toUInt16(200) as k, toTypeName(k); +SELECT toInt32(-199) % toUInt32(200) as k, toTypeName(k); +SELECT toInt32(-199) % toUInt64(200) as k, toTypeName(k); + +SELECT toInt32(-199) % toInt16(-200) as k, toTypeName(k); + +SELECT 199 % -10 as k, toTypeName(k); +SELECT 199 % -200 as k, toTypeName(k); + +SELECT toFloat64(-199) % 200 as k, toTypeName(k); +SELECT -199 % toFloat64(200) as k, toTypeName(k); diff --git a/parser/testdata/01700_point_in_polygon_ubsan/ast.json b/parser/testdata/01700_point_in_polygon_ubsan/ast.json new file mode 100644 index 000000000..fd322d266 --- /dev/null +++ b/parser/testdata/01700_point_in_polygon_ubsan/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function pointInPolygon (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_0)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_0)" + }, + { + "explain": " Literal Tuple_(UInt64_10, UInt64_10)" + }, + { + "explain": " Literal Tuple_(UInt64_256, Int64_-9223372036854775808)" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001466669, + "rows_read": 15, + "bytes_read": 657 + } +} diff --git a/parser/testdata/01700_point_in_polygon_ubsan/metadata.json b/parser/testdata/01700_point_in_polygon_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01700_point_in_polygon_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01700_point_in_polygon_ubsan/query.sql b/parser/testdata/01700_point_in_polygon_ubsan/query.sql new file mode 100644 index 000000000..645c304ff --- /dev/null +++ b/parser/testdata/01700_point_in_polygon_ubsan/query.sql @@ -0,0 +1 @@ +SELECT pointInPolygon((0, 0), [[(0, 0), (10, 10), (256, -9223372036854775808)]]) FORMAT Null ;-- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/01700_system_zookeeper_path_in/ast.json b/parser/testdata/01700_system_zookeeper_path_in/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01700_system_zookeeper_path_in/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01700_system_zookeeper_path_in/metadata.json b/parser/testdata/01700_system_zookeeper_path_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01700_system_zookeeper_path_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01700_system_zookeeper_path_in/query.sql b/parser/testdata/01700_system_zookeeper_path_in/query.sql new file mode 100644 index 000000000..0c9f8c329 --- /dev/null +++ b/parser/testdata/01700_system_zookeeper_path_in/query.sql @@ -0,0 +1,25 @@ +-- Tags: zookeeper, no-shared-merge-tree +-- no-shared-merge-tree: depend on replicated merge tree zookeeper structure + +DROP TABLE IF EXISTS sample_table; + +CREATE 
TABLE sample_table ( + key UInt64 +) +ENGINE ReplicatedMergeTree('/clickhouse/{database}/01700_system_zookeeper_path_in/{shard}', '{replica}') +ORDER BY tuple(); + +SELECT name FROM system.zookeeper WHERE path = '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard') AND name like 'block%' ORDER BY name; +SELECT 'r1' FROM system.zookeeper WHERE path = '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard') || '/replicas' AND name LIKE '%'|| getMacro('replica') ||'%' ORDER BY name; + +SELECT '========'; +SELECT name FROM system.zookeeper WHERE path IN ('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard')) AND name LIKE 'block%' ORDER BY name; +SELECT 'r1' FROM system.zookeeper WHERE path IN ('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard') || '/replicas') AND name LIKE '%' || getMacro('replica') || '%' ORDER BY name; +SELECT '========'; +SELECT name FROM system.zookeeper WHERE path IN ('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard'), + '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard') || '/replicas') AND name LIKE 'block%' ORDER BY name; +SELECT '========'; +SELECT name FROM system.zookeeper WHERE path IN (SELECT concat('/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard') || '/', name) + FROM system.zookeeper WHERE (name != 'replicas' AND name NOT LIKE 'leader_election%' AND name NOT LIKE 'zero_copy_%' AND path = '/clickhouse/' || currentDatabase() || '/01700_system_zookeeper_path_in/' || getMacro('shard'))) ORDER BY name; + +DROP TABLE IF EXISTS sample_table; diff --git a/parser/testdata/01701_clear_projection_and_part_remove/ast.json b/parser/testdata/01701_clear_projection_and_part_remove/ast.json new file mode 100644 index 000000000..9b2a717ca --- /dev/null +++ b/parser/testdata/01701_clear_projection_and_part_remove/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tp_1 (children 1)" + }, + { + "explain": " Identifier tp_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001311881, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01701_clear_projection_and_part_remove/metadata.json b/parser/testdata/01701_clear_projection_and_part_remove/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01701_clear_projection_and_part_remove/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01701_clear_projection_and_part_remove/query.sql b/parser/testdata/01701_clear_projection_and_part_remove/query.sql new file mode 100644 index 000000000..e6cc4cbdb --- /dev/null +++ b/parser/testdata/01701_clear_projection_and_part_remove/query.sql @@ -0,0 +1,19 @@ +drop table if exists tp_1; +-- In this test, we are going to create an old part with written projection which does not exist in table metadata +create table tp_1 (x Int32, y Int32, projection p (select x, y order by x)) engine = MergeTree order by y partition by intDiv(y, 100) settings old_parts_lifetime=1; +insert into tp_1 select number, number from numbers(3); +set mutations_sync = 2; +alter table tp_1 add projection pp (select x, count() group by x); +insert into tp_1 select number, number from numbers(4); +-- Here we have a part with written projection pp 
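+-- (Illustrative aside, not part of the original test: to confirm that the projection really was written to disk for this part, one could run something like +--   select name, parent_name, active from system.projection_parts where table = 'tp_1'; +-- assuming the system.projection_parts introspection table is available on this server build.)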
+alter table tp_1 detach partition '0'; +-- Move part to detached +alter table tp_1 clear projection pp; +-- Remove projection from table metadata +alter table tp_1 drop projection pp; +-- Now, we don't load projection pp for attached part, but it is written on disk +alter table tp_1 attach partition '0'; +-- Make this part obsolete +optimize table tp_1 final; +-- Now, DROP TABLE triggers part removal +drop table tp_1; diff --git a/parser/testdata/01701_if_tuple_segfault/ast.json b/parser/testdata/01701_if_tuple_segfault/ast.json new file mode 100644 index 000000000..21ed21629 --- /dev/null +++ b/parser/testdata/01701_if_tuple_segfault/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery agg_table (children 1)" + }, + { + "explain": " Identifier agg_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001050585, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/01701_if_tuple_segfault/metadata.json b/parser/testdata/01701_if_tuple_segfault/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01701_if_tuple_segfault/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01701_if_tuple_segfault/query.sql b/parser/testdata/01701_if_tuple_segfault/query.sql new file mode 100644 index 000000000..6266f171e --- /dev/null +++ b/parser/testdata/01701_if_tuple_segfault/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS agg_table; + +CREATE TABLE IF NOT EXISTS agg_table +( + time DateTime CODEC(DoubleDelta, LZ4), + xxx String, + two_values Tuple(Array(UInt16), UInt32), + agg_simple SimpleAggregateFunction(sum, UInt64), + agg SimpleAggregateFunction(sumMap, Tuple(Array(Int16), Array(UInt64))) +) +ENGINE = AggregatingMergeTree() +ORDER BY (xxx, time); + +INSERT INTO agg_table SELECT toDateTime('2020-10-01 19:20:30'), 'hello', ([any(number)], sum(number)), sum(number), + sumMap((arrayMap(i -> toString(i), range(13)), arrayMap(i -> (number + i), range(13)))) FROM numbers(10); + +SELECT * FROM agg_table; + +SELECT if(xxx = 'x', ([2], 3), ([3], 4)) FROM agg_table; + +SELECT if(xxx = 'x', ([2], 3), ([3], 4, 'q', 'w', 7)) FROM agg_table; --{ serverError NO_COMMON_TYPE } + +ALTER TABLE agg_table UPDATE two_values = (two_values.1, two_values.2) WHERE time BETWEEN toDateTime('2020-08-01 00:00:00') AND toDateTime('2020-12-01 00:00:00') SETTINGS mutations_sync = 2; + +ALTER TABLE agg_table UPDATE agg_simple = 5 WHERE time BETWEEN toDateTime('2020-08-01 00:00:00') AND toDateTime('2020-12-01 00:00:00') SETTINGS mutations_sync = 2; + +ALTER TABLE agg_table UPDATE agg = (agg.1, agg.2) WHERE time BETWEEN toDateTime('2020-08-01 00:00:00') AND toDateTime('2020-12-01 00:00:00') SETTINGS mutations_sync = 2; + +ALTER TABLE agg_table UPDATE agg = (agg.1, arrayMap(x -> toUInt64(x / 2), agg.2)) WHERE time BETWEEN toDateTime('2020-08-01 00:00:00') AND toDateTime('2020-12-01 00:00:00') SETTINGS mutations_sync = 2; + +SELECT * FROM agg_table; + +DROP TABLE IF EXISTS agg_table; diff --git a/parser/testdata/01702_bitmap_native_integers/ast.json b/parser/testdata/01702_bitmap_native_integers/ast.json new file mode 100644 index 000000000..29e2252da --- /dev/null +++ b/parser/testdata/01702_bitmap_native_integers/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + 
"elapsed": 0.001205779, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01702_bitmap_native_integers/metadata.json b/parser/testdata/01702_bitmap_native_integers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01702_bitmap_native_integers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01702_bitmap_native_integers/query.sql b/parser/testdata/01702_bitmap_native_integers/query.sql new file mode 100644 index 000000000..a31de25dc --- /dev/null +++ b/parser/testdata/01702_bitmap_native_integers/query.sql @@ -0,0 +1,5 @@ +drop table if exists t; +create table t(i8 Int8, i16 Int16, i32 Int32, i64 Int64) engine Memory; +insert into t values (-1, -1, -1, -1), (-2, -2, -2, -2), (-3, -3, -3, -3), (-4, -4, -4, -4), (-5, -5, -5, -5); +select * apply bitmapMin, * apply bitmapMax from (select * apply groupBitmapState from t); +drop table t; diff --git a/parser/testdata/01702_rewrite_avg_for_algebraic_optimization/ast.json b/parser/testdata/01702_rewrite_avg_for_algebraic_optimization/ast.json new file mode 100644 index 000000000..9c35ac794 --- /dev/null +++ b/parser/testdata/01702_rewrite_avg_for_algebraic_optimization/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'SELECT avg(number + 2) FROM numbers(10)'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.00145428, + "rows_read": 5, + "bytes_read": 210 + } +} diff --git a/parser/testdata/01702_rewrite_avg_for_algebraic_optimization/metadata.json b/parser/testdata/01702_rewrite_avg_for_algebraic_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01702_rewrite_avg_for_algebraic_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01702_rewrite_avg_for_algebraic_optimization/query.sql b/parser/testdata/01702_rewrite_avg_for_algebraic_optimization/query.sql new file mode 100644 index 000000000..8fa4dd0ae --- /dev/null +++ b/parser/testdata/01702_rewrite_avg_for_algebraic_optimization/query.sql @@ -0,0 +1,22 @@ +SELECT 'SELECT avg(number + 2) FROM numbers(10)'; +SELECT 'value: ', avg(number + 2) FROM numbers(10); +SELECT 'EXPLAIN syntax:'; +EXPLAIN SYNTAX SELECT avg(number + 2) FROM numbers(10); + +SELECT ''; +SELECT 'SELECT avg(number - 2) FROM numbers(10)'; +SELECT 'value: ', avg(number - 2) FROM numbers(10); +SELECT 'EXPLAIN syntax:'; +EXPLAIN SYNTAX SELECT avg(number - 2) FROM numbers(10); + +SELECT ''; +SELECT 'SELECT avg(number * 2) FROM numbers(10)'; +SELECT 'value: ', avg(number * 2) FROM numbers(10); +SELECT 'EXPLAIN syntax:'; +EXPLAIN SYNTAX SELECT avg(number * 2) FROM numbers(10); + +SELECT ''; +SELECT 'SELECT avg(number / 2) FROM numbers(10)'; +SELECT 'value: ', avg(number / 2) FROM numbers(10); +SELECT 'EXPLAIN syntax:'; +EXPLAIN SYNTAX SELECT avg(number / 2) FROM numbers(10); diff --git a/parser/testdata/01702_system_numbers_scientific_notation/ast.json b/parser/testdata/01702_system_numbers_scientific_notation/ast.json new file mode 100644 index 000000000..016379819 --- /dev/null +++ b/parser/testdata/01702_system_numbers_scientific_notation/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + 
"data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_100" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001396272, + "rows_read": 12, + "bytes_read": 448 + } +} diff --git a/parser/testdata/01702_system_numbers_scientific_notation/metadata.json b/parser/testdata/01702_system_numbers_scientific_notation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01702_system_numbers_scientific_notation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01702_system_numbers_scientific_notation/query.sql b/parser/testdata/01702_system_numbers_scientific_notation/query.sql new file mode 100644 index 000000000..c87b33272 --- /dev/null +++ b/parser/testdata/01702_system_numbers_scientific_notation/query.sql @@ -0,0 +1,5 @@ +select * from numbers(1e2) format Null; +select * from numbers_mt(1e2) format Null; +select * from numbers_mt('100') format Null; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select * from numbers_mt(inf) format Null; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select * from numbers_mt(nan) format Null; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/01702_system_query_log/ast.json b/parser/testdata/01702_system_query_log/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01702_system_query_log/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01702_system_query_log/metadata.json b/parser/testdata/01702_system_query_log/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01702_system_query_log/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01702_system_query_log/query.sql b/parser/testdata/01702_system_query_log/query.sql new file mode 100644 index 000000000..92cc3d3bf --- /dev/null +++ b/parser/testdata/01702_system_query_log/query.sql @@ -0,0 +1,148 @@ +-- Tags: no-parallel + +-- fire all kinds of queries and then check if those are present in the system.query_log +SET log_comment='system.query_log logging test'; + +SELECT 'DROP queries and also a cleanup before the test'; +DROP DATABASE IF EXISTS sqllt SYNC; +DROP USER IF EXISTS sqllt_user; +DROP ROLE IF EXISTS sqllt_role; +DROP POLICY IF EXISTS sqllt_policy ON sqllt.table, sqllt.view, sqllt.dictionary; +DROP ROW POLICY IF EXISTS sqllt_row_policy ON sqllt.table, sqllt.view, sqllt.dictionary; +DROP QUOTA IF EXISTS sqllt_quota; +DROP SETTINGS PROFILE IF EXISTS sqllt_settings_profile; + +SELECT 'CREATE queries'; +CREATE DATABASE sqllt; + +CREATE TABLE sqllt.table +( + i UInt8, s String +) +ENGINE = MergeTree PARTITION BY tuple() ORDER BY tuple(); + +CREATE VIEW sqllt.view AS SELECT i, s FROM sqllt.table; +CREATE DICTIONARY sqllt.dictionary (key UInt64, value UInt64) PRIMARY KEY key SOURCE(CLICKHOUSE(DB 'sqllt' TABLE 'table' HOST 'localhost' PORT 9001)) LIFETIME(0) LAYOUT(FLAT()); + +CREATE USER sqllt_user IDENTIFIED WITH 
PLAINTEXT_PASSWORD BY 'password'; +CREATE ROLE sqllt_role; + +CREATE POLICY sqllt_policy ON sqllt.table, sqllt.view, sqllt.dictionary AS PERMISSIVE TO ALL; +CREATE POLICY sqllt_row_policy ON sqllt.table, sqllt.view, sqllt.dictionary AS PERMISSIVE TO ALL; + +CREATE QUOTA sqllt_quota KEYED BY user_name TO sqllt_role; +CREATE SETTINGS PROFILE sqllt_settings_profile SETTINGS interactive_delay = 200000; + +GRANT sqllt_role TO sqllt_user; + + +SELECT 'SET queries'; +SET log_profile_events=false; +SET DEFAULT ROLE sqllt_role TO sqllt_user; +-- SET ROLE sqllt_role; -- tests are executed by user `default`, which is defined in XML and cannot be updated. + +SELECT 'ALTER TABLE queries'; +ALTER TABLE sqllt.table ADD COLUMN new_col UInt32 DEFAULT 123456789; +ALTER TABLE sqllt.table COMMENT COLUMN new_col 'dummy column with a comment'; +ALTER TABLE sqllt.table CLEAR COLUMN new_col; +ALTER TABLE sqllt.table MODIFY COLUMN new_col DateTime DEFAULT '2015-05-18 07:40:13'; +ALTER TABLE sqllt.table MODIFY COLUMN new_col REMOVE COMMENT; +ALTER TABLE sqllt.table RENAME COLUMN new_col TO the_new_col; +ALTER TABLE sqllt.table DROP COLUMN the_new_col; +ALTER TABLE sqllt.table UPDATE i = i + 1 WHERE 1; +ALTER TABLE sqllt.table DELETE WHERE i > 65535; + +-- not done, seems too hard, so I've skipped queries of ALTER-X, where X is: +-- PARTITION +-- ORDER BY +-- SAMPLE BY +-- INDEX +-- CONSTRAINT +-- TTL +-- USER +-- QUOTA +-- ROLE +-- ROW POLICY +-- SETTINGS PROFILE + +SELECT 'SYSTEM queries'; +SYSTEM FLUSH LOGS query_log; +SYSTEM STOP MERGES sqllt.table; +SYSTEM START MERGES sqllt.table; +SYSTEM STOP TTL MERGES sqllt.table; +SYSTEM START TTL MERGES sqllt.table; +SYSTEM STOP MOVES sqllt.table; +SYSTEM START MOVES sqllt.table; +SYSTEM STOP FETCHES sqllt.table; +SYSTEM START FETCHES sqllt.table; +SYSTEM STOP REPLICATED SENDS sqllt.table; +SYSTEM START REPLICATED SENDS sqllt.table; + +-- SYSTEM RELOAD DICTIONARY sqllt.dictionary; -- temporarily out of order: Code: 210, Connection refused (localhost:9001) (version 21.3.1.1) +-- DROP REPLICA +-- haha, no +-- SYSTEM KILL; +-- SYSTEM SHUTDOWN; + +-- Since we don't really care about the actual output, suppress it with `FORMAT Null`. 
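+-- (Illustrative aside, not part of the original test: a query run with FORMAT Null is still executed and therefore still logged, e.g. +--   SELECT count() FROM system.one FORMAT Null; +-- produces no client output yet still leaves QueryStart/QueryFinish rows in system.query_log.)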
+SELECT 'SHOW queries'; + +SHOW CREATE TABLE sqllt.table FORMAT Null; +SHOW CREATE DICTIONARY sqllt.dictionary FORMAT Null; +SHOW DATABASES LIKE 'sqllt' FORMAT Null; +SHOW TABLES FROM sqllt FORMAT Null; +SHOW DICTIONARIES FROM sqllt FORMAT Null; +SHOW GRANTS FORMAT Null; +SHOW GRANTS FOR sqllt_user FORMAT Null; +SHOW CREATE USER sqllt_user FORMAT Null; +SHOW CREATE ROLE sqllt_role FORMAT Null; +SHOW CREATE POLICY sqllt_policy FORMAT Null; +SHOW CREATE ROW POLICY sqllt_row_policy FORMAT Null; +SHOW CREATE QUOTA sqllt_quota FORMAT Null; +SHOW CREATE SETTINGS PROFILE sqllt_settings_profile FORMAT Null; + +SELECT 'GRANT queries'; +GRANT SELECT ON sqllt.table TO sqllt_user; +GRANT DROP ON sqllt.view TO sqllt_user; + +SELECT 'REVOKE queries'; +REVOKE SELECT ON sqllt.table FROM sqllt_user; +REVOKE DROP ON sqllt.view FROM sqllt_user; + +SELECT 'Misc queries'; +DESCRIBE TABLE sqllt.table FORMAT Null; + +CHECK TABLE sqllt.table FORMAT Null; +DETACH TABLE sqllt.table; +ATTACH TABLE sqllt.table; + +RENAME TABLE sqllt.table TO sqllt.table_new; +RENAME TABLE sqllt.table_new TO sqllt.table; +TRUNCATE TABLE sqllt.table; +DROP TABLE sqllt.table SYNC; + +SET log_comment=''; +--------------------------------------------------------------------------------------------------- +-- Now get all logs related to this test +--------------------------------------------------------------------------------------------------- + +SYSTEM FLUSH LOGS query_log; +SELECT 'ACTUAL LOG CONTENT:'; + +-- Try to filter out all possible previous junk events by excluding old log entries, +SELECT query_kind, query FROM system.query_log +WHERE + log_comment LIKE '%system.query_log%' AND type == 'QueryStart' AND event_date >= yesterday() + AND current_database == currentDatabase() AND is_internal = 0 +ORDER BY event_time_microseconds; + + +-- cleanup +SELECT 'DROP queries and also a cleanup after the test'; +DROP DATABASE IF EXISTS sqllt; +DROP USER IF EXISTS sqllt_user; +DROP ROLE IF EXISTS sqllt_role; +DROP POLICY IF EXISTS sqllt_policy ON sqllt.table, sqllt.view, sqllt.dictionary; +DROP ROW POLICY IF EXISTS sqllt_row_policy ON sqllt.table, sqllt.view, sqllt.dictionary; +DROP QUOTA IF EXISTS sqllt_quota; +DROP SETTINGS PROFILE IF EXISTS sqllt_settings_profile; diff --git a/parser/testdata/01702_toDateTime_from_string_clamping/ast.json b/parser/testdata/01702_toDateTime_from_string_clamping/ast.json new file mode 100644 index 000000000..1c8654839 --- /dev/null +++ b/parser/testdata/01702_toDateTime_from_string_clamping/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '-922337203.6854775808'" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'Asia\/Istanbul'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.00136514, + "rows_read": 11, + "bytes_read": 437 + } +} diff --git a/parser/testdata/01702_toDateTime_from_string_clamping/metadata.json b/parser/testdata/01702_toDateTime_from_string_clamping/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01702_toDateTime_from_string_clamping/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01702_toDateTime_from_string_clamping/query.sql b/parser/testdata/01702_toDateTime_from_string_clamping/query.sql new file mode 100644 index 000000000..e84bb35b3 --- /dev/null +++ b/parser/testdata/01702_toDateTime_from_string_clamping/query.sql @@ -0,0 +1,4 @@ +SELECT toString(toDateTime('-922337203.6854775808', 1, 'Asia/Istanbul')); +SELECT toString(toDateTime('9922337203.6854775808', 1, 'Asia/Istanbul')); +SELECT toDateTime64(CAST('10500000000.1' AS Decimal64(1)), 1, 'Asia/Istanbul'); +SELECT toDateTime64(CAST('-10500000000.1' AS Decimal64(1)), 1, 'Asia/Istanbul'); diff --git a/parser/testdata/01703_rewrite_aggregate_function_case_insensitive/ast.json b/parser/testdata/01703_rewrite_aggregate_function_case_insensitive/ast.json new file mode 100644 index 000000000..1a5273908 --- /dev/null +++ b/parser/testdata/01703_rewrite_aggregate_function_case_insensitive/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function divide (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001165787, + "rows_read": 16, + "bytes_read": 628 + } +} diff --git a/parser/testdata/01703_rewrite_aggregate_function_case_insensitive/metadata.json b/parser/testdata/01703_rewrite_aggregate_function_case_insensitive/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01703_rewrite_aggregate_function_case_insensitive/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01703_rewrite_aggregate_function_case_insensitive/query.sql b/parser/testdata/01703_rewrite_aggregate_function_case_insensitive/query.sql new file mode 100644 index 000000000..1c5271b47 --- /dev/null +++ b/parser/testdata/01703_rewrite_aggregate_function_case_insensitive/query.sql @@ -0,0 +1,6 @@ +SELECT sum(number / 2) FROM numbers(10); +EXPLAIN SYNTAX SELECT sum(number / 2) FROM numbers(10); + + +SELECT Sum(number / 2) FROM numbers(10); +EXPLAIN SYNTAX SELECT Sum(number / 2) FROM numbers(10); diff --git a/parser/testdata/01704_transform_with_float_key/ast.json b/parser/testdata/01704_transform_with_float_key/ast.json new file mode 100644 index 000000000..3e2f57659 --- /dev/null +++ b/parser/testdata/01704_transform_with_float_key/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function transform 
(children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function divide (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal Array_[Float64_0.5, Float64_1.5]" + }, + { + "explain": " Literal Array_['Hello', 'World']" + }, + { + "explain": " Literal '-'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001503251, + "rows_read": 19, + "bytes_read": 759 + } +} diff --git a/parser/testdata/01704_transform_with_float_key/metadata.json b/parser/testdata/01704_transform_with_float_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01704_transform_with_float_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01704_transform_with_float_key/query.sql b/parser/testdata/01704_transform_with_float_key/query.sql new file mode 100644 index 000000000..690c73ee2 --- /dev/null +++ b/parser/testdata/01704_transform_with_float_key/query.sql @@ -0,0 +1,3 @@ +SELECT transform(number / 2, [0.5, 1.5], ['Hello', 'World'], '-') FROM numbers(10); +SELECT transform(number / 2, [1.0, 2.0], ['Hello', 'World'], '-') FROM numbers(10); +SELECT transform(number / 2, [1, 2], ['Hello', 'World'], '-') FROM numbers(10); diff --git a/parser/testdata/01705_normalize_case_insensitive_function_names/ast.json b/parser/testdata/01705_normalize_case_insensitive_function_names/ast.json new file mode 100644 index 000000000..f03db54c9 --- /dev/null +++ b/parser/testdata/01705_normalize_case_insensitive_function_names/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001271152, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01705_normalize_case_insensitive_function_names/metadata.json b/parser/testdata/01705_normalize_case_insensitive_function_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01705_normalize_case_insensitive_function_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01705_normalize_case_insensitive_function_names/query.sql b/parser/testdata/01705_normalize_case_insensitive_function_names/query.sql new file mode 100644 index 000000000..9e9fa486c --- /dev/null +++ b/parser/testdata/01705_normalize_case_insensitive_function_names/query.sql @@ -0,0 +1,2 @@ +set enable_analyzer = 1; +EXPLAIN SYNTAX SELECT CAST(1 AS INT), CEIL(1), CEILING(1), CHAR(49), CHAR_LENGTH('1'), CHARACTER_LENGTH('1'), COALESCE(1), CONCAT('1', '1'), CORR(1, 1), COS(1), COUNT(1), COVAR_POP(1, 1), COVAR_SAMP(1, 1), DATABASE(), SCHEMA(), DATEDIFF('DAY', toDate('2020-10-24'), toDate('2019-10-24')), EXP(1), FLATTEN([[1]]), FLOOR(1), FQDN(), GREATEST(1), IF(1, 1, 1), IFNULL(1, 1), LCASE('A'), LEAST(1), LENGTH('1'), LN(1), LOG(1), LOG10(1), LOG2(1), LOWER('A'), MAX(1), MID('123', 1, 1), MIN(1), MOD(1, 1), NOT(1), NOW(), NOW64(), NULLIF(1, 1), PI(), POSITION('123', '2'), POW(1, 1), POWER(1, 1), RAND(), REPLACE('1', '1', '2'), 
REVERSE('123'), ROUND(1), SIN(1), SQRT(1), STDDEV_POP(1), STDDEV_SAMP(1), SUBSTR('123', 2), SUBSTRING('123', 2), SUM(1), TAN(1), TANH(1), TRUNC(1), TRUNCATE(1), UCASE('A'), UPPER('A'), USER(), VAR_POP(1), VAR_SAMP(1), WEEK(toDate('2020-10-24')), YEARWEEK(toDate('2020-10-24')) format TSVRaw; diff --git a/parser/testdata/01705_normalize_create_alter_function_names/ast.json b/parser/testdata/01705_normalize_create_alter_function_names/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01705_normalize_create_alter_function_names/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01705_normalize_create_alter_function_names/metadata.json b/parser/testdata/01705_normalize_create_alter_function_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01705_normalize_create_alter_function_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01705_normalize_create_alter_function_names/query.sql b/parser/testdata/01705_normalize_create_alter_function_names/query.sql new file mode 100644 index 000000000..921d28e63 --- /dev/null +++ b/parser/testdata/01705_normalize_create_alter_function_names/query.sql @@ -0,0 +1,13 @@ +-- Tags: zookeeper, no-replicated-database, no-parallel, no-object-storage + +drop table if exists x; + +create table x(i int, index mm LOG2(i) type minmax granularity 1, projection p (select MAX(i))) engine ReplicatedMergeTree('/clickhouse/tables/{database}/x', 'r') order by i; + +alter table x add index nn LOG2(i) type minmax granularity 1, add projection p2 (select MIN(i)); + +show create x; + +select value from system.zookeeper WHERE name = 'metadata' and path = '/clickhouse/tables/'||currentDatabase()||'/x'; + +drop table x; diff --git a/parser/testdata/01706_optimize_normalize_count_variants/ast.json b/parser/testdata/01706_optimize_normalize_count_variants/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01706_optimize_normalize_count_variants/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01706_optimize_normalize_count_variants/metadata.json b/parser/testdata/01706_optimize_normalize_count_variants/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01706_optimize_normalize_count_variants/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01706_optimize_normalize_count_variants/query.sql b/parser/testdata/01706_optimize_normalize_count_variants/query.sql new file mode 100644 index 000000000..9c85d6bc2 --- /dev/null +++ b/parser/testdata/01706_optimize_normalize_count_variants/query.sql @@ -0,0 +1,8 @@ + +set optimize_normalize_count_variants = 1; + +explain syntax select count(), count(1), count(-1), sum(1), count(null); + +set aggregate_functions_null_for_empty = 1; + +explain syntax select sum(1) from numbers(10) where 0; diff --git a/parser/testdata/01707_join_use_nulls/ast.json b/parser/testdata/01707_join_use_nulls/ast.json new file mode 100644 index 000000000..a0042819a --- /dev/null +++ b/parser/testdata/01707_join_use_nulls/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery X (children 1)" + }, + { + "explain": " Identifier X" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001180963, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01707_join_use_nulls/metadata.json 
b/parser/testdata/01707_join_use_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01707_join_use_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01707_join_use_nulls/query.sql b/parser/testdata/01707_join_use_nulls/query.sql new file mode 100644 index 000000000..980ac48dd --- /dev/null +++ b/parser/testdata/01707_join_use_nulls/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS X; +DROP TABLE IF EXISTS Y; + +CREATE TABLE X (id Int) ENGINE=Memory; +CREATE TABLE Y (id Int) ENGINE=Memory; + +SELECT Y.id - 1 FROM X RIGHT JOIN Y ON (X.id + 1) = Y.id SETTINGS join_use_nulls=1; +SELECT Y.id - 1 FROM X RIGHT JOIN Y ON (X.id + 1) = toInt64(Y.id) SETTINGS join_use_nulls=1; + +-- Fix issue #20366 +-- Arguments of 'plus' have incorrect data types: '2' of type 'UInt8', '1' of type 'UInt8'. +-- Because 1 became toNullable(1), i.e.: +-- 2 UInt8 Const(size = 1, UInt8(size = 1)) +-- 1 UInt8 Const(size = 1, Nullable(size = 1, UInt8(size = 1), UInt8(size = 1))) +SELECT 2+1 FROM system.one X RIGHT JOIN system.one Y ON X.dummy+1 = Y.dummy SETTINGS join_use_nulls = 1; +SELECT 2+1 FROM system.one X RIGHT JOIN system.one Y ON X.dummy+1 = toUInt16(Y.dummy) SETTINGS join_use_nulls = 1; +SELECT X.dummy+1 FROM system.one X RIGHT JOIN system.one Y ON X.dummy = Y.dummy SETTINGS join_use_nulls = 1; +SELECT Y.dummy+1 FROM system.one X RIGHT JOIN system.one Y ON X.dummy = Y.dummy SETTINGS join_use_nulls = 1; + +DROP TABLE X; +DROP TABLE Y; diff --git a/parser/testdata/01709_inactive_parts_to_throw_insert/ast.json b/parser/testdata/01709_inactive_parts_to_throw_insert/ast.json new file mode 100644 index 000000000..bbe408338 --- /dev/null +++ b/parser/testdata/01709_inactive_parts_to_throw_insert/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_01709 (children 1)" + }, + { + "explain": " Identifier data_01709" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001672856, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01709_inactive_parts_to_throw_insert/metadata.json b/parser/testdata/01709_inactive_parts_to_throw_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01709_inactive_parts_to_throw_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01709_inactive_parts_to_throw_insert/query.sql b/parser/testdata/01709_inactive_parts_to_throw_insert/query.sql new file mode 100644 index 000000000..75b4d7c6a --- /dev/null +++ b/parser/testdata/01709_inactive_parts_to_throw_insert/query.sql @@ -0,0 +1,12 @@ +drop table if exists data_01709; + +create table data_01709 (i int) engine MergeTree order by i settings old_parts_lifetime = 10000000000, min_bytes_for_wide_part = 0, inactive_parts_to_throw_insert = 1; + +insert into data_01709 values (1); +insert into data_01709 values (2); + +optimize table data_01709 final; + +insert into data_01709 values (3); -- { serverError TOO_MANY_PARTS } + +drop table data_01709; diff --git a/parser/testdata/01710_aggregate_projection_with_grouping_set/ast.json b/parser/testdata/01710_aggregate_projection_with_grouping_set/ast.json new file mode 100644 index 000000000..b6c32ee6b --- /dev/null +++ b/parser/testdata/01710_aggregate_projection_with_grouping_set/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test 
(children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00113157, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01710_aggregate_projection_with_grouping_set/metadata.json b/parser/testdata/01710_aggregate_projection_with_grouping_set/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_aggregate_projection_with_grouping_set/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_aggregate_projection_with_grouping_set/query.sql b/parser/testdata/01710_aggregate_projection_with_grouping_set/query.sql new file mode 100644 index 000000000..652ce786b --- /dev/null +++ b/parser/testdata/01710_aggregate_projection_with_grouping_set/query.sql @@ -0,0 +1,15 @@ +drop table if exists test; + +create table test(dim1 String, dim2 String, projection p1 (select dim1, dim2, count() group by dim1, dim2)) engine MergeTree order by dim1; + +insert into test values ('a', 'x') ('a', 'y') ('b', 'x') ('b', 'y'); + +select dim1, dim2, count() from test group by grouping sets ((dim1, dim2), dim1) order by dim1, dim2, count(); + +select dim1, dim2, count() from test group by dim1, dim2 with rollup order by dim1, dim2, count(); + +select dim1, dim2, count() from test group by dim1, dim2 with cube order by dim1, dim2, count(); + +select dim1, dim2, count() from test group by dim1, dim2 with totals order by dim1, dim2, count(); + +drop table test; diff --git a/parser/testdata/01710_aggregate_projection_with_hashing/ast.json b/parser/testdata/01710_aggregate_projection_with_hashing/ast.json new file mode 100644 index 000000000..814c0e188 --- /dev/null +++ b/parser/testdata/01710_aggregate_projection_with_hashing/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001355781, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01710_aggregate_projection_with_hashing/metadata.json b/parser/testdata/01710_aggregate_projection_with_hashing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_aggregate_projection_with_hashing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_aggregate_projection_with_hashing/query.sql b/parser/testdata/01710_aggregate_projection_with_hashing/query.sql new file mode 100644 index 000000000..0429865e7 --- /dev/null +++ b/parser/testdata/01710_aggregate_projection_with_hashing/query.sql @@ -0,0 +1,11 @@ +set optimize_use_projections = 1, force_optimize_projection = 1; + +drop table if exists tp; + +create table tp (type Int32, device UUID, cnt UInt64) engine = MergeTree order by (type, device); +insert into tp select number%3, generateUUIDv4(), 1 from numbers(300); + +alter table tp add projection uniq_city_proj ( select type, uniq(cityHash64(device)), sum(cnt) group by type ); +alter table tp materialize projection uniq_city_proj settings mutations_sync = 1; + +drop table tp; diff --git a/parser/testdata/01710_aggregate_projection_with_monotonic_key_expr/ast.json b/parser/testdata/01710_aggregate_projection_with_monotonic_key_expr/ast.json new file mode 100644 index 000000000..6e5185d16 --- /dev/null +++ b/parser/testdata/01710_aggregate_projection_with_monotonic_key_expr/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + 
"explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001569342, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01710_aggregate_projection_with_monotonic_key_expr/metadata.json b/parser/testdata/01710_aggregate_projection_with_monotonic_key_expr/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_aggregate_projection_with_monotonic_key_expr/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_aggregate_projection_with_monotonic_key_expr/query.sql b/parser/testdata/01710_aggregate_projection_with_monotonic_key_expr/query.sql new file mode 100644 index 000000000..51dafb07b --- /dev/null +++ b/parser/testdata/01710_aggregate_projection_with_monotonic_key_expr/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t0 (c0 Int16, projection h (SELECT min(c0), max(c0), count() GROUP BY -c0)) ENGINE = MergeTree ORDER BY (); + +INSERT INTO t0(c0) VALUES (1); + +SELECT count() FROM t0 GROUP BY gcd(-sign(c0), -c0) SETTINGS optimize_use_implicit_projections = 1; + +create table t1 (c0 Int32) engine = MergeTree order by sin(c0); +insert into t1 values (-1), (1); +select c0 from t1 order by sin(-c0) settings optimize_read_in_order=0; +select c0 from t1 order by sin(-c0) settings optimize_read_in_order=1; + +DROP TABLE t0; +DROP TABLE t1; diff --git a/parser/testdata/01710_aggregate_projection_with_normalized_states/ast.json b/parser/testdata/01710_aggregate_projection_with_normalized_states/ast.json new file mode 100644 index 000000000..3e5aaae2f --- /dev/null +++ b/parser/testdata/01710_aggregate_projection_with_normalized_states/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery r (children 1)" + }, + { + "explain": " Identifier r" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001424875, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01710_aggregate_projection_with_normalized_states/metadata.json b/parser/testdata/01710_aggregate_projection_with_normalized_states/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_aggregate_projection_with_normalized_states/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_aggregate_projection_with_normalized_states/query.sql b/parser/testdata/01710_aggregate_projection_with_normalized_states/query.sql new file mode 100644 index 000000000..956bf3711 --- /dev/null +++ b/parser/testdata/01710_aggregate_projection_with_normalized_states/query.sql @@ -0,0 +1,34 @@ +DROP TABLE IF EXISTS r; + +select finalizeAggregation(cast(quantileState(0)(arrayJoin([1,2,3])) as AggregateFunction(quantile(1), UInt8))); + +CREATE TABLE r ( + x String, + a LowCardinality(String), + q AggregateFunction(quantilesTiming(0.5, 0.95, 0.99), Int64), + s Int64, + PROJECTION p + (SELECT a, quantilesTimingMerge(0.5, 0.95, 0.99)(q), sum(s) GROUP BY a) +) Engine=SummingMergeTree order by (x, a) +SETTINGS deduplicate_merge_projection_mode = 'drop'; -- should set it to rebuild once projection is supported with SummingMergeTree + +insert into r +select number%100 x, + 'x' a, + quantilesTimingState(0.5, 0.95, 0.99)(number::Int64) q, + sum(1) s +from numbers(1000) +group by x,a; + +SELECT + ifNotFinite(quantilesTimingMerge(0.95)(q)[1],0) as d1, + 
ifNotFinite(quantilesTimingMerge(0.99)(q)[1],0) as d2, + ifNotFinite(quantilesTimingMerge(0.50)(q)[1],0) as d3, + sum(s) +FROM cluster('test_cluster_two_shards', currentDatabase(), r) +WHERE a = 'x' +settings prefer_localhost_replica=0; + +SELECT quantilesTimingMerge(0.95)(q), quantilesTimingMerge(toInt64(1))(q) FROM remote('127.0.0.{1,2}', currentDatabase(), r); + +DROP TABLE r; diff --git a/parser/testdata/01710_force_use_projection/ast.json b/parser/testdata/01710_force_use_projection/ast.json new file mode 100644 index 000000000..895f18368 --- /dev/null +++ b/parser/testdata/01710_force_use_projection/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tp (children 1)" + }, + { + "explain": " Identifier tp" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001337212, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01710_force_use_projection/metadata.json b/parser/testdata/01710_force_use_projection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_force_use_projection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_force_use_projection/query.sql b/parser/testdata/01710_force_use_projection/query.sql new file mode 100644 index 000000000..d6a23b70f --- /dev/null +++ b/parser/testdata/01710_force_use_projection/query.sql @@ -0,0 +1,18 @@ +drop table if exists tp; + +create table tp (d1 Int32, d2 Int32, eventcnt Int64, projection p (select sum(eventcnt) group by d1)) engine = MergeTree order by (d1, d2); + +set optimize_use_projections = 1, force_optimize_projection = 1; +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +select sum(eventcnt) eventcnt, d1 from tp group by d1; + +select avg(eventcnt) eventcnt, d1 from tp group by d1; + +insert into tp values (1, 2, 3); + +select sum(eventcnt) eventcnt, d1 from tp group by d1; + +select avg(eventcnt) eventcnt, d1 from tp group by d1; -- { serverError PROJECTION_NOT_USED } + +drop table tp; diff --git a/parser/testdata/01710_minmax_count_projection/ast.json b/parser/testdata/01710_minmax_count_projection/ast.json new file mode 100644 index 000000000..c65b0d287 --- /dev/null +++ b/parser/testdata/01710_minmax_count_projection/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery d (children 1)" + }, + { + "explain": " Identifier d" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001439489, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01710_minmax_count_projection/metadata.json b/parser/testdata/01710_minmax_count_projection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_minmax_count_projection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_minmax_count_projection/query.sql b/parser/testdata/01710_minmax_count_projection/query.sql new file mode 100644 index 000000000..87a119624 --- /dev/null +++ b/parser/testdata/01710_minmax_count_projection/query.sql @@ -0,0 +1,82 @@ +drop table if exists d; + +create table d (i int, j int) engine MergeTree partition by i % 2 order by tuple() settings index_granularity = 1; + +insert into d select number, number from numbers(10000); + +set max_rows_to_read = 2, optimize_use_projections = 1, optimize_use_implicit_projections = 
1, optimize_use_projection_filtering = 1; +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +select min(i), max(i), count() from d; +select min(i), max(i), count() from d group by _partition_id order by _partition_id; +select min(i), max(i), count() from d where _partition_value.1 = 0 group by _partition_id order by _partition_id; +select min(i), max(i), count() from d where moduloLegacy(i, 2) = 0 group by _partition_id order by _partition_id; +select min(i), max(i), count() from d where _partition_value.1 = 10 group by _partition_id order by _partition_id; + +-- fuzz crash +select min(i) from d where 1 = _partition_value.1; + +-- fuzz crash https://github.com/ClickHouse/ClickHouse/issues/37151 +SELECT min(i), max(i), count() FROM d WHERE (_partition_value.1) = 0 GROUP BY ignore(bitTest(ignore(NULL), 0), NULL, (_partition_value.1) = 7, '10.25', bitTest(NULL, 0), NULL, ignore(ignore(-2147483647, NULL)), 1024), _partition_id ORDER BY _partition_id ASC NULLS FIRST; + +drop table d; + +drop table if exists has_final_mark; +drop table if exists mixed_final_mark; + +create table has_final_mark (i int, j int) engine MergeTree partition by i % 2 order by j settings index_granularity = 10, write_final_mark = 1; +create table mixed_final_mark (i int, j int) engine MergeTree partition by i % 2 order by j settings index_granularity = 10; + +set max_rows_to_read = 100000; + +insert into has_final_mark select number, number from numbers(10000); + +alter table mixed_final_mark attach partition 1 from has_final_mark; + +set max_rows_to_read = 2; + +select min(j) from has_final_mark; +select min(j) from mixed_final_mark; + +select min(j), max(j) from has_final_mark; + +set max_rows_to_read = 5001; -- one normal part 5000 + one minmax_count_projection part 1 +select min(j), max(j) from mixed_final_mark; + +-- The first primary key expression is the same as one of the partition columns +drop table if exists t; +create table t (server_date Date, something String) engine MergeTree partition by (toYYYYMM(server_date), server_date) order by (server_date, something); +insert into t values ('2019-01-01', 'test1'), ('2019-02-01', 'test2'), ('2019-03-01', 'test3'); +select count() from t; +drop table t; + +drop table if exists d; +create table d (dt DateTime, j int) engine MergeTree partition by (toDate(dt), ceiling(j), toDate(dt), CEILING(j)) order by tuple(); +insert into d values ('2021-10-24 10:00:00', 10), ('2021-10-25 10:00:00', 10), ('2021-10-26 10:00:00', 10), ('2021-10-27 10:00:00', 10); +select min(dt), max(dt), count() from d where toDate(dt) >= '2021-10-25'; +-- fuzz crash +select min(dt), max(dt), count(toDate(dt) >= '2021-10-25') from d where toDate(dt) >= '2021-10-25'; +select count() from d group by toDate(dt); + +-- fuzz crash +SELECT min(dt), count(ignore(ignore(ignore(tupleElement(_partition_value, 'xxxx', NULL) = NULL), NULL, NULL, NULL), 0, '10485.76', NULL)), max(dt), count(toDate(dt) >= '2021-10-25') FROM d WHERE toDate(dt) >= '2021-10-25'; + +-- fuzz crash +SELECT pointInEllipses(min(j), NULL), max(dt), count('0.0000000007') FROM d WHERE toDate(dt) >= '2021-10-25'; +SELECT min(j) FROM d PREWHERE ceil(j) <= 0; +SELECT min(dt) FROM d PREWHERE ((0.9998999834060669 AND 1023) AND 255) <= ceil(j); +SELECT count('') AND NULL FROM d PREWHERE ceil(j) <= NULL; + +drop table d; + +-- count variant optimization + +drop table if exists test; +create table test (id Int64, d Int64) engine MergeTree order by id; +insert into test select number, number from
numbers(1e3); + +select count(if(d=4, d, 1)) from test settings force_optimize_projection = 1; +select count(d/3) from test settings force_optimize_projection = 1; +select count(if(d=4, Null, 1)) from test settings force_optimize_projection = 1; -- { serverError PROJECTION_NOT_USED } + +drop table test; diff --git a/parser/testdata/01710_minmax_count_projection_constant_query/ast.json b/parser/testdata/01710_minmax_count_projection_constant_query/ast.json new file mode 100644 index 000000000..5e3ef7374 --- /dev/null +++ b/parser/testdata/01710_minmax_count_projection_constant_query/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001260197, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01710_minmax_count_projection_constant_query/metadata.json b/parser/testdata/01710_minmax_count_projection_constant_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_minmax_count_projection_constant_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_minmax_count_projection_constant_query/query.sql b/parser/testdata/01710_minmax_count_projection_constant_query/query.sql new file mode 100644 index 000000000..895cd957f --- /dev/null +++ b/parser/testdata/01710_minmax_count_projection_constant_query/query.sql @@ -0,0 +1,5 @@ +drop table if exists t; +create table t (n int) engine MergeTree order by n; +insert into t values (1); +select 1 from t group by 1; +drop table t; diff --git a/parser/testdata/01710_minmax_count_projection_count_nullable/ast.json b/parser/testdata/01710_minmax_count_projection_count_nullable/ast.json new file mode 100644 index 000000000..e0b58990a --- /dev/null +++ b/parser/testdata/01710_minmax_count_projection_count_nullable/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001121453, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01710_minmax_count_projection_count_nullable/metadata.json b/parser/testdata/01710_minmax_count_projection_count_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_minmax_count_projection_count_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_minmax_count_projection_count_nullable/query.sql b/parser/testdata/01710_minmax_count_projection_count_nullable/query.sql new file mode 100644 index 000000000..048d725e0 --- /dev/null +++ b/parser/testdata/01710_minmax_count_projection_count_nullable/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test (`val` LowCardinality(Nullable(String))) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192; + +insert into test select number == 3 ? 
'some value' : null from numbers(5); + +SELECT count(val) FROM test SETTINGS optimize_use_implicit_projections = 1; + +DROP TABLE test; diff --git a/parser/testdata/01710_minmax_count_projection_distributed_query/ast.json b/parser/testdata/01710_minmax_count_projection_distributed_query/ast.json new file mode 100644 index 000000000..df030da7e --- /dev/null +++ b/parser/testdata/01710_minmax_count_projection_distributed_query/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001221017, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01710_minmax_count_projection_distributed_query/metadata.json b/parser/testdata/01710_minmax_count_projection_distributed_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_minmax_count_projection_distributed_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_minmax_count_projection_distributed_query/query.sql b/parser/testdata/01710_minmax_count_projection_distributed_query/query.sql new file mode 100644 index 000000000..723c82090 --- /dev/null +++ b/parser/testdata/01710_minmax_count_projection_distributed_query/query.sql @@ -0,0 +1,5 @@ +drop table if exists t; +create table t (n int, s String) engine MergeTree order by n; +insert into t values (1, 'a'); +select count(), count(n), count(s) from cluster('test_cluster_two_shards', currentDatabase(), t); +drop table t; diff --git a/parser/testdata/01710_minmax_count_projection_modify_partition_key/ast.json b/parser/testdata/01710_minmax_count_projection_modify_partition_key/ast.json new file mode 100644 index 000000000..e096f7172 --- /dev/null +++ b/parser/testdata/01710_minmax_count_projection_modify_partition_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001369713, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01710_minmax_count_projection_modify_partition_key/metadata.json b/parser/testdata/01710_minmax_count_projection_modify_partition_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_minmax_count_projection_modify_partition_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_minmax_count_projection_modify_partition_key/query.sql b/parser/testdata/01710_minmax_count_projection_modify_partition_key/query.sql new file mode 100644 index 000000000..5297fb3e1 --- /dev/null +++ b/parser/testdata/01710_minmax_count_projection_modify_partition_key/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test (type Enum('x'), s String) ENGINE = MergeTree ORDER BY s PARTITION BY type; +INSERT INTO test VALUES ('x', 'Hello'); + +SELECT type, count() FROM test GROUP BY type ORDER BY type; + +ALTER TABLE test MODIFY COLUMN type Enum('x', 'y'); +INSERT INTO test VALUES ('y', 'World'); + +SELECT type, count() FROM test GROUP BY type ORDER BY type; + +DROP TABLE test; diff --git a/parser/testdata/01710_normal_projection_fix1/ast.json b/parser/testdata/01710_normal_projection_fix1/ast.json new file mode 100644 index 000000000..8ead8eabc --- /dev/null +++ 
b/parser/testdata/01710_normal_projection_fix1/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001722365, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01710_normal_projection_fix1/metadata.json b/parser/testdata/01710_normal_projection_fix1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_normal_projection_fix1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_normal_projection_fix1/query.sql b/parser/testdata/01710_normal_projection_fix1/query.sql new file mode 100644 index 000000000..fbb0bf48a --- /dev/null +++ b/parser/testdata/01710_normal_projection_fix1/query.sql @@ -0,0 +1,21 @@ +drop table if exists t; + +create table t (i int, j int) engine MergeTree order by i; + +insert into t values (1, 2); + +alter table t add projection x (select * order by j); + +insert into t values (1, 4); +insert into t values (1, 5); + +set optimize_use_projections = 1, force_optimize_projection = 1; +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +select i from t prewhere j = 4; + +SELECT j = 2, i FROM t PREWHERE j = 2; + +SELECT j = -1, j = NULL FROM t WHERE j = -1; + +drop table t; diff --git a/parser/testdata/01710_normal_projection_format/ast.json b/parser/testdata/01710_normal_projection_format/ast.json new file mode 100644 index 000000000..56f9981d4 --- /dev/null +++ b/parser/testdata/01710_normal_projection_format/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001319609, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01710_normal_projection_format/metadata.json b/parser/testdata/01710_normal_projection_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_normal_projection_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_normal_projection_format/query.sql b/parser/testdata/01710_normal_projection_format/query.sql new file mode 100644 index 000000000..6210be166 --- /dev/null +++ b/parser/testdata/01710_normal_projection_format/query.sql @@ -0,0 +1,17 @@ +DROP TABLE if exists test; + +CREATE TABLE test +( + uuid FixedString(16), + id int, + ns FixedString(16), + dt DateTime64(6), +) +ENGINE = MergeTree +ORDER BY (id, dt, uuid); + +ALTER TABLE test ADD PROJECTION mtlog_proj_source_reference (SELECT * ORDER BY substring(ns, 1, 5)); + +SHOW CREATE test; + +drop table test; diff --git a/parser/testdata/01710_normal_projection_join_plan_fix/ast.json b/parser/testdata/01710_normal_projection_join_plan_fix/ast.json new file mode 100644 index 000000000..eaad8aff3 --- /dev/null +++ b/parser/testdata/01710_normal_projection_join_plan_fix/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001349243, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git 
a/parser/testdata/01710_normal_projection_join_plan_fix/metadata.json b/parser/testdata/01710_normal_projection_join_plan_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_normal_projection_join_plan_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_normal_projection_join_plan_fix/query.sql b/parser/testdata/01710_normal_projection_join_plan_fix/query.sql new file mode 100644 index 000000000..40847a301 --- /dev/null +++ b/parser/testdata/01710_normal_projection_join_plan_fix/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (id UInt32, s String) Engine = MergeTree ORDER BY id; +CREATE TABLE t2 (id1 UInt32, id2 UInt32) Engine = MergeTree ORDER BY id1 SETTINGS index_granularity = 1; +INSERT INTO t2 SELECT number, number from numbers(100); +ALTER TABLE t2 ADD PROJECTION proj (SELECT id2 ORDER BY id2); +INSERT INTO t2 SELECT number, number from numbers(100); + +SELECT s FROM t1 as lhs LEFT JOIN (SELECT * FROM t2 WHERE id2 = 2) as rhs ON lhs.id = rhs.id2; + +DROP TABLE t1; +DROP TABLE t2; diff --git a/parser/testdata/01710_normal_projection_with_query_plan_optimization/ast.json b/parser/testdata/01710_normal_projection_with_query_plan_optimization/ast.json new file mode 100644 index 000000000..bdd887051 --- /dev/null +++ b/parser/testdata/01710_normal_projection_with_query_plan_optimization/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001050774, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01710_normal_projection_with_query_plan_optimization/metadata.json b/parser/testdata/01710_normal_projection_with_query_plan_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_normal_projection_with_query_plan_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_normal_projection_with_query_plan_optimization/query.sql b/parser/testdata/01710_normal_projection_with_query_plan_optimization/query.sql new file mode 100644 index 000000000..d5ea8b4d6 --- /dev/null +++ b/parser/testdata/01710_normal_projection_with_query_plan_optimization/query.sql @@ -0,0 +1,12 @@ +drop table if exists t; + +CREATE TABLE t (id UInt64, id2 UInt64, id3 UInt64, PROJECTION t_reverse (SELECT id, id2, id3 ORDER BY id2, id, id3)) ENGINE = MergeTree ORDER BY (id) settings index_granularity = 4; + +insert into t SELECT number, -number, number FROM numbers(10000); + +set max_rows_to_read = 4; + +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; +select count() from t where id = 3; + +drop table t; diff --git a/parser/testdata/01710_order_by_projections_complete/ast.json b/parser/testdata/01710_order_by_projections_complete/ast.json new file mode 100644 index 000000000..a7c8d8cb9 --- /dev/null +++ b/parser/testdata/01710_order_by_projections_complete/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_order_by_proj_comp (children 1)" + }, + { + "explain": " Identifier data_order_by_proj_comp" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001070878, + "rows_read": 2, + "bytes_read": 98 + } +} diff 
--git a/parser/testdata/01710_order_by_projections_complete/metadata.json b/parser/testdata/01710_order_by_projections_complete/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_order_by_projections_complete/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_order_by_projections_complete/query.sql b/parser/testdata/01710_order_by_projections_complete/query.sql new file mode 100644 index 000000000..86a553569 --- /dev/null +++ b/parser/testdata/01710_order_by_projections_complete/query.sql @@ -0,0 +1,14 @@ +drop table if exists data_order_by_proj_comp; +create table data_order_by_proj_comp (t UInt64, projection tSort (select * order by t)) ENGINE MergeTree() order by t; + +system stop merges data_order_by_proj_comp; + +insert into data_order_by_proj_comp values (5); +insert into data_order_by_proj_comp values (5); +insert into data_order_by_proj_comp values (6); + +-- { echoOn } +select t from data_order_by_proj_comp where t > 0 order by t settings optimize_read_in_order=1; +select t from data_order_by_proj_comp where t > 0 order by t settings optimize_read_in_order=0; +select t from data_order_by_proj_comp where t > 0 order by t settings max_threads=1; +-- { echoOff } diff --git a/parser/testdata/01710_order_by_projections_incomplete/ast.json b/parser/testdata/01710_order_by_projections_incomplete/ast.json new file mode 100644 index 000000000..9c290781e --- /dev/null +++ b/parser/testdata/01710_order_by_projections_incomplete/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_order_by_proj_incomp (children 1)" + }, + { + "explain": " Identifier data_order_by_proj_incomp" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001574989, + "rows_read": 2, + "bytes_read": 102 + } +} diff --git a/parser/testdata/01710_order_by_projections_incomplete/metadata.json b/parser/testdata/01710_order_by_projections_incomplete/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_order_by_projections_incomplete/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_order_by_projections_incomplete/query.sql b/parser/testdata/01710_order_by_projections_incomplete/query.sql new file mode 100644 index 000000000..17ae71286 --- /dev/null +++ b/parser/testdata/01710_order_by_projections_incomplete/query.sql @@ -0,0 +1,16 @@ +drop table if exists data_order_by_proj_incomp; +create table data_order_by_proj_incomp (t UInt64) ENGINE MergeTree() order by t; + +system stop merges data_order_by_proj_incomp; + +insert into data_order_by_proj_incomp values (5); +insert into data_order_by_proj_incomp values (5); + +alter table data_order_by_proj_incomp add projection tSort (select * order by t); +insert into data_order_by_proj_incomp values (6); + +-- { echoOn } +select t from data_order_by_proj_incomp where t > 0 order by t settings optimize_read_in_order=1; +select t from data_order_by_proj_incomp where t > 0 order by t settings optimize_read_in_order=0; +select t from data_order_by_proj_incomp where t > 0 order by t settings max_threads=1; +-- { echoOff } diff --git a/parser/testdata/01710_projection_additional_filters/ast.json b/parser/testdata/01710_projection_additional_filters/ast.json new file mode 100644 index 000000000..8462ff610 --- /dev/null +++ b/parser/testdata/01710_projection_additional_filters/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": 
"explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001254627, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01710_projection_additional_filters/metadata.json b/parser/testdata/01710_projection_additional_filters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_additional_filters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_additional_filters/query.sql b/parser/testdata/01710_projection_additional_filters/query.sql new file mode 100644 index 000000000..b6be6af44 --- /dev/null +++ b/parser/testdata/01710_projection_additional_filters/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS t; + +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +CREATE TABLE t(a UInt32, b UInt32) ENGINE = MergeTree PARTITION BY a ORDER BY a; + +INSERT INTO t SELECT number % 10, number FROM numbers(10000); + +SELECT count(), min(a), max(a) FROM t SETTINGS additional_table_filters = {'t' : '0'}; + +DROP TABLE t; + +drop table if exists atf_p; +create table atf_p (x UInt64) engine = MergeTree order by tuple(); +insert into atf_p select number from numbers(10); +select count() from atf_p settings additional_table_filters = {'atf_p': 'x <= 2'}; +drop table atf_p; diff --git a/parser/testdata/01710_projection_aggregate_functions_null_for_empty/ast.json b/parser/testdata/01710_projection_aggregate_functions_null_for_empty/ast.json new file mode 100644 index 000000000..aac60f635 --- /dev/null +++ b/parser/testdata/01710_projection_aggregate_functions_null_for_empty/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000864011, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01710_projection_aggregate_functions_null_for_empty/metadata.json b/parser/testdata/01710_projection_aggregate_functions_null_for_empty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_aggregate_functions_null_for_empty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_aggregate_functions_null_for_empty/query.sql b/parser/testdata/01710_projection_aggregate_functions_null_for_empty/query.sql new file mode 100644 index 000000000..a77720b65 --- /dev/null +++ b/parser/testdata/01710_projection_aggregate_functions_null_for_empty/query.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS t1; + +CREATE TABLE t1 (c0 Int32, PRIMARY KEY (c0)) ENGINE=MergeTree; +INSERT INTO t1 VALUES (1554690688); + +SELECT MIN(t1.c0) FROM t1 SETTINGS aggregate_functions_null_for_empty = 1; + +DROP TABLE IF EXISTS t1; diff --git a/parser/testdata/01710_projection_aggregation_in_order/ast.json b/parser/testdata/01710_projection_aggregation_in_order/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01710_projection_aggregation_in_order/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01710_projection_aggregation_in_order/metadata.json b/parser/testdata/01710_projection_aggregation_in_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- 
/dev/null +++ b/parser/testdata/01710_projection_aggregation_in_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_aggregation_in_order/query.sql b/parser/testdata/01710_projection_aggregation_in_order/query.sql new file mode 100644 index 000000000..c3faa734e --- /dev/null +++ b/parser/testdata/01710_projection_aggregation_in_order/query.sql @@ -0,0 +1,79 @@ +-- Test that checks the correctness of the result for optimize_aggregation_in_order and projections, +-- not that this optimization will take place. + +DROP TABLE IF EXISTS normal; + +CREATE TABLE normal +( + `key` UInt32, + `ts` DateTime, + `value` UInt32, + PROJECTION aaaa + ( + SELECT + ts, + key, + value + ORDER BY ts, key + ) +) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO normal SELECT + number, + toDateTime('2021-12-06 00:00:00') + number, + number +FROM numbers(100000); + +SET force_optimize_projection=1; +SET optimize_use_projections=1, optimize_aggregation_in_order=1, enable_parallel_replicas=0; + +WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM normal WHERE ts > '2021-12-06 22:00:00' GROUP BY a ORDER BY v LIMIT 5; +WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM normal WHERE ts > '2021-12-06 22:00:00' GROUP BY toStartOfHour(ts), a ORDER BY v LIMIT 5; + +SET optimize_aggregation_in_order=0; +SET enable_parallel_replicas=1, parallel_replicas_local_plan=1, parallel_replicas_support_projection=1, parallel_replicas_for_non_replicated_merge_tree=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; + +WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM normal WHERE ts > '2021-12-06 22:00:00' GROUP BY a ORDER BY v LIMIT 5; +WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM normal WHERE ts > '2021-12-06 22:00:00' GROUP BY toStartOfHour(ts), a ORDER BY v LIMIT 5; + +DROP TABLE normal; + +DROP TABLE IF EXISTS agg; + +CREATE TABLE agg +( + `key` UInt32, + `ts` DateTime, + `value` UInt32, + PROJECTION aaaa + ( + SELECT + ts, + key, + sum(value) + GROUP BY ts, key + ) +) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO agg SELECT + 1, + toDateTime('2021-12-06 00:00:00') + number, + number +FROM numbers(100000); + +SET optimize_use_projections=1, optimize_aggregation_in_order=1, enable_parallel_replicas=0; + +WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM agg WHERE ts > '2021-12-06 22:00:00' GROUP BY a ORDER BY v LIMIT 5; +WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM agg WHERE ts > '2021-12-06 22:00:00' GROUP BY toStartOfHour(ts), a ORDER BY v LIMIT 5; + +SET optimize_aggregation_in_order=0; +SET enable_parallel_replicas=1, parallel_replicas_local_plan=1, parallel_replicas_support_projection=1, parallel_replicas_for_non_replicated_merge_tree=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; + +WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM agg WHERE ts > '2021-12-06 22:00:00' GROUP BY a ORDER BY v LIMIT 5; +WITH toStartOfHour(ts) AS a SELECT sum(value) v FROM agg WHERE ts > '2021-12-06 22:00:00' GROUP BY toStartOfHour(ts), a ORDER BY v LIMIT 5; + +DROP TABLE agg; diff --git a/parser/testdata/01710_projection_array_join/ast.json b/parser/testdata/01710_projection_array_join/ast.json new file mode 100644 index 000000000..949be00c7 --- /dev/null +++ b/parser/testdata/01710_projection_array_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], +
+ "rows": 1, + + "statistics": + { + "elapsed": 0.001181139, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01710_projection_array_join/metadata.json b/parser/testdata/01710_projection_array_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_array_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_array_join/query.sql b/parser/testdata/01710_projection_array_join/query.sql new file mode 100644 index 000000000..509e053e5 --- /dev/null +++ b/parser/testdata/01710_projection_array_join/query.sql @@ -0,0 +1,11 @@ +set optimize_use_projections = 1; + +drop table if exists x; + +create table x (pk int, arr Array(int), projection p (select arr order by pk)) engine MergeTree order by tuple(); + +insert into x values (1, [2]); + +select a from x array join arr as a; + +drop table x; diff --git a/parser/testdata/01710_projection_detach_part/ast.json b/parser/testdata/01710_projection_detach_part/ast.json new file mode 100644 index 000000000..2e2b05063 --- /dev/null +++ b/parser/testdata/01710_projection_detach_part/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001101187, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01710_projection_detach_part/metadata.json b/parser/testdata/01710_projection_detach_part/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_detach_part/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_detach_part/query.sql b/parser/testdata/01710_projection_detach_part/query.sql new file mode 100644 index 000000000..c77a2abfb --- /dev/null +++ b/parser/testdata/01710_projection_detach_part/query.sql @@ -0,0 +1,15 @@ +set optimize_use_projections = 1; + +drop table if exists t; + +create table t (i int, j int, projection x (select * order by j)) engine MergeTree partition by i order by i; + +insert into t values (1, 2); + +alter table t detach partition 1; + +alter table t attach partition 1; + +select count() from system.projection_parts where database = currentDatabase() and table = 't' and active; + +drop table t; diff --git a/parser/testdata/01710_projection_drop_if_exists/ast.json b/parser/testdata/01710_projection_drop_if_exists/ast.json new file mode 100644 index 000000000..a3977473d --- /dev/null +++ b/parser/testdata/01710_projection_drop_if_exists/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tp (children 1)" + }, + { + "explain": " Identifier tp" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00122387, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01710_projection_drop_if_exists/metadata.json b/parser/testdata/01710_projection_drop_if_exists/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_drop_if_exists/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_drop_if_exists/query.sql b/parser/testdata/01710_projection_drop_if_exists/query.sql new file mode 100644 index 000000000..4c19ba18c --- /dev/null +++ b/parser/testdata/01710_projection_drop_if_exists/query.sql @@ -0,0 +1,11 @@ +drop table if exists tp; + +create table tp (x 
Int32, y Int32, projection p (select x, y order by x)) engine = MergeTree order by y; + +alter table tp drop projection pp; -- { serverError NO_SUCH_PROJECTION_IN_TABLE } +alter table tp drop projection if exists pp; +alter table tp drop projection if exists p; +alter table tp drop projection p; -- { serverError NO_SUCH_PROJECTION_IN_TABLE } +alter table tp drop projection if exists p; + +drop table tp; diff --git a/parser/testdata/01710_projection_external_aggregate/ast.json b/parser/testdata/01710_projection_external_aggregate/ast.json new file mode 100644 index 000000000..5e13038de --- /dev/null +++ b/parser/testdata/01710_projection_external_aggregate/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery agg (children 1)" + }, + { + "explain": " Identifier agg" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001097741, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01710_projection_external_aggregate/metadata.json b/parser/testdata/01710_projection_external_aggregate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_external_aggregate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_external_aggregate/query.sql b/parser/testdata/01710_projection_external_aggregate/query.sql new file mode 100644 index 000000000..8ef2f2b11 --- /dev/null +++ b/parser/testdata/01710_projection_external_aggregate/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS agg; + +CREATE TABLE agg +( + `key` UInt32, + `ts` DateTime, + `value` UInt32, + PROJECTION aaaa + ( + SELECT + ts, + key, + sum(value) + GROUP BY ts, key + ) +) +ENGINE = MergeTree +ORDER BY (key, ts); + +SET max_bytes_before_external_group_by=1; +SET max_bytes_ratio_before_external_group_by=0; + +INSERT INTO agg SELECT 1, toDateTime('2021-12-06 00:00:00') + number, number FROM numbers(100000); + +DROP TABLE agg; diff --git a/parser/testdata/01710_projection_fetch_long/ast.json b/parser/testdata/01710_projection_fetch_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01710_projection_fetch_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01710_projection_fetch_long/metadata.json b/parser/testdata/01710_projection_fetch_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_fetch_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_fetch_long/query.sql b/parser/testdata/01710_projection_fetch_long/query.sql new file mode 100644 index 000000000..c5449b46f --- /dev/null +++ b/parser/testdata/01710_projection_fetch_long/query.sql @@ -0,0 +1,43 @@ +-- Tags: long, no-object-storage + +drop table if exists tp_1; +drop table if exists tp_2; + +create table tp_1 (x Int32, y Int32, projection p (select x, y order by x)) engine = ReplicatedMergeTree('/clickhouse/tables/{shard}/01710_projection_fetch_' || currentDatabase(), '1_{replica}') order by y settings min_rows_for_wide_part = 4, min_bytes_for_wide_part = 32; + +create table tp_2 (x Int32, y Int32, projection p (select x, y order by x)) engine = ReplicatedMergeTree('/clickhouse/tables/{shard}/01710_projection_fetch_' || currentDatabase(), '2_{replica}') order by y settings min_rows_for_wide_part = 4, min_bytes_for_wide_part = 32; + +insert into tp_1 select number, number from numbers(3); + +system sync 
replica tp_2; +select * from tp_2 order by x; + +insert into tp_1 select number, number from numbers(5); + +system sync replica tp_2; +select * from tp_2 order by x; + +-- test projection creation, materialization, clear and drop +alter table tp_1 add projection pp (select x, count() group by x); +system sync replica tp_2; +select count() from system.projection_parts where database = currentDatabase() and table = 'tp_2' and name = 'pp' and active; +show create table tp_2; + +-- the other three operations are mutations +set mutations_sync = 2; +alter table tp_1 materialize projection pp; +select count() from system.projection_parts where database = currentDatabase() and table = 'tp_2' and name = 'pp' and active; +show create table tp_2; + +alter table tp_1 clear projection pp; +system sync replica tp_2; +select * from system.projection_parts where database = currentDatabase() and table = 'tp_2' and name = 'pp' and active; +show create table tp_2; + +alter table tp_1 drop projection pp; +system sync replica tp_2; +select * from system.projection_parts where database = currentDatabase() and table = 'tp_2' and name = 'pp' and active; +show create table tp_2; + +drop table if exists tp_1; +drop table if exists tp_2; diff --git a/parser/testdata/01710_projection_group_by_order_by/ast.json b/parser/testdata/01710_projection_group_by_order_by/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01710_projection_group_by_order_by/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01710_projection_group_by_order_by/metadata.json b/parser/testdata/01710_projection_group_by_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_group_by_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_group_by_order_by/query.sql b/parser/testdata/01710_projection_group_by_order_by/query.sql new file mode 100644 index 000000000..e97e2ff70 --- /dev/null +++ b/parser/testdata/01710_projection_group_by_order_by/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-random-merge-tree-settings +-- Tag no-random-merge-tree-settings: bug in formatting of projections.
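+-- Editorial note (not in the upstream test): the tag above works around the projection-formatting bug tracked in the issue linked below; the create table statement further down defines a projection that combines GROUP BY with ORDER BY, which the server is expected to reject with ILLEGAL_PROJECTION.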
+-- https://github.com/ClickHouse/ClickHouse/issues/44318 + +DROP TABLE IF EXISTS t; +drop table if exists tp; + +create table tp (type Int32, eventcnt UInt64, projection p (select sum(eventcnt), type group by type order by sum(eventcnt))) engine = MergeTree order by type; -- { serverError ILLEGAL_PROJECTION } + +drop table if exists tp; diff --git a/parser/testdata/01710_projection_in_index/ast.json b/parser/testdata/01710_projection_in_index/ast.json new file mode 100644 index 000000000..4c6d5b34f --- /dev/null +++ b/parser/testdata/01710_projection_in_index/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001015802, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01710_projection_in_index/metadata.json b/parser/testdata/01710_projection_in_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_in_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_in_index/query.sql b/parser/testdata/01710_projection_in_index/query.sql new file mode 100644 index 000000000..bd2261880 --- /dev/null +++ b/parser/testdata/01710_projection_in_index/query.sql @@ -0,0 +1,24 @@ +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +drop table if exists t; + +create table t (i int, j int, k int, projection p (select * order by j)) engine MergeTree order by i settings index_granularity = 1; + +insert into t select number, number, number from numbers(10); + +set optimize_use_projections = 1, max_rows_to_read = 3; +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +select * from t where i < 5 and j in (1, 2); + +drop table t; + +drop table if exists test; + +create table test (name String, time Int64) engine MergeTree order by time; + +insert into test values ('hello world', 1662336000241); + +select count() from (select fromUnixTimestamp64Milli(time, 'UTC') time_fmt, name from test where time_fmt > '2022-09-05 00:00:00'); + +drop table test; diff --git a/parser/testdata/01710_projection_in_set/ast.json b/parser/testdata/01710_projection_in_set/ast.json new file mode 100644 index 000000000..741d4feaa --- /dev/null +++ b/parser/testdata/01710_projection_in_set/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery x (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001219629, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01710_projection_in_set/metadata.json b/parser/testdata/01710_projection_in_set/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_in_set/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_in_set/query.sql b/parser/testdata/01710_projection_in_set/query.sql new file mode 100644 index 000000000..3d1abd0c0 --- /dev/null +++ b/parser/testdata/01710_projection_in_set/query.sql @@ -0,0 +1,21 @@ +drop table if exists x; +create table x (i UInt64, j UInt64, k UInt64, projection agg (select sum(j), avg(k) group by i), projection norm (select j, k order by i)) engine MergeTree order by tuple(); + +insert into x values (1, 2, 3); + +set 
optimize_use_projections = 1, use_index_for_in_with_subqueries = 0; + +select sum(j), avg(k) from x where i in (select number from numbers(4)); + +select j, k from x where i in (select number from numbers(4)); + +drop table x; + +-- Projection analysis should not break other IN constructs. See https://github.com/ClickHouse/ClickHouse/issues/35336 +create table if not exists flows (SrcAS UInt32, Bytes UInt64) engine MergeTree() order by tuple(); + +insert into table flows values (15169, 83948), (12322, 98989), (60068, 99990), (15169, 89898), (15169, 83948), (15169, 89898), (15169, 83948), (15169, 89898), (15169, 83948), (15169, 89898), (15169, 83948), (15169, 89898); + +select if(SrcAS in (select SrcAS from flows group by SrcAS order by sum(Bytes) desc limit 10) , SrcAS, 33) as SrcAS from flows where 2 == 2 order by SrcAS; + +drop table flows; diff --git a/parser/testdata/01710_projection_materialize_with_missing_columns/ast.json b/parser/testdata/01710_projection_materialize_with_missing_columns/ast.json new file mode 100644 index 000000000..1b188053b --- /dev/null +++ b/parser/testdata/01710_projection_materialize_with_missing_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery x (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001079242, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01710_projection_materialize_with_missing_columns/metadata.json b/parser/testdata/01710_projection_materialize_with_missing_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_materialize_with_missing_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_materialize_with_missing_columns/query.sql b/parser/testdata/01710_projection_materialize_with_missing_columns/query.sql new file mode 100644 index 000000000..28bf1c050 --- /dev/null +++ b/parser/testdata/01710_projection_materialize_with_missing_columns/query.sql @@ -0,0 +1,9 @@ +drop table if exists x; + +create table x (i int) engine MergeTree order by tuple(); +insert into x values (1); +alter table x add column j int; +alter table x add projection p_agg (select sum(j)); +alter table x materialize projection p_agg settings mutations_sync = 1; + +drop table x; diff --git a/parser/testdata/01710_projection_mutation/ast.json b/parser/testdata/01710_projection_mutation/ast.json new file mode 100644 index 000000000..2177239f2 --- /dev/null +++ b/parser/testdata/01710_projection_mutation/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001093762, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01710_projection_mutation/metadata.json b/parser/testdata/01710_projection_mutation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_mutation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_mutation/query.sql b/parser/testdata/01710_projection_mutation/query.sql new file mode 100644 index 000000000..d963cde74 --- /dev/null +++ b/parser/testdata/01710_projection_mutation/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS t; + +CREATE TABLE t 
(`key` UInt32, `created_at` Date, `value` UInt32, PROJECTION xxx (SELECT key, created_at, sum(value) GROUP BY key, created_at)) ENGINE = MergeTree PARTITION BY toYYYYMM(created_at) ORDER BY key; + +INSERT INTO t SELECT 1 AS key, today() + (number % 30), number FROM numbers(1000); + +ALTER TABLE t UPDATE value = 0 WHERE (value > 0) AND (created_at >= '2021-12-21') SETTINGS optimize_use_projections = 1; + +DROP TABLE IF EXISTS t; diff --git a/parser/testdata/01710_projection_optimize_aggregators_of_group_by_keys/ast.json b/parser/testdata/01710_projection_optimize_aggregators_of_group_by_keys/ast.json new file mode 100644 index 000000000..c71dcf57a --- /dev/null +++ b/parser/testdata/01710_projection_optimize_aggregators_of_group_by_keys/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery proj (children 1)" + }, + { + "explain": " Identifier proj" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001309897, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01710_projection_optimize_aggregators_of_group_by_keys/metadata.json b/parser/testdata/01710_projection_optimize_aggregators_of_group_by_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_optimize_aggregators_of_group_by_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_optimize_aggregators_of_group_by_keys/query.sql b/parser/testdata/01710_projection_optimize_aggregators_of_group_by_keys/query.sql new file mode 100644 index 000000000..ef80dcd71 --- /dev/null +++ b/parser/testdata/01710_projection_optimize_aggregators_of_group_by_keys/query.sql @@ -0,0 +1,7 @@ +drop table if exists proj; + +CREATE TABLE proj(date Date, PROJECTION maxdate( SELECT max(date) GROUP BY date )) ENGINE = MergeTree ORDER BY tuple() as select toDate('2012-10-24')-number%100 from numbers(1e2); + +SELECT max(date) FROM proj PREWHERE date != '2012-10-24'; + +drop table proj; diff --git a/parser/testdata/01710_projection_optimize_group_by_function_keys/ast.json b/parser/testdata/01710_projection_optimize_group_by_function_keys/ast.json new file mode 100644 index 000000000..240f5892f --- /dev/null +++ b/parser/testdata/01710_projection_optimize_group_by_function_keys/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery proj (children 1)" + }, + { + "explain": " Identifier proj" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001035415, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01710_projection_optimize_group_by_function_keys/metadata.json b/parser/testdata/01710_projection_optimize_group_by_function_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_optimize_group_by_function_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_optimize_group_by_function_keys/query.sql b/parser/testdata/01710_projection_optimize_group_by_function_keys/query.sql new file mode 100644 index 000000000..bc9b31519 --- /dev/null +++ b/parser/testdata/01710_projection_optimize_group_by_function_keys/query.sql @@ -0,0 +1,31 @@ +drop table if exists proj; + +create table proj ( + bool_value UInt8, + zero_integer_value Int32, + integer_value Int32, + float_value Float32, + datetime_value DateTime, + string_value String, + 
projection test_projection ( + select + toStartOfDay (toDateTime (datetime_value)) as Day, + datetime_value, + float_value, + count( + distinct if(zero_integer_value = 1, string_value, NULL) + ) + group by + Day, + datetime_value, + float_value + ) + ) engine MergeTree +partition by + toDate (datetime_value) +order by + bool_value; + +insert into proj values (1, 1, 1, 1, '2012-10-24 21:30:00', 'ab'); + +drop table proj; diff --git a/parser/testdata/01710_projection_optimize_materialize/ast.json b/parser/testdata/01710_projection_optimize_materialize/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01710_projection_optimize_materialize/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01710_projection_optimize_materialize/metadata.json b/parser/testdata/01710_projection_optimize_materialize/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_optimize_materialize/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_optimize_materialize/query.sql b/parser/testdata/01710_projection_optimize_materialize/query.sql new file mode 100644 index 000000000..e704c3e56 --- /dev/null +++ b/parser/testdata/01710_projection_optimize_materialize/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-random-merge-tree-settings +drop table if exists z; + +create table z (pk Int64, d Date, id UInt64, c UInt64) Engine MergeTree partition by d order by pk settings ratio_of_defaults_for_sparse_serialization = 1.0; + +insert into z select number, '2021-10-24', intDiv (number, 10000), 1 from numbers(1000000); +optimize table z final; + +alter table z add projection pp (select id, sum(c) group by id); +alter table z materialize projection pp settings mutations_sync=1; + +SELECT name, partition, formatReadableSize(sum(data_compressed_bytes) AS size) AS compressed, formatReadableSize(sum(data_uncompressed_bytes) AS usize) AS uncompressed, round(usize / size, 2) AS compr_rate, sum(rows) AS rows, count() AS part_count FROM system.projection_parts WHERE database = currentDatabase() and table = 'z' AND active GROUP BY name, partition ORDER BY size DESC; + +drop table z; diff --git a/parser/testdata/01710_projection_part_check/ast.json b/parser/testdata/01710_projection_part_check/ast.json new file mode 100644 index 000000000..ddce3b495 --- /dev/null +++ b/parser/testdata/01710_projection_part_check/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tp (children 1)" + }, + { + "explain": " Identifier tp" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001128629, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01710_projection_part_check/metadata.json b/parser/testdata/01710_projection_part_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_part_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_part_check/query.sql b/parser/testdata/01710_projection_part_check/query.sql new file mode 100644 index 000000000..8f496ddbe --- /dev/null +++ b/parser/testdata/01710_projection_part_check/query.sql @@ -0,0 +1,26 @@ +drop table if exists tp; + +create table tp (x Int32, y Int32, projection p (select x, y order by x)) engine = MergeTree order by y settings min_rows_for_wide_part = 4, min_bytes_for_wide_part = 32; + +insert into tp select number, 
number from numbers(3); +insert into tp select number, number from numbers(5); + +check table tp settings check_query_single_value_result=0, max_threads=1; + +drop table tp; + +create table tp (p Date, k UInt64, v1 UInt64, v2 Int64, projection p1 ( select p, sum(k), sum(v1), sum(v2) group by p) ) engine = MergeTree partition by toYYYYMM(p) order by k settings min_bytes_for_wide_part = 0; + +insert into tp (p, k, v1, v2) values ('2018-05-15', 1, 1000, 2000), ('2018-05-16', 2, 3000, 4000), ('2018-05-17', 3, 5000, 6000), ('2018-05-18', 4, 7000, 8000); + +check table tp settings check_query_single_value_result=0, max_threads=1; + +drop table tp; + +drop table if exists tp; +create table tp (x int, projection p (select sum(x))) engine = MergeTree order by x settings min_rows_for_wide_part = 2, min_bytes_for_wide_part = 0; +insert into tp values (1), (2), (3), (4); +select part_type from system.parts where database = currentDatabase() and table = 'tp'; +select part_type from system.projection_parts where database = currentDatabase() and table = 'tp'; +check table tp settings check_query_single_value_result=0, max_threads=1; +drop table tp; diff --git a/parser/testdata/01710_projection_pk_trivial_count/ast.json b/parser/testdata/01710_projection_pk_trivial_count/ast.json new file mode 100644 index 000000000..b9e80e8d4 --- /dev/null +++ b/parser/testdata/01710_projection_pk_trivial_count/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery x (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001140122, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01710_projection_pk_trivial_count/metadata.json b/parser/testdata/01710_projection_pk_trivial_count/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_pk_trivial_count/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_pk_trivial_count/query.sql b/parser/testdata/01710_projection_pk_trivial_count/query.sql new file mode 100644 index 000000000..53ecf4e80 --- /dev/null +++ b/parser/testdata/01710_projection_pk_trivial_count/query.sql @@ -0,0 +1,10 @@ +drop table if exists x; + +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +create table x (i int) engine MergeTree order by i settings index_granularity = 3; +insert into x select * from numbers(10); +select trimLeft(*) from (explain select count() from x where (i >= 3 and i <= 6) or i = 7) where explain like '%ReadFromPreparedSource%' or explain like '%ReadFromMergeTree%'; +select count() from x where (i >= 3 and i <= 6) or i = 7; + +drop table x; diff --git a/parser/testdata/01710_projection_query_plan_optimization_misc/ast.json b/parser/testdata/01710_projection_query_plan_optimization_misc/ast.json new file mode 100644 index 000000000..8a5044952 --- /dev/null +++ b/parser/testdata/01710_projection_query_plan_optimization_misc/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001053377, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01710_projection_query_plan_optimization_misc/metadata.json 
b/parser/testdata/01710_projection_query_plan_optimization_misc/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_query_plan_optimization_misc/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_query_plan_optimization_misc/query.sql b/parser/testdata/01710_projection_query_plan_optimization_misc/query.sql new file mode 100644 index 000000000..cb5653133 --- /dev/null +++ b/parser/testdata/01710_projection_query_plan_optimization_misc/query.sql @@ -0,0 +1,11 @@ +drop table if exists t; + +create table t (x Int32, codectest Int32) engine = MergeTree order by x; + +alter table t add projection x (select * order by codectest); + +insert into t values (1, 2); + +select * from merge('', 't'); + +drop table t; diff --git a/parser/testdata/01710_projection_row_policy/ast.json b/parser/testdata/01710_projection_row_policy/ast.json new file mode 100644 index 000000000..304479fa5 --- /dev/null +++ b/parser/testdata/01710_projection_row_policy/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001182801, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01710_projection_row_policy/metadata.json b/parser/testdata/01710_projection_row_policy/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_row_policy/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_row_policy/query.sql b/parser/testdata/01710_projection_row_policy/query.sql new file mode 100644 index 000000000..a54cc50b9 --- /dev/null +++ b/parser/testdata/01710_projection_row_policy/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS t; + +CREATE TABLE t(a UInt32, b UInt32) ENGINE = MergeTree PARTITION BY a ORDER BY a; + +INSERT INTO t SELECT number % 10, number FROM numbers(10000); + +CREATE ROW POLICY OR REPLACE rp ON t FOR SELECT USING 0 TO ALL; + +SELECT count(), min(a), max(a) FROM t; + +DROP ROW POLICY rp ON t; + +DROP TABLE t; diff --git a/parser/testdata/01710_projection_vertical_merges/ast.json b/parser/testdata/01710_projection_vertical_merges/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01710_projection_vertical_merges/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01710_projection_vertical_merges/metadata.json b/parser/testdata/01710_projection_vertical_merges/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_vertical_merges/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_vertical_merges/query.sql b/parser/testdata/01710_projection_vertical_merges/query.sql new file mode 100644 index 000000000..38a02c227 --- /dev/null +++ b/parser/testdata/01710_projection_vertical_merges/query.sql @@ -0,0 +1,21 @@ +-- Tags: long, no-parallel, no-msan, no-tsan, no-asan +-- the no-parallel and no-sanitizer tags are set to prevent a timeout of this test + +drop table if exists t; + +create table t (c1 Int64, c2 String, c3 DateTime, c4 Int8, c5 String, c6 String, c7 String, c8 String, c9 String, c10 String, c11 String, c12 String, c13 Int8, c14 Int64, c15 String, c16 String, c17 String, c18 Int64, c19 Int64, c20 Int64) engine MergeTree order by c18 SETTINGS
index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into t (c1, c18) select number, -number from numbers(2000000); + +alter table t add projection p_norm (select * order by c1); + +optimize table t final; + +alter table t materialize projection p_norm settings mutations_sync = 1; + +set optimize_use_projections = 1, max_rows_to_read = 3; +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +select c18 from t where c1 < 0; + +drop table t; diff --git a/parser/testdata/01710_projection_with_alter_conversions/ast.json b/parser/testdata/01710_projection_with_alter_conversions/ast.json new file mode 100644 index 000000000..505add7a5 --- /dev/null +++ b/parser/testdata/01710_projection_with_alter_conversions/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001267703, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01710_projection_with_alter_conversions/metadata.json b/parser/testdata/01710_projection_with_alter_conversions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_with_alter_conversions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_with_alter_conversions/query.sql b/parser/testdata/01710_projection_with_alter_conversions/query.sql new file mode 100644 index 000000000..649a07b9b --- /dev/null +++ b/parser/testdata/01710_projection_with_alter_conversions/query.sql @@ -0,0 +1,15 @@ +drop table if exists t; + +create table t (i int, j int, projection p (select i order by i)) engine MergeTree order by tuple(); + +insert into t values (1, 2); + +system stop merges t; + +set alter_sync = 0; + +alter table t rename column j to k; + +select * from t; + +drop table t; diff --git a/parser/testdata/01710_projection_with_ast_rewrite_settings/ast.json b/parser/testdata/01710_projection_with_ast_rewrite_settings/ast.json new file mode 100644 index 000000000..ce31c939e --- /dev/null +++ b/parser/testdata/01710_projection_with_ast_rewrite_settings/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery aggregate_functions_null_for_empty (children 1)" + }, + { + "explain": " Identifier aggregate_functions_null_for_empty" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001001604, + "rows_read": 2, + "bytes_read": 120 + } +} diff --git a/parser/testdata/01710_projection_with_ast_rewrite_settings/metadata.json b/parser/testdata/01710_projection_with_ast_rewrite_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_with_ast_rewrite_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_with_ast_rewrite_settings/query.sql b/parser/testdata/01710_projection_with_ast_rewrite_settings/query.sql new file mode 100644 index 000000000..1286b0e74 --- /dev/null +++ b/parser/testdata/01710_projection_with_ast_rewrite_settings/query.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS aggregate_functions_null_for_empty; + +CREATE TABLE aggregate_functions_null_for_empty (`x` UInt32, `y` UInt64, PROJECTION p (SELECT sum(y))) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO 
aggregate_functions_null_for_empty SELECT number, number * 2 FROM numbers(8192 * 10) SETTINGS aggregate_functions_null_for_empty = true; + +SELECT count() FROM aggregate_functions_null_for_empty; + +DROP TABLE aggregate_functions_null_for_empty; + +DROP TABLE IF EXISTS transform_null_in; + +CREATE TABLE transform_null_in (`x` UInt32, `y` UInt64, PROJECTION p (SELECT sum(y in (1,2,3)))) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO transform_null_in SELECT number, number * 2 FROM numbers(8192 * 10) SETTINGS transform_null_in = true; + +SELECT count() FROM transform_null_in; + +DROP TABLE transform_null_in; + +DROP TABLE IF EXISTS legacy_column_name_of_tuple_literal; + +CREATE TABLE legacy_column_name_of_tuple_literal (`x` UInt32, `y` UInt64, PROJECTION p (SELECT sum(y in (1,2,3)))) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO legacy_column_name_of_tuple_literal SELECT number, number * 2 FROM numbers(8192 * 10) SETTINGS legacy_column_name_of_tuple_literal = true; + +SELECT count() FROM legacy_column_name_of_tuple_literal; + +DROP TABLE legacy_column_name_of_tuple_literal; diff --git a/parser/testdata/01710_projection_with_column_transformers/ast.json b/parser/testdata/01710_projection_with_column_transformers/ast.json new file mode 100644 index 000000000..6e22d2079 --- /dev/null +++ b/parser/testdata/01710_projection_with_column_transformers/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery foo (children 1)" + }, + { + "explain": " Identifier foo" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001451479, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01710_projection_with_column_transformers/metadata.json b/parser/testdata/01710_projection_with_column_transformers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_with_column_transformers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_with_column_transformers/query.sql b/parser/testdata/01710_projection_with_column_transformers/query.sql new file mode 100644 index 000000000..90d92f68f --- /dev/null +++ b/parser/testdata/01710_projection_with_column_transformers/query.sql @@ -0,0 +1,11 @@ +drop table if exists foo; + +create table foo(bar String, projection p (select * apply groupUniqArray(100))) engine MergeTree order by bar; + +show create foo; + +detach table foo; + +attach table foo; + +drop table foo; diff --git a/parser/testdata/01710_projection_with_joins/ast.json b/parser/testdata/01710_projection_with_joins/ast.json new file mode 100644 index 000000000..3f1b9925c --- /dev/null +++ b/parser/testdata/01710_projection_with_joins/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001217254, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01710_projection_with_joins/metadata.json b/parser/testdata/01710_projection_with_joins/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_with_joins/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_with_joins/query.sql b/parser/testdata/01710_projection_with_joins/query.sql new file mode 100644 index 000000000..94c15d00e --- 
/dev/null +++ b/parser/testdata/01710_projection_with_joins/query.sql @@ -0,0 +1,21 @@ +drop table if exists t; + +create table t (s UInt16, l UInt16, projection p (select s, l order by l)) engine MergeTree order by s; + +select s from t join (select toUInt16(1) as s) x using (s) order by s settings optimize_use_projections = 1; +select s from t join (select toUInt16(1) as s) x using (s) order by s settings optimize_use_projections = 0; + +drop table t; + +drop table if exists mt; +create table mt (id1 Int8, id2 Int8) Engine=MergeTree order by tuple(); +select alias1 from (select id1, id1 as alias1 from mt) as l all inner join (select id2 as alias1 from mt) as t using (alias1) order by l.id1 settings optimize_use_projections = 1; +select id1 from mt all inner join (select id2 as id1 from mt) as t using (id1) order by id1 settings optimize_use_projections = 1; +select id2 as id1 from mt all inner join (select id1 from mt) as t using (id1) order by id1 settings optimize_use_projections = 1; +drop table mt; + +drop table if exists j; +create table j (id1 Int8, id2 Int8, projection p (select id1, id2 order by id2)) Engine=MergeTree order by id1 settings index_granularity = 1; +insert into j select number, number from numbers(10); +select alias1 from (select id1, id1 as alias1 from j) as l all inner join (select id2, id2 as alias1 from j where id2 in (1, 2, 3)) as t using (alias1) where id2 in (2, 3, 4) order by id1 settings optimize_use_projections = 1; +drop table j; diff --git a/parser/testdata/01710_projection_with_mixed_pipeline/ast.json b/parser/testdata/01710_projection_with_mixed_pipeline/ast.json new file mode 100644 index 000000000..659f584a1 --- /dev/null +++ b/parser/testdata/01710_projection_with_mixed_pipeline/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000998977, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01710_projection_with_mixed_pipeline/metadata.json b/parser/testdata/01710_projection_with_mixed_pipeline/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_with_mixed_pipeline/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_with_mixed_pipeline/query.sql b/parser/testdata/01710_projection_with_mixed_pipeline/query.sql new file mode 100644 index 000000000..2bf2cc487 --- /dev/null +++ b/parser/testdata/01710_projection_with_mixed_pipeline/query.sql @@ -0,0 +1,9 @@ +drop table if exists t; + +create table t (x UInt32) engine = MergeTree order by tuple() settings index_granularity = 8; +insert into t select number from numbers(100); +alter table t add projection p (select uniqHLL12(x)); +insert into t select number + 100 from numbers(100); +select uniqHLL12(x) from t settings optimize_use_projections = 1, max_bytes_to_read=400, max_block_size=8; -- { serverError TOO_MANY_BYTES } + +drop table if exists t; diff --git a/parser/testdata/01710_projection_with_nullable_keys/ast.json b/parser/testdata/01710_projection_with_nullable_keys/ast.json new file mode 100644 index 000000000..b0e85f0a3 --- /dev/null +++ b/parser/testdata/01710_projection_with_nullable_keys/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery sales (children 1)" + }, + { + "explain": " 
Identifier sales" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001151705, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/01710_projection_with_nullable_keys/metadata.json b/parser/testdata/01710_projection_with_nullable_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projection_with_nullable_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projection_with_nullable_keys/query.sql b/parser/testdata/01710_projection_with_nullable_keys/query.sql new file mode 100644 index 000000000..72757a1d7 --- /dev/null +++ b/parser/testdata/01710_projection_with_nullable_keys/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS sales; + +CREATE TABLE sales (DATE_SOLD DateTime64(3, 'UTC'), PRODUCT_ID Nullable(String)) Engine MergeTree() PARTITION BY toYYYYMM(DATE_SOLD) ORDER BY DATE_SOLD; + +ALTER TABLE sales ADD PROJECTION test (SELECT toInt64(COUNT(*)) GROUP BY PRODUCT_ID, DATE_SOLD); + +SHOW CREATE sales; + +DROP TABLE sales; diff --git a/parser/testdata/01710_projections/ast.json b/parser/testdata/01710_projections/ast.json new file mode 100644 index 000000000..c1f8b705f --- /dev/null +++ b/parser/testdata/01710_projections/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery projection_test (children 1)" + }, + { + "explain": " Identifier projection_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001040205, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/01710_projections/metadata.json b/parser/testdata/01710_projections/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projections/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projections/query.sql b/parser/testdata/01710_projections/query.sql new file mode 100644 index 000000000..518317a68 --- /dev/null +++ b/parser/testdata/01710_projections/query.sql @@ -0,0 +1,53 @@ +drop table if exists projection_test; + +create table projection_test (`sum(block_count)` UInt64, domain_alias UInt64 alias length(domain), datetime DateTime, domain LowCardinality(String), x_id String, y_id String, block_count Int64, retry_count Int64, duration Int64, kbytes Int64, buffer_time Int64, first_time Int64, total_bytes Nullable(UInt64), valid_bytes Nullable(UInt64), completed_bytes Nullable(UInt64), fixed_bytes Nullable(UInt64), force_bytes Nullable(UInt64), projection p (select toStartOfMinute(datetime) dt_m, countIf(first_time = 0) / count(), avg((kbytes * 8) / duration), count(), sum(block_count) / sum(duration), avg(block_count / duration), sum(buffer_time) / sum(duration), avg(buffer_time / duration), sum(valid_bytes) / sum(total_bytes), sum(completed_bytes) / sum(total_bytes), sum(fixed_bytes) / sum(total_bytes), sum(force_bytes) / sum(total_bytes), sum(valid_bytes) / sum(total_bytes), sum(retry_count) / sum(duration), avg(retry_count / duration), countIf(block_count > 0) / count(), countIf(first_time = 0) / count(), uniqHLL12(x_id), uniqHLL12(y_id) group by dt_m, domain)) engine MergeTree partition by toDate(datetime) order by toStartOfTenMinutes(datetime) settings index_granularity_bytes = 10000000; + +insert into projection_test with rowNumberInAllBlocks() as id select 1, toDateTime('2020-10-24 00:00:00') + (id / 20), toString(id % 100), * from generateRandom('x_id String, y_id String, block_count Int64, retry_count 
Int64, duration Int64, kbytes Int64, buffer_time Int64, first_time Int64, total_bytes Nullable(UInt64), valid_bytes Nullable(UInt64), completed_bytes Nullable(UInt64), fixed_bytes Nullable(UInt64), force_bytes Nullable(UInt64)', 10, 10, 1) limit 1000 settings max_threads = 1; + +set optimize_use_projections = 1, force_optimize_projection = 1; +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +select * from projection_test; -- { serverError PROJECTION_NOT_USED } +select toStartOfMinute(datetime) dt_m, countIf(first_time = 0) from projection_test join (select 1) x on 1 where domain = '1' group by dt_m order by dt_m; -- { serverError PROJECTION_NOT_USED } + +select toStartOfMinute(datetime) dt_m, countIf(first_time = 0) / count(), avg((kbytes * 8) / duration) from projection_test where domain = '1' group by dt_m order by dt_m; + +-- prewhere with alias +select toStartOfMinute(datetime) dt_m, countIf(first_time = 0) / count(), avg((kbytes * 8) / duration) from projection_test prewhere domain_alias = 3 where domain = '1' group by dt_m order by dt_m; + +drop row policy if exists filter on projection_test; +create row policy filter on projection_test using (domain = 'non_existing_domain') to all; +-- prewhere with alias, with a row policy (non-existing) +select toStartOfMinute(datetime) dt_m, countIf(first_time = 0) / count(), avg((kbytes * 8) / duration) from projection_test prewhere domain_alias = 1 where domain = '1' group by dt_m order by dt_m; +drop row policy filter on projection_test; + +-- TODO There is a bug in the row policy filter (not related to projections; it crashes on master) +-- drop row policy if exists filter on projection_test; +-- create row policy filter on projection_test using (domain != '1') to all; +-- prewhere with alias, with a row policy (existing) +-- select toStartOfMinute(datetime) dt_m, countIf(first_time = 0) / count(), avg((kbytes * 8) / duration) from projection_test prewhere domain_alias = 1 where domain = '1' group by dt_m order by dt_m; +-- drop row policy filter on projection_test; + +select toStartOfMinute(datetime) dt_m, count(), sum(block_count) / sum(duration), avg(block_count / duration) from projection_test group by dt_m order by dt_m; + +-- TODO figure out how to deal with conflicting column names +-- select toStartOfMinute(datetime) dt_m, count(), sum(block_count) / sum(duration), avg(block_count / duration) from projection_test where `sum(block_count)` = 1 group by dt_m order by dt_m; + +select toStartOfMinute(datetime) dt_m, sum(buffer_time) / sum(duration), avg(buffer_time / duration), sum(valid_bytes) / sum(total_bytes), sum(completed_bytes) / sum(total_bytes), sum(fixed_bytes) / sum(total_bytes), sum(force_bytes) / sum(total_bytes), sum(valid_bytes) / sum(total_bytes) from projection_test where domain in ('12', '14') group by dt_m order by dt_m; + +select toStartOfMinute(datetime) dt_m, domain, sum(retry_count) / sum(duration), avg(retry_count / duration), countIf(block_count > 0) / count(), countIf(first_time = 0) / count() from projection_test group by dt_m, domain having domain = '19' order by dt_m, domain; + +select toStartOfHour(toStartOfMinute(datetime)) dt_h, uniqHLL12(x_id), uniqHLL12(y_id) from projection_test group by dt_h order by dt_h; + +-- found by fuzzer +SET enable_positional_arguments = 0, force_optimize_projection = 0; +SELECT 2, -1 FROM projection_test PREWHERE domain_alias = 1.
WHERE domain = NULL GROUP BY -9223372036854775808 ORDER BY countIf(first_time = 0) / count(-2147483649) DESC NULLS LAST, 1048576 DESC NULLS LAST; + +drop table if exists projection_test; + +drop table if exists projection_without_key; +create table projection_without_key (key UInt32, PROJECTION x (SELECT max(key))) engine MergeTree order by key; +insert into projection_without_key select number from numbers(1000); +set force_optimize_projection = 1, optimize_use_projections = 1; +select max(key) from projection_without_key; +drop table projection_without_key; diff --git a/parser/testdata/01710_projections_and_duplicate_columms/ast.json b/parser/testdata/01710_projections_and_duplicate_columms/ast.json new file mode 100644 index 000000000..2cb0c70dd --- /dev/null +++ b/parser/testdata/01710_projections_and_duplicate_columms/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery projection_test__fuzz_0 (children 1)" + }, + { + "explain": " Identifier projection_test__fuzz_0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001212223, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/01710_projections_and_duplicate_columms/metadata.json b/parser/testdata/01710_projections_and_duplicate_columms/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projections_and_duplicate_columms/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projections_and_duplicate_columms/query.sql b/parser/testdata/01710_projections_and_duplicate_columms/query.sql new file mode 100644 index 000000000..74a7aea41 --- /dev/null +++ b/parser/testdata/01710_projections_and_duplicate_columms/query.sql @@ -0,0 +1,8 @@ +drop table if exists projection_test__fuzz_0; +set allow_suspicious_low_cardinality_types=1; + +CREATE TABLE projection_test__fuzz_0 (`sum(block_count)` UInt64, `domain_alias` UInt64 ALIAS length(domain), `datetime` DateTime, `domain` LowCardinality(String), `x_id` String, `y_id` String, `block_count` Int64, `retry_count` Int64, `duration` Decimal(76, 13), `kbytes` LowCardinality(Int64), `buffer_time` Int64, `first_time` UInt256, `total_bytes` LowCardinality(Nullable(UInt64)), `valid_bytes` Nullable(UInt64), `completed_bytes` Nullable(UInt64), `fixed_bytes` LowCardinality(Nullable(UInt64)), `force_bytes` Int256, PROJECTION p (SELECT toStartOfMinute(datetime) AS dt_m, countIf(first_time = 0) / count(), avg((kbytes * 8) / duration), count(), sum(block_count) / sum(duration), avg(block_count / duration), sum(buffer_time) / sum(duration), avg(buffer_time / duration), sum(valid_bytes) / sum(total_bytes), sum(completed_bytes) / sum(total_bytes), sum(fixed_bytes) / sum(total_bytes), sum(force_bytes) / sum(total_bytes), sum(valid_bytes) / sum(total_bytes), sum(retry_count) / sum(duration), avg(retry_count / duration), countIf(block_count > 0) / count(), countIf(first_time = 0) / count(), uniqHLL12(x_id), uniqHLL12(y_id) GROUP BY dt_m, domain)) ENGINE = MergeTree PARTITION BY toDate(datetime) ORDER BY (toStartOfTenMinutes(datetime), domain) SETTINGS index_granularity_bytes = 10000000; +INSERT INTO projection_test__fuzz_0 SETTINGS max_threads = 1 WITH rowNumberInAllBlocks() AS id SELECT 1, toDateTime('2020-10-24 00:00:00') + (id / 20), toString(id % 100), * FROM generateRandom('x_id String, y_id String, block_count Int64, retry_count Int64, duration Int64, kbytes Int64, buffer_time Int64, first_time Int64, total_bytes 
Nullable(UInt64), valid_bytes Nullable(UInt64), completed_bytes Nullable(UInt64), fixed_bytes Nullable(UInt64), force_bytes Nullable(UInt64)', 10, 10, 1) LIMIT 1000 SETTINGS max_threads = 1; +SELECT '-21474836.48', 10000000000., '', count(kbytes), '', 10.0001, toStartOfMinute(datetime) AS dt_m, 10, NULL FROM projection_test__fuzz_0 GROUP BY dt_m WITH ROLLUP WITH TOTALS ORDER BY count(retry_count / duration) ASC NULLS LAST, 100000000000000000000. ASC NULLS FIRST format Null; + +drop table projection_test__fuzz_0; diff --git a/parser/testdata/01710_projections_group_by_no_key/ast.json b/parser/testdata/01710_projections_group_by_no_key/ast.json new file mode 100644 index 000000000..a5daf7a74 --- /dev/null +++ b/parser/testdata/01710_projections_group_by_no_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery projection_without_key (children 1)" + }, + { + "explain": " Identifier projection_without_key" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001014713, + "rows_read": 2, + "bytes_read": 96 + } +} diff --git a/parser/testdata/01710_projections_group_by_no_key/metadata.json b/parser/testdata/01710_projections_group_by_no_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projections_group_by_no_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projections_group_by_no_key/query.sql b/parser/testdata/01710_projections_group_by_no_key/query.sql new file mode 100644 index 000000000..98545bdd0 --- /dev/null +++ b/parser/testdata/01710_projections_group_by_no_key/query.sql @@ -0,0 +1,8 @@ +drop table if exists projection_without_key; + +create table projection_without_key (key UInt32, PROJECTION x (SELECT sum(key) group by key % 3)) engine MergeTree order by key; +insert into projection_without_key select number from numbers(1000); +select sum(key) from projection_without_key settings optimize_use_projections = 1; +select sum(key) from projection_without_key settings optimize_use_projections = 0; + +drop table projection_without_key; diff --git a/parser/testdata/01710_projections_in_distributed_query/ast.json b/parser/testdata/01710_projections_in_distributed_query/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01710_projections_in_distributed_query/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01710_projections_in_distributed_query/metadata.json b/parser/testdata/01710_projections_in_distributed_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projections_in_distributed_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projections_in_distributed_query/query.sql b/parser/testdata/01710_projections_in_distributed_query/query.sql new file mode 100644 index 000000000..412440318 --- /dev/null +++ b/parser/testdata/01710_projections_in_distributed_query/query.sql @@ -0,0 +1,25 @@ +-- Tags: distributed + +set enable_memory_bound_merging_of_aggregation_results=0; + +drop table if exists projection_test; + +create table projection_test (dt DateTime, cost Int64, projection p (select toStartOfMinute(dt) dt_m, sum(cost) group by dt_m)) engine MergeTree partition by toDate(dt) order by dt; + +insert into projection_test with rowNumberInAllBlocks() as id select toDateTime('2020-10-24 00:00:00') + (id / 20), * from generateRandom('cost Int64', 10, 10, 1) 
limit 1000 settings max_threads = 1; + +set optimize_use_projections = 1, force_optimize_projection = 1; +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +select toStartOfMinute(dt) dt_m, sum(cost) from projection_test group by dt_m; +select sum(cost) from projection_test; + +drop table if exists projection_test_d; + +create table projection_test_d (dt DateTime, cost Int64) engine Distributed(test_cluster_two_shards, currentDatabase(), projection_test); + +select toStartOfMinute(dt) dt_m, sum(cost) from projection_test_d group by dt_m; +select sum(cost) from projection_test_d; + +drop table projection_test; +drop table projection_test_d; diff --git a/parser/testdata/01710_projections_order_by_complete/ast.json b/parser/testdata/01710_projections_order_by_complete/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01710_projections_order_by_complete/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01710_projections_order_by_complete/metadata.json b/parser/testdata/01710_projections_order_by_complete/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projections_order_by_complete/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projections_order_by_complete/query.sql b/parser/testdata/01710_projections_order_by_complete/query.sql new file mode 100644 index 000000000..d8126d32b --- /dev/null +++ b/parser/testdata/01710_projections_order_by_complete/query.sql @@ -0,0 +1,16 @@ +-- Test from https://github.com/ClickHouse/ClickHouse/issues/37673 + +drop table if exists data_proj_order_by_comp; +create table data_proj_order_by_comp (t UInt64, projection tSort (select * order by t)) ENGINE MergeTree() order by tuple(); + +system stop merges data_proj_order_by_comp; + +insert into data_proj_order_by_comp values (5); +insert into data_proj_order_by_comp values (5); +insert into data_proj_order_by_comp values (6); + +-- { echoOn } +select t from data_proj_order_by_comp where t > 0 order by t settings optimize_read_in_order=1; +select t from data_proj_order_by_comp where t > 0 order by t settings optimize_read_in_order=0; +select t from data_proj_order_by_comp where t > 0 order by t settings max_threads=1; +-- { echoOff } diff --git a/parser/testdata/01710_projections_order_by_incomplete/ast.json b/parser/testdata/01710_projections_order_by_incomplete/ast.json new file mode 100644 index 000000000..b9e2bbeb6 --- /dev/null +++ b/parser/testdata/01710_projections_order_by_incomplete/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_proj_order_by_incomp (children 1)" + }, + { + "explain": " Identifier data_proj_order_by_incomp" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001072113, + "rows_read": 2, + "bytes_read": 102 + } +} diff --git a/parser/testdata/01710_projections_order_by_incomplete/metadata.json b/parser/testdata/01710_projections_order_by_incomplete/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_projections_order_by_incomplete/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_projections_order_by_incomplete/query.sql b/parser/testdata/01710_projections_order_by_incomplete/query.sql new file mode 100644 index 000000000..3ac5f22af --- /dev/null +++ 
b/parser/testdata/01710_projections_order_by_incomplete/query.sql @@ -0,0 +1,16 @@ +drop table if exists data_proj_order_by_incomp; +create table data_proj_order_by_incomp (t UInt64) ENGINE MergeTree() order by tuple(); + +system stop merges data_proj_order_by_incomp; + +insert into data_proj_order_by_incomp values (5); +insert into data_proj_order_by_incomp values (5); + +alter table data_proj_order_by_incomp add projection tSort (select * order by t); +insert into data_proj_order_by_incomp values (6); + +-- { echoOn } +select t from data_proj_order_by_incomp where t > 0 order by t settings optimize_read_in_order=1; +select t from data_proj_order_by_incomp where t > 0 order by t settings optimize_read_in_order=0; +select t from data_proj_order_by_incomp where t > 0 order by t settings max_threads=1; +-- { echoOff } diff --git a/parser/testdata/01710_query_log_with_projection_info/ast.json b/parser/testdata/01710_query_log_with_projection_info/ast.json new file mode 100644 index 000000000..8d1957565 --- /dev/null +++ b/parser/testdata/01710_query_log_with_projection_info/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001034008, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01710_query_log_with_projection_info/metadata.json b/parser/testdata/01710_query_log_with_projection_info/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01710_query_log_with_projection_info/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01710_query_log_with_projection_info/query.sql b/parser/testdata/01710_query_log_with_projection_info/query.sql new file mode 100644 index 000000000..d755a5a46 --- /dev/null +++ b/parser/testdata/01710_query_log_with_projection_info/query.sql @@ -0,0 +1,67 @@ +set log_queries=1; +set log_queries_min_type='QUERY_FINISH'; +set optimize_use_implicit_projections=1; + +DROP TABLE IF EXISTS t; + +CREATE TABLE t +( + `id` UInt64, + `id2` UInt64, + `id3` UInt64, + PROJECTION t_normal + ( + SELECT + id, + id2, + id3 + ORDER BY + id2, + id, + id3 + ), + PROJECTION t_agg + ( + SELECT + sum(id3) + GROUP BY id2 + ) +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity = 8; + +insert into t SELECT number, -number, number FROM numbers(10000); + +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; +SELECT * FROM t WHERE id2 = 3 FORMAT Null; +SELECT sum(id3) FROM t GROUP BY id2 FORMAT Null; +SELECT min(id) FROM t FORMAT Null; + +SYSTEM FLUSH LOGS query_log; + +SELECT + -- Remove the prefix, which is the database name and varies between runs. + arrayStringConcat(arrayPopFront(splitByString('.', projections[1])), '.') +FROM + system.query_log +WHERE + current_database=currentDatabase() and query = 'SELECT * FROM t WHERE id2 = 3 FORMAT Null;'; + +SELECT + -- Remove the prefix, which is the database name and varies between runs. + arrayStringConcat(arrayPopFront(splitByString('.', projections[1])), '.') +FROM + system.query_log +WHERE + current_database=currentDatabase() and query = 'SELECT sum(id3) FROM t GROUP BY id2 FORMAT Null;'; + +SELECT + -- Remove the prefix, which is the database name and varies between runs.
+ arrayStringConcat(arrayPopFront(splitByString('.', projections[1])), '.') +FROM + system.query_log +WHERE + current_database=currentDatabase() and query = 'SELECT min(id) FROM t FORMAT Null;'; + +DROP TABLE t; diff --git a/parser/testdata/01711_cte_subquery_fix/ast.json b/parser/testdata/01711_cte_subquery_fix/ast.json new file mode 100644 index 000000000..a7ae1fe51 --- /dev/null +++ b/parser/testdata/01711_cte_subquery_fix/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001250392, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01711_cte_subquery_fix/metadata.json b/parser/testdata/01711_cte_subquery_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01711_cte_subquery_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01711_cte_subquery_fix/query.sql b/parser/testdata/01711_cte_subquery_fix/query.sql new file mode 100644 index 000000000..10ad90192 --- /dev/null +++ b/parser/testdata/01711_cte_subquery_fix/query.sql @@ -0,0 +1,7 @@ +drop table if exists t; +create table t engine = Memory as with cte as (select * from numbers(10)) select * from cte; +drop table t; + +drop table if exists view1; +create view view1 as with t as (select number n from numbers(3)) select n from t; +drop table view1; diff --git a/parser/testdata/01711_decimal_multiplication/ast.json b/parser/testdata/01711_decimal_multiplication/ast.json new file mode 100644 index 000000000..79a61c47b --- /dev/null +++ b/parser/testdata/01711_decimal_multiplication/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function minus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDecimal64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001304363, + "rows_read": 18, + "bytes_read": 728 + } +} diff --git a/parser/testdata/01711_decimal_multiplication/metadata.json b/parser/testdata/01711_decimal_multiplication/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01711_decimal_multiplication/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01711_decimal_multiplication/query.sql b/parser/testdata/01711_decimal_multiplication/query.sql new file mode 100644 index 000000000..10d23599b --- /dev/null +++ b/parser/testdata/01711_decimal_multiplication/query.sql @@ -0,0 +1,4 @@ +SELECT materialize(toDecimal64(4,4)) - materialize(toDecimal32(2,2)); 
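+-- Note: materialize() wraps a constant into a full (non-constant) column, so the four SELECTs in this file likely exist to cover every const/const, const/column, column/const and column/column operand combination of the Decimal64 - Decimal32 subtraction.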
+SELECT toDecimal64(4,4) - materialize(toDecimal32(2,2)); +SELECT materialize(toDecimal64(4,4)) - toDecimal32(2,2); +SELECT toDecimal64(4,4) - toDecimal32(2,2); diff --git a/parser/testdata/01712_no_adaptive_granularity_vertical_merge/ast.json b/parser/testdata/01712_no_adaptive_granularity_vertical_merge/ast.json new file mode 100644 index 000000000..c0ab9efdf --- /dev/null +++ b/parser/testdata/01712_no_adaptive_granularity_vertical_merge/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery old_school_table (children 1)" + }, + { + "explain": " Identifier old_school_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001398371, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/01712_no_adaptive_granularity_vertical_merge/metadata.json b/parser/testdata/01712_no_adaptive_granularity_vertical_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01712_no_adaptive_granularity_vertical_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01712_no_adaptive_granularity_vertical_merge/query.sql b/parser/testdata/01712_no_adaptive_granularity_vertical_merge/query.sql new file mode 100644 index 000000000..0acf6992c --- /dev/null +++ b/parser/testdata/01712_no_adaptive_granularity_vertical_merge/query.sql @@ -0,0 +1,30 @@ +DROP TABLE IF EXISTS old_school_table; + +CREATE TABLE old_school_table +( + key UInt64, + value String +) +ENGINE = MergeTree() +ORDER BY key +SETTINGS index_granularity_bytes = 0, enable_mixed_granularity_parts = 0, min_bytes_for_wide_part = 0, +vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 1; + +INSERT INTO old_school_table VALUES (1, '1'); +INSERT INTO old_school_table VALUES (2, '2'); + +OPTIMIZE TABLE old_school_table FINAL; + +SELECT * FROM old_school_table ORDER BY key; + +OPTIMIZE TABLE old_school_table FINAL; -- just to be sure + +SELECT * FROM old_school_table ORDER BY key; + +ALTER TABLE old_school_table MODIFY SETTING vertical_merge_algorithm_min_rows_to_activate = 10000, vertical_merge_algorithm_min_columns_to_activate = 10000; + +OPTIMIZE TABLE old_school_table FINAL; -- and horizontal merge + +SELECT * FROM old_school_table ORDER BY key; + +DROP TABLE IF EXISTS old_school_table; diff --git a/parser/testdata/01713_table_ttl_old_syntax_zookeeper/ast.json b/parser/testdata/01713_table_ttl_old_syntax_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01713_table_ttl_old_syntax_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01713_table_ttl_old_syntax_zookeeper/metadata.json b/parser/testdata/01713_table_ttl_old_syntax_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01713_table_ttl_old_syntax_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01713_table_ttl_old_syntax_zookeeper/query.sql b/parser/testdata/01713_table_ttl_old_syntax_zookeeper/query.sql new file mode 100644 index 000000000..5509d527d --- /dev/null +++ b/parser/testdata/01713_table_ttl_old_syntax_zookeeper/query.sql @@ -0,0 +1,32 @@ +-- Tags: zookeeper + +DROP TABLE IF EXISTS ttl_table; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE ttl_table +( + date Date, + value UInt64 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01713_table_ttl', '1', 
date, date, 8192) +TTL date + INTERVAL 2 MONTH; --{ serverError BAD_ARGUMENTS } + +CREATE TABLE ttl_table +( + date Date, + value UInt64 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01713_table_ttl', '1', date, date, 8192) +PARTITION BY date; --{ serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +CREATE TABLE ttl_table +( + date Date, + value UInt64 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01713_table_ttl', '1', date, date, 8192) +ORDER BY value; --{ serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT 1; + +DROP TABLE IF EXISTS ttl_table; diff --git a/parser/testdata/01714_alter_drop_version/ast.json b/parser/testdata/01714_alter_drop_version/ast.json new file mode 100644 index 000000000..5e65ce51d --- /dev/null +++ b/parser/testdata/01714_alter_drop_version/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alter_drop_version (children 1)" + }, + { + "explain": " Identifier alter_drop_version" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001179873, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/01714_alter_drop_version/metadata.json b/parser/testdata/01714_alter_drop_version/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01714_alter_drop_version/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01714_alter_drop_version/query.sql b/parser/testdata/01714_alter_drop_version/query.sql new file mode 100644 index 000000000..91670fff2 --- /dev/null +++ b/parser/testdata/01714_alter_drop_version/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS alter_drop_version; + +CREATE TABLE alter_drop_version +( + `key` UInt64, + `value` String, + `ver` Int8 +) +ENGINE = ReplacingMergeTree(ver) +ORDER BY key; + +INSERT INTO alter_drop_version VALUES (1, '1', 1); + +ALTER TABLE alter_drop_version DROP COLUMN ver; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} +ALTER TABLE alter_drop_version RENAME COLUMN ver TO rev; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +DETACH TABLE alter_drop_version; + +ATTACH TABLE alter_drop_version; + +SELECT * FROM alter_drop_version; + +DROP TABLE IF EXISTS alter_drop_version; diff --git a/parser/testdata/01715_background_checker_blather_zookeeper_long/ast.json b/parser/testdata/01715_background_checker_blather_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01715_background_checker_blather_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01715_background_checker_blather_zookeeper_long/metadata.json b/parser/testdata/01715_background_checker_blather_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01715_background_checker_blather_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01715_background_checker_blather_zookeeper_long/query.sql b/parser/testdata/01715_background_checker_blather_zookeeper_long/query.sql new file mode 100644 index 000000000..3de3509fa --- /dev/null +++ b/parser/testdata/01715_background_checker_blather_zookeeper_long/query.sql @@ -0,0 +1,32 @@ +-- Tags: long, zookeeper, no-shared-merge-tree, no-msan, no-asan, no-tsan, no-ubsan +-- no-shared-merge-tree: no replication queue + +DROP TABLE IF EXISTS i20203_1 SYNC; +DROP TABLE IF EXISTS i20203_2 SYNC; + +CREATE TABLE i20203_1 (a Int8) +ENGINE = 
ReplicatedMergeTree('/clickhouse/{database}/01715_background_checker_i20203', 'r1') +ORDER BY tuple(); + +CREATE TABLE i20203_2 (a Int8) +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/01715_background_checker_i20203', 'r2') +ORDER BY tuple(); + +DETACH TABLE i20203_2; +INSERT INTO i20203_1 VALUES (2); + +DETACH TABLE i20203_1; +ATTACH TABLE i20203_2; + +-- sleep 10 seconds +SET function_sleep_max_microseconds_per_block = 10000000; +SELECT number from numbers(10) where sleepEachRow(1) Format Null; + +SELECT num_tries < 200 +FROM system.replication_queue +WHERE table = 'i20203_2' AND database = currentDatabase(); + +ATTACH TABLE i20203_1; + +DROP TABLE i20203_1 SYNC; +DROP TABLE i20203_2 SYNC; diff --git a/parser/testdata/01715_table_function_view_fix/ast.json b/parser/testdata/01715_table_function_view_fix/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01715_table_function_view_fix/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01715_table_function_view_fix/metadata.json b/parser/testdata/01715_table_function_view_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01715_table_function_view_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01715_table_function_view_fix/query.sql b/parser/testdata/01715_table_function_view_fix/query.sql new file mode 100644 index 000000000..7407e6b0d --- /dev/null +++ b/parser/testdata/01715_table_function_view_fix/query.sql @@ -0,0 +1,3 @@ +SELECT view(SELECT 1); -- { clientError SYNTAX_ERROR } + +SELECT sumIf(dummy, dummy) FROM remote('127.0.0.{1,2}', numbers(2, 100), view(SELECT CAST(NULL, 'Nullable(UInt8)') AS dummy FROM system.one)); -- { serverError UNKNOWN_FUNCTION } diff --git a/parser/testdata/01715_tuple_insert_null_as_default/ast.json b/parser/testdata/01715_tuple_insert_null_as_default/ast.json new file mode 100644 index 000000000..bfc028b5e --- /dev/null +++ b/parser/testdata/01715_tuple_insert_null_as_default/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Tuple'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001137209, + "rows_read": 5, + "bytes_read": 176 + } +} diff --git a/parser/testdata/01715_tuple_insert_null_as_default/metadata.json b/parser/testdata/01715_tuple_insert_null_as_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01715_tuple_insert_null_as_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01715_tuple_insert_null_as_default/query.sql b/parser/testdata/01715_tuple_insert_null_as_default/query.sql new file mode 100644 index 000000000..b4930c278 --- /dev/null +++ b/parser/testdata/01715_tuple_insert_null_as_default/query.sql @@ -0,0 +1,75 @@ +SELECT 'Tuple'; + +DROP TABLE IF EXISTS test_tuple; +CREATE TABLE test_tuple (value Tuple(UInt8, UInt8)) ENGINE=TinyLog; + +SET input_format_null_as_default = 1; +INSERT INTO test_tuple VALUES ((NULL, 1)); +SELECT * FROM test_tuple; + +SET input_format_null_as_default = 0; +INSERT INTO test_tuple VALUES ((NULL, 2)); -- { error TYPE_MISMATCH } +SELECT * FROM test_tuple; + +DROP TABLE test_tuple; + +SELECT 'Tuple nested in Array'; + +DROP 
TABLE IF EXISTS test_tuple_nested_in_array; +CREATE TABLE test_tuple_nested_in_array (value Array(Tuple(UInt8, UInt8))) ENGINE=TinyLog; + +SET input_format_null_as_default = 1; +INSERT INTO test_tuple_nested_in_array VALUES ([(NULL, 2), (3, NULL), (NULL, 4)]); +SELECT * FROM test_tuple_nested_in_array; + +SET input_format_null_as_default = 0; +INSERT INTO test_tuple_nested_in_array VALUES ([(NULL, 1)]); -- { error TYPE_MISMATCH } +SELECT * FROM test_tuple_nested_in_array; + +DROP TABLE test_tuple_nested_in_array; + +SELECT 'Tuple nested in Array nested in Tuple'; + +DROP TABLE IF EXISTS test_tuple_nested_in_array_nested_in_tuple; +CREATE TABLE test_tuple_nested_in_array_nested_in_tuple (value Tuple(UInt8, Array(Tuple(UInt8, UInt8)))) ENGINE=TinyLog; + +SET input_format_null_as_default = 1; +INSERT INTO test_tuple_nested_in_array_nested_in_tuple VALUES ( (NULL, [(NULL, 2), (3, NULL), (NULL, 4)]) ); +SELECT * FROM test_tuple_nested_in_array_nested_in_tuple; + +SET input_format_null_as_default = 0; +INSERT INTO test_tuple_nested_in_array_nested_in_tuple VALUES ( (NULL, [(NULL, 1)]) ); -- { error TYPE_MISMATCH } +SELECT * FROM test_tuple_nested_in_array_nested_in_tuple; + +DROP TABLE test_tuple_nested_in_array_nested_in_tuple; + +SELECT 'Tuple nested in Map'; + +DROP TABLE IF EXISTS test_tuple_nested_in_map; +CREATE TABLE test_tuple_nested_in_map (value Map(String, Tuple(UInt8, UInt8))) ENGINE=TinyLog; + +SET input_format_null_as_default = 1; +INSERT INTO test_tuple_nested_in_map VALUES (map('test', (NULL, 1))); + +SELECT * FROM test_tuple_nested_in_map; + +SET input_format_null_as_default = 0; +INSERT INTO test_tuple_nested_in_map VALUES (map('test', (NULL, 1))); -- { error TYPE_MISMATCH } +SELECT * FROM test_tuple_nested_in_map; + +DROP TABLE test_tuple_nested_in_map; + +SELECT 'Tuple nested in Map nested in Tuple'; + +DROP TABLE IF EXISTS test_tuple_nested_in_map_nested_in_tuple; +CREATE TABLE test_tuple_nested_in_map_nested_in_tuple (value Tuple(UInt8, Map(String, Tuple(UInt8, UInt8)))) ENGINE=TinyLog; + +SET input_format_null_as_default = 1; +INSERT INTO test_tuple_nested_in_map_nested_in_tuple VALUES ( (NULL, map('test', (NULL, 1))) ); +SELECT * FROM test_tuple_nested_in_map_nested_in_tuple; + +SET input_format_null_as_default = 0; +INSERT INTO test_tuple_nested_in_map_nested_in_tuple VALUES ( (NULL, map('test', (NULL, 1))) ); -- { error TYPE_MISMATCH } +SELECT * FROM test_tuple_nested_in_map_nested_in_tuple; + +DROP TABLE test_tuple_nested_in_map_nested_in_tuple; diff --git a/parser/testdata/01716_array_difference_overflow/ast.json b/parser/testdata/01716_array_difference_overflow/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01716_array_difference_overflow/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01716_array_difference_overflow/metadata.json b/parser/testdata/01716_array_difference_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01716_array_difference_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01716_array_difference_overflow/query.sql b/parser/testdata/01716_array_difference_overflow/query.sql new file mode 100644 index 000000000..747e0ad75 --- /dev/null +++ b/parser/testdata/01716_array_difference_overflow/query.sql @@ -0,0 +1,8 @@ +-- Overflow is OK and behaves the same way the CPU does.
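+-- e.g. the second element of arrayDifference([65536, -9223372036854775808]) is -9223372036854775808 - 65536, which wraps in two's complement to the positive Int64 value 9223372036854710272 (2^63 - 65536) rather than raising an error.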
+SELECT arrayDifference([65536, -9223372036854775808]); + +-- Diff of unsigned int -> int +SELECT arrayDifference( cast([10, 1], 'Array(UInt8)')); +SELECT arrayDifference( cast([10, 1], 'Array(UInt16)')); +SELECT arrayDifference( cast([10, 1], 'Array(UInt32)')); +SELECT arrayDifference( cast([10, 1], 'Array(UInt64)')); diff --git a/parser/testdata/01716_decimal_comparison_ubsan/ast.json b/parser/testdata/01716_decimal_comparison_ubsan/ast.json new file mode 100644 index 000000000..f0465179b --- /dev/null +++ b/parser/testdata/01716_decimal_comparison_ubsan/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001061771, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01716_decimal_comparison_ubsan/metadata.json b/parser/testdata/01716_decimal_comparison_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01716_decimal_comparison_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01716_decimal_comparison_ubsan/query.sql b/parser/testdata/01716_decimal_comparison_ubsan/query.sql new file mode 100644 index 000000000..f68d9de19 --- /dev/null +++ b/parser/testdata/01716_decimal_comparison_ubsan/query.sql @@ -0,0 +1,2 @@ +SET decimal_check_overflow = 0; +SELECT toDecimal64(0, 8) = 9223372036854775807; diff --git a/parser/testdata/01716_drop_rename_sign_column/ast.json b/parser/testdata/01716_drop_rename_sign_column/ast.json new file mode 100644 index 000000000..cab1a458f --- /dev/null +++ b/parser/testdata/01716_drop_rename_sign_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery signed_table (children 1)" + }, + { + "explain": " Identifier signed_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001491817, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/01716_drop_rename_sign_column/metadata.json b/parser/testdata/01716_drop_rename_sign_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01716_drop_rename_sign_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01716_drop_rename_sign_column/query.sql b/parser/testdata/01716_drop_rename_sign_column/query.sql new file mode 100644 index 000000000..bdaa5a584 --- /dev/null +++ b/parser/testdata/01716_drop_rename_sign_column/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS signed_table; + +CREATE TABLE signed_table ( + k UInt32, + v String, + s Int8 +) ENGINE CollapsingMergeTree(s) ORDER BY k; + +INSERT INTO signed_table(k, v, s) VALUES (1, 'a', 1); + +ALTER TABLE signed_table DROP COLUMN s; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} +ALTER TABLE signed_table RENAME COLUMN s TO s1; --{serverError ALTER_OF_COLUMN_IS_FORBIDDEN} + +DROP TABLE IF EXISTS signed_table; diff --git a/parser/testdata/01717_global_with_subquery_fix/ast.json b/parser/testdata/01717_global_with_subquery_fix/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01717_global_with_subquery_fix/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01717_global_with_subquery_fix/metadata.json b/parser/testdata/01717_global_with_subquery_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01717_global_with_subquery_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01717_global_with_subquery_fix/query.sql b/parser/testdata/01717_global_with_subquery_fix/query.sql new file mode 100644 index 000000000..b835b22f4 --- /dev/null +++ b/parser/testdata/01717_global_with_subquery_fix/query.sql @@ -0,0 +1,3 @@ +-- Tags: global + +WITH (SELECT count(distinct colU) from tabA) AS withA, (SELECT count(distinct colU) from tabA) AS withB SELECT withA / withB AS ratio FROM (SELECT date AS period, colX FROM (SELECT date, if(colA IN (SELECT colB FROM tabC), 0, colA) AS colX FROM tabB) AS tempB GROUP BY period, colX) AS main; -- {serverError UNKNOWN_TABLE} diff --git a/parser/testdata/01717_int_div_float_too_large_ubsan/ast.json b/parser/testdata/01717_int_div_float_too_large_ubsan/ast.json new file mode 100644 index 000000000..808d93ac6 --- /dev/null +++ b/parser/testdata/01717_int_div_float_too_large_ubsan/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function intDiv (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_18446744073709551615" + }, + { + "explain": " Literal Float64_0.9998999834060669" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.00105861, + "rows_read": 8, + "bytes_read": 326 + } +} diff --git a/parser/testdata/01717_int_div_float_too_large_ubsan/metadata.json b/parser/testdata/01717_int_div_float_too_large_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01717_int_div_float_too_large_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01717_int_div_float_too_large_ubsan/query.sql b/parser/testdata/01717_int_div_float_too_large_ubsan/query.sql new file mode 100644 index 000000000..04d18db5f --- /dev/null +++ b/parser/testdata/01717_int_div_float_too_large_ubsan/query.sql @@ -0,0 +1,2 @@ +SELECT intDiv(18446744073709551615, 0.9998999834060669); -- { serverError ILLEGAL_DIVISION } +SELECT intDiv(18446744073709551615, 1.); -- { serverError ILLEGAL_DIVISION } diff --git a/parser/testdata/01718_subtract_seconds_date/ast.json b/parser/testdata/01718_subtract_seconds_date/ast.json new file mode 100644 index 000000000..9eb76d2da --- /dev/null +++ b/parser/testdata/01718_subtract_seconds_date/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function subtractSeconds (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2021-02-15'" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001300364, + "rows_read": 10, + "bytes_read": 388 + } +} diff --git a/parser/testdata/01718_subtract_seconds_date/metadata.json b/parser/testdata/01718_subtract_seconds_date/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01718_subtract_seconds_date/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01718_subtract_seconds_date/query.sql b/parser/testdata/01718_subtract_seconds_date/query.sql new file mode 100644 index 000000000..6bffcd4db --- /dev/null +++ b/parser/testdata/01718_subtract_seconds_date/query.sql @@ -0,0 +1,2 @@ +SELECT subtractSeconds(toDate('2021-02-15'), 1); +SELECT subtractSeconds(today(), 1) - subtractSeconds(today(), 11); diff --git a/parser/testdata/01719_join_timezone/ast.json b/parser/testdata/01719_join_timezone/ast.json new file mode 100644 index 000000000..f8c6795b1 --- /dev/null +++ b/parser/testdata/01719_join_timezone/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001478044, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01719_join_timezone/metadata.json b/parser/testdata/01719_join_timezone/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01719_join_timezone/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01719_join_timezone/query.sql b/parser/testdata/01719_join_timezone/query.sql new file mode 100644 index 000000000..cbf0c27fc --- /dev/null +++ b/parser/testdata/01719_join_timezone/query.sql @@ -0,0 +1,45 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test (timestamp DateTime('UTC'), i UInt8) Engine=MergeTree() PARTITION BY toYYYYMM(timestamp) ORDER BY (i); +INSERT INTO test values ('2020-05-13 16:38:45', 1); + +SELECT + toTimeZone(timestamp, 'America/Sao_Paulo') AS converted, + timestamp AS original +FROM test +LEFT JOIN (SELECT 2 AS x) AS anything ON x = i +WHERE timestamp >= toDateTime('2020-05-13T00:00:00', 'America/Sao_Paulo'); + +/* This was incorrect result in previous ClickHouse versions: +┌─converted───────────┬─original────────────┐ +│ 2020-05-13 16:38:45 │ 2020-05-13 16:38:45 │ <-- toTimeZone is ignored. +└─────────────────────┴─────────────────────┘ +*/ + +SELECT + toTimeZone(timestamp, 'America/Sao_Paulo') AS converted, + timestamp AS original +FROM test +-- LEFT JOIN (SELECT 2 AS x) AS anything ON x = i -- Removing the join fixes the issue. +WHERE timestamp >= toDateTime('2020-05-13T00:00:00', 'America/Sao_Paulo'); + +/* +┌─converted───────────┬─original────────────┐ +│ 2020-05-13 13:38:45 │ 2020-05-13 16:38:45 │ <-- toTimeZone works. +└─────────────────────┴─────────────────────┘ +*/ + +SELECT + toTimeZone(timestamp, 'America/Sao_Paulo') AS converted, + timestamp AS original +FROM test +LEFT JOIN (SELECT 2 AS x) AS anything ON x = i +WHERE timestamp >= '2020-05-13T00:00:00'; -- Not using toDateTime in the WHERE also fixes the issue. + +/* +┌─converted───────────┬─original────────────┐ +│ 2020-05-13 13:38:45 │ 2020-05-13 16:38:45 │ <-- toTimeZone works. 
+└─────────────────────┴─────────────────────┘ +*/ + +DROP TABLE test; diff --git a/parser/testdata/01720_constraints_complex_types/ast.json b/parser/testdata/01720_constraints_complex_types/ast.json new file mode 100644 index 000000000..4d2a510ef --- /dev/null +++ b/parser/testdata/01720_constraints_complex_types/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001329006, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01720_constraints_complex_types/metadata.json b/parser/testdata/01720_constraints_complex_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01720_constraints_complex_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01720_constraints_complex_types/query.sql b/parser/testdata/01720_constraints_complex_types/query.sql new file mode 100644 index 000000000..fd40699e5 --- /dev/null +++ b/parser/testdata/01720_constraints_complex_types/query.sql @@ -0,0 +1,47 @@ +SET allow_suspicious_low_cardinality_types = 1; + +DROP TABLE IF EXISTS constraint_on_nullable_type; +CREATE TABLE constraint_on_nullable_type +( + `id` Nullable(UInt64), + CONSTRAINT `c0` CHECK `id` = 1 +) +ENGINE = TinyLog(); + +INSERT INTO constraint_on_nullable_type VALUES (0); -- {serverError VIOLATED_CONSTRAINT} +INSERT INTO constraint_on_nullable_type VALUES (1); + +SELECT * FROM constraint_on_nullable_type; + +DROP TABLE constraint_on_nullable_type; + +DROP TABLE IF EXISTS constraint_on_low_cardinality_type; +CREATE TABLE constraint_on_low_cardinality_type +( + `id` LowCardinality(UInt64), + CONSTRAINT `c0` CHECK `id` = 2 +) +ENGINE = TinyLog; + +INSERT INTO constraint_on_low_cardinality_type VALUES (0); -- {serverError VIOLATED_CONSTRAINT} +INSERT INTO constraint_on_low_cardinality_type VALUES (2); + +SELECT * FROM constraint_on_low_cardinality_type; + +DROP TABLE constraint_on_low_cardinality_type; + +DROP TABLE IF EXISTS constraint_on_low_cardinality_nullable_type; + +CREATE TABLE constraint_on_low_cardinality_nullable_type +( + `id` LowCardinality(Nullable(UInt64)), + CONSTRAINT `c0` CHECK `id` = 3 +) +ENGINE = TinyLog; + +INSERT INTO constraint_on_low_cardinality_nullable_type VALUES (0); -- {serverError VIOLATED_CONSTRAINT} +INSERT INTO constraint_on_low_cardinality_nullable_type VALUES (3); + +SELECT * FROM constraint_on_low_cardinality_nullable_type; + +DROP TABLE constraint_on_low_cardinality_nullable_type; diff --git a/parser/testdata/01720_dictionary_create_source_with_functions/ast.json b/parser/testdata/01720_dictionary_create_source_with_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01720_dictionary_create_source_with_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01720_dictionary_create_source_with_functions/metadata.json b/parser/testdata/01720_dictionary_create_source_with_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01720_dictionary_create_source_with_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01720_dictionary_create_source_with_functions/query.sql b/parser/testdata/01720_dictionary_create_source_with_functions/query.sql new file mode 100644 index 000000000..216a62845 --- /dev/null +++ 
b/parser/testdata/01720_dictionary_create_source_with_functions/query.sql @@ -0,0 +1,30 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 01720_dictionary_db; +CREATE DATABASE 01720_dictionary_db; + +CREATE TABLE 01720_dictionary_db.dictionary_source_table +( + key UInt8, + value String +) +ENGINE = TinyLog; + +INSERT INTO 01720_dictionary_db.dictionary_source_table VALUES (1, 'First'); + +CREATE DICTIONARY 01720_dictionary_db.dictionary +( + key UInt64, + value String +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(DB '01720_dictionary_db' TABLE 'dictionary_source_table' HOST hostName() PORT tcpPort())) +LIFETIME(0) +LAYOUT(FLAT()); + +SELECT * FROM 01720_dictionary_db.dictionary; + +DROP DICTIONARY 01720_dictionary_db.dictionary; +DROP TABLE 01720_dictionary_db.dictionary_source_table; + +DROP DATABASE 01720_dictionary_db; diff --git a/parser/testdata/01720_engine_file_empty_if_not_exists/ast.json b/parser/testdata/01720_engine_file_empty_if_not_exists/ast.json new file mode 100644 index 000000000..4c841384f --- /dev/null +++ b/parser/testdata/01720_engine_file_empty_if_not_exists/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery file_engine_table (children 1)" + }, + { + "explain": " Identifier file_engine_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001149894, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/01720_engine_file_empty_if_not_exists/metadata.json b/parser/testdata/01720_engine_file_empty_if_not_exists/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01720_engine_file_empty_if_not_exists/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01720_engine_file_empty_if_not_exists/query.sql b/parser/testdata/01720_engine_file_empty_if_not_exists/query.sql new file mode 100644 index 000000000..d031c71f1 --- /dev/null +++ b/parser/testdata/01720_engine_file_empty_if_not_exists/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS file_engine_table; + +CREATE TABLE file_engine_table (id UInt32) ENGINE=File(TSV); + +SELECT * FROM file_engine_table; --{ serverError FILE_DOESNT_EXIST } + +SET engine_file_empty_if_not_exists=0; + +SELECT * FROM file_engine_table; --{ serverError FILE_DOESNT_EXIST } + +SET engine_file_empty_if_not_exists=1; + +SELECT * FROM file_engine_table; + +SET engine_file_empty_if_not_exists=0; +DROP TABLE file_engine_table; diff --git a/parser/testdata/01720_type_map_and_casts/ast.json b/parser/testdata/01720_type_map_and_casts/ast.json new file mode 100644 index 000000000..b356e2a6c --- /dev/null +++ b/parser/testdata/01720_type_map_and_casts/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_map_with_key_integer (children 1)" + }, + { + "explain": " Identifier table_map_with_key_integer" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001471724, + "rows_read": 2, + "bytes_read": 104 + } +} diff --git a/parser/testdata/01720_type_map_and_casts/metadata.json b/parser/testdata/01720_type_map_and_casts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01720_type_map_and_casts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01720_type_map_and_casts/query.sql b/parser/testdata/01720_type_map_and_casts/query.sql new file mode 100644 index 000000000..7bbe18512 --- /dev/null +++ 
b/parser/testdata/01720_type_map_and_casts/query.sql @@ -0,0 +1,86 @@ +DROP TABLE IF EXISTS table_map_with_key_integer; + +CREATE TABLE table_map_with_key_integer (d DATE, m Map(Int8, Int8)) +ENGINE = MergeTree() ORDER BY d; + +INSERT INTO table_map_with_key_integer VALUES ('2020-01-01', map(127, 1, 0, 1, -1, 1)) ('2020-01-01', map()); + +SELECT 'Map(Int8, Int8)'; + +SELECT m FROM table_map_with_key_integer; +SELECT m[127], m[1], m[0], m[-1] FROM table_map_with_key_integer; +SELECT m[toInt8(number - 2)] FROM table_map_with_key_integer ARRAY JOIN [0, 1, 2, 3, 4] AS number; + +SELECT count() FROM table_map_with_key_integer WHERE m = map(); + +DROP TABLE IF EXISTS table_map_with_key_integer; + +CREATE TABLE table_map_with_key_integer (d DATE, m Map(Int32, UInt16)) +ENGINE = MergeTree() ORDER BY d; + +INSERT INTO table_map_with_key_integer VALUES ('2020-01-01', map(-1, 1, 2147483647, 2, -2147483648, 3)); + +SELECT 'Map(Int32, UInt16)'; + +SELECT m FROM table_map_with_key_integer; +SELECT m[-1], m[2147483647], m[-2147483648] FROM table_map_with_key_integer; +SELECT m[toInt32(number - 2)] FROM table_map_with_key_integer ARRAY JOIN [0, 1, 2, 3, 4] AS number; + +DROP TABLE IF EXISTS table_map_with_key_integer; + +CREATE TABLE table_map_with_key_integer (d DATE, m Map(Date, Int32)) +ENGINE = MergeTree() ORDER BY d; + +INSERT INTO table_map_with_key_integer VALUES ('2020-01-01', map('2020-01-01', 1, '2020-01-02', 2, '1970-01-02', 3)); + +SELECT 'Map(Date, Int32)'; + +SELECT m FROM table_map_with_key_integer; +SELECT m[toDate('2020-01-01')], m[toDate('2020-01-02')], m[toDate('2020-01-03')] FROM table_map_with_key_integer; +SELECT m[toDate(number)] FROM table_map_with_key_integer ARRAY JOIN [0, 1, 2] AS number; + +DROP TABLE IF EXISTS table_map_with_key_integer; + +CREATE TABLE table_map_with_key_integer (d DATE, m Map(UUID, UInt16)) +ENGINE = MergeTree() ORDER BY d; + +INSERT INTO table_map_with_key_integer VALUES ('2020-01-01', map('00001192-0000-4000-8000-000000000001', 1, '00001192-0000-4000-7000-000000000001', 2)); + +SELECT 'Map(UUID, UInt16)'; + +SELECT m FROM table_map_with_key_integer; +SELECT + m[toUUID('00001192-0000-4000-6000-000000000001')], + m[toUUID('00001192-0000-4000-7000-000000000001')], + m[toUUID('00001192-0000-4000-8000-000000000001')] +FROM table_map_with_key_integer; + +SELECT m[257], m[1] FROM table_map_with_key_integer; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +DROP TABLE IF EXISTS table_map_with_key_integer; + +CREATE TABLE table_map_with_key_integer (d DATE, m Map(Int128, String)) +ENGINE = MergeTree() ORDER BY d; + + +INSERT INTO table_map_with_key_integer SELECT '2020-01-01', map(-1, 'a', 0, 'b', toInt128('1234567898765432123456789'), 'c', toInt128('-1234567898765432123456789'), 'd'); + +SELECT 'Map(Int128, String)'; + +SELECT m FROM table_map_with_key_integer; +SELECT m[toInt128(-1)], m[toInt128(0)], m[toInt128('1234567898765432123456789')], m[toInt128('-1234567898765432123456789')] FROM table_map_with_key_integer; +SELECT m[toInt128(number - 2)] FROM table_map_with_key_integer ARRAY JOIN [0, 1, 2, 3] AS number; + +SELECT m[-1], m[0], m[toInt128('1234567898765432123456789')], m[toInt128('-1234567898765432123456789')] FROM table_map_with_key_integer; +SELECT m[toUInt64(0)], m[toInt64(0)], m[toUInt8(0)], m[toUInt16(0)] FROM table_map_with_key_integer; + +DROP TABLE IF EXISTS table_map_with_key_integer; + + +CREATE TABLE table_map_with_key_integer (m Map(Float32, String)) ENGINE = MergeTree() ORDER BY tuple(); +DROP TABLE IF EXISTS table_map_with_key_integer; + +CREATE 
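+-- Map keys of Float32 and Array(UInt32) are accepted at CREATE time here, while the Nullable(String) key below is expected to fail with BAD_ARGUMENTS.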
TABLE table_map_with_key_integer (m Map(Array(UInt32), String)) ENGINE = MergeTree() ORDER BY tuple(); +DROP TABLE IF EXISTS table_map_with_key_integer; + +CREATE TABLE table_map_with_key_integer (m Map(Nullable(String), String)) ENGINE = MergeTree() ORDER BY tuple(); -- { serverError BAD_ARGUMENTS} diff --git a/parser/testdata/01720_union_distinct_with_limit/ast.json b/parser/testdata/01720_union_distinct_with_limit/ast.json new file mode 100644 index 000000000..8e17fe7d0 --- /dev/null +++ b/parser/testdata/01720_union_distinct_with_limit/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.00152395, + "rows_read": 5, + "bytes_read": 173 + } +} diff --git a/parser/testdata/01720_union_distinct_with_limit/metadata.json b/parser/testdata/01720_union_distinct_with_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01720_union_distinct_with_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01720_union_distinct_with_limit/query.sql b/parser/testdata/01720_union_distinct_with_limit/query.sql new file mode 100644 index 000000000..9fc5b3eaf --- /dev/null +++ b/parser/testdata/01720_union_distinct_with_limit/query.sql @@ -0,0 +1,8 @@ +SELECT x +FROM +( + SELECT 1 AS x + UNION DISTINCT + SELECT 1 +) +LIMIT 1; diff --git a/parser/testdata/01721_constraints_constant_expressions/ast.json b/parser/testdata/01721_constraints_constant_expressions/ast.json new file mode 100644 index 000000000..d270487ad --- /dev/null +++ b/parser/testdata/01721_constraints_constant_expressions/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery constraint_constant_number_expression (children 1)" + }, + { + "explain": " Identifier constraint_constant_number_expression" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001335358, + "rows_read": 2, + "bytes_read": 126 + } +} diff --git a/parser/testdata/01721_constraints_constant_expressions/metadata.json b/parser/testdata/01721_constraints_constant_expressions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01721_constraints_constant_expressions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01721_constraints_constant_expressions/query.sql b/parser/testdata/01721_constraints_constant_expressions/query.sql new file mode 100644 index 000000000..150aef3b5 --- /dev/null +++ b/parser/testdata/01721_constraints_constant_expressions/query.sql @@ -0,0 +1,40 @@ +DROP TABLE IF EXISTS constraint_constant_number_expression; +CREATE TABLE constraint_constant_number_expression +( + id UInt64, + CONSTRAINT `c0` CHECK 1, + CONSTRAINT `c1` CHECK 1 < 2, + CONSTRAINT `c2` CHECK isNull(cast(NULL, 'Nullable(UInt8)')) +) ENGINE = TinyLog(); + +INSERT INTO constraint_constant_number_expression VALUES (1); + +SELECT * FROM constraint_constant_number_expression; + +DROP TABLE constraint_constant_number_expression; + +DROP TABLE IF EXISTS constraint_constant_number_expression_non_uint8; +CREATE TABLE constraint_constant_number_expression_non_uint8 +( + id UInt64, + CONSTRAINT `c0` CHECK 
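+-- a constant CHECK expression whose type is not UInt8 (here toUInt64(1)) cannot be used to validate inserts, hence the UNSUPPORTED_METHOD error on the INSERT below.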
toUInt64(1) +) ENGINE = TinyLog(); + +INSERT INTO constraint_constant_number_expression_non_uint8 VALUES (2); -- {serverError UNSUPPORTED_METHOD} + +SELECT * FROM constraint_constant_number_expression_non_uint8; + +DROP TABLE constraint_constant_number_expression_non_uint8; + +DROP TABLE IF EXISTS constraint_constant_nullable_expression_that_contains_null; +CREATE TABLE constraint_constant_nullable_expression_that_contains_null +( + id UInt64, + CONSTRAINT `c0` CHECK nullIf(1 % 2, 1) +) ENGINE = TinyLog(); + +INSERT INTO constraint_constant_nullable_expression_that_contains_null VALUES (3); -- {serverError VIOLATED_CONSTRAINT} + +SELECT * FROM constraint_constant_nullable_expression_that_contains_null; + +DROP TABLE constraint_constant_nullable_expression_that_contains_null; diff --git a/parser/testdata/01721_dictionary_decimal_p_s/ast.json b/parser/testdata/01721_dictionary_decimal_p_s/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01721_dictionary_decimal_p_s/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01721_dictionary_decimal_p_s/metadata.json b/parser/testdata/01721_dictionary_decimal_p_s/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01721_dictionary_decimal_p_s/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01721_dictionary_decimal_p_s/query.sql b/parser/testdata/01721_dictionary_decimal_p_s/query.sql new file mode 100644 index 000000000..82e85957a --- /dev/null +++ b/parser/testdata/01721_dictionary_decimal_p_s/query.sql @@ -0,0 +1,74 @@ +-- Tags: no-parallel + +drop table if exists table_decimal_dict; +drop dictionary if exists decimal_dict; + + +CREATE TABLE table_decimal_dict( +KeyField UInt64, +Decimal32_ Decimal(5,4), +Decimal64_ Decimal(18,8), +Decimal128_ Decimal(25,8), +Decimal256_ Decimal(76,37) +) +ENGINE = Memory; + +insert into table_decimal_dict +select number, + number / 3, + number / 3, + number / 3, + number / 3 +from numbers(5000); + + +CREATE DICTIONARY IF NOT EXISTS decimal_dict ( + KeyField UInt64 DEFAULT 9999999, + Decimal32_ Decimal(5,4) DEFAULT 0.11, + Decimal64_ Decimal(18,8) DEFAULT 0.11, + Decimal128_ Decimal(25,8) DEFAULT 0.11 +-- ,Decimal256_ Decimal256(37) DEFAULT 0.11 +) +PRIMARY KEY KeyField +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'table_decimal_dict' DB current_database())) +LIFETIME(0) LAYOUT(SPARSE_HASHED); + +select '-------- 42 --------'; + +SELECT * from table_decimal_dict where KeyField = 42; + +SELECT * from decimal_dict where KeyField = 42; + +SELECT dictGet('decimal_dict', 'Decimal32_', toUInt64(42)), + dictGet('decimal_dict', 'Decimal64_', toUInt64(42)), + dictGet('decimal_dict', 'Decimal128_', toUInt64(42)) + -- ,dictGet('decimal_dict', 'Decimal256_', toUInt64(42)) +; + + +select '-------- 4999 --------'; + +SELECT * from table_decimal_dict where KeyField = 4999; + +SELECT * from decimal_dict where KeyField = 4999; + +SELECT dictGet('decimal_dict', 'Decimal32_', toUInt64(4999)), + dictGet('decimal_dict', 'Decimal64_', toUInt64(4999)), + dictGet('decimal_dict', 'Decimal128_', toUInt64(4999)) + --,dictGet('decimal_dict', 'Decimal256_', toUInt64(4999)) +; + +select '-------- 5000 --------'; + +SELECT * from table_decimal_dict where KeyField = 5000; + +SELECT * from decimal_dict where KeyField = 5000; + +SELECT dictGet('decimal_dict', 'Decimal32_', toUInt64(5000)), + dictGet('decimal_dict', 'Decimal64_', toUInt64(5000)), + dictGet('decimal_dict', 'Decimal128_', 
toUInt64(5000)) + --,dictGet('decimal_dict', 'Decimal256_', toUInt64(5000)) +; + +drop dictionary if exists decimal_dict; +drop table if exists table_decimal_dict; diff --git a/parser/testdata/01721_engine_file_truncate_on_insert/ast.json b/parser/testdata/01721_engine_file_truncate_on_insert/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01721_engine_file_truncate_on_insert/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01721_engine_file_truncate_on_insert/metadata.json b/parser/testdata/01721_engine_file_truncate_on_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01721_engine_file_truncate_on_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01721_engine_file_truncate_on_insert/query.sql b/parser/testdata/01721_engine_file_truncate_on_insert/query.sql new file mode 100644 index 000000000..fb61931d7 --- /dev/null +++ b/parser/testdata/01721_engine_file_truncate_on_insert/query.sql @@ -0,0 +1,24 @@ +-- Tags: no-replicated-database, no-parallel +-- Tag no-replicated-database: user_files + +DROP TABLE IF EXISTS test; + +INSERT INTO TABLE FUNCTION file('01721_file/test/data.TSV', 'TSV', 'id UInt32') VALUES (1); +ATTACH TABLE test FROM '01721_file/test' (id UInt8) ENGINE=File(TSV); + +INSERT INTO test VALUES (2), (3); +INSERT INTO test VALUES (4); +SELECT * FROM test; + +SET engine_file_truncate_on_insert=0; + +INSERT INTO test VALUES (5), (6); +SELECT * FROM test; + +SET engine_file_truncate_on_insert=1; + +INSERT INTO test VALUES (0), (1), (2); +SELECT * FROM test; + +SET engine_file_truncate_on_insert=0; +DROP TABLE test; diff --git a/parser/testdata/01730_distributed_group_by_no_merge_order_by_long/ast.json b/parser/testdata/01730_distributed_group_by_no_merge_order_by_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01730_distributed_group_by_no_merge_order_by_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01730_distributed_group_by_no_merge_order_by_long/metadata.json b/parser/testdata/01730_distributed_group_by_no_merge_order_by_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01730_distributed_group_by_no_merge_order_by_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01730_distributed_group_by_no_merge_order_by_long/query.sql b/parser/testdata/01730_distributed_group_by_no_merge_order_by_long/query.sql new file mode 100644 index 000000000..c0f795894 --- /dev/null +++ b/parser/testdata/01730_distributed_group_by_no_merge_order_by_long/query.sql @@ -0,0 +1,28 @@ +-- Tags: long, distributed, no-random-settings + +drop table if exists data_01730; +SET max_rows_to_read = 0, max_result_rows = 0, max_bytes_before_external_group_by = 0, max_bytes_ratio_before_external_group_by = 0; + +-- Memory limit exceeded +SET enable_parallel_blocks_marshalling = 0; + +-- does not use 127.1 due to prefer_localhost_replica + +select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 20 settings distributed_group_by_no_merge=0, max_memory_usage='100Mi'; -- { serverError MEMORY_LIMIT_EXCEEDED } +-- no memory limit error, because with distributed_group_by_no_merge=2 remote servers will do ORDER BY and will cut to the LIMIT +select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 20 settings 
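+-- each remote applies ORDER BY and LIMIT before sending, so the initiator never has to buffer the full result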
distributed_group_by_no_merge=2, max_memory_usage='100Mi'; + +-- since the MergingSortedTransform will start processing only when all ports (remotes) have some data, +-- and the query with GROUP BY on remote servers will first do GROUP BY and then send the block, +-- so the initiator will first receive all blocks from remotes and only then start merging, +-- and will hit the memory limit. +select * from remote('127.{2..11}', view(select * from numbers(1e6))) group by number order by number limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='20Mi', max_block_size=4294967296; -- { serverError MEMORY_LIMIT_EXCEEDED } + +-- with optimize_aggregation_in_order=1 remote servers will produce blocks more frequently, +-- since they don't need to wait until the aggregation is finished, +-- and so the query will not hit the memory limit error. +-- Set max_threads equal to the number of replicas so that we don't have too many threads +-- receiving the small blocks. +create table data_01730 engine=MergeTree() order by key as select number key from numbers(1e6); +select * from remote('127.{2..11}', currentDatabase(), data_01730) group by key order by key limit 1e6 settings distributed_group_by_no_merge=2, max_memory_usage='100Mi', optimize_aggregation_in_order=1, max_threads=10 format Null; +drop table data_01730; diff --git a/parser/testdata/01732_alters_bad_conversions/ast.json b/parser/testdata/01732_alters_bad_conversions/ast.json new file mode 100644 index 000000000..0ec3b6e43 --- /dev/null +++ b/parser/testdata/01732_alters_bad_conversions/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery bad_conversions (children 1)" + }, + { + "explain": " Identifier bad_conversions" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00115182, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/01732_alters_bad_conversions/metadata.json b/parser/testdata/01732_alters_bad_conversions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01732_alters_bad_conversions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01732_alters_bad_conversions/query.sql b/parser/testdata/01732_alters_bad_conversions/query.sql new file mode 100644 index 000000000..fe8eb0c14 --- /dev/null +++ b/parser/testdata/01732_alters_bad_conversions/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS bad_conversions; +DROP TABLE IF EXISTS bad_conversions_2; + +CREATE TABLE bad_conversions (a UInt32) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO bad_conversions VALUES (1); +ALTER TABLE bad_conversions MODIFY COLUMN a Array(String); -- { serverError TYPE_MISMATCH } +SHOW CREATE TABLE bad_conversions; +SELECT count() FROM system.mutations WHERE table = 'bad_conversions' AND database = currentDatabase(); + +CREATE TABLE bad_conversions_2 (e Enum('foo' = 1, 'bar' = 2)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO bad_conversions_2 VALUES (1); +ALTER TABLE bad_conversions_2 MODIFY COLUMN e Enum('bar' = 1, 'foo' = 2); -- { serverError CANNOT_CONVERT_TYPE } +SHOW CREATE TABLE bad_conversions_2; +SELECT count() FROM system.mutations WHERE table = 'bad_conversions_2' AND database = currentDatabase(); + +DROP TABLE IF EXISTS bad_conversions; +DROP TABLE IF EXISTS bad_conversions_2; diff --git a/parser/testdata/01732_bigint_ubsan/ast.json b/parser/testdata/01732_bigint_ubsan/ast.json new file mode 100644 index 000000000..31715ac40 ---
/dev/null +++ b/parser/testdata/01732_bigint_ubsan/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery decimal (children 1)" + }, + { + "explain": " Identifier decimal" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00123993, + "rows_read": 2, + "bytes_read": 67 + } +} diff --git a/parser/testdata/01732_bigint_ubsan/metadata.json b/parser/testdata/01732_bigint_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01732_bigint_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01732_bigint_ubsan/query.sql b/parser/testdata/01732_bigint_ubsan/query.sql new file mode 100644 index 000000000..42d9fee45 --- /dev/null +++ b/parser/testdata/01732_bigint_ubsan/query.sql @@ -0,0 +1,11 @@ +CREATE TEMPORARY TABLE decimal +( + f dec(38, 38) +); + +INSERT INTO decimal VALUES (0); +INSERT INTO decimal VALUES (0.42); +INSERT INTO decimal VALUES (-0.42); + +SELECT f + 1048575, f - 21, f - 84, f * 21, f * -21, f / 21, f / 84 FROM decimal WHERE f > 0; -- { serverError DECIMAL_OVERFLOW } +SELECT f + -2, f - 21, f - 84, f * 21, f * -21, f / 9223372036854775807, f / 84 FROM decimal WHERE f > 0; -- { serverError DECIMAL_OVERFLOW } diff --git a/parser/testdata/01732_explain_syntax_union_query/ast.json b/parser/testdata/01732_explain_syntax_union_query/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01732_explain_syntax_union_query/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01732_explain_syntax_union_query/metadata.json b/parser/testdata/01732_explain_syntax_union_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01732_explain_syntax_union_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01732_explain_syntax_union_query/query.sql b/parser/testdata/01732_explain_syntax_union_query/query.sql new file mode 100644 index 000000000..c35021090 --- /dev/null +++ b/parser/testdata/01732_explain_syntax_union_query/query.sql @@ -0,0 +1,86 @@ +EXPLAIN SYNTAX +SELECT 1 +UNION ALL +( + SELECT 1 + UNION ALL + ( + SELECT 1 + UNION ALL + SELECT 1 + ) + UNION ALL + SELECT 1 +); + +SELECT '-'; + +EXPLAIN SYNTAX +SELECT 1 +UNION ALL +( + SELECT 1 + UNION DISTINCT + ( + SELECT 1 + UNION ALL + SELECT 1 + ) + UNION ALL + SELECT 1 +); + +SELECT '-'; + +EXPLAIN SYNTAX +SELECT x +FROM +( + SELECT 1 AS x + UNION ALL + ( + SELECT 1 + UNION DISTINCT + ( + SELECT 1 + UNION ALL + SELECT 1 + ) + UNION ALL + SELECT 1 + ) +); + +SELECT '-'; + +EXPLAIN SYNTAX +SELECT x +FROM +( + SELECT 1 AS x + UNION ALL + ( + SELECT 1 + UNION ALL + SELECT 1 + ) +); + +SELECT '-'; + +EXPLAIN SYNTAX +SELECT 1 +UNION ALL +SELECT 1 +UNION DISTINCT +SELECT 1; + +SELECT '-'; + +EXPLAIN SYNTAX +(((((((((((((((SELECT 1))))))))))))))); + +SELECT '-'; + +EXPLAIN SYNTAX +(((((((((((((((SELECT 1 UNION DISTINCT SELECT 1))) UNION DISTINCT SELECT 1)))) UNION ALL SELECT 1)))))))); diff --git a/parser/testdata/01732_more_consistent_datetime64_parsing/ast.json b/parser/testdata/01732_more_consistent_datetime64_parsing/ast.json new file mode 100644 index 000000000..7640d82bd --- /dev/null +++ b/parser/testdata/01732_more_consistent_datetime64_parsing/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 2)" + }, + { + "explain": " Identifier t" + }, + { 
+ "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration i (children 1)" + }, + { + "explain": " DataType UInt8" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType DateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal 'UTC'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001152624, + "rows_read": 11, + "bytes_read": 385 + } +} diff --git a/parser/testdata/01732_more_consistent_datetime64_parsing/metadata.json b/parser/testdata/01732_more_consistent_datetime64_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01732_more_consistent_datetime64_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01732_more_consistent_datetime64_parsing/query.sql b/parser/testdata/01732_more_consistent_datetime64_parsing/query.sql new file mode 100644 index 000000000..7e7fe3f2e --- /dev/null +++ b/parser/testdata/01732_more_consistent_datetime64_parsing/query.sql @@ -0,0 +1,11 @@ +CREATE TEMPORARY TABLE t (i UInt8, x DateTime64(3, 'UTC')); +INSERT INTO t VALUES (1, 1111111111222); +INSERT INTO t VALUES (2, 1111111111.222); +INSERT INTO t VALUES (3, '1111111111222'); +INSERT INTO t VALUES (4, '1111111111.222'); +SELECT * FROM t ORDER BY i; + +SELECT toDateTime64(1111111111.222, 3, 'Asia/Istanbul'); +SELECT toDateTime64('1111111111.222', 3, 'Asia/Istanbul'); +SELECT toDateTime64('1111111111222', 3, 'Asia/Istanbul'); +SELECT ignore(toDateTime64(1111111111222, 3, 'Asia/Istanbul')); -- This gives somewhat correct but unexpected result diff --git a/parser/testdata/01732_union_and_union_all/ast.json b/parser/testdata/01732_union_and_union_all/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01732_union_and_union_all/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01732_union_and_union_all/metadata.json b/parser/testdata/01732_union_and_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01732_union_and_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01732_union_and_union_all/query.sql b/parser/testdata/01732_union_and_union_all/query.sql new file mode 100644 index 000000000..e1108d046 --- /dev/null +++ b/parser/testdata/01732_union_and_union_all/query.sql @@ -0,0 +1 @@ +select 1 UNION select 1 UNION ALL select 1; -- { serverError EXPECTED_ALL_OR_DISTINCT } diff --git a/parser/testdata/01733_transform_ubsan/ast.json b/parser/testdata/01733_transform_ubsan/ast.json new file mode 100644 index 000000000..17d7a7525 --- /dev/null +++ b/parser/testdata/01733_transform_ubsan/ast.json @@ -0,0 +1,124 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayStringConcat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function 
tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function transform (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal Array_[UInt64_1025, Int64_-9223372036854775808, UInt64_65537, UInt64_257, UInt64_1048576, UInt64_10, UInt64_7, UInt64_1048575, UInt64_65536]" + }, + { + "explain": " Literal Array_['censor.net', 'googlegooglegooglegoogle', 'test', '', '', 'hello', 'world', '', 'xyz']" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal Float64_-inf" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 34, + + "statistics": + { + "elapsed": 0.001748661, + "rows_read": 34, + "bytes_read": 1618 + } +} diff --git a/parser/testdata/01733_transform_ubsan/metadata.json b/parser/testdata/01733_transform_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01733_transform_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01733_transform_ubsan/query.sql b/parser/testdata/01733_transform_ubsan/query.sql new file mode 100644 index 000000000..7c3d8ef65 --- /dev/null +++ b/parser/testdata/01733_transform_ubsan/query.sql @@ -0,0 +1,4 @@ +SELECT arrayStringConcat(arrayMap(x -> transform(x, [1025, -9223372036854775808, 65537, 257, 1048576, 10, 7, 1048575, 65536], ['censor.net', 'googlegooglegooglegoogle', 'test', '', '', 'hello', 'world', '', 'xyz'], ''), arrayMap(x -> (x % -inf), range(number))), '') +FROM system.numbers +LIMIT 1025 +FORMAT Null; diff --git a/parser/testdata/01734_datetime64_from_float/ast.json b/parser/testdata/01734_datetime64_from_float/ast.json new file mode 100644 index 000000000..21d788ffd --- /dev/null +++ b/parser/testdata/01734_datetime64_from_float/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_1111111111.222" + }, + { + "explain": " Literal 'DateTime64(3, \\'Asia\/Istanbul\\')'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.000947011, + "rows_read": 8, + "bytes_read": 327 + } +} diff --git a/parser/testdata/01734_datetime64_from_float/metadata.json b/parser/testdata/01734_datetime64_from_float/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01734_datetime64_from_float/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01734_datetime64_from_float/query.sql 
b/parser/testdata/01734_datetime64_from_float/query.sql new file mode 100644 index 000000000..9e6237932 --- /dev/null +++ b/parser/testdata/01734_datetime64_from_float/query.sql @@ -0,0 +1,22 @@ +SELECT CAST(1111111111.222 AS DateTime64(3, 'Asia/Istanbul')); +SELECT toDateTime(1111111111.222, 3, 'Asia/Istanbul'); +SELECT toDateTime64(1111111111.222, 3, 'Asia/Istanbul'); + +SELECT toDateTime64(0.0, 9, 'UTC') ; +SELECT toDateTime64(0, 9, 'UTC'); + +SELECT toDateTime64(-2200000000.0, 9, 'UTC'); -- 1900-01-01 < value +SELECT toDateTime64(-2200000000, 9, 'UTC'); + +SELECT toDateTime64(-2300000000.0, 9, 'UTC'); -- value < 1900-01-01 +SELECT toDateTime64(-2300000000, 9, 'UTC'); + +SELECT toDateTime64(-999999999999.0, 9, 'UTC'); -- value << 1900-01-01 +SELECT toDateTime64(-999999999999, 9, 'UTC'); + +SELECT toDateTime64(9200000000.0, 9, 'UTC'); -- value < 2262-04-11 +SELECT toDateTime64(9200000000, 9, 'UTC'); + +SELECT toDateTime64(9300000000.0, 9, 'UTC'); -- { serverError DECIMAL_OVERFLOW } # 2262-04-11 < value +SELECT toDateTime64(9300000000, 9, 'UTC'); -- { serverError DECIMAL_OVERFLOW } + diff --git a/parser/testdata/01735_join_get_low_card_fix/ast.json b/parser/testdata/01735_join_get_low_card_fix/ast.json new file mode 100644 index 000000000..7e5ef59c4 --- /dev/null +++ b/parser/testdata/01735_join_get_low_card_fix/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery join_tbl (children 1)" + }, + { + "explain": " Identifier join_tbl" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001136235, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01735_join_get_low_card_fix/metadata.json b/parser/testdata/01735_join_get_low_card_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01735_join_get_low_card_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01735_join_get_low_card_fix/query.sql b/parser/testdata/01735_join_get_low_card_fix/query.sql new file mode 100644 index 000000000..e20021123 --- /dev/null +++ b/parser/testdata/01735_join_get_low_card_fix/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS join_tbl; + +CREATE TABLE join_tbl (`id` String, `name` String, lcname LowCardinality(String)) ENGINE = Join(any, left, id); + +INSERT INTO join_tbl VALUES ('xxx', 'yyy', 'yyy'); + +SELECT joinGet('join_tbl', 'name', 'xxx') == 'yyy'; +SELECT joinGet('join_tbl', 'name', toLowCardinality('xxx')) == 'yyy'; +SELECT joinGet('join_tbl', 'name', toLowCardinality(materialize('xxx'))) == 'yyy'; +SELECT joinGet('join_tbl', 'lcname', 'xxx') == 'yyy'; +SELECT joinGet('join_tbl', 'lcname', toLowCardinality('xxx')) == 'yyy'; +SELECT joinGet('join_tbl', 'lcname', toLowCardinality(materialize('xxx'))) == 'yyy'; + +DROP TABLE IF EXISTS join_tbl; diff --git a/parser/testdata/01735_to_datetime64/ast.json b/parser/testdata/01735_to_datetime64/ast.json new file mode 100644 index 000000000..ed0da6bc3 --- /dev/null +++ b/parser/testdata/01735_to_datetime64/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDate (children 
1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function today (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function today (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal 'UTC'" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001227273, + "rows_read": 21, + "bytes_read": 835 + } +} diff --git a/parser/testdata/01735_to_datetime64/metadata.json b/parser/testdata/01735_to_datetime64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01735_to_datetime64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01735_to_datetime64/query.sql b/parser/testdata/01735_to_datetime64/query.sql new file mode 100644 index 000000000..fe4eb5211 --- /dev/null +++ b/parser/testdata/01735_to_datetime64/query.sql @@ -0,0 +1 @@ +SELECT toDate(toDateTime64(today(), 0, 'UTC')) = toDate(toDateTime(today(), 'UTC')); diff --git a/parser/testdata/01736_null_as_default/ast.json b/parser/testdata/01736_null_as_default/ast.json new file mode 100644 index 000000000..173687b04 --- /dev/null +++ b/parser/testdata/01736_null_as_default/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_enum (children 1)" + }, + { + "explain": " Identifier test_enum" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001228269, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/01736_null_as_default/metadata.json b/parser/testdata/01736_null_as_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01736_null_as_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01736_null_as_default/query.sql b/parser/testdata/01736_null_as_default/query.sql new file mode 100644 index 000000000..c897d035a --- /dev/null +++ b/parser/testdata/01736_null_as_default/query.sql @@ -0,0 +1,7 @@ +drop table if exists test_enum; +create table test_enum (c Nullable(Enum16('A' = 1, 'B' = 2))) engine Log; +insert into test_enum values (1), (NULL); +select * from test_enum; +select toString(c) from test_enum; +select toString('aaaa', NULL); +drop table test_enum; diff --git a/parser/testdata/01737_move_order_key_to_prewhere_select_final/ast.json b/parser/testdata/01737_move_order_key_to_prewhere_select_final/ast.json new file mode 100644 index 000000000..af9ca77a3 --- /dev/null +++ b/parser/testdata/01737_move_order_key_to_prewhere_select_final/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000995139, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01737_move_order_key_to_prewhere_select_final/metadata.json b/parser/testdata/01737_move_order_key_to_prewhere_select_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01737_move_order_key_to_prewhere_select_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01737_move_order_key_to_prewhere_select_final/query.sql b/parser/testdata/01737_move_order_key_to_prewhere_select_final/query.sql new file mode 100644 index 000000000..d4830e9e3 --- /dev/null +++ b/parser/testdata/01737_move_order_key_to_prewhere_select_final/query.sql @@ -0,0 +1,36 @@ +SET optimize_move_to_prewhere = 1; +SET convert_query_to_cnf = 0; + +DROP TABLE IF EXISTS prewhere_move_select_final; + +CREATE TABLE prewhere_move_select_final (x Int, y Int, z Int) ENGINE = ReplacingMergeTree() ORDER BY (x, y); +INSERT INTO prewhere_move_select_final SELECT number, number * 2, number * 3 FROM numbers(1000); + +select 'optimize_move_to_prewhere_if_final = 1'; +SET optimize_move_to_prewhere_if_final = 1; + +-- order key can be pushed down with final +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final WHERE x > 100) WHERE explain LIKE '%Prewhere%'; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE x > 100) WHERE explain LIKE '%Prewhere%'; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final WHERE y > 100) WHERE explain LIKE '%Prewhere%'; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE y > 100) WHERE explain LIKE '%Prewhere%'; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final WHERE x + y > 100) WHERE explain LIKE '%Prewhere%'; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE x + y > 100) WHERE explain LIKE '%Prewhere%'; + +-- can not be pushed down +SELECT * FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE z > 400) WHERE explain LIKE '%Prewhere filter'; + +-- only condition with x/y can be pushed down +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE y > 100 and z > 400) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter%'; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE x > 50 and z > 400) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter%'; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE x + y > 50 and z > 400) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter%'; + +select 'optimize_move_to_prewhere_if_final = 0'; +SET optimize_move_to_prewhere_if_final = 0; + +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final WHERE y > 100) WHERE explain LIKE '%Prewhere%'; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE y > 100) WHERE explain LIKE '%Prewhere%'; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE z > 400) WHERE explain LIKE '%Prewhere%'; +SELECT 
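+-- with optimize_move_to_prewhere_if_final = 0, the FINAL queries in this section are expected to keep their conditions in WHERE rather than PREWHERE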
replaceRegexpAll(explain, '__table1\.|_UInt8|_UInt16', '') FROM (EXPLAIN actions=1 SELECT * FROM prewhere_move_select_final FINAL WHERE y > 100 and z > 400) WHERE explain LIKE '%Prewhere%'; + +DROP TABLE prewhere_move_select_final; diff --git a/parser/testdata/01739_index_hint/ast.json b/parser/testdata/01739_index_hint/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01739_index_hint/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01739_index_hint/metadata.json b/parser/testdata/01739_index_hint/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01739_index_hint/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01739_index_hint/query.sql b/parser/testdata/01739_index_hint/query.sql new file mode 100644 index 000000000..b208063e7 --- /dev/null +++ b/parser/testdata/01739_index_hint/query.sql @@ -0,0 +1,45 @@ +-- { echo } + +drop table if exists tbl; + +create table tbl (p Int64, t Int64, f Float64) Engine=MergeTree partition by p order by t settings index_granularity=1; + +insert into tbl select number / 4, number, 0 from numbers(16); + +select * from tbl WHERE indexHint(t = 1) order by t; + +select * from tbl WHERE indexHint(t in (select toInt64(number) + 2 from numbers(3))) order by t; + +select * from tbl WHERE indexHint(p = 2) order by t; + +select * from tbl WHERE indexHint(p in (select toInt64(number) - 2 from numbers(3))) order by t; + +drop table tbl; + +drop table if exists XXXX; + +create table XXXX (t Int64, f Float64) Engine=MergeTree order by t settings index_granularity=128, index_granularity_bytes = '10Mi'; + +insert into XXXX select number*60, 0 from numbers(100000); + +SELECT sum(t) FROM XXXX WHERE indexHint(t = 42); + +drop table if exists XXXX; + +create table XXXX (t Int64, f Float64) Engine=MergeTree order by t settings index_granularity=8192, index_granularity_bytes = '10Mi'; + +insert into XXXX select number*60, 0 from numbers(100000); + +SELECT count() FROM XXXX WHERE indexHint(t = toDateTime(0)) SETTINGS optimize_use_implicit_projections = 1; + +drop table XXXX; + +CREATE TABLE XXXX (p Nullable(Int64), k Decimal(76, 39)) ENGINE = MergeTree PARTITION BY toDate(p) ORDER BY k SETTINGS index_granularity = 1, allow_nullable_key = 1; + +INSERT INTO XXXX FORMAT Values ('2020-09-01 00:01:02', 1), ('2020-09-01 20:01:03', 2), ('2020-09-02 00:01:03', 3); + +SELECT count() FROM XXXX WHERE indexHint(p = 1.) SETTINGS optimize_use_implicit_projections = 1, enable_analyzer=0; +-- TODO: optimize_use_implicit_projections ignores indexHint (with analyzer) because source columns might be aliased. +SELECT count() FROM XXXX WHERE indexHint(p = 1.) 
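+-- as the TODO above notes, with the analyzer the implicit projection ignores indexHint, so this count comes from the actual rows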
SETTINGS optimize_use_implicit_projections = 1, enable_analyzer=1; + +drop table XXXX; diff --git a/parser/testdata/01744_tuple_cast_to_map_bugfix/ast.json b/parser/testdata/01744_tuple_cast_to_map_bugfix/ast.json new file mode 100644 index 000000000..8a7ed9d03 --- /dev/null +++ b/parser/testdata/01744_tuple_cast_to_map_bugfix/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (alias map) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_['1', '2', '3']" + }, + { + "explain": " Literal Array_['Ready', 'Steady', 'Go']" + }, + { + "explain": " Literal 'Map(UInt8, String)'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001347128, + "rows_read": 11, + "bytes_read": 464 + } +} diff --git a/parser/testdata/01744_tuple_cast_to_map_bugfix/metadata.json b/parser/testdata/01744_tuple_cast_to_map_bugfix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01744_tuple_cast_to_map_bugfix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01744_tuple_cast_to_map_bugfix/query.sql b/parser/testdata/01744_tuple_cast_to_map_bugfix/query.sql new file mode 100644 index 000000000..c5c50c5b0 --- /dev/null +++ b/parser/testdata/01744_tuple_cast_to_map_bugfix/query.sql @@ -0,0 +1,3 @@ +SELECT CAST((['1', '2', '3'], ['Ready', 'Steady', 'Go']), 'Map(UInt8, String)') AS map; +SELECT CAST((['1', '2', '3'], ['Ready', 'Steady', 'Go']), 'Map(UInt8, String)') AS map; +SELECT CAST((['1', '2', '3'], ['Ready', 'Steady', 'Go']), 'Map(UInt8, String)') AS map; diff --git a/parser/testdata/01745_alter_delete_view/ast.json b/parser/testdata/01745_alter_delete_view/ast.json new file mode 100644 index 000000000..f1fd39085 --- /dev/null +++ b/parser/testdata/01745_alter_delete_view/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_view (children 1)" + }, + { + "explain": " Identifier test_view" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001079561, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/01745_alter_delete_view/metadata.json b/parser/testdata/01745_alter_delete_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01745_alter_delete_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01745_alter_delete_view/query.sql b/parser/testdata/01745_alter_delete_view/query.sql new file mode 100644 index 000000000..e4715b16b --- /dev/null +++ b/parser/testdata/01745_alter_delete_view/query.sql @@ -0,0 +1,28 @@ +DROP VIEW IF EXISTS test_view; +DROP TABLE IF EXISTS test_table; + +CREATE TABLE test_table +( + f1 Int32, + f2 Int32, + pk Int32 +) +ENGINE = MergeTree() +ORDER BY f1 +PARTITION BY pk; + +CREATE VIEW test_view AS +SELECT f1, f2 +FROM test_table +WHERE pk = 2; + +INSERT INTO test_table (f1, f2, pk) VALUES (1,1,1), (1,1,2), (2,1,1), (2,1,2); + +SELECT * FROM test_view ORDER BY f1, f2; + +ALTER TABLE test_view DELETE WHERE pk = 2; --{serverError NOT_IMPLEMENTED} + +SELECT * FROM 
test_view ORDER BY f1, f2; + +DROP VIEW IF EXISTS test_view; +DROP TABLE IF EXISTS test_table; diff --git a/parser/testdata/01746_convert_type_with_default/ast.json b/parser/testdata/01746_convert_type_with_default/ast.json new file mode 100644 index 000000000..5d8402de6 --- /dev/null +++ b/parser/testdata/01746_convert_type_with_default/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUInt8OrDefault (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1'" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal 'UInt8'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001206631, + "rows_read": 11, + "bytes_read": 409 + } +} diff --git a/parser/testdata/01746_convert_type_with_default/metadata.json b/parser/testdata/01746_convert_type_with_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01746_convert_type_with_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01746_convert_type_with_default/query.sql b/parser/testdata/01746_convert_type_with_default/query.sql new file mode 100644 index 000000000..5ef771878 --- /dev/null +++ b/parser/testdata/01746_convert_type_with_default/query.sql @@ -0,0 +1,83 @@ +select toUInt8OrDefault('1', cast(2 as UInt8)); +select toUInt8OrDefault('1xx', cast(2 as UInt8)); +select toInt8OrDefault('-1', cast(-2 as Int8)); +select toInt8OrDefault('-1xx', cast(-2 as Int8)); + +select toUInt16OrDefault('1', cast(2 as UInt16)); +select toUInt16OrDefault('1xx', cast(2 as UInt16)); +select toInt16OrDefault('-1', cast(-2 as Int16)); +select toInt16OrDefault('-1xx', cast(-2 as Int16)); + +select toUInt32OrDefault('1', cast(2 as UInt32)); +select toUInt32OrDefault('1xx', cast(2 as UInt32)); +select toInt32OrDefault('-1', cast(-2 as Int32)); +select toInt32OrDefault('-1xx', cast(-2 as Int32)); + +select toUInt64OrDefault('1', cast(2 as UInt64)); +select toUInt64OrDefault('1xx', cast(2 as UInt64)); +select toInt64OrDefault('-1', cast(-2 as Int64)); +select toInt64OrDefault('-1xx', cast(-2 as Int64)); + +select toInt128OrDefault('-1', cast(-2 as Int128)); +select toInt128OrDefault('-1xx', cast(-2 as Int128)); + +select toUInt256OrDefault('1', cast(2 as UInt256)); +select toUInt256OrDefault('1xx', cast(2 as UInt256)); +select toInt256OrDefault('-1', cast(-2 as Int256)); +select toInt256OrDefault('-1xx', cast(-2 as Int256)); + +SELECT toUUIDOrDefault('61f0c404-5cb3-11e7-907b-a6006ad3dba0', cast('59f0c404-5cb3-11e7-907b-a6006ad3dba0' as UUID)); +SELECT toUUIDOrDefault('-----61f0c404-5cb3-11e7-907b-a6006ad3dba0', cast('59f0c404-5cb3-11e7-907b-a6006ad3dba0' as UUID)); + +select toDateOrDefault('1xxx'); +select toDateOrDefault('2023-05-30'); +select toDateOrDefault('2023-05-30', '2000-01-01'::Date); +select toDateOrDefault('1xx', '2023-05-30'::Date); +select toDateOrDefault(-1); + +select toDateOrDefault(cast(19 as Int8)); +select toDateOrDefault(cast(19 as UInt8)); + +select toDateOrDefault(cast(19 as Int16)); +select toDateOrDefault(cast(19 as UInt16)); + +select toDateOrDefault(cast(19 as Int32)); +select 
toDateOrDefault(cast(19 as UInt32)); + +select toDateOrDefault(cast(19 as Int64)); +select toDateOrDefault(cast(19 as UInt64)); + +select toDateOrDefault(cast(19 as Int128)); +select toDateOrDefault(cast(19 as UInt128)); + +select toDateOrDefault(cast(19 as Int256)); +select toDateOrDefault(cast(19 as UInt256)); + +select toDateOrDefault(65535); +select toDateOrDefault(65536) in ('1970-01-01', '1970-01-02'); + +select toDateOrDefault(19507, '2000-01-01'::Date); +select toDateOrDefault(-1, '2023-05-30'::Date); + +select toDateTimeOrDefault('2023-05-30 14:38:20', 'UTC'); +select toDateTimeOrDefault('1xxx', 'UTC', '2023-05-30 14:38:20'::DateTime('UTC')); +select toDateTimeOrDefault(1685457500, 'UTC'); +select toDateTimeOrDefault(-1, 'UTC', '2023-05-30 14:38:20'::DateTime('UTC')); + +select toDateTimeOrDefault(cast(19 as Int8), 'UTC'); +select toDateTimeOrDefault(cast(19 as UInt8), 'UTC'); + +select toDateTimeOrDefault(cast(19 as Int16), 'UTC'); +select toDateTimeOrDefault(cast(19 as UInt16), 'UTC'); + +select toDateTimeOrDefault(cast(19 as Int32), 'UTC'); +select toDateTimeOrDefault(cast(19 as UInt32), 'UTC'); + +select toDateTimeOrDefault(cast(19 as Int64), 'UTC'); +select toDateTimeOrDefault(cast(19 as UInt64), 'UTC'); + +select toDateTimeOrDefault(cast(19 as Int128), 'UTC'); +select toDateTimeOrDefault(cast(19 as UInt128), 'UTC'); + +select toDateTimeOrDefault(cast(19 as Int256), 'UTC'); +select toDateTimeOrDefault(cast(19 as UInt256), 'UTC'); diff --git a/parser/testdata/01746_executable_pool_dictionary/ast.json b/parser/testdata/01746_executable_pool_dictionary/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01746_executable_pool_dictionary/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01746_executable_pool_dictionary/metadata.json b/parser/testdata/01746_executable_pool_dictionary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01746_executable_pool_dictionary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01746_executable_pool_dictionary/query.sql b/parser/testdata/01746_executable_pool_dictionary/query.sql new file mode 100644 index 000000000..69c37d127 --- /dev/null +++ b/parser/testdata/01746_executable_pool_dictionary/query.sql @@ -0,0 +1,17 @@ +-- Tags: no-parallel + +SELECT 'executable_pool_simple'; + +SELECT dictGet('executable_pool_simple', 'a', toUInt64(1)); +SELECT dictGet('executable_pool_simple', 'b', toUInt64(1)); + +SELECT dictGet('executable_pool_simple', 'a', toUInt64(2)); +SELECT dictGet('executable_pool_simple', 'b', toUInt64(2)); + +SELECT 'executable_pool_complex'; + +SELECT dictGet('executable_pool_complex', 'a', ('First_1', 'Second_1')); +SELECT dictGet('executable_pool_complex', 'b', ('First_1', 'Second_1')); + +SELECT dictGet('executable_pool_complex', 'a', ('First_2', 'Second_2')); +SELECT dictGet('executable_pool_complex', 'b', ('First_2', 'Second_2')); diff --git a/parser/testdata/01746_extract_text_from_html/ast.json b/parser/testdata/01746_extract_text_from_html/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01746_extract_text_from_html/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01746_extract_text_from_html/metadata.json b/parser/testdata/01746_extract_text_from_html/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01746_extract_text_from_html/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01746_extract_text_from_html/query.sql b/parser/testdata/01746_extract_text_from_html/query.sql new file mode 100644 index 000000000..b4ccc775b --- /dev/null +++ b/parser/testdata/01746_extract_text_from_html/query.sql @@ -0,0 +1,72 @@ +-- { echo } + +SELECT extractTextFromHTML(''); +SELECT extractTextFromHTML(' '); +SELECT extractTextFromHTML(' '); +SELECT extractTextFromHTML('Hello'); +SELECT extractTextFromHTML('Hello, world'); +SELECT extractTextFromHTML('Hello, world'); +SELECT extractTextFromHTML(' Hello, world'); +SELECT extractTextFromHTML(' Hello, world '); +SELECT extractTextFromHTML(' \t Hello,\rworld \n '); + +SELECT extractTextFromHTML('Hello<world'); +SELECT extractTextFromHTML('Hello < world'); +SELECT extractTextFromHTML('Hello > world'); +SELECT extractTextFromHTML('Hello<world>'); +SELECT extractTextFromHTML('Hello<>world'); +SELECT extractTextFromHTML('Hello<!>world'); +SELECT extractTextFromHTML('Hello<!->world'); +SELECT extractTextFromHTML('Hello<!-->world'); +SELECT extractTextFromHTML('Hello<!--->world'); +SELECT extractTextFromHTML('Hello<!---->world'); + +SELECT extractTextFromHTML('Hello <!-- --> World'); +SELECT extractTextFromHTML('Hello<!-- --> World'); +SELECT extractTextFromHTML('Hello<!-- -->World'); +SELECT extractTextFromHTML('Hello <!-- -->World'); +SELECT extractTextFromHTML('Hello <u> World</u>'); +SELECT extractTextFromHTML('Hello <u>World</u>'); +SELECT extractTextFromHTML('Hello<u>World</u>'); +SELECT extractTextFromHTML('Hello<u> World</u>'); + +SELECT extractTextFromHTML('<![CDATA[ \t Hello,\rworld \n ]]>'); +SELECT extractTextFromHTML('Hello <![CDATA[Hello\tworld]]> world!'); +SELECT extractTextFromHTML('Hello<![CDATA[Hello\tworld]]>world!'); + +SELECT extractTextFromHTML('Hello <![CDATA[Hello <b>world</b>]]> world!'); +SELECT extractTextFromHTML('<![CDATA[<sender>John Smith</sender>]]>'); +SELECT extractTextFromHTML('<![CDATA[<sender>John <![CDATA[Smith</sender>]]>'); +SELECT extractTextFromHTML('<![CDATA[<sender>John <![CDATA[]]>Smith</sender>]]>'); +SELECT extractTextFromHTML('<![CDATA[<sender>John ]]><![CDATA[Smith</sender>]]>'); +SELECT extractTextFromHTML('<![CDATA[<sender>John ]]> <![CDATA[Smith</sender>]]>'); +SELECT extractTextFromHTML('<![CDATA[<sender>John]]> <![CDATA[Smith</sender>]]>'); +SELECT extractTextFromHTML('<![CDATA[<sender>John ]]>]]><![CDATA[Smith</sender>]]>'); + +SELECT extractTextFromHTML('Hello<script>World</script> goodbye'); +SELECT extractTextFromHTML('Hello<script >World</script> goodbye'); +SELECT extractTextFromHTML('Hello<scripta>World</scripta> goodbye'); +SELECT extractTextFromHTML('Hello<script type="text/javascript">World</script> goodbye'); +SELECT extractTextFromHTML('Hello<style type="text/css">World</style> goodbye'); +SELECT extractTextFromHTML('Hello<script:p>World</script:p> goodbye'); +SELECT extractTextFromHTML('Hello<script:p type="text/javascript">World</script:p> goodbye'); + +SELECT extractTextFromHTML('Hello<style type="text/css">World <!-- abc --> </style> goodbye'); +SELECT extractTextFromHTML('Hello<style type="text/css">World <!-- abc --> </style \n > goodbye'); +SELECT extractTextFromHTML('Hello<style type="text/css">World <!-- abc --> </ style> goodbye'); +SELECT extractTextFromHTML('Hello<style type="text/css">World <!-- abc --> </stylea> goodbye'); + +SELECT extractTextFromHTML('Hello<style type="text/css">World <![CDATA[</style>]]> </stylea> goodbye'); +SELECT extractTextFromHTML('Hello<style type="text/css">World <![CDATA[</style>]]> </style> goodbye'); +SELECT 
extractTextFromHTML('Hello<style type="text/css">World <![CDAT[</style>]]> </style> goodbye'); +SELECT extractTextFromHTML('Hello<style type="text/css">World <![endif]--> </style> goodbye'); +SELECT extractTextFromHTML('Hello<style type="text/css">World <script>abc</script> </stylea> goodbye'); +SELECT extractTextFromHTML('Hello<style type="text/css">World <script>abc</script> </style> goodbye'); + +SELECT extractTextFromHTML('<![CDATA[]]]]><![CDATA[>]]>'); + +SELECT extractTextFromHTML(' +<img src="pictures/power.png" style="margin-bottom: -30px;" /> +<br><span style="padding-right: 10px; font-size: 10px;">xkcd.com</span> +</div> +'); diff --git a/parser/testdata/01746_forbid_drop_column_referenced_by_mv/ast.json b/parser/testdata/01746_forbid_drop_column_referenced_by_mv/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01746_forbid_drop_column_referenced_by_mv/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01746_forbid_drop_column_referenced_by_mv/metadata.json b/parser/testdata/01746_forbid_drop_column_referenced_by_mv/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01746_forbid_drop_column_referenced_by_mv/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01746_forbid_drop_column_referenced_by_mv/query.sql b/parser/testdata/01746_forbid_drop_column_referenced_by_mv/query.sql new file mode 100644 index 000000000..bac417625 --- /dev/null +++ b/parser/testdata/01746_forbid_drop_column_referenced_by_mv/query.sql @@ -0,0 +1,172 @@ +-- MergeTree +DROP TABLE IF EXISTS `01746_merge_tree`; +CREATE TABLE `01746_merge_tree` +( + `n1` Int8, + `n2` Int8, + `n3` Int8, + `n4` Int8 +) +ENGINE = MergeTree +ORDER BY n1; + +DROP TABLE IF EXISTS `01746_merge_tree_mv`; +CREATE MATERIALIZED VIEW `01746_merge_tree_mv` +ENGINE = Memory AS +SELECT + n2, + n3 +FROM `01746_merge_tree`; + +ALTER TABLE `01746_merge_tree` + DROP COLUMN n3; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +ALTER TABLE `01746_merge_tree` + DROP COLUMN n2; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +-- ok +ALTER TABLE `01746_merge_tree` + DROP COLUMN n4; + +DROP TABLE `01746_merge_tree`; +DROP TABLE `01746_merge_tree_mv`; + +-- Null +DROP TABLE IF EXISTS `01746_null`; +CREATE TABLE `01746_null` +( + `n1` Int8, + `n2` Int8, + `n3` Int8 +) +ENGINE = Null; + +DROP TABLE IF EXISTS `01746_null_mv`; +CREATE MATERIALIZED VIEW `01746_null_mv` +ENGINE = Memory AS +SELECT + n1, + n2 +FROM `01746_null`; + +ALTER TABLE `01746_null` + DROP COLUMN n1; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +ALTER TABLE `01746_null` + DROP COLUMN n2; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +-- ok +ALTER TABLE `01746_null` + DROP COLUMN n3; + +DROP TABLE `01746_null`; +DROP TABLE `01746_null_mv`; + +-- Distributed + +DROP TABLE IF EXISTS `01746_local`; +CREATE TABLE `01746_local` +( + `n1` Int8, + `n2` Int8, + `n3` Int8 +) +ENGINE = Memory; + +DROP TABLE IF EXISTS `01746_dist`; +CREATE TABLE `01746_dist` AS `01746_local` +ENGINE = Distributed('test_shard_localhost', currentDatabase(), `01746_local`, rand()); + +DROP TABLE IF EXISTS `01746_dist_mv`; +CREATE MATERIALIZED VIEW `01746_dist_mv` +ENGINE = Memory AS +SELECT + n1, + n2 +FROM `01746_dist`; + +ALTER TABLE `01746_dist` + DROP COLUMN n1; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +ALTER TABLE `01746_dist` + DROP COLUMN n2; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +-- ok +ALTER TABLE `01746_dist` + DROP COLUMN n3; + +DROP TABLE 
`01746_local`; +DROP TABLE `01746_dist`; +DROP TABLE `01746_dist_mv`; + +-- Merge +DROP TABLE IF EXISTS `01746_merge_t`; +CREATE TABLE `01746_merge_t` +( + `n1` Int8, + `n2` Int8, + `n3` Int8 +) +ENGINE = Memory; + +DROP TABLE IF EXISTS `01746_merge`; +CREATE TABLE `01746_merge` AS `01746_merge_t` +ENGINE = Merge(currentDatabase(), '01746_merge_t'); + +DROP TABLE IF EXISTS `01746_merge_mv`; +CREATE MATERIALIZED VIEW `01746_merge_mv` +ENGINE = Memory AS +SELECT + n1, + n2 +FROM `01746_merge`; + +ALTER TABLE `01746_merge` + DROP COLUMN n1; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +ALTER TABLE `01746_merge` + DROP COLUMN n2; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +-- ok +ALTER TABLE `01746_merge` + DROP COLUMN n3; + +DROP TABLE `01746_merge_t`; +DROP TABLE `01746_merge`; +DROP TABLE `01746_merge_mv`; + +-- Buffer +DROP TABLE IF EXISTS `01746_buffer_t`; +CREATE TABLE `01746_buffer_t` +( + `n1` Int8, + `n2` Int8, + `n3` Int8 +) +ENGINE = Memory; + +DROP TABLE IF EXISTS `01746_buffer`; +CREATE TABLE `01746_buffer` AS `01746_buffer_t` +ENGINE = Buffer(currentDatabase(), `01746_buffer_t`, 16, 10, 100, 10000, 1000000, 10000000, 100000000); + +DROP TABLE IF EXISTS `01746_buffer_mv`; +CREATE MATERIALIZED VIEW `01746_buffer_mv` +ENGINE = Memory AS +SELECT + n1, + n2 +FROM `01746_buffer`; + +ALTER TABLE `01746_buffer` + DROP COLUMN n1; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +ALTER TABLE `01746_buffer` + DROP COLUMN n2; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +-- ok +ALTER TABLE `01746_buffer` + DROP COLUMN n3; + +DROP TABLE `01746_buffer_t`; +DROP TABLE `01746_buffer`; +DROP TABLE `01746_buffer_mv`; diff --git a/parser/testdata/01746_lc_values_format_bug/ast.json b/parser/testdata/01746_lc_values_format_bug/ast.json new file mode 100644 index 000000000..181b22831 --- /dev/null +++ b/parser/testdata/01746_lc_values_format_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery lc_test (children 1)" + }, + { + "explain": " Identifier lc_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001146119, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/01746_lc_values_format_bug/metadata.json b/parser/testdata/01746_lc_values_format_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01746_lc_values_format_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01746_lc_values_format_bug/query.sql b/parser/testdata/01746_lc_values_format_bug/query.sql new file mode 100644 index 000000000..6717b9ae5 --- /dev/null +++ b/parser/testdata/01746_lc_values_format_bug/query.sql @@ -0,0 +1,14 @@ +drop table if exists lc_test; + +CREATE TABLE lc_test +( + `id` LowCardinality(String) +) +ENGINE = MergeTree +PARTITION BY tuple() +ORDER BY id; + +insert into lc_test values (toString('a')); + +select id from lc_test; +drop table if exists lc_test; diff --git a/parser/testdata/01746_test_for_tupleElement_must_be_constant_issue/ast.json b/parser/testdata/01746_test_for_tupleElement_must_be_constant_issue/ast.json new file mode 100644 index 000000000..865d38602 --- /dev/null +++ b/parser/testdata/01746_test_for_tupleElement_must_be_constant_issue/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ttt01746 (children 1)" + }, + { + "explain": " Identifier ttt01746" + } + ], + + "rows": 2, + + "statistics": 
+ { + "elapsed": 0.001343713, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01746_test_for_tupleElement_must_be_constant_issue/metadata.json b/parser/testdata/01746_test_for_tupleElement_must_be_constant_issue/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01746_test_for_tupleElement_must_be_constant_issue/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01746_test_for_tupleElement_must_be_constant_issue/query.sql b/parser/testdata/01746_test_for_tupleElement_must_be_constant_issue/query.sql new file mode 100644 index 000000000..585640665 --- /dev/null +++ b/parser/testdata/01746_test_for_tupleElement_must_be_constant_issue/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS ttt01746; +CREATE TABLE ttt01746 (d Date, n UInt64) ENGINE = MergeTree() PARTITION BY toMonday(d) ORDER BY n SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +INSERT INTO ttt01746 SELECT toDate('2021-02-14') + (number % 30) AS d, number AS n FROM numbers(1500000); +set optimize_move_to_prewhere=0; +SELECT arraySort(x -> x.2, [tuple('a', 10)]) AS X FROM ttt01746 WHERE d >= toDate('2021-03-03') - 2 ORDER BY n LIMIT 1; +SELECT arraySort(x -> x.2, [tuple('a', 10)]) AS X FROM ttt01746 PREWHERE d >= toDate('2021-03-03') - 2 ORDER BY n LIMIT 1; +DROP TABLE ttt01746; diff --git a/parser/testdata/01747_alter_partition_key_enum_zookeeper_long/ast.json b/parser/testdata/01747_alter_partition_key_enum_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01747_alter_partition_key_enum_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01747_alter_partition_key_enum_zookeeper_long/metadata.json b/parser/testdata/01747_alter_partition_key_enum_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01747_alter_partition_key_enum_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01747_alter_partition_key_enum_zookeeper_long/query.sql b/parser/testdata/01747_alter_partition_key_enum_zookeeper_long/query.sql new file mode 100644 index 000000000..85e3c11dd --- /dev/null +++ b/parser/testdata/01747_alter_partition_key_enum_zookeeper_long/query.sql @@ -0,0 +1,65 @@ +-- Tags: long, zookeeper + +DROP TABLE IF EXISTS report; + +CREATE TABLE report +( + `product` Enum8('IU' = 1, 'WS' = 2), + `machine` String, + `branch` String, + `generated_time` DateTime +) +ENGINE = MergeTree +PARTITION BY (product, toYYYYMM(generated_time)) +ORDER BY (product, machine, branch, generated_time); + +INSERT INTO report VALUES ('IU', 'lada', '2101', toDateTime('1970-04-19 15:00:00')); + +SELECT * FROM report WHERE product = 'IU'; + +ALTER TABLE report MODIFY COLUMN product Enum8('IU' = 1, 'WS' = 2, 'PS' = 3); + +SELECT * FROM report WHERE product = 'PS'; + +INSERT INTO report VALUES ('PS', 'jeep', 'Grand Cherokee', toDateTime('2005-10-03 15:00:00')); + +SELECT * FROM report WHERE product = 'PS'; + +DETACH TABLE report; +ATTACH TABLE report; + +SELECT * FROM report WHERE product = 'PS'; + +DROP TABLE IF EXISTS report; + +DROP TABLE IF EXISTS replicated_report; + +CREATE TABLE replicated_report +( + `product` Enum8('IU' = 1, 'WS' = 2), + `machine` String, + `branch` String, + `generated_time` DateTime +) +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/01747_alter_partition_key/t', '1') +PARTITION BY (product, toYYYYMM(generated_time)) +ORDER BY (product, machine, 
branch, generated_time); + +INSERT INTO replicated_report VALUES ('IU', 'lada', '2101', toDateTime('1970-04-19 15:00:00')); + +SELECT * FROM replicated_report WHERE product = 'IU'; + +ALTER TABLE replicated_report MODIFY COLUMN product Enum8('IU' = 1, 'WS' = 2, 'PS' = 3) SETTINGS alter_sync=2; + +SELECT * FROM replicated_report WHERE product = 'PS'; + +INSERT INTO replicated_report VALUES ('PS', 'jeep', 'Grand Cherokee', toDateTime('2005-10-03 15:00:00')); + +SELECT * FROM replicated_report WHERE product = 'PS'; + +DETACH TABLE replicated_report; +ATTACH TABLE replicated_report; + +SELECT * FROM replicated_report WHERE product = 'PS'; + +DROP TABLE IF EXISTS replicated_report; diff --git a/parser/testdata/01747_executable_pool_dictionary_implicit_key/ast.json b/parser/testdata/01747_executable_pool_dictionary_implicit_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01747_executable_pool_dictionary_implicit_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01747_executable_pool_dictionary_implicit_key/metadata.json b/parser/testdata/01747_executable_pool_dictionary_implicit_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01747_executable_pool_dictionary_implicit_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01747_executable_pool_dictionary_implicit_key/query.sql b/parser/testdata/01747_executable_pool_dictionary_implicit_key/query.sql new file mode 100644 index 000000000..fb3b7e20e --- /dev/null +++ b/parser/testdata/01747_executable_pool_dictionary_implicit_key/query.sql @@ -0,0 +1,17 @@ +-- Tags: no-parallel + +SELECT 'executable_pool_simple_implicit_key'; + +SELECT dictGet('executable_pool_simple_implicit_key', 'a', toUInt64(1)); +SELECT dictGet('executable_pool_simple_implicit_key', 'b', toUInt64(1)); + +SELECT dictGet('executable_pool_simple_implicit_key', 'a', toUInt64(2)); +SELECT dictGet('executable_pool_simple_implicit_key', 'b', toUInt64(2)); + +SELECT 'executable_pool_complex_implicit_key'; + +SELECT dictGet('executable_pool_complex_implicit_key', 'a', ('First_1', 'Second_1')); +SELECT dictGet('executable_pool_complex_implicit_key', 'b', ('First_1', 'Second_1')); + +SELECT dictGet('executable_pool_complex_implicit_key', 'a', ('First_2', 'Second_2')); +SELECT dictGet('executable_pool_complex_implicit_key', 'b', ('First_2', 'Second_2')); diff --git a/parser/testdata/01747_join_view_filter_dictionary/ast.json b/parser/testdata/01747_join_view_filter_dictionary/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01747_join_view_filter_dictionary/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01747_join_view_filter_dictionary/metadata.json b/parser/testdata/01747_join_view_filter_dictionary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01747_join_view_filter_dictionary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01747_join_view_filter_dictionary/query.sql b/parser/testdata/01747_join_view_filter_dictionary/query.sql new file mode 100644 index 000000000..c7c525ba2 --- /dev/null +++ b/parser/testdata/01747_join_view_filter_dictionary/query.sql @@ -0,0 +1,51 @@ +-- Tags: no-parallel + +drop table if exists summing_table01747; +drop view if exists rates01747; +drop view if exists agg_view01747; +drop table if exists dictst01747; +drop DICTIONARY if exists default.dict01747; + +CREATE TABLE 
summing_table01747 + ( + some_name String, + user_id UInt64, + amount Int64, + currency String + ) +ENGINE = SummingMergeTree() +ORDER BY (some_name); + +CREATE VIEW rates01747 AS + SELECT 'USD' as from_currency, 'EUR' as to_currency, 1.2 as rates01747; + +insert into summing_table01747 values ('name', 2, 20, 'USD'),('name', 1, 10, 'USD'); + +create table dictst01747(some_name String, field1 String, field2 UInt8) Engine = Memory +as select 'name', 'test', 33; + +CREATE DICTIONARY default.dict01747 (some_name String, field1 String, field2 UInt8) +PRIMARY KEY some_name SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() +TABLE dictst01747 DB currentDatabase() USER 'default')) +LIFETIME(MIN 0 MAX 0) LAYOUT(COMPLEX_KEY_HASHED()); + + +CREATE VIEW agg_view01747 AS + SELECT + summing_table01747.some_name as some_name, + dictGet('default.dict01747', 'field1', tuple(some_name)) as field1, + dictGet('default.dict01747', 'field2', tuple(some_name)) as field2, + rates01747.rates01747 as rates01747 + FROM summing_table01747 + ANY LEFT JOIN rates01747 + ON rates01747.from_currency = summing_table01747.currency; + +select * from agg_view01747; + +SELECT field2 FROM agg_view01747 WHERE field1 = 'test'; + +drop table summing_table01747; +drop view rates01747; +drop view agg_view01747; +drop DICTIONARY default.dict01747; +drop table dictst01747; diff --git a/parser/testdata/01747_transform_empty_arrays/ast.json b/parser/testdata/01747_transform_empty_arrays/ast.json new file mode 100644 index 000000000..fd1b11f3e --- /dev/null +++ b/parser/testdata/01747_transform_empty_arrays/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.000934457, + "rows_read": 5, + "bytes_read": 169 + } +} diff --git a/parser/testdata/01747_transform_empty_arrays/metadata.json b/parser/testdata/01747_transform_empty_arrays/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01747_transform_empty_arrays/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01747_transform_empty_arrays/query.sql b/parser/testdata/01747_transform_empty_arrays/query.sql new file mode 100644 index 000000000..cc9a00a45 --- /dev/null +++ b/parser/testdata/01747_transform_empty_arrays/query.sql @@ -0,0 +1,27 @@ +SELECT * +FROM +( + WITH + + ( + SELECT groupArray(a) + FROM + ( + SELECT 1 AS a + ) + ) AS keys, + + ( + SELECT groupArray(a) + FROM + ( + SELECT 2 AS a + ) + ) AS values + SELECT * + FROM + ( + SELECT 1 AS a + ) + WHERE transform(a, keys, values, 0) +) AS wrap; diff --git a/parser/testdata/01748_dictionary_table_dot/ast.json b/parser/testdata/01748_dictionary_table_dot/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01748_dictionary_table_dot/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01748_dictionary_table_dot/metadata.json b/parser/testdata/01748_dictionary_table_dot/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01748_dictionary_table_dot/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01748_dictionary_table_dot/query.sql b/parser/testdata/01748_dictionary_table_dot/query.sql 
new file mode 100644 index 000000000..993d2e1a6 --- /dev/null +++ b/parser/testdata/01748_dictionary_table_dot/query.sql @@ -0,0 +1,36 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS test_01748; +CREATE DATABASE test_01748; +USE test_01748; + +DROP TABLE IF EXISTS `test.txt`; +DROP DICTIONARY IF EXISTS test_dict; + +CREATE TABLE `test.txt` +( + `key1` UInt32, + `key2` UInt32, + `value` String +) +ENGINE = Memory(); + +CREATE DICTIONARY test_dict +( + `key1` UInt32, + `key2` UInt32, + `value` String +) +PRIMARY KEY key1, key2 +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE `test.txt` PASSWORD '' DB currentDatabase())) +LIFETIME(MIN 1 MAX 3600) +LAYOUT(COMPLEX_KEY_HASHED()); + +INSERT INTO `test.txt` VALUES (1, 2, 'Hello'); + +-- TODO: it does not work without a fully qualified name. +SYSTEM RELOAD DICTIONARY test_01748.test_dict; + +SELECT dictGet(test_dict, 'value', (toUInt32(1), toUInt32(2))); + +DROP DATABASE test_01748; diff --git a/parser/testdata/01748_partition_id_pruning/ast.json b/parser/testdata/01748_partition_id_pruning/ast.json new file mode 100644 index 000000000..34da32d4e --- /dev/null +++ b/parser/testdata/01748_partition_id_pruning/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001325701, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01748_partition_id_pruning/metadata.json b/parser/testdata/01748_partition_id_pruning/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01748_partition_id_pruning/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01748_partition_id_pruning/query.sql b/parser/testdata/01748_partition_id_pruning/query.sql new file mode 100644 index 000000000..f492a4a88 --- /dev/null +++ b/parser/testdata/01748_partition_id_pruning/query.sql @@ -0,0 +1,35 @@ +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +drop table if exists x; + +create table x (i int, j int) engine MergeTree partition by i order by j settings index_granularity = 1; + +insert into x values (1, 1), (1, 2), (1, 3), (2, 4), (2, 5), (2, 6); + +set max_rows_to_read = 3; + +select * from x where _partition_id = partitionID(1); + +set max_rows_to_read = 5; -- one row for subquery + subquery + +select * from x where _partition_id in (select partitionID(number + 1) from numbers(1)); + +-- trivial count optimization test +set max_rows_to_read = 2; -- one row for subquery + subquery itself +-- TODO: Relax the limits because we might build prepared set twice with _minmax_count_projection +set max_rows_to_read = 3; +select count() from x where _partition_id in (select partitionID(number + 1) from numbers(1)); + +drop table x; + +drop table if exists mt; + +create table mt (n UInt64) engine=MergeTree order by n partition by n % 10; + +set max_rows_to_read = 200; + +insert into mt select * from numbers(100); + +select * from mt where toUInt64(substr(_part, 1, position(_part, '_') - 1)) = 1; + +drop table mt; diff --git a/parser/testdata/01752_distributed_query_sigsegv/ast.json b/parser/testdata/01752_distributed_query_sigsegv/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01752_distributed_query_sigsegv/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01752_distributed_query_sigsegv/metadata.json 
b/parser/testdata/01752_distributed_query_sigsegv/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01752_distributed_query_sigsegv/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01752_distributed_query_sigsegv/query.sql b/parser/testdata/01752_distributed_query_sigsegv/query.sql new file mode 100644 index 000000000..2fe3e29a7 --- /dev/null +++ b/parser/testdata/01752_distributed_query_sigsegv/query.sql @@ -0,0 +1,10 @@ +-- Tags: distributed + +-- this is enough to trigger the regression +SELECT throwIf(dummy = 0) FROM remote('127.1', system.one); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } + +-- these are just in case +SELECT throwIf(dummy = 0) FROM remote('127.{1,2}', system.one); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +SELECT throwIf(dummy = 0) FROM remote('127.{1,2}', system.one) SETTINGS prefer_localhost_replica=0; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +SELECT throwIf(dummy = 0) FROM remote('127.{1,2}', system.one) SETTINGS prefer_localhost_replica=0, distributed_group_by_no_merge=1; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +SELECT throwIf(dummy = 0) FROM remote('127.{1,2}', system.one) SETTINGS prefer_localhost_replica=0, distributed_group_by_no_merge=2; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } diff --git a/parser/testdata/01753_direct_dictionary_simple_key/ast.json b/parser/testdata/01753_direct_dictionary_simple_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01753_direct_dictionary_simple_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01753_direct_dictionary_simple_key/metadata.json b/parser/testdata/01753_direct_dictionary_simple_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01753_direct_dictionary_simple_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01753_direct_dictionary_simple_key/query.sql b/parser/testdata/01753_direct_dictionary_simple_key/query.sql new file mode 100644 index 000000000..669542091 --- /dev/null +++ b/parser/testdata/01753_direct_dictionary_simple_key/query.sql @@ -0,0 +1,122 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 01753_dictionary_db; +CREATE DATABASE 01753_dictionary_db; + +CREATE TABLE 01753_dictionary_db.simple_key_simple_attributes_source_table +( + id UInt64, + value_first String, + value_second String +) +ENGINE = TinyLog; + +INSERT INTO 01753_dictionary_db.simple_key_simple_attributes_source_table VALUES(0, 'value_0', 'value_second_0'); +INSERT INTO 01753_dictionary_db.simple_key_simple_attributes_source_table VALUES(1, 'value_1', 'value_second_1'); +INSERT INTO 01753_dictionary_db.simple_key_simple_attributes_source_table VALUES(2, 'value_2', 'value_second_2'); + +CREATE DICTIONARY 01753_dictionary_db.direct_dictionary_simple_key_simple_attributes +( + id UInt64, + value_first String DEFAULT 'value_first_default', + value_second String DEFAULT 'value_second_default' +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'simple_key_simple_attributes_source_table')) +LAYOUT(DIRECT()); + +SELECT 'Dictionary direct_dictionary_simple_key_simple_attributes'; +SELECT 'dictGet existing value'; +SELECT dictGet('01753_dictionary_db.direct_dictionary_simple_key_simple_attributes', 'value_first', number) as value_first, + dictGet('01753_dictionary_db.direct_dictionary_simple_key_simple_attributes', 
'value_second', number) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGet with non existing value'; +SELECT dictGet('01753_dictionary_db.direct_dictionary_simple_key_simple_attributes', 'value_first', number) as value_first, + dictGet('01753_dictionary_db.direct_dictionary_simple_key_simple_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictGetOrDefault existing value'; +SELECT dictGetOrDefault('01753_dictionary_db.direct_dictionary_simple_key_simple_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01753_dictionary_db.direct_dictionary_simple_key_simple_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGetOrDefault non existing value'; +SELECT dictGetOrDefault('01753_dictionary_db.direct_dictionary_simple_key_simple_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01753_dictionary_db.direct_dictionary_simple_key_simple_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictHas'; +SELECT dictHas('01753_dictionary_db.direct_dictionary_simple_key_simple_attributes', number) FROM system.numbers LIMIT 4; +SELECT 'select all values as input stream'; +SELECT * FROM 01753_dictionary_db.direct_dictionary_simple_key_simple_attributes ORDER BY ALL; + +DROP DICTIONARY 01753_dictionary_db.direct_dictionary_simple_key_simple_attributes; +DROP TABLE 01753_dictionary_db.simple_key_simple_attributes_source_table; + +CREATE TABLE 01753_dictionary_db.simple_key_complex_attributes_source_table +( + id UInt64, + value_first String, + value_second Nullable(String) +) +ENGINE = TinyLog; + +INSERT INTO 01753_dictionary_db.simple_key_complex_attributes_source_table VALUES(0, 'value_0', 'value_second_0'); +INSERT INTO 01753_dictionary_db.simple_key_complex_attributes_source_table VALUES(1, 'value_1', NULL); +INSERT INTO 01753_dictionary_db.simple_key_complex_attributes_source_table VALUES(2, 'value_2', 'value_second_2'); + +CREATE DICTIONARY 01753_dictionary_db.direct_dictionary_simple_key_complex_attributes +( + id UInt64, + value_first String DEFAULT 'value_first_default', + value_second Nullable(String) DEFAULT 'value_second_default' +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'simple_key_complex_attributes_source_table')) +LAYOUT(DIRECT()); + +SELECT 'Dictionary direct_dictionary_simple_key_complex_attributes'; +SELECT 'dictGet existing value'; +SELECT dictGet('01753_dictionary_db.direct_dictionary_simple_key_complex_attributes', 'value_first', number) as value_first, + dictGet('01753_dictionary_db.direct_dictionary_simple_key_complex_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGet with non existing value'; +SELECT dictGet('01753_dictionary_db.direct_dictionary_simple_key_complex_attributes', 'value_first', number) as value_first, + dictGet('01753_dictionary_db.direct_dictionary_simple_key_complex_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictGetOrDefault existing value'; +SELECT dictGetOrDefault('01753_dictionary_db.direct_dictionary_simple_key_complex_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01753_dictionary_db.direct_dictionary_simple_key_complex_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 3; 
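+-- Editor's note (illustrative, not part of the upstream test): the source table holds only keys 0..2, +-- so the LIMIT 3 queries probe existing keys only, while the LIMIT 4 variants also hit the missing +-- key 3, for which dictGet falls back to the attribute DEFAULT values ('value_first_default', +-- 'value_second_default') and dictGetOrDefault returns the supplied 'default' string.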
+SELECT 'dictGetOrDefault non existing value'; +SELECT dictGetOrDefault('01753_dictionary_db.direct_dictionary_simple_key_complex_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01753_dictionary_db.direct_dictionary_simple_key_complex_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictHas'; +SELECT dictHas('01753_dictionary_db.direct_dictionary_simple_key_complex_attributes', number) FROM system.numbers LIMIT 4; +SELECT 'select all values as input stream'; +SELECT * FROM 01753_dictionary_db.direct_dictionary_simple_key_complex_attributes ORDER BY ALL; + +DROP DICTIONARY 01753_dictionary_db.direct_dictionary_simple_key_complex_attributes; +DROP TABLE 01753_dictionary_db.simple_key_complex_attributes_source_table; + +CREATE TABLE 01753_dictionary_db.simple_key_hierarchy_table +( + id UInt64, + parent_id UInt64 +) ENGINE = TinyLog(); + +INSERT INTO 01753_dictionary_db.simple_key_hierarchy_table VALUES (1, 0); +INSERT INTO 01753_dictionary_db.simple_key_hierarchy_table VALUES (2, 1); +INSERT INTO 01753_dictionary_db.simple_key_hierarchy_table VALUES (3, 1); +INSERT INTO 01753_dictionary_db.simple_key_hierarchy_table VALUES (4, 2); + +CREATE DICTIONARY 01753_dictionary_db.direct_dictionary_simple_key_hierarchy +( + id UInt64, + parent_id UInt64 HIERARCHICAL +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'simple_key_hierarchy_table')) +LAYOUT(DIRECT()); + +SELECT 'Dictionary direct_dictionary_simple_key_hierarchy'; +SELECT 'dictGet'; +SELECT dictGet('01753_dictionary_db.direct_dictionary_simple_key_hierarchy', 'parent_id', number) FROM system.numbers LIMIT 5; +SELECT 'dictGetHierarchy'; +SELECT dictGetHierarchy('01753_dictionary_db.direct_dictionary_simple_key_hierarchy', toUInt64(1)); +SELECT dictGetHierarchy('01753_dictionary_db.direct_dictionary_simple_key_hierarchy', toUInt64(4)); + +DROP DICTIONARY 01753_dictionary_db.direct_dictionary_simple_key_hierarchy; +DROP TABLE 01753_dictionary_db.simple_key_hierarchy_table; + +DROP DATABASE 01753_dictionary_db; diff --git a/parser/testdata/01753_mutate_table_predicated_with_table/ast.json b/parser/testdata/01753_mutate_table_predicated_with_table/ast.json new file mode 100644 index 000000000..75adda3ca --- /dev/null +++ b/parser/testdata/01753_mutate_table_predicated_with_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mmm (children 1)" + }, + { + "explain": " Identifier mmm" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001487088, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01753_mutate_table_predicated_with_table/metadata.json b/parser/testdata/01753_mutate_table_predicated_with_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01753_mutate_table_predicated_with_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01753_mutate_table_predicated_with_table/query.sql b/parser/testdata/01753_mutate_table_predicated_with_table/query.sql new file mode 100644 index 000000000..efb444eb4 --- /dev/null +++ b/parser/testdata/01753_mutate_table_predicated_with_table/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS mmm; + +CREATE TABLE mmm ENGINE=MergeTree ORDER BY number +AS SELECT number, rand() % 10 AS a FROM numbers(1000); + +ALTER TABLE mmm DELETE WHERE a IN (SELECT a FROM mmm) 
SETTINGS mutations_sync=1; + +SELECT is_done FROM system.mutations WHERE table = 'mmm' and database=currentDatabase(); + +SELECT * FROM mmm; + +DROP TABLE IF EXISTS mmm; diff --git a/parser/testdata/01754_cluster_all_replicas_shard_num/ast.json b/parser/testdata/01754_cluster_all_replicas_shard_num/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01754_cluster_all_replicas_shard_num/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01754_cluster_all_replicas_shard_num/metadata.json b/parser/testdata/01754_cluster_all_replicas_shard_num/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01754_cluster_all_replicas_shard_num/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01754_cluster_all_replicas_shard_num/query.sql b/parser/testdata/01754_cluster_all_replicas_shard_num/query.sql new file mode 100644 index 000000000..ea88e304c --- /dev/null +++ b/parser/testdata/01754_cluster_all_replicas_shard_num/query.sql @@ -0,0 +1,16 @@ +-- Tags: replica, shard + +SELECT _shard_num FROM cluster('test_shard_localhost', system.one); +SELECT _shard_num FROM cluster('test_shard_localhost'); +SELECT _shard_num FROM clusterAllReplicas('test_shard_localhost', system.one); +SELECT _shard_num FROM clusterAllReplicas('test_shard_localhost'); + +SELECT _shard_num FROM cluster('test_cluster_two_shards', system.one) ORDER BY _shard_num; +SELECT _shard_num FROM cluster('test_cluster_two_shards') ORDER BY _shard_num; +SELECT _shard_num FROM clusterAllReplicas('test_cluster_two_shards', system.one) ORDER BY _shard_num; +SELECT _shard_num FROM clusterAllReplicas('test_cluster_two_shards') ORDER BY _shard_num; + +SELECT _shard_num FROM cluster('test_cluster_one_shard_two_replicas', system.one) ORDER BY _shard_num; +SELECT _shard_num FROM cluster('test_cluster_one_shard_two_replicas') ORDER BY _shard_num; +SELECT _shard_num FROM clusterAllReplicas('test_cluster_one_shard_two_replicas', system.one) ORDER BY _shard_num; +SELECT _shard_num FROM clusterAllReplicas('test_cluster_one_shard_two_replicas') ORDER BY _shard_num; diff --git a/parser/testdata/01754_direct_dictionary_complex_key/ast.json b/parser/testdata/01754_direct_dictionary_complex_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01754_direct_dictionary_complex_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01754_direct_dictionary_complex_key/metadata.json b/parser/testdata/01754_direct_dictionary_complex_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01754_direct_dictionary_complex_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01754_direct_dictionary_complex_key/query.sql b/parser/testdata/01754_direct_dictionary_complex_key/query.sql new file mode 100644 index 000000000..73536d8e9 --- /dev/null +++ b/parser/testdata/01754_direct_dictionary_complex_key/query.sql @@ -0,0 +1,97 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 01754_dictionary_db; +CREATE DATABASE 01754_dictionary_db; + +CREATE TABLE 01754_dictionary_db.complex_key_simple_attributes_source_table +( + id UInt64, + id_key String, + value_first String, + value_second String +) +ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO 01754_dictionary_db.complex_key_simple_attributes_source_table VALUES(0, 'id_key_0', 'value_0', 'value_second_0'); +INSERT INTO 
01754_dictionary_db.complex_key_simple_attributes_source_table VALUES(1, 'id_key_1', 'value_1', 'value_second_1'); +INSERT INTO 01754_dictionary_db.complex_key_simple_attributes_source_table VALUES(2, 'id_key_2', 'value_2', 'value_second_2'); + +CREATE DICTIONARY 01754_dictionary_db.direct_dictionary_complex_key_simple_attributes +( + id UInt64, + id_key String DEFAULT 'test_default_id_key', + value_first String DEFAULT 'value_first_default', + value_second String DEFAULT 'value_second_default' +) +PRIMARY KEY id, id_key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'complex_key_simple_attributes_source_table')) +LAYOUT(COMPLEX_KEY_DIRECT()); + +SELECT 'Dictionary direct_dictionary_complex_key_simple_attributes'; +SELECT 'dictGet existing value'; +SELECT dictGet('01754_dictionary_db.direct_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, + dictGet('01754_dictionary_db.direct_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGet with non existing value'; +SELECT dictGet('01754_dictionary_db.direct_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, + dictGet('01754_dictionary_db.direct_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictGetOrDefault existing value'; +SELECT dictGetOrDefault('01754_dictionary_db.direct_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, + dictGetOrDefault('01754_dictionary_db.direct_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGetOrDefault non existing value'; +SELECT dictGetOrDefault('01754_dictionary_db.direct_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, + dictGetOrDefault('01754_dictionary_db.direct_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictHas'; +SELECT dictHas('01754_dictionary_db.direct_dictionary_complex_key_simple_attributes', (number, concat('id_key_', toString(number)))) FROM system.numbers LIMIT 4; +SELECT 'select all values as input stream'; +SELECT * FROM 01754_dictionary_db.direct_dictionary_complex_key_simple_attributes ORDER BY ALL; + +DROP DICTIONARY 01754_dictionary_db.direct_dictionary_complex_key_simple_attributes; +DROP TABLE 01754_dictionary_db.complex_key_simple_attributes_source_table; + +CREATE TABLE 01754_dictionary_db.complex_key_complex_attributes_source_table +( + id UInt64, + id_key String, + value_first String, + value_second Nullable(String) +) +ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO 01754_dictionary_db.complex_key_complex_attributes_source_table VALUES(0, 'id_key_0', 'value_0', 'value_second_0'); +INSERT INTO 01754_dictionary_db.complex_key_complex_attributes_source_table VALUES(1, 'id_key_1', 'value_1', NULL); +INSERT INTO 01754_dictionary_db.complex_key_complex_attributes_source_table VALUES(2, 'id_key_2', 'value_2', 'value_second_2'); + +CREATE DICTIONARY 
01754_dictionary_db.direct_dictionary_complex_key_complex_attributes +( + id UInt64, + id_key String, + + value_first String DEFAULT 'value_first_default', + value_second Nullable(String) DEFAULT 'value_second_default' +) +PRIMARY KEY id, id_key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'complex_key_complex_attributes_source_table')) +LAYOUT(COMPLEX_KEY_DIRECT()); + +SELECT 'Dictionary direct_dictionary_complex_key_complex_attributes'; +SELECT 'dictGet existing value'; +SELECT dictGet('01754_dictionary_db.direct_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, + dictGet('01754_dictionary_db.direct_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGet with non existing value'; +SELECT dictGet('01754_dictionary_db.direct_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, + dictGet('01754_dictionary_db.direct_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictGetOrDefault existing value'; +SELECT dictGetOrDefault('01754_dictionary_db.direct_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, + dictGetOrDefault('01754_dictionary_db.direct_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGetOrDefault non existing value'; +SELECT dictGetOrDefault('01754_dictionary_db.direct_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, + dictGetOrDefault('01754_dictionary_db.direct_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictHas'; +SELECT dictHas('01754_dictionary_db.direct_dictionary_complex_key_complex_attributes', (number, concat('id_key_', toString(number)))) FROM system.numbers LIMIT 4; +SELECT 'select all values as input stream'; +SELECT * FROM 01754_dictionary_db.direct_dictionary_complex_key_complex_attributes ORDER BY ALL; + +DROP DICTIONARY 01754_dictionary_db.direct_dictionary_complex_key_complex_attributes; +DROP TABLE 01754_dictionary_db.complex_key_complex_attributes_source_table; + +DROP DATABASE 01754_dictionary_db; diff --git a/parser/testdata/01755_shard_pruning_with_literal/ast.json b/parser/testdata/01755_shard_pruning_with_literal/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01755_shard_pruning_with_literal/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01755_shard_pruning_with_literal/metadata.json b/parser/testdata/01755_shard_pruning_with_literal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01755_shard_pruning_with_literal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01755_shard_pruning_with_literal/query.sql b/parser/testdata/01755_shard_pruning_with_literal/query.sql new file mode 100644 index 000000000..163366561 --- /dev/null +++ 
b/parser/testdata/01755_shard_pruning_with_literal/query.sql @@ -0,0 +1,16 @@ +-- Tags: shard + +set optimize_skip_unused_shards=1; + +drop table if exists data_01755; +drop table if exists dist_01755; + +create table data_01755 (i Int) Engine=Memory; +create table dist_01755 as data_01755 Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01755, i); + +insert into data_01755 values (1); + +select * from dist_01755 where 1 settings enable_early_constant_folding = 0; + +drop table if exists data_01755; +drop table if exists dist_01755; diff --git a/parser/testdata/01756_optimize_skip_unused_shards_rewrite_in/ast.json b/parser/testdata/01756_optimize_skip_unused_shards_rewrite_in/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01756_optimize_skip_unused_shards_rewrite_in/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01756_optimize_skip_unused_shards_rewrite_in/metadata.json b/parser/testdata/01756_optimize_skip_unused_shards_rewrite_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01756_optimize_skip_unused_shards_rewrite_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01756_optimize_skip_unused_shards_rewrite_in/query.sql b/parser/testdata/01756_optimize_skip_unused_shards_rewrite_in/query.sql new file mode 100644 index 000000000..5475bec42 --- /dev/null +++ b/parser/testdata/01756_optimize_skip_unused_shards_rewrite_in/query.sql @@ -0,0 +1,172 @@ +-- Tags: shard + +-- NOTE: this test cannot use 'current_database = currentDatabase()', +-- because it is not propagated via remote queries, +-- hence it uses 'with (select currentDatabase()) as X' +-- (with a subquery to expand it on the initiator).
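+-- Editor's note (illustrative sketch, not part of the upstream test): the workaround pattern is +--   with (select currentDatabase()) as db select *, ignore(db) from dist_01756 where dummy in (0, 2); +-- where `db` is a hypothetical alias; the scalar subquery is evaluated once on the initiator, +-- so remote shards receive the already-expanded database name instead of evaluating currentDatabase() locally.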
+ +drop table if exists dist_01756; +drop table if exists dist_01756_str; +drop table if exists dist_01756_column; +drop table if exists data_01756_str; +drop table if exists data_01756_signed; + +-- separate log entry for localhost queries +set prefer_localhost_replica=0; +set force_optimize_skip_unused_shards=2; +set optimize_skip_unused_shards=1; +set optimize_skip_unused_shards_rewrite_in=0; +set log_queries=1; + +-- { echoOn } + +-- SELECT +-- intHash64(0) % 2, +-- intHash64(2) % 2 +-- ┌─modulo(intHash64(0), 2)─┬─modulo(intHash64(2), 2)─┐ +-- │ 0 │ 1 │ +-- └─────────────────────────┴─────────────────────────┘ +create table dist_01756 as system.one engine=Distributed(test_cluster_two_shards, system, one, intHash64(dummy)); + +-- +-- w/o optimize_skip_unused_shards_rewrite_in=1 +-- +select '(0, 2)'; +with (select currentDatabase()) as id_no select *, ignore(id_no) from dist_01756 where dummy in (0, 2); +system flush logs query_log; +select splitByString('IN', query)[-1] from system.query_log where + event_date >= yesterday() and + event_time > now() - interval 1 hour and + not is_initial_query and + query not like '%system%query_log%' and + query like concat('%', currentDatabase(), '%AS%id_no%') and + type = 'QueryFinish' +order by query; + +-- +-- w/ optimize_skip_unused_shards_rewrite_in=1 +-- + +set optimize_skip_unused_shards_rewrite_in=1; + +-- detailed coverage for realistic examples +select 'optimize_skip_unused_shards_rewrite_in(0, 2)'; +with (select currentDatabase()) as id_02 select *, ignore(id_02) from dist_01756 where dummy in (0, 2); +system flush logs query_log; +select splitByString('IN', query)[-1] from system.query_log where + event_date >= yesterday() and + event_time > now() - interval 1 hour and + not is_initial_query and + query not like '%system%query_log%' and + query like concat('%', currentDatabase(), '%AS%id_02%') and + type = 'QueryFinish' +order by query; + +select 'optimize_skip_unused_shards_rewrite_in(2,)'; +with (select currentDatabase()) as id_2 select *, ignore(id_2) from dist_01756 where dummy in (2); +system flush logs query_log; +select splitByString('IN', query)[-1] from system.query_log where + event_date >= yesterday() and + event_time > now() - interval 1 hour and + not is_initial_query and + query not like '%system%query_log%' and + query like concat('%', currentDatabase(), '%AS%id_2%') and + type = 'QueryFinish' +order by query; + +select 'optimize_skip_unused_shards_rewrite_in(0,)'; +with (select currentDatabase()) as id_00 select *, ignore(id_00) from dist_01756 where dummy in (0); +system flush logs query_log; +select splitByString('IN', query)[-1] from system.query_log where + event_date >= yesterday() and + event_time > now() - interval 1 hour and + not is_initial_query and + query not like '%system%query_log%' and + query like concat('%', currentDatabase(), '%AS%id_00%') and + type = 'QueryFinish' +order by query; + +-- signed column +select 'signed column'; +create table data_01756_signed (key Int) engine=Null; +with (select currentDatabase()) as key_signed select *, ignore(key_signed) from cluster(test_cluster_two_shards, currentDatabase(), data_01756_signed, key) where key in (-1, -2); +system flush logs query_log; +select splitByString('IN', query)[-1] from system.query_log where + event_date >= yesterday() and + event_time > now() - interval 1 hour and + not is_initial_query and + query not like '%system%query_log%' and + query like concat('%', currentDatabase(), '%AS%key_signed%') and + type = 'QueryFinish' +order by query; + +-- 
not tuple +select * from dist_01756 where dummy in (0); +select * from dist_01756 where dummy in ('0'); + + +-- +-- errors +-- +select 'errors'; + +-- optimize_skip_unused_shards does not support non-constants +select * from dist_01756 where dummy in (select * from system.one); -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +-- this is a constant for the analyzer +select * from dist_01756 where dummy in (toUInt8(0)) settings enable_analyzer=0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +-- NOT IN is not supported +select * from dist_01756 where dummy not in (0, 2); -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } + +-- +-- others +-- +select 'others'; + +select * from dist_01756 where dummy not in (2, 3) and dummy in (0, 2); +select * from dist_01756 where dummy in tuple(0, 2); +select * from dist_01756 where dummy in tuple(0); +select * from dist_01756 where dummy in tuple(2); +-- Identifier is NULL +select (2 IN (2,)), * from dist_01756 where dummy in (0, 2) format Null; +-- Literal is NULL +select (dummy IN (toUInt8(2),)), * from dist_01756 where dummy in (0, 2) format Null; + +-- different type +select 'different types -- prohibited'; +create table data_01756_str (key String) engine=Memory(); +insert into data_01756_str values (0)(1); +-- SELECT +-- cityHash64(0) % 2, +-- cityHash64(2) % 2 +-- +-- ┌─modulo(cityHash64(0), 2)─┬─modulo(cityHash64(2), 2)─┐ +-- │ 0 │ 1 │ +-- └──────────────────────────┴──────────────────────────┘ +create table dist_01756_str as data_01756_str engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01756_str, cityHash64(key)); +select * from dist_01756_str where key in ('0', '2'); +select * from dist_01756_str where key in (0, 2); +-- the analyzer does support this +select * from dist_01756_str where key in ('0', Null) settings enable_analyzer=0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +-- select * from dist_01756_str where key in (0, 2); -- { serverError TYPE_MISMATCH } +-- select * from dist_01756_str where key in (0, Null); -- { serverError TYPE_MISMATCH } + +-- different type #2 +select 'different types -- conversion'; +create table dist_01756_column as system.one engine=Distributed(test_cluster_two_shards, system, one, dummy); +select * from dist_01756_column where dummy in (0, '255'); +select * from dist_01756_column where dummy in (0, '255foo'); -- { serverError TYPE_MISMATCH } +-- intHash64 does not accept string, but implicit conversion should be done +select * from dist_01756 where dummy in ('0', '2'); + +-- optimize_skip_unused_shards_limit +select 'optimize_skip_unused_shards_limit'; +select * from dist_01756 where dummy in (0, 2) settings optimize_skip_unused_shards_limit=1; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01756 where dummy in (0, 2) settings optimize_skip_unused_shards_limit=1, force_optimize_skip_unused_shards=0; + +-- { echoOff } + +drop table dist_01756; +drop table dist_01756_str; +drop table dist_01756_column; +drop table data_01756_str; +drop table data_01756_signed; diff --git a/parser/testdata/01757_optimize_skip_unused_shards_limit/ast.json b/parser/testdata/01757_optimize_skip_unused_shards_limit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01757_optimize_skip_unused_shards_limit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01757_optimize_skip_unused_shards_limit/metadata.json b/parser/testdata/01757_optimize_skip_unused_shards_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null 
+++ b/parser/testdata/01757_optimize_skip_unused_shards_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01757_optimize_skip_unused_shards_limit/query.sql b/parser/testdata/01757_optimize_skip_unused_shards_limit/query.sql new file mode 100644 index 000000000..6fcf98d47 --- /dev/null +++ b/parser/testdata/01757_optimize_skip_unused_shards_limit/query.sql @@ -0,0 +1,39 @@ +-- Tags: shard + +drop table if exists dist_01757; +create table dist_01757 as system.one engine=Distributed(test_cluster_two_shards, system, one, dummy); + +set optimize_skip_unused_shards=1; +set force_optimize_skip_unused_shards=2; + +-- in +select * from dist_01757 where dummy in (0,) format Null; +select * from dist_01757 where dummy in (0, 1) format Null settings optimize_skip_unused_shards_limit=2; + +-- in negative +select * from dist_01757 where dummy in (0, 1) settings optimize_skip_unused_shards_limit=1; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } + +-- or negative +select * from dist_01757 where dummy = 0 or dummy = 1 settings optimize_skip_unused_shards_limit=1; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } + +-- or +select * from dist_01757 where dummy = 0 or dummy = 1 format Null settings optimize_skip_unused_shards_limit=2; + +-- and negative +-- disabled for the analyzer because the new implementation considers `dummy = 0 and dummy = 1` as constant False. +select * from dist_01757 where dummy = 0 and dummy = 1 settings optimize_skip_unused_shards_limit=1, enable_analyzer=0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01757 where dummy = 0 and dummy = 2 and dummy = 3 settings optimize_skip_unused_shards_limit=1, enable_analyzer=0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } +select * from dist_01757 where dummy = 0 and dummy = 2 and dummy = 3 settings optimize_skip_unused_shards_limit=2, enable_analyzer=0; -- { serverError UNABLE_TO_SKIP_UNUSED_SHARDS } + +-- and +select * from dist_01757 where dummy = 0 and dummy = 1 settings optimize_skip_unused_shards_limit=2; +select * from dist_01757 where dummy = 0 and dummy = 1 and dummy = 3 settings optimize_skip_unused_shards_limit=3; + +-- ARGUMENT_OUT_OF_BOUND error +select * from dist_01757 where dummy in (0, 1) settings optimize_skip_unused_shards_limit=0; -- { serverError ARGUMENT_OUT_OF_BOUND } +select * from dist_01757 where dummy in (0, 1) settings optimize_skip_unused_shards_limit=9223372036854775808; -- { serverError ARGUMENT_OUT_OF_BOUND } + +drop table dist_01757; + +-- fuzzed +SELECT * FROM remote('127.0.0.{1,2}', numbers(40), number) ORDER BY 'a' LIMIT 1 BY number SETTINGS optimize_skip_unused_shards = 1, force_optimize_skip_unused_shards=0 format Null diff --git a/parser/testdata/01759_dictionary_unique_attribute_names/ast.json b/parser/testdata/01759_dictionary_unique_attribute_names/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01759_dictionary_unique_attribute_names/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01759_dictionary_unique_attribute_names/metadata.json b/parser/testdata/01759_dictionary_unique_attribute_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01759_dictionary_unique_attribute_names/query.sql @@ -0,0 +1,29 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 01759_db; +CREATE DATABASE 01759_db; + +CREATE TABLE 01759_db.dictionary_source_table +( + key UInt64, + value1 UInt64, + value2 UInt64 +) +ENGINE = TinyLog; + +INSERT INTO 01759_db.dictionary_source_table VALUES (0, 2, 3), (1, 5, 6), (2, 8, 9); + +CREATE DICTIONARY 01759_db.test_dictionary(key UInt64, value1 UInt64, value1 UInt64) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'dictionary_source_table' DB '01759_db')) +LAYOUT(COMPLEX_KEY_DIRECT()); -- {serverError BAD_ARGUMENTS} + +CREATE DICTIONARY 01759_db.test_dictionary(key UInt64, value1 UInt64, value2 UInt64) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'dictionary_source_table' DB '01759_db')) +LAYOUT(COMPLEX_KEY_DIRECT()); + +SELECT number, dictGet('01759_db.test_dictionary', 'value1', tuple(number)) as value1, + dictGet('01759_db.test_dictionary', 'value2', tuple(number)) as value2 FROM system.numbers LIMIT 3; + +DROP DATABASE 01759_db; diff --git a/parser/testdata/01759_optimize_skip_unused_shards_zero_shards/ast.json b/parser/testdata/01759_optimize_skip_unused_shards_zero_shards/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01759_optimize_skip_unused_shards_zero_shards/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01759_optimize_skip_unused_shards_zero_shards/metadata.json b/parser/testdata/01759_optimize_skip_unused_shards_zero_shards/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01759_optimize_skip_unused_shards_zero_shards/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01759_optimize_skip_unused_shards_zero_shards/query.sql b/parser/testdata/01759_optimize_skip_unused_shards_zero_shards/query.sql new file mode 100644 index 000000000..937bd1c0c --- /dev/null +++ b/parser/testdata/01759_optimize_skip_unused_shards_zero_shards/query.sql @@ -0,0 +1,5 @@ +-- Tags: shard + +-- { echo } +select * from remote('127.{1,2}', system, one, dummy) where 0 settings optimize_skip_unused_shards=1, force_optimize_skip_unused_shards=1; +select count() from remote('127.{1,2}', system, one, dummy) where 0 settings optimize_skip_unused_shards=1, force_optimize_skip_unused_shards=1; diff --git a/parser/testdata/01760_ddl_dictionary_use_current_database_name/ast.json b/parser/testdata/01760_ddl_dictionary_use_current_database_name/ast.json new file mode 100644 index 000000000..197e44bcc --- /dev/null +++ b/parser/testdata/01760_ddl_dictionary_use_current_database_name/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ddl_dictonary_test_source (children 1)" + }, + { + "explain": " Identifier ddl_dictonary_test_source" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001029053, + "rows_read": 2, + "bytes_read": 102 + } +} diff --git a/parser/testdata/01760_ddl_dictionary_use_current_database_name/metadata.json b/parser/testdata/01760_ddl_dictionary_use_current_database_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01760_ddl_dictionary_use_current_database_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01760_ddl_dictionary_use_current_database_name/query.sql 
b/parser/testdata/01760_ddl_dictionary_use_current_database_name/query.sql new file mode 100644 index 000000000..c6bccde85 --- /dev/null +++ b/parser/testdata/01760_ddl_dictionary_use_current_database_name/query.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS ddl_dictonary_test_source; +CREATE TABLE ddl_dictonary_test_source +( + id UInt64, + value UInt64 +) +ENGINE = TinyLog; + +INSERT INTO ddl_dictonary_test_source VALUES (0, 0); +INSERT INTO ddl_dictonary_test_source VALUES (1, 1); + +DROP DICTIONARY IF EXISTS ddl_dictionary_test; +CREATE DICTIONARY ddl_dictionary_test +( + id UInt64, + value UInt64 DEFAULT 0 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'ddl_dictonary_test_source')) +LAYOUT(DIRECT()); + +SELECT 'dictGet'; +SELECT dictGet('ddl_dictionary_test', 'value', number) FROM system.numbers LIMIT 3; + +SELECT 'dictHas'; +SELECT dictHas('ddl_dictionary_test', number) FROM system.numbers LIMIT 3; + +DROP DICTIONARY ddl_dictionary_test; +DROP TABLE ddl_dictonary_test_source; diff --git a/parser/testdata/01760_modulo_negative/ast.json b/parser/testdata/01760_modulo_negative/ast.json new file mode 100644 index 000000000..61d020ba4 --- /dev/null +++ b/parser/testdata/01760_modulo_negative/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function negate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal Int64_-9223372036854775808" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.000935593, + "rows_read": 14, + "bytes_read": 573 + } +} diff --git a/parser/testdata/01760_modulo_negative/metadata.json b/parser/testdata/01760_modulo_negative/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01760_modulo_negative/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01760_modulo_negative/query.sql b/parser/testdata/01760_modulo_negative/query.sql new file mode 100644 index 000000000..3e5f96267 --- /dev/null +++ b/parser/testdata/01760_modulo_negative/query.sql @@ -0,0 +1 @@ +SELECT -number % -9223372036854775808 FROM system.numbers; -- { serverError ILLEGAL_DIVISION } diff --git a/parser/testdata/01760_polygon_dictionaries/ast.json b/parser/testdata/01760_polygon_dictionaries/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01760_polygon_dictionaries/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01760_polygon_dictionaries/metadata.json b/parser/testdata/01760_polygon_dictionaries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01760_polygon_dictionaries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01760_polygon_dictionaries/query.sql b/parser/testdata/01760_polygon_dictionaries/query.sql new file mode 100644 index 
000000000..f3be66eb8 --- /dev/null +++ b/parser/testdata/01760_polygon_dictionaries/query.sql @@ -0,0 +1,72 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 01760_db; +CREATE DATABASE 01760_db; + +DROP TABLE IF EXISTS 01760_db.polygons; +CREATE TABLE 01760_db.polygons (key Array(Array(Array(Tuple(Float64, Float64)))), name String, value UInt64, value_nullable Nullable(UInt64)) ENGINE = Memory; +INSERT INTO 01760_db.polygons VALUES ([[[(3, 1), (0, 1), (0, -1), (3, -1)]]], 'Click East', 421, 421); +INSERT INTO 01760_db.polygons VALUES ([[[(-1, 1), (1, 1), (1, 3), (-1, 3)]]], 'Click North', 422, NULL); +INSERT INTO 01760_db.polygons VALUES ([[[(-3, 1), (-3, -1), (0, -1), (0, 1)]]], 'Click South', 423, 423); +INSERT INTO 01760_db.polygons VALUES ([[[(-1, -1), (1, -1), (1, -3), (-1, -3)]]], 'Click West', 424, NULL); + +DROP TABLE IF EXISTS 01760_db.points; +CREATE TABLE 01760_db.points (x Float64, y Float64, def_i UInt64, def_s String) ENGINE = Memory; +INSERT INTO 01760_db.points VALUES (0.1, 0.0, 112, 'aax'); +INSERT INTO 01760_db.points VALUES (-0.1, 0.0, 113, 'aay'); +INSERT INTO 01760_db.points VALUES (0.0, 1.1, 114, 'aaz'); +INSERT INTO 01760_db.points VALUES (0.0, -1.1, 115, 'aat'); +INSERT INTO 01760_db.points VALUES (3.0, 3.0, 22, 'bb'); + +DROP DICTIONARY IF EXISTS 01760_db.dict_array; +CREATE DICTIONARY 01760_db.dict_array +( + key Array(Array(Array(Tuple(Float64, Float64)))), + name String DEFAULT 'qqq', + value UInt64 DEFAULT 10, + value_nullable Nullable(UInt64) DEFAULT 20 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'polygons' DB '01760_db')) +LIFETIME(0) +LAYOUT(POLYGON()) +SETTINGS(dictionary_use_async_executor=1, max_threads=8) +; + +SELECT 'dictGet'; + +SELECT tuple(x, y) as key, + dictGet('01760_db.dict_array', 'name', key), + dictGet('01760_db.dict_array', 'value', key), + dictGet('01760_db.dict_array', 'value_nullable', key) +FROM 01760_db.points +ORDER BY x, y; + +SELECT 'dictGetOrDefault'; + +SELECT tuple(x, y) as key, + dictGetOrDefault('01760_db.dict_array', 'name', key, 'DefaultName'), + dictGetOrDefault('01760_db.dict_array', 'value', key, 30), + dictGetOrDefault('01760_db.dict_array', 'value_nullable', key, 40) +FROM 01760_db.points +ORDER BY x, y; + +SELECT 'dictHas'; + +SELECT tuple(x, y) as key, + dictHas('01760_db.dict_array', key), + dictHas('01760_db.dict_array', key), + dictHas('01760_db.dict_array', key) +FROM 01760_db.points +ORDER BY x, y; + +SELECT 'check NaN or infinite point input'; +SELECT tuple(nan, inf) as key, dictGet('01760_db.dict_array', 'name', key); --{serverError BAD_ARGUMENTS} +SELECT tuple(nan, nan) as key, dictGet('01760_db.dict_array', 'name', key); --{serverError BAD_ARGUMENTS} +SELECT tuple(inf, nan) as key, dictGet('01760_db.dict_array', 'name', key); --{serverError BAD_ARGUMENTS} +SELECT tuple(inf, inf) as key, dictGet('01760_db.dict_array', 'name', key); --{serverError BAD_ARGUMENTS} + +DROP DICTIONARY 01760_db.dict_array; +DROP TABLE 01760_db.points; +DROP TABLE 01760_db.polygons; +DROP DATABASE 01760_db; diff --git a/parser/testdata/01760_system_dictionaries/ast.json b/parser/testdata/01760_system_dictionaries/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01760_system_dictionaries/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01760_system_dictionaries/metadata.json b/parser/testdata/01760_system_dictionaries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01760_system_dictionaries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01760_system_dictionaries/query.sql b/parser/testdata/01760_system_dictionaries/query.sql new file mode 100644 index 000000000..2e7d41848 --- /dev/null +++ b/parser/testdata/01760_system_dictionaries/query.sql @@ -0,0 +1,59 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 01760_db; +CREATE DATABASE 01760_db; + +DROP TABLE IF EXISTS 01760_db.example_simple_key_source; +CREATE TABLE 01760_db.example_simple_key_source (id UInt64, value UInt64) ENGINE=TinyLog; +INSERT INTO 01760_db.example_simple_key_source VALUES (0, 0), (1, 1), (2, 2); + +DROP DICTIONARY IF EXISTS 01760_db.example_simple_key_dictionary; +CREATE DICTIONARY 01760_db.example_simple_key_dictionary ( + id UInt64, + value UInt64 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'example_simple_key_source' DATABASE '01760_db')) +LAYOUT(DIRECT()); + +SELECT 'simple key'; + +SELECT name, database, key.names, key.types, attribute.names, attribute.types, status FROM system.dictionaries WHERE database='01760_db'; +SELECT name, database, key.names, key.types, attribute.names, attribute.types, status FROM system.dictionaries WHERE database='01760_db'; + +SELECT * FROM 01760_db.example_simple_key_dictionary; + +SELECT name, database, key.names, key.types, attribute.names, attribute.types, status FROM system.dictionaries WHERE database='01760_db'; + +DROP DICTIONARY 01760_db.example_simple_key_dictionary; +DROP TABLE 01760_db.example_simple_key_source; + +SELECT name, database, key.names, key.types, attribute.names, attribute.types, status FROM system.dictionaries WHERE database='01760_db'; + +DROP TABLE IF EXISTS 01760_db.example_complex_key_source; +CREATE TABLE 01760_db.example_complex_key_source (id UInt64, id_key String, value UInt64) ENGINE=TinyLog; +INSERT INTO 01760_db.example_complex_key_source VALUES (0, '0_key', 0), (1, '1_key', 1), (2, '2_key', 2); + +DROP DICTIONARY IF EXISTS 01760_db.example_complex_key_dictionary; +CREATE DICTIONARY 01760_db.example_complex_key_dictionary ( + id UInt64, + id_key String, + value UInt64 +) +PRIMARY KEY id, id_key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'example_complex_key_source' DATABASE '01760_db')) +LAYOUT(COMPLEX_KEY_DIRECT()); + +SELECT 'complex key'; + +SELECT name, database, key.names, key.types, attribute.names, attribute.types, status FROM system.dictionaries WHERE database='01760_db'; +SELECT name, database, key.names, key.types, attribute.names, attribute.types, status FROM system.dictionaries WHERE database='01760_db'; + +SELECT * FROM 01760_db.example_complex_key_dictionary; + +SELECT name, database, key.names, key.types, attribute.names, attribute.types, status FROM system.dictionaries WHERE database='01760_db'; + +DROP DICTIONARY 01760_db.example_complex_key_dictionary; +DROP TABLE 01760_db.example_complex_key_source; + +DROP DATABASE 01760_db; diff --git a/parser/testdata/01761_alter_decimal_zookeeper_long/ast.json b/parser/testdata/01761_alter_decimal_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01761_alter_decimal_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01761_alter_decimal_zookeeper_long/metadata.json b/parser/testdata/01761_alter_decimal_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01761_alter_decimal_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01761_alter_decimal_zookeeper_long/query.sql b/parser/testdata/01761_alter_decimal_zookeeper_long/query.sql new file mode 100644 index 000000000..5d28e8ca6 --- /dev/null +++ b/parser/testdata/01761_alter_decimal_zookeeper_long/query.sql @@ -0,0 +1,33 @@ +-- Tags: long, zookeeper + +DROP TABLE IF EXISTS test_alter_decimal; + +CREATE TABLE test_alter_decimal +(n UInt64, d Decimal(15, 8)) +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/01761_alter_decimal_zookeeper', 'r1') +ORDER BY tuple(); + +INSERT INTO test_alter_decimal VALUES (1, toDecimal32(5, 5)); + +INSERT INTO test_alter_decimal VALUES (2, toDecimal32(6, 6)); + +SELECT * FROM test_alter_decimal ORDER BY n; + +ALTER TABLE test_alter_decimal MODIFY COLUMN d Decimal(18, 8); + +SHOW CREATE TABLE test_alter_decimal; + +SELECT * FROM test_alter_decimal ORDER BY n; + +DETACH TABLE test_alter_decimal; +ATTACH TABLE test_alter_decimal; + +SHOW CREATE TABLE test_alter_decimal; + +INSERT INTO test_alter_decimal VALUES (3, toDecimal32(7, 7)); + +OPTIMIZE TABLE test_alter_decimal FINAL; + +SELECT * FROM test_alter_decimal ORDER BY n; + +DROP TABLE IF EXISTS test_alter_decimal; diff --git a/parser/testdata/01761_cast_to_enum_nullable/ast.json b/parser/testdata/01761_cast_to_enum_nullable/ast.json new file mode 100644 index 000000000..3b1ba33d0 --- /dev/null +++ b/parser/testdata/01761_cast_to_enum_nullable/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function assumeNotNull (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal 'Nullable(String)'" + }, + { + "explain": " Literal 'Nullable(Enum8(\\'Hello\\' = 1))'" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001020681, + "rows_read": 15, + "bytes_read": 633 + } +} diff --git a/parser/testdata/01761_cast_to_enum_nullable/metadata.json b/parser/testdata/01761_cast_to_enum_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01761_cast_to_enum_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01761_cast_to_enum_nullable/query.sql b/parser/testdata/01761_cast_to_enum_nullable/query.sql new file mode 100644 index 000000000..42a51d2f7 --- /dev/null +++ b/parser/testdata/01761_cast_to_enum_nullable/query.sql @@ -0,0 +1 @@ +SELECT toUInt8(assumeNotNull(cast(cast(NULL, 'Nullable(String)'), 'Nullable(Enum8(\'Hello\' = 1))'))); diff --git a/parser/testdata/01761_round_year_bounds/ast.json b/parser/testdata/01761_round_year_bounds/ast.json new file mode 100644 index 000000000..94cb7b6e5 --- /dev/null +++ b/parser/testdata/01761_round_year_bounds/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toStartOfInterval (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-9223372036854775808" + }, + { + "explain": " Function toIntervalYear (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Literal 'Asia\/Istanbul'" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001107315, + "rows_read": 14, + "bytes_read": 565 + } +} diff --git a/parser/testdata/01761_round_year_bounds/metadata.json b/parser/testdata/01761_round_year_bounds/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01761_round_year_bounds/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01761_round_year_bounds/query.sql b/parser/testdata/01761_round_year_bounds/query.sql new file mode 100644 index 000000000..57f421d15 --- /dev/null +++ b/parser/testdata/01761_round_year_bounds/query.sql @@ -0,0 +1 @@ +SELECT toStartOfInterval(toDateTime(-9223372036854775808), toIntervalYear(100), 'Asia/Istanbul') FORMAT Null; diff --git a/parser/testdata/01762_datetime64_extended_parsing/ast.json b/parser/testdata/01762_datetime64_extended_parsing/ast.json new file mode 100644 index 000000000..884d93392 --- /dev/null +++ b/parser/testdata/01762_datetime64_extended_parsing/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1925-01-02 03:04:05.678901'" + }, + { + "explain": " Literal UInt64_6" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001210374, + "rows_read": 8, + "bytes_read": 315 + } +} diff --git a/parser/testdata/01762_datetime64_extended_parsing/metadata.json b/parser/testdata/01762_datetime64_extended_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01762_datetime64_extended_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01762_datetime64_extended_parsing/query.sql b/parser/testdata/01762_datetime64_extended_parsing/query.sql new file mode 100644 index 000000000..a7ad447b2 --- /dev/null +++ b/parser/testdata/01762_datetime64_extended_parsing/query.sql @@ -0,0 +1 @@ +SELECT toDateTime64('1925-01-02 03:04:05.678901', 6); diff --git a/parser/testdata/01762_deltasumtimestamp/ast.json b/parser/testdata/01762_deltasumtimestamp/ast.json new file mode 100644 index 000000000..68ded2627 --- /dev/null +++ b/parser/testdata/01762_deltasumtimestamp/ast.json @@ -0,0 +1,238 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList 
(children 1)" + }, + { + "explain": " Function deltaSumTimestampMerge (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier state" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function deltaSumTimestampState (alias state) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier value" + }, + { + "explain": " Identifier timestamp" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDate (alias timestamp) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function arrayElement (alias value) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_4, UInt64_5, UInt64_5, UInt64_5]" + }, + { + "explain": " Function minus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function deltaSumTimestampState (alias state) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier value" + }, + { + "explain": " Identifier timestamp" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDate (alias timestamp) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function arrayElement (alias value) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_0, UInt64_4, UInt64_8, UInt64_3]" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " 
TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_4" + } + ], + + "rows": 72, + + "statistics": + { + "elapsed": 0.001265051, + "rows_read": 72, + "bytes_read": 3693 + } +} diff --git a/parser/testdata/01762_deltasumtimestamp/metadata.json b/parser/testdata/01762_deltasumtimestamp/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01762_deltasumtimestamp/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01762_deltasumtimestamp/query.sql b/parser/testdata/01762_deltasumtimestamp/query.sql new file mode 100644 index 000000000..a0098ccc2 --- /dev/null +++ b/parser/testdata/01762_deltasumtimestamp/query.sql @@ -0,0 +1,5 @@ +select deltaSumTimestampMerge(state) from (select deltaSumTimestampState(value, timestamp) as state from (select toDate(number) as timestamp, [4, 5, 5, 5][number-4] as value from numbers(5, 4)) UNION ALL select deltaSumTimestampState(value, timestamp) as state from (select toDate(number) as timestamp, [0, 4, 8, 3][number] as value from numbers(1, 4))); +select deltaSumTimestampMerge(state) from (select deltaSumTimestampState(value, timestamp) as state from (select number as timestamp, [0, 4, 8, 3][number] as value from numbers(1, 4)) UNION ALL select deltaSumTimestampState(value, timestamp) as state from (select number as timestamp, [4, 5, 5, 5][number-4] as value from numbers(5, 4))); +select deltaSumTimestamp(value, timestamp) from (select toDateTime(number) as timestamp, [0, 4, 8, 3][number] as value from numbers(1, 4)); +select deltaSumTimestamp(value, timestamp) from (select toDateTime(number) as timestamp, [0, 4.5, 8, 3][number] as value from numbers(1, 4)); +select deltaSumTimestamp(value, timestamp) from (select number as timestamp, [0, 4, 8, 3, 0, 0, 0, 1, 3, 5][number] as value from numbers(1, 10)); diff --git a/parser/testdata/01763_filter_push_down_bugs/ast.json b/parser/testdata/01763_filter_push_down_bugs/ast.json new file mode 100644 index 000000000..c5e97c376 --- /dev/null +++ b/parser/testdata/01763_filter_push_down_bugs/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001050157, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01763_filter_push_down_bugs/metadata.json b/parser/testdata/01763_filter_push_down_bugs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01763_filter_push_down_bugs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01763_filter_push_down_bugs/query.sql b/parser/testdata/01763_filter_push_down_bugs/query.sql new file mode 100644 index 000000000..ad98e2763 --- /dev/null +++ b/parser/testdata/01763_filter_push_down_bugs/query.sql @@ -0,0 +1,84 @@ +SET allow_statistics_optimize = 0; +SELECT * FROM (SELECT col1, col2 FROM (select '1' as col1, '2' as col2) GROUP by col1, col2) AS expr_qry WHERE col2 != ''; +SELECT * FROM (SELECT materialize('1') AS s1, materialize('2') AS s2 GROUP BY s1, s2) WHERE s2 = '2'; +SELECT * FROM (SELECT materialize([1]) AS s1, materialize('2') AS s2 GROUP BY s1, s2) WHERE s2 = '2'; +SELECT * FROM (SELECT materialize([[1]]) AS s1, materialize('2') AS s2 GROUP BY s1, s2) WHERE s2 = '2'; + +DROP TABLE IF EXISTS Test; + +CREATE TABLE Test 
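+-- Note on the CREATE ... AS SELECT below: the column list and types are inferred from the SELECT; +-- PRIMARY KEY must be a prefix of ORDER BY (here the two are identical).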
+ENGINE = MergeTree() +PRIMARY KEY (String1,String2) +ORDER BY (String1,String2) +SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi' +AS +SELECT + 'String1_' || toString(number) as String1, + 'String2_' || toString(number) as String2, + 'String3_' || toString(number) as String3, + 'String4_' || toString(number%4) as String4 +FROM numbers(1); + +SELECT * +FROM + ( + SELECT String1,String2,String3,String4,COUNT(*) + FROM Test + GROUP by String1,String2,String3,String4 + ) AS expr_qry; + +SELECT * +FROM + ( + SELECT String1,String2,String3,String4,COUNT(*) + FROM Test + GROUP by String1,String2,String3,String4 + ) AS expr_qry +WHERE String4 ='String4_0'; + +DROP TABLE IF EXISTS Test; + +select x, y from (select [0, 1, 2] as y, 1 as a, 2 as b) array join y as x where a = 1 and b = 2 and (x = 1 or x != 1) and x = 1; + +DROP TABLE IF EXISTS t; +create table t(a UInt8) engine=MergeTree order by a SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into t select * from numbers(2); +select a from t t1 join t t2 on t1.a = t2.a where t1.a; +DROP TABLE IF EXISTS t; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +CREATE TABLE t1 (id Int64, create_time DateTime) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +CREATE TABLE t2 (delete_time DateTime) ENGINE = MergeTree ORDER BY delete_time SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into t1 values (101, '2023-05-28 00:00:00'), (102, '2023-05-28 00:00:00'); +insert into t2 values ('2023-05-31 00:00:00'); + +EXPLAIN indexes=1 SELECT id, delete_time FROM t1 + CROSS JOIN ( + SELECT delete_time + FROM t2 +) AS d WHERE create_time < delete_time AND id = 101 SETTINGS enable_analyzer=0; + +EXPLAIN indexes=1 SELECT id, delete_time FROM t1 + CROSS JOIN ( + SELECT delete_time + FROM t2 +) AS d WHERE create_time < delete_time AND id = 101 SETTINGS enable_analyzer=1; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +-- expected to get row (1, 3, 1, 4) from JOIN and empty result from the query +SELECT * +FROM +( + SELECT * + FROM Values('id UInt64, t UInt64', (1, 3)) +) AS t1 +ASOF INNER JOIN +( + SELECT * + FROM Values('id UInt64, t UInt64', (1, 1), (1, 2), (1, 3), (1, 4), (1, 5)) +) AS t2 ON (t1.id = t2.id) AND (t1.t < t2.t) +WHERE t2.t != 4; diff --git a/parser/testdata/01763_long_ttl_group_by/ast.json b/parser/testdata/01763_long_ttl_group_by/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01763_long_ttl_group_by/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01763_long_ttl_group_by/metadata.json b/parser/testdata/01763_long_ttl_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01763_long_ttl_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01763_long_ttl_group_by/query.sql b/parser/testdata/01763_long_ttl_group_by/query.sql new file mode 100644 index 000000000..9e1ad574f --- /dev/null +++ b/parser/testdata/01763_long_ttl_group_by/query.sql @@ -0,0 +1,28 @@ +-- Tags: long + +DROP TABLE IF EXISTS test_ttl_group_by01763; +CREATE TABLE test_ttl_group_by01763 +(key UInt32, ts DateTime, value UInt32, min_value UInt32 default value, max_value UInt32 default value) +ENGINE = MergeTree() PARTITION BY toYYYYMM(ts) +ORDER BY (key, toStartOfInterval(ts, toIntervalMinute(3)), ts) +TTL ts + INTERVAL 5 MINUTE GROUP BY key, toStartOfInterval(ts, toIntervalMinute(3)) +SET value = 
sum(value), min_value = min(min_value), max_value = max(max_value), ts=min(toStartOfInterval(ts, toIntervalMinute(3))); + +INSERT INTO test_ttl_group_by01763(key, ts, value) SELECT number%5 as key, now() - interval 10 minute + number, 1 FROM numbers(100000); +INSERT INTO test_ttl_group_by01763(key, ts, value) SELECT number%5 as key, now() - interval 10 minute + number, 0 FROM numbers(1000); +INSERT INTO test_ttl_group_by01763(key, ts, value) SELECT number%5 as key, now() - interval 10 minute + number, 3 FROM numbers(1000); +INSERT INTO test_ttl_group_by01763(key, ts, value) SELECT number%5 as key, now() - interval 2 month + number, 1 FROM numbers(100000); +INSERT INTO test_ttl_group_by01763(key, ts, value) SELECT number%5 as key, now() - interval 2 month + number, 0 FROM numbers(1000); +INSERT INTO test_ttl_group_by01763(key, ts, value) SELECT number%5 as key, now() - interval 2 month + number, 3 FROM numbers(1000); + +SELECT sum(value), min(min_value), max(max_value), uniqExact(key) FROM test_ttl_group_by01763; +SELECT sum(value), min(min_value), max(max_value), uniqExact(key) FROM test_ttl_group_by01763 where key = 3 ; +SELECT sum(value), min(min_value), max(max_value), uniqExact(key) FROM test_ttl_group_by01763 where key = 3 and ts <= today() - interval 30 day ; + +OPTIMIZE TABLE test_ttl_group_by01763 FINAL; + +SELECT sum(value), min(min_value), max(max_value), uniqExact(key) FROM test_ttl_group_by01763; +SELECT sum(value), min(min_value), max(max_value), uniqExact(key) FROM test_ttl_group_by01763 where key = 3 ; +SELECT sum(value), min(min_value), max(max_value), uniqExact(key) FROM test_ttl_group_by01763 where key = 3 and ts <= today() - interval 30 day ; + +DROP TABLE test_ttl_group_by01763; diff --git a/parser/testdata/01763_max_distributed_depth/ast.json b/parser/testdata/01763_max_distributed_depth/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01763_max_distributed_depth/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01763_max_distributed_depth/metadata.json b/parser/testdata/01763_max_distributed_depth/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01763_max_distributed_depth/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01763_max_distributed_depth/query.sql b/parser/testdata/01763_max_distributed_depth/query.sql new file mode 100644 index 000000000..f722a8822 --- /dev/null +++ b/parser/testdata/01763_max_distributed_depth/query.sql @@ -0,0 +1,22 @@ +-- Tags: distributed + +SET prefer_localhost_replica = 1; + +DROP TABLE IF EXISTS tt6; + +CREATE TABLE tt6 +( + `id` UInt32, + `first_column` UInt32, + `second_column` UInt32, + `third_column` UInt32, + `status` String + +) +ENGINE = Distributed('test_shard_localhost', '', 'tt7', rand()); + +DROP TABLE IF EXISTS tt7; + +CREATE TABLE tt7 as tt6 ENGINE = Distributed('test_shard_localhost', '', 'tt6', rand()); -- {serverError INFINITE_LOOP} + +DROP TABLE tt6; diff --git a/parser/testdata/01763_support_map_lowcardinality_type/ast.json b/parser/testdata/01763_support_map_lowcardinality_type/ast.json new file mode 100644 index 000000000..6e4a0ad24 --- /dev/null +++ b/parser/testdata/01763_support_map_lowcardinality_type/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery map_lc (children 1)" + }, + { + "explain": " Identifier map_lc" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001114099, + 
"rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/01763_support_map_lowcardinality_type/metadata.json b/parser/testdata/01763_support_map_lowcardinality_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01763_support_map_lowcardinality_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01763_support_map_lowcardinality_type/query.sql b/parser/testdata/01763_support_map_lowcardinality_type/query.sql new file mode 100644 index 000000000..28361bbd7 --- /dev/null +++ b/parser/testdata/01763_support_map_lowcardinality_type/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS map_lc; +CREATE TABLE map_lc +( + `kv` Map(LowCardinality(String), LowCardinality(String)) +) +ENGINE = Memory; + +INSERT INTO map_lc select map('a', 'b'); +SELECT kv['a'] FROM map_lc; +DROP TABLE map_lc; +SELECT map(toFixedString('1',1),1) AS m, m[toFixedString('1',1)],m[toFixedString('1',2)]; diff --git a/parser/testdata/01764_collapsing_merge_adaptive_granularity/ast.json b/parser/testdata/01764_collapsing_merge_adaptive_granularity/ast.json new file mode 100644 index 000000000..b33bef136 --- /dev/null +++ b/parser/testdata/01764_collapsing_merge_adaptive_granularity/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery collapsing_table (children 1)" + }, + { + "explain": " Identifier collapsing_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001147582, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/01764_collapsing_merge_adaptive_granularity/metadata.json b/parser/testdata/01764_collapsing_merge_adaptive_granularity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01764_collapsing_merge_adaptive_granularity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01764_collapsing_merge_adaptive_granularity/query.sql b/parser/testdata/01764_collapsing_merge_adaptive_granularity/query.sql new file mode 100644 index 000000000..ca6465154 --- /dev/null +++ b/parser/testdata/01764_collapsing_merge_adaptive_granularity/query.sql @@ -0,0 +1,53 @@ +DROP TABLE IF EXISTS collapsing_table; +SET optimize_on_insert = 0; + +CREATE TABLE collapsing_table +( + key UInt64, + value UInt64, + Sign Int8 +) +ENGINE = CollapsingMergeTree(Sign) +ORDER BY key +SETTINGS + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0; + +INSERT INTO collapsing_table SELECT if(number == 8192, 8191, number), 1, if(number == 8192, +1, -1) FROM numbers(8193); + +SELECT sum(Sign), count() from collapsing_table; + +OPTIMIZE TABLE collapsing_table FINAL; + +SELECT sum(Sign), count() from collapsing_table; + +DROP TABLE IF EXISTS collapsing_table; + + +DROP TABLE IF EXISTS collapsing_suspicious_granularity; + +CREATE TABLE collapsing_suspicious_granularity +( + key UInt64, + value UInt64, + Sign Int8 +) +ENGINE = CollapsingMergeTree(Sign) +ORDER BY key +SETTINGS + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0, + index_granularity = 1; + +INSERT INTO collapsing_suspicious_granularity VALUES (1, 1, -1) (1, 1, 1); + +SELECT sum(Sign), count() from collapsing_suspicious_granularity; + +OPTIMIZE TABLE collapsing_suspicious_granularity FINAL; + +SELECT sum(Sign), count() from collapsing_suspicious_granularity; + + 
+DROP TABLE IF EXISTS collapsing_suspicious_granularity; diff --git a/parser/testdata/01764_prefer_column_name_to_alias/ast.json b/parser/testdata/01764_prefer_column_name_to_alias/ast.json new file mode 100644 index 000000000..43ae1a7fd --- /dev/null +++ b/parser/testdata/01764_prefer_column_name_to_alias/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function avg (alias number) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function max (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001127539, + "rows_read": 16, + "bytes_read": 633 + } +} diff --git a/parser/testdata/01764_prefer_column_name_to_alias/metadata.json b/parser/testdata/01764_prefer_column_name_to_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01764_prefer_column_name_to_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01764_prefer_column_name_to_alias/query.sql b/parser/testdata/01764_prefer_column_name_to_alias/query.sql new file mode 100644 index 000000000..781ba6095 --- /dev/null +++ b/parser/testdata/01764_prefer_column_name_to_alias/query.sql @@ -0,0 +1,18 @@ +SELECT avg(number) AS number, max(number) FROM numbers(10); -- { serverError ILLEGAL_AGGREGATION } +SELECT sum(x) AS x, max(x) FROM (SELECT 1 AS x UNION ALL SELECT 2 AS x) t; -- { serverError ILLEGAL_AGGREGATION } +select sum(C1) as C1, count(C1) as C2 from (select number as C1 from numbers(3)) as ITBL; -- { serverError ILLEGAL_AGGREGATION } + +set prefer_column_name_to_alias = 1; +SELECT avg(number) AS number, max(number) FROM numbers(10); +SELECT sum(x) AS x, max(x) FROM (SELECT 1 AS x UNION ALL SELECT 2 AS x) t settings prefer_column_name_to_alias = 1; +select sum(C1) as C1, count(C1) as C2 from (select number as C1 from numbers(3)) as ITBL settings prefer_column_name_to_alias = 1; + +DROP TABLE IF EXISTS mytable; +CREATE TABLE IF NOT EXISTS mytable (start_ts UInt32, end_ts UInt32, uuid String) ENGINE = MergeTree() ORDER BY start_ts; +INSERT INTO mytable VALUES (1, 2, 3); + +SELECT any(uuid) AS id, max(end_ts) - any(start_ts) AS time_delta, any(start_ts) AS start_ts, max(end_ts) AS end_ts FROM mytable GROUP BY uuid HAVING max(end_ts) < 1620141001 ORDER BY any(start_ts) DESC; + +SELECT any(uuid) AS id, max(end_ts) - any(start_ts) AS time_delta, any(start_ts) AS start_ts, max(end_ts) AS end_ts FROM mytable GROUP BY uuid HAVING max(end_ts) < 1620141001 ORDER BY any(start_ts) DESC SETTINGS prefer_column_name_to_alias=1; + +DROP TABLE mytable; diff --git a/parser/testdata/01764_table_function_dictionary/ast.json b/parser/testdata/01764_table_function_dictionary/ast.json new file mode 100644 index 000000000..f66cdc1df --- /dev/null +++ 
b/parser/testdata/01764_table_function_dictionary/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_function_dictionary_source_table (children 1)" + }, + { + "explain": " Identifier table_function_dictionary_source_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001260212, + "rows_read": 2, + "bytes_read": 128 + } +} diff --git a/parser/testdata/01764_table_function_dictionary/metadata.json b/parser/testdata/01764_table_function_dictionary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01764_table_function_dictionary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01764_table_function_dictionary/query.sql b/parser/testdata/01764_table_function_dictionary/query.sql new file mode 100644 index 000000000..eb4e2a8f3 --- /dev/null +++ b/parser/testdata/01764_table_function_dictionary/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS table_function_dictionary_source_table; +CREATE TABLE table_function_dictionary_source_table +( + id UInt64, + value UInt64 +) +ENGINE = TinyLog; + +INSERT INTO table_function_dictionary_source_table VALUES (0, 0); +INSERT INTO table_function_dictionary_source_table VALUES (1, 1); + +DROP DICTIONARY IF EXISTS table_function_dictionary_test_dictionary; +CREATE DICTIONARY table_function_dictionary_test_dictionary +( + id UInt64, + value UInt64 DEFAULT 0 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' DATABASE currentDatabase() TABLE 'table_function_dictionary_source_table')) +LAYOUT(DIRECT()); + +SELECT * FROM dictionary('table_function_dictionary_test_dictionary') ORDER BY ALL; + +DROP DICTIONARY table_function_dictionary_test_dictionary; +DROP TABLE table_function_dictionary_source_table; diff --git a/parser/testdata/01765_hashed_dictionary_simple_key/ast.json b/parser/testdata/01765_hashed_dictionary_simple_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01765_hashed_dictionary_simple_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01765_hashed_dictionary_simple_key/metadata.json b/parser/testdata/01765_hashed_dictionary_simple_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01765_hashed_dictionary_simple_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01765_hashed_dictionary_simple_key/query.sql b/parser/testdata/01765_hashed_dictionary_simple_key/query.sql new file mode 100644 index 000000000..0b12b2fc8 --- /dev/null +++ b/parser/testdata/01765_hashed_dictionary_simple_key/query.sql @@ -0,0 +1,210 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 01765_db; +CREATE DATABASE 01765_db; + +CREATE TABLE 01765_db.simple_key_simple_attributes_source_table +( + id UInt64, + value_first String, + value_second String +) +ENGINE = TinyLog; + +INSERT INTO 01765_db.simple_key_simple_attributes_source_table VALUES(0, 'value_0', 'value_second_0'); +INSERT INTO 01765_db.simple_key_simple_attributes_source_table VALUES(1, 'value_1', 'value_second_1'); +INSERT INTO 01765_db.simple_key_simple_attributes_source_table VALUES(2, 'value_2', 'value_second_2'); + +CREATE DICTIONARY 01765_db.hashed_dictionary_simple_key_simple_attributes +( + id UInt64, + value_first String DEFAULT 'value_first_default', + value_second String DEFAULT 'value_second_default' +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' 
PORT tcpPort() USER 'default' TABLE 'simple_key_simple_attributes_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(HASHED()) +SETTINGS(dictionary_use_async_executor=1, max_threads=8); + +SELECT 'Dictionary hashed_dictionary_simple_key_simple_attributes'; +SELECT 'dictGet existing value'; +SELECT dictGet('01765_db.hashed_dictionary_simple_key_simple_attributes', 'value_first', number) as value_first, + dictGet('01765_db.hashed_dictionary_simple_key_simple_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGet with non existing value'; +SELECT dictGet('01765_db.hashed_dictionary_simple_key_simple_attributes', 'value_first', number) as value_first, + dictGet('01765_db.hashed_dictionary_simple_key_simple_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictGetOrDefault existing value'; +SELECT dictGetOrDefault('01765_db.hashed_dictionary_simple_key_simple_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01765_db.hashed_dictionary_simple_key_simple_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGetOrDefault non existing value'; +SELECT dictGetOrDefault('01765_db.hashed_dictionary_simple_key_simple_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01765_db.hashed_dictionary_simple_key_simple_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictHas'; +SELECT dictHas('01765_db.hashed_dictionary_simple_key_simple_attributes', number) FROM system.numbers LIMIT 4; +SELECT 'select all values as input stream'; +SELECT * FROM 01765_db.hashed_dictionary_simple_key_simple_attributes ORDER BY id; + +DROP DICTIONARY 01765_db.hashed_dictionary_simple_key_simple_attributes; + +CREATE DICTIONARY 01765_db.sparse_hashed_dictionary_simple_key_simple_attributes +( + id UInt64, + value_first String DEFAULT 'value_first_default', + value_second String DEFAULT 'value_second_default' +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'simple_key_simple_attributes_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(SPARSE_HASHED()); + +SELECT 'Dictionary sparse_hashed_dictionary_simple_key_simple_attributes'; +SELECT 'dictGet existing value'; +SELECT dictGet('01765_db.sparse_hashed_dictionary_simple_key_simple_attributes', 'value_first', number) as value_first, + dictGet('01765_db.sparse_hashed_dictionary_simple_key_simple_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGet with non existing value'; +SELECT dictGet('01765_db.sparse_hashed_dictionary_simple_key_simple_attributes', 'value_first', number) as value_first, + dictGet('01765_db.sparse_hashed_dictionary_simple_key_simple_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictGetOrDefault existing value'; +SELECT dictGetOrDefault('01765_db.sparse_hashed_dictionary_simple_key_simple_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01765_db.sparse_hashed_dictionary_simple_key_simple_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGetOrDefault non existing value'; +SELECT dictGetOrDefault('01765_db.sparse_hashed_dictionary_simple_key_simple_attributes', 'value_first', number, toString('default')) as value_first, + 
dictGetOrDefault('01765_db.sparse_hashed_dictionary_simple_key_simple_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictHas'; +SELECT dictHas('01765_db.sparse_hashed_dictionary_simple_key_simple_attributes', number) FROM system.numbers LIMIT 4; +SELECT 'select all values as input stream'; +SELECT * FROM 01765_db.sparse_hashed_dictionary_simple_key_simple_attributes ORDER BY id; + +DROP DICTIONARY 01765_db.sparse_hashed_dictionary_simple_key_simple_attributes; + +DROP TABLE 01765_db.simple_key_simple_attributes_source_table; + +CREATE TABLE 01765_db.simple_key_complex_attributes_source_table +( + id UInt64, + value_first String, + value_second Nullable(String) +) +ENGINE = TinyLog; + +INSERT INTO 01765_db.simple_key_complex_attributes_source_table VALUES(0, 'value_0', 'value_second_0'); +INSERT INTO 01765_db.simple_key_complex_attributes_source_table VALUES(1, 'value_1', NULL); +INSERT INTO 01765_db.simple_key_complex_attributes_source_table VALUES(2, 'value_2', 'value_second_2'); + +CREATE DICTIONARY 01765_db.hashed_dictionary_simple_key_complex_attributes +( + id UInt64, + value_first String DEFAULT 'value_first_default', + value_second Nullable(String) DEFAULT 'value_second_default' +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'simple_key_complex_attributes_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(HASHED()); + +SELECT 'Dictionary hashed_dictionary_simple_key_complex_attributes'; +SELECT 'dictGet existing value'; +SELECT dictGet('01765_db.hashed_dictionary_simple_key_complex_attributes', 'value_first', number) as value_first, + dictGet('01765_db.hashed_dictionary_simple_key_complex_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGet with non existing value'; +SELECT dictGet('01765_db.hashed_dictionary_simple_key_complex_attributes', 'value_first', number) as value_first, + dictGet('01765_db.hashed_dictionary_simple_key_complex_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictGetOrDefault existing value'; +SELECT dictGetOrDefault('01765_db.hashed_dictionary_simple_key_complex_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01765_db.hashed_dictionary_simple_key_complex_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGetOrDefault non existing value'; +SELECT dictGetOrDefault('01765_db.hashed_dictionary_simple_key_complex_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01765_db.hashed_dictionary_simple_key_complex_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictHas'; +SELECT dictHas('01765_db.hashed_dictionary_simple_key_complex_attributes', number) FROM system.numbers LIMIT 4; +SELECT 'select all values as input stream'; +SELECT * FROM 01765_db.hashed_dictionary_simple_key_complex_attributes ORDER BY id; + +DROP DICTIONARY 01765_db.hashed_dictionary_simple_key_complex_attributes; + +CREATE DICTIONARY 01765_db.sparse_hashed_dictionary_simple_key_complex_attributes +( + id UInt64, + value_first String DEFAULT 'value_first_default', + value_second Nullable(String) DEFAULT 'value_second_default' +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'simple_key_complex_attributes_source_table')) +LIFETIME(MIN 1 MAX 1000) 
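+-- LIFETIME(MIN 1 MAX 1000): the dictionary is reloaded at a uniformly random point within +-- [1, 1000] seconds, which spreads the reload load on the source across servers.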
+LAYOUT(HASHED()); + +SELECT 'Dictionary sparse_hashed_dictionary_simple_key_complex_attributes'; +SELECT 'dictGet existing value'; +SELECT dictGet('01765_db.sparse_hashed_dictionary_simple_key_complex_attributes', 'value_first', number) as value_first, + dictGet('01765_db.sparse_hashed_dictionary_simple_key_complex_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGet with non existing value'; +SELECT dictGet('01765_db.sparse_hashed_dictionary_simple_key_complex_attributes', 'value_first', number) as value_first, + dictGet('01765_db.sparse_hashed_dictionary_simple_key_complex_attributes', 'value_second', number) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictGetOrDefault existing value'; +SELECT dictGetOrDefault('01765_db.sparse_hashed_dictionary_simple_key_complex_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01765_db.sparse_hashed_dictionary_simple_key_complex_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGetOrDefault non existing value'; +SELECT dictGetOrDefault('01765_db.sparse_hashed_dictionary_simple_key_complex_attributes', 'value_first', number, toString('default')) as value_first, + dictGetOrDefault('01765_db.sparse_hashed_dictionary_simple_key_complex_attributes', 'value_second', number, toString('default')) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictHas'; +SELECT dictHas('01765_db.sparse_hashed_dictionary_simple_key_complex_attributes', number) FROM system.numbers LIMIT 4; +SELECT 'select all values as input stream'; +SELECT * FROM 01765_db.sparse_hashed_dictionary_simple_key_complex_attributes ORDER BY id; + +DROP DICTIONARY 01765_db.sparse_hashed_dictionary_simple_key_complex_attributes; + +DROP TABLE 01765_db.simple_key_complex_attributes_source_table; + +CREATE TABLE 01765_db.simple_key_hierarchy_table +( + id UInt64, + parent_id UInt64 +) ENGINE = TinyLog(); + +INSERT INTO 01765_db.simple_key_hierarchy_table VALUES (1, 0); +INSERT INTO 01765_db.simple_key_hierarchy_table VALUES (2, 1); +INSERT INTO 01765_db.simple_key_hierarchy_table VALUES (3, 1); +INSERT INTO 01765_db.simple_key_hierarchy_table VALUES (4, 2); + +CREATE DICTIONARY 01765_db.hashed_dictionary_simple_key_hierarchy +( + id UInt64, + parent_id UInt64 HIERARCHICAL +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'simple_key_hierarchy_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(HASHED()); + +SELECT 'Dictionary hashed_dictionary_simple_key_hierarchy'; +SELECT 'dictGet'; +SELECT dictGet('01765_db.hashed_dictionary_simple_key_hierarchy', 'parent_id', number) FROM system.numbers LIMIT 5; +SELECT 'dictGetHierarchy'; +SELECT dictGetHierarchy('01765_db.hashed_dictionary_simple_key_hierarchy', toUInt64(1)); +SELECT dictGetHierarchy('01765_db.hashed_dictionary_simple_key_hierarchy', toUInt64(4)); + +DROP DICTIONARY 01765_db.hashed_dictionary_simple_key_hierarchy; + +CREATE DICTIONARY 01765_db.sparse_hashed_dictionary_simple_key_hierarchy +( + id UInt64, + parent_id UInt64 HIERARCHICAL +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'simple_key_hierarchy_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(HASHED()); + +SELECT 'Dictionary sparse_hashed_dictionary_simple_key_hierarchy'; +SELECT 'dictGet'; +SELECT dictGet('01765_db.sparse_hashed_dictionary_simple_key_hierarchy', 'parent_id', number) FROM system.numbers LIMIT 5; +SELECT 'dictGetHierarchy'; +SELECT 
dictGetHierarchy('01765_db.sparse_hashed_dictionary_simple_key_hierarchy', toUInt64(1)); +SELECT dictGetHierarchy('01765_db.sparse_hashed_dictionary_simple_key_hierarchy', toUInt64(4)); + +DROP DICTIONARY 01765_db.sparse_hashed_dictionary_simple_key_hierarchy; + +DROP TABLE 01765_db.simple_key_hierarchy_table; + +DROP DATABASE 01765_db; diff --git a/parser/testdata/01765_move_to_table_overlapping_block_number/ast.json b/parser/testdata/01765_move_to_table_overlapping_block_number/ast.json new file mode 100644 index 000000000..d857380fd --- /dev/null +++ b/parser/testdata/01765_move_to_table_overlapping_block_number/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_src (children 1)" + }, + { + "explain": " Identifier t_src" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001200491, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/01765_move_to_table_overlapping_block_number/metadata.json b/parser/testdata/01765_move_to_table_overlapping_block_number/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01765_move_to_table_overlapping_block_number/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01765_move_to_table_overlapping_block_number/query.sql b/parser/testdata/01765_move_to_table_overlapping_block_number/query.sql new file mode 100644 index 000000000..ea00c573c --- /dev/null +++ b/parser/testdata/01765_move_to_table_overlapping_block_number/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS t_src; +DROP TABLE IF EXISTS t_dst; + +CREATE TABLE t_src (id UInt32, v UInt32) ENGINE = MergeTree ORDER BY id PARTITION BY id; +CREATE TABLE t_dst (id UInt32, v UInt32) ENGINE = MergeTree ORDER BY id PARTITION BY id; + +SYSTEM STOP MERGES t_src; +SYSTEM STOP MERGES t_dst; + +INSERT INTO t_dst VALUES (1, 1); +INSERT INTO t_dst VALUES (1, 2); +INSERT INTO t_dst VALUES (1, 3); + +INSERT INTO t_src VALUES (1, 4); + +ALTER TABLE t_src MOVE PARTITION 1 TO TABLE t_dst; +SELECT *, _part FROM t_dst ORDER BY v; + +DROP TABLE t_src; +DROP TABLE t_dst; diff --git a/parser/testdata/01765_tehran_dst/ast.json b/parser/testdata/01765_tehran_dst/ast.json new file mode 100644 index 000000000..2807a8c18 --- /dev/null +++ b/parser/testdata/01765_tehran_dst/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTimeZone (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2021-03-22 18:45:11'" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " Literal 'Asia\/Tehran'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001501222, + "rows_read": 11, + "bytes_read": 430 + } +} diff --git a/parser/testdata/01765_tehran_dst/metadata.json b/parser/testdata/01765_tehran_dst/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01765_tehran_dst/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01765_tehran_dst/query.sql b/parser/testdata/01765_tehran_dst/query.sql new file mode 100644 index 
000000000..41b92ae23 --- /dev/null +++ b/parser/testdata/01765_tehran_dst/query.sql @@ -0,0 +1,2 @@ +SELECT toTimeZone(toDateTime('2021-03-22 18:45:11', 'UTC'), 'Asia/Tehran'); +SELECT toDateTime('2020-03-21 23:00:00', 'Asia/Tehran'); diff --git a/parser/testdata/01766_hashed_dictionary_complex_key/ast.json b/parser/testdata/01766_hashed_dictionary_complex_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01766_hashed_dictionary_complex_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01766_hashed_dictionary_complex_key/metadata.json b/parser/testdata/01766_hashed_dictionary_complex_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01766_hashed_dictionary_complex_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01766_hashed_dictionary_complex_key/query.sql b/parser/testdata/01766_hashed_dictionary_complex_key/query.sql new file mode 100644 index 000000000..1342e3d69 --- /dev/null +++ b/parser/testdata/01766_hashed_dictionary_complex_key/query.sql @@ -0,0 +1,100 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 01766_db; +CREATE DATABASE 01766_db; + +CREATE TABLE 01766_db.complex_key_simple_attributes_source_table +( + id UInt64, + id_key String, + value_first String, + value_second String +) +ENGINE = TinyLog; + +INSERT INTO 01766_db.complex_key_simple_attributes_source_table VALUES(0, 'id_key_0', 'value_0', 'value_second_0'); +INSERT INTO 01766_db.complex_key_simple_attributes_source_table VALUES(1, 'id_key_1', 'value_1', 'value_second_1'); +INSERT INTO 01766_db.complex_key_simple_attributes_source_table VALUES(2, 'id_key_2', 'value_2', 'value_second_2'); + +CREATE DICTIONARY 01766_db.hashed_dictionary_complex_key_simple_attributes +( + id UInt64, + id_key String, + value_first String DEFAULT 'value_first_default', + value_second String DEFAULT 'value_second_default' +) +PRIMARY KEY id, id_key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'complex_key_simple_attributes_source_table' DB '01766_db')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(COMPLEX_KEY_HASHED()); + +SELECT 'Dictionary hashed_dictionary_complex_key_simple_attributes'; +SELECT 'dictGet existing value'; +SELECT dictGet('01766_db.hashed_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, + dictGet('01766_db.hashed_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGet with non existing value'; +SELECT dictGet('01766_db.hashed_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, + dictGet('01766_db.hashed_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictGetOrDefault existing value'; +SELECT dictGetOrDefault('01766_db.hashed_dictionary_complex_key_simple_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, + dictGetOrDefault('01766_db.hashed_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGetOrDefault non existing value'; +SELECT dictGetOrDefault('01766_db.hashed_dictionary_complex_key_simple_attributes', 
'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, + dictGetOrDefault('01766_db.hashed_dictionary_complex_key_simple_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictHas'; +SELECT dictHas('01766_db.hashed_dictionary_complex_key_simple_attributes', (number, concat('id_key_', toString(number)))) FROM system.numbers LIMIT 4; +SELECT 'select all values as input stream'; +SELECT * FROM 01766_db.hashed_dictionary_complex_key_simple_attributes ORDER BY (id, id_key); + +DROP DICTIONARY 01766_db.hashed_dictionary_complex_key_simple_attributes; + +DROP TABLE 01766_db.complex_key_simple_attributes_source_table; + +CREATE TABLE 01766_db.complex_key_complex_attributes_source_table +( + id UInt64, + id_key String, + value_first String, + value_second Nullable(String) +) +ENGINE = TinyLog; + +INSERT INTO 01766_db.complex_key_complex_attributes_source_table VALUES(0, 'id_key_0', 'value_0', 'value_second_0'); +INSERT INTO 01766_db.complex_key_complex_attributes_source_table VALUES(1, 'id_key_1', 'value_1', NULL); +INSERT INTO 01766_db.complex_key_complex_attributes_source_table VALUES(2, 'id_key_2', 'value_2', 'value_second_2'); + +CREATE DICTIONARY 01766_db.hashed_dictionary_complex_key_complex_attributes +( + id UInt64, + id_key String, + + value_first String DEFAULT 'value_first_default', + value_second Nullable(String) DEFAULT 'value_second_default' +) +PRIMARY KEY id, id_key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'complex_key_complex_attributes_source_table' DB '01766_db')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(COMPLEX_KEY_HASHED()); + +SELECT 'Dictionary hashed_dictionary_complex_key_complex_attributes'; +SELECT 'dictGet existing value'; +SELECT dictGet('01766_db.hashed_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, + dictGet('01766_db.hashed_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGet with non existing value'; +SELECT dictGet('01766_db.hashed_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number)))) as value_first, + dictGet('01766_db.hashed_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number)))) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictGetOrDefault existing value'; +SELECT dictGetOrDefault('01766_db.hashed_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, + dictGetOrDefault('01766_db.hashed_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 3; +SELECT 'dictGetOrDefault non existing value'; +SELECT dictGetOrDefault('01766_db.hashed_dictionary_complex_key_complex_attributes', 'value_first', (number, concat('id_key_', toString(number))), toString('default')) as value_first, + dictGetOrDefault('01766_db.hashed_dictionary_complex_key_complex_attributes', 'value_second', (number, concat('id_key_', toString(number))), toString('default')) as value_second FROM system.numbers LIMIT 4; +SELECT 'dictHas'; +SELECT dictHas('01766_db.hashed_dictionary_complex_key_complex_attributes', (number, concat('id_key_', 
toString(number)))) FROM system.numbers LIMIT 4; +SELECT 'select all values as input stream'; +SELECT * FROM 01766_db.hashed_dictionary_complex_key_complex_attributes ORDER BY (id, id_key); + +DROP DICTIONARY 01766_db.hashed_dictionary_complex_key_complex_attributes; +DROP TABLE 01766_db.complex_key_complex_attributes_source_table; + +DROP DATABASE 01766_db; diff --git a/parser/testdata/01766_todatetime64_no_timezone_arg/ast.json b/parser/testdata/01766_todatetime64_no_timezone_arg/ast.json new file mode 100644 index 000000000..606c724b4 --- /dev/null +++ b/parser/testdata/01766_todatetime64_no_timezone_arg/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2021-03-23'" + }, + { + "explain": " Literal UInt64_3" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001635693, + "rows_read": 8, + "bytes_read": 299 + } +} diff --git a/parser/testdata/01766_todatetime64_no_timezone_arg/metadata.json b/parser/testdata/01766_todatetime64_no_timezone_arg/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01766_todatetime64_no_timezone_arg/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01766_todatetime64_no_timezone_arg/query.sql b/parser/testdata/01766_todatetime64_no_timezone_arg/query.sql new file mode 100644 index 000000000..2aac92248 --- /dev/null +++ b/parser/testdata/01766_todatetime64_no_timezone_arg/query.sql @@ -0,0 +1 @@ +SELECT toDateTime64('2021-03-23', 3); diff --git a/parser/testdata/01768_array_product/ast.json b/parser/testdata/01768_array_product/ast.json new file mode 100644 index 000000000..48f7e13dd --- /dev/null +++ b/parser/testdata/01768_array_product/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Array product with constant column'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.00107361, + "rows_read": 5, + "bytes_read": 205 + } +} diff --git a/parser/testdata/01768_array_product/metadata.json b/parser/testdata/01768_array_product/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01768_array_product/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01768_array_product/query.sql b/parser/testdata/01768_array_product/query.sql new file mode 100644 index 000000000..75056888e --- /dev/null +++ b/parser/testdata/01768_array_product/query.sql @@ -0,0 +1,26 @@ +SELECT 'Array product with constant column'; + +SELECT arrayProduct([1,2,3,4,5,6]) as a, toTypeName(a); +SELECT arrayProduct(array(1.0,2.0,3.0,4.0)) as a, toTypeName(a); +SELECT arrayProduct(array(1,3.5)) as a, toTypeName(a); +SELECT arrayProduct([toDecimal64(1,8), toDecimal64(2,8), toDecimal64(3,8)]) as a, toTypeName(a); + +SELECT 'Array product with non constant column'; + +DROP TABLE IF EXISTS test_aggregation; +CREATE TABLE test_aggregation (x 
Array(Int)) ENGINE=TinyLog; +INSERT INTO test_aggregation VALUES ([1,2,3,4]), ([]), ([1,2,3]); +SELECT arrayProduct(x) FROM test_aggregation; +DROP TABLE test_aggregation; + +CREATE TABLE test_aggregation (x Array(Decimal64(8))) ENGINE=TinyLog; +INSERT INTO test_aggregation VALUES ([1,2,3,4]), ([]), ([1,2,3]); +SELECT arrayProduct(x) FROM test_aggregation; +DROP TABLE test_aggregation; + +SELECT 'Types of aggregation result array product'; +SELECT toTypeName(arrayProduct([toInt8(0)])), toTypeName(arrayProduct([toInt16(0)])), toTypeName(arrayProduct([toInt32(0)])), toTypeName(arrayProduct([toInt64(0)])); +SELECT toTypeName(arrayProduct([toUInt8(0)])), toTypeName(arrayProduct([toUInt16(0)])), toTypeName(arrayProduct([toUInt32(0)])), toTypeName(arrayProduct([toUInt64(0)])); +SELECT toTypeName(arrayProduct([toInt128(0)])), toTypeName(arrayProduct([toInt256(0)])), toTypeName(arrayProduct([toUInt256(0)])); +SELECT toTypeName(arrayProduct([toFloat32(0)])), toTypeName(arrayProduct([toFloat64(0)])); +SELECT toTypeName(arrayProduct([toDecimal32(0, 8)])), toTypeName(arrayProduct([toDecimal64(0, 8)])), toTypeName(arrayProduct([toDecimal128(0, 8)])); diff --git a/parser/testdata/01768_extended_range/ast.json b/parser/testdata/01768_extended_range/ast.json new file mode 100644 index 000000000..da81250ce --- /dev/null +++ b/parser/testdata/01768_extended_range/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toYear (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '1968-12-12 11:22:33'" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal 'UTC'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.00142131, + "rows_read": 11, + "bytes_read": 425 + } +} diff --git a/parser/testdata/01768_extended_range/metadata.json b/parser/testdata/01768_extended_range/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01768_extended_range/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01768_extended_range/query.sql b/parser/testdata/01768_extended_range/query.sql new file mode 100644 index 000000000..fe506e97b --- /dev/null +++ b/parser/testdata/01768_extended_range/query.sql @@ -0,0 +1,4 @@ +SELECT toYear(toDateTime64('1968-12-12 11:22:33', 0, 'UTC')); +SELECT toInt16(toRelativeWeekNum(toDateTime64('1960-11-30 18:00:11.999', 3, 'UTC'))); +SELECT toStartOfQuarter(toDateTime64('1990-01-04 12:14:12', 0, 'UTC')); +SELECT toUnixTimestamp(toDateTime64('1900-12-12 11:22:33', 0, 'UTC')); -- { serverError DECIMAL_OVERFLOW } diff --git a/parser/testdata/01769_extended_range_2/ast.json b/parser/testdata/01769_extended_range_2/ast.json new file mode 100644 index 000000000..41043db1b --- /dev/null +++ b/parser/testdata/01769_extended_range_2/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + 
"explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '1969-12-31 18:00:12'" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal 'America\/Phoenix'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001134849, + "rows_read": 9, + "bytes_read": 347 + } +} diff --git a/parser/testdata/01769_extended_range_2/metadata.json b/parser/testdata/01769_extended_range_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01769_extended_range_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01769_extended_range_2/query.sql b/parser/testdata/01769_extended_range_2/query.sql new file mode 100644 index 000000000..0b1319dda --- /dev/null +++ b/parser/testdata/01769_extended_range_2/query.sql @@ -0,0 +1,3 @@ +SELECT toDateTime64('1969-12-31 18:00:12', 0, 'America/Phoenix'); +SELECT toDateTime64('1969-12-30 18:00:12', 0, 'America/Phoenix'); +SELECT toDateTime64('1969-12-31 18:00:12', 0, 'Asia/Istanbul'); diff --git a/parser/testdata/01770_add_months_ubsan/ast.json b/parser/testdata/01770_add_months_ubsan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01770_add_months_ubsan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01770_add_months_ubsan/metadata.json b/parser/testdata/01770_add_months_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01770_add_months_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01770_add_months_ubsan/query.sql b/parser/testdata/01770_add_months_ubsan/query.sql new file mode 100644 index 000000000..039434ff9 --- /dev/null +++ b/parser/testdata/01770_add_months_ubsan/query.sql @@ -0,0 +1,2 @@ +-- Result does not make sense but UBSan report should not be triggered. 
+SELECT ignore(now() + INTERVAL 9223372036854775807 MONTH); diff --git a/parser/testdata/01770_extended_range_3/ast.json b/parser/testdata/01770_extended_range_3/ast.json new file mode 100644 index 000000000..e85d7b5ae --- /dev/null +++ b/parser/testdata/01770_extended_range_3/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function addHours (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '1984-03-31 23:00:00'" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal 'Asia\/Novosibirsk'" + }, + { + "explain": " Literal UInt64_8" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001259267, + "rows_read": 12, + "bytes_read": 470 + } +} diff --git a/parser/testdata/01770_extended_range_3/metadata.json b/parser/testdata/01770_extended_range_3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01770_extended_range_3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01770_extended_range_3/query.sql b/parser/testdata/01770_extended_range_3/query.sql new file mode 100644 index 000000000..68e0782d3 --- /dev/null +++ b/parser/testdata/01770_extended_range_3/query.sql @@ -0,0 +1,2 @@ +SELECT addHours(toDateTime64('1984-03-31 23:00:00', 0, 'Asia/Novosibirsk'), 8); +SELECT addHours(toDateTime64('1985-03-31 00:00:00', 0, 'Asia/Novosibirsk'), 8); diff --git a/parser/testdata/01771_bloom_filter_not_has/ast.json b/parser/testdata/01771_bloom_filter_not_has/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01771_bloom_filter_not_has/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01771_bloom_filter_not_has/metadata.json b/parser/testdata/01771_bloom_filter_not_has/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01771_bloom_filter_not_has/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01771_bloom_filter_not_has/query.sql b/parser/testdata/01771_bloom_filter_not_has/query.sql new file mode 100644 index 000000000..00b71d6fe --- /dev/null +++ b/parser/testdata/01771_bloom_filter_not_has/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-parallel, long +DROP TABLE IF EXISTS bloom_filter_null_array; +CREATE TABLE bloom_filter_null_array (v Array(Int32), INDEX idx v TYPE bloom_filter GRANULARITY 3) ENGINE = MergeTree() ORDER BY v SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +INSERT INTO bloom_filter_null_array SELECT [number] FROM numbers(10000000); +SELECT COUNT() FROM bloom_filter_null_array; +SELECT COUNT() FROM bloom_filter_null_array WHERE has(v, 0); +SELECT COUNT() FROM bloom_filter_null_array WHERE not has(v, 0); +DROP TABLE bloom_filter_null_array; diff --git a/parser/testdata/01771_datetime64_no_time_part/ast.json b/parser/testdata/01771_datetime64_no_time_part/ast.json new file mode 100644 index 000000000..4f556f5f5 --- /dev/null +++ b/parser/testdata/01771_datetime64_no_time_part/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + 
"explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '1985-03-31'" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal 'Europe\/Helsinki'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001350454, + "rows_read": 9, + "bytes_read": 338 + } +} diff --git a/parser/testdata/01771_datetime64_no_time_part/metadata.json b/parser/testdata/01771_datetime64_no_time_part/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01771_datetime64_no_time_part/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01771_datetime64_no_time_part/query.sql b/parser/testdata/01771_datetime64_no_time_part/query.sql new file mode 100644 index 000000000..debf4783e --- /dev/null +++ b/parser/testdata/01771_datetime64_no_time_part/query.sql @@ -0,0 +1 @@ +SELECT toDateTime64('1985-03-31', 0, 'Europe/Helsinki'); diff --git a/parser/testdata/01772_intdiv_minus_one_ubsan/ast.json b/parser/testdata/01772_intdiv_minus_one_ubsan/ast.json new file mode 100644 index 000000000..9ce96f82c --- /dev/null +++ b/parser/testdata/01772_intdiv_minus_one_ubsan/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function intDiv (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toInt64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_9223372036854775807" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001511605, + "rows_read": 17, + "bytes_read": 680 + } +} diff --git a/parser/testdata/01772_intdiv_minus_one_ubsan/metadata.json b/parser/testdata/01772_intdiv_minus_one_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01772_intdiv_minus_one_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01772_intdiv_minus_one_ubsan/query.sql b/parser/testdata/01772_intdiv_minus_one_ubsan/query.sql new file mode 100644 index 000000000..20b4f5851 --- /dev/null +++ b/parser/testdata/01772_intdiv_minus_one_ubsan/query.sql @@ -0,0 +1 @@ +SELECT intDiv(toInt64(number), -1) FROM numbers(9223372036854775807, 10); diff --git a/parser/testdata/01772_to_start_of_hour_align/ast.json b/parser/testdata/01772_to_start_of_hour_align/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01772_to_start_of_hour_align/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01772_to_start_of_hour_align/metadata.json 
b/parser/testdata/01772_to_start_of_hour_align/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01772_to_start_of_hour_align/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01772_to_start_of_hour_align/query.sql b/parser/testdata/01772_to_start_of_hour_align/query.sql new file mode 100644 index 000000000..6d1bb460f --- /dev/null +++ b/parser/testdata/01772_to_start_of_hour_align/query.sql @@ -0,0 +1,21 @@ +-- Rounding down to hour intervals is aligned to midnight, even if the interval length does not evenly divide the day. +SELECT toStartOfInterval(toDateTime('2021-03-23 03:58:00'), INTERVAL 11 HOUR); +SELECT toStartOfInterval(toDateTime('2021-03-23 13:58:00'), INTERVAL 11 HOUR); +SELECT toStartOfInterval(toDateTime('2021-03-23 23:58:00'), INTERVAL 11 HOUR); + +-- It should work correctly even in timezones with a non-whole-hour offset. India has +05:30. +SELECT toStartOfHour(toDateTime('2021-03-23 13:58:00', 'Asia/Kolkata')); +SELECT toStartOfInterval(toDateTime('2021-03-23 13:58:00', 'Asia/Kolkata'), INTERVAL 6 HOUR); + +-- Specifying an interval longer than 24 hours is not correct, but it works as expected by simply rounding down to midnight. +SELECT toStartOfInterval(toDateTime('2021-03-23 13:58:00', 'Asia/Kolkata'), INTERVAL 66 HOUR); + +-- In the case of timezone shifts, rounding is performed on the hour number in "wall clock" time. +-- The intervals may become shorter or longer due to time shifts; for example, a three-hour interval may actually last only two hours. +-- If the same "wall clock" hour corresponds to multiple time points because the clock was shifted backwards, an unspecified time point is selected among the candidates. +SELECT toDateTime('2010-03-28 00:00:00', 'Europe/Moscow') + INTERVAL 15 * number MINUTE AS src, toStartOfInterval(src, INTERVAL 2 HOUR) AS rounded, toUnixTimestamp(src) AS t FROM numbers(20); +SELECT toDateTime('2010-10-31 00:00:00', 'Europe/Moscow') + INTERVAL 15 * number MINUTE AS src, toStartOfInterval(src, INTERVAL 2 HOUR) AS rounded, toUnixTimestamp(src) AS t FROM numbers(20); + +-- And this should work even for shifts that are not a whole number of hours.
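+-- Australia/Lord_Howe is a rare zone with a half-hour DST shift: UTC+10:30 standard time and UTC+11:00 in summer, which is exactly the case the next two queries probe.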
+SELECT toDateTime('2020-04-05 00:00:00', 'Australia/Lord_Howe') + INTERVAL 15 * number MINUTE AS src, toStartOfInterval(src, INTERVAL 2 HOUR) AS rounded, toUnixTimestamp(src) AS t FROM numbers(20); +SELECT toDateTime('2020-10-04 00:00:00', 'Australia/Lord_Howe') + INTERVAL 15 * number MINUTE AS src, toStartOfInterval(src, INTERVAL 2 HOUR) AS rounded, toUnixTimestamp(src) AS t FROM numbers(20); diff --git a/parser/testdata/01773_case_sensitive_revision/ast.json b/parser/testdata/01773_case_sensitive_revision/ast.json new file mode 100644 index 000000000..188925f59 --- /dev/null +++ b/parser/testdata/01773_case_sensitive_revision/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function revision (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function Revision (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function REVISION (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function Revision (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function revisiON (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function reVision (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001178492, + "rows_read": 22, + "bytes_read": 827 + } +} diff --git a/parser/testdata/01773_case_sensitive_revision/metadata.json b/parser/testdata/01773_case_sensitive_revision/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01773_case_sensitive_revision/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01773_case_sensitive_revision/query.sql b/parser/testdata/01773_case_sensitive_revision/query.sql new file mode 100644 index 000000000..16970daf6 --- /dev/null +++ b/parser/testdata/01773_case_sensitive_revision/query.sql @@ -0,0 +1 @@ +SELECT revision()=Revision(), REVISION()=Revision(), revisiON()=reVision(); diff --git a/parser/testdata/01773_case_sensitive_version/ast.json b/parser/testdata/01773_case_sensitive_version/ast.json new file mode 100644 index 000000000..238b00e05 --- /dev/null +++ b/parser/testdata/01773_case_sensitive_version/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function version (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function Version (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " 
ExpressionList (children 2)" + }, + { + "explain": " Function VERSION (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function Version (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function vErSiOn (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function VeRsIoN (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001020957, + "rows_read": 22, + "bytes_read": 821 + } +} diff --git a/parser/testdata/01773_case_sensitive_version/metadata.json b/parser/testdata/01773_case_sensitive_version/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01773_case_sensitive_version/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01773_case_sensitive_version/query.sql b/parser/testdata/01773_case_sensitive_version/query.sql new file mode 100644 index 000000000..27fa1c27b --- /dev/null +++ b/parser/testdata/01773_case_sensitive_version/query.sql @@ -0,0 +1 @@ +SELECT version()=Version(), VERSION()=Version(), vErSiOn()=VeRsIoN(); diff --git a/parser/testdata/01773_datetime64_add_ubsan/ast.json b/parser/testdata/01773_datetime64_add_ubsan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01773_datetime64_add_ubsan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01773_datetime64_add_ubsan/metadata.json b/parser/testdata/01773_datetime64_add_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01773_datetime64_add_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01773_datetime64_add_ubsan/query.sql b/parser/testdata/01773_datetime64_add_ubsan/query.sql new file mode 100644 index 000000000..f0a352a79 --- /dev/null +++ b/parser/testdata/01773_datetime64_add_ubsan/query.sql @@ -0,0 +1,2 @@ +-- The result is unspecified but UBSan should not argue. 
+SELECT ignore(addHours(now64(3), inf)) FROM numbers(2); -- { serverError DECIMAL_OVERFLOW } diff --git a/parser/testdata/01773_min_max_time_system_parts_datetime64/ast.json b/parser/testdata/01773_min_max_time_system_parts_datetime64/ast.json new file mode 100644 index 000000000..e8f745fd4 --- /dev/null +++ b/parser/testdata/01773_min_max_time_system_parts_datetime64/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001015741, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01773_min_max_time_system_parts_datetime64/metadata.json b/parser/testdata/01773_min_max_time_system_parts_datetime64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01773_min_max_time_system_parts_datetime64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01773_min_max_time_system_parts_datetime64/query.sql b/parser/testdata/01773_min_max_time_system_parts_datetime64/query.sql new file mode 100644 index 000000000..5a1f809b0 --- /dev/null +++ b/parser/testdata/01773_min_max_time_system_parts_datetime64/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (time DateTime64(3)) ENGINE = MergeTree ORDER BY tuple() PARTITION BY toStartOfInterval(time, INTERVAL 2 YEAR); + +INSERT INTO test VALUES ('2000-01-02 03:04:05.123'), ('2001-02-03 04:05:06.789'); + +SELECT min_time, max_time FROM system.parts WHERE table = 'test' AND database = currentDatabase(); +SELECT min_time, max_time FROM system.parts_columns WHERE table = 'test' AND database = currentDatabase(); + +DROP TABLE test; diff --git a/parser/testdata/01774_bar_with_illegal_value/ast.json b/parser/testdata/01774_bar_with_illegal_value/ast.json new file mode 100644 index 000000000..8a6f75e3a --- /dev/null +++ b/parser/testdata/01774_bar_with_illegal_value/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function minus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function greatCircleAngle (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal UInt64_1048575" + }, + { + "explain": " Literal UInt64_257" + }, + { + "explain": " Literal Int64_-9223372036854775808" + }, + { + "explain": " Literal UInt64_1048576" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function bar (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_7" + }, + { + "explain": " Literal Float64_-inf" + }, + { + "explain": " Literal UInt64_1024" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001032958, + "rows_read": 18, + "bytes_read": 682 + } +} diff --git a/parser/testdata/01774_bar_with_illegal_value/metadata.json b/parser/testdata/01774_bar_with_illegal_value/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01774_bar_with_illegal_value/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01774_bar_with_illegal_value/query.sql 
b/parser/testdata/01774_bar_with_illegal_value/query.sql new file mode 100644 index 000000000..44ed0521d --- /dev/null +++ b/parser/testdata/01774_bar_with_illegal_value/query.sql @@ -0,0 +1 @@ +SELECT greatCircleAngle(1048575, 257, -9223372036854775808, 1048576) - NULL, bar(7, -inf, 1024); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/01774_case_sensitive_connection_id/ast.json b/parser/testdata/01774_case_sensitive_connection_id/ast.json new file mode 100644 index 000000000..01c87d7b9 --- /dev/null +++ b/parser/testdata/01774_case_sensitive_connection_id/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 6)" + }, + { + "explain": " Function connection_id (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function CONNECTION_ID (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function CoNnEcTiOn_Id (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function connectionid (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function CONNECTIONID (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function CoNnEcTiOnId (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001443121, + "rows_read": 16, + "bytes_read": 590 + } +} diff --git a/parser/testdata/01774_case_sensitive_connection_id/metadata.json b/parser/testdata/01774_case_sensitive_connection_id/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01774_case_sensitive_connection_id/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01774_case_sensitive_connection_id/query.sql b/parser/testdata/01774_case_sensitive_connection_id/query.sql new file mode 100644 index 000000000..5a4f2b585 --- /dev/null +++ b/parser/testdata/01774_case_sensitive_connection_id/query.sql @@ -0,0 +1 @@ +SELECT connection_id(), CONNECTION_ID(), CoNnEcTiOn_Id(), connectionid(), CONNECTIONID(), CoNnEcTiOnId(); diff --git a/parser/testdata/01774_ip_address_in_range/ast.json b/parser/testdata/01774_ip_address_in_range/ast.json new file mode 100644 index 000000000..5866ecd1b --- /dev/null +++ b/parser/testdata/01774_ip_address_in_range/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '# Invocation with constants'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001007236, + "rows_read": 5, + "bytes_read": 198 + } +} diff --git a/parser/testdata/01774_ip_address_in_range/metadata.json b/parser/testdata/01774_ip_address_in_range/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01774_ip_address_in_range/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01774_ip_address_in_range/query.sql b/parser/testdata/01774_ip_address_in_range/query.sql new file mode 100644 index 000000000..b547e44ea --- /dev/null +++ 
b/parser/testdata/01774_ip_address_in_range/query.sql @@ -0,0 +1,63 @@ +SELECT '# Invocation with constants'; + +SELECT isIPAddressInRange('127.0.0.1', '127.0.0.0/8'); +SELECT isIPAddressInRange('128.0.0.1', '127.0.0.0/8'); + +SELECT isIPAddressInRange('ffff::1', 'ffff::/16'); +SELECT isIPAddressInRange('fffe::1', 'ffff::/16'); + +SELECT '# Invocation with non-constant addresses'; + +WITH arrayJoin(['192.168.99.255', '192.168.100.1', '192.168.103.255', '192.168.104.0']) as addr, '192.168.100.0/22' as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); +WITH arrayJoin(['::192.168.99.255', '::192.168.100.1', '::192.168.103.255', '::192.168.104.0']) as addr, '::192.168.100.0/118' as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); + +SELECT '# Invocation with non-constant prefixes'; + +WITH '192.168.100.1' as addr, arrayJoin(['192.168.100.0/22', '192.168.100.0/24', '192.168.100.0/32']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); +WITH '::192.168.100.1' as addr, arrayJoin(['::192.168.100.0/118', '::192.168.100.0/120', '::192.168.100.0/128']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); + +SELECT '# Invocation with non-constants'; + +WITH arrayJoin(['192.168.100.1', '192.168.103.255']) as addr, arrayJoin(['192.168.100.0/22', '192.168.100.0/24']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); +WITH arrayJoin(['::192.168.100.1', '::192.168.103.255']) as addr, arrayJoin(['::192.168.100.0/118', '::192.168.100.0/120']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); + +SELECT '# Check with dense table'; + +DROP TABLE IF EXISTS test_data; +CREATE TABLE test_data (cidr String) ENGINE = Memory; + +INSERT INTO test_data +SELECT + IPv4NumToString(IPv4CIDRToRange(IPv4StringToNum('255.255.255.255'), toUInt8(number)).1) || '/' || toString(number) AS cidr +FROM system.numbers LIMIT 33; + +SELECT sum(isIPAddressInRange('0.0.0.0', cidr)) == 1 FROM test_data; +SELECT sum(isIPAddressInRange('127.0.0.0', cidr)) == 1 FROM test_data; +SELECT sum(isIPAddressInRange('128.0.0.0', cidr)) == 2 FROM test_data; +SELECT sum(isIPAddressInRange('255.0.0.0', cidr)) == 9 FROM test_data; +SELECT sum(isIPAddressInRange('255.0.0.1', cidr)) == 9 FROM test_data; +SELECT sum(isIPAddressInRange('255.0.0.255', cidr)) == 9 FROM test_data; +SELECT sum(isIPAddressInRange('255.255.255.255', cidr)) == 33 FROM test_data; +SELECT sum(isIPAddressInRange('255.255.255.254', cidr)) == 32 FROM test_data; + +DROP TABLE IF EXISTS test_data; + +SELECT '# Mismatching IP versions is not an error.'; + +SELECT isIPAddressInRange('127.0.0.1', 'ffff::/16'); +SELECT isIPAddressInRange('127.0.0.1', '::127.0.0.1/128'); +SELECT isIPAddressInRange('::1', '127.0.0.0/8'); +SELECT isIPAddressInRange('::127.0.0.1', '127.0.0.1/32'); + +SELECT '# Unparsable arguments'; + +SELECT isIPAddressInRange('unparsable', '127.0.0.0/8'); -- { serverError CANNOT_PARSE_TEXT } +SELECT isIPAddressInRange('127.0.0.1', 'unparsable'); -- { serverError CANNOT_PARSE_TEXT } + +SELECT '# Wrong argument types'; + +SELECT isIPAddressInRange(100, '127.0.0.0/8'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT isIPAddressInRange(NULL, '127.0.0.0/8'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT isIPAddressInRange('127.0.0.1', 100); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT isIPAddressInRange(100, NULL); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +WITH arrayJoin([NULL, NULL, NULL, NULL]) AS prefix SELECT isIPAddressInRange([NULL, NULL, 0, 255, 0], prefix); -- { serverError 
ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/01774_ip_address_in_range_2/ast.json b/parser/testdata/01774_ip_address_in_range_2/ast.json new file mode 100644 index 000000000..53da169e6 --- /dev/null +++ b/parser/testdata/01774_ip_address_in_range_2/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '# Invocation with constants'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.000995943, + "rows_read": 5, + "bytes_read": 198 + } +} diff --git a/parser/testdata/01774_ip_address_in_range_2/metadata.json b/parser/testdata/01774_ip_address_in_range_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01774_ip_address_in_range_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01774_ip_address_in_range_2/query.sql b/parser/testdata/01774_ip_address_in_range_2/query.sql new file mode 100644 index 000000000..7853ada0f --- /dev/null +++ b/parser/testdata/01774_ip_address_in_range_2/query.sql @@ -0,0 +1,86 @@ +SELECT '# Invocation with constants'; + +SELECT isIPAddressInRange(CAST('127.0.0.1', 'Nullable(String)'), '127.0.0.0/8'); +SELECT isIPAddressInRange(CAST('128.0.0.1', 'Nullable(String)'), '127.0.0.0/8'); + +SELECT isIPAddressInRange(CAST('ffff::1', 'Nullable(String)'), 'ffff::/16'); +SELECT isIPAddressInRange(CAST('fffe::1', 'Nullable(String)'), 'ffff::/16'); + +SELECT isIPAddressInRange(toIPv4('127.0.0.1'), '127.0.0.0/8'); +SELECT isIPAddressInRange(toIPv4('128.0.0.1'), '127.0.0.0/8'); + +SELECT isIPAddressInRange(toIPv6('ffff::1'), 'ffff::/16'); +SELECT isIPAddressInRange(toIPv6('fffe::1'), 'ffff::/16'); + +SELECT isIPAddressInRange(CAST(toIPv4('127.0.0.1'), 'Nullable(IPv4)'), '127.0.0.0/8'); +SELECT isIPAddressInRange(CAST(toIPv4('128.0.0.1'), 'Nullable(IPv4)'), '127.0.0.0/8'); + +SELECT isIPAddressInRange(CAST(toIPv6('ffff::1'), 'Nullable(IPv6)'), 'ffff::/16'); +SELECT isIPAddressInRange(CAST(toIPv6('fffe::1'), 'Nullable(IPv6)'), 'ffff::/16'); + +SELECT '# Invocation with non-constant addresses'; + +WITH arrayJoin([NULL, CAST('192.168.99.255', 'Nullable(String)'), CAST('192.168.100.1', 'Nullable(String)'), CAST('192.168.103.255', 'Nullable(String)'), CAST('192.168.104.0', 'Nullable(String)')]) as addr, '192.168.100.0/22' as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); + +WITH arrayJoin([toIPv4('192.168.99.255'), toIPv4('192.168.100.1'), toIPv4('192.168.103.255'), toIPv4('192.168.104.0')]) as addr, '192.168.100.0/22' as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); +WITH arrayJoin([NULL, CAST(toIPv4('192.168.99.255'), 'Nullable(IPv4)'), CAST(toIPv4('192.168.100.1'), 'Nullable(IPv4)'), CAST(toIPv4('192.168.103.255'), 'Nullable(IPv4)'), CAST(toIPv4('192.168.104.0'), 'Nullable(IPv4)')]) as addr, '192.168.100.0/22' as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); +WITH arrayJoin([toIPv6('::192.168.99.255'), toIPv6('::192.168.100.1'), toIPv6('::192.168.103.255'), toIPv6('::192.168.104.0')]) as addr, '::192.168.100.0/118' as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); +WITH arrayJoin([NULL, CAST(toIPv6('::192.168.99.255'), 'Nullable(IPv6)'), CAST(toIPv6('::192.168.100.1'), 'Nullable(IPv6)'), CAST(toIPv6('::192.168.103.255'), 
'Nullable(IPv6)'), CAST(toIPv6('::192.168.104.0'), 'Nullable(IPv6)')]) as addr, '::192.168.100.0/118' as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); + +SELECT '# Invocation with non-constant prefixes'; + +WITH CAST('192.168.100.1', 'Nullable(String)') as addr, arrayJoin(['192.168.100.0/22', '192.168.100.0/24', '192.168.100.0/32']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); +WITH CAST('::192.168.100.1', 'Nullable(String)') as addr, arrayJoin(['::192.168.100.0/118', '::192.168.100.0/120', '::192.168.100.0/128']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); +WITH CAST(NULL, 'Nullable(String)') as addr, arrayJoin(['::192.168.100.0/118', '::192.168.100.0/120', '::192.168.100.0/128']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); +WITH toIPv4('192.168.100.1') as addr, arrayJoin(['192.168.100.0/22', '192.168.100.0/24', '192.168.100.0/32']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); +WITH toIPv6('::192.168.100.1') as addr, arrayJoin(['::192.168.100.0/118', '::192.168.100.0/120', '::192.168.100.0/128']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); +WITH CAST(toIPv4('192.168.100.1'), 'Nullable(IPv4)') as addr, arrayJoin(['192.168.100.0/22', '192.168.100.0/24', '192.168.100.0/32']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); +WITH CAST(NULL, 'Nullable(IPv4)') as addr, arrayJoin(['192.168.100.0/22', '192.168.100.0/24', '192.168.100.0/32']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); +WITH CAST(toIPv6('::192.168.100.1'), 'Nullable(IPv6)') as addr, arrayJoin(['::192.168.100.0/118', '::192.168.100.0/120', '::192.168.100.0/128']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); +WITH CAST(NULL, 'Nullable(IPv6)') as addr, arrayJoin(['::192.168.100.0/118', '::192.168.100.0/120', '::192.168.100.0/128']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); + +SELECT '# Invocation with non-constants'; + +WITH arrayJoin([CAST('192.168.100.1', 'Nullable(String)'), CAST('192.168.103.255', 'Nullable(String)')]) as addr, arrayJoin(['192.168.100.0/22', '192.168.100.0/24']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); +WITH arrayJoin([CAST('::192.168.100.1', 'Nullable(String)'), CAST('::192.168.103.255', 'Nullable(String)')]) as addr, arrayJoin(['::192.168.100.0/118', '::192.168.100.0/120']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); + +WITH arrayJoin([toIPv4('192.168.100.1'), toIPv4('192.168.103.255')]) as addr, arrayJoin(['192.168.100.0/22', '192.168.100.0/24']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); +WITH arrayJoin([toIPv6('::192.168.100.1'), toIPv6('::192.168.103.255')]) as addr, arrayJoin(['::192.168.100.0/118', '::192.168.100.0/120']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); +WITH arrayJoin([CAST(toIPv4('192.168.100.1'), 'Nullable(IPv4)'), CAST(toIPv4('192.168.103.255'), 'Nullable(IPv4)')]) as addr, arrayJoin(['192.168.100.0/22', '192.168.100.0/24']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); +WITH arrayJoin([CAST(toIPv6('::192.168.100.1'), 'Nullable(IPv6)'), CAST(toIPv6('::192.168.103.255'), 'Nullable(IPv6)')]) as addr, arrayJoin(['::192.168.100.0/118', '::192.168.100.0/120']) as prefix SELECT addr, prefix, isIPAddressInRange(addr, prefix); + +SELECT '# Check with dense table'; + +DROP TABLE IF EXISTS test_data_2; +CREATE TABLE test_data_2 (cidr String) ENGINE = Memory; +INSERT INTO test_data_2 +SELECT + 
IPv4NumToString(IPv4CIDRToRange(IPv4StringToNum('255.255.255.255'), toUInt8(number)).1) || '/' || toString(number) AS cidr +FROM system.numbers LIMIT 33; + +SELECT sum(isIPAddressInRange(CAST(NULL, 'Nullable(String)'), cidr)) == 0 FROM test_data_2; + +SELECT sum(isIPAddressInRange(toIPv4('0.0.0.0'), cidr)) == 1 FROM test_data_2; +SELECT sum(isIPAddressInRange(toIPv4('127.0.0.0'), cidr)) == 1 FROM test_data_2; +SELECT sum(isIPAddressInRange(toIPv4('128.0.0.0'), cidr)) == 2 FROM test_data_2; +SELECT sum(isIPAddressInRange(toIPv4('255.0.0.0'), cidr)) == 9 FROM test_data_2; +SELECT sum(isIPAddressInRange(toIPv4('255.0.0.1'), cidr)) == 9 FROM test_data_2; +SELECT sum(isIPAddressInRange(toIPv4('255.0.0.255'), cidr)) == 9 FROM test_data_2; +SELECT sum(isIPAddressInRange(toIPv4('255.255.255.255'), cidr)) == 33 FROM test_data_2; +SELECT sum(isIPAddressInRange(toIPv4('255.255.255.254'), cidr)) == 32 FROM test_data_2; +SELECT sum(isIPAddressInRange(CAST(NULL, 'Nullable(IPv4)'), cidr)) == 0 FROM test_data_2; + +DROP TABLE IF EXISTS test_data_2; + +SELECT '# Mismatching IP versions is not an error.'; + +SELECT isIPAddressInRange(toIPv4('127.0.0.1'), 'ffff::/16'); +SELECT isIPAddressInRange(toIPv4('127.0.0.1'), '::127.0.0.1/128'); +SELECT isIPAddressInRange(CAST(toIPv4('127.0.0.1'), 'Nullable(IPv4)'), 'ffff::/16'); +SELECT isIPAddressInRange(CAST(toIPv4('127.0.0.1'), 'Nullable(IPv4)'), '::127.0.0.1/128'); +SELECT isIPAddressInRange(CAST(NULL, 'Nullable(IPv4)'), '::127.0.0.1/128'); +SELECT isIPAddressInRange(toIPv6('::1'), '127.0.0.0/8'); +SELECT isIPAddressInRange(toIPv6('::127.0.0.1'), '127.0.0.1/32'); +SELECT isIPAddressInRange(CAST(toIPv6('::1'), 'Nullable(IPv6)'), '127.0.0.0/8'); +SELECT isIPAddressInRange(CAST(toIPv6('::127.0.0.1'), 'Nullable(IPv6)'), '127.0.0.1/32'); +SELECT isIPAddressInRange(CAST(NULL, 'Nullable(IPv6)'), '127.0.0.1/32'); \ No newline at end of file diff --git a/parser/testdata/01774_tuple_null_in/ast.json b/parser/testdata/01774_tuple_null_in/ast.json new file mode 100644 index 000000000..257ba9ea0 --- /dev/null +++ b/parser/testdata/01774_tuple_null_in/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(NULL, NULL)" + }, + { + "explain": " Literal Tuple_(Tuple_(NULL, UInt64_0), Tuple_(UInt64_3, UInt64_1), Tuple_(UInt64_3, UInt64_2), Tuple_(UInt64_8, UInt64_0), Tuple_(NULL, NULL))" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001059484, + "rows_read": 8, + "bytes_read": 421 + } +} diff --git a/parser/testdata/01774_tuple_null_in/metadata.json b/parser/testdata/01774_tuple_null_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01774_tuple_null_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01774_tuple_null_in/query.sql b/parser/testdata/01774_tuple_null_in/query.sql new file mode 100644 index 000000000..f08e1a067 --- /dev/null +++ b/parser/testdata/01774_tuple_null_in/query.sql @@ -0,0 +1 @@ +SELECT (NULL, NULL) IN ((NULL, 0), (3, 1), (3, 2), (8, 0), (NULL, NULL)); diff --git a/parser/testdata/01776_decrypt_aead_size_check/ast.json 
b/parser/testdata/01776_decrypt_aead_size_check/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01776_decrypt_aead_size_check/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01776_decrypt_aead_size_check/metadata.json b/parser/testdata/01776_decrypt_aead_size_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01776_decrypt_aead_size_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01776_decrypt_aead_size_check/query.sql b/parser/testdata/01776_decrypt_aead_size_check/query.sql new file mode 100644 index 000000000..75834e25a --- /dev/null +++ b/parser/testdata/01776_decrypt_aead_size_check/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on OpenSSL + +SELECT decrypt('aes-128-gcm', 'text', 'key', 'IV'); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/01777_map_populate_series_ubsan/ast.json b/parser/testdata/01777_map_populate_series_ubsan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01777_map_populate_series_ubsan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01777_map_populate_series_ubsan/metadata.json b/parser/testdata/01777_map_populate_series_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01777_map_populate_series_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01777_map_populate_series_ubsan/query.sql b/parser/testdata/01777_map_populate_series_ubsan/query.sql new file mode 100644 index 000000000..241b863d1 --- /dev/null +++ b/parser/testdata/01777_map_populate_series_ubsan/query.sql @@ -0,0 +1,2 @@ +-- Should correctly throw an exception about overflow: +SELECT mapPopulateSeries([-9223372036854775808, toUInt32(2)], [toUInt32(1023), -1]); -- { serverError TOO_LARGE_ARRAY_SIZE } diff --git a/parser/testdata/01778_hierarchical_dictionaries/ast.json b/parser/testdata/01778_hierarchical_dictionaries/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01778_hierarchical_dictionaries/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01778_hierarchical_dictionaries/metadata.json b/parser/testdata/01778_hierarchical_dictionaries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01778_hierarchical_dictionaries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01778_hierarchical_dictionaries/query.sql b/parser/testdata/01778_hierarchical_dictionaries/query.sql new file mode 100644 index 000000000..8e5f68c2c --- /dev/null +++ b/parser/testdata/01778_hierarchical_dictionaries/query.sql @@ -0,0 +1,97 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 01778_db; +CREATE DATABASE 01778_db; + +CREATE TABLE 01778_db.hierarchy_source_table (id UInt64, parent_id UInt64) ENGINE = TinyLog; +INSERT INTO 01778_db.hierarchy_source_table VALUES (1, 0), (2, 1), (3, 1), (4, 2); + +CREATE DICTIONARY 01778_db.hierarchy_flat_dictionary +( + id UInt64, + parent_id UInt64 HIERARCHICAL +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'hierarchy_source_table' DB '01778_db')) +LAYOUT(FLAT()) +LIFETIME(MIN 1 MAX 1000); + +SELECT 'Flat dictionary'; + +SELECT 'Get hierarchy'; +SELECT dictGetHierarchy('01778_db.hierarchy_flat_dictionary', number) FROM system.numbers LIMIT 6; +SELECT 'Get is in hierarchy'; +SELECT
dictIsIn('01778_db.hierarchy_flat_dictionary', number, number) FROM system.numbers LIMIT 6; +SELECT 'Get children'; +SELECT dictGetChildren('01778_db.hierarchy_flat_dictionary', number) FROM system.numbers LIMIT 6; +SELECT 'Get all descendants'; +SELECT dictGetDescendants('01778_db.hierarchy_flat_dictionary', number) FROM system.numbers LIMIT 6; +SELECT 'Get descendants at first level'; +SELECT dictGetDescendants('01778_db.hierarchy_flat_dictionary', number, 1) FROM system.numbers LIMIT 6; + +DROP DICTIONARY 01778_db.hierarchy_flat_dictionary; + +CREATE DICTIONARY 01778_db.hierarchy_hashed_dictionary +( + id UInt64, + parent_id UInt64 HIERARCHICAL +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'hierarchy_source_table' DB '01778_db')) +LAYOUT(HASHED()) +LIFETIME(MIN 1 MAX 1000); + +SELECT 'Hashed dictionary'; + +SELECT 'Get hierarchy'; +SELECT dictGetHierarchy('01778_db.hierarchy_hashed_dictionary', number) FROM system.numbers LIMIT 6; +SELECT 'Get is in hierarchy'; +SELECT dictIsIn('01778_db.hierarchy_hashed_dictionary', number, number) FROM system.numbers LIMIT 6; +SELECT 'Get children'; +SELECT dictGetChildren('01778_db.hierarchy_hashed_dictionary', number) FROM system.numbers LIMIT 6; +SELECT 'Get all descendants'; +SELECT dictGetDescendants('01778_db.hierarchy_hashed_dictionary', number) FROM system.numbers LIMIT 6; +SELECT 'Get descendants at first level'; +SELECT dictGetDescendants('01778_db.hierarchy_hashed_dictionary', number, 1) FROM system.numbers LIMIT 6; + +DROP DICTIONARY 01778_db.hierarchy_hashed_dictionary; + +CREATE DICTIONARY 01778_db.hierarchy_cache_dictionary +( + id UInt64, + parent_id UInt64 HIERARCHICAL +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'hierarchy_source_table' DB '01778_db')) +LAYOUT(CACHE(SIZE_IN_CELLS 10)) +LIFETIME(MIN 1 MAX 1000); + +SELECT 'Cache dictionary'; + +SELECT 'Get hierarchy'; +SELECT dictGetHierarchy('01778_db.hierarchy_cache_dictionary', number) FROM system.numbers LIMIT 6; +SELECT 'Get is in hierarchy'; +SELECT dictIsIn('01778_db.hierarchy_cache_dictionary', number, number) FROM system.numbers LIMIT 6; + +DROP DICTIONARY 01778_db.hierarchy_cache_dictionary; + +CREATE DICTIONARY 01778_db.hierarchy_direct_dictionary +( + id UInt64, + parent_id UInt64 HIERARCHICAL +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'hierarchy_source_table' DB '01778_db')) +LAYOUT(DIRECT()); + +SELECT 'Direct dictionary'; + +SELECT 'Get hierarchy'; +SELECT dictGetHierarchy('01778_db.hierarchy_direct_dictionary', number) FROM system.numbers LIMIT 6; +SELECT 'Get is in hierarchy'; +SELECT dictIsIn('01778_db.hierarchy_direct_dictionary', number, number) FROM system.numbers LIMIT 6; + +DROP DICTIONARY 01778_db.hierarchy_direct_dictionary; + +DROP TABLE 01778_db.hierarchy_source_table; +DROP DATABASE 01778_db; diff --git a/parser/testdata/01778_mmap_cache_infra/ast.json b/parser/testdata/01778_mmap_cache_infra/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01778_mmap_cache_infra/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01778_mmap_cache_infra/metadata.json b/parser/testdata/01778_mmap_cache_infra/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01778_mmap_cache_infra/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01778_mmap_cache_infra/query.sql 
b/parser/testdata/01778_mmap_cache_infra/query.sql new file mode 100644 index 000000000..50fdb6ffb --- /dev/null +++ b/parser/testdata/01778_mmap_cache_infra/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-parallel +-- We check the existence of queries and metrics and don't check the results (a smoke test). + +SYSTEM DROP MMAP CACHE; + +SET system_events_show_zero_values = 1; +SELECT event FROM system.events WHERE event LIKE '%MMap%' ORDER BY event; +SELECT metric FROM system.metrics WHERE metric LIKE '%MMap%' ORDER BY metric; diff --git a/parser/testdata/01778_test_LowCardinality_FixedString_pk/ast.json b/parser/testdata/01778_test_LowCardinality_FixedString_pk/ast.json new file mode 100644 index 000000000..9865834e5 --- /dev/null +++ b/parser/testdata/01778_test_LowCardinality_FixedString_pk/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_01778 (children 1)" + }, + { + "explain": " Identifier test_01778" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001464917, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01778_test_LowCardinality_FixedString_pk/metadata.json b/parser/testdata/01778_test_LowCardinality_FixedString_pk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01778_test_LowCardinality_FixedString_pk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01778_test_LowCardinality_FixedString_pk/query.sql b/parser/testdata/01778_test_LowCardinality_FixedString_pk/query.sql new file mode 100644 index 000000000..78a9b35a4 --- /dev/null +++ b/parser/testdata/01778_test_LowCardinality_FixedString_pk/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS test_01778; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE test_01778 +( + `key` LowCardinality(FixedString(3)), + `d` date +) +ENGINE = MergeTree(d, key, 8192); + + +INSERT INTO test_01778 SELECT toString(intDiv(number,8000)), today() FROM numbers(100000); +INSERT INTO test_01778 SELECT toString('xxx'), today() FROM numbers(100); + +SELECT count() FROM test_01778 WHERE key = 'xxx'; + +SELECT count() FROM test_01778 WHERE key = toFixedString('xxx', 3); + +SELECT count() FROM test_01778 WHERE toString(key) = 'xxx'; + +DROP TABLE test_01778; + diff --git a/parser/testdata/01778_where_with_column_name/ast.json b/parser/testdata/01778_where_with_column_name/ast.json new file mode 100644 index 000000000..cc75dd148 --- /dev/null +++ b/parser/testdata/01778_where_with_column_name/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ttt01778 (children 1)" + }, + { + "explain": " Identifier ttt01778" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001267477, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01778_where_with_column_name/metadata.json b/parser/testdata/01778_where_with_column_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01778_where_with_column_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01778_where_with_column_name/query.sql b/parser/testdata/01778_where_with_column_name/query.sql new file mode 100644 index 000000000..effde87f0 --- /dev/null +++ b/parser/testdata/01778_where_with_column_name/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS ttt01778; +CREATE TABLE ttt01778 (`1` String, `2` INT) ENGINE = MergeTree() ORDER 
BY tuple(); +INSERT INTO ttt01778 values('1',1),('2',2),('3',3); +select * from ttt01778 where 1=2; -- no server error +DROP TABLE ttt01778; diff --git a/parser/testdata/01779_quantile_deterministic_msan/ast.json b/parser/testdata/01779_quantile_deterministic_msan/ast.json new file mode 100644 index 000000000..33bf48ea1 --- /dev/null +++ b/parser/testdata/01779_quantile_deterministic_msan/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function cityHash64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantileDeterministicState (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function sipHash64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_8193" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001279377, + "rows_read": 20, + "bytes_read": 849 + } +} diff --git a/parser/testdata/01779_quantile_deterministic_msan/metadata.json b/parser/testdata/01779_quantile_deterministic_msan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01779_quantile_deterministic_msan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01779_quantile_deterministic_msan/query.sql b/parser/testdata/01779_quantile_deterministic_msan/query.sql new file mode 100644 index 000000000..d5080cfa0 --- /dev/null +++ b/parser/testdata/01779_quantile_deterministic_msan/query.sql @@ -0,0 +1,3 @@ +SELECT cityHash64(toString(quantileDeterministicState(number, sipHash64(number)))) FROM numbers(8193); +-- https://github.com/ClickHouse/ClickHouse/issues/80862 +SELECT hex(quantileDeterministicState(1,1)); diff --git a/parser/testdata/01780_clickhouse_dictionary_source_loop/ast.json b/parser/testdata/01780_clickhouse_dictionary_source_loop/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01780_clickhouse_dictionary_source_loop/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01780_clickhouse_dictionary_source_loop/metadata.json b/parser/testdata/01780_clickhouse_dictionary_source_loop/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01780_clickhouse_dictionary_source_loop/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01780_clickhouse_dictionary_source_loop/query.sql b/parser/testdata/01780_clickhouse_dictionary_source_loop/query.sql new file mode 100644 index 000000000..3ebc85c47 --- /dev/null +++ b/parser/testdata/01780_clickhouse_dictionary_source_loop/query.sql @@ -0,0 +1,55 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 01780_db; +CREATE DATABASE 01780_db; + +DROP DICTIONARY IF EXISTS dict1; 
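+-- Each of the first two dictionaries below points its CLICKHOUSE source at its own
+-- name ('dict1', then '01780_db.dict2'), so loading it would read from itself; the
+-- server is expected to reject such a source loop with BAD_ARGUMENTS. The third
+-- (dict3) is the control case: its source is a real table, so the SELECT succeeds.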
+CREATE DICTIONARY dict1 +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'dict1')) +LAYOUT(DIRECT()); + +SELECT * FROM dict1; --{serverError BAD_ARGUMENTS} + +DROP DICTIONARY dict1; + +DROP DICTIONARY IF EXISTS dict2; +CREATE DICTIONARY 01780_db.dict2 +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() DATABASE '01780_db' TABLE 'dict2')) +LAYOUT(DIRECT()); + +SELECT * FROM 01780_db.dict2; --{serverError BAD_ARGUMENTS} +DROP DICTIONARY 01780_db.dict2; + +DROP TABLE IF EXISTS 01780_db.dict3_source; +CREATE TABLE 01780_db.dict3_source +( + id UInt64, + value String +) ENGINE = TinyLog; + +INSERT INTO 01780_db.dict3_source VALUES (1, '1'), (2, '2'), (3, '3'); + +CREATE DICTIONARY 01780_db.dict3 +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'dict3_source' DATABASE '01780_db')) +LAYOUT(DIRECT()); + +SELECT * FROM 01780_db.dict3; + +DROP DICTIONARY 01780_db.dict3; + +DROP DATABASE 01780_db; diff --git a/parser/testdata/01780_column_sparse/ast.json b/parser/testdata/01780_column_sparse/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01780_column_sparse/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01780_column_sparse/metadata.json b/parser/testdata/01780_column_sparse/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01780_column_sparse/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01780_column_sparse/query.sql b/parser/testdata/01780_column_sparse/query.sql new file mode 100644 index 000000000..8e3c4372d --- /dev/null +++ b/parser/testdata/01780_column_sparse/query.sql @@ -0,0 +1,44 @@ +-- { echo } + +DROP TABLE IF EXISTS t_sparse; +DROP TABLE IF EXISTS t_sparse_1; + +CREATE TABLE t_sparse (id UInt64, u UInt64, s String, arr1 Array(String), arr2 Array(UInt64)) +ENGINE = MergeTree ORDER BY tuple() +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.1; + +INSERT INTO t_sparse SELECT + number, + if (number % 10 = 0, number, 0), + if (number % 5 = 0, toString(number), ''), + if (number % 7 = 0, arrayMap(x -> toString(x), range(number % 10)), []), + if (number % 12 = 0, range(number % 10), []) +FROM numbers (200); + +SELECT column, serialization_kind FROM system.parts_columns +WHERE table = 't_sparse' AND database = currentDatabase() +ORDER BY column; + +SELECT * FROM t_sparse WHERE u != 0 ORDER BY id; +SELECT * FROM t_sparse WHERE s != '' ORDER BY id; +SELECT * FROM t_sparse WHERE arr1 != [] ORDER BY id; +SELECT * FROM t_sparse WHERE arr2 != [] ORDER BY id; + +SELECT sum(u) FROM t_sparse; +SELECT id % 7, sum(u) FROM t_sparse GROUP BY id % 7 ORDER BY id % 7; + +SELECT arrayFilter(x -> x % 2 = 1, arr2) FROM t_sparse WHERE arr2 != [] LIMIT 5; + +CREATE TABLE t_sparse_1 (id UInt64, v Int64) +ENGINE = MergeTree ORDER BY tuple() +SETTINGS ratio_of_defaults_for_sparse_serialization = 0; + +INSERT INTO t_sparse_1 VALUES (1, 6), (2, 1), (3, 0), (4, -1), (5, 0), (6, 0), (7, -2), (8, 0), (9, 0), (10, 4), (11, 0); + +SELECT * FROM t_sparse_1 ORDER BY v, id; +SELECT * FROM t_sparse_1 ORDER BY v DESC, id; +SELECT * FROM t_sparse_1 ORDER BY v, id LIMIT 5; +SELECT * FROM t_sparse_1 ORDER BY v DESC, id LIMIT 5; + +DROP TABLE t_sparse; +DROP TABLE t_sparse_1; diff --git a/parser/testdata/01780_column_sparse_alter/ast.json b/parser/testdata/01780_column_sparse_alter/ast.json new file mode 100644 index 
000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01780_column_sparse_alter/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01780_column_sparse_alter/metadata.json b/parser/testdata/01780_column_sparse_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01780_column_sparse_alter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01780_column_sparse_alter/query.sql b/parser/testdata/01780_column_sparse_alter/query.sql new file mode 100644 index 000000000..f33573e50 --- /dev/null +++ b/parser/testdata/01780_column_sparse_alter/query.sql @@ -0,0 +1,32 @@ + +SET mutations_sync = 2; + +DROP TABLE IF EXISTS t_sparse_alter; + +CREATE TABLE t_sparse_alter (id UInt64, u UInt64, s String) +ENGINE = MergeTree ORDER BY id +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.5; +INSERT INTO t_sparse_alter SELECT + number, + if (number % 11 = 0, number, 0), + if (number % 13 = 0, toString(number), '') +FROM numbers(2000); + +SELECT column, serialization_kind FROM system.parts_columns WHERE database = currentDatabase() AND table = 't_sparse_alter' AND active ORDER BY column; + +SELECT uniqExact(u), uniqExact(s) FROM t_sparse_alter; + +ALTER TABLE t_sparse_alter DROP COLUMN s, RENAME COLUMN u TO t; +ALTER TABLE t_sparse_alter MODIFY COLUMN t UInt16; + +SELECT column, serialization_kind FROM system.parts_columns WHERE database = currentDatabase() AND table = 't_sparse_alter' AND active ORDER BY column; + +SELECT uniqExact(t) FROM t_sparse_alter; + +DETACH TABLE t_sparse_alter; +ATTACH TABLE t_sparse_alter; + +SELECT column, serialization_kind FROM system.parts_columns WHERE database = currentDatabase() AND table = 't_sparse_alter' AND active ORDER BY column; +SELECT uniqExact(t) FROM t_sparse_alter; + +DROP TABLE t_sparse_alter; diff --git a/parser/testdata/01780_column_sparse_distinct/ast.json b/parser/testdata/01780_column_sparse_distinct/ast.json new file mode 100644 index 000000000..aeddfed3b --- /dev/null +++ b/parser/testdata/01780_column_sparse_distinct/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001411413, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01780_column_sparse_distinct/metadata.json b/parser/testdata/01780_column_sparse_distinct/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01780_column_sparse_distinct/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01780_column_sparse_distinct/query.sql b/parser/testdata/01780_column_sparse_distinct/query.sql new file mode 100644 index 000000000..a0735e38f --- /dev/null +++ b/parser/testdata/01780_column_sparse_distinct/query.sql @@ -0,0 +1,22 @@ +SET optimize_trivial_insert_select = 1; + +DROP TABLE IF EXISTS t_sparse_distinct; + +CREATE TABLE t_sparse_distinct (id UInt32, v UInt64) +ENGINE = MergeTree +ORDER BY id +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9; + +SYSTEM STOP MERGES t_sparse_distinct; + +INSERT INTO t_sparse_distinct SELECT number, number % 6 FROM numbers(100000); +INSERT INTO t_sparse_distinct SELECT number, number % 100 = 0 FROM numbers(100000); + +SELECT name, column, serialization_kind +FROM system.parts_columns +WHERE table = 't_sparse_distinct' AND database = currentDatabase() AND column = 'v' +ORDER BY name; + +SELECT DISTINCT v FROM t_sparse_distinct ORDER BY 
v; + +DROP TABLE t_sparse_distinct; diff --git a/parser/testdata/01780_column_sparse_filter/ast.json b/parser/testdata/01780_column_sparse_filter/ast.json new file mode 100644 index 000000000..e857af799 --- /dev/null +++ b/parser/testdata/01780_column_sparse_filter/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001192505, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01780_column_sparse_filter/metadata.json b/parser/testdata/01780_column_sparse_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01780_column_sparse_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01780_column_sparse_filter/query.sql b/parser/testdata/01780_column_sparse_filter/query.sql new file mode 100644 index 000000000..245c7c121 --- /dev/null +++ b/parser/testdata/01780_column_sparse_filter/query.sql @@ -0,0 +1,35 @@ +SET optimize_trivial_insert_select = 1; + +DROP TABLE IF EXISTS t_sparse; + +CREATE TABLE t_sparse (id UInt64, u UInt64, s String) +ENGINE = MergeTree ORDER BY id +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9, index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO t_sparse SELECT + number, + if (number % 20 = 0, number, 0), + if (number % 50 = 0, toString(number), '') +FROM numbers(1, 100000); + +SELECT column, serialization_kind FROM system.parts_columns +WHERE table = 't_sparse' AND database = currentDatabase() +ORDER BY column, serialization_kind; + +SELECT count() FROM t_sparse WHERE u > 0; +SELECT count() FROM t_sparse WHERE notEmpty(s); + +SYSTEM STOP MERGES t_sparse; + +INSERT INTO t_sparse SELECT + number, number, toString(number) +FROM numbers (1, 100000); + +SELECT column, serialization_kind FROM system.parts_columns +WHERE table = 't_sparse' AND database = currentDatabase() +ORDER BY column, serialization_kind; + +SELECT count() FROM t_sparse WHERE u > 0; +SELECT count() FROM t_sparse WHERE notEmpty(s); + +DROP TABLE t_sparse; diff --git a/parser/testdata/01780_column_sparse_full/ast.json b/parser/testdata/01780_column_sparse_full/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01780_column_sparse_full/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01780_column_sparse_full/metadata.json b/parser/testdata/01780_column_sparse_full/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01780_column_sparse_full/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01780_column_sparse_full/query.sql b/parser/testdata/01780_column_sparse_full/query.sql new file mode 100644 index 000000000..ad926fb70 --- /dev/null +++ b/parser/testdata/01780_column_sparse_full/query.sql @@ -0,0 +1,108 @@ +-- This test checks that common SQL operations work +-- with mixed columns (sparse and full) in a table.
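+-- With ratio_of_defaults_for_sparse_serialization = 0.1 and merges stopped, the first
+-- INSERT below (mostly default values) should produce a part with sparse-serialized
+-- columns, while the second (fully populated) should produce a fully-serialized part,
+-- so the queries that follow read both serialization kinds at once.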
+ +DROP TABLE IF EXISTS t_sparse_full; + +CREATE TABLE t_sparse_full (id UInt64, u UInt64, s String) +ENGINE = MergeTree ORDER BY id +SETTINGS index_granularity = 32, + index_granularity_bytes = '10Mi', + ratio_of_defaults_for_sparse_serialization = 0.1, + enable_block_number_column = 0, + enable_block_offset_column = 0; + +SYSTEM STOP MERGES t_sparse_full; + +INSERT INTO t_sparse_full +SELECT + number, + if (number % 10 = 0, number, 0), + if (number % 7 = 0, toString(number), '') +FROM numbers(1000); + +INSERT INTO t_sparse_full +SELECT + number, + number, + toString(number) +FROM numbers(500); + +SELECT name, column, serialization_kind +FROM system.parts_columns WHERE table = 't_sparse_full' AND database = currentDatabase() AND active +ORDER BY name, column; + +SELECT id, u FROM t_sparse_full ORDER BY id, u LIMIT 4; +SELECT '======'; +SELECT id, u FROM t_sparse_full ORDER BY id, u LIMIT 4 SETTINGS optimize_read_in_order = 0; +SELECT '======'; +SELECT id, u, s FROM t_sparse_full ORDER BY u DESC LIMIT 3; +SELECT '======'; +SELECT id, u, s FROM t_sparse_full WHERE u != 0 ORDER BY u DESC LIMIT 3; +SELECT '======'; +SELECT id % 3 AS k, sum(u) FROM t_sparse_full WHERE u != 0 GROUP BY k ORDER BY k; +SELECT '======'; +SELECT uniqExact(u) FROM t_sparse_full WHERE s != ''; +SELECT '======'; +SELECT toUInt32(s) % 5 AS k, groupUniqArray(u % 4) FROM t_sparse_full WHERE s != '' GROUP BY k ORDER BY k; +SELECT max(range(id % 10)[u]) FROM t_sparse_full; +SELECT '======'; +SELECT id, u, s FROM remote('127.0.0.{1,2}', currentDatabase(), t_sparse_full) ORDER BY id, u, s LIMIT 5; +SELECT '======'; +SELECT sum(u) FROM t_sparse_full GROUP BY id % 3 AS k WITH TOTALS ORDER BY k; +SELECT '======'; +SELECT sum(u) AS value FROM t_sparse_full GROUP BY id % 3 AS k WITH ROLLUP ORDER BY value; +SELECT '======'; +SELECT sum(u) AS value FROM t_sparse_full GROUP BY id % 3 AS k WITH CUBE ORDER BY value; +SELECT '======'; +SELECT sum(id) FROM t_sparse_full GROUP BY u % 3 AS k ORDER BY k; +SELECT '======'; +SELECT count() FROM t_sparse_full WHERE u % 4 = 0; +SELECT '======'; +SELECT count() FROM t_sparse_full WHERE u IN (SELECT u FROM t_sparse_full WHERE id % 4 = 2); +SELECT '======'; +SELECT DISTINCT u FROM t_sparse_full ORDER BY id LIMIT 5; + +SELECT '======'; + +SELECT id, u, s FROM t_sparse_full INNER JOIN +( + SELECT number * 3 AS u FROM numbers(10) +) AS t1 USING(u) ORDER BY id, u, s LIMIT 5; + +SELECT '======'; + +SELECT id, u, s FROM t_sparse_full FULL JOIN +( + SELECT number * 3 AS u FROM numbers(10) +) AS t1 USING(u) ORDER BY id, u, s LIMIT 5; + +SELECT '======'; + +SELECT id, u, s FROM (SELECT number * 2 AS u FROM numbers(10)) AS t1 +INNER JOIN t_sparse_full USING(u) ORDER BY id, u, s LIMIT 5; + +SELECT '======'; + +SELECT id, u, s FROM (SELECT number * 2 AS u FROM numbers(10)) AS t1 +FULL JOIN t_sparse_full USING(u) ORDER BY id, u, s LIMIT 5; + +SELECT '======'; + +SELECT id, u, s FROM (SELECT u FROM t_sparse_full) AS t1 +FULL JOIN t_sparse_full USING(u) ORDER BY id, u, s LIMIT 5; + +SYSTEM START MERGES t_sparse_full; + +OPTIMIZE TABLE t_sparse_full FINAL; + +SELECT '======'; + +SELECT column, serialization_kind +FROM system.parts_columns WHERE table = 't_sparse_full' AND database = currentDatabase() AND active +ORDER BY name, column; + +SELECT '======'; + +SELECT id, u, s FROM t_sparse_full ORDER BY u DESC LIMIT 3; + +DROP TABLE t_sparse_full; diff --git a/parser/testdata/01780_column_sparse_materialize/ast.json b/parser/testdata/01780_column_sparse_materialize/ast.json new file mode 100644 index 
000000000..ca72d4b38 --- /dev/null +++ b/parser/testdata/01780_column_sparse_materialize/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery sparse_t (children 1)" + }, + { + "explain": " Identifier sparse_t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001235739, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01780_column_sparse_materialize/metadata.json b/parser/testdata/01780_column_sparse_materialize/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01780_column_sparse_materialize/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01780_column_sparse_materialize/query.sql b/parser/testdata/01780_column_sparse_materialize/query.sql new file mode 100644 index 000000000..a53ea140f --- /dev/null +++ b/parser/testdata/01780_column_sparse_materialize/query.sql @@ -0,0 +1,52 @@ +DROP TABLE IF EXISTS sparse_t; + +CREATE TABLE sparse_t ( + id UInt64, + u UInt64, + s String, + arr1 Array(String), + arr2 Array(UInt64), + t Tuple(a UInt64, s String)) +ENGINE = MergeTree ORDER BY tuple() +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.1; + +INSERT INTO sparse_t SELECT + number, + if (number % 2 = 0, number, 0), + if (number % 2 = 0, toString(number), ''), + if (number % 2 = 0, [''], []), + if (number % 2 = 0, [0], []), + (if (number % 2 = 0, number, 0), '') +FROM numbers(2); + +-- { echoOn } + +SELECT dumpColumnStructure(id) FROM sparse_t; +SELECT dumpColumnStructure(materialize(id)) FROM sparse_t; + +SELECT dumpColumnStructure(u) FROM sparse_t; +SELECT dumpColumnStructure(materialize(u)) FROM sparse_t; + +SELECT dumpColumnStructure(s) FROM sparse_t; +SELECT dumpColumnStructure(materialize(s)) FROM sparse_t; + +SELECT dumpColumnStructure(arr1) FROM sparse_t; +SELECT dumpColumnStructure(materialize(arr1)) FROM sparse_t; + +SELECT dumpColumnStructure(arr2) FROM sparse_t; +SELECT dumpColumnStructure(materialize(arr2)) FROM sparse_t; + +SELECT dumpColumnStructure(t) FROM sparse_t; +SELECT dumpColumnStructure(materialize(t)) FROM sparse_t; + +SELECT dumpColumnStructure(t.a) FROM sparse_t; +SELECT dumpColumnStructure(materialize(t.a)) FROM sparse_t; + +SELECT dumpColumnStructure(t.s) FROM sparse_t; +SELECT dumpColumnStructure(materialize(t.s)) FROM sparse_t; + +-- { echoOff } + + +DROP TABLE IF EXISTS sparse_t +; diff --git a/parser/testdata/01780_column_sparse_pk/ast.json b/parser/testdata/01780_column_sparse_pk/ast.json new file mode 100644 index 000000000..ba27ae765 --- /dev/null +++ b/parser/testdata/01780_column_sparse_pk/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_sparse_pk (children 1)" + }, + { + "explain": " Identifier t_sparse_pk" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001273484, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/01780_column_sparse_pk/metadata.json b/parser/testdata/01780_column_sparse_pk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01780_column_sparse_pk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01780_column_sparse_pk/query.sql b/parser/testdata/01780_column_sparse_pk/query.sql new file mode 100644 index 000000000..53a60633d --- /dev/null +++ b/parser/testdata/01780_column_sparse_pk/query.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS t_sparse_pk; +DROP 
TABLE IF EXISTS t_full_pk; + +CREATE TABLE t_sparse_pk (k UInt64, s String) +ENGINE = MergeTree ORDER BY k +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.0, index_granularity = 1; + +INSERT INTO t_sparse_pk VALUES (0, 'a'), (0, 'b'), (1, ''), (2, ''), (2, 'e'), (3, 'f'), (4, 'g'); + +SET force_primary_key = 1; + +SELECT k, s FROM t_sparse_pk WHERE k = 2 ORDER BY k, s; +SELECT k, s FROM t_sparse_pk WHERE k = 0 OR k = 3 ORDER BY k, s; + +DROP TABLE IF EXISTS t_sparse_pk; + +CREATE TABLE t_sparse_pk (k UInt64, v UInt64 CODEC(NONE)) +ENGINE = MergeTree ORDER BY k +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.0, index_granularity = 30; + +CREATE TABLE t_full_pk (k UInt64, v UInt64) +ENGINE = MergeTree ORDER BY k +SETTINGS ratio_of_defaults_for_sparse_serialization = 1.1, index_granularity = 30; + +INSERT INTO t_sparse_pk SELECT number % 10, number % 4 = 0 FROM numbers(1000); +INSERT INTO t_full_pk SELECT number % 10, number % 4 = 0 FROM numbers(1000); + +INSERT INTO t_sparse_pk SELECT number % 10, number % 6 = 0 FROM numbers(1000); +INSERT INTO t_full_pk SELECT number % 10, number % 6 = 0 FROM numbers(1000); + +SELECT count(v), sum(v) FROM t_sparse_pk WHERE k = 0; +SELECT count(v), sum(v) FROM t_full_pk WHERE k = 0; + +SELECT count(v), sum(v) FROM t_sparse_pk WHERE k = 0 OR k = 3 OR k = 7 OR k = 8; +SELECT count(v), sum(v) FROM t_full_pk WHERE k = 0 OR k = 3 OR k = 7 OR k = 8; + +SET force_primary_key = 0; + +SELECT (k = NULL) OR (k = 1000) FROM t_sparse_pk LIMIT 3; +SELECT range(k) FROM t_sparse_pk ORDER BY k LIMIT 3; + +DROP TABLE IF EXISTS t_sparse_pk; +DROP TABLE IF EXISTS t_full_pk; diff --git a/parser/testdata/01780_column_sparse_tuple/ast.json b/parser/testdata/01780_column_sparse_tuple/ast.json new file mode 100644 index 000000000..b8a9d3259 --- /dev/null +++ b/parser/testdata/01780_column_sparse_tuple/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery sparse_tuple (children 1)" + }, + { + "explain": " Identifier sparse_tuple" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001165927, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/01780_column_sparse_tuple/metadata.json b/parser/testdata/01780_column_sparse_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01780_column_sparse_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01780_column_sparse_tuple/query.sql b/parser/testdata/01780_column_sparse_tuple/query.sql new file mode 100644 index 000000000..d5860fa7f --- /dev/null +++ b/parser/testdata/01780_column_sparse_tuple/query.sql @@ -0,0 +1,53 @@ +DROP TABLE IF EXISTS sparse_tuple; + +CREATE TABLE sparse_tuple (id UInt64, t Tuple(a UInt64, s String)) +ENGINE = MergeTree ORDER BY tuple() +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.5, serialization_info_version = 'basic'; + +INSERT INTO sparse_tuple SELECT number, (if (number % 20 = 0, number, 0), repeat('a', number % 10 + 1)) FROM numbers(1000); + +SELECT column, subcolumns.names, subcolumns.types, subcolumns.serializations +FROM system.parts_columns +WHERE table = 'sparse_tuple' AND database = currentDatabase() +ORDER BY column; + +SELECT t FROM sparse_tuple ORDER BY id LIMIT 5; +SELECT t FROM sparse_tuple WHERE t.a != 0 ORDER BY id LIMIT 5; +SELECT t FROM sparse_tuple WHERE t.a != 0 ORDER BY t.a LIMIT 5; + +SELECT t.a FROM sparse_tuple ORDER BY id LIMIT 5; +SELECT t.a FROM sparse_tuple WHERE 
t.a != 0 ORDER BY id LIMIT 5; +SELECT t.a FROM sparse_tuple WHERE t.a != 0 ORDER BY t.a LIMIT 5; + +SELECT t.s FROM sparse_tuple ORDER BY id LIMIT 5; +SELECT t.s FROM sparse_tuple WHERE t.a != 0 ORDER BY id LIMIT 5; + +DROP TABLE IF EXISTS sparse_tuple; + +CREATE TABLE sparse_tuple (id UInt64, t Tuple(a UInt64, b Tuple(u UInt32, s String))) +ENGINE = MergeTree ORDER BY tuple() +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.5, serialization_info_version = 'basic'; + +INSERT INTO sparse_tuple SELECT number, (if (number % 20 = 0, number, 0), (if (number % 15 = 0, number, 0), repeat('a', number % 10 + 1))) FROM numbers(1000); + +SELECT column, subcolumns.names, subcolumns.types, subcolumns.serializations +FROM system.parts_columns +WHERE table = 'sparse_tuple' AND database = currentDatabase() +ORDER BY column; + +SELECT t.a FROM sparse_tuple WHERE t.b.u != 0 ORDER BY id LIMIT 5; + +SELECT t.b.s FROM sparse_tuple ORDER BY id LIMIT 5; +SELECT t.b.s FROM sparse_tuple WHERE t.b.u != 0 ORDER BY id LIMIT 5; + +DETACH TABLE sparse_tuple; +ATTACH TABLE sparse_tuple; + +SELECT column, subcolumns.names, subcolumns.types, subcolumns.serializations +FROM system.parts_columns +WHERE table = 'sparse_tuple' AND database = currentDatabase() +ORDER BY column; + +SELECT t.b.s FROM sparse_tuple WHERE t.b.u != 0 ORDER BY id LIMIT 5; + +DROP TABLE IF EXISTS sparse_tuple; diff --git a/parser/testdata/01780_dict_get_or_null/ast.json b/parser/testdata/01780_dict_get_or_null/ast.json new file mode 100644 index 000000000..a93749998 --- /dev/null +++ b/parser/testdata/01780_dict_get_or_null/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery simple_key_dictionary_source_table (children 1)" + }, + { + "explain": " Identifier simple_key_dictionary_source_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001514789, + "rows_read": 2, + "bytes_read": 120 + } +} diff --git a/parser/testdata/01780_dict_get_or_null/metadata.json b/parser/testdata/01780_dict_get_or_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01780_dict_get_or_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01780_dict_get_or_null/query.sql b/parser/testdata/01780_dict_get_or_null/query.sql new file mode 100644 index 000000000..f13bcf57d --- /dev/null +++ b/parser/testdata/01780_dict_get_or_null/query.sql @@ -0,0 +1,116 @@ +DROP TABLE IF EXISTS simple_key_dictionary_source_table; +CREATE TABLE simple_key_dictionary_source_table +( + id UInt64, + value String, + value_nullable Nullable(String) +) ENGINE = TinyLog; + +INSERT INTO simple_key_dictionary_source_table VALUES (1, 'First', 'First'); +INSERT INTO simple_key_dictionary_source_table VALUES (2, 'Second', NULL); +INSERT INTO simple_key_dictionary_source_table VALUES (3, 'Third', 'Third'); + +DROP DICTIONARY IF EXISTS simple_key_dictionary; +CREATE DICTIONARY simple_key_dictionary +( + id UInt64, + value String, + value_nullable Nullable(String) +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'simple_key_dictionary_source_table')) +LAYOUT(DIRECT()); + +SELECT 'Simple key dictionary dictGetOrNull'; + +SELECT + number, + dictHas('simple_key_dictionary', number), + dictGetOrNull('simple_key_dictionary', 'value', number), + dictGetOrNull('simple_key_dictionary', 'value_nullable', number), + dictGetOrNull('simple_key_dictionary', ('value', 'value_nullable'), number) +FROM system.numbers 
LIMIT 5; + +DROP DICTIONARY simple_key_dictionary; +DROP TABLE simple_key_dictionary_source_table; + +DROP TABLE IF EXISTS complex_key_dictionary_source_table; +CREATE TABLE complex_key_dictionary_source_table +( + id UInt64, + id_key String, + value String, + value_nullable Nullable(String) +) ENGINE = TinyLog; + +INSERT INTO complex_key_dictionary_source_table VALUES (1, 'key', 'First', 'First'); +INSERT INTO complex_key_dictionary_source_table VALUES (2, 'key', 'Second', NULL); +INSERT INTO complex_key_dictionary_source_table VALUES (3, 'key', 'Third', 'Third'); + +DROP DICTIONARY IF EXISTS complex_key_dictionary; +CREATE DICTIONARY complex_key_dictionary +( + id UInt64, + id_key String, + value String, + value_nullable Nullable(String) +) +PRIMARY KEY id, id_key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'complex_key_dictionary_source_table')) +LAYOUT(COMPLEX_KEY_DIRECT()); + +SELECT 'Complex key dictionary dictGetOrNull'; + +SELECT + (number, 'key'), + dictHas('complex_key_dictionary', (number, 'key')), + dictGetOrNull('complex_key_dictionary', 'value', (number, 'key')), + dictGetOrNull('complex_key_dictionary', 'value_nullable', (number, 'key')), + dictGetOrNull('complex_key_dictionary', ('value', 'value_nullable'), (number, 'key')) +FROM system.numbers LIMIT 5; + +DROP DICTIONARY complex_key_dictionary; +DROP TABLE complex_key_dictionary_source_table; + +DROP TABLE IF EXISTS range_key_dictionary_source_table; +CREATE TABLE range_key_dictionary_source_table +( + key UInt64, + start_date Date, + end_date Date, + value String, + value_nullable Nullable(String) +) +ENGINE = TinyLog(); + +INSERT INTO range_key_dictionary_source_table VALUES(1, toDate('2019-05-20'), toDate('2019-05-20'), 'First', 'First'); +INSERT INTO range_key_dictionary_source_table VALUES(2, toDate('2019-05-20'), toDate('2019-05-20'), 'Second', NULL); +INSERT INTO range_key_dictionary_source_table VALUES(3, toDate('2019-05-20'), toDate('2019-05-20'), 'Third', 'Third'); + +DROP DICTIONARY IF EXISTS range_key_dictionary; +CREATE DICTIONARY range_key_dictionary +( + key UInt64, + start_date Date, + end_date Date, + value String, + value_nullable Nullable(String) +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'range_key_dictionary_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(RANGE_HASHED()) +RANGE(MIN start_date MAX end_date); + +SELECT 'Range key dictionary dictGetOrNull'; + +SELECT + (number, toDate('2019-05-20')), + dictHas('range_key_dictionary', number, toDate('2019-05-20')), + dictGetOrNull('range_key_dictionary', 'value', number, toDate('2019-05-20')), + dictGetOrNull('range_key_dictionary', 'value_nullable', number, toDate('2019-05-20')), + dictGetOrNull('range_key_dictionary', ('value', 'value_nullable'), number, toDate('2019-05-20')) +FROM system.numbers LIMIT 5; + +DROP DICTIONARY range_key_dictionary; +DROP TABLE range_key_dictionary_source_table; diff --git a/parser/testdata/01780_range_msan/ast.json b/parser/testdata/01780_range_msan/ast.json new file mode 100644 index 000000000..f0862138b --- /dev/null +++ b/parser/testdata/01780_range_msan/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 2)" 
+ }, + { + "explain": " Function toUInt256 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.00117887, + "rows_read": 10, + "bytes_read": 377 + } +} diff --git a/parser/testdata/01780_range_msan/metadata.json b/parser/testdata/01780_range_msan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01780_range_msan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01780_range_msan/query.sql b/parser/testdata/01780_range_msan/query.sql new file mode 100644 index 000000000..7cfdddbfa --- /dev/null +++ b/parser/testdata/01780_range_msan/query.sql @@ -0,0 +1 @@ +SELECT range(toUInt256(1), 1); -- { serverError ILLEGAL_COLUMN } diff --git a/parser/testdata/01781_map_op_ubsan/ast.json b/parser/testdata/01781_map_op_ubsan/ast.json new file mode 100644 index 000000000..b5e5bfa2b --- /dev/null +++ b/parser/testdata/01781_map_op_ubsan/ast.json @@ -0,0 +1,310 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Function toInt32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function mapSubtract (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_256" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toInt32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-9223372036854775808" + }, + { + "explain": " Literal UInt64_1025" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_65535" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toInt16 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_0" + }, + { + "explain": " Literal Int64_-9223372036854775808" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 
2)" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Function toInt32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_9223372036854775807" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toInt32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_255" + }, + { + "explain": " Literal UInt64_65536" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_2, UInt64_9223372036854775807]" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toFloat32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '0.0000065536'" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_9223372036854775807" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1024" + }, + { + "explain": " Literal UInt64_255" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toInt16 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-2" + }, + { + "explain": " Literal Array_[NULL]" + } + ], + + "rows": 96, + + "statistics": + { + "elapsed": 0.001706096, + "rows_read": 96, + "bytes_read": 4124 + } +} diff --git a/parser/testdata/01781_map_op_ubsan/metadata.json b/parser/testdata/01781_map_op_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01781_map_op_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01781_map_op_ubsan/query.sql b/parser/testdata/01781_map_op_ubsan/query.sql new file mode 100644 index 000000000..adbb5d5a8 --- /dev/null +++ b/parser/testdata/01781_map_op_ubsan/query.sql @@ -0,0 +1 @@ +SELECT toInt32([toUInt8(NULL)], NULL), (mapSubtract(([toUInt8(256), 10], [toInt32(-9223372036854775808), 1025]), ([toUInt8(65535), 0], [toInt16(0.), -9223372036854775808])), [toUInt8(-1), toInt32(([toUInt8(9223372036854775807), -1], [toInt32(255), 65536]), NULL)]), toUInt8(([2, 9223372036854775807], [toFloat32('0.0000065536'), 2]), 9223372036854775807, 
NULL), ([toUInt8(1024), 255], toUInt8(3), [toInt16(-2)]), [NULL]; diff --git a/parser/testdata/01781_merge_tree_deduplication/ast.json b/parser/testdata/01781_merge_tree_deduplication/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01781_merge_tree_deduplication/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01781_merge_tree_deduplication/metadata.json b/parser/testdata/01781_merge_tree_deduplication/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01781_merge_tree_deduplication/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01781_merge_tree_deduplication/query.sql b/parser/testdata/01781_merge_tree_deduplication/query.sql new file mode 100644 index 000000000..4a148c539 --- /dev/null +++ b/parser/testdata/01781_merge_tree_deduplication/query.sql @@ -0,0 +1,190 @@ +-- Tags: no-replicated-database +-- Tag no-replicated-database: Unsupported type of ALTER query + +DROP TABLE IF EXISTS merge_tree_deduplication; + +CREATE TABLE merge_tree_deduplication +( + key UInt64, + value String, + part UInt8 DEFAULT 77 +) +ENGINE=MergeTree() +ORDER BY key +PARTITION BY part +SETTINGS non_replicated_deduplication_window=3; + +SYSTEM STOP MERGES merge_tree_deduplication; + +INSERT INTO merge_tree_deduplication (key, value) VALUES (1, '1'); + +SELECT key, value FROM merge_tree_deduplication; + +INSERT INTO merge_tree_deduplication (key, value) VALUES (1, '1'); + +SELECT key, value FROM merge_tree_deduplication; + +SELECT '==============='; + +INSERT INTO merge_tree_deduplication (key, value) VALUES (2, '2'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (3, '3'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (4, '4'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (1, '1'); + +SELECT key, value FROM merge_tree_deduplication ORDER BY key; + +SELECT '==============='; + +INSERT INTO merge_tree_deduplication (key, value) VALUES (5, '5'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (6, '6'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (7, '7'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (5, '5'); + +SELECT key, value FROM merge_tree_deduplication ORDER BY key; + +SELECT '==============='; + +INSERT INTO merge_tree_deduplication (key, value) VALUES (8, '8'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (9, '9'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (10, '10'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (11, '11'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (12, '12'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (10, '10'); +INSERT INTO merge_tree_deduplication (key, value) VALUES (11, '11'); +INSERT INTO merge_tree_deduplication (key, value) VALUES (12, '12'); + +SELECT key, value FROM merge_tree_deduplication ORDER BY key; + +SELECT '==============='; + +ALTER TABLE merge_tree_deduplication DROP PART '77_9_9_0'; -- some old part + +INSERT INTO merge_tree_deduplication (key, value) VALUES (10, '10'); + +SELECT key, value FROM merge_tree_deduplication WHERE key = 10; + +ALTER TABLE merge_tree_deduplication DROP PART '77_13_13_0'; -- fresh part + +INSERT INTO merge_tree_deduplication (key, value) VALUES (12, '12'); + +SELECT key, value FROM merge_tree_deduplication WHERE key = 12; + +DETACH TABLE merge_tree_deduplication; +ATTACH TABLE merge_tree_deduplication; + +OPTIMIZE TABLE 
merge_tree_deduplication FINAL; + +INSERT INTO merge_tree_deduplication (key, value) VALUES (11, '11'); -- deduplicated +INSERT INTO merge_tree_deduplication (key, value) VALUES (12, '12'); -- deduplicated + +SELECT '==============='; + +SELECT key, value FROM merge_tree_deduplication ORDER BY key; + +SELECT '==============='; + +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (11, '11', 88); + +ALTER TABLE merge_tree_deduplication DROP PARTITION 77; + +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (11, '11', 88); --deduplicated + +INSERT INTO merge_tree_deduplication (key, value) VALUES (11, '11'); -- not deduplicated +INSERT INTO merge_tree_deduplication (key, value) VALUES (12, '12'); -- not deduplicated + +SELECT part, key, value FROM merge_tree_deduplication ORDER BY key, part; + +-- Alters.... + +ALTER TABLE merge_tree_deduplication MODIFY SETTING non_replicated_deduplication_window = 2; + +SELECT '==============='; + +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (1, '1', 33); +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (2, '2', 33); +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (3, '3', 33); +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (1, '1', 33); + +SELECT * FROM merge_tree_deduplication WHERE part = 33 ORDER BY key; + +SELECT '==============='; + +ALTER TABLE merge_tree_deduplication MODIFY SETTING non_replicated_deduplication_window = 0; + +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (1, '1', 33); +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (1, '1', 33); + +DETACH TABLE merge_tree_deduplication; +ATTACH TABLE merge_tree_deduplication; + +SELECT * FROM merge_tree_deduplication WHERE part = 33 ORDER BY key; + +SELECT '==============='; + +ALTER TABLE merge_tree_deduplication MODIFY SETTING non_replicated_deduplication_window = 3; + +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (1, '1', 33); + +SELECT * FROM merge_tree_deduplication WHERE part = 33 ORDER BY key; + +SELECT '==============='; + +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (1, '1', 44); +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (2, '2', 44); +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (3, '3', 44); +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (1, '1', 44); + +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (4, '4', 44); + +DETACH TABLE merge_tree_deduplication; +ATTACH TABLE merge_tree_deduplication; + +SELECT * FROM merge_tree_deduplication WHERE part = 44 ORDER BY key; + +DROP TABLE IF EXISTS merge_tree_deduplication; + +SELECT '==============='; + +DROP TABLE IF EXISTS merge_tree_no_deduplication; + +CREATE TABLE merge_tree_no_deduplication +( + key UInt64, + value String +) +ENGINE=MergeTree() +ORDER BY key; + +INSERT INTO merge_tree_no_deduplication (key, value) VALUES (1, '1'); +INSERT INTO merge_tree_no_deduplication (key, value) VALUES (1, '1'); + +SELECT * FROM merge_tree_no_deduplication ORDER BY key; + +SELECT '==============='; + +ALTER TABLE merge_tree_no_deduplication MODIFY SETTING non_replicated_deduplication_window = 3; + +INSERT INTO merge_tree_no_deduplication (key, value) VALUES (1, '1'); +INSERT INTO merge_tree_no_deduplication (key, value) VALUES (2, '2'); +INSERT INTO merge_tree_no_deduplication (key, value) VALUES (3, '3'); + +DETACH TABLE merge_tree_no_deduplication; +ATTACH TABLE merge_tree_no_deduplication; + +INSERT INTO 
merge_tree_no_deduplication (key, value) VALUES (1, '1'); +INSERT INTO merge_tree_no_deduplication (key, value) VALUES (4, '4'); + +SELECT * FROM merge_tree_no_deduplication ORDER BY key; + +DROP TABLE IF EXISTS merge_tree_no_deduplication; diff --git a/parser/testdata/01781_token_extractor_buffer_overflow/ast.json b/parser/testdata/01781_token_extractor_buffer_overflow/ast.json new file mode 100644 index 000000000..96b0d63dd --- /dev/null +++ b/parser/testdata/01781_token_extractor_buffer_overflow/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001194193, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01781_token_extractor_buffer_overflow/metadata.json b/parser/testdata/01781_token_extractor_buffer_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01781_token_extractor_buffer_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01781_token_extractor_buffer_overflow/query.sql b/parser/testdata/01781_token_extractor_buffer_overflow/query.sql new file mode 100644 index 000000000..400792df8 --- /dev/null +++ b/parser/testdata/01781_token_extractor_buffer_overflow/query.sql @@ -0,0 +1,10 @@ +SET max_block_size = 10, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0, max_threads = 20; + +DROP TABLE IF EXISTS bloom_filter; +CREATE TABLE bloom_filter (`id` UInt64, `s` String, INDEX tok_bf (s, lower(s)) TYPE tokenbf_v1(512, 3, 0) GRANULARITY 1) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8, index_granularity_bytes = '10Mi'; +INSERT INTO bloom_filter SELECT number, 'yyy,uuu' FROM numbers(1024); + +SELECT max(id) FROM bloom_filter WHERE hasToken(s, 'abc'); +SELECT max(id) FROM bloom_filter WHERE hasToken(s, 'abcabcabcabcabcabcabcab\0'); + +DROP TABLE bloom_filter; diff --git a/parser/testdata/01782_field_oom/ast.json b/parser/testdata/01782_field_oom/ast.json new file mode 100644 index 000000000..42ecb6560 --- /dev/null +++ b/parser/testdata/01782_field_oom/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001211603, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01782_field_oom/metadata.json b/parser/testdata/01782_field_oom/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01782_field_oom/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01782_field_oom/query.sql b/parser/testdata/01782_field_oom/query.sql new file mode 100644 index 000000000..acbbac7f5 --- /dev/null +++ b/parser/testdata/01782_field_oom/query.sql @@ -0,0 +1,2 @@ +SET max_memory_usage = '500M'; +SELECT sumMap([number], [number]) FROM system.numbers_mt; -- { serverError MEMORY_LIMIT_EXCEEDED } diff --git a/parser/testdata/01783_merge_engine_join_key_condition/ast.json b/parser/testdata/01783_merge_engine_join_key_condition/ast.json new file mode 100644 index 000000000..4d0308037 --- /dev/null +++ b/parser/testdata/01783_merge_engine_join_key_condition/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery foo (children 1)" + }, + { + "explain": " Identifier foo" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 
0.001009797, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01783_merge_engine_join_key_condition/metadata.json b/parser/testdata/01783_merge_engine_join_key_condition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01783_merge_engine_join_key_condition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01783_merge_engine_join_key_condition/query.sql b/parser/testdata/01783_merge_engine_join_key_condition/query.sql new file mode 100644 index 000000000..372c1bd35 --- /dev/null +++ b/parser/testdata/01783_merge_engine_join_key_condition/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS foo; +DROP TABLE IF EXISTS foo_merge; +DROP TABLE IF EXISTS t2; + +CREATE TABLE foo(Id Int32, Val Int32) Engine=MergeTree PARTITION BY Val ORDER BY Id; +INSERT INTO foo SELECT number, number%5 FROM numbers(100000); + +CREATE TABLE foo_merge as foo ENGINE=Merge(currentDatabase(), '^foo'); + +CREATE TABLE t2 (Id Int32, Val Int32, X Int32) Engine=Memory; +INSERT INTO t2 values (4, 3, 4); + +SET force_primary_key = 1, force_index_by_date=1; + +SELECT * FROM foo_merge WHERE Val = 3 AND Id = 3; +SELECT count(), X FROM foo_merge JOIN t2 USING Val WHERE Val = 3 AND Id = 3 AND t2.X == 4 GROUP BY X; +SELECT count(), X FROM foo_merge JOIN t2 USING Val WHERE Val = 3 AND (Id = 3 AND t2.X == 4) GROUP BY X; +SELECT count(), X FROM foo_merge JOIN t2 USING Val WHERE Val = 3 AND Id = 3 GROUP BY X; +SELECT count(), X FROM (SELECT * FROM foo_merge) f JOIN t2 USING Val WHERE Val = 3 AND Id = 3 GROUP BY X; + +DROP TABLE IF EXISTS foo; +DROP TABLE IF EXISTS foo_merge; +DROP TABLE IF EXISTS t2; diff --git a/parser/testdata/01784_parallel_formatting_memory/ast.json b/parser/testdata/01784_parallel_formatting_memory/ast.json new file mode 100644 index 000000000..e9b0bd250 --- /dev/null +++ b/parser/testdata/01784_parallel_formatting_memory/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000843901, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01784_parallel_formatting_memory/metadata.json b/parser/testdata/01784_parallel_formatting_memory/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01784_parallel_formatting_memory/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01784_parallel_formatting_memory/query.sql b/parser/testdata/01784_parallel_formatting_memory/query.sql new file mode 100644 index 000000000..00b3b2d88 --- /dev/null +++ b/parser/testdata/01784_parallel_formatting_memory/query.sql @@ -0,0 +1,2 @@ +SET max_memory_usage = '1G'; +SELECT range(65535) FROM system.one ARRAY JOIN range(65536) AS number; -- { serverError MEMORY_LIMIT_EXCEEDED } diff --git a/parser/testdata/01785_dictionary_element_count/ast.json b/parser/testdata/01785_dictionary_element_count/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01785_dictionary_element_count/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01785_dictionary_element_count/metadata.json b/parser/testdata/01785_dictionary_element_count/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01785_dictionary_element_count/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01785_dictionary_element_count/query.sql 
b/parser/testdata/01785_dictionary_element_count/query.sql new file mode 100644 index 000000000..1d60fc924 --- /dev/null +++ b/parser/testdata/01785_dictionary_element_count/query.sql @@ -0,0 +1,93 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 01785_db; +CREATE DATABASE 01785_db; + +DROP TABLE IF EXISTS 01785_db.simple_key_source_table; +CREATE TABLE 01785_db.simple_key_source_table +( + id UInt64, + value String +) ENGINE = TinyLog(); + +INSERT INTO 01785_db.simple_key_source_table VALUES (1, 'First'); +INSERT INTO 01785_db.simple_key_source_table VALUES (1, 'First'); + +DROP DICTIONARY IF EXISTS 01785_db.simple_key_flat_dictionary; +CREATE DICTIONARY 01785_db.simple_key_flat_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() DB '01785_db' TABLE 'simple_key_source_table')) +LAYOUT(FLAT()) +LIFETIME(MIN 0 MAX 1000); + +SELECT * FROM 01785_db.simple_key_flat_dictionary; +SELECT name, database, element_count FROM system.dictionaries WHERE database = '01785_db' AND name = 'simple_key_flat_dictionary'; + +DROP DICTIONARY 01785_db.simple_key_flat_dictionary; + +CREATE DICTIONARY 01785_db.simple_key_hashed_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() DB '01785_db' TABLE 'simple_key_source_table')) +LAYOUT(HASHED()) +LIFETIME(MIN 0 MAX 1000); + +SELECT * FROM 01785_db.simple_key_hashed_dictionary; +SELECT name, database, element_count FROM system.dictionaries WHERE database = '01785_db' AND name = 'simple_key_hashed_dictionary'; + +DROP DICTIONARY 01785_db.simple_key_hashed_dictionary; + +CREATE DICTIONARY 01785_db.simple_key_cache_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() DB '01785_db' TABLE 'simple_key_source_table')) +LAYOUT(CACHE(SIZE_IN_CELLS 100000)) +LIFETIME(MIN 0 MAX 1000); + +SELECT toUInt64(1) as key, dictGet('01785_db.simple_key_cache_dictionary', 'value', key); +SELECT name, database, element_count FROM system.dictionaries WHERE database = '01785_db' AND name = 'simple_key_cache_dictionary'; + +DROP DICTIONARY 01785_db.simple_key_cache_dictionary; + +DROP TABLE 01785_db.simple_key_source_table; + +DROP TABLE IF EXISTS 01785_db.complex_key_source_table; +CREATE TABLE 01785_db.complex_key_source_table +( + id UInt64, + id_key String, + value String +) ENGINE = TinyLog(); + +INSERT INTO 01785_db.complex_key_source_table VALUES (1, 'FirstKey', 'First'); +INSERT INTO 01785_db.complex_key_source_table VALUES (1, 'FirstKey', 'First'); + +CREATE DICTIONARY 01785_db.complex_key_hashed_dictionary +( + id UInt64, + id_key String, + value String +) +PRIMARY KEY id, id_key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() DB '01785_db' TABLE 'complex_key_source_table')) +LAYOUT(COMPLEX_KEY_HASHED()) +LIFETIME(MIN 0 MAX 1000); + +SELECT * FROM 01785_db.complex_key_hashed_dictionary; +SELECT name, database, element_count FROM system.dictionaries WHERE database = '01785_db' AND name = 'complex_key_hashed_dictionary'; + +DROP DICTIONARY 01785_db.complex_key_hashed_dictionary; + +DROP TABLE 01785_db.complex_key_source_table; + +DROP DATABASE 01785_db; diff --git a/parser/testdata/01785_pmj_lc_bug/ast.json b/parser/testdata/01785_pmj_lc_bug/ast.json new file mode 100644 index 000000000..ffad12bf7 --- /dev/null +++ b/parser/testdata/01785_pmj_lc_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 
1, + + "statistics": + { + "elapsed": 0.001540313, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01785_pmj_lc_bug/metadata.json b/parser/testdata/01785_pmj_lc_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01785_pmj_lc_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01785_pmj_lc_bug/query.sql b/parser/testdata/01785_pmj_lc_bug/query.sql new file mode 100644 index 000000000..3020692c8 --- /dev/null +++ b/parser/testdata/01785_pmj_lc_bug/query.sql @@ -0,0 +1,17 @@ +SET join_algorithm = 'partial_merge'; +SET max_bytes_in_join = '100'; + +CREATE TABLE foo_lc (n LowCardinality(String)) ENGINE = Memory; +CREATE TABLE foo (n String) ENGINE = Memory; + +INSERT INTO foo SELECT toString(number) AS n FROM system.numbers LIMIT 1025; +INSERT INTO foo_lc SELECT toString(number) AS n FROM system.numbers LIMIT 1025; + +SELECT 1025 == count(n) FROM foo_lc AS t1 ANY LEFT JOIN foo_lc AS t2 ON t1.n == t2.n; +SELECT 1025 == count(n) FROM foo AS t1 ANY LEFT JOIN foo_lc AS t2 ON t1.n == t2.n; +SELECT 1025 == count(n) FROM foo_lc AS t1 ANY LEFT JOIN foo AS t2 ON t1.n == t2.n; + +SELECT 1025 == count(n) FROM foo_lc AS t1 ALL LEFT JOIN foo_lc AS t2 ON t1.n == t2.n; + +DROP TABLE foo; +DROP TABLE foo_lc; diff --git a/parser/testdata/01786_group_by_pk_many_streams/ast.json b/parser/testdata/01786_group_by_pk_many_streams/ast.json new file mode 100644 index 000000000..6965892d0 --- /dev/null +++ b/parser/testdata/01786_group_by_pk_many_streams/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery group_by_pk (children 1)" + }, + { + "explain": " Identifier group_by_pk" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001242251, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/01786_group_by_pk_many_streams/metadata.json b/parser/testdata/01786_group_by_pk_many_streams/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01786_group_by_pk_many_streams/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01786_group_by_pk_many_streams/query.sql b/parser/testdata/01786_group_by_pk_many_streams/query.sql new file mode 100644 index 000000000..e555aa4d6 --- /dev/null +++ b/parser/testdata/01786_group_by_pk_many_streams/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS group_by_pk; + +CREATE TABLE group_by_pk (k UInt64, v UInt64) +ENGINE = MergeTree ORDER BY k PARTITION BY v % 50; + +INSERT INTO group_by_pk SELECT number / 100, number FROM numbers(1000); + +SELECT sum(v) AS s FROM group_by_pk GROUP BY k ORDER BY s DESC LIMIT 5 +SETTINGS optimize_aggregation_in_order = 1, max_block_size = 1; + +SELECT '======='; + +SELECT sum(v) AS s FROM group_by_pk GROUP BY k ORDER BY s DESC LIMIT 5 +SETTINGS optimize_aggregation_in_order = 0, max_block_size = 1; + +DROP TABLE IF EXISTS group_by_pk; diff --git a/parser/testdata/01787_arena_assert_column_nothing/ast.json b/parser/testdata/01787_arena_assert_column_nothing/ast.json new file mode 100644 index 000000000..e9327c759 --- /dev/null +++ b/parser/testdata/01787_arena_assert_column_nothing/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList 
(children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function emptyArrayToSingle (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayFilter (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001291697, + "rows_read": 18, + "bytes_read": 715 + } +} diff --git a/parser/testdata/01787_arena_assert_column_nothing/metadata.json b/parser/testdata/01787_arena_assert_column_nothing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01787_arena_assert_column_nothing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01787_arena_assert_column_nothing/query.sql b/parser/testdata/01787_arena_assert_column_nothing/query.sql new file mode 100644 index 000000000..de6374a1b --- /dev/null +++ b/parser/testdata/01787_arena_assert_column_nothing/query.sql @@ -0,0 +1 @@ +SELECT 1 GROUP BY emptyArrayToSingle(arrayFilter(x -> 1, [])); diff --git a/parser/testdata/01787_map_remote/ast.json b/parser/testdata/01787_map_remote/ast.json new file mode 100644 index 000000000..9b7c6a833 --- /dev/null +++ b/parser/testdata/01787_map_remote/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function map (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'b'" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '127.0.0.{1,2}'" + }, + { + "explain": " Identifier system" + }, + { + "explain": " Identifier one" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001324745, + "rows_read": 18, + "bytes_read": 657 + } +} diff --git a/parser/testdata/01787_map_remote/metadata.json b/parser/testdata/01787_map_remote/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01787_map_remote/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01787_map_remote/query.sql b/parser/testdata/01787_map_remote/query.sql new file mode 100644 index 000000000..e169ed627 --- /dev/null +++ b/parser/testdata/01787_map_remote/query.sql @@ -0,0 +1,8 @@ +SELECT map('a', 1, 'b', 2) FROM remote('127.0.0.{1,2}', system, one); +SELECT map('a', 1, 'b', 2) FROM remote('127.0.0.{1,2}'); + +SELECT map() from remote('127.0.0.{1,2}', system,one); + +drop table if exists bug_repro_local; +CREATE TABLE bug_repro_local 
(`attributes` Map(LowCardinality(String), String)) ENGINE = Log as select map('',''); +SELECT if(1, attributes, map()) from remote('127.0.0.{1,2}', currentDatabase(), bug_repro_local) limit 1; diff --git a/parser/testdata/01788_update_nested_type_subcolumn_check/ast.json b/parser/testdata/01788_update_nested_type_subcolumn_check/ast.json new file mode 100644 index 000000000..ab7859ef6 --- /dev/null +++ b/parser/testdata/01788_update_nested_type_subcolumn_check/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_wide_nested (children 1)" + }, + { + "explain": " Identifier test_wide_nested" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001317069, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/01788_update_nested_type_subcolumn_check/metadata.json b/parser/testdata/01788_update_nested_type_subcolumn_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01788_update_nested_type_subcolumn_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01788_update_nested_type_subcolumn_check/query.sql b/parser/testdata/01788_update_nested_type_subcolumn_check/query.sql new file mode 100644 index 000000000..2edf99299 --- /dev/null +++ b/parser/testdata/01788_update_nested_type_subcolumn_check/query.sql @@ -0,0 +1,81 @@ +DROP TABLE IF EXISTS test_wide_nested; + +CREATE TABLE test_wide_nested +( + `id` Int, + `info.id` Array(Int), + `info.name` Array(String), + `info.age` Array(Int) +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0; + +SELECT '********* test 1 **********'; +set mutations_sync = 1; + +INSERT INTO test_wide_nested SELECT number, [number,number + 1], ['aa','bb'], [number,number * 2] FROM numbers(5); + +alter table test_wide_nested update `info.id` = [100,200] where id = 1; +select * from test_wide_nested where id = 1 order by id; + +alter table test_wide_nested update `info.id` = [100,200,300], `info.age` = [10,20,30], `info.name` = ['a','b','c'] where id = 2; +select * from test_wide_nested; + +alter table test_wide_nested update `info.id` = [100,200,300], `info.age` = `info.id`, `info.name` = ['a','b','c'] where id = 2; +select * from test_wide_nested; + +alter table test_wide_nested update `info.id` = [100,200], `info.age`=[68,72] where id = 3; +alter table test_wide_nested update `info.id` = `info.age` where id = 3; +select * from test_wide_nested; + +alter table test_wide_nested update `info.id` = [100,200], `info.age` = [10,20,30], `info.name` = ['a','b','c'] where id = 0; -- { serverError UNFINISHED } + +kill mutation where table = 'test_wide_nested' and database = currentDatabase() format Null; + +-- Recreate table, because KILL MUTATION is not suitable for parallel test execution.
+SELECT '********* test 2 **********'; +DROP TABLE test_wide_nested; + +CREATE TABLE test_wide_nested +( + `id` Int, + `info.id` Array(Int), + `info.name` Array(String), + `info.age` Array(Int) +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO test_wide_nested SELECT number, [number,number + 1], ['aa','bb'], [number,number * 2] FROM numbers(5); +ALTER TABLE test_wide_nested ADD COLUMN `info2.id` Array(Int); +ALTER TABLE test_wide_nested ADD COLUMN `info2.name` Array(String); +ALTER table test_wide_nested update `info2.id` = `info.id`, `info2.name` = `info.name` where 1; +select * from test_wide_nested; + +alter table test_wide_nested update `info.id` = [100,200,300], `info.age` = [10,20,30] where id = 1; -- { serverError UNFINISHED } + +kill mutation where table = 'test_wide_nested' and database = currentDatabase() format Null; + +DROP TABLE test_wide_nested; + +SELECT '********* test 3 **********'; +DROP TABLE IF EXISTS test_wide_not_nested; + +CREATE TABLE test_wide_not_nested +( + `id` Int, + `info.id` Int, + `info.name` String, + `info.age` Int +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO test_wide_not_nested SELECT number, number, 'aa', number * 2 FROM numbers(5); +ALTER TABLE test_wide_not_nested UPDATE `info.name` = 'bb' WHERE id = 1; +SELECT * FROM test_wide_not_nested ORDER BY id; + +DROP TABLE test_wide_not_nested; diff --git a/parser/testdata/01790_dist_INSERT_block_structure_mismatch_types_and_names/ast.json b/parser/testdata/01790_dist_INSERT_block_structure_mismatch_types_and_names/ast.json new file mode 100644 index 000000000..9880d3f18 --- /dev/null +++ b/parser/testdata/01790_dist_INSERT_block_structure_mismatch_types_and_names/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tmp_01781 (children 1)" + }, + { + "explain": " Identifier tmp_01781" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001426443, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/01790_dist_INSERT_block_structure_mismatch_types_and_names/metadata.json b/parser/testdata/01790_dist_INSERT_block_structure_mismatch_types_and_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01790_dist_INSERT_block_structure_mismatch_types_and_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01790_dist_INSERT_block_structure_mismatch_types_and_names/query.sql b/parser/testdata/01790_dist_INSERT_block_structure_mismatch_types_and_names/query.sql new file mode 100644 index 000000000..b77d96e16 --- /dev/null +++ b/parser/testdata/01790_dist_INSERT_block_structure_mismatch_types_and_names/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS tmp_01781; +DROP TABLE IF EXISTS dist_01781; + +SET prefer_localhost_replica=0; + +CREATE TABLE tmp_01781 (n LowCardinality(String)) ENGINE=Memory; +CREATE TABLE dist_01781 (n LowCardinality(String)) Engine=Distributed(test_cluster_two_shards, currentDatabase(), tmp_01781, cityHash64(n)); + +SET distributed_foreground_insert=1; +INSERT INTO dist_01781 VALUES ('1'),('2'); +-- different LowCardinality size +INSERT INTO dist_01781 SELECT * FROM numbers(1000); + +SET distributed_foreground_insert=0; +SYSTEM STOP DISTRIBUTED SENDS dist_01781; +INSERT INTO dist_01781 VALUES ('1'),('2'); +-- different LowCardinality size +INSERT INTO dist_01781 SELECT * FROM numbers(1000); +SYSTEM FLUSH DISTRIBUTED 
dist_01781; + +DROP TABLE tmp_01781; +DROP TABLE dist_01781; diff --git a/parser/testdata/01795_TinyLog_rwlock_ub/ast.json b/parser/testdata/01795_TinyLog_rwlock_ub/ast.json new file mode 100644 index 000000000..1d3fd410e --- /dev/null +++ b/parser/testdata/01795_TinyLog_rwlock_ub/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery underlying_01795 (children 1)" + }, + { + "explain": " Identifier underlying_01795" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001494457, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/01795_TinyLog_rwlock_ub/metadata.json b/parser/testdata/01795_TinyLog_rwlock_ub/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01795_TinyLog_rwlock_ub/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01795_TinyLog_rwlock_ub/query.sql b/parser/testdata/01795_TinyLog_rwlock_ub/query.sql new file mode 100644 index 000000000..7afa2b7e1 --- /dev/null +++ b/parser/testdata/01795_TinyLog_rwlock_ub/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS underlying_01795; +CREATE TABLE underlying_01795 (key UInt64) Engine=TinyLog(); +INSERT INTO FUNCTION remote('127.1', currentDatabase(), underlying_01795) SELECT toUInt64(number) FROM system.numbers LIMIT 1; +SELECT * FROM underlying_01795 FORMAT Null; +DROP TABLE underlying_01795; diff --git a/parser/testdata/01796_Log_rwlock_ub/ast.json b/parser/testdata/01796_Log_rwlock_ub/ast.json new file mode 100644 index 000000000..ae63652db --- /dev/null +++ b/parser/testdata/01796_Log_rwlock_ub/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery underlying_01796 (children 1)" + }, + { + "explain": " Identifier underlying_01796" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001119635, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/01796_Log_rwlock_ub/metadata.json b/parser/testdata/01796_Log_rwlock_ub/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01796_Log_rwlock_ub/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01796_Log_rwlock_ub/query.sql b/parser/testdata/01796_Log_rwlock_ub/query.sql new file mode 100644 index 000000000..4f95dbc14 --- /dev/null +++ b/parser/testdata/01796_Log_rwlock_ub/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS underlying_01796; +CREATE TABLE underlying_01796 (key UInt64) Engine=Log(); +INSERT INTO FUNCTION remote('127.1', currentDatabase(), underlying_01796) SELECT toUInt64(number) FROM system.numbers LIMIT 1; +SELECT * FROM underlying_01796 FORMAT Null; +DROP TABLE underlying_01796; diff --git a/parser/testdata/01797_StripeLog_rwlock_ub/ast.json b/parser/testdata/01797_StripeLog_rwlock_ub/ast.json new file mode 100644 index 000000000..139ac9b5a --- /dev/null +++ b/parser/testdata/01797_StripeLog_rwlock_ub/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery underlying_01797 (children 1)" + }, + { + "explain": " Identifier underlying_01797" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001358192, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/01797_StripeLog_rwlock_ub/metadata.json b/parser/testdata/01797_StripeLog_rwlock_ub/metadata.json new file mode 100644 index 000000000..ef120d978 --- 
/dev/null +++ b/parser/testdata/01797_StripeLog_rwlock_ub/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01797_StripeLog_rwlock_ub/query.sql b/parser/testdata/01797_StripeLog_rwlock_ub/query.sql new file mode 100644 index 000000000..18cadf0a2 --- /dev/null +++ b/parser/testdata/01797_StripeLog_rwlock_ub/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS underlying_01797; +CREATE TABLE underlying_01797 (key UInt64) Engine=StripeLog(); +INSERT INTO FUNCTION remote('127.1', currentDatabase(), underlying_01797) SELECT toUInt64(number) FROM system.numbers LIMIT 1; +SELECT * FROM underlying_01797 FORMAT Null; +DROP TABLE underlying_01797; diff --git a/parser/testdata/01798_having_push_down/ast.json b/parser/testdata/01798_having_push_down/ast.json new file mode 100644 index 000000000..d38cd4b8d --- /dev/null +++ b/parser/testdata/01798_having_push_down/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_having (children 1)" + }, + { + "explain": " Identifier t_having" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001274027, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01798_having_push_down/metadata.json b/parser/testdata/01798_having_push_down/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01798_having_push_down/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01798_having_push_down/query.sql b/parser/testdata/01798_having_push_down/query.sql new file mode 100644 index 000000000..afedc8c03 --- /dev/null +++ b/parser/testdata/01798_having_push_down/query.sql @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS t_having; + +CREATE TABLE t_having (c0 Int32, c1 UInt64) ENGINE = MergeTree ORDER BY c0; + +INSERT INTO t_having SELECT number, number FROM numbers(1000); + +SELECT sum(c0 = 0), min(c0 + 1), sum(c0 + 2) FROM t_having +GROUP BY c0 HAVING c0 = 0 +SETTINGS enable_optimize_predicate_expression=0; + +SET enable_positional_arguments=0; + +SELECT c0 + -1, sum(intDivOrZero(intDivOrZero(NULL, NULL), '2'), intDivOrZero(10000000000., intDivOrZero(intDivOrZero(intDivOrZero(NULL, NULL), 10), NULL))) FROM t_having GROUP BY c0 = 2, c0 = 10, intDivOrZero(intDivOrZero(intDivOrZero(NULL, NULL), NULL), NULL), c0 HAVING c0 = 2 SETTINGS enable_optimize_predicate_expression = 0; + +SELECT sum(c0 + 257) FROM t_having GROUP BY c0 = -9223372036854775808, NULL, -2147483649, c0 HAVING c0 = -9223372036854775808 SETTINGS enable_optimize_predicate_expression = 0; + +SELECT c0 + -2, c0 + -9223372036854775807, c0 = NULL FROM t_having GROUP BY c0 = 0.9998999834060669, 1023, c0 HAVING c0 = 0.9998999834060669 SETTINGS enable_optimize_predicate_expression = 0; + +SET enable_parallel_replicas = 1, + max_parallel_replicas = 3, + cluster_for_parallel_replicas = 'parallel_replicas', + parallel_replicas_for_non_replicated_merge_tree = 1; + +SELECT sum(c0 = 0), min(c0 + 1), sum(c0 + 2) FROM t_having +GROUP BY c0 HAVING c0 = 0; + +DROP TABLE t_having; + +CREATE TABLE t_exact (c0 Bool, c1 Int) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO TABLE t_exact (c0, c1) VALUES (FALSE, 1), (TRUE, 2); +SELECT c1 FROM t_exact GROUP BY c1, c0 HAVING c0; +DROP TABLE t_exact; + +SELECT 1 FROM remote('127.0.0.{1,1}') GROUP BY (2, materialize(3)) HAVING materialize(3) SETTINGS group_by_use_nulls = true; + diff --git a/parser/testdata/01798_uniq_theta_sketch/ast.json b/parser/testdata/01798_uniq_theta_sketch/ast.json new file mode 
100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01798_uniq_theta_sketch/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01798_uniq_theta_sketch/metadata.json b/parser/testdata/01798_uniq_theta_sketch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01798_uniq_theta_sketch/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01798_uniq_theta_sketch/query.sql b/parser/testdata/01798_uniq_theta_sketch/query.sql new file mode 100644 index 000000000..59d5888c0 --- /dev/null +++ b/parser/testdata/01798_uniq_theta_sketch/query.sql @@ -0,0 +1,216 @@ +-- Tags: no-fasttest + +SET max_block_size = 65505; + +SELECT 'uniqTheta many args'; + +SELECT + uniqTheta(x), uniqTheta((x)), uniqTheta(x, y), uniqTheta((x, y)), uniqTheta(x, y, z), uniqTheta((x, y, z)) +FROM +( + SELECT + number % 10 AS x, + intDiv(number, 10) % 10 AS y, + toString(intDiv(number, 100) % 10) AS z + FROM system.numbers LIMIT 1000 +); + + +SELECT k, + uniqTheta(x), uniqTheta((x)), uniqTheta(x, y), uniqTheta((x, y)), uniqTheta(x, y, z), uniqTheta((x, y, z)), + count() AS c +FROM +( + SELECT + (number + 0x8ffcbd8257219a26) * 0x66bb3430c06d2353 % 131 AS k, + number % 10 AS x, + intDiv(number, 10) % 10 AS y, + toString(intDiv(number, 100) % 10) AS z + FROM system.numbers LIMIT 100000 +) +GROUP BY k +ORDER BY c DESC, k ASC +LIMIT 10; + + +SELECT 'uniqTheta distinct'; + +SET count_distinct_implementation = 'uniqTheta'; +SELECT count(DISTINCT x) FROM (SELECT number % 123 AS x FROM system.numbers LIMIT 1000); +SELECT count(DISTINCT x, y) FROM (SELECT number % 11 AS x, number % 13 AS y FROM system.numbers LIMIT 1000); + + +SELECT 'uniqTheta arrays'; + +SELECT uniqThetaArray([0, 1, 1], [0, 1, 1], [0, 1, 1]); +SELECT uniqThetaArray([0, 1, 1], [0, 1, 1], [0, 1, 0]); +SELECT uniqTheta(x) FROM (SELECT arrayJoin([[1, 2], [1, 2], [1, 2, 3], []]) AS x); + + +SELECT 'uniqTheta complex types'; + +SELECT uniqTheta(x) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniqTheta(x) FROM (SELECT arrayJoin([[[]], [['a', 'b']], [['a'], ['b']], [['a', 'b']]]) AS x); +SELECT uniqTheta(x, x) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniqTheta(x, arrayMap(elem -> [elem, elem], x)) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniqTheta(x, toString(x)) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniqTheta((x, x)) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniqTheta((x, arrayMap(elem -> [elem, elem], x))) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniqTheta((x, toString(x))) FROM (SELECT arrayJoin([[], ['a'], ['a', 'b'], []]) AS x); +SELECT uniqTheta(x) FROM (SELECT arrayJoin([[], ['a'], ['a', NULL, 'b'], []]) AS x); + + +SELECT 'uniqTheta decimals'; + +DROP TABLE IF EXISTS decimal; +CREATE TABLE decimal +( + a Decimal32(4), + b Decimal64(8), + c Decimal128(8) +) ENGINE = Memory; + +SELECT (uniqTheta(a), uniqTheta(b), uniqTheta(c)) +FROM (SELECT * FROM decimal ORDER BY a); + +INSERT INTO decimal (a, b, c) +SELECT toDecimal32(number - 50, 4), toDecimal64(number - 50, 8) / 3, toDecimal128(number - 50, 8) / 5 +FROM system.numbers LIMIT 101; + +SELECT (uniqTheta(a), uniqTheta(b), uniqTheta(c)) +FROM (SELECT * FROM decimal ORDER BY a); + +DROP TABLE decimal; + + +SELECT 'uniqTheta remove injective'; + +set optimize_injective_functions_inside_uniq = 1; + +EXPLAIN SYNTAX select uniqTheta(x) from (select number % 2 as x from
numbers(10)); +EXPLAIN SYNTAX select uniqTheta(x + y) from (select number % 2 as x, number % 3 y from numbers(10)); +EXPLAIN SYNTAX select uniqTheta(-x) from (select number % 2 as x from numbers(10)); +EXPLAIN SYNTAX select uniqTheta(bitNot(x)) from (select number % 2 as x from numbers(10)); +EXPLAIN SYNTAX select uniqTheta(bitNot(-x)) from (select number % 2 as x from numbers(10)); +EXPLAIN SYNTAX select uniqTheta(-bitNot(-x)) from (select number % 2 as x from numbers(10)); + +set optimize_injective_functions_inside_uniq = 0; + +EXPLAIN SYNTAX select uniqTheta(x) from (select number % 2 as x from numbers(10)); +EXPLAIN SYNTAX select uniqTheta(x + y) from (select number % 2 as x, number % 3 y from numbers(10)); +EXPLAIN SYNTAX select uniqTheta(-x) from (select number % 2 as x from numbers(10)); +EXPLAIN SYNTAX select uniqTheta(bitNot(x)) from (select number % 2 as x from numbers(10)); +EXPLAIN SYNTAX select uniqTheta(bitNot(-x)) from (select number % 2 as x from numbers(10)); +EXPLAIN SYNTAX select uniqTheta(-bitNot(-x)) from (select number % 2 as x from numbers(10)); + + +DROP TABLE IF EXISTS stored_aggregates; + +-- simple +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE stored_aggregates +( + d Date, + Uniq AggregateFunction(uniq, UInt64), + UniqThetaSketch AggregateFunction(uniqTheta, UInt64) +) +ENGINE = AggregatingMergeTree(d, d, 8192); + +INSERT INTO stored_aggregates +SELECT + toDate('2014-06-01') AS d, + uniqState(number) AS Uniq, + uniqThetaState(number) AS UniqThetaSketch +FROM +( + SELECT * FROM system.numbers LIMIT 1000 +); + +SELECT uniqMerge(Uniq), uniqThetaMerge(UniqThetaSketch) FROM stored_aggregates; + +SELECT d, uniqMerge(Uniq), uniqThetaMerge(UniqThetaSketch) FROM stored_aggregates GROUP BY d ORDER BY d; + +OPTIMIZE TABLE stored_aggregates; + +SELECT uniqMerge(Uniq), uniqThetaMerge(UniqThetaSketch) FROM stored_aggregates; + +SELECT d, uniqMerge(Uniq), uniqThetaMerge(UniqThetaSketch) FROM stored_aggregates GROUP BY d ORDER BY d; + +DROP TABLE stored_aggregates; + +-- complex +CREATE TABLE stored_aggregates +( + d Date, + k1 UInt64, + k2 String, + Uniq AggregateFunction(uniq, UInt64), + UniqThetaSketch AggregateFunction(uniqTheta, UInt64) +) +ENGINE = AggregatingMergeTree(d, (d, k1, k2), 8192); + +INSERT INTO stored_aggregates +SELECT + toDate('2014-06-01') AS d, + intDiv(number, 100) AS k1, + toString(intDiv(number, 10)) AS k2, + uniqState(toUInt64(number % 7)) AS Uniq, + uniqThetaState(toUInt64(number % 7)) AS UniqThetaSketch +FROM +( + SELECT * FROM system.numbers LIMIT 1000 +) +GROUP BY d, k1, k2 +ORDER BY d, k1, k2; + +SELECT d, k1, k2, + uniqMerge(Uniq), uniqThetaMerge(UniqThetaSketch) +FROM stored_aggregates +GROUP BY d, k1, k2 +ORDER BY d, k1, k2; + +SELECT d, k1, + uniqMerge(Uniq), uniqThetaMerge(UniqThetaSketch) +FROM stored_aggregates +GROUP BY d, k1 +ORDER BY d, k1; + +SELECT d, + uniqMerge(Uniq), uniqThetaMerge(UniqThetaSketch) +FROM stored_aggregates +GROUP BY d +ORDER BY d; + +DROP TABLE stored_aggregates; + +---- sum + uniq with more data +drop table if exists summing_merge_tree_null; +drop table if exists summing_merge_tree_aggregate_function; +create table summing_merge_tree_null ( + d materialized today(), + k UInt64, + c UInt64, + u UInt64 +) engine=Null; + +create materialized view summing_merge_tree_aggregate_function ( + d Date, + k UInt64, + c UInt64, + un AggregateFunction(uniq, UInt64), + ut AggregateFunction(uniqTheta, UInt64) +) engine=SummingMergeTree(d, k, 8192) +as select d, k, sum(c) as c, uniqState(u) as un, uniqThetaState(u) 
as ut +from summing_merge_tree_null +group by d, k; + +-- prime number 53 to avoid resonance between %3 and %53 +insert into summing_merge_tree_null select number % 3, 1, number % 53 from numbers(999999); + +select k, sum(c), uniqMerge(un), uniqThetaMerge(ut) from summing_merge_tree_aggregate_function group by k order by k; +optimize table summing_merge_tree_aggregate_function; +select k, sum(c), uniqMerge(un), uniqThetaMerge(ut) from summing_merge_tree_aggregate_function group by k order by k; + +drop table summing_merge_tree_aggregate_function; +drop table summing_merge_tree_null; + diff --git a/parser/testdata/01798_uniq_theta_union_intersect_not/ast.json b/parser/testdata/01798_uniq_theta_union_intersect_not/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01798_uniq_theta_union_intersect_not/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01798_uniq_theta_union_intersect_not/metadata.json b/parser/testdata/01798_uniq_theta_union_intersect_not/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01798_uniq_theta_union_intersect_not/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01798_uniq_theta_union_intersect_not/query.sql b/parser/testdata/01798_uniq_theta_union_intersect_not/query.sql new file mode 100644 index 000000000..ccaf6ca6c --- /dev/null +++ b/parser/testdata/01798_uniq_theta_union_intersect_not/query.sql @@ -0,0 +1,90 @@ +-- Tags: no-fasttest + +SELECT 'uniqTheta union test'; + +select finalizeAggregation(uniqThetaUnion(a, b)), finalizeAggregation(a), finalizeAggregation(b) from (select arrayReduce('uniqThetaState',[]) as a, arrayReduce('uniqThetaState',[]) as b ); + +select finalizeAggregation(uniqThetaUnion(a, b)), finalizeAggregation(a), finalizeAggregation(b) from (select arrayReduce('uniqThetaState',[1,2]) as a, arrayReduce('uniqThetaState',[2,3,4]) as b ); + +select finalizeAggregation(uniqThetaUnion(a, b)), finalizeAggregation(a), finalizeAggregation(b) from (select arrayReduce('uniqThetaState',[2,3,4]) as a, arrayReduce('uniqThetaState',[1,2]) as b ); + +SELECT 'uniqTheta intersect test'; + +select finalizeAggregation(uniqThetaIntersect(a, b)), finalizeAggregation(a), finalizeAggregation(b) from (select arrayReduce('uniqThetaState',[]) as a, arrayReduce('uniqThetaState',[]) as b ); + +select finalizeAggregation(uniqThetaIntersect(a, b)), finalizeAggregation(a), finalizeAggregation(b) from (select arrayReduce('uniqThetaState',[1,2]) as a, arrayReduce('uniqThetaState',[2,3,4]) as b ); + +select finalizeAggregation(uniqThetaIntersect(a, b)), finalizeAggregation(a), finalizeAggregation(b) from (select arrayReduce('uniqThetaState',[2,3,4]) as a, arrayReduce('uniqThetaState',[1,2]) as b ); + +SELECT 'uniqTheta not test'; + +select finalizeAggregation(uniqThetaNot(a, b)), finalizeAggregation(a), finalizeAggregation(b) from (select arrayReduce('uniqThetaState',[]) as a, arrayReduce('uniqThetaState',[]) as b ); + +select finalizeAggregation(uniqThetaNot(a, b)), finalizeAggregation(a), finalizeAggregation(b) from (select arrayReduce('uniqThetaState',[1,2]) as a, arrayReduce('uniqThetaState',[2,3,4]) as b ); + +select finalizeAggregation(uniqThetaNot(a, b)), finalizeAggregation(a), finalizeAggregation(b) from (select arrayReduce('uniqThetaState',[2,3,4]) as a, arrayReduce('uniqThetaState',[1,2]) as b ); + +SELECT 'uniqTheta retention test'; + +select finalizeAggregation(uniqThetaIntersect(a,b)), finalizeAggregation(a),finalizeAggregation(b)
from +( +select (uniqThetaStateIf(number, number>0)) as a, (uniqThetaStateIf(number, number>5)) as b +from +(select number FROM system.numbers LIMIT 10) +); + +SELECT 'uniqTheta retention with AggregatingMergeTree test'; +DROP TABLE IF EXISTS test1; + +CREATE TABLE test1 +( + `year` String , + `uv` AggregateFunction(uniqTheta, Int64) +) +ENGINE = AggregatingMergeTree() +ORDER BY (year); + +INSERT INTO TABLE test1(year, uv) select '2021',uniqThetaState(toInt64(1)); +INSERT INTO TABLE test1(year, uv) select '2021',uniqThetaState(toInt64(2)); +INSERT INTO TABLE test1(year, uv) select '2021',uniqThetaState(toInt64(3)); +INSERT INTO TABLE test1(year, uv) select '2021',uniqThetaState(toInt64(4)); +INSERT INTO TABLE test1(year, uv) select '2022',uniqThetaState(toInt64(1)); +INSERT INTO TABLE test1(year, uv) select '2022',uniqThetaState(toInt64(3)); + +select finalizeAggregation(uniqThetaIntersect(uv2021,uv2022))/finalizeAggregation(uv2021),finalizeAggregation(uniqThetaIntersect(uv2021,uv2022)),finalizeAggregation(uv2021) +from +( +select uniqThetaMergeStateIf(uv,year='2021') as uv2021, uniqThetaMergeStateIf(uv,year='2022') as uv2022 +from test1 +); + +DROP TABLE IF EXISTS test1; + +SELECT 'uniqTheta retention with MergeTree test'; +DROP TABLE IF EXISTS test2; + +CREATE TABLE test2 +( + `year` String , + `uv` Int64 +) +ENGINE = MergeTree() +ORDER BY (year); + +INSERT INTO TABLE test2(year, uv) select '2021',1; +INSERT INTO TABLE test2(year, uv) select '2021',2; +INSERT INTO TABLE test2(year, uv) select '2021',3; +INSERT INTO TABLE test2(year, uv) select '2021',4; +INSERT INTO TABLE test2(year, uv) select '2022',1; +INSERT INTO TABLE test2(year, uv) select '2022',3; + +select finalizeAggregation(uniqThetaIntersect(uv2021,uv2022))/finalizeAggregation(uv2021),finalizeAggregation(uniqThetaIntersect(uv2021,uv2022)),finalizeAggregation(uv2021) +from +( +select uniqThetaStateIf(uv,year='2021') as uv2021, uniqThetaStateIf(uv,year='2022') as uv2022 +from test2 +); + + + +DROP TABLE IF EXISTS test2; diff --git a/parser/testdata/01799_long_uniq_theta_sketch/ast.json b/parser/testdata/01799_long_uniq_theta_sketch/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01799_long_uniq_theta_sketch/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01799_long_uniq_theta_sketch/metadata.json b/parser/testdata/01799_long_uniq_theta_sketch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01799_long_uniq_theta_sketch/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01799_long_uniq_theta_sketch/query.sql b/parser/testdata/01799_long_uniq_theta_sketch/query.sql new file mode 100644 index 000000000..8d079fdd4 --- /dev/null +++ b/parser/testdata/01799_long_uniq_theta_sketch/query.sql @@ -0,0 +1,40 @@ +-- Tags: long, no-fasttest + +-- The result slightly differs but it's ok since `uniqTheta` is an approximate function.
+set max_bytes_before_external_group_by = 0, max_bytes_ratio_before_external_group_by = 0; + +SELECT 'uniqTheta'; + +SELECT Y, uniqTheta(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqTheta(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqTheta(X) FROM (SELECT number AS X, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqTheta round(float)'; + +SELECT Y, uniqTheta(X) FROM (SELECT number AS X, round(1/(1 + (3*X*X - 7*X + 11) % 37), 3) AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqTheta(X) FROM (SELECT number AS X, round(1/(1 + (3*X*X - 7*X + 11) % 37), 3) AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqTheta(X) FROM (SELECT number AS X, round(1/(1 + (3*X*X - 7*X + 11) % 37), 3) AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqTheta round(toFloat32())'; + +SELECT Y, uniqTheta(X) FROM (SELECT number AS X, round(toFloat32(1/(1 + (3*X*X - 7*X + 11) % 37)), 3) AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqTheta(X) FROM (SELECT number AS X, round(toFloat32(1/(1 + (3*X*X - 7*X + 11) % 37)), 3) AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqTheta(X) FROM (SELECT number AS X, round(toFloat32(1/(1 + (3*X*X - 7*X + 11) % 37)), 3) AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqTheta IPv4NumToString'; + +SELECT Y, uniqTheta(Z) FROM (SELECT number AS X, IPv4NumToString(toUInt32(X)) AS Z, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 15) GROUP BY Y ORDER BY Y; +SELECT Y, uniqTheta(Z) FROM (SELECT number AS X, IPv4NumToString(toUInt32(X)) AS Z, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 3000) GROUP BY Y ORDER BY Y; +SELECT Y, uniqTheta(Z) FROM (SELECT number AS X, IPv4NumToString(toUInt32(X)) AS Z, (3*X*X - 7*X + 11) % 37 AS Y FROM system.numbers LIMIT 1000000) GROUP BY Y ORDER BY Y; + +SELECT 'uniqTheta remote()'; + +SELECT uniqTheta(dummy) FROM remote('127.0.0.{2,3}', system.one); + + +SELECT 'uniqTheta precise'; +SELECT uniqExact(number) FROM numbers(1e7); +SELECT uniqCombined(number) FROM numbers(1e7); +SELECT uniqCombined64(number) FROM numbers(1e7); +SELECT uniqTheta(number) FROM numbers(1e7); + diff --git a/parser/testdata/01800_log_nested/ast.json b/parser/testdata/01800_log_nested/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01800_log_nested/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01800_log_nested/metadata.json b/parser/testdata/01800_log_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01800_log_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01800_log_nested/query.sql b/parser/testdata/01800_log_nested/query.sql new file mode 100644 index 000000000..eb74b8707 --- /dev/null +++ b/parser/testdata/01800_log_nested/query.sql @@ -0,0 +1,20 @@ +-- TinyLog +DROP TABLE IF EXISTS nested_01800_tiny_log; +CREATE TABLE nested_01800_tiny_log (`column` Nested(name String, names Array(String), types Array(Enum8('PU' = 1, 'US' = 2, 'OTHER' = 3)))) ENGINE = TinyLog; +INSERT INTO nested_01800_tiny_log VALUES (['Hello', 'World'], [['a'], ['b', 'c']], [['PU', 'US'], ['OTHER']]); +SELECT 10 FROM nested_01800_tiny_log FORMAT Null; +DROP TABLE 
nested_01800_tiny_log; + +-- StripeLog +DROP TABLE IF EXISTS nested_01800_stripe_log; +CREATE TABLE nested_01800_stripe_log (`column` Nested(name String, names Array(String), types Array(Enum8('PU' = 1, 'US' = 2, 'OTHER' = 3)))) ENGINE = StripeLog; +INSERT INTO nested_01800_stripe_log VALUES (['Hello', 'World'], [['a'], ['b', 'c']], [['PU', 'US'], ['OTHER']]); +SELECT 10 FROM nested_01800_stripe_log FORMAT Null; +DROP TABLE nested_01800_stripe_log; + +-- Log +DROP TABLE IF EXISTS nested_01800_log; +CREATE TABLE nested_01800_log (`column` Nested(name String, names Array(String), types Array(Enum8('PU' = 1, 'US' = 2, 'OTHER' = 3)))) ENGINE = Log; +INSERT INTO nested_01800_log VALUES (['Hello', 'World'], [['a'], ['b', 'c']], [['PU', 'US'], ['OTHER']]); +SELECT 10 FROM nested_01800_log FORMAT Null; +DROP TABLE nested_01800_log; diff --git a/parser/testdata/01801_approx_total_rows_mergetree_reverse/ast.json b/parser/testdata/01801_approx_total_rows_mergetree_reverse/ast.json new file mode 100644 index 000000000..2f87d5f22 --- /dev/null +++ b/parser/testdata/01801_approx_total_rows_mergetree_reverse/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_01801 (children 1)" + }, + { + "explain": " Identifier data_01801" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001045876, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01801_approx_total_rows_mergetree_reverse/metadata.json b/parser/testdata/01801_approx_total_rows_mergetree_reverse/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01801_approx_total_rows_mergetree_reverse/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01801_approx_total_rows_mergetree_reverse/query.sql b/parser/testdata/01801_approx_total_rows_mergetree_reverse/query.sql new file mode 100644 index 000000000..eecc9c9c1 --- /dev/null +++ b/parser/testdata/01801_approx_total_rows_mergetree_reverse/query.sql @@ -0,0 +1,13 @@ +drop table if exists data_01801; +create table data_01801 (key Int) engine=MergeTree() order by key settings index_granularity=10 as select number/10 from numbers(100); + +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. 
+SET parallel_replicas_index_analysis_only_on_coordinator = 0; + +select * from data_01801 where key = 0 order by key settings max_rows_to_read=9 format Null; -- { serverError TOO_MANY_ROWS } +select * from data_01801 where key = 0 order by key desc settings max_rows_to_read=9 format Null; -- { serverError TOO_MANY_ROWS } + +select * from data_01801 where key = 0 order by key settings max_rows_to_read=10 format Null; +select * from data_01801 where key = 0 order by key desc settings max_rows_to_read=10 format Null; + +drop table data_01801; diff --git a/parser/testdata/01801_dateDiff_DateTime64/ast.json b/parser/testdata/01801_dateDiff_DateTime64/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01801_dateDiff_DateTime64/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01801_dateDiff_DateTime64/metadata.json b/parser/testdata/01801_dateDiff_DateTime64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01801_dateDiff_DateTime64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01801_dateDiff_DateTime64/query.sql b/parser/testdata/01801_dateDiff_DateTime64/query.sql new file mode 100644 index 000000000..020559f4e --- /dev/null +++ b/parser/testdata/01801_dateDiff_DateTime64/query.sql @@ -0,0 +1,77 @@ +-- { echo } + +-- DateTime64 vs DateTime64 same scale +SELECT dateDiff('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 00:00:10', 0, 'UTC')); +SELECT dateDiff('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 00:10:00', 0, 'UTC')); +SELECT dateDiff('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 01:00:00', 0, 'UTC')); +SELECT dateDiff('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 01:10:10', 0, 'UTC')); + +SELECT dateDiff('minute', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 00:10:00', 0, 'UTC')); +SELECT dateDiff('minute', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 10:00:00', 0, 'UTC')); + +SELECT dateDiff('hour', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 10:00:00', 0, 'UTC')); + +SELECT dateDiff('day', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-02 00:00:00', 0, 'UTC')); +SELECT dateDiff('month', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-02-01 00:00:00', 0, 'UTC')); +SELECT dateDiff('year', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1928-01-01 00:00:00', 0, 'UTC')); + +-- DateTime64 vs DateTime64 different scale +SELECT dateDiff('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 00:00:10', 3, 'UTC')); +SELECT dateDiff('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 00:10:00', 3, 'UTC')); +SELECT dateDiff('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 01:00:00', 3, 'UTC')); +SELECT dateDiff('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 01:10:10', 3, 'UTC')); + +SELECT dateDiff('minute', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 00:10:00', 3, 'UTC')); +SELECT dateDiff('minute', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 10:00:00', 3, 'UTC')); + +SELECT dateDiff('hour', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 10:00:00', 3, 'UTC')); + +SELECT dateDiff('day', 
toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC')); +SELECT dateDiff('month', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-02-01 00:00:00', 3, 'UTC')); +SELECT dateDiff('year', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1928-01-01 00:00:00', 3, 'UTC')); + +-- With DateTime +-- DateTime64 vs DateTime +SELECT dateDiff('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 00:00:00', 'UTC')); +SELECT dateDiff('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 00:00:10', 'UTC')); +SELECT dateDiff('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 00:10:00', 'UTC')); +SELECT dateDiff('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 01:00:00', 'UTC')); +SELECT dateDiff('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 01:10:10', 'UTC')); + +-- DateTime vs DateTime64 +SELECT dateDiff('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 00:00:00', 3, 'UTC')); +SELECT dateDiff('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 00:00:10', 3, 'UTC')); +SELECT dateDiff('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 00:10:00', 3, 'UTC')); +SELECT dateDiff('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 01:00:00', 3, 'UTC')); +SELECT dateDiff('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 01:10:10', 3, 'UTC')); + +-- With Date +-- DateTime64 vs Date +SELECT dateDiff('day', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDate('2015-08-19', 'UTC')); + +-- Date vs DateTime64 +SELECT dateDiff('day', toDate('2015-08-18', 'UTC'), toDateTime64('2015-08-19 00:00:00', 3, 'UTC')); + +-- Same thing but const vs non-const columns +SELECT dateDiff('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), materialize(toDateTime64('1927-01-01 00:00:10', 0, 'UTC'))); +SELECT dateDiff('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), materialize(toDateTime64('1927-01-01 00:00:10', 3, 'UTC'))); +SELECT dateDiff('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), materialize(toDateTime('2015-08-18 00:00:10', 'UTC'))); +SELECT dateDiff('second', toDateTime('2015-08-18 00:00:00', 'UTC'), materialize(toDateTime64('2015-08-18 00:00:10', 3, 'UTC'))); +SELECT dateDiff('day', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), materialize(toDate('2015-08-19', 'UTC'))); +SELECT dateDiff('day', toDate('2015-08-18', 'UTC'), materialize(toDateTime64('2015-08-19 00:00:00', 3, 'UTC'))); + +-- Same thing but non-const vs const columns +SELECT dateDiff('second', materialize(toDateTime64('1927-01-01 00:00:00', 0, 'UTC')), toDateTime64('1927-01-01 00:00:10', 0, 'UTC')); +SELECT dateDiff('second', materialize(toDateTime64('1927-01-01 00:00:00', 6, 'UTC')), toDateTime64('1927-01-01 00:00:10', 3, 'UTC')); +SELECT dateDiff('second', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), toDateTime('2015-08-18 00:00:10', 'UTC')); +SELECT dateDiff('second', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), toDateTime64('2015-08-18 00:00:10', 3, 'UTC')); +SELECT dateDiff('day', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), toDate('2015-08-19', 'UTC')); +SELECT dateDiff('day', materialize(toDate('2015-08-18', 'UTC')), toDateTime64('2015-08-19 00:00:00', 3, 'UTC')); + +-- Same thing but non-const vs non-const columns +SELECT dateDiff('second', 
materialize(toDateTime64('1927-01-01 00:00:00', 0, 'UTC')), materialize(toDateTime64('1927-01-01 00:00:10', 0, 'UTC'))); +SELECT dateDiff('second', materialize(toDateTime64('1927-01-01 00:00:00', 6, 'UTC')), materialize(toDateTime64('1927-01-01 00:00:10', 3, 'UTC'))); +SELECT dateDiff('second', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), materialize(toDateTime('2015-08-18 00:00:10', 'UTC'))); +SELECT dateDiff('second', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), materialize(toDateTime64('2015-08-18 00:00:10', 3, 'UTC'))); +SELECT dateDiff('day', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), materialize(toDate('2015-08-19', 'UTC'))); +SELECT dateDiff('day', materialize(toDate('2015-08-18', 'UTC')), materialize(toDateTime64('2015-08-19 00:00:00', 3, 'UTC'))); diff --git a/parser/testdata/01801_distinct_group_by_shard/ast.json b/parser/testdata/01801_distinct_group_by_shard/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01801_distinct_group_by_shard/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01801_distinct_group_by_shard/metadata.json b/parser/testdata/01801_distinct_group_by_shard/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01801_distinct_group_by_shard/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01801_distinct_group_by_shard/query.sql b/parser/testdata/01801_distinct_group_by_shard/query.sql new file mode 100644 index 000000000..848077f05 --- /dev/null +++ b/parser/testdata/01801_distinct_group_by_shard/query.sql @@ -0,0 +1,3 @@ +-- Tags: shard + +SELECT DISTINCT a FROM remote('127.0.0.{1,2,3}', values('a UInt8, b UInt8', (1, 2), (1, 3))) GROUP BY a, b; diff --git a/parser/testdata/01801_s3_cluster/ast.json b/parser/testdata/01801_s3_cluster/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01801_s3_cluster/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01801_s3_cluster/metadata.json b/parser/testdata/01801_s3_cluster/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01801_s3_cluster/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01801_s3_cluster/query.sql b/parser/testdata/01801_s3_cluster/query.sql new file mode 100644 index 000000000..f94f1102d --- /dev/null +++ b/parser/testdata/01801_s3_cluster/query.sql @@ -0,0 +1,38 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +select * from s3('http://localhost:11111/test/{a,b,c}.tsv') ORDER BY c1, c2, c3; +select * from s3('http://localhost:11111/test/{a,b,c}.tsv', NOSIGN) ORDER BY c1, c2, c3; +select * from s3('http://localhost:11111/test/{a,b,c}.tsv', 'TSV') ORDER BY c1, c2, c3; +select * from s3('http://localhost:11111/test/{a,b,c}.tsv', NOSIGN, 'TSV') ORDER BY c1, c2, c3; +select * from s3('http://localhost:11111/test/{a,b,c}.tsv', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64') ORDER BY c1, c2, c3; +select * from s3('http://localhost:11111/test/{a,b,c}.tsv', NOSIGN, 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64') ORDER BY c1, c2, c3; +select * from s3('http://localhost:11111/test/{a,b,c}.tsv', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64', 'auto') ORDER BY c1, c2, c3; +select * from s3('http://localhost:11111/test/{a,b,c}.tsv', NOSIGN, 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64', 'auto') ORDER BY c1, c2, c3; +select * from s3('http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest') ORDER BY c1, c2, c3; +select 
* from s3('http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', '') ORDER BY c1, c2, c3; +select * from s3('http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', 'TSV') ORDER BY c1, c2, c3; +select * from s3('http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', '', 'TSV') ORDER BY c1, c2, c3; +select * from s3('http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64') ORDER BY c1, c2, c3; +select * from s3('http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', '', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64') ORDER BY c1, c2, c3; +select * from s3('http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64', 'auto') ORDER BY c1, c2, c3; +select * from s3('http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', '', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64', 'auto') ORDER BY c1, c2, c3; + + +select * from s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv') ORDER BY c1, c2, c3; +select * from s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', NOSIGN) ORDER BY c1, c2, c3; +select * from s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'TSV') ORDER BY c1, c2, c3; +select * from s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', NOSIGN, 'TSV') ORDER BY c1, c2, c3; +select * from s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64') ORDER BY c1, c2, c3; +select * from s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', NOSIGN, 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64') ORDER BY c1, c2, c3; +select * from s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64', 'auto') ORDER BY c1, c2, c3; +select * from s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', NOSIGN, 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64', 'auto') ORDER BY c1, c2, c3; +select * from s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest') ORDER BY c1, c2, c3; +select * from s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', '') ORDER BY c1, c2, c3; +select * from s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', 'TSV') ORDER BY c1, c2, c3; +select * from s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', '', 'TSV') ORDER BY c1, c2, c3; +select * from s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64') ORDER BY c1, c2, c3; +select * from s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', '', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64') ORDER BY c1, c2, c3; +select * from s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64', 'auto') ORDER BY c1, c2, c3; +select * from s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', '', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64', 'auto') ORDER BY c1, c2, c3; + diff 
--git a/parser/testdata/01801_s3_cluster_count/ast.json b/parser/testdata/01801_s3_cluster_count/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01801_s3_cluster_count/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01801_s3_cluster_count/metadata.json b/parser/testdata/01801_s3_cluster_count/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01801_s3_cluster_count/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01801_s3_cluster_count/query.sql b/parser/testdata/01801_s3_cluster_count/query.sql new file mode 100644 index 000000000..8a4fb8049 --- /dev/null +++ b/parser/testdata/01801_s3_cluster_count/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +select COUNT() from s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv'); +select COUNT(*) from s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv'); diff --git a/parser/testdata/01802_formatDateTime_DateTime64_century/ast.json b/parser/testdata/01802_formatDateTime_DateTime64_century/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01802_formatDateTime_DateTime64_century/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01802_formatDateTime_DateTime64_century/metadata.json b/parser/testdata/01802_formatDateTime_DateTime64_century/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01802_formatDateTime_DateTime64_century/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01802_formatDateTime_DateTime64_century/query.sql b/parser/testdata/01802_formatDateTime_DateTime64_century/query.sql new file mode 100644 index 000000000..712afd28c --- /dev/null +++ b/parser/testdata/01802_formatDateTime_DateTime64_century/query.sql @@ -0,0 +1,16 @@ +-- { echo } + +SELECT formatDateTime(toDateTime64('1935-12-12 12:12:12', 0, 'Asia/Istanbul'), '%C'); +SELECT formatDateTime(toDateTime64('1969-12-12 12:12:12', 0, 'Asia/Istanbul'), '%C'); +SELECT formatDateTime(toDateTime64('1989-12-12 12:12:12', 0, 'Asia/Istanbul'), '%C'); +SELECT formatDateTime(toDateTime64('2019-09-16 19:20:12', 0, 'Asia/Istanbul'), '%C'); +SELECT formatDateTime(toDateTime64('2105-12-12 12:12:12', 0, 'Asia/Istanbul'), '%C'); +SELECT formatDateTime(toDateTime64('2205-12-12 12:12:12', 0, 'Asia/Istanbul'), '%C'); + +-- non-zero scale +SELECT formatDateTime(toDateTime64('1935-12-12 12:12:12', 6, 'Asia/Istanbul'), '%C'); +SELECT formatDateTime(toDateTime64('1969-12-12 12:12:12', 6, 'Asia/Istanbul'), '%C'); +SELECT formatDateTime(toDateTime64('1989-12-12 12:12:12', 6, 'Asia/Istanbul'), '%C'); +SELECT formatDateTime(toDateTime64('2019-09-16 19:20:12', 0, 'Asia/Istanbul'), '%C'); +SELECT formatDateTime(toDateTime64('2105-12-12 12:12:12', 6, 'Asia/Istanbul'), '%C'); +SELECT formatDateTime(toDateTime64('2205-01-12 12:12:12', 6, 'Asia/Istanbul'), '%C'); \ No newline at end of file diff --git a/parser/testdata/01802_rank_corr_mann_whitney_over_window/ast.json b/parser/testdata/01802_rank_corr_mann_whitney_over_window/ast.json new file mode 100644 index 000000000..a8d59de2b --- /dev/null +++ b/parser/testdata/01802_rank_corr_mann_whitney_over_window/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 01802_empsalary (children 1)" + }, + { + 
"explain": " Identifier 01802_empsalary" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001455295, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/01802_rank_corr_mann_whitney_over_window/metadata.json b/parser/testdata/01802_rank_corr_mann_whitney_over_window/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01802_rank_corr_mann_whitney_over_window/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01802_rank_corr_mann_whitney_over_window/query.sql b/parser/testdata/01802_rank_corr_mann_whitney_over_window/query.sql new file mode 100644 index 000000000..ee3bdc63f --- /dev/null +++ b/parser/testdata/01802_rank_corr_mann_whitney_over_window/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS 01802_empsalary; + +CREATE TABLE 01802_empsalary +( + `depname` LowCardinality(String), + `empno` UInt64, + `salary` Int32, + `enroll_date` Date +) +ENGINE = MergeTree +ORDER BY enroll_date +SETTINGS index_granularity = 8192; + +INSERT INTO 01802_empsalary VALUES ('sales', 1, 5000, '2006-10-01'), ('develop', 8, 6000, '2006-10-01'), ('personnel', 2, 3900, '2006-12-23'), ('develop', 10, 5200, '2007-08-01'), ('sales', 3, 4800, '2007-08-01'), ('sales', 4, 4801, '2007-08-08'), ('develop', 11, 5200, '2007-08-15'), ('personnel', 5, 3500, '2007-12-10'), ('develop', 7, 4200, '2008-01-01'), ('develop', 9, 4500, '2008-01-01'); + +SELECT mannWhitneyUTest(salary, salary) OVER (ORDER BY salary ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS func FROM 01802_empsalary; -- {serverError BAD_ARGUMENTS} + +DROP TABLE IF EXISTS 01802_empsalary; diff --git a/parser/testdata/01802_toDateTime64_large_values/ast.json b/parser/testdata/01802_toDateTime64_large_values/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01802_toDateTime64_large_values/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01802_toDateTime64_large_values/metadata.json b/parser/testdata/01802_toDateTime64_large_values/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01802_toDateTime64_large_values/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01802_toDateTime64_large_values/query.sql b/parser/testdata/01802_toDateTime64_large_values/query.sql new file mode 100644 index 000000000..5c2e65188 --- /dev/null +++ b/parser/testdata/01802_toDateTime64_large_values/query.sql @@ -0,0 +1,10 @@ +-- { echo } + +SELECT toDateTime64('2205-12-12 12:12:12', 0, 'UTC'); +SELECT toDateTime64('2205-12-12 12:12:12', 0, 'Asia/Istanbul'); + +SELECT toDateTime64('2205-12-12 12:12:12', 6, 'Asia/Istanbul'); +SELECT toDateTime64('2205-12-12 12:12:12', 6, 'Asia/Istanbul'); + +SELECT toDateTime64('2299-12-31 23:59:59', 3, 'UTC'); +SELECT toDateTime64('2299-12-31 23:59:59', 3, 'UTC'); \ No newline at end of file diff --git a/parser/testdata/01803_const_nullable_map/ast.json b/parser/testdata/01803_const_nullable_map/ast.json new file mode 100644 index 000000000..ce5b32087 --- /dev/null +++ b/parser/testdata/01803_const_nullable_map/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_map_null (children 1)" + }, + { + "explain": " Identifier t_map_null" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00091511, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01803_const_nullable_map/metadata.json 
b/parser/testdata/01803_const_nullable_map/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01803_const_nullable_map/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01803_const_nullable_map/query.sql b/parser/testdata/01803_const_nullable_map/query.sql new file mode 100644 index 000000000..6496dfb39 --- /dev/null +++ b/parser/testdata/01803_const_nullable_map/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS t_map_null; + +CREATE TABLE t_map_null (a Map(String, String), b String) engine = MergeTree() ORDER BY a; +INSERT INTO t_map_null VALUES (map('a', 'b', 'c', 'd'), 'foo'); +SELECT count() FROM t_map_null WHERE a = map('name', NULL, '', NULL); + +DROP TABLE t_map_null; diff --git a/parser/testdata/01803_untuple_subquery/ast.json b/parser/testdata/01803_untuple_subquery/ast.json new file mode 100644 index 000000000..dd1113422 --- /dev/null +++ b/parser/testdata/01803_untuple_subquery/ast.json @@ -0,0 +1,214 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal Tuple_(Float64_0.5, '92233720368547758.07', NULL)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal '1.00'" + }, + { + "explain": " Function untuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Tuple_('256', NULL)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 7)" + }, + { + "explain": " Function untuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function untuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Tuple_(Tuple_('0.0000000100', Tuple_(UInt64_65536, NULL, Tuple_(UInt64_65535, UInt64_9223372036854775807), '25.7', Tuple_(Float64_0.00009999999747378752, '10.25', UInt64_1048577), UInt64_65536)), '0.0000001024', '65537', NULL)" + }, + { + "explain": " Function untuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Tuple_(UInt64_9223372036854775807, Float64_-inf, Float64_0.5)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal Int64_-9223372036854775808" + }, + { + "explain": " Literal UInt64_257" + }, + { + "explain": " Literal UInt64_7" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '0.0001048575'" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal UInt64_1024" + }, + { + "explain": " Literal NULL" + }, + { 
+ "explain": " Literal Tuple_(UInt64_7, UInt64_3)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function untuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function negate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal '0.0001048577'" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal Tuple_(UInt64_0, Float64_0.9998999834060669, '65537')" + }, + { + "explain": " Function untuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '10.25'" + } + ], + + "rows": 64, + + "statistics": + { + "elapsed": 0.001373573, + "rows_read": 64, + "bytes_read": 3164 + } +} diff --git a/parser/testdata/01803_untuple_subquery/metadata.json b/parser/testdata/01803_untuple_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01803_untuple_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01803_untuple_subquery/query.sql b/parser/testdata/01803_untuple_subquery/query.sql new file mode 100644 index 000000000..512b4c561 --- /dev/null +++ b/parser/testdata/01803_untuple_subquery/query.sql @@ -0,0 +1,3 @@ +SELECT (0.5, '92233720368547758.07', NULL), '', '1.00', untuple(('256', NULL)), NULL FROM (SELECT untuple(((NULL, untuple((('0.0000000100', (65536, NULL, (65535, 9223372036854775807), '25.7', (0.00009999999747378752, '10.25', 1048577), 65536)), '0.0000001024', '65537', NULL))), untuple((9223372036854775807, -inf, 0.5)), NULL, -9223372036854775808)), 257, 7, ('0.0001048575', (1024, NULL, (7, 3), (untuple(tuple(-NULL)), NULL, '0.0001048577', NULL), 0)), 0, (0, 0.9998999834060669, '65537'), untuple(tuple('10.25'))); + +SELECT NULL FROM (SELECT untuple((NULL, dummy))); diff --git a/parser/testdata/01804_dictionary_decimal256_type/ast.json b/parser/testdata/01804_dictionary_decimal256_type/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01804_dictionary_decimal256_type/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01804_dictionary_decimal256_type/metadata.json b/parser/testdata/01804_dictionary_decimal256_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01804_dictionary_decimal256_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01804_dictionary_decimal256_type/query.sql b/parser/testdata/01804_dictionary_decimal256_type/query.sql new file mode 100644 index 000000000..32b029442 --- /dev/null +++ b/parser/testdata/01804_dictionary_decimal256_type/query.sql @@ -0,0 +1,127 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS dictionary_decimal_source_table; +CREATE TABLE dictionary_decimal_source_table +( + id UInt64, + decimal_value Decimal256(5) +) ENGINE = TinyLog; + +INSERT INTO dictionary_decimal_source_table VALUES (1, 5.0); + +DROP DICTIONARY IF EXISTS flat_dictionary; +CREATE DICTIONARY flat_dictionary +( + id UInt64, + decimal_value Decimal256(5) +) +PRIMARY KEY id 
+SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'dictionary_decimal_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(FLAT()); + +SELECT 'Flat dictionary'; +SELECT dictGet('flat_dictionary', 'decimal_value', toUInt64(1)); + +DROP DICTIONARY flat_dictionary; + +DROP DICTIONARY IF EXISTS hashed_dictionary; +CREATE DICTIONARY hashed_dictionary +( + id UInt64, + decimal_value Decimal256(5) +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'dictionary_decimal_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(HASHED()); + +SELECT 'Hashed dictionary'; +SELECT dictGet('hashed_dictionary', 'decimal_value', toUInt64(1)); + +DROP DICTIONARY hashed_dictionary; + +DROP DICTIONARY IF EXISTS cache_dictionary; +CREATE DICTIONARY cache_dictionary +( + id UInt64, + decimal_value Decimal256(5) +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'dictionary_decimal_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(CACHE(SIZE_IN_CELLS 10)); + +SELECT 'Cache dictionary'; +SELECT dictGet('cache_dictionary', 'decimal_value', toUInt64(1)); + +DROP DICTIONARY cache_dictionary; + +DROP DICTIONARY IF EXISTS direct_dictionary; +CREATE DICTIONARY direct_dictionary +( + id UInt64, + decimal_value Decimal256(5) +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'dictionary_decimal_source_table')) +LAYOUT(DIRECT()); + +SELECT 'Direct dictionary'; +SELECT dictGet('direct_dictionary', 'decimal_value', toUInt64(1)); + +DROP DICTIONARY direct_dictionary; + +DROP TABLE dictionary_decimal_source_table; + +DROP TABLE IF EXISTS ip_trie_dictionary_decimal_source_table; +CREATE TABLE ip_trie_dictionary_decimal_source_table +( + prefix String, + decimal_value Decimal256(5) +) ENGINE = TinyLog; + +INSERT INTO ip_trie_dictionary_decimal_source_table VALUES ('127.0.0.0', 5.0); + +DROP DICTIONARY IF EXISTS ip_trie_dictionary; +CREATE DICTIONARY ip_trie_dictionary +( + prefix String, + decimal_value Decimal256(5) +) +PRIMARY KEY prefix +SOURCE(CLICKHOUSE(HOST 'localhost' port tcpPort() TABLE 'ip_trie_dictionary_decimal_source_table')) +LIFETIME(MIN 10 MAX 1000) +LAYOUT(IP_TRIE()); + +SELECT 'IPTrie dictionary'; +SELECT dictGet('ip_trie_dictionary', 'decimal_value', tuple(IPv4StringToNum('127.0.0.0'))); + +DROP DICTIONARY ip_trie_dictionary; +DROP TABLE ip_trie_dictionary_decimal_source_table; + +DROP TABLE IF EXISTS dictionary_decimal_polygons_source_table; +CREATE TABLE dictionary_decimal_polygons_source_table +( + key Array(Array(Array(Tuple(Float64, Float64)))), + decimal_value Decimal256(5) +) ENGINE = TinyLog; + +INSERT INTO dictionary_decimal_polygons_source_table VALUES ([[[(0, 0), (0, 1), (1, 1), (1, 0)]]], 5.0); + +DROP DICTIONARY IF EXISTS polygon_dictionary; +CREATE DICTIONARY polygon_dictionary +( + key Array(Array(Array(Tuple(Float64, Float64)))), + decimal_value Decimal256(5) +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'dictionary_decimal_polygons_source_table')) +LIFETIME(MIN 0 MAX 1000) +LAYOUT(POLYGON()); + +SELECT 'Polygon dictionary'; +SELECT dictGet('polygon_dictionary', 'decimal_value', tuple(0.5, 0.5)); + +DROP DICTIONARY polygon_dictionary; +DROP TABLE dictionary_decimal_polygons_source_table; diff --git a/parser/testdata/01804_uniq_up_to_ubsan/ast.json b/parser/testdata/01804_uniq_up_to_ubsan/ast.json new file mode 100644 index 000000000..d7262a0e2 --- /dev/null +++ b/parser/testdata/01804_uniq_up_to_ubsan/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + 
], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function uniqUpTo (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_1e100" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_5" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.000983358, + "rows_read": 15, + "bytes_read": 589 + } +} diff --git a/parser/testdata/01804_uniq_up_to_ubsan/metadata.json b/parser/testdata/01804_uniq_up_to_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01804_uniq_up_to_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01804_uniq_up_to_ubsan/query.sql b/parser/testdata/01804_uniq_up_to_ubsan/query.sql new file mode 100644 index 000000000..d2bcdb121 --- /dev/null +++ b/parser/testdata/01804_uniq_up_to_ubsan/query.sql @@ -0,0 +1,2 @@ +SELECT uniqUpTo(1e100)(number) FROM numbers(5); -- { serverError CANNOT_CONVERT_TYPE } +SELECT uniqUpTo(-1e100)(number) FROM numbers(5); -- { serverError CANNOT_CONVERT_TYPE } diff --git a/parser/testdata/01809_inactive_parts_to_delay_throw_insert/ast.json b/parser/testdata/01809_inactive_parts_to_delay_throw_insert/ast.json new file mode 100644 index 000000000..f6f1d2903 --- /dev/null +++ b/parser/testdata/01809_inactive_parts_to_delay_throw_insert/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_01809 (children 1)" + }, + { + "explain": " Identifier data_01809" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000999714, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01809_inactive_parts_to_delay_throw_insert/metadata.json b/parser/testdata/01809_inactive_parts_to_delay_throw_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01809_inactive_parts_to_delay_throw_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01809_inactive_parts_to_delay_throw_insert/query.sql b/parser/testdata/01809_inactive_parts_to_delay_throw_insert/query.sql new file mode 100644 index 000000000..e9bbfe694 --- /dev/null +++ b/parser/testdata/01809_inactive_parts_to_delay_throw_insert/query.sql @@ -0,0 +1,12 @@ +drop table if exists data_01809; + +create table data_01809 (i int) engine MergeTree order by i settings old_parts_lifetime = 10000000000, min_bytes_for_wide_part = 0, inactive_parts_to_throw_insert = 0, inactive_parts_to_delay_insert = 1; + +insert into data_01809 values (1); +insert into data_01809 values (2); + +optimize table data_01809 final; + +insert into data_01809 values (3); + +drop table data_01809; diff --git a/parser/testdata/01811_datename/ast.json b/parser/testdata/01811_datename/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01811_datename/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/01811_datename/metadata.json b/parser/testdata/01811_datename/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01811_datename/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01811_datename/query.sql b/parser/testdata/01811_datename/query.sql new file mode 100644 index 000000000..fe9f5d202 --- /dev/null +++ b/parser/testdata/01811_datename/query.sql @@ -0,0 +1,79 @@ +WITH + toDate('2021-04-14') AS date_value, + toDate32('2021-04-14') AS date_32_value, + toDateTime('2021-04-14 11:22:33') AS date_time_value, + toDateTime64('2021-04-14 11:22:33', 3) AS date_time_64_value +SELECT dateName('year', date_value), dateName('year', date_32_value), dateName('year', date_time_value), dateName('year', date_time_64_value); + +WITH + toDate('2021-04-14') AS date_value, + toDate32('2021-04-14') AS date_32_value, + toDateTime('2021-04-14 11:22:33') AS date_time_value, + toDateTime64('2021-04-14 11:22:33', 3) AS date_time_64_value +SELECT dateName('quarter', date_value), dateName('quarter', date_32_value), dateName('quarter', date_time_value), dateName('quarter', date_time_64_value); + +WITH + toDate('2021-04-14') AS date_value, + toDate32('2021-04-14') AS date_32_value, + toDateTime('2021-04-14 11:22:33') AS date_time_value, + toDateTime64('2021-04-14 11:22:33', 3) AS date_time_64_value +SELECT dateName('month', date_value), dateName('month', date_32_value), dateName('month', date_time_value), dateName('month', date_time_64_value); + +WITH + toDate('2021-04-14') AS date_value, + toDate32('2021-04-14') AS date_32_value, + toDateTime('2021-04-14 11:22:33') AS date_time_value, + toDateTime64('2021-04-14 11:22:33', 3) AS date_time_64_value +SELECT dateName('dayofyear', date_value), dateName('dayofyear', date_32_value), dateName('dayofyear', date_time_value), dateName('dayofyear', date_time_64_value); + +WITH + toDate('2021-04-14') AS date_value, + toDate32('2021-04-14') AS date_32_value, + toDateTime('2021-04-14 11:22:33') AS date_time_value, + toDateTime64('2021-04-14 11:22:33', 3) AS date_time_64_value +SELECT dateName('day', date_value), dateName('day', date_32_value), dateName('day', date_time_value), dateName('day', date_time_64_value); + +WITH + toDate('2021-04-14') AS date_value, + toDate32('2021-04-14') AS date_32_value, + toDateTime('2021-04-14 11:22:33') AS date_time_value, + toDateTime64('2021-04-14 11:22:33', 3) AS date_time_64_value +SELECT dateName('week', date_value), dateName('week', date_32_value), dateName('week', date_time_value), dateName('week', date_time_64_value); + +WITH + toDate('2021-04-14') AS date_value, + toDate32('2021-04-14') AS date_32_value, + toDateTime('2021-04-14 11:22:33') AS date_time_value, + toDateTime64('2021-04-14 11:22:33', 3) AS date_time_64_value +SELECT dateName('weekday', date_value), dateName('weekday', date_32_value), dateName('weekday', date_time_value), dateName('weekday', date_time_64_value); + +WITH + toDateTime('2021-04-14 11:22:33') AS date_time_value, + toDateTime64('2021-04-14 11:22:33', 3) AS date_time_64_value +SELECT dateName('hour', date_time_value), dateName('hour', date_time_64_value); + +WITH + toDateTime('2021-04-14 11:22:33') AS date_time_value, + toDateTime64('2021-04-14 11:22:33', 3) AS date_time_64_value +SELECT dateName('minute', date_time_value), dateName('minute', date_time_64_value); + +WITH + toDateTime('2021-04-14 11:22:33') AS date_time_value, + toDateTime64('2021-04-14 11:22:33', 3) AS date_time_64_value +SELECT dateName('second', 
date_time_value), dateName('second', date_time_64_value); + +WITH + toDateTime('2021-04-14 23:22:33', 'UTC') as date +SELECT + dateName('weekday', date, 'UTC'), + dateName('hour', date, 'UTC'), + dateName('minute', date, 'UTC'), + dateName('second', date, 'UTC'); + +WITH + toDateTime('2021-04-14 23:22:33', 'UTC') as date +SELECT + dateName('weekday', date, 'Asia/Istanbul'), + dateName('hour', date, 'Asia/Istanbul'), + dateName('minute', date, 'Asia/Istanbul'), + dateName('second', date, 'Asia/Istanbul'); diff --git a/parser/testdata/01811_filter_by_null/ast.json b/parser/testdata/01811_filter_by_null/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01811_filter_by_null/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01811_filter_by_null/metadata.json b/parser/testdata/01811_filter_by_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01811_filter_by_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01811_filter_by_null/query.sql b/parser/testdata/01811_filter_by_null/query.sql new file mode 100644 index 000000000..77d633525 --- /dev/null +++ b/parser/testdata/01811_filter_by_null/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on OpenSSL + +DROP TABLE IF EXISTS test_01344; + +CREATE TABLE test_01344 (x String, INDEX idx (x) TYPE set(10) GRANULARITY 1) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0; +INSERT INTO test_01344 VALUES ('Hello, world'); +SELECT NULL FROM test_01344 WHERE ignore(1) = NULL; +SELECT NULL FROM test_01344 WHERE encrypt(ignore(encrypt(NULL, '0.0001048577', lcm(2, 65537), NULL, inf, NULL), lcm(-2, 1048575)), '-0.0000000001', lcm(NULL, NULL)) = NULL; +SELECT NULL FROM test_01344 WHERE ignore(x, lcm(NULL, 1048576), -2) = NULL; + +DROP TABLE test_01344; diff --git a/parser/testdata/01812_has_generic/ast.json b/parser/testdata/01812_has_generic/ast.json new file mode 100644 index 000000000..4e460fd76 --- /dev/null +++ b/parser/testdata/01812_has_generic/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function has (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_2)" + }, + { + "explain": " Literal Tuple_(UInt64_3, UInt64_4)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toUInt16 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_4" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001072965, + "rows_read": 16, + "bytes_read": 648 + } +} diff --git a/parser/testdata/01812_has_generic/metadata.json b/parser/testdata/01812_has_generic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01812_has_generic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01812_has_generic/query.sql b/parser/testdata/01812_has_generic/query.sql new file mode 100644 
index 000000000..9ab5b6551 --- /dev/null +++ b/parser/testdata/01812_has_generic/query.sql @@ -0,0 +1,3 @@ +SELECT has([(1, 2), (3, 4)], (toUInt16(3), 4)); +SELECT hasAny([(1, 2), (3, 4)], [(toUInt16(3), 4)]); +SELECT hasAll([(1, 2), (3, 4)], [(toNullable(1), toUInt64(2)), (toUInt16(3), 4)]); diff --git a/parser/testdata/01812_optimize_skip_unused_shards_single_node/ast.json b/parser/testdata/01812_optimize_skip_unused_shards_single_node/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01812_optimize_skip_unused_shards_single_node/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01812_optimize_skip_unused_shards_single_node/metadata.json b/parser/testdata/01812_optimize_skip_unused_shards_single_node/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01812_optimize_skip_unused_shards_single_node/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01812_optimize_skip_unused_shards_single_node/query.sql b/parser/testdata/01812_optimize_skip_unused_shards_single_node/query.sql new file mode 100644 index 000000000..668c3511d --- /dev/null +++ b/parser/testdata/01812_optimize_skip_unused_shards_single_node/query.sql @@ -0,0 +1,5 @@ +-- Tags: shard + +-- remote() does not have a sharding key, while force_optimize_skip_unused_shards=2 requires the table to have one. +-- But since there is only one node, everything works. +select * from remote('127.1', system.one) settings optimize_skip_unused_shards=1, force_optimize_skip_unused_shards=2 format Null; diff --git a/parser/testdata/01813_distributed_scalar_subqueries_alias/ast.json b/parser/testdata/01813_distributed_scalar_subqueries_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01813_distributed_scalar_subqueries_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01813_distributed_scalar_subqueries_alias/metadata.json b/parser/testdata/01813_distributed_scalar_subqueries_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01813_distributed_scalar_subqueries_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01813_distributed_scalar_subqueries_alias/query.sql b/parser/testdata/01813_distributed_scalar_subqueries_alias/query.sql new file mode 100644 index 000000000..9ccea41c5 --- /dev/null +++ b/parser/testdata/01813_distributed_scalar_subqueries_alias/query.sql @@ -0,0 +1,20 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS data; +CREATE TABLE data (a Int64, b Int64) ENGINE = TinyLog(); + +DROP TABLE IF EXISTS data_distributed; +CREATE TABLE data_distributed (a Int64, b Int64) ENGINE = Distributed(test_shard_localhost, currentDatabase(), 'data'); + +INSERT INTO data VALUES (0, 0); + +SET prefer_localhost_replica = 1; +SELECT a / (SELECT sum(number) FROM numbers(10)) FROM data_distributed; +SELECT a < (SELECT 1) FROM data_distributed; + +SET prefer_localhost_replica = 0; +SELECT a / (SELECT sum(number) FROM numbers(10)) FROM data_distributed; +SELECT a < (SELECT 1) FROM data_distributed; + +DROP TABLE data_distributed; +DROP TABLE data; diff --git a/parser/testdata/01813_quantileBfloat16_nans/ast.json b/parser/testdata/01813_quantileBfloat16_nans/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01813_quantileBfloat16_nans/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01813_quantileBfloat16_nans/metadata.json 
b/parser/testdata/01813_quantileBfloat16_nans/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01813_quantileBfloat16_nans/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01813_quantileBfloat16_nans/query.sql b/parser/testdata/01813_quantileBfloat16_nans/query.sql new file mode 100644 index 000000000..1f823f0b9 --- /dev/null +++ b/parser/testdata/01813_quantileBfloat16_nans/query.sql @@ -0,0 +1,16 @@ +SELECT DISTINCT + eq +FROM + ( + WITH + range(2 + number % 10) AS arr, -- minimum two elements, to avoid nan result -- + arrayMap(x -> x = intDiv(number, 10) ? nan : x, arr) AS arr_with_nan, + arrayFilter(x -> x != intDiv(number, 10), arr) AS arr_filtered + SELECT + number, + arrayReduce('quantileBFloat16', arr_with_nan) AS q1, + arrayReduce('quantileBFloat16', arr_filtered) AS q2, + q1 = q2 AS eq + FROM + numbers(100) + ); diff --git a/parser/testdata/01817_storage_buffer_parameters/ast.json b/parser/testdata/01817_storage_buffer_parameters/ast.json new file mode 100644 index 000000000..f69caa308 --- /dev/null +++ b/parser/testdata/01817_storage_buffer_parameters/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_01817 (children 1)" + }, + { + "explain": " Identifier data_01817" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001447407, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01817_storage_buffer_parameters/metadata.json b/parser/testdata/01817_storage_buffer_parameters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01817_storage_buffer_parameters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01817_storage_buffer_parameters/query.sql b/parser/testdata/01817_storage_buffer_parameters/query.sql new file mode 100644 index 000000000..b973def84 --- /dev/null +++ b/parser/testdata/01817_storage_buffer_parameters/query.sql @@ -0,0 +1,42 @@ +drop table if exists data_01817; +drop table if exists buffer_01817; + +create table data_01817 (key Int) Engine=Null(); + +-- w/ flush_* +create table buffer_01817 (key Int) Engine=Buffer(currentDatabase(), data_01817, + /* num_layers= */ 1, + /* min_time= */ 1, /* max_time= */ 86400, + /* min_rows= */ 1e9, /* max_rows= */ 1e6, + /* min_bytes= */ 0, /* max_bytes= */ 4e6, + /* flush_time= */ 86400, /* flush_rows= */ 10, /* flush_bytes= */0 +); +drop table buffer_01817; + +-- w/o flush_* +create table buffer_01817 (key Int) Engine=Buffer(currentDatabase(), data_01817, + /* num_layers= */ 1, + /* min_time= */ 1, /* max_time= */ 86400, + /* min_rows= */ 1e9, /* max_rows= */ 1e6, + /* min_bytes= */ 0, /* max_bytes= */ 4e6 +); +drop table buffer_01817; + +-- not enough args +create table buffer_01817 (key Int) Engine=Buffer(currentDatabase(), data_01817, + /* num_layers= */ 1, + /* min_time= */ 1, /* max_time= */ 86400, + /* min_rows= */ 1e9, /* max_rows= */ 1e6, + /* min_bytes= */ 0 /* max_bytes= 4e6 */ +); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +-- too much args +create table buffer_01817 (key Int) Engine=Buffer(currentDatabase(), data_01817, + /* num_layers= */ 1, + /* min_time= */ 1, /* max_time= */ 86400, + /* min_rows= */ 1e9, /* max_rows= */ 1e6, + /* min_bytes= */ 0, /* max_bytes= */ 4e6, + /* flush_time= */ 86400, /* flush_rows= */ 10, /* flush_bytes= */0, + 0 +); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +drop table data_01817; diff --git 
a/parser/testdata/01818_case_float_value_fangyc/ast.json b/parser/testdata/01818_case_float_value_fangyc/ast.json new file mode 100644 index 000000000..9e2f3194e --- /dev/null +++ b/parser/testdata/01818_case_float_value_fangyc/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function caseWithExpression (alias f) (children 1)" + }, + { + "explain": " ExpressionList (children 8)" + }, + { + "explain": " Literal Float64_1.1" + }, + { + "explain": " Literal Float64_0.1" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal Float64_1.1" + }, + { + "explain": " Literal 'b'" + }, + { + "explain": " Literal Float64_2.1" + }, + { + "explain": " Literal 'c'" + }, + { + "explain": " Literal 'default'" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001441992, + "rows_read": 14, + "bytes_read": 489 + } +} diff --git a/parser/testdata/01818_case_float_value_fangyc/metadata.json b/parser/testdata/01818_case_float_value_fangyc/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01818_case_float_value_fangyc/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01818_case_float_value_fangyc/query.sql b/parser/testdata/01818_case_float_value_fangyc/query.sql new file mode 100644 index 000000000..3cdb8503e --- /dev/null +++ b/parser/testdata/01818_case_float_value_fangyc/query.sql @@ -0,0 +1 @@ +select case 1.1 when 0.1 then 'a' when 1.1 then 'b' when 2.1 then 'c' else 'default' end as f; diff --git a/parser/testdata/01818_move_partition_simple/ast.json b/parser/testdata/01818_move_partition_simple/ast.json new file mode 100644 index 000000000..dc15a1022 --- /dev/null +++ b/parser/testdata/01818_move_partition_simple/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery main_table_01818 (children 1)" + }, + { + "explain": " Identifier main_table_01818" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001462303, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/01818_move_partition_simple/metadata.json b/parser/testdata/01818_move_partition_simple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01818_move_partition_simple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01818_move_partition_simple/query.sql b/parser/testdata/01818_move_partition_simple/query.sql new file mode 100644 index 000000000..6ca3ae75e --- /dev/null +++ b/parser/testdata/01818_move_partition_simple/query.sql @@ -0,0 +1,122 @@ +DROP TABLE IF EXISTS main_table_01818; +DROP TABLE IF EXISTS tmp_table_01818; + + +CREATE TABLE main_table_01818 +( + `id` UInt32, + `advertiser_id` String, + `campaign_id` String, + `name` String, + `budget` Float64, + `budget_mode` String, + `landing_type` String, + `status` String, + `modify_time` String, + `campaign_type` String, + `campaign_create_time` DateTime, + `campaign_modify_time` DateTime, + `create_time` DateTime, + `update_time` DateTime +) +ENGINE = MergeTree +PARTITION BY advertiser_id +ORDER BY campaign_id +SETTINGS index_granularity = 8192; + +CREATE TABLE tmp_table_01818 +( + `id` UInt32, + `advertiser_id` String, + 
`campaign_id` String, + `name` String, + `budget` Float64, + `budget_mode` String, + `landing_type` String, + `status` String, + `modify_time` String, + `campaign_type` String, + `campaign_create_time` DateTime, + `campaign_modify_time` DateTime, + `create_time` DateTime, + `update_time` DateTime +) +ENGINE = MergeTree +PARTITION BY advertiser_id +ORDER BY campaign_id +SETTINGS index_granularity = 8192; + +SELECT 'INSERT INTO main_table_01818'; +INSERT INTO main_table_01818 SELECT 1 as `id`, 'ClickHouse' as `advertiser_id`, * EXCEPT (`id`, `advertiser_id`) +FROM generateRandom( + '`id` UInt32, + `advertiser_id` String, + `campaign_id` String, + `name` String, + `budget` Float64, + `budget_mode` String, + `landing_type` String, + `status` String, + `modify_time` String, + `campaign_type` String, + `campaign_create_time` DateTime, + `campaign_modify_time` DateTime, + `create_time` DateTime, + `update_time` DateTime', 10, 10, 10) +LIMIT 100; + +SELECT 'INSERT INTO tmp_table_01818'; +INSERT INTO tmp_table_01818 SELECT 2 as `id`, 'Database' as `advertiser_id`, * EXCEPT (`id`, `advertiser_id`) +FROM generateRandom( + '`id` UInt32, + `advertiser_id` String, + `campaign_id` String, + `name` String, + `budget` Float64, + `budget_mode` String, + `landing_type` String, + `status` String, + `modify_time` String, + `campaign_type` String, + `campaign_create_time` DateTime, + `campaign_modify_time` DateTime, + `create_time` DateTime, + `update_time` DateTime', 10, 10, 10) +LIMIT 100; + +SELECT 'INSERT INTO tmp_table_01818'; +INSERT INTO tmp_table_01818 SELECT 3 as `id`, 'ClickHouse' as `advertiser_id`, * EXCEPT (`id`, `advertiser_id`) +FROM generateRandom( + '`id` UInt32, + `advertiser_id` String, + `campaign_id` String, + `name` String, + `budget` Float64, + `budget_mode` String, + `landing_type` String, + `status` String, + `modify_time` String, + `campaign_type` String, + `campaign_create_time` DateTime, + `campaign_modify_time` DateTime, + `create_time` DateTime, + `update_time` DateTime', 10, 10, 10) +LIMIT 100; + +SELECT 'ALL tmp_table_01818', count() FROM tmp_table_01818; +SELECT 'ALL main_table_01818', count() FROM main_table_01818; +SELECT 'tmp_table_01818', count() FROM tmp_table_01818 WHERE `advertiser_id` = 'ClickHouse'; +SELECT 'main_table_01818', count() FROM main_table_01818 WHERE `advertiser_id` = 'ClickHouse'; + +SELECT 'Executing ALTER TABLE MOVE PARTITION...'; +ALTER TABLE tmp_table_01818 MOVE PARTITION 'ClickHouse' TO TABLE main_table_01818; + + +SELECT 'ALL tmp_table_01818', count() FROM tmp_table_01818; +SELECT 'ALL main_table_01818', count() FROM main_table_01818; +SELECT 'tmp_table_01818', count() FROM tmp_table_01818 WHERE `advertiser_id` = 'ClickHouse'; +SELECT 'main_table_01818', count() FROM main_table_01818 WHERE `advertiser_id` = 'ClickHouse'; + + +DROP TABLE IF EXISTS main_table_01818; +DROP TABLE IF EXISTS tmp_table_01818; diff --git a/parser/testdata/01820_unhex_case_insensitive/ast.json b/parser/testdata/01820_unhex_case_insensitive/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01820_unhex_case_insensitive/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01820_unhex_case_insensitive/metadata.json b/parser/testdata/01820_unhex_case_insensitive/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01820_unhex_case_insensitive/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01820_unhex_case_insensitive/query.sql 
b/parser/testdata/01820_unhex_case_insensitive/query.sql new file mode 100644 index 000000000..99d8031ee --- /dev/null +++ b/parser/testdata/01820_unhex_case_insensitive/query.sql @@ -0,0 +1,2 @@ +-- MySQL has function `unhex`, so we will make our function `unhex` also case insensitive for compatibility. +SELECT unhex('303132'), UNHEX('4D7953514C'); diff --git a/parser/testdata/01821_dictionary_primary_key_wrong_order/ast.json b/parser/testdata/01821_dictionary_primary_key_wrong_order/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01821_dictionary_primary_key_wrong_order/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01821_dictionary_primary_key_wrong_order/metadata.json b/parser/testdata/01821_dictionary_primary_key_wrong_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01821_dictionary_primary_key_wrong_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01821_dictionary_primary_key_wrong_order/query.sql b/parser/testdata/01821_dictionary_primary_key_wrong_order/query.sql new file mode 100644 index 000000000..c53c59df7 --- /dev/null +++ b/parser/testdata/01821_dictionary_primary_key_wrong_order/query.sql @@ -0,0 +1,26 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS dictionary_primary_key_source_table; +CREATE TABLE dictionary_primary_key_source_table +( + identifier UInt64, + v UInt64 +) ENGINE = TinyLog; + +INSERT INTO dictionary_primary_key_source_table VALUES (20, 1); + +DROP DICTIONARY IF EXISTS flat_dictionary; +CREATE DICTIONARY flat_dictionary +( + identifier UInt64, + v UInt64 +) +PRIMARY KEY v +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'dictionary_primary_key_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(FLAT()); + +SELECT * FROM flat_dictionary; + +DROP DICTIONARY flat_dictionary; +DROP TABLE dictionary_primary_key_source_table; diff --git a/parser/testdata/01821_join_table_mutation/ast.json b/parser/testdata/01821_join_table_mutation/ast.json new file mode 100644 index 000000000..39c71e2d6 --- /dev/null +++ b/parser/testdata/01821_join_table_mutation/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery join_table_mutation (children 1)" + }, + { + "explain": " Identifier join_table_mutation" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001359578, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/01821_join_table_mutation/metadata.json b/parser/testdata/01821_join_table_mutation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01821_join_table_mutation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01821_join_table_mutation/query.sql b/parser/testdata/01821_join_table_mutation/query.sql new file mode 100644 index 000000000..c9d82d07f --- /dev/null +++ b/parser/testdata/01821_join_table_mutation/query.sql @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS join_table_mutation; + +CREATE TABLE join_table_mutation(id Int32, name String) ENGINE = Join(ANY, LEFT, id); + +INSERT INTO join_table_mutation select number, toString(number) from numbers(100); + +SELECT count() FROM join_table_mutation; + +SELECT name FROM join_table_mutation WHERE id = 10; + +ALTER TABLE join_table_mutation DELETE WHERE id = 10; + +SELECT count() FROM join_table_mutation; + +SELECT name FROM join_table_mutation WHERE id = 10; + +INSERT INTO 
join_table_mutation VALUES (10, 'm10'); + +SELECT name FROM join_table_mutation WHERE id = 10; + +ALTER TABLE join_table_mutation DELETE WHERE id % 2 = 0; + +ALTER TABLE join_table_mutation UPDATE name = 'some' WHERE 1; -- {serverError NOT_IMPLEMENTED} + +SELECT count() FROM join_table_mutation; + +ALTER TABLE join_table_mutation DELETE WHERE name IN ('1', '2', '3', '4'); + +SELECT count() FROM join_table_mutation; + +ALTER TABLE join_table_mutation DELETE WHERE 1; + +SELECT count() FROM join_table_mutation; + +DROP TABLE join_table_mutation; diff --git a/parser/testdata/01821_table_comment/ast.json b/parser/testdata/01821_table_comment/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01821_table_comment/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01821_table_comment/metadata.json b/parser/testdata/01821_table_comment/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01821_table_comment/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01821_table_comment/query.sql b/parser/testdata/01821_table_comment/query.sql new file mode 100644 index 000000000..e97871e9d --- /dev/null +++ b/parser/testdata/01821_table_comment/query.sql @@ -0,0 +1,65 @@ +-- Tags: no-parallel, no-fasttest, use-rocksdb + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; + +CREATE TABLE t1 +( + `n` Int8 +) +ENGINE = Memory +COMMENT 'this is a temporary table'; + +CREATE TABLE t2 +( + `n` Int8 +) +ENGINE = MergeTree +ORDER BY n +COMMENT 'this is a MergeTree table'; + +CREATE TABLE t3 +( + `n` Int8 +) +ENGINE = Log +COMMENT 'this is a Log table'; + +CREATE TABLE t4 +( + `n` Int8 +) +ENGINE = Kafka +SETTINGS + kafka_broker_list = 'localhost:10000', + kafka_topic_list = 'test', + kafka_group_name = 'test', + kafka_format = 'JSONEachRow' +COMMENT 'this is a Kafka table'; + +CREATE TABLE t5 +( + `n` Int8 +) +ENGINE = EmbeddedRocksDB +PRIMARY KEY n +COMMENT 'this is a EmbeddedRocksDB table'; + +CREATE TABLE t6 +( + `n` Int8 +) +ENGINE = Executable('script.py', TabSeparated) +COMMENT 'this is a Executable table'; + +SELECT + name, + comment +FROM system.tables +WHERE name IN ('t1', 't2', 't3', 't4', 't5', 't6') + AND database = currentDatabase() order by name; + +SHOW CREATE TABLE t1; + +DROP TABLE t1, t2, t3, t4, t5, t6; diff --git a/parser/testdata/01821_to_date_time_ubsan/ast.json b/parser/testdata/01821_to_date_time_ubsan/ast.json new file mode 100644 index 000000000..f7de763c0 --- /dev/null +++ b/parser/testdata/01821_to_date_time_ubsan/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '9223372036854775806'" + }, + { + "explain": " Literal UInt64_7" + }, + { + "explain": " Literal 'Asia\/Istanbul'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.00109063, + "rows_read": 9, + "bytes_read": 343 + } +} diff --git a/parser/testdata/01821_to_date_time_ubsan/metadata.json b/parser/testdata/01821_to_date_time_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01821_to_date_time_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01821_to_date_time_ubsan/query.sql b/parser/testdata/01821_to_date_time_ubsan/query.sql new file mode 100644 index 000000000..5ec767fe4 --- /dev/null +++ b/parser/testdata/01821_to_date_time_ubsan/query.sql @@ -0,0 +1,2 @@ +SELECT toDateTime('9223372036854775806', 7, 'Asia/Istanbul'); +SELECT toDateTime('9223372036854775806', 8, 'Asia/Istanbul'); diff --git a/parser/testdata/01822_short_circuit/ast.json b/parser/testdata/01822_short_circuit/ast.json new file mode 100644 index 000000000..8a11d0863 --- /dev/null +++ b/parser/testdata/01822_short_circuit/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001476887, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01822_short_circuit/metadata.json b/parser/testdata/01822_short_circuit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01822_short_circuit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01822_short_circuit/query.sql b/parser/testdata/01822_short_circuit/query.sql new file mode 100644 index 000000000..c7379d210 --- /dev/null +++ b/parser/testdata/01822_short_circuit/query.sql @@ -0,0 +1,157 @@ +set short_circuit_function_evaluation = 'enable'; +set convert_query_to_cnf = 0; + +select if(number > 0, intDiv(number + 100, number), throwIf(number)) from numbers(10); +select multiIf(number == 0, 0, number == 1, intDiv(1, number), number == 2, intDiv(1, number - 1), number == 3, intDiv(1, number - 2), intDiv(1, number - 3)) from numbers(10); +select number != 0 and intDiv(1, number) == 0 and number != 2 and intDiv(1, number - 2) == 0 from numbers(10); +select number == 0 or intDiv(1, number) != 0 or number == 2 or intDiv(1, number - 2) != 0 from numbers(10); + +select count() from (select if(number >= 0, number, sleep(1)) from numbers(10000000)); + + +select if(number % 5 == 0, toInt8OrZero(toString(number)), toInt8OrZero(toString(number + 1))) from numbers(20); +select if(number % 5 == 0, toInt8OrZero(toString(number)), Null) from numbers(20); +select if(number % 5 == 0, Null, toInt8OrZero(toString(number))) from numbers(20); +select if(number % 5, Null, toInt8OrZero(toString(number))) from numbers(20); + +select if(number % 5 == 0, toUInt8OrZero(toString(number)), toUInt8OrZero(toString(number + 1))) from numbers(20); +select if(number % 5 == 0, toUInt8OrZero(toString(number)), Null) from numbers(20); +select if(number % 5 == 0, Null, toUInt8OrZero(toString(number))) from numbers(20); +select if(number % 5, Null, toUInt8OrZero(toString(number))) from numbers(20); + +select if(number % 5 == 0, toInt32OrZero(toString(number)), toInt32OrZero(toString(number + 1))) from numbers(20); +select if(number % 5 == 0, toInt32OrZero(toString(number)), Null) from numbers(20); +select if(number % 5 == 0, Null, toInt32OrZero(toString(number))) from numbers(20); +select if(number % 5, Null, toInt32OrZero(toString(number))) from numbers(20); + +select if(number % 5 == 0, toUInt32OrZero(toString(number)), toUInt32OrZero(toString(number + 1))) from numbers(20); +select if(number % 5 == 0, toUInt32OrZero(toString(number)), Null) from numbers(20); +select if(number % 5 == 0, Null, toUInt32OrZero(toString(number))) from numbers(20); +select if(number % 5, Null, toUInt32OrZero(toString(number))) from 
numbers(20); + +select if(number % 5 == 0, toInt64OrZero(toString(number)), toInt64OrZero(toString(number + 1))) from numbers(20); +select if(number % 5 == 0, toInt64OrZero(toString(number)), Null) from numbers(20); +select if(number % 5 == 0, Null, toInt64OrZero(toString(number))) from numbers(20); +select if(number % 5, Null, toInt64OrZero(toString(number))) from numbers(20); + +select if(number % 5 == 0, toUInt64OrZero(toString(number)), toUInt64OrZero(toString(number + 1))) from numbers(20); +select if(number % 5 == 0, toUInt64OrZero(toString(number)), Null) from numbers(20); +select if(number % 5 == 0, Null, toUInt64OrZero(toString(number))) from numbers(20); +select if(number % 5, Null, toUInt64OrZero(toString(number))) from numbers(20); + +select if(number % 5 == 0, toInt128OrZero(toString(number)), toInt128OrZero(toString(number + 1))) from numbers(20); +select if(number % 5 == 0, toInt128OrZero(toString(number)), Null) from numbers(20); +select if(number % 5 == 0, Null, toInt128OrZero(toString(number))) from numbers(20); +select if(number % 5, Null, toInt128OrZero(toString(number))) from numbers(20); + +select if(number % 5 == 0, toUInt128OrZero(toString(number)), toUInt128OrZero(toString(number + 1))) from numbers(20); +select if(number % 5 == 0, toUInt128OrZero(toString(number)), Null) from numbers(20); +select if(number % 5 == 0, Null, toUInt128OrZero(toString(number))) from numbers(20); +select if(number % 5, Null, toUInt128OrZero(toString(number))) from numbers(20); + +select if(number % 5 == 0, toInt256OrZero(toString(number)), toInt256OrZero(toString(number + 1))) from numbers(20); +select if(number % 5 == 0, toInt256OrZero(toString(number)), Null) from numbers(20); +select if(number % 5 == 0, Null, toInt256OrZero(toString(number))) from numbers(20); +select if(number % 5, Null, toInt256OrZero(toString(number))) from numbers(20); + +select if(number % 5 == 0, toUInt256OrZero(toString(number)), toUInt256OrZero(toString(number + 1))) from numbers(20); +select if(number % 5 == 0, toUInt256OrZero(toString(number)), Null) from numbers(20); +select if(number % 5 == 0, Null, toUInt256OrZero(toString(number))) from numbers(20); +select if(number % 5, Null, toUInt256OrZero(toString(number))) from numbers(20); + +select if(number % 5 == 0, toFloat32OrZero(toString(number)), toFloat32OrZero(toString(number + 1))) from numbers(20); +select if(number % 5 == 0, toFloat32OrZero(toString(number)), Null) from numbers(20); +select if(number % 5 == 0, Null, toFloat32OrZero(toString(number))) from numbers(20); +select if(number % 5, Null, toFloat32OrZero(toString(number))) from numbers(20); + +select if(number % 5 == 0, toFloat64OrZero(toString(number)), toFloat64OrZero(toString(number + 1))) from numbers(20); +select if(number % 5 == 0, toFloat64OrZero(toString(number)), Null) from numbers(20); +select if(number % 5 == 0, Null, toFloat64OrZero(toString(number))) from numbers(20); +select if(number % 5, Null, toFloat64OrZero(toString(number))) from numbers(20); + +select if(number % 5 == 0, repeat(toString(number), 2), repeat(toString(number + 1), 2)) from numbers(20); +select if(number % 5 == 0, repeat(toString(number), 2), Null) from numbers(20); +select if(number % 5 == 0, Null, repeat(toString(number), 2)) from numbers(20); +select if(number % 5, Null, repeat(toString(number), 2)) from numbers(20); + +select if(number % 5 == 0, toFixedString(toString(number + 10), 2), toFixedString(toString(number + 11), 2)) from numbers(20); +select if(number % 5 == 0, toFixedString(toString(number + 10), 
2), Null) from numbers(20); +select if(number % 5 == 0, Null, toFixedString(toString(number + 10), 2)) from numbers(20); +select if(number % 5, Null, toFixedString(toString(number + 10), 2)) from numbers(20); + +select if(number % 5 == 0, toDateOrZero(toString(number)), toDateOrZero(toString(number + 1))) from numbers(20); +select if(number % 5 == 0, toDateOrZero(toString(number)), Null) from numbers(20); +select if(number % 5 == 0, Null, toDateOrZero(toString(number))) from numbers(20); +select if(number % 5, Null, toDateOrZero(toString(number))) from numbers(20); + +select if(number % 5 == 0, toDateTimeOrZero(toString(number * 10000), 'UTC'), toDateTimeOrZero(toString((number + 1) * 10000), 'UTC')) from numbers(20); +select if(number % 5 == 0, toDateTimeOrZero(toString(number * 10000), 'UTC'), Null) from numbers(20); +select if(number % 5 == 0, Null, toDateTimeOrZero(toString(number * 10000), 'UTC')) from numbers(20); +select if(number % 5, Null, toDateTimeOrZero(toString(number * 10000), 'UTC')) from numbers(20); + +select if(number % 5 == 0, toDecimal32OrZero(toString(number), 5), toDecimal32OrZero(toString(number + 1), 5)) from numbers(20); +select if(number % 5 == 0, toDecimal32OrZero(toString(number), 5), Null) from numbers(20); +select if(number % 5 == 0, Null, toDecimal32OrZero(toString(number), 5)) from numbers(20); +select if(number % 5, Null, toDecimal32OrZero(toString(number), 5)) from numbers(20); + +select if(number % 5 == 0, toDecimal64OrZero(toString(number), 5), toDecimal64OrZero(toString(number + 1), 5)) from numbers(20); +select if(number % 5 == 0, toDecimal64OrZero(toString(number), 5), Null) from numbers(20); +select if(number % 5 == 0, Null, toDecimal64OrZero(toString(number), 5)) from numbers(20); +select if(number % 5, Null, toDecimal64OrZero(toString(number), 5)) from numbers(20); + +select if(number % 5 == 0, toDecimal128OrZero(toString(number), 5), toDecimal128OrZero(toString(number + 1), 5)) from numbers(20); +select if(number % 5 == 0, toDecimal128OrZero(toString(number), 5), Null) from numbers(20); +select if(number % 5 == 0, Null, toDecimal128OrZero(toString(number), 5)) from numbers(20); +select if(number % 5, Null, toDecimal128OrZero(toString(number), 5)) from numbers(20); + +select if(number % 5 == 0, toDecimal256OrZero(toString(number), 5), toDecimal256OrZero(toString(number + 1), 5)) from numbers(20); +select if(number % 5 == 0, toDecimal256OrZero(toString(number), 5), Null) from numbers(20); +select if(number % 5 == 0, Null, toDecimal256OrZero(toString(number), 5)) from numbers(20); +select if(number % 5, Null, toDecimal256OrZero(toString(number), 5)) from numbers(20); + +select if(number % 5 == 0, range(number), range(number + 1)) from numbers(20); +select if(number % 5 == 0, replicate(toString(number), range(number)), replicate(toString(number), range(number + 1))) from numbers(20); + +select number > 0 and 5 and intDiv(100, number) from numbers(5); +select number > 0 and Null and intDiv(100, number) from numbers(5); +select number == 0 or 5 or intDiv(100, number) from numbers(5); +select multiIf(number % 2 != 0, intDiv(10, number % 2), 5, intDiv(10, 1 - number % 2), intDiv(10, number)) from numbers(5); + +select if(number != 0, 5 * (1 + intDiv(100, number)), toInt32(exp(log(throwIf(number) + 10)))) from numbers(5); +select if(number % 2, 5 * (1 + intDiv(100, number + 1)), 3 + 10 * intDiv(100, intDiv(100, number + 1))) from numbers(10); + +select sum(number) FROM numbers(10) WHERE number != 0 and 3 % number and number != 1 and intDiv(1, number - 1) > 
0; +select multiIf(0, 1, intDiv(number % 2, 1), 2, 0, 3, 1, number + 10, 2) from numbers(10); + +select toTypeName(toString(number)) from numbers(5); +select toColumnTypeName(toString(number)) from numbers(5); + +select toTypeName(toInt64OrZero(toString(number))) from numbers(5); +select toColumnTypeName(toInt64OrZero(toString(number))) from numbers(5); + +select toTypeName(toDecimal32OrZero(toString(number), 5)) from numbers(5); +select toColumnTypeName(toDecimal32OrZero(toString(number), 5)) from numbers(5); + +select if(if(number > 0, intDiv(42, number), 0), intDiv(42, number), 8) from numbers(5); +select if(number > 0, intDiv(42, number), 0), if(number = 0, 0, intDiv(42, number)) from numbers(5); + +select Null or isNull(intDiv(number, 1)) from numbers(5); + +set compile_expressions = 1; +select if(number > 0, intDiv(42, number), 1) from numbers(5); +select if(number > 0, intDiv(42, number), 1) from numbers(5); +select if(number > 0, intDiv(42, number), 1) from numbers(5); +select if(number > 0, intDiv(42, number), 1) from numbers(5); + +select if(number > 0, 42 / toDecimal32(number, 2), 0) from numbers(5); +select if(number = 0, 0, toDecimal32(42, 2) / number) from numbers(5); +select if(isNull(x), Null, 42 / x) from (select CAST(materialize(Null), 'Nullable(Decimal32(2))') as x); +select if(isNull(x), Null, x / 0) from (select CAST(materialize(Null), 'Nullable(Decimal32(2))') as x); + +select if(isNull(x), Null, intDiv(42, x)) from (select CAST(materialize(Null), 'Nullable(Int64)') as x); + +select number % 2 and toLowCardinality(number) from numbers(5); +select number % 2 or toLowCardinality(number) from numbers(5); +select if(toLowCardinality(number) % 2, number, number + 1) from numbers(10); +select multiIf(toLowCardinality(number) % 2, number, number + 1) from numbers(10); + diff --git a/parser/testdata/01822_union_and_constans_error/ast.json b/parser/testdata/01822_union_and_constans_error/ast.json new file mode 100644 index 000000000..35c2f89d2 --- /dev/null +++ b/parser/testdata/01822_union_and_constans_error/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001413513, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01822_union_and_constans_error/metadata.json b/parser/testdata/01822_union_and_constans_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01822_union_and_constans_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01822_union_and_constans_error/query.sql b/parser/testdata/01822_union_and_constans_error/query.sql new file mode 100644 index 000000000..9017e8769 --- /dev/null +++ b/parser/testdata/01822_union_and_constans_error/query.sql @@ -0,0 +1,20 @@ +drop table if exists t0; +CREATE TABLE t0 (c0 String) ENGINE = Log(); + +SELECT isNull(t0.c0) OR COUNT('\n?pVa') +FROM t0 +GROUP BY t0.c0 +HAVING isNull(t0.c0) +UNION ALL +SELECT isNull(t0.c0) OR COUNT('\n?pVa') +FROM t0 +GROUP BY t0.c0 +HAVING NOT isNull(t0.c0) +UNION ALL +SELECT isNull(t0.c0) OR COUNT('\n?pVa') +FROM t0 +GROUP BY t0.c0 +HAVING isNull(isNull(t0.c0)) +SETTINGS aggregate_functions_null_for_empty = 1, enable_optimize_predicate_expression = 0 format Null; + +drop table if exists t0; diff --git a/parser/testdata/01823_array_low_cardinality_KuliginStepan/ast.json 
b/parser/testdata/01823_array_low_cardinality_KuliginStepan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01823_array_low_cardinality_KuliginStepan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01823_array_low_cardinality_KuliginStepan/metadata.json b/parser/testdata/01823_array_low_cardinality_KuliginStepan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01823_array_low_cardinality_KuliginStepan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01823_array_low_cardinality_KuliginStepan/query.sql b/parser/testdata/01823_array_low_cardinality_KuliginStepan/query.sql new file mode 100644 index 000000000..528a3b464 --- /dev/null +++ b/parser/testdata/01823_array_low_cardinality_KuliginStepan/query.sql @@ -0,0 +1,7 @@ +create temporary table test ( + arr Array(Array(LowCardinality(String))) +); + +insert into test(arr) values ([['a'], ['b', 'c']]); + +select arrayFilter(x -> 1, arr) from test; diff --git a/parser/testdata/01824_move_to_prewhere_many_columns/ast.json b/parser/testdata/01824_move_to_prewhere_many_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01824_move_to_prewhere_many_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01824_move_to_prewhere_many_columns/metadata.json b/parser/testdata/01824_move_to_prewhere_many_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01824_move_to_prewhere_many_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01824_move_to_prewhere_many_columns/query.sql b/parser/testdata/01824_move_to_prewhere_many_columns/query.sql new file mode 100644 index 000000000..6ad804ac1 --- /dev/null +++ b/parser/testdata/01824_move_to_prewhere_many_columns/query.sql @@ -0,0 +1,44 @@ +-- Tags: no-random-merge-tree-settings + +SET optimize_move_to_prewhere = 1; +SET convert_query_to_cnf = 0; +SET move_all_conditions_to_prewhere = 0; + +DROP TABLE IF EXISTS t_move_to_prewhere; + +CREATE TABLE t_move_to_prewhere (id UInt32, a UInt8, b UInt8, c UInt8, fat_string String) +ENGINE = MergeTree ORDER BY id PARTITION BY id +SETTINGS min_rows_for_wide_part = 100, min_bytes_for_wide_part = 0; + +INSERT INTO t_move_to_prewhere SELECT 1, number % 2 = 0, number % 3 = 0, number % 5 = 0, repeat('a', 1000) FROM numbers(1000); +INSERT INTO t_move_to_prewhere SELECT 2, number % 2 = 0, number % 3 = 0, number % 5 = 0, repeat('a', 1000) FROM numbers(10); + +SELECT partition, part_type FROM system.parts +WHERE table = 't_move_to_prewhere' AND database = currentDatabase() +ORDER BY partition; + +SELECT count() FROM t_move_to_prewhere WHERE a AND b AND c AND NOT ignore(fat_string); +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_move_to_prewhere WHERE a AND b AND c AND NOT ignore(fat_string)) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter%'; + +DROP TABLE IF EXISTS t_move_to_prewhere; + +-- With only compact parts, we cannot move all 3 conditions to PREWHERE: +-- column sizes are unknown, so the optimizer can only count the columns used in the conditions. +-- Moving too many columns to PREWHERE can sometimes be harmful.
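+-- (The wide-part thresholds in the CREATE TABLE below keep every part compact: +-- a part switches to the wide format only after exceeding min_rows_for_wide_part +-- or min_bytes_for_wide_part.)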
+ +CREATE TABLE t_move_to_prewhere (id UInt32, a UInt8, b UInt8, c UInt8, fat_string String) +ENGINE = MergeTree ORDER BY id PARTITION BY id +SETTINGS min_rows_for_wide_part = 10000, min_bytes_for_wide_part = 100000000; + +INSERT INTO t_move_to_prewhere SELECT 1, number % 2 = 0, number % 3 = 0, number % 5 = 0, repeat('a', 1000) FROM numbers(1000); +INSERT INTO t_move_to_prewhere SELECT 2, number % 2 = 0, number % 3 = 0, number % 5 = 0, repeat('a', 1000) FROM numbers(10); + +SELECT partition, part_type FROM system.parts +WHERE table = 't_move_to_prewhere' AND database = currentDatabase() +ORDER BY partition; + +SELECT count() FROM t_move_to_prewhere WHERE a AND b AND c AND NOT ignore(fat_string); +EXPLAIN SYNTAX SELECT count() FROM t_move_to_prewhere WHERE a AND b AND c AND NOT ignore(fat_string); +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_move_to_prewhere WHERE a AND b AND c AND NOT ignore(fat_string)) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter%'; + +DROP TABLE IF EXISTS t_move_to_prewhere; diff --git a/parser/testdata/01824_prefer_global_in_and_join/ast.json b/parser/testdata/01824_prefer_global_in_and_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01824_prefer_global_in_and_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01824_prefer_global_in_and_join/metadata.json b/parser/testdata/01824_prefer_global_in_and_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01824_prefer_global_in_and_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01824_prefer_global_in_and_join/query.sql b/parser/testdata/01824_prefer_global_in_and_join/query.sql new file mode 100644 index 000000000..01ef768b4 --- /dev/null +++ b/parser/testdata/01824_prefer_global_in_and_join/query.sql @@ -0,0 +1,66 @@ +-- Tags: global, no-parallel + +-- { echo } +CREATE DATABASE IF NOT EXISTS test_01824; +USE test_01824; + +DROP TABLE IF EXISTS t1_shard; +DROP TABLE IF EXISTS t2_shard; +DROP TABLE IF EXISTS t1_distr; +DROP TABLE IF EXISTS t2_distr; + +create table t1_shard (id Int32) engine MergeTree order by id; +create table t2_shard (id Int32) engine MergeTree order by id; + +create table t1_distr as t1_shard engine Distributed(test_cluster_two_shards_localhost, test_01824, t1_shard, id); +create table t2_distr as t2_shard engine Distributed(test_cluster_two_shards_localhost, test_01824, t2_shard, id); + +insert into t1_shard values (42); +insert into t2_shard values (42); + +SET prefer_global_in_and_join = 1; + +select d0.id from t1_distr d0 +join ( + select d1.id + from t1_distr as d1 + inner join t2_distr as d2 on d1.id = d2.id + where d1.id > 0 + order by d1.id +) s0 using id; + +explain syntax select d0.id from t1_distr d0 +join ( + select d1.id + from t1_distr as d1 + inner join t2_distr as d2 on d1.id = d2.id + where d1.id > 0 + order by d1.id +) s0 using id; + +-- Force using local mode +set distributed_product_mode = 'local'; + +select d0.id from t1_distr d0 +join ( + select d1.id + from t1_distr as d1 + inner join t2_distr as d2 on d1.id = d2.id + where d1.id > 0 + order by d1.id +) s0 using id; + +explain syntax select d0.id from t1_distr d0 +join ( + select d1.id + from t1_distr as d1 + inner join t2_distr as d2 on d1.id = d2.id + where d1.id > 0 + order by d1.id +) s0 using id; + +DROP TABLE t1_shard; +DROP TABLE t2_shard; +DROP TABLE t1_distr; +DROP TABLE t2_distr; +DROP DATABASE test_01824; 
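+ +-- Illustration (not part of the recorded test): with prefer_global_in_and_join = 1 +-- the plain joins above are executed as if written with an explicit GLOBAL, e.g. +-- select d1.id from t1_distr as d1 global inner join t2_distr as d2 on d1.id = d2.id;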
diff --git a/parser/testdata/01825_new_type_json_10/ast.json b/parser/testdata/01825_new_type_json_10/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01825_new_type_json_10/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01825_new_type_json_10/metadata.json b/parser/testdata/01825_new_type_json_10/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01825_new_type_json_10/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01825_new_type_json_10/query.sql b/parser/testdata/01825_new_type_json_10/query.sql new file mode 100644 index 000000000..b4d22df3f --- /dev/null +++ b/parser/testdata/01825_new_type_json_10/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-fasttest + +SET enable_json_type = 1; +SET allow_suspicious_types_in_order_by = 1; + +DROP TABLE IF EXISTS t_json_10; +CREATE TABLE t_json_10 (o JSON) ENGINE = Memory; + +INSERT INTO t_json_10 FORMAT JSONAsObject {"a": {"b": 1, "c": [{"d": 10, "e": [31]}, {"d": 20, "e": [63, 127]}]}} {"a": {"b": 2, "c": []}} + +INSERT INTO t_json_10 FORMAT JSONAsObject {"a": {"b": 3, "c": [{"f": 20, "e": [32]}, {"f": 30, "e": [64, 128]}]}} {"a": {"b": 4, "c": []}} + +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(o)) as path FROM t_json_10 order by path; +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(o.a.c.:`Array(JSON)`))) as path FROM t_json_10 order by path; +SELECT o FROM t_json_10 ORDER BY o.a.b FORMAT JSONEachRow; +SELECT o.a.b, o.a.c.:`Array(JSON)`.d, o.a.c.:`Array(JSON)`.e, o.a.c.:`Array(JSON)`.f FROM t_json_10 ORDER BY o.a.b; + +DROP TABLE t_json_10; diff --git a/parser/testdata/01825_new_type_json_18/ast.json b/parser/testdata/01825_new_type_json_18/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01825_new_type_json_18/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01825_new_type_json_18/metadata.json b/parser/testdata/01825_new_type_json_18/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01825_new_type_json_18/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01825_new_type_json_18/query.sql b/parser/testdata/01825_new_type_json_18/query.sql new file mode 100644 index 000000000..97a5e4670 --- /dev/null +++ b/parser/testdata/01825_new_type_json_18/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-fasttest + +SET enable_json_type = 1; + +DROP TABLE IF EXISTS t_json_2; + +CREATE TABLE t_json_2(id UInt64, data JSON) +ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_json_2 FORMAT JSONEachRow {"id": 1, "data" : {"k1": 1}}; + +SELECT id, data, JSONAllPathsWithTypes(data) FROM t_json_2 ORDER BY id; + +TRUNCATE TABLE t_json_2; + +INSERT INTO t_json_2 FORMAT JSONEachRow {"id": 1, "data" : {"k1": [1, 2]}}; + +SELECT id, data, JSONAllPathsWithTypes(data) FROM t_json_2 ORDER BY id; diff --git a/parser/testdata/01825_new_type_json_2/ast.json b/parser/testdata/01825_new_type_json_2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01825_new_type_json_2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01825_new_type_json_2/metadata.json b/parser/testdata/01825_new_type_json_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01825_new_type_json_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01825_new_type_json_2/query.sql 
b/parser/testdata/01825_new_type_json_2/query.sql new file mode 100644 index 000000000..b2aa363b6 --- /dev/null +++ b/parser/testdata/01825_new_type_json_2/query.sql @@ -0,0 +1,42 @@ +-- Tags: no-fasttest + +SET enable_json_type = 1; +SET input_format_json_infer_array_of_dynamic_from_array_of_different_types = 0; + +DROP TABLE IF EXISTS t_json_2; + +CREATE TABLE t_json_2(id UInt64, data JSON) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_01825_2/t_json_2', 'r1') ORDER BY tuple(); + +INSERT INTO t_json_2 FORMAT JSONEachRow {"id": 1, "data": {"k1": 1, "k2" : 2}} {"id": 2, "data": {"k2": 3, "k3" : 4}}; + +SELECT id, data, JSONAllPathsWithTypes(data) FROM t_json_2 ORDER BY id; +SELECT id, data.k1, data.k2, data.k3 FROM t_json_2 ORDER BY id; + +INSERT INTO t_json_2 FORMAT JSONEachRow {"id": 3, "data": {"k3" : 10}} {"id": 4, "data": {"k2": 5, "k3" : "str"}}; + +SELECT id, data, JSONAllPathsWithTypes(data) FROM t_json_2 ORDER BY id; +SELECT id, data.k1, data.k2, data.k3 FROM t_json_2 ORDER BY id; + +SELECT '============'; +TRUNCATE TABLE t_json_2; + +INSERT INTO TABLE t_json_2 FORMAT JSONEachRow {"id": 1, "data": {"k1" : [1, 2, 3.3]}}; + +SELECT id, data, JSONAllPathsWithTypes(data) FROM t_json_2 ORDER BY id; +SELECT id, data.k1 FROM t_json_2 ORDER BY id; + +INSERT INTO TABLE t_json_2 FORMAT JSONEachRow {"id": 2, "data": {"k1" : ["a", 4, "b"]}}; + +SELECT id, data, JSONAllPathsWithTypes(data) FROM t_json_2 ORDER BY id; +SELECT id, data.k1 FROM t_json_2 ORDER BY id; + +SELECT '============'; +TRUNCATE TABLE t_json_2; + +INSERT INTO TABLE t_json_2 FORMAT JSONEachRow {"id": 1, "data": {"k1" : [{"k2" : 11}, {"k3" : 22}]}} {"id": 2, "data": {"k1" : [{"k3" : 33}, {"k4" : 44}, {"k3" : 55, "k4" : 66}]}}; + +SELECT id, data, JSONAllPathsWithTypes(data) FROM t_json_2 ORDER BY id; +SELECT id, data.k1.k2, data.k1.k3, data.k1.k4 FROM t_json_2 ORDER BY id; + +DROP TABLE t_json_2; diff --git a/parser/testdata/01825_new_type_json_9/ast.json b/parser/testdata/01825_new_type_json_9/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01825_new_type_json_9/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01825_new_type_json_9/metadata.json b/parser/testdata/01825_new_type_json_9/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01825_new_type_json_9/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01825_new_type_json_9/query.sql b/parser/testdata/01825_new_type_json_9/query.sql new file mode 100644 index 000000000..57e9c557e --- /dev/null +++ b/parser/testdata/01825_new_type_json_9/query.sql @@ -0,0 +1,17 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_json; + +SET enable_json_type = 1; + +CREATE TABLE t_json(id UInt64, obj JSON) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_json format JSONEachRow {"id": 1, "obj": {"foo": 1, "k1": 2}}; + +INSERT INTO t_json format JSONEachRow {"id": 2, "obj": {"foo": 1, "k2": 2}}; + +OPTIMIZE TABLE t_json FINAL; + +SELECT distinct arrayJoin(JSONAllPathsWithTypes(obj)) as path from t_json order by path; + +DROP TABLE IF EXISTS t_json; diff --git a/parser/testdata/01825_new_type_json_bools/ast.json b/parser/testdata/01825_new_type_json_bools/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01825_new_type_json_bools/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01825_new_type_json_bools/metadata.json b/parser/testdata/01825_new_type_json_bools/metadata.json new
file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01825_new_type_json_bools/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01825_new_type_json_bools/query.sql b/parser/testdata/01825_new_type_json_bools/query.sql new file mode 100644 index 000000000..89c3888cc --- /dev/null +++ b/parser/testdata/01825_new_type_json_bools/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_json_bools; +SET enable_json_type = 1; + +CREATE TABLE t_json_bools (data JSON) ENGINE = Memory; +INSERT INTO t_json_bools VALUES ('{"k1": true, "k2": false}'); +SELECT data, JSONAllPathsWithTypes(data) FROM t_json_bools; + +DROP TABLE t_json_bools; diff --git a/parser/testdata/01825_new_type_json_distributed/ast.json b/parser/testdata/01825_new_type_json_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01825_new_type_json_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01825_new_type_json_distributed/metadata.json b/parser/testdata/01825_new_type_json_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01825_new_type_json_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01825_new_type_json_distributed/query.sql b/parser/testdata/01825_new_type_json_distributed/query.sql new file mode 100644 index 000000000..9b8ad5b5e --- /dev/null +++ b/parser/testdata/01825_new_type_json_distributed/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-fasttest + +SET enable_json_type = 1; + +DROP TABLE IF EXISTS t_json_local; +DROP TABLE IF EXISTS t_json_dist; + +CREATE TABLE t_json_local(data JSON) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE t_json_dist AS t_json_local ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), t_json_local); + +INSERT INTO t_json_local FORMAT JSONAsObject {"k1": 2, "k2": {"k3": "qqq", "k4": [44, 55]}} +; + +SELECT data, JSONAllPathsWithTypes(data) FROM t_json_dist; +SELECT data.k1, data.k2.k3, data.k2.k4 FROM t_json_dist; + +DROP TABLE IF EXISTS t_json_local; +DROP TABLE IF EXISTS t_json_dist; diff --git a/parser/testdata/01825_new_type_json_ephemeral/ast.json b/parser/testdata/01825_new_type_json_ephemeral/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01825_new_type_json_ephemeral/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01825_new_type_json_ephemeral/metadata.json b/parser/testdata/01825_new_type_json_ephemeral/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01825_new_type_json_ephemeral/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01825_new_type_json_ephemeral/query.sql b/parser/testdata/01825_new_type_json_ephemeral/query.sql new file mode 100644 index 000000000..16f979d1b --- /dev/null +++ b/parser/testdata/01825_new_type_json_ephemeral/query.sql @@ -0,0 +1,18 @@ + +SET enable_json_type = 1; + +DROP TABLE IF EXISTS t_github_json; + +CREATE table t_github_json +( + event_type LowCardinality(String) DEFAULT JSONExtractString(message_raw, 'type'), + repo_name LowCardinality(String) DEFAULT JSONExtractString(message_raw, 'repo', 'name'), + message JSON DEFAULT empty(message_raw) ? 
'{}' : message_raw, + message_raw String EPHEMERAL +) ENGINE = MergeTree ORDER BY (event_type, repo_name); + +INSERT INTO t_github_json (message_raw) FORMAT JSONEachRow {"message_raw": "{\"type\":\"PushEvent\", \"created_at\": \"2022-01-04 07:00:00\", \"actor\":{\"avatar_url\":\"https://avatars.githubusercontent.com/u/123213213?\",\"display_login\":\"github-actions\",\"gravatar_id\":\"\",\"id\":123123123,\"login\":\"github-actions[bot]\",\"url\":\"https://api.github.com/users/github-actions[bot]\"},\"repo\":{\"id\":1001001010101,\"name\":\"some-repo\",\"url\":\"https://api.github.com/repos/some-repo\"}}"} + +SELECT * FROM t_github_json ORDER BY event_type, repo_name; + +DROP TABLE t_github_json; diff --git a/parser/testdata/01825_new_type_json_in_array/ast.json b/parser/testdata/01825_new_type_json_in_array/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01825_new_type_json_in_array/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01825_new_type_json_in_array/metadata.json b/parser/testdata/01825_new_type_json_in_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01825_new_type_json_in_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01825_new_type_json_in_array/query.sql b/parser/testdata/01825_new_type_json_in_array/query.sql new file mode 100644 index 000000000..b6597a304 --- /dev/null +++ b/parser/testdata/01825_new_type_json_in_array/query.sql @@ -0,0 +1,43 @@ +-- Tags: no-fasttest + +SET enable_json_type = 1; +SET enable_analyzer = 1; +SET allow_suspicious_types_in_order_by = 1; +SET allow_suspicious_types_in_group_by = 1; +SET output_format_native_write_json_as_string = 0; + +DROP TABLE IF EXISTS t_json_array; + +CREATE TABLE t_json_array (id UInt32, arr Array(JSON)) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_json_array FORMAT JSONEachRow {"id": 1, "arr": [{"k1": 1, "k2": {"k3": 2, "k4": 3}}, {"k1": 2, "k2": {"k5": "foo"}}]} + +INSERT INTO t_json_array FORMAT JSONEachRow {"id": 2, "arr": [{"k1": 3, "k2": {"k3": 4, "k4": 5}}]} + + +SELECT * FROM t_json_array ORDER BY id FORMAT JSONEachRow; +SELECT id, arr.k1, arr.k2.k3, arr.k2.k4, arr.k2.k5 FROM t_json_array ORDER BY id; +SELECT arr FROM t_json_array ARRAY JOIN arr ORDER BY arr.k1 FORMAT JSONEachRow; +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arr))) as path FROM t_json_array order by path; + +TRUNCATE TABLE t_json_array; + +INSERT INTO t_json_array FORMAT JSONEachRow {"id": 1, "arr": [{"k1": [{"k2": "aaa", "k3": "bbb"}, {"k2": "ccc"}]}]} + +INSERT INTO t_json_array FORMAT JSONEachRow {"id": 2, "arr": [{"k1": [{"k3": "ddd", "k4": 10}, {"k4": 20}], "k5": {"k6": "foo"}}]} + +SELECT * FROM t_json_array ORDER BY id FORMAT JSONEachRow; +SELECT id, arr.k1[].k2, arr.k1[].k3, arr.k1[].k4, arr.k5.k6 FROM t_json_array ORDER BY id; + +SELECT arrayJoin(arrayJoin(arr.k1[])) AS k1 FROM t_json_array ORDER BY toString(k1) FORMAT JSONEachRow; +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(arrayJoin(arrayJoin(arr.k1[])))) AS path FROM t_json_array order by path; + +SELECT arr.k1 FROM t_json_array GROUP BY arr.k1 ORDER BY toString(arr.k1); + +DROP TABLE t_json_array; + +SELECT * FROM values('arr Array(JSON)', '[\'{"x" : 1}\']') FORMAT JSONEachRow; +SELECT * FROM values('arr Map(String, JSON)', '{\'x\' : \'{"y" : 1}\', \'t\' : \'{"y" : 2}\'}') FORMAT JSONEachRow; +SELECT * FROM values('arr Tuple(Int32, JSON)', '(1, \'{"y" : 1}\')', '(2, \'{"y" : 2}\')') FORMAT JSONEachRow; +SELECT * 
FROM format(JSONEachRow, '{"arr" : [{"x" : "aaa", "y" : [1,2,3]}]}') FORMAT JSONEachRow; +SELECT * FROM values('arr Array(JSON)', '[\'{"x" : 1}\']') FORMAT JSONEachRow; diff --git a/parser/testdata/01825_new_type_json_insert_select/ast.json b/parser/testdata/01825_new_type_json_insert_select/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01825_new_type_json_insert_select/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01825_new_type_json_insert_select/metadata.json b/parser/testdata/01825_new_type_json_insert_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01825_new_type_json_insert_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01825_new_type_json_insert_select/query.sql b/parser/testdata/01825_new_type_json_insert_select/query.sql new file mode 100644 index 000000000..54c0f542b --- /dev/null +++ b/parser/testdata/01825_new_type_json_insert_select/query.sql @@ -0,0 +1,75 @@ +-- Tags: no-fasttest + +SET enable_json_type = 1; +SET allow_suspicious_types_in_order_by = 1; +SET parallel_replicas_local_plan = 1; + +DROP TABLE IF EXISTS type_json_src; +DROP TABLE IF EXISTS type_json_dst; + +CREATE TABLE type_json_src (id UInt32, data JSON) ENGINE = MergeTree ORDER BY id; +CREATE TABLE type_json_dst AS type_json_src; + +INSERT INTO type_json_src VALUES (1, '{"k1": 1, "k2": "foo"}'); + +INSERT INTO type_json_dst SELECT * FROM type_json_src; + +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM type_json_dst ORDER BY path; +SELECT id, data FROM type_json_dst ORDER BY id; + +INSERT INTO type_json_src VALUES (2, '{"k1": 2, "k2": "bar"}') (3, '{"k1": 3, "k3": "aaa"}'); + +INSERT INTO type_json_dst SELECT * FROM type_json_src WHERE id > 1; + +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM type_json_dst ORDER BY path; +SELECT id, data FROM type_json_dst ORDER BY id; + +INSERT INTO type_json_dst VALUES (4, '{"arr": [{"k11": 5, "k22": 6}, {"k11": 7, "k33": 8}]}'); + +INSERT INTO type_json_src VALUES (5, '{"arr": "not array"}'); + +INSERT INTO type_json_dst SELECT * FROM type_json_src WHERE id = 5; + +TRUNCATE TABLE type_json_src; +INSERT INTO type_json_src VALUES (6, '{"arr": [{"k22": "str1"}]}'); + +INSERT INTO type_json_dst SELECT * FROM type_json_src WHERE id = 5; + +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM type_json_dst ORDER BY path; +SELECT id, data FROM type_json_dst ORDER BY id; + +DROP TABLE type_json_src; +DROP TABLE type_json_dst; + +CREATE TABLE type_json_dst (data JSON) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE type_json_src (data String) ENGINE = MergeTree ORDER BY tuple(); + +SYSTEM STOP MERGES type_json_src; + +SET max_threads = 1; +SET max_insert_threads = 1; +SET output_format_json_named_tuples_as_objects = 1; + +INSERT INTO type_json_src FORMAT JSONAsString {"k1": 1, "k10": [{"a": "1", "b": "2"}, {"a": "2", "b": "3"}]}; + +INSERT INTO type_json_src FORMAT JSONAsString {"k1": 2, "k10": [{"a": "1", "b": "2", "c": {"k11": "haha"}}]}; + +INSERT INTO type_json_dst SELECT data FROM type_json_src; + +SELECT * FROM type_json_dst ORDER BY data.k1 FORMAT JSONEachRow; +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM type_json_dst ORDER BY path; + +TRUNCATE TABLE type_json_src; +TRUNCATE TABLE type_json_dst; + +INSERT INTO type_json_src FORMAT JSONAsString {"k1": 2, "k10": [{"a": "1", "b": "2", "c": {"k11": 
"haha"}}]}; + +INSERT INTO type_json_src FORMAT JSONAsString {"k1": 1, "k10": [{"a": "1", "b": "2"}, {"a": "2", "b": "3"}]}; + +INSERT INTO type_json_dst SELECT data FROM type_json_src; + +SELECT * FROM type_json_dst ORDER BY data.k1 FORMAT JSONEachRow; +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(data)) AS path FROM type_json_dst ORDER BY path; + +DROP TABLE type_json_src; +DROP TABLE type_json_dst; diff --git a/parser/testdata/01825_new_type_json_missed_values/ast.json b/parser/testdata/01825_new_type_json_missed_values/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01825_new_type_json_missed_values/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01825_new_type_json_missed_values/metadata.json b/parser/testdata/01825_new_type_json_missed_values/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01825_new_type_json_missed_values/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01825_new_type_json_missed_values/query.sql b/parser/testdata/01825_new_type_json_missed_values/query.sql new file mode 100644 index 000000000..5d61a10d4 --- /dev/null +++ b/parser/testdata/01825_new_type_json_missed_values/query.sql @@ -0,0 +1,19 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_json; + +SET enable_json_type = 1; + +CREATE TABLE t_json(id UInt64, obj JSON) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0; + +SYSTEM STOP MERGES t_json; + +INSERT INTO t_json SELECT number, '{"k1": 1, "k2": 2}' FROM numbers(1000000); +INSERT INTO t_json VALUES (1000001, '{"foo": 1}'); + +SELECT DISTINCT arrayJoin(JSONAllPathsWithTypes(obj)) AS path FROM t_json ORDER BY path; +SELECT count() FROM t_json WHERE obj.foo IS NOT NULL; + +DROP TABLE IF EXISTS t_json; diff --git a/parser/testdata/01825_new_type_json_mutations/ast.json b/parser/testdata/01825_new_type_json_mutations/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01825_new_type_json_mutations/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01825_new_type_json_mutations/metadata.json b/parser/testdata/01825_new_type_json_mutations/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01825_new_type_json_mutations/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01825_new_type_json_mutations/query.sql b/parser/testdata/01825_new_type_json_mutations/query.sql new file mode 100644 index 000000000..46ceead22 --- /dev/null +++ b/parser/testdata/01825_new_type_json_mutations/query.sql @@ -0,0 +1,21 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_json_mutations; + +SET enable_json_type = 1; +SET output_format_json_named_tuples_as_objects = 1; +SET mutations_sync = 2; + +CREATE TABLE t_json_mutations(id UInt32, s String, obj JSON) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_json_mutations VALUES (1, 'q', '{"k1": 1, "k2": 2, "k3": [{"k4": "aaa"}, {"k4": "bbb"}]}'); +INSERT INTO t_json_mutations VALUES (2, 'w', '{"k1": 3, "k2": 4, "k3": [{"k4": "ccc"}]}'); +INSERT INTO t_json_mutations VALUES (3, 'e', '{"k1": 5, "k2": 6}'); + +SELECT * FROM t_json_mutations ORDER BY id; +ALTER TABLE t_json_mutations DELETE WHERE id = 2; +SELECT * FROM t_json_mutations ORDER BY id; +ALTER TABLE t_json_mutations DROP COLUMN s, DROP COLUMN obj, ADD COLUMN t String DEFAULT 'foo'; +SELECT * FROM t_json_mutations ORDER BY id; + +DROP TABLE t_json_mutations; diff --git 
a/parser/testdata/01825_new_type_json_order_by/ast.json b/parser/testdata/01825_new_type_json_order_by/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01825_new_type_json_order_by/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01825_new_type_json_order_by/metadata.json b/parser/testdata/01825_new_type_json_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01825_new_type_json_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01825_new_type_json_order_by/query.sql b/parser/testdata/01825_new_type_json_order_by/query.sql new file mode 100644 index 000000000..98a179208 --- /dev/null +++ b/parser/testdata/01825_new_type_json_order_by/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest + +SET enable_json_type = 1; +SELECT dummy FROM system.one ORDER BY materialize('{"k":"v"}'::JSON); +SELECT dummy FROM system.one ORDER BY materialize('{"k":"v"}'::JSON), dummy; +SELECT materialize('{"k":"v"}'::JSON) SETTINGS extremes = 1; diff --git a/parser/testdata/01825_new_type_json_parallel_insert/ast.json b/parser/testdata/01825_new_type_json_parallel_insert/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01825_new_type_json_parallel_insert/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01825_new_type_json_parallel_insert/metadata.json b/parser/testdata/01825_new_type_json_parallel_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01825_new_type_json_parallel_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01825_new_type_json_parallel_insert/query.sql b/parser/testdata/01825_new_type_json_parallel_insert/query.sql new file mode 100644 index 000000000..763b61369 --- /dev/null +++ b/parser/testdata/01825_new_type_json_parallel_insert/query.sql @@ -0,0 +1,10 @@ +-- Tags: long +DROP TABLE IF EXISTS t_json_parallel; + +SET enable_json_type = 1, max_insert_threads = 20, max_threads = 20, min_insert_block_size_rows = 65536; +CREATE TABLE t_json_parallel (data JSON) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_json_parallel SELECT materialize('{"k1":1, "k2": "some"}') FROM numbers_mt(500000); +SELECT groupUniqArrayMap(JSONAllPathsWithTypes(data)), count() FROM t_json_parallel; + +DROP TABLE t_json_parallel; diff --git a/parser/testdata/01825_new_type_json_partitions/ast.json b/parser/testdata/01825_new_type_json_partitions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01825_new_type_json_partitions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01825_new_type_json_partitions/metadata.json b/parser/testdata/01825_new_type_json_partitions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01825_new_type_json_partitions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01825_new_type_json_partitions/query.sql b/parser/testdata/01825_new_type_json_partitions/query.sql new file mode 100644 index 000000000..7cf8232a6 --- /dev/null +++ b/parser/testdata/01825_new_type_json_partitions/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_json_partitions; + +SET enable_json_type = 1; + +CREATE TABLE t_json_partitions (id UInt32, obj JSON) +ENGINE MergeTree ORDER BY id PARTITION BY id; + +INSERT INTO t_json_partitions FORMAT JSONEachRow {"id": 1, "obj": 
{"k1": "v1"}} {"id": 2, "obj": {"k2": "v2"}}; + +SELECT * FROM t_json_partitions ORDER BY id FORMAT JSONEachRow; + +DROP TABLE t_json_partitions; diff --git a/parser/testdata/01825_replacing_vertical_merge/ast.json b/parser/testdata/01825_replacing_vertical_merge/ast.json new file mode 100644 index 000000000..2623cb8e7 --- /dev/null +++ b/parser/testdata/01825_replacing_vertical_merge/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.002032198, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01825_replacing_vertical_merge/metadata.json b/parser/testdata/01825_replacing_vertical_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01825_replacing_vertical_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01825_replacing_vertical_merge/query.sql b/parser/testdata/01825_replacing_vertical_merge/query.sql new file mode 100644 index 000000000..0048f8d7b --- /dev/null +++ b/parser/testdata/01825_replacing_vertical_merge/query.sql @@ -0,0 +1,48 @@ +SET optimize_on_insert = 0; + +DROP TABLE IF EXISTS replacing_table; + +CREATE TABLE replacing_table (a UInt32, b UInt32, c UInt32) +ENGINE = ReplacingMergeTree ORDER BY a +SETTINGS vertical_merge_algorithm_min_rows_to_activate = 1, + vertical_merge_algorithm_min_columns_to_activate = 1, + index_granularity = 16, + min_bytes_for_wide_part = 0, + merge_max_block_size = 16; + +SYSTEM STOP MERGES replacing_table; + +INSERT INTO replacing_table SELECT number, number, number from numbers(16); +INSERT INTO replacing_table SELECT 100, number, number from numbers(16); + +SELECT sum(a), count() FROM replacing_table; + +SYSTEM START MERGES replacing_table; + +OPTIMIZE TABLE replacing_table FINAL; + +SELECT sum(a), count() FROM replacing_table; + +DROP TABLE IF EXISTS replacing_table; + +CREATE TABLE replacing_table +( + key UInt64, + value UInt64 +) +ENGINE = ReplacingMergeTree +ORDER BY key +SETTINGS + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0, + min_bytes_for_wide_part = 0; + +INSERT INTO replacing_table SELECT if(number == 8192, 8191, number), 1 FROM numbers(8193); + +SELECT sum(key), count() from replacing_table; + +OPTIMIZE TABLE replacing_table FINAL; + +SELECT sum(key), count() from replacing_table; + +DROP TABLE IF EXISTS replacing_table; diff --git a/parser/testdata/01831_max_streams/ast.json b/parser/testdata/01831_max_streams/ast.json new file mode 100644 index 000000000..d93625298 --- /dev/null +++ b/parser/testdata/01831_max_streams/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '127.1'" + }, + { + "explain": " Identifier system.one" + }, + { + "explain": " Set" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 
0.001606179, + "rows_read": 13, + "bytes_read": 470 + } +} diff --git a/parser/testdata/01831_max_streams/metadata.json b/parser/testdata/01831_max_streams/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01831_max_streams/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01831_max_streams/query.sql b/parser/testdata/01831_max_streams/query.sql new file mode 100644 index 000000000..aa835dea5 --- /dev/null +++ b/parser/testdata/01831_max_streams/query.sql @@ -0,0 +1 @@ +select * from remote('127.1', system.one) settings max_distributed_connections=0; diff --git a/parser/testdata/01832_memory_write_suffix/ast.json b/parser/testdata/01832_memory_write_suffix/ast.json new file mode 100644 index 000000000..2e6db12e5 --- /dev/null +++ b/parser/testdata/01832_memory_write_suffix/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_01832 (children 1)" + }, + { + "explain": " Identifier data_01832" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001882441, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01832_memory_write_suffix/metadata.json b/parser/testdata/01832_memory_write_suffix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01832_memory_write_suffix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01832_memory_write_suffix/query.sql b/parser/testdata/01832_memory_write_suffix/query.sql new file mode 100644 index 000000000..274736c5c --- /dev/null +++ b/parser/testdata/01832_memory_write_suffix/query.sql @@ -0,0 +1,9 @@ +drop table if exists data_01832; + +-- Memory writes data from writeSuffix(); if it were called twice, two rows +-- would be written (since it does not reset the block).
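+-- Hence the SELECT below must return exactly one row for the single INSERT.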
+create table data_01832 (key Int) Engine=Memory; +insert into data_01832 values (1); +select * from data_01832; + +drop table data_01832; diff --git a/parser/testdata/01833_test_collation_alvarotuso/ast.json b/parser/testdata/01833_test_collation_alvarotuso/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01833_test_collation_alvarotuso/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01833_test_collation_alvarotuso/metadata.json b/parser/testdata/01833_test_collation_alvarotuso/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01833_test_collation_alvarotuso/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01833_test_collation_alvarotuso/query.sql b/parser/testdata/01833_test_collation_alvarotuso/query.sql new file mode 100644 index 000000000..7a9e9cd88 --- /dev/null +++ b/parser/testdata/01833_test_collation_alvarotuso/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS test_collation; + +CREATE TABLE test_collation +( + `v` String, + `v2` String +) +ENGINE = MergeTree +ORDER BY v +SETTINGS index_granularity = 8192; + +insert into test_collation values ('A', 'A'); +insert into test_collation values ('B', 'B'); +insert into test_collation values ('C', 'C'); +insert into test_collation values ('a', 'a'); +insert into test_collation values ('b', 'b'); +insert into test_collation values ('c', 'c'); + +SELECT * FROM test_collation ORDER BY v ASC COLLATE 'en'; + +DROP TABLE test_collation; diff --git a/parser/testdata/01835_alias_to_primary_key_cyfdecyf/ast.json b/parser/testdata/01835_alias_to_primary_key_cyfdecyf/ast.json new file mode 100644 index 000000000..1d2011d3f --- /dev/null +++ b/parser/testdata/01835_alias_to_primary_key_cyfdecyf/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery db (children 1)" + }, + { + "explain": " Identifier db" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001315396, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01835_alias_to_primary_key_cyfdecyf/metadata.json b/parser/testdata/01835_alias_to_primary_key_cyfdecyf/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01835_alias_to_primary_key_cyfdecyf/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01835_alias_to_primary_key_cyfdecyf/query.sql b/parser/testdata/01835_alias_to_primary_key_cyfdecyf/query.sql new file mode 100644 index 000000000..54ffb7b4c --- /dev/null +++ b/parser/testdata/01835_alias_to_primary_key_cyfdecyf/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS db; + +CREATE TABLE tb +( + date Date, + `index` Int32, + value Int32, + idx Int32 ALIAS `index` +) +ENGINE = MergeTree +PARTITION BY date +ORDER BY (date, `index`); + +insert into tb values ('2017-12-15', 1, 1); + +SET force_primary_key = 1; + +select * from tb where `index` >= 0 AND `index` <= 2; +select * from tb where idx >= 0 AND idx <= 2; + +DROP TABLE tb; diff --git a/parser/testdata/01836_date_time_keep_default_timezone_on_operations_den_crane/ast.json b/parser/testdata/01836_date_time_keep_default_timezone_on_operations_den_crane/ast.json new file mode 100644 index 000000000..52c5dc4e4 --- /dev/null +++ b/parser/testdata/01836_date_time_keep_default_timezone_on_operations_den_crane/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + 
"data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function now (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001653787, + "rows_read": 8, + "bytes_read": 301 + } +} diff --git a/parser/testdata/01836_date_time_keep_default_timezone_on_operations_den_crane/metadata.json b/parser/testdata/01836_date_time_keep_default_timezone_on_operations_den_crane/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01836_date_time_keep_default_timezone_on_operations_den_crane/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01836_date_time_keep_default_timezone_on_operations_den_crane/query.sql b/parser/testdata/01836_date_time_keep_default_timezone_on_operations_den_crane/query.sql new file mode 100644 index 000000000..be47cfb04 --- /dev/null +++ b/parser/testdata/01836_date_time_keep_default_timezone_on_operations_den_crane/query.sql @@ -0,0 +1,26 @@ +SELECT toTypeName(now()); +SELECT toTypeName(now() - 1); +SELECT toTypeName(now('UTC') - 1); + +SELECT toTypeName(now64(3)); +SELECT toTypeName(now64(3) - 1); +SELECT toTypeName(toTimeZone(now64(3), 'UTC') - 1); + +DROP TABLE IF EXISTS tt_null; +DROP TABLE IF EXISTS tt; +DROP TABLE IF EXISTS tt_mv; + +create table tt_null(p String) engine = Null; + +create table tt(p String,tmin AggregateFunction(min, DateTime)) +engine = AggregatingMergeTree order by p; + +create materialized view tt_mv to tt as +select p, minState(now() - interval 30 minute) as tmin +from tt_null group by p; + +insert into tt_null values('x'); + +DROP TABLE tt_null; +DROP TABLE tt; +DROP TABLE tt_mv; diff --git a/parser/testdata/01837_cast_to_array_from_empty_array/ast.json b/parser/testdata/01837_cast_to_array_from_empty_array/ast.json new file mode 100644 index 000000000..80f75a3da --- /dev/null +++ b/parser/testdata/01837_cast_to_array_from_empty_array/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal 'Array(Array(String))'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001290609, + "rows_read": 9, + "bytes_read": 341 + } +} diff --git a/parser/testdata/01837_cast_to_array_from_empty_array/metadata.json b/parser/testdata/01837_cast_to_array_from_empty_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01837_cast_to_array_from_empty_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01837_cast_to_array_from_empty_array/query.sql b/parser/testdata/01837_cast_to_array_from_empty_array/query.sql new file mode 100644 index 000000000..f3aa595f6 --- /dev/null +++ b/parser/testdata/01837_cast_to_array_from_empty_array/query.sql @@ -0,0 +1,2 @@ +SELECT CAST([] AS 
Array(Array(String))); +SELECT CAST([] AS Array(Array(Array(String)))); diff --git a/parser/testdata/01837_database_memory_ddl_dictionaries/ast.json b/parser/testdata/01837_database_memory_ddl_dictionaries/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01837_database_memory_ddl_dictionaries/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01837_database_memory_ddl_dictionaries/metadata.json b/parser/testdata/01837_database_memory_ddl_dictionaries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01837_database_memory_ddl_dictionaries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01837_database_memory_ddl_dictionaries/query.sql b/parser/testdata/01837_database_memory_ddl_dictionaries/query.sql new file mode 100644 index 000000000..199f5cb5a --- /dev/null +++ b/parser/testdata/01837_database_memory_ddl_dictionaries/query.sql @@ -0,0 +1,32 @@ +-- Tags: no-parallel, no-fasttest + +DROP DATABASE IF EXISTS 01837_db; +CREATE DATABASE 01837_db ENGINE = Memory; + +DROP TABLE IF EXISTS 01837_db.simple_key_dictionary_source; +CREATE TABLE 01837_db.simple_key_dictionary_source +( + id UInt64, + value String +) ENGINE = TinyLog; + +INSERT INTO 01837_db.simple_key_dictionary_source VALUES (1, 'First'); +INSERT INTO 01837_db.simple_key_dictionary_source VALUES (2, 'Second'); +INSERT INTO 01837_db.simple_key_dictionary_source VALUES (3, 'Third'); + +DROP DICTIONARY IF EXISTS 01837_db.simple_key_direct_dictionary; +CREATE DICTIONARY 01837_db.simple_key_direct_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() DB '01837_db' TABLE 'simple_key_dictionary_source')) +LAYOUT(DIRECT()); + +SELECT * FROM 01837_db.simple_key_direct_dictionary ORDER BY ALL; + +DROP DICTIONARY 01837_db.simple_key_direct_dictionary; +DROP TABLE 01837_db.simple_key_dictionary_source; + +DROP DATABASE 01837_db; diff --git a/parser/testdata/01838_system_dictionaries_virtual_key_column/ast.json b/parser/testdata/01838_system_dictionaries_virtual_key_column/ast.json new file mode 100644 index 000000000..952b89a0d --- /dev/null +++ b/parser/testdata/01838_system_dictionaries_virtual_key_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery example_simple_key_dictionary (children 1)" + }, + { + "explain": " Identifier example_simple_key_dictionary" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00115257, + "rows_read": 2, + "bytes_read": 110 + } +} diff --git a/parser/testdata/01838_system_dictionaries_virtual_key_column/metadata.json b/parser/testdata/01838_system_dictionaries_virtual_key_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01838_system_dictionaries_virtual_key_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01838_system_dictionaries_virtual_key_column/query.sql b/parser/testdata/01838_system_dictionaries_virtual_key_column/query.sql new file mode 100644 index 000000000..bcc26491e --- /dev/null +++ b/parser/testdata/01838_system_dictionaries_virtual_key_column/query.sql @@ -0,0 +1,29 @@ +DROP DICTIONARY IF EXISTS example_simple_key_dictionary; +CREATE DICTIONARY example_simple_key_dictionary ( + id UInt64, + value UInt64 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE '' DATABASE 
currentDatabase())) +LAYOUT(DIRECT()); + +SELECT 'simple key'; + +SELECT name, key FROM system.dictionaries WHERE name='example_simple_key_dictionary' AND database=currentDatabase(); + +DROP DICTIONARY IF EXISTS example_complex_key_dictionary; +CREATE DICTIONARY example_complex_key_dictionary ( + id UInt64, + id_key String, + value UInt64 +) +PRIMARY KEY id, id_key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE '' DATABASE currentDatabase())) +LAYOUT(COMPLEX_KEY_DIRECT()); + +SELECT 'complex key'; + +SELECT name, key FROM system.dictionaries WHERE name='example_complex_key_dictionary' AND database=currentDatabase(); + +DROP DICTIONARY example_complex_key_dictionary; +DROP DICTIONARY example_simple_key_dictionary; diff --git a/parser/testdata/01839_join_to_subqueries_rewriter_columns_matcher/ast.json b/parser/testdata/01839_join_to_subqueries_rewriter_columns_matcher/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01839_join_to_subqueries_rewriter_columns_matcher/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01839_join_to_subqueries_rewriter_columns_matcher/metadata.json b/parser/testdata/01839_join_to_subqueries_rewriter_columns_matcher/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01839_join_to_subqueries_rewriter_columns_matcher/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01839_join_to_subqueries_rewriter_columns_matcher/query.sql b/parser/testdata/01839_join_to_subqueries_rewriter_columns_matcher/query.sql new file mode 100644 index 000000000..f1387ebcd --- /dev/null +++ b/parser/testdata/01839_join_to_subqueries_rewriter_columns_matcher/query.sql @@ -0,0 +1,9 @@ +SELECT (if(a.test == 'a', b.test, c.test)) as `a.test` FROM + (SELECT 1 AS id, 'a' AS test) a + LEFT JOIN (SELECT 1 AS id, 'b' AS test) b ON b.id = a.id + LEFT JOIN (SELECT 1 AS id, 'c' AS test) c ON c.id = a.id; + +SELECT COLUMNS('test') FROM + (SELECT 1 AS id, 'a' AS test) a + LEFT JOIN (SELECT 1 AS id, 'b' AS test) b ON b.id = a.id + LEFT JOIN (SELECT 1 AS id, 'c' AS test) c ON c.id = a.id; diff --git a/parser/testdata/01840_tupleElement_formatting_fuzzer/ast.json b/parser/testdata/01840_tupleElement_formatting_fuzzer/ast.json new file mode 100644 index 000000000..41ef24363 --- /dev/null +++ b/parser/testdata/01840_tupleElement_formatting_fuzzer/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Explain EXPLAIN AST (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_255" + }, + { + "explain": " Literal UInt64_100" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001249883, + "rows_read": 9, + "bytes_read": 347 + } +} diff --git a/parser/testdata/01840_tupleElement_formatting_fuzzer/metadata.json b/parser/testdata/01840_tupleElement_formatting_fuzzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01840_tupleElement_formatting_fuzzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01840_tupleElement_formatting_fuzzer/query.sql 
b/parser/testdata/01840_tupleElement_formatting_fuzzer/query.sql new file mode 100644 index 000000000..299372d79 --- /dev/null +++ b/parser/testdata/01840_tupleElement_formatting_fuzzer/query.sql @@ -0,0 +1,6 @@ +explain ast select tupleElement(255, 100); +explain ast select tupleElement((255, 1), 1); +select tupleElement((255, 1), 1); + +EXPLAIN AST SELECT tupleElement(*, 2), tupleElement(x, 1) FROM (SELECT arrayJoin([(0,1)]) AS x); +SELECT tupleElement(*, 2), tupleElement(x, 1) FROM (SELECT arrayJoin([(0,1)]) AS x); diff --git a/parser/testdata/01845_add_testcase_for_arrayElement/ast.json b/parser/testdata/01845_add_testcase_for_arrayElement/ast.json new file mode 100644 index 000000000..fc3750f83 --- /dev/null +++ b/parser/testdata/01845_add_testcase_for_arrayElement/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001353389, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01845_add_testcase_for_arrayElement/metadata.json b/parser/testdata/01845_add_testcase_for_arrayElement/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01845_add_testcase_for_arrayElement/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01845_add_testcase_for_arrayElement/query.sql b/parser/testdata/01845_add_testcase_for_arrayElement/query.sql new file mode 100644 index 000000000..6aeb71b85 --- /dev/null +++ b/parser/testdata/01845_add_testcase_for_arrayElement/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (`key` UInt32, `arr` ALIAS [1, 2], `xx` MATERIALIZED arr[1]) ENGINE = MergeTree PARTITION BY tuple() ORDER BY tuple(); +DROP TABLE test; + +CREATE TABLE test (`key` UInt32, `arr` Array(UInt32) ALIAS [1, 2], `xx` MATERIALIZED arr[1]) ENGINE = MergeTree PARTITION BY tuple() ORDER BY tuple(); +DROP TABLE test; + +CREATE TABLE test (`key` UInt32, `arr` Array(UInt32) ALIAS [1, 2], `xx` UInt32 MATERIALIZED arr[1]) ENGINE = MergeTree PARTITION BY tuple() ORDER BY tuple(); +DROP TABLE test; + +CREATE TABLE test (`key` UInt32, `arr` ALIAS [1, 2]) ENGINE = MergeTree PARTITION BY tuple() ORDER BY tuple(); +ALTER TABLE test ADD COLUMN `xx` UInt32 MATERIALIZED arr[1]; +DROP TABLE test; diff --git a/parser/testdata/01846_alter_column_without_type_bugfix/ast.json b/parser/testdata/01846_alter_column_without_type_bugfix/ast.json new file mode 100644 index 000000000..134b784a5 --- /dev/null +++ b/parser/testdata/01846_alter_column_without_type_bugfix/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alter_test (children 1)" + }, + { + "explain": " Identifier alter_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001176754, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01846_alter_column_without_type_bugfix/metadata.json b/parser/testdata/01846_alter_column_without_type_bugfix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01846_alter_column_without_type_bugfix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01846_alter_column_without_type_bugfix/query.sql b/parser/testdata/01846_alter_column_without_type_bugfix/query.sql new file mode 100644 index 000000000..5df8daedb --- /dev/null +++ 
b/parser/testdata/01846_alter_column_without_type_bugfix/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS alter_test; +CREATE TABLE alter_test (a Int32, b DateTime) ENGINE = ReplacingMergeTree(b) ORDER BY a; +ALTER TABLE alter_test MODIFY COLUMN `b` DateTime DEFAULT now(); +ALTER TABLE alter_test MODIFY COLUMN `b` DEFAULT now() + 1; +SHOW CREATE TABLE alter_test; +DROP TABLE alter_test; diff --git a/parser/testdata/01846_null_as_default_for_insert_select/ast.json b/parser/testdata/01846_null_as_default_for_insert_select/ast.json new file mode 100644 index 000000000..5daa8ba49 --- /dev/null +++ b/parser/testdata/01846_null_as_default_for_insert_select/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_null_as_default (children 1)" + }, + { + "explain": " Identifier test_null_as_default" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001350608, + "rows_read": 2, + "bytes_read": 92 + } +} diff --git a/parser/testdata/01846_null_as_default_for_insert_select/metadata.json b/parser/testdata/01846_null_as_default_for_insert_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01846_null_as_default_for_insert_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01846_null_as_default_for_insert_select/query.sql b/parser/testdata/01846_null_as_default_for_insert_select/query.sql new file mode 100644 index 000000000..44a1537f7 --- /dev/null +++ b/parser/testdata/01846_null_as_default_for_insert_select/query.sql @@ -0,0 +1,32 @@ +DROP TABLE IF EXISTS test_null_as_default; +CREATE TABLE test_null_as_default (a String DEFAULT 'WORLD') ENGINE = Memory; + +INSERT INTO test_null_as_default SELECT 'HELLO' UNION ALL SELECT NULL; +SELECT * FROM test_null_as_default ORDER BY a; +SELECT ''; + +INSERT INTO test_null_as_default SELECT NULL; +SELECT * FROM test_null_as_default ORDER BY a; +SELECT ''; + +DROP TABLE IF EXISTS test_null_as_default; +CREATE TABLE test_null_as_default (a String DEFAULT 'WORLD', b String DEFAULT 'PEOPLE') ENGINE = Memory; + +INSERT INTO test_null_as_default(a) SELECT 'HELLO' UNION ALL SELECT NULL; +SELECT * FROM test_null_as_default ORDER BY a; +SELECT ''; + +DROP TABLE IF EXISTS test_null_as_default; +CREATE TABLE test_null_as_default (a Int8, b Int64 DEFAULT a + 1000) ENGINE = Memory; + +INSERT INTO test_null_as_default SELECT 1, NULL UNION ALL SELECT 2, NULL; +SELECT * FROM test_null_as_default ORDER BY a; +SELECT ''; + +DROP TABLE IF EXISTS test_null_as_default; +CREATE TABLE test_null_as_default (a Int8, b Int64 DEFAULT c - 500, c Int32 DEFAULT a + 1000) ENGINE = Memory; + +INSERT INTO test_null_as_default(a, c) SELECT 1, NULL UNION ALL SELECT 2, NULL; +SELECT * FROM test_null_as_default ORDER BY a; + +DROP TABLE test_null_as_default; diff --git a/parser/testdata/01847_bad_like/ast.json b/parser/testdata/01847_bad_like/ast.json new file mode 100644 index 000000000..ee7be9f53 --- /dev/null +++ b/parser/testdata/01847_bad_like/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function like (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '\\\\w'" + }, + { + 
"explain": " Literal '%\\\\w%'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001521537, + "rows_read": 8, + "bytes_read": 283 + } +} diff --git a/parser/testdata/01847_bad_like/metadata.json b/parser/testdata/01847_bad_like/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01847_bad_like/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01847_bad_like/query.sql b/parser/testdata/01847_bad_like/query.sql new file mode 100644 index 000000000..79f7cb58a --- /dev/null +++ b/parser/testdata/01847_bad_like/query.sql @@ -0,0 +1,30 @@ +SELECT '\w' LIKE '%\w%'; +SELECT '\w' LIKE '\w%'; +SELECT '\w' LIKE '%\w'; +SELECT '\w' LIKE '\w'; + +SELECT '\\w' LIKE '%\\w%'; +SELECT '\\w' LIKE '\\w%'; +SELECT '\\w' LIKE '%\\w'; +SELECT '\\w' LIKE '\\w'; + +SELECT '\i' LIKE '%\i%'; +SELECT '\i' LIKE '\i%'; +SELECT '\i' LIKE '%\i'; +SELECT '\i' LIKE '\i'; + +SELECT '\\i' LIKE '%\\i%'; +SELECT '\\i' LIKE '\\i%'; +SELECT '\\i' LIKE '%\\i'; +SELECT '\\i' LIKE '\\i'; + +SELECT '\\' LIKE '%\\\\%'; +SELECT '\\' LIKE '\\\\%'; +SELECT '\\' LIKE '%\\\\'; +SELECT '\\' LIKE '\\\\'; +SELECT '\\' LIKE '\\'; -- { serverError CANNOT_PARSE_ESCAPE_SEQUENCE } + +SELECT '\\xyz\\' LIKE '\\\\%\\\\'; +SELECT '\\xyz\\' LIKE '\\\\___\\\\'; +SELECT '\\xyz\\' LIKE '\\\\_%_\\\\'; +SELECT '\\xyz\\' LIKE '\\\\%_%\\\\'; diff --git a/parser/testdata/01848_partition_value_column/ast.json b/parser/testdata/01848_partition_value_column/ast.json new file mode 100644 index 000000000..641f8f432 --- /dev/null +++ b/parser/testdata/01848_partition_value_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tbl (children 1)" + }, + { + "explain": " Identifier tbl" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001755631, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01848_partition_value_column/metadata.json b/parser/testdata/01848_partition_value_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01848_partition_value_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01848_partition_value_column/query.sql b/parser/testdata/01848_partition_value_column/query.sql new file mode 100644 index 000000000..de5e766c9 --- /dev/null +++ b/parser/testdata/01848_partition_value_column/query.sql @@ -0,0 +1,21 @@ +drop table if exists tbl; +drop table if exists tbl2; + +create table tbl(dt DateTime, i int, j String, v Float64) engine MergeTree partition by (toDate(dt), i % 2, length(j)) order by i settings index_granularity = 1; + +insert into tbl values ('2021-04-01 00:01:02', 1, '123', 4), ('2021-04-01 01:01:02', 1, '12', 4), ('2021-04-01 02:11:02', 2, '345', 4), ('2021-04-01 04:31:02', 2, '2', 4), ('2021-04-02 00:01:02', 1, '1234', 4), ('2021-04-02 00:01:02', 2, '123', 4), ('2021-04-02 00:01:02', 3, '12', 4), ('2021-04-02 00:01:02', 4, '1', 4); + +set optimize_use_implicit_projections = 1; + +select count() from tbl where _partition_value = ('2021-04-01', 1, 2) settings max_rows_to_read = 1; +select count() from tbl where _partition_value.1 = '2021-04-01' settings max_rows_to_read = 4; +select count() from tbl where _partition_value.2 = 0 settings max_rows_to_read = 4; +select count() from tbl where _partition_value.3 = 4 settings max_rows_to_read = 1; + +create table tbl2(i int) engine MergeTree order by i; +insert into tbl2 values (1); +select _partition_value 
from tbl2; -- { serverError UNKNOWN_IDENTIFIER } +select _partition_value from tbl2 group by 1; -- { serverError UNKNOWN_IDENTIFIER } + +drop table tbl; +drop table tbl2; diff --git a/parser/testdata/01849_geoToS2/ast.json b/parser/testdata/01849_geoToS2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01849_geoToS2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01849_geoToS2/metadata.json b/parser/testdata/01849_geoToS2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01849_geoToS2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01849_geoToS2/query.sql b/parser/testdata/01849_geoToS2/query.sql new file mode 100644 index 000000000..8e268753b --- /dev/null +++ b/parser/testdata/01849_geoToS2/query.sql @@ -0,0 +1,53 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: needs s2 + +DROP TABLE IF EXISTS s2_indexes; + +CREATE TABLE s2_indexes (s2_index UInt64, longitude Float64, latitude Float64) ENGINE = Memory; + +-- Random geo coordinates were generated using S2Testing::RandomPoint() method from s2 API. + +INSERT INTO s2_indexes VALUES (3814912406305146967, 125.938503, 25.519362); +INSERT INTO s2_indexes VALUES (10654167528317613967, -64.364998, -13.206226); +INSERT INTO s2_indexes VALUES (1913723177026859705, 8.774109, -3.271374); +INSERT INTO s2_indexes VALUES (13606307743304496111, -89.810962, -57.013984); +INSERT INTO s2_indexes VALUES (8094352344009072761,-170.423649, -10.102188); +INSERT INTO s2_indexes VALUES (2414200527355011659, 54.724353, -19.210608); +INSERT INTO s2_indexes VALUES (4590287096029015693, 51.390374, 29.368252); +INSERT INTO s2_indexes VALUES (10173921221664598133, 5.161979, -46.718242); +INSERT INTO s2_indexes VALUES (525948609053546189, -41.564128, -16.777073); +INSERT INTO s2_indexes VALUES (2446780491369950853, 49.94229, -18.633856); +INSERT INTO s2_indexes VALUES (1723620528513492581, 40.768274, 2.853563); +INSERT INTO s2_indexes VALUES (8295275405228383207, -168.258979, -38.271170); +INSERT INTO s2_indexes VALUES (7280210779810727639, 170.145748, 7.606449); +INSERT INTO s2_indexes VALUES (10670400906708524495, -61.761938, -24.969589); +INSERT INTO s2_indexes VALUES (10868726821406046149, -79.245460, -22.940849); +INSERT INTO s2_indexes VALUES (13202270384266773545, 10.610774, -64.184103); +INSERT INTO s2_indexes VALUES (145638248314527629, -19.826140, -41.192912); +INSERT INTO s2_indexes VALUES (12793606480989360601, 74.006104, -68.321240); +INSERT INTO s2_indexes VALUES (6317132534461540391, -165.907973, 54.205178); +INSERT INTO s2_indexes VALUES (6944470717485986643, 140.428834, 28.399755); + +SELECT 'Checking s2 index generation.'; + +SELECT s2ToGeo(s2_index), geoToS2(longitude, latitude) FROM s2_indexes ORDER BY s2_index; + +SELECT first, second, result FROM ( + SELECT + s2ToGeo(geoToS2(longitude, latitude)) AS output_geo, + tuple(roundBankers(longitude, 5), roundBankers(latitude, 5)) AS first, + tuple(roundBankers(output_geo.1, 5), roundBankers(output_geo.2, 5)) AS second, + if(first = second, 'ok', 'fail') AS result + FROM s2_indexes + ORDER BY s2_index + ); + +SELECT s2ToGeo(toUInt64(-1)); -- { serverError BAD_ARGUMENTS } +SELECT s2ToGeo(nan); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT geoToS2(toFloat64(toUInt64(-1)), toFloat64(toUInt64(-1))); -- { serverError BAD_ARGUMENTS } +SELECT geoToS2(nan, nan); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT geoToS2(-inf, 1.1754943508222875e-38); -- { serverError 
ILLEGAL_TYPE_OF_ARGUMENT } + + + +DROP TABLE IF EXISTS s2_indexes; diff --git a/parser/testdata/01850_dist_INSERT_preserve_error/ast.json b/parser/testdata/01850_dist_INSERT_preserve_error/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01850_dist_INSERT_preserve_error/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01850_dist_INSERT_preserve_error/metadata.json b/parser/testdata/01850_dist_INSERT_preserve_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01850_dist_INSERT_preserve_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01850_dist_INSERT_preserve_error/query.sql b/parser/testdata/01850_dist_INSERT_preserve_error/query.sql new file mode 100644 index 000000000..3fc0f8a3e --- /dev/null +++ b/parser/testdata/01850_dist_INSERT_preserve_error/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-parallel + +create database if not exists shard_0; +create database if not exists shard_1; + +drop table if exists dist_01850; +drop table if exists shard_0.data_01850; + +create table shard_0.data_01850 (key Int) engine=Memory(); +create table dist_01850 (key Int) engine=Distributed('test_cluster_two_replicas_different_databases', /* default_database= */ '', data_01850, key); + +set distributed_foreground_insert=1; +set prefer_localhost_replica=0; +insert into dist_01850 values (1); -- { serverError UNKNOWN_TABLE } + +drop table if exists dist_01850; +drop table shard_0.data_01850; + +drop database shard_0; +drop database shard_1; diff --git a/parser/testdata/01851_array_difference_decimal_overflow_ubsan/ast.json b/parser/testdata/01851_array_difference_decimal_overflow_ubsan/ast.json new file mode 100644 index 000000000..62a2ba278 --- /dev/null +++ b/parser/testdata/01851_array_difference_decimal_overflow_ubsan/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayDifference (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_100.0000991821289" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal Int64_-2147483647" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001422332, + "rows_read": 13, + "bytes_read": 550 + } +} diff --git a/parser/testdata/01851_array_difference_decimal_overflow_ubsan/metadata.json b/parser/testdata/01851_array_difference_decimal_overflow_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01851_array_difference_decimal_overflow_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01851_array_difference_decimal_overflow_ubsan/query.sql b/parser/testdata/01851_array_difference_decimal_overflow_ubsan/query.sql new file mode 100644 index 000000000..4e7b7301e --- /dev/null +++ b/parser/testdata/01851_array_difference_decimal_overflow_ubsan/query.sql @@ -0,0 +1 @@ +SELECT arrayDifference([toDecimal32(100.0000991821289, 0), -2147483647]) AS x; 
--{serverError DECIMAL_OVERFLOW} diff --git a/parser/testdata/01851_clear_column_referenced_by_mv/ast.json b/parser/testdata/01851_clear_column_referenced_by_mv/ast.json new file mode 100644 index 000000000..2368ab659 --- /dev/null +++ b/parser/testdata/01851_clear_column_referenced_by_mv/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 01851_merge_tree (children 1)" + }, + { + "explain": " Identifier 01851_merge_tree" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001049521, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/01851_clear_column_referenced_by_mv/metadata.json b/parser/testdata/01851_clear_column_referenced_by_mv/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01851_clear_column_referenced_by_mv/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01851_clear_column_referenced_by_mv/query.sql b/parser/testdata/01851_clear_column_referenced_by_mv/query.sql new file mode 100644 index 000000000..da053c68f --- /dev/null +++ b/parser/testdata/01851_clear_column_referenced_by_mv/query.sql @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS `01851_merge_tree`; +CREATE TABLE `01851_merge_tree` +( + `n1` Int8, + `n2` Int8, + `n3` Int8, + `n4` Int8 +) +ENGINE = MergeTree +ORDER BY n1; + +DROP TABLE IF EXISTS `01851_merge_tree_mv`; +CREATE MATERIALIZED VIEW `01851_merge_tree_mv` +ENGINE = Memory AS +SELECT + n2, + n3 +FROM `01851_merge_tree`; + +ALTER TABLE `01851_merge_tree` + DROP COLUMN n3; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +ALTER TABLE `01851_merge_tree` + DROP COLUMN n2; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +-- ok +ALTER TABLE `01851_merge_tree` + DROP COLUMN n4; + +-- CLEAR COLUMN is OK +ALTER TABLE `01851_merge_tree` + CLEAR COLUMN n2; + +DROP TABLE `01851_merge_tree`; +DROP TABLE `01851_merge_tree_mv`; diff --git a/parser/testdata/01851_fix_row_policy_empty_result/ast.json b/parser/testdata/01851_fix_row_policy_empty_result/ast.json new file mode 100644 index 000000000..d7810cd07 --- /dev/null +++ b/parser/testdata/01851_fix_row_policy_empty_result/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tbl (children 1)" + }, + { + "explain": " Identifier tbl" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001010039, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01851_fix_row_policy_empty_result/metadata.json b/parser/testdata/01851_fix_row_policy_empty_result/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01851_fix_row_policy_empty_result/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01851_fix_row_policy_empty_result/query.sql b/parser/testdata/01851_fix_row_policy_empty_result/query.sql new file mode 100644 index 000000000..f28426bb6 --- /dev/null +++ b/parser/testdata/01851_fix_row_policy_empty_result/query.sql @@ -0,0 +1,12 @@ +drop table if exists tbl; +create table tbl (s String, i int) engine MergeTree order by i; + +insert into tbl values ('123', 123); + +drop row policy if exists filter on tbl; +create row policy filter on tbl using (s = 'non_existing_domain') to all; + +select * from tbl prewhere s = '123' where i = 123; + +drop row policy filter on tbl; +drop table tbl; diff --git a/parser/testdata/01851_hedged_connections_external_tables/ast.json 
b/parser/testdata/01851_hedged_connections_external_tables/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01851_hedged_connections_external_tables/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01851_hedged_connections_external_tables/metadata.json b/parser/testdata/01851_hedged_connections_external_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01851_hedged_connections_external_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01851_hedged_connections_external_tables/query.sql b/parser/testdata/01851_hedged_connections_external_tables/query.sql new file mode 100644 index 000000000..22888d5e6 --- /dev/null +++ b/parser/testdata/01851_hedged_connections_external_tables/query.sql @@ -0,0 +1,2 @@ +-- Tags: no-tsan +select number from remote('127.0.0.{3|2}', numbers(2)) where number global in (select number from numbers(1)) settings async_socket_for_remote=1, use_hedged_requests = 1, sleep_in_send_data_ms=10, receive_data_timeout_ms=1; diff --git a/parser/testdata/01851_s2_to_geo/ast.json b/parser/testdata/01851_s2_to_geo/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01851_s2_to_geo/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01851_s2_to_geo/metadata.json b/parser/testdata/01851_s2_to_geo/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01851_s2_to_geo/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01851_s2_to_geo/query.sql b/parser/testdata/01851_s2_to_geo/query.sql new file mode 100644 index 000000000..d833b95a6 --- /dev/null +++ b/parser/testdata/01851_s2_to_geo/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: needs s2 + +select s2ToGeo(4573520603753570041); +select s2ToGeo(4573517609713934091); diff --git a/parser/testdata/01852_cast_operator/ast.json b/parser/testdata/01852_cast_operator/ast.json new file mode 100644 index 000000000..882a6a2f0 --- /dev/null +++ b/parser/testdata/01852_cast_operator/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (alias c) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '0.1'" + }, + { + "explain": " Literal 'Decimal(38, 38)'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.00099789, + "rows_read": 8, + "bytes_read": 303 + } +} diff --git a/parser/testdata/01852_cast_operator/metadata.json b/parser/testdata/01852_cast_operator/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01852_cast_operator/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01852_cast_operator/query.sql b/parser/testdata/01852_cast_operator/query.sql new file mode 100644 index 000000000..adb9f8653 --- /dev/null +++ b/parser/testdata/01852_cast_operator/query.sql @@ -0,0 +1,45 @@ +SELECT 0.1::Decimal(38, 38) AS c; +EXPLAIN SYNTAX SELECT 0.1::Decimal(38, 38) AS c; + +SELECT [1, 2, 3]::Array(UInt32) AS c; +EXPLAIN SYNTAX SELECT [1, 2, 3]::Array(UInt32) AS c; + +SELECT 'abc'::FixedString(3) AS c; +EXPLAIN SYNTAX SELECT 
'abc'::FixedString(3) AS c; + +SELECT 123::String AS c; +EXPLAIN SYNTAX SELECT 123::String AS c; + +SELECT 1::Int8 AS c; +EXPLAIN SYNTAX SELECT 1::Int8 AS c; + +SELECT [1, 1 + 1, 1 + 2]::Array(UInt32) AS c; +EXPLAIN SYNTAX SELECT [1, 1 + 1, 1 + 2]::Array(UInt32) AS c; + +SELECT '2010-10-10'::Date AS c; +EXPLAIN SYNTAX SELECT '2010-10-10'::Date AS c; + +SELECT '2010-10-10'::DateTime('UTC') AS c; +EXPLAIN SYNTAX SELECT '2010-10-10'::DateTime('UTC') AS c; + +SELECT ['2010-10-10', '2010-10-10']::Array(Date) AS c; +EXPLAIN SYNTAX SELECT ['2010-10-10', '2010-10-10']::Array(Date); + +SELECT (1 + 2)::UInt32 AS c; +EXPLAIN SYNTAX SELECT (1 + 2)::UInt32 AS c; + +SELECT (0.1::Decimal(4, 4) * 5)::Float64 AS c; +EXPLAIN SYNTAX SELECT (0.1::Decimal(4, 4) * 5)::Float64 AS c; + +SELECT number::UInt8 AS c, toTypeName(c) FROM numbers(1); +EXPLAIN SYNTAX SELECT number::UInt8 AS c, toTypeName(c) FROM numbers(1); + +SELECT (0 + 1 + 2 + 3 + 4)::Date AS c; +EXPLAIN SYNTAX SELECT (0 + 1 + 2 + 3 + 4)::Date AS c; + +SELECT (0.1::Decimal(4, 4) + 0.2::Decimal(4, 4) + 0.3::Decimal(4, 4))::Decimal(4, 4) AS c; +EXPLAIN SYNTAX SELECT (0.1::Decimal(4, 4) + 0.2::Decimal(4, 4) + 0.3::Decimal(4, 4))::Decimal(4, 4) AS c; + +SELECT [[1][1]]::Array(UInt32); +SELECT [[1, 2, 3], [], [1]]::Array(Array(UInt32)); +SELECT [[], []]::Array(Array(UInt32)); diff --git a/parser/testdata/01852_cast_operator_2/ast.json b/parser/testdata/01852_cast_operator_2/ast.json new file mode 100644 index 000000000..8321c7b3f --- /dev/null +++ b/parser/testdata/01852_cast_operator_2/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '(0.1, 0.2)'" + }, + { + "explain": " Literal 'Tuple(Decimal(75, 70), Decimal(75, 70))'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001403819, + "rows_read": 8, + "bytes_read": 324 + } +} diff --git a/parser/testdata/01852_cast_operator_2/metadata.json b/parser/testdata/01852_cast_operator_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01852_cast_operator_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01852_cast_operator_2/query.sql b/parser/testdata/01852_cast_operator_2/query.sql new file mode 100644 index 000000000..859d33a38 --- /dev/null +++ b/parser/testdata/01852_cast_operator_2/query.sql @@ -0,0 +1,19 @@ +SELECT (0.1, 0.2)::Tuple(Decimal(75, 70), Decimal(75, 70)); +EXPLAIN SYNTAX SELECT (0.1, 0.2)::Tuple(Decimal(75, 70), Decimal(75, 70)); + +SELECT 0.1 :: Decimal(4, 4); +EXPLAIN SYNTAX SELECT 0.1 :: Decimal(4, 4); + +SELECT [1, 2, 3] :: Array(Int32); +EXPLAIN SYNTAX SELECT [1, 2, 3] :: Array(Int32); + +SELECT [1::UInt32, 2::UInt32]::Array(UInt64); +EXPLAIN SYNTAX SELECT [1::UInt32, 2::UInt32]::Array(UInt64); + +SELECT [[1, 2]::Array(UInt32), [3]]::Array(Array(UInt64)); +EXPLAIN SYNTAX SELECT [[1, 2]::Array(UInt32), [3]]::Array(Array(UInt64)); + +SELECT [[1::UInt16, 2::UInt16]::Array(UInt32), [3]]::Array(Array(UInt64)); +EXPLAIN SYNTAX SELECT [[1::UInt16, 2::UInt16]::Array(UInt32), [3]]::Array(Array(UInt64)); + +SELECT [(1, 'a'), (3, 'b')]::Nested(u UInt8, s String) AS t, toTypeName(t); diff --git 
a/parser/testdata/01852_cast_operator_3/ast.json b/parser/testdata/01852_cast_operator_3/ast.json new file mode 100644 index 000000000..0141c0484 --- /dev/null +++ b/parser/testdata/01852_cast_operator_3/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '-1'" + }, + { + "explain": " Literal 'Int32'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001443812, + "rows_read": 8, + "bytes_read": 282 + } +} diff --git a/parser/testdata/01852_cast_operator_3/metadata.json b/parser/testdata/01852_cast_operator_3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01852_cast_operator_3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01852_cast_operator_3/query.sql b/parser/testdata/01852_cast_operator_3/query.sql new file mode 100644 index 000000000..1ad015a8d --- /dev/null +++ b/parser/testdata/01852_cast_operator_3/query.sql @@ -0,0 +1,14 @@ +SELECT -1::Int32; +EXPLAIN SYNTAX SELECT -1::Int32; + +SELECT -0.1::Decimal(38, 38); +EXPLAIN SYNTAX SELECT -0.1::Decimal(38, 38); + +SELECT -0.111::Float64; +EXPLAIN SYNTAX SELECT -0.111::Float64; + +SELECT [-1, 2, -3]::Array(Int32); +EXPLAIN SYNTAX SELECT [-1, 2, -3]::Array(Int32); + +SELECT [-1.1, 2, -3]::Array(Float64); +EXPLAIN SYNTAX SELECT [-1.1, 2, -3]::Array(Float64); diff --git a/parser/testdata/01852_cast_operator_4/ast.json b/parser/testdata/01852_cast_operator_4/ast.json new file mode 100644 index 000000000..53b11a19e --- /dev/null +++ b/parser/testdata/01852_cast_operator_4/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001593675, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01852_cast_operator_4/metadata.json b/parser/testdata/01852_cast_operator_4/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01852_cast_operator_4/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01852_cast_operator_4/query.sql b/parser/testdata/01852_cast_operator_4/query.sql new file mode 100644 index 000000000..d090a0edb --- /dev/null +++ b/parser/testdata/01852_cast_operator_4/query.sql @@ -0,0 +1,22 @@ +SET enable_analyzer = 1; + +SELECT [3,4,5][1]::Int32; +EXPLAIN SYNTAX SELECT [3,4,5][1]::Int32; + +SELECT [3,4,5]::Array(Int64)[2]::Int8; +EXPLAIN SYNTAX SELECT [3,4,5]::Array(Int64)[2]::Int8; + +SELECT [1,2,3]::Array(UInt64)[[number, number]::Array(UInt8)[number]::UInt64]::UInt8 from numbers(3); +EXPLAIN SYNTAX SELECT [1,2,3]::Array(UInt64)[[number, number]::Array(UInt8)[number]::UInt64]::UInt8 from numbers(3); + +WITH [3,4,5] AS x SELECT x[1]::Int32; +EXPLAIN SYNTAX WITH [3,4,5] AS x SELECT x[1]::Int32; + +SELECT tuple(3,4,5).1::Int32; +EXPLAIN SYNTAX SELECT tuple(3,4,5).1::Int32; + +SELECT tuple(3,4,5)::Tuple(UInt64, UInt64, UInt64).2::Int32; +EXPLAIN SYNTAX SELECT tuple(3,4,5)::Tuple(UInt64, UInt64, UInt64).2::Int32; + +WITH tuple(3,4,5) AS x SELECT x.1::Int32; +EXPLAIN SYNTAX WITH tuple(3,4,5) AS x SELECT x.1::Int32; diff --git 
a/parser/testdata/01852_dictionary_query_count_long/ast.json b/parser/testdata/01852_dictionary_query_count_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01852_dictionary_query_count_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01852_dictionary_query_count_long/metadata.json b/parser/testdata/01852_dictionary_query_count_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01852_dictionary_query_count_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01852_dictionary_query_count_long/query.sql b/parser/testdata/01852_dictionary_query_count_long/query.sql new file mode 100644 index 000000000..e0d92b671 --- /dev/null +++ b/parser/testdata/01852_dictionary_query_count_long/query.sql @@ -0,0 +1,317 @@ +-- Tags: long, no-parallel + +-- originally intended to check the found rate, but it is not deterministic, so check query_count instead + +-- +-- Simple key +-- + +DROP TABLE IF EXISTS simple_key_source_table_01862; +CREATE TABLE simple_key_source_table_01862 +( + id UInt64, + value String +) ENGINE = Memory(); + +INSERT INTO simple_key_source_table_01862 VALUES (1, 'First'); +INSERT INTO simple_key_source_table_01862 VALUES (1, 'First'); + +-- simple flat +DROP DICTIONARY IF EXISTS simple_key_flat_dictionary_01862; +CREATE DICTIONARY simple_key_flat_dictionary_01862 +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'simple_key_source_table_01862')) +LAYOUT(FLAT()) +LIFETIME(MIN 0 MAX 1000); + +SELECT * FROM simple_key_flat_dictionary_01862 FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_flat_dictionary_01862'; +SELECT * FROM simple_key_flat_dictionary_01862 WHERE id = 0 FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_flat_dictionary_01862'; +SELECT dictGet('simple_key_flat_dictionary_01862', 'value', toUInt64(2)) FORMAT Null; +SELECT name, round(query_count, 2), status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_flat_dictionary_01862'; + +DROP DICTIONARY simple_key_flat_dictionary_01862; + +-- simple direct +DROP DICTIONARY IF EXISTS simple_key_direct_dictionary_01862; +CREATE DICTIONARY simple_key_direct_dictionary_01862 +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'simple_key_source_table_01862')) +LAYOUT(DIRECT()); + +-- check that found_rate is 0, not nan +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_direct_dictionary_01862'; +SELECT * FROM simple_key_direct_dictionary_01862 FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_direct_dictionary_01862'; +SELECT dictGet('simple_key_direct_dictionary_01862', 'value', toUInt64(1)) FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_direct_dictionary_01862'; +SELECT dictGet('simple_key_direct_dictionary_01862', 'value', toUInt64(2)) FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_direct_dictionary_01862'; + +DROP DICTIONARY 
simple_key_direct_dictionary_01862; + +-- simple hashed +DROP DICTIONARY IF EXISTS simple_key_hashed_dictionary_01862; +CREATE DICTIONARY simple_key_hashed_dictionary_01862 +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'simple_key_source_table_01862')) +LAYOUT(HASHED()) +LIFETIME(MIN 0 MAX 1000); + +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_hashed_dictionary_01862'; +SELECT dictGet('simple_key_hashed_dictionary_01862', 'value', toUInt64(1)) FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_hashed_dictionary_01862'; +SELECT dictGet('simple_key_hashed_dictionary_01862', 'value', toUInt64(2)) FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_hashed_dictionary_01862'; + +DROP DICTIONARY simple_key_hashed_dictionary_01862; + +-- simple sparse_hashed +DROP DICTIONARY IF EXISTS simple_key_sparse_hashed_dictionary_01862; +CREATE DICTIONARY simple_key_sparse_hashed_dictionary_01862 +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'simple_key_source_table_01862')) +LAYOUT(SPARSE_HASHED()) +LIFETIME(MIN 0 MAX 1000); + +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_sparse_hashed_dictionary_01862'; +SELECT dictGet('simple_key_sparse_hashed_dictionary_01862', 'value', toUInt64(1)) FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_sparse_hashed_dictionary_01862'; +SELECT dictGet('simple_key_sparse_hashed_dictionary_01862', 'value', toUInt64(2)) FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_sparse_hashed_dictionary_01862'; + +DROP DICTIONARY simple_key_sparse_hashed_dictionary_01862; + +-- simple cache +DROP DICTIONARY IF EXISTS simple_key_cache_dictionary_01862; +CREATE DICTIONARY simple_key_cache_dictionary_01862 +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'simple_key_source_table_01862')) +LAYOUT(CACHE(SIZE_IN_CELLS 100000)) +LIFETIME(MIN 0 MAX 1000); + +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_cache_dictionary_01862'; +SELECT toUInt64(1) as key, dictGet('simple_key_cache_dictionary_01862', 'value', key) FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_cache_dictionary_01862'; +SELECT toUInt64(2) as key, dictGet('simple_key_cache_dictionary_01862', 'value', key) FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_cache_dictionary_01862'; + +DROP DICTIONARY simple_key_cache_dictionary_01862; + +DROP TABLE simple_key_source_table_01862; + +-- +-- Complex key +-- + +DROP TABLE IF EXISTS complex_key_source_table_01862; +CREATE TABLE complex_key_source_table_01862 +( + id UInt64, + id_key String, + value String +) ENGINE = Memory(); + +INSERT INTO complex_key_source_table_01862 VALUES (1, 'FirstKey', 'First'); +INSERT INTO complex_key_source_table_01862 VALUES (1, 'FirstKey', 'First'); + +-- 
complex hashed +DROP DICTIONARY IF EXISTS complex_key_hashed_dictionary_01862; +CREATE DICTIONARY complex_key_hashed_dictionary_01862 +( + id UInt64, + id_key String, + value String +) +PRIMARY KEY id, id_key +SOURCE(CLICKHOUSE(TABLE 'complex_key_source_table_01862')) +LAYOUT(COMPLEX_KEY_HASHED()) +LIFETIME(MIN 0 MAX 1000); + +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_hashed_dictionary_01862'; +SELECT dictGet('complex_key_hashed_dictionary_01862', 'value', (toUInt64(1), 'FirstKey')) FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_hashed_dictionary_01862'; +SELECT dictGet('complex_key_hashed_dictionary_01862', 'value', (toUInt64(2), 'FirstKey')) FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_hashed_dictionary_01862'; + +DROP DICTIONARY complex_key_hashed_dictionary_01862; + +-- complex direct +DROP DICTIONARY IF EXISTS complex_key_direct_dictionary_01862; +CREATE DICTIONARY complex_key_direct_dictionary_01862 +( + id UInt64, + id_key String, + value String +) +PRIMARY KEY id, id_key +SOURCE(CLICKHOUSE(TABLE 'complex_key_source_table_01862')) +LAYOUT(COMPLEX_KEY_DIRECT()); + +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_direct_dictionary_01862'; +SELECT dictGet('complex_key_direct_dictionary_01862', 'value', (toUInt64(1), 'FirstKey')) FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_direct_dictionary_01862'; +SELECT dictGet('complex_key_direct_dictionary_01862', 'value', (toUInt64(2), 'FirstKey')) FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_direct_dictionary_01862'; + +DROP DICTIONARY complex_key_direct_dictionary_01862; + +-- complex cache +DROP DICTIONARY IF EXISTS complex_key_cache_dictionary_01862; +CREATE DICTIONARY complex_key_cache_dictionary_01862 +( + id UInt64, + id_key String, + value String +) +PRIMARY KEY id, id_key +SOURCE(CLICKHOUSE(TABLE 'complex_key_source_table_01862')) +LAYOUT(COMPLEX_KEY_CACHE(SIZE_IN_CELLS 100000)) +LIFETIME(MIN 0 MAX 1000); + +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_cache_dictionary_01862'; +SELECT dictGet('complex_key_cache_dictionary_01862', 'value', (toUInt64(1), 'FirstKey')) FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_cache_dictionary_01862'; +SELECT dictGet('complex_key_cache_dictionary_01862', 'value', (toUInt64(2), 'FirstKey')) FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'complex_key_cache_dictionary_01862'; + +DROP DICTIONARY complex_key_cache_dictionary_01862; + +DROP TABLE complex_key_source_table_01862; + +-- +-- Range +-- +DROP TABLE IF EXISTS range_key_source_table_01862; +CREATE TABLE range_key_source_table_01862 +( + id UInt64, + value String, + first Date, + last Date +) ENGINE = Memory(); + +INSERT INTO range_key_source_table_01862 VALUES (1, 'First', today(), today()); +INSERT INTO 
range_key_source_table_01862 VALUES (1, 'First', today(), today()); + +-- simple range_hashed +DROP DICTIONARY IF EXISTS simple_key_range_hashed_dictionary_01862; +CREATE DICTIONARY simple_key_range_hashed_dictionary_01862 +( + id UInt64, + value String, + first Date, + last Date +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'range_key_source_table_01862')) +LAYOUT(RANGE_HASHED()) +RANGE(MIN first MAX last) +LIFETIME(MIN 0 MAX 1000); + +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_range_hashed_dictionary_01862'; +SELECT dictGet('simple_key_range_hashed_dictionary_01862', 'value', toUInt64(1), today()) FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_range_hashed_dictionary_01862'; +SELECT dictGet('simple_key_range_hashed_dictionary_01862', 'value', toUInt64(2), today()) FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'simple_key_range_hashed_dictionary_01862'; + +DROP DICTIONARY simple_key_range_hashed_dictionary_01862; + +DROP TABLE range_key_source_table_01862; + +-- +-- IP Trie +-- +DROP TABLE IF EXISTS ip_trie_source_table_01862; +CREATE TABLE ip_trie_source_table_01862 +( + prefix String, + value String +) ENGINE = Memory(); + +INSERT INTO ip_trie_source_table_01862 VALUES ('127.0.0.0/8', 'First'); +INSERT INTO ip_trie_source_table_01862 VALUES ('127.0.0.0/8', 'First'); + +-- ip_trie +DROP DICTIONARY IF EXISTS ip_trie_dictionary_01862; +CREATE DICTIONARY ip_trie_dictionary_01862 +( + prefix String, + value String +) +PRIMARY KEY prefix +SOURCE(CLICKHOUSE(TABLE 'ip_trie_source_table_01862')) +LAYOUT(IP_TRIE()) +LIFETIME(MIN 0 MAX 1000); + +-- found_rate = 0, because we didn't make any searches. +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'ip_trie_dictionary_01862'; +-- found_rate = 1, because the dictionary covers the 127.0.0.1 address. +SELECT dictGet('ip_trie_dictionary_01862', 'value', tuple(toIPv4('127.0.0.1'))) FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'ip_trie_dictionary_01862'; +-- found_rate = 0.5, because the dictionary does not cover 1.1.1.1 and we have two lookups in total as of now. 
+SELECT dictGet('ip_trie_dictionary_01862', 'value', tuple(toIPv4('1.1.1.1'))) FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'ip_trie_dictionary_01862'; + +DROP DICTIONARY ip_trie_dictionary_01862; + +DROP TABLE ip_trie_source_table_01862; + +-- Polygon +DROP TABLE IF EXISTS polygons_01862; +CREATE TABLE polygons_01862 ( + key Array(Array(Array(Tuple(Float64, Float64)))), + name String +) ENGINE = Memory; +INSERT INTO polygons_01862 VALUES ([[[(3, 1), (0, 1), (0, -1), (3, -1)]]], 'Click East'); +INSERT INTO polygons_01862 VALUES ([[[(-1, 1), (1, 1), (1, 3), (-1, 3)]]], 'Click North'); +INSERT INTO polygons_01862 VALUES ([[[(-3, 1), (-3, -1), (0, -1), (0, 1)]]], 'Click South'); +INSERT INTO polygons_01862 VALUES ([[[(-1, -1), (1, -1), (1, -3), (-1, -3)]]], 'Click West'); + +DROP TABLE IF EXISTS points_01862; +CREATE TABLE points_01862 (x Float64, y Float64) ENGINE = Memory; +INSERT INTO points_01862 VALUES ( 0.1, 0.0); +INSERT INTO points_01862 VALUES (-0.1, 0.0); +INSERT INTO points_01862 VALUES ( 0.0, 1.1); +INSERT INTO points_01862 VALUES ( 0.0, -1.1); +INSERT INTO points_01862 VALUES ( 3.0, 3.0); + +DROP DICTIONARY IF EXISTS polygon_dictionary_01862; +CREATE DICTIONARY polygon_dictionary_01862 +( + key Array(Array(Array(Tuple(Float64, Float64)))), + name String +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(USER 'default' TABLE 'polygons_01862')) +LIFETIME(0) +LAYOUT(POLYGON()); + +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'polygon_dictionary_01862'; +SELECT tuple(x, y) as key, dictGet('polygon_dictionary_01862', 'name', key) FROM points_01862 FORMAT Null; +SELECT name, query_count, status, last_exception FROM system.dictionaries WHERE database = currentDatabase() AND name = 'polygon_dictionary_01862'; + +DROP DICTIONARY polygon_dictionary_01862; +DROP TABLE polygons_01862; +DROP TABLE points_01862; diff --git a/parser/testdata/01852_jit_if/ast.json b/parser/testdata/01852_jit_if/ast.json new file mode 100644 index 000000000..6a60e0097 --- /dev/null +++ b/parser/testdata/01852_jit_if/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001685411, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01852_jit_if/metadata.json b/parser/testdata/01852_jit_if/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01852_jit_if/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01852_jit_if/query.sql b/parser/testdata/01852_jit_if/query.sql new file mode 100644 index 000000000..9a9d74274 --- /dev/null +++ b/parser/testdata/01852_jit_if/query.sql @@ -0,0 +1,19 @@ +SET compile_expressions = 1; +SET min_count_to_compile_expression = 0; + +DROP TABLE IF EXISTS test_jit_nonnull; +CREATE TABLE test_jit_nonnull (value UInt8) ENGINE = TinyLog; +INSERT INTO test_jit_nonnull VALUES (0), (1); + +SELECT 'test_jit_nonnull'; +SELECT value, multiIf(value = 1, 2, value, 1, 0), if (value, 1, 0) FROM test_jit_nonnull; + +DROP TABLE IF EXISTS test_jit_nullable; +CREATE TABLE test_jit_nullable (value Nullable(UInt8)) ENGINE = TinyLog; +INSERT INTO test_jit_nullable VALUES (0), (1), (NULL); + +SELECT 'test_jit_nullable'; +SELECT value, multiIf(value = 1, 2, value, 1, 0), if (value, 1, 0) FROM test_jit_nullable; + +DROP TABLE 
test_jit_nonnull; +DROP TABLE test_jit_nullable; diff --git a/parser/testdata/01852_map_combinator/ast.json b/parser/testdata/01852_map_combinator/ast.json new file mode 100644 index 000000000..67f59ac6c --- /dev/null +++ b/parser/testdata/01852_map_combinator/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001328933, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01852_map_combinator/metadata.json b/parser/testdata/01852_map_combinator/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01852_map_combinator/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01852_map_combinator/query.sql b/parser/testdata/01852_map_combinator/query.sql new file mode 100644 index 000000000..09eadc7e7 --- /dev/null +++ b/parser/testdata/01852_map_combinator/query.sql @@ -0,0 +1,56 @@ +SET send_logs_level = 'fatal'; + +DROP TABLE IF EXISTS map_comb; +CREATE TABLE map_comb(a int, statusMap Map(UInt16, UInt32)) ENGINE = Log; + +INSERT INTO map_comb VALUES (1, map(1, 10, 2, 10, 3, 10)),(1, map(3, 10, 4, 10, 5, 10)),(2, map(4, 10, 5, 10, 6, 10)),(2, map(6, 10, 7, 10, 8, 10)),(3, map(1, 10, 2, 10, 3, 10)),(4, map(3, 10, 4, 10, 5, 10)),(5, map(4, 10, 5, 10, 6, 10)),(5, map(6, 10, 7, 10, 8, 10)); + +SELECT * FROM map_comb ORDER BY a, statusMap; +SELECT toTypeName(res), sumMap(statusMap) as res FROM map_comb; +SELECT toTypeName(res), sumWithOverflowMap(statusMap) as res FROM map_comb; +SELECT toTypeName(res), sumMapMerge(s) as res FROM (SELECT sumMapState(statusMap) AS s FROM map_comb); +SELECT minMap(statusMap) FROM map_comb; +SELECT maxMap(statusMap) FROM map_comb; +SELECT toTypeName(res), avgMap(statusMap) as res FROM map_comb; +SELECT countMap(statusMap) FROM map_comb; +SELECT a, sumMap(statusMap) FROM map_comb GROUP BY a ORDER BY a; + +DROP TABLE map_comb; + +-- check different types +select minMap(val) from values ('val Map(UUID, Int32)', + (map('01234567-89ab-cdef-0123-456789abcdef', 1)), + (map('01234567-89ab-cdef-0123-456789abcdef', 2))); +select minMap(val) from values ('val Map(String, String)', (map('1', '1')), (map('1', '2'))); +select minMap(val) from values ('val Map(FixedString(1), FixedString(1))', (map('1', '1')), (map('1', '2'))); +select minMap(val) from values ('val Map(UInt64, UInt64)', (map(1, 1)), (map(1, 2))); +select minMap(val) from values ('val Map(Date, Int16)', (map(1, 1)), (map(1, 2))); +select minMap(val) from values ('val Map(DateTime(\'Asia/Istanbul\'), Int32)', (map(1, 1)), (map(1, 2))); +select minMap(val) from values ('val Map(Enum16(\'a\'=1), Int16)', (map('a', 1)), (map('a', 2))); +select maxMap(val) from values ('val Map(String, String)', (map('1', '1')), (map('1', '2'))); +select minMap(val) from values ('val Map(Int128, Int128)', (map(1, 1)), (map(1, 2))); +select minMap(val) from values ('val Map(Int256, Int256)', (map(1, 1)), (map(1, 2))); +select minMap(val) from values ('val Map(UInt128, UInt128)', (map(1, 1)), (map(1, 2))); +select minMap(val) from values ('val Map(UInt256, UInt256)', (map(1, 1)), (map(1, 2))); + +select sumMap(map(1,2), 1, 2); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select sumMap(map(1,2), map(1,3)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +-- array and tuple arguments +select avgMap([1,1,1], [2,2,2]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select minMap((1,1)); -- { serverError 
ILLEGAL_TYPE_OF_ARGUMENT } +select minMap(([1,1,1],1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select minMap([1,1,1],1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select minMap([1,1,1]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select minMap(([1,1,1])); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +DROP TABLE IF EXISTS sum_map_decimal; + +CREATE TABLE sum_map_decimal(statusMap Map(UInt16,Decimal32(5))) ENGINE = Log; + +INSERT INTO sum_map_decimal VALUES (map(1,'1.0',2,'2.0',3,'3.0')), (map(3,'3.0',4,'4.0',5,'5.0')), (map(4,'4.0',5,'5.0',6,'6.0')), (map(6,'6.0',7,'7.0',8,'8.0')); + +SELECT sumMap(statusMap) FROM sum_map_decimal; +SELECT sumWithOverflowMap(statusMap) FROM sum_map_decimal; + +DROP TABLE sum_map_decimal; diff --git a/parser/testdata/01852_multiple_joins_with_union_join/ast.json b/parser/testdata/01852_multiple_joins_with_union_join/ast.json new file mode 100644 index 000000000..f9890c4b0 --- /dev/null +++ b/parser/testdata/01852_multiple_joins_with_union_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001297424, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01852_multiple_joins_with_union_join/metadata.json b/parser/testdata/01852_multiple_joins_with_union_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01852_multiple_joins_with_union_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01852_multiple_joins_with_union_join/query.sql b/parser/testdata/01852_multiple_joins_with_union_join/query.sql new file mode 100644 index 000000000..759f8a5e3 --- /dev/null +++ b/parser/testdata/01852_multiple_joins_with_union_join/query.sql @@ -0,0 +1,27 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS v1; +DROP TABLE IF EXISTS v2; + +CREATE TABLE v1 ( id Int32 ) ENGINE = MergeTree() ORDER BY id; +CREATE TABLE v2 ( value Int32 ) ENGINE = MergeTree() ORDER BY value; + +INSERT INTO v1 ( id ) VALUES (1); +INSERT INTO v2 ( value ) VALUES (1); + +SELECT * FROM v1 AS t1 +JOIN v1 AS t2 USING (id) +JOIN v2 AS n1 ON t1.id = n1.value +JOIN v2 AS n2 ON t1.id = n2.value; + +SELECT '---'; + +SELECT * FROM v1 AS t1 +JOIN v1 AS t2 USING (id) +CROSS JOIN v2 AS n1 +CROSS JOIN v2 AS n2; + +SELECT * FROM v1 AS t1 JOIN v1 AS t2 USING (id) JOIN v1 AS t3 USING (value); -- { serverError UNKNOWN_IDENTIFIER } + +DROP TABLE IF EXISTS v1; +DROP TABLE IF EXISTS v2; diff --git a/parser/testdata/01852_s2_get_neighbours/ast.json b/parser/testdata/01852_s2_get_neighbours/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01852_s2_get_neighbours/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01852_s2_get_neighbours/metadata.json b/parser/testdata/01852_s2_get_neighbours/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01852_s2_get_neighbours/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01852_s2_get_neighbours/query.sql b/parser/testdata/01852_s2_get_neighbours/query.sql new file mode 100644 index 000000000..89232c512 --- /dev/null +++ b/parser/testdata/01852_s2_get_neighbours/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: needs s2 + +select s2GetNeighbors(5074766849661468672); diff --git a/parser/testdata/01853_s2_cells_intersect/ast.json b/parser/testdata/01853_s2_cells_intersect/ast.json 
new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01853_s2_cells_intersect/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01853_s2_cells_intersect/metadata.json b/parser/testdata/01853_s2_cells_intersect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01853_s2_cells_intersect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01853_s2_cells_intersect/query.sql b/parser/testdata/01853_s2_cells_intersect/query.sql new file mode 100644 index 000000000..5ab7e7aa9 --- /dev/null +++ b/parser/testdata/01853_s2_cells_intersect/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: needs s2 + +select s2CellsIntersect(9926595209846587392, 9926594385212866560); +select s2CellsIntersect(9926595209846587392, 9937259648002293760); + + +SELECT s2CellsIntersect(9926595209846587392, 9223372036854775806); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/01854_dictionary_range_hashed_min_max_attr/ast.json b/parser/testdata/01854_dictionary_range_hashed_min_max_attr/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01854_dictionary_range_hashed_min_max_attr/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01854_dictionary_range_hashed_min_max_attr/metadata.json b/parser/testdata/01854_dictionary_range_hashed_min_max_attr/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01854_dictionary_range_hashed_min_max_attr/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01854_dictionary_range_hashed_min_max_attr/query.sql b/parser/testdata/01854_dictionary_range_hashed_min_max_attr/query.sql new file mode 100644 index 000000000..0029971f0 --- /dev/null +++ b/parser/testdata/01854_dictionary_range_hashed_min_max_attr/query.sql @@ -0,0 +1,13 @@ +-- Tags: no-parallel + +DROP DICTIONARY IF EXISTS dict_01864; +CREATE DICTIONARY dict_01864 +( + `id` UInt64, + `value` String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'does_not_exists')) +LIFETIME(MIN 0 MAX 1000) +LAYOUT(RANGE_HASHED()) +RANGE(MIN first MAX last) -- { serverError INCORRECT_DICTIONARY_DEFINITION } diff --git a/parser/testdata/01854_s2_cap_contains/ast.json b/parser/testdata/01854_s2_cap_contains/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01854_s2_cap_contains/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01854_s2_cap_contains/metadata.json b/parser/testdata/01854_s2_cap_contains/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01854_s2_cap_contains/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01854_s2_cap_contains/query.sql b/parser/testdata/01854_s2_cap_contains/query.sql new file mode 100644 index 000000000..43a9d8705 --- /dev/null +++ b/parser/testdata/01854_s2_cap_contains/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: needs s2 + +select s2CapContains(1157339245694594829, 1.0, 1157347770437378819); +select s2CapContains(1157339245694594829, 1.0, 1152921504606846977); +select s2CapContains(1157339245694594829, 3.14, 1157339245694594829); + +select s2CapContains(nan, 3.14, 1157339245694594829); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select s2CapContains(1157339245694594829, nan, 1157339245694594829); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select 
s2CapContains(1157339245694594829, 3.14, nan); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + + +select s2CapContains(toUInt64(-1), -1.0, toUInt64(-1)); -- { serverError BAD_ARGUMENTS } +select s2CapContains(toUInt64(-1), 9999.9999, toUInt64(-1)); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/01854_s2_cap_union/ast.json b/parser/testdata/01854_s2_cap_union/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01854_s2_cap_union/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01854_s2_cap_union/metadata.json b/parser/testdata/01854_s2_cap_union/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01854_s2_cap_union/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01854_s2_cap_union/query.sql b/parser/testdata/01854_s2_cap_union/query.sql new file mode 100644 index 000000000..8f8e20901 --- /dev/null +++ b/parser/testdata/01854_s2_cap_union/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: needs s2 + +select s2CapUnion(3814912406305146967, 1.0, 1157347770437378819, 1.0); +select s2CapUnion(1157339245694594829, -1.0, 1152921504606846977, -1.0); +select s2CapUnion(1157339245694594829, toFloat64(toUInt64(-1)), 1157339245694594829, toFloat64(toUInt64(-1))); + + +select s2CapUnion(nan, 3.14, 1157339245694594829, 3.14); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select s2CapUnion(1157339245694594829, nan, 1157339245694594829, 3.14); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select s2CapUnion(1157339245694594829, 3.14, nan, 3.14); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select s2CapUnion(1157339245694594829, 3.14, 1157339245694594829, nan); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/01855_jit_comparison_constant_result/ast.json b/parser/testdata/01855_jit_comparison_constant_result/ast.json new file mode 100644 index 000000000..342a964f4 --- /dev/null +++ b/parser/testdata/01855_jit_comparison_constant_result/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001252354, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01855_jit_comparison_constant_result/metadata.json b/parser/testdata/01855_jit_comparison_constant_result/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01855_jit_comparison_constant_result/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01855_jit_comparison_constant_result/query.sql b/parser/testdata/01855_jit_comparison_constant_result/query.sql new file mode 100644 index 000000000..51cf9aa1d --- /dev/null +++ b/parser/testdata/01855_jit_comparison_constant_result/query.sql @@ -0,0 +1,36 @@ +SET compile_expressions = 1; +SET min_count_to_compile_expression = 0; + +SELECT 'ComparisonOperator column with same column'; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table (a UInt64) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO test_table VALUES (1); + +SELECT test_table.a FROM test_table ORDER BY (test_table.a > test_table.a) + 1; +SELECT test_table.a FROM test_table ORDER BY (test_table.a >= test_table.a) + 1; + +SELECT test_table.a FROM test_table ORDER BY (test_table.a < test_table.a) + 1; +SELECT test_table.a FROM test_table ORDER BY (test_table.a <= test_table.a) + 1; + +SELECT test_table.a FROM test_table ORDER BY 
(test_table.a == test_table.a) + 1; +SELECT test_table.a FROM test_table ORDER BY (test_table.a != test_table.a) + 1; + +DROP TABLE test_table; + +SELECT 'ComparisonOperator column with alias on same column'; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table (a UInt64, b ALIAS a, c ALIAS b) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO test_table VALUES (1); + +SELECT test_table.a FROM test_table ORDER BY (test_table.a > test_table.b) + 1 AND (test_table.a > test_table.c) + 1; +SELECT test_table.a FROM test_table ORDER BY (test_table.a >= test_table.b) + 1 AND (test_table.a >= test_table.c) + 1; + +SELECT test_table.a FROM test_table ORDER BY (test_table.a < test_table.b) + 1 AND (test_table.a < test_table.c) + 1; +SELECT test_table.a FROM test_table ORDER BY (test_table.a <= test_table.b) + 1 AND (test_table.a <= test_table.c) + 1; + +SELECT test_table.a FROM test_table ORDER BY (test_table.a == test_table.b) + 1 AND (test_table.a == test_table.c) + 1; +SELECT test_table.a FROM test_table ORDER BY (test_table.a != test_table.b) + 1 AND (test_table.a != test_table.c) + 1; + +DROP TABLE test_table; diff --git a/parser/testdata/01856_create_function/ast.json b/parser/testdata/01856_create_function/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01856_create_function/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01856_create_function/metadata.json b/parser/testdata/01856_create_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01856_create_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01856_create_function/query.sql b/parser/testdata/01856_create_function/query.sql new file mode 100644 index 000000000..8efb346d5 --- /dev/null +++ b/parser/testdata/01856_create_function/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-parallel + +CREATE FUNCTION 01856_test_function_0 AS (a, b, c) -> a * b * c; +SELECT 01856_test_function_0(2, 3, 4); +SELECT isConstant(01856_test_function_0(1, 2, 3)); +DROP FUNCTION 01856_test_function_0; +CREATE FUNCTION 01856_test_function_1 AS (a, b) -> 01856_test_function_1(a, b) + 01856_test_function_1(a, b); --{serverError CANNOT_CREATE_RECURSIVE_FUNCTION} +CREATE FUNCTION cast AS a -> a + 1; --{serverError FUNCTION_ALREADY_EXISTS} +CREATE FUNCTION sum AS (a, b) -> a + b; --{serverError FUNCTION_ALREADY_EXISTS} +CREATE FUNCTION 01856_test_function_2 AS (a, b) -> a + b; +CREATE FUNCTION 01856_test_function_2 AS (a) -> a || '!!!'; --{serverError FUNCTION_ALREADY_EXISTS} +DROP FUNCTION 01856_test_function_2; +DROP FUNCTION unknown_function; -- {serverError UNKNOWN_FUNCTION} +DROP FUNCTION CAST; -- {serverError CANNOT_DROP_FUNCTION} diff --git a/parser/testdata/01860_Distributed__shard_num_GROUP_BY/ast.json b/parser/testdata/01860_Distributed__shard_num_GROUP_BY/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01860_Distributed__shard_num_GROUP_BY/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01860_Distributed__shard_num_GROUP_BY/metadata.json b/parser/testdata/01860_Distributed__shard_num_GROUP_BY/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01860_Distributed__shard_num_GROUP_BY/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01860_Distributed__shard_num_GROUP_BY/query.sql b/parser/testdata/01860_Distributed__shard_num_GROUP_BY/query.sql new file mode 100644 index 
index 000000000..5dfdb9482
--- /dev/null
+++ b/parser/testdata/01860_Distributed__shard_num_GROUP_BY/query.sql
@@ -0,0 +1,16 @@
+-- Tags: shard
+
+-- GROUP BY _shard_num
+SELECT _shard_num, count() FROM remote('127.0.0.{1,2}', system.one) GROUP BY _shard_num ORDER BY _shard_num;
+SELECT _shard_num s, count() FROM remote('127.0.0.{1,2}', system.one) GROUP BY _shard_num ORDER BY _shard_num;
+
+SELECT _shard_num + 1, count() FROM remote('127.0.0.{1,2}', system.one) GROUP BY _shard_num + 1 ORDER BY _shard_num + 1;
+SELECT _shard_num + 1 s, count() FROM remote('127.0.0.{1,2}', system.one) GROUP BY _shard_num + 1 ORDER BY _shard_num + 1;
+
+SELECT _shard_num + dummy, count() FROM remote('127.0.0.{1,2}', system.one) GROUP BY _shard_num + dummy ORDER BY _shard_num + dummy;
+SELECT _shard_num + dummy s, count() FROM remote('127.0.0.{1,2}', system.one) GROUP BY _shard_num + dummy ORDER BY _shard_num + dummy;
+
+SELECT _shard_num FROM remote('127.0.0.{1,2}', system.one) ORDER BY _shard_num;
+SELECT _shard_num s FROM remote('127.0.0.{1,2}', system.one) ORDER BY _shard_num;
+
+SELECT _shard_num, count() FROM remote('127.0.0.{1,2}', system.one) GROUP BY _shard_num order by _shard_num;
diff --git a/parser/testdata/01861_explain_pipeline/ast.json b/parser/testdata/01861_explain_pipeline/ast.json
new file mode 100644
index 000000000..cd55c9f57
--- /dev/null
+++ b/parser/testdata/01861_explain_pipeline/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery test (children 1)"
+		},
+		{
+			"explain": " Identifier test"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001133463,
+		"rows_read": 2,
+		"bytes_read": 60
+	}
+}
diff --git a/parser/testdata/01861_explain_pipeline/metadata.json b/parser/testdata/01861_explain_pipeline/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01861_explain_pipeline/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01861_explain_pipeline/query.sql b/parser/testdata/01861_explain_pipeline/query.sql
new file mode 100644
index 000000000..cd42e4bc5
--- /dev/null
+++ b/parser/testdata/01861_explain_pipeline/query.sql
@@ -0,0 +1,11 @@
+DROP TABLE IF EXISTS test;
+CREATE TABLE test(a Int, b Int) Engine=ReplacingMergeTree order by a SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi';
+set optimize_on_insert = 0;
+INSERT INTO test select number, number from numbers(5);
+INSERT INTO test select number, number from numbers(5,2);
+set max_threads =1;
+explain pipeline select * from test final SETTINGS enable_vertical_final = 0;
+select * from test final;
+set max_threads =2;
+explain pipeline select * from test final SETTINGS enable_vertical_final = 0;
+DROP TABLE test;
diff --git a/parser/testdata/01865_aggregator_overflow_row/ast.json b/parser/testdata/01865_aggregator_overflow_row/ast.json
new file mode 100644
index 000000000..a2f024f1b
--- /dev/null
+++ b/parser/testdata/01865_aggregator_overflow_row/ast.json
@@ -0,0 +1,43 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function uniqCombined (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier number"
+		}
+	],
+
+	"rows": 7,
+
+	"statistics":
+	{
+		"elapsed": 0.001333204,
+		"rows_read": 7,
+		"bytes_read": 266
+	}
+}
diff --git a/parser/testdata/01865_aggregator_overflow_row/metadata.json b/parser/testdata/01865_aggregator_overflow_row/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01865_aggregator_overflow_row/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01865_aggregator_overflow_row/query.sql b/parser/testdata/01865_aggregator_overflow_row/query.sql
new file mode 100644
index 000000000..269b6fdfe
--- /dev/null
+++ b/parser/testdata/01865_aggregator_overflow_row/query.sql
@@ -0,0 +1,21 @@
+SELECT uniqCombined(number)
+FROM numbers(10000)
+GROUP BY number
+    WITH TOTALS
+ORDER BY number DESC
+LIMIT 10
+SETTINGS
+    /* force aggregates serialization to trigger the issue with */
+    max_bytes_before_external_group_by=1,
+    max_bytes_ratio_before_external_group_by=0,
+    /* overflow row: */
+    max_rows_to_group_by=10000000000,
+    group_by_overflow_mode='any',
+    totals_mode='before_having',
+    /* this is to account memory under 4MB (for max_bytes_before_external_group_by) to use less rows */
+    max_untracked_memory=0,
+    group_by_two_level_threshold=10000,
+    /* explicitly */
+    max_block_size=1000,
+    max_threads=1
+;
diff --git a/parser/testdata/01866_aggregate_function_interval_length_sum/ast.json b/parser/testdata/01866_aggregate_function_interval_length_sum/ast.json
new file mode 100644
index 000000000..597881b4c
--- /dev/null
+++ b/parser/testdata/01866_aggregate_function_interval_length_sum/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery interval (children 1)"
+		},
+		{
+			"explain": " Identifier interval"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001122605,
+		"rows_read": 2,
+		"bytes_read": 68
+	}
+}
diff --git a/parser/testdata/01866_aggregate_function_interval_length_sum/metadata.json b/parser/testdata/01866_aggregate_function_interval_length_sum/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01866_aggregate_function_interval_length_sum/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01866_aggregate_function_interval_length_sum/query.sql b/parser/testdata/01866_aggregate_function_interval_length_sum/query.sql
new file mode 100644
index 000000000..4da308453
--- /dev/null
+++ b/parser/testdata/01866_aggregate_function_interval_length_sum/query.sql
@@ -0,0 +1,26 @@
+DROP TABLE IF EXISTS interval;
+DROP TABLE IF EXISTS fl_interval;
+DROP TABLE IF EXISTS dt_interval;
+DROP TABLE IF EXISTS date_interval;
+
+CREATE TABLE interval ( `id` String, `start` Int64, `end` Int64 ) ENGINE = MergeTree ORDER BY start;
+INSERT INTO interval VALUES ('a', 1, 3), ('a', 1, 3), ('a', 2, 4), ('a', 1, 1), ('a', 5, 6), ('a', 5, 7), ('b', 10, 12), ('b', 13, 19), ('b', 14, 16), ('c', -1, 1), ('c', -2, -1);
+
+CREATE TABLE fl_interval ( `id` String, `start` Float, `end` Float ) ENGINE = MergeTree ORDER BY start;
+INSERT INTO fl_interval VALUES ('a', 1.1, 3.2), ('a', 1.5, 3.6), ('a', 4.0, 5.0);
+
+CREATE TABLE dt_interval ( `id` String, `start` DateTime, `end` DateTime ) ENGINE = MergeTree ORDER BY start;
+INSERT INTO dt_interval VALUES ('a', '2020-01-01 02:11:22', '2020-01-01 03:12:31'), ('a', '2020-01-01 01:12:30', '2020-01-01 02:50:11');
+
+CREATE TABLE date_interval ( `id` String, `start` Date, `end` Date ) ENGINE = MergeTree ORDER BY start;
+INSERT INTO date_interval VALUES ('a', '2020-01-01', '2020-01-04'), ('a', '2020-01-03', '2020-01-08 02:50:11');
+
+SELECT id, intervalLengthSum(start, end), toTypeName(intervalLengthSum(start, end)) FROM interval GROUP BY id ORDER BY id;
+SELECT id, 3.4 < intervalLengthSum(start, end) AND intervalLengthSum(start, end) < 3.6, toTypeName(intervalLengthSum(start, end)) FROM fl_interval GROUP BY id ORDER BY id;
+SELECT id, intervalLengthSum(start, end), toTypeName(intervalLengthSum(start, end)) FROM dt_interval GROUP BY id ORDER BY id;
+SELECT id, intervalLengthSum(start, end), toTypeName(intervalLengthSum(start, end)) FROM date_interval GROUP BY id ORDER BY id;
+
+DROP TABLE interval;
+DROP TABLE fl_interval;
+DROP TABLE dt_interval;
+DROP TABLE date_interval;
diff --git a/parser/testdata/01866_bit_positions_to_array/ast.json b/parser/testdata/01866_bit_positions_to_array/ast.json
new file mode 100644
index 000000000..890885b81
--- /dev/null
+++ b/parser/testdata/01866_bit_positions_to_array/ast.json
@@ -0,0 +1,37 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Literal 'Int8'"
+		}
+	],
+
+	"rows": 5,
+
+	"statistics":
+	{
+		"elapsed": 0.001237675,
+		"rows_read": 5,
+		"bytes_read": 175
+	}
+}
diff --git a/parser/testdata/01866_bit_positions_to_array/metadata.json b/parser/testdata/01866_bit_positions_to_array/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01866_bit_positions_to_array/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01866_bit_positions_to_array/query.sql b/parser/testdata/01866_bit_positions_to_array/query.sql
new file mode 100644
index 000000000..49c947175
--- /dev/null
+++ b/parser/testdata/01866_bit_positions_to_array/query.sql
@@ -0,0 +1,55 @@
+SELECT 'Int8';
+SELECT toInt8(0), bitPositionsToArray(toInt8(0));
+SELECT toInt8(1), bitPositionsToArray(toInt8(1));
+SELECT toInt8(-1), bitPositionsToArray(toInt8(-1));
+SELECT toInt8(127), bitPositionsToArray(toInt8(127));
+SELECT toInt8(128), bitPositionsToArray(toInt8(128));
+
+SELECT 'Int16';
+SELECT toInt16(0), bitPositionsToArray(toInt16(0));
+SELECT toInt16(1), bitPositionsToArray(toInt16(1));
+SELECT toInt16(-1), bitPositionsToArray(toInt16(-1));
+select toInt16(32765), bitPositionsToArray(toInt16(32765));
+select toInt16(32768), bitPositionsToArray(toInt16(32768));
+
+SELECT 'Int32';
+SELECT toInt32(0), bitPositionsToArray(toInt32(0));
+SELECT toInt32(1), bitPositionsToArray(toInt32(1));
+
+SELECT 'Int64';
+SELECT toInt64(0), bitPositionsToArray(toInt64(0));
+SELECT toInt64(1), bitPositionsToArray(toInt64(1));
+
+SELECT 'Int128';
+SELECT toInt128(0), bitPositionsToArray(toInt128(0));
+SELECT toInt128(1), bitPositionsToArray(toInt128(1));
+
+SELECT 'Int256';
+SELECT toInt256(0), bitPositionsToArray(toInt256(0));
+SELECT toInt256(1), bitPositionsToArray(toInt256(1));
+
+SELECT 'UInt8';
+SELECT toUInt8(0), bitPositionsToArray(toUInt8(0));
+SELECT toUInt8(1), bitPositionsToArray(toUInt8(1));
+SELECT toUInt8(128), bitPositionsToArray(toUInt8(128));
+
+SELECT 'UInt16';
+SELECT toUInt16(0), bitPositionsToArray(toUInt16(0));
+SELECT toUInt16(1), bitPositionsToArray(toUInt16(1));
+
+SELECT 'UInt32';
+SELECT toUInt32(0), bitPositionsToArray(toUInt32(0));
+SELECT toUInt32(1), bitPositionsToArray(toUInt32(1));
+
+SELECT 'UInt64';
+SELECT toUInt64(0), bitPositionsToArray(toUInt64(0));
+SELECT toUInt64(1), bitPositionsToArray(toUInt64(1));
+
+SELECT 'UInt128';
+SELECT toUInt128(0), bitPositionsToArray(toUInt128(0));
+SELECT toUInt128(1), bitPositionsToArray(toUInt128(1));
+SELECT toUInt128(-1), bitPositionsToArray(toUInt128(1));
+
+SELECT 'UInt256';
+SELECT toUInt256(0), bitPositionsToArray(toUInt256(0));
+SELECT toUInt256(1), bitPositionsToArray(toUInt256(1));
diff --git a/parser/testdata/01866_datetime64_cmp_with_constant/ast.json b/parser/testdata/01866_datetime64_cmp_with_constant/ast.json
new file mode 100644
index 000000000..08e72cb50
--- /dev/null
+++ b/parser/testdata/01866_datetime64_cmp_with_constant/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "CreateQuery dt64test (children 1)"
+		},
+		{
+			"explain": " Identifier dt64test"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001140454,
+		"rows_read": 2,
+		"bytes_read": 69
+	}
+}
diff --git a/parser/testdata/01866_datetime64_cmp_with_constant/metadata.json b/parser/testdata/01866_datetime64_cmp_with_constant/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01866_datetime64_cmp_with_constant/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01866_datetime64_cmp_with_constant/query.sql b/parser/testdata/01866_datetime64_cmp_with_constant/query.sql
new file mode 100644
index 000000000..15edf8b2f
--- /dev/null
+++ b/parser/testdata/01866_datetime64_cmp_with_constant/query.sql
@@ -0,0 +1,42 @@
+CREATE TABLE dt64test
+(
+    `dt64_column` DateTime64(3),
+    `dt_column` DateTime DEFAULT toDateTime(dt64_column)
+)
+ENGINE = MergeTree
+PARTITION BY toYYYYMM(dt64_column)
+ORDER BY dt64_column;
+
+INSERT INTO dt64test (`dt64_column`) VALUES ('2020-01-13 13:37:00');
+
+SELECT 'dt64 < const dt' FROM dt64test WHERE dt64_column < toDateTime('2020-01-13 13:37:00');
+SELECT 'dt64 < dt' FROM dt64test WHERE dt64_column < materialize(toDateTime('2020-01-13 13:37:00'));
+SELECT 'dt < const dt64' FROM dt64test WHERE dt_column < toDateTime64('2020-01-13 13:37:00', 3);
+SELECT 'dt < dt64' FROM dt64test WHERE dt_column < materialize(toDateTime64('2020-01-13 13:37:00', 3));
+
+SELECT 'dt64 <= const dt' FROM dt64test WHERE dt64_column <= toDateTime('2020-01-13 13:37:00');
+SELECT 'dt64 <= dt' FROM dt64test WHERE dt64_column <= materialize(toDateTime('2020-01-13 13:37:00'));
+SELECT 'dt <= const dt64' FROM dt64test WHERE dt_column <= toDateTime64('2020-01-13 13:37:00', 3);
+SELECT 'dt <= dt64' FROM dt64test WHERE dt_column <= materialize(toDateTime64('2020-01-13 13:37:00', 3));
+
+SELECT 'dt64 = const dt' FROM dt64test WHERE dt64_column = toDateTime('2020-01-13 13:37:00');
+SELECT 'dt64 = dt' FROM dt64test WHERE dt64_column = materialize(toDateTime('2020-01-13 13:37:00'));
+SELECT 'dt = const dt64' FROM dt64test WHERE dt_column = toDateTime64('2020-01-13 13:37:00', 3);
+SELECT 'dt = dt64' FROM dt64test WHERE dt_column = materialize(toDateTime64('2020-01-13 13:37:00', 3));
+
+SELECT 'dt64 >= const dt' FROM dt64test WHERE dt64_column >= toDateTime('2020-01-13 13:37:00');
+SELECT 'dt64 >= dt' FROM dt64test WHERE dt64_column >= materialize(toDateTime('2020-01-13 13:37:00'));
+SELECT 'dt >= const dt64' FROM dt64test WHERE dt_column >= toDateTime64('2020-01-13 13:37:00', 3);
+SELECT 'dt >= dt64' FROM dt64test WHERE dt_column >= materialize(toDateTime64('2020-01-13 13:37:00', 3));
+
+SELECT 'dt64 > const dt' FROM dt64test WHERE dt64_column > toDateTime('2020-01-13 13:37:00');
+SELECT 'dt64 > dt' FROM dt64test WHERE dt64_column > materialize(toDateTime('2020-01-13 13:37:00'));
+SELECT 'dt > const dt64' FROM dt64test WHERE dt_column > toDateTime64('2020-01-13 13:37:00', 3);
+SELECT 'dt > dt64' FROM dt64test WHERE dt_column > materialize(toDateTime64('2020-01-13 13:37:00', 3));
+
+SELECT 'dt64 != const dt' FROM dt64test WHERE dt64_column != toDateTime('2020-01-13 13:37:00');
+SELECT 'dt64 != dt' FROM dt64test WHERE dt64_column != materialize(toDateTime('2020-01-13 13:37:00'));
+SELECT 'dt != const dt64' FROM dt64test WHERE dt_column != toDateTime64('2020-01-13 13:37:00', 3);
+SELECT 'dt != dt64' FROM dt64test WHERE dt_column != materialize(toDateTime64('2020-01-13 13:37:00', 3));
+
+DROP TABLE dt64test;
diff --git a/parser/testdata/01866_split_by_regexp/ast.json b/parser/testdata/01866_split_by_regexp/ast.json
new file mode 100644
index 000000000..2f020185f
--- /dev/null
+++ b/parser/testdata/01866_split_by_regexp/ast.json
@@ -0,0 +1,79 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 2)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function splitByRegexp (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 2)"
+		},
+		{
+			"explain": "      Literal '\\\\d+'"
+		},
+		{
+			"explain": "      Identifier x"
+		},
+		{
+			"explain": "   TablesInSelectQuery (children 1)"
+		},
+		{
+			"explain": "    TablesInSelectQueryElement (children 1)"
+		},
+		{
+			"explain": "     TableExpression (children 1)"
+		},
+		{
+			"explain": "      Subquery (children 1)"
+		},
+		{
+			"explain": "       SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": "        ExpressionList (children 1)"
+		},
+		{
+			"explain": "         SelectQuery (children 1)"
+		},
+		{
+			"explain": "          ExpressionList (children 1)"
+		},
+		{
+			"explain": "           Function arrayJoin (alias x) (children 1)"
+		},
+		{
+			"explain": "            ExpressionList (children 1)"
+		},
+		{
+			"explain": "             Literal Array_['a1ba5ba8b', 'a11ba5689ba891011b']"
+		}
+	],
+
+	"rows": 19,
+
+	"statistics":
+	{
+		"elapsed": 0.001378584,
+		"rows_read": 19,
+		"bytes_read": 814
+	}
+}
diff --git a/parser/testdata/01866_split_by_regexp/metadata.json b/parser/testdata/01866_split_by_regexp/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01866_split_by_regexp/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01866_split_by_regexp/query.sql b/parser/testdata/01866_split_by_regexp/query.sql
new file mode 100644
index 000000000..bc25d3e10
--- /dev/null
+++ b/parser/testdata/01866_split_by_regexp/query.sql
@@ -0,0 +1,25 @@
+select splitByRegexp('\\d+', x) from (select arrayJoin(['a1ba5ba8b', 'a11ba5689ba891011b']) x);
+select splitByRegexp('', 'abcde');
+select splitByRegexp('<[^<>]*>', x) from (select arrayJoin(['<h1>hello<h2>world</h2></h1>', 'gbye<split>bug']) x);
+select splitByRegexp('ab', '');
+select splitByRegexp('', '');
+
+SELECT 'Test fallback of splitByRegexp to splitByChar if regexp is trivial';
+select splitByRegexp(' ', 'a b c');
+select splitByRegexp('-', 'a-b-c');
+select splitByRegexp('.', 'a.b.c');
+select splitByRegexp('^', 'a^b^c');
+select splitByRegexp('$', 'a$b$c');
+select splitByRegexp('+', 'a+b+c'); -- { serverError CANNOT_COMPILE_REGEXP }
+select splitByRegexp('?', 'a?b?c'); -- { serverError CANNOT_COMPILE_REGEXP }
+select splitByRegexp('(', 'a(b(c'); -- { serverError CANNOT_COMPILE_REGEXP }
+select splitByRegexp(')', 'a)b)c');
+select splitByRegexp('[', 'a[b[c'); -- { serverError CANNOT_COMPILE_REGEXP }
+select splitByRegexp(']', 'a]b]c');
+select splitByRegexp('{', 'a{b{c');
+select splitByRegexp('}', 'a}b}c');
+select splitByRegexp('|', 'a|b|c');
+select splitByRegexp('\\', 'a\\b\\c');
+
+SELECT 'AST Fuzzer failure';
+SELECT splitByRegexp(materialize(1), NULL, 3) -- { serverError ILLEGAL_COLUMN }
diff --git a/parser/testdata/01866_view_persist_settings/ast.json b/parser/testdata/01866_view_persist_settings/ast.json
new file mode 100644
index 000000000..6fb73635a
--- /dev/null
+++ b/parser/testdata/01866_view_persist_settings/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery view_no_nulls (children 1)"
+		},
+		{
+			"explain": " Identifier view_no_nulls"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001173167,
+		"rows_read": 2,
+		"bytes_read": 78
+	}
+}
diff --git a/parser/testdata/01866_view_persist_settings/metadata.json b/parser/testdata/01866_view_persist_settings/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01866_view_persist_settings/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01866_view_persist_settings/query.sql b/parser/testdata/01866_view_persist_settings/query.sql
new file mode 100644
index 000000000..1c300b8e2
--- /dev/null
+++ b/parser/testdata/01866_view_persist_settings/query.sql
@@ -0,0 +1,97 @@
+DROP TABLE IF EXISTS view_no_nulls;
+DROP TABLE IF EXISTS view_no_nulls_set;
+DROP TABLE IF EXISTS view_nulls_set;
+DROP TABLE IF EXISTS view_nulls;
+
+SET join_use_nulls = 0;
+
+CREATE OR REPLACE VIEW view_no_nulls AS
+SELECT * FROM ( SELECT number + 1 AS a, number + 11 AS b FROM numbers(2) ) AS t1
+FULL JOIN ( SELECT number + 2 AS a, number + 22 AS c FROM numbers(2) ) AS t2
+USING a ORDER BY a;
+
+CREATE OR REPLACE VIEW view_nulls_set AS
+SELECT * FROM ( SELECT number + 1 AS a, number + 11 AS b FROM numbers(2) ) AS t1
+FULL JOIN ( SELECT number + 2 AS a, number + 22 AS c FROM numbers(2) ) AS t2
+USING a ORDER BY a
+SETTINGS join_use_nulls = 1;
+
+SET join_use_nulls = 1;
+
+CREATE OR REPLACE VIEW view_nulls AS
+SELECT * FROM ( SELECT number + 1 AS a, number + 11 AS b FROM numbers(2) ) AS t1
+FULL JOIN ( SELECT number + 2 AS a, number + 22 AS c FROM numbers(2) ) AS t2
+USING a ORDER BY a;
+
+CREATE OR REPLACE VIEW view_no_nulls_set AS
+SELECT * FROM ( SELECT number + 1 AS a, number + 11 AS b FROM numbers(2) ) AS t1
+FULL JOIN ( SELECT number + 2 AS a, number + 22 AS c FROM numbers(2) ) AS t2
+USING a ORDER BY a
+SETTINGS join_use_nulls = 0;
+
+SET join_use_nulls = 1;
+
+SELECT 'join_use_nulls = 1';
+
+SELECT '-';
+SELECT * FROM view_no_nulls; -- { serverError INCORRECT_QUERY }
+SELECT '-';
+SELECT * FROM view_no_nulls_set;
+SELECT '-';
+SELECT * FROM view_nulls_set;
+SELECT '-';
+SELECT * FROM view_nulls;
+
+SET join_use_nulls = 0;
+
+SELECT 'join_use_nulls = 0';
+
+SELECT '-';
+SELECT * FROM view_no_nulls;
+SELECT '-';
+SELECT * FROM view_no_nulls_set;
+SELECT '-';
+SELECT * FROM view_nulls_set;
+SELECT '-';
+SELECT * FROM view_nulls;
+
+DETACH TABLE view_no_nulls;
+DETACH TABLE view_no_nulls_set;
+DETACH TABLE view_nulls_set;
+DETACH TABLE view_nulls;
+
+ATTACH TABLE view_no_nulls;
+ATTACH TABLE view_no_nulls_set;
+ATTACH TABLE view_nulls_set;
+ATTACH TABLE view_nulls;
+
+SET join_use_nulls = 1;
+
+SELECT 'join_use_nulls = 1';
+
+SELECT '-';
+SELECT * FROM view_no_nulls; -- { serverError INCORRECT_QUERY }
+SELECT '-';
+SELECT * FROM view_no_nulls_set;
+SELECT '-';
+SELECT * FROM view_nulls_set;
+SELECT '-';
+SELECT * FROM view_nulls;
+
+SET join_use_nulls = 0;
+
+SELECT 'join_use_nulls = 0';
+
+SELECT '-';
+SELECT * FROM view_no_nulls;
+SELECT '-';
+SELECT * FROM view_no_nulls_set;
+SELECT '-';
+SELECT * FROM view_nulls_set;
+SELECT '-';
+SELECT * FROM view_nulls;
+
+DROP TABLE IF EXISTS view_no_nulls;
+DROP TABLE IF EXISTS view_no_nulls_set;
+DROP TABLE IF EXISTS view_nulls_set;
+DROP TABLE IF EXISTS view_nulls;
diff --git a/parser/testdata/01867_fix_storage_memory_mutation/ast.json b/parser/testdata/01867_fix_storage_memory_mutation/ast.json
new file mode 100644
index 000000000..0ac85df49
--- /dev/null
+++ b/parser/testdata/01867_fix_storage_memory_mutation/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery mem_test (children 1)"
+		},
+		{
+			"explain": " Identifier mem_test"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001355444,
+		"rows_read": 2,
+		"bytes_read": 68
+	}
+}
diff --git a/parser/testdata/01867_fix_storage_memory_mutation/metadata.json b/parser/testdata/01867_fix_storage_memory_mutation/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01867_fix_storage_memory_mutation/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01867_fix_storage_memory_mutation/query.sql b/parser/testdata/01867_fix_storage_memory_mutation/query.sql
new file mode 100644
index 000000000..4cb80036d
--- /dev/null
+++ b/parser/testdata/01867_fix_storage_memory_mutation/query.sql
@@ -0,0 +1,32 @@
+DROP TABLE IF EXISTS mem_test;
+
+CREATE TABLE mem_test
+(
+    `a` Int64,
+    `b` Int64
+)
+ENGINE = Memory;
+
+SET max_block_size = 3;
+
+INSERT INTO mem_test SELECT
+    number,
+    number
+FROM numbers(100);
+
+ALTER TABLE mem_test
+    UPDATE a = 0 WHERE b = 99;
+ALTER TABLE mem_test
+    UPDATE a = 0 WHERE b = 99;
+ALTER TABLE mem_test
+    UPDATE a = 0 WHERE b = 99;
+ALTER TABLE mem_test
+    UPDATE a = 0 WHERE b = 99;
+ALTER TABLE mem_test
+    UPDATE a = 0 WHERE b = 99;
+
+SELECT *
+FROM mem_test
+FORMAT Null;
+
+DROP TABLE mem_test;
diff --git a/parser/testdata/01867_support_datetime64_version_column/ast.json b/parser/testdata/01867_support_datetime64_version_column/ast.json
new file mode 100644
index 000000000..cbf5b3151
--- /dev/null
+++ b/parser/testdata/01867_support_datetime64_version_column/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery replacing (children 1)"
+		},
+		{
+			"explain": " Identifier replacing"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001652956,
+		"rows_read": 2,
+		"bytes_read": 70
+	}
+}
diff --git a/parser/testdata/01867_support_datetime64_version_column/metadata.json b/parser/testdata/01867_support_datetime64_version_column/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01867_support_datetime64_version_column/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01867_support_datetime64_version_column/query.sql b/parser/testdata/01867_support_datetime64_version_column/query.sql
new file mode 100644
index 000000000..2f0ed1fdc
--- /dev/null
+++ b/parser/testdata/01867_support_datetime64_version_column/query.sql
@@ -0,0 +1,16 @@
+drop table if exists replacing;
+create table replacing( `A` Int64, `D` DateTime64(9, 'Asia/Istanbul'), `S` String) ENGINE = ReplacingMergeTree(D) ORDER BY A;
+
+insert into replacing values (1,'1970-01-01 08:25:46.300800000','a');
+insert into replacing values (2,'1970-01-01 08:25:46.300800002','b');
+insert into replacing values (1,'1970-01-01 08:25:46.300800003','a1');
+insert into replacing values (1,'1970-01-01 08:25:46.300800002','a2');
+insert into replacing values (2,'1970-01-01 08:25:46.300800004','b1');
+insert into replacing values (3,'1970-01-01 08:25:46.300800005','c1');
+insert into replacing values (2,'1970-01-01 08:25:46.300800005','a1');
+
+OPTIMIZE TABLE replacing FINAL;
+
+select * from replacing;
+
+drop table replacing;
diff --git a/parser/testdata/01868_order_by_fill_with_datetime64/ast.json b/parser/testdata/01868_order_by_fill_with_datetime64/ast.json
new file mode 100644
index 000000000..372150c33
--- /dev/null
+++ b/parser/testdata/01868_order_by_fill_with_datetime64/ast.json
@@ -0,0 +1,151 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 3)"
+		},
+		{
+			"explain": "   ExpressionList (children 2)"
+		},
+		{
+			"explain": "    Identifier n"
+		},
+		{
+			"explain": "    Identifier source"
+		},
+		{
+			"explain": "   TablesInSelectQuery (children 1)"
+		},
+		{
+			"explain": "    TablesInSelectQueryElement (children 1)"
+		},
+		{
+			"explain": "     TableExpression (children 1)"
+		},
+		{
+			"explain": "      Subquery (children 1)"
+		},
+		{
+			"explain": "       SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": "        ExpressionList (children 1)"
+		},
+		{
+			"explain": "         SelectQuery (children 3)"
+		},
+		{
+			"explain": "          ExpressionList (children 2)"
+		},
+		{
+			"explain": "           Function toDateTime64 (alias n) (children 1)"
+		},
+		{
+			"explain": "            ExpressionList (children 3)"
+		},
+		{
+			"explain": "             Function multiply (children 1)"
+		},
+		{
+			"explain": "              ExpressionList (children 2)"
+		},
+		{
+			"explain": "               Identifier number"
+		},
+		{
+			"explain": "               Literal UInt64_1000"
+		},
+		{
+			"explain": "             Literal UInt64_3"
+		},
+		{
+			"explain": "             Literal 'Asia\/Istanbul'"
+		},
+		{
+			"explain": "           Literal 'original' (alias source)"
+		},
+		{
+			"explain": "          TablesInSelectQuery (children 1)"
+		},
+		{
+			"explain": "           TablesInSelectQueryElement (children 1)"
+		},
+		{
+			"explain": "            TableExpression (children 1)"
+		},
+		{
+			"explain": "             Function numbers (children 1)"
+		},
+		{
+			"explain": "              ExpressionList (children 1)"
+		},
+		{
+			"explain": "               Literal UInt64_10"
+		},
+		{
+			"explain": "          Function equals (children 1)"
+		},
+		{
+			"explain": "           ExpressionList (children 2)"
+		},
+		{
+			"explain": "            Function modulo (children 1)"
+		},
+		{
+			"explain": "             ExpressionList (children 2)"
+		},
+		{
+			"explain": "              Identifier number"
+		},
+		{
+			"explain": "              Literal UInt64_3"
+		},
+		{
+			"explain": "            Literal UInt64_1"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    OrderByElement (children 2)"
+		},
+		{
+			"explain": "     Identifier n"
+		},
+		{
+			"explain": "     Function toDateTime64 (children 1)"
+		},
+		{
+			"explain": "      ExpressionList (children 2)"
+		},
+		{
+			"explain": "       Literal UInt64_1000"
+		},
+		{
+			"explain": "       Literal UInt64_3"
+		}
+	],
+
+	"rows": 43,
+
+	"statistics":
+	{
+		"elapsed": 0.001191558,
+		"rows_read": 43,
+		"bytes_read": 1825
+	}
+}
diff --git a/parser/testdata/01868_order_by_fill_with_datetime64/metadata.json b/parser/testdata/01868_order_by_fill_with_datetime64/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01868_order_by_fill_with_datetime64/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01868_order_by_fill_with_datetime64/query.sql b/parser/testdata/01868_order_by_fill_with_datetime64/query.sql
new file mode 100644
index 000000000..3a49ef73d
--- /dev/null
+++ b/parser/testdata/01868_order_by_fill_with_datetime64/query.sql
@@ -0,0 +1,2 @@
+SELECT n, source FROM (SELECT toDateTime64(number * 1000, 3,'Asia/Istanbul') AS n, 'original' AS source FROM numbers(10) WHERE (number % 3) = 1 ) ORDER BY n ASC WITH FILL STEP toDateTime64(1000, 3);
+SELECT n, source FROM (SELECT toDateTime64(number * 1000, 9,'Asia/Istanbul') AS n, 'original' AS source FROM numbers(10) WHERE (number % 3) = 1 ) ORDER BY n ASC WITH FILL STEP toDateTime64(1000, 9);
diff --git a/parser/testdata/01869_function_modulo_legacy/ast.json b/parser/testdata/01869_function_modulo_legacy/ast.json
new file mode 100644
index 000000000..4494996d7
--- /dev/null
+++ b/parser/testdata/01869_function_modulo_legacy/ast.json
@@ -0,0 +1,46 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function moduloLegacy (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 2)"
+		},
+		{
+			"explain": "      Literal UInt64_199"
+		},
+		{
+			"explain": "      Literal UInt64_200"
+		}
+	],
+
+	"rows": 8,
+
+	"statistics":
+	{
+		"elapsed": 0.001201379,
+		"rows_read": 8,
+		"bytes_read": 299
+	}
+}
diff --git a/parser/testdata/01869_function_modulo_legacy/metadata.json b/parser/testdata/01869_function_modulo_legacy/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01869_function_modulo_legacy/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01869_function_modulo_legacy/query.sql b/parser/testdata/01869_function_modulo_legacy/query.sql
new file mode 100644
index 000000000..66e2edddd
--- /dev/null
+++ b/parser/testdata/01869_function_modulo_legacy/query.sql
@@ -0,0 +1,2 @@
+SELECT moduloLegacy(199, 200);
+SELECT moduloLegacy(-199, 200);
diff --git a/parser/testdata/01869_reinterpret_as_fixed_string_uuid/ast.json b/parser/testdata/01869_reinterpret_as_fixed_string_uuid/ast.json
new file mode 100644
index 000000000..d578ccad3
--- /dev/null
+++ b/parser/testdata/01869_reinterpret_as_fixed_string_uuid/ast.json
@@ -0,0 +1,55 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function hex (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Function reinterpretAsFixedString (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 1)"
+		},
+		{
+			"explain": "        Function toUUID (children 1)"
+		},
+		{
+			"explain": "         ExpressionList (children 1)"
+		},
+		{
+			"explain": "          Literal '61f0c404-5cb3-11e7-907b-a6006ad3dba0'"
+		}
+	],
+
+	"rows": 11,
+
+	"statistics":
+	{
+		"elapsed": 0.001433707,
+		"rows_read": 11,
+		"bytes_read": 480
+	}
+}
diff --git a/parser/testdata/01869_reinterpret_as_fixed_string_uuid/metadata.json b/parser/testdata/01869_reinterpret_as_fixed_string_uuid/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01869_reinterpret_as_fixed_string_uuid/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01869_reinterpret_as_fixed_string_uuid/query.sql b/parser/testdata/01869_reinterpret_as_fixed_string_uuid/query.sql
new file mode 100644
index 000000000..f6cf90c37
--- /dev/null
+++ b/parser/testdata/01869_reinterpret_as_fixed_string_uuid/query.sql
@@ -0,0 +1 @@
+SELECT hex(reinterpretAsFixedString(toUUID('61f0c404-5cb3-11e7-907b-a6006ad3dba0')));
diff --git a/parser/testdata/01870_buffer_flush/ast.json b/parser/testdata/01870_buffer_flush/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/01870_buffer_flush/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/01870_buffer_flush/metadata.json b/parser/testdata/01870_buffer_flush/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01870_buffer_flush/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01870_buffer_flush/query.sql b/parser/testdata/01870_buffer_flush/query.sql
new file mode 100644
index 000000000..5e60bc7e0
--- /dev/null
+++ b/parser/testdata/01870_buffer_flush/query.sql
@@ -0,0 +1,28 @@
+-- Tags: no-parallel
+
+-- Check that Buffer will be flushed before shutdown
+-- (via DETACH DATABASE)
+
+drop database if exists db_01870;
+create database db_01870;
+
+-- Right now the order for shutdown is defined and it is:
+-- (prefixes are important, to define the order)
+-- - a_data_01870
+-- - z_buffer_01870
+-- so on DETACH DATABASE the following error will be printed:
+--
+-- Destination table default.a_data_01870 doesn't exist. Block of data is discarded.
+create table db_01870.a_data_01870 as system.numbers Engine=TinyLog();
+create table db_01870.z_buffer_01870 as system.numbers Engine=Buffer(db_01870, a_data_01870, 1,
+    100, 100, /* time */
+    100, 100, /* rows */
+    100, 1e6  /* bytes */
+);
+insert into db_01870.z_buffer_01870 select * from system.numbers limit 5;
+select count() from db_01870.a_data_01870;
+detach database db_01870;
+attach database db_01870;
+select count() from db_01870.a_data_01870;
+
+drop database db_01870;
diff --git a/parser/testdata/01870_modulo_partition_key/ast.json b/parser/testdata/01870_modulo_partition_key/ast.json
new file mode 100644
index 000000000..0df9be4dc
--- /dev/null
+++ b/parser/testdata/01870_modulo_partition_key/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001273109,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/01870_modulo_partition_key/metadata.json b/parser/testdata/01870_modulo_partition_key/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01870_modulo_partition_key/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01870_modulo_partition_key/query.sql b/parser/testdata/01870_modulo_partition_key/query.sql
new file mode 100644
index 000000000..46cd09622
--- /dev/null
+++ b/parser/testdata/01870_modulo_partition_key/query.sql
@@ -0,0 +1,57 @@
+SET insert_keeper_max_retries=100;
+SET insert_keeper_retry_max_backoff_ms=10;
+
+SELECT 'simple partition key:';
+DROP TABLE IF EXISTS table1 SYNC;
+CREATE TABLE table1 (id Int64, v UInt64)
+ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{database}/test_table12', '1', v)
+PARTITION BY id % 200 ORDER BY id;
+INSERT INTO table1 SELECT number-205, number FROM numbers(10);
+INSERT INTO table1 SELECT number-205, number FROM numbers(400, 10);
+SELECT toInt64(partition) as p FROM system.parts WHERE table='table1' and database=currentDatabase() ORDER BY p;
+
+select 'where id % 200 = +-2:';
+select id from table1 where id % 200 = 2 OR id % 200 = -2 order by id;
+select 'where id % 200 > 0:';
+select id from table1 where id % 200 > 0 order by id;
+select 'where id % 200 < 0:';
+select id from table1 where id % 200 < 0 order by id;
+
+SELECT 'tuple as partition key:';
+DROP TABLE IF EXISTS table2 SYNC;
+CREATE TABLE table2 (id Int64, v UInt64)
+ENGINE = MergeTree()
+PARTITION BY (toInt32(id / 2) % 3, id % 200) ORDER BY id;
+INSERT INTO table2 SELECT number-205, number FROM numbers(10);
+INSERT INTO table2 SELECT number-205, number FROM numbers(400, 10);
+SELECT partition as p FROM system.parts WHERE table='table2' and database=currentDatabase() ORDER BY p;
+
+SELECT 'recursive modulo partition key:';
+DROP TABLE IF EXISTS table3 SYNC;
+CREATE TABLE table3 (id Int64, v UInt64)
+ENGINE = MergeTree()
+PARTITION BY (id % 200, (id % 200) % 10, toInt32(round((id % 200) / 2, 0))) ORDER BY id;
+INSERT INTO table3 SELECT number-205, number FROM numbers(10);
+INSERT INTO table3 SELECT number-205, number FROM numbers(400, 10);
+SELECT partition as p FROM system.parts WHERE table='table3' and database=currentDatabase() ORDER BY p;
+
+DETACH TABLE table3;
+ATTACH TABLE table3;
+SELECT 'After detach:';
+SELECT partition as p FROM system.parts WHERE table='table3' and database=currentDatabase() ORDER BY p;
+
+SELECT 'Indexes:';
+DROP TABLE IF EXISTS table4 SYNC;
+CREATE TABLE table4 (id Int64, v UInt64, s String,
+INDEX a (id * 2, s) TYPE minmax GRANULARITY 3
+) ENGINE = MergeTree() PARTITION BY id % 10 ORDER BY v;
+INSERT INTO table4 SELECT number, number, toString(number) FROM numbers(1000);
+SELECT count() FROM table4 WHERE id % 10 = 7;
+
+SELECT 'comparison:';
+SELECT v, v-205 as vv, modulo(vv, 200), moduloLegacy(vv, 200) FROM table1 ORDER BY v;
+
+DROP TABLE table1 SYNC;
+DROP TABLE table2 SYNC;
+DROP TABLE table3 SYNC;
+DROP TABLE table4 SYNC;
diff --git a/parser/testdata/01871_merge_tree_compile_expressions/ast.json b/parser/testdata/01871_merge_tree_compile_expressions/ast.json
new file mode 100644
index 000000000..eabfa3f1d
--- /dev/null
+++ b/parser/testdata/01871_merge_tree_compile_expressions/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery data_01875_1 (children 1)"
+		},
+		{
+			"explain": " Identifier data_01875_1"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001049639,
+		"rows_read": 2,
+		"bytes_read": 76
+	}
+}
diff --git a/parser/testdata/01871_merge_tree_compile_expressions/metadata.json b/parser/testdata/01871_merge_tree_compile_expressions/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01871_merge_tree_compile_expressions/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01871_merge_tree_compile_expressions/query.sql b/parser/testdata/01871_merge_tree_compile_expressions/query.sql
new file mode 100644
index 000000000..ea1e26b1f
--- /dev/null
+++ b/parser/testdata/01871_merge_tree_compile_expressions/query.sql
@@ -0,0 +1,17 @@
+DROP TABLE IF EXISTS data_01875_1;
+DROP TABLE IF EXISTS data_01875_2;
+DROP TABLE IF EXISTS data_01875_3;
+
+SET compile_expressions=true;
+
+-- CREATE TABLE will use global profile with default min_count_to_compile_expression=3
+-- so retry 3 times
+CREATE TABLE data_01875_1 Engine=MergeTree ORDER BY number PARTITION BY bitShiftRight(number, 8) + 1 AS SELECT * FROM numbers(16384);
+CREATE TABLE data_01875_2 Engine=MergeTree ORDER BY number PARTITION BY bitShiftRight(number, 8) + 1 AS SELECT * FROM numbers(16384);
+CREATE TABLE data_01875_3 Engine=MergeTree ORDER BY number PARTITION BY bitShiftRight(number, 8) + 1 AS SELECT * FROM numbers(16384);
+
+SELECT number FROM data_01875_3 WHERE number = 999;
+
+DROP TABLE data_01875_1;
+DROP TABLE data_01875_2;
+DROP TABLE data_01875_3;
diff --git a/parser/testdata/01872_functions_to_subcolumns_analyzer/ast.json b/parser/testdata/01872_functions_to_subcolumns_analyzer/ast.json
new file mode 100644
index 000000000..210caab5c
--- /dev/null
+++ b/parser/testdata/01872_functions_to_subcolumns_analyzer/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery t_func_to_subcolumns (children 1)"
+		},
+		{
+			"explain": " Identifier t_func_to_subcolumns"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.00121051,
+		"rows_read": 2,
+		"bytes_read": 92
+	}
+}
diff --git a/parser/testdata/01872_functions_to_subcolumns_analyzer/metadata.json b/parser/testdata/01872_functions_to_subcolumns_analyzer/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01872_functions_to_subcolumns_analyzer/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01872_functions_to_subcolumns_analyzer/query.sql b/parser/testdata/01872_functions_to_subcolumns_analyzer/query.sql
new file mode 100644
index 000000000..d1f28e93b
--- /dev/null
+++ b/parser/testdata/01872_functions_to_subcolumns_analyzer/query.sql
@@ -0,0 +1,43 @@
+DROP TABLE IF EXISTS t_func_to_subcolumns;
+
+SET enable_analyzer = 1;
+SET optimize_functions_to_subcolumns = 1;
+
+CREATE TABLE t_func_to_subcolumns (id UInt64, arr Array(UInt64), n Nullable(String), m Map(String, UInt64))
+ENGINE = MergeTree ORDER BY tuple();
+
+INSERT INTO t_func_to_subcolumns VALUES (1, [1, 2, 3], 'abc', map('foo', 1, 'bar', 2)) (2, [], NULL, map());
+
+SELECT id IS NULL, n IS NULL, n IS NOT NULL FROM t_func_to_subcolumns ORDER BY id;
+EXPLAIN QUERY TREE dump_tree = 1, dump_ast = 1 SELECT id IS NULL, n IS NULL, n IS NOT NULL FROM t_func_to_subcolumns;
+
+SELECT length(arr), empty(arr), notEmpty(arr), empty(n) FROM t_func_to_subcolumns ORDER BY id;
+EXPLAIN QUERY TREE dump_tree = 1, dump_ast = 1 SELECT length(arr), empty(arr), notEmpty(arr), empty(n) FROM t_func_to_subcolumns;
+
+SELECT mapKeys(m), mapValues(m) FROM t_func_to_subcolumns ORDER BY id;
+EXPLAIN QUERY TREE dump_tree = 1, dump_ast = 1 SELECT mapKeys(m), mapValues(m) FROM t_func_to_subcolumns;
+
+SELECT count(n) FROM t_func_to_subcolumns;
+EXPLAIN QUERY TREE dump_tree = 1, dump_ast = 1 SELECT count(n) FROM t_func_to_subcolumns;
+
+SELECT count(id) FROM t_func_to_subcolumns;
+EXPLAIN QUERY TREE dump_tree = 1, dump_ast = 1 SELECT count(id) FROM t_func_to_subcolumns;
+
+SELECT id, left.n IS NULL, right.n IS NULL FROM t_func_to_subcolumns AS left
+FULL JOIN (SELECT 1 AS id, 'qqq' AS n UNION ALL SELECT 3 AS id, 'www') AS right USING(id)
+ORDER BY id;
+
+EXPLAIN QUERY TREE dump_tree = 1, dump_ast = 1 SELECT id, left.n IS NULL, right.n IS NULL FROM t_func_to_subcolumns AS left
+FULL JOIN (SELECT 1 AS id, 'qqq' AS n UNION ALL SELECT 3 AS id, 'www') AS right USING(id);
+
+DROP TABLE t_func_to_subcolumns;
+
+DROP TABLE IF EXISTS t_tuple_null;
+
+CREATE TABLE t_tuple_null (t Tuple(null UInt32)) ENGINE = MergeTree ORDER BY tuple();
+
+INSERT INTO t_tuple_null VALUES ((10)), ((20));
+
+SELECT t IS NULL, t.null FROM t_tuple_null;
+
+DROP TABLE t_tuple_null;
diff --git a/parser/testdata/01880_materialized_view_to_table_type_check/ast.json b/parser/testdata/01880_materialized_view_to_table_type_check/ast.json
new file mode 100644
index 000000000..ffd45d0bd
--- /dev/null
+++ b/parser/testdata/01880_materialized_view_to_table_type_check/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery test_mv (children 1)"
+		},
+		{
+			"explain": " Identifier test_mv"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001102907,
+		"rows_read": 2,
+		"bytes_read": 66
+	}
+}
diff --git a/parser/testdata/01880_materialized_view_to_table_type_check/metadata.json b/parser/testdata/01880_materialized_view_to_table_type_check/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01880_materialized_view_to_table_type_check/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01880_materialized_view_to_table_type_check/query.sql b/parser/testdata/01880_materialized_view_to_table_type_check/query.sql
new file mode 100644
index 000000000..768fda9cd
--- /dev/null
+++ b/parser/testdata/01880_materialized_view_to_table_type_check/query.sql
@@ -0,0 +1,23 @@
+DROP TABLE IF EXISTS test_mv;
+DROP TABLE IF EXISTS test;
+DROP TABLE IF EXISTS test_input;
+
+CREATE TABLE test_input(id Int32) ENGINE=MergeTree() order by id;
+
+CREATE TABLE test(`id` Int32, `pv` AggregateFunction(sum, Int32)) ENGINE = AggregatingMergeTree() ORDER BY id;
+
+CREATE MATERIALIZED VIEW test_mv to test(`id` Int32, `pv` AggregateFunction(sum, Int32)) as SELECT id, sumState(1) as pv from test_input group by id; -- { serverError CANNOT_CONVERT_TYPE }
+
+INSERT INTO test_input SELECT toInt32(number % 1000) AS id FROM numbers(10);
+select '----------test--------:';
+select * from test;
+
+create MATERIALIZED VIEW test_mv to test(`id` Int32, `pv` AggregateFunction(sum, Int32)) as SELECT id, sumState(toInt32(1)) as pv from test_input group by id;
+INSERT INTO test_input SELECT toInt32(number % 1000) AS id FROM numbers(100,3);
+
+select '----------test--------:';
+select * from test;
+
+DROP TABLE test_mv;
+DROP TABLE test;
+DROP TABLE test_input;
diff --git a/parser/testdata/01880_remote_ipv6/ast.json b/parser/testdata/01880_remote_ipv6/ast.json
new file mode 100644
index 000000000..2cc6c7b95
--- /dev/null
+++ b/parser/testdata/01880_remote_ipv6/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001614956,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/01880_remote_ipv6/metadata.json b/parser/testdata/01880_remote_ipv6/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01880_remote_ipv6/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01880_remote_ipv6/query.sql b/parser/testdata/01880_remote_ipv6/query.sql
new file mode 100644
index 000000000..0ec217898
--- /dev/null
+++ b/parser/testdata/01880_remote_ipv6/query.sql
@@ -0,0 +1,23 @@
+SET connections_with_failover_max_tries=0;
+
+SELECT * FROM remote('[::1]', system.one) FORMAT Null;
+SELECT * FROM remote('[::1]:9000', system.one) FORMAT Null;
+
+SELECT * FROM remote('[::1', system.one) FORMAT Null; -- { serverError BAD_ARGUMENTS }
+SELECT * FROM remote('::1]', system.one) FORMAT Null; -- { serverError BAD_ARGUMENTS }
+SELECT * FROM remote('::1', system.one) FORMAT Null; -- { serverError BAD_ARGUMENTS }
+
+SELECT * FROM remote('[::1][::1]', system.one) FORMAT Null; -- { serverError BAD_ARGUMENTS }
+SELECT * FROM remote('[::1][::1', system.one) FORMAT Null; -- { serverError BAD_ARGUMENTS }
+SELECT * FROM remote('[::1]::1]', system.one) FORMAT Null; -- { serverError BAD_ARGUMENTS }
+
+SELECT * FROM remote('[::1]') FORMAT Null;
+SELECT * FROM remote('[::1]:9000') FORMAT Null;
+
+SELECT * FROM remote('[::1') FORMAT Null; -- { serverError BAD_ARGUMENTS }
+SELECT * FROM remote('::1]') FORMAT Null; -- { serverError BAD_ARGUMENTS }
+SELECT * FROM remote('::1') FORMAT Null; -- { serverError BAD_ARGUMENTS }
+
+SELECT * FROM remote('[::1][::1]') FORMAT Null; -- { serverError BAD_ARGUMENTS }
+SELECT * FROM remote('[::1][::1') FORMAT Null; -- { serverError BAD_ARGUMENTS }
+SELECT * FROM remote('[::1]::1]') FORMAT Null; -- { serverError BAD_ARGUMENTS }
diff --git a/parser/testdata/01881_aggregate_functions_versioning/ast.json b/parser/testdata/01881_aggregate_functions_versioning/ast.json
new file mode 100644
index 000000000..b99cc3302
--- /dev/null
+++ b/parser/testdata/01881_aggregate_functions_versioning/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery test_table (children 1)"
+		},
+		{
+			"explain": " Identifier test_table"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001461607,
+		"rows_read": 2,
+		"bytes_read": 72
+	}
+}
diff --git a/parser/testdata/01881_aggregate_functions_versioning/metadata.json b/parser/testdata/01881_aggregate_functions_versioning/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01881_aggregate_functions_versioning/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01881_aggregate_functions_versioning/query.sql b/parser/testdata/01881_aggregate_functions_versioning/query.sql
new file mode 100644
index 000000000..f5007f8ef
--- /dev/null
+++ b/parser/testdata/01881_aggregate_functions_versioning/query.sql
@@ -0,0 +1,22 @@
+DROP TABLE IF EXISTS test_table;
+CREATE TABLE test_table
+(
+    `col1` DateTime,
+    `col2` Int64,
+    `col3` AggregateFunction(sumMap, Tuple(Array(UInt8), Array(UInt8)))
+)
+ENGINE = AggregatingMergeTree() ORDER BY (col1, col2);
+
+SHOW CREATE TABLE test_table;
+
+-- regression from performance tests comparison script
+DROP TABLE IF EXISTS test;
+CREATE TABLE test
+ENGINE = Null AS
+WITH (
+        SELECT arrayReduce('sumMapState', [(['foo'], arrayMap(x -> -0., ['foo']))])
+    ) AS all_metrics
+SELECT
+    (finalizeAggregation(arrayReduce('sumMapMergeState', [all_metrics])) AS metrics_tuple).1 AS metric_names,
+    metrics_tuple.2 AS metric_values
+FROM system.one;
diff --git a/parser/testdata/01881_create_as_tuple/ast.json b/parser/testdata/01881_create_as_tuple/ast.json
new file mode 100644
index 000000000..b5986826c
--- /dev/null
+++ b/parser/testdata/01881_create_as_tuple/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery t_create_as_tuple (children 1)"
+		},
+		{
+			"explain": " Identifier t_create_as_tuple"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001517305,
+		"rows_read": 2,
+		"bytes_read": 86
+	}
+}
diff --git a/parser/testdata/01881_create_as_tuple/metadata.json b/parser/testdata/01881_create_as_tuple/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01881_create_as_tuple/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01881_create_as_tuple/query.sql b/parser/testdata/01881_create_as_tuple/query.sql
new file mode 100644
index 000000000..cf370fe46
--- /dev/null
+++ b/parser/testdata/01881_create_as_tuple/query.sql
@@ -0,0 +1,14 @@
+DROP TABLE IF EXISTS t_create_as_tuple;
+
+CREATE TABLE t_create_as_tuple ENGINE = MergeTree()
+ORDER BY number AS
+SELECT number, [('string',number)] AS array FROM numbers(3);
+
+SELECT * FROM t_create_as_tuple ORDER BY number;
+
+DETACH TABLE t_create_as_tuple;
+ATTACH TABLE t_create_as_tuple;
+
+SELECT * FROM t_create_as_tuple ORDER BY number;
+
+DROP TABLE t_create_as_tuple;
diff --git a/parser/testdata/01881_negate_formatting/ast.json b/parser/testdata/01881_negate_formatting/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/01881_negate_formatting/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/01881_negate_formatting/metadata.json b/parser/testdata/01881_negate_formatting/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01881_negate_formatting/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01881_negate_formatting/query.sql b/parser/testdata/01881_negate_formatting/query.sql
new file mode 100644
index 000000000..c7e516876
--- /dev/null
+++ b/parser/testdata/01881_negate_formatting/query.sql
@@ -0,0 +1,7 @@
+-- { echo }
+EXPLAIN SYNTAX SELECT -1;
+EXPLAIN SYNTAX SELECT -(1);
+EXPLAIN SYNTAX SELECT -(-(1));
+EXPLAIN SYNTAX SELECT -(-(-(1)));
+EXPLAIN SYNTAX SELECT -(-(-1));
+EXPLAIN SYNTAX SELECT -(-toUInt64(-(1)));
diff --git a/parser/testdata/01881_to_week_monotonic_fix/ast.json b/parser/testdata/01881_to_week_monotonic_fix/ast.json
new file mode 100644
index 000000000..83452c8a3
--- /dev/null
+++ b/parser/testdata/01881_to_week_monotonic_fix/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery test_tbl (children 1)"
+		},
+		{
+			"explain": " Identifier test_tbl"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.0014483,
+		"rows_read": 2,
+		"bytes_read": 68
+	}
+}
diff --git a/parser/testdata/01881_to_week_monotonic_fix/metadata.json b/parser/testdata/01881_to_week_monotonic_fix/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01881_to_week_monotonic_fix/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01881_to_week_monotonic_fix/query.sql b/parser/testdata/01881_to_week_monotonic_fix/query.sql
new file mode 100644
index 000000000..c73a7909f
--- /dev/null
+++ b/parser/testdata/01881_to_week_monotonic_fix/query.sql
@@ -0,0 +1,11 @@
+drop table if exists test_tbl;
+
+create table test_tbl (vend_nm String, ship_dt Date) engine MergeTree partition by toWeek(ship_dt) order by vend_nm;
+
+insert into test_tbl values('1', '2020-11-11'), ('1', '2021-01-01');
+
+select * From test_tbl where ship_dt >= toDate('2020-11-01') and ship_dt <= toDate('2021-05-05') order by ship_dt;
+
+select * From test_tbl where ship_dt >= toDate('2020-01-01') and ship_dt <= toDate('2021-05-05') order by ship_dt;
+
+drop table test_tbl;
diff --git a/parser/testdata/01881_total_bytes_storage_buffer/ast.json b/parser/testdata/01881_total_bytes_storage_buffer/ast.json
new file mode 100644
index 000000000..bfd5151b3
--- /dev/null
+++ b/parser/testdata/01881_total_bytes_storage_buffer/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery test_buffer_table (children 1)"
+		},
+		{
+			"explain": " Identifier test_buffer_table"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.0011597,
+		"rows_read": 2,
+		"bytes_read": 86
+	}
+}
diff --git a/parser/testdata/01881_total_bytes_storage_buffer/metadata.json b/parser/testdata/01881_total_bytes_storage_buffer/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01881_total_bytes_storage_buffer/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01881_total_bytes_storage_buffer/query.sql b/parser/testdata/01881_total_bytes_storage_buffer/query.sql
new file mode 100644
index 000000000..1fd9ea56e
--- /dev/null
+++ b/parser/testdata/01881_total_bytes_storage_buffer/query.sql
@@ -0,0 +1,23 @@
+DROP TABLE IF EXISTS test_buffer_table;
+
+CREATE TABLE test_buffer_table
+(
+    `a` Int64
+)
+ENGINE = Buffer('', '', 1, 1000000, 1000000, 1000000, 1000000, 1000000, 1000000);
+
+SELECT total_bytes FROM system.tables WHERE name = 'test_buffer_table' and database = currentDatabase();
+
+INSERT INTO test_buffer_table SELECT number FROM numbers(1000);
+SELECT total_bytes FROM system.tables WHERE name = 'test_buffer_table' and database = currentDatabase();
+
+OPTIMIZE TABLE test_buffer_table;
+SELECT total_bytes FROM system.tables WHERE name = 'test_buffer_table' and database = currentDatabase();
+
+INSERT INTO test_buffer_table SELECT number FROM numbers(1000);
+SELECT total_bytes FROM system.tables WHERE name = 'test_buffer_table' and database = currentDatabase();
+
+OPTIMIZE TABLE test_buffer_table;
+SELECT total_bytes FROM system.tables WHERE name = 'test_buffer_table' and database = currentDatabase();
+
+DROP TABLE test_buffer_table;
diff --git a/parser/testdata/01881_union_header_mismatch_bug/ast.json b/parser/testdata/01881_union_header_mismatch_bug/ast.json
new file mode 100644
index 000000000..321b3e5ea
--- /dev/null
+++ b/parser/testdata/01881_union_header_mismatch_bug/ast.json
@@ -0,0 +1,130 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 3)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Asterisk"
+		},
+		{
+			"explain": "   TablesInSelectQuery (children 1)"
+		},
+		{
+			"explain": "    TablesInSelectQueryElement (children 1)"
+		},
+		{
+			"explain": "     TableExpression (children 1)"
+		},
+		{
+			"explain": "      Subquery (children 1)"
+		},
+		{
+			"explain": "       SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": "        ExpressionList (children 2)"
+		},
+		{
+			"explain": "         SelectQuery (children 1)"
+		},
+		{
+			"explain": "          ExpressionList (children 3)"
+		},
+		{
+			"explain": "           Literal 'table' (alias table)"
+		},
+		{
+			"explain": "           Function toInt64 (alias rows) (children 1)"
+		},
+		{
+			"explain": "            ExpressionList (children 1)"
+		},
+		{
+			"explain": "             Literal UInt64_10"
+		},
+		{
+			"explain": "           Function toInt64 (alias elements) (children 1)"
+		},
+		{
+			"explain": "            ExpressionList (children 1)"
+		},
+		{
+			"explain": "             Literal UInt64_101"
+		},
+		{
+			"explain": "         SelectQuery (children 1)"
+		},
+		{
+			"explain": "          ExpressionList (children 3)"
+		},
+		{
+			"explain": "           Literal 'another table' (alias table)"
+		},
+		{
+			"explain": "           Function toInt64 (alias rows) (children 1)"
+		},
+		{
+			"explain": "            ExpressionList (children 1)"
+		},
+		{
+			"explain": "             Literal UInt64_0"
+		},
+		{
+			"explain": "           Function toInt64 (alias elements) (children 1)"
+		},
+		{
+			"explain": "            ExpressionList (children 1)"
+		},
+		{
+			"explain": "             Literal UInt64_0"
+		},
+		{
+			"explain": "   Function notEquals (children 1)"
+		},
+		{
+			"explain": "    ExpressionList (children 2)"
+		},
+		{
+			"explain": "     Function minus (children 1)"
+		},
+		{
+			"explain": "      ExpressionList (children 2)"
+		},
+		{
+			"explain": "       Identifier rows"
+		},
+		{
+			"explain": "       Identifier elements"
+		},
+		{
+			"explain": "     Literal UInt64_0"
+		}
+	],
+
+	"rows": 36,
+
+	"statistics":
+	{
+		"elapsed": 0.001939005,
+		"rows_read": 36,
+		"bytes_read": 1552
+	}
+}
diff --git a/parser/testdata/01881_union_header_mismatch_bug/metadata.json b/parser/testdata/01881_union_header_mismatch_bug/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/01881_union_header_mismatch_bug/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/01881_union_header_mismatch_bug/query.sql b/parser/testdata/01881_union_header_mismatch_bug/query.sql
new file mode 100644
index 000000000..bf8749fb0
--- /dev/null
+++ b/parser/testdata/01881_union_header_mismatch_bug/query.sql
@@ -0,0 +1,35 @@
+select * from ( select 'table' as table, toInt64(10) as rows, toInt64(101) as elements union all select 'another table' as table, toInt64(0) as rows, toInt64(0) as elements ) where rows - elements <> 0;
+
+SELECT
+    label,
+    number
+FROM
+(
+    SELECT
+        'a' AS label,
+        number
+    FROM
+    (
number + FROM numbers(10) + ) + UNION ALL + SELECT + 'b' AS label, + number + FROM + ( + SELECT number + FROM numbers(10) + ) +) +WHERE number IN +( + SELECT number + FROM numbers(5) +) order by label, number; + +SELECT NULL FROM +(SELECT [1048575, NULL] AS ax, 2147483648 AS c) t1 ARRAY JOIN ax +INNER JOIN (SELECT NULL AS c) t2 USING (c); + diff --git a/parser/testdata/01882_check_max_parts_to_merge_at_once/ast.json b/parser/testdata/01882_check_max_parts_to_merge_at_once/ast.json new file mode 100644 index 000000000..6bc38b056 --- /dev/null +++ b/parser/testdata/01882_check_max_parts_to_merge_at_once/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery limited_merge_table (children 1)" + }, + { + "explain": " Identifier limited_merge_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001295912, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/01882_check_max_parts_to_merge_at_once/metadata.json b/parser/testdata/01882_check_max_parts_to_merge_at_once/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01882_check_max_parts_to_merge_at_once/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01882_check_max_parts_to_merge_at_once/query.sql b/parser/testdata/01882_check_max_parts_to_merge_at_once/query.sql new file mode 100644 index 000000000..f5325616a --- /dev/null +++ b/parser/testdata/01882_check_max_parts_to_merge_at_once/query.sql @@ -0,0 +1,32 @@ +DROP TABLE IF EXISTS limited_merge_table; + +SET max_threads = 1; +SET max_block_size = 1; +SET min_insert_block_size_rows = 1; + +CREATE TABLE limited_merge_table +( + key UInt64 +) +ENGINE = MergeTree() +ORDER BY key +SETTINGS max_parts_to_merge_at_once = 3; + +SYSTEM STOP MERGES limited_merge_table; + +INSERT INTO limited_merge_table SELECT number FROM numbers(100); + +SYSTEM START MERGES limited_merge_table; + +OPTIMIZE TABLE limited_merge_table FINAL; + +SYSTEM FLUSH LOGS part_log; + +SELECT COUNT() FROM limited_merge_table; + +-- The final OPTIMIZE FINAL will merge all parts, but all previous merges must merge <= 3 parts. +-- During a concurrent run only one final merge can happen, that's why we have this `if`.
+SELECT if(length(topK(2)(length(merged_from))) == 2, arrayMin(topK(2)(length(merged_from))) <= 3, 1) +FROM system.part_log WHERE table = 'limited_merge_table' and database = currentDatabase() and event_type = 'MergeParts'; + +DROP TABLE IF EXISTS limited_merge_table; diff --git a/parser/testdata/01882_scalar_subquery_exception/ast.json b/parser/testdata/01882_scalar_subquery_exception/ast.json new file mode 100644 index 000000000..ba3a11872 --- /dev/null +++ b/parser/testdata/01882_scalar_subquery_exception/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nums_in_mem (children 1)" + }, + { + "explain": " Identifier nums_in_mem" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000966066, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/01882_scalar_subquery_exception/metadata.json b/parser/testdata/01882_scalar_subquery_exception/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01882_scalar_subquery_exception/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01882_scalar_subquery_exception/query.sql b/parser/testdata/01882_scalar_subquery_exception/query.sql new file mode 100644 index 000000000..b9f6f70f9 --- /dev/null +++ b/parser/testdata/01882_scalar_subquery_exception/query.sql @@ -0,0 +1,19 @@ +drop table if exists nums_in_mem; +drop table if exists nums_in_mem_dist; + +create table nums_in_mem(v UInt64) engine=Memory; +insert into nums_in_mem select * from system.numbers limit 1000000; + +create table nums_in_mem_dist as nums_in_mem engine=Distributed('test_shard_localhost', currentDatabase(), nums_in_mem); + +set prefer_localhost_replica = 0; +set max_rows_to_read = 100; + +select + count() + / + (select count() from nums_in_mem_dist where rand() > 0) +from system.one; -- { serverError TOO_MANY_ROWS } + +drop table nums_in_mem; +drop table nums_in_mem_dist; diff --git a/parser/testdata/01883_grouping_sets_crash/ast.json b/parser/testdata/01883_grouping_sets_crash/ast.json new file mode 100644 index 000000000..20cd21f0b --- /dev/null +++ b/parser/testdata/01883_grouping_sets_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery grouping_sets (children 1)" + }, + { + "explain": " Identifier grouping_sets" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001274057, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/01883_grouping_sets_crash/metadata.json b/parser/testdata/01883_grouping_sets_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01883_grouping_sets_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01883_grouping_sets_crash/query.sql b/parser/testdata/01883_grouping_sets_crash/query.sql new file mode 100644 index 000000000..4cbd7c8f2 --- /dev/null +++ b/parser/testdata/01883_grouping_sets_crash/query.sql @@ -0,0 +1,127 @@ +DROP TABLE IF EXISTS grouping_sets; + +CREATE TABLE grouping_sets(fact_1_id Int32, fact_2_id Int32, fact_3_id Int32, fact_4_id Int32, sales_value Int32) ENGINE = Memory; + +INSERT INTO grouping_sets +SELECT + number % 2 + 1 AS fact_1_id, + number % 5 + 1 AS fact_2_id, + number % 10 + 1 AS fact_3_id, + number % 10 + 1 AS fact_4_id, + number % 100 AS sales_value +FROM system.numbers limit 1000; + +-- { echoOn } +SELECT + fact_3_id, + fact_4_id +FROM 
grouping_sets +GROUP BY + GROUPING SETS ( + ('wo\0ldworldwo\0ldworld'), + (fact_3_id, fact_4_id)) +ORDER BY + fact_3_id, fact_4_id; + +SELECT 'SECOND QUERY:'; + +SELECT + fact_3_id, + fact_4_id +FROM grouping_sets +GROUP BY + GROUPING SETS ( + (fact_1_id, fact_2_id), + ((-9223372036854775808, NULL, (tuple(1.), (tuple(1.), 1048576), 65535))), + ((tuple(3.4028234663852886e38), (tuple(1024), -2147483647), NULL)), + (fact_3_id, fact_4_id)) +ORDER BY + (NULL, ('256', (tuple(NULL), NULL), NULL, NULL), NULL) ASC, + fact_1_id DESC NULLS FIRST, + fact_2_id DESC NULLS FIRST, + fact_4_id ASC; + +SELECT 'THIRD QUERY:'; + +SELECT + extractAllGroups(NULL, 'worldworldworldwo\0ldworldworldworldwo\0ld'), + fact_2_id, + fact_3_id, + fact_4_id +FROM grouping_sets +GROUP BY + GROUPING SETS ( + (sales_value), + (fact_1_id, fact_2_id), + ('wo\0ldworldwo\0ldworld'), + (fact_3_id, fact_4_id)) +ORDER BY + fact_1_id DESC NULLS LAST, + fact_1_id DESC NULLS FIRST, + fact_2_id ASC, + fact_3_id DESC NULLS FIRST, + fact_4_id ASC; + +SELECT fact_3_id +FROM grouping_sets +GROUP BY + GROUPING SETS ((fact_3_id, fact_4_id)) +ORDER BY fact_3_id ASC; + +-- Following two queries were fuzzed +SELECT 'w\0\0ldworldwo\0l\0world' +FROM grouping_sets +GROUP BY + GROUPING SETS ( + ( fact_4_id), + ( NULL), + ( fact_3_id, fact_4_id)) +ORDER BY + NULL ASC, + NULL DESC NULLS FIRST, + fact_3_id ASC, + fact_3_id ASC NULLS LAST, + 'wo\0ldworldwo\0ldworld' ASC NULLS LAST, + 'w\0\0ldworldwo\0l\0world' DESC NULLS FIRST, + 'wo\0ldworldwo\0ldworld' ASC, + NULL ASC NULLS FIRST, + fact_4_id DESC NULLS LAST; + +SELECT fact_3_id +FROM grouping_sets +GROUP BY + GROUPING SETS ( + ( 'wo\0ldworldwo\0ldworldwo\0ldworldwo\0ldworldwo\0ldworldwo\0ldworldwo\0ldworldwo\0ldworld'), + ( NULL), + ( fact_4_id), + ( fact_3_id, fact_4_id)) +ORDER BY fact_3_id ASC NULLS FIRST; + +SELECT fact_3_id, fact_4_id, count() +FROM grouping_sets +GROUP BY + GROUPING SETS ( + ( fact_3_id, fact_4_id)) +ORDER BY fact_3_id, fact_4_id +SETTINGS optimize_aggregation_in_order=1; + +SELECT fact_3_id, fact_4_id, count() +FROM grouping_sets +GROUP BY + GROUPING SETS ( + fact_3_id, + fact_4_id) +ORDER BY fact_3_id, fact_4_id +SETTINGS optimize_aggregation_in_order=1; + +SELECT fact_3_id, fact_4_id, count() +FROM grouping_sets +GROUP BY + GROUPING SETS ( + ( fact_3_id ), + ( fact_3_id, fact_4_id)) +ORDER BY fact_3_id, fact_4_id +SETTINGS optimize_aggregation_in_order=1; + +-- { echoOff } +DROP TABLE IF EXISTS grouping_sets; diff --git a/parser/testdata/01883_subcolumns_distributed/ast.json b/parser/testdata/01883_subcolumns_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01883_subcolumns_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01883_subcolumns_distributed/metadata.json b/parser/testdata/01883_subcolumns_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01883_subcolumns_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01883_subcolumns_distributed/query.sql b/parser/testdata/01883_subcolumns_distributed/query.sql new file mode 100644 index 000000000..72730e796 --- /dev/null +++ b/parser/testdata/01883_subcolumns_distributed/query.sql @@ -0,0 +1,26 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS t_subcolumns_local; +DROP TABLE IF EXISTS t_subcolumns_dist; + +CREATE TABLE t_subcolumns_local (arr Array(UInt32), n Nullable(String), t Tuple(s1 String, s2 String)) +ENGINE = MergeTree ORDER BY tuple(); + 
+CREATE TABLE t_subcolumns_dist AS t_subcolumns_local ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), t_subcolumns_local); + +INSERT INTO t_subcolumns_local VALUES ([1, 2, 3], 'aaa', ('bbb', 'ccc')); + +SELECT arr.size0, n.null, t.s1, t.s2 FROM t_subcolumns_dist; + +DROP TABLE t_subcolumns_local; + +-- StripeLog doesn't support subcolumns. +CREATE TABLE t_subcolumns_local (arr Array(UInt32), n Nullable(String), t Tuple(s1 String, s2 String)) ENGINE = StripeLog; + +INSERT INTO t_subcolumns_local VALUES ([1, 2, 3], 'aaa', ('bbb', 'ccc')); + +SELECT arr.size0, n.null, t.s1, t.s2 FROM t_subcolumns_dist SETTINGS enable_analyzer=1; +SELECT arr.size0, n.null, t.s1, t.s2 FROM t_subcolumns_dist SETTINGS enable_analyzer=0; -- {serverError UNKNOWN_IDENTIFIER} + +DROP TABLE t_subcolumns_local; +DROP TABLE t_subcolumns_dist; diff --git a/parser/testdata/01883_with_grouping_sets/ast.json b/parser/testdata/01883_with_grouping_sets/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01883_with_grouping_sets/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01883_with_grouping_sets/metadata.json b/parser/testdata/01883_with_grouping_sets/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01883_with_grouping_sets/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01883_with_grouping_sets/query.sql b/parser/testdata/01883_with_grouping_sets/query.sql new file mode 100644 index 000000000..7247267fd --- /dev/null +++ b/parser/testdata/01883_with_grouping_sets/query.sql @@ -0,0 +1,69 @@ +-- Specific value doesn't matter, we just need it to be fixed, because it is a part of `EXPLAIN PIPELINE` output. +SET max_threads = 8; + +DROP TABLE IF EXISTS grouping_sets; + +CREATE TABLE grouping_sets(fact_1_id Int32, fact_2_id Int32, fact_3_id Int32, fact_4_id Int32, sales_value Int32) ENGINE = Memory; + +SELECT fact_1_id, fact_3_id, sum(sales_value), count() from grouping_sets GROUP BY GROUPING SETS(fact_1_id, fact_3_id) ORDER BY fact_1_id, fact_3_id; + +INSERT INTO grouping_sets +SELECT + number % 2 + 1 AS fact_1_id, + number % 5 + 1 AS fact_2_id, + number % 10 + 1 AS fact_3_id, + number % 10 + 1 AS fact_4_id, + number % 100 AS sales_value +FROM system.numbers limit 1000; + +EXPLAIN PIPELINE +SELECT fact_1_id, fact_2_id, fact_3_id, SUM(sales_value) AS sales_value from grouping_sets +GROUP BY GROUPING SETS ((fact_1_id, fact_2_id), (fact_1_id, fact_3_id)) +ORDER BY fact_1_id, fact_2_id, fact_3_id; + +SELECT fact_1_id, fact_2_id, fact_3_id, SUM(sales_value) AS sales_value from grouping_sets +GROUP BY GROUPING SETS ((fact_1_id, fact_2_id), (fact_1_id, fact_3_id)) +ORDER BY fact_1_id, fact_2_id, fact_3_id; + +SELECT fact_1_id, fact_2_id, fact_3_id, fact_4_id, SUM(sales_value) AS sales_value from grouping_sets +GROUP BY GROUPING SETS ((fact_1_id, fact_2_id), (fact_3_id, fact_4_id)) +ORDER BY fact_1_id, fact_2_id, fact_3_id, fact_4_id; + +SELECT fact_1_id, fact_2_id, fact_3_id, SUM(sales_value) AS sales_value from grouping_sets +GROUP BY GROUPING SETS ((fact_1_id, fact_2_id), (fact_3_id), ()) +ORDER BY fact_1_id, fact_2_id, fact_3_id; + +SELECT + fact_1_id, + fact_3_id, + SUM(sales_value) AS sales_value +FROM grouping_sets +GROUP BY grouping sets ((fact_1_id), (fact_1_id, fact_3_id)) WITH TOTALS +ORDER BY fact_1_id, fact_3_id; -- { serverError NOT_IMPLEMENTED } + +EXPLAIN SYNTAX SELECT + fact_1_id, + fact_3_id, + SUM(sales_value) AS sales_value +FROM grouping_sets +GROUP BY grouping 
sets (fact_1_id, (fact_1_id, fact_3_id)) WITH TOTALS +ORDER BY fact_1_id, fact_3_id; + +SELECT + fact_1_id, + fact_3_id, + SUM(sales_value) AS sales_value +FROM grouping_sets +GROUP BY grouping sets (fact_1_id, (fact_1_id, fact_3_id)) WITH TOTALS +ORDER BY fact_1_id, fact_3_id; -- { serverError NOT_IMPLEMENTED } + +DROP TABLE grouping_sets; + +EXPLAIN PIPELINE +SELECT SUM(number) as sum_value, count() AS count_value from numbers_mt(1000000) +GROUP BY GROUPING SETS ((number % 10), (number % 100)) +ORDER BY sum_value, count_value SETTINGS max_threads=3; + +SELECT SUM(number) as sum_value, count() AS count_value from numbers_mt(1000000) +GROUP BY GROUPING SETS ((number % 10), (number % 100)) +ORDER BY sum_value, count_value SETTINGS max_threads=3; diff --git a/parser/testdata/01888_bloom_filter_hasAny/ast.json b/parser/testdata/01888_bloom_filter_hasAny/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01888_bloom_filter_hasAny/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01888_bloom_filter_hasAny/metadata.json b/parser/testdata/01888_bloom_filter_hasAny/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01888_bloom_filter_hasAny/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01888_bloom_filter_hasAny/query.sql b/parser/testdata/01888_bloom_filter_hasAny/query.sql new file mode 100644 index 000000000..dcd0a1346 --- /dev/null +++ b/parser/testdata/01888_bloom_filter_hasAny/query.sql @@ -0,0 +1,40 @@ +CREATE TABLE bftest ( + k Int64, + y Array(Int64) DEFAULT x, + x Array(Int64), + index ix1(x) TYPE bloom_filter GRANULARITY 3 +) +Engine=MergeTree +ORDER BY k; + +INSERT INTO bftest (k, x) SELECT number, arrayMap(i->rand64()%565656, range(10)) FROM numbers(1000); + +-- the index is not used, but the query should still work +SELECT count() FROM bftest WHERE hasAny(x, materialize([1,2,3])) FORMAT Null; + +-- verify that the expression in WHERE works on a non-index column the same way as on index columns +SELECT count() FROM bftest WHERE hasAny(y, [NULL,-42]) FORMAT Null; +SELECT count() FROM bftest WHERE hasAny(y, [0,NULL]) FORMAT Null; +SELECT count() FROM bftest WHERE hasAny(y, [[123], -42]) FORMAT Null; -- { serverError NO_COMMON_TYPE } +SELECT count() FROM bftest WHERE hasAny(y, [toDecimal32(123, 3), 2]) FORMAT Null; -- different, doesn't fail + +SET force_data_skipping_indices='ix1'; +SELECT count() FROM bftest WHERE has (x, 42) or has(x, -42) FORMAT Null; +SELECT count() FROM bftest WHERE hasAny(x, [42,-42]) FORMAT Null; +SELECT count() FROM bftest WHERE hasAny(x, []) FORMAT Null; +SELECT count() FROM bftest WHERE hasAny(x, [1]) FORMAT Null; + +-- can't use bloom_filter with `hasAny` on non-constant arguments (just like `has`) +SELECT count() FROM bftest WHERE hasAny(x, [1,2,k]) FORMAT Null; -- { serverError INDEX_NOT_USED } + +-- NULLs are not OK +SELECT count() FROM bftest WHERE hasAny(x, [NULL,-42]) FORMAT Null; -- { serverError INDEX_NOT_USED } +SELECT count() FROM bftest WHERE hasAny(x, [0,NULL]) FORMAT Null; -- { serverError INDEX_NOT_USED } + +-- incompatible types +SELECT count() FROM bftest WHERE hasAny(x, [[123], -42]) FORMAT Null; -- { serverError NO_COMMON_TYPE } +SELECT count() FROM bftest WHERE hasAny(x, [toDecimal32(123, 3), 2]) FORMAT Null; -- { serverError INDEX_NOT_USED } + +-- Bug discovered by the AST fuzzer (fixed, shouldn't crash). +SELECT 1 FROM bftest WHERE has(x, -0.) OR 0. FORMAT Null; +SELECT count() FROM bftest WHERE hasAny(x, [0, 1]) OR 0.
FORMAT Null; diff --git a/parser/testdata/01888_read_int_safe/ast.json b/parser/testdata/01888_read_int_safe/ast.json new file mode 100644 index 000000000..3a1d6c071 --- /dev/null +++ b/parser/testdata/01888_read_int_safe/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toInt64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '--1'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001418252, + "rows_read": 7, + "bytes_read": 257 + } +} diff --git a/parser/testdata/01888_read_int_safe/metadata.json b/parser/testdata/01888_read_int_safe/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01888_read_int_safe/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01888_read_int_safe/query.sql b/parser/testdata/01888_read_int_safe/query.sql new file mode 100644 index 000000000..e70db497f --- /dev/null +++ b/parser/testdata/01888_read_int_safe/query.sql @@ -0,0 +1,10 @@ +select toInt64('--1'); -- { serverError CANNOT_PARSE_NUMBER } +select toInt64('+-1'); -- { serverError CANNOT_PARSE_NUMBER } +select toInt64('++1'); -- { serverError CANNOT_PARSE_NUMBER } +select toInt64('++'); -- { serverError CANNOT_PARSE_NUMBER } +select toInt64('+'); -- { serverError CANNOT_PARSE_NUMBER } +select toInt64('1+1'); -- { serverError CANNOT_PARSE_TEXT } +select toInt64('1-1'); -- { serverError CANNOT_PARSE_TEXT } +select toInt64(''); -- { serverError ATTEMPT_TO_READ_AFTER_EOF } +select toInt64('1'); +select toInt64('-1'); diff --git a/parser/testdata/01889_key_condition_function_chains/ast.json b/parser/testdata/01889_key_condition_function_chains/ast.json new file mode 100644 index 000000000..fbc6da95c --- /dev/null +++ b/parser/testdata/01889_key_condition_function_chains/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001386317, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01889_key_condition_function_chains/metadata.json b/parser/testdata/01889_key_condition_function_chains/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01889_key_condition_function_chains/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01889_key_condition_function_chains/query.sql b/parser/testdata/01889_key_condition_function_chains/query.sql new file mode 100644 index 000000000..5afececf1 --- /dev/null +++ b/parser/testdata/01889_key_condition_function_chains/query.sql @@ -0,0 +1,33 @@ +set force_primary_key=1; + +drop table if exists tab; +create table tab (t DateTime) engine = MergeTree order by toStartOfDay(t); +insert into tab values ('2020-02-02 01:01:01'); +select t from tab where t > '2020-01-01 01:01:01'; +with t as s select t from tab where s > '2020-01-01 01:01:01'; + +drop table if exists tab; +create table tab (t DateTime) engine = MergeTree order by toStartOfDay(t + 1); +insert into tab values ('2020-02-02 01:01:01'); +select t from tab where t + 1 > '2020-01-01 01:01:01'; +with t + 1 as s select t from tab where s > '2020-01-01 01:01:01'; + + +set 
force_primary_key = 0; +set force_index_by_date=1; + +drop table if exists tab; +create table tab (x Int32, y Int32) engine = MergeTree partition by x + y order by tuple(); +insert into tab values (1, 1), (2, 2); +select x, y from tab where (x + y) = 2; +with x + y as s select x, y from tab where s = 2; +-- with x as s select x, y from tab where s + y = 2; + +drop table if exists tab; +create table tab (x Int32, y Int32) engine = MergeTree partition by ((x + y) + 1) * 2 order by tuple(); +insert into tab values (1, 1), (2, 2); +select x, y from tab where (x + y) + 1 = 3; +-- with x + y as s select x, y from tab where s + 1 = 3; + +set force_index_by_date=0; +drop table if exists tab; diff --git a/parser/testdata/01889_sql_json_functions/ast.json b/parser/testdata/01889_sql_json_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01889_sql_json_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01889_sql_json_functions/metadata.json b/parser/testdata/01889_sql_json_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01889_sql_json_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01889_sql_json_functions/query.sql b/parser/testdata/01889_sql_json_functions/query.sql new file mode 100644 index 000000000..6e0852029 --- /dev/null +++ b/parser/testdata/01889_sql_json_functions/query.sql @@ -0,0 +1,90 @@ +-- Tags: no-fasttest + +-- { echo } +SELECT '--JSON_VALUE--'; +SELECT JSON_VALUE('{"hello":1}', '$'); -- root is a complex object => default value (empty string) +SELECT JSON_VALUE('{"hello":1}', '$.hello'); +SELECT JSON_VALUE('{"hello":1.2}', '$.hello'); +SELECT JSON_VALUE('{"hello":true}', '$.hello'); +SELECT JSON_VALUE('{"hello":"world"}', '$.hello'); +SELECT JSON_VALUE('{"hello":null}', '$.hello'); +SELECT JSON_VALUE('{"hello":["world","world2"]}', '$.hello'); +SELECT JSON_VALUE('{"hello":{"world":"!"}}', '$.hello'); +SELECT JSON_VALUE('{hello:world}', '$.hello'); -- invalid json => default value (empty string) +SELECT JSON_VALUE('', '$.hello'); +SELECT JSON_VALUE('{"foo foo":"bar"}', '$."foo foo"'); +SELECT JSON_VALUE('{"hello":"\\uD83C\\uDF3A \\uD83C\\uDF38 \\uD83C\\uDF37 Hello, World \\uD83C\\uDF37 \\uD83C\\uDF38 \\uD83C\\uDF3A"}', '$.hello'); +SELECT JSON_VALUE('{"a":"Hello \\"World\\" \\\\"}', '$.a'); +select JSON_VALUE('{"a":"\\n\\u0000"}', '$.a'); +select JSON_VALUE('{"a":"\\u263a"}', '$.a'); +select JSON_VALUE('{"hello":"world"}', '$.b') settings function_json_value_return_type_allow_nullable=true; +select JSON_VALUE('{"hello":{"world":"!"}}', '$.hello') settings function_json_value_return_type_allow_complex=true; +SELECT JSON_VALUE('{"hello":["world","world2"]}', '$.hello') settings function_json_value_return_type_allow_complex=true; +SELECT JSON_VALUE('{"1key":1}', '$.1key'); +SELECT JSON_VALUE('{"hello":1}', '$[hello]'); +SELECT JSON_VALUE('{"hello":1}', '$["hello"]'); +SELECT JSON_VALUE('{"hello":1}', '$[\'hello\']'); +SELECT JSON_VALUE('{"hello 1":1}', '$["hello 1"]'); +SELECT JSON_VALUE('{"1key":1}', '$..1key'); -- { serverError BAD_ARGUMENTS } +SELECT JSON_VALUE('{"1key":1}', '$1key'); -- { serverError BAD_ARGUMENTS } +SELECT JSON_VALUE('{"1key":1}', '$key'); -- { serverError BAD_ARGUMENTS } +SELECT JSON_VALUE('{"1key":1}', '$.[key]'); -- { serverError BAD_ARGUMENTS } + +SELECT '--JSON_QUERY--'; +SELECT JSON_QUERY('{"hello":1}', '$'); +SELECT JSON_QUERY('{"hello":1}', '$.hello'); +SELECT 
JSON_QUERY('{"hello":1.2}', '$.hello'); +SELECT JSON_QUERY('{"hello":true}', '$.hello'); +SELECT JSON_QUERY('{"hello":"world"}', '$.hello'); +SELECT JSON_QUERY('{"hello":null}', '$.hello'); +SELECT JSON_QUERY('{"hello":["world","world2"]}', '$.hello'); +SELECT JSON_QUERY('{"hello":{"world":"!"}}', '$.hello'); +SELECT JSON_QUERY( '{hello:{"world":"!"}}}', '$.hello'); -- invalid json => default value (empty string) +SELECT JSON_QUERY('', '$.hello'); +SELECT JSON_QUERY('{"array":[[0, 1, 2, 3, 4, 5], [0, -1, -2, -3, -4, -5]]}', '$.array[*][0 to 2, 4]'); +SELECT JSON_QUERY('{"1key":1}', '$.1key'); +SELECT JSON_QUERY('{"123":1}', '$.123'); +SELECT JSON_QUERY('{"123":{"123":1}}', '$.123.123'); +SELECT JSON_QUERY('{"123":{"abc":1}}', '$.123.abc'); +SELECT JSON_QUERY('{"abc":{"123":1}}', '$.abc.123'); +SELECT JSON_QUERY('{"123abc":{"123":1}}', '$.123abc.123'); +SELECT JSON_QUERY('{"abc123":{"123":1}}', '$.abc123.123'); +SELECT JSON_QUERY('{"123":1}', '$[123]'); +SELECT JSON_QUERY('{"123":["1"]}', '$.123[0]'); +SELECT JSON_QUERY('{"123abc":["1"]}', '$.123abc[0]'); +SELECT JSON_QUERY('{"123abc":[{"123":"1"}]}', '$.123abc[0].123'); +SELECT JSON_QUERY('{"hello":1}', '$[hello]'); +SELECT JSON_QUERY('{"hello":1}', '$["hello"]'); +SELECT JSON_QUERY('{"hello":1}', '$[\'hello\']'); +SELECT JSON_QUERY('{"hello 1":1}', '$["hello 1"]'); +SELECT JSON_QUERY('{"1key":1}', '$..1key'); -- { serverError BAD_ARGUMENTS } +SELECT JSON_QUERY('{"1key":1}', '$1key'); -- { serverError BAD_ARGUMENTS } +SELECT JSON_QUERY('{"1key":1}', '$key'); -- { serverError BAD_ARGUMENTS } +SELECT JSON_QUERY('{"1key":1}', '$.[key]'); -- { serverError BAD_ARGUMENTS } + +SELECT '--JSON_EXISTS--'; +SELECT JSON_EXISTS('{"hello":1}', '$'); +SELECT JSON_EXISTS('', '$'); +SELECT JSON_EXISTS('{}', '$'); +SELECT JSON_EXISTS('{"hello":1}', '$.hello'); +SELECT JSON_EXISTS('{"hello":1,"world":2}', '$.world'); +SELECT JSON_EXISTS('{"hello":{"world":1}}', '$.world'); +SELECT JSON_EXISTS('{"hello":{"world":1}}', '$.hello.world'); +SELECT JSON_EXISTS('{hello:world}', '$.hello'); -- invalid json => default value (zero integer) +SELECT JSON_EXISTS('', '$.hello'); +SELECT JSON_EXISTS('{"hello":["world"]}', '$.hello[*]'); +SELECT JSON_EXISTS('{"hello":["world"]}', '$.hello[0]'); +SELECT JSON_EXISTS('{"hello":["world"]}', '$.hello[1]'); +SELECT JSON_EXISTS('{"a":[{"b":1},{"c":2}]}', '$.a[*].b'); +SELECT JSON_EXISTS('{"a":[{"b":1},{"c":2}]}', '$.a[*].f'); +SELECT JSON_EXISTS('{"a":[[{"b":1}, {"g":1}],[{"h":1},{"y":1}]]}', '$.a[*][0].h'); + +SELECT '--MANY ROWS--'; +DROP TABLE IF EXISTS 01889_sql_json; +CREATE TABLE 01889_sql_json (id UInt8, json String) ENGINE = MergeTree ORDER BY id; +INSERT INTO 01889_sql_json(id, json) VALUES(0, '{"name":"Ivan","surname":"Ivanov","friends":["Vasily","Kostya","Artyom"]}'); +INSERT INTO 01889_sql_json(id, json) VALUES(1, '{"name":"Katya","surname":"Baltica","friends":["Tihon","Ernest","Innokentiy"]}'); +INSERT INTO 01889_sql_json(id, json) VALUES(2, '{"name":"Vitali","surname":"Brown","friends":["Katya","Anatoliy","Ivan","Oleg"]}'); +SELECT id, JSON_QUERY(json, '$.friends[0 to 2]') FROM 01889_sql_json ORDER BY id; +SELECT id, JSON_VALUE(json, '$.friends[0]') FROM 01889_sql_json ORDER BY id; +DROP TABLE 01889_sql_json; diff --git a/parser/testdata/01889_tokenize/ast.json b/parser/testdata/01889_tokenize/ast.json new file mode 100644 index 000000000..7abcd10d2 --- /dev/null +++ b/parser/testdata/01889_tokenize/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + 
"explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000955738, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01889_tokenize/metadata.json b/parser/testdata/01889_tokenize/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01889_tokenize/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01889_tokenize/query.sql b/parser/testdata/01889_tokenize/query.sql new file mode 100644 index 000000000..287e439d2 --- /dev/null +++ b/parser/testdata/01889_tokenize/query.sql @@ -0,0 +1,11 @@ +SET allow_experimental_nlp_functions = 1; + +SELECT splitByNonAlpha('It is quite a wonderful day, isn\'t it?'); +SELECT splitByNonAlpha('There is.... so much to learn!'); +SELECT splitByNonAlpha('22:00 email@tut.by'); +SELECT splitByNonAlpha('Токенизация каких-либо других языков?'); + +SELECT splitByWhitespace('It is quite a wonderful day, isn\'t it?'); +SELECT splitByWhitespace('There is.... so much to learn!'); +SELECT splitByWhitespace('22:00 email@tut.by'); +SELECT splitByWhitespace('Токенизация каких-либо других языков?'); diff --git a/parser/testdata/01890_cross_join_explain_crash/ast.json b/parser/testdata/01890_cross_join_explain_crash/ast.json new file mode 100644 index 000000000..313e77023 --- /dev/null +++ b/parser/testdata/01890_cross_join_explain_crash/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001617558, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01890_cross_join_explain_crash/metadata.json b/parser/testdata/01890_cross_join_explain_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01890_cross_join_explain_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01890_cross_join_explain_crash/query.sql b/parser/testdata/01890_cross_join_explain_crash/query.sql new file mode 100644 index 000000000..79aea3884 --- /dev/null +++ b/parser/testdata/01890_cross_join_explain_crash/query.sql @@ -0,0 +1,7 @@ +SET enable_analyzer = 1; +SET joined_subquery_requires_alias = 0; + +select * FROM (SELECT 1), (SELECT 1), (SELECT 1); +select * from (select 2), (select 1) as a, (select 1) as b; +select * from (select 1) as a, (select 2), (select 1) as b; +select * from (select 1) as a, (select 1) as b, (select 2); diff --git a/parser/testdata/01890_jit_aggregation_function_sum_long/ast.json b/parser/testdata/01890_jit_aggregation_function_sum_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01890_jit_aggregation_function_sum_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01890_jit_aggregation_function_sum_long/metadata.json b/parser/testdata/01890_jit_aggregation_function_sum_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01890_jit_aggregation_function_sum_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01890_jit_aggregation_function_sum_long/query.sql b/parser/testdata/01890_jit_aggregation_function_sum_long/query.sql new file mode 100644 index 000000000..d1b8f4073 --- /dev/null +++ b/parser/testdata/01890_jit_aggregation_function_sum_long/query.sql @@ -0,0 +1,121 @@ +-- Tags: long + +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +SELECT 
'Test unsigned integer values'; + +DROP TABLE IF EXISTS test_table_unsigned_values; +CREATE TABLE test_table_unsigned_values +( + id UInt64, + + value1 UInt8, + value2 UInt16, + value3 UInt32, + value4 UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, sum(value1), sum(value2), sum(value3), sum(value4) FROM test_table_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_unsigned_values; + +SELECT 'Test signed integer values'; + +DROP TABLE IF EXISTS test_table_signed_values; +CREATE TABLE test_table_signed_values +( + id UInt64, + + value1 Int8, + value2 Int16, + value3 Int32, + value4 Int64 +) ENGINE=TinyLog; + +INSERT INTO test_table_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, sum(value1), sum(value2), sum(value3), sum(value4) FROM test_table_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_signed_values; + +SELECT 'Test float values'; + +DROP TABLE IF EXISTS test_table_float_values; +CREATE TABLE test_table_float_values +( + id UInt64, + + value1 Float32, + value2 Float64 +) ENGINE=TinyLog; + +INSERT INTO test_table_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, sum(value1), sum(value2) FROM test_table_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_float_values; + +SELECT 'Test nullable unsigned integer values'; + +DROP TABLE IF EXISTS test_table_nullable_unsigned_values; +CREATE TABLE test_table_nullable_unsigned_values +( + id UInt64, + + value1 Nullable(UInt8), + value2 Nullable(UInt16), + value3 Nullable(UInt32), + value4 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, sum(value1), sum(value2), sum(value3), sum(value4) FROM test_table_nullable_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_unsigned_values; + +SELECT 'Test nullable signed integer values'; + +DROP TABLE IF EXISTS test_table_nullable_signed_values; +CREATE TABLE test_table_nullable_signed_values +( + id UInt64, + + value1 Nullable(Int8), + value2 Nullable(Int16), + value3 Nullable(Int32), + value4 Nullable(Int64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, sum(value1), sum(value2), sum(value3), sum(value4) FROM test_table_nullable_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_signed_values; + +SELECT 'Test nullable float values'; + +DROP TABLE IF EXISTS test_table_nullable_float_values; +CREATE TABLE test_table_nullable_float_values +( + id UInt64, + + value1 Nullable(Float32), + value2 Nullable(Float64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, sum(value1), sum(value2) FROM test_table_nullable_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_float_values; + +SELECT 'Test null specifics'; + +DROP TABLE IF EXISTS test_table_null_specifics; +CREATE TABLE test_table_null_specifics +( + id UInt64, + + value1 Nullable(UInt64), + value2 Nullable(UInt64), + value3 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL); +INSERT INTO
test_table_null_specifics VALUES (0, 3, 3, NULL); + +SELECT id, sum(value1), sum(value2), sum(value3) FROM test_table_null_specifics GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS test_table_null_specifics; diff --git a/parser/testdata/01890_state_of_state/ast.json b/parser/testdata/01890_state_of_state/ast.json new file mode 100644 index 000000000..3a7352d38 --- /dev/null +++ b/parser/testdata/01890_state_of_state/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function uniqExact (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function uniqState (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001767679, + "rows_read": 24, + "bytes_read": 1046 + } +} diff --git a/parser/testdata/01890_state_of_state/metadata.json b/parser/testdata/01890_state_of_state/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01890_state_of_state/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01890_state_of_state/query.sql b/parser/testdata/01890_state_of_state/query.sql new file mode 100644 index 000000000..bec3ddad3 --- /dev/null +++ b/parser/testdata/01890_state_of_state/query.sql @@ -0,0 +1,22 @@ +SELECT uniqExact(x) FROM (SELECT uniqState(number) AS x FROM numbers(100)); +SELECT uniqExact(x) FROM (SELECT uniqState(number) AS x FROM numbers(1000)); +SELECT hex(toString(uniqExactState(x))) FROM (SELECT uniqState(number) AS x FROM numbers(1000)); +SELECT hex(toString(uniqExactState(x))) FROM (SELECT quantileState(number) AS x FROM numbers(1000)); +SELECT toTypeName(uniqExactState(x)) FROM (SELECT quantileState(number) AS x FROM numbers(1000)); +SELECT toTypeName(initializeAggregation('uniqExact', 0)); +SELECT toTypeName(initializeAggregation('uniqExactState', 0)); +SELECT toTypeName(initializeAggregation('uniqExactState', initializeAggregation('quantileState', 0))); +SELECT hex(toString(initializeAggregation('quantileState', 0))); +SELECT toTypeName(initializeAggregation('sumState', initializeAggregation('quantileState', 0))); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toTypeName(initializeAggregation('anyState', initializeAggregation('quantileState', 0))); +SELECT toTypeName(initializeAggregation('anyState', initializeAggregation('uniqState', 0))); +SELECT 
hex(toString(initializeAggregation('uniqState', initializeAggregation('uniqState', 0)))); +SELECT hex(toString(initializeAggregation('uniqState', initializeAggregation('quantileState', 0)))); +SELECT hex(toString(initializeAggregation('anyLastState', initializeAggregation('uniqState', 0)))); +SELECT hex(toString(initializeAggregation('anyState', initializeAggregation('uniqState', 0)))); +SELECT hex(toString(initializeAggregation('maxState', initializeAggregation('uniqState', 0)))); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT hex(toString(initializeAggregation('uniqExactState', initializeAggregation('uniqState', 0)))); +SELECT finalizeAggregation(initializeAggregation('uniqExactState', initializeAggregation('uniqState', 0))); +SELECT toTypeName(quantileState(x)) FROM (SELECT uniqState(number) AS x FROM numbers(1000)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT hex(toString(quantileState(x))) FROM (SELECT uniqState(number) AS x FROM numbers(1000)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT hex(toString(anyState(x))), hex(toString(any(x))) FROM (SELECT uniqState(number) AS x FROM numbers(1000)) FORMAT Vertical; diff --git a/parser/testdata/01890_stem/ast.json b/parser/testdata/01890_stem/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01890_stem/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01890_stem/metadata.json b/parser/testdata/01890_stem/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01890_stem/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01890_stem/query.sql b/parser/testdata/01890_stem/query.sql new file mode 100644 index 000000000..8fe41d225 --- /dev/null +++ b/parser/testdata/01890_stem/query.sql @@ -0,0 +1,28 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: depends on libstemmer_c + +SET allow_experimental_nlp_functions = 1; + +SELECT stem('en', 'given'); +SELECT stem('en', 'combinatorial'); +SELECT stem('en', 'collection'); +SELECT stem('en', 'possibility'); +SELECT stem('en', 'studied'); +SELECT stem('en', 'commonplace'); +SELECT stem('en', 'packing'); + +SELECT stem('ru', 'комбинаторной'); +SELECT stem('ru', 'получила'); +SELECT stem('ru', 'ограничена'); +SELECT stem('ru', 'конечной'); +SELECT stem('ru', 'максимальной'); +SELECT stem('ru', 'суммарный'); +SELECT stem('ru', 'стоимостью'); + +SELECT stem('fr', 'remplissage'); +SELECT stem('fr', 'valeur'); +SELECT stem('fr', 'maximiser'); +SELECT stem('fr', 'dépasser'); +SELECT stem('fr', 'intensivement'); +SELECT stem('fr', 'étudié'); +SELECT stem('fr', 'peuvent'); diff --git a/parser/testdata/01891_echo/ast.json b/parser/testdata/01891_echo/ast.json new file mode 100644 index 000000000..a63406f7a --- /dev/null +++ b/parser/testdata/01891_echo/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001756502, + "rows_read": 5, + "bytes_read": 177 + } +} diff --git a/parser/testdata/01891_echo/metadata.json b/parser/testdata/01891_echo/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01891_echo/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/01891_echo/query.sql b/parser/testdata/01891_echo/query.sql new file mode 100644 index 000000000..fc46b64dc --- /dev/null +++ b/parser/testdata/01891_echo/query.sql @@ -0,0 +1,7 @@ +select 1; +-- { echo } +select 1; +-- { echoOff } +select 2; +-- { echoOn } +select 2; diff --git a/parser/testdata/01891_jit_aggregation_function_any_long/ast.json b/parser/testdata/01891_jit_aggregation_function_any_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01891_jit_aggregation_function_any_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01891_jit_aggregation_function_any_long/metadata.json b/parser/testdata/01891_jit_aggregation_function_any_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01891_jit_aggregation_function_any_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01891_jit_aggregation_function_any_long/query.sql b/parser/testdata/01891_jit_aggregation_function_any_long/query.sql new file mode 100644 index 000000000..124353e2a --- /dev/null +++ b/parser/testdata/01891_jit_aggregation_function_any_long/query.sql @@ -0,0 +1,121 @@ +-- Tags: long, log-engine + +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +SELECT 'Test unsigned integer values'; + +DROP TABLE IF EXISTS test_table_unsigned_values; +CREATE TABLE test_table_unsigned_values +( + id UInt64, + + value1 UInt8, + value2 UInt16, + value3 UInt32, + value4 UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, any(value1), any(value2), any(value3), any(value4) FROM test_table_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_unsigned_values; + +SELECT 'Test signed integer values'; + +DROP TABLE IF EXISTS test_table_signed_values; +CREATE TABLE test_table_signed_values +( + id UInt64, + + value1 Int8, + value2 Int16, + value3 Int32, + value4 Int64 +) ENGINE=TinyLog; + +INSERT INTO test_table_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, any(value1), any(value2), any(value3), any(value4) FROM test_table_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_signed_values; + +SELECT 'Test float values'; + +DROP TABLE IF EXISTS test_table_float_values; +CREATE TABLE test_table_float_values +( + id UInt64, + + value1 Float32, + value2 Float64 +) ENGINE=TinyLog; + +INSERT INTO test_table_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, any(value1), any(value2) FROM test_table_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_float_values; + +SELECT 'Test nullable unsigned integer values'; + +DROP TABLE IF EXISTS test_table_nullable_unsigned_values; +CREATE TABLE test_table_nullable_unsigned_values +( + id UInt64, + + value1 Nullable(UInt8), + value2 Nullable(UInt16), + value3 Nullable(UInt32), + value4 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, any(value1), any(value2), any(value3), any(value4) FROM test_table_nullable_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_unsigned_values; + +SELECT 'Test nullable signed integer values'; + +DROP TABLE IF EXISTS test_table_nullable_signed_values; +CREATE TABLE 
test_table_nullable_signed_values +( + id UInt64, + + value1 Nullable(Int8), + value2 Nullable(Int16), + value3 Nullable(Int32), + value4 Nullable(Int64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, any(value1), any(value2), any(value3), any(value4) FROM test_table_nullable_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_signed_values; + +SELECT 'Test nullable float values'; + +DROP TABLE IF EXISTS test_table_nullable_float_values; +CREATE TABLE test_table_nullable_float_values +( + id UInt64, + + value1 Nullable(Float32), + value2 Nullable(Float64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, any(value1), any(value2) FROM test_table_nullable_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_float_values; + +SELECT 'Test null specifics'; + +DROP TABLE IF EXISTS test_table_null_specifics; +CREATE TABLE test_table_null_specifics +( + id UInt64, + + value1 Nullable(UInt64), + value2 Nullable(UInt64), + value3 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 3, 3, NULL); + +SELECT id, any(value1), any(value2), any(value3) FROM test_table_null_specifics GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS test_table_null_specifics; diff --git a/parser/testdata/01891_not_in_partition_prune/ast.json b/parser/testdata/01891_not_in_partition_prune/ast.json new file mode 100644 index 000000000..2ccd93d07 --- /dev/null +++ b/parser/testdata/01891_not_in_partition_prune/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test1 (children 1)" + }, + { + "explain": " Identifier test1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001338796, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/01891_not_in_partition_prune/metadata.json b/parser/testdata/01891_not_in_partition_prune/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01891_not_in_partition_prune/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01891_not_in_partition_prune/query.sql b/parser/testdata/01891_not_in_partition_prune/query.sql new file mode 100644 index 000000000..5bf90fdd6 --- /dev/null +++ b/parser/testdata/01891_not_in_partition_prune/query.sql @@ -0,0 +1,25 @@ +drop table if exists test1; + +create table test1 (i int, j int) engine MergeTree partition by i order by tuple() settings index_granularity = 1; + +insert into test1 select number, number + 100 from numbers(10); +select count() from test1 where i not in (1,2,3); +set max_rows_to_read = 5; +select * from test1 where i not in (1,2,3,4,5) order by i; + +drop table test1; + +drop table if exists t1; +drop table if exists t2; + +create table t1 (date Date, a Float64, b String) Engine=MergeTree ORDER BY date; +create table t2 (date Date, a Float64, b String) Engine=MergeTree ORDER BY date; + +insert into t1(a, b) values (1, 'one'), (2, 'two'); +insert into t2(a, b) values (2, 'two'), (3, 'three'); + +select date, a, b from t1 where (date, a, b) NOT IN (select date,a,b from t2); +select date, a, b from t2 where (date, a, b) NOT IN (select date,a,b from t1); + +drop table 
t1; +drop table t2; diff --git a/parser/testdata/01891_not_like_partition_prune/ast.json b/parser/testdata/01891_not_like_partition_prune/ast.json new file mode 100644 index 000000000..3ca0426db --- /dev/null +++ b/parser/testdata/01891_not_like_partition_prune/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00149458, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01891_not_like_partition_prune/metadata.json b/parser/testdata/01891_not_like_partition_prune/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01891_not_like_partition_prune/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01891_not_like_partition_prune/query.sql b/parser/testdata/01891_not_like_partition_prune/query.sql new file mode 100644 index 000000000..5346a7f08 --- /dev/null +++ b/parser/testdata/01891_not_like_partition_prune/query.sql @@ -0,0 +1,9 @@ +drop table if exists test; + +create table test (a String) Engine MergeTree order by a partition by a; +insert into test values('1'), ('1.1'), ('1.2'), ('1.12'); + +select * from test where a like '1%1' order by a; +select * from test where a not like '1%1' order by a; +select * from test where a not like '1%2' order by a; +drop table test; diff --git a/parser/testdata/01891_partition_by_uuid/ast.json b/parser/testdata/01891_partition_by_uuid/ast.json new file mode 100644 index 000000000..c4aebe413 --- /dev/null +++ b/parser/testdata/01891_partition_by_uuid/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001440558, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01891_partition_by_uuid/metadata.json b/parser/testdata/01891_partition_by_uuid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01891_partition_by_uuid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01891_partition_by_uuid/query.sql b/parser/testdata/01891_partition_by_uuid/query.sql new file mode 100644 index 000000000..19abbb2eb --- /dev/null +++ b/parser/testdata/01891_partition_by_uuid/query.sql @@ -0,0 +1,7 @@ +drop table if exists tab; +create table tab (id UUID, value UInt32) engine = MergeTree PARTITION BY id order by tuple(); +insert into tab values ('61f0c404-5cb3-11e7-907b-a6006ad3dba0', 1), ('61f0c404-5cb3-11e7-907b-a6006ad3dba0', 2); +-- Here we check that partition id for UUID partition key did not change. +-- Different result means Backward Incompatible Change. Old partitions will not be accepted by new server. 
+select partition_id from system.parts where table = 'tab' and database = currentDatabase(); +drop table if exists tab; diff --git a/parser/testdata/01891_partition_hash/ast.json b/parser/testdata/01891_partition_hash/ast.json new file mode 100644 index 000000000..3153575f0 --- /dev/null +++ b/parser/testdata/01891_partition_hash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001715206, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01891_partition_hash/metadata.json b/parser/testdata/01891_partition_hash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01891_partition_hash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01891_partition_hash/query.sql b/parser/testdata/01891_partition_hash/query.sql new file mode 100644 index 000000000..894594dd4 --- /dev/null +++ b/parser/testdata/01891_partition_hash/query.sql @@ -0,0 +1,32 @@ +DROP TABLE IF EXISTS tab; +CREATE TABLE tab ( + i8 Int8, + i16 Int16, + i32 Int32, + i64 Int64, + i128 Int128, + i256 Int256, + u8 UInt8, + u16 UInt16, + u32 UInt32, + u64 UInt64, + u128 UInt128, + u256 UInt256, + id UUID, + s String, + fs FixedString(33), + a Array(UInt8), + t Tuple(UInt16, UInt32), + d Date, + dt DateTime('Asia/Istanbul'), + dt64 DateTime64(3, 'Asia/Istanbul'), + dec128 Decimal128(3), + dec256 Decimal256(4), + lc LowCardinality(String)) +engine = MergeTree PARTITION BY (i8, i16, i32, i64, i128, i256, u8, u16, u32, u64, u128, u256, id, s, fs, a, t, d, dt, dt64, dec128, dec256, lc) ORDER BY tuple(); +INSERT INTO tab VALUES (-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, '61f0c404-5cb3-11e7-907b-a6006ad3dba0', 'a', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', [1, 2, 3], (-1, -2), '2020-01-01', '2020-01-01 01:01:01', '2020-01-01 01:01:01', '123.456', '78.9101', 'a'); +INSERT INTO tab VALUES (123, 12345, 1234567890, 1234567890000000000, 123456789000000000000000000000000000000, 123456789000000000000000000000000000000000000000000000000000000000000000000000, 123, 12345, 1234567890, 1234567890000000000, 123456789000000000000000000000000000000, 123456789000000000000000000000000000000000000000000000000000000000000000000000, '61f0c404-5cb3-11e7-907b-a6006ad3dba0', 'a', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', [1, 2, 3], (-1, -2), '2020-01-01', '2020-01-01 01:01:01', '2020-01-01 01:01:01', '123.456', '78.9101', 'a'); +-- Here we check that partition id did not change. +-- Different result means Backward Incompatible Change. Old partitions will not be accepted by new server. 
+SELECT partition_id FROM system.parts WHERE table = 'tab' AND database = currentDatabase(); +DROP TABLE IF EXISTS tab; diff --git a/parser/testdata/01891_partition_hash_no_long_int/ast.json b/parser/testdata/01891_partition_hash_no_long_int/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01891_partition_hash_no_long_int/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01891_partition_hash_no_long_int/metadata.json b/parser/testdata/01891_partition_hash_no_long_int/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01891_partition_hash_no_long_int/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01891_partition_hash_no_long_int/query.sql b/parser/testdata/01891_partition_hash_no_long_int/query.sql new file mode 100644 index 000000000..431f566b8 --- /dev/null +++ b/parser/testdata/01891_partition_hash_no_long_int/query.sql @@ -0,0 +1,9 @@ +-- Tags: long + +drop table if exists tab; +create table tab (i8 Int8, i16 Int16, i32 Int32, i64 Int64, u8 UInt8, u16 UInt16, u32 UInt32, u64 UInt64, id UUID, s String, fs FixedString(33), a Array(UInt8), t Tuple(UInt16, UInt32), d Date, dt DateTime('Asia/Istanbul'), dt64 DateTime64(3, 'Asia/Istanbul'), dec128 Decimal128(3), lc LowCardinality(String)) engine = MergeTree PARTITION BY (i8, i16, i32, i64, u8, u16, u32, u64, id, s, fs, a, t, d, dt, dt64, dec128, lc) order by tuple(); +insert into tab values (-1, -1, -1, -1, -1, -1, -1, -1, '61f0c404-5cb3-11e7-907b-a6006ad3dba0', 'a', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', [1, 2, 3], (-1, -2), '2020-01-01', '2020-01-01 01:01:01', '2020-01-01 01:01:01', '123.456', 'a'); +-- Here we check that partition id did not change. +-- Different result means Backward Incompatible Change. Old partitions will not be accepted by new server. 
+select partition_id from system.parts where table = 'tab' and database = currentDatabase(); +drop table if exists tab; diff --git a/parser/testdata/01892_jit_aggregation_function_any_last_long/ast.json b/parser/testdata/01892_jit_aggregation_function_any_last_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01892_jit_aggregation_function_any_last_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01892_jit_aggregation_function_any_last_long/metadata.json b/parser/testdata/01892_jit_aggregation_function_any_last_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01892_jit_aggregation_function_any_last_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01892_jit_aggregation_function_any_last_long/query.sql b/parser/testdata/01892_jit_aggregation_function_any_last_long/query.sql new file mode 100644 index 000000000..13ebd958a --- /dev/null +++ b/parser/testdata/01892_jit_aggregation_function_any_last_long/query.sql @@ -0,0 +1,121 @@ +-- Tags: long + +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +SELECT 'Test unsigned integer values'; + +DROP TABLE IF EXISTS test_table_unsigned_values; +CREATE TABLE test_table_unsigned_values +( + id UInt64, + + value1 UInt8, + value2 UInt16, + value3 UInt32, + value4 UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, anyLast(value1), anyLast(value2), anyLast(value3), anyLast(value4) FROM test_table_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_unsigned_values; + +SELECT 'Test signed integer values'; + +DROP TABLE IF EXISTS test_table_signed_values; +CREATE TABLE test_table_signed_values +( + id UInt64, + + value1 Int8, + value2 Int16, + value3 Int32, + value4 Int64 +) ENGINE=TinyLog; + +INSERT INTO test_table_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, anyLast(value1), anyLast(value2), anyLast(value3), anyLast(value4) FROM test_table_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_signed_values; + +SELECT 'Test float values'; + +DROP TABLE IF EXISTS test_table_float_values; +CREATE TABLE test_table_float_values +( + id UInt64, + + value1 Float32, + value2 Float64 +) ENGINE=TinyLog; + +INSERT INTO test_table_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, anyLast(value1), anyLast(value2) FROM test_table_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_float_values; + +SELECT 'Test nullable unsigned integer values'; + +DROP TABLE IF EXISTS test_table_nullable_unsigned_values; +CREATE TABLE test_table_nullable_unsigned_values +( + id UInt64, + + value1 Nullable(UInt8), + value2 Nullable(UInt16), + value3 Nullable(UInt32), + value4 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, anyLast(value1), anyLast(value2), anyLast(value3), anyLast(value4) FROM test_table_nullable_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_unsigned_values; + +SELECT 'Test nullable signed integer values'; + +DROP TABLE IF EXISTS test_table_nullable_signed_values; +CREATE TABLE test_table_nullable_signed_values +( + id UInt64, + + value1 Nullable(Int8), + value2 Nullable(Int16), + 
value3 Nullable(Int32), + value4 Nullable(Int64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, anyLast(value1), anyLast(value2), anyLast(value3), anyLast(value4) FROM test_table_nullable_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_signed_values; + +SELECT 'Test nullable float values'; + +DROP TABLE IF EXISTS test_table_nullable_float_values; +CREATE TABLE test_table_nullable_float_values +( + id UInt64, + + value1 Nullable(Float32), + value2 Nullable(Float64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, anyLast(value1), anyLast(value2) FROM test_table_nullable_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_float_values; + +SELECT 'Test null specifics'; + +DROP TABLE IF EXISTS test_table_null_specifics; +CREATE TABLE test_table_null_specifics +( + id UInt64, + + value1 Nullable(UInt64), + value2 Nullable(UInt64), + value3 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 3, 3, NULL); + +SELECT id, anyLast(value1), anyLast(value2), anyLast(value3) FROM test_table_null_specifics GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS test_table_null_specifics; diff --git a/parser/testdata/01892_setting_limit_offset_distributed/ast.json b/parser/testdata/01892_setting_limit_offset_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01892_setting_limit_offset_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01892_setting_limit_offset_distributed/metadata.json b/parser/testdata/01892_setting_limit_offset_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01892_setting_limit_offset_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01892_setting_limit_offset_distributed/query.sql b/parser/testdata/01892_setting_limit_offset_distributed/query.sql new file mode 100644 index 000000000..88867eef2 --- /dev/null +++ b/parser/testdata/01892_setting_limit_offset_distributed/query.sql @@ -0,0 +1,32 @@ +-- Tags: distributed + +SELECT 'limit', * FROM remote('127.1', view(SELECT * FROM numbers(10))) SETTINGS limit=5; +SELECT 'offset', * FROM remote('127.1', view(SELECT * FROM numbers(10))) SETTINGS offset=5; + +SELECT + 'limit w/ GROUP BY', + count(), + number +FROM remote('127.{1,2}', view( + SELECT intDiv(number, 2) AS number + FROM numbers(10) +)) +GROUP BY number +ORDER BY + count() ASC, + number DESC +SETTINGS limit=2; + +SELECT + 'limit/offset w/ GROUP BY', + count(), + number +FROM remote('127.{1,2}', view( + SELECT intDiv(number, 2) AS number + FROM numbers(10) +)) +GROUP BY number +ORDER BY + count() ASC, + number DESC +SETTINGS limit=2, offset=2; diff --git a/parser/testdata/01893_jit_aggregation_function_min_long/ast.json b/parser/testdata/01893_jit_aggregation_function_min_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01893_jit_aggregation_function_min_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01893_jit_aggregation_function_min_long/metadata.json b/parser/testdata/01893_jit_aggregation_function_min_long/metadata.json new file 
mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01893_jit_aggregation_function_min_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01893_jit_aggregation_function_min_long/query.sql b/parser/testdata/01893_jit_aggregation_function_min_long/query.sql new file mode 100644 index 000000000..2f2b683a1 --- /dev/null +++ b/parser/testdata/01893_jit_aggregation_function_min_long/query.sql @@ -0,0 +1,121 @@ +-- Tags: long + +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +SELECT 'Test unsigned integer values'; + +DROP TABLE IF EXISTS test_table_unsigned_values; +CREATE TABLE test_table_unsigned_values +( + id UInt64, + + value1 UInt8, + value2 UInt16, + value3 UInt32, + value4 UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, min(value1), min(value2), min(value3), min(value4) FROM test_table_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_unsigned_values; + +SELECT 'Test signed integer values'; + +DROP TABLE IF EXISTS test_table_signed_values; +CREATE TABLE test_table_signed_values +( + id UInt64, + + value1 Int8, + value2 Int16, + value3 Int32, + value4 Int64 +) ENGINE=TinyLog; + +INSERT INTO test_table_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, min(value1), min(value2), min(value3), min(value4) FROM test_table_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_signed_values; + +SELECT 'Test float values'; + +DROP TABLE IF EXISTS test_table_float_values; +CREATE TABLE test_table_float_values +( + id UInt64, + + value1 Float32, + value2 Float64 +) ENGINE=TinyLog; + +INSERT INTO test_table_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, min(value1), min(value2) FROM test_table_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_float_values; + +SELECT 'Test nullable unsigned integer values'; + +DROP TABLE IF EXISTS test_table_nullable_unsigned_values; +CREATE TABLE test_table_nullable_unsigned_values +( + id UInt64, + + value1 Nullable(UInt8), + value2 Nullable(UInt16), + value3 Nullable(UInt32), + value4 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, min(value1), min(value2), min(value3), min(value4) FROM test_table_nullable_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_unsigned_values; + +SELECT 'Test nullable signed integer values'; + +DROP TABLE IF EXISTS test_table_nullable_signed_values; +CREATE TABLE test_table_nullable_signed_values +( + id UInt64, + + value1 Nullable(Int8), + value2 Nullable(Int16), + value3 Nullable(Int32), + value4 Nullable(Int64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, min(value1), min(value2), min(value3), min(value4) FROM test_table_nullable_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_signed_values; + +SELECT 'Test nullable float values'; + +DROP TABLE IF EXISTS test_table_nullable_float_values; +CREATE TABLE test_table_nullable_float_values +( + id UInt64, + + value1 Nullable(Float32), + value2 Nullable(Float64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_float_values SELECT number % 3, number, number FROM 
system.numbers LIMIT 120; +SELECT id, min(value1), min(value2) FROM test_table_nullable_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_float_values; + +SELECT 'Test null specifics'; + +DROP TABLE IF EXISTS test_table_null_specifics; +CREATE TABLE test_table_null_specifics +( + id UInt64, + + value1 Nullable(UInt64), + value2 Nullable(UInt64), + value3 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 3, 3, NULL); + +SELECT id, min(value1), min(value2), min(value3) FROM test_table_null_specifics GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS test_table_null_specifics; diff --git a/parser/testdata/01894_jit_aggregation_function_max_long/ast.json b/parser/testdata/01894_jit_aggregation_function_max_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01894_jit_aggregation_function_max_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01894_jit_aggregation_function_max_long/metadata.json b/parser/testdata/01894_jit_aggregation_function_max_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01894_jit_aggregation_function_max_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01894_jit_aggregation_function_max_long/query.sql b/parser/testdata/01894_jit_aggregation_function_max_long/query.sql new file mode 100644 index 000000000..99dda0593 --- /dev/null +++ b/parser/testdata/01894_jit_aggregation_function_max_long/query.sql @@ -0,0 +1,121 @@ +-- Tags: long + +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +SELECT 'Test unsigned integer values'; + +DROP TABLE IF EXISTS test_table_unsigned_values; +CREATE TABLE test_table_unsigned_values +( + id UInt64, + + value1 UInt8, + value2 UInt16, + value3 UInt32, + value4 UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, max(value1), max(value2), max(value3), max(value4) FROM test_table_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_unsigned_values; + +SELECT 'Test signed integer values'; + +DROP TABLE IF EXISTS test_table_signed_values; +CREATE TABLE test_table_signed_values +( + id UInt64, + + value1 Int8, + value2 Int16, + value3 Int32, + value4 Int64 +) ENGINE=TinyLog; + +INSERT INTO test_table_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, max(value1), max(value2), max(value3), max(value4) FROM test_table_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_signed_values; + +SELECT 'Test float values'; + +DROP TABLE IF EXISTS test_table_float_values; +CREATE TABLE test_table_float_values +( + id UInt64, + + value1 Float32, + value2 Float64 +) ENGINE=TinyLog; + +INSERT INTO test_table_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, max(value1), max(value2) FROM test_table_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_float_values; + +SELECT 'Test nullable unsigned integer values'; + +DROP TABLE IF EXISTS test_table_nullable_unsigned_values; +CREATE TABLE test_table_nullable_unsigned_values +( + id UInt64, + + value1 Nullable(UInt8), + value2 Nullable(UInt16), + value3 Nullable(UInt32), + value4 Nullable(UInt64) +)
ENGINE=TinyLog; + +INSERT INTO test_table_nullable_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, max(value1), max(value2), max(value3), max(value4) FROM test_table_nullable_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_unsigned_values; + +SELECT 'Test nullable signed integer values'; + +DROP TABLE IF EXISTS test_table_nullable_signed_values; +CREATE TABLE test_table_nullable_signed_values +( + id UInt64, + + value1 Nullable(Int8), + value2 Nullable(Int16), + value3 Nullable(Int32), + value4 Nullable(Int64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, max(value1), max(value2), max(value3), max(value4) FROM test_table_nullable_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_signed_values; + +SELECT 'Test nullable float values'; + +DROP TABLE IF EXISTS test_table_nullable_float_values; +CREATE TABLE test_table_nullable_float_values +( + id UInt64, + + value1 Nullable(Float32), + value2 Nullable(Float64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, max(value1), max(value2) FROM test_table_nullable_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_float_values; + +SELECT 'Test null specifics'; + +DROP TABLE IF EXISTS test_table_null_specifics; +CREATE TABLE test_table_null_specifics +( + id UInt64, + + value1 Nullable(UInt64), + value2 Nullable(UInt64), + value3 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 3, 3, NULL); + +SELECT id, max(value1), max(value2), max(value3) FROM test_table_null_specifics GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS test_table_null_specifics; diff --git a/parser/testdata/01895_jit_aggregation_function_avg_long/ast.json b/parser/testdata/01895_jit_aggregation_function_avg_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01895_jit_aggregation_function_avg_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01895_jit_aggregation_function_avg_long/metadata.json b/parser/testdata/01895_jit_aggregation_function_avg_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01895_jit_aggregation_function_avg_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01895_jit_aggregation_function_avg_long/query.sql b/parser/testdata/01895_jit_aggregation_function_avg_long/query.sql new file mode 100644 index 000000000..27a97faa4 --- /dev/null +++ b/parser/testdata/01895_jit_aggregation_function_avg_long/query.sql @@ -0,0 +1,121 @@ +-- Tags: long + +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +SELECT 'Test unsigned integer values'; + +DROP TABLE IF EXISTS test_table_unsigned_values; +CREATE TABLE test_table_unsigned_values +( + id UInt64, + + value1 UInt8, + value2 UInt16, + value3 UInt32, + value4 UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, avg(value1), avg(value2), avg(value3), avg(value4) FROM test_table_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE 
test_table_unsigned_values; + +SELECT 'Test signed integer values'; + +DROP TABLE IF EXISTS test_table_signed_values; +CREATE TABLE test_table_signed_values +( + id UInt64, + + value1 Int8, + value2 Int16, + value3 Int32, + value4 Int64 +) ENGINE=TinyLog; + +INSERT INTO test_table_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, avg(value1), avg(value2), avg(value3), avg(value4) FROM test_table_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_signed_values; + +SELECT 'Test float values'; + +DROP TABLE IF EXISTS test_table_float_values; +CREATE TABLE test_table_float_values +( + id UInt64, + + value1 Float32, + value2 Float64 +) ENGINE=TinyLog; + +INSERT INTO test_table_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, avg(value1), avg(value2) FROM test_table_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_float_values; + +SELECT 'Test nullable unsigned integer values'; + +DROP TABLE IF EXISTS test_table_nullable_unsigned_values; +CREATE TABLE test_table_nullable_unsigned_values +( + id UInt64, + + value1 Nullable(UInt8), + value2 Nullable(UInt16), + value3 Nullable(UInt32), + value4 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_unsigned_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, avg(value1), avg(value2), avg(value3), avg(value4) FROM test_table_nullable_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_unsigned_values; + +SELECT 'Test nullable signed integer values'; + +DROP TABLE IF EXISTS test_table_nullable_signed_values; +CREATE TABLE test_table_nullable_signed_values +( + id UInt64, + + value1 Nullable(Int8), + value2 Nullable(Int16), + value3 Nullable(Int32), + value4 Nullable(Int64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_signed_values SELECT number % 3, number, number, number, number FROM system.numbers LIMIT 120; +SELECT id, avg(value1), avg(value2), avg(value3), avg(value4) FROM test_table_nullable_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_signed_values; + +SELECT 'Test nullable float values'; + +DROP TABLE IF EXISTS test_table_nullable_float_values; +CREATE TABLE test_table_nullable_float_values +( + id UInt64, + + value1 Nullable(Float32), + value2 Nullable(Float64) +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_float_values SELECT number % 3, number, number FROM system.numbers LIMIT 120; +SELECT id, avg(value1), avg(value2) FROM test_table_nullable_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_float_values; + +SELECT 'Test null specifics'; + +DROP TABLE IF EXISTS test_table_null_specifics; +CREATE TABLE test_table_null_specifics +( + id UInt64, + + value1 Nullable(UInt64), + value2 Nullable(UInt64), + value3 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 3, 3, NULL); + +SELECT id, avg(value1), avg(value2), avg(value3) FROM test_table_null_specifics GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS test_table_null_specifics; diff --git a/parser/testdata/01896_jit_aggregation_function_if_long/ast.json b/parser/testdata/01896_jit_aggregation_function_if_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01896_jit_aggregation_function_if_long/ast.json @@ -0,0 +1 @@ 
+{"error": true} diff --git a/parser/testdata/01896_jit_aggregation_function_if_long/metadata.json b/parser/testdata/01896_jit_aggregation_function_if_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01896_jit_aggregation_function_if_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01896_jit_aggregation_function_if_long/query.sql b/parser/testdata/01896_jit_aggregation_function_if_long/query.sql new file mode 100644 index 000000000..a3228d4a8 --- /dev/null +++ b/parser/testdata/01896_jit_aggregation_function_if_long/query.sql @@ -0,0 +1,199 @@ +-- Tags: long + +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +SELECT 'Test unsigned integer values'; + +DROP TABLE IF EXISTS test_table_unsigned_values; +CREATE TABLE test_table_unsigned_values +( + id UInt64, + + value1 UInt8, + value2 UInt16, + value3 UInt32, + value4 UInt64, + + predicate_value UInt8 +) ENGINE=TinyLog; + +INSERT INTO test_table_unsigned_values SELECT number % 3, number, number, number, number, if(number % 2 == 0, 1, 0) FROM system.numbers LIMIT 120; +SELECT + id, + sumIf(value1, predicate_value), + sumIf(value2, predicate_value), + sumIf(value3, predicate_value), + sumIf(value4, predicate_value) +FROM test_table_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_unsigned_values; + +SELECT 'Test signed integer values'; + +DROP TABLE IF EXISTS test_table_signed_values; +CREATE TABLE test_table_signed_values +( + id UInt64, + + value1 Int8, + value2 Int16, + value3 Int32, + value4 Int64, + + predicate_value UInt8 +) ENGINE=TinyLog; + +INSERT INTO test_table_signed_values SELECT number % 3, number, number, number, number, if(number % 2 == 0, 1, 0) FROM system.numbers LIMIT 120; +SELECT + id, + sumIf(value1, predicate_value), + sumIf(value2, predicate_value), + sumIf(value3, predicate_value), + sumIf(value4, predicate_value) +FROM test_table_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_signed_values; + +SELECT 'Test float values'; + +DROP TABLE IF EXISTS test_table_float_values; +CREATE TABLE test_table_float_values +( + id UInt64, + + value1 Float32, + value2 Float64, + + predicate_value UInt8 +) ENGINE=TinyLog; + +INSERT INTO test_table_float_values SELECT number % 3, number, number, if(number % 2 == 0, 1, 0) FROM system.numbers LIMIT 120; +SELECT + id, + sumIf(value1, predicate_value), + sumIf(value2, predicate_value) +FROM test_table_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_float_values; + +SELECT 'Test nullable unsigned integer values'; + +DROP TABLE IF EXISTS test_table_nullable_unsigned_values; +CREATE TABLE test_table_nullable_unsigned_values +( + id UInt64, + + value1 Nullable(UInt8), + value2 Nullable(UInt16), + value3 Nullable(UInt32), + value4 Nullable(UInt64), + + predicate_value UInt8 +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_unsigned_values SELECT number % 3, number, number, number, number, if(number % 2 == 0, 1, 0) FROM system.numbers LIMIT 120; +SELECT + id, + sumIf(value1, predicate_value), + sumIf(value2, predicate_value), + sumIf(value3, predicate_value), + sumIf(value4, predicate_value) +FROM test_table_nullable_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_unsigned_values; + +SELECT 'Test nullable signed integer values'; + +DROP TABLE IF EXISTS test_table_nullable_signed_values; +CREATE TABLE test_table_nullable_signed_values +( + id UInt64, + + value1 Nullable(Int8), + value2 Nullable(Int16), + 
value3 Nullable(Int32), + value4 Nullable(Int64), + + predicate_value UInt8 +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_signed_values SELECT number % 3, number, number, number, number, if(number % 2 == 0, 1, 0) FROM system.numbers LIMIT 120; +SELECT + id, + sumIf(value1, predicate_value), + sumIf(value2, predicate_value), + sumIf(value3, predicate_value), + sumIf(value4, predicate_value) +FROM test_table_nullable_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_signed_values; + +SELECT 'Test nullable float values'; + +DROP TABLE IF EXISTS test_table_nullable_float_values; +CREATE TABLE test_table_nullable_float_values +( + id UInt64, + + value1 Nullable(Float32), + value2 Nullable(Float64), + + predicate_value UInt8 +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_float_values SELECT number % 3, number, number, if(number % 2 == 0, 1, 0) FROM system.numbers LIMIT 120; +SELECT + id, + sumIf(value1, predicate_value), + sumIf(value2, predicate_value) +FROM test_table_nullable_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_float_values; + +SELECT 'Test null specifics'; + +DROP TABLE IF EXISTS test_table_null_specifics; +CREATE TABLE test_table_null_specifics +( + id UInt64, + + value1 Nullable(UInt64), + value2 Nullable(UInt64), + value3 Nullable(UInt64), + + predicate_value UInt8 +) ENGINE=TinyLog; + +INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL, 1); +INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL, 1); +INSERT INTO test_table_null_specifics VALUES (0, 3, 3, NULL, 1); + +SELECT + id, + sumIf(value1, predicate_value), + sumIf(value2, predicate_value), + sumIf(value3, predicate_value) +FROM test_table_null_specifics GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS test_table_null_specifics; + +SELECT 'Test null variadic'; + +DROP TABLE IF EXISTS test_table_null_specifics; +CREATE TABLE test_table_null_specifics +( + id UInt64, + + value1 Nullable(UInt64), + value2 Nullable(UInt64), + value3 Nullable(UInt64), + + predicate_value UInt8, + weight UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL, 1, 1); +INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL, 1, 2); +INSERT INTO test_table_null_specifics VALUES (0, 3, 3, NULL, 1, 3); + +SELECT + id, + avgWeightedIf(value1, weight, predicate_value), + avgWeightedIf(value2, weight, predicate_value), + avgWeightedIf(value3, weight, predicate_value) +FROM test_table_null_specifics GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS test_table_null_specifics; diff --git a/parser/testdata/01897_jit_aggregation_function_avg_weighted_long/ast.json b/parser/testdata/01897_jit_aggregation_function_avg_weighted_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01897_jit_aggregation_function_avg_weighted_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01897_jit_aggregation_function_avg_weighted_long/metadata.json b/parser/testdata/01897_jit_aggregation_function_avg_weighted_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01897_jit_aggregation_function_avg_weighted_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01897_jit_aggregation_function_avg_weighted_long/query.sql b/parser/testdata/01897_jit_aggregation_function_avg_weighted_long/query.sql new file mode 100644 index 000000000..5e378f3ec --- /dev/null +++ 
b/parser/testdata/01897_jit_aggregation_function_avg_weighted_long/query.sql @@ -0,0 +1,169 @@ +-- Tags: long + +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +SELECT 'Test unsigned integer values'; + +DROP TABLE IF EXISTS test_table_unsigned_values; +CREATE TABLE test_table_unsigned_values +( + id UInt64, + + value1 UInt8, + value2 UInt16, + value3 UInt32, + value4 UInt64, + + weight UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_unsigned_values SELECT number % 3, number, number, number, number, number % 3 FROM system.numbers LIMIT 120; +SELECT + id, + avgWeighted(value1, weight), + avgWeighted(value2, weight), + avgWeighted(value3, weight), + avgWeighted(value4, weight) +FROM test_table_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_unsigned_values; + +SELECT 'Test signed integer values'; + +DROP TABLE IF EXISTS test_table_signed_values; +CREATE TABLE test_table_signed_values +( + id UInt64, + + value1 Int8, + value2 Int16, + value3 Int32, + value4 Int64, + + weight UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_signed_values SELECT number % 3, number, number, number, number, number % 3 FROM system.numbers LIMIT 120; +SELECT + id, + avgWeighted(value1, weight), + avgWeighted(value2, weight), + avgWeighted(value3, weight), + avgWeighted(value4, weight) +FROM test_table_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_signed_values; + +SELECT 'Test float values'; + +DROP TABLE IF EXISTS test_table_float_values; +CREATE TABLE test_table_float_values +( + id UInt64, + + value1 Float32, + value2 Float64, + + weight UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_float_values SELECT number % 3, number, number, number % 3 FROM system.numbers LIMIT 120; +SELECT id, avgWeighted(value1, weight), avgWeighted(value2, weight) FROM test_table_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_float_values; + +SELECT 'Test nullable unsigned integer values'; + +DROP TABLE IF EXISTS test_table_nullable_unsigned_values; +CREATE TABLE test_table_nullable_unsigned_values +( + id UInt64, + + value1 Nullable(UInt8), + value2 Nullable(UInt16), + value3 Nullable(UInt32), + value4 Nullable(UInt64), + + weight UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_unsigned_values SELECT number % 3, number, number, number, number, number % 3 FROM system.numbers LIMIT 120; +SELECT + id, + avgWeighted(value1, weight), + avgWeighted(value2, weight), + avgWeighted(value3, weight), + avgWeighted(value4, weight) +FROM test_table_nullable_unsigned_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_unsigned_values; + +SELECT 'Test nullable signed integer values'; + +DROP TABLE IF EXISTS test_table_nullable_signed_values; +CREATE TABLE test_table_nullable_signed_values +( + id UInt64, + + value1 Nullable(Int8), + value2 Nullable(Int16), + value3 Nullable(Int32), + value4 Nullable(Int64), + + weight UInt64 +) ENGINE=TinyLog; + + +INSERT INTO test_table_nullable_signed_values SELECT number % 3, number, number, number, number, number % 3 FROM system.numbers LIMIT 120; +SELECT + id, + avgWeighted(value1, weight), + avgWeighted(value2, weight), + avgWeighted(value3, weight), + avgWeighted(value4, weight) +FROM test_table_nullable_signed_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_signed_values; + +SELECT 'Test nullable float values'; + +DROP TABLE IF EXISTS test_table_nullable_float_values; +CREATE TABLE test_table_nullable_float_values +( + id UInt64, + + value1 Nullable(Float32), + value2 
Nullable(Float64), + + weight UInt64 +) ENGINE=TinyLog; + +INSERT INTO test_table_nullable_float_values SELECT number % 3, number, number, number % 3 FROM system.numbers LIMIT 120; +SELECT id, avgWeighted(value1, weight), avgWeighted(value2, weight) FROM test_table_nullable_float_values GROUP BY id ORDER BY id; +DROP TABLE test_table_nullable_float_values; + +SELECT 'Test null specifics'; + +DROP TABLE IF EXISTS test_table_null_specifics; +CREATE TABLE test_table_null_specifics +( + id UInt64, + + value1 Nullable(UInt64), + value2 Nullable(UInt64), + value3 Nullable(UInt64), + + weight UInt64, + weight_nullable Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO test_table_null_specifics VALUES (0, 1, 1, NULL, 1, 1); +INSERT INTO test_table_null_specifics VALUES (0, 2, NULL, NULL, 2, NULL); +INSERT INTO test_table_null_specifics VALUES (0, 3, 3, NULL, 3, 3); + +SELECT + id, + avgWeighted(value1, weight), + avgWeighted(value2, weight), + avgWeighted(value3, weight), + avgWeighted(value1, weight_nullable), + avgWeighted(value2, weight_nullable), + avgWeighted(value3, weight_nullable) +FROM test_table_null_specifics GROUP BY id ORDER BY id; +DROP TABLE IF EXISTS test_table_null_specifics; diff --git a/parser/testdata/01901_in_literal_shard_prune/ast.json b/parser/testdata/01901_in_literal_shard_prune/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01901_in_literal_shard_prune/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01901_in_literal_shard_prune/metadata.json b/parser/testdata/01901_in_literal_shard_prune/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01901_in_literal_shard_prune/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01901_in_literal_shard_prune/query.sql b/parser/testdata/01901_in_literal_shard_prune/query.sql new file mode 100644 index 000000000..9250547da --- /dev/null +++ b/parser/testdata/01901_in_literal_shard_prune/query.sql @@ -0,0 +1,17 @@ +-- Tags: shard + +set optimize_skip_unused_shards=1; +set force_optimize_skip_unused_shards=1; + +drop table if exists d; +drop table if exists dp; + +create table d (i UInt8) Engine=Memory; +create table dp as d Engine=Distributed(test_cluster_two_shards, currentDatabase(), d, i); + +insert into d values (1), (2); + +select * from dp where i in (1); + +drop table if exists d; +drop table if exists dp; diff --git a/parser/testdata/01901_test_attach_partition_from/ast.json b/parser/testdata/01901_test_attach_partition_from/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01901_test_attach_partition_from/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01901_test_attach_partition_from/metadata.json b/parser/testdata/01901_test_attach_partition_from/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01901_test_attach_partition_from/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01901_test_attach_partition_from/query.sql b/parser/testdata/01901_test_attach_partition_from/query.sql new file mode 100644 index 000000000..1fe073f0c --- /dev/null +++ b/parser/testdata/01901_test_attach_partition_from/query.sql @@ -0,0 +1,25 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS test_alter_attach_01901S; +DROP TABLE IF EXISTS test_alter_attach_01901D; + +CREATE TABLE test_alter_attach_01901S (A Int64, D date) ENGINE = MergeTree PARTITION BY D ORDER BY A; +INSERT INTO 
test_alter_attach_01901S VALUES (1, '2020-01-01'); + +CREATE TABLE test_alter_attach_01901D (A Int64, D date) +Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/test_alter_attach_01901D', 'r1') +PARTITION BY D ORDER BY A; + +ALTER TABLE test_alter_attach_01901D ATTACH PARTITION '2020-01-01' FROM test_alter_attach_01901S; + +SELECT count() FROM test_alter_attach_01901D; +SELECT count() FROM test_alter_attach_01901S; + +INSERT INTO test_alter_attach_01901S VALUES (1, '2020-01-01'); +ALTER TABLE test_alter_attach_01901D REPLACE PARTITION '2020-01-01' FROM test_alter_attach_01901S; + +SELECT count() FROM test_alter_attach_01901D; +SELECT count() FROM test_alter_attach_01901S; + +DROP TABLE test_alter_attach_01901S; +DROP TABLE test_alter_attach_01901D; diff --git a/parser/testdata/01902_dictionary_array_type/ast.json b/parser/testdata/01902_dictionary_array_type/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01902_dictionary_array_type/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01902_dictionary_array_type/metadata.json b/parser/testdata/01902_dictionary_array_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01902_dictionary_array_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01902_dictionary_array_type/query.sql b/parser/testdata/01902_dictionary_array_type/query.sql new file mode 100644 index 000000000..bff107d4d --- /dev/null +++ b/parser/testdata/01902_dictionary_array_type/query.sql @@ -0,0 +1,167 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS dictionary_array_source_table; +CREATE TABLE dictionary_array_source_table +( + id UInt64, + array_value Array(Int64) +) ENGINE=TinyLog; + +INSERT INTO dictionary_array_source_table VALUES (0, [0, 1, 2]); + +DROP DICTIONARY IF EXISTS flat_dictionary; +CREATE DICTIONARY flat_dictionary +( + id UInt64, + array_value Array(Int64) DEFAULT [1,2,3] +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'dictionary_array_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(FLAT()); + +SELECT 'Flat dictionary'; +SELECT dictGet('flat_dictionary', 'array_value', toUInt64(0)); +SELECT dictGet('flat_dictionary', 'array_value', toUInt64(1)); +SELECT dictGetOrDefault('flat_dictionary', 'array_value', toUInt64(1), [2,3,4]); +DROP DICTIONARY flat_dictionary; + +DROP DICTIONARY IF EXISTS hashed_dictionary; +CREATE DICTIONARY hashed_dictionary +( + id UInt64, + array_value Array(Int64) DEFAULT [1,2,3] +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'dictionary_array_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(HASHED()); + +SELECT 'Hashed dictionary'; +SELECT dictGet('hashed_dictionary', 'array_value', toUInt64(0)); +SELECT dictGet('hashed_dictionary', 'array_value', toUInt64(1)); +SELECT dictGetOrDefault('hashed_dictionary', 'array_value', toUInt64(1), [2,3,4]); +DROP DICTIONARY hashed_dictionary; + +DROP DICTIONARY IF EXISTS cache_dictionary; +CREATE DICTIONARY cache_dictionary +( + id UInt64, + array_value Array(Int64) DEFAULT [1,2,3] +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'dictionary_array_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(CACHE(SIZE_IN_CELLS 10)); + +SELECT 'Cache dictionary'; +SELECT dictGet('cache_dictionary', 'array_value', toUInt64(0)); +SELECT dictGet('cache_dictionary', 'array_value', toUInt64(1)); +SELECT dictGetOrDefault('cache_dictionary', 'array_value', toUInt64(1), [2,3,4]); +DROP 
DICTIONARY cache_dictionary; + +DROP DICTIONARY IF EXISTS direct_dictionary; +CREATE DICTIONARY direct_dictionary +( + id UInt64, + array_value Array(Int64) DEFAULT [1,2,3] +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'dictionary_array_source_table')) +LAYOUT(DIRECT()); + +SELECT 'Direct dictionary'; +SELECT dictGet('direct_dictionary', 'array_value', toUInt64(0)); +SELECT dictGet('direct_dictionary', 'array_value', toUInt64(1)); +SELECT dictGetOrDefault('direct_dictionary', 'array_value', toUInt64(1), [2,3,4]); +DROP DICTIONARY direct_dictionary; + +DROP TABLE IF EXISTS ip_trie_dictionary_array_source_table; +CREATE TABLE ip_trie_dictionary_array_source_table +( + prefix String, + array_value Array(Int64) +) ENGINE = TinyLog; + +DROP TABLE dictionary_array_source_table; + +DROP DICTIONARY IF EXISTS ip_trie_dictionary; +CREATE DICTIONARY ip_trie_dictionary +( + prefix String, + array_value Array(Int64) DEFAULT [1,2,3] +) +PRIMARY KEY prefix +SOURCE(CLICKHOUSE(HOST 'localhost' port tcpPort() TABLE 'ip_trie_dictionary_array_source_table')) +LIFETIME(MIN 10 MAX 1000) +LAYOUT(IP_TRIE()); + +INSERT INTO ip_trie_dictionary_array_source_table VALUES ('127.0.0.0', [0, 1, 2]); + +SELECT 'IPTrie dictionary'; +SELECT dictGet('ip_trie_dictionary', 'array_value', tuple(IPv4StringToNum('127.0.0.0'))); +SELECT dictGet('ip_trie_dictionary', 'array_value', tuple(IPv4StringToNum('128.0.0.0'))); +SELECT dictGetOrDefault('ip_trie_dictionary', 'array_value', tuple(IPv4StringToNum('128.0.0.0')), [2,3,4]); + +DROP DICTIONARY ip_trie_dictionary; +DROP TABLE ip_trie_dictionary_array_source_table; + +DROP TABLE IF EXISTS polygon_dictionary_array_source_table; +CREATE TABLE polygon_dictionary_array_source_table +( + key Array(Array(Array(Tuple(Float64, Float64)))), + array_value Array(Int64) +) ENGINE = TinyLog; + +INSERT INTO polygon_dictionary_array_source_table VALUES ([[[(0, 0), (0, 1), (1, 1), (1, 0)]]], [0, 1, 2]); + +DROP DICTIONARY IF EXISTS polygon_dictionary; +CREATE DICTIONARY polygon_dictionary +( + key Array(Array(Array(Tuple(Float64, Float64)))), + array_value Array(Int64) DEFAULT [1,2,3] +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'polygon_dictionary_array_source_table')) +LIFETIME(MIN 0 MAX 1000) +LAYOUT(POLYGON()); + +SELECT 'Polygon dictionary'; +SELECT dictGet('polygon_dictionary', 'array_value', tuple(0.5, 0.5)); +SELECT dictGet('polygon_dictionary', 'array_value', tuple(1.5, 1.5)); +SELECT dictGetOrDefault('polygon_dictionary', 'array_value', tuple(1.5, 1.5), [2, 3, 4]); + +DROP DICTIONARY polygon_dictionary; +DROP TABLE polygon_dictionary_array_source_table; + +DROP TABLE IF EXISTS range_dictionary_array_source_table; +CREATE TABLE range_dictionary_array_source_table +( + key UInt64, + start_date Date, + end_date Date, + array_value Array(Int64) +) +ENGINE = TinyLog; + +INSERT INTO range_dictionary_array_source_table VALUES(1, toDate('2019-05-05'), toDate('2019-05-20'), [0, 1, 2]); +CREATE DICTIONARY range_dictionary +( + key UInt64, + start_date Date, + end_date Date, + array_value Array(Int64) DEFAULT [1,2,3] +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'range_dictionary_array_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(RANGE_HASHED()) +RANGE(MIN start_date MAX end_date); + +SELECT 'Range dictionary'; +SELECT dictGet('range_dictionary', 'array_value', toUInt64(1), toDate('2019-05-15')); +SELECT dictGet('range_dictionary', 'array_value', toUInt64(1), toDate('2019-05-21')); +SELECT 
dictGetOrDefault('range_dictionary', 'array_value', toUInt64(1), toDate('2019-05-21'), [2, 3, 4]); + +DROP DICTIONARY range_dictionary; +DROP TABLE range_dictionary_array_source_table; diff --git a/parser/testdata/01902_self_aliases_in_columns/ast.json b/parser/testdata/01902_self_aliases_in_columns/ast.json new file mode 100644 index 000000000..377294b76 --- /dev/null +++ b/parser/testdata/01902_self_aliases_in_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery a (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001343981, + "rows_read": 2, + "bytes_read": 55 + } +} diff --git a/parser/testdata/01902_self_aliases_in_columns/metadata.json b/parser/testdata/01902_self_aliases_in_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01902_self_aliases_in_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01902_self_aliases_in_columns/query.sql b/parser/testdata/01902_self_aliases_in_columns/query.sql new file mode 100644 index 000000000..1b2745af1 --- /dev/null +++ b/parser/testdata/01902_self_aliases_in_columns/query.sql @@ -0,0 +1,14 @@ +CREATE TABLE a +( + `number` UInt64, + `x` MATERIALIZED x +) +ENGINE = MergeTree +ORDER BY number; --{ serverError CYCLIC_ALIASES} + +CREATE TABLE foo +( + i Int32, + j ALIAS j + 1 +) +ENGINE = MergeTree() ORDER BY i; --{ serverError CYCLIC_ALIASES} diff --git a/parser/testdata/01902_table_function_merge_db_params/ast.json b/parser/testdata/01902_table_function_merge_db_params/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01902_table_function_merge_db_params/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01902_table_function_merge_db_params/metadata.json b/parser/testdata/01902_table_function_merge_db_params/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01902_table_function_merge_db_params/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01902_table_function_merge_db_params/query.sql b/parser/testdata/01902_table_function_merge_db_params/query.sql new file mode 100644 index 000000000..caa783211 --- /dev/null +++ b/parser/testdata/01902_table_function_merge_db_params/query.sql @@ -0,0 +1,15 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 01902_db_params; +CREATE DATABASE 01902_db_params; +CREATE TABLE 01902_db_params.t(n Int8) ENGINE=MergeTree ORDER BY n; +INSERT INTO 01902_db_params.t SELECT * FROM numbers(3); +SELECT _database, _table, n FROM merge(REGEXP('^01902_db_params'), '^t') ORDER BY _database, _table, n; + +SELECT _database, _table, n FROM merge() ORDER BY _database, _table, n; -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT _database, _table, n FROM merge('^t') ORDER BY _database, _table, n; -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} + +USE 01902_db_params; +SELECT _database, _table, n FROM merge('^t') ORDER BY _database, _table, n; + +DROP DATABASE 01902_db_params; diff --git a/parser/testdata/01902_table_function_merge_db_repr/ast.json b/parser/testdata/01902_table_function_merge_db_repr/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01902_table_function_merge_db_repr/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01902_table_function_merge_db_repr/metadata.json 
b/parser/testdata/01902_table_function_merge_db_repr/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01902_table_function_merge_db_repr/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01902_table_function_merge_db_repr/query.sql b/parser/testdata/01902_table_function_merge_db_repr/query.sql new file mode 100644 index 000000000..ddd5a41c3 --- /dev/null +++ b/parser/testdata/01902_table_function_merge_db_repr/query.sql @@ -0,0 +1,75 @@ +-- Tags: no-parallel + +SET enable_analyzer = 1; + +DROP DATABASE IF EXISTS 01902_db_repr; +DROP DATABASE IF EXISTS 01902_db_repr1; +DROP DATABASE IF EXISTS 01902_db_repr2; +DROP DATABASE IF EXISTS 01902_db_repr3; + +CREATE DATABASE 01902_db_repr; +CREATE DATABASE 01902_db_repr1; +CREATE DATABASE 01902_db_repr2; +CREATE DATABASE 01902_db_repr3; + +CREATE TABLE 01902_db_repr.t (n Int8) ENGINE=MergeTree ORDER BY n; +CREATE TABLE 01902_db_repr1.t1 (n Int8) ENGINE=MergeTree ORDER BY n; +CREATE TABLE 01902_db_repr2.t2 (n Int8) ENGINE=MergeTree ORDER BY n; +CREATE TABLE 01902_db_repr3.t3 (n Int8) ENGINE=MergeTree ORDER BY n; + +INSERT INTO 01902_db_repr.t SELECT * FROM numbers(10); +INSERT INTO 01902_db_repr1.t1 SELECT * FROM numbers(10); +INSERT INTO 01902_db_repr2.t2 SELECT * FROM numbers(10); +INSERT INTO 01902_db_repr3.t3 SELECT * FROM numbers(10); + +SELECT 'CREATE TABLE t_merge as 01902_db_repr.t ENGINE=Merge(REGEXP(^01902_db_repr), ^t)'; +CREATE TABLE 01902_db_repr.t_merge as 01902_db_repr.t ENGINE=Merge(REGEXP('^01902_db_repr'), '^t'); + +SELECT 'SELECT _database, _table, n FROM 01902_db_repr.t_merge ORDER BY _database, _table, n'; +SELECT _database, _table, n FROM 01902_db_repr.t_merge ORDER BY _database, _table, n; + +SELECT 'SHOW CREATE TABLE 01902_db_repr.t_merge'; +SHOW CREATE TABLE 01902_db_repr.t_merge; + +SELECT 'SELECT _database, _table, n FROM merge(REGEXP(^01902_db_repr), ^t) ORDER BY _database, _table, n'; +SELECT _database, _table, n FROM merge(REGEXP('^01902_db_repr'), '^t') ORDER BY _database, _table, n; + +SELECT 'SELECT _database, _table, n FROM 01902_db_repr.t_merge WHERE _database = 01902_db_repr1 ORDER BY _database, _table, n'; +SELECT _database, _table, n FROM 01902_db_repr.t_merge WHERE _database = '01902_db_repr1' ORDER BY _database, _table, n; + +SELECT 'SELECT _database, _table, n FROM 01902_db_repr.t_merge WHERE _table = t1 ORDER BY _database, _table, n'; +SELECT _database, _table, n FROM 01902_db_repr.t_merge WHERE _table = 't1' ORDER BY _database, _table, n; + +-- not regexp +SELECT 'CREATE TABLE t_merge1 as 01902_db_repr.t ENGINE=Merge(01902_db_repr, ^t$)'; +CREATE TABLE 01902_db_repr.t_merge1 as 01902_db_repr.t ENGINE=Merge('01902_db_repr', '^t$'); + +SELECT 'SELECT _database, _table, n FROM 01902_db_repr.t_merge1 ORDER BY _database, _table, n'; +SELECT _database, _table, n FROM 01902_db_repr.t_merge1 ORDER BY _database, _table, n; + +SELECT 'SELECT _database, _table, n FROM merge(01902_db_repr, ^t$) ORDER BY _database, _table, n'; +SELECT _database, _table, n FROM merge('01902_db_repr', '^t$') ORDER BY _database, _table, n; + +USE 01902_db_repr1; + +SELECT 'CREATE TABLE t_merge_1 as 01902_db_repr.t ENGINE=Merge(currentDatabase(), ^t)'; +CREATE TABLE 01902_db_repr.t_merge_1 as 01902_db_repr.t ENGINE=Merge(currentDatabase(), '^t'); + +SELECT 'SELECT _database, _table, n FROM 01902_db_repr.t_merge_1 ORDER BY _database, _table, n'; +SELECT _database, _table, n FROM 01902_db_repr.t_merge_1 ORDER BY _database, _table, n; + +SELECT 'SHOW CREATE TABLE 
01902_db_repr.t_merge_1'; +SHOW CREATE TABLE 01902_db_repr.t_merge_1; + +SELECT 'SELECT _database, _table, n FROM merge(currentDatabase(), ^t) ORDER BY _database, _table, n'; +SELECT _database, _table, n FROM merge(currentDatabase(), '^t') ORDER BY _database, _table, n; + +--fuzzed LOGICAL_ERROR +CREATE TABLE 01902_db_repr.t4 (n Date) ENGINE=MergeTree ORDER BY n; +INSERT INTO 01902_db_repr.t4 SELECT * FROM numbers(10); +SELECT NULL FROM 01902_db_repr.t_merge WHERE n ORDER BY _table DESC; + +DROP DATABASE 01902_db_repr; +DROP DATABASE 01902_db_repr1; +DROP DATABASE 01902_db_repr2; +DROP DATABASE 01902_db_repr3; diff --git a/parser/testdata/01904_dictionary_default_nullable_type/ast.json b/parser/testdata/01904_dictionary_default_nullable_type/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01904_dictionary_default_nullable_type/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01904_dictionary_default_nullable_type/metadata.json b/parser/testdata/01904_dictionary_default_nullable_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01904_dictionary_default_nullable_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01904_dictionary_default_nullable_type/query.sql b/parser/testdata/01904_dictionary_default_nullable_type/query.sql new file mode 100644 index 000000000..d28f9e5c4 --- /dev/null +++ b/parser/testdata/01904_dictionary_default_nullable_type/query.sql @@ -0,0 +1,204 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS dictionary_nullable_source_table; +CREATE TABLE dictionary_nullable_source_table +( + id UInt64, + value Nullable(Int64) +) ENGINE=TinyLog; + +DROP TABLE IF EXISTS dictionary_nullable_default_source_table; +CREATE TABLE dictionary_nullable_default_source_table +( + id UInt64, + value Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO dictionary_nullable_source_table VALUES (0, 0), (1, NULL); +INSERT INTO dictionary_nullable_default_source_table VALUES (2, 2), (3, NULL); + +DROP DICTIONARY IF EXISTS flat_dictionary; +CREATE DICTIONARY flat_dictionary +( + id UInt64, + value Nullable(Int64) DEFAULT NULL +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'dictionary_nullable_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(FLAT()); + +SELECT 'Flat dictionary'; +SELECT dictGet('flat_dictionary', 'value', toUInt64(0)); +SELECT dictGet('flat_dictionary', 'value', toUInt64(1)); +SELECT dictGet('flat_dictionary', 'value', toUInt64(2)); +SELECT dictGetOrDefault('flat_dictionary', 'value', toUInt64(2), 2); +SELECT dictGetOrDefault('flat_dictionary', 'value', toUInt64(2), NULL); +SELECT dictGetOrDefault('flat_dictionary', 'value', id, value) FROM dictionary_nullable_default_source_table; +DROP DICTIONARY flat_dictionary; + +DROP DICTIONARY IF EXISTS hashed_dictionary; +CREATE DICTIONARY hashed_dictionary +( + id UInt64, + value Nullable(Int64) DEFAULT NULL +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'dictionary_nullable_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(HASHED()); + +SELECT 'Hashed dictionary'; +SELECT dictGet('hashed_dictionary', 'value', toUInt64(0)); +SELECT dictGet('hashed_dictionary', 'value', toUInt64(1)); +SELECT dictGet('hashed_dictionary', 'value', toUInt64(2)); +SELECT dictGetOrDefault('hashed_dictionary', 'value', toUInt64(2), 2); +SELECT dictGetOrDefault('hashed_dictionary', 'value', toUInt64(2), NULL); +SELECT dictGetOrDefault('hashed_dictionary', 'value', id, 
value) FROM dictionary_nullable_default_source_table; +DROP DICTIONARY hashed_dictionary; + +DROP DICTIONARY IF EXISTS cache_dictionary; +CREATE DICTIONARY cache_dictionary +( + id UInt64, + value Nullable(Int64) DEFAULT NULL +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'dictionary_nullable_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(CACHE(SIZE_IN_CELLS 10)); + +SELECT 'Cache dictionary'; +SELECT dictGet('cache_dictionary', 'value', toUInt64(0)); +SELECT dictGet('cache_dictionary', 'value', toUInt64(1)); +SELECT dictGet('cache_dictionary', 'value', toUInt64(2)); +SELECT dictGetOrDefault('cache_dictionary', 'value', toUInt64(2), 2); +SELECT dictGetOrDefault('cache_dictionary', 'value', toUInt64(2), NULL); +SELECT dictGetOrDefault('cache_dictionary', 'value', id, value) FROM dictionary_nullable_default_source_table; +DROP DICTIONARY cache_dictionary; + +DROP DICTIONARY IF EXISTS direct_dictionary; +CREATE DICTIONARY direct_dictionary +( + id UInt64, + value Nullable(Int64) DEFAULT NULL +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'dictionary_nullable_source_table')) +LAYOUT(DIRECT()); + +SELECT 'Direct dictionary'; +SELECT dictGet('direct_dictionary', 'value', toUInt64(0)); +SELECT dictGet('direct_dictionary', 'value', toUInt64(1)); +SELECT dictGet('direct_dictionary', 'value', toUInt64(2)); +SELECT dictGetOrDefault('direct_dictionary', 'value', toUInt64(2), 2); +SELECT dictGetOrDefault('direct_dictionary', 'value', toUInt64(2), NULL); +SELECT dictGetOrDefault('direct_dictionary', 'value', id, value) FROM dictionary_nullable_default_source_table; +DROP DICTIONARY direct_dictionary; + +DROP DICTIONARY IF EXISTS ip_trie_dictionary; +CREATE DICTIONARY ip_trie_dictionary +( + prefix String, + value Nullable(Int64) DEFAULT NULL +) +PRIMARY KEY prefix +SOURCE(CLICKHOUSE(HOST 'localhost' port tcpPort() TABLE 'dictionary_nullable_source_table')) +LIFETIME(MIN 10 MAX 1000) +LAYOUT(IP_TRIE()); + +-- Nullable type is not supported by IPTrie dictionary +SELECT 'IPTrie dictionary'; +SELECT dictGet('ip_trie_dictionary', 'value', tuple(IPv4StringToNum('127.0.0.0'))); --{serverError UNSUPPORTED_METHOD} + +DROP DICTIONARY ip_trie_dictionary; + +DROP TABLE dictionary_nullable_source_table; +DROP TABLE dictionary_nullable_default_source_table; + +DROP TABLE IF EXISTS polygon_dictionary_nullable_source_table; +CREATE TABLE polygon_dictionary_nullable_source_table +( + key Array(Array(Array(Tuple(Float64, Float64)))), + value Nullable(Int64) +) ENGINE = TinyLog; + +DROP TABLE IF EXISTS polygon_dictionary_nullable_default_source_table; +CREATE TABLE polygon_dictionary_nullable_default_source_table +( + key Tuple(Float64, Float64), + value Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO polygon_dictionary_nullable_source_table VALUES ([[[(0, 0), (0, 1), (1, 1), (1, 0)]]], 0), ([[[(0, 0), (0, 1.5), (1.5, 1.5), (1.5, 0)]]], NULL); +INSERT INTO polygon_dictionary_nullable_default_source_table VALUES ((2.0, 2.0), 2), ((4, 4), NULL); + + +DROP DICTIONARY IF EXISTS polygon_dictionary; +CREATE DICTIONARY polygon_dictionary +( + key Array(Array(Array(Tuple(Float64, Float64)))), + value Nullable(UInt64) DEFAULT NULL +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'polygon_dictionary_nullable_source_table')) +LIFETIME(MIN 0 MAX 1000) +LAYOUT(POLYGON()); + +SELECT 'Polygon dictionary'; +SELECT dictGet('polygon_dictionary', 'value', tuple(0.5, 0.5)); +SELECT dictGet('polygon_dictionary', 'value', tuple(1.5, 1.5)); +SELECT 
dictGet('polygon_dictionary', 'value', tuple(2.0, 2.0)); +SELECT dictGetOrDefault('polygon_dictionary', 'value', tuple(2.0, 2.0), 2); +SELECT dictGetOrDefault('polygon_dictionary', 'value', tuple(2.0, 2.0), NULL); +SELECT dictGetOrDefault('polygon_dictionary', 'value', key, value) FROM polygon_dictionary_nullable_default_source_table; + +DROP DICTIONARY polygon_dictionary; +DROP TABLE polygon_dictionary_nullable_source_table; +DROP TABLE polygon_dictionary_nullable_default_source_table; + +DROP TABLE IF EXISTS range_dictionary_nullable_source_table; +CREATE TABLE range_dictionary_nullable_source_table +( + key UInt64, + start_date Date, + end_date Date, + value Nullable(UInt64) +) +ENGINE = TinyLog; + +DROP TABLE IF EXISTS range_dictionary_nullable_default_source_table; +CREATE TABLE range_dictionary_nullable_default_source_table +( + key UInt64, + value Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO range_dictionary_nullable_source_table VALUES (0, toDate('2019-05-05'), toDate('2019-05-20'), 0), (1, toDate('2019-05-05'), toDate('2019-05-20'), NULL); +INSERT INTO range_dictionary_nullable_default_source_table VALUES (2, 2), (3, NULL); + +DROP DICTIONARY IF EXISTS range_dictionary; +CREATE DICTIONARY range_dictionary +( + key UInt64, + start_date Date, + end_date Date, + value Nullable(UInt64) DEFAULT NULL +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'range_dictionary_nullable_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(RANGE_HASHED()) +RANGE(MIN start_date MAX end_date); + +SELECT 'Range dictionary'; +SELECT dictGet('range_dictionary', 'value', toUInt64(0), toDate('2019-05-15')); +SELECT dictGet('range_dictionary', 'value', toUInt64(1), toDate('2019-05-15')); +SELECT dictGet('range_dictionary', 'value', toUInt64(2), toDate('2019-05-15')); +SELECT dictGetOrDefault('range_dictionary', 'value', toUInt64(2), toDate('2019-05-15'), 2); +SELECT dictGetOrDefault('range_dictionary', 'value', toUInt64(2), toDate('2019-05-15'), NULL); +SELECT dictGetOrDefault('range_dictionary', 'value', key, toDate('2019-05-15'), value) FROM range_dictionary_nullable_default_source_table; + +DROP DICTIONARY range_dictionary; +DROP TABLE range_dictionary_nullable_source_table; +DROP TABLE range_dictionary_nullable_default_source_table; diff --git a/parser/testdata/01905_to_json_string/ast.json b/parser/testdata/01905_to_json_string/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01905_to_json_string/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01905_to_json_string/metadata.json b/parser/testdata/01905_to_json_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01905_to_json_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01905_to_json_string/query.sql b/parser/testdata/01905_to_json_string/query.sql new file mode 100644 index 000000000..42437de1b --- /dev/null +++ b/parser/testdata/01905_to_json_string/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-fasttest + +create temporary table t engine Memory as select * from generateRandom( +$$ + a Array(Int8), + b UInt32, + c Nullable(String), + d Decimal32(4), + e Nullable(Enum16('h' = 1, 'w' = 5 , 'o' = -200)), + f Float64, + g Tuple(Date, DateTime('Asia/Istanbul'), DateTime64(3, 'Asia/Istanbul'), UUID), + h FixedString(2), + i Array(Nullable(UUID)) +$$, 10, 5, 3) limit 2; + +select * apply toJSONString from t; + +select toJSONString(map('1234', '5678')); diff --git 
a/parser/testdata/01906_bigint_accurate_cast_ubsan/ast.json b/parser/testdata/01906_bigint_accurate_cast_ubsan/ast.json new file mode 100644 index 000000000..257872890 --- /dev/null +++ b/parser/testdata/01906_bigint_accurate_cast_ubsan/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function accurateCast (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_1e35" + }, + { + "explain": " Literal 'UInt32'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001555326, + "rows_read": 8, + "bytes_read": 299 + } +} diff --git a/parser/testdata/01906_bigint_accurate_cast_ubsan/metadata.json b/parser/testdata/01906_bigint_accurate_cast_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01906_bigint_accurate_cast_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01906_bigint_accurate_cast_ubsan/query.sql b/parser/testdata/01906_bigint_accurate_cast_ubsan/query.sql new file mode 100644 index 000000000..c038b3b56 --- /dev/null +++ b/parser/testdata/01906_bigint_accurate_cast_ubsan/query.sql @@ -0,0 +1,15 @@ +SELECT accurateCast(1e35, 'UInt32'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(1e35, 'UInt64'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(1e35, 'UInt128'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(1e35, 'UInt256'); -- { serverError CANNOT_CONVERT_TYPE } + +SELECT accurateCast(1e19, 'UInt64'); +SELECT accurateCast(1e19, 'UInt128'); +SELECT accurateCast(1e19, 'UInt256'); +SELECT accurateCast(1e20, 'UInt64'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(1e20, 'UInt128'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(1e20, 'UInt256'); -- { serverError CANNOT_CONVERT_TYPE } + +SELECT accurateCast(1e19, 'Int64'); -- { serverError CANNOT_CONVERT_TYPE } +SELECT accurateCast(1e19, 'Int128'); +SELECT accurateCast(1e19, 'Int256'); diff --git a/parser/testdata/01906_h3_to_geo/ast.json b/parser/testdata/01906_h3_to_geo/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01906_h3_to_geo/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01906_h3_to_geo/metadata.json b/parser/testdata/01906_h3_to_geo/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01906_h3_to_geo/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01906_h3_to_geo/query.sql b/parser/testdata/01906_h3_to_geo/query.sql new file mode 100644 index 000000000..1da89098e --- /dev/null +++ b/parser/testdata/01906_h3_to_geo/query.sql @@ -0,0 +1,63 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS h3_indexes; + +CREATE TABLE h3_indexes (h3_index UInt64) ENGINE = Memory; + +-- Random geo coordinates were generated using the H3 tool: https://github.com/ClickHouse-Extras/h3/blob/master/src/apps/testapps/mkRandGeo.c at various resolutions from 0 to 15. +-- Corresponding H3 index values were in turn generated from those geo coordinates using the `geoToH3(lon, lat, res)` ClickHouse function (the argument order used by the queries below) for the following test.
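+-- Illustrative note (not part of the original test): each value inserted below is a 64-bit H3 cell index. Assuming the generation procedure described above, the last value could have been produced by a call of the form `SELECT geoToH3(lon, lat, 15)`, and it can be decoded back with `SELECT h3ToGeo(644178757620501158)`, whose components the queries below round to 3 decimal places.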
+ +INSERT INTO h3_indexes VALUES (579205133326352383); +INSERT INTO h3_indexes VALUES (581263419093549055); +INSERT INTO h3_indexes VALUES (589753847883235327); +INSERT INTO h3_indexes VALUES (594082350283882495); +INSERT INTO h3_indexes VALUES (598372386957426687); +INSERT INTO h3_indexes VALUES (599542359671177215); +INSERT INTO h3_indexes VALUES (604296355086598143); +INSERT INTO h3_indexes VALUES (608785214872748031); +INSERT INTO h3_indexes VALUES (615732192485572607); +INSERT INTO h3_indexes VALUES (617056794467368959); +INSERT INTO h3_indexes VALUES (624586477873168383); +INSERT INTO h3_indexes VALUES (627882919484481535); +INSERT INTO h3_indexes VALUES (634600058503392255); +INSERT INTO h3_indexes VALUES (635544851677385791); +INSERT INTO h3_indexes VALUES (639763125756281263); +INSERT INTO h3_indexes VALUES (644178757620501158); + + +WITH h3ToGeo(h3_index) AS p SELECT round(p.1, 3), round(p.2, 3) FROM h3_indexes ORDER BY h3_index; + +DROP TABLE h3_indexes; + +DROP TABLE IF EXISTS h3_geo; + +-- compare if the results of h3ToGeo and geoToH3 are the same + +CREATE TABLE h3_geo(lat Float64, lon Float64, res UInt8) ENGINE = Memory; + +INSERT INTO h3_geo VALUES (-173.6412167681162, -14.130272474941535, 0); +INSERT INTO h3_geo VALUES (59.48137613600854, 58.020407687755686, 1); +INSERT INTO h3_geo VALUES (172.68095885060296, -83.6576608516349, 2); +INSERT INTO h3_geo VALUES (-94.46556851304558, -69.1999982492279, 3); +INSERT INTO h3_geo VALUES (-8.188263637093279, -55.856179102736284, 4); +INSERT INTO h3_geo VALUES (77.25594891852249, 47.39278564360122, 5); +INSERT INTO h3_geo VALUES (135.11348004704536, 36.60778126579667, 6); +INSERT INTO h3_geo VALUES (39.28534828967223, 49.07710003066973, 7); +INSERT INTO h3_geo VALUES (124.71163478198051, -27.481172161567258, 8); +INSERT INTO h3_geo VALUES (-147.4887686066785, 76.73237945824442, 9); +INSERT INTO h3_geo VALUES (86.63291906118863, -25.52526285188784, 10); +INSERT INTO h3_geo VALUES (23.27751790712118, 13.126101362212724, 11); +INSERT INTO h3_geo VALUES (-70.40163237204142, -63.12562536833242, 12); +INSERT INTO h3_geo VALUES (15.642428355535966, 40.285813505163574, 13); +INSERT INTO h3_geo VALUES (-76.53411447979884, 54.5560449693637, 14); +INSERT INTO h3_geo VALUES (8.19906334981474, 67.69370966550179, 15); + +SELECT result FROM ( + SELECT + (lon, lat) AS input_geo, + h3ToGeo(geoToH3(lon, lat, res)) AS output_geo, + if(abs(input_geo.1 - output_geo.1) < 0.001 AND abs(input_geo.2 - output_geo.2) < 0.001, 'ok', 'fail') AS result + FROM h3_geo +); + +DROP TABLE h3_geo; diff --git a/parser/testdata/01906_lc_in_bug/ast.json b/parser/testdata/01906_lc_in_bug/ast.json new file mode 100644 index 000000000..0f04ece45 --- /dev/null +++ b/parser/testdata/01906_lc_in_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001785166, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01906_lc_in_bug/metadata.json b/parser/testdata/01906_lc_in_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01906_lc_in_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01906_lc_in_bug/query.sql b/parser/testdata/01906_lc_in_bug/query.sql new file mode 100644 index 000000000..035e1fa15 --- /dev/null +++ b/parser/testdata/01906_lc_in_bug/query.sql @@ -0,0 +1,13 @@ 
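+-- Review sketch (comment added in editing, not in the original file): the statements below
+-- exercise IN over a LowCardinality(String) column; a minimal standalone probe under the same
+-- assumptions would be:
+--   SELECT toLowCardinality('a') IN ('a', 'bb');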
+drop table if exists tab; +create table tab (x LowCardinality(String)) engine = MergeTree order by tuple(); + +insert into tab values ('a'), ('bb'), ('a'), ('cc'); + +select count() as c, x in ('a', 'bb') as g from tab group by g order by c; + +drop table if exists tab; + +-- https://github.com/ClickHouse/ClickHouse/issues/44503 +CREATE TABLE test(key Int32) ENGINE = MergeTree ORDER BY (key) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into test select intDiv(number,100) from numbers(10000000); +SELECT COUNT() FROM test WHERE key <= 100000 AND (NOT (toLowCardinality('') IN (SELECT ''))); diff --git a/parser/testdata/01906_partition_by_multiply_by_zero/ast.json b/parser/testdata/01906_partition_by_multiply_by_zero/ast.json new file mode 100644 index 000000000..39d494140 --- /dev/null +++ b/parser/testdata/01906_partition_by_multiply_by_zero/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_01906 (children 1)" + }, + { + "explain": " Identifier t_01906" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001606471, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/01906_partition_by_multiply_by_zero/metadata.json b/parser/testdata/01906_partition_by_multiply_by_zero/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01906_partition_by_multiply_by_zero/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01906_partition_by_multiply_by_zero/query.sql b/parser/testdata/01906_partition_by_multiply_by_zero/query.sql new file mode 100644 index 000000000..be890339c --- /dev/null +++ b/parser/testdata/01906_partition_by_multiply_by_zero/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS t_01906; + +CREATE TABLE t_01906 +( + `id` UInt64, + `update_ts` DateTime, + `value` UInt32 +) +ENGINE = ReplacingMergeTree(update_ts) +PARTITION BY 0 * id +ORDER BY (update_ts, id); + +INSERT INTO t_01906 SELECT + number, + toDateTime('2020-01-01 00:00:00'), + 1 +FROM numbers(100); + +SELECT count() FROM t_01906 WHERE id >= 42; + +SELECT count() FROM t_01906 FINAL WHERE id >= 42 and update_ts <= '2021-01-01 00:00:00'; + +DROP TABLE t_01906; diff --git a/parser/testdata/01907_multiple_aliases/ast.json b/parser/testdata/01907_multiple_aliases/ast.json new file mode 100644 index 000000000..a497f74f6 --- /dev/null +++ b/parser/testdata/01907_multiple_aliases/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001373674, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01907_multiple_aliases/metadata.json b/parser/testdata/01907_multiple_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01907_multiple_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01907_multiple_aliases/query.sql b/parser/testdata/01907_multiple_aliases/query.sql new file mode 100644 index 000000000..5e8efba7a --- /dev/null +++ b/parser/testdata/01907_multiple_aliases/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS t; +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE t (d Date, z UInt32) ENGINE = MergeTree(d, (z), 1); + +INSERT INTO t VALUES ('2017-01-01', 1); + +WITH (d < '2018-01-01') AND (d < '2018-01-02') AS x 
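+-- (Review comment, not in the original test: x above is a WITH expression alias; ClickHouse
+-- substitutes it where referenced, so the WHERE clause below evaluates both date comparisons.)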
+SELECT 1 +FROM t +WHERE x; + +DROP TABLE t; diff --git a/parser/testdata/01908_with_unknown_column/ast.json b/parser/testdata/01908_with_unknown_column/ast.json new file mode 100644 index 000000000..5b980c49e --- /dev/null +++ b/parser/testdata/01908_with_unknown_column/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001329282, + "rows_read": 5, + "bytes_read": 173 + } +} diff --git a/parser/testdata/01908_with_unknown_column/metadata.json b/parser/testdata/01908_with_unknown_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01908_with_unknown_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01908_with_unknown_column/query.sql b/parser/testdata/01908_with_unknown_column/query.sql new file mode 100644 index 000000000..c3bce12d4 --- /dev/null +++ b/parser/testdata/01908_with_unknown_column/query.sql @@ -0,0 +1,30 @@ +select a +from ( + with a+1 as aa, + sumIf(aa, b > 0) as aaif + select a, aaif + FROM (select 1 as a, 2 as b) + GROUP BY a +) as V; + +select a +from ( + with a+1 as aa + -- , sumIf(c, b > 0) as aaif + , sum(if(b>0,c,0)) as aaif2 + select a, aaif2 + FROM + (select 1 as a, 2 as b, 3 as c) + GROUP BY a +) as V; + +select a +from ( + with a+1 as aa + -- , sumIf(c, b > 0) as aaif + -- , sum(if(b>0,c,0)) as aaif2 + select a, sumIf(c, b > 0) as aaif3 + FROM + (select 1 as a, 2 as b, 3 as c) + GROUP BY a +) as V; diff --git a/parser/testdata/01909_mbtolou/ast.json b/parser/testdata/01909_mbtolou/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01909_mbtolou/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01909_mbtolou/metadata.json b/parser/testdata/01909_mbtolou/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01909_mbtolou/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01909_mbtolou/query.sql b/parser/testdata/01909_mbtolou/query.sql new file mode 100644 index 000000000..a1b05e7c4 --- /dev/null +++ b/parser/testdata/01909_mbtolou/query.sql @@ -0,0 +1,51 @@ +-- BIGMDM.DATE_INFO_DICT definition + +DROP TABLE IF EXISTS DATE_INFO_DICT; + +CREATE TABLE DATE_INFO_DICT +( + `TV` Date, + `SHAMSI` String, + `HIJRI` String, + `MILADI` String, + `S_DAY` UInt8, + `H_DAY` UInt8, + `S_MONTH` UInt8, + `H_MONTH` UInt8, + `WEEK_DAY_NAME` String, + `DAY_NUMBER` UInt8, + `HOLIDAY` UInt8, + `WEEK_NAME` String +) +ENGINE = Join(ANY, LEFT, TV); + + +truncate table DATE_INFO_DICT; + +INSERT INTO DATE_INFO_DICT (TV, SHAMSI, HIJRI, MILADI, S_DAY, H_DAY, S_MONTH, H_MONTH, WEEK_DAY_NAME, DAY_NUMBER, HOLIDAY, WEEK_NAME) VALUES ('2017-04-01', '1396-01-12', '1438-07-04', '2017-04-01', 12, 4, 1, 7, 'saturday ', 7, 0, 'NONE') ,('2017-04-02', '1396-01-13', '1438-07-05', '2017-04-02', 13, 5, 1, 7, 'sunday ', 1, 0, 'NONE') ,('2017-04-03', '1396-01-14', '1438-07-06', '2017-04-03', 14, 6, 1, 7, 'monday ', 2, 0, 'NONE') ,('2017-04-04', '1396-01-15', '1438-07-07', '2017-04-04', 15, 7, 1, 7, 'tuesday ', 3, 0, 'NONE') ,('2017-04-05', '1396-01-16', '1438-07-08', '2017-04-05', 16, 8, 1, 7, 'wednesday', 4, 0, 'NONE') ,('2017-04-06', 
'1396-01-17', '1438-07-09', '2017-04-06', 17, 9, 1, 7, 'thursday ', 5, 0, 'NONE') ,('2017-04-07', '1396-01-18', '1438-07-10', '2017-04-07', 18, 10, 1, 7, 'friday ', 6, 0, 'NONE') ,('2017-04-08', '1396-01-19', '1438-07-11', '2017-04-08', 19, 11, 1, 7, 'saturday ', 7, 0, 'NONE') ,('2017-04-09', '1396-01-20', '1438-07-12', '2017-04-09', 20, 12, 1, 7, 'sunday ', 1, 0, 'NONE') ,('2017-04-10', '1396-01-21', '1438-07-13', '2017-04-10', 21, 13, 1, 7, 'monday ', 2, 0, 'NONE') ,('2017-04-11', '1396-01-22', '1438-07-14', '2017-04-11', 22, 14, 1, 7, 'tuesday ', 3, 0, 'NONE') ,('2017-04-12', '1396-01-23', '1438-07-15', '2017-04-12', 23, 15, 1, 7, 'wednesday', 4, 0, 'NONE') ,('2017-04-13', '1396-01-24', '1438-07-16', '2017-04-13', 24, 16, 1, 7, 'thursday ', 5, 0, 'NONE') ,('2017-04-14', '1396-01-25', '1438-07-17', '2017-04-14', 25, 17, 1, 7, 'friday ', 6, 0, 'NONE') ,('2017-04-15', '1396-01-26', '1438-07-18', '2017-04-15', 26, 18, 1, 7, 'saturday ', 7, 0, 'NONE') ,('2017-04-16', '1396-01-27', '1438-07-19', '2017-04-16', 27, 19, 1, 7, 'sunday ', 1, 0, 'NONE') ,('2017-04-17', '1396-01-28', '1438-07-20', '2017-04-17', 28, 20, 1, 7, 'monday ', 2, 0, 'NONE') ,('2017-04-18', '1396-01-29', '1438-07-21', '2017-04-18', 29, 21, 1, 7, 'tuesday ', 3, 0, 'NONE') ,('2017-04-19', '1396-01-30', '1438-07-22', '2017-04-19', 30, 22, 1, 7, 'wednesday', 4, 0, 'NONE') ,('2017-04-20', '1396-01-31', '1438-07-23', '2017-04-20', 31, 23, 1, 7, 'thursday ', 5, 0, 'NONE') ,('2017-04-21', '1396-02-01', '1438-07-24', '2017-04-21', 1, 24, 2, 7, 'friday ', 6, 0, 'NONE') ,('2017-04-22', '1396-02-02', '1438-07-25', '2017-04-22', 2, 25, 2, 7, 'saturday ', 7, 0, 'NONE') ,('2017-04-23', '1396-02-03', '1438-07-26', '2017-04-23', 3, 26, 2, 7, 'sunday ', 1, 0, 'NONE') ,('2017-04-24', '1396-02-04', '1438-07-27', '2017-04-24', 4, 27, 2, 7, 'monday ', 2, 0, 'NONE') ,('2017-04-25', '1396-02-05', '1438-07-28', '2017-04-25', 5, 28, 2, 7, 'tuesday ', 3, 0, 'NONE') ,('2017-04-26', '1396-02-06', '1438-07-29', '2017-04-26', 6, 29, 2, 7, 'wednesday', 4, 0, 'NONE') ,('2017-04-27', '1396-02-07', '1438-07-30', '2017-04-27', 7, 30, 2, 7, 'thursday ', 5, 0, 'NONE') ,('2017-04-28', '1396-02-08', '1438-08-01', '2017-04-28', 8, 1, 2, 8, 'friday ', 6, 0, 'NONE') ,('2017-04-29', '1396-02-09', '1438-08-02', '2017-04-29', 9, 2, 2, 8, 'saturday ', 7, 0, 'NONE') ,('2017-04-30', '1396-02-10', '1438-08-03', '2017-04-30', 10, 3, 2, 8, 'sunday ', 1, 0, 'NONE') ,('2017-05-01', '1396-02-11', '1438-08-04', '2017-05-01', 11, 4, 2, 8, 'monday ', 2, 0, 'NONE') ,('2017-05-02', '1396-02-12', '1438-08-05', '2017-05-02', 12, 5, 2, 8, 'tuesday ', 3, 0, 'NONE') ,('2017-05-03', '1396-02-13', '1438-08-06', '2017-05-03', 13, 6, 2, 8, 'wednesday', 4, 0, 'NONE') ,('2017-05-04', '1396-02-14', '1438-08-07', '2017-05-04', 14, 7, 2, 8, 'thursday ', 5, 0, 'NONE') ,('2017-05-05', '1396-02-15', '1438-08-08', '2017-05-05', 15, 8, 2, 8, 'friday ', 6, 0, 'NONE') ,('2017-05-06', '1396-02-16', '1438-08-09', '2017-05-06', 16, 9, 2, 8, 'saturday ', 7, 0, 'NONE') ,('2017-05-07', '1396-02-17', '1438-08-10', '2017-05-07', 17, 10, 2, 8, 'sunday ', 1, 0, 'NONE') ,('2017-05-08', '1396-02-18', '1438-08-11', '2017-05-08', 18, 11, 2, 8, 'monday ', 2, 0, 'NONE') ,('2017-05-09', '1396-02-19', '1438-08-12', '2017-05-09', 19, 12, 2, 8, 'tuesday ', 3, 0, 'NONE') ,('2017-05-10', '1396-02-20', '1438-08-13', '2017-05-10', 20, 13, 2, 8, 'wednesday', 4, 0, 'NONE') ,('2017-05-11', '1396-02-21', '1438-08-14', '2017-05-11', 21, 14, 2, 8, 'thursday ', 5, 0, 'NONE') ,('2017-05-12', '1396-02-22', '1438-08-15', '2017-05-12', 
22, 15, 2, 8, 'friday ', 6, 0, 'NONE') ,('2017-05-13', '1396-02-23', '1438-08-16', '2017-05-13', 23, 16, 2, 8, 'saturday ', 7, 0, 'NONE') ,('2017-05-14', '1396-02-24', '1438-08-17', '2017-05-14', 24, 17, 2, 8, 'sunday ', 1, 0, 'NONE') ,('2017-05-15', '1396-02-25', '1438-08-18', '2017-05-15', 25, 18, 2, 8, 'monday ', 2, 0, 'NONE') ,('2017-05-16', '1396-02-26', '1438-08-19', '2017-05-16', 26, 19, 2, 8, 'tuesday ', 3, 0, 'NONE') ,('2017-05-17', '1396-02-27', '1438-08-20', '2017-05-17', 27, 20, 2, 8, 'wednesday', 4, 0, 'NONE') ,('2017-05-18', '1396-02-28', '1438-08-21', '2017-05-18', 28, 21, 2, 8, 'thursday ', 5, 0, 'NONE') ,('2017-05-19', '1396-02-29', '1438-08-22', '2017-05-19', 29, 22, 2, 8, 'friday ', 6, 0, 'NONE') ,('2017-05-20', '1396-02-30', '1438-08-23', '2017-05-20', 30, 23, 2, 8, 'saturday ', 7, 0, 'NONE') ,('2017-05-21', '1396-02-31', '1438-08-24', '2017-05-21', 31, 24, 2, 8, 'sunday ', 1, 0, 'NONE') ,('2017-05-22', '1396-03-01', '1438-08-25', '2017-05-22', 1, 25, 3, 8, 'monday ', 2, 0, 'NONE') ,('2017-05-23', '1396-03-02', '1438-08-26', '2017-05-23', 2, 26, 3, 8, 'tuesday ', 3, 0, 'NONE') ,('2017-05-24', '1396-03-03', '1438-08-27', '2017-05-24', 3, 27, 3, 8, 'wednesday', 4, 0, 'NONE') ,('2017-05-25', '1396-03-04', '1438-08-28', '2017-05-25', 4, 28, 3, 8, 'thursday ', 5, 0, 'NONE') ,('2017-05-26', '1396-03-05', '1438-08-29', '2017-05-26', 5, 29, 3, 8, 'friday ', 6, 0, 'NONE') ,('2017-05-27', '1396-03-06', '1438-09-01', '2017-05-27', 6, 1, 3, 9, 'saturday ', 7, 0, 'NONE') ,('2017-05-28', '1396-03-07', '1438-09-02', '2017-05-28', 7, 2, 3, 9, 'sunday ', 1, 0, 'NONE') ,('2017-05-29', '1396-03-08', '1438-09-03', '2017-05-29', 8, 3, 3, 9, 'monday ', 2, 0, 'NONE') ,('2017-05-30', '1396-03-09', '1438-09-04', '2017-05-30', 9, 4, 3, 9, 'tuesday ', 3, 0, 'NONE') ,('2017-05-31', '1396-03-10', '1438-09-05', '2017-05-31', 10, 5, 3, 9, 'wednesday', 4, 0, 'NONE') ,('2017-06-01', '1396-03-11', '1438-09-06', '2017-06-01', 11, 6, 3, 9, 'thursday ', 5, 0, 'NONE') ,('2017-06-02', '1396-03-12', '1438-09-07', '2017-06-02', 12, 7, 3, 9, 'friday ', 6, 0, 'NONE') ,('2017-06-03', '1396-03-13', '1438-09-08', '2017-06-03', 13, 8, 3, 9, 'saturday ', 7, 0, 'NONE') ,('2017-06-04', '1396-03-14', '1438-09-09', '2017-06-04', 14, 9, 3, 9, 'sunday ', 1, 0, 'NONE') ,('2017-06-05', '1396-03-15', '1438-09-10', '2017-06-05', 15, 10, 3, 9, 'monday ', 2, 0, 'NONE') ,('2017-06-06', '1396-03-16', '1438-09-11', '2017-06-06', 16, 11, 3, 9, 'tuesday ', 3, 0, 'NONE') ,('2017-06-07', '1396-03-17', '1438-09-12', '2017-06-07', 17, 12, 3, 9, 'wednesday', 4, 0, 'NONE') ,('2017-06-08', '1396-03-18', '1438-09-13', '2017-06-08', 18, 13, 3, 9, 'thursday ', 5, 0, 'NONE') ,('2017-06-09', '1396-03-19', '1438-09-14', '2017-06-09', 19, 14, 3, 9, 'friday ', 6, 0, 'NONE') ,('2017-06-10', '1396-03-20', '1438-09-15', '2017-06-10', 20, 15, 3, 9, 'saturday ', 7, 0, 'NONE') ,('2017-06-11', '1396-03-21', '1438-09-16', '2017-06-11', 21, 16, 3, 9, 'sunday ', 1, 0, 'NONE') ,('2017-06-12', '1396-03-22', '1438-09-17', '2017-06-12', 22, 17, 3, 9, 'monday ', 2, 0, 'NONE') ,('2017-06-13', '1396-03-23', '1438-09-18', '2017-06-13', 23, 18, 3, 9, 'tuesday ', 3, 0, 'NONE') ,('2017-06-14', '1396-03-24', '1438-09-19', '2017-06-14', 24, 19, 3, 9, 'wednesday', 4, 0, 'NONE') ,('2017-06-15', '1396-03-25', '1438-09-20', '2017-06-15', 25, 20, 3, 9, 'thursday ', 5, 0, 'NONE') ,('2017-06-16', '1396-03-26', '1438-09-21', '2017-06-16', 26, 21, 3, 9, 'friday ', 6, 0, 'NONE') ,('2017-06-17', '1396-03-27', '1438-09-22', '2017-06-17', 27, 22, 3, 9, 'saturday ', 7, 0, 'NONE') 
,('2017-06-18', '1396-03-28', '1438-09-23', '2017-06-18', 28, 23, 3, 9, 'sunday ', 1, 0, 'NONE') ,('2017-06-19', '1396-03-29', '1438-09-24', '2017-06-19', 29, 24, 3, 9, 'monday ', 2, 0, 'NONE') ,('2017-06-20', '1396-03-30', '1438-09-25', '2017-06-20', 30, 25, 3, 9, 'tuesday ', 3, 0, 'NONE') ,('2017-06-21', '1396-03-31', '1438-09-26', '2017-06-21', 31, 26, 3, 9, 'wednesday', 4, 0, 'NONE') ,('2017-06-22', '1396-04-01', '1438-09-27', '2017-06-22', 1, 27, 4, 9, 'thursday ', 5, 0, 'NONE') ,('2017-06-23', '1396-04-02', '1438-09-28', '2017-06-23', 2, 28, 4, 9, 'friday ', 6, 0, 'NONE') ,('2017-06-24', '1396-04-03', '1438-09-29', '2017-06-24', 3, 29, 4, 9, 'saturday ', 7, 0, 'NONE') ,('2017-06-25', '1396-04-04', '1438-09-30', '2017-06-25', 4, 30, 4, 9, 'sunday ', 1, 0, 'NONE') ,('2017-06-26', '1396-04-05', '1438-10-01', '2017-06-26', 5, 1, 4, 10, 'monday ', 2, 0, 'NONE') ,('2017-06-27', '1396-04-06', '1438-10-02', '2017-06-27', 6, 2, 4, 10, 'tuesday ', 3, 0, 'NONE') ,('2017-06-28', '1396-04-07', '1438-10-03', '2017-06-28', 7, 3, 4, 10, 'wednesday', 4, 0, 'NONE') ,('2017-06-29', '1396-04-08', '1438-10-04', '2017-06-29', 8, 4, 4, 10, 'thursday ', 5, 0, 'NONE') ,('2017-06-30', '1396-04-09', '1438-10-05', '2017-06-30', 9, 5, 4, 10, 'friday ', 6, 0, 'NONE') ,('2017-07-01', '1396-04-10', '1438-10-06', '2017-07-01', 10, 6, 4, 10, 'saturday ', 7, 0, 'NONE') ,('2017-07-02', '1396-04-11', '1438-10-07', '2017-07-02', 11, 7, 4, 10, 'sunday ', 1, 0, 'NONE') ,('2017-07-03', '1396-04-12', '1438-10-08', '2017-07-03', 12, 8, 4, 10, 'monday ', 2, 0, 'NONE') ,('2017-07-04', '1396-04-13', '1438-10-09', '2017-07-04', 13, 9, 4, 10, 'tuesday ', 3, 0, 'NONE') ,('2017-07-05', '1396-04-14', '1438-10-10', '2017-07-05', 14, 10, 4, 10, 'wednesday', 4, 0, 'NONE') ,('2017-07-06', '1396-04-15', '1438-10-11', '2017-07-06', 15, 11, 4, 10, 'thursday ', 5, 0, 'NONE') ,('2017-07-07', '1396-04-16', '1438-10-12', '2017-07-07', 16, 12, 4, 10, 'friday ', 6, 0, 'NONE') ,('2017-07-08', '1396-04-17', '1438-10-13', '2017-07-08', 17, 13, 4, 10, 'saturday ', 7, 0, 'NONE') ,('2017-07-09', '1396-04-18', '1438-10-14', '2017-07-09', 18, 14, 4, 10, 'sunday ', 1, 0, 'NONE') ,('2017-07-10', '1396-04-19', '1438-10-15', '2017-07-10', 19, 15, 4, 10, 'monday ', 2, 0, 'NONE') ,('2017-07-11', '1396-04-20', '1438-10-16', '2017-07-11', 20, 16, 4, 10, 'tuesday ', 3, 0, 'NONE') ,('2017-07-12', '1396-04-21', '1438-10-17', '2017-07-12', 21, 17, 4, 10, 'wednesday', 4, 0, 'NONE') ,('2017-07-13', '1396-04-22', '1438-10-18', '2017-07-13', 22, 18, 4, 10, 'thursday ', 5, 0, 'NONE') ,('2017-07-14', '1396-04-23', '1438-10-19', '2017-07-14', 23, 19, 4, 10, 'friday ', 6, 0, 'NONE') ,('2017-07-15', '1396-04-24', '1438-10-20', '2017-07-15', 24, 20, 4, 10, 'saturday ', 7, 0, 'NONE') ,('2017-07-16', '1396-04-25', '1438-10-21', '2017-07-16', 25, 21, 4, 10, 'sunday ', 1, 0, 'NONE') ,('2017-07-17', '1396-04-26', '1438-10-22', '2017-07-17', 26, 22, 4, 10, 'monday ', 2, 0, 'NONE') ,('2017-07-18', '1396-04-27', '1438-10-23', '2017-07-18', 27, 23, 4, 10, 'tuesday ', 3, 0, 'NONE') ,('2017-07-19', '1396-04-28', '1438-10-24', '2017-07-19', 28, 24, 4, 10, 'wednesday', 4, 0, 'NONE') ,('2017-07-20', '1396-04-29', '1438-10-25', '2017-07-20', 29, 25, 4, 10, 'thursday ', 5, 0, 'NONE') ,('2017-07-21', '1396-04-30', '1438-10-26', '2017-07-21', 30, 26, 4, 10, 'friday ', 6, 0, 'NONE') ,('2017-07-22', '1396-04-31', '1438-10-27', '2017-07-22', 31, 27, 4, 10, 'saturday ', 7, 0, 'NONE') ,('2017-07-23', '1396-05-01', '1438-10-28', '2017-07-23', 1, 28, 5, 10, 'sunday ', 1, 0, 'NONE') ,('2017-07-24', 
'1396-05-02', '1438-10-29', '2017-07-24', 2, 29, 5, 10, 'monday ', 2, 0, 'NONE') ,('2017-07-25', '1396-05-03', '1438-11-01', '2017-07-25', 3, 1, 5, 11, 'tuesday ', 3, 0, 'NONE') ,('2017-07-26', '1396-05-04', '1438-11-02', '2017-07-26', 4, 2, 5, 11, 'wednesday', 4, 0, 'NONE') ,('2017-07-27', '1396-05-05', '1438-11-03', '2017-07-27', 5, 3, 5, 11, 'thursday ', 5, 0, 'NONE') ,('2017-07-28', '1396-05-06', '1438-11-04', '2017-07-28', 6, 4, 5, 11, 'friday ', 6, 0, 'NONE') ,('2017-07-29', '1396-05-07', '1438-11-05', '2017-07-29', 7, 5, 5, 11, 'saturday ', 7, 0, 'NONE') ,('2017-07-30', '1396-05-08', '1438-11-06', '2017-07-30', 8, 6, 5, 11, 'sunday ', 1, 0, 'NONE') ,('2017-07-31', '1396-05-09', '1438-11-07', '2017-07-31', 9, 7, 5, 11, 'monday ', 2, 0, 'NONE') ,('2017-08-01', '1396-05-10', '1438-11-08', '2017-08-01', 10, 8, 5, 11, 'tuesday ', 3, 0, 'NONE') ,('2017-08-02', '1396-05-11', '1438-11-09', '2017-08-02', 11, 9, 5, 11, 'wednesday', 4, 0, 'NONE') ,('2017-08-03', '1396-05-12', '1438-11-10', '2017-08-03', 12, 10, 5, 11, 'thursday ', 5, 0, 'NONE') ,('2017-08-04', '1396-05-13', '1438-11-11', '2017-08-04', 13, 11, 5, 11, 'friday ', 6, 0, 'NONE') ,('2017-08-05', '1396-05-14', '1438-11-12', '2017-08-05', 14, 12, 5, 11, 'saturday ', 7, 0, 'NONE') ,('2017-08-06', '1396-05-15', '1438-11-13', '2017-08-06', 15, 13, 5, 11, 'sunday ', 1, 0, 'NONE') ,('2017-08-07', '1396-05-16', '1438-11-14', '2017-08-07', 16, 14, 5, 11, 'monday ', 2, 0, 'NONE') ,('2017-08-08', '1396-05-17', '1438-11-15', '2017-08-08', 17, 15, 5, 11, 'tuesday ', 3, 0, 'NONE') ,('2017-08-09', '1396-05-18', '1438-11-16', '2017-08-09', 18, 16, 5, 11, 'wednesday', 4, 0, 'NONE') ,('2017-08-10', '1396-05-19', '1438-11-17', '2017-08-10', 19, 17, 5, 11, 'thursday ', 5, 0, 'NONE') ,('2017-08-11', '1396-05-20', '1438-11-18', '2017-08-11', 20, 18, 5, 11, 'friday ', 6, 0, 'NONE') ,('2017-08-12', '1396-05-21', '1438-11-19', '2017-08-12', 21, 19, 5, 11, 'saturday ', 7, 0, 'NONE') ,('2017-08-13', '1396-05-22', '1438-11-20', '2017-08-13', 22, 20, 5, 11, 'sunday ', 1, 0, 'NONE') ,('2017-08-14', '1396-05-23', '1438-11-21', '2017-08-14', 23, 21, 5, 11, 'monday ', 2, 0, 'NONE') ,('2017-08-15', '1396-05-24', '1438-11-22', '2017-08-15', 24, 22, 5, 11, 'tuesday ', 3, 0, 'NONE') ,('2017-08-16', '1396-05-25', '1438-11-23', '2017-08-16', 25, 23, 5, 11, 'wednesday', 4, 0, 'NONE') ,('2017-08-17', '1396-05-26', '1438-11-24', '2017-08-17', 26, 24, 5, 11, 'thursday ', 5, 0, 'NONE') ,('2017-08-18', '1396-05-27', '1438-11-25', '2017-08-18', 27, 25, 5, 11, 'friday ', 6, 0, 'NONE') ,('2017-08-19', '1396-05-28', '1438-11-26', '2017-08-19', 28, 26, 5, 11, 'saturday ', 7, 0, 'NONE') ,('2017-08-20', '1396-05-29', '1438-11-27', '2017-08-20', 29, 27, 5, 11, 'sunday ', 1, 0, 'NONE') ,('2017-08-21', '1396-05-30', '1438-11-28', '2017-08-21', 30, 28, 5, 11, 'monday ', 2, 0, 'NONE') ,('2017-08-22', '1396-05-31', '1438-11-29', '2017-08-22', 31, 29, 5, 11, 'tuesday ', 3, 0, 'NONE') ,('2017-08-23', '1396-06-01', '1438-11-30', '2017-08-23', 1, 30, 6, 11, 'wednesday', 4, 0, 'NONE') ,('2017-08-24', '1396-06-02', '1438-12-01', '2017-08-24', 2, 1, 6, 12, 'thursday ', 5, 0, 'NONE') ,('2017-08-25', '1396-06-03', '1438-12-02', '2017-08-25', 3, 2, 6, 12, 'friday ', 6, 0, 'NONE') ,('2017-08-26', '1396-06-04', '1438-12-03', '2017-08-26', 4, 3, 6, 12, 'saturday ', 7, 0, 'NONE') ,('2017-08-27', '1396-06-05', '1438-12-04', '2017-08-27', 5, 4, 6, 12, 'sunday ', 1, 0, 'NONE') ,('2017-08-28', '1396-06-06', '1438-12-05', '2017-08-28', 6, 5, 6, 12, 'monday ', 2, 0, 'NONE') ,('2017-08-29', '1396-06-07', 
'1438-12-06', '2017-08-29', 7, 6, 6, 12, 'tuesday ', 3, 0, 'NONE') ,('2017-08-30', '1396-06-08', '1438-12-07', '2017-08-30', 8, 7, 6, 12, 'wednesday', 4, 0, 'NONE') ,('2017-08-31', '1396-06-09', '1438-12-08', '2017-08-31', 9, 8, 6, 12, 'thursday ', 5, 0, 'NONE') ,('2017-09-01', '1396-06-10', '1438-12-09', '2017-09-01', 10, 9, 6, 12, 'friday ', 6, 0, 'NONE') ,('2017-09-02', '1396-06-11', '1438-12-10', '2017-09-02', 11, 10, 6, 12, 'saturday ', 7, 0, 'NONE') ,('2017-09-03', '1396-06-12', '1438-12-11', '2017-09-03', 12, 11, 6, 12, 'sunday ', 1, 0, 'NONE') ,('2017-09-04', '1396-06-13', '1438-12-12', '2017-09-04', 13, 12, 6, 12, 'monday ', 2, 0, 'NONE') ,('2017-09-05', '1396-06-14', '1438-12-13', '2017-09-05', 14, 13, 6, 12, 'tuesday ', 3, 0, 'NONE') ,('2017-09-06', '1396-06-15', '1438-12-14', '2017-09-06', 15, 14, 6, 12, 'wednesday', 4, 0, 'NONE') ,('2017-09-07', '1396-06-16', '1438-12-15', '2017-09-07', 16, 15, 6, 12, 'thursday ', 5, 0, 'NONE') ,('2017-09-08', '1396-06-17', '1438-12-16', '2017-09-08', 17, 16, 6, 12, 'friday ', 6, 0, 'NONE') ,('2017-09-09', '1396-06-18', '1438-12-17', '2017-09-09', 18, 17, 6, 12, 'saturday ', 7, 0, 'NONE') ,('2017-09-10', '1396-06-19', '1438-12-18', '2017-09-10', 19, 18, 6, 12, 'sunday ', 1, 0, 'NONE') ,('2017-09-11', '1396-06-20', '1438-12-19', '2017-09-11', 20, 19, 6, 12, 'monday ', 2, 0, 'NONE') ,('2017-09-12', '1396-06-21', '1438-12-20', '2017-09-12', 21, 20, 6, 12, 'tuesday ', 3, 0, 'NONE') ,('2017-09-13', '1396-06-22', '1438-12-21', '2017-09-13', 22, 21, 6, 12, 'wednesday', 4, 0, 'NONE') ,('2017-09-14', '1396-06-23', '1438-12-22', '2017-09-14', 23, 22, 6, 12, 'thursday ', 5, 0, 'NONE') ,('2017-09-15', '1396-06-24', '1438-12-23', '2017-09-15', 24, 23, 6, 12, 'friday ', 6, 0, 'NONE') ,('2017-09-16', '1396-06-25', '1438-12-24', '2017-09-16', 25, 24, 6, 12, 'saturday ', 7, 0, 'NONE') ,('2017-09-17', '1396-06-26', '1438-12-25', '2017-09-17', 26, 25, 6, 12, 'sunday ', 1, 0, 'NONE') ,('2017-09-18', '1396-06-27', '1438-12-26', '2017-09-18', 27, 26, 6, 12, 'monday ', 2, 0, 'NONE') ,('2017-09-19', '1396-06-28', '1438-12-27', '2017-09-19', 28, 27, 6, 12, 'tuesday ', 3, 0, 'NONE') ,('2017-09-20', '1396-06-29', '1438-12-28', '2017-09-20', 29, 28, 6, 12, 'wednesday', 4, 0, 'NONE') ,('2017-09-21', '1396-06-30', '1438-12-29', '2017-09-21', 30, 29, 6, 12, 'thursday ', 5, 0, 'NONE') ,('2017-09-22', '1396-06-31', '1439-01-01', '2017-09-22', 31, 1, 6, 1, 'friday ', 6, 0, 'NONE') ,('2017-09-23', '1396-07-01', '1439-01-02', '2017-09-23', 1, 2, 7, 1, 'saturday ', 7, 0, 'NONE') ,('2017-09-24', '1396-07-02', '1439-01-03', '2017-09-24', 2, 3, 7, 1, 'sunday ', 1, 0, 'NONE') ,('2017-09-25', '1396-07-03', '1439-01-04', '2017-09-25', 3, 4, 7, 1, 'monday ', 2, 0, 'NONE') ,('2017-09-26', '1396-07-04', '1439-01-05', '2017-09-26', 4, 5, 7, 1, 'tuesday ', 3, 0, 'NONE') ,('2017-09-27', '1396-07-05', '1439-01-06', '2017-09-27', 5, 6, 7, 1, 'wednesday', 4, 0, 'NONE') ,('2017-09-28', '1396-07-06', '1439-01-07', '2017-09-28', 6, 7, 7, 1, 'thursday ', 5, 0, 'NONE') ,('2017-09-29', '1396-07-07', '1439-01-08', '2017-09-29', 7, 8, 7, 1, 'friday ', 6, 0, 'NONE') ,('2017-09-30', '1396-07-08', '1439-01-09', '2017-09-30', 8, 9, 7, 1, 'saturday ', 7, 0, 'NONE') ,('2017-10-01', '1396-07-09', '1439-01-10', '2017-10-01', 9, 10, 7, 1, 'sunday ', 1, 0, 'NONE') ,('2017-10-02', '1396-07-10', '1439-01-11', '2017-10-02', 10, 11, 7, 1, 'monday ', 2, 0, 'NONE') ,('2017-10-03', '1396-07-11', '1439-01-12', '2017-10-03', 11, 12, 7, 1, 'tuesday ', 3, 0, 'NONE') ,('2017-10-04', '1396-07-12', '1439-01-13', '2017-10-04', 
12, 13, 7, 1, 'wednesday', 4, 0, 'NONE') ,('2017-10-05', '1396-07-13', '1439-01-14', '2017-10-05', 13, 14, 7, 1, 'thursday ', 5, 0, 'NONE') ,('2017-10-06', '1396-07-14', '1439-01-15', '2017-10-06', 14, 15, 7, 1, 'friday ', 6, 0, 'NONE') ,('2017-10-07', '1396-07-15', '1439-01-16', '2017-10-07', 15, 16, 7, 1, 'saturday ', 7, 0, 'NONE') ,('2017-10-08', '1396-07-16', '1439-01-17', '2017-10-08', 16, 17, 7, 1, 'sunday ', 1, 0, 'NONE') ,('2017-10-09', '1396-07-17', '1439-01-18', '2017-10-09', 17, 18, 7, 1, 'monday ', 2, 0, 'NONE') ,('2017-10-10', '1396-07-18', '1439-01-19', '2017-10-10', 18, 19, 7, 1, 'tuesday ', 3, 0, 'NONE') ,('2017-10-11', '1396-07-19', '1439-01-20', '2017-10-11', 19, 20, 7, 1, 'wednesday', 4, 0, 'NONE') ,('2017-10-12', '1396-07-20', '1439-01-21', '2017-10-12', 20, 21, 7, 1, 'thursday ', 5, 0, 'NONE') ,('2017-10-13', '1396-07-21', '1439-01-22', '2017-10-13', 21, 22, 7, 1, 'friday ', 6, 0, 'NONE') ,('2017-10-14', '1396-07-22', '1439-01-23', '2017-10-14', 22, 23, 7, 1, 'saturday ', 7, 0, 'NONE') ,('2017-10-15', '1396-07-23', '1439-01-24', '2017-10-15', 23, 24, 7, 1, 'sunday ', 1, 0, 'NONE') ,('2017-10-16', '1396-07-24', '1439-01-25', '2017-10-16', 24, 25, 7, 1, 'monday ', 2, 0, 'NONE') ,('2017-10-17', '1396-07-25', '1439-01-26', '2017-10-17', 25, 26, 7, 1, 'tuesday ', 3, 0, 'NONE') ,('2017-10-18', '1396-07-26', '1439-01-27', '2017-10-18', 26, 27, 7, 1, 'wednesday', 4, 0, 'NONE') ,('2017-10-19', '1396-07-27', '1439-01-28', '2017-10-19', 27, 28, 7, 1, 'thursday ', 5, 0, 'NONE') ,('2017-10-20', '1396-07-28', '1439-01-29', '2017-10-20', 28, 29, 7, 1, 'friday ', 6, 0, 'NONE') ,('2017-10-21', '1396-07-29', '1439-01-30', '2017-10-21', 29, 30, 7, 1, 'saturday ', 7, 0, 'NONE') ,('2017-10-22', '1396-07-30', '1439-02-01', '2017-10-22', 30, 1, 7, 2, 'sunday ', 1, 0, 'NONE') ,('2017-10-23', '1396-08-01', '1439-02-02', '2017-10-23', 1, 2, 8, 2, 'monday ', 2, 0, 'NONE') ,('2017-10-24', '1396-08-02', '1439-02-03', '2017-10-24', 2, 3, 8, 2, 'tuesday ', 3, 0, 'NONE') ,('2017-10-25', '1396-08-03', '1439-02-04', '2017-10-25', 3, 4, 8, 2, 'wednesday', 4, 0, 'NONE') ,('2017-10-26', '1396-08-04', '1439-02-05', '2017-10-26', 4, 5, 8, 2, 'thursday ', 5, 0, 'NONE') ,('2017-10-27', '1396-08-05', '1439-02-06', '2017-10-27', 5, 6, 8, 2, 'friday ', 6, 0, 'NONE') ,('2017-10-28', '1396-08-06', '1439-02-07', '2017-10-28', 6, 7, 8, 2, 'saturday ', 7, 0, 'NONE') ,('2017-10-29', '1396-08-07', '1439-02-08', '2017-10-29', 7, 8, 8, 2, 'sunday ', 1, 0, 'NONE') ,('2017-10-30', '1396-08-08', '1439-02-09', '2017-10-30', 8, 9, 8, 2, 'monday ', 2, 0, 'NONE') ,('2017-10-31', '1396-08-09', '1439-02-10', '2017-10-31', 9, 10, 8, 2, 'tuesday ', 3, 0, 'NONE') ,('2017-11-01', '1396-08-10', '1439-02-11', '2017-11-01', 10, 11, 8, 2, 'wednesday', 4, 0, 'NONE') ,('2017-11-02', '1396-08-11', '1439-02-12', '2017-11-02', 11, 12, 8, 2, 'thursday ', 5, 0, 'NONE') ,('2017-11-03', '1396-08-12', '1439-02-13', '2017-11-03', 12, 13, 8, 2, 'friday ', 6, 0, 'NONE') ,('2017-11-04', '1396-08-13', '1439-02-14', '2017-11-04', 13, 14, 8, 2, 'saturday ', 7, 0, 'NONE') ,('2017-11-05', '1396-08-14', '1439-02-15', '2017-11-05', 14, 15, 8, 2, 'sunday ', 1, 0, 'NONE') ,('2017-11-06', '1396-08-15', '1439-02-16', '2017-11-06', 15, 16, 8, 2, 'monday ', 2, 0, 'NONE') ,('2017-11-07', '1396-08-16', '1439-02-17', '2017-11-07', 16, 17, 8, 2, 'tuesday ', 3, 0, 'NONE') ,('2017-11-08', '1396-08-17', '1439-02-18', '2017-11-08', 17, 18, 8, 2, 'wednesday', 4, 0, 'NONE') ,('2017-11-09', '1396-08-18', '1439-02-19', '2017-11-09', 18, 19, 8, 2, 'thursday ', 5, 0, 'NONE') 
,('2017-11-10', '1396-08-19', '1439-02-20', '2017-11-10', 19, 20, 8, 2, 'friday ', 6, 0, 'NONE') ,('2017-11-11', '1396-08-20', '1439-02-21', '2017-11-11', 20, 21, 8, 2, 'saturday ', 7, 0, 'NONE') ,('2017-11-12', '1396-08-21', '1439-02-22', '2017-11-12', 21, 22, 8, 2, 'sunday ', 1, 0, 'NONE') ,('2017-11-13', '1396-08-22', '1439-02-23', '2017-11-13', 22, 23, 8, 2, 'monday ', 2, 0, 'NONE') ,('2017-11-14', '1396-08-23', '1439-02-24', '2017-11-14', 23, 24, 8, 2, 'tuesday ', 3, 0, 'NONE') ,('2017-11-15', '1396-08-24', '1439-02-25', '2017-11-15', 24, 25, 8, 2, 'wednesday', 4, 0, 'NONE') ,('2017-11-16', '1396-08-25', '1439-02-26', '2017-11-16', 25, 26, 8, 2, 'thursday ', 5, 0, 'NONE') ,('2017-11-17', '1396-08-26', '1439-02-27', '2017-11-17', 26, 27, 8, 2, 'friday ', 6, 0, 'NONE') ,('2017-11-18', '1396-08-27', '1439-02-28', '2017-11-18', 27, 28, 8, 2, 'saturday ', 7, 0, 'NONE') ,('2017-11-19', '1396-08-28', '1439-02-29', '2017-11-19', 28, 29, 8, 2, 'sunday ', 1, 0, 'NONE') ,('2017-11-20', '1396-08-29', '1439-03-01', '2017-11-20', 29, 1, 8, 3, 'monday ', 2, 0, 'NONE') ,('2017-11-21', '1396-08-30', '1439-03-02', '2017-11-21', 30, 2, 8, 3, 'tuesday ', 3, 0, 'NONE') ,('2017-11-22', '1396-09-01', '1439-03-03', '2017-11-22', 1, 3, 9, 3, 'wednesday', 4, 0, 'NONE') ,('2017-11-23', '1396-09-02', '1439-03-04', '2017-11-23', 2, 4, 9, 3, 'thursday ', 5, 0, 'NONE') ,('2017-11-24', '1396-09-03', '1439-03-05', '2017-11-24', 3, 5, 9, 3, 'friday ', 6, 0, 'NONE') ,('2017-11-25', '1396-09-04', '1439-03-06', '2017-11-25', 4, 6, 9, 3, 'saturday ', 7, 0, 'NONE') ,('2017-11-26', '1396-09-05', '1439-03-07', '2017-11-26', 5, 7, 9, 3, 'sunday ', 1, 0, 'NONE') ,('2017-11-27', '1396-09-06', '1439-03-08', '2017-11-27', 6, 8, 9, 3, 'monday ', 2, 0, 'NONE') ,('2017-11-28', '1396-09-07', '1439-03-09', '2017-11-28', 7, 9, 9, 3, 'tuesday ', 3, 0, 'NONE') ,('2017-11-29', '1396-09-08', '1439-03-10', '2017-11-29', 8, 10, 9, 3, 'wednesday', 4, 0, 'NONE') ,('2017-11-30', '1396-09-09', '1439-03-11', '2017-11-30', 9, 11, 9, 3, 'thursday ', 5, 0, 'NONE') ,('2017-12-01', '1396-09-10', '1439-03-12', '2017-12-01', 10, 12, 9, 3, 'friday ', 6, 0, 'NONE') ,('2017-12-02', '1396-09-11', '1439-03-13', '2017-12-02', 11, 13, 9, 3, 'saturday ', 7, 0, 'NONE') ,('2017-12-03', '1396-09-12', '1439-03-14', '2017-12-03', 12, 14, 9, 3, 'sunday ', 1, 0, 'NONE') ,('2017-12-04', '1396-09-13', '1439-03-15', '2017-12-04', 13, 15, 9, 3, 'monday ', 2, 0, 'NONE') ,('2017-12-05', '1396-09-14', '1439-03-16', '2017-12-05', 14, 16, 9, 3, 'tuesday ', 3, 0, 'NONE') ,('2017-12-06', '1396-09-15', '1439-03-17', '2017-12-06', 15, 17, 9, 3, 'wednesday', 4, 0, 'NONE') ,('2017-12-07', '1396-09-16', '1439-03-18', '2017-12-07', 16, 18, 9, 3, 'thursday ', 5, 0, 'NONE') ,('2017-12-08', '1396-09-17', '1439-03-19', '2017-12-08', 17, 19, 9, 3, 'friday ', 6, 0, 'NONE') ,('2017-12-09', '1396-09-18', '1439-03-20', '2017-12-09', 18, 20, 9, 3, 'saturday ', 7, 0, 'NONE') ,('2017-12-10', '1396-09-19', '1439-03-21', '2017-12-10', 19, 21, 9, 3, 'sunday ', 1, 0, 'NONE') ,('2017-12-11', '1396-09-20', '1439-03-22', '2017-12-11', 20, 22, 9, 3, 'monday ', 2, 0, 'NONE') ,('2017-12-12', '1396-09-21', '1439-03-23', '2017-12-12', 21, 23, 9, 3, 'tuesday ', 3, 0, 'NONE') ,('2017-12-13', '1396-09-22', '1439-03-24', '2017-12-13', 22, 24, 9, 3, 'wednesday', 4, 0, 'NONE') ,('2017-12-14', '1396-09-23', '1439-03-25', '2017-12-14', 23, 25, 9, 3, 'thursday ', 5, 0, 'NONE') ,('2017-12-15', '1396-09-24', '1439-03-26', '2017-12-15', 24, 26, 9, 3, 'friday ', 6, 0, 'NONE') ,('2017-12-16', '1396-09-25', '1439-03-27', 
'2017-12-16', 25, 27, 9, 3, 'saturday ', 7, 0, 'NONE') ,('2017-12-17', '1396-09-26', '1439-03-28', '2017-12-17', 26, 28, 9, 3, 'sunday ', 1, 0, 'NONE') ,('2017-12-18', '1396-09-27', '1439-03-29', '2017-12-18', 27, 29, 9, 3, 'monday ', 2, 0, 'NONE') ,('2017-12-19', '1396-09-28', '1439-03-30', '2017-12-19', 28, 30, 9, 3, 'tuesday ', 3, 0, 'NONE') ,('2017-12-20', '1396-09-29', '1439-04-01', '2017-12-20', 29, 1, 9, 4, 'wednesday', 4, 0, 'NONE') ,('2017-12-21', '1396-09-30', '1439-04-02', '2017-12-21', 30, 2, 9, 4, 'thursday ', 5, 0, 'NONE') ,('2017-12-22', '1396-10-01', '1439-04-03', '2017-12-22', 1, 3, 10, 4, 'friday ', 6, 0, 'NONE') ,('2017-12-23', '1396-10-02', '1439-04-04', '2017-12-23', 2, 4, 10, 4, 'saturday ', 7, 0, 'NONE') ,('2017-12-24', '1396-10-03', '1439-04-05', '2017-12-24', 3, 5, 10, 4, 'sunday ', 1, 0, 'NONE') ,('2017-12-25', '1396-10-04', '1439-04-06', '2017-12-25', 4, 6, 10, 4, 'monday ', 2, 0, 'NONE') ,('2017-12-26', '1396-10-05', '1439-04-07', '2017-12-26', 5, 7, 10, 4, 'tuesday ', 3, 0, 'NONE') ,('2017-12-27', '1396-10-06', '1439-04-08', '2017-12-27', 6, 8, 10, 4, 'wednesday', 4, 0, 'NONE') ,('2017-12-28', '1396-10-07', '1439-04-09', '2017-12-28', 7, 9, 10, 4, 'thursday ', 5, 0, 'NONE') ,('2017-12-29', '1396-10-08', '1439-04-10', '2017-12-29', 8, 10, 10, 4, 'friday ', 6, 0, 'NONE') ,('2017-12-30', '1396-10-09', '1439-04-11', '2017-12-30', 9, 11, 10, 4, 'saturday ', 7, 0, 'NONE') ,('2017-12-31', '1396-10-10', '1439-04-12', '2017-12-31', 10, 12, 10, 4, 'sunday ', 1, 0, 'NONE') ,('2018-01-01', '1396-10-11', '1439-04-13', '2018-01-01', 11, 13, 10, 4, 'monday ', 2, 0, 'NONE') ,('2018-01-02', '1396-10-12', '1439-04-14', '2018-01-02', 12, 14, 10, 4, 'tuesday ', 3, 0, 'NONE') ,('2018-01-03', '1396-10-13', '1439-04-15', '2018-01-03', 13, 15, 10, 4, 'wednesday', 4, 0, 'NONE') ,('2018-01-04', '1396-10-14', '1439-04-16', '2018-01-04', 14, 16, 10, 4, 'thursday ', 5, 0, 'NONE') ,('2018-01-05', '1396-10-15', '1439-04-17', '2018-01-05', 15, 17, 10, 4, 'friday ', 6, 0, 'NONE') ,('2018-01-06', '1396-10-16', '1439-04-18', '2018-01-06', 16, 18, 10, 4, 'saturday ', 7, 0, 'NONE') ,('2018-01-07', '1396-10-17', '1439-04-19', '2018-01-07', 17, 19, 10, 4, 'sunday ', 1, 0, 'NONE') ,('2018-01-08', '1396-10-18', '1439-04-20', '2018-01-08', 18, 20, 10, 4, 'monday ', 2, 0, 'NONE') ,('2018-01-09', '1396-10-19', '1439-04-21', '2018-01-09', 19, 21, 10, 4, 'tuesday ', 3, 0, 'NONE') ,('2018-01-10', '1396-10-20', '1439-04-22', '2018-01-10', 20, 22, 10, 4, 'wednesday', 4, 0, 'NONE') ,('2018-01-11', '1396-10-21', '1439-04-23', '2018-01-11', 21, 23, 10, 4, 'thursday ', 5, 0, 'NONE') ,('2018-01-12', '1396-10-22', '1439-04-24', '2018-01-12', 22, 24, 10, 4, 'friday ', 6, 0, 'NONE') ,('2018-01-13', '1396-10-23', '1439-04-25', '2018-01-13', 23, 25, 10, 4, 'saturday ', 7, 0, 'NONE') ,('2018-01-14', '1396-10-24', '1439-04-26', '2018-01-14', 24, 26, 10, 4, 'sunday ', 1, 0, 'NONE') ,('2018-01-15', '1396-10-25', '1439-04-27', '2018-01-15', 25, 27, 10, 4, 'monday ', 2, 0, 'NONE') ,('2018-01-16', '1396-10-26', '1439-04-28', '2018-01-16', 26, 28, 10, 4, 'tuesday ', 3, 0, 'NONE') ,('2018-01-17', '1396-10-27', '1439-04-29', '2018-01-17', 27, 29, 10, 4, 'wednesday', 4, 0, 'NONE') ,('2018-01-18', '1396-10-28', '1439-05-01', '2018-01-18', 28, 1, 10, 5, 'thursday ', 5, 0, 'NONE') ,('2018-01-19', '1396-10-29', '1439-05-02', '2018-01-19', 29, 2, 10, 5, 'friday ', 6, 0, 'NONE') ,('2018-01-20', '1396-10-30', '1439-05-03', '2018-01-20', 30, 3, 10, 5, 'saturday ', 7, 0, 'NONE') ,('2018-01-21', '1396-11-01', '1439-05-04', '2018-01-21', 1, 
4, 11, 5, 'sunday ', 1, 0, 'NONE') ,('2018-01-22', '1396-11-02', '1439-05-05', '2018-01-22', 2, 5, 11, 5, 'monday ', 2, 0, 'NONE') ,('2018-01-23', '1396-11-03', '1439-05-06', '2018-01-23', 3, 6, 11, 5, 'tuesday ', 3, 0, 'NONE') ,('2018-01-24', '1396-11-04', '1439-05-07', '2018-01-24', 4, 7, 11, 5, 'wednesday', 4, 0, 'NONE') ,('2018-01-25', '1396-11-05', '1439-05-08', '2018-01-25', 5, 8, 11, 5, 'thursday ', 5, 0, 'NONE') ,('2018-01-26', '1396-11-06', '1439-05-09', '2018-01-26', 6, 9, 11, 5, 'friday ', 6, 0, 'NONE') ,('2018-01-27', '1396-11-07', '1439-05-10', '2018-01-27', 7, 10, 11, 5, 'saturday ', 7, 0, 'NONE') ,('2018-01-28', '1396-11-08', '1439-05-11', '2018-01-28', 8, 11, 11, 5, 'sunday ', 1, 0, 'NONE') ,('2018-01-29', '1396-11-09', '1439-05-12', '2018-01-29', 9, 12, 11, 5, 'monday ', 2, 0, 'NONE') ,('2018-01-30', '1396-11-10', '1439-05-13', '2018-01-30', 10, 13, 11, 5, 'tuesday ', 3, 0, 'NONE') ,('2018-01-31', '1396-11-11', '1439-05-14', '2018-01-31', 11, 14, 11, 5, 'wednesday', 4, 0, 'NONE') ,('2018-02-01', '1396-11-12', '1439-05-15', '2018-02-01', 12, 15, 11, 5, 'thursday ', 5, 0, 'NONE') ,('2018-02-02', '1396-11-13', '1439-05-16', '2018-02-02', 13, 16, 11, 5, 'friday ', 6, 0, 'NONE') ,('2018-02-03', '1396-11-14', '1439-05-17', '2018-02-03', 14, 17, 11, 5, 'saturday ', 7, 0, 'NONE') ,('2018-02-04', '1396-11-15', '1439-05-18', '2018-02-04', 15, 18, 11, 5, 'sunday ', 1, 0, 'NONE') ,('2018-02-05', '1396-11-16', '1439-05-19', '2018-02-05', 16, 19, 11, 5, 'monday ', 2, 0, 'NONE') ,('2018-02-06', '1396-11-17', '1439-05-20', '2018-02-06', 17, 20, 11, 5, 'tuesday ', 3, 0, 'NONE') ,('2018-02-07', '1396-11-18', '1439-05-21', '2018-02-07', 18, 21, 11, 5, 'wednesday', 4, 0, 'NONE') ,('2018-02-08', '1396-11-19', '1439-05-22', '2018-02-08', 19, 22, 11, 5, 'thursday ', 5, 0, 'NONE') ,('2018-02-09', '1396-11-20', '1439-05-23', '2018-02-09', 20, 23, 11, 5, 'friday ', 6, 0, 'NONE') ,('2018-02-10', '1396-11-21', '1439-05-24', '2018-02-10', 21, 24, 11, 5, 'saturday ', 7, 0, 'NONE') ,('2018-02-11', '1396-11-22', '1439-05-25', '2018-02-11', 22, 25, 11, 5, 'sunday ', 1, 0, 'NONE') ,('2018-02-12', '1396-11-23', '1439-05-26', '2018-02-12', 23, 26, 11, 5, 'monday ', 2, 0, 'NONE') ,('2018-02-13', '1396-11-24', '1439-05-27', '2018-02-13', 24, 27, 11, 5, 'tuesday ', 3, 0, 'NONE') ,('2018-02-14', '1396-11-25', '1439-05-28', '2018-02-14', 25, 28, 11, 5, 'wednesday', 4, 0, 'NONE') ,('2018-02-15', '1396-11-26', '1439-05-29', '2018-02-15', 26, 29, 11, 5, 'thursday ', 5, 0, 'NONE') ,('2018-02-16', '1396-11-27', '1439-05-30', '2018-02-16', 27, 30, 11, 5, 'friday ', 6, 0, 'NONE') ,('2018-02-17', '1396-11-28', '1439-06-01', '2018-02-17', 28, 1, 11, 6, 'saturday ', 7, 0, 'NONE') ,('2018-02-18', '1396-11-29', '1439-06-02', '2018-02-18', 29, 2, 11, 6, 'sunday ', 1, 0, 'NONE') ,('2018-02-19', '1396-11-30', '1439-06-03', '2018-02-19', 30, 3, 11, 6, 'monday ', 2, 0, 'NONE') ,('2018-02-20', '1396-12-01', '1439-06-04', '2018-02-20', 1, 4, 12, 6, 'tuesday ', 3, 0, 'NONE') ,('2018-02-21', '1396-12-02', '1439-06-05', '2018-02-21', 2, 5, 12, 6, 'wednesday', 4, 0, 'NONE') ,('2018-02-22', '1396-12-03', '1439-06-06', '2018-02-22', 3, 6, 12, 6, 'thursday ', 5, 0, 'NONE') ,('2018-02-23', '1396-12-04', '1439-06-07', '2018-02-23', 4, 7, 12, 6, 'friday ', 6, 0, 'NONE') ,('2018-02-24', '1396-12-05', '1439-06-08', '2018-02-24', 5, 8, 12, 6, 'saturday ', 7, 0, 'NONE') ,('2018-02-25', '1396-12-06', '1439-06-09', '2018-02-25', 6, 9, 12, 6, 'sunday ', 1, 0, 'NONE') ,('2018-02-26', '1396-12-07', '1439-06-10', '2018-02-26', 7, 10, 12, 6, 'monday ', 
2, 0, 'NONE') ,('2018-02-27', '1396-12-08', '1439-06-11', '2018-02-27', 8, 11, 12, 6, 'tuesday ', 3, 0, 'NONE') ,('2018-02-28', '1396-12-09', '1439-06-12', '2018-02-28', 9, 12, 12, 6, 'wednesday', 4, 0, 'NONE') ,('2018-03-01', '1396-12-10', '1439-06-13', '2018-03-01', 10, 13, 12, 6, 'thursday ', 5, 0, 'NONE') ,('2018-03-02', '1396-12-11', '1439-06-14', '2018-03-02', 11, 14, 12, 6, 'friday ', 6, 0, 'NONE') ,('2018-03-03', '1396-12-12', '1439-06-15', '2018-03-03', 12, 15, 12, 6, 'saturday ', 7, 0, 'NONE') ,('2018-03-04', '1396-12-13', '1439-06-16', '2018-03-04', 13, 16, 12, 6, 'sunday ', 1, 0, 'NONE') ,('2018-03-05', '1396-12-14', '1439-06-17', '2018-03-05', 14, 17, 12, 6, 'monday ', 2, 0, 'NONE') ,('2018-03-06', '1396-12-15', '1439-06-18', '2018-03-06', 15, 18, 12, 6, 'tuesday ', 3, 0, 'NONE') ,('2018-03-07', '1396-12-16', '1439-06-19', '2018-03-07', 16, 19, 12, 6, 'wednesday', 4, 0, 'NONE') ,('2018-03-08', '1396-12-17', '1439-06-20', '2018-03-08', 17, 20, 12, 6, 'thursday ', 5, 0, 'NONE') ,('2018-03-09', '1396-12-18', '1439-06-21', '2018-03-09', 18, 21, 12, 6, 'friday ', 6, 0, 'NONE') ,('2018-03-10', '1396-12-19', '1439-06-22', '2018-03-10', 19, 22, 12, 6, 'saturday ', 7, 0, 'NONE') ,('2018-03-11', '1396-12-20', '1439-06-23', '2018-03-11', 20, 23, 12, 6, 'sunday ', 1, 0, 'NONE') ,('2018-03-12', '1396-12-21', '1439-06-24', '2018-03-12', 21, 24, 12, 6, 'monday ', 2, 0, 'NONE') ,('2018-03-13', '1396-12-22', '1439-06-25', '2018-03-13', 22, 25, 12, 6, 'tuesday ', 3, 0, 'NONE') ,('2018-03-14', '1396-12-23', '1439-06-26', '2018-03-14', 23, 26, 12, 6, 'wednesday', 4, 0, 'NONE') ,('2018-03-15', '1396-12-24', '1439-06-27', '2018-03-15', 24, 27, 12, 6, 'thursday ', 5, 0, 'NONE') ,('2018-03-16', '1396-12-25', '1439-06-28', '2018-03-16', 25, 28, 12, 6, 'friday ', 6, 0, 'NONE') ,('2018-03-17', '1396-12-26', '1439-06-29', '2018-03-17', 26, 29, 12, 6, 'saturday ', 7, 0, 'NONE') ,('2018-03-18', '1396-12-27', '1439-07-01', '2018-03-18', 27, 1, 12, 7, 'sunday ', 1, 0, 'NONE') ,('2018-03-19', '1396-12-28', '1439-07-02', '2018-03-19', 28, 2, 12, 7, 'monday ', 2, 0, 'NONE') ,('2018-03-20', '1396-12-29', '1439-07-03', '2018-03-20', 29, 3, 12, 7, 'tuesday ', 3, 0, 'NONE') ,('2018-03-21', '1397-01-01', '1439-07-04', '2018-03-21', 1, 4, 1, 7, 'wednesday', 4, 0, 'NONE') ,('2018-03-22', '1397-01-02', '1439-07-05', '2018-03-22', 2, 5, 1, 7, 'thursday ', 5, 0, 'NONE') ,('2018-03-23', '1397-01-03', '1439-07-06', '2018-03-23', 3, 6, 1, 7, 'friday ', 6, 0, 'NONE') ,('2018-03-24', '1397-01-04', '1439-07-07', '2018-03-24', 4, 7, 1, 7, 'saturday ', 7, 0, 'NONE') ,('2018-03-25', '1397-01-05', '1439-07-08', '2018-03-25', 5, 8, 1, 7, 'sunday ', 1, 0, 'NONE') ,('2018-03-26', '1397-01-06', '1439-07-09', '2018-03-26', 6, 9, 1, 7, 'monday ', 2, 0, 'NONE') ,('2018-03-27', '1397-01-07', '1439-07-10', '2018-03-27', 7, 10, 1, 7, 'tuesday ', 3, 0, 'NONE') ,('2018-03-28', '1397-01-08', '1439-07-11', '2018-03-28', 8, 11, 1, 7, 'wednesday', 4, 0, 'NONE') ,('2018-03-29', '1397-01-09', '1439-07-12', '2018-03-29', 9, 12, 1, 7, 'thursday ', 5, 0, 'NONE') ,('2018-03-30', '1397-01-10', '1439-07-13', '2018-03-30', 10, 13, 1, 7, 'friday ', 6, 0, 'NONE') ,('2018-03-31', '1397-01-11', '1439-07-14', '2018-03-31', 11, 14, 1, 7, 'saturday ', 7, 0, 'NONE') ,('2018-04-01', '1397-01-12', '1439-07-15', '2018-04-01', 12, 15, 1, 7, 'sunday ', 1, 0, 'NONE') ,('2018-04-02', '1397-01-13', '1439-07-16', '2018-04-02', 13, 16, 1, 7, 'monday ', 2, 0, 'NONE') ,('2018-04-03', '1397-01-14', '1439-07-17', '2018-04-03', 14, 17, 1, 7, 'tuesday ', 3, 0, 'NONE') 
,('2018-04-04', '1397-01-15', '1439-07-18', '2018-04-04', 15, 18, 1, 7, 'wednesday', 4, 0, 'NONE') ,('2018-04-05', '1397-01-16', '1439-07-19', '2018-04-05', 16, 19, 1, 7, 'thursday ', 5, 0, 'NONE') ,('2018-04-06', '1397-01-17', '1439-07-20', '2018-04-06', 17, 20, 1, 7, 'friday ', 6, 0, 'NONE') ,('2018-04-07', '1397-01-18', '1439-07-21', '2018-04-07', 18, 21, 1, 7, 'saturday ', 7, 0, 'NONE') ,('2018-04-08', '1397-01-19', '1439-07-22', '2018-04-08', 19, 22, 1, 7, 'sunday ', 1, 0, 'NONE') ,('2018-04-09', '1397-01-20', '1439-07-23', '2018-04-09', 20, 23, 1, 7, 'monday ', 2, 0, 'NONE') ,('2018-04-10', '1397-01-21', '1439-07-24', '2018-04-10', 21, 24, 1, 7, 'tuesday ', 3, 0, 'NONE') ,('2018-04-11', '1397-01-22', '1439-07-25', '2018-04-11', 22, 25, 1, 7, 'wednesday', 4, 0, 'NONE') ,('2018-04-12', '1397-01-23', '1439-07-26', '2018-04-12', 23, 26, 1, 7, 'thursday ', 5, 0, 'NONE') ,('2018-04-13', '1397-01-24', '1439-07-27', '2018-04-13', 24, 27, 1, 7, 'friday ', 6, 0, 'NONE') ,('2018-04-14', '1397-01-25', '1439-07-28', '2018-04-14', 25, 28, 1, 7, 'saturday ', 7, 0, 'NONE') ,('2018-04-15', '1397-01-26', '1439-07-29', '2018-04-15', 26, 29, 1, 7, 'sunday ', 1, 0, 'NONE') ,('2018-04-16', '1397-01-27', '1439-07-30', '2018-04-16', 27, 30, 1, 7, 'monday ', 2, 0, 'NONE') ,('2018-04-17', '1397-01-28', '1439-08-01', '2018-04-17', 28, 1, 1, 8, 'tuesday ', 3, 0, 'NONE') ,('2018-04-18', '1397-01-29', '1439-08-02', '2018-04-18', 29, 2, 1, 8, 'wednesday', 4, 0, 'NONE') ,('2018-04-19', '1397-01-30', '1439-08-03', '2018-04-19', 30, 3, 1, 8, 'thursday ', 5, 0, 'NONE') ,('2018-04-20', '1397-01-31', '1439-08-04', '2018-04-20', 31, 4, 1, 8, 'friday ', 6, 0, 'NONE') ,('2018-04-21', '1397-02-01', '1439-08-05', '2018-04-21', 1, 5, 2, 8, 'saturday ', 7, 0, 'NONE') ,('2018-04-22', '1397-02-02', '1439-08-06', '2018-04-22', 2, 6, 2, 8, 'sunday ', 1, 0, 'NONE') ,('2018-04-23', '1397-02-03', '1439-08-07', '2018-04-23', 3, 7, 2, 8, 'monday ', 2, 0, 'NONE') ,('2018-04-24', '1397-02-04', '1439-08-08', '2018-04-24', 4, 8, 2, 8, 'tuesday ', 3, 0, 'NONE') ,('2018-04-25', '1397-02-05', '1439-08-09', '2018-04-25', 5, 9, 2, 8, 'wednesday', 4, 0, 'NONE') ,('2018-04-26', '1397-02-06', '1439-08-10', '2018-04-26', 6, 10, 2, 8, 'thursday ', 5, 0, 'NONE') ,('2018-04-27', '1397-02-07', '1439-08-11', '2018-04-27', 7, 11, 2, 8, 'friday ', 6, 0, 'NONE') ,('2018-04-28', '1397-02-08', '1439-08-12', '2018-04-28', 8, 12, 2, 8, 'saturday ', 7, 0, 'NONE') ,('2018-04-29', '1397-02-09', '1439-08-13', '2018-04-29', 9, 13, 2, 8, 'sunday ', 1, 0, 'NONE') ,('2018-04-30', '1397-02-10', '1439-08-14', '2018-04-30', 10, 14, 2, 8, 'monday ', 2, 0, 'NONE') ,('2018-05-01', '1397-02-11', '1439-08-15', '2018-05-01', 11, 15, 2, 8, 'tuesday ', 3, 0, 'NONE') ,('2018-05-02', '1397-02-12', '1439-08-16', '2018-05-02', 12, 16, 2, 8, 'wednesday', 4, 0, 'NONE') ,('2018-05-03', '1397-02-13', '1439-08-17', '2018-05-03', 13, 17, 2, 8, 'thursday ', 5, 0, 'NONE') ,('2018-05-04', '1397-02-14', '1439-08-18', '2018-05-04', 14, 18, 2, 8, 'friday ', 6, 0, 'NONE') ,('2018-05-05', '1397-02-15', '1439-08-19', '2018-05-05', 15, 19, 2, 8, 'saturday ', 7, 0, 'NONE') ,('2018-05-06', '1397-02-16', '1439-08-20', '2018-05-06', 16, 20, 2, 8, 'sunday ', 1, 0, 'NONE') ,('2018-05-07', '1397-02-17', '1439-08-21', '2018-05-07', 17, 21, 2, 8, 'monday ', 2, 0, 'NONE') ,('2018-05-08', '1397-02-18', '1439-08-22', '2018-05-08', 18, 22, 2, 8, 'tuesday ', 3, 0, 'NONE') ,('2018-05-09', '1397-02-19', '1439-08-23', '2018-05-09', 19, 23, 2, 8, 'wednesday', 4, 0, 'NONE') ,('2018-05-10', '1397-02-20', '1439-08-24', 
'2018-05-10', 20, 24, 2, 8, 'thursday ', 5, 0, 'NONE') ,('2018-05-11', '1397-02-21', '1439-08-25', '2018-05-11', 21, 25, 2, 8, 'friday ', 6, 0, 'NONE') ,('2018-05-12', '1397-02-22', '1439-08-26', '2018-05-12', 22, 26, 2, 8, 'saturday ', 7, 0, 'NONE') ,('2018-05-13', '1397-02-23', '1439-08-27', '2018-05-13', 23, 27, 2, 8, 'sunday ', 1, 0, 'NONE') ,('2018-05-14', '1397-02-24', '1439-08-28', '2018-05-14', 24, 28, 2, 8, 'monday ', 2, 0, 'NONE') ,('2018-05-15', '1397-02-25', '1439-08-29', '2018-05-15', 25, 29, 2, 8, 'tuesday ', 3, 0, 'NONE') ,('2018-05-16', '1397-02-26', '1439-09-01', '2018-05-16', 26, 1, 2, 9, 'wednesday', 4, 0, 'NONE') ,('2018-05-17', '1397-02-27', '1439-09-02', '2018-05-17', 27, 2, 2, 9, 'thursday ', 5, 0, 'NONE') ,('2018-05-18', '1397-02-28', '1439-09-03', '2018-05-18', 28, 3, 2, 9, 'friday ', 6, 0, 'NONE') ,('2018-05-19', '1397-02-29', '1439-09-04', '2018-05-19', 29, 4, 2, 9, 'saturday ', 7, 0, 'NONE') ,('2018-05-20', '1397-02-30', '1439-09-05', '2018-05-20', 30, 5, 2, 9, 'sunday ', 1, 0, 'NONE') ,('2018-05-21', '1397-02-31', '1439-09-06', '2018-05-21', 31, 6, 2, 9, 'monday ', 2, 0, 'NONE') ,('2018-05-22', '1397-03-01', '1439-09-07', '2018-05-22', 1, 7, 3, 9, 'tuesday ', 3, 0, 'NONE') ,('2018-05-23', '1397-03-02', '1439-09-08', '2018-05-23', 2, 8, 3, 9, 'wednesday', 4, 0, 'NONE') ,('2018-05-24', '1397-03-03', '1439-09-09', '2018-05-24', 3, 9, 3, 9, 'thursday ', 5, 0, 'NONE') ,('2018-05-25', '1397-03-04', '1439-09-10', '2018-05-25', 4, 10, 3, 9, 'friday ', 6, 0, 'NONE') ,('2018-05-26', '1397-03-05', '1439-09-11', '2018-05-26', 5, 11, 3, 9, 'saturday ', 7, 0, 'NONE') ,('2018-05-27', '1397-03-06', '1439-09-12', '2018-05-27', 6, 12, 3, 9, 'sunday ', 1, 0, 'NONE') ,('2018-05-28', '1397-03-07', '1439-09-13', '2018-05-28', 7, 13, 3, 9, 'monday ', 2, 0, 'NONE') ,('2018-05-29', '1397-03-08', '1439-09-14', '2018-05-29', 8, 14, 3, 9, 'tuesday ', 3, 0, 'NONE') ,('2018-05-30', '1397-03-09', '1439-09-15', '2018-05-30', 9, 15, 3, 9, 'wednesday', 4, 0, 'NONE') ,('2018-05-31', '1397-03-10', '1439-09-16', '2018-05-31', 10, 16, 3, 9, 'thursday ', 5, 0, 'NONE') ,('2018-06-01', '1397-03-11', '1439-09-17', '2018-06-01', 11, 17, 3, 9, 'friday ', 6, 0, 'NONE') ,('2018-06-02', '1397-03-12', '1439-09-18', '2018-06-02', 12, 18, 3, 9, 'saturday ', 7, 0, 'NONE') ,('2018-06-03', '1397-03-13', '1439-09-19', '2018-06-03', 13, 19, 3, 9, 'sunday ', 1, 0, 'NONE') ,('2018-06-04', '1397-03-14', '1439-09-20', '2018-06-04', 14, 20, 3, 9, 'monday ', 2, 0, 'NONE') ,('2018-06-05', '1397-03-15', '1439-09-21', '2018-06-05', 15, 21, 3, 9, 'tuesday ', 3, 0, 'NONE') ,('2018-06-06', '1397-03-16', '1439-09-22', '2018-06-06', 16, 22, 3, 9, 'wednesday', 4, 0, 'NONE') ,('2018-06-07', '1397-03-17', '1439-09-23', '2018-06-07', 17, 23, 3, 9, 'thursday ', 5, 0, 'NONE') ,('2018-06-08', '1397-03-18', '1439-09-24', '2018-06-08', 18, 24, 3, 9, 'friday ', 6, 0, 'NONE') ,('2018-06-09', '1397-03-19', '1439-09-25', '2018-06-09', 19, 25, 3, 9, 'saturday ', 7, 0, 'NONE') ,('2018-06-10', '1397-03-20', '1439-09-26', '2018-06-10', 20, 26, 3, 9, 'sunday ', 1, 0, 'NONE') ,('2018-06-11', '1397-03-21', '1439-09-27', '2018-06-11', 21, 27, 3, 9, 'monday ', 2, 0, 'NONE') ,('2018-06-12', '1397-03-22', '1439-09-28', '2018-06-12', 22, 28, 3, 9, 'tuesday ', 3, 0, 'NONE') ,('2018-06-13', '1397-03-23', '1439-09-29', '2018-06-13', 23, 29, 3, 9, 'wednesday', 4, 0, 'NONE') ,('2018-06-14', '1397-03-24', '1439-09-30', '2018-06-14', 24, 30, 3, 9, 'thursday ', 5, 0, 'NONE') ,('2018-06-15', '1397-03-25', '1439-10-01', '2018-06-15', 25, 1, 3, 10, 'friday ', 6, 
0, 'NONE') ,('2018-06-16', '1397-03-26', '1439-10-02', '2018-06-16', 26, 2, 3, 10, 'saturday ', 7, 0, 'NONE') ,('2018-06-17', '1397-03-27', '1439-10-03', '2018-06-17', 27, 3, 3, 10, 'sunday ', 1, 0, 'NONE') ,('2018-06-18', '1397-03-28', '1439-10-04', '2018-06-18', 28, 4, 3, 10, 'monday ', 2, 0, 'NONE') ,('2018-06-19', '1397-03-29', '1439-10-05', '2018-06-19', 29, 5, 3, 10, 'tuesday ', 3, 0, 'NONE') ,('2018-06-20', '1397-03-30', '1439-10-06', '2018-06-20', 30, 6, 3, 10, 'wednesday', 4, 0, 'NONE') ,('2018-06-21', '1397-03-31', '1439-10-07', '2018-06-21', 31, 7, 3, 10, 'thursday ', 5, 0, 'NONE') ,('2018-06-22', '1397-04-01', '1439-10-08', '2018-06-22', 1, 8, 4, 10, 'friday ', 6, 0, 'NONE') ,('2018-06-23', '1397-04-02', '1439-10-09', '2018-06-23', 2, 9, 4, 10, 'saturday ', 7, 0, 'NONE') ,('2018-06-24', '1397-04-03', '1439-10-10', '2018-06-24', 3, 10, 4, 10, 'sunday ', 1, 0, 'NONE') ,('2018-06-25', '1397-04-04', '1439-10-11', '2018-06-25', 4, 11, 4, 10, 'monday ', 2, 0, 'NONE') ,('2018-06-26', '1397-04-05', '1439-10-12', '2018-06-26', 5, 12, 4, 10, 'tuesday ', 3, 0, 'NONE') ,('2018-06-27', '1397-04-06', '1439-10-13', '2018-06-27', 6, 13, 4, 10, 'wednesday', 4, 0, 'NONE') ,('2018-06-28', '1397-04-07', '1439-10-14', '2018-06-28', 7, 14, 4, 10, 'thursday ', 5, 0, 'NONE') ,('2018-06-29', '1397-04-08', '1439-10-15', '2018-06-29', 8, 15, 4, 10, 'friday ', 6, 0, 'NONE') ,('2018-06-30', '1397-04-09', '1439-10-16', '2018-06-30', 9, 16, 4, 10, 'saturday ', 7, 0, 'NONE') ,('2018-07-01', '1397-04-10', '1439-10-17', '2018-07-01', 10, 17, 4, 10, 'sunday ', 1, 0, 'NONE') ,('2018-07-02', '1397-04-11', '1439-10-18', '2018-07-02', 11, 18, 4, 10, 'monday ', 2, 0, 'NONE') ,('2018-07-03', '1397-04-12', '1439-10-19', '2018-07-03', 12, 19, 4, 10, 'tuesday ', 3, 0, 'NONE') ,('2018-07-04', '1397-04-13', '1439-10-20', '2018-07-04', 13, 20, 4, 10, 'wednesday', 4, 0, 'NONE') ,('2018-07-05', '1397-04-14', '1439-10-21', '2018-07-05', 14, 21, 4, 10, 'thursday ', 5, 0, 'NONE') ,('2018-07-06', '1397-04-15', '1439-10-22', '2018-07-06', 15, 22, 4, 10, 'friday ', 6, 0, 'NONE') ,('2018-07-07', '1397-04-16', '1439-10-23', '2018-07-07', 16, 23, 4, 10, 'saturday ', 7, 0, 'NONE') ,('2018-07-08', '1397-04-17', '1439-10-24', '2018-07-08', 17, 24, 4, 10, 'sunday ', 1, 0, 'NONE') ,('2018-07-09', '1397-04-18', '1439-10-25', '2018-07-09', 18, 25, 4, 10, 'monday ', 2, 0, 'NONE') ,('2018-07-10', '1397-04-19', '1439-10-26', '2018-07-10', 19, 26, 4, 10, 'tuesday ', 3, 0, 'NONE') ,('2018-07-11', '1397-04-20', '1439-10-27', '2018-07-11', 20, 27, 4, 10, 'wednesday', 4, 0, 'NONE') ,('2018-07-12', '1397-04-21', '1439-10-28', '2018-07-12', 21, 28, 4, 10, 'thursday ', 5, 0, 'NONE') ,('2018-07-13', '1397-04-22', '1439-10-29', '2018-07-13', 22, 29, 4, 10, 'friday ', 6, 0, 'NONE') ,('2018-07-14', '1397-04-23', '1439-11-01', '2018-07-14', 23, 1, 4, 11, 'saturday ', 7, 0, 'NONE') ,('2018-07-15', '1397-04-24', '1439-11-02', '2018-07-15', 24, 2, 4, 11, 'sunday ', 1, 0, 'NONE') ,('2018-07-16', '1397-04-25', '1439-11-03', '2018-07-16', 25, 3, 4, 11, 'monday ', 2, 0, 'NONE') ,('2018-07-17', '1397-04-26', '1439-11-04', '2018-07-17', 26, 4, 4, 11, 'tuesday ', 3, 0, 'NONE') ,('2018-07-18', '1397-04-27', '1439-11-05', '2018-07-18', 27, 5, 4, 11, 'wednesday', 4, 0, 'NONE') ,('2018-07-19', '1397-04-28', '1439-11-06', '2018-07-19', 28, 6, 4, 11, 'thursday ', 5, 0, 'NONE') ,('2018-07-20', '1397-04-29', '1439-11-07', '2018-07-20', 29, 7, 4, 11, 'friday ', 6, 0, 'NONE') ,('2018-07-21', '1397-04-30', '1439-11-08', '2018-07-21', 30, 8, 4, 11, 'saturday ', 7, 0, 'NONE') 
,('2018-07-22', '1397-04-31', '1439-11-09', '2018-07-22', 31, 9, 4, 11, 'sunday ', 1, 0, 'NONE') ,('2018-07-23', '1397-05-01', '1439-11-10', '2018-07-23', 1, 10, 5, 11, 'monday ', 2, 0, 'NONE') ,('2018-07-24', '1397-05-02', '1439-11-11', '2018-07-24', 2, 11, 5, 11, 'tuesday ', 3, 0, 'NONE') ,('2018-07-25', '1397-05-03', '1439-11-12', '2018-07-25', 3, 12, 5, 11, 'wednesday', 4, 0, 'NONE') ,('2018-07-26', '1397-05-04', '1439-11-13', '2018-07-26', 4, 13, 5, 11, 'thursday ', 5, 0, 'NONE') ,('2018-07-27', '1397-05-05', '1439-11-14', '2018-07-27', 5, 14, 5, 11, 'friday ', 6, 0, 'NONE') ,('2018-07-28', '1397-05-06', '1439-11-15', '2018-07-28', 6, 15, 5, 11, 'saturday ', 7, 0, 'NONE') ,('2018-07-29', '1397-05-07', '1439-11-16', '2018-07-29', 7, 16, 5, 11, 'sunday ', 1, 0, 'NONE') ,('2018-07-30', '1397-05-08', '1439-11-17', '2018-07-30', 8, 17, 5, 11, 'monday ', 2, 0, 'NONE') ,('2018-07-31', '1397-05-09', '1439-11-18', '2018-07-31', 9, 18, 5, 11, 'tuesday ', 3, 0, 'NONE') ,('2018-08-01', '1397-05-10', '1439-11-19', '2018-08-01', 10, 19, 5, 11, 'wednesday', 4, 0, 'NONE') ,('2018-08-02', '1397-05-11', '1439-11-20', '2018-08-02', 11, 20, 5, 11, 'thursday ', 5, 0, 'NONE') ,('2018-08-03', '1397-05-12', '1439-11-21', '2018-08-03', 12, 21, 5, 11, 'friday ', 6, 0, 'NONE') ,('2018-08-04', '1397-05-13', '1439-11-22', '2018-08-04', 13, 22, 5, 11, 'saturday ', 7, 0, 'NONE') ,('2018-08-05', '1397-05-14', '1439-11-23', '2018-08-05', 14, 23, 5, 11, 'sunday ', 1, 0, 'NONE') ,('2018-08-06', '1397-05-15', '1439-11-24', '2018-08-06', 15, 24, 5, 11, 'monday ', 2, 0, 'NONE') ,('2018-08-07', '1397-05-16', '1439-11-25', '2018-08-07', 16, 25, 5, 11, 'tuesday ', 3, 0, 'NONE') ,('2018-08-08', '1397-05-17', '1439-11-26', '2018-08-08', 17, 26, 5, 11, 'wednesday', 4, 0, 'NONE') ,('2018-08-09', '1397-05-18', '1439-11-27', '2018-08-09', 18, 27, 5, 11, 'thursday ', 5, 0, 'NONE') ,('2018-08-10', '1397-05-19', '1439-11-28', '2018-08-10', 19, 28, 5, 11, 'friday ', 6, 0, 'NONE') ,('2018-08-11', '1397-05-20', '1439-11-29', '2018-08-11', 20, 29, 5, 11, 'saturday ', 7, 0, 'NONE') ,('2018-08-12', '1397-05-21', '1439-11-30', '2018-08-12', 21, 30, 5, 11, 'sunday ', 1, 0, 'NONE') ,('2018-08-13', '1397-05-22', '1439-12-01', '2018-08-13', 22, 1, 5, 12, 'monday ', 2, 0, 'NONE') ,('2018-08-14', '1397-05-23', '1439-12-02', '2018-08-14', 23, 2, 5, 12, 'tuesday ', 3, 0, 'NONE') ,('2018-08-15', '1397-05-24', '1439-12-03', '2018-08-15', 24, 3, 5, 12, 'wednesday', 4, 0, 'NONE') ,('2018-08-16', '1397-05-25', '1439-12-04', '2018-08-16', 25, 4, 5, 12, 'thursday ', 5, 0, 'NONE') ,('2018-08-17', '1397-05-26', '1439-12-05', '2018-08-17', 26, 5, 5, 12, 'friday ', 6, 0, 'NONE') ,('2018-08-18', '1397-05-27', '1439-12-06', '2018-08-18', 27, 6, 5, 12, 'saturday ', 7, 0, 'NONE') ,('2018-08-19', '1397-05-28', '1439-12-07', '2018-08-19', 28, 7, 5, 12, 'sunday ', 1, 0, 'NONE') ,('2018-08-20', '1397-05-29', '1439-12-08', '2018-08-20', 29, 8, 5, 12, 'monday ', 2, 0, 'NONE') ,('2018-08-21', '1397-05-30', '1439-12-09', '2018-08-21', 30, 9, 5, 12, 'tuesday ', 3, 0, 'NONE') ,('2018-08-22', '1397-05-31', '1439-12-10', '2018-08-22', 31, 10, 5, 12, 'wednesday', 4, 0, 'NONE') ,('2018-08-23', '1397-06-01', '1439-12-11', '2018-08-23', 1, 11, 6, 12, 'thursday ', 5, 0, 'NONE') ,('2018-08-24', '1397-06-02', '1439-12-12', '2018-08-24', 2, 12, 6, 12, 'friday ', 6, 0, 'NONE') ,('2018-08-25', '1397-06-03', '1439-12-13', '2018-08-25', 3, 13, 6, 12, 'saturday ', 7, 0, 'NONE') ,('2018-08-26', '1397-06-04', '1439-12-14', '2018-08-26', 4, 14, 6, 12, 'sunday ', 1, 0, 'NONE') 
,('2018-08-27', '1397-06-05', '1439-12-15', '2018-08-27', 5, 15, 6, 12, 'monday ', 2, 0, 'NONE') ,('2018-08-28', '1397-06-06', '1439-12-16', '2018-08-28', 6, 16, 6, 12, 'tuesday ', 3, 0, 'NONE') ,('2018-08-29', '1397-06-07', '1439-12-17', '2018-08-29', 7, 17, 6, 12, 'wednesday', 4, 0, 'NONE') ,('2018-08-30', '1397-06-08', '1439-12-18', '2018-08-30', 8, 18, 6, 12, 'thursday ', 5, 0, 'NONE') ,('2018-08-31', '1397-06-09', '1439-12-19', '2018-08-31', 9, 19, 6, 12, 'friday ', 6, 0, 'NONE') ,('2018-09-01', '1397-06-10', '1439-12-20', '2018-09-01', 10, 20, 6, 12, 'saturday ', 7, 0, 'NONE') ,('2018-09-02', '1397-06-11', '1439-12-21', '2018-09-02', 11, 21, 6, 12, 'sunday ', 1, 0, 'NONE') ,('2018-09-03', '1397-06-12', '1439-12-22', '2018-09-03', 12, 22, 6, 12, 'monday ', 2, 0, 'NONE') ,('2018-09-04', '1397-06-13', '1439-12-23', '2018-09-04', 13, 23, 6, 12, 'tuesday ', 3, 0, 'NONE') ,('2018-09-05', '1397-06-14', '1439-12-24', '2018-09-05', 14, 24, 6, 12, 'wednesday', 4, 0, 'NONE') ,('2018-09-06', '1397-06-15', '1439-12-25', '2018-09-06', 15, 25, 6, 12, 'thursday ', 5, 0, 'NONE') ,('2018-09-07', '1397-06-16', '1439-12-26', '2018-09-07', 16, 26, 6, 12, 'friday ', 6, 0, 'NONE') ,('2018-09-08', '1397-06-17', '1439-12-27', '2018-09-08', 17, 27, 6, 12, 'saturday ', 7, 0, 'NONE') ,('2018-09-09', '1397-06-18', '1439-12-28', '2018-09-09', 18, 28, 6, 12, 'sunday ', 1, 0, 'NONE') ,('2018-09-10', '1397-06-19', '1439-12-29', '2018-09-10', 19, 29, 6, 12, 'monday ', 2, 0, 'NONE') ,('2018-09-11', '1397-06-20', '1439-12-30', '2018-09-11', 20, 30, 6, 12, 'tuesday ', 3, 0, 'NONE') ,('2018-09-12', '1397-06-21', '1440-01-01', '2018-09-12', 21, 1, 6, 1, 'wednesday', 4, 0, 'NONE') ,('2018-09-13', '1397-06-22', '1440-01-02', '2018-09-13', 22, 2, 6, 1, 'thursday ', 5, 0, 'NONE') ,('2018-09-14', '1397-06-23', '1440-01-03', '2018-09-14', 23, 3, 6, 1, 'friday ', 6, 0, 'NONE') ,('2018-09-15', '1397-06-24', '1440-01-04', '2018-09-15', 24, 4, 6, 1, 'saturday ', 7, 0, 'NONE') ,('2018-09-16', '1397-06-25', '1440-01-05', '2018-09-16', 25, 5, 6, 1, 'sunday ', 1, 0, 'NONE') ,('2018-09-17', '1397-06-26', '1440-01-06', '2018-09-17', 26, 6, 6, 1, 'monday ', 2, 0, 'NONE') ,('2018-09-18', '1397-06-27', '1440-01-07', '2018-09-18', 27, 7, 6, 1, 'tuesday ', 3, 0, 'NONE') ,('2018-09-19', '1397-06-28', '1440-01-08', '2018-09-19', 28, 8, 6, 1, 'wednesday', 4, 0, 'NONE') ,('2018-09-20', '1397-06-29', '1440-01-09', '2018-09-20', 29, 9, 6, 1, 'thursday ', 5, 0, 'NONE') ,('2018-09-21', '1397-06-30', '1440-01-10', '2018-09-21', 30, 10, 6, 1, 'friday ', 6, 0, 'NONE') ,('2018-09-22', '1397-06-31', '1440-01-11', '2018-09-22', 31, 11, 6, 1, 'saturday ', 7, 0, 'NONE') ,('2018-09-23', '1397-07-01', '1440-01-12', '2018-09-23', 1, 12, 7, 1, 'sunday ', 1, 0, 'NONE') ,('2018-09-24', '1397-07-02', '1440-01-13', '2018-09-24', 2, 13, 7, 1, 'monday ', 2, 0, 'NONE') ,('2018-09-25', '1397-07-03', '1440-01-14', '2018-09-25', 3, 14, 7, 1, 'tuesday ', 3, 0, 'NONE') ,('2018-09-26', '1397-07-04', '1440-01-15', '2018-09-26', 4, 15, 7, 1, 'wednesday', 4, 0, 'NONE') ,('2018-09-27', '1397-07-05', '1440-01-16', '2018-09-27', 5, 16, 7, 1, 'thursday ', 5, 0, 'NONE') ,('2018-09-28', '1397-07-06', '1440-01-17', '2018-09-28', 6, 17, 7, 1, 'friday ', 6, 0, 'NONE') ,('2018-09-29', '1397-07-07', '1440-01-18', '2018-09-29', 7, 18, 7, 1, 'saturday ', 7, 0, 'NONE') ,('2018-09-30', '1397-07-08', '1440-01-19', '2018-09-30', 8, 19, 7, 1, 'sunday ', 1, 0, 'NONE') ,('2018-10-01', '1397-07-09', '1440-01-20', '2018-10-01', 9, 20, 7, 1, 'monday ', 2, 0, 'NONE') ,('2018-10-02', '1397-07-10', 
'1440-01-21', '2018-10-02', 10, 21, 7, 1, 'tuesday ', 3, 0, 'NONE') ,('2018-10-03', '1397-07-11', '1440-01-22', '2018-10-03', 11, 22, 7, 1, 'wednesday', 4, 0, 'NONE') ,('2018-10-04', '1397-07-12', '1440-01-23', '2018-10-04', 12, 23, 7, 1, 'thursday ', 5, 0, 'NONE') ,('2018-10-05', '1397-07-13', '1440-01-24', '2018-10-05', 13, 24, 7, 1, 'friday ', 6, 0, 'NONE') ,('2018-10-06', '1397-07-14', '1440-01-25', '2018-10-06', 14, 25, 7, 1, 'saturday ', 7, 0, 'NONE') ,('2018-10-07', '1397-07-15', '1440-01-26', '2018-10-07', 15, 26, 7, 1, 'sunday ', 1, 0, 'NONE') ,('2018-10-08', '1397-07-16', '1440-01-27', '2018-10-08', 16, 27, 7, 1, 'monday ', 2, 0, 'NONE') ,('2018-10-09', '1397-07-17', '1440-01-28', '2018-10-09', 17, 28, 7, 1, 'tuesday ', 3, 0, 'NONE') ,('2018-10-10', '1397-07-18', '1440-01-29', '2018-10-10', 18, 29, 7, 1, 'wednesday', 4, 0, 'NONE') ,('2018-10-11', '1397-07-19', '1440-01-30', '2018-10-11', 19, 30, 7, 1, 'thursday ', 5, 0, 'NONE') ,('2018-10-12', '1397-07-20', '1440-02-01', '2018-10-12', 20, 1, 7, 2, 'friday ', 6, 0, 'NONE') ,('2018-10-13', '1397-07-21', '1440-02-02', '2018-10-13', 21, 2, 7, 2, 'saturday ', 7, 0, 'NONE') ,('2018-10-14', '1397-07-22', '1440-02-03', '2018-10-14', 22, 3, 7, 2, 'sunday ', 1, 0, 'NONE') ,('2018-10-15', '1397-07-23', '1440-02-04', '2018-10-15', 23, 4, 7, 2, 'monday ', 2, 0, 'NONE') ,('2018-10-16', '1397-07-24', '1440-02-05', '2018-10-16', 24, 5, 7, 2, 'tuesday ', 3, 0, 'NONE') ,('2018-10-17', '1397-07-25', '1440-02-06', '2018-10-17', 25, 6, 7, 2, 'wednesday', 4, 0, 'NONE') ,('2018-10-18', '1397-07-26', '1440-02-07', '2018-10-18', 26, 7, 7, 2, 'thursday ', 5, 0, 'NONE') ,('2018-10-19', '1397-07-27', '1440-02-08', '2018-10-19', 27, 8, 7, 2, 'friday ', 6, 0, 'NONE') ,('2018-10-20', '1397-07-28', '1440-02-09', '2018-10-20', 28, 9, 7, 2, 'saturday ', 7, 0, 'NONE') ,('2018-10-21', '1397-07-29', '1440-02-10', '2018-10-21', 29, 10, 7, 2, 'sunday ', 1, 0, 'NONE') ,('2018-10-22', '1397-07-30', '1440-02-11', '2018-10-22', 30, 11, 7, 2, 'monday ', 2, 0, 'NONE') ,('2018-10-23', '1397-08-01', '1440-02-12', '2018-10-23', 1, 12, 8, 2, 'tuesday ', 3, 0, 'NONE') ,('2018-10-24', '1397-08-02', '1440-02-13', '2018-10-24', 2, 13, 8, 2, 'wednesday', 4, 0, 'NONE') ,('2018-10-25', '1397-08-03', '1440-02-14', '2018-10-25', 3, 14, 8, 2, 'thursday ', 5, 0, 'NONE') ,('2018-10-26', '1397-08-04', '1440-02-15', '2018-10-26', 4, 15, 8, 2, 'friday ', 6, 0, 'NONE') ,('2018-10-27', '1397-08-05', '1440-02-16', '2018-10-27', 5, 16, 8, 2, 'saturday ', 7, 0, 'NONE') ,('2018-10-28', '1397-08-06', '1440-02-17', '2018-10-28', 6, 17, 8, 2, 'sunday ', 1, 0, 'NONE') ,('2018-10-29', '1397-08-07', '1440-02-18', '2018-10-29', 7, 18, 8, 2, 'monday ', 2, 0, 'NONE') ,('2018-10-30', '1397-08-08', '1440-02-19', '2018-10-30', 8, 19, 8, 2, 'tuesday ', 3, 0, 'NONE') ,('2018-10-31', '1397-08-09', '1440-02-20', '2018-10-31', 9, 20, 8, 2, 'wednesday', 4, 0, 'NONE') ,('2018-11-01', '1397-08-10', '1440-02-21', '2018-11-01', 10, 21, 8, 2, 'thursday ', 5, 0, 'NONE') ,('2018-11-02', '1397-08-11', '1440-02-22', '2018-11-02', 11, 22, 8, 2, 'friday ', 6, 0, 'NONE') ,('2018-11-03', '1397-08-12', '1440-02-23', '2018-11-03', 12, 23, 8, 2, 'saturday ', 7, 0, 'NONE') ,('2018-11-04', '1397-08-13', '1440-02-24', '2018-11-04', 13, 24, 8, 2, 'sunday ', 1, 0, 'NONE') ,('2018-11-05', '1397-08-14', '1440-02-25', '2018-11-05', 14, 25, 8, 2, 'monday ', 2, 0, 'NONE') ,('2018-11-06', '1397-08-15', '1440-02-26', '2018-11-06', 15, 26, 8, 2, 'tuesday ', 3, 0, 'NONE') ,('2018-11-07', '1397-08-16', '1440-02-27', '2018-11-07', 16, 27, 8, 2, 
'wednesday', 4, 0, 'NONE') ,('2018-11-08', '1397-08-17', '1440-02-28', '2018-11-08', 17, 28, 8, 2, 'thursday ', 5, 0, 'NONE') ,('2018-11-09', '1397-08-18', '1440-02-29', '2018-11-09', 18, 29, 8, 2, 'friday ', 6, 0, 'NONE') ,('2018-11-10', '1397-08-19', '1440-03-01', '2018-11-10', 19, 1, 8, 3, 'saturday ', 7, 0, 'NONE') ,('2018-11-11', '1397-08-20', '1440-03-02', '2018-11-11', 20, 2, 8, 3, 'sunday ', 1, 0, 'NONE') ,('2018-11-12', '1397-08-21', '1440-03-03', '2018-11-12', 21, 3, 8, 3, 'monday ', 2, 0, 'NONE') ,('2018-11-13', '1397-08-22', '1440-03-04', '2018-11-13', 22, 4, 8, 3, 'tuesday ', 3, 0, 'NONE') ,('2018-11-14', '1397-08-23', '1440-03-05', '2018-11-14', 23, 5, 8, 3, 'wednesday', 4, 0, 'NONE') ,('2018-11-15', '1397-08-24', '1440-03-06', '2018-11-15', 24, 6, 8, 3, 'thursday ', 5, 0, 'NONE') ,('2018-11-16', '1397-08-25', '1440-03-07', '2018-11-16', 25, 7, 8, 3, 'friday ', 6, 0, 'NONE') ,('2018-11-17', '1397-08-26', '1440-03-08', '2018-11-17', 26, 8, 8, 3, 'saturday ', 7, 0, 'NONE') ,('2018-11-18', '1397-08-27', '1440-03-09', '2018-11-18', 27, 9, 8, 3, 'sunday ', 1, 0, 'NONE') ,('2018-11-19', '1397-08-28', '1440-03-10', '2018-11-19', 28, 10, 8, 3, 'monday ', 2, 0, 'NONE') ,('2018-11-20', '1397-08-29', '1440-03-11', '2018-11-20', 29, 11, 8, 3, 'tuesday ', 3, 0, 'NONE') ,('2018-11-21', '1397-08-30', '1440-03-12', '2018-11-21', 30, 12, 8, 3, 'wednesday', 4, 0, 'NONE') ,('2018-11-22', '1397-09-01', '1440-03-13', '2018-11-22', 1, 13, 9, 3, 'thursday ', 5, 0, 'NONE') ,('2018-11-23', '1397-09-02', '1440-03-14', '2018-11-23', 2, 14, 9, 3, 'friday ', 6, 0, 'NONE') ,('2018-11-24', '1397-09-03', '1440-03-15', '2018-11-24', 3, 15, 9, 3, 'saturday ', 7, 0, 'NONE') ,('2018-11-25', '1397-09-04', '1440-03-16', '2018-11-25', 4, 16, 9, 3, 'sunday ', 1, 0, 'NONE') ,('2018-11-26', '1397-09-05', '1440-03-17', '2018-11-26', 5, 17, 9, 3, 'monday ', 2, 0, 'NONE') ,('2018-11-27', '1397-09-06', '1440-03-18', '2018-11-27', 6, 18, 9, 3, 'tuesday ', 3, 0, 'NONE') ,('2018-11-28', '1397-09-07', '1440-03-19', '2018-11-28', 7, 19, 9, 3, 'wednesday', 4, 0, 'NONE') ,('2018-11-29', '1397-09-08', '1440-03-20', '2018-11-29', 8, 20, 9, 3, 'thursday ', 5, 0, 'NONE') ,('2018-11-30', '1397-09-09', '1440-03-21', '2018-11-30', 9, 21, 9, 3, 'friday ', 6, 0, 'NONE') ,('2018-12-01', '1397-09-10', '1440-03-22', '2018-12-01', 10, 22, 9, 3, 'saturday ', 7, 0, 'NONE') ,('2018-12-02', '1397-09-11', '1440-03-23', '2018-12-02', 11, 23, 9, 3, 'sunday ', 1, 0, 'NONE') ,('2018-12-03', '1397-09-12', '1440-03-24', '2018-12-03', 12, 24, 9, 3, 'monday ', 2, 0, 'NONE') ,('2018-12-04', '1397-09-13', '1440-03-25', '2018-12-04', 13, 25, 9, 3, 'tuesday ', 3, 0, 'NONE') ,('2018-12-05', '1397-09-14', '1440-03-26', '2018-12-05', 14, 26, 9, 3, 'wednesday', 4, 0, 'NONE') ,('2018-12-06', '1397-09-15', '1440-03-27', '2018-12-06', 15, 27, 9, 3, 'thursday ', 5, 0, 'NONE') ,('2018-12-07', '1397-09-16', '1440-03-28', '2018-12-07', 16, 28, 9, 3, 'friday ', 6, 0, 'NONE') ,('2018-12-08', '1397-09-17', '1440-03-29', '2018-12-08', 17, 29, 9, 3, 'saturday ', 7, 0, 'NONE') ,('2018-12-09', '1397-09-18', '1440-03-30', '2018-12-09', 18, 30, 9, 3, 'sunday ', 1, 0, 'NONE') ,('2018-12-10', '1397-09-19', '1440-04-01', '2018-12-10', 19, 1, 9, 4, 'monday ', 2, 0, 'NONE') ,('2018-12-11', '1397-09-20', '1440-04-02', '2018-12-11', 20, 2, 9, 4, 'tuesday ', 3, 0, 'NONE') ,('2018-12-12', '1397-09-21', '1440-04-03', '2018-12-12', 21, 3, 9, 4, 'wednesday', 4, 0, 'NONE') ,('2018-12-13', '1397-09-22', '1440-04-04', '2018-12-13', 22, 4, 9, 4, 'thursday ', 5, 0, 'NONE') ,('2018-12-14', 
'1397-09-23', '1440-04-05', '2018-12-14', 23, 5, 9, 4, 'friday ', 6, 0, 'NONE') ,('2018-12-15', '1397-09-24', '1440-04-06', '2018-12-15', 24, 6, 9, 4, 'saturday ', 7, 0, 'NONE') ,('2018-12-16', '1397-09-25', '1440-04-07', '2018-12-16', 25, 7, 9, 4, 'sunday ', 1, 0, 'NONE') ,('2018-12-17', '1397-09-26', '1440-04-08', '2018-12-17', 26, 8, 9, 4, 'monday ', 2, 0, 'NONE') ,('2018-12-18', '1397-09-27', '1440-04-09', '2018-12-18', 27, 9, 9, 4, 'tuesday ', 3, 0, 'NONE') ,('2018-12-19', '1397-09-28', '1440-04-10', '2018-12-19', 28, 10, 9, 4, 'wednesday', 4, 0, 'NONE') ,('2018-12-20', '1397-09-29', '1440-04-11', '2018-12-20', 29, 11, 9, 4, 'thursday ', 5, 0, 'NONE') ,('2018-12-21', '1397-09-30', '1440-04-12', '2018-12-21', 30, 12, 9, 4, 'friday ', 6, 0, 'NONE') ,('2018-12-22', '1397-10-01', '1440-04-13', '2018-12-22', 1, 13, 10, 4, 'saturday ', 7, 0, 'NONE') ,('2018-12-23', '1397-10-02', '1440-04-14', '2018-12-23', 2, 14, 10, 4, 'sunday ', 1, 0, 'NONE') ,('2018-12-24', '1397-10-03', '1440-04-15', '2018-12-24', 3, 15, 10, 4, 'monday ', 2, 0, 'NONE') ,('2018-12-25', '1397-10-04', '1440-04-16', '2018-12-25', 4, 16, 10, 4, 'tuesday ', 3, 0, 'NONE') ,('2018-12-26', '1397-10-05', '1440-04-17', '2018-12-26', 5, 17, 10, 4, 'wednesday', 4, 0, 'NONE') ,('2018-12-27', '1397-10-06', '1440-04-18', '2018-12-27', 6, 18, 10, 4, 'thursday ', 5, 0, 'NONE') ,('2018-12-28', '1397-10-07', '1440-04-19', '2018-12-28', 7, 19, 10, 4, 'friday ', 6, 0, 'NONE') ,('2018-12-29', '1397-10-08', '1440-04-20', '2018-12-29', 8, 20, 10, 4, 'saturday ', 7, 0, 'NONE') ,('2018-12-30', '1397-10-09', '1440-04-21', '2018-12-30', 9, 21, 10, 4, 'sunday ', 1, 0, 'NONE') ,('2018-12-31', '1397-10-10', '1440-04-22', '2018-12-31', 10, 22, 10, 4, 'monday ', 2, 0, 'NONE') ,('2019-01-01', '1397-10-11', '1440-04-23', '2019-01-01', 11, 23, 10, 4, 'tuesday ', 3, 0, 'NONE') ,('2019-01-02', '1397-10-12', '1440-04-24', '2019-01-02', 12, 24, 10, 4, 'wednesday', 4, 0, 'NONE') ,('2019-01-03', '1397-10-13', '1440-04-25', '2019-01-03', 13, 25, 10, 4, 'thursday ', 5, 0, 'NONE') ,('2019-01-04', '1397-10-14', '1440-04-26', '2019-01-04', 14, 26, 10, 4, 'friday ', 6, 0, 'NONE') ,('2019-01-05', '1397-10-15', '1440-04-27', '2019-01-05', 15, 27, 10, 4, 'saturday ', 7, 0, 'NONE') ,('2019-01-06', '1397-10-16', '1440-04-28', '2019-01-06', 16, 28, 10, 4, 'sunday ', 1, 0, 'NONE') ,('2019-01-07', '1397-10-17', '1440-04-29', '2019-01-07', 17, 29, 10, 4, 'monday ', 2, 0, 'NONE') ,('2019-01-08', '1397-10-18', '1440-05-01', '2019-01-08', 18, 1, 10, 5, 'tuesday ', 3, 0, 'NONE') ,('2019-01-09', '1397-10-19', '1440-05-02', '2019-01-09', 19, 2, 10, 5, 'wednesday', 4, 0, 'NONE') ,('2019-01-10', '1397-10-20', '1440-05-03', '2019-01-10', 20, 3, 10, 5, 'thursday ', 5, 0, 'NONE') ,('2019-01-11', '1397-10-21', '1440-05-04', '2019-01-11', 21, 4, 10, 5, 'friday ', 6, 0, 'NONE') ,('2019-01-12', '1397-10-22', '1440-05-05', '2019-01-12', 22, 5, 10, 5, 'saturday ', 7, 0, 'NONE') ,('2019-01-13', '1397-10-23', '1440-05-06', '2019-01-13', 23, 6, 10, 5, 'sunday ', 1, 0, 'NONE') ,('2019-01-14', '1397-10-24', '1440-05-07', '2019-01-14', 24, 7, 10, 5, 'monday ', 2, 0, 'NONE') ,('2019-01-15', '1397-10-25', '1440-05-08', '2019-01-15', 25, 8, 10, 5, 'tuesday ', 3, 0, 'NONE') ,('2019-01-16', '1397-10-26', '1440-05-09', '2019-01-16', 26, 9, 10, 5, 'wednesday', 4, 0, 'NONE') ,('2019-01-17', '1397-10-27', '1440-05-10', '2019-01-17', 27, 10, 10, 5, 'thursday ', 5, 0, 'NONE') ,('2019-01-18', '1397-10-28', '1440-05-11', '2019-01-18', 28, 11, 10, 5, 'friday ', 6, 0, 'NONE') ,('2019-01-19', '1397-10-29', 
'1440-05-12', '2019-01-19', 29, 12, 10, 5, 'saturday ', 7, 0, 'NONE') ,('2019-01-20', '1397-10-30', '1440-05-13', '2019-01-20', 30, 13, 10, 5, 'sunday ', 1, 0, 'NONE') ,('2019-01-21', '1397-11-01', '1440-05-14', '2019-01-21', 1, 14, 11, 5, 'monday ', 2, 0, 'NONE') ,('2019-01-22', '1397-11-02', '1440-05-15', '2019-01-22', 2, 15, 11, 5, 'tuesday ', 3, 0, 'NONE') ,('2019-01-23', '1397-11-03', '1440-05-16', '2019-01-23', 3, 16, 11, 5, 'wednesday', 4, 0, 'NONE') ,('2019-01-24', '1397-11-04', '1440-05-17', '2019-01-24', 4, 17, 11, 5, 'thursday ', 5, 0, 'NONE') ,('2019-01-25', '1397-11-05', '1440-05-18', '2019-01-25', 5, 18, 11, 5, 'friday ', 6, 0, 'NONE') ,('2019-01-26', '1397-11-06', '1440-05-19', '2019-01-26', 6, 19, 11, 5, 'saturday ', 7, 0, 'NONE') ,('2019-01-27', '1397-11-07', '1440-05-20', '2019-01-27', 7, 20, 11, 5, 'sunday ', 1, 0, 'NONE') ,('2019-01-28', '1397-11-08', '1440-05-21', '2019-01-28', 8, 21, 11, 5, 'monday ', 2, 0, 'NONE') ,('2019-01-29', '1397-11-09', '1440-05-22', '2019-01-29', 9, 22, 11, 5, 'tuesday ', 3, 0, 'NONE') ,('2019-01-30', '1397-11-10', '1440-05-23', '2019-01-30', 10, 23, 11, 5, 'wednesday', 4, 0, 'NONE') ,('2019-01-31', '1397-11-11', '1440-05-24', '2019-01-31', 11, 24, 11, 5, 'thursday ', 5, 0, 'NONE') ,('2019-02-01', '1397-11-12', '1440-05-25', '2019-02-01', 12, 25, 11, 5, 'friday ', 6, 0, 'NONE') ,('2019-02-02', '1397-11-13', '1440-05-26', '2019-02-02', 13, 26, 11, 5, 'saturday ', 7, 0, 'NONE') ,('2019-02-03', '1397-11-14', '1440-05-27', '2019-02-03', 14, 27, 11, 5, 'sunday ', 1, 0, 'NONE') ,('2019-02-04', '1397-11-15', '1440-05-28', '2019-02-04', 15, 28, 11, 5, 'monday ', 2, 0, 'NONE') ,('2019-02-05', '1397-11-16', '1440-05-29', '2019-02-05', 16, 29, 11, 5, 'tuesday ', 3, 0, 'NONE') ,('2019-02-06', '1397-11-17', '1440-05-30', '2019-02-06', 17, 30, 11, 5, 'wednesday', 4, 0, 'NONE') ,('2019-02-07', '1397-11-18', '1440-06-01', '2019-02-07', 18, 1, 11, 6, 'thursday ', 5, 0, 'NONE') ,('2019-02-08', '1397-11-19', '1440-06-02', '2019-02-08', 19, 2, 11, 6, 'friday ', 6, 0, 'NONE') ,('2019-02-09', '1397-11-20', '1440-06-03', '2019-02-09', 20, 3, 11, 6, 'saturday ', 7, 0, 'NONE') ,('2019-02-10', '1397-11-21', '1440-06-04', '2019-02-10', 21, 4, 11, 6, 'sunday ', 1, 0, 'NONE') ,('2019-02-11', '1397-11-22', '1440-06-05', '2019-02-11', 22, 5, 11, 6, 'monday ', 2, 0, 'NONE') ,('2019-02-12', '1397-11-23', '1440-06-06', '2019-02-12', 23, 6, 11, 6, 'tuesday ', 3, 0, 'NONE') ,('2019-02-13', '1397-11-24', '1440-06-07', '2019-02-13', 24, 7, 11, 6, 'wednesday', 4, 0, 'NONE') ,('2019-02-14', '1397-11-25', '1440-06-08', '2019-02-14', 25, 8, 11, 6, 'thursday ', 5, 0, 'NONE') ,('2019-02-15', '1397-11-26', '1440-06-09', '2019-02-15', 26, 9, 11, 6, 'friday ', 6, 0, 'NONE') ,('2019-02-16', '1397-11-27', '1440-06-10', '2019-02-16', 27, 10, 11, 6, 'saturday ', 7, 0, 'NONE') ,('2019-02-17', '1397-11-28', '1440-06-11', '2019-02-17', 28, 11, 11, 6, 'sunday ', 1, 0, 'NONE') ,('2019-02-18', '1397-11-29', '1440-06-12', '2019-02-18', 29, 12, 11, 6, 'monday ', 2, 0, 'NONE') ,('2019-02-19', '1397-11-30', '1440-06-13', '2019-02-19', 30, 13, 11, 6, 'tuesday ', 3, 0, 'NONE') ,('2019-02-20', '1397-12-01', '1440-06-14', '2019-02-20', 1, 14, 12, 6, 'wednesday', 4, 0, 'NONE') ,('2019-02-21', '1397-12-02', '1440-06-15', '2019-02-21', 2, 15, 12, 6, 'thursday ', 5, 0, 'NONE') ,('2019-02-22', '1397-12-03', '1440-06-16', '2019-02-22', 3, 16, 12, 6, 'friday ', 6, 0, 'NONE') ,('2019-02-23', '1397-12-04', '1440-06-17', '2019-02-23', 4, 17, 12, 6, 'saturday ', 7, 0, 'NONE') ,('2019-02-24', '1397-12-05', 
'1440-06-18', '2019-02-24', 5, 18, 12, 6, 'sunday ', 1, 0, 'NONE') ,('2019-02-25', '1397-12-06', '1440-06-19', '2019-02-25', 6, 19, 12, 6, 'monday ', 2, 0, 'NONE') ,('2019-02-26', '1397-12-07', '1440-06-20', '2019-02-26', 7, 20, 12, 6, 'tuesday ', 3, 0, 'NONE') ,('2019-02-27', '1397-12-08', '1440-06-21', '2019-02-27', 8, 21, 12, 6, 'wednesday', 4, 0, 'NONE') ,('2019-02-28', '1397-12-09', '1440-06-22', '2019-02-28', 9, 22, 12, 6, 'thursday ', 5, 0, 'NONE') ,('2019-03-01', '1397-12-10', '1440-06-23', '2019-03-01', 10, 23, 12, 6, 'friday ', 6, 0, 'NONE') ,('2019-03-02', '1397-12-11', '1440-06-24', '2019-03-02', 11, 24, 12, 6, 'saturday ', 7, 0, 'NONE') ,('2019-03-03', '1397-12-12', '1440-06-25', '2019-03-03', 12, 25, 12, 6, 'sunday ', 1, 0, 'NONE') ,('2019-03-04', '1397-12-13', '1440-06-26', '2019-03-04', 13, 26, 12, 6, 'monday ', 2, 0, 'NONE') ,('2019-03-05', '1397-12-14', '1440-06-27', '2019-03-05', 14, 27, 12, 6, 'tuesday ', 3, 0, 'NONE') ,('2019-03-06', '1397-12-15', '1440-06-28', '2019-03-06', 15, 28, 12, 6, 'wednesday', 4, 0, 'NONE') ,('2019-03-07', '1397-12-16', '1440-06-29', '2019-03-07', 16, 29, 12, 6, 'thursday ', 5, 0, 'NONE') ,('2019-03-08', '1397-12-17', '1440-07-01', '2019-03-08', 17, 1, 12, 7, 'friday ', 6, 0, 'NONE') ,('2019-03-09', '1397-12-18', '1440-07-02', '2019-03-09', 18, 2, 12, 7, 'saturday ', 7, 0, 'NONE') ,('2019-03-10', '1397-12-19', '1440-07-03', '2019-03-10', 19, 3, 12, 7, 'sunday ', 1, 0, 'NONE') ,('2019-03-11', '1397-12-20', '1440-07-04', '2019-03-11', 20, 4, 12, 7, 'monday ', 2, 0, 'NONE') ,('2019-03-12', '1397-12-21', '1440-07-05', '2019-03-12', 21, 5, 12, 7, 'tuesday ', 3, 0, 'NONE') ,('2019-03-13', '1397-12-22', '1440-07-06', '2019-03-13', 22, 6, 12, 7, 'wednesday', 4, 0, 'NONE') ,('2019-03-14', '1397-12-23', '1440-07-07', '2019-03-14', 23, 7, 12, 7, 'thursday ', 5, 0, 'NONE') ,('2019-03-15', '1397-12-24', '1440-07-08', '2019-03-15', 24, 8, 12, 7, 'friday ', 6, 0, 'NONE') ,('2019-03-16', '1397-12-25', '1440-07-09', '2019-03-16', 25, 9, 12, 7, 'saturday ', 7, 0, 'NONE') ,('2019-03-17', '1397-12-26', '1440-07-10', '2019-03-17', 26, 10, 12, 7, 'sunday ', 1, 0, 'NONE') ,('2019-03-18', '1397-12-27', '1440-07-11', '2019-03-18', 27, 11, 12, 7, 'monday ', 2, 0, 'NONE') ,('2019-03-19', '1397-12-28', '1440-07-12', '2019-03-19', 28, 12, 12, 7, 'tuesday ', 3, 0, 'NONE') ,('2019-03-20', '1397-12-29', '1440-07-13', '2019-03-20', 29, 13, 12, 7, 'wednesday', 4, 0, 'NONE') ,('2019-03-21', '1398-01-01', '1440-07-14', '2019-03-21', 1, 14, 1, 7, 'thursday ', 5, 0, 'NONE') ,('2019-03-22', '1398-01-02', '1440-07-15', '2019-03-22', 2, 15, 1, 7, 'friday ', 6, 0, 'NONE') ,('2019-03-23', '1398-01-03', '1440-07-16', '2019-03-23', 3, 16, 1, 7, 'saturday ', 7, 0, 'NONE') ,('2019-03-24', '1398-01-04', '1440-07-17', '2019-03-24', 4, 17, 1, 7, 'sunday ', 1, 0, 'NONE') ,('2019-03-25', '1398-01-05', '1440-07-18', '2019-03-25', 5, 18, 1, 7, 'monday ', 2, 0, 'NONE') ,('2019-03-26', '1398-01-06', '1440-07-19', '2019-03-26', 6, 19, 1, 7, 'tuesday ', 3, 0, 'NONE') ,('2019-03-27', '1398-01-07', '1440-07-20', '2019-03-27', 7, 20, 1, 7, 'wednesday', 4, 0, 'NONE') ,('2019-03-28', '1398-01-08', '1440-07-21', '2019-03-28', 8, 21, 1, 7, 'thursday ', 5, 0, 'NONE') ,('2019-03-29', '1398-01-09', '1440-07-22', '2019-03-29', 9, 22, 1, 7, 'friday ', 6, 0, 'NONE') ,('2019-03-30', '1398-01-10', '1440-07-23', '2019-03-30', 10, 23, 1, 7, 'saturday ', 7, 0, 'NONE') ,('2019-03-31', '1398-01-11', '1440-07-24', '2019-03-31', 11, 24, 1, 7, 'sunday ', 1, 0, 'NONE') ,('2019-04-01', '1398-01-12', '1440-07-25', 
'2019-04-01', 12, 25, 1, 7, 'monday ', 2, 0, 'NONE') ,('2019-04-02', '1398-01-13', '1440-07-26', '2019-04-02', 13, 26, 1, 7, 'tuesday ', 3, 0, 'NONE') ,('2019-04-03', '1398-01-14', '1440-07-27', '2019-04-03', 14, 27, 1, 7, 'wednesday', 4, 0, 'NONE') ,('2019-04-04', '1398-01-15', '1440-07-28', '2019-04-04', 15, 28, 1, 7, 'thursday ', 5, 0, 'NONE') ,('2019-04-05', '1398-01-16', '1440-07-29', '2019-04-05', 16, 29, 1, 7, 'friday ', 6, 0, 'NONE') ,('2019-04-06', '1398-01-17', '1440-07-30', '2019-04-06', 17, 30, 1, 7, 'saturday ', 7, 0, 'NONE') ,('2019-04-07', '1398-01-18', '1440-08-01', '2019-04-07', 18, 1, 1, 8, 'sunday ', 1, 0, 'NONE') ,('2019-04-08', '1398-01-19', '1440-08-02', '2019-04-08', 19, 2, 1, 8, 'monday ', 2, 0, 'NONE') ,('2019-04-09', '1398-01-20', '1440-08-03', '2019-04-09', 20, 3, 1, 8, 'tuesday ', 3, 0, 'NONE') ,('2019-04-10', '1398-01-21', '1440-08-04', '2019-04-10', 21, 4, 1, 8, 'wednesday', 4, 0, 'NONE') ,('2019-04-11', '1398-01-22', '1440-08-05', '2019-04-11', 22, 5, 1, 8, 'thursday ', 5, 0, 'NONE') ,('2019-04-12', '1398-01-23', '1440-08-06', '2019-04-12', 23, 6, 1, 8, 'friday ', 6, 0, 'NONE') ,('2019-04-13', '1398-01-24', '1440-08-07', '2019-04-13', 24, 7, 1, 8, 'saturday ', 7, 0, 'NONE') ,('2019-04-14', '1398-01-25', '1440-08-08', '2019-04-14', 25, 8, 1, 8, 'sunday ', 1, 0, 'NONE') ,('2019-04-15', '1398-01-26', '1440-08-09', '2019-04-15', 26, 9, 1, 8, 'monday ', 2, 0, 'NONE') ,('2019-04-16', '1398-01-27', '1440-08-10', '2019-04-16', 27, 10, 1, 8, 'tuesday ', 3, 0, 'NONE') ,('2019-04-17', '1398-01-28', '1440-08-11', '2019-04-17', 28, 11, 1, 8, 'wednesday', 4, 0, 'NONE') ,('2019-04-18', '1398-01-29', '1440-08-12', '2019-04-18', 29, 12, 1, 8, 'thursday ', 5, 0, 'NONE') ,('2019-04-19', '1398-01-30', '1440-08-13', '2019-04-19', 30, 13, 1, 8, 'friday ', 6, 0, 'NONE') ,('2019-04-20', '1398-01-31', '1440-08-14', '2019-04-20', 31, 14, 1, 8, 'saturday ', 7, 0, 'NONE') ,('2019-04-21', '1398-02-01', '1440-08-15', '2019-04-21', 1, 15, 2, 8, 'sunday ', 1, 0, 'NONE') ,('2019-04-22', '1398-02-02', '1440-08-16', '2019-04-22', 2, 16, 2, 8, 'monday ', 2, 0, 'NONE') ,('2019-04-23', '1398-02-03', '1440-08-17', '2019-04-23', 3, 17, 2, 8, 'tuesday ', 3, 0, 'NONE') ,('2019-04-24', '1398-02-04', '1440-08-18', '2019-04-24', 4, 18, 2, 8, 'wednesday', 4, 0, 'NONE') ,('2019-04-25', '1398-02-05', '1440-08-19', '2019-04-25', 5, 19, 2, 8, 'thursday ', 5, 0, 'NONE') ,('2019-04-26', '1398-02-06', '1440-08-20', '2019-04-26', 6, 20, 2, 8, 'friday ', 6, 0, 'NONE') ,('2019-04-27', '1398-02-07', '1440-08-21', '2019-04-27', 7, 21, 2, 8, 'saturday ', 7, 0, 'NONE') ,('2019-04-28', '1398-02-08', '1440-08-22', '2019-04-28', 8, 22, 2, 8, 'sunday ', 1, 0, 'NONE') ,('2019-04-29', '1398-02-09', '1440-08-23', '2019-04-29', 9, 23, 2, 8, 'monday ', 2, 0, 'NONE') ,('2019-04-30', '1398-02-10', '1440-08-24', '2019-04-30', 10, 24, 2, 8, 'tuesday ', 3, 0, 'NONE') ,('2019-05-01', '1398-02-11', '1440-08-25', '2019-05-01', 11, 25, 2, 8, 'wednesday', 4, 0, 'NONE') ,('2019-05-02', '1398-02-12', '1440-08-26', '2019-05-02', 12, 26, 2, 8, 'thursday ', 5, 0, 'NONE') ,('2019-05-03', '1398-02-13', '1440-08-27', '2019-05-03', 13, 27, 2, 8, 'friday ', 6, 0, 'NONE') ,('2019-05-04', '1398-02-14', '1440-08-28', '2019-05-04', 14, 28, 2, 8, 'saturday ', 7, 0, 'NONE') ,('2019-05-05', '1398-02-15', '1440-08-29', '2019-05-05', 15, 29, 2, 8, 'sunday ', 1, 0, 'NONE') ,('2019-05-06', '1398-02-16', '1440-09-01', '2019-05-06', 16, 1, 2, 9, 'monday ', 2, 0, 'NONE') ,('2019-05-07', '1398-02-17', '1440-09-02', '2019-05-07', 17, 2, 2, 9, 'tuesday ', 3, 0, 
'NONE') ,('2019-05-08', '1398-02-18', '1440-09-03', '2019-05-08', 18, 3, 2, 9, 'wednesday', 4, 0, 'NONE') ,('2019-05-09', '1398-02-19', '1440-09-04', '2019-05-09', 19, 4, 2, 9, 'thursday ', 5, 0, 'NONE') ,('2019-05-10', '1398-02-20', '1440-09-05', '2019-05-10', 20, 5, 2, 9, 'friday ', 6, 0, 'NONE') ,('2019-05-11', '1398-02-21', '1440-09-06', '2019-05-11', 21, 6, 2, 9, 'saturday ', 7, 0, 'NONE') ,('2019-05-12', '1398-02-22', '1440-09-07', '2019-05-12', 22, 7, 2, 9, 'sunday ', 1, 0, 'NONE') ,('2019-05-13', '1398-02-23', '1440-09-08', '2019-05-13', 23, 8, 2, 9, 'monday ', 2, 0, 'NONE') ,('2019-05-14', '1398-02-24', '1440-09-09', '2019-05-14', 24, 9, 2, 9, 'tuesday ', 3, 0, 'NONE') ,('2019-05-15', '1398-02-25', '1440-09-10', '2019-05-15', 25, 10, 2, 9, 'wednesday', 4, 0, 'NONE') ,('2019-05-16', '1398-02-26', '1440-09-11', '2019-05-16', 26, 11, 2, 9, 'thursday ', 5, 0, 'NONE') ,('2019-05-17', '1398-02-27', '1440-09-12', '2019-05-17', 27, 12, 2, 9, 'friday ', 6, 0, 'NONE') ,('2019-05-18', '1398-02-28', '1440-09-13', '2019-05-18', 28, 13, 2, 9, 'saturday ', 7, 0, 'NONE') ,('2019-05-19', '1398-02-29', '1440-09-14', '2019-05-19', 29, 14, 2, 9, 'sunday ', 1, 0, 'NONE') ,('2019-05-20', '1398-02-30', '1440-09-15', '2019-05-20', 30, 15, 2, 9, 'monday ', 2, 0, 'NONE') ,('2019-05-21', '1398-02-31', '1440-09-16', '2019-05-21', 31, 16, 2, 9, 'tuesday ', 3, 0, 'NONE') ,('2019-05-22', '1398-03-01', '1440-09-17', '2019-05-22', 1, 17, 3, 9, 'wednesday', 4, 0, 'NONE') ,('2019-05-23', '1398-03-02', '1440-09-18', '2019-05-23', 2, 18, 3, 9, 'thursday ', 5, 0, 'NONE') ,('2019-05-24', '1398-03-03', '1440-09-19', '2019-05-24', 3, 19, 3, 9, 'friday ', 6, 0, 'NONE') ,('2019-05-25', '1398-03-04', '1440-09-20', '2019-05-25', 4, 20, 3, 9, 'saturday ', 7, 0, 'NONE') ,('2019-05-26', '1398-03-05', '1440-09-21', '2019-05-26', 5, 21, 3, 9, 'sunday ', 1, 0, 'NONE') ,('2019-05-27', '1398-03-06', '1440-09-22', '2019-05-27', 6, 22, 3, 9, 'monday ', 2, 0, 'NONE') ,('2019-05-28', '1398-03-07', '1440-09-23', '2019-05-28', 7, 23, 3, 9, 'tuesday ', 3, 0, 'NONE') ,('2019-05-29', '1398-03-08', '1440-09-24', '2019-05-29', 8, 24, 3, 9, 'wednesday', 4, 0, 'NONE') ,('2019-05-30', '1398-03-09', '1440-09-25', '2019-05-30', 9, 25, 3, 9, 'thursday ', 5, 0, 'NONE') ,('2019-05-31', '1398-03-10', '1440-09-26', '2019-05-31', 10, 26, 3, 9, 'friday ', 6, 0, 'NONE') ,('2019-06-01', '1398-03-11', '1440-09-27', '2019-06-01', 11, 27, 3, 9, 'saturday ', 7, 0, 'NONE') ,('2019-06-02', '1398-03-12', '1440-09-28', '2019-06-02', 12, 28, 3, 9, 'sunday ', 1, 0, 'NONE') ,('2019-06-03', '1398-03-13', '1440-09-29', '2019-06-03', 13, 29, 3, 9, 'monday ', 2, 0, 'NONE') ,('2019-06-04', '1398-03-14', '1440-09-30', '2019-06-04', 14, 30, 3, 9, 'tuesday ', 3, 0, 'NONE') ,('2019-06-05', '1398-03-15', '1440-10-01', '2019-06-05', 15, 1, 3, 10, 'wednesday', 4, 0, 'NONE') ,('2019-06-06', '1398-03-16', '1440-10-02', '2019-06-06', 16, 2, 3, 10, 'thursday ', 5, 0, 'NONE') ,('2019-06-07', '1398-03-17', '1440-10-03', '2019-06-07', 17, 3, 3, 10, 'friday ', 6, 0, 'NONE') ,('2019-06-08', '1398-03-18', '1440-10-04', '2019-06-08', 18, 4, 3, 10, 'saturday ', 7, 0, 'NONE') ,('2019-06-09', '1398-03-19', '1440-10-05', '2019-06-09', 19, 5, 3, 10, 'sunday ', 1, 0, 'NONE') ,('2019-06-10', '1398-03-20', '1440-10-06', '2019-06-10', 20, 6, 3, 10, 'monday ', 2, 0, 'NONE') ,('2019-06-11', '1398-03-21', '1440-10-07', '2019-06-11', 21, 7, 3, 10, 'tuesday ', 3, 0, 'NONE') ,('2019-06-12', '1398-03-22', '1440-10-08', '2019-06-12', 22, 8, 3, 10, 'wednesday', 4, 0, 'NONE') ,('2019-06-13', '1398-03-23', 
'1440-10-09', '2019-06-13', 23, 9, 3, 10, 'thursday ', 5, 0, 'NONE') ,('2019-06-14', '1398-03-24', '1440-10-10', '2019-06-14', 24, 10, 3, 10, 'friday ', 6, 0, 'NONE') ,('2019-06-15', '1398-03-25', '1440-10-11', '2019-06-15', 25, 11, 3, 10, 'saturday ', 7, 0, 'NONE') ,('2019-06-16', '1398-03-26', '1440-10-12', '2019-06-16', 26, 12, 3, 10, 'sunday ', 1, 0, 'NONE'); INSERT INTO DATE_INFO_DICT (TV, SHAMSI, HIJRI, MILADI, S_DAY, H_DAY, S_MONTH, H_MONTH, WEEK_DAY_NAME, DAY_NUMBER, HOLIDAY, WEEK_NAME) VALUES ('2019-06-17', '1398-03-27', '1440-10-13', '2019-06-17', 27, 13, 3, 10, 'monday ', 2, 0, 'NONE') ,('2019-06-18', '1398-03-28', '1440-10-14', '2019-06-18', 28, 14, 3, 10, 'tuesday ', 3, 0, 'NONE') ,('2019-06-19', '1398-03-29', '1440-10-15', '2019-06-19', 29, 15, 3, 10, 'wednesday', 4, 0, 'NONE') ,('2019-06-20', '1398-03-30', '1440-10-16', '2019-06-20', 30, 16, 3, 10, 'thursday ', 5, 0, 'NONE') ,('2019-06-21', '1398-03-31', '1440-10-17', '2019-06-21', 31, 17, 3, 10, 'friday ', 6, 0, 'NONE') ,('2019-06-22', '1398-04-01', '1440-10-18', '2019-06-22', 1, 18, 4, 10, 'saturday ', 7, 0, 'NONE') ,('2019-06-23', '1398-04-02', '1440-10-19', '2019-06-23', 2, 19, 4, 10, 'sunday ', 1, 0, 'NONE') ,('2019-06-24', '1398-04-03', '1440-10-20', '2019-06-24', 3, 20, 4, 10, 'monday ', 2, 0, 'NONE') ,('2019-06-25', '1398-04-04', '1440-10-21', '2019-06-25', 4, 21, 4, 10, 'tuesday ', 3, 0, 'NONE') ,('2019-06-26', '1398-04-05', '1440-10-22', '2019-06-26', 5, 22, 4, 10, 'wednesday', 4, 0, 'NONE') ,('2019-06-27', '1398-04-06', '1440-10-23', '2019-06-27', 6, 23, 4, 10, 'thursday ', 5, 0, 'NONE') ,('2019-06-28', '1398-04-07', '1440-10-24', '2019-06-28', 7, 24, 4, 10, 'friday ', 6, 0, 'NONE') ,('2019-06-29', '1398-04-08', '1440-10-25', '2019-06-29', 8, 25, 4, 10, 'saturday ', 7, 0, 'NONE') ,('2019-06-30', '1398-04-09', '1440-10-26', '2019-06-30', 9, 26, 4, 10, 'sunday ', 1, 0, 'NONE') ,('2019-07-01', '1398-04-10', '1440-10-27', '2019-07-01', 10, 27, 4, 10, 'monday ', 2, 0, 'NONE') ,('2019-07-02', '1398-04-11', '1440-10-28', '2019-07-02', 11, 28, 4, 10, 'tuesday ', 3, 0, 'NONE') ,('2019-07-03', '1398-04-12', '1440-10-29', '2019-07-03', 12, 29, 4, 10, 'wednesday', 4, 0, 'NONE') ,('2019-07-04', '1398-04-13', '1440-11-01', '2019-07-04', 13, 1, 4, 11, 'thursday ', 5, 0, 'NONE') ,('2019-07-05', '1398-04-14', '1440-11-02', '2019-07-05', 14, 2, 4, 11, 'friday ', 6, 0, 'NONE') ,('2019-07-06', '1398-04-15', '1440-11-03', '2019-07-06', 15, 3, 4, 11, 'saturday ', 7, 0, 'NONE') ,('2019-07-07', '1398-04-16', '1440-11-04', '2019-07-07', 16, 4, 4, 11, 'sunday ', 1, 0, 'NONE') ,('2019-07-08', '1398-04-17', '1440-11-05', '2019-07-08', 17, 5, 4, 11, 'monday ', 2, 0, 'NONE') ,('2019-07-09', '1398-04-18', '1440-11-06', '2019-07-09', 18, 6, 4, 11, 'tuesday ', 3, 0, 'NONE') ,('2019-07-10', '1398-04-19', '1440-11-07', '2019-07-10', 19, 7, 4, 11, 'wednesday', 4, 0, 'NONE') ,('2019-07-11', '1398-04-20', '1440-11-08', '2019-07-11', 20, 8, 4, 11, 'thursday ', 5, 0, 'NONE') ,('2019-07-12', '1398-04-21', '1440-11-09', '2019-07-12', 21, 9, 4, 11, 'friday ', 6, 0, 'NONE') ,('2019-07-13', '1398-04-22', '1440-11-10', '2019-07-13', 22, 10, 4, 11, 'saturday ', 7, 0, 'NONE') ,('2019-07-14', '1398-04-23', '1440-11-11', '2019-07-14', 23, 11, 4, 11, 'sunday ', 1, 0, 'NONE') ,('2019-07-15', '1398-04-24', '1440-11-12', '2019-07-15', 24, 12, 4, 11, 'monday ', 2, 0, 'NONE') ,('2019-07-16', '1398-04-25', '1440-11-13', '2019-07-16', 25, 13, 4, 11, 'tuesday ', 3, 0, 'NONE') ,('2019-07-17', '1398-04-26', '1440-11-14', '2019-07-17', 26, 14, 4, 11, 'wednesday', 4, 0, 
'NONE') ,('2019-07-18', '1398-04-27', '1440-11-15', '2019-07-18', 27, 15, 4, 11, 'thursday ', 5, 0, 'NONE') ,('2019-07-19', '1398-04-28', '1440-11-16', '2019-07-19', 28, 16, 4, 11, 'friday ', 6, 0, 'NONE') ,('2019-07-20', '1398-04-29', '1440-11-17', '2019-07-20', 29, 17, 4, 11, 'saturday ', 7, 0, 'NONE') ,('2019-07-21', '1398-04-30', '1440-11-18', '2019-07-21', 30, 18, 4, 11, 'sunday ', 1, 0, 'NONE') ,('2019-07-22', '1398-04-31', '1440-11-19', '2019-07-22', 31, 19, 4, 11, 'monday ', 2, 0, 'NONE') ,('2019-07-23', '1398-05-01', '1440-11-20', '2019-07-23', 1, 20, 5, 11, 'tuesday ', 3, 0, 'NONE') ,('2019-07-24', '1398-05-02', '1440-11-21', '2019-07-24', 2, 21, 5, 11, 'wednesday', 4, 0, 'NONE') ,('2019-07-25', '1398-05-03', '1440-11-22', '2019-07-25', 3, 22, 5, 11, 'thursday ', 5, 0, 'NONE') ,('2019-07-26', '1398-05-04', '1440-11-23', '2019-07-26', 4, 23, 5, 11, 'friday ', 6, 0, 'NONE') ,('2019-07-27', '1398-05-05', '1440-11-24', '2019-07-27', 5, 24, 5, 11, 'saturday ', 7, 0, 'NONE') ,('2019-07-28', '1398-05-06', '1440-11-25', '2019-07-28', 6, 25, 5, 11, 'sunday ', 1, 0, 'NONE') ,('2019-07-29', '1398-05-07', '1440-11-26', '2019-07-29', 7, 26, 5, 11, 'monday ', 2, 0, 'NONE') ,('2019-07-30', '1398-05-08', '1440-11-27', '2019-07-30', 8, 27, 5, 11, 'tuesday ', 3, 0, 'NONE') ,('2019-07-31', '1398-05-09', '1440-11-28', '2019-07-31', 9, 28, 5, 11, 'wednesday', 4, 0, 'NONE') ,('2019-08-01', '1398-05-10', '1440-11-29', '2019-08-01', 10, 29, 5, 11, 'thursday ', 5, 0, 'NONE') ,('2019-08-02', '1398-05-11', '1440-11-30', '2019-08-02', 11, 30, 5, 11, 'friday ', 6, 0, 'NONE') ,('2019-08-03', '1398-05-12', '1440-12-01', '2019-08-03', 12, 1, 5, 12, 'saturday ', 7, 0, 'NONE') ,('2019-08-04', '1398-05-13', '1440-12-02', '2019-08-04', 13, 2, 5, 12, 'sunday ', 1, 0, 'NONE') ,('2019-08-05', '1398-05-14', '1440-12-03', '2019-08-05', 14, 3, 5, 12, 'monday ', 2, 0, 'NONE') ,('2019-08-06', '1398-05-15', '1440-12-04', '2019-08-06', 15, 4, 5, 12, 'tuesday ', 3, 0, 'NONE') ,('2019-08-07', '1398-05-16', '1440-12-05', '2019-08-07', 16, 5, 5, 12, 'wednesday', 4, 0, 'NONE') ,('2019-08-08', '1398-05-17', '1440-12-06', '2019-08-08', 17, 6, 5, 12, 'thursday ', 5, 0, 'NONE') ,('2019-08-09', '1398-05-18', '1440-12-07', '2019-08-09', 18, 7, 5, 12, 'friday ', 6, 0, 'NONE') ,('2019-08-10', '1398-05-19', '1440-12-08', '2019-08-10', 19, 8, 5, 12, 'saturday ', 7, 0, 'NONE') ,('2019-08-11', '1398-05-20', '1440-12-09', '2019-08-11', 20, 9, 5, 12, 'sunday ', 1, 0, 'NONE') ,('2019-08-12', '1398-05-21', '1440-12-10', '2019-08-12', 21, 10, 5, 12, 'monday ', 2, 0, 'NONE') ,('2019-08-13', '1398-05-22', '1440-12-11', '2019-08-13', 22, 11, 5, 12, 'tuesday ', 3, 0, 'NONE') ,('2019-08-14', '1398-05-23', '1440-12-12', '2019-08-14', 23, 12, 5, 12, 'wednesday', 4, 0, 'NONE') ,('2019-08-15', '1398-05-24', '1440-12-13', '2019-08-15', 24, 13, 5, 12, 'thursday ', 5, 0, 'NONE') ,('2019-08-16', '1398-05-25', '1440-12-14', '2019-08-16', 25, 14, 5, 12, 'friday ', 6, 0, 'NONE') ,('2019-08-17', '1398-05-26', '1440-12-15', '2019-08-17', 26, 15, 5, 12, 'saturday ', 7, 0, 'NONE') ,('2019-08-18', '1398-05-27', '1440-12-16', '2019-08-18', 27, 16, 5, 12, 'sunday ', 1, 0, 'NONE') ,('2019-08-19', '1398-05-28', '1440-12-17', '2019-08-19', 28, 17, 5, 12, 'monday ', 2, 0, 'NONE') ,('2019-08-20', '1398-05-29', '1440-12-18', '2019-08-20', 29, 18, 5, 12, 'tuesday ', 3, 0, 'NONE') ,('2019-08-21', '1398-05-30', '1440-12-19', '2019-08-21', 30, 19, 5, 12, 'wednesday', 4, 0, 'NONE') ,('2019-08-22', '1398-05-31', '1440-12-20', '2019-08-22', 31, 20, 5, 12, 'thursday ', 5, 0, 'NONE') 
,('2019-08-23', '1398-06-01', '1440-12-21', '2019-08-23', 1, 21, 6, 12, 'friday ', 6, 0, 'NONE') ,('2019-08-24', '1398-06-02', '1440-12-22', '2019-08-24', 2, 22, 6, 12, 'saturday ', 7, 0, 'NONE') ,('2019-08-25', '1398-06-03', '1440-12-23', '2019-08-25', 3, 23, 6, 12, 'sunday ', 1, 0, 'NONE') ,('2019-08-26', '1398-06-04', '1440-12-24', '2019-08-26', 4, 24, 6, 12, 'monday ', 2, 0, 'NONE') ,('2019-08-27', '1398-06-05', '1440-12-25', '2019-08-27', 5, 25, 6, 12, 'tuesday ', 3, 0, 'NONE') ,('2019-08-28', '1398-06-06', '1440-12-26', '2019-08-28', 6, 26, 6, 12, 'wednesday', 4, 0, 'NONE') ,('2019-08-29', '1398-06-07', '1440-12-27', '2019-08-29', 7, 27, 6, 12, 'thursday ', 5, 0, 'NONE') ,('2019-08-30', '1398-06-08', '1440-12-28', '2019-08-30', 8, 28, 6, 12, 'friday ', 6, 0, 'NONE') ,('2019-08-31', '1398-06-09', '1440-12-29', '2019-08-31', 9, 29, 6, 12, 'saturday ', 7, 0, 'NONE') ,('2019-09-01', '1398-06-10', '1441-01-01', '2019-09-01', 10, 1, 6, 1, 'sunday ', 1, 0, 'NONE') ,('2019-09-02', '1398-06-11', '1441-01-02', '2019-09-02', 11, 2, 6, 1, 'monday ', 2, 0, 'NONE') ,('2019-09-03', '1398-06-12', '1441-01-03', '2019-09-03', 12, 3, 6, 1, 'tuesday ', 3, 0, 'NONE') ,('2019-09-04', '1398-06-13', '1441-01-04', '2019-09-04', 13, 4, 6, 1, 'wednesday', 4, 0, 'NONE') ,('2019-09-05', '1398-06-14', '1441-01-05', '2019-09-05', 14, 5, 6, 1, 'thursday ', 5, 0, 'NONE') ,('2019-09-06', '1398-06-15', '1441-01-06', '2019-09-06', 15, 6, 6, 1, 'friday ', 6, 0, 'NONE') ,('2019-09-07', '1398-06-16', '1441-01-07', '2019-09-07', 16, 7, 6, 1, 'saturday ', 7, 0, 'NONE') ,('2019-09-08', '1398-06-17', '1441-01-08', '2019-09-08', 17, 8, 6, 1, 'sunday ', 1, 0, 'NONE') ,('2019-09-09', '1398-06-18', '1441-01-09', '2019-09-09', 18, 9, 6, 1, 'monday ', 2, 0, 'NONE') ,('2019-09-10', '1398-06-19', '1441-01-10', '2019-09-10', 19, 10, 6, 1, 'tuesday ', 3, 0, 'NONE') ,('2019-09-11', '1398-06-20', '1441-01-11', '2019-09-11', 20, 11, 6, 1, 'wednesday', 4, 0, 'NONE') ,('2019-09-12', '1398-06-21', '1441-01-12', '2019-09-12', 21, 12, 6, 1, 'thursday ', 5, 0, 'NONE') ,('2019-09-13', '1398-06-22', '1441-01-13', '2019-09-13', 22, 13, 6, 1, 'friday ', 6, 0, 'NONE') ,('2019-09-14', '1398-06-23', '1441-01-14', '2019-09-14', 23, 14, 6, 1, 'saturday ', 7, 0, 'NONE') ,('2019-09-15', '1398-06-24', '1441-01-15', '2019-09-15', 24, 15, 6, 1, 'sunday ', 1, 0, 'NONE') ,('2019-09-16', '1398-06-25', '1441-01-16', '2019-09-16', 25, 16, 6, 1, 'monday ', 2, 0, 'NONE') ,('2019-09-17', '1398-06-26', '1441-01-17', '2019-09-17', 26, 17, 6, 1, 'tuesday ', 3, 0, 'NONE') ,('2019-09-18', '1398-06-27', '1441-01-18', '2019-09-18', 27, 18, 6, 1, 'wednesday', 4, 0, 'NONE') ,('2019-09-19', '1398-06-28', '1441-01-19', '2019-09-19', 28, 19, 6, 1, 'thursday ', 5, 0, 'NONE') ,('2019-09-20', '1398-06-29', '1441-01-20', '2019-09-20', 29, 20, 6, 1, 'friday ', 6, 0, 'NONE') ,('2019-09-21', '1398-06-30', '1441-01-21', '2019-09-21', 30, 21, 6, 1, 'saturday ', 7, 0, 'NONE') ,('2019-09-22', '1398-06-31', '1441-01-22', '2019-09-22', 31, 22, 6, 1, 'sunday ', 1, 0, 'NONE') ,('2019-09-23', '1398-07-01', '1441-01-23', '2019-09-23', 1, 23, 7, 1, 'monday ', 2, 0, 'NONE') ,('2019-09-24', '1398-07-02', '1441-01-24', '2019-09-24', 2, 24, 7, 1, 'tuesday ', 3, 0, 'NONE') ,('2019-09-25', '1398-07-03', '1441-01-25', '2019-09-25', 3, 25, 7, 1, 'wednesday', 4, 0, 'NONE') ,('2019-09-26', '1398-07-04', '1441-01-26', '2019-09-26', 4, 26, 7, 1, 'thursday ', 5, 0, 'NONE') ,('2019-09-27', '1398-07-05', '1441-01-27', '2019-09-27', 5, 27, 7, 1, 'friday ', 6, 0, 'NONE') ,('2019-09-28', '1398-07-06', 
'1441-01-28', '2019-09-28', 6, 28, 7, 1, 'saturday ', 7, 0, 'NONE') ,('2019-09-29', '1398-07-07', '1441-01-29', '2019-09-29', 7, 29, 7, 1, 'sunday ', 1, 0, 'NONE') ,('2019-09-30', '1398-07-08', '1441-01-30', '2019-09-30', 8, 30, 7, 1, 'monday ', 2, 0, 'NONE') ,('2019-10-01', '1398-07-09', '1441-02-01', '2019-10-01', 9, 1, 7, 2, 'tuesday ', 3, 0, 'NONE') ,('2019-10-02', '1398-07-10', '1441-02-02', '2019-10-02', 10, 2, 7, 2, 'wednesday', 4, 0, 'NONE') ,('2019-10-03', '1398-07-11', '1441-02-03', '2019-10-03', 11, 3, 7, 2, 'thursday ', 5, 0, 'NONE') ,('2019-10-04', '1398-07-12', '1441-02-04', '2019-10-04', 12, 4, 7, 2, 'friday ', 6, 0, 'NONE') ,('2019-10-05', '1398-07-13', '1441-02-05', '2019-10-05', 13, 5, 7, 2, 'saturday ', 7, 0, 'NONE') ,('2019-10-06', '1398-07-14', '1441-02-06', '2019-10-06', 14, 6, 7, 2, 'sunday ', 1, 0, 'NONE') ,('2019-10-07', '1398-07-15', '1441-02-07', '2019-10-07', 15, 7, 7, 2, 'monday ', 2, 0, 'NONE') ,('2019-10-08', '1398-07-16', '1441-02-08', '2019-10-08', 16, 8, 7, 2, 'tuesday ', 3, 0, 'NONE') ,('2019-10-09', '1398-07-17', '1441-02-09', '2019-10-09', 17, 9, 7, 2, 'wednesday', 4, 0, 'NONE') ,('2019-10-10', '1398-07-18', '1441-02-10', '2019-10-10', 18, 10, 7, 2, 'thursday ', 5, 0, 'NONE') ,('2019-10-11', '1398-07-19', '1441-02-11', '2019-10-11', 19, 11, 7, 2, 'friday ', 6, 0, 'NONE') ,('2019-10-12', '1398-07-20', '1441-02-12', '2019-10-12', 20, 12, 7, 2, 'saturday ', 7, 0, 'NONE') ,('2019-10-13', '1398-07-21', '1441-02-13', '2019-10-13', 21, 13, 7, 2, 'sunday ', 1, 0, 'NONE') ,('2019-10-14', '1398-07-22', '1441-02-14', '2019-10-14', 22, 14, 7, 2, 'monday ', 2, 0, 'NONE') ,('2019-10-15', '1398-07-23', '1441-02-15', '2019-10-15', 23, 15, 7, 2, 'tuesday ', 3, 0, 'NONE') ,('2019-10-16', '1398-07-24', '1441-02-16', '2019-10-16', 24, 16, 7, 2, 'wednesday', 4, 0, 'NONE') ,('2019-10-17', '1398-07-25', '1441-02-17', '2019-10-17', 25, 17, 7, 2, 'thursday ', 5, 0, 'NONE') ,('2019-10-18', '1398-07-26', '1441-02-18', '2019-10-18', 26, 18, 7, 2, 'friday ', 6, 0, 'NONE') ,('2019-10-19', '1398-07-27', '1441-02-19', '2019-10-19', 27, 19, 7, 2, 'saturday ', 7, 0, 'NONE') ,('2019-10-20', '1398-07-28', '1441-02-20', '2019-10-20', 28, 20, 7, 2, 'sunday ', 1, 0, 'NONE') ,('2019-10-21', '1398-07-29', '1441-02-21', '2019-10-21', 29, 21, 7, 2, 'monday ', 2, 0, 'NONE') ,('2019-10-22', '1398-07-30', '1441-02-22', '2019-10-22', 30, 22, 7, 2, 'tuesday ', 3, 0, 'NONE') ,('2019-10-23', '1398-08-01', '1441-02-23', '2019-10-23', 1, 23, 8, 2, 'wednesday', 4, 0, 'NONE') ,('2019-10-24', '1398-08-02', '1441-02-24', '2019-10-24', 2, 24, 8, 2, 'thursday ', 5, 0, 'NONE') ,('2019-10-25', '1398-08-03', '1441-02-25', '2019-10-25', 3, 25, 8, 2, 'friday ', 6, 0, 'NONE') ,('2019-10-26', '1398-08-04', '1441-02-26', '2019-10-26', 4, 26, 8, 2, 'saturday ', 7, 0, 'NONE') ,('2019-10-27', '1398-08-05', '1441-02-27', '2019-10-27', 5, 27, 8, 2, 'sunday ', 1, 0, 'NONE') ,('2019-10-28', '1398-08-06', '1441-02-28', '2019-10-28', 6, 28, 8, 2, 'monday ', 2, 0, 'NONE') ,('2019-10-29', '1398-08-07', '1441-02-29', '2019-10-29', 7, 29, 8, 2, 'tuesday ', 3, 0, 'NONE') ,('2019-10-30', '1398-08-08', '1441-03-01', '2019-10-30', 8, 1, 8, 3, 'wednesday', 4, 0, 'NONE') ,('2019-10-31', '1398-08-09', '1441-03-02', '2019-10-31', 9, 2, 8, 3, 'thursday ', 5, 0, 'NONE') ,('2019-11-01', '1398-08-10', '1441-03-03', '2019-11-01', 10, 3, 8, 3, 'friday ', 6, 0, 'NONE') ,('2019-11-02', '1398-08-11', '1441-03-04', '2019-11-02', 11, 4, 8, 3, 'saturday ', 7, 0, 'NONE') ,('2019-11-03', '1398-08-12', '1441-03-05', '2019-11-03', 12, 5, 8, 3, 'sunday ', 
1, 0, 'NONE') ,('2019-11-04', '1398-08-13', '1441-03-06', '2019-11-04', 13, 6, 8, 3, 'monday ', 2, 0, 'NONE') ,('2019-11-05', '1398-08-14', '1441-03-07', '2019-11-05', 14, 7, 8, 3, 'tuesday ', 3, 0, 'NONE') ,('2019-11-06', '1398-08-15', '1441-03-08', '2019-11-06', 15, 8, 8, 3, 'wednesday', 4, 0, 'NONE') ,('2019-11-07', '1398-08-16', '1441-03-09', '2019-11-07', 16, 9, 8, 3, 'thursday ', 5, 0, 'NONE') ,('2019-11-08', '1398-08-17', '1441-03-10', '2019-11-08', 17, 10, 8, 3, 'friday ', 6, 0, 'NONE') ,('2019-11-09', '1398-08-18', '1441-03-11', '2019-11-09', 18, 11, 8, 3, 'saturday ', 7, 0, 'NONE') ,('2019-11-10', '1398-08-19', '1441-03-12', '2019-11-10', 19, 12, 8, 3, 'sunday ', 1, 0, 'NONE') ,('2019-11-11', '1398-08-20', '1441-03-13', '2019-11-11', 20, 13, 8, 3, 'monday ', 2, 0, 'NONE') ,('2019-11-12', '1398-08-21', '1441-03-14', '2019-11-12', 21, 14, 8, 3, 'tuesday ', 3, 0, 'NONE') ,('2019-11-13', '1398-08-22', '1441-03-15', '2019-11-13', 22, 15, 8, 3, 'wednesday', 4, 0, 'NONE') ,('2019-11-14', '1398-08-23', '1441-03-16', '2019-11-14', 23, 16, 8, 3, 'thursday ', 5, 0, 'NONE') ,('2019-11-15', '1398-08-24', '1441-03-17', '2019-11-15', 24, 17, 8, 3, 'friday ', 6, 0, 'NONE') ,('2019-11-16', '1398-08-25', '1441-03-18', '2019-11-16', 25, 18, 8, 3, 'saturday ', 7, 0, 'NONE') ,('2019-11-17', '1398-08-26', '1441-03-19', '2019-11-17', 26, 19, 8, 3, 'sunday ', 1, 0, 'NONE') ,('2019-11-18', '1398-08-27', '1441-03-20', '2019-11-18', 27, 20, 8, 3, 'monday ', 2, 0, 'NONE') ,('2019-11-19', '1398-08-28', '1441-03-21', '2019-11-19', 28, 21, 8, 3, 'tuesday ', 3, 0, 'NONE') ,('2019-11-20', '1398-08-29', '1441-03-22', '2019-11-20', 29, 22, 8, 3, 'wednesday', 4, 0, 'NONE') ,('2019-11-21', '1398-08-30', '1441-03-23', '2019-11-21', 30, 23, 8, 3, 'thursday ', 5, 0, 'NONE') ,('2019-11-22', '1398-09-01', '1441-03-24', '2019-11-22', 1, 24, 9, 3, 'friday ', 6, 0, 'NONE') ,('2019-11-23', '1398-09-02', '1441-03-25', '2019-11-23', 2, 25, 9, 3, 'saturday ', 7, 0, 'NONE') ,('2019-11-24', '1398-09-03', '1441-03-26', '2019-11-24', 3, 26, 9, 3, 'sunday ', 1, 0, 'NONE') ,('2019-11-25', '1398-09-04', '1441-03-27', '2019-11-25', 4, 27, 9, 3, 'monday ', 2, 0, 'NONE') ,('2019-11-26', '1398-09-05', '1441-03-28', '2019-11-26', 5, 28, 9, 3, 'tuesday ', 3, 0, 'NONE') ,('2019-11-27', '1398-09-06', '1441-03-29', '2019-11-27', 6, 29, 9, 3, 'wednesday', 4, 0, 'NONE') ,('2019-11-28', '1398-09-07', '1441-03-30', '2019-11-28', 7, 30, 9, 3, 'thursday ', 5, 0, 'NONE') ,('2019-11-29', '1398-09-08', '1441-04-01', '2019-11-29', 8, 1, 9, 4, 'friday ', 6, 0, 'NONE') ,('2019-11-30', '1398-09-09', '1441-04-02', '2019-11-30', 9, 2, 9, 4, 'saturday ', 7, 0, 'NONE') ,('2019-12-01', '1398-09-10', '1441-04-03', '2019-12-01', 10, 3, 9, 4, 'sunday ', 1, 0, 'NONE') ,('2019-12-02', '1398-09-11', '1441-04-04', '2019-12-02', 11, 4, 9, 4, 'monday ', 2, 0, 'NONE') ,('2019-12-03', '1398-09-12', '1441-04-05', '2019-12-03', 12, 5, 9, 4, 'tuesday ', 3, 0, 'NONE') ,('2019-12-04', '1398-09-13', '1441-04-06', '2019-12-04', 13, 6, 9, 4, 'wednesday', 4, 0, 'NONE') ,('2019-12-05', '1398-09-14', '1441-04-07', '2019-12-05', 14, 7, 9, 4, 'thursday ', 5, 0, 'NONE') ,('2019-12-06', '1398-09-15', '1441-04-08', '2019-12-06', 15, 8, 9, 4, 'friday ', 6, 0, 'NONE') ,('2019-12-07', '1398-09-16', '1441-04-09', '2019-12-07', 16, 9, 9, 4, 'saturday ', 7, 0, 'NONE') ,('2019-12-08', '1398-09-17', '1441-04-10', '2019-12-08', 17, 10, 9, 4, 'sunday ', 1, 0, 'NONE') ,('2019-12-09', '1398-09-18', '1441-04-11', '2019-12-09', 18, 11, 9, 4, 'monday ', 2, 0, 'NONE') ,('2019-12-10', '1398-09-19', 
'1441-04-12', '2019-12-10', 19, 12, 9, 4, 'tuesday ', 3, 0, 'NONE') ,('2019-12-11', '1398-09-20', '1441-04-13', '2019-12-11', 20, 13, 9, 4, 'wednesday', 4, 0, 'NONE') ,('2019-12-12', '1398-09-21', '1441-04-14', '2019-12-12', 21, 14, 9, 4, 'thursday ', 5, 0, 'NONE') ,('2019-12-13', '1398-09-22', '1441-04-15', '2019-12-13', 22, 15, 9, 4, 'friday ', 6, 0, 'NONE') ,('2019-12-14', '1398-09-23', '1441-04-16', '2019-12-14', 23, 16, 9, 4, 'saturday ', 7, 0, 'NONE') ,('2019-12-15', '1398-09-24', '1441-04-17', '2019-12-15', 24, 17, 9, 4, 'sunday ', 1, 0, 'NONE') ,('2019-12-16', '1398-09-25', '1441-04-18', '2019-12-16', 25, 18, 9, 4, 'monday ', 2, 0, 'NONE') ,('2019-12-17', '1398-09-26', '1441-04-19', '2019-12-17', 26, 19, 9, 4, 'tuesday ', 3, 0, 'NONE') ,('2019-12-18', '1398-09-27', '1441-04-20', '2019-12-18', 27, 20, 9, 4, 'wednesday', 4, 0, 'NONE') ,('2019-12-19', '1398-09-28', '1441-04-21', '2019-12-19', 28, 21, 9, 4, 'thursday ', 5, 0, 'NONE') ,('2019-12-20', '1398-09-29', '1441-04-22', '2019-12-20', 29, 22, 9, 4, 'friday ', 6, 0, 'NONE') ,('2019-12-21', '1398-09-30', '1441-04-23', '2019-12-21', 30, 23, 9, 4, 'saturday ', 7, 0, 'NONE') ,('2019-12-22', '1398-10-01', '1441-04-24', '2019-12-22', 1, 24, 10, 4, 'sunday ', 1, 0, 'NONE') ,('2019-12-23', '1398-10-02', '1441-04-25', '2019-12-23', 2, 25, 10, 4, 'monday ', 2, 0, 'NONE') ,('2019-12-24', '1398-10-03', '1441-04-26', '2019-12-24', 3, 26, 10, 4, 'tuesday ', 3, 0, 'NONE') ,('2019-12-25', '1398-10-04', '1441-04-27', '2019-12-25', 4, 27, 10, 4, 'wednesday', 4, 0, 'NONE') ,('2019-12-26', '1398-10-05', '1441-04-28', '2019-12-26', 5, 28, 10, 4, 'thursday ', 5, 0, 'NONE') ,('2019-12-27', '1398-10-06', '1441-04-29', '2019-12-27', 6, 29, 10, 4, 'friday ', 6, 0, 'NONE') ,('2019-12-28', '1398-10-07', '1441-05-01', '2019-12-28', 7, 1, 10, 5, 'saturday ', 7, 0, 'NONE') ,('2019-12-29', '1398-10-08', '1441-05-02', '2019-12-29', 8, 2, 10, 5, 'sunday ', 1, 0, 'NONE') ,('2019-12-30', '1398-10-09', '1441-05-03', '2019-12-30', 9, 3, 10, 5, 'monday ', 2, 0, 'NONE') ,('2019-12-31', '1398-10-10', '1441-05-04', '2019-12-31', 10, 4, 10, 5, 'tuesday ', 3, 0, 'NONE') ,('2020-01-01', '1398-10-11', '1441-05-05', '2020-01-01', 11, 5, 10, 5, 'wednesday', 4, 0, 'NONE') ,('2020-01-02', '1398-10-12', '1441-05-06', '2020-01-02', 12, 6, 10, 5, 'thursday ', 5, 0, 'NONE') ,('2020-01-03', '1398-10-13', '1441-05-07', '2020-01-03', 13, 7, 10, 5, 'friday ', 6, 0, 'NONE') ,('2020-01-04', '1398-10-14', '1441-05-08', '2020-01-04', 14, 8, 10, 5, 'saturday ', 7, 0, 'NONE') ,('2020-01-05', '1398-10-15', '1441-05-09', '2020-01-05', 15, 9, 10, 5, 'sunday ', 1, 0, 'NONE') ,('2020-01-06', '1398-10-16', '1441-05-10', '2020-01-06', 16, 10, 10, 5, 'monday ', 2, 0, 'NONE') ,('2020-01-07', '1398-10-17', '1441-05-11', '2020-01-07', 17, 11, 10, 5, 'tuesday ', 3, 0, 'NONE') ,('2020-01-08', '1398-10-18', '1441-05-12', '2020-01-08', 18, 12, 10, 5, 'wednesday', 4, 0, 'NONE') ,('2020-01-09', '1398-10-19', '1441-05-13', '2020-01-09', 19, 13, 10, 5, 'thursday ', 5, 0, 'NONE') ,('2020-01-10', '1398-10-20', '1441-05-14', '2020-01-10', 20, 14, 10, 5, 'friday ', 6, 0, 'NONE') ,('2020-01-11', '1398-10-21', '1441-05-15', '2020-01-11', 21, 15, 10, 5, 'saturday ', 7, 0, 'NONE') ,('2020-01-12', '1398-10-22', '1441-05-16', '2020-01-12', 22, 16, 10, 5, 'sunday ', 1, 0, 'NONE') ,('2020-01-13', '1398-10-23', '1441-05-17', '2020-01-13', 23, 17, 10, 5, 'monday ', 2, 0, 'NONE') ,('2020-01-14', '1398-10-24', '1441-05-18', '2020-01-14', 24, 18, 10, 5, 'tuesday ', 3, 0, 'NONE') ,('2020-01-15', '1398-10-25', '1441-05-19', 
'2020-01-15', 25, 19, 10, 5, 'wednesday', 4, 0, 'NONE') ,('2020-01-16', '1398-10-26', '1441-05-20', '2020-01-16', 26, 20, 10, 5, 'thursday ', 5, 0, 'NONE') ,('2020-01-17', '1398-10-27', '1441-05-21', '2020-01-17', 27, 21, 10, 5, 'friday ', 6, 0, 'NONE') ,('2020-01-18', '1398-10-28', '1441-05-22', '2020-01-18', 28, 22, 10, 5, 'saturday ', 7, 0, 'NONE') ,('2020-01-19', '1398-10-29', '1441-05-23', '2020-01-19', 29, 23, 10, 5, 'sunday ', 1, 0, 'NONE') ,('2020-01-20', '1398-10-30', '1441-05-24', '2020-01-20', 30, 24, 10, 5, 'monday ', 2, 0, 'NONE') ,('2020-01-21', '1398-11-01', '1441-05-25', '2020-01-21', 1, 25, 11, 5, 'tuesday ', 3, 0, 'NONE') ,('2020-01-22', '1398-11-02', '1441-05-26', '2020-01-22', 2, 26, 11, 5, 'wednesday', 4, 0, 'NONE') ,('2020-01-23', '1398-11-03', '1441-05-27', '2020-01-23', 3, 27, 11, 5, 'thursday ', 5, 0, 'NONE') ,('2020-01-24', '1398-11-04', '1441-05-28', '2020-01-24', 4, 28, 11, 5, 'friday ', 6, 0, 'NONE') ,('2020-01-25', '1398-11-05', '1441-05-29', '2020-01-25', 5, 29, 11, 5, 'saturday ', 7, 0, 'NONE') ,('2020-01-26', '1398-11-06', '1441-05-30', '2020-01-26', 6, 30, 11, 5, 'sunday ', 1, 0, 'NONE') ,('2020-01-27', '1398-11-07', '1441-06-01', '2020-01-27', 7, 1, 11, 6, 'monday ', 2, 0, 'NONE') ,('2020-01-28', '1398-11-08', '1441-06-02', '2020-01-28', 8, 2, 11, 6, 'tuesday ', 3, 0, 'NONE') ,('2020-01-29', '1398-11-09', '1441-06-03', '2020-01-29', 9, 3, 11, 6, 'wednesday', 4, 0, 'NONE') ,('2020-01-30', '1398-11-10', '1441-06-04', '2020-01-30', 10, 4, 11, 6, 'thursday ', 5, 0, 'NONE') ,('2020-01-31', '1398-11-11', '1441-06-05', '2020-01-31', 11, 5, 11, 6, 'friday ', 6, 0, 'NONE') ,('2020-02-01', '1398-11-12', '1441-06-06', '2020-02-01', 12, 6, 11, 6, 'saturday ', 7, 0, 'NONE') ,('2020-02-02', '1398-11-13', '1441-06-07', '2020-02-02', 13, 7, 11, 6, 'sunday ', 1, 0, 'NONE') ,('2020-02-03', '1398-11-14', '1441-06-08', '2020-02-03', 14, 8, 11, 6, 'monday ', 2, 0, 'NONE') ,('2020-02-04', '1398-11-15', '1441-06-09', '2020-02-04', 15, 9, 11, 6, 'tuesday ', 3, 0, 'NONE') ,('2020-02-05', '1398-11-16', '1441-06-10', '2020-02-05', 16, 10, 11, 6, 'wednesday', 4, 0, 'NONE') ,('2020-02-06', '1398-11-17', '1441-06-11', '2020-02-06', 17, 11, 11, 6, 'thursday ', 5, 0, 'NONE') ,('2020-02-07', '1398-11-18', '1441-06-12', '2020-02-07', 18, 12, 11, 6, 'friday ', 6, 0, 'NONE') ,('2020-02-08', '1398-11-19', '1441-06-13', '2020-02-08', 19, 13, 11, 6, 'saturday ', 7, 0, 'NONE') ,('2020-02-09', '1398-11-20', '1441-06-14', '2020-02-09', 20, 14, 11, 6, 'sunday ', 1, 0, 'NONE') ,('2020-02-10', '1398-11-21', '1441-06-15', '2020-02-10', 21, 15, 11, 6, 'monday ', 2, 0, 'NONE') ,('2020-02-11', '1398-11-22', '1441-06-16', '2020-02-11', 22, 16, 11, 6, 'tuesday ', 3, 0, 'NONE') ,('2020-02-12', '1398-11-23', '1441-06-17', '2020-02-12', 23, 17, 11, 6, 'wednesday', 4, 0, 'NONE') ,('2020-02-13', '1398-11-24', '1441-06-18', '2020-02-13', 24, 18, 11, 6, 'thursday ', 5, 0, 'NONE') ,('2020-02-14', '1398-11-25', '1441-06-19', '2020-02-14', 25, 19, 11, 6, 'friday ', 6, 0, 'NONE') ,('2020-02-15', '1398-11-26', '1441-06-20', '2020-02-15', 26, 20, 11, 6, 'saturday ', 7, 0, 'NONE') ,('2020-02-16', '1398-11-27', '1441-06-21', '2020-02-16', 27, 21, 11, 6, 'sunday ', 1, 0, 'NONE') ,('2020-02-17', '1398-11-28', '1441-06-22', '2020-02-17', 28, 22, 11, 6, 'monday ', 2, 0, 'NONE') ,('2020-02-18', '1398-11-29', '1441-06-23', '2020-02-18', 29, 23, 11, 6, 'tuesday ', 3, 0, 'NONE') ,('2020-02-19', '1398-11-30', '1441-06-24', '2020-02-19', 30, 24, 11, 6, 'wednesday', 4, 0, 'NONE') ,('2020-02-20', '1398-12-01', '1441-06-25', 
'2020-02-20', 1, 25, 12, 6, 'thursday ', 5, 0, 'NONE') ,('2020-02-21', '1398-12-02', '1441-06-26', '2020-02-21', 2, 26, 12, 6, 'friday ', 6, 0, 'NONE') ,('2020-02-22', '1398-12-03', '1441-06-27', '2020-02-22', 3, 27, 12, 6, 'saturday ', 7, 0, 'NONE') ,('2020-02-23', '1398-12-04', '1441-06-28', '2020-02-23', 4, 28, 12, 6, 'sunday ', 1, 0, 'NONE') ,('2020-02-24', '1398-12-05', '1441-06-29', '2020-02-24', 5, 29, 12, 6, 'monday ', 2, 0, 'NONE') ,('2020-02-25', '1398-12-06', '1441-07-01', '2020-02-25', 6, 1, 12, 7, 'tuesday ', 3, 0, 'NONE') ,('2020-02-26', '1398-12-07', '1441-07-02', '2020-02-26', 7, 2, 12, 7, 'wednesday', 4, 0, 'NONE') ,('2020-02-27', '1398-12-08', '1441-07-03', '2020-02-27', 8, 3, 12, 7, 'thursday ', 5, 0, 'NONE') ,('2020-02-28', '1398-12-09', '1441-07-04', '2020-02-28', 9, 4, 12, 7, 'friday ', 6, 0, 'NONE') ,('2020-02-29', '1398-12-10', '1441-07-05', '2020-02-29', 10, 5, 12, 7, 'saturday ', 7, 0, 'NONE') ,('2020-03-01', '1398-12-11', '1441-07-06', '2020-03-01', 11, 6, 12, 7, 'sunday ', 1, 0, 'NONE') ,('2020-03-02', '1398-12-12', '1441-07-07', '2020-03-02', 12, 7, 12, 7, 'monday ', 2, 0, 'NONE') ,('2020-03-03', '1398-12-13', '1441-07-08', '2020-03-03', 13, 8, 12, 7, 'tuesday ', 3, 0, 'NONE') ,('2020-03-04', '1398-12-14', '1441-07-09', '2020-03-04', 14, 9, 12, 7, 'wednesday', 4, 0, 'NONE') ,('2020-03-05', '1398-12-15', '1441-07-10', '2020-03-05', 15, 10, 12, 7, 'thursday ', 5, 0, 'NONE') ,('2020-03-06', '1398-12-16', '1441-07-11', '2020-03-06', 16, 11, 12, 7, 'friday ', 6, 0, 'NONE') ,('2020-03-07', '1398-12-17', '1441-07-12', '2020-03-07', 17, 12, 12, 7, 'saturday ', 7, 0, 'NONE') ,('2020-03-08', '1398-12-18', '1441-07-13', '2020-03-08', 18, 13, 12, 7, 'sunday ', 1, 0, 'NONE') ,('2020-03-09', '1398-12-19', '1441-07-14', '2020-03-09', 19, 14, 12, 7, 'monday ', 2, 0, 'NONE') ,('2020-03-10', '1398-12-20', '1441-07-15', '2020-03-10', 20, 15, 12, 7, 'tuesday ', 3, 0, 'NONE') ,('2020-03-11', '1398-12-21', '1441-07-16', '2020-03-11', 21, 16, 12, 7, 'wednesday', 4, 0, 'NONE') ,('2020-03-12', '1398-12-22', '1441-07-17', '2020-03-12', 22, 17, 12, 7, 'thursday ', 5, 0, 'NONE') ,('2020-03-13', '1398-12-23', '1441-07-18', '2020-03-13', 23, 18, 12, 7, 'friday ', 6, 0, 'NONE') ,('2020-03-14', '1398-12-24', '1441-07-19', '2020-03-14', 24, 19, 12, 7, 'saturday ', 7, 0, 'NONE') ,('2020-03-15', '1398-12-25', '1441-07-20', '2020-03-15', 25, 20, 12, 7, 'sunday ', 1, 0, 'NONE') ,('2020-03-16', '1398-12-26', '1441-07-21', '2020-03-16', 26, 21, 12, 7, 'monday ', 2, 0, 'NONE') ,('2020-03-17', '1398-12-27', '1441-07-22', '2020-03-17', 27, 22, 12, 7, 'tuesday ', 3, 0, 'NONE') ,('2020-03-18', '1398-12-28', '1441-07-23', '2020-03-18', 28, 23, 12, 7, 'wednesday', 4, 0, 'NONE') ,('2020-03-19', '1398-12-29', '1441-07-24', '2020-03-19', 29, 24, 12, 7, 'thursday ', 5, 0, 'NONE') ,('2020-03-20', '1399-01-01', '1441-07-25', '2020-03-20', 1, 25, 1, 7, 'friday ', 6, 0, 'NONE') ,('2020-03-21', '1399-01-02', '1441-07-26', '2020-03-21', 2, 26, 1, 7, 'saturday ', 7, 0, 'NONE') ,('2020-03-22', '1399-01-03', '1441-07-27', '2020-03-22', 3, 27, 1, 7, 'sunday ', 1, 0, 'NONE') ,('2020-03-23', '1399-01-04', '1441-07-28', '2020-03-23', 4, 28, 1, 7, 'monday ', 2, 0, 'NONE') ,('2020-03-24', '1399-01-05', '1441-07-29', '2020-03-24', 5, 29, 1, 7, 'tuesday ', 3, 0, 'NONE') ,('2020-03-25', '1399-01-06', '1441-07-30', '2020-03-25', 6, 30, 1, 7, 'wednesday', 4, 0, 'NONE') ,('2020-03-26', '1399-01-07', '1441-08-01', '2020-03-26', 7, 1, 1, 8, 'thursday ', 5, 0, 'NONE') ,('2020-03-27', '1399-01-08', '1441-08-02', '2020-03-27', 8, 2, 1, 
8, 'friday ', 6, 0, 'NONE') ,('2020-03-28', '1399-01-09', '1441-08-03', '2020-03-28', 9, 3, 1, 8, 'saturday ', 7, 0, 'NONE') ,('2020-03-29', '1399-01-10', '1441-08-04', '2020-03-29', 10, 4, 1, 8, 'sunday ', 1, 0, 'NONE') ,('2020-03-30', '1399-01-11', '1441-08-05', '2020-03-30', 11, 5, 1, 8, 'monday ', 2, 0, 'NONE') ,('2020-03-31', '1399-01-12', '1441-08-06', '2020-03-31', 12, 6, 1, 8, 'tuesday ', 3, 0, 'NONE') ,('2020-04-01', '1399-01-13', '1441-08-07', '2020-04-01', 13, 7, 1, 8, 'wednesday', 4, 0, 'NONE') ,('2020-04-02', '1399-01-14', '1441-08-08', '2020-04-02', 14, 8, 1, 8, 'thursday ', 5, 0, 'NONE') ,('2020-04-03', '1399-01-15', '1441-08-09', '2020-04-03', 15, 9, 1, 8, 'friday ', 6, 0, 'NONE') ,('2020-04-04', '1399-01-16', '1441-08-10', '2020-04-04', 16, 10, 1, 8, 'saturday ', 7, 0, 'NONE') ,('2020-04-05', '1399-01-17', '1441-08-11', '2020-04-05', 17, 11, 1, 8, 'sunday ', 1, 0, 'NONE') ,('2020-04-06', '1399-01-18', '1441-08-12', '2020-04-06', 18, 12, 1, 8, 'monday ', 2, 0, 'NONE') ,('2020-04-07', '1399-01-19', '1441-08-13', '2020-04-07', 19, 13, 1, 8, 'tuesday ', 3, 0, 'NONE') ,('2020-04-08', '1399-01-20', '1441-08-14', '2020-04-08', 20, 14, 1, 8, 'wednesday', 4, 0, 'NONE') ,('2020-04-09', '1399-01-21', '1441-08-15', '2020-04-09', 21, 15, 1, 8, 'thursday ', 5, 0, 'NONE') ,('2020-04-10', '1399-01-22', '1441-08-16', '2020-04-10', 22, 16, 1, 8, 'friday ', 6, 0, 'NONE') ,('2020-04-11', '1399-01-23', '1441-08-17', '2020-04-11', 23, 17, 1, 8, 'saturday ', 7, 0, 'NONE') ,('2020-04-12', '1399-01-24', '1441-08-18', '2020-04-12', 24, 18, 1, 8, 'sunday ', 1, 0, 'NONE') ,('2020-04-13', '1399-01-25', '1441-08-19', '2020-04-13', 25, 19, 1, 8, 'monday ', 2, 0, 'NONE') ,('2020-04-14', '1399-01-26', '1441-08-20', '2020-04-14', 26, 20, 1, 8, 'tuesday ', 3, 0, 'NONE') ,('2020-04-15', '1399-01-27', '1441-08-21', '2020-04-15', 27, 21, 1, 8, 'wednesday', 4, 0, 'NONE') ,('2020-04-16', '1399-01-28', '1441-08-22', '2020-04-16', 28, 22, 1, 8, 'thursday ', 5, 0, 'NONE') ,('2020-04-17', '1399-01-29', '1441-08-23', '2020-04-17', 29, 23, 1, 8, 'friday ', 6, 0, 'NONE') ,('2020-04-18', '1399-01-30', '1441-08-24', '2020-04-18', 30, 24, 1, 8, 'saturday ', 7, 0, 'NONE') ,('2020-04-19', '1399-01-31', '1441-08-25', '2020-04-19', 31, 25, 1, 8, 'sunday ', 1, 0, 'NONE') ,('2020-04-20', '1399-02-01', '1441-08-26', '2020-04-20', 1, 26, 2, 8, 'monday ', 2, 0, 'NONE') ,('2020-04-21', '1399-02-02', '1441-08-27', '2020-04-21', 2, 27, 2, 8, 'tuesday ', 3, 0, 'NONE') ,('2020-04-22', '1399-02-03', '1441-08-28', '2020-04-22', 3, 28, 2, 8, 'wednesday', 4, 0, 'NONE') ,('2020-04-23', '1399-02-04', '1441-08-29', '2020-04-23', 4, 29, 2, 8, 'thursday ', 5, 0, 'NONE') ,('2020-04-24', '1399-02-05', '1441-09-01', '2020-04-24', 5, 1, 2, 9, 'friday ', 6, 0, 'NONE') ,('2020-04-25', '1399-02-06', '1441-09-02', '2020-04-25', 6, 2, 2, 9, 'saturday ', 7, 0, 'NONE') ,('2020-04-26', '1399-02-07', '1441-09-03', '2020-04-26', 7, 3, 2, 9, 'sunday ', 1, 0, 'NONE') ,('2020-04-27', '1399-02-08', '1441-09-04', '2020-04-27', 8, 4, 2, 9, 'monday ', 2, 0, 'NONE') ,('2020-04-28', '1399-02-09', '1441-09-05', '2020-04-28', 9, 5, 2, 9, 'tuesday ', 3, 0, 'NONE') ,('2020-04-29', '1399-02-10', '1441-09-06', '2020-04-29', 10, 6, 2, 9, 'wednesday', 4, 0, 'NONE') ,('2020-04-30', '1399-02-11', '1441-09-07', '2020-04-30', 11, 7, 2, 9, 'thursday ', 5, 0, 'NONE') ,('2020-05-01', '1399-02-12', '1441-09-08', '2020-05-01', 12, 8, 2, 9, 'friday ', 6, 0, 'NONE') ,('2020-05-02', '1399-02-13', '1441-09-09', '2020-05-02', 13, 9, 2, 9, 'saturday ', 7, 0, 'NONE') ,('2020-05-03', 
'1399-02-14', '1441-09-10', '2020-05-03', 14, 10, 2, 9, 'sunday ', 1, 0, 'NONE') ,('2020-05-04', '1399-02-15', '1441-09-11', '2020-05-04', 15, 11, 2, 9, 'monday ', 2, 0, 'NONE') ,('2020-05-05', '1399-02-16', '1441-09-12', '2020-05-05', 16, 12, 2, 9, 'tuesday ', 3, 0, 'NONE') ,('2020-05-06', '1399-02-17', '1441-09-13', '2020-05-06', 17, 13, 2, 9, 'wednesday', 4, 0, 'NONE') ,('2020-05-07', '1399-02-18', '1441-09-14', '2020-05-07', 18, 14, 2, 9, 'thursday ', 5, 0, 'NONE') ,('2020-05-08', '1399-02-19', '1441-09-15', '2020-05-08', 19, 15, 2, 9, 'friday ', 6, 0, 'NONE') ,('2020-05-09', '1399-02-20', '1441-09-16', '2020-05-09', 20, 16, 2, 9, 'saturday ', 7, 0, 'NONE') ,('2020-05-10', '1399-02-21', '1441-09-17', '2020-05-10', 21, 17, 2, 9, 'sunday ', 1, 0, 'NONE') ,('2020-05-11', '1399-02-22', '1441-09-18', '2020-05-11', 22, 18, 2, 9, 'monday ', 2, 0, 'NONE') ,('2020-05-12', '1399-02-23', '1441-09-19', '2020-05-12', 23, 19, 2, 9, 'tuesday ', 3, 0, 'NONE') ,('2020-05-13', '1399-02-24', '1441-09-20', '2020-05-13', 24, 20, 2, 9, 'wednesday', 4, 0, 'NONE') ,('2020-05-14', '1399-02-25', '1441-09-21', '2020-05-14', 25, 21, 2, 9, 'thursday ', 5, 0, 'NONE') ,('2020-05-15', '1399-02-26', '1441-09-22', '2020-05-15', 26, 22, 2, 9, 'friday ', 6, 0, 'NONE') ,('2020-05-16', '1399-02-27', '1441-09-23', '2020-05-16', 27, 23, 2, 9, 'saturday ', 7, 0, 'NONE') ,('2020-05-17', '1399-02-28', '1441-09-24', '2020-05-17', 28, 24, 2, 9, 'sunday ', 1, 0, 'NONE') ,('2020-05-18', '1399-02-29', '1441-09-25', '2020-05-18', 29, 25, 2, 9, 'monday ', 2, 0, 'NONE') ,('2020-05-19', '1399-02-30', '1441-09-26', '2020-05-19', 30, 26, 2, 9, 'tuesday ', 3, 0, 'NONE') ,('2020-05-20', '1399-02-31', '1441-09-27', '2020-05-20', 31, 27, 2, 9, 'wednesday', 4, 0, 'NONE') ,('2020-05-21', '1399-03-01', '1441-09-28', '2020-05-21', 1, 28, 3, 9, 'thursday ', 5, 0, 'NONE') ,('2020-05-22', '1399-03-02', '1441-09-29', '2020-05-22', 2, 29, 3, 9, 'friday ', 6, 0, 'NONE') ,('2020-05-23', '1399-03-03', '1441-09-30', '2020-05-23', 3, 30, 3, 9, 'saturday ', 7, 0, 'NONE') ,('2020-05-24', '1399-03-04', '1441-10-01', '2020-05-24', 4, 1, 3, 10, 'sunday ', 1, 0, 'NONE') ,('2020-05-25', '1399-03-05', '1441-10-02', '2020-05-25', 5, 2, 3, 10, 'monday ', 2, 0, 'NONE') ,('2020-05-26', '1399-03-06', '1441-10-03', '2020-05-26', 6, 3, 3, 10, 'tuesday ', 3, 0, 'NONE') ,('2020-05-27', '1399-03-07', '1441-10-04', '2020-05-27', 7, 4, 3, 10, 'wednesday', 4, 0, 'NONE') ,('2020-05-28', '1399-03-08', '1441-10-05', '2020-05-28', 8, 5, 3, 10, 'thursday ', 5, 0, 'NONE') ,('2020-05-29', '1399-03-09', '1441-10-06', '2020-05-29', 9, 6, 3, 10, 'friday ', 6, 0, 'NONE') ,('2020-05-30', '1399-03-10', '1441-10-07', '2020-05-30', 10, 7, 3, 10, 'saturday ', 7, 0, 'NONE') ,('2020-05-31', '1399-03-11', '1441-10-08', '2020-05-31', 11, 8, 3, 10, 'sunday ', 1, 0, 'NONE') ,('2020-06-01', '1399-03-12', '1441-10-09', '2020-06-01', 12, 9, 3, 10, 'monday ', 2, 0, 'NONE') ,('2020-06-02', '1399-03-13', '1441-10-10', '2020-06-02', 13, 10, 3, 10, 'tuesday ', 3, 0, 'NONE') ,('2020-06-03', '1399-03-14', '1441-10-11', '2020-06-03', 14, 11, 3, 10, 'wednesday', 4, 0, 'NONE') ,('2020-06-04', '1399-03-15', '1441-10-12', '2020-06-04', 15, 12, 3, 10, 'thursday ', 5, 0, 'NONE') ,('2020-06-05', '1399-03-16', '1441-10-13', '2020-06-05', 16, 13, 3, 10, 'friday ', 6, 0, 'NONE') ,('2020-06-06', '1399-03-17', '1441-10-14', '2020-06-06', 17, 14, 3, 10, 'saturday ', 7, 0, 'NONE') ,('2020-06-07', '1399-03-18', '1441-10-15', '2020-06-07', 18, 15, 3, 10, 'sunday ', 1, 0, 'NONE') ,('2020-06-08', '1399-03-19', '1441-10-16', 
'2020-06-08', 19, 16, 3, 10, 'monday ', 2, 0, 'NONE') ,('2020-06-09', '1399-03-20', '1441-10-17', '2020-06-09', 20, 17, 3, 10, 'tuesday ', 3, 0, 'NONE') ,('2020-06-10', '1399-03-21', '1441-10-18', '2020-06-10', 21, 18, 3, 10, 'wednesday', 4, 0, 'NONE') ,('2020-06-11', '1399-03-22', '1441-10-19', '2020-06-11', 22, 19, 3, 10, 'thursday ', 5, 0, 'NONE') ,('2020-06-12', '1399-03-23', '1441-10-20', '2020-06-12', 23, 20, 3, 10, 'friday ', 6, 0, 'NONE') ,('2020-06-13', '1399-03-24', '1441-10-21', '2020-06-13', 24, 21, 3, 10, 'saturday ', 7, 0, 'NONE') ,('2020-06-14', '1399-03-25', '1441-10-22', '2020-06-14', 25, 22, 3, 10, 'sunday ', 1, 0, 'NONE') ,('2020-06-15', '1399-03-26', '1441-10-23', '2020-06-15', 26, 23, 3, 10, 'monday ', 2, 0, 'NONE') ,('2020-06-16', '1399-03-27', '1441-10-24', '2020-06-16', 27, 24, 3, 10, 'tuesday ', 3, 0, 'NONE') ,('2020-06-17', '1399-03-28', '1441-10-25', '2020-06-17', 28, 25, 3, 10, 'wednesday', 4, 0, 'NONE') ,('2020-06-18', '1399-03-29', '1441-10-26', '2020-06-18', 29, 26, 3, 10, 'thursday ', 5, 0, 'NONE') ,('2020-06-19', '1399-03-30', '1441-10-27', '2020-06-19', 30, 27, 3, 10, 'friday ', 6, 0, 'NONE') ,('2020-06-20', '1399-03-31', '1441-10-28', '2020-06-20', 31, 28, 3, 10, 'saturday ', 7, 0, 'NONE') ,('2020-06-21', '1399-04-01', '1441-10-29', '2020-06-21', 1, 29, 4, 10, 'sunday ', 1, 0, 'NONE') ,('2020-06-22', '1399-04-02', '1441-11-01', '2020-06-22', 2, 1, 4, 11, 'monday ', 2, 0, 'NONE') ,('2020-06-23', '1399-04-03', '1441-11-02', '2020-06-23', 3, 2, 4, 11, 'tuesday ', 3, 0, 'NONE') ,('2020-06-24', '1399-04-04', '1441-11-03', '2020-06-24', 4, 3, 4, 11, 'wednesday', 4, 0, 'NONE') ,('2020-06-25', '1399-04-05', '1441-11-04', '2020-06-25', 5, 4, 4, 11, 'thursday ', 5, 0, 'NONE') ,('2020-06-26', '1399-04-06', '1441-11-05', '2020-06-26', 6, 5, 4, 11, 'friday ', 6, 0, 'NONE') ,('2020-06-27', '1399-04-07', '1441-11-06', '2020-06-27', 7, 6, 4, 11, 'saturday ', 7, 0, 'NONE') ,('2020-06-28', '1399-04-08', '1441-11-07', '2020-06-28', 8, 7, 4, 11, 'sunday ', 1, 0, 'NONE') ,('2020-06-29', '1399-04-09', '1441-11-08', '2020-06-29', 9, 8, 4, 11, 'monday ', 2, 0, 'NONE') ,('2020-06-30', '1399-04-10', '1441-11-09', '2020-06-30', 10, 9, 4, 11, 'tuesday ', 3, 0, 'NONE') ,('2020-07-01', '1399-04-11', '1441-11-10', '2020-07-01', 11, 10, 4, 11, 'wednesday', 4, 0, 'NONE') ,('2020-07-02', '1399-04-12', '1441-11-11', '2020-07-02', 12, 11, 4, 11, 'thursday ', 5, 0, 'NONE') ,('2020-07-03', '1399-04-13', '1441-11-12', '2020-07-03', 13, 12, 4, 11, 'friday ', 6, 0, 'NONE') ,('2020-07-04', '1399-04-14', '1441-11-13', '2020-07-04', 14, 13, 4, 11, 'saturday ', 7, 0, 'NONE') ,('2020-07-05', '1399-04-15', '1441-11-14', '2020-07-05', 15, 14, 4, 11, 'sunday ', 1, 0, 'NONE') ,('2020-07-06', '1399-04-16', '1441-11-15', '2020-07-06', 16, 15, 4, 11, 'monday ', 2, 0, 'NONE') ,('2020-07-07', '1399-04-17', '1441-11-16', '2020-07-07', 17, 16, 4, 11, 'tuesday ', 3, 0, 'NONE') ,('2020-07-08', '1399-04-18', '1441-11-17', '2020-07-08', 18, 17, 4, 11, 'wednesday', 4, 0, 'NONE') ,('2020-07-09', '1399-04-19', '1441-11-18', '2020-07-09', 19, 18, 4, 11, 'thursday ', 5, 0, 'NONE') ,('2020-07-10', '1399-04-20', '1441-11-19', '2020-07-10', 20, 19, 4, 11, 'friday ', 6, 0, 'NONE') ,('2020-07-11', '1399-04-21', '1441-11-20', '2020-07-11', 21, 20, 4, 11, 'saturday ', 7, 0, 'NONE') ,('2020-07-12', '1399-04-22', '1441-11-21', '2020-07-12', 22, 21, 4, 11, 'sunday ', 1, 0, 'NONE') ,('2020-07-13', '1399-04-23', '1441-11-22', '2020-07-13', 23, 22, 4, 11, 'monday ', 2, 0, 'NONE') ,('2020-07-14', '1399-04-24', '1441-11-23', 
'2020-07-14', 24, 23, 4, 11, 'tuesday ', 3, 0, 'NONE') ,('2020-07-15', '1399-04-25', '1441-11-24', '2020-07-15', 25, 24, 4, 11, 'wednesday', 4, 0, 'NONE') ,('2020-07-16', '1399-04-26', '1441-11-25', '2020-07-16', 26, 25, 4, 11, 'thursday ', 5, 0, 'NONE') ,('2020-07-17', '1399-04-27', '1441-11-26', '2020-07-17', 27, 26, 4, 11, 'friday ', 6, 0, 'NONE') ,('2020-07-18', '1399-04-28', '1441-11-27', '2020-07-18', 28, 27, 4, 11, 'saturday ', 7, 0, 'NONE') ,('2020-07-19', '1399-04-29', '1441-11-28', '2020-07-19', 29, 28, 4, 11, 'sunday ', 1, 0, 'NONE') ,('2020-07-20', '1399-04-30', '1441-11-29', '2020-07-20', 30, 29, 4, 11, 'monday ', 2, 0, 'NONE') ,('2020-07-21', '1399-04-31', '1441-11-30', '2020-07-21', 31, 30, 4, 11, 'tuesday ', 3, 0, 'NONE') ,('2020-07-22', '1399-05-01', '1441-12-01', '2020-07-22', 1, 1, 5, 12, 'wednesday', 4, 0, 'NONE') ,('2020-07-23', '1399-05-02', '1441-12-02', '2020-07-23', 2, 2, 5, 12, 'thursday ', 5, 0, 'NONE') ,('2020-07-24', '1399-05-03', '1441-12-03', '2020-07-24', 3, 3, 5, 12, 'friday ', 6, 0, 'NONE') ,('2020-07-25', '1399-05-04', '1441-12-04', '2020-07-25', 4, 4, 5, 12, 'saturday ', 7, 0, 'NONE') ,('2020-07-26', '1399-05-05', '1441-12-05', '2020-07-26', 5, 5, 5, 12, 'sunday ', 1, 0, 'NONE') ,('2020-07-27', '1399-05-06', '1441-12-06', '2020-07-27', 6, 6, 5, 12, 'monday ', 2, 0, 'NONE') ,('2020-07-28', '1399-05-07', '1441-12-07', '2020-07-28', 7, 7, 5, 12, 'tuesday ', 3, 0, 'NONE') ,('2020-07-29', '1399-05-08', '1441-12-08', '2020-07-29', 8, 8, 5, 12, 'wednesday', 4, 0, 'NONE') ,('2020-07-30', '1399-05-09', '1441-12-09', '2020-07-30', 9, 9, 5, 12, 'thursday ', 5, 0, 'NONE') ,('2020-07-31', '1399-05-10', '1441-12-10', '2020-07-31', 10, 10, 5, 12, 'friday ', 6, 0, 'NONE') ,('2020-08-01', '1399-05-11', '1441-12-11', '2020-08-01', 11, 11, 5, 12, 'saturday ', 7, 0, 'NONE') ,('2020-08-02', '1399-05-12', '1441-12-12', '2020-08-02', 12, 12, 5, 12, 'sunday ', 1, 0, 'NONE') ,('2020-08-03', '1399-05-13', '1441-12-13', '2020-08-03', 13, 13, 5, 12, 'monday ', 2, 0, 'NONE') ,('2020-08-04', '1399-05-14', '1441-12-14', '2020-08-04', 14, 14, 5, 12, 'tuesday ', 3, 0, 'NONE') ,('2020-08-05', '1399-05-15', '1441-12-15', '2020-08-05', 15, 15, 5, 12, 'wednesday', 4, 0, 'NONE') ,('2020-08-06', '1399-05-16', '1441-12-16', '2020-08-06', 16, 16, 5, 12, 'thursday ', 5, 0, 'NONE') ,('2020-08-07', '1399-05-17', '1441-12-17', '2020-08-07', 17, 17, 5, 12, 'friday ', 6, 0, 'NONE') ,('2020-08-08', '1399-05-18', '1441-12-18', '2020-08-08', 18, 18, 5, 12, 'saturday ', 7, 0, 'NONE') ,('2020-08-09', '1399-05-19', '1441-12-19', '2020-08-09', 19, 19, 5, 12, 'sunday ', 1, 0, 'NONE') ,('2020-08-10', '1399-05-20', '1441-12-20', '2020-08-10', 20, 20, 5, 12, 'monday ', 2, 0, 'NONE') ,('2020-08-11', '1399-05-21', '1441-12-21', '2020-08-11', 21, 21, 5, 12, 'tuesday ', 3, 0, 'NONE') ,('2020-08-12', '1399-05-22', '1441-12-22', '2020-08-12', 22, 22, 5, 12, 'wednesday', 4, 0, 'NONE') ,('2020-08-13', '1399-05-23', '1441-12-23', '2020-08-13', 23, 23, 5, 12, 'thursday ', 5, 0, 'NONE') ,('2020-08-14', '1399-05-24', '1441-12-24', '2020-08-14', 24, 24, 5, 12, 'friday ', 6, 0, 'NONE') ,('2020-08-15', '1399-05-25', '1441-12-25', '2020-08-15', 25, 25, 5, 12, 'saturday ', 7, 0, 'NONE') ,('2020-08-16', '1399-05-26', '1441-12-26', '2020-08-16', 26, 26, 5, 12, 'sunday ', 1, 0, 'NONE') ,('2020-08-17', '1399-05-27', '1441-12-27', '2020-08-17', 27, 27, 5, 12, 'monday ', 2, 0, 'NONE') ,('2020-08-18', '1399-05-28', '1441-12-28', '2020-08-18', 28, 28, 5, 12, 'tuesday ', 3, 0, 'NONE') ,('2020-08-19', '1399-05-29', '1441-12-29', 
'2020-08-19', 29, 29, 5, 12, 'wednesday', 4, 0, 'NONE') ,('2020-08-20', '1399-05-30', '1442-01-01', '2020-08-20', 30, 1, 5, 1, 'thursday ', 5, 0, 'NONE') ,('2020-08-21', '1399-05-31', '1442-01-02', '2020-08-21', 31, 2, 5, 1, 'friday ', 6, 0, 'NONE') ,('2020-08-22', '1399-06-01', '1442-01-03', '2020-08-22', 1, 3, 6, 1, 'saturday ', 7, 0, 'NONE') ,('2020-08-23', '1399-06-02', '1442-01-04', '2020-08-23', 2, 4, 6, 1, 'sunday ', 1, 0, 'NONE') ,('2020-08-24', '1399-06-03', '1442-01-05', '2020-08-24', 3, 5, 6, 1, 'monday ', 2, 0, 'NONE') ,('2020-08-25', '1399-06-04', '1442-01-06', '2020-08-25', 4, 6, 6, 1, 'tuesday ', 3, 0, 'NONE') ,('2020-08-26', '1399-06-05', '1442-01-07', '2020-08-26', 5, 7, 6, 1, 'wednesday', 4, 0, 'NONE') ,('2020-08-27', '1399-06-06', '1442-01-08', '2020-08-27', 6, 8, 6, 1, 'thursday ', 5, 0, 'NONE') ,('2020-08-28', '1399-06-07', '1442-01-09', '2020-08-28', 7, 9, 6, 1, 'friday ', 6, 0, 'NONE') ,('2020-08-29', '1399-06-08', '1442-01-10', '2020-08-29', 8, 10, 6, 1, 'saturday ', 7, 0, 'NONE') ,('2020-08-30', '1399-06-09', '1442-01-11', '2020-08-30', 9, 11, 6, 1, 'sunday ', 1, 0, 'NONE') ,('2020-08-31', '1399-06-10', '1442-01-12', '2020-08-31', 10, 12, 6, 1, 'monday ', 2, 0, 'NONE') ,('2020-09-01', '1399-06-11', '1442-01-13', '2020-09-01', 11, 13, 6, 1, 'tuesday ', 3, 0, 'NONE') ,('2020-09-02', '1399-06-12', '1442-01-14', '2020-09-02', 12, 14, 6, 1, 'wednesday', 4, 0, 'NONE') ,('2020-09-03', '1399-06-13', '1442-01-15', '2020-09-03', 13, 15, 6, 1, 'thursday ', 5, 0, 'NONE') ,('2020-09-04', '1399-06-14', '1442-01-16', '2020-09-04', 14, 16, 6, 1, 'friday ', 6, 0, 'NONE') ,('2020-09-05', '1399-06-15', '1442-01-17', '2020-09-05', 15, 17, 6, 1, 'saturday ', 7, 0, 'NONE') ,('2020-09-06', '1399-06-16', '1442-01-18', '2020-09-06', 16, 18, 6, 1, 'sunday ', 1, 0, 'NONE') ,('2020-09-07', '1399-06-17', '1442-01-19', '2020-09-07', 17, 19, 6, 1, 'monday ', 2, 0, 'NONE') ,('2020-09-08', '1399-06-18', '1442-01-20', '2020-09-08', 18, 20, 6, 1, 'tuesday ', 3, 0, 'NONE') ,('2020-09-09', '1399-06-19', '1442-01-21', '2020-09-09', 19, 21, 6, 1, 'wednesday', 4, 0, 'NONE') ,('2020-09-10', '1399-06-20', '1442-01-22', '2020-09-10', 20, 22, 6, 1, 'thursday ', 5, 0, 'NONE') ,('2020-09-11', '1399-06-21', '1442-01-23', '2020-09-11', 21, 23, 6, 1, 'friday ', 6, 0, 'NONE') ,('2020-09-12', '1399-06-22', '1442-01-24', '2020-09-12', 22, 24, 6, 1, 'saturday ', 7, 0, 'NONE') ,('2020-09-13', '1399-06-23', '1442-01-25', '2020-09-13', 23, 25, 6, 1, 'sunday ', 1, 0, 'NONE') ,('2020-09-14', '1399-06-24', '1442-01-26', '2020-09-14', 24, 26, 6, 1, 'monday ', 2, 0, 'NONE') ,('2020-09-15', '1399-06-25', '1442-01-27', '2020-09-15', 25, 27, 6, 1, 'tuesday ', 3, 0, 'NONE') ,('2020-09-16', '1399-06-26', '1442-01-28', '2020-09-16', 26, 28, 6, 1, 'wednesday', 4, 0, 'NONE') ,('2020-09-17', '1399-06-27', '1442-01-29', '2020-09-17', 27, 29, 6, 1, 'thursday ', 5, 0, 'NONE') ,('2020-09-18', '1399-06-28', '1442-01-30', '2020-09-18', 28, 30, 6, 1, 'friday ', 6, 0, 'NONE') ,('2020-09-19', '1399-06-29', '1442-02-01', '2020-09-19', 29, 1, 6, 2, 'saturday ', 7, 0, 'NONE') ,('2020-09-20', '1399-06-30', '1442-02-02', '2020-09-20', 30, 2, 6, 2, 'sunday ', 1, 0, 'NONE') ,('2020-09-21', '1399-06-31', '1442-02-03', '2020-09-21', 31, 3, 6, 2, 'monday ', 2, 0, 'NONE') ,('2020-09-22', '1399-07-01', '1442-02-04', '2020-09-22', 1, 4, 7, 2, 'tuesday ', 3, 0, 'NONE') ,('2020-09-23', '1399-07-02', '1442-02-05', '2020-09-23', 2, 5, 7, 2, 'wednesday', 4, 0, 'NONE') ,('2020-09-24', '1399-07-03', '1442-02-06', '2020-09-24', 3, 6, 7, 2, 'thursday ', 5, 0, 
'NONE') ,('2020-09-25', '1399-07-04', '1442-02-07', '2020-09-25', 4, 7, 7, 2, 'friday ', 6, 0, 'NONE') ,('2020-09-26', '1399-07-05', '1442-02-08', '2020-09-26', 5, 8, 7, 2, 'saturday ', 7, 0, 'NONE') ,('2020-09-27', '1399-07-06', '1442-02-09', '2020-09-27', 6, 9, 7, 2, 'sunday ', 1, 0, 'NONE') ,('2020-09-28', '1399-07-07', '1442-02-10', '2020-09-28', 7, 10, 7, 2, 'monday ', 2, 0, 'NONE') ,('2020-09-29', '1399-07-08', '1442-02-11', '2020-09-29', 8, 11, 7, 2, 'tuesday ', 3, 0, 'NONE') ,('2020-09-30', '1399-07-09', '1442-02-12', '2020-09-30', 9, 12, 7, 2, 'wednesday', 4, 0, 'NONE') ,('2020-10-01', '1399-07-10', '1442-02-13', '2020-10-01', 10, 13, 7, 2, 'thursday ', 5, 0, 'NONE') ,('2020-10-02', '1399-07-11', '1442-02-14', '2020-10-02', 11, 14, 7, 2, 'friday ', 6, 0, 'NONE') ,('2020-10-03', '1399-07-12', '1442-02-15', '2020-10-03', 12, 15, 7, 2, 'saturday ', 7, 0, 'NONE') ,('2020-10-04', '1399-07-13', '1442-02-16', '2020-10-04', 13, 16, 7, 2, 'sunday ', 1, 0, 'NONE') ,('2020-10-05', '1399-07-14', '1442-02-17', '2020-10-05', 14, 17, 7, 2, 'monday ', 2, 0, 'NONE') ,('2020-10-06', '1399-07-15', '1442-02-18', '2020-10-06', 15, 18, 7, 2, 'tuesday ', 3, 0, 'NONE') ,('2020-10-07', '1399-07-16', '1442-02-19', '2020-10-07', 16, 19, 7, 2, 'wednesday', 4, 0, 'NONE') ,('2020-10-08', '1399-07-17', '1442-02-20', '2020-10-08', 17, 20, 7, 2, 'thursday ', 5, 0, 'NONE') ,('2020-10-09', '1399-07-18', '1442-02-21', '2020-10-09', 18, 21, 7, 2, 'friday ', 6, 0, 'NONE') ,('2020-10-10', '1399-07-19', '1442-02-22', '2020-10-10', 19, 22, 7, 2, 'saturday ', 7, 0, 'NONE') ,('2020-10-11', '1399-07-20', '1442-02-23', '2020-10-11', 20, 23, 7, 2, 'sunday ', 1, 0, 'NONE') ,('2020-10-12', '1399-07-21', '1442-02-24', '2020-10-12', 21, 24, 7, 2, 'monday ', 2, 0, 'NONE') ,('2020-10-13', '1399-07-22', '1442-02-25', '2020-10-13', 22, 25, 7, 2, 'tuesday ', 3, 0, 'NONE') ,('2020-10-14', '1399-07-23', '1442-02-26', '2020-10-14', 23, 26, 7, 2, 'wednesday', 4, 0, 'NONE') ,('2020-10-15', '1399-07-24', '1442-02-27', '2020-10-15', 24, 27, 7, 2, 'thursday ', 5, 0, 'NONE') ,('2020-10-16', '1399-07-25', '1442-02-28', '2020-10-16', 25, 28, 7, 2, 'friday ', 6, 0, 'NONE') ,('2020-10-17', '1399-07-26', '1442-02-29', '2020-10-17', 26, 29, 7, 2, 'saturday ', 7, 0, 'NONE') ,('2020-10-18', '1399-07-27', '1442-03-01', '2020-10-18', 27, 1, 7, 3, 'sunday ', 1, 0, 'NONE') ,('2020-10-19', '1399-07-28', '1442-03-02', '2020-10-19', 28, 2, 7, 3, 'monday ', 2, 0, 'NONE') ,('2020-10-20', '1399-07-29', '1442-03-03', '2020-10-20', 29, 3, 7, 3, 'tuesday ', 3, 0, 'NONE') ,('2020-10-21', '1399-07-30', '1442-03-04', '2020-10-21', 30, 4, 7, 3, 'wednesday', 4, 0, 'NONE') ,('2020-10-22', '1399-08-01', '1442-03-05', '2020-10-22', 1, 5, 8, 3, 'thursday ', 5, 0, 'NONE') ,('2020-10-23', '1399-08-02', '1442-03-06', '2020-10-23', 2, 6, 8, 3, 'friday ', 6, 0, 'NONE') ,('2020-10-24', '1399-08-03', '1442-03-07', '2020-10-24', 3, 7, 8, 3, 'saturday ', 7, 0, 'NONE') ,('2020-10-25', '1399-08-04', '1442-03-08', '2020-10-25', 4, 8, 8, 3, 'sunday ', 1, 0, 'NONE') ,('2020-10-26', '1399-08-05', '1442-03-09', '2020-10-26', 5, 9, 8, 3, 'monday ', 2, 0, 'NONE') ,('2020-10-27', '1399-08-06', '1442-03-10', '2020-10-27', 6, 10, 8, 3, 'tuesday ', 3, 0, 'NONE') ,('2020-10-28', '1399-08-07', '1442-03-11', '2020-10-28', 7, 11, 8, 3, 'wednesday', 4, 0, 'NONE') ,('2020-10-29', '1399-08-08', '1442-03-12', '2020-10-29', 8, 12, 8, 3, 'thursday ', 5, 0, 'NONE') ,('2020-10-30', '1399-08-09', '1442-03-13', '2020-10-30', 9, 13, 8, 3, 'friday ', 6, 0, 'NONE') ,('2020-10-31', '1399-08-10', '1442-03-14', 
'2020-10-31', 10, 14, 8, 3, 'saturday ', 7, 0, 'NONE') ,('2020-11-01', '1399-08-11', '1442-03-15', '2020-11-01', 11, 15, 8, 3, 'sunday ', 1, 0, 'NONE') ,('2020-11-02', '1399-08-12', '1442-03-16', '2020-11-02', 12, 16, 8, 3, 'monday ', 2, 0, 'NONE') ,('2020-11-03', '1399-08-13', '1442-03-17', '2020-11-03', 13, 17, 8, 3, 'tuesday ', 3, 0, 'NONE') ,('2020-11-04', '1399-08-14', '1442-03-18', '2020-11-04', 14, 18, 8, 3, 'wednesday', 4, 0, 'NONE') ,('2020-11-05', '1399-08-15', '1442-03-19', '2020-11-05', 15, 19, 8, 3, 'thursday ', 5, 0, 'NONE') ,('2020-11-06', '1399-08-16', '1442-03-20', '2020-11-06', 16, 20, 8, 3, 'friday ', 6, 0, 'NONE') ,('2020-11-07', '1399-08-17', '1442-03-21', '2020-11-07', 17, 21, 8, 3, 'saturday ', 7, 0, 'NONE') ,('2020-11-08', '1399-08-18', '1442-03-22', '2020-11-08', 18, 22, 8, 3, 'sunday ', 1, 0, 'NONE') ,('2020-11-09', '1399-08-19', '1442-03-23', '2020-11-09', 19, 23, 8, 3, 'monday ', 2, 0, 'NONE') ,('2020-11-10', '1399-08-20', '1442-03-24', '2020-11-10', 20, 24, 8, 3, 'tuesday ', 3, 0, 'NONE') ,('2020-11-11', '1399-08-21', '1442-03-25', '2020-11-11', 21, 25, 8, 3, 'wednesday', 4, 0, 'NONE') ,('2020-11-12', '1399-08-22', '1442-03-26', '2020-11-12', 22, 26, 8, 3, 'thursday ', 5, 0, 'NONE') ,('2020-11-13', '1399-08-23', '1442-03-27', '2020-11-13', 23, 27, 8, 3, 'friday ', 6, 0, 'NONE') ,('2020-11-14', '1399-08-24', '1442-03-28', '2020-11-14', 24, 28, 8, 3, 'saturday ', 7, 0, 'NONE') ,('2020-11-15', '1399-08-25', '1442-03-29', '2020-11-15', 25, 29, 8, 3, 'sunday ', 1, 0, 'NONE') ,('2020-11-16', '1399-08-26', '1442-03-30', '2020-11-16', 26, 30, 8, 3, 'monday ', 2, 0, 'NONE') ,('2020-11-17', '1399-08-27', '1442-04-01', '2020-11-17', 27, 1, 8, 4, 'tuesday ', 3, 0, 'NONE') ,('2020-11-18', '1399-08-28', '1442-04-02', '2020-11-18', 28, 2, 8, 4, 'wednesday', 4, 0, 'NONE') ,('2020-11-19', '1399-08-29', '1442-04-03', '2020-11-19', 29, 3, 8, 4, 'thursday ', 5, 0, 'NONE') ,('2020-11-20', '1399-08-30', '1442-04-04', '2020-11-20', 30, 4, 8, 4, 'friday ', 6, 0, 'NONE') ,('2020-11-21', '1399-09-01', '1442-04-05', '2020-11-21', 1, 5, 9, 4, 'saturday ', 7, 0, 'NONE') ,('2020-11-22', '1399-09-02', '1442-04-06', '2020-11-22', 2, 6, 9, 4, 'sunday ', 1, 0, 'NONE') ,('2020-11-23', '1399-09-03', '1442-04-07', '2020-11-23', 3, 7, 9, 4, 'monday ', 2, 0, 'NONE') ,('2020-11-24', '1399-09-04', '1442-04-08', '2020-11-24', 4, 8, 9, 4, 'tuesday ', 3, 0, 'NONE') ,('2020-11-25', '1399-09-05', '1442-04-09', '2020-11-25', 5, 9, 9, 4, 'wednesday', 4, 0, 'NONE') ,('2020-11-26', '1399-09-06', '1442-04-10', '2020-11-26', 6, 10, 9, 4, 'thursday ', 5, 0, 'NONE') ,('2020-11-27', '1399-09-07', '1442-04-11', '2020-11-27', 7, 11, 9, 4, 'friday ', 6, 0, 'NONE') ,('2020-11-28', '1399-09-08', '1442-04-12', '2020-11-28', 8, 12, 9, 4, 'saturday ', 7, 0, 'NONE') ,('2020-11-29', '1399-09-09', '1442-04-13', '2020-11-29', 9, 13, 9, 4, 'sunday ', 1, 0, 'NONE') ,('2020-11-30', '1399-09-10', '1442-04-14', '2020-11-30', 10, 14, 9, 4, 'monday ', 2, 0, 'NONE') ,('2020-12-01', '1399-09-11', '1442-04-15', '2020-12-01', 11, 15, 9, 4, 'tuesday ', 3, 0, 'NONE') ,('2020-12-02', '1399-09-12', '1442-04-16', '2020-12-02', 12, 16, 9, 4, 'wednesday', 4, 0, 'NONE') ,('2020-12-03', '1399-09-13', '1442-04-17', '2020-12-03', 13, 17, 9, 4, 'thursday ', 5, 0, 'NONE') ,('2020-12-04', '1399-09-14', '1442-04-18', '2020-12-04', 14, 18, 9, 4, 'friday ', 6, 0, 'NONE') ,('2020-12-05', '1399-09-15', '1442-04-19', '2020-12-05', 15, 19, 9, 4, 'saturday ', 7, 0, 'NONE') ,('2020-12-06', '1399-09-16', '1442-04-20', '2020-12-06', 16, 20, 9, 4, 'sunday ', 1, 
0, 'NONE') ,('2020-12-07', '1399-09-17', '1442-04-21', '2020-12-07', 17, 21, 9, 4, 'monday ', 2, 0, 'NONE') ,('2020-12-08', '1399-09-18', '1442-04-22', '2020-12-08', 18, 22, 9, 4, 'tuesday ', 3, 0, 'NONE') ,('2020-12-09', '1399-09-19', '1442-04-23', '2020-12-09', 19, 23, 9, 4, 'wednesday', 4, 0, 'NONE') ,('2020-12-10', '1399-09-20', '1442-04-24', '2020-12-10', 20, 24, 9, 4, 'thursday ', 5, 0, 'NONE') ,('2020-12-11', '1399-09-21', '1442-04-25', '2020-12-11', 21, 25, 9, 4, 'friday ', 6, 0, 'NONE') ,('2020-12-12', '1399-09-22', '1442-04-26', '2020-12-12', 22, 26, 9, 4, 'saturday ', 7, 0, 'NONE') ,('2020-12-13', '1399-09-23', '1442-04-27', '2020-12-13', 23, 27, 9, 4, 'sunday ', 1, 0, 'NONE') ,('2020-12-14', '1399-09-24', '1442-04-28', '2020-12-14', 24, 28, 9, 4, 'monday ', 2, 0, 'NONE') ,('2020-12-15', '1399-09-25', '1442-04-29', '2020-12-15', 25, 29, 9, 4, 'tuesday ', 3, 0, 'NONE') ,('2020-12-16', '1399-09-26', '1442-05-01', '2020-12-16', 26, 1, 9, 5, 'wednesday', 4, 0, 'NONE') ,('2020-12-17', '1399-09-27', '1442-05-02', '2020-12-17', 27, 2, 9, 5, 'thursday ', 5, 0, 'NONE') ,('2020-12-18', '1399-09-28', '1442-05-03', '2020-12-18', 28, 3, 9, 5, 'friday ', 6, 0, 'NONE') ,('2020-12-19', '1399-09-29', '1442-05-04', '2020-12-19', 29, 4, 9, 5, 'saturday ', 7, 0, 'NONE') ,('2020-12-20', '1399-09-30', '1442-05-05', '2020-12-20', 30, 5, 9, 5, 'sunday ', 1, 0, 'NONE') ,('2020-12-21', '1399-10-01', '1442-05-06', '2020-12-21', 1, 6, 10, 5, 'monday ', 2, 0, 'NONE') ,('2020-12-22', '1399-10-02', '1442-05-07', '2020-12-22', 2, 7, 10, 5, 'tuesday ', 3, 0, 'NONE') ,('2020-12-23', '1399-10-03', '1442-05-08', '2020-12-23', 3, 8, 10, 5, 'wednesday', 4, 0, 'NONE') ,('2020-12-24', '1399-10-04', '1442-05-09', '2020-12-24', 4, 9, 10, 5, 'thursday ', 5, 0, 'NONE') ,('2020-12-25', '1399-10-05', '1442-05-10', '2020-12-25', 5, 10, 10, 5, 'friday ', 6, 0, 'NONE') ,('2020-12-26', '1399-10-06', '1442-05-11', '2020-12-26', 6, 11, 10, 5, 'saturday ', 7, 0, 'NONE') ,('2020-12-27', '1399-10-07', '1442-05-12', '2020-12-27', 7, 12, 10, 5, 'sunday ', 1, 0, 'NONE') ,('2020-12-28', '1399-10-08', '1442-05-13', '2020-12-28', 8, 13, 10, 5, 'monday ', 2, 0, 'NONE') ,('2020-12-29', '1399-10-09', '1442-05-14', '2020-12-29', 9, 14, 10, 5, 'tuesday ', 3, 0, 'NONE') ,('2020-12-30', '1399-10-10', '1442-05-15', '2020-12-30', 10, 15, 10, 5, 'wednesday', 4, 0, 'NONE') ,('2020-12-31', '1399-10-11', '1442-05-16', '2020-12-31', 11, 16, 10, 5, 'thursday ', 5, 0, 'NONE') ,('2021-01-01', '1399-10-12', '1442-05-17', '2021-01-01', 12, 17, 10, 5, 'friday ', 6, 0, 'NONE') ,('2021-01-02', '1399-10-13', '1442-05-18', '2021-01-02', 13, 18, 10, 5, 'saturday ', 7, 0, 'NONE') ,('2021-01-03', '1399-10-14', '1442-05-19', '2021-01-03', 14, 19, 10, 5, 'sunday ', 1, 0, 'NONE') ,('2021-01-04', '1399-10-15', '1442-05-20', '2021-01-04', 15, 20, 10, 5, 'monday ', 2, 0, 'NONE') ,('2021-01-05', '1399-10-16', '1442-05-21', '2021-01-05', 16, 21, 10, 5, 'tuesday ', 3, 0, 'NONE') ,('2021-01-06', '1399-10-17', '1442-05-22', '2021-01-06', 17, 22, 10, 5, 'wednesday', 4, 0, 'NONE') ,('2021-01-07', '1399-10-18', '1442-05-23', '2021-01-07', 18, 23, 10, 5, 'thursday ', 5, 0, 'NONE') ,('2021-01-08', '1399-10-19', '1442-05-24', '2021-01-08', 19, 24, 10, 5, 'friday ', 6, 0, 'NONE') ,('2021-01-09', '1399-10-20', '1442-05-25', '2021-01-09', 20, 25, 10, 5, 'saturday ', 7, 0, 'NONE') ,('2021-01-10', '1399-10-21', '1442-05-26', '2021-01-10', 21, 26, 10, 5, 'sunday ', 1, 0, 'NONE') ,('2021-01-11', '1399-10-22', '1442-05-27', '2021-01-11', 22, 27, 10, 5, 'monday ', 2, 0, 'NONE') 
,('2021-01-12', '1399-10-23', '1442-05-28', '2021-01-12', 23, 28, 10, 5, 'tuesday ', 3, 0, 'NONE') ,('2021-01-13', '1399-10-24', '1442-05-29', '2021-01-13', 24, 29, 10, 5, 'wednesday', 4, 0, 'NONE') ,('2021-01-14', '1399-10-25', '1442-05-30', '2021-01-14', 25, 30, 10, 5, 'thursday ', 5, 0, 'NONE') ,('2021-01-15', '1399-10-26', '1442-06-01', '2021-01-15', 26, 1, 10, 6, 'friday ', 6, 0, 'NONE') ,('2021-01-16', '1399-10-27', '1442-06-02', '2021-01-16', 27, 2, 10, 6, 'saturday ', 7, 0, 'NONE') ,('2021-01-17', '1399-10-28', '1442-06-03', '2021-01-17', 28, 3, 10, 6, 'sunday ', 1, 0, 'NONE') ,('2021-01-18', '1399-10-29', '1442-06-04', '2021-01-18', 29, 4, 10, 6, 'monday ', 2, 0, 'NONE') ,('2021-01-19', '1399-10-30', '1442-06-05', '2021-01-19', 30, 5, 10, 6, 'tuesday ', 3, 0, 'NONE') ,('2021-01-20', '1399-11-01', '1442-06-06', '2021-01-20', 1, 6, 11, 6, 'wednesday', 4, 0, 'NONE') ,('2021-01-21', '1399-11-02', '1442-06-07', '2021-01-21', 2, 7, 11, 6, 'thursday ', 5, 0, 'NONE') ,('2021-01-22', '1399-11-03', '1442-06-08', '2021-01-22', 3, 8, 11, 6, 'friday ', 6, 0, 'NONE') ,('2021-01-23', '1399-11-04', '1442-06-09', '2021-01-23', 4, 9, 11, 6, 'saturday ', 7, 0, 'NONE') ,('2021-01-24', '1399-11-05', '1442-06-10', '2021-01-24', 5, 10, 11, 6, 'sunday ', 1, 0, 'NONE') ,('2021-01-25', '1399-11-06', '1442-06-11', '2021-01-25', 6, 11, 11, 6, 'monday ', 2, 0, 'NONE') ,('2021-01-26', '1399-11-07', '1442-06-12', '2021-01-26', 7, 12, 11, 6, 'tuesday ', 3, 0, 'NONE') ,('2021-01-27', '1399-11-08', '1442-06-13', '2021-01-27', 8, 13, 11, 6, 'wednesday', 4, 0, 'NONE') ,('2021-01-28', '1399-11-09', '1442-06-14', '2021-01-28', 9, 14, 11, 6, 'thursday ', 5, 0, 'NONE') ,('2021-01-29', '1399-11-10', '1442-06-15', '2021-01-29', 10, 15, 11, 6, 'friday ', 6, 0, 'NONE') ,('2021-01-30', '1399-11-11', '1442-06-16', '2021-01-30', 11, 16, 11, 6, 'saturday ', 7, 0, 'NONE') ,('2021-01-31', '1399-11-12', '1442-06-17', '2021-01-31', 12, 17, 11, 6, 'sunday ', 1, 0, 'NONE') ,('2021-02-01', '1399-11-13', '1442-06-18', '2021-02-01', 13, 18, 11, 6, 'monday ', 2, 0, 'NONE') ,('2021-02-02', '1399-11-14', '1442-06-19', '2021-02-02', 14, 19, 11, 6, 'tuesday ', 3, 0, 'NONE') ,('2021-02-03', '1399-11-15', '1442-06-20', '2021-02-03', 15, 20, 11, 6, 'wednesday', 4, 0, 'NONE') ,('2021-02-04', '1399-11-16', '1442-06-21', '2021-02-04', 16, 21, 11, 6, 'thursday ', 5, 0, 'NONE') ,('2021-02-05', '1399-11-17', '1442-06-22', '2021-02-05', 17, 22, 11, 6, 'friday ', 6, 0, 'NONE') ,('2021-02-06', '1399-11-18', '1442-06-23', '2021-02-06', 18, 23, 11, 6, 'saturday ', 7, 0, 'NONE') ,('2021-02-07', '1399-11-19', '1442-06-24', '2021-02-07', 19, 24, 11, 6, 'sunday ', 1, 0, 'NONE') ,('2021-02-08', '1399-11-20', '1442-06-25', '2021-02-08', 20, 25, 11, 6, 'monday ', 2, 0, 'NONE') ,('2021-02-09', '1399-11-21', '1442-06-26', '2021-02-09', 21, 26, 11, 6, 'tuesday ', 3, 0, 'NONE') ,('2021-02-10', '1399-11-22', '1442-06-27', '2021-02-10', 22, 27, 11, 6, 'wednesday', 4, 0, 'NONE') ,('2021-02-11', '1399-11-23', '1442-06-28', '2021-02-11', 23, 28, 11, 6, 'thursday ', 5, 0, 'NONE') ,('2021-02-12', '1399-11-24', '1442-06-29', '2021-02-12', 24, 29, 11, 6, 'friday ', 6, 0, 'NONE') ,('2021-02-13', '1399-11-25', '1442-07-01', '2021-02-13', 25, 1, 11, 7, 'saturday ', 7, 0, 'NONE') ,('2021-02-14', '1399-11-26', '1442-07-02', '2021-02-14', 26, 2, 11, 7, 'sunday ', 1, 0, 'NONE') ,('2021-02-15', '1399-11-27', '1442-07-03', '2021-02-15', 27, 3, 11, 7, 'monday ', 2, 0, 'NONE') ,('2021-02-16', '1399-11-28', '1442-07-04', '2021-02-16', 28, 4, 11, 7, 'tuesday ', 3, 0, 'NONE') 
,('2021-02-17', '1399-11-29', '1442-07-05', '2021-02-17', 29, 5, 11, 7, 'wednesday', 4, 0, 'NONE') ,('2021-02-18', '1399-11-30', '1442-07-06', '2021-02-18', 30, 6, 11, 7, 'thursday ', 5, 0, 'NONE') ,('2021-02-19', '1399-12-01', '1442-07-07', '2021-02-19', 1, 7, 12, 7, 'friday ', 6, 0, 'NONE') ,('2021-02-20', '1399-12-02', '1442-07-08', '2021-02-20', 2, 8, 12, 7, 'saturday ', 7, 0, 'NONE') ,('2021-02-21', '1399-12-03', '1442-07-09', '2021-02-21', 3, 9, 12, 7, 'sunday ', 1, 0, 'NONE') ,('2021-02-22', '1399-12-04', '1442-07-10', '2021-02-22', 4, 10, 12, 7, 'monday ', 2, 0, 'NONE') ,('2021-02-23', '1399-12-05', '1442-07-11', '2021-02-23', 5, 11, 12, 7, 'tuesday ', 3, 0, 'NONE') ,('2021-02-24', '1399-12-06', '1442-07-12', '2021-02-24', 6, 12, 12, 7, 'wednesday', 4, 0, 'NONE') ,('2021-02-25', '1399-12-07', '1442-07-13', '2021-02-25', 7, 13, 12, 7, 'thursday ', 5, 0, 'NONE') ,('2021-02-26', '1399-12-08', '1442-07-14', '2021-02-26', 8, 14, 12, 7, 'friday ', 6, 0, 'NONE') ,('2021-02-27', '1399-12-09', '1442-07-15', '2021-02-27', 9, 15, 12, 7, 'saturday ', 7, 0, 'NONE') ,('2021-02-28', '1399-12-10', '1442-07-16', '2021-02-28', 10, 16, 12, 7, 'sunday ', 1, 0, 'NONE') ,('2021-03-01', '1399-12-11', '1442-07-17', '2021-03-01', 11, 17, 12, 7, 'monday ', 2, 0, 'NONE') ,('2021-03-02', '1399-12-12', '1442-07-18', '2021-03-02', 12, 18, 12, 7, 'tuesday ', 3, 0, 'NONE') ,('2021-03-03', '1399-12-13', '1442-07-19', '2021-03-03', 13, 19, 12, 7, 'wednesday', 4, 0, 'NONE') ,('2021-03-04', '1399-12-14', '1442-07-20', '2021-03-04', 14, 20, 12, 7, 'thursday ', 5, 0, 'NONE') ,('2021-03-05', '1399-12-15', '1442-07-21', '2021-03-05', 15, 21, 12, 7, 'friday ', 6, 0, 'NONE') ,('2021-03-06', '1399-12-16', '1442-07-22', '2021-03-06', 16, 22, 12, 7, 'saturday ', 7, 0, 'NONE') ,('2021-03-07', '1399-12-17', '1442-07-23', '2021-03-07', 17, 23, 12, 7, 'sunday ', 1, 0, 'NONE') ,('2021-03-08', '1399-12-18', '1442-07-24', '2021-03-08', 18, 24, 12, 7, 'monday ', 2, 0, 'NONE') ,('2021-03-09', '1399-12-19', '1442-07-25', '2021-03-09', 19, 25, 12, 7, 'tuesday ', 3, 0, 'NONE') ,('2021-03-10', '1399-12-20', '1442-07-26', '2021-03-10', 20, 26, 12, 7, 'wednesday', 4, 0, 'NONE') ,('2021-03-11', '1399-12-21', '1442-07-27', '2021-03-11', 21, 27, 12, 7, 'thursday ', 5, 0, 'NONE') ,('2021-03-12', '1399-12-22', '1442-07-28', '2021-03-12', 22, 28, 12, 7, 'friday ', 6, 0, 'NONE') ,('2021-03-13', '1399-12-23', '1442-07-29', '2021-03-13', 23, 29, 12, 7, 'saturday ', 7, 0, 'NONE') ,('2021-03-14', '1399-12-24', '1442-07-30', '2021-03-14', 24, 30, 12, 7, 'sunday ', 1, 0, 'NONE') ,('2021-03-15', '1399-12-25', '1442-08-01', '2021-03-15', 25, 1, 12, 8, 'monday ', 2, 0, 'NONE') ,('2021-03-16', '1399-12-26', '1442-08-02', '2021-03-16', 26, 2, 12, 8, 'tuesday ', 3, 0, 'NONE') ,('2021-03-17', '1399-12-27', '1442-08-03', '2021-03-17', 27, 3, 12, 8, 'wednesday', 4, 0, 'NONE') ,('2021-03-18', '1399-12-28', '1442-08-04', '2021-03-18', 28, 4, 12, 8, 'thursday ', 5, 0, 'NONE') ,('2021-03-19', '1399-12-29', '1442-08-05', '2021-03-19', 29, 5, 12, 8, 'friday ', 6, 0, 'NONE') ,('2021-03-20', '1399-12-30', '1442-08-06', '2021-03-20', 30, 6, 12, 8, 'saturday ', 7, 0, 'NONE') ,('2021-03-21', '1400-01-01', '1442-08-07', '2021-03-21', 1, 7, 1, 8, 'sunday ', 1, 0, 'NONE') ,('2021-03-22', '1400-01-02', '1442-08-08', '2021-03-22', 2, 8, 1, 8, 'monday ', 2, 0, 'NONE') ,('2021-03-23', '1400-01-03', '1442-08-09', '2021-03-23', 3, 9, 1, 8, 'tuesday ', 3, 0, 'NONE') ,('2021-03-24', '1400-01-04', '1442-08-10', '2021-03-24', 4, 10, 1, 8, 'wednesday', 4, 0, 'NONE') ,('2021-03-25', 
'1400-01-05', '1442-08-11', '2021-03-25', 5, 11, 1, 8, 'thursday ', 5, 0, 'NONE') ,('2021-03-26', '1400-01-06', '1442-08-12', '2021-03-26', 6, 12, 1, 8, 'friday ', 6, 0, 'NONE') ,('2021-03-27', '1400-01-07', '1442-08-13', '2021-03-27', 7, 13, 1, 8, 'saturday ', 7, 0, 'NONE') ,('2021-03-28', '1400-01-08', '1442-08-14', '2021-03-28', 8, 14, 1, 8, 'sunday ', 1, 0, 'NONE') ,('2021-03-29', '1400-01-09', '1442-08-15', '2021-03-29', 9, 15, 1, 8, 'monday ', 2, 0, 'NONE') ,('2021-03-30', '1400-01-10', '1442-08-16', '2021-03-30', 10, 16, 1, 8, 'tuesday ', 3, 0, 'NONE') ,('2021-03-31', '1400-01-11', '1442-08-17', '2021-03-31', 11, 17, 1, 8, 'wednesday', 4, 0, 'NONE') ,('2021-04-01', '1400-01-12', '1442-08-18', '2021-04-01', 12, 18, 1, 8, 'thursday ', 5, 0, 'NONE') ,('2021-04-02', '1400-01-13', '1442-08-19', '2021-04-02', 13, 19, 1, 8, 'friday ', 6, 0, 'NONE') ,('2021-04-03', '1400-01-14', '1442-08-20', '2021-04-03', 14, 20, 1, 8, 'saturday ', 7, 0, 'NONE') ,('2021-04-04', '1400-01-15', '1442-08-21', '2021-04-04', 15, 21, 1, 8, 'sunday ', 1, 0, 'NONE') ,('2021-04-05', '1400-01-16', '1442-08-22', '2021-04-05', 16, 22, 1, 8, 'monday ', 2, 0, 'NONE') ,('2021-04-06', '1400-01-17', '1442-08-23', '2021-04-06', 17, 23, 1, 8, 'tuesday ', 3, 0, 'NONE') ,('2021-04-07', '1400-01-18', '1442-08-24', '2021-04-07', 18, 24, 1, 8, 'wednesday', 4, 0, 'NONE') ,('2021-04-08', '1400-01-19', '1442-08-25', '2021-04-08', 19, 25, 1, 8, 'thursday ', 5, 0, 'NONE') ,('2021-04-09', '1400-01-20', '1442-08-26', '2021-04-09', 20, 26, 1, 8, 'friday ', 6, 0, 'NONE') ,('2021-04-10', '1400-01-21', '1442-08-27', '2021-04-10', 21, 27, 1, 8, 'saturday ', 7, 0, 'NONE') ,('2021-04-11', '1400-01-22', '1442-08-28', '2021-04-11', 22, 28, 1, 8, 'sunday ', 1, 0, 'NONE') ,('2021-04-12', '1400-01-23', '1442-08-29', '2021-04-12', 23, 29, 1, 8, 'monday ', 2, 0, 'NONE') ,('2021-04-13', '1400-01-24', '1442-09-01', '2021-04-13', 24, 1, 1, 9, 'tuesday ', 3, 0, 'NONE') ,('2021-04-14', '1400-01-25', '1442-09-02', '2021-04-14', 25, 2, 1, 9, 'wednesday', 4, 0, 'NONE') ,('2021-04-15', '1400-01-26', '1442-09-03', '2021-04-15', 26, 3, 1, 9, 'thursday ', 5, 0, 'NONE') ,('2021-04-16', '1400-01-27', '1442-09-04', '2021-04-16', 27, 4, 1, 9, 'friday ', 6, 0, 'NONE') ,('2021-04-17', '1400-01-28', '1442-09-05', '2021-04-17', 28, 5, 1, 9, 'saturday ', 7, 0, 'NONE') ,('2021-04-18', '1400-01-29', '1442-09-06', '2021-04-18', 29, 6, 1, 9, 'sunday ', 1, 0, 'NONE') ,('2021-04-19', '1400-01-30', '1442-09-07', '2021-04-19', 30, 7, 1, 9, 'monday ', 2, 0, 'NONE') ,('2021-04-20', '1400-01-31', '1442-09-08', '2021-04-20', 31, 8, 1, 9, 'tuesday ', 3, 0, 'NONE') ,('2021-04-21', '1400-02-01', '1442-09-09', '2021-04-21', 1, 9, 2, 9, 'wednesday', 4, 0, 'NONE') ,('2021-04-22', '1400-02-02', '1442-09-10', '2021-04-22', 2, 10, 2, 9, 'thursday ', 5, 0, 'NONE') ,('2021-04-23', '1400-02-03', '1442-09-11', '2021-04-23', 3, 11, 2, 9, 'friday ', 6, 0, 'NONE') ,('2021-04-24', '1400-02-04', '1442-09-12', '2021-04-24', 4, 12, 2, 9, 'saturday ', 7, 0, 'NONE') ,('2021-04-25', '1400-02-05', '1442-09-13', '2021-04-25', 5, 13, 2, 9, 'sunday ', 1, 0, 'NONE') ,('2021-04-26', '1400-02-06', '1442-09-14', '2021-04-26', 6, 14, 2, 9, 'monday ', 2, 0, 'NONE') ,('2021-04-27', '1400-02-07', '1442-09-15', '2021-04-27', 7, 15, 2, 9, 'tuesday ', 3, 0, 'NONE') ,('2021-04-28', '1400-02-08', '1442-09-16', '2021-04-28', 8, 16, 2, 9, 'wednesday', 4, 0, 'NONE') ,('2021-04-29', '1400-02-09', '1442-09-17', '2021-04-29', 9, 17, 2, 9, 'thursday ', 5, 0, 'NONE') ,('2021-04-30', '1400-02-10', '1442-09-18', '2021-04-30', 10, 
18, 2, 9, 'friday ', 6, 0, 'NONE') ,('2021-05-01', '1400-02-11', '1442-09-19', '2021-05-01', 11, 19, 2, 9, 'saturday ', 7, 0, 'NONE') ,('2021-05-02', '1400-02-12', '1442-09-20', '2021-05-02', 12, 20, 2, 9, 'sunday ', 1, 0, 'NONE') ,('2021-05-03', '1400-02-13', '1442-09-21', '2021-05-03', 13, 21, 2, 9, 'monday ', 2, 0, 'NONE') ,('2021-05-04', '1400-02-14', '1442-09-22', '2021-05-04', 14, 22, 2, 9, 'tuesday ', 3, 0, 'NONE') ,('2021-05-05', '1400-02-15', '1442-09-23', '2021-05-05', 15, 23, 2, 9, 'wednesday', 4, 0, 'NONE') ,('2021-05-06', '1400-02-16', '1442-09-24', '2021-05-06', 16, 24, 2, 9, 'thursday ', 5, 0, 'NONE') ,('2021-05-07', '1400-02-17', '1442-09-25', '2021-05-07', 17, 25, 2, 9, 'friday ', 6, 0, 'NONE') ,('2021-05-08', '1400-02-18', '1442-09-26', '2021-05-08', 18, 26, 2, 9, 'saturday ', 7, 0, 'NONE') ,('2021-05-09', '1400-02-19', '1442-09-27', '2021-05-09', 19, 27, 2, 9, 'sunday ', 1, 0, 'NONE') ,('2021-05-10', '1400-02-20', '1442-09-28', '2021-05-10', 20, 28, 2, 9, 'monday ', 2, 0, 'NONE') ,('2021-05-11', '1400-02-21', '1442-09-29', '2021-05-11', 21, 29, 2, 9, 'tuesday ', 3, 0, 'NONE') ,('2021-05-12', '1400-02-22', '1442-09-30', '2021-05-12', 22, 30, 2, 9, 'wednesday', 4, 0, 'NONE') ,('2021-05-13', '1400-02-23', '1442-10-01', '2021-05-13', 23, 1, 2, 10, 'thursday ', 5, 0, 'NONE') ,('2021-05-14', '1400-02-24', '1442-10-02', '2021-05-14', 24, 2, 2, 10, 'friday ', 6, 0, 'NONE') ,('2021-05-15', '1400-02-25', '1442-10-03', '2021-05-15', 25, 3, 2, 10, 'saturday ', 7, 0, 'NONE') ,('2021-05-16', '1400-02-26', '1442-10-04', '2021-05-16', 26, 4, 2, 10, 'sunday ', 1, 0, 'NONE') ,('2021-05-17', '1400-02-27', '1442-10-05', '2021-05-17', 27, 5, 2, 10, 'monday ', 2, 0, 'NONE') ,('2021-05-18', '1400-02-28', '1442-10-06', '2021-05-18', 28, 6, 2, 10, 'tuesday ', 3, 0, 'NONE') ,('2021-05-19', '1400-02-29', '1442-10-07', '2021-05-19', 29, 7, 2, 10, 'wednesday', 4, 0, 'NONE') ,('2021-05-20', '1400-02-30', '1442-10-08', '2021-05-20', 30, 8, 2, 10, 'thursday ', 5, 0, 'NONE') ,('2021-05-21', '1400-02-31', '1442-10-09', '2021-05-21', 31, 9, 2, 10, 'friday ', 6, 0, 'NONE') ,('2021-05-22', '1400-03-01', '1442-10-10', '2021-05-22', 1, 10, 3, 10, 'saturday ', 7, 0, 'NONE') ,('2021-05-23', '1400-03-02', '1442-10-11', '2021-05-23', 2, 11, 3, 10, 'sunday ', 1, 0, 'NONE') ,('2021-05-24', '1400-03-03', '1442-10-12', '2021-05-24', 3, 12, 3, 10, 'monday ', 2, 0, 'NONE') ,('2021-05-25', '1400-03-04', '1442-10-13', '2021-05-25', 4, 13, 3, 10, 'tuesday ', 3, 0, 'NONE') ,('2021-05-26', '1400-03-05', '1442-10-14', '2021-05-26', 5, 14, 3, 10, 'wednesday', 4, 0, 'NONE') ,('2021-05-27', '1400-03-06', '1442-10-15', '2021-05-27', 6, 15, 3, 10, 'thursday ', 5, 0, 'NONE') ,('2021-05-28', '1400-03-07', '1442-10-16', '2021-05-28', 7, 16, 3, 10, 'friday ', 6, 0, 'NONE') ,('2021-05-29', '1400-03-08', '1442-10-17', '2021-05-29', 8, 17, 3, 10, 'saturday ', 7, 0, 'NONE') ,('2021-05-30', '1400-03-09', '1442-10-18', '2021-05-30', 9, 18, 3, 10, 'sunday ', 1, 0, 'NONE') ,('2021-05-31', '1400-03-10', '1442-10-19', '2021-05-31', 10, 19, 3, 10, 'monday ', 2, 0, 'NONE') ,('2021-06-01', '1400-03-11', '1442-10-20', '2021-06-01', 11, 20, 3, 10, 'tuesday ', 3, 0, 'NONE') ,('2021-06-02', '1400-03-12', '1442-10-21', '2021-06-02', 12, 21, 3, 10, 'wednesday', 4, 0, 'NONE') ,('2021-06-03', '1400-03-13', '1442-10-22', '2021-06-03', 13, 22, 3, 10, 'thursday ', 5, 0, 'NONE') ,('2021-06-04', '1400-03-14', '1442-10-23', '2021-06-04', 14, 23, 3, 10, 'friday ', 6, 0, 'NONE') ,('2021-06-05', '1400-03-15', '1442-10-24', '2021-06-05', 15, 24, 3, 10, 'saturday 
', 7, 0, 'NONE') ,('2021-06-06', '1400-03-16', '1442-10-25', '2021-06-06', 16, 25, 3, 10, 'sunday ', 1, 0, 'NONE') ,('2021-06-07', '1400-03-17', '1442-10-26', '2021-06-07', 17, 26, 3, 10, 'monday ', 2, 0, 'NONE') ,('2021-06-08', '1400-03-18', '1442-10-27', '2021-06-08', 18, 27, 3, 10, 'tuesday ', 3, 0, 'NONE') ,('2021-06-09', '1400-03-19', '1442-10-28', '2021-06-09', 19, 28, 3, 10, 'wednesday', 4, 0, 'NONE') ,('2021-06-10', '1400-03-20', '1442-10-29', '2021-06-10', 20, 29, 3, 10, 'thursday ', 5, 0, 'NONE') ,('2021-06-11', '1400-03-21', '1442-11-01', '2021-06-11', 21, 1, 3, 11, 'friday ', 6, 0, 'NONE') ,('2021-06-12', '1400-03-22', '1442-11-02', '2021-06-12', 22, 2, 3, 11, 'saturday ', 7, 0, 'NONE') ,('2021-06-13', '1400-03-23', '1442-11-03', '2021-06-13', 23, 3, 3, 11, 'sunday ', 1, 0, 'NONE') ,('2021-06-14', '1400-03-24', '1442-11-04', '2021-06-14', 24, 4, 3, 11, 'monday ', 2, 0, 'NONE') ,('2021-06-15', '1400-03-25', '1442-11-05', '2021-06-15', 25, 5, 3, 11, 'tuesday ', 3, 0, 'NONE') ,('2021-06-16', '1400-03-26', '1442-11-06', '2021-06-16', 26, 6, 3, 11, 'wednesday', 4, 0, 'NONE') ,('2021-06-17', '1400-03-27', '1442-11-07', '2021-06-17', 27, 7, 3, 11, 'thursday ', 5, 0, 'NONE') ,('2021-06-18', '1400-03-28', '1442-11-08', '2021-06-18', 28, 8, 3, 11, 'friday ', 6, 0, 'NONE') ,('2021-06-19', '1400-03-29', '1442-11-09', '2021-06-19', 29, 9, 3, 11, 'saturday ', 7, 0, 'NONE') ,('2021-06-20', '1400-03-30', '1442-11-10', '2021-06-20', 30, 10, 3, 11, 'sunday ', 1, 0, 'NONE') ,('2021-06-21', '1400-03-31', '1442-11-11', '2021-06-21', 31, 11, 3, 11, 'monday ', 2, 0, 'NONE') ,('2021-06-22', '1400-04-01', '1442-11-12', '2021-06-22', 1, 12, 4, 11, 'tuesday ', 3, 0, 'NONE') ,('2021-06-23', '1400-04-02', '1442-11-13', '2021-06-23', 2, 13, 4, 11, 'wednesday', 4, 0, 'NONE') ,('2021-06-24', '1400-04-03', '1442-11-14', '2021-06-24', 3, 14, 4, 11, 'thursday ', 5, 0, 'NONE') ,('2021-06-25', '1400-04-04', '1442-11-15', '2021-06-25', 4, 15, 4, 11, 'friday ', 6, 0, 'NONE') ,('2021-06-26', '1400-04-05', '1442-11-16', '2021-06-26', 5, 16, 4, 11, 'saturday ', 7, 0, 'NONE') ,('2021-06-27', '1400-04-06', '1442-11-17', '2021-06-27', 6, 17, 4, 11, 'sunday ', 1, 0, 'NONE') ,('2021-06-28', '1400-04-07', '1442-11-18', '2021-06-28', 7, 18, 4, 11, 'monday ', 2, 0, 'NONE') ,('2021-06-29', '1400-04-08', '1442-11-19', '2021-06-29', 8, 19, 4, 11, 'tuesday ', 3, 0, 'NONE') ,('2021-06-30', '1400-04-09', '1442-11-20', '2021-06-30', 9, 20, 4, 11, 'wednesday', 4, 0, 'NONE') ,('2021-07-01', '1400-04-10', '1442-11-21', '2021-07-01', 10, 21, 4, 11, 'thursday ', 5, 0, 'NONE') ,('2021-07-02', '1400-04-11', '1442-11-22', '2021-07-02', 11, 22, 4, 11, 'friday ', 6, 0, 'NONE') ,('2021-07-03', '1400-04-12', '1442-11-23', '2021-07-03', 12, 23, 4, 11, 'saturday ', 7, 0, 'NONE') ,('2021-07-04', '1400-04-13', '1442-11-24', '2021-07-04', 13, 24, 4, 11, 'sunday ', 1, 0, 'NONE') ,('2021-07-05', '1400-04-14', '1442-11-25', '2021-07-05', 14, 25, 4, 11, 'monday ', 2, 0, 'NONE') ,('2021-07-06', '1400-04-15', '1442-11-26', '2021-07-06', 15, 26, 4, 11, 'tuesday ', 3, 0, 'NONE') ,('2021-07-07', '1400-04-16', '1442-11-27', '2021-07-07', 16, 27, 4, 11, 'wednesday', 4, 0, 'NONE') ,('2021-07-08', '1400-04-17', '1442-11-28', '2021-07-08', 17, 28, 4, 11, 'thursday ', 5, 0, 'NONE') ,('2021-07-09', '1400-04-18', '1442-11-29', '2021-07-09', 18, 29, 4, 11, 'friday ', 6, 0, 'NONE') ,('2021-07-10', '1400-04-19', '1442-11-30', '2021-07-10', 19, 30, 4, 11, 'saturday ', 7, 0, 'NONE') ,('2021-07-11', '1400-04-20', '1442-12-01', '2021-07-11', 20, 1, 4, 12, 'sunday ', 1, 0, 
'NONE') ,('2021-07-12', '1400-04-21', '1442-12-02', '2021-07-12', 21, 2, 4, 12, 'monday ', 2, 0, 'NONE') ,('2021-07-13', '1400-04-22', '1442-12-03', '2021-07-13', 22, 3, 4, 12, 'tuesday ', 3, 0, 'NONE') ,('2021-07-14', '1400-04-23', '1442-12-04', '2021-07-14', 23, 4, 4, 12, 'wednesday', 4, 0, 'NONE') ,('2021-07-15', '1400-04-24', '1442-12-05', '2021-07-15', 24, 5, 4, 12, 'thursday ', 5, 0, 'NONE') ,('2021-07-16', '1400-04-25', '1442-12-06', '2021-07-16', 25, 6, 4, 12, 'friday ', 6, 0, 'NONE') ,('2021-07-17', '1400-04-26', '1442-12-07', '2021-07-17', 26, 7, 4, 12, 'saturday ', 7, 0, 'NONE') ,('2021-07-18', '1400-04-27', '1442-12-08', '2021-07-18', 27, 8, 4, 12, 'sunday ', 1, 0, 'NONE') ,('2021-07-19', '1400-04-28', '1442-12-09', '2021-07-19', 28, 9, 4, 12, 'monday ', 2, 0, 'NONE') ,('2021-07-20', '1400-04-29', '1442-12-10', '2021-07-20', 29, 10, 4, 12, 'tuesday ', 3, 0, 'NONE') ,('2021-07-21', '1400-04-30', '1442-12-11', '2021-07-21', 30, 11, 4, 12, 'wednesday', 4, 0, 'NONE') ,('2021-07-22', '1400-04-31', '1442-12-12', '2021-07-22', 31, 12, 4, 12, 'thursday ', 5, 0, 'NONE') ,('2021-07-23', '1400-05-01', '1442-12-13', '2021-07-23', 1, 13, 5, 12, 'friday ', 6, 0, 'NONE') ,('2021-07-24', '1400-05-02', '1442-12-14', '2021-07-24', 2, 14, 5, 12, 'saturday ', 7, 0, 'NONE') ,('2021-07-25', '1400-05-03', '1442-12-15', '2021-07-25', 3, 15, 5, 12, 'sunday ', 1, 0, 'NONE') ,('2021-07-26', '1400-05-04', '1442-12-16', '2021-07-26', 4, 16, 5, 12, 'monday ', 2, 0, 'NONE') ,('2021-07-27', '1400-05-05', '1442-12-17', '2021-07-27', 5, 17, 5, 12, 'tuesday ', 3, 0, 'NONE') ,('2021-07-28', '1400-05-06', '1442-12-18', '2021-07-28', 6, 18, 5, 12, 'wednesday', 4, 0, 'NONE') ,('2021-07-29', '1400-05-07', '1442-12-19', '2021-07-29', 7, 19, 5, 12, 'thursday ', 5, 0, 'NONE') ,('2021-07-30', '1400-05-08', '1442-12-20', '2021-07-30', 8, 20, 5, 12, 'friday ', 6, 0, 'NONE') ,('2021-07-31', '1400-05-09', '1442-12-21', '2021-07-31', 9, 21, 5, 12, 'saturday ', 7, 0, 'NONE') ,('2021-08-01', '1400-05-10', '1442-12-22', '2021-08-01', 10, 22, 5, 12, 'sunday ', 1, 0, 'NONE') ,('2021-08-02', '1400-05-11', '1442-12-23', '2021-08-02', 11, 23, 5, 12, 'monday ', 2, 0, 'NONE') ,('2021-08-03', '1400-05-12', '1442-12-24', '2021-08-03', 12, 24, 5, 12, 'tuesday ', 3, 0, 'NONE') ,('2021-08-04', '1400-05-13', '1442-12-25', '2021-08-04', 13, 25, 5, 12, 'wednesday', 4, 0, 'NONE') ,('2021-08-05', '1400-05-14', '1442-12-26', '2021-08-05', 14, 26, 5, 12, 'thursday ', 5, 0, 'NONE') ,('2021-08-06', '1400-05-15', '1442-12-27', '2021-08-06', 15, 27, 5, 12, 'friday ', 6, 0, 'NONE') ,('2021-08-07', '1400-05-16', '1442-12-28', '2021-08-07', 16, 28, 5, 12, 'saturday ', 7, 0, 'NONE') ,('2021-08-08', '1400-05-17', '1442-12-29', '2021-08-08', 17, 29, 5, 12, 'sunday ', 1, 0, 'NONE') ,('2021-08-09', '1400-05-18', '1442-12-30', '2021-08-09', 18, 30, 5, 12, 'monday ', 2, 0, 'NONE') ,('2021-08-10', '1400-05-19', '1443-01-01', '2021-08-10', 19, 1, 5, 1, 'tuesday ', 3, 0, 'NONE') ,('2021-08-11', '1400-05-20', '1443-01-02', '2021-08-11', 20, 2, 5, 1, 'wednesday', 4, 0, 'NONE') ,('2021-08-12', '1400-05-21', '1443-01-03', '2021-08-12', 21, 3, 5, 1, 'thursday ', 5, 0, 'NONE') ,('2021-08-13', '1400-05-22', '1443-01-04', '2021-08-13', 22, 4, 5, 1, 'friday ', 6, 0, 'NONE') ,('2021-08-14', '1400-05-23', '1443-01-05', '2021-08-14', 23, 5, 5, 1, 'saturday ', 7, 0, 'NONE') ,('2021-08-15', '1400-05-24', '1443-01-06', '2021-08-15', 24, 6, 5, 1, 'sunday ', 1, 0, 'NONE') ,('2021-08-16', '1400-05-25', '1443-01-07', '2021-08-16', 25, 7, 5, 1, 'monday ', 2, 0, 'NONE') 
,('2021-08-17', '1400-05-26', '1443-01-08', '2021-08-17', 26, 8, 5, 1, 'tuesday ', 3, 0, 'NONE') ,('2021-08-18', '1400-05-27', '1443-01-09', '2021-08-18', 27, 9, 5, 1, 'wednesday', 4, 0, 'NONE') ,('2021-08-19', '1400-05-28', '1443-01-10', '2021-08-19', 28, 10, 5, 1, 'thursday ', 5, 0, 'NONE') ,('2021-08-20', '1400-05-29', '1443-01-11', '2021-08-20', 29, 11, 5, 1, 'friday ', 6, 0, 'NONE') ,('2021-08-21', '1400-05-30', '1443-01-12', '2021-08-21', 30, 12, 5, 1, 'saturday ', 7, 0, 'NONE') ,('2021-08-22', '1400-05-31', '1443-01-13', '2021-08-22', 31, 13, 5, 1, 'sunday ', 1, 0, 'NONE') ,('2021-08-23', '1400-06-01', '1443-01-14', '2021-08-23', 1, 14, 6, 1, 'monday ', 2, 0, 'NONE') ,('2021-08-24', '1400-06-02', '1443-01-15', '2021-08-24', 2, 15, 6, 1, 'tuesday ', 3, 0, 'NONE') ,('2021-08-25', '1400-06-03', '1443-01-16', '2021-08-25', 3, 16, 6, 1, 'wednesday', 4, 0, 'NONE') ,('2021-08-26', '1400-06-04', '1443-01-17', '2021-08-26', 4, 17, 6, 1, 'thursday ', 5, 0, 'NONE') ,('2021-08-27', '1400-06-05', '1443-01-18', '2021-08-27', 5, 18, 6, 1, 'friday ', 6, 0, 'NONE') ,('2021-08-28', '1400-06-06', '1443-01-19', '2021-08-28', 6, 19, 6, 1, 'saturday ', 7, 0, 'NONE') ,('2021-08-29', '1400-06-07', '1443-01-20', '2021-08-29', 7, 20, 6, 1, 'sunday ', 1, 0, 'NONE') ,('2021-08-30', '1400-06-08', '1443-01-21', '2021-08-30', 8, 21, 6, 1, 'monday ', 2, 0, 'NONE') ,('2021-08-31', '1400-06-09', '1443-01-22', '2021-08-31', 9, 22, 6, 1, 'tuesday ', 3, 0, 'NONE') ,('2021-09-01', '1400-06-10', '1443-01-23', '2021-09-01', 10, 23, 6, 1, 'wednesday', 4, 0, 'NONE') ,('2021-09-02', '1400-06-11', '1443-01-24', '2021-09-02', 11, 24, 6, 1, 'thursday ', 5, 0, 'NONE') ,('2021-09-03', '1400-06-12', '1443-01-25', '2021-09-03', 12, 25, 6, 1, 'friday ', 6, 0, 'NONE') ,('2021-09-04', '1400-06-13', '1443-01-26', '2021-09-04', 13, 26, 6, 1, 'saturday ', 7, 0, 'NONE') ,('2021-09-05', '1400-06-14', '1443-01-27', '2021-09-05', 14, 27, 6, 1, 'sunday ', 1, 0, 'NONE') ,('2021-09-06', '1400-06-15', '1443-01-28', '2021-09-06', 15, 28, 6, 1, 'monday ', 2, 0, 'NONE') ,('2021-09-07', '1400-06-16', '1443-01-29', '2021-09-07', 16, 29, 6, 1, 'tuesday ', 3, 0, 'NONE') ,('2021-09-08', '1400-06-17', '1443-01-30', '2021-09-08', 17, 30, 6, 1, 'wednesday', 4, 0, 'NONE') ,('2021-09-09', '1400-06-18', '1443-02-01', '2021-09-09', 18, 1, 6, 2, 'thursday ', 5, 0, 'NONE') ,('2021-09-10', '1400-06-19', '1443-02-02', '2021-09-10', 19, 2, 6, 2, 'friday ', 6, 0, 'NONE') ,('2021-09-11', '1400-06-20', '1443-02-03', '2021-09-11', 20, 3, 6, 2, 'saturday ', 7, 0, 'NONE') ,('2021-09-12', '1400-06-21', '1443-02-04', '2021-09-12', 21, 4, 6, 2, 'sunday ', 1, 0, 'NONE') ,('2021-09-13', '1400-06-22', '1443-02-05', '2021-09-13', 22, 5, 6, 2, 'monday ', 2, 0, 'NONE') ,('2021-09-14', '1400-06-23', '1443-02-06', '2021-09-14', 23, 6, 6, 2, 'tuesday ', 3, 0, 'NONE') ,('2021-09-15', '1400-06-24', '1443-02-07', '2021-09-15', 24, 7, 6, 2, 'wednesday', 4, 0, 'NONE') ,('2021-09-16', '1400-06-25', '1443-02-08', '2021-09-16', 25, 8, 6, 2, 'thursday ', 5, 0, 'NONE') ,('2021-09-17', '1400-06-26', '1443-02-09', '2021-09-17', 26, 9, 6, 2, 'friday ', 6, 0, 'NONE') ,('2021-09-18', '1400-06-27', '1443-02-10', '2021-09-18', 27, 10, 6, 2, 'saturday ', 7, 0, 'NONE') ,('2021-09-19', '1400-06-28', '1443-02-11', '2021-09-19', 28, 11, 6, 2, 'sunday ', 1, 0, 'NONE') ,('2021-09-20', '1400-06-29', '1443-02-12', '2021-09-20', 29, 12, 6, 2, 'monday ', 2, 0, 'NONE') ,('2021-09-21', '1400-06-30', '1443-02-13', '2021-09-21', 30, 13, 6, 2, 'tuesday ', 3, 0, 'NONE') ,('2021-09-22', '1400-06-31', '1443-02-14', 
'2021-09-22', 31, 14, 6, 2, 'wednesday', 4, 0, 'NONE') ,('2021-09-23', '1400-07-01', '1443-02-15', '2021-09-23', 1, 15, 7, 2, 'thursday ', 5, 0, 'NONE') ,('2021-09-24', '1400-07-02', '1443-02-16', '2021-09-24', 2, 16, 7, 2, 'friday ', 6, 0, 'NONE') ,('2021-09-25', '1400-07-03', '1443-02-17', '2021-09-25', 3, 17, 7, 2, 'saturday ', 7, 0, 'NONE') ,('2021-09-26', '1400-07-04', '1443-02-18', '2021-09-26', 4, 18, 7, 2, 'sunday ', 1, 0, 'NONE') ,('2021-09-27', '1400-07-05', '1443-02-19', '2021-09-27', 5, 19, 7, 2, 'monday ', 2, 0, 'NONE') ,('2021-09-28', '1400-07-06', '1443-02-20', '2021-09-28', 6, 20, 7, 2, 'tuesday ', 3, 0, 'NONE') ,('2021-09-29', '1400-07-07', '1443-02-21', '2021-09-29', 7, 21, 7, 2, 'wednesday', 4, 0, 'NONE') ,('2021-09-30', '1400-07-08', '1443-02-22', '2021-09-30', 8, 22, 7, 2, 'thursday ', 5, 0, 'NONE') ,('2021-10-01', '1400-07-09', '1443-02-23', '2021-10-01', 9, 23, 7, 2, 'friday ', 6, 0, 'NONE') ,('2021-10-02', '1400-07-10', '1443-02-24', '2021-10-02', 10, 24, 7, 2, 'saturday ', 7, 0, 'NONE') ,('2021-10-03', '1400-07-11', '1443-02-25', '2021-10-03', 11, 25, 7, 2, 'sunday ', 1, 0, 'NONE') ,('2021-10-04', '1400-07-12', '1443-02-26', '2021-10-04', 12, 26, 7, 2, 'monday ', 2, 0, 'NONE') ,('2021-10-05', '1400-07-13', '1443-02-27', '2021-10-05', 13, 27, 7, 2, 'tuesday ', 3, 0, 'NONE') ,('2021-10-06', '1400-07-14', '1443-02-28', '2021-10-06', 14, 28, 7, 2, 'wednesday', 4, 0, 'NONE') ,('2021-10-07', '1400-07-15', '1443-02-29', '2021-10-07', 15, 29, 7, 2, 'thursday ', 5, 0, 'NONE') ,('2021-10-08', '1400-07-16', '1443-03-01', '2021-10-08', 16, 1, 7, 3, 'friday ', 6, 0, 'NONE') ,('2021-10-09', '1400-07-17', '1443-03-02', '2021-10-09', 17, 2, 7, 3, 'saturday ', 7, 0, 'NONE') ,('2021-10-10', '1400-07-18', '1443-03-03', '2021-10-10', 18, 3, 7, 3, 'sunday ', 1, 0, 'NONE') ,('2021-10-11', '1400-07-19', '1443-03-04', '2021-10-11', 19, 4, 7, 3, 'monday ', 2, 0, 'NONE') ,('2021-10-12', '1400-07-20', '1443-03-05', '2021-10-12', 20, 5, 7, 3, 'tuesday ', 3, 0, 'NONE') ,('2021-10-13', '1400-07-21', '1443-03-06', '2021-10-13', 21, 6, 7, 3, 'wednesday', 4, 0, 'NONE') ,('2021-10-14', '1400-07-22', '1443-03-07', '2021-10-14', 22, 7, 7, 3, 'thursday ', 5, 0, 'NONE') ,('2021-10-15', '1400-07-23', '1443-03-08', '2021-10-15', 23, 8, 7, 3, 'friday ', 6, 0, 'NONE') ,('2021-10-16', '1400-07-24', '1443-03-09', '2021-10-16', 24, 9, 7, 3, 'saturday ', 7, 0, 'NONE') ,('2021-10-17', '1400-07-25', '1443-03-10', '2021-10-17', 25, 10, 7, 3, 'sunday ', 1, 0, 'NONE') ,('2021-10-18', '1400-07-26', '1443-03-11', '2021-10-18', 26, 11, 7, 3, 'monday ', 2, 0, 'NONE') ,('2021-10-19', '1400-07-27', '1443-03-12', '2021-10-19', 27, 12, 7, 3, 'tuesday ', 3, 0, 'NONE') ,('2021-10-20', '1400-07-28', '1443-03-13', '2021-10-20', 28, 13, 7, 3, 'wednesday', 4, 0, 'NONE') ,('2021-10-21', '1400-07-29', '1443-03-14', '2021-10-21', 29, 14, 7, 3, 'thursday ', 5, 0, 'NONE') ,('2021-10-22', '1400-07-30', '1443-03-15', '2021-10-22', 30, 15, 7, 3, 'friday ', 6, 0, 'NONE') ,('2021-10-23', '1400-08-01', '1443-03-16', '2021-10-23', 1, 16, 8, 3, 'saturday ', 7, 0, 'NONE') ,('2021-10-24', '1400-08-02', '1443-03-17', '2021-10-24', 2, 17, 8, 3, 'sunday ', 1, 0, 'NONE') ,('2021-10-25', '1400-08-03', '1443-03-18', '2021-10-25', 3, 18, 8, 3, 'monday ', 2, 0, 'NONE') ,('2021-10-26', '1400-08-04', '1443-03-19', '2021-10-26', 4, 19, 8, 3, 'tuesday ', 3, 0, 'NONE') ,('2021-10-27', '1400-08-05', '1443-03-20', '2021-10-27', 5, 20, 8, 3, 'wednesday', 4, 0, 'NONE') ,('2021-10-28', '1400-08-06', '1443-03-21', '2021-10-28', 6, 21, 8, 3, 'thursday ', 5, 0, 
'NONE') ,('2021-10-29', '1400-08-07', '1443-03-22', '2021-10-29', 7, 22, 8, 3, 'friday ', 6, 0, 'NONE') ,('2021-10-30', '1400-08-08', '1443-03-23', '2021-10-30', 8, 23, 8, 3, 'saturday ', 7, 0, 'NONE') ,('2021-10-31', '1400-08-09', '1443-03-24', '2021-10-31', 9, 24, 8, 3, 'sunday ', 1, 0, 'NONE') ,('2021-11-01', '1400-08-10', '1443-03-25', '2021-11-01', 10, 25, 8, 3, 'monday ', 2, 0, 'NONE') ,('2021-11-02', '1400-08-11', '1443-03-26', '2021-11-02', 11, 26, 8, 3, 'tuesday ', 3, 0, 'NONE') ,('2021-11-03', '1400-08-12', '1443-03-27', '2021-11-03', 12, 27, 8, 3, 'wednesday', 4, 0, 'NONE') ,('2021-11-04', '1400-08-13', '1443-03-28', '2021-11-04', 13, 28, 8, 3, 'thursday ', 5, 0, 'NONE') ,('2021-11-05', '1400-08-14', '1443-03-29', '2021-11-05', 14, 29, 8, 3, 'friday ', 6, 0, 'NONE') ,('2021-11-06', '1400-08-15', '1443-03-30', '2021-11-06', 15, 30, 8, 3, 'saturday ', 7, 0, 'NONE') ,('2021-11-07', '1400-08-16', '1443-04-01', '2021-11-07', 16, 1, 8, 4, 'sunday ', 1, 0, 'NONE') ,('2021-11-08', '1400-08-17', '1443-04-02', '2021-11-08', 17, 2, 8, 4, 'monday ', 2, 0, 'NONE') ,('2021-11-09', '1400-08-18', '1443-04-03', '2021-11-09', 18, 3, 8, 4, 'tuesday ', 3, 0, 'NONE') ,('2021-11-10', '1400-08-19', '1443-04-04', '2021-11-10', 19, 4, 8, 4, 'wednesday', 4, 0, 'NONE') ,('2021-11-11', '1400-08-20', '1443-04-05', '2021-11-11', 20, 5, 8, 4, 'thursday ', 5, 0, 'NONE') ,('2021-11-12', '1400-08-21', '1443-04-06', '2021-11-12', 21, 6, 8, 4, 'friday ', 6, 0, 'NONE') ,('2021-11-13', '1400-08-22', '1443-04-07', '2021-11-13', 22, 7, 8, 4, 'saturday ', 7, 0, 'NONE') ,('2021-11-14', '1400-08-23', '1443-04-08', '2021-11-14', 23, 8, 8, 4, 'sunday ', 1, 0, 'NONE') ,('2021-11-15', '1400-08-24', '1443-04-09', '2021-11-15', 24, 9, 8, 4, 'monday ', 2, 0, 'NONE') ,('2021-11-16', '1400-08-25', '1443-04-10', '2021-11-16', 25, 10, 8, 4, 'tuesday ', 3, 0, 'NONE') ,('2021-11-17', '1400-08-26', '1443-04-11', '2021-11-17', 26, 11, 8, 4, 'wednesday', 4, 0, 'NONE') ,('2021-11-18', '1400-08-27', '1443-04-12', '2021-11-18', 27, 12, 8, 4, 'thursday ', 5, 0, 'NONE') ,('2021-11-19', '1400-08-28', '1443-04-13', '2021-11-19', 28, 13, 8, 4, 'friday ', 6, 0, 'NONE') ,('2021-11-20', '1400-08-29', '1443-04-14', '2021-11-20', 29, 14, 8, 4, 'saturday ', 7, 0, 'NONE') ,('2021-11-21', '1400-08-30', '1443-04-15', '2021-11-21', 30, 15, 8, 4, 'sunday ', 1, 0, 'NONE') ,('2021-11-22', '1400-09-01', '1443-04-16', '2021-11-22', 1, 16, 9, 4, 'monday ', 2, 0, 'NONE') ,('2021-11-23', '1400-09-02', '1443-04-17', '2021-11-23', 2, 17, 9, 4, 'tuesday ', 3, 0, 'NONE') ,('2021-11-24', '1400-09-03', '1443-04-18', '2021-11-24', 3, 18, 9, 4, 'wednesday', 4, 0, 'NONE') ,('2021-11-25', '1400-09-04', '1443-04-19', '2021-11-25', 4, 19, 9, 4, 'thursday ', 5, 0, 'NONE') ,('2021-11-26', '1400-09-05', '1443-04-20', '2021-11-26', 5, 20, 9, 4, 'friday ', 6, 0, 'NONE') ,('2021-11-27', '1400-09-06', '1443-04-21', '2021-11-27', 6, 21, 9, 4, 'saturday ', 7, 0, 'NONE') ,('2021-11-28', '1400-09-07', '1443-04-22', '2021-11-28', 7, 22, 9, 4, 'sunday ', 1, 0, 'NONE') ,('2021-11-29', '1400-09-08', '1443-04-23', '2021-11-29', 8, 23, 9, 4, 'monday ', 2, 0, 'NONE') ,('2021-11-30', '1400-09-09', '1443-04-24', '2021-11-30', 9, 24, 9, 4, 'tuesday ', 3, 0, 'NONE') ,('2021-12-01', '1400-09-10', '1443-04-25', '2021-12-01', 10, 25, 9, 4, 'wednesday', 4, 0, 'NONE') ,('2021-12-02', '1400-09-11', '1443-04-26', '2021-12-02', 11, 26, 9, 4, 'thursday ', 5, 0, 'NONE') ,('2021-12-03', '1400-09-12', '1443-04-27', '2021-12-03', 12, 27, 9, 4, 'friday ', 6, 0, 'NONE') ,('2021-12-04', '1400-09-13', 
'1443-04-28', '2021-12-04', 13, 28, 9, 4, 'saturday ', 7, 0, 'NONE') ,('2021-12-05', '1400-09-14', '1443-04-29', '2021-12-05', 14, 29, 9, 4, 'sunday ', 1, 0, 'NONE') ,('2021-12-06', '1400-09-15', '1443-05-01', '2021-12-06', 15, 1, 9, 5, 'monday ', 2, 0, 'NONE') ,('2021-12-07', '1400-09-16', '1443-05-02', '2021-12-07', 16, 2, 9, 5, 'tuesday ', 3, 0, 'NONE') ,('2021-12-08', '1400-09-17', '1443-05-03', '2021-12-08', 17, 3, 9, 5, 'wednesday', 4, 0, 'NONE') ,('2021-12-09', '1400-09-18', '1443-05-04', '2021-12-09', 18, 4, 9, 5, 'thursday ', 5, 0, 'NONE') ,('2021-12-10', '1400-09-19', '1443-05-05', '2021-12-10', 19, 5, 9, 5, 'friday ', 6, 0, 'NONE') ,('2021-12-11', '1400-09-20', '1443-05-06', '2021-12-11', 20, 6, 9, 5, 'saturday ', 7, 0, 'NONE') ,('2021-12-12', '1400-09-21', '1443-05-07', '2021-12-12', 21, 7, 9, 5, 'sunday ', 1, 0, 'NONE') ,('2021-12-13', '1400-09-22', '1443-05-08', '2021-12-13', 22, 8, 9, 5, 'monday ', 2, 0, 'NONE') ,('2021-12-14', '1400-09-23', '1443-05-09', '2021-12-14', 23, 9, 9, 5, 'tuesday ', 3, 0, 'NONE') ,('2021-12-15', '1400-09-24', '1443-05-10', '2021-12-15', 24, 10, 9, 5, 'wednesday', 4, 0, 'NONE') ,('2021-12-16', '1400-09-25', '1443-05-11', '2021-12-16', 25, 11, 9, 5, 'thursday ', 5, 0, 'NONE') ,('2021-12-17', '1400-09-26', '1443-05-12', '2021-12-17', 26, 12, 9, 5, 'friday ', 6, 0, 'NONE') ,('2021-12-18', '1400-09-27', '1443-05-13', '2021-12-18', 27, 13, 9, 5, 'saturday ', 7, 0, 'NONE') ,('2021-12-19', '1400-09-28', '1443-05-14', '2021-12-19', 28, 14, 9, 5, 'sunday ', 1, 0, 'NONE') ,('2021-12-20', '1400-09-29', '1443-05-15', '2021-12-20', 29, 15, 9, 5, 'monday ', 2, 0, 'NONE') ,('2021-12-21', '1400-09-30', '1443-05-16', '2021-12-21', 30, 16, 9, 5, 'tuesday ', 3, 0, 'NONE') ,('2021-12-22', '1400-10-01', '1443-05-17', '2021-12-22', 1, 17, 10, 5, 'wednesday', 4, 0, 'NONE') ,('2021-12-23', '1400-10-02', '1443-05-18', '2021-12-23', 2, 18, 10, 5, 'thursday ', 5, 0, 'NONE') ,('2021-12-24', '1400-10-03', '1443-05-19', '2021-12-24', 3, 19, 10, 5, 'friday ', 6, 0, 'NONE') ,('2021-12-25', '1400-10-04', '1443-05-20', '2021-12-25', 4, 20, 10, 5, 'saturday ', 7, 0, 'NONE') ,('2021-12-26', '1400-10-05', '1443-05-21', '2021-12-26', 5, 21, 10, 5, 'sunday ', 1, 0, 'NONE') ,('2021-12-27', '1400-10-06', '1443-05-22', '2021-12-27', 6, 22, 10, 5, 'monday ', 2, 0, 'NONE') ,('2021-12-28', '1400-10-07', '1443-05-23', '2021-12-28', 7, 23, 10, 5, 'tuesday ', 3, 0, 'NONE') ,('2021-12-29', '1400-10-08', '1443-05-24', '2021-12-29', 8, 24, 10, 5, 'wednesday', 4, 0, 'NONE') ,('2021-12-30', '1400-10-09', '1443-05-25', '2021-12-30', 9, 25, 10, 5, 'thursday ', 5, 0, 'NONE') ,('2021-12-31', '1400-10-10', '1443-05-26', '2021-12-31', 10, 26, 10, 5, 'friday ', 6, 0, 'NONE') ,('2022-01-01', '1400-10-11', '1443-05-27', '2022-01-01', 11, 27, 10, 5, 'saturday ', 7, 0, 'NONE') ,('2022-01-02', '1400-10-12', '1443-05-28', '2022-01-02', 12, 28, 10, 5, 'sunday ', 1, 0, 'NONE') ,('2022-01-03', '1400-10-13', '1443-05-29', '2022-01-03', 13, 29, 10, 5, 'monday ', 2, 0, 'NONE') ,('2022-01-04', '1400-10-14', '1443-05-30', '2022-01-04', 14, 30, 10, 5, 'tuesday ', 3, 0, 'NONE') ,('2022-01-05', '1400-10-15', '1443-06-01', '2022-01-05', 15, 1, 10, 6, 'wednesday', 4, 0, 'NONE') ,('2022-01-06', '1400-10-16', '1443-06-02', '2022-01-06', 16, 2, 10, 6, 'thursday ', 5, 0, 'NONE') ,('2022-01-07', '1400-10-17', '1443-06-03', '2022-01-07', 17, 3, 10, 6, 'friday ', 6, 0, 'NONE') ,('2022-01-08', '1400-10-18', '1443-06-04', '2022-01-08', 18, 4, 10, 6, 'saturday ', 7, 0, 'NONE') ,('2022-01-09', '1400-10-19', '1443-06-05', '2022-01-09', 
19, 5, 10, 6, 'sunday ', 1, 0, 'NONE') ,('2022-01-10', '1400-10-20', '1443-06-06', '2022-01-10', 20, 6, 10, 6, 'monday ', 2, 0, 'NONE') ,('2022-01-11', '1400-10-21', '1443-06-07', '2022-01-11', 21, 7, 10, 6, 'tuesday ', 3, 0, 'NONE') ,('2022-01-12', '1400-10-22', '1443-06-08', '2022-01-12', 22, 8, 10, 6, 'wednesday', 4, 0, 'NONE') ,('2022-01-13', '1400-10-23', '1443-06-09', '2022-01-13', 23, 9, 10, 6, 'thursday ', 5, 0, 'NONE') ,('2022-01-14', '1400-10-24', '1443-06-10', '2022-01-14', 24, 10, 10, 6, 'friday ', 6, 0, 'NONE') ,('2022-01-15', '1400-10-25', '1443-06-11', '2022-01-15', 25, 11, 10, 6, 'saturday ', 7, 0, 'NONE') ,('2022-01-16', '1400-10-26', '1443-06-12', '2022-01-16', 26, 12, 10, 6, 'sunday ', 1, 0, 'NONE') ,('2022-01-17', '1400-10-27', '1443-06-13', '2022-01-17', 27, 13, 10, 6, 'monday ', 2, 0, 'NONE') ,('2022-01-18', '1400-10-28', '1443-06-14', '2022-01-18', 28, 14, 10, 6, 'tuesday ', 3, 0, 'NONE') ,('2022-01-19', '1400-10-29', '1443-06-15', '2022-01-19', 29, 15, 10, 6, 'wednesday', 4, 0, 'NONE') ,('2022-01-20', '1400-10-30', '1443-06-16', '2022-01-20', 30, 16, 10, 6, 'thursday ', 5, 0, 'NONE') ,('2022-01-21', '1400-11-01', '1443-06-17', '2022-01-21', 1, 17, 11, 6, 'friday ', 6, 0, 'NONE') ,('2022-01-22', '1400-11-02', '1443-06-18', '2022-01-22', 2, 18, 11, 6, 'saturday ', 7, 0, 'NONE') ,('2022-01-23', '1400-11-03', '1443-06-19', '2022-01-23', 3, 19, 11, 6, 'sunday ', 1, 0, 'NONE') ,('2022-01-24', '1400-11-04', '1443-06-20', '2022-01-24', 4, 20, 11, 6, 'monday ', 2, 0, 'NONE') ,('2022-01-25', '1400-11-05', '1443-06-21', '2022-01-25', 5, 21, 11, 6, 'tuesday ', 3, 0, 'NONE') ,('2022-01-26', '1400-11-06', '1443-06-22', '2022-01-26', 6, 22, 11, 6, 'wednesday', 4, 0, 'NONE') ,('2022-01-27', '1400-11-07', '1443-06-23', '2022-01-27', 7, 23, 11, 6, 'thursday ', 5, 0, 'NONE') ,('2022-01-28', '1400-11-08', '1443-06-24', '2022-01-28', 8, 24, 11, 6, 'friday ', 6, 0, 'NONE') ,('2022-01-29', '1400-11-09', '1443-06-25', '2022-01-29', 9, 25, 11, 6, 'saturday ', 7, 0, 'NONE') ,('2022-01-30', '1400-11-10', '1443-06-26', '2022-01-30', 10, 26, 11, 6, 'sunday ', 1, 0, 'NONE') ,('2022-01-31', '1400-11-11', '1443-06-27', '2022-01-31', 11, 27, 11, 6, 'monday ', 2, 0, 'NONE') ,('2022-02-01', '1400-11-12', '1443-06-28', '2022-02-01', 12, 28, 11, 6, 'tuesday ', 3, 0, 'NONE') ,('2022-02-02', '1400-11-13', '1443-06-29', '2022-02-02', 13, 29, 11, 6, 'wednesday', 4, 0, 'NONE') ,('2022-02-03', '1400-11-14', '1443-07-01', '2022-02-03', 14, 1, 11, 7, 'thursday ', 5, 0, 'NONE') ,('2022-02-04', '1400-11-15', '1443-07-02', '2022-02-04', 15, 2, 11, 7, 'friday ', 6, 0, 'NONE') ,('2022-02-05', '1400-11-16', '1443-07-03', '2022-02-05', 16, 3, 11, 7, 'saturday ', 7, 0, 'NONE') ,('2022-02-06', '1400-11-17', '1443-07-04', '2022-02-06', 17, 4, 11, 7, 'sunday ', 1, 0, 'NONE') ,('2022-02-07', '1400-11-18', '1443-07-05', '2022-02-07', 18, 5, 11, 7, 'monday ', 2, 0, 'NONE') ,('2022-02-08', '1400-11-19', '1443-07-06', '2022-02-08', 19, 6, 11, 7, 'tuesday ', 3, 0, 'NONE') ,('2022-02-09', '1400-11-20', '1443-07-07', '2022-02-09', 20, 7, 11, 7, 'wednesday', 4, 0, 'NONE') ,('2022-02-10', '1400-11-21', '1443-07-08', '2022-02-10', 21, 8, 11, 7, 'thursday ', 5, 0, 'NONE') ,('2022-02-11', '1400-11-22', '1443-07-09', '2022-02-11', 22, 9, 11, 7, 'friday ', 6, 0, 'NONE') ,('2022-02-12', '1400-11-23', '1443-07-10', '2022-02-12', 23, 10, 11, 7, 'saturday ', 7, 0, 'NONE') ,('2022-02-13', '1400-11-24', '1443-07-11', '2022-02-13', 24, 11, 11, 7, 'sunday ', 1, 0, 'NONE') ,('2022-02-14', '1400-11-25', '1443-07-12', '2022-02-14', 25, 12, 11, 7, 
'monday ', 2, 0, 'NONE') ,('2022-02-15', '1400-11-26', '1443-07-13', '2022-02-15', 26, 13, 11, 7, 'tuesday ', 3, 0, 'NONE') ,('2022-02-16', '1400-11-27', '1443-07-14', '2022-02-16', 27, 14, 11, 7, 'wednesday', 4, 0, 'NONE') ,('2022-02-17', '1400-11-28', '1443-07-15', '2022-02-17', 28, 15, 11, 7, 'thursday ', 5, 0, 'NONE') ,('2022-02-18', '1400-11-29', '1443-07-16', '2022-02-18', 29, 16, 11, 7, 'friday ', 6, 0, 'NONE') ,('2022-02-19', '1400-11-30', '1443-07-17', '2022-02-19', 30, 17, 11, 7, 'saturday ', 7, 0, 'NONE') ,('2022-02-20', '1400-12-01', '1443-07-18', '2022-02-20', 1, 18, 12, 7, 'sunday ', 1, 0, 'NONE') ,('2022-02-21', '1400-12-02', '1443-07-19', '2022-02-21', 2, 19, 12, 7, 'monday ', 2, 0, 'NONE') ,('2022-02-22', '1400-12-03', '1443-07-20', '2022-02-22', 3, 20, 12, 7, 'tuesday ', 3, 0, 'NONE') ,('2022-02-23', '1400-12-04', '1443-07-21', '2022-02-23', 4, 21, 12, 7, 'wednesday', 4, 0, 'NONE') ,('2022-02-24', '1400-12-05', '1443-07-22', '2022-02-24', 5, 22, 12, 7, 'thursday ', 5, 0, 'NONE') ,('2022-02-25', '1400-12-06', '1443-07-23', '2022-02-25', 6, 23, 12, 7, 'friday ', 6, 0, 'NONE') ,('2022-02-26', '1400-12-07', '1443-07-24', '2022-02-26', 7, 24, 12, 7, 'saturday ', 7, 0, 'NONE') ,('2022-02-27', '1400-12-08', '1443-07-25', '2022-02-27', 8, 25, 12, 7, 'sunday ', 1, 0, 'NONE') ,('2022-02-28', '1400-12-09', '1443-07-26', '2022-02-28', 9, 26, 12, 7, 'monday ', 2, 0, 'NONE') ,('2022-03-01', '1400-12-10', '1443-07-27', '2022-03-01', 10, 27, 12, 7, 'tuesday ', 3, 0, 'NONE') ,('2022-03-02', '1400-12-11', '1443-07-28', '2022-03-02', 11, 28, 12, 7, 'wednesday', 4, 0, 'NONE') ,('2022-03-03', '1400-12-12', '1443-07-29', '2022-03-03', 12, 29, 12, 7, 'thursday ', 5, 0, 'NONE') ,('2022-03-04', '1400-12-13', '1443-07-30', '2022-03-04', 13, 30, 12, 7, 'friday ', 6, 0, 'NONE') ,('2022-03-05', '1400-12-14', '1443-08-01', '2022-03-05', 14, 1, 12, 8, 'saturday ', 7, 0, 'NONE') ,('2022-03-06', '1400-12-15', '1443-08-02', '2022-03-06', 15, 2, 12, 8, 'sunday ', 1, 0, 'NONE') ,('2022-03-07', '1400-12-16', '1443-08-03', '2022-03-07', 16, 3, 12, 8, 'monday ', 2, 0, 'NONE') ,('2022-03-08', '1400-12-17', '1443-08-04', '2022-03-08', 17, 4, 12, 8, 'tuesday ', 3, 0, 'NONE') ,('2022-03-09', '1400-12-18', '1443-08-05', '2022-03-09', 18, 5, 12, 8, 'wednesday', 4, 0, 'NONE') ,('2022-03-10', '1400-12-19', '1443-08-06', '2022-03-10', 19, 6, 12, 8, 'thursday ', 5, 0, 'NONE') ,('2022-03-11', '1400-12-20', '1443-08-07', '2022-03-11', 20, 7, 12, 8, 'friday ', 6, 0, 'NONE') ,('2022-03-12', '1400-12-21', '1443-08-08', '2022-03-12', 21, 8, 12, 8, 'saturday ', 7, 0, 'NONE') ,('2022-03-13', '1400-12-22', '1443-08-09', '2022-03-13', 22, 9, 12, 8, 'sunday ', 1, 0, 'NONE') ,('2022-03-14', '1400-12-23', '1443-08-10', '2022-03-14', 23, 10, 12, 8, 'monday ', 2, 0, 'NONE') ,('2022-03-15', '1400-12-24', '1443-08-11', '2022-03-15', 24, 11, 12, 8, 'tuesday ', 3, 0, 'NONE') ,('2022-03-16', '1400-12-25', '1443-08-12', '2022-03-16', 25, 12, 12, 8, 'wednesday', 4, 0, 'NONE') ,('2022-03-17', '1400-12-26', '1443-08-13', '2022-03-17', 26, 13, 12, 8, 'thursday ', 5, 0, 'NONE') ,('2022-03-18', '1400-12-27', '1443-08-14', '2022-03-18', 27, 14, 12, 8, 'friday ', 6, 0, 'NONE') ,('2022-03-19', '1400-12-28', '1443-08-15', '2022-03-19', 28, 15, 12, 8, 'saturday ', 7, 0, 'NONE') ,('2022-03-20', '1400-12-29', '1443-08-16', '2022-03-20', 29, 16, 12, 8, 'sunday ', 1, 0, 'NONE') ,('2022-03-21', '1401-01-01', '1443-08-17', '2022-03-21', 1, 17, 1, 8, 'monday ', 2, 0, 'NONE') ,('2022-03-22', '1401-01-02', '1443-08-18', '2022-03-22', 2, 18, 1, 8, 'tuesday ', 
3, 0, 'NONE') ,('2022-03-23', '1401-01-03', '1443-08-19', '2022-03-23', 3, 19, 1, 8, 'wednesday', 4, 0, 'NONE') ,('2022-03-24', '1401-01-04', '1443-08-20', '2022-03-24', 4, 20, 1, 8, 'thursday ', 5, 0, 'NONE') ,('2022-03-25', '1401-01-05', '1443-08-21', '2022-03-25', 5, 21, 1, 8, 'friday ', 6, 0, 'NONE') ,('2022-03-26', '1401-01-06', '1443-08-22', '2022-03-26', 6, 22, 1, 8, 'saturday ', 7, 0, 'NONE') ,('2022-03-27', '1401-01-07', '1443-08-23', '2022-03-27', 7, 23, 1, 8, 'sunday ', 1, 0, 'NONE') ,('2022-03-28', '1401-01-08', '1443-08-24', '2022-03-28', 8, 24, 1, 8, 'monday ', 2, 0, 'NONE') ,('2022-03-29', '1401-01-09', '1443-08-25', '2022-03-29', 9, 25, 1, 8, 'tuesday ', 3, 0, 'NONE') ,('2022-03-30', '1401-01-10', '1443-08-26', '2022-03-30', 10, 26, 1, 8, 'wednesday', 4, 0, 'NONE') ,('2022-03-31', '1401-01-11', '1443-08-27', '2022-03-31', 11, 27, 1, 8, 'thursday ', 5, 0, 'NONE') ,('2022-04-01', '1401-01-12', '1443-08-28', '2022-04-01', 12, 28, 1, 8, 'friday ', 6, 0, 'NONE') ,('2022-04-02', '1401-01-13', '1443-08-29', '2022-04-02', 13, 29, 1, 8, 'saturday ', 7, 0, 'NONE') ,('2022-04-03', '1401-01-14', '1443-09-01', '2022-04-03', 14, 1, 1, 9, 'sunday ', 1, 0, 'NONE') ,('2022-04-04', '1401-01-15', '1443-09-02', '2022-04-04', 15, 2, 1, 9, 'monday ', 2, 0, 'NONE') ,('2022-04-05', '1401-01-16', '1443-09-03', '2022-04-05', 16, 3, 1, 9, 'tuesday ', 3, 0, 'NONE') ,('2022-04-06', '1401-01-17', '1443-09-04', '2022-04-06', 17, 4, 1, 9, 'wednesday', 4, 0, 'NONE') ,('2022-04-07', '1401-01-18', '1443-09-05', '2022-04-07', 18, 5, 1, 9, 'thursday ', 5, 0, 'NONE') ,('2022-04-08', '1401-01-19', '1443-09-06', '2022-04-08', 19, 6, 1, 9, 'friday ', 6, 0, 'NONE') ,('2022-04-09', '1401-01-20', '1443-09-07', '2022-04-09', 20, 7, 1, 9, 'saturday ', 7, 0, 'NONE') ,('2022-04-10', '1401-01-21', '1443-09-08', '2022-04-10', 21, 8, 1, 9, 'sunday ', 1, 0, 'NONE') ,('2022-04-11', '1401-01-22', '1443-09-09', '2022-04-11', 22, 9, 1, 9, 'monday ', 2, 0, 'NONE') ,('2022-04-12', '1401-01-23', '1443-09-10', '2022-04-12', 23, 10, 1, 9, 'tuesday ', 3, 0, 'NONE') ,('2022-04-13', '1401-01-24', '1443-09-11', '2022-04-13', 24, 11, 1, 9, 'wednesday', 4, 0, 'NONE') ,('2022-04-14', '1401-01-25', '1443-09-12', '2022-04-14', 25, 12, 1, 9, 'thursday ', 5, 0, 'NONE') ,('2022-04-15', '1401-01-26', '1443-09-13', '2022-04-15', 26, 13, 1, 9, 'friday ', 6, 0, 'NONE') ,('2022-04-16', '1401-01-27', '1443-09-14', '2022-04-16', 27, 14, 1, 9, 'saturday ', 7, 0, 'NONE') ,('2022-04-17', '1401-01-28', '1443-09-15', '2022-04-17', 28, 15, 1, 9, 'sunday ', 1, 0, 'NONE') ,('2022-04-18', '1401-01-29', '1443-09-16', '2022-04-18', 29, 16, 1, 9, 'monday ', 2, 0, 'NONE') ,('2022-04-19', '1401-01-30', '1443-09-17', '2022-04-19', 30, 17, 1, 9, 'tuesday ', 3, 0, 'NONE') ,('2022-04-20', '1401-01-31', '1443-09-18', '2022-04-20', 31, 18, 1, 9, 'wednesday', 4, 0, 'NONE') ,('2022-04-21', '1401-02-01', '1443-09-19', '2022-04-21', 1, 19, 2, 9, 'thursday ', 5, 0, 'NONE') ,('2022-04-22', '1401-02-02', '1443-09-20', '2022-04-22', 2, 20, 2, 9, 'friday ', 6, 0, 'NONE') ,('2022-04-23', '1401-02-03', '1443-09-21', '2022-04-23', 3, 21, 2, 9, 'saturday ', 7, 0, 'NONE') ,('2022-04-24', '1401-02-04', '1443-09-22', '2022-04-24', 4, 22, 2, 9, 'sunday ', 1, 0, 'NONE') ,('2022-04-25', '1401-02-05', '1443-09-23', '2022-04-25', 5, 23, 2, 9, 'monday ', 2, 0, 'NONE') ,('2022-04-26', '1401-02-06', '1443-09-24', '2022-04-26', 6, 24, 2, 9, 'tuesday ', 3, 0, 'NONE') ,('2022-04-27', '1401-02-07', '1443-09-25', '2022-04-27', 7, 25, 2, 9, 'wednesday', 4, 0, 'NONE') ,('2022-04-28', '1401-02-08', 
'1443-09-26', '2022-04-28', 8, 26, 2, 9, 'thursday ', 5, 0, 'NONE') ,('2022-04-29', '1401-02-09', '1443-09-27', '2022-04-29', 9, 27, 2, 9, 'friday ', 6, 0, 'NONE') ,('2022-04-30', '1401-02-10', '1443-09-28', '2022-04-30', 10, 28, 2, 9, 'saturday ', 7, 0, 'NONE') ,('2022-05-01', '1401-02-11', '1443-09-29', '2022-05-01', 11, 29, 2, 9, 'sunday ', 1, 0, 'NONE') ,('2022-05-02', '1401-02-12', '1443-09-30', '2022-05-02', 12, 30, 2, 9, 'monday ', 2, 0, 'NONE') ,('2022-05-03', '1401-02-13', '1443-10-01', '2022-05-03', 13, 1, 2, 10, 'tuesday ', 3, 0, 'NONE') ,('2022-05-04', '1401-02-14', '1443-10-02', '2022-05-04', 14, 2, 2, 10, 'wednesday', 4, 0, 'NONE') ,('2022-05-05', '1401-02-15', '1443-10-03', '2022-05-05', 15, 3, 2, 10, 'thursday ', 5, 0, 'NONE') ,('2022-05-06', '1401-02-16', '1443-10-04', '2022-05-06', 16, 4, 2, 10, 'friday ', 6, 0, 'NONE') ,('2022-05-07', '1401-02-17', '1443-10-05', '2022-05-07', 17, 5, 2, 10, 'saturday ', 7, 0, 'NONE') ,('2022-05-08', '1401-02-18', '1443-10-06', '2022-05-08', 18, 6, 2, 10, 'sunday ', 1, 0, 'NONE') ,('2022-05-09', '1401-02-19', '1443-10-07', '2022-05-09', 19, 7, 2, 10, 'monday ', 2, 0, 'NONE') ,('2022-05-10', '1401-02-20', '1443-10-08', '2022-05-10', 20, 8, 2, 10, 'tuesday ', 3, 0, 'NONE') ,('2022-05-11', '1401-02-21', '1443-10-09', '2022-05-11', 21, 9, 2, 10, 'wednesday', 4, 0, 'NONE') ,('2022-05-12', '1401-02-22', '1443-10-10', '2022-05-12', 22, 10, 2, 10, 'thursday ', 5, 0, 'NONE') ,('2022-05-13', '1401-02-23', '1443-10-11', '2022-05-13', 23, 11, 2, 10, 'friday ', 6, 0, 'NONE') ,('2022-05-14', '1401-02-24', '1443-10-12', '2022-05-14', 24, 12, 2, 10, 'saturday ', 7, 0, 'NONE') ,('2022-05-15', '1401-02-25', '1443-10-13', '2022-05-15', 25, 13, 2, 10, 'sunday ', 1, 0, 'NONE') ,('2022-05-16', '1401-02-26', '1443-10-14', '2022-05-16', 26, 14, 2, 10, 'monday ', 2, 0, 'NONE') ,('2022-05-17', '1401-02-27', '1443-10-15', '2022-05-17', 27, 15, 2, 10, 'tuesday ', 3, 0, 'NONE') ,('2022-05-18', '1401-02-28', '1443-10-16', '2022-05-18', 28, 16, 2, 10, 'wednesday', 4, 0, 'NONE') ,('2022-05-19', '1401-02-29', '1443-10-17', '2022-05-19', 29, 17, 2, 10, 'thursday ', 5, 0, 'NONE') ,('2022-05-20', '1401-02-30', '1443-10-18', '2022-05-20', 30, 18, 2, 10, 'friday ', 6, 0, 'NONE') ,('2022-05-21', '1401-02-31', '1443-10-19', '2022-05-21', 31, 19, 2, 10, 'saturday ', 7, 0, 'NONE') ,('2022-05-22', '1401-03-01', '1443-10-20', '2022-05-22', 1, 20, 3, 10, 'sunday ', 1, 0, 'NONE') ,('2022-05-23', '1401-03-02', '1443-10-21', '2022-05-23', 2, 21, 3, 10, 'monday ', 2, 0, 'NONE') ,('2022-05-24', '1401-03-03', '1443-10-22', '2022-05-24', 3, 22, 3, 10, 'tuesday ', 3, 0, 'NONE') ,('2022-05-25', '1401-03-04', '1443-10-23', '2022-05-25', 4, 23, 3, 10, 'wednesday', 4, 0, 'NONE') ,('2022-05-26', '1401-03-05', '1443-10-24', '2022-05-26', 5, 24, 3, 10, 'thursday ', 5, 0, 'NONE') ,('2022-05-27', '1401-03-06', '1443-10-25', '2022-05-27', 6, 25, 3, 10, 'friday ', 6, 0, 'NONE') ,('2022-05-28', '1401-03-07', '1443-10-26', '2022-05-28', 7, 26, 3, 10, 'saturday ', 7, 0, 'NONE') ,('2022-05-29', '1401-03-08', '1443-10-27', '2022-05-29', 8, 27, 3, 10, 'sunday ', 1, 0, 'NONE') ,('2022-05-30', '1401-03-09', '1443-10-28', '2022-05-30', 9, 28, 3, 10, 'monday ', 2, 0, 'NONE') ,('2022-05-31', '1401-03-10', '1443-10-29', '2022-05-31', 10, 29, 3, 10, 'tuesday ', 3, 0, 'NONE') ,('2022-06-01', '1401-03-11', '1443-11-01', '2022-06-01', 11, 1, 3, 11, 'wednesday', 4, 0, 'NONE') ,('2022-06-02', '1401-03-12', '1443-11-02', '2022-06-02', 12, 2, 3, 11, 'thursday ', 5, 0, 'NONE') ,('2022-06-03', '1401-03-13', '1443-11-03', 
'2022-06-03', 13, 3, 3, 11, 'friday ', 6, 0, 'NONE') ,('2022-06-04', '1401-03-14', '1443-11-04', '2022-06-04', 14, 4, 3, 11, 'saturday ', 7, 0, 'NONE') ,('2022-06-05', '1401-03-15', '1443-11-05', '2022-06-05', 15, 5, 3, 11, 'sunday ', 1, 0, 'NONE') ,('2022-06-06', '1401-03-16', '1443-11-06', '2022-06-06', 16, 6, 3, 11, 'monday ', 2, 0, 'NONE') ,('2022-06-07', '1401-03-17', '1443-11-07', '2022-06-07', 17, 7, 3, 11, 'tuesday ', 3, 0, 'NONE') ,('2022-06-08', '1401-03-18', '1443-11-08', '2022-06-08', 18, 8, 3, 11, 'wednesday', 4, 0, 'NONE') ,('2022-06-09', '1401-03-19', '1443-11-09', '2022-06-09', 19, 9, 3, 11, 'thursday ', 5, 0, 'NONE') ,('2022-06-10', '1401-03-20', '1443-11-10', '2022-06-10', 20, 10, 3, 11, 'friday ', 6, 0, 'NONE') ,('2022-06-11', '1401-03-21', '1443-11-11', '2022-06-11', 21, 11, 3, 11, 'saturday ', 7, 0, 'NONE') ,('2022-06-12', '1401-03-22', '1443-11-12', '2022-06-12', 22, 12, 3, 11, 'sunday ', 1, 0, 'NONE') ,('2022-06-13', '1401-03-23', '1443-11-13', '2022-06-13', 23, 13, 3, 11, 'monday ', 2, 0, 'NONE') ,('2022-06-14', '1401-03-24', '1443-11-14', '2022-06-14', 24, 14, 3, 11, 'tuesday ', 3, 0, 'NONE') ,('2022-06-15', '1401-03-25', '1443-11-15', '2022-06-15', 25, 15, 3, 11, 'wednesday', 4, 0, 'NONE') ,('2022-06-16', '1401-03-26', '1443-11-16', '2022-06-16', 26, 16, 3, 11, 'thursday ', 5, 0, 'NONE') ,('2022-06-17', '1401-03-27', '1443-11-17', '2022-06-17', 27, 17, 3, 11, 'friday ', 6, 0, 'NONE') ,('2022-06-18', '1401-03-28', '1443-11-18', '2022-06-18', 28, 18, 3, 11, 'saturday ', 7, 0, 'NONE') ,('2022-06-19', '1401-03-29', '1443-11-19', '2022-06-19', 29, 19, 3, 11, 'sunday ', 1, 0, 'NONE') ,('2022-06-20', '1401-03-30', '1443-11-20', '2022-06-20', 30, 20, 3, 11, 'monday ', 2, 0, 'NONE') ,('2022-06-21', '1401-03-31', '1443-11-21', '2022-06-21', 31, 21, 3, 11, 'tuesday ', 3, 0, 'NONE') ,('2022-06-22', '1401-04-01', '1443-11-22', '2022-06-22', 1, 22, 4, 11, 'wednesday', 4, 0, 'NONE') ,('2022-06-23', '1401-04-02', '1443-11-23', '2022-06-23', 2, 23, 4, 11, 'thursday ', 5, 0, 'NONE') ,('2022-06-24', '1401-04-03', '1443-11-24', '2022-06-24', 3, 24, 4, 11, 'friday ', 6, 0, 'NONE') ,('2022-06-25', '1401-04-04', '1443-11-25', '2022-06-25', 4, 25, 4, 11, 'saturday ', 7, 0, 'NONE') ,('2022-06-26', '1401-04-05', '1443-11-26', '2022-06-26', 5, 26, 4, 11, 'sunday ', 1, 0, 'NONE') ,('2022-06-27', '1401-04-06', '1443-11-27', '2022-06-27', 6, 27, 4, 11, 'monday ', 2, 0, 'NONE') ,('2022-06-28', '1401-04-07', '1443-11-28', '2022-06-28', 7, 28, 4, 11, 'tuesday ', 3, 0, 'NONE') ,('2022-06-29', '1401-04-08', '1443-11-29', '2022-06-29', 8, 29, 4, 11, 'wednesday', 4, 0, 'NONE') ,('2022-06-30', '1401-04-09', '1443-11-30', '2022-06-30', 9, 30, 4, 11, 'thursday ', 5, 0, 'NONE') ,('2022-07-01', '1401-04-10', '1443-12-01', '2022-07-01', 10, 1, 4, 12, 'friday ', 6, 0, 'NONE') ,('2022-07-02', '1401-04-11', '1443-12-02', '2022-07-02', 11, 2, 4, 12, 'saturday ', 7, 0, 'NONE') ,('2022-07-03', '1401-04-12', '1443-12-03', '2022-07-03', 12, 3, 4, 12, 'sunday ', 1, 0, 'NONE') ,('2022-07-04', '1401-04-13', '1443-12-04', '2022-07-04', 13, 4, 4, 12, 'monday ', 2, 0, 'NONE') ,('2022-07-05', '1401-04-14', '1443-12-05', '2022-07-05', 14, 5, 4, 12, 'tuesday ', 3, 0, 'NONE') ,('2022-07-06', '1401-04-15', '1443-12-06', '2022-07-06', 15, 6, 4, 12, 'wednesday', 4, 0, 'NONE') ,('2022-07-07', '1401-04-16', '1443-12-07', '2022-07-07', 16, 7, 4, 12, 'thursday ', 5, 0, 'NONE') ,('2022-07-08', '1401-04-17', '1443-12-08', '2022-07-08', 17, 8, 4, 12, 'friday ', 6, 0, 'NONE') ,('2022-07-09', '1401-04-18', '1443-12-09', '2022-07-09', 
18, 9, 4, 12, 'saturday ', 7, 0, 'NONE') ,('2022-07-10', '1401-04-19', '1443-12-10', '2022-07-10', 19, 10, 4, 12, 'sunday ', 1, 0, 'NONE') ,('2022-07-11', '1401-04-20', '1443-12-11', '2022-07-11', 20, 11, 4, 12, 'monday ', 2, 0, 'NONE') ,('2022-07-12', '1401-04-21', '1443-12-12', '2022-07-12', 21, 12, 4, 12, 'tuesday ', 3, 0, 'NONE') ,('2022-07-13', '1401-04-22', '1443-12-13', '2022-07-13', 22, 13, 4, 12, 'wednesday', 4, 0, 'NONE') ,('2022-07-14', '1401-04-23', '1443-12-14', '2022-07-14', 23, 14, 4, 12, 'thursday ', 5, 0, 'NONE') ,('2022-07-15', '1401-04-24', '1443-12-15', '2022-07-15', 24, 15, 4, 12, 'friday ', 6, 0, 'NONE') ,('2022-07-16', '1401-04-25', '1443-12-16', '2022-07-16', 25, 16, 4, 12, 'saturday ', 7, 0, 'NONE') ,('2022-07-17', '1401-04-26', '1443-12-17', '2022-07-17', 26, 17, 4, 12, 'sunday ', 1, 0, 'NONE') ,('2022-07-18', '1401-04-27', '1443-12-18', '2022-07-18', 27, 18, 4, 12, 'monday ', 2, 0, 'NONE') ,('2022-07-19', '1401-04-28', '1443-12-19', '2022-07-19', 28, 19, 4, 12, 'tuesday ', 3, 0, 'NONE') ,('2022-07-20', '1401-04-29', '1443-12-20', '2022-07-20', 29, 20, 4, 12, 'wednesday', 4, 0, 'NONE') ,('2022-07-21', '1401-04-30', '1443-12-21', '2022-07-21', 30, 21, 4, 12, 'thursday ', 5, 0, 'NONE') ,('2022-07-22', '1401-04-31', '1443-12-22', '2022-07-22', 31, 22, 4, 12, 'friday ', 6, 0, 'NONE') ,('2022-07-23', '1401-05-01', '1443-12-23', '2022-07-23', 1, 23, 5, 12, 'saturday ', 7, 0, 'NONE') ,('2022-07-24', '1401-05-02', '1443-12-24', '2022-07-24', 2, 24, 5, 12, 'sunday ', 1, 0, 'NONE') ,('2022-07-25', '1401-05-03', '1443-12-25', '2022-07-25', 3, 25, 5, 12, 'monday ', 2, 0, 'NONE') ,('2022-07-26', '1401-05-04', '1443-12-26', '2022-07-26', 4, 26, 5, 12, 'tuesday ', 3, 0, 'NONE') ,('2022-07-27', '1401-05-05', '1443-12-27', '2022-07-27', 5, 27, 5, 12, 'wednesday', 4, 0, 'NONE') ,('2022-07-28', '1401-05-06', '1443-12-28', '2022-07-28', 6, 28, 5, 12, 'thursday ', 5, 0, 'NONE') ,('2022-07-29', '1401-05-07', '1443-12-29', '2022-07-29', 7, 29, 5, 12, 'friday ', 6, 0, 'NONE') ,('2022-07-30', '1401-05-08', '1444-01-01', '2022-07-30', 8, 1, 5, 1, 'saturday ', 7, 0, 'NONE') ,('2022-07-31', '1401-05-09', '1444-01-02', '2022-07-31', 9, 2, 5, 1, 'sunday ', 1, 0, 'NONE') ,('2022-08-01', '1401-05-10', '1444-01-03', '2022-08-01', 10, 3, 5, 1, 'monday ', 2, 0, 'NONE') ,('2022-08-02', '1401-05-11', '1444-01-04', '2022-08-02', 11, 4, 5, 1, 'tuesday ', 3, 0, 'NONE') ,('2022-08-03', '1401-05-12', '1444-01-05', '2022-08-03', 12, 5, 5, 1, 'wednesday', 4, 0, 'NONE') ,('2022-08-04', '1401-05-13', '1444-01-06', '2022-08-04', 13, 6, 5, 1, 'thursday ', 5, 0, 'NONE') ,('2022-08-05', '1401-05-14', '1444-01-07', '2022-08-05', 14, 7, 5, 1, 'friday ', 6, 0, 'NONE') ,('2022-08-06', '1401-05-15', '1444-01-08', '2022-08-06', 15, 8, 5, 1, 'saturday ', 7, 0, 'NONE') ,('2022-08-07', '1401-05-16', '1444-01-09', '2022-08-07', 16, 9, 5, 1, 'sunday ', 1, 0, 'NONE') ,('2022-08-08', '1401-05-17', '1444-01-10', '2022-08-08', 17, 10, 5, 1, 'monday ', 2, 0, 'NONE') ,('2022-08-09', '1401-05-18', '1444-01-11', '2022-08-09', 18, 11, 5, 1, 'tuesday ', 3, 0, 'NONE') ,('2022-08-10', '1401-05-19', '1444-01-12', '2022-08-10', 19, 12, 5, 1, 'wednesday', 4, 0, 'NONE') ,('2022-08-11', '1401-05-20', '1444-01-13', '2022-08-11', 20, 13, 5, 1, 'thursday ', 5, 0, 'NONE') ,('2022-08-12', '1401-05-21', '1444-01-14', '2022-08-12', 21, 14, 5, 1, 'friday ', 6, 0, 'NONE') ,('2022-08-13', '1401-05-22', '1444-01-15', '2022-08-13', 22, 15, 5, 1, 'saturday ', 7, 0, 'NONE') ,('2022-08-14', '1401-05-23', '1444-01-16', '2022-08-14', 23, 16, 5, 1, 'sunday 
', 1, 0, 'NONE') ,('2022-08-15', '1401-05-24', '1444-01-17', '2022-08-15', 24, 17, 5, 1, 'monday ', 2, 0, 'NONE') ,('2022-08-16', '1401-05-25', '1444-01-18', '2022-08-16', 25, 18, 5, 1, 'tuesday ', 3, 0, 'NONE') ,('2022-08-17', '1401-05-26', '1444-01-19', '2022-08-17', 26, 19, 5, 1, 'wednesday', 4, 0, 'NONE') ,('2022-08-18', '1401-05-27', '1444-01-20', '2022-08-18', 27, 20, 5, 1, 'thursday ', 5, 0, 'NONE') ,('2022-08-19', '1401-05-28', '1444-01-21', '2022-08-19', 28, 21, 5, 1, 'friday ', 6, 0, 'NONE') ,('2022-08-20', '1401-05-29', '1444-01-22', '2022-08-20', 29, 22, 5, 1, 'saturday ', 7, 0, 'NONE') ,('2022-08-21', '1401-05-30', '1444-01-23', '2022-08-21', 30, 23, 5, 1, 'sunday ', 1, 0, 'NONE') ,('2022-08-22', '1401-05-31', '1444-01-24', '2022-08-22', 31, 24, 5, 1, 'monday ', 2, 0, 'NONE') ,('2022-08-23', '1401-06-01', '1444-01-25', '2022-08-23', 1, 25, 6, 1, 'tuesday ', 3, 0, 'NONE') ,('2022-08-24', '1401-06-02', '1444-01-26', '2022-08-24', 2, 26, 6, 1, 'wednesday', 4, 0, 'NONE') ,('2022-08-25', '1401-06-03', '1444-01-27', '2022-08-25', 3, 27, 6, 1, 'thursday ', 5, 0, 'NONE') ,('2022-08-26', '1401-06-04', '1444-01-28', '2022-08-26', 4, 28, 6, 1, 'friday ', 6, 0, 'NONE') ,('2022-08-27', '1401-06-05', '1444-01-29', '2022-08-27', 5, 29, 6, 1, 'saturday ', 7, 0, 'NONE') ,('2022-08-28', '1401-06-06', '1444-01-30', '2022-08-28', 6, 30, 6, 1, 'sunday ', 1, 0, 'NONE') ,('2022-08-29', '1401-06-07', '1444-02-01', '2022-08-29', 7, 1, 6, 2, 'monday ', 2, 0, 'NONE') ,('2022-08-30', '1401-06-08', '1444-02-02', '2022-08-30', 8, 2, 6, 2, 'tuesday ', 3, 0, 'NONE') ,('2022-08-31', '1401-06-09', '1444-02-03', '2022-08-31', 9, 3, 6, 2, 'wednesday', 4, 0, 'NONE') ,('2022-09-01', '1401-06-10', '1444-02-04', '2022-09-01', 10, 4, 6, 2, 'thursday ', 5, 0, 'NONE') ,('2022-09-02', '1401-06-11', '1444-02-05', '2022-09-02', 11, 5, 6, 2, 'friday ', 6, 0, 'NONE') ,('2022-09-03', '1401-06-12', '1444-02-06', '2022-09-03', 12, 6, 6, 2, 'saturday ', 7, 0, 'NONE') ,('2022-09-04', '1401-06-13', '1444-02-07', '2022-09-04', 13, 7, 6, 2, 'sunday ', 1, 0, 'NONE') ,('2022-09-05', '1401-06-14', '1444-02-08', '2022-09-05', 14, 8, 6, 2, 'monday ', 2, 0, 'NONE') ,('2022-09-06', '1401-06-15', '1444-02-09', '2022-09-06', 15, 9, 6, 2, 'tuesday ', 3, 0, 'NONE') ,('2022-09-07', '1401-06-16', '1444-02-10', '2022-09-07', 16, 10, 6, 2, 'wednesday', 4, 0, 'NONE') ,('2022-09-08', '1401-06-17', '1444-02-11', '2022-09-08', 17, 11, 6, 2, 'thursday ', 5, 0, 'NONE') ,('2022-09-09', '1401-06-18', '1444-02-12', '2022-09-09', 18, 12, 6, 2, 'friday ', 6, 0, 'NONE') ,('2022-09-10', '1401-06-19', '1444-02-13', '2022-09-10', 19, 13, 6, 2, 'saturday ', 7, 0, 'NONE') ,('2022-09-11', '1401-06-20', '1444-02-14', '2022-09-11', 20, 14, 6, 2, 'sunday ', 1, 0, 'NONE') ,('2022-09-12', '1401-06-21', '1444-02-15', '2022-09-12', 21, 15, 6, 2, 'monday ', 2, 0, 'NONE') ,('2022-09-13', '1401-06-22', '1444-02-16', '2022-09-13', 22, 16, 6, 2, 'tuesday ', 3, 0, 'NONE') ,('2022-09-14', '1401-06-23', '1444-02-17', '2022-09-14', 23, 17, 6, 2, 'wednesday', 4, 0, 'NONE') ,('2022-09-15', '1401-06-24', '1444-02-18', '2022-09-15', 24, 18, 6, 2, 'thursday ', 5, 0, 'NONE') ,('2022-09-16', '1401-06-25', '1444-02-19', '2022-09-16', 25, 19, 6, 2, 'friday ', 6, 0, 'NONE') ,('2022-09-17', '1401-06-26', '1444-02-20', '2022-09-17', 26, 20, 6, 2, 'saturday ', 7, 0, 'NONE') ,('2022-09-18', '1401-06-27', '1444-02-21', '2022-09-18', 27, 21, 6, 2, 'sunday ', 1, 0, 'NONE') ,('2022-09-19', '1401-06-28', '1444-02-22', '2022-09-19', 28, 22, 6, 2, 'monday ', 2, 0, 'NONE') ,('2022-09-20', 
'1401-06-29', '1444-02-23', '2022-09-20', 29, 23, 6, 2, 'tuesday ', 3, 0, 'NONE') ,('2022-09-21', '1401-06-30', '1444-02-24', '2022-09-21', 30, 24, 6, 2, 'wednesday', 4, 0, 'NONE') ,('2022-09-22', '1401-06-31', '1444-02-25', '2022-09-22', 31, 25, 6, 2, 'thursday ', 5, 0, 'NONE') ,('2022-09-23', '1401-07-01', '1444-02-26', '2022-09-23', 1, 26, 7, 2, 'friday ', 6, 0, 'NONE') ,('2022-09-24', '1401-07-02', '1444-02-27', '2022-09-24', 2, 27, 7, 2, 'saturday ', 7, 0, 'NONE') ,('2022-09-25', '1401-07-03', '1444-02-28', '2022-09-25', 3, 28, 7, 2, 'sunday ', 1, 0, 'NONE') ,('2022-09-26', '1401-07-04', '1444-02-29', '2022-09-26', 4, 29, 7, 2, 'monday ', 2, 0, 'NONE') ,('2022-09-27', '1401-07-05', '1444-03-01', '2022-09-27', 5, 1, 7, 3, 'tuesday ', 3, 0, 'NONE') ,('2022-09-28', '1401-07-06', '1444-03-02', '2022-09-28', 6, 2, 7, 3, 'wednesday', 4, 0, 'NONE') ,('2022-09-29', '1401-07-07', '1444-03-03', '2022-09-29', 7, 3, 7, 3, 'thursday ', 5, 0, 'NONE') ,('2022-09-30', '1401-07-08', '1444-03-04', '2022-09-30', 8, 4, 7, 3, 'friday ', 6, 0, 'NONE') ,('2022-10-01', '1401-07-09', '1444-03-05', '2022-10-01', 9, 5, 7, 3, 'saturday ', 7, 0, 'NONE') ,('2022-10-02', '1401-07-10', '1444-03-06', '2022-10-02', 10, 6, 7, 3, 'sunday ', 1, 0, 'NONE') ,('2022-10-03', '1401-07-11', '1444-03-07', '2022-10-03', 11, 7, 7, 3, 'monday ', 2, 0, 'NONE') ,('2022-10-04', '1401-07-12', '1444-03-08', '2022-10-04', 12, 8, 7, 3, 'tuesday ', 3, 0, 'NONE') ,('2022-10-05', '1401-07-13', '1444-03-09', '2022-10-05', 13, 9, 7, 3, 'wednesday', 4, 0, 'NONE') ,('2022-10-06', '1401-07-14', '1444-03-10', '2022-10-06', 14, 10, 7, 3, 'thursday ', 5, 0, 'NONE') ,('2022-10-07', '1401-07-15', '1444-03-11', '2022-10-07', 15, 11, 7, 3, 'friday ', 6, 0, 'NONE') ,('2022-10-08', '1401-07-16', '1444-03-12', '2022-10-08', 16, 12, 7, 3, 'saturday ', 7, 0, 'NONE') ,('2022-10-09', '1401-07-17', '1444-03-13', '2022-10-09', 17, 13, 7, 3, 'sunday ', 1, 0, 'NONE') ,('2022-10-10', '1401-07-18', '1444-03-14', '2022-10-10', 18, 14, 7, 3, 'monday ', 2, 0, 'NONE') ,('2022-10-11', '1401-07-19', '1444-03-15', '2022-10-11', 19, 15, 7, 3, 'tuesday ', 3, 0, 'NONE') ,('2022-10-12', '1401-07-20', '1444-03-16', '2022-10-12', 20, 16, 7, 3, 'wednesday', 4, 0, 'NONE') ,('2022-10-13', '1401-07-21', '1444-03-17', '2022-10-13', 21, 17, 7, 3, 'thursday ', 5, 0, 'NONE') ,('2022-10-14', '1401-07-22', '1444-03-18', '2022-10-14', 22, 18, 7, 3, 'friday ', 6, 0, 'NONE') ,('2022-10-15', '1401-07-23', '1444-03-19', '2022-10-15', 23, 19, 7, 3, 'saturday ', 7, 0, 'NONE') ,('2022-10-16', '1401-07-24', '1444-03-20', '2022-10-16', 24, 20, 7, 3, 'sunday ', 1, 0, 'NONE') ,('2022-10-17', '1401-07-25', '1444-03-21', '2022-10-17', 25, 21, 7, 3, 'monday ', 2, 0, 'NONE') ,('2022-10-18', '1401-07-26', '1444-03-22', '2022-10-18', 26, 22, 7, 3, 'tuesday ', 3, 0, 'NONE') ,('2022-10-19', '1401-07-27', '1444-03-23', '2022-10-19', 27, 23, 7, 3, 'wednesday', 4, 0, 'NONE') ,('2022-10-20', '1401-07-28', '1444-03-24', '2022-10-20', 28, 24, 7, 3, 'thursday ', 5, 0, 'NONE') ,('2022-10-21', '1401-07-29', '1444-03-25', '2022-10-21', 29, 25, 7, 3, 'friday ', 6, 0, 'NONE') ,('2022-10-22', '1401-07-30', '1444-03-26', '2022-10-22', 30, 26, 7, 3, 'saturday ', 7, 0, 'NONE') ,('2022-10-23', '1401-08-01', '1444-03-27', '2022-10-23', 1, 27, 8, 3, 'sunday ', 1, 0, 'NONE') ,('2022-10-24', '1401-08-02', '1444-03-28', '2022-10-24', 2, 28, 8, 3, 'monday ', 2, 0, 'NONE') ,('2022-10-25', '1401-08-03', '1444-03-29', '2022-10-25', 3, 29, 8, 3, 'tuesday ', 3, 0, 'NONE') ,('2022-10-26', '1401-08-04', '1444-03-30', '2022-10-26', 4, 
30, 8, 3, 'wednesday', 4, 0, 'NONE') ,('2022-10-27', '1401-08-05', '1444-04-01', '2022-10-27', 5, 1, 8, 4, 'thursday ', 5, 0, 'NONE') ,('2022-10-28', '1401-08-06', '1444-04-02', '2022-10-28', 6, 2, 8, 4, 'friday ', 6, 0, 'NONE') ,('2022-10-29', '1401-08-07', '1444-04-03', '2022-10-29', 7, 3, 8, 4, 'saturday ', 7, 0, 'NONE') ,('2022-10-30', '1401-08-08', '1444-04-04', '2022-10-30', 8, 4, 8, 4, 'sunday ', 1, 0, 'NONE') ,('2022-10-31', '1401-08-09', '1444-04-05', '2022-10-31', 9, 5, 8, 4, 'monday ', 2, 0, 'NONE') ,('2022-11-01', '1401-08-10', '1444-04-06', '2022-11-01', 10, 6, 8, 4, 'tuesday ', 3, 0, 'NONE') ,('2022-11-02', '1401-08-11', '1444-04-07', '2022-11-02', 11, 7, 8, 4, 'wednesday', 4, 0, 'NONE') ,('2022-11-03', '1401-08-12', '1444-04-08', '2022-11-03', 12, 8, 8, 4, 'thursday ', 5, 0, 'NONE') ,('2022-11-04', '1401-08-13', '1444-04-09', '2022-11-04', 13, 9, 8, 4, 'friday ', 6, 0, 'NONE') ,('2022-11-05', '1401-08-14', '1444-04-10', '2022-11-05', 14, 10, 8, 4, 'saturday ', 7, 0, 'NONE') ,('2022-11-06', '1401-08-15', '1444-04-11', '2022-11-06', 15, 11, 8, 4, 'sunday ', 1, 0, 'NONE') ,('2022-11-07', '1401-08-16', '1444-04-12', '2022-11-07', 16, 12, 8, 4, 'monday ', 2, 0, 'NONE') ,('2022-11-08', '1401-08-17', '1444-04-13', '2022-11-08', 17, 13, 8, 4, 'tuesday ', 3, 0, 'NONE') ,('2022-11-09', '1401-08-18', '1444-04-14', '2022-11-09', 18, 14, 8, 4, 'wednesday', 4, 0, 'NONE') ,('2022-11-10', '1401-08-19', '1444-04-15', '2022-11-10', 19, 15, 8, 4, 'thursday ', 5, 0, 'NONE') ,('2022-11-11', '1401-08-20', '1444-04-16', '2022-11-11', 20, 16, 8, 4, 'friday ', 6, 0, 'NONE') ,('2022-11-12', '1401-08-21', '1444-04-17', '2022-11-12', 21, 17, 8, 4, 'saturday ', 7, 0, 'NONE') ,('2022-11-13', '1401-08-22', '1444-04-18', '2022-11-13', 22, 18, 8, 4, 'sunday ', 1, 0, 'NONE') ,('2022-11-14', '1401-08-23', '1444-04-19', '2022-11-14', 23, 19, 8, 4, 'monday ', 2, 0, 'NONE') ,('2022-11-15', '1401-08-24', '1444-04-20', '2022-11-15', 24, 20, 8, 4, 'tuesday ', 3, 0, 'NONE') ,('2022-11-16', '1401-08-25', '1444-04-21', '2022-11-16', 25, 21, 8, 4, 'wednesday', 4, 0, 'NONE') ,('2022-11-17', '1401-08-26', '1444-04-22', '2022-11-17', 26, 22, 8, 4, 'thursday ', 5, 0, 'NONE') ,('2022-11-18', '1401-08-27', '1444-04-23', '2022-11-18', 27, 23, 8, 4, 'friday ', 6, 0, 'NONE') ,('2022-11-19', '1401-08-28', '1444-04-24', '2022-11-19', 28, 24, 8, 4, 'saturday ', 7, 0, 'NONE') ,('2022-11-20', '1401-08-29', '1444-04-25', '2022-11-20', 29, 25, 8, 4, 'sunday ', 1, 0, 'NONE') ,('2022-11-21', '1401-08-30', '1444-04-26', '2022-11-21', 30, 26, 8, 4, 'monday ', 2, 0, 'NONE') ,('2022-11-22', '1401-09-01', '1444-04-27', '2022-11-22', 1, 27, 9, 4, 'tuesday ', 3, 0, 'NONE') ,('2022-11-23', '1401-09-02', '1444-04-28', '2022-11-23', 2, 28, 9, 4, 'wednesday', 4, 0, 'NONE') ,('2022-11-24', '1401-09-03', '1444-04-29', '2022-11-24', 3, 29, 9, 4, 'thursday ', 5, 0, 'NONE') ,('2022-11-25', '1401-09-04', '1444-05-01', '2022-11-25', 4, 1, 9, 5, 'friday ', 6, 0, 'NONE') ,('2022-11-26', '1401-09-05', '1444-05-02', '2022-11-26', 5, 2, 9, 5, 'saturday ', 7, 0, 'NONE') ,('2022-11-27', '1401-09-06', '1444-05-03', '2022-11-27', 6, 3, 9, 5, 'sunday ', 1, 0, 'NONE') ,('2022-11-28', '1401-09-07', '1444-05-04', '2022-11-28', 7, 4, 9, 5, 'monday ', 2, 0, 'NONE') ,('2022-11-29', '1401-09-08', '1444-05-05', '2022-11-29', 8, 5, 9, 5, 'tuesday ', 3, 0, 'NONE') ,('2022-11-30', '1401-09-09', '1444-05-06', '2022-11-30', 9, 6, 9, 5, 'wednesday', 4, 0, 'NONE') ,('2022-12-01', '1401-09-10', '1444-05-07', '2022-12-01', 10, 7, 9, 5, 'thursday ', 5, 0, 'NONE') ,('2022-12-02', 
'1401-09-11', '1444-05-08', '2022-12-02', 11, 8, 9, 5, 'friday ', 6, 0, 'NONE') ,('2022-12-03', '1401-09-12', '1444-05-09', '2022-12-03', 12, 9, 9, 5, 'saturday ', 7, 0, 'NONE') ,('2022-12-04', '1401-09-13', '1444-05-10', '2022-12-04', 13, 10, 9, 5, 'sunday ', 1, 0, 'NONE') ,('2022-12-05', '1401-09-14', '1444-05-11', '2022-12-05', 14, 11, 9, 5, 'monday ', 2, 0, 'NONE') ,('2022-12-06', '1401-09-15', '1444-05-12', '2022-12-06', 15, 12, 9, 5, 'tuesday ', 3, 0, 'NONE') ,('2022-12-07', '1401-09-16', '1444-05-13', '2022-12-07', 16, 13, 9, 5, 'wednesday', 4, 0, 'NONE') ,('2022-12-08', '1401-09-17', '1444-05-14', '2022-12-08', 17, 14, 9, 5, 'thursday ', 5, 0, 'NONE') ,('2022-12-09', '1401-09-18', '1444-05-15', '2022-12-09', 18, 15, 9, 5, 'friday ', 6, 0, 'NONE') ,('2022-12-10', '1401-09-19', '1444-05-16', '2022-12-10', 19, 16, 9, 5, 'saturday ', 7, 0, 'NONE') ,('2022-12-11', '1401-09-20', '1444-05-17', '2022-12-11', 20, 17, 9, 5, 'sunday ', 1, 0, 'NONE') ,('2022-12-12', '1401-09-21', '1444-05-18', '2022-12-12', 21, 18, 9, 5, 'monday ', 2, 0, 'NONE') ,('2022-12-13', '1401-09-22', '1444-05-19', '2022-12-13', 22, 19, 9, 5, 'tuesday ', 3, 0, 'NONE') ,('2022-12-14', '1401-09-23', '1444-05-20', '2022-12-14', 23, 20, 9, 5, 'wednesday', 4, 0, 'NONE') ,('2022-12-15', '1401-09-24', '1444-05-21', '2022-12-15', 24, 21, 9, 5, 'thursday ', 5, 0, 'NONE') ,('2022-12-16', '1401-09-25', '1444-05-22', '2022-12-16', 25, 22, 9, 5, 'friday ', 6, 0, 'NONE') ,('2022-12-17', '1401-09-26', '1444-05-23', '2022-12-17', 26, 23, 9, 5, 'saturday ', 7, 0, 'NONE') ,('2022-12-18', '1401-09-27', '1444-05-24', '2022-12-18', 27, 24, 9, 5, 'sunday ', 1, 0, 'NONE') ,('2022-12-19', '1401-09-28', '1444-05-25', '2022-12-19', 28, 25, 9, 5, 'monday ', 2, 0, 'NONE') ,('2022-12-20', '1401-09-29', '1444-05-26', '2022-12-20', 29, 26, 9, 5, 'tuesday ', 3, 0, 'NONE') ,('2022-12-21', '1401-09-30', '1444-05-27', '2022-12-21', 30, 27, 9, 5, 'wednesday', 4, 0, 'NONE') ,('2022-12-22', '1401-10-01', '1444-05-28', '2022-12-22', 1, 28, 10, 5, 'thursday ', 5, 0, 'NONE') ,('2022-12-23', '1401-10-02', '1444-05-29', '2022-12-23', 2, 29, 10, 5, 'friday ', 6, 0, 'NONE') ,('2022-12-24', '1401-10-03', '1444-05-30', '2022-12-24', 3, 30, 10, 5, 'saturday ', 7, 0, 'NONE') ,('2022-12-25', '1401-10-04', '1444-06-01', '2022-12-25', 4, 1, 10, 6, 'sunday ', 1, 0, 'NONE') ,('2022-12-26', '1401-10-05', '1444-06-02', '2022-12-26', 5, 2, 10, 6, 'monday ', 2, 0, 'NONE') ,('2022-12-27', '1401-10-06', '1444-06-03', '2022-12-27', 6, 3, 10, 6, 'tuesday ', 3, 0, 'NONE') ,('2022-12-28', '1401-10-07', '1444-06-04', '2022-12-28', 7, 4, 10, 6, 'wednesday', 4, 0, 'NONE') ,('2022-12-29', '1401-10-08', '1444-06-05', '2022-12-29', 8, 5, 10, 6, 'thursday ', 5, 0, 'NONE') ,('2022-12-30', '1401-10-09', '1444-06-06', '2022-12-30', 9, 6, 10, 6, 'friday ', 6, 0, 'NONE') ,('2022-12-31', '1401-10-10', '1444-06-07', '2022-12-31', 10, 7, 10, 6, 'saturday ', 7, 0, 'NONE') ,('2023-01-01', '1401-10-11', '1444-06-08', '2023-01-01', 11, 8, 10, 6, 'sunday ', 1, 0, 'NONE') ,('2023-01-02', '1401-10-12', '1444-06-09', '2023-01-02', 12, 9, 10, 6, 'monday ', 2, 0, 'NONE') ,('2023-01-03', '1401-10-13', '1444-06-10', '2023-01-03', 13, 10, 10, 6, 'tuesday ', 3, 0, 'NONE') ,('2023-01-04', '1401-10-14', '1444-06-11', '2023-01-04', 14, 11, 10, 6, 'wednesday', 4, 0, 'NONE') ,('2023-01-05', '1401-10-15', '1444-06-12', '2023-01-05', 15, 12, 10, 6, 'thursday ', 5, 0, 'NONE') ,('2023-01-06', '1401-10-16', '1444-06-13', '2023-01-06', 16, 13, 10, 6, 'friday ', 6, 0, 'NONE') ,('2023-01-07', '1401-10-17', '1444-06-14', 
'2023-01-07', 17, 14, 10, 6, 'saturday ', 7, 0, 'NONE') ,('2023-01-08', '1401-10-18', '1444-06-15', '2023-01-08', 18, 15, 10, 6, 'sunday ', 1, 0, 'NONE') ,('2023-01-09', '1401-10-19', '1444-06-16', '2023-01-09', 19, 16, 10, 6, 'monday ', 2, 0, 'NONE') ,('2023-01-10', '1401-10-20', '1444-06-17', '2023-01-10', 20, 17, 10, 6, 'tuesday ', 3, 0, 'NONE') ,('2023-01-11', '1401-10-21', '1444-06-18', '2023-01-11', 21, 18, 10, 6, 'wednesday', 4, 0, 'NONE') ,('2023-01-12', '1401-10-22', '1444-06-19', '2023-01-12', 22, 19, 10, 6, 'thursday ', 5, 0, 'NONE') ,('2023-01-13', '1401-10-23', '1444-06-20', '2023-01-13', 23, 20, 10, 6, 'friday ', 6, 0, 'NONE') ,('2023-01-14', '1401-10-24', '1444-06-21', '2023-01-14', 24, 21, 10, 6, 'saturday ', 7, 0, 'NONE') ,('2023-01-15', '1401-10-25', '1444-06-22', '2023-01-15', 25, 22, 10, 6, 'sunday ', 1, 0, 'NONE') ,('2023-01-16', '1401-10-26', '1444-06-23', '2023-01-16', 26, 23, 10, 6, 'monday ', 2, 0, 'NONE') ,('2023-01-17', '1401-10-27', '1444-06-24', '2023-01-17', 27, 24, 10, 6, 'tuesday ', 3, 0, 'NONE') ,('2023-01-18', '1401-10-28', '1444-06-25', '2023-01-18', 28, 25, 10, 6, 'wednesday', 4, 0, 'NONE') ,('2023-01-19', '1401-10-29', '1444-06-26', '2023-01-19', 29, 26, 10, 6, 'thursday ', 5, 0, 'NONE') ,('2023-01-20', '1401-10-30', '1444-06-27', '2023-01-20', 30, 27, 10, 6, 'friday ', 6, 0, 'NONE') ,('2023-01-21', '1401-11-01', '1444-06-28', '2023-01-21', 1, 28, 11, 6, 'saturday ', 7, 0, 'NONE') ,('2023-01-22', '1401-11-02', '1444-06-29', '2023-01-22', 2, 29, 11, 6, 'sunday ', 1, 0, 'NONE') ,('2023-01-23', '1401-11-03', '1444-07-01', '2023-01-23', 3, 1, 11, 7, 'monday ', 2, 0, 'NONE') ,('2023-01-24', '1401-11-04', '1444-07-02', '2023-01-24', 4, 2, 11, 7, 'tuesday ', 3, 0, 'NONE') ,('2023-01-25', '1401-11-05', '1444-07-03', '2023-01-25', 5, 3, 11, 7, 'wednesday', 4, 0, 'NONE') ,('2023-01-26', '1401-11-06', '1444-07-04', '2023-01-26', 6, 4, 11, 7, 'thursday ', 5, 0, 'NONE') ,('2023-01-27', '1401-11-07', '1444-07-05', '2023-01-27', 7, 5, 11, 7, 'friday ', 6, 0, 'NONE') ,('2023-01-28', '1401-11-08', '1444-07-06', '2023-01-28', 8, 6, 11, 7, 'saturday ', 7, 0, 'NONE') ,('2023-01-29', '1401-11-09', '1444-07-07', '2023-01-29', 9, 7, 11, 7, 'sunday ', 1, 0, 'NONE') ,('2023-01-30', '1401-11-10', '1444-07-08', '2023-01-30', 10, 8, 11, 7, 'monday ', 2, 0, 'NONE') ,('2023-01-31', '1401-11-11', '1444-07-09', '2023-01-31', 11, 9, 11, 7, 'tuesday ', 3, 0, 'NONE') ,('2023-02-01', '1401-11-12', '1444-07-10', '2023-02-01', 12, 10, 11, 7, 'wednesday', 4, 0, 'NONE') ,('2023-02-02', '1401-11-13', '1444-07-11', '2023-02-02', 13, 11, 11, 7, 'thursday ', 5, 0, 'NONE') ,('2023-02-03', '1401-11-14', '1444-07-12', '2023-02-03', 14, 12, 11, 7, 'friday ', 6, 0, 'NONE') ,('2023-02-04', '1401-11-15', '1444-07-13', '2023-02-04', 15, 13, 11, 7, 'saturday ', 7, 0, 'NONE') ,('2023-02-05', '1401-11-16', '1444-07-14', '2023-02-05', 16, 14, 11, 7, 'sunday ', 1, 0, 'NONE') ,('2023-02-06', '1401-11-17', '1444-07-15', '2023-02-06', 17, 15, 11, 7, 'monday ', 2, 0, 'NONE') ,('2023-02-07', '1401-11-18', '1444-07-16', '2023-02-07', 18, 16, 11, 7, 'tuesday ', 3, 0, 'NONE') ,('2023-02-08', '1401-11-19', '1444-07-17', '2023-02-08', 19, 17, 11, 7, 'wednesday', 4, 0, 'NONE') ,('2023-02-09', '1401-11-20', '1444-07-18', '2023-02-09', 20, 18, 11, 7, 'thursday ', 5, 0, 'NONE') ,('2023-02-10', '1401-11-21', '1444-07-19', '2023-02-10', 21, 19, 11, 7, 'friday ', 6, 0, 'NONE') ,('2023-02-11', '1401-11-22', '1444-07-20', '2023-02-11', 22, 20, 11, 7, 'saturday ', 7, 0, 'NONE') ,('2023-02-12', '1401-11-23', '1444-07-21', 
'2023-02-12', 23, 21, 11, 7, 'sunday ', 1, 0, 'NONE') ,('2023-02-13', '1401-11-24', '1444-07-22', '2023-02-13', 24, 22, 11, 7, 'monday ', 2, 0, 'NONE') ,('2023-02-14', '1401-11-25', '1444-07-23', '2023-02-14', 25, 23, 11, 7, 'tuesday ', 3, 0, 'NONE') ,('2023-02-15', '1401-11-26', '1444-07-24', '2023-02-15', 26, 24, 11, 7, 'wednesday', 4, 0, 'NONE') ,('2023-02-16', '1401-11-27', '1444-07-25', '2023-02-16', 27, 25, 11, 7, 'thursday ', 5, 0, 'NONE') ,('2023-02-17', '1401-11-28', '1444-07-26', '2023-02-17', 28, 26, 11, 7, 'friday ', 6, 0, 'NONE') ,('2023-02-18', '1401-11-29', '1444-07-27', '2023-02-18', 29, 27, 11, 7, 'saturday ', 7, 0, 'NONE') ,('2023-02-19', '1401-11-30', '1444-07-28', '2023-02-19', 30, 28, 11, 7, 'sunday ', 1, 0, 'NONE') ,('2023-02-20', '1401-12-01', '1444-07-29', '2023-02-20', 1, 29, 12, 7, 'monday ', 2, 0, 'NONE') ,('2023-02-21', '1401-12-02', '1444-07-30', '2023-02-21', 2, 30, 12, 7, 'tuesday ', 3, 0, 'NONE') ,('2023-02-22', '1401-12-03', '1444-08-01', '2023-02-22', 3, 1, 12, 8, 'wednesday', 4, 0, 'NONE') ,('2023-02-23', '1401-12-04', '1444-08-02', '2023-02-23', 4, 2, 12, 8, 'thursday ', 5, 0, 'NONE') ,('2023-02-24', '1401-12-05', '1444-08-03', '2023-02-24', 5, 3, 12, 8, 'friday ', 6, 0, 'NONE') ,('2023-02-25', '1401-12-06', '1444-08-04', '2023-02-25', 6, 4, 12, 8, 'saturday ', 7, 0, 'NONE') ,('2023-02-26', '1401-12-07', '1444-08-05', '2023-02-26', 7, 5, 12, 8, 'sunday ', 1, 0, 'NONE') ,('2023-02-27', '1401-12-08', '1444-08-06', '2023-02-27', 8, 6, 12, 8, 'monday ', 2, 0, 'NONE') ,('2023-02-28', '1401-12-09', '1444-08-07', '2023-02-28', 9, 7, 12, 8, 'tuesday ', 3, 0, 'NONE') ,('2023-03-01', '1401-12-10', '1444-08-08', '2023-03-01', 10, 8, 12, 8, 'wednesday', 4, 0, 'NONE') ,('2023-03-02', '1401-12-11', '1444-08-09', '2023-03-02', 11, 9, 12, 8, 'thursday ', 5, 0, 'NONE') ,('2023-03-03', '1401-12-12', '1444-08-10', '2023-03-03', 12, 10, 12, 8, 'friday ', 6, 0, 'NONE') ,('2023-03-04', '1401-12-13', '1444-08-11', '2023-03-04', 13, 11, 12, 8, 'saturday ', 7, 0, 'NONE') ,('2023-03-05', '1401-12-14', '1444-08-12', '2023-03-05', 14, 12, 12, 8, 'sunday ', 1, 0, 'NONE') ,('2023-03-06', '1401-12-15', '1444-08-13', '2023-03-06', 15, 13, 12, 8, 'monday ', 2, 0, 'NONE') ,('2023-03-07', '1401-12-16', '1444-08-14', '2023-03-07', 16, 14, 12, 8, 'tuesday ', 3, 0, 'NONE') ,('2023-03-08', '1401-12-17', '1444-08-15', '2023-03-08', 17, 15, 12, 8, 'wednesday', 4, 0, 'NONE') ,('2023-03-09', '1401-12-18', '1444-08-16', '2023-03-09', 18, 16, 12, 8, 'thursday ', 5, 0, 'NONE') ,('2023-03-10', '1401-12-19', '1444-08-17', '2023-03-10', 19, 17, 12, 8, 'friday ', 6, 0, 'NONE') ,('2023-03-11', '1401-12-20', '1444-08-18', '2023-03-11', 20, 18, 12, 8, 'saturday ', 7, 0, 'NONE') ,('2023-03-12', '1401-12-21', '1444-08-19', '2023-03-12', 21, 19, 12, 8, 'sunday ', 1, 0, 'NONE') ,('2023-03-13', '1401-12-22', '1444-08-20', '2023-03-13', 22, 20, 12, 8, 'monday ', 2, 0, 'NONE') ,('2023-03-14', '1401-12-23', '1444-08-21', '2023-03-14', 23, 21, 12, 8, 'tuesday ', 3, 0, 'NONE') ,('2023-03-15', '1401-12-24', '1444-08-22', '2023-03-15', 24, 22, 12, 8, 'wednesday', 4, 0, 'NONE') ,('2023-03-16', '1401-12-25', '1444-08-23', '2023-03-16', 25, 23, 12, 8, 'thursday ', 5, 0, 'NONE') ,('2023-03-17', '1401-12-26', '1444-08-24', '2023-03-17', 26, 24, 12, 8, 'friday ', 6, 0, 'NONE') ,('2023-03-18', '1401-12-27', '1444-08-25', '2023-03-18', 27, 25, 12, 8, 'saturday ', 7, 0, 'NONE') ,('2023-03-19', '1401-12-28', '1444-08-26', '2023-03-19', 28, 26, 12, 8, 'sunday ', 1, 0, 'NONE') ,('2023-03-20', '1401-12-29', '1444-08-27', 
'2023-03-20', 29, 27, 12, 8, 'monday ', 2, 0, 'NONE') ,('2023-03-21', '1402-01-01', '1444-08-28', '2023-03-21', 1, 28, 1, 8, 'tuesday ', 3, 0, 'NONE') ,('2023-03-22', '1402-01-02', '1444-08-29', '2023-03-22', 2, 29, 1, 8, 'wednesday', 4, 0, 'NONE') ,('2023-03-23', '1402-01-03', '1444-09-01', '2023-03-23', 3, 1, 1, 9, 'thursday ', 5, 0, 'NONE') ,('2023-03-24', '1402-01-04', '1444-09-02', '2023-03-24', 4, 2, 1, 9, 'friday ', 6, 0, 'NONE') ,('2023-03-25', '1402-01-05', '1444-09-03', '2023-03-25', 5, 3, 1, 9, 'saturday ', 7, 0, 'NONE') ,('2023-03-26', '1402-01-06', '1444-09-04', '2023-03-26', 6, 4, 1, 9, 'sunday ', 1, 0, 'NONE') ,('2023-03-27', '1402-01-07', '1444-09-05', '2023-03-27', 7, 5, 1, 9, 'monday ', 2, 0, 'NONE') ,('2023-03-28', '1402-01-08', '1444-09-06', '2023-03-28', 8, 6, 1, 9, 'tuesday ', 3, 0, 'NONE') ,('2023-03-29', '1402-01-09', '1444-09-07', '2023-03-29', 9, 7, 1, 9, 'wednesday', 4, 0, 'NONE') ,('2023-03-30', '1402-01-10', '1444-09-08', '2023-03-30', 10, 8, 1, 9, 'thursday ', 5, 0, 'NONE') ,('2023-03-31', '1402-01-11', '1444-09-09', '2023-03-31', 11, 9, 1, 9, 'friday ', 6, 0, 'NONE') ,('2023-04-01', '1402-01-12', '1444-09-10', '2023-04-01', 12, 10, 1, 9, 'saturday ', 7, 0, 'NONE') ,('2023-04-02', '1402-01-13', '1444-09-11', '2023-04-02', 13, 11, 1, 9, 'sunday ', 1, 0, 'NONE') ,('2023-04-03', '1402-01-14', '1444-09-12', '2023-04-03', 14, 12, 1, 9, 'monday ', 2, 0, 'NONE') ,('2023-04-04', '1402-01-15', '1444-09-13', '2023-04-04', 15, 13, 1, 9, 'tuesday ', 3, 0, 'NONE') ,('2023-04-05', '1402-01-16', '1444-09-14', '2023-04-05', 16, 14, 1, 9, 'wednesday', 4, 0, 'NONE') ,('2023-04-06', '1402-01-17', '1444-09-15', '2023-04-06', 17, 15, 1, 9, 'thursday ', 5, 0, 'NONE') ,('2023-04-07', '1402-01-18', '1444-09-16', '2023-04-07', 18, 16, 1, 9, 'friday ', 6, 0, 'NONE') ,('2023-04-08', '1402-01-19', '1444-09-17', '2023-04-08', 19, 17, 1, 9, 'saturday ', 7, 0, 'NONE') ,('2023-04-09', '1402-01-20', '1444-09-18', '2023-04-09', 20, 18, 1, 9, 'sunday ', 1, 0, 'NONE') ,('2023-04-10', '1402-01-21', '1444-09-19', '2023-04-10', 21, 19, 1, 9, 'monday ', 2, 0, 'NONE') ,('2023-04-11', '1402-01-22', '1444-09-20', '2023-04-11', 22, 20, 1, 9, 'tuesday ', 3, 0, 'NONE') ,('2023-04-12', '1402-01-23', '1444-09-21', '2023-04-12', 23, 21, 1, 9, 'wednesday', 4, 0, 'NONE') ,('2023-04-13', '1402-01-24', '1444-09-22', '2023-04-13', 24, 22, 1, 9, 'thursday ', 5, 0, 'NONE') ,('2023-04-14', '1402-01-25', '1444-09-23', '2023-04-14', 25, 23, 1, 9, 'friday ', 6, 0, 'NONE') ,('2023-04-15', '1402-01-26', '1444-09-24', '2023-04-15', 26, 24, 1, 9, 'saturday ', 7, 0, 'NONE') ,('2023-04-16', '1402-01-27', '1444-09-25', '2023-04-16', 27, 25, 1, 9, 'sunday ', 1, 0, 'NONE') ,('2023-04-17', '1402-01-28', '1444-09-26', '2023-04-17', 28, 26, 1, 9, 'monday ', 2, 0, 'NONE') ,('2023-04-18', '1402-01-29', '1444-09-27', '2023-04-18', 29, 27, 1, 9, 'tuesday ', 3, 0, 'NONE') ,('2023-04-19', '1402-01-30', '1444-09-28', '2023-04-19', 30, 28, 1, 9, 'wednesday', 4, 0, 'NONE') ,('2023-04-20', '1402-01-31', '1444-09-29', '2023-04-20', 31, 29, 1, 9, 'thursday ', 5, 0, 'NONE') ,('2023-04-21', '1402-02-01', '1444-09-30', '2023-04-21', 1, 30, 2, 9, 'friday ', 6, 0, 'NONE') ,('2023-04-22', '1402-02-02', '1444-10-01', '2023-04-22', 2, 1, 2, 10, 'saturday ', 7, 0, 'NONE') ,('2023-04-23', '1402-02-03', '1444-10-02', '2023-04-23', 3, 2, 2, 10, 'sunday ', 1, 0, 'NONE') ,('2023-04-24', '1402-02-04', '1444-10-03', '2023-04-24', 4, 3, 2, 10, 'monday ', 2, 0, 'NONE') ,('2023-04-25', '1402-02-05', '1444-10-04', '2023-04-25', 5, 4, 2, 10, 'tuesday ', 3, 0, 
'NONE') ,('2023-04-26', '1402-02-06', '1444-10-05', '2023-04-26', 6, 5, 2, 10, 'wednesday', 4, 0, 'NONE') ,('2023-04-27', '1402-02-07', '1444-10-06', '2023-04-27', 7, 6, 2, 10, 'thursday ', 5, 0, 'NONE') ,('2023-04-28', '1402-02-08', '1444-10-07', '2023-04-28', 8, 7, 2, 10, 'friday ', 6, 0, 'NONE') ,('2023-04-29', '1402-02-09', '1444-10-08', '2023-04-29', 9, 8, 2, 10, 'saturday ', 7, 0, 'NONE') ,('2023-04-30', '1402-02-10', '1444-10-09', '2023-04-30', 10, 9, 2, 10, 'sunday ', 1, 0, 'NONE') ,('2023-05-01', '1402-02-11', '1444-10-10', '2023-05-01', 11, 10, 2, 10, 'monday ', 2, 0, 'NONE') ,('2023-05-02', '1402-02-12', '1444-10-11', '2023-05-02', 12, 11, 2, 10, 'tuesday ', 3, 0, 'NONE') ,('2023-05-03', '1402-02-13', '1444-10-12', '2023-05-03', 13, 12, 2, 10, 'wednesday', 4, 0, 'NONE') ,('2023-05-04', '1402-02-14', '1444-10-13', '2023-05-04', 14, 13, 2, 10, 'thursday ', 5, 0, 'NONE') ,('2023-05-05', '1402-02-15', '1444-10-14', '2023-05-05', 15, 14, 2, 10, 'friday ', 6, 0, 'NONE') ,('2023-05-06', '1402-02-16', '1444-10-15', '2023-05-06', 16, 15, 2, 10, 'saturday ', 7, 0, 'NONE') ,('2023-05-07', '1402-02-17', '1444-10-16', '2023-05-07', 17, 16, 2, 10, 'sunday ', 1, 0, 'NONE') ,('2023-05-08', '1402-02-18', '1444-10-17', '2023-05-08', 18, 17, 2, 10, 'monday ', 2, 0, 'NONE') ,('2023-05-09', '1402-02-19', '1444-10-18', '2023-05-09', 19, 18, 2, 10, 'tuesday ', 3, 0, 'NONE') ,('2023-05-10', '1402-02-20', '1444-10-19', '2023-05-10', 20, 19, 2, 10, 'wednesday', 4, 0, 'NONE') ,('2023-05-11', '1402-02-21', '1444-10-20', '2023-05-11', 21, 20, 2, 10, 'thursday ', 5, 0, 'NONE') ,('2023-05-12', '1402-02-22', '1444-10-21', '2023-05-12', 22, 21, 2, 10, 'friday ', 6, 0, 'NONE') ,('2023-05-13', '1402-02-23', '1444-10-22', '2023-05-13', 23, 22, 2, 10, 'saturday ', 7, 0, 'NONE') ,('2023-05-14', '1402-02-24', '1444-10-23', '2023-05-14', 24, 23, 2, 10, 'sunday ', 1, 0, 'NONE') ,('2023-05-15', '1402-02-25', '1444-10-24', '2023-05-15', 25, 24, 2, 10, 'monday ', 2, 0, 'NONE') ,('2023-05-16', '1402-02-26', '1444-10-25', '2023-05-16', 26, 25, 2, 10, 'tuesday ', 3, 0, 'NONE') ,('2023-05-17', '1402-02-27', '1444-10-26', '2023-05-17', 27, 26, 2, 10, 'wednesday', 4, 0, 'NONE') ,('2023-05-18', '1402-02-28', '1444-10-27', '2023-05-18', 28, 27, 2, 10, 'thursday ', 5, 0, 'NONE') ,('2023-05-19', '1402-02-29', '1444-10-28', '2023-05-19', 29, 28, 2, 10, 'friday ', 6, 0, 'NONE') ,('2023-05-20', '1402-02-30', '1444-10-29', '2023-05-20', 30, 29, 2, 10, 'saturday ', 7, 0, 'NONE') ,('2023-05-21', '1402-02-31', '1444-11-01', '2023-05-21', 31, 1, 2, 11, 'sunday ', 1, 0, 'NONE') ,('2023-05-22', '1402-03-01', '1444-11-02', '2023-05-22', 1, 2, 3, 11, 'monday ', 2, 0, 'NONE') ,('2023-05-23', '1402-03-02', '1444-11-03', '2023-05-23', 2, 3, 3, 11, 'tuesday ', 3, 0, 'NONE') ,('2023-05-24', '1402-03-03', '1444-11-04', '2023-05-24', 3, 4, 3, 11, 'wednesday', 4, 0, 'NONE') ,('2023-05-25', '1402-03-04', '1444-11-05', '2023-05-25', 4, 5, 3, 11, 'thursday ', 5, 0, 'NONE') ,('2023-05-26', '1402-03-05', '1444-11-06', '2023-05-26', 5, 6, 3, 11, 'friday ', 6, 0, 'NONE') ,('2023-05-27', '1402-03-06', '1444-11-07', '2023-05-27', 6, 7, 3, 11, 'saturday ', 7, 0, 'NONE') ,('2023-05-28', '1402-03-07', '1444-11-08', '2023-05-28', 7, 8, 3, 11, 'sunday ', 1, 0, 'NONE') ,('2023-05-29', '1402-03-08', '1444-11-09', '2023-05-29', 8, 9, 3, 11, 'monday ', 2, 0, 'NONE') ,('2023-05-30', '1402-03-09', '1444-11-10', '2023-05-30', 9, 10, 3, 11, 'tuesday ', 3, 0, 'NONE') ,('2023-05-31', '1402-03-10', '1444-11-11', '2023-05-31', 10, 11, 3, 11, 'wednesday', 4, 0, 'NONE') 
,('2023-06-01', '1402-03-11', '1444-11-12', '2023-06-01', 11, 12, 3, 11, 'thursday ', 5, 0, 'NONE') ,('2023-06-02', '1402-03-12', '1444-11-13', '2023-06-02', 12, 13, 3, 11, 'friday ', 6, 0, 'NONE') ,('2023-06-03', '1402-03-13', '1444-11-14', '2023-06-03', 13, 14, 3, 11, 'saturday ', 7, 0, 'NONE') ,('2023-06-04', '1402-03-14', '1444-11-15', '2023-06-04', 14, 15, 3, 11, 'sunday ', 1, 0, 'NONE') ,('2023-06-05', '1402-03-15', '1444-11-16', '2023-06-05', 15, 16, 3, 11, 'monday ', 2, 0, 'NONE') ,('2023-06-06', '1402-03-16', '1444-11-17', '2023-06-06', 16, 17, 3, 11, 'tuesday ', 3, 0, 'NONE') ,('2023-06-07', '1402-03-17', '1444-11-18', '2023-06-07', 17, 18, 3, 11, 'wednesday', 4, 0, 'NONE') ,('2023-06-08', '1402-03-18', '1444-11-19', '2023-06-08', 18, 19, 3, 11, 'thursday ', 5, 0, 'NONE') ,('2023-06-09', '1402-03-19', '1444-11-20', '2023-06-09', 19, 20, 3, 11, 'friday ', 6, 0, 'NONE') ,('2023-06-10', '1402-03-20', '1444-11-21', '2023-06-10', 20, 21, 3, 11, 'saturday ', 7, 0, 'NONE') ,('2023-06-11', '1402-03-21', '1444-11-22', '2023-06-11', 21, 22, 3, 11, 'sunday ', 1, 0, 'NONE') ,('2023-06-12', '1402-03-22', '1444-11-23', '2023-06-12', 22, 23, 3, 11, 'monday ', 2, 0, 'NONE') ,('2023-06-13', '1402-03-23', '1444-11-24', '2023-06-13', 23, 24, 3, 11, 'tuesday ', 3, 0, 'NONE') ,('2023-06-14', '1402-03-24', '1444-11-25', '2023-06-14', 24, 25, 3, 11, 'wednesday', 4, 0, 'NONE') ,('2023-06-15', '1402-03-25', '1444-11-26', '2023-06-15', 25, 26, 3, 11, 'thursday ', 5, 0, 'NONE') ,('2023-06-16', '1402-03-26', '1444-11-27', '2023-06-16', 26, 27, 3, 11, 'friday ', 6, 0, 'NONE') ,('2023-06-17', '1402-03-27', '1444-11-28', '2023-06-17', 27, 28, 3, 11, 'saturday ', 7, 0, 'NONE') ,('2023-06-18', '1402-03-28', '1444-11-29', '2023-06-18', 28, 29, 3, 11, 'sunday ', 1, 0, 'NONE') ,('2023-06-19', '1402-03-29', '1444-11-30', '2023-06-19', 29, 30, 3, 11, 'monday ', 2, 0, 'NONE') ,('2023-06-20', '1402-03-30', '1444-12-01', '2023-06-20', 30, 1, 3, 12, 'tuesday ', 3, 0, 'NONE') ,('2023-06-21', '1402-03-31', '1444-12-02', '2023-06-21', 31, 2, 3, 12, 'wednesday', 4, 0, 'NONE') ,('2023-06-22', '1402-04-01', '1444-12-03', '2023-06-22', 1, 3, 4, 12, 'thursday ', 5, 0, 'NONE') ,('2023-06-23', '1402-04-02', '1444-12-04', '2023-06-23', 2, 4, 4, 12, 'friday ', 6, 0, 'NONE') ,('2023-06-24', '1402-04-03', '1444-12-05', '2023-06-24', 3, 5, 4, 12, 'saturday ', 7, 0, 'NONE') ,('2023-06-25', '1402-04-04', '1444-12-06', '2023-06-25', 4, 6, 4, 12, 'sunday ', 1, 0, 'NONE') ,('2023-06-26', '1402-04-05', '1444-12-07', '2023-06-26', 5, 7, 4, 12, 'monday ', 2, 0, 'NONE') ,('2023-06-27', '1402-04-06', '1444-12-08', '2023-06-27', 6, 8, 4, 12, 'tuesday ', 3, 0, 'NONE') ,('2023-06-28', '1402-04-07', '1444-12-09', '2023-06-28', 7, 9, 4, 12, 'wednesday', 4, 0, 'NONE') ,('2023-06-29', '1402-04-08', '1444-12-10', '2023-06-29', 8, 10, 4, 12, 'thursday ', 5, 0, 'NONE') ,('2023-06-30', '1402-04-09', '1444-12-11', '2023-06-30', 9, 11, 4, 12, 'friday ', 6, 0, 'NONE') ,('2023-07-01', '1402-04-10', '1444-12-12', '2023-07-01', 10, 12, 4, 12, 'saturday ', 7, 0, 'NONE') ,('2023-07-02', '1402-04-11', '1444-12-13', '2023-07-02', 11, 13, 4, 12, 'sunday ', 1, 0, 'NONE') ,('2023-07-03', '1402-04-12', '1444-12-14', '2023-07-03', 12, 14, 4, 12, 'monday ', 2, 0, 'NONE') ,('2023-07-04', '1402-04-13', '1444-12-15', '2023-07-04', 13, 15, 4, 12, 'tuesday ', 3, 0, 'NONE') ,('2023-07-05', '1402-04-14', '1444-12-16', '2023-07-05', 14, 16, 4, 12, 'wednesday', 4, 0, 'NONE') ,('2023-07-06', '1402-04-15', '1444-12-17', '2023-07-06', 15, 17, 4, 12, 'thursday ', 5, 0, 'NONE') 
,('2023-07-07', '1402-04-16', '1444-12-18', '2023-07-07', 16, 18, 4, 12, 'friday ', 6, 0, 'NONE') ,('2023-07-08', '1402-04-17', '1444-12-19', '2023-07-08', 17, 19, 4, 12, 'saturday ', 7, 0, 'NONE') ,('2023-07-09', '1402-04-18', '1444-12-20', '2023-07-09', 18, 20, 4, 12, 'sunday ', 1, 0, 'NONE') ,('2023-07-10', '1402-04-19', '1444-12-21', '2023-07-10', 19, 21, 4, 12, 'monday ', 2, 0, 'NONE') ,('2023-07-11', '1402-04-20', '1444-12-22', '2023-07-11', 20, 22, 4, 12, 'tuesday ', 3, 0, 'NONE') ,('2023-07-12', '1402-04-21', '1444-12-23', '2023-07-12', 21, 23, 4, 12, 'wednesday', 4, 0, 'NONE') ,('2023-07-13', '1402-04-22', '1444-12-24', '2023-07-13', 22, 24, 4, 12, 'thursday ', 5, 0, 'NONE') ,('2023-07-14', '1402-04-23', '1444-12-25', '2023-07-14', 23, 25, 4, 12, 'friday ', 6, 0, 'NONE') ,('2023-07-15', '1402-04-24', '1444-12-26', '2023-07-15', 24, 26, 4, 12, 'saturday ', 7, 0, 'NONE') ,('2023-07-16', '1402-04-25', '1444-12-27', '2023-07-16', 25, 27, 4, 12, 'sunday ', 1, 0, 'NONE') ,('2023-07-17', '1402-04-26', '1444-12-28', '2023-07-17', 26, 28, 4, 12, 'monday ', 2, 0, 'NONE') ,('2023-07-18', '1402-04-27', '1444-12-29', '2023-07-18', 27, 29, 4, 12, 'tuesday ', 3, 0, 'NONE') ,('2023-07-19', '1402-04-28', '1445-01-01', '2023-07-19', 28, 1, 4, 1, 'wednesday', 4, 0, 'NONE') ,('2023-07-20', '1402-04-29', '1445-01-02', '2023-07-20', 29, 2, 4, 1, 'thursday ', 5, 0, 'NONE') ,('2023-07-21', '1402-04-30', '1445-01-03', '2023-07-21', 30, 3, 4, 1, 'friday ', 6, 0, 'NONE') ,('2023-07-22', '1402-04-31', '1445-01-04', '2023-07-22', 31, 4, 4, 1, 'saturday ', 7, 0, 'NONE') ,('2023-07-23', '1402-05-01', '1445-01-05', '2023-07-23', 1, 5, 5, 1, 'sunday ', 1, 0, 'NONE') ,('2023-07-24', '1402-05-02', '1445-01-06', '2023-07-24', 2, 6, 5, 1, 'monday ', 2, 0, 'NONE') ,('2023-07-25', '1402-05-03', '1445-01-07', '2023-07-25', 3, 7, 5, 1, 'tuesday ', 3, 0, 'NONE') ,('2023-07-26', '1402-05-04', '1445-01-08', '2023-07-26', 4, 8, 5, 1, 'wednesday', 4, 0, 'NONE') ,('2023-07-27', '1402-05-05', '1445-01-09', '2023-07-27', 5, 9, 5, 1, 'thursday ', 5, 0, 'NONE') ,('2023-07-28', '1402-05-06', '1445-01-10', '2023-07-28', 6, 10, 5, 1, 'friday ', 6, 0, 'NONE') ,('2023-07-29', '1402-05-07', '1445-01-11', '2023-07-29', 7, 11, 5, 1, 'saturday ', 7, 0, 'NONE') ,('2023-07-30', '1402-05-08', '1445-01-12', '2023-07-30', 8, 12, 5, 1, 'sunday ', 1, 0, 'NONE') ,('2023-07-31', '1402-05-09', '1445-01-13', '2023-07-31', 9, 13, 5, 1, 'monday ', 2, 0, 'NONE') ,('2023-08-01', '1402-05-10', '1445-01-14', '2023-08-01', 10, 14, 5, 1, 'tuesday ', 3, 0, 'NONE') ,('2023-08-02', '1402-05-11', '1445-01-15', '2023-08-02', 11, 15, 5, 1, 'wednesday', 4, 0, 'NONE') ,('2023-08-03', '1402-05-12', '1445-01-16', '2023-08-03', 12, 16, 5, 1, 'thursday ', 5, 0, 'NONE') ,('2023-08-04', '1402-05-13', '1445-01-17', '2023-08-04', 13, 17, 5, 1, 'friday ', 6, 0, 'NONE') ,('2023-08-05', '1402-05-14', '1445-01-18', '2023-08-05', 14, 18, 5, 1, 'saturday ', 7, 0, 'NONE') ,('2023-08-06', '1402-05-15', '1445-01-19', '2023-08-06', 15, 19, 5, 1, 'sunday ', 1, 0, 'NONE') ,('2023-08-07', '1402-05-16', '1445-01-20', '2023-08-07', 16, 20, 5, 1, 'monday ', 2, 0, 'NONE') ,('2023-08-08', '1402-05-17', '1445-01-21', '2023-08-08', 17, 21, 5, 1, 'tuesday ', 3, 0, 'NONE') ,('2023-08-09', '1402-05-18', '1445-01-22', '2023-08-09', 18, 22, 5, 1, 'wednesday', 4, 0, 'NONE') ,('2023-08-10', '1402-05-19', '1445-01-23', '2023-08-10', 19, 23, 5, 1, 'thursday ', 5, 0, 'NONE') ,('2023-08-11', '1402-05-20', '1445-01-24', '2023-08-11', 20, 24, 5, 1, 'friday ', 6, 0, 'NONE') ,('2023-08-12', '1402-05-21', 
'1445-01-25', '2023-08-12', 21, 25, 5, 1, 'saturday ', 7, 0, 'NONE') ,('2023-08-13', '1402-05-22', '1445-01-26', '2023-08-13', 22, 26, 5, 1, 'sunday ', 1, 0, 'NONE') ,('2023-08-14', '1402-05-23', '1445-01-27', '2023-08-14', 23, 27, 5, 1, 'monday ', 2, 0, 'NONE') ,('2023-08-15', '1402-05-24', '1445-01-28', '2023-08-15', 24, 28, 5, 1, 'tuesday ', 3, 0, 'NONE') ,('2023-08-16', '1402-05-25', '1445-01-29', '2023-08-16', 25, 29, 5, 1, 'wednesday', 4, 0, 'NONE') ,('2023-08-17', '1402-05-26', '1445-01-30', '2023-08-17', 26, 30, 5, 1, 'thursday ', 5, 0, 'NONE') ,('2023-08-18', '1402-05-27', '1445-02-01', '2023-08-18', 27, 1, 5, 2, 'friday ', 6, 0, 'NONE') ,('2023-08-19', '1402-05-28', '1445-02-02', '2023-08-19', 28, 2, 5, 2, 'saturday ', 7, 0, 'NONE') ,('2023-08-20', '1402-05-29', '1445-02-03', '2023-08-20', 29, 3, 5, 2, 'sunday ', 1, 0, 'NONE') ,('2023-08-21', '1402-05-30', '1445-02-04', '2023-08-21', 30, 4, 5, 2, 'monday ', 2, 0, 'NONE') ,('2023-08-22', '1402-05-31', '1445-02-05', '2023-08-22', 31, 5, 5, 2, 'tuesday ', 3, 0, 'NONE') ,('2023-08-23', '1402-06-01', '1445-02-06', '2023-08-23', 1, 6, 6, 2, 'wednesday', 4, 0, 'NONE') ,('2023-08-24', '1402-06-02', '1445-02-07', '2023-08-24', 2, 7, 6, 2, 'thursday ', 5, 0, 'NONE') ,('2023-08-25', '1402-06-03', '1445-02-08', '2023-08-25', 3, 8, 6, 2, 'friday ', 6, 0, 'NONE') ,('2023-08-26', '1402-06-04', '1445-02-09', '2023-08-26', 4, 9, 6, 2, 'saturday ', 7, 0, 'NONE') ,('2023-08-27', '1402-06-05', '1445-02-10', '2023-08-27', 5, 10, 6, 2, 'sunday ', 1, 0, 'NONE') ,('2023-08-28', '1402-06-06', '1445-02-11', '2023-08-28', 6, 11, 6, 2, 'monday ', 2, 0, 'NONE') ,('2023-08-29', '1402-06-07', '1445-02-12', '2023-08-29', 7, 12, 6, 2, 'tuesday ', 3, 0, 'NONE') ,('2023-08-30', '1402-06-08', '1445-02-13', '2023-08-30', 8, 13, 6, 2, 'wednesday', 4, 0, 'NONE') ,('2023-08-31', '1402-06-09', '1445-02-14', '2023-08-31', 9, 14, 6, 2, 'thursday ', 5, 0, 'NONE') ,('2023-09-01', '1402-06-10', '1445-02-15', '2023-09-01', 10, 15, 6, 2, 'friday ', 6, 0, 'NONE') ,('2023-09-02', '1402-06-11', '1445-02-16', '2023-09-02', 11, 16, 6, 2, 'saturday ', 7, 0, 'NONE') ,('2023-09-03', '1402-06-12', '1445-02-17', '2023-09-03', 12, 17, 6, 2, 'sunday ', 1, 0, 'NONE') ,('2023-09-04', '1402-06-13', '1445-02-18', '2023-09-04', 13, 18, 6, 2, 'monday ', 2, 0, 'NONE') ,('2023-09-05', '1402-06-14', '1445-02-19', '2023-09-05', 14, 19, 6, 2, 'tuesday ', 3, 0, 'NONE') ,('2023-09-06', '1402-06-15', '1445-02-20', '2023-09-06', 15, 20, 6, 2, 'wednesday', 4, 0, 'NONE') ,('2023-09-07', '1402-06-16', '1445-02-21', '2023-09-07', 16, 21, 6, 2, 'thursday ', 5, 0, 'NONE') ,('2023-09-08', '1402-06-17', '1445-02-22', '2023-09-08', 17, 22, 6, 2, 'friday ', 6, 0, 'NONE') ,('2023-09-09', '1402-06-18', '1445-02-23', '2023-09-09', 18, 23, 6, 2, 'saturday ', 7, 0, 'NONE') ,('2023-09-10', '1402-06-19', '1445-02-24', '2023-09-10', 19, 24, 6, 2, 'sunday ', 1, 0, 'NONE') ,('2023-09-11', '1402-06-20', '1445-02-25', '2023-09-11', 20, 25, 6, 2, 'monday ', 2, 0, 'NONE') ,('2023-09-12', '1402-06-21', '1445-02-26', '2023-09-12', 21, 26, 6, 2, 'tuesday ', 3, 0, 'NONE') ,('2023-09-13', '1402-06-22', '1445-02-27', '2023-09-13', 22, 27, 6, 2, 'wednesday', 4, 0, 'NONE') ,('2023-09-14', '1402-06-23', '1445-02-28', '2023-09-14', 23, 28, 6, 2, 'thursday ', 5, 0, 'NONE') ,('2023-09-15', '1402-06-24', '1445-02-29', '2023-09-15', 24, 29, 6, 2, 'friday ', 6, 0, 'NONE') ,('2023-09-16', '1402-06-25', '1445-03-01', '2023-09-16', 25, 1, 6, 3, 'saturday ', 7, 0, 'NONE') ,('2023-09-17', '1402-06-26', '1445-03-02', '2023-09-17', 26, 2, 6, 3, 
'sunday ', 1, 0, 'NONE') ,('2023-09-18', '1402-06-27', '1445-03-03', '2023-09-18', 27, 3, 6, 3, 'monday ', 2, 0, 'NONE') ,('2023-09-19', '1402-06-28', '1445-03-04', '2023-09-19', 28, 4, 6, 3, 'tuesday ', 3, 0, 'NONE') ,('2023-09-20', '1402-06-29', '1445-03-05', '2023-09-20', 29, 5, 6, 3, 'wednesday', 4, 0, 'NONE') ,('2023-09-21', '1402-06-30', '1445-03-06', '2023-09-21', 30, 6, 6, 3, 'thursday ', 5, 0, 'NONE') ,('2023-09-22', '1402-06-31', '1445-03-07', '2023-09-22', 31, 7, 6, 3, 'friday ', 6, 0, 'NONE') ,('2023-09-23', '1402-07-01', '1445-03-08', '2023-09-23', 1, 8, 7, 3, 'saturday ', 7, 0, 'NONE') ,('2023-09-24', '1402-07-02', '1445-03-09', '2023-09-24', 2, 9, 7, 3, 'sunday ', 1, 0, 'NONE') ,('2023-09-25', '1402-07-03', '1445-03-10', '2023-09-25', 3, 10, 7, 3, 'monday ', 2, 0, 'NONE') ,('2023-09-26', '1402-07-04', '1445-03-11', '2023-09-26', 4, 11, 7, 3, 'tuesday ', 3, 0, 'NONE') ,('2023-09-27', '1402-07-05', '1445-03-12', '2023-09-27', 5, 12, 7, 3, 'wednesday', 4, 0, 'NONE') ,('2023-09-28', '1402-07-06', '1445-03-13', '2023-09-28', 6, 13, 7, 3, 'thursday ', 5, 0, 'NONE') ,('2023-09-29', '1402-07-07', '1445-03-14', '2023-09-29', 7, 14, 7, 3, 'friday ', 6, 0, 'NONE') ,('2023-09-30', '1402-07-08', '1445-03-15', '2023-09-30', 8, 15, 7, 3, 'saturday ', 7, 0, 'NONE') ,('2023-10-01', '1402-07-09', '1445-03-16', '2023-10-01', 9, 16, 7, 3, 'sunday ', 1, 0, 'NONE') ,('2023-10-02', '1402-07-10', '1445-03-17', '2023-10-02', 10, 17, 7, 3, 'monday ', 2, 0, 'NONE') ,('2023-10-03', '1402-07-11', '1445-03-18', '2023-10-03', 11, 18, 7, 3, 'tuesday ', 3, 0, 'NONE') ,('2023-10-04', '1402-07-12', '1445-03-19', '2023-10-04', 12, 19, 7, 3, 'wednesday', 4, 0, 'NONE') ,('2023-10-05', '1402-07-13', '1445-03-20', '2023-10-05', 13, 20, 7, 3, 'thursday ', 5, 0, 'NONE') ,('2023-10-06', '1402-07-14', '1445-03-21', '2023-10-06', 14, 21, 7, 3, 'friday ', 6, 0, 'NONE') ,('2023-10-07', '1402-07-15', '1445-03-22', '2023-10-07', 15, 22, 7, 3, 'saturday ', 7, 0, 'NONE') ,('2023-10-08', '1402-07-16', '1445-03-23', '2023-10-08', 16, 23, 7, 3, 'sunday ', 1, 0, 'NONE') ,('2023-10-09', '1402-07-17', '1445-03-24', '2023-10-09', 17, 24, 7, 3, 'monday ', 2, 0, 'NONE') ,('2023-10-10', '1402-07-18', '1445-03-25', '2023-10-10', 18, 25, 7, 3, 'tuesday ', 3, 0, 'NONE') ,('2023-10-11', '1402-07-19', '1445-03-26', '2023-10-11', 19, 26, 7, 3, 'wednesday', 4, 0, 'NONE') ,('2023-10-12', '1402-07-20', '1445-03-27', '2023-10-12', 20, 27, 7, 3, 'thursday ', 5, 0, 'NONE') ,('2023-10-13', '1402-07-21', '1445-03-28', '2023-10-13', 21, 28, 7, 3, 'friday ', 6, 0, 'NONE') ,('2023-10-14', '1402-07-22', '1445-03-29', '2023-10-14', 22, 29, 7, 3, 'saturday ', 7, 0, 'NONE') ,('2023-10-15', '1402-07-23', '1445-03-30', '2023-10-15', 23, 30, 7, 3, 'sunday ', 1, 0, 'NONE') ,('2023-10-16', '1402-07-24', '1445-04-01', '2023-10-16', 24, 1, 7, 4, 'monday ', 2, 0, 'NONE') ,('2023-10-17', '1402-07-25', '1445-04-02', '2023-10-17', 25, 2, 7, 4, 'tuesday ', 3, 0, 'NONE') ,('2023-10-18', '1402-07-26', '1445-04-03', '2023-10-18', 26, 3, 7, 4, 'wednesday', 4, 0, 'NONE') ,('2023-10-19', '1402-07-27', '1445-04-04', '2023-10-19', 27, 4, 7, 4, 'thursday ', 5, 0, 'NONE') ,('2023-10-20', '1402-07-28', '1445-04-05', '2023-10-20', 28, 5, 7, 4, 'friday ', 6, 0, 'NONE') ,('2023-10-21', '1402-07-29', '1445-04-06', '2023-10-21', 29, 6, 7, 4, 'saturday ', 7, 0, 'NONE') ,('2023-10-22', '1402-07-30', '1445-04-07', '2023-10-22', 30, 7, 7, 4, 'sunday ', 1, 0, 'NONE') ,('2023-10-23', '1402-08-01', '1445-04-08', '2023-10-23', 1, 8, 8, 4, 'monday ', 2, 0, 'NONE') ,('2023-10-24', 
'1402-08-02', '1445-04-09', '2023-10-24', 2, 9, 8, 4, 'tuesday ', 3, 0, 'NONE') ,('2023-10-25', '1402-08-03', '1445-04-10', '2023-10-25', 3, 10, 8, 4, 'wednesday', 4, 0, 'NONE') ,('2023-10-26', '1402-08-04', '1445-04-11', '2023-10-26', 4, 11, 8, 4, 'thursday ', 5, 0, 'NONE') ,('2023-10-27', '1402-08-05', '1445-04-12', '2023-10-27', 5, 12, 8, 4, 'friday ', 6, 0, 'NONE') ,('2023-10-28', '1402-08-06', '1445-04-13', '2023-10-28', 6, 13, 8, 4, 'saturday ', 7, 0, 'NONE') ,('2023-10-29', '1402-08-07', '1445-04-14', '2023-10-29', 7, 14, 8, 4, 'sunday ', 1, 0, 'NONE') ,('2023-10-30', '1402-08-08', '1445-04-15', '2023-10-30', 8, 15, 8, 4, 'monday ', 2, 0, 'NONE') ,('2023-10-31', '1402-08-09', '1445-04-16', '2023-10-31', 9, 16, 8, 4, 'tuesday ', 3, 0, 'NONE') ,('2023-11-01', '1402-08-10', '1445-04-17', '2023-11-01', 10, 17, 8, 4, 'wednesday', 4, 0, 'NONE') ,('2023-11-02', '1402-08-11', '1445-04-18', '2023-11-02', 11, 18, 8, 4, 'thursday ', 5, 0, 'NONE') ,('2023-11-03', '1402-08-12', '1445-04-19', '2023-11-03', 12, 19, 8, 4, 'friday ', 6, 0, 'NONE') ,('2023-11-04', '1402-08-13', '1445-04-20', '2023-11-04', 13, 20, 8, 4, 'saturday ', 7, 0, 'NONE') ,('2023-11-05', '1402-08-14', '1445-04-21', '2023-11-05', 14, 21, 8, 4, 'sunday ', 1, 0, 'NONE') ,('2023-11-06', '1402-08-15', '1445-04-22', '2023-11-06', 15, 22, 8, 4, 'monday ', 2, 0, 'NONE') ,('2023-11-07', '1402-08-16', '1445-04-23', '2023-11-07', 16, 23, 8, 4, 'tuesday ', 3, 0, 'NONE') ,('2023-11-08', '1402-08-17', '1445-04-24', '2023-11-08', 17, 24, 8, 4, 'wednesday', 4, 0, 'NONE') ,('2023-11-09', '1402-08-18', '1445-04-25', '2023-11-09', 18, 25, 8, 4, 'thursday ', 5, 0, 'NONE') ,('2023-11-10', '1402-08-19', '1445-04-26', '2023-11-10', 19, 26, 8, 4, 'friday ', 6, 0, 'NONE') ,('2023-11-11', '1402-08-20', '1445-04-27', '2023-11-11', 20, 27, 8, 4, 'saturday ', 7, 0, 'NONE') ,('2023-11-12', '1402-08-21', '1445-04-28', '2023-11-12', 21, 28, 8, 4, 'sunday ', 1, 0, 'NONE') ,('2023-11-13', '1402-08-22', '1445-04-29', '2023-11-13', 22, 29, 8, 4, 'monday ', 2, 0, 'NONE') ,('2023-11-14', '1402-08-23', '1445-05-01', '2023-11-14', 23, 1, 8, 5, 'tuesday ', 3, 0, 'NONE') ,('2023-11-15', '1402-08-24', '1445-05-02', '2023-11-15', 24, 2, 8, 5, 'wednesday', 4, 0, 'NONE') ,('2023-11-16', '1402-08-25', '1445-05-03', '2023-11-16', 25, 3, 8, 5, 'thursday ', 5, 0, 'NONE') ,('2023-11-17', '1402-08-26', '1445-05-04', '2023-11-17', 26, 4, 8, 5, 'friday ', 6, 0, 'NONE') ,('2023-11-18', '1402-08-27', '1445-05-05', '2023-11-18', 27, 5, 8, 5, 'saturday ', 7, 0, 'NONE') ,('2023-11-19', '1402-08-28', '1445-05-06', '2023-11-19', 28, 6, 8, 5, 'sunday ', 1, 0, 'NONE') ,('2023-11-20', '1402-08-29', '1445-05-07', '2023-11-20', 29, 7, 8, 5, 'monday ', 2, 0, 'NONE') ,('2023-11-21', '1402-08-30', '1445-05-08', '2023-11-21', 30, 8, 8, 5, 'tuesday ', 3, 0, 'NONE') ,('2023-11-22', '1402-09-01', '1445-05-09', '2023-11-22', 1, 9, 9, 5, 'wednesday', 4, 0, 'NONE') ,('2023-11-23', '1402-09-02', '1445-05-10', '2023-11-23', 2, 10, 9, 5, 'thursday ', 5, 0, 'NONE') ,('2023-11-24', '1402-09-03', '1445-05-11', '2023-11-24', 3, 11, 9, 5, 'friday ', 6, 0, 'NONE') ,('2023-11-25', '1402-09-04', '1445-05-12', '2023-11-25', 4, 12, 9, 5, 'saturday ', 7, 0, 'NONE') ,('2023-11-26', '1402-09-05', '1445-05-13', '2023-11-26', 5, 13, 9, 5, 'sunday ', 1, 0, 'NONE') ,('2023-11-27', '1402-09-06', '1445-05-14', '2023-11-27', 6, 14, 9, 5, 'monday ', 2, 0, 'NONE') ,('2023-11-28', '1402-09-07', '1445-05-15', '2023-11-28', 7, 15, 9, 5, 'tuesday ', 3, 0, 'NONE') ,('2023-11-29', '1402-09-08', '1445-05-16', '2023-11-29', 8, 16, 9, 
5, 'wednesday', 4, 0, 'NONE') ,('2023-11-30', '1402-09-09', '1445-05-17', '2023-11-30', 9, 17, 9, 5, 'thursday ', 5, 0, 'NONE') ,('2023-12-01', '1402-09-10', '1445-05-18', '2023-12-01', 10, 18, 9, 5, 'friday ', 6, 0, 'NONE') ,('2023-12-02', '1402-09-11', '1445-05-19', '2023-12-02', 11, 19, 9, 5, 'saturday ', 7, 0, 'NONE') ,('2023-12-03', '1402-09-12', '1445-05-20', '2023-12-03', 12, 20, 9, 5, 'sunday ', 1, 0, 'NONE') ,('2023-12-04', '1402-09-13', '1445-05-21', '2023-12-04', 13, 21, 9, 5, 'monday ', 2, 0, 'NONE') ,('2023-12-05', '1402-09-14', '1445-05-22', '2023-12-05', 14, 22, 9, 5, 'tuesday ', 3, 0, 'NONE') ,('2023-12-06', '1402-09-15', '1445-05-23', '2023-12-06', 15, 23, 9, 5, 'wednesday', 4, 0, 'NONE') ,('2023-12-07', '1402-09-16', '1445-05-24', '2023-12-07', 16, 24, 9, 5, 'thursday ', 5, 0, 'NONE') ,('2023-12-08', '1402-09-17', '1445-05-25', '2023-12-08', 17, 25, 9, 5, 'friday ', 6, 0, 'NONE') ,('2023-12-09', '1402-09-18', '1445-05-26', '2023-12-09', 18, 26, 9, 5, 'saturday ', 7, 0, 'NONE') ,('2023-12-10', '1402-09-19', '1445-05-27', '2023-12-10', 19, 27, 9, 5, 'sunday ', 1, 0, 'NONE') ,('2023-12-11', '1402-09-20', '1445-05-28', '2023-12-11', 20, 28, 9, 5, 'monday ', 2, 0, 'NONE') ,('2023-12-12', '1402-09-21', '1445-05-29', '2023-12-12', 21, 29, 9, 5, 'tuesday ', 3, 0, 'NONE') ,('2023-12-13', '1402-09-22', '1445-05-30', '2023-12-13', 22, 30, 9, 5, 'wednesday', 4, 0, 'NONE') ,('2023-12-14', '1402-09-23', '1445-06-01', '2023-12-14', 23, 1, 9, 6, 'thursday ', 5, 0, 'NONE') ,('2023-12-15', '1402-09-24', '1445-06-02', '2023-12-15', 24, 2, 9, 6, 'friday ', 6, 0, 'NONE') ,('2023-12-16', '1402-09-25', '1445-06-03', '2023-12-16', 25, 3, 9, 6, 'saturday ', 7, 0, 'NONE') ,('2023-12-17', '1402-09-26', '1445-06-04', '2023-12-17', 26, 4, 9, 6, 'sunday ', 1, 0, 'NONE') ,('2023-12-18', '1402-09-27', '1445-06-05', '2023-12-18', 27, 5, 9, 6, 'monday ', 2, 0, 'NONE') ,('2023-12-19', '1402-09-28', '1445-06-06', '2023-12-19', 28, 6, 9, 6, 'tuesday ', 3, 0, 'NONE') ,('2023-12-20', '1402-09-29', '1445-06-07', '2023-12-20', 29, 7, 9, 6, 'wednesday', 4, 0, 'NONE') ,('2023-12-21', '1402-09-30', '1445-06-08', '2023-12-21', 30, 8, 9, 6, 'thursday ', 5, 0, 'NONE') ,('2023-12-22', '1402-10-01', '1445-06-09', '2023-12-22', 1, 9, 10, 6, 'friday ', 6, 0, 'NONE') ,('2023-12-23', '1402-10-02', '1445-06-10', '2023-12-23', 2, 10, 10, 6, 'saturday ', 7, 0, 'NONE') ,('2023-12-24', '1402-10-03', '1445-06-11', '2023-12-24', 3, 11, 10, 6, 'sunday ', 1, 0, 'NONE') ,('2023-12-25', '1402-10-04', '1445-06-12', '2023-12-25', 4, 12, 10, 6, 'monday ', 2, 0, 'NONE') ,('2023-12-26', '1402-10-05', '1445-06-13', '2023-12-26', 5, 13, 10, 6, 'tuesday ', 3, 0, 'NONE') ,('2023-12-27', '1402-10-06', '1445-06-14', '2023-12-27', 6, 14, 10, 6, 'wednesday', 4, 0, 'NONE') ,('2023-12-28', '1402-10-07', '1445-06-15', '2023-12-28', 7, 15, 10, 6, 'thursday ', 5, 0, 'NONE') ,('2023-12-29', '1402-10-08', '1445-06-16', '2023-12-29', 8, 16, 10, 6, 'friday ', 6, 0, 'NONE') ,('2023-12-30', '1402-10-09', '1445-06-17', '2023-12-30', 9, 17, 10, 6, 'saturday ', 7, 0, 'NONE') ,('2023-12-31', '1402-10-10', '1445-06-18', '2023-12-31', 10, 18, 10, 6, 'sunday ', 1, 0, 'NONE') ,('2024-01-01', '1402-10-11', '1445-06-19', '2024-01-01', 11, 19, 10, 6, 'monday ', 2, 0, 'NONE') ,('2024-01-02', '1402-10-12', '1445-06-20', '2024-01-02', 12, 20, 10, 6, 'tuesday ', 3, 0, 'NONE') ,('2024-01-03', '1402-10-13', '1445-06-21', '2024-01-03', 13, 21, 10, 6, 'wednesday', 4, 0, 'NONE') ,('2024-01-04', '1402-10-14', '1445-06-22', '2024-01-04', 14, 22, 10, 6, 'thursday ', 5, 0, 'NONE') 
,('2024-01-05', '1402-10-15', '1445-06-23', '2024-01-05', 15, 23, 10, 6, 'friday ', 6, 0, 'NONE') ,('2024-01-06', '1402-10-16', '1445-06-24', '2024-01-06', 16, 24, 10, 6, 'saturday ', 7, 0, 'NONE') ,('2024-01-07', '1402-10-17', '1445-06-25', '2024-01-07', 17, 25, 10, 6, 'sunday ', 1, 0, 'NONE') ,('2024-01-08', '1402-10-18', '1445-06-26', '2024-01-08', 18, 26, 10, 6, 'monday ', 2, 0, 'NONE') ,('2024-01-09', '1402-10-19', '1445-06-27', '2024-01-09', 19, 27, 10, 6, 'tuesday ', 3, 0, 'NONE') ,('2024-01-10', '1402-10-20', '1445-06-28', '2024-01-10', 20, 28, 10, 6, 'wednesday', 4, 0, 'NONE') ,('2024-01-11', '1402-10-21', '1445-06-29', '2024-01-11', 21, 29, 10, 6, 'thursday ', 5, 0, 'NONE') ,('2024-01-12', '1402-10-22', '1445-07-01', '2024-01-12', 22, 1, 10, 7, 'friday ', 6, 0, 'NONE') ,('2024-01-13', '1402-10-23', '1445-07-02', '2024-01-13', 23, 2, 10, 7, 'saturday ', 7, 0, 'NONE') ,('2024-01-14', '1402-10-24', '1445-07-03', '2024-01-14', 24, 3, 10, 7, 'sunday ', 1, 0, 'NONE') ,('2024-01-15', '1402-10-25', '1445-07-04', '2024-01-15', 25, 4, 10, 7, 'monday ', 2, 0, 'NONE') ,('2024-01-16', '1402-10-26', '1445-07-05', '2024-01-16', 26, 5, 10, 7, 'tuesday ', 3, 0, 'NONE') ,('2024-01-17', '1402-10-27', '1445-07-06', '2024-01-17', 27, 6, 10, 7, 'wednesday', 4, 0, 'NONE') ,('2024-01-18', '1402-10-28', '1445-07-07', '2024-01-18', 28, 7, 10, 7, 'thursday ', 5, 0, 'NONE') ,('2024-01-19', '1402-10-29', '1445-07-08', '2024-01-19', 29, 8, 10, 7, 'friday ', 6, 0, 'NONE') ,('2024-01-20', '1402-10-30', '1445-07-09', '2024-01-20', 30, 9, 10, 7, 'saturday ', 7, 0, 'NONE') ,('2024-01-21', '1402-11-01', '1445-07-10', '2024-01-21', 1, 10, 11, 7, 'sunday ', 1, 0, 'NONE') ,('2024-01-22', '1402-11-02', '1445-07-11', '2024-01-22', 2, 11, 11, 7, 'monday ', 2, 0, 'NONE') ,('2024-01-23', '1402-11-03', '1445-07-12', '2024-01-23', 3, 12, 11, 7, 'tuesday ', 3, 0, 'NONE') ,('2024-01-24', '1402-11-04', '1445-07-13', '2024-01-24', 4, 13, 11, 7, 'wednesday', 4, 0, 'NONE') ,('2024-01-25', '1402-11-05', '1445-07-14', '2024-01-25', 5, 14, 11, 7, 'thursday ', 5, 0, 'NONE') ,('2024-01-26', '1402-11-06', '1445-07-15', '2024-01-26', 6, 15, 11, 7, 'friday ', 6, 0, 'NONE') ,('2024-01-27', '1402-11-07', '1445-07-16', '2024-01-27', 7, 16, 11, 7, 'saturday ', 7, 0, 'NONE') ,('2024-01-28', '1402-11-08', '1445-07-17', '2024-01-28', 8, 17, 11, 7, 'sunday ', 1, 0, 'NONE') ,('2024-01-29', '1402-11-09', '1445-07-18', '2024-01-29', 9, 18, 11, 7, 'monday ', 2, 0, 'NONE') ,('2024-01-30', '1402-11-10', '1445-07-19', '2024-01-30', 10, 19, 11, 7, 'tuesday ', 3, 0, 'NONE') ,('2024-01-31', '1402-11-11', '1445-07-20', '2024-01-31', 11, 20, 11, 7, 'wednesday', 4, 0, 'NONE') ,('2024-02-01', '1402-11-12', '1445-07-21', '2024-02-01', 12, 21, 11, 7, 'thursday ', 5, 0, 'NONE') ,('2024-02-02', '1402-11-13', '1445-07-22', '2024-02-02', 13, 22, 11, 7, 'friday ', 6, 0, 'NONE') ,('2024-02-03', '1402-11-14', '1445-07-23', '2024-02-03', 14, 23, 11, 7, 'saturday ', 7, 0, 'NONE') ,('2024-02-04', '1402-11-15', '1445-07-24', '2024-02-04', 15, 24, 11, 7, 'sunday ', 1, 0, 'NONE') ,('2024-02-05', '1402-11-16', '1445-07-25', '2024-02-05', 16, 25, 11, 7, 'monday ', 2, 0, 'NONE') ,('2024-02-06', '1402-11-17', '1445-07-26', '2024-02-06', 17, 26, 11, 7, 'tuesday ', 3, 0, 'NONE') ,('2024-02-07', '1402-11-18', '1445-07-27', '2024-02-07', 18, 27, 11, 7, 'wednesday', 4, 0, 'NONE') ,('2024-02-08', '1402-11-19', '1445-07-28', '2024-02-08', 19, 28, 11, 7, 'thursday ', 5, 0, 'NONE') ,('2024-02-09', '1402-11-20', '1445-07-29', '2024-02-09', 20, 29, 11, 7, 'friday ', 6, 0, 'NONE') 
,('2024-02-10', '1402-11-21', '1445-07-30', '2024-02-10', 21, 30, 11, 7, 'saturday ', 7, 0, 'NONE') ,('2024-02-11', '1402-11-22', '1445-08-01', '2024-02-11', 22, 1, 11, 8, 'sunday ', 1, 0, 'NONE') ,('2024-02-12', '1402-11-23', '1445-08-02', '2024-02-12', 23, 2, 11, 8, 'monday ', 2, 0, 'NONE') ,('2024-02-13', '1402-11-24', '1445-08-03', '2024-02-13', 24, 3, 11, 8, 'tuesday ', 3, 0, 'NONE') ,('2024-02-14', '1402-11-25', '1445-08-04', '2024-02-14', 25, 4, 11, 8, 'wednesday', 4, 0, 'NONE') ,('2024-02-15', '1402-11-26', '1445-08-05', '2024-02-15', 26, 5, 11, 8, 'thursday ', 5, 0, 'NONE') ,('2024-02-16', '1402-11-27', '1445-08-06', '2024-02-16', 27, 6, 11, 8, 'friday ', 6, 0, 'NONE') ,('2024-02-17', '1402-11-28', '1445-08-07', '2024-02-17', 28, 7, 11, 8, 'saturday ', 7, 0, 'NONE') ,('2024-02-18', '1402-11-29', '1445-08-08', '2024-02-18', 29, 8, 11, 8, 'sunday ', 1, 0, 'NONE') ,('2024-02-19', '1402-11-30', '1445-08-09', '2024-02-19', 30, 9, 11, 8, 'monday ', 2, 0, 'NONE') ,('2024-02-20', '1402-12-01', '1445-08-10', '2024-02-20', 1, 10, 12, 8, 'tuesday ', 3, 0, 'NONE') ,('2024-02-21', '1402-12-02', '1445-08-11', '2024-02-21', 2, 11, 12, 8, 'wednesday', 4, 0, 'NONE') ,('2024-02-22', '1402-12-03', '1445-08-12', '2024-02-22', 3, 12, 12, 8, 'thursday ', 5, 0, 'NONE') ,('2024-02-23', '1402-12-04', '1445-08-13', '2024-02-23', 4, 13, 12, 8, 'friday ', 6, 0, 'NONE') ,('2024-02-24', '1402-12-05', '1445-08-14', '2024-02-24', 5, 14, 12, 8, 'saturday ', 7, 0, 'NONE') ,('2024-02-25', '1402-12-06', '1445-08-15', '2024-02-25', 6, 15, 12, 8, 'sunday ', 1, 0, 'NONE') ,('2024-02-26', '1402-12-07', '1445-08-16', '2024-02-26', 7, 16, 12, 8, 'monday ', 2, 0, 'NONE') ,('2024-02-27', '1402-12-08', '1445-08-17', '2024-02-27', 8, 17, 12, 8, 'tuesday ', 3, 0, 'NONE') ,('2024-02-28', '1402-12-09', '1445-08-18', '2024-02-28', 9, 18, 12, 8, 'wednesday', 4, 0, 'NONE') ,('2024-02-29', '1402-12-10', '1445-08-19', '2024-02-29', 10, 19, 12, 8, 'thursday ', 5, 0, 'NONE') ,('2024-03-01', '1402-12-11', '1445-08-20', '2024-03-01', 11, 20, 12, 8, 'friday ', 6, 0, 'NONE') ,('2024-03-02', '1402-12-12', '1445-08-21', '2024-03-02', 12, 21, 12, 8, 'saturday ', 7, 0, 'NONE') ,('2024-03-03', '1402-12-13', '1445-08-22', '2024-03-03', 13, 22, 12, 8, 'sunday ', 1, 0, 'NONE') ,('2024-03-04', '1402-12-14', '1445-08-23', '2024-03-04', 14, 23, 12, 8, 'monday ', 2, 0, 'NONE') ,('2024-03-05', '1402-12-15', '1445-08-24', '2024-03-05', 15, 24, 12, 8, 'tuesday ', 3, 0, 'NONE') ,('2024-03-06', '1402-12-16', '1445-08-25', '2024-03-06', 16, 25, 12, 8, 'wednesday', 4, 0, 'NONE') ,('2024-03-07', '1402-12-17', '1445-08-26', '2024-03-07', 17, 26, 12, 8, 'thursday ', 5, 0, 'NONE') ,('2024-03-08', '1402-12-18', '1445-08-27', '2024-03-08', 18, 27, 12, 8, 'friday ', 6, 0, 'NONE') ,('2024-03-09', '1402-12-19', '1445-08-28', '2024-03-09', 19, 28, 12, 8, 'saturday ', 7, 0, 'NONE') ,('2024-03-10', '1402-12-20', '1445-08-29', '2024-03-10', 20, 29, 12, 8, 'sunday ', 1, 0, 'NONE') ,('2024-03-11', '1402-12-21', '1445-09-01', '2024-03-11', 21, 1, 12, 9, 'monday ', 2, 0, 'NONE') ,('2024-03-12', '1402-12-22', '1445-09-02', '2024-03-12', 22, 2, 12, 9, 'tuesday ', 3, 0, 'NONE') ,('2024-03-13', '1402-12-23', '1445-09-03', '2024-03-13', 23, 3, 12, 9, 'wednesday', 4, 0, 'NONE') ,('2024-03-14', '1402-12-24', '1445-09-04', '2024-03-14', 24, 4, 12, 9, 'thursday ', 5, 0, 'NONE') ,('2024-03-15', '1402-12-25', '1445-09-05', '2024-03-15', 25, 5, 12, 9, 'friday ', 6, 0, 'NONE') ,('2024-03-16', '1402-12-26', '1445-09-06', '2024-03-16', 26, 6, 12, 9, 'saturday ', 7, 0, 'NONE') 
,('2024-03-17', '1402-12-27', '1445-09-07', '2024-03-17', 27, 7, 12, 9, 'sunday ', 1, 0, 'NONE') ,('2024-03-18', '1402-12-28', '1445-09-08', '2024-03-18', 28, 8, 12, 9, 'monday ', 2, 0, 'NONE') ,('2024-03-19', '1402-12-29', '1445-09-09', '2024-03-19', 29, 9, 12, 9, 'tuesday ', 3, 0, 'NONE') ,('2024-03-20', '1403-01-01', '1445-09-10', '2024-03-20', 1, 10, 1, 9, 'wednesday', 4, 0, 'NONE') ,('2024-03-21', '1403-01-02', '1445-09-11', '2024-03-21', 2, 11, 1, 9, 'thursday ', 5, 0, 'NONE') ,('2024-03-22', '1403-01-03', '1445-09-12', '2024-03-22', 3, 12, 1, 9, 'friday ', 6, 0, 'NONE') ,('2024-03-23', '1403-01-04', '1445-09-13', '2024-03-23', 4, 13, 1, 9, 'saturday ', 7, 0, 'NONE') ,('2024-03-24', '1403-01-05', '1445-09-14', '2024-03-24', 5, 14, 1, 9, 'sunday ', 1, 0, 'NONE') ,('2024-03-25', '1403-01-06', '1445-09-15', '2024-03-25', 6, 15, 1, 9, 'monday ', 2, 0, 'NONE') ,('2024-03-26', '1403-01-07', '1445-09-16', '2024-03-26', 7, 16, 1, 9, 'tuesday ', 3, 0, 'NONE') ,('2024-03-27', '1403-01-08', '1445-09-17', '2024-03-27', 8, 17, 1, 9, 'wednesday', 4, 0, 'NONE') ,('2024-03-28', '1403-01-09', '1445-09-18', '2024-03-28', 9, 18, 1, 9, 'thursday ', 5, 0, 'NONE') ,('2024-03-29', '1403-01-10', '1445-09-19', '2024-03-29', 10, 19, 1, 9, 'friday ', 6, 0, 'NONE') ,('2024-03-30', '1403-01-11', '1445-09-20', '2024-03-30', 11, 20, 1, 9, 'saturday ', 7, 0, 'NONE') ,('2024-03-31', '1403-01-12', '1445-09-21', '2024-03-31', 12, 21, 1, 9, 'sunday ', 1, 0, 'NONE') ,('2024-04-01', '1403-01-13', '1445-09-22', '2024-04-01', 13, 22, 1, 9, 'monday ', 2, 0, 'NONE') ,('2024-04-02', '1403-01-14', '1445-09-23', '2024-04-02', 14, 23, 1, 9, 'tuesday ', 3, 0, 'NONE') ,('2024-04-03', '1403-01-15', '1445-09-24', '2024-04-03', 15, 24, 1, 9, 'wednesday', 4, 0, 'NONE') ,('2024-04-04', '1403-01-16', '1445-09-25', '2024-04-04', 16, 25, 1, 9, 'thursday ', 5, 0, 'NONE') ,('2024-04-05', '1403-01-17', '1445-09-26', '2024-04-05', 17, 26, 1, 9, 'friday ', 6, 0, 'NONE') ,('2024-04-06', '1403-01-18', '1445-09-27', '2024-04-06', 18, 27, 1, 9, 'saturday ', 7, 0, 'NONE') ,('2024-04-07', '1403-01-19', '1445-09-28', '2024-04-07', 19, 28, 1, 9, 'sunday ', 1, 0, 'NONE') ,('2024-04-08', '1403-01-20', '1445-09-29', '2024-04-08', 20, 29, 1, 9, 'monday ', 2, 0, 'NONE') ,('2024-04-09', '1403-01-21', '1445-09-30', '2024-04-09', 21, 30, 1, 9, 'tuesday ', 3, 0, 'NONE') ,('2024-04-10', '1403-01-22', '1445-10-01', '2024-04-10', 22, 1, 1, 10, 'wednesday', 4, 0, 'NONE') ,('2024-04-11', '1403-01-23', '1445-10-02', '2024-04-11', 23, 2, 1, 10, 'thursday ', 5, 0, 'NONE') ,('2024-04-12', '1403-01-24', '1445-10-03', '2024-04-12', 24, 3, 1, 10, 'friday ', 6, 0, 'NONE') ,('2024-04-13', '1403-01-25', '1445-10-04', '2024-04-13', 25, 4, 1, 10, 'saturday ', 7, 0, 'NONE') ,('2024-04-14', '1403-01-26', '1445-10-05', '2024-04-14', 26, 5, 1, 10, 'sunday ', 1, 0, 'NONE') ,('2024-04-15', '1403-01-27', '1445-10-06', '2024-04-15', 27, 6, 1, 10, 'monday ', 2, 0, 'NONE') ,('2024-04-16', '1403-01-28', '1445-10-07', '2024-04-16', 28, 7, 1, 10, 'tuesday ', 3, 0, 'NONE') ,('2024-04-17', '1403-01-29', '1445-10-08', '2024-04-17', 29, 8, 1, 10, 'wednesday', 4, 0, 'NONE') ,('2024-04-18', '1403-01-30', '1445-10-09', '2024-04-18', 30, 9, 1, 10, 'thursday ', 5, 0, 'NONE') ,('2024-04-19', '1403-01-31', '1445-10-10', '2024-04-19', 31, 10, 1, 10, 'friday ', 6, 0, 'NONE') ,('2024-04-20', '1403-02-01', '1445-10-11', '2024-04-20', 1, 11, 2, 10, 'saturday ', 7, 0, 'NONE') ,('2024-04-21', '1403-02-02', '1445-10-12', '2024-04-21', 2, 12, 2, 10, 'sunday ', 1, 0, 'NONE') ,('2024-04-22', '1403-02-03', 
'1445-10-13', '2024-04-22', 3, 13, 2, 10, 'monday ', 2, 0, 'NONE') ,('2024-04-23', '1403-02-04', '1445-10-14', '2024-04-23', 4, 14, 2, 10, 'tuesday ', 3, 0, 'NONE') ,('2024-04-24', '1403-02-05', '1445-10-15', '2024-04-24', 5, 15, 2, 10, 'wednesday', 4, 0, 'NONE') ,('2024-04-25', '1403-02-06', '1445-10-16', '2024-04-25', 6, 16, 2, 10, 'thursday ', 5, 0, 'NONE') ,('2024-04-26', '1403-02-07', '1445-10-17', '2024-04-26', 7, 17, 2, 10, 'friday ', 6, 0, 'NONE') ,('2024-04-27', '1403-02-08', '1445-10-18', '2024-04-27', 8, 18, 2, 10, 'saturday ', 7, 0, 'NONE') ,('2024-04-28', '1403-02-09', '1445-10-19', '2024-04-28', 9, 19, 2, 10, 'sunday ', 1, 0, 'NONE') ,('2024-04-29', '1403-02-10', '1445-10-20', '2024-04-29', 10, 20, 2, 10, 'monday ', 2, 0, 'NONE') ,('2024-04-30', '1403-02-11', '1445-10-21', '2024-04-30', 11, 21, 2, 10, 'tuesday ', 3, 0, 'NONE') ,('2024-05-01', '1403-02-12', '1445-10-22', '2024-05-01', 12, 22, 2, 10, 'wednesday', 4, 0, 'NONE') ,('2024-05-02', '1403-02-13', '1445-10-23', '2024-05-02', 13, 23, 2, 10, 'thursday ', 5, 0, 'NONE') ,('2024-05-03', '1403-02-14', '1445-10-24', '2024-05-03', 14, 24, 2, 10, 'friday ', 6, 0, 'NONE') ,('2024-05-04', '1403-02-15', '1445-10-25', '2024-05-04', 15, 25, 2, 10, 'saturday ', 7, 0, 'NONE') ,('2024-05-05', '1403-02-16', '1445-10-26', '2024-05-05', 16, 26, 2, 10, 'sunday ', 1, 0, 'NONE') ,('2024-05-06', '1403-02-17', '1445-10-27', '2024-05-06', 17, 27, 2, 10, 'monday ', 2, 0, 'NONE') ,('2024-05-07', '1403-02-18', '1445-10-28', '2024-05-07', 18, 28, 2, 10, 'tuesday ', 3, 0, 'NONE') ,('2024-05-08', '1403-02-19', '1445-10-29', '2024-05-08', 19, 29, 2, 10, 'wednesday', 4, 0, 'NONE') ,('2024-05-09', '1403-02-20', '1445-11-01', '2024-05-09', 20, 1, 2, 11, 'thursday ', 5, 0, 'NONE') ,('2024-05-10', '1403-02-21', '1445-11-02', '2024-05-10', 21, 2, 2, 11, 'friday ', 6, 0, 'NONE') ,('2024-05-11', '1403-02-22', '1445-11-03', '2024-05-11', 22, 3, 2, 11, 'saturday ', 7, 0, 'NONE') ,('2024-05-12', '1403-02-23', '1445-11-04', '2024-05-12', 23, 4, 2, 11, 'sunday ', 1, 0, 'NONE') ,('2024-05-13', '1403-02-24', '1445-11-05', '2024-05-13', 24, 5, 2, 11, 'monday ', 2, 0, 'NONE') ,('2024-05-14', '1403-02-25', '1445-11-06', '2024-05-14', 25, 6, 2, 11, 'tuesday ', 3, 0, 'NONE') ,('2024-05-15', '1403-02-26', '1445-11-07', '2024-05-15', 26, 7, 2, 11, 'wednesday', 4, 0, 'NONE') ,('2024-05-16', '1403-02-27', '1445-11-08', '2024-05-16', 27, 8, 2, 11, 'thursday ', 5, 0, 'NONE') ,('2024-05-17', '1403-02-28', '1445-11-09', '2024-05-17', 28, 9, 2, 11, 'friday ', 6, 0, 'NONE') ,('2024-05-18', '1403-02-29', '1445-11-10', '2024-05-18', 29, 10, 2, 11, 'saturday ', 7, 0, 'NONE') ,('2024-05-19', '1403-02-30', '1445-11-11', '2024-05-19', 30, 11, 2, 11, 'sunday ', 1, 0, 'NONE') ,('2024-05-20', '1403-02-31', '1445-11-12', '2024-05-20', 31, 12, 2, 11, 'monday ', 2, 0, 'NONE') ,('2024-05-21', '1403-03-01', '1445-11-13', '2024-05-21', 1, 13, 3, 11, 'tuesday ', 3, 0, 'NONE') ,('2024-05-22', '1403-03-02', '1445-11-14', '2024-05-22', 2, 14, 3, 11, 'wednesday', 4, 0, 'NONE') ,('2024-05-23', '1403-03-03', '1445-11-15', '2024-05-23', 3, 15, 3, 11, 'thursday ', 5, 0, 'NONE') ,('2024-05-24', '1403-03-04', '1445-11-16', '2024-05-24', 4, 16, 3, 11, 'friday ', 6, 0, 'NONE') ,('2024-05-25', '1403-03-05', '1445-11-17', '2024-05-25', 5, 17, 3, 11, 'saturday ', 7, 0, 'NONE') ,('2024-05-26', '1403-03-06', '1445-11-18', '2024-05-26', 6, 18, 3, 11, 'sunday ', 1, 0, 'NONE') ,('2024-05-27', '1403-03-07', '1445-11-19', '2024-05-27', 7, 19, 3, 11, 'monday ', 2, 0, 'NONE') ,('2024-05-28', '1403-03-08', '1445-11-20', 
'2024-05-28', 8, 20, 3, 11, 'tuesday ', 3, 0, 'NONE') ,('2024-05-29', '1403-03-09', '1445-11-21', '2024-05-29', 9, 21, 3, 11, 'wednesday', 4, 0, 'NONE') ,('2024-05-30', '1403-03-10', '1445-11-22', '2024-05-30', 10, 22, 3, 11, 'thursday ', 5, 0, 'NONE') ,('2024-05-31', '1403-03-11', '1445-11-23', '2024-05-31', 11, 23, 3, 11, 'friday ', 6, 0, 'NONE') ,('2024-06-01', '1403-03-12', '1445-11-24', '2024-06-01', 12, 24, 3, 11, 'saturday ', 7, 0, 'NONE') ,('2024-06-02', '1403-03-13', '1445-11-25', '2024-06-02', 13, 25, 3, 11, 'sunday ', 1, 0, 'NONE') ,('2024-06-03', '1403-03-14', '1445-11-26', '2024-06-03', 14, 26, 3, 11, 'monday ', 2, 0, 'NONE') ,('2024-06-04', '1403-03-15', '1445-11-27', '2024-06-04', 15, 27, 3, 11, 'tuesday ', 3, 0, 'NONE') ,('2024-06-05', '1403-03-16', '1445-11-28', '2024-06-05', 16, 28, 3, 11, 'wednesday', 4, 0, 'NONE') ,('2024-06-06', '1403-03-17', '1445-11-29', '2024-06-06', 17, 29, 3, 11, 'thursday ', 5, 0, 'NONE') ,('2024-06-07', '1403-03-18', '1445-11-30', '2024-06-07', 18, 30, 3, 11, 'friday ', 6, 0, 'NONE') ,('2024-06-08', '1403-03-19', '1445-12-01', '2024-06-08', 19, 1, 3, 12, 'saturday ', 7, 0, 'NONE') ,('2024-06-09', '1403-03-20', '1445-12-02', '2024-06-09', 20, 2, 3, 12, 'sunday ', 1, 0, 'NONE') ,('2024-06-10', '1403-03-21', '1445-12-03', '2024-06-10', 21, 3, 3, 12, 'monday ', 2, 0, 'NONE') ,('2024-06-11', '1403-03-22', '1445-12-04', '2024-06-11', 22, 4, 3, 12, 'tuesday ', 3, 0, 'NONE') ,('2024-06-12', '1403-03-23', '1445-12-05', '2024-06-12', 23, 5, 3, 12, 'wednesday', 4, 0, 'NONE') ,('2024-06-13', '1403-03-24', '1445-12-06', '2024-06-13', 24, 6, 3, 12, 'thursday ', 5, 0, 'NONE') ,('2024-06-14', '1403-03-25', '1445-12-07', '2024-06-14', 25, 7, 3, 12, 'friday ', 6, 0, 'NONE') ,('2024-06-15', '1403-03-26', '1445-12-08', '2024-06-15', 26, 8, 3, 12, 'saturday ', 7, 0, 'NONE') ,('2024-06-16', '1403-03-27', '1445-12-09', '2024-06-16', 27, 9, 3, 12, 'sunday ', 1, 0, 'NONE') ,('2024-06-17', '1403-03-28', '1445-12-10', '2024-06-17', 28, 10, 3, 12, 'monday ', 2, 0, 'NONE') ,('2024-06-18', '1403-03-29', '1445-12-11', '2024-06-18', 29, 11, 3, 12, 'tuesday ', 3, 0, 'NONE') ,('2024-06-19', '1403-03-30', '1445-12-12', '2024-06-19', 30, 12, 3, 12, 'wednesday', 4, 0, 'NONE') ,('2024-06-20', '1403-03-31', '1445-12-13', '2024-06-20', 31, 13, 3, 12, 'thursday ', 5, 0, 'NONE') ,('2024-06-21', '1403-04-01', '1445-12-14', '2024-06-21', 1, 14, 4, 12, 'friday ', 6, 0, 'NONE') ,('2024-06-22', '1403-04-02', '1445-12-15', '2024-06-22', 2, 15, 4, 12, 'saturday ', 7, 0, 'NONE') ,('2024-06-23', '1403-04-03', '1445-12-16', '2024-06-23', 3, 16, 4, 12, 'sunday ', 1, 0, 'NONE') ,('2024-06-24', '1403-04-04', '1445-12-17', '2024-06-24', 4, 17, 4, 12, 'monday ', 2, 0, 'NONE') ,('2024-06-25', '1403-04-05', '1445-12-18', '2024-06-25', 5, 18, 4, 12, 'tuesday ', 3, 0, 'NONE') ,('2024-06-26', '1403-04-06', '1445-12-19', '2024-06-26', 6, 19, 4, 12, 'wednesday', 4, 0, 'NONE') ,('2024-06-27', '1403-04-07', '1445-12-20', '2024-06-27', 7, 20, 4, 12, 'thursday ', 5, 0, 'NONE') ,('2024-06-28', '1403-04-08', '1445-12-21', '2024-06-28', 8, 21, 4, 12, 'friday ', 6, 0, 'NONE') ,('2024-06-29', '1403-04-09', '1445-12-22', '2024-06-29', 9, 22, 4, 12, 'saturday ', 7, 0, 'NONE') ,('2024-06-30', '1403-04-10', '1445-12-23', '2024-06-30', 10, 23, 4, 12, 'sunday ', 1, 0, 'NONE') ,('2024-07-01', '1403-04-11', '1445-12-24', '2024-07-01', 11, 24, 4, 12, 'monday ', 2, 0, 'NONE') ,('2024-07-02', '1403-04-12', '1445-12-25', '2024-07-02', 12, 25, 4, 12, 'tuesday ', 3, 0, 'NONE') ,('2024-07-03', '1403-04-13', '1445-12-26', 
'2024-07-03', 13, 26, 4, 12, 'wednesday', 4, 0, 'NONE') ,('2024-07-04', '1403-04-14', '1445-12-27', '2024-07-04', 14, 27, 4, 12, 'thursday ', 5, 0, 'NONE') ,('2024-07-05', '1403-04-15', '1445-12-28', '2024-07-05', 15, 28, 4, 12, 'friday ', 6, 0, 'NONE') ,('2024-07-06', '1403-04-16', '1445-12-29', '2024-07-06', 16, 29, 4, 12, 'saturday ', 7, 0, 'NONE') ,('2024-07-07', '1403-04-17', '1445-12-30', '2024-07-07', 17, 30, 4, 12, 'sunday ', 1, 0, 'NONE') ,('2024-07-08', '1403-04-18', '1446-01-01', '2024-07-08', 18, 1, 4, 1, 'monday ', 2, 0, 'NONE') ,('2024-07-09', '1403-04-19', '1446-01-02', '2024-07-09', 19, 2, 4, 1, 'tuesday ', 3, 0, 'NONE') ,('2024-07-10', '1403-04-20', '1446-01-03', '2024-07-10', 20, 3, 4, 1, 'wednesday', 4, 0, 'NONE') ,('2024-07-11', '1403-04-21', '1446-01-04', '2024-07-11', 21, 4, 4, 1, 'thursday ', 5, 0, 'NONE') ,('2024-07-12', '1403-04-22', '1446-01-05', '2024-07-12', 22, 5, 4, 1, 'friday ', 6, 0, 'NONE') ,('2024-07-13', '1403-04-23', '1446-01-06', '2024-07-13', 23, 6, 4, 1, 'saturday ', 7, 0, 'NONE') ,('2024-07-14', '1403-04-24', '1446-01-07', '2024-07-14', 24, 7, 4, 1, 'sunday ', 1, 0, 'NONE') ,('2024-07-15', '1403-04-25', '1446-01-08', '2024-07-15', 25, 8, 4, 1, 'monday ', 2, 0, 'NONE') ,('2024-07-16', '1403-04-26', '1446-01-09', '2024-07-16', 26, 9, 4, 1, 'tuesday ', 3, 0, 'NONE') ,('2024-07-17', '1403-04-27', '1446-01-10', '2024-07-17', 27, 10, 4, 1, 'wednesday', 4, 0, 'NONE') ,('2024-07-18', '1403-04-28', '1446-01-11', '2024-07-18', 28, 11, 4, 1, 'thursday ', 5, 0, 'NONE') ,('2024-07-19', '1403-04-29', '1446-01-12', '2024-07-19', 29, 12, 4, 1, 'friday ', 6, 0, 'NONE') ,('2024-07-20', '1403-04-30', '1446-01-13', '2024-07-20', 30, 13, 4, 1, 'saturday ', 7, 0, 'NONE') ,('2024-07-21', '1403-04-31', '1446-01-14', '2024-07-21', 31, 14, 4, 1, 'sunday ', 1, 0, 'NONE') ,('2024-07-22', '1403-05-01', '1446-01-15', '2024-07-22', 1, 15, 5, 1, 'monday ', 2, 0, 'NONE') ,('2024-07-23', '1403-05-02', '1446-01-16', '2024-07-23', 2, 16, 5, 1, 'tuesday ', 3, 0, 'NONE') ,('2024-07-24', '1403-05-03', '1446-01-17', '2024-07-24', 3, 17, 5, 1, 'wednesday', 4, 0, 'NONE') ,('2024-07-25', '1403-05-04', '1446-01-18', '2024-07-25', 4, 18, 5, 1, 'thursday ', 5, 0, 'NONE') ,('2024-07-26', '1403-05-05', '1446-01-19', '2024-07-26', 5, 19, 5, 1, 'friday ', 6, 0, 'NONE') ,('2024-07-27', '1403-05-06', '1446-01-20', '2024-07-27', 6, 20, 5, 1, 'saturday ', 7, 0, 'NONE') ,('2024-07-28', '1403-05-07', '1446-01-21', '2024-07-28', 7, 21, 5, 1, 'sunday ', 1, 0, 'NONE') ,('2024-07-29', '1403-05-08', '1446-01-22', '2024-07-29', 8, 22, 5, 1, 'monday ', 2, 0, 'NONE') ,('2024-07-30', '1403-05-09', '1446-01-23', '2024-07-30', 9, 23, 5, 1, 'tuesday ', 3, 0, 'NONE') ,('2024-07-31', '1403-05-10', '1446-01-24', '2024-07-31', 10, 24, 5, 1, 'wednesday', 4, 0, 'NONE') ,('2024-08-01', '1403-05-11', '1446-01-25', '2024-08-01', 11, 25, 5, 1, 'thursday ', 5, 0, 'NONE') ,('2024-08-02', '1403-05-12', '1446-01-26', '2024-08-02', 12, 26, 5, 1, 'friday ', 6, 0, 'NONE') ,('2024-08-03', '1403-05-13', '1446-01-27', '2024-08-03', 13, 27, 5, 1, 'saturday ', 7, 0, 'NONE') ,('2024-08-04', '1403-05-14', '1446-01-28', '2024-08-04', 14, 28, 5, 1, 'sunday ', 1, 0, 'NONE') ,('2024-08-05', '1403-05-15', '1446-01-29', '2024-08-05', 15, 29, 5, 1, 'monday ', 2, 0, 'NONE') ,('2024-08-06', '1403-05-16', '1446-01-30', '2024-08-06', 16, 30, 5, 1, 'tuesday ', 3, 0, 'NONE') ,('2024-08-07', '1403-05-17', '1446-02-01', '2024-08-07', 17, 1, 5, 2, 'wednesday', 4, 0, 'NONE') ,('2024-08-08', '1403-05-18', '1446-02-02', '2024-08-08', 18, 2, 5, 2, 'thursday ', 
5, 0, 'NONE') ,('2024-08-09', '1403-05-19', '1446-02-03', '2024-08-09', 19, 3, 5, 2, 'friday ', 6, 0, 'NONE') ,('2024-08-10', '1403-05-20', '1446-02-04', '2024-08-10', 20, 4, 5, 2, 'saturday ', 7, 0, 'NONE') ,('2024-08-11', '1403-05-21', '1446-02-05', '2024-08-11', 21, 5, 5, 2, 'sunday ', 1, 0, 'NONE') ,('2024-08-12', '1403-05-22', '1446-02-06', '2024-08-12', 22, 6, 5, 2, 'monday ', 2, 0, 'NONE') ,('2024-08-13', '1403-05-23', '1446-02-07', '2024-08-13', 23, 7, 5, 2, 'tuesday ', 3, 0, 'NONE') ,('2024-08-14', '1403-05-24', '1446-02-08', '2024-08-14', 24, 8, 5, 2, 'wednesday', 4, 0, 'NONE') ,('2024-08-15', '1403-05-25', '1446-02-09', '2024-08-15', 25, 9, 5, 2, 'thursday ', 5, 0, 'NONE') ,('2024-08-16', '1403-05-26', '1446-02-10', '2024-08-16', 26, 10, 5, 2, 'friday ', 6, 0, 'NONE') ,('2024-08-17', '1403-05-27', '1446-02-11', '2024-08-17', 27, 11, 5, 2, 'saturday ', 7, 0, 'NONE') ,('2024-08-18', '1403-05-28', '1446-02-12', '2024-08-18', 28, 12, 5, 2, 'sunday ', 1, 0, 'NONE') ,('2024-08-19', '1403-05-29', '1446-02-13', '2024-08-19', 29, 13, 5, 2, 'monday ', 2, 0, 'NONE') ,('2024-08-20', '1403-05-30', '1446-02-14', '2024-08-20', 30, 14, 5, 2, 'tuesday ', 3, 0, 'NONE') ,('2024-08-21', '1403-05-31', '1446-02-15', '2024-08-21', 31, 15, 5, 2, 'wednesday', 4, 0, 'NONE') ,('2024-08-22', '1403-06-01', '1446-02-16', '2024-08-22', 1, 16, 6, 2, 'thursday ', 5, 0, 'NONE') ,('2024-08-23', '1403-06-02', '1446-02-17', '2024-08-23', 2, 17, 6, 2, 'friday ', 6, 0, 'NONE') ,('2024-08-24', '1403-06-03', '1446-02-18', '2024-08-24', 3, 18, 6, 2, 'saturday ', 7, 0, 'NONE') ,('2024-08-25', '1403-06-04', '1446-02-19', '2024-08-25', 4, 19, 6, 2, 'sunday ', 1, 0, 'NONE') ,('2024-08-26', '1403-06-05', '1446-02-20', '2024-08-26', 5, 20, 6, 2, 'monday ', 2, 0, 'NONE') ,('2024-08-27', '1403-06-06', '1446-02-21', '2024-08-27', 6, 21, 6, 2, 'tuesday ', 3, 0, 'NONE') ,('2024-08-28', '1403-06-07', '1446-02-22', '2024-08-28', 7, 22, 6, 2, 'wednesday', 4, 0, 'NONE') ,('2024-08-29', '1403-06-08', '1446-02-23', '2024-08-29', 8, 23, 6, 2, 'thursday ', 5, 0, 'NONE') ,('2024-08-30', '1403-06-09', '1446-02-24', '2024-08-30', 9, 24, 6, 2, 'friday ', 6, 0, 'NONE') ,('2024-08-31', '1403-06-10', '1446-02-25', '2024-08-31', 10, 25, 6, 2, 'saturday ', 7, 0, 'NONE') ,('2024-09-01', '1403-06-11', '1446-02-26', '2024-09-01', 11, 26, 6, 2, 'sunday ', 1, 0, 'NONE') ,('2024-09-02', '1403-06-12', '1446-02-27', '2024-09-02', 12, 27, 6, 2, 'monday ', 2, 0, 'NONE') ,('2024-09-03', '1403-06-13', '1446-02-28', '2024-09-03', 13, 28, 6, 2, 'tuesday ', 3, 0, 'NONE') ,('2024-09-04', '1403-06-14', '1446-02-29', '2024-09-04', 14, 29, 6, 2, 'wednesday', 4, 0, 'NONE') ,('2024-09-05', '1403-06-15', '1446-03-01', '2024-09-05', 15, 1, 6, 3, 'thursday ', 5, 0, 'NONE') ,('2024-09-06', '1403-06-16', '1446-03-02', '2024-09-06', 16, 2, 6, 3, 'friday ', 6, 0, 'NONE') ,('2024-09-07', '1403-06-17', '1446-03-03', '2024-09-07', 17, 3, 6, 3, 'saturday ', 7, 0, 'NONE') ,('2024-09-08', '1403-06-18', '1446-03-04', '2024-09-08', 18, 4, 6, 3, 'sunday ', 1, 0, 'NONE') ,('2024-09-09', '1403-06-19', '1446-03-05', '2024-09-09', 19, 5, 6, 3, 'monday ', 2, 0, 'NONE') ,('2024-09-10', '1403-06-20', '1446-03-06', '2024-09-10', 20, 6, 6, 3, 'tuesday ', 3, 0, 'NONE') ,('2024-09-11', '1403-06-21', '1446-03-07', '2024-09-11', 21, 7, 6, 3, 'wednesday', 4, 0, 'NONE') ,('2024-09-12', '1403-06-22', '1446-03-08', '2024-09-12', 22, 8, 6, 3, 'thursday ', 5, 0, 'NONE') ,('2024-09-13', '1403-06-23', '1446-03-09', '2024-09-13', 23, 9, 6, 3, 'friday ', 6, 0, 'NONE') ,('2024-09-14', '1403-06-24', 
'1446-03-10', '2024-09-14', 24, 10, 6, 3, 'saturday ', 7, 0, 'NONE') ,('2024-09-15', '1403-06-25', '1446-03-11', '2024-09-15', 25, 11, 6, 3, 'sunday ', 1, 0, 'NONE') ,('2024-09-16', '1403-06-26', '1446-03-12', '2024-09-16', 26, 12, 6, 3, 'monday ', 2, 0, 'NONE') ,('2024-09-17', '1403-06-27', '1446-03-13', '2024-09-17', 27, 13, 6, 3, 'tuesday ', 3, 0, 'NONE') ,('2024-09-18', '1403-06-28', '1446-03-14', '2024-09-18', 28, 14, 6, 3, 'wednesday', 4, 0, 'NONE') ,('2024-09-19', '1403-06-29', '1446-03-15', '2024-09-19', 29, 15, 6, 3, 'thursday ', 5, 0, 'NONE') ,('2024-09-20', '1403-06-30', '1446-03-16', '2024-09-20', 30, 16, 6, 3, 'friday ', 6, 0, 'NONE') ,('2024-09-21', '1403-06-31', '1446-03-17', '2024-09-21', 31, 17, 6, 3, 'saturday ', 7, 0, 'NONE') ,('2024-09-22', '1403-07-01', '1446-03-18', '2024-09-22', 1, 18, 7, 3, 'sunday ', 1, 0, 'NONE') ,('2024-09-23', '1403-07-02', '1446-03-19', '2024-09-23', 2, 19, 7, 3, 'monday ', 2, 0, 'NONE') ,('2024-09-24', '1403-07-03', '1446-03-20', '2024-09-24', 3, 20, 7, 3, 'tuesday ', 3, 0, 'NONE') ,('2024-09-25', '1403-07-04', '1446-03-21', '2024-09-25', 4, 21, 7, 3, 'wednesday', 4, 0, 'NONE') ,('2024-09-26', '1403-07-05', '1446-03-22', '2024-09-26', 5, 22, 7, 3, 'thursday ', 5, 0, 'NONE') ,('2024-09-27', '1403-07-06', '1446-03-23', '2024-09-27', 6, 23, 7, 3, 'friday ', 6, 0, 'NONE') ,('2024-09-28', '1403-07-07', '1446-03-24', '2024-09-28', 7, 24, 7, 3, 'saturday ', 7, 0, 'NONE') ,('2024-09-29', '1403-07-08', '1446-03-25', '2024-09-29', 8, 25, 7, 3, 'sunday ', 1, 0, 'NONE') ,('2024-09-30', '1403-07-09', '1446-03-26', '2024-09-30', 9, 26, 7, 3, 'monday ', 2, 0, 'NONE') ,('2024-10-01', '1403-07-10', '1446-03-27', '2024-10-01', 10, 27, 7, 3, 'tuesday ', 3, 0, 'NONE') ,('2024-10-02', '1403-07-11', '1446-03-28', '2024-10-02', 11, 28, 7, 3, 'wednesday', 4, 0, 'NONE') ,('2024-10-03', '1403-07-12', '1446-03-29', '2024-10-03', 12, 29, 7, 3, 'thursday ', 5, 0, 'NONE') ,('2024-10-04', '1403-07-13', '1446-03-30', '2024-10-04', 13, 30, 7, 3, 'friday ', 6, 0, 'NONE') ,('2024-10-05', '1403-07-14', '1446-04-01', '2024-10-05', 14, 1, 7, 4, 'saturday ', 7, 0, 'NONE') ,('2024-10-06', '1403-07-15', '1446-04-02', '2024-10-06', 15, 2, 7, 4, 'sunday ', 1, 0, 'NONE') ,('2024-10-07', '1403-07-16', '1446-04-03', '2024-10-07', 16, 3, 7, 4, 'monday ', 2, 0, 'NONE') ,('2024-10-08', '1403-07-17', '1446-04-04', '2024-10-08', 17, 4, 7, 4, 'tuesday ', 3, 0, 'NONE') ,('2024-10-09', '1403-07-18', '1446-04-05', '2024-10-09', 18, 5, 7, 4, 'wednesday', 4, 0, 'NONE') ,('2024-10-10', '1403-07-19', '1446-04-06', '2024-10-10', 19, 6, 7, 4, 'thursday ', 5, 0, 'NONE') ,('2024-10-11', '1403-07-20', '1446-04-07', '2024-10-11', 20, 7, 7, 4, 'friday ', 6, 0, 'NONE') ,('2024-10-12', '1403-07-21', '1446-04-08', '2024-10-12', 21, 8, 7, 4, 'saturday ', 7, 0, 'NONE') ,('2024-10-13', '1403-07-22', '1446-04-09', '2024-10-13', 22, 9, 7, 4, 'sunday ', 1, 0, 'NONE') ,('2024-10-14', '1403-07-23', '1446-04-10', '2024-10-14', 23, 10, 7, 4, 'monday ', 2, 0, 'NONE') ,('2024-10-15', '1403-07-24', '1446-04-11', '2024-10-15', 24, 11, 7, 4, 'tuesday ', 3, 0, 'NONE') ,('2024-10-16', '1403-07-25', '1446-04-12', '2024-10-16', 25, 12, 7, 4, 'wednesday', 4, 0, 'NONE') ,('2024-10-17', '1403-07-26', '1446-04-13', '2024-10-17', 26, 13, 7, 4, 'thursday ', 5, 0, 'NONE') ,('2024-10-18', '1403-07-27', '1446-04-14', '2024-10-18', 27, 14, 7, 4, 'friday ', 6, 0, 'NONE') ,('2024-10-19', '1403-07-28', '1446-04-15', '2024-10-19', 28, 15, 7, 4, 'saturday ', 7, 0, 'NONE') ,('2024-10-20', '1403-07-29', '1446-04-16', '2024-10-20', 29, 16, 7, 4, 
'sunday ', 1, 0, 'NONE') ,('2024-10-21', '1403-07-30', '1446-04-17', '2024-10-21', 30, 17, 7, 4, 'monday ', 2, 0, 'NONE') ,('2024-10-22', '1403-08-01', '1446-04-18', '2024-10-22', 1, 18, 8, 4, 'tuesday ', 3, 0, 'NONE') ,('2024-10-23', '1403-08-02', '1446-04-19', '2024-10-23', 2, 19, 8, 4, 'wednesday', 4, 0, 'NONE') ,('2024-10-24', '1403-08-03', '1446-04-20', '2024-10-24', 3, 20, 8, 4, 'thursday ', 5, 0, 'NONE') ,('2024-10-25', '1403-08-04', '1446-04-21', '2024-10-25', 4, 21, 8, 4, 'friday ', 6, 0, 'NONE') ,('2024-10-26', '1403-08-05', '1446-04-22', '2024-10-26', 5, 22, 8, 4, 'saturday ', 7, 0, 'NONE') ,('2024-10-27', '1403-08-06', '1446-04-23', '2024-10-27', 6, 23, 8, 4, 'sunday ', 1, 0, 'NONE') ,('2024-10-28', '1403-08-07', '1446-04-24', '2024-10-28', 7, 24, 8, 4, 'monday ', 2, 0, 'NONE') ,('2024-10-29', '1403-08-08', '1446-04-25', '2024-10-29', 8, 25, 8, 4, 'tuesday ', 3, 0, 'NONE') ,('2024-10-30', '1403-08-09', '1446-04-26', '2024-10-30', 9, 26, 8, 4, 'wednesday', 4, 0, 'NONE') ,('2024-10-31', '1403-08-10', '1446-04-27', '2024-10-31', 10, 27, 8, 4, 'thursday ', 5, 0, 'NONE') ,('2024-11-01', '1403-08-11', '1446-04-28', '2024-11-01', 11, 28, 8, 4, 'friday ', 6, 0, 'NONE') ,('2024-11-02', '1403-08-12', '1446-04-29', '2024-11-02', 12, 29, 8, 4, 'saturday ', 7, 0, 'NONE') ,('2024-11-03', '1403-08-13', '1446-05-01', '2024-11-03', 13, 1, 8, 5, 'sunday ', 1, 0, 'NONE') ,('2024-11-04', '1403-08-14', '1446-05-02', '2024-11-04', 14, 2, 8, 5, 'monday ', 2, 0, 'NONE') ,('2024-11-05', '1403-08-15', '1446-05-03', '2024-11-05', 15, 3, 8, 5, 'tuesday ', 3, 0, 'NONE') ,('2024-11-06', '1403-08-16', '1446-05-04', '2024-11-06', 16, 4, 8, 5, 'wednesday', 4, 0, 'NONE') ,('2024-11-07', '1403-08-17', '1446-05-05', '2024-11-07', 17, 5, 8, 5, 'thursday ', 5, 0, 'NONE') ,('2024-11-08', '1403-08-18', '1446-05-06', '2024-11-08', 18, 6, 8, 5, 'friday ', 6, 0, 'NONE') ,('2024-11-09', '1403-08-19', '1446-05-07', '2024-11-09', 19, 7, 8, 5, 'saturday ', 7, 0, 'NONE') ,('2024-11-10', '1403-08-20', '1446-05-08', '2024-11-10', 20, 8, 8, 5, 'sunday ', 1, 0, 'NONE') ,('2024-11-11', '1403-08-21', '1446-05-09', '2024-11-11', 21, 9, 8, 5, 'monday ', 2, 0, 'NONE') ,('2024-11-12', '1403-08-22', '1446-05-10', '2024-11-12', 22, 10, 8, 5, 'tuesday ', 3, 0, 'NONE') ,('2024-11-13', '1403-08-23', '1446-05-11', '2024-11-13', 23, 11, 8, 5, 'wednesday', 4, 0, 'NONE') ,('2024-11-14', '1403-08-24', '1446-05-12', '2024-11-14', 24, 12, 8, 5, 'thursday ', 5, 0, 'NONE') ,('2024-11-15', '1403-08-25', '1446-05-13', '2024-11-15', 25, 13, 8, 5, 'friday ', 6, 0, 'NONE') ,('2024-11-16', '1403-08-26', '1446-05-14', '2024-11-16', 26, 14, 8, 5, 'saturday ', 7, 0, 'NONE') ,('2024-11-17', '1403-08-27', '1446-05-15', '2024-11-17', 27, 15, 8, 5, 'sunday ', 1, 0, 'NONE') ,('2024-11-18', '1403-08-28', '1446-05-16', '2024-11-18', 28, 16, 8, 5, 'monday ', 2, 0, 'NONE') ,('2024-11-19', '1403-08-29', '1446-05-17', '2024-11-19', 29, 17, 8, 5, 'tuesday ', 3, 0, 'NONE') ,('2024-11-20', '1403-08-30', '1446-05-18', '2024-11-20', 30, 18, 8, 5, 'wednesday', 4, 0, 'NONE') ,('2024-11-21', '1403-09-01', '1446-05-19', '2024-11-21', 1, 19, 9, 5, 'thursday ', 5, 0, 'NONE') ,('2024-11-22', '1403-09-02', '1446-05-20', '2024-11-22', 2, 20, 9, 5, 'friday ', 6, 0, 'NONE') ,('2024-11-23', '1403-09-03', '1446-05-21', '2024-11-23', 3, 21, 9, 5, 'saturday ', 7, 0, 'NONE') ,('2024-11-24', '1403-09-04', '1446-05-22', '2024-11-24', 4, 22, 9, 5, 'sunday ', 1, 0, 'NONE') ,('2024-11-25', '1403-09-05', '1446-05-23', '2024-11-25', 5, 23, 9, 5, 'monday ', 2, 0, 'NONE') ,('2024-11-26', 
'1403-09-06', '1446-05-24', '2024-11-26', 6, 24, 9, 5, 'tuesday ', 3, 0, 'NONE') ,('2024-11-27', '1403-09-07', '1446-05-25', '2024-11-27', 7, 25, 9, 5, 'wednesday', 4, 0, 'NONE') ,('2024-11-28', '1403-09-08', '1446-05-26', '2024-11-28', 8, 26, 9, 5, 'thursday ', 5, 0, 'NONE') ,('2024-11-29', '1403-09-09', '1446-05-27', '2024-11-29', 9, 27, 9, 5, 'friday ', 6, 0, 'NONE') ,('2024-11-30', '1403-09-10', '1446-05-28', '2024-11-30', 10, 28, 9, 5, 'saturday ', 7, 0, 'NONE') ,('2024-12-01', '1403-09-11', '1446-05-29', '2024-12-01', 11, 29, 9, 5, 'sunday ', 1, 0, 'NONE') ,('2024-12-02', '1403-09-12', '1446-05-30', '2024-12-02', 12, 30, 9, 5, 'monday ', 2, 0, 'NONE') ,('2024-12-03', '1403-09-13', '1446-06-01', '2024-12-03', 13, 1, 9, 6, 'tuesday ', 3, 0, 'NONE') ,('2024-12-04', '1403-09-14', '1446-06-02', '2024-12-04', 14, 2, 9, 6, 'wednesday', 4, 0, 'NONE') ,('2024-12-05', '1403-09-15', '1446-06-03', '2024-12-05', 15, 3, 9, 6, 'thursday ', 5, 0, 'NONE') ,('2024-12-06', '1403-09-16', '1446-06-04', '2024-12-06', 16, 4, 9, 6, 'friday ', 6, 0, 'NONE') ,('2024-12-07', '1403-09-17', '1446-06-05', '2024-12-07', 17, 5, 9, 6, 'saturday ', 7, 0, 'NONE') ,('2024-12-08', '1403-09-18', '1446-06-06', '2024-12-08', 18, 6, 9, 6, 'sunday ', 1, 0, 'NONE') ,('2024-12-09', '1403-09-19', '1446-06-07', '2024-12-09', 19, 7, 9, 6, 'monday ', 2, 0, 'NONE') ,('2024-12-10', '1403-09-20', '1446-06-08', '2024-12-10', 20, 8, 9, 6, 'tuesday ', 3, 0, 'NONE') ,('2024-12-11', '1403-09-21', '1446-06-09', '2024-12-11', 21, 9, 9, 6, 'wednesday', 4, 0, 'NONE') ,('2024-12-12', '1403-09-22', '1446-06-10', '2024-12-12', 22, 10, 9, 6, 'thursday ', 5, 0, 'NONE') ,('2024-12-13', '1403-09-23', '1446-06-11', '2024-12-13', 23, 11, 9, 6, 'friday ', 6, 0, 'NONE') ,('2024-12-14', '1403-09-24', '1446-06-12', '2024-12-14', 24, 12, 9, 6, 'saturday ', 7, 0, 'NONE') ,('2024-12-15', '1403-09-25', '1446-06-13', '2024-12-15', 25, 13, 9, 6, 'sunday ', 1, 0, 'NONE') ,('2024-12-16', '1403-09-26', '1446-06-14', '2024-12-16', 26, 14, 9, 6, 'monday ', 2, 0, 'NONE') ,('2024-12-17', '1403-09-27', '1446-06-15', '2024-12-17', 27, 15, 9, 6, 'tuesday ', 3, 0, 'NONE') ,('2024-12-18', '1403-09-28', '1446-06-16', '2024-12-18', 28, 16, 9, 6, 'wednesday', 4, 0, 'NONE') ,('2024-12-19', '1403-09-29', '1446-06-17', '2024-12-19', 29, 17, 9, 6, 'thursday ', 5, 0, 'NONE') ,('2024-12-20', '1403-09-30', '1446-06-18', '2024-12-20', 30, 18, 9, 6, 'friday ', 6, 0, 'NONE') ,('2024-12-21', '1403-10-01', '1446-06-19', '2024-12-21', 1, 19, 10, 6, 'saturday ', 7, 0, 'NONE') ,('2024-12-22', '1403-10-02', '1446-06-20', '2024-12-22', 2, 20, 10, 6, 'sunday ', 1, 0, 'NONE') ,('2024-12-23', '1403-10-03', '1446-06-21', '2024-12-23', 3, 21, 10, 6, 'monday ', 2, 0, 'NONE') ,('2024-12-24', '1403-10-04', '1446-06-22', '2024-12-24', 4, 22, 10, 6, 'tuesday ', 3, 0, 'NONE') ,('2024-12-25', '1403-10-05', '1446-06-23', '2024-12-25', 5, 23, 10, 6, 'wednesday', 4, 0, 'NONE') ,('2024-12-26', '1403-10-06', '1446-06-24', '2024-12-26', 6, 24, 10, 6, 'thursday ', 5, 0, 'NONE') ,('2024-12-27', '1403-10-07', '1446-06-25', '2024-12-27', 7, 25, 10, 6, 'friday ', 6, 0, 'NONE') ,('2024-12-28', '1403-10-08', '1446-06-26', '2024-12-28', 8, 26, 10, 6, 'saturday ', 7, 0, 'NONE') ,('2024-12-29', '1403-10-09', '1446-06-27', '2024-12-29', 9, 27, 10, 6, 'sunday ', 1, 0, 'NONE') ,('2024-12-30', '1403-10-10', '1446-06-28', '2024-12-30', 10, 28, 10, 6, 'monday ', 2, 0, 'NONE') ,('2024-12-31', '1403-10-11', '1446-06-29', '2024-12-31', 11, 29, 10, 6, 'tuesday ', 3, 0, 'NONE') ,('2025-01-01', '1403-10-12', '1446-07-01', 
'2025-01-01', 12, 1, 10, 7, 'wednesday', 4, 0, 'NONE') ,('2025-01-02', '1403-10-13', '1446-07-02', '2025-01-02', 13, 2, 10, 7, 'thursday ', 5, 0, 'NONE') ,('2025-01-03', '1403-10-14', '1446-07-03', '2025-01-03', 14, 3, 10, 7, 'friday ', 6, 0, 'NONE') ,('2025-01-04', '1403-10-15', '1446-07-04', '2025-01-04', 15, 4, 10, 7, 'saturday ', 7, 0, 'NONE') ,('2025-01-05', '1403-10-16', '1446-07-05', '2025-01-05', 16, 5, 10, 7, 'sunday ', 1, 0, 'NONE') ,('2025-01-06', '1403-10-17', '1446-07-06', '2025-01-06', 17, 6, 10, 7, 'monday ', 2, 0, 'NONE') ,('2025-01-07', '1403-10-18', '1446-07-07', '2025-01-07', 18, 7, 10, 7, 'tuesday ', 3, 0, 'NONE') ,('2025-01-08', '1403-10-19', '1446-07-08', '2025-01-08', 19, 8, 10, 7, 'wednesday', 4, 0, 'NONE') ,('2025-01-09', '1403-10-20', '1446-07-09', '2025-01-09', 20, 9, 10, 7, 'thursday ', 5, 0, 'NONE') ,('2025-01-10', '1403-10-21', '1446-07-10', '2025-01-10', 21, 10, 10, 7, 'friday ', 6, 0, 'NONE') ,('2025-01-11', '1403-10-22', '1446-07-11', '2025-01-11', 22, 11, 10, 7, 'saturday ', 7, 0, 'NONE') ,('2025-01-12', '1403-10-23', '1446-07-12', '2025-01-12', 23, 12, 10, 7, 'sunday ', 1, 0, 'NONE') ,('2025-01-13', '1403-10-24', '1446-07-13', '2025-01-13', 24, 13, 10, 7, 'monday ', 2, 0, 'NONE') ,('2025-01-14', '1403-10-25', '1446-07-14', '2025-01-14', 25, 14, 10, 7, 'tuesday ', 3, 0, 'NONE') ,('2025-01-15', '1403-10-26', '1446-07-15', '2025-01-15', 26, 15, 10, 7, 'wednesday', 4, 0, 'NONE') ,('2025-01-16', '1403-10-27', '1446-07-16', '2025-01-16', 27, 16, 10, 7, 'thursday ', 5, 0, 'NONE') ,('2025-01-17', '1403-10-28', '1446-07-17', '2025-01-17', 28, 17, 10, 7, 'friday ', 6, 0, 'NONE') ,('2025-01-18', '1403-10-29', '1446-07-18', '2025-01-18', 29, 18, 10, 7, 'saturday ', 7, 0, 'NONE') ,('2025-01-19', '1403-10-30', '1446-07-19', '2025-01-19', 30, 19, 10, 7, 'sunday ', 1, 0, 'NONE') ,('2025-01-20', '1403-11-01', '1446-07-20', '2025-01-20', 1, 20, 11, 7, 'monday ', 2, 0, 'NONE') ,('2025-01-21', '1403-11-02', '1446-07-21', '2025-01-21', 2, 21, 11, 7, 'tuesday ', 3, 0, 'NONE') ,('2025-01-22', '1403-11-03', '1446-07-22', '2025-01-22', 3, 22, 11, 7, 'wednesday', 4, 0, 'NONE') ,('2025-01-23', '1403-11-04', '1446-07-23', '2025-01-23', 4, 23, 11, 7, 'thursday ', 5, 0, 'NONE') ,('2025-01-24', '1403-11-05', '1446-07-24', '2025-01-24', 5, 24, 11, 7, 'friday ', 6, 0, 'NONE') ,('2025-01-25', '1403-11-06', '1446-07-25', '2025-01-25', 6, 25, 11, 7, 'saturday ', 7, 0, 'NONE') ,('2025-01-26', '1403-11-07', '1446-07-26', '2025-01-26', 7, 26, 11, 7, 'sunday ', 1, 0, 'NONE') ,('2025-01-27', '1403-11-08', '1446-07-27', '2025-01-27', 8, 27, 11, 7, 'monday ', 2, 0, 'NONE') ,('2025-01-28', '1403-11-09', '1446-07-28', '2025-01-28', 9, 28, 11, 7, 'tuesday ', 3, 0, 'NONE') ,('2025-01-29', '1403-11-10', '1446-07-29', '2025-01-29', 10, 29, 11, 7, 'wednesday', 4, 0, 'NONE') ,('2025-01-30', '1403-11-11', '1446-07-30', '2025-01-30', 11, 30, 11, 7, 'thursday ', 5, 0, 'NONE') ,('2025-01-31', '1403-11-12', '1446-08-01', '2025-01-31', 12, 1, 11, 8, 'friday ', 6, 0, 'NONE') ,('2025-02-01', '1403-11-13', '1446-08-02', '2025-02-01', 13, 2, 11, 8, 'saturday ', 7, 0, 'NONE') ,('2025-02-02', '1403-11-14', '1446-08-03', '2025-02-02', 14, 3, 11, 8, 'sunday ', 1, 0, 'NONE') ,('2025-02-03', '1403-11-15', '1446-08-04', '2025-02-03', 15, 4, 11, 8, 'monday ', 2, 0, 'NONE') ,('2025-02-04', '1403-11-16', '1446-08-05', '2025-02-04', 16, 5, 11, 8, 'tuesday ', 3, 0, 'NONE') ,('2025-02-05', '1403-11-17', '1446-08-06', '2025-02-05', 17, 6, 11, 8, 'wednesday', 4, 0, 'NONE') ,('2025-02-06', '1403-11-18', '1446-08-07', '2025-02-06', 
18, 7, 11, 8, 'thursday ', 5, 0, 'NONE') ,('2025-02-07', '1403-11-19', '1446-08-08', '2025-02-07', 19, 8, 11, 8, 'friday ', 6, 0, 'NONE') ,('2025-02-08', '1403-11-20', '1446-08-09', '2025-02-08', 20, 9, 11, 8, 'saturday ', 7, 0, 'NONE') ,('2025-02-09', '1403-11-21', '1446-08-10', '2025-02-09', 21, 10, 11, 8, 'sunday ', 1, 0, 'NONE') ,('2025-02-10', '1403-11-22', '1446-08-11', '2025-02-10', 22, 11, 11, 8, 'monday ', 2, 0, 'NONE') ,('2025-02-11', '1403-11-23', '1446-08-12', '2025-02-11', 23, 12, 11, 8, 'tuesday ', 3, 0, 'NONE') ,('2025-02-12', '1403-11-24', '1446-08-13', '2025-02-12', 24, 13, 11, 8, 'wednesday', 4, 0, 'NONE') ,('2025-02-13', '1403-11-25', '1446-08-14', '2025-02-13', 25, 14, 11, 8, 'thursday ', 5, 0, 'NONE') ,('2025-02-14', '1403-11-26', '1446-08-15', '2025-02-14', 26, 15, 11, 8, 'friday ', 6, 0, 'NONE') ,('2025-02-15', '1403-11-27', '1446-08-16', '2025-02-15', 27, 16, 11, 8, 'saturday ', 7, 0, 'NONE') ,('2025-02-16', '1403-11-28', '1446-08-17', '2025-02-16', 28, 17, 11, 8, 'sunday ', 1, 0, 'NONE') ,('2025-02-17', '1403-11-29', '1446-08-18', '2025-02-17', 29, 18, 11, 8, 'monday ', 2, 0, 'NONE') ,('2025-02-18', '1403-11-30', '1446-08-19', '2025-02-18', 30, 19, 11, 8, 'tuesday ', 3, 0, 'NONE') ,('2025-02-19', '1403-12-01', '1446-08-20', '2025-02-19', 1, 20, 12, 8, 'wednesday', 4, 0, 'NONE') ,('2025-02-20', '1403-12-02', '1446-08-21', '2025-02-20', 2, 21, 12, 8, 'thursday ', 5, 0, 'NONE') ,('2025-02-21', '1403-12-03', '1446-08-22', '2025-02-21', 3, 22, 12, 8, 'friday ', 6, 0, 'NONE') ,('2025-02-22', '1403-12-04', '1446-08-23', '2025-02-22', 4, 23, 12, 8, 'saturday ', 7, 0, 'NONE') ,('2025-02-23', '1403-12-05', '1446-08-24', '2025-02-23', 5, 24, 12, 8, 'sunday ', 1, 0, 'NONE') ,('2025-02-24', '1403-12-06', '1446-08-25', '2025-02-24', 6, 25, 12, 8, 'monday ', 2, 0, 'NONE') ,('2025-02-25', '1403-12-07', '1446-08-26', '2025-02-25', 7, 26, 12, 8, 'tuesday ', 3, 0, 'NONE') ,('2025-02-26', '1403-12-08', '1446-08-27', '2025-02-26', 8, 27, 12, 8, 'wednesday', 4, 0, 'NONE') ,('2025-02-27', '1403-12-09', '1446-08-28', '2025-02-27', 9, 28, 12, 8, 'thursday ', 5, 0, 'NONE') ,('2025-02-28', '1403-12-10', '1446-08-29', '2025-02-28', 10, 29, 12, 8, 'friday ', 6, 0, 'NONE') ,('2025-03-01', '1403-12-11', '1446-09-01', '2025-03-01', 11, 1, 12, 9, 'saturday ', 7, 0, 'NONE') ,('2025-03-02', '1403-12-12', '1446-09-02', '2025-03-02', 12, 2, 12, 9, 'sunday ', 1, 0, 'NONE') ,('2025-03-03', '1403-12-13', '1446-09-03', '2025-03-03', 13, 3, 12, 9, 'monday ', 2, 0, 'NONE') ,('2025-03-04', '1403-12-14', '1446-09-04', '2025-03-04', 14, 4, 12, 9, 'tuesday ', 3, 0, 'NONE') ,('2025-03-05', '1403-12-15', '1446-09-05', '2025-03-05', 15, 5, 12, 9, 'wednesday', 4, 0, 'NONE') ,('2025-03-06', '1403-12-16', '1446-09-06', '2025-03-06', 16, 6, 12, 9, 'thursday ', 5, 0, 'NONE') ,('2025-03-07', '1403-12-17', '1446-09-07', '2025-03-07', 17, 7, 12, 9, 'friday ', 6, 0, 'NONE') ,('2025-03-08', '1403-12-18', '1446-09-08', '2025-03-08', 18, 8, 12, 9, 'saturday ', 7, 0, 'NONE') ,('2025-03-09', '1403-12-19', '1446-09-09', '2025-03-09', 19, 9, 12, 9, 'sunday ', 1, 0, 'NONE') ,('2025-03-10', '1403-12-20', '1446-09-10', '2025-03-10', 20, 10, 12, 9, 'monday ', 2, 0, 'NONE') ,('2025-03-11', '1403-12-21', '1446-09-11', '2025-03-11', 21, 11, 12, 9, 'tuesday ', 3, 0, 'NONE') ,('2025-03-12', '1403-12-22', '1446-09-12', '2025-03-12', 22, 12, 12, 9, 'wednesday', 4, 0, 'NONE') ,('2025-03-13', '1403-12-23', '1446-09-13', '2025-03-13', 23, 13, 12, 9, 'thursday ', 5, 0, 'NONE') ,('2025-03-14', '1403-12-24', '1446-09-14', '2025-03-14', 24, 14, 
12, 9, 'friday ', 6, 0, 'NONE') ,('2025-03-15', '1403-12-25', '1446-09-15', '2025-03-15', 25, 15, 12, 9, 'saturday ', 7, 0, 'NONE') ,('2025-03-16', '1403-12-26', '1446-09-16', '2025-03-16', 26, 16, 12, 9, 'sunday ', 1, 0, 'NONE') ,('2025-03-17', '1403-12-27', '1446-09-17', '2025-03-17', 27, 17, 12, 9, 'monday ', 2, 0, 'NONE') ,('2025-03-18', '1403-12-28', '1446-09-18', '2025-03-18', 28, 18, 12, 9, 'tuesday ', 3, 0, 'NONE') ,('2025-03-19', '1403-12-29', '1446-09-19', '2025-03-19', 29, 19, 12, 9, 'wednesday', 4, 0, 'NONE') ,('2025-03-20', '1403-12-30', '1446-09-20', '2025-03-20', 30, 20, 12, 9, 'thursday ', 5, 0, 'NONE') ,('2025-03-21', '1404-01-01', '1446-09-21', '2025-03-21', 1, 21, 1, 9, 'friday ', 6, 0, 'NONE') ,('2025-03-22', '1404-01-02', '1446-09-22', '2025-03-22', 2, 22, 1, 9, 'saturday ', 7, 0, 'NONE') ,('2025-03-23', '1404-01-03', '1446-09-23', '2025-03-23', 3, 23, 1, 9, 'sunday ', 1, 0, 'NONE') ,('2025-03-24', '1404-01-04', '1446-09-24', '2025-03-24', 4, 24, 1, 9, 'monday ', 2, 0, 'NONE') ,('2025-03-25', '1404-01-05', '1446-09-25', '2025-03-25', 5, 25, 1, 9, 'tuesday ', 3, 0, 'NONE') ,('2025-03-26', '1404-01-06', '1446-09-26', '2025-03-26', 6, 26, 1, 9, 'wednesday', 4, 0, 'NONE') ,('2025-03-27', '1404-01-07', '1446-09-27', '2025-03-27', 7, 27, 1, 9, 'thursday ', 5, 0, 'NONE') ,('2025-03-28', '1404-01-08', '1446-09-28', '2025-03-28', 8, 28, 1, 9, 'friday ', 6, 0, 'NONE') ,('2025-03-29', '1404-01-09', '1446-09-29', '2025-03-29', 9, 29, 1, 9, 'saturday ', 7, 0, 'NONE') ,('2025-03-30', '1404-01-10', '1446-09-30', '2025-03-30', 10, 30, 1, 9, 'sunday ', 1, 0, 'NONE') ,('2025-03-31', '1404-01-11', '1446-10-01', '2025-03-31', 11, 1, 1, 10, 'monday ', 2, 0, 'NONE') ,('2025-04-01', '1404-01-12', '1446-10-02', '2025-04-01', 12, 2, 1, 10, 'tuesday ', 3, 0, 'NONE') ,('2025-04-02', '1404-01-13', '1446-10-03', '2025-04-02', 13, 3, 1, 10, 'wednesday', 4, 0, 'NONE') ,('2025-04-03', '1404-01-14', '1446-10-04', '2025-04-03', 14, 4, 1, 10, 'thursday ', 5, 0, 'NONE') ,('2025-04-04', '1404-01-15', '1446-10-05', '2025-04-04', 15, 5, 1, 10, 'friday ', 6, 0, 'NONE') ,('2025-04-05', '1404-01-16', '1446-10-06', '2025-04-05', 16, 6, 1, 10, 'saturday ', 7, 0, 'NONE') ,('2025-04-06', '1404-01-17', '1446-10-07', '2025-04-06', 17, 7, 1, 10, 'sunday ', 1, 0, 'NONE') ,('2025-04-07', '1404-01-18', '1446-10-08', '2025-04-07', 18, 8, 1, 10, 'monday ', 2, 0, 'NONE') ,('2025-04-08', '1404-01-19', '1446-10-09', '2025-04-08', 19, 9, 1, 10, 'tuesday ', 3, 0, 'NONE') ,('2025-04-09', '1404-01-20', '1446-10-10', '2025-04-09', 20, 10, 1, 10, 'wednesday', 4, 0, 'NONE') ,('2025-04-10', '1404-01-21', '1446-10-11', '2025-04-10', 21, 11, 1, 10, 'thursday ', 5, 0, 'NONE') ,('2025-04-11', '1404-01-22', '1446-10-12', '2025-04-11', 22, 12, 1, 10, 'friday ', 6, 0, 'NONE') ,('2025-04-12', '1404-01-23', '1446-10-13', '2025-04-12', 23, 13, 1, 10, 'saturday ', 7, 0, 'NONE') ,('2025-04-13', '1404-01-24', '1446-10-14', '2025-04-13', 24, 14, 1, 10, 'sunday ', 1, 0, 'NONE') ,('2025-04-14', '1404-01-25', '1446-10-15', '2025-04-14', 25, 15, 1, 10, 'monday ', 2, 0, 'NONE') ,('2025-04-15', '1404-01-26', '1446-10-16', '2025-04-15', 26, 16, 1, 10, 'tuesday ', 3, 0, 'NONE') ,('2025-04-16', '1404-01-27', '1446-10-17', '2025-04-16', 27, 17, 1, 10, 'wednesday', 4, 0, 'NONE') ,('2025-04-17', '1404-01-28', '1446-10-18', '2025-04-17', 28, 18, 1, 10, 'thursday ', 5, 0, 'NONE') ,('2025-04-18', '1404-01-29', '1446-10-19', '2025-04-18', 29, 19, 1, 10, 'friday ', 6, 0, 'NONE') ,('2025-04-19', '1404-01-30', '1446-10-20', '2025-04-19', 30, 20, 1, 10, 'saturday ', 
7, 0, 'NONE') ,('2025-04-20', '1404-01-31', '1446-10-21', '2025-04-20', 31, 21, 1, 10, 'sunday ', 1, 0, 'NONE') ,('2025-04-21', '1404-02-01', '1446-10-22', '2025-04-21', 1, 22, 2, 10, 'monday ', 2, 0, 'NONE') ,('2025-04-22', '1404-02-02', '1446-10-23', '2025-04-22', 2, 23, 2, 10, 'tuesday ', 3, 0, 'NONE') ,('2025-04-23', '1404-02-03', '1446-10-24', '2025-04-23', 3, 24, 2, 10, 'wednesday', 4, 0, 'NONE') ,('2025-04-24', '1404-02-04', '1446-10-25', '2025-04-24', 4, 25, 2, 10, 'thursday ', 5, 0, 'NONE') ,('2025-04-25', '1404-02-05', '1446-10-26', '2025-04-25', 5, 26, 2, 10, 'friday ', 6, 0, 'NONE') ,('2025-04-26', '1404-02-06', '1446-10-27', '2025-04-26', 6, 27, 2, 10, 'saturday ', 7, 0, 'NONE') ,('2025-04-27', '1404-02-07', '1446-10-28', '2025-04-27', 7, 28, 2, 10, 'sunday ', 1, 0, 'NONE') ,('2025-04-28', '1404-02-08', '1446-10-29', '2025-04-28', 8, 29, 2, 10, 'monday ', 2, 0, 'NONE') ,('2025-04-29', '1404-02-09', '1446-11-01', '2025-04-29', 9, 1, 2, 11, 'tuesday ', 3, 0, 'NONE') ,('2025-04-30', '1404-02-10', '1446-11-02', '2025-04-30', 10, 2, 2, 11, 'wednesday', 4, 0, 'NONE') ,('2025-05-01', '1404-02-11', '1446-11-03', '2025-05-01', 11, 3, 2, 11, 'thursday ', 5, 0, 'NONE') ,('2025-05-02', '1404-02-12', '1446-11-04', '2025-05-02', 12, 4, 2, 11, 'friday ', 6, 0, 'NONE') ,('2025-05-03', '1404-02-13', '1446-11-05', '2025-05-03', 13, 5, 2, 11, 'saturday ', 7, 0, 'NONE') ,('2025-05-04', '1404-02-14', '1446-11-06', '2025-05-04', 14, 6, 2, 11, 'sunday ', 1, 0, 'NONE') ,('2025-05-05', '1404-02-15', '1446-11-07', '2025-05-05', 15, 7, 2, 11, 'monday ', 2, 0, 'NONE') ,('2025-05-06', '1404-02-16', '1446-11-08', '2025-05-06', 16, 8, 2, 11, 'tuesday ', 3, 0, 'NONE') ,('2025-05-07', '1404-02-17', '1446-11-09', '2025-05-07', 17, 9, 2, 11, 'wednesday', 4, 0, 'NONE') ,('2025-05-08', '1404-02-18', '1446-11-10', '2025-05-08', 18, 10, 2, 11, 'thursday ', 5, 0, 'NONE') ,('2025-05-09', '1404-02-19', '1446-11-11', '2025-05-09', 19, 11, 2, 11, 'friday ', 6, 0, 'NONE') ,('2025-05-10', '1404-02-20', '1446-11-12', '2025-05-10', 20, 12, 2, 11, 'saturday ', 7, 0, 'NONE') ,('2025-05-11', '1404-02-21', '1446-11-13', '2025-05-11', 21, 13, 2, 11, 'sunday ', 1, 0, 'NONE') ,('2025-05-12', '1404-02-22', '1446-11-14', '2025-05-12', 22, 14, 2, 11, 'monday ', 2, 0, 'NONE') ,('2025-05-13', '1404-02-23', '1446-11-15', '2025-05-13', 23, 15, 2, 11, 'tuesday ', 3, 0, 'NONE') ,('2025-05-14', '1404-02-24', '1446-11-16', '2025-05-14', 24, 16, 2, 11, 'wednesday', 4, 0, 'NONE') ,('2025-05-15', '1404-02-25', '1446-11-17', '2025-05-15', 25, 17, 2, 11, 'thursday ', 5, 0, 'NONE') ,('2025-05-16', '1404-02-26', '1446-11-18', '2025-05-16', 26, 18, 2, 11, 'friday ', 6, 0, 'NONE') ,('2025-05-17', '1404-02-27', '1446-11-19', '2025-05-17', 27, 19, 2, 11, 'saturday ', 7, 0, 'NONE') ,('2025-05-18', '1404-02-28', '1446-11-20', '2025-05-18', 28, 20, 2, 11, 'sunday ', 1, 0, 'NONE') ,('2025-05-19', '1404-02-29', '1446-11-21', '2025-05-19', 29, 21, 2, 11, 'monday ', 2, 0, 'NONE') ,('2025-05-20', '1404-02-30', '1446-11-22', '2025-05-20', 30, 22, 2, 11, 'tuesday ', 3, 0, 'NONE') ,('2025-05-21', '1404-02-31', '1446-11-23', '2025-05-21', 31, 23, 2, 11, 'wednesday', 4, 0, 'NONE') ,('2025-05-22', '1404-03-01', '1446-11-24', '2025-05-22', 1, 24, 3, 11, 'thursday ', 5, 0, 'NONE') ,('2025-05-23', '1404-03-02', '1446-11-25', '2025-05-23', 2, 25, 3, 11, 'friday ', 6, 0, 'NONE') ,('2025-05-24', '1404-03-03', '1446-11-26', '2025-05-24', 3, 26, 3, 11, 'saturday ', 7, 0, 'NONE') ,('2025-05-25', '1404-03-04', '1446-11-27', '2025-05-25', 4, 27, 3, 11, 'sunday ', 1, 0, 'NONE') 
,('2025-05-26', '1404-03-05', '1446-11-28', '2025-05-26', 5, 28, 3, 11, 'monday ', 2, 0, 'NONE') ,('2025-05-27', '1404-03-06', '1446-11-29', '2025-05-27', 6, 29, 3, 11, 'tuesday ', 3, 0, 'NONE') ,('2025-05-28', '1404-03-07', '1446-11-30', '2025-05-28', 7, 30, 3, 11, 'wednesday', 4, 0, 'NONE') ,('2025-05-29', '1404-03-08', '1446-12-01', '2025-05-29', 8, 1, 3, 12, 'thursday ', 5, 0, 'NONE') ,('2025-05-30', '1404-03-09', '1446-12-02', '2025-05-30', 9, 2, 3, 12, 'friday ', 6, 0, 'NONE') ,('2025-05-31', '1404-03-10', '1446-12-03', '2025-05-31', 10, 3, 3, 12, 'saturday ', 7, 0, 'NONE') ,('2025-06-01', '1404-03-11', '1446-12-04', '2025-06-01', 11, 4, 3, 12, 'sunday ', 1, 0, 'NONE') ,('2025-06-02', '1404-03-12', '1446-12-05', '2025-06-02', 12, 5, 3, 12, 'monday ', 2, 0, 'NONE') ,('2025-06-03', '1404-03-13', '1446-12-06', '2025-06-03', 13, 6, 3, 12, 'tuesday ', 3, 0, 'NONE') ,('2025-06-04', '1404-03-14', '1446-12-07', '2025-06-04', 14, 7, 3, 12, 'wednesday', 4, 0, 'NONE') ,('2025-06-05', '1404-03-15', '1446-12-08', '2025-06-05', 15, 8, 3, 12, 'thursday ', 5, 0, 'NONE') ,('2025-06-06', '1404-03-16', '1446-12-09', '2025-06-06', 16, 9, 3, 12, 'friday ', 6, 0, 'NONE') ,('2025-06-07', '1404-03-17', '1446-12-10', '2025-06-07', 17, 10, 3, 12, 'saturday ', 7, 0, 'NONE') ,('2025-06-08', '1404-03-18', '1446-12-11', '2025-06-08', 18, 11, 3, 12, 'sunday ', 1, 0, 'NONE') ,('2025-06-09', '1404-03-19', '1446-12-12', '2025-06-09', 19, 12, 3, 12, 'monday ', 2, 0, 'NONE') ,('2025-06-10', '1404-03-20', '1446-12-13', '2025-06-10', 20, 13, 3, 12, 'tuesday ', 3, 0, 'NONE') ,('2025-06-11', '1404-03-21', '1446-12-14', '2025-06-11', 21, 14, 3, 12, 'wednesday', 4, 0, 'NONE') ,('2025-06-12', '1404-03-22', '1446-12-15', '2025-06-12', 22, 15, 3, 12, 'thursday ', 5, 0, 'NONE') ,('2025-06-13', '1404-03-23', '1446-12-16', '2025-06-13', 23, 16, 3, 12, 'friday ', 6, 0, 'NONE') ,('2025-06-14', '1404-03-24', '1446-12-17', '2025-06-14', 24, 17, 3, 12, 'saturday ', 7, 0, 'NONE') ,('2025-06-15', '1404-03-25', '1446-12-18', '2025-06-15', 25, 18, 3, 12, 'sunday ', 1, 0, 'NONE') ,('2025-06-16', '1404-03-26', '1446-12-19', '2025-06-16', 26, 19, 3, 12, 'monday ', 2, 0, 'NONE') ,('2025-06-17', '1404-03-27', '1446-12-20', '2025-06-17', 27, 20, 3, 12, 'tuesday ', 3, 0, 'NONE') ,('2025-06-18', '1404-03-28', '1446-12-21', '2025-06-18', 28, 21, 3, 12, 'wednesday', 4, 0, 'NONE') ,('2025-06-19', '1404-03-29', '1446-12-22', '2025-06-19', 29, 22, 3, 12, 'thursday ', 5, 0, 'NONE') ,('2025-06-20', '1404-03-30', '1446-12-23', '2025-06-20', 30, 23, 3, 12, 'friday ', 6, 0, 'NONE') ,('2025-06-21', '1404-03-31', '1446-12-24', '2025-06-21', 31, 24, 3, 12, 'saturday ', 7, 0, 'NONE') ,('2025-06-22', '1404-04-01', '1446-12-25', '2025-06-22', 1, 25, 4, 12, 'sunday ', 1, 0, 'NONE') ,('2025-06-23', '1404-04-02', '1446-12-26', '2025-06-23', 2, 26, 4, 12, 'monday ', 2, 0, 'NONE') ,('2025-06-24', '1404-04-03', '1446-12-27', '2025-06-24', 3, 27, 4, 12, 'tuesday ', 3, 0, 'NONE') ,('2025-06-25', '1404-04-04', '1446-12-28', '2025-06-25', 4, 28, 4, 12, 'wednesday', 4, 0, 'NONE') ,('2025-06-26', '1404-04-05', '1446-12-29', '2025-06-26', 5, 29, 4, 12, 'thursday ', 5, 0, 'NONE') ,('2025-06-27', '1404-04-06', '1447-01-01', '2025-06-27', 6, 1, 4, 1, 'friday ', 6, 0, 'NONE') ,('2025-06-28', '1404-04-07', '1447-01-02', '2025-06-28', 7, 2, 4, 1, 'saturday ', 7, 0, 'NONE') ,('2025-06-29', '1404-04-08', '1447-01-03', '2025-06-29', 8, 3, 4, 1, 'sunday ', 1, 0, 'NONE') ,('2025-06-30', '1404-04-09', '1447-01-04', '2025-06-30', 9, 4, 4, 1, 'monday ', 2, 0, 'NONE') ,('2025-07-01', 
'1404-04-10', '1447-01-05', '2025-07-01', 10, 5, 4, 1, 'tuesday ', 3, 0, 'NONE') ,('2025-07-02', '1404-04-11', '1447-01-06', '2025-07-02', 11, 6, 4, 1, 'wednesday', 4, 0, 'NONE') ,('2025-07-03', '1404-04-12', '1447-01-07', '2025-07-03', 12, 7, 4, 1, 'thursday ', 5, 0, 'NONE') ,('2025-07-04', '1404-04-13', '1447-01-08', '2025-07-04', 13, 8, 4, 1, 'friday ', 6, 0, 'NONE') ,('2025-07-05', '1404-04-14', '1447-01-09', '2025-07-05', 14, 9, 4, 1, 'saturday ', 7, 0, 'NONE') ,('2025-07-06', '1404-04-15', '1447-01-10', '2025-07-06', 15, 10, 4, 1, 'sunday ', 1, 0, 'NONE') ,('2025-07-07', '1404-04-16', '1447-01-11', '2025-07-07', 16, 11, 4, 1, 'monday ', 2, 0, 'NONE') ,('2025-07-08', '1404-04-17', '1447-01-12', '2025-07-08', 17, 12, 4, 1, 'tuesday ', 3, 0, 'NONE') ,('2025-07-09', '1404-04-18', '1447-01-13', '2025-07-09', 18, 13, 4, 1, 'wednesday', 4, 0, 'NONE') ,('2025-07-10', '1404-04-19', '1447-01-14', '2025-07-10', 19, 14, 4, 1, 'thursday ', 5, 0, 'NONE') ,('2025-07-11', '1404-04-20', '1447-01-15', '2025-07-11', 20, 15, 4, 1, 'friday ', 6, 0, 'NONE') ,('2025-07-12', '1404-04-21', '1447-01-16', '2025-07-12', 21, 16, 4, 1, 'saturday ', 7, 0, 'NONE') ,('2025-07-13', '1404-04-22', '1447-01-17', '2025-07-13', 22, 17, 4, 1, 'sunday ', 1, 0, 'NONE') ,('2025-07-14', '1404-04-23', '1447-01-18', '2025-07-14', 23, 18, 4, 1, 'monday ', 2, 0, 'NONE') ,('2025-07-15', '1404-04-24', '1447-01-19', '2025-07-15', 24, 19, 4, 1, 'tuesday ', 3, 0, 'NONE') ,('2025-07-16', '1404-04-25', '1447-01-20', '2025-07-16', 25, 20, 4, 1, 'wednesday', 4, 0, 'NONE') ,('2025-07-17', '1404-04-26', '1447-01-21', '2025-07-17', 26, 21, 4, 1, 'thursday ', 5, 0, 'NONE') ,('2025-07-18', '1404-04-27', '1447-01-22', '2025-07-18', 27, 22, 4, 1, 'friday ', 6, 0, 'NONE') ,('2025-07-19', '1404-04-28', '1447-01-23', '2025-07-19', 28, 23, 4, 1, 'saturday ', 7, 0, 'NONE') ,('2025-07-20', '1404-04-29', '1447-01-24', '2025-07-20', 29, 24, 4, 1, 'sunday ', 1, 0, 'NONE') ,('2025-07-21', '1404-04-30', '1447-01-25', '2025-07-21', 30, 25, 4, 1, 'monday ', 2, 0, 'NONE') ,('2025-07-22', '1404-04-31', '1447-01-26', '2025-07-22', 31, 26, 4, 1, 'tuesday ', 3, 0, 'NONE') ,('2025-07-23', '1404-05-01', '1447-01-27', '2025-07-23', 1, 27, 5, 1, 'wednesday', 4, 0, 'NONE') ,('2025-07-24', '1404-05-02', '1447-01-28', '2025-07-24', 2, 28, 5, 1, 'thursday ', 5, 0, 'NONE') ,('2025-07-25', '1404-05-03', '1447-01-29', '2025-07-25', 3, 29, 5, 1, 'friday ', 6, 0, 'NONE') ,('2025-07-26', '1404-05-04', '1447-01-30', '2025-07-26', 4, 30, 5, 1, 'saturday ', 7, 0, 'NONE') ,('2025-07-27', '1404-05-05', '1447-02-01', '2025-07-27', 5, 1, 5, 2, 'sunday ', 1, 0, 'NONE') ,('2025-07-28', '1404-05-06', '1447-02-02', '2025-07-28', 6, 2, 5, 2, 'monday ', 2, 0, 'NONE') ,('2025-07-29', '1404-05-07', '1447-02-03', '2025-07-29', 7, 3, 5, 2, 'tuesday ', 3, 0, 'NONE') ,('2025-07-30', '1404-05-08', '1447-02-04', '2025-07-30', 8, 4, 5, 2, 'wednesday', 4, 0, 'NONE') ,('2025-07-31', '1404-05-09', '1447-02-05', '2025-07-31', 9, 5, 5, 2, 'thursday ', 5, 0, 'NONE') ,('2025-08-01', '1404-05-10', '1447-02-06', '2025-08-01', 10, 6, 5, 2, 'friday ', 6, 0, 'NONE') ,('2025-08-02', '1404-05-11', '1447-02-07', '2025-08-02', 11, 7, 5, 2, 'saturday ', 7, 0, 'NONE') ,('2025-08-03', '1404-05-12', '1447-02-08', '2025-08-03', 12, 8, 5, 2, 'sunday ', 1, 0, 'NONE') ,('2025-08-04', '1404-05-13', '1447-02-09', '2025-08-04', 13, 9, 5, 2, 'monday ', 2, 0, 'NONE') ,('2025-08-05', '1404-05-14', '1447-02-10', '2025-08-05', 14, 10, 5, 2, 'tuesday ', 3, 0, 'NONE') ,('2025-08-06', '1404-05-15', '1447-02-11', '2025-08-06', 15, 11, 
5, 2, 'wednesday', 4, 0, 'NONE') ,('2025-08-07', '1404-05-16', '1447-02-12', '2025-08-07', 16, 12, 5, 2, 'thursday ', 5, 0, 'NONE') ,('2025-08-08', '1404-05-17', '1447-02-13', '2025-08-08', 17, 13, 5, 2, 'friday ', 6, 0, 'NONE') ,('2025-08-09', '1404-05-18', '1447-02-14', '2025-08-09', 18, 14, 5, 2, 'saturday ', 7, 0, 'NONE') ,('2025-08-10', '1404-05-19', '1447-02-15', '2025-08-10', 19, 15, 5, 2, 'sunday ', 1, 0, 'NONE') ,('2025-08-11', '1404-05-20', '1447-02-16', '2025-08-11', 20, 16, 5, 2, 'monday ', 2, 0, 'NONE') ,('2025-08-12', '1404-05-21', '1447-02-17', '2025-08-12', 21, 17, 5, 2, 'tuesday ', 3, 0, 'NONE') ,('2025-08-13', '1404-05-22', '1447-02-18', '2025-08-13', 22, 18, 5, 2, 'wednesday', 4, 0, 'NONE') ,('2025-08-14', '1404-05-23', '1447-02-19', '2025-08-14', 23, 19, 5, 2, 'thursday ', 5, 0, 'NONE') ,('2025-08-15', '1404-05-24', '1447-02-20', '2025-08-15', 24, 20, 5, 2, 'friday ', 6, 0, 'NONE') ,('2025-08-16', '1404-05-25', '1447-02-21', '2025-08-16', 25, 21, 5, 2, 'saturday ', 7, 0, 'NONE') ,('2025-08-17', '1404-05-26', '1447-02-22', '2025-08-17', 26, 22, 5, 2, 'sunday ', 1, 0, 'NONE') ,('2025-08-18', '1404-05-27', '1447-02-23', '2025-08-18', 27, 23, 5, 2, 'monday ', 2, 0, 'NONE') ,('2025-08-19', '1404-05-28', '1447-02-24', '2025-08-19', 28, 24, 5, 2, 'tuesday ', 3, 0, 'NONE') ,('2025-08-20', '1404-05-29', '1447-02-25', '2025-08-20', 29, 25, 5, 2, 'wednesday', 4, 0, 'NONE') ,('2025-08-21', '1404-05-30', '1447-02-26', '2025-08-21', 30, 26, 5, 2, 'thursday ', 5, 0, 'NONE') ,('2025-08-22', '1404-05-31', '1447-02-27', '2025-08-22', 31, 27, 5, 2, 'friday ', 6, 0, 'NONE') ,('2025-08-23', '1404-06-01', '1447-02-28', '2025-08-23', 1, 28, 6, 2, 'saturday ', 7, 0, 'NONE') ,('2025-08-24', '1404-06-02', '1447-02-29', '2025-08-24', 2, 29, 6, 2, 'sunday ', 1, 0, 'NONE') ,('2025-08-25', '1404-06-03', '1447-03-01', '2025-08-25', 3, 1, 6, 3, 'monday ', 2, 0, 'NONE') ,('2025-08-26', '1404-06-04', '1447-03-02', '2025-08-26', 4, 2, 6, 3, 'tuesday ', 3, 0, 'NONE') ,('2025-08-27', '1404-06-05', '1447-03-03', '2025-08-27', 5, 3, 6, 3, 'wednesday', 4, 0, 'NONE') ,('2025-08-28', '1404-06-06', '1447-03-04', '2025-08-28', 6, 4, 6, 3, 'thursday ', 5, 0, 'NONE') ,('2025-08-29', '1404-06-07', '1447-03-05', '2025-08-29', 7, 5, 6, 3, 'friday ', 6, 0, 'NONE') ,('2025-08-30', '1404-06-08', '1447-03-06', '2025-08-30', 8, 6, 6, 3, 'saturday ', 7, 0, 'NONE') ,('2025-08-31', '1404-06-09', '1447-03-07', '2025-08-31', 9, 7, 6, 3, 'sunday ', 1, 0, 'NONE') ,('2025-09-01', '1404-06-10', '1447-03-08', '2025-09-01', 10, 8, 6, 3, 'monday ', 2, 0, 'NONE') ,('2025-09-02', '1404-06-11', '1447-03-09', '2025-09-02', 11, 9, 6, 3, 'tuesday ', 3, 0, 'NONE') ,('2025-09-03', '1404-06-12', '1447-03-10', '2025-09-03', 12, 10, 6, 3, 'wednesday', 4, 0, 'NONE') ,('2025-09-04', '1404-06-13', '1447-03-11', '2025-09-04', 13, 11, 6, 3, 'thursday ', 5, 0, 'NONE') ,('2025-09-05', '1404-06-14', '1447-03-12', '2025-09-05', 14, 12, 6, 3, 'friday ', 6, 0, 'NONE') ,('2025-09-06', '1404-06-15', '1447-03-13', '2025-09-06', 15, 13, 6, 3, 'saturday ', 7, 0, 'NONE') ,('2025-09-07', '1404-06-16', '1447-03-14', '2025-09-07', 16, 14, 6, 3, 'sunday ', 1, 0, 'NONE') ,('2025-09-08', '1404-06-17', '1447-03-15', '2025-09-08', 17, 15, 6, 3, 'monday ', 2, 0, 'NONE') ,('2025-09-09', '1404-06-18', '1447-03-16', '2025-09-09', 18, 16, 6, 3, 'tuesday ', 3, 0, 'NONE') ,('2025-09-10', '1404-06-19', '1447-03-17', '2025-09-10', 19, 17, 6, 3, 'wednesday', 4, 0, 'NONE') ,('2025-09-11', '1404-06-20', '1447-03-18', '2025-09-11', 20, 18, 6, 3, 'thursday ', 5, 0, 'NONE') 
,('2025-09-12', '1404-06-21', '1447-03-19', '2025-09-12', 21, 19, 6, 3, 'friday ', 6, 0, 'NONE') ,('2025-09-13', '1404-06-22', '1447-03-20', '2025-09-13', 22, 20, 6, 3, 'saturday ', 7, 0, 'NONE') ,('2025-09-14', '1404-06-23', '1447-03-21', '2025-09-14', 23, 21, 6, 3, 'sunday ', 1, 0, 'NONE') ,('2025-09-15', '1404-06-24', '1447-03-22', '2025-09-15', 24, 22, 6, 3, 'monday ', 2, 0, 'NONE') ,('2025-09-16', '1404-06-25', '1447-03-23', '2025-09-16', 25, 23, 6, 3, 'tuesday ', 3, 0, 'NONE') ,('2025-09-17', '1404-06-26', '1447-03-24', '2025-09-17', 26, 24, 6, 3, 'wednesday', 4, 0, 'NONE') ,('2025-09-18', '1404-06-27', '1447-03-25', '2025-09-18', 27, 25, 6, 3, 'thursday ', 5, 0, 'NONE') ,('2025-09-19', '1404-06-28', '1447-03-26', '2025-09-19', 28, 26, 6, 3, 'friday ', 6, 0, 'NONE') ,('2025-09-20', '1404-06-29', '1447-03-27', '2025-09-20', 29, 27, 6, 3, 'saturday ', 7, 0, 'NONE') ,('2025-09-21', '1404-06-30', '1447-03-28', '2025-09-21', 30, 28, 6, 3, 'sunday ', 1, 0, 'NONE') ,('2025-09-22', '1404-06-31', '1447-03-29', '2025-09-22', 31, 29, 6, 3, 'monday ', 2, 0, 'NONE') ,('2025-09-23', '1404-07-01', '1447-03-30', '2025-09-23', 1, 30, 7, 3, 'tuesday ', 3, 0, 'NONE') ,('2025-09-24', '1404-07-02', '1447-04-01', '2025-09-24', 2, 1, 7, 4, 'wednesday', 4, 0, 'NONE') ,('2025-09-25', '1404-07-03', '1447-04-02', '2025-09-25', 3, 2, 7, 4, 'thursday ', 5, 0, 'NONE') ,('2025-09-26', '1404-07-04', '1447-04-03', '2025-09-26', 4, 3, 7, 4, 'friday ', 6, 0, 'NONE') ,('2025-09-27', '1404-07-05', '1447-04-04', '2025-09-27', 5, 4, 7, 4, 'saturday ', 7, 0, 'NONE') ,('2025-09-28', '1404-07-06', '1447-04-05', '2025-09-28', 6, 5, 7, 4, 'sunday ', 1, 0, 'NONE') ,('2025-09-29', '1404-07-07', '1447-04-06', '2025-09-29', 7, 6, 7, 4, 'monday ', 2, 0, 'NONE') ,('2025-09-30', '1404-07-08', '1447-04-07', '2025-09-30', 8, 7, 7, 4, 'tuesday ', 3, 0, 'NONE') ,('2025-10-01', '1404-07-09', '1447-04-08', '2025-10-01', 9, 8, 7, 4, 'wednesday', 4, 0, 'NONE') ,('2025-10-02', '1404-07-10', '1447-04-09', '2025-10-02', 10, 9, 7, 4, 'thursday ', 5, 0, 'NONE') ,('2025-10-03', '1404-07-11', '1447-04-10', '2025-10-03', 11, 10, 7, 4, 'friday ', 6, 0, 'NONE') ,('2025-10-04', '1404-07-12', '1447-04-11', '2025-10-04', 12, 11, 7, 4, 'saturday ', 7, 0, 'NONE') ,('2025-10-05', '1404-07-13', '1447-04-12', '2025-10-05', 13, 12, 7, 4, 'sunday ', 1, 0, 'NONE') ,('2025-10-06', '1404-07-14', '1447-04-13', '2025-10-06', 14, 13, 7, 4, 'monday ', 2, 0, 'NONE') ,('2025-10-07', '1404-07-15', '1447-04-14', '2025-10-07', 15, 14, 7, 4, 'tuesday ', 3, 0, 'NONE') ,('2025-10-08', '1404-07-16', '1447-04-15', '2025-10-08', 16, 15, 7, 4, 'wednesday', 4, 0, 'NONE') ,('2025-10-09', '1404-07-17', '1447-04-16', '2025-10-09', 17, 16, 7, 4, 'thursday ', 5, 0, 'NONE') ,('2025-10-10', '1404-07-18', '1447-04-17', '2025-10-10', 18, 17, 7, 4, 'friday ', 6, 0, 'NONE') ,('2025-10-11', '1404-07-19', '1447-04-18', '2025-10-11', 19, 18, 7, 4, 'saturday ', 7, 0, 'NONE') ,('2025-10-12', '1404-07-20', '1447-04-19', '2025-10-12', 20, 19, 7, 4, 'sunday ', 1, 0, 'NONE') ,('2025-10-13', '1404-07-21', '1447-04-20', '2025-10-13', 21, 20, 7, 4, 'monday ', 2, 0, 'NONE') ,('2025-10-14', '1404-07-22', '1447-04-21', '2025-10-14', 22, 21, 7, 4, 'tuesday ', 3, 0, 'NONE') ,('2025-10-15', '1404-07-23', '1447-04-22', '2025-10-15', 23, 22, 7, 4, 'wednesday', 4, 0, 'NONE') ,('2025-10-16', '1404-07-24', '1447-04-23', '2025-10-16', 24, 23, 7, 4, 'thursday ', 5, 0, 'NONE') ,('2025-10-17', '1404-07-25', '1447-04-24', '2025-10-17', 25, 24, 7, 4, 'friday ', 6, 0, 'NONE') ,('2025-10-18', '1404-07-26', '1447-04-25', 
'2025-10-18', 26, 25, 7, 4, 'saturday ', 7, 0, 'NONE') ,('2025-10-19', '1404-07-27', '1447-04-26', '2025-10-19', 27, 26, 7, 4, 'sunday ', 1, 0, 'NONE') ,('2025-10-20', '1404-07-28', '1447-04-27', '2025-10-20', 28, 27, 7, 4, 'monday ', 2, 0, 'NONE') ,('2025-10-21', '1404-07-29', '1447-04-28', '2025-10-21', 29, 28, 7, 4, 'tuesday ', 3, 0, 'NONE') ,('2025-10-22', '1404-07-30', '1447-04-29', '2025-10-22', 30, 29, 7, 4, 'wednesday', 4, 0, 'NONE') ,('2025-10-23', '1404-08-01', '1447-05-01', '2025-10-23', 1, 1, 8, 5, 'thursday ', 5, 0, 'NONE') ,('2025-10-24', '1404-08-02', '1447-05-02', '2025-10-24', 2, 2, 8, 5, 'friday ', 6, 0, 'NONE') ,('2025-10-25', '1404-08-03', '1447-05-03', '2025-10-25', 3, 3, 8, 5, 'saturday ', 7, 0, 'NONE') ,('2025-10-26', '1404-08-04', '1447-05-04', '2025-10-26', 4, 4, 8, 5, 'sunday ', 1, 0, 'NONE') ,('2025-10-27', '1404-08-05', '1447-05-05', '2025-10-27', 5, 5, 8, 5, 'monday ', 2, 0, 'NONE') ,('2025-10-28', '1404-08-06', '1447-05-06', '2025-10-28', 6, 6, 8, 5, 'tuesday ', 3, 0, 'NONE') ,('2025-10-29', '1404-08-07', '1447-05-07', '2025-10-29', 7, 7, 8, 5, 'wednesday', 4, 0, 'NONE') ,('2025-10-30', '1404-08-08', '1447-05-08', '2025-10-30', 8, 8, 8, 5, 'thursday ', 5, 0, 'NONE') ,('2025-10-31', '1404-08-09', '1447-05-09', '2025-10-31', 9, 9, 8, 5, 'friday ', 6, 0, 'NONE') ,('2025-11-01', '1404-08-10', '1447-05-10', '2025-11-01', 10, 10, 8, 5, 'saturday ', 7, 0, 'NONE') ,('2025-11-02', '1404-08-11', '1447-05-11', '2025-11-02', 11, 11, 8, 5, 'sunday ', 1, 0, 'NONE') ,('2025-11-03', '1404-08-12', '1447-05-12', '2025-11-03', 12, 12, 8, 5, 'monday ', 2, 0, 'NONE') ,('2025-11-04', '1404-08-13', '1447-05-13', '2025-11-04', 13, 13, 8, 5, 'tuesday ', 3, 0, 'NONE') ,('2025-11-05', '1404-08-14', '1447-05-14', '2025-11-05', 14, 14, 8, 5, 'wednesday', 4, 0, 'NONE') ,('2025-11-06', '1404-08-15', '1447-05-15', '2025-11-06', 15, 15, 8, 5, 'thursday ', 5, 0, 'NONE') ,('2025-11-07', '1404-08-16', '1447-05-16', '2025-11-07', 16, 16, 8, 5, 'friday ', 6, 0, 'NONE') ,('2025-11-08', '1404-08-17', '1447-05-17', '2025-11-08', 17, 17, 8, 5, 'saturday ', 7, 0, 'NONE') ,('2025-11-09', '1404-08-18', '1447-05-18', '2025-11-09', 18, 18, 8, 5, 'sunday ', 1, 0, 'NONE') ,('2025-11-10', '1404-08-19', '1447-05-19', '2025-11-10', 19, 19, 8, 5, 'monday ', 2, 0, 'NONE') ,('2025-11-11', '1404-08-20', '1447-05-20', '2025-11-11', 20, 20, 8, 5, 'tuesday ', 3, 0, 'NONE') ,('2025-11-12', '1404-08-21', '1447-05-21', '2025-11-12', 21, 21, 8, 5, 'wednesday', 4, 0, 'NONE') ,('2025-11-13', '1404-08-22', '1447-05-22', '2025-11-13', 22, 22, 8, 5, 'thursday ', 5, 0, 'NONE') ,('2025-11-14', '1404-08-23', '1447-05-23', '2025-11-14', 23, 23, 8, 5, 'friday ', 6, 0, 'NONE') ,('2025-11-15', '1404-08-24', '1447-05-24', '2025-11-15', 24, 24, 8, 5, 'saturday ', 7, 0, 'NONE') ,('2025-11-16', '1404-08-25', '1447-05-25', '2025-11-16', 25, 25, 8, 5, 'sunday ', 1, 0, 'NONE') ,('2025-11-17', '1404-08-26', '1447-05-26', '2025-11-17', 26, 26, 8, 5, 'monday ', 2, 0, 'NONE') ,('2025-11-18', '1404-08-27', '1447-05-27', '2025-11-18', 27, 27, 8, 5, 'tuesday ', 3, 0, 'NONE') ,('2025-11-19', '1404-08-28', '1447-05-28', '2025-11-19', 28, 28, 8, 5, 'wednesday', 4, 0, 'NONE') ,('2025-11-20', '1404-08-29', '1447-05-29', '2025-11-20', 29, 29, 8, 5, 'thursday ', 5, 0, 'NONE') ,('2025-11-21', '1404-08-30', '1447-05-30', '2025-11-21', 30, 30, 8, 5, 'friday ', 6, 0, 'NONE') ,('2025-11-22', '1404-09-01', '1447-06-01', '2025-11-22', 1, 1, 9, 6, 'saturday ', 7, 0, 'NONE') ,('2025-11-23', '1404-09-02', '1447-06-02', '2025-11-23', 2, 2, 9, 6, 'sunday ', 1, 0, 
'NONE') ,('2025-11-24', '1404-09-03', '1447-06-03', '2025-11-24', 3, 3, 9, 6, 'monday ', 2, 0, 'NONE') ,('2025-11-25', '1404-09-04', '1447-06-04', '2025-11-25', 4, 4, 9, 6, 'tuesday ', 3, 0, 'NONE') ,('2025-11-26', '1404-09-05', '1447-06-05', '2025-11-26', 5, 5, 9, 6, 'wednesday', 4, 0, 'NONE') ,('2025-11-27', '1404-09-06', '1447-06-06', '2025-11-27', 6, 6, 9, 6, 'thursday ', 5, 0, 'NONE') ,('2025-11-28', '1404-09-07', '1447-06-07', '2025-11-28', 7, 7, 9, 6, 'friday ', 6, 0, 'NONE') ,('2025-11-29', '1404-09-08', '1447-06-08', '2025-11-29', 8, 8, 9, 6, 'saturday ', 7, 0, 'NONE') ,('2025-11-30', '1404-09-09', '1447-06-09', '2025-11-30', 9, 9, 9, 6, 'sunday ', 1, 0, 'NONE') ,('2025-12-01', '1404-09-10', '1447-06-10', '2025-12-01', 10, 10, 9, 6, 'monday ', 2, 0, 'NONE') ,('2025-12-02', '1404-09-11', '1447-06-11', '2025-12-02', 11, 11, 9, 6, 'tuesday ', 3, 0, 'NONE') ,('2025-12-03', '1404-09-12', '1447-06-12', '2025-12-03', 12, 12, 9, 6, 'wednesday', 4, 0, 'NONE') ,('2025-12-04', '1404-09-13', '1447-06-13', '2025-12-04', 13, 13, 9, 6, 'thursday ', 5, 0, 'NONE') ,('2025-12-05', '1404-09-14', '1447-06-14', '2025-12-05', 14, 14, 9, 6, 'friday ', 6, 0, 'NONE') ,('2025-12-06', '1404-09-15', '1447-06-15', '2025-12-06', 15, 15, 9, 6, 'saturday ', 7, 0, 'NONE') ,('2025-12-07', '1404-09-16', '1447-06-16', '2025-12-07', 16, 16, 9, 6, 'sunday ', 1, 0, 'NONE') ,('2025-12-08', '1404-09-17', '1447-06-17', '2025-12-08', 17, 17, 9, 6, 'monday ', 2, 0, 'NONE') ,('2025-12-09', '1404-09-18', '1447-06-18', '2025-12-09', 18, 18, 9, 6, 'tuesday ', 3, 0, 'NONE') ,('2025-12-10', '1404-09-19', '1447-06-19', '2025-12-10', 19, 19, 9, 6, 'wednesday', 4, 0, 'NONE') ,('2025-12-11', '1404-09-20', '1447-06-20', '2025-12-11', 20, 20, 9, 6, 'thursday ', 5, 0, 'NONE') ,('2025-12-12', '1404-09-21', '1447-06-21', '2025-12-12', 21, 21, 9, 6, 'friday ', 6, 0, 'NONE') ,('2025-12-13', '1404-09-22', '1447-06-22', '2025-12-13', 22, 22, 9, 6, 'saturday ', 7, 0, 'NONE') ,('2025-12-14', '1404-09-23', '1447-06-23', '2025-12-14', 23, 23, 9, 6, 'sunday ', 1, 0, 'NONE') ,('2025-12-15', '1404-09-24', '1447-06-24', '2025-12-15', 24, 24, 9, 6, 'monday ', 2, 0, 'NONE') ,('2025-12-16', '1404-09-25', '1447-06-25', '2025-12-16', 25, 25, 9, 6, 'tuesday ', 3, 0, 'NONE') ,('2025-12-17', '1404-09-26', '1447-06-26', '2025-12-17', 26, 26, 9, 6, 'wednesday', 4, 0, 'NONE') ,('2025-12-18', '1404-09-27', '1447-06-27', '2025-12-18', 27, 27, 9, 6, 'thursday ', 5, 0, 'NONE') ,('2025-12-19', '1404-09-28', '1447-06-28', '2025-12-19', 28, 28, 9, 6, 'friday ', 6, 0, 'NONE') ,('2025-12-20', '1404-09-29', '1447-06-29', '2025-12-20', 29, 29, 9, 6, 'saturday ', 7, 0, 'NONE') ,('2025-12-21', '1404-09-30', '1447-07-01', '2025-12-21', 30, 1, 9, 7, 'sunday ', 1, 0, 'NONE') ,('2025-12-22', '1404-10-01', '1447-07-02', '2025-12-22', 1, 2, 10, 7, 'monday ', 2, 0, 'NONE') ,('2025-12-23', '1404-10-02', '1447-07-03', '2025-12-23', 2, 3, 10, 7, 'tuesday ', 3, 0, 'NONE') ,('2025-12-24', '1404-10-03', '1447-07-04', '2025-12-24', 3, 4, 10, 7, 'wednesday', 4, 0, 'NONE') ,('2025-12-25', '1404-10-04', '1447-07-05', '2025-12-25', 4, 5, 10, 7, 'thursday ', 5, 0, 'NONE') ,('2025-12-26', '1404-10-05', '1447-07-06', '2025-12-26', 5, 6, 10, 7, 'friday ', 6, 0, 'NONE') ,('2025-12-27', '1404-10-06', '1447-07-07', '2025-12-27', 6, 7, 10, 7, 'saturday ', 7, 0, 'NONE') ,('2025-12-28', '1404-10-07', '1447-07-08', '2025-12-28', 7, 8, 10, 7, 'sunday ', 1, 0, 'NONE') ,('2025-12-29', '1404-10-08', '1447-07-09', '2025-12-29', 8, 9, 10, 7, 'monday ', 2, 0, 'NONE') ,('2025-12-30', '1404-10-09', 
'1447-07-10', '2025-12-30', 9, 10, 10, 7, 'tuesday ', 3, 0, 'NONE') ,('2025-12-31', '1404-10-10', '1447-07-11', '2025-12-31', 10, 11, 10, 7, 'wednesday', 4, 0, 'NONE') ,('2026-01-01', '1404-10-11', '1447-07-12', '2026-01-01', 11, 12, 10, 7, 'thursday ', 5, 0, 'NONE') ,('2026-01-02', '1404-10-12', '1447-07-13', '2026-01-02', 12, 13, 10, 7, 'friday ', 6, 0, 'NONE') ,('2026-01-03', '1404-10-13', '1447-07-14', '2026-01-03', 13, 14, 10, 7, 'saturday ', 7, 0, 'NONE') ,('2026-01-04', '1404-10-14', '1447-07-15', '2026-01-04', 14, 15, 10, 7, 'sunday ', 1, 0, 'NONE') ,('2026-01-05', '1404-10-15', '1447-07-16', '2026-01-05', 15, 16, 10, 7, 'monday ', 2, 0, 'NONE') ,('2026-01-06', '1404-10-16', '1447-07-17', '2026-01-06', 16, 17, 10, 7, 'tuesday ', 3, 0, 'NONE') ,('2026-01-07', '1404-10-17', '1447-07-18', '2026-01-07', 17, 18, 10, 7, 'wednesday', 4, 0, 'NONE') ,('2026-01-08', '1404-10-18', '1447-07-19', '2026-01-08', 18, 19, 10, 7, 'thursday ', 5, 0, 'NONE') ,('2026-01-09', '1404-10-19', '1447-07-20', '2026-01-09', 19, 20, 10, 7, 'friday ', 6, 0, 'NONE') ,('2026-01-10', '1404-10-20', '1447-07-21', '2026-01-10', 20, 21, 10, 7, 'saturday ', 7, 0, 'NONE') ,('2026-01-11', '1404-10-21', '1447-07-22', '2026-01-11', 21, 22, 10, 7, 'sunday ', 1, 0, 'NONE') ,('2026-01-12', '1404-10-22', '1447-07-23', '2026-01-12', 22, 23, 10, 7, 'monday ', 2, 0, 'NONE') ,('2026-01-13', '1404-10-23', '1447-07-24', '2026-01-13', 23, 24, 10, 7, 'tuesday ', 3, 0, 'NONE') ,('2026-01-14', '1404-10-24', '1447-07-25', '2026-01-14', 24, 25, 10, 7, 'wednesday', 4, 0, 'NONE') ,('2026-01-15', '1404-10-25', '1447-07-26', '2026-01-15', 25, 26, 10, 7, 'thursday ', 5, 0, 'NONE') ,('2026-01-16', '1404-10-26', '1447-07-27', '2026-01-16', 26, 27, 10, 7, 'friday ', 6, 0, 'NONE') ,('2026-01-17', '1404-10-27', '1447-07-28', '2026-01-17', 27, 28, 10, 7, 'saturday ', 7, 0, 'NONE') ,('2026-01-18', '1404-10-28', '1447-07-29', '2026-01-18', 28, 29, 10, 7, 'sunday ', 1, 0, 'NONE') ,('2026-01-19', '1404-10-29', '1447-07-30', '2026-01-19', 29, 30, 10, 7, 'monday ', 2, 0, 'NONE') ,('2026-01-20', '1404-10-30', '1447-08-01', '2026-01-20', 30, 1, 10, 8, 'tuesday ', 3, 0, 'NONE') ,('2026-01-21', '1404-11-01', '1447-08-02', '2026-01-21', 1, 2, 11, 8, 'wednesday', 4, 0, 'NONE') ,('2026-01-22', '1404-11-02', '1447-08-03', '2026-01-22', 2, 3, 11, 8, 'thursday ', 5, 0, 'NONE') ,('2026-01-23', '1404-11-03', '1447-08-04', '2026-01-23', 3, 4, 11, 8, 'friday ', 6, 0, 'NONE') ,('2026-01-24', '1404-11-04', '1447-08-05', '2026-01-24', 4, 5, 11, 8, 'saturday ', 7, 0, 'NONE') ,('2026-01-25', '1404-11-05', '1447-08-06', '2026-01-25', 5, 6, 11, 8, 'sunday ', 1, 0, 'NONE') ,('2026-01-26', '1404-11-06', '1447-08-07', '2026-01-26', 6, 7, 11, 8, 'monday ', 2, 0, 'NONE') ,('2026-01-27', '1404-11-07', '1447-08-08', '2026-01-27', 7, 8, 11, 8, 'tuesday ', 3, 0, 'NONE') ,('2026-01-28', '1404-11-08', '1447-08-09', '2026-01-28', 8, 9, 11, 8, 'wednesday', 4, 0, 'NONE') ,('2026-01-29', '1404-11-09', '1447-08-10', '2026-01-29', 9, 10, 11, 8, 'thursday ', 5, 0, 'NONE') ,('2026-01-30', '1404-11-10', '1447-08-11', '2026-01-30', 10, 11, 11, 8, 'friday ', 6, 0, 'NONE') ,('2026-01-31', '1404-11-11', '1447-08-12', '2026-01-31', 11, 12, 11, 8, 'saturday ', 7, 0, 'NONE') ,('2026-02-01', '1404-11-12', '1447-08-13', '2026-02-01', 12, 13, 11, 8, 'sunday ', 1, 0, 'NONE') ,('2026-02-02', '1404-11-13', '1447-08-14', '2026-02-02', 13, 14, 11, 8, 'monday ', 2, 0, 'NONE') ,('2026-02-03', '1404-11-14', '1447-08-15', '2026-02-03', 14, 15, 11, 8, 'tuesday ', 3, 0, 'NONE') ,('2026-02-04', '1404-11-15', 
'1447-08-16', '2026-02-04', 15, 16, 11, 8, 'wednesday', 4, 0, 'NONE') ,('2026-02-05', '1404-11-16', '1447-08-17', '2026-02-05', 16, 17, 11, 8, 'thursday ', 5, 0, 'NONE') ,('2026-02-06', '1404-11-17', '1447-08-18', '2026-02-06', 17, 18, 11, 8, 'friday ', 6, 0, 'NONE') ,('2026-02-07', '1404-11-18', '1447-08-19', '2026-02-07', 18, 19, 11, 8, 'saturday ', 7, 0, 'NONE') ,('2026-02-08', '1404-11-19', '1447-08-20', '2026-02-08', 19, 20, 11, 8, 'sunday ', 1, 0, 'NONE') ,('2026-02-09', '1404-11-20', '1447-08-21', '2026-02-09', 20, 21, 11, 8, 'monday ', 2, 0, 'NONE') ,('2026-02-10', '1404-11-21', '1447-08-22', '2026-02-10', 21, 22, 11, 8, 'tuesday ', 3, 0, 'NONE') ,('2026-02-11', '1404-11-22', '1447-08-23', '2026-02-11', 22, 23, 11, 8, 'wednesday', 4, 0, 'NONE') ,('2026-02-12', '1404-11-23', '1447-08-24', '2026-02-12', 23, 24, 11, 8, 'thursday ', 5, 0, 'NONE') ,('2026-02-13', '1404-11-24', '1447-08-25', '2026-02-13', 24, 25, 11, 8, 'friday ', 6, 0, 'NONE') ,('2026-02-14', '1404-11-25', '1447-08-26', '2026-02-14', 25, 26, 11, 8, 'saturday ', 7, 0, 'NONE') ,('2026-02-15', '1404-11-26', '1447-08-27', '2026-02-15', 26, 27, 11, 8, 'sunday ', 1, 0, 'NONE') ,('2026-02-16', '1404-11-27', '1447-08-28', '2026-02-16', 27, 28, 11, 8, 'monday ', 2, 0, 'NONE') ,('2026-02-17', '1404-11-28', '1447-08-29', '2026-02-17', 28, 29, 11, 8, 'tuesday ', 3, 0, 'NONE') ,('2026-02-18', '1404-11-29', '1447-09-01', '2026-02-18', 29, 1, 11, 9, 'wednesday', 4, 0, 'NONE') ,('2026-02-19', '1404-11-30', '1447-09-02', '2026-02-19', 30, 2, 11, 9, 'thursday ', 5, 0, 'NONE') ,('2026-02-20', '1404-12-01', '1447-09-03', '2026-02-20', 1, 3, 12, 9, 'friday ', 6, 0, 'NONE') ,('2026-02-21', '1404-12-02', '1447-09-04', '2026-02-21', 2, 4, 12, 9, 'saturday ', 7, 0, 'NONE') ,('2026-02-22', '1404-12-03', '1447-09-05', '2026-02-22', 3, 5, 12, 9, 'sunday ', 1, 0, 'NONE') ,('2026-02-23', '1404-12-04', '1447-09-06', '2026-02-23', 4, 6, 12, 9, 'monday ', 2, 0, 'NONE') ,('2026-02-24', '1404-12-05', '1447-09-07', '2026-02-24', 5, 7, 12, 9, 'tuesday ', 3, 0, 'NONE') ,('2026-02-25', '1404-12-06', '1447-09-08', '2026-02-25', 6, 8, 12, 9, 'wednesday', 4, 0, 'NONE') ,('2026-02-26', '1404-12-07', '1447-09-09', '2026-02-26', 7, 9, 12, 9, 'thursday ', 5, 0, 'NONE') ,('2026-02-27', '1404-12-08', '1447-09-10', '2026-02-27', 8, 10, 12, 9, 'friday ', 6, 0, 'NONE') ,('2026-02-28', '1404-12-09', '1447-09-11', '2026-02-28', 9, 11, 12, 9, 'saturday ', 7, 0, 'NONE') ,('2026-03-01', '1404-12-10', '1447-09-12', '2026-03-01', 10, 12, 12, 9, 'sunday ', 1, 0, 'NONE') ,('2026-03-02', '1404-12-11', '1447-09-13', '2026-03-02', 11, 13, 12, 9, 'monday ', 2, 0, 'NONE') ,('2026-03-03', '1404-12-12', '1447-09-14', '2026-03-03', 12, 14, 12, 9, 'tuesday ', 3, 0, 'NONE') ,('2026-03-04', '1404-12-13', '1447-09-15', '2026-03-04', 13, 15, 12, 9, 'wednesday', 4, 0, 'NONE') ,('2026-03-05', '1404-12-14', '1447-09-16', '2026-03-05', 14, 16, 12, 9, 'thursday ', 5, 0, 'NONE') ,('2026-03-06', '1404-12-15', '1447-09-17', '2026-03-06', 15, 17, 12, 9, 'friday ', 6, 0, 'NONE') ,('2026-03-07', '1404-12-16', '1447-09-18', '2026-03-07', 16, 18, 12, 9, 'saturday ', 7, 0, 'NONE') ,('2026-03-08', '1404-12-17', '1447-09-19', '2026-03-08', 17, 19, 12, 9, 'sunday ', 1, 0, 'NONE') ,('2026-03-09', '1404-12-18', '1447-09-20', '2026-03-09', 18, 20, 12, 9, 'monday ', 2, 0, 'NONE') ,('2026-03-10', '1404-12-19', '1447-09-21', '2026-03-10', 19, 21, 12, 9, 'tuesday ', 3, 0, 'NONE') ,('2026-03-11', '1404-12-20', '1447-09-22', '2026-03-11', 20, 22, 12, 9, 'wednesday', 4, 0, 'NONE') ,('2026-03-12', '1404-12-21', 
'1447-09-23', '2026-03-12', 21, 23, 12, 9, 'thursday ', 5, 0, 'NONE') ,('2026-03-13', '1404-12-22', '1447-09-24', '2026-03-13', 22, 24, 12, 9, 'friday ', 6, 0, 'NONE') ,('2026-03-14', '1404-12-23', '1447-09-25', '2026-03-14', 23, 25, 12, 9, 'saturday ', 7, 0, 'NONE') ,('2026-03-15', '1404-12-24', '1447-09-26', '2026-03-15', 24, 26, 12, 9, 'sunday ', 1, 0, 'NONE') ,('2026-03-16', '1404-12-25', '1447-09-27', '2026-03-16', 25, 27, 12, 9, 'monday ', 2, 0, 'NONE') ,('2026-03-17', '1404-12-26', '1447-09-28', '2026-03-17', 26, 28, 12, 9, 'tuesday ', 3, 0, 'NONE') ,('2026-03-18', '1404-12-27', '1447-09-29', '2026-03-18', 27, 29, 12, 9, 'wednesday', 4, 0, 'NONE') ,('2026-03-19', '1404-12-28', '1447-09-30', '2026-03-19', 28, 30, 12, 9, 'thursday ', 5, 0, 'NONE') ,('2026-03-20', '1404-12-29', '1447-10-01', '2026-03-20', 29, 1, 12, 10, 'friday ', 6, 0, 'NONE') ,('2026-03-21', '1405-01-01', '1447-10-02', '2026-03-21', 1, 2, 1, 10, 'saturday ', 7, 0, 'NONE') ,('2026-03-22', '1405-01-02', '1447-10-03', '2026-03-22', 2, 3, 1, 10, 'sunday ', 1, 0, 'NONE') ,('2026-03-23', '1405-01-03', '1447-10-04', '2026-03-23', 3, 4, 1, 10, 'monday ', 2, 0, 'NONE') ,('2026-03-24', '1405-01-04', '1447-10-05', '2026-03-24', 4, 5, 1, 10, 'tuesday ', 3, 0, 'NONE') ,('2026-03-25', '1405-01-05', '1447-10-06', '2026-03-25', 5, 6, 1, 10, 'wednesday', 4, 0, 'NONE') ,('2026-03-26', '1405-01-06', '1447-10-07', '2026-03-26', 6, 7, 1, 10, 'thursday ', 5, 0, 'NONE') ,('2026-03-27', '1405-01-07', '1447-10-08', '2026-03-27', 7, 8, 1, 10, 'friday ', 6, 0, 'NONE') ,('2026-03-28', '1405-01-08', '1447-10-09', '2026-03-28', 8, 9, 1, 10, 'saturday ', 7, 0, 'NONE') ,('2026-03-29', '1405-01-09', '1447-10-10', '2026-03-29', 9, 10, 1, 10, 'sunday ', 1, 0, 'NONE') ,('2026-03-30', '1405-01-10', '1447-10-11', '2026-03-30', 10, 11, 1, 10, 'monday ', 2, 0, 'NONE') ,('2026-03-31', '1405-01-11', '1447-10-12', '2026-03-31', 11, 12, 1, 10, 'tuesday ', 3, 0, 'NONE') ,('2026-04-01', '1405-01-12', '1447-10-13', '2026-04-01', 12, 13, 1, 10, 'wednesday', 4, 0, 'NONE') ,('2026-04-02', '1405-01-13', '1447-10-14', '2026-04-02', 13, 14, 1, 10, 'thursday ', 5, 0, 'NONE') ,('2026-04-03', '1405-01-14', '1447-10-15', '2026-04-03', 14, 15, 1, 10, 'friday ', 6, 0, 'NONE') ,('2026-04-04', '1405-01-15', '1447-10-16', '2026-04-04', 15, 16, 1, 10, 'saturday ', 7, 0, 'NONE') ,('2026-04-05', '1405-01-16', '1447-10-17', '2026-04-05', 16, 17, 1, 10, 'sunday ', 1, 0, 'NONE') ,('2026-04-06', '1405-01-17', '1447-10-18', '2026-04-06', 17, 18, 1, 10, 'monday ', 2, 0, 'NONE') ,('2026-04-07', '1405-01-18', '1447-10-19', '2026-04-07', 18, 19, 1, 10, 'tuesday ', 3, 0, 'NONE') ,('2026-04-08', '1405-01-19', '1447-10-20', '2026-04-08', 19, 20, 1, 10, 'wednesday', 4, 0, 'NONE') ,('2026-04-09', '1405-01-20', '1447-10-21', '2026-04-09', 20, 21, 1, 10, 'thursday ', 5, 0, 'NONE') ,('2026-04-10', '1405-01-21', '1447-10-22', '2026-04-10', 21, 22, 1, 10, 'friday ', 6, 0, 'NONE') ,('2026-04-11', '1405-01-22', '1447-10-23', '2026-04-11', 22, 23, 1, 10, 'saturday ', 7, 0, 'NONE') ,('2026-04-12', '1405-01-23', '1447-10-24', '2026-04-12', 23, 24, 1, 10, 'sunday ', 1, 0, 'NONE') ,('2026-04-13', '1405-01-24', '1447-10-25', '2026-04-13', 24, 25, 1, 10, 'monday ', 2, 0, 'NONE') ,('2026-04-14', '1405-01-25', '1447-10-26', '2026-04-14', 25, 26, 1, 10, 'tuesday ', 3, 0, 'NONE') ,('2026-04-15', '1405-01-26', '1447-10-27', '2026-04-15', 26, 27, 1, 10, 'wednesday', 4, 0, 'NONE') ,('2026-04-16', '1405-01-27', '1447-10-28', '2026-04-16', 27, 28, 1, 10, 'thursday ', 5, 0, 'NONE') ,('2026-04-17', '1405-01-28', 
'1447-10-29', '2026-04-17', 28, 29, 1, 10, 'friday ', 6, 0, 'NONE') ,('2026-04-18', '1405-01-29', '1447-11-01', '2026-04-18', 29, 1, 1, 11, 'saturday ', 7, 0, 'NONE') ,('2026-04-19', '1405-01-30', '1447-11-02', '2026-04-19', 30, 2, 1, 11, 'sunday ', 1, 0, 'NONE') ,('2026-04-20', '1405-01-31', '1447-11-03', '2026-04-20', 31, 3, 1, 11, 'monday ', 2, 0, 'NONE') ,('2026-04-21', '1405-02-01', '1447-11-04', '2026-04-21', 1, 4, 2, 11, 'tuesday ', 3, 0, 'NONE') ,('2026-04-22', '1405-02-02', '1447-11-05', '2026-04-22', 2, 5, 2, 11, 'wednesday', 4, 0, 'NONE') ,('2026-04-23', '1405-02-03', '1447-11-06', '2026-04-23', 3, 6, 2, 11, 'thursday ', 5, 0, 'NONE') ,('2026-04-24', '1405-02-04', '1447-11-07', '2026-04-24', 4, 7, 2, 11, 'friday ', 6, 0, 'NONE') ,('2026-04-25', '1405-02-05', '1447-11-08', '2026-04-25', 5, 8, 2, 11, 'saturday ', 7, 0, 'NONE') ,('2026-04-26', '1405-02-06', '1447-11-09', '2026-04-26', 6, 9, 2, 11, 'sunday ', 1, 0, 'NONE') ,('2026-04-27', '1405-02-07', '1447-11-10', '2026-04-27', 7, 10, 2, 11, 'monday ', 2, 0, 'NONE') ,('2026-04-28', '1405-02-08', '1447-11-11', '2026-04-28', 8, 11, 2, 11, 'tuesday ', 3, 0, 'NONE') ,('2026-04-29', '1405-02-09', '1447-11-12', '2026-04-29', 9, 12, 2, 11, 'wednesday', 4, 0, 'NONE') ,('2026-04-30', '1405-02-10', '1447-11-13', '2026-04-30', 10, 13, 2, 11, 'thursday ', 5, 0, 'NONE') ,('2026-05-01', '1405-02-11', '1447-11-14', '2026-05-01', 11, 14, 2, 11, 'friday ', 6, 0, 'NONE') ,('2026-05-02', '1405-02-12', '1447-11-15', '2026-05-02', 12, 15, 2, 11, 'saturday ', 7, 0, 'NONE') ,('2026-05-03', '1405-02-13', '1447-11-16', '2026-05-03', 13, 16, 2, 11, 'sunday ', 1, 0, 'NONE') ,('2026-05-04', '1405-02-14', '1447-11-17', '2026-05-04', 14, 17, 2, 11, 'monday ', 2, 0, 'NONE') ,('2026-05-05', '1405-02-15', '1447-11-18', '2026-05-05', 15, 18, 2, 11, 'tuesday ', 3, 0, 'NONE') ,('2026-05-06', '1405-02-16', '1447-11-19', '2026-05-06', 16, 19, 2, 11, 'wednesday', 4, 0, 'NONE') ,('2026-05-07', '1405-02-17', '1447-11-20', '2026-05-07', 17, 20, 2, 11, 'thursday ', 5, 0, 'NONE') ,('2026-05-08', '1405-02-18', '1447-11-21', '2026-05-08', 18, 21, 2, 11, 'friday ', 6, 0, 'NONE') ,('2026-05-09', '1405-02-19', '1447-11-22', '2026-05-09', 19, 22, 2, 11, 'saturday ', 7, 0, 'NONE') ,('2026-05-10', '1405-02-20', '1447-11-23', '2026-05-10', 20, 23, 2, 11, 'sunday ', 1, 0, 'NONE') ,('2026-05-11', '1405-02-21', '1447-11-24', '2026-05-11', 21, 24, 2, 11, 'monday ', 2, 0, 'NONE') ,('2026-05-12', '1405-02-22', '1447-11-25', '2026-05-12', 22, 25, 2, 11, 'tuesday ', 3, 0, 'NONE') ,('2026-05-13', '1405-02-23', '1447-11-26', '2026-05-13', 23, 26, 2, 11, 'wednesday', 4, 0, 'NONE') ,('2026-05-14', '1405-02-24', '1447-11-27', '2026-05-14', 24, 27, 2, 11, 'thursday ', 5, 0, 'NONE') ,('2026-05-15', '1405-02-25', '1447-11-28', '2026-05-15', 25, 28, 2, 11, 'friday ', 6, 0, 'NONE') ,('2026-05-16', '1405-02-26', '1447-11-29', '2026-05-16', 26, 29, 2, 11, 'saturday ', 7, 0, 'NONE') ,('2026-05-17', '1405-02-27', '1447-11-30', '2026-05-17', 27, 30, 2, 11, 'sunday ', 1, 0, 'NONE') ,('2026-05-18', '1405-02-28', '1447-12-01', '2026-05-18', 28, 1, 2, 12, 'monday ', 2, 0, 'NONE') ,('2026-05-19', '1405-02-29', '1447-12-02', '2026-05-19', 29, 2, 2, 12, 'tuesday ', 3, 0, 'NONE') ,('2026-05-20', '1405-02-30', '1447-12-03', '2026-05-20', 30, 3, 2, 12, 'wednesday', 4, 0, 'NONE') ,('2026-05-21', '1405-02-31', '1447-12-04', '2026-05-21', 31, 4, 2, 12, 'thursday ', 5, 0, 'NONE') ,('2026-05-22', '1405-03-01', '1447-12-05', '2026-05-22', 1, 5, 3, 12, 'friday ', 6, 0, 'NONE') ,('2026-05-23', '1405-03-02', '1447-12-06', 
'2026-05-23', 2, 6, 3, 12, 'saturday ', 7, 0, 'NONE') ,('2026-05-24', '1405-03-03', '1447-12-07', '2026-05-24', 3, 7, 3, 12, 'sunday ', 1, 0, 'NONE') ,('2026-05-25', '1405-03-04', '1447-12-08', '2026-05-25', 4, 8, 3, 12, 'monday ', 2, 0, 'NONE') ,('2026-05-26', '1405-03-05', '1447-12-09', '2026-05-26', 5, 9, 3, 12, 'tuesday ', 3, 0, 'NONE') ,('2026-05-27', '1405-03-06', '1447-12-10', '2026-05-27', 6, 10, 3, 12, 'wednesday', 4, 0, 'NONE') ,('2026-05-28', '1405-03-07', '1447-12-11', '2026-05-28', 7, 11, 3, 12, 'thursday ', 5, 0, 'NONE') ,('2026-05-29', '1405-03-08', '1447-12-12', '2026-05-29', 8, 12, 3, 12, 'friday ', 6, 0, 'NONE') ,('2026-05-30', '1405-03-09', '1447-12-13', '2026-05-30', 9, 13, 3, 12, 'saturday ', 7, 0, 'NONE') ,('2026-05-31', '1405-03-10', '1447-12-14', '2026-05-31', 10, 14, 3, 12, 'sunday ', 1, 0, 'NONE') ,('2026-06-01', '1405-03-11', '1447-12-15', '2026-06-01', 11, 15, 3, 12, 'monday ', 2, 0, 'NONE') ,('2026-06-02', '1405-03-12', '1447-12-16', '2026-06-02', 12, 16, 3, 12, 'tuesday ', 3, 0, 'NONE') ,('2026-06-03', '1405-03-13', '1447-12-17', '2026-06-03', 13, 17, 3, 12, 'wednesday', 4, 0, 'NONE') ,('2026-06-04', '1405-03-14', '1447-12-18', '2026-06-04', 14, 18, 3, 12, 'thursday ', 5, 0, 'NONE') ,('2026-06-05', '1405-03-15', '1447-12-19', '2026-06-05', 15, 19, 3, 12, 'friday ', 6, 0, 'NONE') ,('2026-06-06', '1405-03-16', '1447-12-20', '2026-06-06', 16, 20, 3, 12, 'saturday ', 7, 0, 'NONE') ,('2026-06-07', '1405-03-17', '1447-12-21', '2026-06-07', 17, 21, 3, 12, 'sunday ', 1, 0, 'NONE') ,('2026-06-08', '1405-03-18', '1447-12-22', '2026-06-08', 18, 22, 3, 12, 'monday ', 2, 0, 'NONE') ,('2026-06-09', '1405-03-19', '1447-12-23', '2026-06-09', 19, 23, 3, 12, 'tuesday ', 3, 0, 'NONE') ,('2026-06-10', '1405-03-20', '1447-12-24', '2026-06-10', 20, 24, 3, 12, 'wednesday', 4, 0, 'NONE') ,('2026-06-11', '1405-03-21', '1447-12-25', '2026-06-11', 21, 25, 3, 12, 'thursday ', 5, 0, 'NONE') ,('2026-06-12', '1405-03-22', '1447-12-26', '2026-06-12', 22, 26, 3, 12, 'friday ', 6, 0, 'NONE') ,('2026-06-13', '1405-03-23', '1447-12-27', '2026-06-13', 23, 27, 3, 12, 'saturday ', 7, 0, 'NONE') ,('2026-06-14', '1405-03-24', '1447-12-28', '2026-06-14', 24, 28, 3, 12, 'sunday ', 1, 0, 'NONE') ,('2026-06-15', '1405-03-25', '1447-12-29', '2026-06-15', 25, 29, 3, 12, 'monday ', 2, 0, 'NONE') ,('2026-06-16', '1405-03-26', '1447-12-30', '2026-06-16', 26, 30, 3, 12, 'tuesday ', 3, 0, 'NONE') ,('2026-06-17', '1405-03-27', '1448-01-01', '2026-06-17', 27, 1, 3, 1, 'wednesday', 4, 0, 'NONE') ,('2026-06-18', '1405-03-28', '1448-01-02', '2026-06-18', 28, 2, 3, 1, 'thursday ', 5, 0, 'NONE') ,('2026-06-19', '1405-03-29', '1448-01-03', '2026-06-19', 29, 3, 3, 1, 'friday ', 6, 0, 'NONE') ,('2026-06-20', '1405-03-30', '1448-01-04', '2026-06-20', 30, 4, 3, 1, 'saturday ', 7, 0, 'NONE') ,('2026-06-21', '1405-03-31', '1448-01-05', '2026-06-21', 31, 5, 3, 1, 'sunday ', 1, 0, 'NONE') ,('2026-06-22', '1405-04-01', '1448-01-06', '2026-06-22', 1, 6, 4, 1, 'monday ', 2, 0, 'NONE') ,('2026-06-23', '1405-04-02', '1448-01-07', '2026-06-23', 2, 7, 4, 1, 'tuesday ', 3, 0, 'NONE') ,('2026-06-24', '1405-04-03', '1448-01-08', '2026-06-24', 3, 8, 4, 1, 'wednesday', 4, 0, 'NONE') ,('2026-06-25', '1405-04-04', '1448-01-09', '2026-06-25', 4, 9, 4, 1, 'thursday ', 5, 0, 'NONE') ,('2026-06-26', '1405-04-05', '1448-01-10', '2026-06-26', 5, 10, 4, 1, 'friday ', 6, 0, 'NONE') ,('2026-06-27', '1405-04-06', '1448-01-11', '2026-06-27', 6, 11, 4, 1, 'saturday ', 7, 0, 'NONE') ,('2026-06-28', '1405-04-07', '1448-01-12', '2026-06-28', 7, 12, 4, 1, 
'sunday ', 1, 0, 'NONE') ,('2026-06-29', '1405-04-08', '1448-01-13', '2026-06-29', 8, 13, 4, 1, 'monday ', 2, 0, 'NONE') ,('2026-06-30', '1405-04-09', '1448-01-14', '2026-06-30', 9, 14, 4, 1, 'tuesday ', 3, 0, 'NONE') ,('2026-07-01', '1405-04-10', '1448-01-15', '2026-07-01', 10, 15, 4, 1, 'wednesday', 4, 0, 'NONE') ,('2026-07-02', '1405-04-11', '1448-01-16', '2026-07-02', 11, 16, 4, 1, 'thursday ', 5, 0, 'NONE') ,('2026-07-03', '1405-04-12', '1448-01-17', '2026-07-03', 12, 17, 4, 1, 'friday ', 6, 0, 'NONE') ,('2026-07-04', '1405-04-13', '1448-01-18', '2026-07-04', 13, 18, 4, 1, 'saturday ', 7, 0, 'NONE') ,('2026-07-05', '1405-04-14', '1448-01-19', '2026-07-05', 14, 19, 4, 1, 'sunday ', 1, 0, 'NONE') ,('2026-07-06', '1405-04-15', '1448-01-20', '2026-07-06', 15, 20, 4, 1, 'monday ', 2, 0, 'NONE') ,('2026-07-07', '1405-04-16', '1448-01-21', '2026-07-07', 16, 21, 4, 1, 'tuesday ', 3, 0, 'NONE') ,('2026-07-08', '1405-04-17', '1448-01-22', '2026-07-08', 17, 22, 4, 1, 'wednesday', 4, 0, 'NONE') ,('2026-07-09', '1405-04-18', '1448-01-23', '2026-07-09', 18, 23, 4, 1, 'thursday ', 5, 0, 'NONE') ,('2026-07-10', '1405-04-19', '1448-01-24', '2026-07-10', 19, 24, 4, 1, 'friday ', 6, 0, 'NONE') ,('2026-07-11', '1405-04-20', '1448-01-25', '2026-07-11', 20, 25, 4, 1, 'saturday ', 7, 0, 'NONE') ,('2026-07-12', '1405-04-21', '1448-01-26', '2026-07-12', 21, 26, 4, 1, 'sunday ', 1, 0, 'NONE') ,('2026-07-13', '1405-04-22', '1448-01-27', '2026-07-13', 22, 27, 4, 1, 'monday ', 2, 0, 'NONE') ,('2026-07-14', '1405-04-23', '1448-01-28', '2026-07-14', 23, 28, 4, 1, 'tuesday ', 3, 0, 'NONE') ,('2026-07-15', '1405-04-24', '1448-01-29', '2026-07-15', 24, 29, 4, 1, 'wednesday', 4, 0, 'NONE') ,('2026-07-16', '1405-04-25', '1448-01-30', '2026-07-16', 25, 30, 4, 1, 'thursday ', 5, 0, 'NONE') ,('2026-07-17', '1405-04-26', '1448-02-01', '2026-07-17', 26, 1, 4, 2, 'friday ', 6, 0, 'NONE') ,('2026-07-18', '1405-04-27', '1448-02-02', '2026-07-18', 27, 2, 4, 2, 'saturday ', 7, 0, 'NONE') ,('2026-07-19', '1405-04-28', '1448-02-03', '2026-07-19', 28, 3, 4, 2, 'sunday ', 1, 0, 'NONE') ,('2026-07-20', '1405-04-29', '1448-02-04', '2026-07-20', 29, 4, 4, 2, 'monday ', 2, 0, 'NONE') ,('2026-07-21', '1405-04-30', '1448-02-05', '2026-07-21', 30, 5, 4, 2, 'tuesday ', 3, 0, 'NONE') ,('2026-07-22', '1405-04-31', '1448-02-06', '2026-07-22', 31, 6, 4, 2, 'wednesday', 4, 0, 'NONE') ,('2026-07-23', '1405-05-01', '1448-02-07', '2026-07-23', 1, 7, 5, 2, 'thursday ', 5, 0, 'NONE') ,('2026-07-24', '1405-05-02', '1448-02-08', '2026-07-24', 2, 8, 5, 2, 'friday ', 6, 0, 'NONE') ,('2026-07-25', '1405-05-03', '1448-02-09', '2026-07-25', 3, 9, 5, 2, 'saturday ', 7, 0, 'NONE') ,('2026-07-26', '1405-05-04', '1448-02-10', '2026-07-26', 4, 10, 5, 2, 'sunday ', 1, 0, 'NONE') ,('2026-07-27', '1405-05-05', '1448-02-11', '2026-07-27', 5, 11, 5, 2, 'monday ', 2, 0, 'NONE') ,('2026-07-28', '1405-05-06', '1448-02-12', '2026-07-28', 6, 12, 5, 2, 'tuesday ', 3, 0, 'NONE') ,('2026-07-29', '1405-05-07', '1448-02-13', '2026-07-29', 7, 13, 5, 2, 'wednesday', 4, 0, 'NONE') ,('2026-07-30', '1405-05-08', '1448-02-14', '2026-07-30', 8, 14, 5, 2, 'thursday ', 5, 0, 'NONE') ,('2026-07-31', '1405-05-09', '1448-02-15', '2026-07-31', 9, 15, 5, 2, 'friday ', 6, 0, 'NONE') ,('2026-08-01', '1405-05-10', '1448-02-16', '2026-08-01', 10, 16, 5, 2, 'saturday ', 7, 0, 'NONE') ,('2026-08-02', '1405-05-11', '1448-02-17', '2026-08-02', 11, 17, 5, 2, 'sunday ', 1, 0, 'NONE') ,('2026-08-03', '1405-05-12', '1448-02-18', '2026-08-03', 12, 18, 5, 2, 'monday ', 2, 0, 'NONE') ,('2026-08-04', 
'1405-05-13', '1448-02-19', '2026-08-04', 13, 19, 5, 2, 'tuesday ', 3, 0, 'NONE') ,('2026-08-05', '1405-05-14', '1448-02-20', '2026-08-05', 14, 20, 5, 2, 'wednesday', 4, 0, 'NONE') ,('2026-08-06', '1405-05-15', '1448-02-21', '2026-08-06', 15, 21, 5, 2, 'thursday ', 5, 0, 'NONE') ,('2026-08-07', '1405-05-16', '1448-02-22', '2026-08-07', 16, 22, 5, 2, 'friday ', 6, 0, 'NONE') ,('2026-08-08', '1405-05-17', '1448-02-23', '2026-08-08', 17, 23, 5, 2, 'saturday ', 7, 0, 'NONE') ,('2026-08-09', '1405-05-18', '1448-02-24', '2026-08-09', 18, 24, 5, 2, 'sunday ', 1, 0, 'NONE') ,('2026-08-10', '1405-05-19', '1448-02-25', '2026-08-10', 19, 25, 5, 2, 'monday ', 2, 0, 'NONE') ,('2026-08-11', '1405-05-20', '1448-02-26', '2026-08-11', 20, 26, 5, 2, 'tuesday ', 3, 0, 'NONE') ,('2026-08-12', '1405-05-21', '1448-02-27', '2026-08-12', 21, 27, 5, 2, 'wednesday', 4, 0, 'NONE') ,('2026-08-13', '1405-05-22', '1448-02-28', '2026-08-13', 22, 28, 5, 2, 'thursday ', 5, 0, 'NONE') ,('2026-08-14', '1405-05-23', '1448-02-29', '2026-08-14', 23, 29, 5, 2, 'friday ', 6, 0, 'NONE') ,('2026-08-15', '1405-05-24', '1448-03-01', '2026-08-15', 24, 1, 5, 3, 'saturday ', 7, 0, 'NONE') ,('2026-08-16', '1405-05-25', '1448-03-02', '2026-08-16', 25, 2, 5, 3, 'sunday ', 1, 0, 'NONE') ,('2026-08-17', '1405-05-26', '1448-03-03', '2026-08-17', 26, 3, 5, 3, 'monday ', 2, 0, 'NONE') ,('2026-08-18', '1405-05-27', '1448-03-04', '2026-08-18', 27, 4, 5, 3, 'tuesday ', 3, 0, 'NONE') ,('2026-08-19', '1405-05-28', '1448-03-05', '2026-08-19', 28, 5, 5, 3, 'wednesday', 4, 0, 'NONE') ,('2026-08-20', '1405-05-29', '1448-03-06', '2026-08-20', 29, 6, 5, 3, 'thursday ', 5, 0, 'NONE') ,('2026-08-21', '1405-05-30', '1448-03-07', '2026-08-21', 30, 7, 5, 3, 'friday ', 6, 0, 'NONE') ,('2026-08-22', '1405-05-31', '1448-03-08', '2026-08-22', 31, 8, 5, 3, 'saturday ', 7, 0, 'NONE') ,('2026-08-23', '1405-06-01', '1448-03-09', '2026-08-23', 1, 9, 6, 3, 'sunday ', 1, 0, 'NONE') ,('2026-08-24', '1405-06-02', '1448-03-10', '2026-08-24', 2, 10, 6, 3, 'monday ', 2, 0, 'NONE') ,('2026-08-25', '1405-06-03', '1448-03-11', '2026-08-25', 3, 11, 6, 3, 'tuesday ', 3, 0, 'NONE') ,('2026-08-26', '1405-06-04', '1448-03-12', '2026-08-26', 4, 12, 6, 3, 'wednesday', 4, 0, 'NONE') ,('2026-08-27', '1405-06-05', '1448-03-13', '2026-08-27', 5, 13, 6, 3, 'thursday ', 5, 0, 'NONE') ,('2026-08-28', '1405-06-06', '1448-03-14', '2026-08-28', 6, 14, 6, 3, 'friday ', 6, 0, 'NONE') ,('2026-08-29', '1405-06-07', '1448-03-15', '2026-08-29', 7, 15, 6, 3, 'saturday ', 7, 0, 'NONE') ,('2026-08-30', '1405-06-08', '1448-03-16', '2026-08-30', 8, 16, 6, 3, 'sunday ', 1, 0, 'NONE') ,('2026-08-31', '1405-06-09', '1448-03-17', '2026-08-31', 9, 17, 6, 3, 'monday ', 2, 0, 'NONE') ,('2026-09-01', '1405-06-10', '1448-03-18', '2026-09-01', 10, 18, 6, 3, 'tuesday ', 3, 0, 'NONE') ,('2026-09-02', '1405-06-11', '1448-03-19', '2026-09-02', 11, 19, 6, 3, 'wednesday', 4, 0, 'NONE') ,('2026-09-03', '1405-06-12', '1448-03-20', '2026-09-03', 12, 20, 6, 3, 'thursday ', 5, 0, 'NONE') ,('2026-09-04', '1405-06-13', '1448-03-21', '2026-09-04', 13, 21, 6, 3, 'friday ', 6, 0, 'NONE') ,('2026-09-05', '1405-06-14', '1448-03-22', '2026-09-05', 14, 22, 6, 3, 'saturday ', 7, 0, 'NONE') ,('2026-09-06', '1405-06-15', '1448-03-23', '2026-09-06', 15, 23, 6, 3, 'sunday ', 1, 0, 'NONE') ,('2026-09-07', '1405-06-16', '1448-03-24', '2026-09-07', 16, 24, 6, 3, 'monday ', 2, 0, 'NONE') ,('2026-09-08', '1405-06-17', '1448-03-25', '2026-09-08', 17, 25, 6, 3, 'tuesday ', 3, 0, 'NONE') ,('2026-09-09', '1405-06-18', '1448-03-26', '2026-09-09', 
18, 26, 6, 3, 'wednesday', 4, 0, 'NONE') ,('2026-09-10', '1405-06-19', '1448-03-27', '2026-09-10', 19, 27, 6, 3, 'thursday ', 5, 0, 'NONE') ,('2026-09-11', '1405-06-20', '1448-03-28', '2026-09-11', 20, 28, 6, 3, 'friday ', 6, 0, 'NONE') ,('2026-09-12', '1405-06-21', '1448-03-29', '2026-09-12', 21, 29, 6, 3, 'saturday ', 7, 0, 'NONE') ,('2026-09-13', '1405-06-22', '1448-03-30', '2026-09-13', 22, 30, 6, 3, 'sunday ', 1, 0, 'NONE') ,('2026-09-14', '1405-06-23', '1448-04-01', '2026-09-14', 23, 1, 6, 4, 'monday ', 2, 0, 'NONE') ,('2026-09-15', '1405-06-24', '1448-04-02', '2026-09-15', 24, 2, 6, 4, 'tuesday ', 3, 0, 'NONE') ,('2026-09-16', '1405-06-25', '1448-04-03', '2026-09-16', 25, 3, 6, 4, 'wednesday', 4, 0, 'NONE') ,('2026-09-17', '1405-06-26', '1448-04-04', '2026-09-17', 26, 4, 6, 4, 'thursday ', 5, 0, 'NONE') ,('2026-09-18', '1405-06-27', '1448-04-05', '2026-09-18', 27, 5, 6, 4, 'friday ', 6, 0, 'NONE') ,('2026-09-19', '1405-06-28', '1448-04-06', '2026-09-19', 28, 6, 6, 4, 'saturday ', 7, 0, 'NONE') ,('2026-09-20', '1405-06-29', '1448-04-07', '2026-09-20', 29, 7, 6, 4, 'sunday ', 1, 0, 'NONE') ,('2026-09-21', '1405-06-30', '1448-04-08', '2026-09-21', 30, 8, 6, 4, 'monday ', 2, 0, 'NONE') ,('2026-09-22', '1405-06-31', '1448-04-09', '2026-09-22', 31, 9, 6, 4, 'tuesday ', 3, 0, 'NONE') ,('2026-09-23', '1405-07-01', '1448-04-10', '2026-09-23', 1, 10, 7, 4, 'wednesday', 4, 0, 'NONE') ,('2026-09-24', '1405-07-02', '1448-04-11', '2026-09-24', 2, 11, 7, 4, 'thursday ', 5, 0, 'NONE') ,('2026-09-25', '1405-07-03', '1448-04-12', '2026-09-25', 3, 12, 7, 4, 'friday ', 6, 0, 'NONE') ,('2026-09-26', '1405-07-04', '1448-04-13', '2026-09-26', 4, 13, 7, 4, 'saturday ', 7, 0, 'NONE') ,('2026-09-27', '1405-07-05', '1448-04-14', '2026-09-27', 5, 14, 7, 4, 'sunday ', 1, 0, 'NONE') ,('2026-09-28', '1405-07-06', '1448-04-15', '2026-09-28', 6, 15, 7, 4, 'monday ', 2, 0, 'NONE') ,('2026-09-29', '1405-07-07', '1448-04-16', '2026-09-29', 7, 16, 7, 4, 'tuesday ', 3, 0, 'NONE') ,('2026-09-30', '1405-07-08', '1448-04-17', '2026-09-30', 8, 17, 7, 4, 'wednesday', 4, 0, 'NONE') ,('2026-10-01', '1405-07-09', '1448-04-18', '2026-10-01', 9, 18, 7, 4, 'thursday ', 5, 0, 'NONE') ,('2026-10-02', '1405-07-10', '1448-04-19', '2026-10-02', 10, 19, 7, 4, 'friday ', 6, 0, 'NONE') ,('2026-10-03', '1405-07-11', '1448-04-20', '2026-10-03', 11, 20, 7, 4, 'saturday ', 7, 0, 'NONE') ,('2026-10-04', '1405-07-12', '1448-04-21', '2026-10-04', 12, 21, 7, 4, 'sunday ', 1, 0, 'NONE') ,('2026-10-05', '1405-07-13', '1448-04-22', '2026-10-05', 13, 22, 7, 4, 'monday ', 2, 0, 'NONE') ,('2026-10-06', '1405-07-14', '1448-04-23', '2026-10-06', 14, 23, 7, 4, 'tuesday ', 3, 0, 'NONE') ,('2026-10-07', '1405-07-15', '1448-04-24', '2026-10-07', 15, 24, 7, 4, 'wednesday', 4, 0, 'NONE') ,('2026-10-08', '1405-07-16', '1448-04-25', '2026-10-08', 16, 25, 7, 4, 'thursday ', 5, 0, 'NONE') ,('2026-10-09', '1405-07-17', '1448-04-26', '2026-10-09', 17, 26, 7, 4, 'friday ', 6, 0, 'NONE') ,('2026-10-10', '1405-07-18', '1448-04-27', '2026-10-10', 18, 27, 7, 4, 'saturday ', 7, 0, 'NONE') ,('2026-10-11', '1405-07-19', '1448-04-28', '2026-10-11', 19, 28, 7, 4, 'sunday ', 1, 0, 'NONE') ,('2026-10-12', '1405-07-20', '1448-04-29', '2026-10-12', 20, 29, 7, 4, 'monday ', 2, 0, 'NONE') ,('2026-10-13', '1405-07-21', '1448-05-01', '2026-10-13', 21, 1, 7, 5, 'tuesday ', 3, 0, 'NONE') ,('2026-10-14', '1405-07-22', '1448-05-02', '2026-10-14', 22, 2, 7, 5, 'wednesday', 4, 0, 'NONE') ,('2026-10-15', '1405-07-23', '1448-05-03', '2026-10-15', 23, 3, 7, 5, 'thursday ', 5, 0, 'NONE') 
,('2026-10-16', '1405-07-24', '1448-05-04', '2026-10-16', 24, 4, 7, 5, 'friday ', 6, 0, 'NONE') ,('2026-10-17', '1405-07-25', '1448-05-05', '2026-10-17', 25, 5, 7, 5, 'saturday ', 7, 0, 'NONE') ,('2026-10-18', '1405-07-26', '1448-05-06', '2026-10-18', 26, 6, 7, 5, 'sunday ', 1, 0, 'NONE') ,('2026-10-19', '1405-07-27', '1448-05-07', '2026-10-19', 27, 7, 7, 5, 'monday ', 2, 0, 'NONE') ,('2026-10-20', '1405-07-28', '1448-05-08', '2026-10-20', 28, 8, 7, 5, 'tuesday ', 3, 0, 'NONE') ,('2026-10-21', '1405-07-29', '1448-05-09', '2026-10-21', 29, 9, 7, 5, 'wednesday', 4, 0, 'NONE') ,('2026-10-22', '1405-07-30', '1448-05-10', '2026-10-22', 30, 10, 7, 5, 'thursday ', 5, 0, 'NONE') ,('2026-10-23', '1405-08-01', '1448-05-11', '2026-10-23', 1, 11, 8, 5, 'friday ', 6, 0, 'NONE') ,('2026-10-24', '1405-08-02', '1448-05-12', '2026-10-24', 2, 12, 8, 5, 'saturday ', 7, 0, 'NONE') ,('2026-10-25', '1405-08-03', '1448-05-13', '2026-10-25', 3, 13, 8, 5, 'sunday ', 1, 0, 'NONE') ,('2026-10-26', '1405-08-04', '1448-05-14', '2026-10-26', 4, 14, 8, 5, 'monday ', 2, 0, 'NONE') ,('2026-10-27', '1405-08-05', '1448-05-15', '2026-10-27', 5, 15, 8, 5, 'tuesday ', 3, 0, 'NONE') ,('2026-10-28', '1405-08-06', '1448-05-16', '2026-10-28', 6, 16, 8, 5, 'wednesday', 4, 0, 'NONE') ,('2026-10-29', '1405-08-07', '1448-05-17', '2026-10-29', 7, 17, 8, 5, 'thursday ', 5, 0, 'NONE') ,('2026-10-30', '1405-08-08', '1448-05-18', '2026-10-30', 8, 18, 8, 5, 'friday ', 6, 0, 'NONE'); + + +WITH A as (SELECT rowNumberInAllBlocks() R,addDays(toDate('2021-05-18'), R) TVV from numbers(5)), + B as (SELECT rowNumberInAllBlocks() R,toDateTime(NULL) TVV from numbers(1)) +SELECT + joinGet('DATE_INFO_DICT', 'SHAMSI', toDate(A.TVV) ) TV1, + substr(TV1, 3, 8) || ' : ' || toString(1) TV_CHAR_1 +from A LEFT JOIN B USING (R) +ORDER BY TV1; + +-- query runs successfully (in 215 ms) + + + +WITH A as (SELECT rowNumberInAllBlocks() R,addDays(toDate('2021-05-18'), R) TVV from numbers(5)), + B as (SELECT rowNumberInAllBlocks() R,toDateTime(NULL) TVV from numbers(1)) +SELECT + joinGetOrNull('DATE_INFO_DICT', 'SHAMSI', toDate(A.TVV) ) TV1, + substr(TV1, 3, 8) || ' : ' || toString(1) TV_CHAR_1 +from A LEFT JOIN B USING (R) +ORDER BY TV1; + +-- query does not run successfully!
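For context on the two results above: joinGet returns the attribute type's default value (an empty string for a String column) when a key is missing, while joinGetOrNull returns NULL instead, so its result type is Nullable. A minimal sketch of the difference on a hypothetical Join-engine table jg_demo (the name and values are illustrative, not part of the original script):

CREATE TABLE jg_demo (k Date, v String) ENGINE = Join(ANY, LEFT, k);
INSERT INTO jg_demo VALUES ('2021-05-18', '1400-02-28');

-- Key present: both functions return '1400-02-28'.
-- Key absent: joinGet returns the default '', joinGetOrNull returns NULL.
SELECT joinGet('jg_demo', 'v', toDate('2021-05-19'));
SELECT joinGetOrNull('jg_demo', 'v', toDate('2021-05-19'));

Presumably it is the Nullable(String) result of the joinGetOrNull variant that the failing query trips over.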
+ + +DROP TABLE DATE_INFO_DICT; diff --git a/parser/testdata/01910_memory_tracking_topk/ast.json b/parser/testdata/01910_memory_tracking_topk/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01910_memory_tracking_topk/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01910_memory_tracking_topk/metadata.json b/parser/testdata/01910_memory_tracking_topk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01910_memory_tracking_topk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01910_memory_tracking_topk/query.sql b/parser/testdata/01910_memory_tracking_topk/query.sql new file mode 100644 index 000000000..c638d7a96 --- /dev/null +++ b/parser/testdata/01910_memory_tracking_topk/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-replicated-database + +-- Memory limit must correctly apply, triggering an exception: + +SET max_memory_usage = '100M'; +SELECT length(topK(5592405)(tuple(number))) FROM numbers(10) GROUP BY number; -- { serverError MEMORY_LIMIT_EXCEEDED } diff --git a/parser/testdata/01910_view_dictionary/ast.json b/parser/testdata/01910_view_dictionary/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01910_view_dictionary/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01910_view_dictionary/metadata.json b/parser/testdata/01910_view_dictionary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01910_view_dictionary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01910_view_dictionary/query.sql b/parser/testdata/01910_view_dictionary/query.sql new file mode 100644 index 000000000..51f46deca --- /dev/null +++ b/parser/testdata/01910_view_dictionary/query.sql @@ -0,0 +1,49 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS dictionary_source_en; +DROP TABLE IF EXISTS dictionary_source_ru; +DROP TABLE IF EXISTS dictionary_source_view; +DROP DICTIONARY IF EXISTS flat_dictionary; + +CREATE TABLE dictionary_source_en +( + id UInt64, + value String +) ENGINE = TinyLog; + +INSERT INTO dictionary_source_en VALUES (1, 'One'), (2,'Two'), (3, 'Three'); + +CREATE TABLE dictionary_source_ru +( + id UInt64, + value String +) ENGINE = TinyLog; + +INSERT INTO dictionary_source_ru VALUES (1, 'Один'), (2,'Два'), (3, 'Три'); + +CREATE VIEW dictionary_source_view AS + SELECT id, dictionary_source_en.value as value_en, dictionary_source_ru.value as value_ru + FROM dictionary_source_en LEFT JOIN dictionary_source_ru USING (id); + +select * from dictionary_source_view ORDER BY id; + +CREATE DICTIONARY flat_dictionary +( + id UInt64, + value_en String, + value_ru String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' PASSWORD '' TABLE 'dictionary_source_view')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(FLAT()); + +SELECT + dictGet(concat(currentDatabase(), '.flat_dictionary'), 'value_en', number + 1), + dictGet(concat(currentDatabase(), '.flat_dictionary'), 'value_ru', number + 1) +FROM numbers(3); + +DROP TABLE dictionary_source_en; +DROP TABLE dictionary_source_ru; +DROP DICTIONARY flat_dictionary; +DROP TABLE dictionary_source_view; diff --git a/parser/testdata/01910_view_dictionary_check_refresh/ast.json b/parser/testdata/01910_view_dictionary_check_refresh/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01910_view_dictionary_check_refresh/ast.json @@ -0,0 +1 @@ +{"error": true} 
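A note on the dictGet calls in the 01910_view_dictionary test above: a DDL-created dictionary is addressed by its fully qualified database.dictionary name, and because test databases get generated names, the test builds that name at runtime with concat(currentDatabase(), '.flat_dictionary') rather than hard-coding it. A minimal sketch of the same lookup pattern, assuming a hypothetical dictionary d with a String attribute value and a UInt64 key:

-- Look up attribute 'value' for key 1 in dictionary 'd' of the current database.
SELECT dictGet(concat(currentDatabase(), '.d'), 'value', toUInt64(1));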
diff --git a/parser/testdata/01910_view_dictionary_check_refresh/metadata.json b/parser/testdata/01910_view_dictionary_check_refresh/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01910_view_dictionary_check_refresh/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01910_view_dictionary_check_refresh/query.sql b/parser/testdata/01910_view_dictionary_check_refresh/query.sql new file mode 100644 index 000000000..b36a378d8 --- /dev/null +++ b/parser/testdata/01910_view_dictionary_check_refresh/query.sql @@ -0,0 +1,54 @@ +-- Tags: long + +DROP DICTIONARY IF EXISTS TestTblDict; +DROP VIEW IF EXISTS TestTbl_view; +DROP TABLE IF EXISTS TestTbl; + +CREATE TABLE TestTbl +( + `id` UInt16, + `dt` Date, + `val` String +) +ENGINE = MergeTree +PARTITION BY dt +ORDER BY (id); + +CREATE VIEW TestTbl_view +AS +SELECT * +FROM TestTbl +WHERE dt = ( SELECT max(dt) FROM TestTbl ); + +CREATE DICTIONARY IF NOT EXISTS TestTblDict +( + `id` UInt16, + `dt` Date, + `val` String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE TestTbl_view DB currentDatabase())) +LIFETIME(1) +LAYOUT(COMPLEX_KEY_HASHED()); + +select 'view' src,* FROM TestTbl_view; +select 'dict' src,* FROM TestTblDict ; + +insert into TestTbl values(1, '2022-10-20', 'first'); + +SELECT sleep(3) from numbers(4) settings max_block_size= 1 format Null; + +select 'view' src,* FROM TestTbl_view; +select 'dict' src,* FROM TestTblDict ; + +insert into TestTbl values(1, '2022-10-21', 'second'); + +SELECT sleep(3) from numbers(4) settings max_block_size= 1 format Null; + +select 'view' src,* FROM TestTbl_view; +select 'dict' src,* FROM TestTblDict ; + +DROP DICTIONARY IF EXISTS TestTblDict; +DROP VIEW IF EXISTS TestTbl_view; +DROP TABLE IF EXISTS TestTbl; + diff --git a/parser/testdata/01911_logical_error_minus/ast.json b/parser/testdata/01911_logical_error_minus/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01911_logical_error_minus/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01911_logical_error_minus/metadata.json b/parser/testdata/01911_logical_error_minus/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01911_logical_error_minus/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01911_logical_error_minus/query.sql b/parser/testdata/01911_logical_error_minus/query.sql new file mode 100644 index 000000000..7f371a463 --- /dev/null +++ b/parser/testdata/01911_logical_error_minus/query.sql @@ -0,0 +1,78 @@ +-- This test case is almost completely generated by a fuzzer. +-- It appeared to trigger an assertion.
+ +SET cross_to_inner_join_rewrite = 1; + +DROP TABLE IF EXISTS codecTest; + +CREATE TABLE codecTest ( + key UInt64, + name String, + ref_valueF64 Float64, + ref_valueF32 Float32, + valueF64 Float64 CODEC(Gorilla), + valueF32 Float32 CODEC(Gorilla) +) Engine = MergeTree ORDER BY key; + +INSERT INTO codecTest (key, name, ref_valueF64, valueF64, ref_valueF32, valueF32) + SELECT number AS n, 'e()', e() AS v, v, v, v FROM system.numbers LIMIT 1, 100; + +INSERT INTO codecTest (key, name, ref_valueF64, valueF64, ref_valueF32, valueF32) + SELECT number AS n, 'log2(n)', log2(n) AS v, v, v, v FROM system.numbers LIMIT 101, 100; + +INSERT INTO codecTest (key, name, ref_valueF64, valueF64, ref_valueF32, valueF32) + SELECT number AS n, 'n*sqrt(n)', n*sqrt(n) AS v, v, v, v FROM system.numbers LIMIT 201, 100; + +INSERT INTO codecTest (key, name, ref_valueF64, valueF64, ref_valueF32, valueF32) + SELECT number AS n, 'sin(n*n*n)*n', sin(n * n * n * n* n) AS v, v, v, v FROM system.numbers LIMIT 301, 100; + +SELECT IF(2, NULL, 0.00009999999747378752), IF(104, 1048576, NULL), c1.key, IF(1, NULL, NULL), c2.key FROM codecTest AS c1 , codecTest AS c2 WHERE ignore(IF(255, -2, NULL), arrayJoin([65537]), IF(3, 1024, 9223372036854775807)) AND IF(NULL, 256, NULL) AND (IF(NULL, '1048576', NULL) = (c1.key - NULL)) LIMIT 65535; + +SELECT c1.key, c1.name, c1.ref_valueF64, c1.valueF64, c1.ref_valueF64 - c1.valueF64 AS dF64, '', c2.key, c2.ref_valueF64 FROM codecTest AS c1 , codecTest AS c2 WHERE (dF64 != 3) AND c1.valueF64 != 0 AND (c2.key = (c1.key - 1048576)) LIMIT 0; + + +DROP TABLE codecTest; + +CREATE TABLE codecTest ( + key UInt64, + ref_valueU64 UInt64, + ref_valueU32 UInt32, + ref_valueU16 UInt16, + ref_valueU8 UInt8, + ref_valueI64 Int64, + ref_valueI32 Int32, + ref_valueI16 Int16, + ref_valueI8 Int8, + ref_valueDT DateTime, + ref_valueD Date, + valueU64 UInt64 CODEC(DoubleDelta), + valueU32 UInt32 CODEC(DoubleDelta), + valueU16 UInt16 CODEC(DoubleDelta), + valueU8 UInt8 CODEC(DoubleDelta), + valueI64 Int64 CODEC(DoubleDelta), + valueI32 Int32 CODEC(DoubleDelta), + valueI16 Int16 CODEC(DoubleDelta), + valueI8 Int8 CODEC(DoubleDelta), + valueDT DateTime CODEC(DoubleDelta), + valueD Date CODEC(DoubleDelta) +) Engine = MergeTree ORDER BY key SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO codecTest (key, ref_valueU64, valueU64, ref_valueI64, valueI64) + VALUES (1, 18446744073709551615, 18446744073709551615, 9223372036854775807, 9223372036854775807), (2, 0, 0, -9223372036854775808, -9223372036854775808), (3, 18446744073709551615, 18446744073709551615, 9223372036854775807, 9223372036854775807); + +INSERT INTO codecTest (key, ref_valueU64, valueU64, ref_valueU32, valueU32, ref_valueU16, valueU16, ref_valueU8, valueU8, ref_valueI64, valueI64, ref_valueI32, valueI32, ref_valueI16, valueI16, ref_valueI8, valueI8, ref_valueDT, valueDT, ref_valueD, valueD) + SELECT number as n, n * n * n as v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, toDateTime(v), toDateTime(v), toDate(v), toDate(v) + FROM system.numbers LIMIT 101, 1000; + +INSERT INTO codecTest (key, ref_valueU64, valueU64, ref_valueU32, valueU32, ref_valueU16, valueU16, ref_valueU8, valueU8, ref_valueI64, valueI64, ref_valueI32, valueI32, ref_valueI16, valueI16, ref_valueI8, valueI8, ref_valueDT, valueDT, ref_valueD, valueD) + SELECT number as n, n as v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, toDateTime(v), toDateTime(v), toDate(v), toDate(v) + FROM system.numbers LIMIT 2001, 1000; + +INSERT INTO codecTest (key, ref_valueU64, valueU64, ref_valueU32, 
valueU32, ref_valueU16, valueU16, ref_valueU8, valueU8, ref_valueI64, valueI64, ref_valueI32, valueI32, ref_valueI16, valueI16, ref_valueI8, valueI8, ref_valueDT, valueDT, ref_valueD, valueD) + SELECT number as n, n + (rand64() - 9223372036854775807)/1000 as v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, v, toDateTime(v), toDateTime(v), toDate(v), toDate(v) + FROM system.numbers LIMIT 3001, 1000; + +SELECT IF(2, NULL, 0.00009999999747378752), IF(104, 1048576, NULL), c1.key, IF(1, NULL, NULL), c2.key FROM codecTest AS c1 , codecTest AS c2 WHERE ignore(IF(255, -2, NULL), arrayJoin([65537]), IF(3, 1024, 9223372036854775807)) AND IF(NULL, 256, NULL) AND (IF(NULL, '1048576', NULL) = (c1.key - NULL)) LIMIT 65535; + + +DROP TABLE codecTest; diff --git a/parser/testdata/01912_bad_cast_join_fuzz/ast.json b/parser/testdata/01912_bad_cast_join_fuzz/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01912_bad_cast_join_fuzz/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01912_bad_cast_join_fuzz/metadata.json b/parser/testdata/01912_bad_cast_join_fuzz/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01912_bad_cast_join_fuzz/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01912_bad_cast_join_fuzz/query.sql b/parser/testdata/01912_bad_cast_join_fuzz/query.sql new file mode 100644 index 000000000..01e02a3be --- /dev/null +++ b/parser/testdata/01912_bad_cast_join_fuzz/query.sql @@ -0,0 +1,16 @@ +SELECT + 1023 + l, + * +FROM +( + SELECT toLowCardinality(toNullable(number)) AS l + FROM system.numbers + LIMIT 10 +) AS s1 +ANY LEFT JOIN +( + SELECT toLowCardinality(toNullable(number)) AS r + FROM system.numbers + LIMIT 7 +) AS s2 ON (l + 1023) = (r * 3) +ORDER BY l, r; diff --git a/parser/testdata/01913_exact_rows_before_limit/ast.json b/parser/testdata/01913_exact_rows_before_limit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01913_exact_rows_before_limit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01913_exact_rows_before_limit/metadata.json b/parser/testdata/01913_exact_rows_before_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01913_exact_rows_before_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01913_exact_rows_before_limit/query.sql b/parser/testdata/01913_exact_rows_before_limit/query.sql new file mode 100644 index 000000000..6a354f575 --- /dev/null +++ b/parser/testdata/01913_exact_rows_before_limit/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-parallel, no-random-merge-tree-settings + +drop table if exists test_rows_compact_part; +create table test_rows_compact_part(f1 int,f2 int) engine=MergeTree partition by f1 order by f2 settings min_bytes_for_wide_part=10485760; +insert into test_rows_compact_part select 0,arrayJoin(range(10000)) ; +insert into test_rows_compact_part select 1,arrayJoin(range(10000)); +select 0 from test_rows_compact_part limit 1 FORMAT JSONCompact settings exact_rows_before_limit = 0,output_format_write_statistics = 0; +select 0 from test_rows_compact_part limit 1 FORMAT JSONCompact settings exact_rows_before_limit = 1, output_format_write_statistics = 0; +drop table if exists test_rows_compact_part; + + +drop table if exists test_rows_wide_part; +create table test_rows_wide_part(f1 int,f2 int) engine=MergeTree partition by f1 order by f2 settings min_bytes_for_wide_part=0; +insert into 
test_rows_wide_part select 0,arrayJoin(range(10000)) ; +insert into test_rows_wide_part select 1,arrayJoin(range(10000)); +select 0 from test_rows_wide_part limit 1 FORMAT JSONCompact settings exact_rows_before_limit = 0,output_format_write_statistics = 0; +select 0 from test_rows_wide_part limit 1 FORMAT JSONCompact settings exact_rows_before_limit = 1, output_format_write_statistics = 0; +drop table if exists test_rows_wide_part; \ No newline at end of file diff --git a/parser/testdata/01913_exact_rows_before_limit_full/ast.json b/parser/testdata/01913_exact_rows_before_limit_full/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01913_exact_rows_before_limit_full/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01913_exact_rows_before_limit_full/metadata.json b/parser/testdata/01913_exact_rows_before_limit_full/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01913_exact_rows_before_limit_full/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01913_exact_rows_before_limit_full/query.sql b/parser/testdata/01913_exact_rows_before_limit_full/query.sql new file mode 100644 index 000000000..8841854d5 --- /dev/null +++ b/parser/testdata/01913_exact_rows_before_limit_full/query.sql @@ -0,0 +1,29 @@ +-- Tags: no-parallel, no-random-merge-tree-settings, no-parallel-replicas + +drop table if exists test; + +create table test (i int) engine MergeTree order by tuple(); + +insert into test select arrayJoin(range(10000)); + +set exact_rows_before_limit = 1, output_format_write_statistics = 0, max_block_size = 100; + +select * from test limit 1 FORMAT JSONCompact; + +select * from test where i < 10 group by i order by i limit 1 FORMAT JSONCompact; + +select * from test group by i having i in (10, 11, 12) order by i limit 1 FORMAT JSONCompact; + +select * from test where i < 20 order by i limit 1 FORMAT JSONCompact; + +set prefer_localhost_replica = 0; +select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 order by i limit 1 FORMAT JSONCompact; +select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 20 order by i limit 1 FORMAT JSONCompact; + +set prefer_localhost_replica = 1; +select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 order by i limit 1 FORMAT JSONCompact; +select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 20 order by i limit 1 FORMAT JSONCompact; + +select * from (select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 10) order by i limit 1 FORMAT JSONCompact; + +drop table if exists test; diff --git a/parser/testdata/01913_fix_column_transformer_replace_format/ast.json b/parser/testdata/01913_fix_column_transformer_replace_format/ast.json new file mode 100644 index 000000000..60bf7acc7 --- /dev/null +++ b/parser/testdata/01913_fix_column_transformer_replace_format/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery my_table (children 1)" + }, + { + "explain": " Identifier my_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001300511, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/01913_fix_column_transformer_replace_format/metadata.json b/parser/testdata/01913_fix_column_transformer_replace_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01913_fix_column_transformer_replace_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01913_fix_column_transformer_replace_format/query.sql b/parser/testdata/01913_fix_column_transformer_replace_format/query.sql new file mode 100644 index 000000000..e17c202cc --- /dev/null +++ b/parser/testdata/01913_fix_column_transformer_replace_format/query.sql @@ -0,0 +1,9 @@ +drop table if exists my_table; +drop view if exists my_view; +create table my_table(Id UInt32, Object Nested(Key UInt8, Value String)) engine MergeTree order by Id; +create view my_view as select * replace arrayMap(x -> x + 1,`Object.Key`) as `Object.Key` from my_table; + +show create my_view; + +drop table my_table; +drop view my_view; diff --git a/parser/testdata/01913_if_int_decimal/ast.json b/parser/testdata/01913_if_int_decimal/ast.json new file mode 100644 index 000000000..367f65fe9 --- /dev/null +++ b/parser/testdata/01913_if_int_decimal/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'Decimal(18, 10)'" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001480994, + "rows_read": 23, + "bytes_read": 906 + } +} diff --git a/parser/testdata/01913_if_int_decimal/metadata.json b/parser/testdata/01913_if_int_decimal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01913_if_int_decimal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01913_if_int_decimal/query.sql b/parser/testdata/01913_if_int_decimal/query.sql new file mode 100644 index 000000000..83fb4c352 --- /dev/null +++ b/parser/testdata/01913_if_int_decimal/query.sql @@ -0,0 +1 @@ +select number % 2 ? 
materialize(1)::Decimal(18, 10) : 2 FROM numbers(3); diff --git a/parser/testdata/01913_join_push_down_bug/ast.json b/parser/testdata/01913_join_push_down_bug/ast.json new file mode 100644 index 000000000..37467b9d5 --- /dev/null +++ b/parser/testdata/01913_join_push_down_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001541321, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01913_join_push_down_bug/metadata.json b/parser/testdata/01913_join_push_down_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01913_join_push_down_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01913_join_push_down_bug/query.sql b/parser/testdata/01913_join_push_down_bug/query.sql new file mode 100644 index 000000000..b945530b3 --- /dev/null +++ b/parser/testdata/01913_join_push_down_bug/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + `t` UInt8, + `flag` UInt8, + `id` UInt8 +) +ENGINE = MergeTree +PARTITION BY t +ORDER BY (t, id) +SETTINGS index_granularity = 8192; + +INSERT INTO test VALUES (1,0,1),(1,0,2),(1,0,3),(1,0,4),(1,0,5),(1,0,6),(1,1,7),(0,0,7); + +set query_plan_filter_push_down = true; + +SELECT id, flag FROM test t1 +INNER JOIN (SELECT DISTINCT id FROM test) AS t2 ON t1.id = t2.id +WHERE flag = 0 and t = 1 AND id NOT IN (SELECT 1 WHERE 0) +ORDER BY id; + +DROP TABLE IF EXISTS test; diff --git a/parser/testdata/01913_names_of_tuple_literal/ast.json b/parser/testdata/01913_names_of_tuple_literal/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01913_names_of_tuple_literal/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01913_names_of_tuple_literal/metadata.json b/parser/testdata/01913_names_of_tuple_literal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01913_names_of_tuple_literal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01913_names_of_tuple_literal/query.sql b/parser/testdata/01913_names_of_tuple_literal/query.sql new file mode 100644 index 000000000..d6dda4fda --- /dev/null +++ b/parser/testdata/01913_names_of_tuple_literal/query.sql @@ -0,0 +1,4 @@ +SET enable_analyzer = 0; + +SELECT ((1, 2), (2, 3), (3, 4)) FORMAT TSVWithNames; +SELECT ((1, 2), (2, 3), (3, 4)) FORMAT TSVWithNames SETTINGS legacy_column_name_of_tuple_literal = 1; diff --git a/parser/testdata/01913_replace_dictionary/ast.json b/parser/testdata/01913_replace_dictionary/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01913_replace_dictionary/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01913_replace_dictionary/metadata.json b/parser/testdata/01913_replace_dictionary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01913_replace_dictionary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01913_replace_dictionary/query.sql b/parser/testdata/01913_replace_dictionary/query.sql new file mode 100644 index 000000000..891d0c6ab --- /dev/null +++ b/parser/testdata/01913_replace_dictionary/query.sql @@ -0,0 +1,53 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 01913_db; +CREATE DATABASE 01913_db 
ENGINE=Atomic; + +DROP TABLE IF EXISTS 01913_db.test_source_table_1; +CREATE TABLE 01913_db.test_source_table_1 +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO 01913_db.test_source_table_1 VALUES (0, 'Value0'); + +DROP DICTIONARY IF EXISTS 01913_db.test_dictionary; +CREATE DICTIONARY 01913_db.test_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +LAYOUT(DIRECT()) +SOURCE(CLICKHOUSE(DB '01913_db' TABLE 'test_source_table_1')); + +SELECT * FROM 01913_db.test_dictionary; + +DROP TABLE IF EXISTS 01913_db.test_source_table_2; +CREATE TABLE 01913_db.test_source_table_2 +( + id UInt64, + value_1 String +) ENGINE=TinyLog; + +INSERT INTO 01913_db.test_source_table_2 VALUES (0, 'Value1'); + +REPLACE DICTIONARY 01913_db.test_dictionary +( + id UInt64, + value_1 String +) +PRIMARY KEY id +LAYOUT(HASHED()) +SOURCE(CLICKHOUSE(DB '01913_db' TABLE 'test_source_table_2')) +LIFETIME(0); + +SELECT * FROM 01913_db.test_dictionary; + +DROP DICTIONARY 01913_db.test_dictionary; + +DROP TABLE 01913_db.test_source_table_1; +DROP TABLE 01913_db.test_source_table_2; + +DROP DATABASE 01913_db; diff --git a/parser/testdata/01913_summing_mt_and_simple_agg_function_with_lc/ast.json b/parser/testdata/01913_summing_mt_and_simple_agg_function_with_lc/ast.json new file mode 100644 index 000000000..d7c943b7d --- /dev/null +++ b/parser/testdata/01913_summing_mt_and_simple_agg_function_with_lc/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery smta (children 1)" + }, + { + "explain": " Identifier smta" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001864561, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01913_summing_mt_and_simple_agg_function_with_lc/metadata.json b/parser/testdata/01913_summing_mt_and_simple_agg_function_with_lc/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01913_summing_mt_and_simple_agg_function_with_lc/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01913_summing_mt_and_simple_agg_function_with_lc/query.sql b/parser/testdata/01913_summing_mt_and_simple_agg_function_with_lc/query.sql new file mode 100644 index 000000000..f4785c963 --- /dev/null +++ b/parser/testdata/01913_summing_mt_and_simple_agg_function_with_lc/query.sql @@ -0,0 +1,21 @@ +drop table if exists smta; + +CREATE TABLE smta +( + `k` Int64, + `a` AggregateFunction(max, Int64), + `city` SimpleAggregateFunction(max, LowCardinality(String)) +) +ENGINE = SummingMergeTree +ORDER BY k; + +insert into smta(k, city) values (1, 'x'); + +select k, city from smta; + +insert into smta(k, city) values (1, 'y'); +optimize table smta; + +select k, city from smta; + +drop table if exists smta; diff --git a/parser/testdata/01914_exchange_dictionaries/ast.json b/parser/testdata/01914_exchange_dictionaries/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01914_exchange_dictionaries/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01914_exchange_dictionaries/metadata.json b/parser/testdata/01914_exchange_dictionaries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01914_exchange_dictionaries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01914_exchange_dictionaries/query.sql b/parser/testdata/01914_exchange_dictionaries/query.sql new file mode 100644 index 000000000..8c86a04fd --- /dev/null 
+++ b/parser/testdata/01914_exchange_dictionaries/query.sql @@ -0,0 +1,42 @@ +-- Tags: no-ordinary-database, no-parallel +-- Tag no-ordinary-database: Requires Atomic database + +DROP DATABASE IF EXISTS 01914_db; +CREATE DATABASE 01914_db ENGINE=Atomic; + +DROP TABLE IF EXISTS 01914_db.table_1; +CREATE TABLE 01914_db.table_1 (id UInt64, value String) ENGINE=TinyLog; + +DROP TABLE IF EXISTS 01914_db.table_2; +CREATE TABLE 01914_db.table_2 (id UInt64, value String) ENGINE=TinyLog; + +INSERT INTO 01914_db.table_1 VALUES (1, 'Table1'); +INSERT INTO 01914_db.table_2 VALUES (2, 'Table2'); + +DROP DICTIONARY IF EXISTS 01914_db.dictionary_1; +CREATE DICTIONARY 01914_db.dictionary_1 (id UInt64, value String) +PRIMARY KEY id +LAYOUT(DIRECT()) +SOURCE(CLICKHOUSE(DB '01914_db' TABLE 'table_1')); + +DROP DICTIONARY IF EXISTS 01914_db.dictionary_2; +CREATE DICTIONARY 01914_db.dictionary_2 (id UInt64, value String) +PRIMARY KEY id +LAYOUT(DIRECT()) +SOURCE(CLICKHOUSE(DB '01914_db' TABLE 'table_2')); + +SELECT * FROM 01914_db.dictionary_1; +SELECT * FROM 01914_db.dictionary_2; + +EXCHANGE DICTIONARIES 01914_db.dictionary_1 AND 01914_db.dictionary_2; + +SELECT * FROM 01914_db.dictionary_1; +SELECT * FROM 01914_db.dictionary_2; + +DROP DICTIONARY 01914_db.dictionary_1; +DROP DICTIONARY 01914_db.dictionary_2; + +DROP TABLE 01914_db.table_1; +DROP TABLE 01914_db.table_2; + +DROP DATABASE 01914_db; diff --git a/parser/testdata/01914_index_bgranvea/ast.json b/parser/testdata/01914_index_bgranvea/ast.json new file mode 100644 index 000000000..5e85d17c5 --- /dev/null +++ b/parser/testdata/01914_index_bgranvea/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001353764, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01914_index_bgranvea/metadata.json b/parser/testdata/01914_index_bgranvea/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01914_index_bgranvea/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01914_index_bgranvea/query.sql b/parser/testdata/01914_index_bgranvea/query.sql new file mode 100644 index 000000000..817c95710 --- /dev/null +++ b/parser/testdata/01914_index_bgranvea/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS test; +create table test (id UInt64,insid UInt64,insidvalue Nullable(UInt64), index insid_idx (insid) type bloom_filter() granularity 1, index insidvalue_idx (insidvalue) type bloom_filter() granularity 1) ENGINE=MergeTree() ORDER BY (insid,id); + +insert into test values(1,1,1),(2,2,2); + +select * from test where insid IN (1) OR insidvalue IN (1); +select * from test where insid IN (1) AND insidvalue IN (1); + +DROP TABLE test; diff --git a/parser/testdata/01914_ubsan_quantile_timing/ast.json b/parser/testdata/01914_ubsan_quantile_timing/ast.json new file mode 100644 index 000000000..541429f1a --- /dev/null +++ b/parser/testdata/01914_ubsan_quantile_timing/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantileTiming (children 2)" + }, + { + "explain": " ExpressionList 
(children 1)" + }, + { + "explain": " Function divide (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal Float64_1.1754943508222875e-38" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_-0" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_257" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001323176, + "rows_read": 18, + "bytes_read": 734 + } +} diff --git a/parser/testdata/01914_ubsan_quantile_timing/metadata.json b/parser/testdata/01914_ubsan_quantile_timing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01914_ubsan_quantile_timing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01914_ubsan_quantile_timing/query.sql b/parser/testdata/01914_ubsan_quantile_timing/query.sql new file mode 100644 index 000000000..422dd3bbb --- /dev/null +++ b/parser/testdata/01914_ubsan_quantile_timing/query.sql @@ -0,0 +1 @@ +SELECT quantileTiming(-0.)(number / 1.1754943508222875e-38) FROM numbers(257); diff --git a/parser/testdata/01915_create_or_replace_dictionary/ast.json b/parser/testdata/01915_create_or_replace_dictionary/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01915_create_or_replace_dictionary/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01915_create_or_replace_dictionary/metadata.json b/parser/testdata/01915_create_or_replace_dictionary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01915_create_or_replace_dictionary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01915_create_or_replace_dictionary/query.sql b/parser/testdata/01915_create_or_replace_dictionary/query.sql new file mode 100644 index 000000000..6f52aea0f --- /dev/null +++ b/parser/testdata/01915_create_or_replace_dictionary/query.sql @@ -0,0 +1,53 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS test_01915_db; +CREATE DATABASE test_01915_db ENGINE=Atomic; + +DROP TABLE IF EXISTS test_01915_db.test_source_table_1; +CREATE TABLE test_01915_db.test_source_table_1 +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_01915_db.test_source_table_1 VALUES (0, 'Value0'); + +DROP DICTIONARY IF EXISTS test_01915_db.test_dictionary; +CREATE OR REPLACE DICTIONARY test_01915_db.test_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +LAYOUT(DIRECT()) +SOURCE(CLICKHOUSE(DB 'test_01915_db' TABLE 'test_source_table_1')); + +SELECT * FROM test_01915_db.test_dictionary; + +DROP TABLE IF EXISTS test_01915_db.test_source_table_2; +CREATE TABLE test_01915_db.test_source_table_2 +( + id UInt64, + value_1 String +) ENGINE=TinyLog; + +INSERT INTO test_01915_db.test_source_table_2 VALUES (0, 'Value1'); + +CREATE OR REPLACE DICTIONARY test_01915_db.test_dictionary +( + id UInt64, + value_1 String +) +PRIMARY KEY id +LAYOUT(HASHED()) +SOURCE(CLICKHOUSE(DB 'test_01915_db' TABLE 'test_source_table_2')) +LIFETIME(0); + +SELECT * FROM test_01915_db.test_dictionary; + +DROP DICTIONARY test_01915_db.test_dictionary; + +DROP TABLE test_01915_db.test_source_table_1; +DROP TABLE 
test_01915_db.test_source_table_2; + +DROP DATABASE test_01915_db; diff --git a/parser/testdata/01915_for_each_crakjie/ast.json b/parser/testdata/01915_for_each_crakjie/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01915_for_each_crakjie/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01915_for_each_crakjie/metadata.json b/parser/testdata/01915_for_each_crakjie/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01915_for_each_crakjie/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01915_for_each_crakjie/query.sql b/parser/testdata/01915_for_each_crakjie/query.sql new file mode 100644 index 000000000..7d6ce0746 --- /dev/null +++ b/parser/testdata/01915_for_each_crakjie/query.sql @@ -0,0 +1,11 @@ +WITH arrayJoin(['a', 'b']) AS z +SELECT + z, + sumMergeForEach(x) AS x +FROM +( + SELECT sumStateForEach([1., 1.1, 1.1300175]) AS x + FROM remote('127.0.0.{1,2}', system.one) +) +GROUP BY z +ORDER BY z; diff --git a/parser/testdata/01915_json_extract_raw_string/ast.json b/parser/testdata/01915_json_extract_raw_string/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01915_json_extract_raw_string/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01915_json_extract_raw_string/metadata.json b/parser/testdata/01915_json_extract_raw_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01915_json_extract_raw_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01915_json_extract_raw_string/query.sql b/parser/testdata/01915_json_extract_raw_string/query.sql new file mode 100644 index 000000000..4b46db315 --- /dev/null +++ b/parser/testdata/01915_json_extract_raw_string/query.sql @@ -0,0 +1,9 @@ + +select JSONExtract('{"a": "123", "b": 456, "c": [7, 8, 9]}', 'Tuple(a String, b String, c String)'); + +with '{"string_value":null}' as json select JSONExtract(json, 'string_value', 'Nullable(String)'); +with '{"string_value":null}' as json select JSONExtract(json, 'string_value', 'LowCardinality(Nullable(String))'); + +select JSONExtractString('{"a": 123}', 'a'); +select JSONExtractString('{"a": "123"}', 'a'); +select JSONExtractString('{"a": null}', 'a'); diff --git a/parser/testdata/01915_merge_prewhere_virtual_column_rand_chao_wang/ast.json b/parser/testdata/01915_merge_prewhere_virtual_column_rand_chao_wang/ast.json new file mode 100644 index 000000000..b64721deb --- /dev/null +++ b/parser/testdata/01915_merge_prewhere_virtual_column_rand_chao_wang/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery abc (children 1)" + }, + { + "explain": " Identifier abc" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001694155, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01915_merge_prewhere_virtual_column_rand_chao_wang/metadata.json b/parser/testdata/01915_merge_prewhere_virtual_column_rand_chao_wang/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01915_merge_prewhere_virtual_column_rand_chao_wang/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01915_merge_prewhere_virtual_column_rand_chao_wang/query.sql b/parser/testdata/01915_merge_prewhere_virtual_column_rand_chao_wang/query.sql new file mode 100644 index 000000000..499776636 --- /dev/null 
+++ b/parser/testdata/01915_merge_prewhere_virtual_column_rand_chao_wang/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS abc; + +CREATE TABLE abc +( + `f1` String, + `f2` String +) +ENGINE = MergeTree() +ORDER BY f1; + +-- In version 20.12 this query sometimes produces an exception "Cannot find column" +SELECT f2 FROM merge(currentDatabase(), '^abc$') PREWHERE _table = 'abc' AND f1 = 'a' AND rand() % 100 < 20; -- { serverError ILLEGAL_PREWHERE } +SELECT f2 FROM merge(currentDatabase(), '^abc$') PREWHERE _table = 'abc' AND f1 = 'a'; -- { serverError ILLEGAL_PREWHERE } + +SELECT f2 FROM merge(currentDatabase(), '^abc$') PREWHERE f1 = 'a' AND rand() % 100 < 20 WHERE _table = 'abc'; +SELECT f2 FROM merge(currentDatabase(), '^abc$') PREWHERE f1 = 'a' WHERE _table = 'abc'; + +DROP TABLE abc; diff --git a/parser/testdata/01916_low_cardinality_interval/ast.json b/parser/testdata/01916_low_cardinality_interval/ast.json new file mode 100644 index 000000000..fc52316e2 --- /dev/null +++ b/parser/testdata/01916_low_cardinality_interval/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toIntervalSecond (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001464657, + "rows_read": 9, + "bytes_read": 365 + } +} diff --git a/parser/testdata/01916_low_cardinality_interval/metadata.json b/parser/testdata/01916_low_cardinality_interval/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01916_low_cardinality_interval/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01916_low_cardinality_interval/query.sql b/parser/testdata/01916_low_cardinality_interval/query.sql new file mode 100644 index 000000000..954431e1e --- /dev/null +++ b/parser/testdata/01916_low_cardinality_interval/query.sql @@ -0,0 +1,7 @@ +SELECT toLowCardinality(toIntervalSecond(1)); +SELECT toLowCardinality(toIntervalMinute(1)); +SELECT toLowCardinality(toIntervalHour(1)); +SELECT toLowCardinality(toIntervalDay(1)); +SELECT toLowCardinality(toIntervalWeek(1)); +SELECT toLowCardinality(toIntervalQuarter(1)); +SELECT toLowCardinality(toIntervalYear(1)); diff --git a/parser/testdata/01916_lowcard_dict_type/ast.json b/parser/testdata/01916_lowcard_dict_type/ast.json new file mode 100644 index 000000000..f55b24ba7 --- /dev/null +++ b/parser/testdata/01916_lowcard_dict_type/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001490388, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01916_lowcard_dict_type/metadata.json b/parser/testdata/01916_lowcard_dict_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01916_lowcard_dict_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01916_lowcard_dict_type/query.sql 
b/parser/testdata/01916_lowcard_dict_type/query.sql new file mode 100644 index 000000000..116f47dc0 --- /dev/null +++ b/parser/testdata/01916_lowcard_dict_type/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (`x` UInt32, `lc` LowCardinality(String) ) ENGINE = Memory; +INSERT INTO t1 VALUES (1, '1'), (2, '2'); + +SELECT toIntervalMinute(lc) as e, toTypeName(e) FROM t1; +SELECT toIntervalDay(lc) as e, toTypeName(e) FROM t1; + +CREATE TABLE t2 (`x` UInt32, `lc` LowCardinality(String) ) ENGINE = Memory; +INSERT INTO t2 VALUES (1, '61f0c404-5cb3-11e7-907b-a6006ad3dba2'); + +SELECT toUUID(lc) as e, toTypeName(e) FROM t2; + +INSERT INTO t2 VALUES (2, '2'); + +SELECT toIntervalMinute(lc), toTypeName(materialize(r.lc)) FROM t1 AS l INNER JOIN t2 as r USING (lc); + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; diff --git a/parser/testdata/01916_multiple_join_view_optimize_predicate_chertus/ast.json b/parser/testdata/01916_multiple_join_view_optimize_predicate_chertus/ast.json new file mode 100644 index 000000000..e42b739f1 --- /dev/null +++ b/parser/testdata/01916_multiple_join_view_optimize_predicate_chertus/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery a (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00127395, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/01916_multiple_join_view_optimize_predicate_chertus/metadata.json b/parser/testdata/01916_multiple_join_view_optimize_predicate_chertus/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01916_multiple_join_view_optimize_predicate_chertus/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01916_multiple_join_view_optimize_predicate_chertus/query.sql b/parser/testdata/01916_multiple_join_view_optimize_predicate_chertus/query.sql new file mode 100644 index 000000000..b3d5dbae8 --- /dev/null +++ b/parser/testdata/01916_multiple_join_view_optimize_predicate_chertus/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS a; +DROP TABLE IF EXISTS j; + +CREATE TABLE a(`id` UInt32, `val` UInt32) ENGINE = Memory; +CREATE TABLE j(`id` UInt32, `val` UInt8) ENGINE = Join(ANY, LEFT, id); + +INSERT INTO a VALUES (1,1)(2,2)(3,3); +INSERT INTO j VALUES (2,2)(4,4); + +SELECT * FROM a ANY LEFT OUTER JOIN j USING id ORDER BY a.id, a.val SETTINGS enable_optimize_predicate_expression = 1; +SELECT * FROM a ANY LEFT OUTER JOIN j USING id ORDER BY a.id, a.val SETTINGS enable_optimize_predicate_expression = 0; + +DROP TABLE a; +DROP TABLE j; + +CREATE TABLE j (id UInt8, val UInt8) Engine = Join(ALL, INNER, id); +SELECT * FROM (SELECT 0 id, 1 val) _ JOIN j USING id; + +DROP TABLE j; diff --git a/parser/testdata/01917_distinct_on/ast.json b/parser/testdata/01917_distinct_on/ast.json new file mode 100644 index 000000000..d98d4d618 --- /dev/null +++ b/parser/testdata/01917_distinct_on/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001213845, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01917_distinct_on/metadata.json b/parser/testdata/01917_distinct_on/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01917_distinct_on/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01917_distinct_on/query.sql b/parser/testdata/01917_distinct_on/query.sql new file mode 100644 index 000000000..93f756603 --- /dev/null +++ b/parser/testdata/01917_distinct_on/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS t1; + +CREATE TABLE t1 (`a` UInt32, `b` UInt32, `c` UInt32 ) ENGINE = Memory; +INSERT INTO t1 VALUES (1, 1, 1), (1, 1, 2), (2, 2, 2), (1, 2, 2); + +SELECT DISTINCT ON (a, b) a, b, c FROM t1; +SELECT DISTINCT ON (a, b) * FROM t1; +SELECT DISTINCT ON (a) * FROM t1; + +-- fuzzer will fail, enable when fixed +-- SELECT DISTINCT ON (a, b) a, b, c FROM t1 LIMIT 1 BY a, b; -- { clientError SYNTAX_ERROR } + +-- SELECT DISTINCT ON a, b a, b FROM t1; -- { clientError SYNTAX_ERROR } +-- SELECT DISTINCT ON a a, b FROM t1; -- { clientError SYNTAX_ERROR } + +-- "Code: 47. DB::Exception: Missing columns: 'DISTINCT'" - error can be better +-- SELECT DISTINCT ON (a, b) DISTINCT a, b FROM t1; -- { serverError UNKNOWN_IDENTIFIER } +-- SELECT DISTINCT DISTINCT ON (a, b) a, b FROM t1; -- { clientError SYNTAX_ERROR } + +-- SELECT ALL DISTINCT ON (a, b) a, b FROM t1; -- { clientError SYNTAX_ERROR } +-- SELECT DISTINCT ON (a, b) ALL a, b FROM t1; -- { clientError SYNTAX_ERROR } + +DROP TABLE IF EXISTS t1; diff --git a/parser/testdata/01917_prewhere_column_type/ast.json b/parser/testdata/01917_prewhere_column_type/ast.json new file mode 100644 index 000000000..1e28822ce --- /dev/null +++ b/parser/testdata/01917_prewhere_column_type/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001712877, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01917_prewhere_column_type/metadata.json b/parser/testdata/01917_prewhere_column_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01917_prewhere_column_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01917_prewhere_column_type/query.sql b/parser/testdata/01917_prewhere_column_type/query.sql new file mode 100644 index 000000000..676f72f0c --- /dev/null +++ b/parser/testdata/01917_prewhere_column_type/query.sql @@ -0,0 +1,20 @@ +SET optimize_move_to_prewhere = 1; + +DROP TABLE IF EXISTS t1; + +CREATE TABLE t1 ( s String, f Float32, e UInt16 ) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = '100G'; + +INSERT INTO t1 VALUES ('111', 1, 1); + +SELECT s FROM t1 WHERE f AND (e = 1); +SELECT s FROM t1 WHERE f AND (e = 1) SETTINGS optimize_move_to_prewhere=true; +SELECT s FROM t1 WHERE f AND (e = 1) SETTINGS optimize_move_to_prewhere=false; +SELECT s FROM t1 PREWHERE f AND (e = 1); +SELECT s FROM t1 PREWHERE f; +SELECT s FROM t1 PREWHERE f WHERE (e = 1); +SELECT s FROM t1 PREWHERE f WHERE f AND (e = 1); + +SELECT s FROM t1 WHERE e AND (e = 1); +SELECT s FROM t1 PREWHERE e; +SELECT s FROM t1 PREWHERE e WHERE (e = 1); +SELECT s FROM t1 PREWHERE e WHERE f AND (e = 1); diff --git a/parser/testdata/01917_system_data_skipping_indices/ast.json b/parser/testdata/01917_system_data_skipping_indices/ast.json new file mode 100644 index 000000000..1e49d23b5 --- /dev/null +++ b/parser/testdata/01917_system_data_skipping_indices/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_01917 (children 1)" + }, + { + 
"explain": " Identifier data_01917" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001626368, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/01917_system_data_skipping_indices/metadata.json b/parser/testdata/01917_system_data_skipping_indices/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01917_system_data_skipping_indices/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01917_system_data_skipping_indices/query.sql b/parser/testdata/01917_system_data_skipping_indices/query.sql new file mode 100644 index 000000000..bfe9d6398 --- /dev/null +++ b/parser/testdata/01917_system_data_skipping_indices/query.sql @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS data_01917; +DROP TABLE IF EXISTS data_01917_2; + +CREATE TABLE data_01917 +( + key Int, + d1 Int, + d1_null Nullable(Int), + INDEX d1_idx d1 TYPE minmax GRANULARITY 1, + INDEX d1_null_idx assumeNotNull(d1_null) TYPE minmax GRANULARITY 1 +) +Engine=MergeTree() +ORDER BY key; + +CREATE TABLE data_01917_2 +( + name String, + frequency UInt64, + INDEX memory (frequency * length(name)) TYPE set(1000) GRANULARITY 5, + INDEX sample_index1 (length(name), name) TYPE minmax GRANULARITY 4, + INDEX sample_index2 (lower(name), name) TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 4 +) +Engine=MergeTree() +ORDER BY name; + +SELECT * FROM system.data_skipping_indices WHERE database = currentDatabase(); + +SELECT count(*) FROM system.data_skipping_indices WHERE table = 'data_01917' AND database = currentDatabase(); +SELECT count(*) FROM system.data_skipping_indices WHERE table = 'data_01917_2' AND database = currentDatabase(); + +SELECT name FROM system.data_skipping_indices WHERE type = 'minmax' AND database = currentDatabase(); + +DROP TABLE data_01917; +DROP TABLE data_01917_2; + diff --git a/parser/testdata/01920_not_chain_format/ast.json b/parser/testdata/01920_not_chain_format/ast.json new file mode 100644 index 000000000..08526a350 --- /dev/null +++ b/parser/testdata/01920_not_chain_format/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00133391, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01920_not_chain_format/metadata.json b/parser/testdata/01920_not_chain_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01920_not_chain_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01920_not_chain_format/query.sql b/parser/testdata/01920_not_chain_format/query.sql new file mode 100644 index 000000000..d4447713c --- /dev/null +++ b/parser/testdata/01920_not_chain_format/query.sql @@ -0,0 +1,4 @@ +set enable_analyzer = 1; +-- { echo } +EXPLAIN SYNTAX SELECT NOT NOT (NOT (NOT (NULL))); +EXPLAIN SYNTAX SELECT NOT (NOT (NOT NOT NULL)); diff --git a/parser/testdata/01921_datatype_date32/ast.json b/parser/testdata/01921_datatype_date32/ast.json new file mode 100644 index 000000000..807f91571 --- /dev/null +++ b/parser/testdata/01921_datatype_date32/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00125658, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git 
a/parser/testdata/01921_datatype_date32/metadata.json b/parser/testdata/01921_datatype_date32/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01921_datatype_date32/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01921_datatype_date32/query.sql b/parser/testdata/01921_datatype_date32/query.sql new file mode 100644 index 000000000..7c699f55b --- /dev/null +++ b/parser/testdata/01921_datatype_date32/query.sql @@ -0,0 +1,133 @@ +drop table if exists t1; +create table t1(x1 Date32) engine Memory; + +insert into t1 values ('1900-01-01'),('1899-01-01'),('2299-12-15'),('2300-12-31'),('2021-06-22'); + +select x1 from t1; +select '-------toYear---------'; +select toYear(x1) from t1; +select '-------toMonth---------'; +select toMonth(x1) from t1; +select '-------toQuarter---------'; +select toQuarter(x1) from t1; +select '-------toDayOfMonth---------'; +select toDayOfMonth(x1) from t1; +select '-------toDayOfWeek---------'; +select toDayOfWeek(x1) from t1; +select '-------toDayOfYear---------'; +select toDayOfYear(x1) from t1; +select '-------toHour---------'; +select toHour(x1) from t1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select '-------toMinute---------'; +select toMinute(x1) from t1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select '-------toSecond---------'; +select toSecond(x1) from t1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select '-------toStartOfDay---------'; +select toStartOfDay(x1, 'Asia/Istanbul') from t1; +select '-------toMonday---------'; +select toMonday(x1) from t1; +select '-------toISOWeek---------'; +select toISOWeek(x1) from t1; +select '-------toISOYear---------'; +select toISOYear(x1) from t1; +select '-------toWeek---------'; +select toWeek(x1) from t1; +select '-------toYearWeek---------'; +select toYearWeek(x1) from t1; +select '-------toStartOfWeek---------'; +select toStartOfWeek(x1) from t1; +select '-------toLastDayOfWeek---------'; +select toLastDayOfWeek(x1) from t1; +select '-------toStartOfMonth---------'; +select toStartOfMonth(x1) from t1; +select '-------toStartOfQuarter---------'; +select toStartOfQuarter(x1) from t1; +select '-------toStartOfYear---------'; +select toStartOfYear(x1) from t1; +select '-------toStartOfSecond---------'; +select toStartOfSecond(x1) from t1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select '-------toStartOfMinute---------'; +select toStartOfMinute(x1) from t1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select '-------toStartOfFiveMinutes---------'; +select toStartOfFiveMinutes(x1) from t1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select '-------toStartOfTenMinutes---------'; +select toStartOfTenMinutes(x1) from t1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select '-------toStartOfFifteenMinutes---------'; +select toStartOfFifteenMinutes(x1) from t1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select '-------toStartOfHour---------'; +select toStartOfHour(x1) from t1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select '-------toStartOfISOYear---------'; +select toStartOfISOYear(x1) from t1; +select '-------toRelativeYearNum---------'; +select toRelativeYearNum(x1, 'Asia/Istanbul') from t1; +select '-------toRelativeQuarterNum---------'; +select toRelativeQuarterNum(x1, 'Asia/Istanbul') from t1; +select '-------toRelativeMonthNum---------'; +select toRelativeMonthNum(x1, 'Asia/Istanbul') from t1; +select '-------toRelativeWeekNum---------'; +select toRelativeWeekNum(x1, 'Asia/Istanbul') from t1; +select 
'-------toRelativeDayNum---------'; +select toRelativeDayNum(x1, 'Asia/Istanbul') from t1; +select '-------toRelativeHourNum---------'; +select toRelativeHourNum(x1, 'Asia/Istanbul') from t1; +select '-------toRelativeMinuteNum---------'; +select toRelativeMinuteNum(x1, 'Asia/Istanbul') from t1; +select '-------toRelativeSecondNum---------'; +select toRelativeSecondNum(x1, 'Asia/Istanbul') from t1; +select '-------toTime---------'; +select toTimeWithFixedDate(x1) from t1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select '-------toYYYYMM---------'; +select toYYYYMM(x1) from t1; +select '-------toYYYYMMDD---------'; +select toYYYYMMDD(x1) from t1; +select '-------toYYYYMMDDhhmmss---------'; +select toYYYYMMDDhhmmss(x1) from t1; +select '-------addSeconds---------'; +select addSeconds(x1, 3600) from t1; +select '-------addMinutes---------'; +select addMinutes(x1, 60) from t1; +select '-------addHours---------'; +select addHours(x1, 1) from t1; +select '-------addDays---------'; +select addDays(x1, 7) from t1; +select '-------addWeeks---------'; +select addWeeks(x1, 1) from t1; +select '-------addMonths---------'; +select addMonths(x1, 1) from t1; +select '-------addQuarters---------'; +select addQuarters(x1, 1) from t1; +select '-------addYears---------'; +select addYears(x1, 1) from t1; +select '-------subtractSeconds---------'; +select subtractSeconds(x1, 3600) from t1; +select '-------subtractMinutes---------'; +select subtractMinutes(x1, 60) from t1; +select '-------subtractHours---------'; +select subtractHours(x1, 1) from t1; +select '-------subtractDays---------'; +select subtractDays(x1, 7) from t1; +select '-------subtractWeeks---------'; +select subtractWeeks(x1, 1) from t1; +select '-------subtractMonths---------'; +select subtractMonths(x1, 1) from t1; +select '-------subtractQuarters---------'; +select subtractQuarters(x1, 1) from t1; +select '-------subtractYears---------'; +select subtractYears(x1, 1) from t1; +select '-------toDate32---------'; +select toDate32('1900-01-01'), toDate32(toDate('2000-01-01')); +select toDate32OrZero('1899-01-01'), toDate32OrNull('1899-01-01'); +select toDate32OrZero(''), toDate32OrNull(''); +select (select toDate32OrZero('')); +select (select toDate32OrNull('')); +SELECT toString(T.d) dateStr +FROM + ( + SELECT '1900-01-01'::Date32 d + UNION ALL SELECT '1969-12-31'::Date32 + UNION ALL SELECT '1970-01-01'::Date32 + UNION ALL SELECT '2149-06-06'::Date32 + UNION ALL SELECT '2149-06-07'::Date32 + UNION ALL SELECT '2299-12-31'::Date32 + ) AS T +ORDER BY T.d diff --git a/parser/testdata/01921_not_chain/ast.json b/parser/testdata/01921_not_chain/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01921_not_chain/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01921_not_chain/metadata.json b/parser/testdata/01921_not_chain/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01921_not_chain/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01921_not_chain/query.sql b/parser/testdata/01921_not_chain/query.sql new file mode 100644 index 000000000..52549e8ce --- /dev/null +++ b/parser/testdata/01921_not_chain/query.sql @@ -0,0 +1,5 @@ +-- { echo } +SELECT 1 != (NOT 1); +SELECT 1 != NOT 1; +EXPLAIN SYNTAX SELECT 1 != (NOT 1); +EXPLAIN SYNTAX SELECT 1 != NOT 1; diff --git a/parser/testdata/01921_with_fill_with_totals/ast.json b/parser/testdata/01921_with_fill_with_totals/ast.json new file mode 100644 index 000000000..490a2e17e 
--- /dev/null +++ b/parser/testdata/01921_with_fill_with_totals/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01921_with_fill_with_totals/metadata.json b/parser/testdata/01921_with_fill_with_totals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01921_with_fill_with_totals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01921_with_fill_with_totals/query.sql b/parser/testdata/01921_with_fill_with_totals/query.sql new file mode 100644 index 000000000..253e8219d --- /dev/null +++ b/parser/testdata/01921_with_fill_with_totals/query.sql @@ -0,0 +1,19 @@ +SELECT + number, + sum(number) +FROM numbers(10) +WHERE number % 3 = 1 +GROUP BY number + WITH TOTALS +ORDER BY number DESC WITH FILL FROM 15; + +SET enable_positional_arguments = 0; + +SELECT + number, + sum(number) +FROM numbers(10) +WHERE number % 3 = 1 +GROUP BY number + WITH TOTALS +ORDER BY 10, number DESC WITH FILL FROM 15; diff --git a/parser/testdata/01922_array_join_with_index/ast.json b/parser/testdata/01922_array_join_with_index/ast.json new file mode 100644 index 000000000..94ac424a4 --- /dev/null +++ b/parser/testdata/01922_array_join_with_index/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_array_index (children 1)" + }, + { + "explain": " Identifier t_array_index" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001110568, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/01922_array_join_with_index/metadata.json b/parser/testdata/01922_array_join_with_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01922_array_join_with_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01922_array_join_with_index/query.sql b/parser/testdata/01922_array_join_with_index/query.sql new file mode 100644 index 000000000..1444c6396 --- /dev/null +++ b/parser/testdata/01922_array_join_with_index/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS t_array_index; + +CREATE TABLE t_array_index (n Nested(key String, value String)) +ENGINE = MergeTree ORDER BY n.key; + +INSERT INTO t_array_index VALUES (['a', 'b'], ['c', 'd']); + +SELECT * FROM t_array_index ARRAY JOIN n WHERE n.key = 'a'; + +DROP TABLE IF EXISTS t_array_index; diff --git a/parser/testdata/01922_sum_null_for_remote/ast.json b/parser/testdata/01922_sum_null_for_remote/ast.json new file mode 100644 index 000000000..807f2d806 --- /dev/null +++ b/parser/testdata/01922_sum_null_for_remote/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '127.0.0.{1,2}'" + }, + { + "explain": " Literal 'system'" + }, + { + "explain": " Literal 'one'" + } + ], + + "rows": 15, + + "statistics": 
+ { + "elapsed": 0.001321868, + "rows_read": 15, + "bytes_read": 571 + } +} diff --git a/parser/testdata/01922_sum_null_for_remote/metadata.json b/parser/testdata/01922_sum_null_for_remote/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01922_sum_null_for_remote/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01922_sum_null_for_remote/query.sql b/parser/testdata/01922_sum_null_for_remote/query.sql new file mode 100644 index 000000000..a19740364 --- /dev/null +++ b/parser/testdata/01922_sum_null_for_remote/query.sql @@ -0,0 +1 @@ +select sum(null) from remote('127.0.0.{1,2}', 'system', 'one') diff --git a/parser/testdata/01923_different_expression_name_alias/ast.json b/parser/testdata/01923_different_expression_name_alias/ast.json new file mode 100644 index 000000000..a978beb28 --- /dev/null +++ b/parser/testdata/01923_different_expression_name_alias/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery distributed_tbl (children 1)" + }, + { + "explain": " Identifier distributed_tbl" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00163434, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/01923_different_expression_name_alias/metadata.json b/parser/testdata/01923_different_expression_name_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01923_different_expression_name_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01923_different_expression_name_alias/query.sql b/parser/testdata/01923_different_expression_name_alias/query.sql new file mode 100644 index 000000000..09108cef4 --- /dev/null +++ b/parser/testdata/01923_different_expression_name_alias/query.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS distributed_tbl; +DROP TABLE IF EXISTS merge_tree_table; + +CREATE TABLE merge_tree_table +( + Date Date, + SomeType UInt8, + Alternative1 UInt64, + Alternative2 UInt64, + User UInt32, + CharID UInt64 ALIAS multiIf(SomeType IN (3, 4, 11), 0, SomeType IN (7, 8), Alternative1, Alternative2) +) +ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO merge_tree_table VALUES(toDate('2016-03-01'), 4, 0, 0, 1486392); + +SELECT count() FROM merge_tree_table; + +CREATE TABLE distributed_tbl +( + Date Date, + SomeType UInt8, + Alternative1 UInt64, + Alternative2 UInt64, + CharID UInt64, + User UInt32 +) +ENGINE = Distributed(test_shard_localhost, currentDatabase(), merge_tree_table); + +SELECT identity(CharID) AS x +FROM distributed_tbl +WHERE (Date = toDate('2016-03-01')) AND (User = 1486392) AND (x = 0); + +DROP TABLE IF EXISTS distributed_tbl; +DROP TABLE IF EXISTS merge_tree_table; diff --git a/parser/testdata/01923_ttl_with_modify_column/ast.json b/parser/testdata/01923_ttl_with_modify_column/ast.json new file mode 100644 index 000000000..a0efad3a9 --- /dev/null +++ b/parser/testdata/01923_ttl_with_modify_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_ttl_modify_column (children 1)" + }, + { + "explain": " Identifier t_ttl_modify_column" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001384079, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/01923_ttl_with_modify_column/metadata.json b/parser/testdata/01923_ttl_with_modify_column/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01923_ttl_with_modify_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01923_ttl_with_modify_column/query.sql b/parser/testdata/01923_ttl_with_modify_column/query.sql new file mode 100644 index 000000000..732a699b2 --- /dev/null +++ b/parser/testdata/01923_ttl_with_modify_column/query.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS t_ttl_modify_column; + +CREATE TABLE t_ttl_modify_column +( + InsertionDateTime DateTime, + TTLDays Int32 DEFAULT CAST(365, 'Int32') +) +ENGINE = MergeTree +ORDER BY tuple() +TTL InsertionDateTime + toIntervalDay(TTLDays) +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO t_ttl_modify_column VALUES (now(), 23); + +SET mutations_sync = 2; + +ALTER TABLE t_ttl_modify_column modify column TTLDays Int16 DEFAULT CAST(365, 'Int16'); + +INSERT INTO t_ttl_modify_column VALUES (now(), 23); + +SELECT sum(rows), groupUniqArray(type) FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_ttl_modify_column' AND column = 'TTLDays' AND active; + +DROP TABLE IF EXISTS t_ttl_modify_column; + +CREATE TABLE t_ttl_modify_column (InsertionDateTime DateTime) +ENGINE = MergeTree +ORDER BY tuple() +TTL InsertionDateTime + INTERVAL 3 DAY +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO t_ttl_modify_column VALUES (now()); + +ALTER TABLE t_ttl_modify_column MODIFY COLUMN InsertionDateTime Date; + +INSERT INTO t_ttl_modify_column VALUES (now()); + +SELECT sum(rows), groupUniqArray(type) FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_ttl_modify_column' AND column = 'InsertionDateTime' AND active; + +ALTER TABLE t_ttl_modify_column MODIFY COLUMN InsertionDateTime Float32; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +DROP TABLE IF EXISTS t_ttl_modify_column; diff --git a/parser/testdata/01924_argmax_bitmap_state/ast.json b/parser/testdata/01924_argmax_bitmap_state/ast.json new file mode 100644 index 000000000..e713ffc01 --- /dev/null +++ b/parser/testdata/01924_argmax_bitmap_state/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitmapMax (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function argMax (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001059998, + "rows_read": 10, + "bytes_read": 372 + } +} diff --git a/parser/testdata/01924_argmax_bitmap_state/metadata.json b/parser/testdata/01924_argmax_bitmap_state/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01924_argmax_bitmap_state/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01924_argmax_bitmap_state/query.sql b/parser/testdata/01924_argmax_bitmap_state/query.sql new file mode 100644 index 000000000..298bbceeb --- /dev/null +++ b/parser/testdata/01924_argmax_bitmap_state/query.sql @@ -0,0 +1,8 @@ +SELECT bitmapMax(argMax(x, y)) +FROM remote('127.0.0.{2,3}', view( + SELECT + groupBitmapState(toUInt32(number)) AS x, + number AS y + FROM numbers(10) + GROUP BY number +)); diff --git 
a/parser/testdata/01925_broken_partition_id_zookeeper/ast.json b/parser/testdata/01925_broken_partition_id_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01925_broken_partition_id_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01925_broken_partition_id_zookeeper/metadata.json b/parser/testdata/01925_broken_partition_id_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01925_broken_partition_id_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01925_broken_partition_id_zookeeper/query.sql b/parser/testdata/01925_broken_partition_id_zookeeper/query.sql new file mode 100644 index 000000000..d0fc34e4f --- /dev/null +++ b/parser/testdata/01925_broken_partition_id_zookeeper/query.sql @@ -0,0 +1,29 @@ +-- Tags: zookeeper + +DROP TABLE IF EXISTS broken_partition; + +CREATE TABLE broken_partition +( + date Date, + key UInt64 +) +ENGINE = ReplicatedMergeTree('/clickhouse/test_01925_{database}/rmt', 'r1') +ORDER BY tuple() +PARTITION BY date; + +ALTER TABLE broken_partition DROP PARTITION ID '20210325_0_13241_6_12747'; --{serverError INVALID_PARTITION_VALUE} + +ALTER TABLE broken_partition DROP PARTITION ID '20210325_0_13241_6_12747'; --{serverError INVALID_PARTITION_VALUE} + +DROP TABLE IF EXISTS broken_partition; + +DROP TABLE IF EXISTS old_partition_key; + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE TABLE old_partition_key (sd Date, dh UInt64, ak UInt32, ed Date) ENGINE=MergeTree(sd, dh, (ak, ed, dh), 8192); + +ALTER TABLE old_partition_key DROP PARTITION ID '20210325_0_13241_6_12747'; --{serverError INVALID_PARTITION_VALUE} + +ALTER TABLE old_partition_key DROP PARTITION ID '202103'; + +DROP TABLE old_partition_key; diff --git a/parser/testdata/01925_date_date_time_comparison/ast.json b/parser/testdata/01925_date_date_time_comparison/ast.json new file mode 100644 index 000000000..7985b882e --- /dev/null +++ b/parser/testdata/01925_date_date_time_comparison/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2000-01-01'" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2000-01-01 00:00:01'" + }, + { + "explain": " Literal 'Asia\/Istanbul'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.000999755, + "rows_read": 13, + "bytes_read": 519 + } +} diff --git a/parser/testdata/01925_date_date_time_comparison/metadata.json b/parser/testdata/01925_date_date_time_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01925_date_date_time_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01925_date_date_time_comparison/query.sql b/parser/testdata/01925_date_date_time_comparison/query.sql new file mode 100644 index 000000000..0659d85b0 --- /dev/null +++ b/parser/testdata/01925_date_date_time_comparison/query.sql @@ -0,0 +1,2 
@@ +SELECT toDate('2000-01-01') < toDateTime('2000-01-01 00:00:01', 'Asia/Istanbul'); +SELECT toDate('2000-01-01') < toDateTime64('2000-01-01 00:00:01', 0, 'Asia/Istanbul'); diff --git a/parser/testdata/01925_jit_aggregation_function_count_long/ast.json b/parser/testdata/01925_jit_aggregation_function_count_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01925_jit_aggregation_function_count_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01925_jit_aggregation_function_count_long/metadata.json b/parser/testdata/01925_jit_aggregation_function_count_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01925_jit_aggregation_function_count_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01925_jit_aggregation_function_count_long/query.sql b/parser/testdata/01925_jit_aggregation_function_count_long/query.sql new file mode 100644 index 000000000..01e2c711e --- /dev/null +++ b/parser/testdata/01925_jit_aggregation_function_count_long/query.sql @@ -0,0 +1,17 @@ +-- Tags: long + +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + + value UInt8, + value_nullable Nullable(UInt8) +) ENGINE=TinyLog; + +INSERT INTO test_table SELECT number % 3, number, if (number % 2 == 0, number, NULL) FROM system.numbers LIMIT 120; +SELECT id, count(value), count(value_nullable) FROM test_table GROUP BY id ORDER BY id; +DROP TABLE test_table; diff --git a/parser/testdata/01925_join_materialized_columns/ast.json b/parser/testdata/01925_join_materialized_columns/ast.json new file mode 100644 index 000000000..5f8816ad8 --- /dev/null +++ b/parser/testdata/01925_join_materialized_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001505599, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/01925_join_materialized_columns/metadata.json b/parser/testdata/01925_join_materialized_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01925_join_materialized_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01925_join_materialized_columns/query.sql b/parser/testdata/01925_join_materialized_columns/query.sql new file mode 100644 index 000000000..8721dca87 --- /dev/null +++ b/parser/testdata/01925_join_materialized_columns/query.sql @@ -0,0 +1,59 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 ( + time DateTime, + foo String, + dimension_1 String, + dt Date MATERIALIZED toDate(time), + dt1 Date MATERIALIZED toDayOfYear(time), + aliascol1 ALIAS foo || dimension_1, + time_alias DateTime ALIAS time +) ENGINE = MergeTree() PARTITION BY toYYYYMM(dt) ORDER BY (dt, foo); + +CREATE TABLE t2 ( + time DateTime, + bar String, + dimension_2 String, + dt Date MATERIALIZED toDate(time), + dt2 Date MATERIALIZED toDayOfYear(time), + aliascol2 ALIAS bar || dimension_2, + time_alias DateTime ALIAS time +) ENGINE = MergeTree() PARTITION BY toYYYYMM(dt) ORDER BY (dt, bar); + +INSERT INTO t1 VALUES ('2020-01-01 12:00:00', 'fact1', 't1_val1'), ('2020-02-02 13:00:00', 'fact2', 't1_val2'), ('2020-01-01 13:00:00', 'fact3', 't1_val3'); +INSERT INTO 
t2 VALUES ('2020-01-01 12:00:00', 'fact1', 't2_val2'), ('2020-02-05 13:00:00', 'fact2', 't1_val2'), ('2019-01-01 12:00:00', 'fact4', 't2_val2'); + +SELECT * FROM t1 JOIN t2 ON t1.foo = t2.bar WHERE t2.dt >= '2020-02-01'; +SELECT '-'; +SELECT t1.*, t1.dt, t2.*, t2.dt FROM t1 JOIN t2 ON t1.foo = t2.bar WHERE t2.dt >= '2020-02-01'; +SELECT '-'; +SELECT t1.dt, t2.dt FROM t1 JOIN t2 ON t1.foo = t2.bar ORDER BY t1.dt; +SELECT '-'; +SELECT * FROM t1 ALL JOIN t2 ON t1.dt = t2.dt ORDER BY t1.time, t2.time; +SELECT '-'; +SELECT * FROM t1 ALL JOIN t2 USING (dt) ORDER BY t1.time, t2.time settings enable_analyzer=0; +SELECT * FROM t1 ALL JOIN t2 USING (dt) ORDER BY t1.time, t2.time settings enable_analyzer=1; +SELECT '-'; +SELECT * FROM t1 JOIN t2 ON t1.dt1 = t2.dt2 ORDER BY t1.time, t1.dimension_1, t2.time, t2.dimension_2; +SELECT '-'; +SELECT * FROM t1 JOIN t2 ON t1.foo = t2.bar WHERE t2.aliascol2 == 'fact2t1_val2'; +SELECT '-'; +SELECT t1.aliascol1, t2.aliascol2 FROM t1 JOIN t2 ON t1.foo = t2.bar ORDER BY t1.time, t2.time; +SELECT '-'; +SELECT t1.time, t2.time FROM t1 JOIN t2 ON t1.aliascol1 = t2.aliascol2 ORDER BY t1.time, t2.time; +SELECT '-'; +SELECT count() FROM t1 JOIN t2 ON t1.time_alias = t2.time; +SELECT count() FROM t1 JOIN t2 ON t1.time = t2.time_alias; +SELECT count() FROM t1 JOIN t2 ON t1.time_alias = t2.time_alias; +SELECT count() FROM t1 JOIN t2 USING (time_alias); +SELECT '-'; +SELECT t1.time as talias FROM t1 JOIN t2 ON talias = t2.time; +SELECT t1.time as talias FROM t1 JOIN t2 ON talias = t2.time_alias; +SELECT t2.time as talias FROM t1 JOIN t2 ON t1.time = talias; +SELECT t2.time as talias FROM t1 JOIN t2 ON t1.time_alias = talias; +SELECT time as talias FROM t1 JOIN t2 ON talias = t2.time settings enable_analyzer=0; -- { serverError AMBIGUOUS_COLUMN_NAME } +SELECT time as talias FROM t1 JOIN t2 ON talias = t2.time settings enable_analyzer=1; + +SELECT time as talias FROM t1 JOIN t2 ON t1.time = talias ORDER BY ALL settings enable_analyzer=1; +SELECT * FROM t1 JOIN t2 ON t1.time = t2.time AND NULL ORDER BY ALL settings enable_analyzer=1; diff --git a/parser/testdata/01925_json_as_string_data_in_square_brackets/ast.json b/parser/testdata/01925_json_as_string_data_in_square_brackets/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01925_json_as_string_data_in_square_brackets/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01925_json_as_string_data_in_square_brackets/metadata.json b/parser/testdata/01925_json_as_string_data_in_square_brackets/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01925_json_as_string_data_in_square_brackets/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01925_json_as_string_data_in_square_brackets/query.sql b/parser/testdata/01925_json_as_string_data_in_square_brackets/query.sql new file mode 100644 index 000000000..6e6f306c1 --- /dev/null +++ b/parser/testdata/01925_json_as_string_data_in_square_brackets/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS json_square_brackets; +CREATE TABLE json_square_brackets (field String) ENGINE = Memory; +INSERT INTO json_square_brackets FORMAT JSONAsString [{"id": 1, "name": "name1"}, {"id": 2, "name": "name2"}]; + +INSERT INTO json_square_brackets FORMAT JSONAsString[]; + +INSERT INTO json_square_brackets FORMAT JSONAsString [ ] ; + +INSERT INTO json_square_brackets FORMAT JSONEachRow ; + +SELECT * FROM json_square_brackets; +DROP TABLE IF EXISTS 
json_square_brackets; diff --git a/parser/testdata/01925_map_populate_series_on_map/ast.json b/parser/testdata/01925_map_populate_series_on_map/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01925_map_populate_series_on_map/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01925_map_populate_series_on_map/metadata.json b/parser/testdata/01925_map_populate_series_on_map/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01925_map_populate_series_on_map/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01925_map_populate_series_on_map/query.sql b/parser/testdata/01925_map_populate_series_on_map/query.sql new file mode 100644 index 000000000..22c56e7e4 --- /dev/null +++ b/parser/testdata/01925_map_populate_series_on_map/query.sql @@ -0,0 +1,35 @@ +-- { echo } +drop table if exists map_test; +create table map_test engine=TinyLog() as (select (number + 1) as n, map(1, 1, number,2) as m from numbers(1, 5)); + +select mapPopulateSeries(m) from map_test; +select mapPopulateSeries(m, toUInt64(3)) from map_test; +select mapPopulateSeries(m, toUInt64(10)) from map_test; +select mapPopulateSeries(m, 10) from map_test; +select mapPopulateSeries(m, n) from map_test; + +drop table map_test; + +select mapPopulateSeries(map(toUInt8(1), toUInt8(1), 2, 1)) as res, toTypeName(res); +select mapPopulateSeries(map(toUInt16(1), toUInt16(1), 2, 1)) as res, toTypeName(res); +select mapPopulateSeries(map(toUInt32(1), toUInt32(1), 2, 1)) as res, toTypeName(res); +select mapPopulateSeries(map(toUInt64(1), toUInt64(1), 2, 1)) as res, toTypeName(res); +select mapPopulateSeries(map(toUInt128(1), toUInt128(1), 2, 1)) as res, toTypeName(res); +select mapPopulateSeries(map(toUInt256(1), toUInt256(1), 2, 1)) as res, toTypeName(res); + +select mapPopulateSeries(map(toInt8(1), toInt8(1), 2, 1)) as res, toTypeName(res); +select mapPopulateSeries(map(toInt16(1), toInt16(1), 2, 1)) as res, toTypeName(res); +select mapPopulateSeries(map(toInt32(1), toInt32(1), 2, 1)) as res, toTypeName(res); +select mapPopulateSeries(map(toInt64(1), toInt64(1), 2, 1)) as res, toTypeName(res); +select mapPopulateSeries(map(toInt128(1), toInt128(1), 2, 1)) as res, toTypeName(res); +select mapPopulateSeries(map(toInt256(1), toInt256(1), 2, 1)) as res, toTypeName(res); + +select mapPopulateSeries(map(toInt8(-10), toInt8(1), 2, 1)) as res, toTypeName(res); +select mapPopulateSeries(map(toInt16(-10), toInt16(1), 2, 1)) as res, toTypeName(res); +select mapPopulateSeries(map(toInt32(-10), toInt32(1), 2, 1)) as res, toTypeName(res); +select mapPopulateSeries(map(toInt64(-10), toInt64(1), 2, 1)) as res, toTypeName(res); +select mapPopulateSeries(map(toInt64(-10), toInt64(1), 2, 1), toInt64(-5)) as res, toTypeName(res); + +select mapPopulateSeries(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select mapPopulateSeries('asdf'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select mapPopulateSeries(map('1', 1, '2', 1)) as res, toTypeName(res); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/01925_merge_prewhere_table/ast.json b/parser/testdata/01925_merge_prewhere_table/ast.json new file mode 100644 index 000000000..031a3500e --- /dev/null +++ b/parser/testdata/01925_merge_prewhere_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery x_1 (children 1)" + }, + { + "explain": " Identifier x_1" + } + ], + + 
"rows": 2, + + "statistics": + { + "elapsed": 0.001149017, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01925_merge_prewhere_table/metadata.json b/parser/testdata/01925_merge_prewhere_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01925_merge_prewhere_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01925_merge_prewhere_table/query.sql b/parser/testdata/01925_merge_prewhere_table/query.sql new file mode 100644 index 000000000..4862a7bb4 --- /dev/null +++ b/parser/testdata/01925_merge_prewhere_table/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS x_1; +DROP TABLE IF EXISTS x_2; +DROP TABLE IF EXISTS x; + +create table x_1 engine=Log as select * from numbers(10); +create table x_2 engine=Log as select * from numbers(10); +create table x engine=Merge(currentDatabase(), '^x_(1|2)$') as x_1; + +select _table, count() from x group by _table order by _table; + +DROP TABLE x_1; +DROP TABLE x_2; +DROP TABLE x; diff --git a/parser/testdata/01925_test_group_by_const_consistency/ast.json b/parser/testdata/01925_test_group_by_const_consistency/ast.json new file mode 100644 index 000000000..b7a0fde96 --- /dev/null +++ b/parser/testdata/01925_test_group_by_const_consistency/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias a)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001396778, + "rows_read": 16, + "bytes_read": 595 + } +} diff --git a/parser/testdata/01925_test_group_by_const_consistency/metadata.json b/parser/testdata/01925_test_group_by_const_consistency/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01925_test_group_by_const_consistency/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01925_test_group_by_const_consistency/query.sql b/parser/testdata/01925_test_group_by_const_consistency/query.sql new file mode 100644 index 000000000..176b5761a --- /dev/null +++ b/parser/testdata/01925_test_group_by_const_consistency/query.sql @@ -0,0 +1,6 @@ +SELECT 1 as a, count() FROM numbers(10) WHERE 0 GROUP BY a; +SELECT count() FROM numbers(10) WHERE 0; + +SELECT 1 as a, count() FROM numbers(10) WHERE 0 GROUP BY a SETTINGS empty_result_for_aggregation_by_constant_keys_on_empty_set = 0; + +SELECT 1 as a, count() FROM numbers(10) WHERE 0 GROUP BY a SETTINGS empty_result_for_aggregation_by_constant_keys_on_empty_set = 0, optimize_trivial_count_query = 0; diff --git a/parser/testdata/01925_test_storage_merge_aliases/ast.json b/parser/testdata/01925_test_storage_merge_aliases/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/01925_test_storage_merge_aliases/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01925_test_storage_merge_aliases/metadata.json b/parser/testdata/01925_test_storage_merge_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01925_test_storage_merge_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01925_test_storage_merge_aliases/query.sql b/parser/testdata/01925_test_storage_merge_aliases/query.sql new file mode 100644 index 000000000..56d255570 --- /dev/null +++ b/parser/testdata/01925_test_storage_merge_aliases/query.sql @@ -0,0 +1,59 @@ +-- Tags: no-parallel + +drop table if exists merge; +create table merge +( + dt Date, + colAlias0 Int32, + colAlias1 Int32, + col2 Int32, + colAlias2 UInt32, + col3 Int32, + colAlias3 UInt32 +) +engine = Merge(currentDatabase(), '^alias_'); + +drop table if exists alias_1; +drop table if exists alias_2; + +create table alias_1 +( + dt Date, + col Int32, + colAlias0 UInt32 alias col, + colAlias1 UInt32 alias col3 + colAlias0, + col2 Int32, + colAlias2 Int32 alias colAlias1 + col2 + 10, + col3 Int32, + colAlias3 Int32 alias colAlias2 + colAlias1 + col3 +) +engine = MergeTree() +order by (dt); + +insert into alias_1 (dt, col, col2, col3) values ('2020-02-02', 1, 2, 3); + +select 'alias1'; +select colAlias0, colAlias1, colAlias2, colAlias3 from alias_1; +select colAlias3, colAlias2, colAlias1, colAlias0 from merge; +select * from merge; + +create table alias_2 +( + dt Date, + col Int32, + col2 Int32, + colAlias0 UInt32 alias col, + colAlias3 Int32 alias col3 + colAlias0, + colAlias1 UInt32 alias colAlias0 + col2, + colAlias2 Int32 alias colAlias0 + colAlias1, + col3 Int32 +) +engine = MergeTree() +order by (dt); + +insert into alias_2 (dt, col, col2, col3) values ('2020-02-01', 1, 2, 3); + +select 'alias2'; +select colAlias0, colAlias1, colAlias2, colAlias3 from alias_2; +select colAlias3, colAlias2, colAlias1, colAlias0 from merge order by dt; +select * from merge order by dt; diff --git a/parser/testdata/01925_test_storage_merge_aliases_analyzer/ast.json b/parser/testdata/01925_test_storage_merge_aliases_analyzer/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01925_test_storage_merge_aliases_analyzer/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01925_test_storage_merge_aliases_analyzer/metadata.json b/parser/testdata/01925_test_storage_merge_aliases_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01925_test_storage_merge_aliases_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01925_test_storage_merge_aliases_analyzer/query.sql b/parser/testdata/01925_test_storage_merge_aliases_analyzer/query.sql new file mode 100644 index 000000000..28b9c8650 --- /dev/null +++ b/parser/testdata/01925_test_storage_merge_aliases_analyzer/query.sql @@ -0,0 +1,60 @@ +-- Tags: no-parallel + +drop table if exists merge; +set enable_analyzer = 1; +create table merge +( + dt Date, + colAlias0 Int32, + colAlias1 Int32, + col2 Int32, + colAlias2 UInt32, + col3 Int32, + colAlias3 UInt32 +) +engine = Merge(currentDatabase(), '^alias_'); + +drop table if exists alias_1; +drop table if exists alias_2; + +create table alias_1 +( + dt Date, + col Int32, + colAlias0 UInt32 alias col, + colAlias1 UInt32 alias col3 + colAlias0, + col2 Int32, + colAlias2 Int32 alias colAlias1 + col2 + 10, + col3 Int32, + 
colAlias3 Int32 alias colAlias2 + colAlias1 + col3 +) +engine = MergeTree() +order by (dt); + +insert into alias_1 (dt, col, col2, col3) values ('2020-02-02', 1, 2, 3); + +select 'alias1'; +select colAlias0, colAlias1, colAlias2, colAlias3 from alias_1; +select colAlias3, colAlias2, colAlias1, colAlias0 from merge; +select * from merge; + +create table alias_2 +( + dt Date, + col Int32, + col2 Int32, + colAlias0 UInt32 alias col, + colAlias3 Int32 alias col3 + colAlias0, + colAlias1 UInt32 alias colAlias0 + col2, + colAlias2 Int32 alias colAlias0 + colAlias1, + col3 Int32 +) +engine = MergeTree() +order by (dt); + +insert into alias_2 (dt, col, col2, col3) values ('2020-02-01', 1, 2, 3); + +select 'alias2'; +select colAlias0, colAlias1, colAlias2, colAlias3 from alias_2; +select colAlias3, colAlias2, colAlias1, colAlias0 from merge order by dt; +select * from merge order by dt; diff --git a/parser/testdata/01926_bin_unbin/ast.json b/parser/testdata/01926_bin_unbin/ast.json new file mode 100644 index 000000000..4f4bb9d08 --- /dev/null +++ b/parser/testdata/01926_bin_unbin/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.00132883, + "rows_read": 7, + "bytes_read": 250 + } +} diff --git a/parser/testdata/01926_bin_unbin/metadata.json b/parser/testdata/01926_bin_unbin/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01926_bin_unbin/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01926_bin_unbin/query.sql b/parser/testdata/01926_bin_unbin/query.sql new file mode 100644 index 000000000..e112f8bd8 --- /dev/null +++ b/parser/testdata/01926_bin_unbin/query.sql @@ -0,0 +1,45 @@ +select bin(''); +select bin(0); +select bin(1); +select bin(10); +select bin(127); +select bin(255); +select bin(256); +select bin(511); +select bin(512); +select bin('0'); +select bin('10'); +select bin('测试'); +select bin(toFixedString('测试', 10)); +select bin(toFloat32(1.2)); +select bin(toFloat64(1.2)); +select bin(toDecimal32(1.2, 8)); +select bin(toDecimal64(1.2, 17)); +select bin('12332424'); +select bin(materialize('12332424')); +select bin(toNullable(materialize('12332424'))); +select bin(toLowCardinality(materialize('12332424'))); + +select unbin(''); +select unbin('0') == '\0'; +select unbin('00110000'); -- 0 +select unbin('0011000100110000'); -- 10 +select unbin('111001101011010110001011111010001010111110010101'); -- 测试 +select unbin(materialize('00110000')); +select unbin(toNullable(materialize('00110000'))); +select unbin(toLowCardinality(materialize('00110000'))); + +select unbin(bin('')) == ''; +select bin(unbin('')) == ''; +select bin(unbin('0')) == '00000000'; + +-- hex and bin consistent for corner cases +select hex('') == bin(''); +select unhex('') == unbin(''); +select unhex('0') == unbin('0'); + +-- hex and bin support AggregateFunction +select hex(sumState(number)) == hex(toString(sumState(number))) from numbers(10); +select hex(avgState(number)) == hex(toString(avgState(number))) from numbers(99); +select hex(avgState(number)) from numbers(10); +select 
bin(avgState(number)) from numbers(10); diff --git a/parser/testdata/01926_date_date_time_supertype/ast.json b/parser/testdata/01926_date_date_time_supertype/ast.json new file mode 100644 index 000000000..1b8a7a723 --- /dev/null +++ b/parser/testdata/01926_date_date_time_supertype/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Array'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.00111874, + "rows_read": 5, + "bytes_read": 176 + } +} diff --git a/parser/testdata/01926_date_date_time_supertype/metadata.json b/parser/testdata/01926_date_date_time_supertype/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01926_date_date_time_supertype/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01926_date_date_time_supertype/query.sql b/parser/testdata/01926_date_date_time_supertype/query.sql new file mode 100644 index 000000000..756fd04a0 --- /dev/null +++ b/parser/testdata/01926_date_date_time_supertype/query.sql @@ -0,0 +1,24 @@ +SELECT 'Array'; + +SELECT toTypeName([toDate('2000-01-01'), toDateTime('2000-01-01', 'Asia/Istanbul')]); +SELECT toTypeName([toDate('2000-01-01'), toDateTime('2000-01-01', 'Asia/Istanbul'), toDateTime64('2000-01-01', 5, 'Asia/Istanbul')]); +SELECT toTypeName([toDate('2000-01-01'), toDateTime('2000-01-01', 'Asia/Istanbul'), toDateTime64('2000-01-01', 5, 'Asia/Istanbul'), toDateTime64('2000-01-01', 6, 'Asia/Istanbul')]); + +DROP TABLE IF EXISTS predicate_table; +CREATE TABLE predicate_table (value UInt8) ENGINE=TinyLog; + +INSERT INTO predicate_table VALUES (0), (1); + +SELECT 'If'; + +WITH toDate('2000-01-01') as a, toDateTime('2000-01-01', 'Asia/Istanbul') as b +SELECT if(value, b, a) as result, toTypeName(result) +FROM predicate_table; + +WITH toDateTime('2000-01-01', 'Asia/Istanbul') as a, toDateTime64('2000-01-01', 5, 'Asia/Istanbul') as b +SELECT if(value, b, a) as result, toTypeName(result) +FROM predicate_table; + +SELECT 'Cast'; +SELECT CAST(toDate('2000-01-01') AS DateTime('UTC')) AS x, toTypeName(x); +SELECT CAST(toDate('2000-01-01') AS DateTime64(5, 'UTC')) AS x, toTypeName(x); diff --git a/parser/testdata/01926_order_by_desc_limit/ast.json b/parser/testdata/01926_order_by_desc_limit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01926_order_by_desc_limit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01926_order_by_desc_limit/metadata.json b/parser/testdata/01926_order_by_desc_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01926_order_by_desc_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01926_order_by_desc_limit/query.sql b/parser/testdata/01926_order_by_desc_limit/query.sql new file mode 100644 index 000000000..6e3c41151 --- /dev/null +++ b/parser/testdata/01926_order_by_desc_limit/query.sql @@ -0,0 +1,28 @@ +-- Tags: no-random-settings + +DROP TABLE IF EXISTS order_by_desc; + +SET enable_filesystem_cache=0; +SET read_through_distributed_cache=0; + +CREATE TABLE order_by_desc (u UInt32, s String) +ENGINE MergeTree ORDER BY u PARTITION BY u % 100 +SETTINGS index_granularity = 1024, index_granularity_bytes = '10Mi'; + 
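+-- Descriptive note (editorial annotation, not part of the upstream test): each row
+-- inserted below carries a ~1 KiB string, so with index_granularity = 1024 an
+-- ORDER BY u LIMIT 10 read in either direction should touch only a small fraction
+-- of the table; the read_rows < 110000 check at the end relies on that.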
+INSERT INTO order_by_desc SELECT number, repeat('a', 1024) FROM numbers(1024 * 300); +OPTIMIZE TABLE order_by_desc FINAL; + +SELECT s FROM order_by_desc ORDER BY u DESC LIMIT 10 FORMAT Null +SETTINGS max_memory_usage = '400M'; + +SELECT s FROM order_by_desc ORDER BY u LIMIT 10 FORMAT Null +SETTINGS max_memory_usage = '400M'; + +SYSTEM FLUSH LOGS query_log; + +SELECT read_rows < 110000 FROM system.query_log +WHERE type = 'QueryFinish' AND current_database = currentDatabase() +AND event_date >= yesterday() +AND lower(query) LIKE lower('SELECT s FROM order_by_desc ORDER BY u%'); + +DROP TABLE IF EXISTS order_by_desc; diff --git a/parser/testdata/01926_union_all_schmak/ast.json b/parser/testdata/01926_union_all_schmak/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01926_union_all_schmak/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01926_union_all_schmak/metadata.json b/parser/testdata/01926_union_all_schmak/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01926_union_all_schmak/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01926_union_all_schmak/query.sql b/parser/testdata/01926_union_all_schmak/query.sql new file mode 100644 index 000000000..feab81cca --- /dev/null +++ b/parser/testdata/01926_union_all_schmak/query.sql @@ -0,0 +1,8 @@ +SELECT * FROM ( + SELECT 1 AS a, 2 AS b FROM system.one + JOIN system.one USING dummy + UNION ALL + SELECT 3 AS a, 4 AS b FROM system.one +) +WHERE a != 10 +ORDER BY a, b; diff --git a/parser/testdata/01927_query_views_log_current_database/ast.json b/parser/testdata/01927_query_views_log_current_database/ast.json new file mode 100644 index 000000000..aafc327d3 --- /dev/null +++ b/parser/testdata/01927_query_views_log_current_database/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00119601, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01927_query_views_log_current_database/metadata.json b/parser/testdata/01927_query_views_log_current_database/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01927_query_views_log_current_database/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01927_query_views_log_current_database/query.sql b/parser/testdata/01927_query_views_log_current_database/query.sql new file mode 100644 index 000000000..f2ada78e9 --- /dev/null +++ b/parser/testdata/01927_query_views_log_current_database/query.sql @@ -0,0 +1,138 @@ +SET log_queries=0; +SET log_query_threads=0; + +-- SETUP TABLES +CREATE TABLE table_a (a String, b Int64) ENGINE = MergeTree ORDER BY b; +CREATE TABLE table_b (a Float64, count Int64) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE table_c (a Float64) ENGINE = MergeTree ORDER BY a; + +CREATE TABLE table_d (a Float64, count Int64) ENGINE MergeTree ORDER BY a; +CREATE TABLE table_e (a Float64, count Int64) ENGINE MergeTree ORDER BY a; +CREATE TABLE table_f (a Float64, count Int64) ENGINE MergeTree ORDER BY a; + +-- SETUP MATERIALIZED VIEWS +CREATE MATERIALIZED VIEW matview_a_to_b TO table_b AS SELECT toFloat64(a) AS a, b + sleepEachRow(0.000001) AS count FROM table_a; +CREATE MATERIALIZED VIEW matview_b_to_c TO table_c AS SELECT SUM(a + sleepEachRow(0.000002)) as a FROM table_b; +CREATE MATERIALIZED VIEW matview_join_d_e TO table_f AS 
SELECT table_d.a as a, table_e.count + sleepEachRow(0.000003) as count FROM table_d LEFT JOIN table_e ON table_d.a = table_e.a; + +-- ENABLE LOGS +SET parallel_view_processing=0; +SET log_query_views=1; +SET log_queries_min_type='QUERY_FINISH'; +SET log_queries=1; + +-- INSERT 1 +INSERT INTO table_a SELECT '111', * FROM numbers(100); + +-- INSERT 2 +INSERT INTO table_d SELECT 0.5, * FROM numbers(50); + +SYSTEM FLUSH LOGS query_log, query_views_log; + + +-- CHECK LOGS OF INSERT 1 +SELECT + 'Query log rows' as stage, + read_rows, + written_rows, + arraySort(databases) as databases, + arraySort(tables) as tables, + arraySort(views) as views, + ProfileEvents['SleepFunctionCalls'] as sleep_calls, + ProfileEvents['SleepFunctionMicroseconds'] as sleep_us, + ProfileEvents['SelectedRows'] as profile_select_rows, + ProfileEvents['SelectedBytes'] as profile_select_bytes, + ProfileEvents['InsertedRows'] as profile_insert_rows, + ProfileEvents['InsertedBytes'] as profile_insert_bytes +FROM system.query_log +WHERE query like '-- INSERT 1%INSERT INTO table_a%' + AND current_database = currentDatabase() + AND event_date >= yesterday() +FORMAT Vertical; + +SELECT + 'Depending views' as stage, + view_name, + view_type, + status, + view_target, + view_query, + read_rows, + written_rows, + ProfileEvents['SleepFunctionCalls'] as sleep_calls, + ProfileEvents['SleepFunctionMicroseconds'] as sleep_us, + ProfileEvents['SelectedRows'] as profile_select_rows, + ProfileEvents['SelectedBytes'] as profile_select_bytes, + ProfileEvents['InsertedRows'] as profile_insert_rows, + ProfileEvents['InsertedBytes'] as profile_insert_bytes +FROM system.query_views_log +WHERE initial_query_id = + ( + SELECT initial_query_id + FROM system.query_log + WHERE query like '-- INSERT 1%INSERT INTO table_a%' + AND current_database = currentDatabase() + AND event_date >= yesterday() + LIMIT 1 + ) +ORDER BY view_name +FORMAT Vertical; + +-- CHECK LOGS OF INSERT 2 +SELECT + 'Query log rows 2' as stage, + read_rows, + written_rows, + arraySort(databases) as databases, + arraySort(tables) as tables, + arraySort(views) as views, + ProfileEvents['SleepFunctionCalls'] as sleep_calls, + ProfileEvents['SleepFunctionMicroseconds'] as sleep_us, + ProfileEvents['SelectedRows'] as profile_select_rows, + ProfileEvents['SelectedBytes'] as profile_select_bytes, + ProfileEvents['InsertedRows'] as profile_insert_rows, + ProfileEvents['InsertedBytes'] as profile_insert_bytes +FROM system.query_log +WHERE query like '-- INSERT 2%INSERT INTO table_d%' + AND current_database = currentDatabase() + AND event_date >= yesterday() +FORMAT Vertical; + +SELECT + 'Depending views 2' as stage, + view_name, + view_type, + status, + view_target, + view_query, + read_rows, + written_rows, + ProfileEvents['SleepFunctionCalls'] as sleep_calls, + ProfileEvents['SleepFunctionMicroseconds'] as sleep_us, + ProfileEvents['SelectedRows'] as profile_select_rows, + ProfileEvents['SelectedBytes'] as profile_select_bytes, + ProfileEvents['InsertedRows'] as profile_insert_rows, + ProfileEvents['InsertedBytes'] as profile_insert_bytes +FROM system.query_views_log +WHERE initial_query_id = + ( + SELECT initial_query_id + FROM system.query_log + WHERE query like '-- INSERT 2%INSERT INTO table_d%' + AND current_database = currentDatabase() + AND event_date >= yesterday() + LIMIT 1 + ) +ORDER BY view_name +FORMAT Vertical; + +-- TEARDOWN +DROP TABLE matview_a_to_b; +DROP TABLE matview_b_to_c; +DROP TABLE matview_join_d_e; +DROP TABLE table_f; +DROP TABLE table_e; +DROP TABLE table_d; +DROP 
TABLE table_c; +DROP TABLE table_b; +DROP TABLE table_a; diff --git a/parser/testdata/01930_optimize_skip_unused_shards_rewrite_in/ast.json b/parser/testdata/01930_optimize_skip_unused_shards_rewrite_in/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01930_optimize_skip_unused_shards_rewrite_in/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01930_optimize_skip_unused_shards_rewrite_in/metadata.json b/parser/testdata/01930_optimize_skip_unused_shards_rewrite_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01930_optimize_skip_unused_shards_rewrite_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01930_optimize_skip_unused_shards_rewrite_in/query.sql b/parser/testdata/01930_optimize_skip_unused_shards_rewrite_in/query.sql new file mode 100644 index 000000000..afc2d014f --- /dev/null +++ b/parser/testdata/01930_optimize_skip_unused_shards_rewrite_in/query.sql @@ -0,0 +1,65 @@ +-- Tags: shard + +set optimize_skip_unused_shards=1; +set force_optimize_skip_unused_shards=2; + +create temporary table data (id UInt64) engine=Memory() as with [ + 0, + 1, + 0x7f, 0x80, 0xff, + 0x7fff, 0x8000, 0xffff, + 0x7fffffff, 0x80000000, 0xffffffff, + 0x7fffffffffffffff, 0x8000000000000000, 0xffffffffffffffff +] as values select arrayJoin(values) id; + +-- { echoOn } + +-- Int8, Int8 +select _shard_num, * from remote('127.{1..4}', view(select toInt8(id) id from data), toInt8(id)) where id in (0, 1, 0x7f) order by _shard_num, id; +-- Int8, UInt8 +select _shard_num, * from remote('127.{1..4}', view(select toInt8(id) id from data), toUInt8(id)) where id in (0, 1, 0x7f) order by _shard_num, id; +-- UInt8, UInt8 +select _shard_num, * from remote('127.{1..4}', view(select toUInt8(id) id from data), toUInt8(id)) where id in (0, 1, 0x7f, 0x80, 0xff) order by _shard_num, id; +-- UInt8, Int8 +select _shard_num, * from remote('127.{1..4}', view(select toUInt8(id) id from data), toInt8(id)) where id in (0, 1, 0x7f, 0x80, 0xff) order by _shard_num, id; + +-- Int16, Int16 +select _shard_num, * from remote('127.{1..4}', view(select toInt16(id) id from data), toInt16(id)) where id in (0, 1, 0x7fff) order by _shard_num, id; +-- Int16, UInt16 +select _shard_num, * from remote('127.{1..4}', view(select toInt16(id) id from data), toUInt16(id)) where id in (0, 1, 0x7fff) order by _shard_num, id; +-- UInt16, UInt16 +select _shard_num, * from remote('127.{1..4}', view(select toUInt16(id) id from data), toUInt16(id)) where id in (0, 1, 0x7fff, 0x8000, 0xffff) order by _shard_num, id; +-- UInt16, Int16 +select _shard_num, * from remote('127.{1..4}', view(select toUInt16(id) id from data), toInt16(id)) where id in (0, 1, 0x7fff, 0x8000, 0xffff) order by _shard_num, id; + +-- Int32, Int32 +select _shard_num, * from remote('127.{1..4}', view(select toInt32(id) id from data), toInt32(id)) where id in (0, 1, 0x7fffffff) order by _shard_num, id; +-- Int32, UInt32 +select _shard_num, * from remote('127.{1..4}', view(select toInt32(id) id from data), toUInt32(id)) where id in (0, 1, 0x7fffffff) order by _shard_num, id; +-- UInt32, UInt32 +select _shard_num, * from remote('127.{1..4}', view(select toUInt32(id) id from data), toUInt32(id)) where id in (0, 1, 0x7fffffff, 0x80000000, 0xffffffff) order by _shard_num, id; +-- UInt32, Int32 +select _shard_num, * from remote('127.{1..4}', view(select toUInt32(id) id from data), toInt32(id)) where id in (0, 1, 0x7fffffff, 0x80000000, 0xffffffff) order by 
_shard_num, id; + +-- Int64, Int64 +select _shard_num, * from remote('127.{1..4}', view(select toInt64(id) id from data), toInt64(id)) where id in (0, 1, 0x7fffffffffffffff) order by _shard_num, id; +-- Int64, UInt64 +select _shard_num, * from remote('127.{1..4}', view(select toInt64(id) id from data), toUInt64(id)) where id in (0, 1, 0x7fffffffffffffff) order by _shard_num, id; +-- UInt64, UInt64 +select _shard_num, * from remote('127.{1..4}', view(select toUInt64(id) id from data), toUInt64(id)) where id in (0, 1, 0x7fffffffffffffff, 0x8000000000000000, 0xffffffffffffffff) order by _shard_num, id; +-- UInt64, Int64 +select _shard_num, * from remote('127.{1..4}', view(select toUInt64(id) id from data), toInt64(id)) where id in (0, 1, 0x7fffffffffffffff, 0x8000000000000000, 0xffffffffffffffff) order by _shard_num, id; + +-- modulo(Int8) +select distinct _shard_num, * from remote('127.{1..4}', view(select toInt16(id) id from data), toInt8(id)%255) where id in (-1) order by _shard_num, id; +-- modulo(UInt8) +select distinct _shard_num, * from remote('127.{1..4}', view(select toInt16(id) id from data), toUInt8(id)%255) where id in (-1) order by _shard_num, id; + +-- { echoOff } + +-- those two had been reported initially by amosbird: +-- (the problem is that murmurHash3_32() returns different value to toInt64(1) and toUInt64(1)) +---- error for local node +select * from remote('127.{1..4}', view(select number id from numbers(0)), bitAnd(murmurHash3_32(id), 2147483647)) where id in (2, 3); +---- error for remote node +select * from remote('127.{1..8}', view(select number id from numbers(0)), bitAnd(murmurHash3_32(id), 2147483647)) where id in (2, 3); diff --git a/parser/testdata/01931_storage_merge_no_columns/ast.json b/parser/testdata/01931_storage_merge_no_columns/ast.json new file mode 100644 index 000000000..9cbf7be16 --- /dev/null +++ b/parser/testdata/01931_storage_merge_no_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data (children 1)" + }, + { + "explain": " Identifier data" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001312848, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/01931_storage_merge_no_columns/metadata.json b/parser/testdata/01931_storage_merge_no_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01931_storage_merge_no_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01931_storage_merge_no_columns/query.sql b/parser/testdata/01931_storage_merge_no_columns/query.sql new file mode 100644 index 000000000..fcf9dbfa1 --- /dev/null +++ b/parser/testdata/01931_storage_merge_no_columns/query.sql @@ -0,0 +1,5 @@ +drop table if exists data; +create table data (key Int) engine=MergeTree() order by key; +select 1 from merge(currentDatabase(), '^data$') prewhere _table in (NULL); -- { serverError ILLEGAL_PREWHERE } +select 1 from merge(currentDatabase(), '^data$') where _table in (NULL); +drop table data; diff --git a/parser/testdata/01932_alter_index_with_order/ast.json b/parser/testdata/01932_alter_index_with_order/ast.json new file mode 100644 index 000000000..ba008dd02 --- /dev/null +++ b/parser/testdata/01932_alter_index_with_order/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alter_index_test (children 1)" + }, + { + "explain": " Identifier alter_index_test" 
+ } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001224983, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/01932_alter_index_with_order/metadata.json b/parser/testdata/01932_alter_index_with_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01932_alter_index_with_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01932_alter_index_with_order/query.sql b/parser/testdata/01932_alter_index_with_order/query.sql new file mode 100644 index 000000000..0f2953b53 --- /dev/null +++ b/parser/testdata/01932_alter_index_with_order/query.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS alter_index_test; + +CREATE TABLE alter_index_test ( + a UInt32, + b Date, + c UInt32, + d UInt32, + INDEX index_a a TYPE set(0) GRANULARITY 1 +) +ENGINE = MergeTree() +ORDER BY tuple(); + +SELECT * FROM system.data_skipping_indices WHERE table = 'alter_index_test' AND database = currentDatabase(); + +ALTER TABLE alter_index_test ADD INDEX index_b b type minmax granularity 1 FIRST; + +ALTER TABLE alter_index_test ADD INDEX index_c c type set(0) granularity 2 AFTER index_b; + +ALTER TABLE alter_index_test ADD INDEX index_d d type set(0) granularity 1; + +SELECT * FROM system.data_skipping_indices WHERE table = 'alter_index_test' AND database = currentDatabase(); + +DETACH TABLE alter_index_test; +ATTACH TABLE alter_index_test; + +SELECT * FROM system.data_skipping_indices WHERE table = 'alter_index_test' AND database = currentDatabase(); + +DROP TABLE IF EXISTS alter_index_test; diff --git a/parser/testdata/01932_global_in_function/ast.json b/parser/testdata/01932_global_in_function/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01932_global_in_function/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01932_global_in_function/metadata.json b/parser/testdata/01932_global_in_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01932_global_in_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01932_global_in_function/query.sql b/parser/testdata/01932_global_in_function/query.sql new file mode 100644 index 000000000..ccae1e17a --- /dev/null +++ b/parser/testdata/01932_global_in_function/query.sql @@ -0,0 +1,4 @@ +-- Tags: global + +select number from cluster(test_cluster_two_shards_localhost, numbers(1)) where number global in tuple(0, 1, 2, 3); +select number from cluster(test_cluster_two_shards_localhost, numbers(1)) where number global in array(0, 1, 2, 3); diff --git a/parser/testdata/01932_null_valid_identifier/ast.json b/parser/testdata/01932_null_valid_identifier/ast.json new file mode 100644 index 000000000..0ec4b42c5 --- /dev/null +++ b/parser/testdata/01932_null_valid_identifier/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier null" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '127.0.0.2'" 
+ }, + { + "explain": "        Function view (children 1)" + }, + { + "explain": "         ExpressionList (children 1)" + }, + { + "explain": "          SelectWithUnionQuery (children 1)" + }, + { + "explain": "           ExpressionList (children 1)" + }, + { + "explain": "            SelectQuery (children 1)" + }, + { + "explain": "             ExpressionList (children 1)" + }, + { + "explain": "              Literal UInt64_1 (alias null)" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001412055, + "rows_read": 18, + "bytes_read": 756 + } +} diff --git a/parser/testdata/01932_null_valid_identifier/metadata.json b/parser/testdata/01932_null_valid_identifier/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01932_null_valid_identifier/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01932_null_valid_identifier/query.sql b/parser/testdata/01932_null_valid_identifier/query.sql new file mode 100644 index 000000000..31f1a7716 --- /dev/null +++ b/parser/testdata/01932_null_valid_identifier/query.sql @@ -0,0 +1,3 @@ +SELECT `null` FROM remote('127.0.0.2', view(SELECT 1 AS `null`)); +SELECT `NULL` FROM remote('127.0.0.2', view(SELECT 1 AS `NULL`)); +SELECT `nULl`, null FROM remote('127.0.0.2', view(SELECT 1 AS `nULl`)); diff --git a/parser/testdata/01932_remote_sharding_key_column/ast.json b/parser/testdata/01932_remote_sharding_key_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01932_remote_sharding_key_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01932_remote_sharding_key_column/metadata.json b/parser/testdata/01932_remote_sharding_key_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01932_remote_sharding_key_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01932_remote_sharding_key_column/query.sql b/parser/testdata/01932_remote_sharding_key_column/query.sql new file mode 100644 index 000000000..20b86eacd --- /dev/null +++ b/parser/testdata/01932_remote_sharding_key_column/query.sql @@ -0,0 +1,17 @@ +-- Tags: shard + +-- regression test for the following query: +-- +-- select * from remote('127.1', system.one, dummy) +-- +-- that produced the following error before: +-- +-- Unknown column: dummy, there are only columns . +-- +-- NOTE: wrapping the column into any function worked even before the fix.
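+-- The queries below exercise the sharding key both as the bare column and wrapped
+-- in identity(), over system.one directly, over view(), and with one- and two-host
+-- address patterns.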
+select * from remote('127.1', system.one, dummy) format Null; +select * from remote('127.1', system.one, identity(dummy)) format Null; +select * from remote('127.1', view(select * from system.one), identity(dummy)) format Null; +select * from remote('127.{1,2}', view(select * from system.one), identity(dummy)) format Null; +select * from remote('127.1', view(select * from system.one), dummy) format Null; +select * from remote('127.{1,2}', view(select * from system.one), dummy) format Null; diff --git a/parser/testdata/01933_invalid_date/ast.json b/parser/testdata/01933_invalid_date/ast.json new file mode 100644 index 000000000..85580bb79 --- /dev/null +++ b/parser/testdata/01933_invalid_date/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '07-08-2019'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001124236, + "rows_read": 7, + "bytes_read": 263 + } +} diff --git a/parser/testdata/01933_invalid_date/metadata.json b/parser/testdata/01933_invalid_date/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01933_invalid_date/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01933_invalid_date/query.sql b/parser/testdata/01933_invalid_date/query.sql new file mode 100644 index 000000000..26beea4d5 --- /dev/null +++ b/parser/testdata/01933_invalid_date/query.sql @@ -0,0 +1,10 @@ +SELECT toDate('07-08-2019'); -- { serverError CANNOT_PARSE_DATE } +SELECT toDate('2019-0708'); -- { serverError CANNOT_PARSE_DATE } +SELECT toDate('201907-08'); -- { serverError CANNOT_PARSE_DATE } +SELECT toDate('2019^7^8'); + +CREATE TEMPORARY TABLE test (d Date); +INSERT INTO test VALUES ('2018-01-01'); + +SELECT * FROM test WHERE d >= '07-08-2019'; -- { serverError CANNOT_PARSE_DATE } +SELECT * FROM test WHERE d >= '2019-07-08'; diff --git a/parser/testdata/01934_constexpr_aggregate_function_parameters/ast.json b/parser/testdata/01934_constexpr_aggregate_function_parameters/ast.json new file mode 100644 index 000000000..5bdfb6b64 --- /dev/null +++ b/parser/testdata/01934_constexpr_aggregate_function_parameters/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function groupArray (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 
UInt64_10" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001191605, + "rows_read": 18, + "bytes_read": 703 + } +} diff --git a/parser/testdata/01934_constexpr_aggregate_function_parameters/metadata.json b/parser/testdata/01934_constexpr_aggregate_function_parameters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01934_constexpr_aggregate_function_parameters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01934_constexpr_aggregate_function_parameters/query.sql b/parser/testdata/01934_constexpr_aggregate_function_parameters/query.sql new file mode 100644 index 000000000..3146c01ee --- /dev/null +++ b/parser/testdata/01934_constexpr_aggregate_function_parameters/query.sql @@ -0,0 +1,10 @@ +SELECT groupArray(2 + 3)(number) FROM numbers(10); +SELECT groupArray('5'::UInt8)(number) FROM numbers(10); + +SELECT groupArray(NULL)(number) FROM numbers(10); -- { serverError BAD_ARGUMENTS } +SELECT groupArray(NULL + NULL)(number) FROM numbers(10); -- { serverError BAD_ARGUMENTS } +SELECT groupArray([])(number) FROM numbers(10); -- { serverError BAD_ARGUMENTS } +SELECT groupArray(throwIf(1))(number) FROM numbers(10); -- { serverError BAD_ARGUMENTS, 134 } + +-- Not the best error message, can be improved. +SELECT groupArray(number)(number) FROM numbers(10); -- { serverError BAD_ARGUMENTS, 47 } diff --git a/parser/testdata/01936_empty_function_support_uuid/ast.json b/parser/testdata/01936_empty_function_support_uuid/ast.json new file mode 100644 index 000000000..b970b84b6 --- /dev/null +++ b/parser/testdata/01936_empty_function_support_uuid/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function empty (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUUID (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '00000000-0000-0000-0000-000000000000'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001289278, + "rows_read": 9, + "bytes_read": 374 + } +} diff --git a/parser/testdata/01936_empty_function_support_uuid/metadata.json b/parser/testdata/01936_empty_function_support_uuid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01936_empty_function_support_uuid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01936_empty_function_support_uuid/query.sql b/parser/testdata/01936_empty_function_support_uuid/query.sql new file mode 100644 index 000000000..c67f38b77 --- /dev/null +++ b/parser/testdata/01936_empty_function_support_uuid/query.sql @@ -0,0 +1,35 @@ +SELECT empty(toUUID('00000000-0000-0000-0000-000000000000')); +SELECT notEmpty(toUUID('00000000-0000-0000-0000-000000000000')); +SELECT uniqIf(uuid, empty(uuid)), uniqIf(uuid, notEmpty(uuid)) +FROM +( + SELECT toUUID('00000000-0000-0000-0000-000000000002') AS uuid + UNION ALL + SELECT toUUID('00000000-0000-0000-0000-000000000000') AS uuid + UNION ALL + SELECT toUUID('00000000-0000-0000-0000-000000000001') AS uuid +); + +DROP TABLE IF EXISTS users; +DROP TABLE IF EXISTS orders; + +CREATE TABLE users (user_id UUID) ENGINE = Memory; +CREATE TABLE orders (order_id UUID, user_id UUID) ENGINE = Memory; 
+ +INSERT INTO users VALUES ('00000000-0000-0000-0000-000000000001'); +INSERT INTO users VALUES ('00000000-0000-0000-0000-000000000002'); +INSERT INTO orders VALUES ('00000000-0000-0000-0000-000000000003', '00000000-0000-0000-0000-000000000001'); + +SELECT + uniq(user_id) AS users, + uniqIf(order_id, notEmpty(order_id)) AS orders +FROM +( + SELECT * FROM users +) t1 ALL LEFT JOIN ( + SELECT * FROM orders +) t2 USING (user_id); + +DROP TABLE users; +DROP TABLE orders; + diff --git a/parser/testdata/01936_quantiles_cannot_return_null/ast.json b/parser/testdata/01936_quantiles_cannot_return_null/ast.json new file mode 100644 index 000000000..4ef3dce06 --- /dev/null +++ b/parser/testdata/01936_quantiles_cannot_return_null/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001204985, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01936_quantiles_cannot_return_null/metadata.json b/parser/testdata/01936_quantiles_cannot_return_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01936_quantiles_cannot_return_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01936_quantiles_cannot_return_null/query.sql b/parser/testdata/01936_quantiles_cannot_return_null/query.sql new file mode 100644 index 000000000..46fd8adf5 --- /dev/null +++ b/parser/testdata/01936_quantiles_cannot_return_null/query.sql @@ -0,0 +1,9 @@ +set aggregate_functions_null_for_empty=0; + +SELECT quantiles(0.95)(x) FROM (SELECT 1 x WHERE 0); +SELECT quantiles(0.95)(number) FROM (SELECT number FROM numbers(10) WHERE number > 10); + +set aggregate_functions_null_for_empty=1; + +SELECT quantiles(0.95)(x) FROM (SELECT 1 x WHERE 0); +SELECT quantiles(0.95)(number) FROM (SELECT number FROM numbers(10) WHERE number > 10); diff --git a/parser/testdata/01936_three_parts_identifiers_in_wrong_places/ast.json b/parser/testdata/01936_three_parts_identifiers_in_wrong_places/ast.json new file mode 100644 index 000000000..81de5a33b --- /dev/null +++ b/parser/testdata/01936_three_parts_identifiers_in_wrong_places/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001057898, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01936_three_parts_identifiers_in_wrong_places/metadata.json b/parser/testdata/01936_three_parts_identifiers_in_wrong_places/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01936_three_parts_identifiers_in_wrong_places/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01936_three_parts_identifiers_in_wrong_places/query.sql b/parser/testdata/01936_three_parts_identifiers_in_wrong_places/query.sql new file mode 100644 index 000000000..4efbb4615 --- /dev/null +++ b/parser/testdata/01936_three_parts_identifiers_in_wrong_places/query.sql @@ -0,0 +1,9 @@ +SET enable_analyzer = 1; + +SELECT dictGet(t.nest.a, concat(currentDatabase(), '.dict.dict'), 's', number) FROM numbers(5); -- { serverError INVALID_IDENTIFIER } + +SELECT dictGetFloat64(t.b.s, 'database_for_dict.dict1', dictGetFloat64('Ta\0', toUInt64('databas\0_for_dict.dict1databas\0_for_dict.dict1', dictGetFloat64('', '', toUInt64(1048577), toDate(NULL)), NULL), toDate(dictGetFloat64(257, 
'database_for_dict.dict1database_for_dict.dict1', '', toUInt64(NULL), 2, toDate(NULL)), '2019-05-2\0')), NULL, toUInt64(dictGetFloat64('', '', toUInt64(-9223372036854775808), toDate(NULL)), NULL)); -- { serverError INVALID_IDENTIFIER } + +SELECT NULL AND (2147483648 AND NULL) AND -2147483647, toUUID(((1048576 AND NULL) AND (2147483647 AND 257 AND NULL AND -2147483649) AND NULL) IN (test_01103.t1_distr.id), '00000000-e1fe-11e\0-bb8f\0853d60c00749'), stringToH3('89184926cc3ffff89184926cc3ffff89184926cc3ffff89184926cc3ffff89184926cc3ffff89184926cc3ffff89184926cc3ffff89184926cc3ffff'); -- { serverError INVALID_IDENTIFIER } + +SELECT 'still alive'; diff --git a/parser/testdata/01937_nested_chinese/ast.json b/parser/testdata/01937_nested_chinese/ast.json new file mode 100644 index 000000000..a016ac986 --- /dev/null +++ b/parser/testdata/01937_nested_chinese/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 2)" + }, + { + "explain": " Identifier test" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration id (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration products (children 1)" + }, + { + "explain": " DataType Nested (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " NameTypePair 产品 (children 1)" + }, + { + "explain": " DataType Array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " NameTypePair 销量 (children 1)" + }, + { + "explain": " DataType Array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType Int32" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001374839, + "rows_read": 17, + "bytes_read": 664 + } +} diff --git a/parser/testdata/01937_nested_chinese/metadata.json b/parser/testdata/01937_nested_chinese/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01937_nested_chinese/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01937_nested_chinese/query.sql b/parser/testdata/01937_nested_chinese/query.sql new file mode 100644 index 000000000..94c659848 --- /dev/null +++ b/parser/testdata/01937_nested_chinese/query.sql @@ -0,0 +1,8 @@ +CREATE TEMPORARY TABLE test (`id` String, `products` Nested (`产品` Array(String), `销量` Array(Int32))); + +DESCRIBE test; +DESCRIBE (SELECT * FROM test); +DESCRIBE (SELECT * FROM test ARRAY JOIN products); +DESCRIBE (SELECT p.`产品`, p.`销量` FROM test ARRAY JOIN products AS p); +SELECT * FROM test ARRAY JOIN products; +SELECT count() FROM (SELECT * FROM test ARRAY JOIN products); diff --git a/parser/testdata/01938_joins_identifiers/ast.json b/parser/testdata/01938_joins_identifiers/ast.json new file mode 100644 index 000000000..1b012e846 --- /dev/null +++ b/parser/testdata/01938_joins_identifiers/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery \/t0 (children 1)" + }, + { + "explain": " Identifier \/t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001230656, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/01938_joins_identifiers/metadata.json b/parser/testdata/01938_joins_identifiers/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01938_joins_identifiers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01938_joins_identifiers/query.sql b/parser/testdata/01938_joins_identifiers/query.sql new file mode 100644 index 000000000..b518080b1 --- /dev/null +++ b/parser/testdata/01938_joins_identifiers/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS "/t0"; +DROP TABLE IF EXISTS "/t1"; + +create table "/t0" (a Int64, b Int64) engine = MergeTree() partition by a order by a; +create table "/t1" (a Int64, b Int64) engine = MergeTree() partition by a order by a; + +insert into "/t0" values (0, 0); +insert into "/t1" values (0, 1); + +select * from "/t0" join "/t1" using a; + +DROP TABLE "/t0"; +DROP TABLE "/t1"; diff --git a/parser/testdata/01939_type_map_json/ast.json b/parser/testdata/01939_type_map_json/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01939_type_map_json/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01939_type_map_json/metadata.json b/parser/testdata/01939_type_map_json/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01939_type_map_json/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01939_type_map_json/query.sql b/parser/testdata/01939_type_map_json/query.sql new file mode 100644 index 000000000..9a2d8840e --- /dev/null +++ b/parser/testdata/01939_type_map_json/query.sql @@ -0,0 +1,30 @@ +-- Tags: no-fasttest + +SELECT map(1, 2, 3, 4) AS m FORMAT JSONEachRow; +SELECT map(1, 2, 3, 4) AS m, toJSONString(m) AS s, isValidJSON(s); + +SELECT map('key1', number, 'key2', number * 2) AS m FROM numbers(1, 1) FORMAT JSONEachRow; +SELECT map('key1', number, 'key2', number * 2) AS m, toJSONString(m) AS s, isValidJSON(s) FROM numbers(1, 1); + +SELECT map('key1', number, 'key2', number * 2) AS m FROM numbers(1, 1) + FORMAT JSONEachRow + SETTINGS output_format_json_quote_64bit_integers = 0; + +SELECT map('key1', number, 'key2', number * 2) AS m, toJSONString(m) AS s, isValidJSON(s) FROM numbers(1, 1) + SETTINGS output_format_json_quote_64bit_integers = 0; + +SELECT map('2020-10-10'::Date, 'v1', '2020-10-11'::Date, 'v2') AS m FORMAT JSONEachRow; +SELECT map('2020-10-10'::Date, 'v1', '2020-10-11'::Date, 'v2') AS m, toJSONString(m) AS s, isValidJSON(s); + +SELECT map(11::UInt64, 'v1', 22::UInt64, 'v2') AS m FORMAT JSONEachRow; +SELECT map(11::UInt64, 'v1', 22::UInt64, 'v2') AS m, toJSONString(m) AS s, isValidJSON(s); + +SELECT map(11::Int128, 'v1', 22::Int128, 'v2') AS m FORMAT JSONEachRow; +SELECT map(11::Int128, 'v1', 22::Int128, 'v2') AS m, toJSONString(m) AS s, isValidJSON(s); + +CREATE TEMPORARY TABLE map_json (m1 Map(String, UInt64), m2 Map(UInt32, UInt32), m3 Map(Date, String)); + +INSERT INTO map_json FORMAT JSONEachRow {"m1" : {"k1" : 1, "k2" : 2}, "m2" : {"1" : 2, "2" : 3}, "m3" : {"2020-10-10" : "foo"}}; + +SELECT m1, m2, m3 FROM map_json FORMAT JSONEachRow; +SELECT m1, m2, m3 FROM map_json FORMAT JSONEachRow SETTINGS output_format_json_quote_64bit_integers = 0; diff --git a/parser/testdata/01940_custom_tld_sharding_key/ast.json b/parser/testdata/01940_custom_tld_sharding_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01940_custom_tld_sharding_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01940_custom_tld_sharding_key/metadata.json b/parser/testdata/01940_custom_tld_sharding_key/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01940_custom_tld_sharding_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01940_custom_tld_sharding_key/query.sql b/parser/testdata/01940_custom_tld_sharding_key/query.sql new file mode 100644 index 000000000..dd2ec704c --- /dev/null +++ b/parser/testdata/01940_custom_tld_sharding_key/query.sql @@ -0,0 +1,4 @@ +-- Tags: shard + +select * from remote('127.{1,2}', view(select 'foo.com' key), cityHash64(key)) where key = cutToFirstSignificantSubdomainCustom('foo.com', 'public_suffix_list') settings optimize_skip_unused_shards=1, force_optimize_skip_unused_shards=1; +select * from remote('127.{1,2}', view(select 'foo.com' key), cityHash64(key)) where key = cutToFirstSignificantSubdomainCustom('bar.com', 'public_suffix_list') settings optimize_skip_unused_shards=1, force_optimize_skip_unused_shards=1; diff --git a/parser/testdata/01940_pad_string/ast.json b/parser/testdata/01940_pad_string/ast.json new file mode 100644 index 000000000..e5da1f587 --- /dev/null +++ b/parser/testdata/01940_pad_string/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'leftPad'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001326734, + "rows_read": 5, + "bytes_read": 178 + } +} diff --git a/parser/testdata/01940_pad_string/metadata.json b/parser/testdata/01940_pad_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01940_pad_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01940_pad_string/query.sql b/parser/testdata/01940_pad_string/query.sql new file mode 100644 index 000000000..45f590b1b --- /dev/null +++ b/parser/testdata/01940_pad_string/query.sql @@ -0,0 +1,55 @@ +SELECT 'leftPad'; +SELECT leftPad('abc', 0), leftPad('abc', 0::Int32); +SELECT leftPad('abc', 1), leftPad('abc', 1::Int32); +SELECT leftPad('abc', 2), leftPad('abc', 2::Int32); +SELECT leftPad('abc', 3), leftPad('abc', 3::Int32); +SELECT leftPad('abc', 4), leftPad('abc', 4::Int32); +SELECT leftPad('abc', 5), leftPad('abc', 5::Int32); +SELECT leftPad('abc', 10), leftPad('abc', 10::Int32); + +SELECT leftPad('abc', 2, '*'), leftPad('abc', 2::Int32, '*'); +SELECT leftPad('abc', 4, '*'), leftPad('abc', 4::Int32, '*'); +SELECT leftPad('abc', 5, '*'), leftPad('abc', 5::Int32, '*'); +SELECT leftPad('abc', 10, '*'), leftPad('abc', 10::Int32, '*'); +SELECT leftPad('abc', 2, '*.'), leftPad('abc', 2::Int32, '*.'); +SELECT leftPad('abc', 4, '*.'), leftPad('abc', 4::Int32, '*.'); +SELECT leftPad('abc', 5, '*.'), leftPad('abc', 5::Int32, '*.'); +SELECT leftPad('abc', 10, '*.'),leftPad('abc', 10::Int32, '*.'); + +SELECT 'leftPadUTF8'; +SELECT leftPad('абвг', 2), leftPad('абвг', 2::Int32); +SELECT leftPadUTF8('абвг', 2), leftPadUTF8('абвг', 2::Int32); +SELECT leftPad('абвг', 4), leftPad('абвг', 4::Int32); +SELECT leftPadUTF8('абвг', 4), leftPadUTF8('абвг', 4::Int32); +SELECT leftPad('абвг', 12, 'ЧАС'), leftPad('абвг', 12::Int32, 'ЧАС'); +SELECT leftPadUTF8('абвг', 12, 'ЧАС'), leftPadUTF8('абвг', 12::Int32, 'ЧАС'); + +SELECT 'rightPad'; +SELECT rightPad('abc', 0), rightPad('abc', 0::Int32); +SELECT rightPad('abc', 1), rightPad('abc', 1::Int32); +SELECT rightPad('abc', 2), rightPad('abc', 
2::Int32); +SELECT rightPad('abc', 3), rightPad('abc', 3::Int32); +SELECT rightPad('abc', 4), rightPad('abc', 4::Int32); +SELECT rightPad('abc', 5), rightPad('abc', 5::Int32); +SELECT rightPad('abc', 10), rightPad('abc', 10::Int32); + +SELECT rightPad('abc', 2, '*'), rightPad('abc', 2::Int32, '*'); +SELECT rightPad('abc', 4, '*'), rightPad('abc', 4::Int32, '*'); +SELECT rightPad('abc', 5, '*'), rightPad('abc', 5::Int32, '*'); +SELECT rightPad('abc', 10, '*'), rightPad('abc', 10::Int32, '*'); +SELECT rightPad('abc', 2, '*.'), rightPad('abc', 2::Int32, '*.'); +SELECT rightPad('abc', 4, '*.'), rightPad('abc', 4::Int32, '*.'); +SELECT rightPad('abc', 5, '*.'), rightPad('abc', 5::Int32, '*.'); +SELECT rightPad('abc', 10, '*.'), rightPad('abc', 10::Int32, '*.'); + +SELECT 'rightPadUTF8'; +SELECT rightPad('абвг', 2), rightPad('абвг', 2::Int32); +SELECT rightPadUTF8('абвг', 2), rightPadUTF8('абвг', 2::Int32); +SELECT rightPad('абвг', 4), rightPad('абвг', 4::Int32); +SELECT rightPadUTF8('абвг', 4), rightPadUTF8('абвг', 4::Int32); +SELECT rightPad('абвг', 12, 'ЧАС'), rightPad('абвг', 12::Int32, 'ЧАС'); +SELECT rightPadUTF8('абвг', 12, 'ЧАС'), rightPadUTF8('абвг', 12::Int32, 'ЧАС'); + +SELECT 'numbers'; +SELECT rightPad(leftPad(toString(number), number, '_'), number*2, '^') FROM numbers(7); +SELECT rightPad(leftPad(toString(number), number::Int64, '_'), number::Int64*2, '^') FROM numbers(7); diff --git a/parser/testdata/01940_point_in_polygon_ubsan/ast.json b/parser/testdata/01940_point_in_polygon_ubsan/ast.json new file mode 100644 index 000000000..c2f1ea621 --- /dev/null +++ b/parser/testdata/01940_point_in_polygon_ubsan/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001328049, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01940_point_in_polygon_ubsan/metadata.json b/parser/testdata/01940_point_in_polygon_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01940_point_in_polygon_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01940_point_in_polygon_ubsan/query.sql b/parser/testdata/01940_point_in_polygon_ubsan/query.sql new file mode 100644 index 000000000..d01172569 --- /dev/null +++ b/parser/testdata/01940_point_in_polygon_ubsan/query.sql @@ -0,0 +1,2 @@ +SET validate_polygons = 0; +SELECT pointInPolygon((-inf, 1023), [(10.000100135803223, 10000000000.), (inf, 0.9998999834060669), (1.1920928955078125e-7, 100.0000991821289), (1.000100016593933, 100.0000991821289)]); diff --git a/parser/testdata/01940_totimezone_operator_monotonicity/ast.json b/parser/testdata/01940_totimezone_operator_monotonicity/ast.json new file mode 100644 index 000000000..7e6906cf7 --- /dev/null +++ b/parser/testdata/01940_totimezone_operator_monotonicity/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery totimezone_op_mono (children 1)" + }, + { + "explain": " Identifier totimezone_op_mono" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001368224, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/01940_totimezone_operator_monotonicity/metadata.json b/parser/testdata/01940_totimezone_operator_monotonicity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/01940_totimezone_operator_monotonicity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01940_totimezone_operator_monotonicity/query.sql b/parser/testdata/01940_totimezone_operator_monotonicity/query.sql new file mode 100644 index 000000000..b8065947e --- /dev/null +++ b/parser/testdata/01940_totimezone_operator_monotonicity/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS totimezone_op_mono; +CREATE TABLE totimezone_op_mono(i int, tz String, create_time DateTime) ENGINE MergeTree PARTITION BY toDate(create_time) ORDER BY i; +INSERT INTO totimezone_op_mono VALUES (1, 'UTC', toDateTime('2020-09-01 00:00:00', 'UTC')), (2, 'UTC', toDateTime('2020-09-02 00:00:00', 'UTC')); +SET max_rows_to_read = 1; +SELECT count() FROM totimezone_op_mono WHERE toTimeZone(create_time, 'UTC') = '2020-09-01 00:00:00'; +DROP TABLE IF EXISTS totimezone_op_mono; diff --git a/parser/testdata/01941_dict_get_has_complex_single_key/ast.json b/parser/testdata/01941_dict_get_has_complex_single_key/ast.json new file mode 100644 index 000000000..94cf57b9e --- /dev/null +++ b/parser/testdata/01941_dict_get_has_complex_single_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_dictionary_source (children 1)" + }, + { + "explain": " Identifier test_dictionary_source" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001135275, + "rows_read": 2, + "bytes_read": 96 + } +} diff --git a/parser/testdata/01941_dict_get_has_complex_single_key/metadata.json b/parser/testdata/01941_dict_get_has_complex_single_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01941_dict_get_has_complex_single_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01941_dict_get_has_complex_single_key/query.sql b/parser/testdata/01941_dict_get_has_complex_single_key/query.sql new file mode 100644 index 000000000..a44107d68 --- /dev/null +++ b/parser/testdata/01941_dict_get_has_complex_single_key/query.sql @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS test_dictionary_source; +CREATE TABLE test_dictionary_source (key String, value String) ENGINE=TinyLog; + +INSERT INTO test_dictionary_source VALUES ('Key', 'Value'); + +DROP DICTIONARY IF EXISTS test_dictionary; +CREATE DICTIONARY test_dictionary(key String, value String) +PRIMARY KEY key +LAYOUT(COMPLEX_KEY_HASHED()) +SOURCE(CLICKHOUSE(TABLE 'test_dictionary_source')) +LIFETIME(0); + +SELECT 'dictGet'; +SELECT dictGet('test_dictionary', 'value', tuple('Key')); +SELECT dictGet('test_dictionary', 'value', tuple(materialize('Key'))); +SELECT dictGet('test_dictionary', 'value', 'Key'); +SELECT dictGet('test_dictionary', 'value', materialize('Key')); + +SELECT 'dictHas'; +SELECT dictHas('test_dictionary', tuple('Key')); +SELECT dictHas('test_dictionary', tuple(materialize('Key'))); +SELECT dictHas('test_dictionary', 'Key'); +SELECT dictHas('test_dictionary', materialize('Key')); + +DROP DICTIONARY test_dictionary; +DROP TABLE test_dictionary_source; diff --git a/parser/testdata/01942_create_table_with_sample/ast.json b/parser/testdata/01942_create_table_with_sample/ast.json new file mode 100644 index 000000000..031f86605 --- /dev/null +++ b/parser/testdata/01942_create_table_with_sample/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery sample_incorrect (children 1)" + }, + { + "explain": " Identifier 
sample_incorrect" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001210833, + "rows_read": 2, + "bytes_read": 85 + } +} diff --git a/parser/testdata/01942_create_table_with_sample/metadata.json b/parser/testdata/01942_create_table_with_sample/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01942_create_table_with_sample/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01942_create_table_with_sample/query.sql b/parser/testdata/01942_create_table_with_sample/query.sql new file mode 100644 index 000000000..8e919027f --- /dev/null +++ b/parser/testdata/01942_create_table_with_sample/query.sql @@ -0,0 +1,14 @@ +CREATE TABLE IF NOT EXISTS sample_incorrect +(`x` UUID) +ENGINE = MergeTree +ORDER BY tuple(x) +SAMPLE BY x; -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER } + +DROP TABLE IF EXISTS sample_correct; +CREATE TABLE IF NOT EXISTS sample_correct +(`x` String) +ENGINE = MergeTree +ORDER BY tuple(sipHash64(x)) +SAMPLE BY sipHash64(x); + +DROP TABLE sample_correct; diff --git a/parser/testdata/01942_dateTimeToSnowflake/ast.json b/parser/testdata/01942_dateTimeToSnowflake/ast.json new file mode 100644 index 000000000..920376ece --- /dev/null +++ b/parser/testdata/01942_dateTimeToSnowflake/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001386108, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01942_dateTimeToSnowflake/metadata.json b/parser/testdata/01942_dateTimeToSnowflake/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01942_dateTimeToSnowflake/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01942_dateTimeToSnowflake/query.sql b/parser/testdata/01942_dateTimeToSnowflake/query.sql new file mode 100644 index 000000000..6cce4863c --- /dev/null +++ b/parser/testdata/01942_dateTimeToSnowflake/query.sql @@ -0,0 +1,49 @@ +SET allow_deprecated_snowflake_conversion_functions = 1; -- Force-enable deprecated snowflake conversion functions (in case this is randomized in CI) +SET session_timezone = 'Africa/Juba'; + +-- Error cases +SELECT dateTimeToSnowflake(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT dateTime64ToSnowflake(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +SELECT dateTimeToSnowflake('abc'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT dateTime64ToSnowflake('abc'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT dateTimeToSnowflake('abc', 123); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT dateTime64ToSnowflake('abc', 123); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +SELECT dateTimeToSnowflake(now()) SETTINGS allow_deprecated_snowflake_conversion_functions = 0; -- { serverError DEPRECATED_FUNCTION } +SELECT dateTime64ToSnowflake(now64()) SETTINGS allow_deprecated_snowflake_conversion_functions = 0; -- { serverError DEPRECATED_FUNCTION } + +SELECT '-- const / non-const inputs'; + +WITH toDateTime('2021-08-15 18:57:56', 'Asia/Shanghai') AS dt +SELECT dt, dateTimeToSnowflake(dt), materialize(dateTimeToSnowflake(dt)); + +WITH toDateTime64('2021-08-15 18:57:56.492', 3, 'Asia/Shanghai') AS dt64 +SELECT dt64, dateTime64ToSnowflake(dt64), materialize(dateTime64ToSnowflake(dt64)); + +SELECT '-- different DateTime64 scales'; + +WITH toDateTime64('2021-08-15 18:57:56.492', 0, 'UTC') AS dt64_0, + 
toDateTime64('2021-08-15 18:57:56.492', 1, 'UTC') AS dt64_1, + toDateTime64('2021-08-15 18:57:56.492', 2, 'UTC') AS dt64_2, + toDateTime64('2021-08-15 18:57:56.492', 3, 'UTC') AS dt64_3, + toDateTime64('2021-08-15 18:57:56.492', 4, 'UTC') AS dt64_4 +SELECT dateTime64ToSnowflake(dt64_0), + dateTime64ToSnowflake(dt64_1), + dateTime64ToSnowflake(dt64_2), + dateTime64ToSnowflake(dt64_3), + dateTime64ToSnowflake(dt64_4); + +-- DateTime64-to-Snowflake-to-DateTime64 is idempotent *if* the scale is <=3 (millisecond precision) +WITH now64(0, 'UTC') AS dt64_0, + now64(1, 'UTC') AS dt64_1, + now64(2, 'UTC') AS dt64_2, + now64(3, 'UTC') AS dt64_3 +SELECT snowflakeToDateTime64(dateTime64ToSnowflake(dt64_0), 'UTC') == dt64_0, + snowflakeToDateTime64(dateTime64ToSnowflake(dt64_1), 'UTC') == dt64_1, + snowflakeToDateTime64(dateTime64ToSnowflake(dt64_2), 'UTC') == dt64_2, + snowflakeToDateTime64(dateTime64ToSnowflake(dt64_3), 'UTC') == dt64_3; + +WITH toDateTime64('2023-11-11 11:11:11.1231', 4, 'UTC') AS dt64_4 +SELECT dt64_4, snowflakeToDateTime64(dateTime64ToSnowflake(dt64_4), 'UTC'); -- not idempotent diff --git a/parser/testdata/01942_dateTimeToSnowflakeID/ast.json b/parser/testdata/01942_dateTimeToSnowflakeID/ast.json new file mode 100644 index 000000000..ceb2ce696 --- /dev/null +++ b/parser/testdata/01942_dateTimeToSnowflakeID/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001395633, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01942_dateTimeToSnowflakeID/metadata.json b/parser/testdata/01942_dateTimeToSnowflakeID/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01942_dateTimeToSnowflakeID/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01942_dateTimeToSnowflakeID/query.sql b/parser/testdata/01942_dateTimeToSnowflakeID/query.sql new file mode 100644 index 000000000..aeaf48716 --- /dev/null +++ b/parser/testdata/01942_dateTimeToSnowflakeID/query.sql @@ -0,0 +1,75 @@ +SET session_timezone = 'UTC'; -- disable timezone randomization +SET enable_analyzer = 1; -- The old path formats the result with different whitespaces +SET output_format_pretty_single_large_number_tip_threshold = 0; + +SELECT '-- Negative tests'; +SELECT dateTimeToSnowflakeID(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT dateTime64ToSnowflakeID(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT dateTimeToSnowflakeID('invalid_dt'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT dateTime64ToSnowflakeID('invalid_dt'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT dateTimeToSnowflakeID(now(), 'invalid_epoch'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT dateTime64ToSnowflakeID(now64(), 'invalid_epoch'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT dateTimeToSnowflakeID(now(), 42, 'too_many_args'); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT dateTime64ToSnowflakeID(now64(), 42, 'too_many_args'); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +SELECT '-- Return type'; +SELECT toTypeName(dateTimeToSnowflakeID(now())); +SELECT toTypeName(dateTime64ToSnowflakeID(now64())); + +SELECT '-- Standard and twitter epoch'; + +WITH + toDateTime('2021-08-15 18:57:56') AS dt, + toDateTime64('2021-08-15 18:57:56.492', 3) AS dt64, + 1288834974657 AS twitter_epoch +SELECT + dt, + dt64, + dateTimeToSnowflakeID(dt), + dateTime64ToSnowflakeID(dt64), + 
dateTimeToSnowflakeID(dt, twitter_epoch), + dateTime64ToSnowflakeID(dt64, twitter_epoch) +FORMAT + Vertical; + +SELECT '-- Different DateTime64 scales'; + +WITH + toDateTime64('2021-08-15 18:57:56.492', 0, 'UTC') AS dt64_0, + toDateTime64('2021-08-15 18:57:56.492', 1, 'UTC') AS dt64_1, + toDateTime64('2021-08-15 18:57:56.492', 2, 'UTC') AS dt64_2, + toDateTime64('2021-08-15 18:57:56.492', 3, 'UTC') AS dt64_3, + toDateTime64('2021-08-15 18:57:56.492', 4, 'UTC') AS dt64_4 +SELECT + dateTime64ToSnowflakeID(dt64_0), + dateTime64ToSnowflakeID(dt64_1), + dateTime64ToSnowflakeID(dt64_2), + dateTime64ToSnowflakeID(dt64_3), + dateTime64ToSnowflakeID(dt64_4) +Format + Vertical; + +SELECT '-- Idempotency'; + + -- DateTime64-to-SnowflakeID-to-DateTime64 is idempotent if the scale is <=3 (millisecond precision) +WITH + now64(0) AS dt64_0, + now64(1) AS dt64_1, + now64(2) AS dt64_2, + now64(3) AS dt64_3 +SELECT + snowflakeIDToDateTime64(dateTime64ToSnowflakeID(dt64_0), 0, 'UTC') == dt64_0, + snowflakeIDToDateTime64(dateTime64ToSnowflakeID(dt64_1), 0, 'UTC') == dt64_1, + snowflakeIDToDateTime64(dateTime64ToSnowflakeID(dt64_2), 0, 'UTC') == dt64_2, + snowflakeIDToDateTime64(dateTime64ToSnowflakeID(dt64_3), 0, 'UTC') == dt64_3 +FORMAT + Vertical; + +-- not idempotent +WITH + toDateTime64('2023-11-11 11:11:11.1231', 4, 'UTC') AS dt64_4 +SELECT + dt64_4, + snowflakeIDToDateTime64(dateTime64ToSnowflakeID(dt64_4)) +FORMAT + Vertical; diff --git a/parser/testdata/01942_snowflakeIDToDateTime/ast.json b/parser/testdata/01942_snowflakeIDToDateTime/ast.json new file mode 100644 index 000000000..199bf1863 --- /dev/null +++ b/parser/testdata/01942_snowflakeIDToDateTime/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001485785, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01942_snowflakeIDToDateTime/metadata.json b/parser/testdata/01942_snowflakeIDToDateTime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01942_snowflakeIDToDateTime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01942_snowflakeIDToDateTime/query.sql b/parser/testdata/01942_snowflakeIDToDateTime/query.sql new file mode 100644 index 000000000..e9b326078 --- /dev/null +++ b/parser/testdata/01942_snowflakeIDToDateTime/query.sql @@ -0,0 +1,83 @@ +SET session_timezone = 'UTC'; -- disable timezone randomization +SET enable_analyzer = 1; -- The old path formats the result with different whitespaces +SET output_format_pretty_single_large_number_tip_threshold = 0; + +SELECT '-- Negative tests'; +SELECT snowflakeIDToDateTime(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT snowflakeIDToDateTime64(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT snowflakeIDToDateTime('invalid_snowflake'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT snowflakeIDToDateTime64('invalid_snowflake'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT snowflakeIDToDateTime(123::UInt64, 'invalid_epoch'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT snowflakeIDToDateTime64(123::UInt64, 'invalid_epoch'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT snowflakeIDToDateTime(123::UInt64, materialize(42)); -- {serverError ILLEGAL_COLUMN} +SELECT snowflakeIDToDateTime64(123::UInt64, materialize(42)); -- {serverError ILLEGAL_COLUMN} +SELECT snowflakeIDToDateTime(123::UInt64, 42, 42); -- {serverError 
ILLEGAL_TYPE_OF_ARGUMENT} +SELECT snowflakeIDToDateTime64(123::UInt64, 42, 42); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT snowflakeIDToDateTime(123::UInt64, 42, 'UTC', 'too_many_args'); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT snowflakeIDToDateTime64(123::UInt64, 42, 'UTC', 'too_many_args'); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +SELECT '-- Return type'; +SELECT toTypeName(snowflakeIDToDateTime(123::UInt64)); +SELECT toTypeName(snowflakeIDToDateTime64(123::UInt64)); + +SELECT '-- Non-const path'; +-- Two const arguments are mapped to two non-const arguments ('getDefaultImplementationForConstants'), the non-const path is taken + +WITH + 7204436857747984384 AS sf +SELECT + sf, + snowflakeIDToDateTime(sf) as dt, + snowflakeIDToDateTime64(sf) as dt64 +FORMAT + Vertical; + +-- With Twitter Snowflake ID and Twitter epoch +WITH + 1426981498778550272 AS sf, + 1288834974657 AS epoch +SELECT + sf, + snowflakeIDToDateTime(sf, epoch) as dt, + snowflakeIDToDateTime64(sf, epoch) as dt64 +FORMAT + Vertical; + +-- non-default timezone +WITH + 7204436857747984384 AS sf, + 0 AS epoch, -- default epoch + 'Asia/Shanghai' AS tz +SELECT + sf, + snowflakeIDToDateTime(sf, epoch, tz) as dt, + snowflakeIDToDateTime64(sf, epoch, tz) as dt64 +FORMAT + Vertical; + +SELECT '-- Const path'; + +-- The const path can only be tested by const snowflake + const epoch + non-const time-zone. The latter requires a special setting. +WITH + 7204436857747984384 AS sf, + 0 AS epoch, -- default epoch + materialize('Asia/Shanghai') AS tz +SELECT + sf, + snowflakeIDToDateTime(sf, epoch, tz) as dt, + snowflakeIDToDateTime64(sf, epoch, tz) as dt64 +FORMAT + Vertical +SETTINGS + allow_nonconst_timezone_arguments = 1; + + +SELECT '-- Can be combined with generateSnowflakeID'; + +WITH + generateSnowflakeID() AS snowflake +SELECT + snowflakeIDToDateTime(snowflake), + snowflakeIDToDateTime64(snowflake) +FORMAT + Null; diff --git a/parser/testdata/01942_snowflakeToDateTime/ast.json b/parser/testdata/01942_snowflakeToDateTime/ast.json new file mode 100644 index 000000000..53a98f818 --- /dev/null +++ b/parser/testdata/01942_snowflakeToDateTime/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001220812, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01942_snowflakeToDateTime/metadata.json b/parser/testdata/01942_snowflakeToDateTime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01942_snowflakeToDateTime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01942_snowflakeToDateTime/query.sql b/parser/testdata/01942_snowflakeToDateTime/query.sql new file mode 100644 index 000000000..34fe15ec1 --- /dev/null +++ b/parser/testdata/01942_snowflakeToDateTime/query.sql @@ -0,0 +1,47 @@ +SET allow_deprecated_snowflake_conversion_functions = 1; -- Force-enable deprecated snowflake conversion functions (in case this is randomized in CI) + +-- Error cases +SELECT snowflakeToDateTime(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT snowflakeToDateTime64(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +SELECT snowflakeToDateTime('abc'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT snowflakeToDateTime64('abc'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT snowflakeToDateTime('abc', 123); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT 
snowflakeToDateTime64('abc', 123); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT snowflakeToDateTime(123::Int64) SETTINGS allow_deprecated_snowflake_conversion_functions = 0; -- { serverError DEPRECATED_FUNCTION } +SELECT snowflakeToDateTime64(123::Int64) SETTINGS allow_deprecated_snowflake_conversion_functions = 0; -- { serverError DEPRECATED_FUNCTION } + +SELECT 'const column'; +WITH + CAST(1426860704886947840 AS Int64) AS i64, + 'UTC' AS tz +SELECT + tz, + i64, + snowflakeToDateTime(i64, tz) as dt, + toTypeName(dt), + snowflakeToDateTime64(i64, tz) as dt64, + toTypeName(dt64); + +WITH + CAST(1426860704886947840 AS Int64) AS i64, + 'Asia/Shanghai' AS tz +SELECT + tz, + i64, + snowflakeToDateTime(i64, tz) as dt, + toTypeName(dt), + snowflakeToDateTime64(i64, tz) as dt64, + toTypeName(dt64); + + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab(val Int64, tz String) engine = Log; +INSERT INTO tab VALUES (42, 'Asia/Singapore'); + +SELECT 1 FROM tab WHERE snowflakeToDateTime(42::Int64, tz) != now() SETTINGS allow_nonconst_timezone_arguments = 1; +SELECT 1 FROM tab WHERE snowflakeToDateTime64(42::Int64, tz) != now() SETTINGS allow_nonconst_timezone_arguments = 1; + +DROP TABLE tab; diff --git a/parser/testdata/01942_untuple_transformers_msan/ast.json b/parser/testdata/01942_untuple_transformers_msan/ast.json new file mode 100644 index 000000000..a6863100c --- /dev/null +++ b/parser/testdata/01942_untuple_transformers_msan/ast.json @@ -0,0 +1,142 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function untuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_100.0000991821289" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function untuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal UInt64_9223372036854775806" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Asterisk (children 1)" + }, + { + "explain": " ColumnsTransformerList (children 1)" + }, + { + "explain": " ColumnsExceptTransformer (children 1)" + }, + { + "explain": " Identifier b" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1 (alias a)" + }, + { + "explain": " Literal UInt64_1024" + }, 
+ { + "explain": " Literal NULL (alias b)" + } + ], + + "rows": 40, + + "statistics": + { + "elapsed": 0.001292289, + "rows_read": 40, + "bytes_read": 1618 + } +} diff --git a/parser/testdata/01942_untuple_transformers_msan/metadata.json b/parser/testdata/01942_untuple_transformers_msan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01942_untuple_transformers_msan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01942_untuple_transformers_msan/query.sql b/parser/testdata/01942_untuple_transformers_msan/query.sql new file mode 100644 index 000000000..c1be25d34 --- /dev/null +++ b/parser/testdata/01942_untuple_transformers_msan/query.sql @@ -0,0 +1 @@ +SELECT untuple(tuple(100.0000991821289)), NULL, untuple((toDateTime(9223372036854775806, -1, NULL, NULL, toDateTime(NULL, NULL)), * EXCEPT b)), NULL FROM (SELECT 1 AS a, 1024, NULL AS b); diff --git a/parser/testdata/01943_log_column_sizes/ast.json b/parser/testdata/01943_log_column_sizes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01943_log_column_sizes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01943_log_column_sizes/metadata.json b/parser/testdata/01943_log_column_sizes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01943_log_column_sizes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01943_log_column_sizes/query.sql b/parser/testdata/01943_log_column_sizes/query.sql new file mode 100644 index 000000000..4df5e7d25 --- /dev/null +++ b/parser/testdata/01943_log_column_sizes/query.sql @@ -0,0 +1,15 @@ +-- Tags: log-engine +DROP TABLE IF EXISTS test_log; +DROP TABLE IF EXISTS test_tiny_log; + +CREATE TABLE test_log (x UInt8, s String, a Array(Nullable(String))) ENGINE = Log; +CREATE TABLE test_tiny_log (x UInt8, s String, a Array(Nullable(String))) ENGINE = TinyLog; + +INSERT INTO test_log VALUES (64, 'Value1', ['Value2', 'Value3', NULL]); +INSERT INTO test_tiny_log VALUES (64, 'Value1', ['Value2', 'Value3', NULL]); + +SELECT data_compressed_bytes FROM system.columns WHERE table = 'test_log' AND database = currentDatabase(); +SELECT data_compressed_bytes FROM system.columns WHERE table = 'test_tiny_log' AND database = currentDatabase(); + +DROP TABLE test_log; +DROP TABLE test_tiny_log; diff --git a/parser/testdata/01943_non_deterministic_order_key/ast.json b/parser/testdata/01943_non_deterministic_order_key/ast.json new file mode 100644 index 000000000..33393a801 --- /dev/null +++ b/parser/testdata/01943_non_deterministic_order_key/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery a (children 3)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration number (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function now (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " 
ExpressionList (children 1)" + }, + { + "explain": " Literal '2020-06-01 13:31:40'" + }, + { + "explain": " Function toInt64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function negate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001274946, + "rows_read": 23, + "bytes_read": 852 + } +} diff --git a/parser/testdata/01943_non_deterministic_order_key/metadata.json b/parser/testdata/01943_non_deterministic_order_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01943_non_deterministic_order_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01943_non_deterministic_order_key/query.sql b/parser/testdata/01943_non_deterministic_order_key/query.sql new file mode 100644 index 000000000..94aa423c5 --- /dev/null +++ b/parser/testdata/01943_non_deterministic_order_key/query.sql @@ -0,0 +1,4 @@ +CREATE TABLE a (number UInt64) ENGINE = MergeTree ORDER BY if(now() > toDateTime('2020-06-01 13:31:40'), toInt64(number), -number); -- { serverError BAD_ARGUMENTS } +CREATE TABLE b (number UInt64) ENGINE = MergeTree ORDER BY now() > toDateTime(number); -- { serverError BAD_ARGUMENTS } +CREATE TABLE c (number UInt64) ENGINE = MergeTree ORDER BY now(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE d (number UInt64) ENGINE = MergeTree ORDER BY now() + 1 + 1 + number; -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/01943_query_id_check/ast.json b/parser/testdata/01943_query_id_check/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01943_query_id_check/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01943_query_id_check/metadata.json b/parser/testdata/01943_query_id_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01943_query_id_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01943_query_id_check/query.sql b/parser/testdata/01943_query_id_check/query.sql new file mode 100644 index 000000000..2123ec8d6 --- /dev/null +++ b/parser/testdata/01943_query_id_check/query.sql @@ -0,0 +1,26 @@ +-- Tags: no-replicated-database, log-engine +-- Tag no-replicated-database: Different query_id + +SET prefer_localhost_replica = 1; + +DROP TABLE IF EXISTS tmp; + +CREATE TABLE tmp ENGINE = TinyLog AS SELECT queryID(); +SYSTEM FLUSH LOGS query_log; +SELECT query FROM system.query_log WHERE query_id = (SELECT * FROM tmp) AND current_database = currentDatabase() LIMIT 1; +DROP TABLE tmp; + +CREATE TABLE tmp ENGINE = TinyLog AS SELECT initialQueryID(); +SYSTEM FLUSH LOGS query_log; +SELECT query FROM system.query_log WHERE initial_query_id = (SELECT * FROM tmp) AND current_database = currentDatabase() LIMIT 1; +DROP TABLE tmp; + +CREATE TABLE tmp (str String) ENGINE = Log; +INSERT INTO tmp (*) VALUES ('a'); +SELECT count() FROM (SELECT initialQueryID() FROM remote('127.0.0.{1..3}', currentDatabase(), 'tmp') GROUP BY queryID()); +SELECT count() FROM (SELECT queryID() FROM remote('127.0.0.{1..3}', currentDatabase(), 'tmp') GROUP BY queryID()); +SELECT count() FROM (SELECT queryID() AS t FROM remote('127.0.0.{1..3}', currentDatabase(), 'tmp') GROUP BY queryID() HAVING t == initialQueryID()); +SELECT count(DISTINCT t) FROM (SELECT initialQueryID() AS t FROM remote('127.0.0.{1..3}', 
currentDatabase(), 'tmp') GROUP BY queryID()); +SELECT count(DISTINCT t) FROM (SELECT queryID() AS t FROM remote('127.0.0.{1..3}', currentDatabase(), 'tmp') GROUP BY queryID()); +DROP TABLE tmp; + diff --git a/parser/testdata/01944_insert_partition_by/ast.json b/parser/testdata/01944_insert_partition_by/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01944_insert_partition_by/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01944_insert_partition_by/metadata.json b/parser/testdata/01944_insert_partition_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01944_insert_partition_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01944_insert_partition_by/query.sql b/parser/testdata/01944_insert_partition_by/query.sql new file mode 100644 index 000000000..03bbd17b8 --- /dev/null +++ b/parser/testdata/01944_insert_partition_by/query.sql @@ -0,0 +1,9 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: needs s3 + +INSERT INTO TABLE FUNCTION s3('http://localhost:9001/foo/test_{_partition_id}.csv', 'admin', 'admin', 'CSV', 'id Int32, val String') PARTITION BY val VALUES (1, '\r\n'); -- { serverError CANNOT_PARSE_TEXT } +INSERT INTO TABLE FUNCTION s3('http://localhost:9001/foo/test_{_partition_id}.csv', 'admin', 'admin', 'CSV', 'id Int32, val String') PARTITION BY val VALUES (1, 'abc\x00abc'); -- { serverError CANNOT_PARSE_TEXT } +INSERT INTO TABLE FUNCTION s3('http://localhost:9001/foo/test_{_partition_id}.csv', 'admin', 'admin', 'CSV', 'id Int32, val String') PARTITION BY val VALUES (1, 'abc\xc3\x28abc'); -- { serverError CANNOT_PARSE_TEXT } +INSERT INTO TABLE FUNCTION s3('http://localhost:9001/foo/test_{_partition_id}.csv', 'admin', 'admin', 'CSV', 'id Int32, val String') PARTITION BY val VALUES (1, 'abc}{abc'); -- { serverError CANNOT_PARSE_TEXT } +INSERT INTO TABLE FUNCTION s3('http://localhost:9001/foo/test_{_partition_id}.csv', 'admin', 'admin', 'CSV', 'id Int32, val String') PARTITION BY val VALUES (1, 'abc*abc'); -- { serverError CANNOT_PARSE_TEXT } +INSERT INTO TABLE FUNCTION s3('http://localhost:9001/foo/{_partition_id}', 'admin', 'admin', 'CSV', 'id Int32, val String') PARTITION BY val VALUES (1, ''); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/01944_range_max_elements/ast.json b/parser/testdata/01944_range_max_elements/ast.json new file mode 100644 index 000000000..c808cb12f --- /dev/null +++ b/parser/testdata/01944_range_max_elements/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001408383, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01944_range_max_elements/metadata.json b/parser/testdata/01944_range_max_elements/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01944_range_max_elements/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01944_range_max_elements/query.sql b/parser/testdata/01944_range_max_elements/query.sql new file mode 100644 index 000000000..d08f41e44 --- /dev/null +++ b/parser/testdata/01944_range_max_elements/query.sql @@ -0,0 +1,7 @@ +SET function_range_max_elements_in_block = 10; +SELECT range(number % 3) FROM numbers(10); +SELECT range(number % 3) FROM numbers(11); +SELECT range(number % 3) FROM numbers(12); -- { serverError ARGUMENT_OUT_OF_BOUND } + +SET 
function_range_max_elements_in_block = 12; +SELECT range(number % 3) FROM numbers(12); diff --git a/parser/testdata/01946_profile_sleep/ast.json b/parser/testdata/01946_profile_sleep/ast.json new file mode 100644 index 000000000..71ea0f486 --- /dev/null +++ b/parser/testdata/01946_profile_sleep/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00123244, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01946_profile_sleep/metadata.json b/parser/testdata/01946_profile_sleep/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01946_profile_sleep/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01946_profile_sleep/query.sql b/parser/testdata/01946_profile_sleep/query.sql new file mode 100644 index 000000000..939ddb284 --- /dev/null +++ b/parser/testdata/01946_profile_sleep/query.sql @@ -0,0 +1,65 @@ +SET log_queries=1; +SET log_profile_events=true; + +SELECT 'SLEEP #1 TEST', sleep(0.001) FORMAT Null; +SYSTEM FLUSH LOGS query_log; +SELECT 'SLEEP #1 CHECK', ProfileEvents['SleepFunctionCalls'] as calls, ProfileEvents['SleepFunctionMicroseconds'] as microseconds +FROM system.query_log +WHERE query like '%SELECT ''SLEEP #1 TEST''%' + AND type > 1 + AND current_database = currentDatabase() + AND event_date >= yesterday() + FORMAT JSONEachRow; + +SELECT 'SLEEP #2 TEST', sleep(0.001) FROM numbers(2) FORMAT Null; +SYSTEM FLUSH LOGS query_log; +SELECT 'SLEEP #2 CHECK', ProfileEvents['SleepFunctionCalls'] as calls, ProfileEvents['SleepFunctionMicroseconds'] as microseconds +FROM system.query_log +WHERE query like '%SELECT ''SLEEP #2 TEST''%' + AND type > 1 + AND current_database = currentDatabase() + AND event_date >= yesterday() + FORMAT JSONEachRow; + +SELECT 'SLEEP #3 TEST', sleepEachRow(0.001) FORMAT Null; +SYSTEM FLUSH LOGS query_log; +SELECT 'SLEEP #3 CHECK', ProfileEvents['SleepFunctionCalls'] as calls, ProfileEvents['SleepFunctionMicroseconds'] as microseconds +FROM system.query_log +WHERE query like '%SELECT ''SLEEP #3 TEST''%' + AND type > 1 + AND current_database = currentDatabase() + AND event_date >= yesterday() + FORMAT JSONEachRow; + +SELECT 'SLEEP #4 TEST', sleepEachRow(0.001) FROM numbers(2) FORMAT Null; +SYSTEM FLUSH LOGS query_log; +SELECT 'SLEEP #4 CHECK', ProfileEvents['SleepFunctionCalls'] as calls, ProfileEvents['SleepFunctionMicroseconds'] as microseconds +FROM system.query_log +WHERE query like '%SELECT ''SLEEP #4 TEST''%' + AND type > 1 + AND current_database = currentDatabase() + AND event_date >= yesterday() + FORMAT JSONEachRow; + + +CREATE VIEW sleep_view AS SELECT sleepEachRow(0.001) FROM system.numbers; +SYSTEM FLUSH LOGS query_log; +SELECT 'SLEEP #5 CHECK', ProfileEvents['SleepFunctionCalls'] as calls, ProfileEvents['SleepFunctionMicroseconds'] as microseconds +FROM system.query_log +WHERE query like '%CREATE VIEW sleep_view AS%' + AND type > 1 + AND current_database = currentDatabase() + AND event_date >= yesterday() + FORMAT JSONEachRow; + +SELECT 'SLEEP #6 TEST', sleepEachRow(0.001) FROM sleep_view LIMIT 10 FORMAT Null; +SYSTEM FLUSH LOGS query_log; +SELECT 'SLEEP #6 CHECK', ProfileEvents['SleepFunctionCalls'] as calls, ProfileEvents['SleepFunctionMicroseconds'] as microseconds +FROM system.query_log +WHERE query like '%SELECT ''SLEEP #6 TEST''%' + AND type > 1 + AND current_database = currentDatabase() + AND event_date >= 
yesterday() + FORMAT JSONEachRow; + +DROP TABLE sleep_view; diff --git a/parser/testdata/01947_mv_subquery/ast.json b/parser/testdata/01947_mv_subquery/ast.json new file mode 100644 index 000000000..37090206c --- /dev/null +++ b/parser/testdata/01947_mv_subquery/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001178333, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01947_mv_subquery/metadata.json b/parser/testdata/01947_mv_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01947_mv_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01947_mv_subquery/query.sql b/parser/testdata/01947_mv_subquery/query.sql new file mode 100644 index 000000000..bef06d0c2 --- /dev/null +++ b/parser/testdata/01947_mv_subquery/query.sql @@ -0,0 +1,145 @@ +SET log_queries=1; +SET log_profile_events=true; + +CREATE TABLE src Engine=MergeTree ORDER BY id AS SELECT number as id, toInt32(1) as value FROM numbers(1); +CREATE TABLE dst (id UInt64, delta Int64) Engine=MergeTree ORDER BY id; + +-- First we try with default values (https://github.com/ClickHouse/ClickHouse/issues/9587) +SET use_index_for_in_with_subqueries = 1; + +CREATE MATERIALIZED VIEW src2dst_true TO dst AS +SELECT + id, + src.value - deltas_sum as delta +FROM src +LEFT JOIN +( + SELECT id, sum(delta) as deltas_sum FROM dst + WHERE id IN (SELECT id FROM src WHERE not sleepEachRow(0.001)) + GROUP BY id +) _a +USING (id); + +-- Inserting 2 numbers should require 2 calls to sleep +INSERT into src SELECT number + 100 as id, 1 FROM numbers(2); + +-- Describe should not need to call sleep +DESCRIBE ( SELECT '1947 #3 QUERY - TRUE', + id, + src.value - deltas_sum as delta + FROM src + LEFT JOIN + ( + SELECT id, sum(delta) as deltas_sum FROM dst + WHERE id IN (SELECT id FROM src WHERE not sleepEachRow(0.001)) + GROUP BY id + ) _a + USING (id) + ) FORMAT Null; + + +SYSTEM FLUSH LOGS query_log; + +SELECT '1947 #1 CHECK - TRUE' as test, + ProfileEvents['SleepFunctionCalls'] as sleep_calls, + ProfileEvents['SleepFunctionMicroseconds'] as sleep_microseconds +FROM system.query_log +WHERE query like '%CREATE MATERIALIZED VIEW src2dst_true%' + AND type > 1 + AND current_database = currentDatabase() + AND event_date >= yesterday() + FORMAT JSONEachRow; + +SELECT '1947 #2 CHECK - TRUE' as test, + ProfileEvents['SleepFunctionCalls'] as sleep_calls, + ProfileEvents['SleepFunctionMicroseconds'] as sleep_microseconds +FROM system.query_log +WHERE query like '%INSERT into src SELECT number + 100 as id, 1 FROM numbers(2)%' + AND type > 1 + AND current_database = currentDatabase() + AND event_date >= yesterday() + FORMAT JSONEachRow; + +SELECT '1947 #3 CHECK - TRUE' as test, + ProfileEvents['SleepFunctionCalls'] as sleep_calls, + ProfileEvents['SleepFunctionMicroseconds'] as sleep_microseconds +FROM system.query_log +WHERE query like '%DESCRIBE ( SELECT ''1947 #3 QUERY - TRUE'',%' + AND type > 1 + AND current_database = currentDatabase() + AND event_date >= yesterday() + FORMAT JSONEachRow; + +DROP TABLE src2dst_true; + + +-- Retry the same but using use_index_for_in_with_subqueries = 0 + +SET use_index_for_in_with_subqueries = 0; + +CREATE MATERIALIZED VIEW src2dst_false TO dst AS +SELECT + id, + src.value - deltas_sum as delta +FROM src +LEFT JOIN +( + SELECT id, sum(delta) as deltas_sum FROM dst + WHERE id IN (SELECT 
id FROM src WHERE not sleepEachRow(0.001)) + GROUP BY id +) _a +USING (id); + +-- Inserting 2 numbers should require 2 calls to sleep +INSERT into src SELECT number + 200 as id, 1 FROM numbers(2); + +-- Describe should not need to call sleep +DESCRIBE ( SELECT '1947 #3 QUERY - FALSE', + id, + src.value - deltas_sum as delta + FROM src + LEFT JOIN + ( + SELECT id, sum(delta) as deltas_sum FROM dst + WHERE id IN (SELECT id FROM src WHERE not sleepEachRow(0.001)) + GROUP BY id + ) _a + USING (id) + ) FORMAT Null; + +SYSTEM FLUSH LOGS query_log; + +SELECT '1947 #1 CHECK - FALSE' as test, + ProfileEvents['SleepFunctionCalls'] as sleep_calls, + ProfileEvents['SleepFunctionMicroseconds'] as sleep_microseconds +FROM system.query_log +WHERE query like '%CREATE MATERIALIZED VIEW src2dst_false%' + AND type > 1 + AND current_database = currentDatabase() + AND event_date >= yesterday() + FORMAT JSONEachRow; + +SELECT '1947 #2 CHECK - FALSE' as test, + ProfileEvents['SleepFunctionCalls'] as sleep_calls, + ProfileEvents['SleepFunctionMicroseconds'] as sleep_microseconds +FROM system.query_log +WHERE query like '%INSERT into src SELECT number + 200 as id, 1 FROM numbers(2)%' + AND type > 1 + AND current_database = currentDatabase() + AND event_date >= yesterday() + FORMAT JSONEachRow; + +SELECT '1947 #3 CHECK - FALSE' as test, + ProfileEvents['SleepFunctionCalls'] as sleep_calls, + ProfileEvents['SleepFunctionMicroseconds'] as sleep_microseconds +FROM system.query_log +WHERE query like '%DESCRIBE ( SELECT ''1947 #3 QUERY - FALSE'',%' + AND type > 1 + AND current_database = currentDatabase() + AND event_date >= yesterday() + FORMAT JSONEachRow; + +DROP TABLE src2dst_false; + +DROP TABLE src; +DROP TABLE dst; diff --git a/parser/testdata/01948_dictionary_quoted_database_name/ast.json b/parser/testdata/01948_dictionary_quoted_database_name/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01948_dictionary_quoted_database_name/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01948_dictionary_quoted_database_name/metadata.json b/parser/testdata/01948_dictionary_quoted_database_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01948_dictionary_quoted_database_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01948_dictionary_quoted_database_name/query.sql b/parser/testdata/01948_dictionary_quoted_database_name/query.sql new file mode 100644 index 000000000..33fd79b14 --- /dev/null +++ b/parser/testdata/01948_dictionary_quoted_database_name/query.sql @@ -0,0 +1,40 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS `01945.db`; +CREATE DATABASE `01945.db`; + +CREATE TABLE `01945.db`.test_dictionary_values +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO `01945.db`.test_dictionary_values VALUES (0, 'Value'); + +CREATE DICTIONARY `01945.db`.test_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +LAYOUT(DIRECT()) +SOURCE(CLICKHOUSE(DB '01945.db' TABLE 'test_dictionary_values')); + +SELECT * FROM `01945.db`.test_dictionary; +DROP DICTIONARY `01945.db`.test_dictionary; + +CREATE DICTIONARY `01945.db`.`test_dictionary.test` +( + id UInt64, + value String +) +PRIMARY KEY id +LAYOUT(DIRECT()) +SOURCE(CLICKHOUSE(DB '01945.db' TABLE 'test_dictionary_values')); + +SELECT * FROM `01945.db`.`test_dictionary.test`; +DROP DICTIONARY `01945.db`.`test_dictionary.test`; + + +DROP TABLE `01945.db`.test_dictionary_values; +DROP DATABASE `01945.db`; diff --git 
a/parser/testdata/01948_group_bitmap_and_or_xor_fix/ast.json b/parser/testdata/01948_group_bitmap_and_or_xor_fix/ast.json new file mode 100644 index 000000000..d411f7a65 --- /dev/null +++ b/parser/testdata/01948_group_bitmap_and_or_xor_fix/ast.json @@ -0,0 +1,142 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function groupBitmapAnd (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitmapBuild (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toInt32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function groupBitmapOr (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitmapBuild (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toInt32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function groupBitmapXor (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitmapBuild (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toInt32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function cluster (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier test_cluster_two_shards" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 40, + + "statistics": + { + "elapsed": 0.001664598, + "rows_read": 40, + "bytes_read": 1721 + } +} diff --git a/parser/testdata/01948_group_bitmap_and_or_xor_fix/metadata.json b/parser/testdata/01948_group_bitmap_and_or_xor_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01948_group_bitmap_and_or_xor_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01948_group_bitmap_and_or_xor_fix/query.sql b/parser/testdata/01948_group_bitmap_and_or_xor_fix/query.sql new file mode 100644 index 000000000..7a7c603ff --- /dev/null +++ b/parser/testdata/01948_group_bitmap_and_or_xor_fix/query.sql @@ -0,0 +1 @@ +SELECT groupBitmapAnd(bitmapBuild([toInt32(1)])), groupBitmapOr(bitmapBuild([toInt32(1)])), groupBitmapXor(bitmapBuild([toInt32(1)])) FROM cluster(test_cluster_two_shards, numbers(10)); diff --git a/parser/testdata/01948_heredoc/ast.json b/parser/testdata/01948_heredoc/ast.json new file mode 100644 index 000000000..a573dddd9 --- /dev/null +++ 
b/parser/testdata/01948_heredoc/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001125606, + "rows_read": 5, + "bytes_read": 171 + } +} diff --git a/parser/testdata/01948_heredoc/metadata.json b/parser/testdata/01948_heredoc/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01948_heredoc/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01948_heredoc/query.sql b/parser/testdata/01948_heredoc/query.sql new file mode 100644 index 000000000..9abc0025c --- /dev/null +++ b/parser/testdata/01948_heredoc/query.sql @@ -0,0 +1,18 @@ +SELECT $$$$; +SELECT $$VALUE$$; +SELECT $doc$$doc$; +SELECT $doc$VALUE$doc$; +SELECT $doc$'VALUE'$doc$; +SELECT $doc$$do$ $ doc$ $doc $ $doco$$doc$; +SELECT $doc$$do$ $ doc$ $doc $ $doco$$doc$, $doc$$do$ $ doc$ $doc $ $doco$$doc$; + +SELECT $doc$ТЕСТ$doc$; +SELECT $doc$该类型的引擎$doc$; + +SELECT $$ +value1 +value2 +value3 +$$; + +SELECT $doc$'\xc3\x28'$doc$; diff --git a/parser/testdata/01950_aliases_bad_cast/ast.json b/parser/testdata/01950_aliases_bad_cast/ast.json new file mode 100644 index 000000000..dc1b283f3 --- /dev/null +++ b/parser/testdata/01950_aliases_bad_cast/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal NULL (alias 1)" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001283955, + "rows_read": 15, + "bytes_read": 585 + } +} diff --git a/parser/testdata/01950_aliases_bad_cast/metadata.json b/parser/testdata/01950_aliases_bad_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01950_aliases_bad_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01950_aliases_bad_cast/query.sql b/parser/testdata/01950_aliases_bad_cast/query.sql new file mode 100644 index 000000000..370e83b1e --- /dev/null +++ b/parser/testdata/01950_aliases_bad_cast/query.sql @@ -0,0 +1,2 @@ +SELECT 1, * FROM (SELECT NULL AS `1`); -- { serverError AMBIGUOUS_COLUMN_NAME } +SELECT '7', 'xyz', * FROM (SELECT NULL AS `'xyz'`); -- { serverError AMBIGUOUS_COLUMN_NAME } diff --git a/parser/testdata/01951_distributed_push_down_limit/ast.json b/parser/testdata/01951_distributed_push_down_limit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01951_distributed_push_down_limit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/01951_distributed_push_down_limit/metadata.json b/parser/testdata/01951_distributed_push_down_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01951_distributed_push_down_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01951_distributed_push_down_limit/query.sql b/parser/testdata/01951_distributed_push_down_limit/query.sql new file mode 100644 index 000000000..aee714a49 --- /dev/null +++ b/parser/testdata/01951_distributed_push_down_limit/query.sql @@ -0,0 +1,7 @@ +-- Tags: distributed + +set prefer_localhost_replica = 1; + +-- { echo } +explain description=0 select * from remote('127.{1,2}', view(select * from numbers(1e6))) order by number limit 10 settings distributed_push_down_limit=0; +explain description=0 select * from remote('127.{1,2}', view(select * from numbers(1e6))) order by number limit 10 settings distributed_push_down_limit=1; diff --git a/parser/testdata/01952_optimize_distributed_group_by_sharding_key/ast.json b/parser/testdata/01952_optimize_distributed_group_by_sharding_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01952_optimize_distributed_group_by_sharding_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01952_optimize_distributed_group_by_sharding_key/metadata.json b/parser/testdata/01952_optimize_distributed_group_by_sharding_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01952_optimize_distributed_group_by_sharding_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01952_optimize_distributed_group_by_sharding_key/query.sql b/parser/testdata/01952_optimize_distributed_group_by_sharding_key/query.sql new file mode 100644 index 000000000..960fd227a --- /dev/null +++ b/parser/testdata/01952_optimize_distributed_group_by_sharding_key/query.sql @@ -0,0 +1,30 @@ +-- Tags: distributed + +set optimize_skip_unused_shards=1; +set optimize_distributed_group_by_sharding_key=1; +set prefer_localhost_replica=1; + +set enable_analyzer = 0; + +-- { echo } +explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- not optimized +explain select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized +explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- not optimized +explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized + +explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- not optimized +explain select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized +explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- not optimized +explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized + +set enable_analyzer = 1; + +explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- not optimized +explain select distinct k1, k2 from remote('127.{1,2}', 
view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized +explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- not optimized +explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)); -- optimized + +explain select distinct k1 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- not optimized +explain select distinct k1, k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized +explain select distinct on (k1) k2 from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- not optimized +explain select distinct on (k1, k2) v from remote('127.{1,2}', view(select 1 k1, 2 k2, 3 v from numbers(2)), cityHash64(k1, k2)) order by v; -- optimized diff --git a/parser/testdata/01957_heredoc_more/ast.json b/parser/testdata/01957_heredoc_more/ast.json new file mode 100644 index 000000000..41dd17171 --- /dev/null +++ b/parser/testdata/01957_heredoc_more/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function hex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '�'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001020703, + "rows_read": 7, + "bytes_read": 254 + } +} diff --git a/parser/testdata/01957_heredoc_more/metadata.json b/parser/testdata/01957_heredoc_more/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01957_heredoc_more/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01957_heredoc_more/query.sql b/parser/testdata/01957_heredoc_more/query.sql new file mode 100644 index 000000000..61681ad39 --- /dev/null +++ b/parser/testdata/01957_heredoc_more/query.sql @@ -0,0 +1 @@ +SELECT hex($$$$); diff --git a/parser/testdata/01958_partial_hour_timezone/ast.json b/parser/testdata/01958_partial_hour_timezone/ast.json new file mode 100644 index 000000000..60baf3185 --- /dev/null +++ b/parser/testdata/01958_partial_hour_timezone/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001295706, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/01958_partial_hour_timezone/metadata.json b/parser/testdata/01958_partial_hour_timezone/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01958_partial_hour_timezone/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01958_partial_hour_timezone/query.sql b/parser/testdata/01958_partial_hour_timezone/query.sql new file mode 100644 index 000000000..3eecaaf97 --- /dev/null +++ b/parser/testdata/01958_partial_hour_timezone/query.sql @@ -0,0 +1,23 @@ +SET output_format_pretty_single_large_number_tip_threshold = 0; + +-- Appeared in https://github.com/ClickHouse/ClickHouse/pull/26978#issuecomment-890889362 +WITH toDateTime('1970-06-17 07:39:21', 'Africa/Monrovia') as t +SELECT 
toUnixTimestamp(t), + timeZoneOffset(t), + formatDateTime(t, '%F %T', 'Africa/Monrovia'), + toString(t, 'Africa/Monrovia'), + toStartOfMinute(t), + toStartOfFiveMinutes(t), + toStartOfFifteenMinutes(t), + toStartOfTenMinutes(t), + toStartOfHour(t), + toStartOfDay(t), + toStartOfWeek(t), + toStartOfInterval(t, INTERVAL 1 second), + toStartOfInterval(t, INTERVAL 1 minute), + toStartOfInterval(t, INTERVAL 2 minute), + toStartOfInterval(t, INTERVAL 5 minute), + toStartOfInterval(t, INTERVAL 60 minute), + addMinutes(t, 1), + addMinutes(t, 60) +FORMAT Vertical; diff --git a/parser/testdata/01960_lambda_precedence/ast.json b/parser/testdata/01960_lambda_precedence/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01960_lambda_precedence/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01960_lambda_precedence/metadata.json b/parser/testdata/01960_lambda_precedence/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01960_lambda_precedence/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01960_lambda_precedence/query.sql b/parser/testdata/01960_lambda_precedence/query.sql new file mode 100644 index 000000000..a3ff1424c --- /dev/null +++ b/parser/testdata/01960_lambda_precedence/query.sql @@ -0,0 +1,26 @@ +SELECT + 1000 AS a, + arrayMap(a -> (a + 1), [1, 2, 3]), + a + 10 as c; + + +-- https://github.com/ClickHouse/ClickHouse/issues/5046 +SELECT sum(c1) AS v +FROM + ( + SELECT + 1 AS c1, + ['v'] AS c2 + ) +WHERE arrayExists(v -> (v = 'v'), c2); + + +SELECT sum(c1) AS v +FROM + ( + SELECT + 1 AS c1, + ['v'] AS c2, + ['d'] AS d + ) +WHERE arrayExists(i -> (d = ['d']), c2); diff --git a/parser/testdata/01961_roaring_memory_tracking/ast.json b/parser/testdata/01961_roaring_memory_tracking/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01961_roaring_memory_tracking/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01961_roaring_memory_tracking/metadata.json b/parser/testdata/01961_roaring_memory_tracking/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01961_roaring_memory_tracking/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01961_roaring_memory_tracking/query.sql b/parser/testdata/01961_roaring_memory_tracking/query.sql new file mode 100644 index 000000000..d32901898 --- /dev/null +++ b/parser/testdata/01961_roaring_memory_tracking/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-replicated-database, no-asan, no-tsan, no-msan, no-ubsan, no-coverage +-- Sanitizers have their own mechanism for tracking allocations/deallocations, and it doesn't work with our MemoryTracker.
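+-- The groupBitmapState query below presumably allocates a large roaring bitmap per group;
+-- with max_memory_usage = '100M' those tracked allocations should exceed the limit, which is
+-- why MEMORY_LIMIT_EXCEEDED is the expected error.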
+ +SET max_bytes_before_external_group_by = 0; +SET max_bytes_ratio_before_external_group_by = 0; + +SET max_memory_usage = '100M', max_rows_to_read = '1G'; +SELECT cityHash64(rand() % 1000) as n, groupBitmapState(number) FROM numbers_mt(200000000) GROUP BY n FORMAT Null; -- { serverError MEMORY_LIMIT_EXCEEDED } diff --git a/parser/testdata/01999_grant_with_replace/ast.json b/parser/testdata/01999_grant_with_replace/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/01999_grant_with_replace/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/01999_grant_with_replace/metadata.json b/parser/testdata/01999_grant_with_replace/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/01999_grant_with_replace/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/01999_grant_with_replace/query.sql b/parser/testdata/01999_grant_with_replace/query.sql new file mode 100644 index 000000000..0f11bc542 --- /dev/null +++ b/parser/testdata/01999_grant_with_replace/query.sql @@ -0,0 +1,77 @@ +-- Tags: no-parallel + +DROP USER IF EXISTS test_user_01999; + +CREATE USER test_user_01999; +SHOW CREATE USER test_user_01999; + +SELECT 'A'; +SHOW GRANTS FOR test_user_01999; + +GRANT SELECT ON db1.* TO test_user_01999; +GRANT SHOW ON db2.tb2 TO test_user_01999; + +SELECT 'B'; +SHOW GRANTS FOR test_user_01999; + +GRANT SELECT(col1) ON db3.table TO test_user_01999 WITH REPLACE OPTION; + +SELECT 'C'; +SHOW GRANTS FOR test_user_01999; + +GRANT SELECT(col3) ON db3.table3, SELECT(col1, col2) ON db4.table4 TO test_user_01999 WITH REPLACE OPTION; + +SELECT 'D'; +SHOW GRANTS FOR test_user_01999; + +GRANT SELECT(cola) ON db5.table, INSERT(colb) ON db6.tb61, SHOW ON db7.* TO test_user_01999 WITH REPLACE OPTION; + +SELECT 'E'; +SHOW GRANTS FOR test_user_01999; + +SELECT 'F'; +GRANT SELECT ON all.* TO test_user_01999 WITH REPLACE OPTION; +SHOW GRANTS FOR test_user_01999; + +SELECT 'G'; +GRANT USAGE ON *.* TO test_user_01999 WITH REPLACE OPTION; +SHOW GRANTS FOR test_user_01999; + +SELECT 'H'; +DROP ROLE IF EXISTS test_role_01999; +CREATE role test_role_01999; +GRANT test_role_01999 to test_user_01999; +GRANT SELECT ON db1.tb1 TO test_user_01999; +SHOW GRANTS FOR test_user_01999; + +SELECT 'I'; +GRANT NONE ON *.* TO test_user_01999 WITH REPLACE OPTION; +SHOW GRANTS FOR test_user_01999; + +SELECT 'J'; +GRANT SHOW ON db8.* TO test_user_01999; +SHOW GRANTS FOR test_user_01999; + +SELECT 'K'; +GRANT NONE TO test_user_01999 WITH REPLACE OPTION; +SHOW GRANTS FOR test_user_01999; + +SELECT 'L'; +GRANT NONE ON *.*, SELECT on db9.tb3 TO test_user_01999 WITH REPLACE OPTION; +SHOW GRANTS FOR test_user_01999; + +SELECT 'M'; +GRANT test_role_01999 to test_user_01999; +SHOW GRANTS FOR test_user_01999; + +SELECT 'N'; +DROP ROLE IF EXISTS test_role_01999_1; +CREATE role test_role_01999_1; +GRANT NONE, test_role_01999_1 TO test_user_01999 WITH REPLACE OPTION; +SHOW GRANTS FOR test_user_01999; + +DROP USER IF EXISTS test_user_01999; +DROP ROLE IF EXISTS test_role_01999; +DROP ROLE IF EXISTS test_role_01999_1; + +SELECT 'O'; diff --git a/parser/testdata/02000_default_from_default_empty_column/ast.json b/parser/testdata/02000_default_from_default_empty_column/ast.json new file mode 100644 index 000000000..7abe6f5a6 --- /dev/null +++ b/parser/testdata/02000_default_from_default_empty_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test 
(children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001254962, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02000_default_from_default_empty_column/metadata.json b/parser/testdata/02000_default_from_default_empty_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02000_default_from_default_empty_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02000_default_from_default_empty_column/query.sql b/parser/testdata/02000_default_from_default_empty_column/query.sql new file mode 100644 index 000000000..5ca642628 --- /dev/null +++ b/parser/testdata/02000_default_from_default_empty_column/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test (col Int8) ENGINE=MergeTree ORDER BY tuple() +SETTINGS vertical_merge_algorithm_min_rows_to_activate=1, + vertical_merge_algorithm_min_columns_to_activate=1, + min_bytes_for_wide_part = 0; + + +INSERT INTO test VALUES (1); +ALTER TABLE test ADD COLUMN s1 String; +ALTER TABLE test ADD COLUMN s2 String DEFAULT s1; + +OPTIMIZE TABLE test FINAL; + +SELECT * FROM test; + +DROP TABLE IF EXISTS test; diff --git a/parser/testdata/02000_join_on_const/ast.json b/parser/testdata/02000_join_on_const/ast.json new file mode 100644 index 000000000..fa61dac48 --- /dev/null +++ b/parser/testdata/02000_join_on_const/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001251651, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02000_join_on_const/metadata.json b/parser/testdata/02000_join_on_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02000_join_on_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02000_join_on_const/query.sql b/parser/testdata/02000_join_on_const/query.sql new file mode 100644 index 000000000..c6f489e25 --- /dev/null +++ b/parser/testdata/02000_join_on_const/query.sql @@ -0,0 +1,150 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (id Int) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE t2 (id Int) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t1 VALUES (1), (2); +INSERT INTO t2 VALUES (2), (3); + +SELECT 70 = 10 * sum(t1.id) + sum(t2.id) AND count() == 4 FROM t1 JOIN t2 ON 1 = 1; +SELECT 70 = 10 * sum(t1.id) + sum(t2.id) AND count() == 4 FROM t1 JOIN t2 ON 1; +SELECT 70 = 10 * sum(t1.id) + sum(t2.id) AND count() == 4 FROM t1 JOIN t2 ON 2 = 2 AND 3 = 3; +SELECT 70 = 10 * sum(t1.id) + sum(t2.id) AND count() == 4 FROM t1 JOIN t2 ON toNullable(1); +SELECT 70 = 10 * sum(t1.id) + sum(t2.id) AND count() == 4 FROM t1 JOIN t2 ON toLowCardinality(1); +SELECT 70 = 10 * sum(t1.id) + sum(t2.id) AND count() == 4 FROM t1 JOIN t2 ON toLowCardinality(toNullable(1)); +SELECT 70 = 10 * sum(t1.id) + sum(t2.id) AND count() == 4 FROM t1 JOIN t2 ON toNullable(toLowCardinality(1)); + +SELECT * FROM t1 JOIN t2 ON toUInt16(1); -- { serverError INVALID_JOIN_ON_EXPRESSION } +SELECT * FROM t1 JOIN t2 ON toInt8(1); -- { serverError INVALID_JOIN_ON_EXPRESSION } +SELECT * FROM t1 JOIN t2 ON 256; -- { serverError INVALID_JOIN_ON_EXPRESSION } +SELECT * FROM t1 JOIN t2 ON -1; -- { serverError INVALID_JOIN_ON_EXPRESSION } +SELECT * FROM t1 JOIN t2 ON 
toString(1); -- { serverError INVALID_JOIN_ON_EXPRESSION } + +SELECT '- ON NULL -'; + +SELECT '- inner -'; +SELECT * FROM t1 JOIN t2 ON NULL; +SELECT * FROM t1 JOIN t2 ON 0; +SELECT * FROM t1 JOIN t2 ON 1 = 2; +SELECT '- left -'; +SELECT * FROM t1 LEFT JOIN t2 ON NULL ORDER BY t1.id, t2.id; +SELECT '- right -'; +SELECT * FROM t1 RIGHT JOIN t2 ON NULL ORDER BY t1.id, t2.id; +SELECT '- full -'; +SELECT * FROM t1 FULL JOIN t2 ON NULL ORDER BY t1.id, t2.id; + +SELECT '- inner -'; +SELECT * FROM t1 JOIN t2 ON NULL ORDER BY t1.id NULLS FIRST, t2.id SETTINGS join_use_nulls = 1; +SELECT '- left -'; +SELECT * FROM t1 LEFT JOIN t2 ON NULL ORDER BY t1.id NULLS FIRST, t2.id SETTINGS join_use_nulls = 1; +SELECT '- right -'; +SELECT * FROM t1 RIGHT JOIN t2 ON NULL ORDER BY t1.id NULLS FIRST, t2.id SETTINGS join_use_nulls = 1; +SELECT '- full -'; +SELECT * FROM t1 FULL JOIN t2 ON NULL ORDER BY t1.id NULLS FIRST, t2.id SETTINGS join_use_nulls = 1; + +-- in these cases the old analyzer reports AMBIGUOUS_COLUMN_NAME instead of INVALID_JOIN_ON_EXPRESSION +-- because some function in the ON expression is not constant itself (only its result is constant) +SELECT * FROM t1 JOIN t2 ON 1 = 1 SETTINGS join_algorithm = 'full_sorting_merge'; -- { serverError AMBIGUOUS_COLUMN_NAME,NOT_IMPLEMENTED } +SELECT * FROM t1 JOIN t2 ON 1 = 1 SETTINGS join_algorithm = 'partial_merge'; -- { serverError AMBIGUOUS_COLUMN_NAME,NOT_IMPLEMENTED } +SELECT * FROM t1 JOIN t2 ON 1 = 1 SETTINGS join_algorithm = 'auto'; -- { serverError AMBIGUOUS_COLUMN_NAME,NOT_IMPLEMENTED } + +SELECT * FROM t1 JOIN t2 ON NULL SETTINGS join_algorithm = 'full_sorting_merge'; -- { serverError INVALID_JOIN_ON_EXPRESSION,NOT_IMPLEMENTED } +SELECT * FROM t1 JOIN t2 ON NULL SETTINGS join_algorithm = 'partial_merge'; -- { serverError INVALID_JOIN_ON_EXPRESSION,NOT_IMPLEMENTED } +SELECT * FROM t1 LEFT JOIN t2 ON NULL SETTINGS join_algorithm = 'partial_merge'; -- { serverError INVALID_JOIN_ON_EXPRESSION,NOT_IMPLEMENTED } +SELECT * FROM t1 RIGHT JOIN t2 ON NULL SETTINGS join_algorithm = 'auto'; -- { serverError INVALID_JOIN_ON_EXPRESSION,NOT_IMPLEMENTED } +SELECT * FROM t1 FULL JOIN t2 ON NULL SETTINGS join_algorithm = 'partial_merge'; -- { serverError INVALID_JOIN_ON_EXPRESSION,NOT_IMPLEMENTED } + +SET query_plan_use_new_logical_join_step = 1; + +-- mixing of constant and non-constant expressions in ON is not allowed +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 == 1 SETTINGS enable_analyzer = 0; -- { serverError AMBIGUOUS_COLUMN_NAME } +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 == 1 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 == 2 SETTINGS enable_analyzer = 0; -- { serverError AMBIGUOUS_COLUMN_NAME } +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 == 2 SETTINGS enable_analyzer = 1; + +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 != 1 SETTINGS enable_analyzer = 0; -- { serverError INVALID_JOIN_ON_EXPRESSION } +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 != 1 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 'aaa'; -- { serverError INVALID_JOIN_ON_EXPRESSION,ILLEGAL_TYPE_OF_ARGUMENT } +SELECT * FROM t1 JOIN t2 ON 'aaa'; -- { serverError INVALID_JOIN_ON_EXPRESSION } + +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 0 SETTINGS enable_analyzer = 0; -- { serverError INVALID_JOIN_ON_EXPRESSION } +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 0 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 SETTINGS enable_analyzer = 0; -- { serverError INVALID_JOIN_ON_EXPRESSION }
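+-- Under the new analyzer (exercised immediately below) the same mixed expressions are
+-- accepted: the constant conjunct is presumably folded away, so `ON t1.id = t2.id AND 1`
+-- behaves like `ON t1.id = t2.id`.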
+SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND 1 SETTINGS enable_analyzer = 1; + +-- { echoOn } +SELECT * FROM t1 LEFT JOIN t2 ON t1.id = t2.id AND 1 = 1 ORDER BY 1 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 1 ORDER BY 1 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 1 ORDER BY 2, 1 SETTINGS enable_analyzer = 1; + +SELECT * FROM t1 LEFT JOIN t2 ON t1.id = t2.id AND 1 = 2 ORDER BY 1 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 RIGHT JOIN t2 ON t1.id = t2.id AND 1 = 2 ORDER BY 2 SETTINGS enable_analyzer = 1; +SELECT * FROM t1 FULL JOIN t2 ON t1.id = t2.id AND 1 = 2 ORDER BY 2, 1 SETTINGS enable_analyzer = 1; + +SELECT * FROM (SELECT 1 as a) as t1 INNER JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL; +SELECT * FROM (SELECT 1 as a) as t1 LEFT JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL; +SELECT * FROM (SELECT 1 as a) as t1 RIGHT JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL; +SELECT * FROM (SELECT 1 as a) as t1 FULL JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL ORDER BY 2; +SELECT * FROM (SELECT 1 as a) as t1 SEMI JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL; +SELECT * FROM (SELECT 1 as a) as t1 ANTI JOIN ( SELECT ('b', 256) as b ) AS t2 ON NULL ORDER BY 2; + +-- { echoOff } + +SELECT a + 1 +FROM (SELECT 1 as x) as t1 +LEFT JOIN ( SELECT 1 AS a ) AS t2 +ON TRUE +SETTINGS enable_analyzer=1, join_use_nulls=1; + +SELECT a + 1, x + 1, toTypeName(a), toTypeName(x) +FROM (SELECT 1 as x) as t1 +LEFT JOIN ( SELECT sum(number) as a from numbers(3) GROUP BY NULL) AS t2 +ON TRUE +SETTINGS enable_analyzer=1, join_use_nulls=1; + +SELECT a + 1, x + 1, toTypeName(a), toTypeName(x) +FROM (SELECT 1 as x) as t1 +RIGHT JOIN ( SELECT sum(number) as a from numbers(3) GROUP BY NULL) AS t2 +ON TRUE +SETTINGS enable_analyzer=1, join_use_nulls=1; + +SELECT a + 1, x + 1, toTypeName(a), toTypeName(x) +FROM (SELECT 1 as x) as t1 +FULL JOIN ( SELECT sum(number) as a from numbers(3) GROUP BY NULL) AS t2 +ON TRUE +SETTINGS enable_analyzer=1, join_use_nulls=1; + +-- Join on constant with empty table fixed only with query_plan_use_new_logical_join_step +SET query_plan_use_new_logical_join_step = 1; +-- query_plan_use_new_logical_join_step disabled for parallel replicas +SET enable_parallel_replicas = 0; +SET join_use_nulls = 1; +SET enable_analyzer = 1; + +CREATE TABLE empty_table (id Int) ENGINE = Memory; + +SELECT * FROM t1 LEFT JOIN empty_table ON 1 = 1 ORDER BY ALL; +SELECT * FROM t1 FULL JOIN empty_table ON 1 = 1 ORDER BY ALL; +SELECT * FROM t1 LEFT JOIN empty_table ON 1 = 2 ORDER BY ALL; +SELECT * FROM t1 FULL JOIN empty_table ON 1 = 2 ORDER BY ALL; +SELECT * FROM empty_table RIGHT JOIN t1 ON 1 = 1 ORDER BY ALL; +SELECT * FROM empty_table FULL JOIN t1 ON 1 = 1 ORDER BY ALL; +SELECT * FROM empty_table RIGHT JOIN t1 ON 1 = 2 ORDER BY ALL; +SELECT * FROM empty_table FULL JOIN t1 ON 1 = 2 ORDER BY ALL; + +SELECT '- empty -'; +SELECT * FROM t1 JOIN empty_table ON 1 = 1; +SELECT * FROM t1 RIGHT JOIN empty_table ON 1 = 1; +SELECT * FROM t1 JOIN empty_table ON 1 = 2; +SELECT * FROM t1 RIGHT JOIN empty_table ON 1 = 2; +SELECT * FROM empty_table JOIN t1 ON 1 = 1; +SELECT * FROM empty_table LEFT JOIN t1 ON 1 = 1; +SELECT * FROM empty_table JOIN t1 ON 1 = 2; +SELECT * FROM empty_table LEFT JOIN t1 ON 1 = 2; +SELECT '- empty -'; + + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS empty_table; diff --git a/parser/testdata/02000_map_full_text_bloom_filter_index/ast.json 
b/parser/testdata/02000_map_full_text_bloom_filter_index/ast.json new file mode 100644 index 000000000..c7146bd05 --- /dev/null +++ b/parser/testdata/02000_map_full_text_bloom_filter_index/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery bf_tokenbf_map_keys_test (children 1)" + }, + { + "explain": " Identifier bf_tokenbf_map_keys_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001162124, + "rows_read": 2, + "bytes_read": 100 + } +} diff --git a/parser/testdata/02000_map_full_text_bloom_filter_index/metadata.json b/parser/testdata/02000_map_full_text_bloom_filter_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02000_map_full_text_bloom_filter_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02000_map_full_text_bloom_filter_index/query.sql b/parser/testdata/02000_map_full_text_bloom_filter_index/query.sql new file mode 100644 index 000000000..5d61fad58 --- /dev/null +++ b/parser/testdata/02000_map_full_text_bloom_filter_index/query.sql @@ -0,0 +1,191 @@ +DROP TABLE IF EXISTS bf_tokenbf_map_keys_test; +DROP TABLE IF EXISTS bf_ngrambf_map_keys_test; + +CREATE TABLE bf_tokenbf_map_keys_test +( + row_id UInt32, + map Map(String, String), + map_fixed Map(FixedString(2), String), + INDEX map_keys_tokenbf mapKeys(map) TYPE tokenbf_v1(256,2,0) GRANULARITY 1, + INDEX map_fixed_keys_tokenbf mapKeys(map_fixed) TYPE ngrambf_v1(4,256,2,0) GRANULARITY 1 +) Engine=MergeTree() ORDER BY row_id SETTINGS index_granularity = 1; + +INSERT INTO bf_tokenbf_map_keys_test VALUES (0, {'K0':'V0'}, {'K0':'V0'}), (1, {'K1':'V1'}, {'K1':'V1'}); + +SELECT 'Map full text bloom filter tokenbf mapKeys'; + +SELECT 'Equals with existing key'; +SELECT * FROM bf_tokenbf_map_keys_test WHERE map['K0'] = 'V0' SETTINGS force_data_skipping_indices='map_keys_tokenbf'; +SELECT 'Equals with non existing key'; +SELECT * FROM bf_tokenbf_map_keys_test WHERE map['K2'] = 'V2' SETTINGS force_data_skipping_indices='map_keys_tokenbf'; +SELECT 'Equals with non existing key and default value'; +SELECT * FROM bf_tokenbf_map_keys_test WHERE map['K3'] = ''; +SELECT 'Not equals with existing key'; +SELECT * FROM bf_tokenbf_map_keys_test WHERE map['K0'] != 'V0' SETTINGS force_data_skipping_indices='map_keys_tokenbf'; +SELECT 'Not equals with non existing key'; +SELECT * FROM bf_tokenbf_map_keys_test WHERE map['K2'] != 'V2' SETTINGS force_data_skipping_indices='map_keys_tokenbf'; +SELECT 'Not equals with non existing key and default value'; +SELECT * FROM bf_tokenbf_map_keys_test WHERE map['K3'] != ''; + +SELECT 'Map fixed full text bloom filter tokenbf mapKeys'; + +SELECT 'Equals with existing key'; +SELECT * FROM bf_tokenbf_map_keys_test WHERE map_fixed['K0'] = 'V0' SETTINGS force_data_skipping_indices='map_fixed_keys_tokenbf'; +SELECT 'Equals with non existing key'; +SELECT * FROM bf_tokenbf_map_keys_test WHERE map_fixed['K2'] = 'V2' SETTINGS force_data_skipping_indices='map_fixed_keys_tokenbf'; +SELECT 'Equals with non existing key and default value'; +SELECT * FROM bf_tokenbf_map_keys_test WHERE map_fixed['K3'] = ''; +SELECT 'Not equals with existing key'; +SELECT * FROM bf_tokenbf_map_keys_test WHERE map_fixed['K0'] != 'V0' SETTINGS force_data_skipping_indices='map_fixed_keys_tokenbf'; +SELECT 'Not equals with non existing key'; +SELECT * FROM bf_tokenbf_map_keys_test WHERE map_fixed['K2'] != 'V2' SETTINGS 
force_data_skipping_indices='map_fixed_keys_tokenbf'; +SELECT 'Not equals with non existing key and default value'; +SELECT * FROM bf_tokenbf_map_keys_test WHERE map_fixed['K3'] != ''; + +DROP TABLE bf_tokenbf_map_keys_test; + +CREATE TABLE bf_tokenbf_map_values_test +( + row_id UInt32, + map Map(String, String), + map_fixed Map(FixedString(2), String), + INDEX map_values_tokenbf mapValues(map) TYPE tokenbf_v1(256,2,0) GRANULARITY 1, + INDEX map_fixed_values_tokenbf mapValues(map_fixed) TYPE ngrambf_v1(4,256,2,0) GRANULARITY 1 +) Engine=MergeTree() ORDER BY row_id SETTINGS index_granularity = 1; + +INSERT INTO bf_tokenbf_map_values_test VALUES (0, {'K0':'V0'}, {'K0':'V0'}), (1, {'K1':'V1'}, {'K1':'V1'}); + +SELECT 'Map full text bloom filter tokenbf mapValues'; + +SELECT 'Equals with existing key'; +SELECT * FROM bf_tokenbf_map_values_test WHERE map['K0'] = 'V0' SETTINGS force_data_skipping_indices='map_values_tokenbf'; +SELECT 'Equals with non existing key'; +SELECT * FROM bf_tokenbf_map_values_test WHERE map['K2'] = 'V2' SETTINGS force_data_skipping_indices='map_values_tokenbf'; +SELECT 'Equals with non existing key and default value'; +SELECT * FROM bf_tokenbf_map_values_test WHERE map['K3'] = ''; +SELECT 'Not equals with existing key'; +SELECT * FROM bf_tokenbf_map_values_test WHERE map['K0'] != 'V0' SETTINGS force_data_skipping_indices='map_values_tokenbf'; +SELECT 'Not equals with non existing key'; +SELECT * FROM bf_tokenbf_map_values_test WHERE map['K2'] != 'V2' SETTINGS force_data_skipping_indices='map_values_tokenbf'; +SELECT 'Not equals with non existing key and default value'; +SELECT * FROM bf_tokenbf_map_values_test WHERE map['K3'] != ''; +SELECT 'Equals with existing value'; +SELECT * FROM bf_tokenbf_map_values_test WHERE mapContainsValueLike(map, 'V0') SETTINGS force_data_skipping_indices='map_values_tokenbf'; +SELECT 'Equals with non existing value'; +SELECT * FROM bf_tokenbf_map_values_test WHERE mapContainsValueLike(map, 'V2') SETTINGS force_data_skipping_indices='map_values_tokenbf'; +SELECT 'Not equals with existing value'; +SELECT * FROM bf_tokenbf_map_values_test WHERE NOT mapContainsValueLike(map, 'V0') SETTINGS force_data_skipping_indices='map_values_tokenbf'; +SELECT 'Not equals with non existing value'; +SELECT * FROM bf_tokenbf_map_values_test WHERE NOT mapContainsValueLike(map, 'V2') SETTINGS force_data_skipping_indices='map_values_tokenbf'; + +SELECT 'Map fixed full text bloom filter tokenbf mapKeys'; + +SELECT 'Equals with existing key'; +SELECT * FROM bf_tokenbf_map_values_test WHERE map_fixed['K0'] = 'V0' SETTINGS force_data_skipping_indices='map_fixed_values_tokenbf'; +SELECT 'Equals with non existing key'; +SELECT * FROM bf_tokenbf_map_values_test WHERE map_fixed['K2'] = 'V2' SETTINGS force_data_skipping_indices='map_fixed_values_tokenbf'; +SELECT 'Equals with non existing key and default value'; +SELECT * FROM bf_tokenbf_map_values_test WHERE map_fixed['K3'] = ''; +SELECT 'Not equals with existing key'; +SELECT * FROM bf_tokenbf_map_values_test WHERE map_fixed['K0'] != 'V0' SETTINGS force_data_skipping_indices='map_fixed_values_tokenbf'; +SELECT 'Not equals with non existing key'; +SELECT * FROM bf_tokenbf_map_values_test WHERE map_fixed['K2'] != 'V2' SETTINGS force_data_skipping_indices='map_fixed_values_tokenbf'; +SELECT 'Not equals with non existing key and default value'; +SELECT * FROM bf_tokenbf_map_values_test WHERE map_fixed['K3'] != ''; + +SELECT 'Equals with existing value'; +SELECT * FROM bf_tokenbf_map_values_test WHERE 
mapContainsValueLike(map_fixed, 'V0%') SETTINGS force_data_skipping_indices='map_fixed_values_tokenbf'; +SELECT 'Equals with non existing value'; +SELECT * FROM bf_tokenbf_map_values_test WHERE mapContainsValueLike(map_fixed, 'V2%') SETTINGS force_data_skipping_indices='map_fixed_values_tokenbf'; +SELECT 'Not equals with existing value'; +SELECT * FROM bf_tokenbf_map_values_test WHERE NOT mapContainsValueLike(map_fixed, 'V0%') SETTINGS force_data_skipping_indices='map_fixed_values_tokenbf'; +SELECT 'Not equals with non existing value'; +SELECT * FROM bf_tokenbf_map_values_test WHERE NOT mapContainsValueLike(map_fixed, 'V2%') SETTINGS force_data_skipping_indices='map_fixed_values_tokenbf'; + +DROP TABLE bf_tokenbf_map_values_test; + +CREATE TABLE bf_ngrambf_map_keys_test +( + row_id UInt32, + map Map(String, String), + map_fixed Map(FixedString(2), String), + INDEX map_keys_ngrambf mapKeys(map) TYPE ngrambf_v1(4,256,2,0) GRANULARITY 1, + INDEX map_fixed_keys_ngrambf mapKeys(map_fixed) TYPE ngrambf_v1(4,256,2,0) GRANULARITY 1 +) Engine=MergeTree() ORDER BY row_id SETTINGS index_granularity = 1; + +INSERT INTO bf_ngrambf_map_keys_test VALUES (0, {'K0':'V0'}, {'K0':'V0'}), (1, {'K1':'V1'}, {'K1':'V1'}); + +SELECT 'Map full text bloom filter ngrambf mapKeys'; + +SELECT 'Equals with existing key'; +SELECT * FROM bf_ngrambf_map_keys_test WHERE map['K0'] = 'V0' SETTINGS force_data_skipping_indices='map_keys_ngrambf'; +SELECT 'Equals with non existing key'; +SELECT * FROM bf_ngrambf_map_keys_test WHERE map['K2'] = 'V2' SETTINGS force_data_skipping_indices='map_keys_ngrambf'; +SELECT 'Equals with non existing key and default value'; +SELECT * FROM bf_ngrambf_map_keys_test WHERE map['K3'] = ''; +SELECT 'Not equals with existing key'; +SELECT * FROM bf_ngrambf_map_keys_test WHERE map['K0'] != 'V0' SETTINGS force_data_skipping_indices='map_keys_ngrambf'; +SELECT 'Not equals with non existing key'; +SELECT * FROM bf_ngrambf_map_keys_test WHERE map['K2'] != 'V2' SETTINGS force_data_skipping_indices='map_keys_ngrambf'; +SELECT 'Not equals with non existing key and default value'; +SELECT * FROM bf_ngrambf_map_keys_test WHERE map['K3'] != ''; + +SELECT 'Map fixed full text bloom filter ngrambf mapKeys'; + +SELECT 'Equals with existing key'; +SELECT * FROM bf_ngrambf_map_keys_test WHERE map_fixed['K0'] = 'V0' SETTINGS force_data_skipping_indices='map_fixed_keys_ngrambf'; +SELECT 'Equals with non existing key'; +SELECT * FROM bf_ngrambf_map_keys_test WHERE map_fixed['K2'] = 'V2' SETTINGS force_data_skipping_indices='map_fixed_keys_ngrambf'; +SELECT 'Equals with non existing key and default value'; +SELECT * FROM bf_ngrambf_map_keys_test WHERE map_fixed['K3'] = ''; +SELECT 'Not equals with existing key'; +SELECT * FROM bf_ngrambf_map_keys_test WHERE map_fixed['K0'] != 'V0' SETTINGS force_data_skipping_indices='map_fixed_keys_ngrambf'; +SELECT 'Not equals with non existing key'; +SELECT * FROM bf_ngrambf_map_keys_test WHERE map_fixed['K2'] != 'V2' SETTINGS force_data_skipping_indices='map_fixed_keys_ngrambf'; +SELECT 'Not equals with non existing key and default value'; +SELECT * FROM bf_ngrambf_map_keys_test WHERE map_fixed['K3'] != ''; + +DROP TABLE bf_ngrambf_map_keys_test; + +CREATE TABLE bf_ngrambf_map_values_test +( + row_id UInt32, + map Map(String, String), + map_fixed Map(FixedString(2), String), + INDEX map_values_ngrambf mapKeys(map) TYPE ngrambf_v1(4,256,2,0) GRANULARITY 1, + INDEX map_fixed_values_ngrambf mapKeys(map_fixed) TYPE ngrambf_v1(4,256,2,0) GRANULARITY 1 +) Engine=MergeTree() ORDER BY 
row_id SETTINGS index_granularity = 1; + +INSERT INTO bf_ngrambf_map_values_test VALUES (0, {'K0':'V0'}, {'K0':'V0'}), (1, {'K1':'V1'}, {'K1':'V1'}); + +SELECT 'Map full text bloom filter ngrambf mapValues'; + +SELECT 'Equals with existing key'; +SELECT * FROM bf_ngrambf_map_values_test WHERE map['K0'] = 'V0' SETTINGS force_data_skipping_indices='map_values_ngrambf'; +SELECT 'Equals with non existing key'; +SELECT * FROM bf_ngrambf_map_values_test WHERE map['K2'] = 'V2' SETTINGS force_data_skipping_indices='map_values_ngrambf'; +SELECT 'Equals with non existing key and default value'; +SELECT * FROM bf_ngrambf_map_values_test WHERE map['K3'] = ''; +SELECT 'Not equals with existing key'; +SELECT * FROM bf_ngrambf_map_values_test WHERE map['K0'] != 'V0' SETTINGS force_data_skipping_indices='map_values_ngrambf'; +SELECT 'Not equals with non existing key'; +SELECT * FROM bf_ngrambf_map_values_test WHERE map['K2'] != 'V2' SETTINGS force_data_skipping_indices='map_values_ngrambf'; +SELECT 'Not equals with non existing key and default value'; +SELECT * FROM bf_ngrambf_map_values_test WHERE map['K3'] != ''; + +SELECT 'Map fixed full text bloom filter ngrambf mapKeys'; + +SELECT 'Equals with existing key'; +SELECT * FROM bf_ngrambf_map_values_test WHERE map_fixed['K0'] = 'V0' SETTINGS force_data_skipping_indices='map_fixed_values_ngrambf'; +SELECT 'Equals with non existing key'; +SELECT * FROM bf_ngrambf_map_values_test WHERE map_fixed['K2'] = 'V2' SETTINGS force_data_skipping_indices='map_fixed_values_ngrambf'; +SELECT 'Equals with non existing key and default value'; +SELECT * FROM bf_ngrambf_map_values_test WHERE map_fixed['K3'] = ''; +SELECT 'Not equals with existing key'; +SELECT * FROM bf_ngrambf_map_values_test WHERE map_fixed['K0'] != 'V0' SETTINGS force_data_skipping_indices='map_fixed_values_ngrambf'; +SELECT 'Not equals with non existing key'; +SELECT * FROM bf_ngrambf_map_values_test WHERE map_fixed['K2'] != 'V2' SETTINGS force_data_skipping_indices='map_fixed_values_ngrambf'; +SELECT 'Not equals with non existing key and default value'; +SELECT * FROM bf_ngrambf_map_values_test WHERE map_fixed['K3'] != ''; + +DROP TABLE bf_ngrambf_map_values_test; diff --git a/parser/testdata/02000_table_function_cluster_macros/ast.json b/parser/testdata/02000_table_function_cluster_macros/ast.json new file mode 100644 index 000000000..4754ef8e7 --- /dev/null +++ b/parser/testdata/02000_table_function_cluster_macros/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier _shard_num" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function cluster (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier {default_cluster_macro}" + }, + { + "explain": " Identifier system.one" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001333107, + "rows_read": 12, + "bytes_read": 489 + } +} diff --git a/parser/testdata/02000_table_function_cluster_macros/metadata.json b/parser/testdata/02000_table_function_cluster_macros/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02000_table_function_cluster_macros/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02000_table_function_cluster_macros/query.sql b/parser/testdata/02000_table_function_cluster_macros/query.sql new file mode 100644 index 000000000..354d40f1e --- /dev/null +++ b/parser/testdata/02000_table_function_cluster_macros/query.sql @@ -0,0 +1,9 @@ +SELECT _shard_num FROM cluster("{default_cluster_macro}", system.one); +SELECT _shard_num FROM cluster("{default_cluster_macro}"); +SELECT _shard_num FROM clusterAllReplicas("{default_cluster_macro}", system.one); +SELECT _shard_num FROM clusterAllReplicas("{default_cluster_macro}"); + +SELECT _shard_num FROM cluster("{nonexistent}"); -- { serverError BAD_ARGUMENTS } +SELECT _shard_num FROM cluster("{nonexistent}", system.one); -- { serverError BAD_ARGUMENTS } +SELECT _shard_num FROM clusterAllReplicas("{nonexistent}"); -- { serverError BAD_ARGUMENTS } +SELECT _shard_num FROM clusterAllReplicas("{nonexistent}", system.one); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02001_add_default_database_to_system_users/ast.json b/parser/testdata/02001_add_default_database_to_system_users/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02001_add_default_database_to_system_users/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02001_add_default_database_to_system_users/metadata.json b/parser/testdata/02001_add_default_database_to_system_users/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02001_add_default_database_to_system_users/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02001_add_default_database_to_system_users/query.sql b/parser/testdata/02001_add_default_database_to_system_users/query.sql new file mode 100644 index 000000000..9cf029301 --- /dev/null +++ b/parser/testdata/02001_add_default_database_to_system_users/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-parallel +-- Tag no-parallel: create user + +create user if not exists u_02001 default database system; +select default_database from system.users where name = 'u_02001'; +drop user if exists u_02001; diff --git a/parser/testdata/02001_dist_on_dist_WithMergeableStateAfterAggregation/ast.json b/parser/testdata/02001_dist_on_dist_WithMergeableStateAfterAggregation/ast.json new file mode 100644 index 000000000..77355ace4 --- /dev/null +++ b/parser/testdata/02001_dist_on_dist_WithMergeableStateAfterAggregation/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery dist (children 1)" + }, + { + "explain": " Identifier dist" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001247387, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02001_dist_on_dist_WithMergeableStateAfterAggregation/metadata.json b/parser/testdata/02001_dist_on_dist_WithMergeableStateAfterAggregation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02001_dist_on_dist_WithMergeableStateAfterAggregation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02001_dist_on_dist_WithMergeableStateAfterAggregation/query.sql b/parser/testdata/02001_dist_on_dist_WithMergeableStateAfterAggregation/query.sql new file mode 100644 index 000000000..0925df188 --- /dev/null +++ b/parser/testdata/02001_dist_on_dist_WithMergeableStateAfterAggregation/query.sql @@ -0,0 +1,6 @@ 
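+-- Presumably exercises dist-on-dist reads (remote() over a Distributed table) with
+-- prefer_localhost_replica=0, where WithMergeableStateAfterAggregation should be chosen on
+-- the shards (see the directory name), comparing distributed_push_down_limit against
+-- distributed_group_by_no_merge.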
+drop table if exists dist; +create table dist as system.one engine=Distributed('test_shard_localhost', system, one); +-- { echo } +select dummy as foo from remote('127.{2,3}', currentDatabase(), dist) limit 1 settings prefer_localhost_replica=0, distributed_push_down_limit=0; +select dummy as foo from remote('127.{2,3}', currentDatabase(), dist) limit 1 settings prefer_localhost_replica=0, distributed_push_down_limit=1; +select dummy as foo from remote('127.{2,3}', currentDatabase(), dist) limit 1 settings prefer_localhost_replica=0, distributed_group_by_no_merge=1; diff --git a/parser/testdata/02001_hostname_test/ast.json b/parser/testdata/02001_hostname_test/ast.json new file mode 100644 index 000000000..c94979c2b --- /dev/null +++ b/parser/testdata/02001_hostname_test/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function hostname (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001146169, + "rows_read": 6, + "bytes_read": 218 + } +} diff --git a/parser/testdata/02001_hostname_test/metadata.json b/parser/testdata/02001_hostname_test/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02001_hostname_test/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02001_hostname_test/query.sql b/parser/testdata/02001_hostname_test/query.sql new file mode 100644 index 000000000..a8c7a8dab --- /dev/null +++ b/parser/testdata/02001_hostname_test/query.sql @@ -0,0 +1,2 @@ +select hostname(); +select hostName() h, count() from cluster(test_cluster_two_shards, system.one) group by h; diff --git a/parser/testdata/02001_join_on_const/ast.json b/parser/testdata/02001_join_on_const/ast.json new file mode 100644 index 000000000..a877f4dbc --- /dev/null +++ b/parser/testdata/02001_join_on_const/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001283926, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02001_join_on_const/metadata.json b/parser/testdata/02001_join_on_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02001_join_on_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02001_join_on_const/query.sql b/parser/testdata/02001_join_on_const/query.sql new file mode 100644 index 000000000..2f65b5a95 --- /dev/null +++ b/parser/testdata/02001_join_on_const/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (id Int) ENGINE = TinyLog; +CREATE TABLE t2 (id Int) ENGINE = TinyLog; + +INSERT INTO t1 VALUES (1), (2); +INSERT INTO t2 SELECT number + 5 AS x FROM (SELECT * FROM system.numbers LIMIT 1111); + +SET min_joined_block_size_bytes = 0; +SET max_block_size = 100; + +SELECT count() == 2222 FROM t1 JOIN t2 ON 1 = 1; + +SELECT count() == 0 FROM t1 JOIN t2 ON 1 = 2; +SELECT count() == 2 FROM t1 LEFT JOIN t2 ON 1 = 2; +SELECT count() == 1111 FROM t1 RIGHT JOIN t2 ON 1 = 2; +SELECT count() == 1113 FROM t1 FULL JOIN t2 ON 1 = 2; + +DROP 
TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; diff --git a/parser/testdata/02001_select_with_filter/ast.json b/parser/testdata/02001_select_with_filter/ast.json new file mode 100644 index 000000000..83e2320ba --- /dev/null +++ b/parser/testdata/02001_select_with_filter/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function argMaxIf (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function notEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_99" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001584641, + "rows_read": 21, + "bytes_read": 816 + } +} diff --git a/parser/testdata/02001_select_with_filter/metadata.json b/parser/testdata/02001_select_with_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02001_select_with_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02001_select_with_filter/query.sql b/parser/testdata/02001_select_with_filter/query.sql new file mode 100644 index 000000000..70152db83 --- /dev/null +++ b/parser/testdata/02001_select_with_filter/query.sql @@ -0,0 +1,4 @@ +SELECT argMax(number, number + 1) FILTER(WHERE number != 99) FROM numbers(100) ; +SELECT sum(number) FILTER(WHERE number % 2 == 0) FROM numbers(100); +SELECT sumIfOrNull(number, number % 2 == 1) FILTER(WHERE 0) FROM numbers(100); +SELECT sumIfOrNull(number, number % 2 == 1) FILTER(WHERE 1) FROM numbers(100); diff --git a/parser/testdata/02001_shard_num_shard_count/ast.json b/parser/testdata/02001_shard_num_shard_count/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02001_shard_num_shard_count/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02001_shard_num_shard_count/metadata.json b/parser/testdata/02001_shard_num_shard_count/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02001_shard_num_shard_count/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02001_shard_num_shard_count/query.sql b/parser/testdata/02001_shard_num_shard_count/query.sql new file mode 100644 index 000000000..0d4a92f4e --- /dev/null +++ b/parser/testdata/02001_shard_num_shard_count/query.sql @@ -0,0 +1,5 @@ +-- Tags: shard + +select shardNum() n, shardCount() c; +select shardNum() n, shardCount() c from remote('127.0.0.{1,2,3}', system.one) order by n settings prefer_localhost_replica = 0; +select shardNum() n, shardCount() c from remote('127.0.0.{1,2,3}', system.one) order by n settings prefer_localhost_replica = 1; 
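Each fixture directory in this diff follows the same layout: query.sql carries the statements under test, ast.json records the expected EXPLAIN AST output (or {"error": true} where parsing is expected to fail), and metadata.json marks unfinished cases with {"todo": true}. Below is a minimal sketch of a harness that walks this layout; the file names come from the diff itself, but the harness (function names, the choice to skip todo cases, the rows consistency check) is an assumption, not code present in this repository.

// Hypothetical fixture walker; only stdlib, no parser API is assumed.
package parser_test

import (
	"encoding/json"
	"os"
	"path/filepath"
	"testing"
)

// metadata mirrors metadata.json; only the "todo" flag appears in this diff.
type metadata struct {
	Todo bool `json:"todo"`
}

// astFile mirrors ast.json: either {"error": true} or a ClickHouse-style
// EXPLAIN AST result with one "explain" line per row.
type astFile struct {
	Error bool `json:"error"`
	Data  []struct {
		Explain string `json:"explain"`
	} `json:"data"`
	Rows int `json:"rows"`
}

// TestFixtures walks parser/testdata and validates the fixture files
// themselves; a real harness would additionally feed query.sql to the
// parser and diff its Explain output against ast.json.
func TestFixtures(t *testing.T) {
	dirs, err := filepath.Glob(filepath.Join("testdata", "*"))
	if err != nil {
		t.Fatal(err)
	}
	for _, dir := range dirs {
		dir := dir
		t.Run(filepath.Base(dir), func(t *testing.T) {
			var md metadata
			mustDecode(t, filepath.Join(dir, "metadata.json"), &md)
			if md.Todo {
				t.Skip("fixture marked todo")
			}
			var ast astFile
			mustDecode(t, filepath.Join(dir, "ast.json"), &ast)
			if !ast.Error && ast.Rows != len(ast.Data) {
				t.Errorf("rows = %d, but %d explain lines", ast.Rows, len(ast.Data))
			}
		})
	}
}

// mustDecode reads a JSON file and unmarshals it, failing the test on error.
func mustDecode(t *testing.T, path string, v interface{}) {
	t.Helper()
	raw, err := os.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}
	if err := json.Unmarshal(raw, v); err != nil {
		t.Fatalf("%s: %v", path, err)
	}
}

Skipping on the todo flag mirrors how such markers are typically used while coverage is incomplete; once a fixture gains a real expectation, the flag would presumably be dropped and the AST comparison enabled.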
diff --git a/parser/testdata/02002_global_subqueries_subquery_or_table_name/ast.json b/parser/testdata/02002_global_subqueries_subquery_or_table_name/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02002_global_subqueries_subquery_or_table_name/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02002_global_subqueries_subquery_or_table_name/metadata.json b/parser/testdata/02002_global_subqueries_subquery_or_table_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02002_global_subqueries_subquery_or_table_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02002_global_subqueries_subquery_or_table_name/query.sql b/parser/testdata/02002_global_subqueries_subquery_or_table_name/query.sql new file mode 100644 index 000000000..e752e8714 --- /dev/null +++ b/parser/testdata/02002_global_subqueries_subquery_or_table_name/query.sql @@ -0,0 +1,7 @@ +-- Tags: global + +SELECT + cityHash64(number GLOBAL IN (NULL, -2147483648, -9223372036854775808), nan, 1024, NULL, NULL, 1.000100016593933, NULL), + (NULL, cityHash64(inf, -2147483648, NULL, NULL, 10.000100135803223), cityHash64(1.1754943508222875e-38, NULL, NULL, NULL), 2147483647) +FROM cluster(test_cluster_two_shards_localhost, numbers((NULL, cityHash64(0., 65536, NULL, NULL, 10000000000., NULL), 0) GLOBAL IN (some_identifier), 65536)) +WHERE number GLOBAL IN [1025] --{ serverError BAD_ARGUMENTS, WRONG_GLOBAL_SUBQUERY, UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/02002_parse_map_int_key/ast.json b/parser/testdata/02002_parse_map_int_key/ast.json new file mode 100644 index 000000000..f6d1e2d06 --- /dev/null +++ b/parser/testdata/02002_parse_map_int_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_map_int_key (children 1)" + }, + { + "explain": " Identifier t_map_int_key" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001194606, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/02002_parse_map_int_key/metadata.json b/parser/testdata/02002_parse_map_int_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02002_parse_map_int_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02002_parse_map_int_key/query.sql b/parser/testdata/02002_parse_map_int_key/query.sql new file mode 100644 index 000000000..c4b48e3d2 --- /dev/null +++ b/parser/testdata/02002_parse_map_int_key/query.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS t_map_int_key; +CREATE TABLE t_map_int_key (m1 Map(UInt32, UInt32), m2 Map(Date, UInt32)) ENGINE = Memory; + +INSERT INTO t_map_int_key FORMAT CSV "{1:2, 3: 4, 5 :6, 7 : 8}","{'2021-05-20':1, '2021-05-21': 2, '2021-05-22' :3, '2021-05-23' : 4}" + +SELECT m1, m2 FROM t_map_int_key; + +DROP TABLE t_map_int_key; diff --git a/parser/testdata/02002_sampling_and_unknown_column_bug/ast.json b/parser/testdata/02002_sampling_and_unknown_column_bug/ast.json new file mode 100644 index 000000000..ce162dc04 --- /dev/null +++ b/parser/testdata/02002_sampling_and_unknown_column_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery sessions (children 1)" + }, + { + "explain": " Identifier sessions" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00139994, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git 
a/parser/testdata/02002_sampling_and_unknown_column_bug/metadata.json b/parser/testdata/02002_sampling_and_unknown_column_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02002_sampling_and_unknown_column_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02002_sampling_and_unknown_column_bug/query.sql b/parser/testdata/02002_sampling_and_unknown_column_bug/query.sql new file mode 100644 index 000000000..838d7a552 --- /dev/null +++ b/parser/testdata/02002_sampling_and_unknown_column_bug/query.sql @@ -0,0 +1,20 @@ +drop table if exists sessions; +CREATE TABLE sessions +( + `user_id` UInt64 +) +ENGINE = MergeTree +ORDER BY user_id +SAMPLE BY user_id; + +insert into sessions values(1); + +SELECT + sum(user_id * _sample_factor) +FROM sessions +SAMPLE 10000000; + +SELECT + uniq(user_id) a, min(_sample_factor) x, a*x +FROM sessions +SAMPLE 10000000; diff --git a/parser/testdata/02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET/ast.json b/parser/testdata/02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET/metadata.json b/parser/testdata/02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET/query.sql b/parser/testdata/02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET/query.sql new file mode 100644 index 000000000..1a446a806 --- /dev/null +++ b/parser/testdata/02003_WithMergeableStateAfterAggregationAndLimit_LIMIT_BY_LIMIT_OFFSET/query.sql @@ -0,0 +1,36 @@ +-- Here we use a trick with shardNum() to generate unique data on each shard. +-- This is needed because distributed_group_by_no_merge=2 will use WithMergeableStateAfterAggregationAndLimit, +-- which assumes that the data on shards is unique +-- (LIMIT BY will be applied only on the shards, not on the initiator). + +-- To distinguish the echoed queries from the comments above, we use SELECT FORMAT Null.
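Before the echoed queries that follow: both remote() variants below are expected to reproduce the plain single-node evaluation of the same pipeline. For reference, a minimal baseline sketch (not part of the original test; assumes standard ClickHouse LIMIT BY and LIMIT offset semantics):

    -- numbers(40) folded modulo 20 yields each value in 0..19 twice;
    -- GROUP BY collapses the duplicates and LIMIT 1 BY number keeps at
    -- most one row per value, so LIMIT 5, 5 skips 0..4 and returns 5..9.
    SELECT number % 20 AS number
    FROM numbers(40)
    GROUP BY number
    ORDER BY number ASC
    LIMIT 1 BY number
    LIMIT 5, 5;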
+SELECT '' FORMAT Null; + +-- { echo } +SELECT * +FROM remote('127.{1,2}', view( + SELECT number%20 number + FROM numbers(40) + WHERE (number % 2) = (shardNum() - 1) +), number) +GROUP BY number +ORDER BY number ASC +LIMIT 1 BY number +LIMIT 5, 5 +SETTINGS + optimize_skip_unused_shards=1, + optimize_distributed_group_by_sharding_key=1, + distributed_push_down_limit=1; +SELECT * +FROM remote('127.{1,2}', view( + SELECT number%20 number + FROM numbers(40) + WHERE (number % 2) = (shardNum() - 1) +), number) +GROUP BY number +ORDER BY number ASC +LIMIT 1 BY number +LIMIT 5, 5 +SETTINGS + distributed_group_by_no_merge=2, + distributed_push_down_limit=1; diff --git a/parser/testdata/02003_bug_from_23515/ast.json b/parser/testdata/02003_bug_from_23515/ast.json new file mode 100644 index 000000000..6a9ac1ccc --- /dev/null +++ b/parser/testdata/02003_bug_from_23515/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00134079, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02003_bug_from_23515/metadata.json b/parser/testdata/02003_bug_from_23515/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02003_bug_from_23515/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02003_bug_from_23515/query.sql b/parser/testdata/02003_bug_from_23515/query.sql new file mode 100644 index 000000000..febd59b07 --- /dev/null +++ b/parser/testdata/02003_bug_from_23515/query.sql @@ -0,0 +1,10 @@ +drop table if exists tab; +create table tab (a LowCardinality(String), b LowCardinality(String)) engine = MergeTree partition by a order by tuple() settings min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0; + +insert into tab values ('1', 'a'), ('2', 'b'); +SELECT a = '1' FROM tab WHERE a = '1' and b='a'; + +-- Fuzzed +SELECT * FROM tab WHERE (a = '1') AND 0 AND (b = 'a'); + +drop table if exists tab; diff --git a/parser/testdata/02004_intersect_except_const_column/ast.json b/parser/testdata/02004_intersect_except_const_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02004_intersect_except_const_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02004_intersect_except_const_column/metadata.json b/parser/testdata/02004_intersect_except_const_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02004_intersect_except_const_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02004_intersect_except_const_column/query.sql b/parser/testdata/02004_intersect_except_const_column/query.sql new file mode 100644 index 000000000..6fabf34d3 --- /dev/null +++ b/parser/testdata/02004_intersect_except_const_column/query.sql @@ -0,0 +1,20 @@ +-- { echo } +-- Test: crash the server +SELECT 'fooooo' INTERSECT DISTINCT SELECT 'fooooo'; +SELECT 'fooooo' EXCEPT ALL SELECT 'fooooo'; + +-- Test: intersect return incorrect result for const column +SELECT 1 FROM numbers(10) INTERSECT SELECT 1 FROM numbers(10); +SELECT toString(1) FROM numbers(10) INTERSECT SELECT toString(1) FROM numbers(10); +SELECT '1' FROM numbers(10) INTERSECT SELECT '1' FROM numbers(10); +SELECT 1 FROM numbers(10) INTERSECT DISTINCT SELECT 1 FROM numbers(10); +SELECT toString(1) FROM numbers(10) INTERSECT DISTINCT SELECT 
toString(1) FROM numbers(10); +SELECT '1' FROM numbers(10) INTERSECT DISTINCT SELECT '1' FROM numbers(10); + +-- Test: except return incorrect result for const column +SELECT 2 FROM numbers(10) EXCEPT SELECT 1 FROM numbers(5); +SELECT toString(2) FROM numbers(10) EXCEPT SELECT toString(1) FROM numbers(5); +SELECT '2' FROM numbers(10) EXCEPT SELECT '1' FROM numbers(5); +SELECT 2 FROM numbers(10) EXCEPT DISTINCT SELECT 1 FROM numbers(5); +SELECT toString(2) FROM numbers(10) EXCEPT DISTINCT SELECT toString(1) FROM numbers(5); +SELECT '2' FROM numbers(10) EXCEPT DISTINCT SELECT '1' FROM numbers(5); \ No newline at end of file diff --git a/parser/testdata/02004_intersect_except_distinct_operators/ast.json b/parser/testdata/02004_intersect_except_distinct_operators/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02004_intersect_except_distinct_operators/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02004_intersect_except_distinct_operators/metadata.json b/parser/testdata/02004_intersect_except_distinct_operators/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02004_intersect_except_distinct_operators/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02004_intersect_except_distinct_operators/query.sql b/parser/testdata/02004_intersect_except_distinct_operators/query.sql new file mode 100644 index 000000000..efb48e59f --- /dev/null +++ b/parser/testdata/02004_intersect_except_distinct_operators/query.sql @@ -0,0 +1,58 @@ +-- { echo } + +set intersect_default_mode = 'DISTINCT'; +set except_default_mode = 'DISTINCT'; + +select 1 intersect select 1; +select 2 intersect select 1; +select 1 except select 1; +select 2 except select 1; + +select 5 from numbers(20) intersect select number from numbers(5, 5); +select number from numbers(10) except select number from numbers(5); +select number, number+10 from numbers(12) except select number+5, number+15 from numbers(10); + +select 1 except select 2 intersect select 1; +select 1 except select 2 intersect select 2; +select 1 intersect select 1 except select 2; +select 1 intersect select 1 except select 1; +select 1 intersect select 1 except select 2 intersect select 1 except select 3 intersect select 1; +select 1 intersect select 1 except select 2 intersect select 1 except select 3 intersect select 2; +select 1 intersect select 1 except select 2 intersect select 1 except select 3 intersect select 2 except select 1; + +select number%3 from numbers(10) except select 1; +select number from numbers(100) intersect select number from numbers(20, 60) except select number from numbers(30, 20) except select number from numbers(60, 20); + +select * from (select 1 intersect select 1); +with (select number from numbers(10) intersect select 5) as a select a * 10; +with (select 5 except select 1) as a select a except select 5; +with (select number from numbers(10) intersect select 5) as a select a intersect select 1; +with (select number from numbers(10) intersect select 5) as a select a except select 1; +select count() from (select number from numbers(10) except select 5); +select count() from (select number from numbers(1000000) intersect select number from numbers(200000, 600000)); +select count() from (select number from numbers(100) intersect select number from numbers(20, 60) except select number from numbers(30, 20) except select number from numbers(60, 20)); +select count() from (select number from numbers(100) intersect 
select number from numbers(20, 60) except select number from numbers(30, 20) except select number from numbers(60, 20) union all select number from numbers(100, 10)); +select count() from (select number from numbers(1000000) intersect select number from numbers(200000, 600000) except select number from numbers(300000, 200000) except select number from numbers(600000, 200000)); + +select count() from (select 1 intersect select 1) limit 100; +select count() from (select 1 except select 2) limit 100; +with (select count() from (select 1 union distinct select 2 except select 1)) as max +select count() from (select 1 union all select max) limit 100; + +select 1 union all select 1 intersect select 1; +select 1 union all select 1 intersect select 2; +select * from (select 1 union all select 2 union all select 3 union all select 4 except select 3 union all select 5) order by 1; +select * from (select 1 union all select 2 union all select 3 union all select 4 intersect select 3 union all select 5) order by 1; +select * from (select 1 union all select 2 union all select 3 union all select 4 intersect select 3 union all select 5 except select 1) order by 1; + +select 1 intersect (select 1 except select 2); +select 1 union all select 2 except (select 2 except select 1 union all select 1) except select 4; +select 1 intersect select count() from (select 1 except select 2 intersect select 2 union all select 1); + +explain syntax select 1 intersect select 1; +explain syntax select 1 except select 1; +explain syntax select 1 union all select 2 except (select 2 except select 1 union all select 1) except select 4; + +set limit=1; +select 1 intersect select 1; +(((select 1) intersect select 1)); diff --git a/parser/testdata/02004_intersect_except_operators/ast.json b/parser/testdata/02004_intersect_except_operators/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02004_intersect_except_operators/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02004_intersect_except_operators/metadata.json b/parser/testdata/02004_intersect_except_operators/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02004_intersect_except_operators/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02004_intersect_except_operators/query.sql b/parser/testdata/02004_intersect_except_operators/query.sql new file mode 100644 index 000000000..7ed756cc5 --- /dev/null +++ b/parser/testdata/02004_intersect_except_operators/query.sql @@ -0,0 +1,54 @@ +-- { echo } +select 1 intersect select 1; +select 2 intersect select 1; +select 1 except select 1; +select 2 except select 1; + +select number from numbers(20) intersect select number from numbers(5, 5); +select number from numbers(10) except select number from numbers(5); +select number, number+10 from numbers(12) except select number+5, number+15 from numbers(10); + +select 1 except select 2 intersect select 1; +select 1 except select 2 intersect select 2; +select 1 intersect select 1 except select 2; +select 1 intersect select 1 except select 1; +select 1 intersect select 1 except select 2 intersect select 1 except select 3 intersect select 1; +select 1 intersect select 1 except select 2 intersect select 1 except select 3 intersect select 2; +select 1 intersect select 1 except select 2 intersect select 1 except select 3 intersect select 2 except select 1; + +select number from numbers(10) except select 5; +select number from numbers(100) intersect select number 
from numbers(20, 60) except select number from numbers(30, 20) except select number from numbers(60, 20); + +select * from (select 1 intersect select 1); +with (select number from numbers(10) intersect select 5) as a select a * 10; +with (select 5 except select 1) as a select a except select 5; +with (select number from numbers(10) intersect select 5) as a select a intersect select 1; +with (select number from numbers(10) intersect select 5) as a select a except select 1; +select count() from (select number from numbers(10) except select 5); +select count() from (select number from numbers(1000000) intersect select number from numbers(200000, 600000)); +select count() from (select number from numbers(100) intersect select number from numbers(20, 60) except select number from numbers(30, 20) except select number from numbers(60, 20)); +select count() from (select number from numbers(100) intersect select number from numbers(20, 60) except select number from numbers(30, 20) except select number from numbers(60, 20) union all select number from numbers(100, 10)); +select count() from (select number from numbers(1000000) intersect select number from numbers(200000, 600000) except select number from numbers(300000, 200000) except select number from numbers(600000, 200000)); + +select count() from (select 1 intersect select 1) limit 100; +select count() from (select 1 except select 2) limit 100; +with (select count() from (select 1 union distinct select 2 except select 1)) as max +select count() from (select 1 union all select max) limit 100; + +select 1 union all select 1 intersect select 1; +select 1 union all select 1 intersect select 2; +select * from (select 1 union all select 2 union all select 3 union all select 4 except select 3 union all select 5) order by 1; +select * from (select 1 union all select 2 union all select 3 union all select 4 intersect select 3 union all select 5) order by 1; +select * from (select 1 union all select 2 union all select 3 union all select 4 intersect select 3 union all select 5 except select 1) order by 1; + +select 1 intersect (select 1 except select 2); +select 1 union all select 2 except (select 2 except select 1 union all select 1) except select 4; +select 1 intersect select count() from (select 1 except select 2 intersect select 2 union all select 1); + +explain syntax select 1 intersect select 1; +explain syntax select 1 except select 1; +explain syntax select 1 union all select 2 except (select 2 except select 1 union all select 1) except select 4; + +set limit=1; +select 1 intersect select 1; +(((select 1) intersect select 1)); diff --git a/parser/testdata/02004_invalid_partition_mutation_stuck/ast.json b/parser/testdata/02004_invalid_partition_mutation_stuck/ast.json new file mode 100644 index 000000000..4e7ace6b5 --- /dev/null +++ b/parser/testdata/02004_invalid_partition_mutation_stuck/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001115565, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02004_invalid_partition_mutation_stuck/metadata.json b/parser/testdata/02004_invalid_partition_mutation_stuck/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02004_invalid_partition_mutation_stuck/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02004_invalid_partition_mutation_stuck/query.sql 
b/parser/testdata/02004_invalid_partition_mutation_stuck/query.sql new file mode 100644 index 000000000..07706c27c --- /dev/null +++ b/parser/testdata/02004_invalid_partition_mutation_stuck/query.sql @@ -0,0 +1,33 @@ +SET mutations_sync=2; + +DROP TABLE IF EXISTS rep_data; +CREATE TABLE rep_data +( + p Int, + t DateTime, + INDEX idx t TYPE minmax GRANULARITY 1 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/rep_data', '1') +PARTITION BY p +ORDER BY t +SETTINGS number_of_free_entries_in_pool_to_execute_mutation=0; +INSERT INTO rep_data VALUES (1, now()); +ALTER TABLE rep_data MATERIALIZE INDEX idx IN PARTITION ID 'NO_SUCH_PART'; -- { serverError INVALID_PARTITION_VALUE } +ALTER TABLE rep_data MATERIALIZE INDEX idx IN PARTITION ID '1'; +ALTER TABLE rep_data MATERIALIZE INDEX idx IN PARTITION ID '2'; + +DROP TABLE IF EXISTS data; +CREATE TABLE data +( + p Int, + t DateTime, + INDEX idx t TYPE minmax GRANULARITY 1 +) +ENGINE = MergeTree +PARTITION BY p +ORDER BY t +SETTINGS number_of_free_entries_in_pool_to_execute_mutation=0; +INSERT INTO data VALUES (1, now()); +ALTER TABLE data MATERIALIZE INDEX idx IN PARTITION ID 'NO_SUCH_PART'; -- { serverError INVALID_PARTITION_VALUE } +ALTER TABLE data MATERIALIZE INDEX idx IN PARTITION ID '1'; +ALTER TABLE data MATERIALIZE INDEX idx IN PARTITION ID '2'; diff --git a/parser/testdata/02004_max_hyperscan_regex_length/ast.json b/parser/testdata/02004_max_hyperscan_regex_length/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02004_max_hyperscan_regex_length/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02004_max_hyperscan_regex_length/metadata.json b/parser/testdata/02004_max_hyperscan_regex_length/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02004_max_hyperscan_regex_length/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02004_max_hyperscan_regex_length/query.sql b/parser/testdata/02004_max_hyperscan_regex_length/query.sql new file mode 100644 index 000000000..2133bcf88 --- /dev/null +++ b/parser/testdata/02004_max_hyperscan_regex_length/query.sql @@ -0,0 +1,56 @@ +-- Tags: no-debug, no-fasttest, use-vectorscan + +set max_hyperscan_regexp_length = 1; +set max_hyperscan_regexp_total_length = 1; + +SELECT '- const pattern'; + +select multiMatchAny('123', ['1']); +select multiMatchAny('123', ['12']); -- { serverError BAD_ARGUMENTS } +select multiMatchAny('123', ['1', '2']); -- { serverError BAD_ARGUMENTS } + +select multiMatchAnyIndex('123', ['1']); +select multiMatchAnyIndex('123', ['12']); -- { serverError BAD_ARGUMENTS } +select multiMatchAnyIndex('123', ['1', '2']); -- { serverError BAD_ARGUMENTS } + +select multiMatchAllIndices('123', ['1']); +select multiMatchAllIndices('123', ['12']); -- { serverError BAD_ARGUMENTS } +select multiMatchAllIndices('123', ['1', '2']); -- { serverError BAD_ARGUMENTS } + +select multiFuzzyMatchAny('123', 0, ['1']); +select multiFuzzyMatchAny('123', 0, ['12']); -- { serverError BAD_ARGUMENTS } +select multiFuzzyMatchAny('123', 0, ['1', '2']); -- { serverError BAD_ARGUMENTS } + +select multiFuzzyMatchAnyIndex('123', 0, ['1']); +select multiFuzzyMatchAnyIndex('123', 0, ['12']); -- { serverError BAD_ARGUMENTS } +select multiFuzzyMatchAnyIndex('123', 0, ['1', '2']); -- { serverError BAD_ARGUMENTS } + +select multiFuzzyMatchAllIndices('123', 0, ['1']); +select multiFuzzyMatchAllIndices('123', 0, ['12']); -- { serverError BAD_ARGUMENTS } +select 
multiFuzzyMatchAllIndices('123', 0, ['1', '2']); -- { serverError BAD_ARGUMENTS } + +SELECT '- non-const pattern'; + +select multiMatchAny(materialize('123'), materialize(['1'])); +select multiMatchAny(materialize('123'), materialize(['12'])); -- { serverError BAD_ARGUMENTS } +select multiMatchAny(materialize('123'), materialize(['1', '2'])); -- { serverError BAD_ARGUMENTS } + +select multiMatchAnyIndex(materialize('123'), materialize(['1'])); +select multiMatchAnyIndex(materialize('123'), materialize(['12'])); -- { serverError BAD_ARGUMENTS } +select multiMatchAnyIndex(materialize('123'), materialize(['1', '2'])); -- { serverError BAD_ARGUMENTS } + +select multiMatchAllIndices(materialize('123'), materialize(['1'])); +select multiMatchAllIndices(materialize('123'), materialize(['12'])); -- { serverError BAD_ARGUMENTS } +select multiMatchAllIndices(materialize('123'), materialize(['1', '2'])); -- { serverError BAD_ARGUMENTS } + +select multiFuzzyMatchAny(materialize('123'), 0, materialize(['1'])); +select multiFuzzyMatchAny(materialize('123'), 0, materialize(['12'])); -- { serverError BAD_ARGUMENTS } +select multiFuzzyMatchAny(materialize('123'), 0, materialize(['1', '2'])); -- { serverError BAD_ARGUMENTS } + +select multiFuzzyMatchAnyIndex(materialize('123'), 0, materialize(['1'])); +select multiFuzzyMatchAnyIndex(materialize('123'), 0, materialize(['12'])); -- { serverError BAD_ARGUMENTS } +select multiFuzzyMatchAnyIndex(materialize('123'), 0, materialize(['1', '2'])); -- { serverError BAD_ARGUMENTS } + +select multiFuzzyMatchAllIndices(materialize('123'), 0, materialize(['1'])); +select multiFuzzyMatchAllIndices(materialize('123'), 0, materialize(['12'])); -- { serverError BAD_ARGUMENTS } +select multiFuzzyMatchAllIndices(materialize('123'), 0, materialize(['1', '2'])); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02005_log_formatted_queries/ast.json b/parser/testdata/02005_log_formatted_queries/ast.json new file mode 100644 index 000000000..9b6195a0e --- /dev/null +++ b/parser/testdata/02005_log_formatted_queries/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001090902, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02005_log_formatted_queries/metadata.json b/parser/testdata/02005_log_formatted_queries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02005_log_formatted_queries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02005_log_formatted_queries/query.sql b/parser/testdata/02005_log_formatted_queries/query.sql new file mode 100644 index 000000000..e2176989e --- /dev/null +++ b/parser/testdata/02005_log_formatted_queries/query.sql @@ -0,0 +1,5 @@ +set log_formatted_queries = 1; + +select '02005_log_formatted_queries.sql' from system.one; +system flush logs query_log; +select query, formatted_query from system.query_log where current_database = currentDatabase() and query = 'select \'02005_log_formatted_queries.sql\' from system.one;' and event_date >= yesterday() and event_time > now() - interval 5 minute; diff --git a/parser/testdata/02006_client_test_hint_error_name/ast.json b/parser/testdata/02006_client_test_hint_error_name/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02006_client_test_hint_error_name/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/02006_client_test_hint_error_name/metadata.json b/parser/testdata/02006_client_test_hint_error_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02006_client_test_hint_error_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02006_client_test_hint_error_name/query.sql b/parser/testdata/02006_client_test_hint_error_name/query.sql new file mode 100644 index 000000000..60d840fb4 --- /dev/null +++ b/parser/testdata/02006_client_test_hint_error_name/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest + +select throwIf(1); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } diff --git a/parser/testdata/02006_h3_to_geo_boundary/ast.json b/parser/testdata/02006_h3_to_geo_boundary/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02006_h3_to_geo_boundary/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02006_h3_to_geo_boundary/metadata.json b/parser/testdata/02006_h3_to_geo_boundary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02006_h3_to_geo_boundary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02006_h3_to_geo_boundary/query.sql b/parser/testdata/02006_h3_to_geo_boundary/query.sql new file mode 100644 index 000000000..003429c9c --- /dev/null +++ b/parser/testdata/02006_h3_to_geo_boundary/query.sql @@ -0,0 +1,28 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS h3_indexes; + +CREATE TABLE h3_indexes (h3_index UInt64) ENGINE = Memory; + +-- Coordinates from h3ToGeo test. + +INSERT INTO h3_indexes VALUES (579205133326352383); +INSERT INTO h3_indexes VALUES (581263419093549055); +INSERT INTO h3_indexes VALUES (589753847883235327); +INSERT INTO h3_indexes VALUES (594082350283882495); +INSERT INTO h3_indexes VALUES (598372386957426687); +INSERT INTO h3_indexes VALUES (599542359671177215); +INSERT INTO h3_indexes VALUES (604296355086598143); +INSERT INTO h3_indexes VALUES (608785214872748031); +INSERT INTO h3_indexes VALUES (615732192485572607); +INSERT INTO h3_indexes VALUES (617056794467368959); +INSERT INTO h3_indexes VALUES (624586477873168383); +INSERT INTO h3_indexes VALUES (627882919484481535); +INSERT INTO h3_indexes VALUES (634600058503392255); +INSERT INTO h3_indexes VALUES (635544851677385791); +INSERT INTO h3_indexes VALUES (639763125756281263); +INSERT INTO h3_indexes VALUES (644178757620501158); + +SELECT arrayMap(p -> (round(p.1, 2), round(p.2, 2)), h3ToGeoBoundary(h3_index)) FROM h3_indexes ORDER BY h3_index; + +DROP TABLE h3_indexes; diff --git a/parser/testdata/02006_test_positional_arguments/ast.json b/parser/testdata/02006_test_positional_arguments/ast.json new file mode 100644 index 000000000..666469b98 --- /dev/null +++ b/parser/testdata/02006_test_positional_arguments/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001042929, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02006_test_positional_arguments/metadata.json b/parser/testdata/02006_test_positional_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02006_test_positional_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02006_test_positional_arguments/query.sql b/parser/testdata/02006_test_positional_arguments/query.sql new file 
mode 100644 index 000000000..3ba89af0e --- /dev/null +++ b/parser/testdata/02006_test_positional_arguments/query.sql @@ -0,0 +1,98 @@ +set group_by_two_level_threshold = 100000; +set enable_positional_arguments = 1; +set enable_analyzer = 1; + +drop table if exists test; +drop table if exists test2; + +create table test(x1 Int, x2 Int, x3 Int) engine=Memory(); +insert into test values (1, 10, 100), (10, 1, 10), (100, 100, 1); + +-- { echo } +select x3, x2, x1 from test order by 1; +select x3, x2, x1 from test order by -3; +select x3, x2, x1 from test order by x3; + +select x3, x2, x1 from test order by 3; +select x3, x2, x1 from test order by -1; +select x3, x2, x1 from test order by x1; + +select x3, x2, x1 from test order by 1 desc; +select x3, x2, x1 from test order by -3 desc; +select x3, x2, x1 from test order by x3 desc; + +select x3, x2, x1 from test order by 3 desc; +select x3, x2, x1 from test order by -1 desc; +select x3, x2, x1 from test order by x1 desc; + +insert into test values (1, 10, 100), (10, 1, 10), (100, 100, 1); +select x3, x2 from test group by x3, x2 order by x3; +select x3, x2 from test group by 1, 2 order by x3; + +select x1, x2, x3 from test order by x3 limit 1 by x3; +select x1, x2, x3 from test order by 3 limit 1 by 3; +select x1, x2, x3 from test order by x3 limit 1 by x1; +select x1, x2, x3 from test order by 3 limit 1 by 1; + +explain syntax select x3, x2, x1 from test order by 1; +explain syntax select x3 + 1, x2, x1 from test order by 1; +explain syntax select x3, x2, x1 from test order by -1; +explain syntax select x3 + 1, x2, x1 from test order by -1; +explain syntax select x3, x3 - x2, x2, x1 from test order by 2; +explain syntax select x3, x3 - x2, x2, x1 from test order by -2; +explain syntax select x3, if(x3 > 10, x3, plus(x1, x2)), x1 + x2 from test order by 2; +explain syntax select x3, if(x3 > 10, x3, plus(x1, x2)), x1 + x2 from test order by -2; +explain syntax select max(x1), x2 from test group by 2 order by 1, 2; +explain syntax select max(x1), x2 from test group by -1 order by -2, -1; +explain syntax select 1 + greatest(x1, 1), x2 from test group by 1, 2; +explain syntax select 1 + greatest(x1, 1), x2 from test group by -2, -1; + +select max(x1), x2 from test group by 1, 2; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT, 184 } +select 1 + max(x1), x2 from test group by 1, 2; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT, 184 } +select max(x1), x2 from test group by -2, -1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT, 184 } +select 1 + max(x1), x2 from test group by -2, -1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT, 184 } + +explain syntax select x1 + x3, x3 from test group by 1, 2; +explain syntax select x1 + x3, x3 from test group by -2, -1; + +create table test2(x1 Int, x2 Int, x3 Int) engine=Memory; +insert into test2 values (1, 10, 100), (10, 1, 10), (100, 100, 1); +select x1, x1 * 2, max(x2), max(x3) from test2 group by 2, 1, x1 order by 1, 2, 4 desc, 3 asc; +select x1, x1 * 2, max(x2), max(x3) from test2 group by 2, 1, x1 order by 1, 2, -1 desc, -2 asc; + +select a, b, c, d, e, f from (select 44 a, 88 b, 13 c, 14 d, 15 e, 16 f) t group by 1,2,3,4,5,6 order by a; +select a, b, c, d, e, f from (select 44 a, 88 b, 13 c, 14 d, 15 e, 16 f) t group by 1,2,3,-3,-2,-1 order by a; + +explain syntax select plus(1, 1) as a group by a; +select substr('aaaaaaaaaaaaaa', 8) as a group by a order by a; +select substr('aaaaaaaaaaaaaa', 8) as a group by substr('aaaaaaaaaaaaaa', 8) order by a; + +select b from (select 5 as a, 'Hello' as b order by a); +select b from 
(select 5 as a, 'Hello' as b group by a); +select b from (select 5 as a, 'Hello' as b order by 1); + +drop table if exists tp2; +create table tp2(first_col String, second_col Int32) engine = MergeTree() order by tuple(); +insert into tp2 select 'bbb', 1; +insert into tp2 select 'aaa', 2; +select count(*) from (select first_col, count(second_col) from tp2 group by 1); +select total from (select first_col, count(second_col) as total from tp2 group by 1); +select first_col from (select first_col, second_col as total from tp2 order by 1 desc); +select first_col from (select first_col, second_col as total from tp2 order by 2 desc); +select max from (select max(first_col) as max, second_col as total from tp2 group by 2) order by 1; +with res as (select first_col from (select first_col, second_col as total from tp2 order by 2 desc) limit 1) +select * from res; + +drop table if exists test; +create table test +( +`id` UInt32, +`time` UInt32, +index `id` (`id`) type set(0) granularity 3, +index `time` (`time`) type minmax granularity 3 +) engine = MergeTree() +order by (`time`); + +select count(*) as `value`, 0 as `data` from test group by `data`; + +drop table test; diff --git a/parser/testdata/02006_test_positional_arguments_on_cluster/ast.json b/parser/testdata/02006_test_positional_arguments_on_cluster/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02006_test_positional_arguments_on_cluster/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02006_test_positional_arguments_on_cluster/metadata.json b/parser/testdata/02006_test_positional_arguments_on_cluster/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02006_test_positional_arguments_on_cluster/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02006_test_positional_arguments_on_cluster/query.sql b/parser/testdata/02006_test_positional_arguments_on_cluster/query.sql new file mode 100644 index 000000000..e13ebf491 --- /dev/null +++ b/parser/testdata/02006_test_positional_arguments_on_cluster/query.sql @@ -0,0 +1,56 @@ +-- Tags: no-ordinary-database, no-replicated-database, distributed, zookeeper + +DROP TABLE IF EXISTS t02006 on cluster test_shard_localhost format Null; +DROP TABLE IF EXISTS m02006 on cluster test_shard_localhost format Null; + +CREATE TABLE t02006 on cluster test_shard_localhost (d Date) +ENGINE = MergeTree ORDER BY d +format Null; + +CREATE MATERIALIZED VIEW m02006 on cluster test_shard_localhost +Engine = MergeTree ORDER BY tuple() AS SELECT d, 0 AS i FROM t02006 GROUP BY d, i +format Null; + +ALTER TABLE t02006 on cluster test_shard_localhost ADD COLUMN IF NOT EXISTS f UInt64 format Null; + +DESC t02006; + +DROP TABLE IF EXISTS t02006 on cluster test_shard_localhost format Null; +DROP TABLE IF EXISTS m02006 on cluster test_shard_localhost format Null; +DROP TABLE IF EXISTS tt02006 on cluster test_shard_localhost format Null; + +SET enable_analyzer=1; + +CREATE TABLE t02006 ON CLUSTER test_shard_localhost +( + `a` String, + `b` UInt32 +) +ENGINE = ReplicatedMergeTree +PRIMARY KEY a +ORDER BY a +format Null; + +CREATE TABLE tt02006 ON CLUSTER test_shard_localhost +( + `a` String, + `total` SimpleAggregateFunction(sum, UInt64) +) +ENGINE = ReplicatedAggregatingMergeTree +ORDER BY a +format Null; + +CREATE MATERIALIZED VIEW m02006 ON CLUSTER test_shard_localhost TO tt02006 +AS SELECT + a, + sum(b) AS total +FROM t02006 +GROUP BY 1 +ORDER BY 1 ASC +format Null; + +DESC m02006; + +DROP TABLE IF 
EXISTS t02006 on cluster test_shard_localhost format Null; +DROP TABLE IF EXISTS m02006 on cluster test_shard_localhost format Null; +DROP TABLE IF EXISTS tt02006 on cluster test_shard_localhost format Null; diff --git a/parser/testdata/02006_todatetime64_from_string/ast.json b/parser/testdata/02006_todatetime64_from_string/ast.json new file mode 100644 index 000000000..391b2a550 --- /dev/null +++ b/parser/testdata/02006_todatetime64_from_string/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '2021-03-22'" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal 'Asia\/Tehran'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001243502, + "rows_read": 9, + "bytes_read": 334 + } +} diff --git a/parser/testdata/02006_todatetime64_from_string/metadata.json b/parser/testdata/02006_todatetime64_from_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02006_todatetime64_from_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02006_todatetime64_from_string/query.sql b/parser/testdata/02006_todatetime64_from_string/query.sql new file mode 100644 index 000000000..305dd49f7 --- /dev/null +++ b/parser/testdata/02006_todatetime64_from_string/query.sql @@ -0,0 +1 @@ +SELECT toDateTime64('2021-03-22', 3, 'Asia/Tehran'); diff --git a/parser/testdata/02006_use_constants_in_with_and_select/ast.json b/parser/testdata/02006_use_constants_in_with_and_select/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02006_use_constants_in_with_and_select/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02006_use_constants_in_with_and_select/metadata.json b/parser/testdata/02006_use_constants_in_with_and_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02006_use_constants_in_with_and_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02006_use_constants_in_with_and_select/query.sql b/parser/testdata/02006_use_constants_in_with_and_select/query.sql new file mode 100644 index 000000000..daca6c5d0 --- /dev/null +++ b/parser/testdata/02006_use_constants_in_with_and_select/query.sql @@ -0,0 +1,36 @@ +SELECT + 1 AS max_size, + groupArray(max_size)(col) +FROM + (SELECT col FROM ( + SELECT 1 AS col + UNION ALL + SELECT 2 + ) ORDER BY col); + +WITH 1 AS max_size +SELECT groupArray(max_size)(col) +FROM + (SELECT col FROM ( + SELECT 1 as col + UNION ALL + SELECT 2 + ) ORDER BY col); + +WITH 0.1 AS level +SELECT quantile(level)(number) +FROM numbers(1000); + +SELECT 0.1 AS level, quantile(level)(number) +FROM numbers(1000); + +WITH + 0.1 AS level, + 1 AS max_size +SELECT groupArray(max_size)(col) +FROM + ( + SELECT quantile(level)(number) AS col + FROM numbers(1000) + ); + diff --git a/parser/testdata/02007_ipv4_and_ipv6_to_and_from_string/ast.json b/parser/testdata/02007_ipv4_and_ipv6_to_and_from_string/ast.json new file mode 100644 index 000000000..79ccaf497 --- /dev/null +++ b/parser/testdata/02007_ipv4_and_ipv6_to_and_from_string/ast.json @@ -0,0 +1,55 @@ +{ + 
"meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (alias v) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '127.0.0.1'" + }, + { + "explain": " Literal 'IPv4'" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier v" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001370612, + "rows_read": 11, + "bytes_read": 408 + } +} diff --git a/parser/testdata/02007_ipv4_and_ipv6_to_and_from_string/metadata.json b/parser/testdata/02007_ipv4_and_ipv6_to_and_from_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02007_ipv4_and_ipv6_to_and_from_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02007_ipv4_and_ipv6_to_and_from_string/query.sql b/parser/testdata/02007_ipv4_and_ipv6_to_and_from_string/query.sql new file mode 100644 index 000000000..f392d0125 --- /dev/null +++ b/parser/testdata/02007_ipv4_and_ipv6_to_and_from_string/query.sql @@ -0,0 +1,13 @@ +SELECT CAST('127.0.0.1' as IPv4) as v, toTypeName(v); +SELECT CAST(toIPv4('127.0.0.1') as String) as v, toTypeName(v); + +SELECT CAST('2001:0db8:0000:85a3:0000:0000:ac1f:8001' as IPv6) as v, toTypeName(v); +SELECT CAST(toIPv6('2001:0db8:0000:85a3:0000:0000:ac1f:8001') as String) as v, toTypeName(v); + +SELECT toIPv4OrDefault('hello') as v, toTypeName(v); +SELECT toIPv6OrDefault('hello') as v, toTypeName(v); + +SELECT CAST('hello' as IPv4) as v, toTypeName(v); -- { serverError CANNOT_PARSE_IPV4 } +SELECT CAST('hello' as IPv6) as v, toTypeName(v); -- { serverError CANNOT_PARSE_IPV6 } + +SELECT CAST('1.1.1.1' as IPv6) as v, toTypeName(v); diff --git a/parser/testdata/02007_join_use_nulls/ast.json b/parser/testdata/02007_join_use_nulls/ast.json new file mode 100644 index 000000000..60a064520 --- /dev/null +++ b/parser/testdata/02007_join_use_nulls/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001089256, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02007_join_use_nulls/metadata.json b/parser/testdata/02007_join_use_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02007_join_use_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02007_join_use_nulls/query.sql b/parser/testdata/02007_join_use_nulls/query.sql new file mode 100644 index 000000000..e08fffce3 --- /dev/null +++ b/parser/testdata/02007_join_use_nulls/query.sql @@ -0,0 +1,11 @@ +SET join_use_nulls = 1; + +SELECT *, d.* FROM ( SELECT 1 AS id, 2 AS value ) a SEMI LEFT JOIN ( SELECT 1 AS id, 3 AS values ) AS d USING id; + +SELECT id, toTypeName(id), value, toTypeName(value), d.values, toTypeName(d.values) FROM ( SELECT 1 AS id, 2 AS value ) a SEMI LEFT JOIN ( SELECT 1 AS id, 3 AS values ) AS d USING id; +SELECT id, toTypeName(id), value, toTypeName(value), d.values, toTypeName(d.values) FROM ( SELECT toLowCardinality(1) AS id, toLowCardinality(2) AS value ) a SEMI LEFT JOIN ( SELECT toLowCardinality(1) AS id, toLowCardinality(3) AS 
values ) AS d USING id; +SELECT id, toTypeName(id), value, toTypeName(value), d.id, toTypeName(d.id) FROM ( SELECT toLowCardinality(1) AS id, toLowCardinality(2) AS value ) a SEMI LEFT JOIN ( SELECT toLowCardinality(1) AS id, toLowCardinality(3) AS values ) AS d USING id; +SELECT id, toTypeName(id), value, toTypeName(value), d.values, toTypeName(d.values) FROM ( SELECT 1 AS id, 2 AS value ) a SEMI LEFT JOIN ( SELECT 1 AS id, 3 AS values ) AS d USING id; +SELECT id, toTypeName(id), value, toTypeName(value), d.id, toTypeName(d.id) , d.values, toTypeName(d.values) FROM ( SELECT 1 AS id, 2 AS value ) a SEMI LEFT JOIN ( SELECT 1 AS id, 3 AS values ) AS d USING id; +SELECT id, toTypeName(id), value, toTypeName(value), d.values, toTypeName(d.values) FROM ( SELECT toLowCardinality(1) AS id, toLowCardinality(2) AS value ) a SEMI LEFT JOIN ( SELECT toLowCardinality(1) AS id, toLowCardinality(3) AS values ) AS d USING id; +SELECT id, toTypeName(id), value, toTypeName(value), d.id, toTypeName(d.id) , d.values, toTypeName(d.values) FROM ( SELECT toLowCardinality(1) AS id, toLowCardinality(2) AS value ) a SEMI LEFT JOIN ( SELECT toLowCardinality(1) AS id, toLowCardinality(3) AS values ) AS d USING id; diff --git a/parser/testdata/02007_test_any_all_operators/ast.json b/parser/testdata/02007_test_any_all_operators/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02007_test_any_all_operators/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02007_test_any_all_operators/metadata.json b/parser/testdata/02007_test_any_all_operators/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02007_test_any_all_operators/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02007_test_any_all_operators/query.sql b/parser/testdata/02007_test_any_all_operators/query.sql new file mode 100644 index 000000000..dd539bcbc --- /dev/null +++ b/parser/testdata/02007_test_any_all_operators/query.sql @@ -0,0 +1,28 @@ +-- { echo } +select 1 == any (select number from numbers(10)); +select 1 == any (select number from numbers(2, 10)); + +select 1 != all (select 1 from numbers(10)); +select 1 != all (select number from numbers(10)); + +select 1 == all (select 1 from numbers(10)); +select 1 == all (select number from numbers(10)); + +select 1 != any (select 1 from numbers(10)); +select 1 != any (select number from numbers(10)); + +select number as a from numbers(10) where a == any (select number from numbers(3, 3)); +select number as a from numbers(10) where a != any (select 5 from numbers(3, 3)); + +select 1 < any (select 1 from numbers(10)); +select 1 <= any (select 1 from numbers(10)); +select 1 < any (select number from numbers(10)); +select 1 > any (select number from numbers(10)); +select 1 >= any (select number from numbers(10)); +select 11 > all (select number from numbers(10)); +select 11 <= all (select number from numbers(11)); +select 11 < all (select 11 from numbers(10)); +select 11 > all (select 11 from numbers(10)); +select 11 >= all (select 11 from numbers(10)); +select sum(number) = any(number) from numbers(1) group by number; +select 1 == any (1); diff --git a/parser/testdata/02008_aliased_column_distributed_bug/ast.json b/parser/testdata/02008_aliased_column_distributed_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02008_aliased_column_distributed_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/02008_aliased_column_distributed_bug/metadata.json b/parser/testdata/02008_aliased_column_distributed_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02008_aliased_column_distributed_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02008_aliased_column_distributed_bug/query.sql b/parser/testdata/02008_aliased_column_distributed_bug/query.sql new file mode 100644 index 000000000..9796beefd --- /dev/null +++ b/parser/testdata/02008_aliased_column_distributed_bug/query.sql @@ -0,0 +1,18 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS click_storage; +DROP TABLE IF EXISTS click_storage_dst; + +CREATE TABLE click_storage ( `PhraseID` UInt64, `PhraseProcessedID` UInt64 ALIAS if(PhraseID > 5, PhraseID, 0) ) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO click_storage SELECT number AS PhraseID from numbers(10); + +CREATE TABLE click_storage_dst ( `PhraseID` UInt64, `PhraseProcessedID` UInt64 ) ENGINE = Distributed(test_shard_localhost, currentDatabase(), 'click_storage'); + +SET prefer_localhost_replica = 1; +SELECT materialize(PhraseProcessedID) FROM click_storage_dst; + +SET prefer_localhost_replica = 0; +SELECT materialize(PhraseProcessedID) FROM click_storage_dst; + +DROP TABLE IF EXISTS click_storage; +DROP TABLE IF EXISTS click_storage_dst; diff --git a/parser/testdata/02008_complex_key_range_hashed_dictionary/ast.json b/parser/testdata/02008_complex_key_range_hashed_dictionary/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02008_complex_key_range_hashed_dictionary/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02008_complex_key_range_hashed_dictionary/metadata.json b/parser/testdata/02008_complex_key_range_hashed_dictionary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02008_complex_key_range_hashed_dictionary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02008_complex_key_range_hashed_dictionary/query.sql b/parser/testdata/02008_complex_key_range_hashed_dictionary/query.sql new file mode 100644 index 000000000..ea2dad5c7 --- /dev/null +++ b/parser/testdata/02008_complex_key_range_hashed_dictionary/query.sql @@ -0,0 +1,111 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS date_table; +CREATE TABLE date_table +( + CountryID UInt64, + CountryKey String, + StartDate Date, + EndDate Date, + Tax Float64 +) +ENGINE = MergeTree() +ORDER BY CountryID; + +INSERT INTO date_table VALUES(1, '1', toDate('2019-05-05'), toDate('2019-05-20'), 0.33); +INSERT INTO date_table VALUES(1, '1', toDate('2019-05-21'), toDate('2019-05-30'), 0.42); +INSERT INTO date_table VALUES(2, '2', toDate('2019-05-21'), toDate('2019-05-30'), 0.46); + +DROP DICTIONARY IF EXISTS range_dictionary; +CREATE DICTIONARY range_dictionary +( + CountryID UInt64, + CountryKey String, + StartDate Date, + EndDate Date, + Tax Float64 DEFAULT 0.2 +) +PRIMARY KEY CountryID, CountryKey +SOURCE(CLICKHOUSE(TABLE 'date_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(COMPLEX_KEY_RANGE_HASHED()) +RANGE(MIN StartDate MAX EndDate); + +SELECT 'Dictionary not nullable'; +SELECT 'dictGet'; +SELECT dictGet('range_dictionary', 'Tax', (toUInt64(1), '1'), toDate('2019-05-15')); +SELECT dictGet('range_dictionary', 'Tax', (toUInt64(1), '1'), toDate('2019-05-29')); +SELECT dictGet('range_dictionary', 'Tax', (toUInt64(2), '2'), toDate('2019-05-29')); +SELECT dictGet('range_dictionary', 'Tax', (toUInt64(2), '2'), 
toDate('2019-05-31')); +SELECT dictGetOrDefault('range_dictionary', 'Tax', (toUInt64(2), '2'), toDate('2019-05-31'), 0.4); +SELECT 'dictHas'; +SELECT dictHas('range_dictionary', (toUInt64(1), '1'), toDate('2019-05-15')); +SELECT dictHas('range_dictionary', (toUInt64(1), '1'), toDate('2019-05-29')); +SELECT dictHas('range_dictionary', (toUInt64(2), '2'), toDate('2019-05-29')); +SELECT dictHas('range_dictionary', (toUInt64(2), '2'), toDate('2019-05-31')); +SELECT 'select columns from dictionary'; +SELECT 'allColumns'; +SELECT * FROM range_dictionary ORDER BY CountryID, StartDate, EndDate; +SELECT 'noColumns'; +SELECT 1 FROM range_dictionary ORDER BY CountryID, StartDate, EndDate; +SELECT 'onlySpecificColumns'; +SELECT CountryID, StartDate, Tax FROM range_dictionary ORDER BY CountryID, StartDate, EndDate; +SELECT 'onlySpecificColumn'; +SELECT Tax FROM range_dictionary ORDER BY CountryID, StartDate, EndDate; + +DROP DICTIONARY range_dictionary; +DROP TABLE date_table; + +CREATE TABLE date_table +( + CountryID UInt64, + CountryKey String, + StartDate Date, + EndDate Date, + Tax Nullable(Float64) +) +ENGINE = MergeTree() +ORDER BY CountryID; + +INSERT INTO date_table VALUES(1, '1', toDate('2019-05-05'), toDate('2019-05-20'), 0.33); +INSERT INTO date_table VALUES(1, '1', toDate('2019-05-21'), toDate('2019-05-30'), 0.42); +INSERT INTO date_table VALUES(2, '2', toDate('2019-05-21'), toDate('2019-05-30'), NULL); + +CREATE DICTIONARY range_dictionary_nullable +( + CountryID UInt64, + CountryKey String, + StartDate Date, + EndDate Date, + Tax Nullable(Float64) DEFAULT 0.2 +) +PRIMARY KEY CountryID, CountryKey +SOURCE(CLICKHOUSE(TABLE 'date_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(COMPLEX_KEY_RANGE_HASHED()) +RANGE(MIN StartDate MAX EndDate); + +SELECT 'Dictionary nullable'; +SELECT 'dictGet'; +SELECT dictGet('range_dictionary_nullable', 'Tax', (toUInt64(1), '1'), toDate('2019-05-15')); +SELECT dictGet('range_dictionary_nullable', 'Tax', (toUInt64(1), '1'), toDate('2019-05-29')); +SELECT dictGet('range_dictionary_nullable', 'Tax', (toUInt64(2), '2'), toDate('2019-05-29')); +SELECT dictGet('range_dictionary_nullable', 'Tax', (toUInt64(2), '2'), toDate('2019-05-31')); +SELECT dictGetOrDefault('range_dictionary_nullable', 'Tax', (toUInt64(2), '2'), toDate('2019-05-31'), 0.4); +SELECT 'dictHas'; +SELECT dictHas('range_dictionary_nullable', (toUInt64(1), '1'), toDate('2019-05-15')); +SELECT dictHas('range_dictionary_nullable', (toUInt64(1), '1'), toDate('2019-05-29')); +SELECT dictHas('range_dictionary_nullable', (toUInt64(2), '2'), toDate('2019-05-29')); +SELECT dictHas('range_dictionary_nullable', (toUInt64(2), '2'), toDate('2019-05-31')); +SELECT 'select columns from dictionary'; +SELECT 'allColumns'; +SELECT * FROM range_dictionary_nullable ORDER BY CountryID, StartDate, EndDate; +SELECT 'noColumns'; +SELECT 1 FROM range_dictionary_nullable ORDER BY CountryID, StartDate, EndDate; +SELECT 'onlySpecificColumns'; +SELECT CountryID, StartDate, Tax FROM range_dictionary_nullable ORDER BY CountryID, StartDate, EndDate; +SELECT 'onlySpecificColumn'; +SELECT Tax FROM range_dictionary_nullable ORDER BY CountryID, StartDate, EndDate; + +DROP DICTIONARY range_dictionary_nullable; +DROP TABLE date_table; diff --git a/parser/testdata/02008_materialize_column/ast.json b/parser/testdata/02008_materialize_column/ast.json new file mode 100644 index 000000000..b76ef71b0 --- /dev/null +++ b/parser/testdata/02008_materialize_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } 
+ ], + + "data": + [ + { + "explain": "DropQuery tmp (children 1)" + }, + { + "explain": " Identifier tmp" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00125641, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02008_materialize_column/metadata.json b/parser/testdata/02008_materialize_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02008_materialize_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02008_materialize_column/query.sql b/parser/testdata/02008_materialize_column/query.sql new file mode 100644 index 000000000..a2dbc75e4 --- /dev/null +++ b/parser/testdata/02008_materialize_column/query.sql @@ -0,0 +1,41 @@ +DROP TABLE IF EXISTS tmp; + +SET mutations_sync = 2; + +CREATE TABLE tmp (x Int64) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY tuple(); +INSERT INTO tmp SELECT * FROM system.numbers LIMIT 20; + +ALTER TABLE tmp MATERIALIZE COLUMN x; -- { serverError BAD_ARGUMENTS } + +ALTER TABLE tmp ADD COLUMN s String DEFAULT toString(x); +SELECT arraySort(groupArray(x)), arraySort(groupArray(s)) FROM tmp; + +ALTER TABLE tmp MODIFY COLUMN s String DEFAULT toString(x+1); +SELECT arraySort(groupArray(x)), arraySort(groupArray(s)) FROM tmp; + +ALTER TABLE tmp MATERIALIZE COLUMN s; +ALTER TABLE tmp MODIFY COLUMN s String DEFAULT toString(x+2); +SELECT arraySort(groupArray(x)), arraySort(groupArray(s)) FROM tmp; + +ALTER TABLE tmp CLEAR COLUMN s; -- Need to clear because MATERIALIZE COLUMN won't override past values; +ALTER TABLE tmp MATERIALIZE COLUMN s; +ALTER TABLE tmp MODIFY COLUMN s String DEFAULT toString(x+3); +SELECT arraySort(groupArray(x)), arraySort(groupArray(s)) FROM tmp; +ALTER TABLE tmp DROP COLUMN s; + +ALTER TABLE tmp ADD COLUMN s String MATERIALIZED toString(x); +SELECT arraySort(groupArray(x)), arraySort(groupArray(s)) FROM tmp; + +ALTER TABLE tmp MODIFY COLUMN s String MATERIALIZED toString(x+1); +SELECT arraySort(groupArray(x)), arraySort(groupArray(s)) FROM tmp; + +ALTER TABLE tmp MATERIALIZE COLUMN s; +ALTER TABLE tmp MODIFY COLUMN s String MATERIALIZED toString(x+2); +SELECT arraySort(groupArray(x)), arraySort(groupArray(s)) FROM tmp; + +ALTER TABLE tmp MATERIALIZE COLUMN s; +ALTER TABLE tmp MODIFY COLUMN s String MATERIALIZED toString(x+3); +SELECT arraySort(groupArray(x)), arraySort(groupArray(s)) FROM tmp; +ALTER TABLE tmp DROP COLUMN s; + +DROP TABLE tmp; diff --git a/parser/testdata/02008_test_union_distinct_in_subquery/ast.json b/parser/testdata/02008_test_union_distinct_in_subquery/ast.json new file mode 100644 index 000000000..7aa7aac23 --- /dev/null +++ b/parser/testdata/02008_test_union_distinct_in_subquery/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001173121, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02008_test_union_distinct_in_subquery/metadata.json b/parser/testdata/02008_test_union_distinct_in_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02008_test_union_distinct_in_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02008_test_union_distinct_in_subquery/query.sql b/parser/testdata/02008_test_union_distinct_in_subquery/query.sql new file mode 100644 index 000000000..c5d270a4c --- /dev/null 
+++ b/parser/testdata/02008_test_union_distinct_in_subquery/query.sql @@ -0,0 +1,23 @@ +drop table if exists test; +create table test (name String, uuid UUID) engine=Memory(); +insert into test select '1', '00000000-0000-0000-0000-000000000000'; +insert into test select '2', '00000000-0000-0000-0000-000000000000'; +insert into test select '3', '00000000-0000-0000-0000-000000000000'; +insert into test select '4', '00000000-0000-0000-0000-000000000000'; +insert into test select '5', '00000000-0000-0000-0000-000000000000'; + +-- { echo } +select count() from (select * from test union distinct select * from test); +select count() from (select * from test union distinct select * from test union all select * from test); +select count() from (select * from test union distinct select * from test except select * from test where name = '3'); +select count() from (select * from test intersect (select * from test where toUInt8(name) < 4) union distinct (select * from test where name = '5' or name = '1') except select * from test where name = '3'); + +with (select count() from (select * from test union distinct select * from test except select * from test where toUInt8(name) > 3)) as max +select count() from (select * from test union all select * from test where toUInt8(name) < max); +with (select count() from (select * from test union distinct select * from test except select * from test where toUInt8(name) > 3)) as max +select count() from (select * from test except select * from test where toUInt8(name) < max); + +select uuid from test union distinct select uuid from test; +select uuid from test union distinct select uuid from test union all select uuid from test where name = '1'; +select uuid from (select * from test union distinct select * from test); + diff --git a/parser/testdata/02008_tuple_to_name_value_pairs/ast.json b/parser/testdata/02008_tuple_to_name_value_pairs/ast.json new file mode 100644 index 000000000..fb91f511e --- /dev/null +++ b/parser/testdata/02008_tuple_to_name_value_pairs/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tupleToNameValuePairs (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_3" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001184609, + "rows_read": 11, + "bytes_read": 423 + } +} diff --git a/parser/testdata/02008_tuple_to_name_value_pairs/metadata.json b/parser/testdata/02008_tuple_to_name_value_pairs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02008_tuple_to_name_value_pairs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02008_tuple_to_name_value_pairs/query.sql b/parser/testdata/02008_tuple_to_name_value_pairs/query.sql new file mode 100644 index 000000000..9f3443cf6 --- /dev/null +++ b/parser/testdata/02008_tuple_to_name_value_pairs/query.sql @@ -0,0 +1,25 @@ +SELECT tupleToNameValuePairs(tuple(1, 2, 3)); + +DROP TABLE IF EXISTS test02008; +CREATE TABLE test02008 ( + col Tuple( + a Tuple(key1 int, key2 int), + b Tuple(key1 
int, key2 int) + ) +) ENGINE=Memory(); +INSERT INTO test02008 VALUES (tuple(tuple(1, 2), tuple(3, 4))); +INSERT INTO test02008 VALUES (tuple(tuple(5, 6), tuple(7, 8))); +SELECT tupleToNameValuePairs(col) FROM test02008 ORDER BY col; + +DROP TABLE IF EXISTS test02008; +CREATE TABLE test02008 ( + col Tuple(CPU double, Memory double, Disk double) +) ENGINE=Memory(); +INSERT INTO test02008 VALUES (tuple(3.3, 5.5, 6.6)); +SELECT untuple(arrayJoin(tupleToNameValuePairs(col))) from test02008; + +DROP TABLE IF EXISTS test02008; +SELECT tupleToNameValuePairs(tuple(1, 1.3)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT tupleToNameValuePairs(tuple(1, [1,2])); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT tupleToNameValuePairs(tuple(1, 'a')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT tupleToNameValuePairs(33); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/02009_array_join_partition/ast.json b/parser/testdata/02009_array_join_partition/ast.json new file mode 100644 index 000000000..f0c79867e --- /dev/null +++ b/parser/testdata/02009_array_join_partition/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery table_2009_part (children 3)" + }, + { + "explain": " Identifier table_2009_part" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration i (children 1)" + }, + { + "explain": " DataType Int64" + }, + { + "explain": " ColumnDeclaration d (children 1)" + }, + { + "explain": " DataType Date" + }, + { + "explain": " ColumnDeclaration s (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 3)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function toYYYYMM (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier d" + }, + { + "explain": " Identifier i" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001150839, + "rows_read": 16, + "bytes_read": 559 + } +} diff --git a/parser/testdata/02009_array_join_partition/metadata.json b/parser/testdata/02009_array_join_partition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02009_array_join_partition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02009_array_join_partition/query.sql b/parser/testdata/02009_array_join_partition/query.sql new file mode 100644 index 000000000..3b9468947 --- /dev/null +++ b/parser/testdata/02009_array_join_partition/query.sql @@ -0,0 +1,4 @@ +CREATE TABLE table_2009_part (`i` Int64, `d` Date, `s` String) ENGINE = MergeTree PARTITION BY toYYYYMM(d) ORDER BY i; + +ALTER TABLE table_2009_part ATTACH PARTITION tuple(arrayJoin([0, 1])); -- {serverError BAD_ARGUMENTS} +ALTER TABLE table_2009_part ATTACH PARTITION tuple(toYYYYMM(toDate([arrayJoin([arrayJoin([arrayJoin([arrayJoin([3, materialize(NULL), arrayJoin([1025, materialize(NULL), materialize(NULL)]), NULL])])]), materialize(NULL)])], NULL))); -- {serverError BAD_ARGUMENTS} diff --git a/parser/testdata/02009_decimal_no_trailing_zeros/ast.json b/parser/testdata/02009_decimal_no_trailing_zeros/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02009_decimal_no_trailing_zeros/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02009_decimal_no_trailing_zeros/metadata.json 
b/parser/testdata/02009_decimal_no_trailing_zeros/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02009_decimal_no_trailing_zeros/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02009_decimal_no_trailing_zeros/query.sql b/parser/testdata/02009_decimal_no_trailing_zeros/query.sql new file mode 100644 index 000000000..e88e878b3 --- /dev/null +++ b/parser/testdata/02009_decimal_no_trailing_zeros/query.sql @@ -0,0 +1,37 @@ +-- { echo } + +SELECT 1.123::Decimal64(1); +SELECT 1.123::Decimal64(2); +SELECT 1.123::Decimal64(3); +SELECT 1.123::Decimal64(4); +SELECT 1.123::Decimal64(5); +SELECT 1.123::Decimal64(10); +SELECT 1::Decimal64(0); +SELECT 1::Decimal64(1); +SELECT 1::Decimal64(10); + +SELECT 1.1234567::Decimal32(8); +SELECT 1.1234567890::Decimal64(10); +SELECT 1.1234567890::Decimal128(10); +SELECT 1.1234567890::Decimal256(10); +SELECT 1.123456789012345678901::Decimal256(20); +SELECT 1.123456789012345678901::Decimal256(22); + +SET output_format_decimal_trailing_zeros = 1; + +SELECT 1.123::Decimal64(1); +SELECT 1.123::Decimal64(2); +SELECT 1.123::Decimal64(3); +SELECT 1.123::Decimal64(4); +SELECT 1.123::Decimal64(5); +SELECT 1.123::Decimal64(10); +SELECT 1::Decimal64(0); +SELECT 1::Decimal64(1); +SELECT 1::Decimal64(10); + +SELECT 1.1234567::Decimal32(8); +SELECT 1.1234567890::Decimal64(10); +SELECT 1.1234567890::Decimal128(10); +SELECT 1.1234567890::Decimal256(10); +SELECT 1.123456789012345678901::Decimal256(20); +SELECT 1.123456789012345678901::Decimal256(22); diff --git a/parser/testdata/02010_array_index_bad_cast/ast.json b/parser/testdata/02010_array_index_bad_cast/ast.json new file mode 100644 index 000000000..ffadeeae2 --- /dev/null +++ b/parser/testdata/02010_array_index_bad_cast/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001208444, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02010_array_index_bad_cast/metadata.json b/parser/testdata/02010_array_index_bad_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02010_array_index_bad_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02010_array_index_bad_cast/query.sql b/parser/testdata/02010_array_index_bad_cast/query.sql new file mode 100644 index 000000000..590e60eb4 --- /dev/null +++ b/parser/testdata/02010_array_index_bad_cast/query.sql @@ -0,0 +1,4 @@ +SET allow_suspicious_low_cardinality_types=1; +SELECT has(materialize(CAST(['2021-07-14'] AS Array(LowCardinality(Nullable(DateTime))))), materialize('2021-07-14'::DateTime64(7))); +SELECT has(materialize(CAST(['2021-07-14'] AS Array(LowCardinality(Nullable(DateTime))))), materialize('2021-07-14 00:00:01'::DateTime64(7))); +SELECT has(materialize(CAST(['2021-07-14'] AS Array(LowCardinality(Nullable(DateTime))))), materialize(NULL)); diff --git a/parser/testdata/02011_dictionary_empty_attribute_list/ast.json b/parser/testdata/02011_dictionary_empty_attribute_list/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02011_dictionary_empty_attribute_list/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02011_dictionary_empty_attribute_list/metadata.json b/parser/testdata/02011_dictionary_empty_attribute_list/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02011_dictionary_empty_attribute_list/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02011_dictionary_empty_attribute_list/query.sql b/parser/testdata/02011_dictionary_empty_attribute_list/query.sql new file mode 100644 index 000000000..684cc365b --- /dev/null +++ b/parser/testdata/02011_dictionary_empty_attribute_list/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table (id UInt64) ENGINE=TinyLog; +INSERT INTO test_table VALUES (0); + +DROP DICTIONARY IF EXISTS test_dictionary; +CREATE DICTIONARY test_dictionary (id UInt64) PRIMARY KEY id LAYOUT(DIRECT()) SOURCE(CLICKHOUSE(TABLE 'test_table')); +SELECT * FROM test_dictionary; +SELECT dictHas('test_dictionary', toUInt64(0)); +SELECT dictHas('test_dictionary', toUInt64(1)); + +DROP DICTIONARY test_dictionary; +DROP TABLE test_table; diff --git a/parser/testdata/02011_normalize_utf8/ast.json b/parser/testdata/02011_normalize_utf8/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02011_normalize_utf8/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02011_normalize_utf8/metadata.json b/parser/testdata/02011_normalize_utf8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02011_normalize_utf8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02011_normalize_utf8/query.sql b/parser/testdata/02011_normalize_utf8/query.sql new file mode 100644 index 000000000..acb76b38d --- /dev/null +++ b/parser/testdata/02011_normalize_utf8/query.sql @@ -0,0 +1,44 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS normalize_test; +CREATE TABLE normalize_test (id int, value String) ENGINE = MergeTree ORDER BY value; + + +SELECT + 'ё' AS norm, 'ё' AS denorm, + length(norm), length(denorm), + normalizeUTF8NFC(norm) AS norm_nfc, + normalizeUTF8NFC(denorm) AS denorm_nfc, + length(norm_nfc), + length(denorm_nfc); + + +INSERT INTO normalize_test (id, value) VALUES (1, 'ё'); +INSERT INTO normalize_test (id, value) VALUES (2, 'ё'); +INSERT INTO normalize_test (id, value) VALUES (3, 'జ్ఞ‌ా'); +INSERT INTO normalize_test (id, value) VALUES (4, '本気ですか'); +INSERT INTO normalize_test (id, value) VALUES (5, 'ﷺ'); +INSERT INTO normalize_test (id, value) VALUES (6, 'ᾂ'); +INSERT INTO normalize_test (id, value) VALUES (7, 'ΐ'); +INSERT INTO normalize_test (id, value) VALUES (8, 'שּׁ'); +INSERT INTO normalize_test (id, value) VALUES (9, '𝅘𝅥𝅮'); + + +INSERT INTO normalize_test (id, value) VALUES (10, 'Q̹̣̩̭̰̰̹̄ͬ̿͋̃ṷ̬̰ͥe̘͚͈̰̺̍͐s͎̜̖t͔̣̯̲̜̠ͣ̑ͨ̉̈̈o̲͙̺͊ͯͣ̐̋̂̔ ̳͉͍̒̂è̗ͥͯͨ̍ͮ͛ ̦̹̣̰̐̅̑͑̅̂t͙̭̻̖͛̾e̺͙ͣ͒̚ṣ̠͉͓͔̲̦̎t̖͖̝͓̣ͭ͑̈́̂ỏ̥͕͈͛̓ ̀ͦ̽ͅZͯ̑̎a͆l̻ͨ̋ͧͣͨͬg͉̙̟̾̅̾ͬo̠ͮ͒'); + + + +SELECT + id, value, length(value), + normalizeUTF8NFC(value) AS nfc, length(nfc) AS nfc_len, + normalizeUTF8NFD(value) AS nfd, length(nfd) AS nfd_len, + normalizeUTF8NFKC(value) AS nfkc, length(nfkc) AS nfkc_len, + normalizeUTF8NFKD(value) AS nfkd, length(nfkd) AS nfkd_len +FROM normalize_test +ORDER BY id; + + +SELECT char(228) AS value, normalizeUTF8NFC(value); -- { serverError CANNOT_NORMALIZE_STRING } +SELECT char(228) AS value, normalizeUTF8NFD(value); -- { serverError CANNOT_NORMALIZE_STRING } +SELECT char(228) AS value, normalizeUTF8NFKC(value); -- { serverError CANNOT_NORMALIZE_STRING } +SELECT char(228) AS value, normalizeUTF8NFKD(value); -- { serverError CANNOT_NORMALIZE_STRING } diff --git a/parser/testdata/02011_tuple_vector_functions/ast.json 
b/parser/testdata/02011_tuple_vector_functions/ast.json new file mode 100644 index 000000000..c097de0a4 --- /dev/null +++ b/parser/testdata/02011_tuple_vector_functions/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tupleHammingDistance (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001432185, + "rows_read": 12, + "bytes_read": 473 + } +} diff --git a/parser/testdata/02011_tuple_vector_functions/metadata.json b/parser/testdata/02011_tuple_vector_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02011_tuple_vector_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02011_tuple_vector_functions/query.sql b/parser/testdata/02011_tuple_vector_functions/query.sql new file mode 100644 index 000000000..9575001bd --- /dev/null +++ b/parser/testdata/02011_tuple_vector_functions/query.sql @@ -0,0 +1,112 @@ +SELECT tupleHammingDistance(tuple(1), tuple(1)); +SELECT tupleHammingDistance((1, 3), (1, 2)); + +SELECT (1, 2) + tupleMultiply((3, 4), materialize((5, 1))) - (6, 3); +SELECT vectorDifference(tuplePlus((1, 2), (3, 4)), (5, 6)); +SELECT tupleMinus(materialize(vectorSum(tupleMultiply(materialize((1, 2)), (3, 4)), (5, 6))), (31, 41)); +SELECT tupleDivide((5, 8, 11), (-2, 2, 4)); +SELECT tuple(1) + tuple(2); + +SELECT tupleNegate((1, 0, 3.5)); +SELECT -materialize((1, 2, 3)); +SELECT -tuple(1); + +SELECT tupleMultiplyByNumber((1, 2, 3), 0.5); +SELECT tupleDivideByNumber((1, 2.5, 3), materialize(0.5)); +SELECT tupleMultiplyByNumber(tuple(1), 1); +SELECT tupleDivideByNumber(tuple(1), materialize(1)); + +SELECT materialize((1, 2.0, 3.1)) * 3; +SELECT 5.5 * (2, 4); +SELECT (1, 2) / 2; +SELECT 2 / (1, 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT tuple(1, 2, 3) * tuple(2, 3, 4); +SELECT dotProduct(materialize((-1, 2, 3.002)), materialize((2, 3.4, 4))); +SELECT scalarProduct(tuple(1), tuple(0)); + +SELECT L1Norm((-1, 2, -3)); +SELECT L1Norm((-1, 2.5, -3.6)); +SELECT L2Norm((1, 1.0)); +SELECT L2SquaredNorm((1, 1.0)); +SELECT L2Norm(materialize((-12, 5))); +SELECT L2SquaredNorm(materialize((-12, 5))); + +SELECT max2(materialize(1), 1.5); +SELECT min2(-1, -3); +SELECT LinfNorm((1, -2.3, 1.7)); + +SELECT LpNorm(tuple(-1), 3.3); +SELECT LpNorm(tuple(-1), 3); +SELECT LpNorm(tuple(-1.1), 3); +SELECT LpNorm((95800, 217519, 414560), 4); +SELECT LpNorm((13, -84.4, 91, 63.1), 2) = L2Norm(tuple(13, -84.4, 91, 63.1)); +SELECT LpNorm(materialize((13, -84.4, 91, 63.1)), 1) = L1Norm(tuple(13, -84.4, 91, 63.1)); +SELECT LpNorm((-1, -2), 11.); + +SELECT L1Distance((1, 2, 3), (2, 3, 1)); +SELECT L2Distance(materialize((1, 1)), (3, -1)); +SELECT L2SquaredDistance(materialize((1, 1)), (3, -1)); +SELECT LinfDistance((1, 1), (1, 2)); +SELECT L2Distance((5, 5), (5, 5)); +SELECT L2SquaredDistance((5, 5), (5, 5)); +SELECT LpDistance((1800, 1900), (18, 59), 12) 
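-- (note: the expression below mixes an integer p and a floating-point p for LpDistance in a single query)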
- LpDistance(tuple(-22), tuple(1900), 12.); + +SELECT L1Normalize(materialize((1, -4))); +SELECT L2Normalize((3, 4)); +SELECT LinfNormalize((5, -5, 5.0)); +SELECT LpNormalize((1, pow(31, 1 / 5)), 5.); + +SELECT cosineDistance(materialize((1, 1)), (2, 2)); +SELECT cosineDistance((1, 1), materialize((-3, 3.0))); +SELECT cosineDistance((1, 1), (-1, -1)); +SELECT cosineDistance((1, 0), (0.5, sqrt(3) / 2)); + +SELECT (NULL, 1) + (1, NULL); +SELECT (NULL, 1) * materialize((1, NULL)); +SELECT L2Norm((NULL, 3, 4)); +SELECT L2SquaredNorm((NULL, 3, 4)); +SELECT 2 * (1, 2, NULL); +SELECT (1, 1.0, NULL) / NULL; +SELECT (1, 1.0, NULL) / materialize(NULL); +SELECT -(NULL, NULL, 1); +SELECT (NULL, NULL) * NULL; +SELECT L1Normalize((NULL, 1)); +SELECT cosineDistance((NULL, 1), (NULL, NULL)); +SELECT max2(NULL, 1) - min2(NULL, 1); + +SELECT L1Norm(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT (1, 1) / toString(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT -(1, toString(1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT LpNorm((1, 2), toDecimal32(2, 4)); -- { serverError ILLEGAL_COLUMN } +SELECT (1, 2) * toDecimal32(3.1, 8); + +SELECT cosineDistance((1, 2), (2, 3, 4)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- TODO: what's the expected value of () + ()? Currently it returns 0. +-- SELECT tuple() + tuple(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT LpNorm((1, 2, 3)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT max2(1, 2, -1); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT LpNorm((1, 2, 3), materialize(4.)); -- { serverError ILLEGAL_COLUMN } + +SELECT tuple(*, 1) + tuple(2, *) FROM numbers(3); +SELECT LpDistance(tuple(*, 1), tuple(2, *), * + 1.) FROM numbers(3, 2); -- { serverError ILLEGAL_COLUMN } +SELECT cosineDistance(tuple(*, * + 1), tuple(1, 2)) FROM numbers(1, 3); +SELECT -tuple(NULL, * * 2, *) FROM numbers(2); + +SELECT normL1((1, 1)), normL2((1, 1)), normLinf((1, 1)), normLp((1, 1), 1.); +SELECT distanceL1((1, 1), (1, 1)), distanceL2((1, 1), (1, 1)), distanceLinf((1, 1), (1, 1)), distanceLp((1, 1), (1, 1), 1.); +SELECT normalizeL1((1, 1)), normalizeL2((1, 1)), normalizeLinf((1, 1)), normalizeLp((1, 1), 1.); + +SELECT LpNorm((1, 2, 3), 2.2); +SELECT LpNorm((1.5, 2.5, 4), pi()); +SELECT LpNorm((3, 1, 4), 0); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT LpNorm((1, 2, 3), 0.5); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT LpNorm((1, 2, 3), inf); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT LpNorm((1, 2, 3), -1.); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT LpNorm((1, 2, 3), -1); -- { serverError ILLEGAL_COLUMN } +SELECT LpNorm((1, 2, 3), 0.); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT cosineDistance(materialize((NULL, -2147483648)), (1048577, 1048575)); + +-- no extra parentheses +EXPLAIN SYNTAX SELECT -((3, 7, 3), 100); diff --git a/parser/testdata/02012_changed_enum_type_non_replicated/ast.json b/parser/testdata/02012_changed_enum_type_non_replicated/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02012_changed_enum_type_non_replicated/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02012_changed_enum_type_non_replicated/metadata.json b/parser/testdata/02012_changed_enum_type_non_replicated/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02012_changed_enum_type_non_replicated/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git
a/parser/testdata/02012_changed_enum_type_non_replicated/query.sql b/parser/testdata/02012_changed_enum_type_non_replicated/query.sql new file mode 100644 index 000000000..50d5bfe0d --- /dev/null +++ b/parser/testdata/02012_changed_enum_type_non_replicated/query.sql @@ -0,0 +1,10 @@ +-- Tags: replica + +create table enum_alter_issue (a Enum8('one' = 1, 'two' = 2)) engine = MergeTree() ORDER BY a; +insert into enum_alter_issue values ('one'), ('two'); +alter table enum_alter_issue modify column a Enum8('one' = 1, 'two' = 2, 'three' = 3); +insert into enum_alter_issue values ('one'), ('two'); +alter table enum_alter_issue detach partition id 'all'; +alter table enum_alter_issue attach partition id 'all'; +select * from enum_alter_issue order by a; +drop table enum_alter_issue; diff --git a/parser/testdata/02012_get_server_port/ast.json b/parser/testdata/02012_get_server_port/ast.json new file mode 100644 index 000000000..4d3dc0ca9 --- /dev/null +++ b/parser/testdata/02012_get_server_port/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function getServerPort (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'tcp_port'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001225645, + "rows_read": 7, + "bytes_read": 268 + } +} diff --git a/parser/testdata/02012_get_server_port/metadata.json b/parser/testdata/02012_get_server_port/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02012_get_server_port/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02012_get_server_port/query.sql b/parser/testdata/02012_get_server_port/query.sql new file mode 100644 index 000000000..2cf2014cf --- /dev/null +++ b/parser/testdata/02012_get_server_port/query.sql @@ -0,0 +1,3 @@ +select getServerPort('tcp_port'); + +select getServerPort('unknown'); -- { serverError CLUSTER_DOESNT_EXIST } diff --git a/parser/testdata/02012_low_cardinality_uuid_with_extremes/ast.json b/parser/testdata/02012_low_cardinality_uuid_with_extremes/ast.json new file mode 100644 index 000000000..a3db98f0d --- /dev/null +++ b/parser/testdata/02012_low_cardinality_uuid_with_extremes/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tbl (children 1)" + }, + { + "explain": " Identifier tbl" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001383932, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02012_low_cardinality_uuid_with_extremes/metadata.json b/parser/testdata/02012_low_cardinality_uuid_with_extremes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02012_low_cardinality_uuid_with_extremes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02012_low_cardinality_uuid_with_extremes/query.sql b/parser/testdata/02012_low_cardinality_uuid_with_extremes/query.sql new file mode 100644 index 000000000..191383cc9 --- /dev/null +++ b/parser/testdata/02012_low_cardinality_uuid_with_extremes/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS tbl; + +SET allow_suspicious_low_cardinality_types = 1; +CREATE TABLE tbl (`lc` 
LowCardinality(UUID)) ENGINE = Memory; + +INSERT INTO tbl VALUES ('0562380c-d1f3-4091-83d5-8c972f534317'); + +SET extremes = 1; +SELECT * FROM tbl; + +DROP TABLE tbl; diff --git a/parser/testdata/02012_settings_clause_for_s3/ast.json b/parser/testdata/02012_settings_clause_for_s3/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02012_settings_clause_for_s3/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02012_settings_clause_for_s3/metadata.json b/parser/testdata/02012_settings_clause_for_s3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02012_settings_clause_for_s3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02012_settings_clause_for_s3/query.sql b/parser/testdata/02012_settings_clause_for_s3/query.sql new file mode 100644 index 000000000..87d45c9d5 --- /dev/null +++ b/parser/testdata/02012_settings_clause_for_s3/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +DROP TABLE IF EXISTS table_with_range; + +CREATE TABLE table_with_range(`name` String,`number` UInt32) ENGINE = S3('http://localhost:11111/test/tsv_with_header.tsv', 'test', 'testtest', 'TSVWithNames') SETTINGS input_format_with_names_use_header = 1; + +select * from table_with_range; + +DROP TABLE IF EXISTS table_with_range; diff --git a/parser/testdata/02012_sha512_fixedstring/ast.json b/parser/testdata/02012_sha512_fixedstring/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02012_sha512_fixedstring/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02012_sha512_fixedstring/metadata.json b/parser/testdata/02012_sha512_fixedstring/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02012_sha512_fixedstring/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02012_sha512_fixedstring/query.sql b/parser/testdata/02012_sha512_fixedstring/query.sql new file mode 100644 index 000000000..ca9520350 --- /dev/null +++ b/parser/testdata/02012_sha512_fixedstring/query.sql @@ -0,0 +1,21 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on OpenSSL + +SELECT hex(SHA512('')); +SELECT hex(SHA512('abc')); + +SELECT hex(SHA512_256('')); +SELECT hex(SHA512_256('abc')); + +DROP TABLE IF EXISTS defaults; +CREATE TABLE defaults +( + s FixedString(20) +)ENGINE = Memory(); + +INSERT INTO defaults SELECT s FROM generateRandom('s FixedString(20)', 1, 1, 1) LIMIT 20; + +SELECT hex(SHA512(s)) FROM defaults; +SELECT hex(SHA512_256(s)) FROM defaults; + +DROP TABLE defaults; diff --git a/parser/testdata/02012_zookeeper_changed_enum_type/ast.json b/parser/testdata/02012_zookeeper_changed_enum_type/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02012_zookeeper_changed_enum_type/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02012_zookeeper_changed_enum_type/metadata.json b/parser/testdata/02012_zookeeper_changed_enum_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02012_zookeeper_changed_enum_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02012_zookeeper_changed_enum_type/query.sql b/parser/testdata/02012_zookeeper_changed_enum_type/query.sql new file mode 100644 index 000000000..5dc9ef8ca --- /dev/null +++ b/parser/testdata/02012_zookeeper_changed_enum_type/query.sql @@ -0,0 +1,16 @@ +-- Tags: 
zookeeper, no-replicated-database +-- Tag no-replicated-database: Fails due to additional replicas or shards + +create table enum_alter_issue (a Enum8('one' = 1, 'two' = 2), b Int) +engine = ReplicatedMergeTree('/clickhouse/tables/{database}/test_02012/enum_alter_issue', 'r1') +ORDER BY a; + +insert into enum_alter_issue values ('one', 1), ('two', 2); +alter table enum_alter_issue modify column a Enum8('one' = 1, 'two' = 2, 'three' = 3); +insert into enum_alter_issue values ('one', 3), ('two', 4); + +alter table enum_alter_issue detach partition id 'all'; +alter table enum_alter_issue attach partition id 'all'; +select * from enum_alter_issue order by b; + +drop table enum_alter_issue; diff --git a/parser/testdata/02012_zookeeper_changed_enum_type_incompatible/ast.json b/parser/testdata/02012_zookeeper_changed_enum_type_incompatible/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02012_zookeeper_changed_enum_type_incompatible/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02012_zookeeper_changed_enum_type_incompatible/metadata.json b/parser/testdata/02012_zookeeper_changed_enum_type_incompatible/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02012_zookeeper_changed_enum_type_incompatible/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02012_zookeeper_changed_enum_type_incompatible/query.sql b/parser/testdata/02012_zookeeper_changed_enum_type_incompatible/query.sql new file mode 100644 index 000000000..b83f02dc7 --- /dev/null +++ b/parser/testdata/02012_zookeeper_changed_enum_type_incompatible/query.sql @@ -0,0 +1,16 @@ +-- Tags: zookeeper, no-replicated-database +-- Tag no-replicated-database: Fails due to additional replicas or shards + +drop table if exists enum_alter_issue; +create table enum_alter_issue (a Enum16('one' = 1, 'two' = 2), b Int) +engine = ReplicatedMergeTree('/clickhouse/tables/{database}/test_02012/enum_alter_issue', 'r2') +ORDER BY b; + +insert into enum_alter_issue values ('one', 1), ('two', 1); +alter table enum_alter_issue detach partition id 'all'; +alter table enum_alter_issue modify column a Enum8('one' = 1, 'two' = 2, 'three' = 3); +insert into enum_alter_issue values ('one', 1), ('two', 1); + +alter table enum_alter_issue attach partition id 'all'; +select * from enum_alter_issue; +drop table enum_alter_issue; diff --git a/parser/testdata/02013_bloom_filter_hasAll/ast.json b/parser/testdata/02013_bloom_filter_hasAll/ast.json new file mode 100644 index 000000000..18ee06f78 --- /dev/null +++ b/parser/testdata/02013_bloom_filter_hasAll/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery bftest (children 1)" + }, + { + "explain": " Identifier bftest" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001476465, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/02013_bloom_filter_hasAll/metadata.json b/parser/testdata/02013_bloom_filter_hasAll/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02013_bloom_filter_hasAll/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02013_bloom_filter_hasAll/query.sql b/parser/testdata/02013_bloom_filter_hasAll/query.sql new file mode 100644 index 000000000..1058f8df7 --- /dev/null +++ b/parser/testdata/02013_bloom_filter_hasAll/query.sql @@ -0,0 +1,41 @@ +DROP TABLE IF EXISTS bftest; +CREATE TABLE 
bftest ( + k Int64, + y Array(Int64) DEFAULT x, + x Array(Int64), + index ix1(x) TYPE bloom_filter GRANULARITY 3 +) +Engine=MergeTree +ORDER BY k; + +INSERT INTO bftest (k, x) SELECT number, arrayMap(i->rand64()%565656, range(10)) FROM numbers(1000); + +-- index is not used, but query should still work +SELECT count() FROM bftest WHERE hasAll(x, materialize([1,2,3])) FORMAT Null; + +-- verify the expression in WHERE works on a non-index col the same way as on index cols +SELECT count() FROM bftest WHERE hasAll(y, [NULL,-42]) FORMAT Null; +SELECT count() FROM bftest WHERE hasAll(y, [0,NULL]) FORMAT Null; +SELECT count() FROM bftest WHERE hasAll(y, [[123], -42]) FORMAT Null; -- { serverError NO_COMMON_TYPE } +SELECT count() FROM bftest WHERE hasAll(y, [toDecimal32(123, 3), 2]) FORMAT Null; -- different from the indexed column: doesn't fail + +SET force_data_skipping_indices='ix1'; +SELECT count() FROM bftest WHERE has (x, 42) and has(x, -42) FORMAT Null; +SELECT count() FROM bftest WHERE hasAll(x, [42,-42]) FORMAT Null; +SELECT count() FROM bftest WHERE hasAll(x, []) FORMAT Null; +SELECT count() FROM bftest WHERE hasAll(x, [1]) FORMAT Null; + +-- can't use bloom_filter with `hasAll` on non-constant arguments (just like `has`) +SELECT count() FROM bftest WHERE hasAll(x, [1,2,k]) FORMAT Null; -- { serverError INDEX_NOT_USED } + +-- NULLs are not OK +SELECT count() FROM bftest WHERE hasAll(x, [NULL,-42]) FORMAT Null; -- { serverError INDEX_NOT_USED } +SELECT count() FROM bftest WHERE hasAll(x, [0,NULL]) FORMAT Null; -- { serverError INDEX_NOT_USED } + +-- incompatible types +SELECT count() FROM bftest WHERE hasAll(x, [[123], -42]) FORMAT Null; -- { serverError NO_COMMON_TYPE } +SELECT count() FROM bftest WHERE hasAll(x, [toDecimal32(123, 3), 2]) FORMAT Null; -- { serverError INDEX_NOT_USED } + +-- Bug discovered by the AST fuzzer (fixed, shouldn't crash). +SELECT 1 FROM bftest WHERE has(x, -0.) OR 0. FORMAT Null; +SELECT count() FROM bftest WHERE hasAll(x, [0, 1]) OR 0.
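-- (FORMAT Null discards the output: presumably only the absence of a crash is being checked here, not any result)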
FORMAT Null; diff --git a/parser/testdata/02013_emptystring_cast/ast.json b/parser/testdata/02013_emptystring_cast/ast.json new file mode 100644 index 000000000..0231f4059 --- /dev/null +++ b/parser/testdata/02013_emptystring_cast/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_uint64 (children 1)" + }, + { + "explain": " Identifier test_uint64" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001050768, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/02013_emptystring_cast/metadata.json b/parser/testdata/02013_emptystring_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02013_emptystring_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02013_emptystring_cast/query.sql b/parser/testdata/02013_emptystring_cast/query.sql new file mode 100644 index 000000000..b91c523ae --- /dev/null +++ b/parser/testdata/02013_emptystring_cast/query.sql @@ -0,0 +1,19 @@ +drop table if exists test_uint64; +create table test_uint64 (`data` UInt64 Default 0) engine = MergeTree order by tuple(); +insert into test_uint64 values ('0'), (NULL), (1), ('2'); +drop table if exists test_uint64; + +drop table if exists test_float64; +create table test_float64 (`data` Float64 Default 0.0) engine = MergeTree order by tuple(); +insert into test_float64 values ('0.1'), (NULL), (1.1), ('2.2'); +drop table if exists test_float64; + +drop table if exists test_date; +create table test_date (`data` Date) engine = MergeTree order by tuple(); +insert into test_date values ('2021-01-01'), (NULL), ('2021-02-01'), ('2021-03-01'); +drop table if exists test_date; + +drop table if exists test_datetime; +create table test_datetime (`data` DateTime) engine = MergeTree order by tuple(); +insert into test_datetime values ('2021-01-01 00:00:00'), (NULL), ('2021-02-01 01:00:00'), ('2021-03-01 02:00:00'); +drop table if exists test_datetime; diff --git a/parser/testdata/02013_json_function_null_column/ast.json b/parser/testdata/02013_json_function_null_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02013_json_function_null_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02013_json_function_null_column/metadata.json b/parser/testdata/02013_json_function_null_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02013_json_function_null_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02013_json_function_null_column/query.sql b/parser/testdata/02013_json_function_null_column/query.sql new file mode 100644 index 000000000..963d0ee55 --- /dev/null +++ b/parser/testdata/02013_json_function_null_column/query.sql @@ -0,0 +1,33 @@ + +SELECT JSONExtract('{"string_value":null}', 'string_value', 'Nullable(String)') as x, toTypeName(x); +SELECT JSONExtract('{"string_value":null}', 'string_value', 'LowCardinality(Nullable(String))') as x, toTypeName(x); +SELECT JSONExtract('{"string_value":null}', 'string_value', 'String') as x, toTypeName(x); +SELECT JSONExtract(toNullable('{"string_value":null}'), 'string_value', 'Nullable(String)') as x, toTypeName(x); +SELECT JSONExtract(toNullable('{"string_value":null}'), 'string_value', 'LowCardinality(Nullable(String))') as x, toTypeName(x); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT JSONExtract(toNullable('{"string_value":null}'), 
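-- (with a non-Nullable String target type, a JSON null presumably degrades to the type's default value, an empty string)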
'string_value', 'String') as x, toTypeName(x); +SELECT JSONExtract(NULL, 'string_value', 'Nullable(String)') as x, toTypeName(x); +SELECT JSONExtract(NULL, 'string_value', 'LowCardinality(Nullable(String))') as x, toTypeName(x); +SELECT JSONExtract(NULL, 'string_value', 'String') as x, toTypeName(x); +SELECT JSONExtractString('["a", "b", "c", "d", "e"]', idx) FROM (SELECT arrayJoin([2, NULL, 2147483646, 65535, 65535, 3]) AS idx); + +SELECT JSONExtractInt('[1]', toNullable(1)); +SELECT JSONExtractBool('[1]', toNullable(1)); +SELECT JSONExtractFloat('[1]', toNullable(1)); +SELECT JSONExtractString('["a"]', toNullable(1)); +SELECT JSONExtractInt('[1]', toLowCardinality(toNullable(1))); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT JSONExtractArrayRaw('["1"]', toNullable(1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT JSONExtractKeysAndValuesRaw('["1"]', toNullable(1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT JSONExtractKeysAndValues('["1"]', toNullable(1)); -- { serverError ILLEGAL_COLUMN } + +SELECT JSONExtract('[1]', toNullable(1), 'Nullable(Int)'); +SELECT JSONExtract('[1]', toNullable(1), 'Nullable(UInt8)'); +SELECT JSONExtract('[1]', toNullable(1), 'Nullable(Bool)'); +SELECT JSONExtract('[1]', toNullable(1), 'Nullable(Float)'); +SELECT JSONExtract('["a"]', toNullable(1), 'Nullable(String)'); +SELECT JSONExtract('["a"]', toNullable(1), 'Nullable(Int)'); +SELECT JSONExtract('["-a"]', toNullable(1), 'Nullable(Int)'); + +SELECT JSONExtract(materialize('{"key":"value"}'), 'Tuple(key LowCardinality(Nullable(String)))'); +SELECT JSONExtract(materialize('{"key":null}'), 'Tuple(key LowCardinality(Nullable(String)))'); +SELECT JSONExtract(materialize('{"not_a_key":"value"}'), 'Tuple(key LowCardinality(Nullable(String)))'); diff --git a/parser/testdata/02013_lc_nullable_and_infinity/ast.json b/parser/testdata/02013_lc_nullable_and_infinity/ast.json new file mode 100644 index 000000000..f828ab264 --- /dev/null +++ b/parser/testdata/02013_lc_nullable_and_infinity/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001497271, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02013_lc_nullable_and_infinity/metadata.json b/parser/testdata/02013_lc_nullable_and_infinity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02013_lc_nullable_and_infinity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02013_lc_nullable_and_infinity/query.sql b/parser/testdata/02013_lc_nullable_and_infinity/query.sql new file mode 100644 index 000000000..8cca4aa4e --- /dev/null +++ b/parser/testdata/02013_lc_nullable_and_infinity/query.sql @@ -0,0 +1,3 @@ +set receive_timeout = '10', receive_data_timeout_ms = '10000', extremes = '1', allow_suspicious_low_cardinality_types = '1', force_primary_key = '1', join_use_nulls = '1', max_rows_to_read = '2', join_algorithm = 'partial_merge'; + +SELECT * FROM (SELECT dummy AS val FROM system.one) AS s1 ANY LEFT JOIN (SELECT toLowCardinality(dummy) AS rval FROM system.one) AS s2 ON (val + 9223372036854775806) = (rval * 1); diff --git a/parser/testdata/02014_dict_get_nullable_key/ast.json b/parser/testdata/02014_dict_get_nullable_key/ast.json new file mode 100644 index 000000000..0ad5f1a90 --- /dev/null +++ b/parser/testdata/02014_dict_get_nullable_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + 
"type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery dictionary_non_nullable_source_table (children 1)" + }, + { + "explain": " Identifier dictionary_non_nullable_source_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00128178, + "rows_read": 2, + "bytes_read": 124 + } +} diff --git a/parser/testdata/02014_dict_get_nullable_key/metadata.json b/parser/testdata/02014_dict_get_nullable_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02014_dict_get_nullable_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02014_dict_get_nullable_key/query.sql b/parser/testdata/02014_dict_get_nullable_key/query.sql new file mode 100644 index 000000000..d6c058b28 --- /dev/null +++ b/parser/testdata/02014_dict_get_nullable_key/query.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS dictionary_non_nullable_source_table; +CREATE TABLE dictionary_non_nullable_source_table (id UInt64, value String) ENGINE=TinyLog; +INSERT INTO dictionary_non_nullable_source_table VALUES (0, 'Test'); + +DROP DICTIONARY IF EXISTS test_dictionary_non_nullable; +CREATE DICTIONARY test_dictionary_non_nullable (id UInt64, value String) PRIMARY KEY id LAYOUT(DIRECT()) SOURCE(CLICKHOUSE(TABLE 'dictionary_non_nullable_source_table')); + +SELECT 'Non nullable value only null key '; +SELECT dictGet('test_dictionary_non_nullable', 'value', NULL); +SELECT 'Non nullable value nullable key'; +SELECT dictGet('test_dictionary_non_nullable', 'value', arrayJoin([toUInt64(0), NULL, 1])); + +DROP DICTIONARY test_dictionary_non_nullable; +DROP TABLE dictionary_non_nullable_source_table; + +DROP TABLE IF EXISTS dictionary_nullable_source_table; +CREATE TABLE dictionary_nullable_source_table (id UInt64, value Nullable(String)) ENGINE=TinyLog; +INSERT INTO dictionary_nullable_source_table VALUES (0, 'Test'), (1, NULL); + +DROP DICTIONARY IF EXISTS test_dictionary_nullable; +CREATE DICTIONARY test_dictionary_nullable (id UInt64, value Nullable(String)) PRIMARY KEY id LAYOUT(DIRECT()) SOURCE(CLICKHOUSE(TABLE 'dictionary_nullable_source_table')); + +SELECT 'Nullable value only null key '; +SELECT dictGet('test_dictionary_nullable', 'value', NULL); +SELECT 'Nullable value nullable key'; +SELECT dictGet('test_dictionary_nullable', 'value', arrayJoin([toUInt64(0), NULL, 1, 2])); + +DROP DICTIONARY test_dictionary_nullable; +DROP TABLE dictionary_nullable_source_table; diff --git a/parser/testdata/02014_map_different_keys/ast.json b/parser/testdata/02014_map_different_keys/ast.json new file mode 100644 index 000000000..4f45092fe --- /dev/null +++ b/parser/testdata/02014_map_different_keys/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '...const maps...'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001065031, + "rows_read": 5, + "bytes_read": 187 + } +} diff --git a/parser/testdata/02014_map_different_keys/metadata.json b/parser/testdata/02014_map_different_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02014_map_different_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02014_map_different_keys/query.sql 
b/parser/testdata/02014_map_different_keys/query.sql new file mode 100644 index 000000000..0998a9283 --- /dev/null +++ b/parser/testdata/02014_map_different_keys/query.sql @@ -0,0 +1,32 @@ +SELECT '...const maps...'; + +WITH map(1, 2, 3, 4) AS m SELECT m[number] FROM numbers(5); +WITH map('1', 2, '3', 4) AS m SELECT m[toString(number)] FROM numbers(5); + +WITH map(1, 2, 3, 4) AS m SELECT m[3]; +WITH map('1', 2, '3', 4) AS m SELECT m['3']; + +DROP TABLE IF EXISTS t_map_02014; + +CREATE TABLE t_map_02014(i1 UInt64, i2 Int32, m1 Map(UInt32, String), m2 Map(Int8, String), m3 Map(Int128, String)) ENGINE = Memory; +INSERT INTO t_map_02014 VALUES (1, -1, map(1, 'foo', 2, 'bar'), map(-1, 'foo', 1, 'bar'), map(-1, 'foo', 1, 'bar')); + +SELECT '...int keys...'; + +SELECT m1[i1], m2[i1], m3[i1] FROM t_map_02014; +SELECT m1[i2], m2[i2], m3[i2] FROM t_map_02014; + +DROP TABLE IF EXISTS t_map_02014; + +CREATE TABLE t_map_02014(s String, fs FixedString(3), m1 Map(String, String), m2 Map(FixedString(3), String)) ENGINE = Memory; +INSERT INTO t_map_02014 VALUES ('aaa', 'bbb', map('aaa', 'foo', 'bbb', 'bar'), map('aaa', 'foo', 'bbb', 'bar')); + +SELECT '...string keys...'; + +SELECT m1['aaa'], m2['aaa'] FROM t_map_02014; +SELECT m1['aaa'::FixedString(3)], m2['aaa'::FixedString(3)] FROM t_map_02014; +SELECT m1[s], m2[s] FROM t_map_02014; +SELECT m1[fs], m2[fs] FROM t_map_02014; +SELECT length(m2['aaa'::FixedString(4)]) FROM t_map_02014; + +DROP TABLE IF EXISTS t_map_02014; diff --git a/parser/testdata/02014_storage_merge_order_by/ast.json b/parser/testdata/02014_storage_merge_order_by/ast.json new file mode 100644 index 000000000..c256566cb --- /dev/null +++ b/parser/testdata/02014_storage_merge_order_by/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery short (children 1)" + }, + { + "explain": " Identifier short" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001235361, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/02014_storage_merge_order_by/metadata.json b/parser/testdata/02014_storage_merge_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02014_storage_merge_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02014_storage_merge_order_by/query.sql b/parser/testdata/02014_storage_merge_order_by/query.sql new file mode 100644 index 000000000..5b9789ae1 --- /dev/null +++ b/parser/testdata/02014_storage_merge_order_by/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS short; +DROP TABLE IF EXISTS long; +DROP TABLE IF EXISTS merged; + +CREATE TABLE short (e Int64, t DateTime ) ENGINE = MergeTree PARTITION BY e ORDER BY t; +CREATE TABLE long (e Int64, t DateTime ) ENGINE = MergeTree PARTITION BY (e, toStartOfMonth(t)) ORDER BY t; + +insert into short select number % 11, toDateTime('2021-01-01 00:00:00') + number from numbers(1000); +insert into long select number % 11, toDateTime('2021-01-01 00:00:00') + number from numbers(1000); + +CREATE TABLE merged as short ENGINE = Merge(currentDatabase(), 'short|long'); + +select sum(e) from (select * from merged order by t limit 10) SETTINGS optimize_read_in_order = 0; + +select sum(e) from (select * from merged order by t limit 10) SETTINGS max_threads = 1; +select sum(e) from (select * from merged order by t limit 10) SETTINGS max_threads = 3; +select sum(e) from (select * from merged order by t limit 10) SETTINGS max_threads = 10; +select sum(e) from 
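-- (same query again at a higher thread count; the sum should presumably be independent of max_threads)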
(select * from merged order by t limit 10) SETTINGS max_threads = 50; + +DROP TABLE IF EXISTS short; +DROP TABLE IF EXISTS long; +DROP TABLE IF EXISTS merged; diff --git a/parser/testdata/02015_column_default_dict_get_identifier/ast.json b/parser/testdata/02015_column_default_dict_get_identifier/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02015_column_default_dict_get_identifier/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02015_column_default_dict_get_identifier/metadata.json b/parser/testdata/02015_column_default_dict_get_identifier/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02015_column_default_dict_get_identifier/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02015_column_default_dict_get_identifier/query.sql b/parser/testdata/02015_column_default_dict_get_identifier/query.sql new file mode 100644 index 000000000..046d0c421 --- /dev/null +++ b/parser/testdata/02015_column_default_dict_get_identifier/query.sql @@ -0,0 +1,36 @@ +-- Tags: no-parallel + +CREATE TABLE test_table +( + key_column UInt64, + data_column_1 UInt64, + data_column_2 UInt8 +) +ENGINE = MergeTree +ORDER BY key_column; + +INSERT INTO test_table VALUES (0, 0, 0); + +CREATE DICTIONARY test_dictionary +( + key_column UInt64 DEFAULT 0, + data_column_1 UInt64 DEFAULT 1, + data_column_2 UInt8 DEFAULT 1 +) +PRIMARY KEY key_column +LAYOUT(DIRECT()) +SOURCE(CLICKHOUSE(DB currentDatabase() TABLE 'test_table')); + +CREATE TABLE test_table_default +( + data_1 DEFAULT dictGetUInt64('test_dictionary', 'data_column_1', toUInt64(0)), + data_2 DEFAULT dictGet(test_dictionary, 'data_column_2', toUInt64(0)) +) +ENGINE=TinyLog; + +INSERT INTO test_table_default(data_1) VALUES (5); +SELECT * FROM test_table_default; + +DROP TABLE test_table_default; +DROP DICTIONARY test_dictionary; +DROP TABLE test_table; diff --git a/parser/testdata/02015_division_by_nullable/ast.json b/parser/testdata/02015_division_by_nullable/ast.json new file mode 100644 index 000000000..65b622dc3 --- /dev/null +++ b/parser/testdata/02015_division_by_nullable/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function divide (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal 'Nullable(Decimal(7, 2))'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001216578, + "rows_read": 11, + "bytes_read": 418 + } +} diff --git a/parser/testdata/02015_division_by_nullable/metadata.json b/parser/testdata/02015_division_by_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02015_division_by_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02015_division_by_nullable/query.sql b/parser/testdata/02015_division_by_nullable/query.sql new file mode 100644 index 000000000..16a010610 --- /dev/null +++ b/parser/testdata/02015_division_by_nullable/query.sql @@ -0,0 +1,101 @@ +SELECT 1 / CAST(NULL, 'Nullable(Decimal(7, 
2))'); +SELECT materialize(1) / CAST(NULL, 'Nullable(Decimal(7, 2))'); +SELECT 1 / CAST(materialize(NULL), 'Nullable(Decimal(7, 2))'); +SELECT materialize(1) / CAST(materialize(NULL), 'Nullable(Decimal(7, 2))'); + + +SELECT 1 / CAST(1, 'Nullable(Decimal(7, 2))'); +SELECT materialize(1) / CAST(1, 'Nullable(Decimal(7, 2))'); +SELECT 1 / CAST(materialize(1), 'Nullable(Decimal(7, 2))'); +SELECT materialize(1) / CAST(materialize(1), 'Nullable(Decimal(7, 2))'); + + +SELECT intDiv(1, CAST(NULL, 'Nullable(Decimal(7, 2))')); +SELECT intDiv(materialize(1), CAST(NULL, 'Nullable(Decimal(7, 2))')); +SELECT intDiv(1, CAST(materialize(NULL), 'Nullable(Decimal(7, 2))')); +SELECT intDiv(materialize(1), CAST(materialize(NULL), 'Nullable(Decimal(7, 2))')); + + +SELECT intDiv(1, CAST(1, 'Nullable(Decimal(7, 2))')); +SELECT intDiv(materialize(1), CAST(1, 'Nullable(Decimal(7, 2))')); +SELECT intDiv(1, CAST(materialize(1), 'Nullable(Decimal(7, 2))')); +SELECT intDiv(materialize(1), CAST(materialize(1), 'Nullable(Decimal(7, 2))')); + + +SELECT toDecimal32(1, 2) / CAST(NULL, 'Nullable(UInt32)'); +SELECT materialize(toDecimal32(1, 2)) / CAST(NULL, 'Nullable(UInt32)'); +SELECT toDecimal32(1, 2) / CAST(materialize(NULL), 'Nullable(UInt32)'); +SELECT materialize(toDecimal32(1, 2)) / CAST(materialize(NULL), 'Nullable(UInt32)'); + + +SELECT toDecimal32(1, 2) / CAST(1, 'Nullable(UInt32)'); +SELECT materialize(toDecimal32(1, 2)) / CAST(1, 'Nullable(UInt32)'); +SELECT toDecimal32(1, 2) / CAST(materialize(1), 'Nullable(UInt32)'); +SELECT materialize(toDecimal32(1, 2)) / CAST(materialize(1), 'Nullable(UInt32)'); + + +SELECT intDiv(1, CAST(NULL, 'Nullable(UInt32)')); +SELECT intDiv(materialize(1), CAST(NULL, 'Nullable(UInt32)')); +SELECT intDiv(1, CAST(materialize(NULL), 'Nullable(UInt32)')); +SELECT intDiv(materialize(1), CAST(materialize(NULL), 'Nullable(UInt32)')); + + +SELECT intDiv(1, CAST(1, 'Nullable(UInt32)')); +SELECT intDiv(materialize(1), CAST(1, 'Nullable(UInt32)')); +SELECT intDiv(1, CAST(materialize(1), 'Nullable(UInt32)')); +SELECT intDiv(materialize(1), CAST(materialize(1), 'Nullable(UInt32)')); + + +SELECT 1 % CAST(NULL, 'Nullable(UInt32)'); +SELECT materialize(1) % CAST(NULL, 'Nullable(UInt32)'); +SELECT 1 % CAST(materialize(NULL), 'Nullable(UInt32)'); +SELECT materialize(1) % CAST(materialize(NULL), 'Nullable(UInt32)'); + + +SELECT 1 % CAST(1, 'Nullable(UInt32)'); +SELECT materialize(1) % CAST(1, 'Nullable(UInt32)'); +SELECT 1 % CAST(materialize(1), 'Nullable(UInt32)'); +SELECT materialize(1) % CAST(materialize(1), 'Nullable(UInt32)'); + + +SELECT intDiv(1, CAST(NULL, 'Nullable(Float32)')); +SELECT intDiv(materialize(1), CAST(NULL, 'Nullable(Float32)')); +SELECT intDiv(1, CAST(materialize(NULL), 'Nullable(Float32)')); +SELECT intDiv(materialize(1), CAST(materialize(NULL), 'Nullable(Float32)')); + + +SELECT intDiv(1, CAST(1, 'Nullable(Float32)')); +SELECT intDiv(materialize(1), CAST(1, 'Nullable(Float32)')); +SELECT intDiv(1, CAST(materialize(1), 'Nullable(Float32)')); +SELECT intDiv(materialize(1), CAST(materialize(1), 'Nullable(Float32)')); + + +SELECT 1 % CAST(NULL, 'Nullable(Float32)'); +SELECT materialize(1) % CAST(NULL, 'Nullable(Float32)'); +SELECT 1 % CAST(materialize(NULL), 'Nullable(Float32)'); +SELECT materialize(1) % CAST(materialize(NULL), 'Nullable(Float32)'); + + +SELECT 1 % CAST(1, 'Nullable(Float32)'); +SELECT materialize(1) % CAST(1, 'Nullable(Float32)'); +SELECT 1 % CAST(materialize(1), 'Nullable(Float32)'); +SELECT materialize(1) % CAST(materialize(1), 'Nullable(Float32)'); + + +DROP 
TABLE IF EXISTS nullable_division; +CREATE TABLE nullable_division (x UInt32, y Nullable(UInt32), a Decimal(7, 2), b Nullable(Decimal(7, 2))) ENGINE=MergeTree() order by x; +INSERT INTO nullable_division VALUES (1, 1, 1, 1), (1, NULL, 1, NULL), (1, 0, 1, 0); + +SELECT if(y = 0, 0, intDiv(x, y)) from nullable_division; +SELECT if(y = 0, 0, x % y) from nullable_division; + +SELECT if(y = 0, 0, intDiv(a, y)) from nullable_division; +SELECT if(y = 0, 0, a / y) from nullable_division; + +SELECT if(b = 0, 0, intDiv(a, b)) from nullable_division; +SELECT if(b = 0, 0, a / b) from nullable_division; + +SELECT if(b = 0, 0, intDiv(x, b)) from nullable_division; +SELECT if(b = 0, 0, x / b) from nullable_division; + +DROP TABLE nullable_division; diff --git a/parser/testdata/02015_executable_user_defined_functions/ast.json b/parser/testdata/02015_executable_user_defined_functions/ast.json new file mode 100644 index 000000000..a7806a11e --- /dev/null +++ b/parser/testdata/02015_executable_user_defined_functions/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function test_function (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toUInt64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function toUInt64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001231314, + "rows_read": 12, + "bytes_read": 472 + } +} diff --git a/parser/testdata/02015_executable_user_defined_functions/metadata.json b/parser/testdata/02015_executable_user_defined_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02015_executable_user_defined_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02015_executable_user_defined_functions/query.sql b/parser/testdata/02015_executable_user_defined_functions/query.sql new file mode 100644 index 000000000..68848f49c --- /dev/null +++ b/parser/testdata/02015_executable_user_defined_functions/query.sql @@ -0,0 +1 @@ +SELECT test_function(toUInt64(2), toUInt64(2)); diff --git a/parser/testdata/02015_order_by_with_fill_misoptimization/ast.json b/parser/testdata/02015_order_by_with_fill_misoptimization/ast.json new file mode 100644 index 000000000..8341c7167 --- /dev/null +++ b/parser/testdata/02015_order_by_with_fill_misoptimization/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier s" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + 
}, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_5 (alias x)" + }, + { + "explain": " Literal 'Hello' (alias s)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 3)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier s" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001417399, + "rows_read": 23, + "bytes_read": 907 + } +} diff --git a/parser/testdata/02015_order_by_with_fill_misoptimization/metadata.json b/parser/testdata/02015_order_by_with_fill_misoptimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02015_order_by_with_fill_misoptimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02015_order_by_with_fill_misoptimization/query.sql b/parser/testdata/02015_order_by_with_fill_misoptimization/query.sql new file mode 100644 index 000000000..f0d90f151 --- /dev/null +++ b/parser/testdata/02015_order_by_with_fill_misoptimization/query.sql @@ -0,0 +1 @@ +SELECT s FROM (SELECT 5 AS x, 'Hello' AS s ORDER BY x WITH FILL FROM 1 TO 10) ORDER BY s; diff --git a/parser/testdata/02016_agg_empty_result_bug_28880/ast.json b/parser/testdata/02016_agg_empty_result_bug_28880/ast.json new file mode 100644 index 000000000..ad6907418 --- /dev/null +++ b/parser/testdata/02016_agg_empty_result_bug_28880/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (alias cnt) (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier cnt" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001431001, + "rows_read": 11, + "bytes_read": 388 + } +} diff --git a/parser/testdata/02016_agg_empty_result_bug_28880/metadata.json b/parser/testdata/02016_agg_empty_result_bug_28880/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02016_agg_empty_result_bug_28880/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02016_agg_empty_result_bug_28880/query.sql b/parser/testdata/02016_agg_empty_result_bug_28880/query.sql new file mode 100644 index 000000000..005358eb4 --- /dev/null +++ b/parser/testdata/02016_agg_empty_result_bug_28880/query.sql @@ -0,0 +1,10 @@ +SELECT count() AS cnt WHERE 0 HAVING cnt = 0; + +select cnt from (select count() cnt where 0) where cnt = 0; + +select cnt from (select count() cnt from system.one where 0) where cnt = 0; + +select sum from (select sum(dummy) sum from system.one where 0) where sum = 0; + +set aggregate_functions_null_for_empty=1; +select sum from (select sum(dummy) sum from system.one where 0) where sum is null; diff --git a/parser/testdata/02016_aggregation_spark_bar/ast.json b/parser/testdata/02016_aggregation_spark_bar/ast.json new file mode 100644 index 000000000..51b3671b9 --- /dev/null +++ 
b/parser/testdata/02016_aggregation_spark_bar/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery spark_bar_test (children 1)" + }, + { + "explain": " Identifier spark_bar_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001169306, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/02016_aggregation_spark_bar/metadata.json b/parser/testdata/02016_aggregation_spark_bar/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02016_aggregation_spark_bar/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02016_aggregation_spark_bar/query.sql b/parser/testdata/02016_aggregation_spark_bar/query.sql new file mode 100644 index 000000000..4d2de566e --- /dev/null +++ b/parser/testdata/02016_aggregation_spark_bar/query.sql @@ -0,0 +1,68 @@ +DROP TABLE IF EXISTS spark_bar_test; + +CREATE TABLE spark_bar_test (`cnt` UInt64,`event_date` Date) ENGINE = MergeTree ORDER BY event_date SETTINGS index_granularity = 8192; + +INSERT INTO spark_bar_test VALUES(1,'2020-01-01'),(4,'2020-01-02'),(5,'2020-01-03'),(2,'2020-01-04'),(3,'2020-01-05'),(7,'2020-01-06'),(6,'2020-01-07'),(8,'2020-01-08'),(2,'2020-01-11'); + +-- { echoOn } + +SELECT sparkbar(2)(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(3)(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(4)(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(5)(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(6)(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(7)(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(8)(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(9)(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(10)(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(11)(event_date,cnt) FROM spark_bar_test; + +SELECT sparkbar(11,2,5)(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(11,3,7)(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(11,4,11)(event_date,cnt) FROM spark_bar_test; + +SELECT sparkbar(11,toDate('2020-01-02'),toDate('2020-01-05'))(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(11,toDate('2020-01-03'),toDate('2020-01-07'))(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(11,toDate('2020-01-04'),toDate('2020-01-11'))(event_date,cnt) FROM spark_bar_test; + +SELECT sparkbar(2,toDate('2020-01-01'),toDate('2020-01-08'))(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(2,toDate('2020-01-02'),toDate('2020-01-09'))(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(3,toDate('2020-01-01'),toDate('2020-01-09'))(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(3,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(4,toDate('2020-01-01'),toDate('2020-01-08'))(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(5,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_test; +SELECT sparkbar(9,toDate('2020-01-01'),toDate('2020-01-10'))(event_date,cnt) FROM spark_bar_test; + +WITH number DIV 50 AS k, toUInt32(number % 50) AS value SELECT k, sparkbar(50, 0, 99)(number, value) FROM numbers(100) GROUP BY k ORDER BY k; + +SELECT sparkbar(128, 0, 9223372036854775806)(toUInt64(9223372036854775806), number % 65535) FROM numbers(100); +SELECT sparkbar(128)(toUInt64(9223372036854775806), number % 65535) FROM numbers(100); +SELECT sparkbar(9)(x, y) FROM (SELECT * FROM Values('x UInt64, y UInt8', (18446744073709551615,255), (0,0), (0,0), 
(4036797895307271799,254))); + +SELECT sparkbar(8, 0, 7)((number + 1) % 8, 1), sparkbar(8, 0, 7)((number + 2) % 8, 1), sparkbar(8, 0, 7)((number + 3) % 8, 1) FROM numbers(7); + +SELECT sparkbar(2)(number, -number) FROM numbers(10); +SELECT sparkbar(10)(number, number - 7) FROM numbers(10); +SELECT sparkbar(1024)(number, number) FROM numbers(1024); +SELECT sparkbar(1024)(number, 1) FROM numbers(1024); +SELECT sparkbar(1024)(number, 0) FROM numbers(1024); + +-- { echoOff } + +SELECT sparkbar(0)(number, number) FROM numbers(10); -- { serverError BAD_ARGUMENTS } +SELECT sparkbar(1)(number, number) FROM numbers(10); -- { serverError BAD_ARGUMENTS } +SELECT sparkbar(1025)(number, number) FROM numbers(10); -- { serverError BAD_ARGUMENTS } +SELECT sparkbar(2, 10, 9)(number, number) FROM numbers(10); -- { serverError BAD_ARGUMENTS } +SELECT sparkbar(2, -5, -1)(number, number) FROM numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT sparkbar(2, -5, 1)(number, number) FROM numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT sparkbar(2)(toInt32(number), number) FROM numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT sparkbar(2, 0)(number, number) FROM numbers(10); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT sparkbar(2, 0, 5, 8)(number, number) FROM numbers(10); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +-- it causes overflow, just check that it doesn't crash under UBSan, do not check the result it's not really reasonable +SELECT sparkbar(10)(number, toInt64(number)) FROM numbers(toUInt64(9223372036854775807), 20) FORMAT Null; +SELECT sparkbar(10)(number, -number) FROM numbers(toUInt64(9223372036854775807), 7) FORMAT Null; +SELECT sparkbar(10)(number, number) FROM numbers(18446744073709551615, 7) FORMAT Null; +SELECT sparkbar(16)(number, number) FROM numbers(18446744073709551600, 16) FORMAT Null; + +DROP TABLE IF EXISTS spark_bar_test; diff --git a/parser/testdata/02016_bit_shift_right_for_string_integer/ast.json b/parser/testdata/02016_bit_shift_right_for_string_integer/ast.json new file mode 100644 index 000000000..2a1435123 --- /dev/null +++ b/parser/testdata/02016_bit_shift_right_for_string_integer/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'String ConstConst'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001546122, + "rows_read": 5, + "bytes_read": 188 + } +} diff --git a/parser/testdata/02016_bit_shift_right_for_string_integer/metadata.json b/parser/testdata/02016_bit_shift_right_for_string_integer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02016_bit_shift_right_for_string_integer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02016_bit_shift_right_for_string_integer/query.sql b/parser/testdata/02016_bit_shift_right_for_string_integer/query.sql new file mode 100644 index 000000000..40fccbc89 --- /dev/null +++ b/parser/testdata/02016_bit_shift_right_for_string_integer/query.sql @@ -0,0 +1,131 @@ +SELECT 'String ConstConst'; +SELECT bin('Hello') == bin(bitShiftRight('Hello', 0)); +SELECT 0, 'Hello',bin(bitShiftRight('Hello', 0)); +SELECT 1, 'Hello',bin(bitShiftRight('Hello', 1)); +SELECT 2, 'Hello',bin(bitShiftRight('Hello', 2)); 
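+-- The shift amount keeps increasing one bit at a time from here, up to the full
+-- 40-bit width of the 5-byte string 'Hello'. A hedged illustration of the
+-- expected pattern (an assumption about String shift semantics, not taken from
+-- the test itself): every full byte shifted out shortens the result, so
+--   SELECT bin(bitShiftRight('Hello', 8)) == bin('Hell');
+-- is expected to return 1, and the 40-bit shift used further down should yield
+-- an empty string (the test relies on that to print a blank line).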
+SELECT 3, 'Hello',bin(bitShiftRight('Hello', 3)); +SELECT 4, 'Hello',bin(bitShiftRight('Hello', 4)); +SELECT 5, 'Hello',bin(bitShiftRight('Hello', 5)); +SELECT 6, 'Hello',bin(bitShiftRight('Hello', 6)); +SELECT 7, 'Hello',bin(bitShiftRight('Hello', 7)); +SELECT 8, 'Hello',bin(bitShiftRight('Hello', 8)); +SELECT 9, 'Hello',bin(bitShiftRight('Hello', 9)); +SELECT 10,'Hello',bin(bitShiftRight('Hello', 10)); +SELECT 11,'Hello',bin(bitShiftRight('Hello', 11)); +SELECT 12,'Hello',bin(bitShiftRight('Hello', 12)); +SELECT 13,'Hello',bin(bitShiftRight('Hello', 13)); +SELECT 14,'Hello',bin(bitShiftRight('Hello', 14)); +SELECT 15,'Hello',bin(bitShiftRight('Hello', 15)); +SELECT 16,'Hello',bin(bitShiftRight('Hello', 16)); +SELECT 17,'Hello',bin(bitShiftRight('Hello', 17)); +SELECT 18,'Hello',bin(bitShiftRight('Hello', 18)); +SELECT 19,'Hello',bin(bitShiftRight('Hello', 19)); +SELECT 20,'Hello',bin(bitShiftRight('Hello', 20)); +SELECT 21,'Hello',bin(bitShiftRight('Hello', 21)); +SELECT 22,'Hello',bin(bitShiftRight('Hello', 22)); +SELECT 23,'Hello',bin(bitShiftRight('Hello', 23)); +SELECT 24,'Hello',bin(bitShiftRight('Hello', 24)); +SELECT 25,'Hello',bin(bitShiftRight('Hello', 25)); +SELECT 26,'Hello',bin(bitShiftRight('Hello', 26)); +SELECT 27,'Hello',bin(bitShiftRight('Hello', 27)); +SELECT 28,'Hello',bin(bitShiftRight('Hello', 28)); +SELECT 29,'Hello',bin(bitShiftRight('Hello', 29)); +SELECT 30,'Hello',bin(bitShiftRight('Hello', 30)); +SELECT 31,'Hello',bin(bitShiftRight('Hello', 31)); +SELECT 32,'Hello',bin(bitShiftRight('Hello', 32)); +SELECT 33,'Hello',bin(bitShiftRight('Hello', 33)); +SELECT 34,'Hello',bin(bitShiftRight('Hello', 34)); +SELECT 35,'Hello',bin(bitShiftRight('Hello', 35)); +SELECT 36,'Hello',bin(bitShiftRight('Hello', 36)); +SELECT 37,'Hello',bin(bitShiftRight('Hello', 37)); +SELECT 38,'Hello',bin(bitShiftRight('Hello', 38)); +SELECT 39,'Hello',bin(bitShiftRight('Hello', 39)); +SELECT 40,'Hello',bin(bitShiftRight('Hello', 40)); + +SELECT 'FixedString ConstConst'; +SELECT bin(toFixedString('Hello', 10)) == bin(bitShiftRight(toFixedString('Hello', 10), 0)); +SELECT 0, toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 0)); +SELECT 1, toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 1)); +SELECT 2, toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 2)); +SELECT 3, toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 3)); +SELECT 4, toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 4)); +SELECT 5, toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 5)); +SELECT 6, toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 6)); +SELECT 7, toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 7)); +SELECT 8, toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 8)); +SELECT 9, toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 9)); +SELECT 10,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 10)); +SELECT 11,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 11)); +SELECT 12,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 12)); +SELECT 13,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 13)); +SELECT 14,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 14)); +SELECT 15,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 15)); +SELECT 
16,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 16)); +SELECT 17,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 17)); +SELECT 18,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 18)); +SELECT 19,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 19)); +SELECT 20,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 20)); +SELECT 21,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 21)); +SELECT 22,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 22)); +SELECT 23,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 23)); +SELECT 24,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 24)); +SELECT 25,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 25)); +SELECT 26,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 26)); +SELECT 27,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 27)); +SELECT 28,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 28)); +SELECT 29,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 29)); +SELECT 30,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 30)); +SELECT 31,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 31)); +SELECT 32,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 32)); +SELECT 33,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 33)); +SELECT 34,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 34)); +SELECT 35,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 35)); +SELECT 36,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 36)); +SELECT 37,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 37)); +SELECT 38,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 38)); +SELECT 39,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 39)); +SELECT 40,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 40)); +SELECT 41,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 41)); +SELECT 42,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 42)); +SELECT 77,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 77)); +SELECT 78,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 78)); +SELECT 79,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 79)); +SELECT 80,toFixedString('Hello', 10), bin(bitShiftRight(toFixedString('Hello', 10), 80)); + +DROP TABLE IF EXISTS test_bit_shift_right_string_integer; + +CREATE TABLE test_bit_shift_right_string_integer (str String, fixedStr FixedString(10), id Int64) engine=Log; + +INSERT INTO test_bit_shift_right_string_integer VALUES('Hello','Hello',0),('Hello','Hello',1),('Hello','Hello',7),('Hello','Hello',8),('Hello','Hello',9),('Hello','Hello',15),('Hello','Hello',16),('Hello','Hello',17),('Hello','Hello',23),('Hello','Hello',24),('Hello','Hello',25),('Hello','Hello',31),('Hello','Hello',32),('Hello','Hello',33),('Hello','Hello',39),('Hello','Hello',40),('Hel','Hel',7),('Hel','Hel',8),('Hel','Hel',9); + +SELECT bin(bitShiftRight('Hello', 40)); --A blank line +SELECT 'String VectorVector'; +SELECT id as shift_right_bit,str as 
arg,bin(bitShiftRight(str, id)) as string_res FROM test_bit_shift_right_string_integer; +SELECT id as shift_right_bit,str as arg,bin(bitShiftRight(str, id)) as string_res FROM test_bit_shift_right_string_integer WHERE (str='Hello' AND (id=23 OR id=24 OR id=25)) OR (str='Hel' AND (id=7 OR id=8 OR id=9)); + +SELECT bin(bitShiftRight('Hello', 40)); +SELECT 'FixedString VectorVector'; +SELECT id as shift_right_bit,fixedStr as arg,bin(bitShiftRight(fixedStr, id)) as fixed_string_res FROM test_bit_shift_right_string_integer; +SELECT id as shift_right_bit,fixedStr as arg,bin(bitShiftRight(fixedStr, id)) as fixed_string_res FROM test_bit_shift_right_string_integer WHERE (str='Hello' AND (id=23 OR id=24 OR id=25)) OR (str='Hel' AND (id=7 OR id=8 OR id=9)); + +SELECT bin(bitShiftRight('Hello', 40)); --A blank line +SELECT 'String VectorConst'; +SELECT 7 as shift_right_bit,str as arg,bin(bitShiftRight(str, 7)) as string_res FROM test_bit_shift_right_string_integer; +SELECT 8 as shift_right_bit,str as arg,bin(bitShiftRight(str, 8)) as string_res FROM test_bit_shift_right_string_integer; + +SELECT bin(bitShiftRight('Hello', 40)); --A blank line +SELECT 'FixedString VectorConst'; +SELECT 7 as shift_right_bit,fixedStr as arg,bin(bitShiftRight(fixedStr, 7)) as fixed_string_res FROM test_bit_shift_right_string_integer; +SELECT 8 as shift_right_bit,fixedStr as arg,bin(bitShiftRight(fixedStr, 8)) as fixed_string_res FROM test_bit_shift_right_string_integer; + +SELECT bin(bitShiftRight('Hello', 40)); --A blank line +SELECT 'String ConstVector'; +SELECT id as shift_right_bit,'Hello' as arg,bin(bitShiftRight('Hello', id)) as string_res FROM test_bit_shift_right_string_integer; +SELECT id as shift_right_bit,'Hel' as arg,bin(bitShiftRight('Hel', id)) as string_res FROM test_bit_shift_right_string_integer WHERE id <= 8 * 3; + +SELECT bin(bitShiftRight('Hello', 40)); --A blank line +SELECT 'FixedString ConstVector'; +SELECT id as shift_right_bit,toFixedString('Hello', 10) as arg,bin(bitShiftRight(toFixedString('Hello', 10), id)) as fixed_string_res FROM test_bit_shift_right_string_integer; +SELECT id as shift_right_bit,toFixedString('Hel', 10) as arg,bin(bitShiftRight(toFixedString('Hel', 10), id)) as fixed_string_res FROM test_bit_shift_right_string_integer; + +DROP TABLE IF EXISTS test_bit_shift_right_string_integer; diff --git a/parser/testdata/02016_order_by_with_fill_monotonic_functions_removal/ast.json b/parser/testdata/02016_order_by_with_fill_monotonic_functions_removal/ast.json new file mode 100644 index 000000000..b05001618 --- /dev/null +++ b/parser/testdata/02016_order_by_with_fill_monotonic_functions_removal/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toStartOfMinute (alias ts) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier some_time" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001490431, + "rows_read": 7, + "bytes_read": 283 + } +} diff --git a/parser/testdata/02016_order_by_with_fill_monotonic_functions_removal/metadata.json b/parser/testdata/02016_order_by_with_fill_monotonic_functions_removal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02016_order_by_with_fill_monotonic_functions_removal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02016_order_by_with_fill_monotonic_functions_removal/query.sql b/parser/testdata/02016_order_by_with_fill_monotonic_functions_removal/query.sql new file mode 100644 index 000000000..bf232ed5c --- /dev/null +++ b/parser/testdata/02016_order_by_with_fill_monotonic_functions_removal/query.sql @@ -0,0 +1,6 @@ +SELECT toStartOfMinute(some_time) AS ts +FROM +( + SELECT toDateTime('2021-07-07 15:21:05') AS some_time +) +ORDER BY ts ASC WITH FILL FROM toDateTime('2021-07-07 15:21:00') TO toDateTime('2021-07-07 15:21:15') STEP 5; diff --git a/parser/testdata/02016_summing_mt_aggregating_column/ast.json b/parser/testdata/02016_summing_mt_aggregating_column/ast.json new file mode 100644 index 000000000..4c1cda3c8 --- /dev/null +++ b/parser/testdata/02016_summing_mt_aggregating_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery summing_mt_aggregating_column (children 1)" + }, + { + "explain": " Identifier summing_mt_aggregating_column" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001233268, + "rows_read": 2, + "bytes_read": 110 + } +} diff --git a/parser/testdata/02016_summing_mt_aggregating_column/metadata.json b/parser/testdata/02016_summing_mt_aggregating_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02016_summing_mt_aggregating_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02016_summing_mt_aggregating_column/query.sql b/parser/testdata/02016_summing_mt_aggregating_column/query.sql new file mode 100644 index 000000000..c1f115a9d --- /dev/null +++ b/parser/testdata/02016_summing_mt_aggregating_column/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS summing_mt_aggregating_column; + +CREATE TABLE summing_mt_aggregating_column +( + Key UInt64, + Value UInt64, + ConcatArraySimple SimpleAggregateFunction(groupArrayArray, Array(UInt64)), + ConcatArrayComplex AggregateFunction(groupArrayArray, Array(UInt64)) +) +ENGINE = SummingMergeTree() +ORDER BY Key; + +INSERT INTO summing_mt_aggregating_column SELECT 1, 2, [333, 444], groupArrayArrayState([toUInt64(33), toUInt64(44)]); +INSERT INTO summing_mt_aggregating_column SELECT 1, 3, [555, 999], groupArrayArrayState([toUInt64(55), toUInt64(99)]); + +OPTIMIZE TABLE summing_mt_aggregating_column FINAL; + +SELECT Key, any(Value), any(ConcatArraySimple), groupArrayArrayMerge(ConcatArrayComplex) FROM summing_mt_aggregating_column GROUP BY Key; + +DROP TABLE IF EXISTS summing_mt_aggregating_column; diff --git a/parser/testdata/02017_bit_shift_left_for_string_integer/ast.json b/parser/testdata/02017_bit_shift_left_for_string_integer/ast.json new file mode 100644 index 000000000..9ea5017dc --- /dev/null +++ b/parser/testdata/02017_bit_shift_left_for_string_integer/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'String ConstConst'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001472748, + "rows_read": 5, + "bytes_read": 188 + } +} diff --git a/parser/testdata/02017_bit_shift_left_for_string_integer/metadata.json 
b/parser/testdata/02017_bit_shift_left_for_string_integer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02017_bit_shift_left_for_string_integer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02017_bit_shift_left_for_string_integer/query.sql b/parser/testdata/02017_bit_shift_left_for_string_integer/query.sql new file mode 100644 index 000000000..a8e66eda2 --- /dev/null +++ b/parser/testdata/02017_bit_shift_left_for_string_integer/query.sql @@ -0,0 +1,131 @@ +SELECT 'String ConstConst'; +SELECT bin('Hello') == bin(bitShiftLeft('Hello', 0)); +SELECT 0, 'Hello',bin(bitShiftLeft('Hello', 0)); +SELECT 1, 'Hello',bin(bitShiftLeft('Hello', 1)); +SELECT 2, 'Hello',bin(bitShiftLeft('Hello', 2)); +SELECT 3, 'Hello',bin(bitShiftLeft('Hello', 3)); +SELECT 4, 'Hello',bin(bitShiftLeft('Hello', 4)); +SELECT 5, 'Hello',bin(bitShiftLeft('Hello', 5)); +SELECT 6, 'Hello',bin(bitShiftLeft('Hello', 6)); +SELECT 7, 'Hello',bin(bitShiftLeft('Hello', 7)); +SELECT 8, 'Hello',bin(bitShiftLeft('Hello', 8)); +SELECT 9, 'Hello',bin(bitShiftLeft('Hello', 9)); +SELECT 10,'Hello',bin(bitShiftLeft('Hello', 10)); +SELECT 11,'Hello',bin(bitShiftLeft('Hello', 11)); +SELECT 12,'Hello',bin(bitShiftLeft('Hello', 12)); +SELECT 13,'Hello',bin(bitShiftLeft('Hello', 13)); +SELECT 14,'Hello',bin(bitShiftLeft('Hello', 14)); +SELECT 15,'Hello',bin(bitShiftLeft('Hello', 15)); +SELECT 16,'Hello',bin(bitShiftLeft('Hello', 16)); +SELECT 17,'Hello',bin(bitShiftLeft('Hello', 17)); +SELECT 18,'Hello',bin(bitShiftLeft('Hello', 18)); +SELECT 19,'Hello',bin(bitShiftLeft('Hello', 19)); +SELECT 20,'Hello',bin(bitShiftLeft('Hello', 20)); +SELECT 21,'Hello',bin(bitShiftLeft('Hello', 21)); +SELECT 22,'Hello',bin(bitShiftLeft('Hello', 22)); +SELECT 23,'Hello',bin(bitShiftLeft('Hello', 23)); +SELECT 24,'Hello',bin(bitShiftLeft('Hello', 24)); +SELECT 25,'Hello',bin(bitShiftLeft('Hello', 25)); +SELECT 26,'Hello',bin(bitShiftLeft('Hello', 26)); +SELECT 27,'Hello',bin(bitShiftLeft('Hello', 27)); +SELECT 28,'Hello',bin(bitShiftLeft('Hello', 28)); +SELECT 29,'Hello',bin(bitShiftLeft('Hello', 29)); +SELECT 30,'Hello',bin(bitShiftLeft('Hello', 30)); +SELECT 31,'Hello',bin(bitShiftLeft('Hello', 31)); +SELECT 32,'Hello',bin(bitShiftLeft('Hello', 32)); +SELECT 33,'Hello',bin(bitShiftLeft('Hello', 33)); +SELECT 34,'Hello',bin(bitShiftLeft('Hello', 34)); +SELECT 35,'Hello',bin(bitShiftLeft('Hello', 35)); +SELECT 36,'Hello',bin(bitShiftLeft('Hello', 36)); +SELECT 37,'Hello',bin(bitShiftLeft('Hello', 37)); +SELECT 38,'Hello',bin(bitShiftLeft('Hello', 38)); +SELECT 39,'Hello',bin(bitShiftLeft('Hello', 39)); +SELECT 40,'Hello',bin(bitShiftLeft('Hello', 40)); + +SELECT 'FixedString ConstConst'; +SELECT bin(toFixedString('Hello', 10)) == bin(bitShiftLeft(toFixedString('Hello', 10), 0)); +SELECT 0, toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 0)); +SELECT 1, toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 1)); +SELECT 2, toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 2)); +SELECT 3, toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 3)); +SELECT 4, toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 4)); +SELECT 5, toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 5)); +SELECT 6, toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 6)); +SELECT 7, toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 7)); +SELECT 
8, toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 8)); +SELECT 9, toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 9)); +SELECT 10,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 10)); +SELECT 11,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 11)); +SELECT 12,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 12)); +SELECT 13,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 13)); +SELECT 14,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 14)); +SELECT 15,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 15)); +SELECT 16,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 16)); +SELECT 17,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 17)); +SELECT 18,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 18)); +SELECT 19,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 19)); +SELECT 20,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 20)); +SELECT 21,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 21)); +SELECT 22,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 22)); +SELECT 23,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 23)); +SELECT 24,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 24)); +SELECT 25,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 25)); +SELECT 26,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 26)); +SELECT 27,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 27)); +SELECT 28,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 28)); +SELECT 29,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 29)); +SELECT 30,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 30)); +SELECT 31,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 31)); +SELECT 32,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 32)); +SELECT 33,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 33)); +SELECT 34,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 34)); +SELECT 35,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 35)); +SELECT 36,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 36)); +SELECT 37,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 37)); +SELECT 38,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 38)); +SELECT 39,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 39)); +SELECT 40,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 40)); +SELECT 41,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 41)); +SELECT 42,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 42)); +SELECT 77,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 77)); +SELECT 78,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 78)); +SELECT 79,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 79)); +SELECT 80,toFixedString('Hello', 10), bin(bitShiftLeft(toFixedString('Hello', 10), 80)); + +DROP TABLE IF EXISTS 
test_bit_shift_left_string_integer; + +CREATE TABLE test_bit_shift_left_string_integer (str String, fixedStr FixedString(10), id Int64) engine=Log; + +INSERT INTO test_bit_shift_left_string_integer VALUES('Hello','Hello',0),('Hello','Hello',1),('Hello','Hello',7),('Hello','Hello',8),('Hello','Hello',9),('Hello','Hello',15),('Hello','Hello',16),('Hello','Hello',17),('Hello','Hello',23),('Hello','Hello',24),('Hello','Hello',25),('Hello','Hello',31),('Hello','Hello',32),('Hello','Hello',33),('Hello','Hello',39),('Hello','Hello',40),('Hel','Hel',7),('Hel','Hel',8),('Hel','Hel',9); + +SELECT bin(bitShiftLeft('Hello', 40)); --A blank line +SELECT 'String VectorVector'; +SELECT id as shift_right_bit,str as arg,bin(bitShiftLeft(str, id)) as string_res FROM test_bit_shift_left_string_integer; +SELECT id as shift_right_bit,str as arg,bin(bitShiftLeft(str, id)) as string_res FROM test_bit_shift_left_string_integer WHERE (str='Hello' AND (id=23 OR id=24 OR id=25)) OR (str='Hel' AND (id=7 OR id=8 OR id=9)); + +SELECT bin(bitShiftLeft('Hello', 40)); +SELECT 'FixedString VectorVector'; +SELECT id as shift_right_bit,fixedStr as arg,bin(bitShiftLeft(fixedStr, id)) as fixed_string_res FROM test_bit_shift_left_string_integer; +SELECT id as shift_right_bit,fixedStr as arg,bin(bitShiftLeft(fixedStr, id)) as fixed_string_res FROM test_bit_shift_left_string_integer WHERE (str='Hello' AND (id=23 OR id=24 OR id=25)) OR (str='Hel' AND (id=7 OR id=8 OR id=9)); + +SELECT bin(bitShiftLeft('Hello', 40)); --A blank line +SELECT 'String VectorConst'; +SELECT 7 as shift_right_bit,str as arg,bin(bitShiftLeft(str, 7)) as string_res FROM test_bit_shift_left_string_integer; +SELECT 8 as shift_right_bit,str as arg,bin(bitShiftLeft(str, 8)) as string_res FROM test_bit_shift_left_string_integer; + +SELECT bin(bitShiftLeft('Hello', 40)); --A blank line +SELECT 'FixedString VectorConst'; +SELECT 7 as shift_right_bit,fixedStr as arg,bin(bitShiftLeft(fixedStr, 7)) as fixed_string_res FROM test_bit_shift_left_string_integer; +SELECT 8 as shift_right_bit,fixedStr as arg,bin(bitShiftLeft(fixedStr, 8)) as fixed_string_res FROM test_bit_shift_left_string_integer; + +SELECT bin(bitShiftLeft('Hello', 40)); --A blank line +SELECT 'String ConstVector'; +SELECT id as shift_right_bit,'Hello' as arg,bin(bitShiftLeft('Hello', id)) as string_res FROM test_bit_shift_left_string_integer; +SELECT id as shift_right_bit,'Hel' as arg,bin(bitShiftLeft('Hel', id)) as string_res FROM test_bit_shift_left_string_integer WHERE id <= 8 * 3; + +SELECT bin(bitShiftLeft('Hello', 40)); --A blank line +SELECT 'FixedString ConstVector'; +SELECT id as shift_right_bit,toFixedString('Hello', 10) as arg,bin(bitShiftLeft(toFixedString('Hello', 10), id)) as fixed_string_res FROM test_bit_shift_left_string_integer; +SELECT id as shift_right_bit,toFixedString('Hel', 10) as arg,bin(bitShiftLeft(toFixedString('Hel', 10), id)) as fixed_string_res FROM test_bit_shift_left_string_integer; + +DROP TABLE IF EXISTS test_bit_shift_left_string_integer; diff --git a/parser/testdata/02017_columns_with_dot/ast.json b/parser/testdata/02017_columns_with_dot/ast.json new file mode 100644 index 000000000..4066b1248 --- /dev/null +++ b/parser/testdata/02017_columns_with_dot/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_with_dots (children 1)" + }, + { + "explain": " Identifier t_with_dots" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001932763, + "rows_read": 2, + "bytes_read": 74 
+ } +} diff --git a/parser/testdata/02017_columns_with_dot/metadata.json b/parser/testdata/02017_columns_with_dot/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02017_columns_with_dot/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02017_columns_with_dot/query.sql b/parser/testdata/02017_columns_with_dot/query.sql new file mode 100644 index 000000000..ae901214d --- /dev/null +++ b/parser/testdata/02017_columns_with_dot/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS t_with_dots; +CREATE TABLE t_with_dots (id UInt32, arr Array(UInt32), `b.id` UInt32, `b.arr` Array(UInt32)) ENGINE = Log; + +INSERT INTO t_with_dots VALUES (1, [0, 0], 2, [1, 1, 3]); +SELECT * FROM t_with_dots; + +DROP TABLE t_with_dots; + +CREATE TABLE t_with_dots (id UInt32, arr Array(UInt32), `b.id` UInt32, `b.arr` Array(UInt32)) +ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_with_dots VALUES (1, [0, 0], 2, [1, 1, 3]); +SELECT * FROM t_with_dots; + +DROP TABLE t_with_dots; + +CREATE TABLE t_with_dots (id UInt32, arr Array(UInt32), `b.id` UInt32, `b.arr` Array(UInt32)) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO t_with_dots VALUES (1, [0, 0], 2, [1, 1, 3]); +SELECT * FROM t_with_dots; + +DROP TABLE t_with_dots; diff --git a/parser/testdata/02017_columns_with_dot_2/ast.json b/parser/testdata/02017_columns_with_dot_2/ast.json new file mode 100644 index 000000000..7f7a0191a --- /dev/null +++ b/parser/testdata/02017_columns_with_dot_2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_nested (children 1)" + }, + { + "explain": " Identifier test_nested" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001533605, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/02017_columns_with_dot_2/metadata.json b/parser/testdata/02017_columns_with_dot_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02017_columns_with_dot_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02017_columns_with_dot_2/query.sql b/parser/testdata/02017_columns_with_dot_2/query.sql new file mode 100644 index 000000000..eefe52b74 --- /dev/null +++ b/parser/testdata/02017_columns_with_dot_2/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS test_nested; + +CREATE TABLE test_nested +( + `id` String, + `with_dot.str` String, + `with_dot.array` Array(Int32) +) +ENGINE = MergeTree() +ORDER BY id; + +INSERT INTO test_nested VALUES('123', 'asd', [1,2]); +SELECT * FROM test_nested; + +ALTER TABLE test_nested ADD COLUMN `with_dot.bool` UInt8; +SELECT * FROM test_nested; + +DROP TABLE test_nested; diff --git a/parser/testdata/02017_create_distributed_table_coredump/ast.json b/parser/testdata/02017_create_distributed_table_coredump/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02017_create_distributed_table_coredump/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02017_create_distributed_table_coredump/metadata.json b/parser/testdata/02017_create_distributed_table_coredump/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02017_create_distributed_table_coredump/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02017_create_distributed_table_coredump/query.sql 
b/parser/testdata/02017_create_distributed_table_coredump/query.sql new file mode 100644 index 000000000..a7a77672a --- /dev/null +++ b/parser/testdata/02017_create_distributed_table_coredump/query.sql @@ -0,0 +1,14 @@ +-- Tags: distributed + +drop table if exists t; +drop table if exists td1; +drop table if exists td2; +drop table if exists td3; +create table t (val UInt32) engine = MergeTree order by val; +create table td1 engine = Distributed(test_shard_localhost, currentDatabase(), 't') as t; +create table td2 engine = Distributed(test_shard_localhost, currentDatabase(), 't', xxHash32(val), default) as t; +create table td3 engine = Distributed(test_shard_localhost, currentDatabase(), 't', xxHash32(val), 'default') as t; +drop table if exists t; +drop table if exists td1; +drop table if exists td2; +drop table if exists td3; diff --git a/parser/testdata/02017_order_by_with_fill_redundant_functions/ast.json b/parser/testdata/02017_order_by_with_fill_redundant_functions/ast.json new file mode 100644 index 000000000..da58bd73f --- /dev/null +++ b/parser/testdata/02017_order_by_with_fill_redundant_functions/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_5 (alias x)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Function negate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " OrderByElement (children 3)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001351396, + "rows_read": 23, + "bytes_read": 874 + } +} diff --git a/parser/testdata/02017_order_by_with_fill_redundant_functions/metadata.json b/parser/testdata/02017_order_by_with_fill_redundant_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02017_order_by_with_fill_redundant_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02017_order_by_with_fill_redundant_functions/query.sql b/parser/testdata/02017_order_by_with_fill_redundant_functions/query.sql new file mode 100644 index 000000000..6f3e6787c --- /dev/null +++ b/parser/testdata/02017_order_by_with_fill_redundant_functions/query.sql @@ -0,0 +1 @@ +SELECT x FROM (SELECT 5 AS x) ORDER BY -x, x WITH FILL FROM 1 TO 10; diff --git a/parser/testdata/02018_multiple_with_fill_for_the_same_column/ast.json b/parser/testdata/02018_multiple_with_fill_for_the_same_column/ast.json new file mode 100644 index 000000000..ffdb6556e --- /dev/null +++ b/parser/testdata/02018_multiple_with_fill_for_the_same_column/ast.json @@ -0,0 +1,103 @@ +{ + 
"meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_5 (alias x)" + }, + { + "explain": " Literal 'Hello' (alias y)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " OrderByElement (children 3)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_7" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " OrderByElement (children 3)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.001100622, + "rows_read": 27, + "bytes_read": 980 + } +} diff --git a/parser/testdata/02018_multiple_with_fill_for_the_same_column/metadata.json b/parser/testdata/02018_multiple_with_fill_for_the_same_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02018_multiple_with_fill_for_the_same_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02018_multiple_with_fill_for_the_same_column/query.sql b/parser/testdata/02018_multiple_with_fill_for_the_same_column/query.sql new file mode 100644 index 000000000..0db88defa --- /dev/null +++ b/parser/testdata/02018_multiple_with_fill_for_the_same_column/query.sql @@ -0,0 +1 @@ +SELECT x, y FROM (SELECT 5 AS x, 'Hello' AS y) ORDER BY x WITH FILL FROM 3 TO 7, y, x WITH FILL FROM 1 TO 10; -- { serverError INVALID_WITH_FILL_EXPRESSION } diff --git a/parser/testdata/02019_multiple_weird_with_fill/ast.json b/parser/testdata/02019_multiple_weird_with_fill/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02019_multiple_weird_with_fill/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02019_multiple_weird_with_fill/metadata.json b/parser/testdata/02019_multiple_weird_with_fill/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02019_multiple_weird_with_fill/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02019_multiple_weird_with_fill/query.sql b/parser/testdata/02019_multiple_weird_with_fill/query.sql new file mode 100644 index 000000000..a2ed33c51 --- /dev/null +++ b/parser/testdata/02019_multiple_weird_with_fill/query.sql @@ -0,0 +1,14 @@ +SELECT + x, + -x, + y +FROM +( + SELECT + 5 AS x, + 'Hello' AS y +) +ORDER BY + x ASC WITH FILL FROM 3 TO 7, + y ASC, + -x ASC WITH FILL FROM -10 TO -1; diff --git a/parser/testdata/02020_cast_integer_overflow/ast.json b/parser/testdata/02020_cast_integer_overflow/ast.json new file mode 100644 index 000000000..e45babb2d --- /dev/null +++ 
b/parser/testdata/02020_cast_integer_overflow/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toInt32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '-2147483648'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001140411, + "rows_read": 7, + "bytes_read": 265 + } +} diff --git a/parser/testdata/02020_cast_integer_overflow/metadata.json b/parser/testdata/02020_cast_integer_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02020_cast_integer_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02020_cast_integer_overflow/query.sql b/parser/testdata/02020_cast_integer_overflow/query.sql new file mode 100644 index 000000000..57aeff9a9 --- /dev/null +++ b/parser/testdata/02020_cast_integer_overflow/query.sql @@ -0,0 +1,2 @@ +SELECT toInt32('-2147483648'); +SELECT toInt32OrNull('-2147483648'); diff --git a/parser/testdata/02020_exponential_smoothing/ast.json b/parser/testdata/02020_exponential_smoothing/ast.json new file mode 100644 index 000000000..6a725c5b1 --- /dev/null +++ b/parser/testdata/02020_exponential_smoothing/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'exponentialMovingAverage'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.000965009, + "rows_read": 5, + "bytes_read": 195 + } +} diff --git a/parser/testdata/02020_exponential_smoothing/metadata.json b/parser/testdata/02020_exponential_smoothing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02020_exponential_smoothing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02020_exponential_smoothing/query.sql b/parser/testdata/02020_exponential_smoothing/query.sql new file mode 100644 index 000000000..a0506b4c7 --- /dev/null +++ b/parser/testdata/02020_exponential_smoothing/query.sql @@ -0,0 +1,236 @@ +SELECT 'exponentialMovingAverage'; + +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialMovingAverage(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialMovingAverage(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number AS value, number AS time, exponentialMovingAverage(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); + +SELECT + value, + time, + round(exp_smooth, 3), + bar(exp_smooth, 0, 1, 50) AS bar +FROM +( + SELECT + (number = 0) OR (number >= 25) AS value, + number AS time, + exponentialMovingAverage(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth + FROM numbers(50) +); + +SELECT + 
value, + time, + round(exp_smooth, 3), + bar(exp_smooth, 0, 1, 50) AS bar +FROM +( + SELECT + (number % 5) = 0 AS value, + number AS time, + exponentialMovingAverage(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth + FROM numbers(50) +); + +SELECT 'exponentialTimeDecayedSum'; + +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedSum(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedSum(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number AS value, number AS time, exponentialTimeDecayedSum(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); + +SELECT + value, + time, + round(exp_smooth, 3), + bar(exp_smooth, 0, 10, 50) AS bar +FROM +( + SELECT + (number = 0) OR (number >= 25) AS value, + number AS time, + exponentialTimeDecayedSum(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth + FROM numbers(50) +); + +SELECT + value, + time, + round(exp_smooth, 3), + bar(exp_smooth, 0, 1, 50) AS bar +FROM +( + SELECT + (number % 5) = 0 AS value, + number AS time, + exponentialTimeDecayedSum(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth + FROM numbers(50) +); + +SELECT 'exponentialTimeDecayedMax'; + +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedMax(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedMax(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number AS value, number AS time, exponentialTimeDecayedMax(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); + +SELECT + value, + time, + round(exp_smooth, 3), + bar(exp_smooth, 0, 10, 50) AS bar +FROM +( + SELECT + (number = 0) OR (number >= 25) AS value, + number AS time, + exponentialTimeDecayedMax(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth + FROM numbers(50) +); + +SELECT + value, + time, + round(exp_smooth, 3), + bar(exp_smooth, 0, 1, 50) AS bar +FROM +( + SELECT + (number % 5) = 0 AS value, + number AS time, + exponentialTimeDecayedMax(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth + FROM numbers(50) +); + +SELECT 'exponentialTimeDecayedCount'; + +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedCount(1)(time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedCount(10)(time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number AS value, number AS time, exponentialTimeDecayedCount(1)(time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM 
numbers(10)); + +SELECT + value, + time, + round(exp_smooth, 3), + bar(exp_smooth, 0, 10, 50) AS bar +FROM +( + SELECT + (number = 0) OR (number >= 25) AS value, + number AS time, + exponentialTimeDecayedCount(5)(time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth + FROM numbers(50) +); + +SELECT + value, + time, + round(exp_smooth, 3), + bar(exp_smooth, 0, 20, 50) AS bar +FROM +( + SELECT + (number % 5) = 0 AS value, + number AS time, + exponentialTimeDecayedCount(10)(time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth + FROM numbers(50) +); + +SELECT 'exponentialTimeDecayedAvg'; + +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedAvg(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number = 0 AS value, number AS time, exponentialTimeDecayedAvg(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); +SELECT value, time, round(exp_smooth, 3) FROM (SELECT number AS value, number AS time, exponentialTimeDecayedAvg(1)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth FROM numbers(10)); + +SELECT + value, + time, + round(exp_smooth, 3), + bar(exp_smooth, 0, 5, 50) AS bar +FROM +( + SELECT + (number = 0) OR (number >= 25) AS value, + number AS time, + exponentialTimeDecayedAvg(10)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth + FROM numbers(50) +); + +SELECT + value, + time, + round(exp_smooth, 3), + bar(exp_smooth, 0, 0.5, 50) AS bar +FROM +( + SELECT + (number % 5) = 0 AS value, + number AS time, + exponentialTimeDecayedAvg(100)(value, time) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS exp_smooth + FROM numbers(50) +); + +SELECT 'Check `exponentialTimeDecayed.*` supports sliding windows'; + +SELECT + x, + t, + round(sum, 12), + round(max, 12), + round(count, 12), + round(avg, 12) +FROM +( + SELECT + d[1] AS x, + d[2] AS t, + exponentialTimeDecayedSum(100)(x, t) OVER w AS sum, + exponentialTimeDecayedMax(100)(x, t) OVER w AS max, + exponentialTimeDecayedCount(100)(t) OVER w AS count, + exponentialTimeDecayedAvg(100)(x, t) OVER w AS avg + FROM + ( + SELECT [[2, 1], [1, 2], [0, 3], [4, 4], [5, 5], [1, 6], [0, 7], [10, 8]] AS d + ) + ARRAY JOIN d + WINDOW w AS (ORDER BY 1 ASC Rows BETWEEN 2 PRECEDING AND 2 FOLLOWING) +); + +SELECT + x, + t, + round(sum, 12), + round(max, 12), + round(count, 12), + round(avg, 12) +FROM +( + SELECT + sin(number) AS x, + number AS t, + exponentialTimeDecayedSum(100)(x, t) OVER w AS sum, + exponentialTimeDecayedMax(100)(x, t) OVER w AS max, + exponentialTimeDecayedCount(100)(t) OVER w AS count, + exponentialTimeDecayedAvg(100)(x, t) OVER w AS avg + FROM numbers(1000000) + WINDOW w AS (ORDER BY 1 ASC Rows BETWEEN 2 PRECEDING AND 2 FOLLOWING) +) +FORMAT `Null`; + +SELECT 'Check `exponentialTimeDecayedMax` works with negative values'; + +SELECT + x, + t, + round(max, 12) +FROM +( + SELECT + d[1] AS x, + d[2] AS t, + exponentialTimeDecayedMax(100)(-x, t) OVER w AS max + FROM + ( + SELECT [[2, 1], [1, 2], [10, 3], [4, 4], [5, 5], [1, 6], [10, 7], [10, 8], [10, 9], [9.81, 10], [9.9, 11]] AS d + ) + ARRAY JOIN d + WINDOW w AS (ORDER BY 1 ASC Rows BETWEEN 2 PRECEDING AND 2 FOLLOWING) +); diff --git a/parser/testdata/02020_exponential_smoothing_cross_block/ast.json 
b/parser/testdata/02020_exponential_smoothing_cross_block/ast.json new file mode 100644 index 000000000..c5fcf08dd --- /dev/null +++ b/parser/testdata/02020_exponential_smoothing_cross_block/ast.json @@ -0,0 +1,190 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function countIf (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 8)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function exponentialTimeDecayedSum (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier value" + }, + { + "explain": " Identifier time" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_100000002004087730000" + }, + { + "explain": " WindowDefinition (children 1)" + }, + { + "explain": " Literal UInt64_255" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal Int64_-2147483649" + }, + { + "explain": " Function exponentialTimeDecayedSum (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier value" + }, + { + "explain": " Identifier time" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " WindowDefinition (children 1)" + }, + { + "explain": " Literal UInt64_65537" + }, + { + "explain": " Identifier number (alias value)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function exponentialTimeDecayedSum (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier value" + }, + { + "explain": " Identifier time" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_65537" + }, + { + "explain": " WindowDefinition (children 1)" + }, + { + "explain": " Literal UInt64_1048577" + }, + { + "explain": " Identifier number (alias time)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_65535" + } + ], + + "rows": 56, + + "statistics": + { + "elapsed": 0.001282544, + "rows_read": 56, + "bytes_read": 2483 + } +} diff --git a/parser/testdata/02020_exponential_smoothing_cross_block/metadata.json b/parser/testdata/02020_exponential_smoothing_cross_block/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02020_exponential_smoothing_cross_block/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02020_exponential_smoothing_cross_block/query.sql b/parser/testdata/02020_exponential_smoothing_cross_block/query.sql new file mode 100644 index 000000000..05bd7f0ca --- /dev/null +++ b/parser/testdata/02020_exponential_smoothing_cross_block/query.sql @@ -0,0 +1 @@ +SELECT countIf(1) FROM (SELECT NULL, exponentialTimeDecayedSum(100000002004087730000.)(value, time) OVER (Rows BETWEEN 255 FOLLOWING AND UNBOUNDED FOLLOWING), number = -2147483649, exponentialTimeDecayedSum(-1)(value, time) OVER (Rows BETWEEN UNBOUNDED PRECEDING AND 65537 PRECEDING), number AS value, number = NULL, exponentialTimeDecayedSum(65537)(value, time) OVER (Rows BETWEEN UNBOUNDED PRECEDING AND 1048577 FOLLOWING), number AS time FROM numbers(65535)) diff --git a/parser/testdata/02021_exponential_sum/ast.json b/parser/testdata/02021_exponential_sum/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02021_exponential_sum/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02021_exponential_sum/metadata.json b/parser/testdata/02021_exponential_sum/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02021_exponential_sum/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02021_exponential_sum/query.sql b/parser/testdata/02021_exponential_sum/query.sql new file mode 100644 index 000000000..62ec7dcf9 --- /dev/null +++ b/parser/testdata/02021_exponential_sum/query.sql @@ -0,0 +1,8 @@ +-- Check that it is deterministic +WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(10); +WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(100); +WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(1000); +WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(10000); +WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(100000); +WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(1000000); +WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM numbers_mt(10000000); diff --git a/parser/testdata/02021_exponential_sum_shard/ast.json b/parser/testdata/02021_exponential_sum_shard/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02021_exponential_sum_shard/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02021_exponential_sum_shard/metadata.json b/parser/testdata/02021_exponential_sum_shard/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02021_exponential_sum_shard/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02021_exponential_sum_shard/query.sql b/parser/testdata/02021_exponential_sum_shard/query.sql new file mode 100644 index 000000000..8e91637e4 --- /dev/null +++ b/parser/testdata/02021_exponential_sum_shard/query.sql @@ -0,0 +1,5 @@ +-- Check that it is deterministic +WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS 
exp_smooth FROM remote('127.0.0.{1..10}', numbers_mt(1000)); +WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM remote('127.0.0.{1..10}', numbers_mt(10000)); +WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM remote('127.0.0.{1..10}', numbers_mt(100000)); +WITH number % 10 = 0 AS value, number AS time SELECT exponentialMovingAverage(1)(value, time) AS exp_smooth FROM remote('127.0.0.{1..10}', numbers_mt(1000000)); diff --git a/parser/testdata/02021_h3_get_faces/ast.json b/parser/testdata/02021_h3_get_faces/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02021_h3_get_faces/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02021_h3_get_faces/metadata.json b/parser/testdata/02021_h3_get_faces/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02021_h3_get_faces/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02021_h3_get_faces/query.sql b/parser/testdata/02021_h3_get_faces/query.sql new file mode 100644 index 000000000..284e66dcf --- /dev/null +++ b/parser/testdata/02021_h3_get_faces/query.sql @@ -0,0 +1,16 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS h3_indexes; + +CREATE TABLE h3_indexes (h3_index UInt64) ENGINE = Memory; + +-- test H3 indexes from: https://github.com/uber/h3-java/blob/master/src/test/java/com/uber/h3core/TestInspection.java#L86 + +INSERT INTO h3_indexes VALUES (stringToH3('0x85283473fffffffL')); +INSERT INTO h3_indexes VALUES (stringToH3('85283473fffffff')); +INSERT INTO h3_indexes VALUES (stringToH3('0x8167bffffffffffL')); +INSERT INTO h3_indexes VALUES (stringToH3('0x804dfffffffffffL')); + +SELECT arraySort(h3GetFaces(h3_index)) FROM h3_indexes ORDER BY h3_index; + +DROP TABLE h3_indexes; diff --git a/parser/testdata/02021_h3_is_pentagon/ast.json b/parser/testdata/02021_h3_is_pentagon/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02021_h3_is_pentagon/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02021_h3_is_pentagon/metadata.json b/parser/testdata/02021_h3_is_pentagon/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02021_h3_is_pentagon/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02021_h3_is_pentagon/query.sql b/parser/testdata/02021_h3_is_pentagon/query.sql new file mode 100644 index 000000000..11960a7eb --- /dev/null +++ b/parser/testdata/02021_h3_is_pentagon/query.sql @@ -0,0 +1,16 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS h3_indexes; + +CREATE TABLE h3_indexes (h3_index UInt64) ENGINE = Memory; + +-- test H3 indexes from: https://github.com/uber/h3-java/blob/master/src/test/java/com/uber/h3core/TestInspection.java#L78 + +INSERT INTO h3_indexes VALUES (stringToH3('8f28308280f18f2')); +INSERT INTO h3_indexes VALUES (stringToH3('0x8f28308280f18f2L')); +INSERT INTO h3_indexes VALUES (stringToH3('821c07fffffffff')); +INSERT INTO h3_indexes VALUES (stringToH3('0x821c07fffffffffL')); + +SELECT h3IsPentagon(h3_index) FROM h3_indexes ORDER BY h3_index; + +DROP TABLE h3_indexes; diff --git a/parser/testdata/02021_h3_is_res_classIII/ast.json b/parser/testdata/02021_h3_is_res_classIII/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02021_h3_is_res_classIII/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/02021_h3_is_res_classIII/metadata.json b/parser/testdata/02021_h3_is_res_classIII/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02021_h3_is_res_classIII/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02021_h3_is_res_classIII/query.sql b/parser/testdata/02021_h3_is_res_classIII/query.sql new file mode 100644 index 000000000..4b8fd99c7 --- /dev/null +++ b/parser/testdata/02021_h3_is_res_classIII/query.sql @@ -0,0 +1,16 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS h3_indexes; + +CREATE TABLE h3_indexes (h3_index UInt64) ENGINE = Memory; + +-- test H3 indexes from: https://github.com/uber/h3-java/blob/master/src/test/java/com/uber/h3core/TestInspection.java#L57 + +INSERT INTO h3_indexes VALUES (geoToH3(0.0, 0.0, 0)); +INSERT INTO h3_indexes VALUES (geoToH3(10.0, 0.0, 1)); +INSERT INTO h3_indexes VALUES (geoToH3(0.0, 10.0, 2)); +INSERT INTO h3_indexes VALUES (geoToH3(10.0, 10.0, 3)); + +SELECT h3IsResClassIII(h3_index) FROM h3_indexes ORDER BY h3_index; + +DROP TABLE h3_indexes; diff --git a/parser/testdata/02021_map_bloom_filter_index/ast.json b/parser/testdata/02021_map_bloom_filter_index/ast.json new file mode 100644 index 000000000..864cc6ff9 --- /dev/null +++ b/parser/testdata/02021_map_bloom_filter_index/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery map_test_index_map_keys (children 1)" + }, + { + "explain": " Identifier map_test_index_map_keys" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001167077, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/02021_map_bloom_filter_index/metadata.json b/parser/testdata/02021_map_bloom_filter_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02021_map_bloom_filter_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02021_map_bloom_filter_index/query.sql b/parser/testdata/02021_map_bloom_filter_index/query.sql new file mode 100644 index 000000000..d30089343 --- /dev/null +++ b/parser/testdata/02021_map_bloom_filter_index/query.sql @@ -0,0 +1,106 @@ +DROP TABLE IF EXISTS map_test_index_map_keys; +CREATE TABLE map_test_index_map_keys +( + row_id UInt32, + map Map(String, String), + INDEX map_bloom_filter_keys mapKeys(map) TYPE bloom_filter GRANULARITY 1 +) Engine=MergeTree() ORDER BY row_id SETTINGS index_granularity = 1; + +INSERT INTO map_test_index_map_keys VALUES (0, {'K0':'V0'}), (1, {'K1':'V1'}); + +SELECT 'Map bloom filter mapKeys'; + +SELECT 'Equals with existing key'; +SELECT * FROM map_test_index_map_keys WHERE map['K0'] = 'V0' SETTINGS force_data_skipping_indices='map_bloom_filter_keys'; +SELECT 'Equals with non existing key'; +SELECT * FROM map_test_index_map_keys WHERE map['K2'] = 'V2' SETTINGS force_data_skipping_indices='map_bloom_filter_keys'; +SELECT 'Equals with non existing key and default value'; +SELECT * FROM map_test_index_map_keys WHERE map['K3'] = ''; +SELECT 'Not equals with existing key'; +SELECT * FROM map_test_index_map_keys WHERE map['K0'] != 'V0' SETTINGS force_data_skipping_indices='map_bloom_filter_keys'; +SELECT 'Not equals with non existing key'; +SELECT * FROM map_test_index_map_keys WHERE map['K2'] != 'V2' SETTINGS force_data_skipping_indices='map_bloom_filter_keys'; +SELECT 'Not equals with non existing key and default value'; +SELECT * FROM map_test_index_map_keys WHERE map['K3'] != ''; + +SELECT 'IN with 
existing key'; +SELECT * FROM map_test_index_map_keys WHERE map['K0'] IN 'V0' SETTINGS force_data_skipping_indices='map_bloom_filter_keys'; +SELECT 'IN with non existing key'; +SELECT * FROM map_test_index_map_keys WHERE map['K2'] IN 'V2' SETTINGS force_data_skipping_indices='map_bloom_filter_keys'; +SELECT 'IN with non existing key and default value'; +SELECT * FROM map_test_index_map_keys WHERE map['K3'] IN ''; +SELECT 'NOT IN with existing key'; +SELECT * FROM map_test_index_map_keys WHERE map['K0'] NOT IN 'V0' SETTINGS force_data_skipping_indices='map_bloom_filter_keys'; +SELECT 'NOT IN with non existing key'; +SELECT * FROM map_test_index_map_keys WHERE map['K2'] NOT IN 'V2' SETTINGS force_data_skipping_indices='map_bloom_filter_keys'; +SELECT 'NOT IN with non existing key and default value'; +SELECT * FROM map_test_index_map_keys WHERE map['K3'] NOT IN ''; + +SELECT 'MapContains with existing key'; +SELECT * FROM map_test_index_map_keys WHERE mapContains(map, 'K0') SETTINGS force_data_skipping_indices='map_bloom_filter_keys'; +SELECT 'MapContains with non existing key'; +SELECT * FROM map_test_index_map_keys WHERE mapContains(map, 'K2') SETTINGS force_data_skipping_indices='map_bloom_filter_keys'; +SELECT 'MapContains with non existing key and default value'; +SELECT * FROM map_test_index_map_keys WHERE mapContains(map, ''); + +SELECT 'MapContainsKey with existing key'; +SELECT * FROM map_test_index_map_keys WHERE mapContainsKey(map, 'K0') SETTINGS force_data_skipping_indices='map_bloom_filter_keys'; +SELECT 'MapContainsKey with non existing key'; +SELECT * FROM map_test_index_map_keys WHERE mapContainsKey(map, 'K2') SETTINGS force_data_skipping_indices='map_bloom_filter_keys'; +SELECT 'MapContainsKey with non existing key and default value'; +SELECT * FROM map_test_index_map_keys WHERE mapContainsKey(map, ''); + +SELECT 'Has with existing key'; +SELECT * FROM map_test_index_map_keys WHERE has(map, 'K0') SETTINGS force_data_skipping_indices='map_bloom_filter_keys'; +SELECT 'Has with non existing key'; +SELECT * FROM map_test_index_map_keys WHERE has(map, 'K2') SETTINGS force_data_skipping_indices='map_bloom_filter_keys'; +SELECT 'Has with non existing key and default value'; +SELECT * FROM map_test_index_map_keys WHERE has(map, '') SETTINGS force_data_skipping_indices='map_bloom_filter_keys'; + +DROP TABLE map_test_index_map_keys; + +DROP TABLE IF EXISTS map_test_index_map_values; +CREATE TABLE map_test_index_map_values +( + row_id UInt32, + map Map(String, String), + INDEX map_bloom_filter_values mapValues(map) TYPE bloom_filter GRANULARITY 1 +) Engine=MergeTree() ORDER BY row_id SETTINGS index_granularity = 1; + +INSERT INTO map_test_index_map_values VALUES (0, {'K0':'V0'}), (1, {'K1':'V1'}); + +SELECT 'Map bloom filter mapValues'; + +SELECT 'Equals with existing key'; +SELECT * FROM map_test_index_map_values WHERE map['K0'] = 'V0' SETTINGS force_data_skipping_indices='map_bloom_filter_values'; +SELECT 'Equals with non existing key'; +SELECT * FROM map_test_index_map_values WHERE map['K2'] = 'V2' SETTINGS force_data_skipping_indices='map_bloom_filter_values'; +SELECT 'Equals with non existing key and default value'; +SELECT * FROM map_test_index_map_values WHERE map['K3'] = ''; +SELECT 'Not equals with existing key'; +SELECT * FROM map_test_index_map_values WHERE map['K0'] != 'V0' SETTINGS force_data_skipping_indices='map_bloom_filter_values'; +SELECT 'Not equals with non existing key'; +SELECT * FROM map_test_index_map_values WHERE map['K2'] != 'V2' SETTINGS 
force_data_skipping_indices='map_bloom_filter_values'; +SELECT 'Not equals with non existing key and default value'; +SELECT * FROM map_test_index_map_values WHERE map['K3'] != ''; +SELECT 'IN with existing key'; +SELECT * FROM map_test_index_map_values WHERE map['K0'] IN 'V0' SETTINGS force_data_skipping_indices='map_bloom_filter_values'; +SELECT 'IN with non existing key'; +SELECT * FROM map_test_index_map_values WHERE map['K2'] IN 'V2' SETTINGS force_data_skipping_indices='map_bloom_filter_values'; +SELECT 'IN with non existing key and default value'; +SELECT * FROM map_test_index_map_values WHERE map['K3'] IN ''; +SELECT 'NOT IN with existing key'; +SELECT * FROM map_test_index_map_values WHERE map['K0'] NOT IN 'V0' SETTINGS force_data_skipping_indices='map_bloom_filter_values'; +SELECT 'NOT IN with non existing key'; +SELECT * FROM map_test_index_map_values WHERE map['K2'] NOT IN 'V2' SETTINGS force_data_skipping_indices='map_bloom_filter_values'; +SELECT 'NOT IN with non existing key and default value'; +SELECT * FROM map_test_index_map_values WHERE map['K3'] NOT IN ''; + +SELECT 'MapContainsValue with existing value'; +SELECT * FROM map_test_index_map_values WHERE mapContainsValue(map, 'V0') SETTINGS force_data_skipping_indices='map_bloom_filter_values'; +SELECT 'MapContainsValue with non existing value'; +SELECT * FROM map_test_index_map_values WHERE mapContainsValue(map, 'V2') SETTINGS force_data_skipping_indices='map_bloom_filter_values'; +SELECT 'MapContainsValue with non existing default value'; +SELECT * FROM map_test_index_map_values WHERE mapContainsValue(map, ''); + +DROP TABLE map_test_index_map_values; diff --git a/parser/testdata/02021_map_has/ast.json b/parser/testdata/02021_map_has/ast.json new file mode 100644 index 000000000..631acbd11 --- /dev/null +++ b/parser/testdata/02021_map_has/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_map (children 1)" + }, + { + "explain": " Identifier test_map" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001343377, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/02021_map_has/metadata.json b/parser/testdata/02021_map_has/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02021_map_has/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02021_map_has/query.sql b/parser/testdata/02021_map_has/query.sql new file mode 100644 index 000000000..840990582 --- /dev/null +++ b/parser/testdata/02021_map_has/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS test_map; +CREATE TABLE test_map (value Map(String, String)) ENGINE=TinyLog; + +SELECT 'Non constant map'; +INSERT INTO test_map VALUES ({'K0':'V0'}); +SELECT has(value, 'K0') FROM test_map; +SELECT has(value, 'K1') FROM test_map; + +SELECT 'Constant map'; + +SELECT has(map('K0', 'V0'), 'K0') FROM system.one; +SELECT has(map('K0', 'V0'), 'K1') FROM system.one; + +DROP TABLE test_map; diff --git a/parser/testdata/02021_prewhere_always_true_where/ast.json b/parser/testdata/02021_prewhere_always_true_where/ast.json new file mode 100644 index 000000000..2062aa2f7 --- /dev/null +++ b/parser/testdata/02021_prewhere_always_true_where/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_02021 (children 1)" + }, + { + "explain": " Identifier data_02021" + } + ], + + "rows": 2, + + "statistics": + { + 
"elapsed": 0.001014659, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02021_prewhere_always_true_where/metadata.json b/parser/testdata/02021_prewhere_always_true_where/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02021_prewhere_always_true_where/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02021_prewhere_always_true_where/query.sql b/parser/testdata/02021_prewhere_always_true_where/query.sql new file mode 100644 index 000000000..95dcb6a15 --- /dev/null +++ b/parser/testdata/02021_prewhere_always_true_where/query.sql @@ -0,0 +1,5 @@ +drop table if exists data_02021; +create table data_02021 (key Int) engine=MergeTree() order by key; +insert into data_02021 values (1); +select count() from data_02021 prewhere 1 or ignore(key) where ignore(key)=0; +drop table data_02021; diff --git a/parser/testdata/02021_prewhere_column_optimization/ast.json b/parser/testdata/02021_prewhere_column_optimization/ast.json new file mode 100644 index 000000000..627f582a9 --- /dev/null +++ b/parser/testdata/02021_prewhere_column_optimization/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_02021 (children 1)" + }, + { + "explain": " Identifier data_02021" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001147568, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02021_prewhere_column_optimization/metadata.json b/parser/testdata/02021_prewhere_column_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02021_prewhere_column_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02021_prewhere_column_optimization/query.sql b/parser/testdata/02021_prewhere_column_optimization/query.sql new file mode 100644 index 000000000..4fe8b912c --- /dev/null +++ b/parser/testdata/02021_prewhere_column_optimization/query.sql @@ -0,0 +1,10 @@ +drop table if exists data_02021; +create table data_02021 (key Int) engine=MergeTree() order by key; +insert into data_02021 values (1); +-- { echoOn } +select * from data_02021 prewhere 1 or ignore(key); +select * from data_02021 prewhere 1 or ignore(key) where key = 1; +select * from data_02021 prewhere 0 or ignore(key); +select * from data_02021 prewhere 0 or ignore(key) where key = 1; +-- { echoOff } +drop table data_02021; diff --git a/parser/testdata/02022_array_full_text_bloom_filter_index/ast.json b/parser/testdata/02022_array_full_text_bloom_filter_index/ast.json new file mode 100644 index 000000000..a70c39d48 --- /dev/null +++ b/parser/testdata/02022_array_full_text_bloom_filter_index/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery bf_tokenbf_array_test (children 1)" + }, + { + "explain": " Identifier bf_tokenbf_array_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001199524, + "rows_read": 2, + "bytes_read": 94 + } +} diff --git a/parser/testdata/02022_array_full_text_bloom_filter_index/metadata.json b/parser/testdata/02022_array_full_text_bloom_filter_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02022_array_full_text_bloom_filter_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02022_array_full_text_bloom_filter_index/query.sql 
b/parser/testdata/02022_array_full_text_bloom_filter_index/query.sql new file mode 100644 index 000000000..646a566b8 --- /dev/null +++ b/parser/testdata/02022_array_full_text_bloom_filter_index/query.sql @@ -0,0 +1,42 @@ +DROP TABLE IF EXISTS bf_tokenbf_array_test; +DROP TABLE IF EXISTS bf_ngram_array_test; + +CREATE TABLE bf_tokenbf_array_test +( + row_id UInt32, + array Array(String), + array_fixed Array(FixedString(2)), + INDEX array_bf_tokenbf array TYPE tokenbf_v1(256,2,0) GRANULARITY 1, + INDEX array_fixed_bf_tokenbf array_fixed TYPE tokenbf_v1(256,2,0) GRANULARITY 1 +) Engine=MergeTree() ORDER BY row_id SETTINGS index_granularity = 1; + +CREATE TABLE bf_ngram_array_test +( + row_id UInt32, + array Array(String), + array_fixed Array(FixedString(2)), + INDEX array_ngram array TYPE ngrambf_v1(4,256,2,0) GRANULARITY 1, + INDEX array_fixed_ngram array_fixed TYPE ngrambf_v1(4,256,2,0) GRANULARITY 1 +) Engine=MergeTree() ORDER BY row_id SETTINGS index_granularity = 1; + +INSERT INTO bf_tokenbf_array_test VALUES (1, ['K1'], ['K1']), (2, ['K2'], ['K2']); +INSERT INTO bf_ngram_array_test VALUES (1, ['K1'], ['K1']), (2, ['K2'], ['K2']); + +SELECT * FROM bf_tokenbf_array_test WHERE has(array, 'K1') SETTINGS force_data_skipping_indices='array_bf_tokenbf'; +SELECT * FROM bf_tokenbf_array_test WHERE has(array, 'K2') SETTINGS force_data_skipping_indices='array_bf_tokenbf'; +SELECT * FROM bf_tokenbf_array_test WHERE has(array, 'K3') SETTINGS force_data_skipping_indices='array_bf_tokenbf'; + +SELECT * FROM bf_tokenbf_array_test WHERE has(array_fixed, 'K1') SETTINGS force_data_skipping_indices='array_fixed_bf_tokenbf'; +SELECT * FROM bf_tokenbf_array_test WHERE has(array_fixed, 'K2') SETTINGS force_data_skipping_indices='array_fixed_bf_tokenbf'; +SELECT * FROM bf_tokenbf_array_test WHERE has(array_fixed, 'K3') SETTINGS force_data_skipping_indices='array_fixed_bf_tokenbf'; + +SELECT * FROM bf_ngram_array_test WHERE has(array, 'K1') SETTINGS force_data_skipping_indices='array_ngram'; +SELECT * FROM bf_ngram_array_test WHERE has(array, 'K2') SETTINGS force_data_skipping_indices='array_ngram'; +SELECT * FROM bf_ngram_array_test WHERE has(array, 'K3') SETTINGS force_data_skipping_indices='array_ngram'; + +SELECT * FROM bf_ngram_array_test WHERE has(array_fixed, 'K1') SETTINGS force_data_skipping_indices='array_fixed_ngram'; +SELECT * FROM bf_ngram_array_test WHERE has(array_fixed, 'K2') SETTINGS force_data_skipping_indices='array_fixed_ngram'; +SELECT * FROM bf_ngram_array_test WHERE has(array_fixed, 'K3') SETTINGS force_data_skipping_indices='array_fixed_ngram'; + +DROP TABLE bf_tokenbf_array_test; +DROP TABLE bf_ngram_array_test; diff --git a/parser/testdata/02023_nullable_int_uint_where/ast.json b/parser/testdata/02023_nullable_int_uint_where/ast.json new file mode 100644 index 000000000..f6d30d658 --- /dev/null +++ b/parser/testdata/02023_nullable_int_uint_where/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001383736, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02023_nullable_int_uint_where/metadata.json b/parser/testdata/02023_nullable_int_uint_where/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02023_nullable_int_uint_where/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02023_nullable_int_uint_where/query.sql b/parser/testdata/02023_nullable_int_uint_where/query.sql new file mode 100644 index 000000000..4318fbf50 --- /dev/null +++ b/parser/testdata/02023_nullable_int_uint_where/query.sql @@ -0,0 +1,10 @@ +drop table if exists t1; + +set allow_suspicious_low_cardinality_types = 1; +create table t1 (id LowCardinality(Nullable(Int64))) engine MergeTree order by id settings allow_nullable_key = 1, index_granularity = 1; + +insert into t1 values (21585718595728998), (null); + +select * from t1 where id = 21585718595728998; + +drop table t1; diff --git a/parser/testdata/02023_parser_number_binary_literal/ast.json b/parser/testdata/02023_parser_number_binary_literal/ast.json new file mode 100644 index 000000000..4cbddfd0e --- /dev/null +++ b/parser/testdata/02023_parser_number_binary_literal/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias number)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001320629, + "rows_read": 8, + "bytes_read": 307 + } +} diff --git a/parser/testdata/02023_parser_number_binary_literal/metadata.json b/parser/testdata/02023_parser_number_binary_literal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02023_parser_number_binary_literal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02023_parser_number_binary_literal/query.sql b/parser/testdata/02023_parser_number_binary_literal/query.sql new file mode 100644 index 000000000..bd5df7845 --- /dev/null +++ b/parser/testdata/02023_parser_number_binary_literal/query.sql @@ -0,0 +1,16 @@ +SELECT 0b0001 as number, toTypeName(number); +SELECT 0b0010 as number, toTypeName(number); +SELECT 0b0100 as number, toTypeName(number); +SELECT 0b1000 as number, toTypeName(number); + +SELECT 'Unsigned numbers'; +SELECT 0b10000000 as number, toTypeName(number); +SELECT 0b1000000000000000 as number, toTypeName(number); +SELECT 0b10000000000000000000000000000000 as number, toTypeName(number); +SELECT 0b1000000000000000000000000000000000000000000000000000000000000000 as number, toTypeName(number); + +SELECT 'Signed numbers'; +SELECT -0b10000000 as number, toTypeName(number); +SELECT -0b1000000000000000 as number, toTypeName(number); +SELECT -0b10000000000000000000000000000000 as number, toTypeName(number); +SELECT -0b1000000000000000000000000000000000000000000000000000000000000000 as number, toTypeName(number); diff --git a/parser/testdata/02023_transform_or_to_in/ast.json b/parser/testdata/02023_transform_or_to_in/ast.json new file mode 100644 index 000000000..389cf8302 --- /dev/null +++ b/parser/testdata/02023_transform_or_to_in/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_transform_or (children 1)" + }, + { + "explain": " Identifier t_transform_or" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001106404, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/02023_transform_or_to_in/metadata.json 
b/parser/testdata/02023_transform_or_to_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02023_transform_or_to_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02023_transform_or_to_in/query.sql b/parser/testdata/02023_transform_or_to_in/query.sql new file mode 100644 index 000000000..c4ceeb769 --- /dev/null +++ b/parser/testdata/02023_transform_or_to_in/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS t_transform_or; + +CREATE TABLE t_transform_or(B AggregateFunction(uniq, String), A String) Engine=MergeTree ORDER BY (A); + +INSERT INTO t_transform_or SELECT uniqState(''), '0'; + +SELECT uniqMergeIf(B, (A = '1') OR (A = '2') OR (A = '3')) +FROM cluster(test_cluster_two_shards, currentDatabase(), t_transform_or) +SETTINGS legacy_column_name_of_tuple_literal = 0; + +SELECT uniqMergeIf(B, (A = '1') OR (A = '2') OR (A = '3')) +FROM cluster(test_cluster_two_shards, currentDatabase(), t_transform_or) +SETTINGS legacy_column_name_of_tuple_literal = 1; + +DROP TABLE t_transform_or; diff --git a/parser/testdata/02024_compile_expressions_with_short_circuit_evaluation/ast.json b/parser/testdata/02024_compile_expressions_with_short_circuit_evaluation/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02024_compile_expressions_with_short_circuit_evaluation/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02024_compile_expressions_with_short_circuit_evaluation/metadata.json b/parser/testdata/02024_compile_expressions_with_short_circuit_evaluation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02024_compile_expressions_with_short_circuit_evaluation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02024_compile_expressions_with_short_circuit_evaluation/query.sql b/parser/testdata/02024_compile_expressions_with_short_circuit_evaluation/query.sql new file mode 100644 index 000000000..113d0d9d4 --- /dev/null +++ b/parser/testdata/02024_compile_expressions_with_short_circuit_evaluation/query.sql @@ -0,0 +1,2 @@ +-- { echo } +select 1+number+multiIf(number == 1, cityHash64(number), number) from numbers(1) settings compile_expressions=1, min_count_to_compile_expression=0; diff --git a/parser/testdata/02024_create_dictionary_with_comment/ast.json b/parser/testdata/02024_create_dictionary_with_comment/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02024_create_dictionary_with_comment/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02024_create_dictionary_with_comment/metadata.json b/parser/testdata/02024_create_dictionary_with_comment/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02024_create_dictionary_with_comment/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02024_create_dictionary_with_comment/query.sql b/parser/testdata/02024_create_dictionary_with_comment/query.sql new file mode 100644 index 000000000..00557e4cf --- /dev/null +++ b/parser/testdata/02024_create_dictionary_with_comment/query.sql @@ -0,0 +1,33 @@ +----------------------------------------------------------------------------------- +-- Check that `DICTIONARY` can be created with a `COMMENT` clause +-- and comment is visible both in `comment` column of `system.dictionaries` +-- and `SHOW CREATE DICTIONARY`. 
+----------------------------------------------------------------------------------- + +-- prerequisites +CREATE TABLE source_table +( + id UInt64, + value String +) ENGINE = Memory(); + +INSERT INTO source_table VALUES (1, 'First'); +INSERT INTO source_table VALUES (2, 'Second'); + +DROP DICTIONARY IF EXISTS 2024_dictionary_with_comment; + +CREATE DICTIONARY 2024_dictionary_with_comment +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'source_table')) +LAYOUT(FLAT()) +LIFETIME(MIN 0 MAX 1000) +COMMENT 'Test dictionary with comment'; + +SHOW CREATE DICTIONARY 2024_dictionary_with_comment; +SELECT comment FROM system.dictionaries WHERE name == '2024_dictionary_with_comment' AND database == currentDatabase(); + +DROP DICTIONARY IF EXISTS 2024_dictionary_with_comment; diff --git a/parser/testdata/02024_merge_regexp_assert/ast.json b/parser/testdata/02024_merge_regexp_assert/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02024_merge_regexp_assert/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02024_merge_regexp_assert/metadata.json b/parser/testdata/02024_merge_regexp_assert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02024_merge_regexp_assert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02024_merge_regexp_assert/query.sql b/parser/testdata/02024_merge_regexp_assert/query.sql new file mode 100644 index 000000000..feb1ce2f9 --- /dev/null +++ b/parser/testdata/02024_merge_regexp_assert/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-parallel +-- (databases can be removed in background, so this test should not be run in parallel) + +DROP TABLE IF EXISTS t; +CREATE TABLE t (b UInt8) ENGINE = Memory; +SELECT a FROM merge(REGEXP('.'), '^t$'); -- { serverError UNKNOWN_IDENTIFIER } +SELECT a FROM merge(REGEXP('\0'), '^t$'); -- { serverError UNKNOWN_IDENTIFIER } +SELECT a FROM merge(REGEXP('\0a'), '^t$'); -- { serverError UNKNOWN_IDENTIFIER } +SELECT a FROM merge(REGEXP('\0a'), '^$'); -- { serverError CANNOT_EXTRACT_TABLE_STRUCTURE } +DROP TABLE t; diff --git a/parser/testdata/02025_dictionary_array_nested_map/ast.json b/parser/testdata/02025_dictionary_array_nested_map/ast.json new file mode 100644 index 000000000..d6477601c --- /dev/null +++ b/parser/testdata/02025_dictionary_array_nested_map/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery dict_nested_map_test_table (children 1)" + }, + { + "explain": " Identifier dict_nested_map_test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001003344, + "rows_read": 2, + "bytes_read": 105 + } +} diff --git a/parser/testdata/02025_dictionary_array_nested_map/metadata.json b/parser/testdata/02025_dictionary_array_nested_map/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02025_dictionary_array_nested_map/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02025_dictionary_array_nested_map/query.sql b/parser/testdata/02025_dictionary_array_nested_map/query.sql new file mode 100644 index 000000000..99b45fbb1 --- /dev/null +++ b/parser/testdata/02025_dictionary_array_nested_map/query.sql @@ -0,0 +1,25 @@ +CREATE TABLE dict_nested_map_test_table +( + test_id UInt32, + type String, + test_config Array(Map(String, Decimal(28,12))), + ncp UInt8 +) +ENGINE=MergeTree() +ORDER BY test_id; + 
+INSERT INTO dict_nested_map_test_table VALUES (3, 't', [{'l': 0.0, 'h': 10000.0, 't': 0.1}, {'l': 10001.0, 'h': 100000000000000.0, 't': 0.2}], 0); + +CREATE DICTIONARY dict_nested_map_dictionary +( + test_id UInt32, + type String, + test_config Array(Map(String, Decimal(28,12))), + ncp UInt8 +) +PRIMARY KEY test_id +SOURCE(CLICKHOUSE(TABLE 'dict_nested_map_test_table')) +LAYOUT(HASHED()) +LIFETIME(MIN 1 MAX 1000000); + +SELECT dictGet('dict_nested_map_dictionary', 'test_config', toUInt64(3)); diff --git a/parser/testdata/02025_dictionary_view_different_db/ast.json b/parser/testdata/02025_dictionary_view_different_db/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02025_dictionary_view_different_db/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02025_dictionary_view_different_db/metadata.json b/parser/testdata/02025_dictionary_view_different_db/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02025_dictionary_view_different_db/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02025_dictionary_view_different_db/query.sql b/parser/testdata/02025_dictionary_view_different_db/query.sql new file mode 100644 index 000000000..f45520f79 --- /dev/null +++ b/parser/testdata/02025_dictionary_view_different_db/query.sql @@ -0,0 +1,43 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS test_db_2025; +CREATE DATABASE test_db_2025; + +DROP TABLE IF EXISTS test_db_2025.test_table; +CREATE TABLE test_db_2025.test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_db_2025.test_table VALUES (0, 'Value'); + +CREATE DICTIONARY test_db_2025.test_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +LAYOUT(DIRECT()) +SOURCE(CLICKHOUSE(TABLE 'test_table' DB 'test_db_2025')); + +DROP TABLE IF EXISTS test_db_2025.view_table; +CREATE TABLE test_db_2025.view_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_db_2025.view_table VALUES (0, 'ViewValue'); + +DROP VIEW IF EXISTS test_view_different_db; +CREATE VIEW test_view_different_db AS SELECT id, value, dictGet('test_db_2025.test_dictionary', 'value', id) FROM test_db_2025.view_table; +SELECT * FROM test_view_different_db; + +DROP DICTIONARY test_db_2025.test_dictionary; +DROP TABLE test_db_2025.test_table; +DROP TABLE test_db_2025.view_table; + +DROP VIEW test_view_different_db; + +DROP DATABASE test_db_2025; diff --git a/parser/testdata/02025_having_filter_column/ast.json b/parser/testdata/02025_having_filter_column/ast.json new file mode 100644 index 000000000..4d2721442 --- /dev/null +++ b/parser/testdata/02025_having_filter_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001198467, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02025_having_filter_column/metadata.json b/parser/testdata/02025_having_filter_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02025_having_filter_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02025_having_filter_column/query.sql b/parser/testdata/02025_having_filter_column/query.sql new file mode 100644 index 000000000..aab419adc --- /dev/null +++ b/parser/testdata/02025_having_filter_column/query.sql @@ -0,0 
+1,40 @@ +drop table if exists test; + +-- #29010 +CREATE TABLE test +( + d DateTime, + a String, + b UInt64 +) +ENGINE = MergeTree +PARTITION BY toDate(d) +ORDER BY d; + +SELECT * +FROM ( + SELECT + a, + max((d, b)).2 AS value + FROM test + GROUP BY rollup(a) +) +WHERE a <> ''; + +-- the same query, but after syntax optimization +SELECT + a, + value +FROM +( + SELECT + a, + max((d, b)).2 AS value + FROM test + GROUP BY a + WITH ROLLUP + HAVING a != '' +) +WHERE a != ''; + +drop table if exists test; diff --git a/parser/testdata/02025_nested_func_for_if_combinator/ast.json b/parser/testdata/02025_nested_func_for_if_combinator/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02025_nested_func_for_if_combinator/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02025_nested_func_for_if_combinator/metadata.json b/parser/testdata/02025_nested_func_for_if_combinator/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02025_nested_func_for_if_combinator/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02025_nested_func_for_if_combinator/query.sql b/parser/testdata/02025_nested_func_for_if_combinator/query.sql new file mode 100644 index 000000000..4811023c1 --- /dev/null +++ b/parser/testdata/02025_nested_func_for_if_combinator/query.sql @@ -0,0 +1,8 @@ +-- { echo } +SELECT uniqCombinedIfMerge(n) FROM (SELECT uniqCombinedIfState(number, number % 2) AS n, max(number) AS last FROM numbers(10)); +SELECT uniqCombinedIfMergeIf(n, last > 50) FROM (SELECT uniqCombinedIfState(number, number % 2) AS n, max(number) AS last FROM numbers(10)); +SELECT uniqCombinedIfMergeIf(n, last > 50) FILTER(WHERE last>50) FROM (SELECT uniqCombinedIfState(number, number % 2) AS n, max(number) AS last FROM numbers(10)); -- { serverError ILLEGAL_AGGREGATION } +SELECT uniqCombinedIfMerge(n) FILTER(WHERE last>50) FROM (SELECT uniqCombinedIfState(number, number % 2) AS n, max(number) AS last FROM numbers(10)); +SELECT uniqCombinedIfMergeIf(n, last > 5) FROM (SELECT uniqCombinedIfState(number, number % 2) AS n, max(number) AS last FROM numbers(10)); +SELECT uniqCombinedIfMergeIfIf(n, last > 5) FROM (SELECT uniqCombinedIfState(number, number % 2) AS n, max(number) AS last FROM numbers(10)); -- { serverError ILLEGAL_AGGREGATION } +SELECT uniqCombinedIfMergeIfIf(n, last > 5, 1) FROM (SELECT uniqCombinedIfState(number, number % 2) AS n, max(number) AS last FROM numbers(10)); -- { serverError ILLEGAL_AGGREGATION } diff --git a/parser/testdata/02025_subcolumns_compact_parts/ast.json b/parser/testdata/02025_subcolumns_compact_parts/ast.json new file mode 100644 index 000000000..6b23ab334 --- /dev/null +++ b/parser/testdata/02025_subcolumns_compact_parts/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_comp_subcolumns (children 1)" + }, + { + "explain": " Identifier t_comp_subcolumns" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001219104, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/02025_subcolumns_compact_parts/metadata.json b/parser/testdata/02025_subcolumns_compact_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02025_subcolumns_compact_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02025_subcolumns_compact_parts/query.sql b/parser/testdata/02025_subcolumns_compact_parts/query.sql 
new file mode 100644 index 000000000..7d1957a1e --- /dev/null +++ b/parser/testdata/02025_subcolumns_compact_parts/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS t_comp_subcolumns; + +CREATE TABLE t_comp_subcolumns (id UInt32, n Nullable(String), arr Array(Array(UInt32))) +ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_comp_subcolumns SELECT number, 'a', [range(number % 11), range(number % 13)] FROM numbers(20000); + +SELECT sum(n.null) FROM t_comp_subcolumns; +SELECT n.null FROM t_comp_subcolumns LIMIT 10000, 5; + +SELECT sum(arr.size0) FROM t_comp_subcolumns; +SELECT sumArray(arr.size1) FROM t_comp_subcolumns; + +DROP TABLE t_comp_subcolumns; diff --git a/parser/testdata/02026_accurate_cast_or_default/ast.json b/parser/testdata/02026_accurate_cast_or_default/ast.json new file mode 100644 index 000000000..0e82885d0 --- /dev/null +++ b/parser/testdata/02026_accurate_cast_or_default/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function accurateCastOrDefault (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Literal 'UInt8'" + }, + { + "explain": " Function accurateCastOrDefault (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Literal 'UInt8'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001222261, + "rows_read": 12, + "bytes_read": 457 + } +} diff --git a/parser/testdata/02026_accurate_cast_or_default/metadata.json b/parser/testdata/02026_accurate_cast_or_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02026_accurate_cast_or_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02026_accurate_cast_or_default/query.sql b/parser/testdata/02026_accurate_cast_or_default/query.sql new file mode 100644 index 000000000..d493914c9 --- /dev/null +++ b/parser/testdata/02026_accurate_cast_or_default/query.sql @@ -0,0 +1,60 @@ +SELECT accurateCastOrDefault(-1, 'UInt8'), accurateCastOrDefault(5, 'UInt8'); +SELECT accurateCastOrDefault(5, 'UInt8'); +SELECT accurateCastOrDefault(257, 'UInt8'), accurateCastOrDefault(257, 'UInt8', 5); +SELECT accurateCastOrDefault(-1, 'UInt16'), accurateCastOrDefault(-1, 'UInt16', toUInt16(5)); +SELECT accurateCastOrDefault(5, 'UInt16'); +SELECT accurateCastOrDefault(65536, 'UInt16'), accurateCastOrDefault(65536, 'UInt16', toUInt16(5)); +SELECT accurateCastOrDefault(-1, 'UInt32'), accurateCastOrDefault(-1, 'UInt32', toUInt32(5)); +SELECT accurateCastOrDefault(5, 'UInt32'); +SELECT accurateCastOrDefault(4294967296, 'UInt32'), accurateCastOrDefault(4294967296, 'UInt32', toUInt32(5)); +SELECT accurateCastOrDefault(-1, 'UInt64'), accurateCastOrDefault(-1, 'UInt64', toUInt64(5)); +SELECT accurateCastOrDefault(5, 'UInt64'); +SELECT accurateCastOrDefault(-1, 'UInt256'), accurateCastOrDefault(-1, 'UInt256', toUInt256(5)); +SELECT accurateCastOrDefault(5, 'UInt256'); +SELECT accurateCastOrDefault(-129, 'Int8'), accurateCastOrDefault(-129, 'Int8', toInt8(5)); +SELECT accurateCastOrDefault(5, 'Int8'); +SELECT accurateCastOrDefault(128, 'Int8'), accurateCastOrDefault(128, 'Int8', toInt8(5)); + +SELECT 
accurateCastOrDefault(10, 'Decimal32(9)'), accurateCastOrDefault(10, 'Decimal32(9)', toDecimal32(2, 9)); +SELECT accurateCastOrDefault(1, 'Decimal32(9)'); +SELECT accurateCastOrDefault(-10, 'Decimal32(9)'), accurateCastOrDefault(-10, 'Decimal32(9)', toDecimal32(2, 9)); + +SELECT accurateCastOrDefault('123', 'FixedString(2)'), accurateCastOrDefault('123', 'FixedString(2)', cast('12', 'FixedString(2)')); + +SELECT accurateCastOrDefault(inf, 'Int64'), accurateCastOrDefault(inf, 'Int64', toInt64(5)); +SELECT accurateCastOrDefault(inf, 'Int128'), accurateCastOrDefault(inf, 'Int128', toInt128(5)); +SELECT accurateCastOrDefault(inf, 'Int256'), accurateCastOrDefault(inf, 'Int256', toInt256(5)); +SELECT accurateCastOrDefault(nan, 'Int64'), accurateCastOrDefault(nan, 'Int64', toInt64(5)); +SELECT accurateCastOrDefault(nan, 'Int128'), accurateCastOrDefault(nan, 'Int128', toInt128(5)); +SELECT accurateCastOrDefault(nan, 'Int256'), accurateCastOrDefault(nan, 'Int256', toInt256(5)); + +SELECT accurateCastOrDefault(inf, 'UInt64'), accurateCastOrDefault(inf, 'UInt64', toUInt64(5)); +SELECT accurateCastOrDefault(inf, 'UInt256'), accurateCastOrDefault(inf, 'UInt256', toUInt256(5)); +SELECT accurateCastOrDefault(nan, 'UInt64'), accurateCastOrDefault(nan, 'UInt64', toUInt64(5)); +SELECT accurateCastOrDefault(nan, 'UInt256'), accurateCastOrDefault(nan, 'UInt256', toUInt256(5)); + +SELECT accurateCastOrDefault(number + 127, 'Int8') AS x, accurateCastOrDefault(number + 127, 'Int8', toInt8(5)) AS x_with_default FROM numbers (2) ORDER BY number; + +select accurateCastOrDefault('test', 'Nullable(Bool)'); +select accurateCastOrDefault('test', 'Bool'); +select accurateCastOrDefault('truex', 'Bool'); +select accurateCastOrDefault('xfalse', 'Bool'); +select accurateCastOrDefault('true', 'Bool'); +select accurateCastOrDefault('false', 'Bool'); +select accurateCastOrDefault('1', 'Bool'); +select accurateCastOrDefault('0', 'Bool'); +select accurateCastOrDefault(1, 'Bool'); +select accurateCastOrDefault(0, 'Bool'); + +select accurateCastOrDefault('test', 'Nullable(IPv4)'); +select accurateCastOrDefault('test', 'IPv4'); +select accurateCastOrDefault('2001:db8::1', 'IPv4'); +select accurateCastOrDefault('::ffff:192.0.2.1', 'IPv4'); +select accurateCastOrDefault('192.0.2.1', 'IPv4'); +select accurateCastOrDefault('192.0.2.1x', 'IPv4'); + +select accurateCastOrDefault('test', 'Nullable(IPv6)'); +select accurateCastOrDefault('test', 'IPv6'); +select accurateCastOrDefault('192.0.2.1', 'IPv6'); +select accurateCastOrDefault('2001:db8::1', 'IPv6'); +select accurateCastOrDefault('2001:db8::1x', 'IPv6'); diff --git a/parser/testdata/02026_arrayDifference_const/ast.json b/parser/testdata/02026_arrayDifference_const/ast.json new file mode 100644 index 000000000..c134bf2c3 --- /dev/null +++ b/parser/testdata/02026_arrayDifference_const/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayDifference (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + 
{ + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2]" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001188281, + "rows_read": 15, + "bytes_read": 619 + } +} diff --git a/parser/testdata/02026_arrayDifference_const/metadata.json b/parser/testdata/02026_arrayDifference_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02026_arrayDifference_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02026_arrayDifference_const/query.sql b/parser/testdata/02026_arrayDifference_const/query.sql new file mode 100644 index 000000000..55a48d2be --- /dev/null +++ b/parser/testdata/02026_arrayDifference_const/query.sql @@ -0,0 +1 @@ +SELECT toString(arrayDifference(x->0, [1, 2])); diff --git a/parser/testdata/02026_describe_include_subcolumns/ast.json b/parser/testdata/02026_describe_include_subcolumns/ast.json new file mode 100644 index 000000000..f7effc53f --- /dev/null +++ b/parser/testdata/02026_describe_include_subcolumns/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001205598, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02026_describe_include_subcolumns/metadata.json b/parser/testdata/02026_describe_include_subcolumns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02026_describe_include_subcolumns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02026_describe_include_subcolumns/query.sql b/parser/testdata/02026_describe_include_subcolumns/query.sql new file mode 100644 index 000000000..9210666a9 --- /dev/null +++ b/parser/testdata/02026_describe_include_subcolumns/query.sql @@ -0,0 +1,20 @@ +SET output_format_pretty_fallback_to_vertical = 0; + +DROP TABLE IF EXISTS t_desc_subcolumns; + +CREATE TABLE t_desc_subcolumns +( + d Date, + n Nullable(String) COMMENT 'It is a nullable column', + arr1 Array(UInt32) CODEC(ZSTD), + arr2 Array(Array(String)) TTL d + INTERVAL 1 DAY, + t Tuple(s String, a Array(Tuple(a UInt32, b UInt32))) CODEC(ZSTD) +) +ENGINE = MergeTree ORDER BY d; + +DESCRIBE TABLE t_desc_subcolumns FORMAT PrettyCompactNoEscapes; + +DESCRIBE TABLE t_desc_subcolumns FORMAT PrettyCompactNoEscapes +SETTINGS describe_include_subcolumns = 1; + +DROP TABLE t_desc_subcolumns; diff --git a/parser/testdata/02027_arrayCumSumNonNegative_const/ast.json b/parser/testdata/02027_arrayCumSumNonNegative_const/ast.json new file mode 100644 index 000000000..9d670ffbb --- /dev/null +++ b/parser/testdata/02027_arrayCumSumNonNegative_const/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayCumSumNonNegative (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, 
+ { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2]" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.00119568, + "rows_read": 15, + "bytes_read": 626 + } +} diff --git a/parser/testdata/02027_arrayCumSumNonNegative_const/metadata.json b/parser/testdata/02027_arrayCumSumNonNegative_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02027_arrayCumSumNonNegative_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02027_arrayCumSumNonNegative_const/query.sql b/parser/testdata/02027_arrayCumSumNonNegative_const/query.sql new file mode 100644 index 000000000..f95220731 --- /dev/null +++ b/parser/testdata/02027_arrayCumSumNonNegative_const/query.sql @@ -0,0 +1 @@ +SELECT toString(arrayCumSumNonNegative(x->0, [1, 2])); diff --git a/parser/testdata/02027_ngrams/ast.json b/parser/testdata/02027_ngrams/ast.json new file mode 100644 index 000000000..89de05c85 --- /dev/null +++ b/parser/testdata/02027_ngrams/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function ngrams (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'Test'" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001701615, + "rows_read": 8, + "bytes_read": 287 + } +} diff --git a/parser/testdata/02027_ngrams/metadata.json b/parser/testdata/02027_ngrams/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02027_ngrams/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02027_ngrams/query.sql b/parser/testdata/02027_ngrams/query.sql new file mode 100644 index 000000000..f7a6550e6 --- /dev/null +++ b/parser/testdata/02027_ngrams/query.sql @@ -0,0 +1,43 @@ +SELECT ngrams('Test', 1); +SELECT ngrams('Test', 2); +SELECT ngrams('Test', 3); +SELECT ngrams('Test', 4); +SELECT ngrams('Test', 5); +SELECT ngrams('😁😈😁😈', 1); +SELECT ngrams('😁😈😁😈', 2); +SELECT ngrams('😁😈😁😈', 3); +SELECT ngrams('😁😈😁😈', 4); +SELECT ngrams('😁😈😁😈', 5); + +SELECT ngrams(materialize('Test'), 1); +SELECT ngrams(materialize('Test'), 2); +SELECT ngrams(materialize('Test'), 3); +SELECT ngrams(materialize('Test'), 4); +SELECT ngrams(materialize('Test'), 5); +SELECT ngrams(materialize('😁😈😁😈'), 1); +SELECT ngrams(materialize('😁😈😁😈'), 2); +SELECT ngrams(materialize('😁😈😁😈'), 3); +SELECT ngrams(materialize('😁😈😁😈'), 4); +SELECT ngrams(materialize('😁😈😁😈'), 5); + +SELECT ngrams(toFixedString('Test', 4), 1); +SELECT ngrams(toFixedString('Test', 4), 2); +SELECT ngrams(toFixedString('Test', 4), 3); +SELECT ngrams(toFixedString('Test', 4), 4); +SELECT ngrams(toFixedString('Test', 4), 5); +SELECT ngrams(toFixedString('😁😈😁😈', 16), 1); +SELECT ngrams(toFixedString('😁😈😁😈', 16), 2); +SELECT ngrams(toFixedString('😁😈😁😈', 16), 3); +SELECT ngrams(toFixedString('😁😈😁😈', 16), 4); +SELECT ngrams(toFixedString('😁😈😁😈', 16), 5); + +SELECT ngrams(materialize(toFixedString('Test', 4)), 1); +SELECT ngrams(materialize(toFixedString('Test', 4)), 2); +SELECT ngrams(materialize(toFixedString('Test', 4)), 3); +SELECT 
ngrams(materialize(toFixedString('Test', 4)), 4); +SELECT ngrams(materialize(toFixedString('Test', 4)), 5); +SELECT ngrams(materialize(toFixedString('😁😈😁😈', 16)), 1); +SELECT ngrams(materialize(toFixedString('😁😈😁😈', 16)), 2); +SELECT ngrams(materialize(toFixedString('😁😈😁😈', 16)), 3); +SELECT ngrams(materialize(toFixedString('😁😈😁😈', 16)), 4); +SELECT ngrams(materialize(toFixedString('😁😈😁😈', 16)), 5); \ No newline at end of file diff --git a/parser/testdata/02028_add_default_database_for_alterquery_on_cluster/ast.json b/parser/testdata/02028_add_default_database_for_alterquery_on_cluster/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02028_add_default_database_for_alterquery_on_cluster/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02028_add_default_database_for_alterquery_on_cluster/metadata.json b/parser/testdata/02028_add_default_database_for_alterquery_on_cluster/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02028_add_default_database_for_alterquery_on_cluster/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02028_add_default_database_for_alterquery_on_cluster/query.sql b/parser/testdata/02028_add_default_database_for_alterquery_on_cluster/query.sql new file mode 100644 index 000000000..8408f7d17 --- /dev/null +++ b/parser/testdata/02028_add_default_database_for_alterquery_on_cluster/query.sql @@ -0,0 +1,19 @@ +-- Tags: distributed, no-parallel, no-replicated-database +-- Tag no-replicated-database: ON CLUSTER is not allowed + +DROP DATABASE IF EXISTS 02028_db ON CLUSTER test_shard_localhost; +CREATE DATABASE 02028_db ON CLUSTER test_shard_localhost; +USE 02028_db; + +CREATE TABLE t1_local ON CLUSTER test_shard_localhost(partition_col_1 String, tc1 int,tc2 int)ENGINE=MergeTree() PARTITION BY partition_col_1 ORDER BY tc1; +CREATE TABLE t2_local ON CLUSTER test_shard_localhost(partition_col_1 String, tc1 int,tc2 int)ENGINE=MergeTree() PARTITION BY partition_col_1 ORDER BY tc1; + +INSERT INTO t1_local VALUES('partition1', 1,1); +INSERT INTO t1_local VALUES('partition2', 1,1); +INSERT INTO t2_local VALUES('partition1', 3,3); +INSERT INTO t2_local VALUES('partition2', 6,6); + +ALTER TABLE t1_local ON CLUSTER test_shard_localhost REPLACE PARTITION 'partition1' FROM t2_local; +ALTER TABLE t1_local ON CLUSTER test_shard_localhost MOVE PARTITION 'partition2' TO TABLE t2_local; + +DROP DATABASE 02028_db ON CLUSTER test_shard_localhost; diff --git a/parser/testdata/02028_create_select_settings/ast.json b/parser/testdata/02028_create_select_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02028_create_select_settings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02028_create_select_settings/metadata.json b/parser/testdata/02028_create_select_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02028_create_select_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02028_create_select_settings/query.sql b/parser/testdata/02028_create_select_settings/query.sql new file mode 100644 index 000000000..35102dd68 --- /dev/null +++ b/parser/testdata/02028_create_select_settings/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-ordinary-database + +create table test_table engine MergeTree order by a as select a_table.a, b_table.b_arr from (select arrayJoin(range(10000)) as a) a_table cross join (select range(10000) as 
b_arr) b_table settings max_memory_usage = 1; -- { serverError MEMORY_LIMIT_EXCEEDED } diff --git a/parser/testdata/02028_system_data_skipping_indices_size/ast.json b/parser/testdata/02028_system_data_skipping_indices_size/ast.json new file mode 100644 index 000000000..9c50baef4 --- /dev/null +++ b/parser/testdata/02028_system_data_skipping_indices_size/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001025343, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02028_system_data_skipping_indices_size/metadata.json b/parser/testdata/02028_system_data_skipping_indices_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02028_system_data_skipping_indices_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02028_system_data_skipping_indices_size/query.sql b/parser/testdata/02028_system_data_skipping_indices_size/query.sql new file mode 100644 index 000000000..07237c43b --- /dev/null +++ b/parser/testdata/02028_system_data_skipping_indices_size/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS test_table; + +CREATE TABLE test_table +( + key UInt64, + value String, + INDEX value_index value TYPE minmax GRANULARITY 1 +) +Engine=MergeTree() +ORDER BY key SETTINGS compress_marks=false; + +INSERT INTO test_table VALUES (0, 'Value'); +SELECT * FROM system.data_skipping_indices WHERE database = currentDatabase(); + +ALTER TABLE test_table DROP INDEX value_index; +ALTER TABLE test_table ADD INDEX value_index value TYPE minmax GRANULARITY 1; +ALTER TABLE test_table MATERIALIZE INDEX value_index SETTINGS mutations_sync=1; + +SELECT * FROM system.data_skipping_indices WHERE database = currentDatabase(); + +DROP TABLE test_table; diff --git a/parser/testdata/02028_tokens/ast.json b/parser/testdata/02028_tokens/ast.json new file mode 100644 index 000000000..84e82c9ec --- /dev/null +++ b/parser/testdata/02028_tokens/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tokens (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'test'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001229344, + "rows_read": 7, + "bytes_read": 257 + } +} diff --git a/parser/testdata/02028_tokens/metadata.json b/parser/testdata/02028_tokens/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02028_tokens/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02028_tokens/query.sql b/parser/testdata/02028_tokens/query.sql new file mode 100644 index 000000000..16c88397c --- /dev/null +++ b/parser/testdata/02028_tokens/query.sql @@ -0,0 +1,17 @@ +SELECT tokens('test'); +SELECT tokens('test1, test2, test3'); +SELECT tokens('test1, test2, test3, test4'); +SELECT tokens('test1,;\ test2,;\ test3,;\ test4'); +SELECT tokens('ё ё జ్ఞ‌ా 本気ですか ﷺ ᾂ ΐ שּ'); +SELECT tokens('ё, ё, జ్ఞ‌ా, 本気ですか, ﷺ, ᾂ, ΐ, שּ'); +SELECT tokens('ё, ё, జ్ఞ‌ా, 本気ですか, ﷺ, ᾂ, ΐ, שּ'); +SELECT tokens('ё;\ ё;\ జ్ఞ‌ా;\ 本気ですか;\ ﷺ;\ ᾂ;\ ΐ;\ 
שּ'); + +SELECT tokens(materialize('test')); +SELECT tokens(materialize('test1, test2, test3')); +SELECT tokens(materialize('test1, test2, test3, test4')); +SELECT tokens(materialize('test1,;\ test2,;\ test3,;\ test4')); +SELECT tokens(materialize('ё ё జ్ఞ‌ా 本気ですか ﷺ ᾂ ΐ שּ')); +SELECT tokens(materialize('ё, ё, జ్ఞ‌ా, 本気ですか, ﷺ, ᾂ, ΐ, שּ')); +SELECT tokens(materialize('ё, ё, జ్ఞ‌ా, 本気ですか, ﷺ, ᾂ, ΐ, שּ')); +SELECT tokens(materialize('ё;\ ё;\ జ్ఞ‌ా;\ 本気ですか;\ ﷺ;\ ᾂ;\ ΐ;\ שּ')); diff --git a/parser/testdata/02029_output_csv_null_representation/ast.json b/parser/testdata/02029_output_csv_null_representation/ast.json new file mode 100644 index 000000000..bbbbe7c2c --- /dev/null +++ b/parser/testdata/02029_output_csv_null_representation/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_data (children 1)" + }, + { + "explain": " Identifier test_data" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001315215, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02029_output_csv_null_representation/metadata.json b/parser/testdata/02029_output_csv_null_representation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02029_output_csv_null_representation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02029_output_csv_null_representation/query.sql b/parser/testdata/02029_output_csv_null_representation/query.sql new file mode 100644 index 000000000..a27c552ee --- /dev/null +++ b/parser/testdata/02029_output_csv_null_representation/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS test_data; +CREATE TABLE test_data ( + col1 Nullable(String), + col2 Nullable(String), + col3 Nullable(String) +) ENGINE = Memory; + +INSERT INTO test_data VALUES ('val1', NULL, 'val3'); + +SELECT '# format_csv_null_representation should initially be \\N'; +SELECT * FROM test_data FORMAT CSV; + +SELECT '# Changing format_csv_null_representation'; +SET format_csv_null_representation = '∅'; +SELECT * FROM test_data FORMAT CSV; +SET format_csv_null_representation = '\\N'; diff --git a/parser/testdata/02029_quantile_sanitizer/ast.json b/parser/testdata/02029_quantile_sanitizer/ast.json new file mode 100644 index 000000000..ad5a3f72e --- /dev/null +++ b/parser/testdata/02029_quantile_sanitizer/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantileTDigestWeighted (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_10000000000" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_-0" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001076787, + "rows_read": 12, + "bytes_read": 479 + } +} diff --git a/parser/testdata/02029_quantile_sanitizer/metadata.json b/parser/testdata/02029_quantile_sanitizer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02029_quantile_sanitizer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02029_quantile_sanitizer/query.sql b/parser/testdata/02029_quantile_sanitizer/query.sql new file mode 100644 index 000000000..3e06775ce --- /dev/null +++ b/parser/testdata/02029_quantile_sanitizer/query.sql @@ -0,0 +1 @@ +SELECT quantileTDigestWeighted(-0.)(toDateTime(10000000000.), 1); -- { serverError DECIMAL_OVERFLOW } diff --git a/parser/testdata/02030_function_mapContainsKeyLike/ast.json b/parser/testdata/02030_function_mapContainsKeyLike/ast.json new file mode 100644 index 000000000..4b15672c4 --- /dev/null +++ b/parser/testdata/02030_function_mapContainsKeyLike/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery map_containsKeyLike_test (children 1)" + }, + { + "explain": " Identifier map_containsKeyLike_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001168224, + "rows_read": 2, + "bytes_read": 100 + } +} diff --git a/parser/testdata/02030_function_mapContainsKeyLike/metadata.json b/parser/testdata/02030_function_mapContainsKeyLike/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02030_function_mapContainsKeyLike/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02030_function_mapContainsKeyLike/query.sql b/parser/testdata/02030_function_mapContainsKeyLike/query.sql new file mode 100644 index 000000000..9ebf06c1e --- /dev/null +++ b/parser/testdata/02030_function_mapContainsKeyLike/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS map_containsKeyLike_test; + +CREATE TABLE map_containsKeyLike_test (id UInt32, map Map(String, String)) Engine=MergeTree() ORDER BY id settings index_granularity=2; + +INSERT INTO map_containsKeyLike_test VALUES (1, {'1-K1':'1-V1','1-K2':'1-V2'}),(2,{'2-K1':'2-V1','2-K2':'2-V2'}); +INSERT INTO map_containsKeyLike_test VALUES (3, {'3-K1':'3-V1','3-K2':'3-V2'}),(4, {'4-K1':'4-V1','4-K2':'4-V2'}); +INSERT INTO map_containsKeyLike_test VALUES (5, {'5-K1':'5-V1','5-K2':'5-V2'}),(6, {'6-K1':'6-V1','6-K2':'6-V2'}); + +SELECT id, map FROM map_containsKeyLike_test WHERE mapContainsKeyLike(map, '1-%') = 1; +SELECT id, map FROM map_containsKeyLike_test WHERE mapContainsKeyLike(map, '3-%') = 0 order by id; + +DROP TABLE map_containsKeyLike_test; + +SELECT mapContainsKeyLike(map('aa', 1, 'bb', 2), 'a%'); +SELECT mapContainsKeyLike(map(toLowCardinality('aa'), 1, toLowCardinality('b'), 2), 'a%'); +SELECT mapContainsKeyLike(map('aa', 1, 'bb', 2), materialize('a%')); +SELECT mapContainsKeyLike(materialize(map('aa', 1, 'bb', 2)), 'a%'); +SELECT mapContainsKeyLike(materialize(map('aa', 1, 'bb', 2)), materialize('a%')); + +SELECT mapContainsKeyLike(map('aa', NULL, 'bb', NULL), 'a%'); +SELECT mapContainsKeyLike(map('aa', NULL, 'bb', NULL), 'q%'); + +SELECT mapExtractKeyLike(map('aa', NULL, 'bb', NULL), 'a%'); +SELECT mapExtractKeyLike(map('aa', NULL, 'bb', NULL), 'q%'); diff --git a/parser/testdata/02030_quantiles_underflow/ast.json b/parser/testdata/02030_quantiles_underflow/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02030_quantiles_underflow/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02030_quantiles_underflow/metadata.json b/parser/testdata/02030_quantiles_underflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02030_quantiles_underflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02030_quantiles_underflow/query.sql 
b/parser/testdata/02030_quantiles_underflow/query.sql new file mode 100644 index 000000000..cb5ad3dac --- /dev/null +++ b/parser/testdata/02030_quantiles_underflow/query.sql @@ -0,0 +1,6 @@ +SELECT + arrayMap(y -> round(y, 1), quantilesExactInclusive(0.1, 0.9)(x)) AS q +FROM +( + SELECT arrayJoin([-2147483648, 1, 2]) AS x +); diff --git a/parser/testdata/02030_tuple_filter/ast.json b/parser/testdata/02030_tuple_filter/ast.json new file mode 100644 index 000000000..76da69421 --- /dev/null +++ b/parser/testdata/02030_tuple_filter/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001128515, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02030_tuple_filter/metadata.json b/parser/testdata/02030_tuple_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02030_tuple_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02030_tuple_filter/query.sql b/parser/testdata/02030_tuple_filter/query.sql new file mode 100644 index 000000000..c8f344f50 --- /dev/null +++ b/parser/testdata/02030_tuple_filter/query.sql @@ -0,0 +1,43 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_tuple_filter; + +CREATE TABLE test_tuple_filter (id UInt32, value String, log_date Date) Engine=MergeTree() ORDER BY id PARTITION BY log_date SETTINGS index_granularity = 3, index_granularity_bytes = '10Mi'; + +INSERT INTO test_tuple_filter VALUES (1,'A','2021-01-01'),(2,'B','2021-01-01'),(3,'C','2021-01-01'),(4,'D','2021-01-02'),(5,'E','2021-01-02'); + +SET force_primary_key = 1; +SET optimize_move_to_prewhere = 1; + +SELECT * FROM test_tuple_filter WHERE (id, value) = (1, 'A'); +SELECT * FROM test_tuple_filter WHERE (1, 'A') = (id, value); +SELECT * FROM test_tuple_filter WHERE (id, value) = (1, 'A') AND (id, log_date) = (1, '2021-01-01'); +SELECT * FROM test_tuple_filter WHERE ((id, value), id * 2) = ((1, 'A'), 2); +SELECT * FROM test_tuple_filter WHERE ((id, value), log_date) = ((1, 'A'), '2021-01-01'); + +-- unsupported functions (concat) are not lost +SELECT * FROM test_tuple_filter WHERE (id, value, value||'foo') = ('1', 'A', 'A'); + +SELECT * FROM test_tuple_filter WHERE (1, (1, (1, (1, (id, value))))) = (1, (1, (1, (1, (1, 'A'))))); + +SELECT * FROM test_tuple_filter WHERE (1, value) = (id, 'A'); +SELECT * FROM test_tuple_filter WHERE (1, (1, (1, (1, tuple(id))))) = (1, (1, (1, (1, tuple(1))))); +SELECT * FROM test_tuple_filter WHERE ((id, value), tuple(log_date)) = ((1, 'A'), tuple('2021-01-01')); + +SET force_index_by_date = 1; +SET force_primary_key = 0; +SELECT * FROM test_tuple_filter WHERE (log_date, value) = ('2021-01-01', 'A'); + +SET force_index_by_date = 0; +SET force_primary_key = 0; + +SELECT * FROM test_tuple_filter WHERE (1, value) = (id, 'A'); +SELECT * FROM test_tuple_filter WHERE tuple(id) = tuple(1); +SELECT * FROM test_tuple_filter WHERE (id, (id, id) = (1, NULL)) == (NULL, NULL); + +SELECT * FROM test_tuple_filter WHERE (log_date, value) = tuple('2021-01-01'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT * FROM test_tuple_filter WHERE (id, value) = tuple(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT * FROM test_tuple_filter WHERE tuple(id, value) = tuple(value, id); -- { serverError NO_COMMON_TYPE } +SELECT * FROM test_tuple_filter WHERE equals((id, value)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +DROP TABLE IF EXISTS
test_tuple_filter; diff --git a/parser/testdata/02032_short_circuit_least_greatest_bug/ast.json b/parser/testdata/02032_short_circuit_least_greatest_bug/ast.json new file mode 100644 index 000000000..e4df3d8b2 --- /dev/null +++ b/parser/testdata/02032_short_circuit_least_greatest_bug/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function greatest (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001753137, + "rows_read": 23, + "bytes_read": 909 + } +} diff --git a/parser/testdata/02032_short_circuit_least_greatest_bug/metadata.json b/parser/testdata/02032_short_circuit_least_greatest_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02032_short_circuit_least_greatest_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02032_short_circuit_least_greatest_bug/query.sql b/parser/testdata/02032_short_circuit_least_greatest_bug/query.sql new file mode 100644 index 000000000..e7dca0bde --- /dev/null +++ b/parser/testdata/02032_short_circuit_least_greatest_bug/query.sql @@ -0,0 +1,2 @@ +select 1 and greatest(number % 2, number % 3) from numbers(10); +select 1 and least(number % 2, number % 3) from numbers(10); diff --git a/parser/testdata/02035_isNull_isNotNull_format/ast.json b/parser/testdata/02035_isNull_isNotNull_format/ast.json new file mode 100644 index 000000000..efdc80b81 --- /dev/null +++ b/parser/testdata/02035_isNull_isNotNull_format/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00187585, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02035_isNull_isNotNull_format/metadata.json b/parser/testdata/02035_isNull_isNotNull_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02035_isNull_isNotNull_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02035_isNull_isNotNull_format/query.sql b/parser/testdata/02035_isNull_isNotNull_format/query.sql new file mode 100644 index 000000000..933eb4d2c --- /dev/null +++ b/parser/testdata/02035_isNull_isNotNull_format/query.sql @@ -0,0 +1,8 @@ +set enable_analyzer = 1; +-- { echo } +explain syntax select null is 
null; +explain syntax select null is not null; +explain syntax select isNull(null); +explain syntax select isNotNull(null); +explain syntax select isNotNull(1)+isNotNull(2) from remote('127.2', system.one); +select isNotNull(1)+isNotNull(2) from remote('127.2', system.one); diff --git a/parser/testdata/02036_jit_short_circuit/ast.json b/parser/testdata/02036_jit_short_circuit/ast.json new file mode 100644 index 000000000..f8561dce7 --- /dev/null +++ b/parser/testdata/02036_jit_short_circuit/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001264664, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02036_jit_short_circuit/metadata.json b/parser/testdata/02036_jit_short_circuit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02036_jit_short_circuit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02036_jit_short_circuit/query.sql b/parser/testdata/02036_jit_short_circuit/query.sql new file mode 100644 index 000000000..18faf701a --- /dev/null +++ b/parser/testdata/02036_jit_short_circuit/query.sql @@ -0,0 +1,12 @@ +SET compile_expressions = 1; +SET min_count_to_compile_expression = 0; +SET short_circuit_function_evaluation='enable'; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table (message String) ENGINE=TinyLog; + +INSERT INTO test_table VALUES ('Test'); + +SELECT if(action = 'bonus', sport_amount, 0) * 100 FROM (SELECT message AS action, cast(message, 'Float64') AS sport_amount FROM test_table); + +DROP TABLE test_table; diff --git a/parser/testdata/02039_group_by_with_totals_having/ast.json b/parser/testdata/02039_group_by_with_totals_having/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02039_group_by_with_totals_having/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02039_group_by_with_totals_having/metadata.json b/parser/testdata/02039_group_by_with_totals_having/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02039_group_by_with_totals_having/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02039_group_by_with_totals_having/query.sql b/parser/testdata/02039_group_by_with_totals_having/query.sql new file mode 100644 index 000000000..28aa34a90 --- /dev/null +++ b/parser/testdata/02039_group_by_with_totals_having/query.sql @@ -0,0 +1,3 @@ +-- { echo } +SELECT 'x' FROM numbers(2) GROUP BY number WITH TOTALS HAVING count(number)>0; +SELECT 'x' FROM numbers(2) GROUP BY number WITH TOTALS HAVING count(number)<0; diff --git a/parser/testdata/02041_conversion_between_date32_and_datetime64/ast.json b/parser/testdata/02041_conversion_between_date32_and_datetime64/ast.json new file mode 100644 index 000000000..658655ded --- /dev/null +++ b/parser/testdata/02041_conversion_between_date32_and_datetime64/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDate32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " 
ExpressionList (children 3)" + }, + { + "explain": " Literal '2019-01-01 00:00:00'" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal 'Asia\/Istanbul'" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function toDate32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2019-01-01'" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal 'Asia\/Istanbul'" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001980548, + "rows_read": 18, + "bytes_read": 712 + } +} diff --git a/parser/testdata/02041_conversion_between_date32_and_datetime64/metadata.json b/parser/testdata/02041_conversion_between_date32_and_datetime64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02041_conversion_between_date32_and_datetime64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02041_conversion_between_date32_and_datetime64/query.sql b/parser/testdata/02041_conversion_between_date32_and_datetime64/query.sql new file mode 100644 index 000000000..05e5a090d --- /dev/null +++ b/parser/testdata/02041_conversion_between_date32_and_datetime64/query.sql @@ -0,0 +1 @@ +select toDate32(toDateTime64('2019-01-01 00:00:00', 3, 'Asia/Istanbul')), toDateTime64(toDate32('2019-01-01'), 3, 'Asia/Istanbul') \ No newline at end of file diff --git a/parser/testdata/02041_openssl_hash_functions_test/ast.json b/parser/testdata/02041_openssl_hash_functions_test/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02041_openssl_hash_functions_test/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02041_openssl_hash_functions_test/metadata.json b/parser/testdata/02041_openssl_hash_functions_test/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02041_openssl_hash_functions_test/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02041_openssl_hash_functions_test/query.sql b/parser/testdata/02041_openssl_hash_functions_test/query.sql new file mode 100644 index 000000000..eb16bfa9f --- /dev/null +++ b/parser/testdata/02041_openssl_hash_functions_test/query.sql @@ -0,0 +1,11 @@ +-- Tags: no-fasttest, no-openssl-fips + +SELECT hex(halfMD5('test')); +SELECT hex(MD4('test')); +SELECT hex(MD5('test')); +SELECT hex(SHA1('test')); +SELECT hex(SHA224('test')); +SELECT hex(SHA256('test')); +SELECT hex(SHA384('test')); +SELECT hex(SHA512('test')); +SELECT hex(SHA512_256('test')); diff --git a/parser/testdata/02041_test_fuzzy_alter/ast.json b/parser/testdata/02041_test_fuzzy_alter/ast.json new file mode 100644 index 000000000..38b68eb97 --- /dev/null +++ b/parser/testdata/02041_test_fuzzy_alter/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alter_table (children 1)" + }, + { + "explain": " Identifier alter_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001089218, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/02041_test_fuzzy_alter/metadata.json b/parser/testdata/02041_test_fuzzy_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02041_test_fuzzy_alter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02041_test_fuzzy_alter/query.sql 
b/parser/testdata/02041_test_fuzzy_alter/query.sql new file mode 100644 index 000000000..3c23ce5ec --- /dev/null +++ b/parser/testdata/02041_test_fuzzy_alter/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS alter_table; + +CREATE TABLE alter_table (a UInt8, b Int16) +ENGINE = MergeTree +ORDER BY a; + +ALTER TABLE alter_table + MODIFY COLUMN `b` DateTime DEFAULT now(([NULL, NULL, NULL, [-2147483648], [NULL, NULL, NULL, NULL, NULL, NULL, NULL]] AND (1048576 AND NULL) AND (NULL AND 1048575 AND NULL AND -2147483649) AND NULL) IN (test_01103.t1_distr.id)); --{serverError NO_COMMON_TYPE,UNKNOWN_IDENTIFIER} + +ALTER TABLE alter_table + MODIFY COLUMN `b` DateTime DEFAULT now([1240506] IN (test_01103.t1_distr.id)); --{serverError INVALID_IDENTIFIER,UNKNOWN_IDENTIFIER} + +SELECT 1; + + +DROP TABLE IF EXISTS alter_table; diff --git a/parser/testdata/02042_map_get_non_const_key/ast.json b/parser/testdata/02042_map_get_non_const_key/ast.json new file mode 100644 index 000000000..ad8f51d54 --- /dev/null +++ b/parser/testdata/02042_map_get_non_const_key/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier map" + }, + { + "explain": " Identifier key" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001295352, + "rows_read": 8, + "bytes_read": 291 + } +} diff --git a/parser/testdata/02042_map_get_non_const_key/metadata.json b/parser/testdata/02042_map_get_non_const_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02042_map_get_non_const_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02042_map_get_non_const_key/query.sql b/parser/testdata/02042_map_get_non_const_key/query.sql new file mode 100644 index 000000000..96f70f880 --- /dev/null +++ b/parser/testdata/02042_map_get_non_const_key/query.sql @@ -0,0 +1,5 @@ +SELECT map[key] +FROM +( + SELECT materialize('key') AS key, CAST((['key'], ['value']), 'Map(String, String)') AS map +); diff --git a/parser/testdata/02043_user_defined_executable_function_implicit_cast/ast.json b/parser/testdata/02043_user_defined_executable_function_implicit_cast/ast.json new file mode 100644 index 000000000..989ad3823 --- /dev/null +++ b/parser/testdata/02043_user_defined_executable_function_implicit_cast/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function test_function (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001155705, + "rows_read": 8, + "bytes_read": 296 + } +} diff --git a/parser/testdata/02043_user_defined_executable_function_implicit_cast/metadata.json b/parser/testdata/02043_user_defined_executable_function_implicit_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02043_user_defined_executable_function_implicit_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02043_user_defined_executable_function_implicit_cast/query.sql b/parser/testdata/02043_user_defined_executable_function_implicit_cast/query.sql new file mode 100644 index 000000000..b7c6e4837 --- /dev/null +++ b/parser/testdata/02043_user_defined_executable_function_implicit_cast/query.sql @@ -0,0 +1 @@ +SELECT test_function(2, 2); diff --git a/parser/testdata/02044_exists_operator/ast.json b/parser/testdata/02044_exists_operator/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02044_exists_operator/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02044_exists_operator/metadata.json b/parser/testdata/02044_exists_operator/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02044_exists_operator/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02044_exists_operator/query.sql b/parser/testdata/02044_exists_operator/query.sql new file mode 100644 index 000000000..bec191ecd --- /dev/null +++ b/parser/testdata/02044_exists_operator/query.sql @@ -0,0 +1,11 @@ +-- { echo } +select exists(select 1); +select exists(select 1 except select 1); +select exists(select number from numbers(10) where number > 0); +select exists(select number from numbers(10) where number < 0); + +select count() from numbers(10) where exists(select 1 except select 1); +select count() from numbers(10) where exists(select 1 intersect select 1); + +select count() from numbers(10) where exists(select number from numbers(10) where number > 8); +select count() from numbers(10) where exists(select number from numbers(10) where number > 9); diff --git a/parser/testdata/02045_like_function/ast.json b/parser/testdata/02045_like_function/ast.json new file mode 100644 index 000000000..0dceb18f7 --- /dev/null +++ b/parser/testdata/02045_like_function/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function like (alias res) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'r\\\\a1bbb'" + }, + { + "explain": " Literal '%r\\\\\\\\a1%bbb%'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001604421, + "rows_read": 8, + "bytes_read": 308 + } +} diff --git a/parser/testdata/02045_like_function/metadata.json b/parser/testdata/02045_like_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02045_like_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02045_like_function/query.sql b/parser/testdata/02045_like_function/query.sql new file mode 100644 index 000000000..d395e8d45 --- /dev/null +++ b/parser/testdata/02045_like_function/query.sql @@ -0,0 +1,10 @@ +SELECT 'r\\a1bbb' LIKE '%r\\\\a1%bbb%' AS res; + +WITH lower('\RealVNC\WinVNC4 /v password') as CommandLine +SELECT + CommandLine LIKE '%\\\\realvnc\\\\winvnc4%password%' as t1, + CommandLine LIKE '%\\\\realvnc\\\\winvnc4 %password%' as t2, + CommandLine LIKE '%\\\\realvnc\\\\winvnc4%password' as t3, + CommandLine LIKE '%\\\\realvnc\\\\winvnc4 %password' as t4, + CommandLine LIKE 
'%realvnc%winvnc4%password%' as t5, + CommandLine LIKE '%\\\\winvnc4%password%' as t6; diff --git a/parser/testdata/02046_remote_table_function_named_collections/ast.json b/parser/testdata/02046_remote_table_function_named_collections/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02046_remote_table_function_named_collections/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02046_remote_table_function_named_collections/metadata.json b/parser/testdata/02046_remote_table_function_named_collections/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02046_remote_table_function_named_collections/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02046_remote_table_function_named_collections/query.sql b/parser/testdata/02046_remote_table_function_named_collections/query.sql new file mode 100644 index 000000000..18aa6faf3 --- /dev/null +++ b/parser/testdata/02046_remote_table_function_named_collections/query.sql @@ -0,0 +1,11 @@ +-- Tags: shard, no-fasttest + +DROP TABLE IF EXISTS remote_test; +CREATE TABLE remote_test(a1 UInt8) ENGINE=Memory; +INSERT INTO FUNCTION remote(remote1, database=currentDatabase()) VALUES(1); +INSERT INTO FUNCTION remote(remote1, database=currentDatabase()) VALUES(2); +INSERT INTO FUNCTION remote(remote1, database=currentDatabase()) VALUES(3); +INSERT INTO FUNCTION remote(remote1, database=currentDatabase()) VALUES(4); +SELECT count() FROM remote(remote1, database=currentDatabase()); +SELECT count() FROM remote(remote2, database=merge(currentDatabase(), '^remote_test')); +DROP TABLE remote_test; diff --git a/parser/testdata/02047_alias_for_table_and_database_name/ast.json b/parser/testdata/02047_alias_for_table_and_database_name/ast.json new file mode 100644 index 000000000..fb216f166 --- /dev/null +++ b/parser/testdata/02047_alias_for_table_and_database_name/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier name" + }, + { + "explain": " Identifier table" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.tables" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier database" + }, + { + "explain": " Literal 'system'" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier name" + }, + { + "explain": " Literal 'numbers'" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001167225, + "rows_read": 20, + "bytes_read": 748 + } +} diff --git a/parser/testdata/02047_alias_for_table_and_database_name/metadata.json b/parser/testdata/02047_alias_for_table_and_database_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02047_alias_for_table_and_database_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02047_alias_for_table_and_database_name/query.sql b/parser/testdata/02047_alias_for_table_and_database_name/query.sql new file mode 100644 index 000000000..2fabd2aff --- /dev/null +++ b/parser/testdata/02047_alias_for_table_and_database_name/query.sql @@ -0,0 +1,2 @@ +SELECT name,table from system.tables where database = 'system' and name = 'numbers'; +SELECt name,database from system.databases where name = 'default'; diff --git a/parser/testdata/02049_lowcardinality_shortcircuit_crash/ast.json b/parser/testdata/02049_lowcardinality_shortcircuit_crash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02049_lowcardinality_shortcircuit_crash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02049_lowcardinality_shortcircuit_crash/metadata.json b/parser/testdata/02049_lowcardinality_shortcircuit_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02049_lowcardinality_shortcircuit_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02049_lowcardinality_shortcircuit_crash/query.sql b/parser/testdata/02049_lowcardinality_shortcircuit_crash/query.sql new file mode 100644 index 000000000..84d64d316 --- /dev/null +++ b/parser/testdata/02049_lowcardinality_shortcircuit_crash/query.sql @@ -0,0 +1,47 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/30231 +SET allow_suspicious_low_cardinality_types=1; + +SELECT * +FROM ( + SELECT number, + multiIf( + CAST(number < 4, 'UInt8'), toString(number), + CAST(number < 8, 'LowCardinality(UInt8)'), toString(number * 10), + CAST(number < 12, 'Nullable(UInt8)'), toString(number * 100), + CAST(number < 16, 'LowCardinality(Nullable(UInt8))'), toString(number * 1000), + toString(number * 10000)) as m + FROM system.numbers + LIMIT 20 + ) +ORDER BY number +SETTINGS short_circuit_function_evaluation='enable'; + +SELECT * +FROM ( + SELECT number, + multiIf( + CAST(number < 4, 'UInt8'), toString(number), + CAST(number < 8, 'LowCardinality(UInt8)'), toString(number * 10), + CAST(NULL, 'Nullable(UInt8)'), toString(number * 100), + CAST(NULL, 'LowCardinality(Nullable(UInt8))'), toString(number * 1000), + toString(number * 10000)) as m + FROM system.numbers + LIMIT 20 + ) +ORDER BY number +SETTINGS short_circuit_function_evaluation='enable'; + +SELECT * +FROM ( + SELECT number, + multiIf( + CAST(number < 4, 'UInt8'), toString(number), + CAST(number < 8, 'LowCardinality(UInt8)'), toString(number * 10)::LowCardinality(String), + CAST(number < 12, 'Nullable(UInt8)'), toString(number * 100)::Nullable(String), + CAST(number < 16, 'LowCardinality(Nullable(UInt8))'), toString(number * 1000)::LowCardinality(Nullable(String)), + toString(number * 10000)) as m + FROM system.numbers + LIMIT 20 + ) +ORDER BY number +SETTINGS short_circuit_function_evaluation='enable'; diff --git a/parser/testdata/02053_INSERT_SELECT_MATERIALIZED/ast.json b/parser/testdata/02053_INSERT_SELECT_MATERIALIZED/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02053_INSERT_SELECT_MATERIALIZED/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02053_INSERT_SELECT_MATERIALIZED/metadata.json b/parser/testdata/02053_INSERT_SELECT_MATERIALIZED/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02053_INSERT_SELECT_MATERIALIZED/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02053_INSERT_SELECT_MATERIALIZED/query.sql 
b/parser/testdata/02053_INSERT_SELECT_MATERIALIZED/query.sql new file mode 100644 index 000000000..e9ea0c9f0 --- /dev/null +++ b/parser/testdata/02053_INSERT_SELECT_MATERIALIZED/query.sql @@ -0,0 +1,6 @@ +-- Test from https://github.com/ClickHouse/ClickHouse/issues/29729 +create table data_02053 (id Int64, A Nullable(Int64), X Int64 materialized coalesce(A, -1)) engine=MergeTree order by id; +insert into data_02053 values (1, 42); +-- Due to insert_null_as_default A became Null and X became -1 +insert into data_02053 select 1, 42; +select *, X from data_02053 order by id; diff --git a/parser/testdata/02067_lost_part_s3/ast.json b/parser/testdata/02067_lost_part_s3/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02067_lost_part_s3/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02067_lost_part_s3/metadata.json b/parser/testdata/02067_lost_part_s3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02067_lost_part_s3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02067_lost_part_s3/query.sql b/parser/testdata/02067_lost_part_s3/query.sql new file mode 100644 index 000000000..6fbde71ff --- /dev/null +++ b/parser/testdata/02067_lost_part_s3/query.sql @@ -0,0 +1,45 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS partslost_0; +DROP TABLE IF EXISTS partslost_1; +DROP TABLE IF EXISTS partslost_2; + +CREATE TABLE partslost_0 (x String) ENGINE=ReplicatedMergeTree('/clickhouse/table/{database}_02067_lost/partslost', '0') ORDER BY tuple() + SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, old_parts_lifetime = 1, + cleanup_delay_period = 1, cleanup_delay_period_random_add = 1, cleanup_thread_preferred_points_per_iteration=0, + index_granularity = 8192, index_granularity_bytes = '10Mi'; + +CREATE TABLE partslost_1 (x String) ENGINE=ReplicatedMergeTree('/clickhouse/table/{database}_02067_lost/partslost', '1') ORDER BY tuple() + SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, old_parts_lifetime = 1, + cleanup_delay_period = 1, cleanup_delay_period_random_add = 1, cleanup_thread_preferred_points_per_iteration=0, + index_granularity = 8192, index_granularity_bytes = '10Mi'; + +CREATE TABLE partslost_2 (x String) ENGINE=ReplicatedMergeTree('/clickhouse/table/{database}_02067_lost/partslost', '2') ORDER BY tuple() + SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, old_parts_lifetime = 1, + cleanup_delay_period = 1, cleanup_delay_period_random_add = 1, cleanup_thread_preferred_points_per_iteration=0, + index_granularity = 8192, index_granularity_bytes = '10Mi'; + + +INSERT INTO partslost_0 SELECT toString(number) AS x from system.numbers LIMIT 10000; + +ALTER TABLE partslost_0 ADD INDEX idx x TYPE tokenbf_v1(285000, 3, 12345) GRANULARITY 3; + +SET mutations_sync = 2; + +ALTER TABLE partslost_0 MATERIALIZE INDEX idx; + +-- In worst case doesn't check anything, but it's not flaky +select sleep(3) FORMAT Null; +select sleep(3) FORMAT Null; +select sleep(3) FORMAT Null; +select sleep(3) FORMAT Null; + +ALTER TABLE partslost_0 DROP INDEX idx; + +select count() from partslost_0; +select count() from partslost_1; +select count() from partslost_2; + +DROP TABLE IF EXISTS partslost_0; +DROP TABLE IF EXISTS partslost_1; +DROP TABLE IF EXISTS partslost_2; diff --git a/parser/testdata/02070_join_on_disk/ast.json b/parser/testdata/02070_join_on_disk/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/02070_join_on_disk/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02070_join_on_disk/metadata.json b/parser/testdata/02070_join_on_disk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02070_join_on_disk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02070_join_on_disk/query.sql b/parser/testdata/02070_join_on_disk/query.sql new file mode 100644 index 000000000..eabf31df2 --- /dev/null +++ b/parser/testdata/02070_join_on_disk/query.sql @@ -0,0 +1,21 @@ +-- Regression test for the case when Join stores data on disk and receives an empty block. +-- Because of this it did not create an empty file, though one was expected. + +SET max_threads = 1; +SET join_algorithm = 'auto'; +SET max_rows_in_join = 1000; +SET optimize_aggregation_in_order = 1; +SET max_block_size = 1000; + +DROP TABLE IF EXISTS join_on_disk; + +SYSTEM STOP MERGES join_on_disk; + +CREATE TABLE join_on_disk (id Int) Engine=MergeTree() ORDER BY id; + +INSERT INTO join_on_disk SELECT number as id FROM numbers_mt(50000); +INSERT INTO join_on_disk SELECT number as id FROM numbers_mt(1000); + +SELECT id FROM join_on_disk lhs LEFT JOIN (SELECT id FROM join_on_disk GROUP BY id) rhs USING (id) FORMAT Null; + +DROP TABLE join_on_disk; diff --git a/parser/testdata/02071_lower_upper_utf8_row_overlaps/ast.json b/parser/testdata/02071_lower_upper_utf8_row_overlaps/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02071_lower_upper_utf8_row_overlaps/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02071_lower_upper_utf8_row_overlaps/metadata.json b/parser/testdata/02071_lower_upper_utf8_row_overlaps/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02071_lower_upper_utf8_row_overlaps/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02071_lower_upper_utf8_row_overlaps/query.sql b/parser/testdata/02071_lower_upper_utf8_row_overlaps/query.sql new file mode 100644 index 000000000..d175e0659 --- /dev/null +++ b/parser/testdata/02071_lower_upper_utf8_row_overlaps/query.sql @@ -0,0 +1,25 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + +drop table if exists utf8_overlap; +create table utf8_overlap (str String) engine=Memory(); + +-- { echoOn } +-- NOTE: total string size should be > 16 (sizeof(__m128i)) +insert into utf8_overlap values ('\xe2'), ('Foo⚊BarBazBam'), ('\xe2'), ('Foo⚊BarBazBam'); +-- ^ +-- MONOGRAM FOR YANG +with lowerUTF8(str) as l_, upperUTF8(str) as u_, '0x' || hex(str) as h_ +select length(str), if(l_ == '\xe2', h_, l_), if(u_ == '\xe2', h_, u_) from utf8_overlap format CSV; + +-- NOTE: regression test for introduced bug +-- https://github.com/ClickHouse/ClickHouse/issues/42756 +SELECT lowerUTF8('КВ АМ И СЖ'); +SELECT upperUTF8('кв ам и сж'); +SELECT lowerUTF8('КВ АМ И СЖ КВ АМ И СЖ'); +SELECT upperUTF8('кв ам и сж кв ам и сж'); +-- Test at 32 and 64 byte boundaries +SELECT lowerUTF8(repeat('0', 16) || 'КВ АМ И СЖ'); +SELECT upperUTF8(repeat('0', 16) || 'кв ам и сж'); +SELECT lowerUTF8(repeat('0', 48) || 'КВ АМ И СЖ'); +SELECT upperUTF8(repeat('0', 48) || 'кв ам и сж'); diff --git a/parser/testdata/02072_rdb_recovery_escaped_name/ast.json b/parser/testdata/02072_rdb_recovery_escaped_name/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02072_rdb_recovery_escaped_name/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git
a/parser/testdata/02072_rdb_recovery_escaped_name/metadata.json b/parser/testdata/02072_rdb_recovery_escaped_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02072_rdb_recovery_escaped_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02072_rdb_recovery_escaped_name/query.sql b/parser/testdata/02072_rdb_recovery_escaped_name/query.sql new file mode 100644 index 000000000..f1f86a856 --- /dev/null +++ b/parser/testdata/02072_rdb_recovery_escaped_name/query.sql @@ -0,0 +1,15 @@ + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE_1:Identifier}; +CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier} ENGINE=Replicated('/test/02072/{database}_1', '{shard}', '{replica}_1'); + +SET distributed_ddl_output_mode='none'; +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.`test%_\_` (n int) engine=Log; +SHOW TABLES FROM {CLICKHOUSE_DATABASE:Identifier}; + +CREATE DATABASE {CLICKHOUSE_DATABASE_1:Identifier} ENGINE=Replicated('/test/02072/{database}', '{shard}', '{replica}_2'); +DROP DATABASE {CLICKHOUSE_DATABASE:Identifier}; +SYSTEM SYNC DATABASE REPLICA {CLICKHOUSE_DATABASE_1:Identifier}; +SHOW TABLES FROM {CLICKHOUSE_DATABASE_1:Identifier}; + +DROP DATABASE {CLICKHOUSE_DATABASE_1:Identifier}; diff --git a/parser/testdata/02095_function_get_os_kernel_version/ast.json b/parser/testdata/02095_function_get_os_kernel_version/ast.json new file mode 100644 index 000000000..177758f4c --- /dev/null +++ b/parser/testdata/02095_function_get_os_kernel_version/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function splitByChar (alias version_pair) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal ' '" + }, + { + "explain": " Function getOSKernelVersion (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier version_pair" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001412157, + "rows_read": 14, + "bytes_read": 554 + } +} diff --git a/parser/testdata/02095_function_get_os_kernel_version/metadata.json b/parser/testdata/02095_function_get_os_kernel_version/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02095_function_get_os_kernel_version/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02095_function_get_os_kernel_version/query.sql b/parser/testdata/02095_function_get_os_kernel_version/query.sql new file mode 100644 index 000000000..d62b360f7 --- /dev/null +++ b/parser/testdata/02095_function_get_os_kernel_version/query.sql @@ -0,0 +1 @@ +WITH splitByChar(' ', getOSKernelVersion()) AS version_pair SELECT version_pair[1] diff --git a/parser/testdata/02096_date_time_1970_saturation/ast.json b/parser/testdata/02096_date_time_1970_saturation/ast.json new file mode 100644 index 000000000..a8cfa5ef2 --- /dev/null +++ b/parser/testdata/02096_date_time_1970_saturation/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": 
"explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001081525, + "rows_read": 7, + "bytes_read": 259 + } +} diff --git a/parser/testdata/02096_date_time_1970_saturation/metadata.json b/parser/testdata/02096_date_time_1970_saturation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02096_date_time_1970_saturation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02096_date_time_1970_saturation/query.sql b/parser/testdata/02096_date_time_1970_saturation/query.sql new file mode 100644 index 000000000..6fa1660f0 --- /dev/null +++ b/parser/testdata/02096_date_time_1970_saturation/query.sql @@ -0,0 +1,30 @@ +select toDate(0); +select toDateTime(0, 'Asia/Istanbul'); +select toMonday(toDate(0)); +select toMonday(toDateTime(0, 'Asia/Istanbul')); +select toStartOfWeek(toDate(0)); +select toStartOfWeek(toDateTime(0, 'Asia/Istanbul')); +select toStartOfMonth(toDate(0)); +select toStartOfMonth(toDateTime(0, 'Asia/Istanbul')); +select toStartOfQuarter(toDate(0)); +select toStartOfQuarter(toDateTime(0, 'Asia/Istanbul')); +select toStartOfYear(toDate(0)); +select toStartOfYear(toDateTime(0, 'Asia/Istanbul')); +select toTimeWithFixedDate(toDateTime(0, 'Asia/Istanbul')); +select toStartOfMinute(toDateTime(0, 'Asia/Istanbul')); +select toStartOfFiveMinutes(toDateTime(0, 'Asia/Istanbul')); +select toStartOfTenMinutes(toDateTime(0, 'Asia/Istanbul')); +select toStartOfFifteenMinutes(toDateTime(0, 'Asia/Istanbul')); +select toStartOfHour(toDateTime(0, 'Asia/Istanbul')); +select toDateTime(0, 'America/Los_Angeles'); +select toMonday(toDateTime(0, 'America/Los_Angeles')); +select toStartOfWeek(toDateTime(0, 'America/Los_Angeles')); +select toStartOfMonth(toDateTime(0, 'America/Los_Angeles')); +select toStartOfQuarter(toDateTime(0, 'America/Los_Angeles')); +select toStartOfYear(toDateTime(0, 'America/Los_Angeles')); +select toTimeWithFixedDate(toDateTime(0, 'America/Los_Angeles'), 'America/Los_Angeles'); +select toStartOfMinute(toDateTime(0, 'America/Los_Angeles')); +select toStartOfFiveMinutes(toDateTime(0, 'America/Los_Angeles')); +select toStartOfTenMinutes(toDateTime(0, 'America/Los_Angeles')); +select toStartOfFifteenMinutes(toDateTime(0, 'America/Los_Angeles')); +select toStartOfHour(toDateTime(0, 'America/Los_Angeles')); diff --git a/parser/testdata/02096_date_time_1970_saturation2/ast.json b/parser/testdata/02096_date_time_1970_saturation2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02096_date_time_1970_saturation2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02096_date_time_1970_saturation2/metadata.json b/parser/testdata/02096_date_time_1970_saturation2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02096_date_time_1970_saturation2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02096_date_time_1970_saturation2/query.sql b/parser/testdata/02096_date_time_1970_saturation2/query.sql new file mode 100644 index 000000000..cf60163e5 --- /dev/null +++ 
b/parser/testdata/02096_date_time_1970_saturation2/query.sql @@ -0,0 +1,27 @@ +-- America/Paramaribo : partial hours timezones +select toDateTime(0, 'America/Paramaribo'); +select toMonday(toDateTime(0, 'America/Paramaribo')); +select toStartOfWeek(toDateTime(0, 'America/Paramaribo')); +select toStartOfMonth(toDateTime(0, 'America/Paramaribo')); +select toStartOfQuarter(toDateTime(0, 'America/Paramaribo')); +select toStartOfYear(toDateTime(0, 'America/Paramaribo')); +select toTimeWithFixedDate(toDateTime(0, 'America/Paramaribo'), 'America/Paramaribo'); +select toStartOfMinute(toDateTime(0, 'America/Paramaribo')); +select toStartOfFiveMinute(toDateTime(0, 'America/Paramaribo')); +select toStartOfTenMinutes(toDateTime(0, 'America/Paramaribo')); +select toStartOfFifteenMinutes(toDateTime(0, 'America/Paramaribo')); +select toStartOfHour(toDateTime(0, 'America/Paramaribo')); + +-- Africa/Monrovia : partial minutes timezones +select toDateTime(0, 'Africa/Monrovia'); +select toMonday(toDateTime(0, 'Africa/Monrovia')); +select toStartOfWeek(toDateTime(0, 'Africa/Monrovia')); +select toStartOfMonth(toDateTime(0, 'Africa/Monrovia')); +select toStartOfQuarter(toDateTime(0, 'Africa/Monrovia')); +select toStartOfYear(toDateTime(0, 'Africa/Monrovia')); +select toTimeWithFixedDate(toDateTime(0, 'Africa/Monrovia'), 'Africa/Monrovia'); +select toStartOfMinute(toDateTime(0, 'Africa/Monrovia')); +select toStartOfFiveMinute(toDateTime(0, 'Africa/Monrovia')); +select toStartOfTenMinutes(toDateTime(0, 'Africa/Monrovia')); +select toStartOfFifteenMinutes(toDateTime(0, 'Africa/Monrovia')); +select toStartOfHour(toDateTime(0, 'Africa/Monrovia')); diff --git a/parser/testdata/02096_join_unusual_identifier_begin/ast.json b/parser/testdata/02096_join_unusual_identifier_begin/ast.json new file mode 100644 index 000000000..70e85a361 --- /dev/null +++ b/parser/testdata/02096_join_unusual_identifier_begin/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001798867, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02096_join_unusual_identifier_begin/metadata.json b/parser/testdata/02096_join_unusual_identifier_begin/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02096_join_unusual_identifier_begin/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02096_join_unusual_identifier_begin/query.sql b/parser/testdata/02096_join_unusual_identifier_begin/query.sql new file mode 100644 index 000000000..fc6be2eff --- /dev/null +++ b/parser/testdata/02096_join_unusual_identifier_begin/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; + +CREATE TABLE t1 ( `a1` Int64, `1a1` Int64 ) ENGINE = Memory; +INSERT INTO t1 VALUES (1, 1); + +CREATE TABLE t2 ( `b1` Int64, `1b1` Int64 ) ENGINE = Memory; +INSERT INTO t2 VALUES (1, 1); + +CREATE TABLE t3 ( `c1` Int64, `1c1` Int64 ) ENGINE = Memory; +INSERT INTO t3 VALUES (1, 1); + +SELECT + * +FROM t1 AS t1 +INNER JOIN t2 AS t2 ON t1.a1 = t2.b1 +INNER JOIN t3 AS t3 ON t1.a1 = t3.c1; + +SELECT t2.`1b1` FROM t1 JOIN t2 ON a1 = b1; + +-- Without qualification it doesn't work: +-- SELECT `1b1` FROM t1 JOIN t2 ON a1 = b1; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; diff --git
a/parser/testdata/02096_rename_atomic_hang/ast.json b/parser/testdata/02096_rename_atomic_hang/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02096_rename_atomic_hang/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02096_rename_atomic_hang/metadata.json b/parser/testdata/02096_rename_atomic_hang/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02096_rename_atomic_hang/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02096_rename_atomic_hang/query.sql b/parser/testdata/02096_rename_atomic_hang/query.sql new file mode 100644 index 000000000..32d7efec6 --- /dev/null +++ b/parser/testdata/02096_rename_atomic_hang/query.sql @@ -0,0 +1,22 @@ +-- Tags: no-parallel +SET send_logs_level = 'fatal'; +drop database if exists db_hang; +drop database if exists db_hang_temp; +set allow_deprecated_database_ordinary=1; +-- Creation of a database with Ordinary engine emits a warning. +create database db_hang engine=Ordinary; +use db_hang; +create table db_hang.test(A Int64) Engine=MergeTree order by A; +create materialized view db_hang.test_mv(A Int64) Engine=MergeTree order by A as select * from db_hang.test; +insert into db_hang.test select * from numbers(1000); + +create database db_hang_temp engine=Atomic; +rename table db_hang.test to db_hang_temp.test; +rename table db_hang.test_mv to db_hang_temp.test_mv; + +drop database db_hang; +rename database db_hang_temp to db_hang; +insert into db_hang.test select * from numbers(1000); +select count() from db_hang.test; +select count() from db_hang.test_mv; +drop database db_hang; diff --git a/parser/testdata/02096_sample_by_tuple/ast.json b/parser/testdata/02096_sample_by_tuple/ast.json new file mode 100644 index 000000000..bf87e2fda --- /dev/null +++ b/parser/testdata/02096_sample_by_tuple/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001373606, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02096_sample_by_tuple/metadata.json b/parser/testdata/02096_sample_by_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02096_sample_by_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02096_sample_by_tuple/query.sql b/parser/testdata/02096_sample_by_tuple/query.sql new file mode 100644 index 000000000..1a86e1bca --- /dev/null +++ b/parser/testdata/02096_sample_by_tuple/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS t; + +CREATE TABLE t (n UInt8) ENGINE=MergeTree ORDER BY n SAMPLE BY tuple(); -- { serverError INCORRECT_QUERY } + +CREATE TABLE t (n UInt8) ENGINE=MergeTree ORDER BY tuple(); + +ALTER TABLE t MODIFY SAMPLE BY tuple(); -- { serverError INCORRECT_QUERY } diff --git a/parser/testdata/02096_sql_user_defined_function_alias/ast.json b/parser/testdata/02096_sql_user_defined_function_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02096_sql_user_defined_function_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02096_sql_user_defined_function_alias/metadata.json b/parser/testdata/02096_sql_user_defined_function_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02096_sql_user_defined_function_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02096_sql_user_defined_function_alias/query.sql b/parser/testdata/02096_sql_user_defined_function_alias/query.sql new file mode 100644 index 000000000..70e657262 --- /dev/null +++ b/parser/testdata/02096_sql_user_defined_function_alias/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-parallel + +CREATE FUNCTION 02096_test_function AS x -> x + 1; +DESCRIBE (SELECT 02096_test_function(1) AS a); +DROP FUNCTION 02096_test_function; diff --git a/parser/testdata/02096_totals_global_in_bug/ast.json b/parser/testdata/02096_totals_global_in_bug/ast.json new file mode 100644 index 000000000..923debecf --- /dev/null +++ b/parser/testdata/02096_totals_global_in_bug/ast.json @@ -0,0 +1,142 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 5)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '127.0.0.{2,3}'" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function globalIn (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier number" + } + ], + + "rows": 40, + + "statistics": + { + "elapsed": 0.001639862, + "rows_read": 40, + "bytes_read": 1622 + } +} diff --git a/parser/testdata/02096_totals_global_in_bug/metadata.json b/parser/testdata/02096_totals_global_in_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02096_totals_global_in_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02096_totals_global_in_bug/query.sql b/parser/testdata/02096_totals_global_in_bug/query.sql new file mode 100644 index 000000000..27ca26cf1 --- /dev/null +++ 
b/parser/testdata/02096_totals_global_in_bug/query.sql @@ -0,0 +1 @@ +select sum(number) from remote('127.0.0.{2,3}', numbers(2)) where number global in (select sum(number) from numbers(2) group by number with totals) group by number with totals order by number; diff --git a/parser/testdata/02097_default_dict_get_add_database/ast.json b/parser/testdata/02097_default_dict_get_add_database/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02097_default_dict_get_add_database/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02097_default_dict_get_add_database/metadata.json b/parser/testdata/02097_default_dict_get_add_database/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02097_default_dict_get_add_database/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02097_default_dict_get_add_database/query.sql b/parser/testdata/02097_default_dict_get_add_database/query.sql new file mode 100644 index 000000000..d87d0d706 --- /dev/null +++ b/parser/testdata/02097_default_dict_get_add_database/query.sql @@ -0,0 +1,40 @@ +-- Tags: no-parallel, log-engine + +DROP DATABASE IF EXISTS db_02097; +CREATE DATABASE db_02097; + +USE db_02097; + +CREATE TABLE test_table +( + key_column UInt64, + data_column_1 UInt64, + data_column_2 UInt8 +) +ENGINE = MergeTree +ORDER BY key_column; + +CREATE DICTIONARY test_dictionary +( + key_column UInt64 DEFAULT 0, + data_column_1 UInt64 DEFAULT 1, + data_column_2 UInt8 DEFAULT 1 +) +PRIMARY KEY key_column +LAYOUT(DIRECT()) +SOURCE(CLICKHOUSE(TABLE 'test_table')); + +CREATE TABLE test_table_default +( + data_1 DEFAULT dictGetUInt64('db_02097.test_dictionary', 'data_column_1', toUInt64(0)), + data_2 DEFAULT dictGet(db_02097.test_dictionary, 'data_column_2', toUInt64(0)) +) +ENGINE=TinyLog; + +SELECT create_table_query FROM system.tables WHERE name = 'test_table_default' AND database = 'db_02097'; + +DROP TABLE test_table_default; +DROP DICTIONARY test_dictionary; +DROP TABLE test_table; + +DROP DATABASE IF EXISTS db_02097; diff --git a/parser/testdata/02097_initializeAggregationNullable/ast.json b/parser/testdata/02097_initializeAggregationNullable/ast.json new file mode 100644 index 000000000..fd738293b --- /dev/null +++ b/parser/testdata/02097_initializeAggregationNullable/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function finalizeAggregation (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function initializeAggregation (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'uniqExactState'" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'foo'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001131685, + "rows_read": 12, + "bytes_read": 504 + } +} diff --git a/parser/testdata/02097_initializeAggregationNullable/metadata.json b/parser/testdata/02097_initializeAggregationNullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02097_initializeAggregationNullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} 
diff --git a/parser/testdata/02097_initializeAggregationNullable/query.sql b/parser/testdata/02097_initializeAggregationNullable/query.sql new file mode 100644 index 000000000..31c2b142d --- /dev/null +++ b/parser/testdata/02097_initializeAggregationNullable/query.sql @@ -0,0 +1,8 @@ +SELECT finalizeAggregation(initializeAggregation('uniqExactState', toNullable('foo'))); +SELECT toTypeName(initializeAggregation('uniqExactState', toNullable('foo'))); + +SELECT finalizeAggregation(initializeAggregation('uniqExactState', toNullable(123))); +SELECT toTypeName(initializeAggregation('uniqExactState', toNullable(123))); + +SELECT toTypeName(initializeAggregation('uniqExactState', toNullable('foo'))) = toTypeName(arrayReduce('uniqExactState', [toNullable('foo')])); +SELECT toTypeName(initializeAggregation('uniqExactState', toNullable(123))) = toTypeName(arrayReduce('uniqExactState', [toNullable(123)])); diff --git a/parser/testdata/02097_polygon_dictionary_store_key/ast.json b/parser/testdata/02097_polygon_dictionary_store_key/ast.json new file mode 100644 index 000000000..849f51fd5 --- /dev/null +++ b/parser/testdata/02097_polygon_dictionary_store_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery polygons_test_table (children 1)" + }, + { + "explain": " Identifier polygons_test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001144765, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/02097_polygon_dictionary_store_key/metadata.json b/parser/testdata/02097_polygon_dictionary_store_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02097_polygon_dictionary_store_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02097_polygon_dictionary_store_key/query.sql b/parser/testdata/02097_polygon_dictionary_store_key/query.sql new file mode 100644 index 000000000..97297a776 --- /dev/null +++ b/parser/testdata/02097_polygon_dictionary_store_key/query.sql @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS polygons_test_table; +CREATE TABLE polygons_test_table +( + key Array(Array(Array(Tuple(Float64, Float64)))), + name String +) ENGINE = TinyLog; + +INSERT INTO polygons_test_table VALUES ([[[(3, 1), (0, 1), (0, -1), (3, -1)]]], 'Value'); + +DROP DICTIONARY IF EXISTS polygons_test_dictionary_no_option; +CREATE DICTIONARY polygons_test_dictionary_no_option +( + key Array(Array(Array(Tuple(Float64, Float64)))), + name String +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE 'polygons_test_table')) +LAYOUT(POLYGON()) +LIFETIME(0); + +SELECT * FROM polygons_test_dictionary_no_option; -- {serverError UNSUPPORTED_METHOD} + +DROP DICTIONARY IF EXISTS polygons_test_dictionary; +CREATE DICTIONARY polygons_test_dictionary +( + key Array(Array(Array(Tuple(Float64, Float64)))), + name String +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE 'polygons_test_table')) +LAYOUT(POLYGON(STORE_POLYGON_KEY_COLUMN 1)) +LIFETIME(0); + +SELECT * FROM polygons_test_dictionary; + +DROP DICTIONARY polygons_test_dictionary_no_option; +DROP DICTIONARY polygons_test_dictionary; +DROP TABLE polygons_test_table; diff --git a/parser/testdata/02097_remove_sample_by/ast.json b/parser/testdata/02097_remove_sample_by/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02097_remove_sample_by/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02097_remove_sample_by/metadata.json 
b/parser/testdata/02097_remove_sample_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02097_remove_sample_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02097_remove_sample_by/query.sql b/parser/testdata/02097_remove_sample_by/query.sql new file mode 100644 index 000000000..d9e3c7eab --- /dev/null +++ b/parser/testdata/02097_remove_sample_by/query.sql @@ -0,0 +1,44 @@ +-- Tags: zookeeper + +DROP TABLE IF EXISTS t_remove_sample_by; + +CREATE TABLE t_remove_sample_by(id UInt64) ENGINE = MergeTree ORDER BY id SAMPLE BY id; + +ALTER TABLE t_remove_sample_by REMOVE SAMPLE BY; +SHOW CREATE TABLE t_remove_sample_by; + +ALTER TABLE t_remove_sample_by REMOVE SAMPLE BY; -- { serverError BAD_ARGUMENTS } +SELECT * FROM t_remove_sample_by SAMPLE 1 / 10; -- { serverError SAMPLING_NOT_SUPPORTED } + +DROP TABLE t_remove_sample_by; + +CREATE TABLE t_remove_sample_by(id UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/t_remove_sample_by', '1') +ORDER BY id SAMPLE BY id; + +ALTER TABLE t_remove_sample_by REMOVE SAMPLE BY; +SHOW CREATE TABLE t_remove_sample_by; + +DROP TABLE t_remove_sample_by; + +CREATE TABLE t_remove_sample_by(id UInt64) ENGINE = Memory; +ALTER TABLE t_remove_sample_by REMOVE SAMPLE BY; -- { serverError BAD_ARGUMENTS } + +DROP TABLE t_remove_sample_by; + +CREATE TABLE t_remove_sample_by(id String) +ENGINE = MergeTree ORDER BY id SAMPLE BY id +SETTINGS check_sample_column_is_correct = 0; + +ALTER TABLE t_remove_sample_by RESET SETTING check_sample_column_is_correct; + +DETACH TABLE t_remove_sample_by; +ATTACH TABLE t_remove_sample_by; + +INSERT INTO t_remove_sample_by VALUES (1); +SELECT * FROM t_remove_sample_by SAMPLE 1 / 10; -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER } + +ALTER TABLE t_remove_sample_by REMOVE SAMPLE BY; +SHOW CREATE TABLE t_remove_sample_by; + +DROP TABLE t_remove_sample_by; diff --git a/parser/testdata/02098_date32_comparison/ast.json b/parser/testdata/02098_date32_comparison/ast.json new file mode 100644 index 000000000..c7296bb9b --- /dev/null +++ b/parser/testdata/02098_date32_comparison/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDate32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '1990-02-01'" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '1990-02-01'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001211361, + "rows_read": 12, + "bytes_read": 471 + } +} diff --git a/parser/testdata/02098_date32_comparison/metadata.json b/parser/testdata/02098_date32_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02098_date32_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02098_date32_comparison/query.sql b/parser/testdata/02098_date32_comparison/query.sql new file mode 100644 index 000000000..b35191e58 --- /dev/null +++ b/parser/testdata/02098_date32_comparison/query.sql @@ -0,0 +1,19 @@ +select 
toDate32('1990-02-01') = toDate('1990-02-01'); +select toDate('1991-01-02') > toDate32('1990-02-01'); +select toDate32('1925-02-01') <= toDate('1990-02-01'); +select toDate('1991-02-01') < toDate32('2283-11-11'); +select toDate32('1990-02-01') = toDateTime('1990-02-01'); +select toDateTime('1991-01-02') > toDate32('1990-02-01'); +select toDate32('1925-02-01') <= toDateTime('1990-02-01'); +select toDateTime('1991-02-01') < toDate32('2283-11-11'); +select toDate32('1990-02-01') = toDateTime64('1990-02-01',2); +select toDateTime64('1991-01-02',2) > toDate32('1990-02-01'); +select toDate32('1925-02-01') = toDateTime64('1925-02-01',2); +select toDateTime64('1925-02-02',2) > toDate32('1925-02-01'); +select toDate32('2283-11-11') = toDateTime64('2283-11-11',2); +select toDateTime64('2283-11-11',2) > toDate32('1925-02-01'); +select toDate32('1990-02-01') = '1990-02-01'; +select '1991-01-02' > toDate32('1990-02-01'); +select toDate32('1925-02-01') = '1925-02-01'; +select '2283-11-11' >= toDate32('2283-11-10'); +select '2283-11-11' > toDate32('1925-02-01'); diff --git a/parser/testdata/02098_sql_user_defined_functions_aliases/ast.json b/parser/testdata/02098_sql_user_defined_functions_aliases/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02098_sql_user_defined_functions_aliases/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02098_sql_user_defined_functions_aliases/metadata.json b/parser/testdata/02098_sql_user_defined_functions_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02098_sql_user_defined_functions_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02098_sql_user_defined_functions_aliases/query.sql b/parser/testdata/02098_sql_user_defined_functions_aliases/query.sql new file mode 100644 index 000000000..c5bd2b5b5 --- /dev/null +++ b/parser/testdata/02098_sql_user_defined_functions_aliases/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-parallel +CREATE FUNCTION 02098_alias_function AS x -> (((x * 2) AS x_doubled) + x_doubled); +SELECT 02098_alias_function(2); +DROP FUNCTION 02098_alias_function; diff --git a/parser/testdata/02099_sql_user_defined_functions_lambda/ast.json b/parser/testdata/02099_sql_user_defined_functions_lambda/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02099_sql_user_defined_functions_lambda/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02099_sql_user_defined_functions_lambda/metadata.json b/parser/testdata/02099_sql_user_defined_functions_lambda/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02099_sql_user_defined_functions_lambda/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02099_sql_user_defined_functions_lambda/query.sql b/parser/testdata/02099_sql_user_defined_functions_lambda/query.sql new file mode 100644 index 000000000..1c926faf3 --- /dev/null +++ b/parser/testdata/02099_sql_user_defined_functions_lambda/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-parallel +CREATE FUNCTION 02099_lambda_function AS x -> arrayMap(array_element -> array_element * 2, x); +SELECT 02099_lambda_function([1,2,3]); +DROP FUNCTION 02099_lambda_function; diff --git a/parser/testdata/02100_alter_scalar_circular_deadlock/ast.json b/parser/testdata/02100_alter_scalar_circular_deadlock/ast.json new file mode 100644 index 000000000..8ed1722fd --- /dev/null +++ 
b/parser/testdata/02100_alter_scalar_circular_deadlock/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery foo (children 1)" + }, + { + "explain": " Identifier foo" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001395243, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02100_alter_scalar_circular_deadlock/metadata.json b/parser/testdata/02100_alter_scalar_circular_deadlock/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02100_alter_scalar_circular_deadlock/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02100_alter_scalar_circular_deadlock/query.sql b/parser/testdata/02100_alter_scalar_circular_deadlock/query.sql new file mode 100644 index 000000000..32b757f54 --- /dev/null +++ b/parser/testdata/02100_alter_scalar_circular_deadlock/query.sql @@ -0,0 +1,34 @@ +DROP TABLE IF EXISTS foo; + +CREATE TABLE foo (ts DateTime, x UInt64) +ENGINE = MergeTree PARTITION BY toYYYYMMDD(ts) +ORDER BY (ts); + +INSERT INTO foo (ts, x) SELECT toDateTime('2020-01-01 00:05:00'), number from system.numbers_mt LIMIT 10; + +SET mutations_sync = 1; + +ALTER TABLE foo UPDATE x = 1 WHERE x = (SELECT x from foo WHERE x = 4); + +SELECT sum(x) == 42 FROM foo; + +ALTER TABLE foo UPDATE x = 1 WHERE x IN (SELECT x FROM foo WHERE x != 0); + +SELECT sum(x) == 9 FROM foo; + +DROP TABLE IF EXISTS bar; + +CREATE TABLE bar (ts DateTime, x UInt64) +ENGINE = Memory; + +INSERT INTO bar (ts, x) SELECT toDateTime('2020-01-01 00:05:00'), number from system.numbers_mt LIMIT 10; + +SET mutations_sync = 1; + +ALTER TABLE bar UPDATE x = 1 WHERE x = (SELECT x from bar WHERE x = 4); + +SELECT sum(x) == 42 FROM bar; + +ALTER TABLE bar UPDATE x = 1 WHERE x IN (SELECT x FROM bar WHERE x != 0); + +SELECT sum(x) == 9 FROM bar; diff --git a/parser/testdata/02100_limit_push_down_bug/ast.json b/parser/testdata/02100_limit_push_down_bug/ast.json new file mode 100644 index 000000000..eb2e8fbd1 --- /dev/null +++ b/parser/testdata/02100_limit_push_down_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tbl_repr (children 1)" + }, + { + "explain": " Identifier tbl_repr" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001837634, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/02100_limit_push_down_bug/metadata.json b/parser/testdata/02100_limit_push_down_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02100_limit_push_down_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02100_limit_push_down_bug/query.sql b/parser/testdata/02100_limit_push_down_bug/query.sql new file mode 100644 index 000000000..2ba9d2b88 --- /dev/null +++ b/parser/testdata/02100_limit_push_down_bug/query.sql @@ -0,0 +1,21 @@ +drop table if exists tbl_repr; + +CREATE TABLE tbl_repr( +ts DateTime, +x String) +ENGINE=MergeTree ORDER BY ts; + + +SELECT * +FROM +( + SELECT + x, + length(x) + FROM tbl_repr + WHERE ts > now() + LIMIT 1 +) +WHERE x != ''; + +drop table if exists tbl_repr; diff --git a/parser/testdata/02100_now64_types_bug/ast.json b/parser/testdata/02100_now64_types_bug/ast.json new file mode 100644 index 000000000..42d1aeb2e --- /dev/null +++ b/parser/testdata/02100_now64_types_bug/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" 
+ } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001824723, + "rows_read": 5, + "bytes_read": 173 + } +} diff --git a/parser/testdata/02100_now64_types_bug/metadata.json b/parser/testdata/02100_now64_types_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02100_now64_types_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02100_now64_types_bug/query.sql b/parser/testdata/02100_now64_types_bug/query.sql new file mode 100644 index 000000000..ef91b19bb --- /dev/null +++ b/parser/testdata/02100_now64_types_bug/query.sql @@ -0,0 +1,8 @@ +SELECT x +FROM +( + SELECT if((number % NULL) = -2147483648, NULL, if(toInt64(toInt64(now64(if((number % NULL) = -2147483648, NULL, if(toInt64(now64(toInt64(9223372036854775807, now64(plus(NULL, NULL))), plus(NULL, NULL))) = (number % NULL), nan, toFloat64(number))), toInt64(9223372036854775807, toInt64(9223372036854775807, now64(plus(NULL, NULL))), now64(plus(NULL, NULL))), plus(NULL, NULL))), now64(toInt64(9223372036854775807, toInt64(0, now64(plus(NULL, NULL))), now64(plus(NULL, NULL))), plus(NULL, NULL))) = (number % NULL), nan, toFloat64(number))) AS x + FROM system.numbers + LIMIT 3 +) +ORDER BY x DESC NULLS LAST diff --git a/parser/testdata/02100_replaceRegexpAll_bug/ast.json b/parser/testdata/02100_replaceRegexpAll_bug/ast.json new file mode 100644 index 000000000..0af708a5d --- /dev/null +++ b/parser/testdata/02100_replaceRegexpAll_bug/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'aaaabb '" + }, + { + "explain": " Function trimLeft (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'b aaaabb '" + }, + { + "explain": " Literal 'b '" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001076127, + "rows_read": 11, + "bytes_read": 419 + } +} diff --git a/parser/testdata/02100_replaceRegexpAll_bug/metadata.json b/parser/testdata/02100_replaceRegexpAll_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02100_replaceRegexpAll_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02100_replaceRegexpAll_bug/query.sql b/parser/testdata/02100_replaceRegexpAll_bug/query.sql new file mode 100644 index 000000000..66ccb0445 --- /dev/null +++ b/parser/testdata/02100_replaceRegexpAll_bug/query.sql @@ -0,0 +1,16 @@ +SELECT 'aaaabb ' == trim(leading 'b ' FROM 'b aaaabb ') x; +SELECT 'b aaaa' == trim(trailing 'b ' FROM 'b aaaabb ') x; +SELECT 'aaaa' == trim(both 'b ' FROM 'b aaaabb ') x; + +SELECT '1' == replaceRegexpAll(',,1,,', '^[,]*|[,]*$', '') x; +SELECT '1' == replaceRegexpAll(',,1', '^[,]*|[,]*$', '') x; +SELECT '1' == replaceRegexpAll('1,,', '^[,]*|[,]*$', '') x; + +SELECT '1,,' == replaceRegexpOne(',,1,,', '^[,]*|[,]*$', '') x; +SELECT '1' == 
replaceRegexpOne(',,1', '^[,]*|[,]*$', '') x; +SELECT '1,,' == replaceRegexpOne('1,,', '^[,]*|[,]*$', '') x; + +SELECT '5935,5998,6014' == trim(BOTH ', ' FROM '5935,5998,6014, ') x; +SELECT '5935,5998,6014' == replaceRegexpAll('5935,5998,6014, ', concat('^[', regexpQuoteMeta(', '), ']*|[', regexpQuoteMeta(', '), ']*$'), '') AS x; + +SELECT trim(BOTH '"' FROM '2') == '2' diff --git a/parser/testdata/02101_sql_user_defined_functions_create_or_replace/ast.json b/parser/testdata/02101_sql_user_defined_functions_create_or_replace/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02101_sql_user_defined_functions_create_or_replace/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02101_sql_user_defined_functions_create_or_replace/metadata.json b/parser/testdata/02101_sql_user_defined_functions_create_or_replace/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02101_sql_user_defined_functions_create_or_replace/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02101_sql_user_defined_functions_create_or_replace/query.sql b/parser/testdata/02101_sql_user_defined_functions_create_or_replace/query.sql new file mode 100644 index 000000000..7b0ad311b --- /dev/null +++ b/parser/testdata/02101_sql_user_defined_functions_create_or_replace/query.sql @@ -0,0 +1,13 @@ +-- Tags: no-parallel + +CREATE OR REPLACE FUNCTION 02101_test_function AS x -> x + 1; + +SELECT create_query FROM system.functions WHERE name = '02101_test_function'; +SELECT 02101_test_function(1); + +CREATE OR REPLACE FUNCTION 02101_test_function AS x -> x + 2; + +SELECT create_query FROM system.functions WHERE name = '02101_test_function'; +SELECT 02101_test_function(1); + +DROP FUNCTION 02101_test_function; diff --git a/parser/testdata/02101_sql_user_defined_functions_drop_if_exists/ast.json b/parser/testdata/02101_sql_user_defined_functions_drop_if_exists/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02101_sql_user_defined_functions_drop_if_exists/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02101_sql_user_defined_functions_drop_if_exists/metadata.json b/parser/testdata/02101_sql_user_defined_functions_drop_if_exists/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02101_sql_user_defined_functions_drop_if_exists/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02101_sql_user_defined_functions_drop_if_exists/query.sql b/parser/testdata/02101_sql_user_defined_functions_drop_if_exists/query.sql new file mode 100644 index 000000000..8061f227b --- /dev/null +++ b/parser/testdata/02101_sql_user_defined_functions_drop_if_exists/query.sql @@ -0,0 +1,9 @@ +-- Tags: no-parallel + +CREATE FUNCTION 02101_test_function AS x -> x + 1; + +SELECT 02101_test_function(1); + +DROP FUNCTION 02101_test_function; +DROP FUNCTION 02101_test_function; --{serverError UNKNOWN_FUNCTION} +DROP FUNCTION IF EXISTS 02101_test_function; diff --git a/parser/testdata/02102_sql_user_defined_functions_create_if_not_exists/ast.json b/parser/testdata/02102_sql_user_defined_functions_create_if_not_exists/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02102_sql_user_defined_functions_create_if_not_exists/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02102_sql_user_defined_functions_create_if_not_exists/metadata.json 
b/parser/testdata/02102_sql_user_defined_functions_create_if_not_exists/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02102_sql_user_defined_functions_create_if_not_exists/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02102_sql_user_defined_functions_create_if_not_exists/query.sql b/parser/testdata/02102_sql_user_defined_functions_create_if_not_exists/query.sql new file mode 100644 index 000000000..5dba8a2e7 --- /dev/null +++ b/parser/testdata/02102_sql_user_defined_functions_create_if_not_exists/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-parallel + +CREATE FUNCTION IF NOT EXISTS 02102_test_function AS x -> x + 1; +SELECT 02102_test_function(1); + +CREATE FUNCTION 02102_test_function AS x -> x + 1; --{serverError FUNCTION_ALREADY_EXISTS} +CREATE FUNCTION IF NOT EXISTS 02102_test_function AS x -> x + 1; +DROP FUNCTION 02102_test_function; diff --git a/parser/testdata/02103_sql_user_defined_functions_composition/ast.json b/parser/testdata/02103_sql_user_defined_functions_composition/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02103_sql_user_defined_functions_composition/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02103_sql_user_defined_functions_composition/metadata.json b/parser/testdata/02103_sql_user_defined_functions_composition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02103_sql_user_defined_functions_composition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02103_sql_user_defined_functions_composition/query.sql b/parser/testdata/02103_sql_user_defined_functions_composition/query.sql new file mode 100644 index 000000000..3d34413b9 --- /dev/null +++ b/parser/testdata/02103_sql_user_defined_functions_composition/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-parallel + +CREATE FUNCTION 02103_test_function AS x -> x + 1; +CREATE FUNCTION 02103_test_function_with_nested_function_empty_args AS () -> 02103_test_function(1); +CREATE FUNCTION 02103_test_function_with_nested_function_arg AS (x) -> 02103_test_function(x); + +SELECT 02103_test_function_with_nested_function_empty_args(); +SELECT 02103_test_function_with_nested_function_arg(1); + +DROP FUNCTION 02103_test_function_with_nested_function_empty_args; +DROP FUNCTION 02103_test_function_with_nested_function_arg; +DROP FUNCTION 02103_test_function; diff --git a/parser/testdata/02111_function_mapExtractKeyLike/ast.json b/parser/testdata/02111_function_mapExtractKeyLike/ast.json new file mode 100644 index 000000000..9bed85853 --- /dev/null +++ b/parser/testdata/02111_function_mapExtractKeyLike/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery map_extractKeyLike_test (children 1)" + }, + { + "explain": " Identifier map_extractKeyLike_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001450677, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/02111_function_mapExtractKeyLike/metadata.json b/parser/testdata/02111_function_mapExtractKeyLike/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02111_function_mapExtractKeyLike/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02111_function_mapExtractKeyLike/query.sql b/parser/testdata/02111_function_mapExtractKeyLike/query.sql new file mode 100644 index 000000000..a17b6b745 --- 
/dev/null +++ b/parser/testdata/02111_function_mapExtractKeyLike/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS map_extractKeyLike_test; + +CREATE TABLE map_extractKeyLike_test (id UInt32, map Map(String, String)) Engine=MergeTree() ORDER BY id settings index_granularity=2; + +INSERT INTO map_extractKeyLike_test VALUES (1, {'P1-K1':'1-V1','P2-K2':'1-V2'}),(2,{'P1-K1':'2-V1','P2-K2':'2-V2'}); +INSERT INTO map_extractKeyLike_test VALUES (3, {'P1-K1':'3-V1','P2-K2':'3-V2'}),(4,{'P1-K1':'4-V1','P2-K2':'4-V2'}); +INSERT INTO map_extractKeyLike_test VALUES (5, {'5-K1':'5-V1','5-K2':'5-V2'}),(6, {'P3-K1':'6-V1','P4-K2':'6-V2'}); + +SELECT 'The data of table:'; +SELECT * FROM map_extractKeyLike_test ORDER BY id; + +SELECT ''; + +SELECT 'The results of query: SELECT id, mapExtractKeyLike(map, \'P1%\') FROM map_extractKeyLike_test ORDER BY id;'; +SELECT id, mapExtractKeyLike(map, 'P1%') FROM map_extractKeyLike_test ORDER BY id; + +SELECT ''; + +SELECT 'The results of query: SELECT id, mapExtractKeyLike(map, \'5-K1\') FROM map_extractKeyLike_test ORDER BY id;'; +SELECT id, mapExtractKeyLike(map, '5-K1') FROM map_extractKeyLike_test ORDER BY id; + +DROP TABLE map_extractKeyLike_test; + +SELECT mapExtractKeyLike(map('aa', 1, 'bb', 2), 'a%'); +SELECT mapExtractKeyLike(map('aa', 1, 'bb', 2), materialize('a%')); +SELECT mapExtractKeyLike(materialize(map('aa', 1, 'bb', 2)), 'a%'); +SELECT mapExtractKeyLike(materialize(map('aa', 1, 'bb', 2)), materialize('a%')); diff --git a/parser/testdata/02111_global_context_temporary_tables/ast.json b/parser/testdata/02111_global_context_temporary_tables/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02111_global_context_temporary_tables/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02111_global_context_temporary_tables/metadata.json b/parser/testdata/02111_global_context_temporary_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02111_global_context_temporary_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02111_global_context_temporary_tables/query.sql b/parser/testdata/02111_global_context_temporary_tables/query.sql new file mode 100644 index 000000000..96651f9c9 --- /dev/null +++ b/parser/testdata/02111_global_context_temporary_tables/query.sql @@ -0,0 +1,5 @@ +-- { echo } +SELECT * FROM remote('127.1', system.one, 1 IN id); -- { serverError UNKNOWN_TABLE } +SELECT * FROM remote('127.1', system.one, 1 IN dummy); -- { serverError UNKNOWN_TABLE } +SELECT * FROM remote('127.1', view(SELECT * FROM system.one), 1 IN id); -- { serverError UNKNOWN_TABLE } +SELECT * FROM remote('127.1', view(SELECT number AS id FROM numbers(2)), 1 IN id); -- { serverError UNKNOWN_TABLE } diff --git a/parser/testdata/02111_json_column_name_encoding/ast.json b/parser/testdata/02111_json_column_name_encoding/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02111_json_column_name_encoding/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02111_json_column_name_encoding/metadata.json b/parser/testdata/02111_json_column_name_encoding/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02111_json_column_name_encoding/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02111_json_column_name_encoding/query.sql b/parser/testdata/02111_json_column_name_encoding/query.sql new file mode 100644 index 
000000000..69af75072 --- /dev/null +++ b/parser/testdata/02111_json_column_name_encoding/query.sql @@ -0,0 +1,7 @@ +-- Tags: no-fasttest + +SET output_format_write_statistics = 0; + +SELECT + length('\x80') + FORMAT JSONCompact; diff --git a/parser/testdata/02111_modify_table_comment/ast.json b/parser/testdata/02111_modify_table_comment/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02111_modify_table_comment/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02111_modify_table_comment/metadata.json b/parser/testdata/02111_modify_table_comment/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02111_modify_table_comment/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02111_modify_table_comment/query.sql b/parser/testdata/02111_modify_table_comment/query.sql new file mode 100644 index 000000000..f9f864c7f --- /dev/null +++ b/parser/testdata/02111_modify_table_comment/query.sql @@ -0,0 +1,34 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 02111_modify_table_comment; +CREATE DATABASE 02111_modify_table_comment; + +USE 02111_modify_table_comment; + +CREATE TABLE t +( + `n` Int8 +) +ENGINE = MergeTree +ORDER BY n +COMMENT 'this is a MergeTree table'; + +SHOW CREATE t; + +ALTER TABLE t + MODIFY COMMENT 'MergeTree Table'; + +SHOW CREATE t; + +CREATE TABLE t_merge AS t +ENGINE = Merge('02111_modify_table_comment', 't') +COMMENT 'this is a Merge table'; + +SHOW CREATE t_merge; + +ALTER TABLE t_merge + MODIFY COMMENT 'Merge Table'; + +SHOW CREATE t_merge; + +DROP DATABASE 02111_modify_table_comment; diff --git a/parser/testdata/02111_with_fill_no_rows/ast.json b/parser/testdata/02111_with_fill_no_rows/ast.json new file mode 100644 index 000000000..1a9e28d42 --- /dev/null +++ b/parser/testdata/02111_with_fill_no_rows/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toYear (alias y) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier d" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001443927, + "rows_read": 9, + "bytes_read": 331 + } +} diff --git a/parser/testdata/02111_with_fill_no_rows/metadata.json b/parser/testdata/02111_with_fill_no_rows/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02111_with_fill_no_rows/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02111_with_fill_no_rows/query.sql b/parser/testdata/02111_with_fill_no_rows/query.sql new file mode 100644 index 000000000..e671dd5f0 --- /dev/null +++ b/parser/testdata/02111_with_fill_no_rows/query.sql @@ -0,0 +1,19 @@ +SELECT toYear(d) AS y, count() +FROM ( SELECT today() AS d WHERE 0) +GROUP BY y +ORDER BY y ASC WITH FILL FROM 2019 TO 2023; + +SELECT toYear(d) AS y, count() +FROM ( SELECT today() AS d WHERE 0) +GROUP BY y +ORDER BY y ASC WITH FILL FROM 2019; + +SELECT toYear(d) AS y, count() +FROM ( SELECT today() AS d WHERE 0) +GROUP BY y +ORDER BY y ASC WITH FILL TO 2023; + +SELECT toYear(d) AS y, count() +FROM ( SELECT today() AS d WHERE 0) +GROUP BY y +ORDER BY y ASC WITH 
FILL; diff --git a/parser/testdata/02112_skip_index_set_and_or/ast.json b/parser/testdata/02112_skip_index_set_and_or/ast.json new file mode 100644 index 000000000..13f1bfcdb --- /dev/null +++ b/parser/testdata/02112_skip_index_set_and_or/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery set_index (children 1)" + }, + { + "explain": " Identifier set_index" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001330549, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02112_skip_index_set_and_or/metadata.json b/parser/testdata/02112_skip_index_set_and_or/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02112_skip_index_set_and_or/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02112_skip_index_set_and_or/query.sql b/parser/testdata/02112_skip_index_set_and_or/query.sql new file mode 100644 index 000000000..7b52e5de9 --- /dev/null +++ b/parser/testdata/02112_skip_index_set_and_or/query.sql @@ -0,0 +1,6 @@ +drop table if exists set_index; + +create table set_index (a Int32, b Int32, INDEX b_set b type set(0) granularity 1) engine MergeTree order by tuple(); +insert into set_index values (1, 2); + +select b from set_index where a = 1 and a = 1 and b = 1 settings force_data_skipping_indices = 'b_set', optimize_move_to_prewhere=0; diff --git a/parser/testdata/02112_with_fill_interval/ast.json b/parser/testdata/02112_with_fill_interval/ast.json new file mode 100644 index 000000000..6e1d14f6f --- /dev/null +++ b/parser/testdata/02112_with_fill_interval/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery with_fill_date (children 1)" + }, + { + "explain": " Identifier with_fill_date" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00114524, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/02112_with_fill_interval/metadata.json b/parser/testdata/02112_with_fill_interval/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02112_with_fill_interval/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02112_with_fill_interval/query.sql b/parser/testdata/02112_with_fill_interval/query.sql new file mode 100644 index 000000000..1210b0f2a --- /dev/null +++ b/parser/testdata/02112_with_fill_interval/query.sql @@ -0,0 +1,84 @@ +DROP TABLE IF EXISTS with_fill_date; +CREATE TABLE with_fill_date (d Date, d32 Date32) ENGINE = Memory; + +INSERT INTO with_fill_date VALUES (toDate('2020-02-05'), toDate32('2020-02-05')); +INSERT INTO with_fill_date VALUES (toDate('2020-02-16'), toDate32('2020-02-16')); +INSERT INTO with_fill_date VALUES (toDate('2020-03-03'), toDate32('2020-03-03')); +INSERT INTO with_fill_date VALUES (toDate('2020-06-10'), toDate32('2020-06-10')); + +SELECT '1 DAY'; +SELECT d, count() FROM with_fill_date GROUP BY d ORDER BY d WITH FILL STEP INTERVAL 1 DAY LIMIT 5; +SELECT '1 WEEK'; +SELECT toStartOfWeek(d) as d, count() FROM with_fill_date GROUP BY d ORDER BY d WITH FILL STEP INTERVAL 1 WEEK LIMIT 5; +SELECT '1 MONTH'; +SELECT toStartOfMonth(d) as d, count() FROM with_fill_date GROUP BY d ORDER BY d WITH FILL STEP INTERVAL 1 MONTH LIMIT 5; +SELECT '3 MONTH'; +SELECT toStartOfMonth(d) as d, count() FROM with_fill_date GROUP BY d ORDER BY d WITH FILL + FROM toDate('2020-01-01') + TO toDate('2021-01-01') + STEP INTERVAL 3 
MONTH; + +SELECT d, count() FROM with_fill_date GROUP BY d ORDER BY d WITH FILL STEP INTERVAL 1 HOUR LIMIT 5; -- { serverError INVALID_WITH_FILL_EXPRESSION } + +SELECT '1 DAY'; +SELECT d32, count() FROM with_fill_date GROUP BY d32 ORDER BY d32 WITH FILL STEP INTERVAL 1 DAY LIMIT 5; +SELECT '1 WEEK'; +SELECT toStartOfWeek(d32) as d32, count() FROM with_fill_date GROUP BY d32 ORDER BY d32 WITH FILL STEP INTERVAL 1 WEEK LIMIT 5; +SELECT '1 MONTH'; +SELECT toStartOfMonth(d32) as d32, count() FROM with_fill_date GROUP BY d32 ORDER BY d32 WITH FILL STEP INTERVAL 1 MONTH LIMIT 5; +SELECT '3 MONTH'; +SELECT toStartOfMonth(d32) as d32, count() FROM with_fill_date GROUP BY d32 ORDER BY d32 WITH FILL + FROM toDate('2020-01-01') + TO toDate('2021-01-01') + STEP INTERVAL 3 MONTH; + +SELECT d, count() FROM with_fill_date GROUP BY d ORDER BY d WITH FILL STEP INTERVAL 1 HOUR LIMIT 5; -- { serverError INVALID_WITH_FILL_EXPRESSION } + +DROP TABLE with_fill_date; + +DROP TABLE IF EXISTS with_fill_date; +CREATE TABLE with_fill_date (d DateTime('UTC'), d64 DateTime64(3, 'UTC')) ENGINE = Memory; + +INSERT INTO with_fill_date VALUES (toDateTime('2020-02-05 10:20:00', 'UTC'), toDateTime64('2020-02-05 10:20:00', 3, 'UTC')); +INSERT INTO with_fill_date VALUES (toDateTime('2020-03-08 11:01:00', 'UTC'), toDateTime64('2020-03-08 11:01:00', 3, 'UTC')); + +SELECT '15 MINUTE'; +SELECT d, count() FROM with_fill_date GROUP BY d ORDER BY d WITH FILL STEP INTERVAL 15 MINUTE LIMIT 5; +SELECT '6 HOUR'; +SELECT toStartOfHour(d) as d, count() FROM with_fill_date GROUP BY d ORDER BY d WITH FILL STEP INTERVAL 6 HOUR LIMIT 5; +SELECT '10 DAY'; +SELECT toStartOfDay(d) as d, count() FROM with_fill_date GROUP BY d ORDER BY d WITH FILL STEP INTERVAL 10 DAY LIMIT 5; + +SELECT '15 MINUTE'; +SELECT d64, count() FROM with_fill_date GROUP BY d64 ORDER BY d64 WITH FILL STEP INTERVAL 15 MINUTE LIMIT 5; +SELECT '6 HOUR'; +SELECT toStartOfHour(d64) as d64, count() FROM with_fill_date GROUP BY d64 ORDER BY d64 WITH FILL STEP INTERVAL 6 HOUR LIMIT 5; +SELECT '10 DAY'; +SELECT toStartOfDay(d64) as d64, count() FROM with_fill_date GROUP BY d64 ORDER BY d64 WITH FILL STEP INTERVAL 10 DAY LIMIT 5; + +DROP TABLE with_fill_date; + +SELECT number FROM numbers(100) ORDER BY number WITH FILL STEP INTERVAL 1 HOUR; -- { serverError INVALID_WITH_FILL_EXPRESSION } + +CREATE TABLE with_fill_date (d Date, id UInt32) ENGINE = Memory; + +INSERT INTO with_fill_date VALUES (toDate('2020-02-05'), 1); +INSERT INTO with_fill_date VALUES (toDate('2020-02-16'), 3); +INSERT INTO with_fill_date VALUES (toDate('2020-03-10'), 2); +INSERT INTO with_fill_date VALUES (toDate('2020-03-03'), 3); + +SELECT '1 MONTH'; + +SELECT toStartOfMonth(d) as d, id, count() FROM with_fill_date +GROUP BY d, id +ORDER BY +d WITH FILL + FROM toDate('2020-01-01') + TO toDate('2020-05-01') + STEP INTERVAL 1 MONTH, +id WITH FILL FROM 1 TO 5; + +DROP TABLE with_fill_date; + +SELECT d FROM (SELECT toDate(1) AS d) +ORDER BY d DESC WITH FILL FROM toDate(3) TO toDate(0) STEP INTERVAL -1 DAY; diff --git a/parser/testdata/02113_base64encode_trailing_bytes/ast.json b/parser/testdata/02113_base64encode_trailing_bytes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02113_base64encode_trailing_bytes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02113_base64encode_trailing_bytes/metadata.json b/parser/testdata/02113_base64encode_trailing_bytes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02113_base64encode_trailing_bytes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02113_base64encode_trailing_bytes/query.sql b/parser/testdata/02113_base64encode_trailing_bytes/query.sql new file mode 100644 index 000000000..eb0424312 --- /dev/null +++ b/parser/testdata/02113_base64encode_trailing_bytes/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-fasttest +SET log_queries=1; + +DROP TABLE IF EXISTS tabl_1; +DROP TABLE IF EXISTS tabl_2; + +CREATE TABLE tabl_1 (key String) ENGINE MergeTree ORDER BY key; +CREATE TABLE tabl_2 (key String) ENGINE MergeTree ORDER BY key; +SELECT * FROM tabl_1 SETTINGS log_comment = 'ad15a651'; +SELECT * FROM tabl_2 SETTINGS log_comment = 'ad15a651'; +SYSTEM FLUSH LOGS query_log; + +SELECT base64Decode(base64Encode(normalizeQuery(query))) + FROM system.query_log + WHERE type = 'QueryFinish' AND log_comment = 'ad15a651' AND current_database = currentDatabase() + GROUP BY normalizeQuery(query) + ORDER BY normalizeQuery(query); + +DROP TABLE tabl_1; +DROP TABLE tabl_2; diff --git a/parser/testdata/02113_base64encode_trailing_bytes_1/ast.json b/parser/testdata/02113_base64encode_trailing_bytes_1/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02113_base64encode_trailing_bytes_1/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02113_base64encode_trailing_bytes_1/metadata.json b/parser/testdata/02113_base64encode_trailing_bytes_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02113_base64encode_trailing_bytes_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02113_base64encode_trailing_bytes_1/query.sql b/parser/testdata/02113_base64encode_trailing_bytes_1/query.sql new file mode 100644 index 000000000..56edf5dbf --- /dev/null +++ b/parser/testdata/02113_base64encode_trailing_bytes_1/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest + +SELECT + number, + hex(base64Decode(base64Encode(repeat('a', number)))) r +FROM numbers(100); diff --git a/parser/testdata/02113_format_row/ast.json b/parser/testdata/02113_format_row/ast.json new file mode 100644 index 000000000..b3eb083a0 --- /dev/null +++ b/parser/testdata/02113_format_row/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001144452, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02113_format_row/metadata.json b/parser/testdata/02113_format_row/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02113_format_row/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02113_format_row/query.sql b/parser/testdata/02113_format_row/query.sql new file mode 100644 index 000000000..1af6f7cc7 --- /dev/null +++ b/parser/testdata/02113_format_row/query.sql @@ -0,0 +1,6 @@ +set output_format_write_statistics=0; +select formatRow('TSVWithNamesAndTypes', number, toDate(number)) from numbers(5); +select formatRow('CSVWithNamesAndTypes', number, toDate(number)) from numbers(5); +select formatRow('JSONCompactEachRowWithNamesAndTypes', number, toDate(number)) from numbers(5); +select formatRow('XML', number, toDate(number)) from numbers(5); + diff --git a/parser/testdata/02113_format_row_bug/ast.json b/parser/testdata/02113_format_row_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/02113_format_row_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02113_format_row_bug/metadata.json b/parser/testdata/02113_format_row_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02113_format_row_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02113_format_row_bug/query.sql b/parser/testdata/02113_format_row_bug/query.sql new file mode 100644 index 000000000..ce9342012 --- /dev/null +++ b/parser/testdata/02113_format_row_bug/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest + +select formatRow('ORC', number, toDate(number)) from numbers(5); -- { serverError BAD_ARGUMENTS } +select formatRow('Parquet', number, toDate(number)) from numbers(5); -- { serverError BAD_ARGUMENTS } +select formatRow('Arrow', number, toDate(number)) from numbers(5); -- { serverError BAD_ARGUMENTS } +select formatRow('Native', number, toDate(number)) from numbers(5); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02113_untuple_func_alias/ast.json b/parser/testdata/02113_untuple_func_alias/ast.json new file mode 100644 index 000000000..078d4a740 --- /dev/null +++ b/parser/testdata/02113_untuple_func_alias/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function untuple (alias ut) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Identifier b" + }, + { + "explain": " Function untuple (alias ut2) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001001628, + "rows_read": 20, + "bytes_read": 744 + } +} diff --git a/parser/testdata/02113_untuple_func_alias/metadata.json b/parser/testdata/02113_untuple_func_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02113_untuple_func_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02113_untuple_func_alias/query.sql b/parser/testdata/02113_untuple_func_alias/query.sql new file mode 100644 index 000000000..d39e6626d --- /dev/null +++ b/parser/testdata/02113_untuple_func_alias/query.sql @@ -0,0 +1,2 @@ +SELECT untuple((1, 2, 3, b)) AS `ut`, untuple((NULL, 3, 2, a)) AS `ut2` +FROM (SELECT 1 AS a, NULL AS b) FORMAT TSVWithNames; diff --git a/parser/testdata/02114_bool_type/ast.json b/parser/testdata/02114_bool_type/ast.json new file mode 100644 index 000000000..d7533413c --- /dev/null +++ b/parser/testdata/02114_bool_type/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery bool_test (children 1)" + }, + { + "explain": " Identifier bool_test" + } + ], + + "rows": 2, + + 
"statistics": + { + "elapsed": 0.001309883, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02114_bool_type/metadata.json b/parser/testdata/02114_bool_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02114_bool_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02114_bool_type/query.sql b/parser/testdata/02114_bool_type/query.sql new file mode 100644 index 000000000..6ff9206bb --- /dev/null +++ b/parser/testdata/02114_bool_type/query.sql @@ -0,0 +1,47 @@ +DROP TABLE IF EXISTS bool_test; + +CREATE TABLE bool_test (value Bool,f String) ENGINE = Memory; + +-- value column shoud have type 'Bool' +SHOW CREATE TABLE bool_test; + +INSERT INTO bool_test (value,f) VALUES (false, 'test'), (true , 'test'), (0, 'test'), (1, 'test'), (FALSE, 'test'), (TRUE, 'test'); +INSERT INTO bool_test (value,f) FORMAT JSONEachRow {"value":false,"f":"test"}{"value":true,"f":"test"}{"value":0,"f":"test"}{"value":1,"f":"test"} + +SELECT value,f FROM bool_test; +SELECT value,f FROM bool_test FORMAT JSONEachRow; +SELECT toUInt64(value),f FROM bool_test; +SELECT value,f FROM bool_test where value > 0; + +set bool_true_representation='True'; +set bool_false_representation='False'; + +INSERT INTO bool_test (value,f) FORMAT CSV True,test + +INSERT INTO bool_test (value,f) FORMAT TSV False test + +SELECT value,f FROM bool_test order by value FORMAT CSV; +SELECT value,f FROM bool_test order by value FORMAT TSV; + +set bool_true_representation='Yes'; +set bool_false_representation='No'; + +INSERT INTO bool_test (value,f) FORMAT CSV Yes,test + +INSERT INTO bool_test (value,f) FORMAT TSV No test + +SELECT value,f FROM bool_test order by value FORMAT CSV; +SELECT value,f FROM bool_test order by value FORMAT TSV; + +set bool_true_representation='On'; +set bool_false_representation='Off'; + +INSERT INTO bool_test (value,f) FORMAT CSV On,test + +INSERT INTO bool_test (value,f) FORMAT TSV Off test + +SELECT value,f FROM bool_test order by value FORMAT CSV; +SELECT value,f FROM bool_test order by value FORMAT TSV; + +DROP TABLE IF EXISTS bool_test; + diff --git a/parser/testdata/02115_map_contains_analyzer/ast.json b/parser/testdata/02115_map_contains_analyzer/ast.json new file mode 100644 index 000000000..19fa43245 --- /dev/null +++ b/parser/testdata/02115_map_contains_analyzer/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_map_contains (children 1)" + }, + { + "explain": " Identifier t_map_contains" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001275533, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/02115_map_contains_analyzer/metadata.json b/parser/testdata/02115_map_contains_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02115_map_contains_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02115_map_contains_analyzer/query.sql b/parser/testdata/02115_map_contains_analyzer/query.sql new file mode 100644 index 000000000..002854049 --- /dev/null +++ b/parser/testdata/02115_map_contains_analyzer/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS t_map_contains; + +CREATE TABLE t_map_contains (m Map(String, UInt32)) ENGINE = Memory; + +INSERT INTO t_map_contains VALUES (map('a', 1, 'b', 2)), (map('c', 3, 'd', 4)); + +SET optimize_functions_to_subcolumns = 1; +SET enable_analyzer = 1; + +EXPLAIN QUERY TREE 
dump_tree = 0, dump_ast = 1 SELECT mapContains(m, 'a') FROM t_map_contains; +SELECT mapContains(m, 'a') FROM t_map_contains; + +DROP TABLE t_map_contains; diff --git a/parser/testdata/02115_rewrite_local_join_right_distribute_table/ast.json b/parser/testdata/02115_rewrite_local_join_right_distribute_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02115_rewrite_local_join_right_distribute_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02115_rewrite_local_join_right_distribute_table/metadata.json b/parser/testdata/02115_rewrite_local_join_right_distribute_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02115_rewrite_local_join_right_distribute_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02115_rewrite_local_join_right_distribute_table/query.sql b/parser/testdata/02115_rewrite_local_join_right_distribute_table/query.sql new file mode 100644 index 000000000..d5ab82ba0 --- /dev/null +++ b/parser/testdata/02115_rewrite_local_join_right_distribute_table/query.sql @@ -0,0 +1,35 @@ +-- Tags: global, no-parallel +CREATE DATABASE IF NOT EXISTS test_02115; +USE test_02115; + +DROP TABLE IF EXISTS t1_local; +DROP TABLE IF EXISTS t2_local; +DROP TABLE IF EXISTS t1_all; +DROP TABLE IF EXISTS t2_all; + +create table t1_local(a Int32) engine=MergeTree() order by a; +create table t2_local as t1_local; + +create table t1_all as t1_local engine Distributed(test_cluster_two_shards_localhost, test_02115, t1_local, rand()); +create table t2_all as t2_local engine Distributed(test_cluster_two_shards_localhost, test_02115, t2_local, rand()); + +insert into t1_local values (1), (2), (3); +insert into t2_local values (1), (2), (3); + +set distributed_product_mode = 'local'; +select * from t1_all t1 where t1.a in (select t2.a from t2_all t2); +explain syntax select t1.* from t1_all t1 join t2_all t2 on t1.a = t2.a; +select t1.* from t1_all t1 join t2_all t2 on t1.a = t2.a ORDER BY t1.a; + +SELECT '-'; + +set distributed_product_mode = 'global'; +select * from t1_all t1 where t1.a in (select t2.a from t2_all t2); +explain syntax select t1.* from t1_all t1 join t2_all t2 on t1.a = t2.a; +select t1.* from t1_all t1 join t2_all t2 on t1.a = t2.a ORDER BY t1.a; + +DROP TABLE t1_local; +DROP TABLE t2_local; +DROP TABLE t1_all; +DROP TABLE t2_all; +DROP DATABASE test_02115; diff --git a/parser/testdata/02116_tuple_element_analyzer/ast.json b/parser/testdata/02116_tuple_element_analyzer/ast.json new file mode 100644 index 000000000..fb4e2bbe8 --- /dev/null +++ b/parser/testdata/02116_tuple_element_analyzer/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_tuple_element (children 1)" + }, + { + "explain": " Identifier t_tuple_element" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001135906, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02116_tuple_element_analyzer/metadata.json b/parser/testdata/02116_tuple_element_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02116_tuple_element_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02116_tuple_element_analyzer/query.sql b/parser/testdata/02116_tuple_element_analyzer/query.sql new file mode 100644 index 000000000..ef3729bdc --- /dev/null +++ 
b/parser/testdata/02116_tuple_element_analyzer/query.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS t_tuple_element; + +CREATE TABLE t_tuple_element(t1 Tuple(a UInt32, s String), t2 Tuple(UInt32, String)) ENGINE = Memory; +INSERT INTO t_tuple_element VALUES ((1, 'a'), (2, 'b')); + +SET optimize_functions_to_subcolumns = 1; +SET enable_analyzer = 1; + +SELECT t1.1 FROM t_tuple_element; +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT t1.1 FROM t_tuple_element; + +SELECT tupleElement(t1, 2) FROM t_tuple_element; +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT tupleElement(t1, 2) FROM t_tuple_element; + +SELECT tupleElement(t1, 'a') FROM t_tuple_element; +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT tupleElement(t1, 'a') FROM t_tuple_element; + +SELECT tupleElement(number, 1) FROM numbers(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT tupleElement(t1) FROM t_tuple_element; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT tupleElement(t1, 'b') FROM t_tuple_element; -- { serverError NOT_FOUND_COLUMN_IN_BLOCK, UNKNOWN_IDENTIFIER } +SELECT tupleElement(t1, 0) FROM t_tuple_element; -- { serverError ARGUMENT_OUT_OF_BOUND, NOT_FOUND_COLUMN_IN_BLOCK } +SELECT tupleElement(t1, 3) FROM t_tuple_element; -- { serverError ARGUMENT_OUT_OF_BOUND, NOT_FOUND_COLUMN_IN_BLOCK } +SELECT tupleElement(t1, materialize('a')) FROM t_tuple_element; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT t2.1 FROM t_tuple_element; +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT t2.1 FROM t_tuple_element; + +SELECT tupleElement(t2, 1) FROM t_tuple_element; +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT tupleElement(t2, 1) FROM t_tuple_element; + +SELECT tupleElement(t2) FROM t_tuple_element; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT tupleElement(t2, 'a') FROM t_tuple_element; -- { serverError NOT_FOUND_COLUMN_IN_BLOCK, UNKNOWN_IDENTIFIER } +SELECT tupleElement(t2, 0) FROM t_tuple_element; -- { serverError ARGUMENT_OUT_OF_BOUND, NOT_FOUND_COLUMN_IN_BLOCK } +SELECT tupleElement(t2, 3) FROM t_tuple_element; -- { serverError ARGUMENT_OUT_OF_BOUND, NOT_FOUND_COLUMN_IN_BLOCK } +SELECT tupleElement(t2, materialize(1)) FROM t_tuple_element; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +DROP TABLE t_tuple_element; + +WITH (1, 2) AS t SELECT t.1, t.2; +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 WITH (1, 2) AS t SELECT t.1, t.2; + +WITH (1, 2)::Tuple(a UInt32, b UInt32) AS t SELECT t.1, tupleElement(t, 'b'); +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 WITH (1, 2)::Tuple(a UInt32, b UInt32) AS t SELECT t.1, tupleElement(t, 'b'); diff --git a/parser/testdata/02117_show_create_table_system/ast.json b/parser/testdata/02117_show_create_table_system/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02117_show_create_table_system/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02117_show_create_table_system/metadata.json b/parser/testdata/02117_show_create_table_system/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02117_show_create_table_system/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02117_show_create_table_system/query.sql b/parser/testdata/02117_show_create_table_system/query.sql new file mode 100644 index 000000000..438f26dcc --- /dev/null +++ b/parser/testdata/02117_show_create_table_system/query.sql @@ -0,0 +1,84 @@ +/* we will `use system` to bypass the style check, +because the `show create table` statement +cannot
meet the check-style requirement, which is as follows: + +"# Queries to: +tables_with_database_column=( + system.tables + system.parts + system.detached_parts + system.parts_columns + system.columns + system.projection_parts + system.mutations +) +# should have database = currentDatabase() condition" + + */ +use system; +show create table aggregate_function_combinators format TSVRaw; +show create table asynchronous_inserts format TSVRaw; +show create table asynchronous_metrics format TSVRaw; +show create table build_options format TSVRaw; +show create table clusters format TSVRaw; +show create table collations format TSVRaw; +show create table columns format TSVRaw; +show create table contributors format TSVRaw; +show create table current_roles format TSVRaw; +show create table data_skipping_indices format TSVRaw; +show create table data_type_families format TSVRaw; +show create table databases format TSVRaw; +show create table detached_parts format TSVRaw; +show create table dictionaries format TSVRaw; +show create table disks format TSVRaw; +show create table distributed_ddl_queue format TSVRaw; +show create table distribution_queue format TSVRaw; +show create table enabled_roles format TSVRaw; +show create table errors format TSVRaw; +show create table events format TSVRaw; +show create table formats format TSVRaw; +show create table functions format TSVRaw; +-- show create table grants format TSVRaw; -- it's updated too often, it's inconvenient to update the test +show create table graphite_retentions format TSVRaw; +show create table licenses format TSVRaw; +show create table macros format TSVRaw; +show create table merge_tree_settings format TSVRaw; +show create table merges format TSVRaw; +show create table metrics format TSVRaw; +show create table moves format TSVRaw; +show create table mutations format TSVRaw; +show create table numbers format TSVRaw; +show create table numbers_mt format TSVRaw; +show create table one format TSVRaw; +show create table part_moves_between_shards format TSVRaw; +show create table parts format TSVRaw; +show create table parts_columns format TSVRaw; +-- show create table privileges format TSVRaw; -- it's updated too often, it's inconvenient to update the test +show create table processes format TSVRaw; +show create table projection_parts format TSVRaw; +show create table projection_parts_columns format TSVRaw; +show create table quota_limits format TSVRaw; +show create table quota_usage format TSVRaw; +show create table quotas format TSVRaw; +show create table quotas_usage format TSVRaw; +show create table replicas format TSVRaw; +show create table replicated_fetches format TSVRaw; +show create table replicated_merge_tree_settings format TSVRaw; +show create table replication_queue format TSVRaw; +show create table role_grants format TSVRaw; +show create table roles format TSVRaw; +show create table row_policies format TSVRaw; +show create table settings format TSVRaw; +show create table settings_profile_elements format TSVRaw; +show create table settings_profiles format TSVRaw; +show create table stack_trace format TSVRaw; +show create table storage_policies format TSVRaw; +show create table table_engines format TSVRaw; +show create table table_functions format TSVRaw; +show create table tables format TSVRaw; +show create table time_zones format TSVRaw; +show create table user_directories format TSVRaw; +show create table users format TSVRaw; +show create table warnings format TSVRaw; +show create table zeros format TSVRaw; +show create table zeros_mt format
TSVRaw; diff --git a/parser/testdata/02118_show_create_table_rocksdb/ast.json b/parser/testdata/02118_show_create_table_rocksdb/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02118_show_create_table_rocksdb/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02118_show_create_table_rocksdb/metadata.json b/parser/testdata/02118_show_create_table_rocksdb/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02118_show_create_table_rocksdb/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02118_show_create_table_rocksdb/query.sql b/parser/testdata/02118_show_create_table_rocksdb/query.sql new file mode 100644 index 000000000..98a64c4b7 --- /dev/null +++ b/parser/testdata/02118_show_create_table_rocksdb/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: In fasttest, ENABLE_LIBRARIES=0, so rocksdb engine is not enabled by default +show create table system.rocksdb; diff --git a/parser/testdata/02119_sumcount/ast.json b/parser/testdata/02119_sumcount/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02119_sumcount/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02119_sumcount/metadata.json b/parser/testdata/02119_sumcount/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02119_sumcount/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02119_sumcount/query.sql b/parser/testdata/02119_sumcount/query.sql new file mode 100644 index 000000000..dc66a822d --- /dev/null +++ b/parser/testdata/02119_sumcount/query.sql @@ -0,0 +1,173 @@ +-- Integer types are added as integers +SELECT toTypeName(sumCount(v)), sumCount(v) FROM +( + SELECT v FROM + ( + SELECT '9007199254740992'::UInt64 AS v + UNION ALL + SELECT '1'::UInt64 AS v + UNION ALL SELECT '1'::UInt64 AS v + ) + ORDER BY v +); +SELECT toTypeName(sumCount(v)), sumCount(v) FROM +( + SELECT v FROM + ( + SELECT '9007199254740992'::Nullable(UInt64) AS v + UNION ALL + SELECT '1'::Nullable(UInt64) AS v + UNION ALL + SELECT '1'::Nullable(UInt64) AS v + ) + ORDER BY v +); + +SET allow_suspicious_low_cardinality_types=1; + +SELECT toTypeName(sumCount(v)), sumCount(v) FROM +( + SELECT v FROM + ( + SELECT '9007199254740992'::LowCardinality(UInt64) AS v + UNION ALL + SELECT '1'::LowCardinality(UInt64) AS v + UNION ALL + SELECT '1'::LowCardinality(UInt64) AS v + ) + ORDER BY v +); +SELECT toTypeName(sumCount(v)), sumCount(v) FROM +( + SELECT v FROM + ( + SELECT '9007199254740992'::LowCardinality(Nullable(UInt64)) AS v + UNION ALL + SELECT '1'::LowCardinality(Nullable(UInt64)) AS v + UNION ALL + SELECT '1'::LowCardinality(Nullable(UInt64)) AS v + ) + ORDER BY v +); + +-- -- Float64 types are added as Float64 +SELECT toTypeName(sumCount(v)), sumCount(v) FROM +( + SELECT v FROM + ( + SELECT '9007199254740992'::Float64 AS v + UNION ALL + SELECT '1'::Float64 AS v + UNION ALL SELECT '1'::Float64 AS v + ) + ORDER BY v +); +SELECT toTypeName(sumCount(v)), sumCount(v) FROM +( + SELECT v FROM + ( + SELECT '9007199254740992'::Nullable(Float64) AS v + UNION ALL + SELECT '1'::Nullable(Float64) AS v + UNION ALL + SELECT '1'::Nullable(Float64) AS v + ) + ORDER BY v +); +SELECT toTypeName(sumCount(v)), sumCount(v) FROM +( + SELECT v FROM + ( + SELECT '9007199254740992'::LowCardinality(Float64) AS v + UNION ALL + SELECT '1'::LowCardinality(Float64) AS v + UNION ALL + SELECT '1'::LowCardinality(Float64) AS 
v + ) + ORDER BY v +); +SELECT toTypeName(sumCount(v)), sumCount(v) FROM +( + SELECT v FROM + ( + SELECT '9007199254740992'::LowCardinality(Nullable(Float64)) AS v + UNION ALL + SELECT '1'::LowCardinality(Nullable(Float64)) AS v + UNION ALL + SELECT '1'::LowCardinality(Nullable(Float64)) AS v + ) + ORDER BY v +); + +-- -- Float32 values are added as Float64 +SELECT toTypeName(sumCount(v)), sumCount(v) FROM +( + SELECT v FROM + ( + SELECT '16777216'::Float32 AS v + UNION ALL + SELECT '1'::Float32 AS v + UNION ALL + SELECT '1'::Float32 AS v + ) + ORDER BY v +); +SELECT toTypeName(sumCount(v)), sumCount(v) FROM +( + SELECT v FROM + ( + SELECT '16777216'::Nullable(Float32) AS v + UNION ALL + SELECT '1'::Nullable(Float32) AS v + UNION ALL + SELECT '1'::Nullable(Float32) AS v + ) + ORDER BY v +); +SELECT toTypeName(sumCount(v)), sumCount(v) FROM +( + SELECT v FROM + ( + SELECT '16777216'::LowCardinality(Float32) AS v + UNION ALL + SELECT '1'::LowCardinality(Float32) AS v + UNION ALL + SELECT '1'::LowCardinality(Float32) AS v + ) + ORDER BY v +); +SELECT toTypeName(sumCount(v)), sumCount(v) FROM +( + SELECT v FROM + ( + SELECT '16777216'::LowCardinality(Nullable(Float32)) AS v + UNION ALL + SELECT '1'::LowCardinality(Nullable(Float32)) AS v + UNION ALL + SELECT '1'::LowCardinality(Nullable(Float32)) AS v + ) + ORDER BY v +); + +-- Small integer types use their signed/unsigned 64-bit supertype +SELECT toTypeName(sumCount(number::Int8)), sumCount(number::Int8) FROM numbers(120); +SELECT toTypeName(sumCount(number::UInt8)), sumCount(number::UInt8) FROM numbers(250); + +-- Larger integers use their own type +SELECT toTypeName(sumCount(v)), sumCount(v) FROM (SELECT '1'::Int128 AS v FROM numbers(100)); +SELECT toTypeName(sumCount(v)), sumCount(v) FROM (SELECT '1'::Int256 AS v FROM numbers(100)); +SELECT toTypeName(sumCount(v)), sumCount(v) FROM (SELECT '1'::UInt128 AS v FROM numbers(100)); +SELECT toTypeName(sumCount(v)), sumCount(v) FROM (SELECT '1'::UInt256 AS v FROM numbers(100)); + +-- Decimal types +SELECT toTypeName(sumCount(v)), sumCount(v) FROM (SELECT '1.001'::Decimal(3, 3) AS v FROM numbers(100)); + +-- Other types +SELECT toTypeName(sumCount(v)), sumCount(v) FROM (SELECT 'a'::String AS v); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toTypeName(sumCount(v)), sumCount(v) FROM (SELECT now()::DateTime AS v); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + + +-- SumCountIf +SELECT sumCountIf(n, n > 10) FROM (SELECT number AS n FROM system.numbers LIMIT 100 ); +SELECT sumCountIf(n, n > 10) FROM (SELECT toNullable(number) AS n FROM system.numbers LIMIT 100); +SELECT sumCountIf(n, n > 10) FROM (SELECT If(number % 2 == 0, number, NULL) AS n FROM system.numbers LIMIT 100); diff --git a/parser/testdata/02123_MySQLWire_regression/ast.json b/parser/testdata/02123_MySQLWire_regression/ast.json new file mode 100644 index 000000000..c7fe3383f --- /dev/null +++ b/parser/testdata/02123_MySQLWire_regression/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_MySQLWire (children 1)" + }, + { + "explain": " Identifier table_MySQLWire" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001215034, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02123_MySQLWire_regression/metadata.json b/parser/testdata/02123_MySQLWire_regression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02123_MySQLWire_regression/metadata.json @@ -0,0 +1 @@ +{"todo":
true} diff --git a/parser/testdata/02123_MySQLWire_regression/query.sql b/parser/testdata/02123_MySQLWire_regression/query.sql new file mode 100644 index 000000000..504d2f2a5 --- /dev/null +++ b/parser/testdata/02123_MySQLWire_regression/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS table_MySQLWire; +CREATE TABLE table_MySQLWire (x UInt64) ENGINE = File(MySQLWire); +INSERT INTO table_MySQLWire SELECT number FROM numbers(10); +-- regression for not initializing serializations +INSERT INTO table_MySQLWire SELECT number FROM numbers(10); +DROP TABLE table_MySQLWire; diff --git a/parser/testdata/02124_clickhouse_dictionary_with_predefined_configuration/ast.json b/parser/testdata/02124_clickhouse_dictionary_with_predefined_configuration/ast.json new file mode 100644 index 000000000..2dd066391 --- /dev/null +++ b/parser/testdata/02124_clickhouse_dictionary_with_predefined_configuration/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery dict (children 1)" + }, + { + "explain": " Identifier dict" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001314387, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02124_clickhouse_dictionary_with_predefined_configuration/metadata.json b/parser/testdata/02124_clickhouse_dictionary_with_predefined_configuration/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02124_clickhouse_dictionary_with_predefined_configuration/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02124_clickhouse_dictionary_with_predefined_configuration/query.sql b/parser/testdata/02124_clickhouse_dictionary_with_predefined_configuration/query.sql new file mode 100644 index 000000000..442325805 --- /dev/null +++ b/parser/testdata/02124_clickhouse_dictionary_with_predefined_configuration/query.sql @@ -0,0 +1,25 @@ +DROP DICTIONARY IF EXISTS dict; +DROP TABLE IF EXISTS s; +CREATE TABLE s +( + id UInt64, + value String +) +ENGINE = Memory; + +INSERT INTO s VALUES(1, 'OK'); + +CREATE DICTIONARY dict +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(NAME clickhouse_dictionary PORT tcpPort() DB currentDatabase())) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(CACHE(SIZE_IN_CELLS 10)); + +SELECT dictGet('dict', 'value', toUInt64(1)); + +DROP DICTIONARY dict; +DROP TABLE s; diff --git a/parser/testdata/02124_comparison_betwwen_decimal_and_float/ast.json b/parser/testdata/02124_comparison_betwwen_decimal_and_float/ast.json new file mode 100644 index 000000000..2bc713482 --- /dev/null +++ b/parser/testdata/02124_comparison_betwwen_decimal_and_float/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_1" + }, + { + "explain": " Literal 'Decimal(15,2)'" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'Float64'" + } + ], + + "rows": 14, + + "statistics": + { + 
"elapsed": 0.001021426, + "rows_read": 14, + "bytes_read": 531 + } +} diff --git a/parser/testdata/02124_comparison_betwwen_decimal_and_float/metadata.json b/parser/testdata/02124_comparison_betwwen_decimal_and_float/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02124_comparison_betwwen_decimal_and_float/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02124_comparison_betwwen_decimal_and_float/query.sql b/parser/testdata/02124_comparison_betwwen_decimal_and_float/query.sql new file mode 100644 index 000000000..d8dabbab5 --- /dev/null +++ b/parser/testdata/02124_comparison_betwwen_decimal_and_float/query.sql @@ -0,0 +1,47 @@ +select CAST(1.0, 'Decimal(15,2)') > CAST(1, 'Float64'); +select CAST(1.0, 'Decimal(15,2)') = CAST(1, 'Float64'); +select CAST(1.0, 'Decimal(15,2)') < CAST(1, 'Float64'); +select CAST(1.0, 'Decimal(15,2)') != CAST(1, 'Float64'); +select CAST(1.0, 'Decimal(15,2)') > CAST(-1, 'Float64'); +select CAST(1.0, 'Decimal(15,2)') = CAST(-1, 'Float64'); +select CAST(1.0, 'Decimal(15,2)') < CAST(-1, 'Float64'); +select CAST(1.0, 'Decimal(15,2)') != CAST(-1, 'Float64'); +select CAST(1.0, 'Decimal(15,2)') > CAST(1, 'Float32'); +select CAST(1.0, 'Decimal(15,2)') = CAST(1, 'Float32'); +select CAST(1.0, 'Decimal(15,2)') < CAST(1, 'Float32'); +select CAST(1.0, 'Decimal(15,2)') != CAST(1, 'Float32'); +select CAST(1.0, 'Decimal(15,2)') > CAST(-1, 'Float32'); +select CAST(1.0, 'Decimal(15,2)') = CAST(-1, 'Float32'); +select CAST(1.0, 'Decimal(15,2)') < CAST(-1, 'Float32'); +select CAST(1.0, 'Decimal(15,2)') != CAST(-1, 'Float32'); + +SELECT toDecimal32('11.00', 2) > 1.; + +SELECT 0.1000000000000000055511151231257827021181583404541015625::Decimal256(70) = 0.1; + +DROP TABLE IF EXISTS t; + +CREATE TABLE t +( + d1 Decimal32(5), + d2 Decimal64(10), + d3 Decimal128(30), + d4 Decimal256(50), + f1 Float32, + f2 Float32 +)ENGINE = Memory; + +INSERT INTO t values (-1.5, -1.5, -1.5, -1.5, 1.5, 1.5); +INSERT INTO t values (1.5, 1.5, 1.5, 1.5, -1.5, -1.5); + +SELECT d1 > f1 FROM t ORDER BY f1; +SELECT d2 > f1 FROM t ORDER BY f1; +SELECT d3 > f1 FROM t ORDER BY f1; +SELECT d4 > f1 FROM t ORDER BY f1; + +SELECT d1 > f2 FROM t ORDER BY f2; +SELECT d2 > f2 FROM t ORDER BY f2; +SELECT d3 > f2 FROM t ORDER BY f2; +SELECT d4 > f2 FROM t ORDER BY f2; + +DROP TABLE t; diff --git a/parser/testdata/02124_empty_uuid/ast.json b/parser/testdata/02124_empty_uuid/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02124_empty_uuid/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02124_empty_uuid/metadata.json b/parser/testdata/02124_empty_uuid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02124_empty_uuid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02124_empty_uuid/query.sql b/parser/testdata/02124_empty_uuid/query.sql new file mode 100644 index 000000000..8dbfa3bae --- /dev/null +++ b/parser/testdata/02124_empty_uuid/query.sql @@ -0,0 +1,7 @@ +SELECT + arrayJoin([toUUID('00000000-0000-0000-0000-000000000000'), toUUID('992f6910-42b2-43cd-98bc-c812fbf9b683')]) AS x, + empty(x) AS emp; + +SELECT + arrayJoin([toUUID('992f6910-42b2-43cd-98bc-c812fbf9b683'), toUUID('00000000-0000-0000-0000-000000000000')]) AS x, + empty(x) AS emp; diff --git a/parser/testdata/02124_encrypt_decrypt_nullable/ast.json b/parser/testdata/02124_encrypt_decrypt_nullable/ast.json new file mode 100644 index 000000000..490a2e17e --- 
/dev/null +++ b/parser/testdata/02124_encrypt_decrypt_nullable/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02124_encrypt_decrypt_nullable/metadata.json b/parser/testdata/02124_encrypt_decrypt_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02124_encrypt_decrypt_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02124_encrypt_decrypt_nullable/query.sql b/parser/testdata/02124_encrypt_decrypt_nullable/query.sql new file mode 100644 index 000000000..a029b4afa --- /dev/null +++ b/parser/testdata/02124_encrypt_decrypt_nullable/query.sql @@ -0,0 +1,57 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on OpenSSL + +------------------------------------------------------------------------------- +-- Validate that encrypt/decrypt (and mysql versions) work against Nullable(String). +-- null gets encrypted/decrypted as null, non-null encrypted/decrypted as usual. +------------------------------------------------------------------------------- +-- using nullIf since that is the easiest way to produce `Nullable(String)` with a `null` value + +----------------------------------------------------------------------------------------------------------------------------------- +-- MySQL compatibility +SELECT 'aes_encrypt_mysql'; + +SELECT aes_encrypt_mysql('aes-256-ecb', CAST(null as Nullable(String)), 'test_key________________________'); + +WITH 'aes-256-ecb' as mode, 'Hello World!' as plaintext, 'test_key________________________' as key +SELECT hex(aes_encrypt_mysql(mode, toNullable(plaintext), key)); + +SELECT 'aes_decrypt_mysql'; + +SELECT aes_decrypt_mysql('aes-256-ecb', CAST(null as Nullable(String)), 'test_key________________________'); + +WITH 'aes-256-ecb' as mode, unhex('D1B43643E1D0E9390E39BA4EAE150851') as ciphertext, 'test_key________________________' as key +SELECT hex(aes_decrypt_mysql(mode, toNullable(ciphertext), key)); + +----------------------------------------------------------------------------------------------------------------------------------- +-- encrypt both non-null and null values of Nullable(String) +SELECT 'encrypt'; + +WITH 'aes-256-ecb' as mode, 'test_key________________________' as key +SELECT mode, encrypt(mode, CAST(null as Nullable(String)), key); + +WITH 'aes-256-gcm' as mode, 'test_key________________________' as key, 'test_iv_____' as iv +SELECT mode, encrypt(mode, CAST(null as Nullable(String)), key, iv); + +WITH 'aes-256-ecb' as mode, 'test_key________________________' as key +SELECT mode, hex(encrypt(mode, toNullable('Hello World!'), key)); + +WITH 'aes-256-gcm' as mode, 'test_key________________________' as key, 'test_iv_____' as iv +SELECT mode, hex(encrypt(mode, toNullable('Hello World!'), key, iv)); + +----------------------------------------------------------------------------------------------------------------------------------- +-- decrypt both non-null and null values of Nullable(String) + +SELECT 'decrypt'; + +WITH 'aes-256-ecb' as mode, 'test_key________________________' as key +SELECT mode, decrypt(mode, CAST(null as Nullable(String)), key); + +WITH 'aes-256-gcm' as mode, 'test_key________________________' as key, 'test_iv_____' as iv +SELECT mode, decrypt(mode, CAST(null as Nullable(String)), key, iv); + +WITH 'aes-256-ecb' as mode, unhex('D1B43643E1D0E9390E39BA4EAE150851') as ciphertext, 'test_key________________________' as key +SELECT mode, decrypt(mode, toNullable(ciphertext), key); + +WITH 'aes-256-gcm' as mode, 
unhex('219E6478A1A3BB5B686DA4BAD70323F192EFEDCCBBD6F49E78A7E2F6') as ciphertext, 'test_key________________________' as key, 'test_iv_____' as iv +SELECT mode, decrypt(mode, toNullable(ciphertext), key, iv); diff --git a/parser/testdata/02124_insert_deduplication_token/ast.json b/parser/testdata/02124_insert_deduplication_token/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02124_insert_deduplication_token/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02124_insert_deduplication_token/metadata.json b/parser/testdata/02124_insert_deduplication_token/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02124_insert_deduplication_token/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02124_insert_deduplication_token/query.sql b/parser/testdata/02124_insert_deduplication_token/query.sql new file mode 100644 index 000000000..4581ef995 --- /dev/null +++ b/parser/testdata/02124_insert_deduplication_token/query.sql @@ -0,0 +1,33 @@ +-- insert duplicate data, providing a deduplication token on insert + +DROP TABLE IF EXISTS insert_dedup_token SYNC; + +select 'create and check deduplication'; +CREATE TABLE insert_dedup_token ( + id Int32, val UInt32 +) ENGINE=MergeTree() ORDER BY id +SETTINGS non_replicated_deduplication_window=0xFFFFFFFF; + +select 'two inserts with exact data, one inserted, one deduplicated by data digest'; +INSERT INTO insert_dedup_token VALUES(0, 1000); +INSERT INTO insert_dedup_token VALUES(0, 1000); +SELECT * FROM insert_dedup_token ORDER BY id; + +select 'two inserts with the same dedup token, one inserted, one deduplicated by the token'; +set insert_deduplication_token = '\x61\x00\x62'; +INSERT INTO insert_dedup_token VALUES(1, 1001); +INSERT INTO insert_dedup_token VALUES(2, 1002); +SELECT * FROM insert_dedup_token ORDER BY id; + +select 'update dedup token, two inserts with the same dedup token, one inserted, one deduplicated by the token'; +set insert_deduplication_token = '\x61\x00\x63'; +INSERT INTO insert_dedup_token VALUES(1, 1001); +INSERT INTO insert_dedup_token VALUES(2, 1002); +SELECT * FROM insert_dedup_token ORDER BY id; + +select 'reset deduplication token and insert new row'; +set insert_deduplication_token = ''; +INSERT INTO insert_dedup_token VALUES(2, 1002); +SELECT * FROM insert_dedup_token ORDER BY id; + +DROP TABLE insert_dedup_token SYNC; diff --git a/parser/testdata/02124_insert_deduplication_token_materialized_views/ast.json b/parser/testdata/02124_insert_deduplication_token_materialized_views/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02124_insert_deduplication_token_materialized_views/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02124_insert_deduplication_token_materialized_views/metadata.json b/parser/testdata/02124_insert_deduplication_token_materialized_views/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02124_insert_deduplication_token_materialized_views/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02124_insert_deduplication_token_materialized_views/query.sql b/parser/testdata/02124_insert_deduplication_token_materialized_views/query.sql new file mode 100644 index 000000000..36761f9fa --- /dev/null +++ b/parser/testdata/02124_insert_deduplication_token_materialized_views/query.sql @@ -0,0 +1,93 @@ +-- Tags: long + +select
'deduplicate_blocks_in_dependent_materialized_views=0, insert_deduplication_token = no, results: test_mv_a and test_mv_c have all data, test_mv_b has data only with max_partitions_per_insert_block=0'; + +drop table if exists test sync; +drop table if exists test_mv_a sync; +drop table if exists test_mv_b sync; +drop table if exists test_mv_c sync; + +set deduplicate_blocks_in_dependent_materialized_views=0; + +CREATE TABLE test (test String, A Int64, B Int64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_02124/{table}', '1') +ORDER BY tuple(); + +CREATE MATERIALIZED VIEW test_mv_a Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/test_02124/{table}', '1') +order by tuple() AS SELECT test, A, count() c FROM test group by test, A; + +CREATE MATERIALIZED VIEW test_mv_b Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/test_02124/{table}', '1') +partition by A order by tuple() AS SELECT test, A, count() c FROM test group by test, A; + +CREATE MATERIALIZED VIEW test_mv_c Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/test_02124/{table}', '1') +order by tuple() AS SELECT test, A, count() c FROM test group by test, A; + +SET max_partitions_per_insert_block = 1; +INSERT INTO test SELECT 'case1', number%3, 1 FROM numbers(9) SETTINGS materialized_views_ignore_errors=1; +SET max_partitions_per_insert_block = 0; +INSERT INTO test SELECT 'case1', number%3, 1 FROM numbers(9); +INSERT INTO test SELECT 'case1', number%3, 2 FROM numbers(9); +INSERT INTO test SELECT 'case1', number%3, 2 FROM numbers(9); + +select + (select count() from test where test='case1'), + (select sum(c) from test_mv_a where test='case1'), + (select sum(c) from test_mv_b where test='case1'), + (select sum(c) from test_mv_c where test='case1'); + + +select 'deduplicate_blocks_in_dependent_materialized_views=1, insert_deduplication_token = no, results: all tables have deduplicated data'; + +set deduplicate_blocks_in_dependent_materialized_views=1; + +SET max_partitions_per_insert_block = 1; +INSERT INTO test SELECT 'case2', number%3, 1 FROM numbers(9) SETTINGS materialized_views_ignore_errors=1; +SET max_partitions_per_insert_block = 0; +INSERT INTO test SELECT 'case2', number%3, 1 FROM numbers(9); +INSERT INTO test SELECT 'case2', number%3, 2 FROM numbers(9); +INSERT INTO test SELECT 'case2', number%3, 2 FROM numbers(9); + +select + (select count() from test where test='case2'), + (select sum(c) from test_mv_a where test='case2'), + (select sum(c) from test_mv_b where test='case2'), + (select sum(c) from test_mv_c where test='case2'); + + +select 'deduplicate_blocks_in_dependent_materialized_views=0, insert_deduplication_token = yes, results: test_mv_a and test_mv_c have all data, test_mv_b has data only with max_partitions_per_insert_block=0'; + +set deduplicate_blocks_in_dependent_materialized_views=0; + +SET max_partitions_per_insert_block = 1; +INSERT INTO test SELECT 'case3', number%3, 1 FROM numbers(9) SETTINGS insert_deduplication_token = 'case3test1', materialized_views_ignore_errors=1; +SET max_partitions_per_insert_block = 0; +INSERT INTO test SELECT 'case3', number%3, 1 FROM numbers(9) SETTINGS insert_deduplication_token = 'case3test1'; +INSERT INTO test SELECT 'case3', number%3, 2 FROM numbers(9) SETTINGS insert_deduplication_token = 'case3test2'; +INSERT INTO test SELECT 'case3', number%3, 2 FROM numbers(9) SETTINGS insert_deduplication_token = 'case3test2'; + +select + (select count() from test where test='case3'), + (select sum(c) from test_mv_a where test='case3'),
(select sum(c) from test_mv_b where test='case3'), + (select sum(c) from test_mv_c where test='case3'); + +select 'deduplicate_blocks_in_dependent_materialized_views=1, insert_deduplication_token = yes, results: all tables have deduplicated data'; + +set deduplicate_blocks_in_dependent_materialized_views=1; + +SET max_partitions_per_insert_block = 1; +INSERT INTO test SELECT 'case4', number%3, 1 FROM numbers(9) SETTINGS insert_deduplication_token = 'case4test1'; -- { serverError TOO_MANY_PARTS } +SET max_partitions_per_insert_block = 0; +INSERT INTO test SELECT 'case4', number%3, 1 FROM numbers(9) SETTINGS insert_deduplication_token = 'case4test1'; +INSERT INTO test SELECT 'case4', number%3, 2 FROM numbers(9) SETTINGS insert_deduplication_token = 'case4test2'; +INSERT INTO test SELECT 'case4', number%3, 2 FROM numbers(9) SETTINGS insert_deduplication_token = 'case4test2'; + +select + (select count() from test where test='case4'), + (select sum(c) from test_mv_a where test='case4'), + (select sum(c) from test_mv_b where test='case4'), + (select sum(c) from test_mv_c where test='case4'); + +drop table test sync; +drop table test_mv_a sync; +drop table test_mv_b sync; +drop table test_mv_c sync; diff --git a/parser/testdata/02124_insert_deduplication_token_replica/ast.json b/parser/testdata/02124_insert_deduplication_token_replica/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02124_insert_deduplication_token_replica/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02124_insert_deduplication_token_replica/metadata.json b/parser/testdata/02124_insert_deduplication_token_replica/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02124_insert_deduplication_token_replica/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02124_insert_deduplication_token_replica/query.sql b/parser/testdata/02124_insert_deduplication_token_replica/query.sql new file mode 100644 index 000000000..82f53ecca --- /dev/null +++ b/parser/testdata/02124_insert_deduplication_token_replica/query.sql @@ -0,0 +1,56 @@ +-- insert duplicate data, providing a deduplication token on insert + +DROP TABLE IF EXISTS insert_dedup_token1 SYNC; +DROP TABLE IF EXISTS insert_dedup_token2 SYNC; + +select 'create replica 1 and check deduplication'; +CREATE TABLE insert_dedup_token1 ( + id Int32, val UInt32 +) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/insert_dedup_token', 'r1') ORDER BY id; + +select 'two inserts with exact data, one inserted, one deduplicated by data digest'; +INSERT INTO insert_dedup_token1 VALUES(1, 1001); +INSERT INTO insert_dedup_token1 VALUES(1, 1001); +SELECT * FROM insert_dedup_token1 ORDER BY id; + +SYSTEM FLUSH LOGS system.part_log; +SELECT DISTINCT exception FROM system.part_log +WHERE table = 'insert_dedup_token1' + AND database = currentDatabase() + AND event_type = 'NewPart' + AND error = 389; + +select 'two inserts with the same dedup token, one inserted, one deduplicated by the token'; +set insert_deduplication_token = '1'; +INSERT INTO insert_dedup_token1 VALUES(1, 1001); +INSERT INTO insert_dedup_token1 VALUES(2, 1002); +SELECT * FROM insert_dedup_token1 ORDER BY id; + +select 'reset deduplication token and insert new row'; +set insert_deduplication_token = ''; +INSERT INTO insert_dedup_token1 VALUES(2, 1002); +SELECT * FROM insert_dedup_token1 ORDER BY id; + +select 'create replica 2 and check deduplication'; +CREATE TABLE insert_dedup_token2 ( + id Int32,
val UInt32 +) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/insert_dedup_token', 'r2') ORDER BY id; +SYSTEM SYNC REPLICA insert_dedup_token2; + +select 'inserted value deduplicated by data digest, the same result as before'; +set insert_deduplication_token = ''; +INSERT INTO insert_dedup_token2 VALUES(1, 1001); -- deduplicated by data digest +SELECT * FROM insert_dedup_token2 ORDER BY id; + +select 'inserted value deduplicated by dedup token, the same result as before'; +set insert_deduplication_token = '1'; +INSERT INTO insert_dedup_token2 VALUES(3, 1003); -- deduplicated by dedup token +SELECT * FROM insert_dedup_token2 ORDER BY id; + +select 'new record inserted by providing new deduplication token'; +set insert_deduplication_token = '2'; +INSERT INTO insert_dedup_token2 VALUES(2, 1002); -- inserted +SELECT * FROM insert_dedup_token2 ORDER BY id; + +DROP TABLE insert_dedup_token1 SYNC; +DROP TABLE insert_dedup_token2 SYNC; diff --git a/parser/testdata/02124_uncompressed_cache/ast.json b/parser/testdata/02124_uncompressed_cache/ast.json new file mode 100644 index 000000000..9ed0a9fda --- /dev/null +++ b/parser/testdata/02124_uncompressed_cache/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_uncompressed_cache (children 1)" + }, + { + "explain": " Identifier t_uncompressed_cache" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001155209, + "rows_read": 2, + "bytes_read": 92 + } +} diff --git a/parser/testdata/02124_uncompressed_cache/metadata.json b/parser/testdata/02124_uncompressed_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02124_uncompressed_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02124_uncompressed_cache/query.sql b/parser/testdata/02124_uncompressed_cache/query.sql new file mode 100644 index 000000000..60b616b45 --- /dev/null +++ b/parser/testdata/02124_uncompressed_cache/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS t_uncompressed_cache; + +CREATE TABLE t_uncompressed_cache(id UInt32, n UInt32) +ENGINE = MergeTree ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0, +min_compress_block_size = 12, max_compress_block_size = 12, +index_granularity = 4; + +INSERT INTO t_uncompressed_cache SELECT number, number FROM numbers(200); + +SET max_threads = 1; + +SELECT sum(n), count() FROM t_uncompressed_cache PREWHERE id = 0 OR id = 5 OR id = 100 SETTINGS use_uncompressed_cache = 0; +SELECT sum(n), count() FROM t_uncompressed_cache PREWHERE id = 0 OR id = 5 OR id = 100 SETTINGS use_uncompressed_cache = 1; + +DROP TABLE t_uncompressed_cache; diff --git a/parser/testdata/02125_constant_if_condition_and_not_existing_column/ast.json b/parser/testdata/02125_constant_if_condition_and_not_existing_column/ast.json new file mode 100644 index 000000000..3a978f01e --- /dev/null +++ b/parser/testdata/02125_constant_if_condition_and_not_existing_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001034708, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02125_constant_if_condition_and_not_existing_column/metadata.json b/parser/testdata/02125_constant_if_condition_and_not_existing_column/metadata.json new file mode 100644 index 000000000..ef120d978 
--- /dev/null +++ b/parser/testdata/02125_constant_if_condition_and_not_existing_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02125_constant_if_condition_and_not_existing_column/query.sql b/parser/testdata/02125_constant_if_condition_and_not_existing_column/query.sql new file mode 100644 index 000000000..822ffb197 --- /dev/null +++ b/parser/testdata/02125_constant_if_condition_and_not_existing_column/query.sql @@ -0,0 +1,15 @@ +drop table if exists test; +-- these queries do not have to pass, but they work historically +-- let's support this while we can, see #31687 +create table test (x String) Engine=StripeLog; +insert into test values (0); +select if(0, y, 42) from test; +select if(1, 42, y) from test; +select if(toUInt8(0), y, 42) from test; +select if(toUInt8(0), y, 42) from test; +select if(toUInt8(1), 42, y) from test; +select if(toUInt8(1), 42, y) from test; +select if(toUInt8(toUInt8(0)), y, 42) from test; +select if(cast(cast(0, 'UInt8'), 'UInt8'), y, 42) from test; +explain syntax select x, if((select hasColumnInTable(currentDatabase(), 'test', 'y')), y, x || '_') from test; +drop table if exists t; diff --git a/parser/testdata/02125_dict_get_type_nullable_fix/ast.json b/parser/testdata/02125_dict_get_type_nullable_fix/ast.json new file mode 100644 index 000000000..c51791433 --- /dev/null +++ b/parser/testdata/02125_dict_get_type_nullable_fix/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02125_test_table (children 1)" + }, + { + "explain": " Identifier 02125_test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00118513, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/02125_dict_get_type_nullable_fix/metadata.json b/parser/testdata/02125_dict_get_type_nullable_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02125_dict_get_type_nullable_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02125_dict_get_type_nullable_fix/query.sql b/parser/testdata/02125_dict_get_type_nullable_fix/query.sql new file mode 100644 index 000000000..1d08dc636 --- /dev/null +++ b/parser/testdata/02125_dict_get_type_nullable_fix/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS 02125_test_table; +CREATE TABLE 02125_test_table +( + id UInt64, + value Nullable(String) +) +ENGINE=TinyLog; + +INSERT INTO 02125_test_table VALUES (0, 'Value'); + +DROP DICTIONARY IF EXISTS 02125_test_dictionary; +CREATE DICTIONARY 02125_test_dictionary +( + id UInt64, + value Nullable(String) +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE '02125_test_table')) +LAYOUT(DIRECT()); + +SELECT dictGet('02125_test_dictionary', 'value', toUInt64(0)); +SELECT dictGetString('02125_test_dictionary', 'value', toUInt64(0)); --{serverError TYPE_MISMATCH} diff --git a/parser/testdata/02125_fix_storage_filelog/ast.json b/parser/testdata/02125_fix_storage_filelog/ast.json new file mode 100644 index 000000000..a5d13b854 --- /dev/null +++ b/parser/testdata/02125_fix_storage_filelog/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery log (children 3)" + }, + { + "explain": " Identifier log" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration A (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain":
" Storage definition (children 1)" + }, + { + "explain": " Function FileLog (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '\/tmp\/aaa.csv'" + }, + { + "explain": " Literal 'CSV'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001238264, + "rows_read": 11, + "bytes_read": 382 + } +} diff --git a/parser/testdata/02125_fix_storage_filelog/metadata.json b/parser/testdata/02125_fix_storage_filelog/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02125_fix_storage_filelog/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02125_fix_storage_filelog/query.sql b/parser/testdata/02125_fix_storage_filelog/query.sql new file mode 100644 index 000000000..1ac33586b --- /dev/null +++ b/parser/testdata/02125_fix_storage_filelog/query.sql @@ -0,0 +1,3 @@ +CREATE TABLE log (A String) ENGINE= FileLog('/tmp/aaa.csv', 'CSV'); -- {serverError BAD_ARGUMENTS } +CREATE TABLE log (A String) ENGINE= FileLog('/tmp/aaa.csv', 'CSV'); -- {serverError BAD_ARGUMENTS } +CREATE TABLE log (A String) ENGINE= FileLog('/tmp/aaa.csv', 'CSV'); -- {serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02125_low_cardinality_int256/ast.json b/parser/testdata/02125_low_cardinality_int256/ast.json new file mode 100644 index 000000000..9d5d7405a --- /dev/null +++ b/parser/testdata/02125_low_cardinality_int256/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toInt256 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001431228, + "rows_read": 9, + "bytes_read": 357 + } +} diff --git a/parser/testdata/02125_low_cardinality_int256/metadata.json b/parser/testdata/02125_low_cardinality_int256/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02125_low_cardinality_int256/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02125_low_cardinality_int256/query.sql b/parser/testdata/02125_low_cardinality_int256/query.sql new file mode 100644 index 000000000..c52d0ff2c --- /dev/null +++ b/parser/testdata/02125_low_cardinality_int256/query.sql @@ -0,0 +1 @@ +SELECT toLowCardinality(toInt256(1)); diff --git a/parser/testdata/02125_query_views_log/ast.json b/parser/testdata/02125_query_views_log/ast.json new file mode 100644 index 000000000..aa7897db7 --- /dev/null +++ b/parser/testdata/02125_query_views_log/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001129859, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02125_query_views_log/metadata.json b/parser/testdata/02125_query_views_log/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02125_query_views_log/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02125_query_views_log/query.sql 
b/parser/testdata/02125_query_views_log/query.sql new file mode 100644 index 000000000..a62fac942 --- /dev/null +++ b/parser/testdata/02125_query_views_log/query.sql @@ -0,0 +1,18 @@ +SET output_format_pretty_single_large_number_tip_threshold = 0; + +drop table if exists src; +drop table if exists dst; +drop table if exists mv1; +drop table if exists mv2; + +create table src (key Int) engine=Null(); +create table dst (key Int) engine=Null(); +create materialized view mv1 to dst as select * from src; +create materialized view mv2 to dst as select * from src; + +insert into src select * from numbers(1e6) settings log_queries=1, max_untracked_memory=0, parallel_view_processing=0; +system flush logs query_views_log, query_log; + +-- { echo } +select view_name, read_rows, read_bytes, written_rows, written_bytes from system.query_views_log where startsWith(view_name, currentDatabase() || '.mv') order by view_name format Vertical; +select read_rows, read_bytes, written_rows, written_bytes from system.query_log where type = 'QueryFinish' and query_kind = 'Insert' and current_database = currentDatabase() format Vertical; diff --git a/parser/testdata/02125_recursive_sql_user_defined_functions/ast.json b/parser/testdata/02125_recursive_sql_user_defined_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02125_recursive_sql_user_defined_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02125_recursive_sql_user_defined_functions/metadata.json b/parser/testdata/02125_recursive_sql_user_defined_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02125_recursive_sql_user_defined_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02125_recursive_sql_user_defined_functions/query.sql b/parser/testdata/02125_recursive_sql_user_defined_functions/query.sql new file mode 100644 index 000000000..883ca6f9a --- /dev/null +++ b/parser/testdata/02125_recursive_sql_user_defined_functions/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-parallel + +DROP FUNCTION IF EXISTS 02125_function; +CREATE FUNCTION 02125_function AS x -> 02125_function(x); +SELECT 02125_function(1); --{serverError UNSUPPORTED_METHOD}; +DROP FUNCTION 02125_function; + +DROP FUNCTION IF EXISTS 02125_function_1; +CREATE FUNCTION 02125_function_1 AS x -> 02125_function_2(x); + +DROP FUNCTION IF EXISTS 02125_function_2; +CREATE FUNCTION 02125_function_2 AS x -> 02125_function_1(x); + +SELECT 02125_function_1(1); --{serverError UNSUPPORTED_METHOD}; +SELECT 02125_function_2(2); --{serverError UNSUPPORTED_METHOD}; + +CREATE OR REPLACE FUNCTION 02125_function_2 AS x -> x + 1; + +SELECT 02125_function_1(1); +SELECT 02125_function_2(2); + +DROP FUNCTION 02125_function_1; +DROP FUNCTION 02125_function_2; diff --git a/parser/testdata/02125_transform_decimal_bug/ast.json b/parser/testdata/02125_transform_decimal_bug/ast.json new file mode 100644 index 000000000..9ca8c9886 --- /dev/null +++ b/parser/testdata/02125_transform_decimal_bug/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function transform (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1" + }, 
+ { + "explain": " Literal Array_[UInt64_1]" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001574774, + "rows_read": 14, + "bytes_read": 544 + } +} diff --git a/parser/testdata/02125_transform_decimal_bug/metadata.json b/parser/testdata/02125_transform_decimal_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02125_transform_decimal_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02125_transform_decimal_bug/query.sql b/parser/testdata/02125_transform_decimal_bug/query.sql new file mode 100644 index 000000000..002f60076 --- /dev/null +++ b/parser/testdata/02125_transform_decimal_bug/query.sql @@ -0,0 +1,11 @@ +SELECT transform(1, [1], [toDecimal32(1, 2)]); +SELECT transform(toDecimal32(number, 2), [toDecimal32(3, 2)], [toDecimal32(30, 2)]) FROM system.numbers LIMIT 10; +SELECT transform(toDecimal32(number, 2), [toDecimal32(3, 2)], [toDecimal32(30, 2)], toDecimal32(1000, 2)) FROM system.numbers LIMIT 10; +SELECT transform(number, [3, 5, 11], [toDecimal32(30, 2), toDecimal32(50, 2), toDecimal32(70,2)], toDecimal32(1000, 2)) FROM system.numbers LIMIT 10; +SELECT transform(number, [3, 5, 11], [toDecimal32(30, 2), toDecimal32(50, 2), toDecimal32(70,2)], toDecimal32(1000, 2)) FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], [toDecimal32(30, 2), toDecimal32(50, 2), toDecimal32(70,2)], toDecimal32(1000, 2)) FROM system.numbers LIMIT 10; + + + + + diff --git a/parser/testdata/02126_alter_table_alter_column/ast.json b/parser/testdata/02126_alter_table_alter_column/ast.json new file mode 100644 index 000000000..8ee0a65c8 --- /dev/null +++ b/parser/testdata/02126_alter_table_alter_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alter_column_02126 (children 1)" + }, + { + "explain": " Identifier alter_column_02126" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001147715, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/02126_alter_table_alter_column/metadata.json b/parser/testdata/02126_alter_table_alter_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02126_alter_table_alter_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02126_alter_table_alter_column/query.sql b/parser/testdata/02126_alter_table_alter_column/query.sql new file mode 100644 index 000000000..f86d1575e --- /dev/null +++ b/parser/testdata/02126_alter_table_alter_column/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS alter_column_02126; +CREATE TABLE alter_column_02126 (a Int, x Int, y Int) ENGINE = MergeTree ORDER BY a; +SHOW CREATE TABLE alter_column_02126; +ALTER TABLE alter_column_02126 ALTER COLUMN x TYPE Float32; +SHOW CREATE TABLE alter_column_02126; +ALTER TABLE alter_column_02126 ALTER COLUMN x TYPE Float64, MODIFY COLUMN y Float32; +SHOW CREATE TABLE alter_column_02126; +ALTER TABLE alter_column_02126 MODIFY COLUMN y TYPE Float32; -- { clientError SYNTAX_ERROR } +ALTER TABLE alter_column_02126 ALTER COLUMN y Float32; -- { clientError SYNTAX_ERROR } diff --git 
a/parser/testdata/02126_identity_user_defined_function/ast.json b/parser/testdata/02126_identity_user_defined_function/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02126_identity_user_defined_function/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02126_identity_user_defined_function/metadata.json b/parser/testdata/02126_identity_user_defined_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02126_identity_user_defined_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02126_identity_user_defined_function/query.sql b/parser/testdata/02126_identity_user_defined_function/query.sql new file mode 100644 index 000000000..8a108ed21 --- /dev/null +++ b/parser/testdata/02126_identity_user_defined_function/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-parallel + +DROP FUNCTION IF EXISTS 02126_function; +CREATE FUNCTION 02126_function AS x -> x; +SELECT 02126_function(1); +DROP FUNCTION 02126_function; + +CREATE FUNCTION 02126_function AS () -> x; +SELECT 02126_function(); --{ serverError UNKNOWN_IDENTIFIER } +DROP FUNCTION 02126_function; + +CREATE FUNCTION 02126_function AS () -> 5; +SELECT 02126_function(); +DROP FUNCTION 02126_function; diff --git a/parser/testdata/02126_lc_window_functions/ast.json b/parser/testdata/02126_lc_window_functions/ast.json new file mode 100644 index 000000000..c21f86a3b --- /dev/null +++ b/parser/testdata/02126_lc_window_functions/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function max (alias aid) (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier id" + }, + { + "explain": " WindowDefinition" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001367809, + "rows_read": 8, + "bytes_read": 294 + } +} diff --git a/parser/testdata/02126_lc_window_functions/metadata.json b/parser/testdata/02126_lc_window_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02126_lc_window_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02126_lc_window_functions/query.sql b/parser/testdata/02126_lc_window_functions/query.sql new file mode 100644 index 000000000..6a1fb691a --- /dev/null +++ b/parser/testdata/02126_lc_window_functions/query.sql @@ -0,0 +1,38 @@ +SELECT max(id) OVER () AS aid +FROM +( + SELECT materialize(toLowCardinality('aaaa')) AS id + FROM numbers_mt(1000000) +) +FORMAT `Null`; + +SELECT max(id) OVER (PARTITION BY id) AS id +FROM +( + SELECT materialize('aaaa') AS id + FROM numbers_mt(1000000) +) +FORMAT `Null`; + +SELECT countIf(sym = 'Red') OVER () AS res +FROM +( + SELECT CAST(CAST(number % 5, 'Enum8(\'Red\' = 0, \'Blue\' = 1, \'Yellow\' = 2, \'Black\' = 3, \'White\' = 4)'), 'LowCardinality(String)') AS sym + FROM numbers(10) +); + +SELECT materialize(toLowCardinality('a\0aa')), countIf(toLowCardinality('aaaaaaa\0aaaaaaa\0aaaaaaa\0aaaaaaa\0aaaaaaa\0aaaaaaa\0aaaaaaa\0aaaaaaa\0aaaaaaa\0aaaaaaa\0aaaaaaa\0aaaaaaa\0aaaaaaa\0aaaaaaa\0aaaaaaa\0aaaaaaa\0'), sym = 'Red') OVER (Range BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS res FROM (SELECT CAST(CAST(number % 5, 'Enum8(\'Red\' = 
0, \'Blue\' = 1, \'Yellow\' = 2, \'Black\' = 3, \'White\' = 4)'), 'LowCardinality(String)') AS sym FROM numbers(3)); + +SELECT + NULL, + id, + max(id) OVER (Rows BETWEEN 10 PRECEDING AND UNBOUNDED FOLLOWING) AS aid +FROM +( + SELECT + NULL, + max(id) OVER (), + materialize(toLowCardinality('')) AS id + FROM numbers_mt(0, 1) +) +FORMAT `Null`; diff --git a/parser/testdata/02127_storage_join_settings_with_persistency/ast.json b/parser/testdata/02127_storage_join_settings_with_persistency/ast.json new file mode 100644 index 000000000..a4fd16495 --- /dev/null +++ b/parser/testdata/02127_storage_join_settings_with_persistency/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02127_join_settings_with_persistency_1 (children 1)" + }, + { + "explain": " Identifier 02127_join_settings_with_persistency_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001259602, + "rows_read": 2, + "bytes_read": 128 + } +} diff --git a/parser/testdata/02127_storage_join_settings_with_persistency/metadata.json b/parser/testdata/02127_storage_join_settings_with_persistency/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02127_storage_join_settings_with_persistency/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02127_storage_join_settings_with_persistency/query.sql b/parser/testdata/02127_storage_join_settings_with_persistency/query.sql new file mode 100644 index 000000000..1dc1529eb --- /dev/null +++ b/parser/testdata/02127_storage_join_settings_with_persistency/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS 02127_join_settings_with_persistency_1; +CREATE TABLE 02127_join_settings_with_persistency_1 (k UInt64, s String) ENGINE = Join(ANY, LEFT, k) SETTINGS persistent=1, join_any_take_last_row=0; +SHOW CREATE TABLE 02127_join_settings_with_persistency_1; +DROP TABLE IF EXISTS 02127_join_settings_with_persistency_0; +CREATE TABLE 02127_join_settings_with_persistency_0 (k UInt64, s String) ENGINE = Join(ANY, LEFT, k) SETTINGS persistent=0, join_any_take_last_row=0; +SHOW CREATE TABLE 02127_join_settings_with_persistency_0; diff --git a/parser/testdata/02128_apply_lambda_parsing/ast.json b/parser/testdata/02128_apply_lambda_parsing/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02128_apply_lambda_parsing/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02128_apply_lambda_parsing/metadata.json b/parser/testdata/02128_apply_lambda_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02128_apply_lambda_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02128_apply_lambda_parsing/query.sql b/parser/testdata/02128_apply_lambda_parsing/query.sql new file mode 100644 index 000000000..5fc809ca7 --- /dev/null +++ b/parser/testdata/02128_apply_lambda_parsing/query.sql @@ -0,0 +1,13 @@ +WITH * APPLY lambda(e); -- { clientError SYNTAX_ERROR } +SELECT * APPLY lambda(); -- { clientError SYNTAX_ERROR } +SELECT * APPLY lambda(1); -- { clientError SYNTAX_ERROR } +SELECT * APPLY lambda(x); -- { clientError SYNTAX_ERROR } +SELECT * APPLY lambda(range(1)); -- { clientError SYNTAX_ERROR } +SELECT * APPLY lambda(range(x)); -- { clientError SYNTAX_ERROR } +SELECT * APPLY lambda(1, 2); -- { clientError SYNTAX_ERROR } +SELECT * APPLY lambda(x, y); -- { clientError SYNTAX_ERROR } +SELECT * APPLY lambda((x, y), 2); -- { 
clientError SYNTAX_ERROR } +SELECT * APPLY lambda((x, y), x + y); -- { clientError SYNTAX_ERROR } +SELECT * APPLY lambda(tuple(1), 1); -- { clientError SYNTAX_ERROR } +SELECT * APPLY lambda(tuple(x), 1) FROM numbers(5); +SELECT * APPLY lambda(tuple(x), x + 1) FROM numbers(5); diff --git a/parser/testdata/02128_cast_nullable/ast.json b/parser/testdata/02128_cast_nullable/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02128_cast_nullable/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02128_cast_nullable/metadata.json b/parser/testdata/02128_cast_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02128_cast_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02128_cast_nullable/query.sql b/parser/testdata/02128_cast_nullable/query.sql new file mode 100644 index 000000000..fec686d79 --- /dev/null +++ b/parser/testdata/02128_cast_nullable/query.sql @@ -0,0 +1,5 @@ +-- { echo } +SELECT toUInt32OrDefault(toNullable(toUInt32(1))) SETTINGS cast_keep_nullable=1; +SELECT toUInt32OrDefault(toNullable(toUInt32(1)), toNullable(toUInt32(2))) SETTINGS cast_keep_nullable=1; +SELECT toUInt32OrDefault(toUInt32(1)) SETTINGS cast_keep_nullable=1; +SELECT toUInt32OrDefault(toUInt32(1), toUInt32(2)) SETTINGS cast_keep_nullable=1; diff --git a/parser/testdata/02128_hex_bin_on_uuid/ast.json b/parser/testdata/02128_hex_bin_on_uuid/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02128_hex_bin_on_uuid/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02128_hex_bin_on_uuid/metadata.json b/parser/testdata/02128_hex_bin_on_uuid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02128_hex_bin_on_uuid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02128_hex_bin_on_uuid/query.sql b/parser/testdata/02128_hex_bin_on_uuid/query.sql new file mode 100644 index 000000000..30c0c4b76 --- /dev/null +++ b/parser/testdata/02128_hex_bin_on_uuid/query.sql @@ -0,0 +1,16 @@ +-- length should be 32 +select length(hex(generateUUIDv4())); + +with generateUUIDv4() as uuid, + replace(toString(uuid), '-', '') as str1, + lower(hex(uuid)) as str2 +select str1 = str2; + +-- hex on UUID always generates 32 characters even if there are leading zeros +select lower(hex(toUUID('00000000-80e7-46f8-0000-9d773a2fd319'))); + +-- length should be 128 +select length(bin(generateUUIDv4())); + +-- bin on UUID always generates 128 characters even if there are leading zeros +select bin(toUUID('00000000-80e7-46f8-0000-9d773a2fd319')); \ No newline at end of file diff --git a/parser/testdata/02129_add_column_add_ttl/ast.json b/parser/testdata/02129_add_column_add_ttl/ast.json new file mode 100644 index 000000000..2146981c7 --- /dev/null +++ b/parser/testdata/02129_add_column_add_ttl/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ttl_test_02129 (children 1)" + }, + { + "explain": " Identifier ttl_test_02129" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001073701, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/02129_add_column_add_ttl/metadata.json b/parser/testdata/02129_add_column_add_ttl/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02129_add_column_add_ttl/metadata.json @@ -0,0 +1 @@ +{"todo": true}
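Note on the fixture layout: each testdata directory pairs a query.sql with an ast.json, and the non-trivial ast.json bodies above follow ClickHouse's JSON output format for an EXPLAIN AST result set: one String column named "explain", one row per AST node with leading spaces encoding depth, plus rows/statistics bookkeeping. {"error": true} stands in where no dump was captured, and every metadata.json is still a {"todo": true} placeholder. A minimal sketch of how one such fixture could be reproduced, assuming a running server and clickhouse-client (an assumption; the generation harness is not part of this diff):
-- hypothetical regeneration step for 02129_add_column_add_ttl/ast.json,
-- dumping the AST of the fixture's first statement, e.g. via:
--   clickhouse client --format JSON --query "EXPLAIN AST drop table if exists ttl_test_02129"
EXPLAIN AST drop table if exists ttl_test_02129;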
diff --git a/parser/testdata/02129_add_column_add_ttl/query.sql b/parser/testdata/02129_add_column_add_ttl/query.sql new file mode 100644 index 000000000..09b2138f5 --- /dev/null +++ b/parser/testdata/02129_add_column_add_ttl/query.sql @@ -0,0 +1,43 @@ +drop table if exists ttl_test_02129; + +create table ttl_test_02129(a Int64, b String, d Date) +Engine=MergeTree partition by d order by a +settings min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0, materialize_ttl_recalculate_only = 0; + +system stop ttl merges ttl_test_02129; + +insert into ttl_test_02129 select number, '', '2021-01-01' from numbers(10); +alter table ttl_test_02129 add column c Int64 settings mutations_sync=2; + +insert into ttl_test_02129 select number, '', '2021-01-01', 1 from numbers(10); +alter table ttl_test_02129 modify TTL (d + INTERVAL 1 MONTH) DELETE WHERE c=1 settings mutations_sync=2; + +select * from ttl_test_02129 order by a, b, d, c; +drop table ttl_test_02129; + +drop table if exists ttl_test_02129; + +select '=========='; + +create table ttl_test_02129(a Int64, b String, d Date) +Engine=MergeTree partition by d order by a +settings min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0, materialize_ttl_recalculate_only = 1; + +system stop ttl merges ttl_test_02129; + +insert into ttl_test_02129 select number, '', '2021-01-01' from numbers(10); +alter table ttl_test_02129 add column c Int64 settings mutations_sync=2, alter_sync=2; + +insert into ttl_test_02129 select number, '', '2021-01-01', 1 from numbers(10); +alter table ttl_test_02129 modify TTL (d + INTERVAL 1 MONTH) DELETE WHERE c=1 settings mutations_sync=2, alter_sync=2; + +select * from ttl_test_02129 order by a, b, d, c; + +select '=========='; + +system start ttl merges ttl_test_02129; + +optimize table ttl_test_02129 final; + +select * from ttl_test_02129 order by a, b, d, c; +drop table ttl_test_02129; diff --git a/parser/testdata/02129_window_functions_disable_optimizations/ast.json b/parser/testdata/02129_window_functions_disable_optimizations/ast.json new file mode 100644 index 000000000..c756725d9 --- /dev/null +++ b/parser/testdata/02129_window_functions_disable_optimizations/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000894436, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02129_window_functions_disable_optimizations/metadata.json b/parser/testdata/02129_window_functions_disable_optimizations/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02129_window_functions_disable_optimizations/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02129_window_functions_disable_optimizations/query.sql b/parser/testdata/02129_window_functions_disable_optimizations/query.sql new file mode 100644 index 000000000..20a4f2bcf --- /dev/null +++ b/parser/testdata/02129_window_functions_disable_optimizations/query.sql @@ -0,0 +1,32 @@ +SET optimize_rewrite_sum_if_to_count_if = 1; + +SELECT if(number % 10 = 0, 1, 0) AS dummy, +sum(dummy) OVER w +FROM numbers(10) +WINDOW w AS (ORDER BY number ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW); + +SET optimize_arithmetic_operations_in_aggregate_functions=1; +SELECT + *, + if((number % 2) = 0, 0.5, 1) AS a, + 30 AS b, + sum(a * b) OVER (ORDER BY number ASC) AS s +FROM numbers(10); + +SET optimize_aggregators_of_group_by_keys=1; + +SELECT + *, + if(number = 
1, 1, 0) as a, + max(a) OVER (ORDER BY number ASC) AS s +FROM numbers(10); + +SET optimize_group_by_function_keys = 1; +SELECT round(sum(log(2) * number), 6) AS k FROM numbers(10000) +GROUP BY (number % 2) * (number % 3), number % 3, number % 2 +HAVING sum(log(2) * number) > 346.57353 ORDER BY k; + +SELECT round(sum(log(2) * number), 6) AS k FROM numbers(10000) +GROUP BY (number % 2) * (number % 3), number % 3, number % 2 +HAVING sum(log(2) * number) > 346.57353 ORDER BY k +SETTINGS enable_analyzer=1; diff --git a/parser/testdata/02131_materialize_column_cast/ast.json b/parser/testdata/02131_materialize_column_cast/ast.json new file mode 100644 index 000000000..955cd7406 --- /dev/null +++ b/parser/testdata/02131_materialize_column_cast/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_materialize_column (children 1)" + }, + { + "explain": " Identifier t_materialize_column" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001091492, + "rows_read": 2, + "bytes_read": 92 + } +} diff --git a/parser/testdata/02131_materialize_column_cast/metadata.json b/parser/testdata/02131_materialize_column_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02131_materialize_column_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02131_materialize_column_cast/query.sql b/parser/testdata/02131_materialize_column_cast/query.sql new file mode 100644 index 000000000..3bfeaf5ba --- /dev/null +++ b/parser/testdata/02131_materialize_column_cast/query.sql @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS t_materialize_column; + +CREATE TABLE t_materialize_column (i Int32) +ENGINE = MergeTree ORDER BY i PARTITION BY i +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO t_materialize_column VALUES (1); + +ALTER TABLE t_materialize_column ADD COLUMN s LowCardinality(String) DEFAULT toString(i); +ALTER TABLE t_materialize_column MATERIALIZE COLUMN s SETTINGS mutations_sync = 2; + +SELECT name, column, type FROM system.parts_columns +WHERE table = 't_materialize_column' AND database = currentDatabase() AND active +ORDER BY name, column; + +SELECT '==========='; + +INSERT INTO t_materialize_column (i) VALUES (2); + +SELECT name, column, type FROM system.parts_columns +WHERE table = 't_materialize_column' AND database = currentDatabase() AND active +ORDER BY name, column; + +SELECT '==========='; + +ALTER TABLE t_materialize_column ADD INDEX s_bf (s) TYPE bloom_filter(0.01) GRANULARITY 1; +ALTER TABLE t_materialize_column MATERIALIZE INDEX s_bf SETTINGS mutations_sync = 2; + +SELECT name, column, type FROM system.parts_columns +WHERE table = 't_materialize_column' AND database = currentDatabase() AND active +ORDER BY name, column; + +SELECT * FROM t_materialize_column ORDER BY i; + +DROP TABLE t_materialize_column; diff --git a/parser/testdata/02131_mv_many_chunks_bug/ast.json b/parser/testdata/02131_mv_many_chunks_bug/ast.json new file mode 100644 index 000000000..46ee77e49 --- /dev/null +++ b/parser/testdata/02131_mv_many_chunks_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001162357, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02131_mv_many_chunks_bug/metadata.json 
b/parser/testdata/02131_mv_many_chunks_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02131_mv_many_chunks_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02131_mv_many_chunks_bug/query.sql b/parser/testdata/02131_mv_many_chunks_bug/query.sql new file mode 100644 index 000000000..f3f5f6a00 --- /dev/null +++ b/parser/testdata/02131_mv_many_chunks_bug/query.sql @@ -0,0 +1,16 @@ +drop table if exists t; +drop table if exists t_mv; + +create table t (x UInt64) engine = MergeTree order by x; +create materialized view t_mv engine = MergeTree order by tuple() as select uniq(x), bitAnd(x, 255) as y from t group by y; + +set max_bytes_before_external_group_by = 1000000000; +set max_bytes_ratio_before_external_group_by = 0; +set group_by_two_level_threshold = 100; +set min_insert_block_size_rows = 100; + +insert into t select number from numbers(300); +select count() from (select y from t_mv group by y); + +drop table if exists t; +drop table if exists t_mv; diff --git a/parser/testdata/02131_remove_columns_in_subquery/ast.json b/parser/testdata/02131_remove_columns_in_subquery/ast.json new file mode 100644 index 000000000..f2347534e --- /dev/null +++ b/parser/testdata/02131_remove_columns_in_subquery/ast.json @@ -0,0 +1,97 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias a)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_5" + } + ], + + "rows": 25, + + "statistics": + { + "elapsed": 0.001256833, + "rows_read": 25, + "bytes_read": 1074 + } +} diff --git a/parser/testdata/02131_remove_columns_in_subquery/metadata.json b/parser/testdata/02131_remove_columns_in_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02131_remove_columns_in_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02131_remove_columns_in_subquery/query.sql b/parser/testdata/02131_remove_columns_in_subquery/query.sql new file mode 100644 index 000000000..c765d989d --- /dev/null +++ b/parser/testdata/02131_remove_columns_in_subquery/query.sql @@ -0,0 +1,2 @@ +select count(1) from (SELECT 1 AS a, count(1) FROM numbers(5)); +select count(1) from (SELECT 1 AS a, count(1) + 1 FROM 
numbers(5)); \ No newline at end of file diff --git a/parser/testdata/02131_row_policies_combination/ast.json b/parser/testdata/02131_row_policies_combination/ast.json new file mode 100644 index 000000000..3e267c39a --- /dev/null +++ b/parser/testdata/02131_row_policies_combination/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02131_rptable (children 1)" + }, + { + "explain": " Identifier 02131_rptable" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000946975, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/02131_row_policies_combination/metadata.json b/parser/testdata/02131_row_policies_combination/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02131_row_policies_combination/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02131_row_policies_combination/query.sql b/parser/testdata/02131_row_policies_combination/query.sql new file mode 100644 index 000000000..02f2365ee --- /dev/null +++ b/parser/testdata/02131_row_policies_combination/query.sql @@ -0,0 +1,72 @@ +DROP TABLE IF EXISTS 02131_rptable; +CREATE TABLE 02131_rptable (x UInt8) ENGINE = MergeTree ORDER BY x; +INSERT INTO 02131_rptable VALUES (1), (2), (3), (4); + +DROP ROW POLICY IF EXISTS 02131_filter_1 ON 02131_rptable; +DROP ROW POLICY IF EXISTS 02131_filter_2 ON 02131_rptable; +DROP ROW POLICY IF EXISTS 02131_filter_3 ON 02131_rptable; +DROP ROW POLICY IF EXISTS 02131_filter_4 ON 02131_rptable; +DROP ROW POLICY IF EXISTS 02131_filter_5 ON 02131_rptable; + +SELECT 'None'; +SELECT * FROM 02131_rptable; + +CREATE ROW POLICY 02131_filter_1 ON 02131_rptable USING x=1 AS permissive TO ALL; +SELECT 'R1: x == 1'; +SELECT * FROM 02131_rptable; + +CREATE ROW POLICY 02131_filter_2 ON 02131_rptable USING x=2 AS permissive TO ALL; +SELECT 'R1, R2: (x == 1) OR (x == 2)'; +SELECT * FROM 02131_rptable; + +CREATE ROW POLICY 02131_filter_3 ON 02131_rptable USING x=3 AS permissive TO ALL; +SELECT 'R1, R2, R3: (x == 1) OR (x == 2) OR (x == 3)'; +SELECT * FROM 02131_rptable; + +SELECT 'R1, R2, R3 + additional_table_filters and PREWHERE: (x == 1) OR (x == 2) OR (x == 3) AND (x < 3) AND (x > 1)'; +SELECT * FROM 02131_rptable +PREWHERE x >= 2 +SETTINGS additional_table_filters = {'02131_rptable': 'x > 1'} +; + +SELECT 'R1, R2, R3 + additional_result_filter and PREWHERE: (x == 1) OR (x == 2) OR (x == 3) AND (x < 3) AND (x > 1)'; +SELECT * FROM 02131_rptable +PREWHERE x >= 2 +SETTINGS additional_result_filter = 'x > 1' +; + +SELECT 'R1, R2, R3 + additional_table_filters and WHERE: (x == 1) OR (x == 2) OR (x == 3) AND (x < 3) AND (x > 1)'; +SELECT * FROM 02131_rptable +WHERE x >= 2 +SETTINGS additional_table_filters = {'02131_rptable': 'x > 1'} +; + +CREATE ROW POLICY 02131_filter_4 ON 02131_rptable USING x<=2 AS restrictive TO ALL; +SELECT 'R1, R2, R3, R4: ((x == 1) OR (x == 2) OR (x == 3)) AND (x <= 2)'; +SELECT * FROM 02131_rptable; + +CREATE ROW POLICY 02131_filter_5 ON 02131_rptable USING x>=2 AS restrictive TO ALL; +SELECT 'R1, R2, R3, R4, R5: ((x == 1) OR (x == 2) OR (x == 3)) AND (x <= 2) AND (x >= 2)'; +SELECT * FROM 02131_rptable; + +DROP ROW POLICY 02131_filter_1 ON 02131_rptable; +SELECT 'R2, R3, R4, R5: ((x == 2) OR (x == 3)) AND (x <= 2) AND (x >= 2)'; +SELECT * FROM 02131_rptable; + +DROP ROW POLICY 02131_filter_2 ON 02131_rptable; +SELECT 'R3, R4, R5: (x == 3) AND (x <= 2) AND (x >= 2)'; +SELECT * FROM 02131_rptable; + +DROP ROW POLICY 
02131_filter_3 ON 02131_rptable; +SELECT 'R4, R5: (x <= 2) AND (x >= 2)'; +SELECT * FROM 02131_rptable; + +DROP ROW POLICY 02131_filter_4 ON 02131_rptable; +SELECT 'R5: (x >= 2)'; +SELECT * FROM 02131_rptable; + +DROP ROW POLICY 02131_filter_5 ON 02131_rptable; +SELECT 'None'; +SELECT * FROM 02131_rptable; + +DROP TABLE 02131_rptable; diff --git a/parser/testdata/02131_skip_index_not_materialized/ast.json b/parser/testdata/02131_skip_index_not_materialized/ast.json new file mode 100644 index 000000000..b839a486d --- /dev/null +++ b/parser/testdata/02131_skip_index_not_materialized/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_index_non_materialized (children 1)" + }, + { + "explain": " Identifier t_index_non_materialized" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00119515, + "rows_read": 2, + "bytes_read": 100 + } +} diff --git a/parser/testdata/02131_skip_index_not_materialized/metadata.json b/parser/testdata/02131_skip_index_not_materialized/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02131_skip_index_not_materialized/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02131_skip_index_not_materialized/query.sql b/parser/testdata/02131_skip_index_not_materialized/query.sql new file mode 100644 index 000000000..cae0b1d9f --- /dev/null +++ b/parser/testdata/02131_skip_index_not_materialized/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS t_index_non_materialized; + +CREATE TABLE t_index_non_materialized (a UInt32) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_index_non_materialized VALUES (1); + +ALTER TABLE t_index_non_materialized ADD INDEX ind_set (a) TYPE set(1) GRANULARITY 1; +ALTER TABLE t_index_non_materialized ADD INDEX ind_minmax (a) TYPE minmax() GRANULARITY 1; + +SELECT count() FROM t_index_non_materialized WHERE a = 1; + +DROP TABLE t_index_non_materialized; diff --git a/parser/testdata/02131_used_row_policies_in_query_log/ast.json b/parser/testdata/02131_used_row_policies_in_query_log/ast.json new file mode 100644 index 000000000..2f01b576a --- /dev/null +++ b/parser/testdata/02131_used_row_policies_in_query_log/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02131_rqtable (children 1)" + }, + { + "explain": " Identifier 02131_rqtable" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001585038, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/02131_used_row_policies_in_query_log/metadata.json b/parser/testdata/02131_used_row_policies_in_query_log/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02131_used_row_policies_in_query_log/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02131_used_row_policies_in_query_log/query.sql b/parser/testdata/02131_used_row_policies_in_query_log/query.sql new file mode 100644 index 000000000..9172eecf9 --- /dev/null +++ b/parser/testdata/02131_used_row_policies_in_query_log/query.sql @@ -0,0 +1,58 @@ +DROP TABLE IF EXISTS 02131_rqtable; +CREATE TABLE 02131_rqtable (x UInt8) ENGINE = MergeTree ORDER BY x; +INSERT INTO 02131_rqtable VALUES (1), (2), (3), (4); + +DROP ROW POLICY IF EXISTS 02131_filter_1 ON 02131_rqtable; +DROP ROW POLICY IF EXISTS 02131_filter_2 ON 02131_rqtable; +DROP ROW POLICY IF EXISTS 02131_filter_3 ON 02131_rqtable; +DROP ROW 
POLICY IF EXISTS 02131_filter_4 ON 02131_rqtable; +DROP ROW POLICY IF EXISTS 02131_filter_5 ON 02131_rqtable; + +SELECT 'None'; +SELECT * FROM 02131_rqtable; + +CREATE ROW POLICY 02131_filter_1 ON 02131_rqtable USING x=1 AS permissive TO ALL; +SELECT 'R1: x == 1'; +SELECT * FROM 02131_rqtable; + +CREATE ROW POLICY 02131_filter_2 ON 02131_rqtable USING x=2 AS permissive TO ALL; +SELECT 'R1, R2: (x == 1) OR (x == 2)'; +SELECT * FROM 02131_rqtable; + +CREATE ROW POLICY 02131_filter_3 ON 02131_rqtable USING x=3 AS permissive TO ALL; +SELECT 'R1, R2, R3: (x == 1) OR (x == 2) OR (x == 3)'; +SELECT * FROM 02131_rqtable; + +CREATE ROW POLICY 02131_filter_4 ON 02131_rqtable USING x<=2 AS restrictive TO ALL; +SELECT 'R1, R2, R3, R4: ((x == 1) OR (x == 2) OR (x == 3)) AND (x <= 2)'; +SELECT * FROM 02131_rqtable; + +CREATE ROW POLICY 02131_filter_5 ON 02131_rqtable USING x>=2 AS restrictive TO ALL; +SELECT 'R1, R2, R3, R4, R5: ((x == 1) OR (x == 2) OR (x == 3)) AND (x <= 2) AND (x >= 2)'; +SELECT * FROM 02131_rqtable; + +DROP ROW POLICY 02131_filter_1 ON 02131_rqtable; +SELECT 'R2, R3, R4, R5: ((x == 2) OR (x == 3)) AND (x <= 2) AND (x >= 2)'; +SELECT * FROM 02131_rqtable; + +DROP ROW POLICY 02131_filter_2 ON 02131_rqtable; +SELECT 'R3, R4, R5: (x == 3) AND (x <= 2) AND (x >= 2)'; +SELECT * FROM 02131_rqtable; + +DROP ROW POLICY 02131_filter_3 ON 02131_rqtable; +SELECT 'R4, R5: (x <= 2) AND (x >= 2)'; +SELECT * FROM 02131_rqtable; + +DROP ROW POLICY 02131_filter_4 ON 02131_rqtable; +SELECT 'R5: (x >= 2)'; +SELECT * FROM 02131_rqtable; + +DROP ROW POLICY 02131_filter_5 ON 02131_rqtable; +SELECT 'None'; +SELECT * FROM 02131_rqtable; + +DROP TABLE 02131_rqtable; + +SELECT 'Check system.query_log'; +SYSTEM FLUSH LOGS query_log; +SELECT query, used_row_policies FROM system.query_log WHERE current_database == currentDatabase() AND type == 'QueryStart' AND query_kind == 'Select' ORDER BY event_time_microseconds; diff --git a/parser/testdata/02132_empty_mutation_livelock/ast.json b/parser/testdata/02132_empty_mutation_livelock/ast.json new file mode 100644 index 000000000..a48915441 --- /dev/null +++ b/parser/testdata/02132_empty_mutation_livelock/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery a8x (children 1)" + }, + { + "explain": " Identifier a8x" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001229438, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02132_empty_mutation_livelock/metadata.json b/parser/testdata/02132_empty_mutation_livelock/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02132_empty_mutation_livelock/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02132_empty_mutation_livelock/query.sql b/parser/testdata/02132_empty_mutation_livelock/query.sql new file mode 100644 index 000000000..186199d4e --- /dev/null +++ b/parser/testdata/02132_empty_mutation_livelock/query.sql @@ -0,0 +1,12 @@ +drop table if exists a8x; + +set empty_result_for_aggregation_by_empty_set=1; +create table a8x ENGINE = MergeTree ORDER BY tuple() settings min_bytes_for_wide_part=0 as SELECT number FROM system.numbers limit 100; + +select count() from a8x; + +set mutations_sync=1; +alter table a8x update number=0 WHERE number=-3; + +select count() from a8x; +drop table if exists a8x; diff --git a/parser/testdata/02133_classification/ast.json b/parser/testdata/02133_classification/ast.json new file mode 100644 index 
000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02133_classification/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02133_classification/metadata.json b/parser/testdata/02133_classification/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02133_classification/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02133_classification/query.sql b/parser/testdata/02133_classification/query.sql new file mode 100644 index 000000000..7788ece28 --- /dev/null +++ b/parser/testdata/02133_classification/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: depends on cld2 and nlp-data + +SET allow_experimental_nlp_functions = 1; + +SELECT detectLanguage('Они сошлись. Волна и камень, Стихи и проза, лед и пламень, Не столь различны меж собой.'); +SELECT detectLanguage('Sweet are the uses of adversity which, like the toad, ugly and venomous, wears yet a precious jewel in his head.'); +SELECT detectLanguage('A vaincre sans peril, on triomphe sans gloire.'); +SELECT detectLanguage('二兎を追う者は一兎をも得ず'); +SELECT detectLanguage('有情饮水饱,无情食饭饥。'); +SELECT detectLanguage('*****///// _____ ,,,,,,,, .....'); +SELECT detectLanguageMixed('二兎を追う者は一兎をも得ず二兎を追う者は一兎をも得ず A vaincre sans peril, on triomphe sans gloire.'); +SELECT detectLanguageMixed('어디든 가치가 있는 곳으로 가려면 지름길은 없다'); +SELECT detectLanguageMixed('*****///// _____ ,,,,,,,, .....'); + +SELECT detectCharset('Plain English'); +SELECT detectLanguageUnknown('Plain English'); + +SELECT detectTonality('милая кошка'); +SELECT detectTonality('ненависть к людям'); +SELECT detectTonality('обычная прогулка по ближайшему парку'); + +SELECT detectProgrammingLanguage('#include <iostream>'); diff --git a/parser/testdata/02133_distributed_queries_formatting/ast.json b/parser/testdata/02133_distributed_queries_formatting/ast.json new file mode 100644 index 000000000..1425ee516 --- /dev/null +++ b/parser/testdata/02133_distributed_queries_formatting/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function cluster (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier test_cluster_two_shards" + }, + { + "explain": " Function view (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'Hello' (alias all)" + }, + { + "explain": " Literal 'World' (alias distinct)" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001381018, + "rows_read": 19, + "bytes_read": 817 + } +} diff --git a/parser/testdata/02133_distributed_queries_formatting/metadata.json b/parser/testdata/02133_distributed_queries_formatting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02133_distributed_queries_formatting/metadata.json @@ -0,0 +1 @@ 
+{"todo": true} diff --git a/parser/testdata/02133_distributed_queries_formatting/query.sql b/parser/testdata/02133_distributed_queries_formatting/query.sql new file mode 100644 index 000000000..3015ddf18 --- /dev/null +++ b/parser/testdata/02133_distributed_queries_formatting/query.sql @@ -0,0 +1 @@ +SELECT * FROM cluster(test_cluster_two_shards, view(SELECT 'Hello' AS all, 'World' AS distinct)); diff --git a/parser/testdata/02133_final_prewhere_where_lowcardinality_replacing/ast.json b/parser/testdata/02133_final_prewhere_where_lowcardinality_replacing/ast.json new file mode 100644 index 000000000..351b38948 --- /dev/null +++ b/parser/testdata/02133_final_prewhere_where_lowcardinality_replacing/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery errors_local (children 1)" + }, + { + "explain": " Identifier errors_local" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001274197, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/02133_final_prewhere_where_lowcardinality_replacing/metadata.json b/parser/testdata/02133_final_prewhere_where_lowcardinality_replacing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02133_final_prewhere_where_lowcardinality_replacing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02133_final_prewhere_where_lowcardinality_replacing/query.sql b/parser/testdata/02133_final_prewhere_where_lowcardinality_replacing/query.sql new file mode 100644 index 000000000..a801fe086 --- /dev/null +++ b/parser/testdata/02133_final_prewhere_where_lowcardinality_replacing/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS errors_local; + +CREATE TABLE errors_local (level LowCardinality(String)) ENGINE=ReplacingMergeTree ORDER BY level settings min_bytes_for_wide_part = '10000000'; +insert into errors_local select toString(number) from numbers(10000); + +SELECT toTypeName(level) FROM errors_local FINAL PREWHERE isNotNull(level) WHERE isNotNull(level) LIMIT 1; + +DROP TABLE errors_local; + +CREATE TABLE errors_local(level LowCardinality(String)) ENGINE=ReplacingMergeTree ORDER BY level; +insert into errors_local select toString(number) from numbers(10000); + +SELECT toTypeName(level) FROM errors_local FINAL PREWHERE isNotNull(level) WHERE isNotNull(level) LIMIT 1; + +DROP TABLE errors_local; diff --git a/parser/testdata/02133_issue_32458/ast.json b/parser/testdata/02133_issue_32458/ast.json new file mode 100644 index 000000000..06b61804f --- /dev/null +++ b/parser/testdata/02133_issue_32458/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001009777, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02133_issue_32458/metadata.json b/parser/testdata/02133_issue_32458/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02133_issue_32458/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02133_issue_32458/query.sql b/parser/testdata/02133_issue_32458/query.sql new file mode 100644 index 000000000..16af361db --- /dev/null +++ b/parser/testdata/02133_issue_32458/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (`id` Int32, `key` String) ENGINE = Memory; 
+CREATE TABLE t2 (`id` Int32, `key` String) ENGINE = Memory; + +INSERT INTO t1 VALUES (0, ''); +INSERT INTO t2 VALUES (0, ''); + +SELECT * FROM t1 ANY INNER JOIN t2 ON ((NULL = t1.key) = t2.id) AND (('' = t1.key) = t2.id); + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; diff --git a/parser/testdata/02136_scalar_subquery_metrics/ast.json b/parser/testdata/02136_scalar_subquery_metrics/ast.json new file mode 100644 index 000000000..0d67371de --- /dev/null +++ b/parser/testdata/02136_scalar_subquery_metrics/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '#02136_scalar_subquery_1'" + }, + { + "explain": " Subquery (alias n) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function max (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1000" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001109952, + "rows_read": 19, + "bytes_read": 815 + } +} diff --git a/parser/testdata/02136_scalar_subquery_metrics/metadata.json b/parser/testdata/02136_scalar_subquery_metrics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02136_scalar_subquery_metrics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02136_scalar_subquery_metrics/query.sql b/parser/testdata/02136_scalar_subquery_metrics/query.sql new file mode 100644 index 000000000..6e29bbb1d --- /dev/null +++ b/parser/testdata/02136_scalar_subquery_metrics/query.sql @@ -0,0 +1,13 @@ +SELECT '#02136_scalar_subquery_1', (SELECT max(number) FROM numbers(1000)) as n; +SELECT '#02136_scalar_subquery_2', (SELECT max(number) FROM numbers(1000)) as n, (SELECT min(number) FROM numbers(1000)) as n2; +SELECT '#02136_scalar_subquery_3', (SELECT max(number) FROM numbers(1000)) as n, (SELECT max(number) FROM numbers(1000)) as n2; -- Cached +SELECT '#02136_scalar_subquery_4', (SELECT max(number) FROM numbers(1000)) as n FROM system.numbers LIMIT 2; -- Cached + +SYSTEM FLUSH LOGS query_log; +SELECT read_rows, query FROM system.query_log +WHERE + event_date >= yesterday() + AND type = 'QueryFinish' + AND current_database == currentDatabase() + AND query LIKE 'SELECT ''#02136_scalar_subquery_%' +ORDER BY query ASC; diff --git a/parser/testdata/02137_mv_into_join/ast.json b/parser/testdata/02137_mv_into_join/ast.json new file mode 100644 index 000000000..6c2c1e493 --- /dev/null +++ b/parser/testdata/02137_mv_into_join/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery main (children 3)" + }, + { + "explain": " Identifier main" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " 
ExpressionList (children 4)" + }, + { + "explain": " ColumnDeclaration id (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration color (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration section (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration description (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001304322, + "rows_read": 16, + "bytes_read": 569 + } +} diff --git a/parser/testdata/02137_mv_into_join/metadata.json b/parser/testdata/02137_mv_into_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02137_mv_into_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02137_mv_into_join/query.sql b/parser/testdata/02137_mv_into_join/query.sql new file mode 100644 index 000000000..cca896ac6 --- /dev/null +++ b/parser/testdata/02137_mv_into_join/query.sql @@ -0,0 +1,17 @@ +CREATE TABLE main ( `id` String, `color` String, `section` String, `description` String) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE destination_join ( `key` String, `id` String, `color` String, `section` String, `description` String) ENGINE = Join(ANY, LEFT, key); +CREATE TABLE destination_set (`key` String) ENGINE = Set; + +CREATE MATERIALIZED VIEW mv_to_join TO `destination_join` AS SELECT concat(id, '_', color) AS key, * FROM main; +CREATE MATERIALIZED VIEW mv_to_set TO `destination_set` AS SELECT key FROM destination_join; + +INSERT INTO main VALUES ('sku_0001','black','women','nice shirt'); +SELECT * FROM main; +SELECT * FROM destination_join; +SELECT * FROM destination_join WHERE key in destination_set; + +DROP TABLE mv_to_set; +DROP TABLE destination_set; +DROP TABLE mv_to_join; +DROP TABLE destination_join; +DROP TABLE main; diff --git a/parser/testdata/02139_MV_with_scalar_subquery/ast.json b/parser/testdata/02139_MV_with_scalar_subquery/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02139_MV_with_scalar_subquery/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02139_MV_with_scalar_subquery/metadata.json b/parser/testdata/02139_MV_with_scalar_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02139_MV_with_scalar_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02139_MV_with_scalar_subquery/query.sql b/parser/testdata/02139_MV_with_scalar_subquery/query.sql new file mode 100644 index 000000000..63c894cfb --- /dev/null +++ b/parser/testdata/02139_MV_with_scalar_subquery/query.sql @@ -0,0 +1,24 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/9587#issuecomment-944431385 + +CREATE TABLE source (a Int32) ENGINE=MergeTree() ORDER BY tuple(); +CREATE TABLE source_null AS source ENGINE=Null; +CREATE TABLE dest_a (count UInt32, min Int32, max Int32, count_subquery Int32, min_subquery Int32, max_subquery Int32) ENGINE=MergeTree() ORDER BY tuple(); + +CREATE MATERIALIZED VIEW mv_null TO source_null AS SELECT * FROM source; +CREATE MATERIALIZED VIEW mv_a to dest_a AS +SELECT + count() AS count, + min(a) AS min, + max(a) AS max, + (SELECT count() FROM source_null) AS count_subquery, + (SELECT 
min(a) FROM source_null) AS min_subquery, + (SELECT max(a) FROM source_null) AS max_subquery +FROM source_null +GROUP BY count_subquery, min_subquery, max_subquery; + +SET optimize_trivial_insert_select = 1; +INSERT INTO source SELECT number FROM numbers(2000) SETTINGS min_insert_block_size_rows=1500, max_insert_block_size=1500; + +SELECT count() FROM source; +SELECT count() FROM dest_a; +SELECT * from dest_a ORDER BY count DESC; diff --git a/parser/testdata/02144_avg_ubsan/ast.json b/parser/testdata/02144_avg_ubsan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02144_avg_ubsan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02144_avg_ubsan/metadata.json b/parser/testdata/02144_avg_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02144_avg_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02144_avg_ubsan/query.sql b/parser/testdata/02144_avg_ubsan/query.sql new file mode 100644 index 000000000..7c5196333 --- /dev/null +++ b/parser/testdata/02144_avg_ubsan/query.sql @@ -0,0 +1,11 @@ +-- { echo } + +-- Aggregate function 'avg' allows overflow with two's complement arithmetic. +-- This contradicts standard SQL semantics and we are totally fine with it. + +-- AggregateFunctionAvg::add +SELECT avg(-8000000000000000000) FROM (SELECT *, 1 AS k FROM numbers(65535*2)) GROUP BY k; +-- AggregateFunctionAvg::addBatchSinglePlace +SELECT avg(-8000000000000000000) FROM numbers(65535 * 2); +-- AggregateFunctionAvg::addBatchSinglePlaceNotNull +SELECT avg(toNullable(-8000000000000000000)) FROM numbers(65535 * 2); diff --git a/parser/testdata/02146_mv_non_phys/ast.json b/parser/testdata/02146_mv_non_phys/ast.json new file mode 100644 index 000000000..1f33bc703 --- /dev/null +++ b/parser/testdata/02146_mv_non_phys/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mv_02146 (children 1)" + }, + { + "explain": " Identifier mv_02146" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000941545, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/02146_mv_non_phys/metadata.json b/parser/testdata/02146_mv_non_phys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02146_mv_non_phys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02146_mv_non_phys/query.sql b/parser/testdata/02146_mv_non_phys/query.sql new file mode 100644 index 000000000..4b15900fe --- /dev/null +++ b/parser/testdata/02146_mv_non_phys/query.sql @@ -0,0 +1,2 @@ +drop table if exists mv_02146; +create materialized view mv_02146 engine=MergeTree() order by number as select * from numbers(10); -- { serverError QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW } diff --git a/parser/testdata/02148_cast_type_parsing/ast.json b/parser/testdata/02148_cast_type_parsing/ast.json new file mode 100644 index 000000000..61b71579c --- /dev/null +++ b/parser/testdata/02148_cast_type_parsing/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain":
" Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(UInt64_1, 'Hello')" + }, + { + "explain": " Literal Tuple_(UInt64_2, 'World')" + }, + { + "explain": " Literal 'Array(Tuple(a UInt64, b String))'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001109481, + "rows_read": 11, + "bytes_read": 464 + } +} diff --git a/parser/testdata/02148_cast_type_parsing/metadata.json b/parser/testdata/02148_cast_type_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02148_cast_type_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02148_cast_type_parsing/query.sql b/parser/testdata/02148_cast_type_parsing/query.sql new file mode 100644 index 000000000..f5d902360 --- /dev/null +++ b/parser/testdata/02148_cast_type_parsing/query.sql @@ -0,0 +1 @@ +SELECT CAST([(1, 'Hello'), (2, 'World')] AS Array(Tuple(a UInt64, b String))); diff --git a/parser/testdata/02148_issue_32737/ast.json b/parser/testdata/02148_issue_32737/ast.json new file mode 100644 index 000000000..2c90b3419 --- /dev/null +++ b/parser/testdata/02148_issue_32737/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function fuzzBits (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toFixedString (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal UInt64_200" + }, + { + "explain": " Literal Float64_0.99" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001478266, + "rows_read": 18, + "bytes_read": 692 + } +} diff --git a/parser/testdata/02148_issue_32737/metadata.json b/parser/testdata/02148_issue_32737/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02148_issue_32737/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02148_issue_32737/query.sql b/parser/testdata/02148_issue_32737/query.sql new file mode 100644 index 000000000..c8fbac457 --- /dev/null +++ b/parser/testdata/02148_issue_32737/query.sql @@ -0,0 +1,3 @@ +SELECT fuzzBits(toFixedString('', 200), 0.99) from numbers(1) FORMAT Null; +SELECT fuzzBits(toFixedString('', 200), 0.99) from numbers(128) FORMAT Null; +SELECT fuzzBits(toFixedString('', 200), 0.99) from numbers(60000) FORMAT Null; diff --git a/parser/testdata/02148_sql_user_defined_function_subquery/ast.json b/parser/testdata/02148_sql_user_defined_function_subquery/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02148_sql_user_defined_function_subquery/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02148_sql_user_defined_function_subquery/metadata.json b/parser/testdata/02148_sql_user_defined_function_subquery/metadata.json new file 
mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02148_sql_user_defined_function_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02148_sql_user_defined_function_subquery/query.sql b/parser/testdata/02148_sql_user_defined_function_subquery/query.sql new file mode 100644 index 000000000..cc62d1ac4 --- /dev/null +++ b/parser/testdata/02148_sql_user_defined_function_subquery/query.sql @@ -0,0 +1,35 @@ +-- Tags: no-parallel + +DROP FUNCTION IF EXISTS 02148_test_function; +CREATE FUNCTION 02148_test_function AS () -> (SELECT 1); + +SELECT 02148_test_function(); + +CREATE OR REPLACE FUNCTION 02148_test_function AS () -> (SELECT 2); + +SELECT 02148_test_function(); + +DROP FUNCTION 02148_test_function; + +CREATE FUNCTION 02148_test_function AS (x) -> (SELECT x + 1); +SELECT 02148_test_function(1); + +DROP FUNCTION IF EXISTS 02148_test_function_nested; +CREATE FUNCTION 02148_test_function_nested AS (x) -> 02148_test_function(x + 2); +SELECT 02148_test_function_nested(1); + +DROP FUNCTION 02148_test_function; +DROP FUNCTION 02148_test_function_nested; + +DROP TABLE IF EXISTS 02148_test_table; +CREATE TABLE 02148_test_table (id UInt64, value String) ENGINE=TinyLog; +INSERT INTO 02148_test_table VALUES (0, 'Value'); + +CREATE FUNCTION 02148_test_function AS () -> (SELECT * FROM 02148_test_table LIMIT 1); +SELECT 02148_test_function(); + +CREATE OR REPLACE FUNCTION 02148_test_function AS () -> (SELECT value FROM 02148_test_table LIMIT 1); +SELECT 02148_test_function(); + +DROP FUNCTION 02148_test_function; +DROP TABLE 02148_test_table; diff --git a/parser/testdata/02149_issue_32487/ast.json b/parser/testdata/02149_issue_32487/ast.json new file mode 100644 index 000000000..72a6aa44b --- /dev/null +++ b/parser/testdata/02149_issue_32487/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function topKWeightedState (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function now (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001258469, + "rows_read": 12, + "bytes_read": 432 + } +} diff --git a/parser/testdata/02149_issue_32487/metadata.json b/parser/testdata/02149_issue_32487/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02149_issue_32487/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02149_issue_32487/query.sql b/parser/testdata/02149_issue_32487/query.sql new file mode 100644 index 000000000..4e75c9817 --- /dev/null +++ b/parser/testdata/02149_issue_32487/query.sql @@ -0,0 +1 @@ +SELECT topKWeightedState(2)(now(), 1) FORMAT Null; diff --git a/parser/testdata/02149_read_in_order_fixed_prefix/ast.json b/parser/testdata/02149_read_in_order_fixed_prefix/ast.json new file mode 100644 index 000000000..97f310f5a --- /dev/null +++ b/parser/testdata/02149_read_in_order_fixed_prefix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001125743, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02149_read_in_order_fixed_prefix/metadata.json b/parser/testdata/02149_read_in_order_fixed_prefix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02149_read_in_order_fixed_prefix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02149_read_in_order_fixed_prefix/query.sql b/parser/testdata/02149_read_in_order_fixed_prefix/query.sql new file mode 100644 index 000000000..4cc05203b --- /dev/null +++ b/parser/testdata/02149_read_in_order_fixed_prefix/query.sql @@ -0,0 +1,74 @@ +SET max_threads=0; +SET optimize_read_in_order=1; +SET optimize_trivial_insert_select = 1; +SET read_in_order_two_level_merge_threshold=100; +SET read_in_order_use_virtual_row = 1; + +DROP TABLE IF EXISTS t_read_in_order; + +CREATE TABLE t_read_in_order(date Date, i UInt64, v UInt64) +ENGINE = MergeTree ORDER BY (date, i) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO t_read_in_order SELECT '2020-10-10', number % 10, number FROM numbers(100000); +INSERT INTO t_read_in_order SELECT '2020-10-11', number % 10, number FROM numbers(100000); + +SELECT toStartOfMonth(date) as d, i FROM t_read_in_order ORDER BY d, i LIMIT 5; +EXPLAIN PIPELINE SELECT toStartOfMonth(date) as d, i FROM t_read_in_order ORDER BY d, i LIMIT 5; + +SELECT toStartOfMonth(date) as d, i FROM t_read_in_order ORDER BY d DESC, -i LIMIT 5; +EXPLAIN PIPELINE SELECT toStartOfMonth(date) as d, i FROM t_read_in_order ORDER BY d DESC, -i LIMIT 5; + +-- Here FinishSorting is used, because directions don't match. +SELECT toStartOfMonth(date) as d, i FROM t_read_in_order ORDER BY d, -i LIMIT 5; +EXPLAIN PIPELINE SELECT toStartOfMonth(date) as d, i FROM t_read_in_order ORDER BY d, -i LIMIT 5; + +SELECT date, i FROM t_read_in_order WHERE date = '2020-10-11' ORDER BY i LIMIT 5; +EXPLAIN PIPELINE SELECT date, i FROM t_read_in_order WHERE date = '2020-10-11' ORDER BY i LIMIT 5 settings enable_analyzer=0; +EXPLAIN PIPELINE SELECT date, i FROM t_read_in_order WHERE date = '2020-10-11' ORDER BY i LIMIT 5 settings enable_analyzer=1; + +SELECT * FROM t_read_in_order WHERE date = '2020-10-11' ORDER BY i, v LIMIT 5; +EXPLAIN PIPELINE SELECT * FROM t_read_in_order WHERE date = '2020-10-11' ORDER BY i, v LIMIT 5 settings enable_analyzer=0; +EXPLAIN PIPELINE SELECT * FROM t_read_in_order WHERE date = '2020-10-11' ORDER BY i, v LIMIT 5 settings enable_analyzer=1; + +INSERT INTO t_read_in_order SELECT '2020-10-12', number, number FROM numbers(100000); + +SELECT date, i FROM t_read_in_order WHERE date = '2020-10-12' ORDER BY i LIMIT 5; + +EXPLAIN SYNTAX SELECT date, i FROM t_read_in_order WHERE date = '2020-10-12' ORDER BY i DESC LIMIT 5; +EXPLAIN PIPELINE SELECT date, i FROM t_read_in_order WHERE date = '2020-10-12' ORDER BY i DESC LIMIT 5 settings enable_analyzer=0; +EXPLAIN PIPELINE SELECT date, i FROM t_read_in_order WHERE date = '2020-10-12' ORDER BY i DESC LIMIT 5 settings enable_analyzer=1; +SELECT date, i FROM t_read_in_order WHERE date = '2020-10-12' ORDER BY i DESC LIMIT 5; + +DROP TABLE IF EXISTS t_read_in_order; + +CREATE TABLE t_read_in_order(a UInt32, b UInt32) +ENGINE = MergeTree ORDER BY (a, b) +SETTINGS index_granularity = 3, index_granularity_bytes = '10Mi'; + +SYSTEM STOP MERGES t_read_in_order; + +INSERT INTO t_read_in_order VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5); +INSERT INTO t_read_in_order VALUES 
(0, 100), (1, 2), (1, 3), (1, 4), (2, 5); + +SELECT a, b FROM t_read_in_order WHERE a = 1 ORDER BY b SETTINGS read_in_order_two_level_merge_threshold = 1; +SELECT '========'; +SELECT a, b FROM t_read_in_order WHERE a = 1 ORDER BY b DESC SETTINGS read_in_order_two_level_merge_threshold = 1; + +DROP TABLE t_read_in_order; + +CREATE TABLE t_read_in_order(dt DateTime, d Decimal64(5), v UInt64) +ENGINE = MergeTree ORDER BY (toStartOfDay(dt), d) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO t_read_in_order SELECT toDateTime('2020-10-10 00:00:00') + number, 1 / (number % 100 + 1), number FROM numbers(1000); + +EXPLAIN PIPELINE SELECT toStartOfDay(dt) as date, d FROM t_read_in_order ORDER BY date, round(d) LIMIT 5; +SELECT * from ( + SELECT toStartOfDay(dt) as date, d FROM t_read_in_order ORDER BY date, round(d) LIMIT 50000000000 + -- subquery with limit 50000000000 to stabilize the test result and prevent order by d pushdown +) order by d limit 5; + +EXPLAIN PIPELINE SELECT toStartOfDay(dt) as date, d FROM t_read_in_order ORDER BY date, round(d) LIMIT 5; +SELECT * from ( + SELECT toStartOfDay(dt) as date, d FROM t_read_in_order WHERE date = '2020-10-10' ORDER BY round(d) LIMIT 50000000000 + -- subquery with limit 50000000000 to stabilize the test result and prevent order by d pushdown +) order by d limit 5; diff --git a/parser/testdata/02150_replace_regexp_all_empty_match/ast.json b/parser/testdata/02150_replace_regexp_all_empty_match/ast.json new file mode 100644 index 000000000..b02432895 --- /dev/null +++ b/parser/testdata/02150_replace_regexp_all_empty_match/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function replaceRegexpAll (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'Hello, World!'" + }, + { + "explain": " Literal '^'" + }, + { + "explain": " Literal 'here: '" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001205216, + "rows_read": 9, + "bytes_read": 331 + } +} diff --git a/parser/testdata/02150_replace_regexp_all_empty_match/metadata.json b/parser/testdata/02150_replace_regexp_all_empty_match/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02150_replace_regexp_all_empty_match/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02150_replace_regexp_all_empty_match/query.sql b/parser/testdata/02150_replace_regexp_all_empty_match/query.sql new file mode 100644 index 000000000..a7b52a1c8 --- /dev/null +++ b/parser/testdata/02150_replace_regexp_all_empty_match/query.sql @@ -0,0 +1 @@ +select replaceRegexpAll('Hello, World!', '^', 'here: '); diff --git a/parser/testdata/02151_lc_prefetch/ast.json b/parser/testdata/02151_lc_prefetch/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02151_lc_prefetch/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02151_lc_prefetch/metadata.json b/parser/testdata/02151_lc_prefetch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02151_lc_prefetch/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02151_lc_prefetch/query.sql b/parser/testdata/02151_lc_prefetch/query.sql
new file mode 100644 index 000000000..f8c760381 --- /dev/null +++ b/parser/testdata/02151_lc_prefetch/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-tsan, no-asan, no-ubsan, no-msan, no-debug +drop table if exists tab_lc; +CREATE TABLE tab_lc (x UInt64, y LowCardinality(String)) engine = MergeTree order by x SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into tab_lc select number, toString(number % 10) from numbers(20000000); +optimize table tab_lc; +SET max_rows_to_read = '21M'; +select count() from tab_lc where y == '0' settings local_filesystem_read_prefetch=1; +drop table if exists tab_lc; diff --git a/parser/testdata/02151_replace_regexp_all_empty_match_alternative/ast.json b/parser/testdata/02151_replace_regexp_all_empty_match_alternative/ast.json new file mode 100644 index 000000000..c64ebc241 --- /dev/null +++ b/parser/testdata/02151_replace_regexp_all_empty_match_alternative/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function replaceRegexpAll (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal ',,1,,'" + }, + { + "explain": " Literal '^[,]*|[,]*$'" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001136945, + "rows_read": 9, + "bytes_read": 327 + } +} diff --git a/parser/testdata/02151_replace_regexp_all_empty_match_alternative/metadata.json b/parser/testdata/02151_replace_regexp_all_empty_match_alternative/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02151_replace_regexp_all_empty_match_alternative/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02151_replace_regexp_all_empty_match_alternative/query.sql b/parser/testdata/02151_replace_regexp_all_empty_match_alternative/query.sql new file mode 100644 index 000000000..ebbc6ce97 --- /dev/null +++ b/parser/testdata/02151_replace_regexp_all_empty_match_alternative/query.sql @@ -0,0 +1,21 @@ +SELECT replaceRegexpAll(',,1,,', '^[,]*|[,]*$', ''); +SELECT replaceRegexpAll(',,1', '^[,]*|[,]*$', ''); +SELECT replaceRegexpAll('1,,', '^[,]*|[,]*$', ''); + +SELECT replaceRegexpAll(materialize(',,1,,'), '^[,]*|[,]*$', ''); +SELECT replaceRegexpAll(materialize(',,1'), '^[,]*|[,]*$', ''); +SELECT replaceRegexpAll(materialize('1,,'), '^[,]*|[,]*$', ''); + +SELECT replaceRegexpAll('a', 'z*', '') == 'a'; +SELECT replaceRegexpAll('aa', 'z*', '') == 'aa'; +SELECT replaceRegexpAll('aaq', 'z*', '') == 'aaq'; +SELECT replaceRegexpAll('aazq', 'z*', '') == 'aaq'; +SELECT replaceRegexpAll('aazzq', 'z*', '') == 'aaq'; +SELECT replaceRegexpAll('aazzqa', 'z*', '') == 'aaqa'; + +SELECT replaceRegexpAll(materialize('a'), 'z*', '') == 'a'; +SELECT replaceRegexpAll(materialize('aa'), 'z*', '') == 'aa'; +SELECT replaceRegexpAll(materialize('aaq'), 'z*', '') == 'aaq'; +SELECT replaceRegexpAll(materialize('aazq'), 'z*', '') == 'aaq'; +SELECT replaceRegexpAll(materialize('aazzq'), 'z*', '') == 'aaq'; +SELECT replaceRegexpAll(materialize('aazzqa'), 'z*', '') == 'aaqa'; diff --git a/parser/testdata/02152_bool_type/ast.json b/parser/testdata/02152_bool_type/ast.json new file mode 100644 index 000000000..87d6a953b --- /dev/null +++ b/parser/testdata/02152_bool_type/ast.json @@ -0,0 
+1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000864782, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02152_bool_type/metadata.json b/parser/testdata/02152_bool_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02152_bool_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02152_bool_type/query.sql b/parser/testdata/02152_bool_type/query.sql new file mode 100644 index 000000000..1ed3620c1 --- /dev/null +++ b/parser/testdata/02152_bool_type/query.sql @@ -0,0 +1,50 @@ +SET output_format_pretty_color=1; + +SELECT CAST('True', 'Bool'); +SELECT CAST('TrUe', 'Bool'); +SELECT CAST('true', 'Bool'); +SELECT CAST('On', 'Bool'); +SELECT CAST('on', 'Bool'); +SELECT CAST('Yes', 'Bool'); +SELECT CAST('yes', 'Bool'); +SELECT CAST('T', 'Bool'); +SELECT CAST('t', 'Bool'); +SELECT CAST('Y', 'Bool'); +SELECT CAST('y', 'Bool'); +SELECT CAST('1', 'Bool'); +SELECT CAST('enabled', 'Bool'); +SELECT CAST('enable', 'Bool'); + +SELECT CAST('False', 'Bool'); +SELECT CAST('FaLse', 'Bool'); +SELECT CAST('false', 'Bool'); +SELECT CAST('Off', 'Bool'); +SELECT CAST('off', 'Bool'); +SELECT CAST('No', 'Bool'); +SELECT CAST('no', 'Bool'); +SELECT CAST('N', 'Bool'); +SELECT CAST('n', 'Bool'); +SELECT CAST('F', 'Bool'); +SELECT CAST('f', 'Bool'); +SELECT CAST('0', 'Bool'); +SELECT CAST('disabled', 'Bool'); +SELECT CAST('disable', 'Bool'); + +SET bool_true_representation = 'Custom true'; +SET bool_false_representation = 'Custom false'; + +SELECT CAST('true', 'Bool') format CSV; +SELECT CAST('true', 'Bool') format TSV; +SELECT CAST('true', 'Bool') format Values; +SELECT ''; +SELECT CAST('true', 'Bool') format Vertical; +SELECT CAST('true', 'Bool') format Pretty; +SELECT CAST('true', 'Bool') format JSONEachRow; + +SELECT CAST(CAST(2, 'Bool'), 'UInt8'); +SELECT CAST(CAST(toUInt32(2), 'Bool'), 'UInt8'); +SELECT CAST(CAST(toInt8(2), 'Bool'), 'UInt8'); +SELECT CAST(CAST(toFloat32(2), 'Bool'), 'UInt8'); +SELECT CAST(CAST(toDecimal32(2, 2), 'Bool'), 'UInt8'); +SELECT CAST(CAST(materialize(2), 'Bool'), 'UInt8'); + diff --git a/parser/testdata/02152_count_distinct_optimization/ast.json b/parser/testdata/02152_count_distinct_optimization/ast.json new file mode 100644 index 000000000..0835f483d --- /dev/null +++ b/parser/testdata/02152_count_distinct_optimization/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_02152 (children 1)" + }, + { + "explain": " Identifier table_02152" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000916473, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/02152_count_distinct_optimization/metadata.json b/parser/testdata/02152_count_distinct_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02152_count_distinct_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02152_count_distinct_optimization/query.sql b/parser/testdata/02152_count_distinct_optimization/query.sql new file mode 100644 index 000000000..abb47763a --- /dev/null +++ b/parser/testdata/02152_count_distinct_optimization/query.sql @@ -0,0 +1,18 @@ +drop table if exists table_02152; + +create table table_02152 (a String, b LowCardinality(String)) engine = MergeTree order by a; +insert into 
table_02152 values ('a_1', 'b_1') ('a_2', 'b_2') ('a_1', 'b_3') ('a_2', 'b_2'); + +set count_distinct_optimization=true; +select countDistinct(a) from table_02152; +select countDistinct(b) from table_02152; +select uniqExact(m) from (select number, (number / 2)::UInt64 as m from numbers(10)); +select uniqExact(x) from numbers(10) group by number % 2 as x; + +set count_distinct_optimization=false; +select countDistinct(a) from table_02152; +select countDistinct(b) from table_02152; +select uniqExact(m) from (select number, (number / 2)::UInt64 as m from numbers(10)); +select uniqExact(x) from numbers(10) group by number % 2 as x; + +drop table if exists table_02152; diff --git a/parser/testdata/02152_csv_tuple/ast.json b/parser/testdata/02152_csv_tuple/ast.json new file mode 100644 index 000000000..6c26cb043 --- /dev/null +++ b/parser/testdata/02152_csv_tuple/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_02152 (children 1)" + }, + { + "explain": " Identifier test_02152" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000969386, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02152_csv_tuple/metadata.json b/parser/testdata/02152_csv_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02152_csv_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02152_csv_tuple/query.sql b/parser/testdata/02152_csv_tuple/query.sql new file mode 100644 index 000000000..6a6c029e5 --- /dev/null +++ b/parser/testdata/02152_csv_tuple/query.sql @@ -0,0 +1,11 @@ +drop table if exists test_02152; +create table test_02152 (x UInt32, y String, z Array(UInt32), t Tuple(UInt32, String, Array(UInt32))) engine=File('CSV') settings format_csv_delimiter=';'; +insert into test_02152 select 1, 'Hello', [1,2,3], tuple(2, 'World', [4,5,6]); +select * from test_02152; +drop table test_02152; + +create table test_02152 (x UInt32, y String, z Array(UInt32), t Tuple(UInt32, String, Array(UInt32))) engine=File('CustomSeparated') settings format_custom_field_delimiter='<field_delimiter>', format_custom_row_before_delimiter='<row_start>', format_custom_row_after_delimiter='<row_end_delimiter>', format_custom_escaping_rule='CSV'; +insert into test_02152 select 1, 'Hello', [1,2,3], tuple(2, 'World', [4,5,6]); +select * from test_02152; +drop table test_02152; + diff --git a/parser/testdata/02152_dictionary_date32_type/ast.json b/parser/testdata/02152_dictionary_date32_type/ast.json new file mode 100644 index 000000000..1da3e5c2c --- /dev/null +++ b/parser/testdata/02152_dictionary_date32_type/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001033062, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02152_dictionary_date32_type/metadata.json b/parser/testdata/02152_dictionary_date32_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02152_dictionary_date32_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02152_dictionary_date32_type/query.sql b/parser/testdata/02152_dictionary_date32_type/query.sql new file mode 100644 index 000000000..65547883a --- /dev/null +++ 
b/parser/testdata/02152_dictionary_date32_type/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value Date32 +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, toDate32('2019-05-05')); + +DROP DICTIONARY IF EXISTS test_dictionary; +CREATE DICTIONARY test_dictionary +( + id UInt64, + value Date32 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'test_table')) +LAYOUT(DIRECT()); + +SELECT * FROM test_dictionary; +SELECT dictGet('test_dictionary', 'value', toUInt64(0)); + +DROP DICTIONARY test_dictionary; +DROP TABLE test_table; diff --git a/parser/testdata/02152_short_circuit_throw_if/ast.json b/parser/testdata/02152_short_circuit_throw_if/ast.json new file mode 100644 index 000000000..08a9cbf13 --- /dev/null +++ b/parser/testdata/02152_short_circuit_throw_if/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function throwIf (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'Executing FALSE branch'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001064392, + "rows_read": 12, + "bytes_read": 450 + } +} diff --git a/parser/testdata/02152_short_circuit_throw_if/metadata.json b/parser/testdata/02152_short_circuit_throw_if/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02152_short_circuit_throw_if/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02152_short_circuit_throw_if/query.sql b/parser/testdata/02152_short_circuit_throw_if/query.sql new file mode 100644 index 000000000..3fdc3cc48 --- /dev/null +++ b/parser/testdata/02152_short_circuit_throw_if/query.sql @@ -0,0 +1,2 @@ +SELECT if(1, 0, throwIf(1, 'Executing FALSE branch')); +SELECT if(empty(''), 0, throwIf(1, 'Executing FALSE branch')); diff --git a/parser/testdata/02154_bit_slice_for_fixedstring/ast.json b/parser/testdata/02154_bit_slice_for_fixedstring/ast.json new file mode 100644 index 000000000..87078f77e --- /dev/null +++ b/parser/testdata/02154_bit_slice_for_fixedstring/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Const Offset'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.000885073, + "rows_read": 5, + "bytes_read": 183 + } +} diff --git a/parser/testdata/02154_bit_slice_for_fixedstring/metadata.json b/parser/testdata/02154_bit_slice_for_fixedstring/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02154_bit_slice_for_fixedstring/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02154_bit_slice_for_fixedstring/query.sql b/parser/testdata/02154_bit_slice_for_fixedstring/query.sql new file mode 100644 index 000000000..f3e803396 --- /dev/null 
+++ b/parser/testdata/02154_bit_slice_for_fixedstring/query.sql @@ -0,0 +1,143 @@ +SELECT 'Const Offset'; +select 1 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 2 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 3 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 4 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 5 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 6 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 7 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 8 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 9 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 10 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 11 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 12 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 13 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 14 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 15 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 16 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); + +select -1 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -2 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -3 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -4 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -5 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -6 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -7 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -8 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -9 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -10 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -11 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -12 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -13 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -14 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -15 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -16 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); + +SELECT 'Const 
Truncate Offset'; +select 49 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -49 as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); + +SELECT 'Const Nullable Offset'; +select 1 as offset, null as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select null as offset, toFixedString('Hello', 6) as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select null as offset, null as s, subString(bin(s), offset), bin(bitSlice(s, offset)); + +SELECT 'Const Offset, Const Length'; +select 1 as offset, 1 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 2 as offset, 2 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 3 as offset, 3 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 4 as offset, 4 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 5 as offset, 5 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 6 as offset, 6 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 7 as offset, 7 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 8 as offset, 8 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 9 as offset, 9 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 10 as offset, 10 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 11 as offset, 11 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 12 as offset, 12 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 13 as offset, 13 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 14 as offset, 14 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 15 as offset, 15 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 16 as offset, 16 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); + +select 1 as offset, -1 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 2 as offset, -2 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 3 as offset, -3 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 4 as offset, -4 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 5 as offset, -5 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 6 as offset, -6 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), 
bin(bitSlice(s, offset, length)); +select 7 as offset, -7 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 8 as offset, -8 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 9 as offset, -9 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 10 as offset, -10 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 11 as offset, -11 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 12 as offset, -12 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 13 as offset, -13 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 14 as offset, -14 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 15 as offset, -15 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 16 as offset, -16 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); + +select -1 as offset, 1 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -2 as offset, 2 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -3 as offset, 3 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -4 as offset, 4 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -5 as offset, 5 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -6 as offset, 6 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -7 as offset, 7 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -8 as offset, 8 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -9 as offset, 9 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -10 as offset, 10 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -11 as offset, 11 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -12 as offset, 12 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -13 as offset, 13 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -14 as offset, 14 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -15 as offset, 15 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -16 as offset, 16 as length, toFixedString('Hello', 6) as s, subString(bin(s), 
offset, length), bin(bitSlice(s, offset, length)); + +select -1 as offset, -16 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -2 as offset, -15 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -3 as offset, -14 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -4 as offset, -13 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -5 as offset, -12 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -6 as offset, -11 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -7 as offset, -10 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -8 as offset, -9 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -9 as offset, -8 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -10 as offset, -7 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -11 as offset, -6 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -12 as offset, -5 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -13 as offset, -4 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -14 as offset, -3 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -15 as offset, -2 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -16 as offset, -1 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); + +select 'Const Truncate Offset, Const Truncate Length'; +select 36 as offset, 16 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 49 as offset, 1 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -52 as offset, -44 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -49 as offset, -48 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -49 as offset, 49 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); + +select 'Const Nullable Offset, Const Nullable Length'; +select 1 as offset, 1 as length, null as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)); +select null as offset, 1 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 1 as offset, null as length, toFixedString('Hello', 6) as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)); +select null as offset, null as length, null as s, subString(bin(s), offset , length), 
bin(bitSlice(s, offset, length)); + +select 'Dynamic Offset, Dynamic Length'; +select number as offset, number as length, toFixedString('Hello', 6) as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)) from numbers(16); +select number as offset, -number as length, toFixedString('Hello', 6) as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)) from numbers(16); +select -number as offset, -16+number as length, toFixedString('Hello', 6) as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)) from numbers(16); +select -number as offset, number as length, toFixedString('Hello', 6) as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)) from numbers(16); + +select 'Dynamic Truncate Offset, Dynamic Truncate Length'; +select number-8 as offset, 8 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)) from numbers(9); +select -4 as offset, number as length, toFixedString('Hello', 6) as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)) from numbers(9); +select -44-number as offset, 8 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)) from numbers(9); +select -52 as offset, number as length, toFixedString('Hello', 6) as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)) from numbers(9); +select -52 as offset, number + 48 as length, toFixedString('Hello', 6) as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)) from numbers(9); + +select 'Dynamic Nullable Offset, Dynamic Nullable Length'; +select if(number%4 ==1 or number%8==7, null, number) as offset, if(number%4==2 or number%8==7, null, number) as length,if(number%4 ==3, null, toFixedString('Hello', 6)) as s, + subString(bin(s), offset, length), bin(bitSlice(s, offset , length)) +from numbers(16); diff --git a/parser/testdata/02154_bit_slice_for_string/ast.json b/parser/testdata/02154_bit_slice_for_string/ast.json new file mode 100644 index 000000000..b033ed932 --- /dev/null +++ b/parser/testdata/02154_bit_slice_for_string/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Const Offset'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001002126, + "rows_read": 5, + "bytes_read": 183 + } +} diff --git a/parser/testdata/02154_bit_slice_for_string/metadata.json b/parser/testdata/02154_bit_slice_for_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02154_bit_slice_for_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02154_bit_slice_for_string/query.sql b/parser/testdata/02154_bit_slice_for_string/query.sql new file mode 100644 index 000000000..f192301a7 --- /dev/null +++ b/parser/testdata/02154_bit_slice_for_string/query.sql @@ -0,0 +1,144 @@ +SELECT 'Const Offset'; +select 1 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 2 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 3 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 4 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, 
offset)); +select 5 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 6 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 7 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 8 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 9 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 10 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 11 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 12 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 13 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 14 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 15 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select 16 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); + +select -1 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -2 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -3 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -4 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -5 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -6 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -7 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -8 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -9 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -10 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -11 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -12 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -13 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -14 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -15 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -16 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); + + +SELECT 'Const Truncate Offset'; +select 41 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select -41 as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); + +SELECT 'Const Nullable Offset'; +select 1 as offset, null as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select null as offset, 'Hello' as s, subString(bin(s), offset), bin(bitSlice(s, offset)); +select null as offset, null as s, subString(bin(s), offset), bin(bitSlice(s, offset)); + +SELECT 'Const Offset, Const Length'; +select 1 as offset, 1 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 2 as offset, 2 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 3 as offset, 3 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 4 as offset, 4 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 5 as offset, 5 as 
length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 6 as offset, 6 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 7 as offset, 7 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 8 as offset, 8 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 9 as offset, 9 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 10 as offset, 10 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 11 as offset, 11 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 12 as offset, 12 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 13 as offset, 13 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 14 as offset, 14 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 15 as offset, 15 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 16 as offset, 16 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); + +select 1 as offset, -1 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 2 as offset, -2 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 3 as offset, -3 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 4 as offset, -4 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 5 as offset, -5 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 6 as offset, -6 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 7 as offset, -7 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 8 as offset, -8 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 9 as offset, -9 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 10 as offset, -10 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 11 as offset, -11 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 12 as offset, -12 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 13 as offset, -13 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 14 as offset, -14 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 15 as offset, -15 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 16 as offset, -16 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); + +select -1 as offset, 1 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -2 as offset, 2 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -3 as 
offset, 3 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -4 as offset, 4 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -5 as offset, 5 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -6 as offset, 6 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -7 as offset, 7 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -8 as offset, 8 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -9 as offset, 9 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -10 as offset, 10 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -11 as offset, 11 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -12 as offset, 12 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -13 as offset, 13 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -14 as offset, 14 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -15 as offset, 15 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -16 as offset, 16 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); + +select -1 as offset, -16 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -2 as offset, -15 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -3 as offset, -14 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -4 as offset, -13 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -5 as offset, -12 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -6 as offset, -11 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -7 as offset, -10 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -8 as offset, -9 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -9 as offset, -8 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -10 as offset, -7 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -11 as offset, -6 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -12 as offset, -5 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -13 as offset, -4 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -14 as offset, -3 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -15 as offset, -2 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -16 as offset, -1 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, 
offset, length)); + +select 'Const Truncate Offset, Const Truncate Length'; +select 36 as offset, 8 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 41 as offset, 1 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -44 as offset, -36 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -41 as offset, -40 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select -41 as offset, 41 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); + +select 'Const Nullable Offset, Const Nullable Length'; +select 1 as offset, 1 as length, null as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)); +select null as offset, 1 as length, 'Hello' as s, subString(bin(s), offset, length), bin(bitSlice(s, offset, length)); +select 1 as offset, null as length, 'Hello' as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)); +select null as offset, null as length, null as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)); + +select 'Dynamic Offset, Dynamic Length'; +select number as offset, number as length, 'Hello' as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)) from numbers(16); +select number as offset, -number as length, 'Hello' as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)) from numbers(16); +select -number as offset, -16+number as length, 'Hello' as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)) from numbers(16); +select -number as offset, number as length, 'Hello' as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)) from numbers(16); + +select 'Dynamic Truncate Offset, Dynamic Truncate Length'; +select number-8 as offset, 8 as length, 'Hello' as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)) from numbers(9); +select -4 as offset, number as length, 'Hello' as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)) from numbers(9); +select -36-number as offset, 8 as length, 'Hello' as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)) from numbers(9); +select -44 as offset, number as length, 'Hello' as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)) from numbers(9); +select -44 as offset, number + 40 as length, 'Hello' as s, subString(bin(s), offset , length), bin(bitSlice(s, offset, length)) from numbers(9); + +select 'Dynamic Nullable Offset, Dynamic Nullable Length'; +select if(number%4 ==1 or number%8==7, null, number) as offset, if(number%4==2 or number%8==7, null, number) as length,if(number%4 ==3, null, 'Hello') as s, + subString(bin(s), offset, length), bin(bitSlice(s, offset , length)) +from numbers(16); diff --git a/parser/testdata/02154_bitmap_contains/ast.json b/parser/testdata/02154_bitmap_contains/ast.json new file mode 100644 index 000000000..7145266da --- /dev/null +++ b/parser/testdata/02154_bitmap_contains/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitmapContains (children 1)" + }, + { + "explain": " 
ExpressionList (children 2)" + }, + { + "explain": " Function bitmapBuild (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_9]" + }, + { + "explain": " Literal UInt64_964291337" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001189017, + "rows_read": 10, + "bytes_read": 404 + } +} diff --git a/parser/testdata/02154_bitmap_contains/metadata.json b/parser/testdata/02154_bitmap_contains/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02154_bitmap_contains/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02154_bitmap_contains/query.sql b/parser/testdata/02154_bitmap_contains/query.sql new file mode 100644 index 000000000..3235e81e2 --- /dev/null +++ b/parser/testdata/02154_bitmap_contains/query.sql @@ -0,0 +1 @@ +select bitmapContains(bitmapBuild([9]), 964291337) diff --git a/parser/testdata/02154_default_keyword_insert/ast.json b/parser/testdata/02154_default_keyword_insert/ast.json new file mode 100644 index 000000000..1395c99ef --- /dev/null +++ b/parser/testdata/02154_default_keyword_insert/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery default_table (children 3)" + }, + { + "explain": " Identifier default_table" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType UInt32" + }, + { + "explain": " ColumnDeclaration y (children 2)" + }, + { + "explain": " DataType UInt32" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " ColumnDeclaration z (children 2)" + }, + { + "explain": " DataType UInt32" + }, + { + "explain": " Literal UInt64_33" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.00102865, + "rows_read": 14, + "bytes_read": 489 + } +} diff --git a/parser/testdata/02154_default_keyword_insert/metadata.json b/parser/testdata/02154_default_keyword_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02154_default_keyword_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02154_default_keyword_insert/query.sql b/parser/testdata/02154_default_keyword_insert/query.sql new file mode 100644 index 000000000..efccc35e6 --- /dev/null +++ b/parser/testdata/02154_default_keyword_insert/query.sql @@ -0,0 +1,7 @@ +CREATE TEMPORARY TABLE IF NOT EXISTS default_table (x UInt32, y UInt32 DEFAULT 42, z UInt32 DEFAULT 33) ENGINE = Memory; + +INSERT INTO default_table(x) values (DEFAULT); +INSERT INTO default_table(x, z) values (1, DEFAULT); +INSERT INTO default_table values (2, 33, DEFAULT); + +SELECT * FROM default_table ORDER BY x; diff --git a/parser/testdata/02155_binary_op_between_float_and_decimal/ast.json b/parser/testdata/02155_binary_op_between_float_and_decimal/ast.json new file mode 100644 index 000000000..fd807c8a1 --- /dev/null +++ b/parser/testdata/02155_binary_op_between_float_and_decimal/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 
1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1.5'" + }, + { + "explain": " Literal 'Decimal32(5)'" + }, + { + "explain": " Literal Float64_1.5" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.000959128, + "rows_read": 11, + "bytes_read": 409 + } +} diff --git a/parser/testdata/02155_binary_op_between_float_and_decimal/metadata.json b/parser/testdata/02155_binary_op_between_float_and_decimal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02155_binary_op_between_float_and_decimal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02155_binary_op_between_float_and_decimal/query.sql b/parser/testdata/02155_binary_op_between_float_and_decimal/query.sql new file mode 100644 index 000000000..2e8ac3246 --- /dev/null +++ b/parser/testdata/02155_binary_op_between_float_and_decimal/query.sql @@ -0,0 +1,96 @@ +SELECT 1.5::Decimal32(5) + 1.5; +SELECT 1.5::Decimal32(5) - 1.5; +SELECT 1.5::Decimal32(5) * 1.5; +SELECT 1.5::Decimal32(5) / 1.5; + +SELECT 1.5 + 1.5::Decimal32(5); +SELECT 1.5 - 1.5::Decimal32(5); +SELECT 1.5 * 1.5::Decimal32(5); +SELECT 1.5 / 1.5::Decimal32(5); + +SELECT 1.0::Decimal32(5) / 0.0; + +SELECT least(1.5, 1.0::Decimal32(5)); +SELECT greatest(1.5, 1.0::Decimal32(5)); + +DROP TABLE IF EXISTS t; +CREATE TABLE t(d1 Decimal32(5), d2 Decimal64(10), d3 Decimal128(20), d4 Decimal256(40), f1 Float32, f2 Float64) ENGINE=Memory; + +INSERT INTO t values (-4.5, 4.5, -45.5, 45.5, 2.5, -3.5); +INSERT INTO t values (4.5, -4.5, 45.5, -45.5, -3.5, 2.5); +INSERT INTO t values (2.5, -2.5, 25.5, -25.5, -2.5, 3.5); +INSERT INTO t values (-2.5, 2.5, -25.5, 25.5, 3.5, -2.5); + +SELECT ''; +SELECT 'plus'; +SELECT d1, f1, d1 + f1 FROM t ORDER BY f1; +SELECT d2, f1, d2 + f1 FROM t ORDER BY f1; +SELECT d3, f1, d3 + f1 FROM t ORDER BY f1; +SELECT d4, f1, d4 + f1 FROM t ORDER BY f1; + +SELECT d1, f2, d1 + f2 FROM t ORDER BY f2; +SELECT d2, f2, d2 + f2 FROM t ORDER BY f2; +SELECT d3, f2, d3 + f2 FROM t ORDER BY f2; +SELECT d4, f2, d4 + f2 FROM t ORDER BY f2; + +SELECT ''; +SELECT 'minus'; +SELECT d1, f1, d1 - f1 FROM t ORDER BY f1; +SELECT d2, f1, d2 - f1 FROM t ORDER BY f1; +SELECT d3, f1, d3 - f1 FROM t ORDER BY f1; +SELECT d4, f1, d4 - f1 FROM t ORDER BY f1; + +SELECT d1, f2, d1 - f2 FROM t ORDER BY f2; +SELECT d2, f2, d2 - f2 FROM t ORDER BY f2; +SELECT d3, f2, d3 - f2 FROM t ORDER BY f2; +SELECT d4, f2, d4 - f2 FROM t ORDER BY f2; + +SELECT ''; +SELECT 'multiply'; +SELECT d1, f1, d1 * f1 FROM t ORDER BY f1; +SELECT d2, f1, d2 * f1 FROM t ORDER BY f1; +SELECT d3, f1, d3 * f1 FROM t ORDER BY f1; +SELECT d4, f1, d4 * f1 FROM t ORDER BY f1; + +SELECT d1, f2, d1 * f2 FROM t ORDER BY f2; +SELECT d2, f2, d2 * f2 FROM t ORDER BY f2; +SELECT d3, f2, d3 * f2 FROM t ORDER BY f2; +SELECT d4, f2, d4 * f2 FROM t ORDER BY f2; + +SELECT ''; +SELECT 'division'; +SELECT d1, f1, d1 / f1 FROM t ORDER BY f1; +SELECT d2, f1, d2 / f1 FROM t ORDER BY f1; +SELECT d3, f1, d3 / f1 FROM t ORDER BY f1; +SELECT d4, f1, d4 / f1 FROM t ORDER BY f1; + +SELECT d1, f2, d1 / f2 FROM t ORDER BY f2; +SELECT d2, f2, d2 / f2 FROM t ORDER BY f2; +SELECT d3, f2, d3 / f2 FROM t ORDER BY f2; +SELECT d4, f2, d4 / f2 FROM t ORDER BY f2; + +SELECT ''; +SELECT 'least'; +SELECT d1, f1, least(d1, f1) FROM t ORDER BY f1; +SELECT d2, f1, least(d2, f1) FROM t ORDER BY f1; 
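+-- least/greatest with mixed Float/Decimal arguments are expected to follow the
+-- arithmetic operators above: the Decimal operand is converted and the comparison
+-- is performed in Float64.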
+SELECT d3, f1, least(d3, f1) FROM t ORDER BY f1; +SELECT d4, f1, least(d4, f1) FROM t ORDER BY f1; + +SELECT d1, f2, least(d1, f2) FROM t ORDER BY f2; +SELECT d2, f2, least(d2, f2) FROM t ORDER BY f2; +SELECT d3, f2, least(d3, f2) FROM t ORDER BY f2; +SELECT d4, f2, least(d4, f2) FROM t ORDER BY f2; + +SELECT ''; +SELECT 'greatest'; +SELECT d1, f1, greatest(d1, f1) FROM t ORDER BY f1; +SELECT d2, f1, greatest(d2, f1) FROM t ORDER BY f1; +SELECT d3, f1, greatest(d3, f1) FROM t ORDER BY f1; +SELECT d4, f1, greatest(d4, f1) FROM t ORDER BY f1; + +SELECT d1, f2, greatest(d1, f2) FROM t ORDER BY f2; +SELECT d2, f2, greatest(d2, f2) FROM t ORDER BY f2; +SELECT d3, f2, greatest(d3, f2) FROM t ORDER BY f2; +SELECT d4, f2, greatest(d4, f2) FROM t ORDER BY f2; + +DROP TABLE t; diff --git a/parser/testdata/02155_create_table_w_timezone/ast.json b/parser/testdata/02155_create_table_w_timezone/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02155_create_table_w_timezone/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02155_create_table_w_timezone/metadata.json b/parser/testdata/02155_create_table_w_timezone/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02155_create_table_w_timezone/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02155_create_table_w_timezone/query.sql b/parser/testdata/02155_create_table_w_timezone/query.sql new file mode 100644 index 000000000..015efe3b6 --- /dev/null +++ b/parser/testdata/02155_create_table_w_timezone/query.sql @@ -0,0 +1,8 @@ +create table t02155_t64_tz ( a DateTime64(9, America/Chicago)) Engine = Memory; -- { clientError SYNTAX_ERROR } +create table t02155_t_tz ( a DateTime(America/Chicago)) Engine = Memory; -- { clientError SYNTAX_ERROR } + +create table t02155_t64_tz ( a DateTime64(9, 'America/Chicago')) Engine = Memory; +create table t02155_t_tz ( a DateTime('America/Chicago')) Engine = Memory; + +drop table t02155_t64_tz; +drop table t02155_t_tz; diff --git a/parser/testdata/02155_dictionary_comment/ast.json b/parser/testdata/02155_dictionary_comment/ast.json new file mode 100644 index 000000000..3aa859e2b --- /dev/null +++ b/parser/testdata/02155_dictionary_comment/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02155_test_table (children 1)" + }, + { + "explain": " Identifier 02155_test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.0010656, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/02155_dictionary_comment/metadata.json b/parser/testdata/02155_dictionary_comment/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02155_dictionary_comment/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02155_dictionary_comment/query.sql b/parser/testdata/02155_dictionary_comment/query.sql new file mode 100644 index 000000000..8ebc7b259 --- /dev/null +++ b/parser/testdata/02155_dictionary_comment/query.sql @@ -0,0 +1,53 @@ +DROP TABLE IF EXISTS 02155_test_table; +CREATE TABLE 02155_test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO 02155_test_table VALUES (0, 'Value'); + +DROP DICTIONARY IF EXISTS 02155_test_dictionary; +CREATE DICTIONARY 02155_test_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE '02155_test_table')) +LAYOUT(DIRECT()); + +SELECT name, comment 
FROM system.dictionaries WHERE name == '02155_test_dictionary' AND database == currentDatabase(); + +ALTER TABLE 02155_test_dictionary COMMENT COLUMN value 'value_column'; --{serverError NOT_IMPLEMENTED} + +ALTER TABLE 02155_test_dictionary MODIFY COMMENT '02155_test_dictionary_comment_0'; +SELECT name, comment FROM system.dictionaries WHERE name == '02155_test_dictionary' AND database == currentDatabase(); +SELECT name, comment FROM system.tables WHERE name == '02155_test_dictionary' AND database == currentDatabase(); + +SELECT * FROM 02155_test_dictionary; +SELECT name, comment FROM system.dictionaries WHERE name == '02155_test_dictionary' AND database == currentDatabase(); +SELECT name, comment FROM system.tables WHERE name == '02155_test_dictionary' AND database == currentDatabase(); + +ALTER TABLE 02155_test_dictionary MODIFY COMMENT '02155_test_dictionary_comment_1'; +SELECT name, comment FROM system.dictionaries WHERE name == '02155_test_dictionary' AND database == currentDatabase(); +SELECT name, comment FROM system.tables WHERE name == '02155_test_dictionary' AND database == currentDatabase(); + +DROP TABLE IF EXISTS 02155_test_dictionary_view; +CREATE TABLE 02155_test_dictionary_view +( + id UInt64, + value String +) ENGINE=Dictionary(concat(currentDatabase(), '.02155_test_dictionary')); + +SELECT * FROM 02155_test_dictionary_view; + +ALTER TABLE 02155_test_dictionary_view COMMENT COLUMN value 'value_column'; --{serverError NOT_IMPLEMENTED} + +ALTER TABLE 02155_test_dictionary_view MODIFY COMMENT '02155_test_dictionary_view_comment_0'; +SELECT name, comment FROM system.tables WHERE name == '02155_test_dictionary_view' AND database == currentDatabase(); +SELECT name, comment FROM system.tables WHERE name == '02155_test_dictionary_view' AND database == currentDatabase(); + +DROP TABLE 02155_test_dictionary_view; +DROP DICTIONARY 02155_test_dictionary; +DROP TABLE 02155_test_table; diff --git a/parser/testdata/02155_h3_to_center_child/ast.json b/parser/testdata/02155_h3_to_center_child/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02155_h3_to_center_child/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02155_h3_to_center_child/metadata.json b/parser/testdata/02155_h3_to_center_child/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02155_h3_to_center_child/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02155_h3_to_center_child/query.sql b/parser/testdata/02155_h3_to_center_child/query.sql new file mode 100644 index 000000000..a5eade166 --- /dev/null +++ b/parser/testdata/02155_h3_to_center_child/query.sql @@ -0,0 +1,135 @@ +-- Tags: no-fasttest, no-async-insert +-- no-async-insert: https://github.com/ClickHouse/ClickHouse/issues/80105 + +DROP TABLE IF EXISTS h3_indexes; + +--Note: id column just exists to keep the test results sorted. +-- Order is not guaranteed with h3_index or res columns as we test the same h3_index at various resolutions. 
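+-- h3ToCenterChild(h3_index, res) should return the center child of the given cell
+-- at the finer target resolution (H3's cellToCenterChild; see the fixture link below).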
+CREATE TABLE h3_indexes (id UInt8, h3_index UInt64, res UInt8) ENGINE = Memory; +-- Test cases taken from fixture: https://github.com/uber/h3/blob/master/src/apps/testapps/testCellToCenterChild.c + +INSERT INTO h3_indexes VALUES +(1,577023702256844799,1), +(2,577023702256844799,2), +(3,577023702256844799,3), +(4,577023702256844799,4), +(5,577023702256844799,5), +(6,577023702256844799,6), +(7,577023702256844799,7), +(8,577023702256844799,8), +(9,577023702256844799,9), +(10,577023702256844799,10), +(11,577023702256844799,11), +(12,577023702256844799,12), +(13,577023702256844799,13), +(14,577023702256844799,14), +(15,577023702256844799,15), +(16,581518505791193087,2), +(17,581518505791193087,3), +(18,581518505791193087,4), +(19,581518505791193087,5), +(20,581518505791193087,6), +(21,581518505791193087,7), +(22,581518505791193087,8), +(23,581518505791193087,9), +(24,581518505791193087,10), +(25,581518505791193087,11), +(26,581518505791193087,12), +(27,581518505791193087,13), +(28,581518505791193087,14), +(29,581518505791193087,15), +(30,586021555662749695,3), +(31,586021555662749695,4), +(32,586021555662749695,5), +(33,586021555662749695,6), +(34,586021555662749695,7), +(35,586021555662749695,8), +(36,586021555662749695,9), +(37,586021555662749695,10), +(38,586021555662749695,11), +(39,586021555662749695,12), +(40,586021555662749695,13), +(41,586021555662749695,14), +(42,586021555662749695,15), +(43,590525017851166719,4), +(44,590525017851166719,5), +(45,590525017851166719,6), +(46,590525017851166719,7), +(47,590525017851166719,8), +(48,590525017851166719,9), +(49,590525017851166719,10), +(50,590525017851166719,11), +(51,590525017851166719,12), +(52,590525017851166719,13), +(53,590525017851166719,14), +(54,590525017851166719,15), +(55,595028608888602623,5), +(56,595028608888602623,6), +(57,595028608888602623,7), +(58,595028608888602623,8), +(59,595028608888602623,9), +(60,595028608888602623,10), +(61,595028608888602623,11), +(62,595028608888602623,12), +(63,595028608888602623,13), +(64,595028608888602623,14), +(65,595028608888602623,15), +(66,599532206368489471,6), +(67,599532206368489471,7), +(68,599532206368489471,8), +(69,599532206368489471,9), +(70,599532206368489471,10), +(71,599532206368489471,11), +(72,599532206368489471,12), +(73,599532206368489471,13), +(74,599532206368489471,14), +(75,599532206368489471,15), +(76,604035805861642239,7), +(77,604035805861642239,8), +(78,604035805861642239,9), +(79,604035805861642239,10), +(80,604035805861642239,11), +(81,604035805861642239,12), +(82,604035805861642239,13), +(83,604035805861642239,14), +(84,604035805861642239,15), +(85,608136739873095679,8), +(86,608136739873095679,9), +(87,608136739873095679,10), +(88,608136739873095679,11), +(89,608136739873095679,12), +(90,608136739873095679,13), +(91,608136739873095679,14), +(92,608136739873095679,15), +(93,612640339489980415,9), +(94,612640339489980415,10), +(95,612640339489980415,11), +(96,612640339489980415,12), +(97,612640339489980415,13), +(98,612640339489980415,14), +(99,612640339489980415,15), +(100,617143939115515903,10), +(101,617143939115515903,11), +(102,617143939115515903,12), +(103,617143939115515903,13), +(104,617143939115515903,14), +(105,617143939115515903,15), +(106,621647538742657023,11), +(107,621647538742657023,12), +(108,621647538742657023,13), +(109,621647538742657023,14), +(110,621647538742657023,15), +(111,626151138369998847,12), +(112,626151138369998847,13), +(113,626151138369998847,14), +(114,626151138369998847,15), +(115,630654737997365759,13), 
+(116,630654737997365759,14), +(117,630654737997365759,15), +(118,635158337624735807,14), +(119,635158337624735807,15), +(120,639661937252106247,15); + +SELECT h3ToCenterChild(h3_index,res) FROM h3_indexes ORDER BY id; + +DROP TABLE h3_indexes; diff --git a/parser/testdata/02155_multiple_inserts_for_formats_with_suffix/ast.json b/parser/testdata/02155_multiple_inserts_for_formats_with_suffix/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02155_multiple_inserts_for_formats_with_suffix/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02155_multiple_inserts_for_formats_with_suffix/metadata.json b/parser/testdata/02155_multiple_inserts_for_formats_with_suffix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02155_multiple_inserts_for_formats_with_suffix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02155_multiple_inserts_for_formats_with_suffix/query.sql b/parser/testdata/02155_multiple_inserts_for_formats_with_suffix/query.sql new file mode 100644 index 000000000..7947536bc --- /dev/null +++ b/parser/testdata/02155_multiple_inserts_for_formats_with_suffix/query.sql @@ -0,0 +1,39 @@ +-- Tags: no-fasttest, no-parallel + +drop table if exists test; +create table test (number UInt64) engine=File('Parquet'); +insert into test select * from numbers(10); +insert into test select * from numbers(10, 10); -- { serverError CANNOT_APPEND_TO_FILE } +insert into test select * from numbers(10, 10) settings engine_file_allow_create_multiple_files=1; +select * from test order by number; +truncate table test; +drop table test; + +create table test (number UInt64) engine=File('Parquet', 'test_02155/test1/data.Parquet'); +insert into test select * from numbers(10) settings engine_file_truncate_on_insert=1; +insert into test select * from numbers(10, 10); -- { serverError CANNOT_APPEND_TO_FILE } +insert into test select * from numbers(10, 10) settings engine_file_allow_create_multiple_files=1; +select * from test order by number; +drop table test; + + +insert into table function file(concat(currentDatabase(), '/test2/data.Parquet'), 'Parquet', 'number UInt64') select * from numbers(10) settings engine_file_truncate_on_insert=1; +insert into table function file(concat(currentDatabase(), '/test2/data.Parquet'), 'Parquet', 'number UInt64') select * from numbers(10, 10); -- { serverError CANNOT_APPEND_TO_FILE } +insert into table function file(concat(currentDatabase(), '/test2/data.Parquet'), 'Parquet', 'number UInt64') select * from numbers(10, 10) settings engine_file_allow_create_multiple_files=1; +select * from file(concat(currentDatabase(), '/test2/data.Parquet'), 'Parquet', 'number UInt64'); +select * from file(concat(currentDatabase(), '/test2/data.1.Parquet'), 'Parquet', 'number UInt64'); + +create table test (number UInt64) engine=File('Parquet', 'test_02155/test3/data.Parquet.gz'); +insert into test select * from numbers(10) settings engine_file_truncate_on_insert=1; +; +insert into test select * from numbers(10, 10); -- { serverError CANNOT_APPEND_TO_FILE } +insert into test select * from numbers(10, 10) settings engine_file_allow_create_multiple_files=1; +select * from test order by number; +drop table test; + +insert into table function file(concat(currentDatabase(), '/test4/data.Parquet.gz'), 'Parquet', 'number UInt64') select * from numbers(10) settings engine_file_truncate_on_insert=1; +insert into table function file(concat(currentDatabase(), 
+insert into table function file(concat(currentDatabase(), '/test4/data.Parquet.gz'), 'Parquet', 'number UInt64') select * from numbers(10, 10); -- { serverError CANNOT_APPEND_TO_FILE } +insert into table function file(concat(currentDatabase(), '/test4/data.Parquet.gz'), 'Parquet', 'number UInt64') select * from numbers(10, 10) settings engine_file_allow_create_multiple_files=1; +select * from file(concat(currentDatabase(), '/test4/data.Parquet.gz'), 'Parquet', 'number UInt64'); +select * from file(concat(currentDatabase(), '/test4/data.1.Parquet.gz'), 'Parquet', 'number UInt64'); + diff --git a/parser/testdata/02155_nested_lc_defalut_bug/ast.json b/parser/testdata/02155_nested_lc_defalut_bug/ast.json new file mode 100644 index 000000000..b83458b6b --- /dev/null +++ b/parser/testdata/02155_nested_lc_defalut_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nested_test (children 1)" + }, + { + "explain": " Identifier nested_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001351626, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/02155_nested_lc_defalut_bug/metadata.json b/parser/testdata/02155_nested_lc_defalut_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02155_nested_lc_defalut_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02155_nested_lc_defalut_bug/query.sql b/parser/testdata/02155_nested_lc_defalut_bug/query.sql new file mode 100644 index 000000000..45cb9f96b --- /dev/null +++ b/parser/testdata/02155_nested_lc_defalut_bug/query.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS nested_test; +CREATE TABLE nested_test (x UInt32, `nest.col1` Array(String), `nest.col2` Array(Int8)) ENGINE = MergeTree ORDER BY x; + +ALTER TABLE nested_test ADD COLUMN `nest.col3` Array(LowCardinality(String)); +INSERT INTO nested_test (x, `nest.col1`, `nest.col2`) values (1, ['a', 'b'], [3, 4]); +SELECT * FROM nested_test; + +DROP TABLE IF EXISTS nested_test; diff --git a/parser/testdata/02155_parse_date_lowcard_default_throw/ast.json b/parser/testdata/02155_parse_date_lowcard_default_throw/ast.json new file mode 100644 index 000000000..cc23a88e0 --- /dev/null +++ b/parser/testdata/02155_parse_date_lowcard_default_throw/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 1)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Function parseDateTimeBestEffort (children 1)" + }, + { + "explain": "     ExpressionList (children 1)" + }, + { + "explain": "      Function toLowCardinality (children 1)" + }, + { + "explain": "       ExpressionList (children 1)" + }, + { + "explain": "        Function materialize (children 1)" + }, + { + "explain": "         ExpressionList (children 1)" + }, + { + "explain": "          Literal '15-JUL-16'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001896055, + "rows_read": 11, + "bytes_read": 470 + } +} diff --git a/parser/testdata/02155_parse_date_lowcard_default_throw/metadata.json b/parser/testdata/02155_parse_date_lowcard_default_throw/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02155_parse_date_lowcard_default_throw/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02155_parse_date_lowcard_default_throw/query.sql
b/parser/testdata/02155_parse_date_lowcard_default_throw/query.sql new file mode 100644 index 000000000..703cf1fed --- /dev/null +++ b/parser/testdata/02155_parse_date_lowcard_default_throw/query.sql @@ -0,0 +1 @@ +SELECT parseDateTimeBestEffort(toLowCardinality(materialize('15-JUL-16'))); diff --git a/parser/testdata/02155_read_in_order_max_rows_to_read/ast.json b/parser/testdata/02155_read_in_order_max_rows_to_read/ast.json new file mode 100644 index 000000000..b43d03551 --- /dev/null +++ b/parser/testdata/02155_read_in_order_max_rows_to_read/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001543195, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02155_read_in_order_max_rows_to_read/metadata.json b/parser/testdata/02155_read_in_order_max_rows_to_read/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02155_read_in_order_max_rows_to_read/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02155_read_in_order_max_rows_to_read/query.sql b/parser/testdata/02155_read_in_order_max_rows_to_read/query.sql new file mode 100644 index 000000000..acd2379ff --- /dev/null +++ b/parser/testdata/02155_read_in_order_max_rows_to_read/query.sql @@ -0,0 +1,24 @@ +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +DROP TABLE IF EXISTS t_max_rows_to_read; + +CREATE TABLE t_max_rows_to_read (a UInt64) +ENGINE = MergeTree ORDER BY a +SETTINGS index_granularity = 4, index_granularity_bytes = '10Mi'; + +INSERT INTO t_max_rows_to_read SELECT number FROM numbers(100); + +SET max_threads = 1; +SET optimize_read_in_order = 1; + +SELECT a FROM t_max_rows_to_read WHERE a = 10 SETTINGS max_rows_to_read = 4; + +SELECT a FROM t_max_rows_to_read ORDER BY a LIMIT 5 SETTINGS max_rows_to_read = 12; + +SELECT a FROM t_max_rows_to_read WHERE a = 10 OR a = 20 SETTINGS max_rows_to_read = 12; + +SELECT a FROM t_max_rows_to_read ORDER BY a LIMIT 20 FORMAT Null SETTINGS max_rows_to_read = 12; -- { serverError TOO_MANY_ROWS } +SELECT a FROM t_max_rows_to_read WHERE a > 10 ORDER BY a LIMIT 5 FORMAT Null SETTINGS max_rows_to_read = 12; -- { serverError TOO_MANY_ROWS } +SELECT a FROM t_max_rows_to_read WHERE a = 10 OR a = 20 FORMAT Null SETTINGS max_rows_to_read = 4; -- { serverError TOO_MANY_ROWS } + +DROP TABLE t_max_rows_to_read; diff --git a/parser/testdata/02156_minus_op_with_datatime64/ast.json b/parser/testdata/02156_minus_op_with_datatime64/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02156_minus_op_with_datatime64/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02156_minus_op_with_datatime64/metadata.json b/parser/testdata/02156_minus_op_with_datatime64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02156_minus_op_with_datatime64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02156_minus_op_with_datatime64/query.sql b/parser/testdata/02156_minus_op_with_datatime64/query.sql new file mode 100644 index 000000000..968593f5d --- /dev/null +++ b/parser/testdata/02156_minus_op_with_datatime64/query.sql @@ -0,0 +1,65 @@ +-- Test subtraction without materialize() +SELECT +-- DateTime64 (scale 3) - DateTime +toDateTime64('2023-10-01 12:00:00', 3) - toDateTime('2023-10-01 11:00:00') AS 
result_no_materialize_1, +toDateTime64('2023-10-01 12:00:00.123', 3) - toDateTime('2023-10-01 11:00:00') AS result_no_materialize_2, + +-- DateTime64 (scale 0) - DateTime +toDateTime64('2023-10-02 12:00:00', 0) - toDateTime('2023-10-01 11:00:00') AS result_no_materialize_3, + +-- DateTime64 (scale 3) - DateTime64 (scale 6) +toDateTime64('2023-10-01 12:00:00.123', 3) - toDateTime64('2023-10-01 11:00:00.123456', 6) AS result_no_materialize_4, + +-- DateTime64 (scale 6) - DateTime64 (scale 3) +toDateTime64('2023-10-01 12:00:00.123456', 6) - toDateTime64('2023-10-01 11:00:00.123', 3) AS result_no_materialize_5, + +-- DateTime - DateTime64 (scale 3) +toDateTime('2023-10-01 12:00:00') - toDateTime64('2023-10-01 11:00:00', 3) AS result_no_materialize_6, + +-- DateTime - DateTime64 (scale 6) +toDateTime('2023-10-01 12:00:00') - toDateTime64('2023-10-01 11:00:00', 6) AS result_no_materialize_7; + +-- Test subtraction with materialize() on left side +SELECT +materialize(toDateTime64('2023-10-01 12:00:00', 3)) - toDateTime('2023-10-01 11:00:00') AS result_left_materialize_1, +materialize(toDateTime64('2023-10-01 12:00:00.123', 3)) - toDateTime('2023-10-01 11:00:00') AS result_left_materialize_2, + +materialize(toDateTime64('2023-10-02 12:00:00', 0)) - toDateTime('2023-10-01 11:00:00') AS result_left_materialize_3, + +materialize(toDateTime64('2023-10-01 12:00:00.123', 3)) - toDateTime64('2023-10-01 11:00:00.123456', 6) AS result_left_materialize_4, + +materialize(toDateTime64('2023-10-01 12:00:00.123456', 6)) - toDateTime64('2023-10-01 11:00:00.123', 3) AS result_left_materialize_5, + +materialize(toDateTime('2023-10-01 12:00:00')) - toDateTime64('2023-10-01 11:00:00', 3) AS result_left_materialize_6, + +materialize(toDateTime('2023-10-01 12:00:00')) - toDateTime64('2023-10-01 11:00:00', 6) AS result_left_materialize_7; + +-- Test subtraction with materialize() on right side +SELECT +toDateTime64('2023-10-01 12:00:00', 3) - materialize(toDateTime('2023-10-01 11:00:00')) AS result_right_materialize_1, +toDateTime64('2023-10-01 12:00:00.123', 3) - materialize(toDateTime('2023-10-01 11:00:00')) AS result_right_materialize_2, + +toDateTime64('2023-10-01 12:00:00', 0) - materialize(toDateTime('2023-10-01 11:00:00')) AS result_right_materialize_3, + +toDateTime('2023-10-01 12:00:00') - materialize(toDateTime64('2023-10-01 11:00:00', 3)) AS result_right_materialize_4; + +-- Test subtraction with materialize() on both sides +SELECT +materialize(toDateTime64('2023-10-01 12:00:00', 3)) - materialize(toDateTime('2023-10-01 11:00:00')) AS result_both_materialize_1, +materialize(toDateTime64('2023-10-01 12:00:00.123', 3)) - materialize(toDateTime('2023-10-01 11:00:00')) AS result_both_materialize_2, + +materialize(toDateTime64('2023-10-01 12:00:00', 0)) - materialize(toDateTime('2023-10-01 11:00:00')) AS result_both_materialize_3, + +materialize(toDateTime('2023-10-01 12:00:00')) - materialize(toDateTime64('2023-10-01 11:00:00', 3)) AS result_both_materialize_4; + +-- Test overflow +SELECT +materialize(toDateTime64('2262-04-11 23:47:16', 9, 'UTC')) - toDateTime64('1900-01-01 00:00:00', 9, 'UTC') FORMAT Null; -- { serverError DECIMAL_OVERFLOW } + +SELECT +materialize(toDateTime64('1900-01-01 00:00:00', 0, 'UTC')) - materialize(toDateTime64('2262-04-11 23:47:16', 9, 'UTC')); -- { serverError DECIMAL_OVERFLOW } +
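+-- A hedged note, assuming decimal_check_overflow behaves as its name suggests: with it set to 0, the same out-of-range subtraction should no longer raise DECIMAL_OVERFLOW, which is presumably why the query below only discards its result via FORMAT Null instead of expecting an error. A minimal standalone sketch: +-- SELECT toDecimal32(999999999, 0) + toDecimal32(1, 0) SETTINGS decimal_check_overflow=0 FORMAT Null; -- expected not to throw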
+SELECT +materialize(toDateTime64('2262-04-11 23:47:16', 9, 'UTC')) - toDateTime64('1900-01-01 00:00:00', 9, 'UTC'), +materialize(toDateTime64('1900-01-01 00:00:00', 0, 'UTC')) - materialize(toDateTime64('2262-04-11 23:47:16', 9, 'UTC')) +SETTINGS decimal_check_overflow=0 FORMAT Null; diff --git a/parser/testdata/02156_storage_merge_prewhere/ast.json b/parser/testdata/02156_storage_merge_prewhere/ast.json new file mode 100644 index 000000000..9458e6f30 --- /dev/null +++ b/parser/testdata/02156_storage_merge_prewhere/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.0013103, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02156_storage_merge_prewhere/metadata.json b/parser/testdata/02156_storage_merge_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02156_storage_merge_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02156_storage_merge_prewhere/query.sql b/parser/testdata/02156_storage_merge_prewhere/query.sql new file mode 100644 index 000000000..a8c4bcdd1 --- /dev/null +++ b/parser/testdata/02156_storage_merge_prewhere/query.sql @@ -0,0 +1,44 @@ +SET optimize_move_to_prewhere = 1; +SET enable_multiple_prewhere_read_steps = 1; +SET prefer_localhost_replica = 1; -- Make sure plan is reliable +SET optimize_functions_to_subcolumns = 0; + +DROP TABLE IF EXISTS t_02156_mt1; +DROP TABLE IF EXISTS t_02156_mt2; +DROP TABLE IF EXISTS t_02156_log; +DROP TABLE IF EXISTS t_02156_dist; +DROP TABLE IF EXISTS t_02156_merge1; +DROP TABLE IF EXISTS t_02156_merge2; +DROP TABLE IF EXISTS t_02156_merge3; + +CREATE TABLE t_02156_mt1 (k UInt32, v String) ENGINE = MergeTree ORDER BY k SETTINGS min_bytes_for_wide_part=0; +CREATE TABLE t_02156_mt2 (k UInt32, v String) ENGINE = MergeTree ORDER BY k SETTINGS min_bytes_for_wide_part=0; +CREATE TABLE t_02156_log (k UInt32, v String) ENGINE = Log; + +CREATE TABLE t_02156_dist (k UInt32, v String) ENGINE = Distributed(test_shard_localhost, currentDatabase(), t_02156_mt1); + +CREATE TABLE t_02156_merge1 (k UInt32, v String) ENGINE = Merge(currentDatabase(), 't_02156_mt1|t_02156_mt2'); +CREATE TABLE t_02156_merge2 (k UInt32, v String) ENGINE = Merge(currentDatabase(), 't_02156_mt1|t_02156_log'); +CREATE TABLE t_02156_merge3 (k UInt32, v String) ENGINE = Merge(currentDatabase(), 't_02156_mt2|t_02156_dist'); + +INSERT INTO t_02156_mt1 SELECT number, toString(number) FROM numbers(10000); +INSERT INTO t_02156_mt2 SELECT number, toString(number) FROM numbers(10000); +INSERT INTO t_02156_log SELECT number, toString(number) FROM numbers(10000); + +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02156_merge1 WHERE k = 3 AND notEmpty(v)) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' settings enable_analyzer=1; +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02156_merge1 WHERE k = 3 AND notEmpty(v)) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%' settings enable_analyzer=0; +SELECT count() FROM t_02156_merge1 WHERE k = 3 AND notEmpty(v); + +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02156_merge2 WHERE k = 3 AND notEmpty(v)) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT count() FROM t_02156_merge2 WHERE k = 3 AND notEmpty(v); + +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02156_merge3 WHERE k = 3 AND
notEmpty(v)) WHERE explain LIKE '%Prewhere%' OR explain LIKE '%Filter column%'; +SELECT count() FROM t_02156_merge3 WHERE k = 3 AND notEmpty(v); + +DROP TABLE IF EXISTS t_02156_mt1; +DROP TABLE IF EXISTS t_02156_mt2; +DROP TABLE IF EXISTS t_02156_log; +DROP TABLE IF EXISTS t_02156_dist; +DROP TABLE IF EXISTS t_02156_merge1; +DROP TABLE IF EXISTS t_02156_merge2; +DROP TABLE IF EXISTS t_02156_merge3; diff --git a/parser/testdata/02156_storage_merge_prewhere_2/ast.json b/parser/testdata/02156_storage_merge_prewhere_2/ast.json new file mode 100644 index 000000000..837e05b21 --- /dev/null +++ b/parser/testdata/02156_storage_merge_prewhere_2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_02156_ololo_1 (children 1)" + }, + { + "explain": " Identifier t_02156_ololo_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001241899, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02156_storage_merge_prewhere_2/metadata.json b/parser/testdata/02156_storage_merge_prewhere_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02156_storage_merge_prewhere_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02156_storage_merge_prewhere_2/query.sql b/parser/testdata/02156_storage_merge_prewhere_2/query.sql new file mode 100644 index 000000000..1b4881d4e --- /dev/null +++ b/parser/testdata/02156_storage_merge_prewhere_2/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS t_02156_ololo_1; +DROP TABLE IF EXISTS t_02156_ololo_2; +DROP TABLE IF EXISTS t_02156_ololo_dist; + +CREATE TABLE t_02156_ololo_1 (k UInt32, v Nullable(String)) ENGINE = MergeTree order by k; +CREATE TABLE t_02156_ololo_2 (k UInt32, v String) ENGINE = MergeTree order by k; +CREATE TABLE t_02156_ololo_dist (k UInt32, v String) ENGINE = Distributed(test_shard_localhost, currentDatabase(), t_02156_ololo_2); +CREATE TABLE t_02156_ololo_dist2 (k UInt32, v Nullable(String)) ENGINE = Distributed(test_shard_localhost, currentDatabase(), t_02156_ololo_1); + +insert into t_02156_ololo_1 values (1, 'a'); +insert into t_02156_ololo_2 values (2, 'b'); + +select * from merge('t_02156_ololo') where k != 0 and notEmpty(v) order by k settings optimize_move_to_prewhere=0; +select * from merge('t_02156_ololo') where k != 0 and notEmpty(v) order by k settings optimize_move_to_prewhere=1; + +select * from merge('t_02156_ololo_dist') where k != 0 and notEmpty(v) order by k settings optimize_move_to_prewhere=0; +select * from merge('t_02156_ololo_dist') where k != 0 and notEmpty(v) order by k settings optimize_move_to_prewhere=1; diff --git a/parser/testdata/02156_storage_merge_prewhere_not_ready_set_bug/ast.json b/parser/testdata/02156_storage_merge_prewhere_not_ready_set_bug/ast.json new file mode 100644 index 000000000..9480add9a --- /dev/null +++ b/parser/testdata/02156_storage_merge_prewhere_not_ready_set_bug/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery merge_kek_1 (children 3)" + }, + { + "explain": " Identifier merge_kek_1" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType UInt32" + }, + { + "explain": " ColumnDeclaration y (children 1)" + }, + { + "explain": " DataType UInt32" + }, + { + "explain": " Storage definition 
(children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001190389, + "rows_read": 11, + "bytes_read": 382 + } +} diff --git a/parser/testdata/02156_storage_merge_prewhere_not_ready_set_bug/metadata.json b/parser/testdata/02156_storage_merge_prewhere_not_ready_set_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02156_storage_merge_prewhere_not_ready_set_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02156_storage_merge_prewhere_not_ready_set_bug/query.sql b/parser/testdata/02156_storage_merge_prewhere_not_ready_set_bug/query.sql new file mode 100644 index 000000000..fc18c97cb --- /dev/null +++ b/parser/testdata/02156_storage_merge_prewhere_not_ready_set_bug/query.sql @@ -0,0 +1,7 @@ +create table merge_kek_1 (x UInt32, y UInt32) engine = MergeTree order by x; +create table merge_kek_2 (x UInt32, y UInt32) engine = MergeTree order by x; + +insert into merge_kek_1 select number, number from numbers(100); +insert into merge_kek_2 select number + 500, number + 500 from numbers(1e6); + +select sum(x), min(x + x), max(x + x) from merge(currentDatabase(), '^merge_kek_.$') where x > 200 and y in (select 500 + number * 2 from numbers(100)) settings max_threads=2; diff --git a/parser/testdata/02157_line_as_string_output_format/ast.json b/parser/testdata/02157_line_as_string_output_format/ast.json new file mode 100644 index 000000000..4d099b671 --- /dev/null +++ b/parser/testdata/02157_line_as_string_output_format/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Hello \\\\ World'" + }, + { + "explain": " Identifier LineAsString" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001184998, + "rows_read": 6, + "bytes_read": 217 + } +} diff --git a/parser/testdata/02157_line_as_string_output_format/metadata.json b/parser/testdata/02157_line_as_string_output_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02157_line_as_string_output_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02157_line_as_string_output_format/query.sql b/parser/testdata/02157_line_as_string_output_format/query.sql new file mode 100644 index 000000000..f1c567cf4 --- /dev/null +++ b/parser/testdata/02157_line_as_string_output_format/query.sql @@ -0,0 +1 @@ +SELECT 'Hello \\ World' FORMAT LineAsString; diff --git a/parser/testdata/02158_contingency/ast.json b/parser/testdata/02158_contingency/ast.json new file mode 100644 index 000000000..a30de5218 --- /dev/null +++ b/parser/testdata/02158_contingency/ast.json @@ -0,0 +1,205 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function cramersV (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " 
Identifier a" + }, + { + "explain": " Identifier b" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function cramersVBiasCorrected (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Identifier b" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function theilsU (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Identifier b" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function theilsU (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier b" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function contingency (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Identifier b" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (alias a) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Function modulo (alias b) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_150" + } + ], + + "rows": 61, + + "statistics": + { + "elapsed": 0.001687363, + "rows_read": 61, + "bytes_read": 2419 + } +} diff --git a/parser/testdata/02158_contingency/metadata.json b/parser/testdata/02158_contingency/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02158_contingency/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02158_contingency/query.sql b/parser/testdata/02158_contingency/query.sql new file mode 100644 index 000000000..d1e1c76c0 --- /dev/null +++ b/parser/testdata/02158_contingency/query.sql @@ -0,0 +1,5 @@ +SELECT round(cramersV(a, b), 2), round(cramersVBiasCorrected(a, b), 2), round(theilsU(a, b), 2), round(theilsU(b, a), 2), round(contingency(a, b), 2) FROM (SELECT number % 3 AS a, number % 5 AS b FROM numbers(150)); +SELECT round(cramersV(a, b), 2), round(cramersVBiasCorrected(a, b), 2), round(theilsU(a, b), 2), round(theilsU(b, 
a), 2), round(contingency(a, b), 2) FROM (SELECT number AS a, number + 1 AS b FROM numbers(150)); +SELECT round(cramersV(a, b), 2), round(cramersVBiasCorrected(a, b), 2), round(theilsU(a, b), 2), round(theilsU(b, a), 2), round(contingency(a, b), 2) FROM (SELECT number % 10 AS a, number % 10 AS b FROM numbers(150)); +SELECT round(cramersV(a, b), 2), round(cramersVBiasCorrected(a, b), 2), round(theilsU(a, b), 2), round(theilsU(b, a), 2), round(contingency(a, b), 2) FROM (SELECT number % 10 AS a, number % 5 AS b FROM numbers(150)); +SELECT round(cramersV(a, b), 2), round(cramersVBiasCorrected(a, b), 2), round(theilsU(a, b), 2), round(theilsU(b, a), 2), round(contingency(a, b), 2) FROM (SELECT number % 10 AS a, number % 10 = 0 ? number : a AS b FROM numbers(150)); diff --git a/parser/testdata/02158_interval_length_sum/ast.json b/parser/testdata/02158_interval_length_sum/ast.json new file mode 100644 index 000000000..7fd161996 --- /dev/null +++ b/parser/testdata/02158_interval_length_sum/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function intervalLengthSum (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function values (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal 'x Int64, y Int64'" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_10)" + }, + { + "explain": " Literal Tuple_(UInt64_5, UInt64_5)" + }, + { + "explain": " Literal Tuple_(UInt64_5, UInt64_6)" + }, + { + "explain": " Literal Tuple_(UInt64_1, Int64_-1)" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001057658, + "rows_read": 18, + "bytes_read": 754 + } +} diff --git a/parser/testdata/02158_interval_length_sum/metadata.json b/parser/testdata/02158_interval_length_sum/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02158_interval_length_sum/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02158_interval_length_sum/query.sql b/parser/testdata/02158_interval_length_sum/query.sql new file mode 100644 index 000000000..af22a707c --- /dev/null +++ b/parser/testdata/02158_interval_length_sum/query.sql @@ -0,0 +1 @@ +SELECT intervalLengthSum(x, y) FROM values('x Int64, y Int64', (0, 10), (5, 5), (5, 6), (1, -1)); diff --git a/parser/testdata/02158_proportions_ztest/ast.json b/parser/testdata/02158_proportions_ztest/ast.json new file mode 100644 index 000000000..e095138ef --- /dev/null +++ b/parser/testdata/02158_proportions_ztest/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function proportionsZTest (children 1)" + }, + { + "explain": " ExpressionList (children 6)" + }, + { + "explain": " Literal UInt64_10" + }, + { + 
"explain": " Literal UInt64_11" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Literal UInt64_101" + }, + { + "explain": " Literal Float64_0.95" + }, + { + "explain": " Literal 'unpooled'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001047403, + "rows_read": 12, + "bytes_read": 431 + } +} diff --git a/parser/testdata/02158_proportions_ztest/metadata.json b/parser/testdata/02158_proportions_ztest/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02158_proportions_ztest/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02158_proportions_ztest/query.sql b/parser/testdata/02158_proportions_ztest/query.sql new file mode 100644 index 000000000..aee50e57f --- /dev/null +++ b/parser/testdata/02158_proportions_ztest/query.sql @@ -0,0 +1,13 @@ +SELECT proportionsZTest(10, 11, 100, 101, 0.95, 'unpooled'); + +DROP TABLE IF EXISTS proportions_ztest; +CREATE TABLE proportions_ztest (sx UInt64, sy UInt64, tx UInt64, ty UInt64) Engine = Memory(); +INSERT INTO proportions_ztest VALUES (10, 11, 100, 101); +SELECT proportionsZTest(sx, sy, tx, ty, 0.95, 'unpooled') FROM proportions_ztest; +DROP TABLE IF EXISTS proportions_ztest; + + +SELECT + NULL, + proportionsZTest(257, 1048575, 1048575, 257, -inf, NULL), + proportionsZTest(1024, 1025, 2, 2, 'unpooled'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } \ No newline at end of file diff --git a/parser/testdata/02158_ztest/ast.json b/parser/testdata/02158_ztest/ast.json new file mode 100644 index 000000000..c8b2ebfc1 --- /dev/null +++ b/parser/testdata/02158_ztest/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mean_ztest (children 1)" + }, + { + "explain": " Identifier mean_ztest" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001266392, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02158_ztest/metadata.json b/parser/testdata/02158_ztest/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02158_ztest/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02158_ztest/query.sql b/parser/testdata/02158_ztest/query.sql new file mode 100644 index 000000000..1d3e55db9 --- /dev/null +++ b/parser/testdata/02158_ztest/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS mean_ztest; +CREATE TABLE mean_ztest (v int, s UInt8) ENGINE = Memory; +INSERT INTO mean_ztest SELECT number, 0 FROM numbers(100) WHERE number % 2 = 0; +INSERT INTO mean_ztest SELECT number, 1 FROM numbers(100) WHERE number % 2 = 1; +SELECT roundBankers(meanZTest(833.0, 800.0, 0.95)(v, s).1, 16) as z_stat, roundBankers(meanZTest(833.0, 800.0, 0.95)(v, s).2, 16) as p_value, roundBankers(meanZTest(833.0, 800.0, 0.95)(v, s).3, 16) as ci_low, roundBankers(meanZTest(833.0, 800.0, 0.95)(v, s).4, 16) as ci_high FROM mean_ztest; +DROP TABLE IF EXISTS mean_ztest; diff --git a/parser/testdata/02159_left_right/ast.json b/parser/testdata/02159_left_right/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02159_left_right/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02159_left_right/metadata.json b/parser/testdata/02159_left_right/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02159_left_right/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02159_left_right/query.sql 
b/parser/testdata/02159_left_right/query.sql new file mode 100644 index 000000000..e183c6d00 --- /dev/null +++ b/parser/testdata/02159_left_right/query.sql @@ -0,0 +1,86 @@ +-- { echo } + +SELECT left('Hello', 3); +SELECT left('Hello', -3); +SELECT left('Hello', 5); +SELECT left('Hello', -5); +SELECT left('Hello', 6); +SELECT left('Hello', -6); +SELECT left('Hello', 0); +SELECT left('Hello', NULL); + +SELECT left(materialize('Привет'), 4); +SELECT LEFT('Привет', -4); +SELECT left(toNullable('Привет'), 12); +SELECT left(toNullable('Привет'), -12); +SELECT lEFT('Привет', -12); +SELECT left(materialize(toNullable('Привет')), 13); +SELECT left(materialize(toNullable('Привет')), -13); +SELECT left(materialize(toNullable('Привет')), -4); +SELECT left('Привет', -13); +SELECT Left('Привет', 0); +SELECT left('Привет', NULL); + +SELECT leftUTF8('Привет', 4); +SELECT leftUTF8('Привет', -4); +SELECT leftUTF8('Привет', 12); +SELECT leftUTF8('Привет', -12); +SELECT leftUTF8('Привет', 13); +SELECT leftUTF8('Привет', -13); +SELECT leftUTF8('Привет', 0); +SELECT leftUTF8('Привет', NULL); + +SELECT left('Hello', number) FROM numbers(10); +SELECT leftUTF8('Привет', number) FROM numbers(10); +SELECT left('Hello', -number) FROM numbers(10); +SELECT leftUTF8('Привет', -number) FROM numbers(10); + +SELECT leftUTF8('Привет', number % 3 = 0 ? NULL : (number % 2 ? toInt64(number) : -number)) FROM numbers(10); +SELECT leftUTF8(number < 5 ? 'Hello' : 'Привет', number % 3 = 0 ? NULL : (number % 2 ? toInt64(number) : -number)) FROM numbers(10); + +SELECT right('Hello', 3); +SELECT right('Hello', -3); +SELECT right('Hello', 5); +SELECT right('Hello', -5); +SELECT right('Hello', 6); +SELECT right('Hello', -6); +SELECT right('Hello', 0); +SELECT right('Hello', NULL); + +SELECT right(materialize('Hello'), -3); +SELECT left(materialize('Hello'), -3); +SELECT right(materialize('Hello'), -5); +SELECT left(materialize('Hello'), -5); +SELECT rightUTF8(materialize('Hello'), -3); +SELECT leftUTF8(materialize('Hello'), -3); +SELECT rightUTF8(materialize('Hello'), -5); +SELECT leftUTF8(materialize('Hello'), -5); + +SELECT RIGHT(materialize('Привет'), 4); +SELECT RIGHT(materialize('Привет'), -4); +SELECT right('Привет', -4); +SELECT Right(toNullable('Привет'), 12); +SELECT Right(toNullable('Привет'), -12); +SELECT right('Привет', -12); +SELECT rIGHT(materialize(toNullable('Привет')), 13); +SELECT rIGHT(materialize(toNullable('Привет')), -13); +SELECT right('Привет', -13); +SELECT rIgHt('Привет', 0); +SELECT RiGhT('Привет', NULL); + +SELECT rightUTF8('Привет', 4); +SELECT rightUTF8('Привет', -4); +SELECT rightUTF8('Привет', 12); +SELECT rightUTF8('Привет', -12); +SELECT rightUTF8('Привет', 13); +SELECT rightUTF8('Привет', -13); +SELECT rightUTF8('Привет', 0); +SELECT rightUTF8('Привет', NULL); + +SELECT right('Hello', number) FROM numbers(10); +SELECT rightUTF8('Привет', number) FROM numbers(10); +SELECT right('Hello', -number) FROM numbers(10); +SELECT rightUTF8('Привет', -number) FROM numbers(10); + +SELECT rightUTF8('Привет', number % 3 = 0 ? NULL : (number % 2 ? toInt64(number) : -number)) FROM numbers(10); +SELECT rightUTF8(number < 5 ? 'Hello' : 'Привет', number % 3 = 0 ? NULL : (number % 2 ? 
toInt64(number) : -number)) FROM numbers(10); diff --git a/parser/testdata/02160_h3_cell_area_m2/ast.json b/parser/testdata/02160_h3_cell_area_m2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02160_h3_cell_area_m2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02160_h3_cell_area_m2/metadata.json b/parser/testdata/02160_h3_cell_area_m2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02160_h3_cell_area_m2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02160_h3_cell_area_m2/query.sql b/parser/testdata/02160_h3_cell_area_m2/query.sql new file mode 100644 index 000000000..bad06c21d --- /dev/null +++ b/parser/testdata/02160_h3_cell_area_m2/query.sql @@ -0,0 +1,29 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS h3_indexes; + +CREATE TABLE h3_indexes (h3_index UInt64) ENGINE = Memory; + +-- Random geo coordinates were generated using the H3 tool: https://github.com/ClickHouse-Extras/h3/blob/master/src/apps/testapps/mkRandGeo.c at various resolutions from 0 to 15. +-- Corresponding H3 index values were in turn generated with those geo coordinates using `geoToH3(lon, lat, res)` ClickHouse function for the following test. + +INSERT INTO h3_indexes VALUES (579205133326352383); +INSERT INTO h3_indexes VALUES (581263419093549055); +INSERT INTO h3_indexes VALUES (589753847883235327); +INSERT INTO h3_indexes VALUES (594082350283882495); +INSERT INTO h3_indexes VALUES (598372386957426687); +INSERT INTO h3_indexes VALUES (599542359671177215); +INSERT INTO h3_indexes VALUES (604296355086598143); +INSERT INTO h3_indexes VALUES (608785214872748031); +INSERT INTO h3_indexes VALUES (615732192485572607); +INSERT INTO h3_indexes VALUES (617056794467368959); +INSERT INTO h3_indexes VALUES (624586477873168383); +INSERT INTO h3_indexes VALUES (627882919484481535); +INSERT INTO h3_indexes VALUES (634600058503392255); +INSERT INTO h3_indexes VALUES (635544851677385791); +INSERT INTO h3_indexes VALUES (639763125756281263); +INSERT INTO h3_indexes VALUES (644178757620501158); + +SELECT round(h3CellAreaM2(h3_index), 2) FROM h3_indexes ORDER BY h3_index; + +DROP TABLE h3_indexes; diff --git a/parser/testdata/02160_h3_cell_area_rads2/ast.json b/parser/testdata/02160_h3_cell_area_rads2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02160_h3_cell_area_rads2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02160_h3_cell_area_rads2/metadata.json b/parser/testdata/02160_h3_cell_area_rads2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02160_h3_cell_area_rads2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02160_h3_cell_area_rads2/query.sql b/parser/testdata/02160_h3_cell_area_rads2/query.sql new file mode 100644 index 000000000..17d4a7e73 --- /dev/null +++ b/parser/testdata/02160_h3_cell_area_rads2/query.sql @@ -0,0 +1,29 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS h3_indexes; + +CREATE TABLE h3_indexes (h3_index UInt64) ENGINE = Memory; + +-- Random geo coordinates were generated using the H3 tool: https://github.com/ClickHouse-Extras/h3/blob/master/src/apps/testapps/mkRandGeo.c at various resolutions from 0 to 15. +-- Corresponding H3 index values were in turn generated with those geo coordinates using `geoToH3(lon, lat, res)` ClickHouse function for the following test. 
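+-- A hedged cross-check, not part of the original fixture, assuming H3's mean Earth radius of roughly 6371.0072 km: the unit-sphere area should relate to the metric one as h3CellAreaRads2(h) * pow(6371007.2, 2) ~ h3CellAreaM2(h), e.g.: +-- SELECT h3_index, h3CellAreaRads2(h3_index) * pow(6371007.2, 2) / h3CellAreaM2(h3_index) AS ratio FROM h3_indexes ORDER BY h3_index;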
+ +INSERT INTO h3_indexes VALUES (579205133326352383); +INSERT INTO h3_indexes VALUES (581263419093549055); +INSERT INTO h3_indexes VALUES (589753847883235327); +INSERT INTO h3_indexes VALUES (594082350283882495); +INSERT INTO h3_indexes VALUES (598372386957426687); +INSERT INTO h3_indexes VALUES (599542359671177215); +INSERT INTO h3_indexes VALUES (604296355086598143); +INSERT INTO h3_indexes VALUES (608785214872748031); +INSERT INTO h3_indexes VALUES (615732192485572607); +INSERT INTO h3_indexes VALUES (617056794467368959); +INSERT INTO h3_indexes VALUES (624586477873168383); +INSERT INTO h3_indexes VALUES (627882919484481535); +INSERT INTO h3_indexes VALUES (634600058503392255); +INSERT INTO h3_indexes VALUES (635544851677385791); +INSERT INTO h3_indexes VALUES (639763125756281263); +INSERT INTO h3_indexes VALUES (644178757620501158); + +SELECT substring(h3CellAreaRads2(h3_index)::String, 1, 10) FROM h3_indexes ORDER BY h3_index; + +DROP TABLE h3_indexes; diff --git a/parser/testdata/02160_h3_hex_area_Km2/ast.json b/parser/testdata/02160_h3_hex_area_Km2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02160_h3_hex_area_Km2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02160_h3_hex_area_Km2/metadata.json b/parser/testdata/02160_h3_hex_area_Km2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02160_h3_hex_area_Km2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02160_h3_hex_area_Km2/query.sql b/parser/testdata/02160_h3_hex_area_Km2/query.sql new file mode 100644 index 000000000..e6c73fa9b --- /dev/null +++ b/parser/testdata/02160_h3_hex_area_Km2/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-fasttest + +SELECT h3HexAreaKm2(0); +SELECT h3HexAreaKm2(1); +SELECT h3HexAreaKm2(2); +SELECT h3HexAreaKm2(3); +SELECT h3HexAreaKm2(4); +SELECT h3HexAreaKm2(5); +SELECT h3HexAreaKm2(6); +SELECT h3HexAreaKm2(7); +SELECT h3HexAreaKm2(8); +SELECT h3HexAreaKm2(9); +SELECT h3HexAreaKm2(10); +SELECT h3HexAreaKm2(11); +SELECT h3HexAreaKm2(12); +SELECT h3HexAreaKm2(13); +SELECT h3HexAreaKm2(14); +SELECT h3HexAreaKm2(15); diff --git a/parser/testdata/02160_monthname/ast.json b/parser/testdata/02160_monthname/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02160_monthname/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02160_monthname/metadata.json b/parser/testdata/02160_monthname/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02160_monthname/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02160_monthname/query.sql b/parser/testdata/02160_monthname/query.sql new file mode 100644 index 000000000..2c5bd5b57 --- /dev/null +++ b/parser/testdata/02160_monthname/query.sql @@ -0,0 +1,71 @@ +WITH + toDate('2021-01-14') AS date_value, + toDateTime('2021-01-14 11:22:33') AS date_time_value, + toDateTime64('2021-01-14 11:22:33', 3) AS date_time_64_value +SELECT monthName(date_value), monthName(date_time_value), monthName(date_time_64_value); + +WITH + toDate('2021-02-14') AS date_value, + toDateTime('2021-02-14 11:22:33') AS date_time_value, + toDateTime64('2021-02-14 11:22:33', 3) AS date_time_64_value +SELECT monthName(date_value), monthName(date_time_value), monthName(date_time_64_value); + +WITH + toDate('2021-03-14') AS date_value, + toDateTime('2021-03-14 11:22:33') AS date_time_value, + toDateTime64('2021-03-14 11:22:33', 3) AS 
date_time_64_value +SELECT monthName(date_value), monthName(date_time_value), monthName(date_time_64_value); + +WITH + toDate('2021-04-14') AS date_value, + toDateTime('2021-04-14 11:22:33') AS date_time_value, + toDateTime64('2021-04-14 11:22:33', 3) AS date_time_64_value +SELECT monthName(date_value), monthName(date_time_value), monthName(date_time_64_value); + +WITH + toDate('2021-05-14') AS date_value, + toDateTime('2021-05-14 11:22:33') AS date_time_value, + toDateTime64('2021-05-14 11:22:33', 3) AS date_time_64_value +SELECT monthName(date_value), monthName(date_time_value), monthName(date_time_64_value); + +WITH + toDate('2021-06-14') AS date_value, + toDateTime('2021-06-14 11:22:33') AS date_time_value, + toDateTime64('2021-06-14 11:22:33', 3) AS date_time_64_value +SELECT monthName(date_value), monthName(date_time_value), monthName(date_time_64_value); + +WITH + toDate('2021-07-14') AS date_value, + toDateTime('2021-07-14 11:22:33') AS date_time_value, + toDateTime64('2021-07-14 11:22:33', 3) AS date_time_64_value +SELECT monthName(date_value), monthName(date_time_value), monthName(date_time_64_value); + +WITH + toDate('2021-08-14') AS date_value, + toDateTime('2021-08-14 11:22:33') AS date_time_value, + toDateTime64('2021-08-14 11:22:33', 3) AS date_time_64_value +SELECT monthName(date_value), monthName(date_time_value), monthName(date_time_64_value); + +WITH + toDate('2021-09-14') AS date_value, + toDateTime('2021-09-14 11:22:33') AS date_time_value, + toDateTime64('2021-09-14 11:22:33', 3) AS date_time_64_value +SELECT monthName(date_value), monthName(date_time_value), monthName(date_time_64_value); + +WITH + toDate('2021-10-14') AS date_value, + toDateTime('2021-10-14 11:22:33') AS date_time_value, + toDateTime64('2021-10-14 11:22:33', 3) AS date_time_64_value +SELECT monthName(date_value), monthName(date_time_value), monthName(date_time_64_value); + +WITH + toDate('2021-11-14') AS date_value, + toDateTime('2021-11-14 11:22:33') AS date_time_value, + toDateTime64('2021-11-14 11:22:33', 3) AS date_time_64_value +SELECT monthName(date_value), monthName(date_time_value), monthName(date_time_64_value); + +WITH + toDate('2021-12-14') AS date_value, + toDateTime('2021-12-14 11:22:33') AS date_time_value, + toDateTime64('2021-12-14 11:22:33', 3) AS date_time_64_value +SELECT monthName(date_value), monthName(date_time_value), monthName(date_time_64_value); diff --git a/parser/testdata/02160_special_functions/ast.json b/parser/testdata/02160_special_functions/ast.json new file mode 100644 index 000000000..59c70d1a6 --- /dev/null +++ b/parser/testdata/02160_special_functions/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'UInt8'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001236072, + "rows_read": 8, + "bytes_read": 286 + } +} diff --git a/parser/testdata/02160_special_functions/metadata.json b/parser/testdata/02160_special_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02160_special_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02160_special_functions/query.sql b/parser/testdata/02160_special_functions/query.sql new file mode 100644 index 000000000..6d18e7d0d --- /dev/null +++ b/parser/testdata/02160_special_functions/query.sql @@ -0,0 +1,44 @@ +SELECT CAST(1 AS UInt8); +SELECT CAST([] AS Array(UInt8)); +SELECT CAST(1, 'UInt8'); + +SELECT SUBSTRING('Hello, world' FROM 8); +SELECT SUBSTRING('Hello, world' FROM 8 FOR 5); +SELECT SUBSTRING('Hello, world', 8); +SELECT SUBSTRING('Hello, world', 8, 5); + +SELECT TRIM(LEADING 'abc' FROM 'abcdef'); +SELECT TRIM(TRAILING 'def' FROM 'abcdef'); +SELECT TRIM(BOTH 'af' FROM 'abcdef'); +SELECT TRIM(' abcdef '); +SELECT LTRIM(' abcdef '); +SELECT RTRIM(' abcdef '); + +SELECT EXTRACT(YEAR FROM DATE '2022-01-01'); +SELECT EXTRACT('Hello, world', '^\w+'); + +SELECT POSITION('ll' IN 'Hello'); +SELECT POSITION('Hello', 'll'); + +SELECT DATE_ADD(YEAR, 1, DATE '2022-01-01'); +SELECT DATE_ADD(INTERVAL 1 YEAR, DATE '2022-01-01'); +SELECT DATEADD(YEAR, 1, DATE '2022-01-01'); +SELECT DATEADD(INTERVAL 1 YEAR, DATE '2022-01-01'); +SELECT TIMESTAMP_ADD(YEAR, 1, DATE '2022-01-01'); +SELECT TIMESTAMP_ADD(INTERVAL 1 YEAR, DATE '2022-01-01'); +SELECT TIMESTAMPADD(YEAR, 1, DATE '2022-01-01'); +SELECT TIMESTAMPADD(INTERVAL 1 YEAR, DATE '2022-01-01'); + +SELECT DATE_SUB(YEAR, 1, DATE '2022-01-01'); +SELECT DATE_SUB(DATE '2022-01-01', INTERVAL 1 YEAR); +SELECT DATESUB(YEAR, 1, DATE '2022-01-01'); +SELECT DATESUB(DATE '2022-01-01', INTERVAL 1 YEAR); +SELECT TIMESTAMP_SUB(YEAR, 1, DATE '2022-01-01'); +SELECT TIMESTAMP_SUB(DATE '2022-01-01', INTERVAL 1 YEAR); +SELECT TIMESTAMPSUB(YEAR, 1, DATE '2022-01-01'); +SELECT TIMESTAMPSUB(DATE '2022-01-01', INTERVAL 1 YEAR); + +SELECT DATE_DIFF(YEAR, DATE '2021-01-01', DATE '2022-01-01'); +SELECT DATEDIFF(YEAR, DATE '2021-01-01', DATE '2022-01-01'); + +SELECT EXISTS (SELECT 1); diff --git a/parser/testdata/02161_addressToLineWithInlines/ast.json b/parser/testdata/02161_addressToLineWithInlines/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02161_addressToLineWithInlines/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02161_addressToLineWithInlines/metadata.json b/parser/testdata/02161_addressToLineWithInlines/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02161_addressToLineWithInlines/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02161_addressToLineWithInlines/query.sql b/parser/testdata/02161_addressToLineWithInlines/query.sql new file mode 100644 index 000000000..a138ab2ed --- /dev/null +++ b/parser/testdata/02161_addressToLineWithInlines/query.sql @@ -0,0 +1,26 @@ +-- Tags: no-tsan, no-asan, no-ubsan, no-msan, no-debug, no-fasttest + +SET allow_introspection_functions = 0; +SELECT addressToLineWithInlines(1); -- { serverError FUNCTION_NOT_ALLOWED } + +SET allow_introspection_functions = 1; +SET query_profiler_real_time_period_ns = 0; +SET query_profiler_cpu_time_period_ns = 1000000; +SET log_queries = 1, max_rows_to_read = 0; +SELECT count() FROM numbers_mt(10000000000) SETTINGS log_comment='02161_test_case'; +SET log_queries = 0; +SET query_profiler_cpu_time_period_ns = 0; +SYSTEM FLUSH LOGS query_log, trace_log; +SET max_execution_time = 300; + +WITH + lineWithInlines AS + ( + SELECT DISTINCT addressToLineWithInlines(arrayJoin(trace)) AS lineWithInlines FROM system.trace_log WHERE query_id = + ( + SELECT query_id FROM system.query_log WHERE current_database = currentDatabase() AND 
log_comment='02161_test_case' ORDER BY event_time DESC LIMIT 1 + ) + ) +SELECT 'has inlines:', or(max(length(lineWithInlines)) > 1, max(locate(lineWithInlines[1], ':')) = 0) FROM lineWithInlines SETTINGS short_circuit_function_evaluation='enable'; +-- `max(length(lineWithInlines)) > 1` check there is any inlines. +-- `max(locate(lineWithInlines[1], ':')) = 0` check whether none could get a symbol. diff --git a/parser/testdata/02161_array_first_last/ast.json b/parser/testdata/02161_array_first_last/ast.json new file mode 100644 index 000000000..4a11fa287 --- /dev/null +++ b/parser/testdata/02161_array_first_last/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'ArrayFirst constant predicate'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001099575, + "rows_read": 5, + "bytes_read": 200 + } +} diff --git a/parser/testdata/02161_array_first_last/metadata.json b/parser/testdata/02161_array_first_last/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02161_array_first_last/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02161_array_first_last/query.sql b/parser/testdata/02161_array_first_last/query.sql new file mode 100644 index 000000000..f5be8cd26 --- /dev/null +++ b/parser/testdata/02161_array_first_last/query.sql @@ -0,0 +1,21 @@ +SELECT 'ArrayFirst constant predicate'; +SELECT arrayFirst(x -> 1, emptyArrayUInt8()); +SELECT arrayFirst(x -> 0, emptyArrayUInt8()); +SELECT arrayFirst(x -> 1, [1, 2, 3]); +SELECT arrayFirst(x -> 0, [1, 2, 3]); + +SELECT 'ArrayFirst non constant predicate'; +SELECT arrayFirst(x -> x >= 2, emptyArrayUInt8()); +SELECT arrayFirst(x -> x >= 2, [1, 2, 3]); +SELECT arrayFirst(x -> x >= 2, materialize([1, 2, 3])); + +SELECT 'ArrayLast constant predicate'; +SELECT arrayLast(x -> 1, emptyArrayUInt8()); +SELECT arrayLast(x -> 0, emptyArrayUInt8()); +SELECT arrayLast(x -> 1, [1, 2, 3]); +SELECT arrayLast(x -> 0, [1, 2, 3]); + +SELECT 'ArrayLast non constant predicate'; +SELECT arrayLast(x -> x >= 2, emptyArrayUInt8()); +SELECT arrayLast(x -> x >= 2, [1, 2, 3]); +SELECT arrayLast(x -> x >= 2, materialize([1, 2, 3])); diff --git a/parser/testdata/02162_array_first_last_index/ast.json b/parser/testdata/02162_array_first_last_index/ast.json new file mode 100644 index 000000000..50eb398a6 --- /dev/null +++ b/parser/testdata/02162_array_first_last_index/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'ArrayFirstIndex constant predicate'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001585874, + "rows_read": 5, + "bytes_read": 205 + } +} diff --git a/parser/testdata/02162_array_first_last_index/metadata.json b/parser/testdata/02162_array_first_last_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02162_array_first_last_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02162_array_first_last_index/query.sql b/parser/testdata/02162_array_first_last_index/query.sql new file mode 100644 index 000000000..af107f0f4 --- /dev/null +++ b/parser/testdata/02162_array_first_last_index/query.sql @@ -0,0 +1,21 @@ +SELECT 'ArrayFirstIndex constant predicate'; +SELECT arrayFirstIndex(x -> 1, emptyArrayUInt8()); +SELECT arrayFirstIndex(x -> 0, emptyArrayUInt8()); +SELECT arrayFirstIndex(x -> 1, [1, 2, 3]); +SELECT arrayFirstIndex(x -> 0, [1, 2, 3]); + +SELECT 'ArrayFirstIndex non constant predicate'; +SELECT arrayFirstIndex(x -> x >= 2, emptyArrayUInt8()); +SELECT arrayFirstIndex(x -> x >= 2, [1, 2, 3]); +SELECT arrayFirstIndex(x -> x >= 2, materialize([1, 2, 3])); + +SELECT 'ArrayLastIndex constant predicate'; +SELECT arrayLastIndex(x -> 1, emptyArrayUInt8()); +SELECT arrayLastIndex(x -> 0, emptyArrayUInt8()); +SELECT arrayLastIndex(x -> 1, [1, 2, 3]); +SELECT arrayLastIndex(x -> 0, [1, 2, 3]); + +SELECT 'ArrayLastIndex non constant predicate'; +SELECT arrayLastIndex(x -> x >= 2, emptyArrayUInt8()); +SELECT arrayLastIndex(x -> x >= 2, [1, 2, 3]); +SELECT arrayLastIndex(x -> x >= 2, materialize([1, 2, 3])); diff --git a/parser/testdata/02162_range_hashed_dictionary_ddl_expression/ast.json b/parser/testdata/02162_range_hashed_dictionary_ddl_expression/ast.json new file mode 100644 index 000000000..783d56408 --- /dev/null +++ b/parser/testdata/02162_range_hashed_dictionary_ddl_expression/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02162_test_table (children 1)" + }, + { + "explain": " Identifier 02162_test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001224782, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/02162_range_hashed_dictionary_ddl_expression/metadata.json b/parser/testdata/02162_range_hashed_dictionary_ddl_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02162_range_hashed_dictionary_ddl_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02162_range_hashed_dictionary_ddl_expression/query.sql b/parser/testdata/02162_range_hashed_dictionary_ddl_expression/query.sql new file mode 100644 index 000000000..24eb08137 --- /dev/null +++ b/parser/testdata/02162_range_hashed_dictionary_ddl_expression/query.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS 02162_test_table; +CREATE TABLE 02162_test_table +( + id UInt64, + value String, + range_value UInt64 +) ENGINE=TinyLog; + +INSERT INTO 02162_test_table VALUES (0, 'Value', 1); + +DROP DICTIONARY IF EXISTS 02162_test_dictionary; +CREATE DICTIONARY 02162_test_dictionary +( + id UInt64, + value String, + range_value UInt64, + start UInt64 EXPRESSION range_value, + end UInt64 EXPRESSION range_value +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE '02162_test_table')) +LAYOUT(RANGE_HASHED()) +RANGE(MIN start MAX end) +LIFETIME(0); + +SELECT * FROM 02162_test_dictionary;
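+-- A hedged lookup sketch, not exercised by the original test: for a RANGE_HASHED layout, point reads are expected to go through dictGet with the key plus a range argument, here e.g.: +-- SELECT dictGet('02162_test_dictionary', 'value', toUInt64(0), toUInt64(1));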
+ +DROP DICTIONARY 02162_test_dictionary; +DROP TABLE 02162_test_table; diff --git a/parser/testdata/02163_operators/ast.json b/parser/testdata/02163_operators/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02163_operators/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02163_operators/metadata.json b/parser/testdata/02163_operators/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02163_operators/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02163_operators/query.sql b/parser/testdata/02163_operators/query.sql new file mode 100644 index 000000000..3f2d7d8bb --- /dev/null +++ b/parser/testdata/02163_operators/query.sql @@ -0,0 +1,2 @@ +WITH 2 AS `b.c`, [4, 5] AS a, 6 AS u, 3 AS v, 2 AS d, TRUE AS e, 1 AS f, 0 AS g, 2 AS h, 'Hello' AS i, 'World' AS j, 'hi' AS w, NULL AS k, (1, 2) AS l, 2 AS m, 3 AS n, [] AS o, [1] AS p, 1 AS q, q AS r, 1 AS s, 1 AS t +SELECT INTERVAL CASE CASE WHEN NOT -a[`b.c`] * u DIV v + d IS NOT NULL AND e OR f BETWEEN g AND h THEN i ELSE j END WHEN w THEN k END || [l, (m, n)] MINUTE IS NULL OR NOT o::Array(INT) = p <> q < r > s != t AS upyachka; diff --git a/parser/testdata/02163_shard_num/ast.json b/parser/testdata/02163_shard_num/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02163_shard_num/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02163_shard_num/metadata.json b/parser/testdata/02163_shard_num/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02163_shard_num/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02163_shard_num/query.sql b/parser/testdata/02163_shard_num/query.sql new file mode 100644 index 000000000..af1688792 --- /dev/null +++ b/parser/testdata/02163_shard_num/query.sql @@ -0,0 +1,10 @@ +-- { echoOn } + +SELECT shardNum() AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY _shard_num ORDER BY _shard_num; +SELECT shardNum() AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY shard_num ORDER BY shard_num; +SELECT _shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY _shard_num ORDER BY _shard_num; +SELECT _shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) GROUP BY shard_num ORDER BY shard_num; +SELECT a._shard_num AS shard_num, sum(1) as rows FROM remote('127.{1,2}', system, one) a GROUP BY shard_num ORDER BY shard_num; +SELECT _shard_num FROM remote('127.1', system.one) AS a INNER JOIN (SELECT _shard_num FROM system.one) AS b USING (dummy); -- { serverError NOT_IMPLEMENTED, UNKNOWN_IDENTIFIER } + +-- { echoOff } diff --git a/parser/testdata/02164_materialized_view_support_virtual_column/ast.json b/parser/testdata/02164_materialized_view_support_virtual_column/ast.json new file mode 100644 index 000000000..47352ca3e --- /dev/null +++ b/parser/testdata/02164_materialized_view_support_virtual_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_tb (children 1)" + }, + { + "explain": " Identifier test_tb" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001236471, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/02164_materialized_view_support_virtual_column/metadata.json b/parser/testdata/02164_materialized_view_support_virtual_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++
b/parser/testdata/02164_materialized_view_support_virtual_column/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS test_tb; +CREATE TABLE test_tb (a UInt64, s String) ENGINE = MergeTree() ORDER BY a; + +DROP VIEW IF EXISTS test_view_tb; +CREATE MATERIALIZED VIEW test_view_tb ENGINE = MergeTree() ORDER BY a AS SELECT * FROM test_tb; + +INSERT INTO test_tb VALUES (1, '1'), (2, '2'), (3, '3'); + +SELECT count(_part) FROM test_view_tb; diff --git a/parser/testdata/02165_h3_edge_length_km/ast.json b/parser/testdata/02165_h3_edge_length_km/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02165_h3_edge_length_km/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02165_h3_edge_length_km/metadata.json b/parser/testdata/02165_h3_edge_length_km/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02165_h3_edge_length_km/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02165_h3_edge_length_km/query.sql b/parser/testdata/02165_h3_edge_length_km/query.sql new file mode 100644 index 000000000..e67b691ef --- /dev/null +++ b/parser/testdata/02165_h3_edge_length_km/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-fasttest + +SELECT h3EdgeLengthKm(0); +SELECT h3EdgeLengthKm(1); +SELECT h3EdgeLengthKm(2); +SELECT h3EdgeLengthKm(3); +SELECT h3EdgeLengthKm(4); +SELECT h3EdgeLengthKm(5); +SELECT h3EdgeLengthKm(6); +SELECT h3EdgeLengthKm(7); +SELECT h3EdgeLengthKm(8); +SELECT h3EdgeLengthKm(9); +SELECT h3EdgeLengthKm(10); +SELECT h3EdgeLengthKm(11); +SELECT h3EdgeLengthKm(12); +SELECT h3EdgeLengthKm(13); +SELECT h3EdgeLengthKm(14); +SELECT h3EdgeLengthKm(15); diff --git a/parser/testdata/02165_h3_exact_edge_length_Km/ast.json b/parser/testdata/02165_h3_exact_edge_length_Km/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02165_h3_exact_edge_length_Km/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02165_h3_exact_edge_length_Km/metadata.json b/parser/testdata/02165_h3_exact_edge_length_Km/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02165_h3_exact_edge_length_Km/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02165_h3_exact_edge_length_Km/query.sql b/parser/testdata/02165_h3_exact_edge_length_Km/query.sql new file mode 100644 index 000000000..6a3a288cc --- /dev/null +++ b/parser/testdata/02165_h3_exact_edge_length_Km/query.sql @@ -0,0 +1,28 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS h3_indexes; + +CREATE TABLE h3_indexes (h3_index UInt64) ENGINE = Memory; + +-- Test h3 indices selected from original test fixture: https://github.com/uber/h3/blob/master/src/apps/testapps/testH3CellAreaExhaustive.c + +INSERT INTO h3_indexes VALUES (1298057039473278975); +INSERT INTO h3_indexes VALUES (1370114633511206911); +INSERT INTO h3_indexes VALUES (1442172227549134847); +INSERT INTO h3_indexes VALUES (1514229821587062783); +INSERT INTO h3_indexes VALUES (1232301846085763071); +INSERT INTO h3_indexes VALUES (1304359440123691007); +INSERT INTO h3_indexes VALUES (1376417034161618943); +INSERT INTO h3_indexes VALUES (1448474628199546879); +INSERT INTO h3_indexes VALUES (1598506838100279295); +INSERT INTO h3_indexes VALUES (1238219417666453503); +INSERT INTO h3_indexes VALUES (1310277011704381439); +INSERT INTO h3_indexes VALUES (1382334605742309375); +INSERT INTO h3_indexes VALUES (1458182628678041599); +INSERT INTO h3_indexes VALUES (1530240222715969535); +INSERT 
INTO h3_indexes VALUES (1602297816753897471); +INSERT INTO h3_indexes VALUES (1242009915283734527); + +SELECT round(h3ExactEdgeLengthKm(h3_index), 2) FROM h3_indexes ORDER BY h3_index; + +DROP TABLE h3_indexes; diff --git a/parser/testdata/02165_h3_exact_edge_length_m/ast.json b/parser/testdata/02165_h3_exact_edge_length_m/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02165_h3_exact_edge_length_m/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02165_h3_exact_edge_length_m/metadata.json b/parser/testdata/02165_h3_exact_edge_length_m/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02165_h3_exact_edge_length_m/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02165_h3_exact_edge_length_m/query.sql b/parser/testdata/02165_h3_exact_edge_length_m/query.sql new file mode 100644 index 000000000..06b50670c --- /dev/null +++ b/parser/testdata/02165_h3_exact_edge_length_m/query.sql @@ -0,0 +1,28 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS h3_indexes; + +CREATE TABLE h3_indexes (h3_index UInt64) ENGINE = Memory; + +-- Test h3 indices selected from original test fixture: https://github.com/uber/h3/blob/master/src/apps/testapps/testH3CellAreaExhaustive.c + +INSERT INTO h3_indexes VALUES (1298057039473278975); +INSERT INTO h3_indexes VALUES (1370114633511206911); +INSERT INTO h3_indexes VALUES (1442172227549134847); +INSERT INTO h3_indexes VALUES (1514229821587062783); +INSERT INTO h3_indexes VALUES (1232301846085763071); +INSERT INTO h3_indexes VALUES (1304359440123691007); +INSERT INTO h3_indexes VALUES (1376417034161618943); +INSERT INTO h3_indexes VALUES (1448474628199546879); +INSERT INTO h3_indexes VALUES (1598506838100279295); +INSERT INTO h3_indexes VALUES (1238219417666453503); +INSERT INTO h3_indexes VALUES (1310277011704381439); +INSERT INTO h3_indexes VALUES (1382334605742309375); +INSERT INTO h3_indexes VALUES (1458182628678041599); +INSERT INTO h3_indexes VALUES (1530240222715969535); +INSERT INTO h3_indexes VALUES (1602297816753897471); +INSERT INTO h3_indexes VALUES (1242009915283734527); + +SELECT round(h3ExactEdgeLengthM(h3_index), 2) FROM h3_indexes ORDER BY h3_index; + +DROP TABLE h3_indexes; diff --git a/parser/testdata/02165_h3_exact_edge_length_rads/ast.json b/parser/testdata/02165_h3_exact_edge_length_rads/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02165_h3_exact_edge_length_rads/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02165_h3_exact_edge_length_rads/metadata.json b/parser/testdata/02165_h3_exact_edge_length_rads/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02165_h3_exact_edge_length_rads/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02165_h3_exact_edge_length_rads/query.sql b/parser/testdata/02165_h3_exact_edge_length_rads/query.sql new file mode 100644 index 000000000..b03d52715 --- /dev/null +++ b/parser/testdata/02165_h3_exact_edge_length_rads/query.sql @@ -0,0 +1,28 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS h3_indexes; + +CREATE TABLE h3_indexes (h3_index UInt64) ENGINE = Memory; + +-- Test h3 indices selected from original test fixture: https://github.com/uber/h3/blob/master/src/apps/testapps/testH3CellAreaExhaustive.c + +INSERT INTO h3_indexes VALUES (1298057039473278975); +INSERT INTO h3_indexes VALUES (1370114633511206911); +INSERT INTO h3_indexes VALUES 
(1442172227549134847); +INSERT INTO h3_indexes VALUES (1514229821587062783); +INSERT INTO h3_indexes VALUES (1232301846085763071); +INSERT INTO h3_indexes VALUES (1304359440123691007); +INSERT INTO h3_indexes VALUES (1376417034161618943); +INSERT INTO h3_indexes VALUES (1448474628199546879); +INSERT INTO h3_indexes VALUES (1598506838100279295); +INSERT INTO h3_indexes VALUES (1238219417666453503); +INSERT INTO h3_indexes VALUES (1310277011704381439); +INSERT INTO h3_indexes VALUES (1382334605742309375); +INSERT INTO h3_indexes VALUES (1458182628678041599); +INSERT INTO h3_indexes VALUES (1530240222715969535); +INSERT INTO h3_indexes VALUES (1602297816753897471); +INSERT INTO h3_indexes VALUES (1242009915283734527); + +SELECT round(h3ExactEdgeLengthRads(h3_index), 5) FROM h3_indexes ORDER BY h3_index; + +DROP TABLE h3_indexes; diff --git a/parser/testdata/02165_h3_num_hexagons/ast.json b/parser/testdata/02165_h3_num_hexagons/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02165_h3_num_hexagons/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02165_h3_num_hexagons/metadata.json b/parser/testdata/02165_h3_num_hexagons/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02165_h3_num_hexagons/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02165_h3_num_hexagons/query.sql b/parser/testdata/02165_h3_num_hexagons/query.sql new file mode 100644 index 000000000..9753d6dae --- /dev/null +++ b/parser/testdata/02165_h3_num_hexagons/query.sql @@ -0,0 +1,19 @@ +-- Tags: no-fasttest + +SELECT h3NumHexagons(0); +SELECT h3NumHexagons(1); +SELECT h3NumHexagons(2); +SELECT h3NumHexagons(3); +SELECT h3NumHexagons(4); +SELECT h3NumHexagons(5); +SELECT h3NumHexagons(6); +SELECT h3NumHexagons(7); +SELECT h3NumHexagons(8); +SELECT h3NumHexagons(9); +SELECT h3NumHexagons(10); +SELECT h3NumHexagons(11); +SELECT h3NumHexagons(12); +SELECT h3NumHexagons(13); +SELECT h3NumHexagons(14); +SELECT h3NumHexagons(15); +SELECT h3NumHexagons(16); -- { serverError ARGUMENT_OUT_OF_BOUND } diff --git a/parser/testdata/02165_insert_from_infile/ast.json b/parser/testdata/02165_insert_from_infile/ast.json new file mode 100644 index 000000000..f978cceec --- /dev/null +++ b/parser/testdata/02165_insert_from_infile/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Explain EXPLAIN SYNTAX (children 1)" + }, + { + "explain": " InsertQuery (children 3)" + }, + { + "explain": " Literal 'data.file'" + }, + { + "explain": " Identifier test" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function input (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'x UInt32'" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001844664, + "rows_read": 15, + "bytes_read": 579 + } +} diff --git a/parser/testdata/02165_insert_from_infile/metadata.json b/parser/testdata/02165_insert_from_infile/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02165_insert_from_infile/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02165_insert_from_infile/query.sql b/parser/testdata/02165_insert_from_infile/query.sql new file mode 100644 index 000000000..8cc851fa4 --- /dev/null +++ b/parser/testdata/02165_insert_from_infile/query.sql @@ -0,0 +1,4 @@ +EXPLAIN SYNTAX INSERT INTO test FROM INFILE 'data.file' SELECT x from input('x UInt32') FORMAT TSV; +EXPLAIN SYNTAX INSERT INTO test FROM INFILE 'data.file' WATCH view; -- { clientError SYNTAX_ERROR } +EXPLAIN SYNTAX INSERT INTO test FROM INFILE 'data.file' VALUES (1); -- { clientError SYNTAX_ERROR } +EXPLAIN SYNTAX INSERT INTO test FROM INFILE 'data.file' WITH number AS x SELECT number FROM input('number UInt32'); diff --git a/parser/testdata/02165_replicated_grouping_sets/ast.json b/parser/testdata/02165_replicated_grouping_sets/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02165_replicated_grouping_sets/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02165_replicated_grouping_sets/metadata.json b/parser/testdata/02165_replicated_grouping_sets/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02165_replicated_grouping_sets/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02165_replicated_grouping_sets/query.sql b/parser/testdata/02165_replicated_grouping_sets/query.sql new file mode 100644 index 000000000..30f7021ff --- /dev/null +++ b/parser/testdata/02165_replicated_grouping_sets/query.sql @@ -0,0 +1,65 @@ +SELECT + k1, + k2, + SUM(number) AS sum_value, + count() AS count_value +FROM numbers(6) +GROUP BY + GROUPING SETS + ( + (number % 2 AS k1), + (number % 3 AS k2) + ) +ORDER BY + sum_value ASC, + count_value ASC; + +SELECT + k1, + k2, + SUM(number) AS sum_value, + count() AS count_value +FROM remote('127.0.0.{2,3}', numbers(6)) +GROUP BY + GROUPING SETS + ( + (number % 2 AS k1), + (number % 3 AS k2) + ) +ORDER BY + sum_value ASC, + count_value ASC; + +SELECT + k2, + SUM(number) AS sum_value, + count() AS count_value +FROM remote('127.0.0.{2,3}', numbers(6)) +GROUP BY + GROUPING SETS + ( + (number % 3 AS k2) + ) +ORDER BY + sum_value ASC, + count_value ASC; + +set prefer_localhost_replica = 1; + +-- { echoOn } + +SELECT count(), arrayMap(x -> '.', range(number % 10)) AS k FROM remote('127.0.0.{1,2}', numbers(10)) where number > ( queryID() = initialQueryID()) GROUP BY GROUPING SETS ((k)) ORDER BY k SETTINGS group_by_two_level_threshold=9, max_bytes_before_external_group_by=10000000000, max_bytes_ratio_before_external_group_by=0; +SELECT count(), arrayMap(x -> '.', range(number % 10)) AS k FROM remote('127.0.0.{1,2}', numbers(10)) where number > ( queryID() = initialQueryID()) GROUP BY GROUPING SETS ((k), (k, k)) ORDER BY k SETTINGS group_by_two_level_threshold=9, max_bytes_before_external_group_by=10000000000, max_bytes_ratio_before_external_group_by=0; + +SELECT count(), toString(number) AS k FROM remote('127.0.0.{1,2}', numbers(10)) where number > ( queryID() = initialQueryID()) GROUP BY GROUPING SETS ((k)) ORDER BY k SETTINGS group_by_two_level_threshold=9, max_bytes_before_external_group_by=10000000000, max_bytes_ratio_before_external_group_by=0; +SELECT count(), toString(number) AS k FROM remote('127.0.0.{1,2}', numbers(10)) where number > ( queryID() = initialQueryID()) GROUP BY GROUPING SETS ((k), (k, k)) ORDER BY k SETTINGS group_by_two_level_threshold=9, max_bytes_before_external_group_by=10000000000,
max_bytes_ratio_before_external_group_by=0; +SELECT count(), toString(number) AS k FROM remote('127.0.0.{1,2}', numbers(10)) where number > ( queryID() = initialQueryID()) GROUP BY GROUPING SETS ((k), (number + 1, k)) ORDER BY k SETTINGS group_by_two_level_threshold=9, max_bytes_before_external_group_by=10000000000, max_bytes_ratio_before_external_group_by=0; +SELECT count(), toString(number) AS k FROM remote('127.0.0.{1,2}', numbers(10)) where number > ( queryID() = initialQueryID()) GROUP BY GROUPING SETS ((k), (number + 1, k), (k, number + 2)) ORDER BY k SETTINGS group_by_two_level_threshold=9, max_bytes_before_external_group_by=10000000000, max_bytes_ratio_before_external_group_by=0; + +SELECT count(), arrayMap(x -> '.', range(number % 10)) AS k FROM remote('127.0.0.{3,2}', numbers(10)) where number > ( queryID() = initialQueryID()) GROUP BY GROUPING SETS ((k)) ORDER BY k SETTINGS group_by_two_level_threshold=9, max_bytes_before_external_group_by=10000000000, max_bytes_ratio_before_external_group_by=0; +SELECT count(), arrayMap(x -> '.', range(number % 10)) AS k FROM remote('127.0.0.{3,2}', numbers(10)) where number > ( queryID() = initialQueryID()) GROUP BY GROUPING SETS ((k), (k, k)) ORDER BY k SETTINGS group_by_two_level_threshold=9, max_bytes_before_external_group_by=10000000000, max_bytes_ratio_before_external_group_by=0; + +SELECT count(), toString(number) AS k FROM remote('127.0.0.{3,2}', numbers(10)) where number > ( queryID() = initialQueryID()) GROUP BY GROUPING SETS ((k)) ORDER BY k SETTINGS group_by_two_level_threshold=9, max_bytes_before_external_group_by=10000000000, max_bytes_ratio_before_external_group_by=0; +SELECT count(), toString(number) AS k FROM remote('127.0.0.{3,2}', numbers(10)) where number > ( queryID() = initialQueryID()) GROUP BY GROUPING SETS ((k), (k, k)) ORDER BY k SETTINGS group_by_two_level_threshold=9, max_bytes_before_external_group_by=10000000000, max_bytes_ratio_before_external_group_by=0; +SELECT count(), toString(number) AS k FROM remote('127.0.0.{3,2}', numbers(10)) where number > ( queryID() = initialQueryID()) GROUP BY GROUPING SETS ((k), (number + 1, k)) ORDER BY k SETTINGS group_by_two_level_threshold=9, max_bytes_before_external_group_by=10000000000, max_bytes_ratio_before_external_group_by=0; +SELECT count(), toString(number) AS k FROM remote('127.0.0.{3,2}', numbers(10)) where number > ( queryID() = initialQueryID()) GROUP BY GROUPING SETS ((k), (number + 1, k), (k, number + 2)) ORDER BY k SETTINGS group_by_two_level_threshold=9, max_bytes_before_external_group_by=10000000000, max_bytes_ratio_before_external_group_by=0; diff --git a/parser/testdata/02167_columns_with_dots_default_values/ast.json b/parser/testdata/02167_columns_with_dots_default_values/ast.json new file mode 100644 index 000000000..fbb808c2f --- /dev/null +++ b/parser/testdata/02167_columns_with_dots_default_values/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_nested_default (children 1)" + }, + { + "explain": " Identifier test_nested_default" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001499503, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/02167_columns_with_dots_default_values/metadata.json b/parser/testdata/02167_columns_with_dots_default_values/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02167_columns_with_dots_default_values/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff 
--git a/parser/testdata/02167_columns_with_dots_default_values/query.sql b/parser/testdata/02167_columns_with_dots_default_values/query.sql new file mode 100644 index 000000000..81c83c2ce --- /dev/null +++ b/parser/testdata/02167_columns_with_dots_default_values/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS test_nested_default; + +CREATE TABLE test_nested_default +( + `id` String, + `with_dot.str` String, + `with_dot.array` Array(String) +) +ENGINE = MergeTree() +ORDER BY id; + +INSERT INTO test_nested_default(`id`, `with_dot.array`) VALUES('id', ['str1', 'str2']); +SELECT * FROM test_nested_default; + +DROP TABLE test_nested_default; diff --git a/parser/testdata/02168_avro_bug/ast.json b/parser/testdata/02168_avro_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02168_avro_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02168_avro_bug/metadata.json b/parser/testdata/02168_avro_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02168_avro_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02168_avro_bug/query.sql b/parser/testdata/02168_avro_bug/query.sql new file mode 100644 index 000000000..338b5ef8b --- /dev/null +++ b/parser/testdata/02168_avro_bug/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest, no-parallel +insert into table function file('02168_avro_bug.avro', 'Parquet', 'x UInt64') select * from numbers(10) settings engine_file_truncate_on_insert=1; +insert into table function file('02168_avro_bug.avro', 'Parquet', 'x UInt64') select * from numbers(10); -- { serverError CANNOT_APPEND_TO_FILE } +insert into table function file('02168_avro_bug.avro', 'Parquet', 'x UInt64') select * from numbers(10); -- { serverError CANNOT_APPEND_TO_FILE } +select 'OK'; diff --git a/parser/testdata/02169_fix_view_offset_limit_setting/ast.json b/parser/testdata/02169_fix_view_offset_limit_setting/ast.json new file mode 100644 index 000000000..dc0e2901f --- /dev/null +++ b/parser/testdata/02169_fix_view_offset_limit_setting/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery counter (children 1)" + }, + { + "explain": " Identifier counter" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001574546, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/02169_fix_view_offset_limit_setting/metadata.json b/parser/testdata/02169_fix_view_offset_limit_setting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02169_fix_view_offset_limit_setting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02169_fix_view_offset_limit_setting/query.sql b/parser/testdata/02169_fix_view_offset_limit_setting/query.sql new file mode 100644 index 000000000..8ac88ebc5 --- /dev/null +++ b/parser/testdata/02169_fix_view_offset_limit_setting/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS counter; +CREATE TABLE counter (id UInt64, createdAt DateTime) ENGINE = MergeTree() ORDER BY id; +INSERT INTO counter SELECT number, now() FROM numbers(500); + +DROP TABLE IF EXISTS vcounter; +CREATE VIEW vcounter AS SELECT intDiv(id, 10) AS tens, max(createdAt) AS maxid FROM counter GROUP BY tens; + +SELECT tens FROM vcounter ORDER BY tens ASC LIMIT 100 SETTINGS limit = 6, offset = 5; + +SELECT tens FROM vcounter ORDER BY tens ASC LIMIT 100 SETTINGS limit = 6, offset = 0; +DROP TABLE vcounter; +DROP TABLE 
counter; diff --git a/parser/testdata/02169_map_functions/ast.json b/parser/testdata/02169_map_functions/ast.json new file mode 100644 index 000000000..3d8176f16 --- /dev/null +++ b/parser/testdata/02169_map_functions/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_map (children 1)" + }, + { + "explain": " Identifier table_map" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001560583, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02169_map_functions/metadata.json b/parser/testdata/02169_map_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02169_map_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02169_map_functions/query.sql b/parser/testdata/02169_map_functions/query.sql new file mode 100644 index 000000000..42de17f9b --- /dev/null +++ b/parser/testdata/02169_map_functions/query.sql @@ -0,0 +1,108 @@ +DROP TABLE IF EXISTS table_map; +CREATE TABLE table_map (id UInt32, col Map(String, UInt64)) engine = MergeTree() ORDER BY tuple(); +INSERT INTO table_map SELECT number, map('key1', number, 'key2', number * 2) FROM numbers(1111, 3); +INSERT INTO table_map SELECT number, map('key3', number, 'key2', number + 1, 'key4', number + 2) FROM numbers(100, 4); + +SELECT mapFilter((k, v) -> k like '%3' and v > 102, col) FROM table_map ORDER BY id; +SELECT col, mapFilter((k, v) -> ((v % 10) > 1), col) FROM table_map ORDER BY id ASC; +SELECT mapApply((k, v) -> (k, v + 1), col) FROM table_map ORDER BY id; +SELECT mapFilter((k, v) -> 0, col) from table_map; +SELECT mapApply((k, v) -> tuple(v + 9223372036854775806), col) FROM table_map; -- { serverError BAD_ARGUMENTS } + +SELECT mapFilter((k, v) -> k = 0.1::Float32, map(0.1::Float32, 4, 0.2::Float32, 5)); +SELECT mapFilter((k, v) -> k = 0.1::Float64, map(0.1::Float64, 4, 0.2::Float64, 5)); +SELECT mapFilter((k, v) -> k = array(1,2), map(array(1,2), 4, array(3,4), 5)); +SELECT mapFilter((k, v) -> k = map(1,2), map(map(1,2), 4, map(3,4), 5)); +SELECT mapFilter((k, v) -> k = tuple(1,2), map(tuple(1,2), 4, tuple(3,4), 5)); + +SELECT mapExists((k, v) -> k = 0.1::Float32, map(0.1::Float32, 4, 0.2::Float32, 5)); +SELECT mapExists((k, v) -> k = 0.1::Float64, map(0.1::Float64, 4, 0.2::Float64, 5)); +SELECT mapExists((k, v) -> k = array(1,2), map(array(1,2), 4, array(3,4), 5)); +SELECT mapExists((k, v) -> k = map(1,2), map(map(1,2), 4, map(3,4), 5)); +SELECT mapExists((k, v) -> k = tuple(1,2), map(tuple(1,2), 4, tuple(3,4), 5)); + +SELECT mapAll((k, v) -> k = 0.1::Float32, map(0.1::Float32, 4, 0.2::Float32, 5)); +SELECT mapAll((k, v) -> k = 0.1::Float64, map(0.1::Float64, 4, 0.2::Float64, 5)); +SELECT mapAll((k, v) -> k = array(1,2), map(array(1,2), 4, array(3,4), 5)); +SELECT mapAll((k, v) -> k = map(1,2), map(map(1,2), 4, map(3,4), 5)); +SELECT mapAll((k, v) -> k = tuple(1,2), map(tuple(1,2), 4, tuple(3,4), 5)); + +SELECT mapSort((k, v) -> k, map(0.1::Float32, 4, 0.2::Float32, 5)); +SELECT mapSort((k, v) -> k, map(0.1::Float64, 4, 0.2::Float64, 5)); +SELECT mapSort((k, v) -> k, map(array(1,2), 4, array(3,4), 5)); +SELECT mapSort((k, v) -> k, map(map(1,2), 4, map(3,4), 5)); +SELECT mapSort((k, v) -> k, map(tuple(1,2), 4, tuple(3,4), 5)); + +SELECT mapConcat(col, map('key5', 500), map('key6', 600)) FROM table_map ORDER BY id; +SELECT mapConcat(col, materialize(map('key5', 500)), map('key6', 600)) FROM table_map ORDER BY id; +SELECT 
concat(map('key5', 500), map('key6', 600)); +SELECT map('key5', 500) || map('key6', 600); + +SELECT mapConcat(map(0.1::Float32, 4), map(0.2::Float32, 5)); +SELECT mapConcat(map(0.1::Float64, 4), map(0.2::Float64, 5)); +SELECT mapConcat(map(array(1,2), 4), map(array(3,4), 5)); +SELECT mapConcat(map(map(1,2), 4), map(map(3,4), 5)); +SELECT mapConcat(map(tuple(1,2), 4), map(tuple(3,4), 5)); + +SELECT mapExists((k, v) -> k LIKE '%3', col) FROM table_map ORDER BY id; +SELECT mapExists((k, v) -> k LIKE '%2' AND v < 1000, col) FROM table_map ORDER BY id; + +SELECT mapAll((k, v) -> k LIKE '%3', col) FROM table_map ORDER BY id; +SELECT mapAll((k, v) -> k LIKE '%2' AND v < 1000, col) FROM table_map ORDER BY id; + +SELECT mapSort(col) FROM table_map ORDER BY id; +SELECT mapSort((k, v) -> v, col) FROM table_map ORDER BY id; +SELECT mapPartialSort((k, v) -> k, 2, col) FROM table_map ORDER BY id; + +SELECT mapUpdate(map(1, 3, 3, 2), map(1, 0, 2, 0)); +SELECT mapApply((x, y) -> (x, x + 1), map(1, 0, 2, 0)); +SELECT mapApply((x, y) -> (x, x + 1), materialize(map(1, 0, 2, 0))); +SELECT mapApply((x, y) -> ('x', 'y'), map(1, 0, 2, 0)); +SELECT mapApply((x, y) -> ('x', 'y'), materialize(map(1, 0, 2, 0))); +SELECT mapApply((x, y) -> (x, x + 1), map(1.0, 0, 2.0, 0)); +SELECT mapApply((x, y) -> (x, x + 1), materialize(map(1.0, 0, 2.0, 0))); + +SELECT mapUpdate(map('k1', 1, 'k2', 2), map('k1', 11, 'k2', 22)); +SELECT mapUpdate(materialize(map('k1', 1, 'k2', 2)), map('k1', 11, 'k2', 22)); +SELECT mapUpdate(map('k1', 1, 'k2', 2), materialize(map('k1', 11, 'k2', 22))); +SELECT mapUpdate(materialize(map('k1', 1, 'k2', 2)), materialize(map('k1', 11, 'k2', 22))); + +SELECT mapUpdate(map('k1', 1, 'k2', 2, 'k3', 3), map('k2', 22, 'k3', 33, 'k4', 44)); +SELECT mapUpdate(materialize(map('k1', 1, 'k2', 2, 'k3', 3)), map('k2', 22, 'k3', 33, 'k4', 44)); +SELECT mapUpdate(map('k1', 1, 'k2', 2, 'k3', 3), materialize(map('k2', 22, 'k3', 33, 'k4', 44))); +SELECT mapUpdate(materialize(map('k1', 1, 'k2', 2, 'k3', 3)), materialize(map('k2', 22, 'k3', 33, 'k4', 44))); + +SELECT mapUpdate(map('k1', 1, 'k2', 2), map('k3', 33, 'k4', 44)); +SELECT mapUpdate(materialize(map('k1', 1, 'k2', 2)), map('k3', 33, 'k4', 44)); +SELECT mapUpdate(map('k1', 1, 'k2', 2), materialize(map('k3', 33, 'k4', 44))); +SELECT mapUpdate(materialize(map('k1', 1, 'k2', 2)), materialize(map('k3', 33, 'k4', 44))); + +WITH (range(0, number % 10), range(0, number % 10))::Map(UInt64, UInt64) AS m1, + (range(0, number % 10, 2), arrayMap(x -> x * x, range(0, number % 10, 2)))::Map(UInt64, UInt64) AS m2 +SELECT DISTINCT mapUpdate(m1, m2) FROM numbers (100000); + +SELECT mapApply(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT mapApply((x, y) -> (x), map(1, 0, 2, 0)); -- { serverError BAD_ARGUMENTS } +SELECT mapApply((x, y) -> ('x'), map(1, 0, 2, 0)); -- { serverError BAD_ARGUMENTS } +SELECT mapApply((x) -> (x, x), map(1, 0, 2, 0)); +SELECT mapApply((x, y) -> (x, 1, 2), map(1, 0, 2, 0)); -- { serverError BAD_ARGUMENTS } +SELECT mapApply((x, y) -> (x, x + 1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT mapApply(map(1, 0, 2, 0), (x, y) -> (x, x + 1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT mapApply((x, y) -> (x, x+1), map(1, 0, 2, 0), map(1, 0, 2, 0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT mapFilter(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT mapFilter((x, y) -> (toInt32(x)), map(1, 0, 2, 0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT mapFilter((x, y) -> ('x'), map(1, 0, 2, 0)); -- { 
serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT mapFilter((x) -> (x, x), map(1, 0, 2, 0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT mapFilter((x, y) -> (x, 1, 2), map(1, 0, 2, 0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT mapFilter((x, y) -> (x, x + 1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT mapFilter(map(1, 0, 2, 0), (x, y) -> (x > 0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT mapFilter((x, y) -> (x, x + 1), map(1, 0, 2, 0), map(1, 0, 2, 0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT mapConcat([1, 2], map(1, 2)); -- { serverError NO_COMMON_TYPE } +SELECT mapSort(map(1, 2), map(3, 4)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT mapUpdate(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT mapUpdate(map(1, 3, 3, 2), map(1, 0, 2, 0), map(1, 0, 2, 0)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +DROP TABLE table_map; diff --git a/parser/testdata/02174_cte_scalar_cache/ast.json b/parser/testdata/02174_cte_scalar_cache/ast.json new file mode 100644 index 000000000..efb4bb0d5 --- /dev/null +++ b/parser/testdata/02174_cte_scalar_cache/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001397708, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02174_cte_scalar_cache/metadata.json b/parser/testdata/02174_cte_scalar_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02174_cte_scalar_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02174_cte_scalar_cache/query.sql b/parser/testdata/02174_cte_scalar_cache/query.sql new file mode 100644 index 000000000..bbf73bf0e --- /dev/null +++ b/parser/testdata/02174_cte_scalar_cache/query.sql @@ -0,0 +1,74 @@ +SET enable_analyzer = 1; + +WITH + ( SELECT sleep(0.0001) FROM system.one ) as a1, + ( SELECT sleep(0.0001) FROM system.one ) as a2, + ( SELECT sleep(0.0001) FROM system.one ) as a3, + ( SELECT sleep(0.0001) FROM system.one ) as a4, + ( SELECT sleep(0.0001) FROM system.one ) as a5 +SELECT '02177_CTE_GLOBAL_ON', a1, a2, a3, a4, a5 FROM system.numbers LIMIT 100 +FORMAT Null +SETTINGS enable_global_with_statement = 1; + +WITH + ( SELECT sleep(0.0001) FROM system.one ) as a1, + ( SELECT sleep(0.0001) FROM system.one ) as a2, + ( SELECT sleep(0.0001) FROM system.one ) as a3, + ( SELECT sleep(0.0001) FROM system.one ) as a4, + ( SELECT sleep(0.0001) FROM system.one ) as a5 +SELECT '02177_CTE_GLOBAL_OFF', a1, a2, a3, a4, a5 FROM system.numbers LIMIT 100 + FORMAT Null +SETTINGS enable_global_with_statement = 0; + +WITH + ( SELECT sleep(0.0001) FROM system.one ) as a1, + ( SELECT sleep(0.0001) FROM system.one ) as a2, + ( SELECT sleep(0.0001) FROM system.one ) as a3, + ( SELECT sleep(0.0001) FROM system.one ) as a4, + ( SELECT sleep(0.0001) FROM system.one ) as a5 +SELECT '02177_CTE_NEW_ANALYZER', a1, a2, a3, a4, a5 FROM system.numbers LIMIT 100 + FORMAT Null +SETTINGS enable_analyzer = 1; + +SYSTEM FLUSH LOGS query_log; +SELECT + '02177_CTE_GLOBAL_ON', + ProfileEvents['SleepFunctionCalls'] as sleep_calls, + ProfileEvents['SleepFunctionMicroseconds'] as sleep_microseconds, + ProfileEvents['ScalarSubqueriesGlobalCacheHit'] as scalar_cache_global_hit, + ProfileEvents['ScalarSubqueriesLocalCacheHit'] as scalar_cache_local_hit, + ProfileEvents['ScalarSubqueriesCacheMiss'] as scalar_cache_miss +FROM system.query_log +WHERE + 
current_database = currentDatabase() + AND type = 'QueryFinish' + AND query LIKE '%SELECT ''02177_CTE_GLOBAL_ON%' + AND event_date >= yesterday() AND event_time > now() - interval 10 minute; + +SELECT + '02177_CTE_GLOBAL_OFF', + ProfileEvents['SleepFunctionCalls'] as sleep_calls, + ProfileEvents['SleepFunctionMicroseconds'] as sleep_microseconds, + ProfileEvents['ScalarSubqueriesGlobalCacheHit'] as scalar_cache_global_hit, + ProfileEvents['ScalarSubqueriesLocalCacheHit'] as scalar_cache_local_hit, + ProfileEvents['ScalarSubqueriesCacheMiss'] as scalar_cache_miss +FROM system.query_log +WHERE + current_database = currentDatabase() + AND type = 'QueryFinish' + AND query LIKE '%02177_CTE_GLOBAL_OFF%' + AND event_date >= yesterday() AND event_time > now() - interval 10 minute; + +SELECT + '02177_CTE_NEW_ANALYZER', + ProfileEvents['SleepFunctionCalls'] as sleep_calls, + ProfileEvents['SleepFunctionMicroseconds'] as sleep_microseconds, + ProfileEvents['ScalarSubqueriesGlobalCacheHit'] as scalar_cache_global_hit, + ProfileEvents['ScalarSubqueriesLocalCacheHit'] as scalar_cache_local_hit, + ProfileEvents['ScalarSubqueriesCacheMiss'] as scalar_cache_miss +FROM system.query_log +WHERE + current_database = currentDatabase() + AND type = 'QueryFinish' + AND query LIKE '%02177_CTE_NEW_ANALYZER%' + AND event_date >= yesterday() AND event_time > now() - interval 10 minute; diff --git a/parser/testdata/02174_cte_scalar_cache_mv/ast.json b/parser/testdata/02174_cte_scalar_cache_mv/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02174_cte_scalar_cache_mv/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02174_cte_scalar_cache_mv/metadata.json b/parser/testdata/02174_cte_scalar_cache_mv/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02174_cte_scalar_cache_mv/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02174_cte_scalar_cache_mv/query.sql b/parser/testdata/02174_cte_scalar_cache_mv/query.sql new file mode 100644 index 000000000..acafa935e --- /dev/null +++ b/parser/testdata/02174_cte_scalar_cache_mv/query.sql @@ -0,0 +1,232 @@ +-- TEST CACHE +CREATE TABLE t1 (i Int64, j Int64) ENGINE = Memory; +INSERT INTO t1 SELECT number, number FROM system.numbers LIMIT 100; +CREATE TABLE t2 (k Int64, l Int64, m Int64, n Int64) ENGINE = Memory; + +CREATE MATERIALIZED VIEW mv1 TO t2 AS + WITH + (SELECT max(i) FROM t1) AS t1 + SELECT + t1 as k, -- Using local cache x 4 + t1 as l, + t1 as m, + t1 as n + FROM t1 + LIMIT 5; + +set enable_analyzer = 0; + +-- FIRST INSERT +INSERT INTO t1 +WITH + (SELECT max(i) FROM t1) AS t1 +SELECT + number as i, + t1 + t1 + t1 AS j -- Using global cache +FROM system.numbers +LIMIT 100 +SETTINGS + min_insert_block_size_rows=5, + max_insert_block_size=5, + min_insert_block_size_rows_for_materialized_views=5, + max_block_size=5, + max_threads=1; + +SELECT k, l, m, n, count() +FROM t2 +GROUP BY k, l, m, n +ORDER BY k, l, m, n; + +SYSTEM FLUSH LOGS query_log; +-- The main query should have a cache miss and 3 global hits +-- The MV is executed 20 times (100 / 5) and each run does 1 miss and 4 hits to the LOCAL cache +-- In addition to this, to prepare the MV, there is an extra preparation to get the list of columns via +-- InterpreterSelectQuery, which adds 5 miss (since we don't use cache for preparation) +-- So in total we have: +-- Main query: 1 miss, 3 global +-- Preparation: 5 miss +-- Blocks (20): 20 miss, 0 global, 80 local hits + +-- TOTAL: 26 miss, 3 
global, 80 local +SELECT + '02177_MV', + ProfileEvents['ScalarSubqueriesGlobalCacheHit'] as scalar_cache_global_hit, + ProfileEvents['ScalarSubqueriesLocalCacheHit'] as scalar_cache_local_hit, + ProfileEvents['ScalarSubqueriesCacheMiss'] as scalar_cache_miss +FROM system.query_log +WHERE + current_database = currentDatabase() + AND type = 'QueryFinish' + AND query LIKE '-- FIRST INSERT\nINSERT INTO t1\n%' + AND event_date >= yesterday() AND event_time > now() - interval 10 minute; + +truncate table t2; +set enable_analyzer = 1; + +-- FIRST INSERT ANALYZER +INSERT INTO t1 +WITH + (SELECT max(i) FROM t1) AS t1 +SELECT + number as i, + t1 + t1 + t1 AS j -- Using global cache +FROM system.numbers +LIMIT 100 +SETTINGS + min_insert_block_size_rows=5, + max_insert_block_size=5, + min_insert_block_size_rows_for_materialized_views=5, + max_block_size=5, + max_threads=1; + +SELECT k, l, m, n, count() +FROM t2 +GROUP BY k, l, m, n +ORDER BY k, l, m, n; + +SYSTEM FLUSH LOGS query_log; + +SELECT + '02177_MV', + ProfileEvents['ScalarSubqueriesGlobalCacheHit'] as scalar_cache_global_hit, + ProfileEvents['ScalarSubqueriesLocalCacheHit'] as scalar_cache_local_hit, + ProfileEvents['ScalarSubqueriesCacheMiss'] as scalar_cache_miss +FROM system.query_log +WHERE + current_database = currentDatabase() + AND type = 'QueryFinish' + AND query LIKE '-- FIRST INSERT ANALYZER\nINSERT INTO t1\n%' + AND event_date >= yesterday() AND event_time > now() - interval 10 minute; + +DROP TABLE mv1; + +set enable_analyzer = 0; + +CREATE TABLE t3 (z Int64) ENGINE = Memory; +CREATE MATERIALIZED VIEW mv2 TO t3 AS +SELECT + -- This includes an unnecessarily complex query to verify that the local cache is used (since it uses t1) + sum(i) + sum(j) + (SELECT * FROM (SELECT min(i) + min(j) FROM (SELECT * FROM system.one _a, t1 _b))) AS z +FROM t1; + +-- SECOND INSERT +INSERT INTO t1 +SELECT 0 as i, number as j from numbers(100) +SETTINGS + min_insert_block_size_rows=5, + max_insert_block_size=5, + min_insert_block_size_rows_for_materialized_views=5, + max_block_size=5, + max_threads=1; + +SELECT * FROM t3 ORDER BY z ASC; +SYSTEM FLUSH LOGS query_log; +SELECT + '02177_MV_2', + ProfileEvents['ScalarSubqueriesGlobalCacheHit'] as scalar_cache_global_hit, + ProfileEvents['ScalarSubqueriesLocalCacheHit'] as scalar_cache_local_hit, + ProfileEvents['ScalarSubqueriesCacheMiss'] as scalar_cache_miss +FROM system.query_log +WHERE + current_database = currentDatabase() + AND type = 'QueryFinish' + AND query LIKE '-- SECOND INSERT\nINSERT INTO t1%' + AND event_date >= yesterday() AND event_time > now() - interval 10 minute; + +truncate table t3; +set enable_analyzer = 1; + +-- SECOND INSERT ANALYZER +INSERT INTO t1 +SELECT 0 as i, number as j from numbers(100) +SETTINGS + min_insert_block_size_rows=5, + max_insert_block_size=5, + min_insert_block_size_rows_for_materialized_views=5, + max_block_size=5, + max_threads=1; + +SELECT * FROM t3 ORDER BY z ASC; +SYSTEM FLUSH LOGS query_log; +SELECT + '02177_MV_2', + ProfileEvents['ScalarSubqueriesGlobalCacheHit'] as scalar_cache_global_hit, + ProfileEvents['ScalarSubqueriesLocalCacheHit'] as scalar_cache_local_hit, + ProfileEvents['ScalarSubqueriesCacheMiss'] as scalar_cache_miss +FROM system.query_log +WHERE + current_database = currentDatabase() + AND type = 'QueryFinish' + AND query LIKE '-- SECOND INSERT ANALYZER\nINSERT INTO t1%' + AND event_date >= yesterday() AND event_time > now() - interval 10 minute; + +DROP TABLE mv2; + +set enable_analyzer = 0; + +CREATE TABLE t4 (z Int64) ENGINE = Memory; 
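+-- A worked restatement of the scalar-cache accounting in the FIRST INSERT
+-- comments above (a sketch only; the totals are taken from those comments,
+-- assuming the same block layout of 100 rows in blocks of 5):
+--   misses      = 1 (main query) + 5 (MV preparation) + 20 (one per block) = 26
+--   global hits = 3 (per the main-query note above)
+--   local hits  = 20 blocks * 4 scalar reuses inside the MV = 80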
+CREATE MATERIALIZED VIEW mv3 TO t4 AS +SELECT + -- This includes an unnecessarily complex query but now it uses t2 so it can be cached + min(i) + min(j) + (SELECT * FROM (SELECT min(k) + min(l) FROM (SELECT * FROM system.one _a, t2 _b))) AS z +FROM t1; + +-- THIRD INSERT +INSERT INTO t1 +SELECT number as i, number as j from numbers(100) + SETTINGS + min_insert_block_size_rows=5, + max_insert_block_size=5, + min_insert_block_size_rows_for_materialized_views=5, + max_block_size=5, + max_threads=1; +SYSTEM FLUSH LOGS query_log; + +SELECT * FROM t4 ORDER BY z ASC; + +SELECT + '02177_MV_3', + ProfileEvents['ScalarSubqueriesGlobalCacheHit'] as scalar_cache_global_hit, + ProfileEvents['ScalarSubqueriesLocalCacheHit'] as scalar_cache_local_hit, + ProfileEvents['ScalarSubqueriesCacheMiss'] as scalar_cache_miss +FROM system.query_log +WHERE + current_database = currentDatabase() + AND type = 'QueryFinish' + AND query LIKE '-- THIRD INSERT\nINSERT INTO t1%' + AND event_date >= yesterday() AND event_time > now() - interval 10 minute; + +truncate table t4; +set enable_analyzer = 1; + +-- THIRD INSERT ANALYZER +INSERT INTO t1 +SELECT number as i, number as j from numbers(100) + SETTINGS + min_insert_block_size_rows=5, + max_insert_block_size=5, + min_insert_block_size_rows_for_materialized_views=5, + max_block_size=5, + max_threads=1; +SYSTEM FLUSH LOGS query_log; + +SELECT * FROM t4 ORDER BY z ASC; + +SELECT + '02177_MV_3', + ProfileEvents['ScalarSubqueriesGlobalCacheHit'] as scalar_cache_global_hit, + ProfileEvents['ScalarSubqueriesLocalCacheHit'] as scalar_cache_local_hit, + ProfileEvents['ScalarSubqueriesCacheMiss'] as scalar_cache_miss +FROM system.query_log +WHERE + current_database = currentDatabase() + AND type = 'QueryFinish' + AND query LIKE '-- THIRD INSERT ANALYZER\nINSERT INTO t1%' + AND event_date >= yesterday() AND event_time > now() - interval 10 minute; + + +DROP TABLE mv3; +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; +DROP TABLE t4; diff --git a/parser/testdata/02175_distributed_join_current_database/ast.json b/parser/testdata/02175_distributed_join_current_database/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02175_distributed_join_current_database/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02175_distributed_join_current_database/metadata.json b/parser/testdata/02175_distributed_join_current_database/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02175_distributed_join_current_database/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02175_distributed_join_current_database/query.sql b/parser/testdata/02175_distributed_join_current_database/query.sql new file mode 100644 index 000000000..94b949df0 --- /dev/null +++ b/parser/testdata/02175_distributed_join_current_database/query.sql @@ -0,0 +1,19 @@ +-- Tags: shard + +drop table if exists local_02175; +drop table if exists dist_02175; + +create table local_02175 engine=Memory() as select * from system.one; +create table dist_02175 as local_02175 engine=Distributed(test_cluster_two_shards, currentDatabase(), local_02175); + +-- { echoOn } +select * from dist_02175 l join local_02175 r using dummy; +select * from dist_02175 l global join local_02175 r using dummy; + +-- explicit database for distributed table +select * from remote('127.1', currentDatabase(), dist_02175) l join local_02175 r using dummy; +select * from remote('127.1', currentDatabase(), dist_02175) l global join 
local_02175 r using dummy; + +-- { echoOff } +drop table local_02175; +drop table dist_02175; diff --git a/parser/testdata/02176_dict_get_has_implicit_key_cast/ast.json b/parser/testdata/02176_dict_get_has_implicit_key_cast/ast.json new file mode 100644 index 000000000..3cf09860c --- /dev/null +++ b/parser/testdata/02176_dict_get_has_implicit_key_cast/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02176_test_simple_key_table (children 1)" + }, + { + "explain": " Identifier 02176_test_simple_key_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001433413, + "rows_read": 2, + "bytes_read": 106 + } +} diff --git a/parser/testdata/02176_dict_get_has_implicit_key_cast/metadata.json b/parser/testdata/02176_dict_get_has_implicit_key_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02176_dict_get_has_implicit_key_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02176_dict_get_has_implicit_key_cast/query.sql b/parser/testdata/02176_dict_get_has_implicit_key_cast/query.sql new file mode 100644 index 000000000..fbc0990e4 --- /dev/null +++ b/parser/testdata/02176_dict_get_has_implicit_key_cast/query.sql @@ -0,0 +1,67 @@ +DROP TABLE IF EXISTS 02176_test_simple_key_table; +CREATE TABLE 02176_test_simple_key_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO 02176_test_simple_key_table VALUES (0, 'Value'); + +DROP DICTIONARY IF EXISTS 02176_test_simple_key_dictionary; +CREATE DICTIONARY 02176_test_simple_key_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE '02176_test_simple_key_table')) +LAYOUT(DIRECT()); + +SELECT dictGet('02176_test_simple_key_dictionary', 'value', toUInt64(0)); +SELECT dictGet('02176_test_simple_key_dictionary', 'value', toUInt8(0)); +SELECT dictGet('02176_test_simple_key_dictionary', 'value', '0'); +SELECT dictGet('02176_test_simple_key_dictionary', 'value', [0]); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT dictHas('02176_test_simple_key_dictionary', toUInt64(0)); +SELECT dictHas('02176_test_simple_key_dictionary', toUInt8(0)); +SELECT dictHas('02176_test_simple_key_dictionary', '0'); +SELECT dictHas('02176_test_simple_key_dictionary', [0]); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} + +DROP DICTIONARY 02176_test_simple_key_dictionary; +DROP TABLE 02176_test_simple_key_table; + +DROP TABLE IF EXISTS 02176_test_complex_key_table; +CREATE TABLE 02176_test_complex_key_table +( + id UInt64, + id_key String, + value String +) ENGINE=TinyLog; + +INSERT INTO 02176_test_complex_key_table VALUES (0, '0', 'Value'); + +DROP DICTIONARY IF EXISTS 02176_test_complex_key_dictionary; +CREATE DICTIONARY 02176_test_complex_key_dictionary +( + id UInt64, + id_key String, + value String +) +PRIMARY KEY id, id_key +SOURCE(CLICKHOUSE(TABLE '02176_test_complex_key_table')) +LAYOUT(COMPLEX_KEY_DIRECT()); + +SELECT dictGet('02176_test_complex_key_dictionary', 'value', tuple(toUInt64(0), '0')); +SELECT dictGet('02176_test_complex_key_dictionary', 'value', tuple(toUInt8(0), '0')); +SELECT dictGet('02176_test_complex_key_dictionary', 'value', tuple('0', '0')); +SELECT dictGet('02176_test_complex_key_dictionary', 'value', tuple([0], '0')); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT dictGet('02176_test_complex_key_dictionary', 'value', tuple(toUInt64(0), 0)); + +SELECT dictHas('02176_test_complex_key_dictionary', tuple(toUInt64(0), '0')); +SELECT 
dictHas('02176_test_complex_key_dictionary', tuple(toUInt8(0), '0')); +SELECT dictHas('02176_test_complex_key_dictionary', tuple('0', '0')); +SELECT dictHas('02176_test_complex_key_dictionary', tuple([0], '0')); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT dictHas('02176_test_complex_key_dictionary', tuple(toUInt64(0), 0)); + +DROP DICTIONARY 02176_test_complex_key_dictionary; +DROP TABLE 02176_test_complex_key_table; diff --git a/parser/testdata/02176_optimize_aggregation_in_order_empty/ast.json b/parser/testdata/02176_optimize_aggregation_in_order_empty/ast.json new file mode 100644 index 000000000..15b617e26 --- /dev/null +++ b/parser/testdata/02176_optimize_aggregation_in_order_empty/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_02176 (children 1)" + }, + { + "explain": " Identifier data_02176" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001231223, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02176_optimize_aggregation_in_order_empty/metadata.json b/parser/testdata/02176_optimize_aggregation_in_order_empty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02176_optimize_aggregation_in_order_empty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02176_optimize_aggregation_in_order_empty/query.sql b/parser/testdata/02176_optimize_aggregation_in_order_empty/query.sql new file mode 100644 index 000000000..35731c63f --- /dev/null +++ b/parser/testdata/02176_optimize_aggregation_in_order_empty/query.sql @@ -0,0 +1,17 @@ +drop table if exists data_02176; +create table data_02176 (key Int) Engine=MergeTree() order by key; + +set optimize_aggregation_in_order=1; + +-- { echoOn } + +-- regression for optimize_aggregation_in_order with empty result set +-- that cause at first +-- "Chunk should have AggregatedChunkInfo in GroupingAggregatedTransform" +-- at first and after +-- "Chunk should have AggregatedChunkInfo in GroupingAggregatedTransform" +select count() from remote('127.{1,2}', currentDatabase(), data_02176) where key = 0 group by key; +select count() from remote('127.{1,2}', currentDatabase(), data_02176) where key = 0 group by key settings distributed_aggregation_memory_efficient=0; + +-- { echoOff } +drop table data_02176; diff --git a/parser/testdata/02176_toStartOfWeek_overflow_pruning/ast.json b/parser/testdata/02176_toStartOfWeek_overflow_pruning/ast.json new file mode 100644 index 000000000..11b93b272 --- /dev/null +++ b/parser/testdata/02176_toStartOfWeek_overflow_pruning/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toStartOfWeek (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1970-01-01 00:00:00'" + }, + { + "explain": " Literal 'UTC'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001378347, + "rows_read": 10, + "bytes_read": 398 + } +} diff --git a/parser/testdata/02176_toStartOfWeek_overflow_pruning/metadata.json b/parser/testdata/02176_toStartOfWeek_overflow_pruning/metadata.json 
new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02176_toStartOfWeek_overflow_pruning/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02176_toStartOfWeek_overflow_pruning/query.sql b/parser/testdata/02176_toStartOfWeek_overflow_pruning/query.sql new file mode 100644 index 000000000..df5499df3 --- /dev/null +++ b/parser/testdata/02176_toStartOfWeek_overflow_pruning/query.sql @@ -0,0 +1,17 @@ +SELECT toStartOfWeek(toDateTime('1970-01-01 00:00:00', 'UTC')); +SELECT toStartOfWeek(toDateTime('1970-01-01 00:00:00', 'Asia/Istanbul')); +SELECT toStartOfWeek(toDateTime('1970-01-01 00:00:00', 'Canada/Atlantic')); +SELECT toStartOfWeek(toDateTime('1970-01-04 00:00:00')); + + +DROP TABLE IF EXISTS t02176; +CREATE TABLE t02176(timestamp DateTime) ENGINE = MergeTree +PARTITION BY toStartOfWeek(timestamp) +ORDER BY tuple(); + +INSERT INTO t02176 VALUES (1559952000); + +SELECT count() FROM t02176 WHERE timestamp >= toDateTime('1970-01-01 00:00:00'); +SELECT count() FROM t02176 WHERE identity(timestamp) >= toDateTime('1970-01-01 00:00:00'); + +DROP TABLE t02176; diff --git a/parser/testdata/02177_issue_31009/ast.json b/parser/testdata/02177_issue_31009/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02177_issue_31009/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02177_issue_31009/metadata.json b/parser/testdata/02177_issue_31009/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02177_issue_31009/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02177_issue_31009/query.sql b/parser/testdata/02177_issue_31009/query.sql new file mode 100644 index 000000000..2640d2a50 --- /dev/null +++ b/parser/testdata/02177_issue_31009/query.sql @@ -0,0 +1,26 @@ +-- Tags: long, no-tsan, no-asan, no-msan, no-debug + +SET max_threads=0; +SET max_insert_threads=0; +SET max_rows_to_read = '50M'; +SET join_algorithm = 'partial_merge'; +SET query_plan_join_swap_table = 0; + +DROP TABLE IF EXISTS left; +DROP TABLE IF EXISTS right; + +CREATE TABLE left ( key UInt32, value String ) ENGINE = MergeTree ORDER BY key SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +CREATE TABLE right ( key UInt32, value String ) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO left SELECT number, toString(number) FROM numbers(2536718); +INSERT INTO right SELECT number, toString(number) FROM numbers(2312470); + +SELECT key, count(1) AS cnt +FROM ( + SELECT data.key + FROM ( SELECT key FROM left AS s ) AS data + LEFT JOIN ( SELECT key FROM right GROUP BY key ) AS promo ON promo.key = data.key +) GROUP BY key HAVING count(1) > 1; + +DROP TABLE IF EXISTS left; +DROP TABLE IF EXISTS right; diff --git a/parser/testdata/02177_merge_optimize_aggregation_in_order/ast.json b/parser/testdata/02177_merge_optimize_aggregation_in_order/ast.json new file mode 100644 index 000000000..299456845 --- /dev/null +++ b/parser/testdata/02177_merge_optimize_aggregation_in_order/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_02177 (children 1)" + }, + { + "explain": " Identifier data_02177" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001046419, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02177_merge_optimize_aggregation_in_order/metadata.json 
b/parser/testdata/02177_merge_optimize_aggregation_in_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02177_merge_optimize_aggregation_in_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02177_merge_optimize_aggregation_in_order/query.sql b/parser/testdata/02177_merge_optimize_aggregation_in_order/query.sql new file mode 100644 index 000000000..9dc3a2421 --- /dev/null +++ b/parser/testdata/02177_merge_optimize_aggregation_in_order/query.sql @@ -0,0 +1,15 @@ +drop table if exists data_02177; +create table data_02177 (key Int) Engine=MergeTree() order by key; +insert into data_02177 values (1); + +set optimize_aggregation_in_order=1; + +-- { echoOn } + +-- regression for optimize_aggregation_in_order +-- that cause "Chunk should have AggregatedChunkInfo in GroupingAggregatedTransform" error +select count() from remote('127.{1,2}', currentDatabase(), data_02177) group by key; +select count() from remote('127.{1,2}', currentDatabase(), data_02177) group by key settings distributed_aggregation_memory_efficient=0; + +-- { echoOff } +drop table data_02177; diff --git a/parser/testdata/02177_sum_if_not_found/ast.json b/parser/testdata/02177_sum_if_not_found/ast.json new file mode 100644 index 000000000..e1bfee3fe --- /dev/null +++ b/parser/testdata/02177_sum_if_not_found/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumIf (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001798063, + "rows_read": 8, + "bytes_read": 288 + } +} diff --git a/parser/testdata/02177_sum_if_not_found/metadata.json b/parser/testdata/02177_sum_if_not_found/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02177_sum_if_not_found/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02177_sum_if_not_found/query.sql b/parser/testdata/02177_sum_if_not_found/query.sql new file mode 100644 index 000000000..cf961955a --- /dev/null +++ b/parser/testdata/02177_sum_if_not_found/query.sql @@ -0,0 +1,37 @@ +SELECT sumIf(1, 0); +SELECT SumIf(1, 0); +SELECT sUmIf(1, 0); +SELECT sumIF(1, 0); -- { serverError UNKNOWN_FUNCTION } + +DROP TABLE IF EXISTS data; +DROP TABLE IF EXISTS agg; + +CREATE TABLE data +( + `n` UInt32, + `t` DateTime +) +ENGINE = Null; + +CREATE TABLE agg +ENGINE = AggregatingMergeTree +ORDER BY tuple() AS +SELECT + t, + sumIF(n, 0) +FROM data +GROUP BY t; -- { serverError UNKNOWN_FUNCTION} + +SET allow_suspicious_primary_key = 1; + +CREATE TABLE agg +ENGINE = AggregatingMergeTree +ORDER BY tuple() AS +SELECT + t, + sumIf(n, 0) +FROM data +GROUP BY t; + +DROP TABLE data; +DROP TABLE agg; diff --git a/parser/testdata/02178_column_function_insert_from/ast.json b/parser/testdata/02178_column_function_insert_from/ast.json new file mode 100644 index 000000000..c3f52d99b --- /dev/null +++ b/parser/testdata/02178_column_function_insert_from/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery TESTTABLE (children 1)" + }, + { + 
"explain": " Identifier TESTTABLE" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001384867, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02178_column_function_insert_from/metadata.json b/parser/testdata/02178_column_function_insert_from/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02178_column_function_insert_from/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02178_column_function_insert_from/query.sql b/parser/testdata/02178_column_function_insert_from/query.sql new file mode 100644 index 000000000..b0d170d01 --- /dev/null +++ b/parser/testdata/02178_column_function_insert_from/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS TESTTABLE; + +CREATE TABLE TESTTABLE ( + _id UInt64, pt String, attr_list Array(String) +) ENGINE = MergeTree() PARTITION BY (pt) ORDER BY tuple(); + +INSERT INTO TESTTABLE values (0,'0',['1']), (1,'1',['1']); + +SET max_threads = 1; + +-- There is a bug which is fixed in new analyzer. +SET max_bytes_before_external_sort = 0; +SET max_bytes_ratio_before_external_sort = 0; + +SELECT attr, _id, arrayFilter(x -> (x IN (select '1')), attr_list) z +FROM TESTTABLE ARRAY JOIN z AS attr ORDER BY _id LIMIT 3 BY attr; + +DROP TABLE TESTTABLE; diff --git a/parser/testdata/02179_bool_type/ast.json b/parser/testdata/02179_bool_type/ast.json new file mode 100644 index 000000000..beafab21e --- /dev/null +++ b/parser/testdata/02179_bool_type/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Bool_1" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001093401, + "rows_read": 7, + "bytes_read": 261 + } +} diff --git a/parser/testdata/02179_bool_type/metadata.json b/parser/testdata/02179_bool_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02179_bool_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02179_bool_type/query.sql b/parser/testdata/02179_bool_type/query.sql new file mode 100644 index 000000000..05cc6d1c6 --- /dev/null +++ b/parser/testdata/02179_bool_type/query.sql @@ -0,0 +1,62 @@ +SELECT toTypeName(true); +SELECT toTypeName(false); + +SELECT not false; +SELECT not 1; +SELECT not 0; +SELECT not 100000000; +SELECT toTypeName(not false); +SELECT toTypeName(not 1); +SELECT toTypeName(not 0); +SELECT toTypeName(not 100000000); + +SELECT false and true; +SELECT 1 and 10; +SELECT 0 and 100000000; +SELECT 1 and true; +SELECT toTypeName(false and true); +SELECT toTypeName(1 and 10); +SELECT toTypeName(0 and 10000000); +SELECT toTypeName(1 and true); + +SELECT xor(false, true); +SELECT xor(1, 10); +SELECT xor(0, 100000000); +SELECT xor(1, true); +SELECT toTypeName(xor(false, true)); +SELECT toTypeName(xor(1, 10)); +SELECT toTypeName(xor(0, 10000000)); +SELECT toTypeName(xor(1, true)); + +SELECT false or true; +SELECT 1 or 10; +SELECT 0 or 100000000; +SELECT 1 or true; +SELECT toTypeName(false or true); +SELECT toTypeName(1 or 10); +SELECT toTypeName(0 or 10000000); +SELECT toTypeName(1 or true); + +SELECT toBool(100000000000); +SELECT toBool(0); +SELECT 
toBool(-10000000000); +SELECT toBool(100000000000.0000001); +SELECT toBool(toDecimal32(10.10, 2)); +SELECT toBool(toDecimal64(100000000000.1, 2)); +SELECT toBool(toDecimal32(0, 2)); +SELECT toBool('true'); +SELECT toBool('yes'); +SELECT toBool('enabled'); +SELECT toBool('enable'); +SELECT toBool('on'); +SELECT toBool('y'); +SELECT toBool('t'); + +SELECT toBool('false'); +SELECT toBool('no'); +SELECT toBool('disabled'); +SELECT toBool('disable'); +SELECT toBool('off'); +SELECT toBool('n'); +SELECT toBool('f'); + diff --git a/parser/testdata/02179_degrees_radians/ast.json b/parser/testdata/02179_degrees_radians/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02179_degrees_radians/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02179_degrees_radians/metadata.json b/parser/testdata/02179_degrees_radians/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02179_degrees_radians/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02179_degrees_radians/query.sql b/parser/testdata/02179_degrees_radians/query.sql new file mode 100644 index 000000000..ac07b5974 --- /dev/null +++ b/parser/testdata/02179_degrees_radians/query.sql @@ -0,0 +1,46 @@ +-- test conversion from degrees to radians +DROP TABLE IF EXISTS test_degs_to_rads; + +CREATE TABLE test_degs_to_rads (degrees Float64) ENGINE = Memory; + +INSERT INTO test_degs_to_rads VALUES (-1); +INSERT INTO test_degs_to_rads VALUES (-180); +INSERT INTO test_degs_to_rads VALUES (-180.6); +INSERT INTO test_degs_to_rads VALUES (-360); +INSERT INTO test_degs_to_rads VALUES (0); +INSERT INTO test_degs_to_rads VALUES (1); +INSERT INTO test_degs_to_rads VALUES (180); +INSERT INTO test_degs_to_rads VALUES (180.5); +INSERT INTO test_degs_to_rads VALUES (360); + +-- test that converting degrees to radians and back preserves the original value +select DEGREES(RADIANS(degrees)) from test_degs_to_rads order by degrees; +-- test that the radians function returns correct values for both ints and floats +select RADIANS(degrees) from test_degs_to_rads order by degrees; + +DROP TABLE test_degs_to_rads; + +-- test conversion from radians to degrees +DROP TABLE IF EXISTS test_rads_to_degs; + +CREATE TABLE test_rads_to_degs (radians Float64) ENGINE = Memory; + +INSERT INTO test_rads_to_degs VALUES (-6.283185307179586); +INSERT INTO test_rads_to_degs VALUES (-3.152064629101759); +INSERT INTO test_rads_to_degs VALUES (-3.141592653589793); +INSERT INTO test_rads_to_degs VALUES (-0.017453292519943295); +INSERT INTO test_rads_to_degs VALUES (0); +INSERT INTO test_rads_to_degs VALUES (1); +INSERT INTO test_rads_to_degs VALUES (10); +INSERT INTO test_rads_to_degs VALUES (-10); +INSERT INTO test_rads_to_degs VALUES (0.017453292519943295); +INSERT INTO test_rads_to_degs VALUES (3.141592653589793); +INSERT INTO test_rads_to_degs VALUES (3.1503192998497647); +INSERT INTO test_rads_to_degs VALUES (6.283185307179586); + +-- test that converting radians to degrees and back preserves the original value
select RADIANS(DEGREES(radians)) from test_rads_to_degs order by radians; +-- test that the degrees function returns correct values for both ints and floats +select DEGREES(radians) from test_rads_to_degs order by radians; + +DROP TABLE test_rads_to_degs; diff --git a/parser/testdata/02179_dict_reload_on_cluster/ast.json b/parser/testdata/02179_dict_reload_on_cluster/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++
b/parser/testdata/02179_dict_reload_on_cluster/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02179_dict_reload_on_cluster/metadata.json b/parser/testdata/02179_dict_reload_on_cluster/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02179_dict_reload_on_cluster/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02179_dict_reload_on_cluster/query.sql b/parser/testdata/02179_dict_reload_on_cluster/query.sql new file mode 100644 index 000000000..f21fa16ba --- /dev/null +++ b/parser/testdata/02179_dict_reload_on_cluster/query.sql @@ -0,0 +1,41 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS dict_db_02179; +CREATE DATABASE dict_db_02179; + +CREATE TABLE dict_db_02179.dict_data (key UInt64, val UInt64) Engine=Memory(); +CREATE DICTIONARY dict_db_02179.dict +( + key UInt64 DEFAULT 0, + val UInt64 DEFAULT 10 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() USER 'default' TABLE 'dict_data' PASSWORD '' DB 'dict_db_02179')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(FLAT()); + +INSERT INTO dict_db_02179.dict_data VALUES(1,11); + +SELECT query_count FROM system.dictionaries WHERE database = 'dict_db_02179' AND name = 'dict'; +SELECT dictGetUInt64('dict_db_02179.dict', 'val', toUInt64(0)); +SELECT query_count FROM system.dictionaries WHERE database = 'dict_db_02179' AND name = 'dict'; + + +SELECT 'SYSTEM RELOAD DICTIONARIES ON CLUSTER test_shard_localhost'; +SET distributed_ddl_output_mode='throw'; +SYSTEM RELOAD DICTIONARIES ON CLUSTER; -- { clientError SYNTAX_ERROR } +SYSTEM RELOAD DICTIONARIES ON CLUSTER test_shard_localhost; +SET distributed_ddl_output_mode='none'; +SELECT query_count FROM system.dictionaries WHERE database = 'dict_db_02179' AND name = 'dict'; +SELECT dictGetUInt64('dict_db_02179.dict', 'val', toUInt64(1)); +SELECT query_count FROM system.dictionaries WHERE database = 'dict_db_02179' AND name = 'dict'; + +SELECT 'CREATE DATABASE'; +DROP DATABASE IF EXISTS empty_db_02179; +CREATE DATABASE empty_db_02179; +SELECT query_count FROM system.dictionaries WHERE database = 'dict_db_02179' AND name = 'dict'; + +DROP DICTIONARY dict_db_02179.dict; +DROP TABLE dict_db_02179.dict_data; +DROP DATABASE dict_db_02179; +DROP DATABASE empty_db_02179; diff --git a/parser/testdata/02179_key_condition_no_common_type/ast.json b/parser/testdata/02179_key_condition_no_common_type/ast.json new file mode 100644 index 000000000..7e18fdbbc --- /dev/null +++ b/parser/testdata/02179_key_condition_no_common_type/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001115308, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02179_key_condition_no_common_type/metadata.json b/parser/testdata/02179_key_condition_no_common_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02179_key_condition_no_common_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02179_key_condition_no_common_type/query.sql b/parser/testdata/02179_key_condition_no_common_type/query.sql new file mode 100644 index 000000000..3d2adc3f2 --- /dev/null +++ b/parser/testdata/02179_key_condition_no_common_type/query.sql @@ -0,0 +1,9 @@ +drop table if exists t; + +create table t (c Decimal32(9)) engine MergeTree order by c; + +insert into t values 
(0.9); + +select * from t where c < 1.2; + +drop table t; diff --git a/parser/testdata/02179_map_cast_to_array/ast.json b/parser/testdata/02179_map_cast_to_array/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02179_map_cast_to_array/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02179_map_cast_to_array/metadata.json b/parser/testdata/02179_map_cast_to_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02179_map_cast_to_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02179_map_cast_to_array/query.sql b/parser/testdata/02179_map_cast_to_array/query.sql new file mode 100644 index 000000000..5720e4eb0 --- /dev/null +++ b/parser/testdata/02179_map_cast_to_array/query.sql @@ -0,0 +1,29 @@ +WITH map(1, 'Test') AS value, 'Array(Tuple(UInt64, String))' AS type +SELECT value, cast(value, type), cast(materialize(value), type); + +WITH map(1, 'Test') AS value, 'Array(Tuple(UInt64, UInt64))' AS type +SELECT value, cast(value, type), cast(materialize(value), type); --{serverError CANNOT_PARSE_TEXT} + +WITH map(1, '1234') AS value, 'Array(Tuple(UInt64, UInt64))' AS type +SELECT value, cast(value, type), cast(materialize(value), type); + +WITH map(1, [1, 2, 3]) AS value, 'Array(Tuple(UInt64, Array(String)))' AS type +SELECT value, cast(value, type), cast(materialize(value), type); + +WITH map(1, ['1', '2', '3']) AS value, 'Array(Tuple(UInt64, Array(UInt64)))' AS type +SELECT value, cast(value, type), cast(materialize(value), type); + +WITH map(1, map(1, '1234')) AS value, 'Array(Tuple(UInt64, Map(UInt64, String)))' AS type +SELECT value, cast(value, type), cast(materialize(value), type); + +WITH map(1, map(1, '1234')) AS value, 'Array(Tuple(UInt64, Map(UInt64, UInt64)))' AS type +SELECT value, cast(value, type), cast(materialize(value), type); + +WITH map(1, map(1, '1234')) AS value, 'Array(Tuple(UInt64, Array(Tuple(UInt64, String))))' AS type +SELECT value, cast(value, type), cast(materialize(value), type); + +WITH map(1, map(1, '1234')) as value, 'Array(Tuple(UInt64, Array(Tuple(UInt64, UInt64))))' AS type +SELECT value, cast(value, type), cast(materialize(value), type); + +WITH map(1, 'val1', 2, 'val2') AS map +SELECT CAST(map, 'Array(Tuple(k UInt32, v String))') AS c, toTypeName(c); diff --git a/parser/testdata/02179_range_hashed_dictionary_invalid_interval/ast.json b/parser/testdata/02179_range_hashed_dictionary_invalid_interval/ast.json new file mode 100644 index 000000000..ce5ae967c --- /dev/null +++ b/parser/testdata/02179_range_hashed_dictionary_invalid_interval/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02179_test_table (children 1)" + }, + { + "explain": " Identifier 02179_test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001015805, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/02179_range_hashed_dictionary_invalid_interval/metadata.json b/parser/testdata/02179_range_hashed_dictionary_invalid_interval/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02179_range_hashed_dictionary_invalid_interval/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02179_range_hashed_dictionary_invalid_interval/query.sql b/parser/testdata/02179_range_hashed_dictionary_invalid_interval/query.sql new file mode 100644 index 000000000..86c880584 --- 
/dev/null +++ b/parser/testdata/02179_range_hashed_dictionary_invalid_interval/query.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS 02179_test_table; +CREATE TABLE 02179_test_table +( + id UInt64, + value String, + start Int64, + end Int64 +) Engine = TinyLog; + +INSERT INTO 02179_test_table VALUES (0, 'Value', 10, 0); +INSERT INTO 02179_test_table VALUES (0, 'Value', 15, 10); +INSERT INTO 02179_test_table VALUES (0, 'Value', 15, 20); + +DROP DICTIONARY IF EXISTS 02179_test_dictionary; +CREATE DICTIONARY 02179_test_dictionary +( + id UInt64, + value String DEFAULT 'DefaultValue', + start Int64, + end Int64 +) PRIMARY KEY id +LAYOUT(RANGE_HASHED()) +SOURCE(CLICKHOUSE(TABLE '02179_test_table')) +RANGE(MIN start MAX end) +LIFETIME(0); + +SELECT dictGet('02179_test_dictionary', 'value', 0, 15); +SELECT dictGet('02179_test_dictionary', 'value', 0, 5); + +SELECT dictHas('02179_test_dictionary', 0, 15); +SELECT dictHas('02179_test_dictionary', 0, 5); + +SELECT * FROM 02179_test_dictionary ORDER BY ALL; + +DROP DICTIONARY 02179_test_dictionary; +DROP TABLE 02179_test_table; diff --git a/parser/testdata/02179_sparse_columns_detach/ast.json b/parser/testdata/02179_sparse_columns_detach/ast.json new file mode 100644 index 000000000..6929bb9d0 --- /dev/null +++ b/parser/testdata/02179_sparse_columns_detach/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_sparse_detach (children 1)" + }, + { + "explain": " Identifier t_sparse_detach" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001178921, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02179_sparse_columns_detach/metadata.json b/parser/testdata/02179_sparse_columns_detach/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02179_sparse_columns_detach/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02179_sparse_columns_detach/query.sql b/parser/testdata/02179_sparse_columns_detach/query.sql new file mode 100644 index 000000000..c451dae98 --- /dev/null +++ b/parser/testdata/02179_sparse_columns_detach/query.sql @@ -0,0 +1,53 @@ +DROP TABLE IF EXISTS t_sparse_detach; + +CREATE TABLE t_sparse_detach(id UInt64, s String) +ENGINE = MergeTree ORDER BY id +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9, enable_block_number_column = 0, enable_block_offset_column = 0; + +INSERT INTO t_sparse_detach SELECT number, number % 21 = 0 ? toString(number) : '' FROM numbers(10000); +INSERT INTO t_sparse_detach SELECT number, number % 21 = 0 ? toString(number) : '' FROM numbers(10000); + +OPTIMIZE TABLE t_sparse_detach FINAL; + +SELECT count() FROM t_sparse_detach WHERE s != ''; + +SELECT column, serialization_kind FROM system.parts_columns +WHERE table = 't_sparse_detach' AND database = currentDatabase() AND active +ORDER BY column; + +DETACH TABLE t_sparse_detach; +ATTACH TABLE t_sparse_detach; + +SELECT count() FROM t_sparse_detach WHERE s != ''; + +SELECT column, serialization_kind FROM system.parts_columns +WHERE table = 't_sparse_detach' AND database = currentDatabase() AND active +ORDER BY column; + +TRUNCATE TABLE t_sparse_detach; + +ALTER TABLE t_sparse_detach + MODIFY SETTING vertical_merge_algorithm_min_rows_to_activate = 1, + vertical_merge_algorithm_min_columns_to_activate = 1; + +INSERT INTO t_sparse_detach SELECT number, number % 21 = 0 ? toString(number) : '' FROM numbers(10000); +INSERT INTO t_sparse_detach SELECT number, number % 21 = 0 ? 
toString(number) : '' FROM numbers(10000); + +OPTIMIZE TABLE t_sparse_detach FINAL; + +SELECT count() FROM t_sparse_detach WHERE s != ''; + +SELECT column, serialization_kind FROM system.parts_columns +WHERE table = 't_sparse_detach' AND database = currentDatabase() AND active +ORDER BY column; + +DETACH TABLE t_sparse_detach; +ATTACH TABLE t_sparse_detach; + +SELECT count() FROM t_sparse_detach WHERE s != ''; + +SELECT column, serialization_kind FROM system.parts_columns +WHERE table = 't_sparse_detach' AND database = currentDatabase() AND active +ORDER BY column; + +DROP TABLE t_sparse_detach; diff --git a/parser/testdata/02180_group_by_lowcardinality/ast.json b/parser/testdata/02180_group_by_lowcardinality/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02180_group_by_lowcardinality/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02180_group_by_lowcardinality/metadata.json b/parser/testdata/02180_group_by_lowcardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02180_group_by_lowcardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02180_group_by_lowcardinality/query.sql b/parser/testdata/02180_group_by_lowcardinality/query.sql new file mode 100644 index 000000000..8f9e88918 --- /dev/null +++ b/parser/testdata/02180_group_by_lowcardinality/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-random-settings + +create table if not exists t_group_by_lowcardinality(p_date Date, val LowCardinality(Nullable(String))) +engine=MergeTree() partition by p_date order by tuple(); + +insert into t_group_by_lowcardinality select today() as p_date, toString(number/5) as val from numbers(10000); +insert into t_group_by_lowcardinality select today() as p_date, Null as val from numbers(100); + +select val, avg(toUInt32(val)) from t_group_by_lowcardinality group by val limit 10 settings max_threads=1, max_rows_to_group_by=100, group_by_overflow_mode='any' format JSONEachRow; + +drop table if exists t_group_by_lowcardinality; + diff --git a/parser/testdata/02180_insert_into_values_settings/ast.json b/parser/testdata/02180_insert_into_values_settings/ast.json new file mode 100644 index 000000000..0fdd9979b --- /dev/null +++ b/parser/testdata/02180_insert_into_values_settings/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00138254, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02180_insert_into_values_settings/metadata.json b/parser/testdata/02180_insert_into_values_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02180_insert_into_values_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02180_insert_into_values_settings/query.sql b/parser/testdata/02180_insert_into_values_settings/query.sql new file mode 100644 index 000000000..a499ab15f --- /dev/null +++ b/parser/testdata/02180_insert_into_values_settings/query.sql @@ -0,0 +1,4 @@ +drop table if exists t; +create table t (x Bool) engine=Memory(); +insert into t settings bool_true_representation='да' values ('да'); +drop table t; diff --git a/parser/testdata/02181_dictionary_attach_detach/ast.json b/parser/testdata/02181_dictionary_attach_detach/ast.json new file mode 100644 index 
000000000..b37748337 --- /dev/null +++ b/parser/testdata/02181_dictionary_attach_detach/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02181_test_table (children 1)" + }, + { + "explain": " Identifier 02181_test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000989456, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/02181_dictionary_attach_detach/metadata.json b/parser/testdata/02181_dictionary_attach_detach/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02181_dictionary_attach_detach/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02181_dictionary_attach_detach/query.sql b/parser/testdata/02181_dictionary_attach_detach/query.sql new file mode 100644 index 000000000..1c30c5a47 --- /dev/null +++ b/parser/testdata/02181_dictionary_attach_detach/query.sql @@ -0,0 +1,39 @@ +DROP TABLE IF EXISTS 02181_test_table; +CREATE TABLE 02181_test_table +( + id UInt64, + value String +) +ENGINE = TinyLog; + +INSERT INTO 02181_test_table VALUES (0, 'Value'); + +DROP DICTIONARY IF EXISTS 02181_test_dictionary; +CREATE DICTIONARY 02181_test_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE '02181_test_table')) +LAYOUT(HASHED()) +LIFETIME(0); + +DETACH TABLE 02181_test_dictionary; --{serverError CANNOT_DETACH_DICTIONARY_AS_TABLE} +ATTACH TABLE 02181_test_dictionary; --{serverError INCORRECT_QUERY} + +DETACH DICTIONARY 02181_test_dictionary; +ATTACH DICTIONARY 02181_test_dictionary; + +SELECT * FROM 02181_test_dictionary; + +DETACH DICTIONARY 02181_test_dictionary; +ATTACH DICTIONARY 02181_test_dictionary; + +SELECT * FROM 02181_test_dictionary; + +DETACH DICTIONARY 02181_test_dictionary; +ATTACH DICTIONARY 02181_test_dictionary; + +DROP DICTIONARY 02181_test_dictionary; +DROP TABLE 02181_test_table; diff --git a/parser/testdata/02181_sql_user_defined_functions_invalid_lambda/ast.json b/parser/testdata/02181_sql_user_defined_functions_invalid_lambda/ast.json new file mode 100644 index 000000000..8d543b403 --- /dev/null +++ b/parser/testdata/02181_sql_user_defined_functions_invalid_lambda/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateFunctionQuery 02181_invalid_lambda (children 2)" + }, + { + "explain": " Identifier 02181_invalid_lambda" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function multiply (alias x_doubled) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Identifier x_doubled" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001054336, + "rows_read": 11, + "bytes_read": 444 + } +} diff --git a/parser/testdata/02181_sql_user_defined_functions_invalid_lambda/metadata.json b/parser/testdata/02181_sql_user_defined_functions_invalid_lambda/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02181_sql_user_defined_functions_invalid_lambda/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02181_sql_user_defined_functions_invalid_lambda/query.sql 
b/parser/testdata/02181_sql_user_defined_functions_invalid_lambda/query.sql new file mode 100644 index 000000000..79cc8fd90 --- /dev/null +++ b/parser/testdata/02181_sql_user_defined_functions_invalid_lambda/query.sql @@ -0,0 +1,4 @@ +CREATE FUNCTION 02181_invalid_lambda AS lambda(((x * 2) AS x_doubled) + x_doubled); --{serverError BAD_ARGUMENTS} +CREATE FUNCTION 02181_invalid_lambda AS lambda(x); --{serverError BAD_ARGUMENTS} +CREATE FUNCTION 02181_invalid_lambda AS lambda(); --{serverError BAD_ARGUMENTS} +CREATE FUNCTION 02181_invalid_lambda AS lambda(tuple(x)) --{serverError BAD_ARGUMENTS} diff --git a/parser/testdata/02183_array_tuple_literals_remote/ast.json b/parser/testdata/02183_array_tuple_literals_remote/ast.json new file mode 100644 index 000000000..0ad5eb8e9 --- /dev/null +++ b/parser/testdata/02183_array_tuple_literals_remote/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function any (alias k) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '127.0.0.{1,2}'" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001270568, + "rows_read": 18, + "bytes_read": 733 + } +} diff --git a/parser/testdata/02183_array_tuple_literals_remote/metadata.json b/parser/testdata/02183_array_tuple_literals_remote/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02183_array_tuple_literals_remote/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02183_array_tuple_literals_remote/query.sql b/parser/testdata/02183_array_tuple_literals_remote/query.sql new file mode 100644 index 000000000..25c7e7d73 --- /dev/null +++ b/parser/testdata/02183_array_tuple_literals_remote/query.sql @@ -0,0 +1,11 @@ +SELECT any(array(0)) AS k FROM remote('127.0.0.{1,2}', numbers(10)); +SELECT any(tuple(0, 1)) AS k FROM remote('127.0.0.{1,2}', numbers(10)); +SELECT any(array(array(0, 1), array(2, 3))) AS k FROM remote('127.0.0.{1,2}', numbers(10)); +SELECT any(array(tuple(0, 1), tuple(2, 3))) AS k FROM remote('127.0.0.{1,2}', numbers(10)); +SELECT any(array((0, 1), (2, 3))) AS k FROM remote('127.0.0.{1,2}', numbers(10)); +SELECT any(tuple(array(0, 1), tuple(2, 3), [4], (5, 'a'), 6, 'b')) AS k FROM remote('127.0.0.{1,2}', numbers(10)); +SELECT any(array(number, 1)) AS k FROM remote('127.0.0.{1,2}', numbers(10)); +SELECT any(tuple(number, 1)) AS k FROM remote('127.0.0.{1,2}', numbers(10)); +SELECT any(array(array(0, 1), [2, 3])) AS k FROM remote('127.0.0.{1,2}', numbers(10)); +SELECT any(array(array(0, 1), [number, number])) AS k FROM remote('127.0.0.{1,2}', numbers(10)); +SELECT any([[[number]],[[number + 1], [number + 2, number + 
3]]]) AS k FROM remote('127.0.0.{1,2}', numbers(10)); diff --git a/parser/testdata/02183_combinator_if/ast.json b/parser/testdata/02183_combinator_if/ast.json new file mode 100644 index 000000000..3c83c8958 --- /dev/null +++ b/parser/testdata/02183_combinator_if/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function anyIf (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Hello'" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, NULL]" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001308945, + "rows_read": 15, + "bytes_read": 602 + } +} diff --git a/parser/testdata/02183_combinator_if/metadata.json b/parser/testdata/02183_combinator_if/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02183_combinator_if/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02183_combinator_if/query.sql b/parser/testdata/02183_combinator_if/query.sql new file mode 100644 index 000000000..ec7164079 --- /dev/null +++ b/parser/testdata/02183_combinator_if/query.sql @@ -0,0 +1,40 @@ +SELECT anyIf(toNullable('Hello'), arrayJoin([1, NULL]) = 0); + +SELECT anyIf(toNullable('Hello'), arrayJoin([1, 1]) = 0); +SELECT anyIf(toNullable('Hello'), arrayJoin([1, 0]) = 0); +SELECT anyIf(toNullable('Hello'), arrayJoin([0, 1]) = 0); +SELECT anyIf(toNullable('Hello'), arrayJoin([0, 0]) = 0); + +SELECT anyIf('Hello', arrayJoin([1, NULL]) = 0); +SELECT anyIf('Hello', arrayJoin([1, NULL]) = 1); +SELECT anyIf('Hello', arrayJoin([1, NULL]) IS NULL); + +SELECT number, anyIf(toNullable('Hello'), arrayJoin([1, NULL]) = 0) FROM numbers(2) GROUP BY number ORDER BY number; +SELECT number, anyIf(toNullable('Hello'), arrayJoin([1, NULL, 0]) = 0) FROM numbers(2) GROUP BY number ORDER BY number; + +SELECT number, anyIf('Hello', arrayJoin([1, NULL]) = 0) FROM numbers(2) GROUP BY number ORDER BY number; +SELECT number, anyIf('Hello', arrayJoin([1, NULL, 0]) = 0) FROM numbers(2) GROUP BY number ORDER BY number; + +SELECT number, anyIf(toNullable('Hello'), arrayJoin([1, 1]) = 0) FROM numbers(2) GROUP BY number ORDER BY number; +SELECT number, anyIf(toNullable('Hello'), arrayJoin([1, 0]) = 0) FROM numbers(2) GROUP BY number ORDER BY number; + + +SELECT anyIf(toNullable('Hello'), arrayJoin([1, NULL]) = 0) FROM remote('127.0.0.{1,2}', system.one); + +SELECT anyIf(toNullable('Hello'), arrayJoin([1, 1]) = 0) FROM remote('127.0.0.{1,2}', system.one); +SELECT anyIf(toNullable('Hello'), arrayJoin([1, 0]) = 0) FROM remote('127.0.0.{1,2}', system.one); +SELECT anyIf(toNullable('Hello'), arrayJoin([0, 1]) = 0) FROM remote('127.0.0.{1,2}', system.one); +SELECT anyIf(toNullable('Hello'), arrayJoin([0, 0]) = 0) FROM remote('127.0.0.{1,2}', system.one); + +SELECT anyIf('Hello', arrayJoin([1, NULL]) = 0) FROM remote('127.0.0.{1,2}', system.one); +SELECT anyIf('Hello', 
arrayJoin([1, NULL]) = 1) FROM remote('127.0.0.{1,2}', system.one); +SELECT anyIf('Hello', arrayJoin([1, NULL]) IS NULL) FROM remote('127.0.0.{1,2}', system.one); + +SELECT number, anyIf(toNullable('Hello'), arrayJoin([1, NULL]) = 0) FROM remote('127.0.0.{1,2}', numbers(2)) GROUP BY number ORDER BY number; +SELECT number, anyIf(toNullable('Hello'), arrayJoin([1, NULL, 0]) = 0) FROM remote('127.0.0.{1,2}', numbers(2)) GROUP BY number ORDER BY number; + +SELECT number, anyIf('Hello', arrayJoin([1, NULL]) = 0) FROM remote('127.0.0.{1,2}', numbers(2)) GROUP BY number ORDER BY number; +SELECT number, anyIf('Hello', arrayJoin([1, NULL, 0]) = 0) FROM remote('127.0.0.{1,2}', numbers(2)) GROUP BY number ORDER BY number; + +SELECT number, anyIf(toNullable('Hello'), arrayJoin([1, 1]) = 0) FROM remote('127.0.0.{1,2}', numbers(2)) GROUP BY number ORDER BY number; +SELECT number, anyIf(toNullable('Hello'), arrayJoin([1, 0]) = 0) FROM remote('127.0.0.{1,2}', numbers(2)) GROUP BY number ORDER BY number; diff --git a/parser/testdata/02183_dictionary_date_types/ast.json b/parser/testdata/02183_dictionary_date_types/ast.json new file mode 100644 index 000000000..e15b8b600 --- /dev/null +++ b/parser/testdata/02183_dictionary_date_types/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02183_dictionary_source_table (children 1)" + }, + { + "explain": " Identifier 02183_dictionary_source_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001147722, + "rows_read": 2, + "bytes_read": 110 + } +} diff --git a/parser/testdata/02183_dictionary_date_types/metadata.json b/parser/testdata/02183_dictionary_date_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02183_dictionary_date_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02183_dictionary_date_types/query.sql b/parser/testdata/02183_dictionary_date_types/query.sql new file mode 100644 index 000000000..5671f47cd --- /dev/null +++ b/parser/testdata/02183_dictionary_date_types/query.sql @@ -0,0 +1,211 @@ +DROP TABLE IF EXISTS 02183_dictionary_source_table; +CREATE TABLE 02183_dictionary_source_table +( + id UInt64, + value_date Date, + value_date_32 Date32, + value_date_time DateTime, + value_date_time_64 DateTime64 +) ENGINE=TinyLog; + +INSERT INTO 02183_dictionary_source_table VALUES (0, '2019-05-05', '2019-05-05', '2019-05-05', '2019-05-05'); + +SELECT * FROM 02183_dictionary_source_table; + +DROP DICTIONARY IF EXISTS 02183_flat_dictionary; +CREATE DICTIONARY 02183_flat_dictionary +( + id UInt64, + value_date Date, + value_date_32 Date32, + value_date_time DateTime, + value_date_time_64 DateTime64 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE '02183_dictionary_source_table')) +LIFETIME(0) +LAYOUT(FLAT()); + +SELECT 'Flat dictionary'; +SELECT * FROM 02183_flat_dictionary; + +DROP DICTIONARY 02183_flat_dictionary; + +DROP DICTIONARY IF EXISTS 02183_hashed_dictionary; +CREATE DICTIONARY 02183_hashed_dictionary +( + id UInt64, + value_date Date, + value_date_32 Date32, + value_date_time DateTime, + value_date_time_64 DateTime64 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE '02183_dictionary_source_table')) +LIFETIME(0) +LAYOUT(HASHED()); + +SELECT 'Hashed dictionary'; +SELECT * FROM 02183_hashed_dictionary; + +DROP DICTIONARY 02183_hashed_dictionary; + +DROP DICTIONARY IF EXISTS 02183_hashed_array_dictionary; +CREATE DICTIONARY 02183_hashed_array_dictionary +( + id UInt64, + 
value_date Date, + value_date_32 Date32, + value_date_time DateTime, + value_date_time_64 DateTime64 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE '02183_dictionary_source_table')) +LIFETIME(0) +LAYOUT(HASHED_ARRAY()); + +SELECT 'Hashed array dictionary'; +SELECT * FROM 02183_hashed_array_dictionary; + +DROP DICTIONARY 02183_hashed_array_dictionary; + +DROP DICTIONARY IF EXISTS 02183_cache_dictionary; +CREATE DICTIONARY 02183_cache_dictionary +( + id UInt64, + value_date Date, + value_date_32 Date32, + value_date_time DateTime, + value_date_time_64 DateTime64 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE '02183_dictionary_source_table')) +LIFETIME(0) +LAYOUT(CACHE(SIZE_IN_CELLS 10)); + +SELECT 'Cache dictionary'; +SELECT dictGet('02183_cache_dictionary', 'value_date', 0); +SELECT * FROM 02183_cache_dictionary; + +DROP DICTIONARY 02183_cache_dictionary; + +DROP DICTIONARY IF EXISTS 02183_direct_dictionary; +CREATE DICTIONARY 02183_direct_dictionary +( + id UInt64, + value_date Date, + value_date_32 Date32, + value_date_time DateTime, + value_date_time_64 DateTime64 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE '02183_dictionary_source_table')) +LAYOUT(DIRECT()); + +SELECT 'Direct dictionary'; +SELECT * FROM 02183_direct_dictionary; + +DROP DICTIONARY 02183_direct_dictionary; +DROP TABLE 02183_dictionary_source_table; + +DROP TABLE IF EXISTS 02183_ip_trie_dictionary_source_table; +CREATE TABLE 02183_ip_trie_dictionary_source_table +( + prefix String, + value_date Date, + value_date_32 Date32, + value_date_time DateTime, + value_date_time_64 DateTime64 +) ENGINE=TinyLog; + +INSERT INTO 02183_ip_trie_dictionary_source_table VALUES ('127.0.0.1', '2019-05-05', '2019-05-05', '2019-05-05', '2019-05-05'); +SELECT * FROM 02183_ip_trie_dictionary_source_table; + +DROP DICTIONARY IF EXISTS 02183_ip_trie_dictionary; +CREATE DICTIONARY 02183_ip_trie_dictionary +( + prefix String, + value_date Date, + value_date_32 Date32, + value_date_time DateTime, + value_date_time_64 DateTime64 +) +PRIMARY KEY prefix +SOURCE(CLICKHOUSE(TABLE '02183_ip_trie_dictionary_source_table')) +LAYOUT(IP_TRIE(access_to_key_from_attributes 1)) +LIFETIME(0); + +SELECT 'IPTrie dictionary'; +SELECT * FROM 02183_ip_trie_dictionary; + +DROP DICTIONARY 02183_ip_trie_dictionary; +DROP TABLE 02183_ip_trie_dictionary_source_table; + +DROP TABLE IF EXISTS 02183_polygon_dictionary_source_table; +CREATE TABLE 02183_polygon_dictionary_source_table +( + key Array(Array(Array(Tuple(Float64, Float64)))), + value_date Date, + value_date_32 Date32, + value_date_time DateTime, + value_date_time_64 DateTime64 +) ENGINE = TinyLog; + +INSERT INTO 02183_polygon_dictionary_source_table VALUES ([[[(0, 0), (0, 1), (1, 1), (1, 0)]]], '2019-05-05', '2019-05-05', '2019-05-05', '2019-05-05'); + +DROP DICTIONARY IF EXISTS 02183_polygon_dictionary; +CREATE DICTIONARY 02183_polygon_dictionary +( + key Array(Array(Array(Tuple(Float64, Float64)))), + value_date Date, + value_date_32 Date32, + value_date_time DateTime, + value_date_time_64 DateTime64 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE '02183_polygon_dictionary_source_table')) +LAYOUT(POLYGON(store_polygon_key_column 1)) +LIFETIME(0); + +SELECT 'Polygon dictionary'; +SELECT * FROM 02183_polygon_dictionary; + +DROP DICTIONARY 02183_polygon_dictionary; +DROP TABLE 02183_polygon_dictionary_source_table; + +DROP TABLE IF EXISTS 02183_range_dictionary_source_table; +CREATE TABLE 02183_range_dictionary_source_table +( + key UInt64, + start UInt64, + end UInt64, + value_date Date, + value_date_32 Date32, + 
value_date_time DateTime, + value_date_time_64 DateTime64 +) ENGINE = TinyLog; + +INSERT INTO 02183_range_dictionary_source_table VALUES(0, 0, 1, '2019-05-05', '2019-05-05', '2019-05-05', '2019-05-05'); +SELECT * FROM 02183_range_dictionary_source_table; + +CREATE DICTIONARY 02183_range_dictionary +( + key UInt64, + start UInt64, + end UInt64, + value_date Date, + value_date_32 Date32, + value_date_time DateTime, + value_date_time_64 DateTime64 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE '02183_range_dictionary_source_table')) +LAYOUT(RANGE_HASHED()) +RANGE(MIN start MAX end) +LIFETIME(0); + +SELECT 'Range dictionary'; +SELECT * FROM 02183_range_dictionary; + +DROP DICTIONARY 02183_range_dictionary; +DROP TABLE 02183_range_dictionary_source_table; diff --git a/parser/testdata/02183_dictionary_no_attributes/ast.json b/parser/testdata/02183_dictionary_no_attributes/ast.json new file mode 100644 index 000000000..ee59140a7 --- /dev/null +++ b/parser/testdata/02183_dictionary_no_attributes/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02183_dictionary_test_table (children 1)" + }, + { + "explain": " Identifier 02183_dictionary_test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000963509, + "rows_read": 2, + "bytes_read": 106 + } +} diff --git a/parser/testdata/02183_dictionary_no_attributes/metadata.json b/parser/testdata/02183_dictionary_no_attributes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02183_dictionary_no_attributes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02183_dictionary_no_attributes/query.sql b/parser/testdata/02183_dictionary_no_attributes/query.sql new file mode 100644 index 000000000..b9c9f1ba9 --- /dev/null +++ b/parser/testdata/02183_dictionary_no_attributes/query.sql @@ -0,0 +1,194 @@ +DROP TABLE IF EXISTS 02183_dictionary_test_table; +CREATE TABLE 02183_dictionary_test_table (id UInt64) ENGINE=TinyLog; +INSERT INTO 02183_dictionary_test_table VALUES (0), (1); + +SELECT * FROM 02183_dictionary_test_table; + +DROP DICTIONARY IF EXISTS 02183_flat_dictionary; +CREATE DICTIONARY 02183_flat_dictionary +( + id UInt64 +) +PRIMARY KEY id +LAYOUT(FLAT()) +SOURCE(CLICKHOUSE(TABLE '02183_dictionary_test_table')) +LIFETIME(0); + +SELECT 'FlatDictionary'; + +SELECT dictGet('02183_flat_dictionary', 'value', 0); -- {serverError BAD_ARGUMENTS} +SELECT dictHas('02183_flat_dictionary', 0); +SELECT dictHas('02183_flat_dictionary', 1); +SELECT dictHas('02183_flat_dictionary', 2); + +SELECT * FROM 02183_flat_dictionary; + +DROP DICTIONARY 02183_flat_dictionary; + +DROP DICTIONARY IF EXISTS 02183_hashed_dictionary; +CREATE DICTIONARY 02183_hashed_dictionary +( + id UInt64 +) +PRIMARY KEY id +LAYOUT(HASHED()) +SOURCE(CLICKHOUSE(TABLE '02183_dictionary_test_table')) +LIFETIME(0); + +SELECT 'HashedDictionary'; + +SELECT dictHas('02183_hashed_dictionary', 0); +SELECT dictHas('02183_hashed_dictionary', 1); +SELECT dictHas('02183_hashed_dictionary', 2); + +SELECT * FROM 02183_hashed_dictionary; + +DROP DICTIONARY 02183_hashed_dictionary; + +DROP DICTIONARY IF EXISTS 02183_hashed_array_dictionary; +CREATE DICTIONARY 02183_hashed_array_dictionary +( + id UInt64 +) +PRIMARY KEY id +LAYOUT(HASHED_ARRAY()) +SOURCE(CLICKHOUSE(TABLE '02183_dictionary_test_table')) +LIFETIME(0); + +SELECT 'HashedArrayDictionary'; + +SELECT dictHas('02183_hashed_array_dictionary', 0); +SELECT 
dictHas('02183_hashed_array_dictionary', 1); +SELECT dictHas('02183_hashed_array_dictionary', 2); + +SELECT * FROM 02183_hashed_array_dictionary; + +DROP DICTIONARY 02183_hashed_array_dictionary; + +DROP DICTIONARY IF EXISTS 02183_cache_dictionary; +CREATE DICTIONARY 02183_cache_dictionary +( + id UInt64 +) +PRIMARY KEY id +LAYOUT(CACHE(SIZE_IN_CELLS 10)) +SOURCE(CLICKHOUSE(TABLE '02183_dictionary_test_table')) +LIFETIME(0); + +SELECT 'CacheDictionary'; + +SELECT dictHas('02183_cache_dictionary', 0); +SELECT dictHas('02183_cache_dictionary', 1); +SELECT dictHas('02183_cache_dictionary', 2); + +SELECT * FROM 02183_cache_dictionary; + +DROP DICTIONARY 02183_cache_dictionary; + +DROP DICTIONARY IF EXISTS 02183_direct_dictionary; +CREATE DICTIONARY 02183_direct_dictionary +( + id UInt64 +) +PRIMARY KEY id +LAYOUT(HASHED()) +SOURCE(CLICKHOUSE(TABLE '02183_dictionary_test_table')) +LIFETIME(0); + +SELECT 'DirectDictionary'; + +SELECT dictHas('02183_direct_dictionary', 0); +SELECT dictHas('02183_direct_dictionary', 1); +SELECT dictHas('02183_direct_dictionary', 2); + +SELECT * FROM 02183_direct_dictionary; + +DROP DICTIONARY 02183_direct_dictionary; + +DROP TABLE 02183_dictionary_test_table; + +DROP TABLE IF EXISTS ip_trie_dictionary_source_table; +CREATE TABLE ip_trie_dictionary_source_table +( + prefix String +) ENGINE = TinyLog; + +INSERT INTO ip_trie_dictionary_source_table VALUES ('127.0.0.0'); + +DROP DICTIONARY IF EXISTS 02183_ip_trie_dictionary; +CREATE DICTIONARY 02183_ip_trie_dictionary +( + prefix String +) +PRIMARY KEY prefix +SOURCE(CLICKHOUSE(TABLE 'ip_trie_dictionary_source_table')) +LAYOUT(IP_TRIE()) +LIFETIME(0); + +SELECT 'IPTrieDictionary'; + +SELECT dictHas('02183_ip_trie_dictionary', tuple(IPv4StringToNum('127.0.0.0'))); +SELECT dictHas('02183_ip_trie_dictionary', tuple(IPv4StringToNum('127.0.0.1'))); +SELECT * FROM 02183_ip_trie_dictionary; + +DROP DICTIONARY 02183_ip_trie_dictionary; +DROP TABLE ip_trie_dictionary_source_table; + +DROP TABLE IF EXISTS 02183_polygon_dictionary_source_table; +CREATE TABLE 02183_polygon_dictionary_source_table +( + key Array(Array(Array(Tuple(Float64, Float64)))) +) ENGINE = TinyLog; + +INSERT INTO 02183_polygon_dictionary_source_table VALUES ([[[(0, 0), (0, 1), (1, 1), (1, 0)]]]); + +DROP DICTIONARY IF EXISTS 02183_polygon_dictionary; +CREATE DICTIONARY 02183_polygon_dictionary +( + key Array(Array(Array(Tuple(Float64, Float64)))) +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE '02183_polygon_dictionary_source_table')) +LAYOUT(POLYGON(store_polygon_key_column 1)) +LIFETIME(0); + +SELECT 'PolygonDictionary'; + +SELECT dictHas('02183_polygon_dictionary', tuple(0.5, 0.5)); +SELECT dictHas('02183_polygon_dictionary', tuple(1.5, 1.5)); +SELECT * FROM 02183_polygon_dictionary; + +DROP DICTIONARY 02183_polygon_dictionary; +DROP TABLE 02183_polygon_dictionary_source_table; + +DROP TABLE IF EXISTS 02183_range_dictionary_source_table; +CREATE TABLE 02183_range_dictionary_source_table +( + key UInt64, + start UInt64, + end UInt64 +) +ENGINE = TinyLog; + +INSERT INTO 02183_range_dictionary_source_table VALUES(0, 0, 1); + +DROP DICTIONARY IF EXISTS 02183_range_dictionary; +CREATE DICTIONARY 02183_range_dictionary +( + key UInt64, + start UInt64, + end UInt64 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE '02183_range_dictionary_source_table')) +LAYOUT(RANGE_HASHED()) +RANGE(MIN start MAX end) +LIFETIME(0); + +SELECT 'RangeHashedDictionary'; +SELECT * FROM 02183_range_dictionary; +SELECT dictHas('02183_range_dictionary', 0, 0); +SELECT 
dictHas('02183_range_dictionary', 0, 2); + +DROP DICTIONARY 02183_range_dictionary; +DROP TABLE 02183_range_dictionary_source_table; diff --git a/parser/testdata/02184_default_table_engine/ast.json b/parser/testdata/02184_default_table_engine/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02184_default_table_engine/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02184_default_table_engine/metadata.json b/parser/testdata/02184_default_table_engine/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02184_default_table_engine/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02184_default_table_engine/query.sql b/parser/testdata/02184_default_table_engine/query.sql new file mode 100644 index 000000000..9f4d9d108 --- /dev/null +++ b/parser/testdata/02184_default_table_engine/query.sql @@ -0,0 +1,138 @@ +-- Tags: memory-engine +SET create_table_empty_primary_key_by_default = 0; +SET default_table_engine = 'None'; + +CREATE TABLE table_02184 (x UInt8); --{serverError ENGINE_REQUIRED} +SET default_table_engine = 'Log'; +CREATE TABLE table_02184 (x UInt8); +SHOW CREATE TABLE table_02184; +DROP TABLE table_02184; + +SET default_table_engine = 'MergeTree'; +CREATE TABLE table_02184 (x UInt8); --{serverError BAD_ARGUMENTS} +CREATE TABLE table_02184 (x UInt8, PRIMARY KEY (x)); +SHOW CREATE TABLE table_02184; +DROP TABLE table_02184; + +CREATE TABLE test_optimize_exception (date Date) PARTITION BY toYYYYMM(date) ORDER BY date; +SHOW CREATE TABLE test_optimize_exception; +DROP TABLE test_optimize_exception; +CREATE TABLE table_02184 (x UInt8) PARTITION BY x; --{serverError BAD_ARGUMENTS} +CREATE TABLE table_02184 (x UInt8) ORDER BY x; +SHOW CREATE TABLE table_02184; +DROP TABLE table_02184; + +CREATE TABLE table_02184 (x UInt8) PRIMARY KEY x; +SHOW CREATE TABLE table_02184; +DROP TABLE table_02184; +SET default_table_engine = 'Memory'; +CREATE TABLE numbers1 AS SELECT number FROM numbers(10); +SHOW CREATE TABLE numbers1; +SELECT avg(number) FROM numbers1; +DROP TABLE numbers1; + +SET default_table_engine = 'MergeTree'; +CREATE TABLE numbers2 ORDER BY intHash32(number) SAMPLE BY intHash32(number) AS SELECT number FROM numbers(10); +SELECT sum(number) FROM numbers2; +SHOW CREATE TABLE numbers2; +DROP TABLE numbers2; + +CREATE TABLE numbers3 ENGINE = Log AS SELECT number FROM numbers(10); +SELECT sum(number) FROM numbers3; +SHOW CREATE TABLE numbers3; +DROP TABLE numbers3; + +CREATE TABLE test_table (EventDate Date, CounterID UInt32, UserID UInt64, EventTime DateTime('America/Los_Angeles'), UTCEventTime DateTime('UTC')) PARTITION BY EventDate PRIMARY KEY CounterID; +SET default_table_engine = 'Memory'; +CREATE MATERIALIZED VIEW test_view (Rows UInt64, MaxHitTime DateTime('America/Los_Angeles')) AS SELECT count() AS Rows, max(UTCEventTime) AS MaxHitTime FROM test_table; +CREATE MATERIALIZED VIEW test_view_filtered (EventDate Date, CounterID UInt32) POPULATE AS SELECT CounterID, EventDate FROM test_table WHERE EventDate < '2013-01-01'; +SHOW CREATE TABLE test_view_filtered; +INSERT INTO test_table (EventDate, UTCEventTime) VALUES ('2014-01-02', '2014-01-02 03:04:06'); + +SELECT * FROM test_table; +SELECT * FROM test_view; +SELECT * FROM test_view_filtered; + +DROP TABLE test_view; +DROP TABLE test_view_filtered; + +SET default_table_engine = 'MergeTree'; +CREATE MATERIALIZED VIEW test_view ORDER BY Rows AS SELECT count() AS Rows, max(UTCEventTime) AS MaxHitTime FROM 
test_table; +SET default_table_engine = 'Memory'; +CREATE TABLE t1 AS test_view; +CREATE TABLE t2 ENGINE=Memory AS test_view; +SHOW CREATE TABLE t1; +SHOW CREATE TABLE t2; +DROP TABLE test_view; +DROP TABLE test_table; +DROP TABLE t1; +DROP TABLE t2; + + +CREATE DATABASE test_02184 ORDER BY kek; -- {serverError UNKNOWN_ELEMENT_IN_AST} +CREATE DATABASE test_02184 SETTINGS x=1; -- {serverError UNKNOWN_SETTING} +CREATE TABLE table_02184 (x UInt8, y int, PRIMARY KEY (x)) ENGINE=MergeTree PRIMARY KEY y; -- {clientError BAD_ARGUMENTS} +SET default_table_engine = 'MergeTree'; +CREATE TABLE table_02184 (x UInt8, y int, PRIMARY KEY (x)) PRIMARY KEY y; -- {clientError BAD_ARGUMENTS} + +CREATE TABLE mt (a UInt64, b Nullable(String), PRIMARY KEY (a, coalesce(b, 'test')), INDEX b_index b TYPE set(123) GRANULARITY 1); +SHOW CREATE TABLE mt; +SET default_table_engine = 'Log'; +CREATE TABLE mt2 AS mt; +SHOW CREATE TABLE mt2; +DROP TABLE mt; + +SET default_table_engine = 'Log'; +CREATE TEMPORARY TABLE tmp (n int); +SHOW CREATE TEMPORARY TABLE tmp; +CREATE TEMPORARY TABLE tmp1 (n int) ENGINE=Memory; +CREATE TEMPORARY TABLE tmp2 (n int) ENGINE=Log; +CREATE TEMPORARY TABLE tmp2 (n int) ORDER BY n; -- {serverError BAD_ARGUMENTS} +CREATE TEMPORARY TABLE tmp2 (n int, PRIMARY KEY (n)); -- {serverError BAD_ARGUMENTS} + +CREATE TABLE log (n int); +SHOW CREATE log; +SET default_table_engine = 'MergeTree'; +CREATE TABLE log1 AS log; +SHOW CREATE log1; +CREATE TABLE mem AS log1 ENGINE=Memory; +SHOW CREATE mem; +DROP TABLE log; +DROP TABLE log1; +DROP TABLE mem; + +SET default_table_engine = 'None'; +CREATE TABLE mem AS SELECT 1 as n; --{serverError ENGINE_REQUIRED} +SET default_table_engine = 'Memory'; +CREATE TABLE mem ORDER BY n AS SELECT 1 as n; -- {serverError BAD_ARGUMENTS} +SET default_table_engine = 'MergeTree'; +CREATE TABLE mt ORDER BY n AS SELECT 1 as n; +CREATE TABLE mem ENGINE=Memory AS SELECT 1 as n; +SHOW CREATE TABLE mt; +SHOW CREATE TABLE mem; +DROP TABLE mt; +DROP TABLE mem; + +CREATE TABLE val AS values('n int', 1, 2); +CREATE TABLE val2 AS val; +CREATE TABLE log ENGINE=Log AS val; +SHOW CREATE TABLE val; +SHOW CREATE TABLE val2; +SHOW CREATE TABLE log; +DROP TABLE val; +DROP TABLE val2; +DROP TABLE log; + +DROP TABLE IF EXISTS kek; +DROP TABLE IF EXISTS lol; +SET default_table_engine = 'Memory'; +CREATE TABLE kek (n int) SETTINGS log_queries=1; +CREATE TABLE lol (n int) ENGINE=MergeTree ORDER BY n SETTINGS min_bytes_for_wide_part=123 SETTINGS log_queries=1; +SHOW CREATE TABLE kek; +SHOW CREATE TABLE lol; +DROP TABLE kek; +DROP TABLE lol; + +SET default_temporary_table_engine = 'Log'; +CREATE TEMPORARY TABLE tmp_log (n int); +SHOW CREATE TEMPORARY TABLE tmp_log; diff --git a/parser/testdata/02184_hash_functions_and_ip_types/ast.json b/parser/testdata/02184_hash_functions_and_ip_types/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02184_hash_functions_and_ip_types/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02184_hash_functions_and_ip_types/metadata.json b/parser/testdata/02184_hash_functions_and_ip_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02184_hash_functions_and_ip_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02184_hash_functions_and_ip_types/query.sql b/parser/testdata/02184_hash_functions_and_ip_types/query.sql new file mode 100644 index 000000000..2cac61db5 --- /dev/null +++ 
b/parser/testdata/02184_hash_functions_and_ip_types/query.sql @@ -0,0 +1,61 @@ +-- Tags: no-fasttest, no-openssl-fips + +SET output_format_pretty_single_large_number_tip_threshold = 0; +SET enable_analyzer = 1; + +SELECT + toIPv4('1.2.3.4') AS ipv4, + halfMD5(ipv4), + farmFingerprint64(ipv4), + xxh3(ipv4), + wyHash64(ipv4), + xxHash32(ipv4), + gccMurmurHash(ipv4), + murmurHash2_32(ipv4), + javaHashUTF16LE(ipv4), + intHash64(ipv4), + intHash32(ipv4), + metroHash64(ipv4), + hex(murmurHash3_128(ipv4)), + jumpConsistentHash(ipv4, 42), + sipHash64(ipv4), + hex(sipHash128(ipv4)), + kostikConsistentHash(ipv4, 42), + xxHash64(ipv4), + murmurHash2_64(ipv4), + cityHash64(ipv4), + hiveHash(ipv4), + murmurHash3_64(ipv4), + murmurHash3_32(ipv4), + yandexConsistentHash(ipv4,42) +FORMAT Vertical; + +SELECT + toIPv6('fe80::62:5aff:fed1:daf0') AS ipv6, + halfMD5(ipv6), + hex(MD4(ipv6)), + hex(MD5(ipv6)), + hex(SHA1(ipv6)), + hex(SHA224(ipv6)), + hex(SHA256(ipv6)), + hex(SHA512(ipv6)), + hex(SHA512_256(ipv6)), + farmFingerprint64(ipv6), + javaHash(ipv6), + xxh3(ipv6), + wyHash64(ipv6), + xxHash32(ipv6), + gccMurmurHash(ipv6), + murmurHash2_32(ipv6), + javaHashUTF16LE(ipv6), + metroHash64(ipv6), + hex(sipHash128(ipv6)), + hex(murmurHash3_128(ipv6)), + sipHash64(ipv6), + xxHash64(ipv6), + murmurHash2_64(ipv6), + cityHash64(ipv6), + hiveHash(ipv6), + murmurHash3_64(ipv6), + murmurHash3_32(ipv6) +FORMAT Vertical; diff --git a/parser/testdata/02184_ipv6_cast_test/ast.json b/parser/testdata/02184_ipv6_cast_test/ast.json new file mode 100644 index 000000000..24fd7d426 --- /dev/null +++ b/parser/testdata/02184_ipv6_cast_test/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ipv6_test26473 (children 1)" + }, + { + "explain": " Identifier ipv6_test26473" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001200413, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/02184_ipv6_cast_test/metadata.json b/parser/testdata/02184_ipv6_cast_test/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02184_ipv6_cast_test/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02184_ipv6_cast_test/query.sql b/parser/testdata/02184_ipv6_cast_test/query.sql new file mode 100644 index 000000000..459e432bc --- /dev/null +++ b/parser/testdata/02184_ipv6_cast_test/query.sql @@ -0,0 +1,16 @@ +drop table if exists ipv6_test26473; + +CREATE TABLE ipv6_test26473 ( +`ip` String, +`ipv6` IPv6 MATERIALIZED toIPv6(ip), +`is_ipv6` Boolean MATERIALIZED isIPv6String(ip), +`cblock` IPv6 MATERIALIZED cutIPv6(ipv6, 10, 1), +`cblock1` IPv6 MATERIALIZED toIPv6(cutIPv6(ipv6, 10, 1)) +) +ENGINE = Memory; + +insert into ipv6_test26473 values ('2600:1011:b104:a86f:2832:b9c6:6d45:237b'); + +select ip, ipv6,cblock, cblock1,is_ipv6, cutIPv6(ipv6, 10, 1) from ipv6_test26473; + +drop table ipv6_test26473; diff --git a/parser/testdata/02184_ipv6_select_parsing/ast.json b/parser/testdata/02184_ipv6_select_parsing/ast.json new file mode 100644 index 000000000..2565fc330 --- /dev/null +++ b/parser/testdata/02184_ipv6_select_parsing/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ips_v6 (children 1)" + }, + { + "explain": " Identifier ips_v6" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001446599, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git 
a/parser/testdata/02184_ipv6_select_parsing/metadata.json b/parser/testdata/02184_ipv6_select_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02184_ipv6_select_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02184_ipv6_select_parsing/query.sql b/parser/testdata/02184_ipv6_select_parsing/query.sql new file mode 100644 index 000000000..ba5399555 --- /dev/null +++ b/parser/testdata/02184_ipv6_select_parsing/query.sql @@ -0,0 +1,13 @@ +drop table if exists ips_v6; +create table ips_v6(i IPv6) Engine=Memory; + +INSERT INTO ips_v6 SELECT toIPv6('::ffff:127.0.0.1'); + +INSERT INTO ips_v6 values ('::ffff:127.0.0.1'); + +INSERT INTO ips_v6 FORMAT TSV ::ffff:127.0.0.1 + +INSERT INTO ips_v6 SELECT ('::ffff:127.0.0.1'); + +SELECT * FROM ips_v6; +drop table ips_v6; diff --git a/parser/testdata/02184_nested_tuple/ast.json b/parser/testdata/02184_nested_tuple/ast.json new file mode 100644 index 000000000..c31b39a41 --- /dev/null +++ b/parser/testdata/02184_nested_tuple/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001150562, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02184_nested_tuple/metadata.json b/parser/testdata/02184_nested_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02184_nested_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02184_nested_tuple/query.sql b/parser/testdata/02184_nested_tuple/query.sql new file mode 100644 index 000000000..09ed8eb72 --- /dev/null +++ b/parser/testdata/02184_nested_tuple/query.sql @@ -0,0 +1,39 @@ +SET allow_suspicious_low_cardinality_types=1; +DROP TABLE IF EXISTS t_nested_tuple; + +CREATE TABLE t_nested_tuple +( + endUserIDs Tuple( + _experience Tuple( + aaid Tuple( + id Nullable(String), + namespace Tuple( + code LowCardinality(Nullable(String)) + ), + primary LowCardinality(Nullable(UInt8)) + ), + mcid Tuple( + id Nullable(String), + namespace Tuple( + code LowCardinality(Nullable(String)) + ), + primary LowCardinality(Nullable(UInt8)) + ) + ) + ) +) +ENGINE = MergeTree ORDER BY tuple(); + +SET output_format_json_named_tuples_as_objects = 1; + +INSERT INTO t_nested_tuple FORMAT JSONEachRow {"endUserIDs":{"_experience":{"aaid":{"id":"id_1","namespace":{"code":"code_1"},"primary":1},"mcid":{"id":"id_2","namespace":{"code":"code_2"},"primary":2}}}}; + +SELECT * FROM t_nested_tuple FORMAT JSONEachRow; +SELECT endUserIDs._experience FROM t_nested_tuple FORMAT JSONEachRow; +SELECT endUserIDs._experience.aaid FROM t_nested_tuple FORMAT JSONEachRow; +SELECT endUserIDs._experience.aaid.id FROM t_nested_tuple FORMAT JSONEachRow; +SELECT endUserIDs._experience.aaid.namespace FROM t_nested_tuple FORMAT JSONEachRow; +SELECT endUserIDs._experience.aaid.namespace.code FROM t_nested_tuple FORMAT JSONEachRow; +SELECT endUserIDs._experience.aaid.primary FROM t_nested_tuple FORMAT JSONEachRow; + +DROP TABLE t_nested_tuple; diff --git a/parser/testdata/02184_range_hashed_dictionary_outside_range_values/ast.json b/parser/testdata/02184_range_hashed_dictionary_outside_range_values/ast.json new file mode 100644 index 000000000..329b3df31 --- /dev/null +++ b/parser/testdata/02184_range_hashed_dictionary_outside_range_values/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + 
[ + { + "explain": "DropQuery 02184_range_dictionary_source_table (children 1)" + }, + { + "explain": " Identifier 02184_range_dictionary_source_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001233489, + "rows_read": 2, + "bytes_read": 122 + } +} diff --git a/parser/testdata/02184_range_hashed_dictionary_outside_range_values/metadata.json b/parser/testdata/02184_range_hashed_dictionary_outside_range_values/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02184_range_hashed_dictionary_outside_range_values/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02184_range_hashed_dictionary_outside_range_values/query.sql b/parser/testdata/02184_range_hashed_dictionary_outside_range_values/query.sql new file mode 100644 index 000000000..6e892d9d2 --- /dev/null +++ b/parser/testdata/02184_range_hashed_dictionary_outside_range_values/query.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS 02184_range_dictionary_source_table; +CREATE TABLE 02184_range_dictionary_source_table +( + id UInt64, + start UInt64, + end UInt64, + value_0 String, + value_1 String, + value_2 String +) +ENGINE = TinyLog; + +INSERT INTO 02184_range_dictionary_source_table VALUES (1, 0, 18446744073709551615, 'value0', 'value1', 'value2'); + +DROP DICTIONARY IF EXISTS 02184_range_dictionary; +CREATE DICTIONARY 02184_range_dictionary +( + id UInt64, + start UInt64, + end UInt64, + value_0 String, + value_1 String, + value_2 String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE '02184_range_dictionary_source_table')) +LAYOUT(RANGE_HASHED()) +RANGE(MIN start MAX end) +LIFETIME(0); + +SELECT * FROM 02184_range_dictionary; +SELECT dictGet('02184_range_dictionary', ('value_0', 'value_1', 'value_2'), 1, 18446744073709551615); +SELECT dictHas('02184_range_dictionary', 1, 18446744073709551615); + +DROP DICTIONARY 02184_range_dictionary; +DROP TABLE 02184_range_dictionary_source_table; diff --git a/parser/testdata/02184_storage_add_support_ttl/ast.json b/parser/testdata/02184_storage_add_support_ttl/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02184_storage_add_support_ttl/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02184_storage_add_support_ttl/metadata.json b/parser/testdata/02184_storage_add_support_ttl/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02184_storage_add_support_ttl/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02184_storage_add_support_ttl/query.sql b/parser/testdata/02184_storage_add_support_ttl/query.sql new file mode 100644 index 000000000..4691eedd2 --- /dev/null +++ b/parser/testdata/02184_storage_add_support_ttl/query.sql @@ -0,0 +1,60 @@ +-- Tags: log-engine +DROP TABLE IF EXISTS mergeTree_02184; +CREATE TABLE mergeTree_02184 (id UInt64, name String, dt Date) Engine=MergeTree ORDER BY id; +ALTER TABLE mergeTree_02184 MODIFY COLUMN name String TTL dt + INTERVAL 1 MONTH; +DETACH TABLE mergeTree_02184; +ATTACH TABLE mergeTree_02184; + +DROP TABLE IF EXISTS distributed_02184; +CREATE TABLE distributed_02184 (id UInt64, name String, dt Date) Engine=Distributed('test_cluster_two_shards', 'default', 'mergeTree_02184', rand()); +ALTER TABLE distributed_02184 MODIFY COLUMN name String TTL dt + INTERVAL 1 MONTH; -- { serverError BAD_ARGUMENTS } +DETACH TABLE distributed_02184; +ATTACH TABLE distributed_02184; + +DROP TABLE IF EXISTS buffer_02184; +CREATE TABLE buffer_02184 (id UInt64, name String, 
dt Date) ENGINE = Buffer(default, mergeTree_02184, 16, 10, 100, 10000, 1000000, 10000000, 100000000); +ALTER TABLE buffer_02184 MODIFY COLUMN name String TTL dt + INTERVAL 1 MONTH; -- { serverError BAD_ARGUMENTS } +DETACH TABLE buffer_02184; +ATTACH TABLE buffer_02184; + +DROP TABLE IF EXISTS merge_02184; +CREATE TABLE merge_02184 (id UInt64, name String, dt Date) ENGINE = Merge('default', 'distributed_02184'); +ALTER TABLE merge_02184 MODIFY COLUMN name String TTL dt + INTERVAL 1 MONTH; -- { serverError BAD_ARGUMENTS } +DETACH TABLE merge_02184; +ATTACH TABLE merge_02184; + +DROP TABLE IF EXISTS null_02184; +CREATE TABLE null_02184 AS system.one Engine=Null(); +ALTER TABLE null_02184 MODIFY COLUMN dummy Int TTL now() + INTERVAL 1 MONTH; -- { serverError BAD_ARGUMENTS } +DETACH TABLE null_02184; +ATTACH TABLE null_02184; + +DROP TABLE IF EXISTS file_02184; +CREATE TABLE file_02184 (id UInt64, name String, dt Date) ENGINE = File(TabSeparated); +ALTER TABLE file_02184 MODIFY COLUMN name String TTL dt + INTERVAL 1 MONTH; -- { serverError BAD_ARGUMENTS } +DETACH TABLE file_02184; +ATTACH TABLE file_02184; + +DROP TABLE IF EXISTS memory_02184; +CREATE TABLE memory_02184 (id UInt64, name String, dt Date) ENGINE = Memory(); +ALTER TABLE memory_02184 MODIFY COLUMN name String TTL dt + INTERVAL 1 MONTH; -- { serverError BAD_ARGUMENTS } +DETACH TABLE memory_02184; +ATTACH TABLE memory_02184; + +DROP TABLE IF EXISTS log_02184; +CREATE TABLE log_02184 (id UInt64, name String, dt Date) ENGINE = Log(); +ALTER TABLE log_02184 MODIFY COLUMN name String TTL dt + INTERVAL 1 MONTH; -- { serverError BAD_ARGUMENTS } +DETACH TABLE log_02184; +ATTACH TABLE log_02184; + +DROP TABLE IF EXISTS ting_log_02184; +CREATE TABLE ting_log_02184 (id UInt64, name String, dt Date) ENGINE = TinyLog(); +ALTER TABLE ting_log_02184 MODIFY COLUMN name String TTL dt + INTERVAL 1 MONTH; -- { serverError BAD_ARGUMENTS } +DETACH TABLE ting_log_02184; +ATTACH TABLE ting_log_02184; + +DROP TABLE IF EXISTS stripe_log_02184; +CREATE TABLE stripe_log_02184 (id UInt64, name String, dt Date) ENGINE = StripeLog; +ALTER TABLE stripe_log_02184 MODIFY COLUMN name String TTL dt + INTERVAL 1 MONTH; -- { serverError BAD_ARGUMENTS } +DETACH TABLE stripe_log_02184; +ATTACH TABLE stripe_log_02184; diff --git a/parser/testdata/02185_arraySlice_negative_offset_size/ast.json b/parser/testdata/02185_arraySlice_negative_offset_size/ast.json new file mode 100644 index 000000000..76817ab39 --- /dev/null +++ b/parser/testdata/02185_arraySlice_negative_offset_size/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arraySlice (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3, UInt64_4, UInt64_5, UInt64_6, UInt64_7, UInt64_8]" + }, + { + "explain": " Literal Int64_-2" + }, + { + "explain": " Literal Int64_-2" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001422149, + "rows_read": 9, + "bytes_read": 401 + } +} diff --git a/parser/testdata/02185_arraySlice_negative_offset_size/metadata.json b/parser/testdata/02185_arraySlice_negative_offset_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02185_arraySlice_negative_offset_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02185_arraySlice_negative_offset_size/query.sql b/parser/testdata/02185_arraySlice_negative_offset_size/query.sql new file mode 100644 index 000000000..7a6ac0417 --- /dev/null +++ b/parser/testdata/02185_arraySlice_negative_offset_size/query.sql @@ -0,0 +1,25 @@ +select arraySlice([1, 2, 3, 4, 5, 6, 7, 8], -2, -2); +select arraySlice(materialize([1, 2, 3, 4, 5, 6, 7, 8]), -2, -2); +select arraySlice(materialize([1, 2, 3, 4, 5, 6, 7, 8]), materialize(-2), materialize(-2)); + +select arraySlice([1, 2, 3, 4, 5, 6, 7, 8], -2, -1); +select arraySlice(materialize([1, 2, 3, 4, 5, 6, 7, 8]), -2, -1); +select arraySlice(materialize([1, 2, 3, 4, 5, 6, 7, 8]), materialize(-2), materialize(-1)); + +select '-'; +drop table if exists t; +create table t +( + s Array(Int), + l Int8, + r Int8 +) engine = Memory; + +insert into t values ([1, 2, 3, 4, 5, 6, 7, 8], -2, -2), ([1, 2, 3, 4, 5, 6, 7, 8], -3, -3); + +select arraySlice(s, -2, -2) from t; +select arraySlice(s, l, -2) from t; +select arraySlice(s, -2, r) from t; +select arraySlice(s, l, r) from t; + +drop table t; diff --git a/parser/testdata/02185_range_hashed_dictionary_open_ranges/ast.json b/parser/testdata/02185_range_hashed_dictionary_open_ranges/ast.json new file mode 100644 index 000000000..5981681e3 --- /dev/null +++ b/parser/testdata/02185_range_hashed_dictionary_open_ranges/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02185_range_dictionary_source_table (children 1)" + }, + { + "explain": " Identifier 02185_range_dictionary_source_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001299274, + "rows_read": 2, + "bytes_read": 122 + } +} diff --git a/parser/testdata/02185_range_hashed_dictionary_open_ranges/metadata.json b/parser/testdata/02185_range_hashed_dictionary_open_ranges/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02185_range_hashed_dictionary_open_ranges/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02185_range_hashed_dictionary_open_ranges/query.sql b/parser/testdata/02185_range_hashed_dictionary_open_ranges/query.sql new file mode 100644 index 000000000..a36c72de0 --- /dev/null +++ b/parser/testdata/02185_range_hashed_dictionary_open_ranges/query.sql @@ -0,0 +1,64 @@ +DROP TABLE IF EXISTS 02185_range_dictionary_source_table; +CREATE TABLE 02185_range_dictionary_source_table +( + id UInt64, + start Nullable(UInt64), + end Nullable(UInt64), + value String +) +ENGINE = TinyLog; + +INSERT INTO 02185_range_dictionary_source_table VALUES (0, NULL, 5000, 'Value0'), (0, 5001, 10000, 'Value1'), (0, 10001, NULL, 'Value2'); + +SELECT 'Source table'; +SELECT * FROM 02185_range_dictionary_source_table; + +DROP DICTIONARY IF EXISTS 02185_range_dictionary; +CREATE DICTIONARY 02185_range_dictionary +( + id UInt64, + start UInt64, + end UInt64, + value String DEFAULT 'DefaultValue' +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE '02185_range_dictionary_source_table')) +LAYOUT(RANGE_HASHED(convert_null_range_bound_to_open 1)) +RANGE(MIN start MAX end) +LIFETIME(0); + +SELECT 'Dictionary convert_null_range_bound_to_open = 1'; +SELECT * FROM 02185_range_dictionary; +SELECT dictGet('02185_range_dictionary', 'value', 0, 0); +SELECT dictGet('02185_range_dictionary', 'value', 0, 5001); +SELECT dictGet('02185_range_dictionary', 'value', 0, 
10001); +SELECT dictHas('02185_range_dictionary', 0, 0); +SELECT dictHas('02185_range_dictionary', 0, 5001); +SELECT dictHas('02185_range_dictionary', 0, 10001); + +DROP DICTIONARY 02185_range_dictionary; + +CREATE DICTIONARY 02185_range_dictionary +( + id UInt64, + start UInt64, + end UInt64, + value String DEFAULT 'DefaultValue' +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE '02185_range_dictionary_source_table')) +LAYOUT(RANGE_HASHED(convert_null_range_bound_to_open 0)) +RANGE(MIN start MAX end) +LIFETIME(0); + +SELECT 'Dictionary convert_null_range_bound_to_open = 0'; +SELECT * FROM 02185_range_dictionary; +SELECT dictGet('02185_range_dictionary', 'value', 0, 0); +SELECT dictGet('02185_range_dictionary', 'value', 0, 5001); +SELECT dictGet('02185_range_dictionary', 'value', 0, 10001); +SELECT dictHas('02185_range_dictionary', 0, 0); +SELECT dictHas('02185_range_dictionary', 0, 5001); +SELECT dictHas('02185_range_dictionary', 0, 10001); + +DROP DICTIONARY 02185_range_dictionary; +DROP TABLE 02185_range_dictionary_source_table; diff --git a/parser/testdata/02185_split_by_char/ast.json b/parser/testdata/02185_split_by_char/ast.json new file mode 100644 index 000000000..f1bbc91b0 --- /dev/null +++ b/parser/testdata/02185_split_by_char/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 1)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Function splitByChar (children 1)" + }, + { + "explain": "     ExpressionList (children 2)" + }, + { + "explain": "      Literal ','" + }, + { + "explain": "      Literal '1,2,3'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001703884, + "rows_read": 8, + "bytes_read": 288 + } +} diff --git a/parser/testdata/02185_split_by_char/metadata.json b/parser/testdata/02185_split_by_char/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02185_split_by_char/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02185_split_by_char/query.sql b/parser/testdata/02185_split_by_char/query.sql new file mode 100644 index 000000000..e2d2d72f8 --- /dev/null +++ b/parser/testdata/02185_split_by_char/query.sql @@ -0,0 +1,6 @@ +select splitByChar(',', '1,2,3'); +select splitByChar('+', '1+2+3'); +-- splitByChar accepts only single-byte separator characters. Test some special characters that are not necessarily a single byte.
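+-- Note: ',' and '+' above are single bytes, while in UTF-8 '×' is 2 bytes and '€' and '•' are 3 bytes each, hence the BAD_ARGUMENTS errors below.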
+select splitByChar('×', '1x2x3'); -- { serverError BAD_ARGUMENTS } +select splitByChar('€', '€1,2,3'); -- { serverError BAD_ARGUMENTS } +select splitByChar('•', '• 1,2,3'); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02186_range_hashed_dictionary_intersecting_intervals/ast.json b/parser/testdata/02186_range_hashed_dictionary_intersecting_intervals/ast.json new file mode 100644 index 000000000..d137a1745 --- /dev/null +++ b/parser/testdata/02186_range_hashed_dictionary_intersecting_intervals/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02186_range_dictionary_source_table (children 1)" + }, + { + "explain": " Identifier 02186_range_dictionary_source_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001235412, + "rows_read": 2, + "bytes_read": 122 + } +} diff --git a/parser/testdata/02186_range_hashed_dictionary_intersecting_intervals/metadata.json b/parser/testdata/02186_range_hashed_dictionary_intersecting_intervals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02186_range_hashed_dictionary_intersecting_intervals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02186_range_hashed_dictionary_intersecting_intervals/query.sql b/parser/testdata/02186_range_hashed_dictionary_intersecting_intervals/query.sql new file mode 100644 index 000000000..d61598c1a --- /dev/null +++ b/parser/testdata/02186_range_hashed_dictionary_intersecting_intervals/query.sql @@ -0,0 +1,64 @@ +DROP TABLE IF EXISTS 02186_range_dictionary_source_table; +CREATE TABLE 02186_range_dictionary_source_table +( + id UInt64, + start Date, + end Date, + value String +) +Engine = TinyLog; + +INSERT INTO 02186_range_dictionary_source_table VALUES (1, '2020-01-01', '2100-01-01', 'Value0'); +INSERT INTO 02186_range_dictionary_source_table VALUES (1, '2020-01-02', '2100-01-01', 'Value1'); +INSERT INTO 02186_range_dictionary_source_table VALUES (1, '2020-01-03', '2100-01-01', 'Value2'); + +SELECT 'Source table'; +SELECT * FROM 02186_range_dictionary_source_table ORDER BY ALL; + +DROP DICTIONARY IF EXISTS 02186_range_dictionary; +CREATE DICTIONARY 02186_range_dictionary +( + id UInt64, + start Date, + end Date, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE '02186_range_dictionary_source_table')) +LAYOUT(RANGE_HASHED(range_lookup_strategy 'min')) +RANGE(MIN start MAX end) +LIFETIME(0); + +SELECT 'Dictionary .range_lookup_strategy = min'; + +SELECT * FROM 02186_range_dictionary; + +select dictGet('02186_range_dictionary', 'value', toUInt64(1), toDate('2020-01-01')); +select dictGet('02186_range_dictionary', 'value', toUInt64(1), toDate('2020-01-02')); +select dictGet('02186_range_dictionary', 'value', toUInt64(1), toDate('2020-01-03')); + +DROP DICTIONARY 02186_range_dictionary; + +CREATE DICTIONARY 02186_range_dictionary +( + id UInt64, + start Date, + end Date, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE '02186_range_dictionary_source_table')) +LAYOUT(RANGE_HASHED(range_lookup_strategy 'max')) +RANGE(MIN start MAX end) +LIFETIME(0); + +SELECT 'Dictionary .range_lookup_strategy = max'; + +SELECT * FROM 02186_range_dictionary ORDER BY ALL; + +select dictGet('02186_range_dictionary', 'value', toUInt64(1), toDate('2020-01-01')); +select dictGet('02186_range_dictionary', 'value', toUInt64(1), toDate('2020-01-02')); +select dictGet('02186_range_dictionary', 'value', toUInt64(1), toDate('2020-01-03')); + +DROP 
DICTIONARY 02186_range_dictionary; +DROP TABLE 02186_range_dictionary_source_table; diff --git a/parser/testdata/02187_insert_values_with_mv/ast.json b/parser/testdata/02187_insert_values_with_mv/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02187_insert_values_with_mv/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02187_insert_values_with_mv/metadata.json b/parser/testdata/02187_insert_values_with_mv/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02187_insert_values_with_mv/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02187_insert_values_with_mv/query.sql b/parser/testdata/02187_insert_values_with_mv/query.sql new file mode 100644 index 000000000..fcf915a84 --- /dev/null +++ b/parser/testdata/02187_insert_values_with_mv/query.sql @@ -0,0 +1,62 @@ +-- Tags: no-async-insert +-- no-async-insert because initial_query_id in system.query_views_log has the query_id +-- of the "secondary" flush query with query_kind: AsyncInsertFlush +CREATE TABLE IF NOT EXISTS data_a_02187 (a Int64) ENGINE=Memory; +CREATE TABLE IF NOT EXISTS data_b_02187 (a Int64) ENGINE=Memory; +CREATE MATERIALIZED VIEW IF NOT EXISTS mv1 TO data_b_02187 AS Select sleepEachRow(0.05) as a FROM data_a_02187; +CREATE MATERIALIZED VIEW IF NOT EXISTS mv2 TO data_b_02187 AS Select sleepEachRow(0.05) as a FROM data_a_02187; +CREATE MATERIALIZED VIEW IF NOT EXISTS mv3 TO data_b_02187 AS Select sleepEachRow(0.05) as a FROM data_a_02187; +CREATE MATERIALIZED VIEW IF NOT EXISTS mv4 TO data_b_02187 AS Select sleepEachRow(0.05) as a FROM data_a_02187; +CREATE MATERIALIZED VIEW IF NOT EXISTS mv5 TO data_b_02187 AS Select sleepEachRow(0.05) as a FROM data_a_02187; + +-- INSERT USING VALUES +INSERT INTO data_a_02187 SETTINGS max_threads=1 VALUES (1); +-- INSERT USING TABLE +INSERT INTO data_a_02187 SELECT * FROM system.one SETTINGS max_threads=1; +SYSTEM FLUSH LOGS query_log, query_views_log; + +SELECT 'VALUES', query_duration_ms >= 250 +FROM system.query_log +WHERE + current_database = currentDatabase() + AND event_date >= yesterday() + AND query LIKE '-- INSERT USING VALUES%' + AND type = 'QueryFinish' +LIMIT 1; + +SELECT 'TABLE', query_duration_ms >= 250 +FROM system.query_log +WHERE + current_database = currentDatabase() + AND event_date >= yesterday() + AND query LIKE '-- INSERT USING TABLE%' + AND type = 'QueryFinish' +LIMIT 1; + +WITH + ( + SELECT initial_query_id + FROM system.query_log + WHERE + current_database = currentDatabase() + AND event_date >= yesterday() + AND query LIKE '-- INSERT USING VALUES%' + LIMIT 1 + ) AS q_id +SELECT 'VALUES', view_duration_ms >= 50 +FROM system.query_views_log +WHERE initial_query_id = q_id; + +WITH +( + SELECT initial_query_id + FROM system.query_log + WHERE + current_database = currentDatabase() + AND event_date >= yesterday() + AND query LIKE '-- INSERT USING TABLE%' + LIMIT 1 +) AS q_id +SELECT 'TABLE', view_duration_ms >= 50 +FROM system.query_views_log +WHERE initial_query_id = q_id; diff --git a/parser/testdata/02187_test_final_and_limit_modifier/ast.json b/parser/testdata/02187_test_final_and_limit_modifier/ast.json new file mode 100644 index 000000000..53548d589 --- /dev/null +++ b/parser/testdata/02187_test_final_and_limit_modifier/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_02187 (children 1)" + }, + { + "explain": " Identifier test_02187" + } + ], + +
"rows": 2, + + "statistics": + { + "elapsed": 0.001382529, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02187_test_final_and_limit_modifier/metadata.json b/parser/testdata/02187_test_final_and_limit_modifier/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02187_test_final_and_limit_modifier/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02187_test_final_and_limit_modifier/query.sql b/parser/testdata/02187_test_final_and_limit_modifier/query.sql new file mode 100644 index 000000000..7c4ae9368 --- /dev/null +++ b/parser/testdata/02187_test_final_and_limit_modifier/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS test_02187; +CREATE TABLE test_02187 ( + info String, + id Int32 +) +ENGINE = ReplacingMergeTree(id) +ORDER BY id; + +INSERT INTO TABLE test_02187 VALUES ('nothing', 1); +INSERT INTO TABLE test_02187 VALUES ('something', 1); + +SELECT * FROM test_02187 FINAL; +SELECT * FROM test_02187 FINAL LIMIT 1; + + diff --git a/parser/testdata/02188_parser_dictionary_primary_key/ast.json b/parser/testdata/02188_parser_dictionary_primary_key/ast.json new file mode 100644 index 000000000..ae4c310fa --- /dev/null +++ b/parser/testdata/02188_parser_dictionary_primary_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02188_test_dictionary_source (children 1)" + }, + { + "explain": " Identifier 02188_test_dictionary_source" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001514808, + "rows_read": 2, + "bytes_read": 108 + } +} diff --git a/parser/testdata/02188_parser_dictionary_primary_key/metadata.json b/parser/testdata/02188_parser_dictionary_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02188_parser_dictionary_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02188_parser_dictionary_primary_key/query.sql b/parser/testdata/02188_parser_dictionary_primary_key/query.sql new file mode 100644 index 000000000..a939c30b5 --- /dev/null +++ b/parser/testdata/02188_parser_dictionary_primary_key/query.sql @@ -0,0 +1,65 @@ +DROP TABLE IF EXISTS 02188_test_dictionary_source; +CREATE TABLE 02188_test_dictionary_source +( + id UInt64, + value String +) +ENGINE=TinyLog; + +INSERT INTO 02188_test_dictionary_source VALUES (0, 'Value'); + +DROP DICTIONARY IF EXISTS 02188_test_dictionary_simple_primary_key; +CREATE DICTIONARY 02188_test_dictionary_simple_primary_key +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE '02188_test_dictionary_source')) +LAYOUT(DIRECT()); + +SELECT 'Dictionary output'; +SELECT * FROM 02188_test_dictionary_simple_primary_key; +DROP DICTIONARY 02188_test_dictionary_simple_primary_key; + +CREATE DICTIONARY 02188_test_dictionary_simple_primary_key +( + id UInt64, + value String +) +PRIMARY KEY (id) +SOURCE(CLICKHOUSE(TABLE '02188_test_dictionary_source')) +LAYOUT(DIRECT()); + +SELECT 'Dictionary output'; +SELECT * FROM 02188_test_dictionary_simple_primary_key; +DROP DICTIONARY 02188_test_dictionary_simple_primary_key; + +DROP DICTIONARY IF EXISTS 02188_test_dictionary_complex_primary_key; +CREATE DICTIONARY 02188_test_dictionary_complex_primary_key +( + id UInt64, + value String +) +PRIMARY KEY id, value +SOURCE(CLICKHOUSE(TABLE '02188_test_dictionary_source')) +LAYOUT(COMPLEX_KEY_DIRECT()); + +SELECT 'Dictionary output'; +SELECT * FROM 
02188_test_dictionary_complex_primary_key; +DROP DICTIONARY 02188_test_dictionary_complex_primary_key; + +CREATE DICTIONARY 02188_test_dictionary_complex_primary_key +( + id UInt64, + value String +) +PRIMARY KEY (id, value) +SOURCE(CLICKHOUSE(TABLE '02188_test_dictionary_source')) +LAYOUT(COMPLEX_KEY_DIRECT()); + +SELECT 'Dictionary output'; +SELECT * FROM 02188_test_dictionary_complex_primary_key; +DROP DICTIONARY 02188_test_dictionary_complex_primary_key; + +DROP TABLE 02188_test_dictionary_source; diff --git a/parser/testdata/02188_table_function_format/ast.json b/parser/testdata/02188_table_function_format/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02188_table_function_format/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02188_table_function_format/metadata.json b/parser/testdata/02188_table_function_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02188_table_function_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02188_table_function_format/query.sql b/parser/testdata/02188_table_function_format/query.sql new file mode 100644 index 000000000..ff8e2a0d5 --- /dev/null +++ b/parser/testdata/02188_table_function_format/query.sql @@ -0,0 +1,70 @@ +-- Tags: no-fasttest + +select * from format(JSONEachRow, +$$ +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +$$); + +set max_block_size=5; + +select * from format(JSONEachRow, +$$ +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +$$); + +select * from format(CSV, '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"'); +desc format(CSV, '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"'); + +drop table if exists test; + +create table test as format(JSONEachRow, +$$ +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "Hello", "b": 131} +{"a": "World", "b": 123} +$$); + +select * from test; +desc table test; +drop table test; + diff --git a/parser/testdata/02189_join_type_conversion/ast.json b/parser/testdata/02189_join_type_conversion/ast.json new file mode 100644 index 000000000..a5acd32f4 --- /dev/null +++ b/parser/testdata/02189_join_type_conversion/ast.json @@ -0,0 +1,112 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " QualifiedAsterisk (children 1)" + }, + { + 
"explain": " Identifier t1" + }, + { + "explain": " QualifiedAsterisk (children 1)" + }, + { + "explain": " Identifier t2" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias t1) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias k)" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias t2) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-1 (alias k)" + }, + { + "explain": " TableJoin (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t1.k" + }, + { + "explain": " Identifier t2.k" + } + ], + + "rows": 30, + + "statistics": + { + "elapsed": 0.001529394, + "rows_read": 30, + "bytes_read": 1229 + } +} diff --git a/parser/testdata/02189_join_type_conversion/metadata.json b/parser/testdata/02189_join_type_conversion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02189_join_type_conversion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02189_join_type_conversion/query.sql b/parser/testdata/02189_join_type_conversion/query.sql new file mode 100644 index 000000000..918065383 --- /dev/null +++ b/parser/testdata/02189_join_type_conversion/query.sql @@ -0,0 +1 @@ +SELECT t1.*, t2.* FROM (SELECT 1 AS k) t1 JOIN (SELECT -1 AS k) t2 ON t1.k = t2.k; diff --git a/parser/testdata/02190_current_metrics_query/ast.json b/parser/testdata/02190_current_metrics_query/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02190_current_metrics_query/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02190_current_metrics_query/metadata.json b/parser/testdata/02190_current_metrics_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02190_current_metrics_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02190_current_metrics_query/query.sql b/parser/testdata/02190_current_metrics_query/query.sql new file mode 100644 index 000000000..e8b22e92a --- /dev/null +++ b/parser/testdata/02190_current_metrics_query/query.sql @@ -0,0 +1,2 @@ +-- This query itself is also accounted in metric. 
+SELECT value > 0 FROM system.metrics WHERE metric = 'Query'; diff --git a/parser/testdata/02191_nested_with_dots/ast.json b/parser/testdata/02191_nested_with_dots/ast.json new file mode 100644 index 000000000..8e1864ecc --- /dev/null +++ b/parser/testdata/02191_nested_with_dots/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_nested_with_dots (children 1)" + }, + { + "explain": " Identifier t_nested_with_dots" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001271628, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/02191_nested_with_dots/metadata.json b/parser/testdata/02191_nested_with_dots/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02191_nested_with_dots/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02191_nested_with_dots/query.sql b/parser/testdata/02191_nested_with_dots/query.sql new file mode 100644 index 000000000..cf649ca30 --- /dev/null +++ b/parser/testdata/02191_nested_with_dots/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS t_nested_with_dots; + +CREATE TABLE t_nested_with_dots (n Nested(id UInt64, `values.id` Array(UInt64))) +ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_nested_with_dots VALUES ([1], [[1]]); + +SELECT * FROM t_nested_with_dots; +SELECT n.values.id FROM t_nested_with_dots; + +DROP TABLE IF EXISTS t_nested_with_dots; +SET flatten_nested = 0; + +CREATE TABLE t_nested_with_dots (n Nested(id UInt64, `values.id` Array(UInt64))) +ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_nested_with_dots VALUES ([(1, [1])]); + +SELECT * FROM t_nested_with_dots; +SELECT n.values.id FROM t_nested_with_dots; + +DROP TABLE IF EXISTS t_nested_with_dots; + +CREATE TABLE t_nested_with_dots (`t.t2` Tuple(`t3.t4.t5` Tuple(`s1.s2` String, `u1.u2` UInt64), `s3.s4.s5` String)) +ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_nested_with_dots VALUES ((('a', 1), 'b')); + +SELECT * FROM t_nested_with_dots; +SELECT t.t2.t3.t4.t5.s1.s2, t.t2.t3.t4.t5.u1.u2 FROM t_nested_with_dots; +SELECT t.t2.t3.t4.t5.s1.s2, t.t2.s3.s4.s5 FROM t_nested_with_dots; + +DROP TABLE IF EXISTS t_nested_with_dots; diff --git a/parser/testdata/02191_parse_date_time_best_effort_more_cases/ast.json b/parser/testdata/02191_parse_date_time_best_effort_more_cases/ast.json new file mode 100644 index 000000000..f96d11ed7 --- /dev/null +++ b/parser/testdata/02191_parse_date_time_best_effort_more_cases/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function parseDateTimeBestEffort (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '20220101-010203'" + }, + { + "explain": " Literal 'UTC'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001479785, + "rows_read": 8, + "bytes_read": 312 + } +} diff --git a/parser/testdata/02191_parse_date_time_best_effort_more_cases/metadata.json b/parser/testdata/02191_parse_date_time_best_effort_more_cases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02191_parse_date_time_best_effort_more_cases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02191_parse_date_time_best_effort_more_cases/query.sql b/parser/testdata/02191_parse_date_time_best_effort_more_cases/query.sql new file mode 100644 index 000000000..d30834b90 --- /dev/null +++ b/parser/testdata/02191_parse_date_time_best_effort_more_cases/query.sql @@ -0,0 +1,10 @@ +SELECT parseDateTimeBestEffort('20220101-010203', 'UTC'); +SELECT parseDateTimeBestEffort('20220101+010203', 'UTC'); +SELECT parseDateTimeBestEffort('20220101 010203', 'UTC'); +SELECT parseDateTimeBestEffort('20220101T010203', 'UTC'); +SELECT parseDateTimeBestEffort('20220101T01:02', 'UTC'); +SELECT parseDateTimeBestEffort('20220101-0102', 'UTC'); +SELECT parseDateTimeBestEffort('20220101+0102', 'UTC'); +SELECT parseDateTimeBestEffort('20220101-010203-01', 'UTC'); +SELECT parseDateTimeBestEffort('20220101-010203+0100', 'UTC'); +SELECT parseDateTimeBestEffort('20220101-010203-01:00', 'UTC'); diff --git a/parser/testdata/02192_comment/ast.json b/parser/testdata/02192_comment/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02192_comment/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02192_comment/metadata.json b/parser/testdata/02192_comment/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02192_comment/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02192_comment/query.sql b/parser/testdata/02192_comment/query.sql new file mode 100644 index 000000000..ff56caa77 --- /dev/null +++ b/parser/testdata/02192_comment/query.sql @@ -0,0 +1,16 @@ +# comment +#! comment2 +select 1; +# comment3 +#! comment4 +select 2; # another comment +# +#! +select 3; + +select 1; #! +SELECT # hello +1; +SELECT /* # hello */ 1; +SELECT ' +# hello', 1; diff --git a/parser/testdata/02193_async_insert_tcp_client_1/ast.json b/parser/testdata/02193_async_insert_tcp_client_1/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02193_async_insert_tcp_client_1/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02193_async_insert_tcp_client_1/metadata.json b/parser/testdata/02193_async_insert_tcp_client_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02193_async_insert_tcp_client_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02193_async_insert_tcp_client_1/query.sql b/parser/testdata/02193_async_insert_tcp_client_1/query.sql new file mode 100644 index 000000000..6bc1cbfbf --- /dev/null +++ b/parser/testdata/02193_async_insert_tcp_client_1/query.sql @@ -0,0 +1,29 @@ +-- Tags: no-fasttest + +SET log_queries = 1; + +DROP TABLE IF EXISTS t_async_insert_02193_1; + +CREATE TABLE t_async_insert_02193_1 (id UInt32, s String) ENGINE = Memory; + +INSERT INTO t_async_insert_02193_1 SETTINGS async_insert = 1 FORMAT CSV 1,aaa + +INSERT INTO t_async_insert_02193_1 SETTINGS async_insert = 1 FORMAT Values (2, 'bbb'); + +SET async_insert = 1; + +INSERT INTO t_async_insert_02193_1 VALUES (3, 'ccc'); +INSERT INTO t_async_insert_02193_1 FORMAT JSONEachRow {"id": 4, "s": "ddd"}; + +SELECT * FROM t_async_insert_02193_1 ORDER BY id; + +SYSTEM FLUSH LOGS query_log; + +SELECT count(), sum(ProfileEvents['AsyncInsertQuery']) FROM system.query_log +WHERE + event_date >= yesterday() AND + type = 'QueryFinish' AND + current_database = currentDatabase() AND + query ILIKE 'INSERT INTO t_async_insert_02193_1%'; + +DROP TABLE IF EXISTS t_async_insert_02193_1; diff --git
a/parser/testdata/02200_use_skip_indexes/ast.json b/parser/testdata/02200_use_skip_indexes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02200_use_skip_indexes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02200_use_skip_indexes/metadata.json b/parser/testdata/02200_use_skip_indexes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02200_use_skip_indexes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02200_use_skip_indexes/query.sql b/parser/testdata/02200_use_skip_indexes/query.sql new file mode 100644 index 000000000..a5322f3e7 --- /dev/null +++ b/parser/testdata/02200_use_skip_indexes/query.sql @@ -0,0 +1,20 @@ +CREATE TABLE data_02200 ( + key Int, + value Int, + INDEX idx value TYPE minmax GRANULARITY 1 +) +Engine=MergeTree() +ORDER BY key +PARTITION BY key; + +set use_query_condition_cache = false; +set use_skip_indexes_on_data_read = false; + +INSERT INTO data_02200 SELECT number, number FROM numbers(10); + +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. +SET parallel_replicas_index_analysis_only_on_coordinator = 0; + +-- { echoOn } +SELECT * FROM data_02200 WHERE value = 1 SETTINGS use_skip_indexes=1, max_rows_to_read=1; +SELECT * FROM data_02200 WHERE value = 1 SETTINGS use_skip_indexes=0, max_rows_to_read=1; -- { serverError TOO_MANY_ROWS } diff --git a/parser/testdata/02201_use_skip_indexes_if_final/ast.json b/parser/testdata/02201_use_skip_indexes_if_final/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02201_use_skip_indexes_if_final/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02201_use_skip_indexes_if_final/metadata.json b/parser/testdata/02201_use_skip_indexes_if_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02201_use_skip_indexes_if_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02201_use_skip_indexes_if_final/query.sql b/parser/testdata/02201_use_skip_indexes_if_final/query.sql new file mode 100644 index 000000000..2afc4941c --- /dev/null +++ b/parser/testdata/02201_use_skip_indexes_if_final/query.sql @@ -0,0 +1,16 @@ +CREATE TABLE data_02201 ( + key Int, + value Int, + INDEX idx value TYPE minmax GRANULARITY 1 +) +Engine=AggregatingMergeTree() +ORDER BY key +PARTITION BY key; + +INSERT INTO data_02201 SELECT number, number FROM numbers(10); + +-- { echoOn } +SELECT * FROM data_02201 FINAL WHERE value = 1 SETTINGS use_skip_indexes=0, use_skip_indexes_if_final=0, max_rows_to_read=1; -- { serverError TOO_MANY_ROWS } +SELECT * FROM data_02201 FINAL WHERE value = 1 SETTINGS use_skip_indexes=1, use_skip_indexes_if_final=0, max_rows_to_read=1; -- { serverError TOO_MANY_ROWS } +SELECT * FROM data_02201 FINAL WHERE value = 1 SETTINGS use_skip_indexes=0, use_skip_indexes_if_final=1, max_rows_to_read=1; -- { serverError TOO_MANY_ROWS } +SELECT * FROM data_02201 FINAL WHERE value = 1 SETTINGS use_skip_indexes=1, use_skip_indexes_if_final=1, max_rows_to_read=1; diff --git a/parser/testdata/02202_use_skip_indexes_if_final/ast.json b/parser/testdata/02202_use_skip_indexes_if_final/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02202_use_skip_indexes_if_final/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/02202_use_skip_indexes_if_final/metadata.json b/parser/testdata/02202_use_skip_indexes_if_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02202_use_skip_indexes_if_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02202_use_skip_indexes_if_final/query.sql b/parser/testdata/02202_use_skip_indexes_if_final/query.sql new file mode 100644 index 000000000..44b08cd33 --- /dev/null +++ b/parser/testdata/02202_use_skip_indexes_if_final/query.sql @@ -0,0 +1,20 @@ +-- This test shows the difference in data with and without use_skip_indexes_if_final. +-- EDIT: The correct result will be seen with use_skip_indexes_if_final=1 and use_skip_indexes_if_final_exact_mode=1 + +CREATE TABLE data_02201 ( + key Int, + value_max SimpleAggregateFunction(max, Int), + INDEX idx value_max TYPE minmax GRANULARITY 1 +) +Engine=AggregatingMergeTree() +ORDER BY key +PARTITION BY key; + +SYSTEM STOP MERGES data_02201; + +INSERT INTO data_02201 SELECT number, number FROM numbers(10); +INSERT INTO data_02201 SELECT number, number+1 FROM numbers(10); + +-- { echoOn } +SELECT * FROM data_02201 FINAL WHERE value_max = 1 ORDER BY key, value_max SETTINGS use_skip_indexes=1, use_skip_indexes_if_final=0, use_skip_indexes_if_final_exact_mode=0; +SELECT * FROM data_02201 FINAL WHERE value_max = 1 ORDER BY key, value_max SETTINGS use_skip_indexes=1, use_skip_indexes_if_final=1, use_skip_indexes_if_final_exact_mode=0; diff --git a/parser/testdata/02205_ephemeral_1/ast.json b/parser/testdata/02205_ephemeral_1/ast.json new file mode 100644 index 000000000..08ea39902 --- /dev/null +++ b/parser/testdata/02205_ephemeral_1/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001267684, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02205_ephemeral_1/metadata.json b/parser/testdata/02205_ephemeral_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02205_ephemeral_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02205_ephemeral_1/query.sql b/parser/testdata/02205_ephemeral_1/query.sql new file mode 100644 index 000000000..fd1d2f5fa --- /dev/null +++ b/parser/testdata/02205_ephemeral_1/query.sql @@ -0,0 +1,82 @@ +SET mutations_sync=2; + +DROP TABLE IF EXISTS t_ephemeral_02205_1; + +CREATE TABLE t_ephemeral_02205_1 (x UInt32 DEFAULT y, y UInt32 EPHEMERAL 17, z UInt32 DEFAULT 5) ENGINE = Memory; + +DESCRIBE t_ephemeral_02205_1; + +# Test INSERT without a column list - only ordinary columns (x, z) participate +INSERT INTO t_ephemeral_02205_1 VALUES (1, 2); +# SELECT * should only return ordinary columns (x, z) - ephemeral is not stored in the table +SELECT * FROM t_ephemeral_02205_1; + +TRUNCATE TABLE t_ephemeral_02205_1; + +INSERT INTO t_ephemeral_02205_1 VALUES (DEFAULT, 2); +SELECT * FROM t_ephemeral_02205_1; + +TRUNCATE TABLE t_ephemeral_02205_1; + +# Test INSERT using the ephemeral's default +INSERT INTO t_ephemeral_02205_1 (x, y) VALUES (DEFAULT, DEFAULT); +SELECT * FROM t_ephemeral_02205_1; + +TRUNCATE TABLE t_ephemeral_02205_1; + +# Test INSERT using an explicit ephemeral value +INSERT INTO t_ephemeral_02205_1 (x, y) VALUES (DEFAULT, 7); +SELECT * FROM t_ephemeral_02205_1; + +# Test ALTER TABLE DELETE +ALTER TABLE t_ephemeral_02205_1 DELETE WHERE x = 7; +SELECT * FROM t_ephemeral_02205_1; + +TRUNCATE
TABLE t_ephemeral_02205_1; + +# Test INSERT into a column that defaults to the ephemeral but is given an explicit value +INSERT INTO t_ephemeral_02205_1 (x, y) VALUES (21, 7); +SELECT * FROM t_ephemeral_02205_1; + + +DROP TABLE IF EXISTS t_ephemeral_02205_1; + +# Test without a default +CREATE TABLE t_ephemeral_02205_1 (x UInt32 DEFAULT y, y UInt32 EPHEMERAL, z UInt32 DEFAULT 5) ENGINE = Memory; + +DESCRIBE t_ephemeral_02205_1; + +# Test INSERT without a column list - only ordinary columns (x, z) participate +INSERT INTO t_ephemeral_02205_1 VALUES (1, 2); +# SELECT * should only return ordinary columns (x, z) - ephemeral is not stored in the table +SELECT * FROM t_ephemeral_02205_1; + +TRUNCATE TABLE t_ephemeral_02205_1; + +INSERT INTO t_ephemeral_02205_1 VALUES (DEFAULT, 2); +SELECT * FROM t_ephemeral_02205_1; + +TRUNCATE TABLE t_ephemeral_02205_1; + +# Test INSERT using the ephemeral's default +INSERT INTO t_ephemeral_02205_1 (x, y) VALUES (DEFAULT, DEFAULT); +SELECT * FROM t_ephemeral_02205_1; + +TRUNCATE TABLE t_ephemeral_02205_1; + +# Test INSERT using an explicit ephemeral value +INSERT INTO t_ephemeral_02205_1 (x, y) VALUES (DEFAULT, 7); +SELECT * FROM t_ephemeral_02205_1; + +# Test ALTER TABLE DELETE +ALTER TABLE t_ephemeral_02205_1 DELETE WHERE x = 7; +SELECT * FROM t_ephemeral_02205_1; + +TRUNCATE TABLE t_ephemeral_02205_1; + +# Test INSERT into a column that defaults to the ephemeral but is given an explicit value +INSERT INTO t_ephemeral_02205_1 (x, y) VALUES (21, 7); +SELECT * FROM t_ephemeral_02205_1; + +DROP TABLE IF EXISTS t_ephemeral_02205_1; + diff --git a/parser/testdata/02205_map_populate_series_non_const/ast.json b/parser/testdata/02205_map_populate_series_non_const/ast.json new file mode 100644 index 000000000..f6ef0275d --- /dev/null +++ b/parser/testdata/02205_map_populate_series_non_const/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02005_test_table (children 1)" + }, + { + "explain": " Identifier 02005_test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00157209, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/02205_map_populate_series_non_const/metadata.json b/parser/testdata/02205_map_populate_series_non_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02205_map_populate_series_non_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02205_map_populate_series_non_const/query.sql b/parser/testdata/02205_map_populate_series_non_const/query.sql new file mode 100644 index 000000000..08a3dd51e --- /dev/null +++ b/parser/testdata/02205_map_populate_series_non_const/query.sql @@ -0,0 +1,125 @@ +DROP TABLE IF EXISTS 02005_test_table; +CREATE TABLE 02005_test_table +( + value Map(Int64, Int64) +) +ENGINE = TinyLog; + +SELECT 'mapPopulateSeries with map'; + +SELECT 'Without max key'; + +SELECT mapPopulateSeries(value) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +INSERT INTO 02005_test_table VALUES (map(0, 5)); +SELECT mapPopulateSeries(value) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +INSERT INTO 02005_test_table VALUES (map(0, 5, 5, 10)); +SELECT mapPopulateSeries(value) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +INSERT INTO 02005_test_table VALUES (map(-5, -5, 0, 5, 5, 10)); +SELECT mapPopulateSeries(value) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +INSERT INTO 02005_test_table VALUES (map(-5, -5,
0, 5, 5, 10, 10, 15)); +SELECT mapPopulateSeries(value) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +SELECT 'With max key'; + +SELECT mapPopulateSeries(value, materialize(20)) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +INSERT INTO 02005_test_table VALUES (map(0, 5)); +SELECT mapPopulateSeries(value, materialize(20)) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +INSERT INTO 02005_test_table VALUES (map(0, 5, 5, 10)); +SELECT mapPopulateSeries(value, materialize(20)) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +INSERT INTO 02005_test_table VALUES (map(-5, -5, 0, 5, 5, 10)); +SELECT mapPopulateSeries(value, materialize(20)) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +INSERT INTO 02005_test_table VALUES (map(-5, -5, 0, 5, 5, 10, 10, 15)); +SELECT mapPopulateSeries(value, materialize(20)) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +SELECT 'Possible overflow'; + +SELECT mapPopulateSeries(map(toUInt64(18446744073709551610), toUInt64(5)), 18446744073709551615); +SELECT mapPopulateSeries(map(toUInt64(18446744073709551615), toUInt64(5)), 18446744073709551615); + +SELECT 'Duplicate keys'; + +SELECT mapPopulateSeries(map(1, 4, 1, 5, 5, 6)); +SELECT mapPopulateSeries(map(1, 4, 1, 5, 5, 6), materialize(10)); + +DROP TABLE 02005_test_table; + +DROP TABLE IF EXISTS 02005_test_table; +CREATE TABLE 02005_test_table +( + key Array(Int64), + value Array(Int64) +) +ENGINE = TinyLog; + +SELECT 'mapPopulateSeries with two arrays'; +SELECT 'Without max key'; + +SELECT mapPopulateSeries(key, value) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +INSERT INTO 02005_test_table VALUES ([0], [5]); +SELECT mapPopulateSeries(key, value) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +INSERT INTO 02005_test_table VALUES ([0, 5], [5, 10]); +SELECT mapPopulateSeries(key, value) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +INSERT INTO 02005_test_table VALUES ([-5, 0, 5], [-5, 5, 10]); +SELECT mapPopulateSeries(key, value) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +INSERT INTO 02005_test_table VALUES ([-5, 0, 5, 10], [-5, 5, 10, 15]); +SELECT mapPopulateSeries(key, value) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +SELECT 'With max key'; + +SELECT mapPopulateSeries(key, value, materialize(20)) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +INSERT INTO 02005_test_table VALUES ([0], [5]); +SELECT mapPopulateSeries(key, value, materialize(20)) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +INSERT INTO 02005_test_table VALUES ([0, 5], [5, 10]); +SELECT mapPopulateSeries(key, value, materialize(20)) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +INSERT INTO 02005_test_table VALUES ([-5, 0, 5], [-5, 5, 10]); +SELECT mapPopulateSeries(key, value, materialize(20)) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +INSERT INTO 02005_test_table VALUES ([-5, 0, 5, 10], [-5, 5, 10, 15]); +SELECT mapPopulateSeries(key, value, materialize(20)) FROM 02005_test_table; +TRUNCATE TABLE 02005_test_table; + +SELECT 'Possible overflow'; + +SELECT mapPopulateSeries([18446744073709551610], [5], 18446744073709551615); +SELECT mapPopulateSeries([18446744073709551615], [5], 18446744073709551615); + +SELECT 'Duplicate keys'; + +SELECT mapPopulateSeries([1, 1, 5], [4, 5, 6]); +SELECT mapPopulateSeries([1, 1, 5], [4, 5, 6], materialize(10)); + +DROP TABLE 02005_test_table; diff --git a/parser/testdata/02205_postgresql_functions/ast.json
b/parser/testdata/02205_postgresql_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02205_postgresql_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02205_postgresql_functions/metadata.json b/parser/testdata/02205_postgresql_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02205_postgresql_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02205_postgresql_functions/query.sql b/parser/testdata/02205_postgresql_functions/query.sql new file mode 100644 index 000000000..343149f52 --- /dev/null +++ b/parser/testdata/02205_postgresql_functions/query.sql @@ -0,0 +1,59 @@ +--- REGEXP_MATCHES +select match('a key="v" ', 'key="(.*?)"'), REGEXP_MATCHES('a key="v" ', 'key="(.*?)"'); +select match(materialize('a key="v" '), 'key="(.*?)"'), REGEXP_MATCHES(materialize('a key="v" '), 'key="(.*?)"'); + +select match('\0 key="v" ', 'key="(.*?)"'), REGEXP_MATCHES('\0 key="v" ', 'key="(.*?)"'); +select match(materialize('\0 key="v" '), 'key="(.*?)"'), REGEXP_MATCHES(materialize('\0 key="v" '), 'key="(.*?)"'); + + +--- REGEXP_REPLACE +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '._']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '_.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '_._']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['._', '.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['._', '._']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['._', '_.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['._', '_._']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_.', '.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_.', '._']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_.', '_.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_.', '_._']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_._', '.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_._', '._']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_._', '_.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['_._', '_._']) AS s); + +SELECT s, replaceAll(s, '_', 'oo') AS a, REGEXP_REPLACE(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['.', '.']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, REGEXP_REPLACE(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['.', '._']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, REGEXP_REPLACE(s, '_', 'oo') AS b, a = b FROM (SELECT 
arrayJoin(['.', '_.']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, REGEXP_REPLACE(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['.', '_._']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, REGEXP_REPLACE(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['._', '.']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, REGEXP_REPLACE(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['._', '._']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, REGEXP_REPLACE(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['._', '_.']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, REGEXP_REPLACE(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['._', '_._']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, REGEXP_REPLACE(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_.', '.']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, REGEXP_REPLACE(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_.', '._']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, REGEXP_REPLACE(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_.', '_.']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, REGEXP_REPLACE(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_.', '_._']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, REGEXP_REPLACE(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_._', '.']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, REGEXP_REPLACE(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_._', '._']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, REGEXP_REPLACE(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_._', '_.']) AS s); +SELECT s, replaceAll(s, '_', 'oo') AS a, REGEXP_REPLACE(s, '_', 'oo') AS b, a = b FROM (SELECT arrayJoin(['_._', '_._']) AS s); + +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '.__']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '__.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.', '__.__']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.__', '.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.__', '.__']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.__', '__.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['.__', '__.__']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.', '.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.', '.__']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.', '__.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.', '__.__']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.__', '.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.__', '.__']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT 
arrayJoin(['__.__', '__.']) AS s); +SELECT s, replaceAll(s, '_', 'o') AS a, REGEXP_REPLACE(s, '_', 'o') AS b, a = b FROM (SELECT arrayJoin(['__.__', '__.__']) AS s); diff --git a/parser/testdata/02206_array_starts_ends_with/ast.json b/parser/testdata/02206_array_starts_ends_with/ast.json new file mode 100644 index 000000000..ca036ddf6 --- /dev/null +++ b/parser/testdata/02206_array_starts_ends_with/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function startsWith (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001159529, + "rows_read": 10, + "bytes_read": 373 + } +} diff --git a/parser/testdata/02206_array_starts_ends_with/metadata.json b/parser/testdata/02206_array_starts_ends_with/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02206_array_starts_ends_with/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02206_array_starts_ends_with/query.sql b/parser/testdata/02206_array_starts_ends_with/query.sql new file mode 100644 index 000000000..39b02c29d --- /dev/null +++ b/parser/testdata/02206_array_starts_ends_with/query.sql @@ -0,0 +1,36 @@ +select startsWith([], []); +select startsWith([1], []); +select startsWith([], [1]); +select '-'; + +select startsWith([NULL], [NULL]); +select startsWith([NULL], []); +select startsWith([], [NULL]); +select startsWith([NULL, 1], [NULL]); +select startsWith([NULL, 1], [1]); +select '-'; + +select startsWith([1, 2, 3, 4], [1, 2, 3]); +select startsWith([1, 2, 3, 4], [1, 2, 4]); +select startsWith(['a', 'b', 'c'], ['a', 'b']); +select startsWith(['a', 'b', 'c'], ['b']); +select '-'; + +select endsWith([], []); +select endsWith([1], []); +select endsWith([], [1]); +select '-'; + +select endsWith([NULL], [NULL]); +select endsWith([NULL], []); +select endsWith([], [NULL]); +select endsWith([1, NULL], [NULL]); +select endsWith([NULL, 1], [NULL]); +select '-'; + +select endsWith([1, 2, 3, 4], [3, 4]); +select endsWith([1, 2, 3, 4], [3]); +select '-'; + +select startsWith([1], emptyArrayUInt8()); +select endsWith([1], emptyArrayUInt8()); diff --git a/parser/testdata/02206_information_schema_show_database/ast.json b/parser/testdata/02206_information_schema_show_database/ast.json new file mode 100644 index 000000000..9a5641a36 --- /dev/null +++ b/parser/testdata/02206_information_schema_show_database/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "ShowCreateDatabaseQuery INFORMATION_SCHEMA (children 1)" + }, + { + "explain": " Identifier INFORMATION_SCHEMA" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001151347, + "rows_read": 2, + "bytes_read": 102 + } +} diff --git a/parser/testdata/02206_information_schema_show_database/metadata.json b/parser/testdata/02206_information_schema_show_database/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02206_information_schema_show_database/metadata.json 
@@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02206_information_schema_show_database/query.sql b/parser/testdata/02206_information_schema_show_database/query.sql new file mode 100644 index 000000000..91a8a0d1d --- /dev/null +++ b/parser/testdata/02206_information_schema_show_database/query.sql @@ -0,0 +1,3 @@ +SHOW CREATE DATABASE INFORMATION_SCHEMA; +SHOW CREATE INFORMATION_SCHEMA.COLUMNS; +SELECT create_table_query FROM system.tables WHERE database ILIKE 'INFORMATION_SCHEMA' AND table ILIKE 'TABLES'; -- suppress style check: database = currentDatabase() diff --git a/parser/testdata/02206_minimum_sample_size/ast.json b/parser/testdata/02206_minimum_sample_size/ast.json new file mode 100644 index 000000000..933361531 --- /dev/null +++ b/parser/testdata/02206_minimum_sample_size/ast.json @@ -0,0 +1,124 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 2)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Function minSampleSizeContinous (alias res) (children 1)" + }, + { + "explain": "     ExpressionList (children 5)" + }, + { + "explain": "      Literal UInt64_20" + }, + { + "explain": "      Literal UInt64_10" + }, + { + "explain": "      Literal Float64_0.05" + }, + { + "explain": "      Literal Float64_0.8" + }, + { + "explain": "      Literal Float64_0.05" + }, + { + "explain": "   ExpressionList (children 4)" + }, + { + "explain": "    Literal 'continous const 1'" + }, + { + "explain": "    Function roundBankers (children 1)" + }, + { + "explain": "     ExpressionList (children 2)" + }, + { + "explain": "      Function tupleElement (children 1)" + }, + { + "explain": "       ExpressionList (children 2)" + }, + { + "explain": "        Identifier res" + }, + { + "explain": "        Literal UInt64_1" + }, + { + "explain": "      Literal UInt64_2" + }, + { + "explain": "    Function roundBankers (children 1)" + }, + { + "explain": "     ExpressionList (children 2)" + }, + { + "explain": "      Function tupleElement (children 1)" + }, + { + "explain": "       ExpressionList (children 2)" + }, + { + "explain": "        Identifier res" + }, + { + "explain": "        Literal UInt64_2" + }, + { + "explain": "      Literal UInt64_2" + }, + { + "explain": "    Function roundBankers (children 1)" + }, + { + "explain": "     ExpressionList (children 2)" + }, + { + "explain": "      Function tupleElement (children 1)" + }, + { + "explain": "       ExpressionList (children 2)" + }, + { + "explain": "        Identifier res" + }, + { + "explain": "        Literal UInt64_3" + }, + { + "explain": "      Literal UInt64_2" + } + ], + + "rows": 34, + + "statistics": + { + "elapsed": 0.001402191, + "rows_read": 34, + "bytes_read": 1301 + } +} diff --git a/parser/testdata/02206_minimum_sample_size/metadata.json b/parser/testdata/02206_minimum_sample_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02206_minimum_sample_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02206_minimum_sample_size/query.sql b/parser/testdata/02206_minimum_sample_size/query.sql new file mode 100644 index 000000000..b8f153faa --- /dev/null +++ b/parser/testdata/02206_minimum_sample_size/query.sql @@ -0,0 +1,33 @@ +WITH minSampleSizeContinous(20, 10, 0.05, 0.8, 0.05) AS res SELECT 'continous const 1', roundBankers(res.1, 2), roundBankers(res.2, 2), roundBankers(res.3, 2); +WITH minSampleSizeContinous(0.0, 10.0, 0.05, 0.8, 0.05) AS res SELECT 'continous const 2',
roundBankers(res.1, 2), roundBankers(res.2, 2), roundBankers(res.3, 2); +WITH minSampleSizeContinous(20, 10.0, 0.05, 0.8, 0.05) AS res SELECT 'continous const 3', roundBankers(res.1, 2), roundBankers(res.2, 2), roundBankers(res.3, 2); +WITH minSampleSizeContinous(20.0, 10, 0.05, 0.8, 0.05) AS res SELECT 'continous const 4', roundBankers(res.1, 2), roundBankers(res.2, 2), roundBankers(res.3, 2); + +DROP TABLE IF EXISTS minimum_sample_size_continuos; +CREATE TABLE minimum_sample_size_continuos (baseline UInt64, sigma UInt64) ENGINE = Memory(); +INSERT INTO minimum_sample_size_continuos VALUES (20, 10); +INSERT INTO minimum_sample_size_continuos VALUES (200, 10); +WITH minSampleSizeContinous(baseline, sigma, 0.05, 0.8, 0.05) AS res SELECT 'continous UInt64 1', roundBankers(res.1, 2), roundBankers(res.2, 2), roundBankers(res.3, 2) FROM minimum_sample_size_continuos ORDER BY roundBankers(res.1, 2); +WITH minSampleSizeContinous(20, sigma, 0.05, 0.8, 0.05) AS res SELECT 'continous UInt64 2', roundBankers(res.1, 2), roundBankers(res.2, 2), roundBankers(res.3, 2) FROM minimum_sample_size_continuos ORDER BY roundBankers(res.1, 2); +WITH minSampleSizeContinous(baseline, 10, 0.05, 0.8, 0.05) AS res SELECT 'continous UInt64 3', roundBankers(res.1, 2), roundBankers(res.2, 2), roundBankers(res.3, 2) FROM minimum_sample_size_continuos ORDER BY roundBankers(res.1, 2); +DROP TABLE IF EXISTS minimum_sample_size_continuos; + +DROP TABLE IF EXISTS minimum_sample_size_continuos; +CREATE TABLE minimum_sample_size_continuos (baseline Float64, sigma Float64) ENGINE = Memory(); +INSERT INTO minimum_sample_size_continuos VALUES (20, 10); +INSERT INTO minimum_sample_size_continuos VALUES (200, 10); +WITH minSampleSizeContinous(baseline, sigma, 0.05, 0.8, 0.05) AS res SELECT 'continous Float64 1', roundBankers(res.1, 2), roundBankers(res.2, 2), roundBankers(res.3, 2) FROM minimum_sample_size_continuos ORDER BY roundBankers(res.1, 2); +WITH minSampleSizeContinous(20, sigma, 0.05, 0.8, 0.05) AS res SELECT 'continous Float64 2', roundBankers(res.1, 2), roundBankers(res.2, 2), roundBankers(res.3, 2) FROM minimum_sample_size_continuos ORDER BY roundBankers(res.1, 2); +WITH minSampleSizeContinous(baseline, 10, 0.05, 0.8, 0.05) AS res SELECT 'continous Float64 3', roundBankers(res.1, 2), roundBankers(res.2, 2), roundBankers(res.3, 2) FROM minimum_sample_size_continuos ORDER BY roundBankers(res.1, 2); +DROP TABLE IF EXISTS minimum_sample_size_continuos; + +WITH minSampleSizeConversion(0.9, 0.01, 0.8, 0.05) AS res SELECT 'conversion const 1', roundBankers(res.1, 2), roundBankers(res.2, 2), roundBankers(res.3, 2); +WITH minSampleSizeConversion(0.0, 0.01, 0.8, 0.05) AS res SELECT 'conversion const 2', roundBankers(res.1, 2), roundBankers(res.2, 2), roundBankers(res.3, 2); + +DROP TABLE IF EXISTS minimum_sample_size_conversion; +CREATE TABLE minimum_sample_size_conversion (p1 Float64) ENGINE = Memory(); +INSERT INTO minimum_sample_size_conversion VALUES (0.9); +INSERT INTO minimum_sample_size_conversion VALUES (0.8); +WITH minSampleSizeConversion(p1, 0.01, 0.8, 0.05) AS res SELECT 'conversion Float64 1', roundBankers(res.1, 2), roundBankers(res.2, 2), roundBankers(res.3, 2) FROM minimum_sample_size_conversion ORDER BY roundBankers(res.1, 2); +WITH minSampleSizeConversion(0.9, 0.01, 0.8, 0.05) AS res SELECT 'conversion Float64 2', roundBankers(res.1, 2), roundBankers(res.2, 2), roundBankers(res.3, 2) FROM minimum_sample_size_conversion ORDER BY roundBankers(res.1, 2); +DROP TABLE IF EXISTS minimum_sample_size_conversion; diff --git
a/parser/testdata/02207_key_condition_floats/ast.json b/parser/testdata/02207_key_condition_floats/ast.json new file mode 100644 index 000000000..f26db105c --- /dev/null +++ b/parser/testdata/02207_key_condition_floats/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_key_condition_float (children 1)" + }, + { + "explain": " Identifier t_key_condition_float" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001340762, + "rows_read": 2, + "bytes_read": 94 + } +} diff --git a/parser/testdata/02207_key_condition_floats/metadata.json b/parser/testdata/02207_key_condition_floats/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02207_key_condition_floats/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02207_key_condition_floats/query.sql b/parser/testdata/02207_key_condition_floats/query.sql new file mode 100644 index 000000000..65527c652 --- /dev/null +++ b/parser/testdata/02207_key_condition_floats/query.sql @@ -0,0 +1,34 @@ +DROP TABLE IF EXISTS t_key_condition_float; + +CREATE TABLE t_key_condition_float (a Float32) +ENGINE = MergeTree ORDER BY a; + +INSERT INTO t_key_condition_float VALUES (0.1), (0.2); + +SELECT count() FROM t_key_condition_float WHERE a > 0; +SELECT count() FROM t_key_condition_float WHERE a > 0.0; +SELECT count() FROM t_key_condition_float WHERE a > 0::Float32; +SELECT count() FROM t_key_condition_float WHERE a > 0::Float64; + +DROP TABLE t_key_condition_float; + +CREATE TABLE t_key_condition_float (a Float64) +ENGINE = MergeTree ORDER BY a; + +INSERT INTO t_key_condition_float VALUES (0.1), (0.2); + +SELECT count() FROM t_key_condition_float WHERE a > 0; +SELECT count() FROM t_key_condition_float WHERE a > 0.0; +SELECT count() FROM t_key_condition_float WHERE a > 0::Float32; +SELECT count() FROM t_key_condition_float WHERE a > 0::Float64; + +DROP TABLE t_key_condition_float; + +CREATE TABLE t_key_condition_float (a UInt64) +ENGINE = MergeTree ORDER BY a; + +INSERT INTO t_key_condition_float VALUES (1), (2); + +SELECT count() FROM t_key_condition_float WHERE a > 1.5; + +DROP TABLE t_key_condition_float; diff --git a/parser/testdata/02207_subseconds_intervals/ast.json b/parser/testdata/02207_subseconds_intervals/ast.json new file mode 100644 index 000000000..62d07d62c --- /dev/null +++ b/parser/testdata/02207_subseconds_intervals/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'test intervals'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001282209, + "rows_read": 5, + "bytes_read": 185 + } +} diff --git a/parser/testdata/02207_subseconds_intervals/metadata.json b/parser/testdata/02207_subseconds_intervals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02207_subseconds_intervals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02207_subseconds_intervals/query.sql b/parser/testdata/02207_subseconds_intervals/query.sql new file mode 100644 index 000000000..c30b3c460 --- /dev/null +++ b/parser/testdata/02207_subseconds_intervals/query.sql @@ -0,0 +1,113 @@ +SELECT 'test intervals'; + +SELECT '- test nanoseconds'; 
+select toStartOfInterval(toDateTime64('1980-12-12 12:12:12.123456789', 9), INTERVAL 1 NANOSECOND); -- In normal range, source scale matches result +select toStartOfInterval(toDateTime64('1980-12-12 12:12:12.1234567', 7), INTERVAL 1 NANOSECOND); -- In normal range, source scale less than result + +select toStartOfInterval(a, INTERVAL 1 NANOSECOND) from ( select toDateTime64('1980-12-12 12:12:12.123456789', 9) AS a ); -- Non-constant argument + +select toStartOfInterval(toDateTime64('1930-12-12 12:12:12.123456789', 9), INTERVAL 1 NANOSECOND); -- Below normal range, source scale matches result +select toStartOfInterval(toDateTime64('1930-12-12 12:12:12.1234567', 7), INTERVAL 1 NANOSECOND); -- Below normal range, source scale less than result + +select toStartOfInterval(toDateTime64('2220-12-12 12:12:12.123456789', 9), INTERVAL 1 NANOSECOND); -- Above normal range, source scale matches result +select toStartOfInterval(toDateTime64('2220-12-12 12:12:12.1234567', 7), INTERVAL 1 NANOSECOND); -- Above normal range, source scale less than result + + +SELECT '- test microseconds'; +select toStartOfInterval(toDateTime64('1980-12-12 12:12:12.123456', 6), INTERVAL 1 MICROSECOND); -- In normal range, source scale matches result +select toStartOfInterval(toDateTime64('1980-12-12 12:12:12.1234', 4), INTERVAL 1 MICROSECOND); -- In normal range, source scale less than result +select toStartOfInterval(toDateTime64('1980-12-12 12:12:12.12345678', 8), INTERVAL 1 MICROSECOND); -- In normal range, source scale greater than result + +select toStartOfInterval(a, INTERVAL 1 MICROSECOND) from ( select toDateTime64('1980-12-12 12:12:12.12345678', 8) AS a ); -- Non-constant argument + +select toStartOfInterval(toDateTime64('1930-12-12 12:12:12.123456', 6), INTERVAL 1 MICROSECOND); -- Below normal range, source scale matches result +select toStartOfInterval(toDateTime64('1930-12-12 12:12:12.1234', 4), INTERVAL 1 MICROSECOND); -- Below normal range, source scale less than result +select toStartOfInterval(toDateTime64('1930-12-12 12:12:12.12345678', 8), INTERVAL 1 MICROSECOND); -- Below normal range, source scale greater than result + + +select toStartOfInterval(toDateTime64('2220-12-12 12:12:12.123456', 6), INTERVAL 1 MICROSECOND); -- Above normal range, source scale matches result +select toStartOfInterval(toDateTime64('2220-12-12 12:12:12.1234', 4), INTERVAL 1 MICROSECOND); -- Above normal range, source scale less than result +select toStartOfInterval(toDateTime64('2220-12-12 12:12:12.12345678', 8), INTERVAL 1 MICROSECOND); -- Above normal range, source scale greater than result + + +SELECT '- test milliseconds'; +select toStartOfInterval(toDateTime64('1980-12-12 12:12:12.123', 3), INTERVAL 1 MILLISECOND); -- In normal range, source scale matches result +select toStartOfInterval(toDateTime64('1980-12-12 12:12:12.12', 2), INTERVAL 1 MILLISECOND); -- In normal range, source scale less than result +select toStartOfInterval(toDateTime64('1980-12-12 12:12:12.123456', 6), INTERVAL 1 MILLISECOND); -- In normal range, source scale greater than result + +select toStartOfInterval(a, INTERVAL 1 MILLISECOND) from ( select toDateTime64('1980-12-12 12:12:12.123456', 6) AS a ); -- Non-constant argument + +select toStartOfInterval(toDateTime64('1930-12-12 12:12:12.123', 3), INTERVAL 1 MILLISECOND); -- Below normal range, source scale matches result +select toStartOfInterval(toDateTime64('1930-12-12 12:12:12.12', 2), INTERVAL 1 MILLISECOND); -- Below normal range, source scale less than result +select 
toStartOfInterval(toDateTime64('1930-12-12 12:12:12.123456', 6), INTERVAL 1 MILLISECOND); -- Below normal range, source scale greater than result + +select toStartOfInterval(toDateTime64('2220-12-12 12:12:12.123', 3), INTERVAL 1 MILLISECOND); -- Above normal range, source scale matches result +select toStartOfInterval(toDateTime64('2220-12-12 12:12:12.12', 2), INTERVAL 1 MILLISECOND); -- Above normal range, source scale less than result +select toStartOfInterval(toDateTime64('2220-12-12 12:12:12.123456', 6), INTERVAL 1 MILLISECOND); -- Above normal range, source scale greater than result + + +SELECT 'test add[...]seconds()'; + + +SELECT '- test nanoseconds'; +select addNanoseconds(toDateTime64('1980-12-12 12:12:12.123456789', 9), 1); -- In normal range, source scale matches result +select addNanoseconds(toDateTime64('1980-12-12 12:12:12.1234567', 7), 1); -- In normal range, source scale less than result + +select addNanoseconds(a, 1) from ( select toDateTime64('1980-12-12 12:12:12.123456789', 9) AS a ); -- Non-constant argument + +select addNanoseconds(toDateTime64('1930-12-12 12:12:12.123456789', 9), 1); -- Below normal range, source scale matches result +select addNanoseconds(toDateTime64('1930-12-12 12:12:12.1234567', 7), 1); -- Below normal range, source scale less than result + +select addNanoseconds(toDateTime64('2220-12-12 12:12:12.123456789', 9), 1); -- Above normal range, source scale matches result +select addNanoseconds(toDateTime64('2220-12-12 12:12:12.1234567', 7), 1); -- Above normal range, source scale less than result + + +SELECT '- test microseconds'; +select addMicroseconds(toDateTime64('1980-12-12 12:12:12.123456', 6), 1); -- In normal range, source scale matches result +select addMicroseconds(toDateTime64('1980-12-12 12:12:12.1234', 4), 1); -- In normal range, source scale less than result +select addMicroseconds(toDateTime64('1980-12-12 12:12:12.12345678', 8), 1); -- In normal range, source scale greater than result + +select addMicroseconds(a, 1) from ( select toDateTime64('1980-12-12 12:12:12.123456', 6) AS a ); -- Non-constant argument + +select addMicroseconds(toDateTime64('1930-12-12 12:12:12.123456', 6), 1); -- Below normal range, source scale matches result +select addMicroseconds(toDateTime64('1930-12-12 12:12:12.1234', 4), 1); -- Below normal range, source scale less than result +select addMicroseconds(toDateTime64('1930-12-12 12:12:12.12345678', 8), 1); -- Below normal range, source scale greater than result + +select addMicroseconds(toDateTime64('2220-12-12 12:12:12.123456', 6), 1); -- Above normal range, source scale matches result +select addMicroseconds(toDateTime64('2220-12-12 12:12:12.1234', 4), 1); -- Above normal range, source scale less than result +select addMicroseconds(toDateTime64('2220-12-12 12:12:12.12345678', 8), 1); -- Above normal range, source scale greater than result + + +SELECT '- test milliseconds'; +select addMilliseconds(toDateTime64('1980-12-12 12:12:12.123', 3), 1); -- In normal range, source scale matches result +select addMilliseconds(toDateTime64('1980-12-12 12:12:12.12', 2), 1); -- In normal range, source scale less than result +select addMilliseconds(toDateTime64('1980-12-12 12:12:12.123456', 6), 1); -- In normal range, source scale greater than result + +select addMilliseconds(a, 1) from ( select toDateTime64('1980-12-12 12:12:12.123', 3) AS a ); -- Non-constant argument + +select addMilliseconds(toDateTime64('1930-12-12 12:12:12.123', 3), 1); -- Below normal range, source scale matches result +select 
addMilliseconds(toDateTime64('1930-12-12 12:12:12.12', 2), 1); -- Below normal range, source scale less than result +select addMilliseconds(toDateTime64('1930-12-12 12:12:12.123456', 6), 1); -- Below normal range, source scale greater than result + +select addMilliseconds(toDateTime64('2220-12-12 12:12:12.123', 3), 1); -- Above normal range, source scale matches result +select addMilliseconds(toDateTime64('2220-12-12 12:12:12.12', 2), 1); -- Above normal range, source scale less than result +select addMilliseconds(toDateTime64('2220-12-12 12:12:12.123456', 6), 1); -- Above normal range, source scale greater than result + +select 'test subtract[...]seconds()'; +select '- test nanoseconds'; +select subtractNanoseconds(toDateTime64('2023-01-01 00:00:00.0000000', 7, 'UTC'), 1); +select subtractNanoseconds(toDateTime64('2023-01-01 00:00:00.0000000', 7, 'UTC'), 100); +select subtractNanoseconds(toDateTime64('2023-01-01 00:00:00.0000000', 7, 'UTC'), -1); +select subtractNanoseconds(toDateTime64('2023-01-01 00:00:00.0000000', 7, 'UTC'), -100); + +select '- test microseconds'; +select subtractMicroseconds(toDateTime64('2023-01-01 00:00:00.0000', 4, 'UTC'), 1); +select subtractMicroseconds(toDateTime64('2023-01-01 00:00:00.0000', 4, 'UTC'), 100); +select subtractMicroseconds(toDateTime64('2023-01-01 00:00:00.0000', 4, 'UTC'), -1); +select subtractMicroseconds(toDateTime64('2023-01-01 00:00:00.0000', 4, 'UTC'), -100); + +select '- test milliseconds'; +select subtractMilliseconds(toDateTime64('2023-01-01 00:00:00.0', 1, 'UTC'), 1); +select subtractMilliseconds(toDateTime64('2023-01-01 00:00:00.0', 1, 'UTC'), 100); +select subtractMilliseconds(toDateTime64('2023-01-01 00:00:00.0', 1, 'UTC'), -1); +select subtractMilliseconds(toDateTime64('2023-01-01 00:00:00.0', 1, 'UTC'), -100); diff --git a/parser/testdata/02207_ttl_move_if_exists/ast.json b/parser/testdata/02207_ttl_move_if_exists/ast.json new file mode 100644 index 000000000..29f65b68a --- /dev/null +++ b/parser/testdata/02207_ttl_move_if_exists/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_ttl_move_if_exists (children 1)" + }, + { + "explain": " Identifier t_ttl_move_if_exists" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001311372, + "rows_read": 2, + "bytes_read": 92 + } +} diff --git a/parser/testdata/02207_ttl_move_if_exists/metadata.json b/parser/testdata/02207_ttl_move_if_exists/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02207_ttl_move_if_exists/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02207_ttl_move_if_exists/query.sql b/parser/testdata/02207_ttl_move_if_exists/query.sql new file mode 100644 index 000000000..ab17d343e --- /dev/null +++ b/parser/testdata/02207_ttl_move_if_exists/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS t_ttl_move_if_exists; + +CREATE TABLE t_ttl_move_if_exists (d DateTime, a UInt32) +ENGINE = MergeTree ORDER BY tuple() +TTL d TO DISK IF EXISTS 'non_existing_disk'; + +SHOW CREATE TABLE t_ttl_move_if_exists; + +DROP TABLE IF EXISTS t_ttl_move_if_exists; diff --git a/parser/testdata/02209_short_circuit_node_without_parents/ast.json b/parser/testdata/02209_short_circuit_node_without_parents/ast.json new file mode 100644 index 000000000..cba7ed7a9 --- /dev/null +++ b/parser/testdata/02209_short_circuit_node_without_parents/ast.json @@ -0,0 +1,112 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + 
"data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function empty (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal Array_[UInt64_1]" + }, + { + "explain": " Literal Array_[UInt64_2]" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 30, + + "statistics": + { + "elapsed": 0.001233916, + "rows_read": 30, + "bytes_read": 1353 + } +} diff --git a/parser/testdata/02209_short_circuit_node_without_parents/metadata.json b/parser/testdata/02209_short_circuit_node_without_parents/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02209_short_circuit_node_without_parents/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02209_short_circuit_node_without_parents/query.sql b/parser/testdata/02209_short_circuit_node_without_parents/query.sql new file mode 100644 index 000000000..c20ca8359 --- /dev/null +++ b/parser/testdata/02209_short_circuit_node_without_parents/query.sql @@ -0,0 +1,2 @@ +SELECT 1 FROM (SELECT arrayJoin(if(empty(range(number)), [1], [2])) from numbers(1)); + diff --git a/parser/testdata/02210_append_to_dev_dull/ast.json b/parser/testdata/02210_append_to_dev_dull/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02210_append_to_dev_dull/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02210_append_to_dev_dull/metadata.json b/parser/testdata/02210_append_to_dev_dull/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02210_append_to_dev_dull/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02210_append_to_dev_dull/query.sql b/parser/testdata/02210_append_to_dev_dull/query.sql new file mode 100644 index 000000000..a8aaa2f05 --- /dev/null +++ b/parser/testdata/02210_append_to_dev_dull/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest + +insert into table function file('/dev/null', 'Parquet', 'number UInt64') select * from numbers(10); +insert into table function file('/dev/null', 'ORC', 'number UInt64') select * from numbers(10); +insert into table function file('/dev/null', 'JSON', 'number UInt64') select * from numbers(10); + diff 
--git a/parser/testdata/02210_processors_profile_log/ast.json b/parser/testdata/02210_processors_profile_log/ast.json new file mode 100644 index 000000000..1c98281f1 --- /dev/null +++ b/parser/testdata/02210_processors_profile_log/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Explain EXPLAIN PIPELINE (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": "  ExpressionList (children 1)" + }, + { + "explain": "   SelectQuery (children 1)" + }, + { + "explain": "    ExpressionList (children 1)" + }, + { + "explain": "     Function sleep (children 1)" + }, + { + "explain": "      ExpressionList (children 1)" + }, + { + "explain": "       Literal UInt64_1" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001256881, + "rows_read": 8, + "bytes_read": 310 + } +} diff --git a/parser/testdata/02210_processors_profile_log/metadata.json b/parser/testdata/02210_processors_profile_log/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02210_processors_profile_log/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02210_processors_profile_log/query.sql b/parser/testdata/02210_processors_profile_log/query.sql new file mode 100644 index 000000000..78e30cc59 --- /dev/null +++ b/parser/testdata/02210_processors_profile_log/query.sql @@ -0,0 +1,33 @@ +EXPLAIN PIPELINE SELECT sleep(1); + +SELECT sleep(1) SETTINGS log_processors_profiles=true, log_queries=1, log_queries_min_type='QUERY_FINISH'; +SYSTEM FLUSH LOGS query_log, processors_profile_log; + +WITH + ( + SELECT query_id + FROM system.query_log + WHERE current_database = currentDatabase() AND Settings['log_processors_profiles']='1' + ) AS query_id_ +SELECT + name, + multiIf( + -- ExpressionTransform executes sleep(), + -- so IProcessor::work() will spend 1 sec. + -- We use two different timers to measure time: CLOCK_MONOTONIC for sleep and CLOCK_MONOTONIC_COARSE for profiling + -- that's why we cannot compare directly with 1,000,000 microseconds - let's compare with 900,000 microseconds. + name = 'ExpressionTransform', elapsed_us >= 0.9e6 ? 1 : elapsed_us, + -- SourceFromSingleChunk, which feeds data to ExpressionTransform, + -- will feed the first block and then wait in PortFull. + name = 'SourceFromSingleChunk', output_wait_elapsed_us >= 0.9e6 ? 1 : output_wait_elapsed_us, + -- LazyOutputFormat is the output, + -- so it cannot start to execute before sleep(1) has been executed. + input_wait_elapsed_us>=1e6 ?
1 : input_wait_elapsed_us) + elapsed, + input_rows, + input_bytes, + output_rows, + output_bytes +FROM system.processors_profile_log +WHERE query_id = query_id_ +ORDER BY name; diff --git a/parser/testdata/02210_toColumnTypeName_toLowCardinality_const/ast.json b/parser/testdata/02210_toColumnTypeName_toLowCardinality_const/ast.json new file mode 100644 index 000000000..b4ad7fa03 --- /dev/null +++ b/parser/testdata/02210_toColumnTypeName_toLowCardinality_const/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toColumnTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001003116, + "rows_read": 9, + "bytes_read": 365 + } +} diff --git a/parser/testdata/02210_toColumnTypeName_toLowCardinality_const/metadata.json b/parser/testdata/02210_toColumnTypeName_toLowCardinality_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02210_toColumnTypeName_toLowCardinality_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02210_toColumnTypeName_toLowCardinality_const/query.sql b/parser/testdata/02210_toColumnTypeName_toLowCardinality_const/query.sql new file mode 100644 index 000000000..a71c3f306 --- /dev/null +++ b/parser/testdata/02210_toColumnTypeName_toLowCardinality_const/query.sql @@ -0,0 +1 @@ +SELECT toColumnTypeName(toLowCardinality(1)); diff --git a/parser/testdata/02211_jsonl_format_extension/ast.json b/parser/testdata/02211_jsonl_format_extension/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02211_jsonl_format_extension/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02211_jsonl_format_extension/metadata.json b/parser/testdata/02211_jsonl_format_extension/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02211_jsonl_format_extension/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02211_jsonl_format_extension/query.sql b/parser/testdata/02211_jsonl_format_extension/query.sql new file mode 100644 index 000000000..61cc2a408 --- /dev/null +++ b/parser/testdata/02211_jsonl_format_extension/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest, no-parallel +insert into table function file('data.jsonl', 'JSONEachRow', 'x UInt32') select * from numbers(10) SETTINGS engine_file_truncate_on_insert=1; +select * from file('data.jsonl') order by x; diff --git a/parser/testdata/02212_cte_and_table_alias/ast.json b/parser/testdata/02212_cte_and_table_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02212_cte_and_table_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02212_cte_and_table_alias/metadata.json b/parser/testdata/02212_cte_and_table_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02212_cte_and_table_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02212_cte_and_table_alias/query.sql 
b/parser/testdata/02212_cte_and_table_alias/query.sql new file mode 100644 index 000000000..ce0fba4bf --- /dev/null +++ b/parser/testdata/02212_cte_and_table_alias/query.sql @@ -0,0 +1,41 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/19222 +SET enable_global_with_statement = 1; + +WITH t AS + ( + SELECT number AS n + FROM numbers(10000) + ) +SELECT count(*) +FROM t AS a +WHERE a.n < 5000; + +WITH t AS + ( + SELECT number AS n + FROM numbers(10000) + ) +SELECT count(*) +FROM t AS a +WHERE t.n < 5000; + + +SET enable_global_with_statement = 0; + +WITH t AS + ( + SELECT number AS n + FROM numbers(10000) + ) +SELECT count(*) +FROM t AS a +WHERE a.n < 5000; + +WITH t AS + ( + SELECT number AS n + FROM numbers(10000) + ) +SELECT count(*) +FROM t AS a +WHERE t.n < 5000; diff --git a/parser/testdata/02212_h3_get_pentagon_indexes/ast.json b/parser/testdata/02212_h3_get_pentagon_indexes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02212_h3_get_pentagon_indexes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02212_h3_get_pentagon_indexes/metadata.json b/parser/testdata/02212_h3_get_pentagon_indexes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02212_h3_get_pentagon_indexes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02212_h3_get_pentagon_indexes/query.sql b/parser/testdata/02212_h3_get_pentagon_indexes/query.sql new file mode 100644 index 000000000..d4eab090a --- /dev/null +++ b/parser/testdata/02212_h3_get_pentagon_indexes/query.sql @@ -0,0 +1,32 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS table1; + +CREATE TABLE table1 (resolution UInt8) ENGINE = Memory; + +INSERT INTO table1 VALUES(0); +INSERT INTO table1 VALUES(1); +INSERT INTO table1 VALUES(2); +INSERT INTO table1 VALUES(3); +INSERT INTO table1 VALUES(4); +INSERT INTO table1 VALUES(5); +INSERT INTO table1 VALUES(6); +INSERT INTO table1 VALUES(7); +INSERT INTO table1 VALUES(8); +INSERT INTO table1 VALUES(9); +INSERT INTO table1 VALUES(10); +INSERT INTO table1 VALUES(11); +INSERT INTO table1 VALUES(12); +INSERT INTO table1 VALUES(13); +INSERT INTO table1 VALUES(14); +INSERT INTO table1 VALUES(15); + + +SELECT h3GetPentagonIndexes(resolution) AS indexes from table1 order by indexes; +SELECT h3GetPentagonIndexes(20) AS indexes; -- { serverError ARGUMENT_OUT_OF_BOUND } + +DROP TABLE table1; + +-- tests for const cols +SELECT '-- test for const cols'; +SELECT h3GetPentagonIndexes(arrayJoin([0,1,2,3,4,5,6,7,8,9,10])); diff --git a/parser/testdata/02212_h3_get_res0_indexes/ast.json b/parser/testdata/02212_h3_get_res0_indexes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02212_h3_get_res0_indexes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02212_h3_get_res0_indexes/metadata.json b/parser/testdata/02212_h3_get_res0_indexes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02212_h3_get_res0_indexes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02212_h3_get_res0_indexes/query.sql b/parser/testdata/02212_h3_get_res0_indexes/query.sql new file mode 100644 index 000000000..648463f9d --- /dev/null +++ b/parser/testdata/02212_h3_get_res0_indexes/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest + +SELECT h3GetRes0Indexes(); +SELECT h3GetRes0Indexes(3); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT h3GetRes0Indexes() FROM system.numbers LIMIT 
5; diff --git a/parser/testdata/02212_h3_point_dist/ast.json b/parser/testdata/02212_h3_point_dist/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02212_h3_point_dist/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02212_h3_point_dist/metadata.json b/parser/testdata/02212_h3_point_dist/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02212_h3_point_dist/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02212_h3_point_dist/query.sql b/parser/testdata/02212_h3_point_dist/query.sql new file mode 100644 index 000000000..bcba4be04 --- /dev/null +++ b/parser/testdata/02212_h3_point_dist/query.sql @@ -0,0 +1,32 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS table1; + +CREATE TABLE table1 (lat1 Float64, lon1 Float64, lat2 Float64, lon2 Float64) ENGINE = Memory; + +INSERT INTO table1 VALUES(-10.0 ,0.0, 10.0, 0.0); +INSERT INTO table1 VALUES(-1, -1, 2, 1); +INSERT INTO table1 VALUES(0, 2, 1, 3); +INSERT INTO table1 VALUES(-2, -3, -1, -2); +INSERT INTO table1 VALUES(-87, 0, -85, 3); +INSERT INTO table1 VALUES(-89, 1, -88, 2); +INSERT INTO table1 VALUES(-84, 1, -83, 2); +INSERT INTO table1 VALUES(-88, 90, -86, 91); +INSERT INTO table1 VALUES(-84, -91, -83, -90); +INSERT INTO table1 VALUES(-90, 181, -89, 182); +INSERT INTO table1 VALUES(-84, 181, -83, 182); +INSERT INTO table1 VALUES(-87, 0, -85, 3); + +select '-- select h3PointDistM(lat1, lon1,lat2, lon2) AS k from table1 order by k;'; +select round(h3PointDistM(lat1, lon1,lat2, lon2), 2) AS k from table1 order by k; +select '-- select h3PointDistKm(lat1, lon1,lat2, lon2) AS k from table1 order by k;'; +select round(h3PointDistKm(lat1, lon1,lat2, lon2), 2) AS k from table1 order by k; +select '-- select h3PointDistRads(lat1, lon1,lat2, lon2) AS k from table1 order by k;'; +select round(h3PointDistRads(lat1, lon1,lat2, lon2), 5) AS k from table1 order by k; + +DROP TABLE table1; + +-- tests for const columns +select '-- test for non const cols'; +select round(h3PointDistRads(-10.0 ,0.0, 10.0, arrayJoin([0.0])), 5) as h3PointDistRads; +select round(h3PointDistRads(-10.0 ,0.0, 10.0, toFloat64(0)) , 5)as h3PointDistRads; diff --git a/parser/testdata/02220_array_join_format/ast.json b/parser/testdata/02220_array_join_format/ast.json new file mode 100644 index 000000000..771d43f40 --- /dev/null +++ b/parser/testdata/02220_array_join_format/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Explain EXPLAIN SYNTAX (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function range (alias range_) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Identifier 
point_" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " ArrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier range_ (alias point_)" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.001473494, + "rows_read": 27, + "bytes_read": 1209 + } +} diff --git a/parser/testdata/02220_array_join_format/metadata.json b/parser/testdata/02220_array_join_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02220_array_join_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02220_array_join_format/query.sql b/parser/testdata/02220_array_join_format/query.sql new file mode 100644 index 000000000..afea68558 --- /dev/null +++ b/parser/testdata/02220_array_join_format/query.sql @@ -0,0 +1 @@ +explain syntax select * from (select range(0, 10) range_, point_ from system.one array join range_ as point_); diff --git a/parser/testdata/02222_allow_experimental_projection_optimization__enable_global_with_statement/ast.json b/parser/testdata/02222_allow_experimental_projection_optimization__enable_global_with_statement/ast.json new file mode 100644 index 000000000..e8272080f --- /dev/null +++ b/parser/testdata/02222_allow_experimental_projection_optimization__enable_global_with_statement/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_02222 (children 1)" + }, + { + "explain": " Identifier data_02222" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001124618, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02222_allow_experimental_projection_optimization__enable_global_with_statement/metadata.json b/parser/testdata/02222_allow_experimental_projection_optimization__enable_global_with_statement/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02222_allow_experimental_projection_optimization__enable_global_with_statement/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02222_allow_experimental_projection_optimization__enable_global_with_statement/query.sql b/parser/testdata/02222_allow_experimental_projection_optimization__enable_global_with_statement/query.sql new file mode 100644 index 000000000..f870b985d --- /dev/null +++ b/parser/testdata/02222_allow_experimental_projection_optimization__enable_global_with_statement/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS data_02222; +CREATE TABLE data_02222 engine=MergeTree() ORDER BY dummy AS SELECT * FROM system.one; +-- { echoOn } +WITH + (SELECT * FROM data_02222) AS bm1, + (SELECT * FROM data_02222) AS bm2, + (SELECT * FROM data_02222) AS bm3, + (SELECT * FROM data_02222) AS bm4, + (SELECT * FROM data_02222) AS bm5, + (SELECT * FROM data_02222) AS bm6, + (SELECT * FROM data_02222) AS bm7, + (SELECT * FROM data_02222) AS bm8, + (SELECT * FROM data_02222) AS bm9, + (SELECT * FROM data_02222) AS bm10 +SELECT bm1, bm2, bm3, bm4, bm5, bm6, bm7, bm8, bm9, bm10 FROM data_02222; +-- { echoOff } +DROP TABLE data_02222; diff --git a/parser/testdata/02223_h3_test_const_columns/ast.json b/parser/testdata/02223_h3_test_const_columns/ast.json new file mode 100644 
index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02223_h3_test_const_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02223_h3_test_const_columns/metadata.json b/parser/testdata/02223_h3_test_const_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02223_h3_test_const_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02223_h3_test_const_columns/query.sql b/parser/testdata/02223_h3_test_const_columns/query.sql new file mode 100644 index 000000000..9c876dd33 --- /dev/null +++ b/parser/testdata/02223_h3_test_const_columns/query.sql @@ -0,0 +1,34 @@ +-- Tags: no-fasttest + +select round(geoToH3(toFloat64(1),toFloat64(0),arrayJoin([1,2])), 2); +select h3ToParent(641573946153969375, arrayJoin([1,2])); +SELECT round(h3HexAreaM2(arrayJoin([1,2])), 2); +SELECT round(h3HexAreaKm2(arrayJoin([1,2])), 2); +SELECT round(h3CellAreaM2(arrayJoin([579205133326352383,589753847883235327,594082350283882495])), 2); +SELECT NULL, toFloat64('-1'), -2147483648, h3CellAreaM2(arrayJoin([9223372036854775807, 65535, NULL])); -- { serverError INCORRECT_DATA } +SELECT round(h3CellAreaRads2(arrayJoin([579205133326352383,589753847883235327,594082350283882495])), 2); +SELECT NULL, toFloat64('-1'), -2147483648, h3CellAreaRads2(arrayJoin([9223372036854775807, 65535, NULL])); -- { serverError INCORRECT_DATA } +SELECT h3GetResolution(arrayJoin([579205133326352383,589753847883235327,594082350283882495])); +SELECT round(h3EdgeAngle(arrayJoin([0,1,2])), 2); +SELECT round(h3EdgeLengthM(arrayJoin([0,1,2])), 2); +SELECT round(h3EdgeLengthKm(arrayJoin([0,1,2])), 2); +WITH h3ToGeo(arrayJoin([579205133326352383,589753847883235327,594082350283882495])) AS p SELECT round(p.1, 2), round(p.2, 2); +SELECT arrayMap(p -> (round(p.1, 2), round(p.2, 2)), h3ToGeoBoundary(arrayJoin([579205133326352383,589753847883235327,594082350283882495]))); +SELECT h3kRing(arrayJoin([579205133326352383]), arrayJoin([toUInt16(1),toUInt16(2),toUInt16(3)])); +SELECT h3GetBaseCell(arrayJoin([579205133326352383,589753847883235327,594082350283882495])); +SELECT h3IndexesAreNeighbors(617420388351344639, arrayJoin([617420388352655359, 617420388351344639, 617420388352917503])); +SELECT h3ToChildren(599405990164561919, arrayJoin([6,5])); +SELECT h3ToParent(599405990164561919, arrayJoin([0,1])); +SELECT h3ToString(arrayJoin([579205133326352383,589753847883235327,594082350283882495])); +SELECT stringToH3(h3ToString(arrayJoin([579205133326352383,589753847883235327,594082350283882495]))); +SELECT h3IsResClassIII(arrayJoin([579205133326352383,589753847883235327,594082350283882495])); +SELECT h3IsPentagon(arrayJoin([stringToH3('8f28308280f18f2'),stringToH3('821c07fffffffff'),stringToH3('0x8f28308280f18f2L'),stringToH3('0x821c07fffffffffL')])); +SELECT h3GetFaces(arrayJoin([stringToH3('8f28308280f18f2'),stringToH3('821c07fffffffff'),stringToH3('0x8f28308280f18f2L'),stringToH3('0x821c07fffffffffL')])); +SELECT h3ToCenterChild(577023702256844799, arrayJoin([1,2,3])); +SELECT round(h3ExactEdgeLengthM(arrayJoin([1298057039473278975,1370114633511206911,1442172227549134847,1514229821587062783])), 2); +SELECT round(h3ExactEdgeLengthKm(arrayJoin([1298057039473278975,1370114633511206911,1442172227549134847,1514229821587062783])), 2); +SELECT round(h3ExactEdgeLengthRads(arrayJoin([1298057039473278975,1370114633511206911,1442172227549134847,1514229821587062783])), 2); +SELECT h3NumHexagons(arrayJoin([1,2,3])); +SELECT 
h3Line(arrayJoin([stringToH3('85283473fffffff')]), arrayJoin([stringToH3('8528342bfffffff')])); +SELECT h3HexRing(arrayJoin([579205133326352383]), arrayJoin([toUInt16(1),toUInt16(2),toUInt16(3)])); -- { serverError INCORRECT_DATA } +SELECT h3HexRing(arrayJoin([581276613233082367]), arrayJoin([toUInt16(0),toUInt16(1),toUInt16(2)])); diff --git a/parser/testdata/02223_insert_select_schema_inference/ast.json b/parser/testdata/02223_insert_select_schema_inference/ast.json new file mode 100644 index 000000000..ecbf20244 --- /dev/null +++ b/parser/testdata/02223_insert_select_schema_inference/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001343155, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02223_insert_select_schema_inference/metadata.json b/parser/testdata/02223_insert_select_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02223_insert_select_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02223_insert_select_schema_inference/query.sql b/parser/testdata/02223_insert_select_schema_inference/query.sql new file mode 100644 index 000000000..031ced1b2 --- /dev/null +++ b/parser/testdata/02223_insert_select_schema_inference/query.sql @@ -0,0 +1,5 @@ +drop table if exists test; +create table test (x UInt32, y String, d Date) engine=Memory() as select number as x, toString(number) as y, toDate(number) as d from numbers(10); +insert into table function file('data.native.zst') select * from test settings engine_file_truncate_on_insert=1; +desc file('data.native.zst'); +select * from file('data.native.zst'); diff --git a/parser/testdata/02224_parallel_distributed_insert_select_cluster/ast.json b/parser/testdata/02224_parallel_distributed_insert_select_cluster/ast.json new file mode 100644 index 000000000..a66b6d566 --- /dev/null +++ b/parser/testdata/02224_parallel_distributed_insert_select_cluster/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery dst_02224 (children 1)" + }, + { + "explain": " Identifier dst_02224" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001109051, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02224_parallel_distributed_insert_select_cluster/metadata.json b/parser/testdata/02224_parallel_distributed_insert_select_cluster/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02224_parallel_distributed_insert_select_cluster/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02224_parallel_distributed_insert_select_cluster/query.sql b/parser/testdata/02224_parallel_distributed_insert_select_cluster/query.sql new file mode 100644 index 000000000..023f220e9 --- /dev/null +++ b/parser/testdata/02224_parallel_distributed_insert_select_cluster/query.sql @@ -0,0 +1,34 @@ +drop table if exists dst_02224; +drop table if exists src_02224; +create table dst_02224 (key Int) engine=Memory(); +create table src_02224 (key Int) engine=Memory(); +insert into src_02224 values (1); + +-- { echoOn } +truncate table dst_02224; +insert into function cluster('test_cluster_two_shards', currentDatabase(), dst_02224, key) +select * from 
cluster('test_cluster_two_shards', currentDatabase(), src_02224, key) +settings parallel_distributed_insert_select=1, max_distributed_depth=1; -- { serverError TOO_LARGE_DISTRIBUTED_DEPTH } +select * from dst_02224; + +truncate table dst_02224; +insert into function cluster('test_cluster_two_shards', currentDatabase(), dst_02224, key) +select * from cluster('test_cluster_two_shards', currentDatabase(), src_02224, key) +settings parallel_distributed_insert_select=1, max_distributed_depth=2; +select * from dst_02224; + +truncate table dst_02224; +insert into function cluster('test_cluster_two_shards', currentDatabase(), dst_02224, key) +select * from cluster('test_cluster_two_shards', currentDatabase(), src_02224, key) +settings parallel_distributed_insert_select=2, max_distributed_depth=1; +select * from dst_02224; + +truncate table dst_02224; +insert into function remote('127.{1,2}', currentDatabase(), dst_02224, key) +select * from remote('127.{1,2}', currentDatabase(), src_02224, key) +settings parallel_distributed_insert_select=2, max_distributed_depth=1; +select * from dst_02224; +-- { echoOff } + +drop table src_02224; +drop table dst_02224; diff --git a/parser/testdata/02224_s2_test_const_columns/ast.json b/parser/testdata/02224_s2_test_const_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02224_s2_test_const_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02224_s2_test_const_columns/metadata.json b/parser/testdata/02224_s2_test_const_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02224_s2_test_const_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02224_s2_test_const_columns/query.sql b/parser/testdata/02224_s2_test_const_columns/query.sql new file mode 100644 index 000000000..1d3e51065 --- /dev/null +++ b/parser/testdata/02224_s2_test_const_columns/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-fasttest + +SELECT geoToS2(37.79506683, arrayJoin([55.71290588,37.79506683])); +SELECT s2ToGeo(arrayJoin([4704772434919038107,9926594385212866560])); +SELECT s2GetNeighbors(arrayJoin([1157339245694594829, 5074766849661468672])); +SELECT s2CellsIntersect(9926595209846587392, arrayJoin([9926594385212866560, 5074766849661468672])); +SELECT s2CapContains(1157339245694594829, toFloat64(1), arrayJoin([1157347770437378819,1157347770437378389])); +SELECT s2CapUnion(3814912406305146967, toFloat64(1), 1157347770437378819, toFloat64(1)); +SELECT s2RectAdd(5178914411069187297, 5177056748191934217, arrayJoin([5179056748191934217,5177914411069187297])); +SELECT s2RectContains(5179062030687166815, 5177056748191934217, arrayJoin([5177914411069187297, 5177914411069187297])); +SELECT s2RectUnion(5178914411069187297, 5177056748191934217, 5179062030687166815, arrayJoin([5177056748191934217, 5177914411069187297])); +SELECT s2RectIntersection(5178914411069187297, 5177056748191934217, 5179062030687166815, arrayJoin([5177056748191934217,5177914411069187297])); diff --git a/parser/testdata/02226_analyzer_or_like_combine/ast.json b/parser/testdata/02226_analyzer_or_like_combine/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02226_analyzer_or_like_combine/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02226_analyzer_or_like_combine/metadata.json b/parser/testdata/02226_analyzer_or_like_combine/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02226_analyzer_or_like_combine/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02226_analyzer_or_like_combine/query.sql b/parser/testdata/02226_analyzer_or_like_combine/query.sql new file mode 100644 index 000000000..353213347 --- /dev/null +++ b/parser/testdata/02226_analyzer_or_like_combine/query.sql @@ -0,0 +1,34 @@ +-- Tags: no-fasttest +-- no-fasttest: Requires vectorscan +SET allow_hyperscan = 1, max_hyperscan_regexp_length = 0, max_hyperscan_regexp_total_length = 0; +SET optimize_rewrite_like_perfect_affix = 0; -- prevent input/output interference from another LIKE rewrite pass + +EXPLAIN SYNTAX SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 0; +EXPLAIN QUERY TREE run_passes=1 SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 0, enable_analyzer = 1; +EXPLAIN SYNTAX SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 1; +EXPLAIN QUERY TREE run_passes=1 SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 1, enable_analyzer = 1; + +EXPLAIN SYNTAX SELECT materialize('Привет, World') AS s1, materialize('Привет, World') AS s2 WHERE (s1 LIKE 'hell%') OR (s2 ILIKE '%привет%') OR (s1 ILIKE 'world%') SETTINGS optimize_or_like_chain = 1; +EXPLAIN SYNTAX SELECT materialize('Привет, World') AS s1, materialize('Привет, World') AS s2 WHERE (s1 LIKE 'hell%') OR (s2 ILIKE '%привет%') OR (s1 ILIKE 'world%') SETTINGS optimize_or_like_chain = 1, allow_hyperscan = 0; +EXPLAIN SYNTAX SELECT materialize('Привет, World') AS s1, materialize('Привет, World') AS s2 WHERE (s1 LIKE 'hell%') OR (s2 ILIKE '%привет%') OR (s1 ILIKE 'world%') SETTINGS optimize_or_like_chain = 1, max_hyperscan_regexp_length = 10; +EXPLAIN SYNTAX SELECT materialize('Привет, World') AS s1, materialize('Привет, World') AS s2 WHERE (s1 LIKE 'hell%') OR (s2 ILIKE '%привет%') OR (s1 ILIKE 'world%') SETTINGS optimize_or_like_chain = 1, max_hyperscan_regexp_total_length = 10; +EXPLAIN SYNTAX SELECT materialize('Привет, World') AS s1, materialize('Привет, World') AS s2 WHERE (s1 LIKE 'hell%') OR (s2 ILIKE '%привет%') OR (s1 ILIKE 'world%') OR s1 == 'Привет' SETTINGS optimize_or_like_chain = 1; + + +SELECT materialize('Привет, optimized World') AS s WHERE (s LIKE 'hell%') OR (s LIKE '%привет%') OR (s ILIKE '%world') SETTINGS optimize_or_like_chain = 1; +SELECT materialize('Привет, optimized World') AS s WHERE (s LIKE 'hell%') OR (s LIKE '%привет%') OR (s ILIKE '%world') SETTINGS optimize_or_like_chain = 1, enable_analyzer = 1; + +SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s LIKE '%привет%') OR (s ILIKE '%world') SETTINGS optimize_or_like_chain = 0; +SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s LIKE '%привет%') OR (s ILIKE '%world') SETTINGS optimize_or_like_chain = 0, enable_analyzer = 1; + +SELECT materialize('Привет, optimized World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s LIKE 'world%') SETTINGS optimize_or_like_chain = 1; +SELECT materialize('Привет, optimized World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s LIKE 'world%') SETTINGS optimize_or_like_chain = 1, enable_analyzer = 1; + +SELECT materialize('Привет, World') AS s WHERE (s LIKE
'hell%') OR (s ILIKE '%привет%') OR (s LIKE 'world%') SETTINGS optimize_or_like_chain = 0; +SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s LIKE 'world%') SETTINGS optimize_or_like_chain = 0, enable_analyzer = 1; + +SELECT materialize('Привет, World') AS s WHERE (s LIKE 'hell%') OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 1, enable_analyzer = 1; + +-- Aliases + +EXPLAIN SYNTAX SELECT test, materialize('Привет, World') AS s WHERE ((s LIKE 'hell%') AS test) OR (s ILIKE '%привет%') OR (s ILIKE 'world%') SETTINGS optimize_or_like_chain = 1; diff --git a/parser/testdata/02226_async_insert_table_function/ast.json b/parser/testdata/02226_async_insert_table_function/ast.json new file mode 100644 index 000000000..5dc0ea673 --- /dev/null +++ b/parser/testdata/02226_async_insert_table_function/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_async_insert_table_function (children 1)" + }, + { + "explain": " Identifier t_async_insert_table_function" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001304743, + "rows_read": 2, + "bytes_read": 110 + } +} diff --git a/parser/testdata/02226_async_insert_table_function/metadata.json b/parser/testdata/02226_async_insert_table_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02226_async_insert_table_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02226_async_insert_table_function/query.sql b/parser/testdata/02226_async_insert_table_function/query.sql new file mode 100644 index 000000000..fc4aadfbf --- /dev/null +++ b/parser/testdata/02226_async_insert_table_function/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS t_async_insert_table_function; + +CREATE TABLE t_async_insert_table_function (id UInt32, s String) ENGINE = Memory; + +SET async_insert = 1; + +INSERT INTO function remote('127.0.0.1', currentDatabase(), t_async_insert_table_function) values (1, 'aaa') (2, 'bbb'); + +SELECT * FROM t_async_insert_table_function ORDER BY id; + +DROP TABLE t_async_insert_table_function; diff --git a/parser/testdata/02226_in_untuple_issue_34810/ast.json b/parser/testdata/02226_in_untuple_issue_34810/ast.json new file mode 100644 index 000000000..158154919 --- /dev/null +++ b/parser/testdata/02226_in_untuple_issue_34810/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery calendar (children 1)" + }, + { + "explain": " Identifier calendar" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001092401, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/02226_in_untuple_issue_34810/metadata.json b/parser/testdata/02226_in_untuple_issue_34810/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02226_in_untuple_issue_34810/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02226_in_untuple_issue_34810/query.sql b/parser/testdata/02226_in_untuple_issue_34810/query.sql new file mode 100644 index 000000000..a313d526e --- /dev/null +++ b/parser/testdata/02226_in_untuple_issue_34810/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS calendar; +DROP TABLE IF EXISTS events32; + +CREATE TABLE calendar ( `year` Int64, `month` Int64 ) ENGINE = TinyLog; +INSERT INTO calendar VALUES (2000, 1), (2001, 2), (2000, 3); + +CREATE TABLE 
events32 ( `year` Int32, `month` Int32 ) ENGINE = TinyLog; +INSERT INTO events32 VALUES (2001, 2), (2001, 3); + +SELECT * FROM calendar WHERE (year, month) IN ( SELECT (year, month) FROM events32 ); + +DROP TABLE IF EXISTS calendar; +DROP TABLE IF EXISTS events32; diff --git a/parser/testdata/02226_low_cardinality_text_bloom_filter_index/ast.json b/parser/testdata/02226_low_cardinality_text_bloom_filter_index/ast.json new file mode 100644 index 000000000..f028d58d0 --- /dev/null +++ b/parser/testdata/02226_low_cardinality_text_bloom_filter_index/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery bf_tokenbf_lowcard_test (children 1)" + }, + { + "explain": " Identifier bf_tokenbf_lowcard_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001487538, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/02226_low_cardinality_text_bloom_filter_index/metadata.json b/parser/testdata/02226_low_cardinality_text_bloom_filter_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02226_low_cardinality_text_bloom_filter_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02226_low_cardinality_text_bloom_filter_index/query.sql b/parser/testdata/02226_low_cardinality_text_bloom_filter_index/query.sql new file mode 100644 index 000000000..d2b30f5e8 --- /dev/null +++ b/parser/testdata/02226_low_cardinality_text_bloom_filter_index/query.sql @@ -0,0 +1,69 @@ +DROP TABLE IF EXISTS bf_tokenbf_lowcard_test; +DROP TABLE IF EXISTS bf_ngram_lowcard_test; + +CREATE TABLE bf_tokenbf_lowcard_test +( + row_id UInt32, + lc LowCardinality(String), + lc_fixed LowCardinality(FixedString(8)), + INDEX lc_bf_tokenbf lc TYPE tokenbf_v1(256,2,0) GRANULARITY 1, + INDEX lc_fixed_bf_tokenbf lc_fixed TYPE tokenbf_v1(256,2,0) GRANULARITY 1 +) Engine=MergeTree() ORDER BY row_id SETTINGS index_granularity = 1; + +CREATE TABLE bf_ngram_lowcard_test +( + row_id UInt32, + lc LowCardinality(String), + lc_fixed LowCardinality(FixedString(8)), + INDEX lc_ngram lc TYPE ngrambf_v1(4,256,2,0) GRANULARITY 1, + INDEX lc_fixed_ngram lc_fixed TYPE ngrambf_v1(4,256,2,0) GRANULARITY 1 +) Engine=MergeTree() ORDER BY row_id SETTINGS index_granularity = 1; + +INSERT INTO bf_tokenbf_lowcard_test VALUES (1, 'K1', 'K1ZZZZZZ'), (2, 'K2', 'K2ZZZZZZ'); +INSERT INTO bf_ngram_lowcard_test VALUES (1, 'K1', 'K1ZZZZZZ'), (2, 'K2', 'K2ZZZZZZ'); +INSERT INTO bf_tokenbf_lowcard_test VALUES (3, 'abCD3ef', 'abCD3ef'), (4, 'abCD4ef', 'abCD4ef'); +INSERT INTO bf_ngram_lowcard_test VALUES (3, 'abCD3ef', 'abCD3ef'), (4, 'abCD4ef', 'abCD4ef'); + +SELECT 'lc_bf_tokenbf'; +SELECT * FROM bf_tokenbf_lowcard_test WHERE like(lc, 'K1') SETTINGS force_data_skipping_indices='lc_bf_tokenbf'; +SELECT * FROM bf_tokenbf_lowcard_test WHERE like(lc, 'K2') SETTINGS force_data_skipping_indices='lc_bf_tokenbf'; +SELECT * FROM bf_tokenbf_lowcard_test WHERE like(lc, 'K3') SETTINGS force_data_skipping_indices='lc_bf_tokenbf'; + +SELECT 'lc_fixed_bf_tokenbf'; +SELECT * FROM bf_tokenbf_lowcard_test WHERE like(lc_fixed, 'K1ZZZZZZ') SETTINGS force_data_skipping_indices='lc_fixed_bf_tokenbf'; +SELECT * FROM bf_tokenbf_lowcard_test WHERE like(lc_fixed, 'K2ZZZZZZ') SETTINGS force_data_skipping_indices='lc_fixed_bf_tokenbf'; +SELECT * FROM bf_tokenbf_lowcard_test WHERE like(lc_fixed, 'K3ZZZZZZ') SETTINGS force_data_skipping_indices='lc_fixed_bf_tokenbf'; + +SELECT 'lc_ngram'; +SELECT * FROM 
bf_ngram_lowcard_test WHERE like(lc, 'K1') SETTINGS force_data_skipping_indices='lc_ngram'; +SELECT * FROM bf_ngram_lowcard_test WHERE like(lc, 'K2') SETTINGS force_data_skipping_indices='lc_ngram'; +SELECT * FROM bf_ngram_lowcard_test WHERE like(lc, 'K3') SETTINGS force_data_skipping_indices='lc_ngram'; + +SELECT 'lc_fixed_ngram'; +SELECT * FROM bf_ngram_lowcard_test WHERE like(lc_fixed, 'K1ZZZZZZ') SETTINGS force_data_skipping_indices='lc_fixed_ngram'; +SELECT * FROM bf_ngram_lowcard_test WHERE like(lc_fixed, 'K2ZZZZZZ') SETTINGS force_data_skipping_indices='lc_fixed_ngram'; +SELECT * FROM bf_ngram_lowcard_test WHERE like(lc_fixed, 'K3ZZZZZZ') SETTINGS force_data_skipping_indices='lc_fixed_ngram'; + + +SELECT 'lc_bf_tokenbf'; +SELECT * FROM bf_tokenbf_lowcard_test WHERE like(lc, '%CD3%') SETTINGS force_data_skipping_indices='lc_bf_tokenbf'; +SELECT * FROM bf_tokenbf_lowcard_test WHERE like(lc, '%CD4%') SETTINGS force_data_skipping_indices='lc_bf_tokenbf'; +SELECT * FROM bf_tokenbf_lowcard_test WHERE like(lc, '%CD5%') SETTINGS force_data_skipping_indices='lc_bf_tokenbf'; + +SELECT 'lc_fixed_bf_tokenbf'; +SELECT * FROM bf_tokenbf_lowcard_test WHERE like(lc_fixed, '%CD3%') SETTINGS force_data_skipping_indices='lc_fixed_bf_tokenbf'; +SELECT * FROM bf_tokenbf_lowcard_test WHERE like(lc_fixed, '%CD4%') SETTINGS force_data_skipping_indices='lc_fixed_bf_tokenbf'; +SELECT * FROM bf_tokenbf_lowcard_test WHERE like(lc_fixed, '%CD5%') SETTINGS force_data_skipping_indices='lc_fixed_bf_tokenbf'; + +SELECT 'lc_ngram'; +SELECT * FROM bf_ngram_lowcard_test WHERE like(lc, '%CD3%') SETTINGS force_data_skipping_indices='lc_ngram'; +SELECT * FROM bf_ngram_lowcard_test WHERE like(lc, '%CD4%') SETTINGS force_data_skipping_indices='lc_ngram'; +SELECT * FROM bf_ngram_lowcard_test WHERE like(lc, '%CD5%') SETTINGS force_data_skipping_indices='lc_ngram'; + +SELECT 'lc_fixed_ngram'; +SELECT * FROM bf_ngram_lowcard_test WHERE like(lc_fixed, '%CD3%') SETTINGS force_data_skipping_indices='lc_fixed_ngram'; +SELECT * FROM bf_ngram_lowcard_test WHERE like(lc_fixed, '%CD4%') SETTINGS force_data_skipping_indices='lc_fixed_ngram'; +SELECT * FROM bf_ngram_lowcard_test WHERE like(lc_fixed, '%CD5%') SETTINGS force_data_skipping_indices='lc_fixed_ngram'; + +DROP TABLE bf_tokenbf_lowcard_test; +DROP TABLE bf_ngram_lowcard_test; diff --git a/parser/testdata/02227_union_match_by_name/ast.json b/parser/testdata/02227_union_match_by_name/ast.json new file mode 100644 index 000000000..e616e9acc --- /dev/null +++ b/parser/testdata/02227_union_match_by_name/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001227078, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02227_union_match_by_name/metadata.json b/parser/testdata/02227_union_match_by_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02227_union_match_by_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02227_union_match_by_name/query.sql b/parser/testdata/02227_union_match_by_name/query.sql new file mode 100644 index 000000000..489c3d976 --- /dev/null +++ b/parser/testdata/02227_union_match_by_name/query.sql @@ -0,0 +1,8 @@ +SET enable_analyzer = 1; + +-- { echoOn } + +EXPLAIN header = 1, optimize = 0 SELECT avgWeighted(x, y) FROM (SELECT NULL, 255 AS x, 1 AS y UNION ALL SELECT y, NULL AS x, 1 AS y); +SELECT avgWeighted(x, y) FROM 
(SELECT NULL, 255 AS x, 1 AS y UNION ALL SELECT y, NULL AS x, 1 AS y); + +-- { echoOff } diff --git a/parser/testdata/02228_merge_tree_insert_memory_usage/ast.json b/parser/testdata/02228_merge_tree_insert_memory_usage/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02228_merge_tree_insert_memory_usage/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02228_merge_tree_insert_memory_usage/metadata.json b/parser/testdata/02228_merge_tree_insert_memory_usage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02228_merge_tree_insert_memory_usage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02228_merge_tree_insert_memory_usage/query.sql b/parser/testdata/02228_merge_tree_insert_memory_usage/query.sql new file mode 100644 index 000000000..3f812cc2e --- /dev/null +++ b/parser/testdata/02228_merge_tree_insert_memory_usage/query.sql @@ -0,0 +1,17 @@ +-- Tags: long, no-object-storage +-- no-object-storage: Avoid flakiness due to cache / buffer usage +SET insert_keeper_fault_injection_probability=0; -- with fault injection enabled, this test can require too many retries due to 100 partitions, so disable it + +-- regression for MEMORY_LIMIT_EXCEEDED error because of deferred final part flush + +drop table if exists data_02228; +create table data_02228 (key1 UInt32, sign Int8, s UInt64) engine = CollapsingMergeTree(sign) order by (key1) partition by key1 % 100 settings auto_statistics_types = ''; +insert into data_02228 select number, 1, number from numbers_mt(10_000) settings max_memory_usage='30Mi', max_partitions_per_insert_block=1024, max_insert_delayed_streams_for_parallel_write=0; +insert into data_02228 select number, 1, number from numbers_mt(10_000) settings max_memory_usage='30Mi', max_partitions_per_insert_block=1024, max_insert_delayed_streams_for_parallel_write=1000000; -- { serverError MEMORY_LIMIT_EXCEEDED } +drop table data_02228; + +drop table if exists data_rep_02228 SYNC; +create table data_rep_02228 (key1 UInt32, sign Int8, s UInt64) engine = ReplicatedCollapsingMergeTree('/clickhouse/{database}', 'r1', sign) order by (key1) partition by key1 % 100 settings auto_statistics_types = ''; +insert into data_rep_02228 select number, 1, number from numbers_mt(10_000) settings max_memory_usage='30Mi', max_partitions_per_insert_block=1024, max_insert_delayed_streams_for_parallel_write=0; +insert into data_rep_02228 select number, 1, number from numbers_mt(10_000) settings max_memory_usage='30Mi', max_partitions_per_insert_block=1024, max_insert_delayed_streams_for_parallel_write=1000000; -- { serverError MEMORY_LIMIT_EXCEEDED } +drop table data_rep_02228 SYNC; diff --git a/parser/testdata/02230_create_table_as_ignore_ttl/ast.json b/parser/testdata/02230_create_table_as_ignore_ttl/ast.json new file mode 100644 index 000000000..f7bd946e2 --- /dev/null +++ b/parser/testdata/02230_create_table_as_ignore_ttl/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_02230_ttl (children 1)" + }, + { + "explain": " Identifier data_02230_ttl" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001252956, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/02230_create_table_as_ignore_ttl/metadata.json b/parser/testdata/02230_create_table_as_ignore_ttl/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02230_create_table_as_ignore_ttl/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02230_create_table_as_ignore_ttl/query.sql b/parser/testdata/02230_create_table_as_ignore_ttl/query.sql new file mode 100644 index 000000000..8838f67ec --- /dev/null +++ b/parser/testdata/02230_create_table_as_ignore_ttl/query.sql @@ -0,0 +1,18 @@ +drop table if exists data_02230_ttl; +drop table if exists null_02230_ttl; +create table data_02230_ttl (date Date, key Int) Engine=MergeTree() order by key TTL date + 14; +show create data_02230_ttl format TSVRaw; +create table null_02230_ttl engine=Null() as data_02230_ttl; +show create null_02230_ttl format TSVRaw; +drop table data_02230_ttl; +drop table null_02230_ttl; + +drop table if exists data_02230_column_ttl; +drop table if exists null_02230_column_ttl; +create table data_02230_column_ttl (date Date, value Int TTL date + 7, key Int) Engine=MergeTree() order by key TTL date + 14; +show create data_02230_column_ttl format TSVRaw; +create table null_02230_column_ttl engine=Null() as data_02230_column_ttl; +-- check that order of columns is the same +show create null_02230_column_ttl format TSVRaw; +drop table data_02230_column_ttl; +drop table null_02230_column_ttl; diff --git a/parser/testdata/02231_bloom_filter_sizing/ast.json b/parser/testdata/02231_bloom_filter_sizing/ast.json new file mode 100644 index 000000000..f76208769 --- /dev/null +++ b/parser/testdata/02231_bloom_filter_sizing/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Bloom filter on sort key'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001276428, + "rows_read": 5, + "bytes_read": 195 + } +} diff --git a/parser/testdata/02231_bloom_filter_sizing/metadata.json b/parser/testdata/02231_bloom_filter_sizing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02231_bloom_filter_sizing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02231_bloom_filter_sizing/query.sql b/parser/testdata/02231_bloom_filter_sizing/query.sql new file mode 100644 index 000000000..ee896675d --- /dev/null +++ b/parser/testdata/02231_bloom_filter_sizing/query.sql @@ -0,0 +1,53 @@ +SELECT 'Bloom filter on sort key'; +DROP TABLE IF EXISTS bloom_filter_sizing_pk; +CREATE TABLE bloom_filter_sizing_pk( + key UInt64, + value UInt64, + + -- Very high granularity to have one filter per part. + INDEX key_bf key TYPE bloom_filter(0.01) GRANULARITY 2147483648 +) ENGINE=MergeTree ORDER BY key; + +INSERT INTO bloom_filter_sizing_pk +SELECT +number % 100 as key, -- 100 unique keys +number as value -- whatever +FROM numbers(100_000); + +-- +-- Merge everything into a single part +-- +OPTIMIZE TABLE bloom_filter_sizing_pk FINAL; + +SELECT COUNT() from bloom_filter_sizing_pk WHERE key = 1; + +-- Check bloom filter size. 
According to https://hur.st/bloomfilter/?n=100&p=0.01 for 100 keys it should be less than 200B +SELECT COUNT() from system.parts where database = currentDatabase() AND table = 'bloom_filter_sizing_pk' and secondary_indices_uncompressed_bytes > 200 and active; + +SELECT 'Bloom filter on non-sort key'; +DROP TABLE IF EXISTS bloom_filter_sizing_sec; +CREATE TABLE bloom_filter_sizing_sec( + key1 UInt64, + key2 UInt64, + value UInt64, + + -- Very high granularity to have one filter per part. + INDEX key_bf key2 TYPE bloom_filter(0.01) GRANULARITY 2147483648 +) ENGINE=MergeTree ORDER BY key1; + +INSERT INTO bloom_filter_sizing_sec +SELECT +number % 100 as key1, -- 100 unique keys +rand() % 100 as key2, -- 100 unique keys +number as value -- whatever +FROM numbers(100_000); + +-- +-- Merge everything into a single part +-- +OPTIMIZE TABLE bloom_filter_sizing_sec FINAL; + +SELECT COUNT() from bloom_filter_sizing_sec WHERE key1 = 1; + +-- Check bloom filter size. According to https://hur.st/bloomfilter/?n=100&p=0.01 for 100 keys it should be less than 200B +SELECT COUNT() from system.parts where database = currentDatabase() AND table = 'bloom_filter_sizing_sec' and secondary_indices_uncompressed_bytes > 200 and active; diff --git a/parser/testdata/02231_buffer_aggregate_states_leak/ast.json b/parser/testdata/02231_buffer_aggregate_states_leak/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02231_buffer_aggregate_states_leak/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02231_buffer_aggregate_states_leak/metadata.json b/parser/testdata/02231_buffer_aggregate_states_leak/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02231_buffer_aggregate_states_leak/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02231_buffer_aggregate_states_leak/query.sql b/parser/testdata/02231_buffer_aggregate_states_leak/query.sql new file mode 100644 index 000000000..7cc267b72 --- /dev/null +++ b/parser/testdata/02231_buffer_aggregate_states_leak/query.sql @@ -0,0 +1,37 @@ +-- Tags: long, no-tsan + +drop table if exists buffer_02231; +drop table if exists out_02231; +drop table if exists in_02231; +drop table if exists mv_02231; + +-- To reproduce the leak of memory tracking of aggregate states, +-- a background flush is required. 
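+-- With the thresholds below (min/max time 86400s, rows 1e9, bytes 1e12) no insert-driven flush can trigger; +-- only the trailing /* flush time */ 1 argument does, so data reaches out_02231 exclusively via Buffer's background flush thread.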
+create table buffer_02231 +( + key Int, + v1 AggregateFunction(groupArray, String) +) engine = Buffer(currentDatabase(), 'out_02231', + /* layers= */ 1, + /* min/max time */ 86400, 86400, + /* min/max rows */ 1e9, 1e9, + /* min/max bytes */ 1e12, 1e12, + /* flush time */ 1 +); +create table out_02231 as buffer_02231 engine=Null(); +create table in_02231 (number Int) engine=Null(); + +-- Create lots of INSERT blocks with MV +create materialized view mv_02231 to buffer_02231 as select + number as key, + groupArrayState(toString(number)) as v1 +from in_02231 +group by key; + +set optimize_trivial_insert_select = 1; +insert into in_02231 select * from numbers(5e6) settings max_memory_usage='400Mi', max_threads=1; + +drop table buffer_02231; +drop table out_02231; +drop table in_02231; +drop table mv_02231; diff --git a/parser/testdata/02231_hierarchical_dictionaries_constant/ast.json b/parser/testdata/02231_hierarchical_dictionaries_constant/ast.json new file mode 100644 index 000000000..b2507bc99 --- /dev/null +++ b/parser/testdata/02231_hierarchical_dictionaries_constant/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery hierarchy_source_table (children 1)" + }, + { + "explain": " Identifier hierarchy_source_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001298863, + "rows_read": 2, + "bytes_read": 96 + } +} diff --git a/parser/testdata/02231_hierarchical_dictionaries_constant/metadata.json b/parser/testdata/02231_hierarchical_dictionaries_constant/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02231_hierarchical_dictionaries_constant/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02231_hierarchical_dictionaries_constant/query.sql b/parser/testdata/02231_hierarchical_dictionaries_constant/query.sql new file mode 100644 index 000000000..bc01b4473 --- /dev/null +++ b/parser/testdata/02231_hierarchical_dictionaries_constant/query.sql @@ -0,0 +1,54 @@ +DROP TABLE IF EXISTS hierarchy_source_table; +CREATE TABLE hierarchy_source_table (id UInt64, parent_id UInt64) ENGINE = TinyLog; +INSERT INTO hierarchy_source_table VALUES (1, 0), (2, 1), (3, 1), (4, 2); + +DROP DICTIONARY IF EXISTS hierarchy_flat_dictionary; +CREATE DICTIONARY hierarchy_flat_dictionary +( + id UInt64, + parent_id UInt64 HIERARCHICAL +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'hierarchy_source_table')) +LAYOUT(FLAT()) +LIFETIME(MIN 1 MAX 1000); + +SELECT 'Get hierarchy'; +SELECT dictGetHierarchy('hierarchy_flat_dictionary', 0); +SELECT dictGetHierarchy('hierarchy_flat_dictionary', 1); +SELECT dictGetHierarchy('hierarchy_flat_dictionary', 2); +SELECT dictGetHierarchy('hierarchy_flat_dictionary', 3); +SELECT dictGetHierarchy('hierarchy_flat_dictionary', 4); +SELECT dictGetHierarchy('hierarchy_flat_dictionary', 5); + +SELECT 'Get is in hierarchy'; +SELECT dictIsIn('hierarchy_flat_dictionary', 1, 1); +SELECT dictIsIn('hierarchy_flat_dictionary', 2, 1); +SELECT dictIsIn('hierarchy_flat_dictionary', 2, 0); + +SELECT 'Get children'; +SELECT dictGetChildren('hierarchy_flat_dictionary', 0); +SELECT dictGetChildren('hierarchy_flat_dictionary', 1); +SELECT dictGetChildren('hierarchy_flat_dictionary', 2); +SELECT dictGetChildren('hierarchy_flat_dictionary', 3); +SELECT dictGetChildren('hierarchy_flat_dictionary', 4); +SELECT dictGetChildren('hierarchy_flat_dictionary', 5); + +SELECT 'Get all descendants'; +SELECT 
dictGetDescendants('hierarchy_flat_dictionary', 0); +SELECT dictGetDescendants('hierarchy_flat_dictionary', 1); +SELECT dictGetDescendants('hierarchy_flat_dictionary', 2); +SELECT dictGetDescendants('hierarchy_flat_dictionary', 3); +SELECT dictGetDescendants('hierarchy_flat_dictionary', 4); +SELECT dictGetDescendants('hierarchy_flat_dictionary', 5); + +SELECT 'Get descendants at first level'; +SELECT dictGetDescendants('hierarchy_flat_dictionary', 0, 1); +SELECT dictGetDescendants('hierarchy_flat_dictionary', 1, 1); +SELECT dictGetDescendants('hierarchy_flat_dictionary', 2, 1); +SELECT dictGetDescendants('hierarchy_flat_dictionary', 3, 1); +SELECT dictGetDescendants('hierarchy_flat_dictionary', 4, 1); +SELECT dictGetDescendants('hierarchy_flat_dictionary', 5, 1); + +DROP DICTIONARY hierarchy_flat_dictionary; +DROP TABLE hierarchy_source_table; diff --git a/parser/testdata/02232_functions_to_subcolumns_alias/ast.json b/parser/testdata/02232_functions_to_subcolumns_alias/ast.json new file mode 100644 index 000000000..f14fa0009 --- /dev/null +++ b/parser/testdata/02232_functions_to_subcolumns_alias/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_functions_to_subcolumns_alias (children 1)" + }, + { + "explain": " Identifier t_functions_to_subcolumns_alias" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001323635, + "rows_read": 2, + "bytes_read": 114 + } +} diff --git a/parser/testdata/02232_functions_to_subcolumns_alias/metadata.json b/parser/testdata/02232_functions_to_subcolumns_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02232_functions_to_subcolumns_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02232_functions_to_subcolumns_alias/query.sql b/parser/testdata/02232_functions_to_subcolumns_alias/query.sql new file mode 100644 index 000000000..89383ed4b --- /dev/null +++ b/parser/testdata/02232_functions_to_subcolumns_alias/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS t_functions_to_subcolumns_alias; + +CREATE TABLE t_functions_to_subcolumns_alias (id UInt64, t Tuple(UInt64, String), m Map(String, UInt64)) ENGINE = Memory; +INSERT INTO t_functions_to_subcolumns_alias VALUES (1, (100, 'abc'), map('foo', 1, 'bar', 2)) (2, NULL, map()); + +SELECT count(id) AS cnt FROM t_functions_to_subcolumns_alias FORMAT TSVWithNames; +SELECT tupleElement(t, 1) as t0, t0 FROM t_functions_to_subcolumns_alias FORMAT TSVWithNames; +SELECT mapContains(m, 'foo') AS hit FROM t_functions_to_subcolumns_alias FORMAT TSVWithNames; + +DROP TABLE t_functions_to_subcolumns_alias; diff --git a/parser/testdata/02232_partition_pruner_mixed_constant_type/ast.json b/parser/testdata/02232_partition_pruner_mixed_constant_type/ast.json new file mode 100644 index 000000000..327d38b30 --- /dev/null +++ b/parser/testdata/02232_partition_pruner_mixed_constant_type/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery broken (children 1)" + }, + { + "explain": " Identifier broken" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001137786, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/02232_partition_pruner_mixed_constant_type/metadata.json b/parser/testdata/02232_partition_pruner_mixed_constant_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02232_partition_pruner_mixed_constant_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02232_partition_pruner_mixed_constant_type/query.sql b/parser/testdata/02232_partition_pruner_mixed_constant_type/query.sql new file mode 100644 index 000000000..a0b582717 --- /dev/null +++ b/parser/testdata/02232_partition_pruner_mixed_constant_type/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS broken; + +CREATE TABLE broken (time UInt64) ENGINE = MergeTree PARTITION BY toYYYYMMDD(toDate(time / 1000)) ORDER BY time; +INSERT INTO broken (time) VALUES (1647353101000), (1647353101001), (1647353101002), (1647353101003); +SELECT * FROM broken WHERE time>-1; + +DROP TABLE broken; diff --git a/parser/testdata/02232_partition_pruner_single_point/ast.json b/parser/testdata/02232_partition_pruner_single_point/ast.json new file mode 100644 index 000000000..729156c6e --- /dev/null +++ b/parser/testdata/02232_partition_pruner_single_point/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery lower_test (children 1)" + }, + { + "explain": " Identifier lower_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001164472, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02232_partition_pruner_single_point/metadata.json b/parser/testdata/02232_partition_pruner_single_point/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02232_partition_pruner_single_point/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02232_partition_pruner_single_point/query.sql b/parser/testdata/02232_partition_pruner_single_point/query.sql new file mode 100644 index 000000000..0400d0e1b --- /dev/null +++ b/parser/testdata/02232_partition_pruner_single_point/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS lower_test; + +CREATE TABLE lower_test ( + a Int32, + b String +) ENGINE=MergeTree +PARTITION BY b +ORDER BY a; + +INSERT INTO lower_test (a,b) VALUES (1,'A'),(2,'B'),(3,'C'); + +SELECT a FROM lower_test WHERE lower(b) IN ('a','b') order by a; + +DROP TABLE lower_test; diff --git a/parser/testdata/02233_interpolate_1/ast.json b/parser/testdata/02233_interpolate_1/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02233_interpolate_1/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02233_interpolate_1/metadata.json b/parser/testdata/02233_interpolate_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02233_interpolate_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02233_interpolate_1/query.sql b/parser/testdata/02233_interpolate_1/query.sql new file mode 100644 index 000000000..453de421f --- /dev/null +++ b/parser/testdata/02233_interpolate_1/query.sql @@ -0,0 +1,79 @@ +# Test WITH FILL without INTERPOLATE +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5; + +# Test INTERPOLATE with const +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE (inter AS 42); + +# Test INTERPOLATE with field value +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS 
source, number as inter FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE (inter AS inter); + +# Test INTERPOLATE with expression +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE (inter AS inter + 1); + +# Test INTERPOLATE with incompatible const - should produce error +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE (inter AS 'inter'); -- { serverError CANNOT_PARSE_TEXT } + +# Test INTERPOLATE with incompatible expression - should produce error +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE (inter AS reverse(inter)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +# Test INTERPOLATE with column from WITH FILL expression - should produce error +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE (n AS n); -- { serverError INVALID_WITH_FILL_EXPRESSION } + +# Test INTERPOLATE with inconsistent column - should produce error +SELECT n, source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number as inter FROM numbers(10) WHERE number % 3 = 1 +) ORDER BY n WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE (inter AS source); -- { serverError CANNOT_PARSE_TEXT, 32 } + +# Test INTERPOLATE with aliased column +SELECT n, source, inter + 1 AS inter_p FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number AS inter FROM numbers(10) WHERE (number % 3) = 1 +) ORDER BY n ASC WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE ( inter_p AS inter_p + 1 ); + +# Test INTERPOLATE with column not present in select +SELECT source, inter FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number AS inter, number + 1 AS inter2 FROM numbers(10) WHERE (number % 3) = 1 +) ORDER BY n ASC WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE ( inter AS inter2 + inter ); + +# Test INTERPOLATE in sub-select +SELECT n, source, inter FROM ( + SELECT n, source, inter, inter2 FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number AS inter, number + 1 AS inter2 FROM numbers(10) WHERE (number % 3) = 1 + ) ORDER BY n ASC WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE ( inter AS inter + inter2 ) +); + +# Test INTERPOLATE with aggregates +SELECT n, any(source), sum(inter) AS inter_s FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number AS inter FROM numbers(10) WHERE (number % 3) = 1 +) GROUP BY n +ORDER BY n ASC WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE ( inter_s AS inter_s + 1 ); + +# Test INTERPOLATE with Nullable in result +SELECT n, source, inter + NULL AS inter_p FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number AS inter FROM numbers(10) WHERE (number % 3) = 1 +) ORDER BY n ASC WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE ( inter_p AS inter_p + 1 ); + +# Test INTERPOLATE with Nullable in source +SELECT n, source, inter AS inter_p FROM ( + SELECT toFloat32(number % 10) AS n, 'original' AS source, number + NULL AS inter FROM numbers(10) WHERE 
(number % 3) = 1 +) ORDER BY n ASC WITH FILL FROM 0 TO 11.51 STEP 0.5 INTERPOLATE ( inter_p AS inter_p + 1 ); + +# Test INTERPOLATE for MergeTree +DROP TABLE IF EXISTS t_inter_02233; +CREATE TABLE t_inter_02233 (n Int32) ENGINE = MergeTree ORDER BY n; +INSERT INTO t_inter_02233 VALUES (1),(3),(3),(6),(6),(6); +SELECT n, count() AS m FROM t_inter_02233 GROUP BY n ORDER BY n WITH FILL INTERPOLATE ( m AS m + 1 ); +DROP TABLE IF EXISTS t_inter_02233; diff --git a/parser/testdata/02233_optimize_aggregation_in_order_prefix/ast.json b/parser/testdata/02233_optimize_aggregation_in_order_prefix/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02233_optimize_aggregation_in_order_prefix/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02233_optimize_aggregation_in_order_prefix/metadata.json b/parser/testdata/02233_optimize_aggregation_in_order_prefix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02233_optimize_aggregation_in_order_prefix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02233_optimize_aggregation_in_order_prefix/query.sql b/parser/testdata/02233_optimize_aggregation_in_order_prefix/query.sql new file mode 100644 index 000000000..48af5ae00 --- /dev/null +++ b/parser/testdata/02233_optimize_aggregation_in_order_prefix/query.sql @@ -0,0 +1,25 @@ +-- Tags: no-object-storage + +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +drop table if exists data_02233; +create table data_02233 (parent_key Int, child_key Int, value Int) engine=MergeTree() order by parent_key; + +-- before inserting data, it may produce empty header +SELECT child_key, parent_key, child_key FROM data_02233 GROUP BY parent_key, child_key, child_key ORDER BY parent_key ASC NULLS LAST SETTINGS max_threads = 1, optimize_aggregation_in_order = 1; +SELECT child_key, parent_key, child_key FROM data_02233 GROUP BY parent_key, child_key, child_key WITH TOTALS ORDER BY parent_key ASC NULLS LAST SETTINGS max_threads = 1, optimize_aggregation_in_order = 1; + +-- { echoOn } +insert into data_02233 select number%10, number%3, number from numbers(100); +explain pipeline select parent_key, child_key, count() from data_02233 group by parent_key, child_key with totals order by parent_key, child_key settings max_threads=1, optimize_aggregation_in_order=1, read_in_order_two_level_merge_threshold=1; +explain pipeline select parent_key, child_key, count() from data_02233 group by parent_key, child_key with totals order by parent_key, child_key settings max_threads=1, optimize_aggregation_in_order=0, read_in_order_two_level_merge_threshold=1; +select parent_key, child_key, count() from data_02233 group by parent_key, child_key with totals order by parent_key, child_key settings max_threads=1, optimize_aggregation_in_order=1; +select parent_key, child_key, count() from data_02233 group by parent_key, child_key with totals order by parent_key, child_key settings max_threads=1, optimize_aggregation_in_order=1, max_block_size=1; +select parent_key, child_key, count() from data_02233 group by parent_key, child_key with totals order by parent_key, child_key settings max_threads=1, optimize_aggregation_in_order=0; + +-- fuzzer +SELECT child_key, parent_key, child_key FROM data_02233 GROUP BY parent_key, child_key, child_key ORDER BY child_key, parent_key ASC NULLS LAST SETTINGS max_threads = 1, optimize_aggregation_in_order = 1; +SELECT child_key, 
parent_key, child_key FROM data_02233 GROUP BY parent_key, child_key, child_key WITH TOTALS ORDER BY child_key, parent_key ASC NULLS LAST SETTINGS max_threads = 1, optimize_aggregation_in_order = 1; + +-- { echoOff } +drop table data_02233; diff --git a/parser/testdata/02233_optimize_aggregation_in_order_prefix_with_merge/ast.json b/parser/testdata/02233_optimize_aggregation_in_order_prefix_with_merge/ast.json new file mode 100644 index 000000000..37478d7f0 --- /dev/null +++ b/parser/testdata/02233_optimize_aggregation_in_order_prefix_with_merge/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_02233 (children 1)" + }, + { + "explain": " Identifier data_02233" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001092143, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02233_optimize_aggregation_in_order_prefix_with_merge/metadata.json b/parser/testdata/02233_optimize_aggregation_in_order_prefix_with_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02233_optimize_aggregation_in_order_prefix_with_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02233_optimize_aggregation_in_order_prefix_with_merge/query.sql b/parser/testdata/02233_optimize_aggregation_in_order_prefix_with_merge/query.sql new file mode 100644 index 000000000..19812fe73 --- /dev/null +++ b/parser/testdata/02233_optimize_aggregation_in_order_prefix_with_merge/query.sql @@ -0,0 +1,11 @@ +drop table if exists data_02233; +create table data_02233 (partition Int, parent_key Int, child_key Int, value Int) engine=MergeTree() partition by partition order by parent_key; + +insert into data_02233 values (1, 10, 100, 1000)(1, 20, 200, 2000); +insert into data_02233 values (2, 10, 100, 1000)(2, 20, 200, 2000); + +-- fuzzer +SELECT child_key, parent_key, child_key FROM data_02233 GROUP BY parent_key, child_key, child_key ORDER BY child_key, parent_key ASC NULLS LAST SETTINGS max_threads = 1, optimize_aggregation_in_order = 1; +SELECT child_key, parent_key, child_key FROM data_02233 GROUP BY parent_key, child_key, child_key WITH TOTALS ORDER BY child_key, parent_key ASC NULLS LAST SETTINGS max_threads = 1, optimize_aggregation_in_order = 1; + +drop table data_02233; diff --git a/parser/testdata/02233_set_enable_with_statement_cte_perf/ast.json b/parser/testdata/02233_set_enable_with_statement_cte_perf/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02233_set_enable_with_statement_cte_perf/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02233_set_enable_with_statement_cte_perf/metadata.json b/parser/testdata/02233_set_enable_with_statement_cte_perf/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02233_set_enable_with_statement_cte_perf/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02233_set_enable_with_statement_cte_perf/query.sql b/parser/testdata/02233_set_enable_with_statement_cte_perf/query.sql new file mode 100644 index 000000000..c7b2d2b81 --- /dev/null +++ b/parser/testdata/02233_set_enable_with_statement_cte_perf/query.sql @@ -0,0 +1,26 @@ +-- Tags: no-parallel-replicas +-- no-parallel-replicas: read_rows can differ if query execution was cancelled for remote replica(s) + +DROP TABLE IF EXISTS ev; +DROP TABLE IF EXISTS idx; + +CREATE TABLE ev (a Int32, b Int32) Engine=MergeTree() 
ORDER BY a SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +CREATE TABLE idx (a Int32) Engine=MergeTree() ORDER BY a SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +INSERT INTO ev SELECT number, number FROM numbers(10000000); +INSERT INTO idx SELECT number * 5 FROM numbers(1000); + +-- test_enable_global_with_statement_performance_1 +WITH 'test' AS u SELECT count() FROM ev WHERE a IN (SELECT a FROM idx) SETTINGS enable_global_with_statement = 1; + +-- test_enable_global_with_statement_performance_2 +SELECT count() FROM ev WHERE a IN (SELECT a FROM idx) SETTINGS enable_global_with_statement = 1; + +-- test_enable_global_with_statement_performance_3 +WITH 'test' AS u SELECT count() FROM ev WHERE a IN (SELECT a FROM idx) SETTINGS enable_global_with_statement = 0; + +SYSTEM FLUSH LOGS query_log; + +SELECT count(read_rows) FROM (SELECT read_rows FROM system.query_log WHERE current_database=currentDatabase() AND type='QueryFinish' AND query LIKE '-- test_enable_global_with_statement_performance%' ORDER BY initial_query_start_time_microseconds DESC LIMIT 3) GROUP BY read_rows; + +DROP TABLE IF EXISTS ev; +DROP TABLE IF EXISTS idx; diff --git a/parser/testdata/02233_with_total_empty_chunk/ast.json b/parser/testdata/02233_with_total_empty_chunk/ast.json new file mode 100644 index 000000000..faada4c03 --- /dev/null +++ b/parser/testdata/02233_with_total_empty_chunk/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00131555, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02233_with_total_empty_chunk/metadata.json b/parser/testdata/02233_with_total_empty_chunk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02233_with_total_empty_chunk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02233_with_total_empty_chunk/query.sql b/parser/testdata/02233_with_total_empty_chunk/query.sql new file mode 100644 index 000000000..c70b35df4 --- /dev/null +++ b/parser/testdata/02233_with_total_empty_chunk/query.sql @@ -0,0 +1,3 @@ +SET enable_analyzer = 1; + +SELECT (NULL, NULL, NULL, NULL, NULL, NULL, NULL) FROM numbers(0) GROUP BY number WITH TOTALS HAVING sum(number) <= arrayJoin([]) -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER }; diff --git a/parser/testdata/02234_cast_to_ip_address/ast.json b/parser/testdata/02234_cast_to_ip_address/ast.json new file mode 100644 index 000000000..dafc85618 --- /dev/null +++ b/parser/testdata/02234_cast_to_ip_address/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'IPv4 functions'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001201499, + "rows_read": 5, + "bytes_read": 185 + } +} diff --git a/parser/testdata/02234_cast_to_ip_address/metadata.json b/parser/testdata/02234_cast_to_ip_address/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02234_cast_to_ip_address/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02234_cast_to_ip_address/query.sql b/parser/testdata/02234_cast_to_ip_address/query.sql new file 
mode 100644 index 000000000..82f8f32bb --- /dev/null +++ b/parser/testdata/02234_cast_to_ip_address/query.sql @@ -0,0 +1,94 @@ +SELECT 'IPv4 functions'; + +SELECT IPv4StringToNum('test'); --{serverError CANNOT_PARSE_IPV4} +SELECT IPv4StringToNumOrDefault('test'); +SELECT IPv4StringToNumOrNull('test'); + +SELECT IPv4StringToNum('127.0.0.1'); +SELECT IPv4StringToNumOrDefault('127.0.0.1'); +SELECT IPv4StringToNumOrNull('127.0.0.1'); + +SELECT '--'; + +SELECT toIPv4('test'); --{serverError CANNOT_PARSE_IPV4} +SELECT toIPv4OrDefault('test'); +SELECT toIPv4OrNull('test'); + +SELECT toIPv4('127.0.0.1'); +SELECT toIPv4OrDefault('127.0.0.1'); +SELECT toIPv4OrNull('127.0.0.1'); + +SELECT '--'; + +SELECT toIPv4(toIPv6('::ffff:1.2.3.4')); +SELECT toIPv4(toIPv6('::afff:1.2.3.4')); --{serverError CANNOT_CONVERT_TYPE} +SELECT toIPv4OrDefault(toIPv6('::ffff:1.2.3.4')); +SELECT toIPv4OrDefault(toIPv6('::afff:1.2.3.4')); + +SELECT '--'; + +SELECT cast('test' , 'IPv4'); --{serverError CANNOT_PARSE_IPV4} +SELECT cast('127.0.0.1' , 'IPv4'); + +SELECT '--'; + +SET cast_ipv4_ipv6_default_on_conversion_error = 1; + +SELECT IPv4StringToNum('test'); +SELECT toIPv4('test'); +SELECT IPv4StringToNum(''); +SELECT toIPv4(''); +SELECT cast('test' , 'IPv4'); +SELECT cast('' , 'IPv4'); + +SET cast_ipv4_ipv6_default_on_conversion_error = 0; + +SELECT 'IPv6 functions'; + +SELECT IPv6StringToNum('test'); --{serverError CANNOT_PARSE_IPV6} +SELECT IPv6StringToNumOrDefault('test'); +SELECT IPv6StringToNumOrNull('test'); + +SELECT IPv6StringToNum('::ffff:127.0.0.1'); +SELECT IPv6StringToNumOrDefault('::ffff:127.0.0.1'); +SELECT IPv6StringToNumOrNull('::ffff:127.0.0.1'); + +SELECT '--'; + +SELECT toIPv6('test'); --{serverError CANNOT_PARSE_IPV6} +SELECT toIPv6OrDefault('test'); +SELECT toIPv6OrNull('test'); + +SELECT toIPv6('::ffff:127.0.0.1'); +SELECT toIPv6OrDefault('::ffff:127.0.0.1'); +SELECT toIPv6OrNull('::ffff:127.0.0.1'); + +SELECT toIPv6('::.1.2.3'); --{serverError CANNOT_PARSE_IPV6} +SELECT toIPv6OrDefault('::.1.2.3'); +SELECT toIPv6OrNull('::.1.2.3'); + +SELECT count() FROM numbers_mt(20000000) WHERE NOT ignore(toIPv6OrZero(randomString(8))); + +SELECT '--'; + +SELECT cast('test' , 'IPv6'); -- { serverError CANNOT_PARSE_IPV6 } +SELECT cast('::ffff:127.0.0.1', 'IPv6'); + +SELECT '--'; + +SET cast_ipv4_ipv6_default_on_conversion_error = 1; + +SELECT IPv6StringToNum('test'); +SELECT toIPv6('test'); +SELECT IPv6StringToNum(''); +SELECT toIPv6(''); +SELECT cast('test' , 'IPv6'); +SELECT cast('' , 'IPv6'); + +SELECT '--'; + +SET cast_ipv4_ipv6_default_on_conversion_error = 0; + +SELECT toFixedString('::1', 3) as value, cast(value, 'IPv6'), toIPv6(value); +SELECT toFixedString('', 16) as value, cast(value, 'IPv6'); +SELECT toFixedString('', 16) as value, toIPv6(value); diff --git a/parser/testdata/02234_column_function_short_circuit/ast.json b/parser/testdata/02234_column_function_short_circuit/ast.json new file mode 100644 index 000000000..96103835a --- /dev/null +++ b/parser/testdata/02234_column_function_short_circuit/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery dict_table (children 1)" + }, + { + "explain": " Identifier dict_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001207623, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02234_column_function_short_circuit/metadata.json b/parser/testdata/02234_column_function_short_circuit/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02234_column_function_short_circuit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02234_column_function_short_circuit/query.sql b/parser/testdata/02234_column_function_short_circuit/query.sql new file mode 100644 index 000000000..a6a368410 --- /dev/null +++ b/parser/testdata/02234_column_function_short_circuit/query.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS dict_table; +DROP TABLE IF EXISTS data_table; +DROP DICTIONARY IF EXISTS dict; + +create table dict_table +( + `strField` String, + `dateField` Date, + `float64Field` Float64 +) Engine Log(); + +insert into dict_table values ('SomeStr', toDate('2021-01-01'), 1.1), ('SomeStr2', toDate('2021-01-02'), 2.2); + +create dictionary dict +( + `strField` String, + `dateField` Date, + `float64Field` Float64 +) +PRIMARY KEY strField, dateField +SOURCE (CLICKHOUSE(TABLE 'dict_table')) +LIFETIME(MIN 300 MAX 360) +LAYOUT (COMPLEX_KEY_HASHED()); + +create table data_table +( + `float64Field1` Float64, + `float64Field2` Float64, + `strField1` String, + `strField2` String +) Engine Log(); + +insert into data_table values (1.1, 1.2, 'SomeStr', 'SomeStr'), (2.1, 2.2, 'SomeStr2', 'SomeStr2'); + +select round( + float64Field1 * if(strField1 != '', 1.0, dictGetFloat64('dict', 'float64Field', (strField1, toDate('2021-01-01')))) + + if(strField2 != '', 1.0, dictGetFloat64('dict', 'float64Field', (strField2, toDate('2021-01-01')))) * if(isFinite(float64Field2), float64Field2, 0), + 2) +from data_table; + +DROP DICTIONARY dict; +DROP TABLE dict_table; +DROP TABLE data_table; diff --git a/parser/testdata/02234_position_case_insensitive_utf8/ast.json b/parser/testdata/02234_position_case_insensitive_utf8/ast.json new file mode 100644 index 000000000..1a0e20584 --- /dev/null +++ b/parser/testdata/02234_position_case_insensitive_utf8/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function positionCaseInsensitiveUTF8 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'Hello'" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '%�%'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001887257, + "rows_read": 10, + "bytes_read": 397 + } +} diff --git a/parser/testdata/02234_position_case_insensitive_utf8/metadata.json b/parser/testdata/02234_position_case_insensitive_utf8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02234_position_case_insensitive_utf8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02234_position_case_insensitive_utf8/query.sql b/parser/testdata/02234_position_case_insensitive_utf8/query.sql new file mode 100644 index 000000000..d77b13e7f --- /dev/null +++ b/parser/testdata/02234_position_case_insensitive_utf8/query.sql @@ -0,0 +1,2 @@ +SELECT positionCaseInsensitiveUTF8('Hello', materialize('%\xF0%')); +SELECT DISTINCT positionCaseInsensitiveUTF8(materialize('Hello'), '%\xF0%') FROM numbers(1000); diff --git a/parser/testdata/02235_add_part_offset_virtual_column/ast.json b/parser/testdata/02235_add_part_offset_virtual_column/ast.json new file mode 
100644 index 000000000..14b6e0504 --- /dev/null +++ b/parser/testdata/02235_add_part_offset_virtual_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_1 (children 1)" + }, + { + "explain": " Identifier t_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00125805, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02235_add_part_offset_virtual_column/metadata.json b/parser/testdata/02235_add_part_offset_virtual_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02235_add_part_offset_virtual_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02235_add_part_offset_virtual_column/query.sql b/parser/testdata/02235_add_part_offset_virtual_column/query.sql new file mode 100644 index 000000000..b23d2d251 --- /dev/null +++ b/parser/testdata/02235_add_part_offset_virtual_column/query.sql @@ -0,0 +1,59 @@ +DROP TABLE IF EXISTS t_1; +DROP TABLE IF EXISTS t_random_1; + +CREATE TABLE t_1 +( + `order_0` UInt64, + `ordinary_1` UInt32, + `p_time` Date, + `computed` ALIAS 'computed_' || cast(`p_time` AS String), + `granule` MATERIALIZED cast(`order_0` / 0x2000 AS UInt64) % 3, + INDEX `index_granule` `granule` TYPE minmax GRANULARITY 1 +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(p_time) +ORDER BY order_0 +SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +CREATE TABLE t_random_1 +( + `ordinary_1` UInt32 +) +ENGINE = GenerateRandom(1, 5, 3); + +SET optimize_trivial_insert_select = 1; +INSERT INTO t_1 select rowNumberInAllBlocks(), *, '1984-01-01' from t_random_1 limit 1000000; + +OPTIMIZE TABLE t_1 FINAL; + +ALTER TABLE t_1 ADD COLUMN foo String DEFAULT 'foo'; + +SELECT COUNT(DISTINCT(_part)) FROM t_1; + +SELECT min(_part_offset), max(_part_offset) FROM t_1; +SELECT count(*) FROM t_1 WHERE _part_offset != order_0; +SELECT count(*) FROM t_1 WHERE order_0 IN (SELECT toUInt64(rand64()%1000) FROM system.numbers limit 100) AND _part_offset != order_0; +SELECT count(*) FROM t_1 PREWHERE ordinary_1 > 5000 WHERE _part_offset != order_0; +SELECT order_0, _part_offset, _part FROM t_1 ORDER BY order_0 LIMIT 3; +SELECT order_0, _part_offset, _part FROM t_1 ORDER BY order_0 DESC LIMIT 3; +SELECT order_0, _part_offset, _part FROM t_1 WHERE order_0 <= 1 OR (order_0 BETWEEN 10000 AND 10002) OR order_0 >= 999998 ORDER BY order_0; +SELECT order_0, _part_offset, _part FROM t_1 WHERE order_0 <= 1 OR (order_0 BETWEEN 10000 AND 10002) OR order_0 >= 999998 ORDER BY order_0 DESC; +SELECT order_0, _part_offset, computed FROM t_1 ORDER BY order_0, _part_offset, computed LIMIT 3; +SELECT order_0, _part_offset, computed FROM t_1 ORDER BY order_0 DESC, _part_offset DESC, computed DESC LIMIT 3; +SELECT order_0, _part_offset, _part FROM t_1 WHERE order_0 <= 1 OR order_0 >= 999998 ORDER BY order_0 LIMIT 3; +SELECT _part_offset FROM t_1 ORDER BY order_0 LIMIT 3; +SELECT _part_offset, foo FROM t_1 ORDER BY order_0 LIMIT 3; + +SELECT 'SOME GRANULES FILTERED OUT'; +SELECT count(*), sum(_part_offset), sum(order_0) from t_1 where granule == 0; +SELECT count(*), sum(_part_offset), sum(order_0) from t_1 where granule == 0 AND _part_offset < 100000; +SELECT count(*), sum(_part_offset), sum(order_0) from t_1 where granule == 0 AND _part_offset >= 100000; +SELECT _part_offset FROM t_1 where granule == 0 AND _part_offset >= 100000 ORDER BY order_0 LIMIT 3; +SELECT _part_offset, foo FROM t_1 where granule == 0 AND 
_part_offset >= 100000 ORDER BY order_0 LIMIT 3; + +SELECT 'PREWHERE'; +SELECT count(*), sum(_part_offset), sum(order_0) from t_1 prewhere granule == 0 where _part_offset >= 100000; +SELECT count(*), sum(_part_offset), sum(order_0) from t_1 prewhere _part != '' where granule == 0; +SELECT count(*), sum(_part_offset), sum(order_0) from t_1 prewhere _part_offset > 100000 where granule == 0; +SELECT _part_offset FROM t_1 PREWHERE order_0 % 10000 == 42 ORDER BY order_0 LIMIT 3; +SELECT _part_offset, foo FROM t_1 PREWHERE order_0 % 10000 == 42 ORDER BY order_0 LIMIT 3; diff --git a/parser/testdata/02235_check_table_sparse_serialization/ast.json b/parser/testdata/02235_check_table_sparse_serialization/ast.json new file mode 100644 index 000000000..4297c35f4 --- /dev/null +++ b/parser/testdata/02235_check_table_sparse_serialization/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_sparse_02235 (children 1)" + }, + { + "explain": " Identifier t_sparse_02235" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001095996, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/02235_check_table_sparse_serialization/metadata.json b/parser/testdata/02235_check_table_sparse_serialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02235_check_table_sparse_serialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02235_check_table_sparse_serialization/query.sql b/parser/testdata/02235_check_table_sparse_serialization/query.sql new file mode 100644 index 000000000..625be63e0 --- /dev/null +++ b/parser/testdata/02235_check_table_sparse_serialization/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS t_sparse_02235; + +CREATE TABLE t_sparse_02235 (a UInt8) ENGINE = MergeTree ORDER BY tuple() +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9; + +SYSTEM STOP MERGES t_sparse_02235; + +INSERT INTO t_sparse_02235 SELECT 1 FROM numbers(1000); +INSERT INTO t_sparse_02235 SELECT 0 FROM numbers(1000); + +SELECT name, column, serialization_kind FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_02235' +ORDER BY name, column; + +CHECK TABLE t_sparse_02235 SETTINGS check_query_single_value_result = 0, max_threads = 1; + +DROP TABLE t_sparse_02235; diff --git a/parser/testdata/02236_explain_pipeline_join/ast.json b/parser/testdata/02236_explain_pipeline_join/ast.json new file mode 100644 index 000000000..db024258a --- /dev/null +++ b/parser/testdata/02236_explain_pipeline_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001323472, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02236_explain_pipeline_join/metadata.json b/parser/testdata/02236_explain_pipeline_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02236_explain_pipeline_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02236_explain_pipeline_join/query.sql b/parser/testdata/02236_explain_pipeline_join/query.sql new file mode 100644 index 000000000..46f030800 --- /dev/null +++ b/parser/testdata/02236_explain_pipeline_join/query.sql @@ -0,0 +1,16 @@ +SET query_plan_join_swap_table = false; +SET enable_analyzer = 1; +SET enable_parallel_replicas=0; +SET 
query_plan_optimize_join_order_limit = 0; + +EXPLAIN PIPELINE +SELECT * FROM +( + SELECT * FROM system.numbers LIMIT 100000 +) t1 +ALL LEFT JOIN +( + SELECT * FROM system.numbers LIMIT 100000 +) t2 +USING number +SETTINGS max_threads=16; diff --git a/parser/testdata/02236_json_each_row_empty_map_schema_inference/ast.json b/parser/testdata/02236_json_each_row_empty_map_schema_inference/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02236_json_each_row_empty_map_schema_inference/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02236_json_each_row_empty_map_schema_inference/metadata.json b/parser/testdata/02236_json_each_row_empty_map_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02236_json_each_row_empty_map_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02236_json_each_row_empty_map_schema_inference/query.sql b/parser/testdata/02236_json_each_row_empty_map_schema_inference/query.sql new file mode 100644 index 000000000..ba6cf584d --- /dev/null +++ b/parser/testdata/02236_json_each_row_empty_map_schema_inference/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest +set input_format_json_try_infer_named_tuples_from_objects=0; +set input_format_json_read_objects_as_strings=0; +select * from format(JSONEachRow, '{"a" : {}}, {"a" : {"b" : 1}}') diff --git a/parser/testdata/02240_asof_join_biginteger/ast.json b/parser/testdata/02240_asof_join_biginteger/ast.json new file mode 100644 index 000000000..1344a9963 --- /dev/null +++ b/parser/testdata/02240_asof_join_biginteger/ast.json @@ -0,0 +1,118 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias t1) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0 (alias k)" + }, + { + "explain": " Function toInt128 (alias v) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '18446744073709551617'" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias t2) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0 (alias k)" + }, + { + "explain": " Function toInt128 (alias v) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '18446744073709551616'" + }, + { + "explain": " TableJoin (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " Identifier v" + } + ], + + "rows": 32, + + "statistics": + { + "elapsed": 
0.001511046, + "rows_read": 32, + "bytes_read": 1376 + } +} diff --git a/parser/testdata/02240_asof_join_biginteger/metadata.json b/parser/testdata/02240_asof_join_biginteger/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02240_asof_join_biginteger/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02240_asof_join_biginteger/query.sql b/parser/testdata/02240_asof_join_biginteger/query.sql new file mode 100644 index 000000000..a5c1faae4 --- /dev/null +++ b/parser/testdata/02240_asof_join_biginteger/query.sql @@ -0,0 +1,13 @@ +select * from (select 0 as k, toInt128('18446744073709551617') as v) t1 asof join (select 0 as k, toInt128('18446744073709551616') as v) t2 using(k, v); +select * from (select 0 as k, toInt256('340282366920938463463374607431768211457') as v) t1 asof join (select 0 as k, toInt256('340282366920938463463374607431768211456') as v) t2 using(k, v); + +select * from (select 0 as k, toUInt128('18446744073709551617') as v) t1 asof join (select 0 as k, toUInt128('18446744073709551616') as v) t2 using(k, v); +select * from (select 0 as k, toUInt256('340282366920938463463374607431768211457') as v) t1 asof join (select 0 as k, toUInt256('340282366920938463463374607431768211456') as v) t2 using(k, v); + +SET join_algorithm = 'full_sorting_merge'; + +select * from (select 0 as k, toInt128('18446744073709551617') as v) t1 asof join (select 0 as k, toInt128('18446744073709551616') as v) t2 using(k, v); +select * from (select 0 as k, toInt256('340282366920938463463374607431768211457') as v) t1 asof join (select 0 as k, toInt256('340282366920938463463374607431768211456') as v) t2 using(k, v); + +select * from (select 0 as k, toUInt128('18446744073709551617') as v) t1 asof join (select 0 as k, toUInt128('18446744073709551616') as v) t2 using(k, v); +select * from (select 0 as k, toUInt256('340282366920938463463374607431768211457') as v) t1 asof join (select 0 as k, toUInt256('340282366920938463463374607431768211456') as v) t2 using(k, v); diff --git a/parser/testdata/02240_filesystem_cache_bypass_cache_threshold/ast.json b/parser/testdata/02240_filesystem_cache_bypass_cache_threshold/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02240_filesystem_cache_bypass_cache_threshold/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02240_filesystem_cache_bypass_cache_threshold/metadata.json b/parser/testdata/02240_filesystem_cache_bypass_cache_threshold/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02240_filesystem_cache_bypass_cache_threshold/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02240_filesystem_cache_bypass_cache_threshold/query.sql b/parser/testdata/02240_filesystem_cache_bypass_cache_threshold/query.sql new file mode 100644 index 000000000..e06c6730e --- /dev/null +++ b/parser/testdata/02240_filesystem_cache_bypass_cache_threshold/query.sql @@ -0,0 +1,34 @@ +-- Tags: no-parallel, no-fasttest, no-object-storage, no-random-settings + +-- { echo } + +SYSTEM DROP FILESYSTEM CACHE; +SET enable_filesystem_cache_on_write_operations=0; + +DROP TABLE IF EXISTS test; +CREATE TABLE test (key UInt32, value String) +Engine=MergeTree() +ORDER BY key +SETTINGS min_bytes_for_wide_part = 10485760, + compress_marks=false, + compress_primary_key=false, + serialization_info_version='basic', + disk = disk( + type = cache, + name = '02240_bypass_cache_threshold', + max_size = '128Mi', + path 
= 'filesystem_cache_bypass_cache_threshold/', + enable_bypass_cache_with_threshold = 1, + bypass_cache_threshold = 100, + disk = 's3_disk'); + +INSERT INTO test SELECT number, toString(number) FROM numbers(100); + +SELECT * FROM test FORMAT Null; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache WHERE cache_name = '02240_bypass_cache_threshold' ORDER BY file_segment_range_end, size; +SYSTEM DROP FILESYSTEM CACHE; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache WHERE cache_name = '02240_bypass_cache_threshold'; +SELECT * FROM test FORMAT Null; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache WHERE cache_name = '02240_bypass_cache_threshold'; +SYSTEM DROP FILESYSTEM CACHE; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache WHERE cache_name = '02240_bypass_cache_threshold'; diff --git a/parser/testdata/02240_filesystem_query_cache/ast.json b/parser/testdata/02240_filesystem_query_cache/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02240_filesystem_query_cache/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02240_filesystem_query_cache/metadata.json b/parser/testdata/02240_filesystem_query_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02240_filesystem_query_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02240_filesystem_query_cache/query.sql b/parser/testdata/02240_filesystem_query_cache/query.sql new file mode 100644 index 000000000..ed154f450 --- /dev/null +++ b/parser/testdata/02240_filesystem_query_cache/query.sql @@ -0,0 +1,36 @@ +-- Tags: no-parallel, no-fasttest, no-object-storage, no-random-settings + +-- { echo } + +SYSTEM DROP FILESYSTEM CACHE; +SET enable_filesystem_cache_on_write_operations=0; +SET skip_download_if_exceeds_query_cache=1; +SET filesystem_cache_max_download_size=128; + +DROP TABLE IF EXISTS test; +CREATE TABLE test (key UInt32, value String) +Engine=MergeTree() +ORDER BY key +SETTINGS min_bytes_for_wide_part = 10485760, + serialization_info_version = 'basic', + compress_marks=false, + compress_primary_key=false, + disk = disk( + type = cache, + name = '02240_filesystem_query_cache', + max_size = '128Mi', + path = 'filesystem_query_cache/', + cache_policy='LRU', + cache_on_write_operations= 1, + enable_filesystem_query_cache_limit = 1, + disk = 's3_disk'); +SYSTEM DROP FILESYSTEM CACHE; +INSERT INTO test SELECT number, toString(number) FROM numbers(100); +SELECT * FROM test FORMAT Null; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache WHERE cache_name = '02240_filesystem_query_cache' ORDER BY file_segment_range_end, size; +SYSTEM DROP FILESYSTEM CACHE; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache WHERE cache_name = '02240_filesystem_query_cache'; +SELECT * FROM test FORMAT Null; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache WHERE cache_name = '02240_filesystem_query_cache'; +SYSTEM DROP FILESYSTEM CACHE; +SELECT file_segment_range_begin, file_segment_range_end, size FROM system.filesystem_cache WHERE cache_name = '02240_filesystem_query_cache'; diff --git a/parser/testdata/02240_get_type_serialization_streams/ast.json b/parser/testdata/02240_get_type_serialization_streams/ast.json new file mode 100644 index 
000000000..187f633bd --- /dev/null +++ b/parser/testdata/02240_get_type_serialization_streams/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function getTypeSerializationStreams (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Array(Int8)'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.00166598, + "rows_read": 7, + "bytes_read": 285 + } +} diff --git a/parser/testdata/02240_get_type_serialization_streams/metadata.json b/parser/testdata/02240_get_type_serialization_streams/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02240_get_type_serialization_streams/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02240_get_type_serialization_streams/query.sql b/parser/testdata/02240_get_type_serialization_streams/query.sql new file mode 100644 index 000000000..72a66269e --- /dev/null +++ b/parser/testdata/02240_get_type_serialization_streams/query.sql @@ -0,0 +1,8 @@ +select getTypeSerializationStreams('Array(Int8)'); +select getTypeSerializationStreams('Map(String, Int64)'); +select getTypeSerializationStreams('Tuple(String, Int64, Float64)'); +select getTypeSerializationStreams('LowCardinality(String)'); +select getTypeSerializationStreams('Nullable(String)'); +select getTypeSerializationStreams([1,2,3]); +select getTypeSerializationStreams(map('a', 1, 'b', 2)); +select getTypeSerializationStreams(tuple('a', 1, 'b', 2)); diff --git a/parser/testdata/02241_array_first_last_or_null/ast.json b/parser/testdata/02241_array_first_last_or_null/ast.json new file mode 100644 index 000000000..cd588b558 --- /dev/null +++ b/parser/testdata/02241_array_first_last_or_null/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'ArrayFirst constant predicate'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001718208, + "rows_read": 5, + "bytes_read": 200 + } +} diff --git a/parser/testdata/02241_array_first_last_or_null/metadata.json b/parser/testdata/02241_array_first_last_or_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02241_array_first_last_or_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02241_array_first_last_or_null/query.sql b/parser/testdata/02241_array_first_last_or_null/query.sql new file mode 100644 index 000000000..aa8f0cdbf --- /dev/null +++ b/parser/testdata/02241_array_first_last_or_null/query.sql @@ -0,0 +1,29 @@ +SELECT 'ArrayFirst constant predicate'; +SELECT arrayFirstOrNull(x -> 1, emptyArrayUInt8()); +SELECT arrayFirstOrNull(x -> 0, emptyArrayUInt8()); +SELECT arrayFirstOrNull(x -> 1, [1, 2, 3]); +SELECT arrayFirstOrNull(x -> 0, [1, 2, 3]); + +SELECT 'ArrayFirst non constant predicate'; +SELECT arrayFirstOrNull(x -> x >= 2, emptyArrayUInt8()); +SELECT arrayFirstOrNull(x -> x >= 2, [1, 2, 3]); +SELECT arrayFirstOrNull(x -> x >= 2, materialize([1, 2, 3])); + +SELECT 
'ArrayFirst with Null'; +SELECT arrayFirstOrNull((x,f) -> f, [1,2,3,NULL], [0,1,0,0]); +SELECT arrayFirstOrNull((x,f) -> f, [1,2,3,NULL], [0,0,0,1]); + +SELECT 'ArrayLast constant predicate'; +SELECT arrayLastOrNull(x -> 1, emptyArrayUInt8()); +SELECT arrayLastOrNull(x -> 0, emptyArrayUInt8()); +SELECT arrayLastOrNull(x -> 1, [1, 2, 3]); +SELECT arrayLastOrNull(x -> 0, [1, 2, 3]); + +SELECT 'ArrayLast non constant predicate'; +SELECT arrayLastOrNull(x -> x >= 2, emptyArrayUInt8()); +SELECT arrayLastOrNull(x -> x >= 2, [1, 2, 3]); +SELECT arrayLastOrNull(x -> x >= 2, materialize([1, 2, 3])); + +SELECT 'ArrayLast with Null'; +SELECT arrayLastOrNull((x,f) -> f, [1,2,3,NULL], [0,1,0,0]); +SELECT arrayLastOrNull((x,f) -> f, [1,2,3,NULL], [0,1,0,1]); \ No newline at end of file diff --git a/parser/testdata/02241_short_circuit_short_column/ast.json b/parser/testdata/02241_short_circuit_short_column/ast.json new file mode 100644 index 000000000..67ed5f7b1 --- /dev/null +++ b/parser/testdata/02241_short_circuit_short_column/ast.json @@ -0,0 +1,250 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_65536" + }, + { + "explain": " Literal UInt64_2147483647" + }, + { + "explain": " Function throwIf (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal UInt64_1048575" + }, + { + "explain": " Function throwIf (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_0" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_65536" + }, + { + "explain": " Literal UInt64_257" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_1.1754943508222875e-38" + }, + { + "explain": " Literal UInt64_1024" + }, + { + "explain": " Literal Int64_-2147483649" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function and 
(children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_65536" + }, + { + "explain": " Literal UInt64_257" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_1.1754943508222875e-38" + }, + { + "explain": " Literal UInt64_1024" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal UInt64_65535" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 76, + + "statistics": + { + "elapsed": 0.001632289, + "rows_read": 76, + "bytes_read": 3454 + } +} diff --git a/parser/testdata/02241_short_circuit_short_column/metadata.json b/parser/testdata/02241_short_circuit_short_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02241_short_circuit_short_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02241_short_circuit_short_column/query.sql b/parser/testdata/02241_short_circuit_short_column/query.sql new file mode 100644 index 000000000..311307fe5 --- /dev/null +++ b/parser/testdata/02241_short_circuit_short_column/query.sql @@ -0,0 +1,2 @@ +SELECT 65536 AND 2147483647, throwIf((((1048575 AND throwIf((0. 
AND NULL) AND (((65536 AND 257) AND (1.1754943508222875e-38 AND 1024) AND -2147483649) AND NULL) AND (10 AND NULL)) AND (((65536 AND 257) AND (1.1754943508222875e-38 AND 1024) AND -1) AND NULL) AND 65535) AND -1) AND NULL) AND (NULL AND NULL), NULL < number) FROM numbers(10); +SELECT NULL AND throwIf((0 AND NULL) AND 2147483646 AND NULL AND NULL) AND -2147483649 AND (NULL AND NULL) AND NULL FROM system.numbers LIMIT 10; diff --git a/parser/testdata/02242_if_then_else_null_bug/ast.json b/parser/testdata/02242_if_then_else_null_bug/ast.json new file mode 100644 index 000000000..ba8949e74 --- /dev/null +++ b/parser/testdata/02242_if_then_else_null_bug/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal 'Nullable(Int64)'" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toInt32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001173388, + "rows_read": 21, + "bytes_read": 832 + } +} diff --git a/parser/testdata/02242_if_then_else_null_bug/metadata.json b/parser/testdata/02242_if_then_else_null_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02242_if_then_else_null_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02242_if_then_else_null_bug/query.sql b/parser/testdata/02242_if_then_else_null_bug/query.sql new file mode 100644 index 000000000..47b0f38d3 --- /dev/null +++ b/parser/testdata/02242_if_then_else_null_bug/query.sql @@ -0,0 +1,4 @@ +SELECT if(materialize(1) > 0, CAST(NULL, 'Nullable(Int64)'), materialize(toInt32(1))); +SELECT if(materialize(1) > 0, materialize(toInt32(1)), CAST(NULL, 'Nullable(Int64)')); +SELECT if(materialize(1) > 0, CAST(NULL, 'Nullable(Decimal(18, 4))'), materialize(CAST(2, 'Nullable(Decimal(9, 4))'))); +SELECT if(materialize(1) > 0, materialize(CAST(2, 'Nullable(Decimal(9, 4))')), CAST(NULL, 'Nullable(Decimal(18, 4))')); diff --git a/parser/testdata/02242_join_rocksdb/ast.json b/parser/testdata/02242_join_rocksdb/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02242_join_rocksdb/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02242_join_rocksdb/metadata.json b/parser/testdata/02242_join_rocksdb/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02242_join_rocksdb/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02242_join_rocksdb/query.sql b/parser/testdata/02242_join_rocksdb/query.sql new file mode 
100644 index 000000000..be8c4bd1d --- /dev/null +++ b/parser/testdata/02242_join_rocksdb/query.sql @@ -0,0 +1,74 @@ +-- Tags: use-rocksdb, no-parallel-replicas +-- no-parallel-replicas: Can't execute any of specified algorithms for specified strictness/kind and right storage type. (NOT_IMPLEMENTED) + +DROP TABLE IF EXISTS rdb; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE rdb (key UInt32, value Array(UInt32), value2 String) ENGINE = EmbeddedRocksDB PRIMARY KEY (key); +INSERT INTO rdb + SELECT + toUInt32(sipHash64(number) % 10) as key, + [key, key+1] as value, + ('val2' || toString(key)) as value2 + FROM numbers_mt(10); + +CREATE TABLE t1 (k UInt32) ENGINE = TinyLog; +INSERT INTO t1 SELECT number as k from numbers_mt(10); + +CREATE TABLE t2 (k UInt16) ENGINE = TinyLog; +INSERT INTO t2 SELECT number as k from numbers_mt(10); + +SET join_algorithm = 'direct'; + +SELECT '-- key rename'; +SELECT * FROM (SELECT k as key FROM t2) as t2 INNER JOIN rdb ON rdb.key == t2.key ORDER BY key; + +SELECT '-- using'; +SELECT * FROM (SELECT k as key FROM t2) as t2 INNER JOIN rdb USING key ORDER BY key; + +SELECT '-- left semi'; +SELECT k FROM t2 LEFT SEMI JOIN rdb ON rdb.key == t2.k ORDER BY k; + +SELECT '-- left anti'; +SELECT k FROM t2 LEFT ANTI JOIN rdb ON rdb.key == t2.k ORDER BY k; + +SELECT '-- join_use_nulls left'; +SELECT k, key, toTypeName(value2), value2 FROM t2 LEFT JOIN rdb ON rdb.key == t2.k ORDER BY k SETTINGS join_use_nulls = 1; + +SELECT '-- join_use_nulls inner'; +SELECT k, key, toTypeName(value2), value2 FROM t2 INNER JOIN rdb ON rdb.key == t2.k ORDER BY k SETTINGS join_use_nulls = 1; + +SELECT '-- columns subset'; +SELECT value2 FROM t2 LEFT JOIN rdb ON rdb.key == t2.k ORDER BY k; + +SELECT '--- key types'; +SELECT * FROM t2 INNER JOIN rdb ON rdb.key == t2.k ORDER BY rdb.key; + +-- can't promote right table type +SELECT * FROM (SELECT toUInt64(k) as k FROM t2) as t2 INNER JOIN rdb ON rdb.key == t2.k; -- { serverError NOT_IMPLEMENTED,TYPE_MISMATCH } +-- TODO: support fallback when right table key type can't be changed +-- SELECT * FROM (SELECT toUInt64(k) as k FROM t2) as t2 INNER JOIN rdb ON rdb.key == t2.k FORMAT Null SETTINGS join_algorithm = 'direct,hash'; + +SELECT '--- totals'; +SELECT rdb.key % 2, sum(k), max(value2) FROM t2 INNER JOIN rdb ON rdb.key == t2.k GROUP BY (rdb.key % 2) WITH TOTALS; + +SELECT '---'; +SELECT * FROM t1 RIGHT JOIN rdb ON rdb.key == t1.k; -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t1 RIGHT JOIN rdb ON rdb.key == t1.k FORMAT Null SETTINGS join_algorithm = 'direct,hash'; + +SELECT * FROM t1 FULL JOIN rdb ON rdb.key == t1.k; -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t1 FULL JOIN rdb ON rdb.key == t1.k FORMAT Null SETTINGS join_algorithm = 'direct,hash'; + +SELECT * FROM t1 INNER JOIN rdb ON rdb.key + 1 == t1.k; -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t1 INNER JOIN rdb ON rdb.key + 1 == t1.k FORMAT Null SETTINGS join_algorithm = 'direct,hash'; + +SELECT * FROM t1 INNER JOIN (SELECT * FROM rdb) AS rdb ON rdb.key == t1.k; -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t1 INNER JOIN (SELECT * FROM rdb) AS rdb ON rdb.key == t1.k FORMAT Null SETTINGS join_algorithm = 'direct,hash'; + +SELECT * FROM t1 RIGHT SEMI JOIN (SELECT * FROM rdb) AS rdb ON rdb.key == t1.k; -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t1 RIGHT ANTI JOIN (SELECT * FROM rdb) AS rdb ON rdb.key == t1.k; -- { serverError NOT_IMPLEMENTED } + +DROP TABLE IF EXISTS rdb; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; diff --git 
a/parser/testdata/02242_make_date/ast.json b/parser/testdata/02242_make_date/ast.json new file mode 100644 index 000000000..4bf58551c --- /dev/null +++ b/parser/testdata/02242_make_date/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function makeDate (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1991" + }, + { + "explain": " Literal UInt64_8" + }, + { + "explain": " Literal UInt64_24" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001157728, + "rows_read": 11, + "bytes_read": 419 + } +} diff --git a/parser/testdata/02242_make_date/metadata.json b/parser/testdata/02242_make_date/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02242_make_date/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02242_make_date/query.sql b/parser/testdata/02242_make_date/query.sql new file mode 100644 index 000000000..14d9dbdec --- /dev/null +++ b/parser/testdata/02242_make_date/query.sql @@ -0,0 +1,91 @@ +select toTypeName(makeDate(1991, 8, 24)); +select toTypeName(makeDate(cast(1991 as Nullable(UInt64)), 8, 24)); +select toTypeName(makeDate(1991, cast(8 as Nullable(UInt64)), 24)); +select toTypeName(makeDate(1991, 8, cast(24 as Nullable(UInt64)))); +select toTypeName(makeDate(1991, cast(8 as Nullable(UInt64)), cast(24 as Nullable(UInt64)))); + +select makeDate(1970, 01, 01); +select makeDate(2020, 08, 24); +select makeDate(1980, 10, 17); +select makeDate(-1980, 10, 17); +select makeDate(1980, -10, 17); +select makeDate(1980, 10, -17); +select makeDate(1980.0, 9, 30.0/2); +select makeDate(-1980.0, 9, 32.0/2); +select makeDate(cast(1980.1 as Decimal(20,5)), 9, 17); +select makeDate(cast('-1980.1' as Decimal(20,5)), 9, 18); +select makeDate(cast(1980.1 as Float32), 9, 19); +select makeDate(cast(-1980.1 as Float32), 9, 20); + +select makeDate(cast(1980 as Date), 10, 30); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate(cast(-1980 as Date), 10, 30); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate(cast(1980 as Date32), 10, 30); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate(cast(-1980 as Date32), 10, 30); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate(cast(1980 as DateTime), 10, 30); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate(cast(-1980 as DateTime), 10, 30); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate(cast(1980 as DateTime64), 10, 30); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate(cast(-1980 as DateTime64), 10, 30); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +select makeDate(0.0, 1, 2); +select makeDate(1980, 15, 1); +select makeDate(1980, 2, 29); +select makeDate(1984, 2, 30); +select makeDate(19800, 12, 3); +select makeDate(2148,1,1); +select makeDate(2149,1,1); +select makeDate(2149,6,6); +select makeDate(2149,6,7); +select makeDate(2150,1,1); +select makeDate(1969,1,1); +select makeDate(1969,12,1); +select makeDate(1969,12,31); +select makeDate(2282,1,1); +select makeDate(2283,1,1); +select makeDate(2283,11,11); +select makeDate(2283,11,12); +select 
makeDate(2284,1,1); +select makeDate(1924,1,1); +select makeDate(1924,12,1); +select makeDate(1924,12,31); +select makeDate(1970,0,0); +select makeDate(1970,0,1); +select makeDate(1970,1,0); +select makeDate(1990,0,1); +select makeDate(1990,1,0); + +select makeDate(0x7fff+2010,1,1); +select makeDate(0xffff+2010,1,2); +select makeDate(0x7fffffff+2010,1,3); +select makeDate(0xffffffff+2010,1,4); +select makeDate(0x7fffffffffffffff+2010,1,3); +select makeDate(0xffffffffffffffff+2010,1,4); + +select makeDate('1980', '10', '20'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate('-1980', 3, 17); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +select makeDate('aa', 3, 24); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate(1994, 'aa', 24); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate(1984, 3, 'aa'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +select makeDate(True, 3, 24); +select makeDate(1994, True, 24); +select makeDate(1984, 3, True); +select makeDate(False, 3, 24); +select makeDate(1994, False, 24); +select makeDate(1984, 3, False); + +select makeDate(NULL, 3, 4); +select makeDate(1980, NULL, 4); +select makeDate(1980, 3, NULL); + +select makeDate(1980); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select makeDate(1980, 1, 1, 1); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +select MAKEDATE(1980, 1, 1); +select MAKEDATE(1980, 1); + +select makeDate(year, month, day) from (select NULL as year, 2 as month, 3 as day union all select 1984 as year, 2 as month, 3 as day) order by year, month, day; + +select makeDate(year, month, day) from (select NULL as year, 2 as month, 3 as day union all select NULL as year, 2 as month, 3 as day) order by year, month, day; + +select makeDate(year, month, day) from (select 1984 as year, 2 as month, 3 as day union all select 1984 as year, 2 as month, 4 as day) order by year, month, day; diff --git a/parser/testdata/02242_make_date_mysql/ast.json b/parser/testdata/02242_make_date_mysql/ast.json new file mode 100644 index 000000000..516029801 --- /dev/null +++ b/parser/testdata/02242_make_date_mysql/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function makeDate (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1991" + }, + { + "explain": " Literal UInt64_8" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001467513, + "rows_read": 10, + "bytes_read": 386 + } +} diff --git a/parser/testdata/02242_make_date_mysql/metadata.json b/parser/testdata/02242_make_date_mysql/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02242_make_date_mysql/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02242_make_date_mysql/query.sql b/parser/testdata/02242_make_date_mysql/query.sql new file mode 100644 index 000000000..5070c78d4 --- /dev/null +++ b/parser/testdata/02242_make_date_mysql/query.sql @@ -0,0 +1,41 @@ +select toTypeName(makeDate(1991, 8)); +select toTypeName(makeDate(cast(1991 as Nullable(UInt64)), 8)); +select toTypeName(makeDate(1991, cast(8 as Nullable(UInt64)))); + +select 
makeDate(1970, 01); +select makeDate(2020, 08); +select makeDate(-1980, 10); +select makeDate(1980, -10); +select makeDate(1980.0, 9); +select makeDate(-1980.0, 9); +select makeDate(cast(1980.1 as Decimal(20,5)), 9); +select makeDate(cast('-1980.1' as Decimal(20,5)), 9); +select makeDate(cast(1980.1 as Float32), 9); +select makeDate(cast(-1980.1 as Float32), 9); + +select makeDate(cast(1980 as Date), 10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate(cast(-1980 as Date), 10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate(cast(1980 as Date32), 10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate(cast(-1980 as Date32), 10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate(cast(1980 as DateTime), 10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate(cast(-1980 as DateTime), 10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate(cast(1980 as DateTime64), 10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate(cast(-1980 as DateTime64), 10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate('1980', '10'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate('-1980', 3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate('aa', 3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate(1994, 'aa'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +select makeDate(0, 1); +select makeDate(19800, 12); +select makeDate(2149, 157); +select makeDate(2149, 158); +select makeDate(1969,355); +select makeDate(1969,356); +select makeDate(1969,357); +select makeDate(1970,0); +select makeDate(1970,1); +select makeDate(1970,2); + +select makeDate(NULL, 3); +select makeDate(1980, NULL); diff --git a/parser/testdata/02242_negative_datetime64/ast.json b/parser/testdata/02242_negative_datetime64/ast.json new file mode 100644 index 000000000..2e2b7136d --- /dev/null +++ b/parser/testdata/02242_negative_datetime64/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '1965-12-12 12:12:12.123'" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " Literal 'Decimal64(3)'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001155525, + "rows_read": 12, + "bytes_read": 463 + } +} diff --git a/parser/testdata/02242_negative_datetime64/metadata.json b/parser/testdata/02242_negative_datetime64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02242_negative_datetime64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02242_negative_datetime64/query.sql b/parser/testdata/02242_negative_datetime64/query.sql new file mode 100644 index 000000000..406798419 --- /dev/null +++ b/parser/testdata/02242_negative_datetime64/query.sql @@ -0,0 +1,3 @@ +SELECT cast(toDateTime64('1965-12-12 12:12:12.123', 3, 'UTC') as Decimal64(3)); +SELECT cast(toDateTime64('1975-12-12 12:12:12.123', 3, 'UTC') as Decimal64(3)); +SELECT toDateTime64('1969-12-31 23:59:59.123', 3, 'UTC'); diff --git 
a/parser/testdata/02242_optimize_to_subcolumns_no_storage/ast.json b/parser/testdata/02242_optimize_to_subcolumns_no_storage/ast.json new file mode 100644 index 000000000..2df4ac081 --- /dev/null +++ b/parser/testdata/02242_optimize_to_subcolumns_no_storage/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001381856, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02242_optimize_to_subcolumns_no_storage/metadata.json b/parser/testdata/02242_optimize_to_subcolumns_no_storage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02242_optimize_to_subcolumns_no_storage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02242_optimize_to_subcolumns_no_storage/query.sql b/parser/testdata/02242_optimize_to_subcolumns_no_storage/query.sql new file mode 100644 index 000000000..e6e4663c5 --- /dev/null +++ b/parser/testdata/02242_optimize_to_subcolumns_no_storage/query.sql @@ -0,0 +1,3 @@ +SET optimize_functions_to_subcolumns = 1; +SELECT count(*) FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3 +WHERE (n1.number = n2.number) AND (n2.number = n3.number); diff --git a/parser/testdata/02242_subcolumns_sizes/ast.json b/parser/testdata/02242_subcolumns_sizes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02242_subcolumns_sizes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02242_subcolumns_sizes/metadata.json b/parser/testdata/02242_subcolumns_sizes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02242_subcolumns_sizes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02242_subcolumns_sizes/query.sql b/parser/testdata/02242_subcolumns_sizes/query.sql new file mode 100644 index 000000000..ab92f4d98 --- /dev/null +++ b/parser/testdata/02242_subcolumns_sizes/query.sql @@ -0,0 +1,33 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_subcolumns_sizes; + +CREATE TABLE t_subcolumns_sizes (id UInt64, arr Array(UInt64), n Nullable(String)) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0, serialization_info_version = 'basic', ratio_of_defaults_for_sparse_serialization = 1; + +INSERT INTO t_subcolumns_sizes FORMAT JSONEachRow {"id": 1, "arr": [1, 2, 3], "n": null} + +INSERT INTO t_subcolumns_sizes FORMAT JSONEachRow {"id": 2, "arr": [0], "n": "foo"} + +OPTIMIZE TABLE t_subcolumns_sizes FINAL; + +SELECT + column, + subcolumns.names AS sname, + subcolumns.types AS stype, + subcolumns.bytes_on_disk > 0 +FROM system.parts_columns ARRAY JOIN subcolumns +WHERE database = currentDatabase() AND table = 't_subcolumns_sizes' AND active +ORDER BY column, sname, stype; + +SELECT + any(column_bytes_on_disk) = sum(subcolumns.bytes_on_disk), + any(column_data_compressed_bytes) = sum(subcolumns.data_compressed_bytes), + any(column_data_uncompressed_bytes) = sum(subcolumns.data_uncompressed_bytes), + any(column_marks_bytes) = sum(subcolumns.marks_bytes) +FROM system.parts_columns ARRAY JOIN subcolumns +WHERE database = currentDatabase() AND table = 't_subcolumns_sizes' +AND active AND column = 'd'; + +DROP TABLE IF EXISTS t_subcolumns_sizes; diff --git a/parser/testdata/02242_throw_if_constant_argument/ast.json b/parser/testdata/02242_throw_if_constant_argument/ast.json new file mode 100644 index 000000000..e334977e7 --- 
/dev/null +++ b/parser/testdata/02242_throw_if_constant_argument/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function throwIf (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_2147483646" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001373896, + "rows_read": 15, + "bytes_read": 591 + } +} diff --git a/parser/testdata/02242_throw_if_constant_argument/metadata.json b/parser/testdata/02242_throw_if_constant_argument/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02242_throw_if_constant_argument/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02242_throw_if_constant_argument/query.sql b/parser/testdata/02242_throw_if_constant_argument/query.sql new file mode 100644 index 000000000..bdde059ef --- /dev/null +++ b/parser/testdata/02242_throw_if_constant_argument/query.sql @@ -0,0 +1 @@ +SELECT throwIf(0 AND 2147483646) FROM system.numbers LIMIT 10; diff --git a/parser/testdata/02243_in_ip_address/ast.json b/parser/testdata/02243_in_ip_address/ast.json new file mode 100644 index 000000000..4f5c51cbe --- /dev/null +++ b/parser/testdata/02243_in_ip_address/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001224121, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02243_in_ip_address/metadata.json b/parser/testdata/02243_in_ip_address/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02243_in_ip_address/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02243_in_ip_address/query.sql b/parser/testdata/02243_in_ip_address/query.sql new file mode 100644 index 000000000..a2c8c37e5 --- /dev/null +++ b/parser/testdata/02243_in_ip_address/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table (id UInt64, value_ipv4 IPv4, value_ipv6 IPv6) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0, '127.0.0.1', '127.0.0.1'); + +SELECT id FROM test_table WHERE value_ipv4 IN (SELECT value_ipv4 FROM test_table); +SELECT id FROM test_table WHERE value_ipv6 IN (SELECT value_ipv6 FROM test_table); + +DROP TABLE test_table; diff --git a/parser/testdata/02243_ipv6_long_parsing/ast.json b/parser/testdata/02243_ipv6_long_parsing/ast.json new file mode 100644 index 000000000..10bde1344 --- /dev/null +++ b/parser/testdata/02243_ipv6_long_parsing/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + 
"explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00111109, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02243_ipv6_long_parsing/metadata.json b/parser/testdata/02243_ipv6_long_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02243_ipv6_long_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02243_ipv6_long_parsing/query.sql b/parser/testdata/02243_ipv6_long_parsing/query.sql new file mode 100644 index 000000000..25225ee0f --- /dev/null +++ b/parser/testdata/02243_ipv6_long_parsing/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table (id UInt64, value IPv6) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0, '0000:0000:0000:0000:0000:ffff:1.12.12.12'); +INSERT INTO test_table VALUES (1, '0000:0000:0000:0000:0000:ffff:123.123.123.123'); +INSERT INTO test_table VALUES (2, '0000:0000:0000:0000:0000:ffff:192.168.100.228'); + +SELECT * FROM test_table ORDER BY id; + +DROP TABLE test_table; diff --git a/parser/testdata/02243_make_date32/ast.json b/parser/testdata/02243_make_date32/ast.json new file mode 100644 index 000000000..c26c3c287 --- /dev/null +++ b/parser/testdata/02243_make_date32/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function makeDate32 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1991" + }, + { + "explain": " Literal UInt64_8" + }, + { + "explain": " Literal UInt64_24" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001251352, + "rows_read": 11, + "bytes_read": 421 + } +} diff --git a/parser/testdata/02243_make_date32/metadata.json b/parser/testdata/02243_make_date32/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02243_make_date32/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02243_make_date32/query.sql b/parser/testdata/02243_make_date32/query.sql new file mode 100644 index 000000000..e6a319d31 --- /dev/null +++ b/parser/testdata/02243_make_date32/query.sql @@ -0,0 +1,87 @@ +select toTypeName(makeDate32(1991, 8, 24)); +select toTypeName(makeDate32(cast(1991 as Nullable(UInt64)), 8, 24)); +select toTypeName(makeDate32(1991, cast(8 as Nullable(UInt64)), 24)); +select toTypeName(makeDate32(1991, 8, cast(24 as Nullable(UInt64)))); +select toTypeName(makeDate32(1991, cast(8 as Nullable(UInt64)), cast(24 as Nullable(UInt64)))); + +select makeDate32(1970, 01, 01); +select makeDate32(2020, 08, 24); +select makeDate32(1980, 10, 17); +select makeDate32(-1980, 10, 17); +select makeDate32(1980, -10, 17); +select makeDate32(1980, 10, -17); +select makeDate32(1980.0, 9, 30.0/2); +select makeDate32(-1980.0, 9, 32.0/2); +select makeDate32(cast(1980.1 as Decimal(20,5)), 9, 17); +select makeDate32(cast('-1980.1' as Decimal(20,5)), 9, 18); +select makeDate32(cast(1980.1 as Float32), 9, 19); +select makeDate32(cast(-1980.1 as Float32), 9, 20); + +select makeDate32(cast(1980 as Date), 10, 30); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } 
+select makeDate32(cast(-1980 as Date), 10, 30); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32(cast(1980 as Date32), 10, 30); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32(cast(-1980 as Date32), 10, 30); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32(cast(1980 as DateTime), 10, 30); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32(cast(-1980 as DateTime), 10, 30); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32(cast(1980 as DateTime64), 10, 30); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32(cast(-1980 as DateTime64), 10, 30); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +select makeDate32(0.0, 1, 2); +select makeDate32(1980, 15, 1); +select makeDate32(1980, 2, 29); +select makeDate32(1984, 2, 30); +select makeDate32(19800, 12, 3); +select makeDate32(2148,1,1); +select makeDate32(2149,1,1); +select makeDate32(2149,6,6); +select makeDate32(2149,6,7); +select makeDate32(2150,1,1); +select makeDate32(1969,1,1); +select makeDate32(1969,12,1); +select makeDate32(1969,12,31); +select makeDate32(2298,1,1); +select makeDate32(2299,1,1); +select makeDate32(2299,12,31); +select makeDate32(2300,1,1); +select makeDate32(1899,1,1); +select makeDate32(1899,12,1); +select makeDate32(1899,12,31); +select makeDate32(1970,0,0); +select makeDate32(1970,0,1); +select makeDate32(1970,1,0); +select makeDate32(1990,0,1); +select makeDate32(1990,1,0); + +select makeDate32(0x7fff+2010,1,1); +select makeDate32(0xffff+2010,1,2); +select makeDate32(0x7fffffff+2010,1,3); +select makeDate32(0xffffffff+2010,1,4); +select makeDate32(0x7fffffffffffffff+2010,1,3); +select makeDate32(0xffffffffffffffff+2010,1,4); + +select makeDate32('1980', '10', '20'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32('-1980', 3, 17); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +select makeDate32('aa', 3, 24); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32(1994, 'aa', 24); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32(1984, 3, 'aa'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +select makeDate32(True, 3, 24); +select makeDate32(1994, True, 24); +select makeDate32(1984, 3, True); +select makeDate32(False, 3, 24); +select makeDate32(1994, False, 24); +select makeDate32(1984, 3, False); + +select makeDate32(NULL, 3, 4); +select makeDate32(1980, NULL, 4); +select makeDate32(1980, 3, NULL); + +select makeDate32(1980); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select makeDate32(1980, 1, 1, 1); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +select makeDate32(year, month, day) from (select NULL as year, 2 as month, 3 as day union all select 1984 as year, 2 as month, 3 as day) order by year, month, day; + +select makeDate32(year, month, day) from (select NULL as year, 2 as month, 3 as day union all select NULL as year, 2 as month, 3 as day) order by year, month, day; + +select makeDate32(year, month, day) from (select 1984 as year, 2 as month, 3 as day union all select 1984 as year, 2 as month, 4 as day) order by year, month, day; diff --git a/parser/testdata/02243_make_date32_mysql/ast.json b/parser/testdata/02243_make_date32_mysql/ast.json new file mode 100644 index 000000000..89e0b17be --- /dev/null +++ b/parser/testdata/02243_make_date32_mysql/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " 
SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function makeDate32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1991" + }, + { + "explain": " Literal UInt64_8" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001308377, + "rows_read": 10, + "bytes_read": 388 + } +} diff --git a/parser/testdata/02243_make_date32_mysql/metadata.json b/parser/testdata/02243_make_date32_mysql/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02243_make_date32_mysql/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02243_make_date32_mysql/query.sql b/parser/testdata/02243_make_date32_mysql/query.sql new file mode 100644 index 000000000..dc2dd77d9 --- /dev/null +++ b/parser/testdata/02243_make_date32_mysql/query.sql @@ -0,0 +1,42 @@ +select toTypeName(makeDate32(1991, 8)); +select toTypeName(makeDate32(cast(1991 as Nullable(UInt64)), 8)); +select toTypeName(makeDate32(1991, cast(8 as Nullable(UInt64)))); + +select makeDate32(1900, 01); +select makeDate32(2020, 08); +select makeDate32(-1980, 10); +select makeDate32(1980, -10); +select makeDate32(1980.0, 9); +select makeDate32(-1980.0, 9); +select makeDate32(cast(1980.1 as Decimal(20,5)), 9); +select makeDate32(cast('-1980.1' as Decimal(20,5)), 9); +select makeDate32(cast(1980.1 as Float32), 9); +select makeDate32(cast(-1980.1 as Float32), 9); + +select makeDate32(cast(1980 as Date), 10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32(cast(-1980 as Date), 10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32(cast(1980 as Date32), 10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32(cast(-1980 as Date32), 10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32(cast(1980 as DateTime), 10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32(cast(-1980 as DateTime), 10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32(cast(1980 as DateTime64), 10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32(cast(-1980 as DateTime64), 10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32('1980', '10'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32('-1980', 3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32('aa', 3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select makeDate32(1994, 'aa'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +select makeDate32(0, 1); +select makeDate32(19800, 12); +select makeDate32(2299, 365); +select makeDate32(2299, 366); +select makeDate32(2300, 1); +select makeDate32(1899, 365); +select makeDate32(1899, 366); +select makeDate32(1899, 367); +select makeDate32(1900, 0); +select makeDate32(1900, 1); +select makeDate32(1900, 2); + +select makeDate32(NULL, 3); +select makeDate32(1980, NULL); diff --git a/parser/testdata/02244_casewithexpression_return_type/ast.json b/parser/testdata/02244_casewithexpression_return_type/ast.json new file mode 100644 index 000000000..c1b1ef7c3 --- /dev/null +++ b/parser/testdata/02244_casewithexpression_return_type/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery 
(children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Identifier CASE (alias number)" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001073403, + "rows_read": 6, + "bytes_read": 220 + } +} diff --git a/parser/testdata/02244_casewithexpression_return_type/metadata.json b/parser/testdata/02244_casewithexpression_return_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02244_casewithexpression_return_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02244_casewithexpression_return_type/query.sql b/parser/testdata/02244_casewithexpression_return_type/query.sql new file mode 100644 index 000000000..02557a3dd --- /dev/null +++ b/parser/testdata/02244_casewithexpression_return_type/query.sql @@ -0,0 +1,12 @@ + SELECT "number", CASE "number" + WHEN 3 THEN 55 + WHEN 6 THEN 77 + WHEN 9 THEN 95 + ELSE CASE + WHEN "number"=1 THEN 10 + WHEN "number"=10 THEN 100 + ELSE 555555 + END + END AS "LONG_COL_0" + FROM `system`.numbers + LIMIT 20; diff --git a/parser/testdata/02244_column_names_in_shcmea_inference/ast.json b/parser/testdata/02244_column_names_in_shcmea_inference/ast.json new file mode 100644 index 000000000..17ba2c8b3 --- /dev/null +++ b/parser/testdata/02244_column_names_in_shcmea_inference/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "InsertQuery (children 3)" + }, + { + "explain": " Function file (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function currentDatabase (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal '_test_02244'" + }, + { + "explain": " Literal 'TSV'" + }, + { + "explain": " Literal 'x String, y UInt32'" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'Hello, world!'" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Set" + }, + { + "explain": " Set" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001159227, + "rows_read": 18, + "bytes_read": 605 + } +} diff --git a/parser/testdata/02244_column_names_in_shcmea_inference/metadata.json b/parser/testdata/02244_column_names_in_shcmea_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02244_column_names_in_shcmea_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02244_column_names_in_shcmea_inference/query.sql b/parser/testdata/02244_column_names_in_shcmea_inference/query.sql new file mode 100644 index 000000000..532e9f27e --- /dev/null +++ b/parser/testdata/02244_column_names_in_shcmea_inference/query.sql @@ -0,0 +1,11 @@ +insert into function file(currentDatabase() || '_test_02244', 'TSV', 'x String, y UInt32') select 'Hello, world!', 42 settings engine_file_truncate_on_insert=1; +desc file(currentDatabase() || '_test_02244', 'TSV') settings column_names_for_schema_inference='x,y'; + +insert into function file(currentDatabase() || '_test_02244', 'CSV', 'x String, y UInt32') select 'Hello, world!', 42 settings engine_file_truncate_on_insert=1; +desc file(currentDatabase() || 
'_test_02244', 'CSV') settings column_names_for_schema_inference='x,y'; + +insert into function file(currentDatabase() || '_test_02244', 'JSONCompactEachRow', 'x String, y UInt32') select 'Hello, world!', 42 settings engine_file_truncate_on_insert=1; +desc file(currentDatabase() || '_test_02244', 'JSONCompactEachRow') settings column_names_for_schema_inference='x,y'; + +insert into function file(currentDatabase() || '_test_02244', 'Values', 'x String, y UInt32') select 'Hello, world!', 42 settings engine_file_truncate_on_insert=1; +desc file(currentDatabase() || '_test_02244', 'Values') settings column_names_for_schema_inference='x,y'; diff --git a/parser/testdata/02244_ip_address_invalid_insert/ast.json b/parser/testdata/02244_ip_address_invalid_insert/ast.json new file mode 100644 index 000000000..66ad78235 --- /dev/null +++ b/parser/testdata/02244_ip_address_invalid_insert/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table_ipv4 (children 1)" + }, + { + "explain": " Identifier test_table_ipv4" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001405334, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02244_ip_address_invalid_insert/metadata.json b/parser/testdata/02244_ip_address_invalid_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02244_ip_address_invalid_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02244_ip_address_invalid_insert/query.sql b/parser/testdata/02244_ip_address_invalid_insert/query.sql new file mode 100644 index 000000000..52b4c94e8 --- /dev/null +++ b/parser/testdata/02244_ip_address_invalid_insert/query.sql @@ -0,0 +1,81 @@ +DROP TABLE IF EXISTS test_table_ipv4; +CREATE TABLE test_table_ipv4 +( + ip String, + ipv4 IPv4 +) ENGINE = TinyLog; + +INSERT INTO test_table_ipv4 VALUES ('1.1.1.1', '1.1.1.1'), ('', ''); --{error CANNOT_PARSE_IPV4} + +SET input_format_ipv4_default_on_conversion_error = 1; + +INSERT INTO test_table_ipv4 VALUES ('1.1.1.1', '1.1.1.1'), ('', ''); +SELECT ip, ipv4 FROM test_table_ipv4; + +SET input_format_ipv4_default_on_conversion_error = 0; + +DROP TABLE test_table_ipv4; + +DROP TABLE IF EXISTS test_table_ipv4_materialized; +CREATE TABLE test_table_ipv4_materialized +( + ip String, + ipv6 IPv4 MATERIALIZED toIPv4(ip) +) ENGINE = TinyLog; + +INSERT INTO test_table_ipv4_materialized(ip) VALUES ('1.1.1.1'), (''); --{serverError CANNOT_PARSE_IPV4} + +SET input_format_ipv4_default_on_conversion_error = 1; + +INSERT INTO test_table_ipv4_materialized(ip) VALUES ('1.1.1.1'), (''); --{serverError CANNOT_PARSE_IPV4} + +SET cast_ipv4_ipv6_default_on_conversion_error = 1; + +INSERT INTO test_table_ipv4_materialized(ip) VALUES ('1.1.1.1'), (''); +SELECT ip, ipv6 FROM test_table_ipv4_materialized; + +SET input_format_ipv4_default_on_conversion_error = 0; +SET cast_ipv4_ipv6_default_on_conversion_error = 0; + +DROP TABLE test_table_ipv4_materialized; + +DROP TABLE IF EXISTS test_table_ipv6; +CREATE TABLE test_table_ipv6 +( + ip String, + ipv6 IPv6 +) ENGINE = TinyLog; + +INSERT INTO test_table_ipv6 VALUES ('fe80::9801:43ff:fe1f:7690', 'fe80::9801:43ff:fe1f:7690'), ('1.1.1.1', '1.1.1.1'), ('', ''); --{error CANNOT_PARSE_IPV6} + +SET input_format_ipv6_default_on_conversion_error = 1; + +INSERT INTO test_table_ipv6 VALUES ('fe80::9801:43ff:fe1f:7690', 'fe80::9801:43ff:fe1f:7690'), ('1.1.1.1', '1.1.1.1'), ('', ''); +SELECT ip, ipv6 FROM 
test_table_ipv6; + +SET input_format_ipv6_default_on_conversion_error = 0; + +DROP TABLE test_table_ipv6; + +DROP TABLE IF EXISTS test_table_ipv6_materialized; +CREATE TABLE test_table_ipv6_materialized +( + ip String, + ipv6 IPv6 MATERIALIZED toIPv6(ip) +) ENGINE = TinyLog; + +INSERT INTO test_table_ipv6_materialized(ip) VALUES ('fe80::9801:43ff:fe1f:7690'), ('1.1.1.1'), (''); --{serverError CANNOT_PARSE_IPV6} + +SET input_format_ipv6_default_on_conversion_error = 1; + +INSERT INTO test_table_ipv6_materialized(ip) VALUES ('fe80::9801:43ff:fe1f:7690'), ('1.1.1.1'), (''); --{serverError CANNOT_PARSE_IPV6} + +SET cast_ipv4_ipv6_default_on_conversion_error = 1; + +INSERT INTO test_table_ipv6_materialized(ip) VALUES ('fe80::9801:43ff:fe1f:7690'), ('1.1.1.1'), (''); +SELECT ip, ipv6 FROM test_table_ipv6_materialized; + +SET input_format_ipv6_default_on_conversion_error = 0; +SET cast_ipv4_ipv6_default_on_conversion_error = 0; + +DROP TABLE test_table_ipv6_materialized; diff --git a/parser/testdata/02244_lowcardinality_hash_join/ast.json b/parser/testdata/02244_lowcardinality_hash_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02244_lowcardinality_hash_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02244_lowcardinality_hash_join/metadata.json b/parser/testdata/02244_lowcardinality_hash_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02244_lowcardinality_hash_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02244_lowcardinality_hash_join/query.sql b/parser/testdata/02244_lowcardinality_hash_join/query.sql new file mode 100644 index 000000000..4ea949e42 --- /dev/null +++ b/parser/testdata/02244_lowcardinality_hash_join/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-parallel +DROP TABLE IF EXISTS lc_table; + +CREATE TABLE lc_table +( + col LowCardinality(String) +) ENGINE=TinyLog; + +INSERT INTO lc_table VALUES('x'); + +SELECT * FROM lc_table INNER JOIN lc_table AS lc_table2 +ON lc_table.col = lc_table2.col; + +SELECT * FROM lc_table INNER JOIN lc_table AS lc_table2 +ON CAST(lc_table.col AS String) = CAST(lc_table2.col AS String); + +SELECT * FROM lc_table INNER JOIN lc_table AS lc_table2 +ON (lc_table.col = lc_table2.col) OR (lc_table.col = lc_table2.col); + +SELECT * FROM lc_table INNER JOIN lc_table AS lc_table2 +ON (CAST(lc_table.col AS String) = CAST(lc_table2.col AS String)) OR (CAST(lc_table.col AS String) = CAST(lc_table2.col AS String)); + +DROP TABLE IF EXISTS lc_table; diff --git a/parser/testdata/02244_make_datetime/ast.json b/parser/testdata/02244_make_datetime/ast.json new file mode 100644 index 000000000..ec75242ee --- /dev/null +++ b/parser/testdata/02244_make_datetime/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function makeDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 6)" + }, + { + "explain": " Literal UInt64_1991" + }, + { + "explain": " Literal UInt64_8" + }, + { + "explain": " Literal UInt64_24" + }, + { + "explain": " Literal UInt64_21" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001252024, + 
"rows_read": 12, + "bytes_read": 420 + } +} diff --git a/parser/testdata/02244_make_datetime/metadata.json b/parser/testdata/02244_make_datetime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02244_make_datetime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02244_make_datetime/query.sql b/parser/testdata/02244_make_datetime/query.sql new file mode 100644 index 000000000..a3d88d896 --- /dev/null +++ b/parser/testdata/02244_make_datetime/query.sql @@ -0,0 +1,39 @@ +select makeDateTime(1991, 8, 24, 21, 4, 0); +select makeDateTime(1991, 8, 24, 21, 4, 0, 'CET'); +select cast(makeDateTime(1991, 8, 24, 21, 4, 0, 'CET') as DateTime('UTC')); + +select toTypeName(makeDateTime(1991, 8, 24, 21, 4, 0)); +select toTypeName(makeDateTime(1991, 8, 24, 21, 4, 0, 'CET')); + +select makeDateTime(1925, 1, 1, 0, 0, 0, 'UTC'); +select makeDateTime(1924, 12, 31, 23, 59, 59, 'UTC'); +select makeDateTime(2283, 11, 11, 23, 59, 59, 'UTC'); +select makeDateTime(2283, 11, 12, 0, 0, 0, 'UTC'); +select makeDateTime(2262, 4, 11, 23, 47, 16, 'UTC'); +select makeDateTime(2262, 4, 11, 23, 47, 17, 'UTC'); +select makeDateTime(2262, 4, 11, 23, 47, 16, 'UTC'); + +select makeDateTime(1984, 0, 1, 0, 0, 0, 'UTC'); +select makeDateTime(1984, 1, 0, 0, 0, 0, 'UTC'); +select makeDateTime(1984, 13, 1, 0, 0, 0, 'UTC'); +select makeDateTime(1984, 1, 41, 0, 0, 0, 'UTC'); +select makeDateTime(1984, 1, 1, 25, 0, 0, 'UTC'); +select makeDateTime(1984, 1, 1, 0, 70, 0, 'UTC'); +select makeDateTime(1984, 1, 1, 0, 0, 70, 'UTC'); +select makeDateTime(1984, 1, 1, 0, 0, 0, 'not a timezone'); -- { serverError BAD_ARGUMENTS } + +select makeDateTime(1984, 1, 1, 0, 0, 0, 'UTC'); +select makeDateTime(1983, 2, 29, 0, 0, 0, 'UTC'); +select makeDateTime(-1984, 1, 1, 0, 0, 0, 'UTC'); +select makeDateTime(1984, -1, 1, 0, 0, 0, 'UTC'); +select makeDateTime(1984, 1, -1, 0, 0, 0, 'UTC'); +select makeDateTime(1984, 1, 1, -1, 0, 0, 'UTC'); +select makeDateTime(1984, 1, 1, 0, -1, 0, 'UTC'); +select makeDateTime(1984, 1, 1, 0, 0, -1, 'UTC'); + +select makeDateTime(65537, 8, 24, 21, 4, 0, 'UTC'); +select makeDateTime(1991, 65537, 24, 21, 4, 0, 'UTC'); +select makeDateTime(1991, 8, 65537, 21, 4, 0, 'UTC'); +select makeDateTime(1991, 8, 24, 65537, 4, 0, 'UTC'); +select makeDateTime(1991, 8, 24, 21, 65537, 0, 'UTC'); +select makeDateTime(1991, 8, 24, 21, 4, 65537, 'UTC'); \ No newline at end of file diff --git a/parser/testdata/02244_url_engine_headers_test/ast.json b/parser/testdata/02244_url_engine_headers_test/ast.json new file mode 100644 index 000000000..19852c546 --- /dev/null +++ b/parser/testdata/02244_url_engine_headers_test/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function url (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier url_with_headers" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier url" + }, + { + "explain": " Literal 
'http:\/\/127.0.0.1:8123?query=select+12'" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier format" + }, + { + "explain": " Literal 'RawBLOB'" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001115815, + "rows_read": 19, + "bytes_read": 771 + } +} diff --git a/parser/testdata/02244_url_engine_headers_test/metadata.json b/parser/testdata/02244_url_engine_headers_test/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02244_url_engine_headers_test/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02244_url_engine_headers_test/query.sql b/parser/testdata/02244_url_engine_headers_test/query.sql new file mode 100644 index 000000000..c172e8101 --- /dev/null +++ b/parser/testdata/02244_url_engine_headers_test/query.sql @@ -0,0 +1,12 @@ +select * from url(url_with_headers, url='http://127.0.0.1:8123?query=select+12', format='RawBLOB'); -- { serverError RECEIVED_ERROR_FROM_REMOTE_IO_SERVER } +select * from url(url_with_headers, url='http://127.0.0.1:8123?query=select+12', format='RawBLOB', headers('X-ClickHouse-Database'='default')); +select * from url(url_with_headers, url='http://127.0.0.1:8123?query=select+12', format='RawBLOB', headers('X-ClickHouse-Database'='default', 'X-ClickHouse-Format'='JSONEachRow')); +select * from url(url_with_headers, url='http://127.0.0.1:8123?query=select+12', format='RawBLOB', headers('X-ClickHouse-Database'='kek')); -- { serverError RECEIVED_ERROR_FROM_REMOTE_IO_SERVER } +select * from url('http://127.0.0.1:8123?query=select+12', 'RawBLOB'); +select * from url('http://127.0.0.1:8123?query=select+12', 'RawBLOB', headers('X-ClickHouse-Database'='default')); +select * from url('http://127.0.0.1:8123?query=select+12', 'RawBLOB', headers('X-ClickHouse-Database'='default', 'X-ClickHouse-Format'='JSONEachRow')); +select * from url('http://127.0.0.1:8123?query=select+12', 'RawBLOB', headers('X-ClickHouse-Format'='JSONEachRow', 'X-ClickHouse-Database'='kek')); -- { serverError RECEIVED_ERROR_FROM_REMOTE_IO_SERVER } +select * from url('http://127.0.0.1:8123?query=select+12', 'RawBLOB', headers('X-ClickHouse-Format'='JSONEachRow', 'X-ClickHouse-Database'=1)); -- { serverError BAD_ARGUMENTS } +drop table if exists url; +create table url (i String) engine=URL('http://127.0.0.1:8123?query=select+12', 'RawBLOB', headers('X-ClickHouse-Format'='JSONEachRow')); +select * from url; diff --git a/parser/testdata/02245_format_string_stack_overflow/ast.json b/parser/testdata/02245_format_string_stack_overflow/ast.json new file mode 100644 index 000000000..0f614346d --- /dev/null +++ b/parser/testdata/02245_format_string_stack_overflow/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function format (alias str) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 
'{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0
}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}'" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001392961, + "rows_read": 16, + "bytes_read": 6636 + } +} diff --git a/parser/testdata/02245_format_string_stack_overflow/metadata.json b/parser/testdata/02245_format_string_stack_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02245_format_string_stack_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02245_format_string_stack_overflow/query.sql b/parser/testdata/02245_format_string_stack_overflow/query.sql new file mode 100644 index 000000000..1ee3606d3 --- /dev/null 
+++ b/parser/testdata/02245_format_string_stack_overflow/query.sql @@ -0,0 +1 @@ +select format('{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0
}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}{0}', toString(number)) str from numbers(1); diff --git a/parser/testdata/02245_join_with_nullable_lowcardinality_crash/ast.json b/parser/testdata/02245_join_with_nullable_lowcardinality_crash/ast.json new file mode 100644 index 000000000..d29c19d54 --- /dev/null +++ b/parser/testdata/02245_join_with_nullable_lowcardinality_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery with_nullable (children 1)" + }, + { + "explain": " Identifier with_nullable" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001149635, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/02245_join_with_nullable_lowcardinality_crash/metadata.json b/parser/testdata/02245_join_with_nullable_lowcardinality_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02245_join_with_nullable_lowcardinality_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff 
--git a/parser/testdata/02245_join_with_nullable_lowcardinality_crash/query.sql b/parser/testdata/02245_join_with_nullable_lowcardinality_crash/query.sql new file mode 100644 index 000000000..75c3633cd --- /dev/null +++ b/parser/testdata/02245_join_with_nullable_lowcardinality_crash/query.sql @@ -0,0 +1,21 @@ +drop table if exists with_nullable; +drop table if exists without_nullable; + +CREATE TABLE with_nullable +( timestamp UInt32, + country LowCardinality(Nullable(String)) ) ENGINE = MergeTree ORDER BY tuple(); + +CREATE TABLE without_nullable +( timestamp UInt32, + country LowCardinality(String)) ENGINE = MergeTree ORDER BY tuple(); + +insert into with_nullable values(0,'f'),(0,'usa'); +insert into without_nullable values(0,'usa'),(0,'us2a'); + +select if(t0.country is null ,t2.country,t0.country) "country" +from without_nullable t0 right outer join with_nullable t2 on t0.country=t2.country +ORDER BY 1 DESC; + +drop table with_nullable; +drop table without_nullable; + diff --git a/parser/testdata/02245_make_datetime64/ast.json b/parser/testdata/02245_make_datetime64/ast.json new file mode 100644 index 000000000..aff4426e1 --- /dev/null +++ b/parser/testdata/02245_make_datetime64/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function makeDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 6)" + }, + { + "explain": " Literal UInt64_1991" + }, + { + "explain": " Literal UInt64_8" + }, + { + "explain": " Literal UInt64_24" + }, + { + "explain": " Literal UInt64_21" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.00148269, + "rows_read": 12, + "bytes_read": 422 + } +} diff --git a/parser/testdata/02245_make_datetime64/metadata.json b/parser/testdata/02245_make_datetime64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02245_make_datetime64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02245_make_datetime64/query.sql b/parser/testdata/02245_make_datetime64/query.sql new file mode 100644 index 000000000..45a058fc4 --- /dev/null +++ b/parser/testdata/02245_make_datetime64/query.sql @@ -0,0 +1,92 @@ +select makeDateTime64(1991, 8, 24, 21, 4, 0); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 123); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 6); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 7, 'CET'); +select cast(makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 7, 'CET') as DateTime64(7, 'UTC')); + +select toTypeName(makeDateTime64(1991, 8, 24, 21, 4, 0)); +select toTypeName(makeDateTime64(1991, 8, 24, 21, 4, 0, 123)); +select toTypeName(makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 6)); +select toTypeName(makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 7, 'CET')); +select toTypeName(cast(makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 7, 'CET') as DateTime64(7, 'UTC'))); + +select makeDateTime64(1900, 1, 1, 0, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1899, 12, 31, 23, 59, 59, 999999999, 9, 'UTC'); +select makeDateTime64(2299, 12, 31, 23, 59, 59, 99999999, 8, 'UTC'); +select makeDateTime64(2299, 12, 31, 23, 59, 59, 999999999, 9, 'UTC'); -- { serverError DECIMAL_OVERFLOW } +select makeDateTime64(2262, 4, 11, 
23, 47, 16, 854775807, 9, 'UTC'); +select makeDateTime64(2262, 4, 11, 23, 47, 16, 854775808, 9, 'UTC'); -- { serverError DECIMAL_OVERFLOW } +select makeDateTime64(2262, 4, 11, 23, 47, 16, 85477581, 8, 'UTC'); + +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 0, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 1, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 2, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 3, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 4, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 5, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 6, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 7, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 8, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 9, 'CET'); +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, 10, 'CET'); -- { serverError ARGUMENT_OUT_OF_BOUND } +select makeDateTime64(1991, 8, 24, 21, 4, 0, 1234, -1, 'CET'); -- { serverError ARGUMENT_OUT_OF_BOUND } + +select makeDateTime64(1984, 0, 1, 0, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 0, 0, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 13, 1, 0, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 41, 0, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 25, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 0, 70, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 0, 0, 70, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 0, 0, 0, 0, 9, 'not a timezone'); -- { serverError BAD_ARGUMENTS } + +select makeDateTime64(1984, 1, 1, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 2, 29, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1983, 2, 29, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 2, 30, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1983, 2, 30, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 2, 31, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1983, 2, 31, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 2, 32, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1983, 2, 32, 2, 3, 4, 5, 9, 'UTC'); + +select makeDateTime64(-1984, 1, 1, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, -1, 1, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, -1, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, -1, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 2, -1, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 2, 3, -1, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 2, 3, 4, -1, 9, 'UTC'); + +select makeDateTime64(NaN, 1, 1, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, NaN, 1, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, NaN, 2, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, NaN, 3, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 2, NaN, 4, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 2, 3, NaN, 5, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 2, 3, 4, NaN, 9, 'UTC'); + +select makeDateTime64(1984.5, 1, 1, 0, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1.5, 1, 0, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 1.5, 0, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 0.5, 0, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 0, 0.5, 0, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 0, 0, 0.5, 0, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 0, 0, 0, 0.5, 9, 'UTC'); +select makeDateTime64(1984, 1, 1, 0, 0, 0, 0, 9.5, 'UTC'); + +select makeDateTime64(65537, 8, 24, 21, 4, 0); +select makeDateTime64(1991, 65537, 24, 21, 4, 0); +select makeDateTime64(1991, 8, 65537, 
21, 4, 0); +select makeDateTime64(1991, 8, 24, 65537, 4, 0); +select makeDateTime64(1991, 8, 24, 21, 65537, 0); +select makeDateTime64(1991, 8, 24, 21, 4, 65537); + +-- bug 58590 +select makeDateTime64(2024, 1, 8, 11, 12, 13, materialize(14)); + +select makeDateTime64(year, 1, 1, 1, 0, 0, 0, precision, timezone) from ( + select 1984 as year, 5 as precision, 'UTC' as timezone + union all + select 1985 as year, 5 as precision, 'UTC' as timezone +); -- { serverError ILLEGAL_COLUMN } diff --git a/parser/testdata/02245_s3_schema_desc/ast.json b/parser/testdata/02245_s3_schema_desc/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02245_s3_schema_desc/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02245_s3_schema_desc/metadata.json b/parser/testdata/02245_s3_schema_desc/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02245_s3_schema_desc/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02245_s3_schema_desc/query.sql b/parser/testdata/02245_s3_schema_desc/query.sql new file mode 100644 index 000000000..8c12d1968 --- /dev/null +++ b/parser/testdata/02245_s3_schema_desc/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +desc s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv'); +desc s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'TSV'); +desc s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64'); +desc s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest'); +desc s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64', 'auto'); +desc s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', 'TSV'); +desc s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64'); +desc s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'test', 'testtest', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64', 'auto'); + + +SELECT * FROM s3(decodeURLComponent(NULL), [NULL]); --{serverError BAD_ARGUMENTS} diff --git a/parser/testdata/02245_s3_support_read_nested_column/ast.json b/parser/testdata/02245_s3_support_read_nested_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02245_s3_support_read_nested_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02245_s3_support_read_nested_column/metadata.json b/parser/testdata/02245_s3_support_read_nested_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02245_s3_support_read_nested_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02245_s3_support_read_nested_column/query.sql b/parser/testdata/02245_s3_support_read_nested_column/query.sql new file mode 100644 index 000000000..921f27a0a --- /dev/null +++ b/parser/testdata/02245_s3_support_read_nested_column/query.sql @@ -0,0 +1,45 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +-- { echo } +drop table if exists test_02245_s3_nested_parquet1; +drop table if exists test_02245_s3_nested_parquet2; +set 
input_format_parquet_import_nested = 1; +set s3_truncate_on_insert = 1; +create table test_02245_s3_nested_parquet1(a Int64, b Tuple(a Int64, b String)) engine=S3(s3_conn, filename='test_02245_s3_nested_parquet1_{_partition_id}', format='Parquet') partition by a; +insert into test_02245_s3_nested_parquet1 values (1, (2, 'a')); + +select a, b.a, b.b from s3(s3_conn, filename='test_02245_s3_nested_parquet1_*', format='Parquet'); + +create table test_02245_s3_nested_parquet2(a Int64, b Tuple(a Int64, b Tuple(c Int64, d String))) engine=S3(s3_conn, filename='test_02245_s3_nested_parquet2_{_partition_id}', format='Parquet') partition by a; +insert into test_02245_s3_nested_parquet2 values (1, (2, (3, 'a'))); + +select a, b.a, b.b.c, b.b.d from s3(s3_conn, filename='test_02245_s3_nested_parquet2_*', format='Parquet', structure='a Int64, b Tuple(a Int64, b Tuple(c Int64, d String))'); + + +drop table if exists test_02245_s3_nested_arrow1; +drop table if exists test_02245_s3_nested_arrow2; +set input_format_arrow_import_nested=1; +create table test_02245_s3_nested_arrow1(a Int64, b Tuple(a Int64, b String)) engine=S3(s3_conn, filename='test_02245_s3_nested_arrow1_{_partition_id}', format='Arrow') partition by a; +insert into test_02245_s3_nested_arrow1 values (1, (2, 'a')); + +select a, b.a, b.b from s3(s3_conn, filename='test_02245_s3_nested_arrow1_*', format='Arrow'); + +create table test_02245_s3_nested_arrow2(a Int64, b Tuple(a Int64, b Tuple(c Int64, d String))) engine=S3(s3_conn, filename='test_02245_s3_nested_arrow2_{_partition_id}', format='Arrow') partition by a; +insert into test_02245_s3_nested_arrow2 values (1, (2, (3, 'a'))); + +select a, b.a, b.b.c, b.b.d from s3(s3_conn, filename='test_02245_s3_nested_arrow2_*', format='Arrow', structure='a Int64, b Tuple(a Int64, b Tuple(c Int64, d String))'); + + +drop table if exists test_02245_s3_nested_orc1; +drop table if exists test_02245_s3_nested_orc2; +set input_format_orc_import_nested=1; +create table test_02245_s3_nested_orc1(a Int64, b Tuple(a Int64, b String)) engine=S3(s3_conn, filename='test_02245_s3_nested_orc1_{_partition_id}', format='ORC') partition by a; +insert into test_02245_s3_nested_orc1 values (1, (2, 'a')); + +select a, b.a, b.b from s3(s3_conn, filename='test_02245_s3_nested_orc1_*', format='ORC'); + +create table test_02245_s3_nested_orc2(a Int64, b Tuple(a Int64, b Tuple(c Int64, d String))) engine=S3(s3_conn, filename='test_02245_s3_nested_orc2_{_partition_id}', format='ORC') partition by a; +insert into test_02245_s3_nested_orc2 values (1, (2, (3, 'a'))); + +select a, b.a, b.b.c, b.b.d from s3(s3_conn, filename='test_02245_s3_nested_orc2_*', format='ORC', structure='a Int64, b Tuple(a Int64, b Tuple(c Int64, d String))'); diff --git a/parser/testdata/02245_s3_virtual_columns/ast.json b/parser/testdata/02245_s3_virtual_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02245_s3_virtual_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02245_s3_virtual_columns/metadata.json b/parser/testdata/02245_s3_virtual_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02245_s3_virtual_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02245_s3_virtual_columns/query.sql b/parser/testdata/02245_s3_virtual_columns/query.sql new file mode 100644 index 000000000..805f7873b --- /dev/null +++ b/parser/testdata/02245_s3_virtual_columns/query.sql @@ -0,0 +1,16 @@ +-- Tags: 
no-fasttest +-- Tag no-fasttest: Depends on AWS + +-- { echo } +drop table if exists test_02245; +create table test_02245 (a UInt64) engine = S3(s3_conn, filename='test_02245', format=Parquet); +insert into test_02245 select 1 settings s3_truncate_on_insert=1; +select * from test_02245; +select _path from test_02245; + +drop table if exists test_02245_2; +create table test_02245_2 (a UInt64, _path Int32) engine = S3(s3_conn, filename='test_02245_2', format=Parquet); +insert into test_02245_2 select 1, 2 settings s3_truncate_on_insert=1; +select * from test_02245_2; +select _path, isNotNull(_etag) from test_02245_2; +select count() from test_02245_2 where _etag IN (select _etag from test_02245_2); diff --git a/parser/testdata/02245_weird_partitions_pruning/ast.json b/parser/testdata/02245_weird_partitions_pruning/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02245_weird_partitions_pruning/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02245_weird_partitions_pruning/metadata.json b/parser/testdata/02245_weird_partitions_pruning/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02245_weird_partitions_pruning/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02245_weird_partitions_pruning/query.sql b/parser/testdata/02245_weird_partitions_pruning/query.sql new file mode 100644 index 000000000..8b0208da6 --- /dev/null +++ b/parser/testdata/02245_weird_partitions_pruning/query.sql @@ -0,0 +1,63 @@ +-- Tags: no-random-merge-tree-settings + +-- We use a hack - partition by ignore(d1). In some cases there are two columns +-- that are not fully correlated (correlation < 1), e.g. date_begin and date_end, or datetime and datetime_in_TZ_with_DST. +-- If we partitioned by both columns instead of one, there would be twice as many partitions. +-- Partitioning by (..., ignore(d1)) partitions by the first column but builds +-- min_max indexes for both columns, so partition pruning works for both. +-- It's very similar to a min_max skip index, but gives a bigger performance boost, +-- because partition pruning happens at a very early stage of the query.
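+-- A minimal sketch of the same pattern, using the hypothetical date_begin/date_end
+-- column names from the comment above (they are not part of this test):
+--
+--   CREATE TABLE events(date_begin Date, date_end Date, id UInt64)
+--   ENGINE = MergeTree
+--   PARTITION BY (toYYYYMM(date_begin), ignore(date_end))
+--   ORDER BY id;
+--
+-- A filter on either date_begin or date_end can then prune whole partitions via
+-- the per-partition min_max values, as the queries below demonstrate for d and d1.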
+ + +DROP TABLE IF EXISTS weird_partitions_02245; + +CREATE TABLE weird_partitions_02245(d DateTime, d1 DateTime default d - toIntervalHour(8), id Int64) +Engine=MergeTree +PARTITION BY (toYYYYMM(toDateTime(d)), ignore(d1)) +ORDER BY id; + +INSERT INTO weird_partitions_02245(d, id) +SELECT + toDateTime('2021-12-31 22:30:00') AS d, + number +FROM numbers(1000); + +INSERT INTO weird_partitions_02245(d, id) +SELECT + toDateTime('2022-01-01 00:30:00') AS d, + number +FROM numbers(1000); + +INSERT INTO weird_partitions_02245(d, id) +SELECT + toDateTime('2022-01-31 22:30:00') AS d, + number +FROM numbers(1000); + +INSERT INTO weird_partitions_02245(d, id) +SELECT + toDateTime('2023-01-31 22:30:00') AS d, + number +FROM numbers(1000); + +OPTIMIZE TABLE weird_partitions_02245; +OPTIMIZE TABLE weird_partitions_02245; + +SELECT DISTINCT _partition_id, _partition_value FROM weird_partitions_02245 ORDER BY _partition_id ASC; + +SELECT _partition_id, min(d), max(d), min(d1), max(d1), count() FROM weird_partitions_02245 GROUP BY _partition_id ORDER BY _partition_id ASC; + +select DISTINCT _partition_id from weird_partitions_02245 where d >= '2021-12-31 00:00:00' and d < '2022-01-01 00:00:00' ORDER BY _partition_id; +explain estimate select DISTINCT _partition_id from weird_partitions_02245 where d >= '2021-12-31 00:00:00' and d < '2022-01-01 00:00:00'; + +select DISTINCT _partition_id from weird_partitions_02245 where d >= '2022-01-01 00:00:00' and d1 >= '2021-12-31 00:00:00' and d1 < '2022-01-01 00:00:00' ORDER BY _partition_id;; +explain estimate select DISTINCT _partition_id from weird_partitions_02245 where d >= '2022-01-01 00:00:00' and d1 >= '2021-12-31 00:00:00' and d1 < '2022-01-01 00:00:00'; + +select DISTINCT _partition_id from weird_partitions_02245 where d1 >= '2021-12-31 00:00:00' and d1 < '2022-01-01 00:00:00' ORDER BY _partition_id;; +explain estimate select DISTINCT _partition_id from weird_partitions_02245 where d1 >= '2021-12-31 00:00:00' and d1 < '2022-01-01 00:00:00'; + +select DISTINCT _partition_id from weird_partitions_02245 where d >= '2022-01-01 00:00:00' and d1 >= '2021-12-31 00:00:00' and d1 < '2020-01-01 00:00:00' ORDER BY _partition_id;; +explain estimate select DISTINCT _partition_id from weird_partitions_02245 where d >= '2022-01-01 00:00:00' and d1 >= '2021-12-31 00:00:00' and d1 < '2020-01-01 00:00:00'; + +DROP TABLE weird_partitions_02245; + diff --git a/parser/testdata/02246_flatten_tuple/ast.json b/parser/testdata/02246_flatten_tuple/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02246_flatten_tuple/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02246_flatten_tuple/metadata.json b/parser/testdata/02246_flatten_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02246_flatten_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02246_flatten_tuple/query.sql b/parser/testdata/02246_flatten_tuple/query.sql new file mode 100644 index 000000000..e0a84011d --- /dev/null +++ b/parser/testdata/02246_flatten_tuple/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS t_flatten_tuple; +DROP TABLE IF EXISTS t_flatten_object; + +SET flatten_nested = 0; + +CREATE TABLE t_flatten_tuple(t Tuple(t1 Nested(a UInt32, s String), b UInt32, t2 Tuple(k String, v UInt32))) ENGINE = Memory; + +INSERT INTO t_flatten_tuple VALUES (([(1, 'a'), (2, 'b')], 3, ('c', 4))); + +SELECT flattenTuple(t) AS ft, toTypeName(ft) FROM 
t_flatten_tuple; + +DROP TABLE IF EXISTS t_flatten_tuple; diff --git a/parser/testdata/02247_fix_extract_parser/ast.json b/parser/testdata/02247_fix_extract_parser/ast.json new file mode 100644 index 000000000..b1a60417b --- /dev/null +++ b/parser/testdata/02247_fix_extract_parser/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'number: 1' (alias year)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function extract (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier year" + }, + { + "explain": " Literal '\\\\d+'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001002023, + "rows_read": 10, + "bytes_read": 369 + } +} diff --git a/parser/testdata/02247_fix_extract_parser/metadata.json b/parser/testdata/02247_fix_extract_parser/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02247_fix_extract_parser/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02247_fix_extract_parser/query.sql b/parser/testdata/02247_fix_extract_parser/query.sql new file mode 100644 index 000000000..9b721a6e8 --- /dev/null +++ b/parser/testdata/02247_fix_extract_parser/query.sql @@ -0,0 +1,3 @@ +WITH 'number: 1' as year SELECT extract(year, '\\d+'); +WITH 'number: 2' as mm SELECT extract(mm, '\\d+'); +WITH 'number: 3' as s SELECT extract(s, '\\d+'); diff --git a/parser/testdata/02248_nullable_custom_types_to_string/ast.json b/parser/testdata/02248_nullable_custom_types_to_string/ast.json new file mode 100644 index 000000000..f679020bc --- /dev/null +++ b/parser/testdata/02248_nullable_custom_types_to_string/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Bool_1" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001755433, + "rows_read": 9, + "bytes_read": 349 + } +} diff --git a/parser/testdata/02248_nullable_custom_types_to_string/metadata.json b/parser/testdata/02248_nullable_custom_types_to_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02248_nullable_custom_types_to_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02248_nullable_custom_types_to_string/query.sql b/parser/testdata/02248_nullable_custom_types_to_string/query.sql new file mode 100644 index 000000000..313f703fd --- /dev/null +++ b/parser/testdata/02248_nullable_custom_types_to_string/query.sql @@ -0,0 +1,6 @@ +select toString(toNullable(true)); +select toString(CAST(NULL, 'Nullable(Bool)')); +select toString(toNullable(toIPv4('0.0.0.0'))); +select toString(CAST(NULL, 'Nullable(IPv4)')); +select toString(toNullable(toIPv6('::ffff:127.0.0.1'))); +select toString(CAST(NULL, 
'Nullable(IPv6)')); diff --git a/parser/testdata/02249_insert_select_from_input_schema_inference/ast.json b/parser/testdata/02249_insert_select_from_input_schema_inference/ast.json new file mode 100644 index 000000000..7f1a8b3bb --- /dev/null +++ b/parser/testdata/02249_insert_select_from_input_schema_inference/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00118323, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02249_insert_select_from_input_schema_inference/metadata.json b/parser/testdata/02249_insert_select_from_input_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02249_insert_select_from_input_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02249_insert_select_from_input_schema_inference/query.sql b/parser/testdata/02249_insert_select_from_input_schema_inference/query.sql new file mode 100644 index 000000000..a0c6701fa --- /dev/null +++ b/parser/testdata/02249_insert_select_from_input_schema_inference/query.sql @@ -0,0 +1,8 @@ +set use_structure_from_insertion_table_in_table_functions = 1; + +drop table if exists test_02249; +create table test_02249 (x UInt32, y String) engine=Memory(); +insert into test_02249 select * from input() format JSONEachRow {"x" : 1, "y" : "string1"}, {"y" : "string2", "x" : 2}; + +select * from test_02249; +drop table test_02249; diff --git a/parser/testdata/02249_parse_date_time_basic/ast.json b/parser/testdata/02249_parse_date_time_basic/ast.json new file mode 100644 index 000000000..aede111cd --- /dev/null +++ b/parser/testdata/02249_parse_date_time_basic/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001536106, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02249_parse_date_time_basic/metadata.json b/parser/testdata/02249_parse_date_time_basic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02249_parse_date_time_basic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02249_parse_date_time_basic/query.sql b/parser/testdata/02249_parse_date_time_basic/query.sql new file mode 100644 index 000000000..7146462fb --- /dev/null +++ b/parser/testdata/02249_parse_date_time_basic/query.sql @@ -0,0 +1,10 @@ +SET date_time_output_format='iso'; +drop table if exists t; +CREATE TABLE t (a DateTime('UTC'), b String, c String, d String, e Int32) ENGINE = Memory; +INSERT INTO t(a, b, c, d ,e) VALUES ('2022-03-31','','','',1); +INSERT INTO t(a, b, c, d ,e) VALUES (1648804224,'','','',2); +INSERT INTO t(a, b, c, d ,e) VALUES ('2022-03-31 10:18:56','','','',3); +INSERT INTO t(a, b, c, d ,e) VALUES ('2022-03-31T10:18:56','','','',4); +INSERT INTO t(a, b, c, d ,e) VALUES ('1648804224','','','',5); +select a, e from t order by e; +drop table if exists t; diff --git a/parser/testdata/02250_insert_select_from_file_schema_inference/ast.json b/parser/testdata/02250_insert_select_from_file_schema_inference/ast.json new file mode 100644 index 000000000..63c4f87b7 --- /dev/null +++ b/parser/testdata/02250_insert_select_from_file_schema_inference/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + 
"explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00114279, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02250_insert_select_from_file_schema_inference/metadata.json b/parser/testdata/02250_insert_select_from_file_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02250_insert_select_from_file_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02250_insert_select_from_file_schema_inference/query.sql b/parser/testdata/02250_insert_select_from_file_schema_inference/query.sql new file mode 100644 index 000000000..e88cf85e0 --- /dev/null +++ b/parser/testdata/02250_insert_select_from_file_schema_inference/query.sql @@ -0,0 +1,10 @@ +set use_structure_from_insertion_table_in_table_functions = 1; + +insert into table function file(concat(database(),'.data_02250.jsonl')) select (SELECT 1) settings engine_file_truncate_on_insert=1; + +insert into table function file(concat(database(),'.data_02250.jsonl')) select NULL as x settings engine_file_truncate_on_insert=1; +drop table if exists test_02250; +create table test_02250 (x Nullable(UInt32)) engine=Memory(); +insert into test_02250 select * from file(concat(database(),'.data_02250.jsonl')); +select * from test_02250; +drop table test_02250; diff --git a/parser/testdata/02251_alter_enum_nested_struct/ast.json b/parser/testdata/02251_alter_enum_nested_struct/ast.json new file mode 100644 index 000000000..635d6c785 --- /dev/null +++ b/parser/testdata/02251_alter_enum_nested_struct/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alter_enum_array (children 1)" + }, + { + "explain": " Identifier alter_enum_array" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001287827, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/02251_alter_enum_nested_struct/metadata.json b/parser/testdata/02251_alter_enum_nested_struct/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02251_alter_enum_nested_struct/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02251_alter_enum_nested_struct/query.sql b/parser/testdata/02251_alter_enum_nested_struct/query.sql new file mode 100644 index 000000000..aaef05cd3 --- /dev/null +++ b/parser/testdata/02251_alter_enum_nested_struct/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS alter_enum_array; + +CREATE TABLE alter_enum_array( + Key UInt64, + Value Array(Enum8('Option1'=1, 'Option2'=2)) +) +ENGINE=MergeTree() +ORDER BY tuple(); + +INSERT INTO alter_enum_array VALUES (1, ['Option2', 'Option1']), (2, ['Option1']); + +ALTER TABLE alter_enum_array MODIFY COLUMN Value Array(Enum8('Option1'=1, 'Option2'=2, 'Option3'=3)) SETTINGS mutations_sync=2; + +INSERT INTO alter_enum_array VALUES (3, ['Option1','Option3']); + +SELECT * FROM alter_enum_array ORDER BY Key; + +DETACH TABLE alter_enum_array; +ATTACH TABLE alter_enum_array; + +SELECT * FROM alter_enum_array ORDER BY Key; + +OPTIMIZE TABLE alter_enum_array FINAL; + +SELECT COUNT() FROM system.mutations where table='alter_enum_array' and database=currentDatabase() and not is_done; + +DROP TABLE IF EXISTS alter_enum_array; diff --git a/parser/testdata/02251_last_day_of_month/ast.json b/parser/testdata/02251_last_day_of_month/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/02251_last_day_of_month/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02251_last_day_of_month/metadata.json b/parser/testdata/02251_last_day_of_month/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02251_last_day_of_month/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02251_last_day_of_month/query.sql b/parser/testdata/02251_last_day_of_month/query.sql new file mode 100644 index 000000000..dc7a076c0 --- /dev/null +++ b/parser/testdata/02251_last_day_of_month/query.sql @@ -0,0 +1,55 @@ +-- month with 30 days +WITH + toDate('2021-09-12') AS date_value, + toDateTime('2021-09-12 11:22:33') AS date_time_value, + toDateTime64('2021-09-12 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value); + +-- month with 31 days +WITH + toDate('2021-03-12') AS date_value, + toDateTime('2021-03-12 11:22:33') AS date_time_value, + toDateTime64('2021-03-12 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value); + +-- non leap year February +WITH + toDate('2021-02-12') AS date_value, + toDateTime('2021-02-12 11:22:33') AS date_time_value, + toDateTime64('2021-02-12 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value); + +-- leap year February +WITH + toDate('2020-02-12') AS date_value, + toDateTime('2020-02-12 11:22:33') AS date_time_value, + toDateTime64('2020-02-12 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value); + +-- December 31 for non-leap year +WITH + toDate('2021-12-12') AS date_value, + toDateTime('2021-12-12 11:22:33') AS date_time_value, + toDateTime64('2021-12-12 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value); + +-- December 31 for leap year +WITH + toDate('2020-12-12') AS date_value, + toDateTime('2020-12-12 11:22:33') AS date_time_value, + toDateTime64('2020-12-12 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value); + +-- aliases +WITH + toDate('2020-12-12') AS date_value +SELECT last_day(date_value), LAST_DAY(date_value); + +-- boundaries +WITH + toDate('1970-01-01') AS date_value, + toDateTime('1970-01-01 11:22:33') AS date_time_value, + toDateTime64('1900-01-01 11:22:33', 3) AS date_time_64_value +SELECT toLastDayOfMonth(date_value), toLastDayOfMonth(date_time_value), toLastDayOfMonth(date_time_64_value) +SETTINGS enable_extended_results_for_datetime_functions = true; + diff --git a/parser/testdata/02252_executable_user_defined_function_short_circuit/ast.json b/parser/testdata/02252_executable_user_defined_function_short_circuit/ast.json new file mode 100644 index 000000000..3b7691c84 --- /dev/null +++ b/parser/testdata/02252_executable_user_defined_function_short_circuit/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier 
number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_15" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function test_function (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_4" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001264953, + "rows_read": 24, + "bytes_read": 927 + } +} diff --git a/parser/testdata/02252_executable_user_defined_function_short_circuit/metadata.json b/parser/testdata/02252_executable_user_defined_function_short_circuit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02252_executable_user_defined_function_short_circuit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02252_executable_user_defined_function_short_circuit/query.sql b/parser/testdata/02252_executable_user_defined_function_short_circuit/query.sql new file mode 100644 index 000000000..fc7fb3974 --- /dev/null +++ b/parser/testdata/02252_executable_user_defined_function_short_circuit/query.sql @@ -0,0 +1,10 @@ +SELECT number FROM numbers(10) WHERE number > 15 and test_function(number, number) == 4; + +SYSTEM FLUSH LOGS query_log; + +SELECT ProfileEvents['ExecuteShellCommand'] FROM system.query_log WHERE + current_database = currentDatabase() + AND type = 'QueryFinish' + AND query == 'SELECT number FROM numbers(10) WHERE number > 15 and test_function(number, number) == 4;' + AND event_date >= yesterday() AND event_time > now() - interval 10 minute + LIMIT 1; diff --git a/parser/testdata/02252_jit_profile_events/ast.json b/parser/testdata/02252_jit_profile_events/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02252_jit_profile_events/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02252_jit_profile_events/metadata.json b/parser/testdata/02252_jit_profile_events/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02252_jit_profile_events/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02252_jit_profile_events/query.sql b/parser/testdata/02252_jit_profile_events/query.sql new file mode 100644 index 000000000..794a57b23 --- /dev/null +++ b/parser/testdata/02252_jit_profile_events/query.sql @@ -0,0 +1,31 @@ +-- Tags: no-fasttest, no-parallel, no-msan + +SET compile_expressions = 1; +SET min_count_to_compile_expression = 0; + +SYSTEM DROP COMPILED EXPRESSION CACHE; + +SELECT number + number + number FROM numbers(1); + +SYSTEM FLUSH LOGS query_log; + +SELECT ProfileEvents['CompileFunction'] FROM system.query_log WHERE + current_database = currentDatabase() + AND type = 'QueryFinish' + AND query == 'SELECT number + number + number FROM numbers(1);' + AND event_date >= yesterday() AND event_time > now() 
- interval 10 minute + LIMIT 1; + +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +SELECT avg(number), avg(number + 1), avg(number + 2) FROM numbers(1) GROUP BY number; + +SYSTEM FLUSH LOGS query_log; + +SELECT ProfileEvents['CompileFunction'] FROM system.query_log WHERE + current_database = currentDatabase() + AND type = 'QueryFinish' + AND query == 'SELECT avg(number), avg(number + 1), avg(number + 2) FROM numbers(1) GROUP BY number;' + AND event_date >= yesterday() AND event_time > now() - interval 10 minute + LIMIT 1; diff --git a/parser/testdata/02252_reset_non_existing_setting/ast.json b/parser/testdata/02252_reset_non_existing_setting/ast.json new file mode 100644 index 000000000..5bd1d6a02 --- /dev/null +++ b/parser/testdata/02252_reset_non_existing_setting/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery most_ordinary_mt (children 1)" + }, + { + "explain": " Identifier most_ordinary_mt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001189514, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/02252_reset_non_existing_setting/metadata.json b/parser/testdata/02252_reset_non_existing_setting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02252_reset_non_existing_setting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02252_reset_non_existing_setting/query.sql b/parser/testdata/02252_reset_non_existing_setting/query.sql new file mode 100644 index 000000000..47865c7bb --- /dev/null +++ b/parser/testdata/02252_reset_non_existing_setting/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS most_ordinary_mt; + +CREATE TABLE most_ordinary_mt +( + Key UInt64 +) +ENGINE = MergeTree() +ORDER BY tuple(); + +ALTER TABLE most_ordinary_mt RESET SETTING ttl; --{serverError BAD_ARGUMENTS} +ALTER TABLE most_ordinary_mt RESET SETTING allow_remote_fs_zero_copy_replication, xxx; --{serverError BAD_ARGUMENTS} + +DROP TABLE IF EXISTS most_ordinary_mt; diff --git a/parser/testdata/02264_format_insert_compression/ast.json b/parser/testdata/02264_format_insert_compression/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02264_format_insert_compression/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02264_format_insert_compression/metadata.json b/parser/testdata/02264_format_insert_compression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02264_format_insert_compression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02264_format_insert_compression/query.sql b/parser/testdata/02264_format_insert_compression/query.sql new file mode 100644 index 000000000..c095a8fbb --- /dev/null +++ b/parser/testdata/02264_format_insert_compression/query.sql @@ -0,0 +1,2 @@ +-- { echo } +EXPLAIN SYNTAX INSERT INTO foo FROM INFILE '/dev/null' COMPRESSION 'gz'; diff --git a/parser/testdata/02264_format_insert_infile/ast.json b/parser/testdata/02264_format_insert_infile/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02264_format_insert_infile/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02264_format_insert_infile/metadata.json b/parser/testdata/02264_format_insert_infile/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02264_format_insert_infile/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02264_format_insert_infile/query.sql b/parser/testdata/02264_format_insert_infile/query.sql new file mode 100644 index 000000000..38ee39d93 --- /dev/null +++ b/parser/testdata/02264_format_insert_infile/query.sql @@ -0,0 +1,2 @@ +-- { echo } +EXPLAIN SYNTAX INSERT INTO foo FROM INFILE '/dev/null'; diff --git a/parser/testdata/02265_column_ttl/ast.json b/parser/testdata/02265_column_ttl/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02265_column_ttl/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02265_column_ttl/metadata.json b/parser/testdata/02265_column_ttl/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02265_column_ttl/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02265_column_ttl/query.sql b/parser/testdata/02265_column_ttl/query.sql new file mode 100644 index 000000000..e7c26cb6c --- /dev/null +++ b/parser/testdata/02265_column_ttl/query.sql @@ -0,0 +1,41 @@ +-- Tags: replica, long + +-- Regression test for a possible CHECKSUM_DOESNT_MATCH due to a per-column TTL bug. +-- That was fixed in https://github.com/ClickHouse/ClickHouse/pull/35820 + +drop table if exists ttl_02265; +drop table if exists ttl_02265_r2; + +-- The bug appears only for Wide parts. +create table ttl_02265 (date Date, key Int, value String TTL date + interval 1 month) engine=ReplicatedMergeTree('/clickhouse/tables/{database}/ttl_02265', 'r1') order by key partition by date settings min_bytes_for_wide_part=0, min_bytes_for_full_part_storage=0; +create table ttl_02265_r2 (date Date, key Int, value String TTL date + interval 1 month) engine=ReplicatedMergeTree('/clickhouse/tables/{database}/ttl_02265', 'r2') order by key partition by date settings min_bytes_for_wide_part=0, min_bytes_for_full_part_storage=0; +
-- after, 20100101_0_0_0 will have ttl.txt and value.bin +insert into ttl_02265 values ('2010-01-01', 2010, 'foo'); +-- after, 20100101_0_0_1 will have neither ttl.txt nor value.bin +optimize table ttl_02265 final; +-- after, 20100101_0_0_2 will not have ttl.txt, but will have value.bin +optimize table ttl_02265 final; +system sync replica ttl_02265 STRICT; +system sync replica ttl_02265_r2 STRICT; + +-- after detach/attach it will not have TTL in-memory, and will not have ttl.txt +detach table ttl_02265; +attach table ttl_02265; + +-- So now the state for 20100101_0_0_2 is as follows: +-- +-- table | in_memory_ttl | ttl.txt | value.bin/mrk2 +-- ttl_02265 | N | N | N +-- ttl_02265_r2 | Y | N | N +-- +-- And hence on the replica that does not have TTL in-memory (this replica), +-- it will try to apply TTL, and the column will be dropped, +-- but on another replica the column won't be dropped since it has in-memory TTL and will not apply TTL. +-- Eventually this will lead to the following error: +-- +-- MergeFromLogEntryTask: Code: 40. DB::Exception: Part 20100101_0_0_3 from r2 has different columns hash. (CHECKSUM_DOESNT_MATCH) (version 22.4.1.1). Data after merge is not byte-identical to data on another replicas. There could be several reasons: 1. Using newer version of compression library after server update. 2. Using another compression method. 3. Non-deterministic compression algorithm (highly unlikely). 4. Non-deterministic merge algorithm due to logical error in code. 5. Data corruption in memory due to bug in code. 6. 
Data corruption in memory due to hardware issue. 7. Manual modification of source data after server startup. 8. Manual modification of checksums stored in ZooKeeper. 9. Part format related settings like 'enable_mixed_granularity_parts' are different on different replicas. We will download merged part from replica to force byte-identical result. +-- +optimize table ttl_02265 final; +system flush logs part_log; +select * from system.part_log where database = currentDatabase() and table like 'ttl_02265%' and error != 0 and errorCodeToName(error) != 'NO_REPLICA_HAS_PART'; diff --git a/parser/testdata/02265_cross_join_empty_list/ast.json b/parser/testdata/02265_cross_join_empty_list/ast.json new file mode 100644 index 000000000..dae5410de --- /dev/null +++ b/parser/testdata/02265_cross_join_empty_list/ast.json @@ -0,0 +1,97 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 3)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (alias n1) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (alias n2) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " TableJoin" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (alias n3) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " TableJoin" + } + ], + + "rows": 25, + + "statistics": + { + "elapsed": 0.001157021, + "rows_read": 25, + "bytes_read": 1005 + } +} diff --git a/parser/testdata/02265_cross_join_empty_list/metadata.json b/parser/testdata/02265_cross_join_empty_list/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02265_cross_join_empty_list/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02265_cross_join_empty_list/query.sql b/parser/testdata/02265_cross_join_empty_list/query.sql new file mode 100644 index 000000000..346a04735 --- /dev/null +++ b/parser/testdata/02265_cross_join_empty_list/query.sql @@ -0,0 +1,6 @@ +SELECT count(1) FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3; +SELECT count(*) FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3; +SELECT count() FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3; +SELECT count(n1.number), count(n2.number), count(n3.number) FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3; +SELECT * FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3 ORDER BY n1.number, n2.number, n3.number; +SELECT n1.number, n2.number, n3.number FROM numbers(2) AS n1, numbers(3) AS n2, numbers(4) AS n3 ORDER BY n1.number, n2.number, n3.number; diff --git 
a/parser/testdata/02265_limit_push_down_over_window_functions_bug/ast.json b/parser/testdata/02265_limit_push_down_over_window_functions_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02265_limit_push_down_over_window_functions_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02265_limit_push_down_over_window_functions_bug/metadata.json b/parser/testdata/02265_limit_push_down_over_window_functions_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02265_limit_push_down_over_window_functions_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02265_limit_push_down_over_window_functions_bug/query.sql b/parser/testdata/02265_limit_push_down_over_window_functions_bug/query.sql new file mode 100644 index 000000000..208ec8ef1 --- /dev/null +++ b/parser/testdata/02265_limit_push_down_over_window_functions_bug/query.sql @@ -0,0 +1,16 @@ +SELECT + number, + leadInFrame(number) OVER w AS W +FROM numbers(10) +WINDOW w AS (ORDER BY number ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) +LIMIT 3; + +WITH arrayJoin(['a', 'a', 'b', 'b']) AS field +SELECT + field, + count() OVER (PARTITION BY field) +ORDER BY field ASC +LIMIT 1; + +select * from ( ( select *, count() over () cnt from ( select * from numbers(10000000) ) ) ) limit 3 ; +select * from ( ( select *, count() over () cnt from ( select * from numbers(10000000) ) ) ) order by number limit 3 ; diff --git a/parser/testdata/02265_per_table_ttl_mutation_on_change/ast.json b/parser/testdata/02265_per_table_ttl_mutation_on_change/ast.json new file mode 100644 index 000000000..af79d9a0b --- /dev/null +++ b/parser/testdata/02265_per_table_ttl_mutation_on_change/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery per_table_ttl_02265 (children 1)" + }, + { + "explain": " Identifier per_table_ttl_02265" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001277084, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/02265_per_table_ttl_mutation_on_change/metadata.json b/parser/testdata/02265_per_table_ttl_mutation_on_change/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02265_per_table_ttl_mutation_on_change/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02265_per_table_ttl_mutation_on_change/query.sql b/parser/testdata/02265_per_table_ttl_mutation_on_change/query.sql new file mode 100644 index 000000000..53e2e7222 --- /dev/null +++ b/parser/testdata/02265_per_table_ttl_mutation_on_change/query.sql @@ -0,0 +1,22 @@ +drop table if exists per_table_ttl_02265; +create table per_table_ttl_02265 (key Int, date Date, value String) engine=MergeTree() order by key; +insert into per_table_ttl_02265 values (1, today(), '1'); + +-- { echoOn } +alter table per_table_ttl_02265 modify TTL date + interval 1 month; +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +alter table per_table_ttl_02265 modify TTL date + interval 1 month; +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +alter table per_table_ttl_02265 modify TTL date + interval 2 month; +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +alter table per_table_ttl_02265 modify TTL date + 
interval 2 month group by key set value = argMax(value, date); +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +alter table per_table_ttl_02265 modify TTL date + interval 2 month group by key set value = argMax(value, date); +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +alter table per_table_ttl_02265 modify TTL date + interval 2 month recompress codec(ZSTD(17)); +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; +alter table per_table_ttl_02265 modify TTL date + interval 2 month recompress codec(ZSTD(17)); +select count() from system.mutations where database = currentDatabase() and table = 'per_table_ttl_02265'; + +-- { echoOff } +drop table per_table_ttl_02265; diff --git a/parser/testdata/02265_rename_join_ordinary_to_atomic/ast.json b/parser/testdata/02265_rename_join_ordinary_to_atomic/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02265_rename_join_ordinary_to_atomic/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02265_rename_join_ordinary_to_atomic/metadata.json b/parser/testdata/02265_rename_join_ordinary_to_atomic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02265_rename_join_ordinary_to_atomic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02265_rename_join_ordinary_to_atomic/query.sql b/parser/testdata/02265_rename_join_ordinary_to_atomic/query.sql new file mode 100644 index 000000000..235fc86f8 --- /dev/null +++ b/parser/testdata/02265_rename_join_ordinary_to_atomic/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-parallel + +SET send_logs_level = 'fatal'; + +set allow_deprecated_database_ordinary=1; +DROP DATABASE IF EXISTS 02265_atomic_db; +DROP DATABASE IF EXISTS 02265_ordinary_db; + +CREATE DATABASE 02265_atomic_db ENGINE = Atomic; +CREATE DATABASE 02265_ordinary_db ENGINE = Ordinary; + +CREATE TABLE 02265_ordinary_db.join_table ( `a` Int64 ) ENGINE = Join(`ALL`, LEFT, a); +INSERT INTO 02265_ordinary_db.join_table VALUES (111); + +RENAME TABLE 02265_ordinary_db.join_table TO 02265_atomic_db.join_table; + +SELECT * FROM 02265_atomic_db.join_table; + +DROP DATABASE IF EXISTS 02265_atomic_db; +DROP DATABASE IF EXISTS 02265_ordinary_db; diff --git a/parser/testdata/02266_auto_add_nullable/ast.json b/parser/testdata/02266_auto_add_nullable/ast.json new file mode 100644 index 000000000..fd15806a4 --- /dev/null +++ b/parser/testdata/02266_auto_add_nullable/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001108225, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02266_auto_add_nullable/metadata.json b/parser/testdata/02266_auto_add_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02266_auto_add_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02266_auto_add_nullable/query.sql b/parser/testdata/02266_auto_add_nullable/query.sql new file mode 100644 index 000000000..7a9c6fbb1 --- /dev/null +++ b/parser/testdata/02266_auto_add_nullable/query.sql @@ -0,0 +1,17 @@ +SET allow_suspicious_low_cardinality_types = 1; +DROP TABLE IF EXISTS 02266_auto_add_nullable; + +CREATE TABLE 02266_auto_add_nullable +( + val0 Int8 
DEFAULT NULL, + val1 Nullable(Int8) DEFAULT NULL, + val2 UInt8 DEFAULT NUll, + val3 String DEFAULT null, + val4 LowCardinality(Int8) DEFAULT NULL, + val5 LowCardinality(Nullable(Int8)) DEFAULT NULL +) +ENGINE = MergeTree order by tuple(); + +DESCRIBE TABLE 02266_auto_add_nullable; + +DROP TABLE IF EXISTS 02266_auto_add_nullable; \ No newline at end of file diff --git a/parser/testdata/02267_empty_arrays_read_reverse/ast.json b/parser/testdata/02267_empty_arrays_read_reverse/ast.json new file mode 100644 index 000000000..5d9a8ebf2 --- /dev/null +++ b/parser/testdata/02267_empty_arrays_read_reverse/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_02267 (children 1)" + }, + { + "explain": " Identifier t_02267" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001371207, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/02267_empty_arrays_read_reverse/metadata.json b/parser/testdata/02267_empty_arrays_read_reverse/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02267_empty_arrays_read_reverse/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02267_empty_arrays_read_reverse/query.sql b/parser/testdata/02267_empty_arrays_read_reverse/query.sql new file mode 100644 index 000000000..0c6c1a46e --- /dev/null +++ b/parser/testdata/02267_empty_arrays_read_reverse/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS t_02267; + +CREATE TABLE t_02267 +( + a Array(String), + b UInt32, + c Array(String) +) +ENGINE = MergeTree +ORDER BY b +SETTINGS index_granularity = 500, index_granularity_bytes = '10Mi'; + +INSERT INTO t_02267 (b, a, c) SELECT 0, ['x'], ['1','2','3','4','5','6'] FROM numbers(1) ; +INSERT INTO t_02267 (b, a, c) SELECT 1, [], ['1','2','3','4','5','6'] FROM numbers(300000); + +OPTIMIZE TABLE t_02267 FINAL; + +SELECT * FROM t_02267 WHERE hasAll(a, ['x']) +ORDER BY b DESC +SETTINGS max_threads=1, max_block_size=1000; + +DROP TABLE IF EXISTS t_02267; diff --git a/parser/testdata/02267_insert_empty_data/ast.json b/parser/testdata/02267_insert_empty_data/ast.json new file mode 100644 index 000000000..a21a0fe25 --- /dev/null +++ b/parser/testdata/02267_insert_empty_data/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001294121, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02267_insert_empty_data/metadata.json b/parser/testdata/02267_insert_empty_data/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02267_insert_empty_data/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02267_insert_empty_data/query.sql b/parser/testdata/02267_insert_empty_data/query.sql new file mode 100644 index 000000000..b39bd8078 --- /dev/null +++ b/parser/testdata/02267_insert_empty_data/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS t; + +CREATE TABLE t (n UInt32) ENGINE=Memory; + +INSERT INTO t VALUES; -- { clientError NO_DATA_TO_INSERT } + +set throw_if_no_data_to_insert = 0; + +INSERT INTO t VALUES; + +DROP TABLE t; diff --git a/parser/testdata/02267_join_dup_columns_issue36199/ast.json b/parser/testdata/02267_join_dup_columns_issue36199/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/02267_join_dup_columns_issue36199/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02267_join_dup_columns_issue36199/metadata.json b/parser/testdata/02267_join_dup_columns_issue36199/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02267_join_dup_columns_issue36199/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02267_join_dup_columns_issue36199/query.sql b/parser/testdata/02267_join_dup_columns_issue36199/query.sql new file mode 100644 index 000000000..ecb219823 --- /dev/null +++ b/parser/testdata/02267_join_dup_columns_issue36199/query.sql @@ -0,0 +1,35 @@ +SET enable_analyzer = 0; + +SET join_algorithm = 'hash'; + +SELECT * FROM ( SELECT 2 AS x ) AS t1 RIGHT JOIN ( SELECT count('x'), count('y'), 2 AS x ) AS t2 ON t1.x = t2.x; +SELECT * FROM ( SELECT 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x'), count('y'), 0 AS x ) AS t2 ON t1.x = t2.x; +SELECT * FROM ( SELECT 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x') :: Nullable(Int32), count('y'), 0 AS x ) AS t2 ON t1.x = t2.x; +SELECT * FROM ( SELECT 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x') :: Nullable(Int32), count('y') :: Nullable(Int32), 0 AS x ) AS t2 ON t1.x = t2.x; +SELECT * FROM ( SELECT count('a'), count('b'), count('c'), 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x'), count('y'), 0 AS x ) AS t2 ON t1.x = t2.x; + +SELECT 'y', * FROM (SELECT count('y'), count('y'), 2 AS x) AS t1 RIGHT JOIN (SELECT count('x'), count('y'), 3 AS x) AS t2 ON t1.x = t2.x; +SELECT * FROM (SELECT arrayJoin([NULL]), 9223372036854775806, arrayJoin([NULL]), NULL AS x) AS t1 RIGHT JOIN (SELECT arrayJoin([arrayJoin([10000000000.])]), NULL AS x) AS t2 ON t1.x = t2.x; + +SET join_algorithm = 'partial_merge'; + +SELECT * FROM ( SELECT 2 AS x ) AS t1 RIGHT JOIN ( SELECT count('x'), count('y'), 2 AS x ) AS t2 ON t1.x = t2.x; +SELECT * FROM ( SELECT 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x'), count('y'), 0 AS x ) AS t2 ON t1.x = t2.x; +SELECT * FROM ( SELECT 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x') :: Nullable(Int32), count('y'), 0 AS x ) AS t2 ON t1.x = t2.x; +SELECT * FROM ( SELECT 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x') :: Nullable(Int32), count('y') :: Nullable(Int32), 0 AS x ) AS t2 ON t1.x = t2.x; +SELECT * FROM ( SELECT count('a'), count('b'), count('c'), 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x'), count('y'), 0 AS x ) AS t2 ON t1.x = t2.x; + +SELECT 'y', * FROM (SELECT count('y'), count('y'), 2 AS x) AS t1 RIGHT JOIN (SELECT count('x'), count('y'), 3 AS x) AS t2 ON t1.x = t2.x; +SELECT * FROM (SELECT arrayJoin([NULL]), 9223372036854775806, arrayJoin([NULL]), NULL AS x) AS t1 RIGHT JOIN (SELECT arrayJoin([arrayJoin([10000000000.])]), NULL AS x) AS t2 ON t1.x = t2.x; + +SET enable_analyzer = 1; +SET join_algorithm = 'hash'; + +SELECT * FROM ( SELECT 2 AS x ) AS t1 RIGHT JOIN ( SELECT count('x'), count('y'), 2 AS x ) AS t2 ON t1.x = t2.x; +SELECT * FROM ( SELECT 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x'), count('y'), 0 AS x ) AS t2 ON t1.x = t2.x; +SELECT * FROM ( SELECT 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x') :: Nullable(Int32), count('y'), 0 AS x ) AS t2 ON t1.x = t2.x; +SELECT * FROM ( SELECT 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x') :: Nullable(Int32), count('y') :: Nullable(Int32), 0 AS x ) AS t2 ON t1.x = t2.x; +SELECT * FROM ( SELECT count('a'), count('b'), count('c'), 2 AS x ) as t1 RIGHT JOIN ( SELECT count('x'), count('y'), 0 AS x ) AS t2 ON t1.x = t2.x; + +SELECT 'y', * FROM (SELECT count('y'), count('y'), 2 AS x) 
AS t1 RIGHT JOIN (SELECT count('x'), count('y'), 3 AS x) AS t2 ON t1.x = t2.x; +SELECT * FROM (SELECT arrayJoin([NULL]), 9223372036854775806, arrayJoin([NULL]), NULL AS x) AS t1 RIGHT JOIN (SELECT arrayJoin([arrayJoin([10000000000.])]), NULL AS x) AS t2 ON t1.x = t2.x; diff --git a/parser/testdata/02267_jsonlines_ndjson_format/ast.json b/parser/testdata/02267_jsonlines_ndjson_format/ast.json new file mode 100644 index 000000000..93cbd2205 --- /dev/null +++ b/parser/testdata/02267_jsonlines_ndjson_format/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Identifier JSONLines" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001280754, + "rows_read": 12, + "bytes_read": 451 + } +} diff --git a/parser/testdata/02267_jsonlines_ndjson_format/metadata.json b/parser/testdata/02267_jsonlines_ndjson_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02267_jsonlines_ndjson_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02267_jsonlines_ndjson_format/query.sql b/parser/testdata/02267_jsonlines_ndjson_format/query.sql new file mode 100644 index 000000000..5dfb17da9 --- /dev/null +++ b/parser/testdata/02267_jsonlines_ndjson_format/query.sql @@ -0,0 +1,15 @@ +SELECT * FROM numbers(10) FORMAT JSONLines; +SELECT * FROM numbers(10) FORMAT NDJSON; + +DROP TABLE IF EXISTS 02267_t; + +CREATE TABLE 02267_t (n1 UInt32, n2 UInt32) ENGINE = Memory; + +INSERT INTO 02267_t FORMAT JSONLines {"n1": 1, "n2": 2} {"n1": 3, "n2": 4} {"n1": 5, "n2": 6}; + +INSERT INTO 02267_t FORMAT NDJSON {"n1": 1, "n2": 2} {"n1": 3, "n2": 4} {"n1": 5, "n2": 6}; + +SELECT * FROM 02267_t ORDER BY n1, n2 FORMAT JSONLines; +SELECT * FROM 02267_t ORDER BY n1, n2 FORMAT NDJSON; + +DROP TABLE 02267_t; diff --git a/parser/testdata/02267_output_format_prometheus/ast.json b/parser/testdata/02267_output_format_prometheus/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02267_output_format_prometheus/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02267_output_format_prometheus/metadata.json b/parser/testdata/02267_output_format_prometheus/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02267_output_format_prometheus/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02267_output_format_prometheus/query.sql b/parser/testdata/02267_output_format_prometheus/query.sql new file mode 100644 index 000000000..c04c2226a --- /dev/null +++ b/parser/testdata/02267_output_format_prometheus/query.sql @@ -0,0 +1,165 @@ +SELECT * FROM ( + +SELECT + 'http_requests_total' AS name, + 'counter' AS type, + 'Total number of HTTP requests' AS help, + map('method', 'post', 'code', '200') AS labels, + 1027 AS value, + 1395066363000 :: Float64 AS timestamp +UNION ALL +SELECT + 'http_requests_total' 
AS name, + 'counter' AS type, + '' AS help, + map('method', 'post', 'code', '400') AS labels, + 3 AS value, + 1395066363000 :: Float64 AS timestamp +UNION ALL +SELECT + 'msdos_file_access_time_seconds' AS name, + '' AS type, + '' AS help, + map('path', 'C:\\DIR\\FILE.TXT', 'error', 'Cannot find file:\n"FILE.TXT"') AS labels, + 1458255915 AS value, + 0 :: Float64 AS timestamp +UNION ALL +SELECT + 'metric_without_timestamp_and_labels' AS name, + '' AS type, + '' AS help, + map() AS labels, + 12.47 AS value, + 0 :: Float64 AS timestamp +UNION ALL +SELECT + 'something_weird' AS name, + '' AS type, + '' AS help, + map('problem', 'division by zero') AS labels, + inf AS value, + -3982045 :: Float64 AS timestamp +UNION ALL +SELECT + 'http_request_duration_seconds' AS name, + 'histogram' AS type, + 'A histogram of the request duration.' AS help, + map('le', '0.05') AS labels, + 24054 AS value, + 0 :: Float64 AS timestamp +UNION ALL +SELECT + 'http_request_duration_seconds' AS name, + 'histogram' AS type, + '' AS help, + map('le', '0.1') AS labels, + 33444 AS value, + 0 :: Float64 AS timestamp +UNION ALL +SELECT + 'http_request_duration_seconds' AS name, + 'histogram' AS type, + '' AS help, + map('le', '0.2') AS labels, + 100392 AS value, + 0 :: Float64 AS timestamp +UNION ALL +SELECT + 'http_request_duration_seconds' AS name, + 'histogram' AS type, + '' AS help, + map('le', '0.5') AS labels, + 129389 AS value, + 0 :: Float64 AS timestamp +UNION ALL +SELECT + 'http_request_duration_seconds' AS name, + 'histogram' AS type, + '' AS help, + map('le', '1') AS labels, + 133988 AS value, + 0 :: Float64 AS timestamp +UNION ALL +SELECT + 'http_request_duration_seconds' AS name, + 'histogram' AS type, + '' AS help, + map('le', '+Inf') AS labels, + 144320 AS value, + 0 :: Float64 AS timestamp +UNION ALL +SELECT + 'http_request_duration_seconds' AS name, + 'histogram' AS type, + '' AS help, + map('sum', '') AS labels, + 53423 AS value, + 0 :: Float64 AS timestamp +UNION ALL +SELECT + 'rpc_duration_seconds' AS name, + 'summary' AS type, + 'A summary of the RPC duration in seconds.' 
AS help, + map('quantile', '0.01') AS labels, + 3102 AS value, + 0 :: Float64 AS timestamp +UNION ALL +SELECT + 'rpc_duration_seconds' AS name, + 'summary' AS type, + '' AS help, + map('quantile', '0.05') AS labels, + 3272 AS value, + 0 :: Float64 AS timestamp +UNION ALL +SELECT + 'rpc_duration_seconds' AS name, + 'summary' AS type, + '' AS help, + map('quantile', '0.5') AS labels, + 4773 AS value, + 0 :: Float64 AS timestamp +UNION ALL +SELECT + 'rpc_duration_seconds' AS name, + 'summary' AS type, + '' AS help, + map('quantile', '0.9') AS labels, + 9001 AS value, + 0 :: Float64 AS timestamp +UNION ALL +SELECT + 'rpc_duration_seconds' AS name, + 'summary' AS type, + '' AS help, + map('quantile', '0.99') AS labels, + 76656 AS value, + 0 :: Float64 AS timestamp +UNION ALL +SELECT + 'rpc_duration_seconds' AS name, + 'summary' AS type, + '' AS help, + map('count', '') AS labels, + 2693 AS value, + 0 :: Float64 AS timestamp +UNION ALL +SELECT + 'rpc_duration_seconds' AS name, + 'summary' AS type, + '' AS help, + map('sum', '') AS labels, + 1.7560473e+07 AS value, + 0 :: Float64 AS timestamp + +) ORDER BY name, value +FORMAT Prometheus; + +SELECT + 'metric' || toString(number) AS name, + number AS value, + if(number % 2 == 0, 'info ' || toString(number), NULL) AS help, + if(number % 3 == 0, 'counter', NULL) AS type, + if(number == 2, 1395066363000, NULL) AS timestamp +FROM numbers(5) +FORMAT Prometheus; diff --git a/parser/testdata/02267_special_operator_parse_alias_check/ast.json b/parser/testdata/02267_special_operator_parse_alias_check/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02267_special_operator_parse_alias_check/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02267_special_operator_parse_alias_check/metadata.json b/parser/testdata/02267_special_operator_parse_alias_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02267_special_operator_parse_alias_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02267_special_operator_parse_alias_check/query.sql b/parser/testdata/02267_special_operator_parse_alias_check/query.sql new file mode 100644 index 000000000..40e4122ad --- /dev/null +++ b/parser/testdata/02267_special_operator_parse_alias_check/query.sql @@ -0,0 +1,125 @@ +-- CAST expression + +-- cast(expr [[AS] alias_1] AS Type) + +SELECT cast('1234' AS UInt32); +SELECT cast('1234' AS lhs AS UInt32), lhs; +SELECT cast('1234' lhs AS UInt32), lhs; +SELECT cast(('1234' AS lhs) AS UInt32), lhs; +SELECT cast(('1234' AS lhs) rhs AS UInt32), rhs; +SELECT cast(('1234' AS lhs) AS rhs AS UInt32), rhs; + +-- cast(expr [[AS] alias_1], type_expr [[as] alias_2]) + +SELECT cast('1234', 'UInt32'); +SELECT cast('1234' AS lhs, 'UInt32'), lhs; +SELECT cast('1234' lhs, 'UInt32'), lhs; +SELECT cast('1234', 'UInt32' AS rhs), rhs; +SELECT cast('1234', 'UInt32' rhs), rhs; +SELECT cast('1234' AS lhs, 'UInt32' AS rhs), lhs, rhs; +SELECT cast('1234' lhs, 'UInt32' rhs), lhs, rhs; + +-- SUBSTRING expression + +-- SUBSTRING(expr FROM start) + +SELECT substring('1234' FROM 2); +SELECT substring('1234' AS lhs FROM 2), lhs; +SELECT substring('1234' lhs FROM 2), lhs; +SELECT substring('1234' FROM 2 AS rhs), rhs; +SELECT substring('1234' FROM 2 rhs), rhs; +SELECT substring('1234' AS lhs FROM 2 AS rhs), lhs, rhs; +SELECT substring('1234' lhs FROM 2 rhs), lhs, rhs; +SELECT substring(('1234' AS lhs) FROM (2 AS rhs)), lhs, rhs; + +-- SUBSTRING(expr FROM start FOR length) + 
+SELECT substring('1234' FROM 2 FOR 2); +SELECT substring('1234' FROM 2 FOR 2 AS lhs), lhs; +SELECT substring('1234' FROM 2 FOR 2 lhs), lhs; + +-- SUBSTRING(expr, start, length) + +SELECT substring('1234' AS arg_1, 2 AS arg_2, 3 AS arg_3), arg_1, arg_2, arg_3; +SELECT substring('1234' arg_1, 2 arg_2, 3 arg_3), arg_1, arg_2, arg_3; + +-- -- TRIM expression ([[LEADING|TRAILING|BOTH] trim_character FROM] input_string) + +SELECT trim(LEADING 'a' AS arg_1 FROM 'abca' AS arg_2), arg_1, arg_2; +SELECT trim(LEADING 'a' arg_1 FROM 'abca' arg_2), arg_1, arg_2; +SELECT trim(LEADING 'a' FROM 'abca' AS arg_2), arg_2; +SELECT trim(LEADING 'a' FROM 'abca' arg_2), arg_2; +SELECT trim(LEADING 'a' AS arg_1 FROM 'abca'), arg_1; +SELECT trim(LEADING 'a' arg_1 FROM 'abca'), arg_1; +SELECT trim(LEADING 'a' FROM 'abca'); + +SELECT trim(TRAILING 'a' AS arg_1 FROM 'abca' AS arg_2), arg_1, arg_2; +SELECT trim(TRAILING 'a' arg_1 FROM 'abca' arg_2), arg_1, arg_2; + +SELECT trim(BOTH 'a' AS arg_1 FROM 'abca' AS arg_2), arg_1, arg_2; +SELECT trim(BOTH 'a' arg_1 FROM 'abca' arg_2), arg_1, arg_2; + +-- Bug #69922 +SELECT trim(LEADING concat('') FROM 'abc'); +SELECT trim(LEADING concat('a', 'b') FROM 'abc'); + +-- EXTRACT expression + +-- EXTRACT(part FROM date) + +SELECT EXTRACT(DAY FROM toDate('2019-05-05') as arg_1), arg_1; +SELECT EXTRACT(DAY FROM toDate('2019-05-05') arg_1), arg_1; + +-- Function extract(haystack, pattern) + +SELECT extract('1234' AS arg_1, '123' AS arg_2), arg_1, arg_2; +SELECT extract('1234' arg_1, '123' arg_2), arg_1, arg_2; + +-- POSITION expression + +-- position(needle IN haystack) + +SELECT position(('123' AS arg_1) IN ('1234' AS arg_2)), arg_1, arg_2; + +-- position(haystack, needle[, start_pos]) + +SELECT position('123' AS arg_1, '1234' AS arg_2), arg_1, arg_2; +SELECT position('123' arg_1, '1234' arg_2), arg_1, arg_2; + +-- dateAdd, dateSub expressions + +-- function(unit, offset, timestamp) + +SELECT dateAdd(DAY, 1 AS arg_1, toDate('2019-05-05') AS arg_2), arg_1, arg_2; +SELECT dateAdd(DAY, 1 arg_1, toDate('2019-05-05') arg_2), arg_1, arg_2; + +-- function(offset, timestamp) + +SELECT dateAdd(DAY, 1 AS arg_1, toDate('2019-05-05') AS arg_2), arg_1, arg_2; +SELECT dateAdd(DAY, 1 arg_1, toDate('2019-05-05') arg_2), arg_1, arg_2; + +-- function(unit, offset, timestamp) + +SELECT dateSub(DAY, 1 AS arg_1, toDate('2019-05-05') AS arg_2), arg_1, arg_2; +SELECT dateSub(DAY, 1 arg_1, toDate('2019-05-05') arg_2), arg_1, arg_2; + +-- function(offset, timestamp) + +SELECT dateSub(DAY, 1 AS arg_1, toDate('2019-05-05') AS arg_2), arg_1, arg_2; +SELECT dateSub(DAY, 1 arg_1, toDate('2019-05-05') arg_2), arg_1, arg_2; + +-- dateDiff expression + +-- dateDiff(unit, startdate, enddate, [timezone]) + +SELECT dateDiff(DAY, toDate('2019-05-05') AS arg_1, toDate('2019-05-06') AS arg_2), arg_1, arg_2; +SELECT dateDiff(DAY, toDate('2019-05-05') arg_1, toDate('2019-05-06') arg_2), arg_1, arg_2; +SELECT dateDiff(DAY, toDate('2019-05-05') AS arg_1, toDate('2019-05-06') AS arg_2, 'UTC'), arg_1, arg_2; +SELECT dateDiff(DAY, toDate('2019-05-05') arg_1, toDate('2019-05-06') arg_2, 'UTC'), arg_1, arg_2; + +-- dateDiff('unit', startdate, enddate, [timezone]) + +SELECT dateDiff('DAY', toDate('2019-05-05') AS arg_1, toDate('2019-05-06') AS arg_2), arg_1, arg_2; +SELECT dateDiff('DAY', toDate('2019-05-05') arg_1, toDate('2019-05-06') arg_2), arg_1, arg_2; +SELECT dateDiff('DAY', toDate('2019-05-05') AS arg_1, toDate('2019-05-06') AS arg_2, 'UTC'), arg_1, arg_2; +SELECT dateDiff('DAY', toDate('2019-05-05') arg_1, 
toDate('2019-05-06') arg_2, 'UTC'), arg_1, arg_2; diff --git a/parser/testdata/02267_type_inference_for_insert_into_function_null/ast.json b/parser/testdata/02267_type_inference_for_insert_into_function_null/ast.json new file mode 100644 index 000000000..9580d435a --- /dev/null +++ b/parser/testdata/02267_type_inference_for_insert_into_function_null/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "InsertQuery (children 2)" + }, + { + "explain": " Function null (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.00148971, + "rows_read": 8, + "bytes_read": 275 + } +} diff --git a/parser/testdata/02267_type_inference_for_insert_into_function_null/metadata.json b/parser/testdata/02267_type_inference_for_insert_into_function_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02267_type_inference_for_insert_into_function_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02267_type_inference_for_insert_into_function_null/query.sql b/parser/testdata/02267_type_inference_for_insert_into_function_null/query.sql new file mode 100644 index 000000000..de8332442 --- /dev/null +++ b/parser/testdata/02267_type_inference_for_insert_into_function_null/query.sql @@ -0,0 +1,6 @@ +INSERT INTO function null() SELECT 1; +INSERT INTO function null() SELECT number FROM numbers(10); +INSERT INTO function null() SELECT number, toString(number) FROM numbers(10); +INSERT INTO function null('auto') SELECT 1; +INSERT INTO function null('auto') SELECT number FROM numbers(10); +INSERT INTO function null('auto') SELECT number, toString(number) FROM numbers(10); diff --git a/parser/testdata/02268_json_wrong_root_type_in_schema_inference/ast.json b/parser/testdata/02268_json_wrong_root_type_in_schema_inference/ast.json new file mode 100644 index 000000000..8ba567d47 --- /dev/null +++ b/parser/testdata/02268_json_wrong_root_type_in_schema_inference/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "InsertQuery (children 2)" + }, + { + "explain": " Function file (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function currentDatabase (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal '_02268_data.jsonl'" + }, + { + "explain": " Literal 'TSV'" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001021975, + "rows_read": 14, + "bytes_read": 508 + } +} diff --git a/parser/testdata/02268_json_wrong_root_type_in_schema_inference/metadata.json b/parser/testdata/02268_json_wrong_root_type_in_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02268_json_wrong_root_type_in_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02268_json_wrong_root_type_in_schema_inference/query.sql b/parser/testdata/02268_json_wrong_root_type_in_schema_inference/query.sql new file mode 100644 index 000000000..1c1309824 --- /dev/null +++ b/parser/testdata/02268_json_wrong_root_type_in_schema_inference/query.sql @@ -0,0 +1,5 @@ +insert into function file(currentDatabase() || '_02268_data.jsonl', 'TSV') select 1; +select * from file(currentDatabase() || '_02268_data.jsonl'); --{serverError CANNOT_EXTRACT_TABLE_STRUCTURE} + +insert into function file(currentDatabase() || '_02268_data.jsonCompactEachRow', 'TSV') select 1; +select * from file(currentDatabase() || '_02268_data.jsonCompactEachRow'); --{serverError CANNOT_EXTRACT_TABLE_STRUCTURE} diff --git a/parser/testdata/02269_create_table_with_collation/ast.json b/parser/testdata/02269_create_table_with_collation/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02269_create_table_with_collation/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02269_create_table_with_collation/metadata.json b/parser/testdata/02269_create_table_with_collation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02269_create_table_with_collation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02269_create_table_with_collation/query.sql b/parser/testdata/02269_create_table_with_collation/query.sql new file mode 100644 index 000000000..1e6138bdf --- /dev/null +++ b/parser/testdata/02269_create_table_with_collation/query.sql @@ -0,0 +1,15 @@ +-- Tags: memory-engine + +EXPLAIN SYNTAX CREATE TABLE t (x varchar(255) COLLATE binary NOT NULL) ENGINE=Memory; + +EXPLAIN SYNTAX CREATE TABLE t (x varchar(255) COLLATE NOT NULL) ENGINE=Memory; -- {clientError SYNTAX_ERROR} +EXPLAIN SYNTAX CREATE TABLE t (x varchar(255) COLLATE NULL) ENGINE=Memory; -- {clientError SYNTAX_ERROR} +EXPLAIN SYNTAX CREATE TABLE t (x varchar(255) COLLATE something_else NOT NULL) ENGINE=Memory; -- {clientError SYNTAX_ERROR} + +SET compatibility_ignore_collation_in_create_table=false; +CREATE TABLE t_02267_collation (x varchar(255) COLLATE utf8_unicode_ci NOT NULL) ENGINE = Memory; -- {serverError NOT_IMPLEMENTED} + +SET compatibility_ignore_collation_in_create_table=true; +CREATE TABLE t_02267_collation (x varchar(255) COLLATE utf8_unicode_ci NOT NULL) ENGINE = Memory; + +DROP TABLE t_02267_collation; diff --git a/parser/testdata/02269_insert_select_with_format_without_schema_inference/ast.json b/parser/testdata/02269_insert_select_with_format_without_schema_inference/ast.json new file mode 100644 index 000000000..f58eef65a --- /dev/null +++ b/parser/testdata/02269_insert_select_with_format_without_schema_inference/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "InsertQuery (children 3)" + }, + { + "explain": " Function file (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function currentDatabase (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal '_02269_data'" + }, + { + "explain": " Literal 'RowBinary'" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 
1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Set" + }, + { + "explain": " Set" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001512423, + "rows_read": 16, + "bytes_read": 535 + } +} diff --git a/parser/testdata/02269_insert_select_with_format_without_schema_inference/metadata.json b/parser/testdata/02269_insert_select_with_format_without_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02269_insert_select_with_format_without_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02269_insert_select_with_format_without_schema_inference/query.sql b/parser/testdata/02269_insert_select_with_format_without_schema_inference/query.sql new file mode 100644 index 000000000..3c6b668c1 --- /dev/null +++ b/parser/testdata/02269_insert_select_with_format_without_schema_inference/query.sql @@ -0,0 +1,2 @@ +insert into function file(currentDatabase() || '_02269_data', 'RowBinary') select 1 settings engine_file_truncate_on_insert=1; +select * from file(currentDatabase() || '_02269_data', 'RowBinary', 'x UInt8'); diff --git a/parser/testdata/02269_to_start_of_interval_overflow/ast.json b/parser/testdata/02269_to_start_of_interval_overflow/ast.json new file mode 100644 index 000000000..2230af7ac --- /dev/null +++ b/parser/testdata/02269_to_start_of_interval_overflow/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toStartOfInterval (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '\\0930-12-12 12:12:12.1234567'" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Function toIntervalNanosecond (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1024" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001433919, + "rows_read": 13, + "bytes_read": 549 + } +} diff --git a/parser/testdata/02269_to_start_of_interval_overflow/metadata.json b/parser/testdata/02269_to_start_of_interval_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02269_to_start_of_interval_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02269_to_start_of_interval_overflow/query.sql b/parser/testdata/02269_to_start_of_interval_overflow/query.sql new file mode 100644 index 000000000..a3e03c7e8 --- /dev/null +++ b/parser/testdata/02269_to_start_of_interval_overflow/query.sql @@ -0,0 +1,6 @@ +select toStartOfInterval(toDateTime64('\0930-12-12 12:12:12.1234567', 3), toIntervalNanosecond(1024)); -- {serverError DECIMAL_OVERFLOW} + +SELECT + toDateTime64(-9223372036854775808, 1048575, toIntervalNanosecond(9223372036854775806), NULL), + toStartOfInterval(toDateTime64(toIntervalNanosecond(toIntervalNanosecond(257), toDateTime64(toStartOfInterval(toDateTime64(NULL)))), '', 100), toIntervalNanosecond(toStartOfInterval(toDateTime64(toIntervalNanosecond(NULL), NULL)), -1)), + 
toStartOfInterval(toDateTime64('\0930-12-12 12:12:12.1234567', 3), toIntervalNanosecond(1024)); -- {serverError DECIMAL_OVERFLOW} diff --git a/parser/testdata/02270_client_name/ast.json b/parser/testdata/02270_client_name/ast.json new file mode 100644 index 000000000..b547d44a2 --- /dev/null +++ b/parser/testdata/02270_client_name/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Set" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001146063, + "rows_read": 7, + "bytes_read": 215 + } +} diff --git a/parser/testdata/02270_client_name/metadata.json b/parser/testdata/02270_client_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02270_client_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02270_client_name/query.sql b/parser/testdata/02270_client_name/query.sql new file mode 100644 index 000000000..31aba7e6f --- /dev/null +++ b/parser/testdata/02270_client_name/query.sql @@ -0,0 +1,3 @@ +select 1 settings log_queries=1, log_queries_min_type='QUERY_FINISH' format Null; +system flush logs query_log; +select client_name from system.query_log where current_database = currentDatabase() and query like 'select 1%' format CSV; diff --git a/parser/testdata/02271_fix_column_matcher_and_column_transformer/ast.json b/parser/testdata/02271_fix_column_matcher_and_column_transformer/ast.json new file mode 100644 index 000000000..9ddcb419b --- /dev/null +++ b/parser/testdata/02271_fix_column_matcher_and_column_transformer/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001467559, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02271_fix_column_matcher_and_column_transformer/metadata.json b/parser/testdata/02271_fix_column_matcher_and_column_transformer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02271_fix_column_matcher_and_column_transformer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02271_fix_column_matcher_and_column_transformer/query.sql b/parser/testdata/02271_fix_column_matcher_and_column_transformer/query.sql new file mode 100644 index 000000000..ab89d98c6 --- /dev/null +++ b/parser/testdata/02271_fix_column_matcher_and_column_transformer/query.sql @@ -0,0 +1,77 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS github_events; + +CREATE TABLE github_events +( + `file_time` DateTime, + `event_type` Enum8('CommitCommentEvent' = 1, 'CreateEvent' = 2, 'DeleteEvent' = 3, 'ForkEvent' = 4, 'GollumEvent' = 5, 'IssueCommentEvent' = 6, 'IssuesEvent' = 7, 'MemberEvent' = 8, 'PublicEvent' = 9, 'PullRequestEvent' = 10, 'PullRequestReviewCommentEvent' = 11, 'PushEvent' = 12, 'ReleaseEvent' = 13, 'SponsorshipEvent' = 14, 'WatchEvent' = 15, 'GistEvent' = 16, 'FollowEvent' = 17, 'DownloadEvent' = 18, 'PullRequestReviewEvent' = 19, 'ForkApplyEvent' = 20, 'Event' = 21, 'TeamAddEvent' = 22), + `actor_login` LowCardinality(String), + `repo_name` LowCardinality(String), + `created_at` DateTime, + 
`updated_at` DateTime, + `action` Enum8('none' = 0, 'created' = 1, 'added' = 2, 'edited' = 3, 'deleted' = 4, 'opened' = 5, 'closed' = 6, 'reopened' = 7, 'assigned' = 8, 'unassigned' = 9, 'labeled' = 10, 'unlabeled' = 11, 'review_requested' = 12, 'review_request_removed' = 13, 'synchronize' = 14, 'started' = 15, 'published' = 16, 'update' = 17, 'create' = 18, 'fork' = 19, 'merged' = 20), + `comment_id` UInt64, + `body` String, + `path` String, + `position` Int32, + `line` Int32, + `ref` LowCardinality(String), + `ref_type` Enum8('none' = 0, 'branch' = 1, 'tag' = 2, 'repository' = 3, 'unknown' = 4), + `creator_user_login` LowCardinality(String), + `number` UInt32, + `title` String, + `labels` Array(LowCardinality(String)), + `state` Enum8('none' = 0, 'open' = 1, 'closed' = 2), + `locked` UInt8, + `assignee` LowCardinality(String), + `assignees` Array(LowCardinality(String)), + `comments` UInt32, + `author_association` Enum8('NONE' = 0, 'CONTRIBUTOR' = 1, 'OWNER' = 2, 'COLLABORATOR' = 3, 'MEMBER' = 4, 'MANNEQUIN' = 5), + `closed_at` DateTime, + `merged_at` DateTime, + `merge_commit_sha` String, + `requested_reviewers` Array(LowCardinality(String)), + `requested_teams` Array(LowCardinality(String)), + `head_ref` LowCardinality(String), + `head_sha` String, + `base_ref` LowCardinality(String), + `base_sha` String, + `merged` UInt8, + `mergeable` UInt8, + `rebaseable` UInt8, + `mergeable_state` Enum8('unknown' = 0, 'dirty' = 1, 'clean' = 2, 'unstable' = 3, 'draft' = 4), + `merged_by` LowCardinality(String), + `review_comments` UInt32, + `maintainer_can_modify` UInt8, + `commits` UInt32, + `additions` UInt32, + `deletions` UInt32, + `changed_files` UInt32, + `diff_hunk` String, + `original_position` UInt32, + `commit_id` String, + `original_commit_id` String, + `push_size` UInt32, + `push_distinct_size` UInt32, + `member_login` LowCardinality(String), + `release_tag_name` String, + `release_name` String, + `review_state` Enum8('none' = 0, 'approved' = 1, 'changes_requested' = 2, 'commented' = 3, 'dismissed' = 4, 'pending' = 5) +) +ENGINE = MergeTree ORDER BY (event_type, repo_name, created_at); + +with + top_repos as ( select repo_name from github_events where event_type = 'WatchEvent' and toDate(created_at) = today() - 1 group by repo_name order by count() desc limit 100 union distinct select repo_name from github_events where event_type = 'WatchEvent' and toMonday(created_at) = toMonday(today() - interval 1 week) group by repo_name order by count() desc limit 100 union distinct select repo_name from github_events where event_type = 'WatchEvent' and toStartOfMonth(created_at) = toStartOfMonth(today()) - interval 1 month group by repo_name order by count() desc limit 100 union distinct select repo_name from github_events where event_type = 'WatchEvent' and toYear(created_at) = toYear(today()) - 1 group by repo_name order by count() desc limit 100 ), + last_day as ( select repo_name, count() as count_last_day, rowNumberInAllBlocks() + 1 as position_last_day from github_events where repo_name in (select repo_name from top_repos) and toDate(created_at) = today() - 1 group by repo_name order by count_last_day desc ), + last_week as ( select repo_name, count() as count_last_week, rowNumberInAllBlocks() + 1 as position_last_week from github_events where repo_name in (select repo_name from top_repos) and toMonday(created_at) = toMonday(today()) - interval 1 week group by repo_name order by count_last_week desc ), + last_month as ( select repo_name, count() as count_last_month, rowNumberInAllBlocks() + 1 
as position_last_month from github_events where repo_name in (select repo_name from top_repos) and toStartOfMonth(created_at) = toStartOfMonth(today()) - interval 1 month group by repo_name order by count_last_month desc ) +select d.repo_name, columns('count') from last_day d join last_week w on d.repo_name = w.repo_name join last_month m on d.repo_name = m.repo_name; + +set allow_suspicious_low_cardinality_types=1; + +CREATE TABLE github_events__fuzz_0 (`file_time` Int64, `event_type` Enum8('CommitCommentEvent' = 1, 'CreateEvent' = 2, 'DeleteEvent' = 3, 'ForkEvent' = 4, 'GollumEvent' = 5, 'IssueCommentEvent' = 6, 'IssuesEvent' = 7, 'MemberEvent' = 8, 'PublicEvent' = 9, 'PullRequestEvent' = 10, 'PullRequestReviewCommentEvent' = 11, 'PushEvent' = 12, 'ReleaseEvent' = 13, 'SponsorshipEvent' = 14, 'WatchEvent' = 15, 'GistEvent' = 16, 'FollowEvent' = 17, 'DownloadEvent' = 18, 'PullRequestReviewEvent' = 19, 'ForkApplyEvent' = 20, 'Event' = 21, 'TeamAddEvent' = 22), `actor_login` LowCardinality(String), `repo_name` LowCardinality(Nullable(String)), `created_at` DateTime, `updated_at` DateTime, `action` Array(Enum8('none' = 0, 'created' = 1, 'added' = 2, 'edited' = 3, 'deleted' = 4, 'opened' = 5, 'closed' = 6, 'reopened' = 7, 'assigned' = 8, 'unassigned' = 9, 'labeled' = 10, 'unlabeled' = 11, 'review_requested' = 12, 'review_request_removed' = 13, 'synchronize' = 14, 'started' = 15, 'published' = 16, 'update' = 17, 'create' = 18, 'fork' = 19, 'merged' = 20)), `comment_id` UInt64, `body` String, `path` LowCardinality(String), `position` Int32, `line` Int32, `ref` String, `ref_type` Enum8('none' = 0, 'branch' = 1, 'tag' = 2, 'repository' = 3, 'unknown' = 4), `creator_user_login` Int16, `number` UInt32, `title` String, `labels` Array(Array(LowCardinality(String))), `state` Enum8('none' = 0, 'open' = 1, 'closed' = 2), `locked` UInt8, `assignee` Array(LowCardinality(String)), `assignees` Array(LowCardinality(String)), `comments` UInt32, `author_association` Array(Enum8('NONE' = 0, 'CONTRIBUTOR' = 1, 'OWNER' = 2, 'COLLABORATOR' = 3, 'MEMBER' = 4, 'MANNEQUIN' = 5)), `closed_at` UUID, `merged_at` DateTime, `merge_commit_sha` Nullable(String), `requested_reviewers` Array(LowCardinality(Int64)), `requested_teams` Array(String), `head_ref` String, `head_sha` String, `base_ref` String, `base_sha` String, `merged` Nullable(UInt8), `mergeable` Nullable(UInt8), `rebaseable` LowCardinality(UInt8), `mergeable_state` Array(Enum8('unknown' = 0, 'dirty' = 1, 'clean' = 2, 'unstable' = 3, 'draft' = 4)), `merged_by` LowCardinality(String), `review_comments` UInt32, `maintainer_can_modify` Nullable(UInt8), `commits` UInt32, `additions` Nullable(UInt32), `deletions` UInt32, `changed_files` UInt32, `diff_hunk` Nullable(String), `original_position` UInt32, `commit_id` String, `original_commit_id` String, `push_size` UInt32, `push_distinct_size` UInt32, `member_login` LowCardinality(String), `release_tag_name` LowCardinality(String), `release_name` String, `review_state` Int16) ENGINE = MergeTree ORDER BY (event_type, repo_name, created_at) settings allow_nullable_key=1; + +EXPLAIN PIPELINE header = true, compact = true WITH top_repos AS (SELECT repo_name FROM github_events__fuzz_0 WHERE (event_type = 'WatchEvent') AND (toDate(created_at) = (today() - 1)) GROUP BY repo_name ORDER BY count() DESC LIMIT 100 UNION DISTINCT SELECT repo_name FROM github_events__fuzz_0 WHERE (event_type = 'WatchEvent') AND (toMonday(created_at) = toMonday(today() - toIntervalWeek(1))) GROUP BY repo_name ORDER BY count() DESC LIMIT 100 UNION 
DISTINCT SELECT repo_name FROM github_events__fuzz_0 PREWHERE (event_type = 'WatchEvent') AND (toStartOfMonth(created_at) = (toStartOfMonth(today()) - toIntervalMonth(1))) GROUP BY repo_name ORDER BY count() DESC LIMIT 100 UNION DISTINCT SELECT repo_name FROM github_events WHERE (event_type = 'WatchEvent') AND (toYear(created_at) = (toYear(today()) - 1)) GROUP BY repo_name ORDER BY count() DESC LIMIT 100), last_day AS (SELECT repo_name, count() AS count_last_day, rowNumberInAllBlocks() + 1 AS position_last_day FROM github_events WHERE (repo_name IN (SELECT repo_name FROM top_repos)) AND (toDate(created_at) = (today() - 1)) GROUP BY repo_name ORDER BY count_last_day DESC), last_week AS (SELECT repo_name, count() AS count_last_week, rowNumberInAllBlocks() + 1 AS position_last_week FROM github_events WHERE (repo_name IN (SELECT repo_name FROM top_repos)) AND (toMonday(created_at) = (toMonday(today()) - toIntervalWeek(2))) GROUP BY repo_name ORDER BY count_last_week DESC), last_month AS (SELECT repo_name, count() AS count_last_month, rowNumberInAllBlocks() + 1 AS position_last_month FROM github_events__fuzz_0 WHERE ('deleted' = 4) AND in(repo_name) AND (toStartOfMonth(created_at) = (toStartOfMonth(today()) - toIntervalMonth(1))) GROUP BY repo_name ORDER BY count_last_month DESC) SELECT d.repo_name, COLUMNS(count) FROM last_day AS d INNER JOIN last_week AS w ON d.repo_name = w.repo_name INNER JOIN last_month AS m ON d.repo_name = m.repo_name format Null; -- { serverError INVALID_SETTING_VALUE } + +DROP TABLE github_events; diff --git a/parser/testdata/02271_int_sql_compatibility/ast.json b/parser/testdata/02271_int_sql_compatibility/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02271_int_sql_compatibility/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02271_int_sql_compatibility/metadata.json b/parser/testdata/02271_int_sql_compatibility/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02271_int_sql_compatibility/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02271_int_sql_compatibility/query.sql b/parser/testdata/02271_int_sql_compatibility/query.sql new file mode 100644 index 000000000..1d0f65760 --- /dev/null +++ b/parser/testdata/02271_int_sql_compatibility/query.sql @@ -0,0 +1,30 @@ +-- Tags: memory-engine +CREATE TEMPORARY TABLE t1_02271 (x INT(11)); +SHOW CREATE TEMPORARY TABLE t1_02271; + +CREATE TEMPORARY TABLE t2_02271 (x INT(11) DEFAULT 1); +SHOW CREATE TEMPORARY TABLE t2_02271; + +CREATE TEMPORARY TABLE t3_02271 (x INT(11) UNSIGNED); +SHOW CREATE TEMPORARY TABLE t3_02271; + +CREATE TEMPORARY TABLE t4_02271 (x INT(11) SIGNED); +SHOW CREATE TEMPORARY TABLE t4_02271; + +CREATE TEMPORARY TABLE t5_02271 (x INT(11) SIGNED DEFAULT 1); +SHOW CREATE TEMPORARY TABLE t5_02271; + +CREATE TEMPORARY TABLE t6_02271 (x INT()); +SHOW CREATE TEMPORARY TABLE t6_02271; + +CREATE TEMPORARY TABLE t7_02271 (x INT() DEFAULT 1); +SHOW CREATE TEMPORARY TABLE t7_02271; + +CREATE TEMPORARY TABLE t8_02271 (x INT() UNSIGNED); +SHOW CREATE TEMPORARY TABLE t8_02271; + +CREATE TEMPORARY TABLE t9_02271 (x INT() SIGNED); +SHOW CREATE TEMPORARY TABLE t9_02271; + +CREATE TEMPORARY TABLE t10_02271 (x INT() SIGNED DEFAULT 1); +SHOW CREATE TEMPORARY TABLE t10_02271; diff --git a/parser/testdata/02271_replace_partition_many_tables/ast.json b/parser/testdata/02271_replace_partition_many_tables/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/02271_replace_partition_many_tables/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02271_replace_partition_many_tables/metadata.json b/parser/testdata/02271_replace_partition_many_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02271_replace_partition_many_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02271_replace_partition_many_tables/query.sql b/parser/testdata/02271_replace_partition_many_tables/query.sql new file mode 100644 index 000000000..062b83358 --- /dev/null +++ b/parser/testdata/02271_replace_partition_many_tables/query.sql @@ -0,0 +1,84 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS replace_partition_source; +DROP TABLE IF EXISTS replace_partition_dest1; +DROP TABLE IF EXISTS replace_partition_dest1_2; +DROP TABLE IF EXISTS replace_partition_dest2; +DROP TABLE IF EXISTS replace_partition_dest2_2; + +CREATE TABLE replace_partition_source +( + key UInt64 +) +ENGINE = ReplicatedMergeTree('/test/02271_replace_partition_many/{database}/source', '1') +PARTITION BY key +ORDER BY tuple(); + +INSERT INTO replace_partition_source VALUES (1); + +CREATE TABLE replace_partition_dest1 +( + key UInt64 +) +ENGINE = ReplicatedMergeTree('/test/02271_replace_partition_many/{database}/dest1', '1') +PARTITION BY key +ORDER BY tuple(); + +CREATE TABLE replace_partition_dest1_2 +( + key UInt64 +) +ENGINE = ReplicatedMergeTree('/test/02271_replace_partition_many/{database}/dest1', '2') +PARTITION BY key +ORDER BY tuple(); + + +CREATE TABLE replace_partition_dest2 +( + key UInt64 +) +ENGINE = ReplicatedMergeTree('/test/02271_replace_partition_many/{database}/dest2', '1') +PARTITION BY key +ORDER BY tuple(); + +CREATE TABLE replace_partition_dest2_2 +( + key UInt64 +) +ENGINE = ReplicatedMergeTree('/test/02271_replace_partition_many/{database}/dest2', '2') +PARTITION BY key +ORDER BY tuple(); + + +ALTER TABLE replace_partition_dest1 REPLACE PARTITION 1 FROM replace_partition_source; +ALTER TABLE replace_partition_dest2 REPLACE PARTITION 1 FROM replace_partition_source; + +OPTIMIZE TABLE replace_partition_source FINAL; + +SELECT sleep(3) FORMAT Null; +SELECT sleep(3) FORMAT Null; +SELECT sleep(3) FORMAT Null; +SELECT sleep(3) FORMAT Null; +SELECT sleep(3) FORMAT Null; + +OPTIMIZE TABLE replace_partition_dest1_2 FINAL; +OPTIMIZE TABLE replace_partition_dest2_2 FINAL; + +SELECT sleep(3) FORMAT Null; +SELECT sleep(3) FORMAT Null; +SELECT sleep(3) FORMAT Null; +SELECT sleep(3) FORMAT Null; +SELECT sleep(3) FORMAT Null; + +SELECT * FROM replace_partition_source; +SELECT * FROM replace_partition_dest1; +SELECT * FROM replace_partition_dest2; +SELECT * FROM replace_partition_dest1_2; +SELECT * FROM replace_partition_dest2_2; + + +--DROP TABLE IF EXISTS replace_partition_source; +--DROP TABLE IF EXISTS replace_partition_dest1; +--DROP TABLE IF EXISTS replace_partition_dest1_2; +--DROP TABLE IF EXISTS replace_partition_dest2; +--DROP TABLE IF EXISTS replace_partition_dest2_2; diff --git a/parser/testdata/02271_temporary_table_show_rows_bytes/ast.json b/parser/testdata/02271_temporary_table_show_rows_bytes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02271_temporary_table_show_rows_bytes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02271_temporary_table_show_rows_bytes/metadata.json b/parser/testdata/02271_temporary_table_show_rows_bytes/metadata.json new file mode 100644 index 000000000..ef120d978 --- 
/dev/null +++ b/parser/testdata/02271_temporary_table_show_rows_bytes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02271_temporary_table_show_rows_bytes/query.sql b/parser/testdata/02271_temporary_table_show_rows_bytes/query.sql new file mode 100644 index 000000000..33ef5feb8 --- /dev/null +++ b/parser/testdata/02271_temporary_table_show_rows_bytes/query.sql @@ -0,0 +1,5 @@ +-- Tags: memory-engine +-- NOTE: database = currentDatabase() is not mandatory + +CREATE TEMPORARY TABLE 02271_temporary_table_show_rows_bytes (A Int64) Engine=Memory as SELECT * FROM numbers(1000); +SELECT database, name, total_rows, total_bytes FROM system.tables WHERE is_temporary AND name = '02271_temporary_table_show_rows_bytes'; diff --git a/parser/testdata/02276_full_sort_join_unsupported/ast.json b/parser/testdata/02276_full_sort_join_unsupported/ast.json new file mode 100644 index 000000000..d51d31055 --- /dev/null +++ b/parser/testdata/02276_full_sort_join_unsupported/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001509309, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02276_full_sort_join_unsupported/metadata.json b/parser/testdata/02276_full_sort_join_unsupported/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02276_full_sort_join_unsupported/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02276_full_sort_join_unsupported/query.sql b/parser/testdata/02276_full_sort_join_unsupported/query.sql new file mode 100644 index 000000000..dc2ac2cd4 --- /dev/null +++ b/parser/testdata/02276_full_sort_join_unsupported/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (key UInt32, val UInt32) ENGINE = Memory; +INSERT INTO t1 VALUES (1, 1); + +CREATE TABLE t2 (key UInt32, val UInt32) ENGINE = Memory; +INSERT INTO t2 VALUES (1, 2); + +SET join_algorithm = 'full_sorting_merge'; + +SELECT * FROM t1 JOIN t2 ON t1.key = t2.key OR t1.val = t2.key; -- { serverError NOT_IMPLEMENTED } + +SELECT * FROM t1 ANTI JOIN t2 ON t1.key = t2.key; -- { serverError NOT_IMPLEMENTED } + +SELECT * FROM t1 SEMI JOIN t2 ON t1.key = t2.key; -- { serverError NOT_IMPLEMENTED } + +SELECT * FROM t1 ANY JOIN t2 ON t1.key = t2.key SETTINGS any_join_distinct_right_table_keys = 1; -- { serverError NOT_IMPLEMENTED } + +SELECT * FROM ( SELECT key, sum(val) AS val FROM t1 GROUP BY key WITH TOTALS ) as t1 +JOIN ( SELECT key, sum(val) AS val FROM t2 GROUP BY key WITH TOTALS ) as t2 ON t1.key = t2.key; -- { serverError NOT_IMPLEMENTED } + +SELECT * FROM t1 JOIN ( SELECT key, sum(val) AS val FROM t2 GROUP BY key WITH TOTALS ) as t2 ON t1.key = t2.key; -- { serverError NOT_IMPLEMENTED } + +SELECT * FROM ( SELECT key, sum(val) AS val FROM t1 GROUP BY key WITH TOTALS ) as t1 JOIN t2 ON t1.key = t2.key; -- { serverError NOT_IMPLEMENTED } + +SELECT * FROM t1 FULL JOIN t2 ON t1.key = t2.key AND t2.key > 0; -- { serverError NOT_IMPLEMENTED } +SELECT * FROM t1 FULL JOIN t2 ON t1.key = t2.key AND t1.key > 0; -- { serverError NOT_IMPLEMENTED } + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; diff --git a/parser/testdata/02277_full_sort_join_misc/ast.json b/parser/testdata/02277_full_sort_join_misc/ast.json new file mode 100644 index 000000000..bebf7ee8a --- /dev/null +++ 
b/parser/testdata/02277_full_sort_join_misc/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.0015459, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02277_full_sort_join_misc/metadata.json b/parser/testdata/02277_full_sort_join_misc/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02277_full_sort_join_misc/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02277_full_sort_join_misc/query.sql b/parser/testdata/02277_full_sort_join_misc/query.sql new file mode 100644 index 000000000..4297f532b --- /dev/null +++ b/parser/testdata/02277_full_sort_join_misc/query.sql @@ -0,0 +1,24 @@ +SET join_algorithm = 'full_sorting_merge'; + +SELECT * FROM (SELECT 1 as key) AS t1 JOIN (SELECT 1 as key) t2 ON t1.key = t2.key ORDER BY key; + +SELECT * FROM (SELECT 1 as key) AS t1 JOIN (SELECT 1 as key) t2 USING key ORDER BY key; + +SELECT * FROM (SELECT 1 :: UInt32 as key) AS t1 FULL JOIN (SELECT 1 :: Nullable(UInt32) as key) t2 USING (key) ORDER BY key; + +SELECT * FROM (SELECT 1 :: UInt32 as key) AS t1 FULL JOIN (SELECT NULL :: Nullable(UInt32) as key) t2 USING (key) ORDER BY key; + +SELECT * FROM (SELECT 1 :: Int32 as key) AS t1 JOIN (SELECT 1 :: UInt32 as key) t2 ON t1.key = t2.key ORDER BY key; + +SELECT * FROM (SELECT -1 :: Nullable(Int32) as key) AS t1 FULL JOIN (SELECT 4294967295 :: UInt32 as key) t2 ON t1.key = t2.key ORDER BY key; + +SELECT * FROM (SELECT 'a' :: LowCardinality(String) AS key) AS t1 JOIN (SELECT 'a' :: String AS key) AS t2 ON t1.key = t2.key ORDER BY key; + +SELECT * FROM (SELECT 'a' :: LowCardinality(Nullable(String)) AS key) AS t1 JOIN (SELECT 'a' :: String AS key) AS t2 ON t1.key = t2.key ORDER BY key; + +SELECT * FROM (SELECT 'a' :: LowCardinality(Nullable(String)) AS key) AS t1 JOIN (SELECT 'a' :: Nullable(String) AS key) AS t2 ON t1.key = t2.key ORDER BY key; + +SELECT * FROM (SELECT 'a' :: LowCardinality(String) AS key) AS t1 JOIN (SELECT 'a' :: LowCardinality(String) AS key) AS t2 ON t1.key = t2.key ORDER BY key; + +SELECT 5 == count() FROM (SELECT number as a from numbers(5)) as t1 LEFT JOIN (SELECT number as b from numbers(5) WHERE number > 100) as t2 ON t1.a = t2.b ORDER BY 1; +SELECT 5 == count() FROM (SELECT number as a from numbers(5) WHERE number > 100) as t1 RIGHT JOIN (SELECT number as b from numbers(5)) as t2 ON t1.a = t2.b ORDER BY 1; diff --git a/parser/testdata/02280_add_query_level_settings/ast.json b/parser/testdata/02280_add_query_level_settings/ast.json new file mode 100644 index 000000000..9c29609a9 --- /dev/null +++ b/parser/testdata/02280_add_query_level_settings/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_for_alter (children 1)" + }, + { + "explain": " Identifier table_for_alter" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001121307, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02280_add_query_level_settings/metadata.json b/parser/testdata/02280_add_query_level_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02280_add_query_level_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02280_add_query_level_settings/query.sql b/parser/testdata/02280_add_query_level_settings/query.sql new 
file mode 100644 index 000000000..2d4e2a9e6 --- /dev/null +++ b/parser/testdata/02280_add_query_level_settings/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS table_for_alter; + +CREATE TABLE table_for_alter ( + id UInt64, + Data String +) ENGINE = MergeTree() ORDER BY id SETTINGS parts_to_throw_insert = 1, parts_to_delay_insert = 1; + +INSERT INTO table_for_alter VALUES (1, '1'); +INSERT INTO table_for_alter VALUES (2, '2'); -- { serverError TOO_MANY_PARTS } + +INSERT INTO table_for_alter settings parts_to_throw_insert = 100, parts_to_delay_insert = 100 VALUES (2, '2'); + +INSERT INTO table_for_alter VALUES (3, '3'); -- { serverError TOO_MANY_PARTS } + +ALTER TABLE table_for_alter MODIFY SETTING parts_to_throw_insert = 100, parts_to_delay_insert = 100; + +INSERT INTO table_for_alter VALUES (3, '3'); diff --git a/parser/testdata/02281_limit_by_distributed/ast.json b/parser/testdata/02281_limit_by_distributed/ast.json new file mode 100644 index 000000000..1cf0f6308 --- /dev/null +++ b/parser/testdata/02281_limit_by_distributed/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier k" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001031986, + "rows_read": 5, + "bytes_read": 173 + } +} diff --git a/parser/testdata/02281_limit_by_distributed/metadata.json b/parser/testdata/02281_limit_by_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02281_limit_by_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02281_limit_by_distributed/query.sql b/parser/testdata/02281_limit_by_distributed/query.sql new file mode 100644 index 000000000..072e778d9 --- /dev/null +++ b/parser/testdata/02281_limit_by_distributed/query.sql @@ -0,0 +1,15 @@ +SELECT k +FROM ( + SELECT k, abs(v) AS _v + FROM remote('127.{1,2}', view(select materialize('foo') as k, -1 as v)) + ORDER BY _v ASC + LIMIT 1 BY k +) +GROUP BY k; + +-- Simplified version of the reproducer provided in [1]. 
+-- [1]: https://github.com/ClickHouse/ClickHouse/issues/37045 +SELECT dummy +FROM remote('127.{1,2}', system.one) +WHERE dummy IN (SELECT 0) +LIMIT 1 BY dummy; diff --git a/parser/testdata/02282_array_distance/ast.json b/parser/testdata/02282_array_distance/ast.json new file mode 100644 index 000000000..36a127f00 --- /dev/null +++ b/parser/testdata/02282_array_distance/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001498584, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02282_array_distance/metadata.json b/parser/testdata/02282_array_distance/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02282_array_distance/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02282_array_distance/query.sql b/parser/testdata/02282_array_distance/query.sql new file mode 100644 index 000000000..076ee93c3 --- /dev/null +++ b/parser/testdata/02282_array_distance/query.sql @@ -0,0 +1,126 @@ +SET join_algorithm = 'hash'; + +SELECT L1Distance([0, 0, 0], [1, 2, 3]); +SELECT L2Distance([1, 2, 3], [0, 0, 0]); +SELECT L2SquaredDistance([1, 2, 3], [0, 0, 0]); +SELECT LpDistance([1, 2, 3], [0, 0, 0], 3.5); +SELECT LinfDistance([1, 2, 3], [0, 0, 0]); +SELECT cosineDistance([1, 2, 3], [3, 5, 7]); + +SELECT L2Distance([1, 2, 3], NULL); +SELECT L2SquaredDistance([1, 2, 3], NULL); +SELECT cosineDistance([1, 2, 3], [0, 0, 0]); + +-- Overflows +WITH CAST([-547274980, 1790553898, 1981517754, 1908431500, 1352428565, -573412550, -552499284, 2096941042], 'Array(Int32)') AS a +SELECT + L1Distance(a, a), + L2Distance(a, a), + L2SquaredDistance(a, a), + LinfDistance(a, a), + cosineDistance(a, a); + +DROP TABLE IF EXISTS vec1; +DROP TABLE IF EXISTS vec2; +DROP TABLE IF EXISTS vec2f; +DROP TABLE IF EXISTS vec2d; +CREATE TABLE vec1 (id UInt64, v Array(UInt8)) ENGINE = Memory; +CREATE TABLE vec2 (id UInt64, v Array(Int64)) ENGINE = Memory; +CREATE TABLE vec2f (id UInt64, v Array(Float32)) ENGINE = Memory; +CREATE TABLE vec2d (id UInt64, v Array(Float64)) ENGINE = Memory; + +INSERT INTO vec1 VALUES (1, [3, 4, 5]), (2, [2, 4, 8]), (3, [7, 7, 7]); +SELECT L1Distance(v, [0, 0, 0]) FROM vec1; +SELECT L2Distance(v, [0, 0, 0]) FROM vec1; +SELECT L2SquaredDistance(v, [0, 0, 0]) FROM vec1; +SELECT LpDistance(v, [0, 0, 0], 3.14) FROM vec1; +SELECT LinfDistance([5, 4, 3], v) FROM vec1; +SELECT cosineDistance([3, 2, 1], v) FROM vec1; +SELECT LinfDistance(v, materialize([0, -2, 0])) FROM vec1; +SELECT cosineDistance(v, materialize([1., 1., 1.])) FROM vec1; + +INSERT INTO vec2 VALUES (1, [100, 200, 0]), (2, [888, 777, 666]), (3, range(1, 35, 1)), (4, range(3, 37, 1)), (5, range(1, 135, 1)), (6, range(3, 137, 1)); +SELECT + v1.id, + v2.id, + L1Distance(v1.v, v2.v), + LinfDistance(v1.v, v2.v), + LpDistance(v1.v, v2.v, 3.1), + L2Distance(v1.v, v2.v), + L2SquaredDistance(v1.v, v2.v), + cosineDistance(v1.v, v2.v) +FROM vec2 v1, vec2 v2 +WHERE length(v1.v) == length(v2.v) +ORDER BY ALL; + +INSERT INTO vec2f VALUES (1, [100, 200, 0]), (2, [888, 777, 666]), (3, range(1, 35, 1)), (4, range(3, 37, 1)), (5, range(1, 135, 1)), (6, range(3, 137, 1)); +SELECT + v1.id, + v2.id, + L1Distance(v1.v, v2.v), + LinfDistance(v1.v, v2.v), + LpDistance(v1.v, v2.v, 3), + L2Distance(v1.v, v2.v), + L2SquaredDistance(v1.v, v2.v), + cosineDistance(v1.v, v2.v) +FROM vec2f v1, vec2f v2 +WHERE length(v1.v) == length(v2.v) +ORDER BY ALL; 
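+-- (Editorial comment, not part of the upstream test.) The Float32 block above is repeated below for Float64 (vec2d) and then with mixed Float32/Float64 arguments, so each element-type specialization of the distance functions is exercised against the same fixture.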
+ +INSERT INTO vec2d VALUES (1, [100, 200, 0]), (2, [888, 777, 666]), (3, range(1, 35, 1)), (4, range(3, 37, 1)), (5, range(1, 135, 1)), (6, range(3, 137, 1)); +SELECT + v1.id, + v2.id, + L1Distance(v1.v, v2.v), + LinfDistance(v1.v, v2.v), + LpDistance(v1.v, v2.v, 3), + L2Distance(v1.v, v2.v), + L2SquaredDistance(v1.v, v2.v), + cosineDistance(v1.v, v2.v) +FROM vec2d v1, vec2d v2 +WHERE length(v1.v) == length(v2.v) +ORDER BY ALL; + +SELECT + v1.id, + v2.id, + L1Distance(v1.v, v2.v), + LinfDistance(v1.v, v2.v), + LpDistance(v1.v, v2.v, 3), + L2Distance(v1.v, v2.v), + L2SquaredDistance(v1.v, v2.v), + cosineDistance(v1.v, v2.v) +FROM vec2f v1, vec2d v2 +WHERE length(v1.v) == length(v2.v) +ORDER BY ALL; + +SELECT L1Distance([0, 0], [1]); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } +SELECT L2Distance([1, 2], (3,4)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT L2SquaredDistance([1, 2], (3,4)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT LpDistance([1, 2], [3,4]); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT LpDistance([1, 2], [3,4], -1.); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT LpDistance([1, 2], [3,4], 'aaa'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT LpDistance([1, 2], [3,4], materialize(2.7)); -- { serverError ILLEGAL_COLUMN } + +DROP TABLE vec1; +DROP TABLE vec2; +DROP TABLE vec2f; +DROP TABLE vec2d; + +-- Queries which trigger manually vectorized implementation + +SELECT L2Distance( + [toFloat32(0.0), toFloat32(1.0), toFloat32(2.0), toFloat32(3.0), toFloat32(4.0), toFloat32(5.0), toFloat32(6.0), toFloat32(7.0), toFloat32(8.0), toFloat32(9.0), toFloat32(10.0), toFloat32(11.0), toFloat32(12.0), toFloat32(13.0), toFloat32(14.0), toFloat32(15.0), toFloat32(16.0), toFloat32(17.0), toFloat32(18.0), toFloat32(19.0), toFloat32(20.0), toFloat32(21.0), toFloat32(22.0), toFloat32(23.0), toFloat32(24.0), toFloat32(25.0), toFloat32(26.0), toFloat32(27.0), toFloat32(28.0), toFloat32(29.0), toFloat32(30.0), toFloat32(31.0), toFloat32(32.0), toFloat32(33.0)], + materialize([toFloat32(1.0), toFloat32(2.0), toFloat32(3.0), toFloat32(4.0), toFloat32(5.0), toFloat32(6.0), toFloat32(7.0), toFloat32(8.0), toFloat32(9.0), toFloat32(10.0), toFloat32(11.0), toFloat32(12.0), toFloat32(13.0), toFloat32(14.0), toFloat32(15.0), toFloat32(16.0), toFloat32(17.0), toFloat32(18.0), toFloat32(19.0), toFloat32(20.0), toFloat32(21.0), toFloat32(22.0), toFloat32(23.0), toFloat32(24.0), toFloat32(25.0), toFloat32(26.0), toFloat32(27.0), toFloat32(28.0), toFloat32(29.0), toFloat32(30.0), toFloat32(31.0), toFloat32(32.0), toFloat32(33.0), toFloat32(34.0)])); + +SELECT cosineDistance( + [toFloat32(0.0), toFloat32(1.0), toFloat32(2.0), toFloat32(3.0), toFloat32(4.0), toFloat32(5.0), toFloat32(6.0), toFloat32(7.0), toFloat32(8.0), toFloat32(9.0), toFloat32(10.0), toFloat32(11.0), toFloat32(12.0), toFloat32(13.0), toFloat32(14.0), toFloat32(15.0), toFloat32(16.0), toFloat32(17.0), toFloat32(18.0), toFloat32(19.0), toFloat32(20.0), toFloat32(21.0), toFloat32(22.0), toFloat32(23.0), toFloat32(24.0), toFloat32(25.0), toFloat32(26.0), toFloat32(27.0), toFloat32(28.0), toFloat32(29.0), toFloat32(30.0), toFloat32(31.0), toFloat32(32.0), toFloat32(33.0)], + materialize([toFloat32(1.0), toFloat32(2.0), toFloat32(3.0), toFloat32(4.0), toFloat32(5.0), toFloat32(6.0), toFloat32(7.0), toFloat32(8.0), toFloat32(9.0), toFloat32(10.0), toFloat32(11.0), toFloat32(12.0), toFloat32(13.0), toFloat32(14.0), toFloat32(15.0), toFloat32(16.0), toFloat32(17.0), toFloat32(18.0), toFloat32(19.0), 
toFloat32(20.0), toFloat32(21.0), toFloat32(22.0), toFloat32(23.0), toFloat32(24.0), toFloat32(25.0), toFloat32(26.0), toFloat32(27.0), toFloat32(28.0), toFloat32(29.0), toFloat32(30.0), toFloat32(31.0), toFloat32(32.0), toFloat32(33.0), toFloat32(34.0)])); + +SELECT L2Distance( + [toFloat64(0.0), toFloat64(1.0), toFloat64(2.0), toFloat64(3.0), toFloat64(4.0), toFloat64(5.0), toFloat64(6.0), toFloat64(7.0), toFloat64(8.0), toFloat64(9.0), toFloat64(10.0), toFloat64(11.0), toFloat64(12.0), toFloat64(13.0), toFloat64(14.0), toFloat64(15.0), toFloat64(16.0), toFloat64(17.0), toFloat64(18.0), toFloat64(19.0), toFloat64(20.0), toFloat64(21.0), toFloat64(22.0), toFloat64(23.0), toFloat64(24.0), toFloat64(25.0), toFloat64(26.0), toFloat64(27.0), toFloat64(28.0), toFloat64(29.0), toFloat64(30.0), toFloat64(31.0), toFloat64(32.0), toFloat64(33.0)], + materialize([toFloat64(1.0), toFloat64(2.0), toFloat64(3.0), toFloat64(4.0), toFloat64(5.0), toFloat64(6.0), toFloat64(7.0), toFloat64(8.0), toFloat64(9.0), toFloat64(10.0), toFloat64(11.0), toFloat64(12.0), toFloat64(13.0), toFloat64(14.0), toFloat64(15.0), toFloat64(16.0), toFloat64(17.0), toFloat64(18.0), toFloat64(19.0), toFloat64(20.0), toFloat64(21.0), toFloat64(22.0), toFloat64(23.0), toFloat64(24.0), toFloat64(25.0), toFloat64(26.0), toFloat64(27.0), toFloat64(28.0), toFloat64(29.0), toFloat64(30.0), toFloat64(31.0), toFloat64(32.0), toFloat64(33.0), toFloat64(34.0)])); + +SELECT cosineDistance( + [toFloat64(0.0), toFloat64(1.0), toFloat64(2.0), toFloat64(3.0), toFloat64(4.0), toFloat64(5.0), toFloat64(6.0), toFloat64(7.0), toFloat64(8.0), toFloat64(9.0), toFloat64(10.0), toFloat64(11.0), toFloat64(12.0), toFloat64(13.0), toFloat64(14.0), toFloat64(15.0), toFloat64(16.0), toFloat64(17.0), toFloat64(18.0), toFloat64(19.0), toFloat64(20.0), toFloat64(21.0), toFloat64(22.0), toFloat64(23.0), toFloat64(24.0), toFloat64(25.0), toFloat64(26.0), toFloat64(27.0), toFloat64(28.0), toFloat64(29.0), toFloat64(30.0), toFloat64(31.0), toFloat64(32.0), toFloat64(33.0)], + materialize([toFloat64(1.0), toFloat64(2.0), toFloat64(3.0), toFloat64(4.0), toFloat64(5.0), toFloat64(6.0), toFloat64(7.0), toFloat64(8.0), toFloat64(9.0), toFloat64(10.0), toFloat64(11.0), toFloat64(12.0), toFloat64(13.0), toFloat64(14.0), toFloat64(15.0), toFloat64(16.0), toFloat64(17.0), toFloat64(18.0), toFloat64(19.0), toFloat64(20.0), toFloat64(21.0), toFloat64(22.0), toFloat64(23.0), toFloat64(24.0), toFloat64(25.0), toFloat64(26.0), toFloat64(27.0), toFloat64(28.0), toFloat64(29.0), toFloat64(30.0), toFloat64(31.0), toFloat64(32.0), toFloat64(33.0), toFloat64(34.0)])); diff --git a/parser/testdata/02283_array_norm/ast.json b/parser/testdata/02283_array_norm/ast.json new file mode 100644 index 000000000..eaa4c0f5b --- /dev/null +++ b/parser/testdata/02283_array_norm/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function L1Norm (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3]" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001306205, + "rows_read": 7, + "bytes_read": 287 + } +} diff --git a/parser/testdata/02283_array_norm/metadata.json b/parser/testdata/02283_array_norm/metadata.json new 
file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02283_array_norm/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02283_array_norm/query.sql b/parser/testdata/02283_array_norm/query.sql new file mode 100644 index 000000000..f48d88e3f --- /dev/null +++ b/parser/testdata/02283_array_norm/query.sql @@ -0,0 +1,47 @@ +SELECT L1Norm([1, 2, 3]); +SELECT L2Norm([3., 4., 5.]); +SELECT L2SquaredNorm([3., 4., 5.]); +SELECT LpNorm([3., 4., 5.], 1.1); +SELECT LinfNorm([0, 0, 2]); + +-- Overflows +WITH CAST([-547274980, 1790553898, 1981517754, 1908431500, 1352428565, -573412550, -552499284, 2096941042], 'Array(Int32)') AS a +SELECT + L1Norm(a), + L2Norm(a), + L2SquaredNorm(a), + LpNorm(a,1), + LpNorm(a,2), + LpNorm(a,3.14), + LinfNorm(a); + +DROP TABLE IF EXISTS vec1; +DROP TABLE IF EXISTS vec1f; +DROP TABLE IF EXISTS vec1d; +CREATE TABLE vec1 (id UInt64, v Array(UInt8)) ENGINE = Memory; +CREATE TABLE vec1f (id UInt64, v Array(Float32)) ENGINE = Memory; +CREATE TABLE vec1d (id UInt64, v Array(Float64)) ENGINE = Memory; +INSERT INTO vec1 VALUES (1, [3, 4]), (2, [2]), (3, [3, 3, 3]), (4, NULL), (5, range(7, 27)), (6, range(3, 103)); +INSERT INTO vec1f VALUES (1, [3, 4]), (2, [2]), (3, [3, 3, 3]), (4, NULL), (5, range(7, 27)), (6, range(3, 103)); +INSERT INTO vec1d VALUES (1, [3, 4]), (2, [2]), (3, [3, 3, 3]), (4, NULL), (5, range(7, 27)), (6, range(3, 103)); + +SELECT id, L1Norm(v), L2Norm(v), L2SquaredNorm(v), LpNorm(v, 2.7), LinfNorm(v) FROM vec1; +SELECT id, L1Norm(materialize([5., 6.])) FROM vec1; + +SELECT id, L1Norm(v), L2Norm(v), L2SquaredNorm(v), LpNorm(v, 2.7), LinfNorm(v) FROM vec1f; +SELECT id, L1Norm(materialize([5., 6.])) FROM vec1f; + +SELECT id, L1Norm(v), L2Norm(v), L2SquaredNorm(v), LpNorm(v, 2.7), LinfNorm(v) FROM vec1d; +SELECT id, L1Norm(materialize([5., 6.])) FROM vec1d; + +SELECT L1Norm(1, 2); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT LpNorm([1,2]); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT LpNorm([1,2], -3.4); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT LpNorm([1,2], 'aa'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT LpNorm([1,2], [1]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT LpNorm([1,2], materialize(3.14)); -- { serverError ILLEGAL_COLUMN } + +DROP TABLE vec1; +DROP TABLE vec1f; +DROP TABLE vec1d; diff --git a/parser/testdata/02285_executable_user_defined_function_group_by/ast.json b/parser/testdata/02285_executable_user_defined_function_group_by/ast.json new file mode 100644 index 000000000..d8dc02497 --- /dev/null +++ b/parser/testdata/02285_executable_user_defined_function_group_by/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function test_function (alias a) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " 
ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001631491, + "rows_read": 19, + "bytes_read": 725 + } +} diff --git a/parser/testdata/02285_executable_user_defined_function_group_by/metadata.json b/parser/testdata/02285_executable_user_defined_function_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02285_executable_user_defined_function_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02285_executable_user_defined_function_group_by/query.sql b/parser/testdata/02285_executable_user_defined_function_group_by/query.sql new file mode 100644 index 000000000..96db8c088 --- /dev/null +++ b/parser/testdata/02285_executable_user_defined_function_group_by/query.sql @@ -0,0 +1 @@ +SELECT test_function(number, number) as a FROM numbers(10) GROUP BY a ORDER BY a; diff --git a/parser/testdata/02285_hex_bin_support_more_types/ast.json b/parser/testdata/02285_hex_bin_support_more_types/ast.json new file mode 100644 index 000000000..511abef0e --- /dev/null +++ b/parser/testdata/02285_hex_bin_support_more_types/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function hex (alias res) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUInt128 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100000000000" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001725425, + "rows_read": 9, + "bytes_read": 368 + } +} diff --git a/parser/testdata/02285_hex_bin_support_more_types/metadata.json b/parser/testdata/02285_hex_bin_support_more_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02285_hex_bin_support_more_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02285_hex_bin_support_more_types/query.sql b/parser/testdata/02285_hex_bin_support_more_types/query.sql new file mode 100644 index 000000000..0efbd8dd3 --- /dev/null +++ b/parser/testdata/02285_hex_bin_support_more_types/query.sql @@ -0,0 +1,60 @@ +SELECT hex(toUInt128(100000000000)) AS res; + +SELECT hex(toUInt256(100000000000)) AS res; + +SELECT hex(toInt8(100)) AS res; + +SELECT hex(toInt8(-100)) AS res; + +SELECT hex(toInt16(100)) AS res; + +SELECT hex(toInt16(-100)) AS res; + +SELECT hex(toInt32(100)) AS res; + +SELECT hex(toInt32(-100)) AS res; + +SELECT hex(toInt64(100)) AS res; + +SELECT hex(toInt64(-100)) AS res; + +SELECT hex(toInt128(100000000000)) AS res; + +SELECT hex(toInt128(100000000000)) AS res; + +SELECT hex(toInt256(100000000000)) AS res; + +SELECT hex(toInt128(-100000000000)) AS res; + +SELECT hex(toInt256(-100000000000)) AS res; + +SELECT bin(toUInt128(100000000000)) AS res; + +SELECT bin(toUInt256(100000000000)) AS res; + +SELECT bin(toInt8(100)) AS res; + +SELECT bin(toInt8(-100)) AS res; + +SELECT bin(toInt16(100)) AS res; + +SELECT bin(toInt16(-100)) AS res; + +SELECT bin(toInt32(100)) AS res; + +SELECT bin(toInt32(-100)) AS res; + 
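+-- (Editorial comment, not part of the upstream test.) hex() and bin() render the full fixed width of the argument type: toInt8(-100) is the two's-complement byte 0x9C, so hex yields '9C' and bin yields '10011100', while the Int32/Int64/Int128/Int256 variants below pad to their respective widths.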
+SELECT bin(toInt64(100)) AS res; + +SELECT bin(toInt64(-100)) AS res; + +SELECT bin(toInt128(100000000000)) AS res; + +SELECT bin(toInt128(100000000000)) AS res; + +SELECT bin(toInt256(100000000000)) AS res; + +SELECT bin(toInt128(-100000000000)) AS res; + +SELECT bin(toInt256(-100000000000)) AS res; + diff --git a/parser/testdata/02286_convert_decimal_type/ast.json b/parser/testdata/02286_convert_decimal_type/ast.json new file mode 100644 index 000000000..a990b850e --- /dev/null +++ b/parser/testdata/02286_convert_decimal_type/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function VALUES (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'x Decimal32(0)'" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001684417, + "rows_read": 12, + "bytes_read": 460 + } +} diff --git a/parser/testdata/02286_convert_decimal_type/metadata.json b/parser/testdata/02286_convert_decimal_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02286_convert_decimal_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02286_convert_decimal_type/query.sql b/parser/testdata/02286_convert_decimal_type/query.sql new file mode 100644 index 000000000..ffc4c1769 --- /dev/null +++ b/parser/testdata/02286_convert_decimal_type/query.sql @@ -0,0 +1,4 @@ +SELECT * FROM VALUES('x Decimal32(0)', (1)); +SELECT * FROM VALUES('x Decimal64(0)', (2)); +SELECT * FROM VALUES('x Decimal128(0)', (3)); +SELECT * FROM VALUES('x Decimal256(0)', (4)); diff --git a/parser/testdata/02286_function_wyhash/ast.json b/parser/testdata/02286_function_wyhash/ast.json new file mode 100644 index 000000000..5f3341bb2 --- /dev/null +++ b/parser/testdata/02286_function_wyhash/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function wyHash64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal NULL" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001407888, + "rows_read": 7, + "bytes_read": 257 + } +} diff --git a/parser/testdata/02286_function_wyhash/metadata.json b/parser/testdata/02286_function_wyhash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02286_function_wyhash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02286_function_wyhash/query.sql b/parser/testdata/02286_function_wyhash/query.sql new file mode 100644 index 000000000..3307821b4 --- /dev/null +++ b/parser/testdata/02286_function_wyhash/query.sql @@ -0,0 +1,5 @@ +SELECT wyHash64(NULL); +SELECT wyHash64(''); +SELECT wyHash64(' '); +SELECT wyHash64('qwerty'); + diff --git 
a/parser/testdata/02286_quantile_tdigest_infinity/ast.json b/parser/testdata/02286_quantile_tdigest_infinity/ast.json new file mode 100644 index 000000000..0bb967d05 --- /dev/null +++ b/parser/testdata/02286_quantile_tdigest_infinity/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '1'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.00153992, + "rows_read": 5, + "bytes_read": 172 + } +} diff --git a/parser/testdata/02286_quantile_tdigest_infinity/metadata.json b/parser/testdata/02286_quantile_tdigest_infinity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02286_quantile_tdigest_infinity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02286_quantile_tdigest_infinity/query.sql b/parser/testdata/02286_quantile_tdigest_infinity/query.sql new file mode 100644 index 000000000..d21f73526 --- /dev/null +++ b/parser/testdata/02286_quantile_tdigest_infinity/query.sql @@ -0,0 +1,54 @@ +SELECT '1'; +SELECT quantilesTDigestArray(0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99)(arrayResize(arrayResize([inf], 500000, -inf), 1000000, inf)); +SELECT quantilesTDigestArray(0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99)(arrayResize(arrayResize([inf], 500000, inf), 1000000, -inf)); +SELECT quantilesTDigestArray(0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99)(arrayResize(arrayResize([inf], 500000, inf), 1000000, 0)); +SELECT quantilesTDigestArray(0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99)(arrayResize(arrayResize([inf], 500000, -inf), 1000000, 0)); +SELECT quantilesTDigestArray(0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99)(arrayResize(arrayResize([0], 500000, inf), 1000000, -inf)); +SELECT quantilesTDigestArray(0.01, 0.1, 0.25, 0.5, 0.75, 0.9, 0.99)(arrayResize(arrayResize([0], 500000, -inf), 1000000, inf)); + +SELECT '2'; +SELECT quantilesTDigest(0.05)(x) FROM (SELECT inf*(number%2-0.5) x FROM numbers(300)); +SELECT quantilesTDigest(0.5)(x) FROM (SELECT inf*(number%2-0.5) x FROM numbers(300)); +SELECT quantilesTDigest(0.95)(x) FROM (SELECT inf*(number%2-0.5) x FROM numbers(300)); + +SELECT '3'; +SELECT quantiles(0.5)(inf) FROM numbers(5); +SELECT quantiles(0.5)(inf) FROM numbers(300); +SELECT quantiles(0.5)(-inf) FROM numbers(5); +SELECT quantiles(0.5)(-inf) FROM numbers(300); + +SELECT '4'; +SELECT quantiles(0.5)(arrayJoin([inf, 0, -inf])); +SELECT quantiles(0.5)(arrayJoin([-inf, 0, inf])); +SELECT quantiles(0.5)(arrayJoin([inf, -inf, 0])); +SELECT quantiles(0.5)(arrayJoin([-inf, inf, 0])); +SELECT quantiles(0.5)(arrayJoin([inf, inf, 0, -inf, -inf, -0])); +SELECT quantiles(0.5)(arrayJoin([inf, -inf, 0, -inf, inf, -0])); +SELECT quantiles(0.5)(arrayJoin([-inf, -inf, 0, inf, inf, -0])); + +SELECT '5'; +DROP TABLE IF EXISTS issue32107; +CREATE TABLE issue32107(A Int64, s_quantiles AggregateFunction(quantilesTDigest(0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99), Float64)) ENGINE = AggregatingMergeTree ORDER BY A; +INSERT INTO issue32107 SELECT A, quantilesTDigestState(0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99)(x) FROM (SELECT 1 A, arrayJoin(cast([2.0, inf, number / 33333],'Array(Float64)')) x FROM numbers(100)) GROUP BY A; +OPTIMIZE TABLE issue32107 FINAL; +DROP TABLE IF EXISTS issue32107; + +SELECT '6'; +SELECT quantileTDigest(inf) FROM numbers(200); +SELECT 
quantileTDigest(inf) FROM numbers(500); +SELECT quantileTDigest(-inf) FROM numbers(200); +SELECT quantileTDigest(-inf) FROM numbers(500); + +SELECT '7'; +SELECT quantileTDigest(x) FROM (SELECT inf AS x UNION ALL SELECT -inf); +SELECT quantileTDigest(x) FROM (SELECT -inf AS x UNION ALL SELECT inf); + +SELECT '8'; +SELECT quantileTDigest(x) FROM (SELECT inf AS x UNION ALL SELECT -inf UNION ALL SELECT -inf); +SELECT quantileTDigest(x) FROM (SELECT inf AS x UNION ALL SELECT inf UNION ALL SELECT -inf); +SELECT quantileTDigest(x) FROM (SELECT -inf AS x UNION ALL SELECT -inf UNION ALL SELECT -inf); +SELECT quantileTDigest(x) FROM (SELECT -inf AS x UNION ALL SELECT inf UNION ALL SELECT -inf); +SELECT quantileTDigest(x) FROM (SELECT inf AS x UNION ALL SELECT -inf UNION ALL SELECT inf); +SELECT quantileTDigest(x) FROM (SELECT inf AS x UNION ALL SELECT inf UNION ALL SELECT inf); +SELECT quantileTDigest(x) FROM (SELECT -inf AS x UNION ALL SELECT -inf UNION ALL SELECT inf); +SELECT quantileTDigest(x) FROM (SELECT -inf AS x UNION ALL SELECT inf UNION ALL SELECT inf); diff --git a/parser/testdata/02286_tuple_numeric_identifier/ast.json b/parser/testdata/02286_tuple_numeric_identifier/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02286_tuple_numeric_identifier/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02286_tuple_numeric_identifier/metadata.json b/parser/testdata/02286_tuple_numeric_identifier/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02286_tuple_numeric_identifier/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02286_tuple_numeric_identifier/query.sql b/parser/testdata/02286_tuple_numeric_identifier/query.sql new file mode 100644 index 000000000..941db4479 --- /dev/null +++ b/parser/testdata/02286_tuple_numeric_identifier/query.sql @@ -0,0 +1,33 @@ + +DROP TABLE IF EXISTS t_tuple_numeric; + +CREATE TABLE t_tuple_numeric (t Tuple(`1` Tuple(`2` Int, `3` Int), `4` Int)) ENGINE = MergeTree ORDER BY tuple(); +SHOW CREATE TABLE t_tuple_numeric; + +INSERT INTO t_tuple_numeric VALUES (((2, 3), 4)); + +SET output_format_json_named_tuples_as_objects = 1; + +SELECT * FROM t_tuple_numeric FORMAT JSONEachRow; +SELECT `t`.`1`.`2`, `t`.`1`.`3`, `t`.`4` FROM t_tuple_numeric; +SELECT t.1.1, t.1.2, t.2 FROM t_tuple_numeric; + +SELECT t.1.3 FROM t_tuple_numeric; -- {serverError NOT_FOUND_COLUMN_IN_BLOCK, ARGUMENT_OUT_OF_BOUND} +SELECT t.4 FROM t_tuple_numeric; -- {serverError NOT_FOUND_COLUMN_IN_BLOCK, ARGUMENT_OUT_OF_BOUND} +SELECT `t`.`1`.`1`, `t`.`1`.`2`, `t`.`2` FROM t_tuple_numeric; -- {serverError UNKNOWN_IDENTIFIER, ARGUMENT_OUT_OF_BOUND} + +DROP TABLE t_tuple_numeric; + +CREATE TABLE t_tuple_numeric (t Tuple(Tuple(Int, Int), Int)) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_tuple_numeric VALUES (((2, 3), 4)); + +SELECT t.1.1, t.1.2, t.2 FROM t_tuple_numeric; +SELECT `t`.`1`.`1`, `t`.`1`.`2`, `t`.`2` FROM t_tuple_numeric; + +DROP TABLE t_tuple_numeric; + +WITH + '{"1":{"key":"value"}}' AS data, + JSONExtract(data, 'Tuple("1" Tuple(key String))') AS parsed_json +SELECT parsed_json AS ssid diff --git a/parser/testdata/02286_vertical_merges_missed_column/ast.json b/parser/testdata/02286_vertical_merges_missed_column/ast.json new file mode 100644 index 000000000..eee8e41cc --- /dev/null +++ b/parser/testdata/02286_vertical_merges_missed_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { 
+ "explain": "DropQuery t_vertical_merges (children 1)" + }, + { + "explain": " Identifier t_vertical_merges" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001455813, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/02286_vertical_merges_missed_column/metadata.json b/parser/testdata/02286_vertical_merges_missed_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02286_vertical_merges_missed_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02286_vertical_merges_missed_column/query.sql b/parser/testdata/02286_vertical_merges_missed_column/query.sql new file mode 100644 index 000000000..950777f23 --- /dev/null +++ b/parser/testdata/02286_vertical_merges_missed_column/query.sql @@ -0,0 +1,39 @@ +DROP TABLE IF EXISTS t_vertical_merges; + +CREATE TABLE t_vertical_merges +( + a Nullable(String), + b Int8 +) +ENGINE = MergeTree +ORDER BY tuple() +settings + vertical_merge_algorithm_min_columns_to_activate=1, + vertical_merge_algorithm_min_rows_to_activate=1, + min_bytes_for_wide_part=0; + +INSERT INTO t_vertical_merges SELECT NULL, 1; +ALTER TABLE t_vertical_merges ADD COLUMN c String; +OPTIMIZE TABLE t_vertical_merges FINAL; +SELECT a, b, c FROM t_vertical_merges; + +DROP TABLE IF EXISTS t_vertical_merges; + +CREATE TABLE t_vertical_merges +( + a Array(Int16), + b Int8 +) +ENGINE = MergeTree +ORDER BY tuple() +settings + vertical_merge_algorithm_min_columns_to_activate=1, + vertical_merge_algorithm_min_rows_to_activate=1, + min_bytes_for_wide_part=0; + +INSERT INTO t_vertical_merges SELECT [], 1; +ALTER TABLE t_vertical_merges CLEAR COLUMN b; +OPTIMIZE TABLE t_vertical_merges FINAL; +SELECT a, b FROM t_vertical_merges; + +DROP TABLE IF EXISTS t_vertical_merges; diff --git a/parser/testdata/02287_ephemeral_format_crash/ast.json b/parser/testdata/02287_ephemeral_format_crash/ast.json new file mode 100644 index 000000000..be8335761 --- /dev/null +++ b/parser/testdata/02287_ephemeral_format_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001355954, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02287_ephemeral_format_crash/metadata.json b/parser/testdata/02287_ephemeral_format_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02287_ephemeral_format_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02287_ephemeral_format_crash/query.sql b/parser/testdata/02287_ephemeral_format_crash/query.sql new file mode 100644 index 000000000..a6037e9fd --- /dev/null +++ b/parser/testdata/02287_ephemeral_format_crash/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test(a UInt8, b String EPHEMERAL) Engine=MergeTree ORDER BY tuple(); +SHOW CREATE TABLE test; +DROP TABLE test; + +CREATE TABLE test(a UInt8, b EPHEMERAL String) Engine=MergeTree ORDER BY tuple(); -- { serverError UNKNOWN_IDENTIFIER } +CREATE TABLE test(a UInt8, b EPHEMERAL 'a' String) Engine=MergeTree ORDER BY tuple(); -- { clientError SYNTAX_ERROR } +CREATE TABLE test(a UInt8, b String EPHEMERAL test) Engine=MergeTree ORDER BY tuple(); -- { serverError UNKNOWN_IDENTIFIER } + +CREATE TABLE test(a UInt8, b String EPHEMERAL 1+2) Engine=MergeTree ORDER BY tuple(); +SHOW CREATE TABLE test; 
+SELECT * FROM test; +DROP TABLE test; diff --git a/parser/testdata/02287_legacy_column_name_of_tuple_literal_over_distributed/ast.json b/parser/testdata/02287_legacy_column_name_of_tuple_literal_over_distributed/ast.json new file mode 100644 index 000000000..c602abaad --- /dev/null +++ b/parser/testdata/02287_legacy_column_name_of_tuple_literal_over_distributed/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001120291, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02287_legacy_column_name_of_tuple_literal_over_distributed/metadata.json b/parser/testdata/02287_legacy_column_name_of_tuple_literal_over_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02287_legacy_column_name_of_tuple_literal_over_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02287_legacy_column_name_of_tuple_literal_over_distributed/query.sql b/parser/testdata/02287_legacy_column_name_of_tuple_literal_over_distributed/query.sql new file mode 100644 index 000000000..a2a8e7c80 --- /dev/null +++ b/parser/testdata/02287_legacy_column_name_of_tuple_literal_over_distributed/query.sql @@ -0,0 +1,4 @@ +SET legacy_column_name_of_tuple_literal=1; +SET prefer_localhost_replica=0; + +select if(in(dummy, tuple(0, 1)), 'ok', 'ok') from remote('localhost', system.one); \ No newline at end of file diff --git a/parser/testdata/02291_dictionary_scalar_subquery_reload/ast.json b/parser/testdata/02291_dictionary_scalar_subquery_reload/ast.json new file mode 100644 index 000000000..dc624d401 --- /dev/null +++ b/parser/testdata/02291_dictionary_scalar_subquery_reload/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_dictionary_source_table (children 1)" + }, + { + "explain": " Identifier test_dictionary_source_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001453463, + "rows_read": 2, + "bytes_read": 108 + } +} diff --git a/parser/testdata/02291_dictionary_scalar_subquery_reload/metadata.json b/parser/testdata/02291_dictionary_scalar_subquery_reload/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02291_dictionary_scalar_subquery_reload/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02291_dictionary_scalar_subquery_reload/query.sql b/parser/testdata/02291_dictionary_scalar_subquery_reload/query.sql new file mode 100644 index 000000000..c75034626 --- /dev/null +++ b/parser/testdata/02291_dictionary_scalar_subquery_reload/query.sql @@ -0,0 +1,37 @@ +DROP TABLE IF EXISTS test_dictionary_source_table; +CREATE TABLE test_dictionary_source_table +( + id UInt64, + value String +) ENGINE = TinyLog; + +DROP TABLE IF EXISTS test_dictionary_view; +CREATE VIEW test_dictionary_view +( + id UInt64, + value String +) AS SELECT id, value FROM test_dictionary_source_table WHERE id = (SELECT max(id) FROM test_dictionary_source_table); + +INSERT INTO test_dictionary_source_table VALUES (1, '1'), (2, '2'), (3, '3'); + +DROP DICTIONARY IF EXISTS test_dictionary; +CREATE DICTIONARY test_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'test_dictionary_view')) +LIFETIME(MIN 0 MAX 1) +LAYOUT(FLAT()); + +SELECT * FROM test_dictionary; + +INSERT INTO test_dictionary_source_table 
VALUES (4, '4'); +SYSTEM RELOAD DICTIONARY test_dictionary; + +SELECT * FROM test_dictionary; + +DROP DICTIONARY test_dictionary; +DROP VIEW test_dictionary_view; +DROP TABLE test_dictionary_source_table; diff --git a/parser/testdata/02291_join_const_literal_36279/ast.json b/parser/testdata/02291_join_const_literal_36279/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02291_join_const_literal_36279/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02291_join_const_literal_36279/metadata.json b/parser/testdata/02291_join_const_literal_36279/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02291_join_const_literal_36279/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02291_join_const_literal_36279/query.sql b/parser/testdata/02291_join_const_literal_36279/query.sql new file mode 100644 index 000000000..f7270a0ed --- /dev/null +++ b/parser/testdata/02291_join_const_literal_36279/query.sql @@ -0,0 +1,56 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS test_distributed; +DROP TABLE IF EXISTS test_local; + +SET prefer_localhost_replica = 1; + +-- https://github.com/ClickHouse/ClickHouse/issues/36279 +CREATE TABLE test_local (text String, text2 String) ENGINE = MergeTree() ORDER BY text; +CREATE TABLE test_distributed (text String, text2 String) ENGINE = Distributed('test_shard_localhost', currentDatabase(), test_local); +INSERT INTO test_distributed SELECT randomString(100) AS text, randomString(100) AS text2 FROM system.numbers LIMIT 1; + +SET joined_subquery_requires_alias = 0; + +SELECT COUNT() AS count +FROM test_distributed +INNER JOIN +( + SELECT text + FROM test_distributed + WHERE (text ILIKE '%text-for-search%') AND (text2 ILIKE '%text-for-search%') +) USING (text) +WHERE (text ILIKE '%text-for-search%') AND (text2 ILIKE '%text-for-search%') +; + +DROP TABLE IF EXISTS test_distributed; +DROP TABLE IF EXISTS test_local; + +DROP TABLE IF EXISTS user_local; +DROP TABLE IF EXISTS user_all; +DROP TABLE IF EXISTS event; + +-- https://github.com/ClickHouse/ClickHouse/issues/36300 +CREATE TABLE user_local ( id Int64, name String, age Int32 ) +ENGINE = MergeTree ORDER BY name; + +CREATE TABLE user_all ( id Int64, name String, age Int32 ) +ENGINE = Distributed('test_shard_localhost', currentDatabase(), user_local, rand()); + +CREATE TABLE event ( id Int64, user_id Int64, content String, created_time DateTime ) +ENGINE = MergeTree ORDER BY user_id; + +INSERT INTO user_local (id, name, age) VALUES (1, 'aaa', 21); +INSERT INTO event (id, user_id, content, created_time) VALUES(1, 1, 'hello', '2022-01-05 12:00:00'); + +SELECT + u.name user_name, + 20 AS age_group +FROM user_all u +LEFT JOIN event e ON u.id = e.user_id +WHERE (u.age >= 20 AND u.age < 30) +AND e.created_time > '2022-01-01'; + +DROP TABLE IF EXISTS user_local; +DROP TABLE IF EXISTS user_all; +DROP TABLE IF EXISTS event; diff --git a/parser/testdata/02292_create_function_validate/ast.json b/parser/testdata/02292_create_function_validate/ast.json new file mode 100644 index 000000000..901801dd7 --- /dev/null +++ b/parser/testdata/02292_create_function_validate/ast.json @@ -0,0 +1,31 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateFunctionQuery foo (children 2)" + }, + { + "explain": " Identifier foo" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 3, + + "statistics": + { + "elapsed": 0.001121122, + "rows_read": 3, + "bytes_read": 
88 + } +} diff --git a/parser/testdata/02292_create_function_validate/metadata.json b/parser/testdata/02292_create_function_validate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02292_create_function_validate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02292_create_function_validate/query.sql b/parser/testdata/02292_create_function_validate/query.sql new file mode 100644 index 000000000..56b546789 --- /dev/null +++ b/parser/testdata/02292_create_function_validate/query.sql @@ -0,0 +1 @@ +create function foo as x -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02292_h3_unidirectional_funcs/ast.json b/parser/testdata/02292_h3_unidirectional_funcs/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02292_h3_unidirectional_funcs/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02292_h3_unidirectional_funcs/metadata.json b/parser/testdata/02292_h3_unidirectional_funcs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02292_h3_unidirectional_funcs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02292_h3_unidirectional_funcs/query.sql b/parser/testdata/02292_h3_unidirectional_funcs/query.sql new file mode 100644 index 000000000..7436b1ba9 --- /dev/null +++ b/parser/testdata/02292_h3_unidirectional_funcs/query.sql @@ -0,0 +1,30 @@ +-- Tags: no-fasttest + +SELECT h3GetDestinationIndexFromUnidirectionalEdge(1248204388774707197); +SELECT h3GetDestinationIndexFromUnidirectionalEdge(599686042433355773); +SELECT h3GetDestinationIndexFromUnidirectionalEdge(stringToH3('85283473ffffff')); + +SELECT h3GetIndexesFromUnidirectionalEdge(1248204388774707199); +SELECT h3GetIndexesFromUnidirectionalEdge(599686042433355775); +SELECT h3GetIndexesFromUnidirectionalEdge(stringToH3('85283473ffffff')); + +SELECT h3GetOriginIndexFromUnidirectionalEdge(1248204388774707199); +SELECT h3GetOriginIndexFromUnidirectionalEdge(1248204388774707197); +SELECT h3GetOriginIndexFromUnidirectionalEdge(599686042433355775); +SELECT h3GetOriginIndexFromUnidirectionalEdge(stringToH3('85283473ffffff')); + +SELECT h3GetUnidirectionalEdgeBoundary(1248204388774707199); +SELECT h3GetUnidirectionalEdgeBoundary(599686042433355773); +SELECT h3GetUnidirectionalEdgeBoundary(stringToH3('85283473ffffff')); + +SELECT h3GetUnidirectionalEdgesFromHexagon(1248204388774707199); +SELECT h3GetUnidirectionalEdgesFromHexagon(599686042433355773); +SELECT h3GetUnidirectionalEdgesFromHexagon(stringToH3('85283473ffffff')); + +select h3GetUnidirectionalEdge(stringToH3('85283473fffffff'), stringToH3('85283477fffffff')); +select h3GetUnidirectionalEdge(stringToH3('85283473fffffff'), stringToH3('85283473fffffff')); +SELECT h3GetUnidirectionalEdge(stringToH3('85283473ffffff'), stringToH3('852\03477fffffff')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT h3UnidirectionalEdgeIsValid(1248204388774707199) as edge; +SELECT h3UnidirectionalEdgeIsValid(1248204388774707197) as edge; +SELECT h3UnidirectionalEdgeIsValid(stringToH3('85283473ffffff')) as edge; diff --git a/parser/testdata/02292_hash_array_tuples/ast.json b/parser/testdata/02292_hash_array_tuples/ast.json new file mode 100644 index 000000000..cc4e06f2f --- /dev/null +++ b/parser/testdata/02292_hash_array_tuples/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + 
{ + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'arrays'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001172559, + "rows_read": 5, + "bytes_read": 177 + } +} diff --git a/parser/testdata/02292_hash_array_tuples/metadata.json b/parser/testdata/02292_hash_array_tuples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02292_hash_array_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02292_hash_array_tuples/query.sql b/parser/testdata/02292_hash_array_tuples/query.sql new file mode 100644 index 000000000..ea368f56f --- /dev/null +++ b/parser/testdata/02292_hash_array_tuples/query.sql @@ -0,0 +1,25 @@ +SELECT 'arrays'; + +SELECT cityHash64([(1, 'a'), (2, 'b')]); +SELECT cityHash64([(1, 'c'), (2, 'b')]); +SELECT sipHash64([(1, 'a'), (2, 'b')]); +SELECT murmurHash2_64([(1, 'a'), (2, 'b'), (3, 'c')]); + +SELECT cityHash64([(1, [(1, (3, 4, [(5, 6), (7, 8)]))]), (2, [])] AS c), toTypeName(c); + +SELECT cityHash64(materialize([(1, 'a'), (2, 'b')])); +SELECT cityHash64(materialize([(1, 'c'), (2, 'b')])); +SELECT sipHash64(materialize([(1, 'a'), (2, 'b')])); +SELECT murmurHash2_64(materialize([(1, 'a'), (2, 'b'), (3, 'c')])); + +SELECT 'maps'; + +SELECT cityHash64(map(1, 'a', 2, 'b')); +SELECT cityHash64(map(1, 'c', 2, 'b')); +SELECT sipHash64(map(1, 'a', 2, 'b')); +SELECT murmurHash2_64(map(1, 'a', 2, 'b', 3, 'c')); + +SELECT cityHash64(materialize(map(1, 'a', 2, 'b'))); +SELECT cityHash64(materialize(map(1, 'c', 2, 'b'))); +SELECT sipHash64(materialize(map(1, 'a', 2, 'b'))); +SELECT murmurHash2_64(materialize(map(1, 'a', 2, 'b', 3, 'c'))); diff --git a/parser/testdata/02292_nested_not_flattened_detach/ast.json b/parser/testdata/02292_nested_not_flattened_detach/ast.json new file mode 100644 index 000000000..c5e8dfc79 --- /dev/null +++ b/parser/testdata/02292_nested_not_flattened_detach/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_nested_detach (children 1)" + }, + { + "explain": " Identifier t_nested_detach" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001065639, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02292_nested_not_flattened_detach/metadata.json b/parser/testdata/02292_nested_not_flattened_detach/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02292_nested_not_flattened_detach/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02292_nested_not_flattened_detach/query.sql b/parser/testdata/02292_nested_not_flattened_detach/query.sql new file mode 100644 index 000000000..083b4ff7e --- /dev/null +++ b/parser/testdata/02292_nested_not_flattened_detach/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS t_nested_detach; + +SET flatten_nested = 0; +CREATE TABLE t_nested_detach (n Nested(u UInt32, s String)) ENGINE = Log; + +SHOW CREATE TABLE t_nested_detach; +DESC TABLE t_nested_detach; + +SET flatten_nested = 1; + +DETACH TABLE t_nested_detach; +ATTACH TABLE t_nested_detach; + +SHOW CREATE TABLE t_nested_detach; +DESC TABLE t_nested_detach; + +DROP TABLE IF EXISTS t_nested_detach; diff --git a/parser/testdata/02293_arrow_dictionary_indexes/ast.json b/parser/testdata/02293_arrow_dictionary_indexes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null 
+++ b/parser/testdata/02293_arrow_dictionary_indexes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02293_arrow_dictionary_indexes/metadata.json b/parser/testdata/02293_arrow_dictionary_indexes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02293_arrow_dictionary_indexes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02293_arrow_dictionary_indexes/query.sql b/parser/testdata/02293_arrow_dictionary_indexes/query.sql new file mode 100644 index 000000000..409d47322 --- /dev/null +++ b/parser/testdata/02293_arrow_dictionary_indexes/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest +insert into function file(currentDatabase() || '_02293_data.arrow') select toLowCardinality(toString(number)) from numbers(300) settings output_format_arrow_low_cardinality_as_dictionary=1, engine_file_truncate_on_insert=1; +select * from file(currentDatabase() || '_02293_data.arrow') settings max_threads=1; diff --git a/parser/testdata/02293_compatibility_ignore_auto_increment_in_create_table/ast.json b/parser/testdata/02293_compatibility_ignore_auto_increment_in_create_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02293_compatibility_ignore_auto_increment_in_create_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02293_compatibility_ignore_auto_increment_in_create_table/metadata.json b/parser/testdata/02293_compatibility_ignore_auto_increment_in_create_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02293_compatibility_ignore_auto_increment_in_create_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02293_compatibility_ignore_auto_increment_in_create_table/query.sql b/parser/testdata/02293_compatibility_ignore_auto_increment_in_create_table/query.sql new file mode 100644 index 000000000..5d9b711ae --- /dev/null +++ b/parser/testdata/02293_compatibility_ignore_auto_increment_in_create_table/query.sql @@ -0,0 +1,60 @@ +-- Tags: no-random-merge-tree-settings +-- Tag no-random-merge-tree-settings: query is rewritten in parser +-- while adding merge tree settings + +select 'disable AUTO_INCREMENT compatibility mode'; +set compatibility_ignore_auto_increment_in_create_table=false; + +select 'create table failed, column +type +AUTO_INCREMENT, compatibility disabled'; +DROP TABLE IF EXISTS ignore_auto_increment SYNC; +CREATE TABLE ignore_auto_increment ( + id int AUTO_INCREMENT +) ENGINE=MergeTree() ORDER BY tuple(); -- {serverError SYNTAX_ERROR} + +select 'enable AUTO_INCREMENT compatibility mode'; +set compatibility_ignore_auto_increment_in_create_table=true; + +select 'create table, +type +AUTO_INCREMENT'; +DROP TABLE IF EXISTS ignore_auto_increment SYNC; +CREATE TABLE ignore_auto_increment ( + id int AUTO_INCREMENT +) ENGINE=MergeTree() ORDER BY tuple(); + +select 'create table, column +AUTO_INCREMENT -type'; +DROP TABLE IF EXISTS ignore_auto_increment SYNC; +CREATE TABLE ignore_auto_increment ( + id AUTO_INCREMENT +) ENGINE=MergeTree() ORDER BY tuple(); +DESCRIBE TABLE ignore_auto_increment; + +select 'create table, several columns +/-type +AUTO_INCREMENT'; +DROP TABLE IF EXISTS ignore_auto_increment SYNC; +CREATE TABLE ignore_auto_increment ( + id int AUTO_INCREMENT, di AUTO_INCREMENT, s String AUTO_INCREMENT +) ENGINE=MergeTree() ORDER BY tuple(); +DESCRIBE TABLE ignore_auto_increment; + +select 'create table, several columns with different default 
specifiers'; +DROP TABLE IF EXISTS ignore_auto_increment SYNC; +CREATE TABLE ignore_auto_increment ( + di DEFAULT 1, id int AUTO_INCREMENT, s String EPHEMERAL +) ENGINE=MergeTree() ORDER BY tuple(); +DESCRIBE TABLE ignore_auto_increment; + +select 'create table failed, column +type +DEFAULT +AUTO_INCREMENT'; +DROP TABLE IF EXISTS ignore_auto_increment SYNC; +CREATE TABLE ignore_auto_increment (id int DEFAULT 1 AUTO_INCREMENT) ENGINE=MergeTree() ORDER BY tuple(); -- {clientError SYNTAX_ERROR} + +select 'create table failed, column -type +DEFAULT +AUTO_INCREMENT'; +DROP TABLE IF EXISTS ignore_auto_increment SYNC; +CREATE TABLE ignore_auto_increment (id DEFAULT 1 AUTO_INCREMENT) ENGINE=MergeTree() ORDER BY tuple(); -- {clientError SYNTAX_ERROR} + +select 'create table failed, column +type +AUTO_INCREMENT +DEFAULT'; +DROP TABLE IF EXISTS ignore_auto_increment SYNC; +CREATE TABLE ignore_auto_increment (id int AUTO_INCREMENT DEFAULT 1) ENGINE=MergeTree() ORDER BY tuple(); -- {clientError SYNTAX_ERROR} + +select 'create table failed, column -type +AUTO_INCREMENT +DEFAULT'; +DROP TABLE IF EXISTS ignore_auto_increment SYNC; +CREATE TABLE ignore_auto_increment (id AUTO_INCREMENT DEFAULT 1) ENGINE=MergeTree() ORDER BY tuple(); -- {clientError SYNTAX_ERROR} + +DROP TABLE IF EXISTS ignore_auto_increment SYNC; diff --git a/parser/testdata/02293_grouping_function/ast.json b/parser/testdata/02293_grouping_function/ast.json new file mode 100644 index 000000000..25f1cc712 --- /dev/null +++ b/parser/testdata/02293_grouping_function/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001137751, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02293_grouping_function/metadata.json b/parser/testdata/02293_grouping_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02293_grouping_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02293_grouping_function/query.sql b/parser/testdata/02293_grouping_function/query.sql new file mode 100644 index 000000000..c858eae26 --- /dev/null +++ b/parser/testdata/02293_grouping_function/query.sql @@ -0,0 +1,109 @@ +set optimize_group_by_function_keys=0; + +SELECT + number, + grouping(number, number % 2, number % 3) AS gr +FROM numbers(10) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY number, gr; -- { serverError BAD_ARGUMENTS } + +-- { echoOn } +SELECT + number, + grouping(number, number % 2) AS gr +FROM numbers(10) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY number, gr +SETTINGS force_grouping_standard_compatibility=0; + +SELECT + number, + grouping(number % 2, number) AS gr +FROM numbers(10) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY number, gr +SETTINGS force_grouping_standard_compatibility=0; + +SELECT + number, + grouping(number, number % 2) = 1 AS gr +FROM numbers(10) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY number, gr +SETTINGS force_grouping_standard_compatibility=0; + +SELECT + number +FROM numbers(10) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY number, grouping(number, number % 2) = 1 +SETTINGS force_grouping_standard_compatibility=0; + +SELECT + number, + count(), + grouping(number, number % 2) AS gr +FROM numbers(10) +GROUP BY + GROUPING SETS ( + (number), + (number, number % 2), + ()
+ ) +ORDER BY (gr, number) +SETTINGS force_grouping_standard_compatibility=0; + +SELECT + number +FROM numbers(10) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +HAVING grouping(number, number % 2) = 2 +ORDER BY number +SETTINGS enable_optimize_predicate_expression = 0, force_grouping_standard_compatibility=0; + +SELECT + number +FROM numbers(10) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +HAVING grouping(number, number % 2) = 1 +ORDER BY number +SETTINGS enable_optimize_predicate_expression = 0, force_grouping_standard_compatibility=0; + +SELECT + number, + GROUPING(number, number % 2) = 1 as gr +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + GROUPING SETS ( + (number), + (number % 2)) +ORDER BY number, gr +SETTINGS force_grouping_standard_compatibility=0; diff --git a/parser/testdata/02293_grouping_function_group_by/ast.json b/parser/testdata/02293_grouping_function_group_by/ast.json new file mode 100644 index 000000000..21d896cc3 --- /dev/null +++ b/parser/testdata/02293_grouping_function_group_by/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000929328, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02293_grouping_function_group_by/metadata.json b/parser/testdata/02293_grouping_function_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02293_grouping_function_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02293_grouping_function_group_by/query.sql b/parser/testdata/02293_grouping_function_group_by/query.sql new file mode 100644 index 000000000..da6477a18 --- /dev/null +++ b/parser/testdata/02293_grouping_function_group_by/query.sql @@ -0,0 +1,127 @@ +set optimize_group_by_function_keys=0; + +SELECT + number, + grouping(number, number % 2, number % 3) = 6 +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + number, + number % 2 +ORDER BY number; -- { serverError BAD_ARGUMENTS } + +-- { echoOn } +SELECT + number, + grouping(number, number % 2) = 3 +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + number, + number % 2 +ORDER BY number +SETTINGS force_grouping_standard_compatibility=0; + +SELECT + number, + grouping(number), + GROUPING(number % 2) +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + number, + number % 2 +ORDER BY number +SETTINGS force_grouping_standard_compatibility=0; + +SELECT + number, + grouping(number, number % 2) AS gr +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + number, + number % 2 + WITH ROLLUP +ORDER BY + number, gr +SETTINGS force_grouping_standard_compatibility=0; + +SELECT + number, + grouping(number, number % 2) AS gr +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + ROLLUP(number, number % 2) +ORDER BY + number, gr +SETTINGS force_grouping_standard_compatibility=0; + +SELECT + number, + grouping(number, number % 2) AS gr +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + number, + number % 2 + WITH CUBE +ORDER BY + number, gr +SETTINGS force_grouping_standard_compatibility=0; + +SELECT + number, + grouping(number, number % 2) AS gr +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + CUBE(number, number % 2) +ORDER BY + number, gr +SETTINGS force_grouping_standard_compatibility=0; + +SELECT + number, + grouping(number, number % 2) + 3 as gr +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + CUBE(number, number % 2) +HAVING 
grouping(number) != 0 +ORDER BY + number, gr +SETTINGS force_grouping_standard_compatibility=0; + +SELECT + number, + grouping(number, number % 2) as gr +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + CUBE(number, number % 2) WITH TOTALS +HAVING grouping(number) != 0 +ORDER BY + number, gr; -- { serverError NOT_IMPLEMENTED } + +SELECT + number, + grouping(number, number % 2) as gr +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + CUBE(number, number % 2) WITH TOTALS +ORDER BY + number, gr +SETTINGS force_grouping_standard_compatibility=0; + +SELECT + number, + grouping(number, number % 2) as gr +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + ROLLUP(number, number % 2) WITH TOTALS +HAVING grouping(number) != 0 +ORDER BY + number, gr; -- { serverError NOT_IMPLEMENTED } + +SELECT + number, + grouping(number, number % 2) as gr +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + ROLLUP(number, number % 2) WITH TOTALS +ORDER BY + number, gr +SETTINGS force_grouping_standard_compatibility=0; diff --git a/parser/testdata/02293_h3_distance/ast.json b/parser/testdata/02293_h3_distance/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02293_h3_distance/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02293_h3_distance/metadata.json b/parser/testdata/02293_h3_distance/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02293_h3_distance/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02293_h3_distance/query.sql b/parser/testdata/02293_h3_distance/query.sql new file mode 100644 index 000000000..7bb35d222 --- /dev/null +++ b/parser/testdata/02293_h3_distance/query.sql @@ -0,0 +1,43 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS h3_indexes; + +CREATE TABLE h3_indexes (id int, start String, end String) ENGINE = Memory; + +-- test values taken from h3 library test suite + +INSERT INTO h3_indexes VALUES (1, '830631fffffffff','830780fffffffff'); +INSERT INTO h3_indexes VALUES (2, '830631fffffffff','830783fffffffff'); +INSERT INTO h3_indexes VALUES (3, '830631fffffffff','83079dfffffffff'); +INSERT INTO h3_indexes VALUES (4, '830631fffffffff','830799fffffffff'); +INSERT INTO h3_indexes VALUES (5, '830631fffffffff','8306f5fffffffff'); +INSERT INTO h3_indexes VALUES (6, '830631fffffffff','8306e6fffffffff'); +INSERT INTO h3_indexes VALUES (7, '830631fffffffff','8306e4fffffffff'); +INSERT INTO h3_indexes VALUES (8, '830631fffffffff','830701fffffffff'); +INSERT INTO h3_indexes VALUES (9, '830631fffffffff','830700fffffffff'); +INSERT INTO h3_indexes VALUES (10, '830631fffffffff','830706fffffffff'); +INSERT INTO h3_indexes VALUES (11, '830631fffffffff','830733fffffffff'); +INSERT INTO h3_indexes VALUES (12, '8301a6fffffffff','830014fffffffff'); +INSERT INTO h3_indexes VALUES (13, '8301a6fffffffff','830033fffffffff'); +INSERT INTO h3_indexes VALUES (14, '8301a6fffffffff','830031fffffffff'); +INSERT INTO h3_indexes VALUES (15, '8301a6fffffffff','830022fffffffff'); +INSERT INTO h3_indexes VALUES (16, '8301a6fffffffff','830020fffffffff'); +INSERT INTO h3_indexes VALUES (17, '8301a6fffffffff','830024fffffffff'); +INSERT INTO h3_indexes VALUES (18, '8301a6fffffffff','830120fffffffff'); +INSERT INTO h3_indexes VALUES (19, '8301a6fffffffff','830124fffffffff'); +INSERT INTO h3_indexes VALUES (20, '8301a6fffffffff','8308cdfffffffff'); +INSERT INTO h3_indexes VALUES (21, '8301a5fffffffff','831059fffffffff'); +INSERT INTO h3_indexes VALUES (22, 
'8301a5fffffffff','830b2dfffffffff'); +INSERT INTO h3_indexes VALUES (23, '8301a5fffffffff','830b29fffffffff'); +INSERT INTO h3_indexes VALUES (24, '8301a5fffffffff','830b76fffffffff'); +INSERT INTO h3_indexes VALUES (25, '8301a5fffffffff','830b43fffffffff'); +INSERT INTO h3_indexes VALUES (26, '8301a5fffffffff','830b4efffffffff'); +INSERT INTO h3_indexes VALUES (27, '8301a5fffffffff','830b48fffffffff'); +INSERT INTO h3_indexes VALUES (28, '8301a5fffffffff','830b49fffffffff'); + + +SELECT h3Distance(stringToH3(start), stringToH3(end)) FROM h3_indexes ORDER BY id; + + +DROP TABLE h3_indexes; + diff --git a/parser/testdata/02293_h3_hex_ring/ast.json b/parser/testdata/02293_h3_hex_ring/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02293_h3_hex_ring/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02293_h3_hex_ring/metadata.json b/parser/testdata/02293_h3_hex_ring/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02293_h3_hex_ring/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02293_h3_hex_ring/query.sql b/parser/testdata/02293_h3_hex_ring/query.sql new file mode 100644 index 000000000..4d9f36994 --- /dev/null +++ b/parser/testdata/02293_h3_hex_ring/query.sql @@ -0,0 +1,35 @@ +-- Tags: no-fasttest + +SELECT h3HexRing(581276613233082367, toUInt16(0)); +SELECT h3HexRing(579205132326352334, toUInt16(1)) as hexRing; -- { serverError INCORRECT_DATA } +SELECT h3HexRing(581276613233082367, -1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT h3HexRing(581276613233082367, toUInt16(-1)); -- { serverError PARAMETER_OUT_OF_BOUND } + +DROP TABLE IF EXISTS h3_indexes; + +-- Test h3 indices and k selected from original test fixture: https://github.com/uber/h3/blob/master/src/apps/testapps + +CREATE TABLE h3_indexes (h3_index UInt64, k UInt16) ENGINE = Memory; + + +INSERT INTO h3_indexes VALUES (581276613233082367,1); +INSERT INTO h3_indexes VALUES (581263419093549055,2); +INSERT INTO h3_indexes VALUES (589753847883235327,3); +INSERT INTO h3_indexes VALUES (594082350283882495,4); +INSERT INTO h3_indexes VALUES (598372386957426687,5); +INSERT INTO h3_indexes VALUES (599542359671177215,6); +INSERT INTO h3_indexes VALUES (604296355086598143,7); +INSERT INTO h3_indexes VALUES (608785214872748031,8); +INSERT INTO h3_indexes VALUES (615732192485572607,9); +INSERT INTO h3_indexes VALUES (617056794467368959,10); +INSERT INTO h3_indexes VALUES (624586477873168383,11); +INSERT INTO h3_indexes VALUES (627882919484481535,12); +INSERT INTO h3_indexes VALUES (634600058503392255,13); +INSERT INTO h3_indexes VALUES (635544851677385791,14); +INSERT INTO h3_indexes VALUES (639763125756281263,15); +INSERT INTO h3_indexes VALUES (644178757620501158,16); + + +SELECT arraySort(h3HexRing(h3_index, k)) FROM h3_indexes ORDER BY h3_index; + +DROP TABLE h3_indexes; diff --git a/parser/testdata/02293_h3_line/ast.json b/parser/testdata/02293_h3_line/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02293_h3_line/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02293_h3_line/metadata.json b/parser/testdata/02293_h3_line/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02293_h3_line/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02293_h3_line/query.sql b/parser/testdata/02293_h3_line/query.sql new file mode 100644 index 000000000..d0c514081 --- /dev/null +++ 
b/parser/testdata/02293_h3_line/query.sql @@ -0,0 +1,56 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS h3_indexes; + +CREATE TABLE h3_indexes (id int, start String, end String) ENGINE = Memory; + +-- test values taken from h3 library test suite + +INSERT INTO h3_indexes VALUES (1, '830631fffffffff','830780fffffffff'); +INSERT INTO h3_indexes VALUES (2, '830631fffffffff','830783fffffffff'); +INSERT INTO h3_indexes VALUES (3, '830631fffffffff','83079dfffffffff'); +INSERT INTO h3_indexes VALUES (4, '830631fffffffff','830799fffffffff'); +INSERT INTO h3_indexes VALUES (5, '830631fffffffff','8306f5fffffffff'); +INSERT INTO h3_indexes VALUES (6, '830631fffffffff','8306e6fffffffff'); +INSERT INTO h3_indexes VALUES (7, '830631fffffffff','8306e4fffffffff'); +INSERT INTO h3_indexes VALUES (8, '830631fffffffff','830701fffffffff'); +INSERT INTO h3_indexes VALUES (9, '830631fffffffff','830700fffffffff'); +INSERT INTO h3_indexes VALUES (10, '830631fffffffff','830706fffffffff'); +INSERT INTO h3_indexes VALUES (11, '830631fffffffff','830733fffffffff'); +INSERT INTO h3_indexes VALUES (12, '8301a6fffffffff','830014fffffffff'); +INSERT INTO h3_indexes VALUES (13, '8301a6fffffffff','830033fffffffff'); +INSERT INTO h3_indexes VALUES (14, '8301a6fffffffff','830031fffffffff'); +INSERT INTO h3_indexes VALUES (15, '8301a6fffffffff','830022fffffffff'); +INSERT INTO h3_indexes VALUES (16, '8301a6fffffffff','830020fffffffff'); +INSERT INTO h3_indexes VALUES (17, '8301a6fffffffff','830024fffffffff'); +INSERT INTO h3_indexes VALUES (18, '8301a6fffffffff','830120fffffffff'); +INSERT INTO h3_indexes VALUES (19, '8301a6fffffffff','830124fffffffff'); +INSERT INTO h3_indexes VALUES (20, '8301a6fffffffff','8308cdfffffffff'); +INSERT INTO h3_indexes VALUES (21, '8301a5fffffffff','831059fffffffff'); +INSERT INTO h3_indexes VALUES (22, '8301a5fffffffff','830b2dfffffffff'); +INSERT INTO h3_indexes VALUES (23, '8301a5fffffffff','830b29fffffffff'); +INSERT INTO h3_indexes VALUES (24, '8301a5fffffffff','830b76fffffffff'); +INSERT INTO h3_indexes VALUES (25, '8301a5fffffffff','830b43fffffffff'); +INSERT INTO h3_indexes VALUES (26, '8301a5fffffffff','830b4efffffffff'); +INSERT INTO h3_indexes VALUES (27, '8301a5fffffffff','830b48fffffffff'); +INSERT INTO h3_indexes VALUES (28, '8301a5fffffffff','830b49fffffffff'); + +/* +Given two H3 indexes, return the line of indexes between them (inclusive). +This function may fail to find the line between two indexes, for example if they are very far apart. +It may also fail when finding distances for indexes on opposite sides of a pentagon. + +Notes: + The specific output of this function should not be considered stable across library versions. + The only guarantees the library provides are that the line length will be h3Distance(start, end) + 1 + and that every index in the line will be a neighbor of the preceding index. + Lines are drawn in grid space, and may not correspond exactly to either Cartesian lines or great arcs. 
+ +https://h3geo.org/docs/api/traversal + */ + +SELECT length(h3Line(stringToH3(start), stringToH3(end))) FROM h3_indexes ORDER BY id; + +SELECT h3Line(0xffffffffffffff, 0xffffffffffffff); -- { serverError INCORRECT_DATA } + +DROP TABLE h3_indexes; diff --git a/parser/testdata/02293_ilike_on_fixed_strings/ast.json b/parser/testdata/02293_ilike_on_fixed_strings/ast.json new file mode 100644 index 000000000..1df2ea7cd --- /dev/null +++ b/parser/testdata/02293_ilike_on_fixed_strings/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001306527, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02293_ilike_on_fixed_strings/metadata.json b/parser/testdata/02293_ilike_on_fixed_strings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02293_ilike_on_fixed_strings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02293_ilike_on_fixed_strings/query.sql b/parser/testdata/02293_ilike_on_fixed_strings/query.sql new file mode 100644 index 000000000..3838e372e --- /dev/null +++ b/parser/testdata/02293_ilike_on_fixed_strings/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab (col FixedString(2)) engine = MergeTree() ORDER BY col; + +INSERT INTO tab VALUES ('AA') ('Aa'); + +SELECT col, col LIKE '%a', col ILIKE '%a' FROM tab WHERE col = 'AA'; +SELECT col, col LIKE '%a', col ILIKE '%a' FROM tab WHERE col = 'Aa'; + +DROP TABLE IF EXISTS tab; diff --git a/parser/testdata/02293_optimize_aggregation_in_order_Array_functions/ast.json b/parser/testdata/02293_optimize_aggregation_in_order_Array_functions/ast.json new file mode 100644 index 000000000..61a064d81 --- /dev/null +++ b/parser/testdata/02293_optimize_aggregation_in_order_Array_functions/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_02293 (children 1)" + }, + { + "explain": " Identifier data_02293" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001503259, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02293_optimize_aggregation_in_order_Array_functions/metadata.json b/parser/testdata/02293_optimize_aggregation_in_order_Array_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02293_optimize_aggregation_in_order_Array_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02293_optimize_aggregation_in_order_Array_functions/query.sql b/parser/testdata/02293_optimize_aggregation_in_order_Array_functions/query.sql new file mode 100644 index 000000000..2df7c9f2b --- /dev/null +++ b/parser/testdata/02293_optimize_aggregation_in_order_Array_functions/query.sql @@ -0,0 +1,5 @@ +drop table if exists data_02293; +create table data_02293 (a Int64, grp_aggreg AggregateFunction(groupArrayArray, Array(UInt64)), grp_simple SimpleAggregateFunction(groupArrayArray, Array(UInt64))) engine = MergeTree() order by a; +insert into data_02293 select 1 a, groupArrayArrayState([toUInt64(number)]), groupArrayArray([toUInt64(number)]) from numbers(2) group by a; +SELECT arraySort(groupArrayArrayMerge(grp_aggreg)) gra , arraySort(groupArrayArray(grp_simple)) grs FROM data_02293 group by a SETTINGS optimize_aggregation_in_order=1; +drop table 
data_02293; diff --git a/parser/testdata/02293_ttest_large_samples/ast.json b/parser/testdata/02293_ttest_large_samples/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02293_ttest_large_samples/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02293_ttest_large_samples/metadata.json b/parser/testdata/02293_ttest_large_samples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02293_ttest_large_samples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02293_ttest_large_samples/query.sql b/parser/testdata/02293_ttest_large_samples/query.sql new file mode 100644 index 000000000..b46875413 --- /dev/null +++ b/parser/testdata/02293_ttest_large_samples/query.sql @@ -0,0 +1,57 @@ +-- Tags: long + +SELECT roundBankers(result.1, 5), roundBankers(result.2, 5) FROM ( +SELECT + studentTTest(sample, variant) as result +FROM ( +SELECT + toFloat64(number) % 30 AS sample, + 0 AS variant +FROM system.numbers limit 500000 + +UNION ALL + +SELECT + toFloat64(number) % 30 + 0.0022 AS sample, + 1 AS variant +FROM system.numbers limit 500000)); + + +SET max_rows_to_read = 0; + +SELECT roundBankers(result.1, 5), roundBankers(result.2, 5 ) FROM ( +SELECT + studentTTest(sample, variant) as result +FROM ( +SELECT + toFloat64(number) % 30 AS sample, + 0 AS variant +FROM system.numbers limit 50000000 + +UNION ALL + +SELECT + toFloat64(number) % 30 + 0.0022 AS sample, + 1 AS variant +FROM system.numbers limit 50000000)); + + +SELECT roundBankers(result.2, 1025) +FROM +( + SELECT studentTTest(sample, variant) AS result + FROM + ( + SELECT + toFloat64(number) % 30 AS sample, + 1048576 AS variant + FROM system.numbers + LIMIT 1 + UNION ALL + SELECT + (toFloat64(number) % 7) + inf AS sample, + 255 AS variant + FROM system.numbers + LIMIT 1023 + ) +); diff --git a/parser/testdata/02294_decimal_second_errors/ast.json b/parser/testdata/02294_decimal_second_errors/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02294_decimal_second_errors/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02294_decimal_second_errors/metadata.json b/parser/testdata/02294_decimal_second_errors/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02294_decimal_second_errors/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02294_decimal_second_errors/query.sql b/parser/testdata/02294_decimal_second_errors/query.sql new file mode 100644 index 000000000..b9b6d0a62 --- /dev/null +++ b/parser/testdata/02294_decimal_second_errors/query.sql @@ -0,0 +1,11 @@ +SELECT 1 SETTINGS max_execution_time=NaN; -- { clientError CANNOT_PARSE_NUMBER } +SELECT 1 SETTINGS max_execution_time=Infinity; -- { clientError CANNOT_PARSE_NUMBER }; +SELECT 1 SETTINGS max_execution_time=-Infinity; -- { clientError CANNOT_PARSE_NUMBER }; + +-- Ok values +SELECT 1 SETTINGS max_execution_time=-0.5; +SELECT 1 SETTINGS max_execution_time=5.5; +SELECT 1 SETTINGS max_execution_time=-1; +SELECT 1 SETTINGS max_execution_time=0.0; +SELECT 1 SETTINGS max_execution_time=-0.0; +SELECT 1 SETTINGS max_execution_time=10; diff --git a/parser/testdata/02294_dictionaries_hierarchical_index/ast.json b/parser/testdata/02294_dictionaries_hierarchical_index/ast.json new file mode 100644 index 000000000..93e793e6c --- /dev/null +++ b/parser/testdata/02294_dictionaries_hierarchical_index/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + 
"name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_hierarchy_source_table (children 1)" + }, + { + "explain": " Identifier test_hierarchy_source_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001155665, + "rows_read": 2, + "bytes_read": 106 + } +} diff --git a/parser/testdata/02294_dictionaries_hierarchical_index/metadata.json b/parser/testdata/02294_dictionaries_hierarchical_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02294_dictionaries_hierarchical_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02294_dictionaries_hierarchical_index/query.sql b/parser/testdata/02294_dictionaries_hierarchical_index/query.sql new file mode 100644 index 000000000..7904e612f --- /dev/null +++ b/parser/testdata/02294_dictionaries_hierarchical_index/query.sql @@ -0,0 +1,67 @@ +DROP TABLE IF EXISTS test_hierarchy_source_table; +CREATE TABLE test_hierarchy_source_table +( + id UInt64, + parent_id UInt64 +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_hierarchy_source_table VALUES (1, 0); + +DROP DICTIONARY IF EXISTS hierarchy_flat_dictionary_index; +CREATE DICTIONARY hierarchy_flat_dictionary_index +( + id UInt64, + parent_id UInt64 BIDIRECTIONAL +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'test_hierarchy_source_table')) +LAYOUT(FLAT()) +LIFETIME(0); -- {serverError BAD_ARGUMENTS } + +DROP DICTIONARY IF EXISTS hierarchy_flat_dictionary_index; +CREATE DICTIONARY hierarchy_flat_dictionary_index +( + id UInt64, + parent_id UInt64 HIERARCHICAL BIDIRECTIONAL +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'test_hierarchy_source_table')) +LAYOUT(FLAT()) +LIFETIME(0); + +SELECT * FROM hierarchy_flat_dictionary_index; +SELECT hierarchical_index_bytes_allocated > 0 FROM system.dictionaries WHERE name = 'hierarchy_flat_dictionary_index' AND database = currentDatabase(); + +DROP DICTIONARY hierarchy_flat_dictionary_index; + +DROP DICTIONARY IF EXISTS hierarchy_hashed_dictionary_index; +CREATE DICTIONARY hierarchy_hashed_dictionary_index +( + id UInt64, + parent_id UInt64 HIERARCHICAL BIDIRECTIONAL +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'test_hierarchy_source_table')) +LAYOUT(FLAT()) +LIFETIME(0); + +SELECT * FROM hierarchy_hashed_dictionary_index; +SELECT hierarchical_index_bytes_allocated > 0 FROM system.dictionaries WHERE name = 'hierarchy_hashed_dictionary_index' AND database = currentDatabase(); +DROP DICTIONARY hierarchy_hashed_dictionary_index; + +DROP DICTIONARY IF EXISTS hierarchy_hashed_array_dictionary_index; +CREATE DICTIONARY hierarchy_hashed_array_dictionary_index +( + id UInt64, + parent_id UInt64 HIERARCHICAL +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'test_hierarchy_source_table')) +LAYOUT(HASHED_ARRAY()) +LIFETIME(0); + +SELECT * FROM hierarchy_hashed_array_dictionary_index; +SELECT hierarchical_index_bytes_allocated > 0 FROM system.dictionaries WHERE name = 'hierarchy_hashed_array_dictionary_index' AND database = currentDatabase(); + +DROP DICTIONARY hierarchy_hashed_array_dictionary_index; +DROP TABLE test_hierarchy_source_table; diff --git a/parser/testdata/02294_fp_seconds_profile/ast.json b/parser/testdata/02294_fp_seconds_profile/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02294_fp_seconds_profile/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02294_fp_seconds_profile/metadata.json b/parser/testdata/02294_fp_seconds_profile/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02294_fp_seconds_profile/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02294_fp_seconds_profile/query.sql b/parser/testdata/02294_fp_seconds_profile/query.sql new file mode 100644 index 000000000..db1e298a6 --- /dev/null +++ b/parser/testdata/02294_fp_seconds_profile/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-parallel +-- Bug: https://github.com/ClickHouse/ClickHouse/issues/38863 + +DROP SETTINGS PROFILE IF EXISTS 02294_profile1, 02294_profile2; + +CREATE SETTINGS PROFILE 02294_profile1 SETTINGS timeout_before_checking_execution_speed = 3 TO default; +SHOW CREATE SETTINGS PROFILE 02294_profile1; + +CREATE SETTINGS PROFILE 02294_profile2 SETTINGS max_execution_time = 0.5 TO default; +SHOW CREATE SETTINGS PROFILE 02294_profile2; + +DROP SETTINGS PROFILE IF EXISTS 02294_profile1, 02294_profile2; diff --git a/parser/testdata/02294_nothing_arguments_in_functions/ast.json b/parser/testdata/02294_nothing_arguments_in_functions/ast.json new file mode 100644 index 000000000..28ceb2cd1 --- /dev/null +++ b/parser/testdata/02294_nothing_arguments_in_functions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001026865, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02294_nothing_arguments_in_functions/metadata.json b/parser/testdata/02294_nothing_arguments_in_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02294_nothing_arguments_in_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02294_nothing_arguments_in_functions/query.sql b/parser/testdata/02294_nothing_arguments_in_functions/query.sql new file mode 100644 index 000000000..ecf4f9cab --- /dev/null +++ b/parser/testdata/02294_nothing_arguments_in_functions/query.sql @@ -0,0 +1,42 @@ +set enable_named_columns_in_function_tuple = 0; + +select arrayMap(x -> 2 * x, []); +select toTypeName(arrayMap(x -> 2 * x, [])); +select arrayMap((x, y) -> x + y, [], []); +select toTypeName(arrayMap((x, y) -> x + y, [], [])); +select arrayMap((x, y) -> x + y, [], CAST([], 'Array(Int32)')); +select toTypeName(arrayMap((x, y) -> x + y, [], CAST([], 'Array(Int32)'))); + +select arrayFilter(x -> 2 * x < 0, []); +select toTypeName(arrayFilter(x -> 2 * x < 0, [])); + +select toTypeName(arrayMap(x -> CAST(x, 'String'), [])); +select toTypeName(arrayMap(x -> toInt32(x), [])); +select toColumnTypeName(arrayMap(x -> toInt32(x), [])); + +select toTypeName(arrayMap(x -> [x], [])); +select toColumnTypeName(arrayMap(x -> [x], [])); + +select toTypeName(arrayMap(x ->map(1, x), [])); +select toColumnTypeName(arrayMap(x -> map(1, x), [])); + +select toTypeName(arrayMap(x ->tuple(x), [])); +select toColumnTypeName(arrayMap(x -> tuple(1, x), [])); + +select toTypeName(toInt32(assumeNotNull(materialize(NULL)))); +select toColumnTypeName(toInt32(assumeNotNull(materialize(NULL)))); + +select toTypeName(assumeNotNull(materialize(NULL))); +select toColumnTypeName(assumeNotNull(materialize(NULL))); + +select toTypeName([assumeNotNull(materialize(NULL))]); +select toColumnTypeName([assumeNotNull(materialize(NULL))]); + +select toTypeName(map(1, assumeNotNull(materialize(NULL)))); +select toColumnTypeName(map(1, assumeNotNull(materialize(NULL)))); + +select toTypeName(tuple(1, assumeNotNull(materialize(NULL)))); +select toColumnTypeName(tuple(1, 
assumeNotNull(materialize(NULL)))); + +select toTypeName(assumeNotNull(materialize(NULL)) * 2); +select toColumnTypeName(assumeNotNull(materialize(NULL)) * 2); diff --git a/parser/testdata/02294_optimize_aggregation_in_order_prefix_Array_functions/ast.json b/parser/testdata/02294_optimize_aggregation_in_order_prefix_Array_functions/ast.json new file mode 100644 index 000000000..76d8ba6b2 --- /dev/null +++ b/parser/testdata/02294_optimize_aggregation_in_order_prefix_Array_functions/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_02294 (children 1)" + }, + { + "explain": " Identifier data_02294" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001480057, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02294_optimize_aggregation_in_order_prefix_Array_functions/metadata.json b/parser/testdata/02294_optimize_aggregation_in_order_prefix_Array_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02294_optimize_aggregation_in_order_prefix_Array_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02294_optimize_aggregation_in_order_prefix_Array_functions/query.sql b/parser/testdata/02294_optimize_aggregation_in_order_prefix_Array_functions/query.sql new file mode 100644 index 000000000..5f588a2ac --- /dev/null +++ b/parser/testdata/02294_optimize_aggregation_in_order_prefix_Array_functions/query.sql @@ -0,0 +1,5 @@ +drop table if exists data_02294; +create table data_02294 (a Int64, b Int64, grp_aggreg AggregateFunction(groupArrayArray, Array(UInt64)), grp_simple SimpleAggregateFunction(groupArrayArray, Array(UInt64))) engine = MergeTree() order by a; +insert into data_02294 select intDiv(number, 2) a, 0 b, groupArrayArrayState([toUInt64(number)]), groupArrayArray([toUInt64(number)]) from numbers(4) group by a, b; +SELECT arraySort(groupArrayArrayMerge(grp_aggreg)) gra , arraySort(groupArrayArray(grp_simple)) grs FROM data_02294 group by a, b SETTINGS optimize_aggregation_in_order=1; +drop table data_02294; diff --git a/parser/testdata/02294_stringsearch_with_nonconst_needle/ast.json b/parser/testdata/02294_stringsearch_with_nonconst_needle/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02294_stringsearch_with_nonconst_needle/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02294_stringsearch_with_nonconst_needle/metadata.json b/parser/testdata/02294_stringsearch_with_nonconst_needle/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02294_stringsearch_with_nonconst_needle/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02294_stringsearch_with_nonconst_needle/query.sql b/parser/testdata/02294_stringsearch_with_nonconst_needle/query.sql new file mode 100644 index 000000000..4afa7e315 --- /dev/null +++ b/parser/testdata/02294_stringsearch_with_nonconst_needle/query.sql @@ -0,0 +1,70 @@ +-- tests of "(not) (i)like" functions + +drop table if exists non_const_needle; + +create table non_const_needle + (id UInt32, haystack String, needle String) + engine = MergeTree() + order by id; + +-- 1 - 33: LIKE-syntax, 34-37: re2-syntax +insert into non_const_needle values (1, 'Hello', '') (2, 'Hello', '%') (3, 'Hello', '%%') (4, 'Hello', '%%%') (5, 'Hello', '%_%') (6, 'Hello', '_') (7, 'Hello', '_%') (8, 'Hello', '%_') (9, 'Hello', 'H%o') (10, 'hello', 'H%0') 
(11, 'hello', 'h%o') (12, 'Hello', 'h%o') (13, 'OHello', '%lhell%') (14, 'OHello', '%hell%') (15, 'hEllo', '%HEL%') (16, 'abcdef', '%aBc%def%') (17, 'ABCDDEF', '%abc%def%') (18, 'Abc\nDef', '%abc%def%') (19, 'abc\ntdef', '%abc%def%') (20, 'abct\ndef', '%abc%dEf%') (21, 'abc\n\ndeF', '%abc%def%') (22, 'abc\n\ntdef', '%abc%deF%') (23, 'Abc\nt\ndef', '%abc%def%') (24, 'abct\n\ndef', '%abc%def%') (25, 'ab\ndef', '%Abc%def%') (26, 'aBc\nef', '%ABC%DEF%') (27, 'ёЁё', 'Ё%Ё') (28, 'ощщЁё', 'Щ%Ё') (29, 'ощЩЁё', '%Щ%Ё') (30, 'Щущпандер', '%щп%е%') (31, 'Щущпандер', '%щП%е%') (32, 'ощщЁё', '%щ%') (33, 'ощЩЁё', '%ё%') (34, 'Hello', '.*') (35, 'Hello', '.*ell.*') (36, 'Hello', 'o$') (37, 'Hello', 'hE.*lO'); + +select 'LIKE'; +select id, haystack, needle, like(haystack, needle) + from non_const_needle + order by id; + +select 'NOT LIKE'; +select id, haystack, needle, not like(haystack, needle) + from non_const_needle + order by id; + +select 'ILIKE'; +select id, haystack, needle, ilike(haystack, needle) + from non_const_needle + order by id; + +select 'NOT ILIKE'; +select id, haystack, needle, not ilike(haystack, needle) + from non_const_needle + order by id; + +select 'MATCH'; +select id, haystack, needle, match(haystack, needle) + from non_const_needle + order by id; + +drop table if exists non_const_needle; + +-- rudimentary tests of "multiSearchAny()", "multiSearchFirstIndex()" and "multiSearchFirstPosition()" functions + +select 'MULTISEARCHANY'; +select multiSearchAny(materialize('Hello World'), materialize([])); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select 0 = multiSearchAny('Hello World', CAST([], 'Array(String)')); +select 1 = multiSearchAny(materialize('Hello World'), materialize(['orld'])); +select 0 = multiSearchAny(materialize('Hello World'), materialize(['Hallo', 'Welt'])); +select 1 = multiSearchAny(materialize('Hello World'), materialize(['Hallo', 'orld'])); +select 1 = multiSearchAnyCaseInsensitive(materialize('Hello World'), materialize(['WORLD'])); +select 1 = multiSearchAnyUTF8(materialize('Hello World £'), materialize(['WORLD', '£'])); +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('Hello World £'), materialize(['WORLD'])); + +select 'MULTISEARCHFIRSTINDEX'; +select multiSearchFirstIndex(materialize('Hello World'), materialize([])); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select 0 = multiSearchFirstIndex('Hello World', CAST([], 'Array(String)')); +select 1 = multiSearchFirstIndex(materialize('Hello World'), materialize(['orld'])); +select 0 = multiSearchFirstIndex(materialize('Hello World'), materialize(['Hallo', 'Welt'])); +select 2 = multiSearchFirstIndex(materialize('Hello World'), materialize(['Hallo', 'orld'])); +select 1 = multiSearchFirstIndexCaseInsensitive(materialize('Hello World'), materialize(['WORLD'])); +select 2 = multiSearchFirstIndexUTF8(materialize('Hello World £'), materialize(['WORLD', '£'])); +select 1 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('Hello World £'), materialize(['WORLD'])); + +select 'MULTISEARCHFIRSTPOSITION'; +select multiSearchFirstPosition(materialize('Hello World'), materialize([])); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select 0 = multiSearchFirstPosition('Hello World', CAST([], 'Array(String)')); +select 8 = multiSearchFirstPosition(materialize('Hello World'), materialize(['orld'])); +select 0 = multiSearchFirstPosition(materialize('Hello World'), materialize(['Hallo', 'Welt'])); +select 8 = multiSearchFirstPosition(materialize('Hello World'), materialize(['Hallo', 'orld'])); +select 7 = 
multiSearchFirstPositionCaseInsensitive(materialize('Hello World'), materialize(['WORLD'])); +select 13 = multiSearchFirstPositionUTF8(materialize('Hello World £'), materialize(['WORLD', '£'])); +select 7 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('Hello World £'), materialize(['WORLD'])); diff --git a/parser/testdata/02294_system_certificates/ast.json b/parser/testdata/02294_system_certificates/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02294_system_certificates/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02294_system_certificates/metadata.json b/parser/testdata/02294_system_certificates/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02294_system_certificates/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02294_system_certificates/query.sql b/parser/testdata/02294_system_certificates/query.sql new file mode 100644 index 000000000..1fb70f524 --- /dev/null +++ b/parser/testdata/02294_system_certificates/query.sql @@ -0,0 +1,2 @@ +# Check table structure +DESCRIBE system.certificates; diff --git a/parser/testdata/02295_GROUP_BY_AggregateFunction/ast.json b/parser/testdata/02295_GROUP_BY_AggregateFunction/ast.json new file mode 100644 index 000000000..84a8c8d26 --- /dev/null +++ b/parser/testdata/02295_GROUP_BY_AggregateFunction/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_02295 (children 1)" + }, + { + "explain": " Identifier data_02295" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001564828, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02295_GROUP_BY_AggregateFunction/metadata.json b/parser/testdata/02295_GROUP_BY_AggregateFunction/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02295_GROUP_BY_AggregateFunction/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02295_GROUP_BY_AggregateFunction/query.sql b/parser/testdata/02295_GROUP_BY_AggregateFunction/query.sql new file mode 100644 index 000000000..862c0f8fd --- /dev/null +++ b/parser/testdata/02295_GROUP_BY_AggregateFunction/query.sql @@ -0,0 +1,24 @@ +drop table if exists data_02295; + +create table data_02295 ( + -- the order of "a" and "b" is important here + -- (since finalizeChunk() accepts positions and they may be wrong) + b Int64, + a Int64, + grp_aggreg AggregateFunction(groupArrayArray, Array(UInt64)) +) engine = MergeTree() order by a; +insert into data_02295 select 0 b, intDiv(number, 2) a, groupArrayArrayState([toUInt64(number)]) from numbers(4) group by a, b; + +-- { echoOn } +SELECT grp_aggreg FROM data_02295 GROUP BY a, grp_aggreg ORDER BY a SETTINGS optimize_aggregation_in_order = 0 FORMAT JSONEachRow; +SELECT grp_aggreg FROM data_02295 GROUP BY a, grp_aggreg ORDER BY a SETTINGS optimize_aggregation_in_order = 1 FORMAT JSONEachRow; +SELECT grp_aggreg FROM data_02295 GROUP BY a, grp_aggreg WITH TOTALS ORDER BY a SETTINGS optimize_aggregation_in_order = 0 FORMAT JSONEachRow; +SELECT grp_aggreg FROM data_02295 GROUP BY a, grp_aggreg WITH TOTALS ORDER BY a SETTINGS optimize_aggregation_in_order = 1 FORMAT JSONEachRow; +-- regression for incorrect positions passed to finalizeChunk() +SELECT a, min(b), max(b) FROM data_02295 GROUP BY a ORDER BY a, count() SETTINGS optimize_aggregation_in_order = 1; +SELECT a, min(b), max(b) FROM data_02295 GROUP BY a 
ORDER BY a, count() SETTINGS optimize_aggregation_in_order = 1, max_threads = 1; +SELECT a, min(b), max(b) FROM data_02295 GROUP BY a WITH TOTALS ORDER BY a, count() SETTINGS optimize_aggregation_in_order = 1; +SELECT a, min(b), max(b) FROM data_02295 GROUP BY a WITH TOTALS ORDER BY a, count() SETTINGS optimize_aggregation_in_order = 1, max_threads = 1; +-- { echoOff } + +drop table data_02295; diff --git a/parser/testdata/02295_global_with_in_subquery/ast.json b/parser/testdata/02295_global_with_in_subquery/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02295_global_with_in_subquery/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02295_global_with_in_subquery/metadata.json b/parser/testdata/02295_global_with_in_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02295_global_with_in_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02295_global_with_in_subquery/query.sql b/parser/testdata/02295_global_with_in_subquery/query.sql new file mode 100644 index 000000000..d70209bbc --- /dev/null +++ b/parser/testdata/02295_global_with_in_subquery/query.sql @@ -0,0 +1,17 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/37141 + +WITH (SELECT 1) as v0 +SELECT v0, v > 0 FROM ( + WITH (SELECT 1) AS v1, (SELECT 2) AS v2 + SELECT v1 AS v + UNION ALL + SELECT v2 AS v +) AS a; + +SELECT number FROM numbers(10) +WHERE number IN ( + WITH (SELECT 1) AS v1, (SELECT 2) AS v2 + SELECT v1 AS v + UNION ALL + SELECT v2 AS v +); diff --git a/parser/testdata/02296_nullable_arguments_in_array_filter/ast.json b/parser/testdata/02296_nullable_arguments_in_array_filter/ast.json new file mode 100644 index 000000000..d23b477a3 --- /dev/null +++ b/parser/testdata/02296_nullable_arguments_in_array_filter/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayFilter (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001459, + "rows_read": 20, + "bytes_read": 790 + } +} diff --git a/parser/testdata/02296_nullable_arguments_in_array_filter/metadata.json b/parser/testdata/02296_nullable_arguments_in_array_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02296_nullable_arguments_in_array_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02296_nullable_arguments_in_array_filter/query.sql 
b/parser/testdata/02296_nullable_arguments_in_array_filter/query.sql new file mode 100644 index 000000000..3c1f2b419 --- /dev/null +++ b/parser/testdata/02296_nullable_arguments_in_array_filter/query.sql @@ -0,0 +1,4 @@ +select arrayFilter(x -> 2 * x > 0, []); +select arrayFilter(x -> 2 * x > 0, [NULL]); +select arrayFilter(x -> x % 2 ? NULL : 1, [1, 2, 3, 4]); +select arrayFilter(x -> x % 2, [1, NULL, 3, NULL]); diff --git a/parser/testdata/02296_ttl_non_deterministic/ast.json b/parser/testdata/02296_ttl_non_deterministic/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02296_ttl_non_deterministic/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02296_ttl_non_deterministic/metadata.json b/parser/testdata/02296_ttl_non_deterministic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02296_ttl_non_deterministic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02296_ttl_non_deterministic/query.sql b/parser/testdata/02296_ttl_non_deterministic/query.sql new file mode 100644 index 000000000..14d8979a6 --- /dev/null +++ b/parser/testdata/02296_ttl_non_deterministic/query.sql @@ -0,0 +1,34 @@ +-- Tags: replica + +DROP TABLE IF EXISTS t_ttl_non_deterministic; + +CREATE TABLE t_ttl_non_deterministic(A Int64) +ENGINE = MergeTree ORDER BY A TTL now() + toIntervalMonth(1); -- {serverError BAD_ARGUMENTS} + +CREATE TABLE t_ttl_non_deterministic(A Int64) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/ttl1', '1') ORDER BY A TTL now() + toIntervalMonth(1); -- {serverError BAD_ARGUMENTS} + + +CREATE TABLE t_ttl_non_deterministic(A Int64) ENGINE = MergeTree ORDER BY A; +ALTER TABLE t_ttl_non_deterministic MODIFY TTL now() + toIntervalMonth(1); -- {serverError BAD_ARGUMENTS} +DROP TABLE t_ttl_non_deterministic; + +CREATE TABLE t_ttl_non_deterministic(A Int64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/ttl2', '1') ORDER BY A; +ALTER TABLE t_ttl_non_deterministic MODIFY TTL now() + toIntervalMonth(1); -- {serverError BAD_ARGUMENTS} +DROP TABLE t_ttl_non_deterministic; + + +CREATE TABLE t_ttl_non_deterministic(A Int64, B Int64 TTL now() + toIntervalMonth(1)) +ENGINE = MergeTree ORDER BY A; -- {serverError BAD_ARGUMENTS} + +CREATE TABLE t_ttl_non_deterministic(A Int64, B Int64 TTL now() + toIntervalMonth(1)) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/ttl3', '1') ORDER BY A; -- {serverError BAD_ARGUMENTS} + + +CREATE TABLE t_ttl_non_deterministic(A Int64, B Int64) ENGINE = MergeTree ORDER BY A; +ALTER TABLE t_ttl_non_deterministic MODIFY COLUMN B Int64 TTL now() + toIntervalMonth(1); -- {serverError BAD_ARGUMENTS} +DROP TABLE t_ttl_non_deterministic; + +CREATE TABLE t_ttl_non_deterministic(A Int64, B Int64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/ttl4', '1') ORDER BY A; +ALTER TABLE t_ttl_non_deterministic MODIFY COLUMN B Int64 TTL now() + toIntervalMonth(1); -- {serverError BAD_ARGUMENTS} +DROP TABLE t_ttl_non_deterministic; diff --git a/parser/testdata/02302_clash_const_aggegate_join/ast.json b/parser/testdata/02302_clash_const_aggegate_join/ast.json new file mode 100644 index 000000000..6d46070d6 --- /dev/null +++ b/parser/testdata/02302_clash_const_aggegate_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery e (children 1)" + }, + { + "explain": " Identifier e" + } + ], + + "rows": 2, + + "statistics": + { + 
"elapsed": 0.001106154, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02302_clash_const_aggegate_join/metadata.json b/parser/testdata/02302_clash_const_aggegate_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02302_clash_const_aggegate_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02302_clash_const_aggegate_join/query.sql b/parser/testdata/02302_clash_const_aggegate_join/query.sql new file mode 100644 index 000000000..32c602e0d --- /dev/null +++ b/parser/testdata/02302_clash_const_aggegate_join/query.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS e; +-- https://github.com/ClickHouse/ClickHouse/issues/36891 + +CREATE TABLE e ( a UInt64, t DateTime ) ENGINE = MergeTree PARTITION BY toDate(t) ORDER BY tuple(); +INSERT INTO e SELECT 1, toDateTime('2020-02-01 12:00:01') + INTERVAL number MONTH FROM numbers(10); + +SELECT sumIf( 1, if( 1, toDateTime('2020-01-01 00:00:00', 'UTC'), toDateTime('1970-01-01 00:00:00', 'UTC')) > t ) +FROM e JOIN ( SELECT 1 joinKey) AS da ON joinKey = a +WHERE t >= toDateTime('2021-07-19T13:00:00', 'UTC') AND t <= toDateTime('2021-07-19T13:59:59', 'UTC'); + +SELECT any( toDateTime('2020-01-01T00:00:00', 'UTC')) +FROM e JOIN ( SELECT 1 joinKey) AS da ON joinKey = a +PREWHERE t >= toDateTime('2021-07-19T13:00:00', 'UTC'); + +SELECT sumIf( 1, if( 1, toDateTime('2020-01-01 00:00:00', 'UTC'), toDateTime('1970-01-01 00:00:00', 'UTC')) > t ) +FROM e JOIN ( SELECT 1 joinKey) AS da ON joinKey = a +WHERE t >= toDateTime('2020-01-01 00:00:00', 'UTC') AND t <= toDateTime('2021-07-19T13:59:59', 'UTC'); + +SELECT any(toDateTime('2020-01-01 00:00:00')) +FROM e JOIN ( SELECT 1 joinKey) AS da ON joinKey = a +PREWHERE t >= toDateTime('2020-01-01 00:00:00'); + +SELECT any('2020-01-01 00:00:00') FROM e JOIN ( SELECT 1 joinKey) AS da ON joinKey = a PREWHERE t = '2020-01-01 00:00:00'; + +SELECT any('x') FROM e JOIN ( SELECT 1 joinKey) AS da ON joinKey = a PREWHERE toString(a) = 'x'; + +SELECT any('1') FROM e JOIN ( SELECT 1 joinKey) AS da ON joinKey = a PREWHERE toString(a) = '1'; + diff --git a/parser/testdata/02302_column_decl_null_before_defaul_value/ast.json b/parser/testdata/02302_column_decl_null_before_defaul_value/ast.json new file mode 100644 index 000000000..bbb9341bb --- /dev/null +++ b/parser/testdata/02302_column_decl_null_before_defaul_value/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'create table, column +type +NULL'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001674654, + "rows_read": 5, + "bytes_read": 203 + } +} diff --git a/parser/testdata/02302_column_decl_null_before_defaul_value/metadata.json b/parser/testdata/02302_column_decl_null_before_defaul_value/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02302_column_decl_null_before_defaul_value/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02302_column_decl_null_before_defaul_value/query.sql b/parser/testdata/02302_column_decl_null_before_defaul_value/query.sql new file mode 100644 index 000000000..a2c2fc7cb --- /dev/null +++ b/parser/testdata/02302_column_decl_null_before_defaul_value/query.sql @@ -0,0 +1,61 @@ 
+select 'create table, column +type +NULL'; +DROP TABLE IF EXISTS null_before SYNC; +CREATE TABLE null_before (id INT NULL) ENGINE=MergeTree() ORDER BY tuple(); +DESCRIBE TABLE null_before; + +select 'create table, column +type +NOT NULL'; +DROP TABLE IF EXISTS null_before SYNC; +CREATE TABLE null_before (id INT NOT NULL) ENGINE=MergeTree() ORDER BY tuple(); +DESCRIBE TABLE null_before; + +select 'create table, column +type +NULL +DEFAULT'; +DROP TABLE IF EXISTS null_before SYNC; +CREATE TABLE null_before (id INT NULL DEFAULT 1) ENGINE=MergeTree() ORDER BY tuple(); +DESCRIBE TABLE null_before; + +select 'create table, column +type +NOT NULL +DEFAULT'; +DROP TABLE IF EXISTS null_before SYNC; +CREATE TABLE null_before (id INT NOT NULL DEFAULT 1) ENGINE=MergeTree() ORDER BY tuple(); +DESCRIBE TABLE null_before; + +select 'create table, column +type +DEFAULT +NULL'; +DROP TABLE IF EXISTS null_before SYNC; +CREATE TABLE null_before (id INT DEFAULT 1 NULL) ENGINE=MergeTree() ORDER BY tuple(); +DESCRIBE TABLE null_before; + +select 'create table, column +type +DEFAULT +NOT NULL'; +DROP TABLE IF EXISTS null_before SYNC; +CREATE TABLE null_before (id INT DEFAULT 1 NOT NULL) ENGINE=MergeTree() ORDER BY tuple(); +DESCRIBE TABLE null_before; + +select 'create table, column -type +NULL +DEFAULT'; +DROP TABLE IF EXISTS null_before SYNC; +CREATE TABLE null_before (id NULL DEFAULT 1) ENGINE=MergeTree() ORDER BY tuple(); +DESCRIBE TABLE null_before; + +select 'create table, column -type +NOT NULL +DEFAULT'; +DROP TABLE IF EXISTS null_before SYNC; +CREATE TABLE null_before (id NOT NULL DEFAULT 1) ENGINE=MergeTree() ORDER BY tuple(); +DESCRIBE TABLE null_before; + +select 'create table, column -type +DEFAULT +NULL'; +DROP TABLE IF EXISTS null_before SYNC; +CREATE TABLE null_before (id DEFAULT 1 NULL) ENGINE=MergeTree() ORDER BY tuple(); +DESCRIBE TABLE null_before; + +select 'create table, column -type +DEFAULT +NOT NULL'; +DROP TABLE IF EXISTS null_before SYNC; +CREATE TABLE null_before (id DEFAULT 1 NOT NULL) ENGINE=MergeTree() ORDER BY tuple(); +DESCRIBE TABLE null_before; + +select 'alter column, NULL modifier is not allowed'; +DROP TABLE IF EXISTS null_before SYNC; +CREATE TABLE null_before (id INT NOT NULL) ENGINE=MergeTree() ORDER BY tuple(); +ALTER TABLE null_before ALTER COLUMN id TYPE INT NULL; -- { clientError SYNTAX_ERROR } + +select 'modify column, NULL modifier is not allowed'; +DROP TABLE IF EXISTS null_before SYNC; +CREATE TABLE null_before (id INT NOT NULL) ENGINE=MergeTree() ORDER BY tuple(); +ALTER TABLE null_before MODIFY COLUMN id NULL DEFAULT 1; -- { clientError SYNTAX_ERROR } + +DROP TABLE IF EXISTS null_before SYNC; diff --git a/parser/testdata/02302_defaults_in_columnar_formats/ast.json b/parser/testdata/02302_defaults_in_columnar_formats/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02302_defaults_in_columnar_formats/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02302_defaults_in_columnar_formats/metadata.json b/parser/testdata/02302_defaults_in_columnar_formats/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02302_defaults_in_columnar_formats/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02302_defaults_in_columnar_formats/query.sql b/parser/testdata/02302_defaults_in_columnar_formats/query.sql new file mode 100644 index 000000000..c49bf3928 --- /dev/null +++ b/parser/testdata/02302_defaults_in_columnar_formats/query.sql @@ -0,0 +1,8 @@ 
+-- Tags: no-fasttest + +insert into function file(currentDatabase() || '_data_02302.parquet') select 1 as x, null::Nullable(UInt8) as xx settings engine_file_truncate_on_insert=1; +select * from file(currentDatabase() || '_data_02302.parquet', auto, 'x UInt8, xx UInt8 default 10, y default 42, z default x + xx + y') settings input_format_parquet_allow_missing_columns=1; +insert into function file(currentDatabase() || '_data_02302.orc') select 1 as x, null::Nullable(UInt8) as xx settings engine_file_truncate_on_insert=1; +select * from file(currentDatabase() || '_data_02302.orc', auto, 'x UInt8, xx UInt8 default 10, y default 42, z default x + xx + y') settings input_format_orc_allow_missing_columns=1; +insert into function file(currentDatabase() || '_data_02302.arrow') select 1 as x, null::Nullable(UInt8) as xx settings engine_file_truncate_on_insert=1; +select * from file(currentDatabase() || '_data_02302.arrow', auto, 'x UInt8, xx UInt8 default 10, y default 42, z default x + xx + y') settings input_format_arrow_allow_missing_columns=1; diff --git a/parser/testdata/02302_join_auto_lc_nullable_bug/ast.json b/parser/testdata/02302_join_auto_lc_nullable_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02302_join_auto_lc_nullable_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02302_join_auto_lc_nullable_bug/metadata.json b/parser/testdata/02302_join_auto_lc_nullable_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02302_join_auto_lc_nullable_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02302_join_auto_lc_nullable_bug/query.sql b/parser/testdata/02302_join_auto_lc_nullable_bug/query.sql new file mode 100644 index 000000000..7f7285d54 --- /dev/null +++ b/parser/testdata/02302_join_auto_lc_nullable_bug/query.sql @@ -0,0 +1,6 @@ + +SET max_bytes_in_join = '100', join_algorithm = 'auto'; + +SELECT 3 == count() FROM (SELECT toLowCardinality(toNullable(number)) AS l FROM system.numbers LIMIT 3) AS s1 +ANY LEFT JOIN (SELECT toLowCardinality(toNullable(number)) AS r FROM system.numbers LIMIT 4) AS s2 ON l = r +; diff --git a/parser/testdata/02302_lc_nullable_string_insert_as_number/ast.json b/parser/testdata/02302_lc_nullable_string_insert_as_number/ast.json new file mode 100644 index 000000000..29f4336a2 --- /dev/null +++ b/parser/testdata/02302_lc_nullable_string_insert_as_number/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery lc_nullable_string (children 1)" + }, + { + "explain": " Identifier lc_nullable_string" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001341428, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/02302_lc_nullable_string_insert_as_number/metadata.json b/parser/testdata/02302_lc_nullable_string_insert_as_number/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02302_lc_nullable_string_insert_as_number/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02302_lc_nullable_string_insert_as_number/query.sql b/parser/testdata/02302_lc_nullable_string_insert_as_number/query.sql new file mode 100644 index 000000000..9859c1559 --- /dev/null +++ b/parser/testdata/02302_lc_nullable_string_insert_as_number/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS lc_nullable_string; + +CREATE TABLE lc_nullable_string(`c1` 
LowCardinality(Nullable(String)) DEFAULT CAST(NULL, 'LowCardinality(Nullable(String))')) +ENGINE = Memory; + +INSERT INTO lc_nullable_string (c1) FORMAT Values (0); +INSERT INTO lc_nullable_string (c1) Values (1); + +SELECT * FROM lc_nullable_string ORDER BY c1; + +DROP TABLE lc_nullable_string; diff --git a/parser/testdata/02302_projections_GROUP_BY_ORDERY_BY_optimize_aggregation_in_order/ast.json b/parser/testdata/02302_projections_GROUP_BY_ORDERY_BY_optimize_aggregation_in_order/ast.json new file mode 100644 index 000000000..e6008913b --- /dev/null +++ b/parser/testdata/02302_projections_GROUP_BY_ORDERY_BY_optimize_aggregation_in_order/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_agg_proj_02302 (children 1)" + }, + { + "explain": " Identifier test_agg_proj_02302" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001713253, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/02302_projections_GROUP_BY_ORDERY_BY_optimize_aggregation_in_order/metadata.json b/parser/testdata/02302_projections_GROUP_BY_ORDERY_BY_optimize_aggregation_in_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02302_projections_GROUP_BY_ORDERY_BY_optimize_aggregation_in_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02302_projections_GROUP_BY_ORDERY_BY_optimize_aggregation_in_order/query.sql b/parser/testdata/02302_projections_GROUP_BY_ORDERY_BY_optimize_aggregation_in_order/query.sql new file mode 100644 index 000000000..fd6c1dd79 --- /dev/null +++ b/parser/testdata/02302_projections_GROUP_BY_ORDERY_BY_optimize_aggregation_in_order/query.sql @@ -0,0 +1,11 @@ +drop table if exists test_agg_proj_02302; + +create table test_agg_proj_02302 (x Int32, y Int32, PROJECTION x_plus_y (select sum(x - y), argMax(x, y) group by x + y)) ENGINE = MergeTree order by tuple() settings index_granularity = 1; +insert into test_agg_proj_02302 select intDiv(number, 2), -intDiv(number,3) - 1 from numbers(100); + +-- { echoOn } +select x + y, sum(x - y) as s from test_agg_proj_02302 group by x + y order by s desc limit 5 settings optimize_aggregation_in_order=0, optimize_read_in_order=0; +select x + y, sum(x - y) as s from test_agg_proj_02302 group by x + y order by s desc limit 5 settings optimize_aggregation_in_order=1, optimize_read_in_order=1; + +-- { echoOff } +drop table test_agg_proj_02302; diff --git a/parser/testdata/02302_s3_file_pruning/ast.json b/parser/testdata/02302_s3_file_pruning/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02302_s3_file_pruning/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02302_s3_file_pruning/metadata.json b/parser/testdata/02302_s3_file_pruning/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02302_s3_file_pruning/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02302_s3_file_pruning/query.sql b/parser/testdata/02302_s3_file_pruning/query.sql new file mode 100644 index 000000000..58afb682f --- /dev/null +++ b/parser/testdata/02302_s3_file_pruning/query.sql @@ -0,0 +1,47 @@ +-- Tags: no-parallel, no-fasttest +-- Tag no-fasttest: Depends on S3 + +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +-- { echo } +drop table if exists test_02302; +create table test_02302 (a UInt64) engine = S3(s3_conn, 
filename='test_02302_{_partition_id}', format=Parquet) partition by a; +insert into test_02302 select number from numbers(10) settings s3_truncate_on_insert=1; +select * from test_02302; -- { serverError NOT_IMPLEMENTED } +drop table test_02302; + +set max_rows_to_read = 1; + +-- Test s3 table function with glob +select * from s3(s3_conn, filename='test_02302_*', format=Parquet) where _file like '%5'; + +-- Test s3 table with explicit keys (no glob) +-- TODO support truncate table function +drop table if exists test_02302; +create table test_02302 (a UInt64) engine = S3(s3_conn, filename='test_02302.2', format=Parquet); +truncate table test_02302; + +drop table if exists test_02302; +create table test_02302 (a UInt64) engine = S3(s3_conn, filename='test_02302.1', format=Parquet); +truncate table test_02302; + +drop table if exists test_02302; +create table test_02302 (a UInt64) engine = S3(s3_conn, filename='test_02302', format=Parquet); +truncate table test_02302; + +insert into test_02302 select 0 settings s3_create_new_file_on_insert = true; +insert into test_02302 select 1 settings s3_create_new_file_on_insert = true; +insert into test_02302 select 2 settings s3_create_new_file_on_insert = true; + +select * from test_02302 where _file like '%1'; + +select _file, * from test_02302 where _file like '%1'; + +set max_rows_to_read = 2; +select * from test_02302 where (_file like '%.1' OR _file like '%.2') AND a > 1; + +set max_rows_to_read = 999; + +select 'a1' as _file, * from test_02302 where _file like '%1' ORDER BY a; + +drop table test_02302; diff --git a/parser/testdata/02303_cast_nullable_to_custom_types/ast.json b/parser/testdata/02303_cast_nullable_to_custom_types/ast.json new file mode 100644 index 000000000..6e34b1af0 --- /dev/null +++ b/parser/testdata/02303_cast_nullable_to_custom_types/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal 'Nullable(String)'" + }, + { + "explain": " Literal 'Nullable(Bool)'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001599395, + "rows_read": 11, + "bytes_read": 417 + } +} diff --git a/parser/testdata/02303_cast_nullable_to_custom_types/metadata.json b/parser/testdata/02303_cast_nullable_to_custom_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02303_cast_nullable_to_custom_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02303_cast_nullable_to_custom_types/query.sql b/parser/testdata/02303_cast_nullable_to_custom_types/query.sql new file mode 100644 index 000000000..570fbcde0 --- /dev/null +++ b/parser/testdata/02303_cast_nullable_to_custom_types/query.sql @@ -0,0 +1,30 @@ +select CAST(CAST(NULL, 'Nullable(String)'), 'Nullable(Bool)'); +select CAST(CAST(NULL, 'Nullable(String)'), 'Nullable(IPv4)'); +select CAST(CAST(NULL, 'Nullable(String)'), 'Nullable(IPv6)'); + +select toBool(CAST(NULL, 'Nullable(String)')); +select toIPv4(CAST(NULL, 'Nullable(String)')); +select IPv4StringToNum(CAST(NULL, 
'Nullable(String)')); +select toIPv6(CAST(NULL, 'Nullable(String)')); +select IPv6StringToNum(CAST(NULL, 'Nullable(String)')); + +select CAST(number % 2 ? 'true' : NULL, 'Nullable(Bool)') from numbers(2); +select CAST(number % 2 ? '0.0.0.0' : NULL, 'Nullable(IPv4)') from numbers(2); +select CAST(number % 2 ? '0000:0000:0000:0000:0000:0000:0000:0000' : NULL, 'Nullable(IPv6)') from numbers(2); + +set cast_keep_nullable = 1; +select toBool(number % 2 ? 'true' : NULL) from numbers(2); +select toIPv4(number % 2 ? '0.0.0.0' : NULL) from numbers(2); +select toIPv4OrDefault(number % 2 ? '' : NULL) from numbers(2); +select toIPv4OrNull(number % 2 ? '' : NULL) from numbers(2); +select IPv4StringToNum(number % 2 ? '0.0.0.0' : NULL) from numbers(2); +select toIPv6(number % 2 ? '0000:0000:0000:0000:0000:0000:0000:0000' : NULL) from numbers(2); +select toIPv6OrDefault(number % 2 ? '' : NULL) from numbers(2); +select toIPv6OrNull(number % 2 ? '' : NULL) from numbers(2); +select IPv6StringToNum(number % 2 ? '0000:0000:0000:0000:0000:0000:0000:0000' : NULL) from numbers(2); + +select 'fuzzer issue'; +SELECT CAST(if(number % 2, 'truetrue', NULL), 'Nullable(Bool)') FROM numbers(2); -- {serverError CANNOT_PARSE_BOOL} +SELECT CAST(if(number % 2, 'falsefalse', NULL), 'Nullable(Bool)') FROM numbers(2); -- {serverError CANNOT_PARSE_BOOL} +SELECT accurateCastOrNull(if(number % 2, NULL, 'truex'), 'Bool') FROM numbers(4); +SELECT accurateCastOrNull(if(number % 2, 'truex', NULL), 'Bool') FROM numbers(4); diff --git a/parser/testdata/02304_grouping_set_order_by/ast.json b/parser/testdata/02304_grouping_set_order_by/ast.json new file mode 100644 index 000000000..a5b5d38ad --- /dev/null +++ b/parser/testdata/02304_grouping_set_order_by/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function toStartOfHour (alias timex) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier time" + }, + { + "explain": " Identifier id" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.00127539, + "rows_read": 10, + "bytes_read": 370 + } +} diff --git a/parser/testdata/02304_grouping_set_order_by/metadata.json b/parser/testdata/02304_grouping_set_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02304_grouping_set_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02304_grouping_set_order_by/query.sql b/parser/testdata/02304_grouping_set_order_by/query.sql new file mode 100644 index 000000000..d1b4ab134 --- /dev/null +++ b/parser/testdata/02304_grouping_set_order_by/query.sql @@ -0,0 +1,11 @@ +SELECT toStartOfHour(time) AS timex, id, count() +FROM +( + SELECT + concat('id', toString(number % 3)) AS id, + toDateTime('2020-01-01') + (number * 60) AS time + FROM numbers(100) +) +GROUP BY + GROUPING SETS ( (timex, id), (timex)) +ORDER BY timex ASC, id; diff --git a/parser/testdata/02304_grouping_sets_with_rollup_cube/ast.json b/parser/testdata/02304_grouping_sets_with_rollup_cube/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02304_grouping_sets_with_rollup_cube/ast.json 
@@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02304_grouping_sets_with_rollup_cube/metadata.json b/parser/testdata/02304_grouping_sets_with_rollup_cube/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02304_grouping_sets_with_rollup_cube/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02304_grouping_sets_with_rollup_cube/query.sql b/parser/testdata/02304_grouping_sets_with_rollup_cube/query.sql new file mode 100644 index 000000000..25263edc9 --- /dev/null +++ b/parser/testdata/02304_grouping_sets_with_rollup_cube/query.sql @@ -0,0 +1,23 @@ +SELECT + number +FROM + numbers(10) +GROUP BY + GROUPING SETS + ( + number, + number % 2 + ) + WITH ROLLUP; -- { serverError NOT_IMPLEMENTED } + +SELECT + number +FROM + numbers(10) +GROUP BY + GROUPING SETS + ( + number, + number % 2 + ) + WITH CUBE; -- { serverError NOT_IMPLEMENTED } diff --git a/parser/testdata/02304_orc_arrow_parquet_string_as_string/ast.json b/parser/testdata/02304_orc_arrow_parquet_string_as_string/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02304_orc_arrow_parquet_string_as_string/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02304_orc_arrow_parquet_string_as_string/metadata.json b/parser/testdata/02304_orc_arrow_parquet_string_as_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02304_orc_arrow_parquet_string_as_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02304_orc_arrow_parquet_string_as_string/query.sql b/parser/testdata/02304_orc_arrow_parquet_string_as_string/query.sql new file mode 100644 index 000000000..b771cd67a --- /dev/null +++ b/parser/testdata/02304_orc_arrow_parquet_string_as_string/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-fasttest + +insert into function file(currentDatabase() || '_data_02304.parquet') select 'hello' as s from numbers(3) settings engine_file_truncate_on_insert=1, output_format_parquet_string_as_string=1; +desc file(currentDatabase() || '_data_02304.parquet'); +insert into function file(currentDatabase() || '_data_02304.orc') select 'hello' as s from numbers(3) settings engine_file_truncate_on_insert=1, output_format_orc_string_as_string=1; +desc file(currentDatabase() || '_data_02304.orc'); +insert into function file(currentDatabase() || '_data_02304.arrow') select 'hello' as s from numbers(3) settings engine_file_truncate_on_insert=1, output_format_arrow_string_as_string=1; +desc file(currentDatabase() || '_data_02304.arrow'); diff --git a/parser/testdata/02306_part_types_profile_events/ast.json b/parser/testdata/02306_part_types_profile_events/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02306_part_types_profile_events/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02306_part_types_profile_events/metadata.json b/parser/testdata/02306_part_types_profile_events/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02306_part_types_profile_events/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02306_part_types_profile_events/query.sql b/parser/testdata/02306_part_types_profile_events/query.sql new file mode 100644 index 000000000..cec229dc1 --- /dev/null +++ b/parser/testdata/02306_part_types_profile_events/query.sql @@ -0,0 +1,46 @@ +-- Tags: no-async-insert +-- no-async-insert: 1 part is inserted with async inserts 
+DROP TABLE IF EXISTS t_parts_profile_events; + +CREATE TABLE t_parts_profile_events (a UInt32) +ENGINE = MergeTree ORDER BY tuple() +SETTINGS min_rows_for_wide_part = 10, min_bytes_for_wide_part = 0; + +SYSTEM STOP MERGES t_parts_profile_events; + +SET log_comment = '02306_part_types_profile_events'; + +INSERT INTO t_parts_profile_events VALUES (1); +INSERT INTO t_parts_profile_events VALUES (1); + +SYSTEM START MERGES t_parts_profile_events; +OPTIMIZE TABLE t_parts_profile_events FINAL; +SYSTEM STOP MERGES t_parts_profile_events; + +INSERT INTO t_parts_profile_events SELECT number FROM numbers(20); + +SYSTEM START MERGES t_parts_profile_events; +OPTIMIZE TABLE t_parts_profile_events FINAL; +SYSTEM STOP MERGES t_parts_profile_events; + +SYSTEM FLUSH LOGS query_log, part_log; + +SELECT count(), sum(ProfileEvents['InsertedWideParts']), sum(ProfileEvents['InsertedCompactParts']) + FROM system.query_log WHERE current_database = currentDatabase() + AND log_comment = '02306_part_types_profile_events' + AND query ILIKE 'INSERT INTO%' AND type = 'QueryFinish'; + +SELECT count(), sum(ProfileEvents['MergedIntoWideParts']), sum(ProfileEvents['MergedIntoCompactParts']) + FROM system.query_log WHERE current_database = currentDatabase() + AND log_comment = '02306_part_types_profile_events' + AND query ILIKE 'OPTIMIZE TABLE%' AND type = 'QueryFinish'; + +SELECT part_type FROM system.part_log WHERE database = currentDatabase() + AND table = 't_parts_profile_events' AND event_type = 'NewPart' + ORDER BY event_time_microseconds; + +SELECT part_type, count() > 0 FROM system.part_log WHERE database = currentDatabase() + AND table = 't_parts_profile_events' AND event_type = 'MergeParts' + GROUP BY part_type ORDER BY part_type; + +DROP TABLE t_parts_profile_events; diff --git a/parser/testdata/02306_window_move_row_number_fix/ast.json b/parser/testdata/02306_window_move_row_number_fix/ast.json new file mode 100644 index 000000000..b652c7828 --- /dev/null +++ b/parser/testdata/02306_window_move_row_number_fix/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function nth_value (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal UInt64_1048577" + }, + { + "explain": " WindowDefinition (children 1)" + }, + { + "explain": " Literal UInt64_1023" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.00093141, + "rows_read": 10, + "bytes_read": 369 + } +} diff --git a/parser/testdata/02306_window_move_row_number_fix/metadata.json b/parser/testdata/02306_window_move_row_number_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02306_window_move_row_number_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02306_window_move_row_number_fix/query.sql b/parser/testdata/02306_window_move_row_number_fix/query.sql new file mode 100644 index 000000000..96dd8f617 --- /dev/null +++ b/parser/testdata/02306_window_move_row_number_fix/query.sql @@ -0,0 +1 @@ +SELECT nth_value(NULL, 1048577) OVER (Rows BETWEEN 1023 FOLLOWING AND UNBOUNDED FOLLOWING) diff --git a/parser/testdata/02307_join_get_array_null/ast.json b/parser/testdata/02307_join_get_array_null/ast.json new file 
mode 100644 index 000000000..24c8cea0a --- /dev/null +++ b/parser/testdata/02307_join_get_array_null/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery id_val (children 1)" + }, + { + "explain": " Identifier id_val" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001083872, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/02307_join_get_array_null/metadata.json b/parser/testdata/02307_join_get_array_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02307_join_get_array_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02307_join_get_array_null/query.sql b/parser/testdata/02307_join_get_array_null/query.sql new file mode 100644 index 000000000..b1bbc076c --- /dev/null +++ b/parser/testdata/02307_join_get_array_null/query.sql @@ -0,0 +1,6 @@ +drop table if exists id_val; + +create table id_val(id Int32, val Array(Int32)) engine Join(ANY, LEFT, id) settings join_use_nulls = 1; +select joinGet(id_val, 'val', toInt32(number)) from numbers(1); + +drop table id_val; diff --git a/parser/testdata/02310_generate_multi_columns_with_uuid/ast.json b/parser/testdata/02310_generate_multi_columns_with_uuid/ast.json new file mode 100644 index 000000000..92dbb37bd --- /dev/null +++ b/parser/testdata/02310_generate_multi_columns_with_uuid/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function generateUUIDv4 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function generateUUIDv4 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001352007, + "rows_read": 12, + "bytes_read": 477 + } +} diff --git a/parser/testdata/02310_generate_multi_columns_with_uuid/metadata.json b/parser/testdata/02310_generate_multi_columns_with_uuid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02310_generate_multi_columns_with_uuid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02310_generate_multi_columns_with_uuid/query.sql b/parser/testdata/02310_generate_multi_columns_with_uuid/query.sql new file mode 100644 index 000000000..3ab19446b --- /dev/null +++ b/parser/testdata/02310_generate_multi_columns_with_uuid/query.sql @@ -0,0 +1,5 @@ +SELECT generateUUIDv4(1) = generateUUIDv4(2); + +SELECT generateUUIDv4() = generateUUIDv4(1); + +SELECT generateUUIDv4(1) = generateUUIDv4(1); diff --git a/parser/testdata/02310_uuid_v7/ast.json b/parser/testdata/02310_uuid_v7/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02310_uuid_v7/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02310_uuid_v7/metadata.json b/parser/testdata/02310_uuid_v7/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02310_uuid_v7/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02310_uuid_v7/query.sql b/parser/testdata/02310_uuid_v7/query.sql new file mode 100644 index 000000000..e1aa3189d --- /dev/null +++ b/parser/testdata/02310_uuid_v7/query.sql @@ -0,0 +1,8 @@ +-- Tests function generateUUIDv7 + +SELECT toTypeName(generateUUIDv7()); +SELECT substring(hex(generateUUIDv7()), 13, 1); -- check version bits +SELECT bitAnd(bitShiftRight(toUInt128(generateUUIDv7()), 62), 3); -- check variant bits +SELECT generateUUIDv7(1) = generateUUIDv7(2); +SELECT generateUUIDv7() = generateUUIDv7(1); +SELECT generateUUIDv7(1) = generateUUIDv7(1); diff --git a/parser/testdata/02311_create_table_with_unknown_format/ast.json b/parser/testdata/02311_create_table_with_unknown_format/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02311_create_table_with_unknown_format/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02311_create_table_with_unknown_format/metadata.json b/parser/testdata/02311_create_table_with_unknown_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02311_create_table_with_unknown_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02311_create_table_with_unknown_format/query.sql b/parser/testdata/02311_create_table_with_unknown_format/query.sql new file mode 100644 index 000000000..51baf3a49 --- /dev/null +++ b/parser/testdata/02311_create_table_with_unknown_format/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest, use-hdfs + +create table test_02311 (x UInt32) engine=File(UnknownFormat); -- {serverError UNKNOWN_FORMAT} +create table test_02311 (x UInt32) engine=URL('http://some/url', UnknownFormat); -- {serverError UNKNOWN_FORMAT} +create table test_02311 (x UInt32) engine=S3('http://host:2020/test/data', UnknownFormat); -- {serverError UNKNOWN_FORMAT} +create table test_02311 (x UInt32) engine=HDFS('http://hdfs:9000/data', UnknownFormat); -- {serverError UNKNOWN_FORMAT} diff --git a/parser/testdata/02311_normalize_utf8_constant/ast.json b/parser/testdata/02311_normalize_utf8_constant/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02311_normalize_utf8_constant/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02311_normalize_utf8_constant/metadata.json b/parser/testdata/02311_normalize_utf8_constant/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02311_normalize_utf8_constant/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02311_normalize_utf8_constant/query.sql b/parser/testdata/02311_normalize_utf8_constant/query.sql new file mode 100644 index 000000000..2747aa073 --- /dev/null +++ b/parser/testdata/02311_normalize_utf8_constant/query.sql @@ -0,0 +1,13 @@ +-- Tags: no-fasttest + +SELECT + 'â' AS s, + normalizeUTF8NFC(s) s1, + normalizeUTF8NFD(s) s2, + normalizeUTF8NFKC(s) s3, + normalizeUTF8NFKD(s) s4, + hex(s), + hex(s1), + hex(s2), + hex(s3), + hex(s4); diff --git a/parser/testdata/02311_range_hashed_dictionary_range_cast/ast.json b/parser/testdata/02311_range_hashed_dictionary_range_cast/ast.json new file mode 100644 index 000000000..cbc89dac0 --- /dev/null +++ b/parser/testdata/02311_range_hashed_dictionary_range_cast/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery dictionary_source_table (children 1)" + }, + { + "explain": " Identifier dictionary_source_table" + } + ], 
+ + "rows": 2, + + "statistics": + { + "elapsed": 0.00137428, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/02311_range_hashed_dictionary_range_cast/metadata.json b/parser/testdata/02311_range_hashed_dictionary_range_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02311_range_hashed_dictionary_range_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02311_range_hashed_dictionary_range_cast/query.sql b/parser/testdata/02311_range_hashed_dictionary_range_cast/query.sql new file mode 100644 index 000000000..623b369da --- /dev/null +++ b/parser/testdata/02311_range_hashed_dictionary_range_cast/query.sql @@ -0,0 +1,30 @@ +DROP TABLE IF EXISTS dictionary_source_table; +CREATE TABLE dictionary_source_table +( + key UInt64, + start UInt64, + end UInt64, + value String +) Engine = TinyLog; + +INSERT INTO dictionary_source_table values (1, 0, 18446744073709551615, 'Value'); + +DROP DICTIONARY IF EXISTS range_hashed_dictionary; +CREATE DICTIONARY range_hashed_dictionary +( + key UInt64, + start UInt64, + end UInt64, + value String +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE 'dictionary_source_table')) +LAYOUT(RANGE_HASHED()) +RANGE(MIN start MAX end) +LIFETIME(0); + +SELECT dictGet('range_hashed_dictionary', 'value', toUInt64(1), toUInt64(18446744073709551615)); +SELECT dictGet('range_hashed_dictionary', 'value', toUInt64(1), toUInt64(-1)); + +DROP DICTIONARY range_hashed_dictionary; +DROP TABLE dictionary_source_table; diff --git a/parser/testdata/02311_system_zookeeper_insert/ast.json b/parser/testdata/02311_system_zookeeper_insert/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02311_system_zookeeper_insert/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02311_system_zookeeper_insert/metadata.json b/parser/testdata/02311_system_zookeeper_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02311_system_zookeeper_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02311_system_zookeeper_insert/query.sql b/parser/testdata/02311_system_zookeeper_insert/query.sql new file mode 100644 index 000000000..8f1836087 --- /dev/null +++ b/parser/testdata/02311_system_zookeeper_insert/query.sql @@ -0,0 +1,43 @@ +-- Tags: zookeeper + +set allow_unrestricted_reads_from_keeper = 'true'; + +drop table if exists test_zkinsert; + +create table test_zkinsert ( + name String, + path String, + value String +) ENGINE Memory; + +-- test recursive create and big transaction +insert into test_zkinsert (name, path, value) values ('c', '/1-insert-testc/c/c/c/c/c/c', 11), ('e', '/1-insert-testc/c/c/d', 10), ('c', '/1-insert-testc/c/c/c/c/c/c/c', 10), ('c', '/1-insert-testc/c/c/c/c/c/c', 9), ('f', '/1-insert-testc/c/c/d', 11), ('g', '/1-insert-testc/c/c/d', 12), ('g', '/1-insert-testc/c/c/e', 13), ('g', '/1-insert-testc/c/c/f', 14), ('g', '/1-insert-testc/c/c/kk', 14); +-- insert same value, suppose to have no side effects +insert into system.zookeeper (name, path, value) SELECT name, '/' || currentDatabase() || path, value from test_zkinsert; + +SELECT * FROM (SELECT path, name, value FROM system.zookeeper ORDER BY path, name) WHERE path LIKE '/' || currentDatabase() || '/1-insert-test%'; + +SELECT '-------------------------'; + +-- test inserting into root path +insert into test_zkinsert (name, path, value) values ('testc', '/2-insert-testx', 'x'); +insert into test_zkinsert 
(name, path, value) values ('testz', '/2-insert-testx', 'y'); +insert into test_zkinsert (name, path, value) values ('testc', '/2-insert-testz//c/cd/dd//', 'y'); +insert into test_zkinsert (name, path) values ('testc', '/2-insert-testz//c/cd/'); +insert into test_zkinsert (name, value, path) values ('testb', 'z', '/2-insert-testx'); + +insert into system.zookeeper (name, path, value) SELECT name, '/' || currentDatabase() || path, value from test_zkinsert; + +SELECT * FROM (SELECT path, name, value FROM system.zookeeper ORDER BY path, name) WHERE path LIKE '/' || currentDatabase() || '/2-insert-test%'; + +-- test exceptions +insert into system.zookeeper (name, value) values ('abc', 'y'); -- { serverError BAD_ARGUMENTS } +insert into system.zookeeper (path, value) values ('a/b/c', 'y'); -- { serverError BAD_ARGUMENTS } +insert into system.zookeeper (name, version) values ('abc', 111); -- { serverError ILLEGAL_COLUMN } +insert into system.zookeeper (name, versionxyz) values ('abc', 111); -- { serverError NO_SUCH_COLUMN_IN_TABLE } +insert into system.zookeeper (name, path, value) values ('a/b/c', '/', 'y'); -- { serverError BAD_ARGUMENTS } +insert into system.zookeeper (name, path, value) values ('/', '/a/b/c', 'z'); -- { serverError BAD_ARGUMENTS } +insert into system.zookeeper (name, path, value) values ('', '/', 'y'); -- { serverError BAD_ARGUMENTS } +insert into system.zookeeper (name, path, value) values ('abc', '/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/
abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc/abc', 'y'); -- { serverError BAD_ARGUMENTS } + +drop table if exists test_zkinsert; diff --git a/parser/testdata/02312_is_not_null_prewhere/ast.json b/parser/testdata/02312_is_not_null_prewhere/ast.json new file mode 100644 index 000000000..381639343 --- /dev/null +++ b/parser/testdata/02312_is_not_null_prewhere/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery bug_36995 (children 1)" + }, + { + "explain": " Identifier bug_36995" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001218648, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02312_is_not_null_prewhere/metadata.json b/parser/testdata/02312_is_not_null_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02312_is_not_null_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02312_is_not_null_prewhere/query.sql b/parser/testdata/02312_is_not_null_prewhere/query.sql new file mode 100644 index 000000000..56371d0ec --- /dev/null +++ b/parser/testdata/02312_is_not_null_prewhere/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS bug_36995; + +CREATE TABLE bug_36995( + `time` DateTime, + `product` String) +ENGINE = MergeTree +ORDER BY time AS +SELECT '2022-01-01 00:00:00','1'; + +SELECT * FROM bug_36995 +WHERE (time IS NOT NULL) AND (product IN (SELECT '1')) +SETTINGS optimize_move_to_prewhere = 1; + +SELECT * FROM bug_36995 +WHERE (time IS NOT NULL) AND (product IN (SELECT '1')) +SETTINGS 
optimize_move_to_prewhere = 0; + +SELECT * FROM bug_36995 +PREWHERE (time IS NOT NULL) WHERE (product IN (SELECT '1')); + +DROP TABLE bug_36995; diff --git a/parser/testdata/02312_parquet_orc_arrow_names_tuples/ast.json b/parser/testdata/02312_parquet_orc_arrow_names_tuples/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02312_parquet_orc_arrow_names_tuples/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02312_parquet_orc_arrow_names_tuples/metadata.json b/parser/testdata/02312_parquet_orc_arrow_names_tuples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02312_parquet_orc_arrow_names_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02312_parquet_orc_arrow_names_tuples/query.sql b/parser/testdata/02312_parquet_orc_arrow_names_tuples/query.sql new file mode 100644 index 000000000..4c2158e4a --- /dev/null +++ b/parser/testdata/02312_parquet_orc_arrow_names_tuples/query.sql @@ -0,0 +1,29 @@ +-- Tags: no-fasttest + +drop table if exists test_02312; +create table test_02312 (x Tuple(a UInt32, b UInt32)) engine=File(Parquet); +insert into test_02312 values ((1,2)), ((2,3)), ((3,4)); +select * from test_02312; +drop table test_02312; +create table test_02312 (x Tuple(a UInt32, b UInt32)) engine=File(Arrow); +insert into test_02312 values ((1,2)), ((2,3)), ((3,4)); +select * from test_02312; +drop table test_02312; +create table test_02312 (x Tuple(a UInt32, b UInt32)) engine=File(ORC); +insert into test_02312 values ((1,2)), ((2,3)), ((3,4)); +select * from test_02312; +drop table test_02312; + +create table test_02312 (a Nested(b Nested(c UInt32))) engine=File(Parquet); +insert into test_02312 values ([[(1), (2), (3)]]); +select * from test_02312; +drop table test_02312; +create table test_02312 (a Nested(b Nested(c UInt32))) engine=File(Arrow); +insert into test_02312 values ([[(1), (2), (3)]]); +select * from test_02312; +drop table test_02312; +create table test_02312 (a Nested(b Nested(c UInt32))) engine=File(ORC); +insert into test_02312 values ([[(1), (2), (3)]]); +select * from test_02312; +drop table test_02312; + diff --git a/parser/testdata/02313_avro_records_and_maps/ast.json b/parser/testdata/02313_avro_records_and_maps/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02313_avro_records_and_maps/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02313_avro_records_and_maps/metadata.json b/parser/testdata/02313_avro_records_and_maps/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02313_avro_records_and_maps/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02313_avro_records_and_maps/query.sql b/parser/testdata/02313_avro_records_and_maps/query.sql new file mode 100644 index 000000000..cc15d2559 --- /dev/null +++ b/parser/testdata/02313_avro_records_and_maps/query.sql @@ -0,0 +1,25 @@ +-- Tags: no-fasttest + +insert into function file(currentDatabase() || '_data_02313.avro') select tuple(number, 'String')::Tuple(a UInt32, b String) as t from numbers(3) settings engine_file_truncate_on_insert=1; +desc file(currentDatabase() || '_data_02313.avro'); +select * from file(currentDatabase() || '_data_02313.avro'); + +insert into function file(currentDatabase() || '_data_02313.avro') select tuple(number, tuple(number + 1, number + 2), range(number))::Tuple(a UInt32, b Tuple(c UInt32, d UInt32), e Array(UInt32)) as t 
from numbers(3) settings engine_file_truncate_on_insert=1; +desc file(currentDatabase() || '_data_02313.avro'); +select * from file(currentDatabase() || '_data_02313.avro'); + +insert into function file(currentDatabase() || '_data_02313.avro', auto, 'a Nested(b UInt32, c UInt32)') select [number, number + 1], [number + 2, number + 3] from numbers(3) settings engine_file_truncate_on_insert=1; +desc file(currentDatabase() || '_data_02313.avro'); +select * from file(currentDatabase() || '_data_02313.avro'); + +insert into function file(currentDatabase() || '_data_02313.avro', auto, 'a Nested(b Nested(c UInt32, d UInt32))') select [[(number, number + 1), (number + 2, number + 3)]] from numbers(3) settings engine_file_truncate_on_insert=1; +desc file(currentDatabase() || '_data_02313.avro'); +select * from file(currentDatabase() || '_data_02313.avro'); + +insert into function file(currentDatabase() || '_data_02313.avro') select map(concat('key_', toString(number)), number) as m from numbers(3) settings engine_file_truncate_on_insert=1; +desc file(currentDatabase() || '_data_02313.avro'); +select * from file(currentDatabase() || '_data_02313.avro'); + +insert into function file(currentDatabase() || '_data_02313.avro') select map(concat('key_', toString(number)), tuple(number, range(number))) as m from numbers(3) settings engine_file_truncate_on_insert=1; +desc file(currentDatabase() || '_data_02313.avro'); +select * from file(currentDatabase() || '_data_02313.avro'); diff --git a/parser/testdata/02313_cross_join_dup_col_names/ast.json b/parser/testdata/02313_cross_join_dup_col_names/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02313_cross_join_dup_col_names/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02313_cross_join_dup_col_names/metadata.json b/parser/testdata/02313_cross_join_dup_col_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02313_cross_join_dup_col_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02313_cross_join_dup_col_names/query.sql b/parser/testdata/02313_cross_join_dup_col_names/query.sql new file mode 100644 index 000000000..45390c0e8 --- /dev/null +++ b/parser/testdata/02313_cross_join_dup_col_names/query.sql @@ -0,0 +1,15 @@ + +-- https://github.com/ClickHouse/ClickHouse/issues/37561 + +SELECT NULL +FROM + (SELECT NULL) AS s1, + (SELECT count(2), count(1)) AS s2 +; + +SELECT NULL +FROM + (SELECT NULL) AS s1, + (SELECT count(2.), 9223372036854775806, count('-1'), NULL) AS s2, + (SELECT count('-2147483648')) AS any_query, (SELECT NULL) AS check_single_query +; diff --git a/parser/testdata/02313_displayname/ast.json b/parser/testdata/02313_displayname/ast.json new file mode 100644 index 000000000..469f97691 --- /dev/null +++ b/parser/testdata/02313_displayname/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function displayName (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001222583, + "rows_read": 6, + "bytes_read": 221 + } +} diff --git a/parser/testdata/02313_displayname/metadata.json b/parser/testdata/02313_displayname/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02313_displayname/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02313_displayname/query.sql b/parser/testdata/02313_displayname/query.sql new file mode 100644 index 000000000..6b6411b79 --- /dev/null +++ b/parser/testdata/02313_displayname/query.sql @@ -0,0 +1 @@ +select displayName(); diff --git a/parser/testdata/02313_dump_column_structure_low_cardinality/ast.json b/parser/testdata/02313_dump_column_structure_low_cardinality/ast.json new file mode 100644 index 000000000..78048e595 --- /dev/null +++ b/parser/testdata/02313_dump_column_structure_low_cardinality/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function dumpColumnStructure (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '[\\'Hello\\', \\'World\\']'" + }, + { + "explain": " Literal 'Array(LowCardinality(String))'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001657873, + "rows_read": 10, + "bytes_read": 427 + } +} diff --git a/parser/testdata/02313_dump_column_structure_low_cardinality/metadata.json b/parser/testdata/02313_dump_column_structure_low_cardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02313_dump_column_structure_low_cardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02313_dump_column_structure_low_cardinality/query.sql b/parser/testdata/02313_dump_column_structure_low_cardinality/query.sql new file mode 100644 index 000000000..66ce6184f --- /dev/null +++ b/parser/testdata/02313_dump_column_structure_low_cardinality/query.sql @@ -0,0 +1 @@ +SELECT dumpColumnStructure(['Hello', 'World']::Array(LowCardinality(String))); diff --git a/parser/testdata/02313_group_by_modifiers_with_non_default_types/ast.json b/parser/testdata/02313_group_by_modifiers_with_non_default_types/ast.json new file mode 100644 index 000000000..bbae42745 --- /dev/null +++ b/parser/testdata/02313_group_by_modifiers_with_non_default_types/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test02313 (children 1)" + }, + { + "explain": " Identifier test02313" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001461185, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02313_group_by_modifiers_with_non_default_types/metadata.json b/parser/testdata/02313_group_by_modifiers_with_non_default_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02313_group_by_modifiers_with_non_default_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02313_group_by_modifiers_with_non_default_types/query.sql b/parser/testdata/02313_group_by_modifiers_with_non_default_types/query.sql new file mode 100644 index 000000000..d30cc9304 --- /dev/null +++ b/parser/testdata/02313_group_by_modifiers_with_non_default_types/query.sql @@ -0,0 +1,39 @@ +DROP TABLE IF EXISTS test02313; + +CREATE TABLE test02313 +( + a Enum('one' = 1, 'two' = 2), + b 
Enum('default' = 0, 'non-default' = 1), + c UInt8 +) +ENGINE = MergeTree() +ORDER BY (a, b, c); + +INSERT INTO test02313 SELECT number % 2 + 1 AS a, number % 2 AS b, number FROM numbers(10); + +-- { echoOn } +SELECT + count() as d, a, b, c +FROM test02313 +GROUP BY ROLLUP(a, b, c) +ORDER BY d, a, b, c; + +SELECT + count() as d, a, b, c +FROM test02313 +GROUP BY CUBE(a, b, c) +ORDER BY d, a, b, c; + +SELECT + count() as d, a, b, c +FROM test02313 +GROUP BY GROUPING SETS + ( + (c), + (a, c), + (b, c) + ) +ORDER BY d, a, b, c; + +-- { echoOff } +DROP TABLE test02313; diff --git a/parser/testdata/02313_multiple_limits/ast.json b/parser/testdata/02313_multiple_limits/ast.json new file mode 100644 index 000000000..4b3701e77 --- /dev/null +++ b/parser/testdata/02313_multiple_limits/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001865802, + "rows_read": 7, + "bytes_read": 252 + } +} diff --git a/parser/testdata/02313_multiple_limits/metadata.json b/parser/testdata/02313_multiple_limits/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02313_multiple_limits/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02313_multiple_limits/query.sql b/parser/testdata/02313_multiple_limits/query.sql new file mode 100644 index 000000000..2924bd3ae --- /dev/null +++ b/parser/testdata/02313_multiple_limits/query.sql @@ -0,0 +1,106 @@ +SELECT sum(x) +FROM +( + SELECT x + FROM + ( + SELECT number AS x + FROM system.numbers + SETTINGS max_rows_to_read = 10, read_overflow_mode = 'break', max_block_size = 2 + ) + SETTINGS max_rows_to_read = 20, read_overflow_mode = 'break', max_block_size = 2 +); + +SELECT sum(x) +FROM +( + SELECT x + FROM + ( + SELECT number AS x + FROM system.numbers + SETTINGS max_rows_to_read = 20, read_overflow_mode = 'break', max_block_size = 2 + ) + SETTINGS max_rows_to_read = 10, read_overflow_mode = 'break', max_block_size = 2 +); + + +SELECT count() >= 20, count() <= 22 +FROM +( + SELECT x + FROM + ( + SELECT zero AS x + FROM system.zeros + SETTINGS max_block_size = 2, max_rows_to_read = 10, read_overflow_mode = 'break' + ) + UNION ALL + SELECT x + FROM + ( + SELECT zero + 1 AS x + FROM system.zeros + SETTINGS max_block_size = 2, max_rows_to_read = 20, read_overflow_mode = 'break' + ) +); + +SELECT sum(x) >= 10 +FROM +( + SELECT x + FROM + ( + SELECT zero AS x + FROM system.zeros + SETTINGS max_block_size = 2, max_rows_to_read = 10, read_overflow_mode = 'break' + ) + UNION ALL + SELECT x + FROM + ( + SELECT zero + 1 AS x + FROM system.zeros + SETTINGS max_block_size = 2, max_rows_to_read = 20, read_overflow_mode = 'break' + ) +); + +SELECT count() >= 20, count() <= 22 +FROM +( + SELECT x + FROM + ( + SELECT zero AS x + FROM system.zeros + SETTINGS max_block_size = 2, max_rows_to_read = 20, read_overflow_mode = 'break' + ) + UNION ALL + SELECT x + FROM + ( + SELECT zero + 1 AS x + FROM system.zeros + SETTINGS max_block_size = 2, max_rows_to_read = 10, read_overflow_mode = 'break' + ) +); + +SELECT sum(x) <= 10 +FROM +( + SELECT x + FROM + ( + SELECT zero 
AS x + FROM system.zeros + SETTINGS max_block_size = 2, max_rows_to_read = 20, read_overflow_mode = 'break' + ) + UNION ALL + SELECT x + FROM + ( + SELECT zero + 1 AS x + FROM system.zeros + SETTINGS max_block_size = 2, max_rows_to_read = 10, read_overflow_mode = 'break' + ) +); diff --git a/parser/testdata/02313_negative_datetime64/ast.json b/parser/testdata/02313_negative_datetime64/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02313_negative_datetime64/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02313_negative_datetime64/metadata.json b/parser/testdata/02313_negative_datetime64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02313_negative_datetime64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02313_negative_datetime64/query.sql b/parser/testdata/02313_negative_datetime64/query.sql new file mode 100644 index 000000000..a5728074c --- /dev/null +++ b/parser/testdata/02313_negative_datetime64/query.sql @@ -0,0 +1,39 @@ +-- Before UNIX epoch +WITH + toDateTime64('1959-09-16 19:20:12.999999998', 9, 'UTC') AS dt1, + toDateTime64('1959-09-16 19:20:12.999999999', 9, 'UTC') AS dt2 +SELECT + dt1 < dt2, + (dt1 + INTERVAL 1 NANOSECOND) = dt2, + (dt1 + INTERVAL 2 NANOSECOND) > dt2, + (dt1 + INTERVAL 3 NANOSECOND) > dt2; + +-- At UNIX epoch border +WITH + toDateTime64('1969-12-31 23:59:59.999999998', 9, 'UTC') AS dt1, + toDateTime64('1969-12-31 23:59:59.999999999', 9, 'UTC') AS dt2 +SELECT + dt1 < dt2, + (dt1 + INTERVAL 1 NANOSECOND) = dt2, + (dt1 + INTERVAL 2 NANOSECOND) > dt2, + (dt1 + INTERVAL 3 NANOSECOND) > dt2; + +-- After UNIX epoch +WITH + toDateTime64('2001-12-31 23:59:59.999999998', 9, 'UTC') AS dt1, + toDateTime64('2001-12-31 23:59:59.999999999', 9, 'UTC') AS dt2 +SELECT + dt1 < dt2, + (dt1 + INTERVAL 1 NANOSECOND) = dt2, + (dt1 + INTERVAL 2 NANOSECOND) > dt2, + (dt1 + INTERVAL 3 NANOSECOND) > dt2; + +-- At upper DT64 bound (DT64 precision is lower here by design) +WITH + toDateTime64('2282-12-31 23:59:59.999998', 6, 'UTC') AS dt1, + toDateTime64('2282-12-31 23:59:59.999999', 6, 'UTC') AS dt2 +SELECT + dt1 < dt2, + (dt1 + INTERVAL 1 MICROSECOND) = dt2, + (dt1 + INTERVAL 2 MICROSECOND) > dt2, + (dt1 + INTERVAL 3 MICROSECOND) > dt2; diff --git a/parser/testdata/02313_test_fpc_codec/ast.json b/parser/testdata/02313_test_fpc_codec/ast.json new file mode 100644 index 000000000..dc48c24ed --- /dev/null +++ b/parser/testdata/02313_test_fpc_codec/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery codecTest (children 1)" + }, + { + "explain": " Identifier codecTest" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001235417, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02313_test_fpc_codec/metadata.json b/parser/testdata/02313_test_fpc_codec/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02313_test_fpc_codec/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02313_test_fpc_codec/query.sql b/parser/testdata/02313_test_fpc_codec/query.sql new file mode 100644 index 000000000..4fe54b87c --- /dev/null +++ b/parser/testdata/02313_test_fpc_codec/query.sql @@ -0,0 +1,123 @@ +DROP TABLE IF EXISTS codecTest; + +SET cross_to_inner_join_rewrite = 1; + +CREATE TABLE codecTest ( + key UInt64, + name String, + ref_valueF64 Float64, + ref_valueF32 Float32, + valueF64 
Float64 CODEC(FPC), + valueF32 Float32 CODEC(FPC) +) Engine = MergeTree ORDER BY key; + +-- best case - same value +INSERT INTO codecTest (key, name, ref_valueF64, valueF64, ref_valueF32, valueF32) + SELECT number AS n, 'e()', e() AS v, v, v, v FROM system.numbers LIMIT 1, 100; + +-- good case - values that grow insignificantly +INSERT INTO codecTest (key, name, ref_valueF64, valueF64, ref_valueF32, valueF32) + SELECT number AS n, 'log2(n)', log2(n) AS v, v, v, v FROM system.numbers LIMIT 101, 100; + +-- bad case - values differ significantly +INSERT INTO codecTest (key, name, ref_valueF64, valueF64, ref_valueF32, valueF32) + SELECT number AS n, 'n*sqrt(n)', n*sqrt(n) AS v, v, v, v FROM system.numbers LIMIT 201, 100; + +-- worst case - almost like random values +INSERT INTO codecTest (key, name, ref_valueF64, valueF64, ref_valueF32, valueF32) + SELECT number AS n, 'sin(n*n*n)*n', sin(n * n * n * n* n) AS v, v, v, v FROM system.numbers LIMIT 301, 100; + + +-- These floating-point values are expected to be BINARY equal, so comparing by-value is OK here. + +-- Reference the previous row's key, value, and case name to simplify debugging. +SELECT 'F64'; +SELECT + c1.key, c1.name, + c1.ref_valueF64, c1.valueF64, c1.ref_valueF64 - c1.valueF64 AS dF64, + 'prev:', + c2.key, c2.ref_valueF64 +FROM + codecTest as c1, codecTest as c2 +WHERE + dF64 != 0 +AND + c2.key = c1.key - 1 +LIMIT 10; + + +SELECT 'F32'; +SELECT + c1.key, c1.name, + c1.ref_valueF32, c1.valueF32, c1.ref_valueF32 - c1.valueF32 AS dF32, + 'prev:', + c2.key, c2.ref_valueF32 +FROM + codecTest as c1, codecTest as c2 +WHERE + dF32 != 0 +AND + c2.key = c1.key - 1 +LIMIT 10; + +DROP TABLE IF EXISTS codecTest; + +CREATE TABLE codecTest ( + key UInt64, + name String, + ref_valueF64 Float64, + ref_valueF32 Float32, + valueF64 Float64 CODEC(FPC(4)), + valueF32 Float32 CODEC(FPC(4)) +) Engine = MergeTree ORDER BY key; + +-- best case - same value +INSERT INTO codecTest (key, name, ref_valueF64, valueF64, ref_valueF32, valueF32) + SELECT number AS n, 'e()', e() AS v, v, v, v FROM system.numbers LIMIT 1, 100; + +-- good case - values that grow insignificantly +INSERT INTO codecTest (key, name, ref_valueF64, valueF64, ref_valueF32, valueF32) + SELECT number AS n, 'log2(n)', log2(n) AS v, v, v, v FROM system.numbers LIMIT 101, 100; + +-- bad case - values differ significantly +INSERT INTO codecTest (key, name, ref_valueF64, valueF64, ref_valueF32, valueF32) + SELECT number AS n, 'n*sqrt(n)', n*sqrt(n) AS v, v, v, v FROM system.numbers LIMIT 201, 100; + +-- worst case - almost like random values +INSERT INTO codecTest (key, name, ref_valueF64, valueF64, ref_valueF32, valueF32) + SELECT number AS n, 'sin(n*n*n)*n', sin(n * n * n * n* n) AS v, v, v, v FROM system.numbers LIMIT 301, 100; + + +-- These floating-point values are expected to be BINARY equal, so comparing by-value is OK here. + +-- Reference the previous row's key, value, and case name to simplify debugging. 
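+-- A hedged aside (a sketch, not part of the upstream test): per-column savings
+-- from FPC can be inspected via system.columns; data_compressed_bytes and
+-- data_uncompressed_bytes are real ClickHouse system columns, but the query is
+-- illustrative only and stays commented out so the expected output is unchanged.
+-- SELECT name,
+--        formatReadableSize(data_compressed_bytes)   AS on_disk,
+--        formatReadableSize(data_uncompressed_bytes) AS in_memory
+-- FROM system.columns
+-- WHERE database = currentDatabase() AND table = 'codecTest' AND name LIKE 'value%';
+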
+SELECT 'F64'; +SELECT + c1.key, c1.name, + c1.ref_valueF64, c1.valueF64, c1.ref_valueF64 - c1.valueF64 AS dF64, + 'prev:', + c2.key, c2.ref_valueF64 +FROM + codecTest as c1, codecTest as c2 +WHERE + dF64 != 0 +AND + c2.key = c1.key - 1 +LIMIT 10; + + +SELECT 'F32'; +SELECT + c1.key, c1.name, + c1.ref_valueF32, c1.valueF32, c1.ref_valueF32 - c1.valueF32 AS dF32, + 'prev:', + c2.key, c2.ref_valueF32 +FROM + codecTest as c1, codecTest as c2 +WHERE + dF32 != 0 +AND + c2.key = c1.key - 1 +LIMIT 10; + +DROP TABLE IF EXISTS codecTest; diff --git a/parser/testdata/02314_csv_tsv_skip_first_lines/ast.json b/parser/testdata/02314_csv_tsv_skip_first_lines/ast.json new file mode 100644 index 000000000..3fe0e6736 --- /dev/null +++ b/parser/testdata/02314_csv_tsv_skip_first_lines/ast.json @@ -0,0 +1,97 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "InsertQuery (children 3)" + }, + { + "explain": " Function file (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function currentDatabase (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal '_data_02314.csv'" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Set" + }, + { + "explain": " Set" + } + ], + + "rows": 25, + + "statistics": + { + "elapsed": 0.001251726, + "rows_read": 25, + "bytes_read": 911 + } +} diff --git a/parser/testdata/02314_csv_tsv_skip_first_lines/metadata.json b/parser/testdata/02314_csv_tsv_skip_first_lines/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02314_csv_tsv_skip_first_lines/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02314_csv_tsv_skip_first_lines/query.sql b/parser/testdata/02314_csv_tsv_skip_first_lines/query.sql new file mode 100644 index 000000000..6108aeff7 --- /dev/null +++ b/parser/testdata/02314_csv_tsv_skip_first_lines/query.sql @@ -0,0 +1,9 @@ +insert into function file(currentDatabase() || '_data_02314.csv') select number, number + 1 from numbers(5) settings engine_file_truncate_on_insert=1; +insert into function file(currentDatabase() || '_data_02314.csv') select number, number + 1, number + 2 from numbers(5); +desc file(currentDatabase() || '_data_02314.csv') settings input_format_csv_skip_first_lines=5; +select * from file(currentDatabase() || '_data_02314.csv') order by c1 settings input_format_csv_skip_first_lines=5; + +insert into function file(currentDatabase() || '_data_02314.tsv') select number, number + 1 from numbers(5) settings engine_file_truncate_on_insert=1; +insert into function file(currentDatabase() || '_data_02314.tsv') select number, number + 1, 
number + 2 from numbers(5); +desc file(currentDatabase() || '_data_02314.tsv') settings input_format_tsv_skip_first_lines=5; +select * from file(currentDatabase() || '_data_02314.tsv') order by c1 settings input_format_tsv_skip_first_lines=5; diff --git a/parser/testdata/02315_grouping_constant_folding/ast.json b/parser/testdata/02315_grouping_constant_folding/ast.json new file mode 100644 index 000000000..01bdf0d41 --- /dev/null +++ b/parser/testdata/02315_grouping_constant_folding/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test02315 (children 1)" + }, + { + "explain": " Identifier test02315" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00123314, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02315_grouping_constant_folding/metadata.json b/parser/testdata/02315_grouping_constant_folding/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02315_grouping_constant_folding/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02315_grouping_constant_folding/query.sql b/parser/testdata/02315_grouping_constant_folding/query.sql new file mode 100644 index 000000000..5e305d2e6 --- /dev/null +++ b/parser/testdata/02315_grouping_constant_folding/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS test02315; + +CREATE TABLE test02315(a UInt64, b UInt64) ENGINE=MergeTree() ORDER BY (a, b); + +INSERT INTO test02315 SELECT number % 2 as a, number as b FROM numbers(10); + +-- { echoOn } +SELECT count() AS amount, a, b, GROUPING(a, b) FROM test02315 GROUP BY GROUPING SETS ((a, b), (a), ()) ORDER BY (amount, a, b) SETTINGS force_grouping_standard_compatibility=0; + +SELECT count() AS amount, a, b, GROUPING(a, b) FROM test02315 GROUP BY ROLLUP(a, b) ORDER BY (amount, a, b) SETTINGS force_grouping_standard_compatibility=0; + +SELECT count() AS amount, a, b, GROUPING(a, b) FROM test02315 GROUP BY GROUPING SETS ((a, b), (a, a), ()) ORDER BY (amount, a, b) SETTINGS force_grouping_standard_compatibility=0, enable_analyzer=1; + +-- { echoOff } +DROP TABLE test02315; diff --git a/parser/testdata/02315_pmj_union_ubsan_35857/ast.json b/parser/testdata/02315_pmj_union_ubsan_35857/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02315_pmj_union_ubsan_35857/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02315_pmj_union_ubsan_35857/metadata.json b/parser/testdata/02315_pmj_union_ubsan_35857/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02315_pmj_union_ubsan_35857/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02315_pmj_union_ubsan_35857/query.sql b/parser/testdata/02315_pmj_union_ubsan_35857/query.sql new file mode 100644 index 000000000..cbf71f1d5 --- /dev/null +++ b/parser/testdata/02315_pmj_union_ubsan_35857/query.sql @@ -0,0 +1,23 @@ + +SET join_algorithm = 'partial_merge'; + +SELECT NULL +FROM +( + SELECT + NULL, + 1 AS a, + 0 :: Nullable(UInt8) AS c + UNION ALL + SELECT + NULL, + 65536, + NULL +) AS js1 +ALL LEFT JOIN +( + SELECT 2 :: Nullable(UInt8) AS a +) AS js2 +USING (a) +ORDER BY c +; diff --git a/parser/testdata/02315_replace_multiif_to_if/ast.json b/parser/testdata/02315_replace_multiif_to_if/ast.json new file mode 100644 index 000000000..b5c4395a3 --- /dev/null +++ b/parser/testdata/02315_replace_multiif_to_if/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": 
"explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Explain EXPLAIN SYNTAX (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function multiIf (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10000" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001348886, + "rows_read": 21, + "bytes_read": 845 + } +} diff --git a/parser/testdata/02315_replace_multiif_to_if/metadata.json b/parser/testdata/02315_replace_multiif_to_if/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02315_replace_multiif_to_if/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02315_replace_multiif_to_if/query.sql b/parser/testdata/02315_replace_multiif_to_if/query.sql new file mode 100644 index 000000000..59433828b --- /dev/null +++ b/parser/testdata/02315_replace_multiif_to_if/query.sql @@ -0,0 +1,2 @@ +EXPLAIN SYNTAX SELECT multiIf(number = 0, NULL, toNullable(number)) FROM numbers(10000); +EXPLAIN SYNTAX SELECT CASE WHEN number = 0 THEN NULL ELSE toNullable(number) END FROM numbers(10000); diff --git a/parser/testdata/02316_cast_to_ip_address_default_column/ast.json b/parser/testdata/02316_cast_to_ip_address_default_column/ast.json new file mode 100644 index 000000000..eb5c041d5 --- /dev/null +++ b/parser/testdata/02316_cast_to_ip_address_default_column/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001288941, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02316_cast_to_ip_address_default_column/metadata.json b/parser/testdata/02316_cast_to_ip_address_default_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02316_cast_to_ip_address_default_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02316_cast_to_ip_address_default_column/query.sql b/parser/testdata/02316_cast_to_ip_address_default_column/query.sql new file mode 100644 index 000000000..cac7992e3 --- /dev/null +++ b/parser/testdata/02316_cast_to_ip_address_default_column/query.sql @@ -0,0 +1,39 @@ +SET cast_ipv4_ipv6_default_on_conversion_error = 1; + +DROP TABLE IF EXISTS ipv4_test; +CREATE TABLE ipv4_test +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id; + +ALTER TABLE ipv4_test MODIFY COLUMN value IPv4 DEFAULT ''; + +SET cast_ipv4_ipv6_default_on_conversion_error = 0; + +DETACH TABLE ipv4_test; +ATTACH TABLE ipv4_test; + +SET cast_ipv4_ipv6_default_on_conversion_error = 1; + 
+DROP TABLE ipv4_test; + +DROP TABLE IF EXISTS ipv6_test; +CREATE TABLE ipv6_test +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id; + +ALTER TABLE ipv6_test MODIFY COLUMN value IPv6 DEFAULT ''; + +SET cast_ipv4_ipv6_default_on_conversion_error = 0; + +DETACH TABLE ipv6_test; +ATTACH TABLE ipv6_test; + +SET cast_ipv4_ipv6_default_on_conversion_error = 1; + +SELECT * FROM ipv6_test; + +DROP TABLE ipv6_test; diff --git a/parser/testdata/02316_const_string_intersact/ast.json b/parser/testdata/02316_const_string_intersact/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02316_const_string_intersact/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02316_const_string_intersact/metadata.json b/parser/testdata/02316_const_string_intersact/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02316_const_string_intersact/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02316_const_string_intersact/query.sql b/parser/testdata/02316_const_string_intersact/query.sql new file mode 100644 index 000000000..33629d2a5 --- /dev/null +++ b/parser/testdata/02316_const_string_intersact/query.sql @@ -0,0 +1,2 @@ + +SELECT 'Play ClickHouse' InterSect SELECT 'Play ClickHouse' diff --git a/parser/testdata/02316_expressions_with_window_functions/ast.json b/parser/testdata/02316_expressions_with_window_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02316_expressions_with_window_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02316_expressions_with_window_functions/metadata.json b/parser/testdata/02316_expressions_with_window_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02316_expressions_with_window_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02316_expressions_with_window_functions/query.sql b/parser/testdata/02316_expressions_with_window_functions/query.sql new file mode 100644 index 000000000..c3137ef37 --- /dev/null +++ b/parser/testdata/02316_expressions_with_window_functions/query.sql @@ -0,0 +1,28 @@ +-- { echoOn } +-- SELECT number, sum(number) + 1 OVER (PARTITION BY (number % 10)) +-- FROM numbers(100) +-- ORDER BY number; -- { clientError SYNTAX_ERROR } + +SELECT number, 1 + sum(number) OVER (PARTITION BY number % 10) +FROM numbers(100) +ORDER BY number; + +SELECT sum(number) + 1 AS x +FROM numbers(100) +GROUP BY number % 10 +ORDER BY x; + +SELECT + number, + sum(number) OVER (PARTITION BY number % 10) / count() OVER (PARTITION BY number % 10), + avg(number) OVER (PARTITION BY number % 10) +FROM numbers(100) +ORDER BY number ASC; + +SELECT sum(number) / sum(sum(number)) OVER (PARTITION BY (number % 10)) +FROM numbers(10000) +GROUP BY number % 10; + +SELECT 1 + sum(number) / sum(sum(number)) OVER (PARTITION BY (number % 10)) +FROM numbers(10000) +GROUP BY number % 10; diff --git a/parser/testdata/02316_literal_no_octal/ast.json b/parser/testdata/02316_literal_no_octal/ast.json new file mode 100644 index 000000000..c7d1548a4 --- /dev/null +++ b/parser/testdata/02316_literal_no_octal/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " 
ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_77" + }, + { + "explain": " Literal UInt64_78" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001133642, + "rows_read": 6, + "bytes_read": 207 + } +} diff --git a/parser/testdata/02316_literal_no_octal/metadata.json b/parser/testdata/02316_literal_no_octal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02316_literal_no_octal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02316_literal_no_octal/query.sql b/parser/testdata/02316_literal_no_octal/query.sql new file mode 100644 index 000000000..a75335078 --- /dev/null +++ b/parser/testdata/02316_literal_no_octal/query.sql @@ -0,0 +1 @@ +SELECT 077, 078; diff --git a/parser/testdata/02316_values_table_func_bug/ast.json b/parser/testdata/02316_values_table_func_bug/ast.json new file mode 100644 index 000000000..4d69ef5f5 --- /dev/null +++ b/parser/testdata/02316_values_table_func_bug/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function values (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, Float64_2.2]" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001605656, + "rows_read": 11, + "bytes_read": 441 + } +} diff --git a/parser/testdata/02316_values_table_func_bug/metadata.json b/parser/testdata/02316_values_table_func_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02316_values_table_func_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02316_values_table_func_bug/query.sql b/parser/testdata/02316_values_table_func_bug/query.sql new file mode 100644 index 000000000..7c66cf125 --- /dev/null +++ b/parser/testdata/02316_values_table_func_bug/query.sql @@ -0,0 +1,2 @@ +select * from values([1, 2.2]); +select * from values([[1, 2, 3], [1.1, 2.2, 3.3]]); diff --git a/parser/testdata/02317_distinct_in_order_optimization/ast.json b/parser/testdata/02317_distinct_in_order_optimization/ast.json new file mode 100644 index 000000000..828f05a6b --- /dev/null +++ b/parser/testdata/02317_distinct_in_order_optimization/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '-- enable distinct in order optimization'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001386284, + "rows_read": 5, + "bytes_read": 211 + } +} diff --git a/parser/testdata/02317_distinct_in_order_optimization/metadata.json b/parser/testdata/02317_distinct_in_order_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02317_distinct_in_order_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02317_distinct_in_order_optimization/query.sql b/parser/testdata/02317_distinct_in_order_optimization/query.sql new file mode 100644 index 000000000..d05a25882 --- /dev/null +++ b/parser/testdata/02317_distinct_in_order_optimization/query.sql @@ -0,0 +1,117 @@ +select '-- enable distinct in order optimization'; +set optimize_distinct_in_order=1; +select '-- create table with only primary key columns'; +drop table if exists distinct_in_order sync; +create table distinct_in_order (a int) engine=MergeTree() order by a settings index_granularity=10; +select '-- the same values in every chunk, pre-distinct should skip entire chunks with the same key as the previous one'; +insert into distinct_in_order (a) select * from zeros(10); +insert into distinct_in_order (a) select * from zeros(10); -- this entire chunk should be skipped in pre-distinct +select distinct * from distinct_in_order settings max_block_size=10, max_threads=1; + +select '-- create table with only primary key columns'; +select '-- pre-distinct should skip part of the chunk since it contains values from the previous one'; +drop table if exists distinct_in_order sync; +create table distinct_in_order (a int) engine=MergeTree() order by a settings index_granularity=10; +insert into distinct_in_order (a) select * from zeros(10); +insert into distinct_in_order select * from numbers(10); -- first row (0) from this chunk should be skipped in pre-distinct +select distinct a from distinct_in_order settings max_block_size=10, max_threads=1; + +select '-- create table with not only primary key columns'; +drop table if exists distinct_in_order sync; +create table distinct_in_order (a int, b int, c int) engine=MergeTree() order by (a, b) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into distinct_in_order select number % number, number % 5, number % 10 from numbers(1,1000000); + +select '-- distinct with primary key prefix only'; +select distinct a from distinct_in_order; +select '-- distinct with primary key prefix only, order by sorted column'; +select distinct a from distinct_in_order order by a; +select '-- distinct with primary key prefix only, order by sorted column desc'; +select distinct a from distinct_in_order order by a desc; + +select '-- distinct with full key, order by sorted column'; +select distinct a,b from distinct_in_order order by b; +select '-- distinct with full key, order by sorted column desc'; +select distinct a,b from distinct_in_order order by b desc; + +select '-- distinct with key prefix and non-sorted column, order by non-sorted'; +select distinct a,c from distinct_in_order order by c; +select '-- distinct with key prefix and non-sorted column, order by non-sorted desc'; +select distinct a,c from distinct_in_order order by c desc; + +select '-- distinct with non-key prefix and non-sorted column, order by non-sorted'; +select distinct b,c from distinct_in_order order by c; +select '-- distinct with non-key prefix and non-sorted column, order by non-sorted desc'; +select distinct b,c from distinct_in_order order by c desc; + +select '-- distinct with constant columns'; +-- { echoOn } +select distinct 1 as x, 2 as y from distinct_in_order; +select distinct 1 as x, 2 as y from distinct_in_order order by x; +select distinct 1 as x, 2 as y from distinct_in_order order by x, y; +select a, x from (select distinct a, 1 as x from distinct_in_order order by x) order by a; +select distinct a, 1 as x, 2 as y from distinct_in_order order by a; +select a, b, x, y from(select distinct a, b, 1 
as x, 2 as y from distinct_in_order order by a) order by a, b; +select distinct x, y from (select 1 as x, 2 as y from distinct_in_order order by x) order by y; +select distinct a, b, x, y from (select a, b, 1 as x, 2 as y from distinct_in_order order by a) order by a, b; +-- { echoOff } + +drop table if exists distinct_in_order sync; + +select '-- check that distinct in order returns the same result as ordinary distinct'; +drop table if exists distinct_cardinality_low sync; +CREATE TABLE distinct_cardinality_low (low UInt64, medium UInt64, high UInt64) ENGINE MergeTree() ORDER BY (low, medium) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +INSERT INTO distinct_cardinality_low SELECT number % 1e1, number % 1e2, number % 1e3 FROM numbers_mt(1e4); + +drop table if exists distinct_in_order sync; +drop table if exists ordinary_distinct sync; + +select '-- check that distinct in order WITH order by returns the same result as ordinary distinct'; +create table distinct_in_order (low UInt64, medium UInt64, high UInt64) engine=MergeTree() order by (low, medium) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into distinct_in_order select distinct * from distinct_cardinality_low order by high settings optimize_distinct_in_order=1; +create table ordinary_distinct (low UInt64, medium UInt64, high UInt64) engine=MergeTree() order by (low, medium) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into ordinary_distinct select distinct * from distinct_cardinality_low order by high settings optimize_distinct_in_order=0; +select count() as diff from (select distinct * from distinct_in_order except select * from ordinary_distinct); + +drop table if exists distinct_in_order sync; +drop table if exists ordinary_distinct sync; + +select '-- check that distinct in order WITHOUT order by returns the same result as ordinary distinct'; +create table distinct_in_order (low UInt64, medium UInt64, high UInt64) engine=MergeTree() order by (low, medium) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into distinct_in_order select distinct * from distinct_cardinality_low settings optimize_distinct_in_order=1; +create table ordinary_distinct (low UInt64, medium UInt64, high UInt64) engine=MergeTree() order by (low, medium) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into ordinary_distinct select distinct * from distinct_cardinality_low settings optimize_distinct_in_order=0; +select count() as diff from (select distinct * from distinct_in_order except select * from ordinary_distinct); + +drop table if exists distinct_in_order; +drop table if exists ordinary_distinct; + +select '-- check that distinct in order WITHOUT order by and WITH filter returns the same result as ordinary distinct'; +create table distinct_in_order (low UInt64, medium UInt64, high UInt64) engine=MergeTree() order by (low, medium) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into distinct_in_order select distinct * from distinct_cardinality_low where low > 0 settings optimize_distinct_in_order=1; +create table ordinary_distinct (low UInt64, medium UInt64, high UInt64) engine=MergeTree() order by (low, medium) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into ordinary_distinct select distinct * from distinct_cardinality_low where low > 0 settings optimize_distinct_in_order=0; +select count() as diff from (select distinct * from distinct_in_order except 
select * from ordinary_distinct); + +drop table if exists distinct_in_order; +drop table if exists ordinary_distinct; +drop table if exists distinct_cardinality_low; + +-- bug 42185 +drop table if exists sorting_key_empty_tuple; +drop table if exists sorting_key_contain_function; + +select '-- bug 42185, distinct in order and empty sort description'; +select '-- distinct in order, sorting key tuple()'; +create table sorting_key_empty_tuple (a int, b int) engine=MergeTree() order by tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into sorting_key_empty_tuple select number % 2, number % 5 from numbers(1,10); +select distinct a from sorting_key_empty_tuple; + +select '-- distinct in order, sorting key contains function'; +create table sorting_key_contain_function (datetime DateTime, a int) engine=MergeTree() order by (toDate(datetime)) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into sorting_key_contain_function values ('2000-01-01', 1); +insert into sorting_key_contain_function values ('2000-01-01', 2); +select distinct datetime from sorting_key_contain_function; +select distinct toDate(datetime) from sorting_key_contain_function; + +drop table sorting_key_empty_tuple; +drop table sorting_key_contain_function; diff --git a/parser/testdata/02317_functions_with_nothing/ast.json b/parser/testdata/02317_functions_with_nothing/ast.json new file mode 100644 index 000000000..eedce9c21 --- /dev/null +++ b/parser/testdata/02317_functions_with_nothing/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function JSONExtractKeysAndValuesRaw (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001015021, + "rows_read": 10, + "bytes_read": 411 + } +} diff --git a/parser/testdata/02317_functions_with_nothing/metadata.json b/parser/testdata/02317_functions_with_nothing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02317_functions_with_nothing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02317_functions_with_nothing/query.sql b/parser/testdata/02317_functions_with_nothing/query.sql new file mode 100644 index 000000000..3bfda3bb6 --- /dev/null +++ b/parser/testdata/02317_functions_with_nothing/query.sql @@ -0,0 +1,7 @@ +SELECT JSONExtractKeysAndValuesRaw(arrayJoin([])); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT JSONHas(arrayJoin([])); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT isValidJSON(arrayJoin([])); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT concat(arrayJoin([]), arrayJoin([NULL, ''])); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT plus(arrayJoin([]), arrayJoin([NULL, 1])); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT sipHash64(arrayJoin([]), [NULL], arrayJoin(['', NULL, '', NULL])); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT [concat(NULL, arrayJoin([]))]; diff --git a/parser/testdata/02317_like_with_trailing_escape/ast.json 
b/parser/testdata/02317_like_with_trailing_escape/ast.json new file mode 100644 index 000000000..03c7c44aa --- /dev/null +++ b/parser/testdata/02317_like_with_trailing_escape/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001357678, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02317_like_with_trailing_escape/metadata.json b/parser/testdata/02317_like_with_trailing_escape/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02317_like_with_trailing_escape/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02317_like_with_trailing_escape/query.sql b/parser/testdata/02317_like_with_trailing_escape/query.sql new file mode 100644 index 000000000..521b4a16f --- /dev/null +++ b/parser/testdata/02317_like_with_trailing_escape/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab (haystack String, pattern String) engine = MergeTree() ORDER BY haystack; + +INSERT INTO tab VALUES ('haystack', 'pattern\\'); + +-- const pattern +SELECT haystack LIKE 'pattern\\' from tab; -- { serverError CANNOT_PARSE_ESCAPE_SEQUENCE } + +-- non-const pattern +SELECT haystack LIKE pattern from tab; -- { serverError CANNOT_PARSE_ESCAPE_SEQUENCE } + +DROP TABLE IF EXISTS tab; diff --git a/parser/testdata/02318_template_schema_inference_bug/ast.json b/parser/testdata/02318_template_schema_inference_bug/ast.json new file mode 100644 index 000000000..9119e21f0 --- /dev/null +++ b/parser/testdata/02318_template_schema_inference_bug/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "InsertQuery (children 2)" + }, + { + "explain": " Function file (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function currentDatabase (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal '_data_02318.tsv'" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001378502, + "rows_read": 19, + "bytes_read": 733 + } +} diff --git a/parser/testdata/02318_template_schema_inference_bug/metadata.json b/parser/testdata/02318_template_schema_inference_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02318_template_schema_inference_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02318_template_schema_inference_bug/query.sql b/parser/testdata/02318_template_schema_inference_bug/query.sql new file mode 100644 index 000000000..9dd61b008 --- /dev/null +++ b/parser/testdata/02318_template_schema_inference_bug/query.sql @@ -0,0 
+1,2 @@ +insert into function file(currentDatabase() || '_data_02318.tsv') select * from numbers(10); +desc file(currentDatabase() || '_data_02318.tsv', 'Template') SETTINGS format_template_row='nonexist', format_template_resultset='nonexist'; -- {serverError FILE_DOESNT_EXIST} diff --git a/parser/testdata/02319_dict_get_check_arguments_size/ast.json b/parser/testdata/02319_dict_get_check_arguments_size/ast.json new file mode 100644 index 000000000..dd49b0feb --- /dev/null +++ b/parser/testdata/02319_dict_get_check_arguments_size/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery dictionary_source_table (children 1)" + }, + { + "explain": " Identifier dictionary_source_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001153009, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/02319_dict_get_check_arguments_size/metadata.json b/parser/testdata/02319_dict_get_check_arguments_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02319_dict_get_check_arguments_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02319_dict_get_check_arguments_size/query.sql b/parser/testdata/02319_dict_get_check_arguments_size/query.sql new file mode 100644 index 000000000..e1d1ab9fa --- /dev/null +++ b/parser/testdata/02319_dict_get_check_arguments_size/query.sql @@ -0,0 +1,59 @@ +DROP TABLE IF EXISTS dictionary_source_table; +CREATE TABLE dictionary_source_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO dictionary_source_table VALUES (0, 'Value'); + +DROP DICTIONARY IF EXISTS test_dictionary; +CREATE DICTIONARY test_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +LAYOUT(FLAT()) +SOURCE(CLICKHOUSE(TABLE 'dictionary_source_table')) +LIFETIME(0); + +SELECT dictGet('test_dictionary', 'value', 0); +SELECT dictGet('test_dictionary', 'value', 0, 'DefaultValue'); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT dictGetOrDefault('test_dictionary', 'value', 1, 'DefaultValue'); +SELECT dictGetOrDefault('test_dictionary', 'value', 1, 'DefaultValue', 1); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +DROP DICTIONARY test_dictionary; + +DROP TABLE dictionary_source_table; +CREATE TABLE dictionary_source_table +( + key UInt64, + start UInt64, + end UInt64, + value String +) Engine = TinyLog; + +INSERT INTO dictionary_source_table values (0, 0, 5, 'Value'); + +DROP DICTIONARY IF EXISTS range_hashed_dictionary; +CREATE DICTIONARY range_hashed_dictionary +( + key UInt64, + start UInt64, + end UInt64, + value String +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE 'dictionary_source_table')) +LAYOUT(RANGE_HASHED()) +RANGE(MIN start MAX end) +LIFETIME(0); + +SELECT dictGet('range_hashed_dictionary', 'value', 0, toUInt64(4)); +SELECT dictGet('range_hashed_dictionary', 'value', 4, toUInt64(6), 'DefaultValue'); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT dictGetOrDefault('range_hashed_dictionary', 'value', 1, toUInt64(6), 'DefaultValue'); +SELECT dictGetOrDefault('range_hashed_dictionary', 'value', 1, toUInt64(6), 'DefaultValue', 1); --{serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +DROP DICTIONARY range_hashed_dictionary; +DROP TABLE dictionary_source_table; diff --git a/parser/testdata/02319_lightweight_delete_on_merge_tree/ast.json b/parser/testdata/02319_lightweight_delete_on_merge_tree/ast.json new file mode 100644 index 000000000..1ba1007f6 --- /dev/null +++ 
b/parser/testdata/02319_lightweight_delete_on_merge_tree/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery merge_table_standard_delete (children 1)" + }, + { + "explain": " Identifier merge_table_standard_delete" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001124009, + "rows_read": 2, + "bytes_read": 106 + } +} diff --git a/parser/testdata/02319_lightweight_delete_on_merge_tree/metadata.json b/parser/testdata/02319_lightweight_delete_on_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02319_lightweight_delete_on_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02319_lightweight_delete_on_merge_tree/query.sql b/parser/testdata/02319_lightweight_delete_on_merge_tree/query.sql new file mode 100644 index 000000000..47a8d5bb8 --- /dev/null +++ b/parser/testdata/02319_lightweight_delete_on_merge_tree/query.sql @@ -0,0 +1,117 @@ +DROP TABLE IF EXISTS merge_table_standard_delete; + +CREATE TABLE merge_table_standard_delete(id Int32, name String) ENGINE = MergeTree order by id settings min_bytes_for_wide_part=0; + +INSERT INTO merge_table_standard_delete select number, toString(number) from numbers(100); + +SET mutations_sync = 0; +SET check_query_single_value_result = 1; + +DELETE FROM merge_table_standard_delete WHERE id = 10; + +SELECT COUNT() FROM merge_table_standard_delete; + +DETACH TABLE merge_table_standard_delete; +ATTACH TABLE merge_table_standard_delete; +CHECK TABLE merge_table_standard_delete; + +DELETE FROM merge_table_standard_delete WHERE name IN ('1','2','3','4'); + +SELECT COUNT() FROM merge_table_standard_delete; + +DETACH TABLE merge_table_standard_delete; +ATTACH TABLE merge_table_standard_delete; +CHECK TABLE merge_table_standard_delete; + +DELETE FROM merge_table_standard_delete WHERE 1; + +SELECT COUNT() FROM merge_table_standard_delete; + +DETACH TABLE merge_table_standard_delete; +ATTACH TABLE merge_table_standard_delete; +CHECK TABLE merge_table_standard_delete; + +DROP TABLE merge_table_standard_delete; + +drop table if exists t_light; +create table t_light(a int, b int, c int, index i_c(b) type minmax granularity 4) engine = MergeTree order by a partition by c % 5 settings min_bytes_for_wide_part=0; +INSERT INTO t_light SELECT number, number, number FROM numbers(10); + +SELECT '-----lightweight mutation type-----'; + +DELETE FROM t_light WHERE c%5=1; + +DETACH TABLE t_light; +ATTACH TABLE t_light; +CHECK TABLE t_light; + +DELETE FROM t_light WHERE c=4; + +DETACH TABLE t_light; +ATTACH TABLE t_light; +CHECK TABLE t_light; + +alter table t_light MATERIALIZE INDEX i_c SETTINGS mutations_sync=2; +alter table t_light update b=-1 where a<3 SETTINGS mutations_sync=2; +alter table t_light drop index i_c SETTINGS mutations_sync=2; + +DETACH TABLE t_light; +ATTACH TABLE t_light; +CHECK TABLE t_light; + +SELECT command, is_done FROM system.mutations WHERE database = currentDatabase() AND table = 't_light'; + +SELECT '-----Check that select and merge with lightweight delete.-----'; +select count(*) from t_light; +select * from t_light order by a; + +select table, partition, name, rows from system.parts where database = currentDatabase() AND active and table ='t_light' order by name; + +optimize table t_light final SETTINGS mutations_sync=2; +select count(*) from t_light; + +select table, partition, name, rows from system.parts where database = currentDatabase() AND active and table 
='t_light' and rows > 0 order by name; + +drop table t_light; + +SELECT '-----Test lightweight delete in multi blocks-----'; +CREATE TABLE t_large(a UInt32, b int) ENGINE=MergeTree order BY a settings min_bytes_for_wide_part=0, index_granularity=8192, index_granularity_bytes='10Mi'; +INSERT INTO t_large SELECT number + 1, number + 1 FROM numbers(100000); + +DELETE FROM t_large WHERE a = 50000; + +DETACH TABLE t_large; +ATTACH TABLE t_large; +CHECK TABLE t_large; + +ALTER TABLE t_large UPDATE b = -2 WHERE a between 1000 and 1005 SETTINGS mutations_sync=2; +ALTER TABLE t_large DELETE WHERE a=1 SETTINGS mutations_sync=2; + +DETACH TABLE t_large; +ATTACH TABLE t_large; +CHECK TABLE t_large; + +SELECT * FROM t_large WHERE a in (1,1000,1005,50000) order by a; + +DROP TABLE t_large; + +SELECT '----Test lighweight delete is disabled if table has projections-----'; + +CREATE TABLE t_proj(a UInt32, b int) ENGINE=MergeTree order BY a settings min_bytes_for_wide_part=0; + +ALTER TABLE t_proj ADD PROJECTION p_1 (SELECT avg(a), avg(b), count()) SETTINGS mutations_sync=2; + +INSERT INTO t_proj SELECT number + 1, number + 1 FROM numbers(1000); + +DELETE FROM t_proj WHERE a < 100; -- { serverError SUPPORT_IS_DISABLED } + +SELECT avg(a), avg(b), count() FROM t_proj; + +DROP TABLE t_proj; + +CREATE TABLE merge_table_standard_delete(id Int32, name String) ENGINE = MergeTree order by id settings min_bytes_for_wide_part=0; +SET allow_experimental_lightweight_delete = false; +DELETE FROM merge_table_standard_delete WHERE id = 10; -- { serverError SUPPORT_IS_DISABLED } +SET enable_lightweight_delete = false; +DELETE FROM merge_table_standard_delete WHERE id = 10; -- { serverError SUPPORT_IS_DISABLED } +DROP TABLE merge_table_standard_delete; diff --git a/parser/testdata/02319_lightweight_delete_on_merge_tree_compact_parts/ast.json b/parser/testdata/02319_lightweight_delete_on_merge_tree_compact_parts/ast.json new file mode 100644 index 000000000..963934b3f --- /dev/null +++ b/parser/testdata/02319_lightweight_delete_on_merge_tree_compact_parts/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery merge_table_standard_delete (children 1)" + }, + { + "explain": " Identifier merge_table_standard_delete" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00113101, + "rows_read": 2, + "bytes_read": 106 + } +} diff --git a/parser/testdata/02319_lightweight_delete_on_merge_tree_compact_parts/metadata.json b/parser/testdata/02319_lightweight_delete_on_merge_tree_compact_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02319_lightweight_delete_on_merge_tree_compact_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02319_lightweight_delete_on_merge_tree_compact_parts/query.sql b/parser/testdata/02319_lightweight_delete_on_merge_tree_compact_parts/query.sql new file mode 100644 index 000000000..c569cf83f --- /dev/null +++ b/parser/testdata/02319_lightweight_delete_on_merge_tree_compact_parts/query.sql @@ -0,0 +1,102 @@ +DROP TABLE IF EXISTS merge_table_standard_delete; + +CREATE TABLE merge_table_standard_delete(id Int32, name String) ENGINE = MergeTree order by id settings min_bytes_for_wide_part=10000000; + +INSERT INTO merge_table_standard_delete select number, toString(number) from numbers(100); + +SELECT COUNT(), part_type FROM system.parts WHERE database = currentDatabase() AND table = 'merge_table_standard_delete' AND active GROUP BY 
part_type ORDER BY part_type; + +SET mutations_sync = 0; +SET check_query_single_value_result = 1; + +DELETE FROM merge_table_standard_delete WHERE id = 10; +SELECT COUNT(), part_type FROM system.parts WHERE database = currentDatabase() AND table = 'merge_table_standard_delete' AND active GROUP BY part_type ORDER BY part_type; + +SELECT COUNT() FROM merge_table_standard_delete; + +DETACH TABLE merge_table_standard_delete; +ATTACH TABLE merge_table_standard_delete; +CHECK TABLE merge_table_standard_delete; + +DELETE FROM merge_table_standard_delete WHERE name IN ('1','2','3','4'); +SELECT COUNT(), part_type FROM system.parts WHERE database = currentDatabase() AND table = 'merge_table_standard_delete' AND active GROUP BY part_type ORDER BY part_type; + +SELECT COUNT() FROM merge_table_standard_delete; + +DETACH TABLE merge_table_standard_delete; +ATTACH TABLE merge_table_standard_delete; +CHECK TABLE merge_table_standard_delete; + +DELETE FROM merge_table_standard_delete WHERE 1; +SELECT COUNT(), part_type FROM system.parts WHERE database = currentDatabase() AND table = 'merge_table_standard_delete' AND active GROUP BY part_type ORDER BY part_type; + +SELECT COUNT() FROM merge_table_standard_delete; + +DETACH TABLE merge_table_standard_delete; +ATTACH TABLE merge_table_standard_delete; +CHECK TABLE merge_table_standard_delete; + +DROP TABLE merge_table_standard_delete; + +drop table if exists t_light; +create table t_light(a int, b int, c int, index i_c(b) type minmax granularity 4) engine = MergeTree order by a partition by c % 5 settings min_bytes_for_wide_part=10000000; +INSERT INTO t_light SELECT number, number, number FROM numbers(10); +SELECT COUNT(), part_type FROM system.parts WHERE database = currentDatabase() AND table = 't_light' AND active GROUP BY part_type ORDER BY part_type; + +SELECT '-----lightweight mutation type-----'; + +DELETE FROM t_light WHERE c%5=1; + +DETACH TABLE t_light; +ATTACH TABLE t_light; +CHECK TABLE t_light; + +DELETE FROM t_light WHERE c=4; + +DETACH TABLE t_light; +ATTACH TABLE t_light; +CHECK TABLE t_light; + +alter table t_light MATERIALIZE INDEX i_c SETTINGS mutations_sync=2; +alter table t_light update b=-1 where a<3 SETTINGS mutations_sync=2; +alter table t_light drop index i_c SETTINGS mutations_sync=2; + +DETACH TABLE t_light; +ATTACH TABLE t_light; +CHECK TABLE t_light; + +SELECT command, is_done FROM system.mutations WHERE database = currentDatabase() AND table = 't_light'; + +SELECT '-----Check that select and merge with lightweight delete.-----'; +select count(*) from t_light; +select * from t_light order by a; + +select table, partition, name, rows from system.parts where database = currentDatabase() AND active and table ='t_light' order by name; + +optimize table t_light final SETTINGS mutations_sync=2; +select count(*) from t_light; + +select table, partition, name, rows from system.parts where database = currentDatabase() AND active and table ='t_light' and rows > 0 order by name; + +drop table t_light; + +SELECT '-----Test lightweight delete in multi blocks-----'; +CREATE TABLE t_large(a UInt32, b int) ENGINE=MergeTree order BY a settings min_bytes_for_wide_part=0, index_granularity=8192, index_granularity_bytes='10Mi'; +INSERT INTO t_large SELECT number + 1, number + 1 FROM numbers(100000); + +DELETE FROM t_large WHERE a = 50000; + +DETACH TABLE t_large; +ATTACH TABLE t_large; +CHECK TABLE t_large; + +ALTER TABLE t_large UPDATE b = -2 WHERE a between 1000 and 1005 SETTINGS mutations_sync=2; +ALTER TABLE t_large DELETE WHERE a=1 SETTINGS 
mutations_sync=2; + +DETACH TABLE t_large; +ATTACH TABLE t_large; +CHECK TABLE t_large; + +SELECT * FROM t_large WHERE a in (1,1000,1005,50000) order by a; + +DROP TABLE t_large; diff --git a/parser/testdata/02319_no_columns_in_row_level_filter/ast.json b/parser/testdata/02319_no_columns_in_row_level_filter/ast.json new file mode 100644 index 000000000..aeddf0836 --- /dev/null +++ b/parser/testdata/02319_no_columns_in_row_level_filter/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DROP ROW POLICY query" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001442246, + "rows_read": 1, + "bytes_read": 29 + } +} diff --git a/parser/testdata/02319_no_columns_in_row_level_filter/metadata.json b/parser/testdata/02319_no_columns_in_row_level_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02319_no_columns_in_row_level_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02319_no_columns_in_row_level_filter/query.sql b/parser/testdata/02319_no_columns_in_row_level_filter/query.sql new file mode 100644 index 000000000..27f58dbff --- /dev/null +++ b/parser/testdata/02319_no_columns_in_row_level_filter/query.sql @@ -0,0 +1,36 @@ +DROP ROW POLICY IF EXISTS test_filter_policy ON test_table; +DROP ROW POLICY IF EXISTS test_filter_policy_2 ON test_table; +DROP TABLE IF EXISTS test_table; + +CREATE TABLE test_table (`n` UInt64, `s` String) +ENGINE = MergeTree +PRIMARY KEY n ORDER BY n SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO test_table SELECT number, concat('some string ', CAST(number, 'String')) FROM numbers(1000000); + +-- Create a row policy that doesn't use any column +CREATE ROW POLICY test_filter_policy ON test_table USING False TO ALL; + +-- Run queries under the default user so that an always-false row_level_filter is added that doesn't require any columns +SELECT count(1) FROM test_table; +SELECT count(1) FROM test_table PREWHERE (n % 8192) < 4000; +SELECT count(1) FROM test_table WHERE (n % 8192) < 4000; +SELECT count(1) FROM test_table PREWHERE (n % 8192) < 4000 WHERE (n % 33) == 0; + +-- Add a policy for the default user that reads a column +CREATE ROW POLICY test_filter_policy_2 ON test_table USING (n % 5) >= 3 TO default; + +-- Run queries under the default user; the filter needs the same column as PREWHERE and WHERE +SELECT count(1) FROM test_table; +SELECT count(1) FROM test_table PREWHERE (n % 8192) < 4000; +SELECT count(1) FROM test_table WHERE (n % 8192) < 4000; +SELECT count(1) FROM test_table PREWHERE (n % 8192) < 4000 WHERE (n % 33) == 0; + +-- Run queries that would divide by zero if the row-level filter weren't applied before PREWHERE +SELECT count(1) FROM test_table PREWHERE 7 / (n % 5) > 2; +SELECT count(1) FROM test_table WHERE 7 / (n % 5) > 2; +SELECT count(1) FROM test_table PREWHERE 7 / (n % 5) > 2 WHERE (n % 33) == 0; + +DROP TABLE test_table; +DROP ROW POLICY test_filter_policy ON test_table; +DROP ROW POLICY test_filter_policy_2 ON test_table; diff --git a/parser/testdata/02319_quantile_interpolated_weighted/ast.json b/parser/testdata/02319_quantile_interpolated_weighted/ast.json new file mode 100644 index 000000000..3b7d18ac0 --- /dev/null +++ b/parser/testdata/02319_quantile_interpolated_weighted/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery decimal (children 1)" + }, + { + "explain": " 
Identifier decimal" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001218947, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/02319_quantile_interpolated_weighted/metadata.json b/parser/testdata/02319_quantile_interpolated_weighted/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02319_quantile_interpolated_weighted/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02319_quantile_interpolated_weighted/query.sql b/parser/testdata/02319_quantile_interpolated_weighted/query.sql new file mode 100644 index 000000000..e2da1de9b --- /dev/null +++ b/parser/testdata/02319_quantile_interpolated_weighted/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS decimal; + +CREATE TABLE decimal +( + a Decimal32(4), + b Decimal64(8), + c Decimal128(8) +) ENGINE = Memory; + +INSERT INTO decimal (a, b, c) +SELECT toDecimal32(number - 50, 4), toDecimal64(number - 50, 8) / 3, toDecimal128(number - 50, 8) / 5 +FROM system.numbers LIMIT 101; + +SELECT 'quantileInterpolatedWeighted'; +SELECT medianInterpolatedWeighted(a, 1), medianInterpolatedWeighted(b, 2), medianInterpolatedWeighted(c, 3) as x, toTypeName(x) FROM decimal; +SELECT quantileInterpolatedWeighted(a, 1), quantileInterpolatedWeighted(b, 2), quantileInterpolatedWeighted(c, 3) as x, toTypeName(x) FROM decimal WHERE a < 0; +SELECT quantileInterpolatedWeighted(0.0)(a, 1), quantileInterpolatedWeighted(0.0)(b, 2), quantileInterpolatedWeighted(0.0)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantileInterpolatedWeighted(0.2)(a, 1), quantileInterpolatedWeighted(0.2)(b, 2), quantileInterpolatedWeighted(0.2)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantileInterpolatedWeighted(0.4)(a, 1), quantileInterpolatedWeighted(0.4)(b, 2), quantileInterpolatedWeighted(0.4)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantileInterpolatedWeighted(0.6)(a, 1), quantileInterpolatedWeighted(0.6)(b, 2), quantileInterpolatedWeighted(0.6)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantileInterpolatedWeighted(0.8)(a, 1), quantileInterpolatedWeighted(0.8)(b, 2), quantileInterpolatedWeighted(0.8)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantileInterpolatedWeighted(1.0)(a, 1), quantileInterpolatedWeighted(1.0)(b, 2), quantileInterpolatedWeighted(1.0)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantilesInterpolatedWeighted(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(a, 1) FROM decimal; +SELECT quantilesInterpolatedWeighted(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(b, 2) FROM decimal; +SELECT quantilesInterpolatedWeighted(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(c, 3) FROM decimal; + +DROP TABLE IF EXISTS decimal; diff --git a/parser/testdata/02319_sql_standard_create_drop_index/ast.json b/parser/testdata/02319_sql_standard_create_drop_index/ast.json new file mode 100644 index 000000000..1dece501f --- /dev/null +++ b/parser/testdata/02319_sql_standard_create_drop_index/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_index (children 1)" + }, + { + "explain": " Identifier t_index" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001546554, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/02319_sql_standard_create_drop_index/metadata.json b/parser/testdata/02319_sql_standard_create_drop_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02319_sql_standard_create_drop_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02319_sql_standard_create_drop_index/query.sql b/parser/testdata/02319_sql_standard_create_drop_index/query.sql new file mode 100644 index 000000000..581b170ee --- /dev/null +++ b/parser/testdata/02319_sql_standard_create_drop_index/query.sql @@ -0,0 +1,39 @@ +drop table if exists t_index; +create table t_index(a int, b String) engine=MergeTree() order by a; + +create index i_a on t_index(a) TYPE minmax GRANULARITY 4; +create index if not exists i_a on t_index(a) TYPE minmax GRANULARITY 2; + +create index i_b on t_index(b) TYPE bloom_filter GRANULARITY 2; + +show create table t_index; +select table, name, type, expr, granularity from system.data_skipping_indices where database = currentDatabase() and table = 't_index'; + +drop index i_a on t_index; +drop index if exists i_a on t_index; + +select table, name, type, expr, granularity from system.data_skipping_indices where database = currentDatabase() and table = 't_index'; + +drop table t_index; + +create table t_index(a int, b String) engine=ReplicatedMergeTree('/test/2319/{database}', '1') order by a; +create table t_index_replica(a int, b String) engine=ReplicatedMergeTree('/test/2319/{database}', '2') order by a; + +create index i_a on t_index(a) TYPE minmax GRANULARITY 4; +create index if not exists i_a on t_index(a) TYPE minmax GRANULARITY 2; + +create index i_b on t_index(b) TYPE bloom_filter GRANULARITY 2; + +show create table t_index; +system sync replica t_index_replica; +show create table t_index_replica; +select table, name, type, expr, granularity from system.data_skipping_indices where database = currentDatabase() and table = 't_index'; + +drop index i_a on t_index; +drop index if exists i_a on t_index; + +select table, name, type, expr, granularity from system.data_skipping_indices where database = currentDatabase() and table = 't_index'; +system sync replica t_index_replica; +select table, name, type, expr, granularity from system.data_skipping_indices where database = currentDatabase() and table = 't_index_replica'; + +drop table t_index; diff --git a/parser/testdata/02319_timeslots_dt64/ast.json b/parser/testdata/02319_timeslots_dt64/ast.json new file mode 100644 index 000000000..2a02b48ff --- /dev/null +++ b/parser/testdata/02319_timeslots_dt64/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function timeSlots (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '2000-01-02 03:04:05.12'" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " Function toDecimal64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_10000" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001293199, + "rows_read": 15, + "bytes_read": 588 + } +} diff --git a/parser/testdata/02319_timeslots_dt64/metadata.json b/parser/testdata/02319_timeslots_dt64/metadata.json new file mode 100644 index 000000000..ef120d978 --- 
/dev/null +++ b/parser/testdata/02319_timeslots_dt64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02319_timeslots_dt64/query.sql b/parser/testdata/02319_timeslots_dt64/query.sql new file mode 100644 index 000000000..a6838b8b6 --- /dev/null +++ b/parser/testdata/02319_timeslots_dt64/query.sql @@ -0,0 +1,9 @@ +SELECT timeSlots(toDateTime64('2000-01-02 03:04:05.12', 2, 'UTC'), toDecimal64(10000, 0)); +SELECT timeSlots(toDateTime64('2000-01-02 03:04:05.233', 3, 'UTC'), toDecimal64(10000.12, 2), toDecimal64(634.1, 1)); +SELECT timeSlots(toDateTime64('2000-01-02 03:04:05.3456', 4, 'UTC'), toDecimal64(600, 0), toDecimal64(30, 0)); + +SELECT timeSlots(toDateTime64('2000-01-02 03:04:05.23', 2, 'UTC')); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT timeSlots(toDateTime64('2000-01-02 03:04:05.345', 3, 'UTC'), toDecimal64(62.3, 1), toDecimal64(12.34, 2), 'one more'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT timeSlots(toDateTime64('2000-01-02 03:04:05.456', 3, 'UTC'), 'wrong argument'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSlots(toDateTime64('2000-01-02 03:04:05.123', 3, 'UTC'), toDecimal64(600, 0), 'wrong argument'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSlots(toDateTime64('2000-01-02 03:04:05.1232', 4, 'UTC'), toDecimal64(600, 0), toDecimal64(0, 0)); -- { serverError ILLEGAL_COLUMN } \ No newline at end of file diff --git a/parser/testdata/02320_alter_columns_with_dots/ast.json b/parser/testdata/02320_alter_columns_with_dots/ast.json new file mode 100644 index 000000000..656955f8b --- /dev/null +++ b/parser/testdata/02320_alter_columns_with_dots/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001553136, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02320_alter_columns_with_dots/metadata.json b/parser/testdata/02320_alter_columns_with_dots/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02320_alter_columns_with_dots/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02320_alter_columns_with_dots/query.sql b/parser/testdata/02320_alter_columns_with_dots/query.sql new file mode 100644 index 000000000..1b48538f9 --- /dev/null +++ b/parser/testdata/02320_alter_columns_with_dots/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (id String, `abc.1` String, `abc.2` String, `abc` String) ENGINE MergeTree order by id; +DESC TABLE test; +ALTER TABLE test MODIFY COLUMN `abc.1` String AFTER `abc`; +DESC TABLE test; +ALTER TABLE test MODIFY COLUMN `abc.2` String AFTER `abc`; +DESC TABLE test; +ALTER TABLE test MODIFY COLUMN `abc` String AFTER `abc.2`; +DESC TABLE test; +ALTER TABLE test MODIFY COLUMN `abc` String AFTER `id`; +DESC TABLE test; +ALTER TABLE test MODIFY COLUMN `abc` String AFTER `abc.1`; +DESC TABLE test; +ALTER TABLE test DROP COLUMN `abc`; +DESC TABLE test; diff --git a/parser/testdata/02320_mapped_array_witn_const_nullable/ast.json b/parser/testdata/02320_mapped_array_witn_const_nullable/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02320_mapped_array_witn_const_nullable/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02320_mapped_array_witn_const_nullable/metadata.json 
b/parser/testdata/02320_mapped_array_witn_const_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02320_mapped_array_witn_const_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02320_mapped_array_witn_const_nullable/query.sql b/parser/testdata/02320_mapped_array_witn_const_nullable/query.sql new file mode 100644 index 000000000..1dd06a268 --- /dev/null +++ b/parser/testdata/02320_mapped_array_witn_const_nullable/query.sql @@ -0,0 +1,8 @@ + +select arrayMap(x -> toNullable(1), range(number)) from numbers(3); +select arrayFilter(x -> toNullable(1), range(number)) from numbers(3); +select arrayMap(x -> toNullable(0), range(number)) from numbers(3); +select arrayFilter(x -> toNullable(0), range(number)) from numbers(3); +select arrayMap(x -> NULL::Nullable(UInt8), range(number)) from numbers(3); +select arrayFilter(x -> NULL::Nullable(UInt8), range(number)) from numbers(3); + diff --git a/parser/testdata/02321_nested_short_circuit_functions/ast.json b/parser/testdata/02321_nested_short_circuit_functions/ast.json new file mode 100644 index 000000000..bdcdaa061 --- /dev/null +++ b/parser/testdata/02321_nested_short_circuit_functions/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function greaterOrEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function notEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function intDiv (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_5" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.001140315, + "rows_read": 27, + "bytes_read": 1065 + } +} diff --git a/parser/testdata/02321_nested_short_circuit_functions/metadata.json b/parser/testdata/02321_nested_short_circuit_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02321_nested_short_circuit_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02321_nested_short_circuit_functions/query.sql b/parser/testdata/02321_nested_short_circuit_functions/query.sql new file mode 100644 index 000000000..a8ea62d3a --- /dev/null +++ b/parser/testdata/02321_nested_short_circuit_functions/query.sql @@ -0,0 +1,3 @@ +select number >= 0 and if(number != 0, intDiv(1, number), 1) from numbers(5); +select if(number >= 0, 
if(number != 0, intDiv(1, number), 1), 1) from numbers(5); +select number >= 0 and if(number = 0, 0, if(number == 1, intDiv(1, number), if(number == 2, intDiv(1, number - 1), if(number == 3, intDiv(1, number - 2), intDiv(1, number - 3))))) from numbers(10); diff --git a/parser/testdata/02322_sql_insert_format/ast.json b/parser/testdata/02322_sql_insert_format/ast.json new file mode 100644 index 000000000..e5ae4199d --- /dev/null +++ b/parser/testdata/02322_sql_insert_format/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001240487, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02322_sql_insert_format/metadata.json b/parser/testdata/02322_sql_insert_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02322_sql_insert_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02322_sql_insert_format/query.sql b/parser/testdata/02322_sql_insert_format/query.sql new file mode 100644 index 000000000..bb04a0160 --- /dev/null +++ b/parser/testdata/02322_sql_insert_format/query.sql @@ -0,0 +1,13 @@ +set schema_inference_use_cache_for_file=0; + +select number as x, number % 3 as y, 'Hello' as z from numbers(5) format SQLInsert; +select number as x, number % 3 as y, 'Hello' as z from numbers(5) format SQLInsert settings output_format_sql_insert_max_batch_size=1; +select number as x, number % 3 as y, 'Hello' as z from numbers(5) format SQLInsert settings output_format_sql_insert_max_batch_size=2; +select number as x, number % 3 as y, 'Hello' as z from numbers(5) format SQLInsert settings output_format_sql_insert_include_column_names=0; +select number as x, number % 3 as y, 'Hello' as z from numbers(5) format SQLInsert settings output_format_sql_insert_use_replace=1; +select number as x, number % 3 as y, 'Hello' as z from numbers(5) format SQLInsert settings output_format_sql_insert_table_name='test'; +select number as x, number % 3 as y, 'Hello' as z from numbers(5) format SQLInsert settings output_format_sql_insert_table_name='test', output_format_sql_insert_quote_names=0; +insert into function file(currentDatabase() || '_02322_data.sql', 'SQLInsert') select number as x, number % 3 as y, 'Hello' as z from numbers(5) settings output_format_sql_insert_max_batch_size=2, output_format_sql_insert_quote_names=0, engine_file_truncate_on_insert=1; +select * from file(currentDatabase() || '_02322_data.sql', 'MySQLDump'); +insert into function file(currentDatabase() || '_02322_data.sql', 'SQLInsert') select number, number % 3, 'Hello' from numbers(5) settings output_format_sql_insert_max_batch_size=2, engine_file_truncate_on_insert=1; +select * from file(currentDatabase() || '_02322_data.sql', 'MySQLDump'); diff --git a/parser/testdata/02323_null_modifier_in_table_function/ast.json b/parser/testdata/02323_null_modifier_in_table_function/ast.json new file mode 100644 index 000000000..c95cc70eb --- /dev/null +++ b/parser/testdata/02323_null_modifier_in_table_function/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery 
(children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function values (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'x UInt8 NOT NULL'" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001370105, + "rows_read": 12, + "bytes_read": 462 + } +} diff --git a/parser/testdata/02323_null_modifier_in_table_function/metadata.json b/parser/testdata/02323_null_modifier_in_table_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02323_null_modifier_in_table_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02323_null_modifier_in_table_function/query.sql b/parser/testdata/02323_null_modifier_in_table_function/query.sql new file mode 100644 index 000000000..1dab0231d --- /dev/null +++ b/parser/testdata/02323_null_modifier_in_table_function/query.sql @@ -0,0 +1,6 @@ +select * from values('x UInt8 NOT NULL', 1); +select * from values('x UInt8 NULL', NULL); +insert into function file(currentDatabase() || '_data_02323.tsv') select number % 2 ? number : NULL from numbers(3) settings engine_file_truncate_on_insert=1; +select * from file(currentDatabase() || '_data_02323.tsv', auto, 'x UInt32 NOT NULL'); +select * from file(currentDatabase() || '_data_02323.tsv', auto, 'x UInt32 NULL'); +select * from generateRandom('x UInt64 NULL', 7, 3) limit 2; diff --git a/parser/testdata/02324_map_combinator_bug/ast.json b/parser/testdata/02324_map_combinator_bug/ast.json new file mode 100644 index 000000000..670d40704 --- /dev/null +++ b/parser/testdata/02324_map_combinator_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery segfault (children 1)" + }, + { + "explain": " Identifier segfault" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00134174, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/02324_map_combinator_bug/metadata.json b/parser/testdata/02324_map_combinator_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02324_map_combinator_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02324_map_combinator_bug/query.sql b/parser/testdata/02324_map_combinator_bug/query.sql new file mode 100644 index 000000000..aa9eefaa9 --- /dev/null +++ b/parser/testdata/02324_map_combinator_bug/query.sql @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS segfault; +DROP TABLE IF EXISTS segfault_mv; + +CREATE TABLE segfault +( + id UInt32, + uuid UUID, + tags_ids Array(UInt32) +) ENGINE = MergeTree() +ORDER BY (id); + +CREATE MATERIALIZED VIEW segfault_mv + ENGINE = AggregatingMergeTree() + ORDER BY (id) +AS SELECT + id, + uniqState(uuid) as uniq_uuids, + uniqMapState(CAST((tags_ids, arrayMap(_ -> toString(uuid), tags_ids)), 'Map(UInt32, String)')) as uniq_tags_ids +FROM segfault +GROUP BY id; + +INSERT INTO segfault SELECT * FROM generateRandom('id UInt32, uuid UUID, c Array(UInt32)', 10, 5, 5) LIMIT 100; +INSERT INTO segfault SELECT * FROM generateRandom('id UInt32, uuid UUID, c Array(UInt32)', 10, 5, 5) LIMIT 100; +INSERT INTO segfault SELECT * FROM generateRandom('id UInt32, uuid UUID, c Array(UInt32)', 10, 5, 5) LIMIT 100; + +SELECT ignore(CAST((arrayMap(k -> toString(k), mapKeys(uniqMapMerge(uniq_tags_ids) AS m)), mapValues(m)), 
'Map(String, UInt32)')) FROM segfault_mv; diff --git a/parser/testdata/02325_compatibility_setting_2/ast.json b/parser/testdata/02325_compatibility_setting_2/ast.json new file mode 100644 index 000000000..5cc26e411 --- /dev/null +++ b/parser/testdata/02325_compatibility_setting_2/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier value" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.settings" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier name" + }, + { + "explain": " Literal 'allow_settings_after_format_in_insert'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.00123915, + "rows_read": 13, + "bytes_read": 523 + } +} diff --git a/parser/testdata/02325_compatibility_setting_2/metadata.json b/parser/testdata/02325_compatibility_setting_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02325_compatibility_setting_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02325_compatibility_setting_2/query.sql b/parser/testdata/02325_compatibility_setting_2/query.sql new file mode 100644 index 000000000..5ce0bf1ef --- /dev/null +++ b/parser/testdata/02325_compatibility_setting_2/query.sql @@ -0,0 +1,13 @@ +select value from system.settings where name='allow_settings_after_format_in_insert'; +select value from system.settings where name='allow_settings_after_format_in_insert' settings compatibility='22.3'; +select value from system.settings where name='allow_settings_after_format_in_insert'; +set compatibility = '22.3'; +select value from system.settings where name='allow_settings_after_format_in_insert'; +set compatibility = '22.4'; +select value from system.settings where name='allow_settings_after_format_in_insert'; +set allow_settings_after_format_in_insert=1; +select value from system.settings where name='allow_settings_after_format_in_insert'; +set compatibility = '22.4'; +select value from system.settings where name='allow_settings_after_format_in_insert'; +set compatibility = '22.3'; +select value from system.settings where name='allow_settings_after_format_in_insert'; diff --git a/parser/testdata/02325_dates_schema_inference/ast.json b/parser/testdata/02325_dates_schema_inference/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02325_dates_schema_inference/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02325_dates_schema_inference/metadata.json b/parser/testdata/02325_dates_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02325_dates_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02325_dates_schema_inference/query.sql b/parser/testdata/02325_dates_schema_inference/query.sql new file mode 100644 index 000000000..3534a0eb4 --- /dev/null +++ b/parser/testdata/02325_dates_schema_inference/query.sql @@ -0,0 +1,70 @@ +-- Tags: no-fasttest + +set 
input_format_try_infer_dates=1; +set input_format_try_infer_datetimes=1; + +select 'JSONEachRow'; +desc format(JSONEachRow, '{"x" : "2020-01-01"}'); +desc format(JSONEachRow, '{"x" : "2020-01-01 00:00:00.00000"}'); +desc format(JSONEachRow, '{"x" : "2020-01-01 00:00:00"}'); +desc format(JSONEachRow, '{"x" : ["2020-01-01", "2020-01-02"]}'); +desc format(JSONEachRow, '{"x" : ["2020-01-01", "2020-01-01 00:00:00"]}'); +desc format(JSONEachRow, '{"x" : ["2020-01-01 00:00:00", "2020-01-01 00:00:00"]}'); +desc format(JSONEachRow, '{"x" : {"date1" : "2020-01-01 00:00:00", "date2" : "2020-01-01"}}'); +desc format(JSONEachRow, '{"x" : ["2020-01-01 00:00:00", "2020-01-01"]}\n{"x" : ["2020-01-01"]}'); +desc format(JSONEachRow, '{"x" : ["2020-01-01 00:00:00"]}\n{"x" : ["2020-01-01"]}'); +desc format(JSONEachRow, '{"x" : "2020-01-01 00:00:00"}\n{"x" : "2020-01-01"}'); +desc format(JSONEachRow, '{"x" : ["2020-01-01 00:00:00", "Some string"]}'); +desc format(JSONEachRow, '{"x" : "2020-01-01 00:00:00"}\n{"x" : "Some string"}'); +desc format(JSONEachRow, '{"x" : ["2020-01-01 00:00:00", "2020-01-01"]}\n{"x" : ["2020-01-01", "Some string"]}'); +desc format(JSONEachRow, '{"x" : {"key1" : [["2020-01-01 00:00:00"]], "key2" : [["2020-01-01"]]}}\n{"x" : {"key1" : [["2020-01-01"]], "key2" : [["Some string"]]}}'); + +select 'CSV'; +desc format(CSV, '"2020-01-01"'); +desc format(CSV, '"2020-01-01 00:00:00.00000"'); +desc format(CSV, '"2020-01-01 00:00:00"'); +desc format(CSV, '"[\'2020-01-01\', \'2020-01-02\']"'); +desc format(CSV, '"[\'2020-01-01\', \'2020-01-01 00:00:00\']"'); +desc format(CSV, '"[\'2020-01-01 00:00:00\', \'2020-01-01 00:00:00\']"'); +desc format(CSV, '"{\'date1\' : \'2020-01-01 00:00:00\', \'date2\' : \'2020-01-01\'}"'); +desc format(CSV, '"[\'2020-01-01 00:00:00\', \'2020-01-01\']"\n"[\'2020-01-01\']"'); +desc format(CSV, '"[\'2020-01-01 00:00:00\']"\n"[\'2020-01-01\']"'); +desc format(CSV, '"2020-01-01 00:00:00"\n"2020-01-01"'); +desc format(CSV, '"[\'2020-01-01 00:00:00\', \'Some string\']"'); +desc format(CSV, '"2020-01-01 00:00:00"\n"Some string"'); +desc format(CSV, '"[\'2020-01-01 00:00:00\', \'2020-01-01\']"\n"[\'2020-01-01\', \'Some string\']"'); +desc format(CSV, '"{\'key1\' : [[\'2020-01-01 00:00:00\']], \'key2\' : [[\'2020-01-01\']]}"\n"{\'key1\' : [[\'2020-01-01\']], \'key2\' : [[\'Some string\']]}"'); + +select 'TSV'; +desc format(TSV, '2020-01-01'); +desc format(TSV, '2020-01-01 00:00:00.00000'); +desc format(TSV, '2020-01-01 00:00:00'); +desc format(TSV, '[\'2020-01-01\', \'2020-01-02\']'); +desc format(TSV, '[\'2020-01-01\', \'2020-01-01 00:00:00\']'); +desc format(TSV, '[\'2020-01-01 00:00:00\', \'2020-01-01 00:00:00\']'); +desc format(TSV, '{\'date1\' : \'2020-01-01 00:00:00\', \'date2\' : \'2020-01-01\'}'); +desc format(TSV, '[\'2020-01-01 00:00:00\', \'2020-01-01\']\n[\'2020-01-01\']'); +desc format(TSV, '[\'2020-01-01 00:00:00\']\n[\'2020-01-01\']'); +desc format(TSV, '2020-01-01 00:00:00\n2020-01-01'); +desc format(TSV, '[\'2020-01-01 00:00:00\', \'Some string\']'); +desc format(TSV, '2020-01-01 00:00:00\nSome string'); +desc format(TSV, '[\'2020-01-01 00:00:00\', \'2020-01-01\']\n[\'2020-01-01\', \'Some string\']'); +desc format(TSV, '{\'key1\' : [[\'2020-01-01 00:00:00\']], \'key2\' : [[\'2020-01-01\']]}\n{\'key1\' : [[\'2020-01-01\']], \'key2\' : [[\'Some string\']]}'); + +select 'Values'; +desc format(Values, '(\'2020-01-01\')'); +desc format(Values, '(\'2020-01-01 00:00:00.00000\')'); +desc format(Values, '(\'2020-01-01 00:00:00\')'); +desc format(Values, 
'([\'2020-01-01\', \'2020-01-02\'])'); +desc format(Values, '([\'2020-01-01\', \'2020-01-01 00:00:00\'])'); +desc format(Values, '([\'2020-01-01 00:00:00\', \'2020-01-01 00:00:00\'])'); +desc format(Values, '({\'date1\' : \'2020-01-01 00:00:00\', \'date2\' : \'2020-01-01\'})'); +desc format(Values, '([\'2020-01-01 00:00:00\', \'2020-01-01\'])\n([\'2020-01-01\'])'); +desc format(Values, '([\'2020-01-01 00:00:00\']), ([\'2020-01-01\'])'); +desc format(Values, '(\'2020-01-01 00:00:00\')\n(\'2020-01-01\')'); +desc format(Values, '([\'2020-01-01 00:00:00\', \'Some string\'])'); +desc format(Values, '(\'2020-01-01 00:00:00\')\n(\'Some string\')'); +desc format(Values, '([\'2020-01-01 00:00:00\', \'2020-01-01\'])\n([\'2020-01-01\', \'Some string\'])'); +desc format(Values, '({\'key1\' : [[\'2020-01-01 00:00:00\']], \'key2\' : [[\'2020-01-01\']]})\n({\'key1\' : [[\'2020-01-01\']], \'key2\' : [[\'Some string\']]})'); + + diff --git a/parser/testdata/02326_numbers_from_json_strings_schema_inference/ast.json b/parser/testdata/02326_numbers_from_json_strings_schema_inference/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02326_numbers_from_json_strings_schema_inference/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02326_numbers_from_json_strings_schema_inference/metadata.json b/parser/testdata/02326_numbers_from_json_strings_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02326_numbers_from_json_strings_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02326_numbers_from_json_strings_schema_inference/query.sql b/parser/testdata/02326_numbers_from_json_strings_schema_inference/query.sql new file mode 100644 index 000000000..26488adeb --- /dev/null +++ b/parser/testdata/02326_numbers_from_json_strings_schema_inference/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-fasttest + +set input_format_json_try_infer_numbers_from_strings=1; +set input_format_json_read_numbers_as_strings=0; +set input_format_json_infer_array_of_dynamic_from_array_of_different_types=0; + +desc format(JSONEachRow, '{"x" : "123"}'); +desc format(JSONEachRow, '{"x" : ["123", 123, 12.3]}'); +desc format(JSONEachRow, '{"x" : {"k1" : "123", "k2" : 123}}'); +desc format(JSONEachRow, '{"x" : {"k1" : ["123", "123"], "k2" : [123, 123]}}'); +desc format(JSONEachRow, '{"x" : "123"}\n{"x" : 123}'); +desc format(JSONEachRow, '{"x" : ["123", "456"]}\n{"x" : [123, 456]}'); +desc format(JSONEachRow, '{"x" : {"k1" : "123"}}\n{"x" : {"k2" : 123}}'); +desc format(JSONEachRow, '{"x" : {"k1" : ["123", "123"]}}\n{"x": {"k2" : [123, 123]}}'); +desc format(JSONEachRow, '{"x" : ["123", "Some string"]}'); +desc format(JSONEachRow, '{"x" : {"k1" : "123", "k2" : "Some string"}}'); +desc format(JSONEachRow, '{"x" : {"k1" : ["123", "123"], "k2" : ["Some string"]}}'); +desc format(JSONEachRow, '{"x" : "123"}\n{"x" : "Some string"}'); +desc format(JSONEachRow, '{"x" : ["123", "456"]}\n{"x" : ["Some string"]}'); +desc format(JSONEachRow, '{"x" : {"k1" : "123"}}\n{"x" : {"k2" : "Some string"}}'); +desc format(JSONEachRow, '{"x" : {"k1" : ["123", "123"]}}\n{"x": {"k2" : ["Some string"]}}'); +desc format(JSONEachRow, '{"x" : [123, "Some string"]}'); +desc format(JSONEachRow, '{"x" : {"a" : 123, "b" : "Some string"}}'); diff --git a/parser/testdata/02326_settings_changes_system_table/ast.json b/parser/testdata/02326_settings_changes_system_table/ast.json new file mode 100644 index 000000000..16130d66d 
--- /dev/null +++ b/parser/testdata/02326_settings_changes_system_table/ast.json @@ -0,0 +1,31 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DescribeQuery (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.settings_changes" + } + ], + + "rows": 3, + + "statistics": + { + "elapsed": 0.001298329, + "rows_read": 3, + "bytes_read": 120 + } +} diff --git a/parser/testdata/02326_settings_changes_system_table/metadata.json b/parser/testdata/02326_settings_changes_system_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02326_settings_changes_system_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02326_settings_changes_system_table/query.sql b/parser/testdata/02326_settings_changes_system_table/query.sql new file mode 100644 index 000000000..e56cd62ce --- /dev/null +++ b/parser/testdata/02326_settings_changes_system_table/query.sql @@ -0,0 +1,2 @@ +DESC system.settings_changes; +SELECT * FROM system.settings_changes WHERE version = '22.5' diff --git a/parser/testdata/02327_try_infer_integers_schema_inference/ast.json b/parser/testdata/02327_try_infer_integers_schema_inference/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02327_try_infer_integers_schema_inference/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02327_try_infer_integers_schema_inference/metadata.json b/parser/testdata/02327_try_infer_integers_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02327_try_infer_integers_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02327_try_infer_integers_schema_inference/query.sql b/parser/testdata/02327_try_infer_integers_schema_inference/query.sql new file mode 100644 index 000000000..a4a69f4fa --- /dev/null +++ b/parser/testdata/02327_try_infer_integers_schema_inference/query.sql @@ -0,0 +1,46 @@ +-- Tags: no-fasttest + +set input_format_try_infer_integers=1; +set input_format_try_infer_exponent_floats=1; + +select 'JSONEachRow'; +desc format(JSONEachRow, '{"x" : 123}'); +desc format(JSONEachRow, '{"x" : [123, 123]}'); +desc format(JSONEachRow, '{"x" : {"a" : [123, 123]}}'); +desc format(JSONEachRow, '{"x" : {"a" : [123, 123]}}\n{"x" : {"b" : [321, 321]}}'); +desc format(JSONEachRow, '{"x" : 123}\n{"x" : 123.123}'); +desc format(JSONEachRow, '{"x" : 123}\n{"x" : 1e2}'); +desc format(JSONEachRow, '{"x" : [123, 123]}\n{"x" : [321.321, 312]}'); +desc format(JSONEachRow, '{"x" : {"a" : [123, 123]}}\n{"x" : {"b" : [321.321, 123]}}'); + +select 'CSV'; +desc format(CSV, '123'); +desc format(CSV, '"[123, 123]"'); +desc format(CSV, '"{\'a\' : [123, 123]}"'); +desc format(CSV, '"{\'a\' : [123, 123]}"\n"{\'b\' : [321, 321]}"'); +desc format(CSV, '123\n123.123'); +desc format(CSV, '122\n1e2'); +desc format(CSV, '"[123, 123]"\n"[321.321, 312]"'); +desc format(CSV, '"{\'a\' : [123, 123]}"\n"{\'b\' : [321.321, 123]}"'); + +select 'TSV'; +desc format(TSV, '123'); +desc format(TSV, '[123, 123]'); +desc format(TSV, '{\'a\' : [123, 123]}'); +desc format(TSV, '{\'a\' : [123, 123]}\n{\'b\' : [321, 321]}'); +desc format(TSV, '123\n123.123'); +desc format(TSV, '122\n1e2'); +desc format(TSV, '[123, 123]\n[321.321, 312]'); +desc format(TSV, '{\'a\' : [123, 123]}\n{\'b\' : [321.321, 123]}'); + +select 'Values'; +desc format(Values, '(123)'); 
+desc format(Values, '([123, 123])'); +desc format(Values, '({\'a\' : [123, 123]})'); +desc format(Values, '({\'a\' : [123, 123]}), ({\'b\' : [321, 321]})'); +desc format(Values, '(123), (123.123)'); +desc format(Values, '(122), (1e2)'); +desc format(Values, '([123, 123])\n([321.321, 312])'); +desc format(Values, '({\'a\' : [123, 123]}), ({\'b\' : [321.321, 123]})'); + + diff --git a/parser/testdata/02336_sort_optimization_with_fill/ast.json b/parser/testdata/02336_sort_optimization_with_fill/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02336_sort_optimization_with_fill/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02336_sort_optimization_with_fill/metadata.json b/parser/testdata/02336_sort_optimization_with_fill/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02336_sort_optimization_with_fill/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02336_sort_optimization_with_fill/query.sql b/parser/testdata/02336_sort_optimization_with_fill/query.sql new file mode 100644 index 000000000..242f2c421 --- /dev/null +++ b/parser/testdata/02336_sort_optimization_with_fill/query.sql @@ -0,0 +1,4 @@ +SELECT x, s FROM ( + SELECT 5 AS x, 'Hello' AS s ORDER BY x WITH FILL FROM 1 TO 10 INTERPOLATE (s AS s||'A') +) ORDER BY s; + diff --git a/parser/testdata/02336_sparse_columns_s3/ast.json b/parser/testdata/02336_sparse_columns_s3/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02336_sparse_columns_s3/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02336_sparse_columns_s3/metadata.json b/parser/testdata/02336_sparse_columns_s3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02336_sparse_columns_s3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02336_sparse_columns_s3/query.sql b/parser/testdata/02336_sparse_columns_s3/query.sql new file mode 100644 index 000000000..b778f9096 --- /dev/null +++ b/parser/testdata/02336_sparse_columns_s3/query.sql @@ -0,0 +1,43 @@ +-- Tags: no-parallel, no-fasttest, no-object-storage + +DROP TABLE IF EXISTS t_sparse_s3; + +CREATE TABLE t_sparse_s3 (id UInt32, cond UInt8, s String) +engine = MergeTree ORDER BY id +settings ratio_of_defaults_for_sparse_serialization = 0.01, storage_policy = 's3_cache', +min_bytes_for_wide_part = 0, min_compress_block_size = 1, serialization_info_version = 'basic', +index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO t_sparse_s3 SELECT 1, number % 2, '' FROM numbers(8192); +INSERT INTO t_sparse_s3 SELECT 2, number % 2, '' FROM numbers(24576); +INSERT INTO t_sparse_s3 SELECT 3, number % 2, '' FROM numbers(8192); +INSERT INTO t_sparse_s3 SELECT 4, number % 2, '' FROM numbers(24576); +INSERT INTO t_sparse_s3 SELECT 5, number % 2, '' FROM numbers(8192); +INSERT INTO t_sparse_s3 SELECT 6, number % 2, '' FROM numbers(24576); +INSERT INTO t_sparse_s3 SELECT 7, number % 2, '' FROM numbers(8192); +INSERT INTO t_sparse_s3 SELECT 8, number % 2, '' FROM numbers(24576); +INSERT INTO t_sparse_s3 SELECT 9, number % 2, '' FROM numbers(8192); +INSERT INTO t_sparse_s3 SELECT 10, number % 2, '' FROM numbers(24576); +INSERT INTO t_sparse_s3 SELECT 11, number % 2, '' FROM numbers(8000); +INSERT INTO t_sparse_s3 SELECT 12, number % 2, 'foo' FROM numbers(192); +INSERT INTO t_sparse_s3 SELECT 13, number % 2, '' FROM numbers(24576); +INSERT INTO t_sparse_s3 SELECT 14, 
number % 2, 'foo' FROM numbers(8192); +INSERT INTO t_sparse_s3 SELECT 15, number % 2, '' FROM numbers(24576); +INSERT INTO t_sparse_s3 SELECT 16, number % 2, 'foo' FROM numbers(4730); +INSERT INTO t_sparse_s3 SELECT 17, number % 2, 'foo' FROM numbers(3462); +INSERT INTO t_sparse_s3 SELECT 18, number % 2, '' FROM numbers(24576); + +OPTIMIZE TABLE t_sparse_s3 FINAL; + +SELECT serialization_kind FROM system.parts_columns +WHERE table = 't_sparse_s3' AND active AND column = 's' +AND database = currentDatabase(); + +SET max_threads = 1; + +SELECT count() FROM t_sparse_s3 +PREWHERE cond +WHERE id IN (1, 3, 5, 7, 9, 11, 13, 15, 17) +AND NOT ignore(s); + +DROP TABLE t_sparse_s3; diff --git a/parser/testdata/02337_analyzer_columns_basic/ast.json b/parser/testdata/02337_analyzer_columns_basic/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02337_analyzer_columns_basic/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02337_analyzer_columns_basic/metadata.json b/parser/testdata/02337_analyzer_columns_basic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02337_analyzer_columns_basic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02337_analyzer_columns_basic/query.sql b/parser/testdata/02337_analyzer_columns_basic/query.sql new file mode 100644 index 000000000..c132a69ac --- /dev/null +++ b/parser/testdata/02337_analyzer_columns_basic/query.sql @@ -0,0 +1,101 @@ +-- Tags: no-parallel + +SET enable_analyzer = 1; + +-- Empty from section + +SELECT 'Empty from section'; + +DESCRIBE (SELECT dummy); +SELECT dummy; + +SELECT '--'; + +DESCRIBE (SELECT one.dummy); +SELECT one.dummy; + +SELECT '--'; + +DESCRIBE (SELECT system.one.dummy); +SELECT system.one.dummy; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT 'Table access without table name qualification'; + +SELECT test_id FROM test_table; -- { serverError UNKNOWN_IDENTIFIER } +SELECT test_id FROM test_unknown_table; -- { serverError UNKNOWN_TABLE } + +DESCRIBE (SELECT id FROM test_table); +SELECT id FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value FROM test_table); +SELECT value FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT id, value FROM test_table); +SELECT id, value FROM test_table; + +SELECT 'Table access with table name qualification'; + +DESCRIBE (SELECT test_table.id FROM test_table); +SELECT test_table.id FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT test_table.value FROM test_table); +SELECT test_table.value FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT test_table.id, test_table.value FROM test_table); +SELECT test_table.id, test_table.value FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT test.id, test.value FROM test_table AS test); +SELECT test.id, test.value FROM test_table AS test; + +DROP TABLE test_table; + +SELECT 'Table access with database and table name qualification'; + +DROP DATABASE IF EXISTS 02337_db; +CREATE DATABASE 02337_db; + +DROP TABLE IF EXISTS 02337_db.test_table; +CREATE TABLE 02337_db.test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO 02337_db.test_table VALUES (0, 'Value'); + +SELECT '--'; + +DESCRIBE (SELECT test_table.id, test_table.value FROM 02337_db.test_table); +SELECT test_table.id, test_table.value FROM 02337_db.test_table; + +SELECT '--'; + +DESCRIBE (SELECT 02337_db.test_table.id, 
02337_db.test_table.value FROM 02337_db.test_table); +SELECT 02337_db.test_table.id, 02337_db.test_table.value FROM 02337_db.test_table; + +SELECT '--'; + +DESCRIBE (SELECT test_table.id, test_table.value FROM 02337_db.test_table AS test_table); +SELECT test_table.id, test_table.value FROM 02337_db.test_table AS test_table; + +DROP TABLE 02337_db.test_table; +DROP DATABASE 02337_db; diff --git a/parser/testdata/02337_base58/ast.json b/parser/testdata/02337_base58/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02337_base58/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02337_base58/metadata.json b/parser/testdata/02337_base58/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02337_base58/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02337_base58/query.sql b/parser/testdata/02337_base58/query.sql new file mode 100644 index 000000000..d14dd9138 --- /dev/null +++ b/parser/testdata/02337_base58/query.sql @@ -0,0 +1,25 @@ +-- Tags: no-fasttest + +SELECT base58Encode('Hold my beer...'); + +SELECT base58Encode('Hold my beer...', 'Second arg'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT base58Decode('Hold my beer...'); -- { serverError INCORRECT_DATA } + +SELECT base58Decode(encoded) FROM (SELECT base58Encode(val) as encoded FROM (SELECT arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar', 'Hello world!']) val)); +SELECT tryBase58Decode(encoded) FROM (SELECT base58Encode(val) as encoded FROM (SELECT arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar', 'Hello world!']) val)); +SELECT tryBase58Decode(val) FROM (SELECT arrayJoin(['Hold my beer', 'Hold another beer', '3csAg9', 'And a wine', 'And another wine', 'And a lemonade', 't1Zv2yaZ', 'And another wine']) val); + +SELECT base58Encode(val) FROM (SELECT arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar']) val); +SELECT base58Decode(val) FROM (SELECT arrayJoin(['', '2m', '8o8', 'bQbp', '3csAg9', 'CZJRhmz', 't1Zv2yaZ', '']) val); + +SELECT base58Encode(base58Decode('1BWutmTvYPwDtmw9abTkS4Ssr8no61spGAvW1X6NDix')) == '1BWutmTvYPwDtmw9abTkS4Ssr8no61spGAvW1X6NDix'; +select base58Encode('\x00\x0b\xe3\xe1\xeb\xa1\x7a\x47\x3f\x89\xb0\xf7\xe8\xe2\x49\x40\xf2\x0a\xeb\x8e\xbc\xa7\x1a\x88\xfd\xe9\x5d\x4b\x83\xb7\x1a\x09') == '1BWutmTvYPwDtmw9abTkS4Ssr8no61spGAvW1X6NDix'; + +SELECT base58Encode(toFixedString('Hold my beer...', 15)); +SELECT base58Decode(toFixedString('t1Zv2yaZ', 8)); + +SELECT base58Encode(val) FROM (SELECT arrayJoin([toFixedString('', 3), toFixedString('f', 3), toFixedString('fo', 3), toFixedString('foo', 3)]) val); +SELECT base58Decode(val) FROM (SELECT arrayJoin([toFixedString('111', 3), toFixedString('bG7y', 4), toFixedString('bQZu', 4), toFixedString('bQbp', 4)]) val); + +Select base58Encode(reinterpretAsFixedString(byteSwap(toUInt256('256')))) == '1111111111111111111111111111115R'; +Select base58Encode(reinterpretAsString(byteSwap(toUInt256('256')))) == '1111111111111111111111111111112'; -- { reinterpretAsString drops the last null byte hence, encoded value is different than the FixedString version above } diff --git a/parser/testdata/02337_check_translate_qualified_names_matcher/ast.json b/parser/testdata/02337_check_translate_qualified_names_matcher/ast.json new file mode 100644 index 000000000..32b254e25 --- /dev/null +++ b/parser/testdata/02337_check_translate_qualified_names_matcher/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + 
"type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery nested_name_tuples (children 1)" + }, + { + "explain": " Identifier nested_name_tuples" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001341526, + "rows_read": 2, + "bytes_read": 89 + } +} diff --git a/parser/testdata/02337_check_translate_qualified_names_matcher/metadata.json b/parser/testdata/02337_check_translate_qualified_names_matcher/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02337_check_translate_qualified_names_matcher/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02337_check_translate_qualified_names_matcher/query.sql b/parser/testdata/02337_check_translate_qualified_names_matcher/query.sql new file mode 100644 index 000000000..09ab591f9 --- /dev/null +++ b/parser/testdata/02337_check_translate_qualified_names_matcher/query.sql @@ -0,0 +1,10 @@ +CREATE TABLE nested_name_tuples +( + `a` Tuple(x String, y Tuple(i Int32, j String)) +) +ENGINE = Memory; + +INSERT INTO nested_name_tuples VALUES(('asd', (12, 'ddd'))); + +SELECT t.a.y.i FROM nested_name_tuples as t; +SELECT nested_name_tuples.a.y.i FROM nested_name_tuples as t; diff --git a/parser/testdata/02337_join_analyze_stuck/ast.json b/parser/testdata/02337_join_analyze_stuck/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02337_join_analyze_stuck/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02337_join_analyze_stuck/metadata.json b/parser/testdata/02337_join_analyze_stuck/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02337_join_analyze_stuck/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02337_join_analyze_stuck/query.sql b/parser/testdata/02337_join_analyze_stuck/query.sql new file mode 100644 index 000000000..714f43770 --- /dev/null +++ b/parser/testdata/02337_join_analyze_stuck/query.sql @@ -0,0 +1,25 @@ +-- Tags: long + +-- https://github.com/ClickHouse/ClickHouse/issues/21557 + +EXPLAIN SYNTAX +WITH + x AS ( SELECT number FROM numbers(10) ), + cross_sales AS ( + SELECT 1 AS xx + FROM x, x AS d1, x AS d2, x AS d3, x AS d4, x AS d5, x AS d6, x AS d7, x AS d8, x AS d9 + WHERE x.number = d9.number + ) +SELECT xx FROM cross_sales WHERE xx = 2000 FORMAT Null; + +SET max_analyze_depth = 1; + +EXPLAIN SYNTAX +WITH + x AS ( SELECT number FROM numbers(10) ), + cross_sales AS ( + SELECT 1 AS xx + FROM x, x AS d1, x AS d2, x AS d3, x AS d4, x AS d5, x AS d6, x AS d7, x AS d8, x AS d9 + WHERE x.number = d9.number + ) +SELECT xx FROM cross_sales WHERE xx = 2000; diff --git a/parser/testdata/02337_multiple_joins_original_names/ast.json b/parser/testdata/02337_multiple_joins_original_names/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02337_multiple_joins_original_names/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02337_multiple_joins_original_names/metadata.json b/parser/testdata/02337_multiple_joins_original_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02337_multiple_joins_original_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02337_multiple_joins_original_names/query.sql b/parser/testdata/02337_multiple_joins_original_names/query.sql new file mode 100644 index 000000000..37c7077b5 --- /dev/null +++ b/parser/testdata/02337_multiple_joins_original_names/query.sql @@ -0,0 
+1,24 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/34697 + +SET enable_analyzer = 1; + +SELECT table1_id FROM ( + SELECT first.table1_id + FROM (SELECT number+1 as table1_id FROM numbers(1)) as first + JOIN (SELECT number+1 as table2_id FROM numbers(1)) as second ON first.table1_id = second.table2_id + JOIN (SELECT number+1 as table3_id FROM numbers(1)) as third ON first.table1_id = third.table3_id +); + +SELECT table1_id FROM ( + SELECT first.table1_id + FROM (SELECT number+1 as table1_id FROM numbers(1)) as first + JOIN (SELECT number+1 as table2_id FROM numbers(1)) as second ON first.table1_id = second.table2_id + JOIN (SELECT number+1 as table3_id FROM numbers(1)) as third ON first.table1_id = third.table3_id +) SETTINGS multiple_joins_try_to_keep_original_names = 1; + +SELECT aaa FROM ( + SELECT first.table1_id as aaa + FROM (SELECT number+1 as table1_id FROM numbers(1)) as first + JOIN (SELECT number+1 as table2_id FROM numbers(1)) as second ON first.table1_id = second.table2_id + JOIN (SELECT number+1 as table3_id FROM numbers(1)) as third ON first.table1_id = third.table3_id +) SETTINGS multiple_joins_try_to_keep_original_names = 1; diff --git a/parser/testdata/02338_analyzer_constants_basic/ast.json b/parser/testdata/02338_analyzer_constants_basic/ast.json new file mode 100644 index 000000000..56a1c8a18 --- /dev/null +++ b/parser/testdata/02338_analyzer_constants_basic/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001245116, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02338_analyzer_constants_basic/metadata.json b/parser/testdata/02338_analyzer_constants_basic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02338_analyzer_constants_basic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02338_analyzer_constants_basic/query.sql b/parser/testdata/02338_analyzer_constants_basic/query.sql new file mode 100644 index 000000000..536202dc2 --- /dev/null +++ b/parser/testdata/02338_analyzer_constants_basic/query.sql @@ -0,0 +1,42 @@ +SET enable_analyzer = 1; + +DESCRIBE (SELECT 1); +SELECT 1; + +SELECT '--'; + +DESCRIBE (SELECT 'test'); +SELECT 'test'; + +SELECT '--'; + +DESCRIBE (SELECT 1, 'test'); +SELECT 1, 'test'; + +SELECT '--'; + +DESCRIBE (SELECT 1, 'test', [1, 2, 3]); +SELECT 1, 'test', [1, 2, 3]; + +SELECT '--'; + +DESCRIBE (SELECT 1, 'test', [1, 2, 3], ['1', '2', '3']); +SELECT 1, 'test', [1, 2, 3], ['1', '2', '3']; + +SELECT '--'; + +DESCRIBE (SELECT NULL); +SELECT NULL; + +SELECT '--'; + +DESCRIBE (SELECT (1, 1)); +SELECT (1, 1); + +SELECT '--'; + +DESCRIBE (SELECT [(1, 1)]); +SELECT [(1, 1)]; + +DESCRIBE (SELECT NULL, 1, 'test', [1, 2, 3], [(1, 1), (1, 1)]); +SELECT NULL, 1, 'test', [1, 2, 3], [(1, 1), (1, 1)]; diff --git a/parser/testdata/02339_analyzer_matcher_basic/ast.json b/parser/testdata/02339_analyzer_matcher_basic/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02339_analyzer_matcher_basic/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02339_analyzer_matcher_basic/metadata.json b/parser/testdata/02339_analyzer_matcher_basic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02339_analyzer_matcher_basic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02339_analyzer_matcher_basic/query.sql b/parser/testdata/02339_analyzer_matcher_basic/query.sql new file mode 100644 index 000000000..57c6a9479 --- /dev/null +++ b/parser/testdata/02339_analyzer_matcher_basic/query.sql @@ -0,0 +1,186 @@ +-- Tags: no-parallel + +SET enable_analyzer = 1; + +SELECT 'Matchers without FROM section'; + +DESCRIBE (SELECT *); +SELECT *; + +SELECT '--'; + +DESCRIBE (SELECT COLUMNS(dummy)); +SELECT COLUMNS(dummy); + +SELECT '--'; + +DESCRIBE (SELECT COLUMNS('d')); +SELECT COLUMNS('d'); + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT 'Unqualified matchers'; + +DESCRIBE (SELECT * FROM test_table); +SELECT * FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT COLUMNS(id) FROM test_table); +SELECT COLUMNS(id) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT COLUMNS(id), COLUMNS(value) FROM test_table); +SELECT COLUMNS(id), COLUMNS(value) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT COLUMNS('i'), COLUMNS('v') FROM test_table); +SELECT COLUMNS('i'), COLUMNS('v') FROM test_table; + +SELECT 'Table qualified matchers'; + +DESCRIBE (SELECT test_table.* FROM test_table); +SELECT test_table.* FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT test_table.COLUMNS(id) FROM test_table); +SELECT test_table.COLUMNS(id) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT test_table.COLUMNS(id), test_table.COLUMNS(value) FROM test_table); +SELECT test_table.COLUMNS(id), test_table.COLUMNS(value) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT test_table.COLUMNS('i'), test_table.COLUMNS('v') FROM test_table); +SELECT test_table.COLUMNS('i'), test_table.COLUMNS('v') FROM test_table; + +SELECT 'Database and table qualified matchers'; + +DROP DATABASE IF EXISTS 02339_db; +CREATE DATABASE 02339_db; + +DROP TABLE IF EXISTS 02339_db.test_table; +CREATE TABLE 02339_db.test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO 02339_db.test_table VALUES (0, 'Value'); + +SELECT '--'; + +DESCRIBE (SELECT 02339_db.test_table.* FROM 02339_db.test_table); +SELECT 02339_db.test_table.* FROM 02339_db.test_table; + +SELECT '--'; + +DESCRIBE (SELECT 02339_db.test_table.COLUMNS(id) FROM 02339_db.test_table); +SELECT 02339_db.test_table.COLUMNS(id) FROM 02339_db.test_table; + +SELECT '--'; + +DESCRIBE (SELECT 02339_db.test_table.COLUMNS(id), 02339_db.test_table.COLUMNS(value) FROM 02339_db.test_table); +SELECT 02339_db.test_table.COLUMNS(id), 02339_db.test_table.COLUMNS(value) FROM 02339_db.test_table; + +SELECT '--'; + +DESCRIBE (SELECT 02339_db.test_table.COLUMNS('i'), 02339_db.test_table.COLUMNS('v') FROM 02339_db.test_table); +SELECT 02339_db.test_table.COLUMNS('i'), 02339_db.test_table.COLUMNS('v') FROM 02339_db.test_table; + +DROP TABLE 02339_db.test_table; +DROP DATABASE 02339_db; + +SELECT 'APPLY transformer'; + +SELECT '--'; + +DESCRIBE (SELECT * APPLY toString FROM test_table); +SELECT * APPLY toString FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT * APPLY (x -> toString(x)) FROM test_table); +SELECT * APPLY (x -> toString(x)) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT * APPLY (x -> toString(x)) APPLY (x -> length(x)) FROM test_table); +SELECT * APPLY (x -> toString(x)) APPLY (x -> length(x)) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT * APPLY (x -> toString(x)) APPLY length FROM test_table); +SELECT * APPLY (x -> toString(x)) APPLY length FROM test_table; + +SELECT '--'; +DESCRIBE (SELECT * FROM 
test_table); +SELECT * FROM test_table; + +SELECT 'EXCEPT transformer'; + +SELECT '--'; + +DESCRIBE (SELECT * EXCEPT (id) FROM test_table); +SELECT * EXCEPT (id) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT COLUMNS(id, value) EXCEPT (id) FROM test_table); +SELECT COLUMNS(id, value) EXCEPT (id) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT * EXCEPT (id) APPLY toString FROM test_table); +SELECT * EXCEPT (id) APPLY toString FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT COLUMNS(id, value) EXCEPT (id) APPLY toString FROM test_table); +SELECT COLUMNS(id, value) EXCEPT (id) APPLY toString FROM test_table; + +SELECT 'REPLACE transformer'; + +SELECT '--'; + +DESCRIBE (SELECT * REPLACE (5 AS id) FROM test_table); +SELECT * REPLACE (5 AS id) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT COLUMNS(id, value) REPLACE (5 AS id) FROM test_table); +SELECT COLUMNS(id, value) REPLACE (5 AS id) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT * REPLACE (5 AS id, 6 as value) FROM test_table); +SELECT * REPLACE (5 AS id, 6 as value) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT COLUMNS(id, value) REPLACE (5 AS id, 6 as value) FROM test_table); +SELECT COLUMNS(id, value) REPLACE (5 AS id, 6 as value) FROM test_table; + +SELECT 'Combine EXCEPT, REPLACE, APPLY transformers'; + +SELECT '--'; + +DESCRIBE (SELECT * EXCEPT id REPLACE (5 AS id, 6 as value) APPLY toString FROM test_table); +SELECT * EXCEPT id REPLACE (5 AS id, 6 as value) APPLY toString FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT COLUMNS(id, value) EXCEPT id REPLACE (5 AS id, 6 as value) APPLY toString FROM test_table); +SELECT COLUMNS(id, value) EXCEPT id REPLACE (5 AS id, 6 as value) APPLY toString FROM test_table; diff --git a/parser/testdata/02340_analyzer_functions/ast.json b/parser/testdata/02340_analyzer_functions/ast.json new file mode 100644 index 000000000..0459cbd07 --- /dev/null +++ b/parser/testdata/02340_analyzer_functions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001431377, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02340_analyzer_functions/metadata.json b/parser/testdata/02340_analyzer_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02340_analyzer_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02340_analyzer_functions/query.sql b/parser/testdata/02340_analyzer_functions/query.sql new file mode 100644 index 000000000..bd0183029 --- /dev/null +++ b/parser/testdata/02340_analyzer_functions/query.sql @@ -0,0 +1,28 @@ +SET enable_analyzer = 1; + +DESCRIBE (SELECT 1 + 1); +SELECT 1 + 1; + +SELECT '--'; + +DESCRIBE (SELECT dummy + dummy); +SELECT dummy + dummy; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT '--'; + +DESCRIBE (SELECT id + length(value) FROM test_table); +SELECT id + length(value) FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT concat(concat(toString(id), '_'), (value)) FROM test_table); +SELECT concat(concat(toString(id), '_'), (value)) FROM test_table; diff --git a/parser/testdata/02340_union_header/ast.json b/parser/testdata/02340_union_header/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02340_union_header/ast.json @@ -0,0 +1 @@ 
+{"error": true} diff --git a/parser/testdata/02340_union_header/metadata.json b/parser/testdata/02340_union_header/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02340_union_header/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02340_union_header/query.sql b/parser/testdata/02340_union_header/query.sql new file mode 100644 index 000000000..3481d51d6 --- /dev/null +++ b/parser/testdata/02340_union_header/query.sql @@ -0,0 +1,3 @@ +-- { echo } +SELECT a, b, c FROM (SELECT 3 AS a, 2147483647 AS b, 1048575 AS c UNION ALL SELECT -2, NULL, -2) AS js1 ORDER BY a; +SELECT a, b, c, d FROM (SELECT 3 AS a, 2147483647 AS b, 1048575 AS c UNION ALL SELECT -2, NULL, -2) AS js1 ALL LEFT JOIN (SELECT 100 AS a, -9223372036854775808 AS b, NULL AS d UNION ALL SELECT 256, 256, NULL) AS js2 USING (a, b) ORDER BY a DESC NULLS FIRST, '-0.02' ASC, b ASC NULLS FIRST, c DESC NULLS FIRST, 1048575 ASC NULLS LAST, d DESC SETTINGS enable_positional_arguments=0; diff --git a/parser/testdata/02341_analyzer_aliases_basics/ast.json b/parser/testdata/02341_analyzer_aliases_basics/ast.json new file mode 100644 index 000000000..a140c8e61 --- /dev/null +++ b/parser/testdata/02341_analyzer_aliases_basics/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001303981, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02341_analyzer_aliases_basics/metadata.json b/parser/testdata/02341_analyzer_aliases_basics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02341_analyzer_aliases_basics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02341_analyzer_aliases_basics/query.sql b/parser/testdata/02341_analyzer_aliases_basics/query.sql new file mode 100644 index 000000000..a6c41e5a9 --- /dev/null +++ b/parser/testdata/02341_analyzer_aliases_basics/query.sql @@ -0,0 +1,52 @@ +SET enable_analyzer = 1; + +SELECT 'Aliases to constants'; + +SELECT 1 as a, a; +SELECT (c + 1) as d, (a + 1) as b, 1 AS a, (b + 1) as c, d; + +WITH 1 as a SELECT a; +WITH a as b SELECT 1 as a, b; + +SELECT 1 AS x, x, x + 1; +SELECT x, x + 1, 1 AS x; +SELECT x, 1 + (2 + (3 AS x)); + +SELECT a AS b, b AS a; -- { serverError CYCLIC_ALIASES, UNKNOWN_IDENTIFIER } + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT 'Aliases to columns'; + +SELECT id_alias_2, id AS id_alias, id_alias as id_alias_2 FROM test_table; +SELECT id_1, value_1, id as id_1, value as value_1 FROM test_table; + +WITH value_1 as value_2, id_1 as id_2, id AS id_1, value AS value_1 SELECT id_2, value_2 FROM test_table; + +SELECT (id + b) AS id, id as b FROM test_table; -- { serverError CYCLIC_ALIASES, UNKNOWN_IDENTIFIER } +SELECT (1 + b + 1 + id) AS id, b as c, id as b FROM test_table; -- { serverError CYCLIC_ALIASES, UNKNOWN_IDENTIFIER } + +SELECT 'Alias conflict with identifier inside expression'; + +SELECT id AS id FROM test_table; +SELECT (id + 1) AS id FROM test_table; +SELECT (id + 1 + 1 + 1 + id) AS id FROM test_table; + +SELECT 'Alias setting prefer_column_name_to_alias'; + +WITH id AS value SELECT value FROM test_table; + +SET prefer_column_name_to_alias = 1; +WITH id AS value SELECT value FROM test_table; +SET prefer_column_name_to_alias = 0; + +DROP TABLE test_table; 
+ +WITH path('clickhouse.com/a/b/c') AS x SELECT x AS path; diff --git a/parser/testdata/02341_global_join_cte/ast.json b/parser/testdata/02341_global_join_cte/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02341_global_join_cte/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02341_global_join_cte/metadata.json b/parser/testdata/02341_global_join_cte/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02341_global_join_cte/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02341_global_join_cte/query.sql b/parser/testdata/02341_global_join_cte/query.sql new file mode 100644 index 000000000..f6acd822f --- /dev/null +++ b/parser/testdata/02341_global_join_cte/query.sql @@ -0,0 +1,5 @@ +-- { echo } +with rhs as (select * from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one))) select lhs.d2 from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one)) lhs global join rhs using (d1) order by rhs.d2 settings enable_analyzer=0; -- { serverError ALIAS_REQUIRED } +with rhs as (select * from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one))) select lhs.d2 from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one)) lhs global join rhs using (d1) order by rhs.d2 settings enable_analyzer=1; -- It works with analyzer; rhs is an alias itself. +with rhs as (select * from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one))) select lhs.d2 from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one)) lhs global join rhs using (d1) order by rhs.d2 settings joined_subquery_requires_alias=0; +with rhs_ as (select * from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one))) select lhs.d2 from remote('127.{1,2}', view(select dummy d1, dummy d2 from system.one)) lhs global join rhs_ rhs using (d1) order by rhs.d2 settings joined_subquery_requires_alias=0; diff --git a/parser/testdata/02342_analyzer_compound_types/ast.json b/parser/testdata/02342_analyzer_compound_types/ast.json new file mode 100644 index 000000000..aa77f7441 --- /dev/null +++ b/parser/testdata/02342_analyzer_compound_types/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001296789, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02342_analyzer_compound_types/metadata.json b/parser/testdata/02342_analyzer_compound_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02342_analyzer_compound_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02342_analyzer_compound_types/query.sql b/parser/testdata/02342_analyzer_compound_types/query.sql new file mode 100644 index 000000000..36617aab2 --- /dev/null +++ b/parser/testdata/02342_analyzer_compound_types/query.sql @@ -0,0 +1,195 @@ +SET enable_analyzer = 1; + +SELECT 'Constant tuple'; + +SELECT cast((1, 'Value'), 'Tuple (id UInt64, value String)') AS value, value.id, value.value; +SELECT cast((1, 'Value'), 'Tuple (id UInt64, value String)') AS value, value.* APPLY toString; +SELECT cast((1, 'Value'), 'Tuple (id UInt64, value String)') AS value, value.COLUMNS(id) APPLY toString; +SELECT cast((1, 'Value'), 'Tuple (id UInt64, value String)') AS value, value.COLUMNS(value) APPLY toString; +SELECT cast((1, 'Value'), 'Tuple 
(id UInt64, value String)') AS value, value.COLUMNS('i') APPLY toString; +SELECT cast((1, 'Value'), 'Tuple (id UInt64, value String)') AS value, value.COLUMNS('v') APPLY toString; + +SELECT 'Tuple'; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value Tuple(value_0_level_0 Tuple(value_0_level_1 String, value_1_level_1 String), value_1_level_0 String) +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0, (('value_0_level_1', 'value_1_level_1'), 'value_1_level_0')); + +SELECT '--'; + +DESCRIBE (SELECT * FROM test_table); +SELECT * FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT id, value FROM test_table); +SELECT id, value FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0, value.value_1_level_0 FROM test_table); +SELECT value.value_0_level_0, value.value_1_level_0 FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value AS alias_value, alias_value.value_0_level_0, alias_value.value_1_level_0 FROM test_table); +SELECT value AS alias_value, alias_value.value_0_level_0, alias_value.value_1_level_0 FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value AS alias_value, alias_value.* FROM test_table); +SELECT value AS alias_value, alias_value.* FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value AS alias_value, alias_value.* APPLY toString FROM test_table); +SELECT value AS alias_value, alias_value.* APPLY toString FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.* FROM test_table); +SELECT value.* FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.* APPLY toString FROM test_table); +SELECT value.* APPLY toString FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0.value_0_level_1, value.value_0_level_0.value_1_level_1 FROM test_table); +SELECT value.value_0_level_0.value_0_level_1, value.value_0_level_0.value_1_level_1 FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0 AS alias_value, alias_value.value_0_level_1, alias_value.value_1_level_1 FROM test_table); +SELECT value.value_0_level_0 AS alias_value, alias_value.value_0_level_1, alias_value.value_1_level_1 FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0 AS alias_value, alias_value.* FROM test_table); +SELECT value.value_0_level_0 AS alias_value, alias_value.* FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0 AS alias_value, alias_value.* APPLY toString FROM test_table); +SELECT value.value_0_level_0 AS alias_value, alias_value.* APPLY toString FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0.* FROM test_table); +SELECT value.value_0_level_0.* FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0.* APPLY toString FROM test_table); +SELECT value.value_0_level_0.* APPLY toString FROM test_table; + +DROP TABLE test_table; + +-- SELECT 'Array of tuples'; + +-- DROP TABLE IF EXISTS test_table; +-- CREATE TABLE test_table +-- ( +-- id UInt64, +-- value Array(Tuple(value_0_level_0 Tuple(value_0_level_1 String, value_1_level_1 String), value_1_level_0 String)) +-- ) ENGINE=MergeTree ORDER BY id; + +-- INSERT INTO test_table VALUES (0, [('value_0_level_1', 'value_1_level_1')], ['value_1_level_0']); + +-- DESCRIBE (SELECT * FROM test_table); +-- SELECT * FROM test_table; + +-- SELECT '--'; + +-- DESCRIBE (SELECT value.value_0_level_0, value.value_1_level_0 FROM test_table); +-- SELECT value.value_0_level_0, value.value_1_level_0 FROM test_table; + +-- SELECT '--'; + +-- DESCRIBE (SELECT 
value.value_0_level_0.value_0_level_1, value.value_0_level_0.value_1_level_1 FROM test_table); +-- SELECT value.value_0_level_0.value_0_level_1, value.value_0_level_0.value_1_level_1 FROM test_table; + +-- SELECT '--'; + +-- DESCRIBE (SELECT value.value_0_level_0 AS alias_value, alias_value.value_0_level_1, alias_value.value_1_level_1 FROM test_table); +-- SELECT value.value_0_level_0 AS alias_value, alias_value.value_0_level_1, alias_value.value_1_level_1 FROM test_table; + +-- SELECT '--'; + +-- DESCRIBE (SELECT value.value_0_level_0 AS alias_value, alias_value.* FROM test_table); +-- SELECT value.value_0_level_0 AS alias_value, alias_value.* FROM test_table; + +-- SELECT '--'; + +-- DESCRIBE (SELECT value.value_0_level_0 AS alias_value, alias_value.* APPLY toString FROM test_table); +-- SELECT value.value_0_level_0 AS alias_value, alias_value.* APPLY toString FROM test_table; + +-- SELECT '--'; + +-- DESCRIBE (SELECT value.value_0_level_0.* FROM test_table); +-- SELECT value.value_0_level_0.* FROM test_table; + +-- SELECT '--'; + +-- DESCRIBE (SELECT value.value_0_level_0.* APPLY toString FROM test_table); +-- SELECT value.value_0_level_0.* APPLY toString FROM test_table; + +-- DROP TABLE test_table; + +SELECT 'Nested'; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value Nested (value_0_level_0 Nested(value_0_level_1 String, value_1_level_1 String), value_1_level_0 String) +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0, [[('value_0_level_1', 'value_1_level_1')]], ['value_1_level_0']); + +DESCRIBE (SELECT * FROM test_table); +SELECT * FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0, value.value_1_level_0 FROM test_table); +SELECT value.value_0_level_0, value.value_1_level_0 FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0.value_0_level_1, value.value_0_level_0.value_1_level_1 FROM test_table); +SELECT value.value_0_level_0.value_0_level_1, value.value_0_level_0.value_1_level_1 FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0 AS value_alias, value_alias.value_0_level_1, value_alias.value_1_level_1 FROM test_table); +SELECT value.value_0_level_0 AS value_alias, value_alias.value_0_level_1, value_alias.value_1_level_1 FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0 AS value_alias, value_alias.* FROM test_table); +SELECT value.value_0_level_0 AS value_alias, value_alias.* FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0 AS value_alias, value_alias.* APPLY toString FROM test_table); +SELECT value.value_0_level_0 AS value_alias, value_alias.* APPLY toString FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0.* FROM test_table); +SELECT value.value_0_level_0.* FROM test_table; + +SELECT '--'; + +DESCRIBE (SELECT value.value_0_level_0.* APPLY toString FROM test_table); +SELECT value.value_0_level_0.* APPLY toString FROM test_table; + +DROP TABLE test_table; diff --git a/parser/testdata/02343_aggregation_pipeline/ast.json b/parser/testdata/02343_aggregation_pipeline/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02343_aggregation_pipeline/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02343_aggregation_pipeline/metadata.json b/parser/testdata/02343_aggregation_pipeline/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02343_aggregation_pipeline/metadata.json @@ -0,0 +1 @@ 
+{"todo": true} diff --git a/parser/testdata/02343_aggregation_pipeline/query.sql b/parser/testdata/02343_aggregation_pipeline/query.sql new file mode 100644 index 000000000..27c1439c3 --- /dev/null +++ b/parser/testdata/02343_aggregation_pipeline/query.sql @@ -0,0 +1,79 @@ +-- Tags: no-object-storage + +-- produces different pipeline if enabled +set enable_memory_bound_merging_of_aggregation_results = 0; +set merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +set max_threads = 16; +set prefer_localhost_replica = 1; +set optimize_aggregation_in_order = 0; +set max_block_size = 65505; +set allow_prefetched_read_pool_for_remote_filesystem = 0; +set allow_prefetched_read_pool_for_local_filesystem = 0; + +-- { echoOn } + +explain pipeline select * from (select * from numbers(1e8) group by number) group by number settings max_rows_to_read = 0; +explain pipeline select * from (select * from numbers_mt(1e8) group by number) group by number settings max_rows_to_read = 0; +explain pipeline select * from (select * from numbers_mt(1e8) group by number) order by number settings max_rows_to_read = 0; + +explain pipeline select * from (select * from numbers(1e8) group by number) group by number settings max_rows_to_read = 0, max_threads = 36; +explain pipeline select * from (select * from numbers_mt(1e8) group by number) group by number settings max_rows_to_read = 0, max_threads = 36; +explain pipeline select * from (select * from numbers_mt(1e8) group by number) order by number settings max_rows_to_read = 0, max_threads = 36; + +explain pipeline select * from (select * from numbers(1e8) group by number) group by number settings max_rows_to_read = 0, max_threads = 48; +explain pipeline select * from (select * from numbers_mt(1e8) group by number) group by number settings max_rows_to_read = 0, max_threads = 48; +explain pipeline select * from (select * from numbers_mt(1e8) group by number) order by number settings max_rows_to_read = 0, max_threads = 48; + +explain pipeline select * from (select * from numbers(1e8) group by number) group by number settings max_rows_to_read = 0, max_threads = 49; +explain pipeline select * from (select * from numbers_mt(1e8) group by number) group by number settings max_rows_to_read = 0, max_threads = 49; +explain pipeline select * from (select * from numbers_mt(1e8) group by number) order by number settings max_rows_to_read = 0, max_threads = 49; + +explain pipeline select number from remote('127.0.0.{1,2,3}', system, numbers_mt) group by number settings distributed_aggregation_memory_efficient = 1; + +explain pipeline select number from remote('127.0.0.{1,2,3}', system, numbers_mt) group by number settings distributed_aggregation_memory_efficient = 0; + +-- { echoOff } + +DROP TABLE IF EXISTS proj_agg_02343; + +CREATE TABLE proj_agg_02343 +( + k1 UInt32, + k2 UInt32, + k3 UInt32, + value UInt32, + PROJECTION aaaa + ( + SELECT + k1, + k2, + k3, + sum(value) + GROUP BY k1, k2, k3 + ) +) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO proj_agg_02343 SELECT 1, number % 2, number % 4, number FROM numbers(100000); +OPTIMIZE TABLE proj_agg_02343 FINAL; + +-- { echoOn } + +explain pipeline SELECT k1, k3, sum(value) v FROM remote('127.0.0.{1,2}', currentDatabase(), proj_agg_02343) GROUP BY k1, k3 SETTINGS distributed_aggregation_memory_efficient = 0; + +explain pipeline SELECT k1, k3, sum(value) v FROM remote('127.0.0.{1,2}', currentDatabase(), proj_agg_02343) GROUP BY k1, k3 SETTINGS distributed_aggregation_memory_efficient 
= 1; + +-- { echoOff } + +create table t(a UInt64) engine = MergeTree order by (a); +system stop merges t; +create table dist_t as t engine = Distributed(test_cluster_two_shards, currentDatabase(), t, a % 2); +system stop merges dist_t; +insert into dist_t select number from numbers_mt(10); +insert into dist_t select number from numbers_mt(10); + +-- { echoOn } + +explain pipeline select a from remote('127.0.0.{1,2}', currentDatabase(), dist_t) group by a settings max_threads = 2, distributed_aggregation_memory_efficient = 1; diff --git a/parser/testdata/02343_analyzer_column_transformers_strict/ast.json b/parser/testdata/02343_analyzer_column_transformers_strict/ast.json new file mode 100644 index 000000000..d56fdbfaf --- /dev/null +++ b/parser/testdata/02343_analyzer_column_transformers_strict/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001358913, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02343_analyzer_column_transformers_strict/metadata.json b/parser/testdata/02343_analyzer_column_transformers_strict/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02343_analyzer_column_transformers_strict/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02343_analyzer_column_transformers_strict/query.sql b/parser/testdata/02343_analyzer_column_transformers_strict/query.sql new file mode 100644 index 000000000..b55cb85c0 --- /dev/null +++ b/parser/testdata/02343_analyzer_column_transformers_strict/query.sql @@ -0,0 +1,18 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT * EXCEPT (id) FROM test_table; +SELECT * EXCEPT STRICT (id, value1) FROM test_table; -- { serverError BAD_ARGUMENTS } + +SELECT * REPLACE STRICT (1 AS id, 2 AS value) FROM test_table; +SELECT * REPLACE STRICT (1 AS id, 2 AS value_1) FROM test_table; -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS test_table; diff --git a/parser/testdata/02343_analyzer_lambdas/ast.json b/parser/testdata/02343_analyzer_lambdas/ast.json new file mode 100644 index 000000000..896798cb2 --- /dev/null +++ b/parser/testdata/02343_analyzer_lambdas/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001347913, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02343_analyzer_lambdas/metadata.json b/parser/testdata/02343_analyzer_lambdas/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02343_analyzer_lambdas/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02343_analyzer_lambdas/query.sql b/parser/testdata/02343_analyzer_lambdas/query.sql new file mode 100644 index 000000000..07f382700 --- /dev/null +++ b/parser/testdata/02343_analyzer_lambdas/query.sql @@ -0,0 +1,103 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT 'Standalone lambdas'; + +WITH x -> x + 1 AS lambda SELECT lambda(1); +WITH x -> toString(x) AS lambda SELECT lambda(1), lambda(NULL), lambda([1,2,3]); +WITH x -> 
toString(x) AS lambda_1, lambda_1 AS lambda_2, lambda_2 AS lambda_3 SELECT lambda_1(1), lambda_2(NULL), lambda_3([1,2,3]); + +WITH x -> x + 1 AS lambda SELECT lambda(id) FROM test_table; +WITH x -> toString(x) AS lambda SELECT lambda(id), lambda(value) FROM test_table; + +SELECT 'Lambda as function parameter'; + +SELECT arrayMap(x -> x + 1, [1,2,3]); +WITH x -> x + 1 AS lambda SELECT arrayMap(lambda, [1,2,3]); +SELECT arrayMap((x -> toString(x)) as lambda, [1,2,3]), arrayMap(lambda, ['1','2','3']); +WITH x -> toString(x) AS lambda_1 SELECT arrayMap(lambda_1 AS lambda_2, [1,2,3]), arrayMap(lambda_2, ['1', '2', '3']); + +SELECT arrayMap(x -> id, [1,2,3]) FROM test_table; +SELECT arrayMap(x -> x + id, [1,2,3]) FROM test_table; +SELECT arrayMap((x -> concat(concat(toString(x), '_'), toString(id))) as lambda, [1,2,3]) FROM test_table; + +SELECT 'Lambda compound argument'; + +DROP TABLE IF EXISTS test_table_tuple; +CREATE TABLE test_table_tuple +( + id UInt64, + value Tuple(value_0_level_0 String, value_1_level_0 String) +) ENGINE=TinyLog; + +INSERT INTO test_table_tuple VALUES (0, ('value_0_level_0', 'value_1_level_0')); + +WITH x -> concat(concat(toString(x.id), '_'), x.value) AS lambda SELECT cast((1, 'Value'), 'Tuple (id UInt64, value String)') AS value, lambda(value); +WITH x -> concat(concat(x.value_0_level_0, '_'), x.value_1_level_0) AS lambda SELECT lambda(value) FROM test_table_tuple; + +SELECT 'Lambda matcher'; + +WITH x -> * AS lambda SELECT lambda(1); +WITH x -> * AS lambda SELECT lambda(1) FROM test_table; + +WITH cast(tuple(1), 'Tuple (value UInt64)') AS compound_value SELECT arrayMap(x -> compound_value.*, [1,2,3]); +WITH cast(tuple(1, 1), 'Tuple (value_1 UInt64, value_2 UInt64)') AS compound_value SELECT arrayMap(x -> compound_value.*, [1,2,3]); -- { serverError UNSUPPORTED_METHOD } +WITH cast(tuple(1, 1), 'Tuple (value_1 UInt64, value_2 UInt64)') AS compound_value SELECT arrayMap(x -> plus(compound_value.*), [1,2,3]); + +WITH cast(tuple(1), 'Tuple (value UInt64)') AS compound_value SELECT id, test_table.* APPLY x -> compound_value.* FROM test_table; +WITH cast(tuple(1, 1), 'Tuple (value_1 UInt64, value_2 UInt64)') AS compound_value SELECT id, test_table.* APPLY x -> compound_value.* FROM test_table; -- { serverError UNSUPPORTED_METHOD } +WITH cast(tuple(1, 1), 'Tuple (value_1 UInt64, value_2 UInt64)') AS compound_value SELECT id, test_table.* APPLY x -> plus(compound_value.*) FROM test_table; + +SELECT 'Lambda untuple'; + +WITH x -> untuple(x) AS lambda SELECT cast((1, 'Value'), 'Tuple (id UInt64, value String)') AS value, lambda(value); + +SELECT 'Lambda carrying'; + +WITH (functor, x) -> functor(x) AS lambda, x -> x + 1 AS functor_1, x -> toString(x) AS functor_2 SELECT lambda(functor_1, 1), lambda(functor_2, 1); +WITH (functor, x) -> functor(x) AS lambda, x -> x + 1 AS functor_1, x -> toString(x) AS functor_2 SELECT lambda(functor_1, id), lambda(functor_2, id) FROM test_table; + + +SELECT 'Lambda legacy syntax'; + +SELECT arrayMap(lambda(tuple(x), x + 1), [1, 2, 3]); + +WITH 222 AS lambda +SELECT arrayMap(lambda(tuple(x), x + 1), [1, 2, 3]); + +SELECT arrayMap(lambda((x,), x + 1), [1, 2, 3]); + +SELECT arraySort(lambda((x, y), y), ['world', 'hello'], [2, 1]); + +WITH 222 AS lambda +SELECT arrayMap(lambda((x, ), x + 1), [1, 2, 3]); + +WITH x -> x + 1 AS lambda +SELECT arrayMap(lambda(tuple(x), x + 1), [1, 2, 3]), lambda(1); + +-- lambda(tuple(x), x + 1) is parsed as a lambda definition, not as a call of the lambda defined in WITH +WITH (x, y) -> y AS lambda +SELECT
arrayMap(lambda(tuple(x), x + 1), [1, 2, 3]), lambda(tuple(x), x + 1), 1 AS x; -- { serverError BAD_ARGUMENTS } + +WITH (x, y) -> y AS lambda2 +SELECT arrayMap(lambda(tuple(x), x + 1), [1, 2, 3]), lambda2(tuple(x), x + 1), 1 AS x; + + +DROP TABLE test_table_tuple; +DROP TABLE test_table; + +WITH x -> (lambda(x) + 1) AS lambda +SELECT lambda(1); -- {serverError UNSUPPORTED_METHOD } + +WITH + x -> (lambda1(x) + 1) AS lambda, + lambda AS lambda1 +SELECT lambda(1); -- {serverError UNSUPPORTED_METHOD } diff --git a/parser/testdata/02343_analyzer_lambdas_issue_28083/ast.json b/parser/testdata/02343_analyzer_lambdas_issue_28083/ast.json new file mode 100644 index 000000000..447f057d8 --- /dev/null +++ b/parser/testdata/02343_analyzer_lambdas_issue_28083/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001205032, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02343_analyzer_lambdas_issue_28083/metadata.json b/parser/testdata/02343_analyzer_lambdas_issue_28083/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02343_analyzer_lambdas_issue_28083/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02343_analyzer_lambdas_issue_28083/query.sql b/parser/testdata/02343_analyzer_lambdas_issue_28083/query.sql new file mode 100644 index 000000000..dff0d7f82 --- /dev/null +++ b/parser/testdata/02343_analyzer_lambdas_issue_28083/query.sql @@ -0,0 +1,17 @@ +SET enable_analyzer = 1; + +select so, + r +from + (select [('y',0),('n',1)] as cg, + if( arrayMap( x -> x.1, cg ) != ['y', 'n'], 'y', 'n') as so, + arrayFilter( x -> x.1 = so , cg) as r + ); + +select + r +from + (select [('y',0),('n',1)] as cg, + if( arrayMap( x -> x.1, cg ) != ['y', 'n'], 'y', 'n') as so, + arrayFilter( x -> x.1 = so , cg) as r + ); diff --git a/parser/testdata/02343_analyzer_lambdas_issue_36677/ast.json b/parser/testdata/02343_analyzer_lambdas_issue_36677/ast.json new file mode 100644 index 000000000..2b44cfe0e --- /dev/null +++ b/parser/testdata/02343_analyzer_lambdas_issue_36677/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00109909, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02343_analyzer_lambdas_issue_36677/metadata.json b/parser/testdata/02343_analyzer_lambdas_issue_36677/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02343_analyzer_lambdas_issue_36677/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02343_analyzer_lambdas_issue_36677/query.sql b/parser/testdata/02343_analyzer_lambdas_issue_36677/query.sql new file mode 100644 index 000000000..da0b4e8ef --- /dev/null +++ b/parser/testdata/02343_analyzer_lambdas_issue_36677/query.sql @@ -0,0 +1,14 @@ +SET enable_analyzer = 1; + +SELECT + arraySum(x -> ((x.1) / ((x.2) * (x.2))), arrayZip(mag, magerr)) / arraySum(x -> (1. / (x * x)), magerr) AS weightedmeanmag, + arraySum(x -> ((((x.1) - weightedmeanmag) * ((x.1) - weightedmeanmag)) / ((x.2) * (x.2))), arrayZip(mag, magerr)) AS chi2, + [1, 2, 3, 4] AS mag, + [0.1, 0.2, 0.1, 0.2] AS magerr; + +SELECT + arraySum(x -> ((x.1) / ((x.2) * (x.2))), arrayZip(mag, magerr)) / arraySum(x -> (1. 
/ (x * x)), magerr) AS weightedmeanmag, + arraySum(x -> ((((x.1) - weightedmeanmag) * ((x.1) - weightedmeanmag)) / ((x.2) * (x.2))), arrayZip(mag, magerr)) AS chi2, + [1, 2, 3, 4] AS mag, + [0.1, 0.2, 0.1, 0.2] AS magerr +WHERE isFinite(chi2) diff --git a/parser/testdata/02343_create_empty_as_select/ast.json b/parser/testdata/02343_create_empty_as_select/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02343_create_empty_as_select/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02343_create_empty_as_select/metadata.json b/parser/testdata/02343_create_empty_as_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02343_create_empty_as_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02343_create_empty_as_select/query.sql b/parser/testdata/02343_create_empty_as_select/query.sql new file mode 100644 index 000000000..54f383b55 --- /dev/null +++ b/parser/testdata/02343_create_empty_as_select/query.sql @@ -0,0 +1,18 @@ + +drop table if exists t; +drop table if exists mv; + +create table t engine=Memory empty; -- { clientError SYNTAX_ERROR } +create table t engine=Memory empty as; -- { clientError SYNTAX_ERROR } +create table t engine=Memory as; -- { clientError SYNTAX_ERROR } +create table t engine=Memory empty as select 1; + +show create table t; +select count() from t; + +create materialized view mv engine=Memory empty as select 1; +show create mv; +select count() from mv; + +drop table t; +drop table mv; diff --git a/parser/testdata/02343_group_by_use_nulls/ast.json b/parser/testdata/02343_group_by_use_nulls/ast.json new file mode 100644 index 000000000..be6226401 --- /dev/null +++ b/parser/testdata/02343_group_by_use_nulls/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001258981, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02343_group_by_use_nulls/metadata.json b/parser/testdata/02343_group_by_use_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02343_group_by_use_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02343_group_by_use_nulls/query.sql b/parser/testdata/02343_group_by_use_nulls/query.sql new file mode 100644 index 000000000..691cafe63 --- /dev/null +++ b/parser/testdata/02343_group_by_use_nulls/query.sql @@ -0,0 +1,76 @@ +set optimize_group_by_function_keys=0; +-- { echoOn } +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY ROLLUP(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; + +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY ROLLUP(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=0; + +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY CUBE(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; + +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY CUBE(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=0; + +SELECT + number, + number % 2, + sum(number) AS val +FROM numbers(10) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls = 1; + +SELECT + number, + number % 2, + 
sum(number) AS val +FROM numbers(10) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls = 0; + +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY ROLLUP(number, number % 2) WITH TOTALS +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; + +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY CUBE(number, number % 2) WITH TOTALS +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; + +SELECT + number, + number % 2, + sum(number) AS val +FROM numbers(10) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY 1, tuple(val) +SETTINGS group_by_use_nulls = 1, max_bytes_before_external_sort=10, max_bytes_ratio_before_external_sort=0; diff --git a/parser/testdata/02343_group_by_use_nulls_distributed/ast.json b/parser/testdata/02343_group_by_use_nulls_distributed/ast.json new file mode 100644 index 000000000..c0bd0ad93 --- /dev/null +++ b/parser/testdata/02343_group_by_use_nulls_distributed/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001194574, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02343_group_by_use_nulls_distributed/metadata.json b/parser/testdata/02343_group_by_use_nulls_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02343_group_by_use_nulls_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02343_group_by_use_nulls_distributed/query.sql b/parser/testdata/02343_group_by_use_nulls_distributed/query.sql new file mode 100644 index 000000000..964857282 --- /dev/null +++ b/parser/testdata/02343_group_by_use_nulls_distributed/query.sql @@ -0,0 +1,53 @@ +set optimize_group_by_function_keys=0; + +-- { echoOn } +SELECT number, number % 2, sum(number) AS val +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY ROLLUP(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; + +SELECT number, number % 2, sum(number) AS val +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY ROLLUP(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=0; + +SELECT number, number % 2, sum(number) AS val +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY CUBE(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; + +SELECT number, number % 2, sum(number) AS val +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY CUBE(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=0; + +SELECT + number, + number % 2, + sum(number) AS val +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls = 1; + +SELECT + number, + number % 2, + sum(number) AS val +FROM remote('127.0.0.{2,3}', numbers(10)) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls = 0; + diff --git a/parser/testdata/02343_read_from_s3_compressed_blocks/ast.json b/parser/testdata/02343_read_from_s3_compressed_blocks/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02343_read_from_s3_compressed_blocks/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/02343_read_from_s3_compressed_blocks/metadata.json b/parser/testdata/02343_read_from_s3_compressed_blocks/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02343_read_from_s3_compressed_blocks/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02343_read_from_s3_compressed_blocks/query.sql b/parser/testdata/02343_read_from_s3_compressed_blocks/query.sql new file mode 100644 index 000000000..4049cb7b3 --- /dev/null +++ b/parser/testdata/02343_read_from_s3_compressed_blocks/query.sql @@ -0,0 +1,15 @@ +-- Tags: no-parallel, no-fasttest + +DROP TABLE IF EXISTS t_s3_compressed_blocks; + +CREATE TABLE t_s3_compressed_blocks (id UInt64, s String CODEC(NONE)) +ENGINE = MergeTree ORDER BY id +SETTINGS storage_policy = 's3_cache', +min_bytes_for_wide_part = 0; + +INSERT INTO t_s3_compressed_blocks SELECT number, randomPrintableASCII(128) from numbers(57344); + +SET max_threads = 1; +SELECT count() FROM t_s3_compressed_blocks WHERE NOT ignore(s); + +DROP TABLE t_s3_compressed_blocks; diff --git a/parser/testdata/02344_analyzer_multiple_aliases_for_expression/ast.json b/parser/testdata/02344_analyzer_multiple_aliases_for_expression/ast.json new file mode 100644 index 000000000..73bc095d3 --- /dev/null +++ b/parser/testdata/02344_analyzer_multiple_aliases_for_expression/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001108919, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02344_analyzer_multiple_aliases_for_expression/metadata.json b/parser/testdata/02344_analyzer_multiple_aliases_for_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02344_analyzer_multiple_aliases_for_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02344_analyzer_multiple_aliases_for_expression/query.sql b/parser/testdata/02344_analyzer_multiple_aliases_for_expression/query.sql new file mode 100644 index 000000000..3c7ea4677 --- /dev/null +++ b/parser/testdata/02344_analyzer_multiple_aliases_for_expression/query.sql @@ -0,0 +1,27 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT 1 AS value, 1 AS value; +SELECT id AS value, id AS value FROM test_table; +WITH x -> x + 1 AS lambda, x -> x + 1 AS lambda SELECT lambda(1); +SELECT (SELECT 1) AS subquery, (SELECT 1) AS subquery; + +SELECT 1 AS value, 2 AS value; -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } +SELECT plus(1, 1) AS value, 2 AS value; -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } +SELECT (SELECT 1) AS subquery, 1 AS subquery; -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } +WITH x -> x + 1 AS lambda, x -> x + 2 AS lambda SELECT lambda(1); -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } +WITH x -> x + 1 AS lambda SELECT (SELECT 1) AS lambda; -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } +WITH x -> x + 1 AS lambda SELECT 1 AS lambda; -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } +SELECT id AS value, value AS value FROM test_table; -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } +SELECT id AS value_1, value AS value_1 FROM test_table; -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } +SELECT id AS value, (id + 1) AS value FROM test_table; -- { serverError 
MULTIPLE_EXPRESSIONS_FOR_ALIAS } + +DROP TABLE test_table; diff --git a/parser/testdata/02344_distinct_limit_distiributed/ast.json b/parser/testdata/02344_distinct_limit_distiributed/ast.json new file mode 100644 index 000000000..0c8517822 --- /dev/null +++ b/parser/testdata/02344_distinct_limit_distiributed/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_distinct_limit (children 1)" + }, + { + "explain": " Identifier t_distinct_limit" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001026304, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/02344_distinct_limit_distiributed/metadata.json b/parser/testdata/02344_distinct_limit_distiributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02344_distinct_limit_distiributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02344_distinct_limit_distiributed/query.sql b/parser/testdata/02344_distinct_limit_distiributed/query.sql new file mode 100644 index 000000000..c963199e0 --- /dev/null +++ b/parser/testdata/02344_distinct_limit_distiributed/query.sql @@ -0,0 +1,26 @@ +drop table if exists t_distinct_limit; + +create table t_distinct_limit (d Date, id Int64) +engine = MergeTree partition by toYYYYMM(d) order by d SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +set max_threads = 10; + +insert into t_distinct_limit select '2021-12-15', -1 from numbers(1e6); +insert into t_distinct_limit select '2021-12-15', -1 from numbers(1e6); +insert into t_distinct_limit select '2021-12-15', -1 from numbers(1e6); +insert into t_distinct_limit select '2022-12-15', 1 from numbers(1e6); +insert into t_distinct_limit select '2022-12-15', 1 from numbers(1e6); +insert into t_distinct_limit select '2022-12-16', 11 from numbers(1); +insert into t_distinct_limit select '2023-12-16', 12 from numbers(1); +insert into t_distinct_limit select '2023-12-16', 13 from numbers(1); +insert into t_distinct_limit select '2023-12-16', 14 from numbers(1); + +set max_block_size = 1024; + +select id from +( + select distinct id from remote('127.0.0.1,127.0.0.2', currentDatabase(),t_distinct_limit) limit 10 +) +order by id; + +drop table if exists t_distinct_limit; diff --git a/parser/testdata/02344_insert_profile_events_stress/ast.json b/parser/testdata/02344_insert_profile_events_stress/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02344_insert_profile_events_stress/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02344_insert_profile_events_stress/metadata.json b/parser/testdata/02344_insert_profile_events_stress/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02344_insert_profile_events_stress/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02344_insert_profile_events_stress/query.sql b/parser/testdata/02344_insert_profile_events_stress/query.sql new file mode 100644 index 000000000..902e1da54 --- /dev/null +++ b/parser/testdata/02344_insert_profile_events_stress/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-parallel, long, no-debug, no-tsan, no-msan, no-asan +SET max_rows_to_read = 0; + +create table data_02344 (key Int) engine=Null; +-- 3e9 rows is enough to fill the socket buffer and cause INSERT to hang.
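+-- prefer_localhost_replica=0 keeps the INSERT below going over a real network connection instead of the local-replica shortcut, which is what lets the socket buffer fill up.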
+insert into function remote('127.1', currentDatabase(), data_02344) select number from numbers(3e9) settings prefer_localhost_replica=0; diff --git a/parser/testdata/02345_analyzer_subqueries/ast.json b/parser/testdata/02345_analyzer_subqueries/ast.json new file mode 100644 index 000000000..ff4fb8dda --- /dev/null +++ b/parser/testdata/02345_analyzer_subqueries/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001440634, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02345_analyzer_subqueries/metadata.json b/parser/testdata/02345_analyzer_subqueries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02345_analyzer_subqueries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02345_analyzer_subqueries/query.sql b/parser/testdata/02345_analyzer_subqueries/query.sql new file mode 100644 index 000000000..d1ec9b58e --- /dev/null +++ b/parser/testdata/02345_analyzer_subqueries/query.sql @@ -0,0 +1,51 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT 'Scalar subqueries'; + +SELECT (SELECT 1); +WITH 1 AS a SELECT (SELECT a); + +SELECT (SELECT id FROM test_table); +SELECT (SELECT value FROM test_table); +SELECT (SELECT id, value FROM test_table); + +SELECT 'Subqueries FROM section'; + +SELECT a FROM (SELECT 1 AS a) AS b; +SELECT b.a FROM (SELECT 1 AS a) AS b; + +SELECT a FROM (SELECT 1 AS a) AS b; +SELECT b.a FROM (SELECT 1 AS a) AS b; + +WITH 1 AS global_a SELECT a FROM (SELECT global_a AS a) AS b; +WITH 1 AS global_a SELECT b.a FROM (SELECT global_a AS a) AS b; + +SELECT * FROM (SELECT * FROM (SELECT * FROM test_table)); +SELECT * FROM (SELECT id, value FROM (SELECT * FROM test_table)); + +WITH 1 AS a SELECT (SELECT * FROM (SELECT * FROM (SELECT a + 1))); + +SELECT 'Subqueries CTE'; + +WITH subquery AS (SELECT 1 AS a) SELECT * FROM subquery; +WITH subquery AS (SELECT 1 AS a) SELECT a FROM subquery; +WITH subquery AS (SELECT 1 AS a) SELECT subquery.a FROM subquery; +WITH subquery AS (SELECT 1 AS a) SELECT subquery.* FROM subquery; +WITH subquery AS (SELECT 1 AS a) SELECT subquery.* APPLY toString FROM subquery; +WITH subquery AS (SELECT 1 AS a) SELECT subquery_alias.a FROM subquery AS subquery_alias; +WITH subquery AS (SELECT 1 AS a) SELECT subquery_alias.* FROM subquery AS subquery_alias; +WITH subquery AS (SELECT 1 AS a) SELECT subquery_alias.* APPLY toString FROM subquery AS subquery_alias; + +WITH subquery_1 AS (SELECT 1 AS a), subquery_2 AS (SELECT 1 + subquery_1.a FROM subquery_1) SELECT * FROM subquery_2; +WITH subquery_1 AS (SELECT 1 AS a), subquery_2 AS (SELECT (1 + subquery_1.a) AS a FROM subquery_1) SELECT subquery_2.a FROM subquery_2; + +DROP TABLE test_table; diff --git a/parser/testdata/02345_create_table_allow_trailing_comma/ast.json b/parser/testdata/02345_create_table_allow_trailing_comma/ast.json new file mode 100644 index 000000000..ea51c96f5 --- /dev/null +++ b/parser/testdata/02345_create_table_allow_trailing_comma/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery trailing_comma_1 (children 1)" + }, + { + "explain": " Identifier trailing_comma_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 
0.0010915, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/02345_create_table_allow_trailing_comma/metadata.json b/parser/testdata/02345_create_table_allow_trailing_comma/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02345_create_table_allow_trailing_comma/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02345_create_table_allow_trailing_comma/query.sql b/parser/testdata/02345_create_table_allow_trailing_comma/query.sql new file mode 100644 index 000000000..54a0d47bd --- /dev/null +++ b/parser/testdata/02345_create_table_allow_trailing_comma/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS trailing_comma_1 SYNC; +CREATE TABLE trailing_comma_1 (id INT NOT NULL DEFAULT 1,) ENGINE=MergeTree() ORDER BY tuple(); +DESCRIBE TABLE trailing_comma_1; +DROP TABLE trailing_comma_1; + +DROP TABLE IF EXISTS trailing_comma_2 SYNC; +CREATE TABLE trailing_comma_2 (id INT DEFAULT 1,) ENGINE=MergeTree() ORDER BY tuple(); +DESCRIBE TABLE trailing_comma_2; +DROP TABLE trailing_comma_2; + +DROP TABLE IF EXISTS trailing_comma_3 SYNC; +CREATE TABLE trailing_comma_3 (x UInt8, y UInt8,) ENGINE=MergeTree() ORDER BY tuple(); +DESCRIBE TABLE trailing_comma_3; +DROP TABLE trailing_comma_3; diff --git a/parser/testdata/02345_implicit_transaction/ast.json b/parser/testdata/02345_implicit_transaction/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02345_implicit_transaction/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02345_implicit_transaction/metadata.json b/parser/testdata/02345_implicit_transaction/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02345_implicit_transaction/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02345_implicit_transaction/query.sql b/parser/testdata/02345_implicit_transaction/query.sql new file mode 100644 index 000000000..7c206b298 --- /dev/null +++ b/parser/testdata/02345_implicit_transaction/query.sql @@ -0,0 +1,110 @@ +-- Tags: no-ordinary-database, no-fasttest, no-encrypted-storage +DROP TABLE IF EXISTS landing_to_target; +DROP TABLE IF EXISTS target; +DROP TABLE IF EXISTS landing; + +CREATE TABLE landing (n Int64) engine=MergeTree order by n; +CREATE TABLE target (n Int64) engine=MergeTree order by n; +CREATE MATERIALIZED VIEW landing_to_target TO target AS + SELECT n + throwIf(n == 3333) AS n + FROM landing; + +-- There is no guarantee about what is inserted in case of an exception, +-- so that initial check is meaningless. +-- We are only sure that if the internal mv's exception is ignored then all the data is inserted into the table landing +INSERT INTO landing SELECT * FROM numbers(10000) SETTINGS materialized_views_ignore_errors=1; +SELECT 'no_transaction_landing', count() = 10000 FROM landing; +SELECT 'no_transaction_target', count() < 10000 FROM target; + +TRUNCATE TABLE landing; +TRUNCATE TABLE target; + + +BEGIN TRANSACTION; +INSERT INTO landing SELECT * FROM numbers(10000); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +ROLLBACK; +SELECT 'after_transaction_landing', count() FROM landing; +SELECT 'after_transaction_target', count() FROM target; + +-- Same but using implicit_transaction +INSERT INTO landing SETTINGS implicit_transaction=True SELECT * FROM numbers(10000); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +SELECT 'after_implicit_txn_in_query_settings_landing', count() FROM landing; +SELECT
'after_implicit_txn_in_query_settings_target', count() FROM target; + +-- Same but using implicit_transaction in a session +SET implicit_transaction=True; +INSERT INTO landing SELECT * FROM numbers(10000); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +SET implicit_transaction=False; +SELECT 'after_implicit_txn_in_session_landing', count() FROM landing; +SELECT 'after_implicit_txn_in_session_target', count() FROM target; + +-- Reading from incompatible sources with implicit_transaction works the same way as with normal transactions: +-- Currently reading from system tables inside a transaction is Not implemented: +SELECT name, value, changed FROM system.settings where name = 'implicit_transaction' SETTINGS implicit_transaction=True; -- { serverError NOT_IMPLEMENTED } + + +-- Verify that you don't have to manually close transactions with implicit_transaction +SET implicit_transaction=True; +SELECT throwIf(number == 0) FROM numbers(100); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +SELECT throwIf(number == 0) FROM numbers(100); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +SELECT throwIf(number == 0) FROM numbers(100); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +SELECT throwIf(number == 0) FROM numbers(100); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +SET implicit_transaction=False; + +-- implicit_transaction is ignored when inside a transaction (no recursive transaction error) +BEGIN TRANSACTION; +SELECT 'inside_txn_and_implicit', 1 SETTINGS implicit_transaction=True; +SELECT throwIf(number == 0) FROM numbers(100) SETTINGS implicit_transaction=True; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +ROLLBACK; + +SELECT 'inside_txn_and_implicit', 1 SETTINGS implicit_transaction=True; + +-- You can work with transactions even if `implicit_transaction=True` is set +SET implicit_transaction=True; +BEGIN TRANSACTION; +INSERT INTO target SELECT * FROM numbers(10000); +SELECT 'in_transaction', count() FROM target; +ROLLBACK; +SELECT 'out_transaction', count() FROM target; +SET implicit_transaction=False; + + +-- Verify that the transaction_id column is populated correctly +SELECT 'Looking_at_transaction_id_True' FORMAT Null SETTINGS implicit_transaction=1; +-- Verify that the transaction_id column is NOT populated without transaction +SELECT 'Looking_at_transaction_id_False' FORMAT Null SETTINGS implicit_transaction=0; +SYSTEM FLUSH LOGS query_log; + +SELECT + 'implicit_True', + count() as all, + transaction_id = (0,0,'00000000-0000-0000-0000-000000000000') as is_empty +FROM system.query_log +WHERE + current_database = currentDatabase() AND + event_date >= yesterday() AND + query LIKE '-- Verify that the transaction_id column is populated correctly%' +GROUP BY transaction_id +FORMAT JSONEachRow; + +SELECT + 'implicit_False', + count() as all, + transaction_id = (0,0,'00000000-0000-0000-0000-000000000000') as is_empty +FROM system.query_log +WHERE + current_database = currentDatabase() AND + event_date >= yesterday() AND + query LIKE '-- Verify that the transaction_id column is NOT populated without transaction%' +GROUP BY transaction_id +FORMAT JSONEachRow; + +SET implicit_transaction=1; +SET throw_on_unsupported_query_inside_transaction=1; +SELECT * FROM system.one; +SELECT * FROM cluster('test_cluster_interserver_secret', system, one); -- { serverError NOT_IMPLEMENTED } +SELECT * FROM cluster('test_cluster_two_shards', system, one); -- { serverError NOT_IMPLEMENTED } +SET throw_on_unsupported_query_inside_transaction=0; +-- there's no
session in the interserver mode +SELECT * FROM cluster('test_cluster_interserver_secret', system, one) FORMAT Null; -- { serverError INVALID_TRANSACTION } +SELECT * FROM cluster('test_cluster_two_shards', system, one); diff --git a/parser/testdata/02345_partial_sort_transform_optimization/ast.json b/parser/testdata/02345_partial_sort_transform_optimization/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02345_partial_sort_transform_optimization/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02345_partial_sort_transform_optimization/metadata.json b/parser/testdata/02345_partial_sort_transform_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02345_partial_sort_transform_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02345_partial_sort_transform_optimization/query.sql b/parser/testdata/02345_partial_sort_transform_optimization/query.sql new file mode 100644 index 000000000..35ec675b7 --- /dev/null +++ b/parser/testdata/02345_partial_sort_transform_optimization/query.sql @@ -0,0 +1,4 @@ + +-- Regression for PartialSortingTransform optimization that requires at least 1500 rows. +SELECT * FROM (SELECT * FROM (SELECT 0 a, toNullable(number) b, toString(number) c FROM numbers(1e6)) ORDER BY a DESC, b DESC, c LIMIT 1500) limit 10; +SELECT number FROM (SELECT number, 1 AS k FROM numbers(100000) ORDER BY k ASC, number DESC LIMIT 1025, 1023) LIMIT 5; diff --git a/parser/testdata/02346_additional_filters/ast.json b/parser/testdata/02346_additional_filters/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_additional_filters/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_additional_filters/metadata.json b/parser/testdata/02346_additional_filters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_additional_filters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_additional_filters/query.sql b/parser/testdata/02346_additional_filters/query.sql new file mode 100644 index 000000000..f4b0d08d3 --- /dev/null +++ b/parser/testdata/02346_additional_filters/query.sql @@ -0,0 +1,104 @@ +-- Tags: distributed +drop table if exists table_1; +drop table if exists table_2; +drop table if exists v_numbers; +drop table if exists mv_table; + +SET max_rows_to_read = 0; + +create table table_1 (x UInt32, y String) engine = MergeTree order by x; +insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd'); + +CREATE TABLE distr_table (x UInt32, y String) ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), 'table_1'); + +-- { echoOn } + +select * from table_1; +select * from table_1 settings additional_table_filters={'table_1' : 'x != 2'}; +select * from table_1 settings additional_table_filters={'table_1' : 'x != 2 and x != 3'}; +select x from table_1 settings additional_table_filters={'table_1' : 'x != 2'}; +select y from table_1 settings additional_table_filters={'table_1' : 'x != 2'}; +select * from table_1 where x != 3 settings additional_table_filters={'table_1' : 'x != 2'}; +select * from table_1 prewhere x != 4 settings additional_table_filters={'table_1' : 'x != 2'}; +select * from table_1 prewhere x != 4 where x != 3 settings additional_table_filters={'table_1' : 'x != 2'}; +select x from table_1 where x != 3 settings additional_table_filters={'table_1' : 'x != 2'}; 
+select x from table_1 prewhere x != 4 settings additional_table_filters={'table_1' : 'x != 2'}; +select x from table_1 prewhere x != 4 where x != 3 settings additional_table_filters={'table_1' : 'x != 2'}; +select y from table_1 where x != 3 settings additional_table_filters={'table_1' : 'x != 2'}; +select y from table_1 prewhere x != 4 settings additional_table_filters={'table_1' : 'x != 2'}; +select y from table_1 prewhere x != 4 where x != 3 settings additional_table_filters={'table_1' : 'x != 2'}; +select x from table_1 where x != 2 settings additional_table_filters={'table_1' : 'x != 2'}; +select x from table_1 prewhere x != 2 settings additional_table_filters={'table_1' : 'x != 2'}; +select x from table_1 prewhere x != 2 where x != 2 settings additional_table_filters={'table_1' : 'x != 2'}; + +select * from remote('127.0.0.{1,2}', system.one) settings additional_table_filters={'system.one' : 'dummy = 0'}; +select * from remote('127.0.0.{1,2}', system.one) settings additional_table_filters={'system.one' : 'dummy != 0'}; + +select * from distr_table settings additional_table_filters={'distr_table' : 'x = 2'}; +select * from distr_table settings additional_table_filters={'distr_table' : 'x != 2 and x != 3'}; + +select * from system.numbers limit 5; +select * from system.numbers as t limit 5 settings additional_table_filters={'t' : 'number % 2 != 0'}; +select * from system.numbers limit 5 settings additional_table_filters={'system.numbers' : 'number != 3'}; +select * from system.numbers limit 5 settings additional_table_filters={'system.numbers':'number != 3','table_1':'x!=2'}; +select * from (select number from system.numbers limit 5 union all select x from table_1) order by number settings additional_table_filters={'system.numbers':'number != 3','table_1':'x!=2'}; +select number, x, y from (select number from system.numbers limit 5) f any left join (select x, y from table_1) s on f.number = s.x order by all settings additional_table_filters={'system.numbers' : 'number != 3', 'table_1' : 'x != 2'}; +select b + 1 as c from (select a + 1 as b from (select x + 1 as a from table_1)) settings additional_table_filters={'table_1' : 'x != 2 and x != 3'}; +select dummy from system.one SETTINGS additional_table_filters = {'system.one':'dummy in (select number from numbers(2))'}; + +-- { echoOff } + +create view v_numbers as select number + 1 as x from system.numbers limit 5; + +-- { echoOn } +select * from v_numbers; +select * from v_numbers settings additional_table_filters={'system.numbers' : 'number != 3'}; +select * from v_numbers settings additional_table_filters={'v_numbers' : 'x != 3'}; +select * from v_numbers settings additional_table_filters={'system.numbers' : 'number != 3', 'v_numbers' : 'x != 3'}; + +-- { echoOff } + +create table table_2 (x UInt32, y String) engine = MergeTree order by x; +insert into table_2 values (4, 'dddd'), (5, 'eeeee'), (6, 'ffffff'), (7, 'ggggggg'); + +create materialized view mv_table to table_2 (x UInt32, y String) as select * from table_1; + +-- an additional filter for the inner tables of a Materialized View does not work because it does not create an internal interpreter; +-- this is probably expected +-- { echoOn } +select * from mv_table; +select * from mv_table settings additional_table_filters={'mv_table' : 'x != 5'}; +select * from mv_table settings additional_table_filters={'table_1' : 'x != 5'}; +select * from mv_table settings additional_table_filters={'table_2' : 'x != 5'}; + +-- { echoOff } + +create table m_table (x UInt32, y String) engine =
Merge(currentDatabase(), '^table_'); + +-- an additional filter for the inner tables of a Merge table does not work because it does not create an internal interpreter; +-- this is probably expected +-- { echoOn } +select * from m_table order by x; +select * from m_table order by x settings additional_table_filters={'table_1' : 'x != 2'}; +select * from m_table order by x settings additional_table_filters={'table_2' : 'x != 5'}; +select * from m_table order by x settings additional_table_filters={'table_1' : 'x != 2', 'table_2' : 'x != 5'}; +select * from m_table order by x settings additional_table_filters={'table_1' : 'x != 4'}; +select * from m_table order by x settings additional_table_filters={'table_2' : 'x != 4'}; +select * from m_table order by x settings additional_table_filters={'table_1' : 'x != 4', 'table_2' : 'x != 4'}; +select * from m_table order by x settings additional_table_filters={'m_table' : 'x != 4'}; +select * from m_table order by x settings additional_table_filters={'m_table' : 'x != 4', 'table_1' : 'x != 2', 'table_2' : 'x != 5'}; + +-- additional_result_filter + +select * from table_1 settings additional_result_filter='x != 2'; +select *, x != 2 from table_1 settings additional_result_filter='x != 2'; +select * from table_1 where x != 1 settings additional_result_filter='x != 2'; +select * from table_1 where x != 1 settings additional_result_filter='x != 2 and x != 3'; +select * from table_1 prewhere x != 3 where x != 1 settings additional_result_filter='x != 2'; + +select * from table_1 limit 3 settings additional_result_filter='x != 2'; + +select x + 1 from table_1 settings additional_result_filter='`plus(x, 1)` != 2'; + +select * from (select x + 1 as a, y from table_1 union all select x as a, y from table_1) order by a, y settings additional_result_filter='a = 3'; +select * from (select x + 1 as a, y from table_1 union all select x as a, y from table_1) order by a, y settings additional_result_filter='a != 3'; diff --git a/parser/testdata/02346_additional_filters_distr/ast.json b/parser/testdata/02346_additional_filters_distr/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_additional_filters_distr/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_additional_filters_distr/metadata.json b/parser/testdata/02346_additional_filters_distr/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_additional_filters_distr/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_additional_filters_distr/query.sql b/parser/testdata/02346_additional_filters_distr/query.sql new file mode 100644 index 000000000..bc9c1715c --- /dev/null +++ b/parser/testdata/02346_additional_filters_distr/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-parallel, distributed + +create database if not exists shard_0; +create database if not exists shard_1; + +drop table if exists dist_02346; +drop table if exists shard_0.data_02346; +drop table if exists shard_1.data_02346; + +create table shard_0.data_02346 (x UInt32, y String) engine = MergeTree order by x settings index_granularity = 2; +insert into shard_0.data_02346 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd'); + +create table shard_1.data_02346 (x UInt32, y String) engine = MergeTree order by x settings index_granularity = 2; +insert into shard_1.data_02346 values (5, 'a'), (6, 'bb'), (7, 'ccc'), (8, 'dddd'); + +create table dist_02346 (x UInt32, y String)
engine=Distributed('test_cluster_two_shards_different_databases', /* default_database= */ '', data_02346); + +set max_rows_to_read=4; + +select * from dist_02346 order by x settings additional_table_filters={'dist_02346' : 'x > 3 and x < 7'}; diff --git a/parser/testdata/02346_additional_filters_index/ast.json b/parser/testdata/02346_additional_filters_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_additional_filters_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_additional_filters_index/metadata.json b/parser/testdata/02346_additional_filters_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_additional_filters_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_additional_filters_index/query.sql b/parser/testdata/02346_additional_filters_index/query.sql new file mode 100644 index 000000000..cf5a4b2d8 --- /dev/null +++ b/parser/testdata/02346_additional_filters_index/query.sql @@ -0,0 +1,23 @@ +-- Tags: distributed +create table table_1 (x UInt32, y String, INDEX a (length(y)) TYPE minmax GRANULARITY 1) engine = MergeTree order by x settings index_granularity = 2; +insert into table_1 values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd'); + +CREATE TABLE distr_table (x UInt32, y String) ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), 'table_1'); + +-- { echoOn } +set max_rows_to_read = 2; + +select * from table_1 order by x settings additional_table_filters={'table_1' : 'x > 3'}; +select * from table_1 order by x settings additional_table_filters={'table_1' : 'x < 3'}; + +select * from table_1 order by x settings additional_table_filters={'table_1' : 'length(y) >= 3'}; +select * from table_1 order by x settings additional_table_filters={'table_1' : 'length(y) < 3'}; + +set max_rows_to_read = 4; + +select * from distr_table order by x settings additional_table_filters={'distr_table' : 'x > 3'}; +select * from distr_table order by x settings additional_table_filters={'distr_table' : 'x < 3'}; + +select * from distr_table order by x settings additional_table_filters={'distr_table' : 'length(y) > 3'}; +select * from distr_table order by x settings additional_table_filters={'distr_table' : 'length(y) < 3'}; + diff --git a/parser/testdata/02346_exclude_materialize_skip_indexes_on_insert/ast.json b/parser/testdata/02346_exclude_materialize_skip_indexes_on_insert/ast.json new file mode 100644 index 000000000..11c8d8eac --- /dev/null +++ b/parser/testdata/02346_exclude_materialize_skip_indexes_on_insert/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001206943, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02346_exclude_materialize_skip_indexes_on_insert/metadata.json b/parser/testdata/02346_exclude_materialize_skip_indexes_on_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_exclude_materialize_skip_indexes_on_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_exclude_materialize_skip_indexes_on_insert/query.sql b/parser/testdata/02346_exclude_materialize_skip_indexes_on_insert/query.sql new file mode 100644 index 000000000..1b6963a86 --- /dev/null +++ b/parser/testdata/02346_exclude_materialize_skip_indexes_on_insert/query.sql @@ -0,0 
+1,128 @@ +SET parallel_replicas_local_plan = 1; -- this setting may skip index analysis when false +SET use_skip_indexes_on_data_read = 0; +SET mutations_sync = 2; -- disable asynchronous mutations + +CREATE TABLE tab +( + a UInt64, + b UInt64, + INDEX idx_a a TYPE minmax, + INDEX `id,x_b` b TYPE set(3) +) +ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 4; + +INSERT INTO tab SELECT number, number / 50 FROM numbers(100) +SETTINGS exclude_materialize_skip_indexes_on_insert='!@#$^#$&#$$%$,,.,3.45,45.'; -- { serverError CANNOT_PARSE_TEXT } + +CREATE VIEW explain_indexes +AS SELECT trimLeft(explain) AS explain +FROM +( + SELECT * + FROM viewExplain('EXPLAIN', 'indexes = 1', ( + SELECT count() + FROM tab + WHERE (a >= 90) AND (a < 110) AND (b = 2) + )) +) +WHERE (explain LIKE '%Name%') OR (explain LIKE '%Description%') OR (explain LIKE '%Parts%') OR (explain LIKE '%Granules%') OR (explain LIKE '%Range%'); + +SET exclude_materialize_skip_indexes_on_insert='idx_a'; + +SYSTEM STOP MERGES tab; + +INSERT INTO tab SELECT number, number / 50 FROM numbers(100); +INSERT INTO tab SELECT number, number / 50 FROM numbers(100, 100); + +SELECT 'idx_a is excluded, so it should perform no filtering, while `id,x_b` should perform filtering since it is included'; +SELECT * FROM explain_indexes; + +SYSTEM START MERGES tab; +OPTIMIZE TABLE tab FINAL; + +SELECT ''; +SELECT 'After START MERGES and OPTIMIZE TABLE both indexes should participate in filtering'; +SELECT * FROM explain_indexes; + +TRUNCATE TABLE tab; + +INSERT INTO tab SELECT number, number / 50 FROM numbers(100); +INSERT INTO tab SELECT number, number / 50 FROM numbers(100, 100); + +SET mutations_sync = 2; + +ALTER TABLE tab MATERIALIZE INDEX idx_a; +ALTER TABLE tab MATERIALIZE INDEX `id,x_b`; + +SELECT ''; +SELECT 'MATERIALIZE INDEX should also cause both indexes to participate in filtering despite exclude setting'; +SELECT * FROM explain_indexes; + +TRUNCATE TABLE tab; + +SYSTEM STOP MERGES tab; + +INSERT INTO tab SELECT number, number / 50 FROM numbers(100) SETTINGS exclude_materialize_skip_indexes_on_insert='`id,x_b`'; +INSERT INTO tab SELECT number, number / 50 FROM numbers(100, 100) SETTINGS exclude_materialize_skip_indexes_on_insert='`id,x_b`'; + +SELECT ''; +SELECT 'query-level session setting should override session setting at file level, so id,x_b should not be updated'; +SELECT * FROM explain_indexes; + +SYSTEM FLUSH LOGS query_log; + +SELECT ''; +SELECT 'Count query log entries containing index updates on INSERT'; +SELECT count() +FROM system.query_log +WHERE current_database = currentDatabase() + AND query LIKE 'INSERT INTO tab SELECT%' + AND type = 'QueryFinish'; + +TRUNCATE TABLE tab; + +SET exclude_materialize_skip_indexes_on_insert='idx_a, `id,x_b`'; + +SYSTEM STOP MERGES tab; + +INSERT INTO tab SELECT number, number / 50 FROM numbers(100); +INSERT INTO tab SELECT number, number / 50 FROM numbers(100, 100); + +SELECT ''; +SELECT 'Both indexes are excluded, so neither should participate in filtering'; +SELECT * FROM explain_indexes; + +SYSTEM START MERGES tab; +OPTIMIZE TABLE tab FINAL; + +SELECT ''; +SELECT 'After START MERGES and OPTIMIZE TABLE both indexes should participate in filtering'; +SELECT * FROM explain_indexes; + +TRUNCATE TABLE tab; + +INSERT INTO tab SELECT number, number / 50 FROM numbers(100); +INSERT INTO tab SELECT number, number / 50 FROM numbers(100, 100); + +SET mutations_sync = 2; + +ALTER TABLE tab MATERIALIZE INDEX idx_a; +ALTER TABLE tab MATERIALIZE INDEX `id,x_b`; + +SELECT ''; +SELECT 
'MATERIALIZE INDEX should also cause both indexes to participate in filtering despite exclude setting'; +SELECT * FROM explain_indexes; + +DROP TABLE tab; +DROP VIEW explain_indexes; + +SYSTEM FLUSH LOGS query_log; + +SELECT ''; +SELECT 'Count query log entries containing index updates on INSERT'; +SELECT count() +FROM system.query_log +WHERE current_database = currentDatabase() + AND query LIKE 'INSERT INTO tab SELECT%' + AND type = 'QueryFinish'; + diff --git a/parser/testdata/02346_exclude_materialize_skip_indexes_on_merge/ast.json b/parser/testdata/02346_exclude_materialize_skip_indexes_on_merge/ast.json new file mode 100644 index 000000000..ff8a3fac3 --- /dev/null +++ b/parser/testdata/02346_exclude_materialize_skip_indexes_on_merge/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001040503, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02346_exclude_materialize_skip_indexes_on_merge/metadata.json b/parser/testdata/02346_exclude_materialize_skip_indexes_on_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_exclude_materialize_skip_indexes_on_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_exclude_materialize_skip_indexes_on_merge/query.sql b/parser/testdata/02346_exclude_materialize_skip_indexes_on_merge/query.sql new file mode 100644 index 000000000..c3ff41ada --- /dev/null +++ b/parser/testdata/02346_exclude_materialize_skip_indexes_on_merge/query.sql @@ -0,0 +1,73 @@ +SET parallel_replicas_local_plan = 1; -- this setting may skip index analysis when false +SET use_skip_indexes_on_data_read = 0; +SET materialize_skip_indexes_on_insert = 0; +SET mutations_sync = 2; -- disable asynchronous mutations + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab +( + a UInt64, + b UInt64, + INDEX idx_a a TYPE minmax, + INDEX `id,x_b` b TYPE set(3) -- weird but legal idx name just to make sure it works with setting +) +ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 4, materialize_skip_indexes_on_merge = 1; + +-- negative test case +ALTER TABLE tab MODIFY SETTING exclude_materialize_skip_indexes_on_merge ='!@#$^#$&#$$%$,,.,3.45,45.'; +INSERT INTO tab SELECT number, number / 50 FROM numbers(100); +OPTIMIZE TABLE tab FINAL; -- { serverError CANNOT_PARSE_TEXT } +TRUNCATE TABLE tab; + +CREATE VIEW explain_indexes +AS SELECT trimLeft(explain) AS explain +FROM +( + SELECT * + FROM viewExplain('EXPLAIN', 'indexes = 1', ( + SELECT count() + FROM tab + WHERE (a >= 90) AND (a < 110) AND (b = 2) + )) +) +WHERE (explain LIKE '%Name%') OR (explain LIKE '%Description%') OR (explain LIKE '%Parts%') OR (explain LIKE '%Granules%') OR (explain LIKE '%Range%'); + +SYSTEM STOP MERGES tab; +ALTER TABLE tab MODIFY SETTING exclude_materialize_skip_indexes_on_merge = 'idx_a'; + +INSERT INTO tab SELECT number, number / 50 FROM numbers(100); +INSERT INTO tab SELECT number, number / 50 FROM numbers(100, 100); + +SELECT ''; +SELECT 'idx_a is excluded, but we have not allowed a merge, so no filtering should occur'; +SELECT * FROM explain_indexes; + +SYSTEM START MERGES tab; +OPTIMIZE TABLE tab FINAL; + +SELECT ''; +SELECT 'After START MERGES and OPTIMIZE FINAL only idx_b should participate in filtering as idx_a is excluded'; +SELECT * FROM explain_indexes; + +ALTER TABLE tab MATERIALIZE INDEX idx_a; + +SELECT ''; +SELECT 'After explicit MATERIALIZE 
INDEX idx_a should also be materialized'; +SELECT * FROM explain_indexes; + +TRUNCATE TABLE tab; + +ALTER TABLE tab MODIFY SETTING exclude_materialize_skip_indexes_on_merge = 'idx_a, `id,x_b`'; + +INSERT INTO tab SELECT number, number / 50 FROM numbers(100); +INSERT INTO tab SELECT number, number / 50 FROM numbers(100, 100); + +OPTIMIZE TABLE tab FINAL; + +SELECT ''; +SELECT 'Both indexes are excluded, so neither should participate in filtering'; +SELECT * FROM explain_indexes; + +DROP TABLE tab; +DROP VIEW explain_indexes; + diff --git a/parser/testdata/02346_non_negative_derivative/ast.json b/parser/testdata/02346_non_negative_derivative/ast.json new file mode 100644 index 000000000..962176939 --- /dev/null +++ b/parser/testdata/02346_non_negative_derivative/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nnd (children 1)" + }, + { + "explain": " Identifier nnd" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001286591, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02346_non_negative_derivative/metadata.json b/parser/testdata/02346_non_negative_derivative/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_non_negative_derivative/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_non_negative_derivative/query.sql b/parser/testdata/02346_non_negative_derivative/query.sql new file mode 100644 index 000000000..ab648f2ee --- /dev/null +++ b/parser/testdata/02346_non_negative_derivative/query.sql @@ -0,0 +1,65 @@ +DROP TABLE IF EXISTS nnd; + +CREATE TABLE nnd +( + id Int8, ts DateTime64(3, 'UTC'), metric Float64 +) +ENGINE=MergeTree() +ORDER BY id; + +INSERT INTO nnd VALUES (1, toDateTime64('1979-12-12 21:21:21.123456788', 9, 'UTC'), 1.1), (2, toDateTime64('1979-12-12 21:21:21.123456789', 9, 'UTC'), 2.34), (3, toDateTime64('1979-12-12 21:21:21.127', 3, 'UTC'), 3.7); +INSERT INTO nnd VALUES (4, toDateTime64('1979-12-12 21:21:21.129', 3, 'UTC'), 2.1), (5, toDateTime('1979-12-12 21:21:22', 'UTC'), 1.3345), (6, toDateTime('1979-12-12 21:21:23', 'UTC'), 1.54), (7, toDateTime('1979-12-12 21:21:23', 'UTC'), 1.54); + +OPTIMIZE TABLE nnd; + +-- shall work for precise intervals +-- INTERVAL 1 SECOND shall be default +SELECT ( + SELECT + ts, + metric, + nonNegativeDerivative(metric, ts) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv + FROM nnd + LIMIT 5, 1 + ) = ( + SELECT + ts, + metric, + nonNegativeDerivative(metric, ts, toIntervalSecond(1)) OVER (PARTITION BY metric ORDER BY ts ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv + FROM nnd + LIMIT 5, 1 + ); +SELECT ts, metric, nonNegativeDerivative(metric, ts) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS deriv FROM nnd; +-- Nanosecond +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 3 NANOSECOND) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 2 PRECEDING AND 2 FOLLOWING) AS deriv FROM nnd; +-- Microsecond +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 4 MICROSECOND) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS deriv FROM nnd; +-- Millisecond +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 5 MILLISECOND) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 1 PRECEDING AND 1 FOLLOWING) AS deriv 
FROM nnd; +-- Second +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 6 SECOND) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; +-- Minute +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 7 MINUTE) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND 2 FOLLOWING) AS deriv FROM nnd; +-- Hour +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 8 HOUR) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; +-- Day +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 9 DAY) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 3 PRECEDING AND 3 FOLLOWING) AS deriv FROM nnd; +-- Week +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 10 WEEK) OVER (PARTITION BY id>3 ORDER BY ts, metric ASC Rows BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; + +-- shall not work for month, quarter, year (intervals with floating number of seconds) +-- Month +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 11 MONTH) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- Quarter +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 12 QUARTER) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- Year +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 13 YEAR) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- test against wrong arguments/types +SELECT ts, metric, nonNegativeDerivative(metric, 1, INTERVAL 3 NANOSECOND) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS } +SELECT ts, metric, nonNegativeDerivative('string not datetime', ts, INTERVAL 3 NANOSECOND) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError BAD_ARGUMENTS } +SELECT ts, metric, nonNegativeDerivative(metric, ts, INTERVAL 3 NANOSECOND, id) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT ts, metric, nonNegativeDerivative(metric) OVER (PARTITION BY metric ORDER BY ts, metric ASC Rows BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS deriv FROM nnd; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +-- cleanup +DROP TABLE IF EXISTS nnd; diff --git a/parser/testdata/02346_position_countsubstrings_zero_byte/ast.json b/parser/testdata/02346_position_countsubstrings_zero_byte/ast.json new file mode 100644 index 000000000..e34d8880c --- /dev/null +++ b/parser/testdata/02346_position_countsubstrings_zero_byte/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001195266, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git 
a/parser/testdata/02346_position_countsubstrings_zero_byte/metadata.json b/parser/testdata/02346_position_countsubstrings_zero_byte/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_position_countsubstrings_zero_byte/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_position_countsubstrings_zero_byte/query.sql b/parser/testdata/02346_position_countsubstrings_zero_byte/query.sql new file mode 100644 index 000000000..6208baf41 --- /dev/null +++ b/parser/testdata/02346_position_countsubstrings_zero_byte/query.sql @@ -0,0 +1,24 @@ +drop table if exists tab; + +create table tab (id UInt32, haystack String, pattern String) engine = MergeTree() order by id; +insert into tab values (1, 'aaaxxxaa\0xxx', 'x'); + +select countSubstrings('aaaxxxaa\0xxx', pattern) from tab where id = 1; +select countSubstringsCaseInsensitive('aaaxxxaa\0xxx', pattern) from tab where id = 1; +select countSubstringsCaseInsensitiveUTF8('aaaxxxaa\0xxx', pattern) from tab where id = 1; + +select countSubstrings(haystack, pattern) from tab where id = 1; +select countSubstringsCaseInsensitive(haystack, pattern) from tab where id = 1; +select countSubstringsCaseInsensitiveUTF8(haystack, pattern) from tab where id = 1; + +insert into tab values (2, 'aaaaa\0x', 'x'); + +select position('aaaaa\0x', pattern) from tab where id = 2; +select positionCaseInsensitive('aaaaa\0x', pattern) from tab where id = 2; +select positionCaseInsensitiveUTF8('aaaaa\0x', pattern) from tab where id = 2; + +select position(haystack, pattern) from tab where id = 2; +select positionCaseInsensitive(haystack, pattern) from tab where id = 2; +select positionCaseInsensitiveUTF8(haystack, pattern) from tab where id = 2; + +drop table if exists tab; diff --git a/parser/testdata/02346_text_index_array_support/ast.json b/parser/testdata/02346_text_index_array_support/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_array_support/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_array_support/metadata.json b/parser/testdata/02346_text_index_array_support/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_array_support/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_array_support/query.sql b/parser/testdata/02346_text_index_array_support/query.sql new file mode 100644 index 000000000..b7f109b57 --- /dev/null +++ b/parser/testdata/02346_text_index_array_support/query.sql @@ -0,0 +1,217 @@ +-- Tags: no-parallel-replicas + +-- Tests that text indexes can be built on and used with Array columns.
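+-- A minimal sketch of the shape being tested (illustrative names only, not executed here): +-- CREATE TABLE t (arr Array(String), INDEX idx(arr) TYPE text(tokenizer = 'splitByNonAlpha')) ENGINE = MergeTree ORDER BY tuple(); +-- SELECT count() FROM t WHERE has(arr, 'foo'); +-- The tokenizer is applied to each array element, so has(), hasAnyTokens() and hasAllTokens() filters can be answered from the index.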
+ +SET enable_analyzer = 1; +SET allow_experimental_full_text_index = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + id UInt32, + arr Array(String), + arr_fixed Array(FixedString(3)), + INDEX array_idx(arr) TYPE text(tokenizer = 'splitByNonAlpha') GRANULARITY 1, + INDEX array_fixed_idx(arr_fixed) TYPE text(tokenizer = 'splitByNonAlpha') GRANULARITY 1, +) +ENGINE = MergeTree() +ORDER BY (id) +SETTINGS index_granularity = 1; + +INSERT INTO tab SELECT number, ['abc'], ['abc'] FROM numbers(512); +INSERT INTO tab SELECT number, ['foo'], ['foo'] FROM numbers(512); +INSERT INTO tab SELECT number, ['bar'], ['bar'] FROM numbers(512); +INSERT INTO tab SELECT number, ['foo', 'bar'], ['foo', 'bar'] FROM numbers(512); +INSERT INTO tab SELECT number, ['foo', 'baz'], ['foo', 'baz'] FROM numbers(512); +INSERT INTO tab SELECT number, ['bar', 'baz'], ['bar', 'baz'] FROM numbers(512); + +SELECT 'has support'; + +SELECT '-- with String'; +SELECT count() FROM tab WHERE has(arr, 'foo'); +SELECT count() FROM tab WHERE has(arr, 'bar'); +SELECT count() FROM tab WHERE has(arr, 'baz'); +SELECT count() FROM tab WHERE has(arr, 'def'); + +SELECT '-- with FixedString'; +SELECT count() FROM tab WHERE has(arr_fixed, toFixedString('foo', 3)); +SELECT count() FROM tab WHERE has(arr_fixed, toFixedString('bar', 3)); +SELECT count() FROM tab WHERE has(arr_fixed, toFixedString('baz', 3)); +SELECT count() FROM tab WHERE has(arr_fixed, toFixedString('def', 3)); + +SELECT '-- Check that the text index actually gets used (String)'; + +DROP VIEW IF EXISTS explain_index_has; +CREATE VIEW explain_index_has AS ( + SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + SELECT count() FROM tab WHERE ( + CASE + WHEN {use_idx_fixed:boolean} = 1 THEN has(arr_fixed, {filter:FixedString(3)}) + ELSE has(arr, {filter:String}) + END + ) + ) + WHERE explain LIKE '%Description:%' OR explain LIKE '%Granules:%' + LIMIT 1, 2 +); + +SELECT '-- -- value exists only in 512 granules'; +SELECT * FROM explain_index_has(use_idx_fixed = 0, filter = 'abc'); + +SELECT '-- -- value exists only in 1024 granules'; +SELECT * FROM explain_index_has(use_idx_fixed = 0, filter = 'baz'); + +SELECT '-- -- value exists only in 1536 granules'; +SELECT * FROM explain_index_has(use_idx_fixed = 0, filter = 'foo'); + +SELECT '-- -- value exists only in 1536 granules'; +SELECT * FROM explain_index_has(use_idx_fixed = 0, filter = 'bar'); + +SELECT '-- -- value does not exist in granules'; +SELECT * FROM explain_index_has(use_idx_fixed = 0, filter = 'def'); + +SELECT '-- Check that the text index actually gets used (FixedString)'; + +SELECT '-- -- value exists only in 512 granules'; +SELECT * FROM explain_index_has(use_idx_fixed = 1, filter = toFixedString('abc', 3)); + +SELECT '-- -- value exists only in 1024 granules'; +SELECT * FROM explain_index_has(use_idx_fixed = 1, filter = toFixedString('baz', 3)); + +SELECT '-- -- value exists only in 1536 granules'; +SELECT * FROM explain_index_has(use_idx_fixed = 1, filter = toFixedString('foo', 3)); + +SELECT '-- -- value exists only in 1536 granules'; +SELECT * FROM explain_index_has(use_idx_fixed = 1, filter = toFixedString('bar', 3)); + +SELECT '-- -- value does not exist in granules'; +SELECT * FROM explain_index_has(use_idx_fixed = 1, filter = toFixedString('def', 3)); + +SELECT 'hasAnyTokens support'; + +SELECT '-- with String'; +SELECT count() FROM tab WHERE hasAnyTokens(arr, 'foo'); +SELECT count() FROM tab WHERE hasAnyTokens(arr, 'bar'); +SELECT count() FROM tab WHERE hasAnyTokens(arr, 'foo bar'); + 
+SELECT '-- with FixedString'; +SELECT count() FROM tab WHERE hasAnyTokens(arr_fixed, 'foo'); +SELECT count() FROM tab WHERE hasAnyTokens(arr_fixed, 'bar'); +SELECT count() FROM tab WHERE hasAnyTokens(arr_fixed, 'foo bar'); + +SELECT '-- Check that the text index actually gets used (String)'; + +DROP VIEW IF EXISTS explain_index_has_any_tokens; +CREATE VIEW explain_index_has_any_tokens AS ( + SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + SELECT count() FROM tab WHERE ( + CASE + WHEN {use_idx_fixed:boolean} = 1 THEN hasAnyTokens(arr_fixed, {filter:String}) + ELSE hasAnyTokens(arr, {filter:String}) + END + ) + ) + WHERE explain LIKE '%Description:%' OR explain LIKE '%Granules:%' + LIMIT 1, 2 +); + +SELECT '-- -- value exists only in 512 granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 0, filter = 'abc'); + +SELECT '-- -- value exists only in 1024 granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 0, filter = 'baz'); + +SELECT '-- -- value exists only in 1536 granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 0, filter = 'foo'); + +SELECT '-- -- value exists only in 2560 granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 0, filter = 'foo bar'); + +SELECT '-- -- value does not exist in granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 0, filter = 'def'); + +SELECT '-- Check that the text index actually gets used (FixedString)'; + +SELECT '-- -- value exists only in 512 granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 1, filter = 'abc'); + +SELECT '-- -- value exists only in 1024 granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 1, filter = 'baz'); + +SELECT '-- -- value exists only in 1536 granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 1, filter = 'foo'); + +SELECT '-- -- value exists only in 2560 granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 1, filter = 'foo bar'); + +SELECT '-- -- value does not exist in granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 1, filter = 'def'); + +SELECT 'hasAllTokens support'; + +SELECT '-- with String'; +SELECT count() FROM tab WHERE hasAllTokens(arr, 'foo'); +SELECT count() FROM tab WHERE hasAllTokens(arr, 'bar'); +SELECT count() FROM tab WHERE hasAllTokens(arr, 'foo bar'); + +SELECT '-- with FixedString'; +SELECT count() FROM tab WHERE hasAllTokens(arr_fixed, 'foo'); +SELECT count() FROM tab WHERE hasAllTokens(arr_fixed, 'bar'); +SELECT count() FROM tab WHERE hasAllTokens(arr_fixed, 'foo bar'); + +SELECT '-- Check that the text index actually gets used (String)'; + +DROP VIEW IF EXISTS explain_index_has_all_tokens; +CREATE VIEW explain_index_has_all_tokens AS ( + SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + SELECT count() FROM tab WHERE ( + CASE + WHEN {use_idx_fixed:boolean} = 1 THEN hasAllTokens(arr_fixed, {filter:String}) + ELSE hasAllTokens(arr, {filter:String}) + END + ) + ) + WHERE explain LIKE '%Description:%' OR explain LIKE '%Granules:%' + LIMIT 1, 2 +); + +SELECT '-- -- value exists only in 512 granules'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 0, filter = 'abc'); + +SELECT '-- -- value exists only in 1024 granules'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 0, filter = 'baz'); + +SELECT '-- -- value exists only in 1536 granules'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 0, filter = 'foo'); + +SELECT '-- -- value exists only in 512 
granules'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 0, filter = 'foo bar'); + +SELECT '-- -- value does not exist in granules'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 0, filter = 'def'); + +SELECT '-- Check that the text index actually gets used (FixedString)'; + +SELECT '-- -- value exists only in 512 granules'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 1, filter = 'abc'); + +SELECT '-- -- value exists only in 1024 granules'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 1, filter = 'baz'); + +SELECT '-- -- value exists only in 1536 granules'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 1, filter = 'foo'); + +SELECT '-- -- value exists only in 512 granules'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 1, filter = 'foo bar'); + +SELECT '-- -- value does not exist in granules'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 1, filter = 'def'); + +DROP VIEW explain_index_has; +DROP VIEW explain_index_has_any_tokens; +DROP VIEW explain_index_has_all_tokens; +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_bug47393/ast.json b/parser/testdata/02346_text_index_bug47393/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_bug47393/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_bug47393/metadata.json b/parser/testdata/02346_text_index_bug47393/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_bug47393/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_bug47393/query.sql b/parser/testdata/02346_text_index_bug47393/query.sql new file mode 100644 index 000000000..004d5715f --- /dev/null +++ b/parser/testdata/02346_text_index_bug47393/query.sql @@ -0,0 +1,27 @@ +-- Test for Bug 47393 + +SET allow_experimental_full_text_index = 1; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab +( + id UInt64, + str String, + INDEX idx str TYPE text(tokenizer = ngrams(3)) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1; + +INSERT INTO tab (str) VALUES ('I am inverted'); + +SELECT data_version FROM system.parts WHERE database = currentDatabase() AND table = 'tab' AND active = 1; + +-- Update column synchronously +ALTER TABLE tab UPDATE str = 'I am not inverted' WHERE 1 SETTINGS mutations_sync = 1; + +SELECT data_version FROM system.parts WHERE database = currentDatabase() AND table = 'tab' AND active = 1; + +SELECT str FROM tab WHERE str LIKE '%inverted%' SETTINGS force_data_skipping_indices = 'idx'; + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_bug54541/ast.json b/parser/testdata/02346_text_index_bug54541/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_bug54541/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_bug54541/metadata.json b/parser/testdata/02346_text_index_bug54541/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_bug54541/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_bug54541/query.sql b/parser/testdata/02346_text_index_bug54541/query.sql new file mode 100644 index 000000000..6d661ca2a --- /dev/null +++ b/parser/testdata/02346_text_index_bug54541/query.sql @@ -0,0 +1,19 @@ +-- Test for AST 
Fuzzer crash #54541 + +SET allow_experimental_full_text_index = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + id UInt32, + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO tab VALUES (0, 'a'); +SELECT * FROM tab WHERE str == 'b' AND 1.0; + +DROP TABLE IF EXISTS tab; diff --git a/parser/testdata/02346_text_index_bug59039/ast.json b/parser/testdata/02346_text_index_bug59039/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_bug59039/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_bug59039/metadata.json b/parser/testdata/02346_text_index_bug59039/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_bug59039/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_bug59039/query.sql b/parser/testdata/02346_text_index_bug59039/query.sql new file mode 100644 index 000000000..c353c7e7c --- /dev/null +++ b/parser/testdata/02346_text_index_bug59039/query.sql @@ -0,0 +1,20 @@ +-- Test that DROP INDEX removes all index related files. +-- This can't be tested directly but we can at least check that no bad things happen. + +SET allow_experimental_full_text_index = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + id UInt64, + doc String, + INDEX text_idx doc TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi', min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0; + +ALTER TABLE tab DROP INDEX text_idx; + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_bug62681/ast.json b/parser/testdata/02346_text_index_bug62681/ast.json new file mode 100644 index 000000000..c2974e17e --- /dev/null +++ b/parser/testdata/02346_text_index_bug62681/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001084648, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02346_text_index_bug62681/metadata.json b/parser/testdata/02346_text_index_bug62681/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_bug62681/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_bug62681/query.sql b/parser/testdata/02346_text_index_bug62681/query.sql new file mode 100644 index 000000000..c467e4fbe --- /dev/null +++ b/parser/testdata/02346_text_index_bug62681/query.sql @@ -0,0 +1,47 @@ +SET allow_experimental_full_text_index = 1; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab +( + str String, + INDEX text_idx str TYPE text(tokenizer = ngrams(3)) GRANULARITY 1, + INDEX set_idx str TYPE set(10) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS compress_marks = 0; + +INSERT INTO tab (str) VALUES ('I am inverted'); + +OPTIMIZE TABLE tab FINAL; + +-- to double check: `ll -h $(find . 
-name "*text_idx*")` from build dir +-- sum up .mrk* or .cmrk* files to get marks_bytes +-- sum up .idx files for data_compressed_bytes +-- note that `du` rounds to nearest 4KB so it is not accurate here +-- also note that different runs of db might all show up, only sum up one set +SELECT + database, + table, + name, + type, + type_full, + granularity, + data_compressed_bytes > 100, + data_uncompressed_bytes > 75, + marks_bytes +FROM system.data_skipping_indices WHERE database = currentDatabase() AND type = 'text' FORMAT Vertical; + +-- to double check: `ll -h $(find . -name "*skp_idx*")` from build dir +-- see above notes +SELECT + partition, + name, + secondary_indices_compressed_bytes > 150, + secondary_indices_uncompressed_bytes > 100, + secondary_indices_marks_bytes +FROM system.parts +WHERE database = currentDatabase() AND table = 'tab' AND active = 1 AND partition = 'tuple()' +FORMAT Vertical; + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_bug84805/ast.json b/parser/testdata/02346_text_index_bug84805/ast.json new file mode 100644 index 000000000..31011aafa --- /dev/null +++ b/parser/testdata/02346_text_index_bug84805/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001170107, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02346_text_index_bug84805/metadata.json b/parser/testdata/02346_text_index_bug84805/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_bug84805/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_bug84805/query.sql b/parser/testdata/02346_text_index_bug84805/query.sql new file mode 100644 index 000000000..4aa542078 --- /dev/null +++ b/parser/testdata/02346_text_index_bug84805/query.sql @@ -0,0 +1,29 @@ +SET allow_experimental_full_text_index = 1; + +-- Issue 84805: the no-op and ngram tokenizers crash for empty inputs + +SELECT 'Test no_op tokenizer'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab ( + str String, + INDEX idx str TYPE text(tokenizer = 'array') ) +ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO TABLE tab (str) VALUES (''); + +DROP TABLE tab; + +SELECT 'Test ngram tokenizer'; + +CREATE TABLE tab ( + str String, + INDEX idx str TYPE text(tokenizer = 'ngrams') ) +ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO TABLE tab (str) VALUES (''); + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_bug87887/ast.json b/parser/testdata/02346_text_index_bug87887/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_bug87887/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_bug87887/metadata.json b/parser/testdata/02346_text_index_bug87887/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_bug87887/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_bug87887/query.sql b/parser/testdata/02346_text_index_bug87887/query.sql new file mode 100644 index 000000000..49c808c45 --- /dev/null +++ b/parser/testdata/02346_text_index_bug87887/query.sql @@ -0,0 +1,21 @@ +-- Test for Bugs 87887 and 88119 + +SET allow_experimental_full_text_index = 1; +SET use_skip_indexes_on_data_read = 1; +SET use_skip_indexes = 1; +SET query_plan_direct_read_from_text_index = 1; + +DROP 
TABLE IF EXISTS tab; +CREATE TABLE tab (c0 LowCardinality(String), INDEX i0 c0 TYPE text(tokenizer = 'splitByNonAlpha')) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO tab(c0) VALUES ('Hello, world!'); + +SELECT 'Test Bug 87887'; +SELECT 'Test hasToken(text, text):', count() FROM tab WHERE hasToken(c0, 'Hello'); +SELECT 'Test hasToken(text, dummy):', count() FROM tab WHERE hasToken(c0, 'Dummy'); + +SELECT 'Test Bug 88119'; +SELECT 'Test hasToken(text, nullable(text)):', count() FROM tab WHERE hasToken(c0, toNullable('Hello')); +SELECT 'Test hasToken(text, nullable(dummy)):', count() FROM tab WHERE hasToken(c0, toNullable('Dummy')); +SELECT 'Test hasToken(text, NULL):', count() FROM tab WHERE hasToken(c0, NULL); + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_bug88080/ast.json b/parser/testdata/02346_text_index_bug88080/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_bug88080/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_bug88080/metadata.json b/parser/testdata/02346_text_index_bug88080/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_bug88080/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_bug88080/query.sql b/parser/testdata/02346_text_index_bug88080/query.sql new file mode 100644 index 000000000..e008acde7 --- /dev/null +++ b/parser/testdata/02346_text_index_bug88080/query.sql @@ -0,0 +1,5 @@ +-- Test for issue #88080 + +SET allow_experimental_full_text_index = 1; + +SELECT hasAllTokens('a', '[[(2,1)]]'::Polygon); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/02346_text_index_bug89605/ast.json b/parser/testdata/02346_text_index_bug89605/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_bug89605/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_bug89605/metadata.json b/parser/testdata/02346_text_index_bug89605/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_bug89605/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_bug89605/query.sql b/parser/testdata/02346_text_index_bug89605/query.sql new file mode 100644 index 000000000..a5d4ce2b2 --- /dev/null +++ b/parser/testdata/02346_text_index_bug89605/query.sql @@ -0,0 +1,51 @@ +-- Test for issue #89605 + +SET allow_experimental_full_text_index = 1; +SET use_skip_indexes_on_data_read = 1; +SET query_plan_direct_read_from_text_index = 0; +SET max_threads = 2; -- make sure it's running multi-threaded + +SELECT 'sparseGrams tokenizer is provided to hasAnyTokens and hasAllTokens functions'; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab +( + id UInt64, + msg String, + INDEX id_msg msg TYPE text(tokenizer = sparseGrams) +) +ENGINE = MergeTree() +ORDER BY tuple() +SETTINGS index_granularity = 128; + +INSERT INTO tab SELECT number, 'alick' FROM numbers(1024); +INSERT INTO tab SELECT number, 'clickhouse' FROM numbers(1024); +INSERT INTO tab SELECT number, 'clickbench' FROM numbers(1024); +INSERT INTO tab SELECT number, 'blick' FROM numbers(1024); + + +SELECT count() FROM tab WHERE hasAllTokens(msg, sparseGrams('click')); + +SELECT count() FROM tab WHERE hasAnyTokens(msg, sparseGrams('click')); + +DROP TABLE tab; + +SELECT 'sparseGrams tokenizer is provided to the tokens function'; + +DROP TABLE IF 
EXISTS tab; +CREATE TABLE tab +( + id UInt64, + msg String +) +ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO tab SELECT number, 'alick' FROM numbers(1024); +INSERT INTO tab SELECT number, 'clickhouse' FROM numbers(1024); +INSERT INTO tab SELECT number, 'clickbench' FROM numbers(1024); +INSERT INTO tab SELECT number, 'blick' FROM numbers(1024); + +SELECT sum(length(tokens(msg, 'sparseGrams'))) FROM tab; + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_coalescingmergetree/ast.json b/parser/testdata/02346_text_index_coalescingmergetree/ast.json new file mode 100644 index 000000000..4d4c26044 --- /dev/null +++ b/parser/testdata/02346_text_index_coalescingmergetree/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001250058, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02346_text_index_coalescingmergetree/metadata.json b/parser/testdata/02346_text_index_coalescingmergetree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_coalescingmergetree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_coalescingmergetree/query.sql b/parser/testdata/02346_text_index_coalescingmergetree/query.sql new file mode 100644 index 000000000..e1f12c472 --- /dev/null +++ b/parser/testdata/02346_text_index_coalescingmergetree/query.sql @@ -0,0 +1,59 @@ +SET allow_experimental_full_text_index = 1; + +-- Tests text index with the 'CoalescingMergeTree' engine + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + id UInt32, + key String, + value Nullable(String), + INDEX idx_key(key) TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = CoalescingMergeTree() +ORDER BY id; + +SYSTEM STOP MERGES tab; + +INSERT INTO tab VALUES + (1, 'foo', 'foo'), + (2, 'bar', NULL); + +INSERT INTO tab VALUES + (1, 'foo', NULL), + (2, 'bar', 'bar'); + +SELECT 'Take value from the first part'; + +SELECT '-- direct read disabled'; + +SET use_skip_indexes_on_data_read = 0; + +SELECT value FROM tab WHERE hasToken(key, 'foo') ORDER BY value; +SELECT value FROM tab FINAL WHERE hasToken(key, 'foo') ORDER BY value; + +SELECT '-- direct read enabled'; + +SET use_skip_indexes_on_data_read = 1; + +SELECT value FROM tab WHERE hasToken(key, 'foo') ORDER BY value; +SELECT value FROM tab FINAL WHERE hasToken(key, 'foo') ORDER BY value; + +SELECT 'Take value from the second part'; + +SELECT '-- direct read disabled'; + +SET use_skip_indexes_on_data_read = 0; + +SELECT value FROM tab WHERE hasToken(key, 'bar') ORDER BY value; +SELECT value FROM tab FINAL WHERE hasToken(key, 'bar') ORDER BY value; + +SELECT '-- direct read enabled'; + +SET use_skip_indexes_on_data_read = 1; + +SELECT value FROM tab WHERE hasToken(key, 'bar') ORDER BY value; +SELECT value FROM tab FINAL WHERE hasToken(key, 'bar') ORDER BY value; + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_collapsingmergetree/ast.json b/parser/testdata/02346_text_index_collapsingmergetree/ast.json new file mode 100644 index 000000000..d673e08fd --- /dev/null +++ b/parser/testdata/02346_text_index_collapsingmergetree/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001303389, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git 
a/parser/testdata/02346_text_index_collapsingmergetree/metadata.json b/parser/testdata/02346_text_index_collapsingmergetree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_collapsingmergetree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_collapsingmergetree/query.sql b/parser/testdata/02346_text_index_collapsingmergetree/query.sql new file mode 100644 index 000000000..3eb519bad --- /dev/null +++ b/parser/testdata/02346_text_index_collapsingmergetree/query.sql @@ -0,0 +1,54 @@ +SET allow_experimental_full_text_index = 1; + +-- Tests text index with the 'CollapsingMergeTree' engine + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + id UInt32, + key String, + value Nullable(String), + sign Int8, + INDEX idx_key(key) TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = CollapsingMergeTree(sign) +ORDER BY id; + +INSERT INTO tab VALUES + (1, 'foo', 'foo', -1), + (2, 'bar', 'bar', -1); + +INSERT INTO tab VALUES + (1, 'foo', 'foo updated', 1), + (2, 'bar', 'bar updated', 1); + +SELECT 'Take values from all parts'; + +SELECT '-- direct read disabled'; + +SET use_skip_indexes_on_data_read = 0; + +SELECT value FROM tab WHERE hasToken(key, 'foo') ORDER BY value; + +SELECT '-- direct read enabled'; + +SET use_skip_indexes_on_data_read = 1; + +SELECT value FROM tab WHERE hasToken(key, 'bar') ORDER BY value; + +SELECT 'Take values from the final part'; + +SELECT '-- direct read disabled'; + +SET use_skip_indexes_on_data_read = 0; + +SELECT value FROM tab FINAL WHERE hasToken(key, 'foo') ORDER BY value; + +SELECT '-- direct read enabled'; + +SET use_skip_indexes_on_data_read = 1; + +SELECT value FROM tab FINAL WHERE hasToken(key, 'bar') ORDER BY value; + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_creation/ast.json b/parser/testdata/02346_text_index_creation/ast.json new file mode 100644 index 000000000..87f8f5c58 --- /dev/null +++ b/parser/testdata/02346_text_index_creation/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00144364, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02346_text_index_creation/metadata.json b/parser/testdata/02346_text_index_creation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_creation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_creation/query.sql b/parser/testdata/02346_text_index_creation/query.sql new file mode 100644 index 000000000..ff4707d1d --- /dev/null +++ b/parser/testdata/02346_text_index_creation/query.sql @@ -0,0 +1,674 @@ +SET allow_experimental_full_text_index = 1; +DROP TABLE IF EXISTS tab; + +SELECT 'Must not have no arguments.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text() -- { serverError INCORRECT_QUERY } +) +ENGINE = MergeTree +ORDER BY tuple(); + +SELECT 'Test single tokenizer argument.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP TABLE tab; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'ngrams') +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP TABLE tab; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByString') +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP 
TABLE tab; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'array') +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP TABLE tab; + +SELECT '-- tokenizer must be splitByNonAlpha, ngrams, splitByString or array.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'invalid') +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } + +SELECT '-- tokenizer can be identifier or function.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = splitByNonAlpha) +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP TABLE tab; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = splitByNonAlpha()) +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP TABLE tab; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = splitByString) +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP TABLE tab; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = splitByString()) +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP TABLE tab; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = ngrams) +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP TABLE tab; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = ngrams()) +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP TABLE tab; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = array) +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP TABLE tab; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = array()) +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP TABLE tab; + +SELECT 'Test ngram size.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = ngrams(4)) +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP TABLE tab; + +SELECT '-- ngram size must be between 2 and 8.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = ngrams(0)) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = ngrams(9)) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } + +SELECT 'Test separators argument.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = splitByString(['\n', '\\'])) +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP TABLE tab; + +SELECT '-- separators must be array.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = splitByString('\n')) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +SELECT '-- separators must be an array of strings.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = splitByString([1, 2])) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +SELECT 'Test bloom_filter_false_positive_rate argument.'; + +SELECT '-- bloom_filter_false_positive_rate must be a double.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', bloom_filter_false_positive_rate = 1) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', bloom_filter_false_positive_rate = '1024') +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', bloom_filter_false_positive_rate = 0.5) +) +ENGINE = MergeTree +ORDER BY tuple(); 
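+-- For intuition only (standard Bloom filter sizing, not something this test asserts): a filter targeting false-positive rate p +-- needs roughly -ln(p) / (ln 2)^2, i.e. about 1.44 * log2(1/p), bits per token, so p = 0.5 above costs about 1.44 bits per token; +-- the rate is only meaningful for 0 < p < 1, which the boundary cases below reject.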
+DROP TABLE tab; + +SELECT '-- bloom_filter_false_positive_rate must be between 0.0 and 1.0.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', bloom_filter_false_positive_rate = 0.0) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', bloom_filter_false_positive_rate = 1.0) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +SELECT 'Test dictionary_block_size argument.'; + +SELECT '-- dictionary_block_size must be an integer.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', dictionary_block_size = 1024.0) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', dictionary_block_size = '1024') +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', dictionary_block_size = 1024) +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP TABLE tab; + +SELECT '-- dictionary_block_size must be bigger than 0.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', dictionary_block_size = 0) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', dictionary_block_size = -1) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +SELECT 'Test dictionary_block_frontcoding_compression argument.'; + +SELECT '-- dictionary_block_frontcoding_compression must be an integer.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', dictionary_block_frontcoding_compression = 1024.0) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', dictionary_block_frontcoding_compression = '1024') +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', dictionary_block_frontcoding_compression = 1) +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP TABLE tab; + +SELECT '-- dictionary_block_frontcoding_compression must be 0 or 1.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', dictionary_block_frontcoding_compression = 2) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', dictionary_block_frontcoding_compression = -1) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', dictionary_block_frontcoding_compression = 0) +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP TABLE tab; + +SELECT 'Test max_cardinality_for_embedded_postings argument.'; + +SELECT '-- max_cardinality_for_embedded_postings must be an integer.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', max_cardinality_for_embedded_postings = 1024.0) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + 
str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', max_cardinality_for_embedded_postings = '1024') +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', max_cardinality_for_embedded_postings = 1024) +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP TABLE tab; + +SELECT 'Parameters are shuffled.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(max_cardinality_for_embedded_postings = 1024, tokenizer = ngrams(4)) +) +ENGINE = MergeTree +ORDER BY tuple(); +DROP TABLE tab; + +SELECT 'Types are incorrect.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 1) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(ngrams) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = ngrams('4')) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +SELECT 'Same argument appears >1 times.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', tokenizer = ngrams(3)) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = ngrams(3), tokenizer = ngrams(4)) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', bloom_filter_false_positive_rate = 0.5, bloom_filter_false_positive_rate = 0.5) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +SELECT 'Non-existing argument.'; + +CREATE TABLE tab +( + str String, + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha', non_existing_argument = 1024) +) +ENGINE = MergeTree +ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +SELECT 'Must be created on single column.'; + +CREATE TABLE tab +( + key UInt64, + str1 String, + str2 String, + INDEX idx (str1, str2) TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree ORDER BY key; -- { serverError INCORRECT_NUMBER_OF_COLUMNS } + +SELECT 'A column must not have >1 text index'; + +SELECT '-- CREATE TABLE'; + +CREATE TABLE tab( + s String, + INDEX idx_1(s) TYPE text(tokenizer = 'splitByNonAlpha'), + INDEX idx_2(s) TYPE text(tokenizer = ngrams(3)) +) +Engine = MergeTree() +ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } + +SELECT '-- ALTER TABLE'; + +CREATE TABLE tab +( + str String, + INDEX idx_1 (str) TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree ORDER BY tuple(); + +ALTER TABLE tab ADD INDEX idx_2(str) TYPE text(tokenizer = ngrams(3)); -- { serverError BAD_ARGUMENTS } + +-- It must still be possible to create an index on the same column with a different expression +ALTER TABLE tab ADD INDEX idx_3(lower(str)) TYPE text(tokenizer = ngrams(3)); + +DROP TABLE tab; + +SELECT 'Must be created on String, FixedString, LowCardinality(String), LowCardinality(FixedString), Array(String) or Array(FixedString) columns.'; + +SELECT '-- Negative tests'; + +CREATE TABLE tab +( + key UInt64, + u64 UInt64, + INDEX idx u64 TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY key; -- { 
serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + key UInt64, + f32 Float32, + INDEX idx f32 TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY key; -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + key UInt64, + arr Array(UInt64), + INDEX idx arr TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY key; -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + key UInt64, + arr Array(Float32), + INDEX idx arr TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY key; -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + key UInt64, + map Map(UInt64, String), + INDEX idx mapKeys(map) TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY key; -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + key UInt64, + map Map(Float32, String), + INDEX idx mapKeys(map) TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY key; -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + key UInt64, + map Map(String, UInt64), + INDEX idx mapValues(map) TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY key; -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + key UInt64, + map Map(String, Float32), + INDEX idx mapValues(map) TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY key; -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + key UInt64, + n_str Nullable(String), + INDEX idx n_str TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY key; -- { serverError INCORRECT_QUERY } + +SET allow_suspicious_low_cardinality_types = 1; + +CREATE TABLE tab +( + key UInt64, + lc LowCardinality(UInt64), + INDEX idx lc TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY key; -- { serverError INCORRECT_QUERY } + +CREATE TABLE tab +( + key UInt64, + lc LowCardinality(Float32), + INDEX idx lc TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY key; -- { serverError INCORRECT_QUERY } + +SELECT '-- Positive tests'; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab +( + key UInt64, + str String, + str_fixed FixedString(3), + INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha'), + INDEX idx_fixed str_fixed TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY key; + +INSERT INTO tab VALUES (1, 'foo', toFixedString('foo', 3)), (2, 'bar', toFixedString('bar', 3)), (3, 'baz', toFixedString('baz', 3)); + +SELECT count() FROM tab WHERE str = 'foo' SETTINGS force_data_skipping_indices='idx'; +SELECT count() FROM tab WHERE str_fixed = toFixedString('foo', 3) SETTINGS force_data_skipping_indices='idx_fixed'; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab +( + key UInt64, + lc LowCardinality(String), + lc_fixed LowCardinality(FixedString(3)), + INDEX idx lc TYPE text(tokenizer = 'splitByNonAlpha'), + INDEX idx_fixed lc_fixed TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY key; + +INSERT INTO tab VALUES (1, 'foo', toFixedString('foo', 3)), (2, 'bar', toFixedString('bar', 3)), (3, 'baz', toFixedString('baz', 3)); + +SELECT count() FROM tab WHERE lc = 'foo' SETTINGS force_data_skipping_indices='idx'; +SELECT count() FROM tab WHERE lc_fixed = toFixedString('foo', 3) SETTINGS force_data_skipping_indices='idx_fixed'; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab +( + key UInt64, + arr Array(String), + arr_fixed Array(FixedString(3)), + INDEX idx arr TYPE text(tokenizer = 'splitByNonAlpha'), + INDEX idx_fixed arr_fixed TYPE text(tokenizer 
= 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY key; + +INSERT INTO tab VALUES (1, ['foo'], [toFixedString('foo', 3)]), (2, ['bar'], [toFixedString('bar', 3)]), (3, ['baz'], [toFixedString('baz', 3)]); + +SELECT count() FROM tab WHERE has(arr, 'foo') SETTINGS force_data_skipping_indices='idx'; +SELECT count() FROM tab WHERE has(arr_fixed, toFixedString('foo', 3)) SETTINGS force_data_skipping_indices='idx_fixed'; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab +( + key UInt64, + map Map(String, String), + map_fixed Map(FixedString(3), FixedString(3)), + INDEX idx_keys mapKeys(map) TYPE text(tokenizer = 'splitByNonAlpha'), + INDEX idx_keys_fixed mapKeys(map_fixed) TYPE text(tokenizer = 'splitByNonAlpha'), + INDEX idx_values mapValues(map) TYPE text(tokenizer = 'splitByNonAlpha'), + INDEX idx_values_fixed mapValues(map_fixed) TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY key; + +INSERT INTO tab VALUES (1, {'foo' : 'foo'}, {'foo' : 'foo'}), (2, {'bar' : 'bar'}, {'bar' : 'bar'}); + +SELECT count() FROM tab WHERE mapContainsKey(map, 'foo') SETTINGS force_data_skipping_indices='idx_keys'; +SELECT count() FROM tab WHERE mapContainsKey(map_fixed, toFixedString('foo', 3)) SETTINGS force_data_skipping_indices='idx_keys_fixed'; +SELECT count() FROM tab WHERE has(mapValues(map), 'foo') SETTINGS force_data_skipping_indices='idx_values'; +SELECT count() FROM tab WHERE has(mapValues(map_fixed), toFixedString('foo', 3)) SETTINGS force_data_skipping_indices='idx_values_fixed'; + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_default_granularity/ast.json b/parser/testdata/02346_text_index_default_granularity/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_default_granularity/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_default_granularity/metadata.json b/parser/testdata/02346_text_index_default_granularity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_default_granularity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_default_granularity/query.sql b/parser/testdata/02346_text_index_default_granularity/query.sql new file mode 100644 index 000000000..d5374a6b8 --- /dev/null +++ b/parser/testdata/02346_text_index_default_granularity/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-fasttest, no-ordinary-database + +-- Tests that text search indexes use a (non-standard) index granularity of 64 by default. 
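+-- For context, a hedged sketch (not part of the test): an explicit GRANULARITY clause overrides the default, e.g. +--     INDEX idx(s) TYPE text(tokenizer = ngrams(2)) GRANULARITY 1 +-- whereas the index definitions below omit GRANULARITY and are expected to report 64.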
+ +SET allow_experimental_full_text_index = 1; + +-- After CREATE TABLE +DROP TABLE IF EXISTS tab; +CREATE TABLE tab(k UInt64, s String, INDEX idx(s) TYPE text(tokenizer = ngrams(2))) ENGINE = MergeTree() ORDER BY k; +SELECT granularity FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab' AND name = 'idx'; +DROP TABLE tab; + +-- After CREATE TABLE + ALTER TABLE ADD INDEX +CREATE TABLE tab(k UInt64, s String) ENGINE = MergeTree() ORDER BY k; +ALTER TABLE tab ADD INDEX idx(s) TYPE text(tokenizer = ngrams(2)); +SELECT granularity FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab' AND name = 'idx'; + +-- After ALTER TABLE DROP INDEX + ALTER TABLE ADD INDEX +ALTER TABLE tab DROP INDEX idx; +ALTER TABLE tab ADD INDEX idx(s) TYPE text(tokenizer = 'splitByNonAlpha'); +SELECT granularity FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab' AND name = 'idx'; + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_detach_attach/ast.json b/parser/testdata/02346_text_index_detach_attach/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_detach_attach/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_detach_attach/metadata.json b/parser/testdata/02346_text_index_detach_attach/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_detach_attach/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_detach_attach/query.sql b/parser/testdata/02346_text_index_detach_attach/query.sql new file mode 100644 index 000000000..016832381 --- /dev/null +++ b/parser/testdata/02346_text_index_detach_attach/query.sql @@ -0,0 +1,17 @@ +-- Test that detaching and attaching parts with a text index works + +SET allow_experimental_full_text_index = 1; + +CREATE TABLE tab +( + key UInt64, + str String, + INDEX inv_idx str TYPE text(tokenizer = 'splitByNonAlpha') GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY key; + +INSERT INTO tab VALUES (1, 'Hello World'); + +ALTER TABLE tab DETACH PART 'all_1_1_0'; +ALTER TABLE tab ATTACH PART 'all_1_1_0'; diff --git a/parser/testdata/02346_text_index_dictionary_cache/ast.json b/parser/testdata/02346_text_index_dictionary_cache/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_dictionary_cache/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_dictionary_cache/metadata.json b/parser/testdata/02346_text_index_dictionary_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_dictionary_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_dictionary_cache/query.sql b/parser/testdata/02346_text_index_dictionary_cache/query.sql new file mode 100644 index 000000000..23454fd82 --- /dev/null +++ b/parser/testdata/02346_text_index_dictionary_cache/query.sql @@ -0,0 +1,92 @@ +-- Tags: no-parallel, no-parallel-replicas +-- no-parallel: looks at server-wide metrics + +--- These tests verify that a deserialized text index dictionary block is cached and reused across consecutive executions. 
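+--- Worked example of the layout assumed below: the table is filled with 256 distinct tokens ('text_000' .. 'text_255') and dictionary_block_size = 128, so the sorted dictionary splits into ceil(256 / 128) = 2 blocks; the first lookup in a block deserializes it (a cache miss), and later lookups in the same block should be cache hits.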
+ +SET enable_analyzer = 1; +SET allow_experimental_full_text_index = 1; +SET use_skip_indexes_on_data_read = 1; +SET query_plan_direct_read_from_text_index = 1; +SET use_text_index_dictionary_cache = 1; +SET log_queries = 1; +SET max_rows_to_read = 0; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab +( + id UInt32, + message String, + INDEX idx(message) TYPE text(tokenizer = array, dictionary_block_size = 128) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY (id) +SETTINGS index_granularity = 128; + +--- The text index would have two dictionary blocks. +INSERT INTO tab +SELECT + number, + concat('text_', leftPad(toString(number), 3, '0')) +FROM numbers(256); + +DROP VIEW IF EXISTS text_index_cache_stats; +CREATE VIEW text_index_cache_stats AS ( + SELECT + concat('cache_hits = ', toString(ProfileEvents['TextIndexDictionaryBlockCacheHits']), ', cache_misses = ', toString(ProfileEvents['TextIndexDictionaryBlockCacheMisses'])) + FROM system.query_log + WHERE query_kind ='Select' + AND current_database = currentDatabase() + AND endsWith(trimRight(query), concat('hasAnyTokens(message, \'', {filter:String}, '\');')) + AND type='QueryFinish' + LIMIT 1 +); + +SELECT 'Tokens between text_000 -> text_127 are in the first dictionary block and text_128 -> text_255 are in the second dictionary block.'; + +SELECT '--- cache miss on the first dictionary block.'; +SELECT count() FROM tab WHERE hasAnyTokens(message, 'text_000'); + +SYSTEM FLUSH LOGS query_log; +SELECT * FROM text_index_cache_stats(filter = 'text_000'); + +SELECT '--- cache miss on the second dictionary block.'; +SELECT count() FROM tab WHERE hasAnyTokens(message, 'text_128'); + +SYSTEM FLUSH LOGS query_log; +SELECT * FROM text_index_cache_stats(filter = 'text_128'); + +SELECT '--- cache hit on the first dictionary block.'; +SELECT count() FROM tab WHERE hasAnyTokens(message, 'text_127'); + +SYSTEM FLUSH LOGS query_log; +SELECT * FROM text_index_cache_stats(filter = 'text_127'); + +SELECT '--- no profile events when cache is disabled.'; + +SET use_text_index_dictionary_cache = 0; + +SELECT count() FROM tab WHERE hasAnyTokens(message, 'text_126'); + +SET use_text_index_dictionary_cache = 1; + +SYSTEM FLUSH LOGS query_log; +SELECT * FROM text_index_cache_stats(filter = 'text_126'); + +SELECT 'Clear text index cache'; + +SYSTEM DROP TEXT INDEX DICTIONARY CACHE; + +SELECT '--- cache miss on the first dictionary block.'; +SELECT count() FROM tab WHERE hasAnyTokens(message, 'text_125'); + +SYSTEM FLUSH LOGS query_log; +SELECT * FROM text_index_cache_stats(filter = 'text_125'); + +SELECT '--- cache miss on the second dictionary block.'; +SELECT count() FROM tab WHERE hasAnyTokens(message, 'text_129'); + +SYSTEM FLUSH LOGS query_log; +SELECT * FROM text_index_cache_stats(filter = 'text_129'); + +SYSTEM DROP TEXT INDEX DICTIONARY CACHE; +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_dictionary_frontcoding/ast.json b/parser/testdata/02346_text_index_dictionary_frontcoding/ast.json new file mode 100644 index 000000000..cb586fcd1 --- /dev/null +++ b/parser/testdata/02346_text_index_dictionary_frontcoding/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001769788, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02346_text_index_dictionary_frontcoding/metadata.json b/parser/testdata/02346_text_index_dictionary_frontcoding/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_dictionary_frontcoding/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_dictionary_frontcoding/query.sql b/parser/testdata/02346_text_index_dictionary_frontcoding/query.sql new file mode 100644 index 000000000..1a651b697 --- /dev/null +++ b/parser/testdata/02346_text_index_dictionary_frontcoding/query.sql @@ -0,0 +1,29 @@ +SET allow_experimental_full_text_index = 1; + +-- Tests text index parameter `dictionary_block_frontcoding_compression`. + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + id UInt32, + text_fc String, + text_raw String, + INDEX idx_raw(text_raw) TYPE text(tokenizer = 'splitByNonAlpha', dictionary_block_size = 6, dictionary_block_frontcoding_compression = 0), -- two raw dictionary blocks + INDEX idx_fc(text_fc) TYPE text(tokenizer = 'splitByNonAlpha', dictionary_block_size = 6, dictionary_block_frontcoding_compression = 1), -- two FC-coded dictionary blocks +) +ENGINE = MergeTree() +ORDER BY id; + +INSERT INTO tab VALUES (0, 'foo', 'foo'), (1, 'bar', 'bar'), (2, 'baz', 'baz'), (3, 'foo bar', 'foo bar'), (4, 'foo baz', 'foo baz'), (5, 'bar baz', 'bar baz'), (6, 'abc', 'abc'), (7, 'def', 'def'); + +SELECT count() FROM tab WHERE hasToken(text_raw, 'foo'); +SELECT count() FROM tab WHERE hasToken(text_fc, 'foo'); + +SELECT count() FROM tab WHERE hasToken(text_raw, 'bar'); +SELECT count() FROM tab WHERE hasToken(text_fc, 'bar'); + +SELECT count() FROM tab WHERE hasToken(text_raw, 'abc'); +SELECT count() FROM tab WHERE hasToken(text_fc, 'abc'); + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_direct_read/ast.json b/parser/testdata/02346_text_index_direct_read/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_direct_read/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_direct_read/metadata.json b/parser/testdata/02346_text_index_direct_read/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_direct_read/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_direct_read/query.sql b/parser/testdata/02346_text_index_direct_read/query.sql new file mode 100644 index 000000000..4076c5e88 --- /dev/null +++ b/parser/testdata/02346_text_index_direct_read/query.sql @@ -0,0 +1,97 @@ +-- Tags: no-parallel, no-parallel-replicas +-- Tag no-parallel -- due to access to the system.text_log +-- Tag no-parallel-replicas -- direct read is not compatible with parallel replicas + +SET log_queries = 1; + +-- The following settings affect the number of rows read. 
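+-- A hedged reading of the setup (an assumption, not asserted by the test itself): with use_skip_indexes_on_data_read and query_plan_direct_read_from_text_index enabled, a filter like +--     SELECT count() FROM tab WHERE hasToken(text, 'Alick') +-- can be answered from the text index postings instead of reading and re-tokenizing the `text` column, which changes the number of rows read.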
+SET allow_experimental_full_text_index = 1; +SET use_skip_indexes_on_data_read = 1; +SET query_plan_direct_read_from_text_index = 1; +SET max_rows_to_read = 0; -- system.text_log can be really big +SET enable_analyzer = 0; -- To produce consistent explain outputs + +---------------------------------------------------- +SELECT '- Test direct read optimization from text log'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(k UInt64, text String, INDEX idx(text) TYPE text(tokenizer = 'splitByNonAlpha') GRANULARITY 1) + ENGINE = MergeTree() ORDER BY k + SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi'; + +INSERT INTO tab VALUES (101, 'Alick a01'), + (102, 'Blick a02'); + +---------------------------------------------------- + +SELECT 'Test hasToken:', count() FROM tab WHERE hasToken(text, 'Alick'); +SELECT 'Test hasAllTokens:', count() FROM tab WHERE hasAllTokens(text, ['Alick']); +SELECT 'Test hasAnyTokens:', count() FROM tab WHERE hasAnyTokens(text, ['Alick']); +SELECT 'Test hasToken + length(text):', count() FROM tab WHERE hasToken(text, 'Alick') or length(text) > 1; +SELECT 'Test select text + hasAnyTokens:', text FROM tab WHERE hasAnyTokens(text, ['Alick']); +SELECT 'Test hasToken and hasToken:', count() FROM tab WHERE hasToken(text, 'Alick') and hasToken(text, 'Blick'); +SELECT 'Test hasAnyTokens or hasToken:', count() FROM tab WHERE hasAnyTokens(text, ['Blick']) or hasToken(text, 'Alick'); +SELECT 'Test NOT hasAllTokens:', count() FROM tab WHERE NOT hasAllTokens(text, ['Blick']); + + +---------------------------------------------------- +-- Now check the logs all at once (one by one is too slow) +---------------------------------------------------- +SYSTEM FLUSH LOGS text_log; + +SELECT message +FROM ( + SELECT event_time_microseconds, message FROM system.text_log + WHERE logger_name = 'optimizeDirectReadFromTextIndex' AND startsWith(message, 'Added:') + ORDER BY event_time_microseconds DESC LIMIT 8 +) +ORDER BY event_time_microseconds ASC; + +---------------------------------------------------- +-- Now check that EXPLAIN produces the expected output for the same queries. +-- Do this AFTER checking the text_log, otherwise the entries would be duplicated. 
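+-- (Reason: each EXPLAIN below re-plans its query, which runs the same optimizeDirectReadFromTextIndex pass and would append further 'Added:' entries to system.text_log.)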
+---------------------------------------------------- +SELECT '- Test direct read optimization with EXPLAIN'; + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1 SELECT 'Test hasToken:', count() FROM tab WHERE hasToken(text, 'Alick') SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain LIKE '%Filter column:%'; + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1 SELECT 'Test hasAllTokens:', count() FROM tab WHERE hasAllTokens(text, ['Alick']) SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain LIKE '%Filter column:%'; + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1 SELECT 'Test hasAnyTokens:', count() FROM tab WHERE hasAnyTokens(text, ['Alick']) SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain LIKE '%Filter column:%'; + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1 SELECT 'Test hasToken + length(text):', count() FROM tab WHERE hasToken(text, 'Alick') or length(text) > 1 SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain LIKE '%Filter column:%'; + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1 SELECT 'Test select text + hasAnyTokens:', text FROM tab WHERE hasAnyTokens(text, ['Alick']) SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain LIKE '%Prewhere filter column:%'; + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1 SELECT 'Test hasToken and hasToken:', count() FROM tab WHERE hasToken(text, 'Alick') and hasToken(text, 'Blick') SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain LIKE '%Prewhere filter column:%'; + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1 SELECT 'Test hasAnyTokens or hasToken:', count() FROM tab WHERE hasAnyTokens(text, ['Blick']) or hasToken(text, 'Alick') SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain LIKE '%Filter column:%'; + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1 SELECT 'Test NOT hasAllTokens:', count() FROM tab WHERE NOT hasAllTokens(text, ['Blick']) SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain LIKE '%Filter column:%'; + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_direct_read_crash/ast.json b/parser/testdata/02346_text_index_direct_read_crash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_direct_read_crash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_direct_read_crash/metadata.json b/parser/testdata/02346_text_index_direct_read_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_direct_read_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_direct_read_crash/query.sql b/parser/testdata/02346_text_index_direct_read_crash/query.sql new file mode 100644 index 000000000..a47e36f57 --- /dev/null +++ b/parser/testdata/02346_text_index_direct_read_crash/query.sql @@ -0,0 +1,35 @@ +-- Test that the text index works correctly when the number of rows in a part is smaller than the index_granularity. 
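+-- Worked sizes for the scenario below: index_granularity = 32 rows per data granule and GRANULARITY 4 mean one text-index granule spans up to 4 * 32 = 128 rows; the first inserts create parts with only 3-4 rows, i.e. a single incomplete granule, which is exactly the "fewer rows than index_granularity" case being tested.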
+ +SET allow_experimental_full_text_index = 1; + +SET use_skip_indexes_on_data_read=1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + `id` UInt64, + `text` String, + INDEX inv_idx text TYPE text(tokenizer = 'splitByNonAlpha') GRANULARITY 4 +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity = 32, index_granularity_bytes = 0, min_bytes_for_wide_part = 0; + +INSERT INTO tab VALUES (0,'a'),(1,'b'),(2,'c'); + +SELECT id FROM tab WHERE hasToken(text, 'b'); + +SELECT id FROM tab WHERE hasToken(text, 'c'); + +TRUNCATE TABLE tab; + +INSERT INTO tab VALUES (0,'a'),(1,'b'),(2,'c'),(3,'d'); + +SELECT id FROM tab WHERE hasToken(text, 'b'); + +SELECT id FROM tab WHERE hasToken(text, 'd'); + +INSERT INTO tab SELECT number, 'aaabbbccc' FROM numbers(128); + +SELECT id FROM tab WHERE hasToken(text, 'aaabbbccc'); diff --git a/parser/testdata/02346_text_index_direct_read_with_query_condition_cache/ast.json b/parser/testdata/02346_text_index_direct_read_with_query_condition_cache/ast.json new file mode 100644 index 000000000..53ea4abc3 --- /dev/null +++ b/parser/testdata/02346_text_index_direct_read_with_query_condition_cache/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001120365, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02346_text_index_direct_read_with_query_condition_cache/metadata.json b/parser/testdata/02346_text_index_direct_read_with_query_condition_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_direct_read_with_query_condition_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_direct_read_with_query_condition_cache/query.sql b/parser/testdata/02346_text_index_direct_read_with_query_condition_cache/query.sql new file mode 100644 index 000000000..5e2eaa3b7 --- /dev/null +++ b/parser/testdata/02346_text_index_direct_read_with_query_condition_cache/query.sql @@ -0,0 +1,32 @@ +SET allow_experimental_full_text_index = 1; +SET use_skip_indexes_on_data_read = 1; +SET use_query_condition_cache = 1; + +-- Tests a bug where the direct read optimization (text index) returned wrong results +-- when the query condition cache is enabled. 
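+-- Illustrative workaround sketch (not part of this test): if such wrong results appear, disabling one of the two interacting features for the query should avoid the bad interaction, e.g. +--     SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['abc']) SETTINGS use_query_condition_cache = 0;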
+ +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + id UInt32, + message String, + INDEX idx(message) TYPE text(tokenizer = splitByNonAlpha), +) +ENGINE = MergeTree +ORDER BY (id); + +INSERT INTO tab(id, message) +VALUES + (1, 'abc+ def- foo!'), + (2, 'abc+ def- bar?'), + (3, 'abc+ baz- foo!'), + (4, 'abc+ baz- bar?'), + (5, 'abc+ zzz- foo!'), + (6, 'abc+ zzz- bar?'); + +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['abc']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['ab']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['foo']); + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_experimental_flag/ast.json b/parser/testdata/02346_text_index_experimental_flag/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_experimental_flag/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_experimental_flag/metadata.json b/parser/testdata/02346_text_index_experimental_flag/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_experimental_flag/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_experimental_flag/query.sql b/parser/testdata/02346_text_index_experimental_flag/query.sql new file mode 100644 index 000000000..31119b47c --- /dev/null +++ b/parser/testdata/02346_text_index_experimental_flag/query.sql @@ -0,0 +1,24 @@ +-- Tests that CREATE TABLE and ADD INDEX respect settings 'allow_experimental_full_text_index' + +DROP TABLE IF EXISTS tab; + +-- Test CREATE TABLE + +SET allow_experimental_full_text_index = 0; +CREATE TABLE tab1 (id UInt32, str String, INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha')) ENGINE = MergeTree ORDER BY tuple(); -- { serverError SUPPORT_IS_DISABLED } + +SET allow_experimental_full_text_index = 1; +CREATE TABLE tab1 (id UInt32, str String, INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha')) ENGINE = MergeTree ORDER BY tuple(); +DROP TABLE tab1; + +-- Test ADD INDEX + +SET allow_experimental_full_text_index = 0; +CREATE TABLE tab (id UInt32, str String) ENGINE = MergeTree ORDER BY tuple(); +ALTER TABLE tab ADD INDEX idx1 str TYPE text(tokenizer = 'splitByNonAlpha'); -- { serverError SUPPORT_IS_DISABLED } +DROP TABLE tab; + +SET allow_experimental_full_text_index = 1; +CREATE TABLE tab (id UInt32, str String) ENGINE = MergeTree ORDER BY tuple(); +ALTER TABLE tab ADD INDEX idx1 str TYPE text(tokenizer = 'splitByNonAlpha'); +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_function_hasAnyAllTokens/ast.json b/parser/testdata/02346_text_index_function_hasAnyAllTokens/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_function_hasAnyAllTokens/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_function_hasAnyAllTokens/metadata.json b/parser/testdata/02346_text_index_function_hasAnyAllTokens/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_function_hasAnyAllTokens/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_function_hasAnyAllTokens/query.sql b/parser/testdata/02346_text_index_function_hasAnyAllTokens/query.sql new file mode 100644 index 000000000..365480bfa --- /dev/null +++ b/parser/testdata/02346_text_index_function_hasAnyAllTokens/query.sql @@ -0,0 +1,798 @@ +-- Tags: no-parallel-replicas, long + +SET 
enable_analyzer = 1; +SET use_query_condition_cache = 0; +SET allow_experimental_full_text_index = 1; + +DROP TABLE IF EXISTS tab; + +SELECT 'Negative tests'; + +CREATE TABLE tab +( + id UInt32, + col_str String, + message String, + arr Array(String), + INDEX idx(`message`) TYPE text(tokenizer = 'splitByNonAlpha'), +) +ENGINE = MergeTree +ORDER BY (id); + +INSERT INTO tab VALUES (1, 'b', 'b', ['c']), (2, 'c', 'c', ['c']), (3, '', '', ['']); + +-- Must accept two arguments +SELECT id FROM tab WHERE hasAnyTokens(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT id FROM tab WHERE hasAnyTokens('a', 'b', 'c'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT id FROM tab WHERE hasAllTokens(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT id FROM tab WHERE hasAllTokens('a', 'b', 'c'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +-- 1st arg must be String or FixedString +SELECT id FROM tab WHERE hasAnyTokens(1, ['a']); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT id FROM tab WHERE hasAllTokens(1, ['a']); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- 2nd arg must be const String or const Array(String) +SELECT id FROM tab WHERE hasAnyTokens(message, 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT id FROM tab WHERE hasAnyTokens(message, materialize('b')); -- { serverError ILLEGAL_COLUMN } +SELECT id FROM tab WHERE hasAnyTokens(message, materialize(['b'])); -- { serverError ILLEGAL_COLUMN } +SELECT id FROM tab WHERE hasAllTokens(message, 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT id FROM tab WHERE hasAllTokens(message, materialize('b')); -- { serverError ILLEGAL_COLUMN } +SELECT id FROM tab WHERE hasAllTokens(message, materialize(['b'])); -- { serverError ILLEGAL_COLUMN } +-- Supports a max of 64 needles +SELECT id FROM tab WHERE hasAnyTokens(message, ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg', 'hh', 'ii', 'jj', 'kk', 'll', 'mm', 'nn', 'oo', 'pp', 'qq', 'rr', 'ss', 'tt', 'uu', 'vv', 'ww', 'xx', 'yy', 'zz', 'aaa', 'bbb', 'ccc', 'ddd', 'eee', 'fff', 'ggg', 'hhh', 'iii', 'jjj', 'kkk', 'lll', 'mmm']); -- { serverError BAD_ARGUMENTS } +SELECT id FROM tab WHERE hasAnyTokens(message, 'a b c d e f g h i j k l m n o p q r s t u v w x y z aa bb cc dd ee ff gg hh ii jj kk ll mm nn oo pp qq rr ss tt uu vv ww xx yy zz aaa bbb ccc ddd eee fff ggg hhh iii jjj kkk lll mmm'); -- { serverError BAD_ARGUMENTS } + +SELECT 'Test singular aliases'; + +SELECT hasAnyToken('a b', 'b') FORMAT Null; +SELECT hasAnyToken('a b', ['b']) FORMAT Null; +SELECT hasAllToken('a b', 'b') FORMAT Null; +SELECT hasAllToken('a b', ['b']) FORMAT Null; + +SELECT 'Test what happens when hasAnyTokens/All is called on a column without index'; + +-- We expect that the default tokenizer is used +-- { echoOn } +SELECT hasAnyTokens('a b', ['b']); +SELECT hasAnyTokens('a b', ['c']); +SELECT hasAnyTokens('a b', 'b'); +SELECT hasAnyTokens('a b', 'c'); +SELECT hasAnyTokens(materialize('a b'), ['b']); +SELECT hasAnyTokens(materialize('a b'), ['c']); +SELECT hasAnyTokens(materialize('a b'), 'b'); +SELECT hasAnyTokens(materialize('a b'), 'c'); +-- +SELECT hasAllTokens('a b', ['a', 'b']); +SELECT hasAllTokens('a b', ['a', 'c']); +SELECT hasAllTokens('a b', 'a b'); +SELECT hasAllTokens('a b', 'a c'); +SELECT hasAllTokens(materialize('a b'), ['a', 'b']); +SELECT hasAllTokens(materialize('a b'), ['a', 'c']); +SELECT hasAllTokens(materialize('a b'), 'a 
b'); +SELECT hasAllTokens(materialize('a b'), 'a c'); + +-- These are equivalent to the lines above, but using Search{Any,All} in the filter step. +-- We keep this test because the direct read optimization substitutes Search{Any,All} only +-- when they are in the filter step, and we want to detect any variation eagerly. +SELECT id FROM tab WHERE hasAnyTokens('a b', ['b']); +SELECT id FROM tab WHERE hasAnyTokens('a b', ['c']); +SELECT id FROM tab WHERE hasAnyTokens(col_str, ['b']); +SELECT id FROM tab WHERE hasAnyTokens(col_str, ['c']); + +SELECT id FROM tab WHERE hasAnyTokens('a b', 'b'); +SELECT id FROM tab WHERE hasAnyTokens('a b', 'c'); +SELECT id FROM tab WHERE hasAnyTokens(col_str, 'b'); +SELECT id FROM tab WHERE hasAnyTokens(col_str, 'c'); + +SELECT id FROM tab WHERE hasAllTokens('a b', ['a b']); +SELECT id FROM tab WHERE hasAllTokens('a b', ['a c']); +SELECT id FROM tab WHERE hasAllTokens(col_str, ['a b']); +SELECT id FROM tab WHERE hasAllTokens(col_str, ['a c']); + +SELECT id FROM tab WHERE hasAllTokens('a b', 'a b'); +SELECT id FROM tab WHERE hasAllTokens('a b', 'a c'); +SELECT id FROM tab WHERE hasAllTokens(col_str, 'a a'); +SELECT id FROM tab WHERE hasAllTokens(col_str, 'b c'); + +-- Test search without needle on non-empty columns (all are expected to match nothing) +SELECT count() FROM tab WHERE hasAnyTokens(col_str, []); +SELECT count() FROM tab WHERE hasAllTokens(col_str, []); +SELECT count() FROM tab WHERE hasAnyTokens(col_str, ['']); +SELECT count() FROM tab WHERE hasAnyTokens(col_str, ''); +SELECT count() FROM tab WHERE hasAnyTokens(col_str, ['','']); +-- { echoOff } + +DROP TABLE tab; + +-- Specifically test FixedString columns without a text index +CREATE TABLE tab +( + id UInt8, + s FixedString(11) +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO tab VALUES (1, 'hello world'), (2, 'goodbye'), (3, 'hello moon'); + +-- { echoOn } +SELECT id FROM tab WHERE hasAnyTokens(s, ['hello']) ORDER BY id; +SELECT id FROM tab WHERE hasAnyTokens(s, ['moon', 'goodbye']) ORDER BY id; +SELECT id FROM tab WHERE hasAnyTokens(s, ['unknown', 'goodbye']) ORDER BY id; + +SELECT id FROM tab WHERE hasAllTokens(s, ['hello', 'world']) ORDER BY id; +SELECT id FROM tab WHERE hasAllTokens(s, ['goodbye']) ORDER BY id; +SELECT id FROM tab WHERE hasAllTokens(s, ['hello', 'moon']) ORDER BY id; +SELECT id FROM tab WHERE hasAllTokens(s, ['hello', 'unknown']) ORDER BY id; +-- { echoOff } + +DROP TABLE tab; + +SELECT 'FixedString input columns'; + +CREATE TABLE tab ( + id Int, + text FixedString(16), + INDEX idx_text(text) TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE=MergeTree() +ORDER BY (id); + +INSERT INTO tab VALUES(1, toFixedString('bar', 3)), (2, toFixedString('foo', 3)); + +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(text, ['bar']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(text, ['bar']); + +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(text, 'bar'); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(text, 'bar'); + +DROP TABLE tab; + +SELECT '-- Default tokenizer'; + +CREATE TABLE tab +( + id UInt32, + message String, + INDEX idx(`message`) TYPE text(tokenizer = 'splitByNonAlpha'), +) +ENGINE = MergeTree +ORDER BY (id); + +INSERT INTO tab(id, message) +VALUES + (1, 'abc+ def- foo!'), + (2, 'abc+ def- bar?'), + (3, 'abc+ baz- foo!'), + (4, 'abc+ baz- bar?'), + (5, 'abc+ zzz- foo!'), + (6, 'abc+ zzz- bar?'); + +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['abc']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['ab']); +SELECT groupArray(id) FROM 
tab WHERE hasAnyTokens(message, ['foo']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['bar']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['abc', 'foo']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['abc', 'bar']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['foo', 'bar']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['foo', 'ba']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['fo', 'ba']); + +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, 'ab+'); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, 'foo-'); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, 'abc+* foo+'); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, 'fo ba'); + +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['abc']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['ab']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['foo']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['bar']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['abc', 'foo']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['abc', 'bar']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['foo', 'bar']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['abc', 'fo']); + +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, 'ab+'); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, 'foo-'); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, 'abc+* foo+'); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, 'abc ba'); + +DROP TABLE tab; + +SELECT '-- Ngram tokenizer'; + +CREATE TABLE tab +( + id UInt32, + message String, + INDEX idx(`message`) TYPE text(tokenizer = ngrams(4)), +) +ENGINE = MergeTree +ORDER BY (id); + +INSERT INTO tab +VALUES +(1, 'abcdef'), +(2, 'bcdefg'), +(3, 'cdefgh'), +(4, 'defghi'), +(5, 'efghij'); + +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['efgh']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['efg']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['cdef']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['defg']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['cdef', 'defg']); -- search cdefg +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['efgh', 'cdef', 'defg']); --search for either cdefg or defgh + +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, 'efgh'); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, 'efg'); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, 'efghi'); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, 'cdefg'); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, 'cdefgh'); + +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['efgh']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['efg']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['cdef']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['defg']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['cdef', 'defg']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['efgh', 'cdef', 'defg']); + +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, 'efgh'); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, 'efg'); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, 'efghi'); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, 'cdefg'); +SELECT 
groupArray(id) FROM tab WHERE hasAllTokens(message, 'cdefgh'); + +DROP TABLE tab; + +SELECT '-- Split tokenizer'; + +CREATE TABLE tab +( + id UInt32, + message String, + INDEX idx(`message`) TYPE text(tokenizer = splitByString(['()', '\\'])), +) +ENGINE = MergeTree +ORDER BY (id); + +INSERT INTO tab +VALUES +(1, ' a bc d'), +(2, '()()a()bc()d'), +(3, ',()a(),bc,(),d,'), +(4, '\\a\n\\bc\\d\n'), +(5, '\na\n\\bc\\d\\'); + +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['a']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['bc']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['d']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['a', 'bc']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['a', 'd']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['bc', 'd']); + +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, 'a*'); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, 'bc(('); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, 'd()'); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, 'a\\bc'); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, 'a d'); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, '\\,bc,()'); + +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['a']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['bc']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['d']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['a', 'bc']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['a', 'd']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['bc', 'd']); + +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, 'a*'); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, 'bc(('); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, 'd()'); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, 'a\\bc'); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, 'a d'); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, 'a\\,bc,()'); + +DROP TABLE tab; + +SELECT '-- NoOp tokenizer'; + +CREATE TABLE tab +( + id UInt32, + message String, + INDEX idx(`message`) TYPE text(tokenizer = 'array'), +) +ENGINE = MergeTree +ORDER BY (id); + +INSERT INTO tab +VALUES +(1, 'abc def'), +(2, 'abc fgh'), +(3, 'def efg'), +(4, 'abcdef'); + +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['abc']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['def']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['abc', 'def']); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['abcdef']); + +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, 'abc'); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, 'abc def'); + +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['abc']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['def']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['abc', 'def']); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, ['abcdef']); + +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, 'abc'); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, 'abc def'); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, 'abcdef '); + +DROP TABLE tab; + +SELECT 'Duplicate tokens'; + +CREATE TABLE tab +( + id UInt32, + message String, + INDEX idx(`message`) TYPE text(tokenizer = 'splitByNonAlpha'), +) +ENGINE = MergeTree +ORDER BY (id); + 
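+-- The behavior probed below, stated as an assumption and verified by the queries: duplicate needles are equivalent to a single needle, so e.g. +--     hasAnyTokens(message, ['hello', 'hello']) +-- should match exactly the rows matched by hasAnyTokens(message, ['hello']).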
+INSERT INTO tab VALUES + (1, 'hello world'), + (2, 'hello world, hello everyone'); + +SELECT count() FROM tab WHERE hasAnyTokens(message, ['hello']); +SELECT count() FROM tab WHERE hasAnyTokens(message, ['hello', 'hello']); +SELECT count() FROM tab WHERE hasAnyTokens(message, 'hello hello'); + +SELECT count() FROM tab WHERE hasAllTokens(message, ['hello']); +SELECT count() FROM tab WHERE hasAllTokens(message, ['hello', 'hello']); +SELECT count() FROM tab WHERE hasAllTokens(message, 'hello hello'); + +DROP TABLE tab; + +SELECT 'Combination with the tokens function'; + +SELECT '-- Default tokenizer'; +CREATE TABLE tab +( + id UInt32, + message String, + INDEX idx(message) TYPE text(tokenizer = 'splitByNonAlpha'), +) +ENGINE = MergeTree +ORDER BY (id); + +INSERT INTO tab(id, message) +VALUES + (1, 'abc+ def- foo!'), + (2, 'abc+ def- bar?'), + (3, 'abc+ baz- foo!'), + (4, 'abc+ baz- bar?'), + (5, 'abc+ zzz- foo!'), + (6, 'abc+ zzz- bar?'); + +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('abc', 'splitByNonAlpha')); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('ab', 'splitByNonAlpha')); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('foo', 'splitByNonAlpha')); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('bar', 'splitByNonAlpha')); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('abc foo', 'splitByNonAlpha')); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('abc bar', 'splitByNonAlpha')); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('foo bar', 'splitByNonAlpha')); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('foo ba', 'splitByNonAlpha')); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('fo ba', 'splitByNonAlpha')); + +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('abc', 'splitByNonAlpha')); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('ab', 'splitByNonAlpha')); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('foo', 'splitByNonAlpha')); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('bar', 'splitByNonAlpha')); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('abc foo', 'splitByNonAlpha')); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('abc bar', 'splitByNonAlpha')); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('foo bar', 'splitByNonAlpha')); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('abc fo', 'splitByNonAlpha')); + +DROP TABLE tab; + +SELECT '-- Ngram tokenizer'; + +CREATE TABLE tab +( + id UInt32, + message String, + INDEX idx(`message`) TYPE text(tokenizer = ngrams(4)), +) +ENGINE = MergeTree +ORDER BY (id); + +INSERT INTO tab +VALUES +(1, 'abcdef'), +(2, 'bcdefg'), +(3, 'cdefgh'), +(4, 'defghi'), +(5, 'efghij'); + +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('efgh', 'ngrams', 4)); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('efg', 'ngrams', 4)); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('cdef', 'ngrams', 4)); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('defg', 'ngrams', 4)); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('cdefg', 'ngrams', 4)); -- search cdefg +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, arrayConcat(tokens('cdefg', 'ngrams', 4), tokens('defgh', 'ngrams', 4))); --search for either cdefg or defgh + +SELECT 
groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('efgh', 'ngrams', 4)); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('efg', 'ngrams', 4)); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('cdef', 'ngrams', 4)); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('defg', 'ngrams', 4)); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('cdefg', 'ngrams', 4)); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, arrayConcat(tokens('cdefg', 'ngrams', 4), tokens('defgh', 'ngrams', 4))); + +DROP TABLE tab; + +SELECT '-- Split tokenizer'; + +CREATE TABLE tab +( + id UInt32, + message String, + INDEX idx(`message`) TYPE text(tokenizer = splitByString(['()', '\\'])), +) +ENGINE = MergeTree +ORDER BY (id); + +INSERT INTO tab +VALUES +(1, ' a bc d'), +(2, '()()a()bc()d'), +(3, ',()a(),bc,(),d,'), +(4, '\\a\n\\bc\\d\n'), +(5, '\na\n\\bc\\d\\'); + +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('a', 'splitByString', ['()', '\\'])); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('bc', 'splitByString', ['()', '\\'])); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('d', 'splitByString', ['()', '\\'])); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('a()bc', 'splitByString', ['()', '\\'])); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('a\\d', 'splitByString', ['()', '\\'])); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('bc\\d', 'splitByString', ['()', '\\'])); + +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('a', 'splitByString', ['()', '\\'])); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('bc', 'splitByString', ['()', '\\'])); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('d', 'splitByString', ['()', '\\'])); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('a()bc', 'splitByString', ['()', '\\'])); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('a\\d', 'splitByString', ['()', '\\'])); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('bc\\d', 'splitByString', ['()', '\\'])); + +DROP TABLE tab; + +SELECT '-- NoOp tokenizer'; + +CREATE TABLE tab +( + id UInt32, + message String, + INDEX idx(`message`) TYPE text(tokenizer = 'array'), +) +ENGINE = MergeTree +ORDER BY (id); + +INSERT INTO tab +VALUES +(1, 'abc def'), +(2, 'abc fgh'), +(3, 'def efg'), +(4, 'abcdef'); + +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('abc', 'array')); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('def', 'array')); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, arrayConcat(tokens('def', 'array'), tokens('def', 'array'))); +SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, tokens('abcdef', 'array')); + +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('abc', 'array')); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('def', 'array')); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, arrayConcat(tokens('def', 'array'), tokens('def', 'array'))); +SELECT groupArray(id) FROM tab WHERE hasAllTokens(message, tokens('abcdef', 'array')); + +DROP TABLE tab; + +SELECT 'Text index analysis'; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab +( + id UInt32, + message String, + INDEX idx(`message`) TYPE text(tokenizer = 'splitByNonAlpha') GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY (id) +SETTINGS index_granularity 
= 1; + +INSERT INTO tab SELECT number, 'Hello, ClickHouse' FROM numbers(1024); +INSERT INTO tab SELECT number, 'Hello, World' FROM numbers(1024); +INSERT INTO tab SELECT number, 'Hallo, ClickHouse' FROM numbers(1024); +INSERT INTO tab SELECT number, 'ClickHouse is fast, really fast!' FROM numbers(1024); + +SELECT 'hasAnyTokens is used during index analysis'; + +SELECT 'Text index overload 1 should choose none for non-existent term'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAnyTokens(message, ['Click']) +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; -- Skip the primary index parts and granules. + +SELECT 'Text index overload 2 should choose none for non-existent term'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAnyTokens(message, 'Click') +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; -- Skip the primary index parts and granules. + +SELECT 'Text index overload 1 should choose 1 part and 1024 granules out of 4 parts and 4096 granules'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAnyTokens(message, ['Hallo']) +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index overload 2 should choose 1 part and 1024 granules out of 4 parts and 4096 granules'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAnyTokens(message, 'Hallo') +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index overload 1 should choose 1 part and 1024 granules out of 4 parts and 4096 granules'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAnyTokens(message, ['Hallo', 'Word']) -- Word does not exist in terms +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index overload 2 should choose 1 part and 1024 granules out of 4 parts and 4096 granules'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAnyTokens(message, 'Hallo Word') -- Word does not exist in terms +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index should choose 2 parts and 2048 granules out of 4 parts and 4096 granules'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAnyTokens(message, ['Hello', 'Word']) +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index should choose 2 parts and 2048 granules out of 4 parts and 4096 granules'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAnyTokens(message, ['Hallo', 'World']) +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index should choose 3 parts and 3072 granules out of 4 parts and 4096 granules'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAnyTokens(message, ['Hello', 'Hallo']) +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE 
'%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index should choose 3 parts and 3072 granules out of 4 parts and 4096 granules'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAnyTokens(message, ['ClickHouse']) +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index should choose all 4 parts and 4096 granules'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAnyTokens(message, ['ClickHouse', 'World']) +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'hasAllTokens is used during index analysis'; + +SELECT 'Text index overload 1 should choose none for non-existent term'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAllTokens(message, ['Click']) +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index overload 2 should choose none for non-existent term'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAllTokens(message, 'Click') +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index overload 1 should choose 1 part and 1024 granules out of 4 parts and 4096 granules'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAllTokens(message, ['Hallo']) +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index overload 2 should choose 1 part and 1024 granules out of 4 parts and 4096 granules'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAllTokens(message, 'Hallo') +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index overload 1 should choose 1 part and 1024 granules out of 4 parts and 4096 granules'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAllTokens(message, ['Hello', 'World']) +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index overload 2 should choose 1 part and 1024 granules out of 4 parts and 4096 granules'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAllTokens(message, 'Hello World') +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index overload 1 should choose none if any term does not exists in dictionary'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAllTokens(message, ['Hallo', 'Word']) -- Word does not exist in terms +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index overload 2 should choose none if any term does not exists in dictionary'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAllTokens(message, 'Hallo Word') -- Word does not exist in terms +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index should choose 2 
parts and 2048 granules out of 4 parts and 4096 granules'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAllTokens(message, ['Hello']) +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index should choose none'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAllTokens(message, ['Hallo', 'World']) +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index should choose none'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAllTokens(message, ['Hello', 'Hallo']) +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index should choose 3 parts and 3072 granules out of 4 parts and 4096 granules'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAllTokens(message, ['ClickHouse']) +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index should choose none'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAllTokens(message, ['ClickHouse', 'World']) +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +DROP TABLE tab; + +SELECT 'Chooses mixed granules inside part'; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab +( + id UInt32, + message String, + INDEX idx(`message`) TYPE text(tokenizer = 'splitByNonAlpha') GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY (id) +SETTINGS index_granularity = 1; + +INSERT INTO tab +SELECT + number, + CASE + WHEN modulo(number, 4) = 0 THEN 'Hello, ClickHouse' + WHEN modulo(number, 4) = 1 THEN 'Hello, World' + WHEN modulo(number, 4) = 2 THEN 'Hallo, ClickHouse' + WHEN modulo(number, 4) = 3 THEN 'ClickHouse is the fast, really fast!' 
+ END +FROM numbers(1024); + +SELECT 'Text index should choose 50% of granules'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAnyTokens(message, ['Hello', 'World']) +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index should choose all granules'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAnyTokens(message, ['Hello', 'ClickHouse']) +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +SELECT 'Text index should choose 25% of granules'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE hasAllTokens(message, ['Hello', 'World']) +) +WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' +LIMIT 2, 3; + +DROP TABLE tab; + +CREATE TABLE tab +( + id UInt8, + s FixedString(11) +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO tab VALUES (1, 'hello world'), (2, 'goodbye'), (3, 'hello moon'); + +SELECT 'Test hasAnyTokens and hasAllTokens on a non-indexed FixedString column'; + +-- { echoOn } +SELECT id FROM tab WHERE hasAnyTokens(s, ['hello']) ORDER BY id; +SELECT id FROM tab WHERE hasAnyTokens(s, ['moon', 'goodbye']) ORDER BY id; +SELECT id FROM tab WHERE hasAnyTokens(s, ['unknown', 'goodbye']) ORDER BY id; + +SELECT id FROM tab WHERE hasAnyTokens(s, 'hello') ORDER BY id; +SELECT id FROM tab WHERE hasAnyTokens(s, 'moon goodbye') ORDER BY id; +SELECT id FROM tab WHERE hasAnyTokens(s, 'unknown goodbye') ORDER BY id; + +SELECT id FROM tab WHERE hasAllTokens(s, ['hello', 'world']) ORDER BY id; +SELECT id FROM tab WHERE hasAllTokens(s, ['goodbye']) ORDER BY id; +SELECT id FROM tab WHERE hasAllTokens(s, ['hello', 'moon']) ORDER BY id; +SELECT id FROM tab WHERE hasAllTokens(s, ['hello', 'unknown']) ORDER BY id; + +SELECT id FROM tab WHERE hasAllTokens(s, 'hello world') ORDER BY id; +SELECT id FROM tab WHERE hasAllTokens(s, 'goodbye') ORDER BY id; +SELECT id FROM tab WHERE hasAllTokens(s, 'hello moon') ORDER BY id; +SELECT id FROM tab WHERE hasAllTokens(s, 'hello unknown') ORDER BY id; +-- { echoOff } + +DROP TABLE IF EXISTS tab; diff --git a/parser/testdata/02346_text_index_function_hasAnyAllTokens_partially_materialized/ast.json b/parser/testdata/02346_text_index_function_hasAnyAllTokens_partially_materialized/ast.json new file mode 100644 index 000000000..5fd43c4f3 --- /dev/null +++ b/parser/testdata/02346_text_index_function_hasAnyAllTokens_partially_materialized/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001252348, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02346_text_index_function_hasAnyAllTokens_partially_materialized/metadata.json b/parser/testdata/02346_text_index_function_hasAnyAllTokens_partially_materialized/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_function_hasAnyAllTokens_partially_materialized/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_function_hasAnyAllTokens_partially_materialized/query.sql b/parser/testdata/02346_text_index_function_hasAnyAllTokens_partially_materialized/query.sql new file mode 100644 index 000000000..6684e4267 --- /dev/null +++ 
b/parser/testdata/02346_text_index_function_hasAnyAllTokens_partially_materialized/query.sql @@ -0,0 +1,110 @@ +SET allow_experimental_full_text_index = 1; +SET parallel_replicas_local_plan = 1; -- this setting may skip index analysis when false +SET use_skip_indexes_on_data_read = 0; +SET mutations_sync = 2; -- want synchronous materialize + +-- In this test we make sure that the text search functions hasAnyTokens/hasAllTokens work correctly for +-- tables in which only some parts have a materialized index. The expected behavior is that +-- the search acts the same as if the whole column was indexed, but of course an inefficient +-- brute-force search is used for the un-indexed rows. Furthermore, we test that the tokenizer +-- specified in the index definition is also applied to the un-indexed parts. + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab +( + id UInt32, + message String, +) +ENGINE = MergeTree +ORDER BY (id) SETTINGS index_granularity = 2; + +DROP VIEW IF EXISTS explain_indexes; +CREATE VIEW explain_indexes +AS SELECT trimLeft(explain) AS explain +FROM +( + SELECT * + FROM viewExplain('EXPLAIN', 'indexes = 1', ( + SELECT groupArray(id) FROM tab WHERE hasAnyTokens(message, ['def']) + )) +) +WHERE (explain LIKE '%Name%') OR (explain LIKE '%Description%') OR (explain LIKE '%Parts%') OR (explain LIKE '%Granules%') OR (explain LIKE '%Range%'); + +SYSTEM STOP MERGES tab; + +-- bar# will be parsed differently by different tokenizers, e.g. splitByString -> 'bar#'; splitByNonAlpha -> 'bar'. +-- We want to test that the search functions use the same tokenizer on parts where the index is not materialized. +INSERT INTO tab(id, message) +VALUES + (1, 'abc def foo'), + (2, 'abc def bar#'), + (3, 'abc baz foo'); + +ALTER TABLE tab ADD INDEX idx(`message`) TYPE text(tokenizer = 'splitByString') GRANULARITY 1; + +INSERT INTO tab(id, message) +VALUES + (4, 'abc baz bar'), + (5, 'abc zzz foo'), + (6, 'abc zzz bar'); + +-- { echoOn } +SELECT * FROM explain_indexes; + +-- +--hasAnyTokens: +SELECT arraySort(groupArray(id)) FROM tab WHERE hasAnyTokens(message, ['foo']); +SELECT arraySort(groupArray(id)) FROM tab WHERE hasAnyTokens(message, ['bar']); +SELECT arraySort(groupArray(id)) FROM tab WHERE hasAnyTokens(message, ['foo', 'bar']); +SELECT arraySort(groupArray(id)) FROM tab WHERE hasAnyTokens(message, ['foo', 'ba']); +SELECT arraySort(groupArray(id)) FROM tab WHERE hasAnyTokens(message, ['fo', 'ba']); + +-- +--hasAllTokens: +SELECT arraySort(groupArray(id)) FROM tab WHERE hasAllTokens(message, ['foo']); +SELECT arraySort(groupArray(id)) FROM tab WHERE hasAllTokens(message, ['bar']); +SELECT arraySort(groupArray(id)) FROM tab WHERE hasAllTokens(message, ['foo', 'bar']); +SELECT arraySort(groupArray(id)) FROM tab WHERE hasAllTokens(message, ['abc', 'fo']); +-- { echoOff } + +DROP TABLE tab; + +CREATE TABLE tab +( + id UInt32, + message String, +) +ENGINE = MergeTree +ORDER BY (id) SETTINGS index_granularity = 2; + +INSERT INTO tab(id, message) +VALUES + (1, 'abc def foo'), + (2, 'abc def bar#'), + (3, 'abc baz foo'); + +ALTER TABLE tab ADD INDEX idx(`message`) TYPE text(tokenizer = 'splitByNonAlpha') GRANULARITY 1; + +INSERT INTO tab(id, message) +VALUES + (4, 'abc baz bar'), + (5, 'abc zzz foo'), + (6, 'abc zzz bar'); + +-- { echoOn } +SELECT * FROM explain_indexes; + +-- +-- Test that the splitByNonAlpha tokenizer is applied even to column parts which do not have a materialized text index +SELECT arraySort(groupArray(id)) FROM tab WHERE hasAnyTokens(message, ['bar']); +SELECT arraySort(groupArray(id))
FROM tab WHERE hasAnyTokens(message, ['bar$']); -- test default tokenizer +SELECT arraySort(groupArray(id)) FROM tab WHERE hasAnyTokens(message, tokens('bar$', 'splitByNonAlpha')); +SELECT arraySort(groupArray(id)) FROM tab WHERE hasAllTokens(message, ['bar']); +SELECT arraySort(groupArray(id)) FROM tab WHERE hasAllTokens(message, ['bar$']); -- test default tokenizer +SELECT arraySort(groupArray(id)) FROM tab WHERE hasAllTokens(message, tokens('bar$', 'splitByNonAlpha')); +-- { echoOff } + +DROP TABLE tab; +DROP VIEW explain_indexes; + diff --git a/parser/testdata/02346_text_index_functions_with_empty_needle/ast.json b/parser/testdata/02346_text_index_functions_with_empty_needle/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_functions_with_empty_needle/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_functions_with_empty_needle/metadata.json b/parser/testdata/02346_text_index_functions_with_empty_needle/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_functions_with_empty_needle/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_functions_with_empty_needle/query.sql b/parser/testdata/02346_text_index_functions_with_empty_needle/query.sql new file mode 100644 index 000000000..f7c79a775 --- /dev/null +++ b/parser/testdata/02346_text_index_functions_with_empty_needle/query.sql @@ -0,0 +1,30 @@ +-- Test the behavior of text index functions with an empty needle +-- They should not match anything + +-- In search{All,Any}, an empty needle is different from an empty list: +-- See: 02346_text_index_bug86300 + +SET allow_experimental_full_text_index = 1; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab ( + id Int, + text String, + INDEX idx_text(text) TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree() +ORDER BY (id); + +INSERT INTO tab VALUES(1, 'bar'), (2, 'foo'); + +SELECT '-- Plain text index search functions'; +SELECT count() FROM tab WHERE hasAnyTokens(text, ['']); +SELECT count() FROM tab WHERE hasAllTokens(text, ['']); +SELECT count() FROM tab WHERE hasToken(text, ''); + +SELECT '-- Negated text index search functions'; +SELECT count() FROM tab WHERE NOT hasAnyTokens(text, ['']); +SELECT count() FROM tab WHERE NOT hasAllTokens(text, ['']); +SELECT count() FROM tab WHERE NOT hasToken(text, ''); + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_header_cache/ast.json b/parser/testdata/02346_text_index_header_cache/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_header_cache/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_header_cache/metadata.json b/parser/testdata/02346_text_index_header_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_header_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_header_cache/query.sql b/parser/testdata/02346_text_index_header_cache/query.sql new file mode 100644 index 000000000..cff1c8607 --- /dev/null +++ b/parser/testdata/02346_text_index_header_cache/query.sql @@ -0,0 +1,87 @@ +-- Tags: no-parallel, no-parallel-replicas +-- no-parallel: looks at server-wide metrics + +--- These tests verify that the deserialized text index header is cached across consecutive executions.
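+-- Sketch (assumes only the TextIndexHeaderCacheHits/TextIndexHeaderCacheMisses event names used below): besides the per-query ProfileEvents read from system.query_log, the same counters can be eyeballed server-wide via +-- SELECT event, value FROM system.events WHERE event LIKE 'TextIndexHeaderCache%';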
+ +SET enable_analyzer = 1; +SET allow_experimental_full_text_index = 1; +SET use_skip_indexes_on_data_read = 1; +SET query_plan_direct_read_from_text_index = 1; +SET use_text_index_header_cache = 1; +SET log_queries = 1; +SET max_rows_to_read = 0; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab +( + id UInt32, + message String, + INDEX idx(message) TYPE text(tokenizer = array, dictionary_block_size = 128) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY (id) +SETTINGS index_granularity = 128; + +--- The text index would have 4 parts/marks. +INSERT INTO tab +SELECT + number, + concat('text_', leftPad(toString(number), 3, '0')) +FROM numbers(512); + +SYSTEM STOP MERGES tab; + +DROP VIEW IF EXISTS text_index_cache_stats; +CREATE VIEW text_index_cache_stats AS ( + SELECT + concat('cache_hits = ', toString(ProfileEvents['TextIndexHeaderCacheHits']), ', cache_misses = ', toString(ProfileEvents['TextIndexHeaderCacheMisses'])) + FROM system.query_log + WHERE query_kind ='Select' + AND current_database = currentDatabase() + AND endsWith(trimRight(query), concat('hasAnyTokens(message, \'', {filter:String}, '\');')) + AND type='QueryFinish' + LIMIT 1 +); + +SELECT '--- cache miss on the first run.'; +SELECT count() FROM tab WHERE hasAnyTokens(message, 'text_000'); + +SYSTEM FLUSH LOGS query_log; +SELECT * FROM text_index_cache_stats(filter = 'text_000'); + +SELECT '--- cache hit on the second run.'; +SELECT count() FROM tab WHERE hasAnyTokens(message, 'text_511'); + +SYSTEM FLUSH LOGS query_log; +SELECT * FROM text_index_cache_stats(filter = 'text_511'); + +SELECT '--- no profile events when cache is disabled.'; + +SET use_text_index_header_cache = 0; + +SELECT count() FROM tab WHERE hasAnyTokens(message, 'text_255'); + +SET use_text_index_header_cache = 1; + +SYSTEM FLUSH LOGS query_log; +SELECT * FROM text_index_cache_stats(filter = 'text_255'); + +SELECT 'Clear text index header cache'; + +SYSTEM DROP TEXT INDEX HEADER CACHE; + +SELECT '--- cache miss on the first run.'; +SELECT count() FROM tab WHERE hasAnyTokens(message, 'text_001'); + +SYSTEM FLUSH LOGS query_log; +SELECT * FROM text_index_cache_stats(filter = 'text_001'); + +SELECT '--- cache hit on the second run.'; +SELECT count() FROM tab WHERE hasAnyTokens(message, 'text_510'); + +SYSTEM FLUSH LOGS query_log; +SELECT * FROM text_index_cache_stats(filter = 'text_510'); + +SYSTEM DROP TEXT INDEX HEADER CACHE; +DROP VIEW text_index_cache_stats; +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_hits/ast.json b/parser/testdata/02346_text_index_hits/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_hits/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_hits/metadata.json b/parser/testdata/02346_text_index_hits/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_hits/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_hits/query.sql b/parser/testdata/02346_text_index_hits/query.sql new file mode 100644 index 000000000..bd6de0e67 --- /dev/null +++ b/parser/testdata/02346_text_index_hits/query.sql @@ -0,0 +1,116 @@ +-- Tags: stateful, long, no-parallel, no-asan, no-tsan, no-ubsan, no-msan +-- no-*san: too long. 
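+-- The test below computes reference counts with skip indexes disabled and then re-runs the same predicates with the index forced; both passes must return identical counts. A minimal sketch of that invariant for a single predicate (illustrative only, relies on subquery-level SETTINGS): +-- SELECT (SELECT count() FROM hits_text WHERE hasToken(URL, 'com') SETTINGS use_skip_indexes = 0) +-- = (SELECT count() FROM hits_text WHERE hasToken(URL, 'com') SETTINGS use_skip_indexes = 1); -- expect 1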
+ +DROP TABLE IF EXISTS hits_text; + +CREATE TABLE hits_text +( + `CounterID` UInt32, + `EventDate` Date, + `UserID` UInt32, + `SearchPhrase` String, + `URL` String +) +ENGINE = MergeTree +ORDER BY (CounterID, EventDate); + +SET allow_experimental_full_text_index = 1; +SET use_query_condition_cache = 0; + +ALTER TABLE hits_text ADD INDEX idx_search_phrase SearchPhrase TYPE text(tokenizer = 'splitByNonAlpha') GRANULARITY 8; +ALTER TABLE hits_text ADD INDEX idx_url URL TYPE text(tokenizer = 'splitByNonAlpha') GRANULARITY 8; + +SET max_insert_threads = 4; +INSERT INTO hits_text SELECT CounterID, EventDate, UserID,SearchPhrase, URL FROM test.hits; + +SELECT 'hasToken reference without index'; + +SET use_skip_indexes = 0; +SET use_skip_indexes_on_data_read = 0; +SET force_data_skipping_indices = ''; + +SELECT 'idx_search_phrase'; + +SELECT count() FROM hits_text WHERE hasToken(SearchPhrase, 'video'); +SELECT count() FROM hits_text WHERE hasToken(SearchPhrase, 'google'); +SELECT count() FROM hits_text WHERE hasToken(SearchPhrase, 'market'); +SELECT count() FROM hits_text WHERE hasToken(SearchPhrase, 'world'); +SELECT count() FROM hits_text WHERE hasToken(SearchPhrase, 'mail'); +SELECT count() FROM hits_text WHERE hasToken(SearchPhrase, 'amazon'); +SELECT uniqExact(UserID) FROM hits_text WHERE hasToken(SearchPhrase, 'anime'); + +SELECT 'idx_url'; + +SELECT count() FROM hits_text WHERE hasToken(URL, 'com'); +SELECT count() FROM hits_text WHERE hasToken(URL, 'com') AND hasToken(URL, 'mail'); +SELECT count() FROM hits_text WHERE hasToken(URL, 'com') AND NOT hasToken(URL, 'mail'); +SELECT count() FROM hits_text WHERE hasToken(URL, 'http'); +SELECT count() FROM hits_text WHERE hasToken(URL, 'https'); +SELECT count() FROM hits_text WHERE hasToken(URL, 'https') AND CounterID = 33290414; +SELECT count() FROM hits_text WHERE (hasToken(URL, 'https') OR UserID = 7541501) AND CounterID = 33290414; + +SELECT 'idx_search_phrase,idx_url'; + +SELECT uniqExact(UserID), min(EventDate), max(EventDate) FROM hits_text WHERE hasToken(URL, 'https') AND hasToken(SearchPhrase, 'video'); +SELECT count() FROM hits_text WHERE hasToken(URL, 'auto') AND hasToken(SearchPhrase, 'bmw'); + +SELECT 'hasToken direct read from index'; + +SET use_skip_indexes = 1; +SET use_skip_indexes_on_data_read = 1; + +SELECT 'idx_search_phrase'; +SET force_data_skipping_indices = 'idx_search_phrase'; + +SELECT count() FROM hits_text WHERE hasToken(SearchPhrase, 'video'); +SELECT count() FROM hits_text WHERE hasToken(SearchPhrase, 'google'); +SELECT count() FROM hits_text WHERE hasToken(SearchPhrase, 'market'); +SELECT count() FROM hits_text WHERE hasToken(SearchPhrase, 'world'); +SELECT count() FROM hits_text WHERE hasToken(SearchPhrase, 'mail'); +SELECT count() FROM hits_text WHERE hasToken(SearchPhrase, 'amazon'); +SELECT uniqExact(UserID) FROM hits_text WHERE hasToken(SearchPhrase, 'anime'); + +SELECT 'idx_url'; +SET force_data_skipping_indices = 'idx_url'; + +SELECT count() FROM hits_text WHERE hasToken(URL, 'com'); +SELECT count() FROM hits_text WHERE hasToken(URL, 'com') AND hasToken(URL, 'mail'); +SELECT count() FROM hits_text WHERE hasToken(URL, 'com') AND NOT hasToken(URL, 'mail'); +SELECT count() FROM hits_text WHERE hasToken(URL, 'http'); +SELECT count() FROM hits_text WHERE hasToken(URL, 'https'); +SELECT count() FROM hits_text WHERE hasToken(URL, 'https') AND CounterID = 33290414; +SELECT count() FROM hits_text WHERE (hasToken(URL, 'https') OR UserID = 7541501) AND CounterID = 33290414; + +SELECT 'idx_search_phrase,idx_url'; +SET 
force_data_skipping_indices = 'idx_search_phrase,idx_url'; + +SELECT uniqExact(UserID), min(EventDate), max(EventDate) FROM hits_text WHERE hasToken(URL, 'https') AND hasToken(SearchPhrase, 'video'); +SELECT count() FROM hits_text WHERE hasToken(URL, 'auto') AND hasToken(SearchPhrase, 'bmw'); + +SELECT 'hasAnyTokens/hasAllTokens reference without direct read from index'; + +SET use_skip_indexes = 1; +SET use_skip_indexes_on_data_read = 0; +SET force_data_skipping_indices = 'idx_url'; + +SELECT count() FROM hits_text WHERE hasAnyTokens(URL, ['https', 'http']); +SELECT count() FROM hits_text WHERE hasAllTokens(URL, ['com', 'mail']); +SELECT count() FROM hits_text WHERE hasAllTokens(URL, ['com', 'mail']) AND NOT hasToken(URL, 'http'); +SELECT count() FROM hits_text WHERE hasAnyTokens(URL, ['facebook', 'twitter']); +SELECT count() FROM hits_text WHERE hasToken(URL, 'auto') AND hasAnyTokens(SearchPhrase, ['bmw', 'audi', 'toyota']); +SELECT count() FROM hits_text WHERE hasAnyTokens(URL, ['market', 'shop']) OR hasAnyTokens(SearchPhrase, ['market', 'shop']); + +SELECT 'hasAnyTokens/hasAllTokens direct read from index'; + +SET use_skip_indexes = 1; +SET use_skip_indexes_on_data_read = 1; +SET force_data_skipping_indices = 'idx_url'; + +SELECT count() FROM hits_text WHERE hasAnyTokens(URL, ['https', 'http']); +SELECT count() FROM hits_text WHERE hasAllTokens(URL, ['com', 'mail']); +SELECT count() FROM hits_text WHERE hasAllTokens(URL, ['com', 'mail']) AND NOT hasToken(URL, 'http'); +SELECT count() FROM hits_text WHERE hasAnyTokens(URL, ['facebook', 'twitter']); +SELECT count() FROM hits_text WHERE hasToken(URL, 'auto') AND hasAnyTokens(SearchPhrase, ['bmw', 'audi', 'toyota']); +SELECT count() FROM hits_text WHERE hasAnyTokens(URL, ['market', 'shop']) OR hasAnyTokens(SearchPhrase, ['market', 'shop']); + +DROP TABLE hits_text; diff --git a/parser/testdata/02346_text_index_map_support/ast.json b/parser/testdata/02346_text_index_map_support/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_map_support/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_map_support/metadata.json b/parser/testdata/02346_text_index_map_support/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_map_support/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_map_support/query.sql b/parser/testdata/02346_text_index_map_support/query.sql new file mode 100644 index 000000000..24d6e4890 --- /dev/null +++ b/parser/testdata/02346_text_index_map_support/query.sql @@ -0,0 +1,554 @@ +-- Tags: no-parallel-replicas + +-- Tests that text indexes can be built on and used with Map columns.
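+-- Sketch of why this works: an index on mapKeys(map) feeds the keys array through the tokenizer, so under splitByNonAlpha a key like 'K0' stays a single token, and predicates such as mapContains(map, 'K0') or has(mapKeys(map), 'K0') can be answered from the token dictionary. The tokenization itself can be inspected with the tokens function, e.g.: +-- SELECT tokens('K0', 'splitByNonAlpha'); -- returns ['K0']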
+ +SET enable_analyzer = 1; +SET allow_experimental_full_text_index = 1; + +SELECT 'Function mapKeys'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + id UInt32, + map Map(String, String), + map_fixed Map(FixedString(2), String), + INDEX map_keys_idx mapKeys(map) TYPE text(tokenizer = 'splitByNonAlpha'), + INDEX map_fixed_keys_idx mapKeys(map_fixed) TYPE text(tokenizer = 'splitByNonAlpha'), +) +ENGINE = MergeTree +ORDER BY (id); + +INSERT INTO tab VALUES (0, {'K0':'V0', 'K1':'V1'}, {'K0':'V0', 'K1':'V1'}); +INSERT INTO tab VALUES (1, {'K1':'V1', 'K2':'V2'}, {'K1':'V1', 'K2':'V2'}); + +SELECT '-- mapContains support'; + +SELECT '-- -- query with String'; + +SELECT count() FROM tab WHERE mapContains(map, 'K0'); +SELECT count() FROM tab WHERE mapContains(map, 'K1'); +SELECT count() FROM tab WHERE mapContains(map, 'K2'); +SELECT count() FROM tab WHERE mapContains(map, 'K3'); + +SELECT '-- -- query with FixedString'; + +SELECT count() FROM tab WHERE mapContains(map_fixed, toFixedString('K0', 2)); +SELECT count() FROM tab WHERE mapContains(map_fixed, toFixedString('K1', 2)); +SELECT count() FROM tab WHERE mapContains(map_fixed, toFixedString('K2', 2)); +SELECT count() FROM tab WHERE mapContains(map_fixed, toFixedString('K3', 2)); + +SELECT '-- -- Check that the text index actually gets used (String)'; + +DROP VIEW IF EXISTS explain_index_mapContains; +CREATE VIEW explain_index_mapContains AS ( + SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + SELECT count() FROM tab WHERE ( + CASE + WHEN {use_idx_fixed:boolean} = 1 THEN mapContains(map_fixed, {filter:FixedString(2)}) + ELSE mapContains(map, {filter:String}) + END + ) + ) + WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' + LIMIT 2, 3 +); + +SELECT '-- -- -- key exists only in the first granule'; +SELECT * FROM explain_index_mapContains(use_idx_fixed = 0, filter = 'K0'); + +SELECT '-- -- -- key exists only in the second granule'; +SELECT * FROM explain_index_mapContains(use_idx_fixed = 0, filter = 'K2'); + +SELECT '-- -- -- key exists only in both granules'; +SELECT * FROM explain_index_mapContains(use_idx_fixed = 0, filter = 'K1'); + +SELECT '-- -- -- key does not exist in granules'; +SELECT * FROM explain_index_mapContains(use_idx_fixed = 0, filter = 'K3'); + +SELECT '-- -- Check that the text index actually gets used (FixedString)'; + +SELECT '-- -- -- key exists only in the first granule'; +SELECT * FROM explain_index_mapContains(use_idx_fixed = 1, filter = toFixedString('K0', 2)); + +SELECT '-- -- -- key exists only in the second granule'; +SELECT * FROM explain_index_mapContains(use_idx_fixed = 1, filter = toFixedString('K2', 2)); + +SELECT '-- -- -- key exists only in both granules'; +SELECT * FROM explain_index_mapContains(use_idx_fixed = 1, filter = toFixedString('K1', 2)); + +SELECT '-- -- -- key does not exist in granules'; +SELECT * FROM explain_index_mapContains(use_idx_fixed = 1, filter = toFixedString('K3', 2)); + +SELECT '-- operator[] support'; + +SELECT '-- -- query with String'; + +SELECT count() FROM tab WHERE map['K0'] = 'V0'; +SELECT count() FROM tab WHERE map['K1'] = 'V1'; +SELECT count() FROM tab WHERE map['K2'] = 'V2'; +SELECT count() FROM tab WHERE map['K3'] = 'V3'; + +SELECT '-- -- query with FixedString'; + +SELECT count() FROM tab WHERE map_fixed[toFixedString('K0', 2)] = 'V0'; +SELECT count() FROM tab WHERE map_fixed[toFixedString('K1', 2)] = 'V1'; +SELECT count() FROM tab WHERE map_fixed[toFixedString('K2', 2)] = 'V2'; +SELECT count() FROM tab 
WHERE map_fixed[toFixedString('K3', 2)] = 'V3'; + +SELECT '-- -- Check that the text index actually gets used (String)'; + +DROP VIEW IF EXISTS explain_index_equals; +CREATE VIEW explain_index_equals AS ( + SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + SELECT count() FROM tab WHERE ( + CASE + WHEN {use_idx_fixed:boolean} = 1 THEN map_fixed[{filter:FixedString(2)}] = {value:String} + ELSE map[{filter:String}] = {value:String} + END + ) + ) + WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' + LIMIT 2, 3 +); + +SELECT '-- -- -- key exists only in the first granule'; +SELECT * FROM explain_index_equals(use_idx_fixed = 0, filter = 'K0', value = 'V3'); + +SELECT '-- -- -- key exists only in the second granule'; +SELECT * FROM explain_index_equals(use_idx_fixed = 0, filter = 'K2', value = 'V3'); + +SELECT '-- -- -- key exists only in both granules'; +SELECT * FROM explain_index_equals(use_idx_fixed = 0, filter = 'K1', value = 'V3'); + +SELECT '-- -- -- key does not exist in granules'; +SELECT * FROM explain_index_equals(use_idx_fixed = 0, filter = 'K3', value = 'V3'); + +SELECT '-- -- Check that the text index actually gets used (FixedString)'; + +SELECT '-- -- -- key exists only in the first granule'; +SELECT * FROM explain_index_equals(use_idx_fixed = 1, filter = 'K0', value = 'V3'); + +SELECT '-- -- -- key exists only in the second granule'; +SELECT * FROM explain_index_equals(use_idx_fixed = 1, filter = 'K2', value = 'V3'); + +SELECT '-- -- -- key exists only in both granules'; +SELECT * FROM explain_index_equals(use_idx_fixed = 1, filter = 'K1', value = 'V3'); + +SELECT '-- -- -- key does not exist in granules'; +SELECT * FROM explain_index_equals(use_idx_fixed = 1, filter = 'K3', value = 'V3'); + +SELECT '-- has support'; + +SELECT '-- -- query with String'; + +SELECT count() FROM tab WHERE has(map, 'K0'); +SELECT count() FROM tab WHERE has(map, 'K1'); +SELECT count() FROM tab WHERE has(map, 'K2'); +SELECT count() FROM tab WHERE has(map, 'K3'); + +SELECT '-- -- query with FixedString'; + +SELECT count() FROM tab WHERE has(map_fixed, toFixedString('K0', 2)); +SELECT count() FROM tab WHERE has(map_fixed, toFixedString('K1', 2)); +SELECT count() FROM tab WHERE has(map_fixed, toFixedString('K2', 2)); +SELECT count() FROM tab WHERE has(map_fixed, toFixedString('K3', 2)); + +SELECT '-- -- Check that the text index actually gets used (String)'; + +DROP VIEW IF EXISTS explain_index_has; +CREATE VIEW explain_index_has AS ( + SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + SELECT count() FROM tab WHERE ( + CASE + WHEN {use_idx_fixed:boolean} = 1 THEN has(map_fixed, {filter:FixedString(2)}) + ELSE has(map, {filter:String}) + END + ) + ) + WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' + LIMIT 2, 3 +); + +SELECT '-- -- -- key exists only in the first granule'; +SELECT * FROM explain_index_has(use_idx_fixed = 0, filter = 'K0'); + +SELECT '-- -- -- key exists only in the second granule'; +SELECT * FROM explain_index_has(use_idx_fixed = 0, filter = 'K2'); + +SELECT '-- -- -- key exists only in both granules'; +SELECT * FROM explain_index_has(use_idx_fixed = 0, filter = 'K1'); + +SELECT '-- -- -- key does not exist in granules'; +SELECT * FROM explain_index_has(use_idx_fixed = 0, filter = 'K3'); + +SELECT '-- -- Check that the text index actually gets used (FixedString)'; + +SELECT '-- -- -- key exists only in the first granule'; +SELECT * FROM explain_index_has(use_idx_fixed = 
1, filter = toFixedString('K0', 2)); + +SELECT '-- -- -- key exists only in the second granule'; +SELECT * FROM explain_index_has(use_idx_fixed = 1, filter = toFixedString('K2', 2)); + +SELECT '-- -- -- key exists only in both granules'; +SELECT * FROM explain_index_has(use_idx_fixed = 1, filter = toFixedString('K1', 2)); + +SELECT '-- -- -- key does not exist in granules'; +SELECT * FROM explain_index_has(use_idx_fixed = 1, filter = toFixedString('K3', 2)); + +SELECT '-- hasAnyTokens support'; + +SELECT '-- -- query with String'; + +SELECT count() FROM tab WHERE hasAnyTokens(mapKeys(map), 'K0 K1'); +SELECT count() FROM tab WHERE hasAnyTokens(mapKeys(map), 'K1 K2'); +SELECT count() FROM tab WHERE hasAnyTokens(mapKeys(map), 'K2 K3'); +SELECT count() FROM tab WHERE hasAnyTokens(mapKeys(map), 'K3 K4'); + +SELECT '-- -- query with FixedString'; + +SELECT count() FROM tab WHERE hasAnyTokens(mapKeys(map_fixed), 'K0 K1'); +SELECT count() FROM tab WHERE hasAnyTokens(mapKeys(map_fixed), 'K1 K2'); +SELECT count() FROM tab WHERE hasAnyTokens(mapKeys(map_fixed), 'K2 K3'); +SELECT count() FROM tab WHERE hasAnyTokens(mapKeys(map_fixed), 'K3 K4'); + +DROP VIEW IF EXISTS explain_index_has_any_tokens; +CREATE VIEW explain_index_has_any_tokens AS ( + SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE ( + CASE + WHEN {use_idx_fixed:boolean} = 1 THEN hasAnyTokens(mapKeys(map_fixed), {filter:String}) + ELSE hasAnyTokens(mapKeys(map), {filter:String}) + END + ) + ) + WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' + LIMIT 2, 3 +); + +SELECT '-- -- Check that the text index actually gets used (String)'; + +SELECT '-- -- -- keys exist in both granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 0, filter = 'K0 K1'); + +SELECT '-- -- -- keys exist in both granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 0, filter = 'K1 K2'); + +SELECT '-- -- -- keys exist only in the first granule'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 0, filter = 'K2 K3'); + +SELECT '-- -- -- keys do not exist in granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 0, filter = 'K3 K4'); + +SELECT '-- -- Check that the text index actually gets used (FixedString)'; + +SELECT '-- -- -- keys exist in both granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 1, filter = 'K0 K1'); + +SELECT '-- -- -- keys exist in both granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 1, filter = 'K1 K2'); + +SELECT '-- -- -- keys exist only in the first granule'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 1, filter = 'K2 K3'); + +SELECT '-- -- -- keys do not exist in granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 1, filter = 'K3 K4'); + +SELECT '-- hasAllTokens support'; + +SELECT '-- -- query with String'; + +SELECT count() FROM tab WHERE hasAllTokens(mapKeys(map), 'K0 K1'); +SELECT count() FROM tab WHERE hasAllTokens(mapKeys(map), 'K1 K2'); +SELECT count() FROM tab WHERE hasAllTokens(mapKeys(map), 'K2 K3'); +SELECT count() FROM tab WHERE hasAllTokens(mapKeys(map), 'K3 K4'); + +SELECT '-- -- query with FixedString'; + +SELECT count() FROM tab WHERE hasAllTokens(mapKeys(map_fixed), 'K0 K1'); +SELECT count() FROM tab WHERE hasAllTokens(mapKeys(map_fixed), 'K1 K2'); +SELECT count() FROM tab WHERE hasAllTokens(mapKeys(map_fixed), 'K2 K3'); +SELECT count() FROM tab WHERE hasAllTokens(mapKeys(map_fixed), 'K3 K4'); + +DROP VIEW 
IF EXISTS explain_index_has_all_tokens; +CREATE VIEW explain_index_has_all_tokens AS ( + SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE ( + CASE + WHEN {use_idx_fixed:boolean} = 1 THEN hasAllTokens(mapKeys(map_fixed), {filter:String}) + ELSE hasAllTokens(mapKeys(map), {filter:String}) + END + ) + ) + WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' + LIMIT 2, 3 +); + +SELECT '-- -- Check that the text index actually gets used (String)'; + +SELECT '-- -- -- keys exist in the first granule'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 0, filter = 'K0 K1'); + +SELECT '-- -- -- keys exist in the second granule'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 0, filter = 'K1 K2'); + +SELECT '-- -- -- keys do not exist in granules'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 0, filter = 'K2 K3'); + +SELECT '-- -- -- keys do not exist in granules'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 0, filter = 'K3 K4'); + +SELECT '-- -- Check that the text index actually gets used (FixedString)'; + +SELECT '-- -- -- keys exist in the first granule'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 1, filter = 'K0 K1'); + +SELECT '-- -- -- keys exist in the second granule'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 1, filter = 'K1 K2'); + +SELECT '-- -- -- keys do not exist in granules'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 1, filter = 'K2 K3'); + +SELECT '-- -- -- keys do not exist in granules'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 1, filter = 'K3 K4'); + +DROP VIEW explain_index_has_any_tokens; +DROP VIEW explain_index_has_all_tokens; + +SELECT 'Function mapValues'; + +DROP TABLE tab; + +CREATE TABLE tab +( + id UInt32, + map Map(String, String), + map_fixed Map(String, FixedString(2)), + INDEX map_values_idx mapValues(map) TYPE text(tokenizer = 'splitByNonAlpha'), + INDEX map_fixed_values_idx mapValues(map_fixed) TYPE text(tokenizer = 'splitByNonAlpha'), +) +ENGINE = MergeTree +ORDER BY (id); + +INSERT INTO tab VALUES (0, {'K0':'V0', 'K1':'V1'}, {'K0':'V0', 'K1':'V1'}); +INSERT INTO tab VALUES (1, {'K1':'V1', 'K2':'V2'}, {'K1':'V1', 'K2':'V2'}); + +SELECT '-- operator[] support'; + +SELECT '-- -- query with String'; + +SELECT count() FROM tab WHERE map['K0'] = 'V0'; +SELECT count() FROM tab WHERE map['K1'] = 'V1'; +SELECT count() FROM tab WHERE map['K2'] = 'V2'; +SELECT count() FROM tab WHERE map['K3'] = 'V3'; + +SELECT '-- -- query with FixedString'; + +SELECT count() FROM tab WHERE map_fixed['K0'] = toFixedString('V0', 2); +SELECT count() FROM tab WHERE map_fixed['K1'] = toFixedString('V1', 2); +SELECT count() FROM tab WHERE map_fixed['K2'] = toFixedString('V2', 2); +SELECT count() FROM tab WHERE map_fixed['K3'] = toFixedString('V3', 2); + +SELECT '-- -- Check that the text index actually gets used (String)'; + +SELECT '-- -- -- key exists only in the first granule'; +SELECT * FROM explain_index_equals(use_idx_fixed = 0, filter = 'K3', value = 'V0'); + +SELECT '-- -- -- key exists only in the second granule'; +SELECT * FROM explain_index_equals(use_idx_fixed = 0, filter = 'K3', value = 'V2'); + +SELECT '-- -- -- key exists only in both granules'; +SELECT * FROM explain_index_equals(use_idx_fixed = 0, filter = 'K3', value = 'V1'); + +SELECT '-- -- -- key does not exist in granules'; +SELECT * FROM explain_index_equals(use_idx_fixed = 0, filter = 'K3', value = 'V3'); + +SELECT 
'-- -- Check that the text index actually gets used (FixedString)'; + +SELECT '-- -- -- key exists only in the first granule'; +SELECT * FROM explain_index_equals(use_idx_fixed = 1, filter = 'K3', value = toFixedString('V0', 2)); + +SELECT '-- -- -- key exists only in the second granule'; +SELECT * FROM explain_index_equals(use_idx_fixed = 1, filter = 'K3', value = toFixedString('V2', 2)); + +SELECT '-- -- -- key exists only in both granules'; +SELECT * FROM explain_index_equals(use_idx_fixed = 1, filter = 'K3', value = toFixedString('V1', 2)); + +SELECT '-- -- -- key does not exist in granules'; +SELECT * FROM explain_index_equals(use_idx_fixed = 1, filter = 'K3', value = toFixedString('V3', 2)); + +DROP VIEW IF EXISTS explain_index_has; +CREATE VIEW explain_index_has AS ( + SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE ( + CASE + WHEN {use_idx_fixed:boolean} = 1 THEN has(mapValues(map_fixed), {filter:FixedString(2)}) + ELSE has(mapValues(map), {filter:String}) + END + ) + ) + WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' + LIMIT 2, 3 +); + +SELECT '-- has support'; + +SELECT '-- -- Check that the text index actually gets used (String)'; + +SELECT '-- -- -- key exists only in the first granule'; +SELECT * FROM explain_index_has(use_idx_fixed = 0, filter = 'V0'); + +SELECT '-- -- -- key exists only in the second granule'; +SELECT * FROM explain_index_has(use_idx_fixed = 0, filter = 'V2'); + +SELECT '-- -- -- key exists only in both granules'; +SELECT * FROM explain_index_has(use_idx_fixed = 0, filter = 'V1'); + +SELECT '-- -- -- key does not exist in granules'; +SELECT * FROM explain_index_has(use_idx_fixed = 0, filter = 'V3'); + +SELECT '-- -- Check that the text index actually gets used (FixedString)'; + +SELECT '-- -- -- key exists only in the first granule'; +SELECT * FROM explain_index_has(use_idx_fixed = 1, filter = 'V0'); + +SELECT '-- -- -- key exists only in the second granule'; +SELECT * FROM explain_index_has(use_idx_fixed = 1, filter = 'V2'); + +SELECT '-- -- -- key exists only in both granules'; +SELECT * FROM explain_index_has(use_idx_fixed = 1, filter = 'V1'); + +SELECT '-- -- -- key does not exist in granules'; +SELECT * FROM explain_index_has(use_idx_fixed = 1, filter = 'V3'); + +SELECT '-- hasAnyTokens support'; + +SELECT '-- -- query with String'; + +SELECT count() FROM tab WHERE hasAnyTokens(mapValues(map), 'V0 V1'); +SELECT count() FROM tab WHERE hasAnyTokens(mapValues(map), 'V1 V2'); +SELECT count() FROM tab WHERE hasAnyTokens(mapValues(map), 'V2 V3'); +SELECT count() FROM tab WHERE hasAnyTokens(mapValues(map), 'V3 V4'); + +SELECT '-- -- query with FixedString'; + +SELECT count() FROM tab WHERE hasAnyTokens(mapValues(map_fixed), 'V0 V1'); +SELECT count() FROM tab WHERE hasAnyTokens(mapValues(map_fixed), 'V1 V2'); +SELECT count() FROM tab WHERE hasAnyTokens(mapValues(map_fixed), 'V2 V3'); +SELECT count() FROM tab WHERE hasAnyTokens(mapValues(map_fixed), 'V3 V4'); + +DROP VIEW IF EXISTS explain_index_has_any_tokens; +CREATE VIEW explain_index_has_any_tokens AS ( + SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE ( + CASE + WHEN {use_idx_fixed:boolean} = 1 THEN hasAnyTokens(mapValues(map_fixed), {filter:String}) + ELSE hasAnyTokens(mapValues(map), {filter:String}) + END + ) + ) + WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' + LIMIT 2, 3 +); + +SELECT '-- -- Check that the text index 
actually gets used (String)'; + +SELECT '-- -- -- key exists only in the first granule'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 0, filter = 'V0'); + +SELECT '-- -- -- key exists only in the second granule'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 0, filter = 'V2'); + +SELECT '-- -- -- key exists only in both granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 0, filter = 'V0 V1'); + +SELECT '-- -- -- key does not exist in granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 0, filter = 'V3'); + +SELECT '-- -- Check that the text index actually gets used (FixedString)'; + +SELECT '-- -- -- key exists only in the first granule'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 1, filter = 'V0'); + +SELECT '-- -- -- key exists only in the second granule'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 1, filter = 'V2'); + +SELECT '-- -- -- key exists only in both granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 1, filter = 'V0 V1'); + +SELECT '-- -- -- key does not exist in granules'; +SELECT * FROM explain_index_has_any_tokens(use_idx_fixed = 1, filter = 'V3'); + +SELECT '-- hasAllTokens support'; + +SELECT '-- -- query with String'; + +SELECT count() FROM tab WHERE hasAllTokens(mapValues(map), 'V0 V1'); +SELECT count() FROM tab WHERE hasAllTokens(mapValues(map), 'V1 V2'); +SELECT count() FROM tab WHERE hasAllTokens(mapValues(map), 'V2 V3'); +SELECT count() FROM tab WHERE hasAllTokens(mapValues(map), 'V3 V4'); + +SELECT '-- -- query with FixedString'; + +SELECT count() FROM tab WHERE hasAllTokens(mapValues(map_fixed), 'V0 V1'); +SELECT count() FROM tab WHERE hasAllTokens(mapValues(map_fixed), 'V1 V2'); +SELECT count() FROM tab WHERE hasAllTokens(mapValues(map_fixed), 'V2 V3'); +SELECT count() FROM tab WHERE hasAllTokens(mapValues(map_fixed), 'V3 V4'); + +DROP VIEW IF EXISTS explain_index_has_all_tokens; +CREATE VIEW explain_index_has_all_tokens AS ( + SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes=1 + SELECT count() FROM tab WHERE ( + CASE + WHEN {use_idx_fixed:boolean} = 1 THEN hasAllTokens(mapValues(map_fixed), {filter:String}) + ELSE hasAllTokens(mapValues(map), {filter:String}) + END + ) + ) + WHERE explain LIKE '%Description:%' OR explain LIKE '%Parts:%' OR explain LIKE '%Granules:%' + LIMIT 2, 3 +); + +SELECT '-- -- Check that the text index actually gets used (String)'; + +SELECT '-- -- -- key exists only in the first granule'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 0, filter = 'V0'); + +SELECT '-- -- -- key exists only in the second granule'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 0, filter = 'V2'); + +SELECT '-- -- -- key exists only in the first granules'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 0, filter = 'V0 V1'); + +SELECT '-- -- -- key does not exist in granules'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 0, filter = 'V3'); + +SELECT '-- -- Check that the text index actually gets used (FixedString)'; + +SELECT '-- -- -- key exists only in the first granule'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 1, filter = 'V0'); + +SELECT '-- -- -- key exists only in the second granule'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 1, filter = 'V2'); + +SELECT '-- -- -- key exists only in the first granules'; +SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 1, filter = 'V0 V1'); + +SELECT '-- -- -- key does not exist in granules'; 
+SELECT * FROM explain_index_has_all_tokens(use_idx_fixed = 1, filter = 'V3'); + +DROP VIEW explain_index_mapContains; +DROP VIEW explain_index_equals; +DROP VIEW explain_index_has; +DROP VIEW explain_index_has_any_tokens; +DROP VIEW explain_index_has_all_tokens; +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_match_predicate/ast.json b/parser/testdata/02346_text_index_match_predicate/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_match_predicate/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_match_predicate/metadata.json b/parser/testdata/02346_text_index_match_predicate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_match_predicate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_match_predicate/query.sql b/parser/testdata/02346_text_index_match_predicate/query.sql new file mode 100644 index 000000000..620e9173f --- /dev/null +++ b/parser/testdata/02346_text_index_match_predicate/query.sql @@ -0,0 +1,108 @@ +-- Tags: no-parallel-replicas + +-- Tests that match() utilizes the text index + +SET allow_experimental_full_text_index = true; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + id UInt32, + str String, + INDEX inv_idx(str) TYPE text(tokenizer = 'splitByNonAlpha') GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity = 1; +INSERT INTO tab VALUES (1, 'Well, Hello ClickHouse !'), (2, 'Well, Hello World !'), (3, 'Good Weather !'), (4, 'Say Hello !'), (5, 'Its An OLAP Database'), (6, 'True World Champion'); + +SELECT * FROM tab WHERE match(str, ' Hello (ClickHouse|World) ') ORDER BY id; + +-- Read 2/6 granules +-- Required string: ' Hello ' +-- Alternatives: ' Hello ClickHouse ', ' Hello World ' + +SELECT trim(explain) +FROM +( + EXPLAIN PLAN indexes=1 + SELECT * FROM tab WHERE match(str, ' Hello (ClickHouse|World) ') ORDER BY id +) +WHERE + explain LIKE '%Granules: %' +SETTINGS + enable_analyzer = 0; + +SELECT trim(explain) +FROM +( + EXPLAIN PLAN indexes=1 + SELECT * FROM tab WHERE match(str, ' Hello (ClickHouse|World) ') ORDER BY id +) +WHERE + explain LIKE '%Granules: %' +SETTINGS + enable_analyzer = 1; + +SELECT '---'; + +SELECT * FROM tab WHERE match(str, '.* (ClickHouse|World) ') ORDER BY id; + +-- Read 3/6 granules +-- Required string: - +-- Alternatives: ' ClickHouse ', ' World ' + +SELECT trim(explain) +FROM +( + EXPLAIN PLAN indexes = 1 + SELECT * FROM tab WHERE match(str, '.* (ClickHouse|World) ') ORDER BY id +) +WHERE + explain LIKE '%Granules: %' +SETTINGS + enable_analyzer = 0; + +SELECT trim(explain) +FROM +( + EXPLAIN PLAN indexes = 1 + SELECT * FROM tab WHERE match(str, '.* (ClickHouse|World) ') ORDER BY id +) +WHERE + explain LIKE '%Granules: %' +SETTINGS + enable_analyzer = 1; + +SELECT '---'; + +SELECT * FROM tab WHERE match(str, ' OLAP .*') ORDER BY id; + +-- Read 1/6 granules +-- Required string: ' OLAP ' +-- Alternatives: - + +SELECT trim(explain) +FROM +( + EXPLAIN PLAN indexes = 1 + SELECT * FROM tab WHERE match(str, ' OLAP (.*?)*') ORDER BY id +) +WHERE + explain LIKE '%Granules: %' +SETTINGS + enable_analyzer = 0; + +SELECT trim(explain) +FROM +( + EXPLAIN PLAN indexes = 1 + SELECT * FROM tab WHERE match(str, ' OLAP (.*?)*') ORDER BY id +) +WHERE + explain LIKE '%Granules: %' +SETTINGS + enable_analyzer = 1; + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_on_lower_column/ast.json 
b/parser/testdata/02346_text_index_on_lower_column/ast.json new file mode 100644 index 000000000..29477cf3d --- /dev/null +++ b/parser/testdata/02346_text_index_on_lower_column/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00097942, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02346_text_index_on_lower_column/metadata.json b/parser/testdata/02346_text_index_on_lower_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_on_lower_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_on_lower_column/query.sql b/parser/testdata/02346_text_index_on_lower_column/query.sql new file mode 100644 index 000000000..47b2f1ef1 --- /dev/null +++ b/parser/testdata/02346_text_index_on_lower_column/query.sql @@ -0,0 +1,70 @@ +SET enable_analyzer = 1; +SET max_parallel_replicas = 1; +SET use_skip_indexes_on_data_read = 1; +SET allow_experimental_full_text_index = 1; + +-- Tests text index creation on lower(col) and with lower-ed columns at search time + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + text String, + INDEX idx_text text TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO tab (text) VALUES ('Hello, world!'); + +SELECT count() FROM tab WHERE hasToken(text, 'Hello'); + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1, indexes = 1 SELECT count() FROM tab WHERE hasToken(text, 'Hello') SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain LIKE '%Filter column%' OR explain LIKE '%Name: idx_text%'; + +SELECT count() FROM tab WHERE hasToken(lower(text), lower('Hello')); + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1, indexes = 1 SELECT count() FROM tab WHERE hasToken(lower(text), lower('Hello')) SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain LIKE '%Filter column%' OR explain LIKE '%Name: idx_text%'; + +SELECT count() FROM tab WHERE hasAllTokens(text, ['Hello']); + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1, indexes = 1 SELECT count() FROM tab WHERE hasAllTokens(text, ['Hello']) SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain LIKE '%Filter column%' OR explain LIKE '%Name: idx_text%'; + +DROP TABLE tab; + +-- -------------------------- + +CREATE TABLE tab (text String, INDEX idx_text lower(text) TYPE text(tokenizer = 'splitByNonAlpha')) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO tab (text) VALUES ('Hello, world!'); + +SELECT count() FROM tab WHERE hasToken(text, 'Hello'); + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1, indexes = 1 SELECT count() FROM tab WHERE hasToken(text, 'Hello') SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain LIKE '%Filter column%' OR explain LIKE '%Name: idx_text%'; + +SELECT count() FROM tab WHERE hasToken(lower(text), lower('Hello')); + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1, indexes = 1 SELECT count() FROM tab WHERE hasToken(lower(text), lower('Hello')) SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain LIKE '%Filter column%' OR explain LIKE '%Name: idx_text%'; + +SELECT count() FROM tab WHERE hasAllTokens(lower(text), [lower('Hello')]); + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1, indexes = 1 SELECT count() FROM tab WHERE hasAllTokens(lower(text), [lower('Hello')]) SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain LIKE '%Filter 
column%' OR explain LIKE '%Name: idx_text%'; + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_parallel_replicas/ast.json b/parser/testdata/02346_text_index_parallel_replicas/ast.json new file mode 100644 index 000000000..2df63148f --- /dev/null +++ b/parser/testdata/02346_text_index_parallel_replicas/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.002060579, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02346_text_index_parallel_replicas/metadata.json b/parser/testdata/02346_text_index_parallel_replicas/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_parallel_replicas/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_parallel_replicas/query.sql b/parser/testdata/02346_text_index_parallel_replicas/query.sql new file mode 100644 index 000000000..55b3854cc --- /dev/null +++ b/parser/testdata/02346_text_index_parallel_replicas/query.sql @@ -0,0 +1,37 @@ +SET max_parallel_replicas = 3; +SET cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; +SET enable_parallel_replicas = 1; +SET parallel_replicas_for_non_replicated_merge_tree=1; +SET allow_experimental_full_text_index = 1; +SET use_skip_indexes_on_data_read = 1; +SET query_plan_direct_read_from_text_index = 1; +SET parallel_replicas_mark_segment_size = 128; +SET parallel_replicas_min_number_of_rows_per_replica = 1000; +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + id UInt64, + str String, + INDEX idx_str str TYPE text(tokenizer = splitByNonAlpha) GRANULARITY 8 +) +ENGINE = MergeTree ORDER BY id PARTITION BY id; + +INSERT INTO tab SELECT 1, arrayStringConcat(arrayMap(x -> toString(number + x * 2), range(5)), ' ') FROM numbers(0, 100000); +INSERT INTO tab SELECT 2, arrayStringConcat(arrayMap(x -> toString(number + x * 2), range(5)), ' ') FROM numbers(100000, 100000); +INSERT INTO tab SELECT 3, arrayStringConcat(arrayMap(x -> toString(number + x * 2), range(5)), ' ') FROM numbers(200000, 100000); + +SELECT count(), sum(id) FROM tab WHERE hasAnyTokens(str, ['34567', '134567', '234567']); +SELECT count(), sum(id) FROM tab WHERE str LIKE '% 34567 %'; + +SYSTEM FLUSH LOGS query_log; + +SELECT + sum(ProfileEvents['ParallelReplicasUsedCount']) > 0, + sum(ProfileEvents['TextIndexUsedEmbeddedPostings']) > 0 +FROM system.query_log +WHERE (current_database = currentDatabase() OR position(query, currentDatabase()) > 0) AND query LIKE '%SELECT%tab%hasAnyTokens%' AND type = 'QueryFinish'; + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_part_format/ast.json b/parser/testdata/02346_text_index_part_format/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_part_format/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_part_format/metadata.json b/parser/testdata/02346_text_index_part_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_part_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_part_format/query.sql b/parser/testdata/02346_text_index_part_format/query.sql new file mode 100644 index 000000000..0cf8a53e9 --- /dev/null +++ 
b/parser/testdata/02346_text_index_part_format/query.sql @@ -0,0 +1,56 @@ +-- Tags: no-fasttest, no-ordinary-database, no-asan +-- no-asan: runs too long + +-- Basic tests for a text index stored in compact vs. wide part format, and in full vs. packed part storage + +SET allow_experimental_full_text_index = 1; + +SET parallel_replicas_local_plan=1; -- this setting is randomized, set it explicitly to have local plan for parallel replicas + +DROP TABLE IF EXISTS tab_compact_full; +DROP TABLE IF EXISTS tab_wide_full; + +CREATE TABLE tab_compact_full(id Int32, str String, INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha') GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part = 1e9, min_rows_for_wide_part = 1e9, min_bytes_for_full_part_storage = 0, index_granularity = 3; +CREATE TABLE tab_wide_full(id Int32, str String, INDEX idx str TYPE text(tokenizer = 'splitByNonAlpha') GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0, min_bytes_for_full_part_storage = 0, index_granularity = 3; + +INSERT INTO tab_compact_full VALUES (0, 'foo'), (1, 'bar'), (2, 'baz'), (3, 'foo bar'), (4, 'foo baz'), (5, 'bar baz'), (6, 'abc'), (7, 'def'); +INSERT INTO tab_wide_full VALUES (0, 'foo'), (1, 'bar'), (2, 'baz'), (3, 'foo bar'), (4, 'foo baz'), (5, 'bar baz'), (6, 'abc'), (7, 'def'); + +SELECT 'Check part formats'; + +SELECT table, part_type FROM system.parts WHERE database = currentDatabase() AND table LIKE 'tab_%' ORDER BY table; + +SELECT 'Check tab_compact_full'; + +SELECT id, str +FROM tab_compact_full +WHERE hasToken(str, 'foo') +LIMIT 3; + +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + SELECT id + FROM tab_compact_full + WHERE hasToken(str, 'foo') + LIMIT 3 +) +WHERE explain LIKE '%text%' OR explain LIKE '%Granules:%'; + +SELECT 'Check tab_wide_full'; + +SELECT id, str +FROM tab_wide_full +WHERE hasToken(str, 'foo') +LIMIT 3; + +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + SELECT id + FROM tab_wide_full + WHERE hasToken(str, 'foo') + LIMIT 3 +) +WHERE explain LIKE '%text%' OR explain LIKE '%Granules:%'; + +DROP TABLE tab_compact_full; +DROP TABLE tab_wide_full; diff --git a/parser/testdata/02346_text_index_postings_cache/ast.json b/parser/testdata/02346_text_index_postings_cache/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_postings_cache/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_postings_cache/metadata.json b/parser/testdata/02346_text_index_postings_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_postings_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_postings_cache/query.sql b/parser/testdata/02346_text_index_postings_cache/query.sql new file mode 100644 index 000000000..454b20a09 --- /dev/null +++ b/parser/testdata/02346_text_index_postings_cache/query.sql @@ -0,0 +1,95 @@ +-- Tags: no-parallel, no-parallel-replicas +-- no-parallel: looks at server-wide metrics + +--- These tests verify that deserialized text index posting lists are cached across consecutive executions.
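+-- The cache is keyed per posting list: in the runs below, the first query for each distinct token is a miss, and repeating a token is a hit. As a sketch (assuming only the TextIndexPostingsCacheHits/TextIndexPostingsCacheMisses event names used below), the server-wide counters can be inspected via +-- SELECT event, value FROM system.events WHERE event LIKE 'TextIndexPostingsCache%';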
+ +SET enable_analyzer = 1; +SET allow_experimental_full_text_index = 1; +SET use_skip_indexes_on_data_read = 1; +SET query_plan_direct_read_from_text_index = 1; +SET use_text_index_postings_cache = 1; +SET log_queries = 1; +SET max_rows_to_read = 0; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab +( + id UInt32, + message String, + INDEX idx(message) TYPE text(tokenizer = array, dictionary_block_size = 128) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY (id) +SETTINGS index_granularity = 128; + +INSERT INTO tab SELECT number, 'text_pl_1' FROM numbers(64); +INSERT INTO tab SELECT number, 'text_pl_2' FROM numbers(64); +INSERT INTO tab SELECT number, 'text_pl_3' FROM numbers(64); + +DROP VIEW IF EXISTS text_index_cache_stats; +CREATE VIEW text_index_cache_stats AS ( + SELECT + concat('cache_hits = ', ProfileEvents['TextIndexPostingsCacheHits'], ', cache_misses = ', ProfileEvents['TextIndexPostingsCacheMisses']) + FROM system.query_log + WHERE query_kind ='Select' + AND current_database = currentDatabase() + AND endsWith(trimRight(query), concat('hasAnyTokens(message, \'', {filter:String}, '\');')) + AND type='QueryFinish' + ORDER BY query_start_time_microseconds DESC + LIMIT 1 +); + +SELECT '--- cache miss on the first token.'; +SELECT count() FROM tab WHERE hasAnyTokens(message, 'text_pl_1'); + +SYSTEM FLUSH LOGS query_log; +SELECT * FROM text_index_cache_stats(filter = 'text_pl_1'); + +SELECT '--- cache miss on the second token.'; +SELECT count() FROM tab WHERE hasAnyTokens(message, 'text_pl_2'); + +SYSTEM FLUSH LOGS query_log; +SELECT * FROM text_index_cache_stats(filter = 'text_pl_2'); + +SELECT '--- cache hit on the first token.'; +SELECT count() FROM tab WHERE hasAnyTokens(message, 'text_pl_1'); + +SYSTEM FLUSH LOGS query_log; +SELECT * FROM text_index_cache_stats(filter = 'text_pl_1'); + +SELECT '--- cache hit on the second token.'; +SELECT count() FROM tab WHERE hasAnyTokens(message, 'text_pl_2'); + +SYSTEM FLUSH LOGS query_log; +SELECT * FROM text_index_cache_stats(filter = 'text_pl_2'); + +SELECT '--- no profile events when cache is disabled.'; + +SET use_text_index_postings_cache = 0; + +SELECT count() FROM tab WHERE hasAnyTokens(message, 'text_pl_3'); + +SET use_text_index_postings_cache = 1; + +SYSTEM FLUSH LOGS query_log; +SELECT * FROM text_index_cache_stats(filter = 'text_pl_3'); + +SELECT 'Clear text index postings cache'; + +SYSTEM DROP TEXT INDEX POSTINGS CACHE; + +SELECT '--- cache miss on the first token.'; +SELECT count() FROM tab WHERE hasAnyTokens(message, 'text_pl_1'); + +SYSTEM FLUSH LOGS query_log; +SELECT * FROM text_index_cache_stats(filter = 'text_pl_1'); + +SELECT '--- cache hit on the first token.'; +SELECT count() FROM tab WHERE hasAnyTokens(message, 'text_pl_1'); + +SYSTEM FLUSH LOGS query_log; +SELECT * FROM text_index_cache_stats(filter = 'text_pl_1'); + +SYSTEM DROP TEXT INDEX POSTINGS CACHE; +DROP VIEW text_index_cache_stats; +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_prefetch/ast.json b/parser/testdata/02346_text_index_prefetch/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_prefetch/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_prefetch/metadata.json b/parser/testdata/02346_text_index_prefetch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_prefetch/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_prefetch/query.sql 
b/parser/testdata/02346_text_index_prefetch/query.sql new file mode 100644 index 000000000..f471c4ff5 --- /dev/null +++ b/parser/testdata/02346_text_index_prefetch/query.sql @@ -0,0 +1,34 @@ +-- Tags: no-fasttest, no-parallel-replicas + +SET allow_experimental_full_text_index = 1; +SET use_skip_indexes_on_data_read = 1; +SET allow_prefetched_read_pool_for_remote_filesystem = 1; +SET remote_filesystem_read_prefetch = 1; +SET remote_filesystem_read_method = 'threadpool'; +SET max_rows_to_read = 0; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + id UInt64, + str String, + INDEX idx_str str TYPE text(tokenizer = splitByNonAlpha) GRANULARITY 8 +) +ENGINE = MergeTree ORDER BY id PARTITION BY id +SETTINGS storage_policy = 's3_cache'; + +INSERT INTO tab SELECT 1, arrayStringConcat(arrayMap(x -> toString(number + x * 2), range(5)), ' ') FROM numbers(0, 100000); +INSERT INTO tab SELECT 2, arrayStringConcat(arrayMap(x -> toString(number + x * 2), range(5)), ' ') FROM numbers(100000, 100000); +INSERT INTO tab SELECT 3, arrayStringConcat(arrayMap(x -> toString(number + x * 2), range(5)), ' ') FROM numbers(200000, 100000); + +SELECT count(), sum(id) FROM tab WHERE hasAnyTokens(str, ['34567', '134567', '234567']); + +SYSTEM FLUSH LOGS query_log; + +SELECT + ProfileEvents['RemoteFSPrefetchedReads'] > 0 +FROM system.query_log +WHERE current_database = currentDatabase() AND query LIKE '%SELECT count(), sum(id) FROM tab%' AND type = 'QueryFinish'; + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_preprocessor/ast.json b/parser/testdata/02346_text_index_preprocessor/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_preprocessor/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_preprocessor/metadata.json b/parser/testdata/02346_text_index_preprocessor/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_preprocessor/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_preprocessor/query.sql b/parser/testdata/02346_text_index_preprocessor/query.sql new file mode 100644 index 000000000..a70dc9a4b --- /dev/null +++ b/parser/testdata/02346_text_index_preprocessor/query.sql @@ -0,0 +1,193 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache and global udf factory + +SET allow_experimental_full_text_index = 1; +SET use_skip_indexes_on_data_read = 1; + +-- Tests the preprocessor argument for tokenizers in the text index definitions + +DROP TABLE IF EXISTS tab; + +SELECT 'Positive tests on preprocessor construction and use.'; + +SELECT '- Test single tokenizer and preprocessor argument.'; + +CREATE TABLE tab +( + key UInt64, + str String, + INDEX idx(str) TYPE text(tokenizer = 'splitByNonAlpha', preprocessor = lower(str)) +) +ENGINE = MergeTree +ORDER BY key; + +INSERT INTO tab VALUES (1, 'foo'), (2, 'BAR'), (3, 'Baz'); + +SELECT count() FROM tab WHERE hasToken(str, 'foo'); +SELECT count() FROM tab WHERE hasToken(str, 'FOO'); + +SELECT count() FROM tab WHERE hasToken(str, 'BAR'); +SELECT count() FROM tab WHERE hasToken(str, 'Baz'); + +SELECT count() FROM tab WHERE hasToken(str, 'bar'); +SELECT count() FROM tab WHERE hasToken(str, 'baz'); + +DROP TABLE tab; + +SELECT '- Test preprocessor declaration using column more than once.'; +CREATE TABLE tab +( + key UInt64, + str String, + INDEX idx(str) TYPE text(tokenizer = 'splitByNonAlpha', preprocessor = concat(str, str)) +) +ENGINE = 
MergeTree +ORDER BY tuple(); + + +INSERT INTO tab VALUES (1, 'foo'), (2, 'BAR'), (3, 'Baz'); + +SELECT count() FROM tab WHERE hasToken(str, 'foo'); +SELECT count() FROM tab WHERE hasToken(str, 'FOO'); + +SELECT count() FROM tab WHERE hasToken(str, 'BAR'); +SELECT count() FROM tab WHERE hasToken(str, 'Baz'); + +SELECT count() FROM tab WHERE hasToken(str, 'bar'); +SELECT count() FROM tab WHERE hasToken(str, 'baz'); + +DROP TABLE tab; + +SELECT '- Test preprocessor declaration using udf.'; +DROP FUNCTION IF EXISTS udf_preprocessor; +CREATE FUNCTION udf_preprocessor AS (s) -> concat(s, lower(s)); + +CREATE TABLE tab +( + key UInt64, + str String, + INDEX idx(str) TYPE text(tokenizer = 'splitByNonAlpha', preprocessor = udf_preprocessor(str)) +) +ENGINE = MergeTree +ORDER BY tuple(); + + +INSERT INTO tab VALUES (1, 'foo'), (2, 'BAR'), (3, 'Baz'); + +SELECT count() FROM tab WHERE hasToken(str, 'foo'); +SELECT count() FROM tab WHERE hasToken(str, 'FOO'); + +SELECT count() FROM tab WHERE hasToken(str, 'BAR'); +SELECT count() FROM tab WHERE hasToken(str, 'Baz'); + +SELECT count() FROM tab WHERE hasToken(str, 'bar'); +SELECT count() FROM tab WHERE hasToken(str, 'baz'); + +DROP TABLE tab; +DROP FUNCTION udf_preprocessor; + +SELECT 'Negative tests on preprocessor construction validations.'; + +-- The preprocessor argument must reference the index column +CREATE TABLE tab +( + key UInt64, + str String, + other_str String, + INDEX idx(str) TYPE text(tokenizer = 'splitByNonAlpha', preprocessor = lower(other_str)) +) +ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } + +SELECT '- The preprocessor argument must not reference non-indexed columns'; +CREATE TABLE tab +( + key UInt64, + str String, + other_str String, + INDEX idx(str) TYPE text(tokenizer = 'splitByNonAlpha', preprocessor = concat(str, other_str)) +) +ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } + +SELECT '- Index definition may not be an expression when there is a preprocessor'; +CREATE TABLE tab +( + key UInt64, + str String, + INDEX idx(upper(str)) TYPE text(tokenizer = 'splitByNonAlpha', preprocessor = lower(str)) +) +ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } + +SELECT '-- Not even the same expression'; +CREATE TABLE tab +( + key UInt64, + str String, + INDEX idx(lower(str)) TYPE text(tokenizer = 'splitByNonAlpha', preprocessor = lower(str)) +) +ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } + +CREATE TABLE tab +( + key UInt64, + str String, + INDEX idx(upper(str)) TYPE text(tokenizer = 'splitByNonAlpha', preprocessor = lower(upper(str))) +) +ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } + +SELECT '- The preprocessor must be an expression'; +CREATE TABLE tab +( + key UInt64, + str String, + INDEX idx(str) TYPE text(tokenizer = 'splitByNonAlpha', preprocessor = nonExistingFunction) +) +ENGINE = MergeTree ORDER BY key; -- { serverError INCORRECT_QUERY } + +SELECT '- The preprocessor must be an expression, with existing functions'; +CREATE TABLE tab +( + key UInt64, + str String, + INDEX idx(str) TYPE text(tokenizer = 'splitByNonAlpha', preprocessor = nonExistingFunction(str)) +) +ENGINE = MergeTree ORDER BY tuple(); -- { serverError UNKNOWN_FUNCTION } + +SELECT '- The preprocessor must have input and output values of the same type (here: String)'; +CREATE TABLE tab +( + key UInt64, + str String, + INDEX idx(str) TYPE text(tokenizer = 'splitByNonAlpha', preprocessor = length(str)) +) +ENGINE = MergeTree ORDER BY tuple(); --
{ serverError INCORRECT_QUERY } + +SELECT '- The preprocessor expression must use the column identifier'; +CREATE TABLE tab +( + key UInt64, + str String, + INDEX idx(str) TYPE text(tokenizer = 'splitByNonAlpha', preprocessor = hostname()) +) +ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +SELECT '- The preprocessor expression must use only deterministic functions'; +CREATE TABLE tab +( + key UInt64, + str String, + INDEX idx(str) TYPE text(tokenizer = 'splitByNonAlpha', preprocessor = concat(str, toString(rand()))) +) +ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +SELECT '- The preprocessor expression is not compatible with array columns (yet)'; +CREATE TABLE tab +( + key UInt64, + arr_str Array(String), + INDEX idx(arr_str) TYPE text(tokenizer = 'splitByNonAlpha', preprocessor = lower(arr_str)) +) +ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + + + +DROP TABLE IF EXISTS tab; diff --git a/parser/testdata/02346_text_index_queries/ast.json b/parser/testdata/02346_text_index_queries/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02346_text_index_queries/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02346_text_index_queries/metadata.json b/parser/testdata/02346_text_index_queries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_queries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_queries/query.sql b/parser/testdata/02346_text_index_queries/query.sql new file mode 100644 index 000000000..2c7801c16 --- /dev/null +++ b/parser/testdata/02346_text_index_queries/query.sql @@ -0,0 +1,221 @@ +-- Tags: no-fasttest +-- no-fasttest: It can be slow + +SET allow_experimental_full_text_index = 1; +SET log_queries = 1; +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +-- Affects the number of read rows. 
+SET use_skip_indexes_on_data_read = 0; + +---------------------------------------------------- +SELECT 'Test text(tokenizer = ngrams(2))'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(k UInt64, s String, INDEX af(s) TYPE text(tokenizer = ngrams(2)) GRANULARITY 1) + ENGINE = MergeTree() ORDER BY k + SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi'; + +INSERT INTO tab VALUES (101, 'Alick a01'), (102, 'Blick a02'), (103, 'Click a03'), (104, 'Dlick a04'), (105, 'Elick a05'), (106, 'Alick a06'), (107, 'Blick a07'), (108, 'Click a08'), (109, 'Dlick a09'), (110, 'Elick a10'), (111, 'Alick b01'), (112, 'Blick b02'), (113, 'Click b03'), (114, 'Dlick b04'), (115, 'Elick b05'), (116, 'Alick b06'), (117, 'Blick b07'), (118, 'Click b08'), (119, 'Dlick b09'), (120, 'Elick b10'); + +-- check text index was created +SELECT name, type FROM system.data_skipping_indices WHERE table =='tab' AND database = currentDatabase() LIMIT 1; + +-- throw in a random consistency check +CHECK TABLE tab SETTINGS check_query_single_value_result = 1; + +-- search text index with == +SELECT * FROM tab WHERE s == 'Alick a01'; + +-- check the query only read 1 granule (2 rows total; each granule has 2 rows) +SYSTEM FLUSH LOGS query_log; +SELECT read_rows==2 from system.query_log + WHERE query_kind ='Select' + AND current_database = currentDatabase() + AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE s == \'Alick a01\';') + AND type='QueryFinish' + AND result_rows==1 + LIMIT 1; + +-- search text index with LIKE +SELECT * FROM tab WHERE s LIKE '%01%' ORDER BY k; + +-- check the query only read 2 granules (4 rows total; each granule has 2 rows) +SYSTEM FLUSH LOGS query_log; +SELECT read_rows==4 from system.query_log + WHERE query_kind ='Select' + AND current_database = currentDatabase() + AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE s LIKE \'%01%\' ORDER BY k;') + AND type='QueryFinish' + AND result_rows==2 + LIMIT 1; + +-- search text index with hasToken +SELECT * FROM tab WHERE hasToken(s, 'Click') ORDER BY k; + +-- check the query only read 4 granules (8 rows total; each granule has 2 rows) +SYSTEM FLUSH LOGS query_log; +SELECT read_rows==8 from system.query_log + WHERE query_kind ='Select' + AND current_database = currentDatabase() + AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE hasToken(s, \'Click\') ORDER BY k;') + AND type='QueryFinish' + AND result_rows==4 + LIMIT 1; + +---------------------------------------------------- +SELECT 'Test text(tokenizer = "splitByNonAlpha")'; + +DROP TABLE IF EXISTS tab_x; + +CREATE TABLE tab_x(k UInt64, s String, INDEX af(s) TYPE text(tokenizer = 'splitByNonAlpha') GRANULARITY 1) + ENGINE = MergeTree() ORDER BY k + SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi'; + +INSERT INTO tab_x VALUES (101, 'x Alick a01 y'), (102, 'x Blick a02 y'), (103, 'x Click a03 y'), (104, 'x Dlick a04 y'), (105, 'x Elick a05 y'), (106, 'x Alick a06 y'), (107, 'x Blick a07 y'), (108, 'x Click a08 y'), (109, 'x Dlick a09 y'), (110, 'x Elick a10 y'), (111, 'x Alick b01 y'), (112, 'x Blick b02 y'), (113, 'x Click b03 y'), (114, 'x Dlick b04 y'), (115, 'x Elick b05 y'), (116, 'x Alick b06 y'), (117, 'x Blick b07 y'), (118, 'x Click b08 y'), (119, 'x Dlick b09 y'), (120, 'x Elick b10 y'); + +-- check text index was created +SELECT name, type FROM system.data_skipping_indices WHERE table == 'tab_x' AND database = currentDatabase() LIMIT 1; + +-- search text index with hasToken +SELECT * FROM tab_x WHERE hasToken(s, 'Alick') ORDER BY k; + +-- check the
query only read 4 granules (8 rows total; each granule has 2 rows) +SYSTEM FLUSH LOGS query_log; +SELECT read_rows==8 from system.query_log + WHERE query_kind ='Select' + AND current_database = currentDatabase() + AND endsWith(trimRight(query), 'SELECT * FROM tab_x WHERE hasToken(s, \'Alick\') ORDER BY k;') + AND type='QueryFinish' + AND result_rows==4 + LIMIT 1; + +-- search text index with IN operator +SELECT * FROM tab_x WHERE s IN ('x Alick a01 y', 'x Alick a06 y') ORDER BY k; + +-- check the query only read 2 granules (4 rows total; each granule has 2 rows) +SYSTEM FLUSH LOGS query_log; +SELECT read_rows==4 from system.query_log + WHERE query_kind ='Select' + AND current_database = currentDatabase() + AND endsWith(trimRight(query), 'SELECT * FROM tab_x WHERE s IN (\'x Alick a01 y\', \'x Alick a06 y\') ORDER BY k;') + AND type='QueryFinish' + AND result_rows==2 + LIMIT 1; + +---------------------------------------------------- +SELECT 'Test text(tokenizer = ngrams(2)) on a column with two parts'; + + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(k UInt64, s String) + ENGINE = MergeTree() ORDER BY k + SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi'; + +INSERT INTO tab VALUES (101, 'Alick a01'), (102, 'Blick a02'), (103, 'Click a03'), (104, 'Dlick a04'), (105, 'Elick a05'), (106, 'Alick a06'), (107, 'Blick a07'), (108, 'Click a08'), (109, 'Dlick a09'), (110, 'Elick b10'), (111, 'Alick b01'), (112, 'Blick b02'), (113, 'Click b03'), (114, 'Dlick b04'), (115, 'Elick b05'), (116, 'Alick b06'), (117, 'Blick b07'), (118, 'Click b08'), (119, 'Dlick b09'), (120, 'Elick b10'); +INSERT INTO tab VALUES (201, 'rick c01'), (202, 'mick c02'), (203, 'nick c03'); + +ALTER TABLE tab ADD INDEX af(s) TYPE text(tokenizer = ngrams(2)) GRANULARITY 1 SETTINGS mutations_sync = 2; +OPTIMIZE TABLE tab FINAL; + +-- check text index was created +SELECT name, type FROM system.data_skipping_indices WHERE table == 'tab' AND database = currentDatabase() LIMIT 1; + +-- search text index +SELECT * FROM tab WHERE s LIKE '%01%' ORDER BY k; + +-- check the query only read 3 granules (6 rows total; each granule has 2 rows) +SYSTEM FLUSH LOGS query_log; +SELECT read_rows==6 from system.query_log + WHERE query_kind ='Select' + AND current_database = currentDatabase() + AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE s LIKE \'%01%\' ORDER BY k;') + AND type='QueryFinish' + AND result_rows==3 + LIMIT 1; + +---------------------------------------------------- +SELECT 'Test text(tokenizer = ngrams(2)) on UTF-8 data'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(k UInt64, s String, INDEX af(s) TYPE text(tokenizer = ngrams(2)) GRANULARITY 1) + ENGINE = MergeTree() + ORDER BY k + SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi'; + +INSERT INTO tab VALUES (101, 'Alick 好'), (102, 'clickhouse你好'), (103, 'Click 你'), (104, 'Dlick 你a好'), (105, 'Elick 好好你你'), (106, 'Alick 好a好a你a你'); + +-- check text index was created +SELECT name, type FROM system.data_skipping_indices WHERE table == 'tab' AND database = currentDatabase() LIMIT 1; + +-- search text index +SELECT * FROM tab WHERE s LIKE '%你好%' ORDER BY k; + +-- check the query only read 1 granule (2 rows total; each granule has 2 rows) +SYSTEM FLUSH LOGS query_log; +SELECT read_rows==2 from system.query_log + WHERE query_kind ='Select' + AND current_database = currentDatabase() + AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE s LIKE \'%你好%\' ORDER BY k;') + AND type='QueryFinish' + AND result_rows==1 + LIMIT 1; +
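+-- For intuition, a sketch (commented out; not executed by the test): the ngrams(2) tokenizer indexes overlapping character pairs, which is what lets a substring predicate such as LIKE '%你好%' be answered from the index. +-- The standalone ngrams() string function, unrelated to the index machinery, produces tokens of the same shape: +-- SELECT ngrams('Alick', 2); -- ['Al','li','ic','ck'] +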
+---------------------------------------------------- +SELECT 'Test text(tokenizer = sparseGrams(3, 100)) on UTF-8 data'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(k UInt64, s String, INDEX af(s) TYPE text(tokenizer = sparseGrams(3, 100)) GRANULARITY 1) + ENGINE = MergeTree() + ORDER BY k + SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi'; + +INSERT INTO tab VALUES (101, 'Alick 好'), (102, 'clickhouse你好'), (103, 'Click 你'), (104, 'Dlick 你a好'), (105, 'Elick 好好你你'), (106, 'Alick 好a好a你a你'); + +-- check text index was created +SELECT name, type FROM system.data_skipping_indices WHERE table == 'tab' AND database = currentDatabase() LIMIT 1; + +-- search text index +SELECT * FROM tab WHERE s LIKE '%house你好%' ORDER BY k; + +-- check the query only read 1 granule (2 rows total; each granule has 2 rows) +SYSTEM FLUSH LOGS query_log; +SELECT read_rows==2 from system.query_log + WHERE query_kind ='Select' + AND current_database = currentDatabase() + AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE s LIKE \'%house你好%\' ORDER BY k;') + AND type='QueryFinish' + AND result_rows==1 + LIMIT 1; + +---------------------------------------------------- +SELECT 'Test text(tokenizer = sparseGrams(3, 100, 4)) on UTF-8 data'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(k UInt64, s String, INDEX af(s) TYPE text(tokenizer = sparseGrams(3, 100, 4)) GRANULARITY 1) + ENGINE = MergeTree() + ORDER BY k + SETTINGS index_granularity = 2, index_granularity_bytes = '10Mi'; + +INSERT INTO tab VALUES (101, 'Alick 好'), (102, 'clickhouse你好'), (103, 'Click 你'), (104, 'Dlick 你a好'), (105, 'Elick 好好你你'), (106, 'Alick 好a好a你a你'); + +-- check text index was created +SELECT name, type FROM system.data_skipping_indices WHERE table == 'tab' AND database = currentDatabase() LIMIT 1; + +-- search text index +SELECT * FROM tab WHERE s LIKE '%house你好%' ORDER BY k; + +-- check the query only read 1 granule (2 rows total; each granule has 2 rows) +SYSTEM FLUSH LOGS query_log; +SELECT read_rows==2 from system.query_log + WHERE query_kind ='Select' + AND current_database = currentDatabase() + AND endsWith(trimRight(query), 'SELECT * FROM tab WHERE s LIKE \'%house你好%\' ORDER BY k;') + AND type='QueryFinish' + AND result_rows==1 + LIMIT 1; diff --git a/parser/testdata/02346_text_index_replacingmergetree/ast.json b/parser/testdata/02346_text_index_replacingmergetree/ast.json new file mode 100644 index 000000000..9ea9f097c --- /dev/null +++ b/parser/testdata/02346_text_index_replacingmergetree/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001724252, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02346_text_index_replacingmergetree/metadata.json b/parser/testdata/02346_text_index_replacingmergetree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_replacingmergetree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_replacingmergetree/query.sql b/parser/testdata/02346_text_index_replacingmergetree/query.sql new file mode 100644 index 000000000..20410b620 --- /dev/null +++ b/parser/testdata/02346_text_index_replacingmergetree/query.sql @@ -0,0 +1,81 @@ +SET allow_experimental_full_text_index = 1; + +-- Tests text index with the 'ReplacingMergeTree' engine + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + id UInt32, + key String, + value String, +
INDEX idx_key(key) TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = ReplacingMergeTree() +ORDER BY id; + +SYSTEM STOP MERGES tab; + +INSERT INTO tab VALUES + (1, 'foo', 'foo'), + (2, 'bar', 'bar'); + +INSERT INTO tab VALUES + (1, 'foo', 'foo updated'), + (2, 'baz', 'baz'); + +SELECT 'Updated: foo'; + +SELECT '-- direct read disabled'; + +SET use_skip_indexes_on_data_read = 0; + +SELECT value FROM tab WHERE hasToken(key, 'foo') ORDER BY value; +SELECT value FROM tab FINAL WHERE hasToken(key, 'foo') ORDER BY value; + +SELECT '-- direct read enabled'; + +SET use_skip_indexes_on_data_read = 1; + +SELECT value FROM tab WHERE hasToken(key, 'foo') ORDER BY value; +SELECT value FROM tab FINAL WHERE hasToken(key, 'foo') ORDER BY value; + +SELECT 'Removed: bar'; + +SELECT '-- direct read disabled'; + +SET use_skip_indexes_on_data_read = 0; + +SELECT '-- -- value exists without FINAL'; +SELECT value FROM tab WHERE hasToken(key, 'bar') ORDER BY value; + +SELECT '-- -- value does not exist with FINAL'; +SELECT value FROM tab FINAL WHERE hasToken(key, 'bar') ORDER BY value; + +SELECT '-- direct read enabled'; + +SET use_skip_indexes_on_data_read = 1; + +SELECT '-- -- value exists without FINAL'; +SELECT value FROM tab WHERE hasToken(key, 'bar') ORDER BY value; + +SELECT '-- -- value does not exist with FINAL'; +SELECT value FROM tab FINAL WHERE hasToken(key, 'bar') ORDER BY value; + +SELECT 'New: baz'; + +SELECT '-- direct read disabled'; + +SET use_skip_indexes_on_data_read = 0; + +SELECT value FROM tab WHERE hasToken(key, 'baz') ORDER BY value; +SELECT value FROM tab FINAL WHERE hasToken(key, 'baz') ORDER BY value; + +SELECT '-- direct read enabled'; + +SET use_skip_indexes_on_data_read = 1; + +SELECT value FROM tab WHERE hasToken(key, 'baz') ORDER BY value; +SELECT value FROM tab FINAL WHERE hasToken(key, 'baz') ORDER BY value; + +DROP TABLE tab; diff --git a/parser/testdata/02346_text_index_summingmergetree/ast.json b/parser/testdata/02346_text_index_summingmergetree/ast.json new file mode 100644 index 000000000..83e069311 --- /dev/null +++ b/parser/testdata/02346_text_index_summingmergetree/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001171299, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02346_text_index_summingmergetree/metadata.json b/parser/testdata/02346_text_index_summingmergetree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_text_index_summingmergetree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_text_index_summingmergetree/query.sql b/parser/testdata/02346_text_index_summingmergetree/query.sql new file mode 100644 index 000000000..b809419d3 --- /dev/null +++ b/parser/testdata/02346_text_index_summingmergetree/query.sql @@ -0,0 +1,63 @@ +SET allow_experimental_full_text_index = 1; + +-- Tests text index with the 'SummingMergeTree' engine + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + id UInt32, + key String, + value UInt32, + INDEX idx_key(key) TYPE text(tokenizer = 'splitByNonAlpha') +) +ENGINE = SummingMergeTree() +ORDER BY id; + +SYSTEM STOP MERGES tab; + +INSERT INTO tab VALUES + (1, 'foo', 1), + (2, 'bar', 2); + +INSERT INTO tab VALUES + (1, 'foo', 1), + (2, 'bar', 2); + +SELECT 'Sum values from all parts'; + +SELECT '-- direct read disabled'; + +SET use_skip_indexes_on_data_read = 0; + +SELECT sum(value) 
FROM tab WHERE hasToken(key, 'foo'); +SELECT sum(value) FROM tab WHERE hasToken(key, 'bar'); + +SELECT '-- direct read enabled'; + +SET use_skip_indexes_on_data_read = 1; + +SELECT sum(value) FROM tab WHERE hasToken(key, 'foo'); +SELECT sum(value) FROM tab WHERE hasToken(key, 'bar'); + + +SELECT 'Values are summed up during merge'; + +SYSTEM START MERGES tab; +OPTIMIZE TABLE tab FINAL; -- emulate merge + +SELECT '-- direct read disabled'; + +SET use_skip_indexes_on_data_read = 0; + +SELECT value FROM tab WHERE hasToken(key, 'foo'); +SELECT value FROM tab WHERE hasToken(key, 'bar'); + +SELECT '-- direct read enabled'; + +SET use_skip_indexes_on_data_read = 1; + +SELECT value FROM tab WHERE hasToken(key, 'foo'); +SELECT value FROM tab WHERE hasToken(key, 'bar'); + +DROP TABLE tab; diff --git a/parser/testdata/02346_to_hour_monotonicity_fix/ast.json b/parser/testdata/02346_to_hour_monotonicity_fix/ast.json new file mode 100644 index 000000000..11db0e5c5 --- /dev/null +++ b/parser/testdata/02346_to_hour_monotonicity_fix/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_tz_hour (children 1)" + }, + { + "explain": " Identifier test_tz_hour" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001512979, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/02346_to_hour_monotonicity_fix/metadata.json b/parser/testdata/02346_to_hour_monotonicity_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_to_hour_monotonicity_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_to_hour_monotonicity_fix/query.sql b/parser/testdata/02346_to_hour_monotonicity_fix/query.sql new file mode 100644 index 000000000..5eb396970 --- /dev/null +++ b/parser/testdata/02346_to_hour_monotonicity_fix/query.sql @@ -0,0 +1,8 @@ +drop table if exists test_tz_hour; + +create table test_tz_hour(t DateTime, x String) engine MergeTree partition by toYYYYMMDD(t) order by x; +insert into test_tz_hour select toDateTime('2021-06-01 00:00:00', 'UTC') + number * 600, 'x' from numbers(1e3); + +select toHour(toTimeZone(t, 'UTC')) as toHour_UTC, toHour(toTimeZone(t, 'Asia/Jerusalem')) as toHour_Israel, count() from test_tz_hour where toHour_Israel = 8 group by toHour_UTC, toHour_Israel; + +drop table test_tz_hour; diff --git a/parser/testdata/02346_to_hour_monotonicity_fix_2/ast.json b/parser/testdata/02346_to_hour_monotonicity_fix_2/ast.json new file mode 100644 index 000000000..42cf182d9 --- /dev/null +++ b/parser/testdata/02346_to_hour_monotonicity_fix_2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00162373, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02346_to_hour_monotonicity_fix_2/metadata.json b/parser/testdata/02346_to_hour_monotonicity_fix_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02346_to_hour_monotonicity_fix_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02346_to_hour_monotonicity_fix_2/query.sql b/parser/testdata/02346_to_hour_monotonicity_fix_2/query.sql new file mode 100644 index 000000000..5d1452b43 --- /dev/null +++ b/parser/testdata/02346_to_hour_monotonicity_fix_2/query.sql @@ -0,0 +1,13 @@ 
+DROP TABLE IF EXISTS test; + +CREATE TABLE test (stamp DateTime('UTC')) ENGINE = MergeTree PARTITION BY toDate(stamp) ORDER BY tuple() as select toDateTime('2020-01-01', 'UTC')+number*60 from numbers(1e3); + +SELECT count() result FROM test WHERE toHour(stamp, 'America/Montreal') = 7; + +DROP TABLE test; + +CREATE TABLE test (stamp Nullable(DateTime('UTC'))) ENGINE = MergeTree PARTITION BY toDate(stamp) ORDER BY tuple() SETTINGS allow_nullable_key = 1 as select toDateTime('2020-01-01', 'UTC')+number*60 from numbers(1e3); + +SELECT count() result FROM test WHERE toHour(stamp, 'America/Montreal') = 7; + +DROP TABLE test; diff --git a/parser/testdata/02347_rank_corr_nan/ast.json b/parser/testdata/02347_rank_corr_nan/ast.json new file mode 100644 index 000000000..d93a4c2dc --- /dev/null +++ b/parser/testdata/02347_rank_corr_nan/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function rankCorr (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal Float64_nan" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001542617, + "rows_read": 14, + "bytes_read": 548 + } +} diff --git a/parser/testdata/02347_rank_corr_nan/metadata.json b/parser/testdata/02347_rank_corr_nan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02347_rank_corr_nan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02347_rank_corr_nan/query.sql b/parser/testdata/02347_rank_corr_nan/query.sql new file mode 100644 index 000000000..0fd755259 --- /dev/null +++ b/parser/testdata/02347_rank_corr_nan/query.sql @@ -0,0 +1 @@ +SELECT rankCorr(number, nan) FROM numbers(10); diff --git a/parser/testdata/02347_rank_corr_size_overflow/ast.json b/parser/testdata/02347_rank_corr_size_overflow/ast.json new file mode 100644 index 000000000..32e73dfe4 --- /dev/null +++ b/parser/testdata/02347_rank_corr_size_overflow/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function rankCorr (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function negate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + 
{ + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_5000000" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001408262, + "rows_read": 18, + "bytes_read": 728 + } +} diff --git a/parser/testdata/02347_rank_corr_size_overflow/metadata.json b/parser/testdata/02347_rank_corr_size_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02347_rank_corr_size_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02347_rank_corr_size_overflow/query.sql b/parser/testdata/02347_rank_corr_size_overflow/query.sql new file mode 100644 index 000000000..3ca1ced8d --- /dev/null +++ b/parser/testdata/02347_rank_corr_size_overflow/query.sql @@ -0,0 +1 @@ +SELECT round(rankCorr(number, -number)) FROM numbers(5000000); diff --git a/parser/testdata/02350_views_max_insert_threads/ast.json b/parser/testdata/02350_views_max_insert_threads/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02350_views_max_insert_threads/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02350_views_max_insert_threads/metadata.json b/parser/testdata/02350_views_max_insert_threads/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02350_views_max_insert_threads/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02350_views_max_insert_threads/query.sql b/parser/testdata/02350_views_max_insert_threads/query.sql new file mode 100644 index 000000000..ed099bff9 --- /dev/null +++ b/parser/testdata/02350_views_max_insert_threads/query.sql @@ -0,0 +1,17 @@ +-- Tags: no-parallel +-- no-parallel: it checks the number of threads, which can be lowered in presence of other queries +-- https://github.com/ClickHouse/ClickHouse/issues/37900 + +drop table if exists t; +drop table if exists t_mv; +create table t (a UInt64) Engine = Null; +create materialized view t_mv Engine = Null AS select now() as ts, max(a) from t group by ts; + +insert into t select * from numbers_mt(10e6) settings max_threads = 10, max_insert_threads=10, max_block_size=100000, parallel_view_processing=1; +system flush logs query_log; + +select peak_threads_usage>=10 from system.query_log where + event_date >= yesterday() and + current_database = currentDatabase() and + type = 'QueryFinish' and + startsWith(query, 'insert'); diff --git a/parser/testdata/02351_Map_combinator_dist/ast.json b/parser/testdata/02351_Map_combinator_dist/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02351_Map_combinator_dist/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02351_Map_combinator_dist/metadata.json b/parser/testdata/02351_Map_combinator_dist/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02351_Map_combinator_dist/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02351_Map_combinator_dist/query.sql b/parser/testdata/02351_Map_combinator_dist/query.sql new file mode 100644 index 000000000..937afa548 --- /dev/null +++ b/parser/testdata/02351_Map_combinator_dist/query.sql @@ -0,0 +1,81 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/35359 + +-- sumMap +SELECT x[67] +FROM +( + SELECT + A, + sumMap(CAST(arrayMap(x -> (x, 1), r), 'Map(UInt8,Int64)')) AS x + FROM remote('127.{1,1}', view( + SELECT + number AS A, + range(150) AS r + FROM numbers(60) + WHERE (A % 2) = shardNum() + )) + GROUP BY A + 
LIMIT 100000000 +) +WHERE A = 53 +SETTINGS prefer_localhost_replica = 0, distributed_aggregation_memory_efficient = 1, group_by_two_level_threshold = 0, group_by_two_level_threshold_bytes = 0; + +-- minMap +SELECT x[0] +FROM +( + SELECT + A, + minMap(CAST(arrayMap(x -> (x, 1), r), 'Map(UInt8,Int64)')) AS x + FROM remote('127.{1,1}', view( + SELECT + number AS A, + range(150) AS r + FROM numbers(60) + WHERE (A % 2) = shardNum() + )) + GROUP BY A + LIMIT 100000000 +) +WHERE A = 41 +SETTINGS prefer_localhost_replica = 0, distributed_aggregation_memory_efficient = 1, group_by_two_level_threshold = 0, group_by_two_level_threshold_bytes = 0; + +-- maxMap +SELECT x[0] +FROM +( + SELECT + A, + maxMap(CAST(arrayMap(x -> (x, 1), r), 'Map(UInt8,Int64)')) AS x + FROM remote('127.{1,1}', view( + SELECT + number AS A, + range(150) AS r + FROM numbers(60) + WHERE (A % 2) = shardNum() + )) + GROUP BY A + LIMIT 100000000 +) +WHERE A = 41 +SETTINGS prefer_localhost_replica = 0, distributed_aggregation_memory_efficient = 1, group_by_two_level_threshold = 0, group_by_two_level_threshold_bytes = 0; + +-- avgMap +SELECT x[0] +FROM +( + SELECT + A, + avgMap(CAST(arrayMap(x -> (x, 1), r), 'Map(UInt8,Int64)')) AS x + FROM remote('127.{1,1}', view( + SELECT + number AS A, + range(150) AS r + FROM numbers(60) + WHERE (A % 2) = shardNum() + )) + GROUP BY A + LIMIT 100000000 +) +WHERE A = 41 +SETTINGS prefer_localhost_replica = 0, distributed_aggregation_memory_efficient = 1, group_by_two_level_threshold = 0, group_by_two_level_threshold_bytes = 0; diff --git a/parser/testdata/02352_grouby_shadows_arg/ast.json b/parser/testdata/02352_grouby_shadows_arg/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02352_grouby_shadows_arg/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02352_grouby_shadows_arg/metadata.json b/parser/testdata/02352_grouby_shadows_arg/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02352_grouby_shadows_arg/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02352_grouby_shadows_arg/query.sql b/parser/testdata/02352_grouby_shadows_arg/query.sql new file mode 100644 index 000000000..953da2382 --- /dev/null +++ b/parser/testdata/02352_grouby_shadows_arg/query.sql @@ -0,0 +1,5 @@ +-- { echoOn } +SELECT toString(dummy) as dummy FROM remote('127.{1,1}', 'system.one') GROUP BY dummy; +SELECT toString(dummy+1) as dummy FROM remote('127.{1,1}', 'system.one') GROUP BY dummy; +SELECT toString((toInt8(dummy)+2) * (toInt8(dummy)+2)) as dummy FROM remote('127.{1,1}', system.one) GROUP BY dummy; +SELECT round(number % 3) AS number FROM remote('127.{1,1}', numbers(20)) GROUP BY number ORDER BY number ASC; diff --git a/parser/testdata/02352_lightweight_delete/ast.json b/parser/testdata/02352_lightweight_delete/ast.json new file mode 100644 index 000000000..b42b19cab --- /dev/null +++ b/parser/testdata/02352_lightweight_delete/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery lwd_test (children 1)" + }, + { + "explain": " Identifier lwd_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001912102, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/02352_lightweight_delete/metadata.json b/parser/testdata/02352_lightweight_delete/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02352_lightweight_delete/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02352_lightweight_delete/query.sql b/parser/testdata/02352_lightweight_delete/query.sql new file mode 100644 index 000000000..b13688282 --- /dev/null +++ b/parser/testdata/02352_lightweight_delete/query.sql @@ -0,0 +1,82 @@ +DROP TABLE IF EXISTS lwd_test; + +CREATE TABLE lwd_test (id UInt64 , value String) ENGINE MergeTree() ORDER BY id SETTINGS index_granularity=8192, index_granularity_bytes='10Mi'; + +INSERT INTO lwd_test SELECT number, randomString(10) FROM system.numbers LIMIT 1000000; + +SET mutations_sync = 0; + +SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_test' AND active; +SELECT 'Count', count() FROM lwd_test; +SELECT 'First row', id, length(value) FROM lwd_test ORDER BY id LIMIT 1; + + +SELECT 'Delete 100K rows using lightweight DELETE'; +--ALTER TABLE lwd_test UPDATE _row_exists = 0 WHERE id < 3000000; +DELETE FROM lwd_test WHERE id < 100000; + +SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_test' AND active; +SELECT 'Count', count() FROM lwd_test; +SELECT 'First row', id, length(value) FROM lwd_test ORDER BY id LIMIT 1; + + +SELECT 'Force merge to cleanup deleted rows'; +OPTIMIZE TABLE lwd_test FINAL SETTINGS mutations_sync = 2; + +SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_test' AND active; +SELECT 'Count', count() FROM lwd_test; +SELECT 'First row', id, length(value) FROM lwd_test ORDER BY id LIMIT 1; + + +SELECT 'Delete 100K more rows using lightweight DELETE'; +DELETE FROM lwd_test WHERE id < 200000; + +SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_test' AND active; +SELECT 'Count', count() FROM lwd_test; +SELECT 'First row', id, length(value) FROM lwd_test ORDER BY id LIMIT 1; + + +SELECT 'Do UPDATE mutation'; +ALTER TABLE lwd_test UPDATE value = 'v' WHERE id % 2 == 0 SETTINGS mutations_sync = 2; + +SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_test' AND active; +SELECT 'Count', count() FROM lwd_test; +SELECT 'First row', id, length(value) FROM lwd_test ORDER BY id LIMIT 1; + + +SELECT 'Force merge to cleanup deleted rows'; +OPTIMIZE TABLE lwd_test FINAL SETTINGS mutations_sync = 2; + +SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_test' AND active; +SELECT 'Count', count() FROM lwd_test; +SELECT 'First row', id, length(value) FROM lwd_test ORDER BY id LIMIT 1; + + +SELECT 'Delete 100K more rows using lightweight DELETE'; +DELETE FROM lwd_test WHERE id < 300000; + +SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_test' AND active; +SELECT 'Count', count() FROM lwd_test; +SELECT 'First row', id, length(value) FROM lwd_test ORDER BY id LIMIT 1; + + +SELECT 'Do ALTER DELETE mutation that does a "heavyweight" delete'; +ALTER TABLE lwd_test DELETE WHERE id % 3 == 0 SETTINGS mutations_sync = 2; + +SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_test' AND active; +SELECT 'Count', count() FROM lwd_test; +SELECT 'First row', id, length(value) FROM lwd_test ORDER BY id LIMIT 1; + +SELECT 'Delete 100K more rows using lightweight DELETE'; +DELETE FROM lwd_test WHERE id >= 300000 and id < 400000; + + +SELECT 'Force 
merge to cleanup deleted rows'; +OPTIMIZE TABLE lwd_test FINAL SETTINGS mutations_sync = 2; + +SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_test' AND active; +SELECT 'Count', count() FROM lwd_test; +SELECT 'First row', id, length(value) FROM lwd_test ORDER BY id LIMIT 1; + + +DROP TABLE lwd_test; diff --git a/parser/testdata/02352_lightweight_delete_in_partition/ast.json b/parser/testdata/02352_lightweight_delete_in_partition/ast.json new file mode 100644 index 000000000..b4ec39af2 --- /dev/null +++ b/parser/testdata/02352_lightweight_delete_in_partition/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_merge_tree (children 1)" + }, + { + "explain": " Identifier t_merge_tree" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001394401, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/02352_lightweight_delete_in_partition/metadata.json b/parser/testdata/02352_lightweight_delete_in_partition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02352_lightweight_delete_in_partition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02352_lightweight_delete_in_partition/query.sql b/parser/testdata/02352_lightweight_delete_in_partition/query.sql new file mode 100644 index 000000000..04371d273 --- /dev/null +++ b/parser/testdata/02352_lightweight_delete_in_partition/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS t_merge_tree SYNC; +DROP TABLE IF EXISTS t_replicated_merge_tree SYNC; + +CREATE TABLE t_merge_tree(time Date, id String , name String) ENGINE = MergeTree() PARTITION BY time ORDER BY id; +CREATE TABLE t_replicated_merge_tree(time Date, id String, name String) ENGINE = ReplicatedMergeTree('/test/02352/{database}/t_rep','1') PARTITION BY time ORDER BY id; + +INSERT INTO t_merge_tree select '2024-08-01', '1', toString(number) FROM numbers(100); +INSERT INTO t_merge_tree select '2024-08-02', '1', toString(number) FROM numbers(100); + +INSERT INTO t_replicated_merge_tree select '2024-08-01', '1', toString(number) FROM numbers(100); +INSERT INTO t_replicated_merge_tree select '2024-08-02', '1', toString(number) FROM numbers(100); + +SELECT COUNT() FROM t_merge_tree; +SELECT COUNT() FROM t_replicated_merge_tree; + +DELETE FROM t_merge_tree IN PARTITION '2024-08-01' WHERE id = '1'; +DELETE FROM t_replicated_merge_tree IN PARTITION '2024-08-01' WHERE id = '1'; + +SELECT COUNT() FROM t_merge_tree; +SELECT COUNT() FROM t_replicated_merge_tree; + +DROP TABLE t_merge_tree SYNC; +DROP TABLE t_replicated_merge_tree SYNC; diff --git a/parser/testdata/02352_lightweight_delete_on_replicated_merge_tree/ast.json b/parser/testdata/02352_lightweight_delete_on_replicated_merge_tree/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02352_lightweight_delete_on_replicated_merge_tree/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02352_lightweight_delete_on_replicated_merge_tree/metadata.json b/parser/testdata/02352_lightweight_delete_on_replicated_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02352_lightweight_delete_on_replicated_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02352_lightweight_delete_on_replicated_merge_tree/query.sql 
b/parser/testdata/02352_lightweight_delete_on_replicated_merge_tree/query.sql new file mode 100644 index 000000000..4c3955bf7 --- /dev/null +++ b/parser/testdata/02352_lightweight_delete_on_replicated_merge_tree/query.sql @@ -0,0 +1,65 @@ +-- Tags: long + +DROP TABLE IF EXISTS replicated_table_r1 SYNC; +DROP TABLE IF EXISTS replicated_table_r2 SYNC; + +CREATE TABLE replicated_table_r1(id Int32, name String) ENGINE = ReplicatedMergeTree('/test/02352/{database}/t_rep','1') ORDER BY id; +CREATE TABLE replicated_table_r2(id Int32, name String) ENGINE = ReplicatedMergeTree('/test/02352/{database}/t_rep','2') ORDER BY id; + +INSERT INTO replicated_table_r1 select number, toString(number) FROM numbers(100); + +SET mutations_sync = 0; + +DELETE FROM replicated_table_r1 WHERE id = 10; + +SELECT COUNT() FROM replicated_table_r1; +SELECT COUNT() FROM replicated_table_r2; + +DELETE FROM replicated_table_r2 WHERE name IN ('1','2','3','4'); + +SELECT COUNT() FROM replicated_table_r1; + +DELETE FROM replicated_table_r1 WHERE 1; + +SELECT COUNT() FROM replicated_table_r1; +SELECT COUNT() FROM replicated_table_r2; + +DROP TABLE IF EXISTS replicated_table_r1 SYNC; +DROP TABLE IF EXISTS replicated_table_r2 SYNC; + +DROP TABLE IF EXISTS t_light_r1 SYNC; +DROP TABLE IF EXISTS t_light_r2 SYNC; + +CREATE TABLE t_light_r1(a int, b int, c int, index i_c(b) TYPE minmax granularity 4) ENGINE = ReplicatedMergeTree('/test/02352/{database}/t_light','1') ORDER BY a PARTITION BY c % 5; +CREATE TABLE t_light_r2(a int, b int, c int, index i_c(b) TYPE minmax granularity 4) ENGINE = ReplicatedMergeTree('/test/02352/{database}/t_light','2') ORDER BY a PARTITION BY c % 5; + +INSERT INTO t_light_r1 SELECT number, number, number FROM numbers(10); + +DELETE FROM t_light_r1 WHERE c%5=1; +DELETE FROM t_light_r2 WHERE c=4; + +SELECT '-----Check select and merge with lightweight delete.-----'; +SELECT count(*) FROM t_light_r1; +SELECT * FROM t_light_r1 ORDER BY a; +SELECT * FROM t_light_r2 ORDER BY a; + +OPTIMIZE TABLE t_light_r1 FINAL SETTINGS mutations_sync = 2; +SELECT count(*) FROM t_light_r1; + +DROP TABLE IF EXISTS t_light_r1 SYNC; +DROP TABLE IF EXISTS t_light_r2 SYNC; + +CREATE TABLE t_light_sync_r1(a int, b int, c int, index i_c(b) TYPE minmax granularity 4) ENGINE = ReplicatedMergeTree('/test/02352/{database}/t_sync','1') ORDER BY a PARTITION BY c % 5 SETTINGS min_bytes_for_wide_part=0; + +INSERT INTO t_light_sync_r1 SELECT number, number, number FROM numbers(10); + +DELETE FROM t_light_sync_r1 WHERE c%3=1; + +SELECT '-----Check fetching a part with lightweight delete-----'; +CREATE TABLE t_light_sync_r2(a int, b int, c int, index i_c(b) TYPE minmax granularity 4) ENGINE = ReplicatedMergeTree('/test/02352/{database}/t_sync','2') ORDER BY a PARTITION BY c % 5 SETTINGS min_bytes_for_wide_part=0; +SYSTEM SYNC REPLICA t_light_sync_r2; + +SELECT * FROM t_light_sync_r2 ORDER BY a; + +DROP TABLE IF EXISTS t_light_sync_r1 SYNC; +DROP TABLE IF EXISTS t_light_sync_r2 SYNC; diff --git a/parser/testdata/02353_ascii/ast.json b/parser/testdata/02353_ascii/ast.json new file mode 100644 index 000000000..31dd10e42 --- /dev/null +++ b/parser/testdata/02353_ascii/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function ascii (children 1)" + }, + { +
"explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '234'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001798302, + "rows_read": 7, + "bytes_read": 255 + } +} diff --git a/parser/testdata/02353_ascii/metadata.json b/parser/testdata/02353_ascii/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02353_ascii/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02353_ascii/query.sql b/parser/testdata/02353_ascii/query.sql new file mode 100644 index 000000000..5b7a20ad6 --- /dev/null +++ b/parser/testdata/02353_ascii/query.sql @@ -0,0 +1,5 @@ +SELECT ascii('234'); +SELECT ascii(''); +SELECT ascii(materialize('234')); +SELECT ascii(materialize('')); +SELECT ascii(toString(number) || 'abc') from numbers(10); diff --git a/parser/testdata/02353_explain_ast_optimize/ast.json b/parser/testdata/02353_explain_ast_optimize/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02353_explain_ast_optimize/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02353_explain_ast_optimize/metadata.json b/parser/testdata/02353_explain_ast_optimize/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02353_explain_ast_optimize/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02353_explain_ast_optimize/query.sql b/parser/testdata/02353_explain_ast_optimize/query.sql new file mode 100644 index 000000000..a46a47a2e --- /dev/null +++ b/parser/testdata/02353_explain_ast_optimize/query.sql @@ -0,0 +1,6 @@ +-- { echoOn } +EXPLAIN AST optimize=0 SELECT * FROM numbers(0); +EXPLAIN AST optimize=1 SELECT * FROM numbers(0); +EXPLAIN AST optimize=0 SELECT countDistinct(number) FROM numbers(0); +EXPLAIN AST optimize=1 SELECT countDistinct(number) FROM numbers(0); +-- { echoOff } diff --git a/parser/testdata/02353_isnullable/ast.json b/parser/testdata/02353_isnullable/ast.json new file mode 100644 index 000000000..d5bf5505e --- /dev/null +++ b/parser/testdata/02353_isnullable/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function isNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001288283, + "rows_read": 7, + "bytes_read": 263 + } +} diff --git a/parser/testdata/02353_isnullable/metadata.json b/parser/testdata/02353_isnullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02353_isnullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02353_isnullable/query.sql b/parser/testdata/02353_isnullable/query.sql new file mode 100644 index 000000000..279eea252 --- /dev/null +++ b/parser/testdata/02353_isnullable/query.sql @@ -0,0 +1,11 @@ +SELECT isNullable(3); +SELECT isNullable(toNullable(3)); + +SELECT isNullable(NULL); +SELECT isNullable(materialize(NULL)); + +SELECT isNullable(toLowCardinality(1)); +SELECT isNullable(toNullable(toLowCardinality(1))); + +SELECT isNullable(toLowCardinality(materialize(1))); +SELECT isNullable(toNullable(toLowCardinality(materialize(1)))); diff --git 
a/parser/testdata/02353_partition_prune_nullable_key/ast.json b/parser/testdata/02353_partition_prune_nullable_key/ast.json new file mode 100644 index 000000000..9d3606fe9 --- /dev/null +++ b/parser/testdata/02353_partition_prune_nullable_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery n (children 1)" + }, + { + "explain": " Identifier n" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00145169, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02353_partition_prune_nullable_key/metadata.json b/parser/testdata/02353_partition_prune_nullable_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02353_partition_prune_nullable_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02353_partition_prune_nullable_key/query.sql b/parser/testdata/02353_partition_prune_nullable_key/query.sql new file mode 100644 index 000000000..5a5109c31 --- /dev/null +++ b/parser/testdata/02353_partition_prune_nullable_key/query.sql @@ -0,0 +1,9 @@ +drop table if exists n; + +create table n(nc Nullable(int)) engine = MergeTree order by (tuple()) partition by (nc) settings allow_nullable_key = 1; + +insert into n values (null); + +select * from n where nc is null; + +drop table n; diff --git a/parser/testdata/02353_simdjson_buffer_overflow/ast.json b/parser/testdata/02353_simdjson_buffer_overflow/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02353_simdjson_buffer_overflow/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02353_simdjson_buffer_overflow/metadata.json b/parser/testdata/02353_simdjson_buffer_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02353_simdjson_buffer_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02353_simdjson_buffer_overflow/query.sql b/parser/testdata/02353_simdjson_buffer_overflow/query.sql new file mode 100644 index 000000000..e7c6c2721 --- /dev/null +++ b/parser/testdata/02353_simdjson_buffer_overflow/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-msan +-- Tag no-msan: the fuzzer can make this query very memory hungry; under MSan, the MemoryTracker cannot account for the additional memory used by the sanitizer, and OOM happens. + +SET max_execution_time = 3; +SET timeout_overflow_mode = 'break'; +SET max_rows_to_read = 0, max_bytes_to_read = 0; + +SELECT count() FROM system.numbers_mt WHERE NOT ignore(JSONExtract('{' || repeat('"a":"b",', rand() % 10) || '"c":"d"}', 'a', 'String')) FORMAT Null; diff --git a/parser/testdata/02353_translate/ast.json b/parser/testdata/02353_translate/ast.json new file mode 100644 index 000000000..08f1cfb0d --- /dev/null +++ b/parser/testdata/02353_translate/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function translate (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'Hello? 
world.'" + }, + { + "explain": " Literal '.?'" + }, + { + "explain": " Literal '!,'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001389625, + "rows_read": 9, + "bytes_read": 321 + } +} diff --git a/parser/testdata/02353_translate/metadata.json b/parser/testdata/02353_translate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02353_translate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02353_translate/query.sql b/parser/testdata/02353_translate/query.sql new file mode 100644 index 000000000..5fd9324f1 --- /dev/null +++ b/parser/testdata/02353_translate/query.sql @@ -0,0 +1,23 @@ +SELECT translate('Hello? world.', '.?', '!,'); +SELECT translate('gtcttgcaag', 'ACGTacgt', 'TGCAtgca'); +SELECT translate(toString(number), '0123456789', 'abcdefghij') FROM numbers(987654, 5); + +SELECT translateUTF8('HôtelGenèv', 'Ááéíóúôè', 'aaeiouoe'); +SELECT translateUTF8('中文内码', '久标准中文内码', 'ユニコードとは'); +SELECT translateUTF8(toString(number), '1234567890', 'ዩय𐑿𐐏নՅðй¿ค') FROM numbers(987654, 5); + +SELECT translate('abc', '', ''); +SELECT translateUTF8('abc', '', ''); + +SELECT translate('abc', 'Ááéíóúôè', 'aaeiouoe'); -- { serverError BAD_ARGUMENTS } +SELECT translateUTF8('abc', 'efg', ''); + +SELECT translateUTF8('中文内码', '中文', ''); +SELECT translate('aAbBcC', 'abc', '12'); + +SELECT translate('aAbBcC', 'abc', '123'); +SELECT translate('aAbBcC', 'abc', ''); +SELECT translate('abc', 'abc', ''); +SELECT translate('aAbBcC', '中文内码', '12'); -- { serverError BAD_ARGUMENTS } + +SELECT translate('aAbBcC', 'ab', 'abc'); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02354_array_lowcardinality/ast.json b/parser/testdata/02354_array_lowcardinality/ast.json new file mode 100644 index 000000000..a74830612 --- /dev/null +++ b/parser/testdata/02354_array_lowcardinality/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '1'" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2'" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001736138, + "rows_read": 14, + "bytes_read": 570 + } +} diff --git a/parser/testdata/02354_array_lowcardinality/metadata.json b/parser/testdata/02354_array_lowcardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_array_lowcardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_array_lowcardinality/query.sql b/parser/testdata/02354_array_lowcardinality/query.sql new file mode 100644 index 000000000..1aa26b290 --- /dev/null +++ b/parser/testdata/02354_array_lowcardinality/query.sql @@ -0,0 +1,14 @@ +SELECT toTypeName([toLowCardinality('1'), toLowCardinality('2')]); +SELECT toTypeName([materialize(toLowCardinality('1')), toLowCardinality('2')]); +SELECT 
toTypeName([toLowCardinality('1'), materialize(toLowCardinality('2'))]); +SELECT toTypeName([materialize(toLowCardinality('1')), materialize(toLowCardinality('2'))]); + +SELECT toTypeName([toLowCardinality('1'), '2']); +SELECT toTypeName([materialize(toLowCardinality('1')), '2']); +SELECT toTypeName([toLowCardinality('1'), materialize('2')]); +SELECT toTypeName([materialize(toLowCardinality('1')), materialize('2')]); + +SELECT toTypeName(map(toLowCardinality('1'), toLowCardinality('2'))); +SELECT toTypeName(map(materialize(toLowCardinality('1')), toLowCardinality('2'))); +SELECT toTypeName(map(toLowCardinality('1'), materialize(toLowCardinality('2')))); +SELECT toTypeName(map(materialize(toLowCardinality('1')), materialize(toLowCardinality('2')))); diff --git a/parser/testdata/02354_distributed_with_external_aggregation_memory_usage/ast.json b/parser/testdata/02354_distributed_with_external_aggregation_memory_usage/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02354_distributed_with_external_aggregation_memory_usage/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02354_distributed_with_external_aggregation_memory_usage/metadata.json b/parser/testdata/02354_distributed_with_external_aggregation_memory_usage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_distributed_with_external_aggregation_memory_usage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_distributed_with_external_aggregation_memory_usage/query.sql b/parser/testdata/02354_distributed_with_external_aggregation_memory_usage/query.sql new file mode 100644 index 000000000..481557d6d --- /dev/null +++ b/parser/testdata/02354_distributed_with_external_aggregation_memory_usage/query.sql @@ -0,0 +1,31 @@ +-- Tags: long, no-tsan, no-msan, no-asan, no-ubsan, no-debug, no-coverage, no-object-storage, no-random-merge-tree-settings, no-random-settings + +SET max_rows_to_read = '101M'; + +DROP TABLE IF EXISTS t_2354_dist_with_external_aggr; + +create table t_2354_dist_with_external_aggr(a UInt64, b String, c FixedString(100)) engine = MergeTree order by tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into t_2354_dist_with_external_aggr select number, toString(number) as s, toFixedString(s, 100) from numbers_mt(5e7); + +set max_bytes_before_external_group_by = '2G', + max_bytes_ratio_before_external_group_by = 0, + max_threads = 16, + aggregation_memory_efficient_merge_threads = 16, + distributed_aggregation_memory_efficient = 1, + prefer_localhost_replica = 1, + group_by_two_level_threshold = 100000, + group_by_two_level_threshold_bytes = 1000000, + max_block_size = 65505; + +-- whole aggregation state of local aggregation uncompressed is 5.8G +-- it is hard to provide an accurate estimation for memory usage, so 5G is just the actual value taken from the logs + delta +-- also avoid using localhost, so the queries will go over separate connections +-- (otherwise the memory usage for merge will be counted together with the localhost query) +select a, b, c, sum(a) as s +from remote('127.0.0.{2,3}', currentDatabase(), t_2354_dist_with_external_aggr) +group by a, b, c +format Null +settings max_memory_usage = '5Gi', max_result_rows = 0, max_result_bytes = 0; + +DROP TABLE t_2354_dist_with_external_aggr; diff --git a/parser/testdata/02354_numeric_literals_with_underscores/ast.json b/parser/testdata/02354_numeric_literals_with_underscores/ast.json new file 
mode 100644 index 000000000..c2c811ae8 --- /dev/null +++ b/parser/testdata/02354_numeric_literals_with_underscores/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 1)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Literal UInt64_1000" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001608119, + "rows_read": 5, + "bytes_read": 180 + } +} diff --git a/parser/testdata/02354_numeric_literals_with_underscores/metadata.json b/parser/testdata/02354_numeric_literals_with_underscores/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_numeric_literals_with_underscores/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_numeric_literals_with_underscores/query.sql b/parser/testdata/02354_numeric_literals_with_underscores/query.sql new file mode 100644 index 000000000..b58bbfc58 --- /dev/null +++ b/parser/testdata/02354_numeric_literals_with_underscores/query.sql @@ -0,0 +1,6 @@ +select 1_000; +select 1.00_00_01; +select 1.000_0001e2; +select 0x12_34_56_78; +select 0x12_34_56_78p1; +select 0b0010_0100_0111; diff --git a/parser/testdata/02354_parse_timedelta/ast.json b/parser/testdata/02354_parse_timedelta/ast.json new file mode 100644 index 000000000..27a5c080a --- /dev/null +++ b/parser/testdata/02354_parse_timedelta/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 1)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Function parseTimeDelta (children 1)" + }, + { + "explain": "     ExpressionList (children 1)" + }, + { + "explain": "      Literal '1 min 35 sec'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001002283, + "rows_read": 7, + "bytes_read": 273 + } +} diff --git a/parser/testdata/02354_parse_timedelta/metadata.json b/parser/testdata/02354_parse_timedelta/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_parse_timedelta/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_parse_timedelta/query.sql b/parser/testdata/02354_parse_timedelta/query.sql new file mode 100644 index 000000000..7fe4cdb96 --- /dev/null +++ b/parser/testdata/02354_parse_timedelta/query.sql @@ -0,0 +1,26 @@ +SELECT parseTimeDelta('1 min 35 sec'); +SELECT parseTimeDelta('0m;11.23s.'); +SELECT parseTimeDelta('11hr 25min 3.1s'); +SELECT parseTimeDelta('0.00123 seconds'); +SELECT parseTimeDelta('1yr2mo'); +SELECT parseTimeDelta('11s+22min'); +SELECT parseTimeDelta('1yr-2mo-4w + 12 days, 3 hours : 1 minute ; 33 seconds'); +SELECT parseTimeDelta('1s1ms1us1ns'); +SELECT parseTimeDelta('1s1ms1μs1ns'); -- μs U+03BC +SELECT parseTimeDelta('1s1ms1µs1ns'); -- µs U+00B5 +SELECT parseTimeDelta('1s - 1ms : 1μs ; 1ns'); +SELECT parseTimeDelta('1.11s1.11ms1.11us1.11ns'); + +-- invalid expressions +SELECT parseTimeDelta(); -- {serverError TOO_FEW_ARGUMENTS_FOR_FUNCTION} +SELECT parseTimeDelta('1yr', 1); -- {serverError TOO_MANY_ARGUMENTS_FOR_FUNCTION} +SELECT parseTimeDelta(1); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT parseTimeDelta(' '); -- {serverError BAD_ARGUMENTS}
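+-- Editor's note (descriptive addition, not from the upstream test): the trailing
+-- '-- {serverError CODE}' markers are assertions consumed by the ClickHouse test
+-- harness, which runs each statement and checks that it fails with the named error.
+-- The inputs in this block are malformed because a number is missing, dangling, or
+-- attached to an unrecognized unit, so each one is expected to raise BAD_ARGUMENTS.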
+SELECT parseTimeDelta('-1yr'); -- {serverError BAD_ARGUMENTS} +SELECT parseTimeDelta('1yr-'); -- {serverError BAD_ARGUMENTS} +SELECT parseTimeDelta('yr2mo'); -- {serverError BAD_ARGUMENTS} +SELECT parseTimeDelta('1.yr2mo'); -- {serverError BAD_ARGUMENTS} +SELECT parseTimeDelta('1-yr'); -- {serverError BAD_ARGUMENTS} +SELECT parseTimeDelta('1 1yr'); -- {serverError BAD_ARGUMENTS} +SELECT parseTimeDelta('1yyr'); -- {serverError BAD_ARGUMENTS} +SELECT parseTimeDelta('1yr-2mo-4w + 12 days, 3 hours : 1 minute ;. 33 seconds'); -- {serverError BAD_ARGUMENTS} diff --git a/parser/testdata/02354_read_in_order_prewhere/ast.json b/parser/testdata/02354_read_in_order_prewhere/ast.json new file mode 100644 index 000000000..8f37806f7 --- /dev/null +++ b/parser/testdata/02354_read_in_order_prewhere/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery order (children 1)" + }, + { + "explain": " Identifier order" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001545503, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/02354_read_in_order_prewhere/metadata.json b/parser/testdata/02354_read_in_order_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_read_in_order_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_read_in_order_prewhere/query.sql b/parser/testdata/02354_read_in_order_prewhere/query.sql new file mode 100644 index 000000000..c5abd5945 --- /dev/null +++ b/parser/testdata/02354_read_in_order_prewhere/query.sql @@ -0,0 +1,30 @@ +drop table if exists order; + +CREATE TABLE order +( + ID Int64, + Type Int64, + Num UInt64, + t DateTime +) +ENGINE = MergeTree() +PARTITION BY toYYYYMMDD(t) +ORDER BY (ID, Type, Num); + +system stop merges order; + +insert into order select number%2000, 1, number, (1656700561 - intDiv(intHash32(number), 1000)) from numbers(100000); +insert into order select number%2000, 1, number, (1656700561 - intDiv(intHash32(number), 1000)) from numbers(100000); +insert into order select number%2000, 1, number, (1656700561 - intDiv(intHash32(number), 1000)) from numbers(100000); + +SELECT Num +FROM order +WHERE Type = 1 AND ID = 1 +ORDER BY Num ASC limit 5; + +SELECT Num +FROM order +PREWHERE Type = 1 +WHERE ID = 1 +ORDER BY Num ASC limit 5; + diff --git a/parser/testdata/02354_tuple_element_with_default/ast.json b/parser/testdata/02354_tuple_element_with_default/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02354_tuple_element_with_default/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02354_tuple_element_with_default/metadata.json b/parser/testdata/02354_tuple_element_with_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_tuple_element_with_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_tuple_element_with_default/query.sql b/parser/testdata/02354_tuple_element_with_default/query.sql new file mode 100644 index 000000000..89320f4d2 --- /dev/null +++ b/parser/testdata/02354_tuple_element_with_default/query.sql @@ -0,0 +1,23 @@ +-- const tuple argument + +SELECT tupleElement(('hello', 'world'), 1, 'default'); +SELECT tupleElement(('hello', 'world'), 2, 'default'); +SELECT tupleElement(('hello', 'world'), 3, 'default'); +SELECT tupleElement(('hello', 'world'), 'xyz', 'default'); +SELECT 
tupleElement(('hello', 'world'), 3, [([('a')], 1)]); -- arbitrary default value + +SELECT tupleElement([(1, 2), (3, 4)], 1, 'default'); +SELECT tupleElement([(1, 2), (3, 4)], 2, 'default'); +SELECT tupleElement([(1, 2), (3, 4)], 3, 'default'); + +SELECT '--------'; + +-- non-const tuple argument + +SELECT tupleElement(materialize(('hello', 'world')), 1, 'default'); +SELECT tupleElement(materialize(('hello', 'world')), 2, 'default'); +SELECT tupleElement(materialize(('hello', 'world')), 3, 'default'); +SELECT tupleElement(materialize(('hello', 'world')), 'xzy', 'default'); +SELECT tupleElement(materialize(('hello', 'world')), 'xzy', [([('a')], 1)]); -- arbitrary default value + +SELECT tupleElement([[(count('2147483646'), 1)]], 'aaaa', [[1, 2, 3]]) -- bug #51525 diff --git a/parser/testdata/02354_tuple_lowcardinality/ast.json b/parser/testdata/02354_tuple_lowcardinality/ast.json new file mode 100644 index 000000000..99630d20e --- /dev/null +++ b/parser/testdata/02354_tuple_lowcardinality/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001222921, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02354_tuple_lowcardinality/metadata.json b/parser/testdata/02354_tuple_lowcardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_tuple_lowcardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_tuple_lowcardinality/query.sql b/parser/testdata/02354_tuple_lowcardinality/query.sql new file mode 100644 index 000000000..44b64aab3 --- /dev/null +++ b/parser/testdata/02354_tuple_lowcardinality/query.sql @@ -0,0 +1,6 @@ +SET allow_suspicious_low_cardinality_types = 1; + +SELECT toTypeName(tuple(toLowCardinality('1'), toLowCardinality(1))); +SELECT toTypeName(tuple(materialize(toLowCardinality('1')), toLowCardinality(1))); +SELECT toTypeName(tuple(toLowCardinality('1'), materialize(toLowCardinality(1)))); +SELECT toTypeName(tuple(materialize(toLowCardinality('1')), materialize(toLowCardinality(1)))); diff --git a/parser/testdata/02354_vector_search_adaptive_index_granularity/ast.json b/parser/testdata/02354_vector_search_adaptive_index_granularity/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02354_vector_search_adaptive_index_granularity/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02354_vector_search_adaptive_index_granularity/metadata.json b/parser/testdata/02354_vector_search_adaptive_index_granularity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_vector_search_adaptive_index_granularity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_vector_search_adaptive_index_granularity/query.sql b/parser/testdata/02354_vector_search_adaptive_index_granularity/query.sql new file mode 100644 index 000000000..812ce640c --- /dev/null +++ b/parser/testdata/02354_vector_search_adaptive_index_granularity/query.sql @@ -0,0 +1,17 @@ +-- Tags: no-fasttest, no-ordinary-database + +-- Tests that vector similarity indexes cannot be created with index_granularity_bytes = 0 + +DROP TABLE IF EXISTS tab; + +-- If adaptive index granularity is disabled, certain vector search queries with PREWHERE run into LOGICAL_ERRORs. 
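+-- Editor's note (descriptive addition): the three commented-out statements below are
+-- the original reproducer, kept for reference; with index_granularity_bytes = 0 the
+-- final PREWHERE query is the one expected to hit the LOGICAL_ERROR described above.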
+-- CREATE TABLE tab (`id` Int32, `vec` Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance') GRANULARITY 100000000) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0; +-- INSERT INTO tab SELECT number, [toFloat32(number), 0.] FROM numbers(10000); +-- WITH [1., 0.] AS reference_vec SELECT id, L2Distance(vec, reference_vec) FROM tab PREWHERE toLowCardinality(10) ORDER BY L2Distance(vec, reference_vec) ASC LIMIT 100; +-- As a workaround, force enabled adaptive index granularity for now (it is the default anyways). +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 1)) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0; -- { serverError INVALID_SETTING_VALUE } + +CREATE TABLE tab(id Int32, vec Array(Float32)) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity_bytes = 0; +ALTER TABLE tab ADD INDEX vec_idx1(vec) TYPE vector_similarity('hnsw', 'cosineDistance', 1); -- { serverError INVALID_SETTING_VALUE } + +DROP TABLE tab; diff --git a/parser/testdata/02354_vector_search_and_other_skipping_indexes/ast.json b/parser/testdata/02354_vector_search_and_other_skipping_indexes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02354_vector_search_and_other_skipping_indexes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02354_vector_search_and_other_skipping_indexes/metadata.json b/parser/testdata/02354_vector_search_and_other_skipping_indexes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_vector_search_and_other_skipping_indexes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_vector_search_and_other_skipping_indexes/query.sql b/parser/testdata/02354_vector_search_and_other_skipping_indexes/query.sql new file mode 100644 index 000000000..73d5ec34f --- /dev/null +++ b/parser/testdata/02354_vector_search_and_other_skipping_indexes/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-fasttest, no-ordinary-database + +-- Usage of vector similarity index and further skipping indexes on the same table (issue #71381) + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab( + val String, + vec Array(Float32), + INDEX ann_idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 1), + INDEX set_idx val TYPE set(100) +) +ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO tab VALUES ('hello world', [0.0]); + +DROP TABLE tab; diff --git a/parser/testdata/02354_vector_search_binary_quantization/ast.json b/parser/testdata/02354_vector_search_binary_quantization/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02354_vector_search_binary_quantization/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02354_vector_search_binary_quantization/metadata.json b/parser/testdata/02354_vector_search_binary_quantization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_vector_search_binary_quantization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_vector_search_binary_quantization/query.sql b/parser/testdata/02354_vector_search_binary_quantization/query.sql new file mode 100644 index 000000000..06f8f4035 --- /dev/null +++ b/parser/testdata/02354_vector_search_binary_quantization/query.sql @@ -0,0 +1,133 @@ +-- Tags: no-fasttest, no-ordinary-database, no-parallel-replicas +-- no-parallel-replicas: The test really wants lower quality 
result to be returned from the index +-- with rescoring=OFF. That is required to confirm binary quantization works +-- as expected. In parallel replicas, rescoring is always ON. + +-- Test for vector similarity index with binary quantization. +-- Also has good number of calls to reinterpret() to test conversion of native floats to Array(Float32) + +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS dbpedia; + +CREATE TABLE dbpedia +( + id String, + vec Array(Float32), + INDEX vec_idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 1536, 'b1', 64, 128) +) ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity = 2; + +SELECT '-- INSERT 20 rows using the reinterpret() technique'; + +INSERT INTO dbpedia VALUES +('<dbpedia:Database_transaction>', reinterpret(x'0f458d3cf3b8493cd55c64bc7594243c4ed79f3c1a3cca3b2b686b3cdc91c83b944ca3bcae2b573b2d89833dadb7cebcc6c6603c2411f2bc08d624bd114fb33c8f39d4bcb2e20c3c3d590b3d6538a5bc0b54d33c14416a3c1067223da98600bb540c043bd1d40f3bca7d16bcfa0f43ba3d590bbdd596e8bbec0f5dbcb50e48bcca6527ba239d69bc13cd61bc38463cbd832002bd3129473c24857abc8ec5cbbca782ea3b7fa3d03cd2821cbd5225703c98d4f73cf296343ca36e1ebd41f94ebbf7e387bc1fdb903c04ab66bd64a207bdc9098e3d26328a3d102d1ebd0f0b89bc0593773cbd87d6bbbd134ebda7bc6eb95103dbba7bc908bdf0c692bc3846bc3c81e7fa3c5df99a3ab5bcd4bbff74853d18f89fbc73c402bc27e096bc56502e3d11c33b3c3007b2bcf4a0da3cad7dca3cfc536d3d64a2073dab7324bbb2e28c3ce28ca83c762a423c50e145bc3007323c9b2f94bbcb4d383cd48cc2bcf37e45bdc63ae93b3a8ae63c3f63b1bbf466d6bb114f333cbc65c13c56162a3d169c863bf00017bc6708473cd5e8db3c0b0260bca2860d3dbae792bc95a83c3b540c04bd74388b3c641610bc368e893c404b423b7764c6bbb2a8883de566f0bc106722bb9e5bcfbcdc3fd53c5177e3bc670847bd6a86f53cd7b87dbccc35c9bbbefbdebb6199debcdac126bd022db83aa98600bdbe35633c45b0843cfd017abc49fe54bcb718eebb8b25883aa504bcbcf244c1bb5d8512bded6bf63c3be6ff3c3e7b203d4850c8bb8ace013d7fa3503ca24c093d65fea0bd33e179bcaa518fbc481644bd622ffcb9befbde3c79a870bd605fda3b98606fbca6d4dd3c1fdb903c6d8f1ebcfbf7533de20031bc021549bde23ab5bc8da3b63c9095ed3bbf1d74b9c46a473d0fb915bcfa9bba3c8beb03bb021549bc5446083d932a8e3c9a47033d92b605bca24c893b18e0b03c4085463caa8b133d3b38f3bc1a76ceb9472e333c363c96bcb25695bcda352fbddd616abbfb31d8bceacbb23bca7d963a80ffe93c09f8b93ba36e9e3ad8f1843dfad53e3c6c6d89bbc600653c42e15f3c9e5b4fbc9abb0b3c3ac46a3dace72c3cf1222c3dc30e2e3d2e7114bd0d5ef9bcf9edad3c0bc8dbbdb5bc543d23d7edbcf7e3873b80c5653cc2b2943c1a02c63cd10e143dc7227abde830823cdb1d40bdac21b13b9ff1ec3c7c77153b917dfe3c6742cbbc2a80dabc02673cbdd2821cbcb6f6d83c03c355bcd2821c3cf296b4bbdd27e63af4a05abc351a013cc17810bdac21b1bca504bc3bb75272bd90956d3d131f553c5e1bb03d20fd25bb9a8187bd840813bd577243bbd596683dd2f624bc4a206abdf5c2efba9dffb53ca490b33d7d0d333c687ccfbb0f7f91bcea912ebcd474d3bbd94d9e3c57383f3ca2d880bd09beb5bc10dbaabc295ec53c7ceb1d3ca4562f3c9f43603c7ebbbf3ce0f68a3b336d71bceb27ccbc8e51c33ca53ec0bc602556bd0215c939e2c62c3c67ce42bb74380b3dba951fbdd02603bd86c0453c33a7f5b9f1222c3d283cb03b0f0b09bd005d163d5908e13c4bcef6bc38bac43d0b5453bc6eeb373d2f1f21bd3602923a52eb6b3c93f0093d6f99c4bb728b7bbc79a8703cbd4dd2bb10f319bd6fd3c8b6f97925bd0b0260bd147b6e3ca3a8a2bce28c283df93fa13bd9650d3d5650aebc4d7b86bc0aa646bd6ca70d3dbbcf23bcadcf3dbd7bc9083db7c67abbfbf7d33c675ababa948627bdc382b63b74e697bc0593773b68f0d7bc0fb915bc3adcd93af4a0dabac99585bc932a8ebca6d4ddbcd6d06c3c375e2bbc3ecd133c2b68ebbcb2a888bd4bce763ce830823d8da3363bf570fcbc0828183c8686c13caaa382bcc6c6603bce79f3bc2d6c01bc3285603ca36e1ebda065753c1f4f193cc30eaebc5f3d45bd501b4abc540c843c00971a3cbc6541bcaadd86bdd60a713d9bf50f3cf414e3bca3a8a
2bcec61d0bc014527bda578443afca560bd852aa83c3bfe6e3cec0fddbbfc53edbce8c18cbc9690cdbcbc65c13c14416a3c08629c3b4f3339bd74e6173dd7b87dbccda9d13c74e697bdb718ee386d8f9e3d3a16de3cf7e307bc98d4f7bcba951f3c7c77153d9f095cbc29ea3c3c6f0dcd3b2b68ebbb1ea10cbca3a8223d944c233dadcfbdbb8ec54bbcca6527bd883e743b847c1b3c27e016bd49c4d03c98606fbda3c091bc76f0bd3ba69a593d0f0b093da7f672bd2d8983bd47f4ae3c54ef81bd4698153d5616aab9b2e20c3c83940abd96904d3b92b685bdf81d0cbdc2269d3b5a9e7e3c552e993cc99585bb3041b63c49c4503dbf91fc3adb1d40bc0145a7bbe9e321bdd10e94bcd9139a3ca3a822bc468026bdecd558bc2485fabc24bffebbd318babcf2d038bb83ce0ebb1307663c5df91a3c4e11a4bc2f1f213c0cb0ecbce118a0bc6eeb37bca2d8003c08d6243d79e274bb351a813cca65273ca2d800bd2998c93c28b0b8bce16a133c24bf7e3c0ed184bb956eb83cce05eb3c03fd593d3d598b3babff9bbcc5de4f3c14ef76bd85d8343c30cd2d3bba5b1bbd404bc23c55f494bc26f805bd22b5583baa8b93bc2924c13c1b9863bd87e2da3a2b686bbc094aadbc55800c3d21cd47bc9e95533a198ebdbc5e1b30bdb6dee9bbe1a4973c5c118abcb58250bdbcf1b83c295ec5bbf0528abdc7e8f53bd43acfbceacb32bd8eff4f3c6f47d13caadd06bdc722fa3c79567d3d655094bcbaad8ebc45ea083dd9d9153dde837f3c2ec3073de8fb90bc5d85923b7d2522bdc9090e3d416d57bc7642b1bb501bca3cae65db3a45ea88bc131fd5bce4964e3df7e307ba73c4823c7712d3bc3a5062bc5f03413d00971abb84f0a3bc932a8e3cb78c76bc8f73d83ca2860dbb1359d9bb2ba2efbc4f3339bd28b0b83cf891943d28c827bdb035fd3c9ff1ec3cedbd69bdb548ccbcf018863b283c303d706966bcdac126bd51035b3c3e411c3c094a2dbc4938593dc46a473deacbb2bc1237c4bc83ce8ebc0ae04a3c87e25abb66acad3c70bb593c195439bc87a8d6bb57c4363cb9ff01bd7dd3ae3c9a47033df979253cd1d40fbd8fe760bbc518d43cf8cb98bcd5e8db3c1cba78bcb582d0bc4def0ebdd522e03b859e303a1e1082b93333ed3c11c33b3ce91da63c7f694cbd55800cbb38ba44bc3adc593d40d7b93bc9cf093d186ca8ba37b01e3df857903c9f43e03c0a1acf3bfbbdcf3caf4decbc324b5c3c07b40f3dca2ba3bc8c479dbd355485bbf25cb03c005d16bc56a2a1bc8eff4fbc30cdad3c73fe86bb62f5f73c368e89bd13cde1bbb8007f3dd19a8b3c1ea10c3bccfb44bdda35afbc0c76e8bbd160073dc552d83c694cf1baaa510f3d27541f3de5b8e3bc852a28bde23ab53c3a50623b0b54533dbd87d6bcfa61363df857103c0e9780bc49fed43c351a013dd352be3c1ea18c3a1f1515bc300732bdd596683d4b08fbbcec9bd4bcb290993ba70e623cf3f24d3cfd3bfe3c8442973d58acc7bcc995053cf122acbc1a02463d22074cbd47f42e3c7d0db33bf5707cbc755a203b9cc5b13cadcf3d3c68b653bb2abadebb20372a3c3b38f3bbe1a417bc278e233bae9f5f3d1a76ce3c6620b63cea3f3bbd114f333d2d6c813c41bfca3b70bb59bc1ea10c3d42e1df3cfbbd4fbc6c33053b86fa493d8fad5c3b307bbabb83ce8e3c84b61fbdc5de4f3c70f5dd3c68b6533cb8007fbc7dd32e3920fd253c37b01ebd9dffb53c2037aabcbc2bbd3b644b01bb6afa7dbcb939063b3b72773c85642c3c2efd0b3daf87f03bac5bb5bbb3ca1dbb40113ebd5820d0bb0593f7bc55f414bd00979aba46d299bc1b245bbd7ceb1dbde96f19bd2e3710bcba2197bcac95b9bca6ec4cbc27549fbc0b54d33cca7d96bc094a2dbc03c355bced83e53c7ebbbfbc9a2a81bb01b9afbb7151f73c07ee133c95a8bc3b97ec663ccbd92fbc29d24dbc6077c93c11fd3fbd6077c93bc4bc3a38ec9bd4bba4e2a6bcccc140ba17d60a3dc13e8c3be4445bbd702fe23b7e2f483b62f5773cb4603b3b7e47b7bb58acc7bc26be01bd2f1f213d2c16f839a0d9fdbb198e3dbdf7e387bb45248dbc1237c43cd9139a3c33336d3c416d57bc6d8f9ebb4303753b5f03413d682a5cbc8d69b23bc46ac7bbfad53e3c5cd7053d65fe20bcac5bb53c24bf7e3cb58250bba57844bc9a2a81bd9ba31cbd460c9e3cc518d43c90cf71bde1de9bbb211f3bbdf979a53ca6d4dd3cf44ee73c3798afbce91da63c9dad423df89114bcf2d0383cadcfbdbc1ab0d23c08629c3cdb5744bd077a8b3d375eab3b5446883c3333edbb460c1e3d28022c3cf857103dcd91e23c472eb33b6afa7dbcda87a2bb83ce0ebc6f5f403aceb377bbbd4dd23b62bb733c2a0cd23b3333ed3cdafbaa39808be13be71300bd0e9700bd577243bcb939063cbae7923c7fa350bd7d992abcbd13ce3cad09c2bc2a80da3c8408933ba69ad93ca5b2c83c5772433cb5d4c33c
87e25a3d31efc23cc85b813c355485bc2efd8b3c7886dbbc956eb8bc6c1b96bc421b643d8dddbabcc2b2943ccc6fcdbc8442173df000173cc38236bd094a2dbd1c80f4bcb6f6d83cea91aebcc552583c24bffebc02673c3bb290993974209c3cd82b893c408546bc28c8273d0fb9153dd16087bc8dddba3be1de9bbcf46656bc90956dbd6550943cce79f33cf85710bc488a4c3ddac1a6bcf01806bcd9d915bd81217f3c864cbdbcc4bcbabc089c203d9656493d21e5b6bc8978f8bb3fb5a4bcf89114bb62f5f7bc106722bc18f89f3c9b2f94bc41bfcab9360212bc183224bd5df91a3de6147dbc84f0233849725d3cab73a43c33f9e83c973edabc3f292dbc2b2ee7bcd19a0b3d1b245b3c57febabb501bca3b944ca33c4938593c4d6397bb9f7de43c3be67f3bf7e3073c675ababc241172bce252a4bc980e7cbb6e253c3d3285603c6f0dcd3ccde3d5b9271a9b3cb6305d3b6742cb3ce83002bd774c573c1115af3c5ea7a7bc034fcdbc87e2dabc7e4737bcc382b63ce4d0d2bb40113ebc9f7d64bc62bbf3bbc3d4a93c4f33b93a01f333bdba951fbde37439bd174a13bdc1ca833ad2bca0bb38d233bcb5484cbc7cb119bbd5e85bbc26a692bc2e7194bcda8722bc8c471d3d1ab0523c6c3385bc6ca78dbcd16087bde5b8633c51c956bd6077c9bc1c80743c1832a43c76b639bdaa8b933c3e0798bc5f3dc5bb9f09dcbcc348b2bcc99585bbf979a53a433df9bb6d03273d95faafbc5225703cb93906bd585ad43c460c9e3c3bac7bbd11c33bbdfb83cbb81b24db3c4768b73ba504bc3b6eb1333d4e11243cd00901bd8c0d99bdac5b35bc70f55d3c767cb53c2a46563cd5e85b3ca0d97dbcfdc7f5bcf81d0cbc2f1fa13b5994d83be23a35bd78c0df3c4a9472bd5f3dc5bc408546bcabada8bb0471e2b87e81bbbcc5de4fbb6ca70d3d3ecd93bca2f5023ddc0551bcd3deb5bb05cdfbbc416d573c3e07983ac9cf893cc4bc3abc6f5fc03c7e81bbbc774c573ce28c28bc67ce423dcc6fcdba6a86f5bc94121f3c80396e3cc2b2943c8b99103dbc9f453d26a612bcc6c660bbe065003d70bb593c59cedc3bd644753c41a7dbba3007b2bc5225f03c835a86b93de582bb64a2873b3b38f33c40d7b9bad14818bcbf1df4bbeacb323caf87703c464622bdf4a05abcb66a61bdbd13ce3ce96f19bde322c639c94312bc9143fa3b4011bebb2fab98bc3b72f7bb64dc0b3c227b543cc4f6bebc513ddf3930cd2d3cbdd9493be9a91d3daadd06bd28b038bca7f672bbc178903c02673c3d3fb5a4bc85d8b4bbd1d48fbcc30e2e3c3554853cec49613b8051dd3c95a8bcbb8978f8bbd474d33c3fb5243c590861bcaffb78bc10a1263c0fb915bddded61bd847c1b3ce5f2e73c6864603c79e2743cb5d443bc26f805bb428f6cba48a23bbc022d38bc0337de3984b69f3c92b6053de91da6bcaf13e8bc2fe59c3c961c45bd10db2abc64a287ba655094bcaf87f03be065003c8978f83c42c9f0bbc2ec18bb84b69f3c4e9d1bbdc94392bc65c49cbb69d8683c3d590bbdd2821c3c9486a7bcbf1d74bca21205bbe830823ccb87bcbc03fd59bc39a2553d5a9e7e3cd3deb5bb49fe54bc0f0b093c0f450dbd00979a3b174a933b9b69183c198ebdbc8c471dbc7151f73c40d7393c6d8f9ebc9f095cbc7c3d91bc844297bc6468833b11c3bb3a0c24f5bbf7a983bc76f03dbb3129c73cd8f104bc8ace01bce6da78bb83940a3cf8051dbad14898bbe5b8633c11fdbf3a4bce76bc9cdda0bb077a8b3c7fa350bcf0c612bdd67ef9ba9fb7e8bc9b69983bd2bca03bff74053d74201cbd2efd8b3cc5decf3b8bd394bb4f6dbd3bb9ff01bd380c383df4a05a3c0b02e0bc186c28bd27549f3caf87f0bb2e3790bced83e5bbdba9373ce713003b4db50abb60eb51bca36e1ebcf08c8ebcad7d4abbfd8d71bcb30422bbb5bcd4bc5c118abc4698153dcc6fcdbcf3b8c93cf85710bd7fa3503cf4a05a3ca86a7b3c7b5580bb9534343b03375ebb57383f3b2e3790bc9ff1ecbbe0300f3ce23a35bd7ebbbf3cc2261d3c40d739bcaa510f3cf1aea3bcbbcfa33c2bdc733c85d8b4bb8f73d8bc1b0cecbc2ec387bcb33ea6bc7956fd3c0145273cd148183d49c4503c1359593d18e0303ce118a0ba01b9af3bc58cdc3cbd87d63c655094bc5ea7a7bcd60a71bdc9cf093d4816443b883ef4ba36769a3ce23a353c8303803bdd27e63a8b25083de0f60abc55ba90bccc6f4dbdc2b214bd7cb1193d59ce5c3bed6b763c3724a73c939e16bc774c57bc8b5f0cbb7b72823cfad5be3cda352f3c5f8fb8bcb15182bbe16a13bd2d8903bcaf87f0bc70bbd93c7c3d913c26320a3d10a1a63ae57edfbc4c24803c4085c63c4646a23c8b9910bc1b9863bce40ad7bbe3aebd3b6f4751bc14416abc57383f3cbd4dd2bc6c1b96bcd3a431bcd3a4b13c0e9780bd9143fa3c728bfb3ce35c4a3abfe3ef3cd5e85b3c4a94f2bce5b863bccb13b4bc469
815bcddb35dbc17d60a3c3880c03c30cd2d3daffbf8bcf570fc3ca660d5bb5225703b2fab18babd4d523ce252a4bb472e33bc6e3dab3c2e7194bc5820503d590861bbf1741f3dcaf19ebb04e5ea3c38463c3c658a98bb552e993cafc174bdcb1334bb646883ba97b2e23c36769a3c42e15fbdb256953b864c3d3bcb87bc3cbcf1383c7b8f84bcdd27e63ce322463ccb13b4bcf20a3d3db256153c779ecabb0a6c42bc29d2cdbb91437a3b9bf58f3c005d963c00d19ebcd9658d3c97b2e2bc95e2403c7c3d913c4d6397bb755aa0bb2c1678bdbc2b3dbce8fb90bbe200b13b239d693cf4a0da3b87a856bc9a81073d61816fbcea05373cc75cfe3cb4ecb2bc18f81f3c1ea18c3ce6daf83b28022c3c6afafdba3dc880bb8f3954bbf122acbc12e5503cf5fcf33ae35cca3be566703c0c2475bc5c110a3bc9cf093a96ca513c6620363de887083dd318babc4f6d3d3c2f1fa1bc341bfebbfa9bba3c7dd32e3c0d98fd3c0f0b09bc45ea083d6ac0793cb2e28cbc64a2873c321158bc835a06bd47f42ebde40a573bf9ed2dbcf3b8493c20712ebc131f553b3e411c3c8f73d8bcf85710bda7f6f2bcbcf1b8b9e56670bc1115afbcbaad8e3cf9edad3ca4ca37bd662036bbf536f83cc674edbba24c093c1f4f993b5a2a763c0ed184bc8408933c4ed71fbcf7e3073d1237c4ba18e0b03ccc35493cb78c763cc13e0cbc5dbf963c28b0b8bb227b543cadb74e3cba5b9bb97ceb9d3a90956dbb796eecbc4e4ba8bcc6aef1bcadcf3dbb49ace13b27e096bc3129473c2c50fc3baaa3023d0d98fd3ce71300bd66e6b1b93e07183c7dd3ae3c605fdabc87e25abc953434bcbc6541bd56689dbc2f59a5bc69d8e83b10a126bd98d4773b68b6533c980efc3cd2bc20bc87e2da3c6538a5bc762a423c03fdd93bea3f3b3cd52260ba219343bc2802ac3cf78c81bc61476b3c26f805bcb6305d3c1e6788bcadcfbd3cd48cc23c2b2ee7bcd230a93b3e7b20bde06500bca83077bcaadd063cf1741f3deacb32bce4d0d2bb8ace81ba7151f7bb1b0cecbcf37e45bd0aa6c6baf122ac3c9009f63bc430c33cf927323dca65a7bc8cbba53b71ddee3afb31d83cea912e3d017f2b3c8e51c3bbb752f2bb76f03dba6181efbc00971a3cb939063d6ca70d39bdc15abb8bd3943c953434bbe16a93bce9a91dbcf8cb983c6c33053cf08c0e3badcf3d3d4b08fb3b3d1f07bd7e2f48bc28c8a73c5e1bb0bbe23ab5bb26a6923c8d69b2bbf3f2cdbc6f99c4bc21e5b63a68b653bc1393dd3c3a50e2bb48a23b3d8f39543b513d5f3c8c81213b96904dbc9095ed3cde0f773c69126d3bb5d4c33ca06575bccd91e23a77d8cebcc9439238d26aadbc3602123c55800cbd95faaf3b508fd2bc7117f3bb1e1082bcff74053ca4562f3d4698153d602556bcc3d4a9bc5e6d233c33a7753b7e4737bd85d834ba07b40f3b64dc8bbcd0098139d148183bbb43acbc791cf9bc8c81213c3e0798bc50554ebbd8b700bd79e2f43bb4ec32bd02a1403c8cf5293df5886b3dd2829c3c65fea0bc8f7358bc50a741bb31d7533c2b2e673bf3b8c9bbb33e263dd7b8fd3b3d1f873c4ed71fbcd55c64bc2bdcf3bc11c33bbcb5484cbc9d73be3c3a8a663ccc6f4dbc9ecfd73b6b1603bcf296343cf20abd3cf01806bcf7e387ba9c8badbc186c28bd51035b3b', 'Array(Float32)')), +('<dbpedia:Database_trigger>', 
reinterpret(x'ea3017bda03559bc164c16bc6b2f023df30026bc04ea953d1f5ac23a0566d03c53c915bcc33f023d164c963bf5f5f03b0f6ea83b99db30bca173f6bc3ff4d83ba4e308bc64f34c3cf9a3a0bc3a8c51bc51532f3da88cd73b8a68273de703033b6f3698bc129ee63b74df663c9e8155bc5291ccbc63f6763ca173f63c4321ed3b72ea1b3d918432bd8f0eccbcf76bd7bc848ab9bc5e53fcbb999d13bdb7fcb6bb6ebd073d6efba4bcb11b9f3b4afc30bdae2f52bb2b9d0d3ce028bfbc1b3058bc82d635bd0b47683d0fafef3c253bdabc3a4e343d770c7b3c613f49bcab3db13c900b22bd801f08bc07dc36bd2484ac3cd22ec6bc81d95f3c1222acbb0fafef3b82d6b53c4497533c5d99243c501b663d7c71583d3a8f7bbc9fbc483d6c88e8bc4ea57f3c6f775fbd88b423bda986833c5ae8ca3c42e6793da6970c3a666609bde9f5a3bb00bd81bc7652233c460d3a3c4cf17b3cb934003c35e9d6bb972403bd18001a3ddf2be93c5734c7bccb15e5bc5dd417bd8252f03cea715e3a97a3e73bbf1518bd56bbb63b838de3bcbaad10bc0817aabc38db773c59ad57bd224ce3bca6d5a93c7517303c445ce03a749e1fbc5fc9623c47482d3da59ab6bcff810ebd9ffae5bcba2c75bc8a6827bdb7fcb63ce6c88f3cb2d576bdc283f3bbb25692bb554226bdbca2dbbc98dedabc5f4d28bcb11ec93c00fec83b7eeae83b2387d6bc70b2d2bba6d529bccd88213c765223bcf630643c9d08c53cac78243b7790c03c7fe4943ce490463b7daff53c3d3dab3cdfed4bbc9d08c5bc1ee1b13a825270bdb11b9f3de8fbf7bb707435bc2111f03c0525893b3e789ebccd4a843ce8ba303cf18d693da751e43bbc643e3c07d98cbc09529dbcf300a63cb93400bd1bef10bc317ea5bcadb397bcc4b812bd3fb6bbbba035d9bc3913c13c61012c3d197cd4bc8060cfbc15d3853c991c78bb3005953cf3c288bcb11b9fbc3cc49a3cd796cd3b11e40ebcb4cfa23b1fde873c6dc0b1bc8e95bbbc0b47e8bcb0e02bbd346d1cbd7f25dc3c95ae1c3d97a367bcc7ebfa3a121f023d7cf51dbbf28715bd5737f13b9b51973da989ad3cc08ea83c98deda3c61c38ebd0f7152bdcbd41d3da6d5a9bd87b7cd3ca616f1bc5734c7bb6fb5fc3c7bba2aba1af5643c0e365f3cf28a3fbda2e9dcbcbab03abc9379fdbb68df993c07d98c3c0855c73c8e571ebbf4bafdbce3148c3c8b22ff3cb4cf223d3c05e2bb2c19c83c474bd73b6f3698bbf3416dbb6004d6ba933b60bda421263bede49a3ab548b3bd7d30113d94358c3c061d7e3d75d9923be3d998bbe647f4bc4459b63a71af283d60889bbd06a143bca84ebabb2387d63c2b1cf23bb6c4edbcd4e5f33cf10e053cdc39c83c0469fa3cc33f02bcd4e5f3bb23460fbdb54833bca84b903cc2040f3dd793a3bc42a2083da8ac813d7d91023d23c5f3bcaf6ac53c1c2a043d36e302bccbd49dbcff818ebd90cd84bc168ab3bb8214d3bcede49a3de606adbc9d46e2bbd755063dd7d4ea3cc0508b3a99db303dd61dbdbcb39185bc0a0c753da6d8d3bc94350c3df10e05bd493d783d7266563b3fb311bcece7443d3dff0d3d51532f3db0e3553c9ccaa7bc7809d1bbe58d1c3c85878f3ccf7dec3a34aee3bc4cf1fb3c6b2f82bb6ad1ba3d5c9ccebc26fa12bd042bdd3cc0508bbca45f43bcca5b8d3c5404093d5b61db3bd32b9c3cdd77e5bb09ce57bb6d01f9bbd5e2493cc38049bd01fb1e3cdbc037bc164c163a2bded4baed22b8bc35e9563d65aafa3a1fde873c5734c7bb59cd81bc1163f33ace42f9bc9b51973b069e99bd71afa83d154f403d178709bda132af3c49be13bdc4b8123d02b2ccbc2095b53c9ad886bdadf1b4bcee5dab3c88f56abddfac043c7614863c2d54bbbc1bf2babc4610e43c17c8503b2635063dd7d4eabc1511a3bc51532f3c68df993c751adabc1e22f9bcc8e5a63b317ea5bc4883203afa1c31bc9a1624bc9c8c8abaf5b7533dc9209a3c85034abc402fcc3c95ae9c3a4a3a4e3d613fc9bc943836bd05a4ed3caa40db3aa327fa3c58319dbc33f40bbd52914cbc567d193d9bd0fbbb695580bcaf671b3dfc95413dacb96bbce3d998ba5250853c17c526bd10ac453c6ad4e43c8641673de493f03c8cde8d3bf7ef1cbdeca9273cd90cb43def981ebdb50a163d39d5a3bc49ff5abc0acb2dbdca99aabb2098df3cda47a7bcb8f98cbda986833d3e781ebd9727ad3cfc95c1bc71edc5bc540489bcc6ad5d3dc631a3bc2635863c292727bc3624cabce493f0bc5f8eefbcf47c603d5d155f3d6dc35b3cd5e2493c3c437fbb205798baa12f05bbe355533ce6c88fbcf341ed3aed60553cd7d4eabb1841e13b5aa703bc5aa7833dbd9c87bd2484ac3c95efe3bc7806a7bc85878fbcd9098a3c1414cdbcc283733bf4ba7d3d1da6be3a3cc41a3b6d01f93c8a68a73c187f7ebcaeee0a3d3c0562bcc10ae3bc962a573d07d90cbd805d253d9ccd5
1bb73a473bcd8d140bdf57936bdfc9297ba01393cbdd5df9f3bd369393d9919cebc55bee0bbb837aabc7693eabcbb26a13c2057183df30026bc87ba77bd193b0d3c873b133aa4e308bd01393c3dd03ca5bcbf1598bc45d5703c9187dcbb4cf1fbbc2bdbaa3cb6c1c33b3043b2bce9b7063c0a09cbbcf6b4a9bcf300a63c2f083fbce452293bf14c223c12222cbcdbc0b7bcb4cfa23cf14c223dd369b93a7daccb3b3a10973c8d5ac8bc60889b3c3621a03cb934803c6c853ebc2aa3e1bbb6c46d3c32f7353c14144dbcaeee8abcfcd35ebc0525893aa2abbfbcdeb2583d41a8dc3cc8e5263d95ef63bc4b7541bae16332bc042b5d3d31bc42bd1d68213ddbfed4bc2b1cf2bcc823c43cf43b193c89ef96bcbaee57bc1ca968bd74dc3c3dd0b8df375cda6bb9282dfb3c061d7ebca59a363c9a57eb3c21d0a83b637792bce58d1cbd6d0179bcebac513d4c72973cf5b7533dcb15653dcb1565bda3a8153de45229bbaa023e3d5e12353d2bdb2a3cb11ec93cf9e467bb71ed45bb0ef5173d71af28bdd17798bc7a7c0d3d964a81bc5b2014bd4fa2d53ca55c193b801f88bb1c2a843b6f775f3c685efebb10a91b3c4a786b3c2057183c1841613db391853c5aa7033dbedaa43cf57936bd06a1c3bcddf8803bb2d5f63b49ff5a3cac3a07bbf9a3203d20d6fc3c3621203da6d8533c6b0f583ccffe87badac361bd5d155f3d21920b3d8d5ac8bc21920bbd1da63ebd9724833cea71de3ba751e4bc7b3665bcb3942f3c89ef163d2387d6ba596c90bce16088bcc5726a3d06dfe03ccc4daebc6e3cecbc4cb034bcd61dbdbc9ad8063b5e537c3c7e6b843b197cd43c4e64b83a3a8ffbbcdbc0373c8a6827bc9146953c4a3ace3c94358cbb91875c3c67e243bbf63064bcbab0ba3cf14c223d7e6b04bae58d1c3c12222cbda2ae693cdcfb2a3d5e50d2bce741a0bb9e408e3c220e46bc1fde073db259bc3c1f5a423d9ebf72bce355d3ba49be93bc9146153ddfac84bd071a543df28a3f3d1163f33b43e025bd0236123ddc3cf2bbaf671bbb57b80c3d2098df3c292a51bcd61d3d3c8a6bd1bc8a2a0abb8779303d0a8d903c652b163de8fbf738b7be193d40f12e3d77ce5d3aff469b3a164c963d02b24cbc08172abd94b470bb10ac45bc685b543c0dbd4ebcd4c780bc17c5a63ba59a363c691a0dbc27b140bb0c7f31bd7ee73e3dae6defbc1511a3bcf9e4e73cca9cd4bc5b20143dc63123bcbd9c07bb7ee7be3ca7109dbceca9a73cc26500bd21d0a83c6dc0b13b5fc962bd2f465cbd9d4662bce028bfbc3238fd3cf30350bcf30026bc900ba2bb03afa2bccec3143c28ae163dd61a13bd3332a93cd8ce16bd1260c93b061d7ebc8d1cabbcd1b5b5bc49be93bcac78243b64316abc2773a33c73250f3b9d4662bb56f9533cfa1cb1bc819bc23a652b16bbd4660fbcf206fa3c3913c13bbd9c07bdee9b48bc43e0a53b5118bcbc999d133dc9209abc695580bc89305e3c5156d9bb8d1cabbabf15983c2387d6bb5445503ca0f411bd1f5ac23c0c4194bcff818ebb8cde8dbcfc92973c8f0eccbbb310ea3ba751e43c51532f3c0c41943cd4e5733b9cca27bc9727ad3cd9090a3bdb01ff3bfe49c53b4cb0b43c8d98e5babd9fb13b3c02b83c1fde873cb5897a3cb259bcbab6c46dbdc9209abb5156593be21ae03cd4c7003bf76bd7bb96e98fbc5580433d778d163d375fbd3b6c4a4b3c03edbf3bff818e3c5153afbcd520e73c139bbc3c184161bca6d5a93b6efecebb03af223dddf880b865aa7a3c42a5323c78c8093d3662e73c3bca6e3c0ef5973cc7e8503c09521dbbbf53353c8ce1b73bf14f4cbcc8a7893c6c4a4bbb6c88e83c62fe813c23460f3cea30173bbab0ba3b1787093c9ccaa73ce0665cbd0d7c873c0c4194bd63f6f6bca59d60bd66e56d3a3a4e34bd8600a0bcf6f2463ccc0f113de0665c3c6720613de6444a3ddbfe54bcc66fc03c87b7cd3cf2067a3cafe6ffbb30814fbd7790c0ba7a7f37bdb021f33c685bd43c3140883bb73d7e3ccc8ef5bc49c1bdbad2b20bbd6fb57cbc1c6e75bc900ba23c431e433c42a208bc314088bcbe565f3c7a7fb73c5b233e3cece7c43cd5e2c93cc5726a3d9c8c0a3c4fa2d5bb627d66bdc5344dba4883a0bca42126bd586f3abbe6c80f3d0817aa3c70f06f3d1fde07bd0f7152bc2d92583cd177183c2e8c04bd3cc49a3b089364bb4e67e2bbfa5d78bb061d7eba112556bc5ae520bb33f48bbc09ee813cc8e526bcbde078bca0f7bb3b9476d3bb277323bdfa1cb1bc4ea5ff3c5194f63cb9b3643bc2040fbc61c38e3dc6adddbcf43b99bcb44bddbb1ea3943c9862203db87871bc3816eb3c3f32f63c5f8bc53c6bce903cd988ee3ccd4a843c544550bc95ae9cbd0b0621bdbf53b53c42e3cf3c76554d3c9049bf3cf82a90bc224c633c7eea683b0c7fb13c07dcb6bbefd9e5bb220b9c3cd231f03c991c78bc73a1c9bcd5df9f3cb256923cb297d9bb96e98fbc
587264bd892d343c1b30d83bb683263c838de3bb24842c3b441b19bcce045cbd623c9fbcf72d3a3d1787893cd4668fbc0990ba3ac3be663cd9886ebcd4e5f3bb540489bb4c72173d2c57e53ba324d03cf3416dbb831129bd751a5abac7e8d0bb71edc53c8aa96e3b1bef10bc85c2023d4dad0abd7a7c8d3d431ec33bee5d2bbd864167bd3e789e3ca5db7d3a3b4b8abcbc6114bcb259bcbc87b74dbd558043bc4fa255bb023692bc5d15dfbbf5f5f0bbf5f570bc64f3ccbcc631233dbbe803bb8c5df23ccc0f913b0a0cf53ba5dbfd3c366267bd1c2d2e3bede49a3c2c57653cf47ce0bbc8266e3cf3c2883b860020bd6c88e83c037105bd3b89273a511512bdaac4a03c1c2a04bdbca25bbdc1c99bbc627abc3cea3341bbf76b57bcd758303c509c813cb29759bb02b24c3ce609573c6d01f9ba0a0cf5bc18009a3ccd88a13cba0e02bd0f6ea8bb2676cd3c193eb7bcd69977bc0428b3bc8d19813bb83a54bb09ced73c9f7b013d4610643cf0d391bb9b51173d2a629a3cf579b63c7fe494bcb7fc363d0a8d103c4afcb0bcac3a073cbe56dfbc10a99bbca3a8953c2c57e5bcb021733b50da1e3c501be63c82d30b3a4459b63ce355d33cdcbd0d3cf3c208bd2635863ba4e3083c95b1c6bce8fb773b2635063bf57936bda5db7d3c7e6b043c55bee03bab3d313c0a8d103d0c7fb1b8b683a6bc4cf1fbbc013ce63c2d5111bdb58650bc914695bb1a7680bc28efdd3cdcbd0d3c1787893c613fc93c596c10bd1af5e4bcf6f246bc0c7fb13c41a8dc3b4c7297bbb975473cad327c3b1d68213a7806a73b4e6762bc26fa123da8ca743c09521d3bcc8ef5bcb7ff60bd4789f43cfade133c6c09043d9d847fbc9862a0bc72ea1b3d35e956bcd94dfbbb24c249bcc5f385bb596c10bd0177d9bb1ea314bd8ed3d8bcb64589bcb87871bb50ddc83ccec6be3b151123bd722839bc061d7eba63b8d93c6ad464bb0139bcbc00fe483d037105bc637712bcf579363c8cde0d3de2dc42bc73632c3c5e0f8bbc9e81d53c72ea9b38e21ae03b95ae1cbb0893e4bc39d5233caa023ebcc0500b3ca6970c3c48864abb873b93bc2aa0373cb021f3bb64f0223cbde0f8bce3148c3915d3053c23468f3c5483edbc5dd417bde741a0bc933b60bbca99aa3c1e22793ce9f5a3bc64f022bc2400e73964f0a23ba59a36bc6f3698bcf43e433c0e33b5bbe31736bbf965833bee9ef23cede49a3c2cd880bc76554dbcc43777bc900b22bcfc117cbb5f4d28bdac3a07bdfc11fcbaa986833cd1f6fc3ccb15e5bcc5f6afbcb50a963bba2cf5bb84c8563c999d13bdd8ce963b2bdb2abce45229bc6ad464bcb2593cbcd177983c78cb33bce490463c0fafef37e160883c666609bba2ab3fbc4e269b3cb3106abc5ae84a3cae2fd2bc7bbaaabc9473a9bcdeb2583c1c2a84bcd32b9cbb3081cfbbde7111bc431ec3bae77f3dbbe9f8cdb9f9e4e7baf43b193d8be4e1bc4bb3debcb4cfa2bb06dfe0bb873b133dd4668fbb7c333b3d220ec6bca0f411bb09529d3c98a0bdbcaeee8abc31bfecbcde71113dc7aa333cebacd1bccc0f113c4af9863c49be933c00bd81baa4e3083d87baf73c64b205bbf5760c3c1552ea3b16cb7a3c45560c3d99dbb03c9281083c26fa92bc0c82db3b0ef5973b873b933bcbd41dbcaeee0abdd07ac23c84c8563c4a786b3c445c603d892d34bc9727ad3ba9c7ca3b07d90cbbdef0f53c0dba243c2968eebb6d82143d0e36dfbccd4a043bd5a182bc3d7e723cd1f3d23c35e9d6bc19baf1bb2b9d0dbbd369b93afb57243c75d992bc01fb1e3cd61d3dbc78476e3d55bee03c346d9cbbb391853ce8bd5a3c14144d3c8311a93c460d3a3cd2f0283cb50a963c999d933a5c5b873c7bfbf1bc121f82bbe5cbb9bc93fa18bd62fe813c528e22bdb837aa3c42a288bc4afc30bcc10ae3bb3a4eb4bc8cde0d3d8c1fd53c0c825bbc3d3dabbbf6b4a9bcbc61143b85034a3d5fc9e23c7f2232bdae2c283d9ad8063df78b013d8cde0d3b7abdd4bc964a01bbadb3973c9727adbc220b9c3cf8a64a3d87baf7bc23460f3b5348fabc3e7bc83c346d1c3dd369393c0a0c753c8503cabc8060cf3cd520e73cf28abf3cd9098abb59aaadbdab7bcebbbe1becbb96e90f3d2fca21bb707435bd46cf1c3d42a5b2bc685e7e3a10acc53cbb26213c68df19bc17c526bc7ea921bcede49abb4a3acebcbe18423df8682dbcb9729d3cb878f1bbadf1343c01fb1ebcd65b5a3ba84b90bb3332a9bc4af9863c4789f4bc2fcaa1b976554dbca8caf43c43e0a5bc66a7503dd2b20bbd9727adbc4845833b7bbaaabc7eeae83b4afc303cbbeb2d3af28795bca2ab3fbc5b9f783cd2f0a8bbc99ffebb6d82143dcc50583cb44bddbcf868ad3c73258fbc73250fbd3081cfbbc8e5263b79419a3c1125d63a6bce90bc95b146bc8e95bb3c6a939d3c1f5ac2bc24c2c9bccb96003b55bee03bf43ec33cf76bd7bb1a792abccb123b3b5b23bebcd0b
85fbc7d3011bd6720e13ce87c13bc809e6cbaf2c5323ddbfe54bcbca2dbbc474b57bdb3106a3c25fd3c3c7d6eaeb91ab747bce9b706bcfade133da55c993c5c5e31bdfe8762bb7266d63b3470463c164c963c82d6b5bb0525093d21d0283c78cbb33bba0e02bc1511233c71af283c14d6af3c000173bae49370bba713c7bb0df8c13c9ebf72bc3e781e3c5dd741bc2968eebb3cc41a3d928108ba35e62cbb6c4721bd037105bcfa1c313cbe1bec3c8e571e3c4e64383c892db4bc2965c43c5f4da83cbd9c873cff84b83c4e269bbcdfed4b3c2cd880394497533b2c169ebbe6062dbd476b81bc99194ebbdbfed43c88b423bc6f77dfbbefd9e5bc3913c1bce16332bd2f08bfbc6bce90bb88f56abcdeaf2ebdfa1f5b3c7b36653a4e6438bb220b9cba71edc5bb5e0f8bbc2aa3e1bb00fec83b1ee1b1bcb256123d2aa361bcee5d2bbccc50d8bb069e993c42a5b2bc9ebf723cd4668f3cc5f3053c1bef10bdf6b4293cdd369ebb5ae5a03c35e62cbd699647bdd5a1023cd32b1c3cc823443bafe67f3b4deba73c9c0b6fbceb6b8a3ca0f73b3dd2f0283b28ae163ded22383c29686e3c695500bd5c5e31ba0c4114bd5c9ccebb11e40e3d7fe4943c23a7803cc8a7093c9b51173dc242acbc412c22bd0469fa3cb11e493c97654abc02b576bbe025153a42a208bd1414cd3b5e0f8bbb7655cdbc0c41143d3a4e343de647f4bc64b2053c59ebf4bc8d1cab3cfa1f5bbc4267153cf052f63b5b233e3cc8e5a6bca98603bb9a57eb3b8a6bd13c267977bb3140083b1da63ebdf47ce03b09521dbda9c74a3af43ec3bcb7be99bb825270bbbbe8833c40ee04bd35e6ac3af28715bcb11b9f3c6004563c887686bcf9e13d3d72ea1b3ca2e9dcbb9f7b81bcdb017fbc052589bcbe1842bdff461bbc3a8cd1392fcaa1baa8ac013d4594a9bccd88a1bac66fc0bc8214533c3140883c493df8bcda47273b6669b3bb70710bbd205718bc02b24cbbadb3973cec25e23ce87c133d248102bdaa02bebcc5726abb21928b3bbaee573b18001a3c23460f3dec25e2bb863e3dbd26fa123c6ad464bc5e12b5bacb12bb3be87c133dcdc9e8bcc5f385391c2a043d2111f03c699971bbbaee573caf6ac53b64f34c3cf9e4673c31bf6c3c2c57e5bb', 'Array(Float32)')), +('<dbpedia:Database_tuning>', reinterpret(x'29fc83bbc84b403cc91714bd2b1d76bcb3d2263d023b883d4bf4a0bde646d2bbe61cf7bb0001683cfcbf6b3dd2f713bd9dd1aa3c5763dfbbef6a433c4acac53c7f4e36bcf5bb043c717b7c3d1b0f34bcd2998c3b6991abbca629c83b0ed4a13c31b2a8bcc95b053de7b49e3c545c833cc98fb1bc25bb07bd8cb3a33d755ef13bb316183ce66068bca16069bd319892bc5cce36bd0fba0bbdcdd02dbd356bc23a3a1a0b3d5c70af3ca2f810bddf76973c016f34bb36d90e3d728b41bd201c04bb111fef37d5e459bb619715bbd16fb13cc4245a3c58d12b3d6e1699bd0a4fb4bc5c8ac5bc3af0af3c908653bc9976183cecf51a3d889c02bdd6f41ebbc424da3acd72263d122fb43c7f0a453c6153243a59fb06bc7b37153c1fc84dbc53aa453da5874fbab206d33cce9c01bcda6947bd762ac5bc8ba3de3ca33c02bd571feeb916a45cbc91f41fbc11db7dbd35f3a43c0d22e43ce6be6fbc295a0bbd13fb87bcabf2a63c16e84dbc4b0e37bd823bfcbcc9759b3c2477163d9822e23bb4b8903c2ccf33bd8858113c610fb33b2830303d9990aebabf4165bd4aca453d34e3df3c24ef333d3abc83bcdf76173d64e262bc6eb8113dd1a35dbb0af1acbc179a0bbcab1c823cb89b853cef2652bd8c6f32bc168a463bc57890bdcd7226bdf9b80f3c72712bbdbb2a44bce85617bdaaaeb5bca5e5d6bcf88eb43cdb790cbd827fedbb050eb83c1c9716bd20361a3c88b6983bf830ad3cbc98903a9e73233d9822e23ba9fc773d9086d33aafef313cc84bc0b9ab941fbd0d66d53bf7dcf6bc288e37bdab1c023cd1cdb83b545c83bbbc3a893d2ca5d8bb244d3b3d65f2a7bc4b3892bc83a9c83c34fdf53cd2f793bb3a1a0bbd1205d9bcb857143cbfe3ddbb99d41fbcb3d2a6bc799fedbc845b06bd9a5c02bd5b60ea3c6decbdbce2ebbfbb4bf4a0bbafefb1bc4f0bc2bb0189cabc0f761a3dd1e7cebcf9b88f3b7a0d3abdbc9890bc5b60eabcd45cf7ba834bc13c9052273c2ccfb3bc2830303b0ed4a13caa26d3bcc9b90c3d466f333d541892bc06961a3d7a4166bbd99df33cc8c3ddbcd05f6c3b99aa443c3c3bfdbc0e4cbf3b99eeb53c653619bd6efc82bd17b4213df5bb84bdd99d733c398263bd6c7e71bcd5284b3c0001e83c1bcb423dec97133d823b7cbc6881e6bca21227bc3a1a0b3d8858913d65502fbcf44db83c39c654bc986653bcecdb04bc36d90e3a5d5699bc316eb73be4fb04393044dcbb766e363df8ec3bbceefcf6bcbf4165bd0ddef2389aba89bc7a41e6bb1bb1ac3b6d06d43b0e
90303d350dbb3bef40e8bcb61df4bc4f69493b57635f3d139d80bc30884dbdd60eb5bcfdcf303801b3253de3598cbca6cbc03c5fbbfc3bc4acbc3c06961a3da5874fbd5d56993ce7e84a3c1bcb42bcb3ec3cbd1bcb42bc30bcf93b0e4c3f3b54d4a0b93e751d3da2b41fbcd2f793bcdaad38bd244dbbbc7103df3a77dc82bda33c02bc577df53b47559d3cd542613dce9c813a0aadbbbcb703de3bb7cfb13bc917943cfdcfb03cf77eef3ae373a2bbbfe3ddbb1fe2e3bcda93223d6cdcf83b0e90303dd45c77bd349f6ebbf720683c61f51c3a547619bd84751c3cfb7b7abc389c79bb610f333d54ba0a3cf916173a344167bc5c46543dec97133dbbccbc3c6933a43cdddeefbcc17b85bd1bf59d3cc06bc0bc9808cc3b2874a1bbc5bc013dfc47ce3bf050ad3c8365573b889c82bc6426d4bae3b713bdf8ec3bbd6484dbbc5d5699bc838fb23b350dbbbcaa50aeba667a8a3a8cf794bca60f32bd02f796baf9fc003c383e723bbcf617bd4b3812bea6f59b3a77dc823d5df891bc7a6bc1bb39ac3e3c1712293c76b2273d111f6fbde6a4593ceb435dbd4645d83c536654bd73f98dbceabb7abc882e36bdfc61643cb220e93c61b1ab3b523cf93c59590ebcc57810bc2764dc3ce712263c6abb06bd6ad59cbc88fa09bd8307d0bd8307503a1aa1e7bcf323ddbc361d003d7e3ef1bcc73b7bbc8c2b41bd056c3fbc3551acbbbf9fec3cd145d63c7ee069bd2d57163dccc0e8bcc13794bc84759c3c04fef23cafd59b3c42d0afbc577d753c619795bc09694a3c54ba0a3d1f84dc3c3d07d1bc4711ac3c361d00bd8c2b41bd9d4948ba284ac63c6087503d0e9030bc422eb73b95351cbb9e73233d9d05d7bcd2990c3df00cbcbd3ed324bddd8068bcf57793bc571feebc201c84bcc98f31bdc33ef03c588dbabc172cbf3c305e72bcb3741f3c683df5bcbbccbcbc7688ccbcbcf617bd53eeb6bc00a3603c9196183c501b073d3551acbc3a78923cee5afebcddde6fbcc5341fbd9aba89bc20f2a83cd145563c3ddd753dbc6e35bd128dbb3c473b87bb8307503bc7ddf3bc8b1bfc3cf0941ebcb89b85bcd696173de7ce343c1abb7d3b35512cbc861e713d61b1ab3c3a1a0b3df95a083dd925d63b3198923c2db51d3c7facbdbc02998fbc012bc33cc98f31bcbb884b3bdf5c013c25bb87bdd983ddbb5c702fbca9e2e1bbc0afb1bcc917943c0d3cfabcf3df6b3b0a4f343df84a43bda21227bcb7e9c7bc3000ebbc4a42633cb89b053caa6ac4bc876262bd4fc7503c2d5716bd280655bbdbd7933c4bb0af3c61b12bbd7bafb23cec9793bc05cac63cd5e459bc8fa0e9bb13b796bcf8302d3c577d75bd725715bdf3df6b3caedf6cbbfc1df3bce89a083dbc98103bbb88cbbc01894a3cef84d93c95790d391263603d1646d53c83a9c8bc954f323d9aba893d2c8bc23c0e90303c24198fba9028cc3c9196183dc0512abd2ccf333d31ccbebcad9bfb3ab813a33c8417153c17b421bdd001653c5c1228bd229b7dbca60f32bda2f810bbf07a08bd80bc023da629c83cd9835d3c0e32a9bc6cdc783ca102623b6c7ef1bc4ae4db3cf4abbf3cf07a08bdc9b90c3de27df3ba6a190ebdce9c81bc8b1b7cbb02f716bcc06bc03ca102e23a905227bc69abc13a168ac6bcb3ec3c3d43fa0a3c7bf323bdd58652bbf88eb4bc5cce363c47990e3d6d4ac5bc34e35fbc8f5cf8bb4b38123cbabcf7bceaa1e4bc9196983bbf41e5bc69912b3d7e24dbbc2367d1bce83c013c4ff1ab3c54902fbc4e3fee363088cd3cfd13a23b3088cdbcbb5ef0bb47b3a43b1f0cbfbadf76973913b7963bb3d226bd6e1619bca6b12abac137143d02990f3b462b423d122f343dc0c9c73cd1e7ce3bf409c73cdf32263b0e4c3fbce4fb04bd13fb873b4b52a83b0f1813bb0dc45cbdf00cbcbbf84ac3bb739b86bb1bcb42bd54ba0abdb703de3b4f93a43c4bf4a03b421421bd919618bc9deb40bb95d7143d8b1bfcbbeaffeb37179a8bbb571fee3c394e373c1cdb873c5b02633b0f18133a02dd80bce24947bc0969ca3cd5e4593cb3749f3c2d13253d350d3b3cf8ecbb3cafbb053db72db93c5476993c82c35e3cfd2d383d31f619bc4f4f33bdfe9b04bdaa50ae3d36959dbd2d3d003c7bd98d3d656ac53c0dc45c3cf491293bc0c9c73c3a3421bd2d9b873c435892bc005f6f3d5fbb7cbcdfd41ebc58b795bc50790e3bda6947bcda93223c721324bdc865d6bcfe9b843a1b5325bd6a198e3c4c7c83bd6153243cf050ad3d906cbdbb29161abdf5bb84bcab7a093942d0afbc4e25d8bc91da093c87045b3d3d07513da1e84bbc87a6533a1f0c3f3b76e6533cfe9b84bccd2e353c57dbfcbce660e83a361d003cae674f3d0b1b08bd31ccbebc47b3a43c0fba8b3c711df53cd5e4593d99aa443b23c5d83ca18a443c54902f3c451bfdbcda359b3cf88eb43c27c2633cd1cd383c01b3a53c8ccd3936afbb05bc06961abcb4fc01bd7f34203dda351
bbaf03697bbe6a4d93c2764dcbb160264bccc62e13cb78bc03bd0bdf3bb71a557bc7e3ef1bce8f80f3ce7ceb4bc0dde72bbaa6a443cb3302ebd151cfa3c47990e3dcdea433b46e7d03c3088cd3c1aff6ebbfce9c6ba3a92a8bcbf9fec3c3088cdbc8be7cf3ab7cfb13b7610af3bde08cbbbcc62e1bc3e751d3d88d0aebc3a1a8bbc47b324bca33c02bdab1c02bc7b7b06bd29b892bca212a7bc4bb0afbb3441e73cf57793bc8b1bfc3ac424dabc31b2a83c42d02f3c17ceb7bc53c45b3dc44eb5bb01b3a53b012b433d1c7d803bfc61e43bd5284bbc13151e3c99eeb5bc3e751d3c716166bc9808ccbbf4abbfbcc17b05bcbbccbc3c60e5573d29169abc95f1aa3ca797143c1b53253d0925d9bc3abc833d11c1673cb7e9473d534c3e3d6594203d541892b8aeabc0bc3eb90e3d473b07bbbcf617bc3a1a0b3d01559ebcd60eb53cda0bc0bc667a0abd172cbf3c69abc13c3e8fb3bb5ba4db3c65ae36bcf4ef303cd0bd733c352751bcc5bc01bd0e4c3fbb177030bc6933a4bbb23aff38ab1c823bf39b7a3c3a9228bd2720ebbb9a5c023d4c7c033d4f4f333ca9e261bc24190f3cd2b3a23cbc6e353c1cdb073d171229bd06969a3c1faeb7bcda351bbc01cd3b3d5fbbfc3cbddc81ba17ce37bc1c97963c6197153d60435f3c39acbe3c8307503c390ac6bb016fb4bbf39b7abddeeeb4bc2bbfee3cf0d88f3cb4b8903ac57890bc1b0fb43c0b1b08bdaeabc0bcfdb51abdb38e35bd5d3c83bb2491acb8d2551b3d688166bb7fd6183d994c3d3cef6ac3bb90284cbb57dbfc3c58d12b3d6dc2e2bafca5d5bc84751c3d7bd90dba683df53a7a8557bd4bf4203d87c0e93acd149f3cf88e343d284a46bc5873243a00bd76bda5bbfb3cfc47ce3c01e7d1bcdaadb83ca9fcf73c0d3cfa3c8307d03c68235fbccefa883cc865d6bcf84ac33a9c63de3c7500ea3c6197153de1dbfa3b3527d1bc350d3bba8417953c8d3b863d683df5bc717b7c3c6a198e3ca102e2bcc39c77bc39684dbcaa0c3d3bc821e5bbe7702d3c12eb423c69ef32bbc06b40bd3551acbba5a1e53b7f92a7bb6d645bbceb6db8bcdf76973afd8b3f3bef9e6fbc7f4eb63c65d8913c243325bc0b1b08bc179a8b3cce9c013c3eb98ebc05e45cbc42eac5bc7271ab3b689b7c3caf190dbc7213a4bc6153243c9a1811bad18947bd84d3a3bcda0bc0bc650cbebcebcb3f3ccd8cbc3b6153a43a88b6183ac44e35bd7e68cc3b9425d7bcd12b40bdd01bfb3bdf32a63cf07a08bc422e37bcef2652bb383ef2bc6ad51c3d1b53a5bc3637163c0a351e3d3e4b42bd91f41fbdf57713b9b4b8103d801a0a3decb1a93c3a78923c39c654bdf5bb843c30bcf9bc90e45a3cb3ecbcbc8307d03c6dc2623c3abc833cdf5c013b739b063d2db51dbc0e90b03ca270ae3cb3d2263dda4f313d9aba893cde902dbde21fecbacd8cbc3cc3fafe3cd01b7b3cd2112a3ce373223c7e24dbbb05b0b0bcc4f02d3cb3a84bbdecdb043d6efc82bcf0941e3d3527d13cda93a2bc7e24db3cd00165bd2b1df63c168a463af433a23ce2635d3ca270aebccca6d23c4b6c3ebc344167bc05ca463c1c7d803cefc8cabaebe5d5bb98c45abccdd0adbc5705583c8ccdb93bda69c73c717bfcbc11dbfd3c01e7513a8704db3cdfd49ebc838f32bb3110b0bc47559d3b4a28cdbcb7e947bcf916173df9741ebb3637963d3924dc3aecb1293d13d12c3db89b853bddde6fbb3abc83bce38d38bcec53223c587324bcbc9890bc3e4bc23cf9749ebc823b7cbc2433253d582f333c2b7bfdbc95790d3dfd13a23beb6d38bb305e72bc2c8b42bd58eb413c716166bdd16fb1bc7544db3c845b863b3a78123dd145d63cd6f49ebbae23de3c8331ab3c2b1d763c389c79bb53aa45bbecdb843c1c9716bbc9751b3ce249c73b0d22e4b9e72cbc3cb6bfecbc83ed393cf44d383d462bc2bcc59226bd067c04bcaa6a443d139d803b455f6ebc711d753c1fe263bc8c551cbdbc10ae3b0f761a3c8221e63b77f618bd43b6193c9a1811bdb771aabdf9749ebc71035fbb68dfedbb05284e3b302ac63c4fc7d03becf59abc093f6fbd3ed324bcaa6ac4bc0d3cfa3bc4245a3cd1cdb8bc363796bc77dc02bd1fae373cd542e13bfc47ce3cf9b80fbd7a0dba3b0ad7963ceb87ce3ccdea43bb7213243ca2b41fba61b1abbce67afebbeb29473c35512c3d0a798fbc93fb7b3c68dfedbc951b06bcaf190dbcc40ac43c1bf59dbc1affee3b6536193d75006a3c9d05d7bb84b98dbcdfd41e3cf0d80fbd4aa0eabcf84ac3bb913811bc3110303becb129bd9efb053c6e16993ca18ac4bcc73b7bbd42a654bcf4d51abc065229bc345bfd3b4afef1bb0652a9bc717bfcbc389c793c32dc033ca6f51b3c694dbabaae81e5bc667a8a3b4fc7d0bbd77c0139d2f7133d6967d0bc3d07d13bdac7ce3be7e8cabc02990f3cd9e164bcecdb04bc5ba4dbbce712a63c95790d3d451bfdbcb3749fba27dc79bc8365d73b
a797143dde66523c722d3a3ccd72a63c57dbfcbcf8c2e03b9efb85ba1660eb3cfdf98bbc6881e6bcb7cfb1bbabf226bc11c167bcdf7617bd7fd618bcaf33a3bc4fc7d03b09694abdd5a068bc88b698bc7ae3de3cd93fec3a2ccf33bc3e312cbbda0b40bd111fef3b016fb43c95798d3c06da8b3c3da9493d2b7bfd3bb81323bc23ab423b23abc23b77dc823cf44d38bcd12b40bb24198f3b2491acbcf9b80f3d9dd12abdc40a443a983c78bb5df8113d47f7153c122f343cc9751b3c3af0afbc7f4e363c3982e33c65f2273ce7e8cabbb7cf31bc4efbfcbb12494abcfe9b843c717bfc3bdec4d9bcd66c3c3cba1affbc739b863bdb790cbc50798ebc16e84d3cf381e4bb3110b0bcc3fa7ebccc7cf7bc9eb7943cf44d38bd34e3df398307d0bbd925563c983cf83c8f42623b1fc8cdbcebe5553bfe9b84bc02dd00b94b9699bc4cda8a3a5b60ea3a4ff12b3ca160693c25bb07bdcd2eb5b917b4a1bc98def0bc95359c3b58d1abbcc0f322bc016f343d8ffe70bc7b7b86bacd149f3ca212a7bcef4068bbb747cfbcfdf98b3c4a86d43c05b0b0bb951b063dad9bfb3bdac74e3b9deb403c1646553cbc98903c3a7892bcf07a083de3d1a93a98c45abc661c833ce3b793bc137325bcd638903cecb1a9bc466fb33c58d1ab3cfc1d733c2d9b073d58b7953c4f4f33bcf409c73aec97933c83a948bb91f41f3cb2c2613d5c4654bc9efb053dfd13a23c1efc79bd8c551c3d389cf9baa543de3c5db4203cecdb043daedf6c3d2367513c17ce373c6abb86bbc5bc813bd189473cb7712a3dbc541f3c183c84bd8b01e6bbcd72a6ba7ec653bb183c843c17ce37bb99aa443d6087d0bc3ed3243c0f761a3d5cce363ce2635dbc4645d83b762ac5bcbb2a44baf577933b4a86d43c35c9c9bb882eb63c823bfcbcf39bfa3ad16fb138c95b053c3f5b87bc2491acbac3e0e83c54ba8a3a946948bcc137143d88fa093d82c35ebc3abc033cec390c3c46cd3aba9880e93bc8a9c73cde66d23cf03617bca5ffec3b4cda0abdc8c3ddbcebe5d5bbf720683c13b7963c5d9a0a3c8078113c38e0eabad6da08bd00bd76bd87045bbc3485d8bc9a1891bc8d3b863c9efb853b501b07bdcc7c773c58d1abbbd2112a3cecdb04bcc06b403ae21f6c3c4aa06a3c95f1aa3cc51a09bc53ee363c13590fbd27c2633c168a463b693324bdf0949e3c428cbebb394e37bc71e9483c3485d83a6967d03bf57793bc50351dba209421bc80bc82b9cd2e353c1efcf9bce712263abc6e353d6a7795bb13d1acba5490af3c6ad59cbc2477163aa6b1aabb90e4da3c0ad796bcafef313cb3d226bc75bc78bc8c112b3df5bb043d20f228bbce58903c47551dbaa160e93cbabcf7bb6a77953b95f12a3cb72db93b42d0af3c5cce363c288e37bc9e159cbb8ccd39bd473b07bc58151d3d889c82bb2830b03a587324bbbfe3ddba8d3b063dcd2eb5bc04a06b3c35f3a4bbc4f02d3c30bcf93a807891bcbb445abb3e4bc2bc0a790fbd7b512b3a75445bbbbf9f6cbcabd8903b005f6f3cb206533c4e835fbd5d5699bc2c8b42bc88d02ebd9c635e3c47559d3ca2f8903c58d1ab3c29161abbd5cac3bcc0af313cf00c3c3b4f93a43cd6da883c27206b3ce3d1a93bb857143af491a9bc349f6ebce60261bc47b324bcc9d3223b7e684c3c6c7e71bc72cfb23c058655b86decbd3b3f5b073cc39c77bce660e8baa79794bcc5d697bcf11c013c7ac948bcf4ab3fbc6e30af3be83c813c861ef13cd2b3223c3a92a83cc4c652bc277e723c6594203b93fb7bbb9a1891bb7544db3b7754a0bc5c2cbe3c3ed324bc1f26d5bba7db053c694dba3b20361abce78ac3bbecdb84bc168a463ddd80e83b179a8bbc667a0a3c30006bbde89a08bcf4ef30bc661c033cdb1b05ba24198fbccd141fbc9d49c83cc931aababfe35dbb71e9c8bb7a85d73bc931aa3c83edb93beb2947ba7ec6533ba6b1aa3cd6f49ebc58159dbc649ef13c683d75bc4104dcbc6153a43baf4db9bcd05fec3a9e7323bc876262bd8fa069bbb206533c3eb98e3c473b87bc31b2a83c36d90e3c1ae5d83c1fc84d3bb2645abc2d13a53ab330ae3c3ed324bdd696973b30a263bd649ef1bbf73a7ebb3000eb3a54d4203dab1c02bd9c7d743ceb435d3ac95b853ca25698bb8365d7bb050eb8bcf44d38bc3441e73bd638103c4ae4db3cc5341f3c7161e6bb3968cdbb0dc4dc3caedf6cbc5ce84cbbc137143ddd9afe3c06da0b3cd45c77bcbf41e5bca1e8cbbb0b1b88bc9c7d743b0586553cd145d6bcb2645a3c2477963c8c551cbc20369a3cf467cebcfca555bd05e4dc3b954fb23a0a93a5bcc8c3dd3c', 'Array(Float32)')), +('<dbpedia:Database_index>', 
reinterpret(x'cdaf1dbcd976583c0a99cdbc7596d83ce5a32b3d9e9a6b3cee5da3bc11e2173c498582bd2eba2e3c7798863ca7b8cd3c7f51a73c86d21cbdfa25b5bc1d46bf3c66c02cbcf4412cba0f0d803b031858bc37d890bc41cb8a3ce1cce53c86d29cbb4d2fb2bca754633b68cdef3ab0aaf0bb46e667bd9efed53beb234a3d5584e8ba595bae3bdfbf22bd617910bdd021a2bb68fa85bd158d1ebd64872abdf03312bdb890273dec2421bc3d582fbdbf119d3c997f0e3bee954ebc7eec65bcd39326bc776b70bdc968813d3e59063c88434abc71ebd1bc0626723c853530bcb31cf53b58f66c3c50a1363d4a2118bdffd112bd76fb193d8bb54ebcbcd7c3bc4d939cbbe1cce53bdc84f23c71ec28bbb82c3d3d16fdf4bc541412bd6b3f743c2638e23c997f0e3d1de2d43a76972fbd97e221bc6c08a03c8127963cd5cca83cbf4871ba2db9573d4a59c3bb9ec701bd6895443bc5913b3d205459bd0c6ee5b88434d9bc1de2543d429f4b3c860a48bc0e7093bd4e30893ca000043da6e40c3cdf5bb83ceef9b8bc5b95873c0e0ca93ccd13883c58f6ecbc5621553ac283213c6013f8bca547203c853607bdf8ec323c57ea00bd6ede0e3d1b0ce6bbcb75c43a808a293cff6da8bccf4c0a3dea86dd3bf30901bb5584e83c4066c9bc614c7a3c4103b6bc3f6572bc7cdfa2ba196f79bcad01183d9f6397bc662417bc31f407bd5b95073c7eece53a0b9aa4bc4d939c3cb9c9a93c78d1883b80ee933c322d0a3de7dd843dd159cd3c5c69483c4167a03b3d58afbc2d812cbd4f690bbd199c0fbdb38136bca09c193d68fa85bb238e32bb43a0223d11e297bbe91687bc7eb5113cc6657cbca956913d2aabbd3bcf1ff4bc4f690bbd6dddb7bcb9653f3c8f282abc3b1e563cf10753bc32c91fbc2600b7bc4410793b9cf1923c7d18a53ccb75c4bce94ddbbc3c2004bda3aab33ca71d0f3d51a28d3c5a3046bd05b69bbd31909dbc8cee50bab890a7bc69969bbcd0858c3cb4b9e1bba9f2263cb8c8d2bcc559103c99534fbdf6179b3cd25aa43cca74ed397808dd3c64872a3cac642b3c37d890bcabc7bebc5005213c88a80b3da4ab8a3c2c4901bcf03312bda47ef4bc57ea803c71237dbddc4d9ebcaf72453c07c35ebc815e6a3c9236c43cf309813dfb5e373d3b83973b5f77e2bcd70602bcd93f843bf542833dff6d28bd818b003c44753a3d360fe53c2739b9bc3ae62abc3673cfbcdb149cbcffd1123c0143c0baa09c993c0f44d4ba2638e2bc853687bd297392bccb75c43cbf48f13b6eb1f8bb2aab3d3c7eb5913c997f8e3ae507963bc357e2bcd39326bda956913d7eb591bdf6179bbc3d90da3b544cbd3c2e82833df03312bcd2be0e3d498582bc055231bb09fce03cfe6c51bc554d943b04b5c43aaa627d3d8d8ae6bc870b1f3bd976583d15f188bb8536873c6ba3de3baf3a9a3c004269bd38acd1bc9f63173cca3d99bb77341cbd7389153b0f4454bc158d9ebc9645b53db4828dbcc0ae09bc39493e3de6db563b74c2173dcdae463dbb3a573ba446c93c211d053de46b80bc26d4f73c0e70133c02a8013d1fe4823cbe10c639c52cfabb3e2cf03c2600373c047d993c6da50cbd9199d7bad392cf3c9f63973c2600b7bc79a549bb9544debbfbfa4c3c4d939c3c12b7afbc4b5a1abced94f7bcf7eb5bbb443d0fbc1ce1fd3c0998f6ba4dcb47bda3aa33bad7a140bc4f690b3db00e5bbc4958ec3c05b61bbd5dce09bda8553abd2ebaae3b8f282a3c59f743bd162a0b3c16fdf43ad1bd37bc2362f3bcec880bbc897cccbcd53093bbc0e5ddbbdebecbbd2fbb853d82c3ab3ce29511bddb4cc7bc876f893c58bf983c6115a63bfbfa4cbd902901bd8e5f7ebc5414923ce1943abd5d6a1fbdc5913bbd67c103bdb00e5bbdc665fc3c461227394f690b3b9199d7bc64872abd8a7da33c5c05de3c7cdfa2bdd2be8e3ca61c38bc88a88bbd5bccdb396b07c9ba3bbaebbcc52dd13c544c3d3dda7886bb2be368bd5af89abcee5d233911e2173c02a8813bbe10c6bc4fa0df3b30f3b0bcca3d99bb63b2923c9953cfbc1ee3ab3cc62e28bbb3498bbbc3200e3b2c49813c3fc95c3d21b99a3c3a4a95bcc21fb7bcfe3426bd52da383b554d943c68315a3ce369523d46e6e73cdebe4bbc5722acbc53133b3dedf861bd5edaf53ce406bfbc464ad2bc88a7b4bbe1943abc9135ed3c2c4901bd196f79bd0d6fbc3dfe3426bd9029013d66f8d73b77cfdabcd605ab3c03b46d3cd4cb51bdf8ecb2baf06ae63ca37288ba1ee3ab3a6422e93b836098bc7187673b4f68343de3ce93bbf26c14bd07c3debc068a5c3b5175f7bb1762363d31f407bde46b803d0a61a2bb92d2d9bc554d14bc6acf1dbc088c8abdf3dc6abcad6502bc71eca8bb71ec28bcda78063c06eec6bbd25a243c7b7ae13b3774263c283a903a4f690b3d0f44d4bcd8a217bd7e50503cc28321bd290ed1bc675d19bc701711bb69cec63c32645
e3b874273bbacc8153caa2ad2bc9cf112bcacc8153db110893c0bd178bbf7b3b0bc423be13c69cec6bb8e8c14bd4d2fb23cd8a217bc6523c03c5722acbce64098bc9f9bc2bbe714593d92d2d93c7f89d2bcd020cbbc8e5f7e3c1c0e143bb4b9613c2d1dc2bbd912eebb63ea3dbaccda053d634e28bd9e9aeb3ca547a03dc55990bc5212e43b46ae3c3c94701d3d68315abc6d41a2bb8ceed03c488354bc7fed3c3c19d3633cbd3c85bca1d4c43c31901d3d464a523cb82c3dbd381113bd410336bd0d0bd2bc704fbcbd70b3a6bc01de7ebca271b1bc0afd37bdda13c5bb64872abd90c5963d4a2198bb1252eebcb41d4cbdcc769b3c000a3ebd0727c93c3c2084bc4c92c5bcd5cca83c60dca33c940b5cbccde6f1bc4bf5583ab4820dbbad6582bc1ca952bc939b05bd2229f1bbc490643c228ddbba66f8573c0d6fbc3ca88d65bd2aabbdbca1d4c43b929a2ebc977de03c960d8a3a52aef9bbe7b0ee3cc52dd1bc1836f73c0b9aa43cddbdf43be268fbbc6115a63c2e556d3cc5f5a53b0bfe8ebc0afd373c8025683ca1d4c43cde86a0bb024497b935d6e2bcf26c94bbde2236bc15f108bceb87b4bc940bdc3bbc3b2e3c290ed13ce46aa93ceef938bca47ef43b2564213d158d9ebaf4412cbc786c47bc1f80183d28d6a53c9aefe43cbcd7c33c1ee3ab3c3e59063ddbb0b13c68315a3b2be3e8bb6e7a24bd1e7e6a3c481fea3c15f1883cd70602bcefcd79bc8127963c83c402bde1f8a4bc919957bba71d0f3d46aebc3cb381363c7325abbcc665fcbc940bdc3ca956113d46ae3cbd4612a7bc2aab3db9259bf5bb96e0f3bb1daa293cc2ba75bce194ba3ba6b776bb4f3cf5bb4cf7063d228d5bbc9dc6aa3c3bba6bbc987eb73c43a0a23c39e5533c5b95873c01a7aa3c87a65d3d2155303d997f8ebb8398c3bcddbd74bbc21f373dc79e7ebdfed0bb3b43a0223ddebecbbbbc3bae3c13b886bc1e7f413d5377a53c2bac94bc738915bd755f843cde22b6bc353acd3cd42f3c3d2db9d7bc946f46bcc8cb94bc242b9fbcf16bbdbc3a1dff3bb00e5b3a38acd1bc916203bccf8435bc12b72f3d51a20d3dfc32f8bc77d0b13c8fc43fbc0042693c90fc6abb0552313de46a293d704f3c3d3e9131bce29511b9bb9ec13b9fffacbc312c333de640983bdf230dbc0bfe0ebc8a7d233d987e37bb950c33bb67c183bd4cf7063dab2c003d072749bc000abe3cbdd81abda956113dc52dd1bcdf238d3c41cb0a3c595baebc47e7be3ccad8d7bb33014b3c4bbe043d21f145bd3f6572bdfc5f0ebc96a91fbd01a72a3da446c9bc48e8953bccad6f3cad0198bb6acf1dbb0451da3c6b6c0a3c0c37113cd0850cbdf8b4073c263862bcd6052b3a77341cbdc0ae893c13b806bd1f8018bd8e8c94bc40025f3d7389153dfc32783ddcb108bd359f0e3d201c2ebcd0bc603cc55910bc9b8c513c242b1f3cf542033d24c734bb104682bc308fc63c7f51a7bcfe34a6bcc081f33c8ec3e8bc1b71a7bc8c52bbbcba9d6abcb381b6bb5ccdb23cb1475d3c25ff5fbceffa0fbdfcfb23bca98ebc3bcee7483dd976d8bb81c2543cbb022c3d433cb83c9ab8103cc7cabdbce46b003c2871643c9f63973c60dc23bd2aab3d3d0dd3263cbdd89abc4475babca10c70bcd0858c3cba9d6a3cc04a9fbd8d8a66bcb00edb3bd0204b3ce05c0fbd977d60bbfe9810bc7a0a8b3dfe34263d4475ba3cb4b9613cad65023c31c7713c0fa995bcaa2ad23c86d29cbcf3dceaba812796bc0653883b2bac94ba0a994d3d1b0c663b047d193c6624173dc6c9663d1353c53b2aabbd3ccad8d7bcbf4948bcedc036bcbc0383bc74f96bba433c3839242b1f3d51d9e13a423b61bbffa4fc3b16c5c93c02a8813c48e8953cda13c53a2564a1bcab63d43c43a022bcb82cbd3bc0ae093ce9e9f03ca2a95cbd4d2f323d21f0ee3be6a4823c7325abbb5d6a1fbcf7b3b03d248f093d7288be3c42048d3ce406bf3cba66163cbcd66cbd0b9a24bd70ea7abd3a4a153da95691bcfa25b5bc04b5c4ba2db9d7bb2f56c4bcdb4c47bc4475babc5eda753c6588813c41cb8aba5a94303d6cdc60bc51a20d3d599359ba9162833a111ac3bca8b9243cefcdf93cc692123d6ba35e3b1eab003cf16bbdbb42d7763b916283bc5da1f33b8742f33c91fe183ca271b13d977d60bcf9ed89bc461227bd7909343cfc5f8ebcf4dd41bda5e3b53a9f37d8bb4cf7863ddce8dc3cd1bdb7bc27d54e3d05b61bbc6ede0ebd297392bce1f8a43b617910bb86d29c3ce7dc2dbd1f8018bbd4cb513d0143c03ce9b21cbc1e4716bd940b5c3d5f3f37bd1e7fc1b9a3aab3bb5a2fefbaaa627d3c189a613a2f571b3d66f8573dc45839bc54b0273d1daa293ced94f73c37d8903bb4b9e1bcca3d19bb3a4a953b6386533b9719763ddff6f63c3d58afba7db4ba3b57ea80bc876f093d6a6bb33cf8ecb2bc6a3388bcf479d73cf340553c916203bd047d19bc433c383d2e8283bc12b6d8bc
866e32bd818b003c97460cbdd669953a93d3b0bce3ce133c6d4122bdffa4fcbc1c0e143bf67b053dc5f5a5bb4984ab3c6e1563bc5e078c3cc4f4cebcca74ed3cb72b66bc6f163abc00a6d33ce640183de50716bcf9c0f3bc779806bdc182cabcaf72c5bc5da1733ac2bbccbc52da38b9c5913bbcf6179b3c503dccbc65bfd5bb215530bd3774263c6ba3debbd1f5623c8b51643a5175f7bc500521bc0998763ba20d47bcc4f44e3cbe74b03c42040d3c1eab80bc9029813cdbb0313d01a72abc359f0ebc1e7e6abcea4f893b4fa05fbcd32ee5bc16c549bc423b613c0fa9153de5da7fbc302bdcbb2155b0bc4374e33a70b3263dad0198ba1daa293d2fbb053c57222c3d2be3e8bc60408ebc80ee933cf340d53b4e675d3b6895c43c31c7f1bc7b16773ce46a293c04515a3c340222bd065388bbd9126ebc2f8eef3b4576113c63ea3d3ba2a9dcbc88a7b4bcaf7245bcd7d9eb3bbeac5b3b290ed13c7633453c4bf5583cf9ed893c9fff2c3ca47e743cbd0f6fbc461227bc93d3303d2a0fa8bc6da50c3d90612c3dedc10dbd54b0273d088c0abdc7cabdbc614c7a3de8b1c5bc4d2fb23b01de7e3c088c8a3c11e217bda2d51b3c7db4babad1f5e23b7e887bbce231a7bcc0ae093d66c0acbc73252b3d03e1833caa62fdbc2f571b3c7cdfa23cb1ab473bbd0f6fbb2564213d8a7d233c027bebbc894421b9158d1e3c9d2a953c2c80d5ba7d7c0fbc40ca33bdeef9b83ced5cccbccd13883cbf49c83c88a734bc2bac943bf5de18bc67c1033de7dd043d7e50d0bbd0858c3c6c0820bd67c103bdf9245e3cdce8dc3c43d8cd3bd8a297bbd3924f3c457611bd93379bbd59f7c33b8defa739a680a23b85d1453c7e887b3c1e47963c5ccd32bc69961bbdbf75873c4167a0bb211d053c8944a13af30901bc4f3cf5bbcfe89fbc3e2c703c0c0a7bb9f0323b3c0da767bccc12b1bc4e3009bc62b1bb3c024417bbedc10dbd8434d93c2fbb05bd38ad283b196f793c5b311d3d138bf0bbe7dcadbbb5564ebbfe3426bb3be701bc3c57d8bc42048dbcde86a0bc0a994d3c6013f8bbac64abba2e8283bc82c3abbbe46aa9bcad38ec3b8f60d5bc9aef643c8535303c9bf0bb3c9cf1123d0451da3c464ad2bcb6f263bd2739b9bc21f06ebb665cc23c77349cbba0d3edbb7bde4b3a5584683cd3924f3c488354bdedc036bbc5f5a53ce130d03a2c1ceb3c0a99cdbcd0bce03cba66963cd8a2173c0b9aa4bbca3c42bc60dca33cd32ee53c0afdb7bcaa2ad23b6fb2cf3b8b51e43c28d6a53c176236bc2fbb85bca88de53a148c47bc7798863ca6e48cbc939b053da98ebc3c051a86bc47af133d49842bbc27010e3bdbb031bcd0850cbc99b7b93ccb115a380828203cd1bdb7bcf64eefbb78d108bbc89fd53b54b027bd929a2ebd4bf558bce507963c0960cbbcb1abc7bb23c65dbc83fcadbc248f09bb78085d3cdce8dc3c4f3c753c17c6203d308fc63bc96881bb138bf0bb484c803ca038af3cc5f525bc9d6169bbd2f6b93ca13906bb7d7c0f3d111a43bd59f743bb340222bc01df553c17c620bd3f92883b97460c3b40cab3bcbb02acbca6e40cbc7db3e33b104682bcabc73e3c433cb8ba05ed6fbb1eab803c8fc4bf3cd8da423c5c05de3aa95611bdf340553b92d2d93b9719f6bc21f1c5bb5dce09bdbf111dbdb0d7863baa2a523b77349c3c1a3825bd0fa995baf340d5bafa5d60bc60dc233d82c32b3dec880bbcf2a3683c5da2ca3cd393a6bcb6f33abc78a4f23b939b853cb10fb23cad9c56bc960d8abc5edaf5bce94d5b3bc8672a3b89e0363b1d46bf3ceb87343d8d53923c4c9245bcd1bdb73ce815303d9b28e7bb62e9663ab82cbd3bd0bc603ddce8dcbcd5cc28bdaac6673c42d7763bcc761b3b5520febcc702e9bc7d18253925c80bbdbd3c85bccb75443cc52dd1ba6487aa3c763345bcb0aa703cc79efe3cb5ba38bc66c0ac3ad058f63b786c47bb353b243dcb115a3c93d330bcbeac5b3c232ac83cd8a2973c7cdf22bc06ee463c2739393cf67b053d7d18a53c11e297bb25c88b3b42d776bca956113b90fc6abceef9383c4cca703dd9db19bc259bf53ca5e3b53c31909d3c1a70d03b4f68b4381bd511bc929aaebdd021223cf3a43f3cb110893cc0e55dbafbc2213b037c423d3e9131bb94d4873c3df4443b1e4716bc66c02cbd83c482bbecbf5fbc98b6e23b2fbb05bd9e62403dc35762bb8e8c14bdb00e5bba88dfdf3c3f9288badf5b38bc14f0b1bb232a48bc37ab7a3c6f163a3bbcd7433cf441ac3c085ff43cb0d7063c9d2a953ca8553a3b5175f7bcffd1123dba9dea3b3a1d7f3c107dd63c474b293aa4ab0abdc2baf5bb18ffa2bc67c103bcd8dac2bc14f0313c263862bb6d4122bc0bd1783d59f743bddf230dbd2a0f28bc32c8c83c5ea3213c80ee933c152934bc6523403de640183cdce8dc3b6422e9bcd3cafabca2a95c3c3be701bda6e40c3de778433c517577bcb82cbd3b6d78f63b4a2
118bcc1e634bc4984ab3cb65725bada7806bc76972f3d4f690bbbd32ee53ac7ca3dbd929aae3ce3a17d3bc21fb7b9302b5cbcbb9e41bdc8cb14bbad65023dea86ddbc2e8203bb582383bcb1e3f2bb4fa05fbbb248343c5a5c853ca09c993b03b46d3ce4063f3c662497bb755f043dbf75073d977de0bcfa25353dd53013bc74c217bc3a4a15bc1daa29bc7325abbbe8b145bc876f89bca47e74bccaa183bb93d330bc3e2cf0bb745ead3b856d5b3c1a3825bd5af89abbbc03033cbb02ac3c48e8153d8843ca3b544cbd3a201cae3c5ea3a1bb349eb73cc55910bbe3a17d3c00a6d33b53afd0bcca74ed3b696adc3c062672bbf107533cdf230dbddb141c3da6e40cbc027bebbabf75073b4f690b3c50a136bbc6c9e63ce8799a3c6a3388bc95a8483bbcd6ecba870b1f3d0ea83e3c57ea803bd1f5e23c63b2923c03e1033bab2c80bc0bd1f8bcdce85cbc27018ebc7808dd3cbaca00bd977de0b8072749bcac0041bcf2082a3c2ce5963c70eafabb5b9507bd84d0eebb8026bfbc4b5a9abc2b48aa3c148c473c6fb24fbc201cae3c665c423d312c333b0e0c293c6e7a243c80263f3bf7b3b03b9dc6aa3cbcd743bc201c2eba08c4353da2a9dc3c5584e8bb238eb23c349e373d1019ecbc9f9b42bbc8672a3da1705a3a6ca435bcb9c9a93c7596d83bafd62fbdc357623cedf8e1bba680a2bc4fa05fbb037c423de6a4023c6da50c3cbf119dbc80ee133d71ec28bc940bdcbbbf7507bcbcd7433d6014cfbab3498b3aae39433c52dab8bc696adcbc5377a5bb26d477bcca74ed3b8535b03c544c3d3a64eb14bd6014cfbc357278bc99b7b9bcc21fb73ad020cb3a5b68713a8cb6a53c279da33cb78f503c5005213dbaca003b5276cebca0382f3cd60454bce2cd3cbc8cb625bc4958ec3ba57f4bbc004269bb211d05bd88dfdfbc7db4ba3c1de2d43bd706823c0451dab8876f893c03e183bc32c91fbc90fc6a3c24634a3c419e74bc4511503d1f80983c5ccdb23c0f0d803c704f3cb948e8153cad38ec3c80ee933bb00e5b3b433c383d8e5f7e3dcc769b3c853607bddb4c473c3981e93ba5e3b5bb9f9bc23c94d4073c991ba43c3f2d473d66f8d7bc6078b93c8944a1bb3cbbc2bbe2cdbc3cae9e843c24c7343ba37288bd', 'Array(Float32)')), +('<dbpedia:Database_theory>', reinterpret(x'baa089bc01447e3c98f70cbd68400c3d3bb8873d24f95f3c471d023c4734b4bc401680bc54b0e0bbf3a4ba3d884bccbcc505043df98825bc8fd840bc4d2f513d3b3de23b09dd57badcda45bcd3a447bd06a276bcf5bc84bc1ccfc63cd175cb3b23e2adba64e2133d2d24113cda2557bc16d429bd6734273ca8340d3d5207d73cc2caa23b6ecc68bd4d9e91bd4b00d5bc69ddb0bd7aa023bd1f901a3cfe6c90bcd6d3433ca962f13b97eb27bd672842b9e7a29b3c1107f13c047fdfbc80b2f2bcc244b0399bc32dbd52fc893baa6309bd4b6f953d905e33bcf0e366bd77eb34bd0b7b143c33993b3c7ef2b6bc07a38e3b87b9743b6fcd00bd8d2f37bdbffe81bc058bc43c0b7b143de504df3c4d181f3d6b6f08bd273441bd69d14b3a14b1123d2d24913c7f0a013de24423bc387da6bbdd60b8bccf52343d2245093d67aeb4bce9dc643cc02c663d63e1fbbc5207573c4734b4bb3f0a1bbdfb3d94bc505ecdbc3994583d6611903b224509bdb6c883bcd994973ce1c97dbc4d24843cff8fa73c413997bcdb3d213ca1a718bde1c9fd3bb7f6e73c46119d3c458baa3c668b1dbcaa6ed63c518164bda9d1b13c63676e3dbaa0893df664763de9dc643a1803263c0f4735bd6e3ba93c1ddb2bbdf664f6ba13b0fa3c3b3de2bc009c0cbbf3a43abd3ee703bddf8fb43c6a63a3bc0c18b9b96f470ebd61b3173d19a04abdb8718dbbd1fb3dbc15c8c4bb8f52cebc669782bbb994243db0e418bbea578a3cf069593bfc54463d061d1c3ce510c4b9b99ff1bc707572bd343660bcb28da2bc9ccf12bd8d23523c82ca3c3d213924bd2fd8e73cf435fa3bd6d3433cc021993aeae8493ca021a63d31f0b13c191a58bd9659d0bbe24f703c247fd2baac86203b5f841b3df0d819bb71760abd50757f3c446813bde94ba53c458b2a3deda99d3c8e3b9cbcfe83c23c1e6d033cd207233a00161a3dc696c3bc809b403c28d1e5bc3d6111ba1f0a28bc5bc3c7bb64f9c53bf069d9bc02bf23bc8a63163c777cf43a32fc163cfb48e13cbbb7bb3dcc0c863d87b974bb63ede03ce5f9913c024516bc7f840e3ddcc3933cd1fb3dbcbf15343c669782bd047412bd58038cbbb436acbb5d6c51bd2473ed3cfcda38bd67ae343cc09b263cb2813d3d3f9b5a3c2fd8e7bb7a2616bb587d19bcefcc34bdaae8e3bcdf8f343d3ddb9ebcbaa009bcb32a473c6c9decbb64f945bded9d38bde95672bbb7eb1abd8728353cfcc3063c73a4ee3cd05e193b754d783c6d235fbd718157bc95c810bd1bc361bd81b38abae58ad13b1c
cfc63d8fd8c03c6b00c8bcab7abbbcbd6c2a3ce8c5b23d71768abd8a6ee3bc11f03ebc128de3bb6c0cadbc9c60d23ad659b6bb06a2763c26112a3d575a82bc380319bdf212e33cf5bc04bc03e23a3bb2813d3c19a0ca3bf3a4ba3b213924bc2c18ac3ce83fc0bb668b1dbce61c29bdfc54c6bc1f0aa83b04682dbd9d55053bf441dfbacb00a13c13a52d3d16dff63b55c8aabcfc5446bd40908d3cc61cb63c857fab3c2245093b9c4920bc34b185bcaae8e3baa0b265bc40a73f3da84bbf3c7f840e3d271d8fbbd6dfa8bc3e78c3bcf7650ebd6c0c2dbc0474923c505380bcae3b0fbd74a5863cb17558bca12d8bbc731f14bde7a21bbd271d8f3dfcda383ddb3d213d6a6323bbce2f9d3c2a7aef3c17f740bddf09423d55c8aabc1ccfc63abccf053ad069663c3559f73b9659d03c94c778bcdaab49bc484be6bbf7705bbd5075ffbb861cd0bbb06973bc3d61113de627f63c669702bd250545bd9bb7c83a9cdadfbc47ae413daa63893c5994cbbdc2caa23c4016803d4c236cbd7c49adbcf4b01fbcbbb7bb3c11817e3bc7b9dabc04f9ecbc517617bd8da9c43cf9948abc88ba0cbd047f5f3d7771a7bc51fbf13b20168d3cf28123bb1a3defba1a26bdbc46970fbdcfcc413ddf8f34bbef52a7bcb765a8bc5aac153bc1384bbd64f9453a6af4e2bc83e16ebde627763ca6a2353cd42ababce2d5623ce61c29bdd7eb0d3c9a26093d458b2a3cf175bebc71768a3af988a5bc8b7ac8bc9a31d63cfcc306bdb54d5e3d7bac88bbd0e3f33c587d993bb90e323b4840193d1c55b9bc3665dc3b0611b7bc48d158bc4bf587bc6a6ef03cb659c33c916a983d8f47813cde78823cb28da23c95bcabbcfb4861bdf7658e3d992571bb1181febcb92564ba1803263c446813b89e095cbd3ee783bdf664f63ca13858bd63ede03a5f0a0eb95659ea3c21b331bdbf8f413da962f1bcdc4986bcde83cfbc24f9df3cd99fe4bc857fab3c6eb5363d1b32223dbaabd63b1a263dbc35c8b7bb4bf5073ba6a2b53c17714e3c8e3b1c3b00161abd14b1123dcb00a13c07a30e3d97f6f4bbae2faa3a88ba8cbcceb58fbca50511bb63e1fbbaaf5241bc78f719bd2d24113b2d2fdebcbde6373db31e623c7313afbcd28c7dbc7665c23c84f938bdf7df1b3c62503c3b050552bd1b49d43c43eded3a74a586bc2eaa833d2bf5143c884bcc3b50757f3c9c49a0bce61ca9bc01b33e3c3771c13bb175d8bc4139173dbbceedbccc9dc53bcc86133b380ee6bcd54d513c48d158bd51f0243cb212fdbcde6c1d3d106acc3c580ed9bbb90e323cb1fbca3cd802403b668b9d3cf7f6cdbc002dccbb28c6183c0373fa3c273441bd7994bebd11f03e394c92acbb51f0a4bb31f0313dada9373df7eb003c7399213d22bf963ba350a23dee2f103d1e6d033dd99417bc1f0aa83c858a783cb212fdbb75c89dba9b3dbbbc6d1812bd8596ddbc00169abc2144713b2139a4bc9d5585bc6fcd80bcedb46abcba1a17bd528dc93b50e4bfbc4eb5c3bce02cd9bb235cbb3c46978fbcba31493d271d0fbcd1754b3c5ef243bd5f8f683d024516bcc50584bc33993b3c8d18053d1766013d787d8c3cf28cf0bcc36747bc3f8428bde696b63c4d2404bc91fb573bdf8f34bde7b94dbc131fbbbce35008bb1542523d8c0ca03cb5d350bddde6aabb6a6ef0bc88341a3dd281b0bb7bac88bce2d5623c461c6abc9802da3c60a7b2bbf919e53b0963cabb4f52e8bb458b2a3be367babb8c0c203c63e1fbbbfb48e1bc0957e53cebf42e3ce0a6663cc50584bc76dfcf3c49dd3d3c435caebb91fb573cdc49863c6da9d13ce24f70bce4f8f9b97b3d48bb8d18853bfb48e13b8fd8403cde6c9dbce35b553da57f9e3c7f0a813c2a7aefbb412db23c9f8fce3c13a5ad3cdaabc9bb23e2adbc40908dbdfbb7213b0a6faf3c27a301bcf659a9b9e35088bbcd23b83c30530d3deae8c93b49c60bbc505ecd3b0d9e2b3d258b373d2734c13cd207a3bc14b112bb0139b13ae61ca9bcb5d350bce83fc03c68ba193d0b8661bc0d249e3c6957be3ccc0c86bbc35b62392468a03c46a25c3936d41c3cfbc2eebcce3a6abcc94b323d62390a3d28d1653c14366d3c861c50bddde62abdd7eb8d3d505300bd7f848ebcbef29c3c6b00c83bd4bb793ceeb502bdd298623dfde61d3b2b7b07bca350a2bc98884c3d1bb814bd80b272bc4734b4bb365a8fbc69d1cbbccc8613bc858af83be4f879bb63ede0bbc404ec3b75c89d3cdde62abd916a18bbc4edb93cf281a33a84edd33b3bb8073caf52413d0d2f6bbd7cdaec3c58030c3cb7f6673cc94bb23c3f0a1bbc3b32153dce3aea3c6596eabba350a23c661c5d3d8fc18ebc0373fa3cd4b0ac3c60a7323cf5c7d1bc60b27f3aa8c54c3c271d0f3c387d263c69d14bba9e8369bcbbce6d3cf05e0c3b5b32883ce35bd5bb7659ddbcab7abbbb1aacafba1bb8943cc0a6f33c24ee92bc413997bca35c07bc3213c9bc387da63c9313a
2bbfb3d14bd791a313de1c9fdbc03737abc061137bc73a4ee3b02bf233c2957d83c825bfcbca3d614bc990e3f3c471d823ba84b3f3c64e213bdc138cbbaa3d6143c4c23ecbcb17558bcfef2023ca138d8bb1bb8143de8c5b2bc5665cf3c1de790bc964e033dc02119bce83fc03b1aacaf3cf7df9b3b0e30033c63676ebcd1fb3dbc91f08a3c0ec1423c43e2a03c4021cdbc3542c5bbeeb502bda7ae1abbae3b8f3c6c921fbc180ef3bcb069f3bcbccf85bb39890bbc04ee1fbd3ab7ef3c89d1be3b777c74bccb00213da4f92bbdf99f57bcd6d3c33cdbce603c40908dbc43e2a03cf664f6bbeb0094bcfbc26ebc0144febb7ef2b63b98f78c3bd6eaf5bcd4361f3d9e09dc3c4d24043d120889bd36655c3b89e8f0bce9dc643d17f7c03cf54d443cc09b26bc44ee053d1f84353dc50584bbf536123ca12d0bbd54bcc53c8510ebbb4473603ccc86933c825bfc3c4d9e913cb994a43ba3e1613d54b0e03ce09b99bbf3a43a3c754df8bccda92abd2fd8673c094c98bb2d9e1ebc28c618b93efeb53c6d2f443c575a02bcba1a173d1ccfc63c37e081bcf42aadbca7b967bbff8fa73bf441df3c110771bccc9dc53a01447e3c8add23bc1a3defbbef52273d401600bc4016003d6fcd803ce7a29b3c79943e3de57f043d965950bb85106b3db925e43bfcc306bd7888d93afa254abcbef21cbd7f0a013c5207573cff09b5bc668b9dbca9d1b1bb8af455bcfcdab8bcdcc313bd0c18b9bb55d3f73cba31493d49c60b3d484be6ba3ccfb93c517617ba7f8f5b3c187db33b6d2f443d0245163d5994cb3c1e78d0bcf98825bd8e46693c78f799bcb7eb9abc34b105bc8adda33caa6ed6bcac86a03d3bb807bd44f9d23c916a18bdd90ea53c813865bd1181febbd7eb0d3d40908dbc97f6f43c6da9513c506ab2bc6fd8cd3ce57f84bcaadd163cc57f913c42bf89bc60a7b23cc3d6073d6734a7bc1f9b673c1537053d6c921f3d54bcc5bcf05e0cbd1399483d71f0173bdb3d21bd28c698365aa0b03cc58adebb44ee85bb0144fe3b62ca493b2574053cd28cfd3c754df83b7ccf9fbc1ccf463c542b86bd5ddb11bcaf52c139e61c29bb00a759bb6fc19b3c8250af3cf9940abbc834803c43e2a03cdd60383ccf52b43ceeb502bdcf52b43cd99f643c612d25bc3176a43c87b9f43cab7abbbc5de6debcfcce53bdae3b8f3af9948a3c0840b3bbc505043d51f0a4bce3e147bd1d6cebbc095765bce4edac3cdcc3933cc1a78bbb88341abdb441793c916a98bc1213563bc35b62bd44f9d2bb2b00623cd28cfdbb1803a63c85059e3bc7ae8dbb873fe7bccc0c86bb85965dbc27a301bdbaabd6bca4edc6bc328dd6bc884b4cbdb05e263c094c983c48c5f33bfcda38bce8ae003dbcdad23b0729813cd31e55bb7a2616bcfbc26ebb3181713ce350083c3efe35bc5665cfbcb069f3bce7335b3c6f52db3c932ad4bba404f9bac2ca223de1c97dbca35c87bb6d1812bdfdf16abd00169a3dbffe01bd81bed7bcd38d953c2fcd1abb08d1723c8f47813bf664f6bbb765a8bc74a5863caf52413bd1e40b3c2840263c4a63b0bc21b3b1bc3f15e8bc46a2dcbca12d0b3d20168dbc868b903cd7eb0dbc941f873b261cf7bc73a4ee3c928d2f3c9c60d23b990e3fbde2d562bb9e8369bd90e4a5bcdd6038bc9a2609bca4edc6bc0eaa903c9bb7c83c1b32a23cac0c133dee2f10bc0bf5213d23edfa3c721e7cbc6a6ef03cae2faabb09ddd73c6468063dd212f0bb506a32bcaeb51cbb328dd63aaae8633cbffe013d94c7783c1c5539bdfa312fbdc1b2d8bb54bcc5bca12d8bb91d611ebdb6c8833c46111dbd3994d8bb766542bcdc4906bd2d2fde3ce4f8f93c7d6cc4bbc1a78b3c54b0603db6c803bcf5bc04bc591a3ebd6b86babce2ca95bcae3b0fbd51f0a43bd281303ca8c54c3c64e213bdb54d5ebc24ee123d2468a0bc00169abc2d24113ca696503d4d189f3c2da96bbb847346bc7313af3cea578abc3b3295bcd212f03cb6c8033c6138f23c43eded3b9d5505bd331faebd5bc3473c7f8f5b3d1894e5bc1c55b93adc49863bcb7aae3ca35c073ce3e147bd812d183cc94b323c471d823cd28cfdbb3a26303c84f9b839fd602bbcf89372bb0b7b943ba2d5fc3c0eaa90bc7bc33a3c6fc11bbc9d6cb73bafc181bb7436463c07a38e3c98f78c3abef29cbc8f52ce3c635c213ca12d0bbcc6adf53cbde6b7bc809bc0bb96df42bcd207a3bc5181e4bc0e3b50bdd6d3433d0b86e13bb99f713c2c0cc7bb10539abc8596dd3befcc34bdbcda52bc342b133c20215a3c413917bd79943ebc3ee7833b60a7b2bc812d18bd92073dbc651078bb42bf09bb01b3be3c79943e3b731f143c21befe3b63e1fb3c5207d7ba4b6f153c95429eba54b0e0b9b5bc1e3b9ef2a93ceb0b61bb69dd303d24736d3ceb00143d7bac083d5c559fbcd38d15bddf8f34bdbd6caa3a0b006fbc1b3e073dc404ec3c3994d8bc9399943cd1754bba55c82abc
2a63bd3c673ff43b7ccf1f3d8728353d328d56bd6a63233c7ddb843c1803263ca404f9bbc4046cbc7b3d48bcbffe81bc0505523c48c5f3bbb39907bd3ddb9ebcbbceed3c813865bcd212f0bc40387fbb3c55ac3b4250c9bb9f8f4ebc10e4593cbaab56bcf281233cac0c133cce2f9d3b11fc233bcc86133dc4046c3b80a725bc81be573b258bb73c77ebb43c68400cbd7659dd3c76d4823cceb50fbc9399943cb441f9bca6a2353c6eb5b6bced9d38bb92987cbc39a03dbd8ae9083d54b0e0bcce2f9d3cd87180bcd069663ce350083c49ddbd3cac9d52ba4bf5873c26112a3d6b00483d30e4cc3a71fbe4bc42bf893a56596abb28ba33bb9802dabc3ee7033a6f470e3dae3b8fbc180e73bcbd6c2abb4016003dd87c4dbd40387fbcea6ebcbc09634a3c941f073d39890b3cf7658e3a3bb8073b4eb543bcde6c9d3be244233c4016003c7e78a93b6d9e04bad281b0bbf7f6cdbb5f9bcd3a9e09dcb93542c5bb34a5203c6b6f883c20215a3ca57f1e3dca6e49bd4a7ae23cbaa0893c4367fb3cd4bbf9bb54a593bc07a30e3d32fc163c56596a3c61bee4baf5bc843c8ae9883c1f909a3cd7659bbc380319bc4d9e91bcef4642bc46119d3ccc9dc53b9d6cb73ce4731fbc84e286bc2473ed3b058b44bb7888d93ca962f1bcc2caa2bc8f47013d2eb5503cf069d9bce510c4bca244bd3c673f743bcd2338bc23edfabbe0218c3c3b3de23b5e7836bcae2faabca1be4a3ddd6038bdb4362c3cc2caa23c2b86543d08c6253c1a26bdbbd6df28bc3b3d623ce1c9fd3b54bcc5bb4f471b3df66476bce4739fbc02d655bbc36747b9a35c07bc46119d3cf5c7d13ae015a73c8250afbce962d73c40900d3d3a26b0bbd29862bdc50504bd306abfbabd60c53b110771bc2c18ac3ce09b19bb002dccbbe962d7bcfef202bc14b1923a24736dbbe57f84bc17e08ebb365a8f3c9181cabc2bf594bc0245963ceec0cf3cc47ef93bbef21cbba96271bb87aea7bcdd60383d9d6cb7bb2840a63cf0d8993cf106fe3b33993bbc613872bc66a2cfbc5053003ce47e6c3c91f08a3bc957973b191ad8bc67aeb4bc1b3e07bd5aac153b3f0a9bbc15c8443c3ddb1ebcd0e3f3bb928d2f3c380ee63c332a7b3c6af4e23b9aa0163d8af455bc401680bc92987c3aa696503c43d63b3cc9d1a43b95c810bbf1efcb3b7bac08bcab002ebdf7650ebc932a543cb925e43c2473ed3b2468a03ba962f13c59944bbcf919e5bbec8686bc3df250b911fca3bcf9948abc3559f73c2f47a83a7efe1bbd0e3003bd518164bb97eb27bd7075723b14bc5fbb56654f3d7ccf9fbad4369fbc58f7263cc8c53fbcd20723bc0628e93c7f0a81bc9e781c3df31ec83b446893bc657f38bd1efec2bcf9940a3d932ad43ca35c87baf7658e3c5399aeba8b003b3b1d6cebbb4c922c3cf87cc03cfcc306bafbc2ee3b46a25cbc5de6de3cb0efe53cf87cc03b92987cbc50e4bfbb3bb887ba7ddb84bc39890bbd68ba993bc94bb2bcde834f3c7181d7bc8d2fb73ae015a7bada25d73c780267bb6fd8cd3aa3e1e1bcc1a70b3c9d5505bdcb00a1bc7e7829bb92987c3cc510d1bcac1760ba41bef13b9ccf12bd9c60d23c5d61843cf207963bf90e183c9313223d9b3dbb3c257405bdfcda38bda95724bd43d63bbd64e2933c458b2abc9e781cbc86a2c2bc60b27f3cd281b03cc58a5e3ce138be3ca2caafbce0a6e63bc3d6873c71fb643c564e9d3a63ed60bbe138bebc04f96cbb4e3b363d6c0cad3bfa312f3c34a5203d191a583ce9d1973c46978fbcab7abbbc23edfabb809bc03b0c18b93cba1a173cb6df35bcc834803bc7ae8dbcc2caa2bb8c9ddf3c9994313c97711a3cf7f6cd3cac17e0bc4d9e91bc2b7b073ce02c593c36d49c3b8c862d3c16dff63bf6ea68bb861cd03b258b373a92987c3cf9948a39cc91e03b10539abcf05e8c3cd99fe4bc387da6bb8f524ebcedb4eaba80a7a53c75c89dba3b3de2bbc35095ba7bc3babba8348d3c4d2404bd80a7a5bcad23453c0eaa903c0505d23ca7ae9abcdbb7aebc59944bbcada9373c87b9f4ba0c92c63c657f383c764e10bcb7eb1abcd770683c43e2a039c7b95abdb5c7ebbb2fcd1aba16d429bc954debbb85965dbce350083c3213c9bce0210c3c0139b13bfd60ab3c3b49c7bafb48e138ef52a73cd4bbf9bac6adf53b990ebf3c54b0e03cd4369f3cafcc4ebd94c7783bbf09cf3cb5bc9e3c3213493c0fcd273d2505c53c50757f3ca1a718bd7f848e3b8af455bc48d158bc8f524ebb3f9bda3cb31315bb46970fbdfefd4fbc8eb529bc328d56b71213d63a587d993a342b133cb542113c0e3003bd95d35d3c', 'Array(Float32)')), +('<dbpedia:Bugatti_Automobiles>', 
reinterpret(x'13deefbccacd823c1065a0bc4f87033de229ae3c69e087bca18bb1bc17d8febc0377d0bcd240e1bc7b44003cc6d2e2bc1a56973a567c0f3cdfad94bc33b0893cf01346bd419b0fbc9d90b43bf888a33b19d645bc7ebfabbc5e6e51bc84b701bdf88823bdf48f023ded99083c939f3d3c36aaf5bbe8a0c43b2f353b3dfb848e3b39288ebc6c5a453d81bb96bc4416bb3a7b4400bdc0dc8bbb59778c3c352d913cf40c67bcae76373b392720bd9a14f83c2c3abe3b27c1cbbbd2c20e3dd8b8883dacfb8bbb1065a03c419b8fbcddb1a93c214c11bc744df53beb1c81bbcbcb263c754c2ab8c5537f3dd43d3abdbce16bbade2eb1bc5d7209bd3aa6033b83398cbcb36eeabac94e1f3d3ea092bb23c82a3c36ab63bc7946393ddbb2f43c24c64ebca8008f3b372ac73c3e9f243de623bdba754c2a3c4711383d9f0d3c3dfefddd3cd63a70bcb0f42cba27c14b3bec19b73bb17222bae7a2203c30b50cbc66e49cbdce47c03c5201413b214b23bd5cf237bdfc01f33b28bfefbc6dd83a3d65e50abd54ff073d54ff87bbacfb0b3d1858d03cf38ef138f193973c096f263da485c03c1fcf093c7b44003d36aa753b55fcbdbc3132f1bb25c4f23cd6ba413c92a199bb5b74c2bb8aab7c3b26c3a7bc9e0f183d951b573c3e1f533a7bc3403b9d0ff53c8aac6a3c5200d33cc25a013c01fa48bd3729d93bf58cb8bc72512d3c60ebd8bca603363be12a1cbd09ef54bcef1610bd93a0abbba782193d08f1b03c6e588cbaa683873c4e0732bddb3422bc2c3a3e3d9d10e33cae77253bc05c3abd0de8f53b6c5957bd5a769ebc08f0423d0de8753d567c0fbd2248d9bae5a2fd3cccc94abdf48f02bbbc63193dd5bc1dbdcc49793d8a2e98bbb0f42c3d0ce9633ddb34a23cfc82323bf48f02bd58f916bdf90619bd0e67593cf806f6bc4614023c695e5abc959d04bc606d063c6a5c7e3d0ce9633c999713bc087102bd8bac0d3d027b083d6860b63c90a3d23c2741fa3b8a2daa3c06f3e93c9c12bf3c03783e3d61ea6abca504a43cb56cb1bbbfddd63c6d58e93bf3101f3da10b033cf111eabb33b009bbad79013dd2c0b23b3827fdbcdd2ffc3cad7901bde0ab383daa7d96bc3ba595bc007b653c82b9ba3c606d86bca10b833cb173103b64e5673cd936db3c1261e83ba60336bc13609dbcadf92fbc5085a73c3b2368bde0ac263c37286b3ceb9a53bcbd60cfbc4a0e11bd9a94c93cfc8144bd9c91ffbcc553ffbcfa052bbd0ce9e33b1cd31e3c0e6759bc715309badab4d03c991566bd0bed9b3c224ab5bc9a14783b24c560bd3ca1ddbcec98f7bcff7e9dbbb8e7dcbcc7d205bdf58b4a3cfb02e1bce12a9cbb931efe3b06f3e9bc59f64cbda287f9bc1cd2303d7ebfabbc0fe6bcbc0772cdbc3aa6833c4a0e913c5fef10bcad79813ce2a8ee3c8ca9c33ce89e683dcc49793c74cdc63c0cead13abe5f04bcc65322bdf806763d28402fbd6e580c3d981830bd931efe3cd2c120bceb1b13bc74ce343b8b2c3cbb28402f3b3d2041bd21cbd13c6d58e9bc863409bd8436423df60b1c3d0f660e3d8aacea3c1064b23a31b330bc4f8615bdf2913b3c21c9f5bc92a2073dd7b865bc1162d63bb96552bd2c3abebbc1db9dbc37aa18bc15dda4bcb2f185bcb46d7c3bad7981bcb769e73c04f6b33c62ea0dbd0fe5cebc902412bd2248d9bcea9caf3b68dff63cea9d1d3d22ca86bdd5bc9dba8ea78a3dc94e9f3c84b6133d27428b3b02f95abd5af470bde42576bd4318973c11e4033ba48540bc9220dabbd83837bd28419dbc1dd0d4bc057685bd3da06fbc13e0cbbd4694b0bc4f04683db8698aba1ad4693b69df19bd00fd12bc6a5c7e3be91f283dec19b7bc11e4833cf50bf93cf906193d4b0b47bd5d719b3c372a473df40dd5bb3ca24bbd11e4033d54ff07bd431905bcc358253d1b534dbc959d04bc931efe3b60ecc63c31b242bce425f6bce7a1323aec19b7bc5876fb3956fb4f3d9817c2bbe426873ca20a153dc15970bdd43ea8bc0d69b5ba2249473c7153093c1ece78ba79c5f93c097014bc3c229d3cb46e8dbc9998813ad33f16bdd34084bc4e07b2bd1d52023c3c221dbd14df00bd68dff6bcb8e66e3cab7ca8bc3aa603bdab7c283c37aa983cf708d23b9f0dbcbc0fe63cbd5779453d69e007bd1bd40cbd3ca2cbbbd8b808bc959d04bcfc0284bcf806f6bc1d5114bd1ecfe63c6cda16bc1fcf09bd74ceb4b9ad79013dd4bcfaba71d337bd92a119bdda35903da18ac33c8f255d3dd63a70bc057685bd754e863c9025803c9d90b43a537f36bd60eb583ca10b03bd26c503bd48104a3d4b0bc7bc45968c3c26c495bceb1b133d409bec3bee972c3c29c080bcb56cb1bca700ec3c6b5d8fbc69de2b3dc2d9413def1690bc29bf123d214c113db173103c4e8760bc5fedb43c22ca86bde2292ebdf709c03c764b3c3d1cd39ebc0870f13cb4eebbbca683873c33b0093c7ec0993b6a5cfe3cdf2c5
5bd24c6ce3c89afb4bb3e1ee5bb3e1e653c72512dbd69df99bba4879cbb941f0fbc949cf33bed9988bc7052f8bb8238fbbb3d20c1bb6b5ca13ce3a8113cd6bbafbc4e8672bb969c96bc69de2b3db271343cadf7533ca08d0d3d4693423db46dfcbc13610bbda10b03bdbc61bdbb1162d6bc88b190bcf906993ca486ae3bc05cbabb0bed1bbb5cf2b7bc007be53ca9ff203d4f06c43ce8a0c43c1d52823d5defed3c18d8a1bccc4979bc71d337bbf2118d3c89ae46bd8b2ae03b1857e23c07724dbdd838373cdab3e2bb6565b9bbb965d23ca683873c5fef103c0ee72a3d999793bc9817423da50592bc6662efbbed18c93cae77a5bc4694b0b9ac7acc3c58767bbd7c43923d13618bbb4f86153d5381123d9d10e33c520053bdf48f023ddb33b43c4a0d23bd93a0abbbcc4979bd2fb47b3c4713143c7a44dd3afc0204bcad7981bd2fb4fb3c4f04683c939f3d3ce89e68bc15dd243da782993df58cb83c9e8e58bda4871cbc24470ebdfc82b2ba3e1fd33ce1aa4a3c4d88ce3c577945bc1b54bbba087102bd145e413ca781abbcccca383c5f6de3bc37ab863a75ccd8bc419afe3ca8000f3ce6224f3b244620bd91a2e43b28402fbced981a3dc456493c35acd1bcea1c5e3d2544443c431905bd01fca43c6f5542bcddb03bbb7251ad3ccb4ae7ba00fd123c63e7c3bc3034cd3c2eb833bcb0f42c3cb2f185bb547ec83cf48f82bc419b0f3d7ec0993c55fc3dbdfefe4bbc14df00bde5a3eb3ba87fcfbc34ae2d38cc49f93c30b41e3d04f7213cd9b62cbc5b74c23bb66bc3bc3ca2cb3c89ae46bd9a94493c9d1063bc3ea012bd22ca06bc175aac3ce228403bc94e1fbd6466273d9e8f46bdbfddd6bbd2c120bbacfa9dbccf44f63c62e99fbbae76b73c01fa483df986473a3132713d3c221d3c32b154bcadf9afbccd482ebd4711b8bc74cfa23c636883ba4693423d4219e23ba9fe32bc4a8ce33b3e1ee5bbb56c31bc39a5f23ceb1b13bd9c12bf3c1c51713ad33f16bd8bac8dbce2292e3c0d68c7bc89aec63b05f4573dad79813cbce0fdbc754d983c77c9313d429a213d12e1b9bca7812b3d439669bcb270c6bc9818b0bc7f3e0f3a419a7e3c92a199bd6e571ebda10b03bda00af23c1064b2bc0de875bcda3590bcde2eb1bbfa84eb3a0f65fd3cdf2c553cbae6113d3f1e883be426873ccdc85cbc6269cebc88b0223da97d733bd2c1203dd838b73c4a8ce3bb224859bcc5d5acbc5779c53a26c495bcf111eabce1aacabc2c3a3ebc882f63bca20a95bb7fbc613cbfdce8bc8fa61c3dd04587bd9a9449bbae76b73cd6bbafbcb46e8dbcddaf4dbcb966c03ccccab83c5f6de33c4694b0bce6224f3bed9908bd8d2827bb87b16dbd54fe193d224ab5bc61e97c3b4f0644bd8a2e983c13dfdd3c2446a0bc8436c2bc8a2d2a3cff7c41bbb8689cbcf40ce7bcc25a01bbd2c1a0bcd9356d3ca485c0bcd23ff3bca00cce3cd34004bd7ac51c3c60ebd8bc84b7813cd6ba41bbba63f6b861ea6abcf192a9395af4703cf01434bcc65234bc0aee093d39288e3cec1a25bca308393d3a26b23ca506003dad7901bdf28f5fbc007b653cadf7d3bcd0c4c7ba6565b93ce3a811bc6f56303b36aaf5bb6763003d66e32e3d27428b3c1a5617bcc750d83ca50424bc0a6d4abd5fef10bd7fbdcfbc2a3cf73c13610b3d34af9b3a3927a03b08f130bad0c447bc547ec83a6cda963c6663dd3c931efe3c9996253cddb129bc7e3e6c3c85b4b7bcc3d6f73b616abcbca781ab3a9799cc3cf78911b98a2e98bbe4a5473cabfb683ca18a433a36abe3bbd63b01bd9d0ff5bc352d91bc7ec019bdf708d2bce12a9c3a362bb5bbb8681c3ccbcc943ace45e4bb6b5bb33cc05bccbc0d69353d7c4224bc5181efbc21c9753cbe5e73bce5a2fd3c5d7289bcac7acc3b72521b3d9915e6bb8c29f2bcf11258bd2b3d88bbf709c03cfc81c43a214c113dfb83203d567b213c9c9210bda50424bdf38ef13c4c09eb3bbde10e3b6a5cfebc57f9f33c067429bd6b5d0f3c88b022bbc4d61a3d61e97c3bcdc7eebac75058bcb8e7dcbc8731bfbadab4d0ba949de1b9c456493cd3bed6bb577ab3bc3a26323d9a1509bcdd2f7cbdbde0203d7848953a65644bbcff7cc13b21ca633d912324bd3d20413cd4bcfabad73a933c175b1a3cd2c1a0b967e0e43c8a2e983c4d884e3b19d7333d9e1086bc27c2b9bc90a4403c0772cd3c4c8a2abcbae5233dad79813c29bf123c421874ba8f255d3c3ca15dbcf3101fbd55fdab3c922148bcc05b4cbdee97ac39c05c3abda08c1fbc6addbdbb9a15893ab270463cdb33b43c54ff873c01fc24b9bc62ab3c764b3c3d2eb6d7bca288e739bf5e963ccbcb263c214c91b9754e8639dab3e2bb567ba13ca683873ce12a9c3b2fb47b3c7ec019bd4a0e113ddcb385bb28bfef3c8a2e183c55fdab3bacfb0bbc39a572bb31b242bc1c5171b9b36fd8bcf985d9bb8a2e183c5cf3a5bcc6d2e2bbfb848e3cb66bc3bb
c653a23cba64e43c1a5617bd79c6e7bbf093f4bb6e580cbc734ee3bafa052bbc096f263cfa05abbad7b953bc528200bdcbcb26bd3f1df73cf2110d3dd3be56bce4a5473d4e87e0bb1a57853b3c230b3c9d1063bca87f4fbddfad943cf48f02bd5cf3a5bc4e87603c3132f1bc2fb47bbbf709403ce524ab3cb36e6abcdfad94bc480fdcbb8d29953c5085273ced18493c81bc84bcc159f0bc4f8615bb29bf12bdf58d263d73d0903cd340843c0ee72abae42687bcd1423dbcc159f03bb271b43cb6ec823a352d11bdee96bebb16dbc8bb8d2a83bcad7901bdaf745bbced9908bd5d7209bd73d090bd676212bc2a3cf7bcb56d1fbc7052f8bc979a3abc1e5026bd4d884e3c8d2915bc81bc04bc16dc363cee963ebc88b022bc1a57053dac7870bd9e8ed83cb8e66e3cbe5f04bb5af4f0b88436c23b33b089bc3f9e363c3927a0ba2c3a3e3ce3a723bc8d2a833d0b6b6ebd81bb16bc479166bb5d72893c498fadba24470ebd27420bbd902580bcc9cddfbbc653223be820963cf3101fbd5d719b3c4c0ad9bcdeae5fbbf28f5f3d0d6aa3370f65fd3bc2d9c1bccdc85c3d175b9abb7054d43c214c113d1d5114bd6860363b9c92103cde2eb13b7cc1643dc357b73c03f88fbb0aee093cfd0116bc3aa683bd754e86bda3888abb83b6f03cd2c032bd02792c3de0ac26bd17d8febbe12b0a3c86339bbcef16103d4c8b183d8ea78abc58767bbdf2913bbdeb9bc13b21cb51bd5bf4933cc358a5bc13e04b3cb3f017bc48909bbb3132f1bc6cdb043d715389bc7ec019bb61eaea3c5af55e3c4e07323b1162563ba3888abcf093f43cfd8056bd9b141b3d5181efbc9915e63c4790f8bc2eb7453ca506003c91a2e43a1fce9bbcacfa1dbd8caa313b4693c23b883051bd45968cbc11e315bc6a5cfe3b2f35bb3c38a83cbce89fd6bc20cc3f3cbc61bd3b784815bdf291bbbc5cf3a5bc469430ba78c8433db6ec023da306ddbc8cab9fbbdd2f7cbded18c9bcea1b703c72512d3db8681cbc87322dbc83b75e3c3a24563c4b0cb5bcadf92fb9f98647bc0d6a233c17d96c3da8000fbdfffcef3b9d10633c007b65bca9fe32bc1cd31ebd2db9213de7a2a03c67e1d2bb7fbd4f3bf709403c21cbd1bbabfafa3b5200d33ada3590ba951c453d0bed1b3d28411dbd9e8e58bcd83749bd409b6cbb35acd13c8f25dd3b2f37973cdd2ffc3be82096bc23c8aabcda3590bc06f2fbbc1ad4e9ba1ad557bc489109bde2292ebdd141cf3bf28fdf3ca505123cbe5e73bc8b2ae03bc25a813b8ea78abbae7725bb5083cbbaea1bf03ce7a1b23cb965d2bcd63a70bc77c9b1bcf80676bcae77253c27428b3cef1522bc6268603b4f8783bc13618bbc3231a6bb0ee986b903f80fbc489109bc8436c23bcdc76e3b087182bce7a2a0bc087182bc9024923a6ed6debb0aee89bcd33f963cb8689cbc8337b03c0fe54e3c4a8dd13cec98f73a6b5ca1bc5a769ebce6a38ebc2741faba27c239bb28402f3d45145f39912236bc07f38c3bf014b43c08f130bc616abc3b3a25c43b64e6553c61e9fc3b08f1303cf01434bd62694eba51825d3c5fed343b6ed7cc3cfb03cf3ce89ee83a007b653c0a6dcabc175aacbcbe5ef3bbe425f6bc9997133c65644bbc4b8c863ca60336bc13609d3bdeae5fbc51048b3a8b2cbc3b7d3fda3c3f1e88bbec19b73c88b1903c73d0103c9a94c9ba48901bbd74cfa2bcf789113d27420b3d5d7289bb6d5869bc26c503bc04f7a1bcc05b4c3dddb1a9bc4a0e113cc8d117bb7d40c83c1b54bbbc007b653c646627bb48901bbcd142bdbb59f73abbbce16b3c92a287bcfa05abbc6ed65e3cd9b62c3c7cc252bacfc623bc81bb96bccbcc14bb2f3717bc9c92903caa7d16bd892f863be02b673d3ca1dd3c27c1cb3aa10b83bc5779c53b45960cbd92a1193de3a7a33c78489538b56d9f3c2bbc483cc653223cd6bac13ccfc6a33b61ea6a3b72d0edbba683873cfd7e7abcef95d03baef665bcf40d55bcd2c032bb4417293b38a74e3a754caa3c2d37f43ab4ed4d3cae7725bd498e3fbb7848953c3132713b60ec463c0ce963bc6fd570bd429a21bd07f38cbc9d90b43b969ba8b9441729bca7006c3c00fe003ce6233d3ba50600bc538024bdbae6913cae7725bb4514dfbc764bbcbb35adbf3c6fd6013db96552bb12e227bd8cab9f3c9a15093d676380bc0ee7aabb803cb3bbed18c9bb4a0da33c75cb6abca08d8d3c3e9fa43b7054543cc25993bbaa7d163b92a287bbbe5ef33c74cfa2b9959d043d0becadbc5200d33cb965523ce228403b362b35bc62e99f3b9a1589bbda3590ba8ea70a3ded989abc78c8c33c67e1d23c11e4833ced18c93c6e579ebcdab5bebc480fdcbb4a8dd1bbe622cf3cfe7f0bbde4a5473cabfafabc63e831bd498ebf3b1163c43bd2c20e3dbfdd56bbddafcd3b0fe5ceba3c238bbcc8d197bba68387bc5e6fbf3a4f05d6bc0a6dca3c7b44003d88b0a23c2db9a13cf192a93be4a6b53be3a
7233c2d38e2bc71d3373d2bbb5abcadf7533cb0f33e3b764a4e3b86331b3c7ac68abcdcb2973b0e66eb3ced9988b8d936dbbb72d0ed3c13610bbd72d0ed3cf093f4bbde300dbd803d21bb81bc04bc28411d3de5a27dbca880bdbc2eb7453cfc8144bbdbb2f4bc489109bc392720bb1ad557bc5fef90bcd33f96bb86b3c9bcc454ed3c2f353b3d4f87833c74cf223d0ee8183d54fe993c62694ebc2741fabc39a5723cd2c28e3a3ba595bb4b8c86bc067429bc51048b3bc8d117bc25c4f2bb2fb4fb3bcb4ae7ba421874bcb0f42cbc24c64ebceb9a53bcf38ef13c4318973cc455db3c5a761e3c106520ba89ae46bddfae02bcf094053d1ecef8bb7053663c13601d3c59770c3d813a57bcdfad94bc79c5f93cb5ec5f3d9f8d6a3ceb1c013dbd5fe1bb0a6c5c3c15de12bdbb6407bd9d91a2bcd340043d0ee9063d3ba4a7bc7fbdcf3cb0f51a3d409c5abcb966c0bae621e1bce91dccbc5fed343c54ff873bf3109f3c82b84c3d4f8695bb332f4a3cbfddd63c85b5a53cb66b43bc5381923c8fa52e3d6860b6bc18d8a13ceb9ad33b7b4480bc0ee898bd2e38053dddaf4d3cde2f1f3d567c8f3c4d884ebdaef665bcdcb297bcc7d2053c51816f3dc159f03cd33f16bc1d5194bcabfafabb784815bdcec791bc74cfa2bcb1f162bce52599bc17d8fe3bc9cecdbc2dba8f3bbae4353d6adc4f3c155bf7bc007be5bc2fb569bdc8d0a93c89af34bb48901bbb6268e0bc4c8b183c78c7d5ba419b8f3bf50bf9bc4694303c8d28a7bc11e4033de52519bcd24061bc656539bcd73a13bce2292e3c979a3abbeb1b133cacfa9d3b4397d7bb6e579ebce2a8eebcb3f017bc6addbd3c16dbc83ca4879cba55fdab3c7fbebdbc646627bc4b0bc7bb2abe243db270c6bbfffc6fbc214c11bdee157f3c9e0f183a9122363ca307cb3c577ab33b3f1e883b72529bbc352d11bc77ca9fba01fc24bb2544c4bc89af34bbf60c8a3bd8b8083c72529b3cd045073c882fe33c9221483aa10b03bceb1c01bc13618b3cce45e4bcbf5e963b1ad5d7ba4f05d63b0ee72a3d547ec8bbbc63193d0d6847bdcbcba6bc12e2273d59770cbc06733b3b38a92abd73d0903b67e1d2bc1ecef83c08f1303d3aa603bd74cfa23cc456493b8fa69cbcf50bf93c', 'Array(Float32)')), +('<dbpedia:Saab_Automobile>', reinterpret(x'69662f3d630f5ebce74fc4bcfde5a33c9d30883c04b88bbc59519cbc3a7891bc4d5cb1bc45604e3c935bc5bae74fc43c7dc565bc9ec5053da84c13bd8d6c903c3e7119bddb84d43ce1db253ddd5db4bc8d6c10bcbdd5c4bcc0f56c3cded8cabcf3954a3b68eb183bc91bcbbbcae7fcbacef785bd683c34bc151181bcbb64813c5b13fbbba81845bcf984ffbb8873883c68b74abd5decda3b89bad03ceffa8b3cb7e68f3c72583f3d7ffc0ebdbb30333b47e892bc322b933a29ce00bd468ac93cb2b939bc52041e3dd757fe3bab1ba03c7258bfb8ae6f963c9a7e48bd7004493c10cac3bb5524c63d36a904bc8077a5bc26e58cbc6b0b41bd56828f3cc5808cbca5f89c3bb761a63c13eaeb3c1448b5bbd5951fbd4ed7473b861f92bc532e99bc7482ba3c1c44183dbf5d09ba215aedbc6fbd803c391ac83d0adb0ebcd5e6ba3cb7e68f3b7dc5653bdcfc043bf24e823d691514bd1448353c34550e3d0daa9bbdc422c33c5a00013d88c4233b7a5422bd78af103df1bcea3ba9768e3dbf5d893b22b8b6bb92314abcbab51cbce9d7083cdbb8223baabdd63c7307a43d2bfb613c208e3bbc01d2fd3c678d4fbc9fef803dc64cbebc99b2963c035ac23cfae248bc31cdc9bc097dc5bc75fd50bd0533a2bcd864ac3bb18fbebc4863293d537f343d9adc91bc7751c73b02dfabbc55d32abdfde5a33b36fa1f3c409b943df713bcbb02abddbbf984ffbc8143d73c5ae9ffbcec14fe3b5dcf8d3c1817423d263628bd636079bc3af3a7bc2429fabc208e3bbaaaa0093db20a553cb20ad5bcdaab74bcb7b2c1bc23e2b1bc940aaa3cf1b9843d8d895dbd0dfb36bbe1dba5399107cfbccec3b73c98a5683ccf8fe93b79f6583d5a0081bcda8ea7bbb1c30c3da73f65bb3ccc073b4191c1bc2f28b8bc6f3897babe505b3c409b94bc3f3d4b3a4d0b963cd5e6ba3cda3d0c3c8f2e6f3c7324713cfe603a3ce4fbcdbc20df563dffbe83bc8463ffbc17b9f83cf7c2a0bcef4b273aecf730bc72dda83cc2ce4c3c2d4fd8bc0daa9b3bbe505b3de9f4553d73b6083d7e407cbcace751bcb93a86bb85702d3cbfae243df3442f3d23e2b13c2587433c72a95abd858d7a3c5dec5abd98a5e8bca4ebeebb20dfd63c3f09fd3c3152333d97d936bc4b83513da79daebc060c023dd9dfc2ba98a568bceed0103c324860bd2636a8bbbd0993bc7162123d5d3df6bb3e8ee6bc8b9330bcdc33393d58f3523c179cab3c2ead213a8077a53c035ac2bb75fdd0bb51daa2bc8f623dbdf53adc3c2d8326bd8a32813c9eab9ebadc
e29d3c04d5d8bbfe603a3db334d0bcd8642c3dd8e9953c7da898bc0309a73cbcabc93b85702d3c10fe91bc33f7c4bc700449bd62c815bdd30ddb3c0006cc3c75e083bc3ccc87bd0fd4163d34d0243d9d4dd53cd30ddbbc8463ff3c2281023bc73f05bd7162923cd1e3dfbba2f5c13c19750b3d72a95abca6a7813c36fa9fbc5524463d07a465bdfbae7abbb065433b712ec4bc4bb71f3d9af9de3c06ae383c75fd50bda3c1733cf9ec9b3c91074f392491163d5e4aa43c2f79533d9eab1ebc2905b53b373e02bd78002cbd88c4233d0cd1bb3b1179a8bcf88e52bd935b45bd3ccc07bd3421c0bcc32c163d6b3f0fbd30a3cebb87498dbc9d81a33c9551f2b9e380b73c5e9b3f3dbb3033bcca79943c3472dbbce256bcbc57fda53cfb409239f58b77bc19758bbb3ccc073d6b3f0f3c9d3088ba092caa3c6c86573b310198bc8d3842bd7c4a4fbd67de6a3c9b57a83bf9ec1bbc09b193bc315233bdf1b984bca869603c6966af3a5189873ce12c41bd96afbbbb0e25323d935bc5bb038e90bc6c35bcbc9ff2663c5e9bbf3cd8e9953cb6372bbd928265bc131eba3c1f64403dba814ebcbda1f63cef4ba73cbab59cbc683c34bd4d0b163d44e5373b646da7bc869a283c36fa1f3dfb5d5f3b775147bd1be64ebca3530bbb377536bd04843d3a4cfe67bc4419063d36fa9f3c28dbb93caf07fabb33f744b94cad4cbdbd0913bd5827a13b8c5f623d34d0243ca050303b25d8debad24129bc887308bb2ead213c081f7c3d903b1d3c94b90e3da0a1cbbc1d10ca3cb385ebbb0484bdbcb3689e3c765b1abc4b6604bccec337bde205a13b4bb71f3abdd5c4bc7c9beabc208e3bbd3bbf593d36176dbc85c148bd7627ccbcc4a72c3def17d9bcbbdf17bcc27d31bc9eab9e3cb0e0593c9a7e48bd34d0a4bca3c173bd797bc23c2d83a63b06aeb8bb249196392a2fb03c3bbfd93c54a92f3df361fc3c21ec84bda84c933cbad269bdf395ca3c0c80203c9265183de205a1bb357f09bd7a54a23b2956503cd6d9013def6874bc64bec2bca4ebeebc1196f53c7eef60bdc94f99bcb45ecbbb2ff103bd6e2b693ca01c62bce71bf6bbeffa0bbd3f09fd3cd241a9bc5bf6adbc1d8be03c83974dbde828a4bcbcabc93c87498db9327c2e3cc776b9bc18c626bd1a9f86bb9f26353bebcd35bdd241a93b0dc768bcfbae7a3c2baa46bd88c4a33b38bcfe3cafeaacbcb13e233dcae7fcbc9a2dadbc1768ddbbd1e3dfbce683923c2636a8bb468ac9bc3152b3bb0181623c6a902abd4e52de3c965e203c003a1a3dc1029b3cf96732bce5aa323ca5156a3d81260abd4d5cb1bc4d0b96bc0787183cdcfc84bd7c7e9d3ccdcd0a3cc1a4d1bb0b058abcc73f053d8a69353c64be42bba2f5c13b8e04743c4863293d9ec86b3c863c5f3d893fba3c8cbd2bbc4739aebc23ff7e3b4bb79f3ce0ce77bd6915143dd0b9e4bc2f79533cdf5361bb0e42ffbc065d9d3ce71b763c48120ebde28a0a3d17b9783b9055843ddb093ebcbacf033d7c7e9d392efebcbc8d6c103d9534253dd5e6babc9282653b83e5823dc0d81f3ddb67073d0787183d2f79d3bc965e203d10fe113cc2ce4cbdb8dc3cbce42f1cbdcf21013bcba38f3bc0d89f3b809472bc7cf933bd7578673c3248603da370d8bc7785153d5dec5a3c29ce803dd0ed323c693261bce6a0df3b7c4acf3b179c2b3b9a2dad3c4a3c893b2109523bcef785baa1cbc6bc86ebc3bb42ef8a3b15c3cbbc831cb73cf24e02bc534be6bb4ed7473ccb11783c3af3a7ba76f3fdbbb7e60fbb5e4aa43c6343ac3c4bb71f3d7dc5e53c51f76fbc0f6cfa3c0629cfbce8ad0d3df24e023b67db04bd8cbdab3b9ff266bce8ad8db9ee9c42bbc4a72cbd31cdc9bb40b8e1bb6966af3c5575e1bcb77ef33c9903b23bcdead73c765b1abcc8d482bd3f09fdbc0e764dbd30f469ba6bd7f2bc6c698a3caddd7ebc441986bce222ee3c49debfbc68b7ca3ccacaafbce559973d3fecafbc1b1a1dbc87498d3c1b1a9dbbce656e3c065d9dbbf83db73c1f64c0bc4240a63a55d32abd532e193b245d48bc168ffd3b0dfbb6bcb52afd3c619e1a3b4436533cc96ce63cda5a593c8f2eefbbd864ac39ae3b48bd6cbaa5bba7ee493de0608fbc5d3d76bc6539d93cc4c479bcaaf1a4bcfa1617bcaf99113cf58b77bc82a1203c0006ccbb2491963c5d20a93b3d6105bcbf5d09bd386be33b18c626bd831c37bcfcbba83c3d471e3c98881b3a1ee9a9bbb7e60f3de55917bb863c5fbccba30f3c208ebb3c8715bf3c06ae38bd79f658bc84f5963cb5d9e1ba47392ebcbe84a9bc5b7b973c8f11a2bc1b1a9dbd5155393b619e1abd7da898bb0cd13bbcc7c7d4bc30a3ce3ad9305eb9272c55bb64bec23a534800bccdead73ce0b12a3c33a629bc7aa5bdbb34ed71bd135288bc30d71c3cb334d0bb5acc323b87498dba2ead21bd75ac35bcc18704bdfa1617bd6db0d2bcbbdf973be576e43cf69825bd0dc482bbf8710
5bc9dfcb9bcfcbb28ba05e2063cb49299bb5b47493d93e0aebbd561d1bcdb67873d15f799bcea521fbc34d024bd693261bb425df3bc7ffc0ebdd41a89bbe12c41bc7da8983a87153fbb290535ba6ae145bd7da898bc038e10bc8a69b5bcb15bf0bba37058bcdc3339bcf9b84dbc199258bd9e5a033cd43756bd327c2ebb3ba20cbd63f2103d58d605bcbe84a93cdce29dbc2ead21bd04b88b3bd134fb3ca0a1cb3c245d483ce525c93bcc3b73bd1f980e3db703dd3b537fb4baca7914bdf024873cb031f53a40b861bb58f352bd9ba843bd82a1a03c7f195c3c3086013b42ef8a39038e103dfde5233c52216b3d2905353c983700bc9cd23ebccb1178bd51f7ef3a5bf62d3d22671bbb5524c6bb30f4e9bbaa42403c245dc8bc1abcd3bc7b20d4bb05ffd3bbb0e0d93b2602da3c93fd7bbc3a78113d9ba843bc9a2d2d3c7ffc8ebc75e003bd9427f7bc59a2b7bc2c255dbc3bbf593c648a743d65e83dbd09cee03c1af0213d5f91ecbbdb6787bb5e4a24bc482fdbbc869aa83b40b8e13c64bec23cae6f163c47392ebcf19f1d3ce8ad0dbdcdcd8a3ccd99bc3bb15b70bc557561bb6219313d2baac63c163e62bbb2b9b9bcde29e6bacdea57bc3f3dcbbd030927bdab1b203da0ff94bc4e35113cb3856bbc8f2e6f3cab6c3b3c22b8b6bc4880f6bc82a120bdf041d4bc17b9f83c4b83d1bc41c58fbb9a7ec83bbad2e93b96af3bbde205213a18e373bc09b113bcf69825bd8a6935bdc7aa87bc57fd25bca84c93bc7307a43cadc0b13cc59d59bdc64cbe3bc18704bb2c255dbd9cd2bebb0fa0c83c270f88bcdbd56f3c322b133d4863a9bcc4a72cbb0230c73ca31f3dbaa9c7a93b46be17bc13cd9e3ce42f1cbd2760a33b36a904bc450f333d2c592bbdbab51cbc2a2f303d90c006b9ca79143d90c0063d3ccc073cd5e6ba3b792aa73c666354bdb8108bbcdc33b9bcd6d901bdce48a1bc58d685bc9fd599bd4436d33c162115bdbc77fbbc28a76b3c712e443b9a4afa3cad451bbcd134fb3b57c9d73c245dc8bc70d07a3c1511013d0f6cfa3c3c3af03bbb64013ddb09be3ccba38fbc4bb71fbbc5d127bcf58bf7bc9ff266bbe74f443d30a34e3b6983fcba092caa3cda5a593cb98b213aeca6953c6d01eebba17a2b3d23e231bcb45e4bbc81268abc97d9363c3a44c33bd610b6bccbc0dcba49aaf1bc74319f3da9768e3cd488f13ceed010bcffdbd03c712ec4bc893fba3cb50db03c0aa7c0bb85702d3ab95753bd9f26b5bb436a213b9920ff3c97d9b6bc60efb53c67c19dbc276023bd81f2bb3cecf7b0bbbf7a56bddcffea3bda8ea7bc9e77503da229903ca73f653c5c71c4bce9a33a3c79d90b3c16edc6bc1514e73ab4afe6bcfd02f13b179cab3ce71b763add9102bde71b76bcf21ab43ad25ef63b02dfabbc4a3c89bce9f455bb79f6d8bbce1453baa370583c0e5980bca47d863c0adb0e3b4ea379bc49aaf1bbcf721c3deb7c9abbe2226ebcb065c3bc844632bc322b133d636079bd0e764dbc77a2e2bc967b6d3b6932e1bcf806033c16ed463b47e812bda49ad3bc5221eb3b2d83a63c25d8de3bfb5ddfbaf51d0fbd25d85ebda9935b3cdf872fbd060c823c7bcfb83c7c7e1d3c55f0f7bb0e25323b7a54a23b097dc53c3741e8bb30d79cbb68b7cabc373e82bbdf87af3ca370d83b8ee7263d72dda8bce0b12abc965ea0bc384e96bc0326f43b6db052bcb01428bd5e4aa43b3a95debcb3681ebd94b90e3d065d1dbd596e69ba05ffd3bc9e77d03c49de3fbc728c0dba36c6d13c8eb3583b863cdfbb2602da3bda5ad93c45604e3db492193d34558ebc0c9ded3b9af95ebd84f516bdc91b4bbd716212bdbe505bbbc422433c420cd83ccb1178bdb8100bbd4cfb013b02fcf8bc908c38bbb2ed07bba869603ca869e0bcfcbba8bcca79143bb5bc143b3775b63cc456913b0e25b2bb36c6d1bb311e653c9903b2bcda8e273de71bf63cb18fbe3b908c383c02df2b3d44b1e93bc73f85bbde0c19bde1f8f2bcfc0cc43bc7c7d43ca0ff14bc88e1703c4f01c33c4b83513cbe505bbb179c2b3a837a003c94d6dbbb59511c3cbad2e93a1a9f86bb131eba3c651c8cbaab1ba03c7307243dc32c963c8077253cb13e233be38037bcaf9911bda197f8bc11455a3c2efebc3a46dbe43b39e6f93c0e427fbc629447bbf26b4f3cc29a7ebca512043cd29244bc844632bddf87afbcf7c2a0bc2d328b3bb014283dd3bcbf3c7c7e9dbc4ce19a3c0fd4163d6b0b41bd3a95de3c15c34bbb51daa2bcc8f1cfbca0ff94bceed0903ca4eb6ebdcba30fbc648af43bce48213df5e9c0bc4f01c33c4116abbc6c8657bc2c59ab3c6b0b413c99cf63bd272c55bc8766dabbe525c9bc67c11d3d6360f93bf4f3133ccf8c03bd162115bda47d063cd0edb2bb4067c63cdb6787bc2d4fd8bc683c34bd245d483c63f210bd8766da3bebcd353d678d4fbcded8cabb0309a73ca9c7a9bc2a2fb03c9d3088bb0c80a03c0949f73c
eb7c9a3c3bbf59bdab386d3d569fdcbc2316003daa42403a502b3e3acdcd8abb436aa13ccbc0dc3c42ef0abcf664573c4b8351bdde0c19bc7e23af3c3152b3bc277d70bc04b88bbc0d2f053db2ed07bb23e2b13b74319fbb98881bbca31f3d3d09b1933cdb67873cc3f8c73ca7ee493c7b0307bc4116abbc293903bdd168c9bb6343ac3c3e71193de6f1fa3cd8b5c73c20dfd63c0c4c523d8463ffba7b2054baecc3e2bc8d6c103c89ee1ebc0230473d93fd7bbb208ebb3ba01ce23c72a9dabc41c50f3c482fdb3c215aed3cbb30b3bc51a654bb57c9573cb637ab3c83e8e83a1196f53b003a9a3c3e8e66bc1352883c2ab4193d9854cd3cb7e60fbbacca043b6e0e9c3a5fc53abc7800ac3c8d89ddbc651c0c3b135288bcd5b2ecbbb5d9e1bce1f8f2bbdce29d3b4e862c3cadddfebc003a9abc1c95b3bb1ee9a93cc7c754bb58783c3c41c50f3d6597a23c732471bc37249bbbd6dce7bbd5959fba76d6b0bc67db04bdc0f56c3c9b74f53c7578e7bca4ce213b8f2eef3c25bb91baeca6953b12a3a3bc27b1bebcdb6787ba651c0cbb46dbe4bc7aa53d3c792a273cea6fecbca5156a3b9e77d0bb1d6e13bc6646873c20df56bc01d27d3c7da818bd48128e3ca4ce21bcd09c973b9d30883bf075a23cc2ceccbc51da22bccc1e263c5ca512bd3f09fdb9d3bc3fbd1c95b33b443653bc3741683a564ec13c9551f2bc619e1abc92314a3beeedddbc5558143d3bbfd9bb9d4d55bc6b0bc1bc778595b9a81845bddf02c6bceffa8bbb208e3b3cdb09be3b67c11dbce525c93b12f43ebc8b93303cef68f4bcad451bbd9920ff3c93ace0bcb8108bbcc5d1a73b0629cf3af4bf453b9282e53bd5e6babaa370583b7162123c9b74f5bc20c209bd4cfb013b18e3f3ba96e3893c44b1e9bc02dfabbcf1bcea3cbd26603b4436d33c84f5163bfbaefabae68312bd52049e3bb1c30c3dc1029bbc8eb3d83ce07d5c3bfb5d5fbd691594ba7ed2133c15f719bdf21a343b04d5583db1c30c3b0e427f3cc1a4513d1d8b60bbed21ac3cd292443c15c3cbbca6c44e3c5b47c9ba10fe113d144835bb57fda5bcc3f847bc5189073c44e537bca17a2bbc8d38c23bb2b939bc505f0cbdc1021bba26e50cbd3e13d0bc3ba20c3c8eb3d83c5b9864bce5c7ff3bd6d9813cd29244bcc73f853c311ee5bc2429fab9b2b939bbf909e9bbd5e63a3dbe50dbbc9adc113cef4ba73b8a69b53c99cf63bc327caebccf21813c0e427f3c6b5c5cb930a34e3c8412e43cb4ac80bc2dd4c13ce256bcbc3421c0bc4bb71f3d983700bd1d8b603d08022fbd700449bcc4c4f9bbecc3e2bba49a533c7e232f3b5827213be0ce773a20df563cab1ba03c12f43ebdcdcd8abb6343ac3b2f5c063d13eaeb3c7f19dcbc661239bcef4ba7bb56828f3b8412e43c3421c0bc9903323aec484cbd903b1dbd908cb8bb13cd9e3c6a90aa3c2ad1e63b374168bc728c0d39174b90bc502b3ebc2f5c863c858dfa3b22679bbcecf730bc4b83d1bbf698a53cc34963bb571af33cfa33e43cf87105bb58446e3bac96b63af83db73c596e693bf909e9bc6040513c498d243d648a743b1e3a453b3d471ebb5ca512bc0e427f3c935b45bd73b6883c28db393ccc3bf33c55d3aabc3741e83c967b6d3ca7eec9ba28a4853bfc6a8dbb54facaba70b32d3d8e968bbc555814bb2939033d3bbfd9bc33a6a9bc73d3553cebcd35bcc1029b3a83cb9b3cd56151bc23aee3bc213da03c05ffd3bcd3f00dbdf26bcfbcdbb8a2bc19413dbc5558943c7dc565bce55917bde205a13b5ca512bc9af95e3ce5aab23cfe94083b9fd519bdd46ba43c6d93853a3c1da33b1ee9a93cc5d1a73c58f3d23c151181bc792aa73c6d01eebc3a10f5ba8715bf3b8be44bbbe3b405bce222eebc1ddc7bbd90c0063c58446eb93d98b93b6c353cbd9f26b5bc9d8123bc4cfe673c76f37dbd3f097d3c89bad0bca9935b3d9adc91bc890b6cbc63f2903bc29a7ebc98881bbb0c9ded3a62c895bb63f2903cd117aebb11288dbc2636283c73d3d5bcded84a3c23334d3c163ee23cea529fbbc42243bd1817c2b9cbc0dc3b6e2b693d7f19dcbc40b861bdd5951fbd41c58f3cff0f1f3c5e67713c104f2d3c1f980ebd93fdfbbae57664bbea0184bbf24e82bc04b88b3c0fd4963b2bde943c97d936bbf6478abcf9ec1b3c1fb55b3c9cd23e3c9d9e703c4880f6bc33f7443cfe603a3c003a9a3b678d4f3d1bc9813cc8251e3d7785953cac96363b62c8153d826dd2bc24297abcf21ab43c0dfbb6bb2a2fb03ccc6fc13b06aeb8bc99b296bc0b56a53ceed0903c7aa53dbd389fb13b555814bcad459bbb5fc5ba3c', 'Array(Float32)')), +('<dbpedia:Tata_Motors>', 
reinterpret(x'55e91bbd54bea73ca9a1afbc0d4db5bc2673e8bb57cbe13c5b7732bd45219ebcc3a394bb876c89bce4277d3ca1ab09bc31eaeabc4fd53b3de7a8d9bcca993abc631e16bcf7f6203d9e79ef3ca47ba83b1c70883c842848bd55ac003da3ca6abbc96e463c67449dbb6c2d893c8ab65ebb1236a1bc355920bd6ffda7bcbbfc30bcf85e303a557579bbab714ebb7886513d960913bdd42264bc5921ca3bd4d3213bbb4b733c3a6d00bdf61aefbb1418673cc6618cbc5b77323c000694bcbf71fa3b5d0ab6bb110bad3de33390bcae416dbceb6651bd7e888a3d2cc4e33a3d7aba3c6b02953c3af9dd3d763069bce4ea613b662bd0bc223b3abdd027d13c5b77b23bce450b3da4b8c33babf7973dbff743bdff667dbcd6290a3c9c8582bbf5639dba0c22413bdb8cacbb0782033c2fcb09bd3e6813bd76b6323ddb4f913c005556bae0b2b3bcbc76673d737271badf4a243df1680a3ce5d8ba3b3ea5aebb7e9ab1bd2c87c8bc5dcd9a3c737271bbdf0d093cd6298a3bd3f7ef3c0e78293daa83753c6b8ef23c779878bb9dff38bcabf7173d02ab3e3dfbf1333c8db1713dfb6beabb946afc3cafe083bb1fba5dbc3f93073de640cabc876c893c50c314bdc79e27bd7c44c93b94a103bdc3a394bcd49606bc0ce525bcf4b25fbd44adfbbb1dd897bc4cda28bdbb4bf3ba73232fbcca993abdae41edba8a671c3d101d543c482ed8bcbd8f343c3f9387bc8d373bbd16abeab88dfa9f3c1cad233c3c00043dd115aabbba94213dfa03dbbcca99ba3c404a593db048133c72bb1fbd3f0d3ebda0bdb0bc3af9dd3c63aaf3bc9f1886bcc1c7623d1e52ce3ce345b73c1a57bbbce1a00cbd4c1744bb7153903d9a2f9abce12c6a3d078203bdc5855a3d58a713bc047bdd3a42dddc3c0e3b0ebc4bafb4bc5bb44d3cb82c123ddee294bc38914e3ab2ed3d3cb2b022bcf7b9853cc9312b3da569813c10e0b83be3bf6d3b47b4a13c69e9c7bc876c89bd6b7c4b3d0c22c1bc820ffb3b8cbd84bcb750e0bc6b02953b94a103bca1fa4b3c1ff7f83c1a94d63b3289813c10ce11bd9a2f9a3ba47ba8bc091587bad58af33c4163a63ce859173d9fe17ebc9a2f1a3ca1e8a43c0ca88a3a97ae3dbded7f9e3df475443ca4b8433cb7011e3dda735f3c9be66bbae1dda7bdb750e03cb5ab35bc94a1033d7b19553a03d632bd091587bcd0646c3dcd1a973c397fa73cf25c773c737271bd01bd653de515d63c5b7732bd46c6483d50c3943cc23b853a815829bdcf8226bca43e0d3a72bb9f3cf60848bd3203b83cc45a66bbf7b9853dc6610c3c33f110bd7c3222bd08ea92bbc45a663c886076bc98ebd8bceafec1bc46892dbb5d0ab6bcbb39cc3cca99babc715390bb4f5b85bc0401a73c06e36cbcd4e548bd5e84ec3b72f8ba3ceeaa92bd52a55abceb178fbca98f883c3e68133de6032f3ddb4f11bc4996673dc5c2753bd36b923b8e9f4a3caeb58fbc6feb803c561410bddb9ed33c9b5a8e3c078203b8d152c53cba207fbc5d0ab63baa8375bdc8c91b3d9e2aadbc0f6682bc37af083d18871cbdb2b0223d727e043d8a2a01bc10e0383dd76625bd08ea123d30ad4fbcf47544bc3215dfbcf5dd533cccef223d42a0c13cd49686bb0b7d16bc6feb803ddf87bf3cd15245bd744e233d10ce11bd013108bb6de4dabc29f4443ced7f9ebc99c78abc727e04bccd6959bd8fca3ebcca993a3dbcea89bcda249d3b73e6133c1406c03c0018bbbc4bafb4bca78862bdbe09ebbcc27820bdd5fe95bcd766253d59d207bcb83e39bd86f8e6bcb82c12bdf7f6203d5651abbcb1ffe4bcb04813bd1580f6bba86494bcde5ccb3c72f8babccd1a17bcef4fbdbc723556bdca99ba3c07d1453c4c9d8dbc22fe1e3dfd471c3c9fa4e33c90f5b2bbb5bd5cbd58a7133cda735f3b31ea6abcfac63fbcd0ad9abb99c78a3c58b9babcc78c80bc12733c3de0efce3cbbbf15bd0ce5253ab9e363bd464c123d2f08a53c139eb03c6de45a3c0a52a23b10ce11bd4e30113b68bed33ce345373cdf873fbd1273bc3b074b7cbc1531b43cb097d5b987e63f3cae41ed3b6a9a05bd1799433c49594cbc8416213d26364dbcbfa8813c83ae113d37af88bcc23b05bc37eca3bccbc4ae3c7505753be294f9bc01bd65bd8f8da3bcea3b5d3de9c1a6bccbc4aebcc14dacbc0d109abdbbfcb0bcfd0a813c6dd2333dedbcb93c95de9e3d3a3679bc57cbe1bcff29623b635bb13ce6c613bcad4d003abeba28bdbeba28bd58f6d5bc0d4d35bc8f075a3ce345b7bdd3bad4bc4e3091bc6b02153d504f72bbd63b31bce0b233bd0a52a2bc455eb9bb4fe762bd315e0d3d6ffd27bd9dc21d3df7b985b9504ff2bba6d110bb84a2fe3c40fb963c4fe7e2bc54818c3c7849363d0e8a503d973407bb67d0fa3bfc1c28bd5438debc7e880a3bd9f9a83c5c650bbd530d6a3dc45ae63c32527abc250b593bdbdb6e3c7a2b7cbbb8b86f3c90b817bd49594c3bbc64c
0bccae8fcbab95786bdbb4bf3bc397f273d53d04ebd04c40b3c66dc8d3b55e91bbc946a7cbc070ee1bcafe003bd30adcfbc165c28bc486bf3bce38252bb3c4fc63b7aee603c3a6d003d12733c3d59214a3c70dfedbbb2edbd3c2b4a2dbcc806373d5e84ecbc477706bd82c0383dcda674bc06578fbc24e0e43c13dbcb3d139eb0ba71cdc6bbe0b2b33c17d6de3a6dd233bd9771a23c64c3403d517a663cc32ff23ce12c6a3c4d42383c26364d3be333103d8be152baa47b28bc31ea6a3dd5fe153da0bd303b54be27bd9e2aad3b9904a63ce76b3ebd28c9d03ac9f40f3bfbb498bce9d34d3deafe41bd3340533d3c0004bba262db3cefd5063d22c1033dc3e0afbcb4068b3c9f5521bddc7a053c9828f43c50c3943badc7b6ba577c1f3d3db755b9d0eab5bbf3876b3c421a78bb2b0d92bc02ab3ebcec54aaba5cdf413c0fa39d3cfc1c283dbeba283d2aa582bd27dbf73b0e8ad03c43b98e3d535698bc7c8164bb72bb9f3cda735f3bb4060b3d4175cd3c9e79efba223bba3c0694aabc32527abcc673b3bc105aefbb86f866bddbdbee3c84d9053c53d0cebc319b28bd096449bc139e303b3c12ab3ccf82a6bccfbf413c9120a73b98eb583dc9abe13c158076bb91e38b3db8b86fbc03134ebcfd0a813792d7f8bba520d33c0a52a2bca43e8d3cdb4f113dcc01cabc4470603d0d5f5cbc710a623b2da0953c397f273ca91be63aca1f043dcbc4ae3b0b7d96bc7849b63c3070343b8723dbbce001f6bb5a0fa3bcb3db163c5b3a97bc351072bb8c0cc73b6cb9663d2a1f39bd4777863c55e91bbdc3e02f3c5000303ca29f76bded7f1ebc1add043d54fb423de07598ba8e622f3da9decabc30bf763d34a862bc39428cbcb1c2493b784936bc37ec233b50c3143d486b733cbbbf9539e1dd273c8d373b3c22fe9e3c91e38bbc34e57d3daa8375bc70a2523c5dcd9a3d93026d3d6ca73f3db05a3abc811b8e3cd80b503c758bbebc66dc0d3c297a8e3df5effa3b86bbcbbb341c853a7cf5863ceb6651bbb1852e3dd3a8ad3b7a9f9e3be4ea613c30adcf3cf07a313dfd471c3d73232fbcd422643bb0d4f0bc6b8ef239d81df7bc3af9dd3cc9f48fbc9c11603ddf873fbdfc1ca83c7b07ae3c62f321bcff2962bdd4d3a1bce67d65bcac9c423dc3a3943cafe003bcfac63f3d1a1aa03a924b1bbc3d3d1fbc8cbd04bdfa40f6bb5833f13cf43829bcf4b2df3b648625bdd80b503cec542a3bfbf133bc9ba9d03c38914ebdbeba28ba884ecf3c7e9a31bdc23b85bb7411883c84a2febae603af3cd93644bd6d9518391531343c577c9fbca4b8c33c7e14e83a88d4183d0d5fdc3ce72ea3bc61c82dbdcffc5c3de07518bc26f9b13be7f107bd009271bae45e84bc30adcfbbc022b8bcbbbf1539bc76e7bce0efce3b2aa502bc46d86fbcff9d043d59e4aebcd27db9bc381718bd0569363d6c6a243cd0ad1a3cc4ce883c30ad4fbc02e8d9bc17d65ebbba9421bd42a0413c9abbf7bb42dd5cbc6486253c7f3fdcbaed3670b982c038bc24e0e4bb4c9d0d3be0efce3cf733bcbd8af3f93b5b3a173a67d0fa3c06942a3d1bbf4a3b21d3aa3ca9a12f3c3684943b951bbabb25bc163ddae701bcc84352bda91be63bf7b9853c35d3d63c58a7933d815829bbd9bc8dbb951b3abc71cdc6bc780c1b3dced1683d8fcabe3c4e3011bcced1683da4b8c3bc01bde53cf3fb8d3cbebaa83bd6298abc101d543c80f019bc595ee5bc3f9387baf34ad0bae9c1a63ced36f03cc31dcbbc0fa39dbc3215dfbc893ca8bcb83eb9bca1fa4b3cb318b2bc54bea73c25bc163db443a6bc6feb00bdbeba28bd833a6fbcef61643ce72ea33c81a76b3d80f0193cc3e02fbb0fa31d3dd203033d4fd5bb3c6ec00c3cf745e3bc4163263b110badbb1f40273bf3876b3b1531b4bc41754d3c4087743c7360cabc07bf9eba5651ab3ba520533c8cbd043d0e78293c7411083d54bea73b31eaeabcec54aabb8a2a013cdb4f11bd31eaeabc73e693bc83ae11bc2329133d21968f3c76f34dbc3db755bae11a433a223b3abd5651abbc6cb966bdc806b73c183e6ebc58b93abc7d5d163d0ff2dfbbfc965e3c20a8363cce94cdbcd7e05bbd6017703ce7a8d93c009271bcc18ac7bbaeb50f3d7f02413de29479bcff667d3cc3e02f3c73604a3c8e9f4a3c136195bcc5c2f5bc55e91bbdc2b5bbbc609db9bc758b3ebd12f905b930adcf3c3510f2bbc2b53b3d806ad03c50c314bbe4eae1bc7411883c3af9ddbcf7333cba5fafe039eee72d3c784936bdf85e30bbbf71fabc34e57dbc21960f3d5d1c5dbc4163263d864195bc1b45143d99c70abdb267f43c6668eb3cc40ba4bcb7c402bcf7f6203d165c28bd3559a03c69e9473df2d099bcde1f30bc3215df3bb22ad93bcae87cbce9c1a63a84a2febb66dc0dbc1406c03cfcdf0c3c7e888abc5ac6743c3c4fc6baf21f5c3be2089c3c6a9a853c0c34e8bc214d613b5ac6f4bc530d6a3c76f34dbc12f905bc
9c11603a573f84bb078283bcbc27a53c7e888a3c3c4fc6bbcf39f8bcfa40f63c1b45143d55ac00bc0f6682bc6ec00cbde59b9fbc69e9c7bb995368bc341c85bde7a8d93cb83eb93c1799c3baf03d16bcfbf133bc2b5cd4bcbf717a3cd152453a5651abba3fd0a23c043ec2b973604abc4433c53b0f2ffbbb1236213a824602bd0092f13bcda6f4bc8af3f93cc9312b3c670702bd85cdf2bba8f0f13c7c32a2bce1a08cbb2d2c73bcb6990ebc494725bdca1f04bc2636cdbc3bd58fbbc2b5bbb95bb44d3c9032cebc2e1a4cb902ab3ebd7a6203bcd0ea35bd6a9a85bb206b1b3daeb58fba3c0084bccae8fc3cdf87bf3bbe09ebbc0ca80a3c73e6133b24e064bc4beccfbb0a8fbdbc08fc39bc744e233df608483cba94a1bc2b0d12bc1add043d3729bf3c421a783ca225403df1a5253be76b3e3cc23b85bde1a00c3d2724263c84a2fe3b8ab65eb9404a59bcef4f3dbde859173cd3a82d3c2761c13c0006143b9abbf7bbba207fbca47ba8bc67d0fa3ba7fc843c2c87483dfb6beabcdf0d89bb2b5c54bc670782bc942d613dde99e6bc5c658b3bb05ababc0c34e83c577c1fbc161f0d3ddc7a053cd4d3a1bb4ebc6ebc84d9853b5438de3cdae7013c807cf7bc3d3d1fbd93760fbd942de1bc5dcd1abd1d27dabcd20303bc32155fbcc23b053d83ebacba8ccf2bbd24e0e4bc455e393cdae7013d98eb58bc319ba8bcdf87bfbc9d4e7b3cdbdbee3c02e859bc522ba4bcbda15b3cce450b3c2b996fbb06570fbda776bbbbb17387bc806a503c26e70a3bde1f30bc3c00043ddc7a053c12733cbbf5a0383c6707023bc755793d7e888a3c79370f3dc7db423bce944d3b573f043c7579173cf6cbac3ba43e0dbb21d32abcc6610c3c4d7f53bceee72dbc1c7088bce075183c90b8973c9fe17e3c43cbb53b4d917abcc022b83c670702bd7c32223b26e78abc37af08bce0efce3beccee03c09de7fbc9388b63b8dfa9f3bd7a3c03bc536983b1f2e00bde76b3e3b165ca8bcad4d80bc989c96bc72f8babcc8c99b3c758b3ebcad8a1b3c52683fbcb82c92bb2b4aad3cca5c9f3cd2cc7b3c741188bc90b8173d89ff8cbc90b897bbcf39f83be76b3ebcbeba283b0006143dfd479cbcbc64c0bbd891193c2724a6bcf07a31bbd49606bc470364bc1deabe3a1f40273c2378d5bc94a1033c513dcb3c66dc0db90782033cb392e8bbeafe41bd535698bac11011bdd6b5e7bbce458b3cb9e3e3b9a91b66bc36fe4a3bac9cc2bc01804a3d332eac3bb56e1a3c51ee88bc07d1c53cf5a0b8bb13edf2bb0927aebce427fd3c0ca88a3cd496863c55ac00bdd9f9a83cf08c58bd499667bcd28fe03c1236a13bf52602bc5e352abc19ef2bbc06570fbdbc76e73cf68e113c5e84ec3c6cb9663c166e4f3b19b290bb85533c39428e9abb40fb16baf4b2dfbcf1f467bca1fa4b3cb480c13cce94cd3b9c97a9b9a38d4fbb8e25143cb5abb5bcec42033ca1e824bda29ff6bb1a94563c1e8fe93cd3f76fbc07bf1e3d41b2e8bc58b93abb5cdf41bb2c87c8bc9fa4e33c802db53b9fa4e33c5ef88e3c1dd8973a4777063b397fa73c5e47d1bb32c61c3c0180ca3b250b593c0ff25fbcb443a6bb2761413c288cb5bbe1a08cbcdb9ed33c6de45a3ba1e824bc6619293cf82115bc091507bc09de7fbc4eaac7ba09272e3c01432fbd1361953bdbdb6ebae4eae13ceb66513cb5ab35bd8e62afbc881134bdbe09eb3b332e2cbcc40b24bd2f4540bd6ca7bfbcaa8375bbc11011bc0b7d16bd37af083dd1d88e3c12f9053ce8e5f43bd27db9bc76a40bbb8641953c052c9bba91e30bbdac9c423cfac63f3c58b9babc17d6deb92b0d123c7235563c63aaf3bc4689adbc9f18863c1f2e803d1e030cbd2c3886bceafec1bbd0ad1a3d337d6e3bbe7d8dbccc3ee5bb9f92bcbb33f1103df733bcbb297a8ebcbc76673c416326bceafe413ce4277d3c4a72993cc278a03c06a651bb595ee53cf7333c3c3559203d1cfc65bc7cf506bcde1fb0bc5fafe03b925dc23c80f0193d3df4f0bc0313ce3b161f8dbc2aa502bdd4d3a13c6ad7203a82d25fbc2b0d123dedbc39bc9dff38bc8416213d4d7f53bc4e6d2c3c6ffd27bdd5fe15bb13edf2bc03d6b2bc416326bd653d773c02abbe3cca5c9fbc8a671cbc337d6e3c79b145bbad8a1bbc297a8e3c206b1bbcf608c83c7a62833c4b296bbc0a5222bd631e963c7f02c1bb37293fba91e38bbbcd6959bc7ed7ccbc0569363d5a0f233ceaec1a3df3fb0dbc08fcb9bcba94a1bcdee2143c5f72453c3817183c481c31bc18c437bc2e1a4cbca86414bc34a8623cd81df7ba811b8e3c86f8e63c86bb4b3c4afef63b3bd58fbc4175cdbcf821953cf52682bdc79e273cc5855abb59e42e3d2c3806bc5d0ab6bac3a3143d9b6cb5bc71cdc63b5f601e3c158076bc1c7088bbe12ceabc2f45c03cad1679bb3c12abbc2673e8bce9c1a63ca1ab09bd8edc653cbe7d8dbbf94c093cac220c3aa1ab893c1f2e00bcc02
238bc196962bd73e693ba924b9b3cda249dbcb3554d3cf61a6f3ca9a1af3c9388363cd27d393c464c123dc02238b9f6cbacbbf5a0383befd586bceba3ecbcd4d321bce2081cbc79370f3b7dacd8b8cfbf413c9dff38bdb87b54bc8db171bca4f55ebbfd479cbc0d101abc3e68933c70281c3c86f866bc1c70083d1a1a20bb9e79ef3b7c3222bc5f609e3ce3bf6d3c14c924bd3af9ddbbd3f76f3b0ca88a3b37ec23bc33f190bb3559a0bc824602bd078203bd0b7d96bb504ff23b1a94d6394ebc6ebcf9d8e6bc653d773c0d5f5c3c1a1aa03c9a41c1bb6e0f4f3bf03d163cf3fb8d3c9b5a0ebc0e78a9bab318b2bc6b3f30bc61b6063dcc014abc867eb0392ae21dbce7f1873c9f923cbb22fe9ebc6b7c4b3a06942abcd7e05b3caf1d9f3c38a3f53c0313cebc7e9ab13ca5a69cbcfc59c33ce910e93b7065373cc673b33cce57b23c490a8a3bf5dd53bca98f083d0a52223cf6cbac3c10ce113be3bfedbcb9e3e33acc3e65bc897943bd46d8ef3bd36b123db17387bc52e275bc5563523bb56e9a3c5a4c3ebc1a1aa0bc035069bcd3a82dbcd948eb3b1bbf4abc328981bcb1ff643c35963b3c7e1468bc25ce3d3a341c85bbbc27a5bc6619a93bf438293bbdde763c3bd58fbb139eb0bb5651abbcedf954bc7ed7cc3c6881b8bb0055d6bb9f92bcbb10e0b8bb4ac1dbbbcd69593b583371bc4a7299bb61b606bdccb207b852e2f53cc8806dbc1d64f5bc45219eb9e59b9f3bf07a31bd942d613c20e551bbec54aa3b341c85bc58f6553dd4e5c8bc5651ab3b54818c3cbad13cbcdfc4da3c75c8d93b968349ba55ac003c780c1b3b6881383cc3e0afbca4b84338c79e273d5c658b3bd4d3213bf5ef7abc4dc8013daa83f53cd115aabb4c9d8dbbf1f467bcf5639dbc7d5d96bd608b123c3f9307bdfcdf0c376ec00c3cfdc1d2bca864943cb3db96bc02abbebad49606bdf2e2c0bcd678ccbca5e337bbda61b83ce896323a64d567bb0c34683ce45e043b22c183bc75c8d9bcc40ba43c9916cdbcb355cdbcfc59c33c9e2aad3cc022b83c6926e3ba9a2f9a3c16abeaba101d54bc0f2ffbbcb44326bca00cf33b3c00843ce4eae1bcc3a314bd346b47bc6486a53ccda6f43c897943bd2636cdba37af883c925dc2bc10ce11bb', 'Array(Float32)')), +('<dbpedia:Honda>', reinterpret(x'777e4e3d0f9c27bcb19d74bc0b357abc25c5403bb1d4bcbc93489bbc6b6db3bc79b854bc8ce592bb7688ef3b4292cb3b5b70dabcc6fa97bc663a903c138b233df38d33bd2684573d354ae83bbb23033cdbb2e2bc520a143c8196ccbc40ddd5bbaee8fe3c30e0fc3c30e07cbcb40422bc4ee44fbcaab55bbc18f58ebbb9b234bc967800bd2aeb043dbbb5723d47c56e3c3c69c9bc767b903c89309dbb60a3fdbb8703763d244a513884c6b1bc1195c43c457e89bc2b250b3de48639bd57815e3cac6a513c198d013d26fc883891d7cc3b919325bd82cd943d309c55bd0fe0ce3c2da3b83c8d2c783d7c1f82baa20eacbbe15654bd2d6c70bca87b55bbc44522bcc782663d5fd7073d205b553d838cabbb5547d83c767b10bb59f2acbc0f9c273c54449abb767b903c08f8b5bc8d1f99bdd9f00d3c5503313de442923c08b48e3cd215543cd8fdecba1481023d8600b8bcb40422bd6e9d18bd777e4e3da3899bbddfd8a6bc94c30abb660348bbef5dce3b9d29d13bffe2f5bc5e69f73c12cc0c3c4936bd3be1ce85bc6258f33ca6c65f3d43d6f23ce01c4e3c74c69a3de2080c3d59ae053c977bbe3a4877a63ce390da3c2ce4a1bcce6aff3b5b705abcc5c0113b37bb363d314e0dbd424e24bdd1d1ac3c7bf25abca05936bca3040b3dcf1cb73b4a2c9c3c75c9d83c2406aa3c9582a1bc71da5c3c7a33443dbda130bc2bb7fabcaab55b3ce01ccebc2a7353bcdca8c13c3aeb9b3c630aab3b4f9687bc8ab86bbd6e18083d79fcfb3b812b013c7a33c43b8bae4abdfd20a1bcb5c338bd61995cbde67c983a80a06dbc65000abdebe6833dd8b9453da20eacbc6bb1dabc4bb46abd050c783c463da03d5f1bafbc5b2cb33c93489bbdf3d1da3bd8751ebcfc6ee9bc105bbebb8d1f19bbbfdb36bc1aaa84bb2af8e3ba5977bdbc5833963c87f6163a3ea34fbd111ad53cf6bd18bbeaaf3bbcaee8febc883a3ebd5977bd3cb404223cace2023adaaf243d392c85bd234713bce8b69e3c983ad5bcf308233d7f181fbd1a73bc3a5781de3c1aaa043ddc2d523df7f79ebc48bbcd3ca53e91bd82cd14bdb489323d4740de3c9fdec63c0d62a1bc034aa33ca53e113b111a55bbee9eb73ac20b1c3ddd67583d33514b3c3da0113d119544bb258119bdbe297fb86ee1bf3b7fe1563c244a513d624b143cf44c4abbb20b85bb6eaaf7bc4606d83c5a36543cfaf0bb3b8585c83c8ce5123d57815ebc6977d4bc109206bd001a3e3bf8720e3d3b2f43bd938cc2bc8585c8bc458be83c392c85bcb63e283cc159643cb8f31d3d7aae
33bc40149e3c5c2212bc05c8d03c08b48e3bf87fedbb6deb60bc9289043c3506c13c1da321bdd8751ebc3ee776bbe7f707bddd2331bc4e2877bc7aae33bd12cc0c3b3dadf0bc540d523ce40bcabc422780bc8d1f19bc738f523d7096353d12cc8c3d144a3a3de501a9bcf2ce9c3a98b286bc3403033d7e5908bdb2d7fa3c940732bdcf1c373ca059363d18f50e3d950af03c94d0e93ab20b853bab30cbbcf094163de8310e3d4f5fbf3cda88003de9f0a43c728c94bbd9f08d3dea6b94bc1da3213d71da5cbdcb2d3b3d74c61abd79742dbd7d63a9bdb5c3b8bc916c013cb63ea8bcaaeca33be05316bd077d463ddaaf24bda3891bbbf2ce9c3c26c87e3cece9413bcd9e09bd6647ef3c568bffba264030bdd67f3f3c2017ae3b773aa7b9634e523c244ad13b8c60823dc878c5bc944bd93c9ca182bd9296e3bc3ea34fbd5bf56abca92d0dbdace202bdcc231a3d9ce529bdbf978fbc5547583ceb2aabbb5fe4663c95c6483b111a55bd3da011bc628f3bbc9d29d1bc158440bc1c2832bd773aa7bad648f7bb534e3bbdb30e43bda252d33bdaf34bbd258199bca20eacbcc9bc6c3d02cfb33b63d362bd883a3e3d9355fa3cfedfb7bb687416bde501a93cd63b983dbda130bd728c14bbe704e73c7830863bab304bbdf601403a9ee8e7bcba2d243d111a553c09b74cbb9d6df8bca059363cc5cdf0bca6fd27bd83d0d2bbd18d85bb3836263cc7b9ae3c340303bd9aec8c3d138ba3bbe28d1cbd04093a3dfd20213cd0dbcdbcf0a175bc997efc3b16ffafba1ea65fbc9407323de112ad3bc878c5bd43d672bb65000abddc2d523c6214ccbb80930e3d4488aa3ae29afb3cbbecba3b744ee9bb3de4b83b5e6977bd0e2138bb0d2b593accecd13ccb2d3b3c37770f3d3f5507bd387acdbc4ea0283d0e65dfbc5de128bd7155ccbada2a14bc424e243de9f0a43c80a0ed3ca25253bcccec51bd7e6667bbac6a51bde5cae03c5f5fd6bc3b2fc33c2310cbbc18f50e3cf8728ebc66bf20bd60961ebc3740c7bc76bfb73c067a883b7aaeb3bc3b668bbd3fda97bbf10f86bbab67933bf931253c01109d3bf2ce1c3dab74f23c64c9c1b9211aec3abc672abdb1d43c3b0f1717bd944bd93c2b250b3dddece83bd7c366bd6e9d183b11519d3c730704ba26c8fe3c12d9ebbc198d81ba697754bd419c6c3ba05936bdcf605ebc8ce5923cdc2d52bdfde9583d94d0693b625873bdd18d85bb2d6c703cb96e8d3c311745bdcfd88f3b8f599f3cba71cb3c4a7043bc42175c3dba2d243c3f55873db20b05bd6c63123a767b90bc0fe04e3cc1153d3c258119bd71554cbc24062a3cf7c0d6bcffe2f5bb75c9583d97004f3d4bb46a3b9f150f3db015263d7f257ebb3c69493d7f9d2f3d877b273d2d6cf0bad8fdec3c7a6a0c3c615535bbeb2a2b3c2b250b3de6c03fbcdc2d523c0051863c76bfb7bc65ccffbb6af2c3bbf10f063df63888b95503b13cc9bc6cbdcf1cb73b0a69043c0bf1d2baa0d4a5bb37778f3c9d60993c55bf093dd4ca493d20e065bca05936bdf44ccabcde266fbd9f9a1f3db489323da93a6c3d0e2138bc144ababbbe29ff3c59ae85bc8ea7673cc8341ebc9d60993c05ff983c8f667e3c2251b4ba9f9a9fbcde19903caa71b4bcfd2021bc4af5d3bcff9ece3c354a683d4fdaaebcbce2993cfc6ee9bb96855f3cb20b85bcd8827dbcc87845bd4d69e03ce44ff1bbc8417dbcfbe69abb6e1808bd3f55873c6e25e73bf197543b2a2f2c3b9e6357bd2bb7fa3abb3062bc567ea03b5cebc93bade5c03cb1ee01bd7299f33c9193253da8f386bcd73ed63c192f153d6d6650bdec200abc983ad5bc9c6a3a3c2251b4bcff5aa73bb3ca1b3cd604d0bcf1532d3d5ca7a2bb268457bcd57c813c59773dbcc9af0dbc0313dbbb640de9ba5fe466bccd9e893c0254c4bc4099aebc3f62e6bb09b74c3cb97bec3bc20b1cbc9caee13b60a3fd3ba778173da7bcbe3be053163d5099c53be0607538f483123c79ef9cb9f09416bb0d62213afa2704bcbceff8bce097bd3b4d5c81bccc239a3b683dcebaf93125bd35c2193dae6030bc80a0ed3cd5c028bcee67ef3b8c29babcff9ece3ca6fd273c3af87abc1ce48abcaea4573d98b2063d0bf1d2ba034aa33de215eb3c3ee7763c57b8a63c92cdab3c40ddd53cce6a7f3c1de7c8bc8ea7673dda2a143b7c1f02bc192f153b2406aa3b044082bcccecd13ce7f787bc93489b3bbbb5723dfd64c8bafc2a423c1b76fabbe83e6d3c0a323c3c2c69b2bc1b699bbb268457bddbb2e23c7dde983c75c9583d28f525bd05c8d03b3c25a23c3ea34f3bc01f5ebc550331bc392c85bc11511d3d044002bd50627d3ca3cd42bc6fa0563a683d4ebb7e66e7bb458be83a4a397bbba2d7e33c76bf37bcc9bc6c3c7aaeb33c0da6483b2da3b83c4547413c39b1953c8152253dfbafd2bbeaafbb3c9e63573cf1532dbcd604d03cb73407bc8c293a3dd7c366bddaf3cbbc77035fbd48f215b
a3192b4ba916c01bd767b103c50559ebb938c42bcce5da0bcbd5d893c76886fbc60118ebcf83b46bc758531bc89301dbd187dddbcd19ae4bcfa2784bdbba813bd458be8bb86c96fbca448b23c105bbe3c2f1407bdc40edabc7c2ce13b8e63c0bb4beb32bb7f181fbb4ee44fbce053963c6ee13fbcc5cdf03b8196ccbc910e953c2ee75fbcf5fe01bd9582213d01952dbdb63ea8bb1ea6dfbc911b743cf2490c3cde9ea03c79fc7bbd4488aa3c2a2f2c3c76886f3cfab973bbbce2993cfc6e693c16ff2f3ce1ce053d8da4a9bc2da338bb8b6a233c19b4a5bb5a6d1c3cf9fadcbb67f9a63c5be80b3dab67133b3939e43c6ee1bfbc59f2acbcd9785c3de8fac53aaf568fbb4d69e0ba39b1953c354ae8bc509945b811511dbc20d386bc83071bbd6fa0563c9ee8673d192f95bb96bca7bcf542a9bce5ca603d69332dbdade540bcccec51bbe73b2fbb7b29a3bb27bb9fbb29b4bc3cf38db3bc2970153db63ea8ba4488aabb0a32bcbc258199bcc14c85bb9737173d8f599f3cbe60473defe25ebab015263cae60303d86c9efbc7dde183dd8b9c53b81964cbb34472abc094c81bc69ef85bc2bb7fa3a76886f3c210d0dbc34472a3c47b88fbd6af243bc8c6de13ca3040bbc677eb73bd7faae3c2baa9b3d95c6483c573db7bc92cd2b3c4ea0a83b192f15bdc3860bbcd7fa2e3bc1def43c068767bcbd5d893c02cf333dc6c34f3c4bebb2bab489b2bc5daa603cf256ebbc8152253de28d1c3c04d2f13c910e953cbb06003d19f8cc3c528f24bc6d66d0bc520a14bd95fd903cb0de5d3db30e433c0746febcf10f063dfa27043b60961ebab96e8d3c4b2fda3b4488aa3a7f9d2fbcba714b3cfab9f3bbcfd88f3b1a73bc3a4de111bdf58650bd59ae05bc5547583d8f667e3c2bb77a3dc504b93b9289043dcb6403bd69ef05bc47fc363c1edda7bbf60140bc98b2063c297015bd4b2fda3a121034bd177a1fbcc3cab2bc8314fabbfc618a3d8da429bbe975b53cad1c09bbc775873b74c61abc077d463cc878453cb548c93bba71cbba6544b13c0973a53c1643d73c59bb64bb08f835bc805cc63c20172ebdab304bbc4ba78b3c3a70acbc177a9f3bc878c5bcf6caf7bc4caac93c6b6d333c8f599f3c0b289b3c6544b1bc6c2ccabc5ea0bf3bea6b14bca54bf03ce2d1433a805c46bc3dadf03cf8728ebbd01ff5ba1a73bcbc0b6cc23c2295db3cc77507bd1210b4bb392c053d72d0bbbb4de191bc83147abc910e15bd8d2c783a4de111bdb30e43bdd290433de44ff13b928984bce44f713c258199bc86bc10bc69ef85b960110ebd33ccbabc53c9aa3cf77cafbcaf560fbc12d9ebbb08c16d3cc504b93b2f2166bbd73ed6bcd7fa2e3be097bd3c198d01bd8de850bd0aadabbc353d893b7b29a33c72ea80baadaef8bcb2d7fabcab7472bc2406aa3cf4c7393b3388133da1ad813dada119bdb7b9973a973797ba3baab2bcf9fa5cbcbe6047bc6e9d983cfc2ac23c8e63c03cfaac14bdb937453cf8720ebddddf893c90d40e3d24c282bce15654bd8f599f3c45c2b0bce112ad3b028b0c3c44cc51bcff5a27bc8aef333b16ff2fbca54bf03c5a3654bc0e21b83c5de1a83c59f22c3cfa0a01bdcaf6723c0cb0e9390687e7bc8baecabcd21554bb1b691bbd04d2f1bb7a33c43b9edb883c319234bb005ee53b88b52dbc7830063db20b853b16bb08bc27bb9f3c4936bd3c0b357abab11864bb06beafbc09fbf3bb5606efbc3b2fc33bc01f5ebda193bc3cb60760bd7b29a33bb404223c0a69843c192f953c677e373d4292cbba35cf783dae6030bcababbabccdab68bd10d6adbc84c6b1bc5c2212bd20d306bd0a69843cd5c0a83c83d0523d8703f6bc39f53cbc068767bc3ee7f63c24c202bd4fda2ebc1210b43cb404a23c26c87e3cf1532dbdaedb1fbcaab5dbbaec208a3cf6bd983c88b5adbc198d01bd9ee867bb496d053df7c056bccb71623c8edeaf3c2baa9bbb9641383b177a9f3b067a08b75de1a83b94c30a3cc878c53cce6affbb70528ebc0edd90bbc218fb3b20172ebd7f25fe3b4a70c33bc3860bbde34cb33acf1cb7bce1dbe4bce34c333c09fbf33c1f5897bc8c6082ba268457bc7e66673ca396fabc8152253cf2ce1cbd973717bdc09a4d3c9c6ababcc607f7bb501e563c48f295bbaa71b43c683dcebca3899bbcbaf6db3bef1927bd2baa9bbcf63808bc777ecebcaf560fbc1da321bc1de7c83beca59abbe15654bc0ca38a3987f6963c6af243bbcb2dbb3c5fe466bb601eedbc8f667ebcfc610a3c75c9d8bb938c423c20e065bcc9375c3d0643403c424e243cfbf3793c848fe9ba9ce5293c0643c03c5503b13b47fcb63ca8f306bd6647ef3b63d362bceda8d8ba56c2c7bb20172ebb55bf09bd3351cbbbd63b98bcbaf65bbc9ce5a9bc09ee14bc04d2f1bbeca59abadba583bc9d6d783d38ff5dbb1aaa843c7c2c613c83079b3cacefe13c0e65dfbc5a7a7b3b418f0d3958fccdbbff5aa7bc2e
e75f3ce1dbe43c916c81bdd19a64bba8372ebbb8f31dbcc48949bc0b357a3b0e2138bc8ea7e7bbde19103c29394dbd234793bc4547c13cd30bb3bc353d89bcbfa4eebc887106bbf48392bcfc618a3ccab2cbbc39b1953be112adbcb9b2b4bc7b29233d60dac5baf490f13c2354f2bcee9eb7bc709635bd8152a5bc80938e3c9ca1823c82da73bbe77fd63c8bae4abd7a33c43bebe603bd08b40ebd5a36543c9289043dd401923ce34c333d3a702cbdf645e7bc30d39d3cb015a6bc339572bcb6824fbc96bc27bcb8f39d3ca741cf3bd4cac9bbaee87e3cdc641abc9678803bf0d83dbdfbf3f93bc445223cf38d33bc0d2b593b37bb36bb824884bc983a55bce28d1cbd734b2bbb9f22eebc48f2953cf7f71e3b0ca38abc877ba7bccb6403bdf931a53cf483923b1b699bbcaf63eebce6c03fbca7bc3ebb08b48ebc7c1f82bc6258f33be975353c16c8e7bb1092063cb0594dbdb19d743c9ce5a93b4a2c1cb88f66fe3c2970153c26fc083dfa3463bbd097263c9641383d434483bcd7c3e6ba8ede2fbc2a73d33cbbb5723c21d6c4bc7a6a0cbc8aab0c3dd3c78b3c13cfca3c567e203c2939cdbcfaac94bb40141ebb6603c83a16ff2fbce501a9bbb96e0d3c37bbb6bce8b61ebce7f707bb4de1113cf01c65bc35c219bcdee2c7bc80a0edbc7214633c0edd10bc86bc103d4c66223c1da3a1ba28f5a53b538583bc8152a53c98f62dbd1c6c593b812b81bc077dc63cd57c013cb57f11bccc239abc7d6329bd4d5c813c463d20bbab6713bd964138bd238bba3cf483123ccde2303da05936bd60dac5bc6a290c3c5ab143bc1aaa04bc55cc68bb6f5c2f3ce5bd013c2581193c69332dbc6e25e73cf38db33c567ea0bb340383bcd7b6873cbd5d093c1ea6dfbaec200abc7e5988bcabab3a3c777ece3c7500213dc3860b3d3aeb1bbc8ea7e7bc154d78bcbe60c73cab6793bb61d0243d83147a3c454741bb08b40ebd8d2c783c55bf893aee9e37bc497ae43cb30ec33b9f150fbc910e153c7ce839bcdca841bbf8b6b53cf7f71ebd7ca412bc9ee867bbbe60c73cc3868bbc501e563cdb6ebbbb7e22c03ae545d03c25c5c0bc1c6c59bbc3860b3d5ab1c3bbff9e4e3d058429bc36fc9f3bf7047ebc8f591fbced6431bcb937c5bbcf1cb73c52d3cb3c067a08bcbba813bda8f3063b6af2433c528fa4bc468147bb5488c13b7e22c0bb105b3e3d7d63a9bc977b3e3d80938e3c74418a3c5503313debf362b97e66e73b7e22c0bc858548bccb64833cf30823bdc77507bd130693bc4c66a2bcb190953c5de128bd654431bc64c9c1bc2e1e28bce689773cc6c34fbc1f65f6bc9311d33c5451f9bc458be83cb74166bc4ee44f3c3506c13b47b80f3d3192b43cdaafa4bca3040bbc9bef4a3bb97b6cbcd63b98bcf83b463c931153bc3dad703bc782e6bc83147ab9001a3e3c12cc0c3c8b6aa339ee23483c92cd2bbc66bfa03a9f9a9fbb2cad59bd66bfa0bb9edb883c9baba33ac09acdbcfb6b2b3cae60b03adca8c1bcfab9f33b37bbb6bc8c6082bc2509683ce975353c2da338bc3681b03cd604d03c353d093d1f9cbe3c8152253c8e6340bc37778fbb2bb7fabc98f6ad3b6d66d03a628fbb3cb9b2343d3f55073ccb2d3b3df87f6d3c5977bdbc5a6d1c3c20d3063d6f5cafbba3048bbb60da453c1b691b3d863780bb709635bc65008a3bfaf0bb3cba71cb3b08f8353a6c70713bc09a4d3b74c61abce48639bdde1910bc7b6dcaba297df4bbdddf09bc4227003cb8f31d3d3506c13b9355fabc82da73bb5503b1bc1aee2bbcf77caf3cd545b9bbd8751e3b601e6dbc95fd903b064340bd6fa056bc2b250bbc392c05bd634e52bc7c1f82bc96bca73cde19103cd01f753bac262abccab2cb3cc445a2bc824884bc47405e3c3f1e3f3c0313dbbb1ea6dfbcd589e03b9aec8cbcd9f08d3c59f22cbd3da0113d4de191bce975353de8318ebc496d85bcebe6833c7aaeb3bb7e66e73c1da3a13cffe2f53cc548603cd63b98bb7307843b083c5dbc8211bc3ceda8583b573d37bc9d29d1bc8c6de13b0d62a13c5392e23cf9754c3c187dddbb6fe4fdbb568b7fbb33ccbabc60a37d3cfd2021bd1d2b703c59ae053d84c6313cbbb572bc82cd94bb3da091bbf38d333b654431bd0e655f3bb3ca9b3c6199dc3b154df8bc32d6db3c0973a53a74c61abd777ecebc60dac53c62a900bc7f9d2f3d93481bbc767b903a12cc0c3d2a2fac3cc5c0913c74418ab9496d053dbda130bdbcef78bc740ac2bc6258733b0298eb3aee5a103c1edd27bc1c6cd9bc7c1f02bc0440823b1b3253bdd0dbcdbcab30cb3bf2566bbcaa71343c', 'Array(Float32)')), +('<dbpedia:Mercedes-AMG>', 
reinterpret(x'aa898fbcc7faf63c0afdb4bc96f40fbd4686f2bc9462153d3483edbcaafcbfbc2c55a2390afdb4bb0db0fcbb9510b23ce1eed73b2669f1bbfb55e9bc8cc8473cf2d08fbd198ae13b60b014bdbe913f3cbce3a23c5a5133bd6c38163c5244b5bb541a82bb9c6102bbbe1e8fbbbe1e0fbb04c683bc31d0a5bce4d7603cb96dcabc2888bb3a2b1ab6bbf815d2bc2dacb03af9503e3c1785b6b9720aa83cbfccab3c9462153d405d523ca3d39fbd8d1f563c9ecb4cbc1de4173dc22865bdb7689f3c3a8bc03913459fba55a06ebc24bbd43b56ba8dbbcfca053d4d3c62374dc9313b85bb493c2b71c43d6d2a85bd8b1aab3cbf670c3b0e945abdde3b10bc337ec2bcbe91bfbc83b69e3ce7f6aabc4613c23bfafeda3c6ee6b2bbb465f7bccd75fabc8d55973c4481c7bbbba8363aeb1a20bd6d8f243d037d863dceb0663c7d21fcb9007a5ebbe92306bc8b8d5b3c2e5acd3c0d596e3c80cd153da7dd75bb14f3bbbdad7298bc35da7b3b7126cabcce59d8bbc6821b3d863a083c89df3e3d9a8b353cd44509bd376cf6bb4f049e3c7d21fc3c1f7692bc2024af3c39fe703dcf07f53c969d01bca713b73b1b529d3b9afe653c71b3993c67bd123de93197bb3a8bc0bc73b8443d6d8f243d254824bd1de4973bd32b6abd70cf3bbc2b1ab63c06668f3c23d7763bffaf9fb91a38febc8a6c8ebd79c0973c740f53bcc107183c03f0363bb8bf2dbd56844cbc8b8ddbbc32b4833b5d047bbce0b3ebbcde3b103c410befbc8fe7913c9a34273d5aa8c1bcc27f733bbaec083da3d39fbd96f40fbd305ff8bca8f794bcca8cf1bcbbffc43cb47f963ded92fb3a20cda0bc740f533bd9fd7bbc59faa4bbce8108bd042b23bc16a1d8bc55d6afbd1a6e3f3caf77c3bcbb72f5bb007ade3cfb55e93cfde7e3bc05f5613d3daa8abc6d59e33cc48b813be95264bdd27dcdbbc107983ccf94443c06d93fbdf83d023dc6f54bbd0afdb4bc3f06c43c651d873c9d743e3caec9a63c651d07bc8fe7113ccd8f993c7bc5c23d2d76efbb715c0b3d9de7ee3cc730b83b364b293d1042773dcee627bcc15ea63cb85a0e3d8b8d5b3de8da883d8669e63c83294f3d337e42bdf8a2a1bc43465bbc620c4ebd4481473d7b1cd13ca713373d64473a3dde21f13c8fe791bc807687bcc07a48bc8cc8c73ba713b73cabd28c3c42efccba82d2403b934876bc8329cf3c53d184bcca19c1bc246446bd6ce1873cc7d9a9bc98f9babb638b8cbc9d743ebd0299283dbf759dbbeb1a20bcbdc7003c57030b3cfb268bbd9fafaabc01b54abd3e58273dc07a483b9d3efdbc206881bc11b3a43b6cabc6bcbd3ab13c0d730d3cfe01033d552d3ebc503f8abc117d633ca8e983bd869f27bc46bcb3bcfdb8853c664ce5bc41627dbd88fb60bca1dc05bb266971bcebb580bc84d76b3c40b4603da49dde3c0b8a043d8097d4bb397884bc3143563b73b8c4bca49dde3b6cabc6bcc568fcba27a45dbd0b5443bb2683903b78dc393dd6d783bbc7a3e83c45d855bccee6a7bcf27981bb8c71b9bce32944bc21eeed3c9f4a0b3d117d633de2d2353d7ca9a0bcbac4d8bc66d9343c6e73823dd20a9d3ad1263fbc7e055a3b7a3873bce1ee57bcc7fa76bcb260cc3cd27dcdbc496ffb3bee9e893c5f59063add00243d8fb1d0bada179b3ddd57b2bba10b64ba0f7838bd0afdb43b0299283d8fd900bb1ba9abbc5b8c1fbdbb72753da8f794bc3dcbd7bb0f78383c2cc852bcca8cf1bb7e3b1bbd995049bcc59e3d3dac8ebabc337e42bc154a4abc0aa6a63af583d7bcd72e12bd8c3bf8bcee9e893cfecb41bda141a5bdbc56d33c174f753c094187bcbafa993c1b73ea3caf0413bdeadf33bda42a2e3ca346d0bcb02560bb73b844bc15d719bd678751bd34b92e3d0919d7bc3dcbd7bc6aa61b3d0582b1bcbe1e8fbd5dc70b3dfcac773c92d01a3cc6829bbb3a8bc03cca19c1ba75f330bd65f5563a878305bd829cff3b16a1d83c9227a93b4eeafebc81ee62bcd3d45bbd185b83bdfbe2383d26c0ffbb42258e3c5d3a3c3d5b8c1f3a96bece3bdc73d4bb4792803b439de93c29150bbcb8bf2d3c8c71b93b5ade023c50256b3ca10b64bd9b6f93bb414130bcb109bebc7ca920bdf2d08fbc3f93133df9f9afbc2b71c43b9554843c95b9233d3e58273d1ba9abbcea6c03bd305ff8bc47a011bd1e05653cff5811bde2d2353d120ab33dc30cc3bcefcde73ab7325e3ca6bca8bc6fca903d3a55ff3b2b1a36bca8c1d33c427c1cbd3f9393bc174ff5bc75bd6fbdc59ebdbcdd9b843ca346503cfb260b3c2f3e2b3bb1093ebd758000bd5e75a83c0a70e5bcd885203cec712ebd4c1b15bde5bbbebbfa341cbc937eb7bc34b9ae3b3a18903d015ebcbbbb35063d89dfbe3b0bab513d2d03bfbc32b403bd851258bd96becebc516057bc249a07bd740f533c2f3eab3c02421a39bce3a23c829cffbb117d633cb02560bc64473a3d6a19ccbb817bb2bcea364
2bd1833d33c5f2345bd04d414bd6a194cbc21080dbd6411f9bc1dae56bd995049bd8245713cb816bc3cb62db3bc2e5acd3cc4f0203c772e1d3da8c1d33ce216083b3ef3873cf15f623a2852fa3c8d1f563d83b61e3a3c1dbb3c5d914a3cdb3868bd951032bbcbc7ddbb1e05653cff4a80bc519698bc4f5b2c3d34836dbaa94e233c2dac303c3f3c853c1dae563c57030bbc154a4a3daac67e3d13ee903d7bc5423cf130843d820802bced1f4bbcbc8c943d46bc333ca6bc28bd00d16cbc1e1f04bdd445093debc3913b0b8a04bd00d1ec3cabe01d3dfbe2383c0919573deadfb33b0f212a3c259f32bdce8188bc3cc62c3d0afd34bc3095b9ba07142c3dcc1e6cbd740fd33b461342bcd3d4db3c85bb493d14378e3d5732e9bcc96b243c76d78e3b4e77cebc42250e3da96ff0bc08f889bb8fe791bb266971bce5bb3ebdfe7433bd8b1aab3cccef0d3dcba610bd840dad3b484e2e3d114e853c98f9ba3b178536bd372f873cfbe2b83cd6bd643cd59c173c7c735f3da0d077bc18c022bc410b6f3bbbff44bcfd9055bc0bab513c5244b5bc2d766f3c0c02e03cf03e15bd772e1d3da918e2bc7c44813c5b35113c61d1e1b97f9229bdc48b813c854819bba346503baf04133d607a533c7c002fbb3cc62cbcbafa193d5e75a8bc7f9229bce0b36b3b95b9233df83d823cd482783cf86ce0bc2c1fe1bc552dbe3be7f6aa3cf9503ebbafced1bb5109c9bb8512d83c2ac3a7bcbd0470bce6857d3cf1ecb1bc0c02e03ca62fd93b2cc8d23c2be4f43b727dd8bb2ee79c3b5f5986bbc399123c863a08bd8952efbc740fd33c2ee71cbda1b4553cb17c6e39df5c5d3c4686f2bcceb066ba0787dcbc149cad3a4bc486bc13b84f3abac458bc78dcb9bce245663bf3190d3c7ca920bc28312d3c3b39ddbc746661bc47f79fbc715c8b3cb05ba1bcf00854393e22663cbf3f5c3d0c38a13b63f0abbbf52cc9bcc363d13c92d09abcc4ba5f3da9a531bd1c8d093dab372cbce769dbbc2852fa3ae2d2353cbaec08bb2ac3a73c75bdefb843d3aabb0ac7f3bc7de48c3c6ee632bc317917bd9f79e9bc4162fd3c2cfe93bb7445943c3daa8abd6c3816bcba1be73c4d93f0bcdbc537bdb2ed1bbd9227293d154a4abbc22865bd94d5c53bfcc616bda2efc13beb1a20bc951032bca5d84a3cf667b5bc49fc4abb0234893cc39992bcb5d6243cbdade13c92d01a3dbf3fdc3c171286bc3dcbd7b9c81416bcec71aebc2e908eb744f4773b385054bc5d914abc68dedfbc7e2d0abb87c0f43c3f9393bce3b693b93a557fbc15808b3bf667b5bceeac1a3c94d5c5bce817f83b6e3d413ca298b33cbaec883cb7689fbc6814a13c6aa61bbd607a533ae6488ebd2cfe933c7d57bdbc9be2c3bcc15e26bd547f21bd2068013d0aa6a6b903f0b63b5cadec3cf61027bde98825bc2f3e2bbd825f10bdaf04933c3f9393bcec3b6d3c584c88bc812424bd8ecd723ca96ff0bbcc54adbc63f02b3d17dcc43c28883bbbdbc5b73c4346dbbc3cc6ac3bf86c60bc36f49a3c9095ae3c27da9ebcbbff44bcc9c2b23b7917a63c6bfda93c38f9c5bce040bbbc1dd6863cb5a0e33c8b8d5bbc4818ed3c5995053da10b64bcb7db4fbd5e1009bdbafa99bb0cd3813a9095ae3cf610a7bc4e2040bc1a6e3fbba6bca8bc5ce32dbcf8a2a13b1297823c16f8e63be93117bc4f041e3c96bece3c22518a3cc511eebcf03e15bd7e3b1b3d698c7cb96b1e77bc749c223c24f1153c9de76e3d40b4e0bc2229da3a130fde3cbf759d3aba51a83cfc6f88bccf3db63bf8a221bd4e56013d8a6c0ebbccabbb3c5d3a3cbd7e05dabc4a899a3b130fde3cfafeda3b1c57c8bb4bc486bce39c743b4481473dbe91bf3b015e3c3dae20b5bb664ce5bb049ed3bcdbc537bdfd90d53c46bc33bd316b06bdce59d83c0582b13c2be4f4bc16c988bd77200cbdcc1eec3b2e5acd3c7437833c8cfe883c154a4a3c25e304bccbc75d3c9c90e03c7d573d3bf24340bd27a45dbc6febdd3a7e3b9bbce40d223d905f6dbc01eb8b3c607ad3bc5e75283bd566d6badd9b04bd0c38a1bcebb500bdee03a93c2145fc3cb465773cbf759dba552dbe3c6bc7e8bbf8a2a1bcc8bd073d5ade02bc6a194cbc5a5133bc8097d4bc1712063ca657893b24f1953c7daecb3c883122bb364ba93bec71aebbc1d1563b3095b9bcd72e12bd7078ad3c829cffbc4a53d93c1e05653d56844c3cdd9b04bdad72983bfc39473cd013833b9afe65bb14666cbdb5d6a4bb63f0ab3cd98a4bbab02560bd8a36cdbcf24340bc96f48f3cf9f92f3ded1fcb3cf815d23c8b1a2bbc6814a1b9a37c91bb874d443c6d59e33c8e9003bd007a5e3cda171b3cc34284bc372f87bcf03e95bb2b1ab638b17ceebbba5128bc359d8cbc120a33bb99dd983cbd04703bfc6f88bb59c4633b510949bcbf966a3cd9fd7b3a3e22663a94d545bc1d3ba63b2a36583de3b6133d5f59863ccba6903a2d39803c0c02e03c9e018e3c
e92306bddae159bd25e384bb5768aa3be92306bd1917313de84db9bc5b35113d8208823d2145fcbafafe5abd18c0a2bcab37acbc85bb49bbae93e53be480d2bb6c38963c49320cb9c07ac8bcb8bfad3cac58793c1a38febbeb8dd0bc686b2fbde93197bb6c3896bc6b1ef73ce8da883c2c55a2bbaf0493bcd59c17bdb2ed9bbcee03a93bf66735bc3b6f1ebc40b4e03bf583d7bcbe1e8fbc13ee903cbfcc2bbbe0403b3b6f944fbb01eb8b3c15bdfaba7a6e34bccc54adbcd27d4d3b0e94da3c1fe942bd64473a3ce84d393d67bd12bd23b6a9bc4c8e45bc9d3e7dbd1c00babcd84fdf3c7fe937bd67bd92bd1dd606bc83805dbb743703bd36f41abcc7fa76bcac5879bcddcae23ce3b613bce3b6133d427c1cbc652b183cc906853c547f213d7078ad3c0c8f2fbb4ce553bcc93563bcdb3868bbed1f4bbcc6f54b3c812424bdc7748a3ccb70cfbc3f06443bb7681fbd52eda63ae952e43a55d6af3cabaa5c3bea6c033d1afb0e3ccd02cabcf5b918bc92c2093dc023bab94f049e3caf77433c55f77c3cf29ace3afe01833c464983bb5816c73cdbc537bda225033dd20a1dbd7c73df3cc0233abc296c19bc8040c63b6bfda9b8698c7c3b2cfe133d824571bc9d1d303baafcbf3c04c603bd4a891abd185b03bda29833bd62420fbb5bff4f3cd1cf303d023489bc64d409bcb3df8abbd72e92bb7c00af3c0299a83c539b43bc3c1d3bba70782dbd70cf3bbd57030bbcbf751d3dad72983cc0233abb6e3dc1bc79c0173bfcacf7bb3179173dddcae23c98a2ac3b8040c6b807875c3c0c3821bc3b6f9eba04d414bd4b37b73850b23abbf583573c3cc62c3b4481c73b68af81bbfa349c3c57682a3d85f10abda76ac5bc84a88dbbd6d703bddd0024bd59c4e3bcf19523bd68de5fbb4e774ebbd078a2bc16c9883cefe706bd9be2c3bc0f78b83c4ae0a8bc99dd983c023409bd2c1f613ba3d31f3c69c2bdbb3715e83ae245e63b03f036bdfcc616bc7917263c5e1e9a3c2ac3a7bcb71111bb6febdd3b81bf843c03baf5b9922729bc2cfe933a75f3303ce480523dd98acb3cb42888bc2a36d8bbe3b613bc7ca9a0bc4eeafe3a3cc62c3de245663b0feb68ba842efabc85f10abc2d39003d2a36583b874d44bb0cd381bc8c3bf83c162e283d56111cbd2cc8d2b96c0255bdca19c1bb4ae0283c9e018e3cd8a66dbcda6e293cbc8c94bd599585b9aa32813acf3d36bc9a8bb53b450006bcebc311bc503f0a3c484e2ebd1dd6063d6730433cef2476bb1afb8ebb105c163dbf670cbdbf751db9b3df0a3bc4ba5f3cc9c232bc4dc9b13cedc8bcbca1b4553bb96dcabca346d03c5f23c5bcfecbc1bc615eb1bc4dc9b13bd820813c54f2d1bb2cfe933b9de7eeb878dc39baaa53cebbd48278bb4d7223bc9b6f933cf1ec313cd59c17bcfd90d5bcce59583c817bb2bb0b54c33c0007aebce6124d3dca19c13c44f4f73aceb0663c6cabc63c5b56de3a302209bc4481c73c9afee53b36f41a3d6b54383c5bff4f394ae0a83c305f78babdade1bc171286bb4a53593de7f6aabcd59c173dce8108bd5b8c1fbd3886953c23d7f63a7d217c3b8ac31c3d88cc02bbb465773c41983ebc24f1153db465f7bc905f6dbc126141bc330b923b6007233c24f1153cfd1d253cba1b67bc6b5438bb8548993b43d3aa3c4f5bacba88cc023a740f533c0afd34bcde3b103d520e74bb149c2dbcbaec88babc7e833bba1be73cf58357bc100588bc6bc7e8bcc59ebd3c5ce32d3b5d3a3c3c6f219fbc6935ee3c72d4e63aefe7863cfa341c3cf4d53a3dc730b8bcbf670cbcaac6fe3c372f87bc4d93f03b91798c3c9462153cd84f5fbd0ac773bbf29a4ebcc88746bc92c2893c1b736a3caa890fbca7a0063de931173d3be24ebc020c59bb05f5e1bc81bf043cef24f6baa9a5313d55a06eba186914bd5fccb63c9d3efd3b85bb493bdd00243ce27ba73cda6e293c7b1c513c539b433c1a38fe3c7fe9373b55f7fcb8abaa5cbc547fa13cade5c83c80cd15bc39fef0bbfe7433bcfadd0d3dc447afbcd3fc0bbd70cfbb3a15d7993cefe7063c937eb73b791726bbd20a9d3c736136bc5109c9b9a05dc73b3f79f4bc9d743e3c0c38a13c91b67b3ccf07f5bb5a1b72bb3d01993a330b12bda76ac53a58a3163d95b9a33cc27f733cfd1d25bdc81416bd9b6f133b686b2f3b0c02e0bcfb55e9bb5a51333c664ce5bb1917b13bee03a9bb2d76efbc16a1d83c2dac303d3dcb573c03f0363aad7298bcf74b133c2d3900bc450e973a7f92a9bc90085f3b09c2483b53d104bbf3f1dc3c52ed26bdf9f92f3bc730383c1785363b528807bc309539ba3d74c93bb025e0bc3f93133d1f40d1bba7a006bdafced1bb61d1e13c7b52923ba6651abd8d76643ca62fd93b2dac303c8dac25bd13ee903cc64cda3c4c0d04bd69356ebdc814163dd3612bbd5e3f67bcca8c71bd8b1aab3b05f561bbc30cc33cd0ebd23c1c57c83b06d93fbc7c44813cf15f623c2a8
de6bc23d7f6bb2229da3cd32bea3b732bf53acb70cf3b6a705a3d00d1ecbc22d24bbc81bf043d07bd1d3caec9a6b98cfe88bb6aa69bbc397884bc39dd233dbe5bfebc9d1d30bc969d81bc4fcedcbcf1ec313c1e9234bcc48b013c07bd9d3ad59c17bdc6821bbd547fa13c6ee6b23c1261c13c78dcb9ba9b1885bb71997abcf74b933c262c023cc59ebd3cef5ab73c4162fdbc5a51333cbb72f5bc405d52bc02429abc56ba8d3b8097d4bc1dd606bc3dcb57bcebc311b7bac4d83c3b39dd3b58a3163defe7063cd32beabae0e92cbbbc8c943b5cadec3bfafe5aba5d91cabcaafc3f3c746661bd01b5ca3c88a4d2bce69f9cbc4a53d93c0d730dbd1e1f843ab30ee9bceadf333c5889f7bc22518abb0eca9b3c105c963bf2d00f3ca7ddf5bbbfccabbcd56656bc4a899a3ca7a006bd54f2d13c06d9bf3ce39cf43cbce3a23c974b1e3ca7a0063d3179973c955404bd36f49abcec3b6dbc23b6293aa94ea33bee76d93c61eb003ddc1cc6bc37a2b7bc6eb0f1baaec9a63bdf921ebbfb55e93c1a38fe3cc96b24bdc39992bb06d9bfbc054cf03a4f5bac3c12d4f13b75f3b0bbabe09d3cef5a37bceaa9f2bb042b23bde8a4c7bbbf759d3cdd9b043cdca9953998a22cbd61d1e1bc14378ebbb62d333c98a22cbb6b1ef7bae40da2bbee9e09bd2a5e88bc77f8dbbb77200cbd8ac39c3bce5958bc50b2babb16a1d83b1daed6bb65f5563d8d47063bc447afbce8a4c73bd59c97bce93117bc439d693ce2d235bdd07822bc13ee10bc2d766fbcbba8b6bc0e3dccbc3ce7f93ce3b693bce12419bbf49f793c268390bcbf751dbcea6c03bd6411f93a48186d3c55a06ebb21457cbbf279813c4fce5c3bfecbc1bba5d8ca3cbba836baa5813cbc274d4f3cf03e953c7126cabb2464c6bc1785b6bb268310bb3b6f1e3cc814963c7daecb3c020c59bb36e689bcd27dcd3b7078ad3bec71aebcf4d53abb2be4743cda090a3cbf3fdc3c92d09abcb5d6a43ce48052bc1c00ba3cfdb8853cf448eb3c18c022bb9ecbccbc2145fcba740f533c874d443c57038b3cc023ba3cb2968d3cfcacf7bc774f6abc2145fc3cbe913fbc1d3ba63b7d57bdbcd2b30e3cd2b38ebb81eee23b69c2bd3cca19c1bcb54955bb80cd15bc13451fbc06d9bfbb', 'Array(Float32)')), +('<dbpedia:Mount_Everest>', reinterpret(x'da781fbdb612d03c051d3fbcf6b833bc8de0fcbc5c420e3c8acc2fbd3a899abc244eb0bc3eb31d3c9b69e1bc1c05053d84c705bc0051bbbceadc2abd4cd3273df6b8b33a1518f0bc7db780bcce2861bcf516b3bc89b6f9bc3f559e3c9cade23c455ac83ad7f01c3c04e4983cda6dc4bc06bfbfbae22a253cd3689a3cb251273c7d4e26bcb03b713d72ab433be801043d6a62183d7fc0f2bab25c023d2978b3b90dcf443d58af303d17c54b3c25e5553dac85a3bcb3f3a7bc8accafbc4c6a4dbc6eea9abc803f83bc13a6a33bd077bdbb94c2b6bcae604abc03a017bd45f16db932635f3d677c963df3c7563c83b14fbc69c0973dd6e541bc4416473bbce9ae3be510273b13a6a3bc5108063dcf6ce2bc1da785bb4e45f4bc97e1debb2734b23c104cec3cf7658fbbae60ca3cf940b63b7d4ea63cfdc838bc6b04993cb0c780bdeb15513d2f4f12bd49e24a3cb82806bdb67b2a3df04aafbae968f5bc5cd933bd8a63d5bb35ebe13c8814f9bb891f543d4fe7f4bced64ad3c215d533c09a5413d3ccd9b3ca09ebf3c8b05d63c057bbe3c3093933c5799fabb23a1d43cbf66563b67da15bdc6df353c95fbdc3c4cd3273d28e10d3c4c3c82bcd8921dbc2b5eb53cc6ea903cd496e53c2ff112bdb1ba813c9c0be23cc548903cd496e5bcb395a8bc2734b23b5108863c98ecb9bce22a25bd14df49bcb786053d7a5dc9bc9a3b163c56d4893d1cfaa93af6b8b33cb32173bc8f8d583ca501693acc5895bcfcbddd3c81e1033db27f723d437f21bc969ddd3c8acc2fbd91dc343d91dcb4bc1d3eabbb938990bb7e1e72bb7de5cb3c3b2b9b3d1f82acbc7e7cf1bb26fb0b3c35543c3cb54204bdc11e0dbc408ec4bb33a7e03c8a358abc051d3f3c2bc70fbd197d023d1cfa29bc8c10b1bc7f294d3cf89e35bdd962e9bb188a013cf33b8c3c28e10d3c437fa13b9c0be23c230aaf3c4cd3a7bcc54890bc9883dfbc932b113d6f2e1c391e49063df3d2b13c37a3983b74581f3dea4585bcdcbc20bdf33b0c3d1dca75ba8a350abd164680bc89b679bc6771bb3c4da373bc0dcfc4bc92e70fbd0dc469bc6f23c1b78a63d5bb6af93d3da035e5bcc723373db5ce4e3dcecae13cc3620ebd188a013c89c15439bd17fa3b71a0683c3e4a43bc45c3a23ac013b23bd124993cd0e0973cddf5c63d341b963b1f822c3c3ab765bcef11093cd538e63c41991fbbfd3193bb051264bdc548903d4979f0bb8d49d73b42dda0bcd0e0973d80cb4dbb9c0b62bdcce45f3da501e93b663895bc9c16bdbcb169813c3d06c23938dc
be3c899309bcf6c38e3c5846d6bbf57f0dbade0022bca9fd203c1bc103bdf75ab43bf9a9903c616c91bb5951b13a497970bcae55ef3bebacf6ba9bdd96bb76d5c6bb6103373cc3620e3ce6bd823cc1aad7bc5a5c8c3d4416473d759ca03c101e21bd23158a3cdc53c6bbf0787a3c9389103c4665a3bb341b96bb70d01c3d5afe0c3de9a3043d6f23c13c99f714bdd67ce73c835350bc1c0585bd6638153cf230313dba0e883c3a899abc5089f5bc208d873cc071b13c8f010e3b28e18d3d781948bdab6fed3c2da2b6bcddf5463b437f213971721d3d46fcc8bb4fe7743dde0022bc6af9bd3c44b847bc27cb573c81d628bc4bc84cbb7528eb3c5b2cd83bf2990b3d1e6cf6bcb8edf63cbb47ae3cb71d2bbddf44233b52aa063d8a350abd1f77d1bbb71d2bbdf183d5baab41a2bcde00a2bc23387a3c84eaf53bbd177a3d860b07bd2ff1123dd3689abc80cb4dbc3798bdbbeb15d1bb1867ccbb90c6fe3b4a1bf1babc75f93a961193bb9e5abe3aca72133c6771bb3cb903adbc845eabbbd077bdbcd2c619bc6b3264bce51b02bd6af93dbdb98ff73c23158abd3310bb3c6a6218bdd0773d3cf22556bdda781f3bb565f43ca38f1c3ac1aa57bc1d33503d41991f3d7c1500bc368d62bb37a3183d9d4fe33aee6f883b694ce23b2fe6373ace33bcba355f17bd7633c63cb71d2bbb3798bd3b5cd933bdb1ba01bdbfda0b3d4eae4ebd69b5bc3cbc5289bdfa4b11bcf3d231bdf369d7bc899389bd4025ea3cc008d7bc16234b3dc82e12bdf940363c7ca1cabc06b4e43b38d1e3bb58af30bd6708e1bcfbed11bdf807903beb8906bcc5a60fbc15814a3b32d7143ccf3e173d874f883c6c3dbf3cd6e541bda4c8423c534c07bdfae2b6bb1872a73c8db231bc1cfa293d33103b3d0046e0bcc1aa573c8de0fcbb1dca75bde6494d3bb542043cb9032dbcec2b073da4c8c23c1bc1833deb89863ceadc2a3d892a2f3bfb1b5d3d51945039d1b0633c034218bd6638953cec2b073db82886bcb8ed76bbdb1a203deae705bdeb20acbc2f7dddbb6a6218bda14b9bbca7aec4bcf6e6febc8597d13bb612d03a961193bdc1aa573d65c4df3cdcea6b3d120423bd97e15ebcc1aa57bd0803413a34b23b3d56d4893dcd913bbdb0a44bbd677c163d1d9caabc7819c83c9bdd163d45c3223ba7b91fbce824f43cbd8b2f3c74fa1fbd355f973ca32642bd32635f3c32d7143cd3689abc26fb8b3c67da153d283f8d3d14d4eebc3e3fe83b49e24abc4979f0bc7b68a4bce82f4fbc034298bb1872273c2338fa3b2a1ab43c197d82bdaec9a43a172ea63bfdc838bd9bd23b3dda6dc43cf0558aba7ac6a3bb11ee6cbd623c5dbca2ed9b3c5532093def11093ce824f4bb8f98b3bc7c15003d309393bc0470633d2abc343d4e2204bdfcbd5dbc776cecbc430b6cbca3bde7bc3379953b5a217dbd803f833cb565f43c2abc34bc397e3f3dfbed11bd46fcc8b87ddaf03c0a529dbc759ca03b4fc4843c8e54b23b326ebabc2052f8bb610e12bc821a2a3d8325853cb03b713d66cfbabc208d07bd845e2bbdffa35fba497970bcc1b5323d9459dc3c67713bbca3bd67bb286d58bb534c07bceb15513d7c43cbbc94c2b63aea4505bd8c10b13b6352933b54ee073d06281a3dee062ebce824f4bcca72133d52cd763b41c7eabb41991fbd8c798bbbb6e4043dbd17fa3c25e555bcde9747bc01f3bbbc2ab159bd52412cbcc54810bdffaeba3c3915e5bc9cade23cf36957bd2a250fbd9f93e4bc41c7eabb8c1b8cbc60ca90bc53e3acbc430b6c3d6b90e3bc7d4e26bd610e123d8dbd8c3c23150abb229679b9423b20bdf57f0d3dfdc8b83cd35d3f3cb612d03c3a20c0bc00e8603d5b2c58bd0dcf443c8de07cbd104c6c3c14dfc9bc834875bc24b78abbe65428bc52aa863d76ca6b3c7b0aa5bb610e123db00da63c82a6743c5fbf353bd1b0e33ac681b6bcb54284bc59ba0bbb537a52bc4c6acd3ccd91bbbcca09b93c28e10dbd89b6f93a5f56dbbc5136d13c4d75283d6fbae63b9a3b16bbe2b66f3cd8341ebdc14cd8bc0337bd3bfad75bbdd0e0173d4bc8ccbb2a1a343c94595cbc3da8c2bc4bc84cbc6fba663c78b0ed3cf3d2313cd4a1c0bc22d1883c9c7f97bdd71ee8bbbb47aebc3bc2c03c089ae6bc51088638104cec3a7777c7bcb96c073dc78c91bcdfd06dbcbbb008bc4025eabb0f13463dd3ffbfbc355f973cf7650f3d9cade239093c673bd8c0683c0bf49dbc5799fabcd2c619bcc07131bc2629573dc6765b3c68aa613cf3c756bdb565f4bc6b04993cedcd87bb16ba703cc07c0cbcb1ba01bd9611133c3973e4bb5f565b3df33b8c3c10c0213db3fe02bdb2e84cbcf3c7563c2a258fbb6cd4643b45c322bd9320363c368de2bbe782f3bc1f82ac3ce2b6efbbb4a003bd1cfaa9bc10b5463bbffd7bbc1c05053d9506383cf474b2bb279d8cbcf2bc7bbc892aaf3c60f85b3cb7b450bd9bd2bbbc9855943c116222bade9747b
d18094d3cea0af6ba58afb03ba326423c2b5e353dc4325abc77e0213ded64adbc597f7cbd5d077fbca56ac33b5eb4dabcc071b13c104cec3cd35dbf3ce07dc93cf940363c8a350abc7486ea3ca1d7e53cb5d9293cb1ba813a803428bb413045bd0b8068ba86ad073d8c3e7cba23387a3c4d75a8ba31c15ebb4c6acdbc0ab09c3c903a34bc6d481a3ba5d31d3da4c8423c8eebd73bfe6ab93c44b8c7ba4aa780bcfae2b6bcca72133d075665bc74589fbac07c8c3c8a63d53cbe968a3c3449613c6b32e43c4840ca3b3fecc3ba57768a3c197d023d1c91cf3c83b14fbcb0c7803c856986bc50fdaa3b5c428e3cd71e683c1697003c62a5b7bcc3277f3cb6e404bd90c67ebc0be9c2bcf9a9903ca4311d3bfe015f3c120423bbd5dae6bb3135143c1c0585bcdb48eb3b86a2acbc37983dbc6638953d835350bda14b9b3b7c66003d68aae1bcffaebabc8f8dd83b677c963b21c6adbc41991f3d00513bbd1b58a93cf5add8bc3f83693c8dbd8cbcf33b8c3c8c1b8cbcfa795cbc7109c3bb40f79e3ce75f833d9ef1e3bcfdd313bc677c16bc3135143d2a1a343cf3c7563c6fbae6bc14dfc93c8348f5bc519f2b3c1914a83c66cf3abd54852dbcb32173bd7dda703c4dde02bd5af3313d8a350abd56d4093d6e81403df1ec2f3dee6f08bd78b0edbc90d1593c1ae4f3bce9d14fbd5b2cd8bce6bd023bb8ca063da140403c6e8c9bbcf2998b3c1162a2bdde00223c23150a3d441647bcce9c163dba0e083d6af9bd3ca745eabbc1aa57bca09e3f3baa9f213d2f4f123d10b5c6bcbd17fa3cf2998bbb71a0e8bc309393bbc548903b286d583ce80184bbdc5346bcbe968a3c0761c0bcfdc8383dc78c91bb0d389f3b8a350a3d129bc8ba32635fbb3eb39d3afbed113db1af263cad1cc9bc8ca7d63a56d409bd5b95b2bc8744adbaebacf6bca140c0bb62a537bdb1ba013d8c1b8c3b093ce7bc07ca9ab93d119d3cd2bbbe3b9bdd963c022ce23c3cfb663d7db700bdcfd5bcbce51b82bc828384ba2338fa3b8e827d3ce7f628bd820fcf3c4f5b2a3b28d6b2bc2d0b113d1a1f03bc3449e13ca85b203a051dbf3ba38f1cbd07ca1abc197d02bca5d39d3c8e5432bd2c975bbc9b743c3d950638bc1ae4f3bcd1b0633c1c914f3c9b74bc3a54b3f83c1b58293d18094d3b423b20bc9f9364bc215dd3bcf18355bc5ba08d3ce6bd02bda03565bcc2eed8ba28e10dbb3d06c23c5cd9333d120423bba3bd67bcba31f8bba035e53cf28e303db0c7803c663895bc494ba5bcf1ecaf3cd40a1bbd5ea9ff3c7caca53cbce92ebd38dcbebbbd22d53ab3fe82bb18094d3cd0773dbd090e1cbcdceaeb3bb9612cbd1c0585bcf33b0cbd89c154bcdf44233d1ae4f3bcda781fbc7067c2bc84eaf53cd54341bcb5d9a9bc84c705bda035653cf230b1bce3d7803cbaa52dbd2beaffbb35543cbcbd8b2f3c24dafa3c6d481abdd64e9cbcc8c5b7b837a398bcd0e0973c2734b23c1d33d0bcf6c30e3dd829c33bd8349eb93e3f683c4fe7f43be428013cb6a9f53c27c07cbd52aa063cb1ba01bb57768abb938910bc07f8653d7109433ca1e240bc22d108bc215d53bd759ca0bb6c3dbf3c25590b3b25f030bc60f8db39b8edf6bcfe01df3c0342183b00ba95bcadb36e3a1da785bba60c443d8e54b2bb81e103bc663895bbbd22553bf400fd3c1feb063d8b6e303a00513b3dc9d012bc5ba00d3ce65428b9d124993c8639d2ba74581f3ca035653a5c420ebce75f03bdc85cddbccc4dba3c3da8c2bc88f1883cace3223c80cbcdbce75f033c8bd78a3d67da15bc9855943bc4c9ffbcc09ffcbc845e2b3dfa4b91bc9c163d3cf8fc34bcb54284bc8178293d62a5b7bc4da3733d5d70d93c9925e0bbf1f78a3cb27f723ce3fa70bd8772783c35ebe13ad2c619bcf5a27dbc63e938bb9bdd163b397e3f3d81e1033c79bb48bc7486eabbace3a2bc856906bc53e32c3d5b2cd8bc3f83693c7067c23cc681363b22d188bb2734b23c208d07bbf516333c8b9cfbbc887dd33cfd31933bcc4d3a3d2edb5c3b2c6910bcb27f72b8c01332bdb4a003bda7456a3c6d489abbb3fe02bc56c9aebbb7b4d03c8d49d7bcc1aad73cca09b93b10c021bb998ebabbfb84b7bcc676dbbceb89063da5d31d3aa9896bbaaf6ba5bc64225fbbb437293deb8906bab03b713c781948bac4048f3c0faa6b3c7c38f0bb7d4e26ba8c10b13b77e021bb76ca6b3df5add8bb14766f3b372fe3bcf225d6bb8acc2f3d00e8e0bc6aeee23aa6a3693a5a8ad7bc283f8dbcb8ca863de3cca5bab07680bc0c969e3c1914a83b1448a4bb4c3c823cb437a93be3d7003c5e868fbc4fc4043cd6e5c13c26fb8bbc48a9a43c36f63cbc086c1bbd8acc2f3c4fe7f4bc1162a2bcb54204bde78273bc8597d13b4e17293cd0e0173d53e3ac3c4e4574bc54ee87b96d76653c49e2ca3c437f21bcbdf4093b48404a3cde97473c0803413b81d6a83c553209bc35ebe13cfc26b8b9aa36c73bb8
bfabba6a62983a1cfa293b9bdd163d4a26cc3c17d0a6bc927e35bbf04a2fbc4e17a93caacdec3c5c65febc27cbd7bbd5dae63ce2c14a3de2b66f3c1feb063d6e81c0bb297833bd5b9532bc8bfafabc1157473b1e4986bcc967b83c2d0b113bcf3e97bccf3e173ddd5e21bd56f7f939cb1494bc19a0723ad8921dbda501e93cd8929dbcf80790bc0b8b43bcdfd0ed3c534c07bb34b23bbc0803c13b541c53bbca0939b8848c76bcf8fcb43cc4c9ffbbd1193ebd99f714bdfc26383c18db01bcb09970bb7e5901bda7ae443c7b0a253d408e44bcb42c4ebc506685bcbfda0bbd051dbfbc734dc4bc94595cbc10c021bca7456aba1697003c24ac2fbdf36957bca56a43bbb437293bb570cf3ca284413c290fd93c16978038705ce7bc852ef7bc0eda1fbc5bc3fdbbd8921d3de78d4e3bec2b07bd6a62183bdd5ea13cbd2255bd18674c3cb856d13cee6f88bbf0787a3cd0773dbcf47432bcdf44233b9320b6bce47901bcad27a4bc49eda53c0470e33bdd8cecbcc4040fbd3eb31d3c9e5a3e3c28d632bc2ff192bbcd913bbcc4040f3c3263df3c694c623ba1d7e5bb7c434b3b856906bdc14c58bb5ba08dbcbe968abc015c163c90a30e3c1ab6a8bb9bdd16bcb3fe823cd496e5bcee6f883ca8f2453ccc5895bc1da7053c4d0c4e3ccdef3a3c3c64c13c36f6bc3c6a62183ca6a369bc72141e3c4707a43c3263dfbc999995390470633ca8e76abc1c28f5bc8a58fa3c72abc33b23a1543c5cd9b33cdf39c8bc932b113cf11a7b3c1e49063d86ad07bdcc4dbabc24b78abb549008bccf6ce23a73b61e3db6e484bdfa4b913c52d8d13b5c37b33cb437a93cf9a9903c566b2f3c6ca619bcc11e0dbd1cfa29bb88f1083da1d7e5bb3310bb3b803f033d5cce58bc0586193c7bff49bd9a3b163da5d39d3c73b69eba7bffc93b6eea1abdc8c537bd610337bc821a2a3b01f3bbbc175c71bc3135143d10c0a13cee92f8bad64e1cbca326c23c470724bbcbb694bb4d0c4e3dd2bb3ebcd67ce7bcf0e154bd0761c0bb244e30bce479813b9ffc3ebc8178293c566b2f3d5f56dbbb38dcbebc597f7cbb5490083b2d39dc3cc1aa573caf02cb3a7952ee3c874f883bace322bd8f98b33b6d76e53c3fe168bdf5add8bb262957bb8283043b47356f3ca9fd20bc3205603a4a8f263c8f8d58bde9a3843c541cd33c961113bc25590bbabe388b3c4707a4bbc718dcbb0eda9f3a0c961e3df6c38ebc4cd3a73ce968f53cb4c373b94c5f723c9bdd163c2beaffbc5f565bbb852e77bbb96c07bdfe7514bda92bec3c2b535a3b7d4ea63b0d66eabc9a3b163dc3f9b33c54b3783dbe2d303c56d4093d101ea13b616c113d5f28103d1e4906bda5d31dbd90a30e3d244e30bbcf6ce2bc2052f8bab395a8bcb6e4843c677c96bbe53e72bb2296f93b490580bc15ea243dedcd873cbbb0083c4905803c302a39bca45fe83c17e8003deadc2a3b1c28753c3e4ac3bb4dde023c2a250f3d74fa1f3cd3ffbf3cfdd3133b663815bd4fe7743cde0022ba3ccd9b3c19a072bd212f88bc00513bbcdd5ea13cc548103d01f3bb3c7b0aa5bcb9032dbd5f28103d69b53c3cd18218bda38f9cbc1c05853cfad7db3a583bfbbb3135143c309313ba1ab6a8bc4af8003d7f294d3ce7f6a8bc938990bcee6f88bc302ab9bc52aa063c086c1bbc6e8c9b3bfa79dcbc2e44b7bb31ccb9bc6f2e9cbcb5d929bc19a072bb2a1a343c97e1de3cb3fe823c91735abcc548903c74fa1fba5e860fbd7f92a73cda0fc53cb71dab3b9db83d3da4319db88db2313d129bc8bc437f213d8325053a5a217dbb5d077fbcb71d2bbca7aec4bba501e9bc490580bcd2bb3e3ce258703cf6210e3ce0e623bc1ee02bbcc9d012bd3915653c15814abbbfda8b3c86a2acbc1c0505bc3798bdbcdc53463c8034a8bc1c0585bc7f92273d5a5c0c3bc257b33cd2c6193d355f173a7924a3bc677c963a6b32e4bcb3fe02b9526f773c086c9bbc104cec3ca850c53b5fbf353b64805ebbadb3eebcd67ce73c1d3eabbbac116e3c8b6eb03c25598b3c6aeee23c81e1833cfb84b7bc79bbc8bcfb1b5dbca501e93c72abc33b018ae13b14df49bc5485adbc1867ccbc69b53c3a7f9d023de2c14a3de6bd823d1b86f4bbae60ca3bf80790bc168c25bbd82943bcc4325abce782733d8f98b3bca1d7653b635293bcd3ffbf3b0e71c5bc86ad073d1ee0abbcaf2580bcc49b34bcb1dd713bc071b1bcb570cf3a53e3ac3c0b8b433a44ad6cbce3cca53b42dda0bc56d4093d368d623cd7f09c38e11fcabcefd679bb35bd96b835bd16bcc78c11bcb5d9a93c8e5f0d3c1332ee3b2ab1d9bb6af93d3b', 'Array(Float32)')), +('<dbpedia:Mount_Fuji>', 
reinterpret(x'e44f0b390d5c57392f316fbbb422b9bccf5e0dbca668efbc703e39bc929c19bc480bb4bbfba2983cb54af8bc862f373d58278d3cb35e1a3d15072cbbbda6543d568b2d3d003e6d3b728b20bd62be3fbb3e9cc0bc086079bb6ae0cbbc2225163ded4a5ebc640ba73c3e2509bb2ba931bc5ce91abd9cf9fb3a3816143d41c2ee3bc46651bc0923123d9d6d1c3c22740e3d502d403d8eb3d23ced229f3c81e4e03cfd17bf3cd2e64a3dc4b5493d6fdc293d615cb0bc538d3ebd0d5cd7bbe8d748bb6a6914ba8a68fc3c610d383c6bcba33c20d82e3d06741b3d769c26bd871a8fbcccaf163d31079f3dc5a0a13c95397fbc28ab423d841c20bd21ebc5bc86e03e3c7b5eb43c26add33a98fa863ddfa2a5bcae8afbbb2abe59bc887c1ebbc0a4433a83a8ff3c1d78b03c0e47afbce528d2bba291b93c069c5a3cad286c3cfaf110bbc055cb3be2511c3bc877d7bc9abe25bd252305bd89de2d3d4ee058bdad002dbda42d19bd52c99f3c7b5e343ca47c913d048943bdf40adb3c2b20e93cdca4b6bc8c3eacbb2abe593b5764743cd8e2283d7775ed3c934d21bd58270d3d1fed563def0efdbcbce2b53c08c102bd3c279abc8b2b153c2798abbc198fe9bbc07c84bceb37c7ba3978233c75890f3cc9285f3d6ea2d93bd3201b3c21ebc53bbee024bdc07c04bd929c193a053acb3b224dd5bbef200e3c40e9273caab3453ca1cd9ab91d78b03dd482aabc1e8b47bde8af89bc99ab0e3d70efc0bc29340bbdfd8e763c08c1823c9cf97b3d742700bd3518253c252385bd141cd4bbc877573a13430d3c527a273d1b8d583c77feb53b0f5a463de7ecf0ba774d2ebdb060abbb5800d4bc4285873c069c5a3c41fc3ebcfa40093ddb911fbdb0d7e23c1da06fbccf86cc3bd55b71bd425e4ebdab64cd3c29348bbbe775b9bacc885dbb9cf9fb3c77afbd3b1af1f83c0ebee63c419aaf3c4aa713bdedd326bdbdf54c3b49f68b3c75b1cebb044f733d4fcb30bcf4e29bbb6247883c78d7fc3c1c1621bd447165bc43989ebbd7a858bda72b083dc60231bd9f31bb3cd595c13cab15d5bc5ead39bd0ebe66bcdbb95e3df8f3a1bcab3c0e3d75898fbb0aad603dc48d0a3d6d188b3cef200e3c14a51cbd3d3a313ddb42a7bbfc66b73cc877573c98e875bc0461043d75890fbc496d43bc44faad3d43c05d3bccd7d5ba010186bcdbb95ebc94e9803d1c3ee0bb7f97f93cbb80a63c21c3063d912879bd0be7303dd89330bcc4b5c93cf657c23ce43dfabc0de59fbc59899cbd3e13f8bb656d36bd841c203c2c82f8bb739eb73cf29534bd3342f53c0aade03c4038203c9460b8bc6c2db3bae7c4313c23fedcbc9b6f2d3ba640b0bc7e356abc3deb383dc7ed08baa640303d05128cbc59891cbd8280403bb54a783c4b09233d6482debcb422b93dfd8ef63c17a30bbc8c3eacbcaec44bbd00162e3d86e0bebc5f0f493d61d367bd14a51c3dfe2ad63b5e3602bc6f53e13cb54af83c9fba033cf3a8cb3c092312bda1cd1abc19c9b93c7178093d2df6983c71a048bd532bafba2bf8293d1bb4913ca42d993d07fe693d61d367bc1a52823d7400c73bfe794e3cbda6543cac9e9d3c34ded4bbc4b5c93bd9f5bfbc301c473d670916bdc43e92bc222516bdad28ecbb1bdc50bd1da0efbbe49e03bd13430dbd5e5ec13c1e63083d5fe7893908c102bddcf32e3d161a43bcbf912cbcf15be43c6fdca9bbf15be4bc7afc243c6893e4bb995c96bb54c70e3d937560bc6c2d33bbe5d959bc88a45dbdcd11a6bce7ec70bcddde863ba640b03cc6b3383c96ad1f3b078732bd857eafbbe888503dc2f1aa3d733c283c16b8b3bc3d3a31bda093cabcfd17bf3b94d7efbba47c11bddbb9de3b0ebee6bcd0e8dbbced4adebc50b6083c75b14e3c982246bdd2ac7a3d7c2253bc4eb819bdaa02be3cbf91ac3c1258353bb80b803c18b6223d38efda3cf6084aba1bb4913caffe1b3a46beccbc18051bbc99ab8eba56daa5bc921351bab7975fbd43981ebc0101863c83ba10bd48bc3b3cdbb9de3b77afbd3b26ad53bd42adc63b17a38bbca9a02e39bc4445bd6d188bbc799a15bded229f3c2f316fbc14f414bd5118183d18b6a23aa6686fbc81bca1bde9c2203d301cc7bc30a58f3b04d8bbba58270d3d1bdc503d03c5243cece84e3d252385bc4ee0583c455c3dbd2871f23a19c9393d9000ba3cf8f3213ded4adeba04d83bbde44f8b3c47f89cbc198fe93c1bb411bd10bcd5bb6b1a9cbcb9954e3b6d188b3d2a961a3b8fed223c8669073cfc2c673d1b65193c5f98113d18b622bc45d374bc9b6f2d3c4471653a295cca3ce7c4b1bd4647953c60fa20bd53536eba33cbbdbcbdf5ccbb502d40bbf3800cbb527a27bdd14aebbc05c313bd7c498cbd13317c3d16693b3ca9a02e3db422b9bcb13972bc9dbc14bdb95bfebc00783d3d2523053debfdf6bc35671dbd0427b43cf97d703be8d7c83b3debb83b55291e3c32692ebcb111b
33c970f2f3c4f1a29bdd39752bc1fedd6bccbec7dbc774d2e3b65e4ed3cd533b23cdc1bee3c14a59c3c90003a3a06741bbab6be18baa01c933c5578963c1da06fbc48bcbb3caab3453d8657f63c593a24bc9a0d1ebdeeac6dbc774daebc9539ff3aebfdf63bf9b7403c136bccbc3deb38bbd348da3bf33194bc2027273d72da98b9ead5373c00783dbcfba218bdbe2f9d3cfc6637bdd36f933dd893303da917e6bc5fe709bd1a5282bbccd7553dd0e8dbbcb13972bc81e460ba8493573d4b09a33cf6ce79bc4bba2a3dbba8e5bc624788bce860113d3db1683a76eb1e3c9c33ccbcb0d7e2bc8a7a8d3b9189023d64825ebaf5442b3c898fb53cfe2a563deeacedbcaf4d143c0427b4bd0b9838bcee842e3dacc65c3c0c71ff3afc04a83bf97df0bcb80b00bd048943bcaec44b3d6ae0cb3b36f16b3c0ff8b63c157e63bca044d23b9fba833cbff3bbbb35679dbbd1222cbcb9e4463c6d188bbcaeeb84bcb0d762bd4f42e83b733ca83c1bb4113de8d7c83aa45558bd01a07cbdd3209bbd2ce301bca7044fbc9822c63a4f7cb8bca6686fbcc65129bd573c35bde43dfabccd1126bdab64cdbca06b8bbc2a969abb8defb33d3a3c42bcc304c2bc14a51c3d046184b9a7dc0f3dba4656bbb8d12fbdddb7cdbce364333c3e7481bbcf864c3cd64649bb9213d13c8b04dcbc6893e43c96ad9fbcc0a4433c2c9409bcf97df03c70b570bc4ccd413c5a9c333cc02d0cbcec71173c6ea2d9bbb95bfebb568bad3b3b7692bc16f2033c03c5a43c5167903b5fc0d03c1bdc503de364333dd071a4bb4471653c249a3cbd61aba8bc7b0fbcbbf76a593b7ebe323cd731213a00783d3c46beccbc8742cebcf2463c3babed95bc744f3fbdef6f063d5dc261bc34058ebbb9bc07bda6686fbd49e4fabc53536e3d9375e03c4882ebbcc7dbf7bc9375e039d73121bd6c2d33bc95397fbc41fc3e3d72da183c7427003dbd7e153d94afb039a47c113d918902bd6a69943cf8a429baf56c6abbd2acfa3cd184bbbc6c7cab3cc2f1aa3caa79753c8669873c5578963b177c523a70c701bdb54af83b70c781bc8669073cf98f01bc40605f3b485a2cbdea9b67bc044ff3bbef97c53b9c33ccbb4d7ec93cf493a3bc0fa9be3bb720283c50a4f7bc026395bbf7b9513c1a52023dbacf1e3d27982bbb19c9b93ca6686f3c2b2069bd54efcd3c3e2509bc733ca8bcd6f7d03c6f2ba23b35679dbc8c8da43c295cca3c337cc53cd2e6cabb12a72d3d21ebc5bbcead053c2225163c0625233c65e46d3d139285bc6d06fabc2d45113da88d973c5fc050bcf906b93c703eb9bcfe518f3b419a2f3bacc65cbc703eb9bc6cdeba3c05128c3b34ded4bcbfb9ebbc5fc0d03c9b976cbc538d3e3c2438ad3be726c13c335406bc4d07123c1fed56bca093ca3a1e02ff3c4e6921bdb35e1a3d3a8b3a3b0d5c57bca3b9f83cf133a53c2f316fbc301cc7bc19c9b93cd3209bbca06b0bbce4c6423c9f31bbbc96ad1fbb7a24643cbba8e53cd3485abc70c7013c0489c33b697ebc3c59899cbcfa68c8bc5b878b3b12cfec3a3269ae3c053acbbc0c717fbcee842e3cbb80a63c7ff802bc9213513c3debb83c65bcaebc9f80333bcdc2adbc0ff8363c5c3813bc78d77cbb1fc517bc91b1413da34241bb9128f9bb63d1d63ce2511cbd94d76fbc5c11dabcf0821dba198f693c3e7401bd295ccabac7db773cc3dc02bd962457bd4449a63ca31a823c670996bc0f32073d7fd1c9bc3ffe4f3dc02d0cbc450d453cbc1c06bc37dc433d94e900bdc46651bc1d29383d77af3dbc0f5ac63c774dae3c295c4abd1e63083bd80a683c6d067a3b4f42e8ba47205c3ca506603dfe2ad63cdcf32ebdeb3747b9bdcd8dbc794b1d3bf0aa5cb92387a53cfa68c8bc7afca43cb0af23bd43c05d3c913a0a3d4c6b32bd0b0ff03b821e31bd70b5703ba044d2bc05128c3db5ab01bcddde063d13430d3a9624573b265edb3c830909bd9b97ecbbf0829d3cb9954e3b978666bcf29534bc626f473c7bc0c33cb833bf3c8309093dfa4089bc9189023ced221f3c3f87183c887c1e3db8333f3d527aa7bb42368f3a70efc0bc4b31623a73edaf3cb2244a3a18b6a23bef6f86ba21c306bb17cb4abcb6e6d7bc20d82e3c2ba9b1bb9a0d1ebd934da13ad9cd003cc304c23bb6be183d5aebab3c783886bc7a2464bc5a9cb33b5c3813bb3e9cc0bc2523853c5e2471bbd9f5bfbb7b9884bc47a9a43c538d3ebb7b86733bdcf3ae3ce939d83c865776bcf97df03c1da06f3b9e46e3bcae8afb3a532b2f390aad603bdae097bc8a68fc3c6646fdb9f81b61bb70b5703cd3209bbb89403dbc07fe693c9fa872bc2f0930bc290dd23cab644d3c16f203bdbfb96b3cb9bc073caf75533c12cf6cbc821e313b21c386bdec7197bc94d7efbba0934a3c4ca502bde86091bccead853c2c8278bc4f1aa9bb7d5c23380625a3bc00c7b53ca6a2bf3c044ff33c03c524bc2b2069bce3153bbc81bca1bc
b584483d414bb73bead5b7bba3cb093c5578163c9b2035bdf7b9d13cb6be183da9a0ae3cf0aadcbc157e633c58278dbbe315bbbb9dbc143c41fc3ebda42d993cb88237bb466fd4bcbc44c53c7bc0433c12cfecbbef0efdbb887c9ebb450dc5bcd39752ba7fa90abd52c99fbc03ede33b4e6921bd2b20e9bcd893303cb4c0a9bccf0f153d3db1e8bcfa4089bcead537bd39a0e23b1e02ffbc2c0bc1bb2871f2bb2027a73c32b8a63bd07124bb5b878bbc4f42683b4d0712bbd9f5bfbbb386d93c6ea259bd0c71ffbc92eb913cdb42a7bb2523853c1d29b83c929c19bc5d4baa3dec7197bce5b11abd7b9884bc83e24fbb4945043d7cfa933ccb26cebcef0e7dbcf98f01bb6296003ca1f5d9bc0b21813d14f494bc44fa2dbc2685943c4d2f51bcb8f9ee3ccf37d43c7c714b3caf4d94bb9c5a853caa8b06bbad002dbc9d95dbbc610db8bca291b9bc494504bcbc44453cc43e92bcc106d3bb85cd27bcca13b73cf2463cbbd61e0a3ca7cafebb774dae3c430f56bc2189b63c7b86733ceb0f083a8f3c9b3c584fccbb7a24e4bcbc1c063cf8f321bcfd8ef6bcdc1bee3cc7ed083d010106bd198fe93c8b04dcbc1e3ccfbc959a883c10bcd5bc830989bc218936bd6d188bbc882d26bcd1222c3c2f6b3fbc805a92bc335406bddfcae43c9d955bbd27e723bcb833bf3c5776853c90003aba645a1f3d39a062bdf6e00a3ca9efa63c8aa24cbc8f3c1b3d0cd288bce004b53bd5e4b93c8b04dcbb5e24f13acf5e8dbc4c1c3abd7e0d2bbcda08d73c2b2069bdc877d73b270f633b2df618bdb5ab813c44fa2d3c5118983c29340bbd2c0b41bdf56c6a3c753a17bd7a24e43c63f88fbc6d067abc6d180bbda83e1fbccead853c34de543b337cc5bcb9e446bd37537bbc7e6f3a3c66a786bcd96cf7baf8a4a9bc739e373d3b9ed13bf6e08abcdd0646ba61d3673c3b9ed1bcffb31ebbeb5e803c7f97793c841c20bcb584c83cd55b713bfd8ef6bb00783d3c428587bc7f97793b0a85213c4285873bb13972bc604919bb5e24713c29340b3cc7ed083cd9f53f3cf15b643c204fe6bb82cf38bc50b608bc1918b23bd97e883c4d2f513b8bdc9c3c57ed3c3ca9efa63cb14b03bc9411c03c7427803c9f8033bdcdc2ad3c383e53bce364b33c96ad1f3c75890f3dd4f9e13c4d0712bd6e7a9abcd1843b3cab64cdbc403820bd7364e7bccc609ebc37537b3cf133253aef6f063d028bd43c9fba833b3bedc9bb069cda3cf2cf843ccc609ebcee3536bb3a8bba3be202a4bc1b6599ba94d76f3c10e30e3a32e0653c07383abccb7546bdbce235bd4060df3c6071d8bc5ce99a3cbee0a43c32e0e5bc3269aebcac9e9dbc15072c3c7f97f9ba9411c0bc74d887bc3093fe3b0c83903c8eda0b3c22748e3cac4fa5bcf5442bbdcb7546bc2f09b0bc425ece3c4e6921ba64825ebcc651a93c3753fbbbb9e4463cf2cf84bcba46d63c624708bd16e0f2bc3e13f8bc7e0d2b3c912879bc728ba0bc36f1ebbc50de473c1fedd6bcad00adbcd235c3bcf5a63a3bde6855bc136bccbb6f53e13a45abb5bc25fc4bbd3e7401bb4285073d36f1ebbcbb31aebc7e6f3abd769ca6bb5f98913c7dab1b3d38c71bbc82cfb8bb12a72dbc5ead39bc224dd5bc6e7a9abbb47131bc36f1ebbc3cd821bc2ba931bdabed95ba6d6783bc5c115a3b42360f3cc3ca71bc044f733a4038a0bc9c82c4bb8cb5e3bc0376acbb5efcb13b862fb73c78d77c3b7c71cbbc742780bbb5d3c03cb9954ebd88a4ddba40e9a73bb54a783bc928df3b3978233daffe9bbc4a589b3a0ff8b6bb65e4edbcaa5136bbcbfe0e3c2ea7203dc07c04bd79c2543c5602e53bd7a858bc8da0bbbc09d4193ce662223d2c0bc13c60faa03bda2f903ca01c933bfba2983b904f32bba47c113d17cbcabc16f283bc8d2984bcae9c8c3b30937ebca1f5d9bc0ebee6bb2df6983bf0829d3be1a014bd6d8fc23bc7ed083c9411403c351825bdf5442b3cf2463c3de860913cfadfffbc9cf9fb3c464715bc898fb5bb32692e3c3fd6103d6f53613ad3209b3b88a4dd3b2abed93b75890f3dce9bf43bd1d3b3bc8258013d1af1f83b799a95bb13430dbdc4b549bd3a02f2bcb5ab813c3a8bba3cb4e8e8bc2ce301bd12cf6cbc45d3f43bf331143dcf5e0dbb60fa203c39292b3cd122acbc290dd2bc9f80b3bc1392853cb5d3403d317e563cc1b75a3c3e2509bdbee0243db30fa2bc3518253de7fe81bc77afbdbb270f63b9715150bd0aade0bc30a58f3c466f54ba2849b3bca42d993bb1c2ba3cc877d73cc7ed08bdf15be43ae1c8d33bece8ce3b63f88fbc821e313d49e4fa3c816da9bb76135ebdebfdf63bf906393cf5a6babc3e1378bcfba2983cc7ed083a0e96a73cd55b71bc09d4993c4d560abdc9285f3c6893e43c82cfb8bc92eb113d610db83c708db13c12a72d3bda574fbca257e9bb56daa5bc959a88bc3bedc93c7b9804bd656d363c92eb113c38efdabcc517d9bcb55c09bb494
5043dfc2ce73c816da9bc44faadbbd4f961ba08c102bd3269ae3c1a030a3ce066c4bca704cfbb7427803c6d06fab8b9bc873c7e6f3abb8d1773bcbc0a75bc2cbc48bc3bedc9bc95c2473bcdc22d3cc02d8c3cf62f03bded221fbcf65742bc265e5b3c816da93c59891c3d5b878bbc11f6a53b290d523b3342753c975ea73c568badbcb995ce3b43981e3b069cdabc335406bdc9b1273c703eb93c3fafd7bb6f2ba2bce5d959bbfb5320bcaa8b863c6731d53c7ff882bbb4e868bc8f3c9bbcdcf32e3c0b21813cd1d3b3ba6ec9123aef97c53caeeb843c0de51f3c8c3eac3c9e1ea4b9ea9be7bcde19ddbcd80a683c74d887bc728b203cef0efdbceb37c7ba7b8673bc5f0f493c0738ba3cbc0a753c0cd2083d18051bbc87424e3d3269aebc24e934bc54efcd3cb47131bbab644d3c048943bc96fc173ba47c11bdcf864c3c44fa2d3c871a0f3d69cdb43a2d6dd0bc865776bccd1126bda1f5d9bbce9bf43b5d9a223b1af1f8bc74d887bc7f97f9bb5c3813bcc3dc823cc7ed883b13bac43b5c60d23bcc609e3c13317cbc4c93f13a0b49403b13ba44bbfa68483d66a706bc198f693c25fccbbb7c71cbbbe7ecf03c1d7830ba99ab0e3dfa40093d17cb4a3c6907853c2883033cf2cf843cd4f961bd2d4511bdd646c9bb21c3063ca67a803ce9c2203c9189823cd3485a3cfb53203d55299e3b5c3893bc26369cbcad286c3cf76a59bb6f53e13c975e27bc5a136bbd8331483df8f3a1bcffdbdd3c866987bbcb75463d3bc50abde03e053cd5e439bd4eb8193bb9e446bcfb5320badef19d3c4449a6bb2d1ed83cbba8653ca6f1b73b419aafbc378dcbbc20d82ebcdc553eba816d293c48bc3b3a07fe693cba1e97bc1a030abd342dcd3c4f7cb83c90c6e9bba9a02eba46beccbc5a136b3c8cb5e3bb419aafbb1e027f3cc3dc823d5c60523df295343ce68ae1bb3d3a313cc84f18bd7838063def6f063d89de2d3c800b1abbd14a6b3cd96cf7382189363b03762cbd5fe7093d39292bbd2f31efbca17e223c198f693ca506e0bc42368fbca640303a954b903c49e47a3c45d3f4bc7b9884bc0b0ff03ce8d7483bbdf54c3cae13443cbee0a4bcbb80a63c14f4143c1669bbbbc3ca71bb30a50f3ce02c743cfc04a8bcd6f750bc', 'Array(Float32)')), +('<dbpedia:Andes>', reinterpret(x'891d04bb18cd433d4d925abcf5d13ebd2018613ad1e003bca84979bc82f15cbc20e556bd15d640bac9e7e6bc1308fe3c6e63523cd85461bd1182263aca81d23c85bf9fba55812d3c40f3223c78af3c3cfb8c3abcb6ca873c2f85db3abbeb973c5d70003c806b053dc035e83caca7ddbce86511bd708d5fbd6d96dc3b6dc966bc67b2203d8a133a3de106e03a1766e2bcb6ca073d19672fbd6417e8bc1245523d664b3f3c7097293d4934763b12e9873d4fc6b1bbcab4dcbcc4c65638fb8cba3ce0763e3ce4072dbd8161bbbcdc185abcf2a7b1bcaf0f0cbdad1809bd6819823c9168213d1d54683d30f686bb6b9f59bae597ce37be0bdb3c1c2b28bd58129cbaedaf613c6a7699bcfb8cba3c73b76c3c7f2de43b670eebbc74f58d3b31afe83bdd7f3b3dd7ed7fbcabe431bd5b3c293df895373d005200bcdf0f5d3b4cf8eebb5cff543d593b5cbc2443bbbc3bd212bd71fe8a3daea82a3d2476c5bcf5d13ebdc4934cbd2d5b4ebdcbe8333c1c5e323d1733d8bc1e9289ba974cddbbcd1c8b3ca0d51b3c3f8cc13c1fb1ffbb00e1543c72ea763cf07d243d124552bcbd482fba9a1a20bdeeed82bc23b3193d428344bbdf8088bcb8279f3bbbeb97bc8f01403c98bd88ba9ed4ce3cad37ffbca529363b4fbce73cbbeb97bce33a37bb8e719ebabbb88dbcf605163d7ab009bd3c9f88bcfcf39b3c767be53cf117103d2dff033d9488e4bbf8c8413ba23233b926aa1cbd9a43e03c84f2a9bb974cddbb9bb40bbd3d58eabc44add13d5fc3cdbcb1d2373d1e88bfbb6942c2bcc17309bd25dd263c70649fbced5317bd629190bc90f7753af605163a3316cabc0d1af83c6120e5bc357361bc8b7a1bbd708d5fbb9c442d3d6e0788bd51bd343c84583ebcd30a11bcedb92bbd0b61963c1733d8bb8481febc0eb4e33b1308fe3b9f64703c4d691abd1ace903c3c9f083ddd7fbb3cd03c4ebc9baac1bcfdc0113d29a11f3d344a21bd7ad9c93c2dff033c23e6233c0b61163d5ca38a3c431366bda45c403ddcbc8f3c37742e3ca954103b3e25e03c1766623d82f1dc3a64eea7bc0afa34bdbbb80dbdabda673b42797a3d460ae93cde42e73ce6feaf3cd65dde39abb127bd7fd1193c2bfe36bdd0138ebc56e80e3d20e5d63ceab8de3bd6ce09bc36a7b8bc94555a3dae04f5bbf638203d02af973c888398bcc32c6bbc9a1a203b5fcd173de36d41bca6c3213d3c9f08bb3249543d1bc4463bbf72bcbbfdb647bd74f58dbcd5c3f2bccedfb6bc30ecbc3cc29c49bc32539ebbdb22a4bc80c7cf3b6ad2e3bbd2a3afbb6dfc
f03cf3411d3d36a738bd35e48c3da6ec613c215602bd838bc83bdfb3923b6a05ee3cca2588bc6674ff3ca1cbd1bc1ebb49bd6c2f7bbd481500bd858c95bd6e3a123da8166fbceaebe83c0bf0eabb5257a03d8094453cee49cd3beab85ebde53b043df43753bc96e57b3bed53973c1f22abbbca81d2bc040c2f3cbf9b7c3da5f62b3d0cfb013ca39914bd79161e3d040caf3ca95410bda0d51bbd34b0b53b1379293c49ce61bd43ea253d411ce33c12dfbd3ce85bc7bc6086793d0f4e4fbdf7fbcbbc59ac87bdeb290abd76aeefbb037c0d3d2f29913ca72a033d215602bd2a08013d7451583c0b9420bce1dd1f3c26d3dc3caa7dd038f6946abc54dd773c4c9c243cb005423d064086bc75b8393c35e40cbdfb82703bf5d1bebb8da4a8bc51bdb43c21b24cbc67b220bda65d8dbcb5fd913b4e2c46bc6b39453c121248bdc78acf3ccc11f4ba111c923c608679bc9522503d380450bc3cbefe3cb19f2dbcbcaec33c54e7c1bc7d03d7bcbd716f3c0b6196bcc82e05bd4b02b9bbc1025ebd628746bd066946bdc06872bd888318bd111c12bc455107bd664b3f3d5e332cbcf4db083b98e648bd2018e1bcf437d33a9ca0773ccb4ec8bce53b843d23800f3d6e3a123d1699ec3c5257203ca5f62b3c64ee273da9b0da3cdcef193a6942423c664b3fbd089d1d3d1fb17fbdc3032b3df4db08bddcbc0fbdf895b73c3517173c1ebbc9bb50895d3c2fb865bcefb0aebc0c2e8cbce83207bd20bc16bc9d07d93cffb794b8b8279fbcd7edff3c1509cb3b276dc83c2a64cb3c586e66bcb5595cbc1b9b863823dc593c072cf2ba7cda96bc8c4791bc23b3993df1e4053d11ab663cff1d293d7cda163c89508e3ca5f62b3d8c7051bca7864dbd3b051dbc9ed44ebc05d9a43d64eea73c4279fabbbcaec3bcddb2c5bcf117103dbf7c063cf9fc18bd3a38a7bbf605963b52b3ea3c28d4a93c2f29913d92cf023defe3383dcf7922bc29a19f3c74f58dbce2770bbd767be5b97ae393bdabe4313c1f4b6b3db23919bc10e83abc4fc631bce8c15b3a26d35c3d256cfb3b5123493c86b5d53ab75a293c77b986bd74ebc33c5b65693dcab4dc3c34b0b5bc1e92093c040cafbcc1025ebc8ecde83b7585afbc0de76dbca65d8dbd3f63813bca25083db3c93a3dd333d13c0ff204bd5878b03c6c2ffbbabd15253c3cbe7ebb0f4e4fbde363f7bc00ae4a3b4346f0bc069cd0bc44ad513d0ac72a3df925593d74f50d3cc9f1303cd601143cd2a32f3d62eddaba7f04a4ba684c8c3c086a133d1379a9bc8c14073d8a3c7abcdf0f5d3d9ea1443c23dc59bc9ed4cebc3ec915bc01152cbc4934f6b9d03c4ebc4ef9bbbd41e9d83c9f3bb03b39a885bd13acb33c0a2dbfbc7b40abbc732898bc0c2e0c3d7f2d643d5577e33b98bd08bdb6ca07bccab45cbc42503abd5e29e23c1d21debc2539f13c5ad5473d7d9d423c3c95be3dc6faadbb561b993ba16f073d1245d23c9e78843d5e0022bdf04a1abc67b220bdff46e93c78d8fc3c7da70cbbe4a1183dd5341e3c5ad5c73c1deed3bca6ec61bd9baa41bd7e6ab8bc4d36103d812e313b0c57ccbc6c06bb3c075f7cbc3c95beb9d33d9b3c41b64ebcc52db8bc8626813be7981b3d67e5aabc089d1d3dccdee93cf2dabbbce66444bcb1c8edbbdb7eee3c4ccf2ebc6e63d2bc0640063de8c15bbcbbeb173d6fca33bd68a8d6bce56e0ebd3804d0bb1b9b063cdfb312bcda9282bcb8271fbc9a1056bd35e40cbb32531ebdbcae43bcd8baf5bc532496bc36da42bc4a684dbded86a13c00858a3a1f22abbbb16c233a13d573bcbb47623cb8c10a3c38375abb8c3d47bb64e45dbca753c3bcbd15253c809e8fbcd567283d8b7a1bbd922b4d3c871c37bc6e0708bde6313abdb9b7c0bc7f04a43cee200d3caedb34bc23dcd93b0dbe2d3d9488643c23b3193c075f7c3caf6bd6bc5fcd17bdc1cfd33bf2d0f13a51bd34bb4fbce7bc1f7ef53ca3c2d4bc39d1c53ce88ed1bc625e863c657e493c90c4eb3b8c47113d31afe83cc06872bb1182a6bc0f4ecfbbe4a198bcb2068fbcd85e2bbd888318bc7f60eebcc1d99d3cd9eecc3bf9fc18bd1ebbc9bca9e3643caca75d3d2d28443bdcbc0f3cd1fff9bcc1d99dbd56e88ebccb447ebc2fb8e53cc1d91d3dc656f8bc90c46b3d55812dbb3220143c318628bb6354bc3b9191e1baed86213c244d05bd6b39c5bc46ae1e3d5908523c94555a3d0052803c90f7f53c6adcadbbf1e485bd020be23c41b64e3c2807b43c1dc593bce4a1983a3741a4bcaedbb4bc086a933b5f9a8dbc6da026bd141395bc3d8b74bddcbc0f3cf11710bcd6ce893a9f6e3a3c670e6b3d0d1af83c6c063b3dee200dbde2d3d5bc6641753c8425343c851b6a3c77b906bdce1241bdc4c6d63c4283c4bb2807b43a98e6c83ce407adbb7ae3933bf79f01bd02e221bce33a37bc360dcd3cdb552e3cbf9b7cbc2e8fa5bc66747fbd7dd0ccbcd04698bb7ba63f3bdabbc2bc2bcbacb
cf11790bb0e81d93c6287c63b9e78043d3a94713cd400473c8c3dc73cd1ff79bbe5cad8bc9ed44e3c6b430fbc85bf1fbbe9283dbbf2da3b3d6da0a63bf862adbc12dfbd3a4feff1bbaca7dd3b47a4d4bb92cf823c78d87cbcb98e00bd2eeb6fbc244d053db75a293c03a5cdbce2770b3c10e83abc809e8fbbe407ad3ce1102aba5e33ac3c170a983b170a183dc6faadbcc49d163dca5812bb6674ff3b3a94713caf4296bbc33635bd22192ebd0993d33ce85bc73beeed023d2242eebcf7d28b3b23b399bce9283d3b4b0c03bbde1927bd7f2de43a79169ebc97231d3c5581adbb593b5cbb5e33ac3b86590b3d54e7c1bbd791b5bb0115ac3cc269bf3ceb85d4bb6dfcf0bce10660bb7131953bbc8503bc3a38a73c6b9f593c7b73b5ba76485bbca82039bb51bdb4bb2eebef3a00858a3d2f5c9bbc11785cbb5324163d2744083dfde9d1bc6a7699bc8c4791bc5f9a0dbdb75a293d1f222bbd3c9f08bc4feff1bcae04f53a34b0b5bcf59eb43c1e9289bcfb82f0bc344aa13bd567283c809e0f3d1800ce3ca69097b8f638a03b411ce33bc560423d7c36e13c71fe0abdea8f1e3aac7e1d3c502d13bc92cf023da9b0dabc0d1af83c9522d0bca8ba243bf8c841bd5908d23c5d66b6bbc336353dc824bbbb809e0fbdbd71efbb5257a03c6b438f3c9c7737bdc85745bd5f67033ba5f62bbc0e8ba3bc301f473cd109c4bcfe50333db98e80bdc303abbc9d0759bc66747fbdf2da3bbc15e00a3d114f1c3d8224e7bcffea1e3d1ace90ba4fbc673b9522503c891d04bd9bddcbbcbd48af3c9f6e3abc64eea73c477b94bb0b6116bdba51acbcbb47e2bc0d1af83c199ab9bcba51ac3cbf9b7c3cc1cfd3bc67b220bcfc26a6bdcedf363c07363cbc1dc5933cb5309c3c16702c3c16702c3d6b9f59bc7cda16bb069cd03c36dac23b74eb433ca3cc9eb98da4a8bbc1cfd33c82fb263be2aa15bb41c0183c761f9bbb909babbc3cc848bc3741243a15ad803bd5c372bcf95863bcda9282bc064006bd684c8cbc7e9378bc8946443cabb127bb2744883cf173da3b6321b2bcd9f896bc8395123c5e33ac3c0115acbc8ecde83c3ef2d53a15e00a3d137929bd1f7ef53ab26259bc9c7737b997f0123af8c8413c7e9378bc0115ac3b3220943c7e93783c1f55353d74c2833cb5fd91bbf1735abcedb9ab3bf07da43cf04a1a3cbb14d83b5089ddbb1e883fbde36d413af3411dba2c320e3c68754cbc92f8423df0d96ebacee9803bc656783c5cd614bd0dbeadbbf958e3bc629110bdfaf2cebcf59e343bd4cd3cbd41c098bcaed1eab923800fbcc068f23914a269bd3774ae3cc8243b3cb6ca87bd15e00abc67b2a0bc61f7a43b4f609d3c73b76cbcf374a7bcc068f2bc8bada5bc7d7482bc1a5de5bca45cc0bb3a61e73b6a056ebc81613b3b9a1056bdaf9e603c65b1d3bb657ec9bc21890cbb256c7bbc379d6ebc477b14ba57de44bb8946443cf95863bd276dc83aeeed823da4668aba49d8ab3c1a019b3b5f67833c10b5b0b94a72173a6c2f7bba5b3ca93c0e81d93a36a7b8bc59df11bcbee21a3dc53702bc86590bbcde75713c4e2c46bdd0130ebca031663da9871a3d4871cabcc46a8c3ca8ed2ebd01152c3c0ff2843c92f8423c644af2bb664b3fbcd30a113cda92823c34b035bceeed02bd6e07883d90c4eb3c22192e3cda88383c8f34ca3dae04f5bc39a805bc0b94a03c7097293a02e2a1ba41b6ce39fdb647bbe7f4e53b922bcdbbe83287bc1509cb3c93c538bb67e5aa3c98e6483ce8c15b3cf79f01bdd2a3af3c3d6234bdf1e4053c92f8c2bc2018e1bc888318bb46ae1ebd561b193b493476bcf40e133c528aaa3c244d05bd77b986bb56e88e3c7fd1193ddb4b64bca9e3e43a6090c3bc54b4b73c609043bb4026ad3b9dde98bc02af173d2f52513cf89537bb59df113d761f1bbd84817ebca6b9d7bb86590bbcbbe1cd3cf8c8c1bcfac90ebca45c40bc56b584bb8a3c7a3c5acbfd3c3774ae3c836208bb4279fabcd2ccef3b5acbfd3c2fb8653d2c328e3cc1025ebdaea82abc738eacbc5e2962bc5cd694bcb16ca3bcf8c8413c2a3b8bbcf66baabc020b62bc7949283c00858a3b531accba88dfe2bb655509bcd7edff3c12e9873c4feff1bbad0e3f3bc49d963bf07da43c7e93783b8946c43ca8ba243d3573613b10b530bcbb4762bc5aa23dbbd3d706bcac4b13bdbb4762bc6321323ca399943af04a9a3bb98e00bc5b32dfbc65b1d3bb3d586abc96e5fb3b347dab3c71cb00bdd82b213cfac98e3c267712bb59df11bce56e0e3d8481fe3b2a0881bb53f18b3c7ffad9bb72f4c0bbfe50b3bcd5c3723a97239d3baa4ac6bbc56042bb6674ffbcf3419dbb244d053d4fc631bc2606e73b97239dbc3e960bbdff13df3c738e2cbd00e1543cd109443d98b3beba086a933c08d0a7bb9392ae3ce86511bdf72e563b56114f3cd7c4bfbc8b7a9b3c59df113d9a1aa0bbf3411d3c994d2abb1b913cbce4d422bb96
89b13c1f55b5ba3804503cba7aec3cd109c4bcb22f4fbca72a83bc95c685bad03c4e3b3ec995bc457ac7bcc068723c58129cbcf40e13ba4d3690bceb5c943bf140d0bce727f0bcad377fbcde75f13c34b0b5bbf72ed6bc39a885bc455107bdf04a9a3cd270a5bc36da423d58129cbc67dbe03b9f3bb0bad85eab3ceb298a3b9f6ebabcd85eab3c738e2c3d173d22bdf98b6dbbca8152bcf40e13bb41e958bcc29cc93bc9be263d0de76d3c532496bcfbb5fabc4fbc673c02d8573cbeaf103b1a5de5bce2aa95bc6b9fd9391bf7d03c6354bcbb2c8e583b067310bd26aa9cbcf79f013c9e78843c586ee6bcc6f063bca0a211ba6e30c8bce139ea3cb75aa9bc2a0801bc36dac2bb37d078b9eb85d4bbb98e003dedb9abbb93c5b8bc21898cbce4306dbcc6f0e33cbe0b5b3c8bada5bccd4f153a20efa0bcf958e3bbef0c79bb14461f3d741e4ebc66747fbcf437d33b31af683bf59eb4bc8812edbc6ad2e3bcfbb57a3b3220143de7989b3babb127bd0b94a03bee208d3bd6ce89bb1e9289bc7a0c54bcc623eebc8c4791ba741ece3c224c38bc8c7051bceab85ebc1ebbc9bcd1d6393dcfd5ec3b7ab009bdf7fbcb3cb16c23bb532496bc267712bba0fedbb90960c93b1dc593bc37d0f83bc6fa2dbc5e29e23bfcf39bbc12e987bc7127cbbc19906f3c50fa883c85e85f3c33e33f3bdb2224bc9bddcb3c07363cbca51f6cbc034983bbe9f5b2bc47a4543cea8f1e3cb8c18a3c3cbefe3b7e37aebcbee21a3a0960493c5056533bff46e9bc6a05eebc49a521bbcd454bbb62bad03c625e863c809e8fbcf79f81bc586e663db9eaca3ca8ed2e3b344aa13ccb4e483be7f4653c8dd7b2bcb81d553cfc1c5c3cbb14d8bc3f63813b7c0da13c477b94bc0171763cb8c18abc4a9b573c6086f9bc36dac23be106e0bc5f6703bd612065bd2539f1bc4f93a73b95ef45bbe7989bbc43eaa5b9657ec93c2f5c1bbca690973ca28efdbb146f5f3bf5c7f43c7131953cd49a32bd4b0c83bc9689313c7f606e3cd9f8163db1d2b7bc5fcd17bcf437d3bcbd716f3c477b143d5acb7dbc8812edba5d99403c92020d3c38375a3c5280603cf1e4053cb78db3bc7f2de4bcad0e3fbc21b2ccbb5b32dfbc86598bbc97231dbc39a8853c36da423cc173893cdfa948bcba51ac3c20bc96bc461433bc1dee533c59df113d5e29e2bcb526d2bb77ec90ba50fa88bc8dd7b23a6e30c83b8bd6653c6ffdbdbb6555093da1cb51b972ea76bb7f2de4b9851beabc457a47bb2c98a2bc554ea3bc93bbeebcbd15a53c842534ba4934f6bb92f8c23b1f7e75bb9d07d9bb03a5cd3c6d965c3c33ed89bcd567a83cbb47e2bb8c3dc73c8bada5bbdf8008bd1f7ef5bb072cf2bcef0cf9bc67e5aa3b771551393b059d39d85461bb040cafbb502d933b2807b43b4fc6b13cbcaec33c0ac7aabb38db0fbd4c9c24bd891d843a909bab3ccd1c0bbdcc1174bc9689313df39d673c7ab0093d4614b33cbf9bfc386dfcf03a8e3e94bb6942c2ba2510b1bcdee61cba0171f6bc2018e13c9488e4bc05cfda3b38375a3c77ec103d03a5cd3b3f59b7bc9bddcb3b2a64cbbc23808f3bd2a3afbb625e86bb3d2faabc6a7619bc78afbc3b37742ebd72f440bcb4f27a3c9f3bb0bca3cc1ebd90c46bbca753c33c2a0881bc379deebb20ef20bcb8c18a3ba0d51b3d448411bd84817ebcdf0f5d3cf925d9bce043343ad7c4bf3b386ae43b913597bc59df113d114f9cbc4d0306bb425a043c143c553cc7610f3d0eb4e3bcc9e7663b90c4ebbcfb8270bbac7e1dbdeac2283d994daa3cd9c50cbc431d303c173d22bc605db9bcf5d13ebcfac90ebd0eb4e3bc4e22fc3b224cb8bc296e153b5fc34d3b74f58d3c1f4beb3c1245523ab8279fb858121c3c6555893ccedfb63cdb22a43c6e07083cac7e9dbca690173df40e93bb455187bb8626813c18a4033b099353bc2f52d1bccedf36bc8c70d1bc47d7de3c836288bc020b62ba5e332c3dd590683dcbe8b3bcecec35bcbf7c06bc974cddbc24433bba4d36103deb5c94bc28d429bcdcef993c47480a3cd82ba1bc557763bcb1c86d3ccd1c8b3cf7fbcb3cc1cf533ba84979bc4d5fd0bc89508e3cf9fc18bccf79a2bbb1d237bc47d7de3cc56042bb9656a73a61c41abc6641f5bcdd4c31bb3d2faa3c26a0d23c47a4543dfac90ebdcd4f95bc1733d8bc9e78043cf66baabc3b2eddba49016c3c7d7482bb7ae393bca25b73bde8c15bba9ae795bc215602bde430edbb4d03863be53b843c02af973c6e0788bccc821fbda65d8dbce91e73bca5f6ab3caa21063cf0a6e43cda8838bad79135bda2ffa83baf9e603b', 'Array(Float32)')), +('<dbpedia:Alps>', 
reinterpret(x'2cc984bcf5b2123daa13a2bcc61e1bb7f5b212bcd29e8bbc727721bd485a0cbdf7beaf3cefcfe5bc94059abc9ea0533d3067cd3b801df3bc12ca28bc6d7a30bdf1c1bebb0ecc323cb89faf3c7907c3bbb51aa63aa09fcebc9e0001bdc62bbd3bfe5a6e3c0e39823cd7c2623cf1c13ebcfed4dfbc064a38bd10be8b3dc81d96bc93069fb9556d2d3dae2bdcbc6cf4a1bc0ecc323ca7a8dcbc9b286c3c8ffb863c33d2923c5401e33c0dc0153c6604533d9220e3bc8892cbbcefc243bbc3c0773ce8b9b53cb40e89bc961e59bd9107a43bcf33c63ca48380bb790743bd86a0f23b0a482e3d4dde903d8f08a93b2cc9843c4469383c1eeaebbcd9a719bd978aa3bb8a84a4bc14bc81bcdf44dd3c23ce9d3aac1fbfbcbc30d6bb9aa2ddbbb02a573cac129d3dcd34cb3cd1c571bc8c9de33c0ee6f63c93a6f1bb9e00813cd83bcf3c1fc3853c4277df3cf4ccd6bcfec73dbd63ec983d22db3f3dc09b9bbc31e039bd6b7b353ca31736bccf26a43ad43d593d68f62bbc18c719bcf63821bcedd0eabce335313c3d60aabbbc30563c9081153c56f3bb3cfcd564bcf4d9783d557acf3b595e01bd0133033ccf33463d7284c3bbfc5bf3bcb010933bed30983c2ce3c8bc7f7d20bc7e04343c1bd2b1bcf75160bdac1fbf3c666400bccd416d3b0adb5e3cb0b0e53bebc44dbc07dd68bc4df854bc385617bc5c76bb3ba5098f3c2d5c353aba91083a40d8913c801d73bcd1c5f1bb8f15cbbb575f063cca1c113c26cc133d595e013d9a028b3d7df896bb1b3f013c5bf02cbd6f864dbb7a80afbbc99602bd7682b9bc19d336bd12bd86bc6b95f93b73fd2fbb28d8b0bc58f2b63ce4bbbfbb3c548d3c2e5b30b9d523953cb693123d5bfd4ebdd09f903d69e9093d24da3a3cc9a3a4bc5c907f3c14d6c53c4078643bfeba1b3da1858abba715ac3b55608b3b7c12db3ca291273dc797073c93061fbd908e37bb50639abbe9d2743c8b17553bf22d893c07d0c63cd49d863b51dc863c49fade3ce2c9e6bcfb4fd63cefc2c3bcaa13a23c0a3b0c3ccfc676bb678a61bb0cda593b902168bdcead373bb02ad73cd2ab2d3d6081d3bc75fc2a3c6213ff3b9280103df2da7d3deab8303de4ae1dbc2049943cc7b14b3cae1eba3cecaa093cdf517f3c769cfdbc5b6a9e3bf95dfd3ce1a305bd4675553d58e5143c19e0d8bb3770dbbc2049943b7178a6bc928090bc3ce73dbc49edbc3cb693923d77ee833c9b1b4a3d4bf9d9bb0458df3be4bbbfbcc9a3a4bc3b612f3b005a69bdf5b2923aaf8a04bc5ffb443cfd348dbcf05574bc44e3a9bcd848f13c39cf03bcee49d73cb20f0ebc660453bd25cd983cc8b0c63c1343153dff4dccbb455c163d2553a7bc8a9ee8ba8892cbbce02a9939f42c04bc80f60cbc8189bd3ca284853d314d093cbc90833ce2c9663cebd1efbcec3d3abd4dde90bcbaab4cbdf6cb51bdc8b0c6bcc9a3a4ba768239bb6f0cdcbc15e262bb68701d3b4bf9d93cc02ecc3bb1a3c33c35d18d3c63ec98bd6b9579bd556d2d3c295e3fbb12bd86bd95984a3d6074313d5e7536bcbe9c203d8afe953d40d8113cdba6943c41fe72bdd7b5c03cf03b303c5ee2053dab99303ddbb3b6bc5c76bb3b22e8613d9803103dea3e3fbddc1f813bce1a873c6213ffbcf93617bb03d250bc19e0583d274580bb29511d3c42f1503dd69c81bc5262153db335efbc74769cb9c220253cf9b0883c6c9474bd1deb70bddba694bca9b4793ce3a200bc57ffd8bc1c5840bca28485bc2b5098bc54f4c0bcbf158d3c475b91bc9e00813c9929f1bbe9c5523ddccc7539e3a280bdf545433be6ba3a3d62065dbd46d5023bac2ce1bc5003edbbbc9003bde5342cbdbab86e3c4debb23cd52315bd91a7763c9d8714bdba91083d46d502bd4476dabcfac9c7bc5b6a9e3bb3284dbd9081953c19edfa3b595e81bb58e5943b981dd43bd6a9a3bb618df03bb010133ccaaf413ac6be6d3c0e4624bdc3c0773d07b602bd0f5f63bb275222bd0849b3bcf22d093dcea0153d4fdd8b39b6a0b4bb0ccdb73cee2f93bcfac947bd5c907fbd4c0577bccd416dbcee2f13bdefcf653c3ce73dbb769cfdbc6ef39cbc2b5dba3ca715acbdd848f13be4ae1d3d82022a3c6c94f43c42e4aebbbe3cf3bc55600b3d17d4bb3cfb35923cb4a139bc840125baed3018bb17418b3cdc2c233d62660a3d04b80c3be2c9663c4a59873d788134bdd13241bcea3ebfbc9e93b1bbd9a7193d1debf0bc65eb13bd083c113dc81d16bd557a4fbce726853d295e3f3dd5c3e73c0ed9543db41babbb9d94b63c264605bce8ac93bd75ef883c7df816ba57ffd8bcbd16923d436a3d3cd9ce7fbc00c7b83c07c3a43cb335ef3cc41f20bd697cbabc00ba16bd80032fbd9d9436bd6810f03c84a1f73ce925003c8f15cbbcdf51ffbb3f5203bd37ea4c3c064ab8bcf7b10d3dc79787bb5d6919bd5defa73b3f5f253cde2b1e3ddda50fbd9d8714bd58e5143d6996f
e3c4675553de229143c9d27e7bcbd1612bb6a02493cd82eadbcd43dd9bc65f8353d4c65243d385697bb3f5283bd7e1e783c74833ebdd9b43b3d06575a3cfaaf033c415e203c14d645baab06803a1debf03c536110bd09c21f3c07d046bc716b04bd0cdad93b40f2553c4fdd0b38980310bc436abdbc4668b3bd7509cd3b67ea8ebb9399cfbc7c72083b7092ea3991fa81bd6b6e13bce1a3053d01d3d53cb51aa6bcbda9c2bcd72210bc87790c3df545c33cd9a7993c1bd231bd4c65243d87790cbb947f0bbd8c9d633da1858abc711879bc0458df3cf3c0393d8b17d53c999640bda2ab6b3d77ee83bd475b11bdf5b2123d0dc0153b03df723dcb28aebb52621539edb6a63c8c831fbde84c66bc03c52ebcd7c2e23b6611f53c4e641fbd4675d53cad98ab3b99a362bc857a913b05deedbcff3388bc4b7fe8bc58f236bc9e0d23bdd69c813b5fe1003d05d14b3c4f703cbc0bcebc3cd49d06bd8a8424bd314d09bdb01db53c42d70c3b9c0e283dd9c15d3c66f7b0b850e9a8bb618df03bc6385fbc47eec1bc647227bd58e5143c0f459fbcecb7ab3b49fadebc2560493c0f5fe3b9bc3056bcf22d09bd62732cbc6213ff3bba9108bc9c0186bc7c1f7dbc9cae7abc7291e53ca82149bc2f4e0ebcf5b292baf3cd5bbc7c1f7d3ae7d3f9bb7881b4bc1f70fa3cd5c3e7bc7277a13cb3356fbc043e1b3d49fadebce9c5d2bce92580bc2fe1be3bb02ad7ba658b663ba1255dbae740493c1eea6bbc05c4293c820f4cbd4fdd0bbdc8b0c6bc3d5308ba35d18dbc36643e3c35ebd13c22db3fbde5d47e3c194006bd21c280bb5ae40fbdda2006bd3cda1b3dedd0eabad83bcf3b01e0f7bb3f5203bdc82ab83c7c7288bcefcf65bd977d013cd7a81ebba30a943cc9a3a43bf55265b9f95d7d3c23ce9d3cc19a963de8c6d73cb2afe0bce8b935bc5b6a1ebd999640bc660453bb928010ba2cc9043d0140a5bc10e5713db8928d3cf1c1bebba914a7bc6405583cc31383bc1a66673c043e9bbc840ec7ba3458213dbdb6643cf3cddb3c77ee833cbc9003bd51dc063df2da7dbdad8b89bc5686ecba3863b93c24da3abcedd0eabc4c5882b950639a3a0ed9d43c6f197ebc6f86cdbcfb42b43c6671a2bd50f64a3ca10b993ce1a3853cc93655bb185a4a3d7d8bc73c7c7288b9941fde3cae11983cec4adcbbbcaac73bd63cd4ba2feee0bce64debbc517cd93b2b5dba3af05574bd22488f3c84a1f7bcbc3056bc5b0a713c8e22ed3bd69c813b22e8613a2063d83ce733a7bcd3313cbc7a80afbdd6a923bdcc1b0c3d908eb73c982af63bbb24393c4df854bdb328cd3c0eccb23c3259a6bb385697bc08dc63bbbd23343c962bfbbcfed4dfbc8afe15bde8ac133cba9eaabb8bfd90bb8b2477bce136b63c3ed916bd638c6b3aab9930bc37ea4c3cec57fe3c84f4023d487450bda914a7bc42f150bc820f4cba69e9093b0d60e8bb1b3f81badc2ca33c00c7b8bb769c7d3c74f00dbde0bd493cc737da3c33d2923d71feb4bc0f451f3d537bd43ce7d3793a3f5fa53b50e928bbdc46e7bc75ef88bbdf37bb3cec3d3abc9e9331bb787492bc95a5ecbcf446c8bbc5a5ae3b03df72b97b8ccc3cd23edebb7f8a42bdcd27a93ccc1b0c3da1858a3a8f154b3dc936553b0c47a93ccc1b0cbd66f730bcb4aedb3b1bdf53bcf2dafdbbfcc8c23ce4ae1d399f860fbdb1a3433cec3dba3bceba59bd16d540bd9c01863cb4a1b93b2cc9043d99891e3d9a028bbc77ee833b23ce1d3df8371c3db594973a18c7193c4a73cb3cc23a69bddfb12c3d7e1ef8bca10b193d8e22ed3cdc2c233c4b7fe8bca1858a3cd49d86bc1f707abc63ec983cc19a16bb4bf9593c9aa25d3c7f7d203d44d607bd6d6d8e3d6d7a303d0b544b3c0f451f3c0eccb23cba9e2abb716b043d3fff773c6472a7bbf75160bcf7443ebdac129dbc3ed996bd52e8233d2cc984bb91a7f63a3465c33b35d10d397c7208bd58f2b63c6d6d0ebca4aa66bde13636bd5e6814bd716b043d8786aebcd2b84f3ce6ad18bd0bc11a3d7b063ebda430f5bca09f4e3c9b1bcabb5defa7ba37d088bb595e81bbed3098bb716b043db21c30ba185acabaa2317abcf1b49cb81151bcbbe1a3853c28cb0ebde229943cecaa89bc24478abcd62f32bb4debb23c7b063e3c18c7193d3ee6383d6664003bfcd5e43cddb2313c9e0d23bce925003ca10b99bc2f4e0e3d7814e5bc8978873cc7375abb5a7740bc4c0577bbe726853b17418b3c42d78cbce0bdc93ba30a143d19e0583ca914a7bc2cd626bbd29e8b3c9d87143d99891ebcbab86ebcd0ac323cd3313c3ced30983cb328cdbca7088abc851a643cb3356fbb4668b33c36571c3c083c113df93697bc8f154b3cbda9c2bcf02e0e3d66f7b03a5361903cebc4cdbce9c5d23b4a59073c0856d53cee3c35bc4ef7cf3c678ae13c8b7702bc65eb93bc3664be3becb7abbb17d43b3c0664fc3c9b28ec3c38f6e9bb19e0d8bc0a482ebb56f3bbbc
d49d063d768f5b3dcca19a3bec577e3dab99b0bb9114463b0ce77b3daa2de63cecaa893b2de2c3bcb01db5bb6b6e93bc446938bd5bfd4e3c37eaccbc26d935bdb20f8e3c4fdd8bbd6c9474bce3288fbb4fea2dbc9e0001bcb6a034bcf6d8f3bcdecbf0bc45ef46bde93f44bd6081533d558771bdb8920dbd7c12dbbc0bc19abac936553d8a9146bcf93697bbfe5a6ebc3962b43c8b17d5bb9220e33be1a3053c314d893bf7b10d3b44d607b914d6c5bc305a2bbc4bec373d716b84bd9810b23cdcccf5bb8afe953b87790cbd53db01bc10d84fba4deb32bd0dc095bcc5a5ae3d033f203c6989dcbb646505bc87790cbdd5b6453bf7b18d3cf2dafd3c23542cbd65f8b5bc576c283ca10b19bd28cb0e3d6d6d8ebc17e15dbc69895c3c646505bd7a802f3c13e3e73c5c835d3bc8b0463cec57fe3c9b286cbcdf44ddbce92580bc576ca83cad8b89bc5e75363d296b61bb58e5143d5ce30abd87790cbcebd1ef3ceb311d3c13e3e73a2e5b30bc575f863d1668713b82f587bcbb31db3c70f297bc5779ca3b7c7faa3b06575a3b3067cd3b666480bb2de2433cffd35a3ddf373b3d9611b73cbdb6643ae1b0a73c967e06bd44e3293d82f507bd5ce30a3c4a5987bb8c9de3bc908e373ca69c3fbdaf8a843c9114c6bc50036dbc33df343d4a66a9ba7c1ffd3ad53037bcaa2de6b9e2c9e63c9598cabc8918dabc71fe343c517c593c8c831f3c80f60cbd135d59bcd523153c1bdf533cd8210b3cd43dd9bceab8b0bcc6beedbc6b7bb5bb3856173d53db01bd4e04723c22db3f3de342d3bbea3e3fbba516313d4dde10bd0fbf90bc3b7bf3bcf638a13b6ef31cbc9f860f3d0adb5e3c37ddaabc0d5346bdbb1797bc6b6e13bcf3cd5bbdb8920dbc2b5dba3be2c9e6bc4c58023b595e013c517cd93aa0922cbca69c3f3cd63c543c91fa01bc6f197ebb276c66bc4f7d5ebbae2b5cbcf7beaf3cc936553dc1a7383c51dc863c8b0ab33c0849333d5a77c0387317f4bcdc46e73c28f2f4bc55608bbc21cfa2bcec57fe3c9f0c1e3c0f5f633ca291273c958ba8bbf4ccd63c33f9f83bd0acb23c8b77823b7c12dbbc60670f3cc19a16bdadb26f3b928090bb2b5d3a3c727721bceb319dbbe34f75bbe84c66bcc19a963aaf8a84bc9b88993c9caefa3c1ddecebb7df896bc5e6814bd696f98b9857a113d396fd63bfac9473c95a5ecbcf7b10dbd51dc063d2e68d23b0a3b0c3c0bcebc3bf5b212bdd0ac323c8a8424bdb20f8ebbbf158dbd4c0577bc1dc40a3c0140a5bc38e9c73bc3a6333d981032bb385697bc6e936f3c00c7b8bb5af1b1bb3ffff73ac81d163c6671223d962bfb3c1e64dd3b79faa0bcf23aabbbe13636bbd69c013d44d607bddc2ca3bc4becb73c28d8b03c9280903ba4aae6bc75166fbbe72605bdd23edebbbe9ca0bc4debb23cf9b0883b6f0c5cbcc6385fbbe84ce63b768f5b3b314d89bc0133033d296b61bda82e6bbc4e04723b05de6db9bd16923bff33883cb2afe03a2fee603cca2933bdb21cb0bbc6be6dbc769cfdbbcf26a43b7c7208bcb89f2f3a33df343b07c324bda317b6bc17d43bbcc5a52ebbb01d35bc537b543ce3a200bc69967e3bcca11a3cb335efbc716b843cb3356fba6e936fbcf9b008bc678ae1bcabb3743c3ee638bc7ff791bc88ff9abc0763f73b9a1c4fbb9d1a45bc6f6c893cf22d09bd9d1a45bcfb42b4bb9e0081bcaba652bcb693923c8c831fbd817c1b3d7c72083d2ed521b928cb0ebc256debbaee2f93bc7284c33c4e641f3c595e01bd0a3b0c3c87862e3aac8c8ebcd62f32bd01e0773c779bf8bc415e20bcf5b2923c6e936fbb6206ddbcaf8a843cebd16f3ce342533ca907853be33531bc8e8fbcbb9399cf3a14d6453bde2b9ebc4bf9d9bc88ff9aba647fc9ba8003af3c47eec13b666400bd194da83ca092ac3c3259a6bb8afe15bd33d292bb4fdd0b3ca30a14bc1c4b1ebcf552e5bcf7443e3d9a0f2dbc49ed3c3cea3ebfbcbc9083ba9e0da33c869350bb6074313c608e753c5def27ba5fee223b8b0ab3bce5278abc9c0e28bcd4aa28bbdf517f3b0c4729bc43f04b3d276ce63b8494d53c3d5388bb4469b83cb932e03af23aabbc2056b6bc40e5b33c9280103b897887bc928db2bcb02a573cb8acd13cb5346a3c23614ebbf1ce603ce9d2f43c820f4cbbcea015bd46d582bc8afe953b7b8c4cb98a91463b7f7d203d3cda9bbc2e5b303dce1a873c769c7dbad82eadbc9b88193c2de243bb84f402bdc09b9bbcb8b9f3bb004d47bcb21cb0bcb6ad56bcc23a693ce34253bb0dc015bb8e226d3c475b11bb9f860f3c8600a03ac22dc73c947f0bbdbb17973b536e32bd7682b9b921dc443dc61e1bbd16c89ebc5c907f3c4c65243b1ac6143c84a1f7bce9c552bca5098fbc083c913c0cda593c54e79e3cac129d3a2e68d2bcc4bff23b8010d1bc3b61afbaffe0fc3ab40e09bd7178a6bb8f9bd93ce54ef0bc47fb633b696f98bc26f3f93b9d1a45bb07b6823cf15
46fbcfd348d3b71feb4bccf2624bd9c01063df048d23c3ee6b83c3ee6383c20e9e6bb3ce7bd3ad7a81e3d7c7208b9013383ba41fe723cbc3d78bc10be0bbdaf8a84bca5a9e1bce5d47ebcb2afe03b415e203d4debb23b0fbf103cdfa40a3cda2da838c22dc739ddb2313c07c3a4bc9ea0d3bbd9c1ddbbf8379c3ca99a353c19ed7abc7092eabb415ea0bc63ec98bc3f5f25bddfb12cbc12bd06bd396234bc595e01bc618df0bb6f792b3cf3536a3c1ac694bce1a3053cfcc842bc62137fbddda58fbc6d7a303d857a113d837b16bd8494553c23ce1d3d45e2243c22488fbc922063bc6d87d2baad8b89bad29e8b3cc5a5aebba10b19bce72685bc79fa203dd5b6c5bbcfc6763c0adb5ebcc81d16bc67ea0e3b91a776bc2defe53cf751e0bc064a38bc7f8a423c083c913b3458a1bca483003c6b6e933a38e947bd58e5943bc99602bcb21cb0bc730ad2bcd52315bd8ffb06bcf04852bc396fd6bce8ac93bc7e11d6bcf439a63b0ed9d4bcdecb70bd9d1ac53c6f197ebbaf97263df943393c29519d3c91fa81bce5d4fe3b6c01c4bc05c4a93ccf33c63c4e04f23bb9189c3cc41fa03b837b96bb7085483bff33883b596b23bda99ab53c8978073d23542c3c4867aebc296be13bd8210bbdebc44dbc6d7ab0bc6ef31cbb08dce33cefc2c3bcc19a16bc4f7ddebbe22994bc54e71e3d536e32bc95a56c3c2b6a5cbc961137397f8a423c74f00d3d4c0577ba55608bbb475b91bb45e2a4bcff40aabb49e01abcd7a81e3cb02a57bc02b9913b2e5b30bcb3951c3c7715eabc02b991bc36f7eebb50e9a83b8b17553c025964bc5ce38a3c798d51bc728443bc7f97e4bc9107a43ab3356fbbb8acd1ba77fba53c24e7dc3b2def653cd23edebc1dd1ac3c8afe15bc8600a0b9ea3e3f3cf2474d3c9213c1bc908195b8f7443ebca68f1d3dff3388b9ca1c113d36f76eba5ae48fbc8d16d0bce5342cbc1deb70bc4881723c9c0e283d9e93313cf1cee0bbf8379cbcd3249abc3d53083dd5c3e73c8494d5bce335b13b730ad2bb26cc13bd57794abc28cb8e3c3fff77bce72685bcd649763cb129d2bc2e5bb03c80032fbcbbb7e93a36571cbd49fadebcfd412f3c99891e3d15e262bc928090bc7d7ea5bccb42f2bb46d5023d9b88193c', 'Array(Float32)')), +('<dbpedia:Swiss_Alps>', reinterpret(x'83ba7f3c5d6b083d37809ebcb5f2f93c85b473bc6d279ebcdb654cbd482ce3bc1ce2443cc346c9bc0e9bc0bc2f1d313d35fe413aae99d1bc9290abbc7ee02dbdb677dcbcdd6c0b3da830d83cf68888bc0b3680bb8449a7bc13148bbd978493bbb9f32c3bf67b3d3c05cd06bd3c7180bb577061bd48b145bd9e58593d2caba5baf67b3d3d5a749a3cc54d08bdc837ab3bb38a333cdcdde3bc5c6449bbf8fd193daf1bae3c577d2c3c0832473cb50c103ba14f47bd03b9fcbc147fd7bc35f47c3ca83a1dbce927ee3bb68121bd24cd1a3d9c682a3de53d4bb964ba6bbc1966f43ce5b8683c6ba8c73d093c8cbcce1bc2bce8bc213d35795fbc2b338ebcb50c103db50c10bc720ebb3ac9bc8dbcb50c903b8b3703bb0a2cbb3c0e91fbbbf18adb3944ca283d5a674f3cf8f04ebc701e0c3c8152393c5ed6543b948a1f3ced1bd63cd19a98bca73da33c01d99ebdffd1dfbc2241793dce280d3d61d048bc3af2a9bc5389c4ba9fe7003d340148bae9276e3cb681a13cdd52f5bb3401483b0ab423bd06451e3d3f51debc04d00c3d387d183c67b159bc9412083c967ace3c96ff30bcbee7943b501739bae5c22d3d7cd96ebd00cfd9bc634f1fbaecb0093d5ed6d4bc108b6fbc473cb43ccda32abd6ca541bd0ca1ccbc404e58bc59f2bdba4ca8333d4c20cb3cfaddf7bc4ab8843c7104f639dce7283ca439eabbbdea1a3d2c3008bc8833ca3bf012443da1427cba3a770cbdbfd7c3bbddda5d3caaa2e33c4f24043d68ae533db871503d99713cbd6eac80bc4345c6b8f58b8ebcc059a0bc32149fbc0e16debba2598cbc5cdce0bc3870cdbb5afc02bb874095bc6bb20c3db018a83c577dac3b27b7bd3c708cde3c4e1d453d7877b4ba7e5bcb3c7196233da23ff639414bd23c9405bd3c586d5bbcc8bf933ad3940c3d215b0fbd7104f63bfcdaf13bc0e1883d108bef3c54fe553ba1d4a93c110dcc3cbb683ebd463f3a3cc82de6ba86b16dbc3e54e4bc4c9be8bbeda0b8bb2241f93a02446bbda3ce1d3ddf59b4bc28c1823c9c68aa3ade5cba3b7c6b1cbcb6fcbe3c37809ebbdc62c63c49a4fa3cce288d3d360887bcbbe3db3c4839aebce1cec53c3b6a413dc353143d7aec45bba92dd23acaac3c3df18a5b3d43cdaebcd108ebbc36f1f6b9abaca8bc3ae55e3cd108ebbcc353943cc3c1e63c5aefb73c03d312bd9c68aa3c121791bd833fe2baa8b5ba3c0a39063bc82d663dc54d883c52112d3ddb7297bcbecd7e3c1ee983bdca9ff1bc67be24ba60dd93bde2cbbfba67be24bc0b29b53c18eedc3b8930c4b
c310a5abc4ab8843c85ce89bc04c3c13c2b330ebdf700a0bd3676d9ba4255173dbecd7e3c8930c439fc6c9fbceb21e2bcaa3717bd8d17e13bf294203dcaacbc3b1d67273cbd62323dfc5f543db9f32cbc9a6eb63c5d61c33c5f4e6cbcb38a33bcb093c5bbf01244bde8af56bd6148603ce834b9bbb681213cd4843bbd24c0cfbb833fe23c824f333c8b2a38bc3117a5bb2051ca3c22d326bd7196a33a89b5a6bcfbf4873b121711bdf202733cae99513d8152b9bb5017b93ce7bf273db774d6bbc2ceb13c62455abd7c6b1c3d301a2b3d98f9a43cc443433d3a778cbddc62463c7bf60a3d7390173c9fe700bd0054bcbca2590cbb63cabcbc2bae2bbc920bc9bb9502373dca27dab94f2404bdc059a03c093c8cbc405b2339034165bc1285e33c2a29c93cb6775c3d64c736bd6b2d2abcf8f0ce3c577dacbbd5091e3cff638d3cf11c09bc586d5b3cf8fd993c1195343d006107bc9f5f18bce3c8393c093c8c3b596a553d4c2d96bc9d65a4bd24cd9a3c9be64d3d2b1c7ebd6bb28c3c786a69bcd8f3403b2144ffbc85b473bd60dd13bc9d65a4bb9ded8cbbc1de023c8546a1bc3d576a3d21c961bdda6852bd4ea2a7bc6940813c97efdfbc4c20cb3c1970b93c3f5ea9bb42d0343c5583b8b87c6b1cbcb7eced3c721b063d4ca8333abb75093cf20cb8bcda68d23c110dccbb7be9bf3b397a12bd8fa608bd9b6b303ddc6f113c33044e3c80620abd3b7406bc235583bd21d62cbd0e9b40bd093c0cbdfaf78d3c3c7100bdafa3163dbee714bb0e165ebc9eea06bc2d2d023decb009bd52968f3bd477f03b5017b9bcf11c893c8d927ebca349bbbc06bd353d20d9323b9e58d93c18fba7bbe927eebcec2ba73b758046bcb2129c3ce1ce45bc5c57febad40c243cd606983d778785bc03d3923cbbed20bd758a0bbd111a173d1217913c5f5bb7bc0061073dd27dfcbbfed465397fdd273dc54d883db7eced3b46b7d13c893d0f3b6245da3cdce7a83c9e5859bd8baf9a3b26c78e3c27b7bdbb8f993d3d976a7dbb18733f3bab1afb3b3586aa3c101d1d3ca15c12bd68bb1e3c35795fbdee251b3c9ce347bd730b353d26ba43bc3b6a41bb5479f3bb9be6cd3a4f9256bce253283bcd2b13bdc6c51f3d05c03b3c66c12abc9308c3bc83d10fbc7bf60a3d1966f43bd3940c3caf0ee33cf09a2cbc387d183d1bf2953bffd1df3baca922bd2da89f3c4e278abca6c891bc730bb53ba446b53c253867bb387d98bd3705013c4248ccbc157cd13b6148603c6a30b03cc1de023dd40c243cac310b3c1bf2153d80553fbdcb24543cded7d73bfb6f25bd9e58593c89a85bbc0f2023bdbf5ca6bcd28ac7bcf197a6bd9be64d3cdbea2ebdcc214ebd9096b7bce8bc21bd8ab2a0bbdc6f913bf11c893d26bac3bb46b7d13c8e9c433c60dd933cc2569a3ce7bf27bc301aab3bc8bf13bd2355833dc1d137babaf0a6bccf18bc3cc0c772bc40e005bd0bb19dbca052cd3c21c9613c12922ebd52960f3d101d9dbd7ee02dbb3ddccc3b66c1aabc0548243df875b13ca142fc3b3c7100bb508f50bda5be4c3cedad03bc6d1a533cab1a7bbbd015b6bca2d1233d6aab4dbbbedac9bb224bbebc224179bbc1c46c3b4ab884bc62cd423a79f990bdbbe3dbba4a33223d89a8dbbbb584a7bcac9fdd3a1c6a2d3dbdea1abd225889bca1427c3c309f0dbddb72173dd67eaf3ce7bfa73cdb72973bb19d0abd6da2bbbb79f990bcdf59343a99f61e3d77ff1cbb160134bc292c4fbc55766d3ca15c123d643fceba57020fbdfa722b3c0dab11bc8a3a09bdd9f0babc8ea98ebbafa316bdd5fc52bb044b2abd5393093d6aabcd3c80620ab996ffb03c0e91fbbbf309b2bcb40f963babaca8bc5e51723ce72af4baa6c8113dc4500ebc7b61d7bc957d543afe66133afc5fd4bcbae6e13cbce0d53c4345c63cd015b63b1af59bbb1a636e3d4a3322bd454c853b9af318bcc059a03abf52e13b7d5e513cb37d68bc8d927e3cd5091ebb2642ac387e6510bdf67bbdbc5490033c45c7a23c77ff1cbbd112b0bbed28a13bde69853c3ddc4cbc9a61ebbc9fda35bcbfd7c33c5f5bb73c6f9c2f3c8f1e20bb3117a53d9fe7803b672c773dcda3aa3b96f265bc4ab884bcbb7509bd414bd23bba780fbc3ed9c63af58b8e3c16f4e8bb8930c43cb6fcbeb8a0ca64bc6e2418bdfde1303dae1e34bda8b53a3ddd5f40bce44d1cbd3207543cf9ed483bab3491bb75f85dbbce96dfbcc8372b3d1970b9bd0ca14cbd6daf86bcf9fa133dbd62b2bcd67eafbc920b493c7602233c3207543d6b2daa3c7390973cde4f6f3cb97b95bd41c66f3bf9fa133c5204e2bb2da81fbc78fc963d0f20a3bb454c053d577dac3c709929bc0f98babbe737bfba0d26af3c8e2126bcd09d1ebd6058313c4b309c3c6d271ebdf9e0fd3a015e01bd414bd2bbf58b8e3c2c30883c29b437bca64029bdd27dfc3c7a64dd3b62cd42bc90896cbd8d92febc5a741abc083f123d7cf304bc5a
fc023dbfd7c3bcf11c893b9eea063dabac28bcc0c7f23c777ababc976a7dbc730b35bd55766dbce350a2bc6342d4baa24cc1bc2ea519bc18733f3cd703923c86cb03bdc443c33c3c71003cab1afbba5f6802bb3de9973ca2d123bda15c12bcdc6f91bbad14efbb4f9f213a6058b1bc39f52fbc79efcb3ba6c891bb35fec13b5204e2bc0d262fbc8055bf3bd67e2f3d309f0dbda5cb173d6ab512bc243bed3b309f0dbc5576edbc6aab4dbc508fd03c7505a9bc9e629e3cc1d1b7bc1ee983bcb5f279bc58f543bb6549133bad9cd7bcec1edc3bde4f6fb9fce436bd0f98ba3cce288d3b9b5e65bc15899c3c5c6e8ebcd09053bd434546bde0de16bcf481c9bcde5cbabc5114b3bbf48e14ba18fb2739d9fd05bd1fdcb8bb6148603c43c063bd5aef37bd2d233d3c4e95dc3b5393093d527fff3c9405bdba710476b62d23bd3c1b60e83b3cdfd23c17f1e2bb767dc03ccf2507bd5c577e3dc6c59fbc65c4303d4f24843ce8bca13b41c6efbc2a29493c38ebeabc8ab2a0bbbdea1a3d8c27b23b64baeb3c254532bd1679cb3c8d9f493affde2a3d06ca003df9fa133c1776453c834cad3a83c74abc83d18fbbeab6953cbdea9abaee251b3a17f1e2bcf8fd99bc0a3986bd5ed6d43c06bd35bba3493b3c45c7a2bcfc5fd43cbddd4fbbe2d80a3c2355033b1cef8fbdc4bee0bc9011d5bc8cac143b86431bbd8546213db77456bc9283603d101d1dbd0bb11dbcaf1b2e3c834c2dbdf20273bcdc6246bb1188e9bbd49106ba1a63ee3ce84184bc108bef3c654913ba120ac63b8d9fc93cc04f5b3c28c102bded28a13c710476bd2baeab3b48b1c5bc4f1a3f3d49a4fa3c1fdcb83c9fe7803c141105bc758a0b3c4bab39bdbf5261bc7099a9bcebb30fbd1c5de23c121791bb28c1823c4542c0bcf202733b7dd6e83b0341e53cff5642bcd112b0b94d98e23b47c1163d01d99ebb728652bc8d927e3ae7b25cbc5d6b083d21c9e1bc77ff1cbcf20cb83c1195b4bb5d6b08bb701e8cbcb6775c3b930843bb28c102bc44ca283c18697a3d4ea2273c7196233c482c633c53015cbdd1086bbc8242683b9a6eb6bbb96ecabcc0e1883c9193313c29a76cbb21c9e1bc2bae2b3d22d3263de7448abc6e2498bc053bd9bcc05920bc9eddbbbbac17f53cf57ec3bb9283e0bb7adffabb6a3030bc397a12bb82d415bc292ccf3ce72a74bb6c1dd9bb8d92febc4f2484bb82ca503ddce7a838709929bc6739c2bb7bf60abdcba9b6bbc8a5fdbadc55fb3bce1b42bac3c1e6bc48be903b205e95bd07354d3b6e926a3cae1e34bc88ab613cd978a3bbb018a8bcded7573c8f8c72bd5f6802bd3fcc7b3da44635bde53dcbbcdd5fc0bc47c196bbf005793d90a3823cef2215bddc55fb3b21447fbb1b6d33bde3c8b9bc0061073d69b818bdce288dbc99ecd93be04c69bc67bea4bb5d54783d25bd49bd0742183d1e64a13b4345c63cf5062cbc69b818bcab3411bcf60326bdd40c24bc5ee39f3dd491863c3ed946bb8736d0bcde5c3abc8d24acbc0061873b9096b73c41d3babcd40ca4bc40c9753bb5ff44bc1307c0bb5fd3cebc843cdcbbd477f0bbf968663b8cac943c94053d3cf700a03c2448b83c7ed3e23ccba9b63cd70312bd7cf3043b8242e83c47b44bbc8d9f493cecb0893bd27d7c3cb87e9bbde62d7abc350b0d3c39e8e4bcd37af63b8c1a67bc5b71943ca1427c393489b0b85c57fe3ca8c205bd48b1453b6da2bb3b370581bcc4500e3c01ccd33b663c483bdf59b43dbc72033d89a85bbb9b6bb03c0a2cbbbbfbe73cbd06bd35bb8ba5d5bcea24e83b20cce7bc6ba8c7bcd108ebb91d5adcbc7ce6b93c8d242cbdf3919abcce965f3c587aa6bc38f8353d0dab91bc337febbb39e8e43c3b7406bc03d312bc577dac3c84b7f93c233ef33c2d9bd43c8b3703bd47c196bc9ce3473ba142fc3c0fa505bd80daa1bcfcda713cd77164bbde4f6f3db7ecedbcafa3163db19d8a3c22d3a63b58ff883cc443433dba780fbd130740bd844927bc56f8c93c721b06bccb24d43cd4ffd83cef15ca3ae7448abcff630d3d121711bb034eb0bc396d473bac310b3dc35394bc7bdc743c6155ab3ced28213c46b751bc2144ff3c5114333d3207543c90a38238d978a3bcbfca783c5e51f2bcf18a5b3cbc72833df48e14bb701e8c3c93158e3b2a36143d48be103c05c0bbbc01cc533c4b9eeebc3579dfbb86be38bd121791bce54a96bbd6f94c3cdd6c8b3cc7c299bc9fe7003d370581bb6a30b03ccda32abc2b330ebdfc5f54b8c8bf933be927eebc9a7b81ba6f8fe43a55fb4fbc84b7f9bbf968663a3c71003c80daa13bc9af42bc46c41c3c405b233da052cd3c86439bbc967acebc06b370bcf9e0fd3b66460d3df012443be3d504bd748d91bcf202733ca9b2b43bfe59c83b6c2aa4bb5107e8bc42d0b4bcf28755bc82cad0bce3d584bd08baafbb3e61af3c9a7b01bd0bb19d3cecb0893d6d1a53bc654913bc3311993bb0a090bc138f2
83c9a7b813ad9fd05bc2b330e3dab27463cef2215bcd70312bc8e2126bce246dd3cc4c8253da35686bc9cf0923caf0ee33cb0a090bca23f76b9ea2e2d3cde4fefbabc7283bc3bef23bbd606983c9380da3c0a2c3b3c22c6dbbc111a173d24cd9a3ca23f76bcbce0d5bbfa722b3d40c975bd938da53be2cb3fbbe343d7b9d090533cc14c553c6940013d748d113c9e621ebbdc6f913c7099a9bcd022813c5d54f8bb2baeab3beaa9cabb2c9edabbbdddcfbcee1850bc2a29493cf5f9e0ba5aefb7bb2d16f23c8242e83ae44d9cbc5e51f23b472fe93a3fe30b3d67b159bce0de96bbbfd7433c93150ebcba788f3c7ed362bc7602a3bc5977203cd9f03abb23d0a0bb22d3263b9964f13a86b1edbc09aa5ebcde4fefbad9f0ba3cea2468bc3d64b53b547973bda3c4583d41c6ef3c2c30083c58f5c3bbcd2b933c58f5c3bb29391a3dc5bbda3c9308433c758a0bbc147fd73ba5cb97bc920bc9bc2f95c8bcf11c89bc9a616b3a2ba1e03c834c2d3b9283e0bcaaafae3c0d9e46bc4bab393cd9f0babc2e20b7bc86b16dbb108befbca5beccbcea2468bc8ba5553a7fd0dc3b52968f3c0061873b396d473b14043a3bfe59c83cc6c59f3c0151b63cda759dbcb87e1bbca2d1a33c672cf7bcc4c825bce9b91bbd2538673bc6c51fbd662ffd3c748d11bce834b9bbe04ce9bb8c2732bb833fe2bb36f1f6bb65b7653c701447bc0341e5bc09b7a9bcc2569abc5cdc603a96f2e53c87aee7bc7bf60a3d1fdc383cde5c3a3cc73a313c824268bc61552b3bc64a82bc941208bc2355033cedad03ba748d11bcc73ab13b65b7e53b3608073c5680b23b05b676bca7c58bbcac17f53b40e085bc74fb63bc4ca833bccaac3c3c1c6aadbc46c41cb967be243d88b8acbc508fd03c55089b3cdc6246bc720170bbe2d80abbcda3aabce64710bd309f0dbc5d54f8bc04d00cbd7483ccbcd302dfb824c0cf3cde4fef3ccab987bc1cef8f3c9874c23b88ab61bcc837ab3a63ca3c3c730b35bb2241f93c29a7ecbcf9fa133c1ee9033ddd5f40bd93150ebdd9f0babbcc214ebc03b97cbcb2121cbccf2507bca3ce9dbc9308c33c29399a3b8f1e203c405ba33bb96e4a3cc54d88baef9067bc8a2dbe3c1382ddba75f85dbc58ff883cc54d883c2ea599bb4bab393b40d6c0bc967ace3ba9b2b4bb7f58c53ca73da3bc93158e3c225889bcee9d32bd18733f3d893d0fbc01d99e3c97fcaabc5d5478bc1d5adc39309f0d3db6eff3b83e612f3cd27d7c3bab3411bb83ba7fbc8c1ae7bc893d0fbdf10fbebcdfd4d1bbfe59c83cf67bbdbbdddadd3c87aee73c1776c5bbb305d13c8a3a893cfcdaf1bcb28d39bce0d14b3c3e54e4ba21447fbb6926ebbc7a71283a015e01bc778785bcd28a47bbe54a96bc7adffabcf09a2cbc44ca283c27c4083b4ab8843b57020f3c1ee903bdfe59c83b074298bcbe5facbdc3cb2bbca3ce1d3d5576ed3c537c79bca3493b3c1307c0ba3e54e43c7ce6b9bc948a9fbc0341653c911894bc86431b3c25ca14bc56f8493bf30932bcec1e5c3c86b1edbcf57ec3bbeaa9cabc5fd3cebcb305d1b9093c0cbc2538673c9ae953bcda68d2ba111a97b9ada69c3bfde130bc197039bb35f47cb8454c05bd7ed3623c4e95dc3b5f5b37bc74fbe3bbe0d1cbbcdce7a8bc84c4c4bcbfcaf8bb4352113cb6fc3ebc61da0d3cc1c46cbc6aab4dbdc4508e3a083f12bca14f473dd37a763c5d54f83c197d84bc86439b3aa1c7debc61552b3d06b3f03cd02201bc52968f3c7505a9bb78fc163d2baeab3bded7d7bcbdddcfbc663cc83c9964713c034eb03be149e3bcdaed34bcde6905bdee9db23c527fffbc0548a4bba453803c1ee983bcf5f9e03a02446b3c7a645dbc21447f3cd02281bca23ff63c7bdcf43cddda5dbb55766dbc66468d3c88abe13a7de3b33b42484cbcaa2a4cb9e5c2adbb558338bc7111c13b6e9f35bd21d62c3a08ba2fbcc5bb5abc586ddbbcfc5fd4bcff56c23c3f515ebc0ea88bbcfe5948bce927ee3b1966f4bcce280dbd006187bccea024bc81d79bbb4a3322bc540b213ddd6c0b3da4c1523dbee714bd9b5ee53c7be9bf3c00dca43cf202f3bce343d73c9867773ac0e1083c85b473bc3fe30b3d6daf86bbab27463c8f8cf2bb8d17e13cbf5ca6bc2d16f2bb786ae9bc518ccabb2f1d313d9011d53cc44343bcde5cbabb74082fbd29a7ec3c444f0bbc1679cbbb2635e1bc0e16debc8930c4bc17fead3ac6b8d43cb40f163b0b3600bb7e5bcbbb108b6fbc0e917b3cd18d4dbc7f58453caaa263bc0e2329bd3cec9d3b2258093de3c8b9bc76f5d7bc587a263b4d98e2bc92902b3d4aae3f3d', 'Array(Float32)')), +('<dbpedia:Hamburger>', 
reinterpret(x'b5b8cdbbbe70523c4d12a7bcbd99903c1065623c90f22f3df90322bdd68d34bd1d1b16bda9e6d33c478e0cbd5942f43b6e6e9fbbeaef9a3c5a8527bcc17c24bcd43d84bc32b3b7bc5ee1a93b1fbc1c3cfe5f243d0d66083d49de3cbd011b203c3ca1773cc56d5bbc54010d3c4b71a0bcdb62a53ce0f462b8302054bb6ee78d3cff00abbc9717d13b07d575bc46597cbc2882c4bbf6c194bc67f8273b2cece9bcb567773bbe3a97bc0ded19bd0ed27ebca9b018bd43176f3c2c65d83b5296c13c38b040bcdfcccabc1f6b463c5687f83b2a9cb93cd8dde43c13ea22bcfde6353cfed8923cc0db1d3db1fd51bc7d74833c0d2355bddaf759bd521d533db39e58bc74e4163cc08ac73c1754483c435aa23ccbad973bb140853d57bc883c1c375cbcff5181bccc5cc13c4574973b6db27d3c0b4c93bc34cdacbc7e158a3c8aa4503ca888803adf968f3cbc56ddbc1320de3cc86b8a3b15c1e4bc39d8d83cd73c5ebd17a59e3c118dfa3c1548f6bb5db991bb2d57b53bceef24bd61748d3d9668273c7daa3e3c6db2fd3ceb9e443d6557213d6d7cc2bccabbba3c672e633b7ab9873b706df93c3354be3ca2f7ed3cc43720bdea3379bd12c28abdacaf72bdcd8459bc1c7a8fbce4284d3d2d5735bd11573fbca020ac3ce5a13bbc3bf24d3c698b8b3da8f4f63b2974a1bc43905d3d5e3200bd1e9404bd4d8b153d772624bd6ba580bd4d1227bc63fa78bdb0d5b93be7341f3db2ef2e3c1463113d39292fbc3e770e3c64ec553d70b02c3d43176f3c3c5d993db2efae3c1d1b963cea33f9bb95408f3b6d2bec3c48a881bc06fe33bda5b269bc1e5151bbbbb5d6bc90a1d93b8165babcb5677739d2238f3c4fdbc53cc65115bb570067bb54010d3ddef5083de7341f3d7f4bc53ccb34a9bc940bffbcfd378cba34461b3ba70f923c8a6e15bc8e87e4bca0991abc8f43863cf97c10bd0b90f13b9bfa643ce76adabcbc2022bdb490b5bc0c824e3deaace73c3f5cf33c67b5743d1b0f44bab1c796bc581a5c3dc950ef3b1f358b3cbdcfcb3c638e82bdb482123d2ef83bbc5f90d3bc2974a1bc080a06bd7726a43a9ff813bdee23853d9d65b0bbb62319bdeafdbd3c95c7a03ca23aa1bcd59b573d5c4ec6ba0ded993c3c1ae63c508a6f3d06f010bcb8fa5abc34469bbcb7d242bc0e4bed3c4f62573dcabbbabc227798bbe428cd3c5d765ebd4ad019bdb96526bd5badbf3c51f5babcad1abe3c5eef4c3c9cec41bcb31747bc0fb6b83c9cecc1bcc0548c3ce7f1ebbbe387463c36e721bd2e4912bbad1a3ebdb531bcbc43d390bc4fa58abb819b75bd949f88bd543748bc80b610bdf1225fbd54018d3c03f2613baca14f3db2ef2ebccac9ddbc370fba3c5a85273d239f30bc698b0bbe7c82a6bc23f086bd5d4023bd7976543de4f2113cf71fe83c02cac93c96764a3d3d0cc33cae42563cf3b5423d603f7dbc6b2c12bd1dd8e2bcddc0f8bc6f88943a00b0d4bcf42eb1bd6ee70db8629ca5b9464b59bb43d3103dd1b843bc6e6e1fbd00f387bd8a6e153d370fbabc92bbce3b3837523cc950ef3cccd52f3ce9844f3c70373e3cd5651cbc4531e4bbcf68933b3660103d4e3a3f3d1846a5bc331e83bcbf1f7c3db99b61bc361dddbc2ec280bd8d29913b634b4fbc8456f13c54010d3ce94e943ca539fb3c5260863c33db4f3dcd4e1e3d77adb53bbe70d2bc9cec41bc02940e3dde2bc4babf98eabc975a04bc715f56bdeb2556bdc65195bc9c3d18bd521d53bb93343d3de76a5abcb1c7163c5a85a73cf7e9ac3ab567773c698b0bbd7a68b1bd5c188bbbd2238fbd8420b63cf6c194bab14ea8bbb490353d62d260bc12f8453df8db093cf7e92cbd1548f63cbfb3053c5700e7bcb43fdfba6c03d4bca411633db490b53b214f00bd2e06dfbc8c37b4bb6820403d196ebdbce9c702bd2f2e773dc695f33bc2da773af67e61baeb25d639f07335bd7b5a0e3a0b5a36bde46b00bd81dea83c48b6a4bca8be3bbc8f79c13ca7cc5ebc98c67ab9c651953c1548763b41b99b3c4d04043c621514bd57f2c3bb5779d53bcd4e1ebc78d5cdbbfaa4283d53cc7cbcf71fe8bc5a77043c42328a3cb2acfbbc81de283df214bc3ca70f12bc5f9053bdcabb3abd1b0fc43aae42d63cab00c93ccf6813bd4bf8b1bcbf1f7cbc06adddbc6d7cc23ca2f7edbbedeef43bcac95dbc3c6bbc3ca112093d55a293bc7001833b0c742b3d5f17653c6ba580bc6009c23b80ec4bbc0f3d4a3cbb2ec53c4317ef3cf7a6f93c0b4c13bc8762433ddf960fba7300dd3cf4eb7dbd76859dbcbca7b33c80b6103dd090ab3ca837aa3b7a257ebd9b1580bcf3b5423d171e0dbdaf6a6e3c8499243d4e3a3f3c3d0cc3bc322ca6bc944eb2bcfd378cbc6db27dbdcd0bebbce85cb73b7ab9873c4d4862bd5d76debc41b91bbd457417bda2c1b2bc3f5c73bb9c3d983cc5e649bdd7b5ccba555fe03b74e4163d43176f3c8b96ad3c88cd8e3dac6b9
4bc187c603c784ebc3bdbe9b6bcc695733b03bca6bda929873c011ba03b498d66badb62a53b2fa765bc0ded193b9b8176bc7a25febc184625bcf63a033cb873493a5eef4c3c80b610bdc4beb13d31c1da39ae0c1bbd5345eb3ce9844f3d595d0f3ce20e583d27d39abcdd3967bdbb2e45bd93343dbdd82e3bbcf28daabcdcdb93bba96d65bd1c29b9bba929873cd03fd5bc4a7fc33b244e5abd3354be3c0fb6b83b5c180bbc7634c73c2b3d40bde8d5253cc17c24bdea33f93cd60623bb2e49923b01515b3c5687783c41b91b3c6ba580bcdc8a3dbc6e1dc93cdf1da13c7b17dbbbd131323dafe3dcbc1846253d95c7a0bcff51013d95408fbc045d2d3c6db27d3c89f5a63c35f5c43bf67e613c1803723b3f5c733da2b30fbca8454dbc27d39abc080a06bc7daabebc5a34d1bcb83d8e3bd03f553d8499a4bcca0c91bc67a751bd196e3d3b16b3c13c5eefccbcc86b0a3dac28e1bb2ef83bbd5f5a98bc331e833c00f387bc205d233bddc078bc38b0403cbdcfcb3c983fe9bc8114e4bbdf960f3c49572b3cfdf4d83c7f4b453d5abb623df811c53bbd48babc2ef83bbc38e6fb3b306307bc033595bc6a70f0bc0335953d9790bfbc7aefc23c530f303d06fe333c95c7203d73005d3b7a9e6c39c8f29b3c5d76debc7a68b13cfbda633ca5b2e9bc227798bcb14ea83c7f4b453d25b9a5bc11573fbde1e6bfbbbbf8893d7d3150bd6771963b975a043c7251333c642f89bc2831ee3bab0049bb38e67bbca148443c9193b6bca17e7f3c7d31d0bcf42eb1bb6c542abca3dba73cf2143c3cd5659c3c725133bd8854a03c4bea0e3d7fc4333c404ed0bce2d81cbd1bbeed3c5ee129bc55a293bb28b8ff3ce76a5a3be7bb303c6bdb3b3ca0999abbdc5402bd70becf3bfd6d473c239fb03cfd370c3b667fb9bc3fad49bc4ad099bc04e43e3b7daabebc03ae83bbd43d843b6ae95ebc715fd6bc0b90f1bc6f88143c586bb2bc2f71aa3c387a853c21d6113ddf960fbcea33f93c4f62d73bf71f68bd395f6abd79fd653b370f3a3c93343d3c47c447bacc5c413b6606cbbbfd6dc7bc517ccc3c7379cb3cb482123cf90322bcae8509bd5b262e3df648a6bcaa87dabbac28613c71299bbc05d69bbc789f92bc22fe29bceb68093d63c43d3cd5de8a3cd2aaa0bb8b45d73cc5b08ebb7de0793b6a3ab53bf63a83bc7f4b45bca454963b924260bb0bd324b9c445433cdc11cfbce0be27bd8206413c45ed05bb58e4203c2193de3c214f803c5fd3063de137963c02940ebb594274bcd606a3bd146311bcb2681dbd01d86cbca70f12bdc22b4e3c06fe333c1b45ffba6e1dc9baf7703ebc0e4b6d3c4b71a0bbba14d03b44fb28bd7a6831bcb225ea3be07b74bc79c72abc8bcc68bb1e9484bcedee74bcd182083cf7a6f93c41b91b3b00f387bb944eb23cc437203d706df93985c13cb993269abb3ce42a3bfe1c713ba66e0b3bb482123c5779553d633d2cbb16b3c1bce1b0043bd614463c2ec200bc275aacbc8d29113c306307bb77e370bc9d14da3cb74bb1bcb14e28bc715f56bbafe3dcb90677a23b007a99bc5d40a3bcf1ec233bc5b08ebb621514bcbb7f1b3d781801bd568778bccdc78c3bee59c0bc03f2613cff873cbd7343903cb026903be1b084bccf9e4e3c0c82cebc3e770ebd668d5cbb35bf89bc22add3bbea76acbb57439abcc78750bd70b0ac3c02948ebba362b9bc1e007b3b153a533b22fea93c4ccf733ceaac67bb03f261bc69121dbd555fe0bcf770bebccdc78c3cba062dbd35bf093cb567773c9e570db809321ebca4cd043d784ebc3bb6e0e5b9bf98eabbf98ab3bc811464bc1b01a13c35f5443dae0c9b3d8de65dbdcbad17bd4091833a67a751bd0335153b03ae03bdc0548cbcdf960fbbb8ecb73c7aefc23c09329ebc9c3d183cd43d04bd69129dbca45496bc8803ca3c8803ca3c7de079bb44fb28bde593183d13ea223d3bbc123dfab24b3a2ef8bb3b09b9afbacd0b6bbd158b293b46d2eabcc9a1453c18bf13bc8bcc68b9f1ec233c5dc7343d39292f3b8ae7033c079fbabc7d7403bcae0c9b3bb69c87bcaa519f3cf2de003d39292f3cb5fb803cb43fdf3bb026103d63faf83b3aca353cb482123d7a9eecbb8b962d3c089197bcc17ca43b7f4bc5ba57f2433df28d2a3d041a7a3c4ccff3bb677116bde85c373c5a3451bdccd52f3bf4208ebc4d48e2bc5942f43c2a8e963ceb17333c14994c3d42e1b33c5d40233b92fe81bcb026903cf5cf37bdec3fcbbca66e8bbb5db991bcc26e01bdbf986a3b1c2939bcf28d2abc28b87f3c01a2b13c706d793c0bd324bd06adddbb0fc4dbbcae0c1bbcafada1bbf0c40b3c7aefc23a275a2c3d3dd6873c8e87643d3d85313d69121dbc0818a93c9bfa64bd5dc7b43c24181fbc5abb62bcd1b8c33cec3fcb3c6f4561bb0151db3b4268c53a3801173c5c9f1cbb39a29dbb8c880abc6b624d3c01d86c3ceb25d63910de503c5f5a983d77e3f03c1d1b163d
eaac67bcfe5f243d13205ebb4ba75bbb2fa7e53a32e9723ca2f7edbbd0c6663d6473e73cbef763bc940b7f3c9f71023cf55649bcaf6aee3c09321e3d99aa34bdd9057d39f903a23c667f39bcbc565d396c542abc31048ebc7e232dbd819bf5bca8454d3ca9e6d3bc5bad3fbc5003de3ba845cd3b158ba9bbeb6889bcba57833c9ff8933b8fca17bb5687f83c8b962dbdf903223c5a7704bd5345ebbc263294bc2882c43a045dad3c3020543c71e6e73c1e43aebcdfccca3b2632143b153ad33ccabbbabcb99b613cc5b00ebd5db911bdade402bddc5482bc3ca1f7bbd77f913b86b3993c22add3bc7e232dbddf960fbc3788283c789f12bb68ea04bc1e007bbc60fb9e3c83f89dbc9334bdbc03ae833c858b01bd4ccf73bc93ad2bbd6f4561bc858b81ba6d7cc23c8bcce83b39d8583d9ff813bc3aca353c43176fbc6a3ab5bca6a4c63c7f4bc5bc18bf133d0818a9bb5345eb3b3b79dfbb4bea0ebb7b175b3db567f73b80b6903c4d04843c85cf5f3c1206e93ca411633c469c2f3b8fca973ca3db273c3efe9f3d60b86b3cb2689d3b2277183c5f5a98bc9b81763c949f08bdee5940bdda7e6bbb1ecabfba079fba3ca112093d2e06df3c318b9f3d5f1765bdfe0ece3aafada13b755d05bdd0c6663a263294bcce76b63c82d0853d49debcbcbe3a173c2e4912bdd43d84bb25efe03b3f9f26bda845cd3c0891173c84a747bd8de65d3dc437a03b0a6859bdd308f4bb6ee70d3d906b9ebce593183d53456b3c079fbabc87987ebc53889e3ce7349f3c71e6e7396948d83c3d93543ccabbba3c7e59683cc56d5bbd832ed93bb9de94bcd182083d205da3bc39d8d8bc71e6e73aeb68093d5437c8bcf5cf37bb581adc3b2d57b53ce85cb7bc3aca35bc6b62cd3acfe101bb31040e3dfaa428bc409183bc4d8b95bc586bb23b61aa483cec0910bd318b9fba32b3b73b16b3c1bc12f8453c26ab823b7a9eec3c2f71aabb3b795f3c1e9484bc72caa13c28b8ffbc20e4b4bb12c28abc387a85bc667f393c9e8d483c603ffdbc24c7483cf122dfbbf42eb1bb50cda23b9e0637bc9b15003d1ff2d73ba5397bbc32e972bc3c1a663cacaf723c58e4a0bc28316ebbd3d2b8bbb0d5393d3e778e3b0029c33caff1ff3a5b26aebba539fb3c9d65b03dd99986bb7a25febc331e83bcd9057d3cd68db43c73794bbca14844bce300b53c05d69bbb42e133bb3ce42abcb05ccb3cae8509bb18bf13bd8d5fccbc0f078fbca3e94abbd5651cbdedeef43ca17effba77e3f03bdc54023b24c748bc7a257ebc1b0f44bce4af5e3b70becfbb9fb5603c67b574bc4d12273d642f09bdea3379bb638e82bc8bcce83bd30874bceb9ec4bc7940193c95846dbbb1fd51bcafada13ca6a4c63cd0091a3cbd48babcf1a9f03c1e5151bbd2230fbdc3ccd4bcbec1a83adf960f3ca2705c3d8d29913cf122dfbb015f7eb9b05ccbbc8db0223d88034abcf4a79f3b64ec55bc473d363dd820983c4659fc3c3efe9fbc1f6bc63aeaac67bce2d81cbc1412bb3b5afe153d80b6103cc22b4ebaafada1bb2dd0233d29aa5c3c98c6fabbceefa4bc46d2eabb46d2eabc9242e03ce593983c239fb03c18cd36bb6c46073c06f0903b2c2f1dbc171e8d3b3f9f26bd872c083c83718cbbbdcfcbba1121043c8512933c6215143d5942f43ad381e23ccdfd47bb61748dbbb659d4bc1b0fc43b91c971bceee0d13c5db911bdd18208baccd52f3c29ed0f3d8b4557bd5c9f9c3c14994c39e46b00bd47c4473c9d14da3be2510b3c3b3581bb40d5e1bcbfb3053bd99906bc146391bca2c132bc13eaa23b827fafbc184625bb4fdb45bb77ad35bc95846d3c03ae03bb508a6fbb409103bc102f273c02ca493c8cbe45bdfd370c3b526086bb25ef60bcc26e81bca23aa13cbef7e33c331e83bbf6c1943c7aefc23b7e5968bca5397bbc928593bb3b795f3c78d54dbb751a523b8c37b4bcdac19ebc5942f4ba9e8d48bc2e7fcdbb015f7e3b57431a3c171e0d3d789f123dd73cde3ccfe101bc40c73e3c516ea93c482f933bfa61f5bcf7703eba5ee1a9bc65deb2bcacf2a5bc87987ebc1ad988bced31a83ce137963cf30699bc98821c3d8a1dbfbcdac19ebbd606233de678fdbc7818013d3b795fbb52a4e4bbd9057dbc033515bccb34293c92bb4e3d9d14dabc7a6831bc4e3abfbb02ca493c002943bd118d7a3b7b5a8e3dcabbba3c7fc4333cc9a1c5bc45ed85bca7cc5e3c521d533cb5313c3c940bff3b4091833bf2de803b7d31d03ac5e6493c3837d23c275a2c3c668d5c3cf0c48bbb9b4bbb3ce76a5abb6d2becbbf073353c08c7d23b65deb2ba8fca97bc6123b73ca66e0bbc050c573c153ad3bce33670bbfed812bd9bc4293d97e1953cb1fd51bc0ded19bcfb9605bd180372bcb659d43ca0202c3c0493e83be5a13b3dfcbe1dbdeb25d63c70b02c3bd905fdbbd3c415bca8be3bbccabbbabcc86b0abd464bd9bbde2b44bc5db911bceafdbdbbce76b6bb2b0
705bcd5de8a3bddc0f83c2b73fb3c48654e3c5eefccbcc4beb1bce1e63f3c3660103ceaace73cc828d7ba76851db97d74833c48a881bccc92fcba6a3a353c6ea45a3a318b1fbd63fa78bca66e0bbb1504183b6c8ae5bc64ecd53c755d85bba61db53c983fe9bb52e7973b975a04bda888003ca362b9bb04e4be3c6c54aabc01d8ecbb9285933cb8fada3ce4284d3cdac19e3cc5e6c9bcc30f883cba14d03b8165ba3b4c204a3aca42ccbce4af5ebc86b3993cbf98ea3b041a7a3c366090bc98821cbd158ba93bb490b5bc28b87f3c01a2b1bc6ab323bd4d04843a10a895bca23a213ce4f2113cff87bc3ca5f59cbbcf9e4ebc09321ebc90e40c3c5dfdef3cf71f683c6a70f03cabca8d3c700103bd4e2c9c3b6c542a3dafe3dcbbcc26863c2bc4d13bfa61753cec3f4b3c7cb8e13cc17ca4bb4fdb453d83718cbc8cbe45bb7aef423b5529a5bbaf6aeebcfed8923a99fb0a3c7b90c93cb43fdf3c940b7f3ce1e63f3d118dfabb9a9c11bddbe936bdee59c03ad5ec2d3c85cfdf3ca70f923c7d74833dec0910baf0c40bbd1938023daa875a3c2cb6ae3cedb8b9bb1f79e93c827fafbabe70523adf1da13b97903f3b46159e3b80b6903bad1abe3be2d89c3cc60ee23ca5b2693c9cb606bba362b9bcff873cbd3020543cb7d2c23c6ea45abb3f9fa63b975a84bd706df93b322c263d8206c1bc7f3d223c158b293cf556493d4865cebcf0c48b3b80b610bcd7b5ccbb5db9913bcac9dd3b99aa34bcc2da77bc8420b6bc36964bbc61748d3bc103b63bc60ee2bc041a7abbc2a4bcbc3efe9fba0fc4dbbab8fa5abc7726243ddc98e0bcdac11e3c22ad53bd9cb606bcc60e623c81dea83cd5ecad3bcc92fcbc8d29113c3f26b83bcd4e9e3cfde6353c2fa7653b54b0b6bb5fd306bdaad8b0bb79fd65bc1c37dc3c3d93d43b5993ca38cbad17bda0a73dbccc26863bcbe3d2bcb39ed8bc32b337bd6771163de76ada3a244edabc4914f8bb8e51a93c9a9c91bb27d39abc98821c3d9d65b0bc7726a4bb007a193cb2681d3cff00abbc629ca53c9cecc13bef8158bc473d363cbd9910bc0f078f3cb490353cc08ac73bc2daf7bc96efb83c7428f5bb69c1c6bc1065e2bb4bea0e3cf2de80bc2fa7e5bb5f90d3bcfdf458bcba06ad3c', 'Array(Float32)')), +('<dbpedia:Pizza>', reinterpret(x'fef33bbd5953273d4934a7bc29e7a83d96381abd6154483c5a276bb97d0ab9bc6791e1bb9d4977bcd697dc3c743692bb6964e7bc22d789bca2cf8abddea63dbca9ee27bcb81de43c1b9b2ebc07e5a0bc4835e5bc3906293dddb6793aaad02bbdf8a7a43de079433d743692bb9f0c41bcb80e66bcb80e663cb9f069bc4124083dea4cac3b92a18c3c6154483cfa5dec3cb59416bb6171863c89bfa53caadf29bd579e9dbb5871a337d94b283d239ccfbce7b51ebdc03b433dffd5bf3c8a92ab3b587123bd76eb1bbdf6d5dc3c23aa8f3cd21ecbbb2bba2e3d6882e3bb64fa533c10d7c33a3d80f83cd788debca80ca4bd249b11bd3ae82cbd07e5a03bf8b622bb9d57b73ab6761a3d3ada6c3b09a9283c12b8093d3907e73b961bdc3bd12d49bcf188073da7391e3d3e7f3a3cfb4db0bc5e91fe3c7ded7abd7182c63ce09681bc7c3733bcf35b8d3c9556163d5226cabc2d7e363bf19705bc75ecd93c08c862bd43cbd13c22ab4d3d91dd043df6e4da3c860a9cbb4be930bc73548e3dc4c294b8e7a620bce96aa83c37511f3de079433dc3e1ce3beee3b9bb2f33c03bd3f150bc717348bcb3b350bd8e0b3dbd1ab92abda8ef653c617186bd6b19f1bbefb6bfbc1f22003d62454a3b64fa53bc20f6433c154f973cb10c87bdd95aa63cb3df0cbcd20fcdbc9b856fbd18d8e43cf97be8bc01b705bd80bfc2bb5944a9bcb7581ebd5053c4bce5e218bc88dda1bc77dc1d3dd5c3183b3bd92e3c0d31383d81af863d970ba0bcfe023abaf26a0b3dea2fee3cf6f29a3cc4d1923c1a8d6e3c2bba2ebd2c9c323d8b6531bd280663bcdf89ffbcb585183c6318503cfed67d3c4f803ebd99c1673bf51097bc1d33fabc07e5203d8c47353d82820c3dfa7aaa3c9547183dc7771ebdf089c5bb2f4100bc154f973c5ccdf63ca3b18ebcbf4a413c0f04bebbddb6f9bcd85b64babd86b9bcc03b43bca70d623cad67b93a5ceab43b79a0a5bce96aa83ca2de083ca2c00c3dc93b263d037b0d3dfef3bbbc492529b9ad6739bce5f196bc7a82a9bb81af86bd5cf9b23c5307903986191abcaac26bbdf510973ccdc3353c89a2e7bc3833233d43dacf3ce6c49cbccf78bf3c3c9e74bc4042043c9aa36b3d052257bd6791613ba3a210bc0a7decbc53f9cf3c96291cbc0ff5bf3cbca5733d037b8d3cfd11b83d31e849bc9ac0293c5e917e3d81af86bd88ce23bd1e413abc6faf403cd05ac3bcad67393d6c09353d4844633dfd11b8bc89a267bc23b90dbdbca4b5bc970ba0bc62530a3de25a89bc7fcec03c9f1b3f3a8d2939bd0c32
f6bc617106bd18e6a43ce6c41cbd52264abc0f04be3bd77960bda2b2cc3bb6679c3caba4ef3cfa6b2cbdddd3b73c72720a3d70ae82bc1e15fe3b92bf88bd1f23bebca71c60bd96389a3b1704a13b09a9a83ba1ed06bc16319bbc1b7e703b4f71c03b21d8473c99de253d6882633dcbf0af3bbaef2bb998d0e5bc7fcec03c22ab4dbc382425bdc5a556bb9e0dffbc492529bca0eec4bca1d048398b6531bdf0b5813cf33e4fbdb02a033ce789623c10d7433c5062423dfa5dec3a579e1dbcac86f33c80dc80bd7974e9bcbd78f9bb97ee61bc2005c23bcbf02fbd66cc1b3ce5e2183de5f196bcda2d2cbd596225bdc12b073d4dbcb6bb8a84ebbbcf78bfbccdb4373dbd78f9ba053f153d3adaec3d2e603a3c1d42f83ce7b51ebd3842213ced01b63ca1fc04bd5cdc74bda485d23c6a46ebbce50015bd1b7e70bd45bb95bb608f02bc6b286fbc54e993bce15bc7bbb6761a3c3f61bebcb4b2923cd30e8fbce6b65cbd78a1e3bce86b66bd8e1abb3a3ae82c3d65dc573cdc00b23bd30e0fbdc10e493d938310bb2d61f8bcfe02babc4af9ecbb18e624bc3825e3ba19c966bce97926bc154f173d359c953b27419d3c7ded7a3dd4d3543dbd86b93c6edc3a3ceb11f23c2723213c954718bc8ffcbeba79a025bc5ccd76bb4f71403a1e15febaf32fd1bae7a6a03d75fb573d4cbd74bc138b8fbc4bf82ebd961bdc3cd95aa63c82910a3df188873cbd78793c136e51bdeed4bb3b4926673cffe43dbcfd11b8bc91dd84bc7c1a75bc9e1c7d3b92bf083c9aa36bbce41e91bb77dc9dbc2f42be3ca0fdc23c93844e3c41074abd2c7f743cf16bc9bc971a9e3b137dcfbbaab36d3c2bba2ebd33ac51bddf973f3dbbe02dbd8455923d08b964bcd06883bd9d3af93c164019badeb53bbcc3e0103cdb1eaebcd14a873ca8ef65bdb1fe46bcfb4db0bc65ea973a7edefc3c7edefcbbb3c1103c90dec2bcc4d112b970ae823c3f5240bd96381abdabc1ad3c5ceab4bce5f1963caf2bc13c492529bd21f5853caba4ef3cbad26dbdfb4db03ccbd371bbe798603d311406bce5f1163cb6595cbd4af96cbc239c4fbd88cea3bcccb5f539dde2b53c515286bb80b0c43c90fb00bdc12b07bd4124083c0c5e323d6245cabb098ceabcc4c3d2bbe17885bbc75a60bc3105083cd06883bc0d2378bcd23b893c5062423dad6739bb00b8433d5ceab43c136e51bd8fed40bdb039013d7d0a393d843854bcce873dbd1e323c3c2f33403c155e15bd28f7e4bced01b6babf4ac1ba0522d7bb265f993c6326103dbe5a7d3cd95a26bdbf4ac138f8b622bb81be043c09a9283d4861a1bb4680dbbc1e247cbcf19705bd00d501bdd159853ce7b59e3cbbb471bde5f1963bd22c0b3cc6869cbcfed6fd3c2004843c1d5fb63b93928ebb93928ebc18e7e23c0a7d6c3afa6b2c3dbaef2bbdf25c4b3db91c263b1623db3b3e62fcbc10e6c13ca567563cb02a03bda48414bcb73b603d506242bcd30e8fbc7255cc3c9aa3ebbbe96aa8bcb11b05bd6981a53d5143083dbe773bbca1fc843c7d19373d0f04bebc3f52403d2bba2e3c98fca1bb5ea0fc3c4a162b3dd5c398bcd20f4d3c2ac9acbc94749238f8b6223c5cea343c0e133c3c2715e13c1e24fcbcb3b3d0bcccc4f3bb10c8453cecf3f53c2805a5bbe6c41cbd616346bd853716bdf42e13bc3816653d6debb83b52264abcc6869c3c044e13bdb82ba4bc89b0273c7337d0bc7419543b9384ceba2bba2e3d413386b9bafea93ce5e356bc640894bc91dd04bde6b6dc3b860a1cbd036c8f3d5cf9b2bc87fb1dbc1b9bae3c22c88b3c1ab92a3d0513d9bcd7969ebc2f33c0bc42068c3cd87822bb87ec1f3c7ecffeba8447523bb659dc3c5cf9b23ca2c1cabc19d7a63c08c7243cb594963c1aaa2cbd6171063c6b286f3c01b785bc08b9643d5cf9b23c3bbc70bb3825e3bbbe69fbbcbe5afdbc6080043c2ad82a3cf96ceabcc93b263dd21ecb3c77cd1f3cdce3f33c469d19bccce1b1ba4a07ad3b8d38b73c9b84b139b495d43c4753e13c7b47ef3befb6bf3bfc3e323d7d0a39bc689fa13c9e0dffbbdf897f3c1e413aba302384bbf899643b4bcc72bdb4a394b919c8283d88cea3bce08783bc4af96c3c458f59bdf26a8b3cb72ce2bca3a34ebdd4f092bb75fbd7bc0c5e323c696329bd0f043e3c5781dfbb2c9c323c4f7f00bce6d39a3cf32f51bc6fafc0bceec5bd3a4043c23c10e5033dc21c093cc20d0bbdd12d493c93920ebcd05ac33a946652bc31f7473de78962bbcbff2dbb9e1c7dbb87de5f3c2c7076bdcbd3f1bb3e53febc034f51bb33acd13cedf237bc92bf883b2b9df0ba0e133cbd26509b3ca8ef653c29e8663c044e933afa7a2abd384221bac3e010bd404204bc3e62fc3ca9d1e9bc47701f3d92b00a3ca00b03bdf51f153b5dccb8bc8a92ab3b47701f3d028a0bbcbf593fbc2004043c7d0ab9baa0fd423bcbe2ef3c17f6e0bb66db993cb83aa2bcea4c2c3cb59416bdddc5f7b
c97ee613a90dec2bb65cdd9bc4e9ebabcf197053d053097bba493923c7255cc3cb64a5eba37435f3cdc00323dc74be2bc62530a3d6cec76bcb659dcbb6b362f3cc6875a3c3d80f83cc13a85bc1d5f363dc768203de89722bc310646bc3c9ef4bcaac2ebbbb2ee8a3c5ebd3abc852898bc92a24a3c5a262d3c3f613e3ce78962bd128ccdbafdf4f93ca2de883cc058813cdce3f3b9f5f3d83c0431553dd31d0dba7b55af3c530790bc200484bc1f22803c92b08a3c90ec02bd81be043dd6a65abde08703bc2c9c32bc0f0300bac75a60bcd697dc3b11aa49bd77b0613c0c4fb43bdcf1b33b7991a73cd3f1503ce87ae43b850bdabc08b9643cb00dc53b1e247c3d0c41f4bc514308bc80cd823b6b362fbd3907673ca2de08bb036c0f3c9556163d579e9d3c249b91bcfa7a2abce7b59ebc7b46b13b7fdd3e3d42f84bbc9e39bbbbd5c456bca558583a852898bcd06803bc961bdc3c7983e7bc146d93bcccb5f53b844694bd954718bcb495d4bbb4a394bc79a0a53cf6e31c3d7983e73bb9f0693b8b83ad3be7a6203c53f94fbdd85be43cac86f3bcc6869cbcce96bbbcbc9675bae32d0fbd70bd80bc2551d9bc22d709bb2bab30bcb039813c08c8e23cdb1e2ebcc2ff4abbd4f0923c7c37b33c689f21bcb91ca63c9d57b73b5217cc3ce169873b56ad9bbce5f1963bc596583c4107ca3c6c0a73bc5cf9323d145f533c80cd823cf8a7a4bbce87bd3b87ec9fbc01a807bdc21c893cc10e49bc8a846bbc88ce23bc33c98fbcc84a24bd7a566d3c3cbb323c718188bc946514bcc4d1923d31e8c93c22c88bbcc90f6abc6b27b1bb06121bbc3bbcf03c955696bb91b1c83cbca4b5bbd95aa6bc55cb17bb56bc19bd88dda1bc0d40b6bc81be04bd9b932fbb579e1d3de95ce83c75ec593c5eaebcbccda539bd8429563ca476d43c91ce06bdf5f358b779a0253c55da153d019a473b698125bb485223bda1fc043db11b05bdc2ff4a3c9b932f3c7efbba3c3c9ef4ba28f764bd9a94ed3c5a352bbbabb2afbcad68773b44ca13bc3e537eba947492bbc93ba6bcdea77bbc053f15bcecf375bbb6769abb5f9040bc412408bd11c7073df51f153d8fedc03c70ae02bda3a3ce3c4dad38bd4762df3bc596583d4934273cae493d3c44add5bc1a8d6ebc19d7263b0a7decbc89bfa53c954798bc20e7c5bc7190863d4bcc72bc8b65313d6327ce3cc11dc7bbfc2f343be25a093d302304ba7a73abbc76eb9b3cccd2b33ce7b51ebc33c90fbcb02a033db6685abd66cc1bbb9a946d3dbf4ac13c3e537e3c32e70b3cf989a8bc33c90fbd0e133cbc0530173c8d38b7b99d57b73bae493dba147c913ce96aa8bce31f4fbcb668dabb146d133c899369bc6a542b3d5772e13c29f62639e22ecdbce32d8fbcf279093d96291c3c5944293cf33e4fbbc2ffcabb138b8fbcd22c8bbc6081423c0440d33cc20d0bbc32ca4d3d2d6f38bdd5d216bdb4b212bdf43d913c2abbec3bcf69413d16145d3c2d6f383d26501bbdb1fec6bc6081423c9465143d2c9c323d2c8d34bc1ab92abc0b7c2ebd367e19bc32d9cbbcb01c433d92934cbc9ac0a9bb78bea13b17131fbce40153bc22abcd3ce41e91bcb02a833ca1dfc63cd6b49abcaab3edbb1614dd3bec1f323c1640193bc6869c3c00d581bc2ad82abc45bb953c42060c3d9d3a793cffd53f3ad6975cbc08c7a4bb62530a3cb91ca63c2bab30bcb3b3d03c0522d7bc75ec59bce5d458bd45bb953cac85b53cb02a033c4f80bebbd12d493d32f6893c247e53bc21c949bc10f4813d0a7d6cbc201382bd7dfcf83c18d864bb1f14403c3ae82cbdf18807bcbe5a7dbb6963a9bb272321bc3cbb323d9ba22d3c5ccdf6bcbe68bdbc5b17afbcc859a2bb77bf5fbba558d83cde987d3c7c37b3bb7518963da3a290bcb6671cbde33c0dbde24b8b3dc85922bd1532593d3adaec3ccf69413c18e7e2bc6fafc03c91ce863c29f6a6bb3d71fa3c67bd1dbb54e9133d06039d3c97fddfbcd4d354bc42158abce22e4d3b07e520bd72554cbd3bd9ae3beec53dbc86199a3b018bc9bc1f22803c359c15bc3907673c384221bd43cb513c7509983abbe0ad3b8a92abbc6a46eb3bbf4ac1bb6081c23ca39450bc8429d6bce6d39a3cbafea93cdb01703bc2ff4a3c718108bdb4a3943c696467bc145fd33c5e917e3bf88ae63a1d5fb63b044e933c256e173c9d57b7bc2c8db4bc5dccb83c0a7d6cbc45ac973ce178853a741954bb56bc993c5f90c03b44ad553ccdb4b73ca7399e3cf411553c93920e3d4bf8aebc0d23783c06045bbb5863e33a1ab92a3d6163c6bba9fda53c302442398d2af7bb72728a3c20f6c339a72a203d70bd003cf26a8b3c7173c83bf8a8e23cb3df8cbbb3b350bcf6f29abc1b9b2e3ce87ae4ba0c41f43b2004043dfd11b83c9e2bfbbbdf897fbc7c3733bcf88ae63c8c56333cc859223b7edefc3b3d80f8bcfa7a2a3d22d7093daab36dbbd22c8bbca62bdeba80dc80bc4133863cb5
7758bcd6b49abc709f843c43cbd1bc10f481bc1f22803aa2de08bdd06803bdc7771ebc7a642d3ba9fd25bc717348bc90fb003de5f1963a55ae593cd788de3c53ea51bd5b096f3c8993e9bcf88ae63b2d7e363c2005c2bbdf897f3bc75ae0bcfb4d303cf33ecfbbc7771e3d845512bd7ecffe3b3d9db6bcf5f3d83c5143083dbf4b7fbc0e22ba3b00c6033dc92c28bc88dd21bce5e298bc3906a93c938390bcb6685abc9e0dff3b82744c3c2c8db4bbf0b501bdcda5393c7ded7a3c412408bdc4b4d43c367e193cdb1eae3c87de5f3c0431d53b6163463d32d9cbbc4861a1bc860a9cbda48552bcbca4353ccbf02f3b5953a73c37435f3c86191abcbad2edb931e8c93b07e5a0bc7d0ab9bc7d1937bdf8a8e23cb4a4d23a18f5a23c75fbd7bb741954bd5ccd763c850bda3c66be5bbce06ac53b5953a73af6e31cba8c4735bcfa7a2abc42e94d3cbf4ac13cabb2afbce33c8d38b03981bc7190063b3f5240bc2f41803cbafea93c66cc9b3cd077813c7190863b9c75b3bc631850bb76fa99bba558d83c2013023cc3ef0e3c7a65eb3c2f41003d98fca1bca2c00c3b55aed93b8b65b13c302384bb2d61f8bc9e1c7d3c5f90c0bb22abcdbb19abea3bda3c2a3cb11b053ae5e3563ce6c55a3cdf89ffbcfc3eb23c4cdab2bb5daffabc3915a73c8c56b3bb42150a3d6309d23bd300cf3c2c8db43b9466d23b359c15bd22d7093c00d501bd9c58753cd84ce6bbe96a28bd52264a3b5854e5398c5633bbc2f0cc3c145fd33b08b9e43c4ccb34bbec1fb23c99c1e7bcbbb471bad869243ce87ae4bc139a0d3dad76373c8438d43b1541d7bb9448d6bbf6e31cbc6408143d5061843c3bd92e3d852898bc1704a13b8e0bbd3917f660bcfd20b63c3d8e383c3c8f76bba0ee44bcfa4eeebb2d7e36bb33ac513a1f22003d5ceb723c8537163c697227bc608f023d0a8bacbbe079c33ae310d13a33c98f3c19d7263b099b68bc6cec763bd15905bd06045b3ce40153bbe96aa8bc4f7140bd72638cbc8438d4bbaca3b13a5b0831bbd4e252bc62448c3a39f8e83cda106ebc2ac92c3c256e973cf998a63c154f973c367e993c19d7263d00d501bde40153ba22c88b3c89bf25bc76dddb3b947492bcd31d8dbcb585983c4caef63b21d847bd44ad55bccbf02f3cad6739bc028a0b3d6a54abbbc3e1cebc73548ebc9e39bb3cb6671c3a8c56b33b8a84ebba29e8e6bb00c6833b9556963c5b08b13ca80c24bd5daf7ab97efb3a3b44d9913c01b705bc5ebd3a3ce06a453b578f1fba7b4631bdf42e13bbcbffad3be24b0bbd65ea173d6a376dba08c7a4bcfc3eb2bb3032023d1e41ba3b798367b95bfaf0bc5854e53b062199bcc859a2bcfdf4f9bae5f1163c147c113cdb1e2ebde94d6abc4852a33cbcc2313b6debb83c41074a3c33bbcfbcf96ceabbe78962bc07f41e3c3105083d8b83adbb11aa49bdec02743c9e1c7dbbe41e113da49312bdf16b49bd88cea33cb02a033b34ab13bcbf4ac13cc058813cf989a83ca72a20bc7190863c4bcc723c19c9e6bc10e583bc146d93bc357059bccdb4b73cd21ecb3c390629bc7181083c8274cc3c4671dd3cf43d91bb11aa49bcddb6f93c00c6833ba6571a3d89bf253d492529bd1b8c30bc8d29b93bb2ee8a3ab6761a3d34ab933a44add53b3d9d363d54ccd5bbc10e493d3badf2bc3e53febb6b36afbc82910a3c136ed1b8ede4f73ca484943c92a24a3d1d33fa3b4034c4bce32d0f3d2c7f743bc5b3963ad78720bda8ef653c27245f3c65ea97bc4bea6ebb1e32bc3cbca573bce96aa8bb836410bb2bba2e3c6dfa36bbb1fec63bddb6f93cc82de6bc33d88dbccbf02fbb29e8e63cc92c28bbbd87f7bcd12dc9bcd4d354b934ab13bbf51f153b42f8cb3c2aac6e3b9474123d7ede7c3ba9fd25bc7ecffebca62bdeb87dfcf83bb3c110bce798603c5dbef83cd5c3183b3106463c246fd53cd7885ebcde98fdbc6154c83c6327cebcd13c47bc8e0bbd3ce09681bc90ec02bd8aa129bcb82b243c9d3af93a7ede7cbb90de42bc1a8dee3c5152063c5ccd76bba1fc04bcbad2ed3cc91ee83a1640193c09aa66bbe25a893c5880a1bcb4b2123d265f19bda2c00cbdf42e13bdc3d2d03aedf2b73cdec4b9bba3b18e39062119bb750918bdb72ce2bca3a2103d310646bc3032023d358d173c97fd5fbbf7b760bce23dcb3b8c48f33c4671ddbcbe5a7d3cb6671c3c06039d3c5dbef83ae789e2bb3c8ff63c4af9ec3caadf29bb7427143d22ab4d3b2bab303d62440cbb65f995bb75fbd73c21f505bd9ba2ad3b08d6a23ca2cf8a3c139a8dbcffc77f3c18f5223b7c1a75baa49312bc', 'Array(Float32)')), +('<dbpedia:Idli>', 
reinterpret(x'e03eb7bc882739bc4a5c88bc60d8a23c25584fbd48b7bf3ce3ef423cfd8217bdc37435bca98e96bcec35bdbc1e3d3f3ce1f72fbc148553bc4b7c7bbc53c275bcb3e0d3b9d196253d7d3cf63aaa470f3cc909a43cdb2fea3b097a1d3d429cafbb1332093d9d97903b1b8c333bc3a78cbc4c21c4bc7de9abbcfb57adbbbe983fbcc8ea7c3c0f0f0c3d1994a0bcbddf463cac3f22bdb60bbe3ca4056b3d66b418bd9a4dffbcf740f33c2f8a19bd63892e3d68df82bd0424493bad4be53c8ebca73c261148bdc5e6a6bc351f883ce6a04e3cdd27fd3c45d35c3ccf9e123dd414da3ceeda853ddb2f6a3df09fc1bb2a016ebc241935bc791979bd8437133c22da1abcc0f6003d67260a3c3fb84cbc902e99bb11071f3d2e9ec93c9f0982bc36f8733dd48eb83c3c07413d4778a5bcbc59a5bce0b815bc1b0692bcaa14383deb9074bca52492bc6c9c51bc4e7f85bc8cc4943c810c293cf7877a3cc2bbbc3c1ae76abd2ca6363df1de5b3c0c2ba93df125e33c1eb71d3d815330bc8067603c59041abde03e373d0f62d6bc219b80ba9888433d3b819fbcfec1b1bcec7c443c4b68cbbcb6d866bc46f2833bf787fa3c21bb73bb5b2f84bd13ff31bd7e2846bd36a5293ce6e7d5bb8510ffbafe08b93bc4c7ffbb536f2bbc29f52abd9bbff0bc57a6583c29f52abd8186073de3efc23cba2e3b3d3433b8bc654eeabc63892e3d63bc053c755c2abc2750623d818687bcc8502bbc4e4c2e3cf125e3bcdfccc53caa14b83c92ac4d3ca6aa333cd024b43c536f2b3db11b983dad92ec3bad4b65bcf91813bdf409c63b6356d7bbe24afabc7faee73ad70ced3c27fd97ba6d554abd3d465bbc2274ecbccf9e123c71bfce3a2168a93c48cbef3c93989d3c882739bdf21133ba3fb84c3ab9ef203d01ed9b3c41b0dfbc67c05b3c3dac89ba56530ebd8d3606bd48cb6fbda796033d8933fcbc8304bc3c65c8c83b8c91bdbd979c73bc778714bda273063d67260a3d891309bbdd078abcfe08b9bbe0718e3b706c043dcd0d7a3cbb1a0bbddd5a543b7bf1183dcd40513c7ef5ee3c92f3d4bc6ccf28b945390bbdb83628bd1994a03cfb8a84bba405eb3a042449bc63892e3d2bedbd3c1b0612bc6f80343d570c873bf43c9dbc363f7b3cd3a2e8bbe845973c8f0ff23c93981d3d810ca93cd19625bc1e3d3f3d62e4e5bc61911bbd01ed1b3d9e1db23c4ddabcbc6442273c605281bc3cc039bc72ab1ebac909a43c809ab73cbe65e8ba86b547bdcd0d7a3b8e56f9bb517798bca0489c3c86fcce3a6ffa123c0d376c3d9cde17bc69eb453d26def0bc9e5089bd07d5543cbd129e3c5dc1e8bc362bcbbca44cf23cc8ca89391e84c63c89e031bd58e5723cab8629bd2ca636bcd130f73c36d8803cfc63703b98bb1abcc17ca2bcb989723ca0481c3ccf9e123ddd070abde951da3bd3d53f3d8dd0d7bbad7e3c3dbe983f3c15f7c43cd53381bdf383a4bc00ba44bdac0ccbbc18db27bd584ba13cbc5925bd4b15813df38324bb3dac093d0a8660bdf2647dba992d8c3c477825bcfad10b3b891309bdf211b3bcee2d503db6d8663d1c452c3d066363bc2cd98d3bb1b5e93b2b20153c7ea224bc9ae684bd7429d3bb1b59dc3bc71191bc71f225bde561343d2a016e3cbd121e3d3400e13cb1e8c03c429caf3bd21cc7bbbf840f3c5f99883d6aa4bebced218d3d1ae76abdf211b3bcff7a2abc6c16b0bbdc1bbabc351f88bd9081e3bcb4994c3b605281bd46bf2cbd9b259f3cd686cbbc6c16b0bbca8f45bb15f7c43b1e7096bca01545bbf211b3bc1e7096bc02f9debc89992abc4d0d943c9e70fc3c10e8f7bbe98431bbc0f680bb05dd413dd3d53f3cbcd3033dc42d2ebc4884e8bce84517bd1eb71d3c8dd0573dd70ced3b1a4d99bb17365f3c402a3ebc36f8f3b9fdd5613c1855863c5d2717bde27dd13d286f093d6c4907bcabb9003d80cd8ebc121362bc1379103a0d9d9abcc53971bccef9493d10c804bcb2a1b9bbb836a83c9365463cfb8a043d429c2f3d4dda3c3cfd1ce9bcf66707bd2a679cbd0aec8e3ceea7aebd4af6d93cb2a1b93c6fb30b3cb346023cd73fc43ba71ca53c3d79b23bd447313c0cc5fa3c3043123cd21cc7bba6775cbdb8d079bb48cbefbb23603c3ddb95983ca6dd8abdeeda85bcc17c22bd286f89bdd1aad53c74d608bd7787943cf0d2183b9b6ca63cf634303df667073d84f08b3cf57bb73cdf99eebb86b5c7bc3f3eeebcbf0ab13ccd40513c9cabc03caa470fbc65c8c8bca8a246bd362bcbbc36a529bb41168e3b5058f1bafa9e34bc1ba0e3bcfd4fc0bc6965243c468c55bca52412bd9e50093c63030d3c0cc57abcdb95183d7d6fcdbc5c824ebb90b4ba3c4b9ba23b07d554bc337abfbc7cfddbbc4970b83b8827393c77dade3b2de550bc073b03bcca5ceebccd0dfa3c4e4c2e3d9e1db23db2a1b9bc35ec30bd314fd5bb1eb71d3ddd070abcb63e153d3400e13c67c05bbdd87ed
ebc967c803d508b48bd802059bd6b2ae03c3e322bbb843713bc2d2c58bdbc59a53bab53d2bb1a4d99bb212122bce3364a3d517718bd04f1f1bcda23273c8020d93b1246393ca240afbc09cd67ba6b900e3d87a117bb6e411a3c706c04bc57d92f3ca7632c3dca8fc5ba5ee08fbc0c5e00bdb11b18bd68ff75bdbc59a53be61aadbcb74a58bcdc1bbabb2f57c23c0f0f0cbd913adcbc666d113dc090d23c11d4473c8d032fbd9ed62a3d9f8fa33c8067e03b97cfcabb2d2cd8bcfdd561bc612b6dbbda76713c3c3a183dd53301bd854356bdc897b2bcb552c5bb0de4213caa0008bdf57b373df9b2e4bcf548603d46f2833cbfd7593c51116a3caa00083d0a86e03b68df823c7264173d10c8843d153e4c3d2525f8bc8fc86a3ce3bc6b3c43a8723af2fd823c6965a4bb77543dbdfcc99e3c185586bc7bf118bdb3ad7c3d980e653c8d7d8dbcec02e6394a29313c2d2c583d4659fe3b2393133cd1aad5bb7721663c4e1957bda7632c3d36d8003d8186873c8933fc3c4a2931bd666d113a5fb97bbca98e163df5f515bd5058f1bbef60a7bba6aab33c8c913d3dd7c5e5390424c9bceea7aeba16b0bdbc755caa3a7cfd5b3dcf9e123cc202c4bc0cf8513c508b483c68ac2b3d1cbf0abd6be358bc1a1ac2bcd11084bbde46243d9226ac3c8c4ab6bc6356d7bc3ac8a63c6e411a3dc625c1bce24a7abcec0266ba7a8b6a3c63d0353c7abec13c4e4c2e3d5c82cebc8067603cc81dd4bae0718ebbe24afabcbd129e3cf7734abc7039ad3c0457a0ba03b257bd6c16303d4a5c08bdfcc99ebb11071f3d46f2833c148553bd87a1173deea7aebbd87e5ebc9649293de03e373c67268abbc3415e3c20af303d5a8a3b3d9841bcbc8a5223bd0a33963c3feba33c7e28463da95bbf3c57d9af3c8fc8eabad21c473c6595f13b0510193caf8a7fbb916d33bdf918133c1b8c33bdddd4b239232de5bbeb49ed3cb346823d5cb525bc12c0973c8bd8443c772166bc601f2a3c9e70fc3cd47a883cec02e6bc0aec0ebc797f273caff0adbb5f335ab9d1634e3c5f66b13c0ef0e4bb3f85f5bcebc34bba663ababcba2ebbbc644227bdf2440abde898e1bc7e2846bc4ec60c3da763acba0e56133caa1438bdd130f7bbb2d410bc3c3a183d48ea163df72080bc5abd12bc4a29313d34668f3bf211333c52e989bb2ed1a0bc4fd2cfbb43db493d570c873a60d8223c8e5679bbde4624bd8a1f4c3d536f2b3cf7eda8bcd2e96f3c78f905bc88f4613cfcc99e3cbb1a0bbdb51feebb0ba507bdd02434bc3b819fbc95a460bcf06cea3cd48e38ba66b4983ba71c253c885a103d612bed3b2a48753c4c21c436dd8dabbcac3fa2bb663aba3cbd121ebc6c0280bca6775c3a4506b4bc8ba5ed3c7264973b08c124bc4bafd2bc89337c3ca6aa333df12563bd49a38f3c77dadebc837e9a3cb38d89bc945116bce0b8153c1c452c3b5bfcac3cbbb4dcba23603c3c7721e63b90e711bbee74573b06963abd6052013c34668f3b547beeba8d03afbc5a43b43b59d1423d3a954f3ceeda853cdd8dabbc8f75a03cdd5a543d157123bd6d8821bc2cd90d3a2750e23ca1ba8d3d7b4463bdfe8edabcbecb96bcd447313c700656bb7fe1be3beea72ebde898e1bb0c7e733d2525f8bc3b819f3c74f6fbb93e32abbc1961493c624a143d5bc955bd1eb71d3c1e3dbfbc60d8a2bc64dc783c9279f6bb74f67b3cc850abbce3229a3be00b60bc6d55cabb0c2b29bccac29cbcf383243c6fb30bbdcec6f23c1b8c33bdf5c2be3c20af30bc9649a93c33f41d3d7b44e3bbdd8dabbc5ead383d9bf247bd2419353b71f225bd74f6fbbc9ea3533c8d36863dd48eb83b6d554a3c75e24bbd9259833b1e7096394e9335bb992d0cbd7abec13ca1cebdbccd40d13b6389ae3c1994203dfc96c7bb244c8cbc10c884bcc8eafc3c43dbc93c9365463c806760bc3347e8bcc288653dfdd561bb192ef23b7b44e33cd3d5bf3c2274ec3cff7a2abdc89732bdf7ed28bd4884683d6c9cd1bcf82cc33c9a80d63cb327db3cb38d89bd4da7e5bc5fb9fbbcea7001bc7ef5eebc82c5213de9cbb83c6c02803ccac29cbc74a3b1bcc8502b3cc8ea7c3cd3a2e8bb89ad5a3c1e70963c152a9cbc073b03bc54e11c3d454dbb3c2d1828bcd45be13c4ec60c3c064fb3ba3cd469bb77543d3c7ad271bc5bfcacbca6dd8a3b125ae9bc59d1c2bbadb1933cd0578bbc9ed6aabb207c593dd24f1ebb4c549bbcca5ceebcea70813de9b708bd15c46d3c52835b3ab63e95bc5f9988bc3c3a183cffad813b62e465bc4ec68cbcbf840f3c3043123db691df3c71bf4e3ccf6b3b3ce4db923c71bf4ebb1bd3babb022c363d71bfce3cf72000bcaa14b83b39898cbce03e37bd5818ca3b53c275bcaf8a7f3c025f0d3a7fae67bbaacdb0ba53a2823c24d22d3c666d11bc18ef573cfef488bc8ba5edbcf91813bdd11004bdd99d853c941ebfbb1c7883bcaff02dbb7d3c763dfc63f03bf82cc3bb
b5859c3b0fdc343d9e70fcbc1cbf8abc402abeb6430ea13b405d95bcc5e6263de9fe0f3bdc1bba3b6b5d373df82cc3bc4be2293cadf81abc85a984bc36a5a9bc1cbf0a3c3cd4e93cef19a0bbf7ed283b179c0dbc0de4a1bc7231c0bb00ce74bcb552c5bcf5ae0e3b9ae6043db26e623dba6112bd00ba44bcf548603cf211333cecaf9b3c36d800ba5bc9553deeda05bc0f0f8cbc6052813c185506bcf8f9ebbc03b2d73c0cf8d13c85a9043c941ebfbcf18b913c9b39cf3c62e465bc579228bd68fff5bc7721e6bb0d9d9abc31822cbd825ff3bb232de5bc2cd90d37c6f2e93b2168a93b10e8773db63e15bd9f0982bce898613c16e314bcc46005bc84f00bbd18222fbce64d84bc78408d3c3cd4e93c02f95ebd624a94bc6c16303c7a0549bdb34602bce8df683b1ba0e3bbb499cc3c3942053d533c54bc9a80d6bc1a1ac23c57a6d83b43db493c0e233cbd666d91bc7721e6bc21bbf3bad7721bbd2d2cd83c1c12553cd308173c5283dbbc24e6dd3b7f1416bc84bd343c950a8f3c84f08b3c731d103c9cabc0bc4af6d9ba2405853cad7e3cbc8c4ab6bbb989f23c87a117bd4506b4bcf5ae8ebc00ba44bcc2ee933c536fabbc2558cfbcec02e63b0cc5fabc3e65823c6595f1bc2928023d9e70fc3cfd1ce9bc9ea3d3bbf5f5153dbf0a31398d7d8d3bdce8e2bb036b50ba60ec52bc4a5c08bc78f905bd6c02803cffad813ce78c9e3c0457a03ca71ca5bcff47d3bcde134d3d01a6143d4a5c083d67c05bbca6aab339cc87d8bcaae160bbe5948b3c5818ca3c00876d3d394205bdb1e8c03c639dde3cbf3d88bcf57b37bc83043c3c809ab7bb12136239d45b61bd7d3c763caf2385bc63892ebd1994a03c2a48f53b405d95bc8bd8c4bc967c80bab02f483bf2440abce0710ebd4c549bbb67f332bb351f88bcd024b43c1e0a68bc10c8043dd8e40c3c9e50093d4a2931bda98e963ba4b2a0bc47454ebcaa470fbc9a80563c794cd0bc4e605ebc0f952d3dfb1026bccf9e123c843713bcf38324bceb90f43aca5cee3c3a62783ce0853e3cec35bd3ada76f13b624a143aa5bee3bbe64d04bbd0570bbb5cb5a5bcadc5c33ac17c223de52e5d3c4fd2cfbcb3ad7c3ddaa9c83c7231403c5d6e1ebc70d3febb4222d1bb5a768bbcea70013c9b6ca63852e989bc58e5f2bc101bcf3c0a3396bcc71111bdd6864bbd30fc8abc941ebfba91a00abc2d1828bbaae1e03c4f9ff83bab5352bcd1aa553c458012bd93989dbc3e32abbca0489c3c37b1ec3bf06cea3710c8843cd414da3a01a694bbfd4f40bc90e7913ca928e83ba3f9a73c85107fbcc202c43c232de53c92f3d43c4b9ba23ce3ef42bc5130913a213552bc9d3162b9c3415ebcab86a93c46597e3c809a373de78c1ebcb38d093ce64d843c1dcbcdbc5b967ebca677dcbccd0d7a3c2ca636bca8d59d3c513011bc75e24b3bfef408bd6c1630bcca8f453df383a4bc8f0f723be6d325bcf297d4bc7721e63c9259833c150bf5bb0663633c8e56f9bbbcf3763c337abfbbf125633ca3f9273cb63e95bcac3fa23cb38d093d5cb5253de1f7afbc54f5cc3c0ab9373bc65818bdc7deb93ab9bcc93b4b9ba23ce9cb38bce1f7af3c4b35743ccbcedf3b90b4ba3c323b25bd20afb03ce24a7aba98bb1abd9e70fc3cf409463c45398b3b92f3d4bc9855ec3c27fd17bd0cc57abc0c2b29bd913adc3bc5b3cf3cd4c10fbdb77dafbcb77dafbc27cac03cc8502b3d27b6103c6b5d37bc6356d7bb468cd53b6965a4bc57a6d8ba74f6fb3cf5f5953a5a768b3bc62541bd37179b3b025f0d3de3364a3b46f2833c9b39cf3cd196a53c0a86e0bb7d1c033d11d4473cab86293cc81d543c097a9dbc53c2f53b08c1243cd87edebc70d3febc6c4907bd5a57e43c48ea963c152a9cbb5bc9553c390f2ebdb11b18ba63bc05ba5f66b1bc0c5e803c394285bcf2440a3b9616523d9ae684bce475e4bba27386bcb6d8e6bb3010bbbc8304bc3b6ffa123ced218d3c8e56f9bbd24f1e3b4e19573cbc264e3c834b433b8ebca7bc78f9853cf7ba51bbfec1b1bcd45be1bbb499ccbc05ddc13c9d31e2392de550bafbddcebb449442bc3b1bf13b232de5b939235e3c6fb30bbdd414da3c8ba56dbc64dcf83cfd1ce9ba429caf3c8bd8c43ce72670bced210d3d96c3873c101bcfbc4a5c08bdab86a9bc2102fb3cf5ae8e3c668141bbc090d23c9a80d6bbfd8217bc99c75dbcf2caabbcd3a268bc311cfebc5abd923c4659febcef19a03c8067e038dd5a543c87e81ebb36f8733c6356d7bb0a33163d16b0bd3c6fb30b3d6c0200bd4c549bbb08c1a4bcaa9ad93a69ebc5bc33c1c6bccec6723b86fccebbd0578bbcb8d079bc967c80bc63d035bde475e4baba61123d78f9053c17365fbc7f14163ba3c6d03ceea72ebc0d6a433c7c630a3b2783393d0b3fd9bbf2caab3ccd0d7abc43dbc93c351f883caa470fbdd96a2ebcbe98bfbcaae160bc06c991bc3cd4e9bcd2e96f3cf5ae8e3c3da
c09bd980e65bcc7deb93c2bedbdbc29f52abbed218d3caf23053cefb3f13c941e3f3cb691dfbcd308173b7d1c83b962e465ba22746c3bb4cca3bcc897b2bb599e6bbba52412bc3a62f8bbdd5ad4bbf634b0bc885a10bd33ad96bc726497bb2102fb39b63e953c0f0f0c3d584ba13c15f7c43b57d92fbc0ba5073d86fccebc63038d3b022c36b9aff02d3c179c0d3ca730d5bb6b2a60bc430e213c27b6103db0621fbddd8d2b3de84597bc0b3f59bc8cc4143c48ea163c0173bdbbe9515abc74a331bc5bfc2c3c8e5679bce0718e3b0cc57a3abc6d55bdb38d893c6d55cabceeda85bc07828a3ccbcedf3c68df823c5a760bbd14eb813c7039adbc5f99083c9b394fbbe9cbb8bcd4145a3c2750e23ce1c4d8bb56538e3ce4a8bb3c0f95ad3cf2ca2bbc179c8d3c0b3fd9bc67c0dbbc84bdb43bbc26cebab51f6e3d9590b0bb60d8223b2de5d0bccb340e3b252578bca1ba8dbcc116743c1e3dbf3c216829bd286f893d71bf4ebb3dac09bc3347e8bbc17c223b8c913dbc01ed9b3c3c8d62bc2a48753c1a4d99bc8b0b9cbb6d554a3a902e19bce475e43bdaa9c83c92accd3cf350cd3c3fa41cbde951dabc843713bd660763bcb51fee3bec353dbc390faebc90b4babc0173bd3bdda1db3b4745ce3b152a1c3bd45be13c76ce9b3ceb49ed3ce64d84bccb01373cf634b0bc90e7913b6e411abc2ca6b63b848addbbcd0dfa3c1cbf0abc780db63c896653bbe5948b3cf12563bce66df7bba7e94d3c2f5742bc4778a5bbd937d7bcf548e03b43a872bbe52e5dbd7515a33c30fc0a3cca8f453c4c549bbc624a14bda7b676bcb3e0533b22a7c33c27fd173c4c541bbcaacdb03b311cfe3bc42daebc570c873c4f9ff83c6d88213d4580123cc71111bc72ab1e3c87a117bdb975c2bc1a1a423c992d0c3b1e0ae8bce72670bc53c2f53c601faabb988843ba86b54739d4c10fbdd2e9ef3b36f8f3bccd0d7abcfb8a04bcfd8217bc559a153ca677dc3b4b7cfbbba0e2ed3b1b595cbc9e70fc3b311c7e3c3b1bf1bb65fb1f3c8c91bd3c3b1bf13baf8a7f3c7145f0bc15f7443db55245bde561b43ad0578b3cc8ca09bdf18b913bec6894bb451a64bd60a5cbba4af6d93c6a71673c89e031bd20af303b8c175f3a4884683c5058f13b', 'Array(Float32)')), +('<dbpedia:Pilaf>', reinterpret(x'ca6378bd176feabcacfa2cbd72945d3d2ec0bdbc42e8d93c876237bd652a77bd992a9ebc34ce58bc013fa6bcbbe70cbccec1f8bbc2c886bb6a5bd6bafd9040bd6ee6f73b911eb83cfd0b923c583105bd22d1a83c69268dbc2c6ae53c0522553b9a5f673d78989b3c2b8da9bc92f116bc2ec0bd3b2837513d0a80d53ca6f66e3ca3c3da3ca090c63c43368a3c6db1aebcb158ad3ca61933bc2db8953dd39acabbaa1fa6bc9d3aee3c9fb38aba5ec44ebcde0666bd56ae0bbd69260d3c86374b3dac274e3cb7e119bdeea3603b0dd62dbc4ea225bde48f523cc630e43cc703c3bb2db8153d011c623d5ee712bd3c5f6dbd8ebe82bdb81663bd2527813bd81b0fbcdf5ef33c89e5303ddc790f3da136043cc9a900bc46737b3c585e263d9c5d323c6e8e6abc774a6b3dd7c301bdb90c063de0ac23bd876237bdc1f5a73cda7bc4bb858730bde1e16c3d8b954bbb9d30113d68fb20bc62cac13c59936f3d781d4abdac27ce3c2381433d542b923c606a0c3db4b8623def9903bc5706193ddefc883cd4151cbbeffbedbb2985013d0347ce3c12070dbd3cfd82bc290ab03b257f0ebb56db2c3d298f5ebcac27cebc38d4cb3ce5dd02bc93a131bc031a2dbdb2b0babaf8b76e3d0e0bf73bf2cc173bb20848bc22aee43c8b954bbd41e031bc7ea6b63cf301e1bb1d7328bd79cd64bcd14472bcab54efbcd51d44bc855a8f3c837f88ba1e9e143c8a60023db991343c026a12bd9474103dd81b8fbcfe40db3c1b755d3c9784e0bc163aa13c367496bb1c48bc3d351c893c48940a3c66d0b4bc394f1dbdedf3453dafd5b3bc148a86bce20cd9bc61f7e23c9db5bf3c715f94bcebf57a3cafd5333d94514c3df6af46bc37a95f3c331ebe3c8f78fa3c2504bd3c1ca0c9bba96f0bba1e46873c774aebba4f75843cc0450d3d453eb2bc34f11cbcccdc94bbdf5416bd95a9d93cf1f9b8badbd3513c4f52c03c96f709bd5abe5bb7d265013d4f75043d189ad63c3ddabebceff1103d76f25d3cfe40dbbc67a313bc80a401bbabac7cbcea3b833c148a063d20c9803dc17a56bd1ecb353c031aadbc4ca45abcc505f83cea3b03bdda9e083d8bedd8bc9e881ebdb22b8c3bb48bc13cebeb1d3d0112053d17c7f7bcf32425bc91a3e63c761522bcea183f3ce364663dc8e0fe3c6c01943df37c32bd93a1313c00e718bc3d5510bcabf2843dc3a5c2bcb360d5bcabf204bdb63bdcbb48f674bb6a5b563b62ed85bc781d4abd878f58bd0fb1b4bca8f4b9bc9e881e3a5d8f85bc53dd613cd87d79bda28e113cf832403dfb9275bcdc83
ec3c0347cebc611aa73b415b03bcfdb3043dc5a38dbc850282bc66d0343c7f7995bc2a351c3cd26f5e3bda00f3bc204e2fbcccdc14bca9c7183dab546fbd8304b73b896adf3bcb8ee4bc202b6bbcfd15efbcc8d6a13c160d803cc92e2f3de364e63c5c6419bc0a8055bccd11de3a38d44b3c37a9dfb8d8739cbc917645bdcd3422ba0bab41bd1d73a8bced6e97bdc5fb1a3d8ff3cbbba33e2c3d4a1784bc307058bcff98683dc85bd03b4e4a98bcdaf695bc708cb5bb7875d73c128c3b3c3043b7bc400dd3ba0fde553c502ffcbaa136843cff8e8bbc9784e0bc21791b3def9903b9898d23bcffbb2cbd4a1704bdd342bdbce53f6dbca6f6ee3b006cc73c400dd33c3116963cb3db263c03474e3d2e6830bdbe9fcf3c898d23bd606a0cbcc77e14bdeb704cbc420b1ebd4340e7bc89e5b0bc720fafbc3f3af4bbf37c323c494425bb32f3513c48948abccb36d73cb20848bd76f25d3c7b505e3c1a9821bd30eb29bd8d18c53ca96f8bbdbdef343c257f0e3df83240bb7615a23cffbbacbbdca6b03c870aaabbb1dddbbc3859fabca69404bd135f9a3cc4ad6a3cfe405b3db410703be35a89bdeb704cbb252701bda8f4b9bcd51d443c1f7b503de81af4bc8f78fa3c878fd83ccd8c2f3a49c9533c7fdbff3cf251c63cbb142e3ab766483a6bb3e3bcbabc203c6a5bd63c38f78fbb92ce52bc86374bbcdefc083d7c23bdba6cdecfb974ea35bdba99dcbc3f3af4bc46b983b89f38b9bcac27ce3c8d93163d0545193df707d4bc0a80553c761fff3c75c7713dca63783b074dc13c80ae5e3bb63b5c3c4e27d4bae83db8bc1e4687bcf6d20a3d6ba9063a19f2e339b7890cbdcb8ee4bcedf3c5bcfa3a683d4a79eebc026a12bdbec213bd0a80d5bb9a5fe73ca9c7183d9bad173d56b8e83cee4bd3bbb7e1193c5e3fa0bc4c1fac3ce1397abc7542c33c37cc23bd7af850bb62ed85bc96f7893c7ea6b6bc9ee02bbca546d4bc85dfbd3ce81af4baeea360bcbc49773db991b43cd3bd8ebcb76648bd15ecf03c8a3d3ebd270c653d855a8fbd3bafd23c870aaabc8d70523d585ea63c176feabc77408ebd950167bc43bb383db90c063c2a351cbbbe247eba2ee3013dd5c536baff8e0b3d87e7653d19c5c2bcdefc08bd8ff3cbba8ac26cbae9ed523d4290cc3c9fbde73cdca630bd37cca33ccc61c33c598912bcda9e88bd81813d3da7449fbc87e7e5bb7ea636bb987a03b95e6c413d0c03cfbdc630e4bb133cd63b63a77d3c97a724bd66d0b43c80aede3bf657b93bef9903bc3b2a243c6c59213dd14472bd4e2754ba63224fbd1fd3ddbceea3e0bc81d94abc76f2ddbcc122c9bcc9b35d3c456b53bdb60ebbbad54a65bd7e2be5bc583185bcd29222bdc70343bd74ea353bc9b35dbbcd34a2bbf78225bcfb9275ba351c09bccc84873c34ce583bf0a12bbc77e880bb45e624bbd1bf433c708c35bde004313c461111bcf42c4dbd3476cbbc964f973cdf5496bb972c53bc031a2dbc97a7a43c43632b3d7dd357bd62cac13ce1397abcff8e8b3c38f70f3b18bd9abc17650d3d89e5b03b8e43b13cec20e7bcb50613bb9e881e3d8d70523c2e456c3c86bc793c56ae0bbb238143bd46b9033c11614fbc6930ea3c8b109dbcaeaa47bc6a5b563b7c23bdbb733a9b3d03c21fba38f70f3da4118bbdfb9275bba473f5bcadcd0b3b61f7e23b3651523c6f34283daca29fbcfd0b12bc86b29cbb06d2ef3b13e4c8bb4dcfc63c7f56d13c6c5921bc41bd6d3c9f38393ca2e69e3ad31ff9bc9524abbc22d1a83cc92e2f3da198eebb713cd0bcbe47423dc9a9803cc1f5273c09adf63c9784603b761f7fbc8d70d2bc776d2f3d6cde4f3c1c48bc3bd8f8cabb0fde55bc2add0e3dacfa2cbd45c3e03be968a43bba41cfbc2b12d8bc2d10a33bbd74e33c91a3e63c20210e3c135f1abb19c5c2bbb3db26bd77408ebc855a8fbcde29aa3c1e2343bcc6d8d6bccf3cca3b204eaf3c9b8a533c58b6333ded168a3c9524abbb23fc143c787557bd6655e33c04f7683c8231583c6880cfbb6930ea3cea18bf3c3cfd02bd1a1d50bc4bc79e3b51025bbcc320143d583b623c9f38b9bc4ffab23c2b12d8ba18bd9a3c62ed05bd9ee02bbcf584da3c215657bcf324a53b896a5fbdf72a18bde53fed3c03474e3c77e8803df832c0bb03c21f3dedf3c5bac0a7773a9524ab3cca63f83cb28df63ca31be8bc78989b3cc1f5273c45c3e0bba4118bbb8e43b13cc2c886ba761f7fbc578b47bad3bd0e3ce1d78fbce635903ca94c473917c7f7bbd7c301bcc0458dbcd8739cbc652af7bc2f3b0f3c6c0194bdb4ae053a624f70baf2cc973b4df28abcc4adeabaf6d20a3ba00b183c585ea6bc57e3d43ca96f0b3d1b75ddbc6e848dbb52b2753c5d99e2bc006c47bcde81b73bc90bebbca090463c68804fbcf37cb23ca94c473899347b3c41bd6d3cb63bdc3c265cca3c072a7d3c98d210bd8d18c5bb30eb29bc64f5adbc36f9c43c28b2a2bbe5dd02b
c8e43b1bc319bc4bb48f6f4bcdb4e23bd3dad9dbccb3657ba0fb134bda4118bbc5f74e9bba694843c8912d2bcbbe70cbd00f1753c283751bc1c1b9bbd194a71bdf72a98bb5f74693d85df3dbd2381c33cac7f5b3c42e8d9bcbcc4c8bc72673cbd73e20d3c8029b0bcb4b8e2bc855a0f3d855a8f3c32c6b0bbb0a8123b652a77bc1c25783ce2872abc56ae0bbd88ba443bc9a9003ca140e1bb094b0c3c7b7322bc9249243cde29aa3ce9edd23c31be883c9b8a533cda0073bb4ffab23bea3b83bc0347ce3c55084e3c3f3017bca821dbbce81af43bdf5416bd00e7983c29e76bbc324b5fb598573f3d1c48bc3c3d32cc3cc58049bce6ba3e3b0522d53c2764723d08782dbd8f20ed3cb3db26bd1742493c8a60023da136043d8c3b89bb8ab80f3c85dfbdbc08782d3d7565073d8e43b13c502f7c3ce287aabcc986bc3b728a00bd094b8cbc8b954bbcd7a03d3cbf1a213c50cd913c54b0403c48948abd135f1a3db7ebf6bc0928c8bc6e8e6a3ad7cddebc7069f1bcb96ef03c7875d73cecc8593ceea360bcc320943b7a1b953ce6ba3ebba9c718bd964f97bc19e8063c07c8923cc2c886bc6ad627bd855a0f3c4d54f5bcb90c06bdc9a900bc00e7183d0d5b5c3ca26bcdbc3ee2663cc9a9803d059d26bd7d4ea9bc0274efbbf2a9d3bc47c12b3c2f18cbbcea9d6dbcbe247ebd2eedde3c79cde4bb4e4a983c6e848d3c7c9e0e3d5d99e239379f023d270208bdbabca03b6ba906bd58b633bc2f184b3c22a407bc992a9ebc3e8a59bc25e1f8bcd540883c49ec97bc65209abcb506933c639d20bd372431bd128cbb3b6930ea3cb3db26bdd87d79bc0a80d53c9db5bf3ad8f8ca3cfb92f53bfcbde1bbbabc20bd1e23c33cb085ce3c6272343c8e43b13b713cd03ca136043d883ff33c62ed053c5a392dbde89545bdb383193c28dfc33c334182bbd950583d80a481baf026da3c28df433dc630e4bb898d233c73e28dbc4a79ee3cea3b833d4df20a3bf44f11bc2c3dc4bcb6b6ad3baa77b3bc972cd33ce76ad93ac22af13c29e76bbb0bab413d8b101dbb3a84e63a400dd33ca8f4b9bc9d301139d4151c3c33990f3c2454a23ce83db83c0d2e3bbb6d365dbcb5e3ce3b176feabc2eeddebc964f173cdcfe3dbbd4f2d7bc68fb203d4b4ccd3cfee84d3de40aa43c6cdecf3c2f18cbbb815ef9bc39a72a3d18bd9a3be5dd82bca213c0bb1ecb353c606a0c3d56b8e83cb7890c3d17eabbbb1009c2bc92ced23a708c35bc55839fbcfe405b3b79cde4bca61933bc81fc8ebbdf5ef3bcf85fe13c28b222bcf707543bad2f763c713c50bbc85bd03aec2067bc0ea98cbc6b2e353dbeccf0b9a9c7183d542b923ccf0fa9bc072020b8bdef343b7aa0c33b8587b03c194a71bc0197333d957cb83c85dfbd3c5560db3c70e4423d59eb7c3c290a303cdc790f3cd2ea2fbc9501673c842f233ac3a5c23cfd15efbbc122c9bc283751bc252781bc7aa0c33c31be08bdae2599bc15e213bd290a303d0afb26bd8cc0b73c351c093d31be08bc9474103dabf2843c1e9e94bcf8b76ebc92f196bcbbe70c3d52500bbd1567c2bc81549cbdf44f11bdf5ff2b3b34f19c3a1369f7bc2527013d2381c33cbf72aebcc788f1bc2a359cba75bd143c290ab0bc5831853b5b6e76bc957cb8bced6e17bdd8739c3c60ef3a3d420b1e3d11342ebded160abd21791bbdc580493df026dabcfd0b923d77c53cbdc85bd0bc31be883b50259f3c96f709bdb6b62d3c80a4813bcf0fa93c2787b6bcd8731cbc924924bc761fffbb5ae19fbbf832c03b3ee266bc3341023c6d2c80bc6b0bf1bb0928483c6a5b563bca018e3c8762b7bc0c7e203d48948a3dffbb2cbc4a21613b8b95cbbcfd15efbcef763f3cfc65d4bc2b12d8bca49639bc66d0343c031a2d3c99347b3c02efc0bc0f36633db90c063da213403c18bd1a3c135f1a3c8d7052bcb41070bc50aa4d3ce437c53bf9059f3c8304373d81549cbb54b0403c0d5bdcbc1f7b50bc6678a7bcc5283c3b957c38bd059d26bcac274ebd20218eba0afb26bda28e91bc3f3097bc8912d23c3db77a3ced168a3c957cb8bc62cac1bba31be8bc7417d7bbdddb79bcae7d263b4363abbcd292223d128cbb3bd8f84abc2229b6bc1d5064bccdb9503c326e233caeaac7bbadd7e83c067ae2bc1cc30d3d11342ebb81fc8ebcbf722ebd7dd3573c235effbb7c233d3cb5687d3c270c653ccb09b63cd5c5b63c7007073dec43ab3c4dcfc6bc34ced8bca87968bc334102bc148a863b9326e0bcbec293b981fc8ebb619fd53c769ad0bc61f7e2bc140fb53a8a1a7abc1ea8713c45c3e0ba453eb2bb9b32c63b52a898bcaca21f3c9e881ebbc653283cf9ad11bd6ba9863b761f7fbb307058bd6e09bcbba879e83b2787b63b5d8f053c34f11c3de9edd23bb839a73ce697fa3b8587b0bcebf5fabb6d2c803c3651d23ced6e17bc4ea2a53cbe9fcf3c3116163c1b6b00bdabac7cbc35a1b73c49ec973c70e4423b05
cac7bb88dd083c7a1b15bc135f9abc285a153cbd74e33a95a9d93a15ecf0bc8587b03bc90beb3a0f3663bcb7e1193da7441f3da6ec11ba108ef03b6fdc9a3ca00b18bd3bd2963c90c6aaba6db1ae39ea18bf3c52b2f53c2f9d793c3f30973c9bad17bc160d00bc9fb30a3c461191bc48948a3c41b390bbe07f823c074d413b9ee02bbce5dd023dba995c3cc8e0fe3bda7bc4bb1211eabcf707d43b64f52dbbdb4e233bf0499e3c0545193d8835163d3cfd023c379f02bcf3f7033b870aaabca96f0b3d9451ccba59eb7c3cab54ef3b947490bc598912bcdc798f3cd748b0bc8b101dbd7df61bbc701164bbb964933bcfb71bbc4e4a183c2db815bb761fffbc7c23bd3c858730bd4d77b93c06f5b3bcf584daba61f7623de35a89bb6c86c2bb76f2ddba59ebfcbab383193dd6f022bd9784603ba821dbbcf0265abc761fff3c606a8cbc06f5333c5a39ad3cff13babb9934fb38de29aabc1a1d50bb3db7fabc4c1f2c3c26b4573c522dc73cb76648bc46b903bcd748b03c8029303db433343d34f19c3c21799b3b456b533c8c3b09bbb43334bc715f14bd7c9e0e3dcb09b63cb02d41bcbabc203dddd19c3c04723a3da6ec913bfd156fbc8e43b13c1bcd6abcf5845a39b63b5cbc54b0c03c9326e0bb262fa9ba9d3a6ebcae7da6bc37cca3bba49639bcea18bf3cb83927bceac0313c5989123dfb88183dcdb9d0baed78743d54d3043cf85f61bb4f75843c20218ebc644d3b3bd4151c3ce004313cba99dc3c86bcf9bc8f9b3ebc461191bc1d73283d992a1ebdeffbedba6db1ae3cde06e6bbd342bdbbd8731cbcc85bd03ce437c53c0b5334bc28dfc33cb55ea03ca46918bc1e9e943b41e031bc7e21883dcfb71bbbce0781bc644dbb3ce635903cadcd0bbc0522553de83d383b15bfcf3ab38399bad725ec3c8502023b5d8f853b1161cf3b6930eabb5b6ef63ca6f66e3bba99dcbc7a25f2baeb93903cc580493c2c60883bab54ef3c1a40943cc378213dc92e2f3b51d5b9bca671403cbdefb43baa1f26bbc9a980bddf31d2b82ce536bc58b6b33b937eed3bebf5fa3c59ebfc3a3724b13ac4adea3c73bf493ca06325bc1369f73cb90c063da063a53b41e0b13ccd8c2fbca28e11bdaa7733bded6e97bcfae2dabc176feabcaa1f263b7069f1bc3d32ccbc387cbe3c815e79bbeff1103c720fafbc54d3043d031a2dba9bb7743dd81b0f3b14b7a73c950167bc89e5b0bc5989923cb28df6bc3a5745b98bedd8bcbd6a863c26b4d7bbcf94d7bcf251463bdfd9443c28b2a2bc9474903cb4ae85bb9a82abbc312073bccadec93cfbb5393df5a79e3c98ff31bc235e7f3c3674963cc1f5a7bbb7eb763ccfb71bbd4a9c32bc66d034bc74eab53caf5a62bc0c2613bd4611113c9d0dcdbce968243b6e8eea3ce697fa3a17eabbbc1cc30dbdcb0936bcc0450d3cf1740abceec624bafce025bd3120f3bba694043d22a487bc406560bc4971c6bcee4b533cff8e0bbc204eafba1ca049bc019733ba49ec97bb6db12ebc9e881ebc8ff34b3c74ea353de8b809ba8f20ed3cbb6cbb3c97a7a4bb8502023ce83db8bb6c63febb5b1669bcd7c3013be6ba3e3c448e173c3fb5c5bc56333abcc8d6a13c2db8153dcdb9d0bbfa5d2cbcb23569bc2156d73b6fdc9aba578bc7bc67ad70bc03c21f3d95a9d9bc33a36c3d3cfd023c8f9b3ebc78989bbb7fdb7f39a8f4393c85dfbdbc1d50643c6a03493a837f883ca21340bcc580493ca546d43cd144723c69268d3bfd90403d09a399bce5dd82bdfb0d473cf9ad91bceff110bd611a273cfce0a53868d85cbb24d9d0bc88bac43b3120f33c89e530bde3df373c700707bc24ac2f3b044f763c16922ebc3dad9dbcda2337bcd81b8f3cb0b26fbc830437bcfb88183d1ea8713caf5085bcd7256c3b41e0b13bfe631f3d270288bb0db3693adaf615bc756587bc6ad6273c80a481bc4df28a3bffbbacbb9f15f53bf78225bd6e848dbcd46d293d77e8003dbd6a86bc7542c3bc65201abb4498f4baa821dbbcdefc083c8f1690bcf2cc973c9a822b3cb208c8bb0b30f03838016d3dea9dedbb3d55103a4dfce739acfa2cbb0e011abdc44b80bcc653a8bb75bd14bd7a1b15bc911e383cb3dba63bed6e973c761fff3b5df1efbc23fc943a1aa27e3b34f19cbb262f2938fee8cd3b95a9593cd3423d3c94cc1d3bed168a3bc122c9ba41b310bbe437453bdb4e233c2e68b0bc2702083df9059f3c79cd643c0fb1b4bb82aca93c7f7995bc23fc943c5e6cc13c8c9df33b2381c3bb2c6008bd70e442bde20c593d133cd6bcf584dabbde81b7bcb0a8923caeaac73ca090463cb6b62d3d', 'Array(Float32)')), +('<dbpedia:Salad>', 
reinterpret(x'fb26243c631de33c78a4b4bcc686a53c64ca8dbc261ede3bde0432bce93c66bd4547a2bc4a45af3c791ce93b7fe5ad3c5da7a13bdb2b04bc239fd7ba31a9153d2600d1bccf99fa3c58a914bc3dac01bd32214a3db85eda3b3e60d03c8c7edb3cefc990bc24bde43c4a9fd63c5b4628bb7c2a0dbd08a2443d25c4b63b440b88bcf8dc653d6f3edc3b2aa4b6bc103d65bc35dc6a3ce681453ce888973b33ec01bdea079ebcec4a0a3de7bddfbc2f481cbc07b9ffbc90aa0c3d2f66a93b71633b3cb90b05bd11089dbc0fe33dbd6b0b873b57fce93c2381ca3c9fa4b3bc958a0c3d51a4b5bc741edc3cf1b979bdad08993c146996bc2d5f57bd35fa773cfa1fd23b2be0d0bb399e5d3c29bb713c287f573d6f3edcbc3b87a23c698c003dfa01453d741e5cbd5441c93c492722bd1fdd643bcab97abd070c033cb37edabc2f8436bd47e4b5bb89a5ad3a0143bebc4429953ce91ed93a9ed9fbbae86a8a3a366b88bd63e1483cd35b6d3c146916bcdb4911bde888173dd064b2ba681bf03d5aec003d9709133d31c7223d8d2b063d1d2916bd3abc6ab973c4343d4ca6a8bc4b4c81bd64ac803c1742c43aa44899bc6560cf3c4b4c813a8be899bd08481dbd2a681cbc9ac433bb9ccb053de5271ebdab8992bc05c996bc454722bde2197abcf7a0cb3cd9ca8a3b741edcbcffff51bca8997bbd368915bdedc23e3c2aa4363ce1652bbd294a0fbc2a689cbc490995bcd1dc663d0ae5b03c88bce83cbe09123d213ede3b9aa6a6bcab6b853cd64b043cda24b23d64ac80bc287fd7bc5fcc003cd2c5ab3ae183b83c177edebcdd1b6dbc3c1d64bc7b7d62bd5666283ca4c0cd3a14a5b0bbb2ca0b3cf55d5fbcba471f3d19c14a3c550c813de663b8bc559bf03bb704b33cae804dbc08c0d13c3dac81bd292c82bc6269143cac5b6ebd53c914bd3446293bcdc71e3da4c0cd3c97eb853c9fc2403d36a7223b920b06bd9ed97b3c1fbfd73b95e4333d2fc0503c674914ba478a0e3d9d7f54bc2cab08bd36e33c3cfe879dbcdca3b8bc2825b03b4909153b449af73cfee1443c366b083d00cb09bc4bf9fd3bc1c4b2bc5fea8dbcf05fd23c0866aa3cf62897bd8ea3babca899fbba69c81abceb43383decd979bdcbc0cc3c93833a3dd64b043d5e01c93c0641cb3bedc2bebc810a0d3d04fe5ebd3944363caabedaba13bc6b3b09dede3c03681d3c79e0ceba4802c3bbe86a0a3d375bf1bc27cb883b366b883bbce4b2bc9b5af5bc1985303ca68b05bc50d9fd3c6524b5bcb6df533b20c629bdc25a743cb34240bd45a149bdc3cb84bcfddaf2bc2e9b71bdfb26a4bbe91e593cac1f54bc958a0cbd5d6b07bc480243bd38797e3d53ab87bd2600d13c208a0f3d1949963c7c2a0d3d1baa8fbd724c803c27e915bd6ddde2bc42e6a83cf782bebc33997e3c915e5b3c7e1a763b9b5af53ce1652bbbfb2624bc3482433cf9c52a3d396243bd9881c7bcbd0240bd502c01bd970993bdb37eda3b2e9b71bd177e5e3c2e2a0fbcaf4b85bc394436bc03c244bd7ac9133d4820d0bcc443b93cd301463d78a4343d0aa9963c69c89abb597bf03bc54a0bbd8762413cb49ce7b9b37e5a3de3c6a43c45833cbd69e627bc263ceb3befc9903c5b4628bd0f89163bcf28183be5eb83bcb90b853bad08193d38081c3d95a8993bc16a0b3bcb48983dd544b23cd54432bcd1fa73bdd5804cbc61da76bcecd9f9ba04e051bdcb8432bb4f6149bd53c994bc01433ebc3a4b883b35dc6a3b72bd623c3e42c3bcc2004dbc0d82c43bccde59bdb1ff533dfddaf2bb2b1c6bbd71452e3b2e2a0fbd61daf63cd6bce63cdeaa0a3d654242bdd35b6dbc09de5e3de3c6243db7e625bc2fa2c33aa6c71fbc33997e3cb87c67bd0b3f583ce5eb033d08c051bbc97de0bbd06432bd846b86bc1afd64bc772c003d2be050bc103de53c5f089b3c398050bd9727203c676721bc2d41cabcf48b033ae2fbecbb2a86a93b3f2b083d7d489a3ce3a817bdc3cb843ccd8b04bdc200cd3c79fe5b3c8e85adbbe5eb83bd1244b7ba11cc82bd963edb3b588b87bc0ae5b03c6d6c803dc7fe593d134b093b70eb86bbb6fde03b9863ba3c8a597cbde8a6243d736a8d3c7d66273dd30146bd61bc69bd970993bca964b33c05e7a3bc626914bd1d2996bc90c819bd239fd7bcc668183dee3af3bc6542c2bb261edebc61bc69bc58a9943c5da7a1bc177ede3cc16a8b3dd046a53c4f61c9bbce7b6dbc6560cfbc1f197fbc368995baadea8bbc3ada773d58c7a1bc5b46283d719f55bdf7a0cbbcc3e9113a8987203d03a4373ceb43b83c261edeb9ad79fb3b239f57bc9fa4b3bcf4c71d3d628721bdf8dc653cf53f52bc4802433d807b6fbd5e1fd6bc38089c3b4bf9fdbb54233cbd88daf5bc0c281d3c550c013c93bfd4bc428c01bdba652cbdd26b043d1469163ae1479ebc803f553d9ac433bd64ca0dbc126244bcad08193cd064323d51a4b53ce45c663c17424
4bc4067223d22eb88bc012531bd4583bcbca77b6ebcb2e898ba0b21cb3cf9a79dbcdeaa0abcd0823fbd1baa8f3c846b8639929af5bae3e4b13c0da0d1bcd64b843cb342c03c0d82c43acdc71e3d6a404f3cac5beebc846b06bb4ee994bb9140ceb777bb6fbb325de4bc28433d3c900434bd36a7a2bd14a530bd16ac82bae86a8abcfec3b7bb83be5bbc68df55b9b92912bd7886273bcf0a8bbdca9b6d3dbaa146bd87804ebc6904b53c6a22c2baf57bec3cfec337bdea079eba628721bd6a5e5cbc25c4363ba44819bc807bef3ce402bf3bc7e0ccbc294a0f3ce50911bce7f979bbe43e593d103d65bc11ea8f3cf57becbcdc852b3d64ca0d3cc0db6d3c323fd73b2363bdbc86ea0c3d6d6c803c8f39fc3957fce9bbd35bedbcdb49913b45a1c9bc6904b5bc7d66a7bc189cebbc56c0cfbc4cc4b53ce7f9f9bceb252bba0cec82bd4a8149bda0fedabaa223babcdf7c663d35be5d3cee1ce6bc0a8b093b68dfd5b914c3bd3bb7e6a53cae804d3dae26263b9f68193d2120d1bcad797b3c3ba5af3ceb61c53cf4a9103c256a8fbabeeb843bf8faf2bce663383c04fedeb70d64b73c698c803ce91ed9bcd76991bb2600513de3e431bc182b09ba9d43ba3af7a0cbbb769de23c7c0c00bd4d5af73cb639fb3a4927a2bc16ca0f3d2e2a8f3b6e8a8d3bb069923c8c24343dda423f3c5b64b5bc1001cb3cbca8983c9b1edb3c5423bcbc7d489abc8021c83cf05f523cdec817bc78689abb103de5bc810a8d3c0d6437bb466c81bd4ca6283df8faf2bce50911bd05ab09baf7823e3abd7a743d449af73b5b46a8bb08a244bb50d9fd3cce3f53bb0f8996bcae6240bbdc679e3c9ac433bd970993bb5f081b3c5a281bbcb8404dbb0125b1bc9ebb6e3dfa01453c5d6b07bdc6c2bf3c56a242bcd2e3383c6d6c003deacb033bf284b1bc3e60d03cf28431bd8ec147bc57fce9bcd2a71ebd4909153bb92912bdc3079f3c5f089bbc50bbf03cb3604dbb47e4353ccde52bbd628721bc64ac80bc0cec02bd6a404f3c8c24343a5684353dd33de0bb2d233d3cf2a2be3c6a5e5c3ca223babcf3ded83cd8ffd23c091af9bb2cc9953ce5eb83bc8be8193c502c813cb2e818bc74004fbc8a1de23ba1ab85bc7fe5adbcb42b053d5fcc803c1b8c02bd12262a3b7cb9fc3c9a88193d00cb893b4067a2bb2e0c023ce183b83cdc671e3cd9e8173dab6b85bd7d6627bc550c013dfbea89bd89a52dbd067d65bd78689abc2a681c3bf7a04bbc22eb083dcf28183d772c00bd7c0c80bc24f97ebdd26b84bbe1a145bb25a6a9bbbf9f533b552a0ebdb37edabc72bde23cb5a3b9bc63ff55bd0edc6bbd6ea89abb4429953ce3c6a4bb5cfaf6bcd5623f3bd2c5ab3cc47f533a2d41cabbffff51bc3adaf73bed68173d2381cabb32033dbb5e1fd6bb5d8914bd08c0d13c9aa626bb371fd7ba1c5e5ebc1b8c823dfe871dbcb929923cdb2b84bbf48b033ddb9ce63c3e60d0bc9f86a6bc89a52dbd8a1de23ca32a0cbcfea52abceee04b3ceae9103d49eb873c04e0d13c60804fbbc200cdbcca2a8bbc5fcc00bd2e9b713b0007a43a631de3bcd7a52b3a16e81cbb66baf6bba75d61bbc6a4b2bb0505b1bcb49c673c4f25afbc072a103cefab833bf98910bc502c813c77d97c3b36a7223c791ce9bc70eb86bd6a22c2bc38cc013cbc8a0bbd2825303c0125b1bc8f1befba371fd7bb2a86293dd9ca8a3b9d9de1bbfb08973cae26263d3b69953c82a04ebc1469963cc49d60bd1f6c02bdb2ca8bbb2ce722bcfbea89bcbf63393db13bee3c9c07203d50d97dbcdb9ce6bc1c22443d08489dbc16ca0fbd40c149bc6a40cfbca75d613b1c04b73cb342c03c698c00bc2861ca3cca9b6dbcadea0b3df31a73bc5aec00bda6c71fbc95a8193ce2197a3c5d6b873c1f6c82bc00e9963ce3e4b1bc0c281dbd00cb893c406722bb6b29943c6cbf55bb6f02423cd59e593c76433bbcabe3393ddec817bc83bedbbcadea8bbb9863babca1ab85bcb5c146bd0505b13c148723bc874434bad1dce63c14c33d3d2e0c82bbc25a743cd544b2bb3e42c3bc1fbf57bcd7a5abbba466a6bb208a0fbce6452b3cfa3ddf3bf4e5aa3c478a8ebc8e6720bd4067223c261e5e3dcf2898ba8e85adbcb6dfd3bcca9b6db958c721bd4c889bbc6e8a8d3cf57b6c3c6ea81a3ca241c7bcc1a6a53c4a8149bd69aa0dbba5deda3bf24817bd929af53a9ce9923db5679f3cd8e1453da1c9923bc200cdba56481bbcdb9ce6bbd7a52bbd929a75bcdb4911bc1001cbbcfe871dbc2120513da448993c917ce83ca5de5abc559bf03c1afde43ce4023fbb78c2c13bb87c673c1d0b89bce029913c0efaf83afea52a3cedc23e3db7e6a53cc923393c213ede3cf48b03bca01c68ba0ac723bc194916bc1c04b7bb067d65bc0623be3bcba2bf3cbe279f3bcda911bb9b00ce3cc6c2bfbce91e593c325d64bb502c01bde00b04bce888173c700914bd8726273c9727203cac0147bc42c89bbb
94fb6ebd4ee9143d1ea14abc373de43c072a10bd3abceabc1fbfd7bc7b233b3daf4b05ba070c83bcf07d5f3c371fd7bc6f204fbb5921c93b6f204fbc03862a3b1c04b7bb9ae2c03c1d0b093c4820503d97eb853c2d414abd78689abcf26624ba6767a13b9ac433bd0edc6b3bc47fd33c35be5dbc20a81cbc9d252d3c318b083cd7c338bcda42bf3a61bc693cd7871e3cd4ea0a3c5666a83ae8a6a4bc9fc2c03bcba23f3ca3b97b3c75e913bcefc990bb72dbefbbfc9ed83c9ccb85bb2600d13cff1d5f3c0a8b093db5c1c6bc2a8629bd9d7f54bd77bb6f3cc8ab843d64e81a3da1e71f3dd580cc3c64e89abc02bb723c55b9fd3bc1a6a53c483e5d3d4322433b9fc240bcb206a6bdeb7fd2bc99f9fb3c1adf573cfa0145bc62a5aebae50911bda5dedabb719fd5bbe95af33b466c013d920b063d25c4363c6e8a8dbbc188183d208a0f3d52e04f3c93bf543c46bfd63c6767a13c6026283c662b073d6044b5bb8c7edb3c86ea8c3c4c881bba0aa916bad064b2bb7da241bbbc8a8b3c900434bb6f7af6bcc188983b8f397c3c35be5d3c1d9af83cb7aa0b3cb342c03c9727a0bb72dbefbbbd7af43b6ec627b9a982c0bb101f583c552a0ebb5186a83cbca898bcb06912bcc7e0ccbcac01c7bc851fd5bb6c47a1bcecd979bc3482433d40c149bc698c00ba7efce8bc2363bdbc478a8ebc7b7de2bb6e8a0dbd8969133d5666283c16062a3d9347a03c7f8b86bca1e71fbb1179ff3c2e9b713c38cc013cba65ac3b62c33bbcc686a5bc4ca6283c4049153da1ab053d3e60503d172437bb5dc52e3d27e915bc9140cebc1bc89c3c14a530bcf98910bda98240bc5fcc80bd3b8722bc292c02bcd0a0ccbc11cc02bd3689153c8d49933c8ea3babbb0871fbc7d84343c7181c8bc4bf9fdb978689a3c4f433cbc64e89abc292c023d105bf2bb99db6ebd8501c83cf041c53c1e47a3bc3482c3bc0d64373b3d06a93cdb4911bc2e0c023dfa3ddf3c56a2c23cad08193de86a8abcf266a4bc6b29143df764313b0f6b893c6ddd62bcd83bed3b8c24b4ba64ac00bbcbc04c3d67a33b3c7c2a8dbc15597fbc793af63cb5c146bdb360cd3bdf7c66bbb1e1c63c0f6b89bc11ea0fbd9602c13b4a452fba72f97c3ce1bf523c96204ebc3c3b71bc301a78bb0505b13c227af8bc08662abc25a629bb8bca0c3c9f6819bb22eb08bd89c33a3cb0871f3dbf45ac3b0ac7233cde04323deae9103d900434bd490915bd9ccb05bcdb2b04bd1fdd64bc05e7a33cda2432bc5441c93a588b873c5b64b53c9ae2403c7c0c003ce69fd23c3e24b63c12262a3aa6a912bd31c7223c84c52dbc6ee4b4ba25e243bd51c2423cca2a0b3cf8dce5bb3f2b083c6ec6a7bce509913b62a52e3b9845ad3a8ea3babb95e4b33cc668183b73a6a7bc741edcbc662b87bcbcc6a53b71633b3cbc8a8b3d8ea33a3c7fe52dbcb90b853c4429153caabeda3c1d0b89bc6b2994bba721c7bbf989903cb5852c3d94197c3ce93ce63a03862abda73fd43c51a4b53cd6daf33b7aab86baeb9d5fbd95c626bc13bc6b3c28433d3d922993bb5de3bb3b0cec82bc318b08bb1606aabce7f9f93cb61b6e3c718148bc81281abba964b3bc3dac013db5c1463caba79f3c9ce992bcb87c67bccf2818bbfb26243cdf22bf3c58c7213c27e9153dc7fe59bc2600d13a8fdf54bc08a2c4bc40852fbdce0339bc73a6273d46dde3bb31c722bd5dc5ae39eacb033d8affd4bb7ae7a03ca39b6e3cc6c2bfbcb2e898bc772c803cbbdd603c4ce2c23c38cc81bc49eb07bd67a3bb3cd64b04bd0edcebbc72f97c3b3d06293c769d623c22eb083ceae910bc700914bcd4ea0a3d00e996bbb44912bd4820503ce50911bceb9ddfbb69aa8dbc0ae5b0bcd7a52bbd631de3ba6406283c0edceb3b58c7213d36e3bcbce00b043c95a819bc958a0c3b2b1ceb3c287fd73cb61b6ebcbf452cbc4ca6a83b597bf0bc42aa0e3c9ce992bc5fea0dbdedc23ebc58a914bc1e65b03c83dc68bb101fd83b14c33dba09fc6b3c3399fe3a4d5af73cc3cb84bc37014abc4ecb87bba6a992ba373de43b5de3bbbbf60a0abc2fa2c33cf53fd23ca946a63c674994bcc5d9fa3c8b06a73bb3604d3c2f66a93b6406a83c8fdf543de645ab3cd2a79ebb5a99fdbb6dfbef3a0fa723ba3689153d3446a93c03c244bdfc62be3b00e996bba94626bd6767a13bc443b93c9845ad3c66ba763bc3cb84bb8c7e5b3c47e4353cb42b85bbe1bf523c14c33dbc11797f3b0fc530bc6a7ce93c884b863c8b06a73b3464363cb6dfd33955b9fd3b33997e3bfb08973c4cc4b5bc1afde4ba3926a93c741e5cbc4a452fbdd408983cac1fd4bcd1bed9bca80a8cbce8a6243b47c628bd97eb85bb0ebede3ce09a733c77d97c3aa4c0cdbb75cb863c1ea1caba11ea8f3c25a6a93c091af9babb197b3c4ee9943c492722bcc73a74bc79e0cebcb99af4bc091a79bc83dce83b24dbf13c769d62bb2d41cabc8f1bef3ce86a8abc75e
913bda6c71f3c915e5b3d194916bc0b033e3cd769913a68fd62bcdc852bbcd408183c6b9a763c4ca6a8bc8987a0b94c6a0ebc4c6a8ebb4429153c0ae5b03c0ac7a3bb24dbf1bb2d41cabc4f6149bc2cc995bb81281abc7625ae3ceae9903bcd8b843c4ecb07bd8987a0bced86a43c67a3bb3c5aec803cf284b13c602628bdbca818bbc94146bcf57bec3b9ebb6e3cc5bb6dbc84c5ad3ce402bfbb017fd8bbb7aa0bbc256a0f3d239fd7bbfa0145bccb6625bc19a3bd3b6f3e5c3c83dc68bc146916bde681c5bc42c81bbcce03b93cdca3b8bb5d8914bdcda911bc38ea8e3c934720bdd906a53c375bf13b38cc013ceefe58bd2cc9153cd7871e3c1e47233c404995bb325d64bb3689953ba32a0c3b51c242bc8264b43caabedabc75cb06bdb85eda3cfb08173cecd9f9ba3ce1c93b02bbf23b3ba5af3c176051bc72dbef39cf2898bbe681c5bcc5bbed3ca8997bbb884b863a571a77bc1a397f3c11ea8f3c8d49133d89a52dbd9d7fd4bb8501c8bcf4e52a3c1b8c823cab6b053d215cebbb767f553c73881a3dfd4b03bdfea52a3ca448193bc805acbb478a8e3c9b3c683cfe6990bc070c03bd0d8244bc88da75bc6f5ce9bce1479e3c6a404fbbe681453c51a4353c2a8629bceb7fd2bb35faf7bbba652cbc2eb9febc6d19fd3cfe879d3b5b4628bd4ecb87bd1487a3bc4a9f563c08662abb4bbde33cf266a4bc42aa0ebc40a3bcbc7cb97c3c4909953c2bfe5dbc69aa0d3b0aa996bb884b86bbc54a8b3cb2e8983b606242bc428c013b4a9fd63c5a281b3d25c436bc63ff553c0884b73c6a5edcbc22eb08bd2600d13c47c6a8bc6ea89abc6b2914bc6044b5bca0e04d3c9c07203caf4b853ced68173d40c1493c0c289d3c244c82bcf3ded8bbbcc6253cb2ca0b3d9e4a0cbd8282c1bca32a0cbcf48b83bc36a7a2bb72f9fcbb6c652ebdc1a6a5bcd7a5ab3b2e2a8fbcd59e59bc466c01bc16ac823cda7e59bc75e993bc1967233dd1dce63c0ac7a33cb37e5abcd81d60bc67c1c8b99a88993c83be5bbc11ea0f3a153bf2bcc71c67bc2fc0503d0fc5b0bbc46146bc3464b6ba294a8f3c55b9fd3afe879dbc4c889b3c8ec147bcc0dbedbc4b4c01bd504a0ebd485c6a3b6839fdbc4a9fd6bb76252ebc75e913bc95e4b33c', 'Array(Float32)')); + +SELECT '-- Brute-force scan'; + +SELECT id FROM dbpedia +ORDER BY cosineDistance(vec, (SELECT vec FROM dbpedia WHERE id = '<dbpedia:Honda>')) +LIMIT 4 +SETTINGS use_skip_indexes = 0; + +SELECT '-- Approximate nearest neighbor search'; + +SELECT id FROM dbpedia +ORDER BY cosineDistance(vec, (SELECT vec FROM dbpedia WHERE id = '<dbpedia:Honda>')) +LIMIT 4; + +SELECT '-- Verify the vector index is used'; + +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + SELECT id FROM dbpedia + ORDER BY cosineDistance(vec, (SELECT vec FROM dbpedia WHERE id = '<dbpedia:Honda>')) + LIMIT 4 +) +WHERE explain LIKE '%vector_similarity%'; + +SELECT '-- Run a couple more ANN queries'; + +SELECT id FROM dbpedia +ORDER BY cosineDistance(vec, (SELECT vec FROM dbpedia WHERE id = '<dbpedia:Pizza>')) +LIMIT 4; + +SELECT id FROM dbpedia +ORDER BY cosineDistance(vec, (SELECT vec FROM dbpedia WHERE id = '<dbpedia:Database_theory>')) +LIMIT 4; + +SELECT '-- Verify the index is small'; +-- Check that an index with binary quantization (1 bit / dimension) is smaller than an index with BFloat16 (2 bytes / dimension) + +SELECT if(data_uncompressed_bytes < (20 * 1536 * 2), 'Good', toString(data_uncompressed_bytes)) +FROM system.data_skipping_indices +WHERE database = currentDatabase() AND name = 'vec_idx'; + +DROP TABLE dbpedia; + +-- Now test that a rescoring multiplier > 1.0 helps with search accuracy +DROP TABLE IF EXISTS tab; +CREATE TABLE tab +( + id Int32, + vec Array(Float32), + INDEX vec_idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 8, 'b1', 64, 128) +) ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity = 1; + +INSERT INTO tab VALUES + (1, [-0.50, 0.50, -0.75, 0.95, 0.9, 0.9, 0.9, 0.9]), + (2, [-0.20, 0.30, -0.05, -0.08, 0.9, 0.9, 0.9, 0.9]), + (3, [-0.25, -0.25, -0.25, -0.25, 0.9, 0.9, 0.9, 0.9]), + (4, [0.20, -0.50, -0.75, -0.95, 0.9, 0.9, 0.9, 0.9]); 
+
+-- Binary quantization of the above vectors:
+-- 1, [0, 1, 0, 1, 1, 1, 1, 1]
+-- 2, [0, 1, 0, 0, 1, 1, 1, 1]
+-- 3, [0, 0, 0, 0, 1, 1, 1, 1]
+-- 4, [1, 0, 0, 0, 1, 1, 1, 1]
+
+-- Query vector is [0, 1, 1, 1, 1, 1, 1, 1]
+-- Under the Hamming distance used by the binary-quantized index, Row #1 is at distance 1 from the query vector and
+-- Row #2 is at distance 2 from the query vector. But Row #2 is nearer by cosine similarity on the
+-- full-precision vectors. With rescoring, we get the correct nearest neighbour, Row #2.
+
+SELECT '-- Result without rescoring';
+-- Expect 1.
+SELECT id
+FROM tab
+ORDER BY cosineDistance(vec, [-0.25, 0.25, 0.10, 0.10, 0.9, 0.9, 0.9, 0.9])
+LIMIT 1
+SETTINGS vector_search_with_rescoring = 0;
+
+SELECT '-- Result with rescoring';
+-- Expect 2.
+SELECT id
+FROM tab
+ORDER BY cosineDistance(vec, [-0.25, 0.25, 0.10, 0.10, 0.9, 0.9, 0.9, 0.9])
+LIMIT 1
+SETTINGS vector_search_with_rescoring = 1,
+ vector_search_index_fetch_multiplier = 4;
+
+DROP TABLE tab;
diff --git a/parser/testdata/02354_vector_search_choose_correct_index/ast.json b/parser/testdata/02354_vector_search_choose_correct_index/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02354_vector_search_choose_correct_index/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02354_vector_search_choose_correct_index/metadata.json b/parser/testdata/02354_vector_search_choose_correct_index/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02354_vector_search_choose_correct_index/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02354_vector_search_choose_correct_index/query.sql b/parser/testdata/02354_vector_search_choose_correct_index/query.sql
new file mode 100644
index 000000000..837b3e9de
--- /dev/null
+++ b/parser/testdata/02354_vector_search_choose_correct_index/query.sql
@@ -0,0 +1,25 @@
+-- Tags: no-fasttest, no-ordinary-database
+
+SET parallel_replicas_local_plan = 1; -- this setting is randomized, set it explicitly to have local plan for parallel replicas
+
+-- Test for issue #77978
+
+DROP TABLE IF EXISTS tab;
+
+CREATE TABLE tab(id Int32, vec1 Array(Float32), vec2 Array(Float32), INDEX idx vec1 TYPE vector_similarity('hnsw', 'L2Distance', 2)) ENGINE = MergeTree ORDER BY id;
+
+INSERT INTO tab VALUES (0, [1.0, 0.0], [1.0, 0.0]), (1, [1.1, 0.0], [1.1, 0.0]), (2, [1.2, 0.0], [1.2, 0.0]), (3, [1.3, 0.0], [1.3, 0.0]), (4, [1.4, 0.0], [1.4, 0.0]), (5, [1.5, 0.0], [1.5, 0.0]), (6, [0.0, 2.0], [0.0, 2.0]), (7, [0.0, 2.1], [0.0, 2.1]), (8, [0.0, 2.2], [0.0, 2.2]), (9, [0.0, 2.3], [0.0, 2.3]), (10, [0.0, 2.4], [0.0, 2.4]), (11, [0.0, 2.5], [0.0, 2.5]);
+
+SELECT 'Searches on vec1 should use the vector index';
+SELECT trimLeft(explain) AS explain FROM (
+ EXPLAIN indexes=1 WITH [0.0, 2.0] AS reference_vec SELECT id FROM tab ORDER BY L2Distance(vec1, reference_vec) LIMIT 3
+)
+WHERE explain ILIKE '%Skip%' OR explain ILIKE '%Name: idx%' OR explain ILIKE '%vector_similarity%';
+
+SELECT 'Searches on vec2 should be brute force';
+SELECT trimLeft(explain) AS explain FROM (
+ EXPLAIN indexes=1 WITH [0.0, 2.0] AS reference_vec SELECT id FROM tab ORDER BY L2Distance(vec2, reference_vec) LIMIT 3
+)
+WHERE explain ILIKE '%Skip%' OR explain ILIKE '%Name: idx%' OR explain ILIKE '%vector_similarity%';
+
+DROP TABLE tab;
diff --git a/parser/testdata/02354_vector_search_default_granularity/ast.json b/parser/testdata/02354_vector_search_default_granularity/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02354_vector_search_default_granularity/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02354_vector_search_default_granularity/metadata.json b/parser/testdata/02354_vector_search_default_granularity/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02354_vector_search_default_granularity/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02354_vector_search_default_granularity/query.sql b/parser/testdata/02354_vector_search_default_granularity/query.sql
new file mode 100644
index 000000000..7bb7cf830
--- /dev/null
+++ b/parser/testdata/02354_vector_search_default_granularity/query.sql
@@ -0,0 +1,16 @@
+-- Tags: no-fasttest, no-ordinary-database
+
+-- Tests that vector search indexes use a (non-standard) index granularity of 100 million by default.
+
+-- After CREATE TABLE
+DROP TABLE IF EXISTS tab;
+CREATE TABLE tab (id Int32, vec Array(Float32), INDEX idx(vec) TYPE vector_similarity('hnsw', 'L2Distance', 1)) ENGINE = MergeTree ORDER BY id;
+SELECT granularity FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab' AND name = 'idx';
+
+-- After ALTER TABLE
+DROP TABLE tab;
+CREATE TABLE tab (id Int32, vec Array(Float32)) ENGINE = MergeTree ORDER BY id;
+ALTER TABLE tab ADD INDEX idx(vec) TYPE vector_similarity('hnsw', 'L2Distance', 1);
+SELECT granularity FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab' AND name = 'idx';
+
+DROP TABLE tab;
diff --git a/parser/testdata/02354_vector_search_detach_attach/ast.json b/parser/testdata/02354_vector_search_detach_attach/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02354_vector_search_detach_attach/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02354_vector_search_detach_attach/metadata.json b/parser/testdata/02354_vector_search_detach_attach/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02354_vector_search_detach_attach/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02354_vector_search_detach_attach/query.sql b/parser/testdata/02354_vector_search_detach_attach/query.sql
new file mode 100644
index 000000000..036a09a37
--- /dev/null
+++ b/parser/testdata/02354_vector_search_detach_attach/query.sql
@@ -0,0 +1,18 @@
+-- Tags: no-fasttest, no-ordinary-database
+
+-- Tests that vector similarity indexes can be detached/attached.
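+
+-- (Editorial note, an assumption about intent: DETACH ... SYNC completes the
+-- detach synchronously, and the subsequent ATTACH re-reads the part from disk,
+-- so the search below also exercises loading of the persisted vector index.)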
+
+DROP TABLE IF EXISTS tab;
+CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2)) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192;
+INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]);
+
+DETACH TABLE tab SYNC;
+ATTACH TABLE tab;
+
+WITH [0.0, 2.0] AS reference_vec
+SELECT id, vec, L2Distance(vec, reference_vec)
+FROM tab
+ORDER BY L2Distance(vec, reference_vec)
+LIMIT 3;
+
+DROP TABLE tab;
diff --git a/parser/testdata/02354_vector_search_different_array_sizes/ast.json b/parser/testdata/02354_vector_search_different_array_sizes/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02354_vector_search_different_array_sizes/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02354_vector_search_different_array_sizes/metadata.json b/parser/testdata/02354_vector_search_different_array_sizes/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02354_vector_search_different_array_sizes/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02354_vector_search_different_array_sizes/query.sql b/parser/testdata/02354_vector_search_different_array_sizes/query.sql
new file mode 100644
index 000000000..ea4bf3008
--- /dev/null
+++ b/parser/testdata/02354_vector_search_different_array_sizes/query.sql
@@ -0,0 +1,15 @@
+-- Tags: no-fasttest, no-ordinary-database
+
+-- Tests that vector similarity indexes reject INSERTs of Arrays whose size differs from the size specified in the index
+
+DROP TABLE IF EXISTS tab;
+
+CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2)) ENGINE = MergeTree ORDER BY id;
+
+-- Mixed correct/wrong
+INSERT INTO tab values (0, [2.2, 2.3]) (1, [3.1, 3.2, 3.3]); -- { serverError INCORRECT_DATA }
+
+-- Both wrong but of the same length
+INSERT INTO tab values (2, [2.2, 2.3, 2.4]) (3, [3.1, 3.2, 3.3]); -- { serverError INCORRECT_DATA }
+
+DROP TABLE tab;
diff --git a/parser/testdata/02354_vector_search_drop_table_clear_cache/ast.json b/parser/testdata/02354_vector_search_drop_table_clear_cache/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02354_vector_search_drop_table_clear_cache/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02354_vector_search_drop_table_clear_cache/metadata.json b/parser/testdata/02354_vector_search_drop_table_clear_cache/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02354_vector_search_drop_table_clear_cache/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02354_vector_search_drop_table_clear_cache/query.sql b/parser/testdata/02354_vector_search_drop_table_clear_cache/query.sql
new file mode 100644
index 000000000..562256915
--- /dev/null
+++ b/parser/testdata/02354_vector_search_drop_table_clear_cache/query.sql
@@ -0,0 +1,42 @@
+-- Tags: no-fasttest, no-ordinary-database, no-parallel, no-parallel-replicas
+-- no-parallel: Vector index cache should not be touched by another test
+-- no-parallel-replicas: EXPLAIN plan stability
+
+-- Verify that the vector similarity index cache is cleared when a table with a vector index is dropped.
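+
+-- (Editorial sketch of the flow below: (1) run one ANN query so the index is
+-- loaded into the cache, (2) check that the cache metric is non-zero, (3) drop
+-- the table and check that the metric drops back to zero. The cache state can
+-- be inspected at any point with a query like:
+--   SELECT name, value FROM system.metrics WHERE name LIKE '%VectorSimilarityIndexCache%';)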
+
+SET enable_analyzer = 1;
+SET parallel_replicas_local_plan = 1; -- this setting is randomized, set it explicitly to force local plan for parallel replicas
+
+DROP TABLE IF EXISTS tab;
+
+SYSTEM DROP VECTOR SIMILARITY INDEX CACHE;
+
+CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2)) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192;
+INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]);
+
+-- Make sure the vector index is loaded and used
+WITH [0.0, 2.0] AS reference_vec
+SELECT id
+FROM tab
+ORDER BY L2Distance(vec, reference_vec)
+LIMIT 3;
+
+EXPLAIN indexes = 1
+WITH [0.0, 2.0] AS reference_vec
+SELECT id, vec, L2Distance(vec, reference_vec)
+FROM tab
+ORDER BY L2Distance(vec, reference_vec)
+LIMIT 3;
+
+-- Make sure the vector index cache is utilized.
+SELECT name, IF(value > 0, 'Good', 'Zero') FROM system.metrics WHERE name LIKE '%VectorSimilarityIndexCacheBytes%';
+
+-- SYNC is important to drop the table/parts/caches immediately
+DROP TABLE tab SYNC;
+
+-- Should be 0
+SELECT name, value FROM system.metrics WHERE name LIKE '%VectorSimilarityIndexCacheBytes%';
+
+-- ALTER TABLE ... DROP INDEX <vector index> and merges of parts will also clear
+-- any corresponding loaded granules from the vector index cache. These happen
+-- in the background as mutations, and unused parts are deleted after "old_parts_lifetime".
diff --git a/parser/testdata/02354_vector_search_empty_arrays_or_default_values/ast.json b/parser/testdata/02354_vector_search_empty_arrays_or_default_values/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02354_vector_search_empty_arrays_or_default_values/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02354_vector_search_empty_arrays_or_default_values/metadata.json b/parser/testdata/02354_vector_search_empty_arrays_or_default_values/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02354_vector_search_empty_arrays_or_default_values/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02354_vector_search_empty_arrays_or_default_values/query.sql b/parser/testdata/02354_vector_search_empty_arrays_or_default_values/query.sql
new file mode 100644
index 000000000..5e425f257
--- /dev/null
+++ b/parser/testdata/02354_vector_search_empty_arrays_or_default_values/query.sql
@@ -0,0 +1,11 @@
+-- Tags: no-fasttest, no-ordinary-database
+
+-- Vector similarity indexes must reject empty Arrays or Arrays with default values (issue #52258)
+
+DROP TABLE IF EXISTS tab;
+
+CREATE TABLE tab (id UInt64, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 1)) ENGINE = MergeTree() ORDER BY id;
+INSERT INTO tab VALUES (1, []); -- { serverError INCORRECT_DATA }
+INSERT INTO tab (id) VALUES (1); -- { serverError INCORRECT_DATA }
+
+DROP TABLE tab;
diff --git a/parser/testdata/02354_vector_search_expansion_search/ast.json b/parser/testdata/02354_vector_search_expansion_search/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02354_vector_search_expansion_search/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02354_vector_search_expansion_search/metadata.json b/parser/testdata/02354_vector_search_expansion_search/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02354_vector_search_expansion_search/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02354_vector_search_expansion_search/query.sql b/parser/testdata/02354_vector_search_expansion_search/query.sql
new file mode 100644
index 000000000..801d9e3de
--- /dev/null
+++ b/parser/testdata/02354_vector_search_expansion_search/query.sql
@@ -0,0 +1,46 @@
+-- Tags: no-fasttest, long, no-asan, no-ubsan, no-tsan, no-debug
+-- ^^ Disable test for slow builds: generating data takes time but a sufficiently large data set
+-- is necessary for different hnsw_candidate_list_size_for_search settings to make a difference
+
+-- Tests vector search with setting 'hnsw_candidate_list_size_for_search'
+
+DROP TABLE IF EXISTS tab;
+
+CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2)) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192;
+
+-- Generate random values but with a fixed seed (conceptually), so that the data is deterministic.
+-- Unfortunately, no random function in ClickHouse accepts a seed. Instead, abuse the numbers table + hash functions to provide
+-- deterministic randomness.
+INSERT INTO tab SELECT number, [sipHash64(number)/18446744073709551615, wyHash64(number)/18446744073709551615] FROM numbers(660000); -- 18446744073709551615 is the biggest UInt64
+
+-- hnsw_candidate_list_size_for_search = 0 is illegal
+WITH [0.5, 0.5] AS reference_vec
+SELECT id, vec, L2Distance(vec, reference_vec)
+FROM tab
+ORDER BY L2Distance(vec, reference_vec)
+LIMIT 3
+SETTINGS hnsw_candidate_list_size_for_search = 0; -- { serverError INVALID_SETTING_VALUE }
+
+DROP TABLE IF EXISTS results;
+CREATE TABLE results(id Int32) ENGINE = Memory;
+
+-- Standard vector search with default hnsw_candidate_list_size_for_search = 64
+INSERT INTO results
+ SELECT id
+ FROM tab
+ ORDER BY L2Distance(vec, [0.5, 0.5])
+ LIMIT 1;
+
+-- Vector search with custom hnsw_candidate_list_size_for_search
+INSERT INTO results
+ SELECT id
+ FROM tab
+ ORDER BY L2Distance(vec, [0.5, 0.5])
+ LIMIT 1
+ SETTINGS hnsw_candidate_list_size_for_search = 1;
+
+-- Expect that the matches are different
+SELECT count(distinct *) FROM results;
+
+DROP TABLE results;
+DROP TABLE tab;
diff --git a/parser/testdata/02354_vector_search_index_creation_negative/ast.json b/parser/testdata/02354_vector_search_index_creation_negative/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02354_vector_search_index_creation_negative/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02354_vector_search_index_creation_negative/metadata.json b/parser/testdata/02354_vector_search_index_creation_negative/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02354_vector_search_index_creation_negative/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02354_vector_search_index_creation_negative/query.sql b/parser/testdata/02354_vector_search_index_creation_negative/query.sql
new file mode 100644
index 000000000..ae44e1a75
--- /dev/null
+++ b/parser/testdata/02354_vector_search_index_creation_negative/query.sql
@@ -0,0 +1,51 @@
+-- Tags: no-fasttest, no-ordinary-database
+
+-- Tests that various conditions are checked during creation of vector search indexes.
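+
+-- (Editorial positive control, a sketch mirroring index definitions used in the
+-- other tests: both the three-argument and the six-argument forms are accepted, e.g.
+--   CREATE TABLE tab_ok(vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2)) ENGINE = MergeTree ORDER BY tuple();
+--   CREATE TABLE tab_ok6(vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 8, 'b1', 64, 128)) ENGINE = MergeTree ORDER BY tuple();
+-- where the six arguments are method, distance function, dimensions, quantization,
+-- M, and, presumably, the construction-time candidate list size.)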
+
+DROP TABLE IF EXISTS tab;
+
+SELECT 'Three or six index arguments';
+CREATE TABLE tab(vec Array(Float32), INDEX idx vec TYPE vector_similarity) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY }
+CREATE TABLE tab(vec Array(Float32), INDEX idx vec TYPE vector_similarity()) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY }
+CREATE TABLE tab(vec Array(Float32), INDEX idx vec TYPE vector_similarity('cant have', 'two args')) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY }
+CREATE TABLE tab(vec Array(Float32), INDEX idx vec TYPE vector_similarity('cant', 'have', 'four', 'args')) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY }
+CREATE TABLE tab(vec Array(Float32), INDEX idx vec TYPE vector_similarity('cant', 'have', 'five', 'args', '!')) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY }
+CREATE TABLE tab(vec Array(Float32), INDEX idx vec TYPE vector_similarity('cant', 'have', 'seven', 'args', '!', '!', '!')) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY }
+
+SELECT '1st argument (method) must be String and hnsw';
+CREATE TABLE tab(vec Array(Float32), INDEX idx vec TYPE vector_similarity(3, 'L2Distance', 1)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY }
+CREATE TABLE tab(vec Array(Float32), INDEX idx vec TYPE vector_similarity('not_hnsw', 'L2Distance', 1)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_DATA }
+
+SELECT '2nd argument (distance function) must be String and L2Distance or cosineDistance';
+CREATE TABLE tab(vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 3, 1)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY }
+CREATE TABLE tab(vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'invalid_distance', 1)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_DATA }
+
+SELECT '3rd argument (dimensions) must be UInt64 and > 0';
+CREATE TABLE tab(vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 'invalid')) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY }
+CREATE TABLE tab(vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 0)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_DATA }
+
+SELECT '4th argument (quantization), if given, must be String and f32, f16, ...';
+CREATE TABLE tab(vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 1, 1, 1, 1)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY }
+CREATE TABLE tab(vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 1, 'invalid', 2, 1)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_DATA }
+
+SELECT '5th argument (M), if given, must be UInt64 and > 1';
+CREATE TABLE tab(vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 1, 'f32', 'invalid', 1)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY }
+CREATE TABLE tab(vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 1, 'f32', 1, 1)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_DATA }
+
+SELECT 'Binary quantization requires cosine distance';
+CREATE TABLE tab(vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 8, 'b1', 64, 128)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_DATA }
+SELECT 'Binary quantization requires the
dimension to be a multiple of 8';
+CREATE TABLE tab(vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 7, 'b1', 64, 128)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_DATA }
+
+SELECT 'Must be created on a single column';
+CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx (vec, id) TYPE vector_similarity('hnsw', 'L2Distance', 1)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_NUMBER_OF_COLUMNS }
+
+SELECT 'Must be created on Array(Float32|Float64|BFloat16) columns';
+SET allow_suspicious_low_cardinality_types = 1;
+CREATE TABLE tab(vec UInt64, INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 1)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError ILLEGAL_COLUMN }
+CREATE TABLE tab(vec Float32, INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 1)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError ILLEGAL_COLUMN }
+CREATE TABLE tab(vec LowCardinality(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 1)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError ILLEGAL_COLUMN }
+CREATE TABLE tab(vec Nullable(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 1)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError ILLEGAL_COLUMN }
+CREATE TABLE tab(vec Array(UInt64), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 1)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError ILLEGAL_COLUMN }
+CREATE TABLE tab(vec Array(Nullable(Float32)), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 1)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError ILLEGAL_COLUMN }
+CREATE TABLE tab(vec Array(LowCardinality(Float32)), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 1)) ENGINE = MergeTree ORDER BY tuple(); -- { serverError ILLEGAL_COLUMN }
diff --git a/parser/testdata/02354_vector_search_multiple_indexes/ast.json b/parser/testdata/02354_vector_search_multiple_indexes/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02354_vector_search_multiple_indexes/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02354_vector_search_multiple_indexes/metadata.json b/parser/testdata/02354_vector_search_multiple_indexes/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02354_vector_search_multiple_indexes/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02354_vector_search_multiple_indexes/query.sql b/parser/testdata/02354_vector_search_multiple_indexes/query.sql
new file mode 100644
index 000000000..85f82bdd3
--- /dev/null
+++ b/parser/testdata/02354_vector_search_multiple_indexes/query.sql
@@ -0,0 +1,12 @@
+-- Tags: no-fasttest, no-ordinary-database
+
+-- Tests that multiple vector similarity indexes can be created on the same column (even if that makes no sense)
+
+DROP TABLE IF EXISTS tab;
+CREATE TABLE tab (id Int32, vec Array(Float32), PRIMARY KEY id, INDEX vec_idx(vec) TYPE vector_similarity('hnsw', 'L2Distance', 1));
+
+ALTER TABLE tab ADD INDEX idx(vec) TYPE minmax;
+ALTER TABLE tab ADD INDEX vec_idx1(vec) TYPE vector_similarity('hnsw', 'cosineDistance', 1);
+ALTER TABLE tab ADD INDEX vec_idx2(vec) TYPE vector_similarity('hnsw', 'L2Distance', 1); -- silly, but creating the same index twice also works, as it does for non-vector indexes ...
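+
+-- (Editorial sanity check, a sketch: after the ALTERs above, the minmax index and
+-- all three vector indexes should be listed for the table, e.g. via
+--   SELECT name, type FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab';)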
+
+DROP TABLE tab;
diff --git a/parser/testdata/02354_vector_search_multiple_marks/ast.json b/parser/testdata/02354_vector_search_multiple_marks/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02354_vector_search_multiple_marks/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02354_vector_search_multiple_marks/metadata.json b/parser/testdata/02354_vector_search_multiple_marks/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02354_vector_search_multiple_marks/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02354_vector_search_multiple_marks/query.sql b/parser/testdata/02354_vector_search_multiple_marks/query.sql
new file mode 100644
index 000000000..8e3e7d5a8
--- /dev/null
+++ b/parser/testdata/02354_vector_search_multiple_marks/query.sql
@@ -0,0 +1,23 @@
+-- Tags: no-fasttest, no-ordinary-database, no-tsan
+-- no-tsan: generating data takes too long
+
+-- Tests correctness of vector similarity index with > 1 mark
+
+DROP TABLE IF EXISTS tab;
+
+CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2)) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192, index_granularity_bytes = 10485760;
+INSERT INTO tab SELECT number, [toFloat32(number), 0.0] FROM numbers(10000);
+
+WITH [1.0, 0.0] AS reference_vec
+SELECT id, vec, L2Distance(vec, reference_vec)
+FROM tab
+ORDER BY L2Distance(vec, reference_vec)
+LIMIT 1;
+
+WITH [9000.0, 0.0] AS reference_vec
+SELECT id, vec, L2Distance(vec, reference_vec)
+FROM tab
+ORDER BY L2Distance(vec, reference_vec)
+LIMIT 1;
+
+DROP TABLE tab;
diff --git a/parser/testdata/02354_vector_search_part_format/ast.json b/parser/testdata/02354_vector_search_part_format/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02354_vector_search_part_format/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02354_vector_search_part_format/metadata.json b/parser/testdata/02354_vector_search_part_format/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02354_vector_search_part_format/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02354_vector_search_part_format/query.sql b/parser/testdata/02354_vector_search_part_format/query.sql
new file mode 100644
index 000000000..eea57bd15
--- /dev/null
+++ b/parser/testdata/02354_vector_search_part_format/query.sql
@@ -0,0 +1,58 @@
+-- Tags: no-fasttest, no-ordinary-database, no-asan
+-- no-asan: runs too long
+
+-- Basic tests for vector similarity indexes stored in compact vs. wide format, and full vs.
packed parts + +SET parallel_replicas_local_plan = 1; -- this setting is randomized, set it explicitly to have local plan for parallel replicas + +DROP TABLE IF EXISTS tab_compact_full; +DROP TABLE IF EXISTS tab_wide_full; + +CREATE TABLE tab_compact_full(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part = 1e9, min_rows_for_wide_part = 1e9, min_bytes_for_full_part_storage = 0, index_granularity = 3; +CREATE TABLE tab_wide_full(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0, min_bytes_for_full_part_storage = 0, index_granularity = 3; + +INSERT INTO tab_compact_full VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [1.5, 0.0]), (6, [0.0, 2.0]), (7, [0.0, 2.1]), (8, [0.0, 2.2]), (9, [0.0, 2.3]), (10, [0.0, 2.4]), (11, [0.0, 2.5]); +INSERT INTO tab_wide_full VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [1.5, 0.0]), (6, [0.0, 2.0]), (7, [0.0, 2.1]), (8, [0.0, 2.2]), (9, [0.0, 2.3]), (10, [0.0, 2.4]), (11, [0.0, 2.5]); + +SELECT 'Check part formats'; + +SELECT table, part_type FROM system.parts WHERE database = currentDatabase() AND table LIKE 'tab_%' ORDER BY table; + +SELECT 'Check tab_compact_full'; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_compact_full +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + WITH [0.0, 2.0] AS reference_vec + SELECT id + FROM tab_compact_full + ORDER BY L2Distance(vec, reference_vec) + LIMIT 3 +) +WHERE explain LIKE '%vector_similarity%' OR explain LIKE '%Granules:%'; + +SELECT 'Check tab_wide_full'; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_wide_full +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + WITH [0.0, 2.0] AS reference_vec + SELECT id + FROM tab_wide_full + ORDER BY L2Distance(vec, reference_vec) + LIMIT 3 +) +WHERE explain LIKE '%vector_similarity%' OR explain LIKE '%Granules:%'; + +DROP TABLE tab_compact_full; +DROP TABLE tab_wide_full; diff --git a/parser/testdata/02354_vector_search_postfiltering_bug/ast.json b/parser/testdata/02354_vector_search_postfiltering_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02354_vector_search_postfiltering_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02354_vector_search_postfiltering_bug/metadata.json b/parser/testdata/02354_vector_search_postfiltering_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_vector_search_postfiltering_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_vector_search_postfiltering_bug/query.sql b/parser/testdata/02354_vector_search_postfiltering_bug/query.sql new file mode 100644 index 000000000..516a41875 --- /dev/null +++ b/parser/testdata/02354_vector_search_postfiltering_bug/query.sql @@ -0,0 +1,19 @@ +-- Tags: no-fasttest, long, no-asan, no-ubsan, no-msan, no-tsan, no-debug +-- Test for Bug 78161 + +SET enable_analyzer = 1; + +CREATE TABLE tab (id Int32, vec Array(Float32)) ENGINE = MergeTree() ORDER BY id SETTINGS 
index_granularity = 128; +INSERT INTO tab SELECT number, [randCanonical(), randCanonical()] FROM numbers(10000); + +-- Create index +ALTER TABLE tab ADD INDEX idx_vec vec TYPE vector_similarity('hnsw', 'cosineDistance', 2, 'f32', 64, 400); +ALTER TABLE tab MATERIALIZE INDEX idx_vec SETTINGS mutations_sync=2; + +WITH [1., 2.] AS reference_vec +SELECT * +FROM tab +PREWHERE id < 5000 +ORDER BY cosineDistance(vec, reference_vec) ASC +LIMIT 10 +FORMAT Null; diff --git a/parser/testdata/02354_vector_search_pre_and_post_filtering/ast.json b/parser/testdata/02354_vector_search_pre_and_post_filtering/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02354_vector_search_pre_and_post_filtering/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02354_vector_search_pre_and_post_filtering/metadata.json b/parser/testdata/02354_vector_search_pre_and_post_filtering/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_vector_search_pre_and_post_filtering/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_vector_search_pre_and_post_filtering/query.sql b/parser/testdata/02354_vector_search_pre_and_post_filtering/query.sql new file mode 100644 index 000000000..01d15010d --- /dev/null +++ b/parser/testdata/02354_vector_search_pre_and_post_filtering/query.sql @@ -0,0 +1,181 @@ +-- Tags: no-fasttest, no-ordinary-database + +-- Tests pre vs. post-filtering for vector search. + +SET enable_analyzer = 1; +SET parallel_replicas_local_plan = 1; -- this setting is randomized, set it explicitly to have local plan for parallel replicas + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + id Int32, + date Date, + attr1 Int32, + attr2 Int32, + vec Array(Float32), + INDEX idx_attr1 attr1 TYPE minmax, + INDEX idx_vec vec TYPE vector_similarity('hnsw', 'L2Distance', 2) GRANULARITY 10000 +) +ENGINE = MergeTree +PARTITION BY date +ORDER BY id +SETTINGS index_granularity = 3; + +INSERT INTO tab VALUES + (1, '2025-01-01', 101, 1001, [1.0, 0.0]), + (2, '2025-01-01', 102, 1002, [1.1, 0.0]), + (3, '2025-01-01', 103, 1003, [1.2, 0.0]), + (4, '2025-01-02', 104, 1003, [1.3, 0.0]), + (5, '2025-01-02', 105, 1004, [1.4, 0.0]), + (6, '2025-01-02', 106, 1005, [1.5, 0.0]), + (7, '2025-01-03', 107, 1005, [1.6, 0.0]), + (8, '2025-01-03', 108, 1006, [1.7, 0.0]), + (9, '2025-01-03', 109, 1007, [1.8, 0.0]), + (10, '2025-01-03', 110, 1008, [1.9, 0.0]), + (11, '2025-01-03', 111, 1009, [2.0, 0.0]), + (12, '2025-01-03', 112, 1010, [2.1, 0.0]); + +SELECT 'Test vector_search_filter_strategy = prefilter'; + +SELECT '-- No additional WHERE clauses present, expect index usage'; +SELECT trimLeft(explain) FROM ( + EXPLAIN indexes = 1 + SELECT id + FROM tab + ORDER BY L2Distance(vec, [1.0, 1.0]) + LIMIT 2 + SETTINGS vector_search_filter_strategy = 'prefilter' +) +WHERE explain LIKE '%vector_similarity%' OR explain LIKE '%Granules: 3/4%'; + +SELECT '-- Additional WHERE clauses present, index usage not expected'; +SELECT trimLeft(explain) FROM ( + EXPLAIN indexes = 1 + SELECT id + FROM tab + WHERE attr2 >= 1006 + ORDER BY L2Distance(vec, [1.0, 1.0]) + LIMIT 2 + SETTINGS vector_search_filter_strategy = 'prefilter' +) +WHERE explain LIKE '%vector_similarity%'; + +SELECT '-- Additional WHERE clauses present, index usage not expected'; +SELECT trimLeft(explain) FROM ( + EXPLAIN indexes = 1 + SELECT id + FROM tab + WHERE attr1 <= 105 + ORDER BY L2Distance(vec, [1.0, 1.0]) + LIMIT 2 + SETTINGS vector_search_filter_strategy = 
'prefilter' +) +WHERE explain LIKE '%vector_similarity%'; + +SELECT '-- Additional WHERE clauses present, index usage not expected'; +SELECT trimLeft(explain) FROM ( + EXPLAIN indexes = 1 + SELECT id + FROM tab + WHERE id <= 6 + ORDER BY L2Distance(vec, [1.0, 1.0]) + LIMIT 2 + SETTINGS vector_search_filter_strategy = 'prefilter' +) +WHERE explain LIKE '%vector_similarity%'; + +SELECT 'Test vector_search_filter_strategy = postfilter'; + +SELECT '-- No additional WHERE clauses present, expect index usage'; +SELECT trimLeft(explain) FROM ( + EXPLAIN indexes = 1 + SELECT id + FROM tab + ORDER BY L2Distance(vec, [1.0, 1.0]) + LIMIT 2 + SETTINGS vector_search_filter_strategy = 'postfilter' +) +WHERE explain LIKE '%vector_similarity%' OR explain LIKE '%Granules: 3/4%'; + +SELECT '-- Additional WHERE clauses on partition key present (2 full parts selected), expect index usage'; +SELECT trimLeft(explain) FROM ( + EXPLAIN indexes = 1 + SELECT id + FROM tab + WHERE date <= '2025-01-02' + ORDER BY L2Distance(vec, [1.0, 1.0]) + LIMIT 2 + SETTINGS vector_search_filter_strategy = 'postfilter' +) +WHERE explain LIKE '%vector_similarity%'; + +SELECT '-- Additional WHERE clauses on partition key present (2 full parts selected), expect index usage'; +SELECT trimLeft(explain) FROM ( + EXPLAIN indexes = 1 + SELECT id + FROM tab + WHERE date = '2025-01-03' + AND attr1 = 110 + ORDER BY L2Distance(vec, [1.0, 1.0]) + LIMIT 2 + SETTINGS vector_search_filter_strategy = 'postfilter' +) +WHERE explain LIKE '%vector_similarity%'; + +SELECT '-- Additional WHERE clauses present, 2 full parts selected by partition key / 1 part partially selected by PK, index usage not expected'; +SELECT id +FROM tab +WHERE date = '2025-01-03' AND id <= 9 +ORDER BY L2Distance(vec, [1.0, 1.0]) +LIMIT 2 +SETTINGS log_comment = '02354_vector_search_post_filter_strategy_query1', vector_search_with_rescoring = 1; + +SYSTEM FLUSH LOGS query_log; + +SELECT DISTINCT ProfileEvents['USearchSearchCount'] +FROM system.query_log +WHERE log_comment = '02354_vector_search_post_filter_strategy_query1' +AND current_database = currentDatabase() +AND type = 'QueryFinish'; + +SELECT 'The first 3 neighbours returned by vector index dont pass the attr2 >= 1008 filter. Hence no rows returned by the query...'; +SELECT id +FROM tab +WHERE date = '2025-01-03' AND attr2 >= 1008 +ORDER BY L2Distance(vec, [1.0, 1.0]) +LIMIT 3; + +SELECT '... 
but there are results for the same query with postfilter multiplier = 2.0'; +SELECT id +FROM tab +WHERE date = '2025-01-03' AND attr2 >= 1008 +ORDER BY L2Distance(vec, [1.0, 1.0]) +LIMIT 3 +SETTINGS vector_search_index_fetch_multiplier = 2.0; + +SELECT '-- Negative parameter values throw an exception'; +SELECT id +FROM tab +WHERE date = '2025-01-03' AND attr2 >= 1008 +ORDER BY L2Distance(vec, [1.0, 1.0]) +LIMIT 3 +SETTINGS vector_search_index_fetch_multiplier = -1.0; -- { serverError INVALID_SETTING_VALUE } + +SELECT '-- Zero parameter values throw an exception'; +SELECT id +FROM tab +WHERE date = '2025-01-03' AND attr2 >= 1008 +ORDER BY L2Distance(vec, [1.0, 1.0]) +LIMIT 3 +SETTINGS vector_search_index_fetch_multiplier = 0.0; -- { serverError INVALID_SETTING_VALUE } + +SELECT '-- Too large parameter values throw an exception'; +SELECT id +FROM tab +WHERE date = '2025-01-03' AND attr2 >= 1008 +ORDER BY L2Distance(vec, [1.0, 1.0]) +LIMIT 3 +SETTINGS vector_search_index_fetch_multiplier = 1001.0; -- { serverError INVALID_SETTING_VALUE } + +DROP TABLE tab; diff --git a/parser/testdata/02354_vector_search_queries/ast.json b/parser/testdata/02354_vector_search_queries/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02354_vector_search_queries/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02354_vector_search_queries/metadata.json b/parser/testdata/02354_vector_search_queries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_vector_search_queries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_vector_search_queries/query.sql b/parser/testdata/02354_vector_search_queries/query.sql new file mode 100644 index 000000000..94285873b --- /dev/null +++ b/parser/testdata/02354_vector_search_queries/query.sql @@ -0,0 +1,279 @@ +-- Tags: no-fasttest, no-ordinary-database + +-- Tests various simple approximate nearest neighbor (ANN) queries that utilize vector search indexes.
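+--
+-- A rough sketch of the query shape every test below follows (inferred from the
+-- statements in this file, not an authoritative statement of the planner's rules):
+-- the vector index is only considered for top-N queries that ORDER BY the same
+-- distance function the index was declared with, e.g.
+--
+--     WITH [0.0, 2.0] AS reference_vec
+--     SELECT id
+--     FROM tab
+--     ORDER BY L2Distance(vec, reference_vec)
+--     LIMIT 3;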
+ +-- Test runs with analyzer enabled +SET enable_analyzer = 1; + +SELECT '10 rows, index_granularity = 8192, GRANULARITY = 1 million --> 1 granule, 1 indexed block'; + +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2)) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 8192; +INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]); + + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +DROP TABLE tab; + + +SELECT '12 rows, index_granularity = 3, GRANULARITY = 2 --> 4 granules, 2 indexed block'; + +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [1.5, 0.0]), (6, [0.0, 2.0]), (7, [0.0, 2.1]), (8, [0.0, 2.2]), (9, [0.0, 2.3]), (10, [0.0, 2.4]), (11, [0.0, 2.5]); + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +DROP TABLE tab; + + +SELECT 'Special cases'; -- Not a systematic test, just to check that no bad things happen. 
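+-- For reference, the positional parameters of vector_similarity as exercised below
+-- (a reading of the statements in this file, not a complete reference):
+--
+--     vector_similarity(method, distance_function, dimensions
+--                       [, quantization, hnsw_max_connections_per_layer,
+--                          hnsw_candidate_list_size_for_construction])
+--
+-- so vector_similarity('hnsw', 'cosineDistance', 2, 'f32', 42, 99) in the next
+-- CREATE TABLE builds an HNSW index over 2-dimensional, f32-quantized vectors.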
+ +SELECT '-- Non-default metric, hnsw_max_connections_per_layer, hnsw_candidate_list_size_for_construction'; +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 2, 'f32', 42, 99) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +INSERT INTO tab VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +SELECT '-- Setting "max_limit_for_vector_search_queries"'; +EXPLAIN indexes=1 +WITH [0.0, 2.0] as reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3 +SETTINGS max_limit_for_vector_search_queries = 2; -- LIMIT 3 > 2 --> don't use the ann index + +DROP TABLE tab; + +SELECT '-- Test all distance metrics x all quantization'; + +DROP TABLE IF EXISTS tab_l2_f64; +DROP TABLE IF EXISTS tab_l2_f32; +DROP TABLE IF EXISTS tab_l2_f16; +DROP TABLE IF EXISTS tab_l2_bf16; +DROP TABLE IF EXISTS tab_l2_i8; +DROP TABLE IF EXISTS tab_cos_f64; +DROP TABLE IF EXISTS tab_cos_f32; +DROP TABLE IF EXISTS tab_cos_f16; +DROP TABLE IF EXISTS tab_cos_bf16; +DROP TABLE IF EXISTS tab_cos_i8; + +CREATE TABLE tab_l2_f64(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2, 'f64', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_l2_f32(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2, 'f32', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_l2_f16(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2, 'f16', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_l2_bf16(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2, 'bf16', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_l2_i8(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2, 'i8', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_cos_f64(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 2, 'f64', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_cos_f32(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 2, 'f32', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_cos_f16(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 2, 'f16', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_cos_bf16(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 2, 'bf16', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +CREATE TABLE tab_cos_i8(id 
Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 2, 'i8', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; + +INSERT INTO tab_l2_f64 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_l2_f32 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_l2_f16 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_l2_bf16 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_l2_i8 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_cos_f64 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_cos_f32 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_cos_f16 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_cos_bf16 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); +INSERT INTO tab_cos_i8 VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_l2_f64 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_l2_f64 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_l2_f32 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_l2_f32 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_l2_f16 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_l2_f16 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +WITH [0.0, 2.0] 
AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_l2_bf16 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_l2_bf16 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_l2_i8 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab_l2_i8 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_f64 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_f64 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_f32 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_f32 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_f16 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_f16 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_bf16 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_bf16 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_i8 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +EXPLAIN indexes = 1 +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, cosineDistance(vec, reference_vec) +FROM tab_cos_i8 +ORDER BY cosineDistance(vec, reference_vec) +LIMIT 3; + +DROP TABLE tab_l2_f64; +DROP TABLE tab_l2_f32; +DROP TABLE tab_l2_f16; +DROP TABLE tab_l2_bf16; +DROP TABLE tab_l2_i8; +DROP TABLE tab_cos_f64; +DROP TABLE tab_cos_f32; +DROP TABLE tab_cos_f16; +DROP TABLE tab_cos_bf16; +DROP TABLE tab_cos_i8; + +SELECT '-- Index on Array(Float64) column'; +CREATE TABLE tab(id Int32, vec Array(Float64), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [1.5, 0.0]), (6, [0.0, 2.0]), (7, [0.0, 2.1]), (8, [0.0, 2.2]), (9, [0.0, 2.3]), (10, [0.0, 2.4]), (11, [0.0, 2.5]); + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +DROP TABLE tab; + +SELECT '-- Index on Array(BFloat16) column'; +CREATE TABLE tab(id Int32, vec Array(BFloat16), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, 
[1.4, 0.0]), (5, [1.5, 0.0]), (6, [0.0, 2.0]), (7, [0.0, 2.1]), (8, [0.0, 2.2]), (9, [0.0, 2.3]), (10, [0.0, 2.4]), (11, [0.0, 2.5]); + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +DROP TABLE tab; diff --git a/parser/testdata/02354_vector_search_reference_vector_types/ast.json b/parser/testdata/02354_vector_search_reference_vector_types/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02354_vector_search_reference_vector_types/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02354_vector_search_reference_vector_types/metadata.json b/parser/testdata/02354_vector_search_reference_vector_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_vector_search_reference_vector_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_vector_search_reference_vector_types/query.sql b/parser/testdata/02354_vector_search_reference_vector_types/query.sql new file mode 100644 index 000000000..89bbc61a9 --- /dev/null +++ b/parser/testdata/02354_vector_search_reference_vector_types/query.sql @@ -0,0 +1,185 @@ +-- Tags: no-fasttest, no-ordinary-database, no-parallel +-- no-parallel: SQL functions are not per-database, they are global + +-- Tests that vector search queries work with reference vectors of different data types. + +SET enable_analyzer = 1; +SET parallel_replicas_local_plan = 1; -- this setting is randomized, set it explicitly to force local plan for parallel replicas + +DROP TABLE IF EXISTS tab_f64; +DROP TABLE IF EXISTS tab_f32; +DROP TABLE IF EXISTS tab_bf16; + +SELECT 'Create tables with vector similarity indexes on Float64, Float32 and BFloat16 columns'; + +CREATE TABLE tab_f64(id Int32, vec Array(Float64), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2)) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 2; +INSERT INTO tab_f64 VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]); + +CREATE TABLE tab_f32(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2)) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 2; +INSERT INTO tab_f32 VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]); + +CREATE TABLE tab_bf16(id Int32, vec Array(BFloat16), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2)) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 2; +INSERT INTO tab_bf16 VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [0.0, 2.0]), (6, [0.0, 2.1]), (7, [0.0, 2.2]), (8, [0.0, 2.3]), (9, [0.0, 2.4]); + +DROP FUNCTION IF EXISTS constF64; +CREATE FUNCTION constF64 AS () -> [toFloat64(0.0), toFloat64(2.0)]; + +DROP FUNCTION IF EXISTS constF32; +CREATE FUNCTION constF32 AS () -> [toFloat32(0.0), toFloat32(2.0)]; + +DROP FUNCTION IF EXISTS constBF16; +CREATE FUNCTION constBF16 AS () -> [toBFloat16(0.0), toBFloat16(2.0)]; + +SELECT 'Run all combinations of vector search queries: column type x reference vector type'; + +SELECT id +FROM tab_f64 +ORDER BY L2Distance(vec, constF64()) +LIMIT 1; + +SELECT id +FROM tab_f64 +ORDER BY L2Distance(vec, constF32()) +LIMIT 1; + +SELECT id +FROM
tab_f64 +ORDER BY L2Distance(vec, constBF16()) +LIMIT 1; + +SELECT id +FROM tab_f32 +ORDER BY L2Distance(vec, constF64()) +LIMIT 1; + +SELECT id +FROM tab_f32 +ORDER BY L2Distance(vec, constF32()) +LIMIT 1; + +SELECT id +FROM tab_f32 +ORDER BY L2Distance(vec, constBF16()) +LIMIT 1; + +SELECT id +FROM tab_bf16 +ORDER BY L2Distance(vec, constF64()) +LIMIT 1; + +SELECT id +FROM tab_bf16 +ORDER BY L2Distance(vec, constF32()) +LIMIT 1; + +SELECT id +FROM tab_bf16 +ORDER BY L2Distance(vec, constBF16()) +LIMIT 1; + +SELECT 'Check that the index is used for all combinations of vector search queries: column type x reference vector type'; + +SELECT trimLeft(explain) AS explain FROM ( +EXPLAIN indexes = 1 +SELECT id +FROM tab_f64 +ORDER BY L2Distance(vec, constF64()) +LIMIT 1 +) +WHERE explain LIKE '%vector_similarity%'; + +SELECT trimLeft(explain) AS explain FROM ( +EXPLAIN indexes = 1 +SELECT id +FROM tab_f64 +ORDER BY L2Distance(vec, constF32()) +LIMIT 1 +) +WHERE explain LIKE '%vector_similarity%'; + +SELECT trimLeft(explain) AS explain FROM ( +EXPLAIN indexes = 1 +SELECT id +FROM tab_f64 +ORDER BY L2Distance(vec, constBF16()) +LIMIT 1 +) +WHERE explain LIKE '%vector_similarity%'; + +SELECT trimLeft(explain) AS explain FROM ( +EXPLAIN indexes = 1 +SELECT id +FROM tab_f32 +ORDER BY L2Distance(vec, constF64()) +LIMIT 1 +) +WHERE explain LIKE '%vector_similarity%'; + +SELECT trimLeft(explain) AS explain FROM ( +EXPLAIN indexes = 1 +SELECT id +FROM tab_f32 +ORDER BY L2Distance(vec, constF32()) +LIMIT 1 +) +WHERE explain LIKE '%vector_similarity%'; + +SELECT trimLeft(explain) AS explain FROM ( +EXPLAIN indexes = 1 +SELECT id +FROM tab_f32 +ORDER BY L2Distance(vec, constBF16()) +LIMIT 1 +) +WHERE explain LIKE '%vector_similarity%'; + +SELECT trimLeft(explain) AS explain FROM ( +EXPLAIN indexes = 1 +SELECT id +FROM tab_bf16 +ORDER BY L2Distance(vec, constF64()) +LIMIT 1 +) +WHERE explain LIKE '%vector_similarity%'; + +SELECT trimLeft(explain) AS explain FROM ( +EXPLAIN indexes = 1 +SELECT id +FROM tab_bf16 +ORDER BY L2Distance(vec, constF32()) +LIMIT 1 +) +WHERE explain LIKE '%vector_similarity%'; + +SELECT trimLeft(explain) AS explain FROM ( +EXPLAIN indexes = 1 +SELECT id +FROM tab_bf16 +ORDER BY L2Distance(vec, constBF16()) +LIMIT 1 +) +WHERE explain LIKE '%vector_similarity%'; + +DROP FUNCTION constF64; +DROP FUNCTION constF32; +DROP FUNCTION constBF16; + +SELECT 'Check that non-const reference vectors also work'; + +DROP FUNCTION IF EXISTS nonConstF32; +CREATE FUNCTION nonConstF32 AS (arg1) -> (SELECT [toFloat32((arg1 % 10)/10), toFloat32((arg1 % 10)/10)]); + +SELECT trimLeft(explain) AS explain FROM ( +EXPLAIN indexes = 1 +SELECT id +FROM tab_f32 +ORDER BY L2Distance(vec, nonConstF32(rand())) +LIMIT 1 +) +WHERE explain LIKE '%vector_similarity%'; +DROP FUNCTION nonConstF32; + +DROP TABLE tab_f64; +DROP TABLE tab_f32; +DROP TABLE tab_bf16; + diff --git a/parser/testdata/02354_vector_search_rescoring/ast.json b/parser/testdata/02354_vector_search_rescoring/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02354_vector_search_rescoring/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02354_vector_search_rescoring/metadata.json b/parser/testdata/02354_vector_search_rescoring/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_vector_search_rescoring/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_vector_search_rescoring/query.sql 
b/parser/testdata/02354_vector_search_rescoring/query.sql new file mode 100644 index 000000000..4fcd00137 --- /dev/null +++ b/parser/testdata/02354_vector_search_rescoring/query.sql @@ -0,0 +1,142 @@ +-- Tags: no-fasttest, no-ordinary-database, no-parallel-replicas +-- no-parallel-replicas: If parallel replicas are on, the optimization (no rescoring) may not work. + +-- Test for setting 'vector_search_with_rescoring' + +SET enable_analyzer = 1; +SET parallel_replicas_local_plan = 1; -- this setting is randomized, set it explicitly to force local plan for parallel replicas + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(id Int32, vec Array(Float32), attr1 Int32, INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2)) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 2; +INSERT INTO tab VALUES (0, [1.0, 0.0], 50), (1, [1.1, 0.0], 50), (2, [1.2, 0.0], 50), (3, [1.3, 0.0], 50), (4, [1.4, 0.0], 50), (5, [0.0, 2.0], 50), (6, [0.0, 2.1], 50), (7, [0.0, 2.2], 50), (8, [0.0, 2.3], 50), (9, [0.0, 2.4], 50); + +SELECT 'Test "SELECT id" without and with rescoring'; + +WITH [0.0, 2.0] AS reference_vec +SELECT id +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3 +SETTINGS vector_search_with_rescoring = 0; + +SELECT '-- Expect column "_distance" in EXPLAIN. Column "vec" is not expected for ReadFromMergeTree.'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN header = 1 + WITH [0.0, 2.0] AS reference_vec + SELECT id + FROM tab + ORDER BY L2Distance(vec, reference_vec) + LIMIT 3 + SETTINGS vector_search_with_rescoring = 0) +WHERE (explain LIKE '%_distance%' OR explain LIKE '%vec%Array%') AND explain NOT LIKE '%L2Distance%'; + +WITH [0.0, 2.0] AS reference_vec +SELECT id +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3 +SETTINGS vector_search_with_rescoring = 1; + +SELECT '-- Dont expect column "_distance" in EXPLAIN.'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN header = 1 + WITH [0.0, 2.0] AS reference_vec + SELECT id + FROM tab + ORDER BY L2Distance(vec, reference_vec) + LIMIT 3 + SETTINGS vector_search_with_rescoring = 1) +WHERE (explain LIKE '%_distance%'); + +SELECT 'Test "SELECT id, vec" without and with rescoring'; + +-- SELECTing vec explicitly disables the optimization +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3 +SETTINGS vector_search_with_rescoring = 0; + +SELECT '-- Dont expect column "_distance" in EXPLAIN.'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN header = 1 + WITH [0.0, 2.0] AS reference_vec + SELECT id, vec + FROM tab + ORDER BY L2Distance(vec, reference_vec) + LIMIT 3 + SETTINGS vector_search_with_rescoring = 0) +WHERE (explain LIKE '%_distance%'); + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3 +SETTINGS vector_search_with_rescoring = 1; + +SELECT '-- Dont expect column "_distance" in EXPLAIN.'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN header = 1 + WITH [0.0, 2.0] AS reference_vec + SELECT id, vec + FROM tab + ORDER BY L2Distance(vec, reference_vec) + LIMIT 3 + SETTINGS vector_search_with_rescoring = 1) +WHERE (explain LIKE '%_distance%'); + +SELECT 'Test optimization in the presence of other predicates'; + +-- Output will be 0,1,2 +WITH [1.0, 0.0] AS reference_vec +SELECT id +FROM tab +WHERE id <= 3 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3 +SETTINGS vector_search_with_rescoring = 0; + +-- Since filter will select partial ranges from part, brute-force search will select 
4,5,6 +WITH [1.0, 0.0] AS reference_vec +SELECT id, attr1 +FROM tab +WHERE id > 3 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3 +SETTINGS vector_search_with_rescoring = 0; + +SELECT 'Test for filter that selects full part, optimization will take effect'; + +SELECT '-- Expect column "_distance" in EXPLAIN. Column "vec" is not expected for ReadFromMergeTree.'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN header = 1 + WITH [0.0, 2.0] AS reference_vec + SELECT id + FROM tab + WHERE id >= 0 + ORDER BY L2Distance(vec, reference_vec) + LIMIT 5 + SETTINGS vector_search_with_rescoring = 0) +WHERE (explain LIKE '%_distance%' OR explain LIKE '%vec%Array%') AND explain NOT LIKE '%L2Distance%'; + +-- Output will be 5,6,7,8,9 +WITH [0.0, 2.0] AS reference_vec +SELECT id +FROM tab +WHERE id >= 0 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 5 +SETTINGS vector_search_with_rescoring = 0; + +SELECT 'Predicate on non-PK attribute'; +-- Output will be 0,1,2,3,4 +WITH [1.0, 0.0] AS reference_vec +SELECT id, attr1 +FROM tab +WHERE attr1 >= 50 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 5 +SETTINGS vector_search_with_rescoring = 0; diff --git a/parser/testdata/02354_vector_search_rescoring_and_prewhere/ast.json b/parser/testdata/02354_vector_search_rescoring_and_prewhere/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02354_vector_search_rescoring_and_prewhere/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02354_vector_search_rescoring_and_prewhere/metadata.json b/parser/testdata/02354_vector_search_rescoring_and_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_vector_search_rescoring_and_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_vector_search_rescoring_and_prewhere/query.sql b/parser/testdata/02354_vector_search_rescoring_and_prewhere/query.sql new file mode 100644 index 000000000..4583f7a51 --- /dev/null +++ b/parser/testdata/02354_vector_search_rescoring_and_prewhere/query.sql @@ -0,0 +1,130 @@ +-- Tags: no-fasttest, no-ordinary-database, no-parallel-replicas + +-- Test for setting 'vector_search_with_rescoring' with filters. 
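+-- Sketch of the optimization under test (inferred from the EXPLAIN checks below,
+-- not from the implementation): with vector_search_with_rescoring = 0 the plan can
+-- return the distances already computed inside the index as a virtual "_distance"
+-- column instead of re-reading "vec" and recomputing them, which is visible as
+--
+--     SELECT trimLeft(explain) FROM (
+--         EXPLAIN header = 1
+--         SELECT id FROM tab ORDER BY L2Distance(vec, [0.2, 0.3]) LIMIT 4
+--     ) WHERE explain LIKE '%_distance%';  -- non-empty when the optimization fires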
+ +SET enable_analyzer = 1; +SET parallel_replicas_local_plan = 1; -- this setting is randomized, set it explicitly to force local plan for parallel replicas + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab ( + id Int32, + attr1 Int32, + attr2 Int32, + vec Array(Float32), + INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2), +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity = 2; + +INSERT INTO tab VALUES + (0, 100, 0, [0.311, 0.311]), + (1, 101, 100, [0.236, 0.236]), + (2, 102, 200, [0.97, 0.97]), + (3, 103, 300, [0.369, 0.369]), + (4, 104, 400, [0.593, 0.593]), + (5, 105, 500, [0.276, 0.276]), + (6, 106, 600, [0.58, 0.58]), + (7, 107, 700, [0.197, 0.197]), + (8, 108, 800, [0.134, 0.134]), + (9, 109, 900, [0.484, 0.484]), + (10, 110, 1000, [0.945, 0.945]), + (11, 111, 1100, [0.406, 0.406]), + (12, 112, 1200, [0.105, 0.105]), + (13, 113, 1300, [0.635, 0.635]), + (14, 114, 1400, [0.94, 0.94]), + (15, 115, 1500, [0.655, 0.655]), + (16, 116, 1600, [0.252, 0.252]), + (17, 117, 1700, [0.737, 0.737]), + (18, 118, 1800, [0.612, 0.612]), + (19, 119, 1900, [0.217, 0.217]); + +SELECT 'Reference results without filters'; + +SELECT id, attr1, attr2, vec +FROM tab +ORDER BY L2Distance(vec, [0.2, 0.3]); + +SELECT 'Ensure rescoring optimization works with enabled and disabled PREWHERE'; +-- Expect IDs 16 & 19 for next 2 queries + +SELECT id +FROM tab +WHERE attr1 > 110 +ORDER BY L2Distance(vec, [0.2, 0.3]) +LIMIT 4 +SETTINGS query_plan_optimize_prewhere = 0, + optimize_move_to_prewhere = 0; + +SELECT id +FROM tab +WHERE attr1 > 110 +ORDER BY L2Distance(vec, [0.2, 0.3]) +LIMIT 4 +SETTINGS query_plan_optimize_prewhere = 1, + optimize_move_to_prewhere = 1; + +SELECT 'Test with enabled rescoring'; +-- Expect 16 & 19, and additionally 18 and 17 because they are in the same granules + +SELECT id +FROM tab +WHERE attr1 > 110 +ORDER BY L2Distance(vec, [0.2, 0.3]) +LIMIT 4 +SETTINGS vector_search_with_rescoring = 1; + +SELECT 'With enabled rescoring and post-filter multiplier = 3, search quality will be slightly different (better)'; +SELECT id +FROM tab +WHERE attr1 > 110 +ORDER BY L2Distance(vec, [0.2, 0.3]) +LIMIT 4 +SETTINGS vector_search_with_rescoring = 1, + vector_search_index_fetch_multiplier = 3; + +SELECT 'Check that explicit PREWHERE disables the optimization'; +-- Expect no _distance column in result +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN header = 1 + SELECT id + FROM tab + PREWHERE attr1 > 110 + ORDER BY L2Distance(vec, [0.2, 0.3]) + LIMIT 4 + ) +WHERE (explain LIKE '%_distance%'); + +SELECT 'Query with explicit PREWHERE works'; +SELECT id +FROM tab +PREWHERE attr1 > 110 +ORDER BY L2Distance(vec, [0.2, 0.3]) +LIMIT 4; + +SELECT 'Select all 20 neighbours with the rescoring optimization, distances got from vector index'; +SELECT id, attr1, attr2 +FROM tab +ORDER BY L2Distance(vec, [0.2, 0.3]) +LIMIT 20; + +SELECT 'Ensure that optimization was effective for above query, _distance should be seen'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN header = 1 + SELECT id, attr1, attr2, L2Distance(vec, [0.2, 0.3]), + FROM tab + ORDER BY L2Distance(vec, [0.2, 0.3]) + LIMIT 20 + ) +WHERE (explain LIKE '%_distance%'); + +SELECT 'Just a test with 2 predicates'; +SELECT 'id 16 & 19 will be again output'; +SELECT id +FROM tab +WHERE attr1 > 110 AND attr2 > 50 +ORDER BY L2Distance(vec, [0.2, 0.3]) +LIMIT 4; + +DROP TABLE tab; diff --git a/parser/testdata/02354_vector_search_rescoring_distance_in_select_list/ast.json 
b/parser/testdata/02354_vector_search_rescoring_distance_in_select_list/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02354_vector_search_rescoring_distance_in_select_list/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02354_vector_search_rescoring_distance_in_select_list/metadata.json b/parser/testdata/02354_vector_search_rescoring_distance_in_select_list/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_vector_search_rescoring_distance_in_select_list/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_vector_search_rescoring_distance_in_select_list/query.sql b/parser/testdata/02354_vector_search_rescoring_distance_in_select_list/query.sql new file mode 100644 index 000000000..c4aa1debd --- /dev/null +++ b/parser/testdata/02354_vector_search_rescoring_distance_in_select_list/query.sql @@ -0,0 +1,111 @@ +-- Tags: no-fasttest, no-ordinary-database, no-parallel-replicas +--- no-parallel-replicas: Because the test records and verifies +--- _distance values returned from the rescoring optimization. + +-- Issue #85514 +-- +-- Tests that the rescoring optimization works when the distance function is +-- present explicitly in the SELECT columns list, apart from ORDER BY. +-- The return type of the cosineDistance/L2Distance function will vary +-- based on the data type of the 2 input arguments. + +SET enable_analyzer = 1; +SET vector_search_with_rescoring = 0; + +SELECT 'Create tables with Array(Float32) and Array(BFloat16) columns'; + +DROP TABLE IF EXISTS tab_f32; +DROP TABLE IF EXISTS tab_bf16; + +CREATE TABLE tab_f32 +( + id Int32, + vec Array(Float32), + INDEX vector_index vec TYPE vector_similarity('hnsw', 'L2Distance', 2) +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity = 1; + +CREATE TABLE tab_bf16 +( + id Int32, + vec Array(BFloat16), + INDEX vector_index vec TYPE vector_similarity('hnsw', 'L2Distance', 2) +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity = 1; + +INSERT INTO tab_f32 VALUES (0, [1.0, 0.0]), + (1, [1.1, 0.0]), + (2, [1.2, 0.0]), + (3, [1.3, 0.0]), + (4, [1.4, 0.0]), + (5, [0.0, 2.0]), + (6, [0.0, 2.1]), + (7, [0.0, 2.2]), + (8, [0.0, 2.3]), + (9, [0.0, 2.4]); + +INSERT INTO tab_bf16 VALUES (0, [1.0, 0.0]), + (1, [1.1, 0.0]), + (2, [1.2, 0.0]), + (3, [1.3, 0.0]), + (4, [1.4, 0.0]), + (5, [0.0, 2.0]), + (6, [0.0, 2.1]), + (7, [0.0, 2.2]), + (8, [0.0, 2.3]), + (9, [0.0, 2.4]); + +-- The nearest neighbours to [0.0, 2.0] are 5,6,7,8 + +SELECT 'Column: Array(Float32)'; + +SELECT '-- Search vector: Array(Float64)'; +WITH CAST([0.0, 2.0] AS Array(Float64)) AS reference_vec +SELECT id, L2Distance(vec, reference_vec) +FROM tab_f32 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 4; + +SELECT '-- Search vector: Array(Float32)'; +WITH CAST([0.0, 2.0] AS Array(Float32)) AS reference_vec +SELECT id, L2Distance(vec, reference_vec) +FROM tab_f32 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 4; + +SELECT '-- Search vector: Array(BFloat16)'; +WITH CAST([0.0, 2.0] AS Array(BFloat16)) AS reference_vec +SELECT id, L2Distance(vec, reference_vec) +FROM tab_f32 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 4; + +SELECT 'Column: Array(BFloat16)'; + +SELECT '-- Search vector: Array(Float64)'; +WITH [0.0, 2.0] AS reference_vec +SELECT id, L2Distance(vec, reference_vec) +FROM tab_bf16 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 4; + +SELECT '-- Search vector: Array(Float32)'; +WITH CAST([0.0, 2.0] AS Array(Float32)) AS
reference_vec +SELECT id, L2Distance(vec, reference_vec) +FROM tab_bf16 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 4; + +SELECT '-- Search vector: Array(BFloat16)'; +WITH CAST([0.0, 2.0] AS Array(BFloat16)) AS reference_vec +SELECT id, L2Distance(vec, reference_vec) +FROM tab_bf16 +ORDER BY L2Distance(vec, reference_vec) +LIMIT 4; + +DROP TABLE tab_f32; +DROP TABLE tab_bf16; diff --git a/parser/testdata/02354_vector_search_subquery/ast.json b/parser/testdata/02354_vector_search_subquery/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02354_vector_search_subquery/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02354_vector_search_subquery/metadata.json b/parser/testdata/02354_vector_search_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_vector_search_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_vector_search_subquery/query.sql b/parser/testdata/02354_vector_search_subquery/query.sql new file mode 100644 index 000000000..85d8b1a75 --- /dev/null +++ b/parser/testdata/02354_vector_search_subquery/query.sql @@ -0,0 +1,51 @@ +-- Tags: no-fasttest, no-ordinary-database + +SET enable_analyzer = 1; -- analyzer vs. non-analyzer produce slightly different EXPLAIN + +-- Reference vector for vector search is computed by a subquery (issue #69085) + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'cosineDistance', 2, 'f16', 0, 0) GRANULARITY 2) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3; +INSERT INTO tab VALUES (0, [4.6, 2.3]), (1, [2.0, 3.2]), (2, [4.2, 3.4]), (3, [5.3, 2.9]), (4, [2.4, 5.2]), (5, [5.3, 2.3]), (6, [1.0, 9.3]), (7, [5.5, 4.7]), (8, [6.4, 3.5]), (9, [5.3, 2.5]), (10, [6.4, 3.4]), (11, [6.4, 3.2]); + +-- this works +EXPLAIN indexes = 1 +WITH [0., 2.] AS reference_vec +SELECT + id, + vec, + cosineDistance(vec, reference_vec) AS distance +FROM tab +ORDER BY distance +LIMIT 1; + +-- this also works +EXPLAIN indexes = 1 +WITH ( + SELECT vec + FROM tab + LIMIT 1 +) AS reference_vec +SELECT + id, + vec, + cosineDistance(vec, reference_vec) AS distance +FROM tab +ORDER BY distance +LIMIT 1; + +-- and this works as well +EXPLAIN indexes = 1 +WITH ( + SELECT [0., 2.]
+) AS reference_vec +SELECT + id, + vec, + cosineDistance(vec, reference_vec) AS distance +FROM tab +ORDER BY distance +LIMIT 1; + +DROP TABLE tab; diff --git a/parser/testdata/02354_vector_search_unquoted_index_parameters/ast.json b/parser/testdata/02354_vector_search_unquoted_index_parameters/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02354_vector_search_unquoted_index_parameters/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02354_vector_search_unquoted_index_parameters/metadata.json b/parser/testdata/02354_vector_search_unquoted_index_parameters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_vector_search_unquoted_index_parameters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_vector_search_unquoted_index_parameters/query.sql b/parser/testdata/02354_vector_search_unquoted_index_parameters/query.sql new file mode 100644 index 000000000..d039d24f1 --- /dev/null +++ b/parser/testdata/02354_vector_search_unquoted_index_parameters/query.sql @@ -0,0 +1,21 @@ +-- Tags: no-fasttest, no-ordinary-database + +-- Tests that quoted and unquoted parameters can be passed to vector search indexes. + +DROP TABLE IF EXISTS tab1; +DROP TABLE IF EXISTS tab2; + +CREATE TABLE tab1 (id Int32, vec Array(Float32), PRIMARY KEY id, INDEX vec_idx(vec) TYPE vector_similarity('hnsw', 'L2Distance', 1)); +CREATE TABLE tab2 (id Int32, vec Array(Float32), PRIMARY KEY id, INDEX vec_idx(vec) TYPE vector_similarity(hnsw, L2Distance, 1)); + +DROP TABLE tab1; +DROP TABLE tab2; + +CREATE TABLE tab1 (id Int32, vec Array(Float32), PRIMARY KEY id); +CREATE TABLE tab2 (id Int32, vec Array(Float32), PRIMARY KEY id); + +ALTER TABLE tab1 ADD INDEX idx1(vec) TYPE vector_similarity('hnsw', 'L2Distance', 1); +ALTER TABLE tab2 ADD INDEX idx2(vec) TYPE vector_similarity(hnsw, L2Distance, 1); + +DROP TABLE tab1; +DROP TABLE tab2; diff --git a/parser/testdata/02354_vector_search_vector_similarity_index_cache/ast.json b/parser/testdata/02354_vector_search_vector_similarity_index_cache/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02354_vector_search_vector_similarity_index_cache/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02354_vector_search_vector_similarity_index_cache/metadata.json b/parser/testdata/02354_vector_search_vector_similarity_index_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_vector_search_vector_similarity_index_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_vector_search_vector_similarity_index_cache/query.sql b/parser/testdata/02354_vector_search_vector_similarity_index_cache/query.sql new file mode 100644 index 000000000..230dad0c4 --- /dev/null +++ b/parser/testdata/02354_vector_search_vector_similarity_index_cache/query.sql @@ -0,0 +1,33 @@ +-- Tags: no-parallel, no-fasttest, no-ordinary-database +-- no-parallel: looks at server-wide metrics + +-- Tests the vector index cache. 
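+-- Expected pattern (an inference from the two identical queries below, not a
+-- guarantee): the first search loads the index from disk (cache misses > 0), the
+-- second is served from the cache (cache hits > 0), so the query_log probe at the
+-- end should show roughly (hits, misses) = (0, 1) for the first run and (1, 0)
+-- for the second.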
+ +SET parallel_replicas_local_plan = 1; + +SYSTEM DROP VECTOR SIMILARITY INDEX CACHE; +SELECT metric, value FROM system.metrics WHERE metric = 'VectorSimilarityIndexCacheBytes'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(id Int32, vec Array(Float32), INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 2)) ENGINE = MergeTree ORDER BY id; +INSERT INTO tab VALUES (0, [1.0, 0.0]), (1, [1.1, 0.0]), (2, [1.2, 0.0]), (3, [1.3, 0.0]), (4, [1.4, 0.0]), (5, [1.5, 0.0]), (6, [0.0, 2.0]), (7, [0.0, 2.1]), (8, [0.0, 2.2]), (9, [0.0, 2.3]), (10, [0.0, 2.4]), (11, [0.0, 2.5]); + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +WITH [0.0, 2.0] AS reference_vec +SELECT id, vec, L2Distance(vec, reference_vec) +FROM tab +ORDER BY L2Distance(vec, reference_vec) +LIMIT 3; + +SYSTEM FLUSH LOGS query_log; + +SELECT ProfileEvents['VectorSimilarityIndexCacheHits'], ProfileEvents['VectorSimilarityIndexCacheMisses'] +FROM system.query_log +WHERE event_date >= yesterday() AND current_database = currentDatabase() AND type = 'QueryFinish' AND query LIKE '%ORDER BY L2Distance%' +ORDER BY event_time_microseconds; diff --git a/parser/testdata/02354_vector_search_with_huge_dimension/ast.json b/parser/testdata/02354_vector_search_with_huge_dimension/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02354_vector_search_with_huge_dimension/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02354_vector_search_with_huge_dimension/metadata.json b/parser/testdata/02354_vector_search_with_huge_dimension/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_vector_search_with_huge_dimension/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_vector_search_with_huge_dimension/query.sql b/parser/testdata/02354_vector_search_with_huge_dimension/query.sql new file mode 100644 index 000000000..a27e81763 --- /dev/null +++ b/parser/testdata/02354_vector_search_with_huge_dimension/query.sql @@ -0,0 +1,41 @@ +-- Tags: no-fasttest, no-ordinary-database + +-- Tests vector search over vectors with a huge dimension (32k) + +SET parallel_replicas_local_plan = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab( + id Int32, + attr1 Int32, + vec Array(Float32), + INDEX idx vec TYPE vector_similarity('hnsw', 'L2Distance', 32768)) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity = 2; + +INSERT INTO tab + SELECT number, + number * 100, + CAST(arrayWithConstant(32768, number / 10) as Array(Float32)) + FROM numbers(10); + +SELECT '-- Plan must contain vector index usage'; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + SELECT id + FROM tab + ORDER BY L2Distance(vec, arrayWithConstant(32768, 0.2)) + LIMIT 3 +) +WHERE explain ILIKE '%Skip%' OR explain ILIKE '%Name: idx%' OR explain ILIKE '%vector_similarity%'; + +SELECT '-- Run vector search'; +-- Nearest vectors to [0.9,0.9...,0.9] are [0.9,...], [0.8,...], [0.7,...] 
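+-- Why this ordering holds (a worked sketch, not part of the original test): for two
+-- constant vectors a*[1,...,1] and b*[1,...,1] of dimension 32768,
+--     L2Distance = sqrt(32768 * (a - b)^2) = |a - b| * sqrt(32768) ~= 181.02 * |a - b|,
+-- so rows sort purely by |a - 0.9|: id 9 (0.9), then id 8 (0.8), then id 7 (0.7).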
+SELECT id +FROM tab +ORDER BY L2Distance(vec, arrayWithConstant(32768, 0.9)) +LIMIT 3; + +DROP TABLE tab; diff --git a/parser/testdata/02354_window_expression_with_aggregation_expression/ast.json b/parser/testdata/02354_window_expression_with_aggregation_expression/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02354_window_expression_with_aggregation_expression/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02354_window_expression_with_aggregation_expression/metadata.json b/parser/testdata/02354_window_expression_with_aggregation_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_window_expression_with_aggregation_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_window_expression_with_aggregation_expression/query.sql b/parser/testdata/02354_window_expression_with_aggregation_expression/query.sql new file mode 100644 index 000000000..21da3c5f4 --- /dev/null +++ b/parser/testdata/02354_window_expression_with_aggregation_expression/query.sql @@ -0,0 +1,16 @@ +SELECT + sum(a)*100/sum(sum(a)) OVER (PARTITION BY b) AS r +FROM +( + SELECT 1 AS a, 2 AS b + UNION ALL + SELECT 3 AS a, 4 AS b + UNION ALL + SELECT 5 AS a, 2 AS b + +) AS t +GROUP BY b; + +-- { echoOn } +SELECT arrayMap(x -> (x + 1), groupArray(number) OVER ()) AS result +FROM numbers(10); diff --git a/parser/testdata/02354_with_statement_non_exist_column/ast.json b/parser/testdata/02354_with_statement_non_exist_column/ast.json new file mode 100644 index 000000000..93b3655cf --- /dev/null +++ b/parser/testdata/02354_with_statement_non_exist_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.002089941, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02354_with_statement_non_exist_column/metadata.json b/parser/testdata/02354_with_statement_non_exist_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02354_with_statement_non_exist_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02354_with_statement_non_exist_column/query.sql b/parser/testdata/02354_with_statement_non_exist_column/query.sql new file mode 100644 index 000000000..869c335b6 --- /dev/null +++ b/parser/testdata/02354_with_statement_non_exist_column/query.sql @@ -0,0 +1,7 @@ +DROP TEMPORARY TABLE IF EXISTS t1; +DROP TEMPORARY TABLE IF EXISTS t2; + +CREATE TEMPORARY TABLE t1 (a Int64); +CREATE TEMPORARY TABLE t2 (a Int64, b Int64); + +WITH b AS bb SELECT bb FROM t2 WHERE a IN (SELECT a FROM t1); diff --git a/parser/testdata/02355_column_type_name_lc/ast.json b/parser/testdata/02355_column_type_name_lc/ast.json new file mode 100644 index 000000000..fc22cac8f --- /dev/null +++ b/parser/testdata/02355_column_type_name_lc/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toColumnTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function 
toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'foo'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001461298, + "rows_read": 11, + "bytes_read": 457 + } +} diff --git a/parser/testdata/02355_column_type_name_lc/metadata.json b/parser/testdata/02355_column_type_name_lc/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02355_column_type_name_lc/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02355_column_type_name_lc/query.sql b/parser/testdata/02355_column_type_name_lc/query.sql new file mode 100644 index 000000000..13a6393aa --- /dev/null +++ b/parser/testdata/02355_column_type_name_lc/query.sql @@ -0,0 +1 @@ +SELECT toColumnTypeName(toLowCardinality(materialize('foo'))); diff --git a/parser/testdata/02355_control_block_size_in_aggregator/ast.json b/parser/testdata/02355_control_block_size_in_aggregator/ast.json new file mode 100644 index 000000000..bc6b82fa7 --- /dev/null +++ b/parser/testdata/02355_control_block_size_in_aggregator/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00132065, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02355_control_block_size_in_aggregator/metadata.json b/parser/testdata/02355_control_block_size_in_aggregator/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02355_control_block_size_in_aggregator/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02355_control_block_size_in_aggregator/query.sql b/parser/testdata/02355_control_block_size_in_aggregator/query.sql new file mode 100644 index 000000000..f9f9661a7 --- /dev/null +++ b/parser/testdata/02355_control_block_size_in_aggregator/query.sql @@ -0,0 +1,10 @@ +SET max_block_size = 4213; + +--- We allocate space for one more row in case nullKeyData is present. 
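+--- Hence the bound checked below: max_block_size (4213) + 1 reserved row for a
+--- possible null key = 4214, so every block emitted by the aggregation must
+--- satisfy blockSize() <= 4214 (a restatement of the comment above, assuming the
+--- extra slot is the only overhead).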
+SELECT DISTINCT (blockSize() <= 4214) +FROM +( + SELECT number + FROM numbers(100000) + GROUP BY number +); diff --git a/parser/testdata/02355_control_block_size_in_array_join/ast.json b/parser/testdata/02355_control_block_size_in_array_join/ast.json new file mode 100644 index 000000000..4d783e024 --- /dev/null +++ b/parser/testdata/02355_control_block_size_in_array_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001179157, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02355_control_block_size_in_array_join/metadata.json b/parser/testdata/02355_control_block_size_in_array_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02355_control_block_size_in_array_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02355_control_block_size_in_array_join/query.sql b/parser/testdata/02355_control_block_size_in_array_join/query.sql new file mode 100644 index 000000000..f094a6126 --- /dev/null +++ b/parser/testdata/02355_control_block_size_in_array_join/query.sql @@ -0,0 +1,13 @@ +SET max_block_size = 8192; + +SELECT DISTINCT blockSize() <= 8192 +FROM +( + SELECT n + FROM + ( + SELECT range(0, rand() % 10) AS x + FROM numbers(1000000) + ) + LEFT ARRAY JOIN x AS n +) diff --git a/parser/testdata/02356_insert_query_log_metrics/ast.json b/parser/testdata/02356_insert_query_log_metrics/ast.json new file mode 100644 index 000000000..5e3f3655a --- /dev/null +++ b/parser/testdata/02356_insert_query_log_metrics/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery 02356_destination (children 3)" + }, + { + "explain": " Identifier 02356_destination" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration a (children 1)" + }, + { + "explain": " DataType Int64" + }, + { + "explain": " ColumnDeclaration b (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001380839, + "rows_read": 10, + "bytes_read": 368 + } +} diff --git a/parser/testdata/02356_insert_query_log_metrics/metadata.json b/parser/testdata/02356_insert_query_log_metrics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02356_insert_query_log_metrics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02356_insert_query_log_metrics/query.sql b/parser/testdata/02356_insert_query_log_metrics/query.sql new file mode 100644 index 000000000..e43fba239 --- /dev/null +++ b/parser/testdata/02356_insert_query_log_metrics/query.sql @@ -0,0 +1,5 @@ +CREATE TABLE 02356_destination (a Int64, b String) ENGINE = Memory; + +INSERT INTO 02356_destination (a, b) SELECT * FROM generateRandom('a Int64, b String') LIMIT 100 SETTINGS max_threads=1, max_block_size=100; +SYSTEM FLUSH LOGS query_log; +SELECT read_rows = written_rows, read_rows = result_rows, read_bytes = written_bytes, read_bytes = result_bytes FROM system.query_log where normalized_query_hash = 1214411238725380014 and type='QueryFinish' and current_database = currentDatabase() FORMAT CSV; \ No newline at end of file diff --git 
a/parser/testdata/02356_trivial_count_with_empty_set/ast.json b/parser/testdata/02356_trivial_count_with_empty_set/ast.json new file mode 100644 index 000000000..9f5de6111 --- /dev/null +++ b/parser/testdata/02356_trivial_count_with_empty_set/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001186471, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02356_trivial_count_with_empty_set/metadata.json b/parser/testdata/02356_trivial_count_with_empty_set/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02356_trivial_count_with_empty_set/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02356_trivial_count_with_empty_set/query.sql b/parser/testdata/02356_trivial_count_with_empty_set/query.sql new file mode 100644 index 000000000..89630d1aa --- /dev/null +++ b/parser/testdata/02356_trivial_count_with_empty_set/query.sql @@ -0,0 +1,9 @@ +drop table if exists test; + +create table test(a Int64) Engine=MergeTree order by tuple(); + +set optimize_trivial_count_query=1, empty_result_for_aggregation_by_empty_set=1; + +select count() from test; + +drop table test; diff --git a/parser/testdata/02357_file_default_value/ast.json b/parser/testdata/02357_file_default_value/ast.json new file mode 100644 index 000000000..2f7b6b759 --- /dev/null +++ b/parser/testdata/02357_file_default_value/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function file (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'nonexistent.txt'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001427963, + "rows_read": 7, + "bytes_read": 266 + } +} diff --git a/parser/testdata/02357_file_default_value/metadata.json b/parser/testdata/02357_file_default_value/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02357_file_default_value/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02357_file_default_value/query.sql b/parser/testdata/02357_file_default_value/query.sql new file mode 100644 index 000000000..070b868c7 --- /dev/null +++ b/parser/testdata/02357_file_default_value/query.sql @@ -0,0 +1,3 @@ +SELECT file('nonexistent.txt'); -- { serverError FILE_DOESNT_EXIST } +SELECT file('nonexistent.txt', 'default'); +SELECT file('nonexistent.txt', NULL); diff --git a/parser/testdata/02360_small_notation_h_for_hour_interval/ast.json b/parser/testdata/02360_small_notation_h_for_hour_interval/ast.json new file mode 100644 index 000000000..9c89a7186 --- /dev/null +++ b/parser/testdata/02360_small_notation_h_for_hour_interval/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function dateDiff (children 1)" + }, + { + "explain": " 
ExpressionList (children 3)" + }, + { + "explain": " Literal 'h'" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2018-01-01 22:00:00'" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2018-01-02 23:00:00'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001706259, + "rows_read": 13, + "bytes_read": 522 + } +} diff --git a/parser/testdata/02360_small_notation_h_for_hour_interval/metadata.json b/parser/testdata/02360_small_notation_h_for_hour_interval/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02360_small_notation_h_for_hour_interval/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02360_small_notation_h_for_hour_interval/query.sql b/parser/testdata/02360_small_notation_h_for_hour_interval/query.sql new file mode 100644 index 000000000..f7fa762b6 --- /dev/null +++ b/parser/testdata/02360_small_notation_h_for_hour_interval/query.sql @@ -0,0 +1,2 @@ +SELECT dateDiff('h', toDateTime('2018-01-01 22:00:00'), toDateTime('2018-01-02 23:00:00')); +SELECT toDateTime('2018-01-01 22:00:00') + INTERVAL 4 h diff --git a/parser/testdata/02362_part_log_merge_algorithm/ast.json b/parser/testdata/02362_part_log_merge_algorithm/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02362_part_log_merge_algorithm/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02362_part_log_merge_algorithm/metadata.json b/parser/testdata/02362_part_log_merge_algorithm/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02362_part_log_merge_algorithm/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02362_part_log_merge_algorithm/query.sql b/parser/testdata/02362_part_log_merge_algorithm/query.sql new file mode 100644 index 000000000..231451f8f --- /dev/null +++ b/parser/testdata/02362_part_log_merge_algorithm/query.sql @@ -0,0 +1,28 @@ +CREATE TABLE data_horizontal ( + key Int +) +Engine=MergeTree() +ORDER BY key +SETTINGS old_parts_lifetime = 600, vertical_merge_algorithm_min_rows_to_activate = 100000000; + +INSERT INTO data_horizontal VALUES (1); +OPTIMIZE TABLE data_horizontal FINAL; +SYSTEM FLUSH LOGS part_log; +SELECT table, part_name, event_type, merge_algorithm FROM system.part_log WHERE event_date >= yesterday() AND database = currentDatabase() AND table = 'data_horizontal' AND event_type IN ('NewPart', 'MergeParts') ORDER BY event_time_microseconds; + +CREATE TABLE data_vertical +( + key UInt64, + value String +) +ENGINE = MergeTree() +ORDER BY key +SETTINGS index_granularity_bytes = 0, enable_mixed_granularity_parts = 0, min_bytes_for_wide_part = 0, +vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 1, +old_parts_lifetime = 600; + +INSERT INTO data_vertical VALUES (1, '1'); +INSERT INTO data_vertical VALUES (2, '2'); +OPTIMIZE TABLE data_vertical FINAL; +SYSTEM FLUSH LOGS part_log; +SELECT table, part_name, event_type, merge_algorithm FROM system.part_log WHERE event_date >= yesterday() AND database = currentDatabase() AND table = 'data_vertical' AND event_type IN ('NewPart', 'MergeParts') ORDER BY event_time_microseconds; diff --git a/parser/testdata/02363_mapupdate_improve/ast.json b/parser/testdata/02363_mapupdate_improve/ast.json new file mode 100644 index 
000000000..c53589e2c --- /dev/null +++ b/parser/testdata/02363_mapupdate_improve/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery map_test (children 1)" + }, + { + "explain": " Identifier map_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001462241, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/02363_mapupdate_improve/metadata.json b/parser/testdata/02363_mapupdate_improve/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02363_mapupdate_improve/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02363_mapupdate_improve/query.sql b/parser/testdata/02363_mapupdate_improve/query.sql new file mode 100644 index 000000000..c3cd8fff9 --- /dev/null +++ b/parser/testdata/02363_mapupdate_improve/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS map_test; +CREATE TABLE map_test(`tags` Map(String, String)) ENGINE = MergeTree PRIMARY KEY tags ORDER BY tags SETTINGS index_granularity = 8192; +INSERT INTO map_test (tags) VALUES (map('fruit','apple','color','red')); +INSERT INTO map_test (tags) VALUES (map('fruit','apple','color','red')); +INSERT INTO map_test (tags) VALUES (map('fruit','apple','color','red')); +INSERT INTO map_test (tags) VALUES (map('fruit','apple','color','red')); +INSERT INTO map_test (tags) VALUES (map('fruit','apple','color','red')); +SELECT mapUpdate(mapFilter((k, v) -> (k in ('fruit')), tags), map('season', 'autumn')) FROM map_test; +SELECT mapUpdate(map('season','autumn'), mapFilter((k, v) -> (k in ('fruit')), tags)) FROM map_test; +DROP TABLE map_test; diff --git a/parser/testdata/02364_dictionary_datetime_64_attribute_crash/ast.json b/parser/testdata/02364_dictionary_datetime_64_attribute_crash/ast.json new file mode 100644 index 000000000..0d02f9423 --- /dev/null +++ b/parser/testdata/02364_dictionary_datetime_64_attribute_crash/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery dat (children 3)" + }, + { + "explain": " Identifier dat" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration blockNum (children 1)" + }, + { + "explain": " DataType Decimal (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " ColumnDeclaration eventTimestamp (children 1)" + }, + { + "explain": " DataType DateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_9" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier eventTimestamp" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001406842, + "rows_read": 17, + "bytes_read": 639 + } +} diff --git a/parser/testdata/02364_dictionary_datetime_64_attribute_crash/metadata.json b/parser/testdata/02364_dictionary_datetime_64_attribute_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02364_dictionary_datetime_64_attribute_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02364_dictionary_datetime_64_attribute_crash/query.sql 
b/parser/testdata/02364_dictionary_datetime_64_attribute_crash/query.sql new file mode 100644 index 000000000..77fc9e118 --- /dev/null +++ b/parser/testdata/02364_dictionary_datetime_64_attribute_crash/query.sql @@ -0,0 +1,15 @@ +create table dat (blockNum Decimal(10,0), eventTimestamp DateTime64(9)) Engine=MergeTree() primary key eventTimestamp; +insert into dat values (1, '2022-01-24 02:30:00.008122000'); + +CREATE DICTIONARY datDictionary +( + `blockNum` Decimal(10, 0), + `eventTimestamp` DateTime64(9) +) +PRIMARY KEY blockNum +SOURCE(CLICKHOUSE(TABLE 'dat')) +LIFETIME(MIN 0 MAX 1000) +LAYOUT(FLAT()); + +select (select eventTimestamp from datDictionary); +select count(*) from dat where eventTimestamp >= (select eventTimestamp from datDictionary); diff --git a/parser/testdata/02364_multiSearch_function_family/ast.json b/parser/testdata/02364_multiSearch_function_family/ast.json new file mode 100644 index 000000000..620fbaeb8 --- /dev/null +++ b/parser/testdata/02364_multiSearch_function_family/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00126719, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02364_multiSearch_function_family/metadata.json b/parser/testdata/02364_multiSearch_function_family/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02364_multiSearch_function_family/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02364_multiSearch_function_family/query.sql b/parser/testdata/02364_multiSearch_function_family/query.sql new file mode 100644 index 000000000..65ad3a7ed --- /dev/null +++ b/parser/testdata/02364_multiSearch_function_family/query.sql @@ -0,0 +1,538 @@ +SET send_logs_level = 'fatal'; + +select 0 = multiSearchAny('\0', CAST([], 'Array(String)')); +select 0 = multiSearchAnyCaseInsensitive('\0', CAST([], 'Array(String)')); +select 0 = multiSearchAnyCaseInsensitiveUTF8('\0', CAST([], 'Array(String)')); +select 0 = multiSearchAnyUTF8('\0', CAST([], 'Array(String)')); +select 0 = multiSearchFirstIndex('\0', CAST([], 'Array(String)')); +select 0 = multiSearchFirstIndexCaseInsensitive('\0', CAST([], 'Array(String)')); +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8('\0', CAST([], 'Array(String)')); +select 0 = multiSearchFirstIndexUTF8('\0', CAST([], 'Array(String)')); +select 0 = multiSearchFirstPosition('\0', CAST([], 'Array(String)')); +select 0 = multiSearchFirstPositionCaseInsensitive('\0', CAST([], 'Array(String)')); +select 0 = multiSearchFirstPositionCaseInsensitiveUTF8('\0', CAST([], 'Array(String)')); +select 0 = multiSearchFirstPositionUTF8('\0', CAST([], 'Array(String)')); +select [] = multiSearchAllPositions('\0', CAST([], 'Array(String)')); +select [] = multiSearchAllPositionsCaseInsensitive('\0', CAST([], 'Array(String)')); +select [] = multiSearchAllPositionsCaseInsensitiveUTF8('\0', CAST([], 'Array(String)')); +select [] = multiSearchAllPositionsUTF8('\0', CAST([], 'Array(String)')); + +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['b']); +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bc']); +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcd']); +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcde']); +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdef']); +select [2] = multiSearchAllPositions(materialize('abcdefgh'), 
['bcdefg']); +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdefgh']); + +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdefgh']); +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdefg']); +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdef']); +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcde']); +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcd']); +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abc']); +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['ab']); +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['a']); + +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['c']); +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cd']); +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cde']); +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdef']); +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdefg']); +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdefgh']); + +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['defgh']); +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['defg']); +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['def']); +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['de']); +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['d']); + +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['e']); +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['ef']); +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['efg']); +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['efgh']); + +select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['fgh']); +select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['fg']); +select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['f']); + +select [7] = multiSearchAllPositions(materialize('abcdefgh'), ['g']); +select [7] = multiSearchAllPositions(materialize('abcdefgh'), ['gh']); + +select [8] = multiSearchAllPositions(materialize('abcdefgh'), ['h']); + +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['b']) from system.numbers limit 10; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bc']) from system.numbers limit 10; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcd']) from system.numbers limit 10; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcde']) from system.numbers limit 10; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdef']) from system.numbers limit 10; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdefg']) from system.numbers limit 10; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdefgh']) from system.numbers limit 10; + +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdefgh']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdefg']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdef']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcde']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcd']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abc']) from 
system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['ab']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['a']) from system.numbers limit 10; + +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['c']) from system.numbers limit 10; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cd']) from system.numbers limit 10; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cde']) from system.numbers limit 10; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdef']) from system.numbers limit 10; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdefg']) from system.numbers limit 10; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdefgh']) from system.numbers limit 10; + +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['defgh']) from system.numbers limit 10; +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['defg']) from system.numbers limit 10; +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['def']) from system.numbers limit 10; +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['de']) from system.numbers limit 10; +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['d']) from system.numbers limit 10; + +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['e']) from system.numbers limit 10; +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['ef']) from system.numbers limit 10; +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['efg']) from system.numbers limit 10; +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['efgh']) from system.numbers limit 10; + +select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['fgh']) from system.numbers limit 10; +select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['fg']) from system.numbers limit 10; +select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['f']) from system.numbers limit 10; + +select [7] = multiSearchAllPositions(materialize('abcdefgh'), ['g']) from system.numbers limit 10; +select [7] = multiSearchAllPositions(materialize('abcdefgh'), ['gh']) from system.numbers limit 10; + +select [8] = multiSearchAllPositions(materialize('abcdefgh'), ['h']) from system.numbers limit 10; + +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['b']) from system.numbers limit 129; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bc']) from system.numbers limit 129; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcd']) from system.numbers limit 10; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcde']) from system.numbers limit 129; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdef']) from system.numbers limit 129; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdefg']) from system.numbers limit 129; +select [2] = multiSearchAllPositions(materialize('abcdefgh'), ['bcdefgh']) from system.numbers limit 129; + +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdefgh']) from system.numbers limit 129; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdefg']) from system.numbers limit 129; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcdef']) from system.numbers limit 129; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abcde']) from system.numbers limit 129; +select [1] = 
multiSearchAllPositions(materialize('abcdefgh'), ['abcd']) from system.numbers limit 129; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['abc']) from system.numbers limit 129; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['ab']) from system.numbers limit 129; +select [1] = multiSearchAllPositions(materialize('abcdefgh'), ['a']) from system.numbers limit 129; + +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['c']) from system.numbers limit 129; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cd']) from system.numbers limit 129; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cde']) from system.numbers limit 129; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdef']) from system.numbers limit 129; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdefg']) from system.numbers limit 129; +select [3] = multiSearchAllPositions(materialize('abcdefgh'), ['cdefgh']) from system.numbers limit 129; + +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['defgh']) from system.numbers limit 129; +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['defg']) from system.numbers limit 129; +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['def']) from system.numbers limit 129; +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['de']) from system.numbers limit 129; +select [4] = multiSearchAllPositions(materialize('abcdefgh'), ['d']) from system.numbers limit 129; + +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['e']) from system.numbers limit 129; +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['ef']) from system.numbers limit 129; +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['efg']) from system.numbers limit 129; +select [5] = multiSearchAllPositions(materialize('abcdefgh'), ['efgh']) from system.numbers limit 129; + +select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['fgh']) from system.numbers limit 129; +select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['fg']) from system.numbers limit 129; +select [6] = multiSearchAllPositions(materialize('abcdefgh'), ['f']) from system.numbers limit 129; + +select [7] = multiSearchAllPositions(materialize('abcdefgh'), ['g']) from system.numbers limit 129; +select [7] = multiSearchAllPositions(materialize('abcdefgh'), ['gh']) from system.numbers limit 129; + +select [8] = multiSearchAllPositions(materialize('abcdefgh'), ['h']) from system.numbers limit 129; + +select [2] = multiSearchAllPositions(materialize('abc'), ['b']); +select [2] = multiSearchAllPositions(materialize('abc'), ['bc']); +select [0] = multiSearchAllPositions(materialize('abc'), ['bcde']); +select [0] = multiSearchAllPositions(materialize('abc'), ['bcdef']); +select [0] = multiSearchAllPositions(materialize('abc'), ['bcdefg']); +select [0] = multiSearchAllPositions(materialize('abc'), ['bcdefgh']); + +select [0] = multiSearchAllPositions(materialize('abc'), ['abcdefg']); +select [0] = multiSearchAllPositions(materialize('abc'), ['abcdef']); +select [0] = multiSearchAllPositions(materialize('abc'), ['abcde']); +select [0] = multiSearchAllPositions(materialize('abc'), ['abcd']); +select [1] = multiSearchAllPositions(materialize('abc'), ['abc']); +select [1] = multiSearchAllPositions(materialize('abc'), ['ab']); +select [1] = multiSearchAllPositions(materialize('abc'), ['a']); + +select [3] = multiSearchAllPositions(materialize('abcd'), ['c']); +select [3] = 
multiSearchAllPositions(materialize('abcd'), ['cd']); +select [0] = multiSearchAllPositions(materialize('abcd'), ['cde']); +select [0] = multiSearchAllPositions(materialize('abcd'), ['cdef']); +select [0] = multiSearchAllPositions(materialize('abcd'), ['cdefg']); +select [0] = multiSearchAllPositions(materialize('abcd'), ['cdefgh']); + +select [0] = multiSearchAllPositions(materialize('abc'), ['defgh']); +select [0] = multiSearchAllPositions(materialize('abc'), ['defg']); +select [0] = multiSearchAllPositions(materialize('abc'), ['def']); +select [0] = multiSearchAllPositions(materialize('abc'), ['de']); +select [0] = multiSearchAllPositions(materialize('abc'), ['d']); + + +select [2] = multiSearchAllPositions(materialize('abc'), ['b']) from system.numbers limit 10; +select [2] = multiSearchAllPositions(materialize('abc'), ['bc']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['bcde']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['bcdef']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['bcdefg']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['bcdefgh']) from system.numbers limit 10; + + +select [0] = multiSearchAllPositions(materialize('abc'), ['abcdefg']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['abcdef']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['abcde']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['abcd']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abc'), ['abc']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abc'), ['ab']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abc'), ['a']) from system.numbers limit 10; + +select [3] = multiSearchAllPositions(materialize('abcd'), ['c']) from system.numbers limit 10; +select [3] = multiSearchAllPositions(materialize('abcd'), ['cd']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abcd'), ['cde']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abcd'), ['cdef']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abcd'), ['cdefg']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abcd'), ['cdefgh']) from system.numbers limit 10; + +select [0] = multiSearchAllPositions(materialize('abc'), ['defgh']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['defg']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['def']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['de']) from system.numbers limit 10; +select [0] = multiSearchAllPositions(materialize('abc'), ['d']) from system.numbers limit 10; + +select [1] = multiSearchAllPositions(materialize('abc'), ['']); +select [1] = multiSearchAllPositions(materialize('abc'), ['']) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abc'), ['']) from system.numbers limit 100; +select [1] = multiSearchAllPositions(materialize('abc'), ['']) from system.numbers limit 1000; + +select [1] = multiSearchAllPositions(materialize('abab'), ['ab']); +select [1] = multiSearchAllPositions(materialize('abababababababababababab'), ['abab']); 
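+-- Illustrative note (not part of the upstream ClickHouse test): for each needle,
+-- multiSearchAllPositions reports the 1-based position of its first occurrence,
+-- 0 if it never occurs, and 1 for the empty needle, as the checks above show.
+select [1, 1, 0, 1] = multiSearchAllPositions(materialize('abab'), ['ab', 'ab', 'zz', '']);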
+select [1] = multiSearchAllPositions(materialize('abababababababababababab'), ['abababababababababa']); + +select [1] = multiSearchAllPositions(materialize('abc'), materialize([''])); +select [1] = multiSearchAllPositions(materialize('abc'), materialize([''])) from system.numbers limit 10; +select [1] = multiSearchAllPositions(materialize('abab'), materialize(['ab'])); +select [2] = multiSearchAllPositions(materialize('abab'), materialize(['ba'])); +select [1] = multiSearchAllPositionsCaseInsensitive(materialize('aBaB'), materialize(['abab'])); +select [3] = multiSearchAllPositionsUTF8(materialize('ab€ab'), materialize(['€'])); +select [3] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ab€AB'), materialize(['€ab'])); +-- checks the correct handling of broken utf-8 sequence +select [0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize(''), materialize(['a\x90\x90\x90\x90\x90\x90'])); + +select 1 = multiSearchAny(materialize('abcdefgh'), ['b']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['bc']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcd']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcde']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdef']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdefg']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdefgh']); + +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdefgh']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdefg']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdef']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcde']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcd']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['abc']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['ab']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['a']); + +select 1 = multiSearchAny(materialize('abcdefgh'), ['c']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['cd']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['cde']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['cdef']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['cdefg']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['cdefgh']); + +select 1 = multiSearchAny(materialize('abcdefgh'), ['defgh']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['defg']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['def']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['de']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['d']); + +select 1 = multiSearchAny(materialize('abcdefgh'), ['e']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['ef']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['efg']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['efgh']); + +select 1 = multiSearchAny(materialize('abcdefgh'), ['fgh']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['fg']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['f']); + +select 1 = multiSearchAny(materialize('abcdefgh'), ['g']); +select 1 = multiSearchAny(materialize('abcdefgh'), ['gh']); + +select 1 = multiSearchAny(materialize('abcdefgh'), ['h']); + +select 1 = multiSearchAny(materialize('abcdefgh'), ['b']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bc']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcd']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcde']) from system.numbers limit 10; +select 1 = 
multiSearchAny(materialize('abcdefgh'), ['bcdef']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdefg']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdefgh']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdefgh']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdefg']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdef']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcde']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcd']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abc']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['ab']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['a']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['c']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cd']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cde']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cdef']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cdefg']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cdefgh']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['defgh']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['defg']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['def']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['de']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['d']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['e']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['ef']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['efg']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['efgh']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['fgh']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['fg']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['f']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['g']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['gh']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['h']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['b']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bc']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcd']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcde']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdef']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdefg']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['bcdefgh']) from 
system.numbers limit 129; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdefgh']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdefg']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcdef']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcde']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abcd']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['abc']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['ab']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['a']) from system.numbers limit 129; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['c']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cd']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cde']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cdef']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cdefg']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['cdefgh']) from system.numbers limit 129; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['defgh']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['defg']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['def']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['de']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['d']) from system.numbers limit 129; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['e']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['ef']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['efg']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['efgh']) from system.numbers limit 129; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['fgh']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['fg']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['f']) from system.numbers limit 129; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['g']) from system.numbers limit 129; +select 1 = multiSearchAny(materialize('abcdefgh'), ['gh']) from system.numbers limit 129; + +select 1 = multiSearchAny(materialize('abcdefgh'), ['h']) from system.numbers limit 129; + +select 1 = multiSearchAny(materialize('abc'), ['b']); +select 1 = multiSearchAny(materialize('abc'), ['bc']); +select 0 = multiSearchAny(materialize('abc'), ['bcde']); +select 0 = multiSearchAny(materialize('abc'), ['bcdef']); +select 0 = multiSearchAny(materialize('abc'), ['bcdefg']); +select 0 = multiSearchAny(materialize('abc'), ['bcdefgh']); + +select 0 = multiSearchAny(materialize('abc'), ['abcdefg']); +select 0 = multiSearchAny(materialize('abc'), ['abcdef']); +select 0 = multiSearchAny(materialize('abc'), ['abcde']); +select 0 = multiSearchAny(materialize('abc'), ['abcd']); +select 1 = multiSearchAny(materialize('abc'), ['abc']); +select 1 = multiSearchAny(materialize('abc'), ['ab']); +select 1 = multiSearchAny(materialize('abc'), ['a']); + +select 1 = multiSearchAny(materialize('abcd'), ['c']); +select 1 = 
multiSearchAny(materialize('abcd'), ['cd']); +select 0 = multiSearchAny(materialize('abcd'), ['cde']); +select 0 = multiSearchAny(materialize('abcd'), ['cdef']); +select 0 = multiSearchAny(materialize('abcd'), ['cdefg']); +select 0 = multiSearchAny(materialize('abcd'), ['cdefgh']); + +select 0 = multiSearchAny(materialize('abc'), ['defgh']); +select 0 = multiSearchAny(materialize('abc'), ['defg']); +select 0 = multiSearchAny(materialize('abc'), ['def']); +select 0 = multiSearchAny(materialize('abc'), ['de']); +select 0 = multiSearchAny(materialize('abc'), ['d']); + + +select 1 = multiSearchAny(materialize('abc'), ['b']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abc'), ['bc']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['bcde']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['bcdef']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['bcdefg']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['bcdefgh']) from system.numbers limit 10; + + +select 0 = multiSearchAny(materialize('abc'), ['abcdefg']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['abcdef']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['abcde']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['abcd']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abc'), ['abc']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abc'), ['ab']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abc'), ['a']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abcd'), ['c']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abcd'), ['cd']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abcd'), ['cde']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abcd'), ['cdef']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abcd'), ['cdefg']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abcd'), ['cdefgh']) from system.numbers limit 10; + +select 0 = multiSearchAny(materialize('abc'), ['defgh']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['defg']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['def']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['de']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('abc'), ['d']) from system.numbers limit 10; + +select 1 = multiSearchAny(materialize('abc'), ['']); +select 1 = multiSearchAny(materialize('abc'), ['']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('abc'), ['']) from system.numbers limit 100; +select 1 = multiSearchAny(materialize('abc'), ['']) from system.numbers limit 1000; + +select 1 = multiSearchAny(materialize('abab'), ['ab']); +select 1 = multiSearchAny(materialize('abababababababababababab'), ['abab']); +select 1 = multiSearchAny(materialize('abababababababababababab'), ['abababababababababa']); + + +select 0 = multiSearchFirstPosition(materialize('abcdefgh'), ['z', 'pq']) from system.numbers limit 10; +select 1 = multiSearchFirstPosition(materialize('abcdefgh'), ['a', 'b', 'c', 'd']) from system.numbers limit 10; +select 1 = multiSearchFirstPosition(materialize('abcdefgh'), ['defgh', 'bcd', 'abcd', 'c']) from system.numbers limit 10; 
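+-- Illustrative note (not part of the upstream ClickHouse test): multiSearchFirstPosition
+-- returns the smallest 1-based position at which any of the needles matches, or 0 if
+-- none do; here 'de' starts at 4 and 'gh' at 7, so the minimum 4 is returned.
+select 4 = multiSearchFirstPosition(materialize('abcdefgh'), ['de', 'gh']);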
+select 1 = multiSearchFirstPosition(materialize('abcdefgh'), ['', 'bcd', 'bcd', 'c']) from system.numbers limit 10; +select 2 = multiSearchFirstPosition(materialize('abcdefgh'), ['something', 'bcd', 'bcd', 'c']) from system.numbers limit 10; +select 6 = multiSearchFirstPosition(materialize('abcdefgh'), ['something', 'bcdz', 'fgh', 'f']) from system.numbers limit 10; + +select 0 = multiSearchFirstPositionCaseInsensitive(materialize('abcdefgh'), ['z', 'pq']) from system.numbers limit 10; +select 1 = multiSearchFirstPositionCaseInsensitive(materialize('aBcdefgh'), ['A', 'b', 'c', 'd']) from system.numbers limit 10; +select 1 = multiSearchFirstPositionCaseInsensitive(materialize('abCDefgh'), ['defgh', 'bcd', 'aBCd', 'c']) from system.numbers limit 10; +select 1 = multiSearchFirstPositionCaseInsensitive(materialize('abCdeFgH'), ['', 'bcd', 'bcd', 'c']) from system.numbers limit 10; +select 2 = multiSearchFirstPositionCaseInsensitive(materialize('ABCDEFGH'), ['something', 'bcd', 'bcd', 'c']) from system.numbers limit 10; +select 6 = multiSearchFirstPositionCaseInsensitive(materialize('abcdefgh'), ['sOmEthIng', 'bcdZ', 'fGh', 'F']) from system.numbers limit 10; + +select 0 = multiSearchFirstPositionUTF8(materialize('абвгдежз'), ['л', 'ъ']) from system.numbers limit 10; +select 1 = multiSearchFirstPositionUTF8(materialize('абвгдежз'), ['а', 'б', 'в', 'г']) from system.numbers limit 10; +select 1 = multiSearchFirstPositionUTF8(materialize('абвгдежз'), ['гдежз', 'бвг', 'абвг', 'вг']) from system.numbers limit 10; +select 1 = multiSearchFirstPositionUTF8(materialize('абвгдежз'), ['', 'бвг', 'бвг', 'в']) from system.numbers limit 10; +select 2 = multiSearchFirstPositionUTF8(materialize('абвгдежз'), ['что', 'в', 'гдз', 'бвг']) from system.numbers limit 10; +select 6 = multiSearchFirstPositionUTF8(materialize('абвгдежз'), ['з', 'бвгя', 'ежз', 'з']) from system.numbers limit 10; + +select 0 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('аБвгДежз'), ['Л', 'Ъ']) from system.numbers limit 10; +select 1 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('аБвгДежз'), ['А', 'б', 'в', 'г']) from system.numbers limit 10; +select 1 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('аБвгДежз'), ['гДеЖз', 'бВг', 'АБВг', 'вг']) from system.numbers limit 10; +select 1 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('аБвгДежз'), ['', 'бвг', 'Бвг', 'в']) from system.numbers limit 10; +select 2 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('аБвгДежз'), ['что', 'в', 'гдз', 'бвг']) from system.numbers limit 10; +select 6 = multiSearchFirstPositionCaseInsensitiveUTF8(materialize('аБвгДежЗ'), ['З', 'бвгЯ', 'ЕЖз', 'з']) from system.numbers limit 10; + +select +[ +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 +] = +multiSearchAllPositions(materialize('string'), +['o', 
'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); + +select 254 = multiSearchFirstIndex(materialize('string'), +['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); + + +select +[ +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 +] = +multiSearchAllPositions(materialize('string'), +['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 
'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); + +select 255 = multiSearchFirstIndex(materialize('string'), +['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); + +select multiSearchAllPositions(materialize('string'), +['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 
'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +select multiSearchFirstIndex(materialize('string'), +['o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', +'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'o', 'str']); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/parser/testdata/02364_setting_cross_to_inner_rewrite/ast.json b/parser/testdata/02364_setting_cross_to_inner_rewrite/ast.json new file mode 100644 index 000000000..22fd9ac90 --- /dev/null +++ b/parser/testdata/02364_setting_cross_to_inner_rewrite/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001135486, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02364_setting_cross_to_inner_rewrite/metadata.json b/parser/testdata/02364_setting_cross_to_inner_rewrite/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02364_setting_cross_to_inner_rewrite/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02364_setting_cross_to_inner_rewrite/query.sql b/parser/testdata/02364_setting_cross_to_inner_rewrite/query.sql new file mode 100644 index 000000000..fba57ead8 --- /dev/null +++ b/parser/testdata/02364_setting_cross_to_inner_rewrite/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 ( x Int ) Engine = Log; +INSERT INTO t1 VALUES ( 1 ), ( 2 ), ( 3 ); + +CREATE TABLE t2 ( x Int ) Engine = Log; +INSERT INTO t2 VALUES ( 2 ), ( 3 ), ( 4 ); + +SET cross_to_inner_join_rewrite = 1; +SELECT count() = 1 FROM t1, t2 WHERE t1.x > t2.x; +SELECT count() = 2 FROM t1, t2 WHERE t1.x = t2.x; +SELECT count() = 2 FROM t1 CROSS JOIN t2 WHERE t1.x = t2.x; +SELECT count() = 1 FROM t1 CROSS JOIN t2 WHERE t1.x > t2.x; + +SET cross_to_inner_join_rewrite = 2; +SELECT count() = 1 FROM t1, t2 WHERE t1.x > t2.x; -- { 
serverError INCORRECT_QUERY } +SELECT count() = 2 FROM t1, t2 WHERE t1.x = t2.x; +SELECT count() = 2 FROM t1 CROSS JOIN t2 WHERE t1.x = t2.x; +SELECT count() = 1 FROM t1 CROSS JOIN t2 WHERE t1.x > t2.x; -- do not force rewrite explicit CROSS diff --git a/parser/testdata/02364_window_case/ast.json b/parser/testdata/02364_window_case/ast.json new file mode 100644 index 000000000..54c802328 --- /dev/null +++ b/parser/testdata/02364_window_case/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier CASE" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001713079, + "rows_read": 5, + "bytes_read": 176 + } +} diff --git a/parser/testdata/02364_window_case/metadata.json b/parser/testdata/02364_window_case/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02364_window_case/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02364_window_case/query.sql b/parser/testdata/02364_window_case/query.sql new file mode 100644 index 000000000..b34686c3c --- /dev/null +++ b/parser/testdata/02364_window_case/query.sql @@ -0,0 +1,4 @@ +SELECT CASE + WHEN sum(number) over () > 0 THEN number + 1 + ELSE 0 END +FROM numbers(10) diff --git a/parser/testdata/02365_multisearch_random_tests/ast.json b/parser/testdata/02365_multisearch_random_tests/ast.json new file mode 100644 index 000000000..f54a8a3c8 --- /dev/null +++ b/parser/testdata/02365_multisearch_random_tests/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001376754, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02365_multisearch_random_tests/metadata.json b/parser/testdata/02365_multisearch_random_tests/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02365_multisearch_random_tests/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02365_multisearch_random_tests/query.sql b/parser/testdata/02365_multisearch_random_tests/query.sql new file mode 100644 index 000000000..3243dd476 --- /dev/null +++ b/parser/testdata/02365_multisearch_random_tests/query.sql @@ -0,0 +1,379 @@ +SET send_logs_level = 'fatal'; + +select [4, 1, 1, 2, 6, 1, 1, 0, 4, 1, 14, 0, 10, 0, 16, 6] = multiSearchAllPositions(materialize('jmdqwjbrxlbatqeixknricfk'), ['qwjbrxlba', 'jmd', '', 'mdqwjbrxlbatqe', 'jbrxlbatqeixknric', 'jmdqwjbrxlbatqeixknri', '', 'fdtmnwtts', 'qwjbrxlba', '', 'qeixknricfk', 'hzjjgrnoilfkvzxaemzhf', 'lb', 'kamz', 'ixknr', 'jbrxlbatq']) from system.numbers limit 10; +select [0, 0, 0, 2, 3, 0, 1, 0, 5, 0, 0, 0, 11, 10, 6, 7] = multiSearchAllPositions(materialize('coxcctuehmzkbrsmodfvx'), ['bkhnp', 'nlypjvriuk', 'rkslxwfqjjivcwdexrdtvjdtvuu', 'oxcctuehm', 'xcctuehmzkbrsm', 'kfrieuocovykjmkwxbdlkgwctwvcuh', 'coxc', 'lbwvetgxyndxjqqwthtkgasbafii', 'ctuehmzkbrsmodfvx', 'obzldxjldxowk', 'ngfikgigeyll', 'wdaejjukowgvzijnw', 'zkbr', 'mzkb', 'tuehm', 'ue']) from system.numbers limit 10; +select [1, 1, 0, 0, 0, 1, 1, 1, 4, 0, 6, 6, 0, 10, 1, 5] = multiSearchAllPositions(materialize('mpswgtljbbrmivkcglamemayfn'), ['', 'm', 'saejhpnfgfq', 'rzanrkdssmmkanqjpfi', 
'oputeneprgoowg', 'mp', '', '', 'wgtljbbrmivkcglamemay', 'cbpthtrgrmgfypizi', 'tl', 'tlj', 'xuhs', 'brmivkcglamemayfn', '', 'gtljb']) from system.numbers limit 10; +select [1, 0, 0, 8, 6, 0, 7, 1, 3, 0, 0, 0, 0, 12] = multiSearchAllPositions(materialize('arbphzbbecypbzsqsljurtddve'), ['arbphzb', 'mnrboimjfijnti', 'cikcrd', 'becypbz', 'z', 'uocmqgnczhdcrvtqrnaxdxjjlhakoszuwc', 'bbe', '', 'bp', 'yhltnexlpdijkdzt', 'jkwjmrckvgmccmmrolqvy', 'vdxmicjmfbtsbqqmqcgtnrvdgaucsgspwg', 'witlfqwvhmmyjrnrzttrikhhsrd', 'pbzsqsljurt']) from system.numbers limit 10; +select [7, 0, 0, 8, 0, 2, 0, 0, 6, 0, 2, 0, 3, 1] = multiSearchAllPositions(materialize('aizovxqpzcbbxuhwtiaaqhdqjdei'), ['qpzcbbxuhw', 'jugrpglqbm', 'dspwhzpyjohhtizegrnswhjfpdz', 'pzcbbxuh', 'vayzeszlycke', 'i', 'gvrontcpqavsjxtjwzgwxugiyhkhmhq', 'gyzmeroxztgaurmrqwtmsxcqnxaezuoapatvu', 'xqpzc', 'mjiswsvlvlpqrhhptqq', 'iz', 'hmzjxxfjsvcvdpqwtrdrp', 'zovxqpzcbbxuhwtia', 'ai']) from system.numbers limit 10; +select [0, 0, 0, 19, 14, 22, 10, 0, 0, 13, 0, 8] = multiSearchAllPositions(materialize('ydfgiluhyxwqdfiwtzobwzscyxhuov'), ['srsoubrgghleyheujsbwwwykerzlqphgejpxvog', 'axchkyleddjwkvbuyhmekpbbbztxdlm', 'zqodzvlkmfe', 'obwz', 'fi', 'zsc', 'xwq', 'pvmurvrd', 'uulcdtexckmrsokmgdpkstlkoavyrmxeaacvydxf', 'dfi', 'mxcngttujzgtlssrmluaflmjuv', 'hyxwqdfiwtzobwzscyxhu']) from system.numbers limit 10; +select [6, 1, 1, 0, 0, 5, 1, 0, 8, 0, 5, 0, 2, 12, 0, 15, 0, 0] = multiSearchAllPositions(materialize('pyepgwainvmwekwhhqxxvzdjw'), ['w', '', '', 'gvvkllofjnxvcu', 'kmwwhboplctvzazcyfpxhwtaddfnhekei', 'gwainv', 'pyepgwain', 'ekpnogkzzmbpfynsunwqp', 'invmwe', 'hrxpiplfplqjsstuybksuteoz', 'gwa', 'akfpyduqrwosxcbdemtxrxvundrgse', 'yepgwainvmw', 'wekwhhqxxvzdjw', 'fyimzvedmyriubgoznmcav', 'whhq', 'ozxowbwdqfisuupyzaqynoprgsjhkwlum', 'vpoufrofekajksdp']) from system.numbers limit 10; +select [0, 0, 5, 1, 1, 0, 15, 1, 5, 10, 4, 0, 1, 0, 3, 0, 0, 0] = multiSearchAllPositions(materialize('lqwahffxurkbhhzytequotkfk'), ['rwjqudpuaiufle', 'livwgbnflvy', 'hffxurkbhh', '', '', 'xcajwbqbttzfzfowjubmmgnmssat', 'zytequ', 'lq', 'h', 'rkbhh', 'a', 'immejthwgdr', '', 'llhhnlhcvnxxorzzjt', 'w', 'cvjynqxcivmmmvc', 'wexjomdcmursppjtsweybheyxzleuz', 'fzronsnddfxwlkkzidiknhpjipyrcrzel']) from system.numbers limit 10; +select [0, 1, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 1] = multiSearchAllPositions(materialize('nkddriylnakicwgdwrfxpodqea'), ['izwdpgrgpmjlwkanjrffgela', '', 'kicw', 'hltmfymgmrjckdiylkzjlvvyuleksikdjrg', 'yigveskrbidknjxigwilmkgyizewikh', 'xyvzhsnqmuec', 'odcgzlavzrwesjks', 'oilvfgliktoujukpgzvhmokdgkssqgqot', 'llsfsurvimbahwqtbqbp', 'nxj', 'pimydixeobdxmdkvhcyzcgnbhzsydx', 'couzmvxedobuohibgxwoxvmpote', 'driylnakicwgdwrf', 'nkddr']) from system.numbers limit 10; +select [0, 0, 0, 3, 0, 15, 0, 0, 12, 7, 0, 0, 0, 0, 5, 0] = multiSearchAllPositions(materialize('jnckhtjqwycyihuejibqmddrdxe'), ['tajzx', 'vuddoylclxatcjvinusdwt', 'spxkhxvzsljkmnzpeubszjnhqczavgtqopxn', 'ckhtjqwycyi', 'xlbfzdxspldoes', 'u', 'czosfebeznt', 'gzhabdsuyreisxvyfrfrkq', 'yihuejibqmd', 'jqwycyihuejibqm', 'cfbvprgzx', 'hxu', 'vxbhrfpzacgd', 'afoaij', 'htjqwycyihu', 'httzbskqd']) from system.numbers limit 10; +select [0, 0, 12, 4, 4, 0, 13, 23, 0, 1, 0, 2, 0, 0, 0, 3, 0, 0] = multiSearchAllPositions(materialize('dzejajvpoojdkqbnayahygidyrjmb'), ['khwxxvtnqhobbvwgwkpusjlhlzifiuclycml', 'nzvuhtwdaivo', 'dkqbnayahygidyr', 'jajvpoo', 'j', 'wdtbvwmeqgyvetu', 'kqbn', 'idyrjmb', 'tsnxuxevsxrxpgpfdgrkhwqpkse', '', 'efsdgzuefhdzkmquxu', 'zejajvpoojdkqbnayahyg', 'ugwfuighbygrxyctop', 'fcbxzbdugc', 
'dxmzzrcplob', 'ejaj', 'wmmupyxrylvawsyfccluiiene', 'ohzmsqhpzbafvbzqwzftbvftei']) from system.numbers limit 10; +select [6, 8, 1, 4, 0, 10, 0, 1, 14, 0, 1, 0, 5, 0, 0, 0, 0, 15, 0, 1] = multiSearchAllPositions(materialize('ffaujlverosspbzaqefjzql'), ['lvero', 'erossp', 'f', 'ujlverosspbz', 'btfimgklzzxlbkbuqyrmnud', 'osspb', 'muqexvtjuaar', 'f', 'bzaq', 'lprihswhwkdhqciqhfaowarn', 'ffaujlve', 'uhbbjrqjb', 'jlver', 'umucyhbbu', 'pjthtzmgxhvpbdphesnnztuu', 'xfqhfdfsbbazactpastzvzqudgk', 'lvovjfoatc', 'z', 'givejzhoqsd', '']) from system.numbers limit 10; +select [5, 7, 0, 1, 6, 0, 0, 1, 1, 2, 0, 1, 4, 2, 0, 6, 0, 0] = multiSearchAllPositions(materialize('hzftozkvquknsahhxefzg'), ['ozkvquknsahhxefzg', 'kv', 'lkdhmafrec', '', 'zkvquknsahh', 'xmjuizyconipirigdmhqclox', 'dqqwolnkkwbyyjicsoshidbay', '', '', 'zf', 'sonvmkapcjcakgpejvn', 'hzftoz', 't', 'zftozkvqukns', 'dyuqohvehxsvdzdlqzl', 'zkvquknsahhx', 'vueohmytvmglqwptfbhxffspf', 'ilkdurxg']) from system.numbers limit 10; +select [1, 7, 6, 4, 0, 1, 0, 0, 0, 9, 7, 1, 1, 0, 0, 0] = multiSearchAllPositions(materialize('aapdygjzrhskntrphianzjob'), ['', 'jz', 'gjzrh', 'dygjzrhskntrphia', 'qcnahphlxmdru', '', 'rnwvzdn', 'isbekwuivytqggsxniqojrvpwjdr', 'sstwvgyavbwxvjojrpg', 'rhskn', 'jzrhskntrp', '', '', 'toilvppgjizaxtidizgbgygubmob', 'vjwzwpvsklkxqgeqqmtssnhlmw', 'znvpjjlydvzhkt']) from system.numbers limit 10; +select [0, 1, 0, 1, 0, 0, 10, 0, 0, 0, 11, 0, 5, 0] = multiSearchAllPositions(materialize('blwpfdjjkxettfetdoxvxbyk'), ['wgylnwqcrojacofrcanjme', 'bl', 'qqcunzpvgi', '', 'ijemdmmdxkakrawwdqrjtrttig', 'qwkaifalc', 'xe', 'zqocnfuvzowuqkmwrfxw', 'xpaayeljvly', 'wvphqqhulpepjjjnxjfudfcomajc', 'ettfetdoxvx', 'ikablovwhnbohibbuhwjshhdemidgreqf', 'fdjjkxett', 'kiairehwbxveqkcfqhgopztgpatljgqp']) from system.numbers limit 10; +select [0, 0, 6, 1, 1, 0, 0, 1, 2, 0, 0, 0, 0, 0] = multiSearchAllPositions(materialize('vghzgedqpnqtvaoonwsz'), ['mfyndhucfpzjxzaezny', 'niejb', 'edqpnqt', '', 'v', 'kivdvealqadzdatziujdnvymmia', 'lvznmgwtlwevcxyfbkqc', 'vghzge', 'gh', 'tbzle', 'vjiqponbvgvguuhqdijbdeu', 'mshlyabasgukboknbqgmmmj', 'kjk', 'abkeftpnpvdkfyrxbrihyfxcfxablv']) from system.numbers limit 10; +select [0, 0, 0, 0, 9, 0, 7, 0, 9, 8, 0, 0] = multiSearchAllPositions(materialize('oaghnutqsqcnwvmzrnxgacsovxiko'), ['upien', 'moqszigvduvvwvmpemupvmmzctbrbtqggrk', 'igeiaccvxejtfvifrmimwpewllcggji', 'wnwjorpzgsqiociw', 'sq', 'rkysegpoej', 'tqsqcnwvmzrnxgacsovxiko', 'ioykypvfjufbicpyrpfuhugk', 's', 'qsqcnwvmzrnxgacsov', 'hhbeisvmpnkwmimgyfmybtljiu', 'kfozjowd']) from system.numbers limit 10; +select [0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 1, 20, 5, 0, 0, 14, 1, 1, 0, 0] = multiSearchAllPositions(materialize('wbjfsevqspsvbwlzrkhcfuhxddbq'), ['ltgjbz', 's', 'qdfnmggupdfxjfnmvwyrqopxtxf', 'sazlkmaikcltojbzbmdfddu', 'yzanifqxufyfwrxzkhngoxkrrph', 'iwskc', 'xkykshryphyfnwcnmjfqjrixykmzmwm', 'wwpenztbhkdbwidfkypqlxivsjs', 'rlkevy', 'qigywtkezwd', '', 'c', 'sevqspsvbwlzrk', 'gwg', 'iduhrjsrtodxdkjykjoghtjtvplrscitxnvt', 'wlzrkhcfuhxddb', '', 'wbjfsev', 'zytusrcvqbazb', 'tec']) from system.numbers limit 10; +select [0, 1, 5, 0, 6, 8, 0, 3, 2, 0, 0, 9, 0, 4, 0, 0] = multiSearchAllPositions(materialize('mxiifpzlovgfozpgirtio'), ['srullnscuzenzhp', '', 'f', 'apetxezid', 'pzlovgf', 'lo', 'ecbmso', 'i', 'xiifpzlovgfozpgir', 'bnefwypvctubvslsesnctqspdyctq', 'tdncmgbikboss', 'o', 'zmgobcarxlxaho', 'ifpzlovgfozpg', 'dwmjqyylvsxzfr', 'pxhrecconce']) from system.numbers limit 10; +select [0, 0, 0, 2, 0, 0, 2, 0, 8, 0, 0, 0, 7, 0, 0, 0, 21, 3, 1, 8] = 
multiSearchAllPositions(materialize('jtvnrdpdevgnzexqdrrxqgiujexhm'), ['ibkvzoqmiyfgfztupug', 'iqzeixfykxcghlbgsicxiywlurrgjsywwk', 'vzdffjzlqxgzdcrkgoro', 'tvnrdpdevgnzexqdr', 'nqywueahcmoojtyjlhfpysk', 'iqalixciiidvrtmpzozfb', 'tv', 'rxkfeasoff', 'devgnzexqdrrxqgiuj', 'kvvuvyplboowjrestyvdfrxdjjujvkxy', 'shkhpneekuyyqtxfxutvz', 'yy', 'pdevgnz', 'nplpydxiwnbvlhoorcmqkycqisi', 'jlkxplbftfkxqgnqnaw', 'qdggpjenbrwbjtorbi', 'qgiuje', 'vnrdpd', '', 'dev']) from system.numbers limit 10; +select [14, 0, 0, 7, 20, 6, 0, 13, 0, 0, 20, 0, 20, 2, 0, 8, 2, 11, 2, 0] = multiSearchAllPositions(materialize('asjwxabjrwgcdviokfaoqvqiafz'), ['v', 'zqngytligwwpzxhatyayvdnbbj', 'gjicovfzgbyagiirn', 'bjrwgcdviok', 'oqvqiafz', 'abjrwgc', 'wulrpfzh', 'dviokfao', 'esnchjuiufjadqmdtrpcd', 'tkodqzsjchpaftk', 'oqvq', 'eyoshlrlvmnqjmtmloryvg', 'oqv', 'sjwx', 'uokueelyytnoidplwmmox', 'jrwgcdviokfaoqvqiaf', 'sjwxabjrwgcdviokfaoqvqi', 'gcdviokfa', 'sjwxab', 'zneabsnfucjcwauxmudyxibnmxzfx']) from system.numbers limit 10; +select [0, 16, 8, 0, 10, 0, 0, 0, 0, 1, 0, 6, 0, 1, 0, 4, 0, 6, 0, 0] = multiSearchAllPositions(materialize('soxfqagiuhkaylzootfjy'), ['eveprzxphyenbrnnznpctvxn', 'oo', 'iuhka', 'ikutjhrnvzfb', 'h', 'duyvvjizristnkczgwj', 'ihfrp', 'afpyrlj', 'uonp', 'soxfqagiuhkaylzootfjy', 'qeckxkoxldpzzpmkbvcex', 'agiuhkaylzo', 'tckcumkbsgrgqjvtlijack', '', 'fnfweqlldcdnwfaohqohp', 'fqagiuhkayl', 'pqnvwprxwwrcjqvfsbfimwye', 'agi', 'ta', 'r']) from system.numbers limit 10; +select [3, 7, 1, 6, 0, 1, 0, 11, 0, 9, 17, 1, 18, 12] = multiSearchAllPositions(materialize('ladbcypcbcxahmujwezkvweud'), ['db', 'pcbcxahm', 'lad', 'ypcb', 'atevkzyyxhphtuekymhh', 'lad', 'mltjrwaibetrtwpfa', 'xahmujwezkvweud', 'dg', 'bcxahmujw', 'we', '', 'e', 'ahmujwezkvw']) from system.numbers limit 10; +select [6, 0, 11, 0, 7, 0, 0, 0, 6, 1, 0, 3, 0, 0, 0, 0] = multiSearchAllPositions(materialize('hhkscgmqzmuwltmrhtxnnzsxl'), ['gmqzmuwltmrh', 'qtescwjubeqhurqoqfjauwxdoc', 'uwltmrh', 'qlhyfuspwdtecdbrmrqcnxghhlnbmzs', 'm', 'kcsuocwokvohnqonnfzmeiqtomehksehwc', 'hoxocyilgrxxoek', 'nisnlmbdczjsiw', 'gmqz', '', 'cqzz', 'k', 'utxctwtzelxmtioyqshxedecih', 'ifsmsljxzkyuigdtunwk', 'ojxvxwdosaqjhrnjwisss', 'dz']) from system.numbers limit 10; +select [0, 0, 19, 7, 0, 0, 1, 0, 0, 12, 0, 0, 1, 0, 1, 1, 5, 0, 23, 8] = multiSearchAllPositions(materialize('raxgcqizulxfwivauupqnofbijxfr'), ['sxvhaxlrpviwuinrcebtfepxxkhxxgqu', 'cuodfevkpszuimhymxypktdvicmyxm', 'pqnof', 'i', 'ufpljiniflkctwkwcrsbdhvrvkizticpqkgvq', 'osojyhejhrlhjvqrtobwthjgw', '', 'anzlevtxre', 'ufnpkjvgidirrnpvbsndfnovebdily', 'fwivauupqnofbi', 'rywyadwcvk', 'ltnlhftdfefmkenadahcpxw', '', 'xryluzlhnsqk', 'r', '', 'cqizulxfwivauupqnofb', 'y', 'fb', 'zulxfwivauupqnofbijxf']) from system.numbers limit 10; +select [4, 0, 0, 0, 0, 24, 1, 2, 0, 2, 0, 0, 8, 0] = multiSearchAllPositions(materialize('cwcqyjjodlepauupgobsgrzdvii'), ['q', 'yjppewylsqbnjwnhokzqtauggsjhhhkkkqsy', 'uutltzhjtc', 'pkmuptmzzeqhichaikwbggronli', 'erzgcuxnec', 'dvii', '', 'w', 'fkmpha', 'wcqyjjodlepauupgobsgrz', 'cbnmwirigaf', 'fcumlot', 'odlepauu', 'lthautlklktfukpt']) from system.numbers limit 10; +select [1, 1, 1, 1, 22, 0, 0, 8, 18, 15] = multiSearchAllPositions(materialize('vpscxxibyhvtmrdzrocvdngpb'), ['', '', '', '', 'n', 'agrahemfuhmftacvpnaxkx', 'dqqwvfsrqv', 'byhvtmrdzrocv', 'ocvdn', 'dzrocvdngpb']) from system.numbers limit 10; +select [1, 1, 1, 15, 10, 0, 0, 0, 0, 2] = multiSearchAllPositions(materialize('nfoievsrpvheprosjdsoiz'), ['', 'nfo', '', 'osjd', 'vheprosjdsoiz', 'az', 'blhvdycvjnxaipvxybs', 
'umgxmpkvuvuvdaczkz', 'gfspmnzidixcjgjw', 'f']) from system.numbers limit 10; +select [0, 0, 2, 2, 0, 0, 0, 11, 10, 4, 9, 1, 6, 4, 0, 0] = multiSearchAllPositions(materialize('bdmfwdisdlgbcidshnhautsye'), ['uxdceftnmnqpveljer', 'xdnh', 'dmf', 'dmfwdisdlgbc', 'cpwnaijpkpyjgaq', 'doquvlrzhusjbxyqcqxvwr', 'llppnnmtqggyfoxtawnngsiiunvjjxxsufh', 'gbcidshnhau', 'lgbcids', 'f', 'dlgbc', 'bdmfwdisdlgbcids', 'disdlgbcidshnhautsy', 'fwdisdlgbcidshn', 'zfpbfc', 'triqajlyfmxlredivqiambigmge']) from system.numbers limit 10; +select [0, 0, 16, 0, 0, 0, 14, 6, 2, 1, 0, 0, 1, 0, 10, 12, 0, 0, 0, 0] = multiSearchAllPositions(materialize('absimumlxdlxuzpyrunivcb'), ['jglfzroni', 'wzfmtbjlcdxlbpialqjafjwz', 'yrun', 'fgmljkkp', 'nniob', 'fdektoyhxrumiycvkwekphypgti', 'zp', 'um', 'bsimu', '', 'yslsnfisaebuujltpgcskhhqcucdhb', 'xlaphsqgqsfykhilddctrawerneqoigb', '', 'pdvcfxdlurmegspidojt', 'd', 'xu', 'fdp', 'xjrqmybmccjbjtvyvdh', 'nvhdfatqi', 'neubuiykajzcrzdbvpwjhlpdmd']) from system.numbers limit 10; +select [0, 0, 0, 9, 0, 0, 1, 1, 1, 1] = multiSearchAllPositions(materialize('lvyenvktdnylszlypuwqecohy'), ['ihlsiynj', 'ctcnhbkumvbgfdclwjhsswpqyfrx', 'rpgqwkydwlfclcuupoynwrfffogxesvmbj', 'dnyl', 'coeqgdtbemkhgplprfxgwpl', 'dkbshktectbduxlcaptlzspq', 'l', 'lvyenvktdnylszlypuw', 'lvyenvk', '']) from system.numbers limit 10; +select [1, 0, 0, 0, 0, 1, 2, 22, 8, 17, 1, 13, 0, 0, 0, 0, 0, 5] = multiSearchAllPositions(materialize('wphcobonpgaqwgfenotzadgqezx'), ['', 'qeuycfhkfjwokxgrkaodqioaotkepzlhnrv', 'taehtytq', 'gejlcipocalc', 'poyvvvntrvqazixkwigtairjvxkgouiuva', '', 'phc', 'dg', 'npgaqwg', 'notzadgqe', '', 'wgfe', 'smipuxgvntys', 'qhrfdytbfeujzievelffzrv', 'cfmzw', 'hcywnyguzjredwjbqtwyuhtewuhzkc', 'tssfeinoykdauderpjyxtmb', 'obonpgaqwgfen']) from system.numbers limit 10; +select [0, 0, 0, 0, 0, 6, 6, 0, 0, 2, 0, 5, 2, 0, 6, 3] = multiSearchAllPositions(materialize('qvslufpsddtfudzrzlvrzdra'), ['jxsgyzgnjwyd', 'hqhxzhskwivpuqkjheywwfhthm', 'kbwlwadilqhgwlcpxkadkamsnzngms', 'fxunda', 'nlltydufobnfxjyhch', 'fpsddtfudzrzl', 'fp', 'ykhxjyqtvjbykskbejpnmbxpumknqucu', 'iyecekjcbkowdothxc', 'vslufpsddtfu', 'mjgtofkjeknlikrugkfhxlioicevil', 'uf', 'vslufpsdd', 'cxizdzygyu', 'fpsddtfudzrz', 'slufp']) from system.numbers limit 10; +select [12, 0, 0, 0, 0, 1, 6, 0, 1, 2] = multiSearchAllPositions(materialize('ydsbycnifbcforymknzfi'), ['forymkn', 'vgxtcdkfmjhc', 'ymugjvtmtzvghmifolzdihutqoisl', 'fzooddrlhi', 'bdefmxxdepcqi', '', 'cnif', 'ilzbhegpcnkdkooopaguljlie', '', 'dsbycnifbcforym']) from system.numbers limit 10; +select [0, 2, 4, 1, 1, 3, 0, 0, 0, 7] = multiSearchAllPositions(materialize('sksoirfwdhpdyxrkklhc'), ['vuixtegnp', 'ks', 'oirfwdhpd', 'sksoirf', 'skso', 'soi', 'eoxpa', 'vpfmzovgatllf', 'txsezmqvduxbmwu', 'fw']) from system.numbers limit 10; +select [2, 21, 8, 10, 6, 0, 1, 11, 0, 0, 21, 4, 29, 0] = multiSearchAllPositions(materialize('wlkublfclrvgixpbvgliylzbuuoyai'), ['l', 'ylzbuu', 'clr', 'rvgi', 'lf', 'bqtzaqjdfhvgddyaywaiybk', '', 'vgixpbv', 'ponnohwdvrq', 'dqioxovlbvobwkgeghlqxtwre', 'y', 'ublfclrvgix', 'a', 'eoxxbkaawwsdgzfweci']) from system.numbers limit 10; +select [0, 0, 2, 1, 1, 9, 1, 0, 0, 1] = multiSearchAllPositions(materialize('llpbsbgmfiadwvvsciak'), ['knyjtntotuldifbndcpxzsdwdduv', 'lfhofdxavpsiporpdyfziqzcni', 'lpbsbgmf', 'llpbsbgmfi', 'llpbsbgmfiadwvv', 'fia', '', 'uomksovcuhfmztuqwzwchmwvonk', 'ujbasmokvghmredszgwe', '']) from system.numbers limit 10; +select [3, 0, 0, 0, 6, 1, 7, 0, 2, 1, 1, 0, 7, 0, 1, 0, 1, 1, 5, 11] = 
multiSearchAllPositions(materialize('hnmrouevovxrzrejesigfukkmbiid'), ['m', 'apqlvipphjbui', 'wkepvtnpu', 'amjvdpudkdsddjgsmzhzovnwjrzjirdoxk', 'ue', '', 'evov', 'qoplzddxjejvbmthnplyha', 'nmrouevovxrz', '', 'hnmrouev', 'hnzevrvlmxnjmvhitgdhgd', 'evovxrzrejesig', 'yvlxrjaqdaizishkftgcuikt', '', 'buyrmbkvqukochjteumqchrhxgtmuorsdgzlfn', '', 'hnmrouevov', 'ouevovx', 'xr']) from system.numbers limit 10; +select [0, 13, 0, 0, 0, 0, 0, 14, 0, 0, 1, 12, 0, 1] = multiSearchAllPositions(materialize('uwfgpemgdjimotxuxrxxoynxoaw'), ['uzcevfdfy', 'otxuxrxxoynxoa', 'xeduvwhrogxccwhnzkiolksry', 'pxdszcyzxlrvkymhomz', 'vhsacxoaymycvcevuujpvozsqklahstmvgt', 'zydsajykft', 'vdvqynfhlhoilkhjjkcehnpmwgdtfkspk', 'txuxrx', 'slcaryelankprkeyzaucfhe', 'iocwevqwpkbrbqvddaob', 'uwfg', 'motxuxrxx', 'kpzbg', '']) from system.numbers limit 10; +select [1, 1, 0, 6, 6, 0, 0, 0, 8, 0, 8, 14, 1, 5, 6, 0, 0, 1] = multiSearchAllPositions(materialize('epudevopgooprmhqzjdvjvqm'), ['ep', 'epudevopg', 'tlyinfnhputxggivtyxgtupzs', 'vopgoop', 'v', 'hjfcoemfk', 'zjyhmybeuzxkuwaxtcut', 'txrxzndoxyzgnzepjzagc', 'pgooprmhqzj', 'wmtqcbsofbe', 'pgo', 'm', '', 'evopgooprmhqzjdv', 'vopgooprmhqzjdv', 'gmvqubpsnvrabixk', 'wjevqrrywloomnpsjbuybhkhzdeamj', '']) from system.numbers limit 10; +select [15, 4, 4, 0, 0, 1, 1, 0, 0, 0, 0, 20, 0, 10, 1, 1, 0, 2, 4, 3] = multiSearchAllPositions(materialize('uogsfbdefogwnekfoeobtkrgiceksz'), ['kfoeobtkrgice', 'sfbd', 'sfbdefogwn', 'zwtenhiqavmqoolkvjiqjfb', 'vnjkshyvpwhrauackplqllakcjyamvsuokrxbfv', 'uog', '', 'qtzuhdcdymytgtscvzlzswdlrqidreuuuqk', 'vlridmjlbxyiljpgxsctzygzyawqqysf', 'xsnkwyrmjaaaryvrdgtoshdxpvgsjjrov', 'fanchgljgwosfamgscuuriwospheze', 'btkrgicek', 'ohsclekvizgfoatxybxbjoxpsd', 'ogwnekfoeobtkr', '', '', 'vtzcobbhadfwubkcd', 'og', 's', 'gs']) from system.numbers limit 10; +select [0, 0, 5, 1, 0, 5, 1, 6, 0, 1, 9, 0, 1, 1] = multiSearchAllPositions(materialize('aoiqztelubikzmxchloa'), ['blc', 'p', 'ztelubikzmxchlo', 'aoiqztelubi', 'uckqledkyfboolq', 'ztelubikzmxch', 'a', 'telubikzm', 'powokpdraslpadpwvrqpbb', 'aoiqztelu', 'u', 'kishbitagsxnhyyswn', '', '']) from system.numbers limit 10; +select [5, 11, 0, 0, 0, 5, 0, 0, 0, 1, 16, 0, 0, 0, 0, 0] = multiSearchAllPositions(materialize('egxmimubhidowgnfziwgnlqiw'), ['imubhidowgnfzi', 'dowgnf', 'yqpcpfvnfpxetozraxbmzxxcvtzm', 'xkbaqvzlqjyjoiqourezbzwaqkfyekcfie', 'jjctusdmxr', 'imubhi', 'zawnslbfrtqohnztmnssxscymonlhkitq', 'oxcitennfpuoptwrlmc', 'ac', 'egxmi', 'fziwgn', 'rt', 'fuxfuctdmawmhxxxg', 'suulqkrsfgynruygjckrmizsksjcfwath', 'slgsq', 'zcbqjpehilwyztumebmdrsl']) from system.numbers limit 10; +select [20, 0, 9, 0, 0, 14, 0, 5, 8, 3, 0, 0, 0, 4] = multiSearchAllPositions(materialize('zczprzdcvcqzqdnhubyoblg'), ['obl', 'lzrjyezgqqoiydn', 'vc', 'nbvwfpmqlziedob', 'pnezljnnujjbyviqsdpaqkkrlogeht', 'dn', 'irvgeaq', 'rzdcvcqzqdnh', 'cvcqzqdnh', 'zprzdcv', 'wvvgoexuevmqjeqavsianoviubfixdpe', 'aeavhqipsvfkcynyrtlxwpegwqmnd', 'blckyiacwgfaoarfkptwcei', 'prz']) from system.numbers limit 10; +select [2, 1, 1, 9, 10, 5, 0, 0, 0, 2, 9, 7, 9, 0, 1, 9, 7, 0] = multiSearchAllPositions(materialize('mvovpvuhjwdzjwojcxxrbxy'), ['vo', '', '', 'jwdz', 'wdzj', 'pvu', 'ocxprubxhjnji', 'phzfbtacrg', 'jguuqhhxbrwbo', 'vovpvuhjwd', 'jw', 'u', 'jwdzjwojcx', 'nlwfvolaklizslylbvcgicbjw', '', 'jwd', 'uhjwdz', 'bbcsuvtru']) from system.numbers limit 10; +select [2, 0, 21, 0, 0, 0, 3, 0, 0, 0, 0, 10, 1, 18] = multiSearchAllPositions(materialize('nmdkwvafhcbipwoqtsrzitwxsnabwf'), ['m', 'ohlfouwyucostahqlwlbkjgmdhdyagnihtmlt', 'itwx', 'jjkyhungzqqyzxrq', 
'abkqvxxpu', 'lvzgnaxzctaarxuqowcski', 'dkwvafhcb', 'xuxjexmeeqvyjmpznpdmcn', 'vklvpoaakfnhtkprnijihxdbbhbllnz', 'fpcdgmcrwmdbflnijjmljlhtkszkocnafzaubtxp', 'hmysdmmhnebmhpjrrqpjdqsgeuutsj', 'cbipwoqtsrzitwxsna', 'nm', 'srzitwx']) from system.numbers limit 10; +select [17, 5, 0, 13, 0, 0, 10, 1, 0, 19, 10, 8, 0, 4] = multiSearchAllPositions(materialize('gfvndbztroigxfujasvcdgfbh'), ['asvcdgf', 'dbztroigxfujas', 'pr', 'xfujas', 'nxwdmqsobxgm', 'wdvoepclqfhy', 'oigxfu', '', 'flgcghcfeiqvhvqiriciywbkhrxraxvneu', 'vcd', 'oigxfu', 'troigxfuj', 'gbnyvjhptuehkefhwjo', 'ndbz']) from system.numbers limit 10; +select [0, 14, 1, 0, 0, 1, 1, 11, 0, 8, 6, 0, 3, 19, 7, 0] = multiSearchAllPositions(materialize('nofwsbvvzgijgskbqjwyjmtfdogzzo'), ['kthjocfzvys', 'skbqjwyjmtfdo', 'nof', 'mfapvffuhueofutby', 'vqmkgjldhqohipgecie', 'nofwsbv', '', 'ijgs', 'telzjcbsloysamquwsoaso', 'vzgijgskbqjwyjmt', 'bvvzgijgskbqjwyjmtfd', 'hdlvuoylcmoicsejofcgnvddx', 'fwsbvvzgijgskb', 'wyjm', 'vvzgijg', 'fwzysuvkjtdiufetvlfwf']) from system.numbers limit 10; +select [10, 2, 13, 0, 0, 0, 2, 0, 9, 2, 4, 1, 1, 0, 1, 6] = multiSearchAllPositions(materialize('litdbgdtgtbkyflsvpjbqwsg'), ['tbky', 'itdbgdtgtb', 'yflsvpjb', 'ikbylslpoqxeqoqurbdehlroympy', 'hxejlgsbthvjalqjybc', 'sontq', 'itdbgd', 'ozqwgcjqmqqlkiaqppitsvjztwkh', 'gtbkyf', 'itdbgdtgtbkyfls', 'dbg', 'litdb', '', 'qesbakrnkbtfvwu', 'litd', 'g']) from system.numbers limit 10; +select [0, 0, 1, 1, 5, 0, 8, 12, 0, 2, 0, 7, 0, 6] = multiSearchAllPositions(materialize('ijzojxumpvcxwgekqimrkomvuzl'), ['xirqhjqibnirldvbfsb', 'htckarpuctrasdxoosutyxqioizsnzi', '', '', 'jxu', 'dskssv', 'mpvcxwgekqi', 'xwgek', 'qsuexmzfcxlrhkvlzwceqxfkyzogpoku', 'jzojx', 'carjpqihtpjniqz', 'umpvcxwgekq', 'krpkzzrxxtvfhdopjpqcyxfnbas', 'xumpvcxwg']) from system.numbers limit 10; +select [0, 0, 0, 6, 0, 8, 0, 2, 0, 0, 0, 0, 14, 0, 0, 1, 1, 0, 0, 0] = multiSearchAllPositions(materialize('zpplelzzxsjwktedrrtqhfmoufv'), ['jzzlntsokwlm', 'cb', 'wuxotyiegupflu', 'lzzxsjwkte', 'owbxgndpcmfuizpcduvucnntgryn', 'zxsjwktedrrtqhf', 'kystlupelnmormqmqclgjakfwnyt', 'pple', 'lishqmxa', 'mulwlrbizkmtbved', 'uchtfzizjiooetgjfydhmzbtmqsyhayd', 'hrzgjifkinwyxnazokuhicvloaygeinpd', 'tedrrt', 'shntwxsuxux', 'evrjehtdzzoxkismtfnqp', 'z', '', 'nxtybut', 'vfdchgqclhxpqpmitppysbvxepzhxv', 'wxmvmvjlrrehwylgqhpehzotgrzkgi']) from system.numbers limit 10; + +select [15, 19, 0, 0, 15, 0, 0, 1, 2, 6] = multiSearchAllPositionsUTF8(materialize('зжерхмчсйирдчрришкраоддцфгх'), ['ришкра', 'раоддц', 'фттиалусгоцжлтщзвумрдчи', 'влййи', 'ришкра', 'цгфжуцгивй', 'ккгжхрггчфглх', 'з', 'жерхмчсйи', 'мчсйирдчрришкраоддц']) from system.numbers limit 10; +select [0, 0, 0, 1, 4, 0, 14, 0, 1, 8, 8, 9, 0, 0, 4, 0] = multiSearchAllPositionsUTF8(materialize('етвхйчдобкчукхпщлмжпфайтфдоизщ'), ['амфшужперосрфщфлижйййжжжй', 'ххкбщшзлмщггтшцпсдйкдшйвхскемц', 'ергйплгпнглккшкарещимгапхг', '', 'хйчдо', 'вввбжовшзйбгуоиждепйабаххеквщижтйиухос', 'хпщл', 'жфуомщуххнедзхищнгхрквлпмзауеегз', 'етвхй', 'о', 'о', 'бк', 'цфецккифж', 'аизлокл', 'х', 'слщгеивлевбчнчбтшгфмжрфка']) from system.numbers limit 10; +select [0, 0, 1, 2, 0, 0, 14, 0, 3, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('йбемооабурнирйофшдгпснж'), ['гпфцл', 'нчбперпмцкввдчсщвзйрдфнф', '', 'бем', 'ч', 'жгш', 'йофшдгпснж', 'шасгафчг', 'емооабур', 'пиохцжццгппщчопзйлмуотз', 'рпдомнфвопхкшешйишумбацтл', 'нисиийфррбдоц']) from system.numbers limit 10; +select [1, 18, 12, 0, 0, 1, 1, 3, 7, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('гсщнфийтфзжцйпфбйалущ'), ['', 'алущ', 'цйпфбйал', 
'цвбфцйвсвлицсчнргпцнр', 'х', 'гс', '', 'щн', 'й', 'дгйрвцщтп', 'уитвквоффвцхфишрлерйцувф', 'кфтййлпнзжчижвглзкижн']) from system.numbers limit 10; +select [14, 0, 5, 5, 0, 6, 0, 16, 0, 0] = multiSearchAllPositionsUTF8(materialize('ефщнйнуйебнснлрцгкеитбг'), ['лрцгкеитб', 'епклжфцпнфопе', 'йнуйебн', 'й', 'тлт', 'нуйебнснлрцгкеит', 'глечршгвотумкимтлм', 'цгк', 'щгйчой', 'звкцкчк']) from system.numbers limit 10; +select [0, 1, 18, 6, 0, 3, 0, 0, 25, 0, 0, 1, 16, 5, 1, 7, 0, 0] = multiSearchAllPositionsUTF8(materialize('пумгмцшмжштсшлачсжарерфиозиг'), ['чсуубфийемквмоотванухмбрфхжоест', '', 'жар', 'цшмжш', 'жртещтинтвпочнкдткцза', 'м', 'адзгтбаскщгдшжл', 'штфжшллезпджигщфлезфгзчайанхктицштйй', 'о', 'етадаарйсцейдошшцечхзлшлрртсрггцртспд', 'зтвшалрпфлщбцд', 'пу', 'ч', 'мцшмжштсшлачсж', '', 'шмжшт', 'ещтжшйтчзчаноемрбц', 'тевбусешйрйчшзо']) from system.numbers limit 10; +select [7, 10, 0, 0, 0, 0, 1, 12, 9, 2, 0, 0, 0, 4, 1, 1, 0, 6] = multiSearchAllPositionsUTF8(materialize('дупгвндвйжмаузнллнзл'), ['двйжмаузн', 'жмаузнлл', 'емйжркоблновцгпезрдавкбелцщста', 'щзкгм', 'лебрпцрсутшриащгайвц', 'лзнмл', 'д', 'ауз', 'йжмау', 'упгвндвйж', 'жщсббфвихг', 'всигсеигцбгаелтчкирлнзшзцжещнс', 'рмшиеиесрлщципщхкхтоцщчйоо', 'гвн', '', '', 'йадеоцлпшпвщзещзкхйрейопмажбб', 'ндв']) from system.numbers limit 10; +select [0, 0, 0, 8, 3, 10, 22, 0, 13, 11, 0, 1, 18, 0, 1, 0] = multiSearchAllPositionsUTF8(materialize('жшзфппавввслфцлнщшопкдшку'), ['саоткнхфодзаа', 'кйхванкзаисйбврщве', 'бчоуучватхфукчф', 'вввслфц', 'з', 'вслфцлнщшопк', 'дшк', 'из', 'фцл', 'с', 'зртмцтпощпщхк', 'жшзфппавввслфц', 'шопк', 'збтхрсдтатхпрзлхдооощифачхчфн', '', 'жщшийугз']) from system.numbers limit 10; +select [2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 21, 0, 6, 0] = multiSearchAllPositionsUTF8(materialize('пчботухвгдчекмжндбоожш'), ['чботухвгдчекмжндб', 'от', 'гвсжжйлбтщчучнхсмдйни', 'жцжзмшлибшефуоуомпацбщщу', 'онхфлуцйлхтбмц', 'йтепжу', 'хтдрпвкщрли', 'аддайф', 'нхегщккбфедндоацкиз', 'йгкцзртфжгв', 'буелрщмхйохгибжндфшщвшрлдччрмфмс', 'цщцтзфнщ', 'уч', 'пчб', 'жш', 'пнфббтшйгхйрочнлксщпгвжтч', 'ухвг', 'лсцппузазщрйхймщбзоршощбзленхп']) from system.numbers limit 10; +select [0, 0, 4, 11, 0, 0, 0, 0, 0, 11, 2, 4, 6, 0, 0, 1, 2, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('тжрмчпваухрхуфбгнифгбопфт'), ['дпмгкекщлнемссаицщпащтиуцхкфчихтц', 'акйиуоатунтчф', 'мчпва', 'рхуфбгнифгб', 'кнаишж', 'пчвотенеафкухжцешбцхг', 'опеа', 'ушчадфтчхечеуркбтел', 'ашшптаударчжчмвалтдхкимищпф', 'рхуфбгниф', 'ж', 'мчпваухрхуфбгнифг', 'пваухрху', 'зргачбтцдахвймсбсврбндзтнущхвп', 'асбфцавбгуолг', 'тж', 'жрмчпваухрх', 'мрвзцгоб', 'чрцснчсдхтзжвнздзфцвхеилишдбж', 'кчт']) from system.numbers limit 10; +select [0, 2, 4, 0, 6, 0, 0, 0, 0, 19, 7, 1, 0, 1, 0, 0, 2, 10, 0, 1] = multiSearchAllPositionsUTF8(materialize('опрурпгабеарушиойцрхвбнсщ'), ['йошуоесдщеж', 'пр', 'урпгабеарушиой', 'хщиаршблашфажщметчзи', 'пгабеарушиойцрхвб', 'щцбдвц', 'еечрззвкожзсдурйщувмцйшихдц', 'офхачгсзашфзозрлба', 'айдфжджшжлрргмабапткбцпиизигдтс', 'рх', 'габ', '', 'цнкдбфчщшмчулврбцчакщвзхлазфа', '', 'екбтфпфилсаванхфкмчнпумехиищди', 'епвщхаклшомвцжбф', 'прурпгабе', 'еарушиойцрхв', 'црвтгрзтитц', 'опрурпг']) from system.numbers limit 10; +select [0, 10, 1, 0, 0, 0, 0, 0, 10, 0, 15, 2] = multiSearchAllPositionsUTF8(materialize('угпщлзчжшбзвууцшатпщцр'), ['цоуарцжсз', 'бз', '', 'пщфтзрч', 'лфуипмсдмнхнгйнтк', 'айжунцйбйцасчфдхй', 'щдфщлцптплсачв', 'грв', 'бзвууц', 'бумййшдшфашцгзфвчвзвтсувнжс', 'цшатпщ', 'гпщлзчжшб']) from system.numbers limit 10; +select [0, 15, 0, 1, 5, 0, 0, 5, 0, 
0, 0, 1, 0, 0] = multiSearchAllPositionsUTF8(materialize('цнлеодлмдцдйснитвдчтхжизв'), ['ивкчсзшугоцжчохщцабл', 'итвдчт', 'кнх', '', 'одлм', 'ктшфзбщзцуймагсоукщщудвуфо', 'ххеаефудгчхр', 'одлмдцдйснитвдчт', 'умцлпкв', 'зщсокйтцзачщафвбповжгнлавсгйг', 'бкибм', '', 'охсоихнцчцшевчеележтука', 'фаийхгжнсгищгщц']) from system.numbers limit 10; +select [0, 0, 0, 2, 0, 0, 0, 0, 3, 2, 3, 6, 0, 0, 0, 12, 4, 1] = multiSearchAllPositionsUTF8(materialize('бгдбувдужщвоошлтчрбй'), ['щвбаиф', 'итчднесжкчжвпжйвл', 'мм', 'г', 'хктзгтзазфгщшфгбеулцмдмдбдпчзх', 'сфуак', 'злйфцощегзекщб', 'фшлдтолрщфзжчмих', 'дбувдужщ', 'гдб', 'дбувдужщ', 'в', 'лчищкечнжщисцичбнзшмулпмлп', 'чжцсгмгфвлиецахзнрбмщин', 'обпжвй', 'о', 'бувдужщвоош', '']) from system.numbers limit 10; +select [0, 2, 5, 3, 2, 0, 1, 0, 0, 4, 2, 0, 0, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('шсушлорзфжзудбсейенм'), ['чнзпбновтршеумбвщчлх', 'су', 'лорзфж', 'ушлорзфжзудб', 'сушлорзфжзудбсейенм', 'ткдрхфнб', '', 'пщд', 'чбдцмщ', 'шлорзфж', 'су', 'сккигркедчожжемгнайвйчтдмхлтти', 'мц', 'пхнхрхйцйсйбхчлктз', 'иафжстлйфцр', 'алщщлангнбнйхлшлфшйонщек']) from system.numbers limit 10; +select [12, 1, 0, 5, 0, 10, 1, 0, 7, 4, 0, 1, 12, 1, 1, 1, 0, 1, 15, 0] = multiSearchAllPositionsUTF8(materialize('ощзллчубоггцвжриуардрулащйпу'), ['цвжр', '', 'нмзкаиудзтиффззшзканжвулт', 'лчубоггцвжриуардрулащйпу', 'чтцлзшуижолибаоххвшихбфжйхетивп', 'ггцвжри', '', 'йдгнвс', 'у', 'л', 'зпщнжуойдлдвхокцжнзйсйзе', '', 'цв', '', '', '', 'ехлцзгвф', '', 'риу', 'уйжгтжноомонгщ']) from system.numbers limit 10; +select [0, 12, 13, 20, 0, 1, 0, 0, 3, 4] = multiSearchAllPositionsUTF8(materialize('цбкифйтшузажопнжщарбштвдерзтдш'), ['щлмлижтншчсмксгтнсврро', 'жопнжщарбштвд', 'опнжщарб', 'бштвдерзтд', 'пуфслейщбкжмпнш', 'ц', 'маве', 'кмйхойрдлшцхишдтищвйбцкщуигваещгтнхйц', 'кифй', 'и']) from system.numbers limit 10; +select [0, 6, 0, 0, 0, 8, 0, 3, 6, 0] = multiSearchAllPositionsUTF8(materialize('еачачгбмомоххкгнвштггпчудл'), ['ндзчфчвжтцщпхщуккбеф', 'г', 'рткнфвчтфннхлжфцкгштймгмейжй', 'йчннбщфкщф', 'лсртщиндшшкичзррущвдйвнаркмешерв', 'момоххк', 'рфафчмсизлрхзуа', 'ч', 'гбмомоххкгнвштг', 'валжпошзбгзлвевчнтз']) from system.numbers limit 10; +select [0, 0, 10, 0, 8, 13, 0, 0, 19, 15, 3, 1] = multiSearchAllPositionsUTF8(materialize('зокимчгхухшкшмтшцчффвззкалпва'), ['цалфжажщщширнрвтпвмщжннрагвойм', 'оукзрдцсадешжмз', 'хшкшмтшцч', 'ауилтсаомуркпаркбцркугм', 'хухшкшмтшцчффв', 'шмтшцч', 'зщгшпцхзгцншднпеусмтжбцшч', 'щлраащсйлщрд', 'ффвзз', 'тшцчффвззкалпв', 'кимчгхухшкш', '']) from system.numbers limit 10; +select [0, 0, 1, 0, 6, 0, 6, 0, 5, 0, 13, 0, 0, 6] = multiSearchAllPositionsUTF8(materialize('йдйндиибщекгтчбфйдредпхв'), ['тдршвтцихцичощнцницшдхйбогбчубие', 'акппакуцйсхцдххнотлгирввоу', '', 'улщвзхохблтксчтб', 'и', 'ибейзчшклепзриж', 'иибщекгт', 'шидббеухчпшусцнрз', 'диибщекгтчбфйд', 'дейуонечзйзлдкшщрцйбйклччсцуй', 'тч', 'лшицлшме', 'чйнжчоейасмрщегтхвйвеевбма', 'ии']) from system.numbers limit 10; +select [15, 3, 3, 2, 0, 11, 0, 0, 0, 2, 0, 4, 0, 1, 1, 3, 0, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('нхгбфчшджсвхлкхфвтдтлж'), ['хфвтдтлж', 'гбфчшд', 'гбфчш', 'х', 'ачдгбккжра', 'вхлк', 'мщчвещлвшдщпдиимлшрвнщнфсзгщм', 'жчоббгшзщлгеепщжкчецумегпйчт', 'жжд', 'хг', 'мтсааролшгмоуйфйгщгтрв', 'бфчшд', 'чейрбтофпшишгуасоодлакчдф', 'н', 'нхгбфч', 'гбф', 'гдежсх', 'йифжацзгжбклх', 'ещпзущпбаолплвевфиаибшйубйцсзгт', 'жезгчжатзтучжб']) from system.numbers limit 10; +select [0, 10, 1, 0, 0, 0, 4, 0, 13, 1, 12, 1, 0, 6] = 
multiSearchAllPositionsUTF8(materialize('акбдестрдшерунпвойзв'), ['нркчх', 'шерунп', '', 'зжвахслфббтоиоцрзаззасгнфчх', 'шлжмдг', 'тлйайвцжчсфтцйрчосмижт', 'дестрдшерунп', 'мвамйшцбдщпчлрщд', 'у', 'акбдестрд', 'рунпвойз', '', 'айздцоилсйшцфнчтхбн', 'с']) from system.numbers limit 10; +select [1, 0, 0, 3, 2, 1, 0, 0, 1, 10, 7, 0, 5, 0, 8, 4, 1, 0, 8, 1] = multiSearchAllPositionsUTF8(materialize('кйхпукаеуддтйччхлнпсуклрф'), ['кйхпукаеуддтйччхл', 'йатлрйкстлхфхз', 'фгихслшкж', 'хпу', 'йхпукаеу', '', 'сруакбфоа', 'оажуз', 'кйхпукаеуддтйччх', 'ддтйччхлн', 'аеуддтйччхл', 'тмажиойщтпуцглхфишеиф', 'укаеуддтйччхлнпс', 'ретифе', 'еуддтйччхлнпсуклр', 'пукаеуд', 'кйхпу', 'таппфггвджлцпжшпишбпциуохсцх', 'еуд', '']) from system.numbers limit 10; +select [2, 3, 3, 16, 5, 13, 0, 0, 0, 18, 0, 6, 0, 16, 0, 10, 3, 0] = multiSearchAllPositionsUTF8(materialize('плврйщовкзнбзлбжнсатрцщщучтйач'), ['лврйщовкзнбзлбж', 'врйщовкзнбзлбжнса', 'врйщовкзнбз', 'жнсатрцщщучтйач', 'йщовкзнбзлбжнсатрцщщуч', 'злбжнсатрцщ', 'ввтбрдт', 'нжйапойг', 'ннцппгперхйвдхоеожупйебочуежбвб', 'сатрцщщу', 'деваийтна', 'щ', 'вкжйгкужжгтевлцм', 'жнс', 'датг', 'знбзлбжнсатрцщщучтйа', 'врйщовк', 'оашмкгчдзщефм']) from system.numbers limit 10; +select [3, 1, 19, 1, 0, 0, 0, 0, 11, 3, 0, 0] = multiSearchAllPositionsUTF8(materialize('фчдеахвщжхутхрккхасвсхепщ'), ['деах', '', 'свсхепщ', '', 'анчнсржйоарвтщмрж', 'нечбтшщвркгд', 'вштчцгшж', 'з', 'у', 'деахвщ', 'ххкцжрвзкжзжчугнфцшуиаклтмц', 'фцкжшо']) from system.numbers limit 10; +select [16, 0, 0, 1, 8, 14, 0, 12, 12, 5, 0, 0, 16, 0, 11, 0] = multiSearchAllPositionsUTF8(materialize('щмнжчввбжцчммчшсрхйшбктш'), ['срхйшбк', 'йлзцнржчууочвселцхоучмщфчмнфос', 'еижлафатшхщгшейххжтубзвшпгзмзцод', '', 'бжцчммчшсрхй', 'чшсрхй', 'влемчммйтителщвзган', 'ммч', 'ммчшсрх', 'чввбж', 'нобзжучшошмбщешлхжфгдхлпнгпопип', 'цгт', 'срхйш', 'лкклмйжтеа', 'чммчшсрхйшбктш', 'йежффзнфтнжхфедгбоахпг']) from system.numbers limit 10; +select [1, 12, 9, 5, 1, 0, 6, 3, 0, 1] = multiSearchAllPositionsUTF8(materialize('кжнщсашдзитдмщцхуоебтфжл'), ['', 'дмщцхуоебт', 'зитдмщцхуоебт', 'сашдзитдмщцхуое', 'кжнщ', 'тхкйтшебчигбтмглшеужззоббдилмдм', 'ашдзитдмщцхуоебтф', 'нщсашдз', 'аузщшр', 'кжнщсашдз']) from system.numbers limit 10; +select [2, 0, 0, 0, 1, 0, 2, 0, 0, 17, 0, 8, 7, 14, 0, 0, 0, 7, 9, 23] = multiSearchAllPositionsUTF8(materialize('закуфгхчтшивзчжаппбжнтслщввущ'), ['а', 'днойвхфрммтж', 'внтлжрхзрпчбтуркшдатннглечг', 'ахиеушжтфкгцщтзхмжнрхдшт', '', 'тцчгрззржмдшйщфдцрбшжеичч', 'а', 'ктиечцпршнфнбчуолипацчдсосцнлфаццм', 'аусрлхдцегферуо', 'ппбжнт', 'жкццуосгвп', 'чтшивзчжаппб', 'хчтшивзчжаппб', 'чжаппбжнтслщ', 'ччрлфдмлу', 'щзршффбфчзо', 'ущуймшддннрхзийлваежщухч', 'хчтши', 'тшивзчжаппбжнтсл', 'слщв']) from system.numbers limit 10; +select [1, 1, 9, 2, 0, 3, 7, 0, 0, 19, 2, 2, 0, 8] = multiSearchAllPositionsUTF8(materialize('мвкзккупнокченйнзкшбдрай'), ['м', '', 'н', 'вкз', 'гдпертшйбртотунур', 'к', 'упнокченйнзкшбдр', 'нфшрг', 'нмждрйббдцлйемжпулдвкещхтжч', 'ш', 'вкзккупнокченйнзкшбдр', 'вкзккупнокченйнзкшбдрай', 'адииксвеавогтйторчтцвемвойшпгбнз', 'пнокченй']) from system.numbers limit 10; +select [15, 0, 0, 1, 12, 1, 0, 0, 1, 11, 0, 4, 0, 2] = multiSearchAllPositionsUTF8(materialize('отарлшпсабждфалпшножид'), ['лпшно', 'вт', 'лпжшосндутхорлиифжаакш', 'отарлшпсабждфалпшнож', 'дфал', '', 'бкцжучншжбгзжхщпзхирртнбийбтж', 'уцвцкшдзревпршурбсвйнемоетчс', '', 'ждфал', 'тлскхрнпмойчбцпфущфгф', 'рлшпсабж', 'нхнмк', 'тарлшпса']) from system.numbers limit 10; +select [0, 2, 0, 20, 0, 17, 18, 0, 1, 1, 21, 1, 0, 1, 6, 26] = 
multiSearchAllPositionsUTF8(materialize('ачйвцштвобижнзжнчбппйеабтцнйн'), ['сзхшзпетншйисщкшрвйшжуогцвбл', 'чйвцштво', 'евз', 'пй', 'хуждапрахитйажрищуллйзвчт', 'чбппйе', 'бппйеабтцнйн', 'схш', 'а', 'ачйвцштвобижнзжнчбпп', 'йеабтцнй', '', 'ег', '', 'штвобижнзжнчбпп', 'цн']) from system.numbers limit 10; +select [1, 0, 0, 3, 4, 12, 0, 9, 0, 12, 0, 0, 8, 0, 10, 3, 4, 1, 1, 9] = multiSearchAllPositionsUTF8(materialize('жмхоужежйуфцзеусеоднчкечфмемба'), ['', 'идосйксзнщйервосогф', 'тхмсйлвкул', 'хоужежйуф', 'оужежйуфцзеусеоднчкечфм', 'цзеусеоднчкеч', 'бецвдиубххвхйкажуурщщшщфбзххт', 'йуфцзеусеодн', 'мглкфтуеайсржисстнпкгебфцпа', 'цзеусео', 'уехцфучецчгшйиржтсмгхакчшввохочжпухс', 'дчвмсбткзталшбу', 'жйуфцзеусеоднчке', 'ччшщтдбпвчд', 'уфцзеусеоднчкечфмем', 'хоужежйуфцзеусеоднчкечф', 'оуже', '', 'жмхоужежйуфцзеу', 'й']) from system.numbers limit 10; +select [0, 0, 0, 3, 0, 0, 0, 0, 1, 0, 1, 0, 1, 2, 0, 0, 0, 6] = multiSearchAllPositionsUTF8(materialize('лшпцхкмтресзпзйвцфрз'), ['енрнцепацлщлблкццжсч', 'ецжужлуфаееоггрчохпчн', 'зхзнгасхебнаейбддсфб', 'пцхкмтресзпзйв', 'фчетгеодщтавиииухцундпнхлчте', 'шшгсдошкфлгдвкурбуохзчзучбжйк', 'мцщщцп', 'рх', '', 'зйошвщцгхбж', '', 'ввлпнамуцвлпзеух', '', 'шпцхкмтре', 'маабтруздрфйпзшлсжшгож', 'фдчптишмштссщшдшгх', 'оллохфпкаем', 'кмтресзпз']) from system.numbers limit 10; +select [2, 5, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 1, 1, 12, 0, 0, 0, 4, 8] = multiSearchAllPositionsUTF8(materialize('есипзсвшемлхчзмйрсфз'), ['с', 'з', 'пщчсмаиахппферзжбпвиибаачй', 'гтщкзоиежав', 'свшемлхчзм', 'шийанбке', 'зхе', 'авркудфаусзквкфффйцпзжщввенттб', 'ножцваушапиж', 'иизкежлщиафицкчщмалнпсащсднкс', 'вчмв', 'кщеурмуужжлшррце', '', '', 'х', 'алзебзпчеложихашжвхмйхрицн', 'тпзмумчшдпицпдшиаог', 'сулксфчоштаййзбзшкджббщшсей', 'пзсвшемлхчзм', 'ш']) from system.numbers limit 10; +select [0, 1, 2, 4, 0, 0, 14, 1, 13, 4, 0, 0, 1, 1] = multiSearchAllPositionsUTF8(materialize('сзиимонзффичвфжоеулсадону'), ['зфтшебтршхддмеесчд', '', 'зиимонзф', 'имон', 'езбдйшжичценлгршщшаумайаицй', 'птпщемтбмднацлг', 'фжоеулса', '', 'вфжоеулсадону', 'имонзфф', 'йщвдфдиркважгйджгжашарчучйххйднпт', 'дй', '', '']) from system.numbers limit 10; +select [12, 0, 24, 0, 9, 0, 1, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('ижсщщрзжфнгццпзкфбвезгбохлж'), ['ццпзкфбвез', 'ацррвхоптаоснулнжкщжел', 'охлж', 'тнсхбпшщнб', 'фнг', 'урйвг', '', 'цохс', 'щбйрйкжчмйзачуефч', 'афа']) from system.numbers limit 10; +select [9, 0, 0, 0, 1, 0, 7, 7, 0, 0, 1, 0, 7, 0, 0, 8, 0, 3, 0, 0] = multiSearchAllPositionsUTF8(materialize('рерфвирачйнашхрмцебфдйааеммд'), ['чйнашхрмцебфдйааеммд', 'сжщзснвкущлжплцзлизаомдизцнжлмййбохрцч', 'еппбжджмримфчйеаолидпцруоовх', 'едтжкоийггснехшсчйлвфбкцжжрчтш', '', 'пжахфднхсотй', 'ра', 'рач', 'вчримуцнхбкуйжрвфиугзфсзг', 'кщфехрххциаашщсифвашгйцвхевцщнйахтбпжщ', '', 'ртщиобчжстовйчфабалзц', 'рачйнашхрмцебфдйаае', 'ощгжосччфкуг', 'гехвжнщжссидмрфчйтнепдсртбажм', 'а', 'ицжлсрсиатевбвнжрдмзцувввтзцфтвгвш', 'рф', 'прсмлча', 'ндлхшцааурмзфгверуфниац']) from system.numbers limit 10; +select [2, 14, 10, 0, 6, 15, 1, 0, 0, 4, 5, 17, 0, 0, 3, 0, 3, 0, 9, 0] = multiSearchAllPositionsUTF8(materialize('влфощсшкщумчллфшшвбшинфппкчуи'), ['лфощ', 'лфшшвбшинфпп', 'умчллфшшвбшинф', 'слмтнг', 'сшкщумчллфшшвбшинф', 'фшшвб', '', 'рчфбчййсффнодцтнтнбцмолф', 'щфнщокхжккшкудлцжрлжкнп', 'ощ', 'щсшкщумчлл', 'швбшинфппкч', 'септзкщотишсехийлоцчапщжшжсфмщхсацг', 'нт', 'фощсшкщумчллфшшвбшинфп', 'нщпдш', 'фощс', 'мивсмча', 'щумч', 'щчйнткжпмгавфтйтибпхх']) from system.numbers limit 10; +select [0, 10, 0, 0, 0, 0, 0, 3, 0, 0, 
0, 2, 0, 11, 0, 0] = multiSearchAllPositionsUTF8(materialize('еаиалмзхцгфунфеагшчцд'), ['йнш', 'гфун', 'жлйудмхнсвфхсуедспщбтутс', 'елмуийгдйучшфлтхцппамфклйг', 'евйдецц', 'пчтфцоучфбсйщпвдацмчриуцжлтжк', 'нстмпумчспцвцмахб', 'иалмз', 'зифчп', 'чогфщимоопт', 'фдйблзеп', 'аиа', 'щугмзужзлйдктш', 'фунфеагшч', 'нйхшмсгцфжчхжвхгдхцуппдц', 'асмвмтнрейшгардллмсрзгзфйи']) from system.numbers limit 10; +select [23, 0, 8, 0, 0, 0, 0, 0, 0, 4, 0, 5, 7, 1, 9, 4] = multiSearchAllPositionsUTF8(materialize('зузйфзлхходфрхгтбпржшрктпйхеоп'), ['ктпйхео', 'лжитуддикчсмкглдфнзцроцбзтсугпвмхзллжж', 'х', 'меуфтено', 'фтдшбшрпоцедктсийка', 'кхтоомтбчвеонксабшйптаихжбтирпзшймчемжим', 'чиаущлрдкухцрдумсвивпафгмр', 'фрнпродв', 'тдгтишхйсашвмдгкчбмшн', 'йфзлхходфрхгтбпржшр', 'бежшлрйврзмумеуооплкицхлйажвцчнчсеакм', 'ф', 'лхходфрхгтб', '', 'ходфрхгтбпржшр', 'й']) from system.numbers limit 10; +select [0, 0, 0, 1, 0, 1, 22, 1, 0, 0, 0, 0, 18, 1, 0, 0, 0, 1] = multiSearchAllPositionsUTF8(materialize('чфгвчхчпщазтгмбнплдгщикойчднж'), ['мштцгтмблаезочкхзвхгрбпкбмзмтбе', 'канбжгсшхшз', 'кзинвщйччажацзйнсанкнщ', 'чфгвчхчпщазтгмбнп', 'етйцгтбнщзнржнйхж', '', 'ик', '', 'еизщвпрохдгхир', 'псумйгшфбвгщдмхжтц', 'слмжопинйхнштх', 'йшралцицммбщлквмгхцввизопнт', 'л', 'чфгвчхчпщазтгмбнплдгщ', 'пбзмхжнпгикиищжтшботкцеолчцгхпбвхи', 'хзкцгрмшгхпхуоцгоудойнжлсоййосссмрткцес', 'ажуофйпщратдйцбржжлжнжащцикжиа', '']) from system.numbers limit 10; +select [6, 0, 2, 5, 2, 9, 10, 0, 0, 4, 0, 6, 3, 2] = multiSearchAllPositionsUTF8(materialize('ишогпсисжашфшлйичлба'), ['сисжашфшлй', 'пднещбгзпмшепкфосовбеге', 'шогп', 'пс', 'шогпси', 'жаш', 'аш', 'деисмжатуклдшфлщчубфс', 'грмквкщзур', 'гпсис', 'кйпкбцмисчхдмшбу', 'сисжашф', 'о', 'шо']) from system.numbers limit 10; +select [8, 15, 13, 0, 1, 2, 5, 2, 9, 0, 0, 0] = multiSearchAllPositionsUTF8(materialize('нсчщчвсанпрлисблснокзагансхм'), ['анпрлисблснокзагансхм', 'блснокз', 'исб', 'дрмгвснпл', '', 'счщчвса', 'чвсанпрлисблснокзагансх', 'счщчвсанпрлис', 'нпрли', 'пциишуецнймуодасмжсойглретиефо', 'фхимщвкехшлг', 'слщмаимшжчфхзпрцмхшуниврлуйлжмфжц']) from system.numbers limit 10; +select [0, 5, 0, 0, 14, 0, 12, 0, 2, 3, 0, 3, 21, 5] = multiSearchAllPositionsUTF8(materialize('хажуижанндвблищдтлорпзчфзк'), ['щуфхл', 'и', 'фцежлакчннуувпаму', 'щесщжрчиктфсмтжнхекзфс', 'ищдтлорпзчф', 'дееичч', 'блищ', 'гиефгйзбдвишхбкбнфпкддмбтзиутч', 'ажуижа', 'жуижанндвблищдтлорпзчфзк', 'чщщдзетвщтччмудвзчгг', 'ж', 'пзчфз', 'ижанн']) from system.numbers limit 10; +select [0, 0, 0, 9, 15, 0, 0, 0, 1, 3, 0, 0, 1, 0, 10, 0, 4, 0, 0, 7] = multiSearchAllPositionsUTF8(materialize('россроапцмцагвиигнозхзчотус'), ['ошажбчвхсншсвйршсашкм', 'пфдчпдчдмауцгкйдажрйефапввшжлшгд', 'иеаочутввжмемчушлуч', 'цмцагвиигно', 'ииг', 'ммпжщожфйкакбущчирзоммагеиучнщмтвгихк', 'укррхбпезбжууеипрзжсло', 'ншопзжфзббилйбувгпшшиохврнфчч', '', 'ссроап', 'лийщфшдн', 'йчкбцциснгначдцйчпа', 'россроапцмцагвииг', 'кштндцтсшорввжсфщчмщчжфжквзралнивчзт', 'мца', 'нбтзетфтздцао', 'сроа', 'мщсфие', 'дткодбошенищйтрподублжскенлдик', 'апцмцагвиигноз']) from system.numbers limit 10; +select [16, 0, 0, 2, 1, 1, 0, 1, 9, 0, 0, 3] = multiSearchAllPositionsUTF8(materialize('тйсдйилфзчфплсджбарйиолцус'), ['жбарйиолцу', 'цназщжждефлбрджктеглщпунйжддгпммк', 'хгжоашцшсзкеазуцесудифчнощр', 'йс', '', 'тйсдйилфзчфп', 'ивфсплшвслфмлтххжчсстзл', '', 'зчфплсдж', 'йртопзлодбехрфижчдцйс', 'цлащцкенмшеоерееиуноп', 'с']) from system.numbers limit 10; +select [3, 2, 1, 1, 0, 0, 0, 14, 6, 0] = multiSearchAllPositionsUTF8(materialize('нсцннйрмщфбшщховвццбдеишиохл'), ['цннйр', 
'сцннйрм', 'н', 'нс', 'двтфхйзгеиеиауимбчхмщрцутф', 'пчтмшйцзсфщзшгнхщсутфжтлпаввфгххв', 'лшмусе', 'ховвццбд', 'йрмщфбшщховвццбдеи', 'гндруущрфзсфжикшзцжбил']) from system.numbers limit 10; +select [0, 18, 0, 1, 2, 0, 0, 0, 1, 7, 10, 0, 1, 0, 2, 0, 0, 18] = multiSearchAllPositionsUTF8(materialize('щидмфрсготсгхбомлмущлаф'), ['тлтфхпмфдлуоцгчскусфжчкфцхдухм', 'мущла', 'емлвзузхгндгафги', '', 'идмфрсготсгхбомлмущла', 'зфаргзлщолисцфдщсеайапибд', 'кдхоорхзжтсйимкггйлжни', 'лчгупсзждплаблаеклсвчвгвдмхклщк', 'щидмфр', 'сготсгхбомлму', 'тсгхбомлмущла', 'хсзафйлкчлди', '', 'й', 'ид', 'щлйпмздйхфзайсщсасейлфцгхфк', 'шдщчбшжбмййзеормнрноейй', 'мущ']) from system.numbers limit 10; +select [0, 13, 0, 0, 1, 0, 7, 7, 8, 0, 2, 0, 3, 0, 0, 13] = multiSearchAllPositionsUTF8(materialize('трцмлщввадлжввзчфипп'), ['хшзйийфжмдпуигсбтглй', 'ввзчфи', 'нсцчцгзегммтсшбатщзузпкшрг', 'гувйддежзфилйтш', '', 'хгзечиа', 'ввадлжввз', 'ввадлжввзчфи', 'ва', 'щтшсамклегш', 'рцмлщ', 'учзмиерфбтцучйдглбщсз', 'цмлщввадлжввзчфи', 'орйжччцнаррбоабцжзйлл', 'квпжматпцсхзузхвмйч', 'ввзчфип']) from system.numbers limit 10; +select [0, 1, 1, 0, 11, 4, 1, 2, 0, 0] = multiSearchAllPositionsUTF8(materialize('инкщблбвнскцдндбмсщщш'), ['жхрбсусахрфкафоилмецчебржкписуз', 'инкщблбвнс', '', 'зисгжфлашймлджинаоджруй', 'кцднд', 'щблбвнскцдндбмсщщ', 'инкщблбвнс', 'н', 'зб', 'фчпупшйфшбдфенгитатхч']) from system.numbers limit 10; +select [6, 0, 4, 20, 1, 0, 5, 0, 1, 0] = multiSearchAllPositionsUTF8(materialize('рзтецуйхлоорйхдбжашнларнцт'), ['у', 'бпгййекцчглпдвсцсещщкакцзтцбччввл', 'ецуйхлоо', 'нлар', 'рз', 'ккнжзшекфирфгсгбрнвжчл', 'цуйхлоорйхдбжашн', 'йнучгрчдлйвводт', 'рзте', 'нткрввтубчлщк']) from system.numbers limit 10; + +select [1, 1, 0, 0, 1, 0, 0, 3, 3, 3, 1, 0, 8, 0, 8, 1, 0, 1] = multiSearchAllPositionsCaseInsensitive(materialize('OTMMDcziXMLglehgkklbcGeAZkkdh'), ['', 'OTmmDCZiX', 'SfwUmhcGTvdYgxlzsBJpikOxVrg', 'ngqLQNIkqwguAHyqA', '', 'VVZPhzGizPnKJAkRPbosoNGJTeO', 'YHpLYTVkHnhTxMODfABor', 'mMdcZi', 'MmdCZI', 'MMdCZixmlg', '', 'hgaQHHHkIQRpPjv', 'ixMLgLeHgkkL', 'uKozJxZBorYWjrx', 'i', '', 'WSOYdEKatHkWiCtlwsCbKRnXuKcLggbkBxoq', '']) from system.numbers limit 10; +select [4, 15, 0, 0, 0, 0, 5, 0, 5, 1, 0, 1, 13, 0, 0, 3] = multiSearchAllPositionsCaseInsensitive(materialize('VcrBhHvWSFXnSEdYCYpU'), ['bhhVwSfXnSEd', 'DycyP', 'kEbKocUxLxmIAFQDiUNoAmJd', 'bsOjljbyCEcedqL', 'uJZxIXwICFBPDlUPRyDHMmTxv', 'BCIPfyArrdtv', 'hHv', 'eEMkLteHsuwsxkJKG', 'hHVWsFxNseDy', '', 'HsFlleAQfyVVCoOSLQqTNTaA', '', 'sEDY', 'UMCKQJY', 'j', 'rBhHvw']) from system.numbers limit 10; +select [1, 1, 0, 0, 1, 0, 0, 0, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('wZyCLyiWnNNdNAPWeGSQZcdqk'), ['w', '', 'vlgiXgFTplwqRbnwBumAjHvQuM', 'QoIRVKDHMlapLNiIZXvwYxluUivjY', 'WZY', 'gAFpUfPDAwgzARCIMrtbZUsNcR', 'egkLWqqdNiETeETsMG', 'dzSlJaoHKlQmENIboow', 'vPNBhcaIfsgLH', 'mlWPTCBDVTdKHxlvIUVcJXBrmTcJokAls']) from system.numbers limit 10; +select [0, 10, 0, 1, 7, 1, 6, 1, 8, 0] = multiSearchAllPositionsCaseInsensitive(materialize('pqliUxqpRcOOKMjtrZSEsdW'), ['YhskuppNFdWaTaZo', 'Coo', 'mTEADzHXPeSMCQaYbKpikXBqcfIGKs', 'PQLiUxq', 'qpRCoOK', 'PQLIu', 'XQPrcoOK', '', 'pR', 'cTmgRtcSdRIklNQVcGZthwfarLtAYh']) from system.numbers limit 10; +select [16, 1, 1, 1, 1, 4, 17, 0, 0, 0, 1, 0, 0, 0, 20, 0] = multiSearchAllPositionsCaseInsensitive(materialize('kJyseeDFCeUWoqMfubYqJqWA'), ['fub', 'kJY', '', '', 'Kj', 's', 'uBYQJq', 'sUqCmHUZIBtZPswObXSrYCwrdxdznM', 'mtZDCJENYuikJnCcJfRcSCDYDPXU', 'IDXjRjHhmjqXmCOlQ', '', 'jiEwAxIsJDu', 'YXqcEKbHxlgUliIALorSKDMlGGWeCO', 
'OstKrLpYuASEUrIlIuHIRdwLr', 'qJq', 'tnmvMTFvjsW']) from system.numbers limit 10; +select [11, 3, 1, 0, 9, 0, 0, 0, 0, 8, 3, 0] = multiSearchAllPositionsCaseInsensitive(materialize('EBSPtFpDaCIydASuyreS'), ['iyD', 'sptfpdAciyDAsuyR', 'EbS', 'IJlqfAcPMTUsTFXkvmtsma', 'AcIYda', 'fbWuKoCaCpRMddUr', 'srlRzZKeOQGGLtTLOwylLNpVM', 'ZeIgfTFxUyNwDkbnpeiPxQumD', 'j', 'daciydA', 'sp', 'dyGFtyfnngIIbcCRQzphoqIgIMt']) from system.numbers limit 10; +select [6, 0, 0, 0, 10, 0, 1, 4, 0, 15, 0, 2, 2, 6] = multiSearchAllPositionsCaseInsensitive(materialize('QvlLEEsgpydemRZAZcYbqPZHx'), ['eSgpYDEMRzAzcyBQPzH', 'NUabuIKDlDxoPXoZOKbUMdioqwQjQAiArv', 'pRFrIAGTrggEOBBxFmnZKRPtsUHEMUEg', 'CDvyjef', 'YdEMrzaZc', 'BO', '', 'leEsgPyDEmRzaZCYBqPz', 'EzcTkEbqVXaVKXNuoxqNWHM', 'Z', 'cuuHNcHCcLGb', 'V', 'vllEes', 'eS']) from system.numbers limit 10; +select [0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 5, 7, 5, 0, 11, 1] = multiSearchAllPositionsCaseInsensitive(materialize('eiCZvPdGJSmwxMIrZvEzfYFOFJmV'), ['lSydrmJDeXDYHGFFiFOOJGyCbCCDbLzbSbub', 'ewsAVflvcTBQFtvWBwuZOJKkrUArIg', 'fpEkBWaBkRWypFWtMz', 'YatSURyNtcSuerWWlTBSdBNClO', 'YO', 'CZvpdg', 'uoH', 'gtGwQSVqSJDVROmsBIxjuVNfrQnxDhWGXLBH', 'IKNs', 'HElLuRMlsRgINaNp', 'V', 'DGjsMW', 'vPDgJSmW', 'SGCwNiAmNfHSwLGZkRYEqrxBTaDRAWcyHZYzn', 'mWXMiRZvezfYf', '']) from system.numbers limit 10; +select [23, 1, 0, 17, 0, 0, 9, 3, 0, 2] = multiSearchAllPositionsCaseInsensitive(materialize('BizUwoENfLxIIYVDflhOaxyPJw'), ['yPJ', '', 'gExRSJWtZwOptFTkNlBGuxyQrAu', 'FLH', 'hCqo', 'oVGcArersxMUCNewhTMmjpyZYAIU', 'FlXIiYVdflHoAX', 'ZuWOe', 'bhfAfNdgEAtGdHylxkjgvU', 'IZUWo']) from system.numbers limit 10; +select [0, 9, 0, 0, 0, 0, 1, 0, 0, 1, 3, 0, 13, 0, 3, 5] = multiSearchAllPositionsCaseInsensitive(materialize('loKxfFSIAjbRcguvSnCdTdyk'), ['UWLIDIermdFaQVqEsdpPpAJ', 'ajBrcg', 'xmDmuYoRpGu', 'wlNjlKhVzpC', 'MxIjTspHAQCDbGrIdepFmLHgQzfO', 'FybQUvFFJwMxpVQRrsKSNHfKyyf', '', 'vBWzlOChNgEf', 'DiCssjczvdDYZVXdCfdSDrWaxmgpPXDiD', '', 'kxFFSIAjBRCGUVSNcD', 'LrPRUqeehMZapsyNJdu', 'cGuVSNcdTdy', 'NmZpHGkBIHVSoOcj', 'KxffSIAjBr', 'ffsIaJB']) from system.numbers limit 10; +select [14, 0, 11, 0, 10, 0, 0, 0, 13, 1, 2, 11, 5, 0] = multiSearchAllPositionsCaseInsensitive(materialize('uijOrdZfWXamCseueEbq'), ['sE', 'VV', 'AmcsEu', 'fUNjxmUKgnDLHbbezdTOzyLaknQ', 'XAmCsE', 'HqprIpxIcOTkDIKcVK', 'NbmirQlNsTHnAVKlF', 'VVDNOxFKSnQGKPsTqgtwLhZnIPkL', 'c', '', 'IJ', 'aM', 'rDzF', 'YFwP']) from system.numbers limit 10; +select [0, 8, 17, 0, 1, 0, 0, 0, 0, 0, 5, 0] = multiSearchAllPositionsCaseInsensitive(materialize('PzIxktujxHZsaDlwSGQPgvA'), ['zrYlZdnUxlPrVJJeZEASwdCHlNEm', 'jxhZS', 'sGQPgV', 'MZMChmRBgsxhdgspUhALoxmrkZVp', 'pzIxktuJxHzsADlw', 'xavwOAibQuoKg', 'vuuETOrWLBNLhrMeWLgGQpeFPdcWmWu', 'TZrAgmdorqZIdudhyCMypHYKFO', 'ztcCyGxRKrcUTv', 'OUvwdMZrcZuwGtjuEBeGU', 'k', 'rFTpnfGIOCfwktWnyOMeXQZelkYwqZ']) from system.numbers limit 10; +select [3, 1, 4, 1, 0, 17, 13, 0, 0, 0, 0, 0, 8, 0] = multiSearchAllPositionsCaseInsensitive(materialize('pUOaQLUvgmqvxaMsfJpud'), ['OaqLUvGm', '', 'aQ', '', 'VajqJSlkmQTOYcedjiwZwqNH', 'f', 'xaMsfj', 'CirvGMezpiIoacBGAGQhTJyr', 'vucKngiFjTlzltKHexFVFuUlVbey', 'ppalHtIYycBCEjsgsXbFeecpkQMNr', 'nEgIYVoGkhTsFgBUSHJvIcYCYbuOBP', 'efjBVRVzknGrikGHxExlFEtYf', 'v', 'QgRBCaGlwNYWRslDylOrfPxZxAOF']) from system.numbers limit 10; +select [14, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 20, 5, 0, 4, 0] = multiSearchAllPositionsCaseInsensitive(materialize('WZNWOCjFkCAAzIptkUtyPCyC'), ['iPTkuT', 'BngeNlFbKymzMYmNPfV', 'XKEjbLtADFMqS', 'dbRQKJGSFhzljAiZV', 'wZnwoCjFKCAAzIPTKuTYpc', 'yBaUvSSGOEL', 
'iEYopROOYKxBwPdCgbPNPAsMwVksHgagnO', 'TljXPJVebHqrnhSiTGwpMaNeKy', 'wzNWocjF', 'bLxLrZnOCeIfxkfZEOcqDteUvc', 'CtHYpAZDANEv', '', 'XMAMpGYMiOb', 'y', 'o', 'floswnnFjXDTxantSvDYPSnaORL', 'WOcjFkcAaZIp', 'buqBHbZsLDnCUDhLdgd']) from system.numbers limit 10; +select [0, 20, 14, 0, 2, 0, 1, 14, 0, 0, 0, 1, 0, 26, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('XJMggEHaxfddDadtwKMCcPsMlSFVJ'), ['NzbUAZvCsnRnuzTglTsoT', 'ccP', 'ADTwKmc', 'JaUzcvWHMotuEMUtjsTfJzrsXqKf', 'jMGgEHaXfdddAdTWKMCcpsM', 'SMnb', '', 'AdTWkMccPSMlsfv', 'fVjPVafkp', 'goqsYAFqhhnCkGwhg', 'CNHNPZHZreFwhRMr', '', 'vcimNhmdbtoiCgVzNuvdgZG', 'sfvJ', 'AqKmroxmRMSFAKjfhwrzxmNSSjMHxKow', 'Xhub']) from system.numbers limit 10; +select [0, 0, 7, 0, 1, 1, 0, 0, 13, 0, 1, 1, 5, 0] = multiSearchAllPositionsCaseInsensitive(materialize('VQuEWycGbGcTcCCvWkujgdoWjKgVYy'), ['UevGaXmEAtBdWsPhBfqp', 'aQOrNMPmoVGSu', 'c', 'TMhzvbNJCaxtGNUgRBmTFEqgNBIBpSJ', '', 'vq', 'pVNUTCqXr', 'QSvkansbdPbvVmQpcQXDk', 'cCCvwkUjgdOWjKgVYy', 'EtCGaEzsSbJ', 'V', '', 'WycgBgCTCcCvwkujgdoWJKgv', 'xPBJqKrZbZHJawYvPxgqrgxPN']) from system.numbers limit 10; +select [4, 1, 0, 0, 0, 0, 0, 0, 0, 18] = multiSearchAllPositionsCaseInsensitive(materialize('LODBfQsqxfeNuoGtzvrUMRVWNKUKKs'), ['Bf', 'lOdbfQs', 'ZDSDfKXABsFiZRwsebyU', 'DT', 'GEUukPEwWZ', 'GNSbrGYqEDWNNCFRYokZbZEzGzc', 'kYCF', 'Kh', 'jRMxqdmGYpTkePeReXJNdnxagceitMJlmbbro', 'VrumrvWnKU']) from system.numbers limit 10; +select [1, 1, 3, 1, 10, 0, 9, 2, 2, 0, 0, 0, 0, 0, 8, 0, 1, 11, 8, 0] = multiSearchAllPositionsCaseInsensitive(materialize('lStPVtsQypFlZQoQhCuP'), ['', '', 'tpV', 'L', 'PF', 'pGPggwbkQMZandXugTpUorlPOubk', 'yPFlz', 'sTPVTsQyPfLzQOqhCU', 'StPVtSq', 'cbCxBjAfJXYgueqMFNIoSguFm', 'AosIZKMPduRfumDZ', 'AGcNTHObH', 'oPaGpsQ', 'kwQCczyY', 'q', 'HHUYdzGAzVJyn', '', 'fLZQoqHcUp', 'q', 'SSonzfqLVwIGzdHtj']) from system.numbers limit 10; +select [0, 1, 2, 0, 0, 0, 13, 1, 27, 1, 0, 1, 3, 1, 0, 1, 3, 0] = multiSearchAllPositionsCaseInsensitive(materialize('NhKJtvBUddKWpseWwRiMyBsTWmlk'), ['toBjODDZoRAjFeppAdsne', '', 'HKjTvBu', 'QpFOZJzUHHQAExAqkdoBpSbXzPnTzuPd', 'gE', 'hLmXhcEOwCkatUrLGuEIJRkjATPlqBjKPOV', 'Ps', 'NH', 'l', '', 'aSZiWpmNKfglqAbMZpEwZKmIVNjyJTtDianY', 'NhKJTvBUDDkwpS', 'KJtvbUDDKWPSewwrimYbstwm', 'NHKJTvbudDKwpSEwwR', 'hmMeWEpksVAaXd', 'NHkJTvBUDd', 'kjTvbudd', 'kmwUzfEpWSIWkEylDeRPpJDGb']) from system.numbers limit 10; +select [0, 5, 0, 0, 0, 1, 1, 15, 2, 3, 4, 5] = multiSearchAllPositionsCaseInsensitive(materialize('NAfMyPcNINKcgsShJMascJunjJva'), ['ftHhHaJoHcALmFYVvNaazowvQlgxwqdTBkIF', 'yp', 'zDEdjPPkAdtkBqgLpBfCtsepRZScuQKbyxeYP', 'yPPTvdFcwNsUSeqdAUGySOGVIhxsJhMkZRGI', 'JQEqJOlnSSam', 'nAFmy', '', 'sHJmaScjUnJj', 'afmY', 'FmYpcnINKCg', 'MYPCniNkcgSS', 'YPCNiNkCgSsHjmasCJuNjJ']) from system.numbers limit 10; +select [0, 0, 6, 3, 2, 0, 8, 2, 2, 10, 0, 0, 14, 0, 0, 3] = multiSearchAllPositionsCaseInsensitive(materialize('hgpZVERvggiLOpjMJhgUhpBKaN'), ['Nr', 'jMcd', 'e', 'PZVeRvggiLOPjmjh', 'GpZVe', 'cVbWQeTQGhYcWEANtAiihYzVGUoHKH', 'VGgilOPj', 'GPZVervgGiLopjmjHGuHp', 'GP', 'gil', 'fzwDPTewvwuCvpxNZDi', 'gLLycXDitSXUZTgwyeQgMSyC', 'PJmjh', 'bTQdrFiMiBtYBcEnYbKlqpTvGLmo', 'ggHxiDatVcGTiMogkIWDxmNnKyVDJth', 'pzv']) from system.numbers limit 10; +select [7, 1, 9, 3, 0, 0, 2, 0, 1, 11] = multiSearchAllPositionsCaseInsensitive(materialize('xUHVawrEvgeYyUZGmGZejClfinvNS'), ['RevGeYyuz', 'XUHvAWrev', 'Vg', 'hvawR', 'eRQbWyincvqjohEcYHMwmDbjU', 'nuQCxaoxEdadhptAhZMxkZl', 'UhVAwREvGEy', 'lHtwTFqlcQcoOAkujHSaj', '', 'eYYUzgMgzEjCLfIn']) from system.numbers limit 10; +select [0, 0, 8, 5, 9, 
1, 0, 4, 12, 6, 4, 0, 0, 12] = multiSearchAllPositionsCaseInsensitive(materialize('DbtStWzfvScJMGVPQEGkGFoS'), ['CSjYiEgihaqQDxZsOiSDCWXPrBdiVg', 'aQukOYRCSLiildgifpuUXvepbXuAXnYMyk', 'fvsCjmgv', 'TWZFV', 'VscjMgVpQ', 'dBtSTwZfVsCjmGVP', 'wqpMklzJiEvqRFnZYMfd', 'StwZfVScJ', 'j', 'wzfVsCjmGV', 'STWZfVS', 'kdrDcqSnKFvKGAcsjcAPEwUUGWxh', 'UtrcmrgonvUlLnzWXvZI', 'jMgvP']) from system.numbers limit 10; +select [0, 0, 0, 0, 7, 3, 0, 11, 1, 10, 0, 0, 7, 1, 4, 0, 17, 3, 15, 0] = multiSearchAllPositionsCaseInsensitive(materialize('YSBdcQkWhYJMtqdEXFoLfDmSFeQrf'), ['TnclcrBJjLBtkdVtecaZQTUZjkXBC', 'SPwzygXYMrxKzdmBRTbppBQSvDADMUIWSEpVI', 'QnMXyFwUouXBoCGLtbBPDSxyaLTcjLcf', 'dOwcYyLWtJEhlXxiQLRYQBcU', 'KWhYjMtqdEXFo', 'BD', 'nnPsgvdYUIhjaMRVcbpPGWOgVjJxoUsliZi', 'j', '', 'YjmtQdeXF', 'peeOAjH', 'agVscUvPQNDwxyFfXpuUVPJZOjpSBv', 'kWh', '', 'dcQKWHYjmTQD', 'qjWSZOgiTCJyEvXYqaPFqbwvrwadJsGVTOhD', 'xfoL', 'b', 'DeXf', 'HyBR']) from system.numbers limit 10; +select [4, 0, 0, 13, 1, 0, 3, 13, 16, 1, 0, 1, 16, 1, 12, 0, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('SoVPMQNqmaTGuzYxDvZvapSuPiaP'), ['pMqNQMAtGuzYxDVz', 'TEJtgLhyredMnIpoZfmWvNwpkxnm', 'XRWmsfWVOCHhk', 'u', '', 'HvkXtxFdhVIyccpzFFSL', 'VPM', 'uZyXDVzvAPsUpIaP', 'xDvzV', 'sovpmqNQmATguZYx', 'wEG', 'soVPmQnQ', 'XDVzV', '', 'GUZyXdvzva', 'FetUahWwGtwEpVdlJCJntL', 'B', 'lSCUttZM']) from system.numbers limit 10; +select [1, 0, 1, 2, 15, 0, 0, 0, 1, 0] = multiSearchAllPositionsCaseInsensitive(materialize('zFWmqRMtsDjSeWBSFoqvWsrV'), ['', 'GItrPyYRBwNUqwSaUBpbHJ', '', 'f', 'BsfOQvWsR', 'JgvsMUZzWaddD', 'wxRECkgoCBPjSMRorZpBwuOQL', 'xHKLLxUoWexAM', '', 'YlckoSedfStmFOumjm']) from system.numbers limit 10; +select [11, 1, 1, 1, 0, 0, 1, 0, 4, 0, 0, 0, 1, 0, 5, 8] = multiSearchAllPositionsCaseInsensitive(materialize('THBuPkHbMokPQgchYfBFFXme'), ['KpqGchyfBF', '', '', 'TH', 'NjnC', 'ssbzgYTybNDbtuwJnvCCM', 'tHbupKHBMOkPQgcHy', 'RpOBhT', 'uPKHbMoKpq', 'oNQLkpSKwocBuPglKvciSjttK', 'TaCqLisKvOjznOxnTuZe', 'HmQJhFyZrcfeWbXVXsnqpcgRlg', 'tHB', 'gkFGbYje', 'pkhbMokPq', 'Bm']) from system.numbers limit 10; +select [7, 10, 0, 0, 9, 0, 0, 3, 0, 10] = multiSearchAllPositionsCaseInsensitive(materialize('ESKeuHuVsDbiNtvxUrfPFjxblv'), ['uvsDBiNtV', 'DbInTvxu', 'YcLzbvwQghvrCtCGTWVuosE', 'cGMNo', 'SDb', 'nFIRTLImfrLpxsVFMBJKHBKdSeBy', 'EUSiPjqCXVOFOJkGnKYdrpuxzlbKizCURgQ', 'KeUHU', 'gStFdxQlrDcUEbOlhLjdtQlddJ', 'DBInTVx']) from system.numbers limit 10; +select [1, 0, 2, 18, 1, 3, 15, 8, 0, 0, 1, 3, 0, 23, 2, 0, 8, 0] = multiSearchAllPositionsCaseInsensitive(materialize('TzczIDSFtrkjCmDQyHxSlvYTNVKjMT'), ['', 'AmIFsYdYFaIYObkyiXtxgvnwMVZxLNlmytkSqAyb', 'ZcZI', 'HXsLVYTnvKjm', '', 'CZiDsFtRKJ', 'DQYhxSl', 'fTRKjCmdqYHxsLvYtNvk', 'hxVpKFQojYDnGjPaTNPhGkRFzkNhnMUeDLKnd', 'RBVNIxIvzjGYmQBNFhubBMOMvInMQMqXQnjnzyw', '', 'c', 'vcvyskDmNYOobeNSfmlWcpfpXHfdAdgZNXzNm', 'ytnvKJM', 'ZcZidsFtRKjcmdqy', 'IRNETsfz', 'fTR', 'POwVxuBifnvZmtBICqOWhbOmrcU']) from system.numbers limit 10; +select [14, 16, 10, 2, 6, 1, 0, 8, 0, 0, 12, 1, 0, 1, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('tejdZOLhjpFLkGBWTGPfmk'), ['GBWtgPF', 'Wt', 'PflkgBWTgpFmK', 'ejdZOLhJPFlKgb', 'o', 'TejDZ', 'HlQfCP', 'hJP', 'ydiyWEfPGyRwcKGfGVdYxAXmkY', 'QsOyrgkTGMpVUAmLjtnWEIW', 'LKGBw', 'tejDzolHJpFLKgbWT', 'IK', '', 'WrzLpcmudcIJEBapkToDbYSazKTwilW', 'DmEWOxoieDsQHYsLNelMc']) from system.numbers limit 10; +select [9, 0, 1, 4, 13, 0, 0, 1, 3, 7, 9, 0, 1, 1, 0, 7] = multiSearchAllPositionsCaseInsensitive(materialize('ZWHpzwUiXxltWPAIGGxIcJB'), ['XxLTWpA', 'YOv', '', 'pzwUIXXl', 'wp', 
'lpMMLDAuflLnWMFrETXRethzCUZOWfQ', 'la', '', 'HPZ', 'UixxlTw', 'xXLTWP', 'YlfpbSBqkbddrVwTEmXxgymedH', '', '', 'QZWlplahlCRTMjmNBeoSlcBoKBTnNZAS', 'UiXxlTwPAiGG']) from system.numbers limit 10; +select [0, 9, 6, 0, 4, 0, 3, 0, 0, 0, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('NytxaLUvmiojEepjuCzwUYPoWL'), ['LcOnnmjbZSifx', 'm', 'lUvMIOjeE', 'vuZsNMSsutiLCDbClPUSsrziohmoZaQeXtKG', 'XaLuvm', 'hlUevDfTSEGOjvLNdRTYjJQvMvwrMpwy', 'TXALuVmioJeePjUczw', 'pKaQKZg', 'PAdX', 'FKLMfNAwNqeZeWplTLjd', 'DODpbzUmMCzfGZwfkjH', 'HMcEGRHLspYdJIiJXqwjDUBp']) from system.numbers limit 10; +select [2, 1, 0, 16, 8, 1, 6, 0, 0, 1, 8, 0, 7, 0, 9, 1, 1, 0, 0, 1] = multiSearchAllPositionsCaseInsensitive(materialize('WGVvkXuhsbzkLqiIEOuyiRfomy'), ['GVv', '', 'VbldWXHWzdziNcJKqIkDWrO', 'iEOUyIRFomy', 'hsBZklqiieOuy', '', 'X', 'emXjmIqLvXsNz', 'rxhVkujX', 'wgvvK', 'HsBzKLQiie', 'wVzJBMSdKOqjiNrXrfLEjjXozolCgYv', 'UHsbzklQiiEouyirf', 'UOvUsiKtUnwIt', 'SBZKLqiIEoUYIrfom', 'wg', '', 'BefhETEirL', 'WyTCSmbKLbkQ', '']) from system.numbers limit 10; +select [8, 1, 2, 8, 1, 0, 5, 0, 0, 4, 0, 1, 14, 0, 0, 7, 0, 1] = multiSearchAllPositionsCaseInsensitive(materialize('uyWhVSwxUFitYoVQqUaCVlsZN'), ['XufitYOVqqUACVlszn', '', 'ywH', 'XUFIT', 'uywHvSWXuFIt', 'dGhpjGRnQlrZhzGeInmOj', 'vswXuFitYovqQuA', 'dHCfJRAAQJUZeMJNXLqrqYCygdozjAC', 'rojpIwYfNLECl', 'hVswxufiTYov', 'bgJdgRoye', '', 'ovQ', 'AdVrJlq', 'krJFOKilvBTGZ', 'WxuFITYOV', 'AsskQjNPViwyTF', 'u']) from system.numbers limit 10; +select [0, 2, 0, 0, 0, 6, 0, 5, 0, 15, 0, 0, 3, 0] = multiSearchAllPositionsCaseInsensitive(materialize('BEKRRKLkptaZQvBxKoBL'), ['HTwmOxzMykTOkDVKjSbOqaAbg', 'eKrRKl', 'UrLKPVVwK', 'TyuqYmTlQDMXJUfbiTCr', 'fyHrUaoMGdq', 'KLkPtaZq', 'cPUJp', 'RKLk', 'yMnNgUOpDdP', 'BX', 'tXZScAuxcwYEfSKXzyfioYPWsrpuZz', 'dsiqhlAKbCXkyTjBbXGxOENd', 'k', 'juPjORNFlAoEeMAUVH']) from system.numbers limit 10; +select [9, 0, 0, 0, 1, 4, 2, 0, 0, 0, 0, 8, 0, 2, 0, 3, 0, 3] = multiSearchAllPositionsCaseInsensitive(materialize('PFkLcrbouhBTisTkuUcO'), ['UhBtistKU', 'ioQunYMFWHD', 'VgYHTKZazRtfgRtvywtIgVoBqNBwVn', 'ijSNLKch', 'pFKlcrBOuhbtIsTku', 'lCRboUHBtI', 'fKLCRBOu', 'XTeBYUCBQVFwqRkElrvDOpZiZYmh', 'KzXfBUupnT', 'OgIjgQO', 'icmYVdmekJlUGSmPLXHc', 'OuH', 'BWDGzBZFhTKQErIRCbtUDIIjzw', 'F', 'LuWyPfSdNHIAOYwRMFhP', 'kL', 'PQmvXDCkEhrlFBkUmRqqWBxYi', 'kLcrbo']) from system.numbers limit 10; +select [0, 1, 1, 6, 14, 3, 0, 1, 9, 1, 9, 0, 1, 10, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('pfynpJvgIjSqXWlZzqSGPTTW'), ['ZzeqsJPmHmpoYyTnKcWJGReOSUCITAX', '', 'P', 'jvGIj', 'wLZzQsgP', 'YnPjVGij', 'DmpcmWsyilwHwAFcKpLhkiV', '', 'I', 'pFy', 'IjsqxwLZzqSgpT', 'pKpe', 'PfynpJvgiJSqXwlzZ', 'jsQXwLZZqs', 'onQyQzglEOJwMCO', 'GV']) from system.numbers limit 10; +select [1, 17, 1, 20, 0, 0, 5, 0, 0, 0, 24, 0] = multiSearchAllPositionsCaseInsensitive(materialize('BLNRADHLMQstZkAlKJVylmBUDHqEVa'), ['bLnRaDhLm', 'kJVYlmbuD', 'bLnr', 'yLMbU', 'eAZtcqAMoqPEgwtcrHTgooQcOOCmn', 'jPmVwqZfp', 'aDHlmqS', 'fmaauDbUAQsTeijxJFhpRFjkbYPX', 'aqIXStybzbcMjyDKRUFBrhfRcNjauljlqolfDX', 'WPIuzORuNbTGTNb', 'uDhqeVa', 'fQRglSARIviYABcjGeLK']) from system.numbers limit 10; +select [2, 0, 4, 5, 1, 15, 1, 9, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('BEwjMzphoTMoGikbrjTVyqDq'), ['E', 'sClgniMsZoGTEuLO', 'jmzphotmoGIKBRjtv', 'MZPhOtmo', '', 'Kb', '', 'otm', 'tVpxYRttoVpRLencV', 'SJAhAuMttGaeMsalRjeelAGG']) from system.numbers limit 10; +select [1, 0, 0, 0, 0, 0, 4, 0, 0, 19, 0, 7] = multiSearchAllPositionsCaseInsensitive(materialize('yNnYRQfcyemQdxUEPOiwRn'), ['', 
'SJteoGNeIAMPWWBltkNKMrWDiVfR', 'kKnnKQhIPiekpnqTXJuyHfvWL', 'GPDUQEMWKzEEpvjLaIRYiuNfpzxsnSBX', 'oPrngRKwruyH', 'ukTSzFePSeVoeZeLQlAaOUe', 'yRqfcyemQDXUepo', 'CwmxidvpPHIbkJnVfSpbiZY', 'FUxmQdFVISApa', 'iwr', 'ciGHzDpMGNQbytsKpRP', 'Fcy']) from system.numbers limit 10; +select [0, 1, 0, 11, 2, 0, 1, 3, 0, 0, 0, 21] = multiSearchAllPositionsCaseInsensitive(materialize('EgGWQFaRsjTzAzejYhVrboju'), ['DVnaLFtCeuFJsFMLsfk', '', 'thaqudWdT', 'Tzazejy', 'GGW', 'RolbbeLLHOJpzmUgCN', '', 'gwqfarsjtzaZeJYHvR', 'KkaoIcijmfILoe', 'UofWvICTEbwVgISstVjIzkdrrGryxNB', 'UJEvDeESWShjvsJeioXMddXDkaWkOiCV', 'B']) from system.numbers limit 10; +select [0, 5, 2, 0, 0, 7, 0, 0, 0, 11, 0, 12, 22, 10, 0, 12] = multiSearchAllPositionsCaseInsensitive(materialize('ONgpDBjfRUCmkAOabDkgHXICkKuuL'), ['XiMhnzJKAulYUCAUkHa', 'dbj', 'nGpDbJFRU', 'xwbyFAiJjkohARSeXmaU', 'QgsJHnGqKZOsFCfxXEBexQHrNpewEBFgme', 'JFruCM', 'DLiobjNSVmQk', 'vx', 'HYQYzwiCArqkVOwnjoVNZxhbjFaMK', 'Cm', 'ckHlrEXBPMrVIlyD', 'M', 'xI', 'UcmkAOabdKg', 'jursqSsWYOLbXMLQAEhvnuHclcrNcKqB', 'mKaoaBdKghxiCkkUUL']) from system.numbers limit 10; +select [0, 1, 0, 1, 0, 0, 0, 0, 7, 21] = multiSearchAllPositionsCaseInsensitive(materialize('WhdlibCbKUmdiGbJRshgdOWe'), ['kDPiHmzbHUZB', '', 'CukBhVOzElTdbEBHyrspj', '', 'QOmMle', 'wiRqgNwjpdfgyQabxzksjg', 'RgilTJqakLrXnlWMn', 'bSPXSjkbypwqyazFLQ', 'CBkuMDiGbJRShGdOWe', 'dow']) from system.numbers limit 10; +select [0, 8, 0, 1, 1, 0, 1, 7, 0, 0, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('oOccAoDDoPzHUyRqdWhJxNmATEqtE'), ['LFuvoQkVx', 'DoPzh', 'YaBSTdWvmUzlgRloppaShkRmLC', 'oO', '', 'eeEpOSLSXbyaOxTscOPoaTcKcchPmSGThk', '', 'dDO', 'oFXmyIJtmcSnebywDlKruvPUgmPFzEnMvA', 'vCs', 'MsxHLTgQcaQYZdPWJshIMWbk', 'yqrjIzvrxd']) from system.numbers limit 10; +select [0, 16, 0, 0, 0, 0, 7, 1, 0, 0, 1, 2, 1, 4, 0, 3] = multiSearchAllPositionsCaseInsensitive(materialize('FtjOSBIjcnZecmFEoECoep'), ['FQQwzxsyauVUBufEBdLTKKSdxSxoMFpL', 'EOecoEP', 'HGWzNTDfHxLtKrIODGnDehl', 'ZxirLbookpoHaxvASAMfiZUhYlfuJJN', 'mKh', 'GZaxbwVOEEsApJgkLFBRXvmrymSp', 'Ij', '', 'X', 'AnCEVAe', 'fTj', 'tjOSbIjcNZECMfeoEC', '', 'OsBIjcN', 'LtdJpFximOmwYmawvlAIadIstt', 'JOsBiJCNzEc']) from system.numbers limit 10; +select [0, 2, 0, 0, 19, 0, 0, 12, 1, 0, 3, 1, 0, 0] = multiSearchAllPositionsCaseInsensitive(materialize('ugpnWWncvqSLsYUCVXRZk'), ['yOWnQmZuhppRVZamgmRIXXMDQdeUich', 'gPNww', 'jlyFSbvmjaYPsMe', 'fQUeGVxgQdmPbVH', 'rZk', 'ariCX', 'grAffMPlefMQvugtAzN', 'LsYuCVX', '', 'jZFoQdWEWJFfSmNDqxIyNjvxnZJ', 'P', 'UgPN', 'JmKMsbegxNvusaiGGAZKglq', 'qArXLxzdYvabPv']) from system.numbers limit 10; +select [0, 0, 0, 0, 0, 0, 8, 0, 0, 1, 1, 15, 0, 1, 7, 0] = multiSearchAllPositionsCaseInsensitive(materialize('nxwotjpplUAXvoQaHgQzr'), ['ABiEhaADbBLzPwhSfhu', 'TbIqtlkCnFdPgvXAYpUuLjqnnDjDD', 'oPszWpzxuhcyuWxiOyfMBi', 'fLkacEEeHXCYuGYQXbDHKTBntqCQOnD', 'GHGZkWVqyooxtKtFTh', 'CvHcLTbMOQBKNCizyEXIZSgFxJY', 'PlUAxVoQah', 'zrhYwNUzoYjUSswEFEQKvkI', 'c', 'NXWOt', '', 'qAhG', 'JNqCpsMJfOcDxWLVhSSqyNauaRxC', '', 'PpLuaxV', 'DLITYGE']) from system.numbers limit 10; +select [2, 0, 0, 1, 0, 0, 28, 1, 16, 1] = multiSearchAllPositionsCaseInsensitive(materialize('undxzJRxBhUkJpInxxJZvcUkINlya'), ['ndxzjRxbhuKjP', 'QdJVLzIyWazIfRcXU', 'oiXcYEsTIKdDZSyQ', 'U', 'dRLPRY', 'jTQRHyW', 'Y', '', 'nxxJZVcU', '']) from system.numbers limit 10; +select [1, 4, 1, 0, 4, 1, 0, 1, 16, 1, 0, 0, 0, 8, 12, 14, 0, 2] = multiSearchAllPositionsCaseInsensitive(materialize('lrDgweYHmpzOASVeiFcrDQUsv'), ['', 'gwEYhMP', 'LrDGwEyHmPzOaSVEifC', 'oMN', 'gwEYhMpZO', 'lrdGWEy', 'pOKrxN', 
'lrDgwEyhmpZoaSv', 'eifcrdqU', 'LrDgw', 'dUvarZ', 'giYIvswNbNaBWprMd', 'pPPqKPhVaBhNdmZqrBmb', 'hmPzoASVEiF', 'O', 'SVEi', 'gIGLmHnctIkFsDFfeJWahtjDzjPXwY', 'rDGweyHmP']) from system.numbers limit 10; +select [0, 0, 11, 1, 1, 1, 0, 16, 0, 1, 5, 0, 0, 0, 2, 0, 2, 0] = multiSearchAllPositionsCaseInsensitive(materialize('XAtDvcDVPxZSQsnmVSXMvHcKVab'), ['bFLmyGwEdXiyNfnzjKxUlhweubGMeuHxaL', 'IhXOeTDqcamcAHzSh', 'ZSQsNMvsxmVHcK', '', '', '', 'dbrLiMzYMQotrvgwjh', 'MvsxMV', 'zMp', 'XaTDvCdvpXzsqSNMVSxm', 'v', 'LkUkcjfrhyFmgPXPmXNkuDjGYlSfzPi', 'ULpAlGowytswrAqYdaufOyWybVOhWMQrvxqMs', 'wGdptUwQtNaS', 'ATdVcdVPXzSqsnmVSXMvHcKVab', 'JnhhGhONmMlUvrKGjQcsWbQGgDCYSDOlor', 'atdvCdvpXzsqSnMVSxMVhCkvAb', 'ybNczkKjdlMoOavqBaouwI']) from system.numbers limit 10; +select [8, 0, 0, 0, 4, 0, 0, 5, 5, 2] = multiSearchAllPositionsCaseInsensitive(materialize('XPquCTjqgYymRuwolcgmcIqS'), ['qgyYMruW', 'tPWiStuETZYRkfjfqBeTfYlhmsjRjMVLJZ', 'PkTdqDkRpPpQAMksmkRNXydKBmrlOAzIKe', 'wDUMtn', 'UcTJQgYYMRuWoLCgMcI', 'PieFD', 'kCBaCC', 'Ct', 'C', 'pQuctjqgyymRuwOLCgmc']) from system.numbers limit 10; + +select [1, 0, 7, 1, 0, 24, 17, 0, 0, 0, 2, 0, 1, 7, 4, 1, 12, 8] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('гГБаДнФбпнЩврЩшЩЩМщЕБшЩПЖПчдт'), ['', 'таОХхрзИДжЛСдЖКЧжБВЩжЛкКХУКждАКРеаЗТгч', 'Ф', '', 'ЙЩИФМфАГщХзКЩЧТЙжмуГшСЛ', 'ПЖпчдТ', 'ЩМщЕбшЩПжПч', 'ФгА', 'гУД', 'зУцкжРоППЖчиШйЗЕшаНаЧаЦх', 'гбаДНФбПНЩВРЩШЩщМЩеБшЩпжПЧд', 'РДЧЖАбрФЦ', 'гГ', 'ФбпНщвр', 'адНфБПнщвРщШщщМщЕбШщ', 'ггб', 'ВРЩ', 'бПНщврЩш']) from system.numbers limit 10; +select [0, 12, 8, 0, 12, 0, 0, 10, 0, 8, 4, 6] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('айРВбЧБжКВИхБкчФЖЖНВнпФйФБДфЗ'), ['ЛрЦфуУДВК', 'хБкчфЖжНвнпфйфБдФ', 'жКВИХБкчФЖжНвнПф', 'кЖчвУцВСфЗБТИфбСжТИдРкшгзХвщ', 'хбк', 'штДезйААУЛчнЖофМисНЗо', 'нлнШЧВЙхОПежкцевчлКрайдХНчНб', 'вИХбкчфжжНВН', 'ЩдзЦТуоЛДСеШГфЦ', 'ЖКВихбКЧфжЖ', 'вбЧбЖкВихБкЧфЖжНВ', 'Чб']) from system.numbers limit 10; +select [18, 15, 0, 0, 0, 0, 5, 0, 14, 1, 0, 0, 0, 0, 0, 15] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('пМИОкоЗжГйНТПЙацччЧАЩгЕВБбЕ'), ['ЧЧАЩгЕВБ', 'а', 'ФбРВщшййпХдфаЗЖлЛСЗПРШПпАОинЧКзЩхждН', 'ЛфРКДЙВСУСЙОчтнИкРЗбСГфкЩреИхЛлчХчШСч', 'ШйвБПАдФдФепЗТкНУрААйеЧПВйТоЧмБГДгс', 'ФтЙлЖЕсИАХИФЗаЕМшсшуцлцАМФМгбО', 'КО', 'лиШБнлпОХИнБаФЩдмцпжЗИЛнвсЩЙ', 'йацччЧАщгевбБЕ', 'ПмИоКозжГйНТП', 'ИГНннСчКАИСБщцП', 'ПнжмЙЛвШтЩейХЛутОРЩжифбЗчгМУЛруГпх', 'ХжЗПлГЖЛйсбпрЩОТИеБвулДСиГзлЛНГ', 'учклЦНЕгжмщлжАшщжМд', 'ЩеПОЙтЖзСифОУ', 'АЦЧ']) from system.numbers limit 10; +select [10, 0, 1, 1, 6, 1, 7, 6, 0, 0, 0, 2, 12, 0, 6, 0, 4, 8, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('квхБнцхйзЕпйИмтЙхфзвгдФ'), ['еПйИМт', 'хгкиМжСБИТНщенЩИщНСкй', '', 'Квхб', 'цхЙЗЕПйИмТйХФЗ', 'к', 'хйЗЕПЙИмтй', 'Цх', 'нКлШбМЖГйШкРзадрЛ', 'ДштШвБШТг', 'СЦКйЕамЦщПглдСзМлоНШарУтМднЕтв', 'ВхБнцхйЗЕПйимТ', 'йимтЙХФЗВГД', 'жчссунЙаРцМкЖУЦщнцОЕхнРж', 'цХЙЗЕП', 'ОгНФдМЛПТИдшцмХИеКйРЛД', 'бнЦхЙ', 'ЙЗе', 'згЩищШ', 'фХлФчлХ']) from system.numbers limit 10; +select [0, 0, 0, 12, 0, 0, 27, 1, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('хНпсРТХВдтоЦчдлеФПвнЛгЗКлПйнМВ'), ['ШиБфЗШПДЧхОЩшхфщЗЩ', 'иГйСЧЗтШЛуч', 'АЗХЦхедхОуРАСВЙС', 'цчдЛЕфП', 'СДбйГйВЕРмЙЩЛщнжен', 'НДлцСфТшАщижгфмуЖицжчзегЕСЕНп', 'й', '', 'йлчМкРИЙиМКЙжссЦТцРГзщнхТмОР', 'ПРцГувЧкйУХггОгЖНРРсшГДрлЧНжГМчрХЗфЧЕ']) from system.numbers limit 10; +select [0, 0, 2, 0, 10, 7, 1, 1, 0, 9, 0, 2, 0, 17, 0, 0, 0, 6, 5, 2] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЙзЗжпжДЕСУхчйдттСЙзоЗо'), ['щОЙУшееЧщкхГККреБкВ', 'жВ', 'ззЖпждЕсУХчЙДТТсЙ', 
'ЙЦШЦЙЖзХШРвнкЕд', 'УхчйДтТсйЗОз', 'дЕСу', '', '', 'дсцеррищндЗдНкжаНЦ', 'сУхчЙдттсйзОзО', 'ЦЖРжмц', 'ЗЗ', 'СгЛГАГЕЖНгщОеЖЦДмБССцЩафзЗ', 'Сйзоз', 'ЦГХТЕвЕЗБМА', 'пмВоиеХГжВшдфАЖАшТйуСщШчИДРЙБнФц', 'Оа', 'ждЕ', 'ПжДесу', 'ЗзЖПждЕСУ']) from system.numbers limit 10; +select [0, 0, 0, 0, 5, 1, 0, 6, 0, 1, 17, 15, 1, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('уФШЙбШТоХВбзЦцЖОЕКТлщхнЖГ'), ['цЛ', 'ууМ', 'ТИгЙолМФсибтЕМнетквЦИЩИБккйн', 'оФОаМогсХЧЦооДТПхб', 'бШтОХВбЗцЦЖоЕКтЛ', 'уфШйбШтоХ', 'фдтщрФОЦсшигдПУхЛцнХрЦл', 'ШтО', 'НИкИТрбФБГИДКфшзЕмЙнДЖОсЙпЩцщкеЖхкР', 'уфШЙБш', 'екТлщ', 'ЖоекТл', 'уфШйБшТоХвбз', 'ТуОхдЗмгФеТаафЙм']) from system.numbers limit 10; +select [0, 1, 6, 1, 0, 1, 0, 0, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('чМЩБЛЛПРлщкВУбПефХВФлАЗШао'), ['гаТкЛВнрвдПМоеКПОйр', 'ч', 'ЛпрЛЩКвуБпе', 'ЧмЩб', 'ц', '', 'жгаччЖйГЧацмдсИИВЩЩжВЛо', 'йГеЙнБзгнкЦЛБКдОЕЧ', 'ПоЦРвпЕЗСАШж', 'ЙОНЦОбиееО']) from system.numbers limit 10; +select [2, 0, 17, 1, 0, 0, 0, 5, 0, 4, 0, 0, 0, 0, 0, 2] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЕаЩичщМЦЖиЗБЛЧжуНМЧК'), ['АЩиЧЩ', 'ИлУсшДБнжщаатуРТтраПОЙКЩйТГ', 'НМЧк', 'Еа', 'зАВФЛЩбФрМВШбПФГгВЕвЖббИТйе', 'РЗНРБЩ', 'ЦдЙНГпефзЛчпУ', 'ч', 'НШШчПЗР', 'ИчЩмЦжИЗБлЧЖУНМч', 'аннвГДлмОнТЖЗЙ', 'ШдчЩшЕБвхПУсШпг', 'гФИШНфЖПжймРчхАБШкЖ', 'ЖзгЖАБлШЗДпд', 'Д', 'ащиЧ']) from system.numbers limit 10; +select [4, 1, 0, 7, 0, 7, 1, 1, 0, 3, 7, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('иОцХКЙвувМИжШдУМУЕйНсБ'), ['ХкйвуВмИжШдУм', '', 'звМАОМЩщЙПшкиТчЩдгТЦмфзеИ', 'вуВМиж', 'КДщчшЙВЕ', 'в', '', 'ИоЦхКЙВувМижШ', 'ЕвТАРи', 'цхКЙвувмИЖШДумуе', 'вУвМи', 'зПШИХчУщШХУвврХйсуЙЗеВЧКНмКШ']) from system.numbers limit 10; +select [0, 5, 0, 0, 0, 0, 0, 12, 0, 11] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЦОфбчУФсвТймЦчдщгЩжИАБ'), ['йлрк', 'ЧуФсвтйМцчдЩгщ', 'МНлЕжорв', 'иНзТЖМсмх', 'шЕМЖжпИчсБжмтЧЙчщФХб', 'жШХДнФКАЩГсОЩвЕаам', 'НпКЦХулЛвФчШЕЗкхХо', 'мЦчДЩгЩжиАб', 'мпцгВАЕ', 'Й']) from system.numbers limit 10; +select [1, 0, 0, 0, 8, 0, 2, 0, 0, 7] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('чТХЙНщФфцИНБаеЖкОвлиУДР'), ['', 'рВХмжКцНцИЙраштМппсодЛнЧАКуЩ', 'ИХфХЖЧХВкзЩВЙхчфМрчдтКздиОфЙжУ', 'Гзлр', 'фЦи', 'абПф', 'тХЙНщффЦИн', 'нссГбВеЖх', 'амлЗщрсУ', 'фФ']) from system.numbers limit 10; +select [0, 9, 11, 0, 11, 1, 0, 0, 0, 1, 6, 1, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('зДЗпщАцвТгРдврщхЩфЖл'), ['йХЛ', 'Т', 'рд', 'АИЦщгниДфВОе', 'Р', 'здзпщ', 'вКТвВШмгч', 'ввирАйбЗЕЕНПс', 'тХиХоОтхПК', '', 'аЦВТгРДврщ', '', 'уЗЗЖвУЕйтчудноЕКМЖцВРаНТЙЗСОиЕ', 'оЕфПхЕДжАаНхЕцЖжжофЦхкШоБЙр']) from system.numbers limit 10; +select [1, 1, 0, 0, 1, 7, 0, 0, 0, 2] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('йЛПЛшмЦШНЖРрЧрМцкЖзЕНжЧДелФжАн'), ['', 'йЛПлшМЦшНЖррч', 'ПНКдфтДейуиШзЗХАРУХизВ', 'ПценмщЧОФУСЙЖв', '', 'ЦшнжрРчрМЦКЖЗе', 'МрПзЕАгжРбТЧ', 'ЕДФмаФНвТЦгКТЧЦжцЛбещЛ', 'УтПУвЛкТасдЦкеИмОещНИАоИжЖдЛРгБЩнвЖКЛЕП', 'Л']) from system.numbers limit 10; +select [1, 5, 1, 1, 0, 0, 1, 1, 0, 2, 19, 0, 2, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('сйДпмжнДРщКБгфцЖОчтГНБ'), ['', 'МЖнДРщ', 'Сй', '', 'пУщ', 'йгВИАЦнозаемТиХВвожКАПТдкПИаж', 'Сйд', 'СЙДпмжНдРщ', 'ФПщБцАпетаЙФГ', 'ЙдпМжНдрЩКбГфЦжОЧТГНб', 'т', 'гллрБВМнвУБгНаЙцМцТйЙФпзЧОЙЛвчЙ', 'йДПМжндРЩкБ', 'ЗмфОмГСНПщшЧкиССдГБУсчМ']) from system.numbers limit 10; +select [0, 18, 10, 5, 0, 2, 8, 1, 4, 11] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ХпИРддХрмВНйфчвгШиЧМКП'), ['хЗФДлДУБЙаЦтжРБЗсуйнЦпш', 'иЧмК', 'внЙ', 'д', 
'зиМУЩГиГ', 'ПИр', 'РМвнЙфчвгШич', '', 'РдДхРМ', 'нЙфчВГШИ']) from system.numbers limit 10; +select [18, 0, 0, 1, 0, 0, 6, 0, 0, 9] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('нГгФкдуФШуИТбшпХфтаГт'), ['Таг', 'рРпшУйчГд', 'гК', '', 'лаВНбездпШШ', 'ЕБРйаНрОБожкКИсв', 'ДУфШУитБ', 'ГРиГШфШтйфЖлРФзфбащМЗ', 'мхЩжЛнК', 'ШуИтБШ']) from system.numbers limit 10; +select [13, 0, 0, 7, 0, 15, 0, 0, 15, 0, 0, 5, 6, 0, 18, 21, 11, 1] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('рлобшдПЦИхжФуХщжгПФукшзт'), ['УхщжГ', 'ТВщЦфФсчЩГ', 'ЕжФШойжуЛРМчУвк', 'пцИХжфуХЩж', 'бР', 'щЖГПфуКШЗТ', 'йжРГгЛуШКдлил', 'ТщЖГкбШНИщЩеЩлаАГхрАфЙНцЦгВкб', 'щжГПфУ', 'бкаДБЛХ', 'АЗ', 'шДПЦихжфух', 'дП', 'вфнЙобСцвЩмКОбЦсИббФКзЩ', 'пФУкшзТ', 'К', 'жфу', '']) from system.numbers limit 10; +select [12, 19, 8, 1, 0, 0, 0, 15, 0, 0, 12, 2, 0, 4, 0, 0, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЦкЛЗепкЕХЩГлКФрБдТрлвйАхдООШ'), ['лК', 'рЛв', 'Ехщ', '', 'еаПКБгЦЩАоЗВонйТЗгМхццСАаодМЕЩГ', 'ишОНиеБидфбФБЖриУЩЩ', 'дуж', 'РбДТ', 'пЗсГХКсгРущкЙРФкАНЩОржФвбЦнЩНЖЩ', 'щрОУАГФащзхффКвЕйизцсйВТШКбнБПеОГ', 'лкФрБдТРлвЙа', 'КЛзеп', 'УЛФЗРшкРщзеФуМвгПасШЧЛАЦр', 'зеПКеХщглкфР', 'ЦЖЗдХеМЕ', 'зЖжрт', 'уЩФрйрЖдЦз', 'МфцУГЩтвПАЦжтМТоеищЕфнЖй']) from system.numbers limit 10; +select [0, 0, 1, 0, 1, 0, 0, 7, 0, 5, 1, 6, 1, 1, 1, 5, 6, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('РННЕШвжМКФтшДЙлфЛИзЙ'), ['ГаМРош', 'Дтфс', '', 'еБбиаКщГхххШвхМЖКзЛАезФУчХо', 'РНн', 'сВбТМ', 'ЖЗЦПБчиСйе', 'жМкфтШДЙл', 'нЖХуеДзтЧтулиСХпТпеМлИа', 'ШВжМкФТШдЙлфл', '', 'вЖМКфТ', '', '', '', 'швЖМКфтШДЙЛфлИЗй', 'вЖмКФТ', 'еМ']) from system.numbers limit 10; +select [0, 0, 15, 1, 0, 0, 8, 1, 0, 0, 0, 4, 8, 10] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('РиучГийдХутДЕЙДпфиуд'), ['ЩмгцлЖрц', 'ЕСжСлЩЧИЖгЗЛлф', 'дП', '', 'щГЦаБтПШВзЦСрриСЙбД', 'тдРгОЛТШ', 'д', '', 'КЕбЗКСХЦТщЦДЖХпфаЧйоХАл', 'мТвзелНКрЖЧЦПпЕЙвдШтеШйБ', 'ЙОТКрБСШпШд', 'ЧГ', 'ДХУТДЕЙд', 'УТд']) from system.numbers limit 10; +select [0, 0, 0, 0, 15, 0, 0, 0, 11, 0, 0, 5, 1, 1, 0, 2, 3, 0, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('МшазшргхОПивОлбмДоебАшцН'), ['ЦИшштН', 'еМСЗкФЕКДйОНМ', 'ЛСчГрбеРЕбЩМПМЗЦИп', 'ХнИПЧжЗдзФщЗ', 'бмдоЕ', 'гМОдйсбТСЦЩбФВЗШзшщбчегаЕмЕБаХаРР', 'фщнР', 'щмТчФчсМАОгчБщшг', 'иВ', 'УщцГОшТзпУХКоКЖБеМШ', 'мйаАЛцАегСмПОаСТИСфбЧДБКоИВчбЦЙ', 'шРгхоп', '', '', 'еИпАЩпнЛцФжЩХИрЧаИИТЛвшиСНЩ', 'шаЗ', 'АЗ', 'ФгдтфвКЩБреногуир', 'ДБжШгщШБЩпЖИЛК', 'ЧдРЩрбфЛзЙклхдМСФУЙЛн']) from system.numbers limit 10; +select [5, 0, 0, 18, 13, 0, 2, 7, 0, 0, 1, 15, 1, 0, 0, 0, 3, 0, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('хщеКЗПчуНЙтрЧЩгфСбоКЕАДТййАрр'), ['зп', 'хчПЦшпДбзСфНВЧзНжЕМФОП', 'ЧЖхЕУк', 'БОКеАдтЙЙа', 'чЩГфС', 'шллддЩщеМжШйкЩн', 'щЕкзпЧуНЙТ', 'ЧунйтРЧщгФс', 'ввНздЙуоТЖРаВЙчМИчхРвфЛЖБН', 'ЗХМХПщПкктцАзщЙкдпжф', '', 'ГФСбОкеАДтйЙа', '', 'МБХВЕчпБМчуххРбнИМЛТшЩИщЙгаДцзЛАМвйаО', 'ЛкОзц', 'ЕцпАДЗСРрсЕвтВщДвцбЗузУннТИгХжхрцПДРДПм', 'екЗПЧунЙТРчщгФсбоК', 'шпИфЕчгШжцГВСйм', 'ЛхйЧбЧД', 'ВзЗоМцкЩНХГж']) from system.numbers limit 10; +select [0, 0, 6, 20, 0, 10, 0, 0, 0, 9, 10, 3, 23, 1, 0, 0, 2, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('лцапШиХчЛДшдксСНИбшгикзчЙанми'), ['ХууатТдтбодМГЧгщЧнклШтЗПНчкЦОаЙг', 'МЦЧчпИхКЛаФхщХдРУДщжУчфлжахц', 'иХЧлдшдкСсНИбШГикзЧЙ', 'гикЗчйА', 'ГсТзЛОфИББлекЩАсЛвмБ', 'Д', 'ЦХрТЖощНрУШфнужзжецсНХВфЩБбДУоМШШиГйж', 'йуВдЕзоггПВДЖб', 'ЙфБГйХМбжоакЖЛфБаГИаБФСнБЖсТшбмЗЙТГОДКИ', 'ЛДШдКССНИБшГикзч', 'ДШдКССниБ', 'аПШИХчЛДШДКсс', 'з', '', 'ФоохПЩОГЖоУШлКшзЙДоуп', 
'хАДХЩхлвУИсшчрбРШУдФА', 'ЦА', 'гвптУФлчУуРхпрмЖКИрБеЩКчН']) from system.numbers limit 10; +select [0, 4, 5, 7, 15, 3, 3, 17, 7, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('зЗАЩлЕЕЕПИохЧчШвКЧйрсКХдд'), ['пКРбуШОНТЙБГНзИРвЖБсхрЛщчИрлЧУ', 'ЩЛЕЕЕПиоХЧ', 'ЛеЕеп', 'Еепио', 'швкЧйрС', 'ащЛеееПИох', 'АЩлеЕЕпиОхЧЧШвкЧЙРсК', 'КчйРскхД', 'ЕЕПИохччшВКчй', 'у']) from system.numbers limit 10; +select [1, 12, 0, 8, 1, 1, 0, 1, 5, 0, 1, 0, 0, 0, 0, 3, 1, 0, 4, 5] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ПмКСйСКЖККмШеоигЙчПфжТ'), ['', 'Шео', 'РчвлдЙЙлПщуКмтН', 'жкКмшЕоИГЙЧ', '', '', 'йРмМЖнПиЙ', '', 'йс', 'тфФРСцл', '', 'щлЩХиКсС', 'кпнТЖпФЩиЙЛ', 'абкКптбИВгмЧкцфЦртЛДЦФФВоУхЗБн', 'чНшоВСГДМйДлтвфмхХВВуеЩЦВтЖтв', 'кС', '', 'фидБлйеЙЧШРЗЗОулщеЕЩщЙсЙшА', 'СЙс', 'йсКжкКМшЕо']) from system.numbers limit 10; +select [0, 0, 1, 0, 2, 2, 1, 2, 7, 0, 1, 2, 1, 0, 6, 8] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('УгЖЕугАЩХйчидаррлжНпфФГшр'), ['утвШ', 'кЕвФч', 'угжеУг', 'тШлТвЕШЗчЖеЛНджЦазЩХцж', 'гЖеугаЩхй', 'ГжЕугаЩХйЧидАР', 'УгжЕУГаЩХЙЧИда', 'гЖеу', 'ащхЙчИ', 'мЧлщгкЛдмЙЩРЧДИу', '', 'ГжеугАщХйЧиДаРРЛЖНП', '', 'зЕМвИКбУГКЩФшоГЧГ', 'ГАЩХйчИДАррлЖНпФфг', 'ЩХЙчИдАррЛЖНпфФгш']) from system.numbers limit 10; +select [1, 0, 0, 7, 0, 6, 0, 11, 0, 0, 0, 2, 0, 0, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЗЕГЛЩПцГНтзЕЦШЧхНКГТХЙЙФШ'), ['', 'шзкиЗсаИщАБмаз', 'Ж', 'ц', 'гШуЕжЛСПодРнхе', 'пцГНтЗЕЦ', 'щРкЩАеНржЙПМАизшщКвЗщглТкКИф', 'ЗеЦшчхнКГтхЙЙ', 'пелгЩКкцвтфнжЖУуКосЙлкЛ', 'рф', 'хНШчНрАХМШщфЧкЩБНзХУкилЙмП', 'ЕгЛЩПЦгнтзецШЧ', 'ЩУчБчРнЖугабУоиХоИККтО', 'СГмЦШтФШЛмЙЩ', 'ауТПЛШВадоХМПиБу', 'ЩЩйр']) from system.numbers limit 10; +select [2, 2, 1, 0, 0, 0, 0, 0, 1, 0, 7, 9, 0, 15, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('гЙЧЙФХнЖБвомгАШГбОВГксИйцз'), ['ЙЧйфхНЖбвО', 'Й', 'гЙЧйфхнЖбв', 'хсЩмШЙЙММВЦмУБТчгзУЛР', 'зктшп', 'дЕоиЖлгШж', 'хКкаНЛБ', 'ЗКйСчсоЗшскГЩбИта', '', 'у', 'НжбВОмгашГ', 'БВо', 'ещфРШлчСчмаЖШПЧфоК', 'шгбо', 'ЙСтШШДЩшзМмдпЧдЙЖевТвоУСЕп', 'Л']) from system.numbers limit 10; +select [0, 9, 0, 0, 18, 13, 13, 11, 0, 0, 4, 1] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЙЛмоЦСдТаоФчШКЖЦСНРаРЦзоС'), ['ДфгЗАасВфаМмшхчлмР', 'аоФчШкЖцСнРАРЦзОС', 'зЩзнйтФРТЙжУлхФВт', 'чЦкШВчЕщДУМкхЛУЩФшА', 'н', 'Шк', 'шКЖцсНРаРцЗос', 'фчшкЖцснрАРЦз', 'лку', 'пЧШМЦквоемЕщ', 'о', 'йЛМоцСДТАофЧшкжЦСнРаРЦзос']) from system.numbers limit 10; +select [21, 0, 0, 17, 1, 11, 0, 2, 0, 7] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('кЧЖнЕбМЛпШЗХиЙжиМщлнСФрПЧЖВН'), ['сФ', 'гцХаШЛсаШЛкшфЧОКЛцзешХСиЩоаЕОш', 'Г', 'МщЛНСФРпч', '', 'зХ', 'ОАДепНпСГшгФАЦмлуНуШШЗфдЧРШфрБЛчРМ', 'чЖне', 'СфЕАбФн', 'М']) from system.numbers limit 10; +select [4, 0, 1, 1, 0, 2, 4, 16, 3, 6, 5, 0, 0, 6, 1, 0, 5, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('кдАпЩСШИСцРхтеСиФЖЧСсОоц'), ['пщСшиСцрХТЕсифЖчССоОц', 'рхнкикДТКДВШчиЖЦнВм', '', '', 'жПЛСнЦцн', 'дА', 'ПщсШИсцрХтЕс', 'иФжЧсСоОЦ', 'ап', 'с', 'щсШИ', 'МАзашДРПЩПзРТЛАсБцкСШнЕРЙцИЩлТЛеУ', 'ичцпДбАК', 'сшИСЦрхтЕсифжчСсООц', 'КдАПЩСшИСЦРХТЕсИфЖЧСсо', 'ЛнБсИПоМЩвЛпиЩЗЖСд', 'щс', 'шщДНБаСщЗАхкизжнЛАХЙ']) from system.numbers limit 10; +select [0, 13, 0, 2, 16, 1, 3, 0, 9, 0, 2, 0, 1, 4, 0, 0, 0, 1] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('иНхеЕкхЩщмгзМГхсгРБсхОКцУгуНБ'), ['ДиоУлФЖЛисУСЕтсЕалщн', 'МгХсгрБСХО', 'ЖХНцршПшгйО', 'нХЕЕкхЩ', 'сГРбсхОКцУг', '', 'х', 'Ж', 'щМгЗмгхСг', 'СрпхДГОУ', 'НхеЕкХщ', 'ПМтБцЦЙЖАЙКВБпФ', 'ИнхеЕ', 'еЕКхЩ', 'мМГлРзш', 'гтдоЙБСВещкЩАЩЦйТВИгоАЦлчКнНРНПДЖшСЧа', 'ЖшеН', 
'']) from system.numbers limit 10; +select [1, 5, 0, 0, 3, 0, 2, 0, 14, 14, 1, 0, 17, 13, 3, 25] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('айлзсЗБоГйтГжЙРККФхКшлНРОрЦкфо'), ['', 'с', 'Д', 'шиБраНИЦЧуИжп', 'Лз', 'ДРБСУфКСшцГДц', 'йЛЗСЗбОгЙтГЖйРК', 'ЕЙЦсвРЕШшщЕЗб', 'ЙркКфхкшЛнРОР', 'ЙРкКФхкШ', 'а', 'ГдоДКшСудНл', 'КФхКшлНРоР', 'ж', 'лзСзБогйТГЖйрККф', 'оР']) from system.numbers limit 10; +select [6, 0, 8, 10, 1, 0, 1, 13, 0, 0, 0, 2, 2, 0, 4, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('РучУлрХчЗУпИчДТЕфщИЙщрНлн'), ['РХЧ', 'оДсГСЛЙшйиЧРСКзчХВоХарцНШ', 'ЧЗУпИ', 'УПичдТе', 'Р', 'ВЙЩхжАутПСНЦфхКщеЩИуЧдчусцАесзМпмУв', '', 'ЧдТ', 'ООсШИ', 'ФШсВжХтБУШз', 'ЕЩуДдшкМУРЕБшщпДОСАцйауи', 'УЧ', 'УЧУЛрХчзуПИчдТеФщий', 'йнЦцДСхйШВЛнШКМСфмдЩВйлнеЖуВдС', 'улрхчзупиЧдтефщИ', 'СХТЧШшГТВвлЕИчНОВи']) from system.numbers limit 10; +select [0, 0, 0, 2, 1, 1, 0, 1, 19, 0, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('УецжлЦЦщМшРГгЩЩдБмхЖЗЧзШЙб'), ['НзИуАузуРЗРуКфоТМмлПкрсмЕЕЕнТ', 'ЕЩГХхЧш', 'ХоЙпООчфЖввИжЙшЖжЕФОтБхлВен', 'ЕЦЖЛЦцщ', '', '', 'ухогСИФвемдпаШЗуЛтлизОЧ', 'УецЖ', 'ХЖзЧЗ', 'П', 'мБкзХ', 'уБуОБхШ']) from system.numbers limit 10; +select [6, 1, 15, 5, 0, 0, 0, 3, 2, 4, 0, 12, 0, 2, 0, 3, 1, 6, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ГЖФеачМаКчПСпкВкхсПтг'), ['чмАкЧ', '', 'ВкХс', 'ачМА', 'КлтжУлОЛршБЕблФЩ', 'тцуМфж', 'л', 'фе', 'Жф', 'ЕАЧМак', 'лЖЕРТнФбЧЙТййвзШМСплИхбЙЛЖзДпм', 'СпкВК', 'ЩзчжИш', 'жФеАчМ', 'КбЦбйЕШмКтЩЕКдуЩтмпИЕВТЖл', 'ФЕаЧмАКчПСПквкхспТ', 'гжФеАЧмаКчпСп', 'ЧмАК', 'дцкДННМБцйЕгайхшжПГх', 'ТЩбвЦЖАНшрАШФДчОщй']) from system.numbers limit 10; +select [1, 6, 0, 1, 0, 0, 3, 1, 2, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('МФННЧйОнцЛИЧЕПШПЧйоГФО'), ['', 'йОн', 'шУлгИЛЛРЙАсфЗоИЙЗРхуПбОЙсшдхо', 'МФННчЙоНц', 'лзВжбЦзфкзтуОйзуЗ', 'ЖГДщшЦзсжщцЦЖеЧвРфНИНОСАОщг', 'ННчйОНЦлИчЕПШ', '', 'Ф', 'ЩрИдНСлЙуАНЗвЕчмчАКмФУипндиП']) from system.numbers limit 10; +select [5, 0, 8, 13, 0, 0, 0, 1, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('зВйймХЩМзЦГЕкЕКфоСтхПблуКМхц'), ['МХщмз', 'НАНрШоНДмурМлО', 'мзцгЕкек', 'кеКфоСтХПбЛУК', 'СУУксО', 'ЦоШжЧфйШЦаГЧйбЛШГЙггцРРчт', 'НбтвВбМ', '', 'тЩФкСтоСЧЦЦЙаСДЩСГЙГРИФЗОЗфбТДЙИб', 'ВГж']) from system.numbers limit 10; +select [0, 0, 0, 8, 19, 0, 3, 12, 1, 4] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ДпбЙЖНЗбПнЛбахБаХТуабШ'), ['цИаЩвгеИР', 'Ф', 'РЖиА', 'БпнЛб', 'У', 'Тфн', 'Б', 'БА', '', 'ЙЖНзБПнлбАхбаХ']) from system.numbers limit 10; +select [0, 0, 0, 0, 0, 1, 0, 17, 1, 0, 1, 1, 1, 11, 0, 1, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ТЦмЩОинХзоДДпПНЩигрРщОзКц'), ['ЕжЙВпПл', 'ВКфКТ', 'ШкДсЖхшфоПИадУбхФЩБчОАкпУеБхи', 'НТЕЙОШЦЖоЩбзВзшс', 'учГгуКФзлУдНУУуПУлкаЦЕ', '', 'фАПМКуЧйБЧзСоЗргШДб', 'ИГРрщОзк', '', 'йупОМшУйзВиВрЛЩЕеЩмп', '', '', '', 'дДППнщИгРР', 'ШФвИЧакеЦвШ', 'ТцМЩоинхЗОДдппнЩ', 'мрОгЩшЩеЧ', 'еЖРиркуаОТсАолЩДББВАМБфРфпШшРРРм']) from system.numbers limit 10; +select [3, 0, 0, 0, 0, 0, 1, 0, 0, 14, 0, 1, 0, 1, 1, 1, 0, 7] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('аОкиЛгКйхаОГОУзЦЛрбцш'), ['кИЛГкйхАогоУЗЦл', 'щЧДпХИхбпсГвфДФХкчХ', 'ШвАмБЗлДОИПткжхФТФН', 'щфсхФмЦсЛеувЙО', 'лВУЖц', 'еИщРшозЖАдцтКииДУлДОУФв', 'а', 'ХгЦРШ', 'ФзрЖкРЗЩЧИеЧцКФИфЧЧжаооИФк', 'уЗ', 'фЦФдцжжМчЗЖлиСЧзлщжжЦт', '', 'МдхжизИХфвбМААрйФНХдЕжп', 'аОкиЛг', 'АОКИЛгкйХАОГОУЗЦ', '', 'МбЖйрсумщиеОЩк', 'КйХАоГоУЗцлРБЦШ']) from system.numbers limit 10; +select [0, 0, 2, 1, 0, 0, 12, 0, 17, 0, 0, 0, 2, 0] = 
multiSearchAllPositionsCaseInsensitiveUTF8(materialize('КУчЛХФчЛХшвбМЦинРвНрФМРкмиеЕп'), ['ТБЩБзхАмщПщЧПИФПашгЕТиКЦМБМпСЩСуЩМчтшеш', 'йлВЕЙшфшаШЗШЩВХЦчЛБс', 'УЧл', '', 'ЛДсЖщмНЦсКуфЗуГиука', 'РТТОТфГЕлЩЕгЛтДфлВЖШГзЦЖвнЗ', 'БМцИНРвнРф', 'ОЕИЕдИсАНаифТПмузЧчЖфШЕуеЩсслСШМоЖуЩЛМп', 'рвНРфМркМи', 'ЦзБМСиКчУжКУЩИИПУДвлбдБИОЙКТЛвтз', 'злСГе', 'ВдтцвОИРМЕжХО', 'учЛХфЧл', 'БшччШбУзЕТзфКпиШжнезвоеК']) from system.numbers limit 10; +select [0, 7, 0, 0, 0, 0, 7, 6, 0, 16, 12, 12, 15, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('оЖиогсфклШМСДрбхРбМбрЕщНЙЗйод'), ['иПмДКейууОклНХГЗсбаЙдШ', 'ФКлШмсДрБХРбМбрещНЙЗЙОд', 'арчжтСТнк', 'чбТНЛЕжооЗшзОУ', 'ощАЩучРСУгауДхГКлмОхЙцЕо', 'аЛбкиЦаКМбКхБМДнмФМкйРвРр', 'ФКлШмСДрбХРбм', 'СфклШ', 'еДйилкУлиИчХЙШтхцЗБУ', 'хрБ', 'СДрбХрбМБР', 'СдрбхРБ', 'бхрБМБРЕщНйз', 'КИб']) from system.numbers limit 10; +select [22, 1, 8, 0, 0, 1, 0, 3, 0, 6, 20, 0, 0, 0, 4, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('ЕЖДФбКужЙЦЦмсЖГГжБзеЙнПйЙри'), ['НПййР', '', 'Жй', 'Щ', 'ФхУО', 'ЕЖДфБКУЖйЦЦмСжГГ', 'НФЙзщЩГЧпфсфЦШОМЕЗгцрс', 'д', 'ЦтщДДЖтбвкгКонСк', 'кУЖЙЦЦм', 'ЕйНПййРИ', 'РчеЙйичФбдЦОтпчлТЖИлДучЙПгЗр', 'внчзшЗзОнФфхДгфзХТеНПШРшфБТЖДйф', 'кНснгмулМуГНурщЕББСузВмбнЧаХ', 'фбКУЖйЦцМсЖГгЖб', 'ЩСЕ']) from system.numbers limit 10; +select [0, 0, 0, 1, 10, 4, 0, 0, 5, 0, 1, 0, 7, 0, 3, 7, 0, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('чБхлжгКЖХлЙнкКЦфжЕгЖАндЧ'), ['ПдмРрЖАтВнСдСБШпПЗГгшИ', 'цшцг', 'тчАЙЧОеЕАвГпЗцЖЧгдХуЛСЛНрвАЖщ', '', 'Лй', 'Л', 'ОйррцУжчуЦБАжтшл', 'вХУКк', 'жгКжхЛЙН', 'уцбЕЕОЧГКУПуШХВЕчГБнт', '', 'ПсАжБИКштЕаН', 'КжхлЙН', 'ЩгШухЦПАТКежхгХксгокбщФЙПсдТНШФЦ', 'Х', 'кЖХЛйНккЦФжЕГЖ', 'ЙзРДСПднаСтбЧЖхощ', 'пАПОУЧмИпслБЗПфУ']) from system.numbers limit 10; +select [0, 0, 0, 5, 2, 16, 4, 4, 11, 0, 0, 3, 3, 0, 0, 6] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('кпМаоуГГфвощолЦЩщЧПРОКепеА'), ['ЗзуФжНшщПТнЧЦКВОиАУсЧХОШбк', 'тмПкАпеайзуХсурШй', 'АЕЦавбШиСДвВДумВкиИУБШЕ', 'о', 'ПМаОУггФВощоЛЦЩЩЧПрокЕПеа', 'щЩ', 'аоУг', 'аОуГгФВ', 'оЩоЛЦЩщчПРОК', 'виХЛшчБсщ', 'УчАМаЦкйДЦфКСмГУЧт', 'мАоУ', 'МАО', 'щФФА', 'Н', 'У']) from system.numbers limit 10; +select [0, 3, 10, 8, 3, 0, 4, 0, 9, 4, 1, 9] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('щЙЧРпшИцхпргЦНуДййусЧЧнЖ'), ['ДлУцтееЖБКХгМзСВжА', 'чРпШИЦ', 'пргЦнУДЙЙУ', 'Ц', 'ЧРПш', 'нЩрЕвмрМеРйхтшЩче', 'РпШИЦхПРГцнУд', 'ПНоЙтПкоаОКгПОМЦпДЛФЩДНКПбСгЗНЗ', 'ХПРГцНудЙЙ', 'рПши', '', 'ХПРГ']) from system.numbers limit 10; +select [11, 4, 1, 0, 1, 0, 0, 0, 0, 12, 0, 9, 5, 0, 16, 0, 12, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('пкзщщЛНОНбфЦноИЧфхбФ'), ['ф', 'щщл', 'ПКзЩщЛНОн', 'ЩшФйЧБНДОИзМхеЖНЦцеЛлУЧ', '', 'сЗоЙТклйДШкДИЗгЖ', 'орЛФХПвБбУхНс', 'доЗмЩВу', 'ШиЕ', 'ЦНО', 'ндЩдРУЖШМпнзНссЖШДЦФвпТмуМЙйцН', 'НбФЦнОИч', 'ЩлНонБФ', 'ЛдРжКММЙм', 'чфх', 'ЦматДйиСфЦфааЦо', 'ЦНОИчФх', 'иржЦщн']) from system.numbers limit 10; +select [0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 3, 0, 5] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('чЖажцВбшЛттзДааАугШщАйПгщП'), ['ШгУТсчГОВЦЦеЛАСфдЗоЗЦВЛйлТДзчвЛва', 'УшЕшищЖткрвРСйиФЗйТФТЛЗаЗ', 'ВдикЙббщузоФХщХХГтЗоДпхбЕкМщц', 'срйеХ', 'рАшуПсЙоДнхчВкПЖ', '', 'гНЗбКРНСБВрАВФлнДШг', 'фХЧгмКнлПШлЩР', 'мкйЗбИФрЗахжгАдвЕ', 'чжаЖцВБШлТ', 'лХЕСрлПрОс', '', 'ЗЧПтчЙОцвОФУФО', 'ажцвБшЛТт', 'уНчЖШчМЕА', 'ц']) from system.numbers limit 10; +select [7, 1, 0, 7, 1, 19, 8, 6, 3, 0, 2, 13, 6, 0] = multiSearchAllPositionsCaseInsensitiveUTF8(materialize('НТКПпмБжДцбАКПНСЖоиТФД'), ['б', '', 'аУщЛМХЖбвИтНчГБМГдДнч', 'Б', 'НТкппм', 'и', 'Жд', 'МБждЦбАкП', 
'кппмБждцБа', 'ПЕрнЦпМЦВгЧЧгГ', 'ткПпМБЖДцбаКпнСжО', 'кПнСЖоИ', 'МБжДцБакпН', 'гхОХжГуОвШШАкфКМщсшФДШеИжоАйг']) from system.numbers limit 10; + +select 0 = multiSearchAny(materialize('mpnsguhwsitzvuleiwebwjfitmsg'), ['wbirxqoabpblrnvvmjizj', 'cfcxhuvrexyzyjsh', 'oldhtubemyuqlqbwvwwkwin', 'bumoozxdkjglzu', 'intxlfohlxmajjomw', 'dxkeghohv', 'arsvmwwkjeopnlwnan', 'ouugllgowpqtaxslcopkytbfhifaxbgt', 'hkedmjlbcrzvryaopjqdjjc', 'tbqkljywstuahzh', 'o', 'wowoclosyfcuwotmvjygzuzhrery', 'vpefjiffkhlggntcu', 'ytdixvasrorhripzfhjdmlhqksmctyycwp']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('qjjzqexjpgkglgxpzrbqbnskq'), ['vaiatcjacmlffdzsejpdareqzy', 'xspcfzdufkmecud', 'bcvtbuqtctq', 'nkcopwbfytgemkqcfnnno', 'dylxnzuyhq', 'tno', 'scukuhufly', 'cdyquzuqlptv', 'ohluyfeksyxepezdhqmtfmgkvzsyph', 'ualzwtahvqvtijwp', 'jg', 'gwbawqlngzcknzgtmlj', 'qimvjcgbkkp', 'eaedbcgyrdvv', 'qcwrncjoewwedyyewcdkh', 'uqcvhngoqngmitjfxpznqomertqnqcveoqk', 'ydrgjiankgygpm', 'axepgap']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('fdkmtqmxnegwvnjhghjq'), ['vynkybvdmhgeezybbdqfrukibisj', 'knazzamgjjpavwhvdkwigykh', 'peumnifrmdhhmrqqnemw', 'lmsnyvqoisinlaqobxojlwfbi', 'oqwfzs', 'dymudxxeodwjpgbibnkvr', 'vomtfsnizkplgzktqyoiw', 'yoyfuhlpgrzds', 'cefao', 'gi', 'srpgxfjwl', 'etsjusdeiwbfe', 'ikvtzdopxo', 'ljfkavrau', 'soqdhxtenfrkmeic', 'ktprjwfcelzbup', 'pcvuoddqwsaurcqdtjfnczekwni', 'agkqkqxkfbkfgyqliahsljim']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('khljxzxlpcrxpkrfybbfk'), ['', 'lpc', 'rxpkrfybb', 'crxp', '', 'pkr', 'jxzxlpcrxpkrf', '', 'xzxlpcr', 'xpk', 'fyb', 'xzxlpcrxpkrfybbfk', 'k', 'lpcrxp', 'ljxzxlpcr', 'r', 'pkr', 'fk']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('rbrizgjbigvzfnpgmpkqxoqxvdj'), ['ee', 'cohqnb', 'msol', 'yhlujcvhklnhuomy', 'ietn', 'vgmnlkcsybtokrepzrm', 'wspiryefojxysgrzsxyrluykxfnnbzdstcel', 'mxisnsivndbefqxwznimwgazuulupbaihavg', 'vpzdjvqqeizascxmzdhuq', 'pgvncohlxcqjhfkm', 'mbaypcnfapltsegquurahlsruqvipfhrhq', 'ioxjbcyyqujfveujfhnfdfokfcrlsincjbdt', 'cnvlujyowompdrqjwjx', 'wobwed', 'kdfhaoxiuifotmptcmdbk', 'leoamsnorcvtlmokdomkzuo', 'jjw', 'ogugysetxuqmvggneosbsfbonszepsatq']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('uymwxzyjbfegbhgswiqhinf'), ['lizxzbzlwljkr', 'ukxygktlpzuyijcqeqktxenlaqi', 'onperabgbdiafsxwbvpjtyt', 'xfqgoqvhqph', 'aflmcwabtwgmajmmqelxwkaolyyhmdlc', 'yfz', 'meffuiaicvwed', 'hhzvgmifzamgftkifaeowayjrnnzw', 'nwewybtajv', 'ectiye', 'epjeiljegmqqjncubj', 'zsjgftqjrn', 'pssng', 'raqoarfhdoeujulvqmdo']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('omgghgnzjmecpzqmtcvw'), ['fjhlzbszodmzavzg', 'gfofrnwrxprkfiokv', 'jmjiiqpgznlmyrxwewzqzbe', 'pkyrsqkltlmxr', 'crqgkgqkkyujcyoc', 'endagbcxwqhueczuasykmajfsvtcmh', 'xytmxtrnkdysuwltqomehddp', 'etmdxyyfotfyifwvbykghijvwv', 'mwqtgrncyhkfhjdg', 'iuvymofrqpp', 'pgllsdanlhzqhkstwsmzzftp', 'disjylcceufxtjdvhy']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('mznihnmshftvnmmhnrulizzpslq'), ['nrul', 'mshftvnmmhnr', 'z', 'mhnrulizzps', 'hftvnmmhnrul', 'ihnmshftvnmmhnrulizzp', 'izz', '', 'uli', 'nihnmshftvnmmhnru', 'hnrulizzp', 'nrulizz']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('ruqmqrsxrbftvruvahonradau'), ['uqmqrsxrbft', 'ftv', 'tvruvahonrad', 'mqrsxrbftvruvahon', 'rbftvruvah', 'qrsxrbftvru', 'o', 'ahonradau', 'a', 'ft', '', 'u', 'rsxrbftvruvahonradau', 'ruvahon', 'bftvruvahonradau', 'qrsxrbftvru', 't', 'vahonrada', 'vruvahonradau', 'onra']) 
from system.numbers limit 10; +select 1 = multiSearchAny(materialize('gpsevxtcoeexrltyzduyidmtzxf'), ['exrltyzduyid', 'vxtcoeexrltyz', 'xr', 'ltyzduyidmt', 'yzduy', 'exr', 'coeexrltyzduy', 'coeexrltyzduy', 'rlty', 'rltyzduyidm', 'exrltyz', 'xtcoeexrlty', 'vxtcoeexrltyzduyidm', '', 'coeexrl', 'sevxtcoeexrltyzdu', 'dmt', '']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('dyhycfhzyewaikgursyxfkuv'), ['sktnofpugrmyxmbizzrivmhn', 'fhlgadpoqcvktbfzncxbllvwutdawmw', 'eewzjpcgzrqmltbgmhafwlwqb', 'tpogbkyj', 'rtllntxjgkzs', 'mirbvsqexscnzglogigbujgdwjvcv', 'iktwpgjsakemewmahgqza', 'xgfvzkvqgiuoihjjnxwwpznxhz', 'nxaumpaknreklbwynvxdsmatjekdlxvklh', 'zadzwqhgfxqllihuudozxeixyokhny', 'tdqpgfpzexlkslodps', 'slztannufxaabqfcjyfquafgfhfb', 'xvjldhfuwurvkb', 'aecv', 'uycfsughpikqsbcmwvqygdyexkcykhbnau', 'jr']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('vbcsettndwuntnruiyclvvwoo'), ['dwuntnru', '', 'ttndwuntnruiyclvv', 'ntnr', 'nruiyclvvw', 'wo', '', 'bcsettndwuntnruiycl', 'yc', 'untnruiyclvvw', 'csettndwuntnr', 'ntnruiyclvvwo']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('pqqnugshlczcuxhpjxjbcnro'), ['dpeedqy', 'rtsc', 'jdgla', 'qkgudqjiyzvlvsj', 'xmfxawhijgxxtydbd', 'ebgzazqthb', 'wyrjhvhwzhmpybnylirrn', 'iviqbyuclayqketooztwegtkgwnsezfl', 'bhvidy', 'hijctxxweboq', 't', 'osnzfbziidteiaifgaanm']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('loqchlxspwuvvccucskuytr'), ['', 'k', 'qchlxspwu', 'u', 'hlxspwuvv', 'wuvvccucsku', 'vcc', 'uyt', 'uvv', 'spwu', 'ytr', 'wuvvccucs', 'xspwuv', 'lxspwuvvccuc', 'spwuvvccu', 'oqchlxspwuvvccucskuy']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('pjjyzupzwllshlnatiujmwvaofr'), ['lnatiujmwvao', '', 'zupzwllsh', 'nati', 'wllshl', 'hlnatiujmwv', 'mwvao', 'shlnat', 'ati', 'wllshlnatiujmwvao', 'wllshlnatiujmwvaofr', 'nat']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('iketunkleyaqaxdlocci'), ['nkleyaqaxd', 'etunkleyaq', 'yaqaxdlocci', 'tunkleyaq', 'eyaqaxdlocc', 'leyaq', 'nkleyaqaxdl', 'tunkleya', 'kleyaqa', 'etunkleya', 'leyaqa', 'dlo', 'yaqa', 'leyaqaxd', 'etunkleyaq', '']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('drqianqtangmgbdwruvblkqd'), ['wusajejyucamkyl', 'wsgibljugzrpkniliy', 'lhwqqiuafwffyersqjgjvvvfurx', 'jfokpzzxfdonelorqu', 'ccwkpcgac', 'jmyulqpndkmzbfztobwtm', 'rwrgfkccgxht', 'ggldjecrgbngkonphtcxrkcviujihidjx', 'spwweavbiokizv', 'lv', 'krb', 'vstnhvkbwlqbconaxgbfobqky', 'pvxwdc', 'thrl', 'ahsblffdveamceonqwrbeyxzccmux', 'yozji', 'oejtaxwmeovtqtz', 'zsnzznvqpxdvdxhznxrjn', 'hse', 'kcmkrccxmljzizracxwmpoaggywhdfpxkq']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('yasnpckniistxcejowfijjsvkdajz'), ['slkpxhtsmrtvtm', 'crsbq', 'rdeshtxbfrlfwpsqojassxmvlfbzefldavmgme', 'ipetilcbpsfroefkjirquciwtxhrimbmwnlyv', 'knjpwkmdwbvdbapuyqbtsw', 'horueidziztxovqhsicnklmharuxhtgrsr', 'ofohrgpz', 'oneqnwyevbaqsonrcpmxcynflojmsnix', 'shg', 'nglqzczevgevwawdfperpeytuodjlf']) from system.numbers limit 10; +select 0 = multiSearchAny(materialize('ueptpscfgxhplwsueckkxs'), ['ohhygchclbpcdwmftperprn', 'dvpjdqmqckekndvcerqrpkxen', 'lohhvarnmyi', 'zppd', 'qmqxgfewitsunbuhffozcpjtc', 'hsjbioisycsrawktqssjovkmltxodjgv', 'dbzuunwbkrtosyvctdujqtvaawfnvuq', 'gupbvpqthqxae', 'abjdmijaaiasnccgxttmqdsz', 'uccyumqoyqe', 'kxxliepyzlc', 'wbqcqtbyyjbqcgdbpkmzugksmcxhvr', 'piedxm', 'uncpphzoif', 'exkdankwck', 'qeitzozdrqopsergzr', 'hesgrhaftgesnzflrrtjdobxhbepjoas', 'wfpexx']) from system.numbers limit 
10; +select 1 = multiSearchAny(materialize('ldrzgttlqaphekkkdukgngl'), ['gttlqaphekkkdukgn', 'ekkkd', 'gttlqaphe', 'qaphek', 'h', 'kdu', 'he', 'phek', '', 'drzgttlqaphekkkd']) from system.numbers limit 10; +select 1 = multiSearchAny(materialize('ololo'), ['ololo', 'ololo', 'ololo']); + +select 1 = multiSearchAnyUTF8(materialize('иечбпрхгебилцмпфвжцс'), ['лцмпфвж', 'ечбпрхгебилц', 'фвж', 'мпфвж', 'вжцс', 'пфвжцс', 'ц', 'чбпрхгебил', 'илцмп', 'фвж', 'ечбпрхгеби', '', 'б', 'хгеб', '', '', 'ил', 'ебилцмпфвжцс']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('змейдмоодкшуищвеишчддуцпх'), ['здсщесгдкзмчбжчщчиоо', 'чфззцмудщхтфрмсзрвшйщ', 'рлунбнзрфубуббдочтвлзмпгскузохк', 'ктзлебцам', 'вчспмж', 'нгкк', 'гпзйа', 'щпйкччнабакцтлапсбваихншхфридб', 'афсузжнайхфи', 'йрздеучфдбсвпжохрз', 'ошбечпзлг', 'полшхидфр']) from system.numbers limit 10; +select 1 = multiSearchAnyUTF8(materialize('лшнуухевгплвйужчошгнкнгбпщф'), ['гбпщф', 'б', 'ф', 'чошгнкнг', 'йужчо', 'гплвйужчошгнкн', 'бпщф', 'плвйужч', 'шгнкнг', 'хевгплвй', 'плвйужчошгн', 'вй', 'лвйужчошгнкнгбпщф', 'лвйужчошгнкн']) from system.numbers limit 10; +select 1 = multiSearchAnyUTF8(materialize('кцпгуоойвщталпобщафибирад'), ['ойвщталпобща', 'щта', 'пгуоойвщтал', 'ф', 'общ', 'цпгуоойвщталпобща', 'побщ', 'ф', 'цпгуоойвщталпобщафиб', 'побщаф', 'лпобщафи', 'цпгуоойвщталпобщафи', 'пгуоойвщталпобщаф', 'талпоб', 'уоойвщталпо', 'гуоойвщтал', 'уоойвщталп', 'щ', '', 'цпгуоойвщталпобщафибирад']) from system.numbers limit 10; +select 1 = multiSearchAnyUTF8(materialize('фвгйсеккзбщвфтмблщходео'), ['еккзбщвфтмблщходе', 'йсеккзбщвфтм', 'вфтмблщходео', 'вгйсеккзбщ', '', 'йсеккзбщвфт', 'бщвфтмблщход', 'ккзбщвфтмблщход', 'ккзбщвфтм', 'еккзбщвфтмблщходе', 'еккзбщвфтмблщх', 'вгйсеккзбщвф', 'оде', 'оде', '', 'бщвфтмблщх', 'б', 'йсеккзбщвфтмблщходео', 'вфтмблщ', 'кзбщ']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('хбаипфшнкнлтбшрскшщдувчтг'), ['хгшгднфуркшщвфгдглххс', 'цогчщки', 'тдмщшйзйхиквмб', 'етелфмшвмтзгеурнтбгчнщпмйпйжжциш', 'чсбк', 'ибащлшздеуревжйфуепфхкузбзао', 'дкмбщдсбжййсвгкхбхпшноншлщ', 'щхбеехнцегрфжжу', 'збфлпгсмащр', 'скчдигцнсзфрещйлвзнбнл', 'освзелагррдоортлрз', 'утхрч', 'йкбрвруенчччпшрнгмхобщимантешищщбж', 'жгивтеншхкцаргдасгирфанебкзаспбдшж', 'ййекжшщцщ', 'ефдсфбунйчдбуй', 'бвжцирзшмзщ', 'випжцщйзхнгахчсцвфгщзкдтвчйцемшлй', 'лдрфгвднеиопннтчсйффвлхемввег', 'бмтцжжеоебщупфчазпгхггцегнрутр']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('фбуоойпцщишщлбхчрсллзвг'), ['уччхщ', 'вщчсарфмйшгшпйфгмжугмщжкцщгйжзфл', 'кклл', 'лпнжирпсиуо', 'нчипзфщхнтштхйхщрпзитко', 'вйпсдергвцзсцсгмхпбз', 'чфщдфоилгцевпц', 'чааиае', 'чгингршжтчпу', 'щетбнгутшйсгмвмучдхстнбрптничихб']) from system.numbers limit 10; +select 1 = multiSearchAnyUTF8(materialize('лйвзжфснтлгбгцерлзсжфещ'), ['зсжф', '', 'бгц', 'зжфснтлгбгц', 'л', 'цер', 'жфснтлгбгц', 'тлгбг', 'це', 'гбгцерл', 'нтлгбгцерлзсж', 'жфещ', 'взжфснтлг', 'фснтлгбгцерлзсжфещ', 'нтлгбгцерлзсж', 'зжфснтлгбг', 'взжфснтлгбгцерлз', 'взжфснтлгбгце']) from system.numbers limit 10; +select 1 = multiSearchAnyUTF8(materialize('нфдцжбхуучеинивсжуеблмйрзцршз'), ['чеинивсжуеблм', 'жуебл', 'блмйрзцрш', 'цр', 'м', 'фдцжбхуучеинивсжуеблмйрзцр', 'нивсж', 'ивсжуеблмй', 'й', 'всжуеблмйрзцршз']) from system.numbers limit 10; +select 1 = multiSearchAnyUTF8(materialize('всщромуцйсхрпчщрхгбцмхшуиоб'), ['муцйсхрп', '', 'уцйсхрп', 'сщромуцйсхрпчщ', 'схрпчщр', 'сщромуцйсхрп', '', 'уцйсхрпчщрхгбцмх', '', 'цмхшуиоб', 'гбц', 'пчщр', 'цйсхрпчщр', 'омуцйсхрпч', 
'схрпчщрхгбцм', 'йсхрпчщрхгбцм', '', 'пчщрхгбцм', 'уцйсхрпчщрхгбцмх', 'омуцйсхрпчщ']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('уузшсржоцчтсачтедебозцвчвс'), ['бомбсзхйхкх', 'отвгстзихфойукарацуздшгбщеховпзкй', 'мфнев', 'вйийшшггилцохнзбхрлхи', 'втинбтпсщрбевзуокб', 'оиойвулхкзлифкзиххт', 'зацччзвибшицщрзиптвицзхщхкбйгшфи', 'кнузршшднмвтощрцвтрулхцх', 'рчбкагчкпзжвтбажиабиркдсройцл', 'щргчкзожийтпдзфч', 'щбошгщзсжтнжцтлкщитеееигзцлцсмч', 'сцкк']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('щчбслгзвйдйжрнщчвфшй'), ['пдашзбалйнзвузкдвймц', 'щхтшйоч', 'фднвфигозржаз', 'рйфопхкшщвщдвл', 'цдкйхтусожпешпджпатфуиткп', 'щпбчсслгщййлвскшц', 'жпснс', 'уиицуувешвмчмиеднлекшснчлйц', 'пххаедштхмчщчбч', 'ичтмжз', 'лсбкчу', 'бгфдвпзрл', 'йицц', 'цфйвфлнвопкмщк', 'бгщцвбелхефв', 'мймсвзаелхнжйчохомлизенфш', 'трйднхндшсщмпвщомашчнгхд', 'жфцнифлгдзйе', 'зспкшщщенбцжгл', 'рщтб']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('шщпееасбтхогвгвцниуевисгшгбч'), ['гпа', 'стимсркзебхрвфпиемзчзу', 'нзгофухвекудблкадбшшусбеулрлмгфнйгиух', 'кфиашфобакщворувгвкчавфзшх', 'гфпгщгедкмтгрдодфпуйддхзчждихгрчтб', 'тцтжр', 'рцйна', 'йцбпбдрреаолг', 'житсфосшлтгсщдцидгсгфтвлз', 'жвтнжедцфцтхжчщч']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('вхкшгфпфмнщаохтмизпврйопцуйзмк'), ['дтчбкхащаткифружжейабфйкйтрскбощиеч', 'фтоуабхмдааиснрбраттклмйонлфна', 'цадзиднщймшкщолттпгщбх', 'кштбчжтждпкцнтщвмухнлби', 'микудпдпумцдцгфахгб', 'ирик', 'емлжухвмк', 'чгуросфйдцшигцхжрухжпшдкфгдклмдцнмодкп', 'ттбнллквдувтфжвчттжщажзчлнбждчщцонцлуж', 'елцофйамкхзегхклйгглаувфтуувее', 'двкзчсифвтекб', 'шсус']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('йхцглкцвзтшщочпзмнчтуеао'), ['йечдай', 'дащжщзлосмй', 'афуккгугаазшрчпцнхщцтмлфф', 'чфтфскрфйщк', 'жлччкцшнфижтехппафхвщфс', 'бзжчв', 'щкщймнкщлпедидсу', 'оцбажцзшзйпптгщтфекртдпдзшодвойвох', 'йжддбссерхичгнчлкидвгбдзуфембрц', 'ктщвшкрщмдшчогхфхусдотсщтцхтищ', 'пшстккамнбнардпзчлшечхундргтоегцзр', 'нсрнфузгжррчнжначучиелебрб', 'шгжмквршжтккднгаткзтпвкгзхшйр', 'змквцефтулхфохбнхбакдичудфмйчп']) from system.numbers limit 10; +select 1 = multiSearchAnyUTF8(materialize('шждйрчйавщбйфвмнжоржмвдфжх'), ['ор', '', 'йрчйавщбйфвмнжо', 'вщбйфвмнжорж', 'ждйрчйавщбйфвмнжорж', 'йавщбйф', 'дф', 'вщбйф', 'бйфвмнжорж', 'мнж']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('кдшнсйршгвлицбенйбцфрсаччетфм'), ['асмун', 'йогкдчодиф', 'лштйбжнзфкикмпбитжшгкбоослщгзнщо', 'улштжцисцажзчштгжтфффабйлофедуфме', 'дрпгкчджихшзммймиамзфнуиорлищзгйвху', 'йиоршнйоввквбдвдзасма', 'члмвасмфрхжсхрбцро', 'лшкизщушборшчшастйсцкжцбонсшейрщ', 'масдфкршлупасвйфщфважсуфсейшзлащхрж', 'дгхшщферодщцнйна', 'цзфзждбавкжрткст', 'рфбожзееаце', 'кошомвгпрщсдквазчавожпечдиуйлщадфкгфи', 'бшпхнхсгшикеавааизцсйажсдийаачбхч']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('хтиелйтарквурйлжпеегфш'), ['зпмйвзуднцпвжкбмйрпушдуавднвцх', 'фбссчгчвжакуагдвижйтщтшоабпхабжш', 'щхшибаскрщбшрндххщт', 'сммрсцзмптисвим', 'цсргщфж', 'восжбшйштезвлкммвдхд', 'вбсапкефецщжквплуо', 'даеуфчвеби', 'бтптлжпин', 'шчддтнсйкщйщ', 'фжхщецпзчбйкц', 'цсвфпздхрщхцбуцвтег']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('апрчвзфжмбутццрйщкар'), ['индхжз', 'жилцовщччгстби', 'ажс', 'фктйамйтаг', 'шммнзачггоннксцушпчн', 'чдлйтзтоцдгзццисц', 'пнбтувщцдсчнщмсакрлгфмгрй', 'овмсйнщзушвщгуитщрхвйодф', 'бзлштезвлаижхбмигйзалчолшеунлц', 'фкжпеввгшгащз', 
'тменбщжмсхщсогттршгек', 'чап', 'х', 'шкомегурлнйпшбщглав']) from system.numbers limit 10; +select 0 = multiSearchAnyUTF8(materialize('двхопооллаеийтпцчфжштнргкк'), ['йймчнздешхбццбжибопгктрнркевпиз', 'фйрохсамщцнмф', 'ййхфдпецжзгнуорвбплоахрфиле', 'пкллкацнвдббогг', 'йщдезамтжйзихщжмцлх', 'гдзувмщиеулиддердшпитвд', 'фхтунйшзхтщжтзхгцорошднпбс', 'фнситбеелцдкйщойлатиуухгффдвищсше', 'нзщщщндцрнищпхйвтбвмцтнуадцбву', 'вбщкапшнв', 'зйлмуимчскщнивтшлчмуузщепшйр', 'шжбвйдр', 'гддждбкначдттфшжшхпфиклртпгм', 'еншащцфафчнгбнщххнзочбтпушщорегшцзб', 'уунеущкззоетбучкц', 'щасифзоажребийещ', 'пщбххсдгйтт', 'хшсчуотрт', 'жкднйрозбцшужчшбкккагрщчхат', 'шачефцгч']) from system.numbers limit 10; + +select 0 = multiSearchAnyCaseInsensitive(materialize('QWyWngrQGrDmZxgRnlOMYHBtuMW'), ['ZnvckNbkeVHnIBwAwpPZIr', 'NCzFhWQmOqIGQzMORw', 'tDYaxfQXWpKNLsawBUUOmik', 'IMveCViyAvmoTEQqmbcTbdfjULnnl', 'NRvsdotmmfwumsDpDtZU', 'mnqVnwWOvMiD', 'HXpHrMvGQpbuhVgnUkfFPqjpoRdhXBrFB', 'awtr', 'IMIdOmMHZccbOZHhWOKcKjkwwgkJSfxHDCzR', 'jPLISbIwWJEKPwgvajTxVLws', 'HBfRrzEC', 'VXsysGnAsFbqNOvIaR', 'upCaeaIOK', 'GUDFkrzBiqrbZVnS', 'MoCOePXRlVqCQpSCaIKpEXkH', 'rfF', 'fjhMEpySIpevBVWLOpqi', 'KdeskLSktU', 'vjUuNUlBEGkQyRuojZLyrmf', 'SvSxotkTKCeVzNICcSZLsScKsf']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('gcDqqBCNqhQgVVgsxMXkevYIAxNl'), ['BHnoKRqOoKgmOVkjtehGSsInDvavDWOhkKAUL', 'nYqpmKPTWGdnyMcg', 'TIplHzsSXUz', 'SiQwpQgEdZ', 'YoJTWBJgsbJvq', 'CwyazvXERUFMCJWhTjvltxFBkkvMwAysRLe', 'tXUxqmPbYFeLUlNrNlvKFKAwLhCXg', 'vUbNusJGlwsOyAqxPS', 'ME', 'ASUzpELipnYwAknh', 'VtTdMpsQALpibryKQfPBzDFNLz', 'KmujbORrULAYfSBDyYvA', 'BaLGNBliWdgmqnzUx', 'IzwKIbbSUiwhFQrujMgRcigX', 'pnS', 'UKSZbRGwGtFyLMSxcinKvBvaX']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitive(materialize('HCPOPUUEVVsuZDbyRnbowGuOMhQ'), ['UzDbYrNBoWgUo', '', 'pUUEVVsUzdByrNB', 'nBO', 'SUZdbYrNbOWgUoMH', 'pOpuUevVSUZDbYRnb', 'bowGUoMh', 'VsUZDbyrNbo', 'suzdBYrN', 'uueVvsUZDBYRnBoW', 'gUom', 'eVvsuzDBYRNBoWgUOM']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('RIDPJWYYSGBFWyXikHofbTcZAnj'), ['aFxQyVe', 'OcnZBgPsA', 'iBQaH', 'oesSvsWtgQprSSIPaDHdW', 'EfytiMfW', 'qHiFjeUvQRm', 'LfQkfmhTMUfoTOmGJUnJpevIoPpfpzMuKKjv', 'scYbCYNzJhEMMg', 'yTLwClSbqklywqDiSKmEdyfU', 'HYlGFMM', 'TMQhjOMTImXbCv', 'AVtzpxurFkmpVkddQANedlyVlQsCXWcRjEr']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitive(materialize('GEsmYgXgMWWYsdhZaVvikXZiN'), ['wySd', 'smYgxGMWWYsDHZ', 'vIk', 'smyGxgmwWysDHzAvvikxZi', 'WYsdHZAvVI', 'YGxGmwWYSDhzavvI', 'XzI', 'ySDhZAvvIK', '', 'myGXgmwWySdHz', 'MYGxgmwWySdHZaVvik', 'wYsDhzAvvikXz', 'wwYsdHzav', 'Z']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('XKCeCpxYeaYOWzIDcreyPWJWdrck'), ['tTRLUYJTkSWOabLJlIBshARIkwVRKemt', 'jQgn', 'wdNRsKIVunGlvwqkwn', 'BsbKGBJlkWQDBwqqeIjENvtkQue', 'yLuUru', 'zoLGzThznNmsitmJFIjQ', 'WFKnfdrnoxOWcXBqxkvqrFbahQx', 'QHbgRXcfuESPcMkwGJuDN', 'NPqfqLS', 'bi', 'HnccYFPObXjeGYtrmAEHDZQiXTvbNcOiesqRPS', 'KobVCJewfUsjBXDfgSnPxzeJhz', 'AqYNUPOYDZjwXx', 'xbZydBGZFFYFsFHwm']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitive(materialize('AnIhBNnXKYQwRSuSqrDCnI'), ['', 'HBNNxkyqWRS', 'xKyqwrSUSQR', 'yQwr', 'ihbnnxKYQWrsUS', 'bnnXkYqwrSuS', 'qWRs', 'nXKyqWRSUS', 'qrdcN', 'NiHBnNXkYQWrS', 'NnXkYQwRSUsqRDCn', 'rSusqRd']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('OySHBUpomaqcWHcHgyufm'), ['lihJlyBiOyyqzeveErImIJuJlfl', 
'WyfAXSwZPcxOEDtiCGBJvkCHNnYfA', 'hZ', 'fDQzngAutwHSVeoGVihUyvHXmAE', 'aCpcZqWKdNqTdLwBnQENgQptIyRuOT', 'PFQVrlctEwb', 'ggpNUNnWqoubvmAFdjhLXzohmT', 'VFsfaLwcwNME', 'nHuIzNMciJjmK', 'OryyjtFfIaxViPXRyzKiMu', 'XufDMKXzqKjYynmmZzZHcDm', 'xWbDgq', 'ArElRZqdLQmN', 'obzvBzKQuJXZHMVmEBgFdnnQvtZSV', 'ZEHSnSmlbfsjc', 'gjmWPiLylEkYMTFCOVFB']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitive(materialize('NwMuwbdjhSYlzKoAZIceDx'), ['ZKOaZ', 'wBDJhsYlZKo', 'hSy', 'MUwbDjHsyl', 'sYlzK', 'ylZKOAZ', 'y', 'lZKoaZICEdX', 'azIce', 'djHSylZkoAzice', 'djHsYLZKoAzi', 'dJHSYlZK', 'muWbDJHsYLzKOaziC', 'zi']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('gtBXzVqRbepHJVsMocOxn'), ['DidFXiqhRVBCHBVklLHudA', 'yEhumIpaYXlj', 'iaEmViTRLPM', 'vTwKBlbpaJZGYGdMifOVd', 'zvgfzWeLsMQNLutdAdCeuAgEBhy', 'Ca', 'iHabiaRoIeiJgSx', 'EBfgrJnzHbuinysDBKc', 'kT', 'SGIT', 'BTRuKgHDuXMzxwwEgvE', 'OWJIeTLqLfaPT', 'BQM', 'yMimBqutKovoBIvMBok', 'zIBCYVNYAwu', 'EFDEFWGqvuxygsLszSwSiWYEqJu', 'QJDIXvPOYtvhPyfIKqebhTfL', 'ssALaXRxjguUIVKMCdWRPkivww']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitive(materialize('MowjvqBkjnVTelCcXpoSuUowuzF'), ['Su', 'vqBkJNvTelC', 'Elccxp', 'vtElc', 'JVqBkJnVTELCcxpOsU', 'OsUuOWUz', 'ElccxPoSU', 'wJVQbkJNVtElCC', 'xpOSUUo', 'VQbkJnvTELCCXp', '', 'TeLcCxPOsuuO']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitive(materialize('VfVQmlYIDdGBpRyfoeuLffUUpMordC'), ['vqMLyIddgBPrYFoEulFFu', 'lyIDdgBPrYFOeul', 'dGBPRYFOeUlffUupmOrD', 'OEulffU', 'pMordc', 'FVqmlyiDdgBpRyFoeUlFfuUpMOrD', 'PmO', 'o', 'YiDDgbPRYFOe', 'DGBPryfoeU', 'yIDdgbpRyFOeULfFU', 'lyIddgBPryfoeulfFuU', 'gbPrYfOeUlFfuupmO', 'yFoeULF']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('CdnrzjzmwtMMPLjgcXWsbtrBs'), ['RfgIUeerlPIozKpRQR', 'QRoYzjZlgngJxX', 'mEbqlBIzTQH', 'UmrfJxKyTllktPfyHA', 'ukoZeOPA', 'pbbRaUcJijcxt', 'Rg', 'lSBG', 'HvuwuiqVy', 'Fo', 'aGpUVjaFCrOwFCvjc', 'zKhfkgymcWmXdsSrqAHBnxJhvcpplgUecg', 'ioTdwUnrJBGUEESnxKuaRM', 'QciYRCjRDUxPkafN']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('miTQkQcxbKMwGOyzzRJpfXLyGx'), ['yMwgQQJkeshUugm', 'wGVe', 'XncShWqjp', 'KWjGQCOsfMKWRcgCfebkXZwZ', 'SFWbU', 'WdFDMIcfWeApTteNfcDsHIjEB', 'XRuUJznPOCQbK', 'tibBMGZHiIKVAKuUAIwuRAAfG', 'VVCqVGGObZLQsuqUjrXrsBSQJKChGpZxb', 'bWYAOLuwMcwWYeECkpVYLGeWHRrIp', 'SLzCgfkRWmZQQcQzP', 'VvfOhFBhfiVezUSPdIbr']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitive(materialize('KXoTIgVktxiXoEwfoLCENiEhz'), ['oLCENie', 'xix', 'en', 'IgvktxIXoEWFOLCEnieHz', 'xOEWFoL', 'LC', 'ktxIxoEwfolCenie', 'ce', 'oTIGvktXIXOE', 'eW', 'otigVKTXIXOEwFolC', 'E', 'CEni', 'gVKtxIxoEwfOLCENieh']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitive(materialize('DXKzSivrdLuBdCrEYfMEgPhOZ'), ['', 'sIVRDlUBdcr', 'luBDcrE', 'rDLUbDCreY', 'KzSiVRdLuBDCr', 'dcREYFme', 'lUbdCReyFMEgph', 'sivrDlubdCr', 'BdcreYfMEgP', 'ZSiVrdluBDCrEYfmegpHOZ']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('lTvINMXVojkokvNBXPZOm'), ['ZQOJMEJfrjm', 'vIpmXnGlmWze', 'wbdDKcjrrIzBHypzJU', 'omotHOYbZjWfyVNeNtyOsfXPALJG', 'SXxu', 'yZPDFsZq', 'OVYVWUjQDSQTKRgKoHSovXbROLRQ', 'RnXWZfZwHipewOJimTeRoNRYIdcZGzv', 'sizoEJibbfzwqFb', 'vgFmePQYlajiqSyBpvaKdmMYZohM', 'ENsFoFCxDQofsBSkLZRtOcJNU', 'nG']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('LsTqxiGRdvQClVNBCGMOUHOAmOqPEC'), ['NdFuUQEUWaxS', 'fdOHzUzineBDnWJJvhPNZgB', 
'rYAWGIBPxOLrjuquqGjLLoIHrHqSFmjh', 'IVgYBJARY', 'ToivVgUJAxRJoCIFo', 'yQXGrRjhIqFtC', 'PNYdEPsWVqjZOhanGNAq', 'nrQIDDOfETr', 'usJcPtiHKhgKtYO', 'vPKqumGhPbmAJGAoiyZHJvNBd', 'eXINlP', 'WQeESQJcJJV']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitive(materialize('gRzzQYOwLNiDcMFjXzSFleV'), ['XZSfLe', 'wLnIdcMFjxZSf', 'F', 'm', 'Le', 'qYoWLNidcMFjXzsf', 'zqyoWlNIdcMFj', '', 'oWlnIDCMfJxzsfL', 'wlNIdCmfjXzS']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitive(materialize('cYnMXJMJCdibMXoUQHEw'), ['BFrGFZRgzwHGkUVbBiZMe', 'piORdVIWHMBsBDeJRLbGZAHGBrzNg', 'bmDePbTPnFQiCFfBJUxAEYNSbgrOoM', 'gtzeAGwqjFrasTQUgAscfcangexE', 'okLG', 'l', 'EBkkGYNZZURgFgJPlb', 'HDQVngp', 'vEHhtBqWhZHCOrqEKO', 'fgqdFc', 'COig', 'VftTpSXAmTmvnShHJqJTdEFcyKPUN', 'WDI', 'knBm']) from system.numbers limit 10; + +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('мтдчЛВЖАгвзщущвкфИКмТбжВ'), ['щУщвкФИкМ', 'чЛвжАГвЗЩуЩвКФикм', 'ДчлвЖАГвзЩУЩвКфИКМтБЖВ', 'ЖагвзщуЩВКФикМТБжВ', 'ВжагВзЩУ', 'гВЗщущвкфИКмТБж', 'ГвЗщ', 'щВкФикМТБЖВ', 'вЖАГВзщущ', 'взЩуЩвКФИкМТ', 'ЧЛВЖагвЗщуЩВк', 'тДчлвЖагвзЩуЩвкфИк', 'ТДЧлвжаГВзЩущВ', 'тДчлВжАГВЗЩУ']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('дтрцФхИнпиОШфдАгзктвбУвсб'), ['чТрВиУРФсРпДЩОащчзЦНцхИДА', 'ЗжмПВтмиойУГхАЦПиДУЦноНуййЩХаФТофшЩ', 'уБшлОЙцМПгетЖЧетШжу', 'ЧзИАУХобФрачТеХОШбМщЖСамиМВАКРщАЦ', 'ВйвТзхЙФЧоАЖвщиушАз', 'ЦшИфххкжиФйСЛЛНЛчВоЙВПпхиИ', 'ОатЕтщкЦпбСБйцОшГШРОшхБцщЙЧиУЩЕеФлщ', 'цСПпЧА', 'ШЧНфПмФсКМКДВЦАоФчОУеТЦИзЦ', 'зАбдЛНДГИ', 'фхЩлЗДНСсКЖИФлУАбЛеТФЕпЖлпПхЙиТЕ', 'иВшкНслТКМШЗиДПйфвйНкМЛхеФДзИм', 'лпушПБванпцев', 'ЧОшЧЧмшЦЛЙйГСДФйЛАв']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('нщМаНдЧЛмиВврПокПШмКБичкхвРГ'), ['АЙбаЙйШЛЙРЦмЗчВеИЕощсЦ', 'щЦФдВжчТСЩВКЦСпачЙсумщАтЩувеиниХПДоМС', 'иоАкДРршуойиЩищпрфВаЦПж', 'еЖПйШкГжЧтоГЙМВ', 'ЩПалиБ', 'ТвВлт', 'оХжйЛФеКчхЗВвЕ', 'ерцЩ', 'ШХЖОАрзеп', 'ККМрфктКГишпГЩхаллхДиВИИЛЗДеКйХмжШ']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('вШЙчоМгОттЧАЕнЧаВеЦщчЧошМУ'), ['ЧОмГотТчАЕН', 'ОмГотТчАЕнчАвецЩчч', 'ЧАВецЩч', 'ТЧАеНЧаВ', 'ттчаЕнча', 'ТчАЕ', 'мготтЧАенчавЕЦЩ', 'НЧаВец', 'тТЧаенчАвецщчЧошм', 'Ав', 'ТЧаЕнчавецщчЧоШму', 'аЕнЧав', 'АеНЧав', 'шйЧомГОТТчаЕнчАВЕ', 'шйчоМгОтТЧаЕНчаВеЦщЧчош', 'МУ', 'ошМ', 'гОТтЧаеНЧА']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('фйадзЧмщЖШйЖЛшцГигцШ'), ['НТХеМРшДНУЗгадцуЧИ', 'жпСИКЩМлНлиоктлЦИвНЛ', 'КхшКРчХ', 'кгТЗаШИарХЧЛЖмСЖм', 'ОмиЛй', 'жЕРбФЩНуЕКЕАВоБМОнАЕнКщшзйПкОЗ', 'гиЗдадкбжХМЗслшВИШай', 'двтЗйЙНгПуТзД', 'ТНкмаВЕФ', 'Шеа']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('ШЕшхмеЦХеАСКощеКИфлсТЧИЗЛ'), ['КифЛсТ', 'ХеаСКощЕк', 'КифлсТЧ', 'шХМеЦхЕаскОЩеКИ', 'ЕшхмЕцХеаСК', 'ХЕасКоЩ', 'чИ', 'ЕцхеАсКОЩек', 'ЩЕкИфлс', 'асКощЕкифЛсТ']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('шоКнВЕрОЖЛпУйХзСугКПВжиРсЙпо'), ['игВербфНахчжЙггч', 'лтимрдфЕг', 'нкеаЖАшНБвйСдКИВГДшАГиАТнФШ', 'МжсТЙМГОииУКВГнцткДнцсоАд', 'ХтпгУСдБдцАЖЛАННоЕцзЕшштккз', 'ншУЦгФСЖшмс', 'нЩшМ', 'гоЖхМшаЕмаДРЧБЛИТпмЗОоД', 'фГКШхчФбЕГЛйкчПИЙххуМГНШзхг', 'ХпХщПЦАзщтг']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('кЧбоЗХфвБХхусмШгНаШШаГзш'), ['Ури', 'лММшткфНзцЦСВАдЩПМШфйОМшефигЖлуЕП', 'сМтЕдчЦафйСТЖЗфлРЙПЦдипжШскцВКХЦЖ', 'АУкжИФцшЛБЦЧм', 'ФПлнАаДСХзфоХПСБоСгМТОкЗЧйЛ', 'ЦшСГЛрцДмнНнХщивППттжв', 'жзЕгнциФ', 
'МШЛсЙЧтЛАГжд', 'уИиЕжцоРНх', 'ЧбйГуХтшОНкрЧИеПД', 'ЦдЩЕкКвРЦжщЧциекЗРйхрббЖуЧ', 'иВжен', 'ГчОржвБГсжштРЕБ', 'ШоЖдуЙфчсЧегумщс', 'йчЙГ', 'РДедвТ']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('ткРНбЩаРкгГчХшецИкНЕнСЖкйзАуУЖ'), ['ХлЖхУИллрРННйЗйсРуШЧвМбЧЧщфФЦц', 'СЛчКБцСФДшлфщаФлЙСзШабмбхуБжТСТ', 'УКУиввЗЩуВМцпчбпнДГбпЕЖрПбИДркМРОеЧмЧдГ', 'ПчщвШЩвГсЛмММГБ', 'хКЦЧсчжХЩИЖХеНнтоФЦлнмЛЧРФКпмСшгСЧДБ', 'удсЗйУДНЧУнтЕйЦЗЖзВСх', 'хПЖЙИрцхмУкКоСмГсвПаДОаЦНЖПп', 'сВОей', 'ЩЦжщоабнСгдчрХнЩиМХзжЩмФцррвД', 'ЦИсйнЦДоЕДглЕЦД', 'жзйПфБфУФоцзмКЩГПЧХхщщПТпдодмап', 'ДНХГНипжШлСхХхСнШЩЛИснУйЧЩЖДССФфиС', 'ОйЩНнйЕшцФчБГЛвхЖ', 'КЧРВшИуШйВфрпБНМсУмнСЦРпхЗАщЗУСвЧйБХтшХЧ', 'зЛбНу', 'ЗСрзпшЕйРржПСсФсШиМдйМЦГхдйтРКЩКНцкбмгС', 'СУццБуКнчОищГ', 'уЕГЧлЗБНпУисЕЛ']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('ВЦХсЖЗЧЙБЗНбРитщстеМНжвВ'), ['итщст', 'ЧйБЗНбрИтщстЕМнЖ', 'ХСЖЗЧйбзНБриТщ', 'Темнж', 'сЖзЧЙБзнб', 'хСжЗчйБзнБрИтЩстЕм', 'БзнБРиТщ', 'ЗчЙбзНбрИТщ', 'чйбЗНбри', 'зЧйбзНБРИ', 'нБРитщсТе', 'зНб', 'цхСжзчйБЗнБРИТЩСтЕм', 'жЗЧЙБЗнбрит']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('ХцМШКАБАОххЕижгГХЩГиНциД'), ['ОРАБЕРВомЛфГНМИКупбхЛаАкЗдМзтш', 'лЗУЩнлбмиЛАфсгМРкцВтлснййишИНАС', 'ТлжлУоУгжукФжЖва', 'жоСШПоУНЩшРМГшОЛзЦБЛиЛдТхПДнфжн', 'чнСУЗбДаГогжДфвШКеЙПБПутрРпсалцоБ', 'ЙозоПщчакщаАлРХбЦгац', 'иаИСсчЙЧБШорлгЧТнчцйзоВБХбхЙФтоЩ', 'ПСзсБЗЕщурфДЛХйГИеПНрмииаРнвСФч', 'ЦйЖЕуТфЖбхЩМтйсЙОгЛбхгтКЕЩСАЩ', 'гтЗуЩлужДУцФВПЛмрБТсСНпА', 'тГвлбчЗМасМЖхдЕгхмЩксоЩдрквук', 'ВРаг']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('тУйВЖдНнщцЗЖфКгфжГфиХСБЕЩ'), ['КгФЖГФи', 'сБе', 'ЖФ', 'гфжгФИхсбе', 'ВЖДНнщЦзжфКГфЖгфИхсбещ', 'ВЖДНнЩЦзжфкГ', 'вЖДННЩЦзжФКГфЖгФ', 'ф', 'НщЦЗж', 'нщЦЗЖФк', 'Их', 'дННщцзЖФКгф', '', 'нщцзжФкг']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('ШкКРаоПеЗалРсТОиовРжгЙЧМКЛШ'), ['рчсажЕК', 'пЧТМфУрУММждЛйжзУрбкмам', 'бАШеНмВШзлзтушШШсхОсцрчЙПКИБнКжфЧЕХ', 'ЖМЛшбсУМкшфзочщАЖцМбмШСбВб', 'гтРХсщхАИОащчлИЧуйиСпСДФПбРл', 'ЧуОРУаоойГбУппМйЩФДКПВ', 'уУпугйРЕетвцБес', 'ЙЖЦТбСЖж', 'ИБКЛ', 'ТДтвОШСХГКУИПСмФМтНМзвбЦрднлхвДРсРФ', 'вВгНЙХИрвйЕЗпчРГЩ', 'ПчмТуивШб']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('РлчгхзуВШежХЦуМмнВЙщдцО'), ['ХшвМЦДШпЩОСшЦПдруа', 'ФИЦчУвРкпнПшИЕСЧАувиХд', 'фшвбЦОИЗфпИУМщзОЧЗфВцЙПнмтаТгг', 'мЖЩйавтнМСЛ', 'НВбШ', 'ааФДДрВвЙТдПд', 'ЗнчЧущшхЙС', 'рзуСзнеДфЩПуХЙЕл', 'ШСЩсАгдЦбНиШмшКрКс', 'ггнЕфБГзрОнАГЙзЧеИП', 'вшТИпЧдЖРкМНшзпиоиЩчзДмлШКТдпЦчж', 'фЦТЙц', 'ОтУшмбптТКЗеПлЧцЛОкЩБпккфгИн', 'ЩпвхпЗлШБЦ']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('ЙбйнхНщЧЖщчГОАпчФнЛШФбгЛа'), ['щчг', '', 'апЧфНЛШфб', 'ЙнхНЩЧЖщчгОАПЧф', 'ХНщЧжЩЧгоАпч', 'ХНщЧжщчГо', 'нщЧжщчГОа', 'чЖЩЧГоапЧФНл', 'оапчФ', 'щЧГОАпЧФНлшФ', 'ЩЧГОАпЧФНЛшфБг', 'БЙНхнщчЖщчГоаПЧФНЛШФБгЛ', 'ОапЧфн', 'ф', 'БглА', 'ш', 'шфбГ', 'ХнЩЧЖщчГоА', 'ХНщчжщЧгоапч', 'хНЩчжщЧГоапчфнлшФбгЛ']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('кдЙДТЩеВЕфйКЩЦДиКБМф'), ['щфЛ', 'фЧЩЩичрКйЦКхеИИАпоБВЙЗбДАФио', 'мИтиЦРоВЙсБбСлНзиЛЧОфФевТмижщК', 'тЙгнКШфНТЕБЛцтГШЦхШхБ', 'уаабРГрМЙпМаБуЗпБЙчНивЦеДК', 'мпВЛНДеКПУгРЛЛинзуЕщиВШ', 'ЩжКйШшпгллщУ', 'пршЙПцхХЗжБС', 'нбЗНЙШБш', 'йцхИщиоцаМРсвнНфКБекзЛкчТ', 'хсмЦмнТрЩкДТЖиХщцкЦМх', 'ГмЛАбМщЗцЦйаОНвзуЗмЕКПБЙмАЕЛГ', 'ОЦХРЗРмкжмРИЖИЙ', 'з', 'лЕТкпкдЗчЗшжНфо', 'ИТПфйгЖЛзУТсЩ', 'ОфрбЛпГА', 'МЖооШпЦмсуГцАвМЕ']) from 
system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('ЩГТРШКИОРБРеНЖПКиуМОкхЛугИе'), ['брЕнЖ', 'РбрЕНЖпКиУМокХЛу', 'ГТрШКИорБРеНЖпКиУМ', 'рШКиоРбрЕнЖпкИУМОК', 'ИорбрЕнЖПК', 'Окхл', 'шкИоРБРеНЖПк', 'ТРШкИоРБрЕнжПКИУМОкхл', 'КИОРБРЕнжпкиУм', 'Н', 'КиОРбРЕнЖпкИУмоКхл', 'к', 'ГтРшКИоРБРЕнЖпк', 'гтрШкиорбрЕНЖпк']) from system.numbers limit 10; +select 0 = multiSearchAnyCaseInsensitiveUTF8(materialize('ШНвпкфЗвгДжУЙГлрТШаШЛгНЗг'), ['нЗБенВшщрЛАрблцщшБАдзччммсцКЖ', 'бЗЩхзЗЗбФЕйМоазщугБбмМ', 'рЙсВжВсхдйлЩгБтХлчсщФ', 'пиБшКРнбВБгЕуЖ', 'жПшнхпШзУБрУЛРНЩДиаГШщКдЕвшоуПС', 'чЕщкЗмДуузуСдддзгКлИнгРмЙщВКТчхзЗЛ', 'кЖУЗЖС', 'щххОВМшуажвН', 'фбцЖМ', 'ДШитЧЩДсйНбдШеООУдг', 'ЛХПфБВХЦТИаФПЕвгкпкпщлхмЙхГбц', 'чЦсщЗщрМ']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('ФРХгаСлчЧОцкШгзмКЗшФфББвЧ'), ['кзШфФб', 'ГАслЧЧОцкшг', 'ФфббВЧ', 'ЦкШ', '', 'АслчЧОЦКШгзМкЗШффбБвч', 'РХгаслЧчОЦКШГз', 'РхгаслчЧОцКШгзМкзшФфБбВ', 'Шг', 'Ф', 'ХГАслчЧоцКШГзМкзш', 'ШгЗмКЗшфФб']) from system.numbers limit 10; +select 1 = multiSearchAnyCaseInsensitiveUTF8(materialize('ЧдйШкхОлалщНйбССХКаФзОМрКЕЙР'), ['бссХкафзОм', 'ХОЛаЛщнйБссХкаФз', 'лаЛщнйБсСХ', 'ЩнЙбСсхКаФЗО', 'йБСсХКАФЗОмР', 'йшкХолаЛЩНйбсСхК', 'С', '', 'ЙшкхОлалщНЙБсСхКаФзом', 'Йр', 'щнЙБссхКАфзоМрК', 'рКе']) from system.numbers limit 10; + +select 1 = multiSearchFirstIndex(materialize('alhpvldsiwsydwhfdasqju'), ['sydwh', 'dwh', 'dwhfdasqj', 'w', 'briozrtpq', 'fdasq', 'lnuvpuxdhhuxjbolw', 'vldsiws', 'dasqju', 'uancllygwoifwnnp', 'wfxputfnen', 'hzaclvjumecnmweungz']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('kcwchxxyujbhrxkxgnomg'), ['jmvqipszutxfnhdfaxqwoxcz', 'nrgzkbsakdtdiiyphozjoauyughyvlz', 'qbszx', 'sllthykcnttqecpequommemygee', 'bvsbdiufrrrjxaxzxgbd', 'hdkpcmpdyjildw', 'frxkyukiywngfcxfzwkcun', 'dmvxf', 'esamivybor', 'eoggdynqwlnlxr']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('zggbeilrfpkleafjjldgyfgn'), ['rpypxkpgvljhqthneremvabcd', 'qchzlnsctuwkdxqcrjgihvtfxhqxfqsxm', 'vtozkivjyqcqetmqenuihq', 'fixcvjyzbzejmwdivjf', 'lydoolvnuuamwlnzbyuuwpqqjaxf', 'elkodwthxqpcybwezm', 'wpiju', 'wdzuuwumlqfvga', 'iokphkai', 'wkbwdstplhivjyk', 'wxfbhfturuqoymwklohawgwltptytc', 'jehprkzofqvurepbvuwdqj']) from system.numbers limit 10; +select 9 = multiSearchFirstIndex(materialize('bwhfigqufrbwsrnnkjdzjhplfck'), ['v', 'ovusuizkdn', 'ttnsliwvxbvck', 'uh', 'lfourtjqblwdtvbgtbejkygkdurerqqdwm', 'snmtctvqmyyqiz', 'ckpixecvternrg', 'gluetlfyforxcygqnj', 'igqufrbwsr', 'om', 'huwazltjsnohsrcbfttzwyvcrobdixsuerkle', 'gqufrbwsrnnkjdzj', 'hfigqufrbwsrn', 'lhhyosbtznyeqzsddnqkfxayiyyajggxb', 'igqufrbwsrnnkjdzjhplf', 'pl', 'jtbqaqakbkesnazbvlaaojppxlbxccs', 'gqufrbwsrnnkjdz']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('yevfiumtjatfdnqixatbprvzwqlfgu'), ['ozghvskaixje', 'vmdrvdjhwxdvajmkpcxigsjzmtuhdxgllhzrpqd', 'qfhnxpcmtzpociajidwlcvobjfyxfcugsxy', 'pgamvhedjibcghinjrnowqzkfzibmfmh', 'bcmrdzpcczhquy', 'czosacvwfsbdvwwyirpvbve', 'qu', 'fdkobwlnmxbpvjkapextlbcrny', 'bqutjqobkyobhtpevjvewyksnoqyjunnnmtocr', 'kjlgff', 'oitltmhdburybwfxrjtxdiry', 'kiokuquyllpeagxygqugfmtm', 'wlbkl', 'khubpmstqjzzjzmsvfmrbmknykszqvue', 'lqrbmyndsztyrkcgqxcsnsanqjigimaxce', 'nitnyonuzedorrtkxhhgedohqcojbvtvjx']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('wmvuoeqphsycrvtxghrcozortmdnh'), ['hv', 'ugcmpebvlzgdtcmgkbgzyfel', 'qvmofayljsvybupvvnbhhibsz', 'zvlihxmyxlxwbffwjzjrfjgimmltftqqre', 'mwassqvxptav', 
'jrumvqzkiaewngoufhrleakcfrsaxhpxyg', 'sxlxwhvkpavgfhxrxcbnqbstyrejtosxwe', 'psnlqakyfhcupryqatrmwqlswwjylpaiqammx', 'ivozojwldsgtnxpvsi', 'epyzjs', 'legi', 'sdqxxahfbddhacqrglgdcmlslraxfaahhfyodon']) from system.numbers limit 10; +select 12 = multiSearchFirstIndex(materialize('lebwdwxfdzwquhqhbvmte'), ['mwhruilzxvlyrgxivavxbbsq', 'ubuiizuasp', 'xpkzcsf', 'qpeqitoqqqeivohajzhmjbo', 'kbftixqmqgonemmbfpazcvf', 'iyhluioqs', 'hws', 'tupfdksgc', 'ows', 'pngzkoedabstewcdtdc', 'zdmyczldeftgdlwedcjfcoqycjcivf', '', 'xt', 'syuojejhbblohzwvjzzedzgmwc']) from system.numbers limit 10; +select 7 = multiSearchFirstIndex(materialize('wcrqaoecjwkhnskrbahqxfqgf'), ['qegldkdmyaznlmlhzvxfgoukngzbatnuq', 'khgcvgrifwtc', 'hkwcpogbbdqulizrycmneqmqynvj', 'zkqjf', 'xfduxyy', 'ructdekcoywfxsvpumfefoglljptsuwd', 'wkhnskrbahq', 'crqaoecjwkh', 'ikmpbunpguleinptzfelysiqc', 'lhldcci', 'nooepfypkoxxbriztycqam', 'uxeroptbiqrjartlnxzhhnlvjp']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('psgkkcwttitgrjsobiofheyohadu'), ['achfrepey', 'minlzeiwgjfvvmhnevisky', 'oxfghfdthtyczzveppcoxrued', 'ydhaupodnezvxhcqahfkwtpvxnymriixf', 'slxsbxidylxyurq', 'socyyabwbjdabnuqswrtjtqogirctqsk', 'lvbnacirctyxxspjmispi', 'oj', 'ihmmuuqlosorrwhfxvpygfrzsqpmilcvjodmcz', 'idmtmemqfyrlbwhxz', 'hsqfsfdzvslwbtlwrfavez', 'gszl', 'ei', 'pnywjnezncpjtyazuudpaxulyv', 'iqgavdjfqmxufapuziwwzkdmovdprlhfpl', 'yigk', 'mjidozklrpedutllijluv', 'vixwko']) from system.numbers limit 10; +select 3 = multiSearchFirstIndex(materialize('xtjxvytsseiqrpkbspwipjns'), ['bwmoghrdbaeybrmsnucbd', 'zoslqabihtlcqatlczbf', 'sseiqrpkbspwipjn', 'mdnbzcvtayycqfbycwum', 'npueimpsprhfdfnbtyzcogqsb', 'ytsseiqrpkbspwipj', 'fzvhcobygkwqohwutfyauwocwid', 'naacyhhkirpqlywrrpforhkcjrjsnz', 'vezbzderculzpmsehxqrkoihfoziaxhghh', 'mvvdfqzskcyomjbaxjfrtmbduvm', 'pwipjns', 'tsseiqrpkbspwipjn', 'sseiqrpkbspwip', 'qgrtbcdqcbybzevizw', 'isjouwql', 'rlbeidykltcyopzsfstukduxabothywwbq']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('zxmeusmehplcgbqabjof'), ['hqxgrw', 'fydjyrr', 'cocwtbazwjrswygttvrna', 'wpkvowuq', 'mwnzdxihrxihzhqtl', 'ljkjtmrfbonhqkioyzotyeegrw', 'ofxo', 'rjubwtpbweratrelqlrqotl', 'wvxkcil', 'qvolxxgqs', 'afqlhjnlvxowtnuuzywxuob', 'slwbmq']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('tjcmtoisgbilkygushkpuxklis'), ['bkdohwx', 'dfohgzhcjqirlbrokwy', 'zaemgqgxltznvkccyumhgsftnfigbol', 'otgcaybejwe', 'qn', 'gvfzcyhvmsnbgkulsqrzeekmjkc', 'cajuyauvmhkrriehgwfmtqbkupysudle', 'pmcupysyllzpstolkfpdvieffxaupqtjty', 'elhlzvescbfpayngnnalzixxgunqdhx', 'cvxpgdnqcxeesk', 'etlewyipypeiiowuoewulkpalvcfe', 'ordhwrkwqq', 'wnroixlkrqnydblfrtlbywc', 'xshujuttvcdxzbetuvifiqi', 'meqqxqhntkvzwoptnwskdgsxsgjdawe', 'dnmicrfshqnzosxhnrftxxeifoqlnfdhheg']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('iepqqbvekaflprupsmnpoijrld'), ['kqomoeysekwcplpegdwcdoeh', 'mwdvr', 'aobviioktzwzmpilblbdwstndhimabfgct', 'vqustluciruiyfkoontehnwylnauwpol', 'utcqnitztcgr', 'ityszrqmlwzspnrwdcvdhtziob', 'hmll', 'ilfzvuxbkyppwejtp', 'euxdzqcqutnfeiivw', 'rbcjlmjniiznzaktsuawnfjzqjri', 'fzyxlzzretsshklrkwru', 'jrujmdevqqojloz']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('cufztqffwjhtlkysekklpaywemm'), ['cpawuauqodogaitybtvplknjrsb', 'ynsocxfnxshzwnhlrfilynvz', 'ylrpytgcvtiumdckm', 'mvgrkueaslpgnjvvhzairgldtl', 'iliorsjypskmxfuuplfagktoycywb', 'drvwngp', 'zviuhcxaspwmqqz', 'qfgmrmhycskus', 'szj', 'rooivliiqufztcqlhrqyqvp', 'tufdmsmwue', 
'cssowtldgwksbzlqyfereodcpuedighwd', 'odcjdffchhabtaxjvnr', 'o']) from system.numbers limit 10; +select 7 = multiSearchFirstIndex(materialize('zqwvlarwmhhtjjgwrivwfpsjkvx'), ['zcwhagxehtswbdkey', 'okezglmrjoim', 'ilwdviqimijzgoopmxdswouh', 'aqztpsntwjqpluygrvwdyz', 'uzxhjuhiwpz', 'akgc', 'larwmhhtjjgwrivwfpsj', 'isqghxsmcrwlgyloslmlyeboywtttgejdyma', 'arwmhhtjjgwri', 'rwmhhtjj']) from system.numbers limit 10; +select 9 = multiSearchFirstIndex(materialize('fuddujwwcewlhthgwsrn'), ['shtzrrtukxmdovtixf', 'rkcnzzzojqvvysm', 'jlamctgphjqcxlvmpzyxtghnoaq', 'pthrwvbheydmrot', 'kpniaqbcrgtxdyxxdxonbbltbdo', 'igulngxgtauumhckvbdt', 'khgrmskijoxruzzzaigjxonsc', 'rxzeykfxwssltw', 'hthg', '']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('jtgvvkggpkqhbxptjgoy'), ['nplzawmacgtqfxsp', 'oosw', 'akw', 'hnsenqoqwiydiufozomkyirgjepeqw', 'fpafgahvfdxukzvskbuy', 'tqimmsqffiqfoni', 'rrxkjklmkdhxqwcpfyutqzxu', 'esfqeujcbqxwnvodkwwdbsyozptaf', 'rqnyguyz', 'fftl', 'ccfyavxtxrpi', 'wftpsblszgovfgf']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('steccxkwnptybaddcuau'), ['qagxfznhjaxtyclxdsi', 'rtxwptfyzgthkwrx', 'rmcoxxs', 'vlubx', 'siecygstzivz', 'tksiagm', 'kq', 'dgsqrobxegmdbjkanb', 'lxokyvhveklvdakrxyiqokr', 'tgpmehwdrirpfjonqzhqshbo', 'cqmkargvsfjoxrguymtzsfwkg', 'avkmufhoywprjw', 'xzywtvlpoozmgkrcavevwebv', 'hfiuwslapamiceaouznxm', 'tmfjhqddafhhjbybfphlbwu', 'mrigvhmjvdpny']) from system.numbers limit 10; +select 0 = multiSearchFirstIndex(materialize('ccbgxzoivbqtmyzqyooyepnmwufizz'), ['lcclseplkhxbrrzlnani', 'xggxivwqlpxmpypzovprdkmhrcgjkro', 'dbbmiegotfxjxybs', 'hqtcowpupsyqfx', 'znatfzjbeevbaqbmpofhywbyfxn', 'mnditiygex', 'lazqapwjswhkuimwmjoyseyucllnrfxrwnzj', 'jg', 'dmqwnuvsufgffuubhqeugwcanvflseorrydyyxvr', 'wpjfcfwfgjiybncrw', 'joucnvxxcyjyqlwhrzwnstyj', 'babtxkzasyaffxzd', 'wgcfdyhwxjoytbxffdxbdfinolbltnhqkvyzybc', 'yhrgwbdwopznltjtyidxawqg', 'bvrrt', 'bcwmsys', 'ijdjojhhzaiyjyai', 'eevxwppogogdbmqpbeqtembiqxeiwf']) from system.numbers limit 10; +select 2 = multiSearchFirstIndex(materialize('xrwjeznohtbdvijwsbdksf'), ['hwdfufmoemohatqafdrcvdk', 'tbdvijwsbdks', 'xzwjczbuteujfjifzkbxvezs', 'bdvijwsbd', 'eznohtbdvijwsbdks', 'xadezwhbbmlqz', 'b', 'socrdjxsibkb', 'dk', 'eznohtbdvijws', 'pavsosnncajr', 'jixlmxxmxnnbpebjhitvtsaiwzmtqq', 'yuxmmnrqz', 'mpzytweuycabvu', 'tbdvi', 'ip']) from system.numbers limit 10; + +select 0 = multiSearchFirstIndexUTF8(materialize('црвтгмсрооацволепкщкпнгшкамщ'), ['гйцбсханрейщжнфбхтщбйала', 'дирдфнжпнччхаоцшрийнйнечллтгцбфедгсш', 'жфйндбффаилбндмлточиирасдзйлжбдзег', 'жвоуйфсйойфцвгзшцитсчпкч', 'ршонтбгщжооилчхрзшгсдцпзчесххцп', 'пйучихссгнхщлутвменлмм', 'хишгешегдефесо', 'знупгж', 'щчфу', 'знвтжифбнщсибеноожжметачаохфхсжосдзйуп', 'ггтоцйпгхчсбохлрчлваисивжбшбохдурввагш', 'щлийбчштбсч']) from system.numbers limit 10; +select 5 = multiSearchFirstIndexUTF8(materialize('опднхссгртрхтотлпагхжипхпитраб'), ['шфршсцешушклудефцугщцмйщлошечедзг', 'нйумйхфщцгщклдожхвосочжжислцрц', 'згтпвзцбхйптцбагсвцгтнф', 'пшичси', 'ссгртрхтотлпа', 'апзазогвсбежзрйгщоитмдкн', 'непгайтзкгштглхифмзданоихц', 'пднхссгртрхтотлпагхжипхпитр', 'ждднфлрзалшптсбтущвошрйтхкцнегшхрсв', 'брп', 'сгртрхтотлпагхжипх', 'нхссгртрхтотлпагхжипхп', 'пагхж', 'мфкжм']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('овччцнтчайомсйййоуйуучщххиффсб'), ['жжрддцпнехйр', 'шзбвуооинпаххесйкпкошжмцзгхе', 'ррсннилщлщжгцтйрпхабкехахззнтщемагдйшпсч', 'пуфугнказепщ', 'гддхтплвд', 'сщсчи', 'бйрсахедщфкхиевкетнс', 'йфжцжшпхлййхачзхнфоц', 
'цтмтжлщдщофисзрвтбо', 'кщсевбоуйб', 'щгаапзкн', 'осймщовшчозцййизм', 'фкмаат', 'бкзцсдонфгттнфтаглпрцтбхбсок', 'жлмичлйнйсжбгсейбсиезщдмутационжгмзп', 'нбищижнлпмтморлхцхвеибщщлкйкндлтпбд']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('фдситчщдвхмфйтхшдтуцтщжрочщо'), ['ейшфдннтйечгк', 'фуйщгбйшдцирзб', 'ехйцмчщрсртнк', 'увтцмдорщжфгцгзущпувтщкнрфсйбщрзй', 'хчщпхвуарнббпзсцшчщуносйгщпсбтх', 'жтдчрхфмхцххккзппзбнуббс', 'тчохнмбаваошернеймгготлузвсбрщезднеил', 'стссчкшрчррйбхдуефвеепщшзмербгц', 'жбезжпещ', 'вйтсрхптлкшвавдаакгохжцоощд', 'искеубочвчмдхе', 'щмлочпзбунщнхлрдлщтбеощчшчхцелшоп', 'екуийтсйукцн', 'дочахгжошвшйжцпчзвжйкис', 'лтеенешпсболгчиожпжобка', 'букзппщрчбпшвпопвйцач']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('гопвмрутфпфбхмидшлуб'), ['цнхшдойгщн', 'дкаежщрапщпщеа', 'фмогимдничрфтхмсцмчпдфтиофнтйц', 'фчмсщисхщуп', 'ощмвдчефозйжбеесбмещочевцчд', 'апкбцйщжщабвппофм', 'мтйоддлфцгдуммптднпщшрн', 'икхнсмжчбхнфхнссгл', 'ущмунинлбпрман', 'ллкнечрезп', 'ажтнвбиччджсзтйешйффдгдрувер', 'йрщ', 'чигдкйшфщжужзлвщулквдфщхубги', 'иккшсмаеодейнкмгхбдлоижххдан']) from system.numbers limit 10; +select 12 = multiSearchFirstIndexUTF8(materialize('срлцчуийдлрзтейоцгиз'), ['жщлнвбубжпф', 'оклвцедмиср', 'нлзхмчдзрззегщ', 'хоу', 'шайиуд', 'ерслщтзцфзвмйтжвфеблщдурстмйжо', 'жмгуйузнчгтт', 'стеглмрдмирйрумилвшнзззр', 'втедлчрчайвщнллнцдмурутш', 'цимхргмрвмщиогврнпиччубцйе', 'ктчтцбснзцйцймридвш', 'ейоц']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('лрицжленфилзсжпжйнцжжупупдфз'), ['чпбрмлрнцмвеуфу', 'рмпизмпжчшбхдудчшохтжш', 'гргцжчпгщищннусв', 'ийщтщвзчшпдзитщубакусхавслрсбткб', 'бйбакижцтибгбгхжцвйчжжщжсжкзф', 'чгрп', 'чуносжусжфчмфжхрщзлщрдвбашажаанча', 'чекршбш', 'лбцкхйсооцц', 'сгвнлегвфмпчтййлрмд', 'наатущркхйимхщщг', 'щпзоеимфощулбзхафпц', 'дцабцхлврк', 'умидмчуегтхпу', 'дщнаойрмчсуффиббдйопдииуефосжхнлржрйлз', 'щзжетезвндхптпфлк', 'бгчемкццдбжп', 'иихуеоцедгрсеужрииомкбззцнгфифоаневц']) from system.numbers limit 10; +select 3 = multiSearchFirstIndexUTF8(materialize('бхжвчашрощбмсбущлхевозожзуцгбе'), ['амидхмуеийхрнчйейтущлуегрртщрхвг', 'фнисцщггбщйа', 'хжвчашрощбмсбу', 'фщвщцнеспдддцчччекчвеещ', 'ущуджсшежчелмкдмщхашв', 'цкуфбиз', 'евозожз', 'ппт', 'лвцнелшхцш', 'ощбмсбущлхев', 'ефхсзишшвтмцжнвклцуо', 'цржсржмчвмфмнеещхмиркчмцойвйц', 'ашрощбмсбущлхевозожзу', 'гхщншфрщзтнтжкмлщанв', '', 'хевозо', 'ощбмсбущлхевозожзуц', 'возожзуц']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('мзчатйжщгтзлвефчшмлшт'), ['гхшфрунирйдзтеафщгк', 'ймхмфлц', 'звуумивмвштчтнтеобзщесакийгк', 'чщжетзнцишхрммтбцакиббчп', 'блмидикавущщдпгпчхйаатйанд', 'цмщшбклгцгмчредмущаофпткеф', 'бнетввйцзпдерхщ', 'ицйнцрввемсвтштчфрпжнатаихцклкц', 'дзлщсштофвздтмчвсефишс', 'пбзртдцвгкглцфесидлвваисщр', 'ммеилбзфнчищч', 'жш', 'лздиззтпемкх', 'байлужднфугмкшгвгулффмщзхомпав', 'рсзнббедсчзущафббзбйоелид', 'цфшйкцксйгуйо']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('жжмзмащйфжщлрффбпврзнидииейщ'), ['ржфзнлйщсздйткаоцруйцгцт', 'илинксщмгщшещееифвпданмйлж', 'кг', 'гпааймцщпмсочтеиффосицхйпруйшнццвс', 'кнзфгжйирблщлл', 'ищуушфчорзлкбцппидчннцвхщщжййнкфтлрдчм', 'тбтдчлвцилргоргжсфбоо', 'ехаех', 'нехщмдлйджждмрцпйкбрнщсифхфщ', 'тцжпснйофцжфивзфбхзузщтмдкцжплавозмше']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('биаризлрвххжкпщтккучфизуршткпн'), ['йбручвндбщвссаеха', 'ол', 'еузкмпогщзгзафшдшоплбфнфдккх', 'ибзихщйфбтаз', 
'ибрчиейш', 'нафрпбснзрузнтмнйиомтечтшзбкпзутдилтф', 'тщтбапцчдий', 'щкнггмфцжрзщцзжвлкчбммхтхтуж', 'ваам', 'цкфиушзигбжтацнчдлжжзфшщммтнлж', 'туфовжтнкзщсщщизмрйкхкпц', 'пирзксзикфтшодожшчцг', 'жфчфцфвлйбмеглжйдазгптзщгж', 'тутириждкзчвтсоажп', 'мотзусбхту', 'слщкгхжщфщоцкцтрлгп', 'бругтбфесвсшцхнтулк', 'восур', 'ссежгнггщдтишхйнн', 'вгзосзгоукмтубахжнзгшн']) from system.numbers limit 10; +select 8 = multiSearchFirstIndexUTF8(materialize('мчслвбжвманджййсикнврцдчмш'), ['рлбмй', 'иб', 'жажлцсзхйфдцудппефвжфк', 'огггхзгтцфслхацбщ', 'дзтцкогаибевсйещпг', 'зпцтйзфмвгщшуоилл', 'етщзгцпдйчзмфнхпфцен', 'нджййсик', 'сикнврцдчмш', 'жййсикн', 'икнврцдч', 'паокаочввеулщв', '', '', 'кечзсшип', 'вбжвманджййсикнвр']) from system.numbers limit 10; +select 2 = multiSearchFirstIndexUTF8(materialize('нвррммппогдйншбшнехнвлхм'), ['нфошцншблеооту', 'лх', 'цртд', 'огдйншбшн', 'уулддйдщицчпшбоиоцшй', '', 'дрдужзжпцкслетгвп', 'й', 'мппогдйншбшнех', 'дйншб', 'лжвофчзвдд', 'рммппогдйншб', 'ехнв', 'втущсщзбчсжцмаанчлнасп']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('удехбкабиацхпгзнхжелшц'), ['фмнбтйезсфоахофофдблкжщжфмгхтзс', 'тщтамзафозхлз', 'цшжфсбл', 'йзгзилупшллвипучхавшнмщафзмнк', 'лу', 'гтебпднцчвмктщсзи', 'лпщлмцийгуеджекшд', 'пцдхфоецфрунзм', 'зис', 'хпж', 'цтцплхцжишфнплуеохн', 'впх', 'чцчдацлуецрчцжижфиквтйийкез', 'гчшмекотд', 'пйгкцчафеавзихзтххтсмкал', 'сжфхпцгдфицжслрдчлдхлсувчнрогнву']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('щщвфчгамтжашнуошбзшуйчтшх'), ['дийу', 'жеомлсжщймемрсччошдфажцтдп', 'нгопнцквбф', 'хопб', 'ив', 'чвфвшфрдфелрдбтатшвейтг', 'вхкцадмупдчбаушшлдксйв', 'жтжбсвмшшсйеуфдпбдлкквдиовж', 'гтсдолснхесйцкйкмищгсзедх', 'ошплп', 'ифпуррикбопйгиччи', 'чдфймудаибвфчжтзглс', 'зпцмвпнлтунвйж', 'еждрйитхччещлцч', 'вмофсужхгрнзехкх', 'щжгквкрфжмжжсефпахст']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('рфгигуужжцфмоаешщечувщгонт'), ['слащченщлуоцргврбаб', 'тцизут', 'лйрсцолзклжбчрзгббммоищщ', 'уицмлоилзф', 'зпхмшвфйккфщщп', 'ймижрпдщмшв', 'пуощжлрмжлщхмкйгщшщивдпчпжчл', 'ойахшафнж', 'гксомбвцрсбжепхкхжхнсббци', 'панлраптщмцмйфебцщемйахенг', 'сохлгожштлднчсзпгтифсйгфмфп', 'аждчвзну', 'дхшуфд', 'борзизцхнийбщгхепрнзшй', 'фщшздруггрке', 'оевупрйщктнолшбкунзжху']) from system.numbers limit 10; +select 8 = multiSearchFirstIndexUTF8(materialize('кщзпапйднучлктхжслмищ'), ['апмдйлсафхугшдезксш', 'кйрм', 'цйивайчшуалгащсхйш', 'злорнмхекг', 'сгщврурфопжнлхкбилдч', 'бнлпщшнвубддрлижпайм', 'нукдонццнрмовфнбгзщсшщшдичежффе', 'йднучлктхжс', 'зпапйднучлктхж', 'затйотдсмпбевлжаиутсуг']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('жцажссефррршнфмнупщаоафгкщваа'), ['жфпщкгзкрмтщчцтжйчпйдошбшоцд', 'бхгйлйдробптвущшппзуиидежнлтпбжащткцф', 'хлещазйцепдханпажчизнхгншйуазщхй', 'ашцк', 'фрбммхдднчзшс', 'нжцанилзжаречвучозрущцдщаон', 'длмчзцрмжщбневрхуонпйейм', 'шкбщттврлпреабпоиожнууупшмкере', 'вуцпщдиифпеоурчвибойбпкпбкйбшхдбхнаббж', 'нртжвкдйтнлншцанцпугтогщгчигзтоищпм', 'цкплнкщлкшемощмстздхпацефогтск', 'цвждйбсмпгацфн', 'шсжшрзрардтпщлгчфздумупд', 'цйииткглчжйвуейеиииинврщу', 'унлодтулшпймашоквббчйнибтвалалрвбцж', 'нбнфнвйишйжлзхкахчмнлшзуеенк', 'бшлпсщжквпцахигчдтибкййб', 'фчакпзовтрлкншзцулшщмпзж']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexUTF8(materialize('иматеччдфлггшпучумджпфпзмвх'), ['дахахпчлцлаачгцгтфпнжлшчйуцбшсг', 'атжйувхец', 'грдсбвиднницдвшпйршгмегцаоопнжгй', 'чзлхречмктфащмтеечуиагоуб', 'савбхлпилийщтихутйчдгфсойй', 
'вбгочбзистзщшденусцофит', 'мар', 'дфшажхдсри', 'тжлмщшж', 'птсрсщгшммв', 'ре', 'зратамкткфкинййй', 'гуцмсизулвазужфдмхнелфнжббдтрудчтнфцр', 'нйчинеучкхнпчгнйвчвсвлгминуцахгщввжц', 'ечагчнуулфббгбел', 'йшжуговрйкащцофдокфчушжктнптйеззушфо']) from system.numbers limit 10; +select 11 = multiSearchFirstIndexUTF8(materialize('азтммйтшхцхлгдрнтхфжбдрлцхщ'), ['нпучщфвспндщшспзмшочгсщжчйгжбжзжжтн', 'хккдйшабисдузфртнллщпбоуооврайцз', 'йпхрфжждгпнйаспйппвхбргшйвжччт', 'ффеее', 'кежцновв', 'еххрчштарзмкпйззсйлмплхбчбулзибвчбщ', 'шфжйдотрщттфхобббг', 'ожоцжущопгоцимсфчйщцддзнфи', 'цуимеимймкфччц', 'прммщмтбт', 'хцхлгдрнтхфж', 'лгд', 'цжбдаичхпщзцасбиршшикджцунйохдлхй', 'пидхцмхйнспйокнттмййвчщпхап', 'йтйзмеаизкшйошзвфучйирг', 'хцхлгдр']) from system.numbers limit 10; + +select 0 = multiSearchFirstIndexCaseInsensitive(materialize('gyhTlBTDPlwbsznFtODVUzGJtq'), ['seSqNDSccPGLUJjb', 'xHvtZaHNEwtPVTRHuTPZDFERaTsDoSdX', 'QCeZOYqoYDU', 'bsybOMriWGxpwvJhbPfYR', 'FFHhlxfSLzMYwLPPz', 'tvDAJjaLNCCsLPbN', 'kOykGaSibakfHcr', 'mWAZaefkrIuYafkCDegF', 'ILrFDapnEDGCZWEQxSDHjWnjJmeMJlcMXh', 'zHvaaTgspUDUx', 'tss', 'laUe', 'euUKFLSUqGCjgj', 'Kd', 'MxyBG', 'qRXMsQbNsmFKbYSfEKieYGOxfVvSOuQZw', 'PdBrNIsprvTHfTuLgObTt', 'kMekbxI']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitive(materialize('ZxTznPEbfoBfLElYOrRiHrDLMmTpIh'), ['bJhYwKLeeLvLmXwWvQHWFkDQp', 'dLyZmUicTZmUfjfsFjxxgOiMJn', 'UCYbbGcY', 'kpPiwfWHEuh', 'jviwmHeiTQGxlTKGVEnse', 'cVnEyLFjKXiLebXjjVxvVeNzPPhizhAWnfCFr', 'gkcoAlFFA', 'ahZFvTJLErKpnnqesNYueUzI', 'VIJXPlFhp', 'rxWeMpmRFMZYwHnUP', 'iFwXBONeEUkQTxczRgm', 'ZnbOGKnoWh', 'SokGzZpkdaMe', 'EfKstISJNTmwrJAsxJoAqAzmZgGCzVRoC', 'HTmHWsY', 'CpRDbhLIroWakVkTQujcAJgrHHxc']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitive(materialize('VELfidaBvVtAghxjkrdZnG'), ['fvEFyRHvixuAYbuXygKeD', 'zFNHINreSOFksEGssBI', 'hcdWEcKDGWvfu', 'KczaFjvN', 'nZLTZAYSbfqcNWzWuGatDPUBYaRzuMBO', 'UdOdfdyPWPlUVeBzLRPMnqKLSuHvHgKX', 'DgVLuvxPhqRdSHVRSeoJwWeJQKQnKqFM', 'NNfgQylawNsoRJNpmFJVjAtoYy', 'tWFyALHEAyladtnPaTsmFJQfafkFjL', 'lYIXNiApypgtQuziDNKYfjwAqT', 'QjbTezRorweORubheFFrj', 'htIjVIFzLlMJDsPnBPF', 'ltDTemMQEgITf', 'fprmapUHaSQNLkRLWAfhOZNy', 'dOJMvPoNCUjEk', 'm', 'vEEXwfF', 'aVIsuUeKGAcmBcxOHubKuk']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitive(materialize('kOzLaInSCOFHikwfkXaBfkyjdQ'), ['t', 'emHGfAiZSkZaVTSfplxRiPoDZUTT', 'YHnGJDTzxsboDsLPGHChMHwrHHICBIs', 'gbcbVHSlVeVDOeILWtSLkKfVVjG', 'fPaJjbnNthEwWZyg', 'qS', 'PCQxoLaSdQOjioMKPglmoWR', 'KLMNszm', 'TCErEFyxOvqnHs', 'dRbGzEJqvIGAcilZoHlXtZpjmLLZfsYueKqo', 'iKHmNSbGgaJYJEdMkbobXTdlFgAGEJMQ', 'mUGB']) from system.numbers limit 10; +select 1 = multiSearchFirstIndexCaseInsensitive(materialize('JGcICnWOGwFmJzHjtGJM'), ['fmJzHj', 'LhGTreYju', 'yCELHyNLiAJENFOLKOeuvEPxDPUQj', 'kWqx', 'OBnNMuaeQWmZqjWvQI', 'ektduDXTNNeelv', 'J', 'iCNwoGwfMJzhjtGJ', 'uiIipgCRWeKm', 'bNIWEfWyZlLd']) from system.numbers limit 10; +select 7 = multiSearchFirstIndexCaseInsensitive(materialize('fsoSePRpplvNyBVQYjRFHHIh'), ['ZqGBzyQJYuhTupkOLLqgXdtIkhZx', 'pouH', 'mzCauXdgBdEpuzzFkfJ', 'uOrjMmsHkPpGAhjJwVOFw', 'KbKrrCJrTtiuu', 'jxbLtHIrwYXDERFHfMzVJxgUAofwUrB', 'PLvNyBVQYjRfhhi', 'wTPkeRGqqYiIxwExFu', 'PplvNybvqyJ', 'qOWuzwzvWrvzamVTPUZPMmZkIESq', 'ZDGM', 'nLyiGwqGIcr', 'GdaWtNcVvIYClQBiomWUrBNNKWV', 'QQxsPMoliytEtQ', 'TVarlkYnCsDWm', 'BvqYJr', 'YJr', 'sePrPPLVNYbvqYJRFhh', 'ybvq', 'VQYjrFHh']) from system.numbers limit 10; +select 3 = multiSearchFirstIndexCaseInsensitive(materialize('aliAsDgMSDPISdriLduBFnuWaaRej'), 
['gWOFTxMrQGQaLrpJamvRhgeHwk', 'iWsBLzLycWvbJXBNlBazmJqxNlaPX', 'Ri', 'FPLRURSsjvsySncekcxaWQFGKn', 'wgXSTVzddtSGJQWxucYorRjnQQlJcd', 'wOLJWZcjHEatZWYfIwGIqnuzdcHKSFqfARfNLky', 'eEECZMNmWcoEnVeSrDNJxcOKDz', 'duBF', 'EhfLOjeEOQ', 'dUbFNUWA']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitive(materialize('EUzxPFYxMsJaTDzAKRXgZIVSFXU'), ['TDKAgICICjzBKHRqgFAuPCSODemldGGd', 'LvMluSJTIlgL', 'srbRhQKjPIchsipVHsjxwhK', 'vdurVsYkUWiFQVaDOnoNIJEX', 'UzZsZqAUNjMvWJaTqSWMHpzlDhVOaLzHPZfV', 'XcnnPXXEJJv', 'JSwFBNnYzNbIRZdeMfYiAfxzWfnCQFqoTUjns', 'HBMeqdLkrhebQeYfPzfJKAZgtuWHl', 'cMfSOnWgJvGhFPjgZdMBncnqdX', 'orDafpQXkrADEikyLVTHYmbVxtD', 'Vz', 'bfYwQkUC', 'q', 'YqomKpmYpHGv']) from system.numbers limit 10; +select 4 = multiSearchFirstIndexCaseInsensitive(materialize('mDFzyOuNsuOCSzyjWXxePRRIAHi'), ['TfejIlXcxqqoVmNHsOocEogH', 'clyblaTFmyY', 'JQfxMAWVnQDucIQ', 'jw', 'fGetlRA', 'uWwCOCd', 'rInhyxSIFiogdCCdTPqJNrqVaKIPWvLFI', 'mimSJjfCWI', 'jqnJvNZXMEPorpIxpWkhCoiGzlcfqRGyWxQL', 'bxCJeVlWhqGHoakarZcK', 'unsUOcSZyjwxxe', 'E', 'PR', 'nsUoCSZyjwxXEPr', 'sfotzRPMmalUSjHkZDDOzjens', 'zYJwxx', 'DFzyouNsUocsZ', 'QBaQfeznthSEMIPFwuvtolRzrXjjhpUY', 'sQPVBaoeYlUyZRHtapfGM', 'lPiZLi']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitive(materialize('VOAJfSkbDvNWAZNLIwqUgvBOddX'), ['pHrGGgJ', 'VohjtPdQZSNeKAlChDCnRTelroghFbZXVpnD', 'rnWebvdsmiFypMKL', 'NtKRiJOfAkWyKvubXrkOODgmZxvfOohsnHJEO', 'nxsDisKarasSZwESIInCJnYREUcoRUTXHBUH', 'mXYYr', 'jujScxeTBWujKhKyAswXPRszFcOKMSbk', 'INEegRWNgEoxqwNaGZV', 'VVyjMXVWVyuaOwiVnEsYN', 'mkLXSmXppxJhFsmH', 'pRVnBrWjqPeUDHvhVuDbzUgy', 'PzchFdPTkOCIVhCKml', 'KXaGWnzqoHBd', 'PhzQVqIOLleqDSYNHLjAceHLKYPhCVq', 'aixxTqAtOAOylYGSYwtMkZbrKGnQLVxnq', 'ruEiaxeRaOOXGggRSPlUOGWSjxh', 'prSULtHvDMw', 'vEpaIIDbGvIePYIHHZVNSPYJl']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitive(materialize('ZHcEinZEFtfmHBLuCHntUhbIgY'), ['GKElMPEtmkLl', 'mkrzzjSRfXThuCQHkbZxRbhcymzTxcn', 'PREwQjxBJkpkiyuYEvtMZNFELgbINWsgf', 'lFEGlPtaDJSyoXzwREiRfpzNpsaBYo', 'tmVTuLPhqhgnFNhHvqpmc', 'NtijVhVfAwpRsvkUTkhwxcHJ', 'O', 'FSweqlUXdDcrlT', 'uljEFtKVjIzAEUBUeKZXzCWmG', 'dBIsjfm', 'CNaZCAQdKGiRUDOGMtUvFigloLEUr', 'yWjizKZ', 'QqPVdyIFXcweHz', 'uPmgGWGjhzt']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitive(materialize('AYMpbVsUQqAfoaMiJcYsulujYoSIx'), ['aXECumHNmAEefHPJy', 'hTosrERBdVCIilCYcMdHwaRh', 'PVDBpwrc', 'uFvQRPePvmzmocOauvEjqoxMhytzOwPSOCjmtm', 'kQqIlSCHDmWXCKN', 'ybAHGYDEDvvOJsF', 'WpkANi', 'cFGuzEcdahZtTdLFNBrRW', 'EBaybUFxO', 'mRlZUzHzMsMAgvtRtATEDLQvXZnZHw', 'uqxckjqpCBHiLgSPRz', 'Lv', 'AJcRfAvBmQVMOjaFfMfNHJt', 'FYsPM', 'pkKXTPgijOHFclqgVq', 'Ck']) from system.numbers limit 10; +select 11 = multiSearchFirstIndexCaseInsensitive(materialize('gmKSXWkNhKckrVNgvwiP'), ['bdJMecfCwQlrsgxkqA', 'NTgcYkMNDnTiQj', 'fmRZvPRkvNFnamMxyseerPoNBa', 'rfcRLxKJIVkLaRiUSTqnKYUrH', 'YSUWAyEvbUHc', 'PridoKqGiaCKp', 'quwOidiRRFT', 'yHmxxUyeVwXKnuAofwYD', 'gichY', 'QlNKUQpsQPxAg', 'knhkCKRVNGvWIp', 'jAuJorWkuxaGcEvpkXpqetHnWToeEp', 'KnHKCKrvNgVW', 'tCvFhhhzqegmltWKea', 'luZUmrtKmmgasVXS', 'mageZacuFgxBOkBfHsfJVBeAFx', 'hKC', 'hkRCMCgJScJusY', 'MKSXWknHkckrVNgv', 'osbRPcYXDxgYjSodlMgV']) from system.numbers limit 10; +select 15 = multiSearchFirstIndexCaseInsensitive(materialize('lcXsRFUrGxroGIcpdeSJGiSseJldX'), ['pBYVjxNcQiyAFfzBvHYHhheAHZpeLcieaTu', 'SQSQp', 'OQePajOcTpkOhSKmoIKCAcUDRGsQFln', 'AYMDhpMbxWpBXytgWYXjq', 'gkUC', 'oWcNKfmSTwoWNxrfXjyMpst', 'fQSqkjRNiBGSfceVgJsxgZLSnUu', 'LRrhUjQstxBlmPWLGFMwbLCaBEkWdNJ', 
'cZnaActZVoCZhffIMlkMbvbT', 'Uxg', 'vlKdriGMajSlGdmrwoAEBrdI', 'Fl', 'XzcNdlUJShjddbUQiRtR', 'AqowAuWqVQMppR', 'SRFUrGXrOgiCP', 'k']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitive(materialize('KhwhbOzWvobUwJcteCHguFCn'), ['LkDYrpvDfPL', 'CIaTaShobVIaWjdbsNsCMdZKlGdtWuJmn', 'zYcsxxFyfuGrPdTPgEvGbXoYy', 'vDIeYpJbLMGMuRkIrPkAnqDDkqXPzy', 'Ievib', 'CREiuEsErFgvGEkQzThHtYtPmcL', 'JjRWKyALtSkoGmRxh', 'JxPhpijkDOpncCKyDEyXvKNua', 'jo', 'mKpFscuBEABMAlQO', 'qiFTgJpcnUMRKzTEuKY', 'pXBtITxCPRaXijM', 'guYVLpIbu', 'tSKYIxv', 'oDnWaFAmsXGRdGvRPhbCIvFSFQNlSVYB', 'phdckINUiYL']) from system.numbers limit 10; +select 14 = multiSearchFirstIndexCaseInsensitive(materialize('pXFoUGwVTAItBqgbBaQwAqmeh'), ['LfBevBpGnaSlmGhbeZ', 'NtBYzEksiXvYI', 'jMeRw', 'omtaduY', 'BsWyvNdkfXsTBxf', 'CtoOIvaesuca', 'pgJcRIBVbyaPBgGsNKP', 'bAwdUMnwKvMXfFHQWrtfMeqcORIJH', 'GDxZblrqWSxUJFjEuXArPtfHPdwSNGGL', 'LLxcfp', 'NrLghkFpwCdvHJBfPBgiMatNRaDKjO', 'XCzr', 'cCojPpfLkGZnaWBGpaZvrGMwgHNF', 'BaQWAQmE', 'AQ', 'RtxxEZDfcEZAgURg']) from system.numbers limit 10; +select 5 = multiSearchFirstIndexCaseInsensitive(materialize('KoLaGGWMRbPbKNChdKPGuNCDKZtWRX'), ['FBmf', 'QJxevrlVWhTDAJetlGoEBZWYz', 'tKoWKKXBOATZukMuBEaYYBPHuyncskOZYD', 'kgjgTpaHXji', '', 'xOJWVRvQoAYNVSN', 'YApQjWJCFuusXpTLfmLPinKNEuqfYAz', 'GXGfZJxhHcChCaoLwNNocnCjtIuw', 'ZLBHIwyivzQDbGsmVNBFDpVaWkIDRqsl', 'Kp', 'EyrNtIFdsoUWqLcVOpuqJBdMQ', 'AggwmRBpbknCHdKPgun', 'xNlnPtyQsdqH', 'hDk']) from system.numbers limit 10; +select 6 = multiSearchFirstIndexCaseInsensitive(materialize('OlyNppgrtlubvhpJfxeWsRHpr'), ['slbiGvzIFnqPgKZbzuh', 'fakuDHZWkYbXycUwNWC', 'HnVViUypZxAsLJocdwFFPgTDIkI', 'bLx', 'fmXVYOINsdIMmTJAQYWbBAuX', 'pjFXews', 'BG', 'vrSQLb', 'ub', 'pREPyIjRhXGKZovTqlDyYIuoYHewBH', 'hnNQpJmOKnGMlVbkSOyJxoQMdbGhTAsQU', 'UwaNyOQuYpkE', 'yHNlFVnuOLUxqHyzAtNgNohLT', 'YJRazuUZkP', 'z', 'lUbVhpjFxEWsRhP']) from system.numbers limit 10; +select 6 = multiSearchFirstIndexCaseInsensitive(materialize('ryHzepjmzFdLkCcYqoFCgnJh'), ['cLwBRJmuspkoOgKwtLXLbKFsj', 'YSgEdzTdYTZAEtaoJpjyfwymbERCVvveR', 'RzdDRzKjPXQzberVJRry', 'HUitVdjGjxYwIaLozmnKcCpFOjotfpAy', 'LWqtEkIiSvufymDiYjwt', 'FDlKCCYqoFCGNj', 'jmZfdlKCcyQOFcGnJ', 'OZCPsxgxYHdhqlnPnfRVGOJRL', 'JfhoyhbUhmDrKtYjZDCDFDcdNs', 'KCCYqo', 'EPJMzFDLKcCYQ', 'zLQb', 'qsqFDGqVnDX', 'MzfdLkCCyQOFc']) from system.numbers limit 10; +select 5 = multiSearchFirstIndexCaseInsensitive(materialize('oQLuuhKsqjdTaZmMiThIJrtwSrFv'), ['MsfVCGMIlgwomkNhkKn', 'fBzcso', 'meOeEdkEbFjgyAaeQeuqZXFFXqIxBkLbYiPk', 'tNV', 'i', 'EwuTkQnYCWktMAIdZEeJkgl', '', 'hUo', 'dtAzmMITHijRtwsrFV', 'vhnipYCl', 'puor', 'TazMmiTh', 'ITHIJRTWSrf', 'luuHksqJDTaz', 'uHkSQjDtazMMiThIjrtwSRFV', 'gpWugfu', 'QjdtazmmIthIjRTWSRFV', 'ZdJpc']) from system.numbers limit 10; + +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('ИпрУщйжббКВНИчйацпцоЛП'), ['цШСкЕвеГЕЗЦщруИБтЦсБГАу', 'Хнщта', 'БшА', 'СалШйР', 'ЩфДГРРчшБДММГЧоноЖСчдпВХшшгйН', 'бЕжПШЦддожнЧоЕишчшЕЙфСщиВПФМ', 'ТЗзГФх', 'Чфл', 'КнНкнЖЕкППварНрхдгЙкДешмСКИЛкеО', 'ЖИсЧПСФФМДиТШХЦфмЗУпфрУщСЛщсфмвШ', 'ллЙумпхчОсЦМщУ', 'ГМУНЦФшНУбРжоПвШШщлВФАтоРфИ', 'БХцжеНЗкжЗЗшЦзфгдЖОзЗЖщКМИШАтЦАп', 'мтСкЕнбХШнЛхХГР']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('цмйвГЖруДлдЦавхЖАлоЕхЗКд'), ['ХфБПМДВХЙ', 'саЗваАбднХбЦттмКсМбШбВМУйНКСЖжХЦНц', 'плиЩщШАцЖсхГ', 'ЗнУЕФЗВаНА', 'ЧДйСаЗГЕшойСжбсуЩуЩщбПР', 'ЧЕуЩкФБВвчмабШЦтЖбОрЗп', 'йХбМсрТАФм', 'РЖСЗвЦлНВПЧщГУцЖ', 'ГГлЩрОХКнШРТуДФ', 'шСабРжла', 'ЕчБвгаРЧифаЙщХПпГЦхчШ', 'дайшйцВНЩЧуцйдМХг', 
'УнзНКЧххВрцЩМлАнЖСДОДцбИгЛЛР', 'сЛЗзПбиАгзК']) from system.numbers limit 10; +select 2 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('дфЧлзОжММФкЖгиЗЩлоШжФТкцк'), ['ЗРТцИрсФСбПрщГЗ', '', 'ЖГИЗщлОш', 'АДПН', '', 'чЛЗОЖмМфКжг', 'Мфкж', 'ндаовк', 'зГЛРГАНШмСмШМефазшеБкзДвЕШиЖСЗЧПИфо', 'ФЧЛзОЖммфКжгиЗЩ']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('ИИКДМЛхРчнвЙЕкВЧелТйЛВТ'), ['АчшОЛтНЙуЦЛЙфАКУйуТЗМеЗщОХТМЗеТА', 'НЦУДбчфРТОпЛкОгВпоО', 'неДавнНРеАУфТтфАнДчтнУМЛПШнроАчжш', 'бГржВПЧлЛтСВТтаМЦШШ', 'БщГщРнБхЕЛоЛсмЙцВЕГ', 'цбАжЦРеу', 'ХсЦРаНиН', 'нббДдВЗРС', 'змОПпеЛЖзушлнДЛфчЗлцЙЛфЖрЛКг', 'фШиЖСУоаНПйИВшшаоуЙУА', 'ЛктХиШРП', 'МапщВйцХч', 'жмУТкуГбУ', 'сйпзДЩоНдШЕТбПзФтсрмАФГСз', 'ЛБУвйладЕижрКзШУАгНЩчЕмАа', 'мЧпФлМчРбШРблмтмПМоС']) from system.numbers limit 10; +select 8 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('ПоДУЗАтХншЦатИшХвмИЖчГнжчНцух'), ['жЛЧХзкжлиЛцЩбЧСнЛУжЖпКРвиСРН', 'шадмЩеУШБврУдЕБЗИгмЗЕФШчЦБСзПидтАлб', 'йпГмШСз', 'хЖФЙиПГЗЩавиЗЩйПнБЗЦЩмАЧ', 'ХесщтлбСИуЦ', 'вар', 'ЙкМаСхаЩаЗнФЩфКжПщб', 'ОдУзАТХншЦатИШхвМиЖчгнЖч', 'ЗВЗДБпФфцвжУКвНсбухссбЙКЙйккЛиим', 'гХхсГЛшдфЖЛбгчоЕмоЧр']) from system.numbers limit 10; +select 7 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('ихзКЖЩсЧРСЖсЖжЛАшкТхИйТгМБпск'), ['ДРОБм', 'нз', 'тОЛ', 'щРзуЖрТ', 'Мдд', 'АЦГРК', 'Чрсжсжжл', 'чРсжсЖжл', 'ктхИйтГмБ', 'аАзЙддМДЦЩФкРТЧзЧПУойоТхБиЧПлХДв', 'иЙтгМбп', 'РицлПн', 'йДГнЧкЕв', 'ВМЩцАш', 'хКЩнДшуБЕЛТФГВгцБПРихШЙХгГД', 'иЙТГМ']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('жггкщцзщшамдбРЗжйТзвхшАпХСбе'), ['лВТвтлРБжиЛЦвРЦкАЦаНБгуОН', 'рШаавцжзМрзВЧДРСузб', 'оемрЗМгФБНмжп', 'ЛбмХбФЧШГЛХИуТСрфхп', 'ЖшТдтЧйчМР', 'ЧнИМбфУпмЙлШЗТрТИкКИЩОЧеМщПЩлдБ', 'ГвРдПжГдБаснилз', 'уТнТчТРЗИЛ', 'ИТЕВ', 'дИСЖпПнПСНОвсЩЩшНтХЧшВ', 'штабтлМнсчРЗтфсТЩублЕЧйцеЦТтХ', 'ХбхгУШвАзкшЖ']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('нсЩЙЕМмЧЛСйФцГВМиатГХш'), ['КсОПЧИкВсКшРхнкхБжду', 'мШмпТащжФ', 'ББЖнианЧЦпмрГЩГМаЛКжА', 'арИжзжфГТУДИРРРбцил', 'дфдмшМИщТиЗПруКфОнСЦ', 'Рцч', 'гмДгВДАтсщКЗлхвжЦУеФДАТГЙЦЧОЗвРш', 'чфХЩсДбУбВжАМшРлКРщв', 'нцБйсУ', 'фасДЕчвчДмбтЖХвоД', 'аБЧшЖшЖАКргОИшпШЧзТбтфйвкЕц', 'ЗжжсмкжЛд', 'щщлПзг', 'бП']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('сКиурчоиаЦйхгаУДПфчИтИК'), ['МЧПцУАМрХКЧмАЦннУшмРчкЖКХвху', 'РвДуВиашрРКкмжшЖНШБфлцжБЦР', 'йМУиУчНЧчРшДйБЗфЩЦйПсцгкДС', 'НсмаЛзЧвНЦШФуВРпзБГзйКцп', 'ЖлМЛУХОБллСЗСКвМКМдГчЩ', 'ЩХПШиобЛх', 'аФАЖВтРиЦнжбкСожУЖЙипм', 'аУГжУНуМУВФлж', 'ШБчтЗкЖНЙк', 'ЩоГПГчНП', 'мВЗйЛаХПоЕМХиИйДлшРгзугЙЖлнМппКЦ', 'вчмДФхНеЦйЗсЗйкфпОщПтШпспИМдГйВМх', 'ИЗИжЧжаГЩСуцСЩдкскздмЖЦ', 'дАмфЕбгс', 'ГМттнхчЩжМЧДфщШБкфчтЧ', 'ШЕииФБпщЙИДцРиЖжЩл', 'ОпуОлБ', 'хБ']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('риШМбгиПЖннНоЧргзГзеДпЛиНт'), ['икДкбйдройВУсвФзрПСусДнАШо', 'чуУеТкУВФхз', 'ЕГпйчехЗвЛлБблЧПДм', 'зеоЩЧожКЛбШЩдАрКБНйшКВШаЗгПш', 'виФКуЗОтгВмТкБ', 'цДрЙгЗРаЧКаМДдБЕЧзСРщВФзПВЧГвЩрАУшс', 'мБЗИУдчХХжТж', 'ФТНМмгЖилуЛйМ', 'ЗегЩЦнЦщцИк', 'оГОусхФсДЖДЩИЕХЗпсПЩХБТГЕп', 'АУКНзАДНкусВЧХвАж', 'КвКрбсВлНАоЗсфХОйЦхТ', 'вФдеХацЧБкрхМЖЗЧчКшпфВчс', 'йХшиОвХЗжТпДТбвУрпшЕ']) from system.numbers limit 10; +select 11 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('МойрЙлтЖйБдББЛЕЕЦузЛфпИЕГт'), ['ПОжЦЩа', 'СШзЧФтСЗохЦЗдФтцНТу', 'вЕдТ', 'ечУФаМДнХщЕНУи', 'вмеосТзБАБуроЙУЛгФжДсЧщтчЕзлепгк', 'ИЧтБрцПмРаВрйИвНЛСйпЖжУВдНрурКшоКХП', 'ЕН', 'щКЦЩгФБСХпкпит', 
'ей', 'ЕахшеОМРдЕГХуГЖчвКХМЕ', 'Гт', 'НужЛЛЙОАл']) from system.numbers limit 10; +select 11 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('еззЦАвУаДнзИКЙнЙдртРоП'), ['КгЩбшПЛКвтИРцйчккгЧчЧмтГ', 'кЛппСФщзМмТйВЕтбЩЦлО', 'ШпдзиЖх', 'иИХ', 'пУаАФгсмтофНФХиЦЕтТЗсОШЗЙ', 'фаКАБТцФМиКЖрИКшГБЗБ', 'идЖЙдЦММУнХЦЦфсФМ', 'МиЦечЖЦЙмРВЙОХсБРНнрлйЙшц', 'ТфдСтМгтмимТМАучтхПНЦлуф', 'бейККЛСггУЦБсокЕЙпнРЧ', 'цавУАДНЗИКЙнЙд', 'ЩйЕЖчЧщаПшжФсхХЛЕТчвмЙнуце', 'РТРОП', 'цАВуАДнзИкЙНЙдРтРо', 'аЩПИд', 'ОСчКшОАчВмр', '', 'уЙЛИуЕУвцДшНОгбТбИШв', 'АВУаднзИКЙНйдР', 'жТйоП']) from system.numbers limit 10; +select 12 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('шйМЦУщвфщшбмлТНВохСЖНУ'), ['хшТАпТоШхКНсДпвДЕчДМНбАНччд', 'ХКуПСтфСйРжмБглОШЙлйДкСФВйВ', 'хпмНЦМУШеАД', 'чзмЧВвлбЧкАщПкзТгеуГущб', 'шзжрДд', 'еЗГОЙНйИБЗДщИИНицмсЙЗгФУл', 'кнщЙхооДТООе', 'всзЙнТшжФЗДБДрщВДлбвулДИаз', 'мп', 'уБОйцзнМпИсксхефбдЕЛйгИмГШГЗЩ', 'ОМпзШШщчФФнвУЧгжчиндЧч', 'щВФЩШбмЛТн', 'бм', 'БпфнкнйЗцПдЧЩбВ']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('НЗБлОбшмОПктткоччКиКрФсбкШАХ'), ['нффЕББУЖГшЖвГфЦФГЕСщсЩЧлфнАшшктизУ', 'нСмпцхшИои', 'ЧИчЗУтйЦхГезппФРХХШуцЗШВ', 'РИнщН', 'НЩдВТсЙсОдхРбМФнСпАбОПкудБФСчмб', 'йхглпдКтртгош', 'ибгУРАБцх', 'ИЕиЛрИДафмЗИкТвАуГчШугбЧмЛШщсОЧбБкП', 'ЩСМуХМ', 'АУсмдЗБвКфЩ', 'пгбТНОйц', 'МоИ', 'КОйкзОЕИЗМЩ', 'чщттЛРНнГхЗхХй', 'ЩшцЧРКмШЖЩЦемтЧУЛГкХтВНзОжУХТпН', 'ЕшбБНчрДпЩЧМлераУЖХйфйдчтсчПШ', 'дбФйтИАшДйЩтбФйШуПиРлГмВОШаСлШЧИвфЖщгж', 'ОДжТЦщпщИжфуеЩмн', 'ПЛНЕзжСчВКДттуФРУ', 'БбмеГЩХшжрцОжХНииВКВлдиХБДСмнНфХЛТХ']) from system.numbers limit 10; +select 4 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('ЕКаЖСЗЗЕЗгПдШкфцЙТцл'), ['ЙКМИХРОХ', 'НвМУХзфчДбАРЙДу', 'чмщжФшшжсЗТв', 'жСЗзеЗг', 'ЛФсКзВСдЦД', 'АЖсЗЗЕЗГ', 'Пдшкфц', 'усйсКщшрДрвнФЛедуГХ', '', 'цйтЦ', 'Ощс', 'ЕЗГпдшКф', 'ззеЗгп', 'УгЛйхШТтшрЛ', 'ЗзЕЗгП', 'КЛмТЩРтрзБбЩРгФбиОБазУнтУЦ']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('чЕжАфАрБпКбДмшАшТШККауЩИхНВО'), ['ЧЙпЗЧЧлйПЙЖЙшККг', 'зйхуМЩАИПГЗА', 'ЙцехноХниИбзБЧ', 'чВомЗОфУроС', 'дбРхХЗрзоДДШщЕДжиФаЙ', 'еЛзТцЩДиДГрдМОНЧУнеТуДЩЧЦпГЕщПОРсйпЧ', 'ФчнпМРЧцПЙЩЩвфДХПнУхцЩСИ', 'цлШеУкМБнжЧлУцСуСЙуотшМфйс', 'лугГлкщКщкзЛйпбдсишргДДшОувр', 'ЗРИаФЛЗФрСзм', 'аЗвжВгхЩоЦ', 'чГКлеБНДнИЖЧеШЧДнИвсГДЖЖфБМНсУЦосВс', 'щЦнПУзЧщнЩЕ', 'рВУв']) from system.numbers limit 10; +select 20 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('анктгЦВВкЧвЖиБпфТйлр'), ['НшДПчтсСЧпкидаХжаЙчаДчЦГшГ', 'ХнцЛШИрХВаРхнЧИЙрОЛЛИТпППфгЖЩФ', 'ФАЛущПупмдМБмтйзУшрВМзцзШжгД', 'ГчЛЧеЛДХеипдшЦЦмаШНаРшУТ', 'фОЕфжО', 'ТНсУАнчшУЛЦкцчЙ', 'ЛйЦКБЗГЦйКЩиОПуТЦкБкБувснЙи', 'Бунф', 'ИтХЛШСУНЦВйРСЙчДчНвйшЗЦй', 'АцСКнзБаЖУДЖегавйБгужШАДЙтжИВк', 'ЦцХщфирДПрСуХзхЖМЕщ', 'кфдБЖКншвУФкЗДКуЙ', 'СкиСЦЗЦРмгЦНпБхфХДЙщЛзХ', 'йУепВЖАПНбАЩуЛжвЧпхМ', 'БпЧшпДочУвибщерйхйтОБАСПнЧМИОЩ', 'чФгНЗщвхавбшсООоВштбЧ', 'уДиЕцнЙХВЕйИАГдЕ', 'тп', 'ЧЕРЖсгВ', 'вЖибПФТЙЛ']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('ипозйпхЛОЛТлСМХЩдМвМгШИвГиЛп'), ['ФСГзиГррБДНКГЛХбААФхИ', 'гегпАвхДЕ', 'ЦХжзщХИвхп', 'ЗЖ', 'ХОКцКзЩо', 'абИОрГПМТКшБ', 'кмХТмФихСЦсшУдхВбИШМНАНмпмХОЗйПЩч', 'еОжТСкфЕТУУжГ', 'НтщМЕПЧИКЙКйй', 'ежСикИвйЛж', 'ушЩФОтпБзЩЛЗЦЧЙиВгБЧоПХНгОуАДТЙж', 'фМЕРефнутпнцФРнрГЖ', 'хшДЧзнХпфорвЩжмГРЦуХГ', 'ЧЖн', 'вВзгОСхгНумм', 'ЗДоВлСжпфщСКсщХаолЛнЛЗбСхвЩвЩНоЩЩМ']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('МрЗтВФуЖРеЕШЧхПФбжжхчД'), ['щжОожЦндцШйТАй', 'йуРСЦУЗФУЦПвРфевСлфдРещЦтИтЩЩТг', 
'ЕГЧдмХмРАлнЧ', 'йнкФизГСЗнуКбЙВЙчАТТрСхаЙШтсдгХ', 'ЧПрнРЖЙцХИщ', 'зЕ', 'СжВЩчГзБХбйТиклкдШШИееАлЧЩН', 'МШщГйБХжЙпйЕЗТзКмпе', 'НКбНщОМДзлдЧОс', 'НчзВХОпХХШМОХФумБгсрРЧИчВтгутВЩо']) from system.numbers limit 10; +select 0 = multiSearchFirstIndexCaseInsensitiveUTF8(materialize('упТУЖелФкЧЧУЦРжоБтХсжКщД'), ['щКшуОЖааЖйнЕбДИжМК', 'ЕкнШцХРВтНйШоНбЙйУоЧщУиРпШЧхмКЧжх', 'рвЩЗоЗхшЗвлизкСзебЩКМКжбша', 'ДииБНСШвцЦбаСсИжЕЗмхмВ', 'СЦоБЗПМтмшрУлрДТФГЖиувШЗууШзв', 'ЦЗБЕзВХЙбйВОмЗпхндЗ', 'ЗНизЧВШкГВтпсЖж', 'уШиБПЙЧтРаЕгИ', 'ЙшпПА', 'ЧоММаАйМСфбхуФкефФштгУА']) from system.numbers limit 10; + diff --git a/parser/testdata/02366_asof_optimize_predicate_bug_37813/ast.json b/parser/testdata/02366_asof_optimize_predicate_bug_37813/ast.json new file mode 100644 index 000000000..8adf38b93 --- /dev/null +++ b/parser/testdata/02366_asof_optimize_predicate_bug_37813/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001581335, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02366_asof_optimize_predicate_bug_37813/metadata.json b/parser/testdata/02366_asof_optimize_predicate_bug_37813/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_asof_optimize_predicate_bug_37813/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_asof_optimize_predicate_bug_37813/query.sql b/parser/testdata/02366_asof_optimize_predicate_bug_37813/query.sql new file mode 100644 index 000000000..4db42a225 --- /dev/null +++ b/parser/testdata/02366_asof_optimize_predicate_bug_37813/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (c1 Int32, c2 Int32) ENGINE MergeTree ORDER BY c1; +INSERT INTO t1 (c1, c2) VALUES (1, 10), (1, 20), (1, 30); + +DROP TABLE IF EXISTS t2; +CREATE TABLE t2 (c1 Int32, c2 Int32, c3 String) ENGINE MergeTree ORDER BY (c1, c2, c3); +INSERT INTO t2 (c1, c2, c3) VALUES (1, 5, 'a'), (1, 15, 'b'), (1, 25, 'c'); + +SET enable_optimize_predicate_expression = 1; +WITH + v1 AS (SELECT t1.c2, t2.c2, t2.c3 FROM t1 ASOF JOIN t2 USING (c1, c2)) + SELECT count() FROM v1 WHERE c3 = 'b'; + +SET enable_optimize_predicate_expression = 0; +WITH + v1 AS (SELECT t1.c2, t2.c2, t2.c3 FROM t1 ASOF JOIN t2 USING (c1, c2)) + SELECT count() FROM v1 WHERE c3 = 'b'; diff --git a/parser/testdata/02366_decimal_agg_state_conversion/ast.json b/parser/testdata/02366_decimal_agg_state_conversion/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02366_decimal_agg_state_conversion/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02366_decimal_agg_state_conversion/metadata.json b/parser/testdata/02366_decimal_agg_state_conversion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_decimal_agg_state_conversion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_decimal_agg_state_conversion/query.sql b/parser/testdata/02366_decimal_agg_state_conversion/query.sql new file mode 100644 index 000000000..edcab325d --- /dev/null +++ b/parser/testdata/02366_decimal_agg_state_conversion/query.sql @@ -0,0 +1,62 @@ +select sumMerge(y) from +( + select cast(x, 'AggregateFunction(sum, Decimal(50, 10))') y from + ( + select arrayReduce('sumState', [toDecimal256('0.000001', 10), toDecimal256('1.1', 10)]) x + ) +); + 
+select minMerge(y) from +( + select cast(x, 'AggregateFunction(min, Decimal(18, 10))') y from + ( + select arrayReduce('minState', [toDecimal64('0.000001', 10), toDecimal64('1.1', 10)]) x + ) +); + + +drop table if exists consumer_02366; +drop table if exists producer_02366; +drop table if exists mv_02366; + +CREATE TABLE consumer_02366 +( + `id` UInt16, + `dec` AggregateFunction(argMin, Decimal(24, 10), UInt16) +) +ENGINE = AggregatingMergeTree +PRIMARY KEY id +ORDER BY id; + +CREATE TABLE producer_02366 +( + `id` UInt16, + `dec` String +) +ENGINE = MergeTree +PRIMARY KEY id +ORDER BY id; + +CREATE MATERIALIZED VIEW mv_02366 TO consumer_02366 AS +SELECT + id, + argMinState(dec, id) AS dec +FROM +( + SELECT + id, + toDecimal128(dec, 10) AS dec + FROM producer_02366 +) +GROUP BY id; + +INSERT INTO producer_02366 (*) VALUES (19, '.1'); + +SELECT + id, + finalizeAggregation(dec) +FROM consumer_02366; + +drop table consumer_02366; +drop table producer_02366; +drop table mv_02366; diff --git a/parser/testdata/02366_direct_dictionary_dict_has/ast.json b/parser/testdata/02366_direct_dictionary_dict_has/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02366_direct_dictionary_dict_has/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02366_direct_dictionary_dict_has/metadata.json b/parser/testdata/02366_direct_dictionary_dict_has/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_direct_dictionary_dict_has/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_direct_dictionary_dict_has/query.sql b/parser/testdata/02366_direct_dictionary_dict_has/query.sql new file mode 100644 index 000000000..413348114 --- /dev/null +++ b/parser/testdata/02366_direct_dictionary_dict_has/query.sql @@ -0,0 +1,45 @@ + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) +ENGINE = Memory; + +DROP TABLE IF EXISTS test_lookup_table; +CREATE TABLE test_lookup_table +( + id UInt64, + lookup_key UInt64, +) +ENGINE = Memory; + +INSERT INTO test_table VALUES(0, 'value_0'); + +INSERT INTO test_lookup_table VALUES(0, 0); +INSERT INTO test_lookup_table VALUES(1, 0); +INSERT INTO test_lookup_table VALUES(2, 0); +INSERT INTO test_lookup_table VALUES(3, 1); +INSERT INTO test_lookup_table VALUES(4, 0); +INSERT INTO test_lookup_table VALUES(5, 1); +INSERT INTO test_lookup_table VALUES(6, 0); +INSERT INTO test_lookup_table VALUES(7, 2); +INSERT INTO test_lookup_table VALUES(8, 1); +INSERT INTO test_lookup_table VALUES(9, 0); + +DROP DICTIONARY IF EXISTS test_dictionary; +CREATE DICTIONARY test_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'test_table')) +LAYOUT(DIRECT()); + +SELECT id, lookup_key, dictHas('test_dictionary', lookup_key) FROM test_lookup_table ORDER BY id ASC; + +DROP DICTIONARY test_dictionary; +DROP TABLE test_table; +DROP TABLE test_lookup_table; diff --git a/parser/testdata/02366_explain_query_tree/ast.json b/parser/testdata/02366_explain_query_tree/ast.json new file mode 100644 index 000000000..5958d4fbe --- /dev/null +++ b/parser/testdata/02366_explain_query_tree/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001137155, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02366_explain_query_tree/metadata.json 
b/parser/testdata/02366_explain_query_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_explain_query_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_explain_query_tree/query.sql b/parser/testdata/02366_explain_query_tree/query.sql new file mode 100644 index 000000000..82621ec90 --- /dev/null +++ b/parser/testdata/02366_explain_query_tree/query.sql @@ -0,0 +1,38 @@ +SET enable_analyzer = 1; + +EXPLAIN QUERY TREE run_passes = 0 SELECT 1; + +SELECT '--'; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +EXPLAIN QUERY TREE run_passes = 0 SELECT id, value FROM test_table; + +SELECT '--'; + +EXPLAIN QUERY TREE run_passes = 1 SELECT id, value FROM test_table; + +SELECT '--'; + +EXPLAIN QUERY TREE run_passes = 0 SELECT arrayMap(x -> x + id, [1, 2, 3]) FROM test_table; + +SELECT '--'; + +EXPLAIN QUERY TREE run_passes = 1 SELECT arrayMap(x -> x + 1, [1, 2, 3]) FROM test_table; + +SELECT '--'; + +EXPLAIN QUERY TREE run_passes = 0 WITH x -> x + 1 AS lambda SELECT lambda(id) FROM test_table; + +SELECT '--'; + +EXPLAIN QUERY TREE run_passes = 1 WITH x -> x + 1 AS lambda SELECT lambda(id) FROM test_table; + +DROP TABLE test_table; diff --git a/parser/testdata/02366_kql_create_table/ast.json b/parser/testdata/02366_kql_create_table/ast.json new file mode 100644 index 000000000..be7a18f06 --- /dev/null +++ b/parser/testdata/02366_kql_create_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery Customers (children 1)" + }, + { + "explain": " Identifier Customers" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001911858, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02366_kql_create_table/metadata.json b/parser/testdata/02366_kql_create_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_kql_create_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_kql_create_table/query.sql b/parser/testdata/02366_kql_create_table/query.sql new file mode 100644 index 000000000..75a81c5db --- /dev/null +++ b/parser/testdata/02366_kql_create_table/query.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS Customers; +CREATE TABLE Customers +( + FirstName Nullable(String), + LastName String, + Occupation String, + Education String, + Age Nullable(UInt8) +) ENGINE = Memory; + +INSERT INTO Customers VALUES ('Theodore','Diaz','Skilled Manual','Bachelors',28),('Stephanie','Cox','Management abcd defg','Bachelors',33),('Peter','Nara','Skilled Manual','Graduate Degree',26),('Latoya','Shen','Professional','Graduate Degree',25),('Apple','','Skilled Manual','Bachelors',28),(NULL,'why','Professional','Partial College',38); +Select '-- test create table --' ; +Select * from kql($$Customers|project FirstName$$) limit 1;; +DROP TABLE IF EXISTS kql_table1; +CREATE TABLE kql_table1 ENGINE = Memory AS select *, now() as new_column From kql($$Customers | project LastName | filter LastName=='Diaz'$$); +select LastName from kql_table1 limit 1; +DROP TABLE IF EXISTS kql_table2; +CREATE TABLE kql_table2 +( + FirstName Nullable(String), + LastName String, + Age Nullable(UInt8) +) ENGINE = Memory; +INSERT INTO kql_table2 select * from kql($$Customers|project FirstName,LastName,Age | filter FirstName=='Theodore'$$); +select * from 
kql_table2 limit 1; +-- select * from kql($$Customers | where FirstName !in ("test", "test2")$$); +DROP TABLE IF EXISTS Customers; +DROP TABLE IF EXISTS kql_table1; +DROP TABLE IF EXISTS kql_table2; diff --git a/parser/testdata/02366_kql_datatype/ast.json b/parser/testdata/02366_kql_datatype/ast.json new file mode 100644 index 000000000..7d8c6b020 --- /dev/null +++ b/parser/testdata/02366_kql_datatype/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001592852, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02366_kql_datatype/metadata.json b/parser/testdata/02366_kql_datatype/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_kql_datatype/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_kql_datatype/query.sql b/parser/testdata/02366_kql_datatype/query.sql new file mode 100644 index 000000000..ac5dd8bcf --- /dev/null +++ b/parser/testdata/02366_kql_datatype/query.sql @@ -0,0 +1,118 @@ +set allow_experimental_kusto_dialect=1; +set dialect = 'kusto'; + +print '-- bool' +print bool(true); +print bool(true); +print bool(null); +print '-- int'; +print int(123); +print int(null); +print int('4'); -- { clientError BAD_ARGUMENTS } +print '-- long'; +print long(123); +print long(0xff); +print long(-1); +print long(null); +print 456; +print '-- real'; +print real(0.01); +print real(null); +print real(nan); +print real(+inf); +print real(-inf); +print double('4.2'); -- { clientError BAD_ARGUMENTS } +print '-- datetime'; +print datetime(2015-12-31 23:59:59.9); +print datetime(2015-12-31); +print datetime('2014-05-25T08:20:03.123456'); +print datetime('2014-11-08 15:55:55'); +print datetime('2014-11-08 15:55'); +print datetime('2014-11-08'); +print datetime(null); +print datetime('2014-05-25T08:20:03.123456Z'); +print datetime('2014-11-08 15:55:55.123456Z'); +print '-- time'; +print time('14.02:03:04.12345'); +print time('12:30:55.123'); +print time(1d); +print time(-1d); +print time(6nanoseconds); +print time(6tick); +print time(2); +print time(2) + 1d; +print '-- guid' +print guid(74be27de-1e4e-49d9-b579-fe0b331d3642); +print guid(null); +print '-- timespan (time)'; +print timespan(2d); -- 2 days +--print timespan(1.5h); -- 1.5 hour +print timespan(30m); -- 30 minutes +print timespan(10s); -- 10 seconds +--print timespan(0.1s); -- 0.1 second +print timespan(100ms); -- 100 millisecond +print timespan(10microsecond); -- 10 microseconds +print timespan(1tick); -- 100 nanoseconds +--print timespan(1.5h) / timespan(30m); +print timespan('12.23:12:23') / timespan(1s); +print '-- null'; +print isnull(null); +print bool(null), int(null), long(null), real(null), double(null); +print '-- decimal'; +print decimal(null); +print decimal(123.345); +print decimal(1e5); +print '-- dynamic'; -- no support for mixed types and bags for now +print dynamic(null); +print dynamic(1); +print dynamic(timespan(1d)); +print dynamic([1,2,3]); +print dynamic([[1], [2], [3]]); +print dynamic(['a', "b", 'c']); +print '-- cast functions' +print '--tobool("true")'; -- == true +print tobool('true'); -- == true +print tobool('true') == toboolean('true'); -- == true +print '-- tobool("false")'; -- == false +print tobool('false'); -- == false +print tobool('false') == toboolean('false'); -- == false +print '-- tobool(1)'; -- == true +print tobool(1); -- == true +print tobool(1) == 
toboolean(1); -- == true +print '-- tobool(123)'; -- == true +print tobool(123); -- == true +print tobool(123) == toboolean(123); -- == true +print '-- tobool("abc")'; -- == null +print tobool('abc'); -- == null +print tobool('abc') == toboolean('abc'); -- == null +print '-- todouble()'; +print todouble('123.4'); +print todouble('abc') == null; +print '-- toreal()'; +print toreal("123.4"); +print toreal('abc') == null; +print '-- toint()'; +print toint("123") == int(123); +print toint('abc'); +print '-- tostring()'; +print tostring(123); +print tostring(null) == ''; +print '-- todatetime()'; +print todatetime("2015-12-24") == datetime(2015-12-24); +print todatetime('abc') == null; +print '-- make_timespan()'; +print v1=make_timespan(1,12), v2=make_timespan(1,12,30), v3=make_timespan(1,12,30,55.123); +print '-- totimespan()'; +print totimespan(1tick); +print totimespan('0.00:01:00'); +print totimespan('abc'); +print totimespan('12.23:12:23') / totimespan(1s); +-- print totimespan(strcat('12.', '23', ':12:', '23')) / timespan(1s); -> 1120343 +print '-- tolong()'; +print tolong('123'); +print tolong('abc'); +print '-- todecimal()'; +print todecimal(123.345); +print todecimal(null); +print todecimal('abc'); +-- print todecimal(4 * 2 + 3); -> 11 diff --git a/parser/testdata/02366_kql_distinct/ast.json b/parser/testdata/02366_kql_distinct/ast.json new file mode 100644 index 000000000..3e2585b4b --- /dev/null +++ b/parser/testdata/02366_kql_distinct/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery Customers (children 1)" + }, + { + "explain": " Identifier Customers" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001475793, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02366_kql_distinct/metadata.json b/parser/testdata/02366_kql_distinct/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_kql_distinct/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_kql_distinct/query.sql b/parser/testdata/02366_kql_distinct/query.sql new file mode 100644 index 000000000..c202f43f8 --- /dev/null +++ b/parser/testdata/02366_kql_distinct/query.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS Customers; +CREATE TABLE Customers +( + FirstName Nullable(String), + LastName String, + Occupation String, + Education String, + Age Nullable(UInt8) +) ENGINE = Memory; + +INSERT INTO Customers VALUES ('Theodore','Diaz','Skilled Manual','Bachelors',28), ('Stephanie','Cox','Management abcd defg','Bachelors',33),('Peter','Nara','Skilled Manual','Graduate Degree',26),('Latoya','Shen','Professional','Graduate Degree',25),('Apple','','Skilled Manual','Bachelors',28),(NULL,'why','Professional','Partial College',38); + +set allow_experimental_kusto_dialect=1; +set dialect = 'kusto'; + +print '-- distinct * --'; +Customers | distinct *; + +print '-- distinct one column --'; +Customers | distinct Occupation; + +print '-- distinct two column --'; +Customers | distinct Occupation, Education; + +print '-- distinct with where --'; +Customers where Age <30 | distinct Occupation, Education; + +print '-- distinct with where, order --'; +Customers |where Age <30 | order by Age| distinct Occupation, Education; diff --git a/parser/testdata/02366_kql_extend/ast.json b/parser/testdata/02366_kql_extend/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02366_kql_extend/ast.json @@ -0,0 +1 @@ +{"error": 
true} diff --git a/parser/testdata/02366_kql_extend/metadata.json b/parser/testdata/02366_kql_extend/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_kql_extend/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_kql_extend/query.sql b/parser/testdata/02366_kql_extend/query.sql new file mode 100644 index 000000000..2e907c7bb --- /dev/null +++ b/parser/testdata/02366_kql_extend/query.sql @@ -0,0 +1,59 @@ +-- datatable(Supplier:string, Fruit:string, Price: real, Purchase:datetime) +-- [ +-- 'Aldi','Apple',4,'2016-09-10', +-- 'Costco','Apple',2,'2016-09-11', +-- 'Aldi','Apple',6,'2016-09-10', +-- 'Costco','Snargaluff',100,'2016-09-12', +-- 'Aldi','Apple',7,'2016-09-12', +-- 'Aldi','Snargaluff',400,'2016-09-11', +-- 'Costco','Snargaluff',104,'2016-09-12', +-- 'Aldi','Apple',5,'2016-09-12', +-- 'Aldi','Snargaluff',600,'2016-09-11', +-- 'Costco','Snargaluff',200,'2016-09-10', +-- ] + +DROP TABLE IF EXISTS Ledger; +CREATE TABLE Ledger +( + Supplier Nullable(String), + Fruit String , + Price Float64, + Purchase Date +) ENGINE = Memory; +INSERT INTO Ledger VALUES ('Aldi','Apple',4,'2016-09-10'), ('Costco','Apple',2,'2016-09-11'), ('Aldi','Apple',6,'2016-09-10'), ('Costco','Snargaluff',100,'2016-09-12'), ('Aldi','Apple',7,'2016-09-12'), ('Aldi','Snargaluff',400,'2016-09-11'),('Costco','Snargaluff',104,'2016-09-12'),('Aldi','Apple',5,'2016-09-12'),('Aldi','Snargaluff',600,'2016-09-11'),('Costco','Snargaluff',200,'2016-09-10'); + +set allow_experimental_kusto_dialect=1; +set dialect = 'kusto'; + +print '-- extend #1 --'; +Ledger | extend PriceInCents = 100 * Price | take 2; + +print '-- extend #2 --'; +Ledger | extend PriceInCents = 100 * Price | sort by PriceInCents asc | project Fruit, PriceInCents | take 2; + +print '-- extend #3 --'; +Ledger | extend PriceInCents = 100 * Price | sort by PriceInCents asc | project Fruit, PriceInCents | summarize AveragePrice = avg(PriceInCents), Purchases = count() by Fruit | extend Sentence = strcat(Fruit, ' cost ', tostring(AveragePrice), ' on average based on ', tostring(Purchases), ' samples.') | project Sentence | sort by Sentence asc; + +print '-- extend #4 --'; +Ledger | extend a = Price | extend b = a | extend c = a, d = b + 500 | extend Pass = bool(b == a and c == a and d == b + 500) | summarize binary_all_and(Pass); + +print '-- extend #5 --'; +Ledger | take 2 | extend strcat(Fruit, ' was purchased from ', Supplier, ' for $', tostring(Price), ' on ', tostring(Purchase)) | extend PriceInCents = 100 * Price; + +print '-- extend #6 --'; +Ledger | extend Price = 100 * Price; + +print '-- extend #7 --'; +print a = 4 | extend a = 5; + +print '-- extend #8 --'; +-- print x = 5 | extend array_sort_desc(range(0, x), range(1, x + 1)) + +print '-- extend #9 --'; +print x = 19 | extend = 4 + ; -- { clientError SYNTAX_ERROR } + +print '-- extend #10 --'; +Ledger | extend PriceInCents = * Price | sort by PriceInCents asc | project Fruit, PriceInCents | summarize AveragePrice = avg(PriceInCents), Purchases = count() by Fruit | extend Sentence = strcat(Fruit, ' cost ', tostring(AveragePrice), ' on average based on ', tostring(Purchases), ' samples.') | project Sentence; -- { clientError SYNTAX_ERROR } + +print '-- extend #11 --'; -- should ideally return this in the future: 5 [2,1] because of the alias ex +print x = 5 | extend ex = array_sort_desc(dynamic([1, 2]), dynamic([3, 4])); diff --git a/parser/testdata/02366_kql_func_binary/ast.json b/parser/testdata/02366_kql_func_binary/ast.json 
new file mode 100644 index 000000000..65d8e5206 --- /dev/null +++ b/parser/testdata/02366_kql_func_binary/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001607052, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02366_kql_func_binary/metadata.json b/parser/testdata/02366_kql_func_binary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_kql_func_binary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_kql_func_binary/query.sql b/parser/testdata/02366_kql_func_binary/query.sql new file mode 100644 index 000000000..0ea577ae3 --- /dev/null +++ b/parser/testdata/02366_kql_func_binary/query.sql @@ -0,0 +1,6 @@ +set allow_experimental_kusto_dialect=1; +set dialect='kusto'; +print ' -- binary functions'; +print binary_and(4,7), binary_or(4,7); +print binary_xor(2, 5), bitset_count_ones(42); +print bitset_count_ones(binary_shift_left(binary_and(4,7), 1)); diff --git a/parser/testdata/02366_kql_func_datetime/ast.json b/parser/testdata/02366_kql_func_datetime/ast.json new file mode 100644 index 000000000..a54565896 --- /dev/null +++ b/parser/testdata/02366_kql_func_datetime/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001580929, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02366_kql_func_datetime/metadata.json b/parser/testdata/02366_kql_func_datetime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_kql_func_datetime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_kql_func_datetime/query.sql b/parser/testdata/02366_kql_func_datetime/query.sql new file mode 100644 index 000000000..aa80d22e2 --- /dev/null +++ b/parser/testdata/02366_kql_func_datetime/query.sql @@ -0,0 +1,87 @@ +set allow_experimental_kusto_dialect=1; +set dialect = 'kusto'; + +print '-- dayofmonth()'; +print dayofmonth(datetime(2015-12-31)); +print '-- dayofweek()'; +print dayofweek(datetime(2015-12-31)); +print '-- dayofyear()'; +print dayofyear(datetime(2015-12-31)); +print '-- getmonth()'; +print getmonth(datetime(2015-10-12)); +print '-- getyear()'; +print getyear(datetime(2015-10-12)); +print '-- hoursofday()'; +print hourofday(datetime(2015-12-31 23:59:59.9)); +print '-- startofday()'; +print startofday(datetime(2017-01-01 10:10:17)); +print startofday(datetime(2017-01-01 10:10:17), -1); +print startofday(datetime(2017-01-01 10:10:17), 1); +print '-- endofday()'; +print endofday(datetime(2017-01-01 10:10:17)); +print endofday(datetime(2017-01-01 10:10:17), -1); +print endofday(datetime(2017-01-01 10:10:17), 1); +print '-- endofmonth()'; +print endofmonth(datetime(2017-01-01 10:10:17)); +print endofmonth(datetime(2017-01-01 10:10:17), -1); +print endofmonth(datetime(2017-01-01 10:10:17), 1); +print endofmonth(datetime(2022-09-23)); +print '-- startofweek()'; +print startofweek(datetime(2017-01-01 10:10:17)); +print startofweek(datetime(2017-01-01 10:10:17), -1); +print startofweek(datetime(2017-01-01 10:10:17), 1); +print '-- endofweek()'; +print endofweek(datetime(2017-01-01 10:10:17)); +print endofweek(datetime(2017-01-01 10:10:17), -1); +print endofweek(datetime(2017-01-01 10:10:17), 1); +print '-- startofyear()'; 
+print startofyear(datetime(2017-01-01 10:10:17)); +print startofyear(datetime(2017-01-01 10:10:17), -1); +print startofyear(datetime(2017-01-01 10:10:17), 1); +print '-- endofyear()'; +print endofyear(datetime(2017-01-01 10:10:17)); +print endofyear(datetime(2017-01-01 10:10:17), -1); +print endofyear(datetime(2017-01-01 10:10:17), 1); +print '-- unixtime_seconds_todatetime()'; +print unixtime_seconds_todatetime(1546300800); +print unixtime_seconds_todatetime(1d); +print unixtime_seconds_todatetime(-1d); +print '-- unixtime_microseconds_todatetime'; +print unixtime_microseconds_todatetime(1546300800000000); +print '-- unixtime_milliseconds_todatetime()'; +print unixtime_milliseconds_todatetime(1546300800000); +print '-- unixtime_nanoseconds_todatetime()'; +print unixtime_nanoseconds_todatetime(1546300800000000000); +print '-- weekofyear()'; +print week_of_year(datetime(2000-01-01)); +print '-- monthofyear()'; +print monthofyear(datetime(2015-12-31)); +print '-- weekofyear()'; +print week_of_year(datetime(2000-01-01)); +print '-- now()'; +print getyear(now(-2d))>1900; +print '-- make_datetime()'; +print make_datetime(2017,10,01,12,10) == datetime(2017-10-01 12:10:00); +print year_month_day_hour_minute = make_datetime(2017,10,01,12,10); +print year_month_day_hour_minute_second = make_datetime(2017,10,01,12,11,0.1234567); +print '-- format_datetime'; +print format_datetime(datetime(2015-12-14 02:03:04.12345), 'y-M-d h:m:s.fffffff'); +print v1=format_datetime(datetime(2017-01-29 09:00:05),'yy-MM-dd [HH:mm:ss]'), v2=format_datetime(datetime(2017-01-29 09:00:05), 'yyyy-M-dd [H:mm:ss]'), v3=format_datetime(datetime(2017-01-29 09:00:05), 'yy-MM-dd [hh:mm:ss tt]'); +print '-- format_timespan()'; +print format_timespan(time('14.02:03:04.12345'), 'h:m:s.fffffff'); +print v1=format_timespan(time('29.09:00:05.12345'), 'dd.hh:mm:ss:FF'); +-- print v2=format_timespan(time('29.09:00:05.12345'), 'ddd.h:mm:ss [fffffff]'); == '029.9:00:05 [1234500]' +print '-- ago()'; +-- print ago(1d) - now(); +print '-- datetime_diff()'; +print year = datetime_diff('year',datetime(2017-01-01),datetime(2000-12-31)), quarter = datetime_diff('quarter',datetime(2017-07-01),datetime(2017-03-30)), month = datetime_diff('month',datetime(2017-01-01),datetime(2015-12-30)), week = datetime_diff('week',datetime(2017-10-29 00:00),datetime(2017-09-30 23:59)), day = datetime_diff('day',datetime(2017-10-29 00:00),datetime(2017-09-30 23:59)), hour = datetime_diff('hour',datetime(2017-10-31 01:00),datetime(2017-10-30 23:59)), minute = datetime_diff('minute',datetime(2017-10-30 23:05:01),datetime(2017-10-30 23:00:59)), second = datetime_diff('second',datetime(2017-10-30 23:00:10.100),datetime(2017-10-30 23:00:00.900)); +-- millisecond = datetime_diff('millisecond',datetime(2017-10-30 23:00:00.200100),datetime(2017-10-30 23:00:00.100900)), +-- microsecond = datetime_diff('microsecond',datetime(2017-10-30 23:00:00.1009001),datetime(2017-10-30 23:00:00.1008009)), +-- nanosecond = datetime_diff('nanosecond',datetime(2017-10-30 23:00:00.0000000),datetime(2017-10-30 23:00:00.0000007)) +print '-- datetime_part()'; +print year = datetime_part("year", datetime(2017-10-30 01:02:03.7654321)),quarter = datetime_part("quarter", datetime(2017-10-30 01:02:03.7654321)),month = datetime_part("month", datetime(2017-10-30 01:02:03.7654321)),weekOfYear = datetime_part("week_of_year", datetime(2017-10-30 01:02:03.7654321)),day = datetime_part("day", datetime(2017-10-30 01:02:03.7654321)),dayOfYear = datetime_part("dayOfYear", datetime(2017-10-30 
01:02:03.7654321)),hour = datetime_part("hour", datetime(2017-10-30 01:02:03.7654321)),minute = datetime_part("minute", datetime(2017-10-30 01:02:03.7654321)),second = datetime_part("second", datetime(2017-10-30 01:02:03.7654321)); +-- millisecond = datetime_part("millisecond", dt), +-- microsecond = datetime_part("microsecond", dt), +-- nanosecond = datetime_part("nanosecond", dt) +print '-- datetime_add()'; +print year = datetime_add('year',1,make_datetime(2017,1,1)),quarter = datetime_add('quarter',1,make_datetime(2017,1,1)),month = datetime_add('month',1,make_datetime(2017,1,1)),week = datetime_add('week',1,make_datetime(2017,1,1)),day = datetime_add('day',1,make_datetime(2017,1,1)),hour = datetime_add('hour',1,make_datetime(2017,1,1)),minute = datetime_add('minute',1,make_datetime(2017,1,1)),second = datetime_add('second',1,make_datetime(2017,1,1)); diff --git a/parser/testdata/02366_kql_func_dynamic/ast.json b/parser/testdata/02366_kql_func_dynamic/ast.json new file mode 100644 index 000000000..b6a0edb67 --- /dev/null +++ b/parser/testdata/02366_kql_func_dynamic/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery array_test (children 1)" + }, + { + "explain": " Identifier array_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001371871, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02366_kql_func_dynamic/metadata.json b/parser/testdata/02366_kql_func_dynamic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_kql_func_dynamic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_kql_func_dynamic/query.sql b/parser/testdata/02366_kql_func_dynamic/query.sql new file mode 100644 index 000000000..ac0023467 --- /dev/null +++ b/parser/testdata/02366_kql_func_dynamic/query.sql @@ -0,0 +1,162 @@ +DROP TABLE IF EXISTS array_test; +CREATE TABLE array_test (floats Array(Float64), + strings Array(String), + nullable_strings Array(Nullable(String)) + ) ENGINE=Memory; +INSERT INTO array_test VALUES([1.0, 2.5], ['a', 'c'], ['A', NULL, 'C']); +set allow_experimental_kusto_dialect=1; +set dialect = 'kusto'; +print '-- constant index value'; +array_test | project floats[0], strings[1], nullable_strings; +print '-- array_length()'; +print array_length(dynamic(['John', 'Denver', 'Bob', 'Marley'])) == 4; +print array_length(dynamic([1, 2, 3])) == 3; +print '-- array_sum()'; +print array_sum(dynamic([2, 5, 3])) == 10; +print array_sum(dynamic([2.5, 5.5, 3])) == 11; +print '-- array_index_of()'; +print array_index_of(dynamic(['John', 'Denver', 'Bob', 'Marley']), 'Marley'); +print array_index_of(dynamic([1, 2, 3]), 2); +print '-- array_iif()'; +print array_iif(dynamic([true,false,true]), dynamic([1,2,3]), dynamic([4,5,6])); +print array_iif(dynamic([1,0,1]), dynamic([1,2,3]), dynamic([4,5,6])); +print array_iif(dynamic([true,false,true]), dynamic([1,2]), dynamic([4,5,6])); +print array_iif(dynamic(['a','b','c']), dynamic([1,2,3]), dynamic([4,5,6])); +print '-- array_concat()'; +print array_concat(dynamic([1,2,3]),dynamic([4,5,6])); +print '-- array_reverse()'; +print array_reverse(dynamic([])); +print array_reverse(dynamic([1])); +print array_reverse(dynamic([1,2,3,4])); +print array_reverse(dynamic(["this", "is", "an", "example"])); +print '-- array_rotate_left()'; +print array_rotate_left(dynamic([]), 0); +print array_rotate_left(dynamic([]), 500); +print array_rotate_left(dynamic([]), -500); +print 
array_rotate_left(dynamic([1,2,3,4,5]), 2); +print array_rotate_left(dynamic([1,2,3,4,5]), 5); +print array_rotate_left(dynamic([1,2,3,4,5]), 7); +print array_rotate_left(dynamic([1,2,3,4,5]), -2); +print array_rotate_left(dynamic([1,2,3,4,5]), -5); +print array_rotate_left(dynamic([1,2,3,4,5]), -7); +print '-- array_rotate_right()'; +print array_rotate_right(dynamic([]), 0); +print array_rotate_right(dynamic([]), 500); +print array_rotate_right(dynamic([]), -500); +print array_rotate_right(dynamic([1,2,3,4,5]), 2); +print array_rotate_right(dynamic([1,2,3,4,5]), 5); +print array_rotate_right(dynamic([1,2,3,4,5]), 7); +print array_rotate_right(dynamic([1,2,3,4,5]), -2); +print array_rotate_right(dynamic([1,2,3,4,5]), -5); +print array_rotate_right(dynamic([1,2,3,4,5]), -7); +print '-- array_shift_left()'; +print array_shift_left(dynamic([]), 0); +print array_shift_left(dynamic([]), 555); +print array_shift_left(dynamic([]), -555); +print array_shift_left(dynamic([1,2,3,4,5]), 2); +print array_shift_left(dynamic([1,2,3,4,5]), -2); +print array_shift_left(dynamic([1,2,3,4,5]), 2, -1); +print array_shift_left(dynamic(['a', 'b', 'c']), 2); +print '-- array_shift_right()'; +print array_shift_left(dynamic([]), 0); +print array_shift_left(dynamic([]), 555); +print array_shift_left(dynamic([]), -555); +print array_shift_right(dynamic([1,2,3,4,5]), -2); +print array_shift_right(dynamic([1,2,3,4,5]), 2); +print array_shift_right(dynamic([1,2,3,4,5]), -2, -1); +print array_shift_right(dynamic(['a', 'b', 'c']), -2); +print '-- array_slice()'; +--print array_slice(dynamic([1,2,3]), 1, 2); -- will enable when the analyzer is fixed +print array_slice(dynamic([1,2,3,4,5]), -3, -2); +print '-- array_split()'; +print array_split(dynamic([1,2,3,4,5]), dynamic([1,-2])); +print array_split(dynamic([1,2,3,4,5]), 2); +print array_split(dynamic([1,2,3,4,5]), dynamic([1,3])); +print array_split(dynamic([1,2,3,4,5]), dynamic([-1,-2])); +print '-- array_sort_asc()'; +print array_sort_asc(dynamic([null, 'd', 'a', 'c', 'c'])); +print array_sort_asc(dynamic([4, 1, 3, 2])); +print array_sort_asc(dynamic(['b', 'a', 'c']), dynamic(['q', 'p', 'r']))[0]; +print array_sort_asc(dynamic(['q', 'p', 'r']), dynamic(['clickhouse','hello', 'world'])); +print array_sort_asc( dynamic(['d', null, 'a', 'c', 'c']) , false); +print array_sort_asc( dynamic(['d', null, 'a', 'c', 'c']) , 1 > 2); +print array_sort_asc( dynamic([null, null, null]) , false); +print array_sort_asc(dynamic([2, 1, null,3, null]), dynamic([20, 10, 40, 30, 50]), 1 < 2)[0]; +print array_sort_asc(dynamic(['1','3','4','5','2']),dynamic(["a","b","c","d","e"]), dynamic(["a","b","c","d","e"]), dynamic(["a","b","c","d","e"]))[3]; +print array_sort_asc(split("John,Paul,George,Ringo", ",")); +print array_sort_asc(dynamic([null,"blue","yellow","green",null])); +print array_sort_asc(dynamic([null,"blue","yellow","green",null]), false); +print '-- array_sort_desc()'; +print array_sort_desc(dynamic([null, 'd', 'a', 'c', 'c'])); +print array_sort_desc(dynamic([4, 1, 3, 2])); +print array_sort_desc(dynamic(['b', 'a', 'c']), dynamic(['q', 'p', 'r']))[0]; +print array_sort_desc(dynamic(['q', 'p', 'r']), dynamic(['clickhouse','hello', 'world'])); +print array_sort_desc( dynamic(['d', null, 'a', 'c', 'c']) , false); +print array_sort_desc( dynamic(['d', null, 'a', 'c', 'c']) , 1 > 2); +print array_sort_desc( dynamic([null, null, null]) , false); +print array_sort_desc(dynamic([2, 1, null,3, null]), dynamic([20, 10, 40, 30, 50]), 1 < 2)[0]; +print
array_sort_desc(dynamic(['1','3','4','5','2']),dynamic(["a","b","c","d","e"]), dynamic(["a","b","c","d","e"]), dynamic(["a","b","c","d","e"]))[3]; +print array_sort_desc(split("John,Paul,George,Ringo", ",")); +print array_sort_desc(dynamic([null,"blue","yellow","green",null])); +print array_sort_desc(dynamic([null,"blue","yellow","green",null]), false); +print '-- jaccard_index()'; +print jaccard_index(dynamic([1, 1, 2, 2, 3, 3]), dynamic([1, 2, 3, 4, 4, 4])); +print jaccard_index(dynamic([1, 2, 3]), dynamic([])); +print jaccard_index(dynamic([]), dynamic([1, 2, 3, 4])); +print jaccard_index(dynamic([]), dynamic([])); +print jaccard_index(dynamic([1, 2, 3]), dynamic([4, 5, 6, 7])); +print jaccard_index(dynamic(['a', 's', 'd']), dynamic(['f', 'd', 's', 'a'])); +print jaccard_index(dynamic(['Chewbacca', 'Darth Vader', 'Han Solo']), dynamic(['Darth Sidious', 'Darth Vader'])); +print '-- pack_array()'; +print pack_array(); -- { clientError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +print x = 1 | extend y = x * 2 | extend z = y * 2 | extend pack_array(x,y,z); +print pack_array(strcat('a', 'b'), format_ipv4(42), tostring(4.2)); +print '-- repeat()'; +print repeat(1, 0); +print repeat(1, 3); +print repeat("asd", 3); +print repeat(timespan(1d), 3); +print repeat(true, 3); +print repeat(1, -3); +print repeat(6.7,-4); +print '-- set_difference()'; +print set_difference(dynamic([]), dynamic([])); +print set_difference(dynamic([]), dynamic([9])); +print set_difference(dynamic([]), dynamic(["asd"])); +print set_difference(dynamic([1, 1, 2, 2, 3, 3]), dynamic([1, 2, 3])); +print array_sort_asc(set_difference(dynamic([1, 4, 2, 3, 5, 4, 6]), dynamic([1, 2, 3])))[0]; +print set_difference(dynamic([4]), dynamic([1, 2, 3])); +print array_sort_asc(set_difference(dynamic([1, 2, 3, 4, 5]), dynamic([5]), dynamic([2, 4])))[0]; +print array_sort_asc(set_difference(dynamic([1, 2, 3]), dynamic([])))[0]; +print array_sort_asc(set_difference(dynamic(['a', 's', 'd']), dynamic(['a', 'f'])))[0]; +print array_sort_asc(set_difference(dynamic(['Chewbacca', 'Darth Vader', 'Han Solo']), dynamic(['Darth Sidious', 'Darth Vader'])))[0]; +print '-- set_has_element()'; +print set_has_element(dynamic([]), 9); +print set_has_element(dynamic(["this", "is", "an", "example"]), "example"); +print set_has_element(dynamic(["this", "is", "an", "example"]), "examplee"); +print set_has_element(dynamic([1, 2, 3]), 2); +print set_has_element(dynamic([1, 2, 3, 4.2]), 4); +print '-- set_intersect()'; +print set_intersect(dynamic([]), dynamic([])); +print array_sort_asc(set_intersect(dynamic([1, 1, 2, 2, 3, 3]), dynamic([1, 2, 3])))[0]; +print array_sort_asc(set_intersect(dynamic([1, 4, 2, 3, 5, 4, 6]), dynamic([1, 2, 3])))[0]; +print set_intersect(dynamic([4]), dynamic([1, 2, 3])); +print set_intersect(dynamic([1, 2, 3, 4, 5]), dynamic([1, 3, 5]), dynamic([2, 5])); +print set_intersect(dynamic([1, 2, 3]), dynamic([])); +print set_intersect(dynamic(['a', 's', 'd']), dynamic(['a', 'f'])); +print set_intersect(dynamic(['Chewbacca', 'Darth Vader', 'Han Solo']), dynamic(['Darth Sidious', 'Darth Vader'])); +print '-- set_union()'; +print set_union(dynamic([]), dynamic([])); +print array_sort_asc(set_union(dynamic([1, 1, 2, 2, 3, 3]), dynamic([1, 2, 3])))[0]; +print array_sort_asc(set_union(dynamic([1, 4, 2, 3, 5, 4, 6]), dynamic([1, 2, 3])))[0]; +print array_sort_asc(set_union(dynamic([4]), dynamic([1, 2, 3])))[0]; +print array_sort_asc(set_union(dynamic([1, 3, 4]), dynamic([5]), dynamic([2, 4])))[0]; +print array_sort_asc(set_union(dynamic([1, 2, 3]), 
dynamic([])))[0]; +print array_sort_asc(set_union(dynamic(['a', 's', 'd']), dynamic(['a', 'f'])))[0]; +print array_sort_asc(set_union(dynamic(['Chewbacca', 'Darth Vader', 'Han Solo']), dynamic(['Darth Sidious', 'Darth Vader'])))[0]; +print '-- zip()'; +print zip(dynamic([]), dynamic([])); +print zip(dynamic([1,3,5]), dynamic([2,4,6])); +print zip(dynamic(['Darth','Master']), dynamic(['Vader','Yoda']), dynamic(['has a suit','doesn\'t have a suit'])); +print zip(dynamic([1,2,3]), dynamic([10,20])); +print zip(dynamic([]), dynamic([1,2,3])); diff --git a/parser/testdata/02366_kql_func_ip/ast.json b/parser/testdata/02366_kql_func_ip/ast.json new file mode 100644 index 000000000..3c2ba19ef --- /dev/null +++ b/parser/testdata/02366_kql_func_ip/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00157303, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02366_kql_func_ip/metadata.json b/parser/testdata/02366_kql_func_ip/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_kql_func_ip/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_kql_func_ip/query.sql b/parser/testdata/02366_kql_func_ip/query.sql new file mode 100644 index 000000000..27f92d6ed --- /dev/null +++ b/parser/testdata/02366_kql_func_ip/query.sql @@ -0,0 +1,132 @@ +set allow_experimental_kusto_dialect=1; +set dialect='kusto'; +print '-- ipv4_is_private(\'127.0.0.1\')'; +print ipv4_is_private('127.0.0.1'); +print '-- ipv4_is_private(\'10.1.2.3\')'; +print ipv4_is_private('10.1.2.3'); +print '-- ipv4_is_private(\'192.168.1.1/24\')'; +print ipv4_is_private('192.168.1.1/24'); +print 'ipv4_is_private(strcat(\'192.\',\'168.\',\'1.\',\'1\',\'/24\'))'; +print ipv4_is_private(strcat('192.','168.','1.','1','/24')); +print '-- ipv4_is_private(\'abc\')'; +print ipv4_is_private('abc'); -- == null + +print '-- ipv4_netmask_suffix(\'192.168.1.1/24\')'; +print ipv4_netmask_suffix('192.168.1.1/24'); -- == 24 +print '-- ipv4_netmask_suffix(\'192.168.1.1\')'; +print ipv4_netmask_suffix('192.168.1.1'); -- == 32 +print '-- ipv4_netmask_suffix(\'127.0.0.1/16\')'; +print ipv4_netmask_suffix('127.0.0.1/16'); -- == 16 +print '-- ipv4_netmask_suffix(\'abc\')'; +print ipv4_netmask_suffix('abc'); -- == null +print 'ipv4_netmask_suffix(strcat(\'127.\', \'0.\', \'0.1/16\'))'; +print ipv4_netmask_suffix(strcat('127.', '0.', '0.1/16')); -- == 16 + +print '-- ipv4_is_in_range(\'127.0.0.1\', \'127.0.0.1\')'; +print ipv4_is_in_range('127.0.0.1', '127.0.0.1'); -- == true +print '-- ipv4_is_in_range(\'192.168.1.6\', \'192.168.1.1/24\')'; +print ipv4_is_in_range('192.168.1.6', '192.168.1.1/24'); -- == true +print '-- ipv4_is_in_range(\'192.168.1.1\', \'192.168.2.1/24\')'; +print ipv4_is_in_range('192.168.1.1', '192.168.2.1/24'); -- == false +print '-- ipv4_is_in_range(strcat(\'192.\',\'168.\', \'1.1\'), \'192.168.2.1/24\')'; +print ipv4_is_in_range(strcat('192.','168.', '1.1'), '192.168.2.1/24'); -- == false +print '-- ipv4_is_in_range(\'abc\', \'127.0.0.1\')'; -- == null +print ipv4_is_in_range('abc', '127.0.0.1'); + +print '-- parse_ipv6(127.0.0.1)'; +print parse_ipv6('127.0.0.1'); +print '-- parse_ipv6(fe80::85d:e82c:9446:7994)'; +print parse_ipv6('fe80::85d:e82c:9446:7994'); +print '-- parse_ipv4(\'127.0.0.1\')'; +print parse_ipv4('127.0.0.1'); +print '-- parse_ipv4(\'192.1.168.1\') < parse_ipv4(\'192.1.168.2\')'; +print 
parse_ipv4('192.1.168.1') < parse_ipv4('192.1.168.2'); +print '-- parse_ipv4(arrayStringConcat([\'127\', \'0\', \'0\', \'1\'], \'.\'))'; +print parse_ipv4(arrayStringConcat(['127', '0', '0', '1'], '.')); -- { clientError UNKNOWN_FUNCTION } + +print '-- parse_ipv4_mask(\'127.0.0.1\', 24) == 2130706432'; +print parse_ipv4_mask('127.0.0.1', 24); +print '-- parse_ipv4_mask(\'abc\', 31)'; +print parse_ipv4_mask('abc', 31); +print '-- parse_ipv4_mask(\'192.1.168.2\', 1000)'; +print parse_ipv4_mask('192.1.168.2', 1000); +print '-- parse_ipv4_mask(\'192.1.168.2\', 31) == parse_ipv4_mask(\'192.1.168.3\', 31)'; +--print parse_ipv4_mask('192.1.168.2', 31) == parse_ipv4_mask('192.1.168.3', 31); // this query failed in analyzer 3221334018 +print parse_ipv4_mask('192.1.168.2', 31); +print parse_ipv4_mask('192.1.168.3', 31); +print '-- ipv4_is_match(\'127.0.0.1\', \'127.0.0.1\')'; +print ipv4_is_match('127.0.0.1', '127.0.0.1'); +print '-- ipv4_is_match(\'192.168.1.1\', \'192.168.1.255\')'; +print ipv4_is_match('192.168.1.1', '192.168.1.255'); +print '-- ipv4_is_match(\'192.168.1.1/24\', \'192.168.1.255/24\')'; +print ipv4_is_match('192.168.1.1/24', '192.168.1.255/24'); +print '-- ipv4_is_match(\'192.168.1.1\', \'192.168.1.255\', 24)'; +print ipv4_is_match('192.168.1.1', '192.168.1.255', 24); +print '-- ipv4_is_match(\'abc\', \'def\', 24)'; +print ipv4_is_match('abc', 'def', 24); +print '-- ipv4_compare()'; +print ipv4_compare('127.0.0.1', '127.0.0.1'); +print ipv4_compare('192.168.1.1', '192.168.1.255'); +print ipv4_compare('192.168.1.255', '192.168.1.1'); +print ipv4_compare('192.168.1.1/24', '192.168.1.255/24'); +print ipv4_compare('192.168.1.1', '192.168.1.255', 24); +print ipv4_compare('192.168.1.1/24', '192.168.1.255'); +print ipv4_compare('192.168.1.1', '192.168.1.255/24'); +print ipv4_compare('192.168.1.1/30', '192.168.1.255/24'); +print ipv4_compare('192.168.1.1', '192.168.1.0', 31); +print ipv4_compare('192.168.1.1/24', '192.168.1.255', 31); +print ipv4_compare('192.168.1.1', '192.168.1.255', 24); +print '-- format_ipv4()'; +print format_ipv4('192.168.1.255', 24); +print format_ipv4('192.168.1.1', 32); +print format_ipv4('192.168.1.1/24', 32); +print format_ipv4(3232236031, 24); +print format_ipv4('192.168.1.1/24', -1) == ''; +print format_ipv4('abc', 24) == ''; +print format_ipv4(strcat('127.0', '.0.', '1', '/32'), 12 + 12); +print '-- format_ipv4_mask()'; +print format_ipv4_mask('192.168.1.255', 24); +print format_ipv4_mask(3232236031, 24); +print format_ipv4_mask('192.168.1.1', 24); +print format_ipv4_mask('192.168.1.1', 32); +print format_ipv4_mask('192.168.1.1/24', 32); +print format_ipv4_mask('192.168.1.1/24', -1) == ''; +print format_ipv4_mask('abc', 24) == ''; +print format_ipv4_mask(strcat('127.0', '.0.', '1', '/32'), 12 + 12); +print '-- parse_ipv6_mask()'; +print parse_ipv6_mask("127.0.0.1", 24); +print parse_ipv6_mask("fe80::85d:e82c:9446:7994", 120); +print parse_ipv6_mask("192.168.255.255", 120); +print parse_ipv6_mask("192.168.255.255/24", 124); +print parse_ipv6_mask("255.255.255.255", 128); +print parse_ipv6_mask("fe80::85d:e82c:9446:7994", 128); +print parse_ipv6_mask("fe80::85d:e82c:9446:7994/120", 124); +print parse_ipv6_mask("::192.168.255.255", 128); +print parse_ipv6_mask("::192.168.255.255/24", 128); +print '-- ipv6_is_match()'; +print ipv6_is_match('::ffff:7f00:1', '127.0.0.1') == true; +print ipv6_is_match('fe80::85d:e82c:9446:7994', 'fe80::85d:e82c:9446:7995') == false; +print ipv6_is_match('192.168.1.1/24', '192.168.1.255/24') == true; +print 
ipv6_is_match('fe80::85d:e82c:9446:7994/127', 'fe80::85d:e82c:9446:7995/127') == true; +print ipv6_is_match('fe80::85d:e82c:9446:7994', 'fe80::85d:e82c:9446:7995', 127) == true; +print ipv6_is_match('192.168.1.1', '192.168.1.1'); -- // Equal IPs +print ipv6_is_match('192.168.1.1/24', '192.168.1.255'); -- // 24 bit IP4-prefix is used for comparison +print ipv6_is_match('192.168.1.1', '192.168.1.255/24'); -- // 24 bit IP4-prefix is used for comparison +print ipv6_is_match('192.168.1.1/30', '192.168.1.255/24'); -- // 24 bit IP4-prefix is used for comparison +print ipv6_is_match('fe80::85d:e82c:9446:7994', 'fe80::85d:e82c:9446:7994'); -- // Equal IPs +print ipv6_is_match('fe80::85d:e82c:9446:7994/120', 'fe80::85d:e82c:9446:7998'); -- // 120 bit IP6-prefix is used for comparison +print ipv6_is_match('fe80::85d:e82c:9446:7994', 'fe80::85d:e82c:9446:7998/120'); -- // 120 bit IP6-prefix is used for comparison +print ipv6_is_match('fe80::85d:e82c:9446:7994/120', 'fe80::85d:e82c:9446:7998/120'); -- // 120 bit IP6-prefix is used for comparison +print ipv6_is_match('192.168.1.1', '::ffff:c0a8:0101'); -- // Equal IPs +print ipv6_is_match('192.168.1.1/24', '::ffff:c0a8:01ff'); -- // 24 bit IP-prefix is used for comparison +print ipv6_is_match('::ffff:c0a8:0101', '192.168.1.255/24'); -- // 24 bit IP-prefix is used for comparison +print ipv6_is_match('::192.168.1.1/30', '192.168.1.255/24'); -- // 24 bit IP-prefix is used for comparison +print ipv6_is_match('192.168.1.1', '192.168.1.0', 31); -- // 31 bit IP4-prefix is used for comparison +print ipv6_is_match('192.168.1.1/24', '192.168.1.255', 31); -- // 24 bit IP4-prefix is used for comparison +print ipv6_is_match('192.168.1.1', '192.168.1.255', 24); -- // 24 bit IP4-prefix is used for comparison +print ipv6_is_match('fe80::85d:e82c:9446:7994', 'fe80::85d:e82c:9446:7995', 127); -- // 127 bit IP6-prefix is used for comparison +print ipv6_is_match('fe80::85d:e82c:9446:7994/127', 'fe80::85d:e82c:9446:7998', 120); -- // 120 bit IP6-prefix is used for comparison +print ipv6_is_match('fe80::85d:e82c:9446:7994/120', 'fe80::85d:e82c:9446:7998', 127); -- // 120 bit IP6-prefix is used for comparison +print ipv6_is_match('192.168.1.1/24', '::ffff:c0a8:01ff', 127); -- // 127 bit IP6-prefix is used for comparison +print ipv6_is_match('::ffff:c0a8:0101', '192.168.1.255', 120); -- // 120 bit IP6-prefix is used for comparison +print ipv6_is_match('::192.168.1.1/30', '192.168.1.255/24', 127); -- // 120 bit IP6-prefix is used for comparison diff --git a/parser/testdata/02366_kql_func_math/ast.json b/parser/testdata/02366_kql_func_math/ast.json new file mode 100644 index 000000000..a4499a754 --- /dev/null +++ b/parser/testdata/02366_kql_func_math/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001423796, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02366_kql_func_math/metadata.json b/parser/testdata/02366_kql_func_math/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_kql_func_math/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_kql_func_math/query.sql b/parser/testdata/02366_kql_func_math/query.sql new file mode 100644 index 000000000..7eb39257a --- /dev/null +++ b/parser/testdata/02366_kql_func_math/query.sql @@ -0,0 +1,8 @@ +set allow_experimental_kusto_dialect=1; +set dialect = 'kusto'; +print '-- isnan --'; +print 
isnan(double(nan)); +print isnan(4.2); +print isnan(4); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +print isnan(real(+inf)); +print isnan(dynamic(null)); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } diff --git a/parser/testdata/02366_kql_func_scalar/ast.json b/parser/testdata/02366_kql_func_scalar/ast.json new file mode 100644 index 000000000..889e10668 --- /dev/null +++ b/parser/testdata/02366_kql_func_scalar/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery Bin_at_test (children 1)" + }, + { + "explain": " Identifier Bin_at_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001268826, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/02366_kql_func_scalar/metadata.json b/parser/testdata/02366_kql_func_scalar/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_kql_func_scalar/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_kql_func_scalar/query.sql b/parser/testdata/02366_kql_func_scalar/query.sql new file mode 100644 index 000000000..088488048 --- /dev/null +++ b/parser/testdata/02366_kql_func_scalar/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS Bin_at_test; +CREATE TABLE Bin_at_test +( + `Date` DateTime('UTC'), + Num Nullable(UInt8) +) ENGINE = Memory; +INSERT INTO Bin_at_test VALUES ('2018-02-24T15:14:01',3), ('2018-02-23T16:14:01',4), ('2018-02-26T15:14:01',5); + +set allow_experimental_kusto_dialect=1; +set dialect = 'kusto'; +print '-- bin_at()'; +print bin_at(6.5, 2.5, 7); +print bin_at(1h, 1d, 12h); +print bin_at(datetime(2017-05-15 10:20:00.0), 1d, datetime(1970-01-01 12:00:00.0)); +print bin_at(datetime(2017-05-17 10:20:00.0), 7d, datetime(2017-06-04 00:00:00.0)); +Bin_at_test | summarize sum(Num) by d = todatetime(bin_at(Date, 1d, datetime('2018-02-24 15:14:00'))) | order by d; +print '-- bin()'; +print bin(4.5, 1); +print bin(datetime(1970-05-11 13:45:07), 1d); +print bin(16d, 7d); +print bin(datetime(1970-05-11 13:45:07.345623), 1ms); +-- print bin(datetime(2022-09-26 10:13:23.987234), 6ms); -> 2022-09-26 10:13:23.982000000 +print bin(datetime(1970-05-11 13:45:07.345623), 1microsecond); +print bin(datetime(2022-09-26 10:13:23.987234), 6microseconds); +print bin(datetime(1970-05-11 13:45:07.456345672), 16microseconds); +-- print bin(datetime(2022-09-26 10:13:23.987234128), 1tick); -> 2022-09-26 10:13:23.987234100 +-- print bin(datetime(2022-09-26 10:13:23.987234128), 99nanosecond); -> null diff --git a/parser/testdata/02366_kql_func_string/ast.json b/parser/testdata/02366_kql_func_string/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02366_kql_func_string/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02366_kql_func_string/metadata.json b/parser/testdata/02366_kql_func_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_kql_func_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_kql_func_string/query.sql b/parser/testdata/02366_kql_func_string/query.sql new file mode 100644 index 000000000..9145a4e79 --- /dev/null +++ b/parser/testdata/02366_kql_func_string/query.sql @@ -0,0 +1,314 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS Customers; +CREATE TABLE Customers +( + FirstName Nullable(String), + LastName String, + Occupation String, + Education String, + Age Nullable(UInt8) +) 
ENGINE = Memory; + +INSERT INTO Customers VALUES ('Theodore','Diaz','Skilled Manual','Bachelors',28), ('Stephanie','Cox','Management abcd defg','Bachelors',33),('Peter','Nara','Skilled Manual','Graduate Degree',26),('Latoya','Shen','Professional','Graduate Degree',25),('Apple','','Skilled Manual','Bachelors',28),(NULL,'why','Professional','Partial College',38); + +-- datatable (Version:string) [ +-- '1.2.3.4', +-- '1.2', +-- '1.2.3', +-- '1' +-- ] + +DROP TABLE IF EXISTS Versions; +CREATE TABLE Versions +( + Version String +) ENGINE = Memory; +INSERT INTO Versions VALUES ('1.2.3.4'),('1.2'),('1.2.3'),('1'); + + +set allow_experimental_kusto_dialect=1; +set dialect='kusto'; +print '-- test String Functions --'; + +print '-- Customers |where Education contains \'degree\''; +Customers |where Education contains 'degree' | order by LastName; +print ''; +print '-- Customers |where Education !contains \'degree\''; +Customers |where Education !contains 'degree' | order by LastName; +print ''; +print '-- Customers |where Education contains \'Degree\''; +Customers |where Education contains 'Degree' | order by LastName; +print ''; +print '-- Customers |where Education !contains \'Degree\''; +Customers |where Education !contains 'Degree' | order by LastName; +print ''; +print '-- Customers | where FirstName endswith \'RE\''; +Customers | where FirstName endswith 'RE' | order by LastName; +print ''; +print '-- Customers | where ! FirstName endswith \'RE\''; +Customers | where FirstName ! endswith 'RE' | order by LastName; +print ''; +print '--Customers | where FirstName endswith_cs \'re\''; +Customers | where FirstName endswith_cs 're' | order by LastName; +print ''; +print '-- Customers | where FirstName !endswith_cs \'re\''; +Customers | where FirstName !endswith_cs 're' | order by LastName; +print ''; +print '-- Customers | where Occupation == \'Skilled Manual\''; +Customers | where Occupation == 'Skilled Manual' | order by LastName; +print ''; +print '-- Customers | where Occupation != \'Skilled Manual\''; +Customers | where Occupation != 'Skilled Manual' | order by LastName; +print ''; +print '-- Customers | where Occupation has \'skilled\''; +Customers | where Occupation has 'skilled' | order by LastName; +print ''; +print '-- Customers | where Occupation !has \'skilled\''; +Customers | where Occupation !has 'skilled' | order by LastName; +print ''; +print '-- Customers | where Occupation has \'Skilled\''; +Customers | where Occupation has 'Skilled'| order by LastName; +print ''; +print '-- Customers | where Occupation !has \'Skilled\''; +Customers | where Occupation !has 'Skilled'| order by LastName; +print ''; +print '-- Customers | where Occupation hasprefix_cs \'Ab\''; +Customers | where Occupation hasprefix_cs 'Ab'| order by LastName; +print ''; +print '-- Customers | where Occupation !hasprefix_cs \'Ab\''; +Customers | where Occupation !hasprefix_cs 'Ab'| order by LastName; +print ''; +print '-- Customers | where Occupation hasprefix_cs \'ab\''; +Customers | where Occupation hasprefix_cs 'ab'| order by LastName; +print ''; +print '-- Customers | where Occupation !hasprefix_cs \'ab\''; +Customers | where Occupation !hasprefix_cs 'ab'| order by LastName; +print ''; +print '-- Customers | where Occupation hassuffix \'Ent\''; +Customers | where Occupation hassuffix 'Ent'| order by LastName; +print ''; +print '-- Customers | where Occupation !hassuffix \'Ent\''; +Customers | where Occupation !hassuffix 'Ent'| order by LastName; +print ''; +print '-- Customers | where Occupation hassuffix 
\'ent\''; +Customers | where Occupation hassuffix 'ent'| order by LastName; +print ''; +print '-- Customers | where Occupation !hassuffix \'ent\''; +Customers | where Occupation !hassuffix 'ent'| order by LastName; +print ''; +print '-- Customers |where Education in (\'Bachelors\',\'High School\')'; +Customers |where Education in ('Bachelors','High School')| order by LastName; +print ''; +print '-- Customers | where Education !in (\'Bachelors\',\'High School\')'; +Customers | where Education !in ('Bachelors','High School')| order by LastName; +print ''; +print '-- Customers | where FirstName matches regex \'P.*r\''; +Customers | where FirstName matches regex 'P.*r'| order by LastName; +print ''; +print '-- Customers | where FirstName startswith \'pet\''; +Customers | where FirstName startswith 'pet'| order by LastName; +print ''; +print '-- Customers | where FirstName !startswith \'pet\''; +Customers | where FirstName !startswith 'pet'| order by LastName; +print ''; +print '-- Customers | where FirstName startswith_cs \'pet\''; +Customers | where FirstName startswith_cs 'pet'| order by LastName; +print ''; +print '-- Customers | where FirstName !startswith_cs \'pet\''; +Customers | where FirstName !startswith_cs 'pet'| order by LastName; +print ''; +print '-- Customers | where isempty(LastName)'; +Customers | where isempty(LastName); +print ''; +print '-- Customers | where isnotempty(LastName)'; +Customers | where isnotempty(LastName); +print ''; +print '-- Customers | where isnotnull(FirstName)'; +Customers | where isnotnull(FirstName)| order by LastName; +print ''; +print '-- Customers | where isnull(FirstName)'; +Customers | where isnull(FirstName)| order by LastName; +print ''; +print '-- Customers | project url_decode(\'https%3A%2F%2Fwww.test.com%2Fhello%20word\') | take 1'; +Customers | project url_decode('https%3A%2F%2Fwww.test.com%2Fhello%20word') | take 1; +print ''; +print '-- Customers | project url_encode(\'https://www.test.com/hello word\') | take 1'; +Customers | project url_encode('https://www.test.com/hello word') | take 1; +print ''; +print '-- Customers | project name_abbr = strcat(substring(FirstName,0,3), \' \', substring(LastName,2))'; +Customers | project name_abbr = strcat(substring(FirstName,0,3), ' ', substring(LastName,2))| order by LastName; +print ''; +print '-- Customers | project name = strcat(FirstName, \' \', LastName)'; +Customers | project name = strcat(FirstName, ' ', LastName)| order by LastName; +print ''; +print '-- Customers | project FirstName, strlen(FirstName)'; +Customers | project FirstName, strlen(FirstName)| order by LastName; +print ''; +print '-- Customers | project strrep(FirstName,2,\'_\')'; +Customers | project strrep(FirstName,2,'_')| order by LastName; +print ''; +print '-- Customers | project toupper(FirstName)'; +Customers | project toupper(FirstName)| order by LastName; +print ''; +print '-- Customers | project tolower(FirstName)'; +Customers | project tolower(FirstName)| order by LastName; +print ''; +print '-- support subquery for in operator (https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/in-cs-operator) (subquery needs to be wrapped in double brackets); TODO: case-insensitive not supported yet'; +Customers | where Age in ((Customers|project Age|where Age < 30)) | order by LastName; +-- Customer | where LastName in~ ("diaz", "cox") +print ''; +print '-- has_all (https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/has-all-operator); TODO: subquery not supported yet'; +Customers | where Occupation 
has_all ('manual', 'skilled') | order by LastName; +print ''; +print '-- has_any (https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/has-anyoperator); TODO: subquery not supported yet'; +Customers|where Occupation has_any ('Skilled','abcd'); +print ''; +print '-- countof (https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/countoffunction)'; +Customers | project countof('The cat sat on the mat', 'at') | take 1; +Customers | project countof('The cat sat on the mat', 'at', 'normal') | take 1; +Customers | project countof('The cat sat on the mat', '\\s.he', 'regex') | take 1; +print ''; +print '-- extract ( https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/extractfunction)'; +print extract('(\\b[A-Z]+\\b).+(\\b\\d+)', 0, 'The price of PINEAPPLE ice cream is 20'); +print extract('(\\b[A-Z]+\\b).+(\\b\\d+)', 1, 'The price of PINEAPPLE ice cream is 20'); +print extract('(\\b[A-Z]+\\b).+(\\b\\d+)', 2, 'The price of PINEAPPLE ice cream is 20'); +print extract('(\\b[A-Z]+\\b).+(\\b\\d+)', 3, 'The price of PINEAPPLE ice cream is 20'); +print extract('(\\b[A-Z]+\\b).+(\\b\\d+)', 2, 'The price of PINEAPPLE ice cream is 20', typeof(real)); +print extract("x=([0-9.]+)", 1, "hello x=45.6|wo" , typeof(bool)); +print extract("x=([0-9.]+)", 1, "hello x=45.6|wo" , typeof(date)); +print extract("x=([0-9.]+)", 1, "hello x=45.6|wo" , typeof(guid)); +print extract("x=([0-9.]+)", 1, "hello x=45.6|wo" , typeof(int)); +print extract("x=([0-9.]+)", 1, "hello x=45.6|wo" , typeof(long)); +print extract("x=([0-9.]+)", 1, "hello x=45.6|wo" , typeof(real)); +print extract("x=([0-9.]+)", 1, "hello x=45.6|wo" , typeof(decimal)); +print ''; +print '-- extract_all (https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/extractallfunction); TODO: captureGroups not supported yet'; +Customers | project extract_all('(\\w)(\\w+)(\\w)','The price of PINEAPPLE ice cream is 20') | take 1; +print ''; +print '-- extract_json (https://learn.microsoft.com/en-us/azure/data-explorer/kusto/query/extractjsonfunction)'; +print extract_json('', ''); -- { serverError BAD_ARGUMENTS } +print extract_json('a', ''); -- { serverError BAD_ARGUMENTS } +print extract_json('$.firstName', ''); +print extract_json('$.phoneNumbers[0].type', ''); +print extractjson('$.firstName', '{"firstName":"John","lastName":"doe","age":26,"address":{"streetAddress":"naist street","city":"Nara","postalCode":"630-0192"},"phoneNumbers":[{"type":"iPhone","number":"0123-4567-8888"},{"type":"home","number":"0123-4567-8910"}]}'); +print extract_json('$.phoneNumbers[0].type', '{"firstName":"John","lastName":"doe","age":26,"address":{"streetAddress":"naist street","city":"Nara","postalCode":"630-0192"},"phoneNumbers":[{"type":"iPhone","number":"0123-4567-8888"},{"type":"home","number":"0123-4567-8910"}]}', typeof(string)); +print extract_json('$.phoneNumbers[0].type', '{"firstName":"John","lastName":"doe","age":26,"address":{"streetAddress":"naist street","city":"Nara","postalCode":"630-0192"},"phoneNumbers":[{"type":"iPhone","number":"0123-4567-8888"},{"type":"home","number":"0123-4567-8910"}]}', typeof(int)); +print extract_json('$.age', '{"firstName":"John","lastName":"doe","age":26,"address":{"streetAddress":"naist street","city":"Nara","postalCode":"630-0192"},"phoneNumbers":[{"type":"iPhone","number":"0123-4567-8888"},{"type":"home","number":"0123-4567-8910"}]}'); +print extract_json('$.age', '{"firstName":"John","lastName":"doe","age":26,"address":{"streetAddress":"naist 
street","city":"Nara","postalCode":"630-0192"},"phoneNumbers":[{"type":"iPhone","number":"0123-4567-8888"},{"type":"home","number":"0123-4567-8910"}]}', typeof(int)); +print extract_json('$.age', '{"firstName":"John","lastName":"doe","age":26,"address":{"streetAddress":"naist street","city":"Nara","postalCode":"630-0192"},"phoneNumbers":[{"type":"iPhone","number":"0123-4567-8888"},{"type":"home","number":"0123-4567-8910"}]}', typeof(long)); +-- print extract_json('$.age', '{"firstName":"John","lastName":"doe","age":26,"address":{"streetAddress":"naist street","city":"Nara","postalCode":"630-0192"},"phoneNumbers":[{"type":"iPhone","number":"0123-4567-8888"},{"type":"home","number":"0123-4567-8910"}]}', typeof(bool)); -> true +print extract_json('$.age', '{"firstName":"John","lastName":"doe","age":26,"address":{"streetAddress":"naist street","city":"Nara","postalCode":"630-0192"},"phoneNumbers":[{"type":"iPhone","number":"0123-4567-8888"},{"type":"home","number":"0123-4567-8910"}]}', typeof(double)); +print extract_json('$.age', '{"firstName":"John","lastName":"doe","age":26,"address":{"streetAddress":"naist street","city":"Nara","postalCode":"630-0192"},"phoneNumbers":[{"type":"iPhone","number":"0123-4567-8888"},{"type":"home","number":"0123-4567-8910"}]}', typeof(guid)); +-- print extract_json('$.phoneNumbers', '{"firstName":"John","lastName":"doe","age":26,"address":{"streetAddress":"naist street","city":"Nara","postalCode":"630-0192"},"phoneNumbers":[{"type":"iPhone","number":"0123-4567-8888"},{"type":"home","number":"0123-4567-8910"}]}', typeof(dynamic)); we won't be able to handle this particular case for a while, because it should return a dictionary +print ''; +print '-- split (https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/splitfunction)'; +Customers | project split('aa_bb', '_') | take 1; +Customers | project split('aaa_bbb_ccc', '_', 1) | take 1; +Customers | project split('', '_') | take 1; +Customers | project split('a__b', '_') | take 1; +Customers | project split('aabbcc', 'bb') | take 1; +Customers | project split('aabbcc', '') | take 1; +Customers | project split('aaa_bbb_ccc', '_', -1) | take 1; +Customers | project split('aaa_bbb_ccc', '_', 10) | take 1; +print ''; +print '-- strcat_delim (https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/strcat-delimfunction); TODO: only support string now.'; +Customers | project strcat_delim('-', '1', '2', strcat('A','b')) | take 1; +-- Customers | project strcat_delim('-', '1', '2', 'A' , 1s); +print ''; +print '-- indexof (https://docs.microsoft.com/en-us/azure/data-explorer/kusto/query/indexoffunction); TODO: length and occurrence not supported yet'; +Customers | project indexof('abcdefg','cde') | take 1; +Customers | project indexof('abcdefg','cde',2) | take 1; +Customers | project indexof('abcdefg','cde',6) | take 1; +print '-- base64_encode_fromguid()'; +-- print base64_encode_fromguid(guid(null)); +print base64_encode_fromguid(guid('ae3133f2-6e22-49ae-b06a-16e6a9b212eb')); +print base64_encode_fromguid(dynamic(null)); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +print base64_encode_fromguid("abcd1231"); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +print '-- base64_decode_toarray()'; +print base64_decode_toarray(''); +print base64_decode_toarray('S3VzdG8='); +print '-- base64_decode_toguid()'; +print base64_decode_toguid("JpbpECu8dUy7Pv5gbeJXAA=="); +print base64_decode_toguid(base64_encode_fromguid(guid('ae3133f2-6e22-49ae-b06a-16e6a9b212eb'))) == 
guid('ae3133f2-6e22-49ae-b06a-16e6a9b212eb'); +print '-- base64_encode_tostring'; +print base64_encode_tostring(''); +print base64_encode_tostring('Kusto1'); +print '-- base64_decode_tostring'; +print base64_decode_tostring(''); +print base64_decode_tostring('S3VzdG8x'); +print '-- parse_url()'; +print parse_url('scheme://username:password@host:1234/this/is/a/path?k1=v1&k2=v2#fragment'); +print '-- parse_urlquery()'; +print parse_urlquery('k1=v1&k2=v2&k3=v3'); +print '-- strcmp()'; +print strcmp('ABC','ABC'), strcmp('abc','ABC'), strcmp('ABC','abc'), strcmp('abcde','abc'); +print '-- substring()'; +print substring("ABCD", -2, 2); +print '-- translate()'; +print translate('krasp', 'otsku', 'spark'), translate('abc', '', 'ab'), translate('abc', 'x', 'abc'); +print '-- trim()'; +print trim("--", "--https://www.ibm.com--"); +print trim("[^\w]+", strcat("- ","Te st", "1", "// $")); +print trim("", " asd "); +print trim("a$", "asd"); +print trim("^a", "asd"); +print '-- trim_start()'; +print trim_start("https://", "https://www.ibm.com"); +print trim_start("[^\w]+", strcat("- ","Te st", "1", "// $")); +print trim_start("asd$", "asdw"); +print trim_start("asd$", "asd"); +print trim_start("d$", "asd"); +print '-- trim_end()'; +print trim_end("://www.ibm.com", "https://www.ibm.com"); +print trim_end("[^\w]+", strcat("- ","Te st", "1", "// $")); +print trim_end("^asd", "wasd"); +print trim_end("^asd", "asd"); +print trim_end("^a", "asd"); +print '-- trim, trim_start, trim_end all at once'; +print str = "--https://bing.com--", pattern = '--' | extend start = trim_start(pattern, str), end = trim_end(pattern, str), both = trim(pattern, str); +print '-- replace_regex'; +print replace_regex(strcat('Number is ', '1'), 'is (\d+)', 'was: \1'); +print '-- has_any_index()'; +print has_any_index('this is an example', dynamic(['this', 'example'])), has_any_index("this is an example", dynamic(['not', 'example'])), has_any_index("this is an example", dynamic(['not', 'found'])), has_any_index("this is an example", dynamic([])); +print '-- parse_version()'; +print parse_version(42); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- print parse_version(''); -> NULL +print parse_version('1.2.3.40'); +print parse_version('1.2'); +print parse_version(strcat('1.', '2')); +print parse_version('1.2.4.5.6'); +print parse_version('moo'); +print parse_version('moo.boo.foo'); +print parse_version(strcat_delim('.', 'moo', 'boo', 'foo')); +Versions | project parse_version(Version); +print '-- parse_json()'; +print parse_json(dynamic([1, 2, 3])); +print parse_json('{"a":123.5, "b":"{\\"c\\":456}"}'); +print '-- parse_command_line()'; +print parse_command_line(55, 'windows'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- print parse_command_line((52 + 3) * 4 % 2, 'windows'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +print parse_command_line('', 'windows'); +print parse_command_line(strrep(' ', 6), 'windows'); +-- print parse_command_line('echo \"hello world!\" print$?', 'windows'); -> ["echo","hello world!","print$?"] +-- print parse_command_line("yolo swag 'asd bcd' \"moo moo \"", 'windows'); -> ["yolo","swag","'asd","bcd'","moo moo "] +-- print parse_command_line(strcat_delim(' ', "yolo", "swag", "\'asd bcd\'", "\"moo moo \""), 'windows'); -> ["yolo","swag","'asd","bcd'","moo moo "] +print '-- reverse()'; +print reverse(123); +print reverse(123.34); +print reverse(''); +print reverse("asd"); +print reverse(dynamic([])); +print reverse(dynamic([1, 2, 3])); +print reverse(dynamic(['Darth', "Vader"])); +print 
reverse(datetime(2017-10-15 12:00)); +-- print reverse(timespan(3h)); -> 00:00:30 +Customers | where Education contains 'degree' | order by reverse(FirstName); +print '-- parse_csv()'; +print parse_csv(''); +print parse_csv(65); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +print parse_csv('aaa'); +print result=parse_csv('aa,b,cc'); +print result_multi_record=parse_csv('record1,a,b,c\nrecord2,x,y,z'); +-- print result=parse_csv('aa,"b,b,b",cc,"Escaping quotes: ""Title""","line1\nline2"'); -> ["aa","b,b,b","cc","Escaping quotes: \"Title\"","line1\nline2"] +-- print parse_csv(strcat(strcat_delim(',', 'aa', '"b,b,b"', 'cc', '"Escaping quotes: ""Title"""', '"line1\nline2"'), '\r\n', strcat_delim(',', 'asd', 'qcf'))); -> ["aa","b,b,b","cc","Escaping quotes: \"Title\"","line1\nline2"] diff --git a/parser/testdata/02366_kql_mvexpand/ast.json b/parser/testdata/02366_kql_mvexpand/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02366_kql_mvexpand/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02366_kql_mvexpand/metadata.json b/parser/testdata/02366_kql_mvexpand/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_kql_mvexpand/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_kql_mvexpand/query.sql b/parser/testdata/02366_kql_mvexpand/query.sql new file mode 100644 index 000000000..6ed12d601 --- /dev/null +++ b/parser/testdata/02366_kql_mvexpand/query.sql @@ -0,0 +1,40 @@ +-- datatable(a: int, b: dynamic, c: dynamic, d: dynamic) [ +-- 1, dynamic(['Salmon', 'Steak', 'Chicken']), dynamic([1, 2, 3, 4]), dynamic([5, 6, 7, 8]) +-- ] + +DROP TABLE IF EXISTS mv_expand_test_table; +CREATE TABLE mv_expand_test_table +( + a UInt8, + b Array(String), + c Array(Int8), + d Array(Int8) +) ENGINE = Memory; +INSERT INTO mv_expand_test_table VALUES (1, ['Salmon', 'Steak','Chicken'],[1,2,3,4],[5,6,7,8]); +set allow_experimental_kusto_dialect=1; +set dialect='kusto'; +print '-- mv-expand --'; +print '-- mv_expand_test_table | mv-expand c --'; +mv_expand_test_table | mv-expand c; +print '-- mv_expand_test_table | mv-expand c, d --'; +mv_expand_test_table | mv-expand c, d; +print '-- mv_expand_test_table | mv-expand b | mv-expand c --'; +mv_expand_test_table | mv-expand b | mv-expand c; +print '-- mv_expand_test_table | mv-expand with_itemindex=index b, c, d --'; +mv_expand_test_table | mv-expand with_itemindex=index b, c, d; +print '-- mv_expand_test_table | mv-expand array_concat(c,d) --'; +mv_expand_test_table | mv-expand array_concat(c,d); +print '-- mv_expand_test_table | mv-expand x = c, y = d --'; +mv_expand_test_table | mv-expand x = c, y = d; +print '-- mv_expand_test_table | mv-expand xy = array_concat(c, d) --'; +mv_expand_test_table | mv-expand xy = array_concat(c, d); +print '-- mv_expand_test_table | mv-expand xy = array_concat(c, d) limit 2| summarize count() by xy --'; +mv_expand_test_table | mv-expand xy = array_concat(c, d) limit 2| summarize count() by xy; +print '-- mv_expand_test_table | mv-expand with_itemindex=index c,d to typeof(bool) --'; +mv_expand_test_table | mv-expand with_itemindex=index c,d to typeof(bool); +print '-- mv_expand_test_table | mv-expand c to typeof(bool) --'; +mv_expand_test_table | mv-expand c to typeof(bool); +SET max_query_size = 28; +SET dialect='kusto'; +mv_expand_test_table | mv-expand c, d; -- { serverError SYNTAX_ERROR } +SET max_query_size=262144; diff --git a/parser/testdata/02366_kql_operator_in_sql/ast.json 
b/parser/testdata/02366_kql_operator_in_sql/ast.json new file mode 100644 index 000000000..49417c853 --- /dev/null +++ b/parser/testdata/02366_kql_operator_in_sql/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery Customers (children 1)" + }, + { + "explain": " Identifier Customers" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001221624, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02366_kql_operator_in_sql/metadata.json b/parser/testdata/02366_kql_operator_in_sql/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_kql_operator_in_sql/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_kql_operator_in_sql/query.sql b/parser/testdata/02366_kql_operator_in_sql/query.sql new file mode 100644 index 000000000..0b02faa06 --- /dev/null +++ b/parser/testdata/02366_kql_operator_in_sql/query.sql @@ -0,0 +1,42 @@ +DROP TABLE IF EXISTS Customers; +CREATE TABLE Customers +( + FirstName Nullable(String), + LastName String, + Occupation String, + Education String, + Age Nullable(UInt8) +) ENGINE = Memory; + +INSERT INTO Customers VALUES ('Theodore','Diaz','Skilled Manual','Bachelors',28),('Stephanie','Cox','Management abcd defg','Bachelors',33),('Peter','Nara','Skilled Manual','Graduate Degree',26),('Latoya','Shen','Professional','Graduate Degree',25),('Apple','','Skilled Manual','Bachelors',28),(NULL,'why','Professional','Partial College',38); +Select '-- #1 --' ; +select * from kql($$Customers | where FirstName !in ('Peter', 'Latoya')$$); +Select '-- #2 --' ; +select * from kql($$Customers | where FirstName !in ("test", "test2")$$); +Select '-- #3 --' ; +select * from kql($$Customers | where FirstName !contains 'Pet'$$); +Select '-- #4 --' ; +select * from kql($$Customers | where FirstName !contains_cs 'Pet'$$); +Select '-- #5 --' ; +select * from kql($$Customers | where FirstName !endswith 'ter'$$); +Select '-- #6 --' ; +select * from kql($$Customers | where FirstName !endswith_cs 'ter'$$); +Select '-- #7 --' ; +select * from kql($$Customers | where FirstName != 'Peter'$$); +Select '-- #8 --' ; +select * from kql($$Customers | where FirstName !has 'Peter'$$); +Select '-- #9 --' ; +select * from kql($$Customers | where FirstName !has_cs 'peter'$$); +Select '-- #10 --' ; +-- select * from kql($$Customers | where FirstName !hasprefix 'Peter'$$); -- will enable when the analyzer's `and` issue is fixed +Select '-- #11 --' ; +--select * from kql($$Customers | where FirstName !hasprefix_cs 'Peter'$$); +Select '-- #12 --' ; +--select * from kql($$Customers | where FirstName !hassuffix 'Peter'$$); +Select '-- #13 --' ; +--select * from kql($$Customers | where FirstName !hassuffix_cs 'Peter'$$); +Select '-- #14 --' ; +select * from kql($$Customers | where FirstName !startswith 'Peter'$$); +Select '-- #15 --' ; +select * from kql($$Customers | where FirstName !startswith_cs 'Peter'$$); +DROP TABLE IF EXISTS Customers; diff --git a/parser/testdata/02366_kql_summarize/ast.json b/parser/testdata/02366_kql_summarize/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02366_kql_summarize/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02366_kql_summarize/metadata.json b/parser/testdata/02366_kql_summarize/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_kql_summarize/metadata.json @@ -0,0 +1 @@ 
+{"todo": true} diff --git a/parser/testdata/02366_kql_summarize/query.sql b/parser/testdata/02366_kql_summarize/query.sql new file mode 100644 index 000000000..7a669733e --- /dev/null +++ b/parser/testdata/02366_kql_summarize/query.sql @@ -0,0 +1,103 @@ +-- datatable(FirstName:string, LastName:string, Occupation:string, Education:string, Age:int) [ +-- 'Theodore', 'Diaz', 'Skilled Manual', 'Bachelors', 28, +-- 'Stephanie', 'Cox', 'Management abcd defg', 'Bachelors', 33, +-- 'Peter', 'Nara', 'Skilled Manual', 'Graduate Degree', 26, +-- 'Latoya', 'Shen', 'Professional', 'Graduate Degree', 25, +-- 'Joshua', 'Lee', 'Professional', 'Partial College', 26, +-- 'Edward', 'Hernandez', 'Skilled Manual', 'High School', 36, +-- 'Dalton', 'Wood', 'Professional', 'Partial College', 42, +-- 'Christine', 'Nara', 'Skilled Manual', 'Partial College', 33, +-- 'Cameron', 'Rodriguez', 'Professional', 'Partial College', 28, +-- 'Angel', 'Stewart', 'Professional', 'Partial College', 46, +-- 'Apple', '', 'Skilled Manual', 'Bachelors', 28, +-- dynamic(null), 'why', 'Professional', 'Partial College', 38 +-- ] + +DROP TABLE IF EXISTS Customers; +CREATE TABLE Customers +( + FirstName Nullable(String), + LastName String, + Occupation String, + Education String, + Age Nullable(UInt8) +) ENGINE = Memory; + +INSERT INTO Customers VALUES ('Theodore','Diaz','Skilled Manual','Bachelors',28),('Stephanie','Cox','Management abcd defg','Bachelors',33),('Peter','Nara','Skilled Manual','Graduate Degree',26),('Latoya','Shen','Professional','Graduate Degree',25),('Joshua','Lee','Professional','Partial College',26),('Edward','Hernandez','Skilled Manual','High School',36),('Dalton','Wood','Professional','Partial College',42),('Christine','Nara','Skilled Manual','Partial College',33),('Cameron','Rodriguez','Professional','Partial College',28),('Angel','Stewart','Professional','Partial College',46),('Apple','','Skilled Manual','Bachelors',28),(NULL,'why','Professional','Partial College',38); + +drop table if exists EventLog; +create table EventLog +( + LogEntry String, + Created Int64 +) ENGINE = Memory; + +insert into EventLog values ('Darth Vader has entered the room.', 546), ('Rambo is suspciously looking at Darth Vader.', 245234), ('Darth Sidious electrocutes both using Force Lightning.', 245554); + +drop table if exists Dates; +create table Dates +( + EventTime DateTime, +) ENGINE = Memory; + +Insert into Dates VALUES ('2015-10-12') , ('2016-10-12'); +Select '-- test summarize --' ; +set allow_experimental_kusto_dialect=1; +set dialect='kusto'; +Customers | summarize count(), min(Age), max(Age), avg(Age), sum(Age); +Customers | summarize count(), min(Age), max(Age), avg(Age), sum(Age) by Occupation | order by Occupation; +Customers | summarize countif(Age>40) by Occupation | order by Occupation; +Customers | summarize MyMax = maxif(Age, Age<40) by Occupation | order by Occupation; +Customers | summarize MyMin = minif(Age, Age<40) by Occupation | order by Occupation; +Customers | summarize MyAvg = avgif(Age, Age<40) by Occupation | order by Occupation; +Customers | summarize MySum = sumif(Age, Age<40) by Occupation | order by Occupation; +Customers | summarize dcount(Education); +Customers | summarize dcountif(Education, Occupation=='Professional'); +Customers | summarize count_ = count() by bin(Age, 10) | order by count_ asc; +Customers | summarize job_count = count() by Occupation | where job_count > 0 | order by Occupation; +Customers | summarize 'Edu Count'=count() by Education | sort by 'Edu Count' desc; -- { clientError 
SYNTAX_ERROR } + +print '-- make_list() --'; +Customers | summarize f_list = make_list(Education) by Occupation | sort by Occupation; +Customers | summarize f_list = make_list(Education, 2) by Occupation | sort by Occupation; +print '-- make_list_if() --'; +Customers | summarize f_list = make_list_if(FirstName, Age>30) by Occupation | sort by Occupation; +Customers | summarize f_list = make_list_if(FirstName, Age>30, 1) by Occupation | sort by Occupation; +print '-- make_set() --'; +Customers | summarize f_list = make_set(Education) by Occupation | sort by Occupation; +Customers | summarize f_list = make_set(Education, 2) by Occupation | sort by Occupation; +print '-- make_set_if() --'; +Customers | summarize f_list = make_set_if(Education, Age>30) by Occupation | sort by Occupation; +Customers | summarize f_list = make_set_if(Education, Age>30, 1) by Occupation | sort by Occupation; +print '-- stdev() --'; +Customers | project Age | summarize stdev(Age); +print '-- stdevif() --'; +Customers | project Age | summarize stdevif(Age, Age%2==0); +print '-- binary_all_and --'; +Customers | project Age | where Age > 40 | summarize binary_all_and(Age); +print '-- binary_all_or --'; +Customers | project Age | where Age > 40 | summarize binary_all_or(Age); +print '-- binary_all_xor --'; +Customers | project Age | where Age > 40 | summarize binary_all_xor(Age); + +Customers | project Age | summarize percentile(Age, 95); +Customers | project Age | summarize percentiles(Age, 5, 50, 95)|project round(percentiles_Age[0],2),round(percentiles_Age[1],2),round(percentiles_Age[2],2); +Customers | project Age | summarize percentiles(Age, 5, 50, 95)[1]; +Customers | summarize w=count() by AgeBucket=bin(Age, 5) | summarize percentilew(AgeBucket, w, 75); +Customers | summarize w=count() by AgeBucket=bin(Age, 5) | summarize percentilesw(AgeBucket, w, 50, 75, 99.9); + +print '-- Summarize following sort --'; +Customers | sort by FirstName | summarize count() by Occupation | sort by Occupation; + +print '-- summarize with bin --'; +EventLog | summarize count=count() by bin(Created, 1000) | sort by Created asc; +EventLog | summarize count=count() by bin(unixtime_seconds_todatetime(Created/1000), 1s) | sort by Columns1 asc; +EventLog | summarize count=count() by time_label=bin(Created/1000, 1s) | sort by time_label asc; +Dates | project bin(datetime(EventTime), 1m); +print '-- make_list_with_nulls --'; +Customers | summarize t = make_list_with_nulls(FirstName); +Customers | summarize f_list = make_list_with_nulls(FirstName) by Occupation | sort by Occupation; +Customers | summarize f_list = make_list_with_nulls(FirstName), a_list = make_list_with_nulls(Age) by Occupation | sort by Occupation; +-- TODO: +-- arg_max() +-- arg_min() diff --git a/parser/testdata/02366_kql_tabular/ast.json b/parser/testdata/02366_kql_tabular/ast.json new file mode 100644 index 000000000..5940ca203 --- /dev/null +++ b/parser/testdata/02366_kql_tabular/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery Customers (children 1)" + }, + { + "explain": " Identifier Customers" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001259083, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02366_kql_tabular/metadata.json b/parser/testdata/02366_kql_tabular/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_kql_tabular/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02366_kql_tabular/query.sql b/parser/testdata/02366_kql_tabular/query.sql new file mode 100644 index 000000000..0af347aab --- /dev/null +++ b/parser/testdata/02366_kql_tabular/query.sql @@ -0,0 +1,94 @@ +DROP TABLE IF EXISTS Customers; +CREATE TABLE Customers +( + FirstName Nullable(String), + LastName String, + Occupation String, + Education String, + Age Nullable(UInt8) +) ENGINE = Memory; + +INSERT INTO Customers VALUES ('Theodore','Diaz','Skilled Manual','Bachelors',28), ('Stephanie','Cox','Management','Bachelors',33), ('Peter','Nara','Skilled Manual','Graduate Degree',26), ('Latoya','Shen','Professional','Graduate Degree',25), ('Joshua','Lee','Professional','Partial College',26), ('Edward','Hernandez','Skilled Manual','High School',36), ('Dalton','Wood','Professional','Partial College',42), ('Christine','Nara','Skilled Manual','Partial College',33), ('Cameron','Rodriguez','Professional','Partial College',28), ('Angel','Stewart','Professional','Partial College',46); + +set allow_experimental_kusto_dialect=1; +set dialect='kusto'; +print '-- test Query only has table name: --'; +Customers; + +print '-- Query has Column Selection --'; +Customers | project FirstName,LastName,Occupation; + +print '-- Query has limit --'; +Customers | project FirstName,LastName,Occupation | take 5; +Customers | project FirstName,LastName,Occupation | limit 5; + +print '-- Query has second limit with bigger value --'; +Customers | project FirstName,LastName,Occupation | take 5 | take 7; + +print '-- Query has second limit with smaller value --'; +Customers | project FirstName,LastName,Occupation | take 5 | take 3; + +print '-- Query has second Column selection --'; +Customers | project FirstName,LastName,Occupation | take 3 | project FirstName,LastName; + +print '-- Query has second Column selection with extra column --'; +Customers| project FirstName,LastName,Occupation | take 3 | project FirstName,LastName,Education;-- { serverError UNKNOWN_IDENTIFIER } + +print '-- Query with desc sort --'; +Customers | project FirstName | take 5 | sort by FirstName desc; +Customers | project Occupation | take 5 | order by Occupation desc; + +print '-- Query with asc sort --'; +Customers | project Occupation | take 5 | sort by Occupation asc; + +print '-- Query with sort (without keyword asc desc) --'; +Customers | project FirstName | take 5 | sort by FirstName; +Customers | project Occupation | take 5 | order by Occupation; + +print '-- Query with sort 2 Columns with different direction --'; +Customers | project FirstName,LastName,Occupation | take 5 | sort by Occupation asc, LastName desc; + +print '-- Query with second sort --'; +Customers | project FirstName,LastName,Occupation | take 5 | sort by Occupation desc |sort by Occupation asc, LastName desc; + +print '-- Test String Equals (==) --'; +Customers | project FirstName,LastName,Occupation | where Occupation == 'Skilled Manual'; + +print '-- Test String Not equals (!=) --'; +Customers | project FirstName,LastName,Occupation | where Occupation != 'Skilled Manual'; + +print '-- Test Filter using a list (in) --'; +Customers | project FirstName,LastName,Occupation,Education | where Education in ('Bachelors','High School'); + +print '-- Test Filter using a list (!in) --'; +set dialect='kusto'; +Customers | project FirstName,LastName,Occupation,Education | where Education !in ('Bachelors','High School'); + +print '-- Test Filter using common string operations (contains_cs) --'; +Customers | project FirstName,LastName,Occupation,Education | where 
Education contains_cs 'Coll'; + +print '-- Test Filter using common string operations (startswith_cs) --'; +Customers | project FirstName,LastName,Occupation,Education | where Occupation startswith_cs 'Prof'; + +print '-- Test Filter using common string operations (endswith_cs) --'; +Customers | project FirstName,LastName,Occupation,Education | where FirstName endswith_cs 'a'; + +print '-- Test Filter using numerical equal (==) --'; +Customers | project FirstName,LastName,Occupation,Education,Age | where Age == 26; + +print '-- Test Filter using numerical great and less (> , <) --'; +Customers | project FirstName,LastName,Occupation,Education,Age | where Age > 30 and Age < 40; + +print '-- Test Filter using multi where --'; +Customers | project FirstName,LastName,Occupation,Education,Age | where Age > 30 | where Occupation == 'Professional'; + +print '-- Complex query with unknown function --'; +hits | where CounterID == 62 and EventDate >= '2013-07-14' and EventDate <= '2013-07-15' and IsRefresh == 0 and DontCountHits == 0 | summarize count() by d=bin(poopoo(EventTime), 1m) | order by d | limit 10; -- { clientError UNKNOWN_FUNCTION } + +print '-- Missing column in front of startsWith --'; +StormEvents | where startswith "W" | summarize Count=count() by State; -- { clientError SYNTAX_ERROR } + +SET max_query_size = 55; +SET dialect='kusto'; +Customers | where Education contains 'degree' | order by LastName; -- { serverError SYNTAX_ERROR } +SET max_query_size=262144; diff --git a/parser/testdata/02366_normalize_aggregate_function_types_and_states/ast.json b/parser/testdata/02366_normalize_aggregate_function_types_and_states/ast.json new file mode 100644 index 000000000..957730377 --- /dev/null +++ b/parser/testdata/02366_normalize_aggregate_function_types_and_states/ast.json @@ -0,0 +1,139 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function countMerge (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function countState (alias a) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_0.5" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function countState (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function countIfState (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function countArrayState (children 1)" + }, + { + 
"explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2]" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function countArrayIfState (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2]" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 39, + + "statistics": + { + "elapsed": 0.001865728, + "rows_read": 39, + "bytes_read": 1708 + } +} diff --git a/parser/testdata/02366_normalize_aggregate_function_types_and_states/metadata.json b/parser/testdata/02366_normalize_aggregate_function_types_and_states/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_normalize_aggregate_function_types_and_states/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_normalize_aggregate_function_types_and_states/query.sql b/parser/testdata/02366_normalize_aggregate_function_types_and_states/query.sql new file mode 100644 index 000000000..3d2900a9b --- /dev/null +++ b/parser/testdata/02366_normalize_aggregate_function_types_and_states/query.sql @@ -0,0 +1,3 @@ +SELECT countMerge(*) FROM (SELECT countState(0.5) AS a UNION ALL SELECT countState() UNION ALL SELECT countIfState(2, 1) UNION ALL SELECT countArrayState([1, 2]) UNION ALL SELECT countArrayIfState([1, 2], 1)); + +SELECT quantileMerge(*) FROM (SELECT quantilesState(0.5)(1) AS a UNION ALL SELECT quantileStateIf(2, identity(1))); diff --git a/parser/testdata/02366_union_decimal_conversion/ast.json b/parser/testdata/02366_union_decimal_conversion/ast.json new file mode 100644 index 000000000..b0137c900 --- /dev/null +++ b/parser/testdata/02366_union_decimal_conversion/ast.json @@ -0,0 +1,160 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier b" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function cluster (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier test_cluster_two_shards" + }, + { + "explain": " Function view (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (alias a) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal 'Decimal(7, 2)'" + }, + { + "explain": " Literal UInt64_0 (alias b)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function 
numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function CAST (alias b) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal 'Decimal(7, 2)'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 46, + + "statistics": + { + "elapsed": 0.001477064, + "rows_read": 46, + "bytes_read": 2094 + } +} diff --git a/parser/testdata/02366_union_decimal_conversion/metadata.json b/parser/testdata/02366_union_decimal_conversion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_union_decimal_conversion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_union_decimal_conversion/query.sql b/parser/testdata/02366_union_decimal_conversion/query.sql new file mode 100644 index 000000000..4451d0747 --- /dev/null +++ b/parser/testdata/02366_union_decimal_conversion/query.sql @@ -0,0 +1 @@ +select sum(a), sum(b) from cluster(test_cluster_two_shards, view(select cast(number as Decimal(7, 2)) a, 0 as b from numbers(2) union all select 0, cast(number as Decimal(7, 2)) as b from numbers(2))); diff --git a/parser/testdata/02366_window_function_order_by/ast.json b/parser/testdata/02366_window_function_order_by/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02366_window_function_order_by/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02366_window_function_order_by/metadata.json b/parser/testdata/02366_window_function_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_window_function_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_window_function_order_by/query.sql b/parser/testdata/02366_window_function_order_by/query.sql new file mode 100644 index 000000000..a3a02355c --- /dev/null +++ b/parser/testdata/02366_window_function_order_by/query.sql @@ -0,0 +1,9 @@ +-- { echoOn } +SELECT groupArray(tuple(value)) OVER () +FROM (select number value from numbers(10)) +ORDER BY value ASC; + +SELECT count() OVER (ORDER BY number + 1) FROM numbers(10) ORDER BY number; + +SELECT count() OVER (ORDER BY number + 1) + 1 as foo FROM numbers(10) +ORDER BY foo; diff --git a/parser/testdata/02366_with_fill_date/ast.json b/parser/testdata/02366_with_fill_date/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02366_with_fill_date/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02366_with_fill_date/metadata.json b/parser/testdata/02366_with_fill_date/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02366_with_fill_date/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02366_with_fill_date/query.sql b/parser/testdata/02366_with_fill_date/query.sql new file mode 100644 index 000000000..baaec92de --- /dev/null +++ 
b/parser/testdata/02366_with_fill_date/query.sql @@ -0,0 +1,5 @@ + +SELECT toDate('2022-02-01') AS d1 +FROM numbers(18) AS number +ORDER BY d1 ASC WITH FILL FROM toDateTime('2022-02-01') TO toDateTime('2022-07-01') STEP toIntervalMonth(1); -- { serverError INVALID_WITH_FILL_EXPRESSION } + diff --git a/parser/testdata/02367_analyzer_table_alias_columns/ast.json b/parser/testdata/02367_analyzer_table_alias_columns/ast.json new file mode 100644 index 000000000..8ec02275c --- /dev/null +++ b/parser/testdata/02367_analyzer_table_alias_columns/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001428362, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02367_analyzer_table_alias_columns/metadata.json b/parser/testdata/02367_analyzer_table_alias_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02367_analyzer_table_alias_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02367_analyzer_table_alias_columns/query.sql b/parser/testdata/02367_analyzer_table_alias_columns/query.sql new file mode 100644 index 000000000..3d5c2a4ad --- /dev/null +++ b/parser/testdata/02367_analyzer_table_alias_columns/query.sql @@ -0,0 +1,41 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + alias_value_1 ALIAS id + alias_value_2 + 1, + alias_value_2 ALIAS id + 5 +) ENGINE=MergeTree ORDER BY tuple(); + +INSERT INTO test_table VALUES (0); + +SELECT id, alias_value_1, alias_value_2 FROM test_table; + +DROP TABLE test_table; + +CREATE TABLE test_table +( + id UInt64, + value String, + alias_value ALIAS ((id + 1) AS inside_value) + inside_value +) ENGINE=MergeTree ORDER BY tuple(); + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT id, value, alias_value FROM test_table; + +DROP TABLE test_table; + +CREATE TABLE test_table +( + id UInt64, + value String, + alias_value ALIAS ((id + 1) AS value) + value +) ENGINE=MergeTree ORDER BY tuple(); + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT id, value, alias_value FROM test_table; + +DROP TABLE test_table; diff --git a/parser/testdata/02367_optimize_trivial_count_with_array_join/ast.json b/parser/testdata/02367_optimize_trivial_count_with_array_join/ast.json new file mode 100644 index 000000000..43a002102 --- /dev/null +++ b/parser/testdata/02367_optimize_trivial_count_with_array_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001311036, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02367_optimize_trivial_count_with_array_join/metadata.json b/parser/testdata/02367_optimize_trivial_count_with_array_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02367_optimize_trivial_count_with_array_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02367_optimize_trivial_count_with_array_join/query.sql b/parser/testdata/02367_optimize_trivial_count_with_array_join/query.sql new file mode 100644 index 000000000..a3fb46e9e --- /dev/null +++ b/parser/testdata/02367_optimize_trivial_count_with_array_join/query.sql @@ -0,0 +1,11 @@ +drop table if exists t; +drop 
table if exists t1; + +create table t(id UInt32) engine MergeTree order by id as select 1; + +create table t1(a Array(UInt32)) ENGINE = MergeTree ORDER BY tuple() as select [1,2]; + +select count() from t array join (select a from t1) AS _a settings optimize_trivial_count_query=1; + +drop table t; +drop table t1; diff --git a/parser/testdata/02368_analyzer_table_functions/ast.json b/parser/testdata/02368_analyzer_table_functions/ast.json new file mode 100644 index 000000000..a1a056a59 --- /dev/null +++ b/parser/testdata/02368_analyzer_table_functions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00117874, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02368_analyzer_table_functions/metadata.json b/parser/testdata/02368_analyzer_table_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02368_analyzer_table_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02368_analyzer_table_functions/query.sql b/parser/testdata/02368_analyzer_table_functions/query.sql new file mode 100644 index 000000000..8c3ea96fb --- /dev/null +++ b/parser/testdata/02368_analyzer_table_functions/query.sql @@ -0,0 +1,10 @@ +SET enable_analyzer = 1; + +SELECT c1, c2, c3, c4 FROM format('CSV', '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"'); +SELECT f.c1, f.c2, f.c3, f.c4 FROM format('CSV', '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"') AS f; +SELECT f.* FROM format('CSV', '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"') AS f; + +WITH 'CSV' as format_name, '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"' AS format_value SELECT c1, c2, c3, c4 FROM format('CSV', format_value); +WITH concat('1,2,"[1,2,3]",','"[[\'abc\'], [], [\'d\', \'e\']]"') AS format_value SELECT c1, c2, c3, c4 FROM format('CSV', format_value); + +SELECT format, format_value, c1, c2, c3, c4 FROM format('CSV' AS format, '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"' AS format_value); diff --git a/parser/testdata/02369_analyzer_array_join_function/ast.json b/parser/testdata/02369_analyzer_array_join_function/ast.json new file mode 100644 index 000000000..932f7f350 --- /dev/null +++ b/parser/testdata/02369_analyzer_array_join_function/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001297858, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02369_analyzer_array_join_function/metadata.json b/parser/testdata/02369_analyzer_array_join_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02369_analyzer_array_join_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02369_analyzer_array_join_function/query.sql b/parser/testdata/02369_analyzer_array_join_function/query.sql new file mode 100644 index 000000000..8200fd60e --- /dev/null +++ b/parser/testdata/02369_analyzer_array_join_function/query.sql @@ -0,0 +1,59 @@ +SET enable_analyzer = 1; + +SELECT arrayJoin([1, 2, 3]); + +SELECT '--'; + +SELECT arrayJoin([1, 2, 3]) AS a, arrayJoin([1, 2, 3]); + +SELECT '--'; + +SELECT arrayJoin([1, 2, 3]) AS a, a; + +SELECT '--'; + +SELECT arrayJoin([[1, 2, 3]]) AS a, arrayJoin(a) AS b; + +SELECT '--'; + +SELECT arrayJoin([1, 2, 3]) AS a, arrayJoin([1, 2, 
3, 4]) AS b; + +SELECT '--'; + +SELECT arrayMap(x -> arrayJoin([1, 2, 3]), [1, 2, 3]); + +SELECT arrayMap(x -> arrayJoin(x), [[1, 2, 3]]); -- { serverError BAD_ARGUMENTS } + +SELECT arrayMap(x -> arrayJoin(cast(x, 'Array(UInt8)')), [[1, 2, 3]]); -- { serverError BAD_ARGUMENTS } + +SELECT '--'; + +SELECT arrayMap(x -> x + a, [1, 2, 3]), arrayJoin([1,2,3]) as a; + +SELECT '--'; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value_1 Array(UInt8), + value_2 Array(UInt8), +) ENGINE=MergeTree ORDER BY tuple(); + +INSERT INTO test_table VALUES (0, [1, 2, 3], [1, 2, 3, 4]); + +SELECT id, arrayJoin(value_1) FROM test_table; + +SELECT '--'; + +SELECT id, arrayJoin(value_1) AS a, a FROM test_table; + +-- SELECT '--'; + +-- SELECT id, arrayJoin(value_1), arrayJoin(value_2) FROM test_table; + +-- SELECT '--'; + +-- SELECT id, arrayJoin(value_1), arrayJoin(value_2), arrayJoin([5, 6]) FROM test_table; + +DROP TABLE test_table; diff --git a/parser/testdata/02370_analyzer_in_function/ast.json b/parser/testdata/02370_analyzer_in_function/ast.json new file mode 100644 index 000000000..5d66f7f83 --- /dev/null +++ b/parser/testdata/02370_analyzer_in_function/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001396453, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02370_analyzer_in_function/metadata.json b/parser/testdata/02370_analyzer_in_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02370_analyzer_in_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02370_analyzer_in_function/query.sql b/parser/testdata/02370_analyzer_in_function/query.sql new file mode 100644 index 000000000..9fb05ada8 --- /dev/null +++ b/parser/testdata/02370_analyzer_in_function/query.sql @@ -0,0 +1,27 @@ +SET enable_analyzer = 1; + +SELECT 1 IN 1; +SELECT 1 IN (1); +SELECT 1 IN 0; +SELECT 1 IN (0); +SELECT 1 IN (1, 2); +SELECT (1, 1) IN ((1, 1), (1, 2)); +SELECT (1, 1) IN ((1, 2), (1, 2)); +SELECT 1 IN (((1), (2))); + +SELECT '--'; + +SELECT 1 IN [1]; +SELECT 1 IN [0]; +SELECT 1 IN [1, 2]; +SELECT (1, 1) IN [(1, 1), (1, 2)]; +SELECT (1, 1) IN [(1, 2), (1, 2)]; + +SELECT (1, 2) IN 1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT (1, 2) IN [1]; -- { serverError INCORRECT_ELEMENT_OF_SET } +SELECT (1, 2) IN (((1, 2), (1, 2)), ((1, 2), (1, 2))); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT (1, 2) IN [((1, 2), (1, 2)), ((1, 2), (1, 2))]; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +select (select 1) in (1); +select in(untuple(((1), (1)))); +select in(untuple(((select 1), (1)))); diff --git a/parser/testdata/02370_extractAll_regress/ast.json b/parser/testdata/02370_extractAll_regress/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02370_extractAll_regress/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02370_extractAll_regress/metadata.json b/parser/testdata/02370_extractAll_regress/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02370_extractAll_regress/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02370_extractAll_regress/query.sql b/parser/testdata/02370_extractAll_regress/query.sql new file mode 100644 index 000000000..6d2551249 --- /dev/null +++ 
b/parser/testdata/02370_extractAll_regress/query.sql @@ -0,0 +1,5 @@ +-- Regression for UB (stack-use-after-scope) in extractAll() +SELECT + '{"a":"1","b":"2","c":"","d":"4"}{"a":"1","b":"2","c":"","d":"4"}{"a":"1","b":"2","c":"","d":"4"}{"a":"1","b":"2","c":"","d":"4"}' AS json, + extractAll(json, '"([^"]*)":') AS keys, + extractAll(json, ':"\0[^"]*)"') AS values; diff --git a/parser/testdata/02371_analyzer_join_cross/ast.json b/parser/testdata/02371_analyzer_join_cross/ast.json new file mode 100644 index 000000000..6025f831e --- /dev/null +++ b/parser/testdata/02371_analyzer_join_cross/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001273833, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02371_analyzer_join_cross/metadata.json b/parser/testdata/02371_analyzer_join_cross/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02371_analyzer_join_cross/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02371_analyzer_join_cross/query.sql b/parser/testdata/02371_analyzer_join_cross/query.sql new file mode 100644 index 000000000..26497bcbc --- /dev/null +++ b/parser/testdata/02371_analyzer_join_cross/query.sql @@ -0,0 +1,79 @@ +SET enable_analyzer = 1; +SET single_join_prefer_left_table = 0; + +DROP TABLE IF EXISTS test_table_join_1; +CREATE TABLE test_table_join_1 +( + id UInt64, + value String +) ENGINE = MergeTree ORDER BY tuple(); + +DROP TABLE IF EXISTS test_table_join_2; +CREATE TABLE test_table_join_2 +( + id UInt64, + value String +) ENGINE = MergeTree ORDER BY tuple(); + +DROP TABLE IF EXISTS test_table_join_3; +CREATE TABLE test_table_join_3 +( + id UInt64, + value String +) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO test_table_join_1 VALUES (0, 'Join_1_Value_0'); +INSERT INTO test_table_join_1 VALUES (1, 'Join_1_Value_1'); +INSERT INTO test_table_join_1 VALUES (3, 'Join_1_Value_3'); + +INSERT INTO test_table_join_2 VALUES (0, 'Join_2_Value_0'); +INSERT INTO test_table_join_2 VALUES (1, 'Join_2_Value_1'); +INSERT INTO test_table_join_2 VALUES (2, 'Join_2_Value_2'); + +INSERT INTO test_table_join_3 VALUES (0, 'Join_3_Value_0'); +INSERT INTO test_table_join_3 VALUES (1, 'Join_3_Value_1'); +INSERT INTO test_table_join_3 VALUES (2, 'Join_3_Value_2'); + +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value +FROM test_table_join_1, test_table_join_2 ORDER BY ALL; + +SELECT '--'; + +SELECT t1.id, t1.value, t2.id, t2.value FROM test_table_join_1 AS t1, test_table_join_2 AS t2 ORDER BY ALL; + +SELECT '--'; + +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value +FROM test_table_join_1 AS t1, test_table_join_2 AS t2 ORDER BY ALL; + +SELECT '--'; + +SELECT t1.id, t1.value, t2.id, t2.value FROM test_table_join_1 AS t1, test_table_join_2 AS t2 ORDER BY ALL; + +SELECT '--'; + +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value FROM test_table_join_1 AS t1, test_table_join_2 AS t2 ORDER BY ALL; + +SELECT '--'; + +SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_2.id, test_table_join_2.value, test_table_join_3.id, test_table_join_3.value +FROM test_table_join_1, test_table_join_2, test_table_join_3 ORDER BY ALL; + +SELECT
'--'; + +SELECT t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1, test_table_join_2 AS t2, test_table_join_3 AS t3 ORDER BY ALL; + +SELECT '--'; + +SELECT t1.id, test_table_join_1.id, t1.value, test_table_join_1.value, t2.id, test_table_join_2.id, t2.value, test_table_join_2.value, +t3.id, test_table_join_3.id, t3.value, test_table_join_3.value +FROM test_table_join_1 AS t1, test_table_join_2 AS t2, test_table_join_3 AS t3 ORDER BY ALL; + +SELECT id FROM test_table_join_1, test_table_join_2; -- { serverError AMBIGUOUS_IDENTIFIER } + +SELECT value FROM test_table_join_1, test_table_join_2; -- { serverError AMBIGUOUS_IDENTIFIER } + +DROP TABLE test_table_join_1; +DROP TABLE test_table_join_2; +DROP TABLE test_table_join_3; diff --git a/parser/testdata/02371_create_temporary_table_as_with_columns_list/ast.json b/parser/testdata/02371_create_temporary_table_as_with_columns_list/ast.json new file mode 100644 index 000000000..97c9bc601 --- /dev/null +++ b/parser/testdata/02371_create_temporary_table_as_with_columns_list/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test_02327 (children 3)" + }, + { + "explain": " Identifier test_02327" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration name (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function VALUES (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'Vasya'" + }, + { + "explain": " Literal 'Petya'" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001255682, + "rows_read": 18, + "bytes_read": 685 + } +} diff --git a/parser/testdata/02371_create_temporary_table_as_with_columns_list/metadata.json b/parser/testdata/02371_create_temporary_table_as_with_columns_list/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02371_create_temporary_table_as_with_columns_list/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02371_create_temporary_table_as_with_columns_list/query.sql b/parser/testdata/02371_create_temporary_table_as_with_columns_list/query.sql new file mode 100644 index 000000000..7d8f297b5 --- /dev/null +++ b/parser/testdata/02371_create_temporary_table_as_with_columns_list/query.sql @@ -0,0 +1,3 @@ +CREATE TEMPORARY TABLE test_02327 (name String) AS SELECT * FROM VALUES(('Vasya'), ('Petya')); +SELECT * FROM test_02327; +DROP TABLE test_02327; diff --git a/parser/testdata/02371_select_projection_normal_agg/ast.json b/parser/testdata/02371_select_projection_normal_agg/ast.json new file mode 100644 index 000000000..643378321 --- /dev/null +++ b/parser/testdata/02371_select_projection_normal_agg/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery video_log (children 1)" + }, + { + "explain": " Identifier video_log" + } + ], + + "rows": 2, + + "statistics": 
+ { + "elapsed": 0.001546062, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02371_select_projection_normal_agg/metadata.json b/parser/testdata/02371_select_projection_normal_agg/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02371_select_projection_normal_agg/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02371_select_projection_normal_agg/query.sql b/parser/testdata/02371_select_projection_normal_agg/query.sql new file mode 100644 index 000000000..8650fb6b8 --- /dev/null +++ b/parser/testdata/02371_select_projection_normal_agg/query.sql @@ -0,0 +1,126 @@ +DROP TABLE IF EXISTS video_log; + +CREATE TABLE video_log +( + `datetime` DateTime, + `user_id` UInt64, + `device_id` UInt64, + `domain` LowCardinality(String), + `bytes` UInt64, + `duration` UInt64 +) +ENGINE = MergeTree +PARTITION BY toDate(datetime) +ORDER BY (user_id, device_id) +SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +DROP TABLE IF EXISTS rng; + +CREATE TABLE rng +( + `user_id_raw` UInt64, + `device_id_raw` UInt64, + `domain_raw` UInt64, + `bytes_raw` UInt64, + `duration_raw` UInt64 +) +ENGINE = GenerateRandom(1024); + +INSERT INTO video_log SELECT + toUnixTimestamp('2022-07-22 01:00:00') + + (rowNumberInAllBlocks() / 20000), + user_id_raw % 100000000 AS user_id, + device_id_raw % 200000000 AS device_id, + domain_raw % 100, + (bytes_raw % 1024) + 128, + (duration_raw % 300) + 100 +FROM rng +LIMIT 1728000; + +INSERT INTO video_log SELECT + toUnixTimestamp('2022-07-22 01:00:00') + + (rowNumberInAllBlocks() / 20000), + user_id_raw % 100000000 AS user_id, + 100 AS device_id, + domain_raw % 100, + (bytes_raw % 1024) + 128, + (duration_raw % 300) + 100 +FROM rng +LIMIT 10; + +DROP TABLE IF EXISTS video_log_result; + +CREATE TABLE video_log_result +( + `hour` DateTime, + `sum_bytes` UInt64, + `avg_duration` Float64 +) +ENGINE = MergeTree +PARTITION BY toDate(hour) +ORDER BY sum_bytes +SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO video_log_result SELECT + toStartOfHour(datetime) AS hour, + sum(bytes), + avg(duration) +FROM video_log +WHERE (toDate(hour) = '2022-07-22') AND (device_id = '100') --(device_id = '100') Make sure it's not good and doesn't go into prewhere. +GROUP BY hour; + + +ALTER TABLE video_log ADD PROJECTION p_norm +( + SELECT + datetime, + device_id, + bytes, + duration + ORDER BY device_id +); + +ALTER TABLE video_log MATERIALIZE PROJECTION p_norm settings mutations_sync=1; + +ALTER TABLE video_log ADD PROJECTION p_agg +( + SELECT + toStartOfHour(datetime) AS hour, + domain, + sum(bytes), + avg(duration) + GROUP BY + hour, + domain +); + +ALTER TABLE video_log MATERIALIZE PROJECTION p_agg settings mutations_sync=1; + +SELECT + equals(sum_bytes1, sum_bytes2), + equals(avg_duration1, avg_duration2) +FROM +( + SELECT + toStartOfHour(datetime) AS hour, + sum(bytes) AS sum_bytes1, + avg(duration) AS avg_duration1 + FROM video_log + WHERE (toDate(hour) = '2022-07-22') AND (device_id = '100') --(device_id = '100') Make sure it's not good and doesn't go into prewhere. 
+ GROUP BY hour +) +LEFT JOIN +( + SELECT + `hour`, + `sum_bytes` AS sum_bytes2, + `avg_duration` AS avg_duration2 + FROM video_log_result +) +USING (hour) settings joined_subquery_requires_alias=0; + +DROP TABLE IF EXISTS video_log; + +DROP TABLE IF EXISTS rng; + +DROP TABLE IF EXISTS video_log_result; diff --git a/parser/testdata/02372_nowInBlock/ast.json b/parser/testdata/02372_nowInBlock/ast.json new file mode 100644 index 000000000..5401df10b --- /dev/null +++ b/parser/testdata/02372_nowInBlock/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001428245, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02372_nowInBlock/metadata.json b/parser/testdata/02372_nowInBlock/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02372_nowInBlock/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02372_nowInBlock/query.sql b/parser/testdata/02372_nowInBlock/query.sql new file mode 100644 index 000000000..ca085bd91 --- /dev/null +++ b/parser/testdata/02372_nowInBlock/query.sql @@ -0,0 +1,6 @@ +SET max_rows_to_read = 0, max_bytes_to_read = 0; + +SELECT count() FROM (SELECT DISTINCT nowInBlock(), nowInBlock('Pacific/Pitcairn') FROM system.numbers LIMIT 2); +SELECT nowInBlock(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT nowInBlock(NULL) IS NULL; +SELECT nowInBlock('UTC', 'UTC'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/parser/testdata/02373_analyzer_join_use_nulls/ast.json b/parser/testdata/02373_analyzer_join_use_nulls/ast.json new file mode 100644 index 000000000..72d4c3624 --- /dev/null +++ b/parser/testdata/02373_analyzer_join_use_nulls/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001195604, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02373_analyzer_join_use_nulls/metadata.json b/parser/testdata/02373_analyzer_join_use_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02373_analyzer_join_use_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02373_analyzer_join_use_nulls/query.sql b/parser/testdata/02373_analyzer_join_use_nulls/query.sql new file mode 100644 index 000000000..89ea4d5c6 --- /dev/null +++ b/parser/testdata/02373_analyzer_join_use_nulls/query.sql @@ -0,0 +1,73 @@ +SET enable_analyzer = 1; +SET join_use_nulls = 1; + +DROP TABLE IF EXISTS test_table_join_1; +CREATE TABLE test_table_join_1 +( + id UInt64, + value String +) ENGINE = MergeTree ORDER BY tuple(); + +DROP TABLE IF EXISTS test_table_join_2; +CREATE TABLE test_table_join_2 +( + id UInt64, + value String +) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO test_table_join_1 VALUES (0, 'Join_1_Value_0'); +INSERT INTO test_table_join_1 VALUES (1, 'Join_1_Value_1'); +INSERT INTO test_table_join_1 VALUES (2, 'Join_1_Value_2'); + +INSERT INTO test_table_join_2 VALUES (0, 'Join_2_Value_0'); +INSERT INTO test_table_join_2 VALUES (1, 'Join_2_Value_1'); +INSERT INTO test_table_join_2 VALUES (3, 'Join_2_Value_3'); + +-- { echoOn } + +SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM 
test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; + +SELECT '--'; + +SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; + +SELECT '--'; + +SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; + +SELECT '--'; + +SELECT t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY ALL; + +SELECT '--'; + +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) ORDER BY ALL; + +SELECT '--'; + +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) ORDER BY ALL; + +SELECT '--'; + +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) ORDER BY ALL; + +SELECT '--'; + +SELECT id AS using_id, toTypeName(using_id), t1.id AS t1_id, toTypeName(t1_id), t1.value AS t1_value, toTypeName(t1_value), +t2.id AS t2_id, toTypeName(t2_id), t2.value AS t2_value, toTypeName(t2_value) +FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) ORDER BY ALL; + +-- { echoOff } + +DROP TABLE test_table_join_1; +DROP TABLE test_table_join_2; diff --git a/parser/testdata/02374_analyzer_array_join/ast.json b/parser/testdata/02374_analyzer_array_join/ast.json new file mode 100644 index 000000000..ef2f758c9 --- /dev/null +++ b/parser/testdata/02374_analyzer_array_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001290839, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02374_analyzer_array_join/metadata.json b/parser/testdata/02374_analyzer_array_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02374_analyzer_array_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02374_analyzer_array_join/query.sql b/parser/testdata/02374_analyzer_array_join/query.sql new file mode 100644 index 000000000..5a517ed9b --- /dev/null +++ b/parser/testdata/02374_analyzer_array_join/query.sql @@ -0,0 +1,85 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String, + value_array Array(UInt64), + value_array_array Array(Array(UInt64)) +) ENGINE=MergeTree ORDER BY tuple(); + +INSERT INTO test_table VALUES (0, 'Value', [1, 2, 3], 
[[1, 2, 3]]), (0, 'Value', [4, 5, 6], [[1, 2, 3], [4, 5, 6]]); + +-- { echoOn } + +SELECT 'ARRAY JOIN with constant'; + +SELECT id, value, value_1 FROM test_table ARRAY JOIN [1, 2, 3] AS value_1; + +SELECT '--'; + +SELECT id, value FROM test_table ARRAY JOIN [1, 2, 3] AS value; + +SELECT '--'; + +WITH [1, 2, 3] AS constant_array SELECT id, value FROM test_table ARRAY JOIN constant_array AS value; + +SELECT '--'; + +WITH [1, 2, 3] AS constant_array SELECT id, value, value_1 FROM test_table ARRAY JOIN constant_array AS value_1; + +SELECT '--'; + +SELECT id, value, value_1, value_2 FROM test_table ARRAY JOIN [[1, 2, 3]] AS value_1 ARRAY JOIN value_1 AS value_2; + +SELECT 1 AS value FROM test_table ARRAY JOIN [1,2,3] AS value; + +SELECT 'ARRAY JOIN with column'; + +SELECT id, value, test_table.value_array FROM test_table ARRAY JOIN value_array; + +SELECT '--'; + +SELECT id, value_array, value FROM test_table ARRAY JOIN value_array AS value; + +SELECT '--'; + +SELECT id, value, value_array, value_array_element FROM test_table ARRAY JOIN value_array AS value_array_element; + +SELECT '--'; + +SELECT id, value, value_array AS value_array_array_alias FROM test_table ARRAY JOIN value_array_array_alias; + +SELECT '--'; + +SELECT id AS value FROM test_table ARRAY JOIN value_array AS value; + +SELECT '--'; + +SELECT id, value, value_array AS value_array_array_alias, value_array_array_alias_element FROM test_table ARRAY JOIN value_array_array_alias AS value_array_array_alias_element; + +SELECT '--'; + +SELECT id, value, value_array_array, value_array_array_inner_element, value_array_array_inner_element, value_array_array_inner_inner_element +FROM test_table ARRAY JOIN value_array_array AS value_array_array_inner_element +ARRAY JOIN value_array_array_inner_element AS value_array_array_inner_inner_element; + +SELECT '--'; +SELECT 1 FROM system.one ARRAY JOIN arrayMap(x -> ignore(*), []); +SELECT arrayFilter(x -> notEmpty(concat(x, 'hello')), ['']) +FROM system.one +ARRAY JOIN + [0] AS elem, + arrayMap(x -> concat(x, ignore(ignore(toLowCardinality('03147_parquet_memory_tracking.parquet'), 37, 37, toUInt128(37), 37, 37, toLowCardinality(37), 37), 8, ignore(ignore(1., 36, 8, 8)), *), 'hello'), ['']) AS unused +WHERE NOT ignore(elem) +GROUP BY + sum(ignore(ignore(ignore(1., 1, 36, 8, 8), ignore(52, 37, 37, '03147_parquet_memory_tracking.parquet', 37, 37, toUInt256(37), 37, 37, toNullable(37), 37, 37), 1., 1, 36, 8, 8), emptyArrayToSingle(arrayMap(x -> toString(x), arrayMap(x -> nullIf(x, 2), arrayJoin([[1]])))))) IGNORE NULLS, + modulo(toLowCardinality('03147_parquet_memory_tracking.parquet'), number, toLowCardinality(3)); -- { serverError UNKNOWN_IDENTIFIER } + +-- { echoOff } + +DROP TABLE test_table; + +select [1, 2] as arr, x from system.one array join arr as x; +select x + 1 as x from (select [number] as arr from numbers(2)) as s array join arr as x; diff --git a/parser/testdata/02374_combine_multi_if_and_count_if_opt/ast.json b/parser/testdata/02374_combine_multi_if_and_count_if_opt/ast.json new file mode 100644 index 000000000..9c3d41c42 --- /dev/null +++ b/parser/testdata/02374_combine_multi_if_and_count_if_opt/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery m (children 1)" + }, + { + "explain": " Identifier m" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001448178, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02374_combine_multi_if_and_count_if_opt/metadata.json 
b/parser/testdata/02374_combine_multi_if_and_count_if_opt/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02374_combine_multi_if_and_count_if_opt/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02374_combine_multi_if_and_count_if_opt/query.sql b/parser/testdata/02374_combine_multi_if_and_count_if_opt/query.sql new file mode 100644 index 000000000..05472e5e3 --- /dev/null +++ b/parser/testdata/02374_combine_multi_if_and_count_if_opt/query.sql @@ -0,0 +1,11 @@ +drop table if exists m; + +create table m (a int) engine Log; + +insert into m values (1); + +set enable_analyzer = true, optimize_rewrite_sum_if_to_count_if=1; + +EXPLAIN QUERY TREE select sum(multiIf(a = 1, 1, 0)) from m; + +drop table m; diff --git a/parser/testdata/02374_in_tuple_index/ast.json b/parser/testdata/02374_in_tuple_index/ast.json new file mode 100644 index 000000000..c33cba21b --- /dev/null +++ b/parser/testdata/02374_in_tuple_index/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_in_tuple_index (children 1)" + }, + { + "explain": " Identifier t_in_tuple_index" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001290789, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/02374_in_tuple_index/metadata.json b/parser/testdata/02374_in_tuple_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02374_in_tuple_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02374_in_tuple_index/query.sql b/parser/testdata/02374_in_tuple_index/query.sql new file mode 100644 index 000000000..4f489f74e --- /dev/null +++ b/parser/testdata/02374_in_tuple_index/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS t_in_tuple_index; + +CREATE TABLE t_in_tuple_index +( + `ID` String, + `USER_ID` String, + `PLATFORM` LowCardinality(String) +) +ENGINE = MergeTree() +ORDER BY (PLATFORM, USER_ID, ID) +SETTINGS index_granularity = 2048, index_granularity_bytes = '10Mi'; + +INSERT INTO t_in_tuple_index VALUES ('1', 33, 'insta'), ('2', 33, 'insta'); + +SELECT count() +FROM t_in_tuple_index +WHERE (PLATFORM, USER_ID) IN (('insta', '33')); + +SELECT count() +FROM t_in_tuple_index +WHERE (PLATFORM, USER_ID) IN (('insta', '33'), ('insta', '22')); + +DROP TABLE IF EXISTS t_in_tuple_index; diff --git a/parser/testdata/02374_regexp_replace/ast.json b/parser/testdata/02374_regexp_replace/ast.json new file mode 100644 index 000000000..590260e3a --- /dev/null +++ b/parser/testdata/02374_regexp_replace/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'https:\/\/www.clickhouse.com\/' (alias s)" + }, + { + "explain": " Function REGEXP_REPLACE (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier s" + }, + { + "explain": " Literal '^https?:\/\/(?:www\\\\.)?([^\/]+)\/.*$'" + }, + { + "explain": " Literal '\\\\1'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001563168, + "rows_read": 10, + "bytes_read": 405 + } +} diff --git a/parser/testdata/02374_regexp_replace/metadata.json b/parser/testdata/02374_regexp_replace/metadata.json new file 
mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02374_regexp_replace/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02374_regexp_replace/query.sql b/parser/testdata/02374_regexp_replace/query.sql new file mode 100644 index 000000000..326adb7e6 --- /dev/null +++ b/parser/testdata/02374_regexp_replace/query.sql @@ -0,0 +1 @@ +SELECT 'https://www.clickhouse.com/' AS s, REGEXP_REPLACE(s, '^https?://(?:www\.)?([^/]+)/.*$', '\1'); diff --git a/parser/testdata/02375_analyzer_union/ast.json b/parser/testdata/02375_analyzer_union/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02375_analyzer_union/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02375_analyzer_union/metadata.json b/parser/testdata/02375_analyzer_union/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02375_analyzer_union/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02375_analyzer_union/query.sql b/parser/testdata/02375_analyzer_union/query.sql new file mode 100644 index 000000000..d2aac49da --- /dev/null +++ b/parser/testdata/02375_analyzer_union/query.sql @@ -0,0 +1,71 @@ +SET enable_analyzer = 0; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY tuple(); + +INSERT INTO test_table VALUES (0, 'Value'); + +-- { echoOn } + +SELECT 'Union constants'; + +SELECT 1 UNION ALL SELECT 1; + +SELECT '--'; + +SELECT 1 UNION DISTINCT SELECT 1 UNION ALL SELECT 1; + +SELECT '--'; + +SELECT 1 INTERSECT SELECT 1; + +SELECT '--'; + +SELECT 1 EXCEPT SELECT 1; + +SELECT '--'; + +SELECT id FROM (SELECT 1 AS id UNION ALL SELECT 1); + +SELECT 'Union non constants'; + +SELECT value FROM (SELECT 1 as value UNION ALL SELECT 1 UNION ALL SELECT 1); + +SELECT '--'; + +SELECT id FROM test_table UNION ALL SELECT id FROM test_table; + +SELECT '--'; + +SELECT id FROM test_table UNION DISTINCT SELECT id FROM test_table; + +SELECT '--'; + +SELECT id FROM test_table INTERSECT SELECT id FROM test_table; + +SELECT '--'; +SELECT id FROM test_table EXCEPT SELECT id FROM test_table; + +SELECT '--'; + +SELECT id FROM (SELECT id FROM test_table UNION ALL SELECT id FROM test_table); + +SELECT '--'; + +SELECT id FROM (SELECT id FROM test_table UNION DISTINCT SELECT id FROM test_table); + +SELECT '--'; + +SELECT id FROM (SELECT id FROM test_table INTERSECT SELECT id FROM test_table); + +SELECT '--'; + +SELECT id FROM (SELECT id FROM test_table EXCEPT SELECT id FROM test_table); + +-- { echoOff } + +DROP TABLE test_table; diff --git a/parser/testdata/02375_double_escaping_json/ast.json b/parser/testdata/02375_double_escaping_json/ast.json new file mode 100644 index 000000000..3490aaf50 --- /dev/null +++ b/parser/testdata/02375_double_escaping_json/ast.json @@ -0,0 +1,36 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '\\\\' (alias \")" + }, + { + "explain": " Identifier JSON" + }, + { + "explain": " Set" + } + ], + + "rows": 7 +} diff --git a/parser/testdata/02375_double_escaping_json/metadata.json b/parser/testdata/02375_double_escaping_json/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02375_double_escaping_json/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02375_double_escaping_json/query.sql b/parser/testdata/02375_double_escaping_json/query.sql new file mode 100644 index 000000000..ecfb24fca --- /dev/null +++ b/parser/testdata/02375_double_escaping_json/query.sql @@ -0,0 +1 @@ +SELECT '\\' AS `"` FORMAT JSON SETTINGS output_format_write_statistics = 0; diff --git a/parser/testdata/02375_scalar_lc_cte/ast.json b/parser/testdata/02375_scalar_lc_cte/ast.json new file mode 100644 index 000000000..d6c4737c4 --- /dev/null +++ b/parser/testdata/02375_scalar_lc_cte/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Subquery (alias bar) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier bar" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001602829, + "rows_read": 14, + "bytes_read": 557 + } +} diff --git a/parser/testdata/02375_scalar_lc_cte/metadata.json b/parser/testdata/02375_scalar_lc_cte/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02375_scalar_lc_cte/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02375_scalar_lc_cte/query.sql b/parser/testdata/02375_scalar_lc_cte/query.sql new file mode 100644 index 000000000..800183338 --- /dev/null +++ b/parser/testdata/02375_scalar_lc_cte/query.sql @@ -0,0 +1 @@ +WITH ( SELECT toLowCardinality('a') ) AS bar SELECT bar \ No newline at end of file diff --git a/parser/testdata/02376_analyzer_in_function_subquery/ast.json b/parser/testdata/02376_analyzer_in_function_subquery/ast.json new file mode 100644 index 000000000..4324f1ea9 --- /dev/null +++ b/parser/testdata/02376_analyzer_in_function_subquery/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001365744, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02376_analyzer_in_function_subquery/metadata.json b/parser/testdata/02376_analyzer_in_function_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02376_analyzer_in_function_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02376_analyzer_in_function_subquery/query.sql b/parser/testdata/02376_analyzer_in_function_subquery/query.sql new file mode 100644 index 000000000..05062301b --- /dev/null +++ b/parser/testdata/02376_analyzer_in_function_subquery/query.sql @@ -0,0 +1,60 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY tuple(); + +INSERT INTO test_table VALUES (0, 'Value_0'), (1, 'Value_1'), (2, 'Value_2'); + +DROP TABLE IF EXISTS 
test_table_for_in; +CREATE TABLE test_table_for_in +( + id UInt64 +) ENGINE=MergeTree ORDER BY tuple(); + +INSERT INTO test_table_for_in VALUES (0), (1); + +-- { echoOn } + +SELECT id, value FROM test_table WHERE 1 IN (SELECT 1); + +SELECT '--'; + +SELECT id, value FROM test_table WHERE 0 IN (SELECT 1); + +SELECT '--'; + +SELECT id, value FROM test_table WHERE id IN (SELECT 1); + +SELECT '--'; + +SELECT id, value FROM test_table WHERE id IN (SELECT 2); + +SELECT '--'; + +SELECT id, value FROM test_table WHERE id IN test_table_for_in; + +SELECT '--'; + +SELECT id, value FROM test_table WHERE id IN (SELECT id FROM test_table_for_in); + +SELECT '--'; + +SELECT id, value FROM test_table WHERE id IN (SELECT id FROM test_table_for_in UNION DISTINCT SELECT id FROM test_table_for_in); + +SELECT '--'; + +WITH cte_test_table_for_in AS (SELECT id FROM test_table_for_in) SELECT id, value FROM test_table WHERE id IN cte_test_table_for_in; + +SELECT '--'; + +WITH cte_test_table_for_in AS (SELECT id FROM test_table_for_in) SELECT id, value +FROM test_table WHERE id IN (SELECT id FROM cte_test_table_for_in UNION DISTINCT SELECT id FROM cte_test_table_for_in); + +-- { echoOff } + +DROP TABLE test_table; +DROP TABLE test_table_for_in; diff --git a/parser/testdata/02376_arrow_dict_with_string/ast.json b/parser/testdata/02376_arrow_dict_with_string/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02376_arrow_dict_with_string/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02376_arrow_dict_with_string/metadata.json b/parser/testdata/02376_arrow_dict_with_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02376_arrow_dict_with_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02376_arrow_dict_with_string/query.sql b/parser/testdata/02376_arrow_dict_with_string/query.sql new file mode 100644 index 000000000..307498f7b --- /dev/null +++ b/parser/testdata/02376_arrow_dict_with_string/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-parallel, no-fasttest +insert into function file(02376_data.arrow) select toLowCardinality(toString(number)) as x from numbers(10) settings output_format_arrow_string_as_string=1, output_format_arrow_low_cardinality_as_dictionary=1, engine_file_truncate_on_insert=1; +desc file (02376_data.arrow); +select * from file(02376_data.arrow); diff --git a/parser/testdata/02377_analyzer_in_function_set/ast.json b/parser/testdata/02377_analyzer_in_function_set/ast.json new file mode 100644 index 000000000..a34fe7c46 --- /dev/null +++ b/parser/testdata/02377_analyzer_in_function_set/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001368396, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02377_analyzer_in_function_set/metadata.json b/parser/testdata/02377_analyzer_in_function_set/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02377_analyzer_in_function_set/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02377_analyzer_in_function_set/query.sql b/parser/testdata/02377_analyzer_in_function_set/query.sql new file mode 100644 index 000000000..00aa40ccf --- /dev/null +++ b/parser/testdata/02377_analyzer_in_function_set/query.sql @@ -0,0 +1,23 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE 
test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value_0'), (1, 'Value_1'), (2, 'Value_2'); + +DROP TABLE IF EXISTS special_set_table; +CREATE TABLE special_set_table +( + id UInt64 +) ENGINE=Set; + +INSERT INTO special_set_table VALUES (0), (1); + +SELECT id, value FROM test_table WHERE id IN special_set_table; + +DROP TABLE special_set_table; +DROP TABLE test_table; diff --git a/parser/testdata/02377_executable_function_settings/ast.json b/parser/testdata/02377_executable_function_settings/ast.json new file mode 100644 index 000000000..0e4347464 --- /dev/null +++ b/parser/testdata/02377_executable_function_settings/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Explain EXPLAIN SYNTAX (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function executable (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal 'JSON'" + }, + { + "explain": " Literal 'data String'" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001290118, + "rows_read": 14, + "bytes_read": 541 + } +} diff --git a/parser/testdata/02377_executable_function_settings/metadata.json b/parser/testdata/02377_executable_function_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02377_executable_function_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02377_executable_function_settings/query.sql b/parser/testdata/02377_executable_function_settings/query.sql new file mode 100644 index 000000000..ae0dc49c2 --- /dev/null +++ b/parser/testdata/02377_executable_function_settings/query.sql @@ -0,0 +1,9 @@ +EXPLAIN SYNTAX SELECT * from executable('', 'JSON', 'data String'); +SELECT '--------------------'; +EXPLAIN SYNTAX SELECT * from executable('', 'JSON', 'data String', SETTINGS max_command_execution_time=100); +SELECT '--------------------'; +EXPLAIN SYNTAX SELECT * from executable('', 'JSON', 'data String', SETTINGS max_command_execution_time=100, command_read_timeout=1); +SELECT '--------------------'; + +SELECT * from executable('JSON', 'data String', SETTINGS max_command_execution_time=100); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT * from executable('JSON', 'data String', 'TEST', 'TEST'); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02377_fix_file_virtual_column/ast.json b/parser/testdata/02377_fix_file_virtual_column/ast.json new file mode 100644 index 000000000..ccea90ebd --- /dev/null +++ b/parser/testdata/02377_fix_file_virtual_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_02377 (children 1)" + }, + { + "explain": " Identifier test_02377" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001217441, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02377_fix_file_virtual_column/metadata.json 
b/parser/testdata/02377_fix_file_virtual_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02377_fix_file_virtual_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02377_fix_file_virtual_column/query.sql b/parser/testdata/02377_fix_file_virtual_column/query.sql new file mode 100644 index 000000000..5d79e7f12 --- /dev/null +++ b/parser/testdata/02377_fix_file_virtual_column/query.sql @@ -0,0 +1,7 @@ +drop table if exists test_02377; +create table test_02377 (n UInt32, s String) engine=File(CSVWithNames); +insert into test_02377 values(1, 's') (2, 'x') (3, 'y'); +select * from test_02377 order by n; +select *, _path, _file from test_02377 format Null; +select _path, _file from test_02377 format Null; +drop table test_02377; diff --git a/parser/testdata/02377_majority_insert_quorum_zookeeper_long/ast.json b/parser/testdata/02377_majority_insert_quorum_zookeeper_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02377_majority_insert_quorum_zookeeper_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02377_majority_insert_quorum_zookeeper_long/metadata.json b/parser/testdata/02377_majority_insert_quorum_zookeeper_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02377_majority_insert_quorum_zookeeper_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02377_majority_insert_quorum_zookeeper_long/query.sql b/parser/testdata/02377_majority_insert_quorum_zookeeper_long/query.sql new file mode 100644 index 000000000..796b33ff4 --- /dev/null +++ b/parser/testdata/02377_majority_insert_quorum_zookeeper_long/query.sql @@ -0,0 +1,75 @@ +-- Tags: long, zookeeper, no-replicated-database, no-shared-merge-tree + +-- no-replicated-database: +-- The number of replicas is doubled, so `SYSTEM STOP FETCHES` stops too few replicas.
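+-- (With extra replicas still fetching, the quorum-failure cases exercised below, REPLICA_IS_NOT_IN_QUORUM and UNKNOWN_STATUS_OF_INSERT, would not be reproducible.)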
+-- no-shared-merge-tree: no quorum inserts + +SET insert_quorum_parallel = false; + +SET select_sequential_consistency = 1; + +DROP TABLE IF EXISTS quorum1; +DROP TABLE IF EXISTS quorum2; +DROP TABLE IF EXISTS quorum3; + +CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_02377/quorum', '1') ORDER BY x PARTITION BY y; +CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_02377/quorum', '2') ORDER BY x PARTITION BY y; + +-- insert_quorum = n/2 + 1, so the insert will be written to both replicas +SET insert_quorum = 'auto'; +SET insert_keeper_fault_injection_probability=0; + +INSERT INTO quorum1 VALUES (1, '2018-11-15'); +INSERT INTO quorum1 VALUES (2, '2018-11-15'); +INSERT INTO quorum1 VALUES (3, '2018-12-16'); + +SELECT x FROM quorum1 ORDER BY x; +SELECT x FROM quorum2 ORDER BY x; + +DROP TABLE quorum1; +DROP TABLE quorum2; + +-- Create 3 replicas and stop sync on 2 replicas +CREATE TABLE quorum1(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_02377/quorum1', '1') ORDER BY x PARTITION BY y; +CREATE TABLE quorum2(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_02377/quorum1', '2') ORDER BY x PARTITION BY y; +CREATE TABLE quorum3(x UInt32, y Date) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_02377/quorum1', '3') ORDER BY x PARTITION BY y; + +-- Insert should be successful +-- stop replica 3 +SYSTEM STOP FETCHES quorum3; +INSERT INTO quorum1 VALUES (1, '2018-11-15'); +SELECT x FROM quorum1 ORDER BY x; +SELECT x FROM quorum2 ORDER BY x; +SELECT x FROM quorum3 ORDER BY x; -- {serverError REPLICA_IS_NOT_IN_QUORUM} + +-- Sync replica 3 +SYSTEM START FETCHES quorum3; +SYSTEM SYNC REPLICA quorum3; +SELECT x FROM quorum3 ORDER BY x; + +-- Stop 2 replicas, so the insert won't be successful +SYSTEM STOP FETCHES quorum2; +SYSTEM STOP FETCHES quorum3; +SET insert_quorum_timeout = 5000; +INSERT INTO quorum1 VALUES (2, '2018-11-15'); -- { serverError UNKNOWN_STATUS_OF_INSERT } +SELECT x FROM quorum1 ORDER BY x; +SELECT x FROM quorum2 ORDER BY x; +SELECT x FROM quorum3 ORDER BY x; + +-- Sync replicas 2 and 3 +SYSTEM START FETCHES quorum2; +SYSTEM SYNC REPLICA quorum2; +SYSTEM START FETCHES quorum3; +SYSTEM SYNC REPLICA quorum3; + +SET insert_quorum_timeout = 600000; -- set default value back +INSERT INTO quorum1 VALUES (3, '2018-11-15'); +SELECT x FROM quorum1 ORDER BY x; +SYSTEM SYNC REPLICA quorum2; +SYSTEM SYNC REPLICA quorum3; +SELECT x FROM quorum2 ORDER BY x; +SELECT x FROM quorum3 ORDER BY x; + +DROP TABLE quorum1; +DROP TABLE quorum2; +DROP TABLE quorum3; diff --git a/parser/testdata/02377_modify_column_from_lc/ast.json b/parser/testdata/02377_modify_column_from_lc/ast.json new file mode 100644 index 000000000..82846aeb8 --- /dev/null +++ b/parser/testdata/02377_modify_column_from_lc/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_modify_from_lc_1 (children 1)" + }, + { + "explain": " Identifier t_modify_from_lc_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001459168, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/02377_modify_column_from_lc/metadata.json b/parser/testdata/02377_modify_column_from_lc/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02377_modify_column_from_lc/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git
diff --git a/parser/testdata/02377_modify_column_from_lc/query.sql b/parser/testdata/02377_modify_column_from_lc/query.sql new file mode 100644 index 000000000..efee323e8 --- /dev/null +++ b/parser/testdata/02377_modify_column_from_lc/query.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS t_modify_from_lc_1; +DROP TABLE IF EXISTS t_modify_from_lc_2; + +SET allow_suspicious_low_cardinality_types = 1; + +CREATE TABLE t_modify_from_lc_1 +( + id UInt64, + a LowCardinality(UInt32) CODEC(NONE) +) +ENGINE = MergeTree ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192, index_granularity_bytes = '10Mi'; + +CREATE TABLE t_modify_from_lc_2 +( + id UInt64, + a LowCardinality(UInt32) CODEC(NONE) +) +ENGINE = MergeTree ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO t_modify_from_lc_1 SELECT number, number FROM numbers(100000); +INSERT INTO t_modify_from_lc_2 SELECT number, number FROM numbers(100000); + +OPTIMIZE TABLE t_modify_from_lc_1 FINAL; +OPTIMIZE TABLE t_modify_from_lc_2 FINAL; + +ALTER TABLE t_modify_from_lc_1 MODIFY COLUMN a UInt32; + +-- Check that the dictionary of the LowCardinality column is actually +-- dropped and the total size on disk is reduced. +WITH groupArray((table, bytes))::Map(String, UInt64) AS stats +SELECT + length(stats), stats['t_modify_from_lc_1'] < stats['t_modify_from_lc_2'] +FROM +( + SELECT table, sum(bytes_on_disk) AS bytes FROM system.parts + WHERE database = currentDatabase() AND table LIKE 't_modify_from_lc%' AND active + GROUP BY table +); + +DROP TABLE IF EXISTS t_modify_from_lc_1; +DROP TABLE IF EXISTS t_modify_from_lc_2; diff --git a/parser/testdata/02377_modify_column_from_nested/ast.json b/parser/testdata/02377_modify_column_from_nested/ast.json new file mode 100644 index 000000000..135694c32 --- /dev/null +++ b/parser/testdata/02377_modify_column_from_nested/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_nested_modify (children 1)" + }, + { + "explain": " Identifier t_nested_modify" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001145796, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02377_modify_column_from_nested/metadata.json b/parser/testdata/02377_modify_column_from_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02377_modify_column_from_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02377_modify_column_from_nested/query.sql b/parser/testdata/02377_modify_column_from_nested/query.sql new file mode 100644 index 000000000..8270cce62 --- /dev/null +++ b/parser/testdata/02377_modify_column_from_nested/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS t_nested_modify; + +CREATE TABLE t_nested_modify (id UInt64, `n.a` Array(UInt32), `n.b` Array(String)) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO t_nested_modify VALUES (1, [2], ['aa']); +INSERT INTO t_nested_modify VALUES (2, [44, 55], ['bb', 'cc']); + +SELECT id, `n.a`, `n.b`, toTypeName(`n.b`) FROM t_nested_modify ORDER BY id; + +ALTER TABLE t_nested_modify MODIFY COLUMN `n.b` String; + +SELECT id, `n.a`, `n.b`, toTypeName(`n.b`) FROM t_nested_modify ORDER BY id; + +DETACH TABLE t_nested_modify; +ATTACH TABLE t_nested_modify; + +SELECT id, `n.a`, `n.b`, toTypeName(`n.b`) FROM t_nested_modify ORDER BY id; + +DROP TABLE t_nested_modify; diff --git
a/parser/testdata/02377_optimize_sorting_by_input_stream_properties/ast.json b/parser/testdata/02377_optimize_sorting_by_input_stream_properties/ast.json new file mode 100644 index 000000000..48213ee92 --- /dev/null +++ b/parser/testdata/02377_optimize_sorting_by_input_stream_properties/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000887633, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02377_optimize_sorting_by_input_stream_properties/metadata.json b/parser/testdata/02377_optimize_sorting_by_input_stream_properties/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02377_optimize_sorting_by_input_stream_properties/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02377_optimize_sorting_by_input_stream_properties/query.sql b/parser/testdata/02377_optimize_sorting_by_input_stream_properties/query.sql new file mode 100644 index 000000000..d33761881 --- /dev/null +++ b/parser/testdata/02377_optimize_sorting_by_input_stream_properties/query.sql @@ -0,0 +1,40 @@ +set optimize_sorting_by_input_stream_properties=1; + +DROP TABLE IF EXISTS optimize_sorting; +CREATE TABLE optimize_sorting (a UInt64, b UInt64) ENGINE MergeTree() ORDER BY tuple(); +INSERT INTO optimize_sorting VALUES(0, 0); +INSERT INTO optimize_sorting VALUES(0xFFFFffffFFFFffff, 0xFFFFffffFFFFffff); +-- { echoOn } +-- order by for MergeTree w/o sorting key +SELECT a, b from optimize_sorting order by a, b; +-- { echoOff } + +DROP TABLE IF EXISTS optimize_sorting; +CREATE TABLE optimize_sorting (a UInt64, b UInt64, c UInt64) ENGINE MergeTree() ORDER BY (a, b); +INSERT INTO optimize_sorting SELECT number, number%5, number%2 from numbers(0, 5); +INSERT INTO optimize_sorting SELECT number, number%5, number%2 from numbers(5, 5); + +-- { echoOn } +SELECT a from optimize_sorting order by a; +SELECT c from optimize_sorting order by c; +-- queries with unary function in order by +SELECT a from optimize_sorting order by -a; +SELECT a from optimize_sorting order by toFloat64(a); +-- queries with non-unary function in order by +SELECT a, a+1 from optimize_sorting order by a+1; +SELECT a, a-1 from optimize_sorting order by a-1; +SELECT a, sipHash64(a,'a') from optimize_sorting order by sipHash64(a,'a'); +-- queries with aliases +SELECT a as a from optimize_sorting order by a; +SELECT a+1 as a from optimize_sorting order by a; +SELECT toFloat64(a) as a from optimize_sorting order by a; +SELECT sipHash64(a) as a from optimize_sorting order by a; +-- queries with filter +SELECT a FROM optimize_sorting WHERE a > 0 ORDER BY a; +SELECT a > 0 FROM optimize_sorting WHERE a > 0; +SELECT a FROM (SELECT a FROM optimize_sorting) WHERE a != 0 ORDER BY a; +SELECT a FROM (SELECT sipHash64(a) AS a FROM optimize_sorting) WHERE a != 0 ORDER BY a; +-- queries with a non-trivial chain of actions in the expression +SELECT a, z FROM (SELECT sipHash64(a) AS a, a + 1 AS z FROM (SELECT a FROM optimize_sorting ORDER BY a + 1)) ORDER BY a + 1; +-- { echoOff } +DROP TABLE IF EXISTS optimize_sorting; diff --git a/parser/testdata/02377_optimize_sorting_by_input_stream_properties_2/ast.json b/parser/testdata/02377_optimize_sorting_by_input_stream_properties_2/ast.json new file mode 100644 index 000000000..6e5b8fb7c --- /dev/null +++ b/parser/testdata/02377_optimize_sorting_by_input_stream_properties_2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": +
[ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001096357, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02377_optimize_sorting_by_input_stream_properties_2/metadata.json b/parser/testdata/02377_optimize_sorting_by_input_stream_properties_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02377_optimize_sorting_by_input_stream_properties_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02377_optimize_sorting_by_input_stream_properties_2/query.sql b/parser/testdata/02377_optimize_sorting_by_input_stream_properties_2/query.sql new file mode 100644 index 000000000..e390686b3 --- /dev/null +++ b/parser/testdata/02377_optimize_sorting_by_input_stream_properties_2/query.sql @@ -0,0 +1,21 @@ +drop table if exists tab; +create table tab (x UInt32, y UInt32) engine = MergeTree order by x; + +insert into tab select number, number from numbers(10); +insert into tab select number + 10, number + 10 from numbers(10); + +set optimize_sorting_by_input_stream_properties=1; +set optimize_aggregation_in_order=1; +set enable_memory_bound_merging_of_aggregation_results=1; +set prefer_localhost_replica=1; + +-- Nothing is working here :( + +select sum(y) as s from remote('127.0.0.{1,2}', currentDatabase(), tab) group by x order by x; +select replaceAll(trimLeft(explain), '__table1.', '') from (explain actions = 1, sorting=1, description=0 select sum(y) as s from remote('127.0.0.{1,2}', currentDatabase(), tab) group by x order by x) where explain ilike '%sort%' or explain like '%ReadFromMergeTree%' or explain like '%Aggregat%'; + +select sum(y) as s from remote('127.0.0.{1,2}', currentDatabase(), tab) group by x order by x desc; +select replaceAll(trimLeft(explain), '__table1.', '') from (explain actions = 1, sorting=1, description=0 select sum(y) as s from remote('127.0.0.{1,2}', currentDatabase(), tab) group by x order by x desc ) where explain ilike '%sort%' or explain like '%ReadFromMergeTree%' or explain like '%Aggregat%'; + +select sum(y) as s from remote('127.0.0.{1,2}', currentDatabase(), tab) group by x order by x, s; +select replaceAll(trimLeft(explain), '__table1.', '') from (explain actions = 1, sorting=1, description=0 select sum(y) as s from remote('127.0.0.{1,2}', currentDatabase(), tab) group by x order by x, s) where explain ilike '%sort%' or explain like '%ReadFromMergeTree%' or explain like '%Aggregat%'; diff --git a/parser/testdata/02378_analyzer_projection_names/ast.json b/parser/testdata/02378_analyzer_projection_names/ast.json new file mode 100644 index 000000000..5a901b57e --- /dev/null +++ b/parser/testdata/02378_analyzer_projection_names/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001457429, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02378_analyzer_projection_names/metadata.json b/parser/testdata/02378_analyzer_projection_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02378_analyzer_projection_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02378_analyzer_projection_names/query.sql b/parser/testdata/02378_analyzer_projection_names/query.sql new file mode 
100644 index 000000000..39670f1db --- /dev/null +++ b/parser/testdata/02378_analyzer_projection_names/query.sql @@ -0,0 +1,559 @@ +SET enable_analyzer = 1; +SET single_join_prefer_left_table = 0; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +DROP TABLE IF EXISTS test_table_in; +CREATE TABLE test_table_in +( + id UInt64 +) ENGINE=TinyLog; + +DROP TABLE IF EXISTS test_table_compound; +CREATE TABLE test_table_compound +( + id UInt64, + tuple_value Tuple(value_1 UInt64, value_2 String) +) ENGINE=TinyLog; + +INSERT INTO test_table_compound VALUES (0, tuple(0, 'Value')); + +DROP TABLE IF EXISTS test_table_join_1; +CREATE TABLE test_table_join_1 +( + id UInt64, + value String, + value_join_1 String +) ENGINE=TinyLog; + +INSERT INTO test_table_join_1 VALUES (0, 'Join_1_Value', 'Join_1_Value'); + +DROP TABLE IF EXISTS test_table_join_2; +CREATE TABLE test_table_join_2 +( + id UInt64, + value String, + value_join_2 String +) ENGINE=TinyLog; + +INSERT INTO test_table_join_2 VALUES (0, 'Join_2_Value', 'Join_2_Value'); + +DROP TABLE IF EXISTS test_table_join_3; +CREATE TABLE test_table_join_3 +( + id UInt64, + value String, + value_join_3 String +) ENGINE=TinyLog; + +INSERT INTO test_table_join_3 VALUES (0, 'Join_3_Value', 'Join_3_Value'); + +-- { echoOn } + +SELECT 'Constants'; + +DESCRIBE (SELECT 1, 'Value'); + +SELECT '--'; + +DESCRIBE (SELECT 1 + 1, concat('Value_1', 'Value_2')); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)')); + +SELECT 'Columns'; + +DESCRIBE (SELECT test_table.id, test_table.id, id FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT * FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT * APPLY toString FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT * APPLY x -> toString(x) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT tuple_value.* FROM test_table_compound); + +SELECT '--'; + +DESCRIBE (SELECT tuple_value.* APPLY x -> x FROM test_table_compound); + +SELECT '--'; + +DESCRIBE (SELECT tuple_value.* APPLY toString FROM test_table_compound); + +SELECT '--'; + +DESCRIBE (SELECT tuple_value.* APPLY x -> toString(x) FROM test_table_compound); + +SELECT 'Constants with aliases'; + +DESCRIBE (SELECT 1 AS a, a AS b, b, b AS c, c, 'Value' AS d, d AS e, e AS f); + +SELECT '--'; + +DESCRIBE (SELECT plus(1 AS a, a AS b), plus(b, b), plus(b, b) AS c, concat('Value' AS d, d) AS e, e); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, a.id, a.value); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, a.*); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, a.* EXCEPT id); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, a.* EXCEPT value); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, a.* EXCEPT value APPLY toString); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, a.* EXCEPT value APPLY x -> toString(x)); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, untuple(a)); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS a, untuple(a) AS b); + +SELECT 'Columns with aliases'; + +DESCRIBE (SELECT test_table.id AS a, a, 
test_table.id AS b, b AS c, c FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT plus(test_table.id AS a, test_table.id), plus(id, id AS b), plus(b, b), plus(test_table.id, test_table.id) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT test_table.* REPLACE id + (id AS id_alias) AS id, id_alias FROM test_table); + +SELECT 'Matcher'; + +DESCRIBE (SELECT * FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT test_table.* FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT 1 AS id, 2 AS value, * FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT 1 AS id, 2 AS value, * FROM test_table AS t1); + +SELECT 'Lambda'; + +DESCRIBE (SELECT arrayMap(x -> x + 1, [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT 1 AS a, arrayMap(x -> x + a, [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT arrayMap(x -> x + test_table.id + test_table.id + id, [1,2,3]) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT arrayMap(x -> x + (test_table.id AS first) + (test_table.id AS second) + id, [1,2,3]) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT arrayMap(x -> test_table.* EXCEPT value, [1,2,3]) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT arrayMap(x -> tt.* EXCEPT value, [1,2,3]) FROM test_table as tt); + +SELECT '--'; + +DESCRIBE (SELECT arrayMap(x -> test_table.* EXCEPT value APPLY x -> x, [1,2,3]) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT arrayMap(x -> test_table.* EXCEPT value APPLY toString, [1,2,3]) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT arrayMap(x -> test_table.* EXCEPT value APPLY x -> toString(x), [1,2,3]) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS compound_value, arrayMap(x -> compound_value.*, [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS compound_value, arrayMap(x -> compound_value.* APPLY x -> x, [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS compound_value, arrayMap(x -> compound_value.* APPLY toString, [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS compound_value, arrayMap(x -> compound_value.* APPLY x -> toString(x), [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS compound_value, arrayMap(x -> compound_value.* EXCEPT value, [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS compound_value, arrayMap(x -> compound_value.* EXCEPT value APPLY x -> x, [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS compound_value, arrayMap(x -> compound_value.* EXCEPT value APPLY toString, [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1, 'Value'), 'Tuple (id UInt64, value String)') AS compound_value, arrayMap(x -> compound_value.* EXCEPT value APPLY x -> toString(x), [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS a, arrayMap(x -> untuple(a), [1,2,3]) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS a, arrayMap(x -> untuple(a) AS untupled_value, [1,2,3]) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS a, untuple(a) AS untupled_value, arrayMap(x -> untupled_value, [1,2,3]) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT cast(tuple(1), 'Tuple (id UInt64)') AS a, untuple(a) AS untupled_value, arrayMap(x -> untupled_value AS untupled_value_in_lambda, [1,2,3]) FROM test_table); + +SELECT 
'Standalone lambda'; + +DESCRIBE (WITH x -> x + 1 AS test_lambda SELECT test_lambda(1)); + +SELECT '--'; + +DESCRIBE (WITH x -> * AS test_lambda SELECT test_lambda(1) AS lambda_value, lambda_value FROM test_table); + +SELECT 'Subquery'; + +DESCRIBE (SELECT (SELECT 1), (SELECT 2), (SELECT 3) AS a, (SELECT 4)); + +SELECT '--'; + +DESCRIBE (SELECT arrayMap(x -> (SELECT 1), [1,2,3]), arrayMap(x -> (SELECT 2) AS a, [1, 2, 3]), arrayMap(x -> (SELECT 1), [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT (SELECT 1 AS a, 2 AS b) AS c, c.a, c.b); + +SELECT '--'; + +DESCRIBE (SELECT (SELECT 1 AS a, 2 AS b) AS c, c.*); + +SELECT '--'; + +DESCRIBE (SELECT (SELECT 1 UNION DISTINCT SELECT 1), (SELECT 2 UNION DISTINCT SELECT 2), (SELECT 3 UNION DISTINCT SELECT 3) AS a, (SELECT 4 UNION DISTINCT SELECT 4)); + +SELECT '--'; + +DESCRIBE (SELECT arrayMap(x -> (SELECT 1 UNION DISTINCT SELECT 1), [1,2,3]), arrayMap(x -> (SELECT 2 UNION DISTINCT SELECT 2) AS a, [1, 2, 3]), +arrayMap(x -> (SELECT 3 UNION DISTINCT SELECT 3), [1,2,3])); + +SELECT '--'; + +DESCRIBE (SELECT (SELECT 1 AS a, 2 AS b UNION DISTINCT SELECT 1, 2) AS c, c.a, c.b); + +SELECT '--'; + +DESCRIBE (SELECT (SELECT 1 AS a, 2 AS b UNION DISTINCT SELECT 1, 2) AS c, c.*); + +SELECT '--'; + +DESCRIBE (SELECT (SELECT 1), (SELECT 2 UNION DISTINCT SELECT 2), (SELECT 3) AS a, (SELECT 4 UNION DISTINCT SELECT 4)); + +SELECT '--'; + +DESCRIBE (SELECT arrayMap(x -> (SELECT 1 UNION DISTINCT SELECT 1), [1,2,3]), arrayMap(x -> (SELECT 2) AS a, [1, 2, 3]), +arrayMap(x -> (SELECT 3 UNION DISTINCT SELECT 3), [1,2,3])); + +SELECT 'Window functions'; + +DESCRIBE (SELECT count() OVER ()); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER () AS window_function); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS CURRENT ROW) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS BETWEEN CURRENT ROW AND CURRENT ROW) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC RANGE CURRENT ROW) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC RANGE BETWEEN CURRENT ROW AND CURRENT ROW) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY (id AS id_alias), (value AS value_alias) ORDER BY id ASC, value DESC ROWS CURRENT ROW) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY (id AS id_alias) ASC, (value AS value_alias) DESC ROWS CURRENT ROW) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS BETWEEN 1 PRECEDING AND 2 FOLLOWING) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS BETWEEN 1 + 1 PRECEDING AND 2 + 2 FOLLOWING) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (PARTITION BY id, value ORDER BY id ASC, value DESC ROWS BETWEEN ((1 + 1) AS frame_offset_begin) PRECEDING AND ((2 + 2) AS frame_offset_end) FOLLOWING) +FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (ORDER BY toNullable(id) NULLS 
FIRST) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (ORDER BY toNullable(id) NULLS LAST) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (ORDER BY id WITH FILL FROM 1 TO 5 STEP 1) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (ORDER BY id WITH FILL FROM 1 + 1 TO 6 STEP 1 + 1) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (ORDER BY id WITH FILL FROM ((1 + 1) AS from) TO (6 AS to) STEP ((1 + 1) AS step)) FROM test_table); + +SELECT 'Window functions WINDOW'; + +DESCRIBE (SELECT count() OVER window_name FROM test_table WINDOW window_name AS (PARTITION BY id)); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER window_name FROM test_table WINDOW window_name AS (PARTITION BY id ORDER BY value)); + +SELECT '--'; + +DESCRIBE (SELECT count() OVER (window_name ORDER BY id) FROM test_table WINDOW window_name AS (PARTITION BY id)); + +SELECT 'IN function'; + +DESCRIBE (SELECT id IN (SELECT 1) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT id IN (SELECT id FROM test_table_in) FROM test_table); + +SELECT '--'; + +DESCRIBE (SELECT id IN test_table_in FROM test_table); + +SELECT '--'; + +DESCRIBE (WITH test_table_in_cte AS (SELECT id FROM test_table) SELECT id IN (SELECT id FROM test_table_in_cte) FROM test_table); + +SELECT '--'; + +DESCRIBE (WITH test_table_in_cte AS (SELECT id FROM test_table) SELECT id IN test_table_in_cte FROM test_table); + +SELECT '--'; + +DESCRIBE (WITH test_table_in_cte_1 AS (SELECT 1 AS c1), test_table_in_cte_2 AS (SELECT 1 AS c1) SELECT * +FROM test_table_in_cte_1 INNER JOIN test_table_in_cte_2 as test_table_in_cte_2 ON test_table_in_cte_1.c1 = test_table_in_cte_2.c1); + +SELECT '--'; + +DESCRIBE (WITH test_table_in_cte_1 AS (SELECT 1 AS c1), test_table_in_cte_2 AS (SELECT 1 AS c1 UNION ALL SELECT 1 AS c1) SELECT * +FROM test_table_in_cte_1 INNER JOIN test_table_in_cte_2 as test_table_in_cte_2 ON test_table_in_cte_1.c1 = test_table_in_cte_2.c1); + +SELECT 'Joins'; + +DESCRIBE (SELECT * FROM test_table_join_1, test_table_join_2); + +SELECT '--'; + +DESCRIBE (SELECT * FROM test_table_join_1 AS t1, test_table_join_2 AS t2); + +SELECT '--'; + +DESCRIBE (SELECT * APPLY toString FROM test_table_join_1 AS t1, test_table_join_2 AS t2); + +SELECT '--'; + +DESCRIBE (SELECT * APPLY x -> toString(x) FROM test_table_join_1 AS t1, test_table_join_2 AS t2); + +SELECT '--'; + +DESCRIBE (SELECT test_table_join_1.*, test_table_join_2.* FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id); + +SELECT '--'; + +DESCRIBE (SELECT t1.*, t2.* FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id); + +SELECT '--'; + +DESCRIBE (SELECT test_table_join_1.* APPLY toString, test_table_join_2.* APPLY toString FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id); + +SELECT '--'; + +DESCRIBE (SELECT test_table_join_1.* APPLY x -> toString(x), test_table_join_2.* APPLY x -> toString(x) FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id); + +SELECT '--'; + +DESCRIBE (SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_1.value_join_1, test_table_join_2.id, test_table_join_2.value, test_table_join_2.value_join_2 +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id); + +SELECT '--'; + +DESCRIBE (SELECT t1.id, t1.value, t1.value_join_1, t2.id, t2.value, t2.value_join_2 FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id); + 
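+-- Note on the probes in this file: DESCRIBE returns only the projection's column names +-- and types (e.g. DESCRIBE (SELECT 1 AS x) yields the single row "x UInt8"), which is +-- what pins down how the analyzer names aliased, qualified, and matcher-expanded columns +-- in the join queries above and below. +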
+SELECT 'Multiple JOINS'; + +DESCRIBE (SELECT * FROM test_table_join_1, test_table_join_2, test_table_join_3); + +SELECT '--'; + +DESCRIBE (SELECT * FROM test_table_join_1 AS t1, test_table_join_2 AS t2, test_table_join_3 AS t3); + +SELECT '--'; + +DESCRIBE (SELECT * APPLY toString FROM test_table_join_1 AS t1, test_table_join_2 AS t2, test_table_join_3 AS t3); + +SELECT '--'; + +DESCRIBE (SELECT * APPLY x -> toString(x) FROM test_table_join_1 AS t1, test_table_join_2 AS t2, test_table_join_3 AS t3); + +SELECT '--'; + +DESCRIBE (SELECT test_table_join_1.*, test_table_join_2.*, test_table_join_3.* +FROM test_table_join_1 INNER JOIN test_table_join_2 ON test_table_join_1.id = test_table_join_2.id +INNER JOIN test_table_join_3 ON test_table_join_2.id = test_table_join_3.id); + +SELECT '--'; + +DESCRIBE (SELECT t1.*, t2.*, t3.* +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id); + +SELECT '--'; + +DESCRIBE (SELECT test_table_join_1.* APPLY toString, test_table_join_2.* APPLY toString, test_table_join_3.* APPLY toString +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id); + +SELECT '--'; + +DESCRIBE (SELECT test_table_join_1.* APPLY x -> toString(x), test_table_join_2.* APPLY x -> toString(x), test_table_join_3.* APPLY x -> toString(x) +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id); + +SELECT '--'; + +DESCRIBE (SELECT test_table_join_1.id, test_table_join_1.value, test_table_join_1.value_join_1, test_table_join_2.id, test_table_join_2.value, test_table_join_2.value_join_2, +test_table_join_3.id, test_table_join_3.value, test_table_join_3.value_join_3 +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id); + +SELECT '--'; + +DESCRIBE (SELECT t1.id, t1.value, t1.value_join_1, t2.id, t2.value, t2.value_join_2, t3.id, t3.value, t3.value_join_3 +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id); + +SELECT 'Joins USING'; + +DESCRIBE (SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id)); + +SELECT '--'; + +DESCRIBE (SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id, value)); + +SELECT '--'; + +DESCRIBE (SELECT id, t1.id, t1.value, t2.id, t2.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id)); + +SELECT '--'; + +DESCRIBE (SELECT id, value, t1.id, t1.value, t2.id, t2.value FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id, value)); + +SELECT 'Multiple Joins USING'; + +SELECT '--'; + +DESCRIBE (SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING (id)); + +SELECT '--'; + +DESCRIBE (SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id, value) INNER JOIN test_table_join_3 AS t3 USING (id, value)); + +SELECT '--'; + +DESCRIBE (SELECT id, t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING (id)); + +SELECT '--'; + +DESCRIBE (SELECT id, value, t1.id, t1.value, t2.id, t2.value, t3.id, t3.value +FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id, 
value) INNER JOIN test_table_join_3 AS t3 USING (id, value)); + +SELECT 'Special functions array, tuple'; +DESCRIBE (SELECT [], array(), [1], array(1), [1, 2], array(1, 2), tuple(1), (1, 2), [[], []], [([], [])], ([], []), ([([], []), ([], [])])); + +-- { echoOff } + +DROP TABLE test_table_join_1; +DROP TABLE test_table_join_2; +DROP TABLE test_table_join_3; +DROP TABLE test_table; +DROP TABLE test_table_compound; diff --git a/parser/testdata/02378_part_log_profile_events_replicated/ast.json b/parser/testdata/02378_part_log_profile_events_replicated/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02378_part_log_profile_events_replicated/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02378_part_log_profile_events_replicated/metadata.json b/parser/testdata/02378_part_log_profile_events_replicated/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02378_part_log_profile_events_replicated/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02378_part_log_profile_events_replicated/query.sql b/parser/testdata/02378_part_log_profile_events_replicated/query.sql new file mode 100644 index 000000000..605b61212 --- /dev/null +++ b/parser/testdata/02378_part_log_profile_events_replicated/query.sql @@ -0,0 +1,40 @@ +-- Tags: long, replica, no-replicated-database, no-parallel, no-shared-merge-tree +-- no-shared-merge-tree: depends on events for replicated merge tree + +DROP TABLE IF EXISTS part_log_profile_events_r1 SYNC; +DROP TABLE IF EXISTS part_log_profile_events_r2 SYNC; + +CREATE TABLE part_log_profile_events_r1 (x UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_02378/part_log_profile_events', 'r1') +ORDER BY x +PARTITION BY x >= 128 +; + +CREATE TABLE part_log_profile_events_r2 (x UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_02378/part_log_profile_events', 'r2') +ORDER BY x +PARTITION BY x >= 128 +; + +-- SYSTEM STOP MERGES part_log_profile_events_r1; +-- SYSTEM STOP MERGES part_log_profile_events_r2; + +SET max_block_size = 64, max_insert_block_size = 64, min_insert_block_size_rows = 64; + +INSERT INTO part_log_profile_events_r1 SELECT number FROM numbers(1000); + +SYSTEM SYNC REPLICA part_log_profile_events_r2; + +SYSTEM FLUSH LOGS part_log; + +SELECT + count() > 1 + AND SUM(ProfileEvents['ZooKeeperTransactions']) >= 4 +FROM system.part_log +WHERE event_time > now() - INTERVAL 10 MINUTE + AND database == currentDatabase() AND table == 'part_log_profile_events_r2' + AND event_type == 'DownloadPart' +; + +DROP TABLE part_log_profile_events_r1 SYNC; +DROP TABLE part_log_profile_events_r2 SYNC; diff --git a/parser/testdata/02379_analyzer_subquery_depth/ast.json b/parser/testdata/02379_analyzer_subquery_depth/ast.json new file mode 100644 index 000000000..08f49bb3a --- /dev/null +++ b/parser/testdata/02379_analyzer_subquery_depth/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001141105, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02379_analyzer_subquery_depth/metadata.json b/parser/testdata/02379_analyzer_subquery_depth/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02379_analyzer_subquery_depth/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git
a/parser/testdata/02379_analyzer_subquery_depth/query.sql b/parser/testdata/02379_analyzer_subquery_depth/query.sql new file mode 100644 index 000000000..40303e0f9 --- /dev/null +++ b/parser/testdata/02379_analyzer_subquery_depth/query.sql @@ -0,0 +1,4 @@ +SET enable_analyzer = 1; + +SELECT (SELECT a FROM (SELECT 1 AS a)) SETTINGS max_subquery_depth = 1; -- { serverError TOO_DEEP_SUBQUERIES } +SELECT (SELECT a FROM (SELECT 1 AS a)) SETTINGS max_subquery_depth = 2; diff --git a/parser/testdata/02380_analyzer_join_sample/ast.json b/parser/testdata/02380_analyzer_join_sample/ast.json new file mode 100644 index 000000000..3a5c23c55 --- /dev/null +++ b/parser/testdata/02380_analyzer_join_sample/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001106565, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02380_analyzer_join_sample/metadata.json b/parser/testdata/02380_analyzer_join_sample/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02380_analyzer_join_sample/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02380_analyzer_join_sample/query.sql b/parser/testdata/02380_analyzer_join_sample/query.sql new file mode 100644 index 000000000..2bb6d1e13 --- /dev/null +++ b/parser/testdata/02380_analyzer_join_sample/query.sql @@ -0,0 +1,30 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table_join_1; +CREATE TABLE test_table_join_1 +( + id UInt64, + value String +) ENGINE=MergeTree +ORDER BY id +SAMPLE BY id; + +INSERT INTO test_table_join_1 VALUES (0, 'Value'), (1, 'Value_1'); + +DROP TABLE IF EXISTS test_table_join_2; +CREATE TABLE test_table_join_2 +( + id UInt64, + value String +) ENGINE=MergeTree +ORDER BY id +SAMPLE BY id; + +INSERT INTO test_table_join_2 VALUES (0, 'Value'), (1, 'Value_1'); + +SELECT t1.id AS t1_id, t2.id AS t2_id, t1._sample_factor AS t1_sample_factor, t2._sample_factor AS t2_sample_factor +FROM test_table_join_1 AS t1 SAMPLE 1/2 INNER JOIN test_table_join_2 AS t2 SAMPLE 1/2 ON t1.id = t2.id +ORDER BY ALL; + +DROP TABLE test_table_join_1; +DROP TABLE test_table_join_2; diff --git a/parser/testdata/02381_analyzer_join_final/ast.json b/parser/testdata/02381_analyzer_join_final/ast.json new file mode 100644 index 000000000..b00c25efc --- /dev/null +++ b/parser/testdata/02381_analyzer_join_final/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001294906, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02381_analyzer_join_final/metadata.json b/parser/testdata/02381_analyzer_join_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02381_analyzer_join_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02381_analyzer_join_final/query.sql b/parser/testdata/02381_analyzer_join_final/query.sql new file mode 100644 index 000000000..0db81ac77 --- /dev/null +++ b/parser/testdata/02381_analyzer_join_final/query.sql @@ -0,0 +1,34 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table_join_1; +CREATE TABLE test_table_join_1 +( + id UInt64, + value UInt64 +) ENGINE=SummingMergeTree(value) +ORDER BY id +SAMPLE BY id; + +SYSTEM STOP MERGES test_table_join_1; +INSERT INTO test_table_join_1 
VALUES (0, 1), (1, 1); +INSERT INTO test_table_join_1 VALUES (0, 2); + +DROP TABLE IF EXISTS test_table_join_2; +CREATE TABLE test_table_join_2 +( + id UInt64, + value UInt64 +) ENGINE=SummingMergeTree(value) +ORDER BY id +SAMPLE BY id; + +SYSTEM STOP MERGES test_table_join_2; +INSERT INTO test_table_join_2 VALUES (0, 1), (1, 1); +INSERT INTO test_table_join_2 VALUES (1, 2); + +SELECT t1.id AS t1_id, t2.id AS t2_id, t1.value AS t1_value, t2.value AS t2_value +FROM test_table_join_1 AS t1 FINAL INNER JOIN test_table_join_2 AS t2 FINAL ON t1.id = t2.id +ORDER BY t1_id; + +DROP TABLE test_table_join_1; +DROP TABLE test_table_join_2; diff --git a/parser/testdata/02381_compress_marks_and_primary_key/ast.json b/parser/testdata/02381_compress_marks_and_primary_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02381_compress_marks_and_primary_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02381_compress_marks_and_primary_key/metadata.json b/parser/testdata/02381_compress_marks_and_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02381_compress_marks_and_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02381_compress_marks_and_primary_key/query.sql b/parser/testdata/02381_compress_marks_and_primary_key/query.sql new file mode 100644 index 000000000..3c413ad21 --- /dev/null +++ b/parser/testdata/02381_compress_marks_and_primary_key/query.sql @@ -0,0 +1,54 @@ +-- Tags: no-random-merge-tree-settings + +SET optimize_trivial_insert_select = 1; + +drop table if exists test_02381; +create table test_02381(a UInt64, b UInt64) ENGINE = MergeTree order by (a, b) SETTINGS compress_marks = false, compress_primary_key = false, ratio_of_defaults_for_sparse_serialization = 1, serialization_info_version = 'basic', auto_statistics_types = ''; +insert into test_02381 select number, number * 10 from system.numbers limit 1000000; + +drop table if exists test_02381_compress; +create table test_02381_compress(a UInt64, b UInt64) ENGINE = MergeTree order by (a, b) + SETTINGS compress_marks = true, compress_primary_key = true, marks_compression_codec = 'ZSTD(3)', primary_key_compression_codec = 'ZSTD(3)', marks_compress_block_size = 65536, primary_key_compress_block_size = 65536, ratio_of_defaults_for_sparse_serialization = 1, serialization_info_version = 'basic', auto_statistics_types = ''; +insert into test_02381_compress select number, number * 10 from system.numbers limit 1000000; + +select * from test_02381_compress where a = 1000 limit 1; +optimize table test_02381_compress final; +select * from test_02381_compress where a = 1000 limit 1; + +-- Compare the size of marks on disk +select table, sum(rows), sum(bytes_on_disk) sum_bytes, sum(marks_bytes) sum_marks_bytes, (sum_bytes - sum_marks_bytes) exclude_marks from system.parts_columns where active and database = currentDatabase() and table like 'test_02381%' group by table order by table; + +-- Switch to compressed and uncompressed +-- Test wide part +alter table test_02381 modify setting compress_marks=true, compress_primary_key=true; +insert into test_02381 select number, number * 10 from system.numbers limit 1000000; + +alter table test_02381_compress modify setting compress_marks=false, compress_primary_key=false; +insert into test_02381_compress select number, number * 10 from system.numbers limit 1000000; + +select * from test_02381_compress where a = 10000 limit 1; +optimize table 
test_02381_compress final; +select * from test_02381_compress where a = 10000 limit 1; + +select * from test_02381 where a = 10000 limit 1; +optimize table test_02381 final; +select * from test_02381 where a = 10000 limit 1; + +select table, sum(rows), sum(bytes_on_disk) sum_bytes, sum(marks_bytes) sum_marks_bytes, (sum_bytes - sum_marks_bytes) exclude_marks from system.parts_columns where active and database = currentDatabase() and table like 'test_02381%' group by table order by table; + +drop table if exists test_02381; +drop table if exists test_02381_compress; + +-- Test compact part +drop table if exists test_02381_compact; +create table test_02381_compact (a UInt64, b String) ENGINE = MergeTree order by (a, b) SETTINGS auto_statistics_types = ''; + +insert into test_02381_compact values (1, 'Hello'); +alter table test_02381_compact modify setting compress_marks = true, compress_primary_key = true; +insert into test_02381_compact values (2, 'World'); + +select * from test_02381_compact order by a; +optimize table test_02381_compact final; +select * from test_02381_compact order by a; + +drop table if exists test_02381_compact; diff --git a/parser/testdata/02381_parseDateTime64BestEffortUS/ast.json b/parser/testdata/02381_parseDateTime64BestEffortUS/ast.json new file mode 100644 index 000000000..466b213a5 --- /dev/null +++ b/parser/testdata/02381_parseDateTime64BestEffortUS/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001170766, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02381_parseDateTime64BestEffortUS/metadata.json b/parser/testdata/02381_parseDateTime64BestEffortUS/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02381_parseDateTime64BestEffortUS/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02381_parseDateTime64BestEffortUS/query.sql b/parser/testdata/02381_parseDateTime64BestEffortUS/query.sql new file mode 100644 index 000000000..510ed4b12 --- /dev/null +++ b/parser/testdata/02381_parseDateTime64BestEffortUS/query.sql @@ -0,0 +1,23 @@ +SET output_format_pretty_display_footer_column_names=0; +SELECT 'parseDateTime64BestEffortUS'; + +SELECT + s, + parseDateTime64BestEffortUS(s,3,'UTC') AS a +FROM +( + SELECT arrayJoin([ +'01-02-1930 12:00:00', +'12.02.1930 12:00:00', +'13/02/1930 12:00:00', +'02/25/1930 12:00:00' +]) AS s) +FORMAT PrettySpaceNoEscapes; + +SELECT ''; + +SELECT 'parseDateTime64BestEffortUSOrNull'; +SELECT parseDateTime64BestEffortUSOrNull('01/45/1925 16:00:00',3,'UTC'); + +SELECT 'parseDateTime64BestEffortUSOrZero'; +SELECT parseDateTime64BestEffortUSOrZero('01/45/1925 16:00:00',3,'UTC'); diff --git a/parser/testdata/02381_parse_array_of_tuples/ast.json b/parser/testdata/02381_parse_array_of_tuples/ast.json new file mode 100644 index 000000000..9d55c5f6b --- /dev/null +++ b/parser/testdata/02381_parse_array_of_tuples/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_parse_tuples (children 1)" + }, + { + "explain": " Identifier t_parse_tuples" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001582656, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/02381_parse_array_of_tuples/metadata.json b/parser/testdata/02381_parse_array_of_tuples/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02381_parse_array_of_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02381_parse_array_of_tuples/query.sql b/parser/testdata/02381_parse_array_of_tuples/query.sql new file mode 100644 index 000000000..51db5a0fe --- /dev/null +++ b/parser/testdata/02381_parse_array_of_tuples/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS t_parse_tuples; + +CREATE TABLE t_parse_tuples +( + id UInt32, + arr Array(Array(Tuple(c1 Int32, c2 UInt8))) +) +ENGINE = Memory; + +INSERT INTO t_parse_tuples VALUES (1, [[]]), (2, [[(500, -10)]]), (3, [[(500, '10')]]); + +SELECT * FROM t_parse_tuples ORDER BY id; + +DROP TABLE IF EXISTS t_parse_tuples; diff --git a/parser/testdata/02381_setting_value_auto/ast.json b/parser/testdata/02381_setting_value_auto/ast.json new file mode 100644 index 000000000..4d92a24cc --- /dev/null +++ b/parser/testdata/02381_setting_value_auto/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier value" + }, + { + "explain": " Identifier changed" + }, + { + "explain": " Identifier type" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.settings" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier name" + }, + { + "explain": " Literal 'insert_quorum'" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001396978, + "rows_read": 15, + "bytes_read": 556 + } +} diff --git a/parser/testdata/02381_setting_value_auto/metadata.json b/parser/testdata/02381_setting_value_auto/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02381_setting_value_auto/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02381_setting_value_auto/query.sql b/parser/testdata/02381_setting_value_auto/query.sql new file mode 100644 index 000000000..5b536a9d7 --- /dev/null +++ b/parser/testdata/02381_setting_value_auto/query.sql @@ -0,0 +1,10 @@ +SELECT value, changed, type FROM system.settings WHERE name = 'insert_quorum'; + +SET insert_quorum = 'auto'; +SELECT value, changed, type FROM system.settings WHERE name = 'insert_quorum'; + +SET insert_quorum = 0; +SELECT value, changed, type FROM system.settings WHERE name = 'insert_quorum'; + +SET insert_quorum = 1; +SELECT value, changed, type FROM system.settings WHERE name = 'insert_quorum'; diff --git a/parser/testdata/02382_analyzer_matcher_join_using/ast.json b/parser/testdata/02382_analyzer_matcher_join_using/ast.json new file mode 100644 index 000000000..c0989b81b --- /dev/null +++ b/parser/testdata/02382_analyzer_matcher_join_using/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00097813, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02382_analyzer_matcher_join_using/metadata.json b/parser/testdata/02382_analyzer_matcher_join_using/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02382_analyzer_matcher_join_using/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02382_analyzer_matcher_join_using/query.sql b/parser/testdata/02382_analyzer_matcher_join_using/query.sql new file mode 100644 index 000000000..6a0b58e7b --- /dev/null +++ b/parser/testdata/02382_analyzer_matcher_join_using/query.sql @@ -0,0 +1,74 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table_join_1; +CREATE TABLE test_table_join_1 +( + id UInt8, + value String +) ENGINE = TinyLog; + +DROP TABLE IF EXISTS test_table_join_2; +CREATE TABLE test_table_join_2 +( + id UInt16, + value String +) ENGINE = TinyLog; + +DROP TABLE IF EXISTS test_table_join_3; +CREATE TABLE test_table_join_3 +( + id UInt64, + value String +) ENGINE = TinyLog; + +INSERT INTO test_table_join_1 VALUES (0, 'Join_1_Value_0'); +INSERT INTO test_table_join_1 VALUES (1, 'Join_1_Value_1'); +INSERT INTO test_table_join_1 VALUES (2, 'Join_1_Value_2'); + +INSERT INTO test_table_join_2 VALUES (0, 'Join_2_Value_0'); +INSERT INTO test_table_join_2 VALUES (1, 'Join_2_Value_1'); +INSERT INTO test_table_join_2 VALUES (3, 'Join_2_Value_3'); + +INSERT INTO test_table_join_3 VALUES (0, 'Join_3_Value_0'); +INSERT INTO test_table_join_3 VALUES (1, 'Join_3_Value_1'); +INSERT INTO test_table_join_3 VALUES (4, 'Join_3_Value_4'); + +-- { echoOn } + +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) ORDER BY id, t1.value; + +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id, id, id) ORDER BY id, t1.value; -- { serverError BAD_ARGUMENTS } + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 USING (id) ORDER BY id, t1.value; + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 USING (id) ORDER BY id, t1.value; + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 USING (id) ORDER BY id, t1.value; + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) INNER JOIN test_table_join_3 AS t3 USING (id) ORDER BY id, t1.value; + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) LEFT JOIN test_table_join_3 AS t3 USING (id) ORDER BY id, t1.value; + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) RIGHT JOIN test_table_join_3 AS t3 USING (id) ORDER BY id, t1.value; + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 USING (id) FULL JOIN test_table_join_3 AS t3 USING (id) ORDER BY id, t1.value; + +-- { echoOff } + +DROP TABLE test_table_join_1; +DROP TABLE test_table_join_2; +DROP TABLE test_table_join_3; diff --git a/parser/testdata/02382_join_and_filtering_set/ast.json b/parser/testdata/02382_join_and_filtering_set/ast.json new file mode 100644 index 000000000..54f9d0bee --- /dev/null +++ b/parser/testdata/02382_join_and_filtering_set/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001025809, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02382_join_and_filtering_set/metadata.json b/parser/testdata/02382_join_and_filtering_set/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02382_join_and_filtering_set/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02382_join_and_filtering_set/query.sql b/parser/testdata/02382_join_and_filtering_set/query.sql new file mode 100644 index 000000000..69bb8e7c2 --- /dev/null +++ b/parser/testdata/02382_join_and_filtering_set/query.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (x UInt64, y UInt64) ENGINE = MergeTree ORDER BY y +AS SELECT sipHash64(number, 't1_x') % 100 AS x, sipHash64(number, 't1_y') % 100 AS y FROM numbers(100); + +CREATE TABLE t2 (x UInt64, y UInt64) ENGINE = MergeTree ORDER BY y +AS SELECT sipHash64(number, 't2_x') % 100 AS x, sipHash64(number, 't2_y') % 100 AS y FROM numbers(100); + +SET max_rows_in_set_to_optimize_join = 1000; +SET join_algorithm = 'full_sorting_merge'; + +-- different combinations of conditions on key/attribute columns for the left/right tables +SELECT count() FROM t1 JOIN t2 ON t1.x = t2.x; +SELECT count() FROM t1 JOIN t2 ON t1.x = t2.x WHERE t1.y % 2 == 0; +SELECT count() FROM t1 JOIN t2 ON t1.x = t2.x WHERE t1.x % 2 == 0; +SELECT count() FROM t1 JOIN t2 ON t1.x = t2.x WHERE t2.y % 2 == 0; +SELECT count() FROM t1 JOIN t2 ON t1.x = t2.x WHERE t2.x % 2 == 0; +SELECT count() FROM t1 JOIN t2 ON t1.x = t2.x WHERE t1.y % 2 == 0 AND t2.y % 2 == 0; +SELECT count() FROM t1 JOIN t2 ON t1.x = t2.x WHERE t1.x % 2 == 0 AND t2.x % 2 == 0 AND t1.y % 2 == 0 AND t2.y % 2 == 0; + +SELECT 'bug with constant columns in join keys'; + +SELECT * FROM ( SELECT 'a' AS key ) AS t1 +INNER JOIN ( SELECT 'a' AS key ) AS t2 +ON t1.key = t2.key +; + +SELECT count() > 1 FROM (EXPLAIN PIPELINE + SELECT * FROM ( SELECT materialize('a') AS key ) AS t1 + INNER JOIN ( SELECT materialize('a') AS key ) AS t2 + ON t1.key = t2.key +) WHERE explain ilike '%FilterBySetOnTheFlyTransform%' +; + +SELECT count() == 0 FROM (EXPLAIN PIPELINE + SELECT * FROM ( SELECT 'a' AS key ) AS t1 + INNER JOIN ( SELECT 'a' AS key ) AS t2 + ON t1.key = t2.key +) WHERE explain ilike '%FilterBySetOnTheFlyTransform%' +; + + diff --git a/parser/testdata/02383_analyzer_merge_tree_self_join/ast.json b/parser/testdata/02383_analyzer_merge_tree_self_join/ast.json new file mode 100644 index 000000000..19f85a038 --- /dev/null +++ b/parser/testdata/02383_analyzer_merge_tree_self_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001056193, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02383_analyzer_merge_tree_self_join/metadata.json b/parser/testdata/02383_analyzer_merge_tree_self_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02383_analyzer_merge_tree_self_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02383_analyzer_merge_tree_self_join/query.sql b/parser/testdata/02383_analyzer_merge_tree_self_join/query.sql new file mode 100644 index 000000000..fbd6fe4db --- /dev/null +++ b/parser/testdata/02383_analyzer_merge_tree_self_join/query.sql @@ -0,0 +1,44 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table_join_1; +CREATE TABLE test_table_join_1 +( + id UInt64, + value String +) ENGINE = MergeTree ORDER BY id; + +DROP TABLE IF EXISTS test_table_join_2; +CREATE TABLE test_table_join_2 +( + id UInt64, + value String +) ENGINE = MergeTree ORDER BY id; + +INSERT INTO 
test_table_join_1 VALUES (0, 'Join_1_Value_0'); +INSERT INTO test_table_join_1 VALUES (1, 'Join_1_Value_1'); +INSERT INTO test_table_join_1 VALUES (2, 'Join_1_Value_2'); + +INSERT INTO test_table_join_2 VALUES (0, 'Join_2_Value_0'); +INSERT INTO test_table_join_2 VALUES (1, 'Join_2_Value_1'); +INSERT INTO test_table_join_2 VALUES (3, 'Join_2_Value_3'); + +-- { echoOn } + +SELECT * FROM test_table_join_1 AS t1 INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id, t1.value; + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 LEFT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id, t1.value; + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 RIGHT JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id, t1.value; + +SELECT '--'; + +SELECT * FROM test_table_join_1 AS t1 FULL JOIN test_table_join_2 AS t2 ON t1.id = t2.id ORDER BY t1.id, t1.value; + +-- { echoOff } + +DROP TABLE test_table_join_1; +DROP TABLE test_table_join_2; diff --git a/parser/testdata/02383_array_signed_const_positive_index/ast.json b/parser/testdata/02383_array_signed_const_positive_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02383_array_signed_const_positive_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02383_array_signed_const_positive_index/metadata.json b/parser/testdata/02383_array_signed_const_positive_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02383_array_signed_const_positive_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02383_array_signed_const_positive_index/query.sql b/parser/testdata/02383_array_signed_const_positive_index/query.sql new file mode 100644 index 000000000..4f92215f4 --- /dev/null +++ b/parser/testdata/02383_array_signed_const_positive_index/query.sql @@ -0,0 +1,36 @@ +-- { echo } + +SELECT materialize([[13]])[1::Int8]; +SELECT materialize([['Hello']])[1::Int8]; +SELECT materialize([13])[1::Int8]; +SELECT materialize(['Hello'])[1::Int8]; + +SELECT materialize([[13], [14]])[2::Int8]; +SELECT materialize([['Hello'], ['world']])[2::Int8]; +SELECT materialize([13, 14])[2::Int8]; +SELECT materialize(['Hello', 'world'])[2::Int8]; + +SELECT materialize([[13], [14]])[3::Int8]; +SELECT materialize([['Hello'], ['world']])[3::Int8]; +SELECT materialize([13, 14])[3::Int8]; +SELECT materialize(['Hello', 'world'])[3::Int8]; + +SELECT materialize([[13], [14]])[0::Int8]; +SELECT materialize([['Hello'], ['world']])[0::Int8]; +SELECT materialize([13, 14])[0::Int8]; +SELECT materialize(['Hello', 'world'])[0::Int8]; + +SELECT materialize([[13], [14]])[-1]; +SELECT materialize([['Hello'], ['world']])[-1]; +SELECT materialize([13, 14])[-1]; +SELECT materialize(['Hello', 'world'])[-1]; + +SELECT materialize([[13], [14]])[-9223372036854775808]; +SELECT materialize([['Hello'], ['world']])[-9223372036854775808]; +SELECT materialize([13, 14])[-9223372036854775808]; +SELECT materialize(['Hello', 'world'])[-9223372036854775808]; + +SELECT materialize([[toNullable(13)], [14]])[-9223372036854775808]; +SELECT materialize([['Hello'], [toNullable('world')]])[-9223372036854775808]; +SELECT materialize([13, toNullable(14)])[-9223372036854775808]; +SELECT materialize(['Hello', toLowCardinality('world')])[-9223372036854775808]; diff --git a/parser/testdata/02383_schema_inference_hints/ast.json b/parser/testdata/02383_schema_inference_hints/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/02383_schema_inference_hints/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02383_schema_inference_hints/metadata.json b/parser/testdata/02383_schema_inference_hints/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02383_schema_inference_hints/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02383_schema_inference_hints/query.sql b/parser/testdata/02383_schema_inference_hints/query.sql new file mode 100644 index 000000000..350faaedc --- /dev/null +++ b/parser/testdata/02383_schema_inference_hints/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-fasttest +desc format(JSONEachRow, '{"x" : 1, "y" : "String", "z" : "0.0.0.0" }') settings schema_inference_hints='x UInt8, z IPv4'; +desc format(JSONEachRow, '{"x" : 1, "y" : "String"}\n{"z" : "0.0.0.0", "y" : "String2"}\n{"x" : 2}') settings schema_inference_hints='x UInt8, z IPv4'; +desc format(JSONEachRow, '{"x" : null}') settings schema_inference_hints='x Nullable(UInt32)'; +desc format(JSONEachRow, '{"x" : []}') settings schema_inference_hints='x Array(UInt32)'; +desc format(JSONEachRow, '{"x" : {}}') settings schema_inference_hints='x Map(String, String)'; + +desc format(CSV, '1,"String","0.0.0.0"') settings schema_inference_hints='c1 UInt8, c3 IPv4'; +desc format(CSV, '1,"String","0.0.0.0"') settings schema_inference_hints='x UInt8, z IPv4', column_names_for_schema_inference='x, y, z'; +desc format(CSV, '\\N') settings schema_inference_hints='x Nullable(UInt32)', column_names_for_schema_inference='x'; diff --git a/parser/testdata/02384_analyzer_dict_get_join_get/ast.json b/parser/testdata/02384_analyzer_dict_get_join_get/ast.json new file mode 100644 index 000000000..f0ba09895 --- /dev/null +++ b/parser/testdata/02384_analyzer_dict_get_join_get/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001424129, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02384_analyzer_dict_get_join_get/metadata.json b/parser/testdata/02384_analyzer_dict_get_join_get/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02384_analyzer_dict_get_join_get/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02384_analyzer_dict_get_join_get/query.sql b/parser/testdata/02384_analyzer_dict_get_join_get/query.sql new file mode 100644 index 000000000..a8ad5c4d9 --- /dev/null +++ b/parser/testdata/02384_analyzer_dict_get_join_get/query.sql @@ -0,0 +1,59 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +DROP DICTIONARY IF EXISTS test_dictionary; +CREATE DICTIONARY test_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +LAYOUT(FLAT()) +SOURCE(CLICKHOUSE(TABLE 'test_table')) +LIFETIME(0); + +SELECT 'Dictionary'; + +SELECT * FROM test_dictionary; + +SELECT dictGet('test_dictionary', 'value', toUInt64(0)); + +SELECT dictGet(test_dictionary, 'value', toUInt64(0)); + +WITH 'test_dictionary' AS dictionary SELECT dictGet(dictionary, 'value', toUInt64(0)); + +WITH 'invalid_dictionary' AS dictionary SELECT dictGet(dictionary, 'value', toUInt64(0)); -- { serverError BAD_ARGUMENTS } + +DROP DICTIONARY test_dictionary; +DROP TABLE test_table; + +DROP TABLE IF EXISTS test_table_join; +CREATE TABLE 
test_table_join +( + id UInt64, + value String +) ENGINE=Join(Any, Left, id); + +INSERT INTO test_table_join VALUES (0, 'Value'); + +SELECT 'JOIN'; + +SELECT * FROM test_table_join; + +SELECT joinGet('test_table_join', 'value', toUInt64(0)); + +SELECT joinGet(test_table_join, 'value', toUInt64(0)); + +WITH 'test_table_join' AS join_table SELECT joinGet(join_table, 'value', toUInt64(0)); + +WITH 'invalid_test_table_join' AS join_table SELECT joinGet(join_table, 'value', toUInt64(0)); -- { serverError UNKNOWN_TABLE } + +DROP TABLE test_table_join; diff --git a/parser/testdata/02384_decrypt_bad_arguments/ast.json b/parser/testdata/02384_decrypt_bad_arguments/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02384_decrypt_bad_arguments/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02384_decrypt_bad_arguments/metadata.json b/parser/testdata/02384_decrypt_bad_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02384_decrypt_bad_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02384_decrypt_bad_arguments/query.sql b/parser/testdata/02384_decrypt_bad_arguments/query.sql new file mode 100644 index 000000000..7a3042513 --- /dev/null +++ b/parser/testdata/02384_decrypt_bad_arguments/query.sql @@ -0,0 +1,2 @@ +-- Tags: no-fasttest +SELECT decrypt('aes-128-gcm', [1024, 65535, NULL, NULL, 9223372036854775807, 1048576, NULL], 'text', 'key', 'IV'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/02384_nullable_low_cardinality_as_dict_in_arrow/ast.json b/parser/testdata/02384_nullable_low_cardinality_as_dict_in_arrow/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02384_nullable_low_cardinality_as_dict_in_arrow/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02384_nullable_low_cardinality_as_dict_in_arrow/metadata.json b/parser/testdata/02384_nullable_low_cardinality_as_dict_in_arrow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02384_nullable_low_cardinality_as_dict_in_arrow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02384_nullable_low_cardinality_as_dict_in_arrow/query.sql b/parser/testdata/02384_nullable_low_cardinality_as_dict_in_arrow/query.sql new file mode 100644 index 000000000..975e7fb88 --- /dev/null +++ b/parser/testdata/02384_nullable_low_cardinality_as_dict_in_arrow/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-fasttest + +insert into function file(02384_data.arrow) select toLowCardinality(toNullable('abc')) as lc settings output_format_arrow_low_cardinality_as_dictionary=1, output_format_arrow_string_as_string=0, engine_file_truncate_on_insert=1; +desc file(02384_data.arrow); +select * from file(02384_data.arrow); +insert into function file(02384_data.arrow) select toLowCardinality(toNullable('abc')) as lc settings output_format_arrow_low_cardinality_as_dictionary=1, output_format_arrow_string_as_string=1, engine_file_truncate_on_insert=1; +desc file(02384_data.arrow); +select * from file(02384_data.arrow); diff --git a/parser/testdata/02385_analyzer_aliases_compound_expression/ast.json b/parser/testdata/02385_analyzer_aliases_compound_expression/ast.json new file mode 100644 index 000000000..136f8a090 --- /dev/null +++ b/parser/testdata/02385_analyzer_aliases_compound_expression/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + 
+ "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001217705, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02385_analyzer_aliases_compound_expression/metadata.json b/parser/testdata/02385_analyzer_aliases_compound_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02385_analyzer_aliases_compound_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02385_analyzer_aliases_compound_expression/query.sql b/parser/testdata/02385_analyzer_aliases_compound_expression/query.sql new file mode 100644 index 000000000..6c42607cc --- /dev/null +++ b/parser/testdata/02385_analyzer_aliases_compound_expression/query.sql @@ -0,0 +1,21 @@ +SET enable_analyzer = 1; + +SELECT cast(tuple(1, 'Value'), 'Tuple(first UInt64, second String)') AS value, value.first, value.second; + +SELECT '--'; + +WITH (x -> x + 1) AS lambda SELECT lambda(1); + +WITH (x -> x + 1) AS lambda SELECT lambda.nested(1); -- { serverError UNKNOWN_IDENTIFIER } + +SELECT '--'; + +SELECT * FROM (SELECT 1) AS t1, t1 AS t2; + +SELECT '--'; + +SELECT * FROM t1 AS t2, (SELECT 1) AS t1; + +SELECT * FROM (SELECT 1) AS t1, t1.nested AS t2; -- { serverError UNKNOWN_IDENTIFIER } + +SELECT * FROM t1.nested AS t2, (SELECT 1) AS t1; -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/02385_profile_events_overflow/ast.json b/parser/testdata/02385_profile_events_overflow/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02385_profile_events_overflow/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02385_profile_events_overflow/metadata.json b/parser/testdata/02385_profile_events_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02385_profile_events_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02385_profile_events_overflow/query.sql b/parser/testdata/02385_profile_events_overflow/query.sql new file mode 100644 index 000000000..9006241dd --- /dev/null +++ b/parser/testdata/02385_profile_events_overflow/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-parallel +SET system_events_show_zero_values = 1; + +CREATE TEMPORARY TABLE t (x UInt64); +INSERT INTO t SELECT value FROM system.events WHERE event = 'OverflowBreak'; +SELECT count() FROM system.numbers FORMAT Null SETTINGS max_rows_to_read = 1, read_overflow_mode = 'break'; +INSERT INTO t SELECT value FROM system.events WHERE event = 'OverflowBreak'; +SELECT max(x) - min(x) FROM t; + +TRUNCATE TABLE t; +INSERT INTO t SELECT value FROM system.events WHERE event = 'OverflowThrow'; +SELECT count() FROM system.numbers SETTINGS max_rows_to_read = 1, read_overflow_mode = 'throw'; -- { serverError TOO_MANY_ROWS } +INSERT INTO t SELECT value FROM system.events WHERE event = 'OverflowThrow'; +SELECT max(x) - min(x) FROM t; + +TRUNCATE TABLE t; +INSERT INTO t SELECT value FROM system.events WHERE event = 'OverflowAny'; +SELECT number, count() FROM numbers(100000) GROUP BY number FORMAT Null SETTINGS max_rows_to_group_by = 1, group_by_overflow_mode = 'any'; +INSERT INTO t SELECT value FROM system.events WHERE event = 'OverflowAny'; +SELECT max(x) - min(x) FROM t; diff --git a/parser/testdata/02386_analyzer_in_function_nested_subqueries/ast.json b/parser/testdata/02386_analyzer_in_function_nested_subqueries/ast.json new file mode 100644 index 000000000..9cd5c252e --- /dev/null +++ 
b/parser/testdata/02386_analyzer_in_function_nested_subqueries/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001364076, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02386_analyzer_in_function_nested_subqueries/metadata.json b/parser/testdata/02386_analyzer_in_function_nested_subqueries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02386_analyzer_in_function_nested_subqueries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02386_analyzer_in_function_nested_subqueries/query.sql b/parser/testdata/02386_analyzer_in_function_nested_subqueries/query.sql new file mode 100644 index 000000000..64d24aae1 --- /dev/null +++ b/parser/testdata/02386_analyzer_in_function_nested_subqueries/query.sql @@ -0,0 +1,3 @@ +SET enable_analyzer = 1; + +SELECT (NULL IN (SELECT 9223372036854775806 IN (SELECT 65536), inf, NULL IN (NULL))) IN (SELECT NULL IN (NULL)); diff --git a/parser/testdata/02386_set_columns_order/ast.json b/parser/testdata/02386_set_columns_order/ast.json new file mode 100644 index 000000000..19cdc94c8 --- /dev/null +++ b/parser/testdata/02386_set_columns_order/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery userid_set (children 1)" + }, + { + "explain": " Identifier userid_set" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000992972, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02386_set_columns_order/metadata.json b/parser/testdata/02386_set_columns_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02386_set_columns_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02386_set_columns_order/query.sql b/parser/testdata/02386_set_columns_order/query.sql new file mode 100644 index 000000000..dab5ad305 --- /dev/null +++ b/parser/testdata/02386_set_columns_order/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS userid_set; +DROP TABLE IF EXISTS userid_test; +DROP TABLE IF EXISTS userid_set2; + +CREATE TABLE userid_set(userid UInt64, name String) ENGINE = Set; +INSERT INTO userid_set VALUES (1, 'Mary'),(2, 'Jane'),(3, 'Mary'),(4, 'Jack'); + +CREATE TABLE userid_test (userid UInt64, name String) ENGINE = MergeTree() PARTITION BY (intDiv(userid, 500)) ORDER BY (userid) SETTINGS index_granularity = 8192; +INSERT INTO userid_test VALUES (1, 'Jack'),(2, 'Mary'),(3, 'Mary'),(4, 'John'),(5, 'Mary'); + +SELECT * FROM userid_test WHERE (userid, name) IN (userid_set); + +CREATE TABLE userid_set2(userid UInt64, name String, birthdate Date) ENGINE = Set; +INSERT INTO userid_set2 values (1,'John', '1990-01-01'); + +WITH 'John' AS name, toDate('1990-01-01') AS birthdate +SELECT * FROM numbers(10) +WHERE (number, name, birthdate) IN (userid_set2); + +DROP TABLE userid_set; +DROP TABLE userid_test; +DROP TABLE userid_set2; diff --git a/parser/testdata/02387_analyzer_cte/ast.json b/parser/testdata/02387_analyzer_cte/ast.json new file mode 100644 index 000000000..43f7615e3 --- /dev/null +++ b/parser/testdata/02387_analyzer_cte/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001198504, + "rows_read": 1, + 
"bytes_read": 11 + } +} diff --git a/parser/testdata/02387_analyzer_cte/metadata.json b/parser/testdata/02387_analyzer_cte/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02387_analyzer_cte/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02387_analyzer_cte/query.sql b/parser/testdata/02387_analyzer_cte/query.sql new file mode 100644 index 000000000..95da7be86 --- /dev/null +++ b/parser/testdata/02387_analyzer_cte/query.sql @@ -0,0 +1,26 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY tuple(); + +INSERT INTO test_table VALUES (0, 'Value'); + +WITH cte_subquery AS (SELECT 1) SELECT * FROM cte_subquery; + +SELECT '--'; + +WITH cte_subquery AS (SELECT * FROM test_table) SELECT * FROM cte_subquery; + +SELECT '--'; + +WITH cte_subquery AS (SELECT 1 UNION DISTINCT SELECT 1) SELECT * FROM cte_subquery; + +SELECT '--'; + +WITH cte_subquery AS (SELECT * FROM test_table UNION DISTINCT SELECT * FROM test_table) SELECT * FROM cte_subquery; + +DROP TABLE test_table; diff --git a/parser/testdata/02387_parse_date_as_datetime/ast.json b/parser/testdata/02387_parse_date_as_datetime/ast.json new file mode 100644 index 000000000..0f9f6b521 --- /dev/null +++ b/parser/testdata/02387_parse_date_as_datetime/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 2)" + }, + { + "explain": " Identifier test" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration i (children 1)" + }, + { + "explain": " DataType Int64" + }, + { + "explain": " ColumnDeclaration d (children 1)" + }, + { + "explain": " DataType DateTime" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001028645, + "rows_read": 8, + "bytes_read": 279 + } +} diff --git a/parser/testdata/02387_parse_date_as_datetime/metadata.json b/parser/testdata/02387_parse_date_as_datetime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02387_parse_date_as_datetime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02387_parse_date_as_datetime/query.sql b/parser/testdata/02387_parse_date_as_datetime/query.sql new file mode 100644 index 000000000..24d367e56 --- /dev/null +++ b/parser/testdata/02387_parse_date_as_datetime/query.sql @@ -0,0 +1,19 @@ +CREATE TEMPORARY TABLE test (`i` Int64, `d` DateTime); + +INSERT INTO test FORMAT JSONEachRow {"i": 123, "d": "2022-05-03"}; + +INSERT INTO test FORMAT JSONEachRow {"i": 456, "d": "2022-05-03 01:02:03"}; + +SELECT * FROM test ORDER BY i; + +DROP TABLE test; + +CREATE TEMPORARY TABLE test (`i` Int64, `d` DateTime64); + +INSERT INTO test FORMAT JSONEachRow {"i": 123, "d": "2022-05-03"}; + +INSERT INTO test FORMAT JSONEachRow {"i": 456, "d": "2022-05-03 01:02:03"}; + +SELECT * FROM test ORDER BY i; + +DROP TABLE test; diff --git a/parser/testdata/02388_analyzer_recursive_lambda/ast.json b/parser/testdata/02388_analyzer_recursive_lambda/ast.json new file mode 100644 index 000000000..a4436e65c --- /dev/null +++ b/parser/testdata/02388_analyzer_recursive_lambda/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000948919, + "rows_read": 1, 
+ "bytes_read": 11 + } +} diff --git a/parser/testdata/02388_analyzer_recursive_lambda/metadata.json b/parser/testdata/02388_analyzer_recursive_lambda/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02388_analyzer_recursive_lambda/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02388_analyzer_recursive_lambda/query.sql b/parser/testdata/02388_analyzer_recursive_lambda/query.sql new file mode 100644 index 000000000..31d6f91a3 --- /dev/null +++ b/parser/testdata/02388_analyzer_recursive_lambda/query.sql @@ -0,0 +1,5 @@ +SET enable_analyzer = 1; + +WITH x -> plus(lambda(1), x) AS lambda SELECT lambda(1048576); -- { serverError UNSUPPORTED_METHOD }; + +WITH lambda(lambda(plus(x, x, -1)), tuple(x), x + 2147483646) AS lambda, x -> plus(lambda(1), x, 2) AS lambda SELECT 1048576, lambda(1048576); -- { serverError UNSUPPORTED_METHOD }; diff --git a/parser/testdata/02388_conversion_from_string_with_datetime64_to_date_and_date32/ast.json b/parser/testdata/02388_conversion_from_string_with_datetime64_to_date_and_date32/ast.json new file mode 100644 index 000000000..ed4ad68b2 --- /dev/null +++ b/parser/testdata/02388_conversion_from_string_with_datetime64_to_date_and_date32/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2022-08-22 01:02:03'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001215042, + "rows_read": 7, + "bytes_read": 272 + } +} diff --git a/parser/testdata/02388_conversion_from_string_with_datetime64_to_date_and_date32/metadata.json b/parser/testdata/02388_conversion_from_string_with_datetime64_to_date_and_date32/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02388_conversion_from_string_with_datetime64_to_date_and_date32/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02388_conversion_from_string_with_datetime64_to_date_and_date32/query.sql b/parser/testdata/02388_conversion_from_string_with_datetime64_to_date_and_date32/query.sql new file mode 100644 index 000000000..4fa2b024d --- /dev/null +++ b/parser/testdata/02388_conversion_from_string_with_datetime64_to_date_and_date32/query.sql @@ -0,0 +1,33 @@ +SELECT toDate('2022-08-22 01:02:03'); +SELECT toDate32('2022-08-22 01:02:03'); + +SELECT toDate('2022-08-22 01:02:03.1'); +SELECT toDate32('2022-08-22 01:02:03.1'); + +SELECT toDate('2022-08-22 01:02:03.123456'); +SELECT toDate32('2022-08-22 01:02:03.123456'); + +SELECT toDate('2022-08-22T01:02:03'); +SELECT toDate32('2022-08-22T01:02:03'); + +SELECT toDate('2022-08-22T01:02:03.1'); +SELECT toDate32('2022-08-22T01:02:03.1'); + +SELECT toDate('2022-08-22T01:02:03.123456'); +SELECT toDate32('2022-08-22T01:02:03.123456'); + + +SELECT toDate('2022-08-22+01:02:03'); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDate32('2022-08-22+01:02:03'); -- { serverError CANNOT_PARSE_TEXT } + +SELECT toDate('2022-08-22 01:02:0'); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDate32('2022-08-22 01:02:0'); -- { serverError CANNOT_PARSE_TEXT } + +SELECT toDate('2022-08-22 01:02:03.'); -- { serverError CANNOT_PARSE_TEXT } +SELECT 
toDate32('2022-08-22 01:02:03.'); -- { serverError CANNOT_PARSE_TEXT } + +SELECT toDate('2022-08-22 01:02:03.111a'); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDate32('2022-08-22 01:02:03.2b'); -- { serverError CANNOT_PARSE_TEXT } + +SELECT toDate('2022-08-22 01:02:03.a'); -- { serverError CANNOT_PARSE_TEXT } +SELECT toDate32('2022-08-22 01:02:03.b'); -- { serverError CANNOT_PARSE_TEXT } diff --git a/parser/testdata/02389_analyzer_nested_lambda/ast.json b/parser/testdata/02389_analyzer_nested_lambda/ast.json new file mode 100644 index 000000000..174e751d0 --- /dev/null +++ b/parser/testdata/02389_analyzer_nested_lambda/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001527776, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02389_analyzer_nested_lambda/metadata.json b/parser/testdata/02389_analyzer_nested_lambda/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02389_analyzer_nested_lambda/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02389_analyzer_nested_lambda/query.sql b/parser/testdata/02389_analyzer_nested_lambda/query.sql new file mode 100644 index 000000000..a0a120b12 --- /dev/null +++ b/parser/testdata/02389_analyzer_nested_lambda/query.sql @@ -0,0 +1,141 @@ +SET enable_analyzer = 1; +SET max_execution_time = 300; + +-- { echoOn } + +SELECT arrayMap(x -> x + arrayMap(x -> x + 1, [1])[1], [1,2,3]); + +SELECT '--'; + +SELECT arrayMap(x -> x + arrayMap(x -> 5, [1])[1], [1,2,3]); + +SELECT '--'; + +SELECT 5 AS constant, arrayMap(x -> x + arrayMap(x -> constant, [1])[1], [1,2,3]); + +SELECT '--'; + +SELECT arrayMap(x -> x + arrayMap(x -> x, [1])[1], [1,2,3]); + +SELECT '--'; + +SELECT arrayMap(x -> x + arrayMap(y -> x + y, [1])[1], [1,2,3]); + +SELECT '--'; + +SELECT arrayMap(x -> x + arrayMap(x -> (SELECT 5), [1])[1], [1,2,3]); + +SELECT '--'; + +SELECT (SELECT 5) AS subquery, arrayMap(x -> x + arrayMap(x -> subquery, [1])[1], [1,2,3]); + +SELECT '--'; + +SELECT arrayMap(x -> x + arrayMap(x -> (SELECT 5 UNION DISTINCT SELECT 5), [1])[1], [1,2,3]); + +SELECT '--'; + +SELECT (SELECT 5 UNION DISTINCT SELECT 5) AS subquery, arrayMap(x -> x + arrayMap(x -> subquery, [1])[1], [1,2,3]); + +SELECT '--'; + +WITH x -> toString(x) AS lambda SELECT arrayMap(x -> lambda(x), [1,2,3]); + +SELECT '--'; + +WITH x -> toString(x) AS lambda SELECT arrayMap(x -> arrayMap(y -> concat(lambda(x), '_', lambda(y)), [1,2,3]), [1,2,3]); + +SELECT '--'; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT arrayMap(x -> x + arrayMap(x -> id, [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT arrayMap(x -> x + arrayMap(x -> x + id, [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT arrayMap(x -> x + arrayMap(y -> x + y + id, [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT id AS id_alias, arrayMap(x -> x + arrayMap(y -> x + y + id_alias, [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT arrayMap(x -> x + arrayMap(x -> 5, [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT 5 AS constant, arrayMap(x -> x + arrayMap(x -> constant, [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT 5 AS constant, arrayMap(x -> x + arrayMap(x -> x + constant, [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + 
+SELECT 5 AS constant, arrayMap(x -> x + arrayMap(x -> x + id + constant, [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT 5 AS constant, arrayMap(x -> x + arrayMap(y -> x + y + id + constant, [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT arrayMap(x -> x + arrayMap(x -> id + (SELECT id FROM test_table), [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT arrayMap(x -> id + arrayMap(x -> id + (SELECT id FROM test_table), [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT arrayMap(x -> id + arrayMap(x -> id + (SELECT id FROM test_table UNION DISTINCT SELECT id FROM test_table), [1])[1], [1,2,3]) FROM test_table; + +SELECT '--'; + +WITH x -> toString(id) AS lambda SELECT arrayMap(x -> lambda(x), [1,2,3]) FROM test_table; + +SELECT '--'; + +WITH x -> toString(id) AS lambda SELECT arrayMap(x -> arrayMap(y -> lambda(y), [1,2,3]), [1,2,3]) FROM test_table; + +SELECT '--'; + +WITH x -> toString(id) AS lambda SELECT arrayMap(x -> arrayMap(y -> concat(lambda(x), '_', lambda(y)), [1,2,3]), [1,2,3]) FROM test_table; + +SELECT '--'; + +SELECT arrayMap(x -> concat(concat(concat(concat(concat(toString(id), '___\0_______\0____'), toString(id), concat(concat(toString(id), ''), toString(id)), toString(id)), + arrayMap(x -> concat(concat(concat(concat(toString(id), ''), toString(id)), toString(id), '___\0_______\0____'), toString(id)) AS lambda, [NULL, inf, 1, 1]), + concat(toString(id), NULL), toString(id)), toString(id))) AS lambda, [NULL, NULL, 2147483647]) +FROM test_table WHERE concat(concat(concat(toString(id), '___\0_______\0____'), toString(id)), concat(toString(id), NULL), toString(id)); + +SELECT '--'; + +SELECT arrayMap(x -> splitByChar(toString(id), arrayMap(x -> toString(1), [NULL])), [NULL]) FROM test_table; -- { serverError ILLEGAL_COLUMN }; + +DROP TABLE test_table; + +-- { echoOff } + +SELECT + groupArray(number) AS counts, + arraySum(arrayMap(x -> (x + 1), counts)) as hello, + arrayMap(x -> (x / hello), counts) AS res +FROM numbers(1000000) FORMAT Null; + +SELECT + arrayWithConstant(pow(10,5), 1) AS nums, + arrayMap(x -> x, nums) AS m, + arrayMap(x -> x + arraySum(m), m) AS res FORMAT Null; diff --git a/parser/testdata/02391_hashed_dictionary_shards/ast.json b/parser/testdata/02391_hashed_dictionary_shards/ast.json new file mode 100644 index 000000000..185d4b455 --- /dev/null +++ b/parser/testdata/02391_hashed_dictionary_shards/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000999347, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02391_hashed_dictionary_shards/metadata.json b/parser/testdata/02391_hashed_dictionary_shards/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02391_hashed_dictionary_shards/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02391_hashed_dictionary_shards/query.sql b/parser/testdata/02391_hashed_dictionary_shards/query.sql new file mode 100644 index 000000000..018f6b2cf --- /dev/null +++ b/parser/testdata/02391_hashed_dictionary_shards/query.sql @@ -0,0 +1,113 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + key UInt64, + value UInt16 +) ENGINE=Memory() AS SELECT number, number FROM numbers(1e5); + +DROP TABLE IF EXISTS test_table_nullable; +CREATE TABLE test_table_nullable +( + key 
UInt64, + value Nullable(UInt16) +) ENGINE=Memory() AS SELECT number, number % 2 == 0 ? NULL : number FROM numbers(1e5); + +DROP TABLE IF EXISTS test_table_string; +CREATE TABLE test_table_string +( + key String, + value UInt16 +) ENGINE=Memory() AS SELECT 'foo' || number::String, number FROM numbers(1e5); + +DROP TABLE IF EXISTS test_table_complex; +CREATE TABLE test_table_complex +( + key_1 UInt64, + key_2 UInt64, + value UInt16 +) ENGINE=Memory() AS SELECT number, number, number FROM numbers(1e5); + +DROP DICTIONARY IF EXISTS test_dictionary_10_shards; +CREATE DICTIONARY test_dictionary_10_shards +( + key UInt64, + value UInt16 +) PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE test_table)) +LAYOUT(SPARSE_HASHED(SHARDS 10)) +LIFETIME(0); + +SHOW CREATE test_dictionary_10_shards; +SYSTEM RELOAD DICTIONARY test_dictionary_10_shards; +SELECT element_count FROM system.dictionaries WHERE database = currentDatabase() AND name = 'test_dictionary_10_shards'; +SELECT count() FROM test_table WHERE dictGet('test_dictionary_10_shards', 'value', key) != value; + +DROP DICTIONARY test_dictionary_10_shards; + +DROP DICTIONARY IF EXISTS test_dictionary_10_shards_nullable; +CREATE DICTIONARY test_dictionary_10_shards_nullable +( + key UInt64, + value Nullable(UInt16) +) PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE test_table_nullable)) +LAYOUT(SPARSE_HASHED(SHARDS 10)) +LIFETIME(0); + +SHOW CREATE test_dictionary_10_shards_nullable; +SYSTEM RELOAD DICTIONARY test_dictionary_10_shards_nullable; +SELECT element_count FROM system.dictionaries WHERE database = currentDatabase() AND name = 'test_dictionary_10_shards_nullable'; +SELECT count() FROM test_table_nullable WHERE dictGet('test_dictionary_10_shards_nullable', 'value', key) != value; + +DROP DICTIONARY test_dictionary_10_shards_nullable; + +DROP DICTIONARY IF EXISTS test_complex_dictionary_10_shards; +CREATE DICTIONARY test_complex_dictionary_10_shards +( + key_1 UInt64, + key_2 UInt64, + value UInt16 +) PRIMARY KEY key_1, key_2 +SOURCE(CLICKHOUSE(TABLE test_table_complex)) +LAYOUT(COMPLEX_KEY_SPARSE_HASHED(SHARDS 10)) +LIFETIME(0); + +SYSTEM RELOAD DICTIONARY test_complex_dictionary_10_shards; +SHOW CREATE test_complex_dictionary_10_shards; +SELECT element_count FROM system.dictionaries WHERE database = currentDatabase() and name = 'test_complex_dictionary_10_shards'; +SELECT count() FROM test_table_complex WHERE dictGet('test_complex_dictionary_10_shards', 'value', (key_1, key_2)) != value; + +DROP DICTIONARY test_complex_dictionary_10_shards; + +DROP DICTIONARY IF EXISTS test_dictionary_10_shards_string; +CREATE DICTIONARY test_dictionary_10_shards_string +( + key String, + value UInt16 +) PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE test_table_string)) +LAYOUT(SPARSE_HASHED(SHARDS 10)) +LIFETIME(0); + +SYSTEM RELOAD DICTIONARY test_dictionary_10_shards_string; + +DROP DICTIONARY test_dictionary_10_shards_string; + +DROP DICTIONARY IF EXISTS test_dictionary_10_shards_incremental; +CREATE DICTIONARY test_dictionary_10_shards_incremental +( + key UInt64, + value UInt16 +) PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE test_table_last_access UPDATE_FIELD last_access)) +LAYOUT(SPARSE_HASHED(SHARDS 10)) +LIFETIME(0); + +SYSTEM RELOAD DICTIONARY test_dictionary_10_shards_incremental; -- { serverError BAD_ARGUMENTS } + +DROP DICTIONARY test_dictionary_10_shards_incremental; + +DROP TABLE test_table; +DROP TABLE test_table_nullable; +DROP TABLE test_table_string; +DROP TABLE test_table_complex; diff --git a/parser/testdata/02391_recursive_buffer/ast.json 
b/parser/testdata/02391_recursive_buffer/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02391_recursive_buffer/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02391_recursive_buffer/metadata.json b/parser/testdata/02391_recursive_buffer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02391_recursive_buffer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02391_recursive_buffer/query.sql b/parser/testdata/02391_recursive_buffer/query.sql new file mode 100644 index 000000000..aff5373e2 --- /dev/null +++ b/parser/testdata/02391_recursive_buffer/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-parallel +-- because of system.tables poisoning + +DROP TABLE IF EXISTS test; +CREATE TABLE test (key UInt32) Engine = Buffer(currentDatabase(), test, 16, 10, 100, 10000, 1000000, 10000000, 100000000); +SELECT * FROM test; -- { serverError INFINITE_LOOP } +SELECT * FROM system.tables WHERE table = 'test' AND database = currentDatabase() FORMAT Null; +DROP TABLE test; + +DROP TABLE IF EXISTS test1; +DROP TABLE IF EXISTS test2; +CREATE TABLE test1 (key UInt32) Engine = Buffer(currentDatabase(), test2, 16, 10, 100, 10000, 1000000, 10000000, 100000000); +CREATE TABLE test2 (key UInt32) Engine = Buffer(currentDatabase(), test1, 16, 10, 100, 10000, 1000000, 10000000, 100000000); -- { serverError INFINITE_LOOP } +DROP TABLE test1; diff --git a/parser/testdata/02392_every_setting_must_have_documentation/ast.json b/parser/testdata/02392_every_setting_must_have_documentation/ast.json new file mode 100644 index 000000000..6fe3d5eed --- /dev/null +++ b/parser/testdata/02392_every_setting_must_have_documentation/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier name" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.settings" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function length (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier description" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001421981, + "rows_read": 15, + "bytes_read": 581 + } +} diff --git a/parser/testdata/02392_every_setting_must_have_documentation/metadata.json b/parser/testdata/02392_every_setting_must_have_documentation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02392_every_setting_must_have_documentation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02392_every_setting_must_have_documentation/query.sql b/parser/testdata/02392_every_setting_must_have_documentation/query.sql new file mode 100644 index 000000000..87c6ccaa0 --- /dev/null +++ b/parser/testdata/02392_every_setting_must_have_documentation/query.sql @@ -0,0 +1 @@ +SELECT name FROM system.settings WHERE length(description) < 10; diff --git a/parser/testdata/02393_every_metric_must_have_documentation/ast.json 
b/parser/testdata/02393_every_metric_must_have_documentation/ast.json new file mode 100644 index 000000000..6f6282deb --- /dev/null +++ b/parser/testdata/02393_every_metric_must_have_documentation/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001213665, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02393_every_metric_must_have_documentation/metadata.json b/parser/testdata/02393_every_metric_must_have_documentation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02393_every_metric_must_have_documentation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02393_every_metric_must_have_documentation/query.sql b/parser/testdata/02393_every_metric_must_have_documentation/query.sql new file mode 100644 index 000000000..cf98b6b2c --- /dev/null +++ b/parser/testdata/02393_every_metric_must_have_documentation/query.sql @@ -0,0 +1,2 @@ +SET system_events_show_zero_values = true; +SELECT metric FROM system.metrics WHERE length(description) < 10; diff --git a/parser/testdata/02394_every_profile_event_must_have_documentation/ast.json b/parser/testdata/02394_every_profile_event_must_have_documentation/ast.json new file mode 100644 index 000000000..cc74074c7 --- /dev/null +++ b/parser/testdata/02394_every_profile_event_must_have_documentation/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001103444, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02394_every_profile_event_must_have_documentation/metadata.json b/parser/testdata/02394_every_profile_event_must_have_documentation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02394_every_profile_event_must_have_documentation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02394_every_profile_event_must_have_documentation/query.sql b/parser/testdata/02394_every_profile_event_must_have_documentation/query.sql new file mode 100644 index 000000000..1dad6c432 --- /dev/null +++ b/parser/testdata/02394_every_profile_event_must_have_documentation/query.sql @@ -0,0 +1,2 @@ +SET system_events_show_zero_values = true; +SELECT event FROM system.events WHERE length(description) < 10; diff --git a/parser/testdata/02395_every_merge_tree_setting_must_have_documentation/ast.json b/parser/testdata/02395_every_merge_tree_setting_must_have_documentation/ast.json new file mode 100644 index 000000000..72e5101d5 --- /dev/null +++ b/parser/testdata/02395_every_merge_tree_setting_must_have_documentation/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier name" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.merge_tree_settings" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": " ExpressionList 
(children 2)" + }, + { + "explain": " Function length (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier description" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001092263, + "rows_read": 15, + "bytes_read": 592 + } +} diff --git a/parser/testdata/02395_every_merge_tree_setting_must_have_documentation/metadata.json b/parser/testdata/02395_every_merge_tree_setting_must_have_documentation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02395_every_merge_tree_setting_must_have_documentation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02395_every_merge_tree_setting_must_have_documentation/query.sql b/parser/testdata/02395_every_merge_tree_setting_must_have_documentation/query.sql new file mode 100644 index 000000000..6a7d4ad51 --- /dev/null +++ b/parser/testdata/02395_every_merge_tree_setting_must_have_documentation/query.sql @@ -0,0 +1 @@ +SELECT name FROM system.merge_tree_settings WHERE length(description) < 10; diff --git a/parser/testdata/02398_subquery_where_pushdown_and_limit_offset/ast.json b/parser/testdata/02398_subquery_where_pushdown_and_limit_offset/ast.json new file mode 100644 index 000000000..4c455014c --- /dev/null +++ b/parser/testdata/02398_subquery_where_pushdown_and_limit_offset/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000946047, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02398_subquery_where_pushdown_and_limit_offset/metadata.json b/parser/testdata/02398_subquery_where_pushdown_and_limit_offset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02398_subquery_where_pushdown_and_limit_offset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02398_subquery_where_pushdown_and_limit_offset/query.sql b/parser/testdata/02398_subquery_where_pushdown_and_limit_offset/query.sql new file mode 100644 index 000000000..d63c04ec7 --- /dev/null +++ b/parser/testdata/02398_subquery_where_pushdown_and_limit_offset/query.sql @@ -0,0 +1,11 @@ +drop table if exists t; +create table t engine=Log as select * from system.numbers limit 20; + +set enable_optimize_predicate_expression=1; +select number from (select number from t order by number desc offset 3) where number < 18; +explain syntax select number from (select number from t order by number desc offset 3) where number < 18; + +select number from (select number from t order by number limit 5) where number % 2; +explain syntax select number from (select number from t order by number limit 5) where number % 2; + +drop table t; diff --git a/parser/testdata/02399_merge_tree_mutate_in_partition/ast.json b/parser/testdata/02399_merge_tree_mutate_in_partition/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02399_merge_tree_mutate_in_partition/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02399_merge_tree_mutate_in_partition/metadata.json b/parser/testdata/02399_merge_tree_mutate_in_partition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02399_merge_tree_mutate_in_partition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02399_merge_tree_mutate_in_partition/query.sql b/parser/testdata/02399_merge_tree_mutate_in_partition/query.sql new file mode 100644 index 000000000..c56acf9c3 --- /dev/null +++ b/parser/testdata/02399_merge_tree_mutate_in_partition/query.sql @@ -0,0 +1,18 @@ + +drop table if exists mt; +drop table if exists m; + +create table mt (p int, n int) engine=MergeTree order by tuple() partition by p; +create table m (n int) engine=Memory; +insert into mt values (1, 1), (2, 1); +insert into mt values (1, 2), (2, 2); +select *, _part from mt order by _part; + +alter table mt update n = n + (n not in m) in partition id '1' where 1 settings mutations_sync=1; +drop table m; +optimize table mt final; + +select mutation_id, command, parts_to_do_names, parts_to_do, is_done from system.mutations where database=currentDatabase(); +select * from mt order by p, n; + +drop table mt; diff --git a/parser/testdata/02400_create_table_on_cluster_normalization/ast.json b/parser/testdata/02400_create_table_on_cluster_normalization/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02400_create_table_on_cluster_normalization/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02400_create_table_on_cluster_normalization/metadata.json b/parser/testdata/02400_create_table_on_cluster_normalization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02400_create_table_on_cluster_normalization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02400_create_table_on_cluster_normalization/query.sql b/parser/testdata/02400_create_table_on_cluster_normalization/query.sql new file mode 100644 index 000000000..85831a21b --- /dev/null +++ b/parser/testdata/02400_create_table_on_cluster_normalization/query.sql @@ -0,0 +1,26 @@ +-- Tags: no-replicated-database +-- Tag no-replicated-database: ON CLUSTER is not allowed +drop table if exists local_t_l5ydey; + +create table local_t_l5ydey on cluster test_shard_localhost ( + c_qv5rv INTEGER , + c_rutjs4 INTEGER , + c_wmj INTEGER , + c_m3 TEXT NOT NULL, + primary key(c_qv5rv) +) engine=ReplicatedMergeTree('/clickhouse/tables/test_' || currentDatabase() || '/{shard}/local_t_l5ydey', '{replica}'); + +create table t_l5ydey on cluster test_shard_localhost as local_t_l5ydey + engine=Distributed('test_shard_localhost', currentDatabase(),'local_t_l5ydey', rand()); + +insert into local_t_l5ydey values (1, 2, 3, '4'); +insert into t_l5ydey values (5, 6, 7, '8'); +system flush distributed t_l5ydey; + +select * from t_l5ydey order by c_qv5rv; +show create t_l5ydey; + +-- Check that we get the correct error code when creating a database with the same path as an existing table +create database local_t_l5ydey engine=Replicated('/clickhouse/tables/test_' || currentDatabase() || '/{shard}/local_t_l5ydey', '1', '1'); -- { serverError BAD_ARGUMENTS } + +drop table local_t_l5ydey; diff --git a/parser/testdata/02400_memory_accounting_on_error/ast.json b/parser/testdata/02400_memory_accounting_on_error/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02400_memory_accounting_on_error/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02400_memory_accounting_on_error/metadata.json b/parser/testdata/02400_memory_accounting_on_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02400_memory_accounting_on_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git
a/parser/testdata/02400_memory_accounting_on_error/query.sql b/parser/testdata/02400_memory_accounting_on_error/query.sql new file mode 100644 index 000000000..60c08cf0d --- /dev/null +++ b/parser/testdata/02400_memory_accounting_on_error/query.sql @@ -0,0 +1,4 @@ +-- max_block_size to avoid randomization +SELECT * FROM generateRandom('i Array(Int8)', 1, 1, 1048577) LIMIT 65536 SETTINGS max_memory_usage='1Gi', max_block_size=65505, log_queries=1; -- { serverError MEMORY_LIMIT_EXCEEDED } +SYSTEM FLUSH LOGS query_log; +SELECT * FROM system.query_log WHERE event_date >= yesterday() AND current_database = currentDatabase() AND memory_usage > 100e6 FORMAT JSONEachRow; diff --git a/parser/testdata/02401_merge_tree_old_tmp_dirs_cleanup/ast.json b/parser/testdata/02401_merge_tree_old_tmp_dirs_cleanup/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02401_merge_tree_old_tmp_dirs_cleanup/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02401_merge_tree_old_tmp_dirs_cleanup/metadata.json b/parser/testdata/02401_merge_tree_old_tmp_dirs_cleanup/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02401_merge_tree_old_tmp_dirs_cleanup/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02401_merge_tree_old_tmp_dirs_cleanup/query.sql b/parser/testdata/02401_merge_tree_old_tmp_dirs_cleanup/query.sql new file mode 100644 index 000000000..01e054504 --- /dev/null +++ b/parser/testdata/02401_merge_tree_old_tmp_dirs_cleanup/query.sql @@ -0,0 +1,13 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS test_inserts; + +CREATE TABLE test_inserts (`key` Int, `part` Int) ENGINE = MergeTree PARTITION BY part ORDER BY key +SETTINGS temporary_directories_lifetime = 0, merge_tree_clear_old_temporary_directories_interval_seconds = 0; + +INSERT INTO test_inserts SELECT sleep(1), number FROM numbers(10) +SETTINGS max_insert_delayed_streams_for_parallel_write = 100, max_insert_block_size = 1, min_insert_block_size_rows = 1; + +SELECT count(), sum(part) FROM test_inserts; + +DROP TABLE test_inserts; diff --git a/parser/testdata/02402_external_disk_metrics/ast.json b/parser/testdata/02402_external_disk_metrics/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02402_external_disk_metrics/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02402_external_disk_metrics/metadata.json b/parser/testdata/02402_external_disk_metrics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02402_external_disk_metrics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02402_external_disk_metrics/query.sql b/parser/testdata/02402_external_disk_metrics/query.sql new file mode 100644 index 000000000..b4a9b7240 --- /dev/null +++ b/parser/testdata/02402_external_disk_metrics/query.sql @@ -0,0 +1,95 @@ +-- Tags: no-parallel, no-fasttest, long, no-random-settings + +SET max_bytes_before_external_sort = 33554432; +SET max_bytes_ratio_before_external_sort = 0; +SET max_block_size = 1048576; + +SELECT number FROM (SELECT number FROM numbers(2097152)) ORDER BY number * 1234567890123456789 LIMIT 2097142, 10 +SETTINGS log_comment='02402_external_disk_metrics/sort' +FORMAT Null; + +SET max_bytes_before_external_group_by = '100M'; +SET max_bytes_ratio_before_external_group_by = 0; +SET max_memory_usage = '410M'; +SET group_by_two_level_threshold = '100K'; +SET group_by_two_level_threshold_bytes = '50M'; +
+SELECT sum(k), sum(c) FROM (SELECT number AS k, sum(number) AS c FROM (SELECT * FROM system.numbers LIMIT 2097152) GROUP BY k) +SETTINGS log_comment='02402_external_disk_metrics/aggregation' +FORMAT Null; + +SET join_algorithm = 'partial_merge'; +SET default_max_bytes_in_join = 0; +SET max_bytes_in_join = 10000000; + +SELECT n, j * 2097152 FROM +(SELECT number * 200000 as n FROM numbers(5)) nums +ANY LEFT JOIN ( SELECT number * 2 AS n, number AS j FROM numbers(1000000) ) js2 +USING n +ORDER BY n +SETTINGS log_comment='02402_external_disk_metrics/join' +FORMAT Null; + +SYSTEM FLUSH LOGS query_log; + +SELECT + if( + any(ProfileEvents['ExternalProcessingFilesTotal']) >= 1 AND + any(ProfileEvents['ExternalProcessingCompressedBytesTotal']) >= 100000 AND + any(ProfileEvents['ExternalProcessingUncompressedBytesTotal']) >= 100000 AND + any(ProfileEvents['ExternalSortWritePart']) >= 1 AND + any(ProfileEvents['ExternalSortMerge']) >= 1 AND + any(ProfileEvents['ExternalSortCompressedBytes']) >= 100000 AND + any(ProfileEvents['ExternalSortUncompressedBytes']) >= 100000 AND + count() == 1, + 'ok', + 'fail: ' || toString(count()) || ' ' || toString(any(ProfileEvents)) + ) + FROM system.query_log WHERE current_database = currentDatabase() + AND log_comment = '02402_external_disk_metrics/sort' + AND query ILIKE 'SELECT%2097152%' AND type = 'QueryFinish'; + +SELECT + if( + any(ProfileEvents['ExternalProcessingFilesTotal']) >= 1 AND + any(ProfileEvents['ExternalProcessingCompressedBytesTotal']) >= 100000 AND + any(ProfileEvents['ExternalProcessingUncompressedBytesTotal']) >= 100000 AND + any(ProfileEvents['ExternalAggregationWritePart']) >= 1 AND + any(ProfileEvents['ExternalAggregationMerge']) >= 1 AND + any(ProfileEvents['ExternalAggregationCompressedBytes']) >= 100000 AND + any(ProfileEvents['ExternalAggregationUncompressedBytes']) >= 100000 AND + count() == 1, + 'ok', + 'fail: ' || toString(count()) || ' ' || toString(any(ProfileEvents)) + ) + FROM system.query_log WHERE current_database = currentDatabase() + AND log_comment = '02402_external_disk_metrics/aggregation' + AND query ILIKE 'SELECT%2097152%' AND type = 'QueryFinish'; + +SELECT + if( + any(ProfileEvents['ExternalProcessingFilesTotal']) >= 1 AND + any(ProfileEvents['ExternalProcessingCompressedBytesTotal']) >= 100000 AND + any(ProfileEvents['ExternalProcessingUncompressedBytesTotal']) >= 100000 AND + any(ProfileEvents['ExternalJoinWritePart']) >= 1 AND + any(ProfileEvents['ExternalJoinMerge']) >= 0 AND + any(ProfileEvents['ExternalJoinCompressedBytes']) >= 100000 AND + any(ProfileEvents['ExternalJoinUncompressedBytes']) >= 100000 AND + count() == 1, + 'ok', + 'fail: ' || toString(count()) || ' ' || toString(any(ProfileEvents)) + ) + FROM system.query_log + WHERE current_database = currentDatabase() + AND log_comment = '02402_external_disk_metrics/join' + AND query ILIKE 'SELECT%2097152%' AND type = 'QueryFinish'; + +-- Do not check values because they may not be recorded; just check that they exist +SYSTEM FLUSH LOGS metric_log; +SELECT + CurrentMetric_TemporaryFilesForAggregation, + CurrentMetric_TemporaryFilesForJoin, + CurrentMetric_TemporaryFilesForSort +FROM system.metric_log +ORDER BY event_time DESC LIMIT 5 +FORMAT Null; diff --git a/parser/testdata/02402_merge_engine_with_view/ast.json b/parser/testdata/02402_merge_engine_with_view/ast.json new file mode 100644 index 000000000..b95786643 --- /dev/null +++ b/parser/testdata/02402_merge_engine_with_view/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + }
+ ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00103498, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02402_merge_engine_with_view/metadata.json b/parser/testdata/02402_merge_engine_with_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02402_merge_engine_with_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02402_merge_engine_with_view/query.sql b/parser/testdata/02402_merge_engine_with_view/query.sql new file mode 100644 index 000000000..3998f410a --- /dev/null +++ b/parser/testdata/02402_merge_engine_with_view/query.sql @@ -0,0 +1,16 @@ +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +-- #40014 +CREATE TABLE m0 (id UInt64) ENGINE=MergeTree ORDER BY id SETTINGS index_granularity = 1, ratio_of_defaults_for_sparse_serialization = 1.0; +INSERT INTO m0 SELECT number FROM numbers(10); +CREATE TABLE m1 (id UInt64, s String) ENGINE=MergeTree ORDER BY id SETTINGS index_granularity = 1, ratio_of_defaults_for_sparse_serialization = 1.0; +INSERT INTO m1 SELECT number, 'boo' FROM numbers(10); +CREATE VIEW m1v AS SELECT id FROM m1; + +CREATE TABLE m2 (id UInt64) ENGINE=Merge(currentDatabase(),'m0|m1v'); + +SELECT * FROM m2 WHERE id > 1 AND id < 5 ORDER BY id SETTINGS force_primary_key=1, max_bytes_to_read=64; + +-- #40706 +CREATE VIEW v AS SELECT 1; +SELECT 1 FROM merge(currentDatabase(), '^v$'); diff --git a/parser/testdata/02403_ttl_column_multiple_times/ast.json b/parser/testdata/02403_ttl_column_multiple_times/ast.json new file mode 100644 index 000000000..f11145ab8 --- /dev/null +++ b/parser/testdata/02403_ttl_column_multiple_times/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ttl_table (children 1)" + }, + { + "explain": " Identifier ttl_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001648095, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02403_ttl_column_multiple_times/metadata.json b/parser/testdata/02403_ttl_column_multiple_times/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02403_ttl_column_multiple_times/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02403_ttl_column_multiple_times/query.sql b/parser/testdata/02403_ttl_column_multiple_times/query.sql new file mode 100644 index 000000000..a1114eb15 --- /dev/null +++ b/parser/testdata/02403_ttl_column_multiple_times/query.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS ttl_table; + +CREATE TABLE ttl_table +( + EventDate Date, + Longitude Float64 TTL EventDate + toIntervalWeek(2) +) +ENGINE = MergeTree() +ORDER BY EventDate +SETTINGS vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; + +SYSTEM STOP MERGES ttl_table; + +INSERT INTO ttl_table VALUES(toDate('2020-10-01'), 144); + +SELECT * FROM ttl_table; + +SYSTEM START MERGES ttl_table; + +OPTIMIZE TABLE ttl_table FINAL; + +SELECT * FROM ttl_table; + +OPTIMIZE TABLE ttl_table FINAL; + +SELECT * FROM ttl_table; + +DROP TABLE IF EXISTS ttl_table; diff --git a/parser/testdata/02404_lightweight_delete_vertical_merge/ast.json b/parser/testdata/02404_lightweight_delete_vertical_merge/ast.json new file mode 100644 index 000000000..a29d76241 --- /dev/null +++ 
b/parser/testdata/02404_lightweight_delete_vertical_merge/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery lwd_test (children 1)" + }, + { + "explain": " Identifier lwd_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001109709, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/02404_lightweight_delete_vertical_merge/metadata.json b/parser/testdata/02404_lightweight_delete_vertical_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02404_lightweight_delete_vertical_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02404_lightweight_delete_vertical_merge/query.sql b/parser/testdata/02404_lightweight_delete_vertical_merge/query.sql new file mode 100644 index 000000000..15354731f --- /dev/null +++ b/parser/testdata/02404_lightweight_delete_vertical_merge/query.sql @@ -0,0 +1,184 @@ +DROP TABLE IF EXISTS lwd_test; + +CREATE TABLE lwd_test +( + `id` UInt64, + `value` String +) +ENGINE = MergeTree +ORDER BY id +SETTINGS + vertical_merge_algorithm_min_rows_to_activate = 1, + vertical_merge_algorithm_min_columns_to_activate = 1, + min_rows_for_wide_part = 1, + min_bytes_for_wide_part = 1, + enable_block_number_column = 0, + enable_block_offset_column = 0; + +INSERT INTO lwd_test SELECT number AS id, toString(number) AS value FROM numbers(10); + +SELECT * FROM lwd_test ORDER BY id, value; + +SELECT name, part_type +FROM system.parts +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name; + + +SELECT name, column, type, rows +FROM system.parts_columns +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name, column; + + + +SET mutations_sync = 0; + +-- delete some rows using LWD +DELETE FROM lwd_test WHERE (id % 3) = 0; + + +SELECT * FROM lwd_test ORDER BY id, value; + + +SELECT name, part_type +FROM system.parts +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name; + + +SELECT name, column, type, rows +FROM system.parts_columns +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name, column; + + +-- optimize table to physically delete the rows +OPTIMIZE TABLE lwd_test FINAL SETTINGS mutations_sync = 2; + +SELECT * FROM lwd_test ORDER BY id, value; + + +SELECT name, part_type +FROM system.parts +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name; + + +SELECT name, column, type, rows +FROM system.parts_columns +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name, column; + + +-- delete more rows +DELETE FROM lwd_test WHERE (id % 2) = 0; + + +SELECT * FROM lwd_test ORDER BY id, value; + + +SELECT name, part_type +FROM system.parts +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name; + + +SELECT name, column, type, rows +FROM system.parts_columns +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name, column; + + +-- add another part that doesn't have deleted rows +INSERT INTO lwd_test SELECT number AS id, toString(number+100) AS value FROM numbers(10); + +SELECT * FROM lwd_test ORDER BY id, value; + +SELECT name, part_type +FROM system.parts +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name; + + +SELECT name, column, type, rows +FROM system.parts_columns +WHERE (database = 
currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name, column; + + +-- optimize table to merge 2 parts together: the 1st has LWD rows and the 2nd doesn't have LWD rows +-- physically delete the rows +OPTIMIZE TABLE lwd_test FINAL SETTINGS mutations_sync = 2; + +SELECT * FROM lwd_test ORDER BY id, value; + + +SELECT name, part_type +FROM system.parts +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name; + + +SELECT name, column, type, rows +FROM system.parts_columns +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name, column; + + +-- add another part that doesn't have deleted rows +INSERT INTO lwd_test SELECT number AS id, toString(number+200) AS value FROM numbers(10); + +SELECT * FROM lwd_test ORDER BY id, value; + +SELECT name, part_type +FROM system.parts +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name; + + +SELECT name, column, type, rows +FROM system.parts_columns +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name, column; + + +-- delete more rows +DELETE FROM lwd_test WHERE (id % 3) = 2; + + +SELECT * FROM lwd_test ORDER BY id, value; + + +SELECT name, part_type +FROM system.parts +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name; + + +SELECT name, column, type, rows +FROM system.parts_columns +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name, column; + + +-- optimize table to merge 2 parts together, both of them have LWD rows +-- physically delete the rows +OPTIMIZE TABLE lwd_test FINAL SETTINGS mutations_sync = 2; + +SELECT * FROM lwd_test ORDER BY id, value; + + +SELECT name, part_type +FROM system.parts +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name; + + +SELECT name, column, type, rows +FROM system.parts_columns +WHERE (database = currentDatabase()) AND (table = 'lwd_test') AND active +ORDER BY name, column; + + +DROP TABLE lwd_test; diff --git a/parser/testdata/02404_memory_bound_merging/ast.json b/parser/testdata/02404_memory_bound_merging/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02404_memory_bound_merging/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02404_memory_bound_merging/metadata.json b/parser/testdata/02404_memory_bound_merging/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02404_memory_bound_merging/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02404_memory_bound_merging/query.sql b/parser/testdata/02404_memory_bound_merging/query.sql new file mode 100644 index 000000000..800a89ea5 --- /dev/null +++ b/parser/testdata/02404_memory_bound_merging/query.sql @@ -0,0 +1,89 @@ +-- Tags: no-parallel, no-random-merge-tree-settings, long + +drop table if exists pr_t; +drop table if exists dist_t_different_dbs; +drop table if exists shard_1.t_different_dbs; +drop table if exists t_different_dbs; +drop table if exists dist_t; +drop table if exists t; + +set optimize_trivial_insert_select = 1; + +create table t(a UInt64, b UInt64) engine=MergeTree order by a; +system stop merges t; +insert into t select number, number from numbers_mt(1e6); + +set enable_memory_bound_merging_of_aggregation_results = 1; +set max_threads = 4; +set optimize_aggregation_in_order = 1; +set optimize_read_in_order = 1; +set prefer_localhost_replica = 1; + +-- slightly
different transforms will be generated by reading steps if we let settings randomisation change this setting value -- +set read_in_order_two_level_merge_threshold = 1000; + +create table dist_t as t engine = Distributed(test_cluster_two_shards, currentDatabase(), t, a % 2); + +-- { echoOn } -- +explain pipeline select a from remote(test_cluster_two_shards, currentDatabase(), t) group by a; + +select a from remote(test_cluster_two_shards, currentDatabase(), t) group by a order by a limit 5 offset 100500; + +explain pipeline select a from remote(test_cluster_two_shards, currentDatabase(), dist_t) group by a; + +select a from remote(test_cluster_two_shards, currentDatabase(), dist_t) group by a order by a limit 5 offset 100500; + +-- { echoOff } -- + +set aggregation_in_order_max_block_bytes = '1Mi'; +set max_block_size = 500; +-- actual block size might be slightly bigger than the limit -- +select max(bs) < 70000 from (select avg(a), max(blockSize()) as bs from remote(test_cluster_two_shards, currentDatabase(), t) group by a); + +-- a beautiful case: different sorting key definitions in the tables involved in a distributed query => different plans => different sorting properties of local aggregation results -- +create database if not exists shard_1; +create table t_different_dbs(a UInt64, b UInt64) engine = MergeTree order by a; +create table shard_1.t_different_dbs(a UInt64, b UInt64) engine = MergeTree order by tuple(); + +insert into t_different_dbs select number % 1000, number % 1000 from numbers_mt(1e6); +insert into shard_1.t_different_dbs select number % 1000, number % 1000 from numbers_mt(1e6); + +create table dist_t_different_dbs as t engine = Distributed(test_cluster_two_shards_different_databases_with_local, '', t_different_dbs); + +-- { echoOn } -- +explain pipeline select a, count() from dist_t_different_dbs group by a order by a limit 5 offset 500; + +select a, count() from dist_t_different_dbs group by a order by a limit 5 offset 500; +select a, count() from dist_t_different_dbs group by a, b order by a limit 5 offset 500; + +-- { echoOff } -- + +drop table if exists pr_t; + +create table pr_t(a UInt64, b UInt64) engine=MergeTree order by a; +insert into pr_t select number % 1000, number % 1000 from numbers_mt(1e6); + +set enable_parallel_replicas = 1; +set parallel_replicas_for_non_replicated_merge_tree = 1; +set max_parallel_replicas = 3; +set cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; +set distributed_aggregation_memory_efficient=1; +set parallel_replicas_only_with_analyzer = 0; -- necessary for CI runs with the analyzer disabled + +select count() from pr_t; + +-- { echoOn } -- +explain pipeline select a from pr_t group by a order by a limit 5 offset 500 settings parallel_replicas_local_plan=0; +explain pipeline select a from pr_t group by a order by a limit 5 offset 500 SETTINGS enable_analyzer=1, parallel_replicas_local_plan=1; + +select a, count() from pr_t group by a order by a limit 5 offset 500; +select a, count() from pr_t group by a, b order by a limit 5 offset 500; + +-- { echoOff } -- + +drop table if exists pr_t; +drop table if exists dist_t_different_dbs; +drop table if exists shard_1.t_different_dbs; +drop table if exists t_different_dbs; +drop table if exists dist_t; +drop table if exists t; diff --git a/parser/testdata/02405_avro_read_nested/ast.json b/parser/testdata/02405_avro_read_nested/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02405_avro_read_nested/ast.json @@
-0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02405_avro_read_nested/metadata.json b/parser/testdata/02405_avro_read_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02405_avro_read_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02405_avro_read_nested/query.sql b/parser/testdata/02405_avro_read_nested/query.sql new file mode 100644 index 000000000..512c48a2f --- /dev/null +++ b/parser/testdata/02405_avro_read_nested/query.sql @@ -0,0 +1,9 @@ +-- Tags: no-fasttest, no-parallel + +set flatten_nested = 1; + +insert into function file(02405_data.avro) select [(1, 'aa'), (2, 'bb')]::Nested(x UInt32, y String) as nested settings engine_file_truncate_on_insert=1; +select * from file(02405_data.avro, auto, 'nested Nested(x UInt32, y String)'); + +insert into function file(02405_data.avro) select [(1, (2, ['aa', 'bb']), [(3, 'cc'), (4, 'dd')]), (5, (6, ['ee', 'ff']), [(7, 'gg'), (8, 'hh')])]::Nested(x UInt32, y Tuple(y1 UInt32, y2 Array(String)), z Nested(z1 UInt32, z2 String)) as nested settings engine_file_truncate_on_insert=1; +select * from file(02405_data.avro, auto, 'nested Nested(x UInt32, y Tuple(y1 UInt32, y2 Array(String)), z Nested(z1 UInt32, z2 String))'); diff --git a/parser/testdata/02405_pmj_issue_40335/ast.json b/parser/testdata/02405_pmj_issue_40335/ast.json new file mode 100644 index 000000000..3cf908317 --- /dev/null +++ b/parser/testdata/02405_pmj_issue_40335/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001446924, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02405_pmj_issue_40335/metadata.json b/parser/testdata/02405_pmj_issue_40335/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02405_pmj_issue_40335/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02405_pmj_issue_40335/query.sql b/parser/testdata/02405_pmj_issue_40335/query.sql new file mode 100644 index 000000000..e50e27b82 --- /dev/null +++ b/parser/testdata/02405_pmj_issue_40335/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (x UInt64) ENGINE = TinyLog; +INSERT INTO t1 VALUES (1), (2), (3); + +CREATE TABLE t2 (x UInt64, value String) ENGINE = TinyLog; +INSERT INTO t2 VALUES (1, 'a'), (2, 'b'), (2, 'c'); +INSERT INTO t2 VALUES (3, 'd'), (3, 'e'), (4, 'f'); + +SET max_block_size=3; +SET max_joined_block_size_rows = 2; +SET join_algorithm='partial_merge'; + +SELECT value FROM t1 LEFT JOIN t2 ON t1.x = t2.x ORDER BY value; diff --git a/parser/testdata/02406_minmax_behaviour/ast.json b/parser/testdata/02406_minmax_behaviour/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02406_minmax_behaviour/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02406_minmax_behaviour/metadata.json b/parser/testdata/02406_minmax_behaviour/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02406_minmax_behaviour/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02406_minmax_behaviour/query.sql b/parser/testdata/02406_minmax_behaviour/query.sql new file mode 100644 index 000000000..314374a26 --- /dev/null +++ b/parser/testdata/02406_minmax_behaviour/query.sql @@ 
-0,0 +1,146 @@ +-- { echoOn } +SET compile_aggregate_expressions=0; + +WITH + arrayJoin([1, 2, 3, nan, 4, 5]) AS data, + arrayJoin([nan, 1, 2, 3, 4]) AS data2, + arrayJoin([1, 2, 3, 4, nan]) AS data3, + arrayJoin([nan, nan, nan]) AS data4, + arrayJoin([nan, 1, 2, 3, nan]) AS data5 +SELECT + min(data), + min(data2), + min(data3), + min(data4), + min(data5); + +WITH + arrayJoin([1, 2, 3, nan, 4, 5]) AS data, + arrayJoin([nan, 1, 2, 3, 4]) AS data2, + arrayJoin([1, 2, 3, 4, nan]) AS data3, + arrayJoin([nan, nan, nan]) AS data4, + arrayJoin([nan, 1, 2, 3, nan]) AS data5 +SELECT + max(data), + max(data2), + max(data3), + max(data4), + max(data5); + +Select max(number) from numbers(100) settings max_threads=1, max_block_size=10; +Select max(-number) from numbers(100); +Select min(number) from numbers(100) settings max_threads=1, max_block_size=10; +Select min(-number) from numbers(100); + +SELECT minIf(number, rand() % 2 == 3) from numbers(10) settings max_threads=1, max_block_size=5; +SELECT maxIf(number, rand() % 2 == 3) from numbers(10) settings max_threads=1, max_block_size=5; + +SELECT minIf(number::Float64, rand() % 2 == 3) from numbers(10) settings max_threads=1, max_block_size=5; +SELECT maxIf(number::Float64, rand() % 2 == 3) from numbers(10) settings max_threads=1, max_block_size=5; + +SELECT minIf(number::String, number < 10) as number from numbers(10, 1000); +SELECT maxIf(number::String, number < 10) as number from numbers(10, 1000); +SELECT maxIf(number::String, number % 3), maxIf(number::String, number % 5), minIf(number::String, number % 3), minIf(number::String, number > 10) from numbers(400); + +SELECT minIf(number::Nullable(String), number < 10) as number from numbers(10, 1000); +SELECT maxIf(number::Nullable(String), number < 10) as number from numbers(10, 1000); + +SELECT min(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, number * 2, NULL) as n from numbers(10, 20)); +SELECT max(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, number * 2, NULL) as n from numbers(10, 20)); + +SELECT max(number) from (Select if(number % 2 == 1, NULL, -number::Int8) as number FROM numbers(128)); +SELECT min(number) from (Select if(number % 2 == 1, NULL, -number::Int8) as number FROM numbers(128)); + +SELECT argMax(number, now()) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMax(number, now()) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMax(number, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMax(number, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMax(number::String, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMax(number::String, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMax(number, now() + number) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMax(number, now() + number) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMaxIf(number, now() + number, number % 10 < 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMaxIf(number, now() + number, number % 10 < 20) 
FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMaxIf(number, now() + number, number % 10 > 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMaxIf(number, now() + number, number % 10 > 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMax(number, number::Float64) from numbers(2029); +SELECT argMaxIf(number, number::Float64, number > 2030) from numbers(2029); +SELECT argMaxIf(number, number::Float64, number > 2030) from numbers(2032); +SELECT argMax(number, -number::Float64) from numbers(2029); +SELECT argMaxIf(number, -number::Float64, number > 2030) from numbers(2029); +SELECT argMaxIf(number, -number::Float64, number > 2030) from numbers(2032); + +SELECT argMin(number, now()) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMin(number, now()) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMin(number, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMin(number, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMin(number::String, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMin(number::String, 1) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMin(number, now() + number) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMin(number, now() + number) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMinIf(number, now() + number, number % 10 < 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMinIf(number, now() + number, number % 10 < 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMinIf(number, now() + number, number % 10 > 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=100; +SELECT argMinIf(number, now() + number, number % 10 > 20) FROM (Select number as number from numbers(10, 10000)) settings max_threads=1, max_block_size=20000; +SELECT argMin(number, number::Float64) from numbers(2029); +SELECT argMinIf(number, number::Float64, number > 2030) from numbers(2029); +SELECT argMinIf(number, number::Float64, number > 2030) from numbers(2032); +SELECT argMin(number, -number::Float64) from numbers(2029); +SELECT argMinIf(number, -number::Float64, number > 2030) from numbers(2029); +SELECT argMinIf(number, -number::Float64, number > 2030) from numbers(2032); + +Select argMax((n, n), n) t, toTypeName(t) FROM (Select if(number % 3 == 0, NULL, number) as n from numbers(10)); +Select argMaxIf((n, n), n, n < 5) t, toTypeName(t) FROM (Select if(number % 3 == 0, NULL, number) as n from numbers(10)); +Select argMaxIf((n, n), n, n > 5) t, toTypeName(t) FROM (Select if(number % 3 == 0, NULL, number) as n from numbers(10)); + +Select argMin((n, n), n) t, toTypeName(t) FROM (Select if(number % 3 == 0, NULL, number) as n from numbers(10)); +Select argMinIf((n, n), n, n < 5) t, toTypeName(t) FROM (Select if(number % 3 == 0, NULL, number) 
as n from numbers(10)); +Select argMinIf((n, n), n, n > 5) t, toTypeName(t) FROM (Select if(number % 3 == 0, NULL, number) as n from numbers(10)); + +SET compile_aggregate_expressions=1; +SET min_count_to_compile_aggregate_expression=0; + +WITH + arrayJoin([1, 2, 3, nan, 4, 5]) AS data, + arrayJoin([nan, 1, 2, 3, 4]) AS data2, + arrayJoin([1, 2, 3, 4, nan]) AS data3, + arrayJoin([nan, nan, nan]) AS data4, + arrayJoin([nan, 1, 2, 3, nan]) AS data5 +SELECT + min(data), + min(data2), + min(data3), + min(data4), + min(data5); + +WITH + arrayJoin([1, 2, 3, nan, 4, 5]) AS data, + arrayJoin([nan, 1, 2, 3, 4]) AS data2, + arrayJoin([1, 2, 3, 4, nan]) AS data3, + arrayJoin([nan, nan, nan]) AS data4, + arrayJoin([nan, 1, 2, 3, nan]) AS data5 +SELECT + max(data), + max(data2), + max(data3), + max(data4), + max(data5); + +SELECT minIf(number, rand() % 2 == 3) from numbers(10); +SELECT maxIf(number, rand() % 2 == 3) from numbers(10); + +SELECT minIf(number::Float64, rand() % 2 == 3) from numbers(10); +SELECT maxIf(number::Float64, rand() % 2 == 3) from numbers(10); + +SELECT minIf(number::String, number < 10) as number from numbers(10, 1000); +SELECT maxIf(number::String, number < 10) as number from numbers(10, 1000); +SELECT maxIf(number::String, number % 3), maxIf(number::String, number % 5), minIf(number::String, number % 3), minIf(number::String, number > 10) from numbers(400); + +SELECT minIf(number::Nullable(String), number < 10) as number from numbers(10, 1000); +SELECT maxIf(number::Nullable(String), number < 10) as number from numbers(10, 1000); + +SELECT min(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, number * 2, NULL) as n from numbers(10, 20)); +SELECT max(n::Nullable(String)) from (Select if(number < 15 and number % 2 == 1, number * 2, NULL) as n from numbers(10, 20)); + +SELECT max(number::Nullable(Decimal64(3))) from numbers(11) settings max_block_size=10; +SELECT min(-number::Nullable(Decimal64(3))) from numbers(11) settings max_block_size=10; diff --git a/parser/testdata/02406_try_read_datetime64_bug/ast.json b/parser/testdata/02406_try_read_datetime64_bug/ast.json new file mode 100644 index 000000000..9c4998a39 --- /dev/null +++ b/parser/testdata/02406_try_read_datetime64_bug/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime64OrDefault (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'Aaaa e a.a.aaaaaaaaa'" + }, + { + "explain": " Literal UInt64_9" + }, + { + "explain": " Literal 'UTC'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001427538, + "rows_read": 9, + "bytes_read": 345 + } +} diff --git a/parser/testdata/02406_try_read_datetime64_bug/metadata.json b/parser/testdata/02406_try_read_datetime64_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02406_try_read_datetime64_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02406_try_read_datetime64_bug/query.sql b/parser/testdata/02406_try_read_datetime64_bug/query.sql new file mode 100644 index 000000000..846e732ba --- /dev/null +++ b/parser/testdata/02406_try_read_datetime64_bug/query.sql @@ -0,0 +1,2 @@ +select toDateTime64OrDefault('Aaaa e 
a.a.aaaaaaaaa', 9, 'UTC'); +desc format(CSV, '"Aaaa e a.a.aaaaaaaaa"'); diff --git a/parser/testdata/02407_array_element_from_map_wrong_type/ast.json b/parser/testdata/02407_array_element_from_map_wrong_type/ast.json new file mode 100644 index 000000000..f14a1bcb8 --- /dev/null +++ b/parser/testdata/02407_array_element_from_map_wrong_type/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier m" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function materialize (alias m) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function map (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'key'" + }, + { + "explain": " Literal UInt64_42" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001810393, + "rows_read": 14, + "bytes_read": 529 + } +} diff --git a/parser/testdata/02407_array_element_from_map_wrong_type/metadata.json b/parser/testdata/02407_array_element_from_map_wrong_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02407_array_element_from_map_wrong_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02407_array_element_from_map_wrong_type/query.sql b/parser/testdata/02407_array_element_from_map_wrong_type/query.sql new file mode 100644 index 000000000..0e8d7f4f7 --- /dev/null +++ b/parser/testdata/02407_array_element_from_map_wrong_type/query.sql @@ -0,0 +1 @@ +select m[0], materialize(map('key', 42)) as m; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} diff --git a/parser/testdata/02408_to_fixed_string_short_circuit/ast.json b/parser/testdata/02408_to_fixed_string_short_circuit/ast.json new file mode 100644 index 000000000..57c02095f --- /dev/null +++ b/parser/testdata/02408_to_fixed_string_short_circuit/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function toFixedString (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '123'" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 
23, + + "statistics": + { + "elapsed": 0.001363769, + "rows_read": 23, + "bytes_read": 897 + } +} diff --git a/parser/testdata/02408_to_fixed_string_short_circuit/metadata.json b/parser/testdata/02408_to_fixed_string_short_circuit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02408_to_fixed_string_short_circuit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02408_to_fixed_string_short_circuit/query.sql b/parser/testdata/02408_to_fixed_string_short_circuit/query.sql new file mode 100644 index 000000000..9e817bbe5 --- /dev/null +++ b/parser/testdata/02408_to_fixed_string_short_circuit/query.sql @@ -0,0 +1 @@ +select if(number < 0, toFixedString(materialize('123'), 2), NULL) from numbers(2); diff --git a/parser/testdata/02409_url_format_detection/ast.json b/parser/testdata/02409_url_format_detection/ast.json new file mode 100644 index 000000000..9f57b8864 --- /dev/null +++ b/parser/testdata/02409_url_format_detection/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DescribeQuery (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function url (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'http:\/\/localhost:8888\/test\/data.tsv?get=parameterHere'" + }, + { + "explain": " Identifier auto" + }, + { + "explain": " Literal 'x UInt32'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001593565, + "rows_read": 7, + "bytes_read": 276 + } +} diff --git a/parser/testdata/02409_url_format_detection/metadata.json b/parser/testdata/02409_url_format_detection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02409_url_format_detection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02409_url_format_detection/query.sql b/parser/testdata/02409_url_format_detection/query.sql new file mode 100644 index 000000000..c94700cfd --- /dev/null +++ b/parser/testdata/02409_url_format_detection/query.sql @@ -0,0 +1 @@ +desc url('http://localhost:8888/test/data.tsv?get=parameterHere', auto, 'x UInt32'); diff --git a/parser/testdata/02410_csv_empty_fields_inference/ast.json b/parser/testdata/02410_csv_empty_fields_inference/ast.json new file mode 100644 index 000000000..87fe543c4 --- /dev/null +++ b/parser/testdata/02410_csv_empty_fields_inference/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DescribeQuery (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier CSV" + }, + { + "explain": " Literal ',,,'" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.000966875, + "rows_read": 6, + "bytes_read": 198 + } +} diff --git a/parser/testdata/02410_csv_empty_fields_inference/metadata.json b/parser/testdata/02410_csv_empty_fields_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02410_csv_empty_fields_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02410_csv_empty_fields_inference/query.sql b/parser/testdata/02410_csv_empty_fields_inference/query.sql new file mode 100644 index 000000000..39b4f09d7 --- /dev/null +++ 
b/parser/testdata/02410_csv_empty_fields_inference/query.sql @@ -0,0 +1,2 @@ +desc format(CSV, ',,,'); +desc format(CSV, '123,,abv,') diff --git a/parser/testdata/02410_to_decimal_or_default/ast.json b/parser/testdata/02410_to_decimal_or_default/ast.json new file mode 100644 index 000000000..fd25a0f15 --- /dev/null +++ b/parser/testdata/02410_to_decimal_or_default/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDecimal32OrDefault (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_111" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '123.456'" + }, + { + "explain": " Literal 'Decimal32(3)'" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001515797, + "rows_read": 15, + "bytes_read": 578 + } +} diff --git a/parser/testdata/02410_to_decimal_or_default/metadata.json b/parser/testdata/02410_to_decimal_or_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02410_to_decimal_or_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02410_to_decimal_or_default/query.sql b/parser/testdata/02410_to_decimal_or_default/query.sql new file mode 100644 index 000000000..8db464038 --- /dev/null +++ b/parser/testdata/02410_to_decimal_or_default/query.sql @@ -0,0 +1,9 @@ +SELECT toDecimal32OrDefault(111, 3, 123.456::Decimal32(3)) AS x, toTypeName(x); +SELECT toDecimal64OrDefault(222, 3, 123.456::Decimal64(3)) AS x, toTypeName(x); +SELECT toDecimal128OrDefault(333, 3, 123.456::Decimal128(3)) AS x, toTypeName(x); +SELECT toDecimal256OrDefault(444, 3, 123.456::Decimal256(3)) AS x, toTypeName(x); + +SELECT toDecimal32OrDefault('Hello', 3, 123.456::Decimal32(3)) AS x, toTypeName(x); +SELECT toDecimal64OrDefault('Hello', 3, 123.456::Decimal64(3)) AS x, toTypeName(x); +SELECT toDecimal128OrDefault('Hello', 3, 123.456::Decimal128(3)) AS x, toTypeName(x); +SELECT toDecimal256OrDefault('Hello', 3, 123.456::Decimal256(3)) AS x, toTypeName(x); diff --git a/parser/testdata/02411_legacy_geobase/ast.json b/parser/testdata/02411_legacy_geobase/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02411_legacy_geobase/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02411_legacy_geobase/metadata.json b/parser/testdata/02411_legacy_geobase/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02411_legacy_geobase/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02411_legacy_geobase/query.sql b/parser/testdata/02411_legacy_geobase/query.sql new file mode 100644 index 000000000..4e044c3f1 --- /dev/null +++ b/parser/testdata/02411_legacy_geobase/query.sql @@ -0,0 +1,15 @@ +-- Tags: no-fasttest + +SELECT regionToName(number::UInt32, 'en') FROM numbers(13); +SELECT regionToName(number::UInt32, 'xy') FROM numbers(13); -- { serverError POCO_EXCEPTION } + +SELECT 
regionToName(number::UInt32, 'en'), regionToCity(number::UInt32) AS id, regionToName(id, 'en') FROM numbers(13); +SELECT regionToName(number::UInt32, 'en'), regionToArea(number::UInt32) AS id, regionToName(id, 'en') FROM numbers(13); +SELECT regionToName(number::UInt32, 'en'), regionToDistrict(number::UInt32) AS id, regionToName(id, 'en') FROM numbers(13); +SELECT regionToName(number::UInt32, 'en'), regionToCountry(number::UInt32) AS id, regionToName(id, 'en') FROM numbers(13); +SELECT regionToName(number::UInt32, 'en'), regionToContinent(number::UInt32) AS id, regionToName(id, 'en') FROM numbers(13); +SELECT regionToName(number::UInt32, 'en'), regionToTopContinent(number::UInt32) AS id, regionToName(id, 'en') FROM numbers(13); +SELECT regionToName(number::UInt32, 'en'), regionToPopulation(number::UInt32) AS id, regionToName(id, 'en') FROM numbers(13); +SELECT regionToName(n1.number::UInt32, 'en') || (regionIn(n1.number::UInt32, n2.number::UInt32) ? ' is in ' : ' is not in ') || regionToName(n2.number::UInt32, 'en') FROM numbers(13) AS n1 CROSS JOIN numbers(13) AS n2; +SELECT regionHierarchy(number::UInt32) AS arr, arrayMap(id -> regionToName(id, 'en'), arr) FROM numbers(13); +SELECT regionToName(number::UInt32, 'es') FROM numbers(4); diff --git a/parser/testdata/02412_nlp/ast.json b/parser/testdata/02412_nlp/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02412_nlp/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02412_nlp/metadata.json b/parser/testdata/02412_nlp/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02412_nlp/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02412_nlp/query.sql b/parser/testdata/02412_nlp/query.sql new file mode 100644 index 000000000..42c3f1087 --- /dev/null +++ b/parser/testdata/02412_nlp/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-fasttest + +SET allow_experimental_nlp_functions = 1; + +SELECT lemmatize('en', 'wolves'); +SELECT lemmatize('en', 'dogs'); +SELECT lemmatize('en', 'looking'); +SELECT lemmatize('en', 'took'); +SELECT lemmatize('en', 'imported'); +SELECT lemmatize('en', 'tokenized'); +SELECT lemmatize('en', 'flown'); + +SELECT synonyms('en', 'crucial'); +SELECT synonyms('en', 'cheerful'); +SELECT synonyms('en', 'yet'); +SELECT synonyms('en', 'quiz'); +SELECT synonyms('ru', 'главный'); +SELECT synonyms('ru', 'веселый'); +SELECT synonyms('ru', 'правда'); +SELECT synonyms('ru', 'экзамен'); diff --git a/parser/testdata/02414_all_new_table_functions_must_be_documented/ast.json b/parser/testdata/02414_all_new_table_functions_must_be_documented/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02414_all_new_table_functions_must_be_documented/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02414_all_new_table_functions_must_be_documented/metadata.json b/parser/testdata/02414_all_new_table_functions_must_be_documented/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02414_all_new_table_functions_must_be_documented/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02414_all_new_table_functions_must_be_documented/query.sql b/parser/testdata/02414_all_new_table_functions_must_be_documented/query.sql new file mode 100644 index 000000000..48c6fdc8a --- /dev/null +++ b/parser/testdata/02414_all_new_table_functions_must_be_documented/query.sql @@ -0,0 +1,7 @@ +-- This outputs the list of 
undocumented table functions. +-- No new items in the list should appear. Please help shorten this list down to zero elements. +SELECT name FROM system.table_functions WHERE length(description) < 10 +AND name NOT IN ( + -- these table functions are not enabled in fast test + 'cosn', 'oss', 'hdfs', 'hdfsCluster', 'hive', 'mysql', 'postgresql', 's3', 's3Cluster', 'sqlite', 'urlCluster', 'mergeTreeParts' +) ORDER BY name; diff --git a/parser/testdata/02415_all_new_functions_must_be_documented/ast.json b/parser/testdata/02415_all_new_functions_must_be_documented/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02415_all_new_functions_must_be_documented/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02415_all_new_functions_must_be_documented/metadata.json b/parser/testdata/02415_all_new_functions_must_be_documented/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02415_all_new_functions_must_be_documented/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02415_all_new_functions_must_be_documented/query.sql b/parser/testdata/02415_all_new_functions_must_be_documented/query.sql new file mode 100644 index 000000000..110379b4c --- /dev/null +++ b/parser/testdata/02415_all_new_functions_must_be_documented/query.sql @@ -0,0 +1,19 @@ +-- This outputs the list of undocumented functions. +-- No new items in the list should appear. Please help shorten this list down to zero elements. +SELECT name FROM system.functions WHERE NOT is_aggregate AND origin = 'System' AND alias_to = '' AND length(description) < 10 +AND name NOT IN ( + -- these functions are not enabled in fast test + 'aes_decrypt_mysql', 'aes_encrypt_mysql', 'decrypt', 'encrypt', + 'convertCharset', + 'detectLanguage', 'detectLanguageMixed', + 'geoToH3', + 'h3CellAreaM2', 'h3CellAreaRads2', 'h3Distance', 'h3EdgeAngle', 'h3EdgeLengthKm', 'h3EdgeLengthM', 'h3ExactEdgeLengthKm', 'h3ExactEdgeLengthM', 'h3ExactEdgeLengthRads', 'h3GetBaseCell', + 'h3GetDestinationIndexFromUnidirectionalEdge', 'h3GetFaces', 'h3GetIndexesFromUnidirectionalEdge', 'h3GetOriginIndexFromUnidirectionalEdge', 'h3GetPentagonIndexes', 'h3GetRes0Indexes', + 'h3GetResolution', 'h3GetUnidirectionalEdge', 'h3GetUnidirectionalEdgeBoundary', 'h3GetUnidirectionalEdgesFromHexagon', 'h3HexAreaKm2', 'h3HexAreaM2', 'h3HexRing', 'h3IndexesAreNeighbors', + 'h3IsPentagon', 'h3IsResClassIII', 'h3IsValid', 'h3Line', 'h3NumHexagons', 'h3PointDistKm', 'h3PointDistM', 'h3PointDistRads', 'h3ToCenterChild', 'h3ToChildren', 'h3ToGeo', + 'h3ToGeoBoundary', 'h3ToParent', 'h3ToString', 'h3UnidirectionalEdgeIsValid', 'h3kRing', 'stringToH3', + 'geoToS2', 's2CapContains', 's2CapUnion', 's2CellsIntersect', 's2GetNeighbors', 's2RectAdd', 's2RectContains', 's2RectIntersection', 's2RectUnion', 's2ToGeo', + 'normalizeUTF8NFC', 'normalizeUTF8NFD', 'normalizeUTF8NFKC', 'normalizeUTF8NFKD', 'bech32Encode', 'bech32Decode', + 'lemmatize', 'stem', 'synonyms', 'kql_array_sort_asc', 'kql_array_sort_desc', + 'detectCharset', 'detectLanguageUnknown', 'detectProgrammingLanguage', 'detectTonality' +) ORDER BY name; diff --git a/parser/testdata/02415_all_new_functions_must_have_version_information/ast.json b/parser/testdata/02415_all_new_functions_must_have_version_information/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02415_all_new_functions_must_have_version_information/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/02415_all_new_functions_must_have_version_information/metadata.json b/parser/testdata/02415_all_new_functions_must_have_version_information/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02415_all_new_functions_must_have_version_information/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02415_all_new_functions_must_have_version_information/query.sql b/parser/testdata/02415_all_new_functions_must_have_version_information/query.sql new file mode 100644 index 000000000..be5e450be --- /dev/null +++ b/parser/testdata/02415_all_new_functions_must_have_version_information/query.sql @@ -0,0 +1,22 @@ +-- This outputs the list of functions without version information. +-- No new items in the list should appear. Please help shorten this list down to zero elements. +SELECT name FROM system.functions WHERE NOT is_aggregate AND origin = 'System' AND alias_to = '' AND introduced_in == '' +AND name NOT IN ( + -- these functions are not enabled in fast test + 'aes_decrypt_mysql', 'aes_encrypt_mysql', 'decrypt', 'encrypt', + 'convertCharset', + 'detectLanguage', 'detectLanguageMixed', + 'geoToH3', + 'h3CellAreaM2', 'h3CellAreaRads2', 'h3Distance', 'h3EdgeAngle', 'h3EdgeLengthKm', 'h3EdgeLengthM', 'h3ExactEdgeLengthKm', 'h3ExactEdgeLengthM', 'h3ExactEdgeLengthRads', 'h3GetBaseCell', + 'h3GetDestinationIndexFromUnidirectionalEdge', 'h3GetFaces', 'h3GetIndexesFromUnidirectionalEdge', 'h3GetOriginIndexFromUnidirectionalEdge', 'h3GetPentagonIndexes', 'h3GetRes0Indexes', + 'h3GetResolution', 'h3GetUnidirectionalEdge', 'h3GetUnidirectionalEdgeBoundary', 'h3GetUnidirectionalEdgesFromHexagon', 'h3HexAreaKm2', 'h3HexAreaM2', 'h3HexRing', 'h3IndexesAreNeighbors', + 'h3IsPentagon', 'h3IsResClassIII', 'h3IsValid', 'h3Line', 'h3NumHexagons', 'h3PointDistKm', 'h3PointDistM', 'h3PointDistRads', 'h3ToCenterChild', 'h3ToChildren', 'h3ToGeo', + 'h3ToGeoBoundary', 'h3ToParent', 'h3ToString', 'h3UnidirectionalEdgeIsValid', 'h3kRing', 'stringToH3', + 'geoToS2', 's2CapContains', 's2CapUnion', 's2CellsIntersect', 's2GetNeighbors', 's2RectAdd', 's2RectContains', 's2RectIntersection', 's2RectUnion', 's2ToGeo', + 'normalizeUTF8NFC', 'normalizeUTF8NFD', 'normalizeUTF8NFKC', 'normalizeUTF8NFKD', + 'lemmatize', 'tokenize', 'stem', 'synonyms', 'kql_array_sort_asc', 'kql_array_sort_desc', + 'detectCharset', 'detectLanguageUnknown', 'detectProgrammingLanguage', 'detectTonality', 'bech32Encode', 'bech32Decode', + 'BLAKE3', 'JSONMergePatch', 'MD4', 'MD5', 'RIPEMD160', 'SHA1', 'SHA224', 'SHA256', 'SHA384', 'SHA512', 'SHA512_256', 'ULIDStringToDateTime', 'generateULID', 'halfMD5', + 'idnaDecode', 'idnaEncode', 'keccak256', 'punycodeDecode', 'punycodeEncode', 'seriesPeriodDetectFFT', 'sqidDecode', 'sqidEncode', 'tryDecrypt', 'tryIdnaEncode', 'tryPunycodeDecode', 'uniqThetaIntersect', + 'uniqThetaNot', 'uniqThetaUnion' +) ORDER BY name; diff --git a/parser/testdata/02416_grouping_function_compatibility/ast.json b/parser/testdata/02416_grouping_function_compatibility/ast.json new file mode 100644 index 000000000..2da3d1c28 --- /dev/null +++ b/parser/testdata/02416_grouping_function_compatibility/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test02416 (children 1)" + }, + { + "explain": " Identifier test02416" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001052646, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git 
a/parser/testdata/02416_grouping_function_compatibility/metadata.json b/parser/testdata/02416_grouping_function_compatibility/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02416_grouping_function_compatibility/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02416_grouping_function_compatibility/query.sql b/parser/testdata/02416_grouping_function_compatibility/query.sql new file mode 100644 index 000000000..ed21055ad --- /dev/null +++ b/parser/testdata/02416_grouping_function_compatibility/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS test02416; + +CREATE TABLE test02416(a UInt64, b UInt64) ENGINE=MergeTree() ORDER BY (a, b); + +INSERT INTO test02416 SELECT number % 2 as a, number as b FROM numbers(10); + +-- { echoOn } +SELECT count() AS amount, a, b, GROUPING(a, b) FROM test02416 GROUP BY GROUPING SETS ((a, b), (a), ()) ORDER BY (amount, a, b); + +SELECT count() AS amount, a, b, GROUPING(a, b) FROM test02416 GROUP BY ROLLUP(a, b) ORDER BY (amount, a, b); + +-- { echoOff } +DROP TABLE test02416; + diff --git a/parser/testdata/02416_in_set_same_ast_diff_columns/ast.json b/parser/testdata/02416_in_set_same_ast_diff_columns/ast.json new file mode 100644 index 000000000..d41008081 --- /dev/null +++ b/parser/testdata/02416_in_set_same_ast_diff_columns/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery set_crash (children 3)" + }, + { + "explain": " Identifier set_crash" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration key1 (children 1)" + }, + { + "explain": " DataType Int32" + }, + { + "explain": " ColumnDeclaration id1 (children 1)" + }, + { + "explain": " DataType Int64" + }, + { + "explain": " ColumnDeclaration c1 (children 1)" + }, + { + "explain": " DataType Int64" + }, + { + "explain": " Storage definition (children 3)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier id1" + }, + { + "explain": " Identifier key1" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001197034, + "rows_read": 14, + "bytes_read": 478 + } +} diff --git a/parser/testdata/02416_in_set_same_ast_diff_columns/metadata.json b/parser/testdata/02416_in_set_same_ast_diff_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02416_in_set_same_ast_diff_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02416_in_set_same_ast_diff_columns/query.sql b/parser/testdata/02416_in_set_same_ast_diff_columns/query.sql new file mode 100644 index 000000000..c3475f37e --- /dev/null +++ b/parser/testdata/02416_in_set_same_ast_diff_columns/query.sql @@ -0,0 +1,3 @@ +CREATE TABLE set_crash (key1 Int32, id1 Int64, c1 Int64) ENGINE = MergeTree PARTITION BY id1 ORDER BY key1; +INSERT INTO set_crash VALUES (-1, 1, 0); +SELECT 1 in (-1, 1) FROM set_crash WHERE (key1, id1) in (-1, 1); diff --git a/parser/testdata/02416_input_json_formats/ast.json b/parser/testdata/02416_input_json_formats/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02416_input_json_formats/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02416_input_json_formats/metadata.json b/parser/testdata/02416_input_json_formats/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02416_input_json_formats/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02416_input_json_formats/query.sql b/parser/testdata/02416_input_json_formats/query.sql new file mode 100644 index 000000000..396c5a14a --- /dev/null +++ b/parser/testdata/02416_input_json_formats/query.sql @@ -0,0 +1,13 @@ +-- Tags: no-parallel, no-fasttest +insert into function file(02416_data.json) select number::UInt32 as n, 'Hello' as s, range(number) as a from numbers(3) settings engine_file_truncate_on_insert=1; +desc file(02416_data.json); +select * from file(02416_data.json); + +insert into function file(02416_data.jsonCompact) select number::UInt32 as n, 'Hello' as s, range(number) as a from numbers(3) settings engine_file_truncate_on_insert=1; +desc file(02416_data.jsonCompact); +select * from file(02416_data.jsonCompact); + +insert into function file(02416_data.jsonColumnsWithMetadata) select number::UInt32 as n, 'Hello' as s, range(number) as a from numbers(3) settings engine_file_truncate_on_insert=1; +desc file(02416_data.jsonColumnsWithMetadata); +select * from file(02416_data.jsonColumnsWithMetadata); + diff --git a/parser/testdata/02416_json_tuple_to_array_schema_inference/ast.json b/parser/testdata/02416_json_tuple_to_array_schema_inference/ast.json new file mode 100644 index 000000000..2902a76f2 --- /dev/null +++ b/parser/testdata/02416_json_tuple_to_array_schema_inference/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001205601, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02416_json_tuple_to_array_schema_inference/metadata.json b/parser/testdata/02416_json_tuple_to_array_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02416_json_tuple_to_array_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02416_json_tuple_to_array_schema_inference/query.sql b/parser/testdata/02416_json_tuple_to_array_schema_inference/query.sql new file mode 100644 index 000000000..4bdf61cac --- /dev/null +++ b/parser/testdata/02416_json_tuple_to_array_schema_inference/query.sql @@ -0,0 +1,5 @@ +set input_format_json_infer_array_of_dynamic_from_array_of_different_types=0; +desc format(JSONEachRow, '{"x" : [[42, null], [24, null]]}'); +desc format(JSONEachRow, '{"x" : [[[42, null], []], 24]}'); +desc format(JSONEachRow, '{"x" : {"key" : [42, null]}}'); + diff --git a/parser/testdata/02416_keeper_map/ast.json b/parser/testdata/02416_keeper_map/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02416_keeper_map/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02416_keeper_map/metadata.json b/parser/testdata/02416_keeper_map/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02416_keeper_map/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02416_keeper_map/query.sql b/parser/testdata/02416_keeper_map/query.sql new file mode 100644 index 000000000..6037a8835 --- /dev/null +++ b/parser/testdata/02416_keeper_map/query.sql @@ -0,0 +1,38 @@ +-- Tags: no-ordinary-database, no-fasttest, long + +DROP TABLE IF EXISTS 02416_test SYNC; + +CREATE TABLE 02416_test (key String, value UInt32) Engine=KeeperMap('/' || currentDatabase() || '/test2416'); -- { serverError BAD_ARGUMENTS } +CREATE 
TABLE 02416_test (key String, value UInt32) Engine=KeeperMap('/' || currentDatabase() || '/test2416') PRIMARY KEY(key2); -- { serverError UNKNOWN_IDENTIFIER } +CREATE TABLE 02416_test (key String, value UInt32) Engine=KeeperMap('/' || currentDatabase() || '/test2416') PRIMARY KEY(key, value); -- { serverError BAD_ARGUMENTS } +CREATE TABLE 02416_test (key String, value UInt32) Engine=KeeperMap('/' || currentDatabase() || '/test2416') PRIMARY KEY(concat(key, value)); -- { serverError BAD_ARGUMENTS } +CREATE TABLE 02416_test (key Tuple(String, UInt32), value UInt64) Engine=KeeperMap('/' || currentDatabase() || '/test2416') PRIMARY KEY(key); + +DROP TABLE IF EXISTS 02416_test SYNC; +CREATE TABLE 02416_test (key String, value UInt32) Engine=KeeperMap('/' || currentDatabase() || '/test2416') PRIMARY KEY(key); + +INSERT INTO 02416_test SELECT '1_1', number FROM numbers(1000); +SELECT COUNT(1) == 1 FROM 02416_test; + +INSERT INTO 02416_test SELECT concat(toString(number), '_1'), number FROM numbers(1000); +SELECT COUNT(1) == 1000 FROM 02416_test; +SELECT uniqExact(key) == 32 FROM (SELECT * FROM 02416_test LIMIT 32 SETTINGS max_block_size = 1); +SELECT SUM(value) == 1 + 99 + 900 FROM 02416_test WHERE key IN ('1_1', '99_1', '900_1'); + +DROP TABLE IF EXISTS 02416_test SYNC; +DROP TABLE IF EXISTS 02416_test_memory; + +CREATE TABLE 02416_test (k UInt32, value UInt64, dummy Tuple(UInt32, Float64), bm AggregateFunction(groupBitmap, UInt64)) Engine=KeeperMap('/' || currentDatabase() || '/test2416') PRIMARY KEY(k); +CREATE TABLE 02416_test_memory AS 02416_test Engine = Memory; + +INSERT INTO 02416_test SELECT number % 77 AS k, SUM(number) AS value, (1, 1.2), bitmapBuild(groupArray(number)) FROM numbers(10000) group by k; + +INSERT INTO 02416_test_memory SELECT number % 77 AS k, SUM(number) AS value, (1, 1.2), bitmapBuild(groupArray(number)) FROM numbers(10000) group by k; + +SELECT A.a = B.a, A.b = B.b, A.c = B.c, A.d = B.d, A.e = B.e FROM ( SELECT 0 AS a, groupBitmapMerge(bm) AS b , SUM(k) AS c, SUM(value) AS d, SUM(dummy.1) AS e FROM 02416_test) A ANY LEFT JOIN (SELECT 0 AS a, groupBitmapMerge(bm) AS b , SUM(k) AS c, SUM(value) AS d, SUM(dummy.1) AS e FROM 02416_test_memory) B USING a ORDER BY a; + +TRUNCATE TABLE 02416_test; +SELECT 0 == COUNT(1) FROM 02416_test; + +DROP TABLE IF EXISTS 02416_test SYNC; +DROP TABLE IF EXISTS 02416_test_memory; diff --git a/parser/testdata/02416_rocksdb_delete_update/ast.json b/parser/testdata/02416_rocksdb_delete_update/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02416_rocksdb_delete_update/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02416_rocksdb_delete_update/metadata.json b/parser/testdata/02416_rocksdb_delete_update/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02416_rocksdb_delete_update/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02416_rocksdb_delete_update/query.sql b/parser/testdata/02416_rocksdb_delete_update/query.sql new file mode 100644 index 000000000..0cf23e97f --- /dev/null +++ b/parser/testdata/02416_rocksdb_delete_update/query.sql @@ -0,0 +1,42 @@ +-- Tags: no-ordinary-database, no-fasttest, use-rocksdb + +DROP TABLE IF EXISTS 02416_rocksdb; + +CREATE TABLE 02416_rocksdb (key UInt64, value String, value2 UInt64) Engine=EmbeddedRocksDB PRIMARY KEY(key); + +INSERT INTO 02416_rocksdb VALUES (1, 'Some string', 0), (2, 'Some other string', 0), (3, 'random', 0), (4, 'random2', 0); + +SELECT * FROM 
02416_rocksdb ORDER BY key; +SELECT '-----------'; + +DELETE FROM 02416_rocksdb WHERE value LIKE 'Some%string'; + +SELECT * FROM 02416_rocksdb ORDER BY key; +SELECT '-----------'; + +ALTER TABLE 02416_rocksdb DELETE WHERE key >= 4; + +SELECT * FROM 02416_rocksdb ORDER BY key; +SELECT '-----------'; + +DELETE FROM 02416_rocksdb WHERE 1 = 1; +SELECT count() FROM 02416_rocksdb; +SELECT '-----------'; + +INSERT INTO 02416_rocksdb VALUES (1, 'String', 10), (2, 'String', 20), (3, 'String', 30), (4, 'String', 40); +SELECT * FROM 02416_rocksdb ORDER BY key; +SELECT '-----------'; + +ALTER TABLE 02416_rocksdb UPDATE value = 'Another' WHERE key > 2; +SELECT * FROM 02416_rocksdb ORDER BY key; +SELECT '-----------'; + +ALTER TABLE 02416_rocksdb UPDATE key = key * 10 WHERE 1 = 1; -- { serverError BAD_ARGUMENTS } +SELECT * FROM 02416_rocksdb ORDER BY key; +SELECT '-----------'; + +ALTER TABLE 02416_rocksdb UPDATE value2 = value2 * 10 + 2 WHERE 1 = 1; +SELECT * FROM 02416_rocksdb ORDER BY key; +SELECT '-----------'; + +DROP TABLE IF EXISTS 02416_rocksdb; diff --git a/parser/testdata/02416_row_policy_always_false_index/ast.json b/parser/testdata/02416_row_policy_always_false_index/ast.json new file mode 100644 index 000000000..fcd7a98ed --- /dev/null +++ b/parser/testdata/02416_row_policy_always_false_index/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tbl (children 1)" + }, + { + "explain": " Identifier tbl" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001463149, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02416_row_policy_always_false_index/metadata.json b/parser/testdata/02416_row_policy_always_false_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02416_row_policy_always_false_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02416_row_policy_always_false_index/query.sql b/parser/testdata/02416_row_policy_always_false_index/query.sql new file mode 100644 index 000000000..c233e99ec --- /dev/null +++ b/parser/testdata/02416_row_policy_always_false_index/query.sql @@ -0,0 +1,17 @@ +drop table if exists tbl; + +create table tbl (s String, i int) engine MergeTree order by i; + +insert into tbl values ('123', 123); + +drop row policy if exists filter on tbl; + +create row policy filter on tbl using 0 to all; + +set max_rows_to_read = 0; + +select * from tbl; + +drop row policy filter on tbl; + +drop table tbl; diff --git a/parser/testdata/02417_from_select_syntax/ast.json b/parser/testdata/02417_from_select_syntax/ast.json new file mode 100644 index 000000000..e8f629838 --- /dev/null +++ b/parser/testdata/02417_from_select_syntax/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001019227, + 
"rows_read": 11, + "bytes_read": 430 + } +} diff --git a/parser/testdata/02417_from_select_syntax/metadata.json b/parser/testdata/02417_from_select_syntax/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02417_from_select_syntax/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02417_from_select_syntax/query.sql b/parser/testdata/02417_from_select_syntax/query.sql new file mode 100644 index 000000000..ce6cb3a14 --- /dev/null +++ b/parser/testdata/02417_from_select_syntax/query.sql @@ -0,0 +1,4 @@ +FROM numbers(1) SELECT number; +WITH 1 as n FROM numbers(1) SELECT number * n; +FROM (FROM numbers(1) SELECT *) SELECT number; +FROM (FROM numbers(1) SELECT *) AS select SELECT number; diff --git a/parser/testdata/02417_json_object_each_row_format/ast.json b/parser/testdata/02417_json_object_each_row_format/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02417_json_object_each_row_format/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02417_json_object_each_row_format/metadata.json b/parser/testdata/02417_json_object_each_row_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02417_json_object_each_row_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02417_json_object_each_row_format/query.sql b/parser/testdata/02417_json_object_each_row_format/query.sql new file mode 100644 index 000000000..47f6a4dc7 --- /dev/null +++ b/parser/testdata/02417_json_object_each_row_format/query.sql @@ -0,0 +1,7 @@ +-- Tags: no-parallel, no-fasttest +set input_format_json_try_infer_numbers_from_strings=1; +select number, 'Hello' as str, range(number) as arr from numbers(3) format JSONObjectEachRow; +insert into function file(02417_data.jsonObjectEachRow) select number, 'Hello' as str, range(number) as arr from numbers(3) settings engine_file_truncate_on_insert=1; +desc file(02417_data.jsonObjectEachRow); +select * from file(02417_data.jsonObjectEachRow); + diff --git a/parser/testdata/02417_keeper_map_create_drop/ast.json b/parser/testdata/02417_keeper_map_create_drop/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02417_keeper_map_create_drop/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02417_keeper_map_create_drop/metadata.json b/parser/testdata/02417_keeper_map_create_drop/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02417_keeper_map_create_drop/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02417_keeper_map_create_drop/query.sql b/parser/testdata/02417_keeper_map_create_drop/query.sql new file mode 100644 index 000000000..49340167e --- /dev/null +++ b/parser/testdata/02417_keeper_map_create_drop/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-ordinary-database, no-fasttest + +DROP TABLE IF EXISTS 02417_test SYNC; + +CREATE TABLE 02417_test (key UInt64, value UInt64) Engine=KeeperMap('/' || currentDatabase() || '/test2417') PRIMARY KEY(key); +INSERT INTO 02417_test VALUES (1, 11); +SELECT * FROM 02417_test ORDER BY key; +SELECT '------'; + +CREATE TABLE 02417_test_another (key UInt64, value UInt64) Engine=KeeperMap('/' || currentDatabase() || '/test2417') PRIMARY KEY(key); +INSERT INTO 02417_test_another VALUES (2, 22); +SELECT * FROM 02417_test_another ORDER BY key; +SELECT '------'; +SELECT * FROM 02417_test ORDER BY key; +SELECT '------'; + +DROP TABLE 
02417_test SYNC; +SELECT * FROM 02417_test_another ORDER BY key; + +DROP TABLE 02417_test_another SYNC; diff --git a/parser/testdata/02417_null_variadic_behaviour/ast.json b/parser/testdata/02417_null_variadic_behaviour/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02417_null_variadic_behaviour/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02417_null_variadic_behaviour/metadata.json b/parser/testdata/02417_null_variadic_behaviour/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02417_null_variadic_behaviour/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02417_null_variadic_behaviour/query.sql b/parser/testdata/02417_null_variadic_behaviour/query.sql new file mode 100644 index 000000000..00c07ede0 --- /dev/null +++ b/parser/testdata/02417_null_variadic_behaviour/query.sql @@ -0,0 +1,47 @@ +-- { echo } +SELECT avgWeighted(number, number) t, toTypeName(t) FROM numbers(1); +SELECT avgWeighted(number, number + 1) t, toTypeName(t) FROM numbers(0); + +SELECT avgWeighted(toNullable(number), number) t, toTypeName(t) FROM numbers(1); +SELECT avgWeighted(if(number < 10000, NULL, number), number) t, toTypeName(t) FROM numbers(100); +SELECT avgWeighted(if(number < 50, NULL, number), number) t, toTypeName(t) FROM numbers(100); + +SELECT avgWeighted(number, if(number < 10000, NULL, number)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeighted(number, if(number < 50, NULL, number)) t, toTypeName(t) FROM numbers(100); + +SELECT avgWeighted(toNullable(number), if(number < 10000, NULL, number)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeighted(toNullable(number), if(number < 50, NULL, number)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeighted(if(number < 10000, NULL, number), toNullable(number)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeighted(if(number < 50, NULL, number), toNullable(number)) t, toTypeName(t) FROM numbers(100); + +SELECT avgWeighted(toNullable(number), if(number < 500, NULL, number)) t, toTypeName(t) FROM numbers(1000); + +SELECT avgWeighted(if(number < 10000, NULL, number), if(number < 10000, NULL, number)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeighted(if(number < 50, NULL, number), if(number < 10000, NULL, number)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeighted(if(number < 10000, NULL, number), if(number < 50, NULL, number)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeighted(if(number < 50, NULL, number), if(number < 50, NULL, number)) t, toTypeName(t) FROM numbers(100); + +SELECT avgWeighted(if(number < 10000, NULL, number), if(number < 500, NULL, number)) t, toTypeName(t) FROM numbers(1000); + +SELECT avgWeightedIf(number, number, number % 10) t, toTypeName(t) FROM numbers(100); +SELECT avgWeightedIf(number, number, toNullable(number % 10)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeightedIf(number, number, if(number < 10000, NULL, number % 10)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeightedIf(number, number, if(number < 50, NULL, number % 10)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeightedIf(number, number, if(number < 0, NULL, number % 10)) t, toTypeName(t) FROM numbers(100); + +SELECT avgWeightedIf(number, number, toNullable(number % 10)) t, toTypeName(t) FROM numbers(1000); + +SELECT avgWeightedIf(if(number < 10000, NULL, number), if(number < 10000, NULL, number), if(number < 10000, NULL, number % 10)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeightedIf(if(number < 
50, NULL, number), if(number < 10000, NULL, number), if(number < 10000, NULL, number % 10)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeightedIf(if(number < 10000, NULL, number), if(number < 50, NULL, number), if(number < 10000, NULL, number % 10)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeightedIf(if(number < 50, NULL, number), if(number < 50, NULL, number), if(number < 10000, NULL, number % 10)) t, toTypeName(t) FROM numbers(100); + +SELECT avgWeightedIf(if(number < 10000, NULL, number), if(number < 10000, NULL, number), if(number < 50, NULL, number % 10)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeightedIf(if(number < 50, NULL, number), if(number < 10000, NULL, number), if(number < 50, NULL, number % 10)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeightedIf(if(number < 10000, NULL, number), if(number < 50, NULL, number), if(number < 50, NULL, number % 10)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeightedIf(if(number < 50, NULL, number), if(number < 50, NULL, number), if(number < 50, NULL, number % 10)) t, toTypeName(t) FROM numbers(100); + +SELECT avgWeightedIf(if(number < 10000, NULL, number), if(number < 10000, NULL, number), if(number < 0, NULL, number % 10)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeightedIf(if(number < 50, NULL, number), if(number < 10000, NULL, number), if(number < 0, NULL, number % 10)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeightedIf(if(number < 10000, NULL, number), if(number < 50, NULL, number), if(number < 0, NULL, number % 10)) t, toTypeName(t) FROM numbers(100); +SELECT avgWeightedIf(if(number < 50, NULL, number), if(number < 50, NULL, number), if(number < 0, NULL, number % 10)) t, toTypeName(t) FROM numbers(100); diff --git a/parser/testdata/02418_aggregate_combinators/ast.json b/parser/testdata/02418_aggregate_combinators/ast.json new file mode 100644 index 000000000..bc47537fe --- /dev/null +++ b/parser/testdata/02418_aggregate_combinators/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function uniqStateMap (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function map (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001266023, + "rows_read": 16, + "bytes_read": 634 + } +} diff --git a/parser/testdata/02418_aggregate_combinators/metadata.json b/parser/testdata/02418_aggregate_combinators/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02418_aggregate_combinators/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02418_aggregate_combinators/query.sql b/parser/testdata/02418_aggregate_combinators/query.sql new file mode 100644 index 000000000..029660456 --- /dev/null +++ 
b/parser/testdata/02418_aggregate_combinators/query.sql @@ -0,0 +1,36 @@ +select uniqStateMap(map(1, number)) from numbers(10); +select uniqStateForEachMapForEachMap(map(1, [map(1, [number, number])])) from numbers(10); +select uniqStateForEachResample(30, 75, 30)([number, number + 1], 30) from numbers(10); +select uniqStateMapForEachResample(30, 75, 30)([map(1, number)], 30) from numbers(10); +select uniqStateForEachMerge(x) as y from (select uniqStateForEachState([number]) as x from numbers(10)); +select uniqMerge(y[1]) from (select uniqStateForEachMerge(x) as y from (select uniqStateForEachState([number]) as x from numbers(10))); + +drop table if exists test; +create table test (x Map(UInt8, AggregateFunction(uniq, UInt64))) engine=Memory; +insert into test select uniqStateMap(map(1, number)) from numbers(10); +select * from test format Null; +select mapApply(k, v -> (k, finalizeAggregation(v)), x) from test; +truncate table test; +drop table test; + +create table test (x Map(UInt8, Array(Map(UInt8, Array(AggregateFunction(uniq, UInt64)))))) engine=Memory; +insert into test select uniqStateForEachMapForEachMap(map(1, [map(1, [number, number])])) from numbers(10); +select mapApply(k, v -> (k, arrayMap(x -> mapApply(k, v -> (k, arrayMap(x -> finalizeAggregation(x), v)), x), v)), x) from test; +select * from test format Null; +truncate table test; +drop table test; + +create table test (x Array(Array(AggregateFunction(uniq, UInt64)))) engine=Memory; +insert into test select uniqStateForEachResample(30, 75, 30)([number, number + 1], 30) from numbers(10); +select arrayMap(x -> arrayMap(x -> finalizeAggregation(x), x), x) from test; +select * from test format Null; +truncate table test; +drop table test; + +create table test (x Array(Array(Map(UInt8, AggregateFunction(uniq, UInt64))))) engine=Memory; +insert into test select uniqStateMapForEachResample(30, 75, 30)([map(1, number)], 30) from numbers(10); +select arrayMap(x -> arrayMap(x -> mapApply(k, v -> (k, finalizeAggregation(v)), x), x), x) from test; +select * from test format Null; +truncate table test; +drop table test; + diff --git a/parser/testdata/02418_keeper_map_keys_limit/ast.json b/parser/testdata/02418_keeper_map_keys_limit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02418_keeper_map_keys_limit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02418_keeper_map_keys_limit/metadata.json b/parser/testdata/02418_keeper_map_keys_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02418_keeper_map_keys_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02418_keeper_map_keys_limit/query.sql b/parser/testdata/02418_keeper_map_keys_limit/query.sql new file mode 100644 index 000000000..a8c3b9dff --- /dev/null +++ b/parser/testdata/02418_keeper_map_keys_limit/query.sql @@ -0,0 +1,30 @@ +-- Tags: no-ordinary-database, no-fasttest + +DROP TABLE IF EXISTS 02418_test SYNC; + +CREATE TABLE 02418_test (key UInt64, value Float64) Engine=KeeperMap('/' || currentDatabase() || '/test2418', 3) PRIMARY KEY(key); + +INSERT INTO 02418_test VALUES (1, 1.1), (2, 2.2); +SELECT count() FROM 02418_test; + +INSERT INTO 02418_test VALUES (3, 3.3), (4, 4.4); -- { serverError LIMIT_EXCEEDED } + +INSERT INTO 02418_test VALUES (1, 2.1), (2, 3.2), (3, 3.3); +SELECT count() FROM 02418_test; + +CREATE TABLE 02418_test_another (key UInt64, value Float64) Engine=KeeperMap('/' || currentDatabase() || '/test2418', 4) PRIMARY 
KEY(key); +INSERT INTO 02418_test VALUES (4, 4.4); -- { serverError LIMIT_EXCEEDED } +INSERT INTO 02418_test_another VALUES (4, 4.4); + +SELECT count() FROM 02418_test; +SELECT count() FROM 02418_test_another; + +DELETE FROM 02418_test WHERE key <= 2; +INSERT INTO 02418_test VALUES (1, 1.1); +INSERT INTO 02418_test VALUES (2, 1.1); -- { serverError LIMIT_EXCEEDED } + +SELECT count() FROM 02418_test; +SELECT count() FROM 02418_test_another; + +DROP TABLE 02418_test SYNC; +DROP TABLE 02418_test_another SYNC; diff --git a/parser/testdata/02418_tautological_if_index/ast.json b/parser/testdata/02418_tautological_if_index/ast.json new file mode 100644 index 000000000..a040bf5e3 --- /dev/null +++ b/parser/testdata/02418_tautological_if_index/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001059763, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02418_tautological_if_index/metadata.json b/parser/testdata/02418_tautological_if_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02418_tautological_if_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02418_tautological_if_index/query.sql b/parser/testdata/02418_tautological_if_index/query.sql new file mode 100644 index 000000000..696c98d99 --- /dev/null +++ b/parser/testdata/02418_tautological_if_index/query.sql @@ -0,0 +1,24 @@ +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +DROP TABLE IF EXISTS constCondOptimization; + +CREATE TABLE constCondOptimization +( + d Date DEFAULT today(), + time DateTime DEFAULT now(), + n Int64 +) +ENGINE = MergeTree ORDER BY (time, n) SETTINGS index_granularity = 1; + +INSERT INTO constCondOptimization (n) SELECT number FROM system.numbers LIMIT 10000; + +-- The queries should use index. +SET max_rows_to_read = 2; + +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. 
+SET parallel_replicas_index_analysis_only_on_coordinator = 0; + +SELECT count() FROM constCondOptimization WHERE if(0, 1, n = 1000); +SELECT count() FROM constCondOptimization WHERE if(0, 1, n = 1000) AND 1 = 1; + +DROP TABLE constCondOptimization; diff --git a/parser/testdata/02419_contingency_array_nullable/ast.json b/parser/testdata/02419_contingency_array_nullable/ast.json new file mode 100644 index 000000000..25fbbc9fd --- /dev/null +++ b/parser/testdata/02419_contingency_array_nullable/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function contingency (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal Array_[UInt64_1, NULL]" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001040521, + "rows_read": 8, + "bytes_read": 308 + } +} diff --git a/parser/testdata/02419_contingency_array_nullable/metadata.json b/parser/testdata/02419_contingency_array_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02419_contingency_array_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02419_contingency_array_nullable/query.sql b/parser/testdata/02419_contingency_array_nullable/query.sql new file mode 100644 index 000000000..92e371272 --- /dev/null +++ b/parser/testdata/02419_contingency_array_nullable/query.sql @@ -0,0 +1 @@ +SELECT contingency(1, [1, NULL]); -- { serverError NOT_IMPLEMENTED } diff --git a/parser/testdata/02420_final_setting/ast.json b/parser/testdata/02420_final_setting/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02420_final_setting/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02420_final_setting/metadata.json b/parser/testdata/02420_final_setting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02420_final_setting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02420_final_setting/query.sql b/parser/testdata/02420_final_setting/query.sql new file mode 100644 index 000000000..d8ca24da3 --- /dev/null +++ b/parser/testdata/02420_final_setting/query.sql @@ -0,0 +1,137 @@ +-- { echoOn } +SYSTEM STOP MERGES tbl; + +-- simple test case +create table if not exists replacing_mt (x String) engine=ReplacingMergeTree() ORDER BY x; + +insert into replacing_mt values ('abc'); +insert into replacing_mt values ('abc'); + +-- expected output is 2 because final is turned off +select count() from replacing_mt; + +set final = 1; +-- expected output is 1 because final is turned on +select count() from replacing_mt; + +-- JOIN test cases +create table if not exists lhs (x String) engine=ReplacingMergeTree() ORDER BY x; +create table if not exists rhs (x String) engine=ReplacingMergeTree() ORDER BY x; + +insert into lhs values ('abc'); +insert into lhs values ('abc'); + +insert into rhs values ('abc'); +insert into rhs values ('abc'); + +set final = 0; +-- expected output is 4 because select_final == 0 +select count() from lhs inner join rhs on lhs.x = rhs.x; + +set final = 1; +-- expected output is 1 because final == 1 +select count() from lhs inner join rhs on lhs.x = rhs.x; + 
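+-- Illustration (added for clarity, not part of the upstream test): the `final`
+-- setting is documented to act as if FINAL were appended to every table in the
+-- query that supports it, so the join above should behave roughly like:
+-- select count() from lhs FINAL inner join rhs FINAL on lhs.x = rhs.x;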
+-- regular non final table +set final = 1; +create table if not exists regular_mt_table (x String) engine=MergeTree() ORDER BY x; +insert into regular_mt_table values ('abc'); +insert into regular_mt_table values ('abc'); +-- expected output is 2, it should silently ignore final modifier +select count() from regular_mt_table; + +-- view test +create materialized VIEW mv_regular_mt_table TO regular_mt_table AS SELECT * FROM regular_mt_table; +create view nv_regular_mt_table AS SELECT * FROM mv_regular_mt_table; + +set final=1; +select count() from nv_regular_mt_table; + +-- join on mix of tables that support / do not support select final with explain +create table if not exists left_table (id UInt64, val_left String) engine=ReplacingMergeTree() ORDER BY id; +create table if not exists middle_table (id UInt64, val_middle String) engine=MergeTree() ORDER BY id; +create table if not exists right_table (id UInt64, val_right String) engine=ReplacingMergeTree() ORDER BY id; +insert into left_table values (1,'a'); +insert into left_table values (1,'b'); +insert into left_table values (1,'c'); +insert into middle_table values (1,'a'); +insert into middle_table values (1,'b'); +insert into right_table values (1,'a'); +insert into right_table values (1,'b'); +insert into right_table values (1,'c'); +-- expected output +-- 1 c a c +-- 1 c b c +select left_table.id,val_left, val_middle, val_right from left_table + inner join middle_table on left_table.id = middle_table.id + inner join right_table on middle_table.id = right_table.id +ORDER BY left_table.id, val_left, val_middle, val_right; + +explain syntax select left_table.id,val_left, val_middle, val_right from left_table + inner join middle_table on left_table.id = middle_table.id + inner join right_table on middle_table.id = right_table.id +ORDER BY left_table.id, val_left, val_middle, val_right; + +-- extra: same with subquery +select left_table.id,val_left, val_middle, val_right from left_table + inner join middle_table on left_table.id = middle_table.id + inner join (SELECT * FROM right_table WHERE id = 1) r on middle_table.id = r.id +ORDER BY left_table.id, val_left, val_middle, val_right; + +-- distributed tables +drop table if exists left_table; +drop table if exists middle_table; +drop table if exists right_table; +create table if not exists left_table (id UInt64, val_left String) engine=ReplacingMergeTree() ORDER BY id; +create table if not exists middle_table (id UInt64, val_middle String) engine=MergeTree() ORDER BY id; +create table if not exists right_table_local (id UInt64, val_right String) engine=ReplacingMergeTree() ORDER BY id; +create table if not exists right_table engine=Distributed('test_shard_localhost', currentDatabase(), right_table_local) AS right_table_local; +insert into left_table values (1,'a'); +insert into left_table values (1,'b'); +insert into left_table values (1,'c'); +insert into middle_table values (1,'a'); +insert into middle_table values (1,'b'); +insert into right_table_local values (1,'a'); +insert into right_table_local values (1,'b'); +insert into right_table_local values (1,'c'); +SET prefer_localhost_replica=0; +-- expected output: +-- 1 c 1 a 1 c +-- 1 c 1 b 1 c +select left_table.*,middle_table.*, right_table.* from left_table + inner join middle_table on left_table.id = middle_table.id + inner join right_table on middle_table.id = right_table.id +ORDER BY left_table.id, val_left, val_middle, val_right; + +SET prefer_localhost_replica=1; +-- expected output: +-- 1 c 1 a 1 c +-- 1 c 1 b 1 c +select
left_table.*,middle_table.*, right_table.* from left_table + inner join middle_table on left_table.id = middle_table.id + inner join right_table on middle_table.id = right_table.id +ORDER BY left_table.id, val_left, val_middle, val_right; + +-- Quite exotic with Merge engine +DROP TABLE IF EXISTS table_to_merge_a; +DROP TABLE IF EXISTS table_to_merge_b; +DROP TABLE IF EXISTS table_to_merge_c; +DROP TABLE IF EXISTS merge_table; + +create table if not exists table_to_merge_a (id UInt64, val String) engine=ReplacingMergeTree() ORDER BY id; +create table if not exists table_to_merge_b (id UInt64, val String) engine=MergeTree() ORDER BY id; +create table if not exists table_to_merge_c (id UInt64, val String) engine=ReplacingMergeTree() ORDER BY id; +CREATE TABLE merge_table Engine=Merge(currentDatabase(), '^(table_to_merge_[a-z])$') AS table_to_merge_a; + +insert into table_to_merge_a values (1,'a'); +insert into table_to_merge_a values (1,'b'); +insert into table_to_merge_a values (1,'c'); +insert into table_to_merge_b values (2,'a'); +insert into table_to_merge_b values (2,'b'); +insert into table_to_merge_c values (3,'a'); +insert into table_to_merge_c values (3,'b'); +insert into table_to_merge_c values (3,'c'); + +-- expected output: +-- 1 c, 2 a, 2 b, 3 c +SELECT * FROM merge_table ORDER BY id, val; diff --git a/parser/testdata/02420_final_setting_analyzer/ast.json b/parser/testdata/02420_final_setting_analyzer/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02420_final_setting_analyzer/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02420_final_setting_analyzer/metadata.json b/parser/testdata/02420_final_setting_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02420_final_setting_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02420_final_setting_analyzer/query.sql b/parser/testdata/02420_final_setting_analyzer/query.sql new file mode 100644 index 000000000..a1660d46b --- /dev/null +++ b/parser/testdata/02420_final_setting_analyzer/query.sql @@ -0,0 +1,113 @@ +-- { echoOn } +set enable_analyzer=1; +SYSTEM STOP MERGES tbl; + +-- simple test case +create table if not exists replacing_mt (x String) engine=ReplacingMergeTree() ORDER BY x; + +insert into replacing_mt values ('abc'); +insert into replacing_mt values ('abc'); + +-- expected output is 2 because final is turned off +select count() from replacing_mt; + +set final = 1; +-- expected output is 1 because final is turned on +select count() from replacing_mt; + +-- JOIN test cases +create table if not exists lhs (x String) engine=ReplacingMergeTree() ORDER BY x; +create table if not exists rhs (x String) engine=ReplacingMergeTree() ORDER BY x; + +insert into lhs values ('abc'); +insert into lhs values ('abc'); + +insert into rhs values ('abc'); +insert into rhs values ('abc'); + +set final = 0; +-- expected output is 4 because select_final == 0 +select count() from lhs inner join rhs on lhs.x = rhs.x; + +set final = 1; +-- expected output is 1 because final == 1 +select count() from lhs inner join rhs on lhs.x = rhs.x; + +-- regular non final table +set final = 1; +create table if not exists regular_mt_table (x String) engine=MergeTree() ORDER BY x; +insert into regular_mt_table values ('abc'); +insert into regular_mt_table values ('abc'); +-- expected output is 2, it should silently ignore final modifier +select count() from regular_mt_table; + +-- view test +create materialized 
VIEW mv_regular_mt_table TO regular_mt_table AS SELECT * FROM regular_mt_table; +create view nv_regular_mt_table AS SELECT * FROM mv_regular_mt_table; + +set final=1; +select count() from nv_regular_mt_table; + +-- join on mix of tables that support / do not support select final with explain +create table if not exists left_table (id UInt64, val_left String) engine=ReplacingMergeTree() ORDER BY id; +create table if not exists middle_table (id UInt64, val_middle String) engine=MergeTree() ORDER BY id; +create table if not exists right_table (id UInt64, val_right String) engine=ReplacingMergeTree() ORDER BY id; +insert into left_table values (1,'a'); +insert into left_table values (1,'b'); +insert into left_table values (1,'c'); +insert into middle_table values (1,'a'); +insert into middle_table values (1,'b'); +insert into right_table values (1,'a'); +insert into right_table values (1,'b'); +insert into right_table values (1,'c'); +-- expected output +-- 1 c a c +-- 1 c b c +select left_table.id,val_left, val_middle, val_right from left_table + inner join middle_table on left_table.id = middle_table.id + inner join right_table on middle_table.id = right_table.id +ORDER BY left_table.id, val_left, val_middle, val_right; + +explain syntax select left_table.id,val_left, val_middle, val_right from left_table + inner join middle_table on left_table.id = middle_table.id + inner join right_table on middle_table.id = right_table.id + ORDER BY left_table.id, val_left, val_middle, val_right; + + +explain syntax select left_table.id,val_left, val_middle, val_right from left_table + inner join middle_table on left_table.id = middle_table.id + inner join right_table on middle_table.id = right_table.id + ORDER BY left_table.id, val_left, val_middle, val_right SETTINGS enable_analyzer=0; + +-- extra: same with subquery +select left_table.id,val_left, val_middle, val_right from left_table + inner join middle_table on left_table.id = middle_table.id + inner join (SELECT * FROM right_table WHERE id = 1) r on middle_table.id = r.id +ORDER BY left_table.id, val_left, val_middle, val_right; + +-- Quite exotic with Merge engine +DROP TABLE IF EXISTS table_to_merge_a; +DROP TABLE IF EXISTS table_to_merge_b; +DROP TABLE IF EXISTS table_to_merge_c; +DROP TABLE IF EXISTS merge_table; + +create table if not exists table_to_merge_a (id UInt64, val String) engine=ReplacingMergeTree() ORDER BY id; +create table if not exists table_to_merge_b (id UInt64, val String) engine=MergeTree() ORDER BY id; +create table if not exists table_to_merge_c (id UInt64, val String) engine=ReplacingMergeTree() ORDER BY id; +CREATE TABLE merge_table Engine=Merge(currentDatabase(), '^(table_to_merge_[a-z])$') AS table_to_merge_a; + +insert into table_to_merge_a values (1,'a'); +insert into table_to_merge_a values (1,'b'); +insert into table_to_merge_a values (1,'c'); +insert into table_to_merge_b values (2,'a'); +insert into table_to_merge_b values (2,'b'); +insert into table_to_merge_c values (3,'a'); +insert into table_to_merge_c values (3,'b'); +insert into table_to_merge_c values (3,'c'); + +-- expected output: +-- 1 c, 2 a, 2 b, 3 c +SELECT * FROM merge_table ORDER BY id, val; + +select sum(number) from numbers(10) settings final=1; +select sum(number) from remote('127.0.0.{1,2}', numbers(10)) settings final=1; diff --git a/parser/testdata/02420_key_condition_actions_dag_bug_40599/ast.json b/parser/testdata/02420_key_condition_actions_dag_bug_40599/ast.json new file mode 100644 index 000000000..e624d3eec --- /dev/null +++ 
b/parser/testdata/02420_key_condition_actions_dag_bug_40599/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery tba (children 3)" + }, + { + "explain": " Identifier tba" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration event_id (children 1)" + }, + { + "explain": " DataType Int64" + }, + { + "explain": " ColumnDeclaration event_dt (children 1)" + }, + { + "explain": " DataType Int64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier event_id" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001295857, + "rows_read": 11, + "bytes_read": 385 + } +} diff --git a/parser/testdata/02420_key_condition_actions_dag_bug_40599/metadata.json b/parser/testdata/02420_key_condition_actions_dag_bug_40599/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02420_key_condition_actions_dag_bug_40599/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02420_key_condition_actions_dag_bug_40599/query.sql b/parser/testdata/02420_key_condition_actions_dag_bug_40599/query.sql new file mode 100644 index 000000000..4d2feacfd --- /dev/null +++ b/parser/testdata/02420_key_condition_actions_dag_bug_40599/query.sql @@ -0,0 +1,10 @@ +create table tba (event_id Int64, event_dt Int64) Engine =MergeTree order by event_id ; +insert into tba select number%500, 20220822 from numbers(1e6); + +select count() from ( + SELECT event_dt FROM ( + select event_dt, 403 AS event_id from ( + select event_dt from tba as tba + where event_id = 9 and ((tba.event_dt >= 20220822 and tba.event_dt <= 20220822)) + ) + ) tba WHERE tba.event_dt >= 20220822 and tba.event_dt <= 20220822 and event_id = 403 ); diff --git a/parser/testdata/02421_decimal_in_precision_issue_41125/ast.json b/parser/testdata/02421_decimal_in_precision_issue_41125/ast.json new file mode 100644 index 000000000..956220e5b --- /dev/null +++ b/parser/testdata/02421_decimal_in_precision_issue_41125/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery dtest (children 1)" + }, + { + "explain": " Identifier dtest" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001458218, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/02421_decimal_in_precision_issue_41125/metadata.json b/parser/testdata/02421_decimal_in_precision_issue_41125/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02421_decimal_in_precision_issue_41125/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02421_decimal_in_precision_issue_41125/query.sql b/parser/testdata/02421_decimal_in_precision_issue_41125/query.sql new file mode 100644 index 000000000..e779498f4 --- /dev/null +++ b/parser/testdata/02421_decimal_in_precision_issue_41125/query.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS dtest; + + +CREATE TABLE dtest ( `a` Decimal(18, 0), `b` Decimal(18, 1), `c` Decimal(36, 0) ) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO dtest VALUES ('33', '44.4', '35'); + +SELECT count() == 0 FROM (SELECT '33.3' :: Decimal(9, 1) AS a WHERE a IN ('33.33' :: Decimal(9, 2))); + +SELECT count() == 0 FROM dtest WHERE a IN toDecimal32('33.3000', 4); +SELECT count() == 0 FROM dtest WHERE a IN 
toDecimal64('33.3000', 4); +SELECT count() == 0 FROM dtest WHERE a IN toDecimal128('33.3000', 4); +SELECT count() == 0 FROM dtest WHERE a IN toDecimal256('33.3000', 4); + +SELECT count() == 0 FROM dtest WHERE b IN toDecimal32('44.4000', 0); +SELECT count() == 0 FROM dtest WHERE b IN toDecimal64('44.4000', 0); +SELECT count() == 0 FROM dtest WHERE b IN toDecimal128('44.4000', 0); +SELECT count() == 0 FROM dtest WHERE b IN toDecimal256('44.4000', 0); + +SELECT count() == 1 FROM dtest WHERE b IN toDecimal32('44.4000', 4); +SELECT count() == 1 FROM dtest WHERE b IN toDecimal64('44.4000', 4); +SELECT count() == 1 FROM dtest WHERE b IN toDecimal128('44.4000', 4); +SELECT count() == 1 FROM dtest WHERE b IN toDecimal256('44.4000', 4); + +SET enable_analyzer = 1; + +SELECT count() == 0 FROM (SELECT '33.3' :: Decimal(9, 1) AS a WHERE a IN ('33.33' :: Decimal(9, 2))); + +SELECT count() == 0 FROM dtest WHERE a IN toDecimal32('33.3000', 4); +SELECT count() == 0 FROM dtest WHERE a IN toDecimal64('33.3000', 4); +SELECT count() == 0 FROM dtest WHERE a IN toDecimal128('33.3000', 4); +SELECT count() == 0 FROM dtest WHERE a IN toDecimal256('33.3000', 4); + +SELECT count() == 0 FROM dtest WHERE b IN toDecimal32('44.4000', 0); +SELECT count() == 0 FROM dtest WHERE b IN toDecimal64('44.4000', 0); +SELECT count() == 0 FROM dtest WHERE b IN toDecimal128('44.4000', 0); +SELECT count() == 0 FROM dtest WHERE b IN toDecimal256('44.4000', 0); + +SELECT count() == 1 FROM dtest WHERE b IN toDecimal32('44.4000', 4); +SELECT count() == 1 FROM dtest WHERE b IN toDecimal64('44.4000', 4); +SELECT count() == 1 FROM dtest WHERE b IN toDecimal128('44.4000', 4); +SELECT count() == 1 FROM dtest WHERE b IN toDecimal256('44.4000', 4); + +DROP TABLE IF EXISTS dtest; diff --git a/parser/testdata/02421_explain_subquery/ast.json b/parser/testdata/02421_explain_subquery/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02421_explain_subquery/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02421_explain_subquery/metadata.json b/parser/testdata/02421_explain_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02421_explain_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02421_explain_subquery/query.sql b/parser/testdata/02421_explain_subquery/query.sql new file mode 100644 index 000000000..02f45e8cc --- /dev/null +++ b/parser/testdata/02421_explain_subquery/query.sql @@ -0,0 +1,61 @@ +SET enable_analyzer = 0; + +SELECT count() > 3 FROM (EXPLAIN PIPELINE header = 1 SELECT * FROM system.numbers ORDER BY number DESC) WHERE explain LIKE '%Header: number UInt64%'; +SELECT count() > 0 FROM (EXPLAIN PLAN SELECT * FROM system.numbers ORDER BY number DESC) WHERE explain ILIKE '%Sort%'; +SELECT count() > 0 FROM (EXPLAIN SELECT * FROM system.numbers ORDER BY number DESC) WHERE explain ILIKE '%Sort%'; +SELECT count() > 0 FROM (EXPLAIN CURRENT TRANSACTION); +SELECT count() == 1 FROM (EXPLAIN SYNTAX SELECT number FROM system.numbers ORDER BY number DESC) WHERE explain ILIKE 'SELECT%'; +SELECT trim(explain) == 'Asterisk' FROM (EXPLAIN AST SELECT * FROM system.numbers LIMIT 10) WHERE explain LIKE '%Asterisk%'; + +SELECT * FROM ( + EXPLAIN AST SELECT * FROM ( + EXPLAIN PLAN SELECT * FROM ( + EXPLAIN SYNTAX SELECT trim(explain) == 'Asterisk' FROM ( + EXPLAIN AST SELECT * FROM system.numbers LIMIT 10 + ) WHERE explain LIKE '%Asterisk%' + ) + ) +) FORMAT Null; + +SELECT (EXPLAIN SYNTAX oneline = 1 SELECT 1) 
== 'SELECT 1'; + +SELECT * FROM viewExplain('', ''); -- { serverError BAD_ARGUMENTS } +SELECT * FROM viewExplain('EXPLAIN AST', ''); -- { serverError BAD_ARGUMENTS } +SELECT * FROM viewExplain('EXPLAIN AST', '', 1); -- { serverError BAD_ARGUMENTS } +SELECT * FROM viewExplain('EXPLAIN AST', '', ''); -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( a UInt64 ) Engine = MergeTree ORDER BY tuple() AS SELECT number AS a FROM system.numbers LIMIT 100000; + +SELECT rows > 1000 FROM (EXPLAIN ESTIMATE SELECT sum(a) FROM t1); +SELECT count() == 1 FROM (EXPLAIN ESTIMATE SELECT sum(a) FROM t1); + +DROP TABLE t1; + +SET enable_analyzer = 1; + +SELECT count() > 3 FROM (EXPLAIN PIPELINE header = 1 SELECT * FROM system.numbers ORDER BY number DESC) WHERE explain LIKE '%Header: \_\_table1.number UInt64%'; +SELECT count() > 0 FROM (EXPLAIN PLAN SELECT * FROM system.numbers ORDER BY number DESC) WHERE explain ILIKE '%Sort%'; +SELECT count() > 0 FROM (EXPLAIN SELECT * FROM system.numbers ORDER BY number DESC) WHERE explain ILIKE '%Sort%'; +SELECT count() > 0 FROM (EXPLAIN CURRENT TRANSACTION); +SELECT count() == 1 FROM (EXPLAIN SYNTAX SELECT number FROM system.numbers ORDER BY number DESC) WHERE explain ILIKE 'SELECT%'; +SELECT trim(explain) == 'Asterisk' FROM (EXPLAIN AST SELECT * FROM system.numbers LIMIT 10) WHERE explain LIKE '%Asterisk%'; + +SELECT * FROM ( + EXPLAIN AST SELECT * FROM ( + EXPLAIN PLAN SELECT * FROM ( + EXPLAIN SYNTAX SELECT trim(explain) == 'Asterisk' FROM ( + EXPLAIN AST SELECT * FROM system.numbers LIMIT 10 + ) WHERE explain LIKE '%Asterisk%' + ) + ) +) FORMAT Null; + +SELECT (EXPLAIN SYNTAX oneline = 1 SELECT 1) == 'SELECT 1 FROM system.one'; + +SELECT * FROM viewExplain('', ''); -- { serverError BAD_ARGUMENTS } +SELECT * FROM viewExplain('EXPLAIN AST', ''); -- { serverError BAD_ARGUMENTS } +SELECT * FROM viewExplain('EXPLAIN AST', '', 1); -- { serverError BAD_ARGUMENTS } +SELECT * FROM viewExplain('EXPLAIN AST', '', ''); -- { serverError BAD_ARGUMENTS } + +-- EXPLAIN ESTIMATE is not supported in experimental analyzer diff --git a/parser/testdata/02421_exponential_join_rewrite_21557/ast.json b/parser/testdata/02421_exponential_join_rewrite_21557/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02421_exponential_join_rewrite_21557/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02421_exponential_join_rewrite_21557/metadata.json b/parser/testdata/02421_exponential_join_rewrite_21557/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02421_exponential_join_rewrite_21557/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02421_exponential_join_rewrite_21557/query.sql b/parser/testdata/02421_exponential_join_rewrite_21557/query.sql new file mode 100644 index 000000000..596112fa2 --- /dev/null +++ b/parser/testdata/02421_exponential_join_rewrite_21557/query.sql @@ -0,0 +1,455 @@ +-- Tags: long + +-- https://github.com/ClickHouse/ClickHouse/issues/21557 + +DROP TABLE IF EXISTS store_returns; +DROP TABLE IF EXISTS catalog_sales; +DROP TABLE IF EXISTS catalog_returns; +DROP TABLE IF EXISTS date_dim; +DROP TABLE IF EXISTS store; +DROP TABLE IF EXISTS customer; +DROP TABLE IF EXISTS customer_demographics; +DROP TABLE IF EXISTS promotion; +DROP TABLE IF EXISTS household_demographics; +DROP TABLE IF EXISTS customer_address; +DROP TABLE IF EXISTS income_band; +DROP TABLE IF EXISTS item; + +CREATE TABLE store_sales +( + `ss_sold_date_sk` 
Nullable(Int64), + `ss_sold_time_sk` Nullable(Int64), + `ss_item_sk` Int64, + `ss_customer_sk` Nullable(Int64), + `ss_cdemo_sk` Nullable(Int64), + `ss_hdemo_sk` Nullable(Int64), + `ss_addr_sk` Nullable(Int64), + `ss_store_sk` Nullable(Int64), + `ss_promo_sk` Nullable(Int64), + `ss_ticket_number` Int64, + `ss_quantity` Nullable(Int64), + `ss_wholesale_cost` Nullable(Float32), + `ss_list_price` Nullable(Float32), + `ss_sales_price` Nullable(Float32), + `ss_ext_discount_amt` Nullable(Float32), + `ss_ext_sales_price` Nullable(Float32), + `ss_ext_wholesale_cost` Nullable(Float32), + `ss_ext_list_price` Nullable(Float32), + `ss_ext_tax` Nullable(Float32), + `ss_coupon_amt` Nullable(Float32), + `ss_net_paid` Nullable(Float32), + `ss_net_paid_inc_tax` Nullable(Float32), + `ss_net_profit` Nullable(Float32), + `ss_promo_sk_nn` Int16, + `ss_promo_sk_n2` Nullable(Int16) +) +ENGINE = MergeTree ORDER BY (ss_item_sk, ss_ticket_number); + +CREATE TABLE store_returns +( + `sr_returned_date_sk` Nullable(Int64), + `sr_return_time_sk` Nullable(Int64), + `sr_item_sk` Int64, + `sr_customer_sk` Nullable(Int64), + `sr_cdemo_sk` Nullable(Int64), + `sr_hdemo_sk` Nullable(Int64), + `sr_addr_sk` Nullable(Int64), + `sr_store_sk` Nullable(Int64), + `sr_reason_sk` Nullable(Int64), + `sr_ticket_number` Int64, + `sr_return_quantity` Nullable(Int64), + `sr_return_amt` Nullable(Float32), + `sr_return_tax` Nullable(Float32), + `sr_return_amt_inc_tax` Nullable(Float32), + `sr_fee` Nullable(Float32), + `sr_return_ship_cost` Nullable(Float32), + `sr_refunded_cash` Nullable(Float32), + `sr_reversed_charge` Nullable(Float32), + `sr_store_credit` Nullable(Float32), + `sr_net_loss` Nullable(Float32) +) +ENGINE = MergeTree ORDER BY (sr_item_sk, sr_ticket_number); + +CREATE TABLE catalog_sales +( + `cs_sold_date_sk` Nullable(Int64), + `cs_sold_time_sk` Nullable(Int64), + `cs_ship_date_sk` Nullable(Int64), + `cs_bill_customer_sk` Nullable(Int64), + `cs_bill_cdemo_sk` Nullable(Int64), + `cs_bill_hdemo_sk` Nullable(Int64), + `cs_bill_addr_sk` Nullable(Int64), + `cs_ship_customer_sk` Nullable(Int64), + `cs_ship_cdemo_sk` Nullable(Int64), + `cs_ship_hdemo_sk` Nullable(Int64), + `cs_ship_addr_sk` Nullable(Int64), + `cs_call_center_sk` Nullable(Int64), + `cs_catalog_page_sk` Nullable(Int64), + `cs_ship_mode_sk` Nullable(Int64), + `cs_warehouse_sk` Nullable(Int64), + `cs_item_sk` Int64, + `cs_promo_sk` Nullable(Int64), + `cs_order_number` Int64, + `cs_quantity` Nullable(Int64), + `cs_wholesale_cost` Nullable(Float32), + `cs_list_price` Nullable(Float32), + `cs_sales_price` Nullable(Float32), + `cs_ext_discount_amt` Nullable(Float32), + `cs_ext_sales_price` Nullable(Float32), + `cs_ext_wholesale_cost` Nullable(Float32), + `cs_ext_list_price` Nullable(Float32), + `cs_ext_tax` Nullable(Float32), + `cs_coupon_amt` Nullable(Float32), + `cs_ext_ship_cost` Nullable(Float32), + `cs_net_paid` Nullable(Float32), + `cs_net_paid_inc_tax` Nullable(Float32), + `cs_net_paid_inc_ship` Nullable(Float32), + `cs_net_paid_inc_ship_tax` Nullable(Float32), + `cs_net_profit` Nullable(Float32) +) +ENGINE = MergeTree ORDER BY (cs_item_sk, cs_order_number); + +CREATE TABLE catalog_returns +( + `cr_returned_date_sk` Nullable(Int64), + `cr_returned_time_sk` Nullable(Int64), + `cr_item_sk` Int64, + `cr_refunded_customer_sk` Nullable(Int64), + `cr_refunded_cdemo_sk` Nullable(Int64), + `cr_refunded_hdemo_sk` Nullable(Int64), + `cr_refunded_addr_sk` Nullable(Int64), + `cr_returning_customer_sk` Nullable(Int64), + `cr_returning_cdemo_sk` Nullable(Int64), + 
`cr_returning_hdemo_sk` Nullable(Int64), + `cr_returning_addr_sk` Nullable(Int64), + `cr_call_center_sk` Nullable(Int64), + `cr_catalog_page_sk` Nullable(Int64), + `cr_ship_mode_sk` Nullable(Int64), + `cr_warehouse_sk` Nullable(Int64), + `cr_reason_sk` Nullable(Int64), + `cr_order_number` Int64, + `cr_return_quantity` Nullable(Int64), + `cr_return_amount` Nullable(Float32), + `cr_return_tax` Nullable(Float32), + `cr_return_amt_inc_tax` Nullable(Float32), + `cr_fee` Nullable(Float32), + `cr_return_ship_cost` Nullable(Float32), + `cr_refunded_cash` Nullable(Float32), + `cr_reversed_charge` Nullable(Float32), + `cr_store_credit` Nullable(Float32), + `cr_net_loss` Nullable(Float32) +) +ENGINE = MergeTree ORDER BY (cr_item_sk, cr_order_number); + +CREATE TABLE date_dim +( + `d_date_sk` Int64, + `d_date_id` String, + `d_date` Nullable(Date), + `d_month_seq` Nullable(Int64), + `d_week_seq` Nullable(Int64), + `d_quarter_seq` Nullable(Int64), + `d_year` Nullable(Int64), + `d_dow` Nullable(Int64), + `d_moy` Nullable(Int64), + `d_dom` Nullable(Int64), + `d_qoy` Nullable(Int64), + `d_fy_year` Nullable(Int64), + `d_fy_quarter_seq` Nullable(Int64), + `d_fy_week_seq` Nullable(Int64), + `d_day_name` Nullable(String), + `d_quarter_name` Nullable(String), + `d_holiday` Nullable(String), + `d_weekend` Nullable(String), + `d_following_holiday` Nullable(String), + `d_first_dom` Nullable(Int64), + `d_last_dom` Nullable(Int64), + `d_same_day_ly` Nullable(Int64), + `d_same_day_lq` Nullable(Int64), + `d_current_day` Nullable(String), + `d_current_week` Nullable(String), + `d_current_month` Nullable(String), + `d_current_quarter` Nullable(String), + `d_current_year` Nullable(String) +) +ENGINE = MergeTree ORDER BY d_date_sk; + +CREATE TABLE store +( + `s_store_sk` Int64, + `s_store_id` String, + `s_rec_start_date` Nullable(Date), + `s_rec_end_date` Nullable(Date), + `s_closed_date_sk` Nullable(Int64), + `s_store_name` Nullable(String), + `s_number_employees` Nullable(Int64), + `s_floor_space` Nullable(Int64), + `s_hours` Nullable(String), + `s_manager` Nullable(String), + `s_market_id` Nullable(Int64), + `s_geography_class` Nullable(String), + `s_market_desc` Nullable(String), + `s_market_manager` Nullable(String), + `s_division_id` Nullable(Int64), + `s_division_name` Nullable(String), + `s_company_id` Nullable(Int64), + `s_company_name` Nullable(String), + `s_street_number` Nullable(String), + `s_street_name` Nullable(String), + `s_street_type` Nullable(String), + `s_suite_number` Nullable(String), + `s_city` Nullable(String), + `s_county` Nullable(String), + `s_state` Nullable(String), + `s_zip` Nullable(String), + `s_country` Nullable(String), + `s_gmt_offset` Nullable(Float32), + `s_tax_precentage` Nullable(Float32) +) +ENGINE = MergeTree ORDER BY s_store_sk; + +CREATE TABLE customer +( + `c_customer_sk` Int64, + `c_customer_id` String, + `c_current_cdemo_sk` Nullable(Int64), + `c_current_hdemo_sk` Nullable(Int64), + `c_current_addr_sk` Nullable(Int64), + `c_first_shipto_date_sk` Nullable(Int64), + `c_first_sales_date_sk` Nullable(Int64), + `c_salutation` Nullable(String), + `c_first_name` Nullable(String), + `c_last_name` Nullable(String), + `c_preferred_cust_flag` Nullable(String), + `c_birth_day` Nullable(Int64), + `c_birth_month` Nullable(Int64), + `c_birth_year` Nullable(Int64), + `c_birth_country` Nullable(String), + `c_login` Nullable(String), + `c_email_address` Nullable(String), + `c_last_review_date` Nullable(String) +) +ENGINE = MergeTree ORDER BY c_customer_sk; + +CREATE TABLE customer_demographics 
+( + `cd_demo_sk` Int64, + `cd_gender` Nullable(String), + `cd_marital_status` Nullable(String), + `cd_education_status` Nullable(String), + `cd_purchase_estimate` Nullable(Int64), + `cd_credit_rating` Nullable(String), + `cd_dep_count` Nullable(Int64), + `cd_dep_employed_count` Nullable(Int64), + `cd_dep_college_count` Nullable(Int64) +) +ENGINE = MergeTree ORDER BY cd_demo_sk; + +CREATE TABLE promotion +( + `p_promo_sk` Int64, + `p_promo_id` String, + `p_start_date_sk` Nullable(Int64), + `p_end_date_sk` Nullable(Int64), + `p_item_sk` Nullable(Int64), + `p_cost` Nullable(Float64), + `p_response_target` Nullable(Int64), + `p_promo_name` Nullable(String), + `p_channel_dmail` Nullable(String), + `p_channel_email` Nullable(String), + `p_channel_catalog` Nullable(String), + `p_channel_tv` Nullable(String), + `p_channel_radio` Nullable(String), + `p_channel_press` Nullable(String), + `p_channel_event` Nullable(String), + `p_channel_demo` Nullable(String), + `p_channel_details` Nullable(String), + `p_purpose` Nullable(String), + `p_discount_active` Nullable(String) +) +ENGINE = MergeTree ORDER BY p_promo_sk; + +CREATE TABLE household_demographics +( + `hd_demo_sk` Int64, + `hd_income_band_sk` Nullable(Int64), + `hd_buy_potential` Nullable(String), + `hd_dep_count` Nullable(Int64), + `hd_vehicle_count` Nullable(Int64) +) +ENGINE = MergeTree ORDER BY hd_demo_sk; + +CREATE TABLE customer_address +( + `ca_address_sk` Int64, + `ca_address_id` String, + `ca_street_number` Nullable(String), + `ca_street_name` Nullable(String), + `ca_street_type` Nullable(String), + `ca_suite_number` Nullable(String), + `ca_city` Nullable(String), + `ca_county` Nullable(String), + `ca_state` Nullable(String), + `ca_zip` Nullable(String), + `ca_country` Nullable(String), + `ca_gmt_offset` Nullable(Float32), + `ca_location_type` Nullable(String) +) +ENGINE = MergeTree ORDER BY ca_address_sk; + +CREATE TABLE income_band +( + `ib_income_band_sk` Int64, + `ib_lower_bound` Nullable(Int64), + `ib_upper_bound` Nullable(Int64) +) +ENGINE = MergeTree ORDER BY ib_income_band_sk; + +CREATE TABLE item +( + `i_item_sk` Int64, + `i_item_id` String, + `i_rec_start_date` Nullable(Date), + `i_rec_end_date` Nullable(Date), + `i_item_desc` Nullable(String), + `i_current_price` Nullable(Float32), + `i_wholesale_cost` Nullable(Float32), + `i_brand_id` Nullable(Int64), + `i_brand` Nullable(String), + `i_class_id` Nullable(Int64), + `i_class` Nullable(String), + `i_category_id` Nullable(Int64), + `i_category` Nullable(String), + `i_manufact_id` Nullable(Int64), + `i_manufact` Nullable(String), + `i_size` Nullable(String), + `i_formulation` Nullable(String), + `i_color` Nullable(String), + `i_units` Nullable(String), + `i_container` Nullable(String), + `i_manager_id` Nullable(Int64), + `i_product_name` Nullable(String) +) +ENGINE = MergeTree ORDER BY i_item_sk; + +-- `parallel_hash` uses two-level hash tables (which contain 256 tables internally). +-- As a result, it preallocates too much memory and a memory limit exception occurs.
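+-- For contrast (hypothetical line, not executed by this test), the algorithm
+-- that triggers the over-allocation described above would be enabled with:
+-- SET join_algorithm = 'parallel_hash';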
+SET join_algorithm = 'hash'; +EXPLAIN SYNTAX +WITH + cs_ui AS + ( + SELECT + cs_item_sk, + sum(cs_ext_list_price) AS sale, + sum((cr_refunded_cash + cr_reversed_charge) + cr_store_credit) AS refund + FROM catalog_sales , catalog_returns + WHERE (cs_item_sk = cr_item_sk) AND (cs_order_number = cr_order_number) + GROUP BY cs_item_sk + HAVING sum(cs_ext_list_price) > (2 * sum((cr_refunded_cash + cr_reversed_charge) + cr_store_credit)) + ), + cross_sales AS + ( + SELECT + i_product_name AS product_name, + i_item_sk AS item_sk, + s_store_name AS store_name, + s_zip AS store_zip, + ad1.ca_street_number AS b_street_number, + ad1.ca_street_name AS b_street_name, + ad1.ca_city AS b_city, + ad1.ca_zip AS b_zip, + ad2.ca_street_number AS c_street_number, + ad2.ca_street_name AS c_street_name, + ad2.ca_city AS c_city, + ad2.ca_zip AS c_zip, + d1.d_year AS syear, + d2.d_year AS fsyear, + d3.d_year AS s2year, + count(*) AS cnt, + sum(ss_wholesale_cost) AS s1, + sum(ss_list_price) AS s2, + sum(ss_coupon_amt) AS s3 + FROM store_sales + , store_returns + , cs_ui + , date_dim AS d1 + , date_dim AS d2 + , date_dim AS d3 + , store + , customer + , customer_demographics AS cd1 + , customer_demographics AS cd2 + , promotion + , household_demographics AS hd1 + , household_demographics AS hd2 + , customer_address AS ad1 + , customer_address AS ad2 + , income_band AS ib1 + , income_band AS ib2 + , item + WHERE (ss_store_sk = s_store_sk) AND (ss_sold_date_sk = d1.d_date_sk) AND (ss_customer_sk = c_customer_sk) AND (ss_cdemo_sk = cd1.cd_demo_sk) AND (ss_hdemo_sk = hd1.hd_demo_sk) AND (ss_addr_sk = ad1.ca_address_sk) AND (ss_item_sk = i_item_sk) AND (ss_item_sk = sr_item_sk) AND (ss_ticket_number = sr_ticket_number) AND (ss_item_sk = cs_ui.cs_item_sk) AND (c_current_cdemo_sk = cd2.cd_demo_sk) AND (c_current_hdemo_sk = hd2.hd_demo_sk) AND (c_current_addr_sk = ad2.ca_address_sk) AND (c_first_sales_date_sk = d2.d_date_sk) AND (c_first_shipto_date_sk = d3.d_date_sk) AND (ss_promo_sk = p_promo_sk) AND (hd1.hd_income_band_sk = ib1.ib_income_band_sk) AND (hd2.hd_income_band_sk = ib2.ib_income_band_sk) AND (cd1.cd_marital_status != cd2.cd_marital_status) AND (i_color IN ('maroon', 'burnished', 'dim', 'steel', 'navajo', 'chocolate')) AND ((i_current_price >= 35) AND (i_current_price <= (35 + 10))) AND ((i_current_price >= (35 + 1)) AND (i_current_price <= (35 + 15))) + GROUP BY + i_product_name, + i_item_sk, + s_store_name, + s_zip, + ad1.ca_street_number, + ad1.ca_street_name, + ad1.ca_city, + ad1.ca_zip, + ad2.ca_street_number, + ad2.ca_street_name, + ad2.ca_city, + ad2.ca_zip, + d1.d_year, + d2.d_year, + d3.d_year + ) +SELECT + cs1.product_name, + cs1.store_name, + cs1.store_zip, + cs1.b_street_number, + cs1.b_street_name, + cs1.b_city, + cs1.b_zip, + cs1.c_street_number, + cs1.c_street_name, + cs1.c_city, + cs1.c_zip, + cs1.syear, + cs1.cnt, + cs1.s1 AS s11, + cs1.s2 AS s21, + cs1.s3 AS s31, + cs2.s1 AS s12, + cs2.s2 AS s22, + cs2.s3 AS s32, + cs2.syear, + cs2.cnt +FROM cross_sales AS cs1 , cross_sales AS cs2 +WHERE (cs1.item_sk = cs2.item_sk) AND (cs1.syear = 2000) AND (cs2.syear = (2000 + 1)) AND (cs2.cnt <= cs1.cnt) AND (cs1.store_name = cs2.store_name) AND (cs1.store_zip = cs2.store_zip) +ORDER BY + cs1.product_name ASC, + cs1.store_name ASC, + cs2.cnt ASC, + cs1.s1 ASC, + cs2.s1 ASC +FORMAT Null +; + +SELECT 'Ok'; + +DROP TABLE IF EXISTS store_returns; +DROP TABLE IF EXISTS catalog_sales; +DROP TABLE IF EXISTS catalog_returns; +DROP TABLE IF EXISTS date_dim; +DROP TABLE IF EXISTS store; +DROP TABLE IF EXISTS 
customer; +DROP TABLE IF EXISTS customer_demographics; +DROP TABLE IF EXISTS promotion; +DROP TABLE IF EXISTS household_demographics; +DROP TABLE IF EXISTS customer_address; +DROP TABLE IF EXISTS income_band; +DROP TABLE IF EXISTS item; diff --git a/parser/testdata/02421_json_decimals_as_strings/ast.json b/parser/testdata/02421_json_decimals_as_strings/ast.json new file mode 100644 index 000000000..bfdefc266 --- /dev/null +++ b/parser/testdata/02421_json_decimals_as_strings/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDecimal128 (alias d) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_42.42" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Identifier JSONEachRow" + }, + { + "explain": " Set" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.00172868, + "rows_read": 10, + "bytes_read": 353 + } +} diff --git a/parser/testdata/02421_json_decimals_as_strings/metadata.json b/parser/testdata/02421_json_decimals_as_strings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02421_json_decimals_as_strings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02421_json_decimals_as_strings/query.sql b/parser/testdata/02421_json_decimals_as_strings/query.sql new file mode 100644 index 000000000..dd7d9958a --- /dev/null +++ b/parser/testdata/02421_json_decimals_as_strings/query.sql @@ -0,0 +1,4 @@ +select toDecimal128(42.42, 5) as d format JSONEachRow settings output_format_json_quote_decimals=1; +insert into function file(02421_data.jsonl) select '42.42' as d settings engine_file_truncate_on_insert=1; +select * from file(02421_data.jsonl, auto, 'd Decimal32(3)'); + diff --git a/parser/testdata/02422_insert_different_granularity/ast.json b/parser/testdata/02422_insert_different_granularity/ast.json new file mode 100644 index 000000000..9e459f30e --- /dev/null +++ b/parser/testdata/02422_insert_different_granularity/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '=== adaptive granularity: table one -; table two + ==='" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001304436, + "rows_read": 5, + "bytes_read": 225 + } +} diff --git a/parser/testdata/02422_insert_different_granularity/metadata.json b/parser/testdata/02422_insert_different_granularity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02422_insert_different_granularity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02422_insert_different_granularity/query.sql b/parser/testdata/02422_insert_different_granularity/query.sql new file mode 100644 index 000000000..8d5c43fd9 --- /dev/null +++ b/parser/testdata/02422_insert_different_granularity/query.sql @@ -0,0 +1,81 @@ +SELECT '=== adaptive granularity: table one -; table two + ==='; + +DROP TABLE IF EXISTS table_one; +CREATE TABLE table_one (id
UInt64, value UInt64) +ENGINE = MergeTree +PARTITION BY id +ORDER BY value +SETTINGS index_granularity = 8192, index_granularity_bytes = 0, min_bytes_for_wide_part = 100; + +DROP TABLE IF EXISTS table_two; +CREATE TABLE table_two (id UInt64, value UInt64) +ENGINE = MergeTree +PARTITION BY id +ORDER BY value +SETTINGS index_granularity = 8192, index_granularity_bytes = 1024, min_bytes_for_wide_part = 100; + +INSERT INTO table_one SELECT intDiv(number, 10), number FROM numbers(100); + +ALTER TABLE table_two REPLACE PARTITION 0 FROM table_one; + +SELECT '=== adaptive granularity: table one -; table two - ==='; + +DROP TABLE IF EXISTS table_one; + +CREATE TABLE table_one (id UInt64, value UInt64) +ENGINE = MergeTree +PARTITION BY id +ORDER BY value +SETTINGS index_granularity = 8192, index_granularity_bytes = 0, min_bytes_for_wide_part = 100; + +DROP TABLE IF EXISTS table_two; + +CREATE TABLE table_two (id UInt64, value UInt64) +ENGINE = MergeTree +PARTITION BY id +ORDER BY value +SETTINGS index_granularity = 8192, index_granularity_bytes = 0, min_bytes_for_wide_part = 100; + +INSERT INTO table_one SELECT intDiv(number, 10), number FROM numbers(100); + +ALTER TABLE table_two REPLACE PARTITION 0 FROM table_one; + +SELECT '=== adaptive granularity: table one +; table two + ==='; + +DROP TABLE IF EXISTS table_one; +CREATE TABLE table_one (id UInt64, value UInt64) +ENGINE = MergeTree +PARTITION BY id +ORDER BY value +SETTINGS index_granularity = 8192, index_granularity_bytes = 1024, min_bytes_for_wide_part = 100; + +DROP TABLE IF EXISTS table_two; +CREATE TABLE table_two (id UInt64, value UInt64) +ENGINE = MergeTree +PARTITION BY id +ORDER BY value +SETTINGS index_granularity = 8192, index_granularity_bytes = 1024, min_bytes_for_wide_part = 100; + +INSERT INTO table_one SELECT intDiv(number, 10), number FROM numbers(100); + +ALTER TABLE table_two REPLACE PARTITION 0 FROM table_one; + +SELECT '=== adaptive granularity: table one +; table two - ==='; + +DROP TABLE IF EXISTS table_one; +CREATE TABLE table_one (id UInt64, value UInt64) +ENGINE = MergeTree +PARTITION BY id +ORDER BY value +SETTINGS index_granularity = 8192, index_granularity_bytes = 1024, min_bytes_for_wide_part = 100; + +DROP TABLE IF EXISTS table_two; +CREATE TABLE table_two (id UInt64, value UInt64) +ENGINE = MergeTree +PARTITION BY id +ORDER BY value +SETTINGS index_granularity = 8192, index_granularity_bytes = 0, min_bytes_for_wide_part = 100; + +INSERT INTO table_one SELECT intDiv(number, 10), number FROM numbers(100); + +ALTER TABLE table_two REPLACE PARTITION 0 FROM table_one; -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02422_msgpack_uuid_wrong_column/ast.json b/parser/testdata/02422_msgpack_uuid_wrong_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02422_msgpack_uuid_wrong_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02422_msgpack_uuid_wrong_column/metadata.json b/parser/testdata/02422_msgpack_uuid_wrong_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02422_msgpack_uuid_wrong_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02422_msgpack_uuid_wrong_column/query.sql b/parser/testdata/02422_msgpack_uuid_wrong_column/query.sql new file mode 100644 index 000000000..4d790354d --- /dev/null +++ b/parser/testdata/02422_msgpack_uuid_wrong_column/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-parallel, no-fasttest + +insert into function
file(02422_data.msgpack) select toUUID('f4cdd80d-5d15-4bdc-9527-adcca635ec1f') as uuid settings output_format_msgpack_uuid_representation='ext'; +select * from file(02422_data.msgpack, auto, 'x Int32'); -- {serverError ILLEGAL_COLUMN} diff --git a/parser/testdata/02422_read_numbers_as_strings/ast.json b/parser/testdata/02422_read_numbers_as_strings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02422_read_numbers_as_strings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02422_read_numbers_as_strings/metadata.json b/parser/testdata/02422_read_numbers_as_strings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02422_read_numbers_as_strings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02422_read_numbers_as_strings/query.sql b/parser/testdata/02422_read_numbers_as_strings/query.sql new file mode 100644 index 000000000..a1b3afa81 --- /dev/null +++ b/parser/testdata/02422_read_numbers_as_strings/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-fasttest + +set input_format_json_read_numbers_as_strings=1; +set input_format_json_infer_array_of_dynamic_from_array_of_different_types=0; +select * from format(JSONEachRow, '{"x" : 123}\n{"x" : "str"}'); +select * from format(JSONEachRow, '{"x" : [123, "str"]}'); +select * from format(JSONEachRow, '{"x" : [123, "456"]}\n{"x" : ["str", "rts"]}'); + diff --git a/parser/testdata/02423_json_quote_float64/ast.json b/parser/testdata/02423_json_quote_float64/ast.json new file mode 100644 index 000000000..6af2df8d7 --- /dev/null +++ b/parser/testdata/02423_json_quote_float64/ast.json @@ -0,0 +1,100 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function CAST (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '42424.4242424242'" + }, + { + "explain": " Literal 'Float64'" + }, + { + "explain": " Function array (alias arr) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '42.42'" + }, + { + "explain": " Literal 'Float64'" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '42.42'" + }, + { + "explain": " Literal 'Float64'" + }, + { + "explain": " Function tuple (alias tuple) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '42.42'" + }, + { + "explain": " Literal 'Float64'" + }, + { + "explain": " Identifier JSONEachRow" + }, + { + "explain": " Set" + } + ], + + "rows": 26, + + "statistics": + { + "elapsed": 0.001211503, + "rows_read": 26, + "bytes_read": 973 + } +} diff --git a/parser/testdata/02423_json_quote_float64/metadata.json b/parser/testdata/02423_json_quote_float64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02423_json_quote_float64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02423_json_quote_float64/query.sql 
b/parser/testdata/02423_json_quote_float64/query.sql new file mode 100644 index 000000000..5cfbfabd2 --- /dev/null +++ b/parser/testdata/02423_json_quote_float64/query.sql @@ -0,0 +1,3 @@ +select 42424.4242424242::Float64 as x, [42.42::Float64, 42.42::Float64] as arr, tuple(42.42::Float64) as tuple format JSONEachRow settings output_format_json_quote_64bit_floats=1; +select 42424.4242424242::Float64 as x, [42.42::Float64, 42.42::Float64] as arr, tuple(42.42::Float64) as tuple format JSONEachRow settings output_format_json_quote_64bit_floats=0; + diff --git a/parser/testdata/02423_multidimensional_array_get_data_at/ast.json b/parser/testdata/02423_multidimensional_array_get_data_at/ast.json new file mode 100644 index 000000000..45a21c32d --- /dev/null +++ b/parser/testdata/02423_multidimensional_array_get_data_at/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function formatRow (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'RawBLOB'" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[Array_[UInt64_33]]" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001138752, + "rows_read": 12, + "bytes_read": 469 + } +} diff --git a/parser/testdata/02423_multidimensional_array_get_data_at/metadata.json b/parser/testdata/02423_multidimensional_array_get_data_at/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02423_multidimensional_array_get_data_at/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02423_multidimensional_array_get_data_at/query.sql b/parser/testdata/02423_multidimensional_array_get_data_at/query.sql new file mode 100644 index 000000000..5a98159a7 --- /dev/null +++ b/parser/testdata/02423_multidimensional_array_get_data_at/query.sql @@ -0,0 +1,7 @@ +SELECT formatRow('RawBLOB', [[[33]], []]); -- { serverError NOT_IMPLEMENTED } +SELECT formatRow('RawBLOB', [[[]], []]); -- { serverError NOT_IMPLEMENTED } +SELECT formatRow('RawBLOB', [[[[[[[0x48, 0x65, 0x6c, 0x6c, 0x6f]]]]]], []]); -- { serverError NOT_IMPLEMENTED } +SELECT formatRow('RawBLOB', []::Array(Array(Nothing))); -- { serverError NOT_IMPLEMENTED } +SELECT formatRow('RawBLOB', [[], [['Hello']]]); -- { serverError NOT_IMPLEMENTED } +SELECT formatRow('RawBLOB', [[['World']], []]); -- { serverError NOT_IMPLEMENTED } +SELECT formatRow('RawBLOB', []::Array(String)); -- { serverError NOT_IMPLEMENTED } diff --git a/parser/testdata/02424_pod_array_overflow/ast.json b/parser/testdata/02424_pod_array_overflow/ast.json new file mode 100644 index 000000000..4dab4bb9b --- /dev/null +++ b/parser/testdata/02424_pod_array_overflow/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + 
}, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier Native" + }, + { + "explain": " Literal '\u0002\u0002\u0002k0\u001AMap(FixedString(1), Int64)\u0001\\0\\0\\0\\0\\0\\0\\0�\\0�\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0d\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0�1?Vi\u0011�%'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001201734, + "rows_read": 12, + "bytes_read": 586 + } +} diff --git a/parser/testdata/02424_pod_array_overflow/metadata.json b/parser/testdata/02424_pod_array_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02424_pod_array_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02424_pod_array_overflow/query.sql b/parser/testdata/02424_pod_array_overflow/query.sql new file mode 100644 index 000000000..50c46cf19 --- /dev/null +++ b/parser/testdata/02424_pod_array_overflow/query.sql @@ -0,0 +1 @@ +SELECT * FROM format(Native, '\x02\x02\x02\x6b\x30\x1a\x4d\x61\x70\x28\x46\x69\x78\x65\x64\x53\x74\x72\x69\x6e\x67\x28\x31\x29\x2c\x20\x49\x6e\x74\x36\x34\x29\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x7f\x00\x7f\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x64\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xcf\x31\x3f\x56\x69\x11\x89\x25'); -- { serverError CANNOT_EXTRACT_TABLE_STRUCTURE } diff --git a/parser/testdata/02425_categorical_information_value_properties/ast.json b/parser/testdata/02425_categorical_information_value_properties/ast.json new file mode 100644 index 000000000..e5b5995a5 --- /dev/null +++ b/parser/testdata/02425_categorical_information_value_properties/ast.json @@ -0,0 +1,127 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function categoricalInformationValue (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (alias x) (children 1)" + }, + { + "explain": " 
ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_0)" + }, + { + "explain": " Literal Tuple_(NULL, UInt64_2)" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_0)" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_1)" + } + ], + + "rows": 35, + + "statistics": + { + "elapsed": 0.001391905, + "rows_read": 35, + "bytes_read": 1561 + } +} diff --git a/parser/testdata/02425_categorical_information_value_properties/metadata.json b/parser/testdata/02425_categorical_information_value_properties/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02425_categorical_information_value_properties/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02425_categorical_information_value_properties/query.sql b/parser/testdata/02425_categorical_information_value_properties/query.sql new file mode 100644 index 000000000..bc033ec4a --- /dev/null +++ b/parser/testdata/02425_categorical_information_value_properties/query.sql @@ -0,0 +1,14 @@ +SELECT round(arrayJoin(categoricalInformationValue(x.1, x.2)), 3) FROM (SELECT arrayJoin([(0, 0), (NULL, 2), (1, 0), (1, 1)]) AS x); +SELECT corr(c1, c2) FROM VALUES((0, 0), (NULL, 2), (1, 0), (1, 1)); +SELECT round(arrayJoin(categoricalInformationValue(c1, c2)), 3) FROM VALUES((0, 0), (NULL, 2), (1, 0), (1, 1)); +SELECT round(arrayJoin(categoricalInformationValue(c1, c2)), 3) FROM VALUES((0, 0), (NULL, 1), (1, 0), (1, 1)); +SELECT categoricalInformationValue(c1, c2) FROM VALUES((0, 0), (NULL, 1)); +SELECT categoricalInformationValue(c1, c2) FROM VALUES((NULL, 1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT categoricalInformationValue(dummy, dummy); +SELECT categoricalInformationValue(dummy, dummy) WHERE 0; +SELECT categoricalInformationValue(c1, c2) FROM VALUES((toNullable(0), 0)); +SELECT groupUniqArray(*) FROM VALUES(toNullable(0)); +SELECT groupUniqArray(*) FROM VALUES(NULL); +SELECT categoricalInformationValue(c1, c2) FROM VALUES((NULL, NULL)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT categoricalInformationValue(c1, c2) FROM VALUES((0, 0), (NULL, 0)); +SELECT quantiles(0.5, 0.9)(c1) FROM VALUES(0::Nullable(UInt8)); diff --git a/parser/testdata/02426_create_suspicious_fixed_string/ast.json b/parser/testdata/02426_create_suspicious_fixed_string/ast.json new file mode 100644 index 000000000..b8ff230aa --- /dev/null +++ b/parser/testdata/02426_create_suspicious_fixed_string/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery fixed_string (children 3)" + }, + { + "explain": " Identifier fixed_string" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration id (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " ColumnDeclaration s (children 1)" + }, + { + "explain": " DataType FixedString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_256" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier id" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001315686, + "rows_read": 14, + "bytes_read": 514 + } +} diff --git 
a/parser/testdata/02426_create_suspicious_fixed_string/metadata.json b/parser/testdata/02426_create_suspicious_fixed_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02426_create_suspicious_fixed_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02426_create_suspicious_fixed_string/query.sql b/parser/testdata/02426_create_suspicious_fixed_string/query.sql new file mode 100644 index 000000000..9bcbeb608 --- /dev/null +++ b/parser/testdata/02426_create_suspicious_fixed_string/query.sql @@ -0,0 +1,4 @@ +CREATE TABLE fixed_string (id UInt64, s FixedString(256)) ENGINE = MergeTree() ORDER BY id; +CREATE TABLE suspicious_fixed_string (id UInt64, s FixedString(257)) ENGINE = MergeTree() ORDER BY id; -- { serverError ILLEGAL_COLUMN } +SET allow_suspicious_fixed_string_types = 1; +CREATE TABLE suspicious_fixed_string (id UInt64, s FixedString(257)) ENGINE = MergeTree() ORDER BY id; diff --git a/parser/testdata/02426_pod_array_overflow_2/ast.json b/parser/testdata/02426_pod_array_overflow_2/ast.json new file mode 100644 index 000000000..a39116e8d --- /dev/null +++ b/parser/testdata/02426_pod_array_overflow_2/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier Native" + }, + { + "explain": " Literal '\u0002\u0002\u0002k0#Array(Tuple(FixedString(1), Int64))\u0001\\0\\0\\0\\0\\0\\0\\0�����\\0����������������\\0�\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0d\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0�1?Vi\u0011�%'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001029486, + "rows_read": 12, + "bytes_read": 644 + } +} diff --git a/parser/testdata/02426_pod_array_overflow_2/metadata.json b/parser/testdata/02426_pod_array_overflow_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02426_pod_array_overflow_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02426_pod_array_overflow_2/query.sql b/parser/testdata/02426_pod_array_overflow_2/query.sql new file mode 100644 index 000000000..6a0d97ace --- /dev/null +++ b/parser/testdata/02426_pod_array_overflow_2/query.sql @@ -0,0 +1 @@ +SELECT * FROM format(Native, 'k0\x23Array(Tuple(FixedString(1), Int64))\0\0\0\0\0\0\0�����\0����������������\0�\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0d\0\0\0\0\0\0\0\0\0\0\0\0\0�1?Vi�%'); -- { serverError CANNOT_EXTRACT_TABLE_STRUCTURE } diff --git a/parser/testdata/02426_pod_array_overflow_3/ast.json b/parser/testdata/02426_pod_array_overflow_3/ast.json new file mode 100644 index 000000000..38b980862 --- /dev/null +++ b/parser/testdata/02426_pod_array_overflow_3/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList 
(children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier Native" + }, + { + "explain": " Literal '\u0001\u0001\u0001x\\fArray(UInt8)\u0001\\0����'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001146823, + "rows_read": 12, + "bytes_read": 476 + } +} diff --git a/parser/testdata/02426_pod_array_overflow_3/metadata.json b/parser/testdata/02426_pod_array_overflow_3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02426_pod_array_overflow_3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02426_pod_array_overflow_3/query.sql b/parser/testdata/02426_pod_array_overflow_3/query.sql new file mode 100644 index 000000000..caabf7d16 --- /dev/null +++ b/parser/testdata/02426_pod_array_overflow_3/query.sql @@ -0,0 +1 @@ +SELECT * FROM format(Native, '\x01\x01\x01x\x0CArray(UInt8)\x01\x00\xBD\xEF\xBF\xBD\xEF\xBF\xBD\xEF'); -- { serverError CANNOT_EXTRACT_TABLE_STRUCTURE } diff --git a/parser/testdata/02426_to_string_nullable_fixedstring/ast.json b/parser/testdata/02426_to_string_nullable_fixedstring/ast.json new file mode 100644 index 000000000..154800b19 --- /dev/null +++ b/parser/testdata/02426_to_string_nullable_fixedstring/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function CAST (alias s) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal 'Nullable(FixedString(1))'" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier s" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier s" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001511215, + "rows_read": 14, + "bytes_read": 528 + } +} diff --git a/parser/testdata/02426_to_string_nullable_fixedstring/metadata.json b/parser/testdata/02426_to_string_nullable_fixedstring/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02426_to_string_nullable_fixedstring/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02426_to_string_nullable_fixedstring/query.sql b/parser/testdata/02426_to_string_nullable_fixedstring/query.sql new file mode 100644 index 000000000..f6dcc8f92 --- /dev/null +++ b/parser/testdata/02426_to_string_nullable_fixedstring/query.sql @@ -0,0 +1,2 @@ +SELECT CAST('a', 'Nullable(FixedString(1))') as s, toTypeName(s), toString(s); +SELECT number, toTypeName(s), toString(s) FROM (SELECT number, if(number % 3 = 0, NULL, toFixedString(toString(number), 1)) AS s from numbers(10)) ORDER BY number; diff --git a/parser/testdata/02427_column_nullable_ubsan/ast.json b/parser/testdata/02427_column_nullable_ubsan/ast.json new file mode 100644 index 000000000..201c5c14f --- /dev/null +++ 
b/parser/testdata/02427_column_nullable_ubsan/ast.json @@ -0,0 +1,154 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_0 (alias a)" + }, + { + "explain": " Function toNullable (alias b) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function toString (alias c) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_1000000" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier b" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier c" + }, + { + "explain": " Literal UInt64_1500" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 44, + + "statistics": + { + "elapsed": 0.001269605, + "rows_read": 44, + "bytes_read": 2017 + } +} diff --git a/parser/testdata/02427_column_nullable_ubsan/metadata.json b/parser/testdata/02427_column_nullable_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02427_column_nullable_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02427_column_nullable_ubsan/query.sql b/parser/testdata/02427_column_nullable_ubsan/query.sql new file mode 100644 index 000000000..3d1a51804 --- /dev/null +++ b/parser/testdata/02427_column_nullable_ubsan/query.sql @@ -0,0 +1 @@ +SELECT * FROM (SELECT * FROM (SELECT 0 AS a, toNullable(number) AS b, toString(number) AS c FROM numbers(1000000.)) ORDER BY a DESC, b DESC, c ASC LIMIT 1500) LIMIT 10; diff --git a/parser/testdata/02427_msan_group_array_resample/ast.json b/parser/testdata/02427_msan_group_array_resample/ast.json new file mode 100644 index 000000000..a3d6ace50 --- /dev/null +++ b/parser/testdata/02427_msan_group_array_resample/ast.json @@ -0,0 +1,175 @@ +{ + 
"meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function finalizeAggregation (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier state" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function groupArrayResample (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_9223372036854775806" + }, + { + "explain": " Literal UInt64_1048575" + }, + { + "explain": " Literal UInt64_65537" + }, + { + "explain": " Function groupArrayStateResample (alias state) (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_9223372036854775806" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_2147483648" + }, + { + "explain": " Literal UInt64_65535" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100" + } + ], + + "rows": 51, + + "statistics": + { + "elapsed": 0.001212767, + "rows_read": 51, + "bytes_read": 2273 + } +} diff --git a/parser/testdata/02427_msan_group_array_resample/metadata.json b/parser/testdata/02427_msan_group_array_resample/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02427_msan_group_array_resample/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02427_msan_group_array_resample/query.sql b/parser/testdata/02427_msan_group_array_resample/query.sql new file mode 100644 index 000000000..6eccf59a6 --- /dev/null +++ b/parser/testdata/02427_msan_group_array_resample/query.sql @@ -0,0 +1 @@ +SELECT arrayMap(x -> finalizeAggregation(x), state) FROM (SELECT 
groupArrayResample(9223372036854775806, 1048575, 65537)(number, number % 3), groupArrayStateResample(10, 2147483648, 65535)(number, number % 9223372036854775806) AS state FROM numbers(100)); diff --git a/parser/testdata/02427_mutate_and_zero_copy_replication_zookeeper/ast.json b/parser/testdata/02427_mutate_and_zero_copy_replication_zookeeper/ast.json new file mode 100644 index 000000000..42119e782 --- /dev/null +++ b/parser/testdata/02427_mutate_and_zero_copy_replication_zookeeper/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mutate_and_zero_copy_replication1 (children 1)" + }, + { + "explain": " Identifier mutate_and_zero_copy_replication1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001012769, + "rows_read": 2, + "bytes_read": 118 + } +} diff --git a/parser/testdata/02427_mutate_and_zero_copy_replication_zookeeper/metadata.json b/parser/testdata/02427_mutate_and_zero_copy_replication_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02427_mutate_and_zero_copy_replication_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02427_mutate_and_zero_copy_replication_zookeeper/query.sql b/parser/testdata/02427_mutate_and_zero_copy_replication_zookeeper/query.sql new file mode 100644 index 000000000..e3c8583cc --- /dev/null +++ b/parser/testdata/02427_mutate_and_zero_copy_replication_zookeeper/query.sql @@ -0,0 +1,40 @@ +DROP TABLE IF EXISTS mutate_and_zero_copy_replication1; +DROP TABLE IF EXISTS mutate_and_zero_copy_replication2; + +CREATE TABLE mutate_and_zero_copy_replication1 +( + a UInt64, + b String, + c Float64 +) +ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_02427_mutate_and_zero_copy_replication/alter', '1') +ORDER BY tuple() +SETTINGS old_parts_lifetime=0, cleanup_delay_period=300, max_cleanup_delay_period=300, cleanup_delay_period_random_add=300, min_bytes_for_wide_part = 0; + +CREATE TABLE mutate_and_zero_copy_replication2 +( + a UInt64, + b String, + c Float64 +) +ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_02427_mutate_and_zero_copy_replication/alter', '2') +ORDER BY tuple() +SETTINGS old_parts_lifetime=0, cleanup_delay_period=300, max_cleanup_delay_period=300, cleanup_delay_period_random_add=300; + + +INSERT INTO mutate_and_zero_copy_replication1 VALUES (1, '1', 1.0); +SYSTEM SYNC REPLICA mutate_and_zero_copy_replication2; + +SET mutations_sync=2; + +ALTER TABLE mutate_and_zero_copy_replication1 UPDATE a = 2 WHERE 1; + +DROP TABLE mutate_and_zero_copy_replication1 SYNC; + +DETACH TABLE mutate_and_zero_copy_replication2; +ATTACH TABLE mutate_and_zero_copy_replication2; + +SELECT * FROM mutate_and_zero_copy_replication2 WHERE NOT ignore(*); + +DROP TABLE IF EXISTS mutate_and_zero_copy_replication1; +DROP TABLE IF EXISTS mutate_and_zero_copy_replication2; diff --git a/parser/testdata/02428_batch_nullable_assert/ast.json b/parser/testdata/02428_batch_nullable_assert/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02428_batch_nullable_assert/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02428_batch_nullable_assert/metadata.json b/parser/testdata/02428_batch_nullable_assert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02428_batch_nullable_assert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02428_batch_nullable_assert/query.sql b/parser/testdata/02428_batch_nullable_assert/query.sql new file mode 100644 index 000000000..eed9b9d34 --- /dev/null +++ b/parser/testdata/02428_batch_nullable_assert/query.sql @@ -0,0 +1,24 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/41470 +SELECT + roundBankers(100), + -9223372036854775808, + roundBankers(result.2, 256) +FROM + ( + SELECT studentTTest(sample, variant) AS result + FROM + ( + SELECT + toFloat64(number) % NULL AS sample, + 0 AS variant + FROM system.numbers + LIMIT 1025 + UNION ALL + SELECT + (toFloat64(number) % 9223372036854775807) + nan AS sample, + -9223372036854775808 AS variant + FROM system.numbers + LIMIT 1024 + ) + ) +FORMAT CSV diff --git a/parser/testdata/02428_combinators_with_over_statement/ast.json b/parser/testdata/02428_combinators_with_over_statement/ast.json new file mode 100644 index 000000000..2d88ada56 --- /dev/null +++ b/parser/testdata/02428_combinators_with_over_statement/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001185291, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02428_combinators_with_over_statement/metadata.json b/parser/testdata/02428_combinators_with_over_statement/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02428_combinators_with_over_statement/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02428_combinators_with_over_statement/query.sql b/parser/testdata/02428_combinators_with_over_statement/query.sql new file mode 100644 index 000000000..a52a7f252 --- /dev/null +++ b/parser/testdata/02428_combinators_with_over_statement/query.sql @@ -0,0 +1,11 @@ +drop table if exists test; +create table test (x AggregateFunction(uniq, UInt64), y Int64) engine=Memory; +set max_insert_threads = 1; +insert into test select uniqState(number) as x, number as y from numbers(10) group by number order by y; +select uniqStateMap(map(1, x)) OVER (PARTITION BY y) from test; +select uniqStateForEach([x]) OVER (PARTITION BY y) from test; +select uniqStateResample(30, 75, 30)([x], 30) OVER (PARTITION BY y) from test; +select uniqStateForEachMapForEach([map(1, [x])]) OVER (PARTITION BY y) from test; +select uniqStateDistinctMap(map(1, x)) OVER (PARTITION BY y) from test; +drop table test; + diff --git a/parser/testdata/02428_decimal_in_floating_point_literal/ast.json b/parser/testdata/02428_decimal_in_floating_point_literal/ast.json new file mode 100644 index 000000000..5f0331c2b --- /dev/null +++ b/parser/testdata/02428_decimal_in_floating_point_literal/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery decimal_in_float_test (children 1)" + }, + { + "explain": " Identifier decimal_in_float_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001210251, + "rows_read": 2, + "bytes_read": 94 + } +} diff --git a/parser/testdata/02428_decimal_in_floating_point_literal/metadata.json b/parser/testdata/02428_decimal_in_floating_point_literal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02428_decimal_in_floating_point_literal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02428_decimal_in_floating_point_literal/query.sql b/parser/testdata/02428_decimal_in_floating_point_literal/query.sql new file mode 100644 index 000000000..a0d921151 --- /dev/null +++ b/parser/testdata/02428_decimal_in_floating_point_literal/query.sql @@ -0,0 +1,63 @@ +DROP TABLE IF EXISTS decimal_in_float_test; + +CREATE TABLE decimal_in_float_test ( `a` Decimal(18, 0), `b` Decimal(36, 2) ) ENGINE = Memory; +INSERT INTO decimal_in_float_test VALUES ('33', '44.44'); + +SELECT toDecimal32(1.555,3) IN (1.5551); +SELECT toDecimal32(1.555,3) IN (1.5551,1.555); +SELECT toDecimal32(1.555,3) IN (1.5551,1.555000); +SELECT toDecimal32(1.555,3) IN (1.550,1.5); + +SELECT toDecimal64(1.555,3) IN (1.5551); +SELECT toDecimal64(1.555,3) IN (1.5551,1.555); +SELECT toDecimal64(1.555,3) IN (1.5551,1.555000); +SELECT toDecimal64(1.555,3) IN (1.550,1.5); + +SELECT toDecimal128(1.555,3) IN (1.5551); +SELECT toDecimal128(1.555,3) IN (1.5551,1.555); +SELECT toDecimal128(1.555,3) IN (1.5551,1.555000); +SELECT toDecimal128(1.555,3) IN (1.550,1.5); + +SELECT toDecimal256(1.555,3) IN (1.5551); +SELECT toDecimal256(1.555,3) IN (1.5551,1.555); +SELECT toDecimal256(1.555,3) IN (1.5551,1.555000); +SELECT toDecimal256(1.555,3) IN (1.550,1.5); + + +SELECT count() == 1 FROM decimal_in_float_test WHERE a IN (33); +SELECT count() == 1 FROM decimal_in_float_test WHERE a IN (33.0); +SELECT count() == 1 FROM decimal_in_float_test WHERE a NOT IN (33.333); +SELECT count() == 1 FROM decimal_in_float_test WHERE b IN (44.44); +SELECT count() == 1 FROM decimal_in_float_test WHERE b NOT IN (44.4,44.444); + +SET enable_analyzer = 1; + + +SELECT toDecimal32(1.555,3) IN (1.5551); +SELECT toDecimal32(1.555,3) IN (1.5551,1.555); +SELECT toDecimal32(1.555,3) IN (1.5551,1.555000); +SELECT toDecimal32(1.555,3) IN (1.550,1.5); + +SELECT toDecimal64(1.555,3) IN (1.5551); +SELECT toDecimal64(1.555,3) IN (1.5551,1.555); +SELECT toDecimal64(1.555,3) IN (1.5551,1.555000); +SELECT toDecimal64(1.555,3) IN (1.550,1.5); + +SELECT toDecimal128(1.555,3) IN (1.5551); +SELECT toDecimal128(1.555,3) IN (1.5551,1.555); +SELECT toDecimal128(1.555,3) IN (1.5551,1.555000); +SELECT toDecimal128(1.555,3) IN (1.550,1.5); + +SELECT toDecimal256(1.555,3) IN (1.5551); +SELECT toDecimal256(1.555,3) IN (1.5551,1.555); +SELECT toDecimal256(1.555,3) IN (1.5551,1.555000); +SELECT toDecimal256(1.555,3) IN (1.550,1.5); + + +SELECT count() == 1 FROM decimal_in_float_test WHERE a IN (33); +SELECT count() == 1 FROM decimal_in_float_test WHERE a IN (33.0); +SELECT count() == 1 FROM decimal_in_float_test WHERE a NOT IN (33.333); +SELECT count() == 1 FROM decimal_in_float_test WHERE b IN (44.44); +SELECT count() == 1 FROM decimal_in_float_test WHERE b NOT IN (44.4,44.444); + +DROP TABLE IF EXISTS decimal_in_float_test; diff --git a/parser/testdata/02428_delete_with_settings/ast.json b/parser/testdata/02428_delete_with_settings/ast.json new file mode 100644 index 000000000..644896def --- /dev/null +++ b/parser/testdata/02428_delete_with_settings/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001049166, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02428_delete_with_settings/metadata.json b/parser/testdata/02428_delete_with_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02428_delete_with_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02428_delete_with_settings/query.sql b/parser/testdata/02428_delete_with_settings/query.sql new file mode 100644 index 000000000..618c08608 --- /dev/null +++ b/parser/testdata/02428_delete_with_settings/query.sql @@ -0,0 +1,5 @@ +drop table if exists test; +create table test (id Int32, key String) engine=MergeTree() order by tuple() settings index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into test select number, toString(number) from numbers(1000000); +delete from test where id % 2 = 0 SETTINGS mutations_sync=0; +select count() from test; diff --git a/parser/testdata/02428_index_analysis_with_null_literal/ast.json b/parser/testdata/02428_index_analysis_with_null_literal/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02428_index_analysis_with_null_literal/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02428_index_analysis_with_null_literal/metadata.json b/parser/testdata/02428_index_analysis_with_null_literal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02428_index_analysis_with_null_literal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02428_index_analysis_with_null_literal/query.sql b/parser/testdata/02428_index_analysis_with_null_literal/query.sql new file mode 100644 index 000000000..091fbbe17 --- /dev/null +++ b/parser/testdata/02428_index_analysis_with_null_literal/query.sql @@ -0,0 +1,21 @@ +-- From https://github.com/ClickHouse/ClickHouse/issues/41814 +drop table if exists test; + +create table test(a UInt64, m UInt64, d DateTime) engine MergeTree partition by toYYYYMM(d) order by (a, m, d) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into test select number, number, '2022-01-01 00:00:00' from numbers(1000000); + +select count() from test where a = (select toUInt64(1) where 1 = 2) settings enable_early_constant_folding = 0, force_primary_key = 1; + +drop table test; + +-- From https://github.com/ClickHouse/ClickHouse/issues/34063 +drop table if exists test_null_filter; + +create table test_null_filter(key UInt64, value UInt32) engine MergeTree order by key SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into test_null_filter select number, number from numbers(10000000); + +select count() from test_null_filter where key = null and value > 0 settings force_primary_key = 1; + +drop table test_null_filter; diff --git a/parser/testdata/02428_parameterized_view_param_in_select_section/ast.json b/parser/testdata/02428_parameterized_view_param_in_select_section/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02428_parameterized_view_param_in_select_section/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02428_parameterized_view_param_in_select_section/metadata.json b/parser/testdata/02428_parameterized_view_param_in_select_section/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02428_parameterized_view_param_in_select_section/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02428_parameterized_view_param_in_select_section/query.sql b/parser/testdata/02428_parameterized_view_param_in_select_section/query.sql new file mode 100644 index 000000000..53a10633f --- /dev/null +++ 
b/parser/testdata/02428_parameterized_view_param_in_select_section/query.sql @@ -0,0 +1,58 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/56564 + +create table t(z String, ts DateTime) Engine=Memory as +select '1', '2020-01-01 00:00:00'; + +CREATE VIEW v1 AS +SELECT z, 'test' = {m:String} AS c +FROM t +WHERE ts > '2019-01-01 00:00:00' +GROUP BY z, c; + +CREATE VIEW v2 AS +SELECT z, {m:String} AS c +FROM t +WHERE ts > '2019-01-01 00:00:00' +GROUP BY z, c; + +CREATE VIEW v3 AS +SELECT z, {m:String} +FROM t; + +select * from v1(m='test'); +select * from v2(m='test'); +select * from v3(m='test'); + +drop table t; +drop view v1; +drop view v2; +drop view v3; + +create table t(z String, ts DateTime) Engine=MergeTree ORDER BY z as +select '1', '2020-01-01 00:00:00'; + +CREATE VIEW v1 AS +SELECT z, 'test' = {m:String} AS c +FROM t +WHERE ts > '2019-01-01 00:00:00' +GROUP BY z, c; + +CREATE VIEW v2 AS +SELECT z, {m:String} AS c +FROM t +WHERE ts > '2019-01-01 00:00:00' +GROUP BY z, c; + +CREATE VIEW v3 AS +SELECT z, {m:String} +FROM t; + +select * from v1(m='test'); +select * from v2(m='test'); +select * from v3(m='test'); + +drop table t; +drop view v1; +drop view v2; +drop view v3; + diff --git a/parser/testdata/02428_partial_sort_optimization_bug/ast.json b/parser/testdata/02428_partial_sort_optimization_bug/ast.json new file mode 100644 index 000000000..b789a2eae --- /dev/null +++ b/parser/testdata/02428_partial_sort_optimization_bug/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery partial_sort_opt_bug (children 3)" + }, + { + "explain": " Identifier partial_sort_opt_bug" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 3)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Set" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001070886, + "rows_read": 11, + "bytes_read": 383 + } +} diff --git a/parser/testdata/02428_partial_sort_optimization_bug/metadata.json b/parser/testdata/02428_partial_sort_optimization_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02428_partial_sort_optimization_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02428_partial_sort_optimization_bug/query.sql b/parser/testdata/02428_partial_sort_optimization_bug/query.sql new file mode 100644 index 000000000..cff680023 --- /dev/null +++ b/parser/testdata/02428_partial_sort_optimization_bug/query.sql @@ -0,0 +1,11 @@ +create table partial_sort_opt_bug (x UInt64) engine = MergeTree order by tuple() settings index_granularity = 1000; + +insert into partial_sort_opt_bug select number + 100000 from numbers(4000); + +insert into partial_sort_opt_bug select number from numbers(1000); +insert into partial_sort_opt_bug select number + 200000 from numbers(3000); +insert into partial_sort_opt_bug select number + 1000 from numbers(4000); +optimize table partial_sort_opt_bug final; + +select x from partial_sort_opt_bug order by x limit 2000 settings max_block_size = 4000; + diff --git a/parser/testdata/02429_combinators_in_array_reduce/ast.json b/parser/testdata/02429_combinators_in_array_reduce/ast.json new file mode 100644 
index 000000000..8c2e89e27 --- /dev/null +++ b/parser/testdata/02429_combinators_in_array_reduce/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayReduce (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'uniqStateMap'" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function map (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001132721, + "rows_read": 13, + "bytes_read": 506 + } +} diff --git a/parser/testdata/02429_combinators_in_array_reduce/metadata.json b/parser/testdata/02429_combinators_in_array_reduce/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02429_combinators_in_array_reduce/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02429_combinators_in_array_reduce/query.sql b/parser/testdata/02429_combinators_in_array_reduce/query.sql new file mode 100644 index 000000000..a961ae3a1 --- /dev/null +++ b/parser/testdata/02429_combinators_in_array_reduce/query.sql @@ -0,0 +1,6 @@ +select arrayReduce('uniqStateMap', [map(1, 2)]); +select arrayReduce('uniqStateForEach', [[1], [2]]); +select arrayReduce('uniqStateForEachMapForEach', [[map(1, [2])]]); +select arrayReduceInRanges('uniqStateMap', [(1, 3), (2, 3), (3, 3)], [map(1, 'a'), map(1, 'b'), map(1, 'c'), map(1, 'd'), map(1, 'e')]); +select arrayReduceInRanges('uniqStateForEach', [(1, 3), (2, 3), (3, 3)], [['a'], ['b'], ['c'],['d'], ['e']]); +select arrayReduceInRanges('uniqStateForEachMapForEach', [(1, 3), (2, 3), (3, 3)], [[map(1, ['a'])], [map(1, ['b'])], [map(1, ['c'])], [map(1, ['d'])], [map(1, ['e'])]]); diff --git a/parser/testdata/02429_groupBitmap_chain_state/ast.json b/parser/testdata/02429_groupBitmap_chain_state/ast.json new file mode 100644 index 000000000..f942d5de0 --- /dev/null +++ b/parser/testdata/02429_groupBitmap_chain_state/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function groupBitmapAnd (alias y) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier z" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function groupBitmapState (alias z) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier u" + }, + { + "explain": " TablesInSelectQuery (children 1)" + 
}, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias a1) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_123 (alias u)" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.001336252, + "rows_read": 27, + "bytes_read": 1235 + } +} diff --git a/parser/testdata/02429_groupBitmap_chain_state/metadata.json b/parser/testdata/02429_groupBitmap_chain_state/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02429_groupBitmap_chain_state/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02429_groupBitmap_chain_state/query.sql b/parser/testdata/02429_groupBitmap_chain_state/query.sql new file mode 100644 index 000000000..27e549b60 --- /dev/null +++ b/parser/testdata/02429_groupBitmap_chain_state/query.sql @@ -0,0 +1,6 @@ +SELECT groupBitmapAnd(z) y FROM ( SELECT groupBitmapState(u) AS z FROM ( SELECT 123 AS u ) AS a1 ); +SELECT groupBitmapAnd(y) FROM (SELECT groupBitmapAndState(z) y FROM ( SELECT groupBitmapState(u) AS z FROM ( SELECT 123 AS u ) AS a1 ) AS a2); + +SELECT groupBitmapAnd(z) FROM ( SELECT minState(u) AS z FROM ( SELECT 123 AS u ) AS a1 ) AS a2; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT groupBitmapOr(z) FROM ( SELECT maxState(u) AS z FROM ( SELECT '123' AS u ) AS a1 ) AS a2; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT groupBitmapXor(z) FROM ( SELECT countState() AS z FROM ( SELECT '123' AS u ) AS a1 ) AS a2; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/02429_offset_pipeline_stuck_bug/ast.json b/parser/testdata/02429_offset_pipeline_stuck_bug/ast.json new file mode 100644 index 000000000..dee3cc57f --- /dev/null +++ b/parser/testdata/02429_offset_pipeline_stuck_bug/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 3)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Log" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_20" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001297741, + "rows_read": 14, + "bytes_read": 503 + } +} diff --git a/parser/testdata/02429_offset_pipeline_stuck_bug/metadata.json b/parser/testdata/02429_offset_pipeline_stuck_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02429_offset_pipeline_stuck_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02429_offset_pipeline_stuck_bug/query.sql b/parser/testdata/02429_offset_pipeline_stuck_bug/query.sql new file mode 100644 index 000000000..59365ca5b --- /dev/null +++ b/parser/testdata/02429_offset_pipeline_stuck_bug/query.sql @@ -0,0 +1,4 @@ 
+CREATE TABLE t ENGINE = Log AS SELECT * FROM system.numbers LIMIT 20; +SET enable_optimize_predicate_expression = 0; +SELECT number FROM (select number FROM t ORDER BY number OFFSET 3) WHERE number < NULL; + diff --git a/parser/testdata/02430_bitmap_transform_exception_code/ast.json b/parser/testdata/02430_bitmap_transform_exception_code/ast.json new file mode 100644 index 000000000..bb62dce95 --- /dev/null +++ b/parser/testdata/02430_bitmap_transform_exception_code/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitmapTransform (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function arrayReduce (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'groupBitmapState'" + }, + { + "explain": " Literal Array_[UInt64_1]" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2]" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3]" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001204195, + "rows_read": 12, + "bytes_read": 515 + } +} diff --git a/parser/testdata/02430_bitmap_transform_exception_code/metadata.json b/parser/testdata/02430_bitmap_transform_exception_code/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02430_bitmap_transform_exception_code/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02430_bitmap_transform_exception_code/query.sql b/parser/testdata/02430_bitmap_transform_exception_code/query.sql new file mode 100644 index 000000000..5a30f9598 --- /dev/null +++ b/parser/testdata/02430_bitmap_transform_exception_code/query.sql @@ -0,0 +1 @@ +SELECT bitmapTransform(arrayReduce('groupBitmapState', [1]), [1, 2], [1, 2, 3]); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02430_initialize_aggregation_with_combinators/ast.json b/parser/testdata/02430_initialize_aggregation_with_combinators/ast.json new file mode 100644 index 000000000..a6b932df3 --- /dev/null +++ b/parser/testdata/02430_initialize_aggregation_with_combinators/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function initializeAggregation (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'uniqStateMap'" + }, + { + "explain": " Function map (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.00130014, + "rows_read": 11, + "bytes_read": 425 + } +} diff --git a/parser/testdata/02430_initialize_aggregation_with_combinators/metadata.json b/parser/testdata/02430_initialize_aggregation_with_combinators/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02430_initialize_aggregation_with_combinators/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02430_initialize_aggregation_with_combinators/query.sql b/parser/testdata/02430_initialize_aggregation_with_combinators/query.sql new file mode 100644 index 000000000..b50b4ee01 --- /dev/null +++ b/parser/testdata/02430_initialize_aggregation_with_combinators/query.sql @@ -0,0 +1,4 @@ +select initializeAggregation('uniqStateMap', map(1, 2)); +select initializeAggregation('uniqStateForEach', [1, 2]); +select initializeAggregation('uniqStateForEachMapForEach', [map(1, [2])]); + diff --git a/parser/testdata/02431_single_value_or_null_empty/ast.json b/parser/testdata/02431_single_value_or_null_empty/ast.json new file mode 100644 index 000000000..325d7ef00 --- /dev/null +++ b/parser/testdata/02431_single_value_or_null_empty/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function singleValueOrNull (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.000975054, + "rows_read": 13, + "bytes_read": 523 + } +} diff --git a/parser/testdata/02431_single_value_or_null_empty/metadata.json b/parser/testdata/02431_single_value_or_null_empty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02431_single_value_or_null_empty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02431_single_value_or_null_empty/query.sql b/parser/testdata/02431_single_value_or_null_empty/query.sql new file mode 100644 index 000000000..e213ecc7a --- /dev/null +++ b/parser/testdata/02431_single_value_or_null_empty/query.sql @@ -0,0 +1,35 @@ +select singleValueOrNull(number) from numbers(0) with totals; + +SELECT + 0.5 IN ( + SELECT singleValueOrNull(*) + FROM + ( + SELECT 1048577 + FROM numbers(0) + ) +WITH TOTALS + ), + NULL, + NULL NOT IN ( +SELECT + 2147483647, + 1024 IN ( + SELECT + [NULL, 2147483648, NULL, NULL], + number + FROM numbers(7, 100) + ), + [NULL, NULL, NULL, NULL, NULL], + number +FROM numbers(1048576) +WHERE NULL + ), + NULL NOT IN ( +SELECT number +FROM numbers(0) + ) +GROUP BY NULL +WITH CUBE; + +SELECT anyHeavy('1') FROM (SELECT anyHeavy(1)); diff --git a/parser/testdata/02433_default_expression_operator_in/ast.json b/parser/testdata/02433_default_expression_operator_in/ast.json new file mode 100644 index 000000000..b04940e84 --- /dev/null +++ b/parser/testdata/02433_default_expression_operator_in/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery dep (children 1)" + }, + { + "explain": " Identifier dep" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000952432, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02433_default_expression_operator_in/metadata.json b/parser/testdata/02433_default_expression_operator_in/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02433_default_expression_operator_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02433_default_expression_operator_in/query.sql b/parser/testdata/02433_default_expression_operator_in/query.sql new file mode 100644 index 000000000..e009bc5cd --- /dev/null +++ b/parser/testdata/02433_default_expression_operator_in/query.sql @@ -0,0 +1,37 @@ +DROP TABLE IF EXISTS dep; +DROP TABLE IF EXISTS dep2; +DROP TABLE IF EXISTS id_join; + +CREATE TABLE id_join (`country` String, `location` Array(Int32)) ENGINE = Join(ANY, LEFT, country); + +INSERT INTO id_join values ('CLICK', [1234]); + +CREATE TABLE dep +( + `id` Int32, + `country` LowCardinality(String), + `purchase_location` UInt16 MATERIALIZED if(id IN joinGet(concat(currentDatabase(), '.id_join'), 'location', 'CLICK'), 123, 456) +) +ENGINE = ReplicatedMergeTree('/test/02433/{database}/dep', '1') ORDER BY tuple(); + +SHOW CREATE TABLE dep; + +TRUNCATE TABLE id_join; + +CREATE TABLE dep2 +( + `id` Int32, + `country` LowCardinality(String), + `purchase_location` UInt16 MATERIALIZED if(id IN joinGet(concat(currentDatabase(), '.id_join'), 'location', 'CLICK'), 123, 456) +) +ENGINE = ReplicatedMergeTree('/test/02433/{database}/dep', '2') ORDER BY tuple(); + +SHOW CREATE TABLE dep2; + +-- Ensure that a table name cannot be passed to IN as string literal +create table test (n int, m default n in 'default.table_name') engine=Memory; -- { serverError TYPE_MISMATCH } +create table test (n int, m default in(n, 'default.table_name')) engine=Memory; -- { serverError TYPE_MISMATCH } + +DROP TABLE dep; +DROP TABLE dep2; +DROP TABLE id_join; diff --git a/parser/testdata/02436_system_zookeeper_context/ast.json b/parser/testdata/02436_system_zookeeper_context/ast.json new file mode 100644 index 000000000..a12f4e735 --- /dev/null +++ b/parser/testdata/02436_system_zookeeper_context/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mt (children 1)" + }, + { + "explain": " Identifier mt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001238077, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02436_system_zookeeper_context/metadata.json b/parser/testdata/02436_system_zookeeper_context/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02436_system_zookeeper_context/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02436_system_zookeeper_context/query.sql b/parser/testdata/02436_system_zookeeper_context/query.sql new file mode 100644 index 000000000..ae44405e7 --- /dev/null +++ b/parser/testdata/02436_system_zookeeper_context/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS mt; +create table mt (n int, s String) engine=MergeTree order by n; +insert into mt values (1, ''); +set allow_nondeterministic_mutations=1; +alter table mt update s = (select toString(groupArray((*,))) from system.zookeeper where path='/') where n=1 settings mutations_sync=2; +select distinct n from mt; +DROP TABLE mt; diff --git a/parser/testdata/02438_sync_replica_lightweight/ast.json b/parser/testdata/02438_sync_replica_lightweight/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02438_sync_replica_lightweight/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02438_sync_replica_lightweight/metadata.json 
b/parser/testdata/02438_sync_replica_lightweight/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02438_sync_replica_lightweight/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02438_sync_replica_lightweight/query.sql b/parser/testdata/02438_sync_replica_lightweight/query.sql new file mode 100644 index 000000000..4754c1575 --- /dev/null +++ b/parser/testdata/02438_sync_replica_lightweight/query.sql @@ -0,0 +1,48 @@ +-- Tags: no-replicated-database, no-shared-merge-tree +-- Tag no-replicated-database: different number of replicas +-- Tag no-shared-merge-tree: sync replica lightweight by default + +-- May affect part names +set prefer_warmed_unmerged_parts_seconds=0; +set ignore_cold_parts_seconds=0; + +create table rmt1 (n int) engine=ReplicatedMergeTree('/test/{database}/02438/', '1') order by tuple() settings cache_populated_by_fetch=0; +create table rmt2 (n int) engine=ReplicatedMergeTree('/test/{database}/02438/', '2') order by tuple() settings cache_populated_by_fetch=0; + +system stop replicated sends rmt1; +system stop merges rmt2; + +set insert_keeper_fault_injection_probability=0; + +insert into rmt1 values (1); +insert into rmt1 values (2); +system sync replica rmt2 pull; -- does not wait +select type, new_part_name from system.replication_queue where database=currentDatabase() and table='rmt2' order by new_part_name; +select 1, n, _part from rmt1 order by n; +select 2, n, _part from rmt2 order by n; + +set optimize_throw_if_noop = 1; +system sync replica rmt1 pull; +optimize table rmt1 final; + +system start replicated sends rmt1; +system sync replica rmt2 lightweight; -- waits for fetches, not merges +select type, new_part_name from system.replication_queue where database=currentDatabase() and table='rmt2' order by new_part_name; +select 3, n, _part from rmt1 order by n; +select 4, n from rmt2 order by n; +select type, new_part_name from system.replication_queue where database=currentDatabase() and table='rmt2' order by new_part_name; + +system start merges rmt2; +system sync replica rmt2; + +insert into rmt2 values (3); +system sync replica rmt2 pull; +optimize table rmt2 final; + +system sync replica rmt1 strict; + +select 5, n, _part from rmt1 order by n; +select 6, n, _part from rmt2 order by n; + +drop table rmt1; +drop table rmt2; diff --git a/parser/testdata/02439_merge_selecting_partitions/ast.json b/parser/testdata/02439_merge_selecting_partitions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02439_merge_selecting_partitions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02439_merge_selecting_partitions/metadata.json b/parser/testdata/02439_merge_selecting_partitions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02439_merge_selecting_partitions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02439_merge_selecting_partitions/query.sql b/parser/testdata/02439_merge_selecting_partitions/query.sql new file mode 100644 index 000000000..9c718d105 --- /dev/null +++ b/parser/testdata/02439_merge_selecting_partitions/query.sql @@ -0,0 +1,36 @@ +-- Tags: no-shared-merge-tree +-- Predicate works in a different way +drop table if exists rmt; + +create table rmt (n int, m int) engine=ReplicatedMergeTree('/test/02439/{shard}/{database}', '{replica}') partition by n order by n; +insert into rmt select number, number from numbers(50); +insert into rmt values (1, 2); 
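+-- note (grounded in the comment further down): numbers(50) put exactly one part into each of the 50 partitions; the repeated single-row (1, x) inserts stack extra parts in partition 1 only, so it becomes the only partition with anything to merge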
+insert into rmt values (1, 3); +insert into rmt values (1, 4); +insert into rmt values (1, 5); +insert into rmt values (1, 6); +insert into rmt values (1, 7); +insert into rmt values (1, 8); +insert into rmt values (1, 9); +-- there's nothing to merge in all partitions but '1' + +optimize table rmt partition tuple(123); + +set optimize_throw_if_noop=1; +optimize table rmt partition tuple(123); -- { serverError CANNOT_ASSIGN_OPTIMIZE } + +select sleepEachRow(3) as higher_probability_of_reproducing_the_issue format Null; +system flush logs zookeeper_log, query_log; + +-- it should not list unneeded partitions where we cannot merge anything +select * from system.zookeeper_log where path like '/test/02439/' || getMacro('shard') || '/' || currentDatabase() || '/block_numbers/%' + and op_num in ('List', 'SimpleList', 'FilteredList') + and path not like '%/block_numbers/1' and path not like '%/block_numbers/123' + and event_time >= now() - interval 1 minute + -- avoid race with tests like 02311_system_zookeeper_insert + and (query_id is null or query_id='' or query_id in + (select query_id from system.query_log + where event_time >= now() - interval 1 minute and current_database=currentDatabase()) + ); + +drop table rmt; diff --git a/parser/testdata/02440_mutations_finalization/ast.json b/parser/testdata/02440_mutations_finalization/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02440_mutations_finalization/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02440_mutations_finalization/metadata.json b/parser/testdata/02440_mutations_finalization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02440_mutations_finalization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02440_mutations_finalization/query.sql b/parser/testdata/02440_mutations_finalization/query.sql new file mode 100644 index 000000000..92ed6a327 --- /dev/null +++ b/parser/testdata/02440_mutations_finalization/query.sql @@ -0,0 +1,35 @@ +-- Tags: no-parallel + +create table mut (n int) engine=ReplicatedMergeTree('/test/02440/{database}/mut', '1') order by tuple(); +set insert_keeper_fault_injection_probability=0; +insert into mut values (1); +system stop merges mut; +alter table mut update n = 2 where n = 1; +-- it will create MUTATE_PART entry, but will not execute it + +system sync replica mut pull; +select mutation_id, command, parts_to_do_names, is_done from system.mutations where database=currentDatabase() and table='mut'; + +-- merges (and mutations) will start again after detach/attach, we need to avoid this somehow... 
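+-- the workaround: keep a slow mutation running in an unrelated table (tmp), so that together with max_number_of_mutations_for_replica=1 (set below) the MUTATE_PART entry in mut cannot be executed after detach/attach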
+create table tmp (n int) engine=MergeTree order by tuple() settings index_granularity=1; +insert into tmp select * from numbers(1000); +alter table tmp update n = sleepEachRow(1) where 1; +select sleepEachRow(2) as higher_probability_of_reproducing_the_issue format Null; + +-- it will not execute MUTATE_PART, because another mutation is currently executing (in tmp) +alter table mut modify setting max_number_of_mutations_for_replica=1; +detach table mut; +attach table mut; + +-- mutation should not be finished yet +select * from mut; +select mutation_id, command, parts_to_do_names, is_done from system.mutations where database=currentDatabase() and table='mut'; + +alter table mut modify setting max_number_of_mutations_for_replica=100; +system sync replica mut; + +-- and now it should (is_done may be 0, but it's okay) +select * from mut; +select mutation_id, command, parts_to_do_names from system.mutations where database=currentDatabase() and table='mut'; + +drop table tmp; -- btw, it will check that mutation can be cancelled between blocks on shutdown diff --git a/parser/testdata/02441_alter_delete_and_drop_column/ast.json b/parser/testdata/02441_alter_delete_and_drop_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02441_alter_delete_and_drop_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02441_alter_delete_and_drop_column/metadata.json b/parser/testdata/02441_alter_delete_and_drop_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02441_alter_delete_and_drop_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02441_alter_delete_and_drop_column/query.sql b/parser/testdata/02441_alter_delete_and_drop_column/query.sql new file mode 100644 index 000000000..ed8d245ed --- /dev/null +++ b/parser/testdata/02441_alter_delete_and_drop_column/query.sql @@ -0,0 +1,33 @@ +-- Tags: no-replicated-database, no-shared-merge-tree +-- no-shared-merge-tree: depend on system.replication_queue + +create table mut (n int, m int, k int) engine=ReplicatedMergeTree('/test/02441/{database}/mut', '1') order by n; +set insert_keeper_fault_injection_probability=0; +system stop merges mut; +insert into mut values (1, 2, 3), (10, 20, 30); + +alter table mut delete where n = 10; + +-- a funny way to wait for a MUTATE_PART to be assigned +select sleepEachRow(2) from url('http://localhost:8123/?param_tries={1..10}&query=' || encodeURLComponent( + 'select 1 where ''MUTATE_PART'' not in (select type from system.replication_queue where database=''' || currentDatabase() || ''' and table=''mut'')' + ), 'LineAsString', 's String') settings max_threads=1, http_make_head_request=0 format Null; + +alter table mut drop column k settings alter_sync=0; + +-- a funny way to wait for ALTER_METADATA to disappear from the replication queue +select sleepEachRow(2) from url('http://localhost:8123/?param_tries={1..10}&query=' || encodeURLComponent( + 'select * from system.replication_queue where database=''' || currentDatabase() || ''' and table=''mut'' and type=''ALTER_METADATA''' + ), 'LineAsString', 's String') settings max_threads=1, http_make_head_request=0 format Null; + +system sync replica mut pull; + +select sleepEachRow(2) from url('http://localhost:8123/?param_tries={1..10}&query=' || encodeURLComponent( + 'select * from system.replication_queue where database=''' || currentDatabase() || ''' and table=''mut'' and type=''ALTER_METADATA''' + ), 'LineAsString', 's String')
settings max_threads=1, http_make_head_request=0 format Null; + +select type, new_part_name, parts_to_merge from system.replication_queue where database=currentDatabase() and table='mut' and type != 'GET_PART'; +system start merges mut; +set receive_timeout=30; +system sync replica mut; +select * from mut; diff --git a/parser/testdata/02442_auxiliary_zookeeper_endpoint_id/ast.json b/parser/testdata/02442_auxiliary_zookeeper_endpoint_id/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02442_auxiliary_zookeeper_endpoint_id/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02442_auxiliary_zookeeper_endpoint_id/metadata.json b/parser/testdata/02442_auxiliary_zookeeper_endpoint_id/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02442_auxiliary_zookeeper_endpoint_id/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02442_auxiliary_zookeeper_endpoint_id/query.sql b/parser/testdata/02442_auxiliary_zookeeper_endpoint_id/query.sql new file mode 100644 index 000000000..92725db8d --- /dev/null +++ b/parser/testdata/02442_auxiliary_zookeeper_endpoint_id/query.sql @@ -0,0 +1,22 @@ +-- Tags: no-fasttest, no-shared-merge-tree +-- no-shared-merge-tree -- shared merge tree doesn't support aux zookeepers + +drop table if exists t1_r1 sync; +drop table if exists t1_r2 sync; +drop table if exists t2 sync; + +create table t1_r1 (x Int32) engine=ReplicatedMergeTree('/test/02442/{database}/t', 'r1') order by x; + +create table t1_r2 (x Int32) engine=ReplicatedMergeTree('/test/02442/{database}/t', 'r2') order by x; + +-- create table with same replica_path as t1_r1 +create table t2 (x Int32) engine=ReplicatedMergeTree('zookeeper2:/test/02442/{database}/t', 'r1') order by x; +drop table t2 sync; + +-- insert data into one replica +insert into t1_r1 select * from generateRandom('x Int32') LIMIT 10013; +system sync replica t1_r2; +select count() from t1_r2; + +drop table t1_r1 sync; +drop table t1_r2 sync; diff --git a/parser/testdata/02448_clone_replica_lost_part/ast.json b/parser/testdata/02448_clone_replica_lost_part/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02448_clone_replica_lost_part/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02448_clone_replica_lost_part/metadata.json b/parser/testdata/02448_clone_replica_lost_part/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02448_clone_replica_lost_part/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02448_clone_replica_lost_part/query.sql b/parser/testdata/02448_clone_replica_lost_part/query.sql new file mode 100644 index 000000000..cc1398cb8 --- /dev/null +++ b/parser/testdata/02448_clone_replica_lost_part/query.sql @@ -0,0 +1,164 @@ +-- Tags: long, no-shared-merge-tree +-- no-shared-merge-tree: depend on replication queue/fetches + +SET insert_keeper_fault_injection_probability=0; -- disable fault injection; part ids are non-deterministic in case of insert retries + +drop table if exists rmt1; +drop table if exists rmt2; +create table rmt1 (n int) engine=ReplicatedMergeTree('/test/02448/{database}/rmt', '1') order by tuple() + settings min_replicated_logs_to_keep=1, max_replicated_logs_to_keep=2, + max_cleanup_delay_period=1, cleanup_delay_period=0, cleanup_delay_period_random_add=1, + cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime=0, max_parts_to_merge_at_once=4, 
+ merge_selecting_sleep_ms=1000, max_merge_selecting_sleep_ms=2000; +create table rmt2 (n int) engine=ReplicatedMergeTree('/test/02448/{database}/rmt', '2') order by tuple() + settings min_replicated_logs_to_keep=1, max_replicated_logs_to_keep=2, + max_cleanup_delay_period=1, cleanup_delay_period=0, cleanup_delay_period_random_add=1, + cleanup_thread_preferred_points_per_iteration=0, old_parts_lifetime=0, max_parts_to_merge_at_once=4, + merge_selecting_sleep_ms=1000, max_merge_selecting_sleep_ms=2000; + +-- insert part only on one replica +system stop replicated sends rmt1; +insert into rmt1 values (1); +detach table rmt1; -- make replica inactive +system start replicated sends rmt1; + +-- trigger log rotation, rmt1 will be lost +insert into rmt2 values (2); +insert into rmt2 values (3); +insert into rmt2 values (4); +insert into rmt2 values (5); +-- check that entry was not removed from the queue (part is not lost) +set receive_timeout=5; +system sync replica rmt2; -- {serverError TIMEOUT_EXCEEDED} +set receive_timeout=300; + +select 1, arraySort(groupArray(n)) from rmt2; + +-- rmt1 will mimic rmt2 +attach table rmt1; +system sync replica rmt1; +system sync replica rmt2; + +-- check that no parts are lost +select 2, arraySort(groupArray(n)) from rmt1; +select 3, arraySort(groupArray(n)) from rmt2; + + +truncate table rmt1; +truncate table rmt2; + + +-- insert parts only on one replica and merge them +system stop replicated sends rmt2; +insert into rmt2 values (1); +insert into rmt2 values (2); +system sync replica rmt2; +optimize table rmt2 final; +system sync replica rmt2; +-- give it a chance to remove source parts +select sleep(2) format Null; -- increases probability of reproducing the issue +detach table rmt2; +system start replicated sends rmt2; + + +-- trigger log rotation, rmt2 will be lost +insert into rmt1 values (3); +insert into rmt1 values (4); +insert into rmt1 values (5); +set receive_timeout=5; +-- check that entry was not removed from the queue (part is not lost) +system sync replica rmt1; -- {serverError TIMEOUT_EXCEEDED} +set receive_timeout=300; + +select 4, arraySort(groupArray(n)) from rmt1; + +-- rmt1 will mimic rmt2 +system stop fetches rmt1; +attach table rmt2; +system sync replica rmt2; +-- give rmt2 a chance to remove merged part (but it should not do it) +select sleep(2) format Null; -- increases probability of reproducing the issue +system start fetches rmt1; +system sync replica rmt1; + +-- check that no parts are lost +select 5, arraySort(groupArray(n)) from rmt1; +select 6, arraySort(groupArray(n)) from rmt2; + + +-- insert part only on one replica +system stop replicated sends rmt1; +insert into rmt1 values (123); +alter table rmt1 update n=10 where n=123 settings mutations_sync=1; +-- give it a chance to remove source part +select sleep(2) format Null; -- increases probability of reproducing the issue +detach table rmt1; -- make replica inactive +system start replicated sends rmt1; + +-- trigger log rotation, rmt1 will be lost +insert into rmt2 values (20); +insert into rmt2 values (30); +insert into rmt2 values (40); +insert into rmt2 values (50); +-- check that entry was not removed from the queue (part is not lost) +set receive_timeout=5; +system sync replica rmt2; -- {serverError TIMEOUT_EXCEEDED} +set receive_timeout=300; + +select 7, arraySort(groupArray(n)) from rmt2; + +-- rmt1 will mimic rmt2 +system stop fetches rmt2; +attach table rmt1; +system sync replica rmt1; +-- give rmt1 a chance to remove mutated part (but it should not do it) +select 
sleep(2) format Null; -- increases probability of reproducing the issue +system start fetches rmt2; +system sync replica rmt2; + +-- check that no parts are lost +select 8, arraySort(groupArray(n)) from rmt1; +select 9, arraySort(groupArray(n)) from rmt2; + +-- avoid arbitrary merges after inserting +optimize table rmt2 final; +-- insert parts (all_18_18_0, all_19_19_0) on both replicas (will be deduplicated, but it does not matter) +insert into rmt1 values (100); +insert into rmt2 values (100); +insert into rmt1 values (200); +insert into rmt2 values (200); + +-- otherwise we can get exception on drop part +system sync replica rmt2; +system sync replica rmt1; + +detach table rmt1; + +-- create a gap in block numbers by dropping part +insert into rmt2 values (300); +alter table rmt2 drop part 'all_19_19_0'; -- remove 200 +insert into rmt2 values (400); +insert into rmt2 values (500); +insert into rmt2 values (600); +system sync replica rmt2; +-- merge through gap +optimize table rmt2; +-- give it a chance to cleanup log + +select sleepEachRow(2) from url('http://localhost:8123/?param_tries={1..10}&query=' || encodeURLComponent( + 'select value from system.zookeeper where path=''/test/02448/' || currentDatabase() || '/rmt/replicas/1'' and name=''is_lost'' and value=''0''' + ), 'LineAsString', 's String') settings max_threads=1, http_make_head_request=0 format Null; + +-- rmt1 will mimic rmt2, but will not be able to fetch parts for a while +system stop replicated sends rmt2; +attach table rmt1; +-- rmt1 should not show the value (200) from dropped part +select throwIf(n = 200) from rmt1 format Null; +select 11, arraySort(groupArray(n)) from rmt2; + +system start replicated sends rmt2; +system sync replica rmt1; +select 12, arraySort(groupArray(n)) from rmt1; + +drop table rmt1; +drop table rmt2; diff --git a/parser/testdata/02449_check_dependencies_and_table_shutdown/ast.json b/parser/testdata/02449_check_dependencies_and_table_shutdown/ast.json new file mode 100644 index 000000000..75711aace --- /dev/null +++ b/parser/testdata/02449_check_dependencies_and_table_shutdown/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table (children 1)" + }, + { + "explain": " Identifier table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001144797, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/02449_check_dependencies_and_table_shutdown/metadata.json b/parser/testdata/02449_check_dependencies_and_table_shutdown/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02449_check_dependencies_and_table_shutdown/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02449_check_dependencies_and_table_shutdown/query.sql b/parser/testdata/02449_check_dependencies_and_table_shutdown/query.sql new file mode 100644 index 000000000..cb10c50d5 --- /dev/null +++ b/parser/testdata/02449_check_dependencies_and_table_shutdown/query.sql @@ -0,0 +1,41 @@ +DROP TABLE IF EXISTS table; +DROP DICTIONARY IF EXISTS dict; +DROP TABLE IF EXISTS view; + +CREATE TABLE view (id UInt32, value String) ENGINE=ReplicatedMergeTree('/test/2449/{database}', '1') ORDER BY id; +INSERT INTO view VALUES (1, 'v'); + +CREATE DICTIONARY dict (id UInt32, value String) +PRIMARY KEY id +SOURCE(CLICKHOUSE(host 'localhost' port tcpPort() user 'default' db currentDatabase() table 'view')) +LAYOUT (HASHED()) LIFETIME (MIN 600 MAX 600); + +SHOW CREATE dict; + 
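+-- dependency chain under test: the MATERIALIZED default below makes `table` depend on dict (via dictGet), and dict reads from view; this chain is what makes the DROP attempts further down fail with HAVE_DEPENDENT_OBJECTS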
+CREATE TABLE table +( + col MATERIALIZED dictGet(currentDatabase() || '.dict', 'value', toUInt32(1)), + phys Int +) +ENGINE = MergeTree() +ORDER BY tuple(); + +SHOW CREATE TABLE table; + +SELECT * FROM dictionary('dict'); + +DROP TABLE view; -- {serverError HAVE_DEPENDENT_OBJECTS} + +-- check that table is not readonly +INSERT INTO view VALUES (2, 'a'); + +DROP DICTIONARY dict; -- {serverError HAVE_DEPENDENT_OBJECTS} + +-- check that dictionary was not detached +SELECT * FROM dictionary('dict'); +SYSTEM RELOAD DICTIONARY dict; +SELECT * FROM dictionary('dict') ORDER BY id; + +DROP TABLE table; +DROP DICTIONARY dict; +DROP TABLE view; diff --git a/parser/testdata/02451_variadic_null_garbage_data/ast.json b/parser/testdata/02451_variadic_null_garbage_data/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02451_variadic_null_garbage_data/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02451_variadic_null_garbage_data/metadata.json b/parser/testdata/02451_variadic_null_garbage_data/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02451_variadic_null_garbage_data/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02451_variadic_null_garbage_data/query.sql b/parser/testdata/02451_variadic_null_garbage_data/query.sql new file mode 100644 index 000000000..5092362e1 --- /dev/null +++ b/parser/testdata/02451_variadic_null_garbage_data/query.sql @@ -0,0 +1,20 @@ +-- { echoOn } +SELECT argMax((n, n), n) t, toTypeName(t) FROM (SELECT if(number >= 100, NULL, number) AS n from numbers(10)); +SELECT argMax((n, n), n) t, toTypeName(t) FROM (SELECT if(number <= 100, NULL, number) AS n from numbers(10)); +SELECT argMax((n, n), n) t, toTypeName(t) FROM (SELECT if(number % 3 = 0, NULL, number) AS n from numbers(10)); + +SELECT argMax((n, n), n) t, toTypeName(t) FROM (SELECT if(number >= 100, NULL, number::Int32) AS n from numbers(10)); +SELECT argMax((n, n), n) t, toTypeName(t) FROM (SELECT if(number <= 100, NULL, number::Int32) AS n from numbers(10)); +SELECT argMax((n, n), n) t, toTypeName(t) FROM (SELECT if(number % 3 = 0, NULL, number::Int32) AS n from numbers(10)); + +SELECT argMin((n, n), n) t, toTypeName(t) FROM (SELECT if(number >= 100, NULL, number) AS n from numbers(5, 10)); +SELECT argMin((n, n), n) t, toTypeName(t) FROM (SELECT if(number <= 100, NULL, number) AS n from numbers(5, 10)); +SELECT argMin((n, n), n) t, toTypeName(t) FROM (SELECT if(number % 5 == 0, NULL, number) as n from numbers(5, 10)); + +SELECT argMin((n, n), n) t, toTypeName(t) FROM (SELECT if(number >= 100, NULL, number::Int32) AS n from numbers(5, 10)); +SELECT argMin((n, n), n) t, toTypeName(t) FROM (SELECT if(number <= 100, NULL, number::Int32) AS n from numbers(5, 10)); +SELECT argMin((n, n), n) t, toTypeName(t) FROM (SELECT if(number % 5 == 0, NULL, number::Int32) as n from numbers(5, 10)); + +SELECT argMaxIf((n, n), n, n > 100) t, toTypeName(t) FROM (SELECT if(number % 3 = 0, NULL, number) AS n from numbers(50)); +SELECT argMaxIf((n, n), n, n < 100) t, toTypeName(t) FROM (SELECT if(number % 3 = 0, NULL, number) AS n from numbers(50)); +SELECT argMaxIf((n, n), n, n % 5 == 0) t, toTypeName(t) FROM (SELECT if(number % 3 = 0, NULL, number) AS n from numbers(50)); diff --git a/parser/testdata/02452_check_low_cardinality/ast.json b/parser/testdata/02452_check_low_cardinality/ast.json new file mode 100644 index 000000000..633e354ff --- /dev/null +++ 
b/parser/testdata/02452_check_low_cardinality/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_low_cardinality_string (children 1)" + }, + { + "explain": " Identifier test_low_cardinality_string" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001583421, + "rows_read": 2, + "bytes_read": 106 + } +} diff --git a/parser/testdata/02452_check_low_cardinality/metadata.json b/parser/testdata/02452_check_low_cardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02452_check_low_cardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02452_check_low_cardinality/query.sql b/parser/testdata/02452_check_low_cardinality/query.sql new file mode 100644 index 000000000..166be2814 --- /dev/null +++ b/parser/testdata/02452_check_low_cardinality/query.sql @@ -0,0 +1,54 @@ +DROP TABLE IF EXISTS test_low_cardinality_string; +DROP TABLE IF EXISTS test_low_cardinality_uuid; +DROP TABLE IF EXISTS test_low_cardinality_int; +CREATE TABLE test_low_cardinality_string (data String) ENGINE MergeTree ORDER BY data; +CREATE TABLE test_low_cardinality_uuid (data String) ENGINE MergeTree ORDER BY data; +CREATE TABLE test_low_cardinality_int (data String) ENGINE MergeTree ORDER BY data; +INSERT INTO test_low_cardinality_string (data) VALUES ('{"a": "hi", "b": "hello", "c": "hola", "d": "see you, bye, bye"}'); +INSERT INTO test_low_cardinality_int (data) VALUES ('{"a": 11, "b": 2222, "c": 33333333, "d": 4444444444444444}'); +INSERT INTO test_low_cardinality_uuid (data) VALUES ('{"a": "2d49dc6e-ddce-4cd0-afb8-790956df54c4", "b": "2d49dc6e-ddce-4cd0-afb8-790956df54c3", "c": "2d49dc6e-ddce-4cd0-afb8-790956df54c1", "d": "2d49dc6e-ddce-4cd0-afb8-790956df54c1"}'); +SELECT JSONExtract(data, 'Tuple( + a LowCardinality(String), + b LowCardinality(String), + c LowCardinality(String), + d LowCardinality(String) + )') AS json FROM test_low_cardinality_string; +SELECT JSONExtract(data, 'Tuple( + a LowCardinality(FixedString(20)), + b LowCardinality(FixedString(20)), + c LowCardinality(FixedString(20)), + d LowCardinality(FixedString(20)) + )') AS json FROM test_low_cardinality_string; +SELECT JSONExtract(data, 'Tuple( + a LowCardinality(Int8), + b LowCardinality(Int8), + c LowCardinality(Int8), + d LowCardinality(Int8) + )') AS json FROM test_low_cardinality_int; +SELECT JSONExtract(data, 'Tuple( + a LowCardinality(Int16), + b LowCardinality(Int16), + c LowCardinality(Int16), + d LowCardinality(Int16) + )') AS json FROM test_low_cardinality_int; +SELECT JSONExtract(data, 'Tuple( + a LowCardinality(Int32), + b LowCardinality(Int32), + c LowCardinality(Int32), + d LowCardinality(Int32) + )') AS json FROM test_low_cardinality_int; +SELECT JSONExtract(data, 'Tuple( + a LowCardinality(Int64), + b LowCardinality(Int64), + c LowCardinality(Int64), + d LowCardinality(Int64) + )') AS json FROM test_low_cardinality_int; +SELECT JSONExtract(data, 'Tuple( + a LowCardinality(UUID), + b LowCardinality(UUID), + c LowCardinality(UUID), + d LowCardinality(UUID) + )') AS json FROM test_low_cardinality_uuid; +DROP TABLE test_low_cardinality_string; +DROP TABLE test_low_cardinality_uuid; +DROP TABLE test_low_cardinality_int; diff --git a/parser/testdata/02452_json_utf8_validation/ast.json b/parser/testdata/02452_json_utf8_validation/ast.json new file mode 100644 index 000000000..6c8be8645 --- /dev/null +++ b/parser/testdata/02452_json_utf8_validation/ast.json @@ -0,0 
+1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001433922, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02452_json_utf8_validation/metadata.json b/parser/testdata/02452_json_utf8_validation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02452_json_utf8_validation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02452_json_utf8_validation/query.sql b/parser/testdata/02452_json_utf8_validation/query.sql new file mode 100644 index 000000000..e0ddbcdc9 --- /dev/null +++ b/parser/testdata/02452_json_utf8_validation/query.sql @@ -0,0 +1,42 @@ +SET output_format_write_statistics = 0; +SET output_format_json_validate_utf8 = 1; +SELECT 'JSONCompact'; +SELECT '\xED\x20\xA8' AS s FORMAT JSONCompact; +SELECT 'JSON'; +SELECT '\xED\x20\xA8' AS s FORMAT JSON; +SELECT 'XML'; +SELECT '\xED\x20\xA8' AS s FORMAT XML; +SELECT 'JSONColumnsWithMetadata'; +SELECT '\xED\x20\xA8' AS s FORMAT JSONColumnsWithMetadata; +SELECT 'JSONEachRow'; +SELECT '\xED\x20\xA8' AS s FORMAT JSONEachRow; +SELECT 'JSONCompactEachRow'; +SELECT '\xED\x20\xA8' AS s FORMAT JSONCompactEachRow; +SELECT 'JSONColumns'; +SELECT '\xED\x20\xA8' AS s FORMAT JSONColumns; +SELECT 'JSONCompactColumns'; +SELECT '\xED\x20\xA8' AS s FORMAT JSONCompactColumns; +SELECT 'JSONObjectEachRow'; +SELECT '\xED\x20\xA8' AS s FORMAT JSONObjectEachRow; + +SET output_format_json_validate_utf8 = 0; +SELECT 'JSONCompact'; +SELECT '\xED\x20\xA8' AS s FORMAT JSONCompact; +SELECT 'JSON'; +SELECT '\xED\x20\xA8' AS s FORMAT JSON; +SELECT 'XML'; +SELECT '\xED\x20\xA8' AS s FORMAT XML; +SELECT 'JSONColumnsWithMetadata'; +SELECT '\xED\x20\xA8' AS s FORMAT JSONColumnsWithMetadata; +SELECT 'JSONEachRow'; +SELECT '\xED\x20\xA8' AS s FORMAT JSONEachRow; +SELECT 'JSONCompactEachRow'; +SELECT '\xED\x20\xA8' AS s FORMAT JSONCompactEachRow; +SELECT 'JSONColumns'; +SELECT '\xED\x20\xA8' AS s FORMAT JSONColumns; +SELECT 'JSONCompactColumns'; +SELECT '\xED\x20\xA8' AS s FORMAT JSONCompactColumns; +SELECT 'JSONObjectEachRow'; +SELECT '\xED\x20\xA8' AS s FORMAT JSONObjectEachRow; + + diff --git a/parser/testdata/02453_check_path_in_errors_logger/ast.json b/parser/testdata/02453_check_path_in_errors_logger/ast.json new file mode 100644 index 000000000..a5cfb4ea3 --- /dev/null +++ b/parser/testdata/02453_check_path_in_errors_logger/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "InsertQuery (children 3)" + }, + { + "explain": " Function file (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier 02453_data.jsonl" + }, + { + "explain": " Identifier TSV" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Set" + }, + { + "explain": " Set" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001252642, + "rows_read": 12, + "bytes_read": 378 + } +} diff --git a/parser/testdata/02453_check_path_in_errors_logger/metadata.json b/parser/testdata/02453_check_path_in_errors_logger/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02453_check_path_in_errors_logger/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02453_check_path_in_errors_logger/query.sql b/parser/testdata/02453_check_path_in_errors_logger/query.sql new file mode 100644 index 000000000..f8f206ec9 --- /dev/null +++ b/parser/testdata/02453_check_path_in_errors_logger/query.sql @@ -0,0 +1,3 @@ +insert into function file(02453_data.jsonl, TSV) select 1 settings engine_file_truncate_on_insert=1; +select * from file(02453_data.jsonl, auto, 'x UInt32') settings input_format_allow_errors_num=1, input_format_record_errors_file_path='../error_file'; -- {serverError DATABASE_ACCESS_DENIED} + diff --git a/parser/testdata/02454_compressed_marks_in_compact_part/ast.json b/parser/testdata/02454_compressed_marks_in_compact_part/ast.json new file mode 100644 index 000000000..918d23428 --- /dev/null +++ b/parser/testdata/02454_compressed_marks_in_compact_part/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery cc (children 1)" + }, + { + "explain": " Identifier cc" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001286574, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02454_compressed_marks_in_compact_part/metadata.json b/parser/testdata/02454_compressed_marks_in_compact_part/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02454_compressed_marks_in_compact_part/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02454_compressed_marks_in_compact_part/query.sql b/parser/testdata/02454_compressed_marks_in_compact_part/query.sql new file mode 100644 index 000000000..332b1c05d --- /dev/null +++ b/parser/testdata/02454_compressed_marks_in_compact_part/query.sql @@ -0,0 +1,6 @@ +drop table if exists cc sync; +create table cc (a UInt64, b String) ENGINE = MergeTree order by (a, b) SETTINGS compress_marks = true; +insert into cc values (2, 'World'); +alter table cc detach part 'all_1_1_0'; +alter table cc attach part 'all_1_1_0'; +select * from cc; diff --git a/parser/testdata/02454_disable_mergetree_with_lightweight_delete_column/ast.json b/parser/testdata/02454_disable_mergetree_with_lightweight_delete_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02454_disable_mergetree_with_lightweight_delete_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02454_disable_mergetree_with_lightweight_delete_column/metadata.json b/parser/testdata/02454_disable_mergetree_with_lightweight_delete_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02454_disable_mergetree_with_lightweight_delete_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02454_disable_mergetree_with_lightweight_delete_column/query.sql b/parser/testdata/02454_disable_mergetree_with_lightweight_delete_column/query.sql new file mode 100644 index 000000000..19835503d --- /dev/null +++ b/parser/testdata/02454_disable_mergetree_with_lightweight_delete_column/query.sql @@ -0,0 +1,23 @@ +-- Tags: memory-engine +drop table if exists t_row_exists; + +create table t_row_exists(a int, _row_exists int) engine=MergeTree order by a; --{serverError ILLEGAL_COLUMN} + +create table t_row_exists(a int, b int) engine=MergeTree order by a; +alter table t_row_exists add column _row_exists int; --{serverError ILLEGAL_COLUMN} +alter table 
t_row_exists rename column b to _row_exists; --{serverError ILLEGAL_COLUMN} +alter table t_row_exists rename column _row_exists to c; --{serverError NOT_FOUND_COLUMN_IN_BLOCK} +alter table t_row_exists drop column _row_exists; --{serverError NOT_FOUND_COLUMN_IN_BLOCK} +alter table t_row_exists drop column unknown_column; --{serverError NOT_FOUND_COLUMN_IN_BLOCK} +drop table t_row_exists; + +create table t_row_exists(a int, _row_exists int) engine=Memory; +insert into t_row_exists values(1,1); +select * from t_row_exists; +drop table t_row_exists; + +create table t_row_exists(a int, b int) engine=Memory; +alter table t_row_exists add column _row_exists int; +alter table t_row_exists drop column _row_exists; +alter table t_row_exists rename column b to _row_exists; +drop table t_row_exists; diff --git a/parser/testdata/02454_json_object_each_row_column_for_object_name/ast.json b/parser/testdata/02454_json_object_each_row_column_for_object_name/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02454_json_object_each_row_column_for_object_name/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02454_json_object_each_row_column_for_object_name/metadata.json b/parser/testdata/02454_json_object_each_row_column_for_object_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02454_json_object_each_row_column_for_object_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02454_json_object_each_row_column_for_object_name/query.sql b/parser/testdata/02454_json_object_each_row_column_for_object_name/query.sql new file mode 100644 index 000000000..49ad1c670 --- /dev/null +++ b/parser/testdata/02454_json_object_each_row_column_for_object_name/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-fasttest, no-parallel +set format_json_object_each_row_column_for_object_name='name'; +set input_format_json_try_infer_numbers_from_strings=1; + +select number, concat('name_', toString(number)) as name from numbers(3) format JSONObjectEachRow; +select number, concat('name_', toString(number)) as name, number + 1 as x from numbers(3) format JSONObjectEachRow; +select concat('name_', toString(number)) as name, number from numbers(3) format JSONObjectEachRow; + +insert into function file(02454_data.jsonobjecteachrow) select number, concat('name_', toString(number)) as name from numbers(3) settings engine_file_truncate_on_insert=1; +desc file(02454_data.jsonobjecteachrow); +select * from file(02454_data.jsonobjecteachrow); + diff --git a/parser/testdata/02455_count_state_asterisk/ast.json b/parser/testdata/02455_count_state_asterisk/ast.json new file mode 100644 index 000000000..727fdd654 --- /dev/null +++ b/parser/testdata/02455_count_state_asterisk/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery a (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001597367, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02455_count_state_asterisk/metadata.json b/parser/testdata/02455_count_state_asterisk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02455_count_state_asterisk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02455_count_state_asterisk/query.sql b/parser/testdata/02455_count_state_asterisk/query.sql new file mode 100644 index 
000000000..cb6ded3de --- /dev/null +++ b/parser/testdata/02455_count_state_asterisk/query.sql @@ -0,0 +1,11 @@ +drop table if exists a; +drop table if exists b; + +create table a (i int, j int) engine Log; +create materialized view b engine Log as select countState(*) from a; + +insert into a values (1, 2); +select countMerge(*) from b; + +drop table b; +drop table a; diff --git a/parser/testdata/02455_duplicate_column_names_in_schema_inference/ast.json b/parser/testdata/02455_duplicate_column_names_in_schema_inference/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02455_duplicate_column_names_in_schema_inference/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02455_duplicate_column_names_in_schema_inference/metadata.json b/parser/testdata/02455_duplicate_column_names_in_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02455_duplicate_column_names_in_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02455_duplicate_column_names_in_schema_inference/query.sql b/parser/testdata/02455_duplicate_column_names_in_schema_inference/query.sql new file mode 100644 index 000000000..f67e5496a --- /dev/null +++ b/parser/testdata/02455_duplicate_column_names_in_schema_inference/query.sql @@ -0,0 +1,7 @@ +-- Tags: no-fasttest + +desc format(JSONEachRow, '{"x" : 1, "x" : 2}'); -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} +desc format(JSONEachRow, '{"x" : 1, "y" : 2}\n{"x" : 2, "x" : 3}'); -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} +desc format(CSVWithNames, 'a,b,a\n1,2,3'); -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} +desc format(CSV, '1,2,3') settings column_names_for_schema_inference='a, b, a'; -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} + diff --git a/parser/testdata/02455_extract_fixed_string_from_nested_json/ast.json b/parser/testdata/02455_extract_fixed_string_from_nested_json/ast.json new file mode 100644 index 000000000..d07ff2a0e --- /dev/null +++ b/parser/testdata/02455_extract_fixed_string_from_nested_json/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_fixed_string_nested_json (children 1)" + }, + { + "explain": " Identifier test_fixed_string_nested_json" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001719147, + "rows_read": 2, + "bytes_read": 110 + } +} diff --git a/parser/testdata/02455_extract_fixed_string_from_nested_json/metadata.json b/parser/testdata/02455_extract_fixed_string_from_nested_json/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02455_extract_fixed_string_from_nested_json/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02455_extract_fixed_string_from_nested_json/query.sql b/parser/testdata/02455_extract_fixed_string_from_nested_json/query.sql new file mode 100644 index 000000000..7466bd7e2 --- /dev/null +++ b/parser/testdata/02455_extract_fixed_string_from_nested_json/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS test_fixed_string_nested_json; +CREATE TABLE test_fixed_string_nested_json (data String) ENGINE MergeTree ORDER BY data; +INSERT INTO test_fixed_string_nested_json (data) VALUES ('{"a" : {"b" : {"c" : 1, "d" : "str"}}}'); +SELECT JSONExtract(data, 'Tuple(a FixedString(24))') AS json FROM test_fixed_string_nested_json; +DROP TABLE test_fixed_string_nested_json; \ No newline at end of file diff 
--git a/parser/testdata/02455_improve_feedback_when_replacing_partition_with_different_primary_key/ast.json b/parser/testdata/02455_improve_feedback_when_replacing_partition_with_different_primary_key/ast.json new file mode 100644 index 000000000..85312456e --- /dev/null +++ b/parser/testdata/02455_improve_feedback_when_replacing_partition_with_different_primary_key/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test_a (children 3)" + }, + { + "explain": " Identifier test_a" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration id (children 1)" + }, + { + "explain": " DataType UInt32" + }, + { + "explain": " ColumnDeclaration company (children 1)" + }, + { + "explain": " DataType UInt32" + }, + { + "explain": " ColumnDeclaration total (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 4)" + }, + { + "explain": " Function SummingMergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier company" + }, + { + "explain": " Identifier id" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier id" + }, + { + "explain": " Identifier company" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001704889, + "rows_read": 19, + "bytes_read": 657 + } +} diff --git a/parser/testdata/02455_improve_feedback_when_replacing_partition_with_different_primary_key/metadata.json b/parser/testdata/02455_improve_feedback_when_replacing_partition_with_different_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02455_improve_feedback_when_replacing_partition_with_different_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02455_improve_feedback_when_replacing_partition_with_different_primary_key/query.sql b/parser/testdata/02455_improve_feedback_when_replacing_partition_with_different_primary_key/query.sql new file mode 100644 index 000000000..d000fb447 --- /dev/null +++ b/parser/testdata/02455_improve_feedback_when_replacing_partition_with_different_primary_key/query.sql @@ -0,0 +1,4 @@ +CREATE TABLE test_a (id UInt32, company UInt32, total UInt64) ENGINE=SummingMergeTree() PARTITION BY company PRIMARY KEY (id) ORDER BY (id, company); +INSERT INTO test_a SELECT number%10 as id, number%2 as company, count() as total FROM numbers(100) GROUP BY id,company; +CREATE TABLE test_b (id UInt32, company UInt32, total UInt64) ENGINE=SummingMergeTree() PARTITION BY company ORDER BY (id, company); +ALTER TABLE test_b REPLACE PARTITION '0' FROM test_a; -- {serverError BAD_ARGUMENTS} diff --git a/parser/testdata/02456_BLAKE3_hash_function_test/ast.json b/parser/testdata/02456_BLAKE3_hash_function_test/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02456_BLAKE3_hash_function_test/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02456_BLAKE3_hash_function_test/metadata.json b/parser/testdata/02456_BLAKE3_hash_function_test/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02456_BLAKE3_hash_function_test/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02456_BLAKE3_hash_function_test/query.sql 
b/parser/testdata/02456_BLAKE3_hash_function_test/query.sql new file mode 100644 index 000000000..88484f482 --- /dev/null +++ b/parser/testdata/02456_BLAKE3_hash_function_test/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest + +SELECT hex(BLAKE3('test_1')); +SELECT hex(BLAKE3('test_2')); +SELECT hex(BLAKE3('test_3')); diff --git a/parser/testdata/02456_aggregate_state_conversion/ast.json b/parser/testdata/02456_aggregate_state_conversion/ast.json new file mode 100644 index 000000000..7755c4838 --- /dev/null +++ b/parser/testdata/02456_aggregate_state_conversion/ast.json @@ -0,0 +1,109 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function hex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal 'AggregateFunction(sum, Decimal(50, 10))'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayReduce (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'sumState'" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDecimal256 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '0.0000010.000001'" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 29, + + "statistics": + { + "elapsed": 0.001411136, + "rows_read": 29, + "bytes_read": 1256 + } +} diff --git a/parser/testdata/02456_aggregate_state_conversion/metadata.json b/parser/testdata/02456_aggregate_state_conversion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02456_aggregate_state_conversion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02456_aggregate_state_conversion/query.sql b/parser/testdata/02456_aggregate_state_conversion/query.sql new file mode 100644 index 000000000..3c05c59de --- /dev/null +++ b/parser/testdata/02456_aggregate_state_conversion/query.sql @@ -0,0 +1 @@ +SELECT hex(CAST(x, 'AggregateFunction(sum, Decimal(50, 10))')) FROM (SELECT arrayReduce('sumState', [toDecimal256('0.0000010.000001', 10)]) AS x) GROUP BY x; diff --git a/parser/testdata/02456_alter-nullable-column-bag-2/ast.json b/parser/testdata/02456_alter-nullable-column-bag-2/ast.json new file mode 100644 index 000000000..1ded7f153 --- /dev/null +++ b/parser/testdata/02456_alter-nullable-column-bag-2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + 
"statistics": + { + "elapsed": 0.001194726, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02456_alter-nullable-column-bag-2/metadata.json b/parser/testdata/02456_alter-nullable-column-bag-2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02456_alter-nullable-column-bag-2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02456_alter-nullable-column-bag-2/query.sql b/parser/testdata/02456_alter-nullable-column-bag-2/query.sql new file mode 100644 index 000000000..d66c5f0e5 --- /dev/null +++ b/parser/testdata/02456_alter-nullable-column-bag-2/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS t1 SYNC; +CREATE TABLE t1 (v UInt64) ENGINE=ReplicatedMergeTree('/test/tables/{database}/test/t1', 'r1') ORDER BY v PARTITION BY v; +INSERT INTO t1 values(1); +ALTER TABLE t1 ADD COLUMN s String; +INSERT INTO t1 values(1, '1'); +ALTER TABLE t1 MODIFY COLUMN s Nullable(String); +-- SELECT _part, * FROM t1; + +alter table t1 detach partition 1; + +SELECT _part, * FROM t1; +--0 rows in set. Elapsed: 0.001 sec. + +alter table t1 attach partition 1; +select count() from t1; + diff --git a/parser/testdata/02456_alter-nullable-column-bag/ast.json b/parser/testdata/02456_alter-nullable-column-bag/ast.json new file mode 100644 index 000000000..ff9fdee61 --- /dev/null +++ b/parser/testdata/02456_alter-nullable-column-bag/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery column_modify_test (children 1)" + }, + { + "explain": " Identifier column_modify_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00125964, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/02456_alter-nullable-column-bag/metadata.json b/parser/testdata/02456_alter-nullable-column-bag/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02456_alter-nullable-column-bag/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02456_alter-nullable-column-bag/query.sql b/parser/testdata/02456_alter-nullable-column-bag/query.sql new file mode 100644 index 000000000..6fab3fa37 --- /dev/null +++ b/parser/testdata/02456_alter-nullable-column-bag/query.sql @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS column_modify_test; + +CREATE TABLE column_modify_test (id UInt64, val String, other_col UInt64) engine=MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part=0; +INSERT INTO column_modify_test VALUES (1,'one',0); +INSERT INTO column_modify_test VALUES (2,'two',0); + +-- on 21.9 that was done via mutations mechanism +ALTER TABLE column_modify_test MODIFY COLUMN val Nullable(String); + +-- but since 21.10 it only applies that to new part, so old parts keep the old schema +--SELECT * FROM system.mutations; + +INSERT INTO column_modify_test VALUES (3,Null,0); + +--select name, path, type, active, modification_time from system.parts_columns where table='column_modify_test' and column='val'; + +-- till now everythings looks ok +--SELECT * FROM column_modify_test; + +-- now we do mutation. It will affect one of the parts +-- and it what part it will update columns.txt to the latest 'correct' state w/o updating the column file! +alter table column_modify_test update other_col=1 where id = 1 SETTINGS mutations_sync=1; + +-- row 1 is damaged now: the column files of val columns & columns.txt is out of sync! 
+SELECT *, throwIf(val <> 'one') FROM column_modify_test WHERE id = 1 FORMAT CSV; + diff --git a/parser/testdata/02456_datetime_schema_inference/ast.json b/parser/testdata/02456_datetime_schema_inference/ast.json new file mode 100644 index 000000000..cd19a2fa2 --- /dev/null +++ b/parser/testdata/02456_datetime_schema_inference/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'TSV'" + }, + { + "explain": " Literal '222222222222222'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001215585, + "rows_read": 12, + "bytes_read": 458 + } +} diff --git a/parser/testdata/02456_datetime_schema_inference/metadata.json b/parser/testdata/02456_datetime_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02456_datetime_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02456_datetime_schema_inference/query.sql b/parser/testdata/02456_datetime_schema_inference/query.sql new file mode 100644 index 000000000..123bb324f --- /dev/null +++ b/parser/testdata/02456_datetime_schema_inference/query.sql @@ -0,0 +1,15 @@ +select * from format('TSV', '222222222222222'); +select * from format('TSV', '22222222222.2222'); +set date_time_input_format = 'basic'; +select * from format('TSV', '2022-04-22T03:45:06.381'); +select * from format('TSV', '2022-04-22T03:45:06.381Z'); +select * from format('TSV', '01/12/1925'); +set date_time_input_format = 'best_effort'; +select * from format('TSV', '2022-04-22T03:45:06.381'); +select toTimeZone(c1, 'UTC') from format('TSV', '2022-04-22T03:45:06.381Z'); +select * from format('TSV', '01/12/1925'); +set date_time_input_format = 'best_effort_us'; +select * from format('TSV', '2022-04-22T03:45:06.381'); +select toTimeZone(c1, 'UTC') from format('TSV', '2022-04-22T03:45:06.381Z'); +select * from format('TSV', '01/12/1925'); +select * from format(CSV, '""'); diff --git a/parser/testdata/02456_keeper_retries_during_insert/ast.json b/parser/testdata/02456_keeper_retries_during_insert/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02456_keeper_retries_during_insert/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02456_keeper_retries_during_insert/metadata.json b/parser/testdata/02456_keeper_retries_during_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02456_keeper_retries_during_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02456_keeper_retries_during_insert/query.sql b/parser/testdata/02456_keeper_retries_during_insert/query.sql new file mode 100644 index 000000000..774370bc1 --- /dev/null +++ b/parser/testdata/02456_keeper_retries_during_insert/query.sql @@ -0,0 +1,26 @@ +-- Tags: replica + +DROP TABLE IF EXISTS keeper_retries_r1 SYNC; +DROP TABLE IF EXISTS keeper_retries_r2 SYNC; + +CREATE TABLE 
keeper_retries_r1(a UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/02456_keeper_retries_during_insert', 'r1') ORDER BY tuple (); +CREATE TABLE keeper_retries_r2(a UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/02456_keeper_retries_during_insert', 'r2') ORDER BY tuple(); + +INSERT INTO keeper_retries_r1 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (1); +INSERT INTO keeper_retries_r1 SETTINGS insert_keeper_fault_injection_probability=1, insert_keeper_max_retries=0 VALUES (2); -- { serverError KEEPER_EXCEPTION } +INSERT INTO keeper_retries_r1 SETTINGS insert_keeper_fault_injection_probability=1, insert_keeper_retry_max_backoff_ms=10 VALUES (3); -- { serverError KEEPER_EXCEPTION } + +SET insert_quorum=2; +INSERT INTO keeper_retries_r1 SETTINGS insert_keeper_fault_injection_probability=0 VALUES (11); +INSERT INTO keeper_retries_r1 SETTINGS insert_keeper_fault_injection_probability=1, insert_keeper_max_retries=0 VALUES (12); -- { serverError KEEPER_EXCEPTION } +INSERT INTO keeper_retries_r1 SETTINGS insert_keeper_fault_injection_probability=1, insert_keeper_retry_max_backoff_ms=1 VALUES (13); -- { serverError KEEPER_EXCEPTION } + +-- INSERT INTO keeper_retries_r1 SETTINGS insert_keeper_fault_injection_mode=1, insert_keeper_fault_injection_probability=0.05, insert_keeper_fault_injection_seed=1 VALUES (21); +-- INSERT INTO keeper_retries_r1 SETTINGS insert_keeper_fault_injection_mode=1, insert_keeper_fault_injection_probability=0.2, insert_keeper_max_retries=100, insert_keeper_retry_max_backoff_ms=1, insert_keeper_fault_injection_seed=2 VALUES (22); +-- INSERT INTO keeper_retries_r1 SETTINGS insert_keeper_fault_injection_mode=1, insert_keeper_fault_injection_probability=0.3, insert_keeper_max_retries=100, insert_keeper_retry_max_backoff_ms=1, insert_keeper_fault_injection_seed=3 VALUES (23); +-- INSERT INTO keeper_retries_r1 SETTINGS insert_keeper_fault_injection_mode=1, insert_keeper_fault_injection_probability=0.4, insert_keeper_max_retries=100, insert_keeper_retry_max_backoff_ms=1, insert_keeper_fault_injection_seed=4 VALUES (24); + +SELECT * FROM keeper_retries_r1 order by a; + +DROP TABLE keeper_retries_r1 SYNC; +DROP TABLE keeper_retries_r2 SYNC; diff --git a/parser/testdata/02456_summing_mt_lc/ast.json b/parser/testdata/02456_summing_mt_lc/ast.json new file mode 100644 index 000000000..a877d3649 --- /dev/null +++ b/parser/testdata/02456_summing_mt_lc/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001199916, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02456_summing_mt_lc/metadata.json b/parser/testdata/02456_summing_mt_lc/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02456_summing_mt_lc/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02456_summing_mt_lc/query.sql b/parser/testdata/02456_summing_mt_lc/query.sql new file mode 100644 index 000000000..297eb64c6 --- /dev/null +++ b/parser/testdata/02456_summing_mt_lc/query.sql @@ -0,0 +1,20 @@ +SET allow_suspicious_low_cardinality_types = 1; + +DROP TABLE IF EXISTS t_summing_lc; + +CREATE TABLE t_summing_lc +( + `key` UInt32, + `val` LowCardinality(UInt32), + `date` DateTime +) +ENGINE = SummingMergeTree(val) +PARTITION BY date +ORDER BY key; + +INSERT INTO t_summing_lc VALUES (1, 1, '2020-01-01'), (2, 1, '2020-01-02'), (1, 5, 
'2020-01-01'), (2, 5, '2020-01-02'); + +OPTIMIZE TABLE t_summing_lc FINAL; +SELECT * FROM t_summing_lc ORDER BY key; + +DROP TABLE t_summing_lc; diff --git a/parser/testdata/02457_datediff_via_unix_epoch/ast.json b/parser/testdata/02457_datediff_via_unix_epoch/ast.json new file mode 100644 index 000000000..98ea453c5 --- /dev/null +++ b/parser/testdata/02457_datediff_via_unix_epoch/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'year'" + }, + { + "explain": " Function dateDiff (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'year'" + }, + { + "explain": " Function toDate32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '1969-12-25'" + }, + { + "explain": " Function toDate32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '1970-01-05'" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001551315, + "rows_read": 14, + "bytes_read": 529 + } +} diff --git a/parser/testdata/02457_datediff_via_unix_epoch/metadata.json b/parser/testdata/02457_datediff_via_unix_epoch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02457_datediff_via_unix_epoch/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02457_datediff_via_unix_epoch/query.sql b/parser/testdata/02457_datediff_via_unix_epoch/query.sql new file mode 100644 index 000000000..ce9777129 --- /dev/null +++ b/parser/testdata/02457_datediff_via_unix_epoch/query.sql @@ -0,0 +1,18 @@ +select 'year', date_diff('year', toDate32('1969-12-25'), toDate32('1970-01-05')); +select 'year', date_diff('year', toDateTime64('1969-12-25 10:00:00.000', 3), toDateTime64('1970-01-05 10:00:00.000', 3)); + +select 'quarter', date_diff('quarter', toDate32('1969-12-25'), toDate32('1970-01-05')); +select 'quarter', date_diff('quarter', toDateTime64('1969-12-25 10:00:00.000', 3), toDateTime64('1970-01-05 10:00:00.000', 3)); + +select 'month', date_diff('month', toDate32('1969-12-25'), toDate32('1970-01-05')); +select 'month', date_diff('month', toDateTime64('1969-12-25 10:00:00.000', 3), toDateTime64('1970-01-05 10:00:00.000', 3)); + +select 'week', date_diff('week', toDate32('1969-12-25'), toDate32('1970-01-05')); +select 'week', date_diff('week', toDateTime64('1969-12-25 10:00:00.000', 3), toDateTime64('1970-01-05 10:00:00.000', 3)); + +select 'day', date_diff('day', toDate32('1969-12-25'), toDate32('1970-01-05')); +select 'day', date_diff('day', toDateTime64('1969-12-25 10:00:00.000', 3), toDateTime64('1970-01-05 10:00:00.000', 3)); + +select 'minute', date_diff('minute', toDate32('1969-12-31'), toDate32('1970-01-01')); + +select 'second', date_diff('second', toDate32('1969-12-31'), toDate32('1970-01-01')); diff --git a/parser/testdata/02457_filesystem_function/ast.json b/parser/testdata/02457_filesystem_function/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02457_filesystem_function/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02457_filesystem_function/metadata.json b/parser/testdata/02457_filesystem_function/metadata.json new file mode 100644 index 000000000..ef120d978 
--- /dev/null +++ b/parser/testdata/02457_filesystem_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02457_filesystem_function/query.sql b/parser/testdata/02457_filesystem_function/query.sql new file mode 100644 index 000000000..d8322bc65 --- /dev/null +++ b/parser/testdata/02457_filesystem_function/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest + +select filesystemCapacity('s3_disk') >= filesystemAvailable('s3_disk') and filesystemAvailable('s3_disk') >= filesystemUnreserved('s3_disk'); +select filesystemCapacity('default') >= filesystemAvailable('default') and filesystemAvailable('default') >= 0 and filesystemUnreserved('default') >= 0; + +select filesystemCapacity('__un_exists_disk'); -- { serverError UNKNOWN_DISK } diff --git a/parser/testdata/02457_key_condition_with_types_that_cannot_be_nullable/ast.json b/parser/testdata/02457_key_condition_with_types_that_cannot_be_nullable/ast.json new file mode 100644 index 000000000..567cac5c9 --- /dev/null +++ b/parser/testdata/02457_key_condition_with_types_that_cannot_be_nullable/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001102086, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02457_key_condition_with_types_that_cannot_be_nullable/metadata.json b/parser/testdata/02457_key_condition_with_types_that_cannot_be_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02457_key_condition_with_types_that_cannot_be_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02457_key_condition_with_types_that_cannot_be_nullable/query.sql b/parser/testdata/02457_key_condition_with_types_that_cannot_be_nullable/query.sql new file mode 100644 index 000000000..690ec6c70 --- /dev/null +++ b/parser/testdata/02457_key_condition_with_types_that_cannot_be_nullable/query.sql @@ -0,0 +1,9 @@ +drop table if exists test; + +create table test (Printer LowCardinality(String), IntervalStart DateTime) engine MergeTree partition by (hiveHash(Printer), toYear(IntervalStart)) order by (Printer, IntervalStart); + +insert into test values ('printer1', '2006-02-07 06:28:15'); + +select Printer from test where Printer='printer1'; + +drop table test; diff --git a/parser/testdata/02457_morton_coding/ast.json b/parser/testdata/02457_morton_coding/ast.json new file mode 100644 index 000000000..fe1172ca4 --- /dev/null +++ b/parser/testdata/02457_morton_coding/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '----- START -----'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001285508, + "rows_read": 5, + "bytes_read": 188 + } +} diff --git a/parser/testdata/02457_morton_coding/metadata.json b/parser/testdata/02457_morton_coding/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02457_morton_coding/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02457_morton_coding/query.sql b/parser/testdata/02457_morton_coding/query.sql new file mode 100644 index 
000000000..955cb2e05 --- /dev/null +++ b/parser/testdata/02457_morton_coding/query.sql @@ -0,0 +1,137 @@ +SELECT '----- START -----'; +drop table if exists morton_numbers_02457; +create table morton_numbers_02457( + n1 UInt32, + n2 UInt32, + n3 UInt16, + n4 UInt16, + n5 UInt8, + n6 UInt8, + n7 UInt8, + n8 UInt8 +) + Engine=MergeTree() + ORDER BY n1 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +SELECT '----- CONST -----'; +select mortonEncode(1,2,3,4); +select mortonDecode(4, 2149); +select mortonEncode(65534, 65533); +select mortonDecode(2, 4294967286); +select mortonEncode(4294967286); +select mortonDecode(1, 4294967286); + +SELECT '----- 256, 8 -----'; +insert into morton_numbers_02457 +select n1.number, n2.number, n3.number, n4.number, n5.number, n6.number, n7.number, n8.number +from numbers(256-4, 4) n1 + cross join numbers(256-4, 4) n2 + cross join numbers(256-4, 4) n3 + cross join numbers(256-4, 4) n4 + cross join numbers(256-4, 4) n5 + cross join numbers(256-4, 4) n6 + cross join numbers(256-4, 4) n7 + cross join numbers(256-4, 4) n8 +; +drop table if exists morton_numbers_1_02457; +create table morton_numbers_1_02457( + n1 UInt64, + n2 UInt64, + n3 UInt64, + n4 UInt64, + n5 UInt64, + n6 UInt64, + n7 UInt64, + n8 UInt64 +) + Engine=MergeTree() + ORDER BY n1 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into morton_numbers_1_02457 +select untuple(mortonDecode(8, mortonEncode(n1, n2, n3, n4, n5, n6, n7, n8))) +from morton_numbers_02457; + +( + select * from morton_numbers_02457 + union distinct + select * from morton_numbers_1_02457 +) +except +( + select * from morton_numbers_02457 + intersect + select * from morton_numbers_1_02457 +); +drop table if exists morton_numbers_1_02457; + +SELECT '----- 65536, 4 -----'; +insert into morton_numbers_02457 +select n1.number, n2.number, n3.number, n4.number, 0, 0, 0, 0 +from numbers(pow(2, 16)-8,8) n1 + cross join numbers(pow(2, 16)-8, 8) n2 + cross join numbers(pow(2, 16)-8, 8) n3 + cross join numbers(pow(2, 16)-8, 8) n4 +; + +create table morton_numbers_2_02457( + n1 UInt64, + n2 UInt64, + n3 UInt64, + n4 UInt64 +) + Engine=MergeTree() + ORDER BY n1 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into morton_numbers_2_02457 +select untuple(mortonDecode(4, mortonEncode(n1, n2, n3, n4))) +from morton_numbers_02457; + +( + select n1, n2, n3, n4 from morton_numbers_02457 + union distinct + select n1, n2, n3, n4 from morton_numbers_2_02457 +) +except +( + select n1, n2, n3, n4 from morton_numbers_02457 + intersect + select n1, n2, n3, n4 from morton_numbers_2_02457 +); +drop table if exists morton_numbers_2_02457; + +SELECT '----- 4294967296, 2 -----'; +insert into morton_numbers_02457 +select n1.number, n2.number, 0, 0, 0, 0, 0, 0 +from numbers(pow(2, 32)-8,8) n1 + cross join numbers(pow(2, 32)-8, 8) n2 + cross join numbers(pow(2, 32)-8, 8) n3 + cross join numbers(pow(2, 32)-8, 8) n4 +; + +drop table if exists morton_numbers_3_02457; +create table morton_numbers_3_02457( + n1 UInt64, + n2 UInt64 +) + Engine=MergeTree() + ORDER BY n1 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into morton_numbers_3_02457 +select untuple(mortonDecode(2, mortonEncode(n1, n2))) +from morton_numbers_02457; + +( + select n1, n2 from morton_numbers_3_02457 + union distinct + select n1, n2 from morton_numbers_3_02457 +) +except +( + select n1, n2 from morton_numbers_3_02457 + intersect + select n1, n2 from morton_numbers_3_02457 +); +drop table if 
exists morton_numbers_3_02457; + +SELECT '----- END -----'; +drop table if exists morton_numbers_02457; diff --git a/parser/testdata/02457_morton_coding_with_mask/ast.json b/parser/testdata/02457_morton_coding_with_mask/ast.json new file mode 100644 index 000000000..3d5c2d58d --- /dev/null +++ b/parser/testdata/02457_morton_coding_with_mask/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '----- START -----'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.000887614, + "rows_read": 5, + "bytes_read": 188 + } +} diff --git a/parser/testdata/02457_morton_coding_with_mask/metadata.json b/parser/testdata/02457_morton_coding_with_mask/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02457_morton_coding_with_mask/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02457_morton_coding_with_mask/query.sql b/parser/testdata/02457_morton_coding_with_mask/query.sql new file mode 100644 index 000000000..c95205769 --- /dev/null +++ b/parser/testdata/02457_morton_coding_with_mask/query.sql @@ -0,0 +1,143 @@ +SELECT '----- START -----'; + +SELECT '----- CONST -----'; +select mortonEncode((1,2,3,1), 1,2,3,4); +select mortonDecode((1, 2, 3, 1), 4205569); +select mortonEncode((1,1), 65534, 65533); +select mortonDecode((1,1), 4294967286); +select mortonEncode(tuple(1), 4294967286); +select mortonDecode(tuple(1), 4294967286); +select mortonEncode(tuple(4), 128); +select mortonDecode(tuple(4), 2147483648); +select mortonEncode((4,4,4,4), 128, 128, 128, 128); + +SELECT '----- (1,2,1,2) -----'; +drop table if exists morton_numbers_mask_02457; +create table morton_numbers_mask_02457( + n1 UInt8, + n2 UInt8, + n3 UInt8, + n4 UInt8 +) + Engine=MergeTree() + ORDER BY n1 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into morton_numbers_mask_02457 +select n1.number, n2.number, n3.number, n4.number +from numbers(256-16, 16) n1 + cross join numbers(256-16, 16) n2 + cross join numbers(256-16, 16) n3 + cross join numbers(256-16, 16) n4 +; +drop table if exists morton_numbers_mask_1_02457; +create table morton_numbers_mask_1_02457( + n1 UInt64, + n2 UInt64, + n3 UInt64, + n4 UInt64 +) + Engine=MergeTree() + ORDER BY n1 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into morton_numbers_mask_1_02457 +select untuple(mortonDecode((1,2,1,2), mortonEncode((1,2,1,2), n1, n2, n3, n4))) +from morton_numbers_mask_02457; + +( + select * from morton_numbers_mask_02457 + union distinct + select * from morton_numbers_mask_1_02457 +) +except +( + select * from morton_numbers_mask_02457 + intersect + select * from morton_numbers_mask_1_02457 +); +drop table if exists morton_numbers_mask_02457; +drop table if exists morton_numbers_mask_1_02457; + +SELECT '----- (1,4) -----'; +drop table if exists morton_numbers_mask_02457; +create table morton_numbers_mask_02457( + n1 UInt32, + n2 UInt8 +) + Engine=MergeTree() + ORDER BY n1 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into morton_numbers_mask_02457 +select n1.number, n2.number +from numbers(pow(2, 32)-64, 64) n1 + cross join numbers(pow(2, 8)-64, 64) n2 +; +drop table if exists morton_numbers_mask_2_02457; 
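-- Editor's note (annotation, not part of the upstream fixture): each masked section of this
-- test follows one pattern -- encode a row of columns with a bit-expansion mask, decode with
-- the same mask, then use UNION DISTINCT / EXCEPT / INTERSECT to assert the round trip lost
-- nothing. A minimal standalone sketch of that property, with hypothetical input values:
select mortonDecode((1,4), mortonEncode((1,4), 65534, 200)); -- expected to return (65534, 200)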
+create table morton_numbers_mask_2_02457( + n1 UInt64, + n2 UInt64 +) + Engine=MergeTree() + ORDER BY n1 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into morton_numbers_mask_2_02457 +select untuple(mortonDecode((1,4), mortonEncode((1,4), n1, n2))) +from morton_numbers_mask_02457; + +( + select * from morton_numbers_mask_02457 + union distinct + select * from morton_numbers_mask_2_02457 +) +except +( + select * from morton_numbers_mask_02457 + intersect + select * from morton_numbers_mask_2_02457 +); +drop table if exists morton_numbers_mask_02457; +drop table if exists morton_numbers_mask_2_02457; + +SELECT '----- (1,1,2) -----'; +drop table if exists morton_numbers_mask_02457; +create table morton_numbers_mask_02457( + n1 UInt16, + n2 UInt16, + n3 UInt8, +) + Engine=MergeTree() + ORDER BY n1 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into morton_numbers_mask_02457 +select n1.number, n2.number, n3.number +from numbers(pow(2, 16)-64, 64) n1 + cross join numbers(pow(2, 16)-64, 64) n2 + cross join numbers(pow(2, 8)-64, 64) n3 +; +drop table if exists morton_numbers_mask_3_02457; +create table morton_numbers_mask_3_02457( + n1 UInt64, + n2 UInt64, + n3 UInt64 +) + Engine=MergeTree() + ORDER BY n1 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into morton_numbers_mask_3_02457 +select untuple(mortonDecode((1,1,2), mortonEncode((1,1,2), n1, n2, n3))) +from morton_numbers_mask_02457; + +( + select * from morton_numbers_mask_02457 + union distinct + select * from morton_numbers_mask_3_02457 +) +except +( + select * from morton_numbers_mask_02457 + intersect + select * from morton_numbers_mask_3_02457 +); +drop table if exists morton_numbers_mask_02457; +drop table if exists morton_numbers_mask_3_02457; + +SELECT '----- END -----'; diff --git a/parser/testdata/02457_parse_date_time_best_effort/ast.json b/parser/testdata/02457_parse_date_time_best_effort/ast.json new file mode 100644 index 000000000..8bc05bd53 --- /dev/null +++ b/parser/testdata/02457_parse_date_time_best_effort/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function parseDateTimeBestEffort (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '01\/12\/2017, 18:31:44'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001170498, + "rows_read": 7, + "bytes_read": 290 + } +} diff --git a/parser/testdata/02457_parse_date_time_best_effort/metadata.json b/parser/testdata/02457_parse_date_time_best_effort/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02457_parse_date_time_best_effort/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02457_parse_date_time_best_effort/query.sql b/parser/testdata/02457_parse_date_time_best_effort/query.sql new file mode 100644 index 000000000..5eb00049b --- /dev/null +++ b/parser/testdata/02457_parse_date_time_best_effort/query.sql @@ -0,0 +1,16 @@ +select parseDateTimeBestEffort('01/12/2017, 18:31:44'); +select parseDateTimeBestEffortUS('01/12/2017, 18:31:44'); +select parseDateTimeBestEffort('01/12/2017,18:31:44'); +select parseDateTimeBestEffortUS('01/12/2017,18:31:44'); +select 
parseDateTimeBestEffort('01/12/2017 , 18:31:44'); +select parseDateTimeBestEffortUS('01/12/2017 ,18:31:44'); +select parseDateTimeBestEffortUS('18:31:44, 31/12/2015'); +select parseDateTimeBestEffortUS('18:31:44 , 31/12/2015'); +select parseDateTimeBestEffort('18:31:44, 31/12/2015'); +select parseDateTimeBestEffort('18:31:44 , 31/12/2015'); +select parseDateTimeBestEffort('01/12/2017,'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeBestEffortUS('18:31:44,,,, 31/12/2015'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeBestEffortUS('18:31:44, 31/12/2015,'); -- { serverError CANNOT_PARSE_TEXT } +select parseDateTimeBestEffort('01/12/2017, 18:31:44,'); -- { serverError CANNOT_PARSE_TEXT } +select parseDateTimeBestEffort('01/12/2017, ,,,18:31:44'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeBestEffort('18:31:44 ,,,,, 31/12/2015'); -- { serverError CANNOT_PARSE_DATETIME } diff --git a/parser/testdata/02457_s3_cluster_schema_inference/ast.json b/parser/testdata/02457_s3_cluster_schema_inference/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02457_s3_cluster_schema_inference/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02457_s3_cluster_schema_inference/metadata.json b/parser/testdata/02457_s3_cluster_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02457_s3_cluster_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02457_s3_cluster_schema_inference/query.sql b/parser/testdata/02457_s3_cluster_schema_inference/query.sql new file mode 100644 index 000000000..6182a1a22 --- /dev/null +++ b/parser/testdata/02457_s3_cluster_schema_inference/query.sql @@ -0,0 +1,38 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +desc s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv'); +desc s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'TSV'); +desc s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'test', 'testtest'); +desc s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'test', 'testtest', 'TSV'); +desc s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'auto'); +desc s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'TSV', 'auto'); +desc s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'TSV', 'auto', 'auto'); +desc s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'test', 'testtest', 'auto'); +desc s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'test', 'testtest', 'TSV', 'auto'); +desc s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'test', 'testtest', 'TSV', 'auto', 'auto'); +desc s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', NOSIGN); +desc s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', NOSIGN, 'TSV'); +desc s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', NOSIGN, 'TSV', 'auto'); +desc 
s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', NOSIGN, 'TSV', 'auto', 'auto'); +desc s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', headers(MyCustomHeader = 'SomeValue')); +desc s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'TSV', 'auto', headers(MyCustomHeader = 'SomeValue'), 'auto'); + + +select * from s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv') order by c1, c2, c3; +select * from s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'TSV') order by c1, c2, c3; +select * from s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'test', 'testtest') order by c1, c2, c3; +select * from s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'test', 'testtest', 'TSV') order by c1, c2, c3; +select * from s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'auto') order by c1, c2, c3; +select * from s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'TSV', 'auto') order by c1, c2, c3; +select * from s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'TSV', 'auto', 'auto') order by c1, c2, c3; +select * from s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'test', 'testtest', 'auto') order by c1, c2, c3; +select * from s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'test', 'testtest', 'TSV', 'auto') order by c1, c2, c3; +select * from s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'test', 'testtest', 'TSV', 'auto', 'auto') order by c1, c2, c3; +select * from s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', NOSIGN) order by c1, c2, c3; +select * from s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', NOSIGN, 'TSV') order by c1, c2, c3; +select * from s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', NOSIGN, 'TSV', 'auto') order by c1, c2, c3; +select * from s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', NOSIGN, 'TSV', 'auto', 'auto') order by c1, c2, c3; +select * from s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', headers(MyCustomHeader = 'SomeValue')) order by c1, c2, c3; +select * from s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'TSV', 'auto', headers(MyCustomHeader = 'SomeValue'), 'auto') order by c1, c2, c3; + diff --git a/parser/testdata/02457_tuple_of_intervals/ast.json b/parser/testdata/02457_tuple_of_intervals/ast.json new file mode 100644 index 000000000..d76b516f3 --- /dev/null +++ b/parser/testdata/02457_tuple_of_intervals/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Explain EXPLAIN SYNTAX (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" 
+ }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function toIntervalSecond (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Function toIntervalMinute (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function toIntervalMonth (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-3" + }, + { + "explain": " Function toIntervalYear (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.000980214, + "rows_read": 19, + "bytes_read": 790 + } +} diff --git a/parser/testdata/02457_tuple_of_intervals/metadata.json b/parser/testdata/02457_tuple_of_intervals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02457_tuple_of_intervals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02457_tuple_of_intervals/query.sql b/parser/testdata/02457_tuple_of_intervals/query.sql new file mode 100644 index 000000000..69340199d --- /dev/null +++ b/parser/testdata/02457_tuple_of_intervals/query.sql @@ -0,0 +1,75 @@ +EXPLAIN SYNTAX SELECT INTERVAL '-1 SECOND 2 MINUTE -3 MONTH 1 YEAR'; + +SELECT '---'; + +SELECT negate(INTERVAL 1 SECOND); +SELECT addTupleOfIntervals('2022-10-11'::Date, tuple(INTERVAL 1 DAY)); +SELECT subtractTupleOfIntervals('2022-10-11'::Date, tuple(INTERVAL 1 DAY)); +SELECT addInterval(tuple(INTERVAL 1 SECOND), INTERVAL 1 SECOND); +SELECT subtractInterval(tuple(INTERVAL 1 SECOND), INTERVAL 1 SECOND); + +SELECT addTupleOfIntervals('2022-10-11'::Date, (INTERVAL 1 DAY, INTERVAL 1 MONTH)); +SELECT subtractTupleOfIntervals('2022-10-11'::Date, (INTERVAL 1 DAY, INTERVAL 1 MONTH)); +SELECT addInterval((INTERVAL 1 DAY, INTERVAL 1 SECOND), INTERVAL 1 SECOND); +SELECT subtractInterval(tuple(INTERVAL 1 DAY, INTERVAL 1 SECOND), INTERVAL 1 SECOND); +SELECT '---'; + +SELECT addInterval((), INTERVAL 1 MONTH); +SELECT subtractInterval(tuple(), INTERVAL 1 SECOND); + +SELECT '---'; + +SELECT '2022-10-11'::Date + tuple(INTERVAL 1 DAY); +SELECT '2022-10-11'::Date - tuple(INTERVAL 1 DAY); +SELECT tuple(INTERVAL 1 DAY) + '2022-10-11'::Date; +SELECT tuple(INTERVAL 1 DAY) - '2022-10-11'::Date; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +WITH tuple(INTERVAL 1 SECOND) + INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); +WITH tuple(INTERVAL 1 SECOND) - INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); +WITH INTERVAL 1 SECOND + tuple(INTERVAL 1 SECOND) as expr SELECT expr, toTypeName(expr); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +WITH INTERVAL 1 SECOND - tuple(INTERVAL 1 SECOND) as expr SELECT expr, toTypeName(expr); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT '---'; + +WITH INTERVAL 1 SECOND + INTERVAL 1 SECOND + INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); +WITH INTERVAL 1 HOUR + INTERVAL 1 SECOND + INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); +WITH INTERVAL 1 SECOND + INTERVAL 1 HOUR + INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); +WITH INTERVAL 1 SECOND + INTERVAL 1 SECOND + INTERVAL 1 HOUR as expr SELECT expr, toTypeName(expr); + +WITH - INTERVAL 1 SECOND - INTERVAL 1 SECOND - 
INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); +WITH - INTERVAL 1 HOUR - INTERVAL 1 SECOND - INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); +WITH - INTERVAL 1 SECOND - INTERVAL 1 HOUR - INTERVAL 1 SECOND as expr SELECT expr, toTypeName(expr); +WITH - INTERVAL 1 SECOND - INTERVAL 1 SECOND - INTERVAL 1 HOUR as expr SELECT expr, toTypeName(expr); + +SELECT '---'; + +WITH '2022-01-30'::Date + INTERVAL 1 MONTH + INTERVAL 1 DAY AS e1, + '2022-01-30'::Date + (INTERVAL 1 MONTH + INTERVAL 1 DAY) AS e2, + '2022-01-30'::Date + (INTERVAL 1 MONTH, INTERVAL 1 DAY) AS e3, + '2022-01-30'::Date + INTERVAL '1 MONTH 1 DAY' AS e4 +SELECT e1 == e2 AND e2 == e3 AND e3 == e4, e1; + +WITH '2022-01-30'::Date + INTERVAL 1 DAY + INTERVAL 1 MONTH AS e1, + '2022-01-30'::Date + (INTERVAL 1 DAY + INTERVAL 1 MONTH) AS e2, + '2022-01-30'::Date + (INTERVAL 1 DAY, INTERVAL 1 MONTH) AS e3, + '2022-01-30'::Date + INTERVAL '1 DAY 1 MONTH' AS e4 +SELECT e1 == e2 AND e2 == e3 AND e3 == e4, e1; + +WITH '2022-10-11'::Date + INTERVAL -1 SECOND + INTERVAL 2 MINUTE + INTERVAL -3 MONTH + INTERVAL 1 YEAR AS e1, + '2022-10-11'::Date + (INTERVAL -1 SECOND + INTERVAL 2 MINUTE + INTERVAL -3 MONTH + INTERVAL 1 YEAR) AS e2, + '2022-10-11'::Date + (INTERVAL -1 SECOND, INTERVAL 2 MINUTE, INTERVAL -3 MONTH, INTERVAL 1 YEAR) AS e3, + '2022-10-11'::Date + INTERVAL '-1 SECOND 2 MINUTE -3 MONTH 1 YEAR' AS e4 +SELECT e1 == e2 AND e2 == e3 AND e3 == e4, e1; + +WITH '2022-10-11'::DateTime - INTERVAL 1 QUARTER - INTERVAL -3 WEEK - INTERVAL 1 YEAR - INTERVAL 1 HOUR AS e1, + '2022-10-11'::DateTime + (- INTERVAL 1 QUARTER - INTERVAL -3 WEEK - INTERVAL 1 YEAR - INTERVAL 1 HOUR) AS e2, + '2022-10-11'::DateTime - (INTERVAL 1 QUARTER, INTERVAL -3 WEEK, INTERVAL 1 YEAR, INTERVAL 1 HOUR) AS e3, + '2022-10-11'::DateTime - INTERVAL '1 QUARTER -3 WEEK 1 YEAR 1 HOUR' AS e4 +SELECT e1 == e2 AND e2 == e3 AND e3 == e4, e1; + + +WITH '2022-10-11'::DateTime64 - INTERVAL 1 YEAR - INTERVAL 4 MONTH - INTERVAL 1 SECOND AS e1, + '2022-10-11'::DateTime64 + (- INTERVAL 1 YEAR - INTERVAL 4 MONTH - INTERVAL 1 SECOND) AS e2, + '2022-10-11'::DateTime64 - (INTERVAL 1 YEAR, INTERVAL 4 MONTH, INTERVAL 1 SECOND) AS e3, + '2022-10-11'::DateTime64 - INTERVAL '1 YEAR 4 MONTH 1 SECOND' AS e4 +SELECT e1 == e2 AND e2 == e3 AND e3 == e4, e1; diff --git a/parser/testdata/02458_datediff_date32/ast.json b/parser/testdata/02458_datediff_date32/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02458_datediff_date32/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02458_datediff_date32/metadata.json b/parser/testdata/02458_datediff_date32/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02458_datediff_date32/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02458_datediff_date32/query.sql b/parser/testdata/02458_datediff_date32/query.sql new file mode 100644 index 000000000..e41070e81 --- /dev/null +++ b/parser/testdata/02458_datediff_date32/query.sql @@ -0,0 +1,101 @@ +-- { echo } + +-- Date32 vs Date32 +SELECT dateDiff('second', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT dateDiff('minute', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT dateDiff('hour', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT dateDiff('day', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT dateDiff('week', toDate32('1927-01-01', 'UTC'), 
toDate32('1927-01-08', 'UTC'), 'UTC'); +SELECT dateDiff('month', toDate32('1927-01-01', 'UTC'), toDate32('1927-02-01', 'UTC'), 'UTC'); +SELECT dateDiff('quarter', toDate32('1927-01-01', 'UTC'), toDate32('1927-04-01', 'UTC'), 'UTC'); +SELECT dateDiff('year', toDate32('1927-01-01', 'UTC'), toDate32('1928-01-01', 'UTC'), 'UTC'); + +-- With DateTime64 +-- Date32 vs DateTime64 +SELECT dateDiff('second', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC'); +SELECT dateDiff('minute', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC'); +SELECT dateDiff('hour', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC'); +SELECT dateDiff('day', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC'); +SELECT dateDiff('week', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-08 00:00:00', 3, 'UTC'), 'UTC'); +SELECT dateDiff('month', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-02-01 00:00:00', 3, 'UTC'), 'UTC'); +SELECT dateDiff('quarter', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-04-01 00:00:00', 3, 'UTC'), 'UTC'); +SELECT dateDiff('year', toDate32('1927-01-01', 'UTC'), toDateTime64('1928-01-01 00:00:00', 3, 'UTC'), 'UTC'); + +-- DateTime64 vs Date32 +SELECT dateDiff('second', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT dateDiff('minute', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT dateDiff('hour', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT dateDiff('day', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT dateDiff('week', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-08', 'UTC'), 'UTC'); +SELECT dateDiff('month', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-02-01', 'UTC'), 'UTC'); +SELECT dateDiff('quarter', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-04-01', 'UTC'), 'UTC'); +SELECT dateDiff('year', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1928-01-01', 'UTC'), 'UTC'); + +-- With DateTime +-- Date32 vs DateTime +SELECT dateDiff('second', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC'); +SELECT dateDiff('minute', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC'); +SELECT dateDiff('hour', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC'); +SELECT dateDiff('day', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC'); +SELECT dateDiff('week', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-25 00:00:00', 'UTC'), 'UTC'); +SELECT dateDiff('month', toDate32('2015-08-18', 'UTC'), toDateTime('2015-09-18 00:00:00', 'UTC'), 'UTC'); +SELECT dateDiff('quarter', toDate32('2015-08-18', 'UTC'), toDateTime('2015-11-18 00:00:00', 'UTC'), 'UTC'); +SELECT dateDiff('year', toDate32('2015-08-18', 'UTC'), toDateTime('2016-08-18 00:00:00', 'UTC'), 'UTC'); + +-- DateTime vs Date32 +SELECT dateDiff('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC'); +SELECT dateDiff('minute', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC'); +SELECT dateDiff('hour', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC'); +SELECT dateDiff('day', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC'); +SELECT 
dateDiff('week', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-25', 'UTC'), 'UTC'); +SELECT dateDiff('month', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-09-18', 'UTC'), 'UTC'); +SELECT dateDiff('quarter', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-11-18', 'UTC'), 'UTC'); +SELECT dateDiff('year', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2016-08-18', 'UTC'), 'UTC'); + +-- With Date +-- Date32 vs Date +SELECT dateDiff('second', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC'); +SELECT dateDiff('minute', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC'); +SELECT dateDiff('hour', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC'); +SELECT dateDiff('day', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC'); +SELECT dateDiff('week', toDate32('2015-08-18', 'UTC'), toDate('2015-08-25', 'UTC'), 'UTC'); +SELECT dateDiff('month', toDate32('2015-08-18', 'UTC'), toDate('2015-09-18', 'UTC'), 'UTC'); +SELECT dateDiff('quarter', toDate32('2015-08-18', 'UTC'), toDate('2015-11-18', 'UTC'), 'UTC'); +SELECT dateDiff('year', toDate32('2015-08-18', 'UTC'), toDate('2016-08-18', 'UTC'), 'UTC'); + +-- Date vs Date32 +SELECT dateDiff('second', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC'); +SELECT dateDiff('minute', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC'); +SELECT dateDiff('hour', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC'); +SELECT dateDiff('day', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC'); +SELECT dateDiff('week', toDate('2015-08-18', 'UTC'), toDate32('2015-08-25', 'UTC'), 'UTC'); +SELECT dateDiff('month', toDate('2015-08-18', 'UTC'), toDate32('2015-09-18', 'UTC'), 'UTC'); +SELECT dateDiff('quarter', toDate('2015-08-18', 'UTC'), toDate32('2015-11-18', 'UTC'), 'UTC'); +SELECT dateDiff('year', toDate('2015-08-18', 'UTC'), toDate32('2016-08-18', 'UTC'), 'UTC'); + +-- Const vs non-const columns +SELECT dateDiff('day', toDate32('1927-01-01', 'UTC'), materialize(toDate32('1927-01-02', 'UTC')), 'UTC'); +SELECT dateDiff('day', toDate32('1927-01-01', 'UTC'), materialize(toDateTime64('1927-01-02 00:00:00', 3, 'UTC')), 'UTC'); +SELECT dateDiff('day', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), materialize(toDate32('1927-01-02', 'UTC')), 'UTC'); +SELECT dateDiff('day', toDate32('2015-08-18', 'UTC'), materialize(toDateTime('2015-08-19 00:00:00', 'UTC')), 'UTC'); +SELECT dateDiff('day', toDateTime('2015-08-18 00:00:00', 'UTC'), materialize(toDate32('2015-08-19', 'UTC')), 'UTC'); +SELECT dateDiff('day', toDate32('2015-08-18', 'UTC'), materialize(toDate('2015-08-19', 'UTC')), 'UTC'); +SELECT dateDiff('day', toDate('2015-08-18', 'UTC'), materialize(toDate32('2015-08-19', 'UTC')), 'UTC'); + +-- Non-const vs const columns +SELECT dateDiff('day', materialize(toDate32('1927-01-01', 'UTC')), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT dateDiff('day', materialize(toDate32('1927-01-01', 'UTC')), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC'); +SELECT dateDiff('day', materialize(toDateTime64('1927-01-01 00:00:00', 3, 'UTC')), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT dateDiff('day', materialize(toDate32('2015-08-18', 'UTC')), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC'); +SELECT dateDiff('day', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), toDate32('2015-08-19', 'UTC'), 'UTC'); +SELECT dateDiff('day', materialize(toDate32('2015-08-18', 'UTC')), toDate('2015-08-19', 'UTC'), 'UTC'); +SELECT 
dateDiff('day', materialize(toDate('2015-08-18', 'UTC')), toDate32('2015-08-19', 'UTC'), 'UTC'); + +-- Non-const vs non-const columns +SELECT dateDiff('day', materialize(toDate32('1927-01-01', 'UTC')), materialize(toDate32('1927-01-02', 'UTC')), 'UTC'); +SELECT dateDiff('day', materialize(toDate32('1927-01-01', 'UTC')), materialize(toDateTime64('1927-01-02 00:00:00', 3, 'UTC')), 'UTC'); +SELECT dateDiff('day', materialize(toDateTime64('1927-01-01 00:00:00', 3, 'UTC')), materialize(toDate32('1927-01-02', 'UTC')), 'UTC'); +SELECT dateDiff('day', materialize(toDate32('2015-08-18', 'UTC')), materialize(toDateTime('2015-08-19 00:00:00', 'UTC')), 'UTC'); +SELECT dateDiff('day', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), materialize(toDate32('2015-08-19', 'UTC')), 'UTC'); +SELECT dateDiff('day', materialize(toDate32('2015-08-18', 'UTC')), materialize(toDate('2015-08-19', 'UTC')), 'UTC'); +SELECT dateDiff('day', materialize(toDate('2015-08-18', 'UTC')), materialize(toDate32('2015-08-19', 'UTC')), 'UTC'); diff --git a/parser/testdata/02458_default_setting/ast.json b/parser/testdata/02458_default_setting/ast.json new file mode 100644 index 000000000..991f3ce02 --- /dev/null +++ b/parser/testdata/02458_default_setting/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier value" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.settings" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier name" + }, + { + "explain": " Literal 'max_insert_block_size'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001158427, + "rows_read": 13, + "bytes_read": 507 + } +} diff --git a/parser/testdata/02458_default_setting/metadata.json b/parser/testdata/02458_default_setting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02458_default_setting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02458_default_setting/query.sql b/parser/testdata/02458_default_setting/query.sql new file mode 100644 index 000000000..96c27488c --- /dev/null +++ b/parser/testdata/02458_default_setting/query.sql @@ -0,0 +1,7 @@ +SELECT value FROM system.settings where name='max_insert_block_size'; +SET max_insert_block_size=100000; +SELECT value FROM system.settings where name='max_insert_block_size'; +SELECT changed FROM system.settings where name='max_insert_block_size'; +SET max_insert_block_size=DEFAULT; +SELECT value FROM system.settings where name='max_insert_block_size'; +SELECT changed FROM system.settings where name='max_insert_block_size'; diff --git a/parser/testdata/02458_key_condition_not_like_prefix/ast.json b/parser/testdata/02458_key_condition_not_like_prefix/ast.json new file mode 100644 index 000000000..9266f5004 --- /dev/null +++ b/parser/testdata/02458_key_condition_not_like_prefix/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery data (children 3)" + }, + { + "explain": " Identifier 
data" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration str (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier str" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001163386, + "rows_read": 9, + "bytes_read": 302 + } +} diff --git a/parser/testdata/02458_key_condition_not_like_prefix/metadata.json b/parser/testdata/02458_key_condition_not_like_prefix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02458_key_condition_not_like_prefix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02458_key_condition_not_like_prefix/query.sql b/parser/testdata/02458_key_condition_not_like_prefix/query.sql new file mode 100644 index 000000000..a6f0b9133 --- /dev/null +++ b/parser/testdata/02458_key_condition_not_like_prefix/query.sql @@ -0,0 +1,12 @@ +CREATE TABLE data (str String) ENGINE=MergeTree ORDER BY str; +INSERT INTO data (str) SELECT 'aa' FROM numbers(100000); +INSERT INTO data (str) SELECT 'ba' FROM numbers(100000); +INSERT INTO data (str) SELECT 'ca' FROM numbers(100000); +SELECT count() FROM data WHERE str NOT LIKE 'a%' SETTINGS force_primary_key=1; +SELECT count() FROM data WHERE str NOT LIKE 'a%%' SETTINGS force_primary_key=1; +SELECT count() FROM data WHERE str NOT LIKE 'a' SETTINGS force_primary_key=1; -- { serverError INDEX_NOT_USED } +SELECT count() FROM data WHERE str NOT LIKE '%a' SETTINGS force_primary_key=1; -- { serverError INDEX_NOT_USED } +SELECT count() FROM data WHERE str NOT LIKE 'a_' SETTINGS force_primary_key=1; -- { serverError INDEX_NOT_USED } +SELECT count() FROM data WHERE str NOT LIKE 'a%_' SETTINGS force_primary_key=1; -- { serverError INDEX_NOT_USED } +SELECT count() FROM data WHERE str NOT LIKE '_a' SETTINGS force_primary_key=1; -- { serverError INDEX_NOT_USED } +SELECT count() FROM data WHERE str NOT LIKE 'a%\_' SETTINGS force_primary_key=1; -- { serverError INDEX_NOT_USED } diff --git a/parser/testdata/02458_relax_too_many_parts/ast.json b/parser/testdata/02458_relax_too_many_parts/ast.json new file mode 100644 index 000000000..5c215585e --- /dev/null +++ b/parser/testdata/02458_relax_too_many_parts/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001239828, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02458_relax_too_many_parts/metadata.json b/parser/testdata/02458_relax_too_many_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02458_relax_too_many_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02458_relax_too_many_parts/query.sql b/parser/testdata/02458_relax_too_many_parts/query.sql new file mode 100644 index 000000000..01f9edce6 --- /dev/null +++ b/parser/testdata/02458_relax_too_many_parts/query.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (x UInt64, s String) ENGINE = MergeTree ORDER BY tuple() SETTINGS parts_to_throw_insert = 3, max_parts_to_merge_at_once = 1; + +-- The "too many parts" threshold works: +SET max_block_size = 1, min_insert_block_size_rows = 1, min_insert_block_size_bytes = 1; 
+SYSTEM STOP MERGES test; +INSERT INTO test VALUES (1, 'a'); +INSERT INTO test VALUES (2, 'a'); +INSERT INTO test VALUES (3, 'a'); +INSERT INTO test VALUES (4, 'a'); -- { serverError TOO_MANY_PARTS } + +-- But it can be relaxed with a setting: +ALTER TABLE test MODIFY SETTING max_avg_part_size_for_too_many_parts = '1M'; + +-- It works in the same way if parts are small: +SYSTEM START MERGES test; +OPTIMIZE TABLE test FINAL SETTINGS optimize_throw_if_noop=1; +SYSTEM STOP MERGES test; + +INSERT INTO test VALUES (5, 'a'); +INSERT INTO test VALUES (6, 'a'); +INSERT INTO test VALUES (7, 'a'); -- { serverError TOO_MANY_PARTS } + +-- But it allows having more parts if their average size is large: +SYSTEM START MERGES test; +OPTIMIZE TABLE test FINAL SETTINGS optimize_throw_if_noop=1; +SYSTEM STOP MERGES test; + +SET max_block_size = 65000, min_insert_block_size_rows = 65000, min_insert_block_size_bytes = '1M'; +INSERT INTO test SELECT number, randomString(1000) FROM numbers(0, 10000); +INSERT INTO test SELECT number, randomString(1000) FROM numbers(10000, 10000); +INSERT INTO test SELECT number, randomString(1000) FROM numbers(20000, 10000); + +SELECT count(), round(avg(bytes), -6) FROM system.parts WHERE database = currentDatabase() AND table = 'test' AND active; + +DROP TABLE test; diff --git a/parser/testdata/02458_use_structure_from_insertion_table/ast.json b/parser/testdata/02458_use_structure_from_insertion_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02458_use_structure_from_insertion_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02458_use_structure_from_insertion_table/metadata.json b/parser/testdata/02458_use_structure_from_insertion_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02458_use_structure_from_insertion_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02458_use_structure_from_insertion_table/query.sql b/parser/testdata/02458_use_structure_from_insertion_table/query.sql new file mode 100644 index 000000000..845f895b7 --- /dev/null +++ b/parser/testdata/02458_use_structure_from_insertion_table/query.sql @@ -0,0 +1,41 @@ +-- Tags: no-parallel, no-fasttest + +insert into function file(02458_data.jsonl) select NULL as x, 42 as y settings engine_file_truncate_on_insert=1; +insert into function file(02458_data.jsoncompacteachrow) select NULL as x, 42 as y settings engine_file_truncate_on_insert=1; +drop table if exists test; +create table test (x Nullable(UInt32), y UInt32) engine=Memory(); + +set use_structure_from_insertion_table_in_table_functions=2; +set input_format_json_infer_incomplete_types_as_strings=0; + +insert into test select * from file(02458_data.jsonl); +insert into test select x, 1 from file(02458_data.jsonl); +insert into test select x, y from file(02458_data.jsonl); +insert into test select x + 1, y from file(02458_data.jsonl); -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} +insert into test select x, z from file(02458_data.jsonl); +insert into test select * from file(02458_data.jsoncompacteachrow); +insert into test select x, 1 from file(02458_data.jsoncompacteachrow); -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} +insert into test select x, y from file(02458_data.jsoncompacteachrow); -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} +insert into test select x + 1, y from file(02458_data.jsoncompacteachrow); -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} +insert into test select x, z from 
file(02458_data.jsoncompacteachrow); -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} +insert into test select * from input() format CSV 1,2 + +insert into test select x, y from input() format CSV 1,2 -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} + +insert into test select x, y from input() format JSONEachRow {"x" : null, "y" : 42}; + +select * from test order by y; + +drop table test; +create table test (x Nullable(UInt32)) engine=Memory(); +insert into test select * from file(02458_data.jsonl); +insert into test select x from file(02458_data.jsonl); +insert into test select y from file(02458_data.jsonl); +insert into test select y as x from file(02458_data.jsonl); +insert into test select c1 from input() format CSV 1,2; -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} + +insert into test select x from input() format JSONEachRow {"x" : null, "y" : 42}; + +select * from test order by x; + +drop table test; diff --git a/parser/testdata/02459_group_by_all/ast.json b/parser/testdata/02459_group_by_all/ast.json new file mode 100644 index 000000000..2c4a6db22 --- /dev/null +++ b/parser/testdata/02459_group_by_all/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery group_by_all (children 1)" + }, + { + "explain": " Identifier group_by_all" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001294131, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/02459_group_by_all/metadata.json b/parser/testdata/02459_group_by_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02459_group_by_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02459_group_by_all/query.sql b/parser/testdata/02459_group_by_all/query.sql new file mode 100644 index 000000000..8281c201e --- /dev/null +++ b/parser/testdata/02459_group_by_all/query.sql @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS group_by_all; + +CREATE TABLE group_by_all +( + a String, + b int, + c int +) +engine = Memory; + +insert into group_by_all values ('abc1', 1, 1), ('abc2', 1, 1), ('abc3', 1, 1), ('abc4', 1, 1); + +select a, count(b) from group_by_all group by all order by a; +select substring(a, 1, 3), count(b) from group_by_all group by all; +select substring(a, 1, 3), substring(substring(a, 1, 2), 1, count(b)) from group_by_all group by all; +select substring(a, 1, 3), substring(substring(a, 1, 2), c, count(b)) from group_by_all group by all; +select substring(a, 1, 3), substring(substring(a, c, 2), c, count(b)) from group_by_all group by all; +select substring(a, 1, 3), substring(substring(a, c + 1, 2), 1, count(b)) from group_by_all group by all; +select substring(a, 1, 3), substring(substring(a, c + 1, 2), c, count(b)) from group_by_all group by all; +select substring(a, 1, 3), substring(substring(substring(a, c, count(b)), 1, count(b)), 1, count(b)) from group_by_all group by all; +select substring(a, 1, 3), substring(a, 1, count(b)) from group_by_all group by all; +select count(b) AS len, substring(a, 1, 3), substring(a, 1, len) from group_by_all group by all; + +SET enable_analyzer = 1; + +select a, count(b) from group_by_all group by all order by a; +select substring(a, 1, 3), count(b) from group_by_all group by all; +select substring(a, 1, 3), substring(substring(a, 1, 2), 1, count(b)) from group_by_all group by all; +select substring(a, 1, 3), substring(substring(a, 1, 2), c, count(b)) from group_by_all group by all; +select substring(a, 1, 3), 
substring(substring(a, c, 2), c, count(b)) from group_by_all group by all; +select substring(a, 1, 3), substring(substring(a, c + 1, 2), 1, count(b)) from group_by_all group by all; +select substring(a, 1, 3), substring(substring(a, c + 1, 2), c, count(b)) from group_by_all group by all; +select substring(a, 1, 3), substring(substring(substring(a, c, count(b)), 1, count(b)), 1, count(b)) from group_by_all group by all; +select substring(a, 1, 3), substring(a, 1, count(b)) from group_by_all group by all; +select count(b) AS len, substring(a, 1, 3), substring(a, 1, len) from group_by_all group by all; diff --git a/parser/testdata/02459_low_cardinality_uint128_aggregator/ast.json b/parser/testdata/02459_low_cardinality_uint128_aggregator/ast.json new file mode 100644 index 000000000..e51b3fcc0 --- /dev/null +++ b/parser/testdata/02459_low_cardinality_uint128_aggregator/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001023965, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02459_low_cardinality_uint128_aggregator/metadata.json b/parser/testdata/02459_low_cardinality_uint128_aggregator/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02459_low_cardinality_uint128_aggregator/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02459_low_cardinality_uint128_aggregator/query.sql b/parser/testdata/02459_low_cardinality_uint128_aggregator/query.sql new file mode 100644 index 000000000..893e5514b --- /dev/null +++ b/parser/testdata/02459_low_cardinality_uint128_aggregator/query.sql @@ -0,0 +1,9 @@ +SET allow_suspicious_low_cardinality_types = 1; +-- LC UInt128 +CREATE TABLE group_by_pk_lc_uint128 (`k` LowCardinality(UInt128), `v` UInt32) ENGINE = MergeTree ORDER BY k PARTITION BY v%50; +INSERT INTO group_by_pk_lc_uint128 SELECT number / 100, number FROM numbers(1000); +SELECT k, sum(v) AS s FROM group_by_pk_lc_uint128 GROUP BY k ORDER BY k ASC LIMIT 1024 SETTINGS optimize_aggregation_in_order = 1; +-- LC UInt256 +CREATE TABLE group_by_pk_lc_uint256 (`k` LowCardinality(UInt256), `v` UInt32) ENGINE = MergeTree ORDER BY k PARTITION BY v%50; +INSERT INTO group_by_pk_lc_uint256 SELECT number / 100, number FROM numbers(1000); +SELECT k, sum(v) AS s FROM group_by_pk_lc_uint256 GROUP BY k ORDER BY k ASC LIMIT 1024 SETTINGS optimize_aggregation_in_order = 1; diff --git a/parser/testdata/02459_materialized_view_default_value/ast.json b/parser/testdata/02459_materialized_view_default_value/ast.json new file mode 100644 index 000000000..c12321f30 --- /dev/null +++ b/parser/testdata/02459_materialized_view_default_value/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery session (children 1)" + }, + { + "explain": " Identifier session" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001463807, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/02459_materialized_view_default_value/metadata.json b/parser/testdata/02459_materialized_view_default_value/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02459_materialized_view_default_value/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02459_materialized_view_default_value/query.sql 
b/parser/testdata/02459_materialized_view_default_value/query.sql new file mode 100644 index 000000000..16a814233 --- /dev/null +++ b/parser/testdata/02459_materialized_view_default_value/query.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS session; +DROP TABLE IF EXISTS queue; +DROP TABLE IF EXISTS forward; + +CREATE TABLE session +( + `day` Date, + `uid` String, + `dummy` String DEFAULT '' +) +ENGINE = MergeTree +ORDER BY (day, uid); + +CREATE TABLE queue +( + `day` Date, + `uid` String +) +ENGINE = MergeTree +ORDER BY (day, uid); + +CREATE MATERIALIZED VIEW IF NOT EXISTS forward TO session AS +SELECT + day, + uid +FROM queue; + +insert into queue values ('2019-05-01', 'test'); + +SELECT * FROM queue; +SELECT * FROM session; +SELECT * FROM forward; + +DROP TABLE session; +DROP TABLE queue; +DROP TABLE forward; diff --git a/parser/testdata/02459_read_in_order_bufer/ast.json b/parser/testdata/02459_read_in_order_bufer/ast.json new file mode 100644 index 000000000..e817e3144 --- /dev/null +++ b/parser/testdata/02459_read_in_order_bufer/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery mytable_stored (children 3)" + }, + { + "explain": " Identifier mytable_stored" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration a (children 1)" + }, + { + "explain": " DataType UInt8" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001393913, + "rows_read": 9, + "bytes_read": 317 + } +} diff --git a/parser/testdata/02459_read_in_order_bufer/metadata.json b/parser/testdata/02459_read_in_order_bufer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02459_read_in_order_bufer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02459_read_in_order_bufer/query.sql b/parser/testdata/02459_read_in_order_bufer/query.sql new file mode 100644 index 000000000..5a6e0a3db --- /dev/null +++ b/parser/testdata/02459_read_in_order_bufer/query.sql @@ -0,0 +1,13 @@ +CREATE TABLE mytable_stored (`a` UInt8) ENGINE = MergeTree ORDER BY a; +CREATE TABLE mytable (`a` UInt8) ENGINE = Buffer(currentDatabase(), 'mytable_stored', 4, 600, 3600, 10, 100, 10000, 10000000); +INSERT INTO mytable VALUES (0); +INSERT INTO mytable VALUES (1); +INSERT INTO mytable VALUES (2); +INSERT INTO mytable VALUES (3); +INSERT INTO mytable VALUES (4); +INSERT INTO mytable VALUES (5); +INSERT INTO mytable VALUES (6); +INSERT INTO mytable VALUES (7); +INSERT INTO mytable VALUES (8); +INSERT INTO mytable VALUES (9); +SELECT a FROM mytable ORDER BY a DESC LIMIT 5; diff --git a/parser/testdata/02460_prewhere_row_level_policy/ast.json b/parser/testdata/02460_prewhere_row_level_policy/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02460_prewhere_row_level_policy/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02460_prewhere_row_level_policy/metadata.json b/parser/testdata/02460_prewhere_row_level_policy/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02460_prewhere_row_level_policy/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02460_prewhere_row_level_policy/query.sql 
b/parser/testdata/02460_prewhere_row_level_policy/query.sql new file mode 100644 index 000000000..fc98fa773 --- /dev/null +++ b/parser/testdata/02460_prewhere_row_level_policy/query.sql @@ -0,0 +1,9 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/40956#issuecomment-1262096612 +DROP TABLE IF EXISTS row_level_policy_prewhere; +DROP ROW POLICY IF EXISTS row_level_policy_prewhere_policy0 ON row_level_policy_prewhere; + +CREATE TABLE row_level_policy_prewhere (x Int16, y String) ENGINE = MergeTree ORDER BY x; +INSERT INTO row_level_policy_prewhere(y, x) VALUES ('A',1), ('B',2), ('C',3); +CREATE ROW POLICY row_level_policy_prewhere_policy0 ON row_level_policy_prewhere FOR SELECT USING x >= 0 TO default; +SELECT * FROM row_level_policy_prewhere PREWHERE y = 'foo'; +DROP TABLE row_level_policy_prewhere; diff --git a/parser/testdata/02461_alter_update_respect_part_column_type_bug/ast.json b/parser/testdata/02461_alter_update_respect_part_column_type_bug/ast.json new file mode 100644 index 000000000..5a31433b1 --- /dev/null +++ b/parser/testdata/02461_alter_update_respect_part_column_type_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery src (children 1)" + }, + { + "explain": " Identifier src" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001303565, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02461_alter_update_respect_part_column_type_bug/metadata.json b/parser/testdata/02461_alter_update_respect_part_column_type_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02461_alter_update_respect_part_column_type_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02461_alter_update_respect_part_column_type_bug/query.sql b/parser/testdata/02461_alter_update_respect_part_column_type_bug/query.sql new file mode 100644 index 000000000..7f48b41aa --- /dev/null +++ b/parser/testdata/02461_alter_update_respect_part_column_type_bug/query.sql @@ -0,0 +1,94 @@ +drop table if exists src; +create table src( A Int64, B String, C String) Engine=MergeTree order by A SETTINGS min_bytes_for_wide_part=0; +insert into src values(1, 'one', 'test'); + +alter table src detach partition tuple(); +alter table src modify column B Nullable(String); +alter table src attach partition tuple(); + +alter table src update C = 'test1' where 1 settings mutations_sync=2; +select * from src; + + +drop table if exists src; +create table src( A String, B String, C String) Engine=MergeTree order by A SETTINGS min_bytes_for_wide_part=0; +insert into src values('one', 'one', 'test'); + +alter table src detach partition tuple(); +alter table src modify column A LowCardinality(String); +alter table src attach partition tuple(); + +alter table src update C = 'test1' where 1 settings mutations_sync=2; +select * from src; + + +drop table if exists src; +create table src( A String, B String, C String) Engine=MergeTree order by A SETTINGS min_bytes_for_wide_part=0; +insert into src values('one', 'one', 'test'); + +alter table src detach partition tuple(); +alter table src modify column A LowCardinality(String); +alter table src attach partition tuple(); + +alter table src modify column C LowCardinality(String); +select * from src; + +drop table if exists src; +create table src( A String, B String, C String) Engine=MergeTree order by A SETTINGS min_bytes_for_wide_part=0; +insert into src values('one', 'one', 'test'); + +alter table 
src detach partition tuple(); +alter table src modify column B Nullable(String); +alter table src attach partition tuple(); + +alter table src rename column B to D; +select * from src; + +select '-----'; + +drop table if exists src; +create table src( A Int64, B String, C String) Engine=ReplicatedMergeTree('/clickhouse/{database}/test/src1', '1') order by A SETTINGS min_bytes_for_wide_part=0; +insert into src values(1, 'one', 'test'); + +alter table src detach partition tuple(); +alter table src modify column B Nullable(String); +alter table src attach partition tuple(); + +alter table src update C = 'test1' where 1 settings mutations_sync=2; +select * from src; + + +drop table if exists src; +create table src( A String, B String, C String) Engine=ReplicatedMergeTree('/clickhouse/{database}/test/src2', '1') order by A SETTINGS min_bytes_for_wide_part=0; +insert into src values('one', 'one', 'test'); + +alter table src detach partition tuple(); +alter table src modify column A LowCardinality(String); +alter table src attach partition tuple(); + +alter table src update C = 'test1' where 1 settings mutations_sync=2; +select * from src; + + +drop table if exists src; +create table src( A String, B String, C String) Engine=ReplicatedMergeTree('/clickhouse/{database}/test/src3', '1') order by A SETTINGS min_bytes_for_wide_part=0; +insert into src values('one', 'one', 'test'); + +alter table src detach partition tuple(); +alter table src modify column A LowCardinality(String); +alter table src attach partition tuple(); + +alter table src modify column C LowCardinality(String); +select * from src; + +drop table if exists src; +create table src( A String, B String, C String) Engine=ReplicatedMergeTree('/clickhouse/{database}/test/src4', '1') order by A SETTINGS min_bytes_for_wide_part=0; +insert into src values('one', 'one', 'test'); + +alter table src detach partition tuple(); +alter table src modify column B Nullable(String); +alter table src attach partition tuple(); + +alter table src rename column B to D; +select * from src; + diff --git a/parser/testdata/02461_join_lc_issue_42380/ast.json b/parser/testdata/02461_join_lc_issue_42380/ast.json new file mode 100644 index 000000000..5368dc98a --- /dev/null +++ b/parser/testdata/02461_join_lc_issue_42380/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1__fuzz_13 (children 1)" + }, + { + "explain": " Identifier t1__fuzz_13" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001148941, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/02461_join_lc_issue_42380/metadata.json b/parser/testdata/02461_join_lc_issue_42380/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02461_join_lc_issue_42380/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02461_join_lc_issue_42380/query.sql b/parser/testdata/02461_join_lc_issue_42380/query.sql new file mode 100644 index 000000000..8b5c6846b --- /dev/null +++ b/parser/testdata/02461_join_lc_issue_42380/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS t1__fuzz_13; +DROP TABLE IF EXISTS t2__fuzz_47; + +SET allow_suspicious_low_cardinality_types = 1; + +CREATE TABLE t1__fuzz_13 (id Nullable(Int16)) ENGINE = MergeTree() ORDER BY id SETTINGS allow_nullable_key = 1; +CREATE TABLE t2__fuzz_47 (id LowCardinality(Int16)) ENGINE = MergeTree() ORDER BY id; + +INSERT INTO t1__fuzz_13 VALUES (1); +INSERT INTO t2__fuzz_47 VALUES (1); + 
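+-- (Editorial aside; illustrative sketch only, not part of the upstream fixture.) The FULL JOIN below has to reconcile a Nullable(Int16) key on one side with a LowCardinality(Int16) key on the other; issue #42380 tracked a failure in exactly this setup. Assuming a running server, the per-side result types can be inspected with: +-- SELECT toTypeName(t1__fuzz_13.id), toTypeName(t2__fuzz_47.id) FROM t1__fuzz_13 FULL OUTER JOIN t2__fuzz_47 ON 1 = 2 LIMIT 1;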
+SELECT * FROM t1__fuzz_13 FULL OUTER JOIN t2__fuzz_47 ON 1 = 2 +ORDER BY ALL; diff --git a/parser/testdata/02461_mullable_pk_monotonicity_bug/ast.json b/parser/testdata/02461_mullable_pk_monotonicity_bug/ast.json new file mode 100644 index 000000000..c48b8960e --- /dev/null +++ b/parser/testdata/02461_mullable_pk_monotonicity_bug/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery tab (children 3)" + }, + { + "explain": " Identifier tab" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType Nullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType UInt8" + }, + { + "explain": " Storage definition (children 3)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Set" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001147493, + "rows_read": 12, + "bytes_read": 392 + } +} diff --git a/parser/testdata/02461_mullable_pk_monotonicity_bug/metadata.json b/parser/testdata/02461_mullable_pk_monotonicity_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02461_mullable_pk_monotonicity_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02461_mullable_pk_monotonicity_bug/query.sql b/parser/testdata/02461_mullable_pk_monotonicity_bug/query.sql new file mode 100644 index 000000000..a5d75afbc --- /dev/null +++ b/parser/testdata/02461_mullable_pk_monotonicity_bug/query.sql @@ -0,0 +1,65 @@ +create table tab (x Nullable(UInt8)) engine = MergeTree order by x settings allow_nullable_key = 1, index_granularity = 2; +insert into tab select number from numbers(4); +set allow_suspicious_low_cardinality_types=1; +set max_rows_to_read = 2; + +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. 
+SET parallel_replicas_index_analysis_only_on_coordinator = 0; + +SELECT x + 1 FROM tab where plus(x, 1) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::Nullable(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(Nullable(UInt8))) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1, x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::Nullable(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(Nullable(UInt8)), x) <= 2 order by x; + +drop table tab; +set max_rows_to_read = 100; +create table tab (x LowCardinality(UInt8)) engine = MergeTree order by x settings allow_nullable_key = 1, index_granularity = 2; +insert into tab select number from numbers(4); + +set max_rows_to_read = 2; +SELECT x + 1 FROM tab where plus(x, 1) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::Nullable(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(Nullable(UInt8))) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1, x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::Nullable(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(Nullable(UInt8)), x) <= 2 order by x; + +drop table tab; +set max_rows_to_read = 100; +create table tab (x UInt128) engine = MergeTree order by x settings allow_nullable_key = 1, index_granularity = 2; +insert into tab select number from numbers(4); + +set max_rows_to_read = 2; +SELECT x + 1 FROM tab where plus(x, 1) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::Nullable(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(UInt8)) <= 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(Nullable(UInt8))) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1, x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::Nullable(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(UInt8), x) <= 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(Nullable(UInt8)), x) <= 2 order by x; + +set max_rows_to_read = 100; +SELECT x + 1 FROM tab WHERE (x + 1::LowCardinality(UInt8)) <= -9223372036854775808 order by x; + +drop table tab; +create table tab (x DateTime) engine = MergeTree order by x settings allow_nullable_key = 1, index_granularity = 2; +insert into tab select toDateTime('2022-02-02') + number from numbers(4); + +set max_rows_to_read = 2; +SELECT x + 1 FROM tab where plus(x, 1) <= toDateTime('2022-02-02') + 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::Nullable(UInt8)) <= toDateTime('2022-02-02') + 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(UInt8)) <= toDateTime('2022-02-02') + 2 order by x; +SELECT x + 1 FROM tab where plus(x, 1::LowCardinality(Nullable(UInt8))) <= toDateTime('2022-02-02') + 2 order by x; +SELECT 1 + x FROM tab where plus(1, x) <= toDateTime('2022-02-02') + 2 order by x; +SELECT 1 + x FROM tab where plus(1::Nullable(UInt8), x) <= toDateTime('2022-02-02') + 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(UInt8), x) <= toDateTime('2022-02-02') + 2 order by x; +SELECT 1 + x FROM tab where plus(1::LowCardinality(Nullable(UInt8)), x) <= toDateTime('2022-02-02') + 2 order by x; + 
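+-- (Editorial aside; illustrative sketch only, not part of the upstream fixture.) Why max_rows_to_read = 2 passes in each block above: with index_granularity = 2 and four inserted rows the table has two granules, and a condition that is monotonic in the primary key lets index analysis drop all but the first granule. Assuming a running server, the pruning can be inspected with, e.g.: +-- EXPLAIN indexes = 1 SELECT x + 1 FROM tab WHERE plus(x, 1) <= toDateTime('2022-02-02') + 2 ORDER BY x;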
+SELECT x + 1 FROM tab WHERE (x + CAST('1', 'Nullable(UInt8)')) <= -2147483647 ORDER BY x ASC NULLS FIRST; diff --git a/parser/testdata/02461_welch_t_test_fuzz/ast.json b/parser/testdata/02461_welch_t_test_fuzz/ast.json new file mode 100644 index 000000000..45784e4fd --- /dev/null +++ b/parser/testdata/02461_welch_t_test_fuzz/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery welch_ttest__fuzz_7 (children 1)" + }, + { + "explain": " Identifier welch_ttest__fuzz_7" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001230314, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/02461_welch_t_test_fuzz/metadata.json b/parser/testdata/02461_welch_t_test_fuzz/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02461_welch_t_test_fuzz/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02461_welch_t_test_fuzz/query.sql b/parser/testdata/02461_welch_t_test_fuzz/query.sql new file mode 100644 index 000000000..b6cd09eba --- /dev/null +++ b/parser/testdata/02461_welch_t_test_fuzz/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS welch_ttest__fuzz_7; +CREATE TABLE welch_ttest__fuzz_7 (left UInt128, right UInt128) ENGINE = Memory; + +INSERT INTO welch_ttest__fuzz_7 VALUES (0.010268, 0), (0.000167, 0), (0.000167, 0), (0.159258, 1), (0.136278, 1), (0.122389, 1); + +SELECT roundBankers(welchTTest(left, right).2, 6) from welch_ttest__fuzz_7; +SELECT roundBankers(studentTTest(left, right).2, 6) from welch_ttest__fuzz_7; diff --git a/parser/testdata/02462_distributions/ast.json b/parser/testdata/02462_distributions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02462_distributions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02462_distributions/metadata.json b/parser/testdata/02462_distributions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02462_distributions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02462_distributions/query.sql b/parser/testdata/02462_distributions/query.sql new file mode 100644 index 000000000..b45dc897f --- /dev/null +++ b/parser/testdata/02462_distributions/query.sql @@ -0,0 +1,24 @@ +# Values should be between 0 and 1 +SELECT DISTINCT if (a >= toFloat64(0) AND a <= toFloat64(1), 'Ok', 'Fail') FROM (SELECT randUniform(0, 1) AS a FROM numbers(100000)); +# Mean should be around 0 +SELECT DISTINCT if (m >= toFloat64(-0.2) AND m <= toFloat64(0.2), 'Ok', 'Fail') FROM (SELECT avg(a) as m FROM (SELECT randNormal(0, 5) AS a FROM numbers(100000))); +# Values should be >= 0 +SELECT DISTINCT if (a >= toFloat64(0), 'Ok', 'Fail') FROM (SELECT randLogNormal(0, 5) AS a FROM numbers(100000)); +# Values should be >= 0 +SELECT DISTINCT if (a >= toFloat64(0), 'Ok', 'Fail') FROM (SELECT randExponential(15) AS a FROM numbers(100000)); +# Values should be >= 0 +SELECT DISTINCT if (a >= toFloat64(0), 'Ok', 'Fail') FROM (SELECT randChiSquared(3) AS a FROM numbers(100000)); +# Mean should be around 0 +SELECT DISTINCT if (m > toFloat64(-0.2) AND m < toFloat64(0.2), 'Ok', 'Fail') FROM (SELECT avg(a) as m FROM (SELECT randStudentT(5) AS a FROM numbers(100000))); +# Values should be >= 0 +SELECT DISTINCT if (a >= toFloat64(0), 'Ok', 'Fail') FROM (SELECT randFisherF(3, 4) AS a FROM numbers(100000)); +# There should be only 0s and 1s +SELECT a FROM (SELECT DISTINCT randBernoulli(0.5) 
AS a FROM numbers(100000)) ORDER BY a; +# Values should be >= 0 +SELECT DISTINCT if (a >= toFloat64(0), 'Ok', 'Fail') FROM (SELECT randBinomial(3, 0.5) AS a FROM numbers(100000)); +# Values should be >= 0 +SELECT DISTINCT if (a >= toFloat64(0), 'Ok', 'Fail') FROM (SELECT randNegativeBinomial(3, 0.5) AS a FROM numbers(100000)); +# Values should be >= 0 +SELECT DISTINCT if (a >= toFloat64(0), 'Ok', 'Fail') FROM (SELECT randPoisson(44) AS a FROM numbers(100000)); +# No errors +SELECT randUniform(1, 2, 1), randNormal(0, 1, 'abacaba'), randLogNormal(0, 10, 'b'), randChiSquared(1, 1), randStudentT(7, '8'), randFisherF(23, 42, 100), randBernoulli(0.5, 2), randBinomial(3, 0.5, 1), randNegativeBinomial(3, 0.5, 2), randPoisson(44, 44) FORMAT Null; diff --git a/parser/testdata/02462_match_regexp_pk/ast.json b/parser/testdata/02462_match_regexp_pk/ast.json new file mode 100644 index 000000000..b57642477 --- /dev/null +++ b/parser/testdata/02462_match_regexp_pk/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery mt_match_pk (children 3)" + }, + { + "explain": " Identifier mt_match_pk" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration v (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 3)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier v" + }, + { + "explain": " Set" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001297586, + "rows_read": 10, + "bytes_read": 325 + } +} diff --git a/parser/testdata/02462_match_regexp_pk/metadata.json b/parser/testdata/02462_match_regexp_pk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02462_match_regexp_pk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02462_match_regexp_pk/query.sql b/parser/testdata/02462_match_regexp_pk/query.sql new file mode 100644 index 000000000..a47539b1c --- /dev/null +++ b/parser/testdata/02462_match_regexp_pk/query.sql @@ -0,0 +1,10 @@ +CREATE TABLE mt_match_pk (v String) ENGINE = MergeTree ORDER BY v SETTINGS index_granularity = 1; +INSERT INTO mt_match_pk VALUES ('a'), ('aaa'), ('aba'), ('bac'), ('acccca'); + +SET force_primary_key = 1; +SELECT count() FROM mt_match_pk WHERE match(v, '^a'); +SELECT count() FROM mt_match_pk WHERE match(v, '^ab'); +SELECT count() FROM mt_match_pk WHERE match(v, '^a.'); +SELECT count() FROM mt_match_pk WHERE match(v, '^ab*'); +SELECT count() FROM mt_match_pk WHERE match(v, '^ac?'); +SELECT count() FROM mt_match_pk WHERE match(v, '^a$|^b'); -- {serverError INDEX_NOT_USED} diff --git a/parser/testdata/02462_number_to_datetype/ast.json b/parser/testdata/02462_number_to_datetype/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02462_number_to_datetype/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02462_number_to_datetype/metadata.json b/parser/testdata/02462_number_to_datetype/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02462_number_to_datetype/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02462_number_to_datetype/query.sql b/parser/testdata/02462_number_to_datetype/query.sql new file mode 100644 index 000000000..5d361f2b0 --- /dev/null +++ b/parser/testdata/02462_number_to_datetype/query.sql @@ -0,0 
+1,35 @@ +-- { echoOn } + +-- toDate +select toYYYYMMDD(toDate(recordTimestamp, 'Europe/Amsterdam')), toDate(recordTimestamp, 'Europe/Amsterdam'), toInt64(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDate(recordTimestamp, 'Europe/Amsterdam')), toDate(recordTimestamp, 'Europe/Amsterdam'), toUInt64(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDate(recordTimestamp, 'Europe/Amsterdam')), toDate(recordTimestamp, 'Europe/Amsterdam'), toInt32(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDate(recordTimestamp, 'Europe/Amsterdam')), toDate(recordTimestamp, 'Europe/Amsterdam'), toUInt32(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDate(recordTimestamp, 'Europe/Amsterdam')), toDate(recordTimestamp, 'Europe/Amsterdam'), toFloat32(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDate(recordTimestamp, 'Europe/Amsterdam')), toDate(recordTimestamp, 'Europe/Amsterdam'), toFloat64(1665519765) as recordTimestamp, toTypeName(recordTimestamp); + +-- toDate32 +select toYYYYMMDD(toDate32(recordTimestamp, 'Europe/Amsterdam')), toDate32(recordTimestamp, 'Europe/Amsterdam'), toInt64(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDate32(recordTimestamp, 'Europe/Amsterdam')), toDate32(recordTimestamp, 'Europe/Amsterdam'), toUInt64(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDate32(recordTimestamp, 'Europe/Amsterdam')), toDate32(recordTimestamp, 'Europe/Amsterdam'), toInt32(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDate32(recordTimestamp, 'Europe/Amsterdam')), toDate32(recordTimestamp, 'Europe/Amsterdam'), toUInt32(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDate32(recordTimestamp, 'Europe/Amsterdam')), toDate32(recordTimestamp, 'Europe/Amsterdam'), toFloat32(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDate32(recordTimestamp, 'Europe/Amsterdam')), toDate32(recordTimestamp, 'Europe/Amsterdam'), toFloat64(1665519765) as recordTimestamp, toTypeName(recordTimestamp); + +-- toDateTime +select toYYYYMMDD(toDateTime(recordTimestamp, 'Europe/Amsterdam')), toDateTime(recordTimestamp, 'Europe/Amsterdam'), toInt64(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDateTime(recordTimestamp, 'Europe/Amsterdam')), toDateTime(recordTimestamp, 'Europe/Amsterdam'), toUInt64(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDateTime(recordTimestamp, 'Europe/Amsterdam')), toDateTime(recordTimestamp, 'Europe/Amsterdam'), toInt32(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDateTime(recordTimestamp, 'Europe/Amsterdam')), toDateTime(recordTimestamp, 'Europe/Amsterdam'), toUInt32(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDateTime(recordTimestamp, 'Europe/Amsterdam')), toDateTime(recordTimestamp, 'Europe/Amsterdam'), toFloat32(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDateTime(recordTimestamp, 'Europe/Amsterdam')), toDateTime(recordTimestamp, 'Europe/Amsterdam'), toFloat64(1665519765) as recordTimestamp, toTypeName(recordTimestamp); + +-- toDateTime64 +select toYYYYMMDD(toDateTime64(recordTimestamp, 3, 'Europe/Amsterdam')), toDateTime64(recordTimestamp, 3, 'Europe/Amsterdam'), toInt64(1665519765) as 
recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDateTime64(recordTimestamp, 3, 'Europe/Amsterdam')), toDateTime64(recordTimestamp, 3, 'Europe/Amsterdam'), toUInt64(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDateTime64(recordTimestamp, 3, 'Europe/Amsterdam')), toDateTime64(recordTimestamp, 3, 'Europe/Amsterdam'), toInt32(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDateTime64(recordTimestamp, 3, 'Europe/Amsterdam')), toDateTime64(recordTimestamp, 3, 'Europe/Amsterdam'), toUInt32(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDateTime64(recordTimestamp, 3, 'Europe/Amsterdam')), toDateTime64(recordTimestamp, 3, 'Europe/Amsterdam'), toFloat32(1665519765) as recordTimestamp, toTypeName(recordTimestamp); +select toYYYYMMDD(toDateTime64(recordTimestamp, 3, 'Europe/Amsterdam')), toDateTime64(recordTimestamp, 3, 'Europe/Amsterdam'), toFloat64(1665519765) as recordTimestamp, toTypeName(recordTimestamp); + +-- { echoOff } diff --git a/parser/testdata/02463_julian_day_ubsan/ast.json b/parser/testdata/02463_julian_day_ubsan/ast.json new file mode 100644 index 000000000..aecafa454 --- /dev/null +++ b/parser/testdata/02463_julian_day_ubsan/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function fromModifiedJulianDay (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '9223372036854775807'" + }, + { + "explain": " Literal 'Int64'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001031345, + "rows_read": 10, + "bytes_read": 402 + } +} diff --git a/parser/testdata/02463_julian_day_ubsan/metadata.json b/parser/testdata/02463_julian_day_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02463_julian_day_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02463_julian_day_ubsan/query.sql b/parser/testdata/02463_julian_day_ubsan/query.sql new file mode 100644 index 000000000..2174a5cb4 --- /dev/null +++ b/parser/testdata/02463_julian_day_ubsan/query.sql @@ -0,0 +1 @@ +SELECT fromModifiedJulianDay(9223372036854775807 :: Int64); -- { serverError CANNOT_FORMAT_DATETIME } diff --git a/parser/testdata/02464_decimal_scale_buffer_overflow/ast.json b/parser/testdata/02464_decimal_scale_buffer_overflow/ast.json new file mode 100644 index 000000000..700e3b974 --- /dev/null +++ b/parser/testdata/02464_decimal_scale_buffer_overflow/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery series__fuzz_35 (children 1)" + }, + { + "explain": " Identifier series__fuzz_35" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001425629, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02464_decimal_scale_buffer_overflow/metadata.json b/parser/testdata/02464_decimal_scale_buffer_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02464_decimal_scale_buffer_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": 
true} diff --git a/parser/testdata/02464_decimal_scale_buffer_overflow/query.sql b/parser/testdata/02464_decimal_scale_buffer_overflow/query.sql new file mode 100644 index 000000000..355d9012f --- /dev/null +++ b/parser/testdata/02464_decimal_scale_buffer_overflow/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS series__fuzz_35; +CREATE TABLE series__fuzz_35 (`i` UInt8, `x_value` Decimal(18, 14), `y_value` DateTime) ENGINE = Memory; +INSERT INTO series__fuzz_35(i, x_value, y_value) VALUES (1, 5.6,-4.4),(2, -9.6,3),(3, -1.3,-4),(4, 5.3,9.7),(5, 4.4,0.037),(6, -8.6,-7.8),(7, 5.1,9.3),(8, 7.9,-3.6),(9, -8.2,0.62),(10, -3,7.3); +SELECT skewSamp(x_value) FROM (SELECT x_value as x_value FROM series__fuzz_35 LIMIT 2) FORMAT Null; +DROP TABLE series__fuzz_35; diff --git a/parser/testdata/02465_limit_trivial_max_rows_to_read/ast.json b/parser/testdata/02465_limit_trivial_max_rows_to_read/ast.json new file mode 100644 index 000000000..56e10ee83 --- /dev/null +++ b/parser/testdata/02465_limit_trivial_max_rows_to_read/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_max_rows_to_read (children 1)" + }, + { + "explain": " Identifier t_max_rows_to_read" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001167486, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/02465_limit_trivial_max_rows_to_read/metadata.json b/parser/testdata/02465_limit_trivial_max_rows_to_read/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02465_limit_trivial_max_rows_to_read/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02465_limit_trivial_max_rows_to_read/query.sql b/parser/testdata/02465_limit_trivial_max_rows_to_read/query.sql new file mode 100644 index 000000000..700a54044 --- /dev/null +++ b/parser/testdata/02465_limit_trivial_max_rows_to_read/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS t_max_rows_to_read; + +CREATE TABLE t_max_rows_to_read (a UInt64) +ENGINE = MergeTree ORDER BY a +SETTINGS index_granularity = 4, index_granularity_bytes = '10Mi'; + +INSERT INTO t_max_rows_to_read SELECT number FROM numbers(100); + +SET max_block_size = 10; +SET max_rows_to_read = 20; +SET read_overflow_mode = 'throw'; + +SELECT number FROM numbers(30); -- { serverError TOO_MANY_ROWS } +SELECT number FROM numbers(30) LIMIT 21; -- { serverError TOO_MANY_ROWS } +SELECT number FROM numbers(30) LIMIT 1; +SELECT number FROM numbers(5); + +SELECT a FROM t_max_rows_to_read LIMIT 1; +SELECT a FROM t_max_rows_to_read LIMIT 11 offset 11; -- { serverError TOO_MANY_ROWS } +SELECT a FROM t_max_rows_to_read WHERE a > 50 LIMIT 1; -- { serverError TOO_MANY_ROWS } + +DROP TABLE t_max_rows_to_read; diff --git a/parser/testdata/02466_distributed_query_profiler/ast.json b/parser/testdata/02466_distributed_query_profiler/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02466_distributed_query_profiler/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02466_distributed_query_profiler/metadata.json b/parser/testdata/02466_distributed_query_profiler/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02466_distributed_query_profiler/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02466_distributed_query_profiler/query.sql b/parser/testdata/02466_distributed_query_profiler/query.sql new file mode 100644 index 000000000..171cc2a75 
--- /dev/null +++ b/parser/testdata/02466_distributed_query_profiler/query.sql @@ -0,0 +1,15 @@ +-- This is a regression test for EINTR handling in MultiplexedConnections::getReplicaForReading() + +select * from remote('127.{2,4}', view( + -- This emulates a slow query: the server returns one row every 0.1 seconds + select sleep(0.1) from numbers(20) settings max_block_size=1) +) +-- LIMIT is to activate query cancellation once enough rows have been read. +limit 10 +settings + -- This is to avoid draining in the background and to get the exception during query execution + drain_timeout=-1, + -- This is to activate as many signals as possible to trigger EINTR + query_profiler_real_time_period_ns=1, + -- This is to use MultiplexedConnections + use_hedged_requests=0; diff --git a/parser/testdata/02467_cross_join_three_table_functions/ast.json b/parser/testdata/02467_cross_join_three_table_functions/ast.json new file mode 100644 index 000000000..521b2bcf5 --- /dev/null +++ b/parser/testdata/02467_cross_join_three_table_functions/ast.json @@ -0,0 +1,97 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 3)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (alias a) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (alias b) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_11" + }, + { + "explain": " TableJoin" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (alias c) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_12" + }, + { + "explain": " TableJoin" + } + ], + + "rows": 25, + + "statistics": + { + "elapsed": 0.001328883, + "rows_read": 25, + "bytes_read": 997 + } +} diff --git a/parser/testdata/02467_cross_join_three_table_functions/metadata.json b/parser/testdata/02467_cross_join_three_table_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02467_cross_join_three_table_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02467_cross_join_three_table_functions/query.sql b/parser/testdata/02467_cross_join_three_table_functions/query.sql new file mode 100644 index 000000000..5c7da815b --- /dev/null +++ b/parser/testdata/02467_cross_join_three_table_functions/query.sql @@ -0,0 +1 @@ +SELECT count(*) FROM numbers(10) AS a, numbers(11) AS b, numbers(12) AS c; diff --git a/parser/testdata/02467_set_with_lowcardinality_type/ast.json b/parser/testdata/02467_set_with_lowcardinality_type/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02467_set_with_lowcardinality_type/ast.json @@
-0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02467_set_with_lowcardinality_type/metadata.json b/parser/testdata/02467_set_with_lowcardinality_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02467_set_with_lowcardinality_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02467_set_with_lowcardinality_type/query.sql b/parser/testdata/02467_set_with_lowcardinality_type/query.sql new file mode 100644 index 000000000..1607d9697 --- /dev/null +++ b/parser/testdata/02467_set_with_lowcardinality_type/query.sql @@ -0,0 +1,31 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/42460 +DROP TABLE IF EXISTS bloom_filter_nullable_index__fuzz_0; +CREATE TABLE bloom_filter_nullable_index__fuzz_0 +( + `order_key` UInt64, + `str` Nullable(String), + INDEX idx str TYPE bloom_filter GRANULARITY 1 +) +ENGINE = MergeTree ORDER BY order_key SETTINGS index_granularity = 6, index_granularity_bytes = '10Mi'; + +INSERT INTO bloom_filter_nullable_index__fuzz_0 VALUES (1, 'test'); +INSERT INTO bloom_filter_nullable_index__fuzz_0 VALUES (2, 'test2'); + +DROP TABLE IF EXISTS bloom_filter_nullable_index__fuzz_1; +CREATE TABLE bloom_filter_nullable_index__fuzz_1 +( + `order_key` UInt64, + `str` String, + INDEX idx str TYPE bloom_filter GRANULARITY 1 +) +ENGINE = MergeTree ORDER BY order_key SETTINGS index_granularity = 6, index_granularity_bytes = '10Mi'; + +INSERT INTO bloom_filter_nullable_index__fuzz_0 VALUES (1, 'test'); +INSERT INTO bloom_filter_nullable_index__fuzz_0 VALUES (2, 'test2'); + +DROP TABLE IF EXISTS nullable_string_value__fuzz_2; +CREATE TABLE nullable_string_value__fuzz_2 (`value` LowCardinality(String)) ENGINE = TinyLog; +INSERT INTO nullable_string_value__fuzz_2 VALUES ('test'); + +SELECT * FROM bloom_filter_nullable_index__fuzz_0 WHERE str IN (SELECT value FROM nullable_string_value__fuzz_2); +SELECT * FROM bloom_filter_nullable_index__fuzz_1 WHERE str IN (SELECT value FROM nullable_string_value__fuzz_2); diff --git a/parser/testdata/02468_has_any_tuple/ast.json b/parser/testdata/02468_has_any_tuple/ast.json new file mode 100644 index 000000000..4912e0478 --- /dev/null +++ b/parser/testdata/02468_has_any_tuple/ast.json @@ -0,0 +1,100 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Function toUInt8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toInt16 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + }, + { + 
"explain": " Function toInt16 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + } + ], + + "rows": 26, + + "statistics": + { + "elapsed": 0.001207064, + "rows_read": 26, + "bytes_read": 1085 + } +} diff --git a/parser/testdata/02468_has_any_tuple/metadata.json b/parser/testdata/02468_has_any_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02468_has_any_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02468_has_any_tuple/query.sql b/parser/testdata/02468_has_any_tuple/query.sql new file mode 100644 index 000000000..12c7222d5 --- /dev/null +++ b/parser/testdata/02468_has_any_tuple/query.sql @@ -0,0 +1,4 @@ +select [(toUInt8(3), toUInt8(3))] = [(toInt16(3), toInt16(3))]; +select hasAny([(toInt16(3), toInt16(3))],[(toInt16(3), toInt16(3))]); +select arrayFilter(x -> x = (toInt16(3), toInt16(3)), arrayZip([toUInt8(3)], [toUInt8(3)])); +select hasAny([(toUInt8(3), toUInt8(3))],[(toInt16(3), toInt16(3))]); diff --git a/parser/testdata/02469_fix_aliases_parser/ast.json b/parser/testdata/02469_fix_aliases_parser/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02469_fix_aliases_parser/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02469_fix_aliases_parser/metadata.json b/parser/testdata/02469_fix_aliases_parser/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02469_fix_aliases_parser/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02469_fix_aliases_parser/query.sql b/parser/testdata/02469_fix_aliases_parser/query.sql new file mode 100644 index 000000000..65eea8e9c --- /dev/null +++ b/parser/testdata/02469_fix_aliases_parser/query.sql @@ -0,0 +1,9 @@ +SELECT sum(number number number) FROM numbers(10); -- { clientError SYNTAX_ERROR } +SELECT sum(number number) FROM numbers(10); -- { clientError SYNTAX_ERROR } +SELECT sum(number AS number) FROM numbers(10); + +SELECT [number number number] FROM numbers(1); -- { clientError SYNTAX_ERROR } +SELECT [number number] FROM numbers(1); -- { clientError SYNTAX_ERROR } +SELECT [number AS number] FROM numbers(1); + +SELECT cast('1234' lhs lhs, 'UInt32'), lhs; -- { clientError SYNTAX_ERROR } \ No newline at end of file diff --git a/parser/testdata/02469_interval_msan/ast.json b/parser/testdata/02469_interval_msan/ast.json new file mode 100644 index 000000000..9b3f4c8f0 --- /dev/null +++ b/parser/testdata/02469_interval_msan/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function now (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1'" + }, + { + "explain": " Literal 'Int128'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001106343, + "rows_read": 12, + "bytes_read": 436 + } +} diff --git a/parser/testdata/02469_interval_msan/metadata.json b/parser/testdata/02469_interval_msan/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02469_interval_msan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02469_interval_msan/query.sql b/parser/testdata/02469_interval_msan/query.sql new file mode 100644 index 000000000..4b4a9f746 --- /dev/null +++ b/parser/testdata/02469_interval_msan/query.sql @@ -0,0 +1,19 @@ +SELECT now() + 1::Int128; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT now() + 1::Int256; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT now() + 1::UInt128; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT now() + 1::UInt256; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT now() - 1::Int128; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT now() - 1::Int256; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT now() - 1::UInt128; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT now() - 1::UInt256; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT now() + INTERVAL 1::Int128 SECOND - now(); +SELECT now() + INTERVAL 1::Int256 SECOND - now(); +SELECT now() + INTERVAL 1::UInt128 SECOND - now(); +SELECT now() + INTERVAL 1::UInt256 SECOND - now(); + +SELECT today() + INTERVAL 1::Int128 DAY - today(); +SELECT today() + INTERVAL 1::Int256 DAY - today(); +SELECT today() + INTERVAL 1::UInt128 DAY - today(); +SELECT today() + INTERVAL 1::UInt256 DAY - today(); diff --git a/parser/testdata/02470_suspicious_low_cardinality_msan/ast.json b/parser/testdata/02470_suspicious_low_cardinality_msan/ast.json new file mode 100644 index 000000000..895d63a74 --- /dev/null +++ b/parser/testdata/02470_suspicious_low_cardinality_msan/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alias_2__fuzz_25 (children 1)" + }, + { + "explain": " Identifier alias_2__fuzz_25" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001208132, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/02470_suspicious_low_cardinality_msan/metadata.json b/parser/testdata/02470_suspicious_low_cardinality_msan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02470_suspicious_low_cardinality_msan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02470_suspicious_low_cardinality_msan/query.sql b/parser/testdata/02470_suspicious_low_cardinality_msan/query.sql new file mode 100644 index 000000000..6969be1ca --- /dev/null +++ b/parser/testdata/02470_suspicious_low_cardinality_msan/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS alias_2__fuzz_25; +SET allow_suspicious_low_cardinality_types = 1; +CREATE TABLE alias_2__fuzz_25 (`dt` LowCardinality(Date), `col` DateTime, `col2` Nullable(Int256), `colAlias0` Nullable(DateTime64(3)) ALIAS col, `colAlias3` Nullable(Int32) ALIAS col3 + colAlias0, `colAlias1` LowCardinality(UInt16) ALIAS colAlias0 + col2, `colAlias2` LowCardinality(Int32) ALIAS colAlias0 + colAlias1, `col3` Nullable(UInt8)) ENGINE = MergeTree ORDER BY dt; +insert into alias_2__fuzz_25 (dt, col, col2, col3) values ('2020-02-01', 1, 2, 3); +SELECT colAlias0, colAlias2, colAlias3 FROM alias_2__fuzz_25; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +DROP TABLE alias_2__fuzz_25; diff --git a/parser/testdata/02471_wrong_date_monotonicity/ast.json b/parser/testdata/02471_wrong_date_monotonicity/ast.json new file mode 100644 index 000000000..eeb4d848d --- /dev/null +++ b/parser/testdata/02471_wrong_date_monotonicity/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": 
"explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tdm__fuzz_23 (children 1)" + }, + { + "explain": " Identifier tdm__fuzz_23" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001265393, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/02471_wrong_date_monotonicity/metadata.json b/parser/testdata/02471_wrong_date_monotonicity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02471_wrong_date_monotonicity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02471_wrong_date_monotonicity/query.sql b/parser/testdata/02471_wrong_date_monotonicity/query.sql new file mode 100644 index 000000000..40d64e533 --- /dev/null +++ b/parser/testdata/02471_wrong_date_monotonicity/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS tdm__fuzz_23; +CREATE TABLE tdm__fuzz_23 (`x` UInt256) ENGINE = MergeTree ORDER BY x SETTINGS write_final_mark = 0; +INSERT INTO tdm__fuzz_23 FORMAT Values (1); +SELECT count(x) FROM tdm__fuzz_23 WHERE toDate(x) < toDate(now(), 'Asia/Istanbul') SETTINGS max_rows_to_read = 1; +DROP TABLE tdm__fuzz_23; diff --git a/parser/testdata/02472_segfault_expression_parser/ast.json b/parser/testdata/02472_segfault_expression_parser/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02472_segfault_expression_parser/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02472_segfault_expression_parser/metadata.json b/parser/testdata/02472_segfault_expression_parser/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02472_segfault_expression_parser/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02472_segfault_expression_parser/query.sql b/parser/testdata/02472_segfault_expression_parser/query.sql new file mode 100644 index 000000000..4994da5dd --- /dev/null +++ b/parser/testdata/02472_segfault_expression_parser/query.sql @@ -0,0 +1 @@ +SELECT TIMESTAMP_SUB (SELECT ILIKE INTO OUTFILE , accurateCast ) FROM TIMESTAMP_SUB ( MINUTE , ) GROUP BY accurateCast; -- { clientError SYNTAX_ERROR } diff --git a/parser/testdata/02473_extract_low_cardinality_from_json/ast.json b/parser/testdata/02473_extract_low_cardinality_from_json/ast.json new file mode 100644 index 000000000..bc74acec5 --- /dev/null +++ b/parser/testdata/02473_extract_low_cardinality_from_json/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function JSONExtract (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '{\"a\" : {\"b\" : {\"c\" : 1, \"d\" : \"str\"}}}'" + }, + { + "explain": " Literal 'Tuple( a LowCardinality(String), b LowCardinality(String), c LowCardinality(String), d LowCardinality(String))'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001386493, + "rows_read": 8, + "bytes_read": 430 + } +} diff --git a/parser/testdata/02473_extract_low_cardinality_from_json/metadata.json b/parser/testdata/02473_extract_low_cardinality_from_json/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02473_extract_low_cardinality_from_json/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff 
--git a/parser/testdata/02473_extract_low_cardinality_from_json/query.sql b/parser/testdata/02473_extract_low_cardinality_from_json/query.sql new file mode 100644 index 000000000..721b7885e --- /dev/null +++ b/parser/testdata/02473_extract_low_cardinality_from_json/query.sql @@ -0,0 +1,2 @@ +SELECT JSONExtract('{"a" : {"b" : {"c" : 1, "d" : "str"}}}', 'Tuple( a LowCardinality(String), b LowCardinality(String), c LowCardinality(String), d LowCardinality(String))'); +SELECT JSONExtract('{"a" : {"b" : {"c" : 1, "d" : "str"}}}', 'Tuple( a String, b LowCardinality(String), c LowCardinality(String), d LowCardinality(String))'); diff --git a/parser/testdata/02473_map_element_nullable/ast.json b/parser/testdata/02473_map_element_nullable/ast.json new file mode 100644 index 000000000..b1e09966e --- /dev/null +++ b/parser/testdata/02473_map_element_nullable/ast.json @@ -0,0 +1,109 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function map (alias m) (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function arrayElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier m" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function arrayElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier m" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function arrayElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier m" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + } + ], + + "rows": 29, + + "statistics": + { + "elapsed": 0.001488314, + "rows_read": 29, + "bytes_read": 1086 + } +} diff --git a/parser/testdata/02473_map_element_nullable/metadata.json b/parser/testdata/02473_map_element_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02473_map_element_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02473_map_element_nullable/query.sql b/parser/testdata/02473_map_element_nullable/query.sql new file mode 100644 index 000000000..e9c351d11 --- /dev/null +++ b/parser/testdata/02473_map_element_nullable/query.sql @@ -0,0 +1,19 @@ +WITH map(1, 2, 3, NULL) AS m SELECT m[toNullable(1)], m[toNullable(2)], m[toNullable(3)]; +WITH map(1, 2, 3, NULL) AS m SELECT m[materialize(toNullable(1))], m[materialize(toNullable(2))], m[materialize(toNullable(3))]; +WITH materialize(map(1, 2, 3, NULL)) AS m SELECT m[toNullable(1)], m[toNullable(2)], m[toNullable(3)]; +WITH materialize(map(1, 2, 3, NULL)) AS m SELECT m[materialize(toNullable(1))], m[materialize(toNullable(2))], 
m[materialize(toNullable(3))]; + +WITH map('a', 2, 'b', NULL) AS m SELECT m[toNullable('a')], m[toNullable('b')], m[toNullable('c')]; +WITH map('a', 2, 'b', NULL) AS m SELECT m[materialize(toNullable('a'))], m[materialize(toNullable('b'))], m[materialize(toNullable('c'))]; +WITH materialize(map('a', 2, 'b', NULL)) AS m SELECT m[toNullable('a')], m[toNullable('b')], m[toNullable('c')]; +WITH materialize(map('a', 2, 'b', NULL)) AS m SELECT m[materialize(toNullable('a'))], m[materialize(toNullable('b'))], m[materialize(toNullable('c'))]; + +WITH map(1, 2, 3, NULL) AS m SELECT m[1], m[2], m[3]; +WITH map(1, 2, 3, NULL) AS m SELECT m[materialize(1)], m[materialize(2)], m[materialize(3)]; +WITH materialize(map(1, 2, 3, NULL)) AS m SELECT m[1], m[2], m[3]; +WITH materialize(map(1, 2, 3, NULL)) AS m SELECT m[materialize(1)], m[materialize(2)], m[materialize(3)]; + +WITH map('a', 2, 'b', NULL) AS m SELECT m['a'], m['b'], m['c']; +WITH map('a', 2, 'b', NULL) AS m SELECT m[materialize('a')], m[materialize('b')], m[materialize('c')]; +WITH materialize(map('a', 2, 'b', NULL)) AS m SELECT m['a'], m['b'], m['c']; +WITH materialize(map('a', 2, 'b', NULL)) AS m SELECT m[materialize('a')], m[materialize('b')], m[materialize('c')]; diff --git a/parser/testdata/02473_prewhere_with_bigint/ast.json b/parser/testdata/02473_prewhere_with_bigint/ast.json new file mode 100644 index 000000000..29f8d16eb --- /dev/null +++ b/parser/testdata/02473_prewhere_with_bigint/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery prewhere_int128 (children 1)" + }, + { + "explain": " Identifier prewhere_int128" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001419085, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02473_prewhere_with_bigint/metadata.json b/parser/testdata/02473_prewhere_with_bigint/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02473_prewhere_with_bigint/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02473_prewhere_with_bigint/query.sql b/parser/testdata/02473_prewhere_with_bigint/query.sql new file mode 100644 index 000000000..ef1ec4904 --- /dev/null +++ b/parser/testdata/02473_prewhere_with_bigint/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS prewhere_int128; +DROP TABLE IF EXISTS prewhere_int256; +DROP TABLE IF EXISTS prewhere_uint128; +DROP TABLE IF EXISTS prewhere_uint256; + +CREATE TABLE prewhere_int128 (a Int128) ENGINE=MergeTree ORDER BY a; +INSERT INTO prewhere_int128 VALUES (1); +SELECT a FROM prewhere_int128 PREWHERE a; -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER } +DROP TABLE prewhere_int128; + +CREATE TABLE prewhere_int256 (a Int256) ENGINE=MergeTree ORDER BY a; +INSERT INTO prewhere_int256 VALUES (1); +SELECT a FROM prewhere_int256 PREWHERE a; -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER } +DROP TABLE prewhere_int256; + +CREATE TABLE prewhere_uint128 (a UInt128) ENGINE=MergeTree ORDER BY a; +INSERT INTO prewhere_uint128 VALUES (1); +SELECT a FROM prewhere_uint128 PREWHERE a; -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER } +DROP TABLE prewhere_uint128; + +CREATE TABLE prewhere_uint256 (a UInt256) ENGINE=MergeTree ORDER BY a; +INSERT INTO prewhere_uint256 VALUES (1); +SELECT a FROM prewhere_uint256 PREWHERE a; -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER } +DROP TABLE prewhere_uint256; diff --git a/parser/testdata/02474_analyzer_subqueries_table_expression_modifiers/ast.json 
b/parser/testdata/02474_analyzer_subqueries_table_expression_modifiers/ast.json new file mode 100644 index 000000000..6de77a225 --- /dev/null +++ b/parser/testdata/02474_analyzer_subqueries_table_expression_modifiers/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001489643, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02474_analyzer_subqueries_table_expression_modifiers/metadata.json b/parser/testdata/02474_analyzer_subqueries_table_expression_modifiers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02474_analyzer_subqueries_table_expression_modifiers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02474_analyzer_subqueries_table_expression_modifiers/query.sql b/parser/testdata/02474_analyzer_subqueries_table_expression_modifiers/query.sql new file mode 100644 index 000000000..e86c86704 --- /dev/null +++ b/parser/testdata/02474_analyzer_subqueries_table_expression_modifiers/query.sql @@ -0,0 +1,17 @@ +SET enable_analyzer = 1; + +SELECT * FROM (SELECT 1) FINAL; -- { serverError UNSUPPORTED_METHOD } +SELECT * FROM (SELECT 1) SAMPLE 1/2; -- { serverError UNSUPPORTED_METHOD } +SELECT * FROM (SELECT 1) FINAL SAMPLE 1/2; -- { serverError UNSUPPORTED_METHOD } + +WITH cte_subquery AS (SELECT 1) SELECT * FROM cte_subquery FINAL; -- { serverError UNSUPPORTED_METHOD } +WITH cte_subquery AS (SELECT 1) SELECT * FROM cte_subquery SAMPLE 1/2; -- { serverError UNSUPPORTED_METHOD } +WITH cte_subquery AS (SELECT 1) SELECT * FROM cte_subquery FINAL SAMPLE 1/2; -- { serverError UNSUPPORTED_METHOD } + +SELECT * FROM (SELECT 1 UNION ALL SELECT 1) FINAL; -- { serverError UNSUPPORTED_METHOD } +SELECT * FROM (SELECT 1 UNION ALL SELECT 1) SAMPLE 1/2; -- { serverError UNSUPPORTED_METHOD } +SELECT * FROM (SELECT 1 UNION ALL SELECT 1) FINAL SAMPLE 1/2; -- { serverError UNSUPPORTED_METHOD } + +WITH cte_subquery AS (SELECT 1 UNION ALL SELECT 1) SELECT * FROM cte_subquery FINAL; -- { serverError UNSUPPORTED_METHOD } +WITH cte_subquery AS (SELECT 1 UNION ALL SELECT 1) SELECT * FROM cte_subquery SAMPLE 1/2; -- { serverError UNSUPPORTED_METHOD } +WITH cte_subquery AS (SELECT 1 UNION ALL SELECT 1) SELECT * FROM cte_subquery FINAL SAMPLE 1/2; -- { serverError UNSUPPORTED_METHOD } diff --git a/parser/testdata/02474_create_user_query_fuzzer_bug/ast.json b/parser/testdata/02474_create_user_query_fuzzer_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02474_create_user_query_fuzzer_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02474_create_user_query_fuzzer_bug/metadata.json b/parser/testdata/02474_create_user_query_fuzzer_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02474_create_user_query_fuzzer_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02474_create_user_query_fuzzer_bug/query.sql b/parser/testdata/02474_create_user_query_fuzzer_bug/query.sql new file mode 100644 index 000000000..3ef1469cf --- /dev/null +++ b/parser/testdata/02474_create_user_query_fuzzer_bug/query.sql @@ -0,0 +1 @@ +EXPLAIN AST ALTER user WITH a; -- { clientError SYNTAX_ERROR } diff --git a/parser/testdata/02474_extract_fixedstring_from_json/ast.json b/parser/testdata/02474_extract_fixedstring_from_json/ast.json new file mode 100644 index 
diff --git a/parser/testdata/02474_create_user_query_fuzzer_bug/ast.json b/parser/testdata/02474_create_user_query_fuzzer_bug/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02474_create_user_query_fuzzer_bug/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02474_create_user_query_fuzzer_bug/metadata.json b/parser/testdata/02474_create_user_query_fuzzer_bug/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02474_create_user_query_fuzzer_bug/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02474_create_user_query_fuzzer_bug/query.sql b/parser/testdata/02474_create_user_query_fuzzer_bug/query.sql
new file mode 100644
index 000000000..3ef1469cf
--- /dev/null
+++ b/parser/testdata/02474_create_user_query_fuzzer_bug/query.sql
@@ -0,0 +1 @@
+EXPLAIN AST ALTER user WITH a; -- { clientError SYNTAX_ERROR }
diff --git a/parser/testdata/02474_extract_fixedstring_from_json/ast.json b/parser/testdata/02474_extract_fixedstring_from_json/ast.json
new file mode 100644
index 000000000..eff5adbaa
--- /dev/null
+++ b/parser/testdata/02474_extract_fixedstring_from_json/ast.json
@@ -0,0 +1,46 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function JSONExtract (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 2)"
+		},
+		{
+			"explain": "      Literal '{\"a\": 123456}'"
+		},
+		{
+			"explain": "      Literal 'FixedString(11)'"
+		}
+	],
+
+	"rows": 8,
+
+	"statistics":
+	{
+		"elapsed": 0.001177908,
+		"rows_read": 8,
+		"bytes_read": 310
+	}
+}
diff --git a/parser/testdata/02474_extract_fixedstring_from_json/metadata.json b/parser/testdata/02474_extract_fixedstring_from_json/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02474_extract_fixedstring_from_json/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02474_extract_fixedstring_from_json/query.sql b/parser/testdata/02474_extract_fixedstring_from_json/query.sql
new file mode 100644
index 000000000..bbb9f5506
--- /dev/null
+++ b/parser/testdata/02474_extract_fixedstring_from_json/query.sql
@@ -0,0 +1,15 @@
+SELECT JSONExtract('{"a": 123456}', 'FixedString(11)');
+SELECT JSONExtract('{"a": 123456}', 'FixedString(12)');
+SELECT JSONExtract('{"a": "123456"}', 'a', 'FixedString(5)');
+SELECT JSONExtract('{"a": "123456"}', 'a', 'FixedString(6)');
+SELECT JSONExtract('{"a": 123456}', 'a', 'FixedString(5)');
+SELECT JSONExtract('{"a": 123456}', 'a', 'FixedString(6)');
+SELECT JSONExtract(materialize('{"a": 131231}'), 'a', 'LowCardinality(FixedString(5))') FROM numbers(2);
+SELECT JSONExtract(materialize('{"a": 131231}'), 'a', 'LowCardinality(FixedString(6))') FROM numbers(2);
+SELECT JSONExtract(materialize('{"a": 131231, "b": 1234}'), 'b', 'LowCardinality(FixedString(4))');
+SELECT JSONExtract(materialize('{"a": 131231, "b": "1234"}'), 'b', 'LowCardinality(FixedString(4))');
+SELECT JSONExtract(materialize('{"a": {"b": 131231} }'), 'a', 'LowCardinality(FixedString(12))');
+SELECT JSONExtract(materialize('{"a": 131231, "b": 1234567890}'), 'b', 'LowCardinality(FixedString(4))');
+SELECT JSONExtract(materialize('{"a": 131231, "b": 1234567890}'), 'b', 'LowCardinality(FixedString(10))');
+SELECT JSONExtract(materialize('{"a": 18446744073709551615}'), 'a', 'LowCardinality(FixedString(20))');
+SELECT JSONExtract(materialize('{"a": -9223372036854775807}'), 'a', 'LowCardinality(FixedString(20))');
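These queries pin down JSONExtract with FixedString(N) result types at several widths. As I read the test, a value that fits in N bytes is returned (zero-padded on the right, as usual for FixedString), and the paired (5)/(6) and (11)/(12) widths document what happens when it does not fit; the reference file holds the authoritative answers. Sketch of the fitting case only:

    SELECT JSONExtract('{"a": "123456"}', 'a', 'FixedString(6)'); -- '123456' occupies exactly 6 bytes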
diff --git a/parser/testdata/02474_fix_function_parser_bug/ast.json b/parser/testdata/02474_fix_function_parser_bug/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02474_fix_function_parser_bug/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02474_fix_function_parser_bug/metadata.json b/parser/testdata/02474_fix_function_parser_bug/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02474_fix_function_parser_bug/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02474_fix_function_parser_bug/query.sql b/parser/testdata/02474_fix_function_parser_bug/query.sql
new file mode 100644
index 000000000..67d97aa1c
--- /dev/null
+++ b/parser/testdata/02474_fix_function_parser_bug/query.sql
@@ -0,0 +1 @@
+CREATE DATABASE conv_mian ENGINE QALL(COLUMNS('|T.D'),¸mp} -- { clientError SYNTAX_ERROR }
diff --git a/parser/testdata/02474_timeDiff_UTCTimestamp/ast.json b/parser/testdata/02474_timeDiff_UTCTimestamp/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02474_timeDiff_UTCTimestamp/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02474_timeDiff_UTCTimestamp/metadata.json b/parser/testdata/02474_timeDiff_UTCTimestamp/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02474_timeDiff_UTCTimestamp/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02474_timeDiff_UTCTimestamp/query.sql b/parser/testdata/02474_timeDiff_UTCTimestamp/query.sql
new file mode 100644
index 000000000..231bebc15
--- /dev/null
+++ b/parser/testdata/02474_timeDiff_UTCTimestamp/query.sql
@@ -0,0 +1,12 @@
+-- all tests should be equal to zero, as timeDiff is the same as dateDiff('second', ...)
+SELECT dateDiff('second', toDate32('1927-01-01'), toDate32('1927-01-02')) - timeDiff(toDate32('1927-01-01'), toDate32('1927-01-02')) <= 2;
+SELECT dateDiff('second', toDate32('1927-01-01'), toDateTime64('1927-01-02 00:00:00', 3)) - timeDiff(toDate32('1927-01-01'), toDateTime64('1927-01-02 00:00:00', 3)) <= 2;
+SELECT dateDiff('second', toDateTime64('1927-01-01 00:00:00', 3), toDate32('1927-01-02')) - timeDiff(toDateTime64('1927-01-01 00:00:00', 3), toDate32('1927-01-02')) <= 2;
+SELECT dateDiff('second', toDate32('2015-08-18'), toDateTime('2015-08-19 00:00:00')) - timeDiff(toDate32('2015-08-18'), toDateTime('2015-08-19 00:00:00')) <= 2;
+SELECT dateDiff('second', toDateTime('2015-08-18 00:00:00'), toDate32('2015-08-19')) - timeDiff(toDateTime('2015-08-18 00:00:00'), toDate32('2015-08-19')) <= 2;
+SELECT dateDiff('second', toDate32('2015-08-18'), toDate('2015-08-19')) - timeDiff(toDate32('2015-08-18'), toDate('2015-08-19')) <= 2;
+SELECT dateDiff('second', toDate('2015-08-18'), toDate32('2015-08-19')) - timeDiff(toDate('2015-08-18'), toDate32('2015-08-19')) <= 2;
+
+-- UTCTimestamp is equal to now('UTC')
+SELECT dateDiff('s', UTCTimestamp(), now('UTC')) <= 2;
+SELECT timeDiff(UTCTimestamp(), now('UTC')) <= 2;
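As the leading comment says, timeDiff(x, y) is shorthand for dateDiff('second', x, y); the test merely allows a small slack (<= 2) to absorb evaluation-time drift around now(). An equivalent pair, as a sketch:

    SELECT timeDiff(toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'));           -- 86400
    SELECT dateDiff('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC')); -- 86400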
diff --git a/parser/testdata/02474_unhex_in_fix_string/ast.json b/parser/testdata/02474_unhex_in_fix_string/ast.json
new file mode 100644
index 000000000..0139b9005
--- /dev/null
+++ b/parser/testdata/02474_unhex_in_fix_string/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery unhex_in_fix_string_table (children 1)"
+		},
+		{
+			"explain": " Identifier unhex_in_fix_string_table"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001632567,
+		"rows_read": 2,
+		"bytes_read": 102
+	}
+}
diff --git a/parser/testdata/02474_unhex_in_fix_string/metadata.json b/parser/testdata/02474_unhex_in_fix_string/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02474_unhex_in_fix_string/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02474_unhex_in_fix_string/query.sql b/parser/testdata/02474_unhex_in_fix_string/query.sql
new file mode 100644
index 000000000..288336aa4
--- /dev/null
+++ b/parser/testdata/02474_unhex_in_fix_string/query.sql
@@ -0,0 +1,4 @@
+drop table if exists unhex_in_fix_string_table;
+create table unhex_in_fix_string_table ( dt Date, s1 FixedString(20), s2 String) engine=MergeTree partition by dt order by tuple();
+insert into unhex_in_fix_string_table values(today(), '436C69636B486F757365', '436C69636B486F757365');
+select unhex(s1), unhex(s2) from unhex_in_fix_string_table;
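The hex payload is not arbitrary: 436C69636B486F757365 decodes byte-for-byte to the ASCII string 'ClickHouse', so the final select should print it from both columns, with the FixedString(20) value carrying trailing zero-byte padding. Standalone check (sketch):

    SELECT unhex('436C69636B486F757365'); -- 'ClickHouse'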
diff --git a/parser/testdata/02475_analysis_of_variance/ast.json b/parser/testdata/02475_analysis_of_variance/ast.json
new file mode 100644
index 000000000..25c81333e
--- /dev/null
+++ b/parser/testdata/02475_analysis_of_variance/ast.json
@@ -0,0 +1,76 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 2)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 2)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function analysisOfVariance (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 2)"
+		},
+		{
+			"explain": "      Identifier number"
+		},
+		{
+			"explain": "      Function modulo (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 2)"
+		},
+		{
+			"explain": "        Identifier number"
+		},
+		{
+			"explain": "        Literal UInt64_2"
+		},
+		{
+			"explain": "   TablesInSelectQuery (children 1)"
+		},
+		{
+			"explain": "    TablesInSelectQueryElement (children 1)"
+		},
+		{
+			"explain": "     TableExpression (children 1)"
+		},
+		{
+			"explain": "      Function numbers (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 1)"
+		},
+		{
+			"explain": "        Literal UInt64_10"
+		},
+		{
+			"explain": " Identifier Null"
+		}
+	],
+
+	"rows": 18,
+
+	"statistics":
+	{
+		"elapsed": 0.001509364,
+		"rows_read": 18,
+		"bytes_read": 698
+	}
+}
diff --git a/parser/testdata/02475_analysis_of_variance/metadata.json b/parser/testdata/02475_analysis_of_variance/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02475_analysis_of_variance/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02475_analysis_of_variance/query.sql b/parser/testdata/02475_analysis_of_variance/query.sql
new file mode 100644
index 000000000..0a11afc86
--- /dev/null
+++ b/parser/testdata/02475_analysis_of_variance/query.sql
@@ -0,0 +1,9 @@
+SELECT analysisOfVariance(number, number % 2) FROM numbers(10) FORMAT Null;
+SELECT analysisOfVariance(number :: Decimal32(5), number % 2) FROM numbers(10) FORMAT Null;
+SELECT analysisOfVariance(number :: Decimal256(5), number % 2) FROM numbers(10) FORMAT Null;
+
+SELECT analysisOfVariance(1.11, -20); -- { serverError BAD_ARGUMENTS }
+SELECT analysisOfVariance(1.11, 20 :: UInt128); -- { serverError BAD_ARGUMENTS }
+SELECT analysisOfVariance(1.11, 9000000000000000); -- { serverError BAD_ARGUMENTS }
+
+SELECT analysisOfVariance(number, number % 2), analysisOfVariance(100000000000000000000., number % 65535) FROM numbers(1048575);
diff --git a/parser/testdata/02475_analyzer_join_tree_subquery/ast.json b/parser/testdata/02475_analyzer_join_tree_subquery/ast.json
new file mode 100644
index 000000000..487faa400
--- /dev/null
+++ b/parser/testdata/02475_analyzer_join_tree_subquery/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001288355,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/02475_analyzer_join_tree_subquery/metadata.json b/parser/testdata/02475_analyzer_join_tree_subquery/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02475_analyzer_join_tree_subquery/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02475_analyzer_join_tree_subquery/query.sql b/parser/testdata/02475_analyzer_join_tree_subquery/query.sql
new file mode 100644
index 000000000..c9e7ac191
--- /dev/null
+++ b/parser/testdata/02475_analyzer_join_tree_subquery/query.sql
@@ -0,0 +1,7 @@
+SET enable_analyzer = 1;
+
+WITH subquery AS (SELECT sum(number) FROM numbers(10)) SELECT * FROM subquery;
+
+SELECT '--';
+
+WITH subquery AS (SELECT sum(number) FROM numbers(10)) SELECT (SELECT * FROM subquery);
diff --git a/parser/testdata/02475_analyzer_subquery_compound_expression/ast.json b/parser/testdata/02475_analyzer_subquery_compound_expression/ast.json
new file mode 100644
index 000000000..a308e2a25
--- /dev/null
+++ b/parser/testdata/02475_analyzer_subquery_compound_expression/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001314743,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/02475_analyzer_subquery_compound_expression/metadata.json b/parser/testdata/02475_analyzer_subquery_compound_expression/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02475_analyzer_subquery_compound_expression/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02475_analyzer_subquery_compound_expression/query.sql b/parser/testdata/02475_analyzer_subquery_compound_expression/query.sql
new file mode 100644
index 000000000..fc9e9d44b
--- /dev/null
+++ b/parser/testdata/02475_analyzer_subquery_compound_expression/query.sql
@@ -0,0 +1,7 @@
+SET enable_analyzer=1;
+
+SELECT cast(tuple(1, 2), 'Tuple(value_1 UInt64, value_2 UInt64)') AS value, value.value_1, value.value_2;
+
+SELECT '--';
+
+SELECT value.value_1, value.value_2 FROM (SELECT cast(tuple(1, 2), 'Tuple(value_1 UInt64, value_2 UInt64)') AS value);
diff --git a/parser/testdata/02475_bad_cast_low_cardinality_to_string_bug/ast.json b/parser/testdata/02475_bad_cast_low_cardinality_to_string_bug/ast.json
new file mode 100644
index 000000000..6331548e7
--- /dev/null
+++ b/parser/testdata/02475_bad_cast_low_cardinality_to_string_bug/ast.json
@@ -0,0 +1,121 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 2)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function if (alias res) (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 3)"
+		},
+		{
+			"explain": "      Function materialize (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 1)"
+		},
+		{
+			"explain": "        Literal UInt64_0"
+		},
+		{
+			"explain": "      Function extract (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 2)"
+		},
+		{
+			"explain": "        Function materialize (children 1)"
+		},
+		{
+			"explain": "         ExpressionList (children 1)"
+		},
+		{
+			"explain": "          Function CAST (children 1)"
+		},
+		{
+			"explain": "           ExpressionList (children 2)"
+		},
+		{
+			"explain": "            Literal 'aaaaaa'"
+		},
+		{
+			"explain": "            Literal 'LowCardinality(String)'"
+		},
+		{
+			"explain": "        Literal '\\\\w'"
+		},
+		{
+			"explain": "      Function extract (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 2)"
+		},
+		{
+			"explain": "        Function materialize (children 1)"
+		},
+		{
+			"explain": "         ExpressionList (children 1)"
+		},
+		{
+			"explain": "          Function CAST (children 1)"
+		},
+		{
+			"explain": "           ExpressionList (children 2)"
+		},
+		{
+			"explain": "            Literal 'bbbbb'"
+		},
+		{
+			"explain": "            Literal 'LowCardinality(String)'"
+		},
+		{
+			"explain": "        Literal '\\\\w*'"
+		},
+		{
+			"explain": "   TablesInSelectQuery (children 1)"
+		},
+		{
+			"explain": "    TablesInSelectQueryElement (children 1)"
+		},
+		{
+			"explain": "     TableExpression (children 1)"
+		},
+		{
+			"explain": "      Function numbers (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 1)"
+		},
+		{
+			"explain": "        Literal UInt64_2"
+		}
+	],
+
+	"rows": 33,
+
+	"statistics":
+	{
+		"elapsed": 0.001336042,
+		"rows_read": 33,
+		"bytes_read": 1380
+	}
+}
diff --git a/parser/testdata/02475_bad_cast_low_cardinality_to_string_bug/metadata.json b/parser/testdata/02475_bad_cast_low_cardinality_to_string_bug/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02475_bad_cast_low_cardinality_to_string_bug/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02475_bad_cast_low_cardinality_to_string_bug/query.sql b/parser/testdata/02475_bad_cast_low_cardinality_to_string_bug/query.sql
new file mode 100644
index 000000000..3b2abfb3c
--- /dev/null
+++ b/parser/testdata/02475_bad_cast_low_cardinality_to_string_bug/query.sql
@@ -0,0 +1 @@
+SELECT if(materialize(0), extract(materialize(CAST('aaaaaa', 'LowCardinality(String)')), '\\w'), extract(materialize(CAST('bbbbb', 'LowCardinality(String)')), '\\w*')) AS res FROM numbers(2);
diff --git a/parser/testdata/02475_date_time_schema_inference_bug/ast.json b/parser/testdata/02475_date_time_schema_inference_bug/ast.json
new file mode 100644
index 000000000..30749ae13
--- /dev/null
+++ b/parser/testdata/02475_date_time_schema_inference_bug/ast.json
@@ -0,0 +1,58 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 2)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Asterisk"
+		},
+		{
+			"explain": "   TablesInSelectQuery (children 1)"
+		},
+		{
+			"explain": "    TablesInSelectQueryElement (children 1)"
+		},
+		{
+			"explain": "     TableExpression (children 1)"
+		},
+		{
+			"explain": "      Function format (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 2)"
+		},
+		{
+			"explain": "        Identifier CSV"
+		},
+		{
+			"explain": "        Literal '\"\"'"
+		}
+	],
+
+	"rows": 12,
+
+	"statistics":
+	{
+		"elapsed": 0.001118256,
+		"rows_read": 12,
+		"bytes_read": 446
+	}
+}
diff --git a/parser/testdata/02475_date_time_schema_inference_bug/metadata.json b/parser/testdata/02475_date_time_schema_inference_bug/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02475_date_time_schema_inference_bug/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02475_date_time_schema_inference_bug/query.sql b/parser/testdata/02475_date_time_schema_inference_bug/query.sql
new file mode 100644
index 000000000..1aea4a802
--- /dev/null
+++ b/parser/testdata/02475_date_time_schema_inference_bug/query.sql
@@ -0,0 +1 @@
+select * from format(CSV, '""');
diff --git a/parser/testdata/02475_join_bug_42832/ast.json b/parser/testdata/02475_join_bug_42832/ast.json
new file mode 100644
index 000000000..a36b57c5d
--- /dev/null
+++ b/parser/testdata/02475_join_bug_42832/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery tab1 (children 1)"
+		},
+		{
+			"explain": " Identifier tab1"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.00110136,
+		"rows_read": 2,
+		"bytes_read": 60
+	}
+}
diff --git a/parser/testdata/02475_join_bug_42832/metadata.json b/parser/testdata/02475_join_bug_42832/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02475_join_bug_42832/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02475_join_bug_42832/query.sql b/parser/testdata/02475_join_bug_42832/query.sql
new file mode 100644
index 000000000..e383949fb
--- /dev/null
+++ b/parser/testdata/02475_join_bug_42832/query.sql
@@ -0,0 +1,16 @@
+DROP TABLE IF EXISTS tab1;
+DROP TABLE IF EXISTS tab2;
+
+SET allow_suspicious_low_cardinality_types = 1;
+
+CREATE TABLE tab1 (a1 Int32, b1 Int32, val UInt64) ENGINE = MergeTree ORDER BY a1;
+CREATE TABLE tab2 (a2 LowCardinality(Int32), b2 Int32) ENGINE = MergeTree ORDER BY a2;
+
+INSERT INTO tab1 SELECT number, number, 1 from numbers(4);
+INSERT INTO tab2 SELECT number + 2, number + 2 from numbers(4);
+
+SELECT sum(val), count(val) FROM tab1 FULL OUTER JOIN tab2 ON b1 - 2 = a2 OR a1 = b2 SETTINGS join_use_nulls = 0;
+SELECT sum(val), count(val) FROM tab1 FULL OUTER JOIN tab2 ON b1 - 2 = a2 OR a1 = b2 SETTINGS join_use_nulls = 1;
+
+DROP TABLE IF EXISTS tab1;
+DROP TABLE IF EXISTS tab2;
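The two final SELECTs differ only in join_use_nulls: with 0, non-matched rows get type default values in columns coming from the other side; with 1, those columns become Nullable and yield NULL. A reduced illustration, independent of the tables above (sketch):

    SELECT l.v, r.v FROM (SELECT 1 AS k, 10 AS v) AS l
    FULL OUTER JOIN (SELECT 2 AS k, 20 AS v) AS r USING (k)
    SETTINGS join_use_nulls = 1; -- expect rows (10, NULL) and (NULL, 20); with 0, the gaps become 0 instead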
diff --git a/parser/testdata/02475_or_function_alias_and_const_where/ast.json b/parser/testdata/02475_or_function_alias_and_const_where/ast.json
new file mode 100644
index 000000000..3234a8544
--- /dev/null
+++ b/parser/testdata/02475_or_function_alias_and_const_where/ast.json
@@ -0,0 +1,97 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 3)"
+		},
+		{
+			"explain": "   ExpressionList (children 2)"
+		},
+		{
+			"explain": "    Function and (alias value) (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 2)"
+		},
+		{
+			"explain": "      Function equals (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 2)"
+		},
+		{
+			"explain": "        Identifier number"
+		},
+		{
+			"explain": "        Literal UInt64_1"
+		},
+		{
+			"explain": "      Function equals (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 2)"
+		},
+		{
+			"explain": "        Identifier number"
+		},
+		{
+			"explain": "        Literal UInt64_2"
+		},
+		{
+			"explain": "    Function sum (children 2)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Identifier value"
+		},
+		{
+			"explain": "     WindowDefinition"
+		},
+		{
+			"explain": "   TablesInSelectQuery (children 1)"
+		},
+		{
+			"explain": "    TablesInSelectQueryElement (children 1)"
+		},
+		{
+			"explain": "     TableExpression (children 1)"
+		},
+		{
+			"explain": "      Function numbers (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 1)"
+		},
+		{
+			"explain": "        Literal UInt64_1"
+		},
+		{
+			"explain": "   Literal UInt64_1"
+		}
+	],
+
+	"rows": 25,
+
+	"statistics":
+	{
+		"elapsed": 0.001142496,
+		"rows_read": 25,
+		"bytes_read": 953
+	}
+}
diff --git a/parser/testdata/02475_or_function_alias_and_const_where/metadata.json b/parser/testdata/02475_or_function_alias_and_const_where/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02475_or_function_alias_and_const_where/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02475_or_function_alias_and_const_where/query.sql b/parser/testdata/02475_or_function_alias_and_const_where/query.sql
new file mode 100644
index 000000000..53b97a1dd
--- /dev/null
+++ b/parser/testdata/02475_or_function_alias_and_const_where/query.sql
@@ -0,0 +1,2 @@
+SELECT (number = 1) AND (number = 2) AS value, sum(value) OVER () FROM numbers(1) WHERE 1;
+SELECT (number = 1) AND (number = 2) AS value, sum(value) OVER () FROM numbers(1) WHERE 1 SETTINGS enable_analyzer=1;
diff --git a/parser/testdata/02475_positive_modulo/ast.json b/parser/testdata/02475_positive_modulo/ast.json
new file mode 100644
index 000000000..8417b551a
--- /dev/null
+++ b/parser/testdata/02475_positive_modulo/ast.json
@@ -0,0 +1,46 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function positive_modulo (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 2)"
+		},
+		{
+			"explain": "      Literal UInt64_1000"
+		},
+		{
+			"explain": "      Literal UInt64_32"
+		}
+	],
+
+	"rows": 8,
+
+	"statistics":
+	{
+		"elapsed": 0.001129084,
+		"rows_read": 8,
+		"bytes_read": 302
+	}
+}
diff --git a/parser/testdata/02475_positive_modulo/metadata.json b/parser/testdata/02475_positive_modulo/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02475_positive_modulo/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02475_positive_modulo/query.sql b/parser/testdata/02475_positive_modulo/query.sql
new file mode 100644
index 000000000..5f1fdad3c
--- /dev/null
+++ b/parser/testdata/02475_positive_modulo/query.sql
@@ -0,0 +1,4 @@
+SELECT positive_modulo(1000, 32);
+SELECT positive_modulo(1000, -32);
+SELECT positive_modulo(-1000, -32);
+SELECT positive_modulo(-1000, 32);
\ No newline at end of file
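positive_modulo differs from % in that its result is always non-negative, regardless of the operands' signs. A worked example (sketch):

    SELECT -1000 % 32, positive_modulo(-1000, 32); -- -8 and 24, since -1000 = 32 * (-32) + 24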
diff --git a/parser/testdata/02475_precise_decimal_arithmetics/ast.json b/parser/testdata/02475_precise_decimal_arithmetics/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02475_precise_decimal_arithmetics/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02475_precise_decimal_arithmetics/metadata.json b/parser/testdata/02475_precise_decimal_arithmetics/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02475_precise_decimal_arithmetics/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02475_precise_decimal_arithmetics/query.sql b/parser/testdata/02475_precise_decimal_arithmetics/query.sql
new file mode 100644
index 000000000..435b72c01
--- /dev/null
+++ b/parser/testdata/02475_precise_decimal_arithmetics/query.sql
@@ -0,0 +1,45 @@
+-- Tags: no-fasttest
+
+-- check cases when one of operands is zero
+SELECT divideDecimal(toDecimal32(0, 2), toDecimal128(11.123456, 6));
+SELECT divideDecimal(toDecimal64(123.123, 3), toDecimal64(0, 1)); -- { serverError ILLEGAL_DIVISION }
+SELECT multiplyDecimal(toDecimal32(0, 2), toDecimal128(11.123456, 6));
+SELECT multiplyDecimal(toDecimal32(123.123, 3), toDecimal128(0, 1));
+
+-- don't be surprised by the strange query result -- it happens due to bad float precision: toUInt256(1e38) == 99999999999999997752612184630461283328
+SELECT multiplyDecimal(toDecimal256(1e38, 0), toDecimal256(1e38, 0));
+SELECT divideDecimal(toDecimal256(1e66, 0), toDecimal256(1e-10, 10), 0);
+
+-- the result fits Decimal256, but the requested scale is too big to fit
+SELECT multiplyDecimal(toDecimal256(1e38, 0), toDecimal256(1e38, 0), 2); -- { serverError DECIMAL_OVERFLOW }
+SELECT divideDecimal(toDecimal256(1e72, 0), toDecimal256(1e-5, 5), 2); -- { serverError DECIMAL_OVERFLOW }
+
+-- does not fit Decimal256
+SELECT multiplyDecimal(toDecimal256('1e38', 0), toDecimal256('1e38', 0)); -- { serverError DECIMAL_OVERFLOW }
+SELECT multiplyDecimal(toDecimal256(1e39, 0), toDecimal256(1e39, 0), 0); -- { serverError DECIMAL_OVERFLOW }
+SELECT divideDecimal(toDecimal256(1e39, 0), toDecimal256(1e-38, 39)); -- { serverError DECIMAL_OVERFLOW }
+
+-- test different signs
+SELECT divideDecimal(toDecimal128(123.76, 2), toDecimal128(11.123456, 6));
+SELECT divideDecimal(toDecimal32(123.123, 3), toDecimal128(11.4, 1), 2);
+SELECT divideDecimal(toDecimal128(-123.76, 2), toDecimal128(11.123456, 6));
+SELECT divideDecimal(toDecimal32(123.123, 3), toDecimal128(-11.4, 1), 2);
+SELECT divideDecimal(toDecimal32(-123.123, 3), toDecimal128(-11.4, 1), 2);
+
+SELECT multiplyDecimal(toDecimal64(123.76, 2), toDecimal128(11.123456, 6));
+SELECT multiplyDecimal(toDecimal32(123.123, 3), toDecimal128(11.4, 1), 2);
+SELECT multiplyDecimal(toDecimal64(-123.76, 2), toDecimal128(11.123456, 6));
+SELECT multiplyDecimal(toDecimal32(123.123, 3), toDecimal128(-11.4, 1), 2);
+SELECT multiplyDecimal(toDecimal32(-123.123, 3), toDecimal128(-11.4, 1), 2);
+
+-- check against non-const columns
+SELECT sum(multiplyDecimal(toDecimal64(number, 1), toDecimal64(number, 5))) FROM numbers(1000);
+SELECT sum(divideDecimal(toDecimal64(number, 1), toDecimal64(number, 5))) FROM (select * from numbers(1000) OFFSET 1);
+
+-- check against Nullable type
+SELECT multiplyDecimal(toNullable(toDecimal64(10, 1)), toDecimal64(100, 5));
+SELECT multiplyDecimal(toDecimal64(10, 1), toNullable(toDecimal64(100, 5)));
+SELECT multiplyDecimal(toNullable(toDecimal64(10, 1)), toNullable(toDecimal64(100, 5)));
+SELECT divideDecimal(toNullable(toDecimal64(10, 1)), toDecimal64(100, 5));
+SELECT divideDecimal(toDecimal64(10, 1), toNullable(toDecimal64(100, 5)));
+SELECT divideDecimal(toNullable(toDecimal64(10, 1)), toNullable(toDecimal64(100, 5)));
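divideDecimal and multiplyDecimal accept an optional third argument, the result scale, and the overflow cases above probe the point where the result at the requested scale no longer fits Decimal256. A small sketch of the explicit-scale form (expected value shown, assuming truncation toward zero):

    SELECT divideDecimal(toDecimal64(10, 1), toDecimal64(3, 1), 5); -- 3.33333, scale 5 requested explicitly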
diff --git a/parser/testdata/02475_split_with_max_substrings/ast.json b/parser/testdata/02475_split_with_max_substrings/ast.json
new file mode 100644
index 000000000..58d30f1c1
--- /dev/null
+++ b/parser/testdata/02475_split_with_max_substrings/ast.json
@@ -0,0 +1,37 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Literal '-- negative tests'"
+		}
+	],
+
+	"rows": 5,
+
+	"statistics":
+	{
+		"elapsed": 0.001162648,
+		"rows_read": 5,
+		"bytes_read": 188
+	}
+}
diff --git a/parser/testdata/02475_split_with_max_substrings/metadata.json b/parser/testdata/02475_split_with_max_substrings/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02475_split_with_max_substrings/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02475_split_with_max_substrings/query.sql b/parser/testdata/02475_split_with_max_substrings/query.sql
new file mode 100644
index 000000000..e0b7bf0a8
--- /dev/null
+++ b/parser/testdata/02475_split_with_max_substrings/query.sql
@@ -0,0 +1,175 @@
+SELECT '-- negative tests';
+SELECT splitByChar(',', '1,2,3', ''); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT splitByRegexp('[ABC]', 'oneAtwoBthreeC', ''); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT alphaTokens('abca1abc', ''); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT splitByAlpha('abca1abc', ''); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT splitByNonAlpha(' 1! a, b. ', ''); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT splitByWhitespace(' 1! a, b. ', ''); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT splitByString(', ', '1, 2 3, 4,5, abcde', ''); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+
+SELECT '-- splitByChar';
+SELECT '-- (default)';
+SELECT splitByChar('=', 'a==b=c=d');
+SELECT splitByChar('=', 'a==b=c=d', -1);
+SELECT splitByChar('=', 'a==b=c=d', 0);
+SELECT splitByChar('=', 'a==b=c=d', 1);
+SELECT splitByChar('=', 'a==b=c=d', 2);
+SELECT splitByChar('=', 'a==b=c=d', 3);
+SELECT splitByChar('=', 'a==b=c=d', 4);
+SELECT splitByChar('=', 'a==b=c=d', 5);
+SELECT splitByChar('=', 'a==b=c=d', 6);
+SELECT '-- (include remainder)';
+SELECT splitByChar('=', 'a==b=c=d') SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByChar('=', 'a==b=c=d', -1) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByChar('=', 'a==b=c=d', 0) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByChar('=', 'a==b=c=d', 1) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByChar('=', 'a==b=c=d', 2) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByChar('=', 'a==b=c=d', 3) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByChar('=', 'a==b=c=d', 4) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByChar('=', 'a==b=c=d', 5) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByChar('=', 'a==b=c=d', 6) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+
+SELECT '-- splitByString';
+SELECT '-- (default)';
+SELECT splitByString('', 'a==b=c=d') SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('', 'a==b=c=d', -1) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('', 'a==b=c=d', 0) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('', 'a==b=c=d', 1) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('', 'a==b=c=d', 2) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('', 'a==b=c=d', 3) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('', 'a==b=c=d', 4) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('', 'a==b=c=d', 5) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('', 'a==b=c=d', 6) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('', 'a==b=c=d', 7) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('', 'a==b=c=d', 7) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('', 'a==b=c=d', 8) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('', 'a==b=c=d', 9) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('=', 'a==b=c=d') SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('=', 'a==b=c=d', -1) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('=', 'a==b=c=d', 0) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('=', 'a==b=c=d', 1) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('=', 'a==b=c=d', 2) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('=', 'a==b=c=d', 3) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('=', 'a==b=c=d', 4) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('=', 'a==b=c=d', 5) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT splitByString('=', 'a==b=c=d', 6) SETTINGS splitby_max_substrings_includes_remaining_string = 0;
+SELECT '-- (include remainder)';
+SELECT splitByString('', 'a==b=c=d') SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('', 'a==b=c=d', -1) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('', 'a==b=c=d', 0) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('', 'a==b=c=d', 1) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('', 'a==b=c=d', 2) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('', 'a==b=c=d', 3) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('', 'a==b=c=d', 4) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('', 'a==b=c=d', 5) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('', 'a==b=c=d', 6) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('', 'a==b=c=d', 7) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('', 'a==b=c=d', 8) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('', 'a==b=c=d', 9) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('=', 'a==b=c=d') SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('=', 'a==b=c=d', -1) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('=', 'a==b=c=d', 0) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('=', 'a==b=c=d', 1) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('=', 'a==b=c=d', 2) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('=', 'a==b=c=d', 3) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('=', 'a==b=c=d', 4) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('=', 'a==b=c=d', 5) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByString('=', 'a==b=c=d', 6) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+
+
+SELECT '-- splitByRegexp';
+SELECT '-- (default)';
+SELECT splitByRegexp('\\d+', 'a12bc23de345f');
+SELECT splitByRegexp('\\d+', 'a12bc23de345f', -1);
+SELECT splitByRegexp('\\d+', 'a12bc23de345f', 0);
+SELECT splitByRegexp('\\d+', 'a12bc23de345f', 1);
+SELECT splitByRegexp('\\d+', 'a12bc23de345f', 2);
+SELECT splitByRegexp('\\d+', 'a12bc23de345f', 3);
+SELECT splitByRegexp('\\d+', 'a12bc23de345f', 4);
+SELECT splitByRegexp('\\d+', 'a12bc23de345f', 5);
+SELECT splitByRegexp('', 'a12bc23de345f');
+SELECT splitByRegexp('', 'a12bc23de345f', -1);
+SELECT splitByRegexp('', 'a12bc23de345f', 0);
+SELECT splitByRegexp('', 'a12bc23de345f', 1);
+SELECT splitByRegexp('', 'a12bc23de345f', 2);
+SELECT splitByRegexp('', 'a12bc23de345f', 3);
+SELECT splitByRegexp('', 'a12bc23de345f', 4);
+SELECT splitByRegexp('', 'a12bc23de345f', 5);
+SELECT '-- (include remainder)';
+SELECT splitByRegexp('', 'a12bc23de345f') SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByRegexp('', 'a12bc23de345f', -1) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByRegexp('', 'a12bc23de345f', 0) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByRegexp('', 'a12bc23de345f', 1) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByRegexp('', 'a12bc23de345f', 2) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByRegexp('', 'a12bc23de345f', 3) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByRegexp('', 'a12bc23de345f', 4) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByRegexp('', 'a12bc23de345f', 5) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByRegexp('\\d+', 'a12bc23de345f') SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByRegexp('\\d+', 'a12bc23de345f', -1) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByRegexp('\\d+', 'a12bc23de345f', 0) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByRegexp('\\d+', 'a12bc23de345f', 1) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByRegexp('\\d+', 'a12bc23de345f', 2) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByRegexp('\\d+', 'a12bc23de345f', 3) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByRegexp('\\d+', 'a12bc23de345f', 4) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByRegexp('\\d+', 'a12bc23de345f', 5) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+
+SELECT '-- splitByAlpha';
+SELECT '-- (default)';
+SELECT splitByAlpha('ab.cd.ef.gh');
+SELECT splitByAlpha('ab.cd.ef.gh', -1);
+SELECT splitByAlpha('ab.cd.ef.gh', 0);
+SELECT splitByAlpha('ab.cd.ef.gh', 1);
+SELECT splitByAlpha('ab.cd.ef.gh', 2);
+SELECT splitByAlpha('ab.cd.ef.gh', 3);
+SELECT splitByAlpha('ab.cd.ef.gh', 4);
+SELECT splitByAlpha('ab.cd.ef.gh', 5);
+SELECT '-- (include remainder)';
+SELECT splitByAlpha('ab.cd.ef.gh') SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByAlpha('ab.cd.ef.gh', -1) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByAlpha('ab.cd.ef.gh', 0) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByAlpha('ab.cd.ef.gh', 1) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByAlpha('ab.cd.ef.gh', 2) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByAlpha('ab.cd.ef.gh', 3) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByAlpha('ab.cd.ef.gh', 4) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByAlpha('ab.cd.ef.gh', 5) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+
+SELECT '-- splitByNonAlpha';
+SELECT '-- (default)';
+SELECT splitByNonAlpha('128.0.0.1');
+SELECT splitByNonAlpha('128.0.0.1', -1);
+SELECT splitByNonAlpha('128.0.0.1', 0);
+SELECT splitByNonAlpha('128.0.0.1', 1);
+SELECT splitByNonAlpha('128.0.0.1', 2);
+SELECT splitByNonAlpha('128.0.0.1', 3);
+SELECT splitByNonAlpha('128.0.0.1', 4);
+SELECT splitByNonAlpha('128.0.0.1', 5);
+SELECT '-- (include remainder)';
+SELECT splitByNonAlpha('128.0.0.1') SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByNonAlpha('128.0.0.1', -1) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByNonAlpha('128.0.0.1', 0) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByNonAlpha('128.0.0.1', 1) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByNonAlpha('128.0.0.1', 2) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByNonAlpha('128.0.0.1', 3) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByNonAlpha('128.0.0.1', 4) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByNonAlpha('128.0.0.1', 5) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+--
+--
+SELECT '-- splitByWhitespace';
+SELECT '-- (default)';
+SELECT splitByWhitespace('Nein, nein, nein! Doch!');
+SELECT splitByWhitespace('Nein, nein, nein! Doch!', -1);
+SELECT splitByWhitespace('Nein, nein, nein! Doch!', 0);
+SELECT splitByWhitespace('Nein, nein, nein! Doch!', 1);
+SELECT splitByWhitespace('Nein, nein, nein! Doch!', 2);
+SELECT splitByWhitespace('Nein, nein, nein! Doch!', 3);
+SELECT splitByWhitespace('Nein, nein, nein! Doch!', 4);
+SELECT splitByWhitespace('Nein, nein, nein! Doch!', 5);
+SELECT '-- (include remainder)';
+SELECT splitByWhitespace('Nein, nein, nein! Doch!') SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByWhitespace('Nein, nein, nein! Doch!', -1) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByWhitespace('Nein, nein, nein! Doch!', 0) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByWhitespace('Nein, nein, nein! Doch!', 1) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByWhitespace('Nein, nein, nein! Doch!', 2) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByWhitespace('Nein, nein, nein! Doch!', 3) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByWhitespace('Nein, nein, nein! Doch!', 4) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
+SELECT splitByWhitespace('Nein, nein, nein! Doch!', 5) SETTINGS splitby_max_substrings_includes_remaining_string = 1;
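All the splitBy* variants accept an optional max_substrings argument; by default anything past the limit is discarded, while splitby_max_substrings_includes_remaining_string = 1 keeps the unsplit tail in the last element. The contrast in miniature (sketch; expected results per the setting's description):

    SELECT splitByChar('=', 'a=b=c', 2);                                                               -- ['a','b']
    SELECT splitByChar('=', 'a=b=c', 2) SETTINGS splitby_max_substrings_includes_remaining_string = 1; -- ['a','b=c']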
diff --git a/parser/testdata/02476_analyzer_join_with_unused_columns/ast.json b/parser/testdata/02476_analyzer_join_with_unused_columns/ast.json
new file mode 100644
index 000000000..594f7c546
--- /dev/null
+++ b/parser/testdata/02476_analyzer_join_with_unused_columns/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.000981338,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/02476_analyzer_join_with_unused_columns/metadata.json b/parser/testdata/02476_analyzer_join_with_unused_columns/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02476_analyzer_join_with_unused_columns/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02476_analyzer_join_with_unused_columns/query.sql b/parser/testdata/02476_analyzer_join_with_unused_columns/query.sql
new file mode 100644
index 000000000..feb6786ff
--- /dev/null
+++ b/parser/testdata/02476_analyzer_join_with_unused_columns/query.sql
@@ -0,0 +1,19 @@
+SET enable_analyzer = 1;
+
+SELECT subquery_1.id, subquery_2.id FROM (SELECT 1 AS id, 2 AS value) AS subquery_1, (SELECT 3 AS id, 4 AS value) AS subquery_2;
+
+SELECT '--';
+
+SELECT subquery_1.value, subquery_2.value FROM (SELECT 1 AS id, 2 AS value) AS subquery_1, (SELECT 3 AS id, 4 AS value) AS subquery_2;
+
+SELECT '--';
+
+SELECT COLUMNS('id') FROM (SELECT 1 AS id, 2 AS value) AS subquery_1, (SELECT 3 AS id, 4 AS value) AS subquery_2;
+
+SELECT '--';
+
+SELECT COLUMNS('value') FROM (SELECT 1 AS id, 2 AS value) AS subquery_1, (SELECT 3 AS id, 4 AS value) AS subquery_2;
+
+SELECT '--';
+
+SELECT * FROM (SELECT 1 AS id, 2 AS value) AS subquery_1, (SELECT 3 AS id, 4 AS value) AS subquery_2;
diff --git a/parser/testdata/02476_fix_cast_parser_bug/ast.json b/parser/testdata/02476_fix_cast_parser_bug/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02476_fix_cast_parser_bug/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02476_fix_cast_parser_bug/metadata.json b/parser/testdata/02476_fix_cast_parser_bug/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02476_fix_cast_parser_bug/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02476_fix_cast_parser_bug/query.sql b/parser/testdata/02476_fix_cast_parser_bug/query.sql
new file mode 100644
index 000000000..6b01b3a8c
--- /dev/null
+++ b/parser/testdata/02476_fix_cast_parser_bug/query.sql
@@ -0,0 +1 @@
+SELECT CAST(a, b -> c) ++; -- { clientError SYNTAX_ERROR }
diff --git a/parser/testdata/02476_fuse_sum_count/ast.json b/parser/testdata/02476_fuse_sum_count/ast.json
new file mode 100644
index 000000000..c2a5f1b43
--- /dev/null
+++ b/parser/testdata/02476_fuse_sum_count/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001202271,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/02476_fuse_sum_count/metadata.json b/parser/testdata/02476_fuse_sum_count/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02476_fuse_sum_count/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02476_fuse_sum_count/query.sql b/parser/testdata/02476_fuse_sum_count/query.sql
new file mode 100644
index 000000000..2319e8144
--- /dev/null
+++ b/parser/testdata/02476_fuse_sum_count/query.sql
@@ -0,0 +1,38 @@
+SET enable_analyzer = 1;
+SET optimize_syntax_fuse_functions = 1;
+
+DROP TABLE IF EXISTS fuse_tbl;
+
+CREATE TABLE fuse_tbl(a Nullable(Int8), b Int8) Engine = Log;
+
+INSERT INTO fuse_tbl VALUES (1, 1), (2, 2), (NULL, 3);
+
+SELECT avg(a), sum(a) FROM (SELECT a FROM fuse_tbl);
+SELECT avg(a), sum(a) FROM (SELECT a FROM fuse_tbl WHERE isNull(a));
+SELECT avg(a), sum(a) FROM (SELECT a FROM fuse_tbl WHERE isNotNull(a));
+
+SELECT avg(b), sum(b) FROM (SELECT b FROM fuse_tbl);
+SELECT avg(b) * 3, sum(b) + 1 + count(b), count(b) * count(b), count() FROM (SELECT b FROM fuse_tbl);
+
+SELECT sum(b), count(b) from (SELECT x as b FROM (SELECT sum(b) as x, count(b) FROM fuse_tbl) );
+
+SELECT sum(a + 1), sum(b), count(b), avg(b), count(a + 1), sum(a + 2), count(a) from fuse_tbl SETTINGS optimize_syntax_fuse_functions = 0;
+SELECT sum(a + 1), sum(b), count(b), avg(b), count(a + 1), sum(a + 2), count(a) from fuse_tbl;
+
+EXPLAIN QUERY TREE run_passes = 1 SELECT sum(a), avg(a) from fuse_tbl;
+EXPLAIN QUERY TREE run_passes = 1 SELECT sum(b), avg(b) from fuse_tbl;
+EXPLAIN QUERY TREE run_passes = 1 SELECT sum(a + 1), sum(b), count(b), avg(b), count(a + 1), sum(a + 2), count(a) from fuse_tbl;
+EXPLAIN QUERY TREE run_passes = 1 SELECT avg(b) * 3, sum(b) + 1 + count(b), count(b) * count(b) FROM (SELECT b FROM fuse_tbl);
+
+EXPLAIN QUERY TREE run_passes = 1 SELECT sum(b), count(b) from (SELECT x as b FROM (SELECT sum(b) as x, count(b) FROM fuse_tbl) );
+
+SELECT sum(x), count(x), avg(x) FROM (SELECT number :: Decimal32(0) AS x FROM numbers(0)) SETTINGS optimize_syntax_fuse_functions = 0;
+SELECT sum(x), count(x), avg(x) FROM (SELECT number :: Decimal32(0) AS x FROM numbers(0));
+
+SELECT sum(x), count(x), avg(x), toTypeName(sum(x)), toTypeName(count(x)), toTypeName(avg(x)) FROM (SELECT number :: Decimal32(0) AS x FROM numbers(10)) SETTINGS optimize_syntax_fuse_functions = 0;
+SELECT sum(x), count(x), avg(x), toTypeName(sum(x)), toTypeName(count(x)), toTypeName(avg(x)) FROM (SELECT number :: Decimal32(0) AS x FROM numbers(10));
+
+-- TODO: uncomment after https://github.com/ClickHouse/ClickHouse/pull/43372
+-- SELECT avg(b), x - 2 AS b FROM (SELECT number as x FROM numbers(1)) GROUP BY x;
+
+DROP TABLE fuse_tbl;
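optimize_syntax_fuse_functions rewrites co-occurring sum/count/avg over the same argument into a single fused aggregation (sumCount), so the data is aggregated once; the EXPLAIN QUERY TREE statements exist to make that rewrite visible. A sketch reusing the test's table:

    EXPLAIN QUERY TREE run_passes = 1 SELECT sum(b), count(b) FROM fuse_tbl; -- expect one sumCount(b) node rather than two separate aggregates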
diff --git a/parser/testdata/02476_query_parameters_insert/ast.json b/parser/testdata/02476_query_parameters_insert/ast.json
new file mode 100644
index 000000000..228070caa
--- /dev/null
+++ b/parser/testdata/02476_query_parameters_insert/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery 02476_query_parameters_insert (children 1)"
+		},
+		{
+			"explain": " Identifier 02476_query_parameters_insert"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001377192,
+		"rows_read": 2,
+		"bytes_read": 110
+	}
+}
diff --git a/parser/testdata/02476_query_parameters_insert/metadata.json b/parser/testdata/02476_query_parameters_insert/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02476_query_parameters_insert/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02476_query_parameters_insert/query.sql b/parser/testdata/02476_query_parameters_insert/query.sql
new file mode 100644
index 000000000..de866ccbc
--- /dev/null
+++ b/parser/testdata/02476_query_parameters_insert/query.sql
@@ -0,0 +1,8 @@
+DROP TABLE IF EXISTS 02476_query_parameters_insert;
+CREATE TABLE 02476_query_parameters_insert (x Int32) ENGINE=MergeTree() ORDER BY tuple();
+
+SET param_x = 1;
+INSERT INTO 02476_query_parameters_insert VALUES ({x: Int32});
+SELECT * FROM 02476_query_parameters_insert;
+
+DROP TABLE 02476_query_parameters_insert;
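{name: Type} placeholders are filled from the session's param_* settings, and as this test pins down, the substitution also works inside INSERT ... VALUES. Minimal round trip (sketch):

    SET param_x = 1;
    SELECT {x: Int32} + 41; -- 42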
diff --git a/parser/testdata/02476_query_parameters_without_serialisation/ast.json b/parser/testdata/02476_query_parameters_without_serialisation/ast.json
new file mode 100644
index 000000000..015d92d98
--- /dev/null
+++ b/parser/testdata/02476_query_parameters_without_serialisation/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.00113271,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/02476_query_parameters_without_serialisation/metadata.json b/parser/testdata/02476_query_parameters_without_serialisation/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02476_query_parameters_without_serialisation/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02476_query_parameters_without_serialisation/query.sql b/parser/testdata/02476_query_parameters_without_serialisation/query.sql
new file mode 100644
index 000000000..ca62e44c6
--- /dev/null
+++ b/parser/testdata/02476_query_parameters_without_serialisation/query.sql
@@ -0,0 +1,29 @@
+SET param_num=42;
+SET param_str='hello';
+SET param_date='2022-08-04 18:30:53';
+SET param_map={'2b95a497-3a5d-49af-bf85-15763318cde7': [1.2, 3.4]};
+SELECT {num:UInt64}, {str:String}, {date:DateTime}, {map:Map(UUID, Array(Float32))};
+SELECT toTypeName({num:UInt64}), toTypeName({str:String}), toTypeName({date:DateTime}), toTypeName({map:Map(UUID, Array(Float32))});
+
+SET param_id=42;
+SET param_arr=[1, 2, 3];
+SET param_map_2={'abc': 22, 'def': 33};
+SET param_mul_arr=[[4, 5, 6], [7], [8, 9]];
+SET param_map_arr={10: [11, 12], 13: [14, 15]};
+SET param_map_map_arr={'ghj': {'klm': [16, 17]}, 'nop': {'rst': [18]}};
+SELECT {id: Int64}, {arr: Array(UInt8)}, {map_2: Map(String, UInt8)}, {mul_arr: Array(Array(UInt8))}, {map_arr: Map(UInt8, Array(UInt8))}, {map_map_arr: Map(String, Map(String, Array(UInt8)))};
+SELECT toTypeName({id: Int64}), toTypeName({arr: Array(UInt8)}), toTypeName({map_2: Map(String, UInt8)}), toTypeName({mul_arr: Array(Array(UInt8))}), toTypeName({map_arr: Map(UInt8, Array(UInt8))}), toTypeName({map_map_arr: Map(String, Map(String, Array(UInt8)))});
+
+SET param_tbl=numbers;
+SET param_db=system;
+SET param_col=number;
+SELECT {col:Identifier} FROM {db:Identifier}.{tbl:Identifier} LIMIT 1 OFFSET 5;
+
+SET param_arr_arr_arr=[[['a', 'b', 'c'], ['d', 'e', 'f']], [['g', 'h', 'i'], ['j', 'k', 'l']]];
+SET param_tuple_tuple_tuple=(((1, 'a', '2b95a497-3a5d-49af-bf85-15763318cde7', 3.14)));
+SET param_arr_map_tuple=[{1:(2, '2022-08-04 18:30:53', 's'), 3:(4, '2020-08-04 18:30:53', 't')}];
+SET param_map_arr_tuple_map={'a':[(1,{10:1, 20:2}),(2, {30:3, 40:4})], 'b':[(3, {50:5, 60:6}),(4, {70:7, 80:8})]};
+SELECT {arr_arr_arr: Array(Array(Array(String)))}, toTypeName({arr_arr_arr: Array(Array(Array(String)))});
+SELECT {tuple_tuple_tuple: Tuple(Tuple(Tuple(Int32, String, UUID, Float32)))}, toTypeName({tuple_tuple_tuple: Tuple(Tuple(Tuple(Int32, String, UUID, Float32)))});
+SELECT {arr_map_tuple: Array(Map(UInt64, Tuple(Int16, DateTime, String)))}, toTypeName({arr_map_tuple: Array(Map(UInt64, Tuple(Int16, DateTime, String)))});
+SELECT {map_arr_tuple_map: Map(String, Array(Tuple(UInt8, Map(UInt32, Int64))))}, toTypeName({map_arr_tuple_map: Map(String, Array(Tuple(UInt8, Map(UInt32, Int64))))});
diff --git a/parser/testdata/02477_age/ast.json b/parser/testdata/02477_age/ast.json
new file mode 100644
index 000000000..4ec74a678
--- /dev/null
+++ b/parser/testdata/02477_age/ast.json
@@ -0,0 +1,37 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Literal 'Various intervals'"
+		}
+	],
+
+	"rows": 5,
+
+	"statistics":
+	{
+		"elapsed": 0.001201007,
+		"rows_read": 5,
+		"bytes_read": 188
+	}
+}
diff --git a/parser/testdata/02477_age/metadata.json b/parser/testdata/02477_age/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02477_age/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02477_age/query.sql b/parser/testdata/02477_age/query.sql
new file mode 100644
index 000000000..72a692f61
--- /dev/null
+++ b/parser/testdata/02477_age/query.sql
@@ -0,0 +1,82 @@
+SELECT 'Various intervals';
+
+SELECT age('year', toDate('2017-12-31'), toDate('2016-01-01'));
+SELECT age('year', toDate('2017-12-31'), toDate('2017-01-01'));
+SELECT age('year', toDate('2017-12-31'), toDate('2018-01-01'));
+SELECT age('quarter', toDate('2017-12-31'), toDate('2016-01-01'));
+SELECT age('quarter', toDate('2017-12-31'), toDate('2017-01-01'));
+SELECT age('quarter', toDate('2017-12-31'), toDate('2018-01-01'));
+SELECT age('month', toDate('2017-12-31'), toDate('2016-01-01'));
+SELECT age('month', toDate('2017-12-31'), toDate('2017-01-01'));
+SELECT age('month', toDate('2017-12-31'), toDate('2018-01-01'));
+SELECT age('week', toDate('2017-12-31'), toDate('2016-01-01'));
+SELECT age('week', toDate('2017-12-31'), toDate('2017-01-01'));
+SELECT age('week', toDate('2017-12-31'), toDate('2018-01-01'));
+SELECT age('day', toDate('2017-12-31'), toDate('2016-01-01'));
+SELECT age('day', toDate('2017-12-31'), toDate('2017-01-01'));
+SELECT age('day', toDate('2017-12-31'), toDate('2018-01-01'));
+SELECT age('hour', toDate('2017-12-31'), toDate('2016-01-01'), 'UTC');
+SELECT age('hour', toDate('2017-12-31'), toDate('2017-01-01'), 'UTC');
+SELECT age('hour', toDate('2017-12-31'), toDate('2018-01-01'), 'UTC');
+SELECT age('minute', toDate('2017-12-31'), toDate('2016-01-01'), 'UTC');
+SELECT age('minute', toDate('2017-12-31'), toDate('2017-01-01'), 'UTC');
+SELECT age('minute', toDate('2017-12-31'), toDate('2018-01-01'), 'UTC');
+SELECT age('second', toDate('2017-12-31'), toDate('2016-01-01'), 'UTC');
+SELECT age('second', toDate('2017-12-31'), toDate('2017-01-01'), 'UTC');
+SELECT age('second', toDate('2017-12-31'), toDate('2018-01-01'), 'UTC');
+
+SELECT 'DateTime arguments';
+SELECT age('day', toDateTime('2016-01-01 00:00:01', 'UTC'), toDateTime('2016-01-02 00:00:00', 'UTC'), 'UTC');
+SELECT age('hour', toDateTime('2016-01-01 00:00:01', 'UTC'), toDateTime('2016-01-02 00:00:00', 'UTC'), 'UTC');
+SELECT age('minute', toDateTime('2016-01-01 00:00:01', 'UTC'), toDateTime('2016-01-02 00:00:00', 'UTC'), 'UTC');
+SELECT age('second', toDateTime('2016-01-01 00:00:01', 'UTC'), toDateTime('2016-01-02 00:00:00', 'UTC'), 'UTC');
+
+SELECT 'Date and DateTime arguments';
+
+SELECT age('second', toDate('2017-12-31'), toDateTime('2016-01-01 00:00:00', 'UTC'), 'UTC');
+SELECT age('second', toDateTime('2017-12-31 00:00:00', 'UTC'), toDate('2017-01-01'), 'UTC');
+SELECT age('second', toDateTime('2017-12-31 00:00:00', 'UTC'), toDateTime('2018-01-01 00:00:00', 'UTC'));
+
+SELECT 'Constant and non-constant arguments';
+
+SELECT age('minute', materialize(toDate('2017-12-31')), toDate('2016-01-01'), 'UTC');
+SELECT age('minute', toDate('2017-12-31'), materialize(toDate('2017-01-01')), 'UTC');
+SELECT age('minute', materialize(toDate('2017-12-31')), materialize(toDate('2018-01-01')), 'UTC');
+
+SELECT 'Case insensitive';
+
+SELECT age('YeAr', toDate('2017-12-31'), toDate('2016-01-01'));
+
+SELECT 'Dependance of timezones';
+
+SELECT age('month', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul');
+SELECT age('week', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul');
+SELECT age('day', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul');
+SELECT age('hour', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul');
+SELECT age('minute', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul');
+SELECT age('second', toDate('2014-10-26'), toDate('2014-10-27'), 'Asia/Istanbul');
+
+SELECT age('month', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC');
+SELECT age('week', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC');
+SELECT age('day', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC');
+SELECT age('hour', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC');
+SELECT age('minute', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC');
+SELECT age('second', toDate('2014-10-26'), toDate('2014-10-27'), 'UTC');
+
+SELECT age('month', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul'));
+SELECT age('week', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul'));
+SELECT age('day', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul'));
+SELECT age('hour', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul'));
+SELECT age('minute', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul'));
+SELECT age('second', toDateTime('2014-10-26 00:00:00', 'Asia/Istanbul'), toDateTime('2014-10-27 00:00:00', 'Asia/Istanbul'));
+
+SELECT age('month', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC'));
+SELECT age('week', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC'));
+SELECT age('day', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC'));
+SELECT age('hour', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC'));
+SELECT age('minute', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC'));
+SELECT age('second', toDateTime('2014-10-26 00:00:00', 'UTC'), toDateTime('2014-10-27 00:00:00', 'UTC'));
+
+SELECT 'Additional test';
+
+SELECT number = age('month', now() - INTERVAL number MONTH, now()) FROM system.numbers LIMIT 10;
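age differs from dateDiff in that it counts fully elapsed units rather than unit boundaries crossed, which is why the year/quarter/month answers above are often one less than their dateDiff counterparts. Side by side (sketch):

    SELECT age('year', toDate('2017-12-31'), toDate('2018-01-01'));      -- 0: one day short of a full year
    SELECT dateDiff('year', toDate('2017-12-31'), toDate('2018-01-01')); -- 1: a calendar-year boundary was crossed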
b/parser/testdata/02477_age_date32/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02477_age_date32/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02477_age_date32/query.sql b/parser/testdata/02477_age_date32/query.sql new file mode 100644 index 000000000..aa913a8d1 --- /dev/null +++ b/parser/testdata/02477_age_date32/query.sql @@ -0,0 +1,104 @@ +-- { echo } + +-- Date32 vs Date32 +SELECT age('second', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT age('minute', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT age('hour', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT age('day', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT age('week', toDate32('1927-01-01', 'UTC'), toDate32('1927-01-08', 'UTC'), 'UTC'); +SELECT age('month', toDate32('1927-01-01', 'UTC'), toDate32('1927-02-01', 'UTC'), 'UTC'); +SELECT age('quarter', toDate32('1927-01-01', 'UTC'), toDate32('1927-04-01', 'UTC'), 'UTC'); +SELECT age('year', toDate32('1927-01-01', 'UTC'), toDate32('1928-01-01', 'UTC'), 'UTC'); + +-- With DateTime64 +-- Date32 vs DateTime64 +SELECT age('second', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC'); +SELECT age('minute', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC'); +SELECT age('hour', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC'); +SELECT age('day', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC'); +SELECT age('week', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-01-08 00:00:00', 3, 'UTC'), 'UTC'); +SELECT age('month', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-02-01 00:00:00', 3, 'UTC'), 'UTC'); +SELECT age('quarter', toDate32('1927-01-01', 'UTC'), toDateTime64('1927-04-01 00:00:00', 3, 'UTC'), 'UTC'); +SELECT age('year', toDate32('1927-01-01', 'UTC'), toDateTime64('1928-01-01 00:00:00', 3, 'UTC'), 'UTC'); + +-- DateTime64 vs Date32 +SELECT age('second', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT age('hour', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT age('day', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT age('week', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-01-08', 'UTC'), 'UTC'); +SELECT age('month', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-02-01', 'UTC'), 'UTC'); +SELECT age('quarter', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1927-04-01', 'UTC'), 'UTC'); +SELECT age('year', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), toDate32('1928-01-01', 'UTC'), 'UTC'); + +-- With DateTime +-- Date32 vs DateTime +SELECT age('second', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC'); +SELECT age('minute', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC'); +SELECT age('hour', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC'); +SELECT age('day', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC'); +SELECT age('week', toDate32('2015-08-18', 'UTC'), toDateTime('2015-08-25 00:00:00', 'UTC'), 'UTC'); +SELECT age('month', 
toDate32('2015-08-18', 'UTC'), toDateTime('2015-09-18 00:00:00', 'UTC'), 'UTC'); +SELECT age('quarter', toDate32('2015-08-18', 'UTC'), toDateTime('2015-11-18 00:00:00', 'UTC'), 'UTC'); +SELECT age('year', toDate32('2015-08-18', 'UTC'), toDateTime('2016-08-18 00:00:00', 'UTC'), 'UTC'); + +-- DateTime vs Date32 +SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC'); +SELECT age('minute', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC'); +SELECT age('hour', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC'); +SELECT age('day', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC'); +SELECT age('week', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-08-25', 'UTC'), 'UTC'); +SELECT age('month', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-09-18', 'UTC'), 'UTC'); +SELECT age('quarter', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2015-11-18', 'UTC'), 'UTC'); +SELECT age('year', toDateTime('2015-08-18 00:00:00', 'UTC'), toDate32('2016-08-18', 'UTC'), 'UTC'); + +-- With Date +-- Date32 vs Date +SELECT age('second', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC'); +SELECT age('minute', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC'); +SELECT age('hour', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC'); +SELECT age('day', toDate32('2015-08-18', 'UTC'), toDate('2015-08-19', 'UTC'), 'UTC'); +SELECT age('week', toDate32('2015-08-18', 'UTC'), toDate('2015-08-25', 'UTC'), 'UTC'); +SELECT age('month', toDate32('2015-08-18', 'UTC'), toDate('2015-09-18', 'UTC'), 'UTC'); +SELECT age('quarter', toDate32('2015-08-18', 'UTC'), toDate('2015-11-18', 'UTC'), 'UTC'); +SELECT age('year', toDate32('2015-08-18', 'UTC'), toDate('2016-08-18', 'UTC'), 'UTC'); + +-- Date vs Date32 +SELECT age('second', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC'); +SELECT age('minute', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC'); +SELECT age('hour', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC'); +SELECT age('day', toDate('2015-08-18', 'UTC'), toDate32('2015-08-19', 'UTC'), 'UTC'); +SELECT age('week', toDate('2015-08-18', 'UTC'), toDate32('2015-08-25', 'UTC'), 'UTC'); +SELECT age('month', toDate('2015-08-18', 'UTC'), toDate32('2015-09-18', 'UTC'), 'UTC'); +SELECT age('quarter', toDate('2015-08-18', 'UTC'), toDate32('2015-11-18', 'UTC'), 'UTC'); +SELECT age('year', toDate('2015-08-18', 'UTC'), toDate32('2016-08-18', 'UTC'), 'UTC'); + +-- Const vs non-const columns +SELECT age('day', toDate32('1927-01-01', 'UTC'), materialize(toDate32('1927-01-02', 'UTC')), 'UTC'); +SELECT age('day', toDate32('1927-01-01', 'UTC'), materialize(toDateTime64('1927-01-02 00:00:00', 3, 'UTC')), 'UTC'); +SELECT age('day', toDateTime64('1927-01-01 00:00:00', 3, 'UTC'), materialize(toDate32('1927-01-02', 'UTC')), 'UTC'); +SELECT age('day', toDate32('2015-08-18', 'UTC'), materialize(toDateTime('2015-08-19 00:00:00', 'UTC')), 'UTC'); +SELECT age('day', toDateTime('2015-08-18 00:00:00', 'UTC'), materialize(toDate32('2015-08-19', 'UTC')), 'UTC'); +SELECT age('day', toDate32('2015-08-18', 'UTC'), materialize(toDate('2015-08-19', 'UTC')), 'UTC'); +SELECT age('day', toDate('2015-08-18', 'UTC'), materialize(toDate32('2015-08-19', 'UTC')), 'UTC'); + +-- Non-const vs const columns +SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT age('day', 
materialize(toDate32('1927-01-01', 'UTC')), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC'); +SELECT age('day', materialize(toDateTime64('1927-01-01 00:00:00', 3, 'UTC')), toDate32('1927-01-02', 'UTC'), 'UTC'); +SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), toDateTime('2015-08-19 00:00:00', 'UTC'), 'UTC'); +SELECT age('day', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), toDate32('2015-08-19', 'UTC'), 'UTC'); +SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), toDate('2015-08-19', 'UTC'), 'UTC'); +SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), toDate32('2015-08-19', 'UTC'), 'UTC'); + +-- Non-const vs non-const columns +SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), materialize(toDate32('1927-01-02', 'UTC')), 'UTC'); +SELECT age('day', materialize(toDate32('1927-01-01', 'UTC')), materialize(toDateTime64('1927-01-02 00:00:00', 3, 'UTC')), 'UTC'); +SELECT age('day', materialize(toDateTime64('1927-01-01 00:00:00', 3, 'UTC')), materialize(toDate32('1927-01-02', 'UTC')), 'UTC'); +SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), materialize(toDateTime('2015-08-19 00:00:00', 'UTC')), 'UTC'); +SELECT age('day', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), materialize(toDate32('2015-08-19', 'UTC')), 'UTC'); +SELECT age('day', materialize(toDate32('2015-08-18', 'UTC')), materialize(toDate('2015-08-19', 'UTC')), 'UTC'); +SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), materialize(toDate32('2015-08-19', 'UTC')), 'UTC'); + +-- UBsan issue detected by fuzzer +SELECT age('minute', toDate32(1234567890123456, 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC'), 'UTC') diff --git a/parser/testdata/02477_age_datetime64/ast.json b/parser/testdata/02477_age_datetime64/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02477_age_datetime64/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02477_age_datetime64/metadata.json b/parser/testdata/02477_age_datetime64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02477_age_datetime64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02477_age_datetime64/query.sql b/parser/testdata/02477_age_datetime64/query.sql new file mode 100644 index 000000000..b5fa4da88 --- /dev/null +++ b/parser/testdata/02477_age_datetime64/query.sql @@ -0,0 +1,81 @@ +-- { echo } + +-- DateTime64 vs DateTime64 same scale +SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 00:00:10', 0, 'UTC')); +SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 00:10:00', 0, 'UTC')); +SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 01:00:00', 0, 'UTC')); +SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 01:10:10', 0, 'UTC')); + +SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 00:10:00', 0, 'UTC')); +SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 10:00:00', 0, 'UTC')); + +SELECT age('hour', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-01 10:00:00', 0, 'UTC')); + +SELECT age('day', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-01-02 00:00:00', 0, 'UTC')); +SELECT age('month', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1927-02-01 00:00:00', 0, 'UTC')); 
+SELECT age('year', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), toDateTime64('1928-01-01 00:00:00', 0, 'UTC')); + +-- DateTime64 vs DateTime64 different scale +SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 00:00:10', 3, 'UTC')); +SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 00:10:00', 3, 'UTC')); +SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 01:00:00', 3, 'UTC')); +SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 01:10:10', 3, 'UTC')); + +SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 00:10:00', 3, 'UTC')); +SELECT age('minute', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 10:00:00', 3, 'UTC')); + +SELECT age('hour', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-01 10:00:00', 3, 'UTC')); + +SELECT age('day', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-01-02 00:00:00', 3, 'UTC')); +SELECT age('month', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1927-02-01 00:00:00', 3, 'UTC')); +SELECT age('year', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), toDateTime64('1928-01-01 00:00:00', 3, 'UTC')); + +-- With DateTime +-- DateTime64 vs DateTime +SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 00:00:00', 'UTC')); +SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 00:00:10', 'UTC')); +SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 00:10:00', 'UTC')); +SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 01:00:00', 'UTC')); +SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDateTime('2015-08-18 01:10:10', 'UTC')); + +-- DateTime vs DateTime64 +SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 00:00:00', 3, 'UTC')); +SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 00:00:10', 3, 'UTC')); +SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 00:10:00', 3, 'UTC')); +SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 01:00:00', 3, 'UTC')); +SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), toDateTime64('2015-08-18 01:10:10', 3, 'UTC')); + +-- With Date +-- DateTime64 vs Date +SELECT age('day', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), toDate('2015-08-19', 'UTC')); + +-- Date vs DateTime64 +SELECT age('day', toDate('2015-08-18', 'UTC'), toDateTime64('2015-08-19 00:00:00', 3, 'UTC')); + +-- Same thing but const vs non-const columns +SELECT age('second', toDateTime64('1927-01-01 00:00:00', 0, 'UTC'), materialize(toDateTime64('1927-01-01 00:00:10', 0, 'UTC'))); +SELECT age('second', toDateTime64('1927-01-01 00:00:00', 6, 'UTC'), materialize(toDateTime64('1927-01-01 00:00:10', 3, 'UTC'))); +SELECT age('second', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), materialize(toDateTime('2015-08-18 00:00:10', 'UTC'))); +SELECT age('second', toDateTime('2015-08-18 00:00:00', 'UTC'), materialize(toDateTime64('2015-08-18 00:00:10', 3, 'UTC'))); +SELECT age('day', toDateTime64('2015-08-18 00:00:00', 0, 'UTC'), materialize(toDate('2015-08-19', 'UTC'))); +SELECT age('day', toDate('2015-08-18', 'UTC'), materialize(toDateTime64('2015-08-19 00:00:00', 3, 
'UTC'))); + +-- Same thing but non-const vs const columns +SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 0, 'UTC')), toDateTime64('1927-01-01 00:00:10', 0, 'UTC')); +SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 6, 'UTC')), toDateTime64('1927-01-01 00:00:10', 3, 'UTC')); +SELECT age('second', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), toDateTime('2015-08-18 00:00:10', 'UTC')); +SELECT age('second', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), toDateTime64('2015-08-18 00:00:10', 3, 'UTC')); +SELECT age('day', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), toDate('2015-08-19', 'UTC')); +SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), toDateTime64('2015-08-19 00:00:00', 3, 'UTC')); + +-- Same thing but non-const vs non-const columns +SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 0, 'UTC')), materialize(toDateTime64('1927-01-01 00:00:10', 0, 'UTC'))); +SELECT age('second', materialize(toDateTime64('1927-01-01 00:00:00', 6, 'UTC')), materialize(toDateTime64('1927-01-01 00:00:10', 3, 'UTC'))); +SELECT age('second', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), materialize(toDateTime('2015-08-18 00:00:10', 'UTC'))); +SELECT age('second', materialize(toDateTime('2015-08-18 00:00:00', 'UTC')), materialize(toDateTime64('2015-08-18 00:00:10', 3, 'UTC'))); +SELECT age('day', materialize(toDateTime64('2015-08-18 00:00:00', 0, 'UTC')), materialize(toDate('2015-08-19', 'UTC'))); +SELECT age('day', materialize(toDate('2015-08-18', 'UTC')), materialize(toDateTime64('2015-08-19 00:00:00', 3, 'UTC'))); + +-- UBsan bug #66638 +set session_timezone = 'UTC'; +SELECT age('second', toDateTime(1157339245694594829, 6, 'UTC'), toDate('2015-08-18')) diff --git a/parser/testdata/02477_analyzer_array_join_with_join/ast.json b/parser/testdata/02477_analyzer_array_join_with_join/ast.json new file mode 100644 index 000000000..1b9a8f8a3 --- /dev/null +++ b/parser/testdata/02477_analyzer_array_join_with_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001152311, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02477_analyzer_array_join_with_join/metadata.json b/parser/testdata/02477_analyzer_array_join_with_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02477_analyzer_array_join_with_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02477_analyzer_array_join_with_join/query.sql b/parser/testdata/02477_analyzer_array_join_with_join/query.sql new file mode 100644 index 000000000..3d2cc1b16 --- /dev/null +++ b/parser/testdata/02477_analyzer_array_join_with_join/query.sql @@ -0,0 +1,143 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String, + value_array Array(UInt64) +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0, 'Value_0', [1,2,3]); + +-- { echoOn } + +SELECT * FROM test_table ARRAY JOIN value_array; + +SELECT '--'; + +SELECT *, value_array_element FROM test_table ARRAY JOIN value_array AS value_array_element; + +SELECT '--'; + +SELECT *, value_array FROM test_table ARRAY JOIN value_array AS value_array; + +SELECT '--'; + +SELECT *, value_array FROM test_table ARRAY JOIN [4,5,6] AS value_array; + +SELECT '--'; + +SELECT *, value_array, value_element 
FROM test_table ARRAY JOIN value_array, [4,5,6] AS value_element; + +SELECT '--'; + +SELECT * FROM (SELECT [dummy, dummy] AS dummy FROM system.one) AS subquery ARRAY JOIN dummy INNER JOIN system.one USING (dummy); + +SELECT '--'; + +SELECT * FROM (SELECT [0] AS id) AS subquery_1 ARRAY JOIN id INNER JOIN (SELECT 0 AS id) AS subquery_2 USING (id); + +SELECT '--'; + +SELECT * FROM (SELECT [1] AS id) AS subquery_1 ARRAY JOIN id INNER JOIN (SELECT 0 AS id) AS subquery_2 USING (id); + +SELECT '--'; + +SELECT * FROM (SELECT [0] AS id) AS subquery_1 ARRAY JOIN id INNER JOIN (SELECT 1 AS id) AS subquery_2 USING (id); + +SELECT '--'; + +SELECT * FROM (SELECT [1] AS id) AS subquery_1 ARRAY JOIN id INNER JOIN (SELECT 1 AS id) AS subquery_2 USING (id); + +SELECT '--'; + +SELECT * FROM (SELECT [5] AS id) AS subquery_1 ARRAY JOIN [1,2,3] AS id INNER JOIN (SELECT 1 AS id) AS subquery_2 USING (id); + +SELECT '--'; + +SELECT * FROM (SELECT [0] AS id) AS subquery ARRAY JOIN id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT * FROM (SELECT [1] AS id) AS subquery ARRAY JOIN id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT * FROM (SELECT [0] AS id) AS subquery ARRAY JOIN id AS id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT * FROM (SELECT [1] AS id) AS subquery ARRAY JOIN id AS id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT *, id FROM (SELECT [0] AS id) AS subquery ARRAY JOIN id AS id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT *, id FROM (SELECT [1] AS id) AS subquery ARRAY JOIN id AS id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT * FROM (SELECT [0] AS value) AS subquery ARRAY JOIN value AS id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT * FROM (SELECT [1] AS value) AS subquery ARRAY JOIN value AS id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT *, id FROM (SELECT [0] AS value) AS subquery ARRAY JOIN value AS id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT *, id FROM (SELECT [1] AS value) AS subquery ARRAY JOIN value AS id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT * FROM (SELECT [0] AS id) AS subquery ARRAY JOIN [0] AS id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT * FROM (SELECT [0] AS id) AS subquery ARRAY JOIN [1] AS id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT *, id FROM (SELECT [0] AS id) AS subquery ARRAY JOIN [0] AS id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT *, id FROM (SELECT [0] AS id) AS subquery ARRAY JOIN [1] AS id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT * FROM (SELECT [5] AS id) AS subquery ARRAY JOIN [0] AS id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT * FROM (SELECT [5] AS id) AS subquery ARRAY JOIN [1] AS id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT *, id FROM (SELECT [5] AS id) AS subquery ARRAY JOIN [0] AS id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT *, id FROM (SELECT [5] AS id) AS subquery ARRAY JOIN [1] AS id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT * FROM (SELECT [5] AS id_array) AS subquery ARRAY JOIN id_array, [0] AS id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT * FROM (SELECT [[0]] AS id) AS subquery ARRAY JOIN id AS id_nested_array ARRAY JOIN id_nested_array AS id INNER JOIN test_table USING (id); + +SELECT '--'; + +SELECT *, id FROM (SELECT [[0]] AS id) AS subquery ARRAY JOIN id AS id_nested_array ARRAY JOIN id_nested_array AS id INNER JOIN test_table USING (id); + +-- { 
echoOff } + +DROP TABLE test_table; diff --git a/parser/testdata/02477_analyzer_ast_key_condition_crash/ast.json b/parser/testdata/02477_analyzer_ast_key_condition_crash/ast.json new file mode 100644 index 000000000..5e0442fb8 --- /dev/null +++ b/parser/testdata/02477_analyzer_ast_key_condition_crash/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001449929, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02477_analyzer_ast_key_condition_crash/metadata.json b/parser/testdata/02477_analyzer_ast_key_condition_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02477_analyzer_ast_key_condition_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02477_analyzer_ast_key_condition_crash/query.sql b/parser/testdata/02477_analyzer_ast_key_condition_crash/query.sql new file mode 100644 index 000000000..2fc1cc45c --- /dev/null +++ b/parser/testdata/02477_analyzer_ast_key_condition_crash/query.sql @@ -0,0 +1,15 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64 +) ENGINE = MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (1); + +SELECT * FROM test_table WHERE id = 1; + +SELECT * FROM test_table WHERE id = 1 SETTINGS query_plan_optimize_primary_key = 0; + +DROP TABLE test_table; diff --git a/parser/testdata/02477_exists_fuzz_43478/ast.json b/parser/testdata/02477_exists_fuzz_43478/ast.json new file mode 100644 index 000000000..65fc668a0 --- /dev/null +++ b/parser/testdata/02477_exists_fuzz_43478/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test_rows_compact_part__fuzz_11 (children 3)" + }, + { + "explain": " Identifier test_rows_compact_part__fuzz_11" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType UInt32" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001331806, + "rows_read": 9, + "bytes_read": 352 + } +} diff --git a/parser/testdata/02477_exists_fuzz_43478/metadata.json b/parser/testdata/02477_exists_fuzz_43478/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02477_exists_fuzz_43478/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02477_exists_fuzz_43478/query.sql b/parser/testdata/02477_exists_fuzz_43478/query.sql new file mode 100644 index 000000000..c225befed --- /dev/null +++ b/parser/testdata/02477_exists_fuzz_43478/query.sql @@ -0,0 +1,3 @@ +create table test_rows_compact_part__fuzz_11 (x UInt32) engine = MergeTree order by x; +insert into test_rows_compact_part__fuzz_11 select 1; +select 1 from test_rows_compact_part__fuzz_11 where exists(select 1) settings enable_analyzer=1; diff --git a/parser/testdata/02477_fuse_quantiles/ast.json b/parser/testdata/02477_fuse_quantiles/ast.json new file mode 100644 index 000000000..703b62625 --- /dev/null +++ b/parser/testdata/02477_fuse_quantiles/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + 
} + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001057965, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02477_fuse_quantiles/metadata.json b/parser/testdata/02477_fuse_quantiles/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02477_fuse_quantiles/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02477_fuse_quantiles/query.sql b/parser/testdata/02477_fuse_quantiles/query.sql new file mode 100644 index 000000000..8ddc029f7 --- /dev/null +++ b/parser/testdata/02477_fuse_quantiles/query.sql @@ -0,0 +1,19 @@ +SET enable_analyzer = 1; +SET optimize_syntax_fuse_functions = 1; + +DROP TABLE IF EXISTS fuse_tbl; + +CREATE TABLE fuse_tbl(a Nullable(Int32), b Int32) Engine = Log; + +INSERT INTO fuse_tbl SELECT number, number + 1 FROM numbers(1000); + +SELECT quantile(0.8)(a), toTypeName(quantile(0.8)(a)), quantile(0.9)(a), toTypeName(quantile(0.9)(a)) FROM fuse_tbl; +SELECT quantile(0.8)(b), toTypeName(quantile(0.8)(b)), quantile(0.9)(b), toTypeName(quantile(0.9)(b)) FROM fuse_tbl; +SELECT quantile(0.8)(b), toTypeName(quantile(0.8)(b)), quantile(0.1)(b), toTypeName(quantile(0.1)(b)) FROM fuse_tbl; + +SELECT quantile(a - 1), quantile(b - 1) + 1, quantile(0.8)(b - 1) + 1, quantile(0.8)(b - 1) + 2, quantile(0.9)(b - 1) + 1 FROM fuse_tbl; + +SELECT quantile(0.5)(b), quantile(0.9)(b) from (SELECT x + 1 as b FROM (SELECT quantile(0.5)(b) as x, quantile(0.9)(b) FROM fuse_tbl) GROUP BY x); +EXPLAIN QUERY TREE run_passes = 1 SELECT quantile(0.5)(b), quantile(0.9)(b) from (SELECT x + 1 as b FROM (SELECT quantile(0.5)(b) as x, quantile(0.9)(b) FROM fuse_tbl) GROUP BY x); + +DROP TABLE fuse_tbl; diff --git a/parser/testdata/02477_invalid_reads/ast.json b/parser/testdata/02477_invalid_reads/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02477_invalid_reads/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02477_invalid_reads/metadata.json b/parser/testdata/02477_invalid_reads/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02477_invalid_reads/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02477_invalid_reads/query.sql b/parser/testdata/02477_invalid_reads/query.sql new file mode 100644 index 000000000..1e362fc75 --- /dev/null +++ b/parser/testdata/02477_invalid_reads/query.sql @@ -0,0 +1,61 @@ +-- MIN, MAX AND FAMILY should check for errors in its input +SELECT finalizeAggregation(CAST(unhex('0F00000030'), 'AggregateFunction(min, String)')); -- { serverError CANNOT_READ_ALL_DATA } +SELECT finalizeAggregation(CAST(unhex('FFFF000030'), 'AggregateFunction(min, String)')); -- { serverError CANNOT_READ_ALL_DATA } + +-- UBSAN +SELECT 'ubsan', hex(finalizeAggregation(CAST(unhex('4000000030313233343536373839303132333435363738393031323334353637383930313233343536373839303132333435363738393031323334353637383930313233010000000000000000'), + 'AggregateFunction(argMax, String, UInt64)'))); + +-- aggThrow should check for errors in its input +SELECT finalizeAggregation(CAST('', 'AggregateFunction(aggThrow(0.), UInt8)')); -- { serverError ATTEMPT_TO_READ_AFTER_EOF } + +-- categoricalInformationValue should check for errors in its input +SELECT finalizeAggregation(CAST(unhex('01000000000000000100000000000000'), + 'AggregateFunction(categoricalInformationValue, UInt8, UInt8)')); -- { serverError CANNOT_READ_ALL_DATA } +SELECT 
finalizeAggregation(CAST(unhex('0101000000000000000100000000000000020000000000000001000000000000'), + 'AggregateFunction(categoricalInformationValue, Nullable(UInt8), UInt8)')); -- { serverError CANNOT_READ_ALL_DATA } + +-- groupArray should check for errors in its input +SELECT finalizeAggregation(CAST(unhex('5FF3001310132'), 'AggregateFunction(groupArray, String)')); -- { serverError CANNOT_READ_ALL_DATA } +SELECT finalizeAggregation(CAST(unhex('FF000000000000000001000000000000000200000000000000'), 'AggregateFunction(groupArray, UInt64)')); -- { serverError CANNOT_READ_ALL_DATA } + +-- Same for groupArrayMovingXXXX +SELECT finalizeAggregation(CAST(unhex('0FF00000000000000001000000000000000300000000000000'), 'AggregateFunction(groupArrayMovingSum, UInt64)')); -- { serverError CANNOT_READ_ALL_DATA } +SELECT finalizeAggregation(CAST(unhex('0FF00000000000000001000000000000000300000000000000'), 'AggregateFunction(groupArrayMovingAvg, UInt64)')); -- { serverError CANNOT_READ_ALL_DATA } + +-- Histogram +SELECT finalizeAggregation(CAST(unhex('00000000000024C000000000000018C00500000000000024C0000000000000F03F00000000000022C0000000000000F03F00000000000020C0000000000000'), + 'AggregateFunction(histogram(5), Int64)')); -- { serverError CANNOT_READ_ALL_DATA } + +-- StatisticalSample +SELECT finalizeAggregation(CAST(unhex('0F01000000000000244000000000000026400000000000002840000000000000244000000000000026400000000000002840000000000000F03F'), + 'AggregateFunction(mannWhitneyUTest, Float64, UInt8)')); -- { serverError CANNOT_READ_ALL_DATA } + +-- maxIntersections +SELECT finalizeAggregation(CAST(unhex('0F010000000000000001000000000000000300000000000000FFFFFFFFFFFFFFFF03340B9B047F000001000000000000000500000065000000FFFFFFFFFFFFFFFF'), + 'AggregateFunction(maxIntersections, UInt8, UInt8)')); -- { serverError CANNOT_READ_ALL_DATA } + +-- sequenceNextNode (This was fine because it would fail in the next readBinary call, but better to add a test) +SELECT finalizeAggregation(CAST(unhex('FFFFFFF014181056F38010000000000000001FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'), + 'AggregateFunction(sequenceNextNode(''forward'', ''head''), DateTime, Nullable(String), UInt8, Nullable(UInt8))')) + SETTINGS allow_experimental_funnel_functions=1; -- { serverError CANNOT_READ_ALL_DATA } + +-- Fuzzer (ALL) +SELECT finalizeAggregation(CAST(unhex('FFFFFFF014181056F38010000000000000001FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF014181056F38010000000000000001FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'), + 'AggregateFunction(sequenceNextNode(\'forward\', \'head\'), DateTime, Nullable(String), UInt8, Nullable(UInt8))')) + SETTINGS allow_experimental_funnel_functions = 1; -- { serverError TOO_LARGE_ARRAY_SIZE } + +-- Fuzzer 2 (UBSAN) +SELECT finalizeAggregation(CAST(unhex('FFFFFFF014181056F38010000000000000001FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'), + 'AggregateFunction(sequenceNextNode(\'forward\', \'head\'), DateTime, Nullable(String), UInt8, Nullable(UInt8))')) + SETTINGS allow_experimental_funnel_functions = 1; -- { serverError CANNOT_READ_ALL_DATA } + +-- uniqUpTo +SELECT finalizeAggregation(CAST(unhex('04128345AA2BC97190'), + 'AggregateFunction(uniqUpTo(10), String)')); -- { serverError CANNOT_READ_ALL_DATA } + +-- quantiles +SELECT finalizeAggregation(CAST(unhex('0F0000000000000000'), + 'AggregateFunction(quantileExact, UInt64)')); -- { serverError CANNOT_READ_ALL_DATA } +SELECT finalizeAggregation(CAST(unhex('0F000000000000803F'), + 
'AggregateFunction(quantileTDigest, UInt64)')); -- { serverError CANNOT_READ_ALL_DATA } diff --git a/parser/testdata/02477_is_null_parser/ast.json b/parser/testdata/02477_is_null_parser/ast.json new file mode 100644 index 000000000..209c3ed4b --- /dev/null +++ b/parser/testdata/02477_is_null_parser/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Explain EXPLAIN SYNTAX (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function isNotNull (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function isNull (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001421774, + "rows_read": 13, + "bytes_read": 523 + } +} diff --git a/parser/testdata/02477_is_null_parser/metadata.json b/parser/testdata/02477_is_null_parser/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02477_is_null_parser/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02477_is_null_parser/query.sql b/parser/testdata/02477_is_null_parser/query.sql new file mode 100644 index 000000000..f3ec0affd --- /dev/null +++ b/parser/testdata/02477_is_null_parser/query.sql @@ -0,0 +1,3 @@ +EXPLAIN SYNTAX SELECT 1 IS NULL + 1 IS NOT NULL; +EXPLAIN SYNTAX SELECT 1 IS NULL = 0; +EXPLAIN SYNTAX SELECT 1 IS NULL :: Int32; diff --git a/parser/testdata/02477_logical_expressions_optimizer_issue_89803/ast.json b/parser/testdata/02477_logical_expressions_optimizer_issue_89803/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02477_logical_expressions_optimizer_issue_89803/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02477_logical_expressions_optimizer_issue_89803/metadata.json b/parser/testdata/02477_logical_expressions_optimizer_issue_89803/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02477_logical_expressions_optimizer_issue_89803/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02477_logical_expressions_optimizer_issue_89803/query.sql b/parser/testdata/02477_logical_expressions_optimizer_issue_89803/query.sql new file mode 100644 index 000000000..4412f4d62 --- /dev/null +++ b/parser/testdata/02477_logical_expressions_optimizer_issue_89803/query.sql @@ -0,0 +1,61 @@ +-- Github issue 89803 + +SELECT 'Integer'; + +DROP TABLE IF EXISTS tab_int; +CREATE TABLE tab_int +( + `col_int` UInt64 +) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO tab_int VALUES (1), (2); + +SELECT 'Negative checks'; +SELECT count() FROM tab_int WHERE col_int = 1 AND col_int = 2; +SELECT count() FROM tab_int WHERE col_int = 1 AND col_int = '2'; +SELECT count() FROM tab_int WHERE col_int = '1' AND col_int = 2; + +SELECT 'Positive checks'; +SELECT count() FROM tab_int WHERE col_int = 1; +SELECT count() FROM tab_int WHERE col_int = '1'; + +SELECT count() FROM tab_int WHERE col_int = 1 AND col_int = '1'; +SELECT count() FROM tab_int WHERE col_int = '1' AND col_int = 1; + +SELECT count() 
FROM tab_int WHERE col_int = '1' AND (col_int = 1 OR col_int = 2); +SELECT count() FROM tab_int WHERE (col_int = 1 OR col_int = 2) AND col_int = '1'; + +DROP TABLE tab_int; + +SELECT 'Boolean'; + +DROP TABLE IF EXISTS tab_bool; +CREATE TABLE tab_bool +( + `col_bool` Boolean +) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO tab_bool VALUES (true), (false); + +SELECT 'Negative checks'; +SELECT count() FROM tab_bool WHERE col_bool = true AND col_bool = false; +SELECT count() FROM tab_bool WHERE col_bool = true AND col_bool = 'false'; +SELECT count() FROM tab_bool WHERE col_bool = 'true' AND col_bool = false; + +SELECT 'Positive checks'; +SELECT count() FROM tab_bool WHERE col_bool = true; +SELECT count() FROM tab_bool WHERE col_bool = 'true'; +SELECT count() FROM tab_bool WHERE col_bool = false; +SELECT count() FROM tab_bool WHERE col_bool = 'false'; + +SELECT count() FROM tab_bool WHERE col_bool = true AND col_bool = 'true'; +SELECT count() FROM tab_bool WHERE col_bool = 'true' AND col_bool = true; + +SELECT count() FROM tab_bool WHERE col_bool = false AND col_bool = 'false'; +SELECT count() FROM tab_bool WHERE col_bool = 'false' AND col_bool = false; + +DROP TABLE tab_bool; diff --git a/parser/testdata/02477_logical_expressions_optimizer_low_cardinality/ast.json b/parser/testdata/02477_logical_expressions_optimizer_low_cardinality/ast.json new file mode 100644 index 000000000..54635d2c1 --- /dev/null +++ b/parser/testdata/02477_logical_expressions_optimizer_low_cardinality/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_logical_expressions_optimizer_low_cardinality (children 1)" + }, + { + "explain": " Identifier t_logical_expressions_optimizer_low_cardinality" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00126866, + "rows_read": 2, + "bytes_read": 146 + } +} diff --git a/parser/testdata/02477_logical_expressions_optimizer_low_cardinality/metadata.json b/parser/testdata/02477_logical_expressions_optimizer_low_cardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02477_logical_expressions_optimizer_low_cardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02477_logical_expressions_optimizer_low_cardinality/query.sql b/parser/testdata/02477_logical_expressions_optimizer_low_cardinality/query.sql new file mode 100644 index 000000000..b328e9658 --- /dev/null +++ b/parser/testdata/02477_logical_expressions_optimizer_low_cardinality/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS t_logical_expressions_optimizer_low_cardinality; +set optimize_min_equality_disjunction_chain_length=3; +CREATE TABLE t_logical_expressions_optimizer_low_cardinality (a LowCardinality(String), b UInt32) ENGINE = Memory; + +-- LowCardinality case, ignore optimize_min_equality_disjunction_chain_length limit, optimizer applied +-- Chain of OR equals +EXPLAIN SYNTAX SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a = 'x' OR a = 'y'; +EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a = 'x' OR a = 'y' SETTINGS enable_analyzer = 1; +EXPLAIN SYNTAX SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a = 'x' OR 'y' = a; +EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a = 'x' OR 'y' = a SETTINGS enable_analyzer = 1; +-- Chain of AND notEquals +EXPLAIN SYNTAX SELECT a FROM 
t_logical_expressions_optimizer_low_cardinality WHERE a <> 'x' AND a <> 'y'; +EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a <> 'x' AND a <> 'y' SETTINGS enable_analyzer = 1; +EXPLAIN SYNTAX SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a <> 'x' AND 'y' <> a; +EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE a <> 'x' AND 'y' <> a SETTINGS enable_analyzer = 1; + +-- Non-LowCardinality case, optimizer not applied for short chains +-- Chain of OR equals +EXPLAIN SYNTAX SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE b = 0 OR b = 1; +EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE b = 0 OR b = 1 SETTINGS enable_analyzer = 1; +-- Chain of AND notEquals +EXPLAIN SYNTAX SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE b <> 0 AND b <> 1; +EXPLAIN QUERY TREE SELECT a FROM t_logical_expressions_optimizer_low_cardinality WHERE b <> 0 AND b <> 1 SETTINGS enable_analyzer = 1; + +DROP TABLE t_logical_expressions_optimizer_low_cardinality; diff --git a/parser/testdata/02477_single_value_data_string_regression/ast.json b/parser/testdata/02477_single_value_data_string_regression/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02477_single_value_data_string_regression/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02477_single_value_data_string_regression/metadata.json b/parser/testdata/02477_single_value_data_string_regression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02477_single_value_data_string_regression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02477_single_value_data_string_regression/query.sql b/parser/testdata/02477_single_value_data_string_regression/query.sql new file mode 100644 index 000000000..3337a79c0 --- /dev/null +++ b/parser/testdata/02477_single_value_data_string_regression/query.sql @@ -0,0 +1,121 @@ + +-- Context: https://github.com/ClickHouse/ClickHouse/issues/42916 + +-- STRING WITH 10 CHARACTERS +-- SELECT version() AS v, hex(argMaxState('0123456789', number)) AS state FROM numbers(1) FORMAT CSV + +CREATE TABLE argmaxstate_hex_small +( + `v` String, + `state` String +) +ENGINE = TinyLog; + +INSERT into argmaxstate_hex_small VALUES ('22.8.5.29','0B0000003031323334353637383900010000000000000000'), ('22.8.6.71','0A00000030313233343536373839010000000000000000'); + +-- Assert that the current version will write the same as 22.8.5 (last known good 22.8 minor) +SELECT + (SELECT hex(argMaxState('0123456789', number)) FROM numbers(1)) = state +FROM argmaxstate_hex_small +WHERE v = '22.8.5.29'; + +-- Assert that the current version can read correctly both the old and the regression states +SELECT + v, + length(finalizeAggregation(CAST(unhex(state) AS AggregateFunction(argMax, String, UInt64)))) +FROM argmaxstate_hex_small; + +-- STRING WITH 54 characters +-- SELECT version() AS v, hex(argMaxState('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz', number)) AS state FROM numbers(1) FORMAT CSV +CREATE TABLE argmaxstate_hex_large +( + `v` String, + `state` String +) +ENGINE = TinyLog; + +INSERT into argmaxstate_hex_large VALUES ('22.8.5.29','350000004142434445464748494A4B4C4D4E4F505152535455565758595A6162636465666768696A6B6C6D6E6F707172737475767778797A00010000000000000000'), 
('22.8.6.71','340000004142434445464748494A4B4C4D4E4F505152535455565758595A6162636465666768696A6B6C6D6E6F707172737475767778797A010000000000000000'); + +SELECT + (SELECT hex(argMaxState('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz', number)) FROM numbers(1)) = state +FROM argmaxstate_hex_large +WHERE v = '22.8.5.29'; + +SELECT + v, + length(finalizeAggregation(CAST(unhex(state) AS AggregateFunction(argMax, String, UInt64)))) +FROM argmaxstate_hex_large; + +-- STRING WITH 0 characters +-- SELECT version() AS v, hex(argMaxState('', number)) AS state FROM numbers(1) FORMAT CSV +CREATE TABLE argmaxstate_hex_empty +( + `v` String, + `state` String +) +ENGINE = TinyLog; + +INSERT into argmaxstate_hex_empty VALUES ('22.8.5.29','0100000000010000000000000000'), ('22.8.6.71','00000000010000000000000000'); + +SELECT + (SELECT hex(argMaxState('', number)) FROM numbers(1)) = state +FROM argmaxstate_hex_empty +WHERE v = '22.8.5.29'; + +SELECT v, length(finalizeAggregation(CAST(unhex(state) AS AggregateFunction(argMax, String, UInt64)))) +FROM argmaxstate_hex_empty; + +-- Right in the border of small and large buffers +-- SELECT hex(argMaxState('0123456789012345678901234567890123456789012345' as a, number)) AS state, length(a) FROM numbers(1) FORMAT CSV +SELECT '46_OK', finalizeAggregation(CAST(unhex('2F0000003031323334353637383930313233343536373839303132333435363738393031323334353637383930313233343500010000000000000000'), 'AggregateFunction(argMax, String, UInt64)')); +SELECT '46_KO', finalizeAggregation(CAST(unhex('2E00000030313233343536373839303132333435363738393031323334353637383930313233343536373839303132333435010000000000000000'), 'AggregateFunction(argMax, String, UInt64)')); + +-- SELECT hex(argMaxState('01234567890123456789012345678901234567890123456' as a, number)) AS state, length(a) FROM numbers(1) FORMAT CSV +SELECT '47_OK', finalizeAggregation(CAST(unhex('30000000303132333435363738393031323334353637383930313233343536373839303132333435363738393031323334353600010000000000000000'), 'AggregateFunction(argMax, String, UInt64)')); +SELECT '47_KO', finalizeAggregation(CAST(unhex('2F0000003031323334353637383930313233343536373839303132333435363738393031323334353637383930313233343536010000000000000000'), 'AggregateFunction(argMax, String, UInt64)')); + +-- SELECT hex(argMaxState('012345678901234567890123456789012345678901234567' as a, number)) AS state, length(a) FROM numbers(1) FORMAT CSV +SELECT '48_OK', finalizeAggregation(CAST(unhex('3100000030313233343536373839303132333435363738393031323334353637383930313233343536373839303132333435363700010000000000000000'), 'AggregateFunction(argMax, String, UInt64)')); +SELECT '48_KO', finalizeAggregation(CAST(unhex('30000000303132333435363738393031323334353637383930313233343536373839303132333435363738393031323334353637010000000000000000'), 'AggregateFunction(argMax, String, UInt64)')); + +-- Right in the allocation limit (power of 2) +-- SELECT hex(argMaxState('012345678901234567890123456789012345678901234567890123456789012' as a, number)) AS state, length(a) FROM numbers(1) FORMAT CSV +SELECT '63_OK', finalizeAggregation(CAST(unhex('4000000030313233343536373839303132333435363738393031323334353637383930313233343536373839303132333435363738393031323334353637383930313200010000000000000000'), 'AggregateFunction(argMax, String, UInt64)')); +SELECT '63_KO', finalizeAggregation(CAST(unhex('3F000000303132333435363738393031323334353637383930313233343536373839303132333435363738393031323334353637383930313233343536373839303132010000000000000000'), 
'AggregateFunction(argMax, String, UInt64)')); +-- SELECT hex(argMaxState('0123456789012345678901234567890123456789012345678901234567890123' as a, number)) AS state, length(a) FROM numbers(1) FORMAT CSV +SELECT '64_OK', finalizeAggregation(CAST(unhex('410000003031323334353637383930313233343536373839303132333435363738393031323334353637383930313233343536373839303132333435363738393031323300010000000000000000'), 'AggregateFunction(argMax, String, UInt64)')); +SELECT '64_KO', finalizeAggregation(CAST(unhex('4000000030313233343536373839303132333435363738393031323334353637383930313233343536373839303132333435363738393031323334353637383930313233010000000000000000'), 'AggregateFunction(argMax, String, UInt64)')); + +SELECT '-1', maxMerge(x), length(maxMerge(x)) from (select CAST(unhex('ffffffff') || randomString(100500), 'AggregateFunction(max, String)') as x); +SELECT '-2', maxMerge(x), length(maxMerge(x)) from (select CAST(unhex('fffffffe') || randomString(100500), 'AggregateFunction(max, String)') as x); +SELECT '-2^31', maxMerge(x), length(maxMerge(x)) from (select CAST(unhex('00000080') || randomString(100500), 'AggregateFunction(max, String)') as x); + +SELECT '2^31-1', maxMerge(x) from (select CAST(unhex('ffffff7f') || randomString(100500), 'AggregateFunction(max, String)') as x); -- { serverError CANNOT_READ_ALL_DATA } + +SELECT '2^31-2', maxMerge(x) from (select CAST(unhex('feffff7f') || randomString(100500), 'AggregateFunction(max, String)') as x); -- { serverError CANNOT_READ_ALL_DATA } + +SELECT '2^30', maxMerge(x) from (select CAST(unhex('00000040') || randomString(100500), 'AggregateFunction(max, String)') as x); -- { serverError CANNOT_READ_ALL_DATA } +SELECT '2^30+1', maxMerge(x) from (select CAST(unhex('01000040') || randomString(100500), 'AggregateFunction(max, String)') as x); -- { serverError CANNOT_READ_ALL_DATA } + +SELECT '2^30-1', maxMerge(x) from (select CAST(unhex('ffffff3f') || randomString(100500), 'AggregateFunction(max, String)') as x); -- { serverError CANNOT_READ_ALL_DATA } +-- The following query works, but it's too long and consumes too much memory +-- SELECT '2^30-1', length(maxMerge(x)) from (select CAST(unhex('ffffff3f') || randomString(0x3FFFFFFF - 1) || 'x', 'AggregateFunction(max, String)') as x); +SELECT '1M without 0', length(maxMerge(x)) from (select CAST(unhex('00001000') || randomString(0x00100000 - 1) || 'x', 'AggregateFunction(max, String)') as x); +SELECT '1M with 0', length(maxMerge(x)) from (select CAST(unhex('00001000') || randomString(0x00100000 - 1) || '\0', 'AggregateFunction(max, String)') as x); + +SELECT 'fuzz1', finalizeAggregation(CAST(unhex('3000000\0303132333435363738393031323334353637383930313233343536373839303132333435363738393031323334353600010000000000000000'), 'AggregateFunction(argMax, String, UInt64)')); -- { serverError INCORRECT_DATA } +SELECT 'fuzz2', finalizeAggregation(CAST(unhex('04000000' || '30313233' || '01' || 'ffffffffffffffff'), 'AggregateFunction(argMax, String, UInt64)')) as x, length(x); +SELECT 'fuzz3', finalizeAggregation(CAST(unhex('04000000' || '30313233' || '00' || 'ffffffffffffffff'), 'AggregateFunction(argMax, String, UInt64)')) as x, length(x); -- { serverError INCORRECT_DATA } +SELECT 'fuzz4', finalizeAggregation(CAST(unhex('04000000' || '30313233' || '00'), 'AggregateFunction(argMax, String, UInt64)')) as x, length(x); -- { serverError INCORRECT_DATA } +SELECT 'fuzz5', finalizeAggregation(CAST(unhex('0100000000000000000FFFFFFFF0'), 'AggregateFunction(argMax, UInt64, String)')); -- { serverError INCORRECT_DATA
} + + +drop table if exists aggr; +create table aggr (n int, s AggregateFunction(max, String)) engine=MergeTree order by n; +insert into aggr select 1, maxState(''); +insert into aggr select 2, maxState('\0'); +insert into aggr select 3, maxState('\0\0\0\0'); +insert into aggr select 4, maxState('abrac\0dabra\0'); +select n, maxMerge(s) as x, length(x) from aggr group by n order by n; +select maxMerge(s) as x, length(x) from aggr; +drop table aggr; diff --git a/parser/testdata/02478_analyzer_table_expression_aliases/ast.json b/parser/testdata/02478_analyzer_table_expression_aliases/ast.json new file mode 100644 index 000000000..8c0fc120b --- /dev/null +++ b/parser/testdata/02478_analyzer_table_expression_aliases/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001291933, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02478_analyzer_table_expression_aliases/metadata.json b/parser/testdata/02478_analyzer_table_expression_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02478_analyzer_table_expression_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02478_analyzer_table_expression_aliases/query.sql b/parser/testdata/02478_analyzer_table_expression_aliases/query.sql new file mode 100644 index 000000000..a1eb88c63 --- /dev/null +++ b/parser/testdata/02478_analyzer_table_expression_aliases/query.sql @@ -0,0 +1,50 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE = TinyLog; + +INSERT INTO test_table VALUES (0, 'Value'); + +SELECT * FROM test_table AS test_table; + +SELECT '--'; + +SELECT * FROM test_table AS t1, t1; + +SELECT '--'; + +SELECT * FROM t1, test_table AS t1; + +SELECT '--'; + +SELECT * FROM test_table AS test_table, test_table; + +SELECT '--'; + +SELECT * FROM (SELECT 1) AS test_table, test_table AS subquery; + +SELECT '--'; + +SELECT * FROM test_table AS subquery, (SELECT 1) AS test_table; + +SELECT '--'; + +WITH cte_subquery AS (SELECT 1) SELECT * FROM cte_subquery AS cte_subquery; + +SELECT '--'; + +WITH cte_subquery AS (SELECT 1) SELECT * FROM cte_subquery AS cte_subquery, cte_subquery AS subquery; + +SELECT '--'; + +SELECT * FROM t3, test_table AS t1, t1 AS t2, t2 AS t3; + +SELECT '--'; + +SELECT * FROM t3 AS t4, (SELECT 1) AS t1, t1 AS t2, t2 AS t3; + +DROP TABLE test_table; diff --git a/parser/testdata/02478_factorial/ast.json b/parser/testdata/02478_factorial/ast.json new file mode 100644 index 000000000..420919e60 --- /dev/null +++ b/parser/testdata/02478_factorial/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function factorial (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001061844, + "rows_read": 10, + "bytes_read": 378 + } +} diff --git 
a/parser/testdata/02478_factorial/metadata.json b/parser/testdata/02478_factorial/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02478_factorial/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02478_factorial/query.sql b/parser/testdata/02478_factorial/query.sql new file mode 100644 index 000000000..74d34bd98 --- /dev/null +++ b/parser/testdata/02478_factorial/query.sql @@ -0,0 +1,7 @@ +select factorial(-1) = 1; +select factorial(0) = 1; +select factorial(10) = 3628800; + +select factorial(100); -- { serverError BAD_ARGUMENTS } +select factorial('100'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select factorial(100.1234); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/02478_projection_and_alter_low_cardinality/ast.json b/parser/testdata/02478_projection_and_alter_low_cardinality/ast.json new file mode 100644 index 000000000..aa32de695 --- /dev/null +++ b/parser/testdata/02478_projection_and_alter_low_cardinality/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery testing (children 1)" + }, + { + "explain": " Identifier testing" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001223276, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/02478_projection_and_alter_low_cardinality/metadata.json b/parser/testdata/02478_projection_and_alter_low_cardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02478_projection_and_alter_low_cardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02478_projection_and_alter_low_cardinality/query.sql b/parser/testdata/02478_projection_and_alter_low_cardinality/query.sql new file mode 100644 index 000000000..b811513b3 --- /dev/null +++ b/parser/testdata/02478_projection_and_alter_low_cardinality/query.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS testing; + +CREATE TABLE testing +( + a String, + b String, + c String, + d String, + PROJECTION proj_1 + ( + SELECT b, c + ORDER BY d + ) +) +ENGINE = MergeTree() +PRIMARY KEY (a) +ORDER BY (a, b) +SETTINGS index_granularity = 8192, index_granularity_bytes = 0, min_bytes_for_wide_part = 0; + +INSERT INTO testing SELECT randomString(5), randomString(5), randomString(5), randomString(5) FROM numbers(10); + +OPTIMIZE TABLE testing FINAL; + +ALTER TABLE testing MODIFY COLUMN c LowCardinality(String) SETTINGS mutations_sync=2; + +SELECT * FROM system.mutations WHERE database = currentDatabase() AND table = 'testing' AND not is_done; + +DROP TABLE testing; diff --git a/parser/testdata/02478_projection_with_group_by_alter/ast.json b/parser/testdata/02478_projection_with_group_by_alter/ast.json new file mode 100644 index 000000000..e30903fe9 --- /dev/null +++ b/parser/testdata/02478_projection_with_group_by_alter/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery testing (children 1)" + }, + { + "explain": " Identifier testing" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001450265, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/02478_projection_with_group_by_alter/metadata.json b/parser/testdata/02478_projection_with_group_by_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02478_projection_with_group_by_alter/metadata.json @@ -0,0 +1 @@ 
+{"todo": true} diff --git a/parser/testdata/02478_projection_with_group_by_alter/query.sql b/parser/testdata/02478_projection_with_group_by_alter/query.sql new file mode 100644 index 000000000..3c08c913a --- /dev/null +++ b/parser/testdata/02478_projection_with_group_by_alter/query.sql @@ -0,0 +1,58 @@ +DROP TABLE IF EXISTS testing; + +CREATE TABLE testing +( + a String, + b String, + c Int32, + d Int32, + e Int32, + PROJECTION proj_1 + ( + SELECT c ORDER BY d + ), + PROJECTION proj_2 + ( + SELECT c ORDER BY e, d + ) +) +ENGINE = MergeTree() PRIMARY KEY (a) SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO testing SELECT number, number, number, number, number%2 FROM numbers(5); + +-- { echoOn } + +OPTIMIZE TABLE testing FINAL; + +SELECT c FROM testing ORDER BY d; +SELECT c FROM testing ORDER BY e, d; + +-- update all columns used by proj_1 +ALTER TABLE testing UPDATE c = c+1, d = d+2 WHERE True SETTINGS mutations_sync=2; + +SELECT * FROM system.mutations WHERE database = currentDatabase() AND table = 'testing' AND not is_done; + +SELECT c FROM testing ORDER BY d; +SELECT c FROM testing ORDER BY e, d; + + +-- update only one column +ALTER TABLE testing UPDATE d = d-1 WHERE True SETTINGS mutations_sync=2; + +SELECT * FROM system.mutations WHERE database = currentDatabase() AND table = 'testing' AND not is_done; + +SELECT c FROM testing ORDER BY d; +SELECT c FROM testing ORDER BY e, d; + + +-- update only another one column +ALTER TABLE testing UPDATE c = c-1 WHERE True SETTINGS mutations_sync=2; + +SELECT * FROM system.mutations WHERE database = currentDatabase() AND table = 'testing' AND not is_done; + +SELECT c FROM testing ORDER BY d; +SELECT c FROM testing ORDER BY e, d; + +-- { echoOff } + +DROP TABLE testing; diff --git a/parser/testdata/02478_window_frame_type_groups/ast.json b/parser/testdata/02478_window_frame_type_groups/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02478_window_frame_type_groups/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02478_window_frame_type_groups/metadata.json b/parser/testdata/02478_window_frame_type_groups/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02478_window_frame_type_groups/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02478_window_frame_type_groups/query.sql b/parser/testdata/02478_window_frame_type_groups/query.sql new file mode 100644 index 000000000..a01e1813c --- /dev/null +++ b/parser/testdata/02478_window_frame_type_groups/query.sql @@ -0,0 +1,7 @@ +SET enable_analyzer = 0; + +SELECT toUInt64(dense_rank(1) OVER (ORDER BY 100 ASC GROUPS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)) FROM numbers(10); -- { serverError NOT_IMPLEMENTED } + +SET enable_analyzer = 1; + +SELECT toUInt64(dense_rank(1) OVER (ORDER BY 100 ASC GROUPS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING)) FROM numbers(10); -- { serverError NOT_IMPLEMENTED } diff --git a/parser/testdata/02479_analyzer_aggregation_crash/ast.json b/parser/testdata/02479_analyzer_aggregation_crash/ast.json new file mode 100644 index 000000000..a73428f20 --- /dev/null +++ b/parser/testdata/02479_analyzer_aggregation_crash/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001300153, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git 
a/parser/testdata/02479_analyzer_aggregation_crash/metadata.json b/parser/testdata/02479_analyzer_aggregation_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02479_analyzer_aggregation_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02479_analyzer_aggregation_crash/query.sql b/parser/testdata/02479_analyzer_aggregation_crash/query.sql new file mode 100644 index 000000000..1e8907406 --- /dev/null +++ b/parser/testdata/02479_analyzer_aggregation_crash/query.sql @@ -0,0 +1,13 @@ +SET enable_analyzer = 1; +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; + +DROP TABLE IF EXISTS lc_00906__fuzz_46; +CREATE TABLE lc_00906__fuzz_46 (`b` Int64) ENGINE = MergeTree ORDER BY b; +INSERT INTO lc_00906__fuzz_46 SELECT '0123456789' FROM numbers(10); + +SELECT count(3.4028234663852886e38), b FROM lc_00906__fuzz_46 GROUP BY b; + +SELECT count(1), b FROM lc_00906__fuzz_46 GROUP BY b; + +DROP TABLE lc_00906__fuzz_46; diff --git a/parser/testdata/02479_analyzer_aggregation_totals_rollup_crash_fix/ast.json b/parser/testdata/02479_analyzer_aggregation_totals_rollup_crash_fix/ast.json new file mode 100644 index 000000000..cf727921f --- /dev/null +++ b/parser/testdata/02479_analyzer_aggregation_totals_rollup_crash_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001200878, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02479_analyzer_aggregation_totals_rollup_crash_fix/metadata.json b/parser/testdata/02479_analyzer_aggregation_totals_rollup_crash_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02479_analyzer_aggregation_totals_rollup_crash_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02479_analyzer_aggregation_totals_rollup_crash_fix/query.sql b/parser/testdata/02479_analyzer_aggregation_totals_rollup_crash_fix/query.sql new file mode 100644 index 000000000..004e61ee1 --- /dev/null +++ b/parser/testdata/02479_analyzer_aggregation_totals_rollup_crash_fix/query.sql @@ -0,0 +1,5 @@ +SET enable_analyzer = 1; + +SELECT anyLast(number) FROM numbers(1) GROUP BY number WITH ROLLUP WITH TOTALS; + +SELECT tuple(tuple(0.0001)), anyLast(number) FROM numbers(1) GROUP BY number WITH ROLLUP WITH TOTALS; diff --git a/parser/testdata/02479_analyzer_join_with_constants/ast.json b/parser/testdata/02479_analyzer_join_with_constants/ast.json new file mode 100644 index 000000000..2db7fca7c --- /dev/null +++ b/parser/testdata/02479_analyzer_join_with_constants/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001163188, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02479_analyzer_join_with_constants/metadata.json b/parser/testdata/02479_analyzer_join_with_constants/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02479_analyzer_join_with_constants/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02479_analyzer_join_with_constants/query.sql b/parser/testdata/02479_analyzer_join_with_constants/query.sql new file mode 100644 index 000000000..62efc0c14 --- /dev/null +++ 
b/parser/testdata/02479_analyzer_join_with_constants/query.sql @@ -0,0 +1,33 @@ +SET enable_analyzer = 1; + +SELECT * FROM (SELECT 1 AS id) AS t1 INNER JOIN (SELECT 1 AS id) AS t2 ON t1.id = t2.id AND 1; + +SELECT '--'; + +SELECT * FROM (SELECT 1 AS id) AS t1 INNER JOIN (SELECT 2 AS id) AS t2 ON t1.id = t2.id AND 1; + +SELECT '--'; + +SELECT * FROM (SELECT 1 AS id) AS t1 INNER JOIN (SELECT 1 AS id) AS t2 ON t1.id = t2.id AND 0; + +SELECT '--'; + +SELECT * FROM (SELECT 1 AS id) AS t1 INNER JOIN (SELECT 2 AS id) AS t2 ON t1.id = t2.id OR 1; + +SELECT '--'; + +SELECT * FROM (SELECT 1 AS id, 1 AS value) AS t1 ASOF LEFT JOIN (SELECT 1 AS id, 1 AS value) AS t2 ON (t1.id = t2.id) AND 1 == 1 AND (t1.value >= t2.value); + +SELECT '--'; + +SELECT * FROM (SELECT 1 AS id, 1 AS value) AS t1 ASOF LEFT JOIN (SELECT 1 AS id, 1 AS value) AS t2 ON (t1.id = t2.id) AND 1 != 1 AND (t1.value >= t2.value); + +SELECT '--'; + +SELECT b.dt FROM (SELECT NULL > NULL AS pk, 1 AS dt FROM numbers(5)) AS a ASOF LEFT JOIN (SELECT NULL AS pk, 1 AS dt) AS b ON (a.pk = b.pk) AND 1 != 1 AND (a.dt >= b.dt) SETTINGS enable_analyzer = 0; -- { serverError INVALID_JOIN_ON_EXPRESSION } +SELECT b.dt FROM (SELECT NULL > NULL AS pk, 1 AS dt FROM numbers(5)) AS a ASOF LEFT JOIN (SELECT NULL AS pk, 1 AS dt) AS b ON (a.pk = b.pk) AND 1 != 1 AND (a.dt >= b.dt) SETTINGS enable_analyzer = 1; + +SELECT '--'; + +-- Fuzzed +SELECT * FROM (SELECT 1 AS id, 1 AS value) AS t1 ASOF LEFT JOIN (SELECT 1 AS id, 1 AS value) AS t2 ON (t1.id = t2.id) AND (toUInt256(1) IN (SELECT materialize(1))) AND (1 != 1) AND (t1.value >= t2.value); diff --git a/parser/testdata/02479_if_with_null_and_cullable_const/ast.json b/parser/testdata/02479_if_with_null_and_cullable_const/ast.json new file mode 100644 index 000000000..2e19dcfe1 --- /dev/null +++ b/parser/testdata/02479_if_with_null_and_cullable_const/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001263438, + "rows_read": 20, + "bytes_read": 772 + } +} diff --git a/parser/testdata/02479_if_with_null_and_cullable_const/metadata.json b/parser/testdata/02479_if_with_null_and_cullable_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02479_if_with_null_and_cullable_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02479_if_with_null_and_cullable_const/query.sql 
b/parser/testdata/02479_if_with_null_and_cullable_const/query.sql new file mode 100644 index 000000000..b684de88c --- /dev/null +++ b/parser/testdata/02479_if_with_null_and_cullable_const/query.sql @@ -0,0 +1,3 @@ +SELECT if(number % 2, NULL, toNullable(1)) FROM numbers(2); +SELECT if(number % 2, toNullable(1), NULL) FROM numbers(2); + diff --git a/parser/testdata/02479_mysql_connect_to_self/ast.json b/parser/testdata/02479_mysql_connect_to_self/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02479_mysql_connect_to_self/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02479_mysql_connect_to_self/metadata.json b/parser/testdata/02479_mysql_connect_to_self/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02479_mysql_connect_to_self/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02479_mysql_connect_to_self/query.sql b/parser/testdata/02479_mysql_connect_to_self/query.sql new file mode 100644 index 000000000..9b0c0abb5 --- /dev/null +++ b/parser/testdata/02479_mysql_connect_to_self/query.sql @@ -0,0 +1,80 @@ +-- Tags: no-fasttest + +SET send_logs_level = 'fatal'; -- Failed connection attempts are OK if the connection succeeds after a retry. + +DROP TABLE IF EXISTS foo; + +CREATE TABLE foo (key UInt32, a String, b Int64, c String) ENGINE = TinyLog; +INSERT INTO foo VALUES (1, 'one', -1, 'een'), (2, 'two', -2, 'twee'), (3, 'three', -3, 'drie'), (4, 'four', -4, 'vier'), (5, 'five', -5, 'vijf'); + +SET enable_analyzer = 1; + +SELECT '---'; +SELECT * FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) ORDER BY key; + +SELECT '---'; +SELECT count() FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100); + +SELECT '---'; +SELECT 1 FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100); + +SELECT '---'; +SELECT key FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) ORDER BY key; + +SELECT '---'; +SELECT b, a FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) ORDER BY a; + +SELECT '---'; +SELECT b, a FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) ORDER BY c; + +SELECT '---'; +SELECT b FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) WHERE c != 'twee' ORDER BY b; + +SELECT '---'; +SELECT count() FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) WHERE c != 'twee'; + +EXPLAIN QUERY TREE dump_ast = 1 +SELECT * FROM mysql( + '127.0.0.1:9004', currentDatabase(), foo, 'default', '', + SETTINGS connection_wait_timeout = 123, connect_timeout = 40123002, read_write_timeout = 40123001, connection_pool_size = 3 +); + +SELECT '---'; +SELECT count() FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connection_pool_size = 1, connect_timeout = 100, connection_wait_timeout = 100); +SELECT count() FROM mysql('127.0.0.1:9004', currentDatabase(), foo, 'default', '', SETTINGS connection_pool_size = 0); -- { serverError BAD_ARGUMENTS } + +SELECT
'---'; +SELECT * FROM mysql('[::1]:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) ORDER BY key; + +SELECT '---'; +SELECT count() FROM mysql('[::1]:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100); + +SELECT '---'; +SELECT 1 FROM mysql('[::1]:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100); + +SELECT '---'; +SELECT key FROM mysql('[::1]:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) ORDER BY key; + +SELECT '---'; +SELECT b, a FROM mysql('[::1]:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) ORDER BY a; + +SELECT '---'; +SELECT b, a FROM mysql('[::1]:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) ORDER BY c; + +SELECT '---'; +SELECT b FROM mysql('[::1]:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) WHERE c != 'twee' ORDER BY b; + +SELECT '---'; +SELECT count() FROM mysql('[::1]:9004', currentDatabase(), foo, 'default', '', SETTINGS connect_timeout = 100, connection_wait_timeout = 100) WHERE c != 'twee'; + +EXPLAIN QUERY TREE dump_ast = 1 +SELECT * FROM mysql( + '[::1]:9004', currentDatabase(), foo, 'default', '', + SETTINGS connection_wait_timeout = 123, connect_timeout = 40123002, read_write_timeout = 40123001, connection_pool_size = 3 +); + +SELECT '---'; +SELECT count() FROM mysql('[::1]:9004', currentDatabase(), foo, 'default', '', SETTINGS connection_pool_size = 1, connect_timeout = 100, connection_wait_timeout = 100); +SELECT count() FROM mysql('[::1]:9004', currentDatabase(), foo, 'default', '', SETTINGS connection_pool_size = 0); -- { serverError BAD_ARGUMENTS } + +DROP TABLE foo; diff --git a/parser/testdata/02479_nullable_primary_key_non_first_column/ast.json b/parser/testdata/02479_nullable_primary_key_non_first_column/ast.json new file mode 100644 index 000000000..1c33b626c --- /dev/null +++ b/parser/testdata/02479_nullable_primary_key_non_first_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001287318, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02479_nullable_primary_key_non_first_column/metadata.json b/parser/testdata/02479_nullable_primary_key_non_first_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02479_nullable_primary_key_non_first_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02479_nullable_primary_key_non_first_column/query.sql b/parser/testdata/02479_nullable_primary_key_non_first_column/query.sql new file mode 100644 index 000000000..2d56e315b --- /dev/null +++ b/parser/testdata/02479_nullable_primary_key_non_first_column/query.sql @@ -0,0 +1,11 @@ +drop table if exists test_table; +create table test_table (A Nullable(String), B Nullable(String)) engine MergeTree order by (A,B) settings index_granularity = 1, allow_nullable_key=1; +insert into test_table values ('a', 'b'), ('a', null), (null, 'b'); +select * from test_table where B is null; +drop table test_table; + +DROP TABLE IF EXISTS 
dm_metric_small2; +CREATE TABLE dm_metric_small2 (`x` Nullable(Int64), `y` Nullable(Int64), `z` Nullable(Int64)) ENGINE = MergeTree() ORDER BY (x, y, z) SETTINGS index_granularity = 1, allow_nullable_key = 1; +INSERT INTO dm_metric_small2 VALUES (1,1,NULL) (1,1,1) (1,2,0) (1,2,1) (1,2,NULL) (1,2,NULL); +SELECT * FROM dm_metric_small2 WHERE (x = 1) AND (y = 1) AND z IS NULL; +DROP TABLE dm_metric_small2; \ No newline at end of file diff --git a/parser/testdata/02480_analyzer_alias_nullptr/ast.json b/parser/testdata/02480_analyzer_alias_nullptr/ast.json new file mode 100644 index 000000000..04b16b6d7 --- /dev/null +++ b/parser/testdata/02480_analyzer_alias_nullptr/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000963044, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02480_analyzer_alias_nullptr/metadata.json b/parser/testdata/02480_analyzer_alias_nullptr/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02480_analyzer_alias_nullptr/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02480_analyzer_alias_nullptr/query.sql b/parser/testdata/02480_analyzer_alias_nullptr/query.sql new file mode 100644 index 000000000..07503de1b --- /dev/null +++ b/parser/testdata/02480_analyzer_alias_nullptr/query.sql @@ -0,0 +1,3 @@ +SET enable_analyzer = 1; + +SELECT min(b), x AS b FROM (SELECT max(number) FROM numbers(1)); -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/02480_every_asynchronous_metric_must_have_documentation/ast.json b/parser/testdata/02480_every_asynchronous_metric_must_have_documentation/ast.json new file mode 100644 index 000000000..71ae38ce1 --- /dev/null +++ b/parser/testdata/02480_every_asynchronous_metric_must_have_documentation/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier metric" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.asynchronous_metrics" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function length (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier description" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001039547, + "rows_read": 15, + "bytes_read": 595 + } +} diff --git a/parser/testdata/02480_every_asynchronous_metric_must_have_documentation/metadata.json b/parser/testdata/02480_every_asynchronous_metric_must_have_documentation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02480_every_asynchronous_metric_must_have_documentation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02480_every_asynchronous_metric_must_have_documentation/query.sql b/parser/testdata/02480_every_asynchronous_metric_must_have_documentation/query.sql new file mode 100644 index 000000000..3f0ab58cc 
--- /dev/null +++ b/parser/testdata/02480_every_asynchronous_metric_must_have_documentation/query.sql @@ -0,0 +1 @@ +SELECT metric FROM system.asynchronous_metrics WHERE length(description) < 10; diff --git a/parser/testdata/02480_interval_casting_and_subquery/ast.json b/parser/testdata/02480_interval_casting_and_subquery/ast.json new file mode 100644 index 000000000..8d72d8c6a --- /dev/null +++ b/parser/testdata/02480_interval_casting_and_subquery/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toIntervalSecond (alias interval) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Function plus (alias res) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2017-01-01 00:00:00'" + }, + { + "explain": " Identifier interval" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001316512, + "rows_read": 13, + "bytes_read": 542 + } +} diff --git a/parser/testdata/02480_interval_casting_and_subquery/metadata.json b/parser/testdata/02480_interval_casting_and_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02480_interval_casting_and_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02480_interval_casting_and_subquery/query.sql b/parser/testdata/02480_interval_casting_and_subquery/query.sql new file mode 100644 index 000000000..cb6eccb06 --- /dev/null +++ b/parser/testdata/02480_interval_casting_and_subquery/query.sql @@ -0,0 +1,25 @@ +SELECT toIntervalSecond(5) AS interval, toDateTime('2017-01-01 00:00:00') + interval AS res; +SELECT toIntervalMinute(5) AS interval, toDateTime('2017-01-01 00:00:00') + interval AS res; +SELECT toIntervalHour(5) AS interval, toDateTime('2017-01-01 00:00:00') + interval AS res; +SELECT toIntervalDay(5) AS interval, toDateTime('2017-01-01 00:00:00') + interval AS res; +SELECT toIntervalMonth(5) AS interval, toDateTime('2017-01-01 00:00:00') + interval AS res; +SELECT toIntervalQuarter(5) AS interval, toDateTime('2017-01-01 00:00:00') + interval AS res; +SELECT toIntervalYear(5) AS interval, toDateTime('2017-01-01 00:00:00') + interval AS res; +SELECT CAST(5 AS IntervalNanosecond); +SELECT CAST(5 AS IntervalMicrosecond); +SELECT CAST(5 AS IntervalMillisecond); +SELECT CAST(5 AS IntervalSecond); +SELECT CAST(5 AS IntervalMinute); +SELECT CAST(5 AS IntervalHour); +SELECT CAST(5 AS IntervalDay); +SELECT CAST(5 AS IntervalWeek); +SELECT CAST(5 AS IntervalMonth); +SELECT CAST(5 AS IntervalQuarter); +SELECT CAST(5 AS IntervalYear); +SELECT (SELECT toIntervalSecond(5)) AS interval, toDateTime('2017-01-01 00:00:00') + interval AS res; +SELECT (SELECT toIntervalMinute(5)) AS interval, toDateTime('2017-01-01 00:00:00') + interval AS res; +SELECT (SELECT toIntervalHour(5)) AS interval, toDateTime('2017-01-01 00:00:00') + interval AS res; +SELECT (SELECT toIntervalDay(5)) AS interval, toDateTime('2017-01-01 00:00:00') + interval AS res; +SELECT (SELECT toIntervalMonth(5)) AS interval, toDateTime('2017-01-01 00:00:00') + interval AS res; +SELECT 
(SELECT toIntervalQuarter(5)) AS interval, toDateTime('2017-01-01 00:00:00') + interval AS res; +SELECT (SELECT toIntervalYear(5)) AS interval, toDateTime('2017-01-01 00:00:00') + interval AS res; diff --git a/parser/testdata/02480_max_map_null_totals/ast.json b/parser/testdata/02480_max_map_null_totals/ast.json new file mode 100644 index 000000000..70786f0a9 --- /dev/null +++ b/parser/testdata/02480_max_map_null_totals/ast.json @@ -0,0 +1,124 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function maxMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Function minus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier number" + } + ], + + "rows": 34, + + "statistics": + { + "elapsed": 0.001249175, + "rows_read": 34, + "bytes_read": 1325 + } +} diff --git a/parser/testdata/02480_max_map_null_totals/metadata.json b/parser/testdata/02480_max_map_null_totals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02480_max_map_null_totals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02480_max_map_null_totals/query.sql b/parser/testdata/02480_max_map_null_totals/query.sql new file mode 100644 index 000000000..be2c566dd --- /dev/null +++ b/parser/testdata/02480_max_map_null_totals/query.sql @@ -0,0 +1,39 @@ +SELECT maxMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS ORDER BY number; +SELECT maxMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP ORDER BY number; +SELECT maxMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH CUBE ORDER BY number; + +SELECT minMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS ORDER BY number; +SELECT minMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP ORDER BY number; +SELECT minMap([number % 3, number % 
4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH CUBE ORDER BY number; + +SELECT sumMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS ORDER BY number; +SELECT sumMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP ORDER BY number; +SELECT sumMap([number % 3, number % 4 - 1], [number, NULL]) FROM numbers(3) GROUP BY number WITH CUBE ORDER BY number; + +SELECT '-'; + +SELECT maxMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS ORDER BY number; +SELECT maxMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP ORDER BY number; +SELECT maxMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH CUBE ORDER BY number; + +SELECT minMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS ORDER BY number; +SELECT minMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP ORDER BY number; +SELECT minMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH CUBE ORDER BY number; + +SELECT sumMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS ORDER BY number; +SELECT sumMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP ORDER BY number; +SELECT sumMap([number % 3, number % 4 - 1], [number :: Float64, NULL]) FROM numbers(3) GROUP BY number WITH CUBE ORDER BY number; + +SELECT '-'; + +SELECT maxMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS ORDER BY number; +SELECT maxMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP ORDER BY number; +SELECT maxMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH CUBE ORDER BY number; + +SELECT minMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS ORDER BY number; +SELECT minMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP ORDER BY number; +SELECT minMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH CUBE ORDER BY number; + +SELECT sumMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH TOTALS ORDER BY number; +SELECT sumMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH ROLLUP ORDER BY number; +SELECT sumMap([number % 3, number % 4 - 1], [number :: UInt256, NULL]) FROM numbers(3) GROUP BY number WITH CUBE ORDER BY number; diff --git a/parser/testdata/02480_parse_date_time_best_effort_math_overflow/ast.json b/parser/testdata/02480_parse_date_time_best_effort_math_overflow/ast.json new file mode 100644 index 000000000..945c4374e --- /dev/null +++ b/parser/testdata/02480_parse_date_time_best_effort_math_overflow/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " 
TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier TSV" + }, + { + "explain": " Literal '9279104479c7da1114861274de32208ead91b60e'" + }, + { + "explain": " Set" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001266058, + "rows_read": 13, + "bytes_read": 498 + } +} diff --git a/parser/testdata/02480_parse_date_time_best_effort_math_overflow/metadata.json b/parser/testdata/02480_parse_date_time_best_effort_math_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02480_parse_date_time_best_effort_math_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02480_parse_date_time_best_effort_math_overflow/query.sql b/parser/testdata/02480_parse_date_time_best_effort_math_overflow/query.sql new file mode 100644 index 000000000..5102fb472 --- /dev/null +++ b/parser/testdata/02480_parse_date_time_best_effort_math_overflow/query.sql @@ -0,0 +1,3 @@ +select * from format(TSV, '9279104479c7da1114861274de32208ead91b60e') settings date_time_input_format='best_effort'; +select parseDateTime64BestEffortOrNull('9279104477', 9); +select toDateTime64OrNull('9279104477', 9); diff --git a/parser/testdata/02480_s3_support_wildcard/ast.json b/parser/testdata/02480_s3_support_wildcard/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02480_s3_support_wildcard/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02480_s3_support_wildcard/metadata.json b/parser/testdata/02480_s3_support_wildcard/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02480_s3_support_wildcard/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02480_s3_support_wildcard/query.sql b/parser/testdata/02480_s3_support_wildcard/query.sql new file mode 100644 index 000000000..6078dd139 --- /dev/null +++ b/parser/testdata/02480_s3_support_wildcard/query.sql @@ -0,0 +1,29 @@ +-- Tags: no-parallel, no-fasttest +-- Tag no-fasttest: Depends on AWS + +-- { echo } +drop table if exists test_02480_support_wildcard_write; +drop table if exists test_02480_support_wildcard_write2; +create table test_02480_support_wildcard_write (a UInt64, b String) engine = S3(s3_conn, filename='test_02480_support_wildcard_{_partition_id}', format=Parquet) partition by a; +set s3_truncate_on_insert=1; +insert into test_02480_support_wildcard_write values (1, 'a'), (22, 'b'), (333, 'c'); + +select a, b from s3(s3_conn, filename='test_02480_support_wildcard_*', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='test_02480_support_wildcard_?', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='test_02480_support_wildcard_??', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='test_02480_support_wildcard_?*?', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='test_02480_support_wildcard_{1,333}', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='test_02480_support_wildcard_{1..333}', format=Parquet) order by a; + +create table test_02480_support_wildcard_write2 (a UInt64, b String) engine = S3(s3_conn, filename='prefix/test_02480_support_wildcard_{_partition_id}', format=Parquet) partition by a; +set s3_truncate_on_insert=1; 
+insert into test_02480_support_wildcard_write2 values (4, 'd'), (55, 'f'), (666, 'g'); + +select a, b from s3(s3_conn, filename='*/test_02480_support_wildcard_*', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='*/test_02480_support_wildcard_?', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='prefix/test_02480_support_wildcard_??', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='prefi?/test_02480_support_wildcard_*', format=Parquet) order by a; +select a, b from s3(s3_conn, filename='p?*/test_02480_support_wildcard_{56..666}', format=Parquet) order by a; + +drop table test_02480_support_wildcard_write; +drop table test_02480_support_wildcard_write2; diff --git a/parser/testdata/02480_suspicious_lowcard_in_key/ast.json b/parser/testdata/02480_suspicious_lowcard_in_key/ast.json new file mode 100644 index 000000000..f0a73ed55 --- /dev/null +++ b/parser/testdata/02480_suspicious_lowcard_in_key/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001278364, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02480_suspicious_lowcard_in_key/metadata.json b/parser/testdata/02480_suspicious_lowcard_in_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02480_suspicious_lowcard_in_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02480_suspicious_lowcard_in_key/query.sql b/parser/testdata/02480_suspicious_lowcard_in_key/query.sql new file mode 100644 index 000000000..4408bd2f0 --- /dev/null +++ b/parser/testdata/02480_suspicious_lowcard_in_key/query.sql @@ -0,0 +1,11 @@ +set allow_suspicious_low_cardinality_types=1; + +drop table if exists test; + +create table test (val LowCardinality(Float32)) engine MergeTree order by val; + +insert into test values (nan); + +select count() from test where toUInt64(val) = -1; -- { serverError CANNOT_CONVERT_TYPE } + +drop table if exists test; diff --git a/parser/testdata/02480_tlp_nan/ast.json b/parser/testdata/02480_tlp_nan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02480_tlp_nan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02480_tlp_nan/metadata.json b/parser/testdata/02480_tlp_nan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02480_tlp_nan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02480_tlp_nan/query.sql b/parser/testdata/02480_tlp_nan/query.sql new file mode 100644 index 000000000..55318e0cb --- /dev/null +++ b/parser/testdata/02480_tlp_nan/query.sql @@ -0,0 +1,15 @@ +-- {echo} +SELECT sqrt(-1) as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=1; +SELECT sqrt(-1) as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=0; + +SELECT -inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=1; +SELECT -inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=0; + +SELECT NULL as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=1; +SELECT NULL as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=0; + +SELECT inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=1; +SELECT inf as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=0; + +SELECT nan as x, not(x), not(not(x)), (not(x)) IS 
NULL SETTINGS enable_analyzer=1; +SELECT nan as x, not(x), not(not(x)), (not(x)) IS NULL SETTINGS enable_analyzer=0; diff --git a/parser/testdata/02481_aggregation_in_order_plan/ast.json b/parser/testdata/02481_aggregation_in_order_plan/ast.json new file mode 100644 index 000000000..3ceffbf9e --- /dev/null +++ b/parser/testdata/02481_aggregation_in_order_plan/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001462949, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02481_aggregation_in_order_plan/metadata.json b/parser/testdata/02481_aggregation_in_order_plan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02481_aggregation_in_order_plan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02481_aggregation_in_order_plan/query.sql b/parser/testdata/02481_aggregation_in_order_plan/query.sql new file mode 100644 index 000000000..139e0ed4b --- /dev/null +++ b/parser/testdata/02481_aggregation_in_order_plan/query.sql @@ -0,0 +1,9 @@ +drop table if exists tab; +create table tab (a Int32, b Int32, c Int32, d Int32) engine = MergeTree order by (a, b, c); + +insert into tab select 0, number % 3, 2 - intDiv(number, 3), (number % 3 + 1) * 10 from numbers(6); +insert into tab select 0, number % 3, 2 - intDiv(number, 3), (number % 3 + 1) * 100 from numbers(6); + +select a, any(b), c, d from tab where b = 1 group by a, c, d order by c, d settings optimize_aggregation_in_order=1, query_plan_aggregation_in_order=1; +select * from (explain actions = 1, sorting=1 select a, any(b), c, d from tab where b = 1 group by a, c, d settings optimize_aggregation_in_order=1, query_plan_aggregation_in_order=1) where explain like '%ReadFromMergeTree%' or explain like '%Aggregating%' or explain like '%Order:%' settings enable_analyzer=0; +select * from (explain actions = 1, sorting=1 select a, any(b), c, d from tab where b = 1 group by a, c, d settings optimize_aggregation_in_order=1, query_plan_aggregation_in_order=1) where explain like '%ReadFromMergeTree%' or explain like '%Aggregating%' or explain like '%Order:%' settings enable_analyzer=1; diff --git a/parser/testdata/02481_analyzer_join_alias_unknown_identifier_crash/ast.json b/parser/testdata/02481_analyzer_join_alias_unknown_identifier_crash/ast.json new file mode 100644 index 000000000..ec4b75fc9 --- /dev/null +++ b/parser/testdata/02481_analyzer_join_alias_unknown_identifier_crash/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001159407, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02481_analyzer_join_alias_unknown_identifier_crash/metadata.json b/parser/testdata/02481_analyzer_join_alias_unknown_identifier_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02481_analyzer_join_alias_unknown_identifier_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02481_analyzer_join_alias_unknown_identifier_crash/query.sql b/parser/testdata/02481_analyzer_join_alias_unknown_identifier_crash/query.sql new file mode 100644 index 000000000..8b5b272f5 --- /dev/null +++ 
b/parser/testdata/02481_analyzer_join_alias_unknown_identifier_crash/query.sql @@ -0,0 +1,36 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table_join_1; +CREATE TABLE test_table_join_1 +( + id UInt8, + value String +) +ENGINE = TinyLog; + +INSERT INTO test_table_join_1 VALUES (0, 'Value_0'); + +DROP TABLE IF EXISTS test_table_join_2; +CREATE TABLE test_table_join_2 +( + id UInt16, + value String +) +ENGINE = TinyLog; + +INSERT INTO test_table_join_2 VALUES (0, 'Value_1'); + +SELECT + toTypeName(t2_value), + t2.value AS t2_value +FROM test_table_join_1 AS t1 +INNER JOIN test_table_join_2 USING (id); -- { serverError UNKNOWN_IDENTIFIER }; + +SELECT + toTypeName(t2_value), + t2.value AS t2_value +FROM test_table_join_1 AS t1 +INNER JOIN test_table_join_2 AS t2 USING (id); + +DROP TABLE test_table_join_1; +DROP TABLE test_table_join_2; diff --git a/parser/testdata/02481_analyzer_optimize_aggregation_arithmetics/ast.json b/parser/testdata/02481_analyzer_optimize_aggregation_arithmetics/ast.json new file mode 100644 index 000000000..fe224023f --- /dev/null +++ b/parser/testdata/02481_analyzer_optimize_aggregation_arithmetics/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001901217, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02481_analyzer_optimize_aggregation_arithmetics/metadata.json b/parser/testdata/02481_analyzer_optimize_aggregation_arithmetics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02481_analyzer_optimize_aggregation_arithmetics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02481_analyzer_optimize_aggregation_arithmetics/query.sql b/parser/testdata/02481_analyzer_optimize_aggregation_arithmetics/query.sql new file mode 100644 index 000000000..e68de0af5 --- /dev/null +++ b/parser/testdata/02481_analyzer_optimize_aggregation_arithmetics/query.sql @@ -0,0 +1,14 @@ +SET enable_analyzer = 1; +SET optimize_arithmetic_operations_in_aggregate_functions = 1; + +-- { echoOn } + +EXPLAIN QUERY TREE SELECT avg(log(2) * number) FROM numbers(10); + +EXPLAIN QUERY TREE SELECT avg(number * log(2)) FROM numbers(10); + +SELECT round(avg(log(2) * number), 6) AS k FROM numbers(10000000) GROUP BY number % 3, number % 2; + +SELECT round(avg(number * log(2)), 6) AS k FROM numbers(10000000) GROUP BY number % 3, number % 2; + +-- { echoOff } diff --git a/parser/testdata/02481_analyzer_optimize_grouping_sets_keys/ast.json b/parser/testdata/02481_analyzer_optimize_grouping_sets_keys/ast.json new file mode 100644 index 000000000..9d73097ce --- /dev/null +++ b/parser/testdata/02481_analyzer_optimize_grouping_sets_keys/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001339151, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02481_analyzer_optimize_grouping_sets_keys/metadata.json b/parser/testdata/02481_analyzer_optimize_grouping_sets_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02481_analyzer_optimize_grouping_sets_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02481_analyzer_optimize_grouping_sets_keys/query.sql b/parser/testdata/02481_analyzer_optimize_grouping_sets_keys/query.sql new 
file mode 100644 index 000000000..8e6b132f5 --- /dev/null +++ b/parser/testdata/02481_analyzer_optimize_grouping_sets_keys/query.sql @@ -0,0 +1,29 @@ +set enable_analyzer = 1; +set optimize_syntax_fuse_functions = 0; + +EXPLAIN QUERY TREE run_passes=1 +SELECT avg(log(2) * number) AS k FROM numbers(10000000) +GROUP BY GROUPING SETS (((number % 2) * (number % 3)), number % 3, number % 2) +HAVING avg(log(2) * number) > 3465735.3 +ORDER BY k; + +EXPLAIN QUERY TREE run_passes=1 +SELECT avg(log(2) * number) AS k FROM numbers(10000000) +GROUP BY GROUPING SETS (((number % 2) * (number % 3), number % 3, number % 2), (number % 4)) +HAVING avg(log(2) * number) > 3465735.3 +ORDER BY k; + +EXPLAIN QUERY TREE run_passes=1 +SELECT avg(log(2) * number) AS k FROM numbers(10000000) +GROUP BY GROUPING SETS (((number % 2) * (number % 3), number % 3), (number % 2)) +HAVING avg(log(2) * number) > 3465735.3 +ORDER BY k; + +EXPLAIN QUERY TREE run_passes=1 +SELECT count() FROM numbers(1000) +GROUP BY GROUPING SETS + ( + (number, number + 1, number +2), + (number % 2, number % 3), + (number / 2, number / 3) + ); diff --git a/parser/testdata/02481_array_join_with_map/ast.json b/parser/testdata/02481_array_join_with_map/ast.json new file mode 100644 index 000000000..0b5388ea2 --- /dev/null +++ b/parser/testdata/02481_array_join_with_map/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery arrays_test (children 1)" + }, + { + "explain": " Identifier arrays_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00139459, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/02481_array_join_with_map/metadata.json b/parser/testdata/02481_array_join_with_map/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02481_array_join_with_map/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02481_array_join_with_map/query.sql b/parser/testdata/02481_array_join_with_map/query.sql new file mode 100644 index 000000000..564b99e6e --- /dev/null +++ b/parser/testdata/02481_array_join_with_map/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS arrays_test; + +CREATE TABLE arrays_test +( + s String, + arr1 Array(UInt8), + map1 Map(UInt8, String), + map2 Map(UInt8, String) +) ENGINE = Memory; + +INSERT INTO arrays_test +VALUES ('Hello', [1,2], map(1, '1', 2, '2'), map(1, '1')), ('World', [3,4,5], map(3, '3', 4, '4', 5, '5'), map(3, '3', 4, '4')), ('Goodbye', [], map(), map()); + + +select s, arr1, map1 from arrays_test array join arr1, map1 settings enable_unaligned_array_join = 1; + +select s, arr1, map1 from arrays_test left array join arr1, map1 settings enable_unaligned_array_join = 1; + +select s, map1 from arrays_test array join map1; + +select s, map1 from arrays_test left array join map1; + +select s, map1, map2 from arrays_test array join map1, map2 settings enable_unaligned_array_join = 1; + +select s, map1, map2 from arrays_test left array join map1, map2 settings enable_unaligned_array_join = 1; diff --git a/parser/testdata/02481_default_value_used_in_row_level_filter/ast.json b/parser/testdata/02481_default_value_used_in_row_level_filter/ast.json new file mode 100644 index 000000000..86e073b31 --- /dev/null +++ b/parser/testdata/02481_default_value_used_in_row_level_filter/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_rlp (children 1)" + }, + { 
+ "explain": " Identifier test_rlp" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001178967, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/02481_default_value_used_in_row_level_filter/metadata.json b/parser/testdata/02481_default_value_used_in_row_level_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02481_default_value_used_in_row_level_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02481_default_value_used_in_row_level_filter/query.sql b/parser/testdata/02481_default_value_used_in_row_level_filter/query.sql new file mode 100644 index 000000000..ce1662699 --- /dev/null +++ b/parser/testdata/02481_default_value_used_in_row_level_filter/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS test_rlp; + +CREATE TABLE test_rlp (a Int32, b Int32) ENGINE=MergeTree() ORDER BY a SETTINGS index_granularity=5, index_granularity_bytes = '10Mi'; + +INSERT INTO test_rlp SELECT number, number FROM numbers(15); + +ALTER TABLE test_rlp ADD COLUMN c Int32 DEFAULT b+10; + +-- { echoOn } + +SELECT a, c FROM test_rlp WHERE c%2 == 0 AND b < 5; + +DROP POLICY IF EXISTS test_rlp_policy ON test_rlp; + +CREATE ROW POLICY test_rlp_policy ON test_rlp FOR SELECT USING c%2 == 0 TO default; + +SELECT a, c FROM test_rlp WHERE b < 5 SETTINGS optimize_move_to_prewhere = 0; + +SELECT a, c FROM test_rlp PREWHERE b < 5; + +-- { echoOff } + +DROP POLICY test_rlp_policy ON test_rlp; + +DROP TABLE test_rlp; diff --git a/parser/testdata/02481_fix_parameters_parsing/ast.json b/parser/testdata/02481_fix_parameters_parsing/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02481_fix_parameters_parsing/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02481_fix_parameters_parsing/metadata.json b/parser/testdata/02481_fix_parameters_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02481_fix_parameters_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02481_fix_parameters_parsing/query.sql b/parser/testdata/02481_fix_parameters_parsing/query.sql new file mode 100644 index 000000000..6164ec777 --- /dev/null +++ b/parser/testdata/02481_fix_parameters_parsing/query.sql @@ -0,0 +1,2 @@ +SELECT func(1)(2)(3); -- { clientError SYNTAX_ERROR } +SELECT * FROM VALUES(1)(2); -- { clientError SYNTAX_ERROR } diff --git a/parser/testdata/02481_i43247_ubsan_in_minmaxany/ast.json b/parser/testdata/02481_i43247_ubsan_in_minmaxany/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02481_i43247_ubsan_in_minmaxany/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02481_i43247_ubsan_in_minmaxany/metadata.json b/parser/testdata/02481_i43247_ubsan_in_minmaxany/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02481_i43247_ubsan_in_minmaxany/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02481_i43247_ubsan_in_minmaxany/query.sql b/parser/testdata/02481_i43247_ubsan_in_minmaxany/query.sql new file mode 100644 index 000000000..c893e49fe --- /dev/null +++ b/parser/testdata/02481_i43247_ubsan_in_minmaxany/query.sql @@ -0,0 +1,7 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/43247 +SELECT finalizeAggregation(CAST('AggregateFunction(categoricalInformationValue, Nullable(UInt8), UInt8)AggregateFunction(categoricalInformationValue, Nullable(UInt8), 
UInt8)', + 'AggregateFunction(min, String)')); -- { serverError CANNOT_READ_ALL_DATA } + +-- Value from hex(minState('0123456789012345678901234567890123456789012345678901234567890123')). Size 63 + 1 (64) +SELECT finalizeAggregation(CAST(unhex('4000000030313233343536373839303132333435363738393031323334353637383930313233343536373839303132333435363738393031323334353637383930313233'), + 'AggregateFunction(min, String)')); diff --git a/parser/testdata/02481_inject_random_order_for_select_without_order_by/ast.json b/parser/testdata/02481_inject_random_order_for_select_without_order_by/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02481_inject_random_order_for_select_without_order_by/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02481_inject_random_order_for_select_without_order_by/metadata.json b/parser/testdata/02481_inject_random_order_for_select_without_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02481_inject_random_order_for_select_without_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02481_inject_random_order_for_select_without_order_by/query.sql b/parser/testdata/02481_inject_random_order_for_select_without_order_by/query.sql new file mode 100644 index 000000000..ac8776026 --- /dev/null +++ b/parser/testdata/02481_inject_random_order_for_select_without_order_by/query.sql @@ -0,0 +1,39 @@ +-- A test for the setting `inject_random_order_for_select_without_order_by` + +-- The setting is disabled by default; enable it for the test. +SET inject_random_order_for_select_without_order_by = 1; + +-- Works only with the analyzer enabled +SET enable_analyzer = 1; + +-- If enabled, `ORDER BY rand()` is injected into the query plan. +-- We cannot test the query result directly (because of randomization), so we test the presence +-- or absence of sorting operators in the query plan.
+ +SELECT 'Simple SELECT: expect a Sorting step injected'; +SELECT count() +FROM (EXPLAIN PLAN SELECT number FROM numbers(5)) +WHERE explain LIKE '%Sorting%'; + +SELECT 'Simple SELECT with ORDER BY: no 2nd ORDER BY injected'; +SELECT count() +FROM (EXPLAIN PLAN SELECT number FROM numbers(5) ORDER BY number) +WHERE explain LIKE '%Sorting%'; + +SELECT 'UNION: expect ORDER BY injected into each child'; +SELECT count() +FROM ( + EXPLAIN PLAN + SELECT number FROM numbers(5) + UNION ALL + SELECT number FROM numbers(5) +) +WHERE explain LIKE '%Sorting%'; + +-- Now disable the setting +SET inject_random_order_for_select_without_order_by = 0; + +SELECT 'Simple SELECT: no ORDER BY injected'; +SELECT count() +FROM (EXPLAIN PLAN SELECT number FROM numbers(5)) +WHERE explain LIKE '%Sorting%'; diff --git a/parser/testdata/02481_low_cardinality_with_short_circuit_functins/ast.json b/parser/testdata/02481_low_cardinality_with_short_circuit_functins/ast.json new file mode 100644 index 000000000..6ee957d79 --- /dev/null +++ b/parser/testdata/02481_low_cardinality_with_short_circuit_functins/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001290818, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02481_low_cardinality_with_short_circuit_functins/metadata.json b/parser/testdata/02481_low_cardinality_with_short_circuit_functins/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02481_low_cardinality_with_short_circuit_functins/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02481_low_cardinality_with_short_circuit_functins/query.sql b/parser/testdata/02481_low_cardinality_with_short_circuit_functins/query.sql new file mode 100644 index 000000000..6f33db6aa --- /dev/null +++ b/parser/testdata/02481_low_cardinality_with_short_circuit_functins/query.sql @@ -0,0 +1,26 @@ +set short_circuit_function_evaluation='force_enable'; + +select 'if with one LC argument'; +select if(0, toLowCardinality('a'), 'b'); +select if(1, toLowCardinality('a'), 'b'); +select if(materialize(0), materialize(toLowCardinality('a')), materialize('b')); +select if(number % 2, toLowCardinality('a'), 'b') from numbers(2); +select if(number % 2, materialize(toLowCardinality('a')), materialize('b')) from numbers(2); + +select 'if with LC and NULL arguments'; +select if(0, toLowCardinality('a'), NULL); +select if(1, toLowCardinality('a'), NULL); +select if(materialize(0), materialize(toLowCardinality('a')), NULL); +select if(number % 2, toLowCardinality('a'), NULL) from numbers(2); +select if(number % 2, materialize(toLowCardinality('a')), NULL) from numbers(2); + +select 'if with two LC arguments'; +select if(0, toLowCardinality('a'), toLowCardinality('b')); +select if(1, toLowCardinality('a'), toLowCardinality('b')); +select if(materialize(0), materialize(toLowCardinality('a')), materialize(toLowCardinality('b'))); +select if(number % 2, toLowCardinality('a'), toLowCardinality('b')) from numbers(2); +select if(number % 2, materialize(toLowCardinality('a')), materialize(toLowCardinality('a'))) from numbers(2); + +select if(number % 2, toLowCardinality(number), NULL) from numbers(2); +select if(number % 2, toLowCardinality(number), toLowCardinality(number + 1)) from numbers(2); + diff --git a/parser/testdata/02481_low_cardinality_with_short_circuit_functins_mutations/ast.json 
b/parser/testdata/02481_low_cardinality_with_short_circuit_functins_mutations/ast.json new file mode 100644 index 000000000..a3b794a96 --- /dev/null +++ b/parser/testdata/02481_low_cardinality_with_short_circuit_functins_mutations/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery issue_46128 (children 1)" + }, + { + "explain": " Identifier issue_46128" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00108051, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/02481_low_cardinality_with_short_circuit_functins_mutations/metadata.json b/parser/testdata/02481_low_cardinality_with_short_circuit_functins_mutations/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02481_low_cardinality_with_short_circuit_functins_mutations/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02481_low_cardinality_with_short_circuit_functins_mutations/query.sql b/parser/testdata/02481_low_cardinality_with_short_circuit_functins_mutations/query.sql new file mode 100644 index 000000000..9d183dde9 --- /dev/null +++ b/parser/testdata/02481_low_cardinality_with_short_circuit_functins_mutations/query.sql @@ -0,0 +1,14 @@ +drop table if exists issue_46128; + +create table issue_46128 ( + id Int64, + a LowCardinality(Nullable(String)), + b LowCardinality(Nullable(String)) +) Engine = MergeTree order by id +as select number%100, 'xxxx', 'yyyy' from numbers(10); + +ALTER TABLE issue_46128 UPDATE a = b WHERE id= 1 settings mutations_sync=2; + +select * from issue_46128 where id <= 2 order by id; + +drop table issue_46128; diff --git a/parser/testdata/02481_merge_array_join_sample_by/ast.json b/parser/testdata/02481_merge_array_join_sample_by/ast.json new file mode 100644 index 000000000..2c17191e3 --- /dev/null +++ b/parser/testdata/02481_merge_array_join_sample_by/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02481_mergetree (children 1)" + }, + { + "explain": " Identifier 02481_mergetree" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001220653, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02481_merge_array_join_sample_by/metadata.json b/parser/testdata/02481_merge_array_join_sample_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02481_merge_array_join_sample_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02481_merge_array_join_sample_by/query.sql b/parser/testdata/02481_merge_array_join_sample_by/query.sql new file mode 100644 index 000000000..1c2123a99 --- /dev/null +++ b/parser/testdata/02481_merge_array_join_sample_by/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS 02481_mergetree; +DROP TABLE IF EXISTS 02481_merge; + +CREATE TABLE 02481_mergetree(x UInt64, y UInt64, arr Array(String)) ENGINE = MergeTree ORDER BY x SAMPLE BY x SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +CREATE TABLE 02481_merge(x UInt64, y UInt64, arr Array(String)) ENGINE = Merge(currentDatabase(), '^(02481_mergetree)$'); + +INSERT INTO 02481_mergetree SELECT number, number + 1, [1,2] FROM system.numbers LIMIT 100000; + +SELECT count() FROM 02481_mergetree SAMPLE 1 / 2 ARRAY JOIN arr WHERE x != 0; +SELECT count() FROM 02481_merge SAMPLE 1 / 2 ARRAY JOIN arr WHERE x != 0; + +DROP TABLE 02481_mergetree; +DROP TABLE 
02481_merge; diff --git a/parser/testdata/02481_pk_analysis_with_enum_to_string/ast.json b/parser/testdata/02481_pk_analysis_with_enum_to_string/ast.json new file mode 100644 index 000000000..9af4af27d --- /dev/null +++ b/parser/testdata/02481_pk_analysis_with_enum_to_string/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery gen (children 1)" + }, + { + "explain": " Identifier gen" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001080403, + "rows_read": 2, + "bytes_read": 59 + } +} diff --git a/parser/testdata/02481_pk_analysis_with_enum_to_string/metadata.json b/parser/testdata/02481_pk_analysis_with_enum_to_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02481_pk_analysis_with_enum_to_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02481_pk_analysis_with_enum_to_string/query.sql b/parser/testdata/02481_pk_analysis_with_enum_to_string/query.sql new file mode 100644 index 000000000..021a55ef2 --- /dev/null +++ b/parser/testdata/02481_pk_analysis_with_enum_to_string/query.sql @@ -0,0 +1,23 @@ +CREATE TABLE gen +( + repo_name String, + event_type Enum8('CommitCommentEvent' = 1, 'CreateEvent' = 2, 'DeleteEvent' = 3, 'ForkEvent' = 4, 'GollumEvent' = 5, 'IssueCommentEvent' = 6, 'IssuesEvent' = 7, 'MemberEvent' = 8, 'PublicEvent' = 9, 'PullRequestEvent' = 10, 'PullRequestReviewCommentEvent' = 11, 'PushEvent' = 12, 'ReleaseEvent' = 13, 'SponsorshipEvent' = 14, 'WatchEvent' = 15, 'GistEvent' = 16, 'FollowEvent' = 17, 'DownloadEvent' = 18, 'PullRequestReviewEvent' = 19, 'ForkApplyEvent' = 20, 'Event' = 21, 'TeamAddEvent' = 22), + actor_login String, + created_at DateTime, + action Enum8('none' = 0, 'created' = 1, 'added' = 2, 'edited' = 3, 'deleted' = 4, 'opened' = 5, 'closed' = 6, 'reopened' = 7, 'assigned' = 8, 'unassigned' = 9, 'labeled' = 10, 'unlabeled' = 11, 'review_requested' = 12, 'review_request_removed' = 13, 'synchronize' = 14, 'started' = 15, 'published' = 16, 'update' = 17, 'create' = 18, 'fork' = 19, 'merged' = 20), + number UInt32, + merged_at DateTime +) +ENGINE = GenerateRandom; + +CREATE TABLE github_events AS gen ENGINE=MergeTree ORDER BY (event_type, repo_name, created_at) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO github_events SELECT * FROM gen LIMIT 100000; + +INSERT INTO github_events VALUES ('apache/pulsar','PullRequestEvent','hangc0276','2021-01-22 06:58:03','opened',9276,'1970-01-01 00:00:00') ('apache/pulsar','PullRequestEvent','hangc0276','2021-01-25 02:38:07','closed',9276,'1970-01-01 00:00:00') ('apache/pulsar','PullRequestEvent','hangc0276','2021-01-25 02:38:09','reopened',9276,'1970-01-01 00:00:00') ('apache/pulsar','PullRequestEvent','hangc0276','2021-04-22 06:05:09','closed',9276,'2021-04-22 06:05:08') ('apache/pulsar','IssueCommentEvent','hangc0276','2021-01-23 00:32:09','created',9276,'1970-01-01 00:00:00') ('apache/pulsar','IssueCommentEvent','hangc0276','2021-01-23 02:52:11','created',9276,'1970-01-01 00:00:00') ('apache/pulsar','IssueCommentEvent','hangc0276','2021-01-24 03:02:31','created',9276,'1970-01-01 00:00:00') ('apache/pulsar','IssueCommentEvent','hangc0276','2021-01-25 02:16:42','created',9276,'1970-01-01 00:00:00') ('apache/pulsar','IssueCommentEvent','hangc0276','2021-01-26 06:52:42','created',9276,'1970-01-01 00:00:00') ('apache/pulsar','IssueCommentEvent','hangc0276','2021-01-27 01:10:33','created',9276,'1970-01-01 
00:00:00') ('apache/pulsar','IssueCommentEvent','hangc0276','2021-01-29 02:11:41','created',9276,'1970-01-01 00:00:00') ('apache/pulsar','IssueCommentEvent','hangc0276','2021-02-02 07:35:40','created',9276,'1970-01-01 00:00:00') ('apache/pulsar','IssueCommentEvent','hangc0276','2021-02-03 00:44:26','created',9276,'1970-01-01 00:00:00') ('apache/pulsar','IssueCommentEvent','hangc0276','2021-02-03 02:14:26','created',9276,'1970-01-01 00:00:00') ('apache/pulsar','PullRequestReviewEvent','codelipenghui','2021-03-29 14:31:25','created',9276,'1970-01-01 00:00:00') ('apache/pulsar','PullRequestReviewEvent','eolivelli','2021-03-29 16:34:02','created',9276,'1970-01-01 00:00:00'); + +OPTIMIZE TABLE github_events FINAL; + +SELECT count() +FROM github_events +WHERE (repo_name = 'apache/pulsar') AND (toString(event_type) IN ('PullRequestEvent', 'PullRequestReviewCommentEvent', 'PullRequestReviewEvent', 'IssueCommentEvent')) AND (actor_login NOT IN ('github-actions[bot]', 'codecov-commenter')) AND (number = 9276); diff --git a/parser/testdata/02481_prewhere_filtered_rows_div_by_zero/ast.json b/parser/testdata/02481_prewhere_filtered_rows_div_by_zero/ast.json new file mode 100644 index 000000000..54c815968 --- /dev/null +++ b/parser/testdata/02481_prewhere_filtered_rows_div_by_zero/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_filter (children 1)" + }, + { + "explain": " Identifier test_filter" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001096774, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/02481_prewhere_filtered_rows_div_by_zero/metadata.json b/parser/testdata/02481_prewhere_filtered_rows_div_by_zero/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02481_prewhere_filtered_rows_div_by_zero/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02481_prewhere_filtered_rows_div_by_zero/query.sql b/parser/testdata/02481_prewhere_filtered_rows_div_by_zero/query.sql new file mode 100644 index 000000000..e3ceda156 --- /dev/null +++ b/parser/testdata/02481_prewhere_filtered_rows_div_by_zero/query.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS test_filter; + +-- { echoOn } +CREATE TABLE test_filter(a Int32, b Int32, c Int32) ENGINE = MergeTree() ORDER BY a SETTINGS index_granularity = 3, index_granularity_bytes = '10Mi'; + +INSERT INTO test_filter SELECT number, number+1, (number/2 + 1) % 2 FROM numbers(15); + +SELECT _part_offset, intDiv(_part_offset, 3) as granule, * FROM test_filter ORDER BY _part_offset; + +-- Check that division by zero occurs on some rows +SELECT intDiv(b, c) FROM test_filter; -- { serverError ILLEGAL_DIVISION } +-- Filter out those rows using WHERE or PREWHERE +SELECT intDiv(b, c) FROM test_filter WHERE c != 0; +SELECT intDiv(b, c) FROM test_filter PREWHERE c != 0; +SELECT intDiv(b, c) FROM test_filter PREWHERE c != 0 WHERE b%2 != 0; + + +SET mutations_sync = 2; + +-- Delete all rows where division by zero could occur +DELETE FROM test_filter WHERE c = 0; +-- Test that now division by zero doesn't occur without explicit condition +SELECT intDiv(b, c) FROM test_filter; +SELECT * FROM test_filter PREWHERE intDiv(b, c) > 0; +SELECT * FROM test_filter PREWHERE b != 0 WHERE intDiv(b, c) > 0; + +-- { echoOff } +DROP TABLE test_filter; diff --git a/parser/testdata/02481_s3_throw_if_mismatch_files/ast.json b/parser/testdata/02481_s3_throw_if_mismatch_files/ast.json new file mode 100644 
index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02481_s3_throw_if_mismatch_files/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02481_s3_throw_if_mismatch_files/metadata.json b/parser/testdata/02481_s3_throw_if_mismatch_files/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02481_s3_throw_if_mismatch_files/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02481_s3_throw_if_mismatch_files/query.sql b/parser/testdata/02481_s3_throw_if_mismatch_files/query.sql new file mode 100644 index 000000000..7ec1d3ebd --- /dev/null +++ b/parser/testdata/02481_s3_throw_if_mismatch_files/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-parallel, no-fasttest +-- Tag no-fasttest: Depends on AWS + +-- { echo } +drop table if exists test_02481_mismatch_files; +create table test_02481_mismatch_files (a UInt64, b String) engine = S3(s3_conn, filename='test_02481_mismatch_files_{_partition_id}', format=Parquet) partition by a; +set s3_truncate_on_insert=1; +insert into test_02481_mismatch_files values (1, 'a'), (22, 'b'), (333, 'c'); + +select a, b from s3(s3_conn, filename='test_02481_mismatch_filesxxx*', format=Parquet); -- { serverError CANNOT_EXTRACT_TABLE_STRUCTURE } + +select a, b from s3(s3_conn, filename='test_02481_mismatch_filesxxx*', format=Parquet) settings s3_throw_on_zero_files_match=1; -- { serverError FILE_DOESNT_EXIST } diff --git a/parser/testdata/02481_xxh3_hash_function/ast.json b/parser/testdata/02481_xxh3_hash_function/ast.json new file mode 100644 index 000000000..f6f4794a2 --- /dev/null +++ b/parser/testdata/02481_xxh3_hash_function/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function xxh3 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'ClickHouse'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001167474, + "rows_read": 7, + "bytes_read": 261 + } +} diff --git a/parser/testdata/02481_xxh3_hash_function/metadata.json b/parser/testdata/02481_xxh3_hash_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02481_xxh3_hash_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02481_xxh3_hash_function/query.sql b/parser/testdata/02481_xxh3_hash_function/query.sql new file mode 100644 index 000000000..cd87f08a6 --- /dev/null +++ b/parser/testdata/02481_xxh3_hash_function/query.sql @@ -0,0 +1 @@ +SELECT xxh3('ClickHouse'); diff --git a/parser/testdata/02482_execute_functions_before_sorting_bug/ast.json b/parser/testdata/02482_execute_functions_before_sorting_bug/ast.json new file mode 100644 index 000000000..1e634f699 --- /dev/null +++ b/parser/testdata/02482_execute_functions_before_sorting_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00120718, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02482_execute_functions_before_sorting_bug/metadata.json b/parser/testdata/02482_execute_functions_before_sorting_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- 
/dev/null +++ b/parser/testdata/02482_execute_functions_before_sorting_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02482_execute_functions_before_sorting_bug/query.sql b/parser/testdata/02482_execute_functions_before_sorting_bug/query.sql new file mode 100644 index 000000000..f1a17df5f --- /dev/null +++ b/parser/testdata/02482_execute_functions_before_sorting_bug/query.sql @@ -0,0 +1,9 @@ +set allow_suspicious_low_cardinality_types=1; +drop table if exists test; +create table test (x LowCardinality(Int32)) engine=Memory; +insert into test select 1; +insert into test select 2; +select x + 1e10 from test order by 1e10, x; +select x + (1e10 + 1e20) from test order by (1e10 + 1e20), x; +select x + (pow(2, 2) + pow(3, 2)) from test order by (pow(2,2) + pow(3, 2)), x; +drop table test; diff --git a/parser/testdata/02482_if_with_nothing_argument/ast.json b/parser/testdata/02482_if_with_nothing_argument/ast.json new file mode 100644 index 000000000..311532439 --- /dev/null +++ b/parser/testdata/02482_if_with_nothing_argument/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (alias arr) (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function empty (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier arr" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function arrayElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier arr" + }, + { + "explain": " Literal Int64_-1" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001441586, + "rows_read": 16, + "bytes_read": 598 + } +} diff --git a/parser/testdata/02482_if_with_nothing_argument/metadata.json b/parser/testdata/02482_if_with_nothing_argument/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02482_if_with_nothing_argument/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02482_if_with_nothing_argument/query.sql b/parser/testdata/02482_if_with_nothing_argument/query.sql new file mode 100644 index 000000000..af46ef30d --- /dev/null +++ b/parser/testdata/02482_if_with_nothing_argument/query.sql @@ -0,0 +1,3 @@ +select [] as arr, if(empty(arr), 0, arr[-1]); +select [] as arr, multiIf(empty(arr), 0, length(arr) > 1, arr[-1], 0); + diff --git a/parser/testdata/02482_insert_into_dist_race/ast.json b/parser/testdata/02482_insert_into_dist_race/ast.json new file mode 100644 index 000000000..b5521a0cc --- /dev/null +++ b/parser/testdata/02482_insert_into_dist_race/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tmp_02482 (children 1)" + }, + { + "explain": " Identifier tmp_02482" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001629763, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02482_insert_into_dist_race/metadata.json b/parser/testdata/02482_insert_into_dist_race/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02482_insert_into_dist_race/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02482_insert_into_dist_race/query.sql b/parser/testdata/02482_insert_into_dist_race/query.sql new file mode 100644 index 000000000..fc7896b16 --- /dev/null +++ b/parser/testdata/02482_insert_into_dist_race/query.sql @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS tmp_02482; +DROP TABLE IF EXISTS dist_02482; + +-- This test produces a warning +SET send_logs_level = 'error'; +SET prefer_localhost_replica=0; + +CREATE TABLE tmp_02482 (i UInt64, n LowCardinality(String)) ENGINE = Memory; +CREATE TABLE dist_02482(i UInt64, n LowCardinality(Nullable(String))) ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), tmp_02482, i); + +SET distributed_foreground_insert=1; + +INSERT INTO dist_02482 VALUES (1, '1'), (2, '2'); +INSERT INTO dist_02482 SELECT number, number FROM numbers(1000); + +SET distributed_foreground_insert=0; + +SYSTEM STOP DISTRIBUTED SENDS dist_02482; + +INSERT INTO dist_02482 VALUES (1, '1'),(2, '2'); +INSERT INTO dist_02482 SELECT number, number FROM numbers(1000); + +SYSTEM FLUSH DISTRIBUTED dist_02482; + +DROP TABLE tmp_02482; +DROP TABLE dist_02482; diff --git a/parser/testdata/02482_value_block_assert/ast.json b/parser/testdata/02482_value_block_assert/ast.json new file mode 100644 index 000000000..fa8e9c12f --- /dev/null +++ b/parser/testdata/02482_value_block_assert/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001672206, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02482_value_block_assert/metadata.json b/parser/testdata/02482_value_block_assert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02482_value_block_assert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02482_value_block_assert/query.sql b/parser/testdata/02482_value_block_assert/query.sql new file mode 100644 index 000000000..339fd9d03 --- /dev/null +++ b/parser/testdata/02482_value_block_assert/query.sql @@ -0,0 +1,24 @@ +SET allow_suspicious_low_cardinality_types=1; +CREATE TABLE range_key_dictionary_source_table__fuzz_323 +( + `key` UInt256, + `start_date` Int8, + `end_date` LowCardinality(UInt256), + `value` Tuple(UInt8, Array(DateTime), Decimal(9, 1), Array(Int16), Array(UInt8)), + `value_nullable` UUID +) +ENGINE = TinyLog; +INSERT INTO range_key_dictionary_source_table__fuzz_323 FORMAT Values +(1, toDate('2019-05-20'), toDate('2019-05-20'), 'First', 'First'); -- { error CANNOT_PARSE_INPUT_ASSERTION_FAILED } + + +CREATE TABLE complex_key_dictionary_source_table__fuzz_267 +( + `id` Decimal(38, 30), + `id_key` Array(UUID), + `value` Array(Nullable(DateTime64(3))), + `value_nullable` Nullable(UUID) +) +ENGINE = TinyLog; +INSERT INTO complex_key_dictionary_source_table__fuzz_267 FORMAT Values +(1, 'key', 'First', 'First'); -- { error CANNOT_READ_ARRAY_FROM_TEXT } diff --git a/parser/testdata/02483_add_engine_full_column_to_system_databases/ast.json b/parser/testdata/02483_add_engine_full_column_to_system_databases/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02483_add_engine_full_column_to_system_databases/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02483_add_engine_full_column_to_system_databases/metadata.json 
b/parser/testdata/02483_add_engine_full_column_to_system_databases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02483_add_engine_full_column_to_system_databases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02483_add_engine_full_column_to_system_databases/query.sql b/parser/testdata/02483_add_engine_full_column_to_system_databases/query.sql new file mode 100644 index 000000000..66fd3684f --- /dev/null +++ b/parser/testdata/02483_add_engine_full_column_to_system_databases/query.sql @@ -0,0 +1,3 @@ +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; +CREATE DATABASE IF NOT EXISTS {CLICKHOUSE_DATABASE:Identifier} ENGINE = Replicated('some/path/' || currentDatabase() || '/replicated_database_test', 'shard_1', 'replica_1') SETTINGS max_broken_tables_ratio=1; +SELECT engine_full FROM system.databases WHERE name = current_database(); diff --git a/parser/testdata/02483_check_virtuals_shile_using_structure_from_insertion_table/ast.json b/parser/testdata/02483_check_virtuals_shile_using_structure_from_insertion_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02483_check_virtuals_shile_using_structure_from_insertion_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02483_check_virtuals_shile_using_structure_from_insertion_table/metadata.json b/parser/testdata/02483_check_virtuals_shile_using_structure_from_insertion_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02483_check_virtuals_shile_using_structure_from_insertion_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02483_check_virtuals_shile_using_structure_from_insertion_table/query.sql b/parser/testdata/02483_check_virtuals_shile_using_structure_from_insertion_table/query.sql new file mode 100644 index 000000000..2a0e5e749 --- /dev/null +++ b/parser/testdata/02483_check_virtuals_shile_using_structure_from_insertion_table/query.sql @@ -0,0 +1,9 @@ +-- Tags: no-parallel + +drop table if exists test; +create table test (line String, _file String, _path String) engine=Memory; +insert into function file(02483_data.LineAsString) select 'Hello' settings engine_file_truncate_on_insert=1; +set use_structure_from_insertion_table_in_table_functions=2; +insert into test select *, _file, _path from file(02483_data.LineAsString); +select line, _file from test; +drop table test; diff --git a/parser/testdata/02483_cuturlparameter_with_arrays/ast.json b/parser/testdata/02483_cuturlparameter_with_arrays/ast.json new file mode 100644 index 000000000..e3038c62f --- /dev/null +++ b/parser/testdata/02483_cuturlparameter_with_arrays/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001308243, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02483_cuturlparameter_with_arrays/metadata.json b/parser/testdata/02483_cuturlparameter_with_arrays/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02483_cuturlparameter_with_arrays/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02483_cuturlparameter_with_arrays/query.sql b/parser/testdata/02483_cuturlparameter_with_arrays/query.sql new file mode 100644 index 000000000..0cf95d10a --- /dev/null +++ 
b/parser/testdata/02483_cuturlparameter_with_arrays/query.sql @@ -0,0 +1,64 @@ +SET enable_analyzer = 1; + +-- { echoOn } + +SELECT + cutURLParameter('http://bigmir.net/?a=b&c=d', []), + cutURLParameter('http://bigmir.net/?a=b&c=d', ['a']), + cutURLParameter('http://bigmir.net/?a=b&c=d', ['a', 'c']), + cutURLParameter('http://bigmir.net/?a=b&c=d', ['c']), + cutURLParameter('http://bigmir.net/?a=b&c=d#e=f', ['a', 'e']), + cutURLParameter('http://bigmir.net/?a&c=d#e=f', ['c', 'e']), + cutURLParameter('http://bigmir.net/?a&c=d#e=f', ['e']), + cutURLParameter('http://bigmir.net/?a=b&c=d#e=f&g=h', ['b', 'g']), + cutURLParameter('http://bigmir.net/?a=b&c=d#e', ['a', 'e']), + cutURLParameter('http://bigmir.net/?a=b&c=d#e&g=h', ['c', 'g']), + cutURLParameter('http://bigmir.net/?a=b&c=d#e&g=h', ['e', 'g']), + cutURLParameter('http://bigmir.net/?a=b&c=d#test?e=f&g=h', ['test', 'e']), + cutURLParameter('http://bigmir.net/?a=b&c=d#test?e=f&g=h', ['test', 'g']), + cutURLParameter('//bigmir.net/?a=b&c=d', []), + cutURLParameter('//bigmir.net/?a=b&c=d', ['a']), + cutURLParameter('//bigmir.net/?a=b&c=d', ['a', 'c']), + cutURLParameter('//bigmir.net/?a=b&c=d#e=f', ['a', 'e']), + cutURLParameter('//bigmir.net/?a&c=d#e=f', ['a']), + cutURLParameter('//bigmir.net/?a&c=d#e=f', ['a', 'c']), + cutURLParameter('//bigmir.net/?a&c=d#e=f', ['a', 'e']), + cutURLParameter('//bigmir.net/?a=b&c=d#e=f&g=h', ['c', 'g']), + cutURLParameter('//bigmir.net/?a=b&c=d#e', ['a', 'c']), + cutURLParameter('//bigmir.net/?a=b&c=d#e', ['a', 'e']), + cutURLParameter('//bigmir.net/?a=b&c=d#e&g=h', ['c', 'e']), + cutURLParameter('//bigmir.net/?a=b&c=d#e&g=h', ['e', 'g']), + cutURLParameter('//bigmir.net/?a=b&c=d#test?e=f&g=h', ['test', 'e']), + cutURLParameter('//bigmir.net/?a=b&c=d#test?e=f&g=h', ['test', 'g']) + FORMAT Vertical; + +SELECT + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d'), []), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d'), ['a']), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d'), ['a', 'c']), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d'), ['c']), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e=f'), ['a', 'e']), + cutURLParameter(materialize('http://bigmir.net/?a&c=d#e=f'), ['c', 'e']), + cutURLParameter(materialize('http://bigmir.net/?a&c=d#e=f'), ['e']), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e=f&g=h'), ['b', 'g']), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e'), ['a', 'e']), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e&g=h'), ['c', 'g']), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#e&g=h'), ['e', 'g']), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), ['test', 'e']), + cutURLParameter(materialize('http://bigmir.net/?a=b&c=d#test?e=f&g=h'), ['test', 'g']), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d'), []), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d'), ['a']), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d'), ['a', 'c']), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e=f'), ['a', 'e']), + cutURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), ['a']), + cutURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), ['a', 'c']), + cutURLParameter(materialize('//bigmir.net/?a&c=d#e=f'), ['a', 'e']), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e=f&g=h'), ['c', 'g']), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e'), ['a', 'c']), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e'), ['a', 'e']), + 
cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e&g=h'), ['c', 'e']), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#e&g=h'), ['e', 'g']), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h'), ['test', 'e']), + cutURLParameter(materialize('//bigmir.net/?a=b&c=d#test?e=f&g=h'), ['test', 'g']) + FORMAT Vertical; +-- { echoOff } diff --git a/parser/testdata/02483_substitute_udf_create/ast.json b/parser/testdata/02483_substitute_udf_create/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02483_substitute_udf_create/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02483_substitute_udf_create/metadata.json b/parser/testdata/02483_substitute_udf_create/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02483_substitute_udf_create/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02483_substitute_udf_create/query.sql b/parser/testdata/02483_substitute_udf_create/query.sql new file mode 100644 index 000000000..9cfb198cf --- /dev/null +++ b/parser/testdata/02483_substitute_udf_create/query.sql @@ -0,0 +1,31 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS 02483_substitute_udf; +DROP FUNCTION IF EXISTS 02483_plusone; +DROP FUNCTION IF EXISTS 02483_plustwo; +DROP FUNCTION IF EXISTS 02483_plusthree; + +-- { echo } +CREATE FUNCTION 02483_plusone AS (a) -> a + 1; +CREATE TABLE 02483_substitute_udf (id UInt32, number UInt32 DEFAULT 02483_plusone(id)) ENGINE=MergeTree() ORDER BY id; +DESC TABLE 02483_substitute_udf; +INSERT INTO 02483_substitute_udf (id, number) VALUES (1, NULL); +SELECT * FROM 02483_substitute_udf ORDER BY id; + +CREATE FUNCTION 02483_plustwo AS (a) -> a + 2; +ALTER TABLE 02483_substitute_udf MODIFY COLUMN number UInt32 DEFAULT 02483_plustwo(id); +DESC TABLE 02483_substitute_udf; +INSERT INTO 02483_substitute_udf (id, number) VALUES (5, NULL); +SELECT * FROM 02483_substitute_udf ORDER BY id; + +CREATE FUNCTION 02483_plusthree AS (a) -> a + 3; +ALTER TABLE 02483_substitute_udf DROP COLUMN number; +ALTER TABLE 02483_substitute_udf ADD COLUMN new_number UInt32 DEFAULT 02483_plusthree(id); +DESC TABLE 02483_substitute_udf; +INSERT INTO 02483_substitute_udf (id, new_number) VALUES (10, NULL); +SELECT * FROM 02483_substitute_udf ORDER BY id; + +DROP TABLE 02483_substitute_udf; +DROP FUNCTION 02483_plusone; +DROP FUNCTION 02483_plustwo; +DROP FUNCTION 02483_plusthree; diff --git a/parser/testdata/02484_substitute_udf_storage_args/ast.json b/parser/testdata/02484_substitute_udf_storage_args/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02484_substitute_udf_storage_args/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02484_substitute_udf_storage_args/metadata.json b/parser/testdata/02484_substitute_udf_storage_args/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02484_substitute_udf_storage_args/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02484_substitute_udf_storage_args/query.sql b/parser/testdata/02484_substitute_udf_storage_args/query.sql new file mode 100644 index 000000000..a39c6009d --- /dev/null +++ b/parser/testdata/02484_substitute_udf_storage_args/query.sql @@ -0,0 +1,37 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS 02484_substitute_udf; +DROP FUNCTION IF EXISTS 02484_plusone; +DROP FUNCTION IF EXISTS 02484_plustwo; +DROP FUNCTION IF EXISTS 02484_plusthree; +DROP FUNCTION IF EXISTS 
02484_plusthreemonths; +DROP FUNCTION IF EXISTS 02484_plusthreedays; + +CREATE FUNCTION 02484_plusone AS (a) -> a + 1; +CREATE FUNCTION 02484_plustwo AS (a) -> a + 2; +CREATE FUNCTION 02484_plusthreemonths AS (a) -> a + INTERVAL 3 MONTH; + +-- { echo } +CREATE TABLE 02484_substitute_udf (id UInt32, dt DateTime, number UInt32) +ENGINE=MergeTree() +ORDER BY 02484_plusone(id) +PARTITION BY 02484_plustwo(id) +SAMPLE BY 02484_plusone(id) +TTL 02484_plusthreemonths(dt); + +SHOW CREATE TABLE 02484_substitute_udf; + +CREATE FUNCTION 02484_plusthree AS (a) -> a + 3; +ALTER TABLE 02484_substitute_udf ADD COLUMN id2 UInt64, MODIFY ORDER BY (02484_plusone(id), 02484_plusthree(id2)); +SHOW CREATE TABLE 02484_substitute_udf; + +CREATE FUNCTION 02484_plusthreedays AS (a) -> a + INTERVAL 3 DAY; +ALTER TABLE 02484_substitute_udf MODIFY TTL 02484_plusthreedays(dt); +SHOW CREATE TABLE 02484_substitute_udf; + +DROP TABLE 02484_substitute_udf; +DROP FUNCTION 02484_plusone; +DROP FUNCTION 02484_plustwo; +DROP FUNCTION 02484_plusthree; +DROP FUNCTION 02484_plusthreemonths; +DROP FUNCTION 02484_plusthreedays; diff --git a/parser/testdata/02486_truncate_and_unexpected_parts/ast.json b/parser/testdata/02486_truncate_and_unexpected_parts/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02486_truncate_and_unexpected_parts/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02486_truncate_and_unexpected_parts/metadata.json b/parser/testdata/02486_truncate_and_unexpected_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02486_truncate_and_unexpected_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02486_truncate_and_unexpected_parts/query.sql b/parser/testdata/02486_truncate_and_unexpected_parts/query.sql new file mode 100644 index 000000000..507ac7289 --- /dev/null +++ b/parser/testdata/02486_truncate_and_unexpected_parts/query.sql @@ -0,0 +1,69 @@ + +create table rmt (n int) engine=ReplicatedMergeTree('/test/02468/{database}', '1') order by tuple() partition by n % 2 settings replicated_max_ratio_of_wrong_parts=0, max_suspicious_broken_parts=0, max_suspicious_broken_parts_bytes=0; +create table rmt1 (n int) engine=ReplicatedMergeTree('/test/02468/{database}', '2') order by tuple() partition by n % 2 settings replicated_max_ratio_of_wrong_parts=0, max_suspicious_broken_parts=0, max_suspicious_broken_parts_bytes=0; + +system stop cleanup rmt; +system stop merges rmt1; + +insert into rmt select * from numbers(10) settings max_block_size=1, max_insert_threads=1; + +alter table rmt drop partition id '0'; +truncate table rmt1; + +system sync replica rmt; +system sync replica rmt1; + +detach table rmt sync; +detach table rmt1 sync; + +attach table rmt; +attach table rmt1; + +insert into rmt values (1); +insert into rmt1 values (2); +system sync replica rmt; +system sync replica rmt1; + +select *, _table from merge(currentDatabase(), '') order by _table, (*,); +select 0; + +create table rmt2 (n int) engine=ReplicatedMergeTree('/test/02468/{database}2', '1') order by tuple() partition by n % 2 settings replicated_max_ratio_of_wrong_parts=0, max_suspicious_broken_parts=0, max_suspicious_broken_parts_bytes=0; + +system stop cleanup rmt; +system stop merges rmt1; +insert into rmt select * from numbers(10) settings max_block_size=1, max_insert_threads=1; +system sync replica rmt1 lightweight; + +alter table rmt replace partition id '0' from rmt2; +alter table rmt1 move partition id '1' 
to table rmt2; + +detach table rmt sync; +detach table rmt1 sync; + +attach table rmt; +attach table rmt1; + +insert into rmt values (1); +insert into rmt1 values (2); +system sync replica rmt; +system sync replica rmt1; +system sync replica rmt2; + +select *, _table from merge(currentDatabase(), '') order by _table, (*,); + + +create table rmt3 (n int) engine=ReplicatedMergeTree('/test/02468/{database}3', '1') order by tuple() settings replicated_max_ratio_of_wrong_parts=0, max_suspicious_broken_parts=0, max_suspicious_broken_parts_bytes=0; +set insert_keeper_fault_injection_probability=0; +insert into rmt3 values (1); +insert into rmt3 values (2); +insert into rmt3 values (3); + +system stop cleanup rmt3; +system sync replica rmt3 pull; +alter table rmt3 drop part 'all_1_1_0'; +optimize table rmt3 final; + +detach table rmt3 sync; +attach table rmt3; + +select * from rmt3 order by n; diff --git a/parser/testdata/02487_create_index_normalize_functions/ast.json b/parser/testdata/02487_create_index_normalize_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02487_create_index_normalize_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02487_create_index_normalize_functions/metadata.json b/parser/testdata/02487_create_index_normalize_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02487_create_index_normalize_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02487_create_index_normalize_functions/query.sql b/parser/testdata/02487_create_index_normalize_functions/query.sql new file mode 100644 index 000000000..2155f5d66 --- /dev/null +++ b/parser/testdata/02487_create_index_normalize_functions/query.sql @@ -0,0 +1,6 @@ + +create table rmt (n int, ts DateTime64(8, 'UTC')) engine=ReplicatedMergeTree('/test/02487/{database}/rmt', '1') order by n; +alter table rmt add index idx1 date(ts) TYPE MinMax GRANULARITY 1; +create index idx2 on rmt date(ts) TYPE MinMax GRANULARITY 1; +system restart replica rmt; +create table rmt2 (n int, ts DateTime64(8, 'UTC'), index idx1 date(ts) TYPE MinMax GRANULARITY 1, index idx2 date(ts) TYPE MinMax GRANULARITY 1) engine=ReplicatedMergeTree('/test/02487/{database}/rmt', '2') order by n; diff --git a/parser/testdata/02489_analyzer_indexes/ast.json b/parser/testdata/02489_analyzer_indexes/ast.json new file mode 100644 index 000000000..c5013e7af --- /dev/null +++ b/parser/testdata/02489_analyzer_indexes/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001493588, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02489_analyzer_indexes/metadata.json b/parser/testdata/02489_analyzer_indexes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02489_analyzer_indexes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02489_analyzer_indexes/query.sql b/parser/testdata/02489_analyzer_indexes/query.sql new file mode 100644 index 000000000..dcf18016d --- /dev/null +++ b/parser/testdata/02489_analyzer_indexes/query.sql @@ -0,0 +1,61 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value_1 String, + value_2 String, + value_3 String, + INDEX value_1_idx (value_1) TYPE bloom_filter GRANULARITY 1, + INDEX value_2_idx 
(value_2) TYPE ngrambf_v1(3, 512, 2, 0) GRANULARITY 1, + INDEX value_3_idx (value_3) TYPE tokenbf_v1(512, 3, 0) GRANULARITY 1 +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table SELECT number, toString(number), toString(number), toString(number) FROM numbers(10); + +SELECT count() FROM test_table WHERE id = 1 SETTINGS force_primary_key = 1; + +SELECT count() FROM test_table WHERE value_1 = '1' SETTINGS force_data_skipping_indices = 'value_1_idx'; + +SELECT count() FROM test_table WHERE id = 1 AND value_1 = '1' SETTINGS force_primary_key = 1, force_data_skipping_indices = 'value_1_idx'; + +SELECT count() FROM test_table WHERE value_2 = '1' SETTINGS force_data_skipping_indices = 'value_2_idx'; + +SELECT count() FROM test_table WHERE value_1 = '1' AND value_2 = '1' SETTINGS force_data_skipping_indices = 'value_1_idx, value_2_idx'; + +SELECT count() FROM test_table WHERE id = 1 AND value_1 = '1' AND value_2 = '1' SETTINGS force_primary_key = 1, force_data_skipping_indices = 'value_1_idx, value_2_idx'; + +SELECT count() FROM test_table WHERE value_3 = '1' SETTINGS force_data_skipping_indices = 'value_3_idx'; + +SELECT count() FROM test_table WHERE id = 1 AND value_3 = '1' SETTINGS force_primary_key = 1, force_data_skipping_indices = 'value_3_idx'; + +SELECT count() FROM test_table WHERE id = 1 AND value_1 = '1' AND value_2 = '1' AND value_3 = '1' +SETTINGS force_primary_key = 1, force_data_skipping_indices = 'value_1_idx, value_2_idx, value_3_idx'; + +SELECT count() FROM test_table AS t1 INNER JOIN (SELECT number AS id FROM numbers(10)) AS t2 ON t1.id = t2.id +WHERE t1.id = 1 SETTINGS force_primary_key = 1; + +SELECT count() FROM test_table AS t1 INNER JOIN (SELECT number AS id FROM numbers(10)) AS t2 ON t1.id = t2.id +WHERE t1.value_1 = '1' SETTINGS force_data_skipping_indices = 'value_1_idx'; + +SELECT count() FROM test_table AS t1 INNER JOIN (SELECT number AS id FROM numbers(10)) AS t2 ON t1.id = t2.id +WHERE t1.id = 1 AND t1.value_1 = '1' SETTINGS force_primary_key = 1, force_data_skipping_indices = 'value_1_idx'; + +SELECT count() FROM test_table AS t1 INNER JOIN (SELECT number AS id FROM numbers(10)) AS t2 ON t1.id = t2.id +WHERE t1.value_2 = '1' SETTINGS force_data_skipping_indices = 'value_2_idx'; + +SELECT count() FROM test_table AS t1 INNER JOIN (SELECT number AS id FROM numbers(10)) AS t2 ON t1.id = t2.id +WHERE t1.value_1 = '1' AND t1.value_2 = '1' SETTINGS force_data_skipping_indices = 'value_1_idx, value_2_idx'; + +SELECT count() FROM test_table AS t1 INNER JOIN (SELECT number AS id FROM numbers(10)) AS t2 ON t1.id = t2.id +WHERE t1.id = 1 AND t1.value_1 = '1' AND t1.value_2 = '1' SETTINGS force_primary_key = 1, force_data_skipping_indices = 'value_1_idx, value_2_idx'; + +SELECT count() FROM test_table AS t1 INNER JOIN (SELECT number AS id FROM numbers(10)) AS t2 ON t1.id = t2.id +WHERE t1.value_3 = '1' SETTINGS force_data_skipping_indices = 'value_3_idx'; + +SELECT count() FROM test_table AS t1 INNER JOIN (SELECT number AS id FROM numbers(10)) AS t2 ON t1.id = t2.id +WHERE t1.id = 1 AND t1.value_1 = '1' AND t1.value_2 = '1' AND t1.value_3 = '1' +SETTINGS force_primary_key = 1, force_data_skipping_indices = 'value_1_idx, value_2_idx, value_3_idx'; + +DROP TABLE test_table; diff --git a/parser/testdata/02490_replacing_merge_tree_is_deleted_column/ast.json b/parser/testdata/02490_replacing_merge_tree_is_deleted_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02490_replacing_merge_tree_is_deleted_column/ast.json @@ -0,0 +1 @@ 
+{"error": true} diff --git a/parser/testdata/02490_replacing_merge_tree_is_deleted_column/metadata.json b/parser/testdata/02490_replacing_merge_tree_is_deleted_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02490_replacing_merge_tree_is_deleted_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02490_replacing_merge_tree_is_deleted_column/query.sql b/parser/testdata/02490_replacing_merge_tree_is_deleted_column/query.sql new file mode 100644 index 000000000..80c18ae30 --- /dev/null +++ b/parser/testdata/02490_replacing_merge_tree_is_deleted_column/query.sql @@ -0,0 +1,174 @@ +-- Tags: zookeeper + +-- Settings allow_deprecated_syntax_for_merge_tree prevent to enable the is_deleted column +set allow_deprecated_syntax_for_merge_tree=0; + +-- Test the bahaviour without the is_deleted column +DROP TABLE IF EXISTS test; +CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version) Order by (uid) settings allow_experimental_replacing_merge_with_cleanup=1; +INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0); +SELECT '== Test SELECT ... FINAL - no is_deleted =='; +select * from test FINAL order by uid; +OPTIMIZE TABLE test FINAL CLEANUP; +select * from test order by uid; + +DROP TABLE IF EXISTS test; +CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version) Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1; +INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0); +SELECT '== Test SELECT ... FINAL - no is_deleted SETTINGS clean_deleted_rows=Always =='; +select * from test FINAL order by uid; +OPTIMIZE TABLE test FINAL CLEANUP; +select * from test order by uid; + +-- Test the new behaviour +DROP TABLE IF EXISTS test; +CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid) settings allow_experimental_replacing_merge_with_cleanup=1; +INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0); +SELECT '== Test SELECT ... 
+SELECT '== Test SELECT ... FINAL =='; +select * from test FINAL order by uid; +select * from test order by uid; + +SELECT '== Insert backups =='; +INSERT INTO test (*) VALUES ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1); +select * from test FINAL order by uid; + +SELECT '== Insert a second batch with overlapping data =='; +INSERT INTO test (*) VALUES ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 1), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0), ('d2', 2, 1), ('d2', 3, 0), ('d3', 2, 1), ('d3', 3, 0); +select * from test FINAL order by uid; + +DROP TABLE IF EXISTS test; +CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid) settings allow_experimental_replacing_merge_with_cleanup=1; + +-- Expect d6 to be version=3 is_deleted=false +INSERT INTO test (*) VALUES ('d1', 1, 0), ('d1', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d1', 5, 0), ('d2', 1, 0), ('d3', 1, 0), ('d4', 1, 0), ('d5', 1, 0), ('d6', 1, 0), ('d6', 3, 0); +-- Insert a previous version of 'd6', but only v=3 is_deleted=false will remain +INSERT INTO test (*) VALUES ('d1', 1, 0), ('d1', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d1', 5, 0), ('d2', 1, 0), ('d3', 1, 0), ('d4', 1, 0), ('d5', 1, 0), ('d6', 1, 0), ('d6', 2, 1); +SELECT '== Only last version remains after OPTIMIZE W/ CLEANUP =='; +OPTIMIZE TABLE test FINAL CLEANUP; +select * from test order by uid; + +-- insert d6 v=3 is_deleted=true (its timestamp is more recent, so this version should be the one taken into account) +INSERT INTO test (*) VALUES ('d1', 1, 0), ('d1', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d1', 5, 0), ('d2', 1, 0), ('d3', 1, 0), ('d4', 1, 0), ('d5', 1, 0), ('d6', 1, 0), ('d6', 3, 1); + +SELECT '== OPTIMIZE W/ CLEANUP (remove d6) =='; +OPTIMIZE TABLE test FINAL CLEANUP; +-- No d6 anymore +select * from test order by uid; + +DROP TABLE IF EXISTS test; +CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1; + +SELECT '== Test of the SETTINGS clean_deleted_rows as Always =='; +INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0); +-- Even if the setting is set to Always, SELECT ... FINAL doesn't delete rows +select * from test FINAL order by uid; +select * from test order by uid; + +OPTIMIZE TABLE test FINAL; +-- d6 has to be removed since we set clean_deleted_rows to 'Always' +select * from test where is_deleted=0 order by uid; + +SELECT '== Test of the SETTINGS clean_deleted_rows as Never =='; +ALTER TABLE test MODIFY SETTING clean_deleted_rows='Never'; +INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0); +INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0); +OPTIMIZE TABLE test FINAL; +-- d6 must NOT be removed since we set clean_deleted_rows to 'Never' +select * from test order by uid; + +DROP TABLE IF EXISTS testCleanupR1; +
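+-- The same semantics should hold for replicated tables, where CLEANUP merges are scheduled
+-- through the replication log.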
+CREATE TABLE testCleanupR1 (uid String, version UInt32, is_deleted UInt8) + ENGINE = ReplicatedReplacingMergeTree('/clickhouse/{database}/tables/test_cleanup/', 'r1', version, is_deleted) + ORDER BY uid settings allow_experimental_replacing_merge_with_cleanup=1; + + +INSERT INTO testCleanupR1 (*) VALUES ('d1', 1, 0),('d2', 1, 0),('d3', 1, 0),('d4', 1, 0); +INSERT INTO testCleanupR1 (*) VALUES ('d3', 2, 1); +INSERT INTO testCleanupR1 (*) VALUES ('d1', 2, 1); +SYSTEM SYNC REPLICA testCleanupR1; -- Avoid "Cannot select parts for optimization: Entry for part all_2_2_0 hasn't been read from the replication log yet" + +OPTIMIZE TABLE testCleanupR1 FINAL CLEANUP; + +-- Only d2 and d4 remain +SELECT '== (Replicas) Test optimize =='; +SELECT * FROM testCleanupR1 order by uid; + +------------------------------ + +DROP TABLE IF EXISTS testSettingsR1; + +CREATE TABLE testSettingsR1 (col1 String, version UInt32, is_deleted UInt8) + ENGINE = ReplicatedReplacingMergeTree('/clickhouse/{database}/tables/test_setting/', 'r1', version, is_deleted) + ORDER BY col1 + SETTINGS clean_deleted_rows = 'Always', allow_experimental_replacing_merge_with_cleanup=1; + +INSERT INTO testSettingsR1 (*) VALUES ('c1', 1, 1),('c2', 1, 0),('c3', 1, 1),('c4', 1, 0); +SYSTEM SYNC REPLICA testSettingsR1; -- Avoid "Cannot select parts for optimization: Entry for part all_2_2_0 hasn't been read from the replication log yet" + +OPTIMIZE TABLE testSettingsR1 FINAL; + +-- Only c2 and c4 remain +SELECT '== (Replicas) Test settings =='; +SELECT * FROM testSettingsR1 where is_deleted=0 order by col1; + + +------------------------------ +-- Check errors +DROP TABLE IF EXISTS test; +CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid) settings allow_experimental_replacing_merge_with_cleanup=1; + +-- is_deleted == 0/1 +INSERT INTO test (*) VALUES ('d1', 1, 2); -- { serverError INCORRECT_DATA } + +DROP TABLE IF EXISTS test; +-- check the is_deleted type +CREATE TABLE test (uid String, version UInt32, is_deleted String) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid); -- { serverError BAD_TYPE_OF_FIELD } + +CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid); +INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0); +select 'no cleanup 1', * from test FINAL order by uid; +OPTIMIZE TABLE test FINAL CLEANUP; -- { serverError SUPPORT_IS_DISABLED } +select 'no cleanup 2', * from test order by uid; +DROP TABLE test; + +CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplicatedReplacingMergeTree('/clickhouse/{database}/tables/no_cleanup/', 'r1', version, is_deleted) Order by (uid); +INSERT INTO test (*) VALUES ('d1', 1, 0), ('d2', 1, 0), ('d6', 1, 0), ('d4', 1, 0), ('d6', 2, 1), ('d3', 1, 0), ('d1', 2, 1), ('d5', 1, 0), ('d4', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d4', 3, 0), ('d1', 5, 0); +select 'no cleanup 3', * from test FINAL order by uid; +OPTIMIZE TABLE test FINAL CLEANUP; -- { serverError SUPPORT_IS_DISABLED } +select 'no cleanup 4', * from test order by uid; +DROP TABLE test; + +-- is_deleted column for other MergeTrees - ErrorCodes::LOGICAL_ERROR + +-- Check clean_deleted_rows='Always' for other MergeTrees +SELECT '== Check cleanup & settings for other merge trees ==';
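+-- OPTIMIZE ... FINAL CLEANUP is only supported by ReplacingMergeTree; every other MergeTree
+-- variant below is expected to reject it with CANNOT_ASSIGN_OPTIMIZE.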
+CREATE TABLE testMT (uid String, version UInt32, is_deleted UInt8) ENGINE = MergeTree() Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1; +INSERT INTO testMT (*) VALUES ('d1', 1, 1); +OPTIMIZE TABLE testMT FINAL CLEANUP; -- { serverError CANNOT_ASSIGN_OPTIMIZE } +OPTIMIZE TABLE testMT FINAL; +SELECT * FROM testMT order by uid; + +CREATE TABLE testSummingMT (uid String, version UInt32, is_deleted UInt8) ENGINE = SummingMergeTree() Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1; +INSERT INTO testSummingMT (*) VALUES ('d1', 1, 1); +OPTIMIZE TABLE testSummingMT FINAL CLEANUP; -- { serverError CANNOT_ASSIGN_OPTIMIZE } +OPTIMIZE TABLE testSummingMT FINAL; +SELECT * FROM testSummingMT order by uid; + +CREATE TABLE testAggregatingMT (uid String, version UInt32, is_deleted UInt8) ENGINE = AggregatingMergeTree() Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1; +INSERT INTO testAggregatingMT (*) VALUES ('d1', 1, 1); +OPTIMIZE TABLE testAggregatingMT FINAL CLEANUP; -- { serverError CANNOT_ASSIGN_OPTIMIZE } +OPTIMIZE TABLE testAggregatingMT FINAL; +SELECT * FROM testAggregatingMT order by uid; + +CREATE TABLE testCollapsingMT (uid String, version UInt32, is_deleted UInt8, sign Int8) ENGINE = CollapsingMergeTree(sign) Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1; +INSERT INTO testCollapsingMT (*) VALUES ('d1', 1, 1, 1); +OPTIMIZE TABLE testCollapsingMT FINAL CLEANUP; -- { serverError CANNOT_ASSIGN_OPTIMIZE } +OPTIMIZE TABLE testCollapsingMT FINAL; +SELECT * FROM testCollapsingMT order by uid; + +CREATE TABLE testVersionedCMT (uid String, version UInt32, is_deleted UInt8, sign Int8) ENGINE = VersionedCollapsingMergeTree(sign, version) Order by (uid) SETTINGS clean_deleted_rows='Always', allow_experimental_replacing_merge_with_cleanup=1; +INSERT INTO testVersionedCMT (*) VALUES ('d1', 1, 1, 1); +OPTIMIZE TABLE testVersionedCMT FINAL CLEANUP; -- { serverError CANNOT_ASSIGN_OPTIMIZE } +OPTIMIZE TABLE testVersionedCMT FINAL; +SELECT * FROM testVersionedCMT order by uid; diff --git a/parser/testdata/02490_replacing_merge_tree_is_deleted_column_transform_opt/ast.json b/parser/testdata/02490_replacing_merge_tree_is_deleted_column_transform_opt/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02490_replacing_merge_tree_is_deleted_column_transform_opt/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02490_replacing_merge_tree_is_deleted_column_transform_opt/metadata.json b/parser/testdata/02490_replacing_merge_tree_is_deleted_column_transform_opt/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02490_replacing_merge_tree_is_deleted_column_transform_opt/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02490_replacing_merge_tree_is_deleted_column_transform_opt/query.sql b/parser/testdata/02490_replacing_merge_tree_is_deleted_column_transform_opt/query.sql new file mode 100644 index 000000000..dab053d3a --- /dev/null +++ b/parser/testdata/02490_replacing_merge_tree_is_deleted_column_transform_opt/query.sql @@ -0,0 +1,72 @@ +-- Test that FINAL queries on ReplacingMergeTree + is_deleted make use of optimizations.
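+-- With do_not_merge_across_partitions_select_final=1, a partition whose data sits in a single
+-- merged part can be read by FINAL without the merging transform; the queries below exercise
+-- both that fast path and partitions that still hold multiple, possibly intersecting, parts.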
+ +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab ( + pkey String, + id Int32, + v Int32, + version UInt64, + is_deleted UInt8 +) Engine = ReplacingMergeTree(version,is_deleted) +PARTITION BY pkey ORDER BY id +SETTINGS index_granularity=512; + +-- insert 10000 rows in partition 'A' and delete half of them and merge the 2 parts +INSERT INTO tab SELECT 'A', number, number, 1, 0 FROM numbers(10000); +INSERT INTO tab SELECT 'A', number, number + 1, 2, IF(number % 2 = 0, 0, 1) FROM numbers(10000); + +OPTIMIZE TABLE tab SETTINGS mutations_sync = 2; + +SYSTEM STOP MERGES tab; + +-- insert 10000 rows in partition 'B' and delete half of them, but keep 2 parts +INSERT INTO tab SELECT 'B', number+1000000, number, 1, 0 FROM numbers(10000); +INSERT INTO tab SELECT 'B', number+1000000, number + 1, 2, IF(number % 2 = 0, 0, 1) FROM numbers(10000); + +SET do_not_merge_across_partitions_select_final=1; + +-- verify : 10000 rows expected +SELECT count() +FROM tab FINAL; + +-- add a filter : 9950 rows expected +SELECT count() +FROM tab FINAL +WHERE id >= 100; + +-- only even id's are left - 0 rows expected +SELECT count() +FROM tab FINAL +WHERE (id % 2) = 1; + +-- 10000 rows expected +SELECT count() +FROM tab FINAL +WHERE (id % 2) = 0; + +-- create some more partitions +INSERT INTO tab SELECT 'C', number+2000000, number, 1, 0 FROM numbers(100); + +-- insert and delete some rows to get intersecting/non-intersecting ranges in same partition +INSERT INTO tab SELECT 'D', number+3000000, number, 1, 0 FROM numbers(10000); +INSERT INTO tab SELECT 'D', number+3000000, number + 1, 1, IF(number % 2 = 0, 0, 1) FROM numbers(5000); + +INSERT INTO tab SELECT 'E', number+4000000, number, 1, 0 FROM numbers(100); + +-- Total 10000 (From A & B) + 100 (From C) + 7500 (From D) + 100 (From E) = 17700 rows +SELECT count() +FROM tab FINAL +SETTINGS do_not_merge_across_partitions_select_final=0,split_intersecting_parts_ranges_into_layers_final=0; + +SELECT count() +FROM tab FINAL +SETTINGS do_not_merge_across_partitions_select_final=1,split_intersecting_parts_ranges_into_layers_final=1; + +SYSTEM START MERGES tab; +OPTIMIZE TABLE tab FINAL SETTINGS mutations_sync = 2; + +SELECT count() +FROM tab FINAL; + +DROP TABLE IF EXISTS tab; diff --git a/parser/testdata/02491_part_log_has_table_uuid/ast.json b/parser/testdata/02491_part_log_has_table_uuid/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02491_part_log_has_table_uuid/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02491_part_log_has_table_uuid/metadata.json b/parser/testdata/02491_part_log_has_table_uuid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02491_part_log_has_table_uuid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02491_part_log_has_table_uuid/query.sql b/parser/testdata/02491_part_log_has_table_uuid/query.sql new file mode 100644 index 000000000..fa9c103e3 --- /dev/null +++ b/parser/testdata/02491_part_log_has_table_uuid/query.sql @@ -0,0 +1,22 @@ +-- Tags: no-ordinary-database + +create table data_02491 (key Int) engine=MergeTree() order by tuple() settings old_parts_lifetime=600; +insert into data_02491 values (1); +optimize table data_02491 final; +truncate table data_02491; + +system flush logs part_log; +with (select uuid from system.tables where database = currentDatabase() and table = 'data_02491') as table_uuid_ +select + table_uuid != toUUIDOrDefault(Null), + event_type, + merge_reason, + part_name +from 
system.part_log +where + database = currentDatabase() and + table = 'data_02491' and + table_uuid = table_uuid_ +order by event_time_microseconds; + +drop table data_02491; diff --git a/parser/testdata/02493_analyzer_sum_if_to_count_if/ast.json b/parser/testdata/02493_analyzer_sum_if_to_count_if/ast.json new file mode 100644 index 000000000..6d881696e --- /dev/null +++ b/parser/testdata/02493_analyzer_sum_if_to_count_if/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001443389, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02493_analyzer_sum_if_to_count_if/metadata.json b/parser/testdata/02493_analyzer_sum_if_to_count_if/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02493_analyzer_sum_if_to_count_if/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02493_analyzer_sum_if_to_count_if/query.sql b/parser/testdata/02493_analyzer_sum_if_to_count_if/query.sql new file mode 100644 index 000000000..171e08096 --- /dev/null +++ b/parser/testdata/02493_analyzer_sum_if_to_count_if/query.sql @@ -0,0 +1,24 @@ +SET enable_analyzer = 1; +SET optimize_rewrite_sum_if_to_count_if = 1; + +EXPLAIN QUERY TREE (SELECT sumIf(1, (number % 2) == 0) FROM numbers(10)); + +SELECT '--'; + +SELECT sumIf(1, (number % 2) == 0) FROM numbers(10); + +SELECT '--'; + +EXPLAIN QUERY TREE (SELECT sum(if((number % 2) == 0, 1, 0)) FROM numbers(10)); + +SELECT '--'; + +SELECT sum(if((number % 2) == 0, 1, 0)) FROM numbers(10); + +SELECT '--'; + +EXPLAIN QUERY TREE (SELECT sum(if((number % 2) == 0, 0, 1)) FROM numbers(10)); + +SELECT '--'; + +SELECT sum(if((number % 2) == 0, 0, 1)) FROM numbers(10); diff --git a/parser/testdata/02493_analyzer_table_functions_untuple/ast.json b/parser/testdata/02493_analyzer_table_functions_untuple/ast.json new file mode 100644 index 000000000..34de9d577 --- /dev/null +++ b/parser/testdata/02493_analyzer_table_functions_untuple/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001412228, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02493_analyzer_table_functions_untuple/metadata.json b/parser/testdata/02493_analyzer_table_functions_untuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02493_analyzer_table_functions_untuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02493_analyzer_table_functions_untuple/query.sql b/parser/testdata/02493_analyzer_table_functions_untuple/query.sql new file mode 100644 index 000000000..c9687783d --- /dev/null +++ b/parser/testdata/02493_analyzer_table_functions_untuple/query.sql @@ -0,0 +1,47 @@ +SET enable_analyzer = 1; + +SELECT number FROM numbers(untuple(tuple(1))); + +SELECT '--'; + +SELECT number FROM numbers(untuple(tuple(0, 2))); + +SELECT '--'; + +SELECT number FROM numbers(untuple(tuple(1, 2))); + +SELECT '--'; + +SELECT cast(tuple(1), 'Tuple(value UInt64)') AS value, number FROM numbers(untuple(value)); + +SELECT '--'; + +SELECT cast(tuple(0, 1), 'Tuple(value_1 UInt64, value_2 UInt64)') AS value, number FROM numbers(untuple(value)); + +SELECT '--'; + +SELECT cast(tuple(1, 2), 'Tuple(value_1 UInt64, value_2 UInt64)') AS value, number FROM numbers(untuple(value)); + 
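+-- numbers(value.*) is the qualified-asterisk spelling of numbers(untuple(value)); the APPLY
+-- variants below transform each expanded tuple element before it reaches the table function.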
+SELECT '--'; + +SELECT cast(tuple(1), 'Tuple(value UInt64)') AS value, number FROM numbers(value.*); + +SELECT '--'; + +SELECT cast(tuple(0, 1), 'Tuple(value_1 UInt64, value_2 UInt64)') AS value, number FROM numbers(value.*); + +SELECT '--'; + +SELECT cast(tuple(1, 2), 'Tuple(value_1 UInt64, value_2 UInt64)') AS value, number FROM numbers(value.*); + +SELECT '--'; + +SELECT cast(tuple('1'), 'Tuple(value String)') AS value, number FROM numbers(value.* APPLY x -> toUInt64(x)); + +SELECT '--'; + +SELECT cast(tuple('0', '1'), 'Tuple(value_1 String, value_2 String)') AS value, number FROM numbers(value.* APPLY x -> toUInt64(x)); + +SELECT '--'; + +SELECT cast(tuple('1', '2'), 'Tuple(value_1 String, value_2 String)') AS value, number FROM numbers(value.* APPLY x -> toUInt64(x)); diff --git a/parser/testdata/02493_analyzer_uniq_injective_functions_elimination/ast.json b/parser/testdata/02493_analyzer_uniq_injective_functions_elimination/ast.json new file mode 100644 index 000000000..14ec03c07 --- /dev/null +++ b/parser/testdata/02493_analyzer_uniq_injective_functions_elimination/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001287587, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02493_analyzer_uniq_injective_functions_elimination/metadata.json b/parser/testdata/02493_analyzer_uniq_injective_functions_elimination/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02493_analyzer_uniq_injective_functions_elimination/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02493_analyzer_uniq_injective_functions_elimination/query.sql b/parser/testdata/02493_analyzer_uniq_injective_functions_elimination/query.sql new file mode 100644 index 000000000..ca37c6f38 --- /dev/null +++ b/parser/testdata/02493_analyzer_uniq_injective_functions_elimination/query.sql @@ -0,0 +1,14 @@ +SET enable_analyzer = 1, optimize_injective_functions_inside_uniq = 1; + +-- Simple test +EXPLAIN QUERY TREE SELECT uniqCombined(tuple('')) FROM numbers(1); +SELECT uniqCombined(tuple('')) FROM numbers(1); + +-- Test with chain of injective functions +EXPLAIN QUERY TREE SELECT uniqCombined(tuple(materialize(tuple(number)))) FROM numbers(10); +SELECT uniqCombined(tuple(materialize(toString(number)))) FROM numbers(10); + +-- No or partial optimization cases +EXPLAIN QUERY TREE SELECT uniq(abs(number)) FROM numbers(10); -- no elimination as `abs` is not injective +EXPLAIN QUERY TREE SELECT uniq(toString(abs(materialize(number)))) FROM numbers(10); -- only eliminate `toString` +EXPLAIN QUERY TREE SELECT uniq(tuple(number, 1)) FROM numbers(10); -- no elimination as `tuple` has multiple arguments diff --git a/parser/testdata/02493_do_not_assume_that_the_original_query_was_valid_when_transforming_joins/ast.json b/parser/testdata/02493_do_not_assume_that_the_original_query_was_valid_when_transforming_joins/ast.json new file mode 100644 index 000000000..e266fc60e --- /dev/null +++ b/parser/testdata/02493_do_not_assume_that_the_original_query_was_valid_when_transforming_joins/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery table1 (children 3)" + }, + { + "explain": " Identifier table1" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + 
"explain": " ColumnDeclaration column1 (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001607419, + "rows_read": 11, + "bytes_read": 386 + } +} diff --git a/parser/testdata/02493_do_not_assume_that_the_original_query_was_valid_when_transforming_joins/metadata.json b/parser/testdata/02493_do_not_assume_that_the_original_query_was_valid_when_transforming_joins/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02493_do_not_assume_that_the_original_query_was_valid_when_transforming_joins/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02493_do_not_assume_that_the_original_query_was_valid_when_transforming_joins/query.sql b/parser/testdata/02493_do_not_assume_that_the_original_query_was_valid_when_transforming_joins/query.sql new file mode 100644 index 000000000..6df562363 --- /dev/null +++ b/parser/testdata/02493_do_not_assume_that_the_original_query_was_valid_when_transforming_joins/query.sql @@ -0,0 +1,26 @@ +CREATE TABLE table1 (column1 String) ENGINE=MergeTree() ORDER BY tuple(); +CREATE TABLE table2 (column1 String, column2 String, column3 String) ENGINE=MergeTree() ORDER BY tuple(); +CREATE TABLE table3 (column3 String) ENGINE=MergeTree() ORDER BY tuple(); + +SELECT + * +FROM +( + SELECT + column1 + FROM table1 + GROUP BY + column1 +) AS a +ANY LEFT JOIN +( + SELECT + * + FROM table2 +) AS b ON (b.column1 = a.column1) AND (b.column2 = a.column2) +ANY LEFT JOIN +( + SELECT + * + FROM table3 +) AS c ON c.column3 = b.column3; -- {serverError UNKNOWN_IDENTIFIER} diff --git a/parser/testdata/02493_max_streams_for_merge_tree_reading/ast.json b/parser/testdata/02493_max_streams_for_merge_tree_reading/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02493_max_streams_for_merge_tree_reading/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02493_max_streams_for_merge_tree_reading/metadata.json b/parser/testdata/02493_max_streams_for_merge_tree_reading/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02493_max_streams_for_merge_tree_reading/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02493_max_streams_for_merge_tree_reading/query.sql b/parser/testdata/02493_max_streams_for_merge_tree_reading/query.sql new file mode 100644 index 000000000..cbf645dde --- /dev/null +++ b/parser/testdata/02493_max_streams_for_merge_tree_reading/query.sql @@ -0,0 +1,38 @@ +-- Tags: no-random-merge-tree-settings + +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +drop table if exists t; +create table t (x UInt64) engine = MergeTree order by x; +insert into t select number from numbers_mt(10000000) settings max_insert_threads=8; + +set allow_prefetched_read_pool_for_remote_filesystem = 0; +set allow_prefetched_read_pool_for_local_filesystem = 0; + +-- { echo } + +-- The number of output streams is limited by max_streams_for_merge_tree_reading +select sum(x) from t settings max_threads=32, max_streams_for_merge_tree_reading=16, allow_asynchronous_read_from_io_pool_for_merge_tree=0; +select * from (explain pipeline select sum(x) from t settings 
max_threads=32, max_streams_for_merge_tree_reading=16, allow_asynchronous_read_from_io_pool_for_merge_tree=0) where explain like '%Resize%' or explain like '%MergeTreeSelect%'; + +-- Without asynchronous_read, max_streams_for_merge_tree_reading limits max_streams * max_streams_to_max_threads_ratio +select sum(x) from t settings max_threads=4, max_streams_for_merge_tree_reading=16, allow_asynchronous_read_from_io_pool_for_merge_tree=0, max_streams_to_max_threads_ratio=8; +select * from (explain pipeline select sum(x) from t settings max_threads=4, max_streams_for_merge_tree_reading=16, allow_asynchronous_read_from_io_pool_for_merge_tree=0, max_streams_to_max_threads_ratio=8) where explain like '%Resize%' or explain like '%MergeTreeSelect%'; + +-- With asynchronous_read, read in max_streams_for_merge_tree_reading async streams and resize to max_threads +select sum(x) from t settings max_threads=4, max_streams_for_merge_tree_reading=16, allow_asynchronous_read_from_io_pool_for_merge_tree=1; +select * from (explain pipeline select sum(x) from t settings max_threads=4, max_streams_for_merge_tree_reading=16, allow_asynchronous_read_from_io_pool_for_merge_tree=1) where explain like '%Resize%' or explain like '%MergeTreeSelect%'; + +-- With asynchronous_read, read using max_streams * max_streams_to_max_threads_ratio async streams, resize to max_streams_for_merge_tree_reading output streams, resize to max_threads after aggregation +select sum(x) from t settings max_threads=4, max_streams_for_merge_tree_reading=16, allow_asynchronous_read_from_io_pool_for_merge_tree=1, max_streams_to_max_threads_ratio=8; +select * from (explain pipeline select sum(x) from t settings max_threads=4, max_streams_for_merge_tree_reading=16, allow_asynchronous_read_from_io_pool_for_merge_tree=1, max_streams_to_max_threads_ratio=8) where explain like '%Resize%' or explain like '%MergeTreeSelect%'; + +-- For read-in-order, disable everything +set query_plan_remove_redundant_sorting=0; -- to keep reading in order +select sum(x) from (select x from t order by x) settings max_threads=4, max_streams_for_merge_tree_reading=16, allow_asynchronous_read_from_io_pool_for_merge_tree=1, optimize_read_in_order=1, query_plan_read_in_order=1; +select * from (explain pipeline select sum(x) from (select x from t order by x) settings max_threads=4, max_streams_for_merge_tree_reading=16, allow_asynchronous_read_from_io_pool_for_merge_tree=1, optimize_read_in_order=1, query_plan_read_in_order=1) where explain like '%Resize%'; +select sum(x) from (select x from t order by x) settings max_threads=4, max_streams_for_merge_tree_reading=16, allow_asynchronous_read_from_io_pool_for_merge_tree=1, max_streams_to_max_threads_ratio=8, optimize_read_in_order=1, query_plan_read_in_order=1; +select * from (explain pipeline select sum(x) from (select x from t order by x) settings max_threads=4, max_streams_for_merge_tree_reading=16, allow_asynchronous_read_from_io_pool_for_merge_tree=1, max_streams_to_max_threads_ratio=8, optimize_read_in_order=1, query_plan_read_in_order=1) where explain like '%Resize%'; + +-- { echoOff } +drop table t; diff --git a/parser/testdata/02493_numeric_literals_with_underscores/ast.json b/parser/testdata/02493_numeric_literals_with_underscores/ast.json new file mode 100644 index 000000000..1fc841c1f --- /dev/null +++ b/parser/testdata/02493_numeric_literals_with_underscores/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery 
(children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1234" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001072622, + "rows_read": 5, + "bytes_read": 180 + } +} diff --git a/parser/testdata/02493_numeric_literals_with_underscores/metadata.json b/parser/testdata/02493_numeric_literals_with_underscores/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02493_numeric_literals_with_underscores/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02493_numeric_literals_with_underscores/query.sql b/parser/testdata/02493_numeric_literals_with_underscores/query.sql new file mode 100644 index 000000000..af8eb77e9 --- /dev/null +++ b/parser/testdata/02493_numeric_literals_with_underscores/query.sql @@ -0,0 +1,154 @@ +SELECT 1234; -- Positive integer (+ implied) +SELECT 1_234; +SELECT 1_2_3_4; +SELECT +1234; -- Positive integer (+ explicit) +SELECT +1_234; +SELECT +1_2_3_4; +SELECT -1234; -- Negative integer +SELECT -1_234; +SELECT -1_2_3_4; +SELECT 12.34; -- Positive floating point with . notation +SELECT 12.3_4; +SELECT 1_2.34; +SELECT 1_2.3_4; +SELECT -12.34; -- Negative floating point with . notation +SELECT -12.3_4; +SELECT -1_2.34; +SELECT -1_2.3_4; +SELECT 34e21; -- Positive floating point with positive scientific notation (+ implied) +SELECT 3_4e21; +SELECT 34e2_1; +SELECT 3_4e2_1; +SELECT 34e+21; -- Positive floating point with positive scientific notation (+ explicit) +SELECT 3_4e+21; +SELECT 34e+2_1; +SELECT 3_4e+2_1; +SELECT 34e-21; -- Positive floating point with negative scientific notation +SELECT 3_4e-21; +SELECT 34e-2_1; +SELECT 3_4e-2_1; +SELECT -34e21; -- Negative floating point with positive scientific notation (+ implied) +SELECT -3_4e21; +SELECT -34e2_1; +SELECT -3_4e2_1; +SELECT -34e+21; -- Negative floating point with positive scientific notation (+ explicit) +SELECT -3_4e+21; +SELECT -34e+2_1; +SELECT -3_4e+2_1; +SELECT -34e-21; -- Negative floating point with negative scientific notation +SELECT -3_4e-21; +SELECT -34e-2_1; +SELECT -3_4e-2_1; +SELECT 1.34e21; -- Positive floating point (with .) with positive scientific notation (+ implied) +SELECT 1.3_4e21; +SELECT 1.34e2_1; +SELECT 1.3_4e2_1; +SELECT 1.34e+21; -- Positive floating point (with .) with positive scientific notation (+ explicit) +SELECT 1.3_4e+21; +SELECT 1.34e+2_1; +SELECT 1.3_4e+2_1; +SELECT 1.34e-21; -- Positive floating point (with .) with negative scientific notation +SELECT 1.3_4e-21; +SELECT 1.34e-2_1; +SELECT 1.3_4e-2_1; +SELECT -1.34e21; -- Negative floating point (with .) with positive scientific notation (+ implied) +SELECT -1.3_4e21; +SELECT -1.34e2_1; +SELECT -1.3_4e2_1; +SELECT -1.34e+21; -- Negative floating point (with .) with positive scientific notation (+ explicit) +SELECT -1.3_4e+21; +SELECT -1.34e+2_1; +SELECT -1.3_4e+2_1; +SELECT -1.34e-21; -- Negative floating point (with .) with negative scientific notation +SELECT -1.3_4e-21; +SELECT -1.34e-2_1; +SELECT -1.3_4e-2_1; +SELECT -.34e21; -- Negative floating point (with .) with positive scientific notation (+ implied) +SELECT -.3_4e21; +SELECT -.34e2_1; +SELECT -.3_4e2_1; +SELECT -.34e+21; -- Negative floating point (with .) with positive scientific notation (+ explicit) +SELECT -.3_4e+21; +SELECT -.34e+2_1; +SELECT -.3_4e+2_1; +SELECT -.34e-21; -- Negative floating point (with .) 
with negative scientific notation +SELECT -.3_4e-21; +SELECT -.34e-2_1; +SELECT -.3_4e-2_1; +SELECT NaN; -- Specials +SELECT nan; +SELECT inf; +SELECT +inf; +SELECT -inf; +SELECT Inf; +SELECT +Inf; +SELECT -Inf; +SELECT INF; +SELECT +INF; +SELECT -INF; +SELECT 0b1111; -- Binary +SELECT 0b1_111; +SELECT 0b1_1_1_1; +SELECT -0b1111; +SELECT -0b1_111; +SELECT -0b1_1_1_1; +SELECT 0x1234; -- Hex +SELECT 0x1_234; +SELECT 0x1_2_3_4; +SELECT -0x1234; +SELECT -0x1_234; +SELECT -0x1_2_3_4; +SELECT 0xee; +SELECT 0xe_e; +SELECT 0x1.234; -- Hex fractions +SELECT 0x1.2_3_4; +SELECT -0x1.234; +SELECT -0x1.2_3_4; +SELECT 0x0.ee; +SELECT 0x0.e_e; +SELECT 0x1.234p01; -- Hex scientific notation +SELECT 0x1.2_34p01; +SELECT 0x1.234p0_1; +SELECT 0x1.234p+01; +SELECT 0x1.2_34p+01; +SELECT 0x1.2_34p+0_1; +SELECT 0x1.234p-01; +SELECT 0x1.2_34p-01; +SELECT 0x1.2_34p-0_1; +SELECT -0x1.234p01; +SELECT -0x1.2_34p01; +SELECT -0x1.2_34p0_1; +SELECT -0x1.234p+01; +SELECT -0x1.2_34p+01; +SELECT -0x1.2_34p+0_1; +SELECT -0x1.234p-01; +SELECT -0x1.2_34p-01; +SELECT -0x1.2_34p-0_1; + +-- Things that are not a number + +select _1000; -- { serverError UNKNOWN_IDENTIFIER } +select _1000 FROM (SELECT 1 AS _1000) FORMAT Null; +select -_1; -- { serverError UNKNOWN_IDENTIFIER } +select -_1 FROM (SELECT -1 AS _1) FORMAT Null; +select +_1; -- { serverError UNKNOWN_IDENTIFIER } +select 1__0; -- { serverError UNKNOWN_IDENTIFIER } +select 1_; -- { serverError UNKNOWN_IDENTIFIER } +select 1_ ; -- { serverError UNKNOWN_IDENTIFIER } +select 10_; -- { serverError UNKNOWN_IDENTIFIER } +select 1_e5; -- { serverError UNKNOWN_IDENTIFIER } +select 1e_5; -- { serverError UNKNOWN_IDENTIFIER } +select 1e5_; -- { serverError UNKNOWN_IDENTIFIER } +select 1e_; -- { serverError UNKNOWN_IDENTIFIER } +select 1_.; -- { clientError SYNTAX_ERROR } +select 1e_1; -- { serverError UNKNOWN_IDENTIFIER } +select 0_x2; -- { serverError UNKNOWN_IDENTIFIER } +select 0x2_p2; -- { serverError UNKNOWN_IDENTIFIER } +select 0x2p_2; -- { serverError UNKNOWN_IDENTIFIER } +select 0x2p2_; -- { serverError UNKNOWN_IDENTIFIER } +select 0b; -- { serverError UNKNOWN_IDENTIFIER } +select 0b ; -- { serverError UNKNOWN_IDENTIFIER } +select 0x; -- { serverError UNKNOWN_IDENTIFIER } +select 0x ; -- { serverError UNKNOWN_IDENTIFIER } +select 0x_; -- { serverError UNKNOWN_IDENTIFIER } +select 0x_1; -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/02494_analyzer_compound_expression_crash_fix/ast.json b/parser/testdata/02494_analyzer_compound_expression_crash_fix/ast.json new file mode 100644 index 000000000..fcd5abbba --- /dev/null +++ b/parser/testdata/02494_analyzer_compound_expression_crash_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001127326, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02494_analyzer_compound_expression_crash_fix/metadata.json b/parser/testdata/02494_analyzer_compound_expression_crash_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_analyzer_compound_expression_crash_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_analyzer_compound_expression_crash_fix/query.sql b/parser/testdata/02494_analyzer_compound_expression_crash_fix/query.sql new file mode 100644 index 000000000..20b0bdd46 --- /dev/null +++ b/parser/testdata/02494_analyzer_compound_expression_crash_fix/query.sql @@ 
-0,0 +1,16 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table ( + fingerprint UInt16, + fields Nested(name Array(UInt32), value String) +) ENGINE = MergeTree +ORDER BY fingerprint; + +INSERT INTO test_table VALUES (0, [[1]], ['1']); + +SELECT fields.name FROM (SELECT fields.name FROM test_table); + +SELECT fields.name, fields.value FROM (SELECT fields.name FROM test_table); -- { serverError UNKNOWN_IDENTIFIER } + +DROP TABLE IF EXISTS test_table; diff --git a/parser/testdata/02494_analyzer_cte_resolution_in_subquery_fix/ast.json b/parser/testdata/02494_analyzer_cte_resolution_in_subquery_fix/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_analyzer_cte_resolution_in_subquery_fix/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_analyzer_cte_resolution_in_subquery_fix/metadata.json b/parser/testdata/02494_analyzer_cte_resolution_in_subquery_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_analyzer_cte_resolution_in_subquery_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_analyzer_cte_resolution_in_subquery_fix/query.sql b/parser/testdata/02494_analyzer_cte_resolution_in_subquery_fix/query.sql new file mode 100644 index 000000000..de7c7242b --- /dev/null +++ b/parser/testdata/02494_analyzer_cte_resolution_in_subquery_fix/query.sql @@ -0,0 +1,9 @@ +WITH a AS (SELECT t1.number AS n1, t2.number AS n2 FROM numbers(1) AS t1, numbers(1) AS t2), b AS (SELECT sum(n1) AS s FROM a) +SELECT * FROM b AS l, a AS r; + +WITH a AS (SELECT t1.number AS n1, t2.number AS n2 FROM numbers(1) AS t1, numbers(1) AS t2), b AS (SELECT sum(n1) AS s FROM a) +SELECT * FROM b AS l, a AS r; + +WITH a AS (SELECT number FROM numbers(1)), b AS (SELECT number FROM a) SELECT * FROM b as l, a as r; + +WITH a AS (SELECT number FROM numbers(1)), b AS (SELECT number FROM a) SELECT * FROM a as l, b as r; diff --git a/parser/testdata/02494_array_function_range/ast.json b/parser/testdata/02494_array_function_range/ast.json new file mode 100644 index 000000000..d95fffae1 --- /dev/null +++ b/parser/testdata/02494_array_function_range/ast.json @@ -0,0 +1,100 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_0" + 
}, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 26, + + "statistics": + { + "elapsed": 0.001355815, + "rows_read": 26, + "bytes_read": 1022 + } +} diff --git a/parser/testdata/02494_array_function_range/metadata.json b/parser/testdata/02494_array_function_range/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_array_function_range/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_array_function_range/query.sql b/parser/testdata/02494_array_function_range/query.sql new file mode 100644 index 000000000..bd945d552 --- /dev/null +++ b/parser/testdata/02494_array_function_range/query.sql @@ -0,0 +1,10 @@ +SELECT range(100) == range(0, 100) and range(0, 100) == range(0, 100, 1); +SELECT range(100) == range(cast('100', 'Int8')) and range(100) == range(cast('100', 'Int16')) and range(100) == range(cast('100', 'Int32')) and range(100) == range(cast('100', 'Int64')); +SELECT range(cast('100', 'Int8')) == range(0, cast('100', 'Int8')) and range(0, cast('100', 'Int8')) == range(0, cast('100', 'Int8'), 1) and range(0, cast('100', 'Int8')) == range(0, cast('100', 'Int8'), cast('1', 'Int8')); +SELECT range(-1, 1); +SELECT range(-1, 1, 2); +SELECT range(1,1); +SELECT range(5, 0, -1); +SELECT range(5, -1, -1); +SELECT range(1, 257, 65535); +SELECT range(cast(number - 5, 'Int8'), cast(number + 5, 'Int8')) from system.numbers limit 10; \ No newline at end of file diff --git a/parser/testdata/02494_combinators_with_null_argument/ast.json b/parser/testdata/02494_combinators_with_null_argument/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_combinators_with_null_argument/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_combinators_with_null_argument/metadata.json b/parser/testdata/02494_combinators_with_null_argument/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_combinators_with_null_argument/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_combinators_with_null_argument/query.sql b/parser/testdata/02494_combinators_with_null_argument/query.sql new file mode 100644 index 000000000..e18fd741a --- /dev/null +++ b/parser/testdata/02494_combinators_with_null_argument/query.sql @@ -0,0 +1,11 @@ +-- { echoOn } + +select sumIf(1, NULL); +select sumIf(NULL, 1); +select sumIf(NULL, NULL); +select countIf(1, NULL); +select countIf(NULL, 1); +select countIf(1, NULL); +select sumArray([NULL, NULL]); +select countArray([NULL, NULL]); + diff --git a/parser/testdata/02494_optimize_group_by_function_keys_and_alias_columns/ast.json b/parser/testdata/02494_optimize_group_by_function_keys_and_alias_columns/ast.json new file mode 100644 index 000000000..7e335d4ad --- /dev/null +++ b/parser/testdata/02494_optimize_group_by_function_keys_and_alias_columns/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 3)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration timestamp (children 1)" + }, + { + "explain": " DataType DateTime" + }, + { + "explain": " ColumnDeclaration day (children 1)" + }, + { + "explain": " Function toYYYYMMDD (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + 
}, + { + "explain": " Identifier timestamp" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier timestamp" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001612486, + "rows_read": 13, + "bytes_read": 473 + } +} diff --git a/parser/testdata/02494_optimize_group_by_function_keys_and_alias_columns/metadata.json b/parser/testdata/02494_optimize_group_by_function_keys_and_alias_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_optimize_group_by_function_keys_and_alias_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_optimize_group_by_function_keys_and_alias_columns/query.sql b/parser/testdata/02494_optimize_group_by_function_keys_and_alias_columns/query.sql new file mode 100644 index 000000000..ae4654bb1 --- /dev/null +++ b/parser/testdata/02494_optimize_group_by_function_keys_and_alias_columns/query.sql @@ -0,0 +1,7 @@ +CREATE TABLE t(timestamp DateTime, day ALIAS toYYYYMMDD(timestamp)) Engine = MergeTree ORDER BY timestamp; + +INSERT INTO t (timestamp) VALUES ('2022-11-25 22:33:19'::DateTime), ('2022-11-25 22:33:19'::DateTime - INTERVAL 1 DAY), ('2022-11-25 22:33:19'::DateTime + INTERVAL 1 DAY), ('2022-11-25 22:33:19'::DateTime - INTERVAL 2 DAY), ('2022-11-25 22:33:19'::DateTime + INTERVAL 2 DAY); +INSERT INTO t (timestamp) VALUES ('2022-11-25 22:33:19'::DateTime), ('2022-11-25 22:33:19'::DateTime - INTERVAL 1 DAY), ('2022-11-25 22:33:19'::DateTime + INTERVAL 1 DAY), ('2022-11-25 22:33:19'::DateTime - INTERVAL 2 DAY), ('2022-11-25 22:33:19'::DateTime + INTERVAL 2 DAY); +INSERT INTO t (timestamp) VALUES ('2022-11-25 22:33:19'::DateTime), ('2022-11-25 22:33:19'::DateTime - INTERVAL 1 DAY), ('2022-11-25 22:33:19'::DateTime + INTERVAL 1 DAY), ('2022-11-25 22:33:19'::DateTime - INTERVAL 2 DAY), ('2022-11-25 22:33:19'::DateTime + INTERVAL 2 DAY); + +SELECT day, timestamp FROM remote('127.0.0.{1,2}', currentDatabase(), t) GROUP BY day, timestamp ORDER BY timestamp; diff --git a/parser/testdata/02494_parser_string_binary_literal/ast.json b/parser/testdata/02494_parser_string_binary_literal/ast.json new file mode 100644 index 000000000..105407db9 --- /dev/null +++ b/parser/testdata/02494_parser_string_binary_literal/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001011187, + "rows_read": 5, + "bytes_read": 171 + } +} diff --git a/parser/testdata/02494_parser_string_binary_literal/metadata.json b/parser/testdata/02494_parser_string_binary_literal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_parser_string_binary_literal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_parser_string_binary_literal/query.sql b/parser/testdata/02494_parser_string_binary_literal/query.sql new file mode 100644 index 000000000..ebfe2a198 --- /dev/null +++ b/parser/testdata/02494_parser_string_binary_literal/query.sql @@ -0,0 +1,29 @@ +select b''; +select b'0' == '\0'; +select b'00110000'; -- 0 +select b'0011000100110000'; -- 10 +select 
b'111001101011010110001011111010001010111110010101' == '测试'; + +select B''; +select B'0' == '\0'; +select B'00110000'; -- 0 +select B'0011000100110000'; -- 10 +select B'111001101011010110001011111010001010111110010101' == '测试'; + +select x''; +select x'0' == '\0'; +select x'30'; -- 0 +select x'3130'; -- 10 +select x'e6b58be8af95' == '测试'; + +select X''; +select X'0' == '\0'; +select X'30'; -- 0 +select X'3130'; -- 10 +select X'e6b58be8af95' == '测试'; + + +select x'' == b''; +select x'0' == b'0'; +select X'' == X''; +select X'0' == X'0'; diff --git a/parser/testdata/02494_query_cache_bugs/ast.json b/parser/testdata/02494_query_cache_bugs/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_bugs/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_bugs/metadata.json b/parser/testdata/02494_query_cache_bugs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_bugs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_bugs/query.sql b/parser/testdata/02494_query_cache_bugs/query.sql new file mode 100644 index 000000000..755a5fae9 --- /dev/null +++ b/parser/testdata/02494_query_cache_bugs/query.sql @@ -0,0 +1,57 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +-- Test for Bug 56258 + +SYSTEM DROP QUERY CACHE; + +SELECT '-- Bug 56258: Check literals (ASTLiteral)'; + +SELECT 10 FORMAT Vertical SETTINGS use_query_cache = 1; +SELECT 10 AS x FORMAT Vertical SETTINGS use_query_cache = 1; + +SELECT count(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; + +SELECT '-- Bug 56258: Check functions (ASTFunction)'; + +SELECT toUInt64(42) FORMAT Vertical SETTINGS use_query_cache = 1; +SELECT toUInt64(42) AS x FORMAT Vertical SETTINGS use_query_cache = 1; + +SELECT count(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; + +SELECT '-- Bug 56258: Check identifiers (ASTIdentifier)'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(c UInt64) ENGINE = Memory AS SELECT 1; + +SELECT c FROM tab FORMAT Vertical SETTINGS use_query_cache = 1; +SELECT c AS x FROM tab FORMAT Vertical SETTINGS use_query_cache = 1; + +SELECT count(*) FROM system.query_cache; + +DROP TABLE tab; + +SELECT '-- Bug 67476: Queries with overflow mode != throw must not be cached by the query cache'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(c UInt64) ENGINE = Memory; + +SYSTEM DROP QUERY CACHE; +SELECT sum(c) FROM tab SETTINGS read_overflow_mode = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE } +SELECT sum(c) FROM tab SETTINGS read_overflow_mode_leaf = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE } +SELECT sum(c) FROM tab SETTINGS group_by_overflow_mode = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE } +SELECT sum(c) FROM tab SETTINGS sort_overflow_mode = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE } +SELECT sum(c) FROM tab SETTINGS result_overflow_mode = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE } +SELECT sum(c) FROM tab SETTINGS timeout_overflow_mode = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE } +SELECT sum(c) FROM tab SETTINGS set_overflow_mode = 'break', use_query_cache = 1; -- { serverError 
QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE } +SELECT sum(c) FROM tab SETTINGS join_overflow_mode = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE } +SELECT sum(c) FROM tab SETTINGS transfer_overflow_mode = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE } +SELECT sum(c) FROM tab SETTINGS distinct_overflow_mode = 'break', use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_NON_THROW_OVERFLOW_MODE } + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_case_agnostic_matching/ast.json b/parser/testdata/02494_query_cache_case_agnostic_matching/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_case_agnostic_matching/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_case_agnostic_matching/metadata.json b/parser/testdata/02494_query_cache_case_agnostic_matching/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_case_agnostic_matching/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_case_agnostic_matching/query.sql b/parser/testdata/02494_query_cache_case_agnostic_matching/query.sql new file mode 100644 index 000000000..c94d82cdc --- /dev/null +++ b/parser/testdata/02494_query_cache_case_agnostic_matching/query.sql @@ -0,0 +1,27 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +-- Start with empty query cache (QC) +SYSTEM DROP QUERY CACHE; + +-- Insert an entry into the query cache. +SELECT 1 SETTINGS use_query_cache = true; +-- Check that entry in QC exists +SELECT COUNT(*) FROM system.query_cache; + +-- Run the same SELECT but with different case (--> select). We want its result to be served from the QC. +SELECT '---'; +select 1 SETTINGS use_query_cache = true; + +-- There should still be just one entry in the QC +SELECT COUNT(*) FROM system.query_cache; + +-- The second query should cause a QC hit. 
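+-- [Editor's note: a hedged sketch, not upstream test content. Matching is case-agnostic only
+-- because both spellings parse to the same AST; a structurally different statement such as
+-- select 2 SETTINGS use_query_cache = true;
+-- would produce a different AST and hence a second, separate cache entry.]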
+SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses'] +FROM system.query_log +WHERE type = 'QueryFinish' + AND current_database = currentDatabase() + AND query = 'select 1 SETTINGS use_query_cache = true;'; + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_compression/ast.json b/parser/testdata/02494_query_cache_compression/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_compression/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_compression/metadata.json b/parser/testdata/02494_query_cache_compression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_compression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_compression/query.sql b/parser/testdata/02494_query_cache_compression/query.sql new file mode 100644 index 000000000..3d17deebd --- /dev/null +++ b/parser/testdata/02494_query_cache_compression/query.sql @@ -0,0 +1,35 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SYSTEM DROP QUERY CACHE; +DROP TABLE IF EXISTS t; + +-- Create test table with lots of rows +CREATE TABLE t(c String) ENGINE=MergeTree ORDER BY c; +INSERT INTO t SELECT multiIf(n = 0, 'abc', n = 1, 'def', n = 2, 'abc', n = 3, 'jkl', '<unused>') FROM (SELECT number % 4 AS n FROM numbers(1200)); +OPTIMIZE TABLE t FINAL; + +-- Run a query and store its *compressed* result in the query cache +SELECT '-- insert with enabled compression'; +SELECT * FROM t ORDER BY c +SETTINGS use_query_cache = true, query_cache_compress_entries = true; + +-- Run again to check that no bad things happen and that the result is as expected +SELECT '-- read from cache'; +SELECT * FROM t ORDER BY c +SETTINGS use_query_cache = true; + +SYSTEM DROP QUERY CACHE; + +-- Run a query and store its *uncompressed* result in the query cache +SELECT '-- insert with disabled compression'; +SELECT * FROM t ORDER BY c +SETTINGS use_query_cache = true, query_cache_compress_entries = false; + +-- Run again to check that no bad things happen and that the result is as expected +SELECT '-- read from cache'; +SELECT * FROM t ORDER BY c +SETTINGS use_query_cache = true; + +DROP TABLE t; +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_drop_cache/ast.json b/parser/testdata/02494_query_cache_drop_cache/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_drop_cache/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_drop_cache/metadata.json b/parser/testdata/02494_query_cache_drop_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_drop_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_drop_cache/query.sql b/parser/testdata/02494_query_cache_drop_cache/query.sql new file mode 100644 index 000000000..3d064169a --- /dev/null +++ b/parser/testdata/02494_query_cache_drop_cache/query.sql @@ -0,0 +1,34 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +-- (it's silly to use what will be tested below but we have to assume other tests cluttered the query cache) +SYSTEM DROP QUERY CACHE; + +SELECT 'Cache query result in query cache'; +SELECT 1 SETTINGS use_query_cache = true; +SELECT count(*) FROM system.query_cache; + +SELECT 
'DROP entries with a certain tag, no entry will match'; +SYSTEM DROP QUERY CACHE TAG 'tag'; +SELECT count(*) FROM system.query_cache; + +SELECT 'After a full DROP, the cache is empty now'; +SYSTEM DROP QUERY CACHE; +SELECT count(*) FROM system.query_cache; + +-- More tests for DROP with tags: + +SELECT 'Cache query result with different or no tag in query cache'; +SELECT 1 SETTINGS use_query_cache = true; +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'abc'; +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'def'; +SELECT 2 SETTINGS use_query_cache = true; +SELECT count(*) FROM system.query_cache; + +SELECT 'DROP entries with certain tags'; +SYSTEM DROP QUERY CACHE TAG ''; +SELECT count(*) FROM system.query_cache; +SYSTEM DROP QUERY CACHE TAG 'def'; +SELECT count(*) FROM system.query_cache; +SYSTEM DROP QUERY CACHE TAG 'abc'; +SELECT count(*) FROM system.query_cache; diff --git a/parser/testdata/02494_query_cache_eligible_queries/ast.json b/parser/testdata/02494_query_cache_eligible_queries/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_eligible_queries/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_eligible_queries/metadata.json b/parser/testdata/02494_query_cache_eligible_queries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_eligible_queries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_eligible_queries/query.sql b/parser/testdata/02494_query_cache_eligible_queries/query.sql new file mode 100644 index 000000000..14e991025 --- /dev/null +++ b/parser/testdata/02494_query_cache_eligible_queries/query.sql @@ -0,0 +1,67 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SYSTEM DROP QUERY CACHE; +DROP TABLE IF EXISTS eligible_test; +DROP TABLE IF EXISTS eligible_test2; + +-- enable query cache session-wide but also force it individually in each of the statements below +SET use_query_cache = true; +SET query_cache_system_table_handling = 'save'; + +-- check that SELECT statements create entries in the query cache ... +SELECT 1 SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; + +-- ... 
and that all other statements do not create entries: + +-- CREATE +CREATE TABLE eligible_test (a String) ENGINE=MergeTree ORDER BY a; -- SETTINGS use_query_cache = true; -- SETTINGS rejected as unknown +SELECT COUNT(*) FROM system.query_cache; + +-- ALTER +ALTER TABLE eligible_test ADD COLUMN b String SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +-- INSERT +INSERT INTO eligible_test VALUES('a', 'b'); -- SETTINGS use_query_cache = true; -- SETTINGS rejected as unknown +SELECT COUNT(*) FROM system.query_cache; +INSERT INTO eligible_test SELECT * FROM eligible_test SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +-- SHOW +SHOW TABLES SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +-- CHECK +CHECK TABLE eligible_test SETTINGS use_query_cache = true, check_query_single_value_result = 1; +SELECT COUNT(*) FROM system.query_cache; + +-- DESCRIBE +DESCRIBE TABLE eligible_test SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +-- EXISTS +EXISTS TABLE eligible_test SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +-- KILL +KILL QUERY WHERE query_id='3-857d-4a57-9ee0-3c7da5d60a90' SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +-- OPTIMIZE +OPTIMIZE TABLE eligible_test FINAL SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +-- TRUNCATE +TRUNCATE TABLE eligible_test SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +-- RENAME +RENAME TABLE eligible_test TO eligible_test2 SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; +DROP TABLE eligible_test2; diff --git a/parser/testdata/02494_query_cache_empty_tuple/ast.json b/parser/testdata/02494_query_cache_empty_tuple/ast.json new file mode 100644 index 000000000..ea0633e63 --- /dev/null +++ b/parser/testdata/02494_query_cache_empty_tuple/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Set" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001136044, + "rows_read": 14, + "bytes_read": 509 + } +} diff --git a/parser/testdata/02494_query_cache_empty_tuple/metadata.json b/parser/testdata/02494_query_cache_empty_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_empty_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_empty_tuple/query.sql b/parser/testdata/02494_query_cache_empty_tuple/query.sql new file mode 100644 index 000000000..8e133143e --- /dev/null +++ b/parser/testdata/02494_query_cache_empty_tuple/query.sql @@ -0,0 +1,2 @@ +SELECT tuple(), 0 FROM numbers(1) SETTINGS use_query_cache = true; +SELECT 
tuple(), 0 FROM numbers(1) SETTINGS use_query_cache = true; diff --git a/parser/testdata/02494_query_cache_events/ast.json b/parser/testdata/02494_query_cache_events/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_events/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_events/metadata.json b/parser/testdata/02494_query_cache_events/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_events/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_events/query.sql b/parser/testdata/02494_query_cache_events/query.sql new file mode 100644 index 000000000..62113acc4 --- /dev/null +++ b/parser/testdata/02494_query_cache_events/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +-- Start with empty query cache (QC) +SYSTEM DROP QUERY CACHE; + +SELECT 1 SETTINGS use_query_cache = true; +SELECT 1 SETTINGS use_query_cache = true; + +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses'] +FROM system.query_log +WHERE type = 'QueryFinish' + AND current_database = currentDatabase() + AND query = 'SELECT 1 SETTINGS use_query_cache = true;' +ORDER BY event_time_microseconds; + +-- (The 1st execution was a cache miss, the 2nd execution was a cache hit) + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_exception_handling/ast.json b/parser/testdata/02494_query_cache_exception_handling/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_exception_handling/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_exception_handling/metadata.json b/parser/testdata/02494_query_cache_exception_handling/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_exception_handling/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_exception_handling/query.sql b/parser/testdata/02494_query_cache_exception_handling/query.sql new file mode 100644 index 000000000..70a443cc7 --- /dev/null +++ b/parser/testdata/02494_query_cache_exception_handling/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SYSTEM DROP QUERY CACHE; + +-- If an exception is thrown during query execution, no entry must be created in the query cache +SELECT throwIf(1) SETTINGS use_query_cache = true; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +SELECT COUNT(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_explain/ast.json b/parser/testdata/02494_query_cache_explain/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_explain/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_explain/metadata.json b/parser/testdata/02494_query_cache_explain/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_explain/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_explain/query.sql b/parser/testdata/02494_query_cache_explain/query.sql new file mode 100644 index 000000000..decdd92c4 --- /dev/null +++ b/parser/testdata/02494_query_cache_explain/query.sql @@ -0,0 
+1,24 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SET enable_analyzer = 1; +SET query_cache_system_table_handling = 'save'; + +SYSTEM DROP QUERY CACHE; + +-- Run a silly query with a non-trivial plan and put the result into the query cache (QC) +SELECT 1 + number from system.numbers LIMIT 1 SETTINGS use_query_cache = true; +SELECT count(*) FROM system.query_cache; + +-- EXPLAIN PLAN should show the same regardless of whether the result is calculated or read from the QC +EXPLAIN PLAN SELECT 1 + number from system.numbers LIMIT 1; +EXPLAIN PLAN SELECT 1 + number from system.numbers LIMIT 1 SETTINGS use_query_cache = true; -- (*) + +-- EXPLAIN PIPELINE should show the same regardless of whether the result is calculated or read from the QC +EXPLAIN PIPELINE SELECT 1 + number from system.numbers LIMIT 1; +EXPLAIN PIPELINE SELECT 1 + number from system.numbers LIMIT 1 SETTINGS use_query_cache = true; -- (*) + +-- Statements (*) must not cache their results into the QC +SELECT count(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_ignore_output_settings/ast.json b/parser/testdata/02494_query_cache_ignore_output_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_ignore_output_settings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_ignore_output_settings/metadata.json b/parser/testdata/02494_query_cache_ignore_output_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_ignore_output_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_ignore_output_settings/query.sql b/parser/testdata/02494_query_cache_ignore_output_settings/query.sql new file mode 100644 index 000000000..4df805fca --- /dev/null +++ b/parser/testdata/02494_query_cache_ignore_output_settings/query.sql @@ -0,0 +1,35 @@ +-- Tags: no-parallel-replicas +-- no-parallel-replicas: the query from query_log errors due to missing columns. + +-- Checks that the query cache ignores output-format-related settings (settings starting with 'output_format_') + +SET max_block_size = 100; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab(c UInt64) ENGINE = Memory AS SELECT 1; + +SELECT '03710', c FROM tab SETTINGS use_query_cache = 1, output_format_tsv_crlf_end_of_line = 0; +SELECT '03710', c FROM tab SETTINGS use_query_cache = 1, output_format_tsv_crlf_end_of_line = 1; +SELECT '03710', c FROM tab SETTINGS use_query_cache = 1, max_block_size = 1; +SELECT '03710', c FROM tab SETTINGS use_query_cache = 1, max_block_size = 1; +SELECT '03710', c FROM tab FORMAT CSV SETTINGS use_query_cache = 1, max_block_size = 1; -- Same query as before but with a different FORMAT; unfortunately that's a miss because the query cache uses the AST structure as the key +SELECT '03710', c FROM tab FORMAT TSV SETTINGS use_query_cache = 1, max_block_size = 1; +SELECT '03710', c FROM tab SETTINGS use_query_cache = 1, max_block_size = 1 FORMAT CSV; +SELECT '03710', c FROM tab SETTINGS use_query_cache = 1, max_block_size = 1 FORMAT TSV; + +SYSTEM FLUSH LOGS query_log; + +SELECT + Settings['output_format_tsv_crlf_end_of_line'], + Settings['max_block_size'], + ProfileEvents['QueryCacheHits'] > 0 ? 'hit' : '', + ProfileEvents['QueryCacheMisses'] > 0 ? 
'miss' : '' +FROM + system.query_log +WHERE + type = 'QueryFinish' + AND event_time > now() - 600 + AND current_database = currentDatabase() + AND query LIKE 'SELECT \'03710\', %' +ORDER BY + event_time_microseconds; diff --git a/parser/testdata/02494_query_cache_key/ast.json b/parser/testdata/02494_query_cache_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_key/metadata.json b/parser/testdata/02494_query_cache_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_key/query.sql b/parser/testdata/02494_query_cache_key/query.sql new file mode 100644 index 000000000..28d32dec1 --- /dev/null +++ b/parser/testdata/02494_query_cache_key/query.sql @@ -0,0 +1,70 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +-- Tests that the key of the query cache is not only formed by the query AST but also by +-- (1) the current database (`USE db`, issue #64136), +-- (2) the query settings + + +SELECT 'Test (1)'; + +SYSTEM DROP QUERY CACHE; + +DROP DATABASE IF EXISTS db1; +DROP DATABASE IF EXISTS db2; + +CREATE DATABASE db1; +CREATE DATABASE db2; + +CREATE TABLE db1.tab(a UInt64, PRIMARY KEY a); +CREATE TABLE db2.tab(a UInt64, PRIMARY KEY a); + +INSERT INTO db1.tab values(1); +INSERT INTO db2.tab values(2); + +USE db1; +SELECT * FROM tab SETTINGS use_query_cache = 1; + +USE db2; +SELECT * FROM tab SETTINGS use_query_cache = 1; + +DROP DATABASE db1; +DROP DATABASE db2; + +SYSTEM DROP QUERY CACHE; + + +SELECT 'Test (2)'; + +-- test with query-level settings +SELECT 1 SETTINGS use_query_cache = 1, limit = 1, use_skip_indexes = 0 Format Null; +SELECT 1 SETTINGS use_query_cache = 1, use_skip_indexes = 0 Format Null; +SELECT 1 SETTINGS use_query_cache = 1, use_skip_indexes = 1 Format Null; +SELECT 1 SETTINGS use_query_cache = 1, max_block_size = 1 Format Null; + +-- 4x the same query but with different settings each. This should yield four entries in the query cache. +SELECT count(query) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; + +-- test with mixed session-level/query-level settings +SET use_query_cache = 1; +SET limit = 1; +SELECT 1 SETTINGS use_skip_indexes = 0 Format Null; +SET limit = default; +SET use_skip_indexes = 0; +SELECT 1 Format Null; +SET use_skip_indexes = 1; +SELECT 1 SETTINGS use_skip_indexes = 1 Format Null; +SET use_skip_indexes = default; +SET max_block_size = 1; +SELECT 1 Format Null; +SET max_block_size = default; + +SET use_query_cache = default; + +-- 4x the same query but with different settings each. This should yield four entries in the query cache. 
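+-- [Editor's sketch: an assumption about the mechanism, not upstream test content. The key
+-- appears to be derived from the effective settings, so a session-level
+-- SET use_skip_indexes = 0; SELECT 1 SETTINGS use_query_cache = 1;
+-- and the equivalent query-level
+-- SELECT 1 SETTINGS use_query_cache = 1, use_skip_indexes = 0;
+-- should map to the same cache entry.]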
+SELECT count(query) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; + diff --git a/parser/testdata/02494_query_cache_log_comment/ast.json b/parser/testdata/02494_query_cache_log_comment/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_log_comment/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_log_comment/metadata.json b/parser/testdata/02494_query_cache_log_comment/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_log_comment/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_log_comment/query.sql b/parser/testdata/02494_query_cache_log_comment/query.sql new file mode 100644 index 000000000..bf9da049c --- /dev/null +++ b/parser/testdata/02494_query_cache_log_comment/query.sql @@ -0,0 +1,22 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +-- Check that setting 'log_comment' is ignored in query cache lookups + +SYSTEM DROP QUERY CACHE; + +SELECT 1 SETTINGS use_query_cache = 1, log_comment='aaa' FORMAT Null; +SELECT 1 SETTINGS use_query_cache = 1, log_comment='bbb' FORMAT Null; +SELECT 1 SETTINGS use_query_cache = 1, log_comment='aaa' FORMAT Null; + +SYSTEM FLUSH LOGS query_log; + +SELECT log_comment, ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses'] +FROM system.query_log +WHERE type = 'QueryFinish' + AND event_time > now() - 600 + AND current_database = currentDatabase() + AND query LIKE 'SELECT 1 SETTINGS use_query_cache%' +ORDER BY event_time_microseconds; + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_metrics/ast.json b/parser/testdata/02494_query_cache_metrics/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_metrics/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_metrics/metadata.json b/parser/testdata/02494_query_cache_metrics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_metrics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_metrics/query.sql b/parser/testdata/02494_query_cache_metrics/query.sql new file mode 100644 index 000000000..9ca1b0f65 --- /dev/null +++ b/parser/testdata/02494_query_cache_metrics/query.sql @@ -0,0 +1,11 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SYSTEM DROP QUERY CACHE; + +-- Create an entry in the query cache +SELECT 1 SETTINGS use_query_cache = true FORMAT Null; + +SELECT metric, value FROM system.metrics WHERE metric = 'QueryCacheEntries'; + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_min_query_duration/ast.json b/parser/testdata/02494_query_cache_min_query_duration/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_min_query_duration/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_min_query_duration/metadata.json b/parser/testdata/02494_query_cache_min_query_duration/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_min_query_duration/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_min_query_duration/query.sql b/parser/testdata/02494_query_cache_min_query_duration/query.sql new file mode 100644 index 
000000000..ee0b73f43 --- /dev/null +++ b/parser/testdata/02494_query_cache_min_query_duration/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SYSTEM DROP QUERY CACHE; + +-- This creates an entry in the query cache ... +SELECT 1 SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; + +SELECT '---'; + +-- ... but this does not because the query executes much faster than the specified minimum query duration for caching the result +SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_duration = 10000; +SELECT COUNT(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_min_query_runs/ast.json b/parser/testdata/02494_query_cache_min_query_runs/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_min_query_runs/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_min_query_runs/metadata.json b/parser/testdata/02494_query_cache_min_query_runs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_min_query_runs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_min_query_runs/query.sql b/parser/testdata/02494_query_cache_min_query_runs/query.sql new file mode 100644 index 000000000..ffdf12169 --- /dev/null +++ b/parser/testdata/02494_query_cache_min_query_runs/query.sql @@ -0,0 +1,32 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SYSTEM DROP QUERY CACHE; + +-- Cache the query result after the 1st query invocation +SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_runs = 0; +SELECT COUNT(*) FROM system.query_cache; + +SELECT '---'; + +SYSTEM DROP QUERY CACHE; + +-- Cache the query result after the 2nd query invocation +SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_runs = 1; +SELECT COUNT(*) FROM system.query_cache; +SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_runs = 1; +SELECT COUNT(*) FROM system.query_cache; + +SELECT '---'; + +SYSTEM DROP QUERY CACHE; + +-- Cache the query result after the 3rd query invocation +SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_runs = 2; +SELECT COUNT(*) FROM system.query_cache; +SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_runs = 2; +SELECT COUNT(*) FROM system.query_cache; +SELECT 1 SETTINGS use_query_cache = true, query_cache_min_query_runs = 2; +SELECT COUNT(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_nondeterministic_functions/ast.json b/parser/testdata/02494_query_cache_nondeterministic_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_nondeterministic_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_nondeterministic_functions/metadata.json b/parser/testdata/02494_query_cache_nondeterministic_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02494_query_cache_nondeterministic_functions/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SYSTEM DROP QUERY CACHE; + +SELECT '-- query_cache_nondeterministic_function_handling = throw'; +SELECT count(now()) SETTINGS use_query_cache = true; -- { serverError QUERY_CACHE_USED_WITH_NONDETERMINISTIC_FUNCTIONS } +SELECT count(now()) SETTINGS use_query_cache = true, query_cache_nondeterministic_function_handling = 'throw'; -- { serverError QUERY_CACHE_USED_WITH_NONDETERMINISTIC_FUNCTIONS } +SELECT count(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; + +SELECT '-- query_cache_nondeterministic_function_handling = save'; +SELECT count(now()) SETTINGS use_query_cache = true, query_cache_nondeterministic_function_handling = 'save'; +SELECT count(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; + +SELECT '-- query_cache_nondeterministic_function_handling = ignore'; +SELECT count(now()) SETTINGS use_query_cache = true, query_cache_nondeterministic_function_handling = 'ignore'; +SELECT count(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_normalize_ast/ast.json b/parser/testdata/02494_query_cache_normalize_ast/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_normalize_ast/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_normalize_ast/metadata.json b/parser/testdata/02494_query_cache_normalize_ast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_normalize_ast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_normalize_ast/query.sql b/parser/testdata/02494_query_cache_normalize_ast/query.sql new file mode 100644 index 000000000..fc45b5a11 --- /dev/null +++ b/parser/testdata/02494_query_cache_normalize_ast/query.sql @@ -0,0 +1,29 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +-- Start with empty query cache (QC) +SYSTEM DROP QUERY CACHE; + +-- Run query whose result gets cached in the query cache. +-- Besides "use_query_cache", pass two more knobs (one QC-specific knob and one non-QC-specific knob). We just care +-- *that* they are passed and not about their effect. +SELECT 1 SETTINGS use_query_cache = true, query_cache_nondeterministic_function_handling = 'save', max_threads = 16; + +-- Check that entry in QC exists +SELECT COUNT(*) FROM system.query_cache; + +-- Run the same SELECT but with different SETTINGS. We want its result to be served from the QC (--> passive mode, achieve it by +-- disabling active mode) +SELECT '---'; +SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false, max_threads = 16; + +-- Technically, both SELECT queries have different ASTs, leading to different QC keys. QC does some AST normalization (erase all +-- QC-related settings) such that the keys match regardless. Verify by checking that the second query caused a QC hit. 
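-- (Editorial note, not part of the original test.) The normalization only erases query-cache-related
-- settings from the AST; all other settings remain part of the cache key. A hypothetical counter-example,
-- assuming that behavior:
--   SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false, max_threads = 8;
-- would presumably hash to a different key than the max_threads = 16 variants above and therefore
-- miss the cache rather than hit it.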
+SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses'] +FROM system.query_log +WHERE type = 'QueryFinish' + AND current_database = currentDatabase() + AND query = 'SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false, max_threads = 16;'; + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_passive_usage/ast.json b/parser/testdata/02494_query_cache_passive_usage/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_passive_usage/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_passive_usage/metadata.json b/parser/testdata/02494_query_cache_passive_usage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_passive_usage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_passive_usage/query.sql b/parser/testdata/02494_query_cache_passive_usage/query.sql new file mode 100644 index 000000000..f675695c3 --- /dev/null +++ b/parser/testdata/02494_query_cache_passive_usage/query.sql @@ -0,0 +1,39 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +-- Start with empty query cache (QC). +SYSTEM DROP QUERY CACHE; + +-- By default, don't write query result into QC. +SELECT 1; +SELECT COUNT(*) FROM system.query_cache; + +SELECT '-----'; + +-- Try to retrieve query from empty QC using the passive mode. Do this by disabling the active mode. The cache should still be empty (no insert). +SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false; +SELECT COUNT(*) FROM system.query_cache; + +SELECT '-----'; + +-- Put query into cache. +SELECT 1 SETTINGS use_query_cache = true; +SELECT COUNT(*) FROM system.query_cache; + +SELECT '-----'; + +/* Run same query with passive mode again. There must still be one entry in the QC and we must have a QC hit. 
*/ + +SELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false; +SELECT COUNT(*) FROM system.query_cache; + +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['QueryCacheHits'], ProfileEvents['QueryCacheMisses'] +FROM system.query_log +WHERE type = 'QueryFinish' + AND current_database = currentDatabase() + /* NOTE: client incorrectly joins comments from the previous line into the query, hence LIKE */ + AND query LIKE '%\nSELECT 1 SETTINGS use_query_cache = true, enable_writes_to_query_cache = false;' +ORDER BY event_time_microseconds; + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_query_log/ast.json b/parser/testdata/02494_query_cache_query_log/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_query_log/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_query_log/metadata.json b/parser/testdata/02494_query_cache_query_log/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_query_log/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_query_log/query.sql b/parser/testdata/02494_query_cache_query_log/query.sql new file mode 100644 index 000000000..9ada6e1f9 --- /dev/null +++ b/parser/testdata/02494_query_cache_query_log/query.sql @@ -0,0 +1,67 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SYSTEM DROP QUERY CACHE; + +-- DROP TABLE system.query_log; -- debugging + + + +SELECT '-- Run a query with query cache not enabled'; +SELECT 124437993; + +SYSTEM FLUSH LOGS query_log; + +-- Field 'query_cache_usage' should be 'None' +SELECT type, query, query_cache_usage +FROM system.query_log +WHERE current_database = currentDatabase() + AND query = 'SELECT 124437993;' + AND type = 'QueryFinish' +ORDER BY type, query_cache_usage; + + + +SELECT '-- Run a query with query cache enabled'; +SELECT 124437994 SETTINGS use_query_cache = 1; + +SYSTEM FLUSH LOGS query_log; + +-- Field 'query_cache_usage' should be 'Write' +SELECT type, query, query_cache_usage +FROM system.query_log +WHERE current_database = currentDatabase() + AND query = 'SELECT 124437994 SETTINGS use_query_cache = 1;' + AND type = 'QueryFinish' +ORDER BY type, query_cache_usage; + + + +SELECT '-- Run the same query with query cache enabled'; +SELECT 124437994 SETTINGS use_query_cache = 1; + +SYSTEM FLUSH LOGS query_log; + +-- Field 'query_cache_usage' should be 'Read' +SELECT type, query, query_cache_usage +FROM system.query_log +WHERE current_database = currentDatabase() + AND query = 'SELECT 124437994 SETTINGS use_query_cache = 1;' + AND type = 'QueryFinish' +ORDER BY type, query_cache_usage; + + + +SELECT '-- Throw exception with query cache enabled'; +SELECT 124437995, throwIf(1) SETTINGS use_query_cache = 1; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } + +SYSTEM FLUSH LOGS query_log; + +-- Field 'query_cache_usage' should be 'None' +SELECT query, query_cache_usage +FROM system.query_log +WHERE current_database = currentDatabase() + AND query = 'SELECT 124437995, throwIf(1) SETTINGS use_query_cache = 1;' + AND type = 'ExceptionWhileProcessing'; + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_secrets/ast.json b/parser/testdata/02494_query_cache_secrets/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_secrets/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/02494_query_cache_secrets/metadata.json b/parser/testdata/02494_query_cache_secrets/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_secrets/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_secrets/query.sql b/parser/testdata/02494_query_cache_secrets/query.sql new file mode 100644 index 000000000..66427df8f --- /dev/null +++ b/parser/testdata/02494_query_cache_secrets/query.sql @@ -0,0 +1,13 @@ +-- Tags: no-parallel, no-fasttest +-- Tag no-fasttest: Depends on OpenSSL +-- Tag no-parallel: Messes with internal cache + +SYSTEM DROP QUERY CACHE; + +-- Cache the result of a query with a secret in the query cache +SELECT hex(encrypt('aes-128-ecb', 'plaintext', 'passwordpassword')) SETTINGS use_query_cache = true; + +-- The secret should not be revealed in system.query_cache +SELECT query FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_sparse_columns/ast.json b/parser/testdata/02494_query_cache_sparse_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_sparse_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_sparse_columns/metadata.json b/parser/testdata/02494_query_cache_sparse_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_sparse_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_sparse_columns/query.sql b/parser/testdata/02494_query_cache_sparse_columns/query.sql new file mode 100644 index 000000000..6266996ac --- /dev/null +++ b/parser/testdata/02494_query_cache_sparse_columns/query.sql @@ -0,0 +1,21 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS t_cache_sparse; +SYSTEM DROP QUERY CACHE; + +CREATE TABLE t_cache_sparse (id UInt64, v UInt64) +ENGINE = MergeTree ORDER BY id +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9; + +SYSTEM STOP MERGES t_cache_sparse; + +INSERT INTO t_cache_sparse SELECT number, number FROM numbers(10000); +INSERT INTO t_cache_sparse SELECT number, 0 FROM numbers(10000); + +SET max_threads = 1; + +SELECT v FROM t_cache_sparse SETTINGS use_query_cache = 1, max_threads = 1 FORMAT Null; +SELECT v FROM t_cache_sparse SETTINGS use_query_cache = 1, max_threads = 1 FORMAT Null; +SELECT count() FROM system.query_cache WHERE query LIKE 'SELECT v FROM t_cache_sparse%'; + +DROP TABLE t_cache_sparse; diff --git a/parser/testdata/02494_query_cache_squash_partial_results/ast.json b/parser/testdata/02494_query_cache_squash_partial_results/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_squash_partial_results/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_squash_partial_results/metadata.json b/parser/testdata/02494_query_cache_squash_partial_results/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_squash_partial_results/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_squash_partial_results/query.sql b/parser/testdata/02494_query_cache_squash_partial_results/query.sql new file mode 100644 index 000000000..391cc7f7e --- /dev/null +++ b/parser/testdata/02494_query_cache_squash_partial_results/query.sql @@ -0,0 +1,51 @@ +-- Tags: no-parallel +-- Tag no-parallel: 
Messes with internal cache + +SYSTEM DROP QUERY CACHE; +DROP TABLE IF EXISTS t; + +-- Create test table with "many" rows +CREATE TABLE t(c String) ENGINE=MergeTree ORDER BY c; +SYSTEM STOP MERGES t; -- retain multiple parts to make the SELECT process multiple chunks +INSERT INTO t values ('abc') ('def') ('ghi') ('jkl'); +INSERT INTO t values ('abc') ('def') ('ghi') ('jkl'); +INSERT INTO t values ('abc') ('def') ('ghi') ('jkl'); +INSERT INTO t values ('abc') ('def') ('ghi') ('jkl'); +INSERT INTO t values ('abc') ('def') ('ghi') ('jkl'); +INSERT INTO t values ('abc') ('def') ('ghi') ('jkl'); +INSERT INTO t values ('abc') ('def') ('ghi') ('jkl'); +INSERT INTO t values ('abc') ('def') ('ghi') ('jkl'); +INSERT INTO t values ('abc') ('def') ('ghi') ('jkl'); +INSERT INTO t values ('abc') ('def') ('ghi') ('jkl'); +INSERT INTO t values ('abc') ('def') ('ghi') ('jkl'); +INSERT INTO t values ('abc') ('def') ('ghi') ('jkl'); +INSERT INTO t values ('abc') ('def') ('ghi') ('jkl'); +INSERT INTO t values ('abc') ('def') ('ghi') ('jkl'); +INSERT INTO t values ('abc') ('def') ('ghi') ('jkl'); +INSERT INTO t values ('abc') ('def') ('ghi') ('jkl'); +INSERT INTO t values ('abc') ('def') ('ghi') ('jkl'); + +-- Run query which reads multiple chunks (small max_block_size), cache result in query cache, force squashing of partial results +SELECT '-- insert with enabled squashing'; +SELECT * FROM t ORDER BY c +SETTINGS max_block_size = 3, use_query_cache = true, query_cache_squash_partial_results = true; + +-- Run again to check that no bad things happen and that the result is as expected +SELECT '-- read from cache'; +SELECT * FROM t ORDER BY c +SETTINGS max_block_size = 3, use_query_cache = true; + +SYSTEM DROP QUERY CACHE; + +-- Run query which reads multiple chunks (small max_block_size), cache result in query cache, but **disable** squashing of partial results +SELECT '-- insert with disabled squashing'; +SELECT * FROM t ORDER BY c +SETTINGS max_block_size = 3, use_query_cache = true, query_cache_squash_partial_results = false; + +-- Run again to check that no bad things happen and that the result is as expected +SELECT '-- read from cache'; +SELECT * FROM t ORDER BY c +SETTINGS max_block_size = 3, use_query_cache = true; + +DROP TABLE t; +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_system_tables/ast.json b/parser/testdata/02494_query_cache_system_tables/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_system_tables/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_system_tables/metadata.json b/parser/testdata/02494_query_cache_system_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_system_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_system_tables/query.sql b/parser/testdata/02494_query_cache_system_tables/query.sql new file mode 100644 index 000000000..12eaec0f8 --- /dev/null +++ b/parser/testdata/02494_query_cache_system_tables/query.sql @@ -0,0 +1,70 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SYSTEM DROP QUERY CACHE; + +SELECT 'The Default for query_cache_system_table_handling is = throw'; +-- Test that the query cache rejects queries that involve system tables. 
+SELECT * FROM system.one SETTINGS use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_SYSTEM_TABLE } +SELECT count(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; + +SELECT 'Check behavior of query_cache_system_table_handling = throw'; +-- Test that the query cache rejects queries that involve system tables. +SELECT * FROM system.one SETTINGS use_query_cache = 1, query_cache_system_table_handling = 'throw'; -- { serverError QUERY_CACHE_USED_WITH_SYSTEM_TABLE } +SELECT count(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; + +SELECT 'Check behavior of query_cache_system_table_handling = save'; +-- Test that the query cache saves the result of queries that involve system tables. +SELECT * FROM system.one SETTINGS use_query_cache = 1, query_cache_system_table_handling = 'save'; +SELECT count(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; + +SELECT 'Check behavior of query_cache_system_table_handling = ignore'; +-- Test that the query cache ignores the result of queries that involve system tables. +SELECT * FROM system.one SETTINGS use_query_cache = 1, query_cache_system_table_handling = 'ignore'; +SELECT count(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; + +SELECT 'Other tests'; + +-- Edge case which doesn't work well due to conceptual reasons (QueryCache is AST-based), test it anyway to have it documented. +USE system; +SELECT * FROM one SETTINGS use_query_cache = 1; -- doesn't throw but should + +-- This query uses system.zero internally. Since the query cache works at AST level it does not "see" system.zero and must not complain. +SELECT * SETTINGS use_query_cache = 1; + +-- information_schema is also treated as a system table +SELECT * FROM information_schema.tables SETTINGS use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_SYSTEM_TABLE } +SELECT * FROM INFORMATION_SCHEMA.TABLES SETTINGS use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_SYSTEM_TABLE } + +-- Issue #69010: A system table name appears as a literal. That's okay and must not throw. +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (uid Int16, name String) ENGINE = Memory; +SELECT * FROM tab WHERE name = 'system.one' SETTINGS use_query_cache = true; +DROP TABLE tab; + +-- System tables can be "hidden" inside e.g. table functions +SELECT * FROM clusterAllReplicas('test_shard_localhost', system.one) SETTINGS use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_SYSTEM_TABLE } +SELECT * FROM clusterAllReplicas('test_shard_localhost', 'system.one') SETTINGS use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_SYSTEM_TABLE } +-- Note how in the previous query ^^ 'system.one' is also a literal. ClusterAllReplicas gets special handling. + +-- Criminal edge case where a user creates a table named "system". The query cache must not reject queries against it. +DROP TABLE IF EXISTS system; +CREATE TABLE system (c UInt64) ENGINE = Memory; +SELECT * FROM system SETTINGS use_query_cache = 1; +DROP TABLE system; + +-- But queries against system.system are rejected. 
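-- (Editorial note, not part of the original test.) The rejection is name-based at the AST level:
-- any table qualified with the 'system' database counts as a system table, regardless of who
-- created it, as system.system below illustrates. A further hypothetical example, assuming the
-- same behavior:
--   CREATE TABLE system.foo (x UInt64) ENGINE = Memory;
--   SELECT * FROM system.foo SETTINGS use_query_cache = 1; -- would presumably also throw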
+DROP TABLE IF EXISTS system.system; +CREATE TABLE system.system (c UInt64) ENGINE = Memory; +SELECT * FROM system.system SETTINGS use_query_cache = 1; -- { serverError QUERY_CACHE_USED_WITH_SYSTEM_TABLE } +DROP TABLE system.system; + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_tag/ast.json b/parser/testdata/02494_query_cache_tag/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_tag/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_tag/metadata.json b/parser/testdata/02494_query_cache_tag/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_tag/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_tag/query.sql b/parser/testdata/02494_query_cache_tag/query.sql new file mode 100644 index 000000000..62d36f6eb --- /dev/null +++ b/parser/testdata/02494_query_cache_tag/query.sql @@ -0,0 +1,34 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SYSTEM DROP QUERY CACHE; + +-- Store the result of a single query with a tag in the query cache and check that the system table knows about the tag +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'abc'; + +SELECT query, tag FROM system.query_cache; + +SELECT '---'; + +SYSTEM DROP QUERY CACHE; + +-- Store the result of the same query with two different tags. The cache should store two entries. +SELECT 1 SETTINGS use_query_cache = true; -- default query_cache_tag = '' +SELECT 1 SETTINGS use_query_cache = true, query_cache_tag = 'abc'; +SELECT query, tag FROM system.query_cache ORDER BY ALL; + +SELECT '---'; + +SYSTEM DROP QUERY CACHE; + +-- Like before but the tag is set standalone. + +SET query_cache_tag = 'abc'; +SELECT 1 SETTINGS use_query_cache = true; + +SET query_cache_tag = 'def'; +SELECT 1 SETTINGS use_query_cache = true; + +SELECT query, tag FROM system.query_cache ORDER BY ALL; + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_totals_extremes/ast.json b/parser/testdata/02494_query_cache_totals_extremes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_totals_extremes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_totals_extremes/metadata.json b/parser/testdata/02494_query_cache_totals_extremes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_totals_extremes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_totals_extremes/query.sql b/parser/testdata/02494_query_cache_totals_extremes/query.sql new file mode 100644 index 000000000..8fde4a668 --- /dev/null +++ b/parser/testdata/02494_query_cache_totals_extremes/query.sql @@ -0,0 +1,46 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SYSTEM DROP QUERY CACHE; +DROP TABLE IF EXISTS tbl; + +CREATE TABLE tbl (key UInt64, agg UInt64) ENGINE = MergeTree ORDER BY key; +INSERT INTO tbl VALUES (1, 3), (2, 2), (1, 4), (1, 1); + +-- A query with totals calculation. The result should be written into / read from the query cache. +-- Check that both queries produce the same result and that a query cache entry exists. 
+SELECT '1st run:'; +SELECT key, sum(agg) FROM tbl GROUP BY key WITH totals ORDER BY key SETTINGS use_query_cache = 1; +SELECT '2nd run:'; +SELECT key, sum(agg) FROM tbl GROUP BY key WITH totals ORDER BY key SETTINGS use_query_cache = 1; + +SELECT count(*) FROM system.query_cache; + +SELECT '---'; + +SYSTEM DROP QUERY CACHE; + +-- A query with extremes calculation. The result should be written into / read from the query cache. +-- Check that both queries produce the same result. +SELECT '1st run:'; +SELECT key, sum(agg) FROM tbl GROUP BY key ORDER BY key SETTINGS use_query_cache = 1, extremes = 1; +SELECT '2nd run:'; +SELECT key, sum(agg) FROM tbl GROUP BY key ORDER BY key SETTINGS use_query_cache = 1, extremes = 1; + +SELECT count(*) FROM system.query_cache; + +SELECT '---'; + +SYSTEM DROP QUERY CACHE; + +-- A query with totals and extremes calculation. The result should be written into / read from the query cache. +-- Check that both queries produce the same result. +SELECT '1st run:'; +SELECT key, sum(agg) FROM tbl GROUP BY key WITH totals ORDER BY key SETTINGS use_query_cache = 1, extremes = 1; +SELECT '2nd run:'; +SELECT key, sum(agg) FROM tbl GROUP BY key WITH totals ORDER BY key SETTINGS use_query_cache = 1, extremes = 1; + +SELECT count(*) FROM system.query_cache; +DROP TABLE IF EXISTS tbl; + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_ttl_long/ast.json b/parser/testdata/02494_query_cache_ttl_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_ttl_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_ttl_long/metadata.json b/parser/testdata/02494_query_cache_ttl_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_ttl_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_ttl_long/query.sql b/parser/testdata/02494_query_cache_ttl_long/query.sql new file mode 100644 index 000000000..acaf34ee8 --- /dev/null +++ b/parser/testdata/02494_query_cache_ttl_long/query.sql @@ -0,0 +1,29 @@ +-- Tags: no-fasttest, no-parallel, long +-- Tag no-fasttest: Test runtime is > 6 sec +-- Tag long: Test runtime is > 6 sec +-- Tag no-parallel: Messes with internal cache + +SYSTEM DROP QUERY CACHE; + +-- Cache query result into query cache with a TTL of 3 sec +SELECT 1 SETTINGS use_query_cache = true, query_cache_ttl = 3; + +-- Expect one non-stale cache entry +SELECT COUNT(*) FROM system.query_cache; +SELECT stale FROM system.query_cache; + +-- Wait until entry is expired +SELECT sleep(3); +SELECT sleep(3); +SELECT stale FROM system.query_cache; + +SELECT '---'; + +-- Run same query as before +SELECT 1 SETTINGS use_query_cache = true, query_cache_ttl = 3; + +-- The entry should have been refreshed (non-stale) +SELECT COUNT(*) FROM system.query_cache; +SELECT stale FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_udf_sql/ast.json b/parser/testdata/02494_query_cache_udf_sql/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_udf_sql/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_udf_sql/metadata.json b/parser/testdata/02494_query_cache_udf_sql/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_udf_sql/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02494_query_cache_udf_sql/query.sql b/parser/testdata/02494_query_cache_udf_sql/query.sql new file mode 100644 index 000000000..18870f0e8 --- /dev/null +++ b/parser/testdata/02494_query_cache_udf_sql/query.sql @@ -0,0 +1,27 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +-- Test for issue #77553: SQL-defined UDFs may be non-deterministic. The query cache should treat them as such, i.e. reject them. +-- Also see test_executable_function_query_cache in tests/integration/test_executable_user_defined_function + +SYSTEM DROP QUERY CACHE; +DROP FUNCTION IF EXISTS udf; + +CREATE FUNCTION udf AS (a) -> a + 1; + +SELECT '-- query_cache_nondeterministic_function_handling = throw'; +SELECT udf(1) FORMAT Null SETTINGS use_query_cache = true, query_cache_nondeterministic_function_handling = 'throw'; -- { serverError QUERY_CACHE_USED_WITH_NONDETERMINISTIC_FUNCTIONS } +SELECT count(*) FROM system.query_cache; +SYSTEM DROP QUERY CACHE; + +SELECT '-- query_cache_nondeterministic_function_handling = save'; +SELECT udf(1) FORMAT Null SETTINGS use_query_cache = true, query_cache_nondeterministic_function_handling = 'save'; +SELECT count(*) FROM system.query_cache; +SYSTEM DROP QUERY CACHE; + +SELECT '-- query_cache_nondeterministic_function_handling = ignore'; +SELECT udf(1) FORMAT Null SETTINGS use_query_cache = true, query_cache_nondeterministic_function_handling = 'ignore'; +SELECT count(*) FROM system.query_cache; +SYSTEM DROP QUERY CACHE; + +DROP FUNCTION udf; diff --git a/parser/testdata/02494_query_cache_user_quotas/ast.json b/parser/testdata/02494_query_cache_user_quotas/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_user_quotas/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_user_quotas/metadata.json b/parser/testdata/02494_query_cache_user_quotas/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_user_quotas/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_user_quotas/query.sql b/parser/testdata/02494_query_cache_user_quotas/query.sql new file mode 100644 index 000000000..123c9d211 --- /dev/null +++ b/parser/testdata/02494_query_cache_user_quotas/query.sql @@ -0,0 +1,29 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +-- Tests per-user quotas of the query cache. Settings 'query_cache_max_size_in_bytes' and 'query_cache_max_entries' are actually supposed to +-- be used in a settings profile, together with a readonly constraint. For simplicity, test both settings stand-alone in a stateless test +-- instead of an integration test - the relevant logic will still be covered by that. 
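-- (Editorial sketch, not part of the original test.) In a real deployment the quotas would
-- typically be pinned in a settings profile with readonly constraints, e.g. in users.xml
-- (the profile name and values here are hypothetical):
--   <profiles>
--     <restricted>
--       <query_cache_max_size_in_bytes>10000</query_cache_max_size_in_bytes>
--       <query_cache_max_entries>100</query_cache_max_entries>
--       <constraints>
--         <query_cache_max_size_in_bytes><readonly/></query_cache_max_size_in_bytes>
--         <query_cache_max_entries><readonly/></query_cache_max_entries>
--       </constraints>
--     </restricted>
--   </profiles>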
+ +SYSTEM DROP QUERY CACHE; + +SET query_cache_max_size_in_bytes = 1; +SELECT 'Run SELECT with quota that current user may use only 1 byte in the query cache', 1 SETTINGS use_query_cache = true; +SELECT 'Expect no entries in the query cache', count(*) FROM system.query_cache; + +SET query_cache_max_size_in_bytes = DEFAULT; +SELECT 'Run SELECT again but w/o quota', 1 SETTINGS use_query_cache = true; +SELECT 'Expect one entry in the query cache', count(*) FROM system.query_cache; + +SELECT '---'; +SYSTEM DROP QUERY CACHE; + +SELECT 'Run SELECT which writes its result in the query cache', 1 SETTINGS use_query_cache = true; +SET query_cache_max_entries = 1; +SELECT 'Run another SELECT with quota that current user may write only 1 entry in the query cache', 1 SETTINGS use_query_cache = true; +SELECT 'Expect one entry in the query cache', count(*) FROM system.query_cache; +SET query_cache_max_entries = DEFAULT; +SELECT 'Run another SELECT w/o quota', 1 SETTINGS use_query_cache = true; +SELECT 'Expect two entries in the query cache', count(*) FROM system.query_cache; + +SYSTEM DROP QUERY CACHE; diff --git a/parser/testdata/02494_query_cache_user_quotas_after_drop/ast.json b/parser/testdata/02494_query_cache_user_quotas_after_drop/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02494_query_cache_user_quotas_after_drop/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02494_query_cache_user_quotas_after_drop/metadata.json b/parser/testdata/02494_query_cache_user_quotas_after_drop/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02494_query_cache_user_quotas_after_drop/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02494_query_cache_user_quotas_after_drop/query.sql b/parser/testdata/02494_query_cache_user_quotas_after_drop/query.sql new file mode 100644 index 000000000..f09e43ee0 --- /dev/null +++ b/parser/testdata/02494_query_cache_user_quotas_after_drop/query.sql @@ -0,0 +1,41 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +-- Tests per-user quotas of the query cache. Settings 'query_cache_max_size_in_bytes' and 'query_cache_max_entries' are actually supposed to +-- be used in a settings profile, together with a readonly constraint. For simplicity, test both settings stand-alone in a stateless test +-- instead of an integration test - the relevant logic will still be covered by that. + +SYSTEM DROP QUERY CACHE; + +-- Run SELECT with quota that current user may write only 1 entry in the query cache +SET query_cache_max_entries = 1; +SELECT 'a' SETTINGS use_query_cache = true; +SELECT 'b' SETTINGS use_query_cache = true; +SELECT count(*) FROM system.query_cache; -- expect 1 entry + +-- Run SELECTs again but w/o quota +SET query_cache_max_entries = DEFAULT; +SELECT 'c' SETTINGS use_query_cache = true; +SELECT 'd' SETTINGS use_query_cache = true; +SELECT count(*) FROM system.query_cache; -- expect 3 entries + +SYSTEM DROP QUERY CACHE; + +-- Run the same as above after a DROP QUERY CACHE. 
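-- (Editorial note, not part of the original test.) Repeating the block verifies that SYSTEM DROP
-- QUERY CACHE also resets the per-user accounting: the quota of one entry can be consumed again
-- instead of staying exhausted from the first round.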
+SELECT '--'; + +SET query_cache_max_entries = 1; +SELECT 'a' SETTINGS use_query_cache = true; +SELECT 'b' SETTINGS use_query_cache = true; +SELECT count(*) FROM system.query_cache; -- expect 1 entry + +-- Run SELECTs again but w/o quota +SET query_cache_max_entries = DEFAULT; +SELECT 'c' SETTINGS use_query_cache = true; +SELECT 'd' SETTINGS use_query_cache = true; +SELECT count(*) FROM system.query_cache; -- expect 3 entries + +SYSTEM DROP QUERY CACHE; + +-- SELECT '---'; + diff --git a/parser/testdata/02495_analyzer_storage_join/ast.json b/parser/testdata/02495_analyzer_storage_join/ast.json new file mode 100644 index 000000000..106739180 --- /dev/null +++ b/parser/testdata/02495_analyzer_storage_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001509217, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02495_analyzer_storage_join/metadata.json b/parser/testdata/02495_analyzer_storage_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02495_analyzer_storage_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02495_analyzer_storage_join/query.sql b/parser/testdata/02495_analyzer_storage_join/query.sql new file mode 100644 index 000000000..e76bd2dd2 --- /dev/null +++ b/parser/testdata/02495_analyzer_storage_join/query.sql @@ -0,0 +1,92 @@ +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS tj; + +SET enable_analyzer = 1; +SET single_join_prefer_left_table = 0; + +CREATE TABLE tj (key2 UInt64, key1 Int64, a UInt64, b UInt64, x UInt64, y UInt64) ENGINE = Join(ALL, RIGHT, key1, key2); +INSERT INTO tj VALUES (2, -2, 20, 200, 2000, 20000), (3, -3, 30, 300, 3000, 30000), (4, -4, 40, 400, 4000, 40000), (5, -5, 50, 500, 5000, 50000), (6, -6, 60, 600, 6000, 60000); + +SELECT '--- no name clashes ---'; + +CREATE TABLE t1 (id2 UInt64, id1 Int64, val UInt64) ENGINE = Memory; +INSERT INTO t1 VALUES (1, -1, 11), (2, -2, 22), (3, -3, 33), (4, -4, 44), (5, -5, 55); + +SELECT * FROM t1 ALL RIGHT JOIN tj ON t1.id1 == tj.key1 AND t1.id2 == tj.key2 ORDER BY key1 FORMAT TSVWithNames; +SELECT id1, val, key1, b, x FROM t1 ALL RIGHT JOIN tj ON t1.id1 == tj.key1 AND t1.id2 == tj.key2 ORDER BY key1 FORMAT TSVWithNames; +SELECT t1.id1, t1.val, tj.key1, tj.b, tj.x FROM t1 ALL RIGHT JOIN tj ON t1.id1 == tj.key1 AND t1.id2 == tj.key2 ORDER BY key1 FORMAT TSVWithNames; +SELECT val, b, x FROM t1 ALL RIGHT JOIN tj ON t1.id1 == tj.key1 AND t1.id2 == tj.key2 ORDER BY key1 FORMAT TSVWithNames; +SELECT val FROM t1 ALL RIGHT JOIN tj ON t1.id1 == tj.key1 AND t1.id2 == tj.key2 ORDER BY key1 FORMAT TSVWithNames; +SELECT x FROM t1 ALL RIGHT JOIN tj ON t1.id1 == tj.key1 AND t1.id2 == tj.key2 ORDER BY key1 FORMAT TSVWithNames; + +SELECT '--- name clashes ---'; + +CREATE TABLE t (key2 UInt64, key1 Int64, b UInt64, x UInt64, val UInt64) ENGINE = Memory; +INSERT INTO t VALUES (1, -1, 11, 111, 1111), (2, -2, 22, 222, 2222), (3, -3, 33, 333, 2222), (4, -4, 44, 444, 4444), (5, -5, 55, 555, 5555); + +SELECT '-- using --'; + +SELECT * FROM t ALL RIGHT JOIN tj USING (key1, key2) ORDER BY key1 FORMAT TSVWithNames; +SELECT key1 FROM t ALL RIGHT JOIN tj USING (key1, key2) ORDER BY key1 FORMAT TSVWithNames; +SELECT t.key1, tj.key1 FROM t ALL RIGHT JOIN tj USING (key1, key2) ORDER BY key1 FORMAT TSVWithNames; +SELECT 
t.key2, tj.key2 FROM t ALL RIGHT JOIN tj USING (key1, key2) ORDER BY key1 FORMAT TSVWithNames; +SELECT t.b, tj.b FROM t ALL RIGHT JOIN tj USING (key1, key2) ORDER BY key1 FORMAT TSVWithNames; +SELECT t.x, tj.b FROM t ALL RIGHT JOIN tj USING (key1, key2) ORDER BY key1 FORMAT TSVWithNames; +SELECT tj.a FROM t ALL RIGHT JOIN tj USING (key1, key2) ORDER BY key1 FORMAT TSVWithNames; +SELECT tj.b FROM t ALL RIGHT JOIN tj USING (key1, key2) ORDER BY key1 FORMAT TSVWithNames; +SELECT tj.x FROM t ALL RIGHT JOIN tj USING (key1, key2) ORDER BY key1 FORMAT TSVWithNames; +SELECT tj.y FROM t ALL RIGHT JOIN tj USING (key1, key2) ORDER BY key1 FORMAT TSVWithNames; +SELECT a FROM t ALL RIGHT JOIN tj USING (key1, key2) ORDER BY key1 FORMAT TSVWithNames; +SELECT b FROM t ALL RIGHT JOIN tj USING (key1, key2) ORDER BY key1 FORMAT TSVWithNames; -- { serverError AMBIGUOUS_IDENTIFIER } +SELECT x FROM t ALL RIGHT JOIN tj USING (key1, key2) ORDER BY key1 FORMAT TSVWithNames; -- { serverError AMBIGUOUS_IDENTIFIER } +SELECT y FROM t ALL RIGHT JOIN tj USING (key1, key2) ORDER BY key1 FORMAT TSVWithNames; +SELECT t.val FROM t ALL RIGHT JOIN tj USING (key1, key2) ORDER BY key1 FORMAT TSVWithNames; +SELECT val FROM t ALL RIGHT JOIN tj USING (key1, key2) ORDER BY key1 FORMAT TSVWithNames; + +SELECT '-- on --'; + +SELECT * FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 ORDER BY t.key1 FORMAT TSVWithNames; +SELECT key1 FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 ORDER BY t.key1 FORMAT TSVWithNames; -- { serverError AMBIGUOUS_IDENTIFIER } +SELECT t.key1, tj.key1 FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 ORDER BY t.key1 FORMAT TSVWithNames; +SELECT t.key2, tj.key2 FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 ORDER BY t.key1 FORMAT TSVWithNames; +SELECT t.b, tj.b FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 ORDER BY t.key1 FORMAT TSVWithNames; +SELECT t.x, tj.b FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 ORDER BY t.key1 FORMAT TSVWithNames; +SELECT tj.a FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 ORDER BY t.key1 FORMAT TSVWithNames; +SELECT tj.b FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 ORDER BY t.key1 FORMAT TSVWithNames; +SELECT tj.x FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 ORDER BY t.key1 FORMAT TSVWithNames; +SELECT tj.y FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 ORDER BY t.key1 FORMAT TSVWithNames; +SELECT a FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 ORDER BY t.key1 FORMAT TSVWithNames; +SELECT b FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 ORDER BY t.key1 FORMAT TSVWithNames; -- { serverError AMBIGUOUS_IDENTIFIER } +SELECT x FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 ORDER BY t.key1 FORMAT TSVWithNames; -- { serverError AMBIGUOUS_IDENTIFIER } +SELECT y FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 ORDER BY t.key1 FORMAT TSVWithNames; +SELECT t.val FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 ORDER BY t.key1 FORMAT TSVWithNames; +SELECT val FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 ORDER BY t.key1 FORMAT TSVWithNames; + +SELECT '--- unsupported and illegal conditions ---'; + +SELECT * FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 + 1 FORMAT TSVWithNames; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } +SELECT * FROM t ALL 
RIGHT JOIN tj ON t.key1 + 1 == tj.key1 AND toUInt64(t.key2 - 1) == tj.key2 ORDER BY t.key1, tj.key2 FORMAT TSVWithNames; -- Ok: expression on the left table + +SELECT * FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 AND 1 == 1 ORDER BY ALL SETTINGS query_plan_use_new_logical_join_step = 0 FORMAT TSVWithNames; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } +SELECT * FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 AND 1 == 1 ORDER BY ALL SETTINGS query_plan_use_new_logical_join_step = 1, enable_parallel_replicas=0 FORMAT TSVWithNames; +SELECT * FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 AND 1 == 2 FORMAT TSVWithNames; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } + +SELECT * FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 AND tj.a == 20 FORMAT TSVWithNames; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } +SELECT * FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 AND t.b == 22 ORDER BY t.key1, tj.key2 FORMAT TSVWithNames; -- Ok: t.b from the left table + +SELECT * FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 AND 1 != 1 FORMAT TSVWithNames; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } +SELECT * FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 AND NULL ORDER BY ALL SETTINGS query_plan_use_new_logical_join_step = 0 FORMAT TSVWithNames; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } +SELECT * FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 AND NULL ORDER BY ALL SETTINGS query_plan_use_new_logical_join_step = 1, enable_parallel_replicas=0 FORMAT TSVWithNames; + +SELECT * FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 AND 'aaa' FORMAT TSVWithNames; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT * FROM t ALL RIGHT JOIN tj ON 'aaa' FORMAT TSVWithNames; -- { serverError INVALID_JOIN_ON_EXPRESSION } + +SELECT * FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 AND 1 ORDER BY ALL SETTINGS query_plan_use_new_logical_join_step = 0 FORMAT TSVWithNames; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } +SELECT * FROM t ALL RIGHT JOIN tj ON t.key1 == tj.key1 AND t.key2 == tj.key2 AND 1 ORDER BY ALL SETTINGS query_plan_use_new_logical_join_step = 1, enable_parallel_replicas=0 FORMAT TSVWithNames; +SELECT * FROM t ALL RIGHT JOIN tj ON 0 FORMAT TSVWithNames; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } +SELECT * FROM t ALL RIGHT JOIN tj ON 1 FORMAT TSVWithNames; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } + +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS tj; diff --git a/parser/testdata/02495_concat_with_separator/ast.json b/parser/testdata/02495_concat_with_separator/ast.json new file mode 100644 index 000000000..c54fb7f57 --- /dev/null +++ b/parser/testdata/02495_concat_with_separator/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00153881, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02495_concat_with_separator/metadata.json b/parser/testdata/02495_concat_with_separator/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02495_concat_with_separator/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02495_concat_with_separator/query.sql b/parser/testdata/02495_concat_with_separator/query.sql new file mode 100644 index 000000000..7167d48a1 --- 
/dev/null +++ b/parser/testdata/02495_concat_with_separator/query.sql @@ -0,0 +1,72 @@ +SET allow_suspicious_low_cardinality_types=1; + +-- negative tests +SELECT concatWithSeparator(materialize('|'), 'a', 'b'); -- { serverError ILLEGAL_COLUMN } +SELECT concatWithSeparator(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +-- special cases +SELECT concatWithSeparator('|') = ''; +SELECT concatWithSeparator('|', 'a') == 'a'; + +SELECT concatWithSeparator('|', 'a', 'b') == 'a|b'; +SELECT concatWithSeparator('|', 'a', materialize('b')) == 'a|b'; +SELECT concatWithSeparator('|', materialize('a'), 'b') == 'a|b'; +SELECT concatWithSeparator('|', materialize('a'), materialize('b')) == 'a|b'; + +SELECT concatWithSeparator('|', 'a', toFixedString('b', 1)) == 'a|b'; +SELECT concatWithSeparator('|', 'a', materialize(toFixedString('b', 1))) == 'a|b'; +SELECT concatWithSeparator('|', materialize('a'), toFixedString('b', 1)) == 'a|b'; +SELECT concatWithSeparator('|', materialize('a'), materialize(toFixedString('b', 1))) == 'a|b'; + +SELECT concatWithSeparator('|', toFixedString('a', 1), 'b') == 'a|b'; +SELECT concatWithSeparator('|', toFixedString('a', 1), materialize('b')) == 'a|b'; +SELECT concatWithSeparator('|', materialize(toFixedString('a', 1)), 'b') == 'a|b'; +SELECT concatWithSeparator('|', materialize(toFixedString('a', 1)), materialize('b')) == 'a|b'; + +SELECT concatWithSeparator('|', toFixedString('a', 1), toFixedString('b', 1)) == 'a|b'; +SELECT concatWithSeparator('|', toFixedString('a', 1), materialize(toFixedString('b', 1))) == 'a|b'; +SELECT concatWithSeparator('|', materialize(toFixedString('a', 1)), toFixedString('b', 1)) == 'a|b'; +SELECT concatWithSeparator('|', materialize(toFixedString('a', 1)), materialize(toFixedString('b', 1))) == 'a|b'; + +SELECT concatWithSeparator(null, 'a', 'b') == null; +SELECT concatWithSeparator('1', null, 'b') == null; +SELECT concatWithSeparator('1', 'a', null) == null; + +-- Const String + non-const non-String/non-FixedString type +SELECT concatWithSeparator('|', 'a', materialize(42 :: Int8)) == 'a|42'; +SELECT concatWithSeparator('|', 'a', materialize(43 :: Int16)) == 'a|43'; +SELECT concatWithSeparator('|', 'a', materialize(44 :: Int32)) == 'a|44'; +SELECT concatWithSeparator('|', 'a', materialize(45 :: Int64)) == 'a|45'; +SELECT concatWithSeparator('|', 'a', materialize(46 :: Int128)) == 'a|46'; +SELECT concatWithSeparator('|', 'a', materialize(47 :: Int256)) == 'a|47'; +SELECT concatWithSeparator('|', 'a', materialize(48 :: UInt8)) == 'a|48'; +SELECT concatWithSeparator('|', 'a', materialize(49 :: UInt16)) == 'a|49'; +SELECT concatWithSeparator('|', 'a', materialize(50 :: UInt32)) == 'a|50'; +SELECT concatWithSeparator('|', 'a', materialize(51 :: UInt64)) == 'a|51'; +SELECT concatWithSeparator('|', 'a', materialize(52 :: UInt128)) == 'a|52'; +SELECT concatWithSeparator('|', 'a', materialize(53 :: UInt256)) == 'a|53'; +SELECT concatWithSeparator('|', 'a', materialize(42.42 :: Float32)) == 'a|42.42'; +SELECT concatWithSeparator('|', 'a', materialize(43.43 :: Float64)) == 'a|43.43'; +SELECT concatWithSeparator('|', 'a', materialize(44.44 :: Decimal(2))) == 'a|44'; +SELECT concatWithSeparator('|', 'a', materialize(true :: Bool)) == 'a|true'; +SELECT concatWithSeparator('|', 'a', materialize(false :: Bool)) == 'a|false'; +SELECT concatWithSeparator('|', 'a', materialize('foo' :: String)) == 'a|foo'; +SELECT concatWithSeparator('|', 'a', materialize('bar' :: FixedString(3))) == 'a|bar'; +SELECT concatWithSeparator('|', 'a', materialize('foo' :: 
Nullable(String))) == 'a|foo'; +SELECT concatWithSeparator('|', 'a', materialize('bar' :: Nullable(FixedString(3)))) == 'a|bar'; +SELECT concatWithSeparator('|', 'a', materialize('foo' :: LowCardinality(String))) == 'a|foo'; +SELECT concatWithSeparator('|', 'a', materialize('bar' :: LowCardinality(FixedString(3)))) == 'a|bar'; +SELECT concatWithSeparator('|', 'a', materialize('foo' :: LowCardinality(Nullable(String)))) == 'a|foo'; +SELECT concatWithSeparator('|', 'a', materialize('bar' :: LowCardinality(Nullable(FixedString(3))))) == 'a|bar'; +SELECT concatWithSeparator('|', 'a', materialize(42 :: LowCardinality(Nullable(UInt32)))) == 'a|42'; +SELECT concatWithSeparator('|', 'a', materialize(42 :: LowCardinality(UInt32))) == 'a|42'; +SELECT concatWithSeparator('|', 'a', materialize('fae310ca-d52a-4923-9e9b-02bf67f4b009' :: UUID)) == 'a|fae310ca-d52a-4923-9e9b-02bf67f4b009'; +SELECT concatWithSeparator('|', 'a', materialize('2023-11-14' :: Date)) == 'a|2023-11-14'; +SELECT concatWithSeparator('|', 'a', materialize('2123-11-14' :: Date32)) == 'a|2123-11-14'; +SELECT concatWithSeparator('|', 'a', materialize('2023-11-14 05:50:12' :: DateTime('Europe/Amsterdam'))) == 'a|2023-11-14 05:50:12'; +SELECT concatWithSeparator('|', 'a', materialize('hallo' :: Enum('hallo' = 1))) == 'a|hallo'; +SELECT concatWithSeparator('|', 'a', materialize(['foo', 'bar'] :: Array(String))) == 'a|[\'foo\',\'bar\']'; +SELECT concatWithSeparator('|', 'a', materialize((42, 'foo') :: Tuple(Int32, String))) == 'a|(42,\'foo\')'; +SELECT concatWithSeparator('|', 'a', materialize(map(42, 'foo') :: Map(Int32, String))) == 'a|{42:\'foo\'}'; +SELECT concatWithSeparator('|', 'a', materialize('122.233.64.201' :: IPv4)) == 'a|122.233.64.201'; +SELECT concatWithSeparator('|', 'a', materialize('2001:0001:130F:0002:0003:09C0:876A:130B' :: IPv6)) == 'a|2001:0001:130F:0002:0003:09C0:876A:130B'; diff --git a/parser/testdata/02495_s3_filter_by_file/ast.json b/parser/testdata/02495_s3_filter_by_file/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02495_s3_filter_by_file/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02495_s3_filter_by_file/metadata.json b/parser/testdata/02495_s3_filter_by_file/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02495_s3_filter_by_file/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02495_s3_filter_by_file/query.sql b/parser/testdata/02495_s3_filter_by_file/query.sql new file mode 100644 index 000000000..8d6d8a8a5 --- /dev/null +++ b/parser/testdata/02495_s3_filter_by_file/query.sql @@ -0,0 +1,22 @@ +-- Tags: no-parallel, no-fasttest + +DROP TABLE IF EXISTS t_s3_filter_02495; + +CREATE TABLE t_s3_filter_02495 (a UInt64) +ENGINE = S3(s3_conn, filename = 'test_02495_{_partition_id}', format = Parquet) +PARTITION BY a; + +INSERT INTO t_s3_filter_02495 SELECT number FROM numbers(10) SETTINGS s3_truncate_on_insert=1; + +SET max_rows_to_read = 5; + +WITH splitByChar('_', _file)[3]::UInt64 AS num +SELECT count(), min(num), max(num) +FROM s3(s3_conn, filename = 'test_02495_*', format = Parquet) +WHERE num >= 5; + +SELECT *, _file +FROM s3(s3_conn, filename = 'test_02495_1', format = Parquet) +WHERE _file = 'test_02495_1'; + +DROP TABLE t_s3_filter_02495; diff --git a/parser/testdata/02495_sum_if_to_count_if_bug/ast.json b/parser/testdata/02495_sum_if_to_count_if_bug/ast.json new file mode 100644 index 000000000..c774bbc27 --- /dev/null +++ 
b/parser/testdata/02495_sum_if_to_count_if_bug/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1024" + }, + { + "explain": " Set" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001745267, + "rows_read": 24, + "bytes_read": 922 + } +} diff --git a/parser/testdata/02495_sum_if_to_count_if_bug/metadata.json b/parser/testdata/02495_sum_if_to_count_if_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02495_sum_if_to_count_if_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02495_sum_if_to_count_if_bug/query.sql b/parser/testdata/02495_sum_if_to_count_if_bug/query.sql new file mode 100644 index 000000000..c00c0ba4e --- /dev/null +++ b/parser/testdata/02495_sum_if_to_count_if_bug/query.sql @@ -0,0 +1,3 @@ +select sum(if((number % NULL) = 2, 0, 1)) FROM numbers(1024) settings optimize_rewrite_sum_if_to_count_if=0; +select sum(if((number % NULL) = 2, 0, 1)) FROM numbers(1024) settings optimize_rewrite_sum_if_to_count_if=1, enable_analyzer=0; +select sum(if((number % NULL) = 2, 0, 1)) FROM numbers(1024) settings optimize_rewrite_sum_if_to_count_if=1, enable_analyzer=1; diff --git a/parser/testdata/02496_format_datetime_in_joda_syntax/ast.json b/parser/testdata/02496_format_datetime_in_joda_syntax/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02496_format_datetime_in_joda_syntax/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02496_format_datetime_in_joda_syntax/metadata.json b/parser/testdata/02496_format_datetime_in_joda_syntax/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02496_format_datetime_in_joda_syntax/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02496_format_datetime_in_joda_syntax/query.sql b/parser/testdata/02496_format_datetime_in_joda_syntax/query.sql new file mode 100644 index 000000000..b2b29cc55 --- /dev/null +++ b/parser/testdata/02496_format_datetime_in_joda_syntax/query.sql @@ -0,0 +1,99 @@ +-- { echoOn } +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'G'), formatDateTimeInJodaSyntax(datetime64, 'G'), 
formatDateTimeInJodaSyntax(date, 'G'), formatDateTimeInJodaSyntax(date32, 'G'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'GG'), formatDateTimeInJodaSyntax(datetime64, 'GG'), formatDateTimeInJodaSyntax(date, 'GG'), formatDateTimeInJodaSyntax(date32, 'GG'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'GGG'), formatDateTimeInJodaSyntax(datetime64, 'GGG'), formatDateTimeInJodaSyntax(date, 'GGG'), formatDateTimeInJodaSyntax(date32, 'GGG'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'GGGG'), formatDateTimeInJodaSyntax(datetime64, 'GGGG'), formatDateTimeInJodaSyntax(date, 'GGGG'), formatDateTimeInJodaSyntax(date32, 'GGGG'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'GGGGG'), formatDateTimeInJodaSyntax(datetime64, 'GGGGG'), formatDateTimeInJodaSyntax(date, 'GGGGG'), formatDateTimeInJodaSyntax(date32, 'GGGGG'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'C'), formatDateTimeInJodaSyntax(datetime64, 'C'), formatDateTimeInJodaSyntax(date, 'C'), formatDateTimeInJodaSyntax(date32, 'C'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'CC'), formatDateTimeInJodaSyntax(datetime64, 'CC'), formatDateTimeInJodaSyntax(date, 'CC'), formatDateTimeInJodaSyntax(date32, 'CC'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'CCC'), formatDateTimeInJodaSyntax(datetime64, 'CCC'), formatDateTimeInJodaSyntax(date, 'CCC'), formatDateTimeInJodaSyntax(date32, 'CCC'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'Y'), formatDateTimeInJodaSyntax(datetime64, 'Y'), formatDateTimeInJodaSyntax(date, 'Y'), formatDateTimeInJodaSyntax(date32, 'Y'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'YY'), formatDateTimeInJodaSyntax(datetime64, 'YY'), formatDateTimeInJodaSyntax(date, 'YY'), formatDateTimeInJodaSyntax(date32, 'YY'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'YYY'), formatDateTimeInJodaSyntax(datetime64, 'YYY'), formatDateTimeInJodaSyntax(date, 'YYY'), formatDateTimeInJodaSyntax(date32, 'YYY'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'YYYY'), formatDateTimeInJodaSyntax(datetime64, 'YYYY'), formatDateTimeInJodaSyntax(date, 'YYYY'), 
formatDateTimeInJodaSyntax(date32, 'YYYY'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'YYYYY'), formatDateTimeInJodaSyntax(datetime64, 'YYYYY'), formatDateTimeInJodaSyntax(date, 'YYYYY'), formatDateTimeInJodaSyntax(date32, 'YYYYY'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'e'), formatDateTimeInJodaSyntax(datetime64, 'e'), formatDateTimeInJodaSyntax(date, 'e'), formatDateTimeInJodaSyntax(date32, 'e'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'ee'), formatDateTimeInJodaSyntax(datetime64, 'ee'), formatDateTimeInJodaSyntax(date, 'ee'), formatDateTimeInJodaSyntax(date32, 'ee'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'E'), formatDateTimeInJodaSyntax(datetime64, 'E'), formatDateTimeInJodaSyntax(date, 'E'), formatDateTimeInJodaSyntax(date32, 'E'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'EE'), formatDateTimeInJodaSyntax(datetime64, 'EE'), formatDateTimeInJodaSyntax(date, 'EE'), formatDateTimeInJodaSyntax(date32, 'EE'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'EEE'), formatDateTimeInJodaSyntax(datetime64, 'EEE'), formatDateTimeInJodaSyntax(date, 'EEE'), formatDateTimeInJodaSyntax(date32, 'EEE'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'EEEE'), formatDateTimeInJodaSyntax(datetime64, 'EEEE'), formatDateTimeInJodaSyntax(date, 'EEEE'), formatDateTimeInJodaSyntax(date32, 'EEEE'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'EEEEE'), formatDateTimeInJodaSyntax(datetime64, 'EEEEE'), formatDateTimeInJodaSyntax(date, 'EEEEE'), formatDateTimeInJodaSyntax(date32, 'EEEEE'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'y'), formatDateTimeInJodaSyntax(datetime64, 'y'), formatDateTimeInJodaSyntax(date, 'y'), formatDateTimeInJodaSyntax(date32, 'y'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'yy'), formatDateTimeInJodaSyntax(datetime64, 'yy'), formatDateTimeInJodaSyntax(date, 'yy'), formatDateTimeInJodaSyntax(date32, 'yy'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'yyy'), formatDateTimeInJodaSyntax(datetime64, 'yyy'), formatDateTimeInJodaSyntax(date, 'yyy'), formatDateTimeInJodaSyntax(date32, 'yyy'); +with '2018-01-12 
22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'yyyy'), formatDateTimeInJodaSyntax(datetime64, 'yyyy'), formatDateTimeInJodaSyntax(date, 'yyyy'), formatDateTimeInJodaSyntax(date32, 'yyyy'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'yyyyy'), formatDateTimeInJodaSyntax(datetime64, 'yyyyy'), formatDateTimeInJodaSyntax(date, 'yyyyy'), formatDateTimeInJodaSyntax(date32, 'yyyyy'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'D'), formatDateTimeInJodaSyntax(datetime64, 'D'), formatDateTimeInJodaSyntax(date, 'D'), formatDateTimeInJodaSyntax(date32, 'D'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'DD'), formatDateTimeInJodaSyntax(datetime64, 'DD'), formatDateTimeInJodaSyntax(date, 'DD'), formatDateTimeInJodaSyntax(date32, 'DD'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'DDD'), formatDateTimeInJodaSyntax(datetime64, 'DDD'), formatDateTimeInJodaSyntax(date, 'DDD'), formatDateTimeInJodaSyntax(date32, 'DDD'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'M'), formatDateTimeInJodaSyntax(datetime64, 'M'), formatDateTimeInJodaSyntax(date, 'M'), formatDateTimeInJodaSyntax(date32, 'M'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'MM'), formatDateTimeInJodaSyntax(datetime64, 'MM'), formatDateTimeInJodaSyntax(date, 'MM'), formatDateTimeInJodaSyntax(date32, 'MM'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'MMM'), formatDateTimeInJodaSyntax(datetime64, 'MMM'), formatDateTimeInJodaSyntax(date, 'MMM'), formatDateTimeInJodaSyntax(date32, 'MMM'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'MMMM'), formatDateTimeInJodaSyntax(datetime64, 'MMMM'), formatDateTimeInJodaSyntax(date, 'MMMM'), formatDateTimeInJodaSyntax(date32, 'MMMM'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'd'), formatDateTimeInJodaSyntax(datetime64, 'd'), formatDateTimeInJodaSyntax(date, 'd'), formatDateTimeInJodaSyntax(date32, 'd'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'dd'), formatDateTimeInJodaSyntax(datetime64, 'dd'), formatDateTimeInJodaSyntax(date, 'dd'), formatDateTimeInJodaSyntax(date32, 'dd'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as 
datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'ddd'), formatDateTimeInJodaSyntax(datetime64, 'ddd'), formatDateTimeInJodaSyntax(date, 'ddd'), formatDateTimeInJodaSyntax(date32, 'ddd'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'a'), formatDateTimeInJodaSyntax(datetime64, 'a'), formatDateTimeInJodaSyntax(date, 'a'), formatDateTimeInJodaSyntax(date32, 'a'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'aa'), formatDateTimeInJodaSyntax(datetime64, 'aa'), formatDateTimeInJodaSyntax(date, 'aa'), formatDateTimeInJodaSyntax(date32, 'aa'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'aaa'), formatDateTimeInJodaSyntax(datetime64, 'aaa'), formatDateTimeInJodaSyntax(date, 'aaa'), formatDateTimeInJodaSyntax(date32, 'aaa'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'K'), formatDateTimeInJodaSyntax(datetime64, 'K'), formatDateTimeInJodaSyntax(date, 'K'), formatDateTimeInJodaSyntax(date32, 'K'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'KK'), formatDateTimeInJodaSyntax(datetime64, 'KK'), formatDateTimeInJodaSyntax(date, 'KK'), formatDateTimeInJodaSyntax(date32, 'KK'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'KKK'), formatDateTimeInJodaSyntax(datetime64, 'KKK'), formatDateTimeInJodaSyntax(date, 'KKK'), formatDateTimeInJodaSyntax(date32, 'KKK'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'h'), formatDateTimeInJodaSyntax(datetime64, 'h'), formatDateTimeInJodaSyntax(date, 'h'), formatDateTimeInJodaSyntax(date32, 'h'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'hh'), formatDateTimeInJodaSyntax(datetime64, 'hh'), formatDateTimeInJodaSyntax(date, 'hh'), formatDateTimeInJodaSyntax(date32, 'hh'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'hhh'), formatDateTimeInJodaSyntax(datetime64, 'hhh'), formatDateTimeInJodaSyntax(date, 'hhh'), formatDateTimeInJodaSyntax(date32, 'hhh'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'H'), formatDateTimeInJodaSyntax(datetime64, 'H'), formatDateTimeInJodaSyntax(date, 'H'), formatDateTimeInJodaSyntax(date32, 'H'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT 
formatDateTimeInJodaSyntax(datetime, 'HH'), formatDateTimeInJodaSyntax(datetime64, 'HH'), formatDateTimeInJodaSyntax(date, 'HH'), formatDateTimeInJodaSyntax(date32, 'HH'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'HHH'), formatDateTimeInJodaSyntax(datetime64, 'HHH'), formatDateTimeInJodaSyntax(date, 'HHH'), formatDateTimeInJodaSyntax(date32, 'HHH'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'k'), formatDateTimeInJodaSyntax(datetime64, 'k'), formatDateTimeInJodaSyntax(date, 'k'), formatDateTimeInJodaSyntax(date32, 'k'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'kk'), formatDateTimeInJodaSyntax(datetime64, 'kk'), formatDateTimeInJodaSyntax(date, 'kk'), formatDateTimeInJodaSyntax(date32, 'kk'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'kkk'), formatDateTimeInJodaSyntax(datetime64, 'kkk'), formatDateTimeInJodaSyntax(date, 'kkk'), formatDateTimeInJodaSyntax(date32, 'kkk'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'm'), formatDateTimeInJodaSyntax(datetime64, 'm'), formatDateTimeInJodaSyntax(date, 'm'), formatDateTimeInJodaSyntax(date32, 'm'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'mm'), formatDateTimeInJodaSyntax(datetime64, 'mm'), formatDateTimeInJodaSyntax(date, 'mm'), formatDateTimeInJodaSyntax(date32, 'mm'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'mmm'), formatDateTimeInJodaSyntax(datetime64, 'mmm'), formatDateTimeInJodaSyntax(date, 'mmm'), formatDateTimeInJodaSyntax(date32, 'mmm'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 's'), formatDateTimeInJodaSyntax(datetime64, 's'), formatDateTimeInJodaSyntax(date, 's'), formatDateTimeInJodaSyntax(date32, 's'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'ss'), formatDateTimeInJodaSyntax(datetime64, 'ss'), formatDateTimeInJodaSyntax(date, 'ss'), formatDateTimeInJodaSyntax(date32, 'ss'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'sss'), formatDateTimeInJodaSyntax(datetime64, 'sss'), formatDateTimeInJodaSyntax(date, 'sss'), formatDateTimeInJodaSyntax(date32, 'sss'); + +with '2018-01-12 22:33:44' as s, toDateTime(s, 'UTC') as datetime, toDateTime64(s, 6, 'UTC') as datetime64, toDate(s) as date, toDate32(s) as date32 select formatDateTimeInJodaSyntax(datetime, 'zzzz'), 
formatDateTimeInJodaSyntax(datetime64, 'zzzz'); + +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'G123DDD'), formatDateTimeInJodaSyntax(datetime64, 'G123DDD'), formatDateTimeInJodaSyntax(date, 'G123DDD'), formatDateTimeInJodaSyntax(date32, 'G123DDD'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'G\'\'DDD'), formatDateTimeInJodaSyntax(datetime64, 'G\'\'DDD'), formatDateTimeInJodaSyntax(date, 'G\'\'DDD'), formatDateTimeInJodaSyntax(date32, 'G\'\'DDD'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'G\'aaa\'DDD'), formatDateTimeInJodaSyntax(datetime64, 'G\'aaa\'DDD'), formatDateTimeInJodaSyntax(date, 'G\'aaa\'DDD'), formatDateTimeInJodaSyntax(date32, 'G\'aaa\'DDD'); +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'G\'a\'\'aa\'DDD'), formatDateTimeInJodaSyntax(datetime64, 'G\'a\'\'aa\'DDD'), formatDateTimeInJodaSyntax(date, 'G\'a\'\'aa\'DDD'), formatDateTimeInJodaSyntax(date32, 'G\'a\'\'aa\'DDD'); + +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'x'), formatDateTimeInJodaSyntax(datetime64, 'x'), formatDateTimeInJodaSyntax(date, 'x'), formatDateTimeInJodaSyntax(date32, 'x'); + +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'w'), formatDateTimeInJodaSyntax(datetime64, 'w'), formatDateTimeInJodaSyntax(date, 'w'), formatDateTimeInJodaSyntax(date32, 'w'); + +with '2018-01-12 22:33:44' as s, toDateTime(s) as datetime, toDateTime64(s, 6) as datetime64, toDate(s) as date, toDate32(s) as date32 SELECT formatDateTimeInJodaSyntax(datetime, 'S'), formatDateTimeInJodaSyntax(datetime64, 'S'), formatDateTimeInJodaSyntax(date, 'S'), formatDateTimeInJodaSyntax(date32, 'S'); +with '2018-01-12 22:33:44.55' as s, toDateTime64(s, 6) as datetime64 SELECT formatDateTimeInJodaSyntax(datetime64, 'S'); +with '2018-01-12 22:33:44.55' as s, toDateTime64(s, 6) as datetime64 SELECT formatDateTimeInJodaSyntax(datetime64, 'SS'); +with '2018-01-12 22:33:44.55' as s, toDateTime64(s, 6) as datetime64 SELECT formatDateTimeInJodaSyntax(datetime64, 'SSS'); +with '2018-01-12 22:33:44.55' as s, toDateTime64(s, 6) as datetime64 SELECT formatDateTimeInJodaSyntax(datetime64, 'SSSS'); +with '2018-01-12 22:33:44.55' as s, toDateTime64(s, 6) as datetime64 SELECT formatDateTimeInJodaSyntax(datetime64, 'SSSSS'); +with '2018-01-12 22:33:44.55' as s, toDateTime64(s, 6) as datetime64 SELECT formatDateTimeInJodaSyntax(datetime64, 'SSSSSS'); +with '2018-01-12 22:33:44.55' as s, toDateTime64(s, 6) as datetime64 SELECT formatDateTimeInJodaSyntax(datetime64, 'SSSSSSS'); +with '2018-01-12 22:33:44.55' as s, toDateTime64(s, 6) as datetime64 SELECT formatDateTimeInJodaSyntax(datetime64, 'SSSSSSSS'); +with '2018-01-12 22:33:44.55' as s, toDateTime64(s, 6) as datetime64 SELECT formatDateTimeInJodaSyntax(datetime64, 'SSSSSSSSS'); +with '2018-01-12 22:33:44.55' as s, 
toDateTime64(s, 6) as datetime64 SELECT formatDateTimeInJodaSyntax(datetime64, 'SSSSSSSSSS'); +-- { echoOff } + +SELECT formatDateTimeInJodaSyntax(toDateTime('2018-01-12 22:33:44'), 'z'); -- { serverError NOT_IMPLEMENTED } +SELECT formatDateTimeInJodaSyntax(toDateTime('2018-01-12 22:33:44'), 'zz'); -- { serverError NOT_IMPLEMENTED } +SELECT formatDateTimeInJodaSyntax(toDateTime('2018-01-12 22:33:44'), 'zzz'); -- { serverError NOT_IMPLEMENTED } +SELECT formatDateTimeInJodaSyntax(toDateTime('2018-01-12 22:33:44'), 'Z'); -- { serverError NOT_IMPLEMENTED } +SELECT formatDateTimeInJodaSyntax(toDateTime('2018-01-12 22:33:44'), 'b'); -- { serverError NOT_IMPLEMENTED } + +SELECT formatDateTimeInJodaSyntax(toDate32('2018-01-12 22:33:44'), 'z'); -- { serverError NOT_IMPLEMENTED } +SELECT formatDateTimeInJodaSyntax(toDate32('2018-01-12 22:33:44'), 'zz'); -- { serverError NOT_IMPLEMENTED } +SELECT formatDateTimeInJodaSyntax(toDate32('2018-01-12 22:33:44'), 'zzz'); -- { serverError NOT_IMPLEMENTED } +SELECT formatDateTimeInJodaSyntax(toDate32('2018-01-12 22:33:44'), 'Z'); -- { serverError NOT_IMPLEMENTED } +SELECT formatDateTimeInJodaSyntax(toDate32('2018-01-12 22:33:44'), 'b'); -- { serverError NOT_IMPLEMENTED } + +SELECT formatDateTimeInJodaSyntax(toDate32('2018-01-12 22:33:44'), '\'aaaa\'\''); -- { serverError BAD_ARGUMENTS } + +-- Bug #64613 +select formatDateTimeInJodaSyntax(toDate('2012-05-29'), 'D'); +select formatDateTimeInJodaSyntax(toDateTime('2010-10-27 13:41:27'), 'D'); diff --git a/parser/testdata/02496_from_unixtime_in_joda_syntax/ast.json b/parser/testdata/02496_from_unixtime_in_joda_syntax/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02496_from_unixtime_in_joda_syntax/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02496_from_unixtime_in_joda_syntax/metadata.json b/parser/testdata/02496_from_unixtime_in_joda_syntax/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02496_from_unixtime_in_joda_syntax/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02496_from_unixtime_in_joda_syntax/query.sql b/parser/testdata/02496_from_unixtime_in_joda_syntax/query.sql new file mode 100644 index 000000000..969dbe4fc --- /dev/null +++ b/parser/testdata/02496_from_unixtime_in_joda_syntax/query.sql @@ -0,0 +1,72 @@ +-- { echoOn } +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'G', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'GG', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'GGG', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'GGGG', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'GGGGG', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'C', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'CC', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'CCC', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'Y', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'YY', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'YYY', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'YYYY', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'YYYYY', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'e', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'ee', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'E', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'EE', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'EEE', 'UTC'); 
+SELECT fromUnixTimestampInJodaSyntax(1669804872, 'EEEE', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'EEEEE', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'y', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'yy', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'yyy', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'yyyy', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'yyyyy', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'D', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'DD', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'DDD', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'M', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'MM', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'MMM', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'MMMM', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'd', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'dd', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'ddd', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'a', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'aa', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'aaa', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'K', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'KK', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'KKK', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'h', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'hh', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'hhh', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'H', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'HH', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'HHH', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'k', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'kk', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'kkk', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'm', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'mm', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'mmm', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 's', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'ss', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'sss', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'zzzz', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'x', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'w', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'S', 'UTC'); +SELECT fromUnixTimestampInJodaSyntax(1669804872, 'SS', 'UTC'); +with '2018-01-12 22:33:44.55' as s, toDateTime64(s, 6) as datetime64 SELECT fromUnixTimestampInJodaSyntax(datetime64, 'S', 'UTC'); +with '2018-01-12 22:33:44.55' as s, toDateTime64(s, 6) as datetime64 SELECT fromUnixTimestampInJodaSyntax(datetime64, 'SS', 'UTC'); +with '2018-01-12 22:33:44.55' as s, toDateTime64(s, 6) as datetime64 SELECT fromUnixTimestampInJodaSyntax(datetime64, 'SSS', 'UTC'); +with '2018-01-12 22:33:44.55' as s, toDateTime64(s, 6) as datetime64 SELECT fromUnixTimestampInJodaSyntax(datetime64, 'SSSS', 'UTC'); +with '2018-01-12 22:33:44.55' as s, toDateTime64(s, 6) as datetime64 SELECT fromUnixTimestampInJodaSyntax(datetime64, 'SSSSS', 'UTC'); +with '2018-01-12 22:33:44.55' as s, toDateTime64(s, 6) as datetime64 SELECT fromUnixTimestampInJodaSyntax(datetime64, 
'SSSSSS', 'UTC'); +with '2018-01-12 22:33:44.55' as s, toDateTime64(s, 6) as datetime64 SELECT fromUnixTimestampInJodaSyntax(datetime64, 'SSSSSSS', 'UTC'); +with '2018-01-12 22:33:44.55' as s, toDateTime64(s, 6) as datetime64 SELECT fromUnixTimestampInJodaSyntax(datetime64, 'SSSSSSSS', 'UTC'); +with '2018-01-12 22:33:44.55' as s, toDateTime64(s, 6) as datetime64 SELECT fromUnixTimestampInJodaSyntax(datetime64, 'SSSSSSSSS', 'UTC'); +-- { echoOff } diff --git a/parser/testdata/02496_storage_s3_profile_events/ast.json b/parser/testdata/02496_storage_s3_profile_events/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02496_storage_s3_profile_events/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02496_storage_s3_profile_events/metadata.json b/parser/testdata/02496_storage_s3_profile_events/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02496_storage_s3_profile_events/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02496_storage_s3_profile_events/query.sql b/parser/testdata/02496_storage_s3_profile_events/query.sql new file mode 100644 index 000000000..20fc303f3 --- /dev/null +++ b/parser/testdata/02496_storage_s3_profile_events/query.sql @@ -0,0 +1,24 @@ +-- Tags: no-parallel, no-fasttest, no-random-settings + +DROP TABLE IF EXISTS t_s3_events_02496; + +CREATE TABLE t_s3_events_02496 (a UInt64) +ENGINE = S3(s3_conn, filename = 'test_02496_{_partition_id}', format = Parquet) +PARTITION BY a; + +INSERT INTO t_s3_events_02496 SELECT number FROM numbers(10) SETTINGS s3_truncate_on_insert=1; + +SET max_threads = 1; +SET parallel_replicas_for_cluster_engines = 0; +SELECT count() FROM s3(s3_conn, filename = 'test_02496_*', format = Parquet, structure = 'a UInt64'); +SYSTEM FLUSH LOGS query_log; + +SELECT + ProfileEvents['S3HeadObject'], + ProfileEvents['S3ListObjects'], + ProfileEvents['RemoteFSPrefetches'], + ProfileEvents['IOBufferAllocBytes'] < 100000 +FROM system.query_log WHERE current_database = currentDatabase() +AND type = 'QueryFinish' AND query ILIKE 'SELECT count() FROM s3%test_02496%'; + +DROP TABLE t_s3_events_02496; diff --git a/parser/testdata/02497_analyzer_sum_if_count_if_pass_crash_fix/ast.json b/parser/testdata/02497_analyzer_sum_if_count_if_pass_crash_fix/ast.json new file mode 100644 index 000000000..fdad00631 --- /dev/null +++ b/parser/testdata/02497_analyzer_sum_if_count_if_pass_crash_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001130728, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02497_analyzer_sum_if_count_if_pass_crash_fix/metadata.json b/parser/testdata/02497_analyzer_sum_if_count_if_pass_crash_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02497_analyzer_sum_if_count_if_pass_crash_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02497_analyzer_sum_if_count_if_pass_crash_fix/query.sql b/parser/testdata/02497_analyzer_sum_if_count_if_pass_crash_fix/query.sql new file mode 100644 index 000000000..7533a3332 --- /dev/null +++ b/parser/testdata/02497_analyzer_sum_if_count_if_pass_crash_fix/query.sql @@ -0,0 +1,4 @@ +SET enable_analyzer = 1; +SET optimize_rewrite_sum_if_to_count_if = 1; + +SELECT sum(if((number % 2) = 0 AS cond_expr, 1 AS one_expr, 0 AS zero_expr) AS if_expr), 
sum(cond_expr), sum(if_expr), one_expr, zero_expr FROM numbers(100); diff --git a/parser/testdata/02497_having_without_actual_aggregation_bug/ast.json b/parser/testdata/02497_having_without_actual_aggregation_bug/ast.json new file mode 100644 index 000000000..1bf6f1bcc --- /dev/null +++ b/parser/testdata/02497_having_without_actual_aggregation_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001754325, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02497_having_without_actual_aggregation_bug/metadata.json b/parser/testdata/02497_having_without_actual_aggregation_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02497_having_without_actual_aggregation_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02497_having_without_actual_aggregation_bug/query.sql b/parser/testdata/02497_having_without_actual_aggregation_bug/query.sql new file mode 100644 index 000000000..e5fd26e48 --- /dev/null +++ b/parser/testdata/02497_having_without_actual_aggregation_bug/query.sql @@ -0,0 +1,8 @@ +SET enable_analyzer = 1; + +select number from numbers_mt(10) having number >= 9; + +select count() from numbers_mt(100) having count() > 1; + +select queryID() as t from numbers(10) with totals having t = initialQueryID(); -- { serverError NOT_IMPLEMENTED } +select count() from (select queryID() as t from remote('127.0.0.{1..3}', numbers(10)) with totals having t = initialQueryID()) settings prefer_localhost_replica = 1; -- { serverError NOT_IMPLEMENTED } diff --git a/parser/testdata/02497_if_transform_strings_to_enum/ast.json b/parser/testdata/02497_if_transform_strings_to_enum/ast.json new file mode 100644 index 000000000..2fc7682a6 --- /dev/null +++ b/parser/testdata/02497_if_transform_strings_to_enum/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001379275, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02497_if_transform_strings_to_enum/metadata.json b/parser/testdata/02497_if_transform_strings_to_enum/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02497_if_transform_strings_to_enum/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02497_if_transform_strings_to_enum/query.sql b/parser/testdata/02497_if_transform_strings_to_enum/query.sql new file mode 100644 index 000000000..c5e254836 --- /dev/null +++ b/parser/testdata/02497_if_transform_strings_to_enum/query.sql @@ -0,0 +1,52 @@ +SET enable_analyzer = 1; +SET optimize_if_transform_strings_to_enum = 1; + +SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; +EXPLAIN QUERY TREE run_passes = 1 SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; + +SELECT number > 5 ? 'censor.net' : 'google' FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT number > 5 ? 'censor.net' : 'google' FROM system.numbers LIMIT 10; +EXPLAIN QUERY TREE run_passes = 1 SELECT number > 5 ? 
'censor.net' : 'google' FROM system.numbers LIMIT 10; + +SELECT CONCAT(transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other'), '1') FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT CONCAT(transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other'), '1') FROM system.numbers LIMIT 10; +EXPLAIN QUERY TREE run_passes = 1 SELECT CONCAT(transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other'), '1') FROM system.numbers LIMIT 10; + +SELECT CONCAT(number > 5 ? 'censor.net' : 'google', '1') FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT CONCAT(number > 5 ? 'censor.net' : 'google', '1') FROM system.numbers LIMIT 10; +EXPLAIN QUERY TREE run_passes = 1 SELECT CONCAT(number > 5 ? 'censor.net' : 'google', '1') FROM system.numbers LIMIT 10; + +SELECT t1.value FROM (SELECT number > 5 ? 'censor.net' : 'google' as value FROM system.numbers LIMIT 10) as t1; +EXPLAIN SYNTAX SELECT t1.value FROM (SELECT number > 5 ? 'censor.net' : 'google' as value FROM system.numbers LIMIT 10) as t1; +EXPLAIN QUERY TREE run_passes = 1 SELECT t1.value FROM (SELECT number > 5 ? 'censor.net' : 'google' as value FROM system.numbers LIMIT 10) as t1; + +SELECT t1.value FROM (SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') as value FROM system.numbers LIMIT 10) as t1; +EXPLAIN SYNTAX SELECT t1.value FROM (SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') as value FROM system.numbers LIMIT 10) as t1; +EXPLAIN QUERY TREE run_passes = 1 SELECT t1.value FROM (SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') as value FROM system.numbers LIMIT 10) as t1; + +SELECT number > 5 ? 'censor.net' : 'google' as value, value FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT number > 5 ? 'censor.net' : 'google' as value, value FROM system.numbers LIMIT 10; +EXPLAIN QUERY TREE run_passes = 1 SELECT number > 5 ? 
'censor.net' : 'google' as value, value FROM system.numbers LIMIT 10; + +SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') as value, value FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') as value, value FROM system.numbers LIMIT 10; +EXPLAIN QUERY TREE run_passes = 1 SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') as value, value FROM system.numbers LIMIT 10; + +SELECT transform(number, [NULL], ['google', 'censor.net', 'yahoo'], 'other') FROM (SELECT NULL as number FROM system.numbers LIMIT 10); +EXPLAIN SYNTAX SELECT transform(number, [NULL], ['google', 'censor.net', 'yahoo'], 'other') FROM (SELECT NULL as number FROM system.numbers LIMIT 10); +EXPLAIN QUERY TREE run_passes = 1 SELECT transform(number, [NULL], ['google', 'censor.net', 'yahoo'], 'other') FROM (SELECT NULL as number FROM system.numbers LIMIT 10); + +SELECT transform(number, NULL, ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT transform(number, NULL, ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +EXPLAIN QUERY TREE run_passes = 1 SELECT transform(number, NULL, ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SET optimize_if_transform_strings_to_enum = 0; + +SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; +EXPLAIN QUERY TREE run_passes = 1 SELECT transform(number, [2, 4, 6], ['google', 'censor.net', 'yahoo'], 'other') FROM system.numbers LIMIT 10; + +SELECT number > 5 ? 'censor.net' : 'google' FROM system.numbers LIMIT 10; +EXPLAIN SYNTAX SELECT number > 5 ? 'censor.net' : 'google' FROM system.numbers LIMIT 10; +EXPLAIN QUERY TREE run_passes = 1 SELECT number > 5 ? 
'censor.net' : 'google' FROM system.numbers LIMIT 10; diff --git a/parser/testdata/02497_remote_disk_fat_column/ast.json b/parser/testdata/02497_remote_disk_fat_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02497_remote_disk_fat_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02497_remote_disk_fat_column/metadata.json b/parser/testdata/02497_remote_disk_fat_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02497_remote_disk_fat_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02497_remote_disk_fat_column/query.sql b/parser/testdata/02497_remote_disk_fat_column/query.sql new file mode 100644 index 000000000..bba83384b --- /dev/null +++ b/parser/testdata/02497_remote_disk_fat_column/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-random-settings, no-fasttest, no-tsan, no-asan, no-msan +set allow_suspicious_fixed_string_types=1; +create table fat_granularity (x UInt32, fat FixedString(160000)) engine = MergeTree order by x settings storage_policy = 's3_cache'; + +SET max_memory_usage='10G'; + +insert into fat_granularity select number, toString(number) || '_' from numbers(100000) settings max_block_size = 3000, max_insert_threads = 8, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; + +-- Too large sizes of FixedString to deserialize +select x from fat_granularity prewhere fat like '256\_%' settings max_threads=2; diff --git a/parser/testdata/02497_schema_inference_nulls/ast.json b/parser/testdata/02497_schema_inference_nulls/ast.json new file mode 100644 index 000000000..bddb10fc0 --- /dev/null +++ b/parser/testdata/02497_schema_inference_nulls/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'JSONEachRow'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001229419, + "rows_read": 5, + "bytes_read": 182 + } +} diff --git a/parser/testdata/02497_schema_inference_nulls/metadata.json b/parser/testdata/02497_schema_inference_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02497_schema_inference_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02497_schema_inference_nulls/query.sql b/parser/testdata/02497_schema_inference_nulls/query.sql new file mode 100644 index 000000000..767e753a8 --- /dev/null +++ b/parser/testdata/02497_schema_inference_nulls/query.sql @@ -0,0 +1,75 @@ +select 'JSONEachRow'; +set schema_inference_make_columns_nullable=1; +set input_format_json_try_infer_named_tuples_from_objects=0; +set input_format_json_read_objects_as_strings=0; +set input_format_json_infer_incomplete_types_as_strings=0; +set input_format_json_read_numbers_as_strings=0; +set input_format_json_infer_array_of_dynamic_from_array_of_different_types=0; + +desc format(JSONEachRow, '{"x" : 1234}, {"x" : "String"}') settings input_format_json_try_infer_numbers_from_strings=1; -- { serverError CANNOT_EXTRACT_TABLE_STRUCTURE } +desc format(JSONEachRow, '{"x" : [null, 1]}'); +desc format(JSONEachRow, '{"x" : [null, 1]}, {"x" : []}'); +desc format(JSONEachRow, '{"x" : [null, 1]}, {"x" : [null]}'); +desc format(JSONEachRow, '{"x" : [null, 1]}, {"x" : 
[1, null]}'); +desc format(JSONEachRow, '{"x" : [null, 1]}, {"x" : ["abc", 1]}'); +desc format(JSONEachRow, '{"x" : [null, 1]}, {"x" : ["abc", null]}'); +desc format(JSONEachRow, '{"x" : {}}, {"x" : {"a" : 1}}'); +desc format(JSONEachRow, '{"x" : {"a" : null}}, {"x" : {"b" : 1}}'); +desc format(JSONEachRow, '{"x" : null}, {"x" : [1, 2]}'); +desc format(JSONEachRow, '{"x" : [[], [null], [1, 2, 3]]}'); +desc format(JSONEachRow, '{"x" : [{"a" : null}, {"b" : 1}]}'); +desc format(JSONEachRow, '{"x" : [["2020-01-01", null, "1234"], ["abcd"]]}'); + +set schema_inference_make_columns_nullable='auto'; +desc format(JSONEachRow, '{"x" : [1, 2]}'); +desc format(JSONEachRow, '{"x" : [null, 1]}'); +desc format(JSONEachRow, '{"x" : [1, 2]}, {"x" : [3]}'); +desc format(JSONEachRow, '{"x" : [1, 2]}, {"x" : [null]}'); + +select 'JSONCompactEachRow'; +set schema_inference_make_columns_nullable=1; +desc format(JSONCompactEachRow, '[1234], ["String"]') settings input_format_json_try_infer_numbers_from_strings=1; -- { serverError CANNOT_EXTRACT_TABLE_STRUCTURE } +desc format(JSONCompactEachRow, '[[null, 1]]'); +desc format(JSONCompactEachRow, '[[null, 1]], [[]]'); +desc format(JSONCompactEachRow, '[[null, 1]], [[null]]'); +desc format(JSONCompactEachRow, '[[null, 1]], [[1, null]]'); +desc format(JSONCompactEachRow, '[[null, 1]], [["abc", 1]]'); +desc format(JSONCompactEachRow, '[[null, 1]], [["abc", null]]'); +desc format(JSONCompactEachRow, '[{}], [{"a" : 1}]'); +desc format(JSONCompactEachRow, '[{"a" : null}], [{"b" : 1}]'); +desc format(JSONCompactEachRow, '[null], [[1, 2]]'); +desc format(JSONCompactEachRow, '[[[], [null], [1, 2, 3]]]'); +desc format(JSONCompactEachRow, '[[{"a" : null}, {"b" : 1}]]'); +desc format(JSONCompactEachRow, '[[["2020-01-01", null, "1234"], ["abcd"]]]'); + +set schema_inference_make_columns_nullable='auto'; +desc format(JSONCompactEachRow, '[[1, 2]]'); +desc format(JSONCompactEachRow, '[[null, 1]]'); +desc format(JSONCompactEachRow, '[[1, 2]], [[3]]'); +desc format(JSONCompactEachRow, '[[1, 2]], [[null]]'); + + +select 'CSV'; +set schema_inference_make_columns_nullable=1; +desc format(CSV, '"[null, 1]"'); +desc format(CSV, '"[null, 1]"\n"[]"'); +desc format(CSV, '"[null, 1]"\n"[null]"'); +desc format(CSV, '"[null, 1]"\n"[1, null]"'); +desc format(CSV, '"{}"\n"{\'a\' : 1}"'); +desc format(CSV, '"{\'a\' : null}"\n"{\'b\' : 1}"'); +desc format(CSV, '"[[], [null], [1, 2, 3]]"'); +desc format(CSV, '"[{\'a\' : null}, {\'b\' : 1}]"'); +desc format(CSV, '"[[\'2020-01-01\', null, \'1234\'], [\'abcd\']]"'); + +set schema_inference_make_columns_nullable='auto'; +desc format(CSV, '"[1,2]"'); +desc format(CSV, '"[NULL, 1]"'); +desc format(CSV, '"[1, 2]"\n"[3]"'); +desc format(CSV, '"[1, 2]"\n"[null]"'); + +set schema_inference_make_columns_nullable=0; +desc format(CSV, '\\N,\\N,1\nb,\\N,1'); +set schema_inference_make_columns_nullable='auto'; +desc format(CSV, '\\N,\\N,1\nb,\\N,1'); +set schema_inference_make_columns_nullable=3; +desc format(CSV, '\\N,\\N,1\nb,\\N,1'); diff --git a/parser/testdata/02497_source_part_is_intact_when_mutation/ast.json b/parser/testdata/02497_source_part_is_intact_when_mutation/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02497_source_part_is_intact_when_mutation/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02497_source_part_is_intact_when_mutation/metadata.json b/parser/testdata/02497_source_part_is_intact_when_mutation/metadata.json new file mode 100644 index 000000000..ef120d978 --- 
/dev/null +++ b/parser/testdata/02497_source_part_is_intact_when_mutation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02497_source_part_is_intact_when_mutation/query.sql b/parser/testdata/02497_source_part_is_intact_when_mutation/query.sql new file mode 100644 index 000000000..4702f9252 --- /dev/null +++ b/parser/testdata/02497_source_part_is_intact_when_mutation/query.sql @@ -0,0 +1,39 @@ +-- Tags: no-replicated-database, no-ordinary-database, no-encrypted-storage + +SET mutations_sync = 1; +SET check_query_single_value_result = 0; + +DROP TABLE IF EXISTS t_source_part_is_intact; + +CREATE TABLE t_source_part_is_intact (id UInt64, u UInt64) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part=1, ratio_of_defaults_for_sparse_serialization = 0.5; + +INSERT INTO t_source_part_is_intact SELECT + number, + if (number % 11 = 0, number, 0) +FROM numbers(2000); + +CHECK TABLE t_source_part_is_intact SETTINGS max_threads = 1; +SELECT 1, count() FROM t_source_part_is_intact; + +BEGIN TRANSACTION; +-- the size of the file serialization.json is the same in the new part, but the checksum is different +ALTER TABLE t_source_part_is_intact update u = 0 where u != 0; +ROLLBACK; + +CHECK TABLE t_source_part_is_intact SETTINGS max_threads = 1; + +BEGIN TRANSACTION; +-- the size of the file serialization.json is different in the new part +ALTER TABLE t_source_part_is_intact update u = 1 WHERE 1; +ROLLBACK; + +CHECK TABLE t_source_part_is_intact SETTINGS max_threads = 1; + +DETACH TABLE t_source_part_is_intact; +ATTACH TABLE t_source_part_is_intact; + +CHECK TABLE t_source_part_is_intact SETTINGS max_threads = 1; + +DROP TABLE t_source_part_is_intact; diff --git a/parser/testdata/02497_storage_join_right_assert/ast.json b/parser/testdata/02497_storage_join_right_assert/ast.json new file mode 100644 index 000000000..736f443b3 --- /dev/null +++ b/parser/testdata/02497_storage_join_right_assert/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001300072, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02497_storage_join_right_assert/metadata.json b/parser/testdata/02497_storage_join_right_assert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02497_storage_join_right_assert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02497_storage_join_right_assert/query.sql b/parser/testdata/02497_storage_join_right_assert/query.sql new file mode 100644 index 000000000..eabaa2363 --- /dev/null +++ b/parser/testdata/02497_storage_join_right_assert/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (key UInt64, a UInt64) ENGINE = Memory; +CREATE TABLE t2 (key UInt64, a UInt64) ENGINE = Join(ALL, RIGHT, key); + +INSERT INTO t1 VALUES (1, 1), (2, 2); +INSERT INTO t2 VALUES (2, 2), (3, 3); + +SET enable_analyzer = 0; +SELECT * FROM t1 ALL RIGHT JOIN t2 USING (key) ORDER BY key; + +SET enable_analyzer = 1; +SELECT * FROM t1 ALL RIGHT JOIN t2 USING (key) ORDER BY key; diff --git a/parser/testdata/02498_analyzer_aggregate_functions_arithmetic_operations_pass_fix/ast.json b/parser/testdata/02498_analyzer_aggregate_functions_arithmetic_operations_pass_fix/ast.json new file mode 100644 index 000000000..e9030987b --- /dev/null +++
b/parser/testdata/02498_analyzer_aggregate_functions_arithmetic_operations_pass_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001128939, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02498_analyzer_aggregate_functions_arithmetic_operations_pass_fix/metadata.json b/parser/testdata/02498_analyzer_aggregate_functions_arithmetic_operations_pass_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02498_analyzer_aggregate_functions_arithmetic_operations_pass_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02498_analyzer_aggregate_functions_arithmetic_operations_pass_fix/query.sql b/parser/testdata/02498_analyzer_aggregate_functions_arithmetic_operations_pass_fix/query.sql new file mode 100644 index 000000000..76c44f9e0 --- /dev/null +++ b/parser/testdata/02498_analyzer_aggregate_functions_arithmetic_operations_pass_fix/query.sql @@ -0,0 +1,18 @@ +SET enable_analyzer = 1; +SET optimize_arithmetic_operations_in_aggregate_functions = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value UInt64 +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (1, 1); +INSERT INTO test_table VALUES (1, 1); + +SELECT sum((2 * id) as func), func FROM test_table GROUP BY id; + +SELECT max(100-number), min(100-number) FROM numbers(2); + +select (sum(toDecimal64(2.11, 15) - number), 1) FROM numbers(2); diff --git a/parser/testdata/02498_analyzer_settings_push_down/ast.json b/parser/testdata/02498_analyzer_settings_push_down/ast.json new file mode 100644 index 000000000..d34f34336 --- /dev/null +++ b/parser/testdata/02498_analyzer_settings_push_down/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001151599, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02498_analyzer_settings_push_down/metadata.json b/parser/testdata/02498_analyzer_settings_push_down/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02498_analyzer_settings_push_down/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02498_analyzer_settings_push_down/query.sql b/parser/testdata/02498_analyzer_settings_push_down/query.sql new file mode 100644 index 000000000..472ab358d --- /dev/null +++ b/parser/testdata/02498_analyzer_settings_push_down/query.sql @@ -0,0 +1,43 @@ +SET enable_analyzer = 1; +SET optimize_functions_to_subcolumns = 0; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table (id UInt64, value Tuple(a UInt64)) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0, tuple(0)); + +-- { echoOn } + +SELECT value FROM (SELECT tupleElement(value, 'a') AS value FROM test_table); + +EXPLAIN QUERY TREE SELECT value FROM ( + SELECT tupleElement(value, 'a') AS value FROM test_table +); + +SELECT '--'; + +EXPLAIN QUERY TREE SELECT value FROM ( + SELECT tupleElement(value, 'a') AS value FROM test_table +) SETTINGS optimize_functions_to_subcolumns = 1; + +SELECT '--'; + +EXPLAIN QUERY TREE SELECT value FROM ( + SELECT tupleElement(value, 'a') AS value FROM test_table SETTINGS optimize_functions_to_subcolumns = 0 +) SETTINGS optimize_functions_to_subcolumns = 1; + +SELECT '--'; + +EXPLAIN QUERY TREE 
SELECT value FROM ( + SELECT tupleElement(value, 'a') AS value FROM test_table +) SETTINGS optimize_functions_to_subcolumns = 0; + +SELECT '--'; + +EXPLAIN QUERY TREE SELECT value FROM ( + SELECT tupleElement(value, 'a') AS value FROM test_table SETTINGS optimize_functions_to_subcolumns = 1 +) SETTINGS optimize_functions_to_subcolumns = 0; + +-- { echoOff } + +DROP TABLE test_table; diff --git a/parser/testdata/02498_storage_join_key_positions/ast.json b/parser/testdata/02498_storage_join_key_positions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02498_storage_join_key_positions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02498_storage_join_key_positions/metadata.json b/parser/testdata/02498_storage_join_key_positions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02498_storage_join_key_positions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02498_storage_join_key_positions/query.sql b/parser/testdata/02498_storage_join_key_positions/query.sql new file mode 100644 index 000000000..04fd4baae --- /dev/null +++ b/parser/testdata/02498_storage_join_key_positions/query.sql @@ -0,0 +1,69 @@ +-- Tags: no-parallel-replicas +-- It generates a plan with _reading_ from the storage join, but reading from a storage join with complex keys is currently not supported. + +SET enable_parallel_replicas = 0; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS tj; +DROP TABLE IF EXISTS tjj; + +CREATE TABLE t1 (key1 UInt64, key2 UInt64, key3 UInt64) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t1 VALUES (11, 12, 13), (21, 22, 23), (31, 32, 33), (41, 42, 43), (51, 52, 53); + +CREATE TABLE tj (key2 UInt64, key1 UInt64, key3 UInt64, attr UInt64) ENGINE = Join(ALL, INNER, key3, key2, key1); +INSERT INTO tj VALUES (22, 21, 23, 2000), (32, 31, 33, 3000), (42, 41, 43, 4000), (52, 51, 53, 5000), (62, 61, 63, 6000); + +CREATE TABLE tjj (key2 UInt64, key1 UInt64, key3 UInt64, attr UInt64) ENGINE = Join(ALL, INNER, key3, key2, key1); +INSERT INTO tjj VALUES (11, 11, 11, 1000), (21, 21, 21, 2000), (31, 31, 31, 3000), (41, 41, 41, 4000), (51, 51, 51, 5000), (61, 61, 61, 6000); + +SELECT '--- using ---'; +SELECT * FROM t1 ALL INNER JOIN tj USING (key1, key2, key3) ORDER BY key1; +SELECT key1, key2, key3, attr FROM t1 ALL INNER JOIN tj USING (key1, key2, key3) ORDER BY key1; +SELECT key1, key2, key3, attr FROM t1 ALL INNER JOIN tj USING (key2, key3, key1) ORDER BY key1; +SELECT key1, key2, key3, attr FROM t1 ALL INNER JOIN tj USING (key3, key2, key1) ORDER BY key1; +SELECT key1, key2, key3, attr FROM t1 ALL INNER JOIN tj USING (key1, key3, key2) ORDER BY key1; + +SELECT '--- on ---'; +SELECT * FROM t1 ALL INNER JOIN tj ON t1.key3 = tj.key3 AND t1.key2 = tj.key2 AND t1.key1 = tj.key1 ORDER BY t1.key1; +SELECT * FROM t1 ALL INNER JOIN tj ON t1.key2 = tj.key2 AND t1.key3 = tj.key3 AND t1.key1 = tj.key1 ORDER BY t1.key1; +SELECT * FROM t1 ALL INNER JOIN tj ON t1.key3 = tj.key3 AND t1.key1 = tj.key1 AND t1.key2 = tj.key2 ORDER BY t1.key1; +SELECT * FROM t1 ALL INNER JOIN tj ON t1.key1 = tj.key1 AND t1.key3 = tj.key3 AND t1.key2 = tj.key2 ORDER BY t1.key1; + +SELECT '--- on different name ---'; +SELECT * FROM (SELECT key3 AS c, key1 AS a, key2 AS b FROM t1) AS t1 ALL INNER JOIN tj ON t1.a = tj.key1 AND t1.c = tj.key3 AND t1.b = tj.key2 ORDER BY t1.a; +SELECT * FROM (SELECT key3 AS c, key1 AS a, key2 AS b FROM t1) AS t1 ALL INNER JOIN tj ON t1.a = tj.key1 AND t1.b = tj.key2 AND t1.c =
tj.key3 ORDER BY t1.a; +SELECT * FROM (SELECT key3 AS c, key1 AS a, key2 AS b FROM t1) AS t1 ALL INNER JOIN tj ON t1.c = tj.key3 AND t1.a = tj.key1 AND t1.b = tj.key2 ORDER BY t1.a; + + +SELECT * FROM t1 ALL INNER JOIN tj ON t1.key1 = tj.key1 AND t1.key3 = tj.key3 AND t1.key2 = tj.key2 AND 0; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN,INVALID_JOIN_ON_EXPRESSION } +SELECT * FROM t1 ALL INNER JOIN tj ON t1.key1 = tj.key1 AND t1.key3 = tj.key3 AND t1.key2 = tj.key2 AND 1 > 1; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN,INVALID_JOIN_ON_EXPRESSION } + +SELECT '--- incompatible ---'; +SELECT * FROM t1 ALL INNER JOIN tj ON 1; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN,INVALID_JOIN_ON_EXPRESSION } +SELECT * FROM t1 ALL INNER JOIN tj ON 0; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN,INVALID_JOIN_ON_EXPRESSION } +SELECT * FROM t1 ALL INNER JOIN tj ON NULL; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN,INVALID_JOIN_ON_EXPRESSION } +SELECT * FROM t1 ALL INNER JOIN tj ON 1 != 1; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN,INVALID_JOIN_ON_EXPRESSION } + +-- Here is another error code because equality is handled differently in CollectJoinOnKeysVisitor. +-- We can change the error code, but it will become inconsistent for other cases +-- where we actually expect AMBIGUOUS_COLUMN_NAME instead of INVALID_JOIN_ON_EXPRESSION/INCOMPATIBLE_TYPE_OF_JOIN. +-- These checks are more reliable after switching to the new analyzer; they return INCOMPATIBLE_TYPE_OF_JOIN, consistent with the cases above. +SELECT * FROM t1 ALL INNER JOIN tj ON 1 == 1; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN,AMBIGUOUS_COLUMN_NAME } +SELECT * FROM t1 ALL INNER JOIN tj ON 1 == 2; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN,AMBIGUOUS_COLUMN_NAME } + +SELECT * FROM t1 ALL INNER JOIN tj USING (key1, key2, attr); -- { serverError INCOMPATIBLE_TYPE_OF_JOIN,UNKNOWN_IDENTIFIER } +SELECT * FROM t1 ALL INNER JOIN tj USING (key1, key2, key3, attr); -- { serverError INCOMPATIBLE_TYPE_OF_JOIN,UNKNOWN_IDENTIFIER } + +SELECT * FROM t1 ALL INNER JOIN tj USING (key2, key3); -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } +SELECT * FROM t1 ALL INNER JOIN tj ON t1.key1 = tj.attr; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } +SELECT * FROM t1 ALL INNER JOIN tj ON t1.key1 = tj.key1; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } +SELECT * FROM t1 ALL INNER JOIN tj ON t1.key1 = tj.key1 AND t1.key2 = tj.key2 AND t1.key3 = tj.attr; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } +SELECT * FROM t1 ALL INNER JOIN tj ON t1.key1 = tj.key1 AND t1.key2 = tj.key2 AND t1.key3 = tj.key3 AND t1.key1 = tj.key1; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } + +SELECT '--- reuse column from left ---'; +SELECT * FROM t1 ALL INNER JOIN tjj ON t1.key1 = tjj.key1 AND t1.key1 = tjj.key2 AND t1.key1 = tjj.key3 ORDER BY t1.key1; +SELECT * FROM t1 ALL INNER JOIN tjj ON t1.key1 = tjj.key1 AND t1.key1 = tjj.key3 AND t1.key1 = tjj.key2 ORDER BY t1.key1; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS tj; +DROP TABLE IF EXISTS tjj; diff --git a/parser/testdata/02499_analyzer_aggregate_function_lambda_crash_fix/ast.json b/parser/testdata/02499_analyzer_aggregate_function_lambda_crash_fix/ast.json new file mode 100644 index 000000000..128516643 --- /dev/null +++ b/parser/testdata/02499_analyzer_aggregate_function_lambda_crash_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001003202, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git
a/parser/testdata/02499_analyzer_aggregate_function_lambda_crash_fix/metadata.json b/parser/testdata/02499_analyzer_aggregate_function_lambda_crash_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02499_analyzer_aggregate_function_lambda_crash_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02499_analyzer_aggregate_function_lambda_crash_fix/query.sql b/parser/testdata/02499_analyzer_aggregate_function_lambda_crash_fix/query.sql new file mode 100644 index 000000000..7ac817aec --- /dev/null +++ b/parser/testdata/02499_analyzer_aggregate_function_lambda_crash_fix/query.sql @@ -0,0 +1,4 @@ +SET enable_analyzer = 1; + +SELECT count((t, x_0, x_1) -> ((key_2, x_0, x_1) IN (NULL, NULL, '0.3'))) FROM numbers(10); -- { serverError UNSUPPORTED_METHOD } +SELECT count((t, x_0, x_1) -> ((key_2, x_0, x_1) IN (NULL, NULL, '0.3'))) OVER (PARTITION BY id) FROM numbers(10); -- { serverError UNSUPPORTED_METHOD } diff --git a/parser/testdata/02499_analyzer_set_index/ast.json b/parser/testdata/02499_analyzer_set_index/ast.json new file mode 100644 index 000000000..348ca2b26 --- /dev/null +++ b/parser/testdata/02499_analyzer_set_index/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001208326, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02499_analyzer_set_index/metadata.json b/parser/testdata/02499_analyzer_set_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02499_analyzer_set_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02499_analyzer_set_index/query.sql b/parser/testdata/02499_analyzer_set_index/query.sql new file mode 100644 index 000000000..52d96cfca --- /dev/null +++ b/parser/testdata/02499_analyzer_set_index/query.sql @@ -0,0 +1,18 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String, + INDEX value_idx (value) TYPE set(1000) GRANULARITY 1 +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table SELECT number, toString(number) FROM numbers(10); + +SELECT count() FROM test_table WHERE value = '1' SETTINGS force_data_skipping_indices = 'value_idx'; + +SELECT count() FROM test_table AS t1 INNER JOIN (SELECT number AS id FROM numbers(10)) AS t2 ON t1.id = t2.id +WHERE t1.value = '1' SETTINGS force_data_skipping_indices = 'value_idx'; + +DROP TABLE test_table; diff --git a/parser/testdata/02499_escaped_quote_schema_inference/ast.json b/parser/testdata/02499_escaped_quote_schema_inference/ast.json new file mode 100644 index 000000000..5aabff8aa --- /dev/null +++ b/parser/testdata/02499_escaped_quote_schema_inference/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DescribeQuery (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier CSV" + }, + { + "explain": " Literal '\"[\\'abc\\\\\\'\\']\"'" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001182537, + "rows_read": 6, + "bytes_read": 210 + } +} diff --git a/parser/testdata/02499_escaped_quote_schema_inference/metadata.json b/parser/testdata/02499_escaped_quote_schema_inference/metadata.json new file mode 
100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02499_escaped_quote_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02499_escaped_quote_schema_inference/query.sql b/parser/testdata/02499_escaped_quote_schema_inference/query.sql new file mode 100644 index 000000000..34c523387 --- /dev/null +++ b/parser/testdata/02499_escaped_quote_schema_inference/query.sql @@ -0,0 +1,2 @@ +desc format(CSV, '"[\'abc\\\'\']"'); +desc format(Values, '(\'abc\\\'\')'); diff --git a/parser/testdata/02499_extract_key_value_pairs_multiple_input/ast.json b/parser/testdata/02499_extract_key_value_pairs_multiple_input/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02499_extract_key_value_pairs_multiple_input/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02499_extract_key_value_pairs_multiple_input/metadata.json b/parser/testdata/02499_extract_key_value_pairs_multiple_input/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02499_extract_key_value_pairs_multiple_input/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02499_extract_key_value_pairs_multiple_input/query.sql b/parser/testdata/02499_extract_key_value_pairs_multiple_input/query.sql new file mode 100644 index 000000000..c7e854f5c --- /dev/null +++ b/parser/testdata/02499_extract_key_value_pairs_multiple_input/query.sql @@ -0,0 +1,690 @@ +-- { echoOn } + +-- basic tests + +-- expected output: {'age':'31','name':'neymar','nationality':'brazil','team':'psg'} +WITH + extractKeyValuePairs('name:neymar, age:31 team:psg,nationality:brazil') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- keys and values starting with number, underscore and other special characters +-- expected output: {'$nationality':'@brazil','1name':'neymar','4ge':'31','_team':'_psg'} +WITH + extractKeyValuePairs('1name:neymar, 4ge:31 _team:_psg,$nationality:@brazil') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- only special characters +-- expected output: {'#':'#','$':'$','@':'@','_':'_'} +WITH + extractKeyValuePairs('_:_, @:@ #:#,$:$') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- special (not control) characters in the middle of elements +-- expected output: {'age':'3!','name':'ney!mar','nationality':'br4z!l','t&am':'@psg'} +WITH + extractKeyValuePairs('name:ney!mar, age:3! 
t&am:@psg,nationality:br4z!l') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- non-standard escape characters (i.e. not \n, \r, \t, etc.); the backslash should be preserved +-- expected output: {'amount\\z':'$5\\h','currency':'\\$USD'} +WITH + extractKeyValuePairs('currency:\$USD, amount\z:$5\h') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- an invalid escape sequence at the end of the input should be ignored +-- expected output: {'key':'invalid_escape_sequence','valid_key':'valid_value'} +WITH + extractKeyValuePairsWithEscaping('valid_key:valid_value key:invalid_escape_sequence\\', ':', ' ', '"') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- standard escape sequences are covered by unit tests + +-- simple quoting +-- expected output: {'age':'31','name':'neymar','team':'psg'} +WITH + extractKeyValuePairs('name:"neymar", "age":31 "team":"psg"') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- empty values +-- expected output: {'age':'','name':'','nationality':''} +WITH + extractKeyValuePairs('name:"", age: , nationality:') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- empty keys +-- empty keys are not allowed, thus empty output is expected +WITH + extractKeyValuePairs('"":abc, :def') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- semi-colon as pair delimiter +-- expected output: {'age':'31','anotherkey':'anothervalue','name':'neymar','random_key':'value_with_comma,still_part_of_value:still_part_of_value','team':'psg'} +WITH + extractKeyValuePairs('name:neymar;age:31;team:psg;random_key:value_with_comma,still_part_of_value:still_part_of_value;anotherkey:anothervalue', ':', ';') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- both comma and semi-colon as pair delimiters +-- expected output: {'age':'31','last_key':'last_value','name':'neymar','nationality':'brazil','team':'psg'} +WITH + extractKeyValuePairs('name:neymar;age:31;team:psg;nationality:brazil,last_key:last_value', ':', ';,') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- single quote as quoting character +-- expected output: {'age':'31','last_key':'last_value','name':'neymar','nationality':'brazil','team':'psg'} +WITH + extractKeyValuePairs('name:\'neymar\';\'age\':31;team:psg;nationality:brazil,last_key:last_value', ':', ';,', '\'') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- NO ESCAPING TESTS +-- expected output: {'age':'31','name':'neymar','nationality':'brazil','team':'psg'} +WITH + extractKeyValuePairs('name:neymar, age:31 team:psg,nationality:brazil', ':', ', ', '"') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- special (not control) characters in the middle of elements +-- expected output: {'age':'3!','name':'ney!mar','nationality':'br4z!l','t&am':'@psg'}
+WITH + extractKeyValuePairs('name:ney!mar, age:3! t&am:@psg,nationality:br4z!l', ':', ', ', '"') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- non-standard escape characters (i.e. not \n, \r, \t, etc.); everything should be accepted +-- expected output: {'amount\\z':'$5\\h','currency':'\\$USD'} +WITH + extractKeyValuePairs('currency:\$USD, amount\z:$5\h', ':', ', ', '"') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- standard escape sequences; they should be returned as-is +-- expected output: {'key1':'header\nbody','key2':'start_of_text\tend_of_text'} +WITH + extractKeyValuePairs('key1:header\nbody key2:start_of_text\tend_of_text', ':', ', ', '"') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- standard escape sequences are covered by unit tests + +-- simple quoting +-- expected output: {'age':'31','name':'neymar','team':'psg'} +WITH + extractKeyValuePairs('name:"neymar", "age":31 "team":"psg"', ':', ', ', '"') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- empty values +-- expected output: {'age':'','name':'','nationality':''} +WITH + extractKeyValuePairs('name:"", age: , nationality:', ':', ', ', '"') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- empty keys +-- empty keys are not allowed, thus empty output is expected +WITH + extractKeyValuePairs('"":abc, :def', ':', ', ', '"') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- semi-colon as pair delimiter +-- expected output: {'age':'31','name':'neymar','nationality':'brazil','team':'psg'} +WITH + extractKeyValuePairs('name:neymar;age:31;team:psg;nationality:brazil', ':', ';', '"') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- both comma and semi-colon as pair delimiters +-- expected output: {'age':'31','last_key':'last_value','name':'neymar','nationality':'brazil','team':'psg'} +WITH + extractKeyValuePairs('name:neymar;age:31;team:psg;nationality:brazil,last_key:last_value', ':', ';,', '"') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- single quote as quoting character +-- expected output: {'age':'31','last_key':'last_value','name':'neymar','nationality':'brazil','team':'psg'} +WITH + extractKeyValuePairs('name:\'neymar\';\'age\':31;team:psg;nationality:brazil,last_key:last_value', ':', ';,', '\'') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- { echoOff } + +-- cross parameter validation tests +-- should fail because key value delimiter conflicts with pair delimiters +WITH + extractKeyValuePairs('not_important', ':', ',:', '\'') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; -- {serverError BAD_ARGUMENTS} + +-- should fail because key value delimiter conflicts with quoting characters +WITH + extractKeyValuePairs('not_important', ':', ',', '\':') AS s_map, + CAST( +
arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; -- {serverError BAD_ARGUMENTS} + +-- should fail because pair delimiters conflict with quoting characters +WITH + extractKeyValuePairs('not_important', ':', ',', ',') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; -- {serverError BAD_ARGUMENTS} + +-- should fail because data_column argument must be of type String +WITH + extractKeyValuePairs([1, 2]) AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +-- should fail because key_value_delimiter argument must be of type String +WITH + extractKeyValuePairs('', [1, 2]) AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +-- should fail because pair_delimiters argument must be of type String +WITH + extractKeyValuePairs('', ':', [1, 2]) AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +-- should fail because quoting_character argument must be of type String +WITH + extractKeyValuePairs('', ':', ' ', [1, 2]) AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +-- should fail because pair delimiters can contain at most 8 characters +WITH + extractKeyValuePairs('not_important', ':', '123456789', '\'') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; -- {serverError BAD_ARGUMENTS} + +-- should fail because no argument has been provided +WITH + extractKeyValuePairs() AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +-- should fail because an extra, non-existent argument has been provided +WITH + extractKeyValuePairs('a', ':', ',', '"', 'accept', '') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +-- Should fail because it exceeds the max number of pairs +SET extract_key_value_pairs_max_pairs_per_row = 1; +WITH + extractKeyValuePairs('key1:value1,key2:value2') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; -- {serverError LIMIT_EXCEEDED} + +-- { echoOn } + +SET extract_key_value_pairs_max_pairs_per_row = 2; +-- Should be allowed because it no longer exceeds the max number of pairs +-- expected output: {'key1':'value1','key2':'value2'} +WITH + extractKeyValuePairs('key1:value1,key2:value2') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +SET extract_key_value_pairs_max_pairs_per_row = 0; +-- Should be allowed because max pairs per row is set to 0 (unlimited) +-- expected output: {'key1':'value1','key2':'value2'} +WITH + extractKeyValuePairs('key1:value1,key2:value2') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), +
'Map(String,String)' + ) AS x +SELECT + x; + +-- should not fail because the pair delimiters argument contains 8 characters, which is within the limit +WITH + extractKeyValuePairs('not_important', ':', '12345678', '\'') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- the key-value delimiter should be considered a valid part of the value +WITH + extractKeyValuePairs('formula=1+2=3 argument1=1 argument2=2 result=3, char="=" char2== string="foo=bar"', '=') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- https://github.com/ClickHouse/ClickHouse/issues/56357 +WITH + extractKeyValuePairs('{"a":"1", "b":"2"}', ':', '{, }') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- check str_to_map alias (it is case-insensitive) +WITH + sTr_tO_mAp('name:neymar, age:31 team:psg,nationality:brazil') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- check mapFromString alias +WITH + mapFromString('name:neymar, age:31 team:psg,nationality:brazil') AS s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- The quoting character must be escaped, otherwise the key will be discarded. Consider using `unexpected_quoting_character_strategy` +WITH + extractKeyValuePairs('key:"#123"junk", second_key:0') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- test unexpected_quoting_character_strategy +-- unexpected_quoting_character_strategy=accept, the quoting characters shall become part of the key +WITH + extractKeyValuePairs('name"abc":5', ':', ' ,;', '\"', 'accept') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- unexpected_quoting_character_strategy=invalid, empty return +WITH + extractKeyValuePairs('name"abc":5', ':', ' ,;', '\"', 'invalid') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- unexpected_quoting_character_strategy=promote, abc=5 +WITH + extractKeyValuePairs('name"abc":5', ':', ' ,;', '\"', 'promote') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- test unexpected_quoting_character_strategy +-- unexpected_quoting_character_strategy=accept, the quoting character shall become part of the key +WITH + extractKeyValuePairs('name"abc:5', ':', ' ,;', '\"', 'accept') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- unexpected_quoting_character_strategy=invalid, start reading key from abc +WITH + extractKeyValuePairs('name"abc:5', ':', ' ,;', '\"', 'invalid') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- unexpected_quoting_character_strategy=promote, start reading abc as quoted key but fails to find closing quoting character +WITH + extractKeyValuePairs('name"abc:5', ':', ' ,;', '\"', 'promote') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x
+SELECT + x; + +-- test unexpected_quoting_character_strategy +-- unexpected_quoting_character_strategy=accept, the quoting characters shall become part of the value +WITH + extractKeyValuePairs('key:val"abc', ':', ' ,;', '\"', 'accept') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- unexpected_quoting_character_strategy=invalid, empty return +WITH + extractKeyValuePairs('key:val"abc', ':', ' ,;', '\"', 'invalid') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- unexpected_quoting_character_strategy=promote, empty +WITH + extractKeyValuePairs('key:val"abc', ':', ' ,;', '\"', 'promote') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- test unexpected_quoting_character_strategy +-- unexpected_quoting_character_strategy=accept, the quoting characters shall become part of the value +WITH + extractKeyValuePairs('key:val"abc"', ':', ' ,;', '\"', 'accept') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- unexpected_quoting_character_strategy=invalid, empty +WITH + extractKeyValuePairs('key:val"abc"', ':', ' ,;', '\"', 'invalid') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- unexpected_quoting_character_strategy=promote, start reading abc as quoted key but fails to find closing quoting character +WITH + extractKeyValuePairs('key:val"abc"', ':', ' ,;', '\"', 'promote') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; + +-- after parsing a quoted value, the next key should only start after a pair delimiter +WITH + extractKeyValuePairs('key:"quoted_value"junk,second_key:0') as s_map, + CAST( + arrayMap( + (x) -> (x, s_map[x]), arraySort(mapKeys(s_map)) + ), + 'Map(String,String)' + ) AS x +SELECT + x; diff --git a/parser/testdata/02499_quantile_nan_ubsan_msan/ast.json b/parser/testdata/02499_quantile_nan_ubsan_msan/ast.json new file mode 100644 index 000000000..41bce481a --- /dev/null +++ b/parser/testdata/02499_quantile_nan_ubsan_msan/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantiles (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function now (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal 'DateTime(\\'UTC\\')'" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_0.5" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.00121176, + "rows_read": 14, + "bytes_read": 529 + } +} diff --git a/parser/testdata/02499_quantile_nan_ubsan_msan/metadata.json b/parser/testdata/02499_quantile_nan_ubsan_msan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02499_quantile_nan_ubsan_msan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02499_quantile_nan_ubsan_msan/query.sql b/parser/testdata/02499_quantile_nan_ubsan_msan/query.sql new file mode 100644 index 000000000..d8a8a040a --- /dev/null +++ b/parser/testdata/02499_quantile_nan_ubsan_msan/query.sql @@ -0,0 +1,22 @@ +SELECT quantiles(0.5)(now()::DateTime('UTC')) WHERE 0; +SELECT quantiles(0.5)(now()::DateTime('UTC')) WHERE 0 WITH TOTALS; +SELECT arrayReduce('quantiles(0.5)', []::Array(DateTime('UTC'))); +SELECT quantiles(0.5, 1.1754943508222875e-38, 0.0001, -0., 0.0001, -0., 0.0001, 0., 0.5)(now()::DateTime('UTC')) WHERE 0 WITH TOTALS; + +SELECT DISTINCT arrayReduce('quantiles(0.5)', materialize([]::Array(DateTime('UTC')))) FROM numbers(1000) LIMIT 10; +SELECT DISTINCT arrayReduce('quantiles(0, 0.5, 0.9, 1)', materialize([]::Array(DateTime('UTC')))) FROM numbers(1000) LIMIT 10; +SELECT DISTINCT arrayReduce('quantiles(0.5)', [0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFE]) FROM numbers(1000) LIMIT 10; +SELECT DISTINCT arrayReduce('quantilesDeterministic(0.5)', materialize([]::Array(DateTime('UTC'))), []::Array(UInt64)) FROM numbers(1000) LIMIT 10; +SELECT DISTINCT arrayReduce('quantilesDeterministic(0, 0.5, 0.9, 1)', materialize([]::Array(DateTime('UTC'))), []::Array(UInt64)) FROM numbers(1000) LIMIT 10; +SELECT DISTINCT arrayReduce('quantiles(0.5)', [CAST(-1, 'UInt256'), CAST(-2, 'UInt256')]) FROM numbers(1000) LIMIT 10; +SELECT DISTINCT arrayReduce('quantiles(0.5)', []::Array(Float64)) FROM numbers(1000) LIMIT 10; + +SELECT quantile(0.5)(now()::DateTime('UTC')) WHERE 0; +SELECT quantile(0.5)(now()::DateTime('UTC')) WHERE 0 WITH TOTALS; +SELECT arrayReduce('quantile(0.5)', []::Array(DateTime('UTC'))); + +SELECT DISTINCT arrayReduce('quantile(0.5)', materialize([]::Array(DateTime('UTC')))) FROM numbers(1000) LIMIT 10; +SELECT DISTINCT arrayReduce('quantile(0.5)', [0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFE]) FROM numbers(1000) LIMIT 10; +SELECT DISTINCT arrayReduce('quantileDeterministic(0.5)', materialize([]::Array(DateTime('UTC'))), []::Array(UInt64)) FROM numbers(1000) LIMIT 10; +SELECT DISTINCT arrayReduce('quantile(0.5)', [CAST(-1, 'UInt256'), CAST(-2, 'UInt256')]) FROM numbers(1000) LIMIT 10; +SELECT DISTINCT arrayReduce('quantile(0.5)', []::Array(Float64)) FROM numbers(1000) LIMIT 10; diff --git a/parser/testdata/02499_read_json_objects_as_strings/ast.json b/parser/testdata/02499_read_json_objects_as_strings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02499_read_json_objects_as_strings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02499_read_json_objects_as_strings/metadata.json b/parser/testdata/02499_read_json_objects_as_strings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02499_read_json_objects_as_strings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02499_read_json_objects_as_strings/query.sql b/parser/testdata/02499_read_json_objects_as_strings/query.sql new file mode 100644 index 000000000..64e708513 --- /dev/null +++ b/parser/testdata/02499_read_json_objects_as_strings/query.sql @@ -0,0 +1,7 @@ +-- Tags: no-fasttest +set input_format_json_read_objects_as_strings=1; +set input_format_json_try_infer_named_tuples_from_objects=0; +desc format(JSONEachRow, '{"x" : "abc"}, {"x" : {"a" : 10, "b" : "abc"}}'); +select * from format(JSONEachRow, '{"x" : "abc"}, {"x" : {"a" : 10, "b" : "abc"}}'); +desc 
format(JSONEachRow, '{"x" : {"a" : "b"}}, {"x" : {"a" : 1, "b" : [1,2,3]}}'); +select * from format(JSONEachRow, '{"x" : {"a" : "b"}}, {"x" : {"a" : 1, "b" : [1,2,3]}}'); diff --git a/parser/testdata/02500_analyzer_storage_view_crash_fix/ast.json b/parser/testdata/02500_analyzer_storage_view_crash_fix/ast.json new file mode 100644 index 000000000..2c306beba --- /dev/null +++ b/parser/testdata/02500_analyzer_storage_view_crash_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001037484, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02500_analyzer_storage_view_crash_fix/metadata.json b/parser/testdata/02500_analyzer_storage_view_crash_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02500_analyzer_storage_view_crash_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02500_analyzer_storage_view_crash_fix/query.sql b/parser/testdata/02500_analyzer_storage_view_crash_fix/query.sql new file mode 100644 index 000000000..f0484a685 --- /dev/null +++ b/parser/testdata/02500_analyzer_storage_view_crash_fix/query.sql @@ -0,0 +1,19 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + f1 Int32, + f2 Int32, + pk Int32 +) ENGINE = MergeTree PARTITION BY pk ORDER BY f1; + +INSERT INTO test_table SELECT number, number, number FROM numbers(10); + +DROP VIEW IF EXISTS test_view; +CREATE VIEW test_view AS SELECT f1, f2 FROM test_table WHERE pk = 2; + +SELECT * FROM test_view; + +DROP VIEW test_view; +DROP TABLE test_table; diff --git a/parser/testdata/02500_prevent_drop_nested_if_empty_part/ast.json b/parser/testdata/02500_prevent_drop_nested_if_empty_part/ast.json new file mode 100644 index 000000000..4c9110a87 --- /dev/null +++ b/parser/testdata/02500_prevent_drop_nested_if_empty_part/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02500_nested (children 1)" + }, + { + "explain": " Identifier 02500_nested" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001513921, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/02500_prevent_drop_nested_if_empty_part/metadata.json b/parser/testdata/02500_prevent_drop_nested_if_empty_part/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02500_prevent_drop_nested_if_empty_part/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02500_prevent_drop_nested_if_empty_part/query.sql b/parser/testdata/02500_prevent_drop_nested_if_empty_part/query.sql new file mode 100644 index 000000000..d8564546b --- /dev/null +++ b/parser/testdata/02500_prevent_drop_nested_if_empty_part/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS 02500_nested; + +SET flatten_nested = 1; + +CREATE TABLE 02500_nested(nes Nested(a Int32, b Int32)) Engine=MergeTree ORDER BY tuple(); +INSERT INTO 02500_nested(nes.a, nes.b) VALUES ([1], [2]); +ALTER TABLE 02500_nested ADD COLUMN z Int32; +ALTER TABLE 02500_nested DROP COLUMN nes; -- { serverError BAD_ARGUMENTS } +DROP TABLE 02500_nested; + +CREATE TABLE 02500_nested(nes Nested(a Int32, b Int32), z Int32) Engine=MergeTree ORDER BY tuple(); +INSERT INTO 02500_nested(nes.a, nes.b, z) VALUES ([1], [2], 2); +ALTER TABLE 02500_nested DROP COLUMN nes; +DROP TABLE 02500_nested; + +SET 
flatten_nested = 0; + +CREATE TABLE 02500_nested(nes Nested(a Int32, b Int32)) Engine=MergeTree ORDER BY tuple(); +INSERT INTO 02500_nested(nes) VALUES ([(1, 2)]); +ALTER TABLE 02500_nested ADD COLUMN z Int32; +ALTER TABLE 02500_nested DROP COLUMN nes; -- { serverError BAD_ARGUMENTS } +DROP TABLE 02500_nested; + +CREATE TABLE 02500_nested(nes Array(Tuple(a Int32, b Int32)), z Int32) Engine=MergeTree ORDER BY tuple(); +INSERT INTO 02500_nested(nes, z) VALUES ([(1, 2)], 2); +ALTER TABLE 02500_nested DROP COLUMN nes; +DROP TABLE 02500_nested; diff --git a/parser/testdata/02501_analyzer_expired_context_crash_fix/ast.json b/parser/testdata/02501_analyzer_expired_context_crash_fix/ast.json new file mode 100644 index 000000000..92227a8c1 --- /dev/null +++ b/parser/testdata/02501_analyzer_expired_context_crash_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00124546, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02501_analyzer_expired_context_crash_fix/metadata.json b/parser/testdata/02501_analyzer_expired_context_crash_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02501_analyzer_expired_context_crash_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02501_analyzer_expired_context_crash_fix/query.sql b/parser/testdata/02501_analyzer_expired_context_crash_fix/query.sql new file mode 100644 index 000000000..e2c940c82 --- /dev/null +++ b/parser/testdata/02501_analyzer_expired_context_crash_fix/query.sql @@ -0,0 +1,15 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + b Int64, + a Int64, + grp_aggreg AggregateFunction(groupArrayArray, Array(UInt64)) +) ENGINE = MergeTree() ORDER BY a; + +INSERT INTO test_table SELECT 0, 0, groupArrayArrayState([toUInt64(1)]); + +SELECT b, a, JSONLength(grp_aggreg, 100, NULL) FROM test_table SETTINGS optimize_aggregation_in_order = 1; + +DROP TABLE test_table; diff --git a/parser/testdata/02501_limits_on_result_for_view/ast.json b/parser/testdata/02501_limits_on_result_for_view/ast.json new file mode 100644 index 000000000..5b115cfc0 --- /dev/null +++ b/parser/testdata/02501_limits_on_result_for_view/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02501_test (children 1)" + }, + { + "explain": " Identifier 02501_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001404449, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02501_limits_on_result_for_view/metadata.json b/parser/testdata/02501_limits_on_result_for_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02501_limits_on_result_for_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02501_limits_on_result_for_view/query.sql b/parser/testdata/02501_limits_on_result_for_view/query.sql new file mode 100644 index 000000000..aa9bcb0e5 --- /dev/null +++ b/parser/testdata/02501_limits_on_result_for_view/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS 02501_test; +DROP TABLE IF EXISTS 02501_dist; +DROP VIEW IF EXISTS 02501_view; + + +-- create local table +CREATE TABLE 02501_test(`a` UInt64) ENGINE = Memory; + +-- create dist table +CREATE TABLE 02501_dist(`a` UInt64) ENGINE = Distributed(test_cluster_two_shards, 
currentDatabase(), 02501_test); + +-- create view +CREATE VIEW 02501_view(`a` UInt64) AS SELECT a FROM 02501_dist; + +-- insert data +insert into 02501_test values(5),(6),(7),(8); + +-- test +SELECT * from 02501_view settings max_result_rows = 1; -- { serverError TOO_MANY_ROWS_OR_BYTES } +SELECT sum(a) from 02501_view settings max_result_rows = 1; + + +DROP TABLE IF EXISTS 02501_test; +DROP TABLE IF EXISTS 02501_dist; +DROP VIEW IF EXISTS 02501_view; \ No newline at end of file diff --git a/parser/testdata/02502_analyzer_insert_select_crash_fix/ast.json b/parser/testdata/02502_analyzer_insert_select_crash_fix/ast.json new file mode 100644 index 000000000..9e2ff56b5 --- /dev/null +++ b/parser/testdata/02502_analyzer_insert_select_crash_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001409975, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02502_analyzer_insert_select_crash_fix/metadata.json b/parser/testdata/02502_analyzer_insert_select_crash_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02502_analyzer_insert_select_crash_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02502_analyzer_insert_select_crash_fix/query.sql b/parser/testdata/02502_analyzer_insert_select_crash_fix/query.sql new file mode 100644 index 000000000..a438276bd --- /dev/null +++ b/parser/testdata/02502_analyzer_insert_select_crash_fix/query.sql @@ -0,0 +1,26 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table SELECT 0, 'Value_0'; + +DROP TABLE IF EXISTS test_table_data; +CREATE TABLE test_table_data +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table_data VALUES (1, 'Value_1'), (2, 'Value_2'); + +INSERT INTO test_table SELECT id, value FROM test_table_data; + +SELECT id, value FROM test_table ORDER BY id; + +DROP TABLE test_table_data; +DROP TABLE test_table; diff --git a/parser/testdata/02502_bad_values_schema_inference/ast.json b/parser/testdata/02502_bad_values_schema_inference/ast.json new file mode 100644 index 000000000..6cbc3b681 --- /dev/null +++ b/parser/testdata/02502_bad_values_schema_inference/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DescribeQuery (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier Values" + }, + { + "explain": " Literal '(\\'abc)'" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.00104827, + "rows_read": 6, + "bytes_read": 205 + } +} diff --git a/parser/testdata/02502_bad_values_schema_inference/metadata.json b/parser/testdata/02502_bad_values_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02502_bad_values_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02502_bad_values_schema_inference/query.sql b/parser/testdata/02502_bad_values_schema_inference/query.sql new file mode 100644 index 000000000..67ac09832 --- /dev/null +++ b/parser/testdata/02502_bad_values_schema_inference/query.sql @@ -0,0 +1,2 @@ +desc 
format(Values, '(\'abc)'); -- { serverError CANNOT_EXTRACT_TABLE_STRUCTURE } + diff --git a/parser/testdata/02502_fuzz_bad_cast_to_ast_literal/ast.json b/parser/testdata/02502_fuzz_bad_cast_to_ast_literal/ast.json new file mode 100644 index 000000000..cdd0da329 --- /dev/null +++ b/parser/testdata/02502_fuzz_bad_cast_to_ast_literal/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001034847, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02502_fuzz_bad_cast_to_ast_literal/metadata.json b/parser/testdata/02502_fuzz_bad_cast_to_ast_literal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02502_fuzz_bad_cast_to_ast_literal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02502_fuzz_bad_cast_to_ast_literal/query.sql b/parser/testdata/02502_fuzz_bad_cast_to_ast_literal/query.sql new file mode 100644 index 000000000..3db59d9a3 --- /dev/null +++ b/parser/testdata/02502_fuzz_bad_cast_to_ast_literal/query.sql @@ -0,0 +1,7 @@ +SET allow_deprecated_syntax_for_merge_tree=1; +DROP TABLE IF EXISTS test54378; +CREATE TABLE test54378 (`part_date` Date, `pk_date` Date, `date` Date) ENGINE = MergeTree(part_date, pk_date, 8192); +INSERT INTO test54378 values ('2018-04-19', '2018-04-19', '2018-04-19'); +SELECT 232 FROM test54378 PREWHERE (part_date = (SELECT toDate('2018-04-19'))) IN (SELECT toDate('2018-04-19')) GROUP BY toDate(toDate(-2147483649, NULL), NULL), -inf; +DROP TABLE test54378; + diff --git a/parser/testdata/02503_bad_compatibility_setting/ast.json b/parser/testdata/02503_bad_compatibility_setting/ast.json new file mode 100644 index 000000000..169a37f21 --- /dev/null +++ b/parser/testdata/02503_bad_compatibility_setting/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001166709, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02503_bad_compatibility_setting/metadata.json b/parser/testdata/02503_bad_compatibility_setting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02503_bad_compatibility_setting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02503_bad_compatibility_setting/query.sql b/parser/testdata/02503_bad_compatibility_setting/query.sql new file mode 100644 index 000000000..178c6a875 --- /dev/null +++ b/parser/testdata/02503_bad_compatibility_setting/query.sql @@ -0,0 +1,3 @@ +set compatibility='a.a'; -- { serverError BAD_ARGUMENTS } +select value, changed from system.settings where name = 'compatibility' + diff --git a/parser/testdata/02503_in_lc_const_args_bug/ast.json b/parser/testdata/02503_in_lc_const_args_bug/ast.json new file mode 100644 index 000000000..5def2bc6b --- /dev/null +++ b/parser/testdata/02503_in_lc_const_args_bug/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function substr (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function 
toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'abc'" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001432078, + "rows_read": 15, + "bytes_read": 568 + } +} diff --git a/parser/testdata/02503_in_lc_const_args_bug/metadata.json b/parser/testdata/02503_in_lc_const_args_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02503_in_lc_const_args_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02503_in_lc_const_args_bug/query.sql b/parser/testdata/02503_in_lc_const_args_bug/query.sql new file mode 100644 index 000000000..6756e3815 --- /dev/null +++ b/parser/testdata/02503_in_lc_const_args_bug/query.sql @@ -0,0 +1,2 @@ +SELECT substr(toLowCardinality('abc'), 1 in 1) AS x GROUP BY x; + diff --git a/parser/testdata/02503_join_switch_alias_fuzz/ast.json b/parser/testdata/02503_join_switch_alias_fuzz/ast.json new file mode 100644 index 000000000..b530803ae --- /dev/null +++ b/parser/testdata/02503_join_switch_alias_fuzz/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias a) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias id)" + }, + { + "explain": " Literal '' (alias test)" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001279862, + "rows_read": 15, + "bytes_read": 614 + } +} diff --git a/parser/testdata/02503_join_switch_alias_fuzz/metadata.json b/parser/testdata/02503_join_switch_alias_fuzz/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02503_join_switch_alias_fuzz/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02503_join_switch_alias_fuzz/query.sql b/parser/testdata/02503_join_switch_alias_fuzz/query.sql new file mode 100644 index 000000000..113a8493d --- /dev/null +++ b/parser/testdata/02503_join_switch_alias_fuzz/query.sql @@ -0,0 +1,4 @@ +SELECT * FROM (SELECT 1 AS id, '' AS test) AS a +LEFT JOIN (SELECT test, 1 AS id, NULL AS test) AS b ON b.id = a.id +SETTINGS join_algorithm = 'auto', max_rows_in_join = 1, enable_analyzer = 1 +; diff --git a/parser/testdata/02503_mysql_compat_utc_timestamp/ast.json b/parser/testdata/02503_mysql_compat_utc_timestamp/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02503_mysql_compat_utc_timestamp/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02503_mysql_compat_utc_timestamp/metadata.json 
b/parser/testdata/02503_mysql_compat_utc_timestamp/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02503_mysql_compat_utc_timestamp/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02503_mysql_compat_utc_timestamp/query.sql b/parser/testdata/02503_mysql_compat_utc_timestamp/query.sql new file mode 100644 index 000000000..d6716f272 --- /dev/null +++ b/parser/testdata/02503_mysql_compat_utc_timestamp/query.sql @@ -0,0 +1,2 @@ +-- PowerBI issues this query. It should work at least somehow, though not necessarily in the same way as in MySQL. +SELECT TIMEDIFF(NOW(), UTC_TIMESTAMP()) DIV 600; diff --git a/parser/testdata/02504_bar_fractions/ast.json b/parser/testdata/02504_bar_fractions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02504_bar_fractions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02504_bar_fractions/metadata.json b/parser/testdata/02504_bar_fractions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02504_bar_fractions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02504_bar_fractions/query.sql b/parser/testdata/02504_bar_fractions/query.sql new file mode 100644 index 000000000..d182bced5 --- /dev/null +++ b/parser/testdata/02504_bar_fractions/query.sql @@ -0,0 +1,7 @@ +SELECT + number / 8 AS width, + bar(width, 0, 3, 3) AS b, + bar(width - 0.001, 0, 3, 3) AS `b_minus`, + hex(b), + hex(b_minus) +FROM numbers(20); diff --git a/parser/testdata/02504_disallow_arrayjoin_in_mutations/ast.json b/parser/testdata/02504_disallow_arrayjoin_in_mutations/ast.json new file mode 100644 index 000000000..86843c768 --- /dev/null +++ b/parser/testdata/02504_disallow_arrayjoin_in_mutations/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_02504 (children 1)" + }, + { + "explain": " Identifier test_02504" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000970537, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02504_disallow_arrayjoin_in_mutations/metadata.json b/parser/testdata/02504_disallow_arrayjoin_in_mutations/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02504_disallow_arrayjoin_in_mutations/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02504_disallow_arrayjoin_in_mutations/query.sql b/parser/testdata/02504_disallow_arrayjoin_in_mutations/query.sql new file mode 100644 index 000000000..d261a71d9 --- /dev/null +++ b/parser/testdata/02504_disallow_arrayjoin_in_mutations/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS test_02504; + +CREATE TABLE test_02504 (`a` UInt32,`b` UInt32) ENGINE = MergeTree ORDER BY a; +INSERT INTO test_02504 values (1, 1) (2, 2), (3, 3); +SELECT * FROM test_02504; + +ALTER TABLE test_02504 UPDATE b = 33 WHERE arrayJoin([1, 2]) = a; -- { serverError UNEXPECTED_EXPRESSION} + +DROP TABLE test_02504; \ No newline at end of file diff --git a/parser/testdata/02504_explain_ast_insert/ast.json b/parser/testdata/02504_explain_ast_insert/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02504_explain_ast_insert/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02504_explain_ast_insert/metadata.json b/parser/testdata/02504_explain_ast_insert/metadata.json new file mode 100644 index 000000000..ef120d978
--- /dev/null +++ b/parser/testdata/02504_explain_ast_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02504_explain_ast_insert/query.sql b/parser/testdata/02504_explain_ast_insert/query.sql new file mode 100644 index 000000000..3b8a64e6e --- /dev/null +++ b/parser/testdata/02504_explain_ast_insert/query.sql @@ -0,0 +1,2 @@ +explain ast insert into test values (balabala); +explain ast insert into test format TabSeparated balabala; \ No newline at end of file diff --git a/parser/testdata/02504_parse_datetime_best_effort_calebeaires/ast.json b/parser/testdata/02504_parse_datetime_best_effort_calebeaires/ast.json new file mode 100644 index 000000000..f74cb59b6 --- /dev/null +++ b/parser/testdata/02504_parse_datetime_best_effort_calebeaires/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery my_table (children 2)" + }, + { + "explain": " Identifier my_table" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " ColumnDeclaration col_date (children 1)" + }, + { + "explain": " DataType Date" + }, + { + "explain": " ColumnDeclaration col_date32 (children 1)" + }, + { + "explain": " DataType Date32" + }, + { + "explain": " ColumnDeclaration col_datetime (children 1)" + }, + { + "explain": " DataType DateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " ColumnDeclaration col_datetime32 (children 1)" + }, + { + "explain": " DataType DateTime32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " ColumnDeclaration col_datetime64 (children 1)" + }, + { + "explain": " DataType DateTime64" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001637943, + "rows_read": 18, + "bytes_read": 717 + } +} diff --git a/parser/testdata/02504_parse_datetime_best_effort_calebeaires/metadata.json b/parser/testdata/02504_parse_datetime_best_effort_calebeaires/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02504_parse_datetime_best_effort_calebeaires/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02504_parse_datetime_best_effort_calebeaires/query.sql b/parser/testdata/02504_parse_datetime_best_effort_calebeaires/query.sql new file mode 100644 index 000000000..e551ec515 --- /dev/null +++ b/parser/testdata/02504_parse_datetime_best_effort_calebeaires/query.sql @@ -0,0 +1,5 @@ +CREATE TEMPORARY TABLE my_table (col_date Date, col_date32 Date32, col_datetime DateTime('UTC'), col_datetime32 DateTime32('UTC'), col_datetime64 DateTime64); +insert into `my_table` (`col_date`, `col_date32`, `col_datetime`, `col_datetime32`, `col_datetime64`) values (parseDateTime64BestEffort('1969-01-01'), '1969-01-01', parseDateTime64BestEffort('1969-01-01 10:42:00'), parseDateTime64BestEffort('1969-01-01 10:42:00'), parseDateTime64BestEffort('1969-01-01 10:42:00')); + +-- The values for Date32 and DateTime64 will be year 1969, while the values of Date, DateTime will contain a value affected by implementation-defined overflow and can be arbitrary. 
+SELECT toYear(col_date), col_date32, toYear(col_datetime), toYear(col_datetime32), col_datetime64 FROM my_table; diff --git a/parser/testdata/02504_regexp_dictionary_table_source/ast.json b/parser/testdata/02504_regexp_dictionary_table_source/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02504_regexp_dictionary_table_source/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02504_regexp_dictionary_table_source/metadata.json b/parser/testdata/02504_regexp_dictionary_table_source/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02504_regexp_dictionary_table_source/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02504_regexp_dictionary_table_source/query.sql b/parser/testdata/02504_regexp_dictionary_table_source/query.sql new file mode 100644 index 000000000..604c5b179 --- /dev/null +++ b/parser/testdata/02504_regexp_dictionary_table_source/query.sql @@ -0,0 +1,88 @@ +-- Tags: use-vectorscan + +DROP DICTIONARY IF EXISTS regexp_dict1; +DROP TABLE IF EXISTS regexp_dictionary_source_table; + +CREATE TABLE regexp_dictionary_source_table +( + id UInt64, + parent_id UInt64, + regexp String, + keys Array(String), + values Array(String), +) ENGINE=TinyLog; + +-- test back reference. + +INSERT INTO regexp_dictionary_source_table VALUES (1, 0, 'Linux/(\d+[\.\d]*).+tlinux', ['name', 'version'], ['TencentOS', '\1']); +INSERT INTO regexp_dictionary_source_table VALUES (2, 0, '(\d+)/tclwebkit(\d+[\.\d]*)', ['name', 'version', 'comment'], ['Android', '$1', 'test $1 and $2']); +INSERT INTO regexp_dictionary_source_table VALUES (3, 2, '33/tclwebkit', ['version'], ['13']); +INSERT INTO regexp_dictionary_source_table VALUES (4, 2, '3[12]/tclwebkit', ['version'], ['12']); +INSERT INTO regexp_dictionary_source_table VALUES (5, 2, '3[12]/tclwebkit', ['version'], ['11']); +INSERT INTO regexp_dictionary_source_table VALUES (6, 2, '3[12]/tclwebkit', ['version'], ['10']); + +create dictionary regexp_dict1 +( + regexp String, + name String, + version Nullable(UInt64), + comment String default 'nothing' +) +PRIMARY KEY(regexp) +SOURCE(CLICKHOUSE(TABLE 'regexp_dictionary_source_table')) +LIFETIME(0) +LAYOUT(regexp_tree); + +select * from dictionary(regexp_dict1); + +select dictGet('regexp_dict1', ('name', 'version', 'comment'), 'Linux/101.tlinux'); +select dictGet('regexp_dict1', ('name', 'version', 'comment'), '33/tclwebkit11.10x'); +select dictGet('regexp_dict1', ('name', 'version', 'comment'), '30/tclwebkit'); +select dictGetOrDefault('regexp_dict1', ('name', 'version', 'comment'), '30/tclwebkit', ('', 0, 'default')); + +-- test column input + +DROP table IF EXISTS needle_table; +CREATE TABLE needle_table +( + key String +) +ENGINE=TinyLog; + +INSERT INTO needle_table select concat(toString(number + 30), '/tclwebkit', toString(number)) from system.numbers limit 15; + +select * from needle_table; +select dictGet(regexp_dict1, ('name', 'version'), key) from needle_table; + +-- test invalid definitions +INSERT INTO regexp_dictionary_source_table VALUES (6, 2, '3[12]/tclwebkit', ['version'], ['10']); +SYSTEM RELOAD dictionary regexp_dict1; -- { serverError INCORRECT_DICTIONARY_DEFINITION } + +truncate table regexp_dictionary_source_table; + +INSERT INTO regexp_dictionary_source_table VALUES (6, 2, '3[12]/tclwebkit', ['version'], ['10']); +SYSTEM RELOAD dictionary regexp_dict1; -- { serverError INCORRECT_DICTIONARY_DEFINITION } + +truncate table regexp_dictionary_source_table; + +INSERT INTO
regexp_dictionary_source_table VALUES (1, 2, 'Linux/(\d+[\.\d]*).+tlinux', ['name', 'version'], ['TencentOS', '\1']); +INSERT INTO regexp_dictionary_source_table VALUES (2, 3, '(\d+)/tclwebkit(\d+[\.\d]*)', ['name', 'version', 'comment'], ['Android', '$1', 'test $1 and $2']); +INSERT INTO regexp_dictionary_source_table VALUES (3, 1, '(\d+)/tclwebkit(\d+[\.\d]*)', ['name', 'version', 'comment'], ['Android', '$1', 'test $1 and $2']); +SYSTEM RELOAD dictionary regexp_dict1; -- { serverError INCORRECT_DICTIONARY_DEFINITION } + +-- test priority +truncate table regexp_dictionary_source_table; +INSERT INTO regexp_dictionary_source_table VALUES (1, 0, '(\d+)/tclwebkit', ['name', 'version'], ['Android', '$1']); +INSERT INTO regexp_dictionary_source_table VALUES (3, 1, '33/tclwebkit', ['name'], ['Android1']); -- a child has higher priority than its parent. +INSERT INTO regexp_dictionary_source_table VALUES (2, 0, '33/tclwebkit', ['version', 'comment'], ['13', 'matched 3']); -- a larger id has lower priority than a smaller id. +SYSTEM RELOAD dictionary regexp_dict1; +select dictGet(regexp_dict1, ('name', 'version', 'comment'), '33/tclwebkit'); + +truncate table regexp_dictionary_source_table; +SYSTEM RELOAD dictionary regexp_dict1; -- { serverError INCORRECT_DICTIONARY_DEFINITION } + +select * from dictionary(regexp_dict1); + +DROP DICTIONARY IF EXISTS regexp_dict1; +DROP TABLE IF EXISTS regexp_dictionary_source_table; +DROP TABLE IF EXISTS needle_table; diff --git a/parser/testdata/02505_forbid_paths_in_datetime_timezone/ast.json b/parser/testdata/02505_forbid_paths_in_datetime_timezone/ast.json new file mode 100644 index 000000000..dfe5f1020 --- /dev/null +++ b/parser/testdata/02505_forbid_paths_in_datetime_timezone/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal '\/abc'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001249658, + "rows_read": 8, + "bytes_read": 291 + } +} diff --git a/parser/testdata/02505_forbid_paths_in_datetime_timezone/metadata.json b/parser/testdata/02505_forbid_paths_in_datetime_timezone/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++
b/parser/testdata/02506_date_time64_floating_point_negative_value/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUnixTimestamp64Milli (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '1969-12-31 23:59:59.999'" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal 'Europe\/Amsterdam'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001603815, + "rows_read": 11, + "bytes_read": 458 + } +} diff --git a/parser/testdata/02506_date_time64_floating_point_negative_value/metadata.json b/parser/testdata/02506_date_time64_floating_point_negative_value/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02506_date_time64_floating_point_negative_value/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02506_date_time64_floating_point_negative_value/query.sql b/parser/testdata/02506_date_time64_floating_point_negative_value/query.sql new file mode 100644 index 000000000..dd663c780 --- /dev/null +++ b/parser/testdata/02506_date_time64_floating_point_negative_value/query.sql @@ -0,0 +1,4 @@ +select toUnixTimestamp64Milli(toDateTime64('1969-12-31 23:59:59.999', 3, 'Europe/Amsterdam')); +select toUnixTimestamp64Milli(toDateTime64('1969-12-31 23:59:59.999', 3, 'UTC')); +select fromUnixTimestamp64Milli(toInt64(-1), 'Europe/Amsterdam'); +select fromUnixTimestamp64Milli(toInt64(-1), 'UTC'); diff --git a/parser/testdata/02507_to_unix_timestamp_overflow/ast.json b/parser/testdata/02507_to_unix_timestamp_overflow/ast.json new file mode 100644 index 000000000..3640f2bec --- /dev/null +++ b/parser/testdata/02507_to_unix_timestamp_overflow/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUnixTimestamp (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '1928-12-31 12:12:12.123'" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal 'UTC'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001057197, + "rows_read": 11, + "bytes_read": 438 + } +} diff --git a/parser/testdata/02507_to_unix_timestamp_overflow/metadata.json b/parser/testdata/02507_to_unix_timestamp_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02507_to_unix_timestamp_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02507_to_unix_timestamp_overflow/query.sql b/parser/testdata/02507_to_unix_timestamp_overflow/query.sql new file mode 100644 index 000000000..42479f6db --- /dev/null +++ b/parser/testdata/02507_to_unix_timestamp_overflow/query.sql @@ -0,0 +1,2 @@ +SELECT toUnixTimestamp(toDateTime64('1928-12-31 
12:12:12.123', 3, 'UTC')); -- { serverError DECIMAL_OVERFLOW } +SELECT toInt64(toDateTime64('1928-12-31 12:12:12.123', 3, 'UTC')); diff --git a/parser/testdata/02508_bad_graphite/ast.json b/parser/testdata/02508_bad_graphite/ast.json new file mode 100644 index 000000000..943b71072 --- /dev/null +++ b/parser/testdata/02508_bad_graphite/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_graphite (children 1)" + }, + { + "explain": " Identifier test_graphite" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001246433, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/02508_bad_graphite/metadata.json b/parser/testdata/02508_bad_graphite/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02508_bad_graphite/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02508_bad_graphite/query.sql b/parser/testdata/02508_bad_graphite/query.sql new file mode 100644 index 000000000..a0ca9dcf6 --- /dev/null +++ b/parser/testdata/02508_bad_graphite/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS test_graphite; +create table test_graphite (key UInt32, Path String, Time DateTime('UTC'), Value UInt8, Version UInt32, col UInt64) + engine = GraphiteMergeTree('graphite_rollup') order by key; + +INSERT INTO test_graphite (key) VALUES (0); -- { serverError BAD_ARGUMENTS } +DROP TABLE test_graphite; diff --git a/parser/testdata/02508_index_analysis_to_date_timezone/ast.json b/parser/testdata/02508_index_analysis_to_date_timezone/ast.json new file mode 100644 index 000000000..3740b34f1 --- /dev/null +++ b/parser/testdata/02508_index_analysis_to_date_timezone/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table (children 1)" + }, + { + "explain": " Identifier table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001533609, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/02508_index_analysis_to_date_timezone/metadata.json b/parser/testdata/02508_index_analysis_to_date_timezone/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02508_index_analysis_to_date_timezone/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02508_index_analysis_to_date_timezone/query.sql b/parser/testdata/02508_index_analysis_to_date_timezone/query.sql new file mode 100644 index 000000000..a7e4f6e7a --- /dev/null +++ b/parser/testdata/02508_index_analysis_to_date_timezone/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS table; +CREATE TABLE table (uid UUID, date DateTime('Asia/Kamchatka')) ENGINE = MergeTree ORDER BY date; + +INSERT INTO `table` VALUES ('4c36abda-8bd8-11eb-8204-005056aa8bf6', '2021-03-24 01:04:27'), ('4c408902-8bd8-11eb-8204-005056aa8bf6', '2021-03-24 01:04:27'), ('4c5bf20a-8bd8-11eb-8204-005056aa8bf6', '2021-03-24 01:04:27'), ('4c61623a-8bd8-11eb-8204-005056aa8bf6', '2021-03-24 01:04:27'), ('4c6efab2-8bd8-11eb-a952-005056aa8bf6', '2021-03-24 01:04:27'); + +SELECT uid, date, toDate(date) = toDate('2021-03-24') AS res FROM table WHERE res = 1 ORDER BY uid, date; +SELECT '---'; +SELECT uid, date, toDate(date) = toDate('2021-03-24') AS res FROM table WHERE toDate(date) = toDate('2021-03-24') ORDER BY uid, date; + +DROP TABLE table; diff --git a/parser/testdata/02509_h3_arguments/ast.json b/parser/testdata/02509_h3_arguments/ast.json new file 
mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02509_h3_arguments/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02509_h3_arguments/metadata.json b/parser/testdata/02509_h3_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02509_h3_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02509_h3_arguments/query.sql b/parser/testdata/02509_h3_arguments/query.sql new file mode 100644 index 000000000..b5b8b9497 --- /dev/null +++ b/parser/testdata/02509_h3_arguments/query.sql @@ -0,0 +1,13 @@ +-- Tags: no-fasttest + +select h3ToParent(641573946153969375, 1); +select h3ToParent(641573946153969375, arrayJoin([1,2])); + +DROP TABLE IF EXISTS data_table; + +CREATE TABLE data_table (id UInt64, longitude Float64, latitude Float64) ENGINE=MergeTree ORDER BY id; +INSERT INTO data_table SELECT number, number, number FROM numbers(10); +SELECT geoToH3(longitude, latitude, toUInt8(8)) AS h3Index FROM data_table ORDER BY 1; +SELECT geoToH3(longitude, latitude, toUInt8(longitude - longitude + 8)) AS h3Index FROM data_table ORDER BY 1; + +DROP TABLE data_table; diff --git a/parser/testdata/02510_group_by_prewhere_null/ast.json b/parser/testdata/02510_group_by_prewhere_null/ast.json new file mode 100644 index 000000000..b2ec4ffa0 --- /dev/null +++ b/parser/testdata/02510_group_by_prewhere_null/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table1 (children 1)" + }, + { + "explain": " Identifier table1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00108506, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/02510_group_by_prewhere_null/metadata.json b/parser/testdata/02510_group_by_prewhere_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02510_group_by_prewhere_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02510_group_by_prewhere_null/query.sql b/parser/testdata/02510_group_by_prewhere_null/query.sql new file mode 100644 index 000000000..90a638d0b --- /dev/null +++ b/parser/testdata/02510_group_by_prewhere_null/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS table1; + +create table table1 ( + col1 Int32, + col2 Int32 +) +ENGINE = MergeTree +partition by tuple() +order by col1; + +INSERT INTO table1 VALUES (1, 2), (1, 4); + +with NULL as pid +select a.col1, sum(a.col2) as summ +from table1 a +prewhere (pid is null or a.col2 = pid) +group by a.col1; + +with 123 as pid +select a.col1, sum(a.col2) as summ +from table1 a +prewhere (pid is null or a.col2 = pid) +group by a.col1; + +DROP TABLE table1; diff --git a/parser/testdata/02511_complex_literals_as_aggregate_function_parameters/ast.json b/parser/testdata/02511_complex_literals_as_aggregate_function_parameters/ast.json new file mode 100644 index 000000000..4a15f1380 --- /dev/null +++ b/parser/testdata/02511_complex_literals_as_aggregate_function_parameters/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function 
sumMapFilteredState (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3]" + }, + { + "explain": " Literal Array_[UInt64_10, UInt64_10, UInt64_10]" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2]" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001552165, + "rows_read": 12, + "bytes_read": 545 + } +} diff --git a/parser/testdata/02511_complex_literals_as_aggregate_function_parameters/metadata.json b/parser/testdata/02511_complex_literals_as_aggregate_function_parameters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02511_complex_literals_as_aggregate_function_parameters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02511_complex_literals_as_aggregate_function_parameters/query.sql b/parser/testdata/02511_complex_literals_as_aggregate_function_parameters/query.sql new file mode 100644 index 000000000..92b5f0143 --- /dev/null +++ b/parser/testdata/02511_complex_literals_as_aggregate_function_parameters/query.sql @@ -0,0 +1,4 @@ +SELECT toTypeName(sumMapFilteredState([1, 2])([1, 2, 3], [10, 10, 10])); +SELECT hex(sumMapFilteredState([1, 2])([1, 2, 3], [10, 10, 10])); +SELECT hex(unhex('02010A00000000000000020A00000000000000')::AggregateFunction(1, sumMapFiltered([1, 2]), Array(UInt8), Array(UInt8))); +SELECT sumMapFilteredMerge([1, 2])(*) FROM remote('127.0.0.{1,2}', view(SELECT sumMapFilteredState([1, 2])([1, 2, 3], [10, 10, 10]))); diff --git a/parser/testdata/02512_array_join_name_resolution/ast.json b/parser/testdata/02512_array_join_name_resolution/ast.json new file mode 100644 index 000000000..266228cc7 --- /dev/null +++ b/parser/testdata/02512_array_join_name_resolution/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery x (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001231337, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02512_array_join_name_resolution/metadata.json b/parser/testdata/02512_array_join_name_resolution/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02512_array_join_name_resolution/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02512_array_join_name_resolution/query.sql b/parser/testdata/02512_array_join_name_resolution/query.sql new file mode 100644 index 000000000..5bcea9677 --- /dev/null +++ b/parser/testdata/02512_array_join_name_resolution/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS x; +CREATE TABLE x ( `arr.key` Array(String), `arr.value` Array(String), `n` String ) ENGINE = Memory; +INSERT INTO x VALUES (['Hello', 'World'], ['abc', 'def'], 'test'); + +SELECT + key, + any(toString(n)) +FROM +( + SELECT + arr.key AS key, + n + FROM x + ARRAY JOIN arr +) +GROUP BY key +ORDER BY key; + +DROP TABLE x; diff --git a/parser/testdata/02513_analyzer_duplicate_alias_crash_fix/ast.json b/parser/testdata/02513_analyzer_duplicate_alias_crash_fix/ast.json new file mode 100644 index 000000000..7176d32d2 --- /dev/null +++ b/parser/testdata/02513_analyzer_duplicate_alias_crash_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00109169, + 
"rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02513_analyzer_duplicate_alias_crash_fix/metadata.json b/parser/testdata/02513_analyzer_duplicate_alias_crash_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02513_analyzer_duplicate_alias_crash_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02513_analyzer_duplicate_alias_crash_fix/query.sql b/parser/testdata/02513_analyzer_duplicate_alias_crash_fix/query.sql new file mode 100644 index 000000000..e54252b5c --- /dev/null +++ b/parser/testdata/02513_analyzer_duplicate_alias_crash_fix/query.sql @@ -0,0 +1,4 @@ +SET enable_analyzer = 1; + +SELECT toUInt64(NULL) AS x FROM (SELECT 1) HAVING x IN + (SELECT NULL FROM (SELECT x IN (SELECT x IN (SELECT 1), x IN (SELECT 1) FROM (SELECT 1 WHERE x IN (SELECT NULL FROM (SELECT NULL)))))); diff --git a/parser/testdata/02513_analyzer_sort_msan/ast.json b/parser/testdata/02513_analyzer_sort_msan/ast.json new file mode 100644 index 000000000..4a448262b --- /dev/null +++ b/parser/testdata/02513_analyzer_sort_msan/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery products (children 1)" + }, + { + "explain": " Identifier products" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001585497, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/02513_analyzer_sort_msan/metadata.json b/parser/testdata/02513_analyzer_sort_msan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02513_analyzer_sort_msan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02513_analyzer_sort_msan/query.sql b/parser/testdata/02513_analyzer_sort_msan/query.sql new file mode 100644 index 000000000..b86a15e9e --- /dev/null +++ b/parser/testdata/02513_analyzer_sort_msan/query.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS products; + +SET enable_analyzer = 1; + +CREATE TABLE products (`price` UInt32) ENGINE = Memory; +INSERT INTO products VALUES (1); + +SELECT rank() OVER (ORDER BY price) AS rank FROM products ORDER BY rank; diff --git a/parser/testdata/02513_broken_datetime64_init_on_mac/ast.json b/parser/testdata/02513_broken_datetime64_init_on_mac/ast.json new file mode 100644 index 000000000..f271d8080 --- /dev/null +++ b/parser/testdata/02513_broken_datetime64_init_on_mac/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1670853969'" + }, + { + "explain": " Literal 'DateTime64(3, \\'UTC\\')'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001390604, + "rows_read": 8, + "bytes_read": 307 + } +} diff --git a/parser/testdata/02513_broken_datetime64_init_on_mac/metadata.json b/parser/testdata/02513_broken_datetime64_init_on_mac/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02513_broken_datetime64_init_on_mac/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02513_broken_datetime64_init_on_mac/query.sql b/parser/testdata/02513_broken_datetime64_init_on_mac/query.sql 
new file mode 100644 index 000000000..f8d3b2847 --- /dev/null +++ b/parser/testdata/02513_broken_datetime64_init_on_mac/query.sql @@ -0,0 +1 @@ +select 1670853969::DateTime64(3, 'UTC'); diff --git a/parser/testdata/02513_date_string_comparison/ast.json b/parser/testdata/02513_date_string_comparison/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02513_date_string_comparison/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02513_date_string_comparison/metadata.json b/parser/testdata/02513_date_string_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02513_date_string_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02513_date_string_comparison/query.sql b/parser/testdata/02513_date_string_comparison/query.sql new file mode 100644 index 000000000..40bc80709 --- /dev/null +++ b/parser/testdata/02513_date_string_comparison/query.sql @@ -0,0 +1,65 @@ +CREATE TABLE datetime_date_table ( + col_date Date, + col_datetime DateTime, + col_datetime64 DateTime64(3), + col_date_string String, + col_datetime_string String, + col_datetime64_string DateTime64, + col_date_lc LowCardinality(String), + col_datetime_lc LowCardinality(String), + col_datetime64_lc LowCardinality(String), + PRIMARY KEY col_date +) ENGINE = MergeTree; + +INSERT INTO datetime_date_table VALUES ('2020-03-04', '2020-03-04 10:23:45', '2020-03-04 10:23:45.123', '2020-03-04', '2020-03-04 10:23:45', '2020-03-04 10:23:45.123', '2020-03-04', '2020-03-04 10:23:45', '2020-03-04 10:23:45.123'); +INSERT INTO datetime_date_table VALUES ('2020-03-05', '2020-03-05 12:23:45', '2020-03-05 12:23:45.123', '2020-03-05', '2020-03-05 12:23:45', '2020-03-05 12:23:45.123', '2020-03-05', '2020-03-05 12:23:45', '2020-03-05 12:23:45.123'); +INSERT INTO datetime_date_table VALUES ('2020-04-05', '2020-04-05 00:10:45', '2020-04-05 00:10:45.123', '2020-04-05', '2020-04-05 00:10:45', '2020-04-05 00:10:45.123', '2020-04-05', '2020-04-05 00:10:45', '2020-04-05 00:10:45.123'); + +SELECT 'Date'; +SELECT count() FROM datetime_date_table WHERE col_date > '2020-03-04'; +SELECT count() FROM datetime_date_table WHERE col_date > '2020-03-04'::Date; +SELECT count() FROM datetime_date_table WHERE col_date > '2020-03-04 10:20:45'; -- { serverError TYPE_MISMATCH } +SELECT count() FROM datetime_date_table WHERE col_date > '2020-03-04 10:20:45'::DateTime; +SELECT count() FROM datetime_date_table WHERE col_date > '2020-03-04 10:20:45.100'; -- { serverError TYPE_MISMATCH } +SELECT count() FROM datetime_date_table WHERE col_date > '2020-03-04 10:20:45.100'::DateTime64(3); + +SELECT 'DateTime'; +SELECT count() FROM datetime_date_table WHERE col_datetime > '2020-03-04'; +SELECT count() FROM datetime_date_table WHERE col_datetime > '2020-03-04'::Date; +SELECT count() FROM datetime_date_table WHERE col_datetime > '2020-03-04 10:20:45'; +SELECT count() FROM datetime_date_table WHERE col_datetime > '2020-03-04 10:20:45'::DateTime; +SELECT count() FROM datetime_date_table WHERE col_datetime > '2020-03-04 10:20:45.100'; -- { serverError TYPE_MISMATCH } +SELECT count() FROM datetime_date_table WHERE col_datetime > '2020-03-04 10:20:45.100'::DateTime64(3); + +SELECT 'Date String'; +SELECT count() FROM datetime_date_table WHERE col_date_string > '2020-03-04'; +SELECT count() FROM datetime_date_table WHERE col_date_string > '2020-03-04'::Date; -- { serverError NO_COMMON_TYPE } +SELECT count() FROM datetime_date_table WHERE 
col_date_string > '2020-03-04 10:20:45'; +SELECT count() FROM datetime_date_table WHERE col_date_string > '2020-03-04 10:20:45'::DateTime; -- { serverError NO_COMMON_TYPE } +SELECT count() FROM datetime_date_table WHERE col_date_string > '2020-03-04 10:20:45.100'; +SELECT count() FROM datetime_date_table WHERE col_date_string > '2020-03-04 10:20:45.100'::DateTime64(3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 'DateTime String'; +SELECT count() FROM datetime_date_table WHERE col_datetime_string > '2020-03-04'; +SELECT count() FROM datetime_date_table WHERE col_datetime_string > '2020-03-04'::Date; -- { serverError NO_COMMON_TYPE } +SELECT count() FROM datetime_date_table WHERE col_datetime_string > '2020-03-04 10:20:45'; +SELECT count() FROM datetime_date_table WHERE col_datetime_string > '2020-03-04 10:20:45'::DateTime; -- { serverError NO_COMMON_TYPE } +SELECT count() FROM datetime_date_table WHERE col_datetime_string > '2020-03-04 10:20:45.100'; +SELECT count() FROM datetime_date_table WHERE col_datetime_string > '2020-03-04 10:20:45.100'::DateTime64(3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 'Date LC'; +SELECT count() FROM datetime_date_table WHERE col_date_lc > '2020-03-04'; +SELECT count() FROM datetime_date_table WHERE col_date_lc > '2020-03-04'::Date; -- { serverError NO_COMMON_TYPE } +SELECT count() FROM datetime_date_table WHERE col_date_lc > '2020-03-04 10:20:45'; +SELECT count() FROM datetime_date_table WHERE col_date_lc > '2020-03-04 10:20:45'::DateTime; -- { serverError NO_COMMON_TYPE } +SELECT count() FROM datetime_date_table WHERE col_date_lc > '2020-03-04 10:20:45.100'; +SELECT count() FROM datetime_date_table WHERE col_date_lc > '2020-03-04 10:20:45.100'::DateTime64(3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 'DateTime LC'; +SELECT count() FROM datetime_date_table WHERE col_datetime_lc > '2020-03-04'; +SELECT count() FROM datetime_date_table WHERE col_datetime_lc > '2020-03-04'::Date; -- { serverError NO_COMMON_TYPE } +SELECT count() FROM datetime_date_table WHERE col_datetime_lc > '2020-03-04 10:20:45'; +SELECT count() FROM datetime_date_table WHERE col_datetime_lc > '2020-03-04 10:20:45'::DateTime; -- { serverError NO_COMMON_TYPE } +SELECT count() FROM datetime_date_table WHERE col_datetime_lc > '2020-03-04 10:20:45.100'; +SELECT count() FROM datetime_date_table WHERE col_datetime_lc > '2020-03-04 10:20:45.100'::DateTime64(3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + diff --git a/parser/testdata/02513_prewhere_combine_step_filters/ast.json b/parser/testdata/02513_prewhere_combine_step_filters/ast.json new file mode 100644 index 000000000..c051614cf --- /dev/null +++ b/parser/testdata/02513_prewhere_combine_step_filters/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_02513 (children 1)" + }, + { + "explain": " Identifier table_02513" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001292139, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/02513_prewhere_combine_step_filters/metadata.json b/parser/testdata/02513_prewhere_combine_step_filters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02513_prewhere_combine_step_filters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02513_prewhere_combine_step_filters/query.sql b/parser/testdata/02513_prewhere_combine_step_filters/query.sql new file mode 100644 index 
000000000..de90734a2 --- /dev/null +++ b/parser/testdata/02513_prewhere_combine_step_filters/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS table_02513; + +CREATE TABLE table_02513 (n UInt64) ENGINE=MergeTree() ORDER BY tuple() SETTINGS index_granularity=100; + +INSERT INTO table_02513 SELECT number+11*13*1000 FROM numbers(20); + +SET mutations_sync=2; +SET max_threads=1; + +DELETE FROM table_02513 WHERE n%10=0; + +-- { echoOn } +SELECT * FROM table_02513; +SELECT * FROM table_02513 WHERE n%11; +SELECT * FROM table_02513 PREWHERE n%11; +SELECT * FROM table_02513 WHERE n%11 AND n%13; +SELECT * FROM table_02513 PREWHERE n%11 WHERE n%13; + +SELECT * FROM table_02513 WHERE n%143011; +SELECT * FROM table_02513 PREWHERE n%143011; +SELECT * FROM table_02513 WHERE n%143011 AND n%13; +SELECT * FROM table_02513 PREWHERE n%143011 WHERE n%13; +-- { echoOff } + +DROP TABLE table_02513; diff --git a/parser/testdata/02513_validate_data_types/ast.json b/parser/testdata/02513_validate_data_types/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02513_validate_data_types/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02513_validate_data_types/metadata.json b/parser/testdata/02513_validate_data_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02513_validate_data_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02513_validate_data_types/query.sql b/parser/testdata/02513_validate_data_types/query.sql new file mode 100644 index 000000000..c216add48 --- /dev/null +++ b/parser/testdata/02513_validate_data_types/query.sql @@ -0,0 +1,9 @@ +-- Tags: no-fasttest + +set allow_suspicious_low_cardinality_types=0; +select CAST(1000000, 'LowCardinality(UInt64)'); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +desc file(nonexist.json, JSONEachRow, 'lc LowCardinality(UInt64)'); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} + +set allow_suspicious_fixed_string_types=0; +select CAST('', 'FixedString(1000)'); -- {serverError ILLEGAL_COLUMN} +desc file(nonexist.json, JSONEachRow, 'fs FixedString(1000)'); -- {serverError ILLEGAL_COLUMN} diff --git a/parser/testdata/02514_analyzer_drop_join_on/ast.json b/parser/testdata/02514_analyzer_drop_join_on/ast.json new file mode 100644 index 000000000..0b22e88f2 --- /dev/null +++ b/parser/testdata/02514_analyzer_drop_join_on/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery a (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001429543, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02514_analyzer_drop_join_on/metadata.json b/parser/testdata/02514_analyzer_drop_join_on/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02514_analyzer_drop_join_on/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02514_analyzer_drop_join_on/query.sql b/parser/testdata/02514_analyzer_drop_join_on/query.sql new file mode 100644 index 000000000..d0b571ced --- /dev/null +++ b/parser/testdata/02514_analyzer_drop_join_on/query.sql @@ -0,0 +1,47 @@ +DROP TABLE IF EXISTS a; +DROP TABLE IF EXISTS b; +DROP TABLE IF EXISTS c; +DROP TABLE IF EXISTS d; + +CREATE TABLE a (k UInt64, a1 UInt64, a2 String) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO a VALUES (1, 1, 'a'), (2, 2, 'b'), (3, 3, 'c'); + +CREATE 
TABLE b (k UInt64, b1 UInt64, b2 String) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO b VALUES (1, 1, 'a'), (2, 2, 'b'), (3, 3, 'c'); + +CREATE TABLE c (k UInt64, c1 UInt64, c2 String) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO c VALUES (1, 1, 'a'), (2, 2, 'b'), (3, 3, 'c'); + +CREATE TABLE d (k UInt64, d1 UInt64, d2 String) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO d VALUES (1, 1, 'a'), (2, 2, 'b'), (3, 3, 'c'); + +SET enable_analyzer = 1; +SET query_plan_join_swap_table = 'false'; +SET enable_parallel_replicas = 0; +SET query_plan_optimize_join_order_limit = 2; +SET optimize_empty_string_comparisons=0; + +-- { echoOn } + +EXPLAIN PLAN header = 1 +SELECT count() FROM a JOIN b ON b.b1 = a.a1 JOIN c ON c.c1 = b.b1 JOIN d ON d.d1 = c.c1 GROUP BY a.a2 +; + +EXPLAIN PLAN header = 1 +SELECT a.a2, d.d2 FROM a JOIN b USING (k) JOIN c USING (k) JOIN d USING (k) +; + +EXPLAIN PLAN header = 1 +SELECT b.bx FROM a +JOIN (SELECT b1, b2 || 'x' AS bx FROM b ) AS b ON b.b1 = a.a1 +JOIN c ON c.c1 = b.b1 +JOIN (SELECT number AS d1 from numbers(10)) AS d ON d.d1 = c.c1 +WHERE c.c2 != '' ORDER BY a.a2 +; + +-- { echoOff } + +DROP TABLE IF EXISTS a; +DROP TABLE IF EXISTS b; +DROP TABLE IF EXISTS c; +DROP TABLE IF EXISTS d; diff --git a/parser/testdata/02514_bad_index_granularity/ast.json b/parser/testdata/02514_bad_index_granularity/ast.json new file mode 100644 index 000000000..f9a95b160 --- /dev/null +++ b/parser/testdata/02514_bad_index_granularity/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001143575, + "rows_read": 2, + "bytes_read": 55 + } +} diff --git a/parser/testdata/02514_bad_index_granularity/metadata.json b/parser/testdata/02514_bad_index_granularity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02514_bad_index_granularity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02514_bad_index_granularity/query.sql b/parser/testdata/02514_bad_index_granularity/query.sql new file mode 100644 index 000000000..975af2d07 --- /dev/null +++ b/parser/testdata/02514_bad_index_granularity/query.sql @@ -0,0 +1,7 @@ +CREATE TABLE t +( + id Int64, + d String, + p Map(String, String) +) +ENGINE = ReplacingMergeTree order by id settings index_granularity = 0; -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02514_if_with_lazy_low_cardinality/ast.json b/parser/testdata/02514_if_with_lazy_low_cardinality/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02514_if_with_lazy_low_cardinality/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02514_if_with_lazy_low_cardinality/metadata.json b/parser/testdata/02514_if_with_lazy_low_cardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02514_if_with_lazy_low_cardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02514_if_with_lazy_low_cardinality/query.sql b/parser/testdata/02514_if_with_lazy_low_cardinality/query.sql new file mode 100644 index 000000000..b169cfd0a --- /dev/null +++ b/parser/testdata/02514_if_with_lazy_low_cardinality/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + +create table if not exists t (`arr.key` Array(LowCardinality(String)), `arr.value` 
Array(LowCardinality(String))) engine = Memory; +insert into t (`arr.key`, `arr.value`) values (['a'], ['b']); +select if(true, if(lowerUTF8(arr.key) = 'a', 1, 2), 3) as x from t left array join arr; +drop table t; + diff --git a/parser/testdata/02514_null_dictionary_source/ast.json b/parser/testdata/02514_null_dictionary_source/ast.json new file mode 100644 index 000000000..c855beb28 --- /dev/null +++ b/parser/testdata/02514_null_dictionary_source/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery null_dict (children 1)" + }, + { + "explain": " Identifier null_dict" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001260562, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02514_null_dictionary_source/metadata.json b/parser/testdata/02514_null_dictionary_source/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02514_null_dictionary_source/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02514_null_dictionary_source/query.sql b/parser/testdata/02514_null_dictionary_source/query.sql new file mode 100644 index 000000000..bfd36042f --- /dev/null +++ b/parser/testdata/02514_null_dictionary_source/query.sql @@ -0,0 +1,46 @@ +DROP DICTIONARY IF EXISTS null_dict; +CREATE DICTIONARY null_dict ( + id UInt64, + val UInt8, + default_val UInt8 DEFAULT 123, + nullable_val Nullable(UInt8) +) +PRIMARY KEY id +SOURCE(NULL()) +LAYOUT(FLAT()) +LIFETIME(0); + +SELECT + dictGet('null_dict', 'val', 1337), + dictGetOrNull('null_dict', 'val', 1337), + dictGetOrDefault('null_dict', 'val', 1337, 111), + dictGetUInt8('null_dict', 'val', 1337), + dictGetUInt8OrDefault('null_dict', 'val', 1337, 111); + +SELECT + dictGet('null_dict', 'default_val', 1337), + dictGetOrNull('null_dict', 'default_val', 1337), + dictGetOrDefault('null_dict', 'default_val', 1337, 111), + dictGetUInt8('null_dict', 'default_val', 1337), + dictGetUInt8OrDefault('null_dict', 'default_val', 1337, 111); + +SELECT + dictGet('null_dict', 'nullable_val', 1337), + dictGetOrNull('null_dict', 'nullable_val', 1337), + dictGetOrDefault('null_dict', 'nullable_val', 1337, 111); + +SELECT val, nullable_val FROM null_dict; + +DROP DICTIONARY IF EXISTS null_ip_dict; +CREATE DICTIONARY null_ip_dict ( + network String, + val UInt8 DEFAULT 77 +) +PRIMARY KEY network +SOURCE(NULL()) +LAYOUT(IP_TRIE()) +LIFETIME(0); + +SELECT dictGet('null_ip_dict', 'val', toIPv4('127.0.0.1')); + +SELECT network, val FROM null_ip_dict; diff --git a/parser/testdata/02514_tsv_zero_started_number/ast.json b/parser/testdata/02514_tsv_zero_started_number/ast.json new file mode 100644 index 000000000..0f171e166 --- /dev/null +++ b/parser/testdata/02514_tsv_zero_started_number/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" 
+ }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier TSV" + }, + { + "explain": " Literal '0123'" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001343683, + "rows_read": 15, + "bytes_read": 554 + } +} diff --git a/parser/testdata/02514_tsv_zero_started_number/metadata.json b/parser/testdata/02514_tsv_zero_started_number/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02514_tsv_zero_started_number/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02514_tsv_zero_started_number/query.sql b/parser/testdata/02514_tsv_zero_started_number/query.sql new file mode 100644 index 000000000..d2058ea8f --- /dev/null +++ b/parser/testdata/02514_tsv_zero_started_number/query.sql @@ -0,0 +1,2 @@ +select toTypeName(*), * from format(TSV, '0123'); + diff --git a/parser/testdata/02515_aggregate_functions_statistics/ast.json b/parser/testdata/02515_aggregate_functions_statistics/ast.json new file mode 100644 index 000000000..234cc43bb --- /dev/null +++ b/parser/testdata/02515_aggregate_functions_statistics/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery fh (children 1)" + }, + { + "explain": " Identifier fh" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001038158, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02515_aggregate_functions_statistics/metadata.json b/parser/testdata/02515_aggregate_functions_statistics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02515_aggregate_functions_statistics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02515_aggregate_functions_statistics/query.sql b/parser/testdata/02515_aggregate_functions_statistics/query.sql new file mode 100644 index 000000000..df6e0cb06 --- /dev/null +++ b/parser/testdata/02515_aggregate_functions_statistics/query.sql @@ -0,0 +1,41 @@ +DROP TABLE IF EXISTS fh; + +CREATE TABLE fh(a_value UInt32, b_value Float64, c_value Float64, d_value Float64) ENGINE = Memory; + +INSERT INTO fh(a_value, b_value, c_value, d_value) VALUES (1, 5.6,-4.4, 2.6),(2, -9.6, 3, 3.3),(3, -1.3,-4, 1.2),(4, 5.3,9.7,2.3),(5, 4.4,0.037,1.222),(6, -8.6,-7.8,2.1233),(7, 5.1,9.3,8.1222),(8, 7.9,-3.6,9.837),(9, -8.2,0.62,8.43555),(10, -3,7.3,6.762); + +SELECT corrMatrix(a_value) FROM (select a_value from fh limit 0); + +SELECT corrMatrix(a_value) FROM (select a_value from fh limit 1); + +SELECT corrMatrix(a_value, b_value, c_value, d_value) FROM (select a_value, b_value, c_value, d_value from fh limit 0); + +SELECT corrMatrix(a_value, b_value, c_value, d_value) FROM (select a_value, b_value, c_value, d_value from fh limit 1); + +SELECT arrayMap(x -> arrayMap(y -> round(y, 5), x), corrMatrix(a_value, b_value, c_value, d_value)) FROM fh; + +SELECT round(abs(corr(x1,x2) - corrMatrix(x1,x2)[1][2]), 5), round(abs(corr(x1,x1) - corrMatrix(x1,x2)[1][1]), 5), round(abs(corr(x2,x2) - corrMatrix(x1,x2)[2][2]), 5) from (select randNormal(100, 1) as x1, randNormal(100,5) as x2 from numbers(100000)); + +SELECT covarSampMatrix(a_value) FROM (select a_value from fh limit 0); + +SELECT covarSampMatrix(a_value) FROM (select a_value from fh limit 1); + +SELECT covarSampMatrix(a_value, b_value, c_value, d_value) FROM (select a_value, b_value, c_value, d_value from fh limit 0); + +SELECT covarSampMatrix(a_value, b_value, c_value, d_value) FROM (select a_value, b_value, 
c_value, d_value from fh limit 1); + +SELECT arrayMap(x -> arrayMap(y -> round(y, 5), x), covarSampMatrix(a_value, b_value, c_value, d_value)) FROM fh; + +SELECT round(abs(covarSamp(x1,x2) - covarSampMatrix(x1,x2)[1][2]), 5), round(abs(covarSamp(x1,x1) - covarSampMatrix(x1,x2)[1][1]), 5), round(abs(covarSamp(x2,x2) - covarSampMatrix(x1,x2)[2][2]), 5) from (select randNormal(100, 1) as x1, randNormal(100,5) as x2 from numbers(100000)); + +SELECT covarPopMatrix(a_value) FROM (select a_value from fh limit 0); + +SELECT covarPopMatrix(a_value) FROM (select a_value from fh limit 1); + +SELECT covarPopMatrix(a_value, b_value, c_value, d_value) FROM (select a_value, b_value, c_value, d_value from fh limit 0); + +SELECT covarPopMatrix(a_value, b_value, c_value, d_value) FROM (select a_value, b_value, c_value, d_value from fh limit 1); + +SELECT arrayMap(x -> arrayMap(y -> round(y, 5), x), covarPopMatrix(a_value, b_value, c_value, d_value)) FROM fh; + +SELECT round(abs(covarPop(x1,x2) - covarPopMatrix(x1,x2)[1][2]), 5), round(abs(covarPop(x1,x1) - covarPopMatrix(x1,x2)[1][1]), 5), round(abs(covarPop(x2,x2) - covarPopMatrix(x1,x2)[2][2]), 5) from (select randNormal(100, 1) as x1, randNormal(100,5) as x2 from numbers(100000)); diff --git a/parser/testdata/02515_analyzer_null_for_empty/ast.json b/parser/testdata/02515_analyzer_null_for_empty/ast.json new file mode 100644 index 000000000..584427063 --- /dev/null +++ b/parser/testdata/02515_analyzer_null_for_empty/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001122049, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02515_analyzer_null_for_empty/metadata.json b/parser/testdata/02515_analyzer_null_for_empty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02515_analyzer_null_for_empty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02515_analyzer_null_for_empty/query.sql b/parser/testdata/02515_analyzer_null_for_empty/query.sql new file mode 100644 index 000000000..e12f21574 --- /dev/null +++ b/parser/testdata/02515_analyzer_null_for_empty/query.sql @@ -0,0 +1,4 @@ +SET enable_analyzer = 1; +SET aggregate_functions_null_for_empty = 1; + +SELECT max(aggr) FROM (SELECT max('92233720368547758.06') AS aggr FROM system.one); diff --git a/parser/testdata/02515_and_or_if_multiif_not_return_lc/ast.json b/parser/testdata/02515_and_or_if_multiif_not_return_lc/ast.json new file mode 100644 index 000000000..15413b326 --- /dev/null +++ b/parser/testdata/02515_and_or_if_multiif_not_return_lc/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + 
}, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001344947, + "rows_read": 22, + "bytes_read": 892 + } +} diff --git a/parser/testdata/02515_and_or_if_multiif_not_return_lc/metadata.json b/parser/testdata/02515_and_or_if_multiif_not_return_lc/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02515_and_or_if_multiif_not_return_lc/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02515_and_or_if_multiif_not_return_lc/query.sql b/parser/testdata/02515_and_or_if_multiif_not_return_lc/query.sql new file mode 100644 index 000000000..0ccccd4d9 --- /dev/null +++ b/parser/testdata/02515_and_or_if_multiif_not_return_lc/query.sql @@ -0,0 +1,5 @@ +select toTypeName(if(toLowCardinality(number % 2), 1, 2)) from numbers(1); +select toTypeName(multiIf(toLowCardinality(number % 2), 1, 1, 2, 3)) from numbers(1); +select toTypeName(toLowCardinality(number % 2) and 2) from numbers(1); +select toTypeName(toLowCardinality(number % 2) or 2) from numbers(1); + diff --git a/parser/testdata/02515_distinct_zero_size_key_bug_44831/ast.json b/parser/testdata/02515_distinct_zero_size_key_bug_44831/ast.json new file mode 100644 index 000000000..b6de951db --- /dev/null +++ b/parser/testdata/02515_distinct_zero_size_key_bug_44831/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function if (alias res) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal 't'" + }, + { + "explain": " Literal ''" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier res" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001536455, + "rows_read": 22, + "bytes_read": 816 + } +} diff --git a/parser/testdata/02515_distinct_zero_size_key_bug_44831/metadata.json b/parser/testdata/02515_distinct_zero_size_key_bug_44831/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02515_distinct_zero_size_key_bug_44831/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02515_distinct_zero_size_key_bug_44831/query.sql b/parser/testdata/02515_distinct_zero_size_key_bug_44831/query.sql new file mode 100644 index 000000000..96072b281 
--- /dev/null +++ b/parser/testdata/02515_distinct_zero_size_key_bug_44831/query.sql @@ -0,0 +1 @@ +SELECT DISTINCT NULL, if(number > 0, 't', '') AS res FROM numbers(1) ORDER BY res; diff --git a/parser/testdata/02515_generate_ulid/ast.json b/parser/testdata/02515_generate_ulid/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02515_generate_ulid/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02515_generate_ulid/metadata.json b/parser/testdata/02515_generate_ulid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02515_generate_ulid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02515_generate_ulid/query.sql b/parser/testdata/02515_generate_ulid/query.sql new file mode 100644 index 000000000..4059090a7 --- /dev/null +++ b/parser/testdata/02515_generate_ulid/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest + +SELECT generateULID(1) != generateULID(2), toTypeName(generateULID()); diff --git a/parser/testdata/02515_projections_with_totals/ast.json b/parser/testdata/02515_projections_with_totals/ast.json new file mode 100644 index 000000000..b6468e47f --- /dev/null +++ b/parser/testdata/02515_projections_with_totals/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001042418, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02515_projections_with_totals/metadata.json b/parser/testdata/02515_projections_with_totals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02515_projections_with_totals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02515_projections_with_totals/query.sql b/parser/testdata/02515_projections_with_totals/query.sql new file mode 100644 index 000000000..1e4b5c6f2 --- /dev/null +++ b/parser/testdata/02515_projections_with_totals/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS t; +CREATE TABLE t (x UInt8, PROJECTION p (SELECT x GROUP BY x)) ENGINE = MergeTree ORDER BY (); +INSERT INTO t VALUES (0); +SET group_by_overflow_mode = 'any', max_rows_to_group_by = 1000, totals_mode = 'after_having_auto'; +SELECT x FROM t GROUP BY x WITH TOTALS; + +SET optimize_aggregation_in_order=1; +SELECT x FROM t GROUP BY x WITH TOTALS; + +DROP TABLE t; diff --git a/parser/testdata/02515_tuple_lambda_parsing/ast.json b/parser/testdata/02515_tuple_lambda_parsing/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02515_tuple_lambda_parsing/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02515_tuple_lambda_parsing/metadata.json b/parser/testdata/02515_tuple_lambda_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02515_tuple_lambda_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02515_tuple_lambda_parsing/query.sql b/parser/testdata/02515_tuple_lambda_parsing/query.sql new file mode 100644 index 000000000..4ec49f30e --- /dev/null +++ b/parser/testdata/02515_tuple_lambda_parsing/query.sql @@ -0,0 +1,7 @@ +explain ast select tuple(a) -> f(a); -- { clientError SYNTAX_ERROR } +explain ast select tuple(a, b) -> f(a); -- { clientError SYNTAX_ERROR } +explain ast select (tuple(a)) -> f(a); -- { clientError SYNTAX_ERROR } 
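+-- The left-hand side of '->' must be a bare identifier or a parenthesized identifier list such as (a, b); +-- a tuple() call, any other function call, a cast, or a literal there is an ordinary expression, so each of these must fail to parse.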
+explain ast select (f(a)) -> f(a); -- { clientError SYNTAX_ERROR } +explain ast select (a::UInt64) -> f(a); -- { clientError SYNTAX_ERROR } +explain ast select (1) -> f(a); -- { clientError SYNTAX_ERROR } +explain ast select (1::UInt64) -> f(a); -- { clientError SYNTAX_ERROR } diff --git a/parser/testdata/02516_projections_and_context/ast.json b/parser/testdata/02516_projections_and_context/ast.json new file mode 100644 index 000000000..73f6a36a9 --- /dev/null +++ b/parser/testdata/02516_projections_and_context/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test1__fuzz_37 (children 1)" + }, + { + "explain": " Identifier test1__fuzz_37" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001427479, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/02516_projections_and_context/metadata.json b/parser/testdata/02516_projections_and_context/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02516_projections_and_context/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02516_projections_and_context/query.sql b/parser/testdata/02516_projections_and_context/query.sql new file mode 100644 index 000000000..ec14fc0aa --- /dev/null +++ b/parser/testdata/02516_projections_and_context/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS test1__fuzz_37; +CREATE TABLE test1__fuzz_37 (`i` Date) ENGINE = MergeTree ORDER BY i; +insert into test1__fuzz_37 values ('2020-10-10'); +set enable_analyzer = 0; +SELECT count() FROM test1__fuzz_37 GROUP BY dictHas(NULL, (dictHas(NULL, (('', materialize(NULL)), materialize(NULL))), 'KeyKey')), dictHas('test_dictionary', tuple(materialize('Ke\0'))), tuple(dictHas(NULL, (tuple('Ke\0Ke\0Ke\0Ke\0Ke\0Ke\0\0\0\0Ke\0'), materialize(NULL)))), 'test_dicti\0nary', (('', materialize(NULL)), dictHas(NULL, (dictHas(NULL, tuple(materialize(NULL))), 'KeyKeyKeyKeyKeyKeyKeyKey')), materialize(NULL)); -- { serverError BAD_ARGUMENTS } +SELECT count() FROM test1__fuzz_37 GROUP BY dictHas('non_existing_dictionary', materialize('a')); -- { serverError BAD_ARGUMENTS } +set enable_analyzer = 1; +SELECT count() FROM test1__fuzz_37 GROUP BY dictHas(NULL, (dictHas(NULL, (('', materialize(NULL)), materialize(NULL))), 'KeyKey')), dictHas('test_dictionary', tuple(materialize('Ke\0'))), tuple(dictHas(NULL, (tuple('Ke\0Ke\0Ke\0Ke\0Ke\0Ke\0\0\0\0Ke\0'), materialize(NULL)))), 'test_dicti\0nary', (('', materialize(NULL)), dictHas(NULL, (dictHas(NULL, tuple(materialize(NULL))), 'KeyKeyKeyKeyKeyKeyKeyKey')), materialize(NULL)); -- { serverError BAD_ARGUMENTS } +SELECT count() FROM test1__fuzz_37 GROUP BY dictHas('non_existing_dictionary', materialize('a')); -- { serverError BAD_ARGUMENTS } +DROP TABLE test1__fuzz_37; diff --git a/parser/testdata/02516_projections_with_rollup/ast.json b/parser/testdata/02516_projections_with_rollup/ast.json new file mode 100644 index 000000000..d487a818e --- /dev/null +++ b/parser/testdata/02516_projections_with_rollup/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery video_log (children 1)" + }, + { + "explain": " Identifier video_log" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00148062, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02516_projections_with_rollup/metadata.json b/parser/testdata/02516_projections_with_rollup/metadata.json new file 
mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02516_projections_with_rollup/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02516_projections_with_rollup/query.sql b/parser/testdata/02516_projections_with_rollup/query.sql new file mode 100644 index 000000000..a87621073 --- /dev/null +++ b/parser/testdata/02516_projections_with_rollup/query.sql @@ -0,0 +1,121 @@ +DROP TABLE IF EXISTS video_log; +DROP TABLE IF EXISTS video_log_result__fuzz_0; +DROP TABLE IF EXISTS rng; + +CREATE TABLE video_log +( + `datetime` DateTime, + `user_id` UInt64, + `device_id` UInt64, + `domain` LowCardinality(String), + `bytes` UInt64, + `duration` UInt64 +) +ENGINE = MergeTree +PARTITION BY toDate(datetime) +ORDER BY (user_id, device_id) +SETTINGS index_granularity_bytes=10485760, index_granularity=8192; + +CREATE TABLE video_log_result__fuzz_0 +( + `hour` Nullable(DateTime), + `sum_bytes` UInt64, + `avg_duration` Float64 +) +ENGINE = MergeTree +PARTITION BY toDate(hour) +ORDER BY sum_bytes +SETTINGS allow_nullable_key = 1; + +CREATE TABLE rng +( + `user_id_raw` UInt64, + `device_id_raw` UInt64, + `domain_raw` UInt64, + `bytes_raw` UInt64, + `duration_raw` UInt64 +) +ENGINE = GenerateRandom(1024); + +INSERT INTO video_log SELECT + toUnixTimestamp('2022-07-22 01:00:00') + (rowNumberInAllBlocks() / 20000), + user_id_raw % 100000000 AS user_id, + device_id_raw % 200000000 AS device_id, + domain_raw % 100, + (bytes_raw % 1024) + 128, + (duration_raw % 300) + 100 +FROM rng +LIMIT 1728000; + +INSERT INTO video_log SELECT + toUnixTimestamp('2022-07-22 01:00:00') + (rowNumberInAllBlocks() / 20000), + user_id_raw % 100000000 AS user_id, + 100 AS device_id, + domain_raw % 100, + (bytes_raw % 1024) + 128, + (duration_raw % 300) + 100 +FROM rng +LIMIT 10; + +ALTER TABLE video_log + ADD PROJECTION p_norm + ( + SELECT + datetime, + device_id, + bytes, + duration + ORDER BY device_id + ); + +ALTER TABLE video_log + MATERIALIZE PROJECTION p_norm +SETTINGS mutations_sync = 1; + +ALTER TABLE video_log + ADD PROJECTION p_agg + ( + SELECT + toStartOfHour(datetime) AS hour, + domain, + sum(bytes), + avg(duration) + GROUP BY + hour, + domain + ); + +ALTER TABLE video_log + MATERIALIZE PROJECTION p_agg +SETTINGS mutations_sync = 1; + +-- We are not interested in the result of this query, but it should not produce a logical error. 
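+-- ROLLUP and TOTALS emit extra aggregate rows with defaulted key columns; the LEFT JOIN below must still +-- resolve the shared 'hour' column for those rows without producing a logical error.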
+SELECT + avg_duration1, + avg_duration1 = avg_duration2 +FROM +( + SELECT + sum(bytes), + hour, + toStartOfHour(datetime) AS hour, + avg(duration) AS avg_duration1 + FROM video_log + GROUP BY hour + WITH ROLLUP + WITH TOTALS +) +LEFT JOIN +( + SELECT + hour, + sum_bytes AS sum_bytes2, + avg_duration AS avg_duration2 + FROM video_log_result__fuzz_0 +) USING (hour) +SETTINGS joined_subquery_requires_alias = 0 +FORMAT Null; + +DROP TABLE video_log; +DROP TABLE video_log_result__fuzz_0; +DROP TABLE rng; diff --git a/parser/testdata/02517_executable_pool_bad_input_query/ast.json b/parser/testdata/02517_executable_pool_bad_input_query/ast.json new file mode 100644 index 000000000..31d024e6f --- /dev/null +++ b/parser/testdata/02517_executable_pool_bad_input_query/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test_table (children 3)" + }, + { + "explain": " Identifier test_table" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration value (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function ExecutablePool (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'nonexist.py'" + }, + { + "explain": " Literal 'TabSeparated'" + }, + { + "explain": " Identifier foobar" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.000952966, + "rows_read": 12, + "bytes_read": 444 + } +} diff --git a/parser/testdata/02517_executable_pool_bad_input_query/metadata.json b/parser/testdata/02517_executable_pool_bad_input_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02517_executable_pool_bad_input_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02517_executable_pool_bad_input_query/query.sql b/parser/testdata/02517_executable_pool_bad_input_query/query.sql new file mode 100644 index 000000000..c016c93b6 --- /dev/null +++ b/parser/testdata/02517_executable_pool_bad_input_query/query.sql @@ -0,0 +1,4 @@ +CREATE TABLE test_table (value String) ENGINE=ExecutablePool('nonexist.py', 'TabSeparated', (foobar)); -- {serverError BAD_ARGUMENTS} +CREATE TABLE test_table (value String) ENGINE=ExecutablePool('nonexist.py', 'TabSeparated', '(SELECT 1)'); -- {serverError BAD_ARGUMENTS} +CREATE TABLE test_table (value String) ENGINE=ExecutablePool('nonexist.py', 'TabSeparated', [1,2,3]); -- {serverError BAD_ARGUMENTS} + diff --git a/parser/testdata/02517_union_columns_order/ast.json b/parser/testdata/02517_union_columns_order/ast.json new file mode 100644 index 000000000..d898f2df8 --- /dev/null +++ b/parser/testdata/02517_union_columns_order/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t1 (children 3)" + }, + { + "explain": " Identifier t1" + }, + { + "explain": " Columns definition (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration c0 (children 1)" + }, + { + "explain": " DataType Int32" + }, + { + "explain": " Identifier c0" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function MergeTree" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001103129, + "rows_read": 9, + "bytes_read": 295 + } +} diff --git 
a/parser/testdata/02517_union_columns_order/metadata.json b/parser/testdata/02517_union_columns_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02517_union_columns_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02517_union_columns_order/query.sql b/parser/testdata/02517_union_columns_order/query.sql new file mode 100644 index 000000000..c02dacfa0 --- /dev/null +++ b/parser/testdata/02517_union_columns_order/query.sql @@ -0,0 +1,32 @@ +CREATE TABLE t1 (c0 Int32, PRIMARY KEY (c0)) ENGINE = MergeTree; +SELECT DISTINCT * +FROM +( + SELECT DISTINCT + cos(sign(exp(t1.c0))), + -min2(pow(t1.c0, t1.c0), intDiv(t1.c0, t1.c0)), + t1.c0, + t1.c0, + erf(abs(-t1.c0)) + FROM t1 + WHERE t1.c0 > 0 + UNION ALL + SELECT DISTINCT + cos(sign(exp(t1.c0))), + -min2(pow(t1.c0, t1.c0), intDiv(t1.c0, t1.c0)), + t1.c0, + t1.c0, + erf(abs(-t1.c0)) + FROM t1 + WHERE NOT (t1.c0 > 0) + UNION ALL + SELECT DISTINCT + cos(sign(exp(t1.c0))), + -min2(pow(t1.c0, t1.c0), intDiv(t1.c0, t1.c0)), + t1.c0, + t1.c0, + erf(abs(-t1.c0)) + FROM t1 + WHERE t1.c0 > (0 IS NULL) +); + diff --git a/parser/testdata/02517_uuid_parsing/ast.json b/parser/testdata/02517_uuid_parsing/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02517_uuid_parsing/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02517_uuid_parsing/metadata.json b/parser/testdata/02517_uuid_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02517_uuid_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02517_uuid_parsing/query.sql b/parser/testdata/02517_uuid_parsing/query.sql new file mode 100644 index 000000000..6d04d8b62 --- /dev/null +++ b/parser/testdata/02517_uuid_parsing/query.sql @@ -0,0 +1,26 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/44668 + +CREATE TABLE temp +( + `id` UInt64, + `field1` UUID, + `field2` UUID, + `field3` Int64, + `field4` Int64, + `field5` LowCardinality(String), + `field6` FixedString(3), + `field7` String, + `field8` Nullable(UUID), + `event_at` DateTime('UTC'), + `order_id` Nullable(UUID), + `identity` LowCardinality(String) +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(event_at) +ORDER BY (field1, event_at, field2, field5, id) +SETTINGS index_granularity = 8192; + +INSERT INTO temp (id, field1, field2, field3, field4, field5, field6, field7, field8, event_at, order_id, identity) +VALUES ('1011','1d83904a-c31d-4a6c-bbf0-217656b46444','1d83904a-c31d-4a6c-bbf0-217656b46444',-200,0,'FOO','BAR','','1d83904a-c31d-4a6c-bbf0-217656b46444','2022-12-18 03:14:56','','dispatcher'),('10112222334444','1d83904a-c31d-4a6c-bbf0-217656b46444','1d83904a-c31d-4a6c-bbf0-217656b46444',12300,0,'FOO','BAR','','1d83904a-c31d-4a6c-bbf0-217656b46444','2022-12-17 23:37:18','1d83904a-c31d-4a6c-bbf0-217656b46444','other'); + +SELECT * FROM temp ORDER BY id; diff --git a/parser/testdata/02517_wrong_total_structure_crash/ast.json b/parser/testdata/02517_wrong_total_structure_crash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02517_wrong_total_structure_crash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02517_wrong_total_structure_crash/metadata.json b/parser/testdata/02517_wrong_total_structure_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02517_wrong_total_structure_crash/metadata.json @@ -0,0 +1 @@ 
+{"todo": true} diff --git a/parser/testdata/02517_wrong_total_structure_crash/query.sql b/parser/testdata/02517_wrong_total_structure_crash/query.sql new file mode 100644 index 000000000..86bc8c112 --- /dev/null +++ b/parser/testdata/02517_wrong_total_structure_crash/query.sql @@ -0,0 +1,26 @@ +-- Tags: no-ordinary-database +CREATE OR REPLACE TABLE alias10__fuzz_13 (`Id` Array(Array(UInt256)), `EventDate` Array(String), `field1` Array(Array(Nullable(Int8))), `field2` Array(Date), `field3` Array(Array(Array(UInt128)))) ENGINE = Distributed(test_shard_localhost, currentDatabase(), alias_local10); + +set allow_deprecated_syntax_for_merge_tree=1; +CREATE OR REPLACE TABLE alias_local10 ( + Id Int8, + EventDate Date DEFAULT '2000-01-01', + field1 Int8, + field2 String, + field3 ALIAS CASE WHEN field1 = 1 THEN field2 ELSE '0' END +) ENGINE = MergeTree(EventDate, (Id, EventDate), 8192); + +SET prefer_localhost_replica = 0; + +SELECT field1 FROM alias10__fuzz_13 WHERE arrayEnumerateDense(NULL, tuple('0.2147483646'), NULL) GROUP BY field1, arrayEnumerateDense(('0.02', '0.1', '0'), NULL) WITH TOTALS; -- { serverError TYPE_MISMATCH } + + +CREATE OR REPLACE TABLE local (x Int8) ENGINE = Memory; +CREATE OR REPLACE TABLE distributed (x Array(Int8)) ENGINE = Distributed(test_shard_localhost, currentDatabase(), local); +SET prefer_localhost_replica = 0; +SELECT x FROM distributed GROUP BY x WITH TOTALS; -- { serverError TYPE_MISMATCH } + +DROP TABLE distributed; +DROP TABLE local; +DROP TABLE alias_local10; +DROP TABLE alias10__fuzz_13; diff --git a/parser/testdata/02518_delete_on_materialized_view/ast.json b/parser/testdata/02518_delete_on_materialized_view/ast.json new file mode 100644 index 000000000..1efb71100 --- /dev/null +++ b/parser/testdata/02518_delete_on_materialized_view/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery kek (children 1)" + }, + { + "explain": " Identifier kek" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00114412, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02518_delete_on_materialized_view/metadata.json b/parser/testdata/02518_delete_on_materialized_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02518_delete_on_materialized_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02518_delete_on_materialized_view/query.sql b/parser/testdata/02518_delete_on_materialized_view/query.sql new file mode 100644 index 000000000..f655d0ce6 --- /dev/null +++ b/parser/testdata/02518_delete_on_materialized_view/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS kek; +DROP TABLE IF EXISTS kekv; + +CREATE TABLE kek (a UInt32) ENGINE = MergeTree ORDER BY a; +CREATE MATERIALIZED VIEW kekv ENGINE = MergeTree ORDER BY tuple() AS SELECT * FROM kek; + +INSERT INTO kek VALUES (1); +DELETE FROM kekv WHERE a = 1; -- { serverError BAD_ARGUMENTS} + +DELETE FROM kekv WHERE a = 1; -- { serverError BAD_ARGUMENTS} + +DROP TABLE IF EXISTS kek; +DROP TABLE IF EXISTS kekv; diff --git a/parser/testdata/02518_merge_engine_nullable_43324/ast.json b/parser/testdata/02518_merge_engine_nullable_43324/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02518_merge_engine_nullable_43324/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02518_merge_engine_nullable_43324/metadata.json b/parser/testdata/02518_merge_engine_nullable_43324/metadata.json 
new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02518_merge_engine_nullable_43324/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02518_merge_engine_nullable_43324/query.sql b/parser/testdata/02518_merge_engine_nullable_43324/query.sql new file mode 100644 index 000000000..1223c4e95 --- /dev/null +++ b/parser/testdata/02518_merge_engine_nullable_43324/query.sql @@ -0,0 +1,18 @@ + +DROP TABLE IF EXISTS foo; +DROP TABLE IF EXISTS foo__fuzz_0; +DROP TABLE IF EXISTS foo_merge; + +CREATE TABLE foo (`Id` Int32, `Val` Int32) ENGINE = MergeTree ORDER BY Id; +CREATE TABLE foo__fuzz_0 (`Id` Int64, `Val` Nullable(Int32)) ENGINE = MergeTree ORDER BY Id; + +INSERT INTO foo SELECT number, number % 5 FROM numbers(10); +INSERT INTO foo__fuzz_0 SELECT number, number % 5 FROM numbers(10); + +CREATE TABLE merge1 AS foo ENGINE = Merge(currentDatabase(), '^foo'); +CREATE TABLE merge2 (`Id` Int32, `Val` Int32) ENGINE = Merge(currentDatabase(), '^foo'); +CREATE TABLE merge3 (`Id` Int32, `Val` Int32) ENGINE = Merge(currentDatabase(), '^foo__fuzz_0'); + +SELECT * FROM merge1 WHERE Val = 3 AND Val = 1; +SELECT * FROM merge2 WHERE Val = 3 AND Val = 1; +SELECT * FROM merge3 WHERE Val = 3 AND Val = 1; diff --git a/parser/testdata/02518_qualified_asterisks_alias_table_name/ast.json b/parser/testdata/02518_qualified_asterisks_alias_table_name/ast.json new file mode 100644 index 000000000..6f8e61263 --- /dev/null +++ b/parser/testdata/02518_qualified_asterisks_alias_table_name/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table_join_1 (children 1)" + }, + { + "explain": " Identifier test_table_join_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001281505, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/02518_qualified_asterisks_alias_table_name/metadata.json b/parser/testdata/02518_qualified_asterisks_alias_table_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02518_qualified_asterisks_alias_table_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02518_qualified_asterisks_alias_table_name/query.sql b/parser/testdata/02518_qualified_asterisks_alias_table_name/query.sql new file mode 100644 index 000000000..808fa58a9 --- /dev/null +++ b/parser/testdata/02518_qualified_asterisks_alias_table_name/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS test_table_join_1; +CREATE TABLE test_table_join_1 (id UInt64, value String) ENGINE = TinyLog; + +DROP TABLE IF EXISTS test_table_join_2; +CREATE TABLE test_table_join_2 (id UInt64, value String) ENGINE = TinyLog; + +DROP TABLE IF EXISTS test_table_join_3; +CREATE TABLE test_table_join_3 (id UInt64, value String ) ENGINE = TinyLog; + +INSERT INTO test_table_join_1 VALUES (1, 'a'); +INSERT INTO test_table_join_2 VALUES (1, 'b'); +INSERT INTO test_table_join_3 VALUES (1, 'c'); + + +SELECT + test_table_join_1.* APPLY toString, + test_table_join_2.* APPLY toString, + test_table_join_3.* APPLY toString +FROM test_table_join_1 AS t1 + INNER JOIN test_table_join_2 AS t2 ON t1.id = t2.id + INNER JOIN test_table_join_3 AS t3 ON t2.id = t3.id; + +DROP TABLE test_table_join_1; +DROP TABLE test_table_join_2; +DROP TABLE test_table_join_3; \ No newline at end of file diff --git a/parser/testdata/02518_rewrite_aggregate_function_with_if/ast.json b/parser/testdata/02518_rewrite_aggregate_function_with_if/ast.json new 
file mode 100644 index 000000000..4f8b4a8d8 --- /dev/null +++ b/parser/testdata/02518_rewrite_aggregate_function_with_if/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001359947, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02518_rewrite_aggregate_function_with_if/metadata.json b/parser/testdata/02518_rewrite_aggregate_function_with_if/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02518_rewrite_aggregate_function_with_if/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02518_rewrite_aggregate_function_with_if/query.sql b/parser/testdata/02518_rewrite_aggregate_function_with_if/query.sql new file mode 100644 index 000000000..4ed13307c --- /dev/null +++ b/parser/testdata/02518_rewrite_aggregate_function_with_if/query.sql @@ -0,0 +1,28 @@ +set enable_analyzer = true; +-- { echoOn } + +set optimize_rewrite_aggregate_function_with_if = false; +EXPLAIN QUERY TREE run_passes = 1 select sum(if(number % 2, number, 0)) from numbers(100); +EXPLAIN QUERY TREE run_passes = 1 select sum(if(number % 2, 0, number)) from numbers(100); + +EXPLAIN QUERY TREE run_passes = 1 select sum(if(number % 2, number, null)) from numbers(100); +EXPLAIN QUERY TREE run_passes = 1 select sum(if(number % 2, null, number)) from numbers(100); + +EXPLAIN QUERY TREE run_passes = 1 select avg(if(number % 2, number, null)) from numbers(100); +EXPLAIN QUERY TREE run_passes = 1 select avg(if(number % 2, null, number)) from numbers(100); + +EXPLAIN QUERY TREE run_passes = 1 select quantiles(0.5, 0.9, 0.99)(if(number % 2, number, null)) from numbers(100); +EXPLAIN QUERY TREE run_passes = 1 select quantiles(0.5, 0.9, 0.99)(if(number % 2, null, number)) from numbers(100); + +set optimize_rewrite_aggregate_function_with_if = true; +EXPLAIN QUERY TREE run_passes = 1 select sum(if(number % 2, number, 0)) from numbers(100); +EXPLAIN QUERY TREE run_passes = 1 select sum(if(number % 2, 0, number)) from numbers(100); + +EXPLAIN QUERY TREE run_passes = 1 select sum(if(number % 2, number, null)) from numbers(100); +EXPLAIN QUERY TREE run_passes = 1 select sum(if(number % 2, null, number)) from numbers(100); + +EXPLAIN QUERY TREE run_passes = 1 select avg(if(number % 2, number, null)) from numbers(100); +EXPLAIN QUERY TREE run_passes = 1 select avg(if(number % 2, null, number)) from numbers(100); + +EXPLAIN QUERY TREE run_passes = 1 select quantiles(0.5, 0.9, 0.99)(if(number % 2, number, null)) from numbers(100); +EXPLAIN QUERY TREE run_passes = 1 select quantiles(0.5, 0.9, 0.99)(if(number % 2, null, number)) from numbers(100); diff --git a/parser/testdata/02519_monotonicity_fuzz/ast.json b/parser/testdata/02519_monotonicity_fuzz/ast.json new file mode 100644 index 000000000..c2b4391ab --- /dev/null +++ b/parser/testdata/02519_monotonicity_fuzz/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001038994, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02519_monotonicity_fuzz/metadata.json b/parser/testdata/02519_monotonicity_fuzz/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02519_monotonicity_fuzz/metadata.json @@ -0,0 
+1 @@ +{"todo": true} diff --git a/parser/testdata/02519_monotonicity_fuzz/query.sql b/parser/testdata/02519_monotonicity_fuzz/query.sql new file mode 100644 index 000000000..57da69171 --- /dev/null +++ b/parser/testdata/02519_monotonicity_fuzz/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS t; +CREATE TABLE t (x Decimal(18, 3)) ENGINE = MergeTree ORDER BY x; +INSERT INTO t VALUES (1.1); +SELECT * FROM t WHERE toUInt64(x) = 1; +DROP TABLE t; + +CREATE TABLE t (x DateTime64(3)) ENGINE = MergeTree ORDER BY x; +INSERT INTO t VALUES (1000); +SELECT x::UInt64 FROM t WHERE toUInt64(x) = 1; +DROP TABLE t; diff --git a/parser/testdata/02520_group_array_last/ast.json b/parser/testdata/02520_group_array_last/ast.json new file mode 100644 index 000000000..8779e5c15 --- /dev/null +++ b/parser/testdata/02520_group_array_last/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery simple_agg_groupArrayLastArray (children 1)" + }, + { + "explain": " Identifier simple_agg_groupArrayLastArray" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001230164, + "rows_read": 2, + "bytes_read": 112 + } +} diff --git a/parser/testdata/02520_group_array_last/metadata.json b/parser/testdata/02520_group_array_last/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02520_group_array_last/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02520_group_array_last/query.sql b/parser/testdata/02520_group_array_last/query.sql new file mode 100644 index 000000000..94773d5d5 --- /dev/null +++ b/parser/testdata/02520_group_array_last/query.sql @@ -0,0 +1,34 @@ +drop table if exists simple_agg_groupArrayLastArray; + +-- { echo } +-- BAD_ARGUMENTS +select groupArrayLast(number+1) from numbers(5); -- { serverError BAD_ARGUMENTS } +select groupArrayLastArray([number+1]) from numbers(5); -- { serverError BAD_ARGUMENTS } +-- groupArrayLast by number +select groupArrayLast(1)(number+1) from numbers(5); +select groupArrayLast(3)(number+1) from numbers(5); +select groupArrayLast(3)(number+1) from numbers(10); +-- groupArrayLast by String +select groupArrayLast(3)((number+1)::String) from numbers(5); +select groupArrayLast(3)((number+1)::String) from numbers(10); +-- groupArrayLastArray +select groupArrayLastArray(3)([1,2,3,4,5,6]); +select groupArrayLastArray(3)(['1','2','3','4','5','6']); +-- groupArrayLastMerge +-- [10,8,9] + [10,8,9] => [10,10,9] => [10,10,8] => [9,10,8] +-- ^ ^ ^ ^^ +-- (position to insert at) +select groupArrayLast(3)(number+1) state from remote('127.{1,1}', view(select * from numbers(10))); +select groupArrayLast(3)((number+1)::String) state from remote('127.{1,1}', view(select * from numbers(10))); +select groupArrayLast(3)([number+1]) state from remote('127.{1,1}', view(select * from numbers(10))); +select groupArrayLast(100)(number+1) state from remote('127.{1,1}', view(select * from numbers(10))); +select groupArrayLast(100)((number+1)::String) state from remote('127.{1,1}', view(select * from numbers(10))); +select groupArrayLast(100)([number+1]) state from remote('127.{1,1}', view(select * from numbers(10))); +-- SimpleAggregateFunction +create table simple_agg_groupArrayLastArray (key Int, value SimpleAggregateFunction(groupArrayLastArray(5), Array(UInt64))) engine=AggregatingMergeTree() order by key; +insert into simple_agg_groupArrayLastArray values (1, [1,2,3]), (1, [4,5,6]), (2, [4,5,6]), (2, [1,2,3]); +select * from 
simple_agg_groupArrayLastArray order by key, value; +system stop merges simple_agg_groupArrayLastArray; +insert into simple_agg_groupArrayLastArray values (1, [7,8]), (2, [7,8]); +select * from simple_agg_groupArrayLastArray order by key, value; +select * from simple_agg_groupArrayLastArray final order by key, value; diff --git a/parser/testdata/02521_aggregation_by_partitions/ast.json b/parser/testdata/02521_aggregation_by_partitions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02521_aggregation_by_partitions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02521_aggregation_by_partitions/metadata.json b/parser/testdata/02521_aggregation_by_partitions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02521_aggregation_by_partitions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02521_aggregation_by_partitions/query.sql b/parser/testdata/02521_aggregation_by_partitions/query.sql new file mode 100644 index 000000000..34d453faa --- /dev/null +++ b/parser/testdata/02521_aggregation_by_partitions/query.sql @@ -0,0 +1,278 @@ +-- Tags: long, no-object-storage + +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +set max_threads = 16; +set allow_aggregate_partitions_independently = 1; +set force_aggregate_partitions_independently = 1; +set optimize_use_projections = 0; +set optimize_trivial_insert_select = 1; + +set allow_prefetched_read_pool_for_remote_filesystem = 0; +set allow_prefetched_read_pool_for_local_filesystem = 0; + +create table t1(a UInt32) engine=MergeTree order by tuple() partition by a % 4 settings index_granularity = 8192, index_granularity_bytes = 10485760; + +system stop merges t1; + +insert into t1 select number from numbers_mt(1e6); +insert into t1 select number from numbers_mt(1e6); + +-- { echoOn } +explain pipeline select a from t1 group by a; +-- { echoOff } + +select count() from (select throwIf(count() != 2) from t1 group by a); + +drop table t1; + +create table t2(a UInt32) engine=MergeTree order by tuple() partition by a % 8 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +system stop merges t2; + +insert into t2 select number from numbers_mt(1e6); +insert into t2 select number from numbers_mt(1e6); + +-- { echoOn } +explain pipeline select a from t2 group by a; +-- { echoOff } + +select count() from (select throwIf(count() != 2) from t2 group by a); + +drop table t2; + +create table t3(a UInt32) engine=MergeTree order by tuple() partition by a % 16 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +system stop merges t3; + +insert into t3 select number from numbers_mt(1e6); +insert into t3 select number from numbers_mt(1e6); + +-- { echoOn } +explain pipeline select a from t3 group by a; +-- { echoOff } + +select count() from (select throwIf(count() != 2) from t3 group by a); + +select throwIf(count() != 4) from remote('127.0.0.{1,2}', currentDatabase(), t3) group by a format Null; + +-- if we happened to switch to external aggregation at some point, merging will happen as usual +select count() from (select throwIf(count() != 2) from t3 group by a) settings max_bytes_before_external_group_by = '1Ki', max_bytes_ratio_before_external_group_by = 0; + +drop table t3; + +-- aggregation in order -- + +set optimize_aggregation_in_order = 1; + +create table t4(a UInt32) engine=MergeTree order by a partition by a % 4 SETTINGS 
index_granularity = 8192, index_granularity_bytes = '10Mi'; + +system stop merges t4; + +insert into t4 select number from numbers_mt(1e6); +insert into t4 select number from numbers_mt(1e6); + +-- { echoOn } +explain pipeline select a from t4 group by a settings read_in_order_two_level_merge_threshold = 1e12; +-- { echoOff } + +select count() from (select throwIf(count() != 2) from t4 group by a); + +drop table t4; + +create table t5(a UInt32) engine=MergeTree order by a partition by a % 8 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +system stop merges t5; + +insert into t5 select number from numbers_mt(1e6); +insert into t5 select number from numbers_mt(1e6); + +-- { echoOn } +explain pipeline select a from t5 group by a settings read_in_order_two_level_merge_threshold = 1e12; +-- { echoOff } + +select count() from (select throwIf(count() != 2) from t5 group by a); + +drop table t5; + +create table t6(a UInt32) engine=MergeTree order by a partition by a % 16 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +system stop merges t6; + +insert into t6 select number from numbers_mt(1e6); +insert into t6 select number from numbers_mt(1e6); + +-- { echoOn } +explain pipeline select a from t6 group by a settings read_in_order_two_level_merge_threshold = 1e12; +-- { echoOff } + +select count() from (select throwIf(count() != 2) from t6 group by a); + +drop table t6; + +set optimize_aggregation_in_order = 0; + +create table t7(a UInt32) engine=MergeTree order by a partition by intDiv(a, 2) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into t7 select number from numbers_mt(100); + +select replaceRegexpOne(explain, '^[ ]*(.*)', '\\1') from ( + explain actions=1 select intDiv(a, 2) as a1 from t7 group by a1 +) where explain like '%Skip merging: %'; + +drop table t7; + +create table t8(a UInt32) engine=MergeTree order by a partition by intDiv(a, 2) * 2 + 1 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into t8 select number from numbers_mt(100); + +select replaceRegexpOne(explain, '^[ ]*(.*)', '\\1') from ( + explain actions=1 select intDiv(a, 2) + 1 as a1 from t8 group by a1 +) where explain like '%Skip merging: %'; + +drop table t8; + +create table t9(a UInt32) engine=MergeTree order by a partition by intDiv(a, 2) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into t9 select number from numbers_mt(100); + +select replaceRegexpOne(explain, '^[ ]*(.*)', '\\1') from ( + explain actions=1 select intDiv(a, 3) as a1 from t9 group by a1 +) where explain like '%Skip merging: %'; + +drop table t9; + +create table t10(a UInt32, b UInt32) engine=MergeTree order by a partition by (intDiv(a, 2), intDiv(b, 3)) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into t10 select number, number from numbers_mt(100); + +select replaceRegexpOne(explain, '^[ ]*(.*)', '\\1') from ( + explain actions=1 select intDiv(a, 2) + 1 as a1, intDiv(b, 3) as b1 from t10 group by a1, b1, pi() +) where explain like '%Skip merging: %'; + +drop table t10; + +-- multiplication by 2 is not injective, so optimization is not applicable +create table t11(a UInt32, b UInt32) engine=MergeTree order by a partition by (intDiv(a, 2), intDiv(b, 3)) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into t11 select number, number from numbers_mt(100); + +select replaceRegexpOne(explain, '^[ ]*(.*)', '\\1') from ( + explain actions=1 select intDiv(a, 
2) + 1 as a1, intDiv(b, 3) * 2 as b1 from t11 group by a1, b1, pi() +) where explain like '%Skip merging: %'; + +drop table t11; + +create table t12(a UInt32, b UInt32) engine=MergeTree order by a partition by a % 16; + +insert into t12 select number, number from numbers_mt(100); + +select replaceRegexpOne(explain, '^[ ]*(.*)', '\\1') from ( + explain actions=1 select a, b from t12 group by a, b, pi() +) where explain like '%Skip merging: %'; + +drop table t12; + +create table t13(a UInt32, b UInt32) engine=MergeTree order by a partition by (intDiv(a, 2), intDiv(b, 3)) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into t13 select number, number from numbers_mt(100); + +select replaceRegexpOne(explain, '^[ ]*(.*)', '\\1') from ( + explain actions=1 select s from t13 group by intDiv(a, 2) + intDiv(b, 3) as s, pi() +) where explain like '%Skip merging: %'; + +drop table t13; + +create table t14(a UInt32, b UInt32) engine=MergeTree order by a partition by intDiv(a, 2) + intDiv(b, 3) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into t14 select number, number from numbers_mt(100); + +select replaceRegexpOne(explain, '^[ ]*(.*)', '\\1') from ( + explain actions=1 select intDiv(a, 2) as a1, intDiv(b, 3) as b1 from t14 group by a1, b1, pi() +) where explain like '%Skip merging: %'; + +drop table t14; + +-- too few partitions -- +create table t15(a UInt32, b UInt32) engine=MergeTree order by a partition by a < 90 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into t15 select number, number from numbers_mt(100); + +select replaceRegexpOne(explain, '^[ ]*(.*)', '\\1') from ( + explain actions=1 select a from t15 group by a +) where explain like '%Skip merging: %' +settings force_aggregate_partitions_independently = 0; + +drop table t15; + +-- too many partitions -- +create table t16(a UInt32, b UInt32) engine=MergeTree order by a partition by a % 16 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into t16 select number, number from numbers_mt(100); + +select replaceRegexpOne(explain, '^[ ]*(.*)', '\\1') from ( + explain actions=1 select a from t16 group by a +) where explain like '%Skip merging: %' +settings force_aggregate_partitions_independently = 0, max_number_of_partitions_for_independent_aggregation = 4; + +drop table t16; + +-- too big a skew -- +create table t17(a UInt32, b UInt32) engine=MergeTree order by a partition by a < 90 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into t17 select number, number from numbers_mt(100); + +select replaceRegexpOne(explain, '^[ ]*(.*)', '\\1') from ( + explain actions=1 select a from t17 group by a +) where explain like '%Skip merging: %' +settings force_aggregate_partitions_independently = 0, max_threads = 4; + +drop table t17; + +create table t18(a UInt32, b UInt32) engine=MergeTree order by a partition by a SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into t18 select number, number from numbers_mt(50); + +select replaceRegexpOne(explain, '^[ ]*(.*)', '\\1') from ( + explain actions=1 select a1 from t18 group by intDiv(a, 2) as a1 +) where explain like '%Skip merging: %'; + +drop table t18; + +create table t19(a UInt32, b UInt32) engine=MergeTree order by a partition by a SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into t19 select number, number from numbers_mt(50); + +select replaceRegexpOne(explain, '^[ ]*(.*)', '\\1') from ( + 
explain actions=1 select a1 from t19 group by blockNumber() as a1 +) where explain like '%Skip merging: %'; + +drop table t19; + +create table t20(a UInt32, b UInt32) engine=MergeTree order by a partition by a SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into t20 select number, number from numbers_mt(50); + +select replaceRegexpOne(explain, '^[ ]*(.*)', '\\1') from ( + explain actions=1 select a1 from t20 group by rand(a) as a1 +) where explain like '%Skip merging: %'; + +drop table t20; + +create table t21(a UInt64, b UInt64) engine=MergeTree order by a partition by a % 16 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into t21 select number, number from numbers_mt(1e6); + +select a from t21 group by a limit 10 format Null; + +drop table t21; + +create table t22(a UInt32, b UInt32) engine=SummingMergeTree order by a partition by a % 16 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into t22 select number, number from numbers_mt(1e6); + +select replaceRegexpOne(explain, '^[ ]*(.*)', '\\1') from ( + explain actions=1 select a from t22 final group by a +) where explain like '%Skip merging: %'; + +drop table t22; diff --git a/parser/testdata/02521_analyzer_aggregation_without_column/ast.json b/parser/testdata/02521_analyzer_aggregation_without_column/ast.json new file mode 100644 index 000000000..c80e60b71 --- /dev/null +++ b/parser/testdata/02521_analyzer_aggregation_without_column/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000997871, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02521_analyzer_aggregation_without_column/metadata.json b/parser/testdata/02521_analyzer_aggregation_without_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02521_analyzer_aggregation_without_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02521_analyzer_aggregation_without_column/query.sql b/parser/testdata/02521_analyzer_aggregation_without_column/query.sql new file mode 100644 index 000000000..50bf3cd45 --- /dev/null +++ b/parser/testdata/02521_analyzer_aggregation_without_column/query.sql @@ -0,0 +1,15 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + c0 String ALIAS c1, + c1 String, + c2 String, +) ENGINE = MergeTree ORDER BY c1; + +INSERT INTO test_table VALUES ('a', 'b'); + +SELECT MAX(1) FROM test_table; + +DROP TABLE test_table; diff --git a/parser/testdata/02521_analyzer_array_join_crash/ast.json b/parser/testdata/02521_analyzer_array_join_crash/ast.json new file mode 100644 index 000000000..4e989d545 --- /dev/null +++ b/parser/testdata/02521_analyzer_array_join_crash/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001459587, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02521_analyzer_array_join_crash/metadata.json b/parser/testdata/02521_analyzer_array_join_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02521_analyzer_array_join_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02521_analyzer_array_join_crash/query.sql 
b/parser/testdata/02521_analyzer_array_join_crash/query.sql new file mode 100644 index 000000000..f5d601303 --- /dev/null +++ b/parser/testdata/02521_analyzer_array_join_crash/query.sql @@ -0,0 +1,24 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE = MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0, 'Value'); + +-- { echoOn } + +SELECT id, value_element, value FROM test_table ARRAY JOIN [[1,2,3]] AS value_element, value_element AS value; -- { serverError UNKNOWN_IDENTIFIER } + +SELECT id, value_element, value FROM test_table ARRAY JOIN [[1,2,3]] AS value_element ARRAY JOIN value_element AS value; + +SELECT value_element, value FROM test_table ARRAY JOIN [1048577] AS value_element ARRAY JOIN arrayMap(x -> value_element, ['']) AS value; + +SELECT arrayFilter(x -> notEmpty(concat(x)), [NULL, NULL]) FROM system.one ARRAY JOIN [1048577] AS elem ARRAY JOIN arrayMap(x -> splitByChar(x, elem), ['']) AS unused; -- { serverError ILLEGAL_COLUMN } + +-- { echoOff } + +DROP TABLE test_table; diff --git a/parser/testdata/02521_cannot_find_column_in_projection/ast.json b/parser/testdata/02521_cannot_find_column_in_projection/ast.json new file mode 100644 index 000000000..219bfe9b1 --- /dev/null +++ b/parser/testdata/02521_cannot_find_column_in_projection/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001190962, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02521_cannot_find_column_in_projection/metadata.json b/parser/testdata/02521_cannot_find_column_in_projection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02521_cannot_find_column_in_projection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02521_cannot_find_column_in_projection/query.sql b/parser/testdata/02521_cannot_find_column_in_projection/query.sql new file mode 100644 index 000000000..6ee8ec071 --- /dev/null +++ b/parser/testdata/02521_cannot_find_column_in_projection/query.sql @@ -0,0 +1,5 @@ +drop table if exists test; +create table test(day Date, id UInt32) engine=MergeTree partition by day order by tuple(); +insert into test select toDate('2023-01-05') AS day, number from numbers(10); +with toUInt64(id) as id_with select day, count(id_with) from test where day >= '2023-01-01' group by day limit 1000; +drop table test; diff --git a/parser/testdata/02521_grouping_sets_plus_memory_efficient_aggr/ast.json b/parser/testdata/02521_grouping_sets_plus_memory_efficient_aggr/ast.json new file mode 100644 index 000000000..5d47f0bb1 --- /dev/null +++ b/parser/testdata/02521_grouping_sets_plus_memory_efficient_aggr/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001147168, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02521_grouping_sets_plus_memory_efficient_aggr/metadata.json b/parser/testdata/02521_grouping_sets_plus_memory_efficient_aggr/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02521_grouping_sets_plus_memory_efficient_aggr/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02521_grouping_sets_plus_memory_efficient_aggr/query.sql b/parser/testdata/02521_grouping_sets_plus_memory_efficient_aggr/query.sql new file mode 100644 index 000000000..03c20869d --- /dev/null +++ b/parser/testdata/02521_grouping_sets_plus_memory_efficient_aggr/query.sql @@ -0,0 +1,3 @@ +set distributed_aggregation_memory_efficient = 1; + +select number as a, number+1 as b from remote('127.0.0.{1,2}', numbers_mt(1e5)) group by grouping sets ((a), (b)) format Null; diff --git a/parser/testdata/02521_lightweight_delete_and_ttl/ast.json b/parser/testdata/02521_lightweight_delete_and_ttl/ast.json new file mode 100644 index 000000000..05d735a46 --- /dev/null +++ b/parser/testdata/02521_lightweight_delete_and_ttl/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery lwd_test_02521 (children 1)" + }, + { + "explain": " Identifier lwd_test_02521" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001219105, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/02521_lightweight_delete_and_ttl/metadata.json b/parser/testdata/02521_lightweight_delete_and_ttl/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02521_lightweight_delete_and_ttl/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02521_lightweight_delete_and_ttl/query.sql b/parser/testdata/02521_lightweight_delete_and_ttl/query.sql new file mode 100644 index 000000000..6bb8b5444 --- /dev/null +++ b/parser/testdata/02521_lightweight_delete_and_ttl/query.sql @@ -0,0 +1,45 @@ +DROP TABLE IF EXISTS lwd_test_02521; + +CREATE TABLE lwd_test_02521 (id UInt64, value String, event_time DateTime) +ENGINE MergeTree() +ORDER BY id +SETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO lwd_test_02521 SELECT number, randomString(10), now() - INTERVAL 2 MONTH FROM numbers(50000); +INSERT INTO lwd_test_02521 SELECT number, randomString(10), now() FROM numbers(50000); + +OPTIMIZE TABLE lwd_test_02521 FINAL SETTINGS mutations_sync = 1; + +SET mutations_sync=1; + +-- { echoOn } +SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_test_02521' AND active; +SELECT 'Count', count() FROM lwd_test_02521; + + +DELETE FROM lwd_test_02521 WHERE id < 25000; + +SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_test_02521' AND active; +SELECT 'Count', count() FROM lwd_test_02521; + + +ALTER TABLE lwd_test_02521 MODIFY TTL event_time + INTERVAL 1 MONTH SETTINGS mutations_sync = 1; + +SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_test_02521' AND active; +SELECT 'Count', count() FROM lwd_test_02521; + + +ALTER TABLE lwd_test_02521 DELETE WHERE id >= 40000 SETTINGS mutations_sync = 1; + +SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_test_02521' AND active; +SELECT 'Count', count() FROM lwd_test_02521; + + +OPTIMIZE TABLE lwd_test_02521 FINAL SETTINGS mutations_sync = 1; + +SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_test_02521' AND active; +SELECT 'Count', count() FROM lwd_test_02521; + +-- { echoOff } + +DROP TABLE lwd_test_02521; diff --git a/parser/testdata/02521_to_custom_day_of_week/ast.json 
b/parser/testdata/02521_to_custom_day_of_week/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02521_to_custom_day_of_week/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02521_to_custom_day_of_week/metadata.json b/parser/testdata/02521_to_custom_day_of_week/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02521_to_custom_day_of_week/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02521_to_custom_day_of_week/query.sql b/parser/testdata/02521_to_custom_day_of_week/query.sql new file mode 100644 index 000000000..4b194bf4b --- /dev/null +++ b/parser/testdata/02521_to_custom_day_of_week/query.sql @@ -0,0 +1,10 @@ + +with toDate('2023-01-09') as date_mon, date_mon - 1 as date_sun select toDayOfWeek(date_mon), toDayOfWeek(date_sun); +with toDate('2023-01-09') as date_mon, date_mon - 1 as date_sun select toDayOfWeek(date_mon, 0), toDayOfWeek(date_sun, 0); +with toDate('2023-01-09') as date_mon, date_mon - 1 as date_sun select toDayOfWeek(date_mon, 1), toDayOfWeek(date_sun, 1); +with toDate('2023-01-09') as date_mon, date_mon - 1 as date_sun select toDayOfWeek(date_mon, 2), toDayOfWeek(date_sun, 2); +with toDate('2023-01-09') as date_mon, date_mon - 1 as date_sun select toDayOfWeek(date_mon, 3), toDayOfWeek(date_sun, 3); +with toDate('2023-01-09') as date_mon, date_mon - 1 as date_sun select toDayOfWeek(date_mon, 4), toDayOfWeek(date_sun, 4); +with toDate('2023-01-09') as date_mon, date_mon - 1 as date_sun select toDayOfWeek(date_mon, 5), toDayOfWeek(date_sun, 5); + +select toDayOfWeek(today(), -1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/02522_different_types_in_storage_merge/ast.json b/parser/testdata/02522_different_types_in_storage_merge/ast.json new file mode 100644 index 000000000..06426229e --- /dev/null +++ b/parser/testdata/02522_different_types_in_storage_merge/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001105539, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02522_different_types_in_storage_merge/metadata.json b/parser/testdata/02522_different_types_in_storage_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02522_different_types_in_storage_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02522_different_types_in_storage_merge/query.sql b/parser/testdata/02522_different_types_in_storage_merge/query.sql new file mode 100644 index 000000000..d15997f4f --- /dev/null +++ b/parser/testdata/02522_different_types_in_storage_merge/query.sql @@ -0,0 +1,8 @@ +SET merge_table_max_tables_to_look_for_schema_inference = 1; + +CREATE TABLE test_s64_local (date Date, value Int64) ENGINE = MergeTree order by tuple(); +CREATE TABLE test_u64_local (date Date, value UInt64) ENGINE = MergeTree order by tuple(); +CREATE TABLE test_s64_distributed AS test_s64_local ENGINE = Distributed('test_shard_localhost', currentDatabase(), test_s64_local, rand()); +CREATE TABLE test_u64_distributed AS test_u64_local ENGINE = Distributed('test_shard_localhost', currentDatabase(), test_u64_local, rand()); + +SELECT * FROM merge(currentDatabase(), '') WHERE value = 1048575; diff --git a/parser/testdata/02523_array_shuffle/ast.json b/parser/testdata/02523_array_shuffle/ast.json new file mode 
100644 index 000000000..02aee326b --- /dev/null +++ b/parser/testdata/02523_array_shuffle/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayShuffle (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001294653, + "rows_read": 8, + "bytes_read": 305 + } +} diff --git a/parser/testdata/02523_array_shuffle/metadata.json b/parser/testdata/02523_array_shuffle/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02523_array_shuffle/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02523_array_shuffle/query.sql b/parser/testdata/02523_array_shuffle/query.sql new file mode 100644 index 000000000..3653c263e --- /dev/null +++ b/parser/testdata/02523_array_shuffle/query.sql @@ -0,0 +1,71 @@ +SELECT arrayShuffle([]); +SELECT arrayShuffle([], 0xbad_cafe); +SELECT arrayShuffle([9223372036854775808]); +SELECT arrayShuffle([9223372036854775808], 0xbad_cafe); +SELECT arrayShuffle([1,2,3,4,5,6,7,8,9,10], 0xbad_cafe); +SELECT arrayShuffle(materialize([1,2,3,4,5,6,7,8,9,10]), 0xbad_cafe); +SELECT arrayShuffle([1,2,3,4,5,6,7,8,9,10.1], 0xbad_cafe); +SELECT arrayShuffle([1,2,3,4,5,6,7,8,9,9223372036854775808], 0xbad_cafe); +SELECT arrayShuffle([1,2,3,4,5,6,7,8,9,NULL], 0xbad_cafe); +SELECT arrayShuffle([toFixedString('123', 3), toFixedString('456', 3), toFixedString('789', 3), toFixedString('ABC', 3), toFixedString('000', 3)], 0xbad_cafe); +SELECT arrayShuffle([toFixedString('123', 3), toFixedString('456', 3), toFixedString('789', 3), toFixedString('ABC', 3), NULL], 0xbad_cafe); +SELECT arrayShuffle(['storage','tiger','imposter','terminal','uniform','sensation'], 0xbad_cafe); +SELECT arrayShuffle(['storage','tiger',NULL,'terminal','uniform','sensation'], 0xbad_cafe); +SELECT arrayShuffle([NULL]); +SELECT arrayShuffle([NULL,NULL]); +SELECT arrayShuffle([[1,2,3,4],[-1,-2,-3,-4],[10,20,30,40],[100,200,300,400,500,600,700,800,900],[2,4,8,16,32,64]], 0xbad_cafe); +SELECT arrayShuffle(materialize([[1,2,3,4],[-1,-2,-3,-4],[10,20,30,40],[100,200,300,400,500,600,700,800,900],[2,4,8,16,32,64]]), 0xbad_cafe); +SELECT arrayShuffle([[1,2,3,4],[NULL,-2,-3,-4],[10,20,30,40],[100,200,300,400,500,600,700,800,900],[2,4,8,16,32,64]], 0xbad_cafe); +SELECT arrayShuffle(groupArray(x),0xbad_cafe) FROM (SELECT number as x from system.numbers LIMIT 100); +SELECT arrayShuffle(groupArray(toUInt64(x)),0xbad_cafe) FROM (SELECT number as x from system.numbers LIMIT 100); +SELECT arrayShuffle([tuple(1, -1), tuple(99999999, -99999999), tuple(3, -3)], 0xbad_cafe); +SELECT arrayShuffle([tuple(1, NULL), tuple(2, 'a'), tuple(3, 'A')], 0xbad_cafe); +SELECT arrayPartialShuffle([]); -- trivial cases (equivalent to arrayShuffle) +SELECT arrayPartialShuffle([], 0); +SELECT arrayPartialShuffle([], 0, 0xbad_cafe); +SELECT arrayPartialShuffle([9223372036854775808]); +SELECT arrayPartialShuffle([9223372036854775808], 0); +SELECT arrayPartialShuffle([9223372036854775808], 0, 0xbad_cafe); +SELECT arrayPartialShuffle([1,2,3,4,5,6,7,8,9,10], 0, 0xbad_cafe); +SELECT arrayPartialShuffle([1,2,3,4,5,6,7,8,9,10.1], 0, 
0xbad_cafe); +SELECT arrayPartialShuffle([1,2,3,4,5,6,7,8,9,9223372036854775808], 0, 0xbad_cafe); +SELECT arrayPartialShuffle([1,2,3,4,5,6,7,8,9,NULL], 0, 0xbad_cafe); +SELECT arrayPartialShuffle([toFixedString('123', 3), toFixedString('456', 3), toFixedString('789', 3), toFixedString('ABC', 3), toFixedString('000', 3)], 0, 0xbad_cafe); +SELECT arrayPartialShuffle([toFixedString('123', 3), toFixedString('456', 3), toFixedString('789', 3), toFixedString('ABC', 3), NULL], 0, 0xbad_cafe); +SELECT arrayPartialShuffle(['storage','tiger','imposter','terminal','uniform','sensation'], 0, 0xbad_cafe); +SELECT arrayPartialShuffle(['storage','tiger',NULL,'terminal','uniform','sensation'], 0, 0xbad_cafe); +SELECT arrayPartialShuffle([NULL]); +SELECT arrayPartialShuffle([NULL,NULL]); +SELECT arrayPartialShuffle([[1,2,3,4],[-1,-2,-3,-4],[10,20,30,40],[100,200,300,400,500,600,700,800,900],[2,4,8,16,32,64]], 0, 0xbad_cafe); +SELECT arrayPartialShuffle([[1,2,3,4],[NULL,-2,-3,-4],[10,20,30,40],[100,200,300,400,500,600,700,800,900],[2,4,8,16,32,64]], 0, 0xbad_cafe); +SELECT arrayPartialShuffle(groupArray(x),0,0xbad_cafe) FROM (SELECT number as x from system.numbers LIMIT 100); +SELECT arrayPartialShuffle(groupArray(toUInt64(x)),0,0xbad_cafe) FROM (SELECT number as x from system.numbers LIMIT 100); +SELECT arrayPartialShuffle([tuple(1, -1), tuple(99999999, -99999999), tuple(3, -3)], 0, 0xbad_cafe); +SELECT arrayPartialShuffle([tuple(1, NULL), tuple(2, 'a'), tuple(3, 'A')], 0, 0xbad_cafe); +SELECT arrayPartialShuffle([NULL,NULL,NULL], 2); -- other, mostly non-trivial cases +SELECT arrayPartialShuffle([1,2,3,4,5,6,7,8,9,10], 1, 0xbad_cafe); +SELECT arrayPartialShuffle([1,2,3,4,5,6,7,8,9,10], 2, 0xbad_cafe); +SELECT arrayPartialShuffle([1,2,3,4,5,6,7,8,9,10], 4, 0xbad_cafe); +SELECT arrayPartialShuffle([1,2,3,4,5,6,7,8,9,10], 8, 0xbad_cafe); +SELECT arrayPartialShuffle([1,2,3,4,5,6,7,8,9,10], 9, 0xbad_cafe); +SELECT arrayPartialShuffle([1,2,3,4,5,6,7,8,9,10], 10, 0xbad_cafe); +SELECT arrayPartialShuffle([1,2,3,4,5,6,7,8,9,10], 100, 0xbad_cafe); +SELECT arrayPartialShuffle([1,2,3,4,5,6,7,8,9,10.1], 4, 0xbad_cafe); +SELECT arrayPartialShuffle([1,2,3,4,5,6,7,8,9,9223372036854775808], 4, 0xbad_cafe); +SELECT arrayPartialShuffle([1,2,3,4,5,6,7,8,9,NULL], 4, 0xbad_cafe); +SELECT arrayPartialShuffle([toFixedString('123', 3), toFixedString('456', 3), toFixedString('789', 3), toFixedString('ABC', 3), toFixedString('000', 3)], 3, 0xbad_cafe); +SELECT arrayPartialShuffle([toFixedString('123', 3), toFixedString('456', 3), toFixedString('789', 3), toFixedString('ABC', 3), NULL], 3, 0xbad_cafe); +SELECT arrayPartialShuffle(['storage','tiger','imposter','terminal','uniform','sensation'], 3, 0xbad_cafe); +SELECT arrayPartialShuffle(['storage','tiger',NULL,'terminal','uniform','sensation'], 3, 0xbad_cafe); +SELECT arrayPartialShuffle([[1,2,3,4],[-1,-2,-3,-4],[10,20,30,40],[100,200,300,400,500,600,700,800,900],[2,4,8,16,32,64]], 2, 0xbad_cafe); +SELECT arrayPartialShuffle([[1,2,3,4],[NULL,-2,-3,-4],[10,20,30,40],[100,200,300,400,500,600,700,800,900],[2,4,8,16,32,64]], 2, 0xbad_cafe); +SELECT arrayPartialShuffle(groupArray(x),20,0xbad_cafe) FROM (SELECT number as x from system.numbers LIMIT 100); +SELECT arrayPartialShuffle(groupArray(toUInt64(x)),20,0xbad_cafe) FROM (SELECT number as x from system.numbers LIMIT 100); +SELECT arrayPartialShuffle([tuple(1, -1), tuple(99999999, -99999999), tuple(3, -3)], 2, 0xbad_cafe); +SELECT arrayPartialShuffle([tuple(1, NULL), tuple(2, 'a'), tuple(3, 'A')], 2, 0xbad_cafe); +SELECT 
arrayShuffle([1, 2, 3], 42) FROM numbers(10); -- for constant array we do not materialize it and each row gets the same permutation +SELECT arrayShuffle(materialize([1, 2, 3]), 42) FROM numbers(10); +SELECT arrayShuffle(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayShuffle([1], 'a'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayShuffle([1], 1.1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayShuffle([1], 0xcafe, 1); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } \ No newline at end of file diff --git a/parser/testdata/02523_range_const_start/ast.json b/parser/testdata/02523_range_const_start/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02523_range_const_start/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02523_range_const_start/metadata.json b/parser/testdata/02523_range_const_start/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02523_range_const_start/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02523_range_const_start/query.sql b/parser/testdata/02523_range_const_start/query.sql new file mode 100644 index 000000000..869dec3c1 --- /dev/null +++ b/parser/testdata/02523_range_const_start/query.sql @@ -0,0 +1,9 @@ +SELECT + c1, + range(0, c1) AS zero_as_start_val, + range(1, c1) AS one_as_start_val, + range(c1) AS no_start_val, + range(c1, c1 * 2) AS val_as_start, + range(c1, c1 * c1, c1) AS complex_start_step +FROM values(1, 2, 3, 4, 5) +FORMAT Vertical; diff --git a/parser/testdata/02524_fuzz_and_fuss/ast.json b/parser/testdata/02524_fuzz_and_fuss/ast.json new file mode 100644 index 000000000..b9498a0da --- /dev/null +++ b/parser/testdata/02524_fuzz_and_fuss/ast.json @@ -0,0 +1,190 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal Array_[UInt64_9223372036854775806, UInt64_1048575]" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function sumMap (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier val" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_1" + }, + { + "explain": " Literal 'Decimal(10,2)'" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_10.000100135803223" + }, + { + "explain": " Literal 'Decimal(10,2)'" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_-0" + }, + { + "explain": " Literal 'Decimal(10,2)'" + }, + { + "explain": " Identifier cnt" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 
1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal '0.0000001023'" + }, + { + "explain": " Literal Array_[UInt64_1025, UInt64_256]" + }, + { + "explain": " Literal '102.5'" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal Array_[NULL]" + }, + { + "explain": " Function array (alias val) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal 'FixedString(1)'" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal 'FixedString(1)'" + }, + { + "explain": " Literal Array_[UInt64_1024, UInt64_100] (alias cnt)" + } + ], + + "rows": 56, + + "statistics": + { + "elapsed": 0.0013487, + "rows_read": 56, + "bytes_read": 2441 + } +} diff --git a/parser/testdata/02524_fuzz_and_fuss/metadata.json b/parser/testdata/02524_fuzz_and_fuss/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02524_fuzz_and_fuss/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02524_fuzz_and_fuss/query.sql b/parser/testdata/02524_fuzz_and_fuss/query.sql new file mode 100644 index 000000000..98e18e83d --- /dev/null +++ b/parser/testdata/02524_fuzz_and_fuss/query.sql @@ -0,0 +1 @@ +SELECT [9223372036854775806, 1048575], [], sumMap(val, [toDateTime64([CAST(1., 'Decimal(10,2)'), CAST(10.000100135803223, 'Decimal(10,2)')], NULL), CAST(-0., 'Decimal(10,2)')], cnt) FROM (SELECT toDateTime64('0.0000001023', [1025, 256], '102.5', NULL), [NULL], [CAST('a', 'FixedString(1)'), CAST('', 'FixedString(1)')] AS val, [1024, 100] AS cnt); diff --git a/parser/testdata/02524_fuzz_and_fuss_2/ast.json b/parser/testdata/02524_fuzz_and_fuss_2/ast.json new file mode 100644 index 000000000..b71b0909f --- /dev/null +++ b/parser/testdata/02524_fuzz_and_fuss_2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_a_02187 (children 1)" + }, + { + "explain": " Identifier data_a_02187" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001078356, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/02524_fuzz_and_fuss_2/metadata.json b/parser/testdata/02524_fuzz_and_fuss_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02524_fuzz_and_fuss_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02524_fuzz_and_fuss_2/query.sql b/parser/testdata/02524_fuzz_and_fuss_2/query.sql new file mode 100644 index 000000000..ff2e041b7 --- /dev/null +++ b/parser/testdata/02524_fuzz_and_fuss_2/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS data_a_02187; + +CREATE TABLE data_a_02187 +( + `a` Nullable(Int64) +) +ENGINE = Memory; + +INSERT INTO data_a_02187 +SELECT * +FROM system.one +SETTINGS max_block_size = '1', min_insert_block_size_rows = '65536', min_insert_block_size_bytes = '0', 
max_insert_threads = '0', max_threads = '3', receive_timeout = '10', receive_data_timeout_ms = '10000', connections_with_failover_max_tries = '0', extremes = '1', use_uncompressed_cache = '0', optimize_move_to_prewhere = '1', optimize_move_to_prewhere_if_final = '0', replication_alter_partitions_sync = '2', totals_mode = 'before_having', allow_suspicious_low_cardinality_types = '1', compile_expressions = '1', min_count_to_compile_expression = '0', group_by_two_level_threshold = '100', distributed_aggregation_memory_efficient = '0', distributed_group_by_no_merge = '1', optimize_distributed_group_by_sharding_key = '1', optimize_skip_unused_shards = '1', optimize_skip_unused_shards_rewrite_in = '1', force_optimize_skip_unused_shards = '2', optimize_skip_unused_shards_nesting = '1', force_optimize_skip_unused_shards_nesting = '2', merge_tree_min_rows_for_concurrent_read = '10000', force_primary_key = '1', network_compression_method = 'ZSTD', network_zstd_compression_level = '7', log_queries = '0', log_queries_min_type = 'QUERY_FINISH', distributed_product_mode = 'local', insert_quorum = '2', insert_quorum_timeout = '0', insert_quorum_parallel = '0', select_sequential_consistency = '1', join_use_nulls = '1', any_join_distinct_right_table_keys = '1', preferred_max_column_in_block_size_bytes = '32', distributed_foreground_insert = '1', insert_allow_materialized_columns = '1', use_index_for_in_with_subqueries = '1', joined_subquery_requires_alias = '0', empty_result_for_aggregation_by_empty_set = '1', allow_suspicious_codecs = '1', query_profiler_real_time_period_ns = '0', query_profiler_cpu_time_period_ns = '0', opentelemetry_start_trace_probability = '1', max_rows_to_read = '1000000', read_overflow_mode = 'break', max_rows_to_group_by = '10', group_by_overflow_mode = 'any', max_rows_to_sort = '100', sort_overflow_mode = 'break', max_result_rows = '10', max_execution_time = '9', max_execution_speed = '1', max_bytes_in_join = '100', join_algorithm = 'partial_merge', max_memory_usage = '1099511627776', log_query_threads = '1', send_logs_level = 'fatal', enable_optimize_predicate_expression = '1', prefer_localhost_replica = '1', optimize_read_in_order = '1', optimize_aggregation_in_order = '1', read_in_order_two_level_merge_threshold = '1', allow_introspection_functions = '1', check_query_single_value_result = '1', default_table_engine = 'Memory', mutations_sync = '2', convert_query_to_cnf = '0', optimize_arithmetic_operations_in_aggregate_functions = '1', optimize_duplicate_order_by_and_distinct = '0', optimize_multiif_to_if = '0', optimize_functions_to_subcolumns = '1', optimize_using_constraints = '1', optimize_substitute_columns = '1', optimize_append_index = '1', transform_null_in = '1', data_type_default_nullable = '1', cast_keep_nullable = '1', cast_ipv4_ipv6_default_on_conversion_error = '0', system_events_show_zero_values = '1', enable_global_with_statement = '1', optimize_on_insert = '0', optimize_rewrite_sum_if_to_count_if = '1', distributed_ddl_output_mode = 'throw', union_default_mode = 'ALL', optimize_aggregators_of_group_by_keys = '1', optimize_group_by_function_keys = '1', short_circuit_function_evaluation = 'enable', async_insert = '1', enable_filesystem_cache = '0', allow_deprecated_database_ordinary = '1', allow_deprecated_syntax_for_merge_tree = '1', allow_experimental_nlp_functions = '1', optimize_use_projections = '1', input_format_null_as_default = '1', input_format_ipv4_default_on_conversion_error = '0', input_format_ipv6_default_on_conversion_error = '0', 
output_format_json_named_tuples_as_objects = '1', output_format_write_statistics = '0', output_format_pretty_row_numbers = '1'; + +DROP TABLE data_a_02187; diff --git a/parser/testdata/02525_analyzer_function_in_crash_fix/ast.json b/parser/testdata/02525_analyzer_function_in_crash_fix/ast.json new file mode 100644 index 000000000..0454c3fd9 --- /dev/null +++ b/parser/testdata/02525_analyzer_function_in_crash_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001136093, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02525_analyzer_function_in_crash_fix/metadata.json b/parser/testdata/02525_analyzer_function_in_crash_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02525_analyzer_function_in_crash_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02525_analyzer_function_in_crash_fix/query.sql b/parser/testdata/02525_analyzer_function_in_crash_fix/query.sql new file mode 100644 index 000000000..dd1688ad4 --- /dev/null +++ b/parser/testdata/02525_analyzer_function_in_crash_fix/query.sql @@ -0,0 +1,14 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt8, + value Nullable(Decimal(38, 2)) +) ENGINE = MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (1, '22.5'), (2, Null); + +SELECT id IN toDecimal64(257, NULL) FROM test_table; + +DROP TABLE test_table; diff --git a/parser/testdata/02525_different_engines_in_temporary_tables/ast.json b/parser/testdata/02525_different_engines_in_temporary_tables/ast.json new file mode 100644 index 000000000..a38416880 --- /dev/null +++ b/parser/testdata/02525_different_engines_in_temporary_tables/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_merge_tree_02525 (children 1)" + }, + { + "explain": " Identifier table_merge_tree_02525" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001324542, + "rows_read": 2, + "bytes_read": 96 + } +} diff --git a/parser/testdata/02525_different_engines_in_temporary_tables/metadata.json b/parser/testdata/02525_different_engines_in_temporary_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02525_different_engines_in_temporary_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02525_different_engines_in_temporary_tables/query.sql b/parser/testdata/02525_different_engines_in_temporary_tables/query.sql new file mode 100644 index 000000000..58e9ecab3 --- /dev/null +++ b/parser/testdata/02525_different_engines_in_temporary_tables/query.sql @@ -0,0 +1,68 @@ +DROP TEMPORARY TABLE IF EXISTS table_merge_tree_02525; +CREATE TEMPORARY TABLE table_merge_tree_02525 +( + id UInt64, + info String +) +ENGINE = MergeTree +ORDER BY id +PRIMARY KEY id; +INSERT INTO table_merge_tree_02525 VALUES (1, 'a'), (2, 'b'); +INSERT INTO table_merge_tree_02525 VALUES (3, 'c'); +OPTIMIZE TABLE table_merge_tree_02525 FINAL; +SELECT * FROM table_merge_tree_02525; +-- Check that temporary table with MergeTree is not sent to remote servers +-- The query with remote() should not fail +SELECT dummy FROM remote('127.0.0.{1,2}', system, one); +DROP TEMPORARY TABLE table_merge_tree_02525; + +DROP TEMPORARY TABLE IF EXISTS table_log_02525; +CREATE TEMPORARY TABLE 
table_log_02525 +( + id UInt64, + info String +) +ENGINE = Log; +INSERT INTO table_log_02525 VALUES (1, 'a'), (2, 'b'), (3, 'c'); +SELECT * FROM table_log_02525; +DROP TEMPORARY TABLE table_log_02525; + +DROP TEMPORARY TABLE IF EXISTS table_stripe_log_02525; +CREATE TEMPORARY TABLE table_stripe_log_02525 +( + id UInt64, + info String +) +ENGINE = StripeLog; +INSERT INTO table_stripe_log_02525 VALUES (1, 'a'), (2, 'b'), (3, 'c'); +SELECT * FROM table_stripe_log_02525; +DROP TEMPORARY TABLE table_stripe_log_02525; + +DROP TEMPORARY TABLE IF EXISTS table_tiny_log_02525; +CREATE TEMPORARY TABLE table_tiny_log_02525 +( + id UInt64, + info String +) +ENGINE = TinyLog; +INSERT INTO table_tiny_log_02525 VALUES (1, 'a'), (2, 'b'), (3, 'c'); +SELECT * FROM table_tiny_log_02525; +DROP TEMPORARY TABLE table_tiny_log_02525; + +DROP TEMPORARY TABLE IF EXISTS table_replicated_merge_tree_02525; +CREATE TEMPORARY TABLE table_replicated_merge_tree_02525 +( + id UInt64, + info String +) +ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_02525/table_replicated_merge_tree_02525', 'r1') +ORDER BY id +PRIMARY KEY id; -- { serverError INCORRECT_QUERY } + +DROP TEMPORARY TABLE IF EXISTS table_keeper_map_02525; +CREATE TEMPORARY TABLE table_keeper_map_02525 +( + key String, + value UInt32 +) Engine=KeeperMap('/' || currentDatabase() || '/test02525') +PRIMARY KEY(key); -- { serverError INCORRECT_QUERY } diff --git a/parser/testdata/02525_jit_logical_functions_nan/ast.json b/parser/testdata/02525_jit_logical_functions_nan/ast.json new file mode 100644 index 000000000..85f92ff55 --- /dev/null +++ b/parser/testdata/02525_jit_logical_functions_nan/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001075299, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02525_jit_logical_functions_nan/metadata.json b/parser/testdata/02525_jit_logical_functions_nan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02525_jit_logical_functions_nan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02525_jit_logical_functions_nan/query.sql b/parser/testdata/02525_jit_logical_functions_nan/query.sql new file mode 100644 index 000000000..6b4770c76 --- /dev/null +++ b/parser/testdata/02525_jit_logical_functions_nan/query.sql @@ -0,0 +1,9 @@ +SET min_count_to_compile_expression = 0; + +SELECT NOT NOT cos(MAX(pow(1523598955, 763027371))) FROM numbers(1) SETTINGS compile_expressions = 0; + +SELECT NOT NOT cos(MAX(pow(1523598955, 763027371))) FROM numbers(1) SETTINGS compile_expressions = 1; + +SELECT not(not(materialize(nan))) FROM numbers(1) SETTINGS compile_expressions = 0; + +SELECT not(not(materialize(nan))) FROM numbers(1) SETTINGS compile_expressions = 1; diff --git a/parser/testdata/02525_range_hashed_dictionary_update_field/ast.json b/parser/testdata/02525_range_hashed_dictionary_update_field/ast.json new file mode 100644 index 000000000..db2704806 --- /dev/null +++ b/parser/testdata/02525_range_hashed_dictionary_update_field/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001173477, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git 
a/parser/testdata/02525_range_hashed_dictionary_update_field/metadata.json b/parser/testdata/02525_range_hashed_dictionary_update_field/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02525_range_hashed_dictionary_update_field/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02525_range_hashed_dictionary_update_field/query.sql b/parser/testdata/02525_range_hashed_dictionary_update_field/query.sql new file mode 100644 index 000000000..2534333af --- /dev/null +++ b/parser/testdata/02525_range_hashed_dictionary_update_field/query.sql @@ -0,0 +1,37 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + uid Int64, + start Int64, + end Int64, + insert_time DateTime +) ENGINE = MergeTree ORDER BY (uid, start); + +DROP DICTIONARY IF EXISTS test_dictionary; +CREATE DICTIONARY test_dictionary +( + start Int64, + end Int64, + insert_time DateTime, + uid Int64 +) PRIMARY KEY uid +LAYOUT(RANGE_HASHED()) +RANGE(MIN start MAX end) +SOURCE(CLICKHOUSE(TABLE 'test_table' UPDATE_FIELD 'insert_time' UPDATE_LAG 10)) +LIFETIME(MIN 1 MAX 2); + +INSERT INTO test_table VALUES (1, 0, 100, '2022-12-26 11:38:34'), (1, 101, 200, '2022-12-26 11:38:34'), (2, 0, 999, '2022-12-26 11:38:34'), (2, 1000, 10000, '2022-12-26 11:38:34'); + +SELECT * FROM test_dictionary; +SELECT dictGet('test_dictionary', 'insert_time', toUInt64(1), 10); + +SELECT sleep(3) format Null; +SELECT sleep(3) format Null; + +SELECT '--'; + +SELECT * FROM test_dictionary; +SELECT dictGet('test_dictionary', 'insert_time', toUInt64(1), 10); + +DROP DICTIONARY test_dictionary; +DROP TABLE test_table; diff --git a/parser/testdata/02526_kv_engine_different_filter_type/ast.json b/parser/testdata/02526_kv_engine_different_filter_type/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02526_kv_engine_different_filter_type/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02526_kv_engine_different_filter_type/metadata.json b/parser/testdata/02526_kv_engine_different_filter_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02526_kv_engine_different_filter_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02526_kv_engine_different_filter_type/query.sql b/parser/testdata/02526_kv_engine_different_filter_type/query.sql new file mode 100644 index 000000000..11e373284 --- /dev/null +++ b/parser/testdata/02526_kv_engine_different_filter_type/query.sql @@ -0,0 +1,16 @@ +-- Tags: zookeeper, no-ordinary-database, use-rocksdb + +DROP TABLE IF EXISTS 02526_keeper_map; +DROP TABLE IF EXISTS 02526_rocksdb; + +CREATE TABLE 02526_keeper_map (`key` String, `value` UInt32) ENGINE = KeeperMap('/' || currentDatabase() || '/02526_kv_filter_types') PRIMARY KEY key; +INSERT INTO 02526_keeper_map SELECT * FROM generateRandom('`key` String, `value` UInt32') LIMIT 100; +SELECT * FROM 02526_keeper_map WHERE key in (SELECT number * 5 FROM numbers(1000)) FORMAT Null; + +DROP TABLE 02526_keeper_map; + +CREATE TABLE 02526_rocksdb (`key` String, `value` UInt32) ENGINE = EmbeddedRocksDB PRIMARY KEY key; +INSERT INTO 02526_rocksdb SELECT * FROM generateRandom('`key` String, `value` UInt32') LIMIT 100; +SELECT * FROM 02526_rocksdb WHERE key in (SELECT number * 5 FROM numbers(1000)) FORMAT Null; + +DROP TABLE 02526_rocksdb; diff --git a/parser/testdata/02526_merge_join_int_decimal/ast.json b/parser/testdata/02526_merge_join_int_decimal/ast.json new file mode 100644 index 
000000000..b11a4ffe2 --- /dev/null +++ b/parser/testdata/02526_merge_join_int_decimal/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery foo (children 1)" + }, + { + "explain": " Identifier foo" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001301536, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02526_merge_join_int_decimal/metadata.json b/parser/testdata/02526_merge_join_int_decimal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02526_merge_join_int_decimal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02526_merge_join_int_decimal/query.sql b/parser/testdata/02526_merge_join_int_decimal/query.sql new file mode 100644 index 000000000..b354f2020 --- /dev/null +++ b/parser/testdata/02526_merge_join_int_decimal/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS foo; +DROP TABLE IF EXISTS foo1; +DROP TABLE IF EXISTS foo_merge; +DROP TABLE IF EXISTS t2; + +CREATE TABLE foo(Id Int32, Val Int32) Engine=MergeTree PARTITION BY Val ORDER BY Id; +CREATE TABLE foo1(Id Int32, Val Decimal32(9)) Engine=MergeTree PARTITION BY Val ORDER BY Id; +INSERT INTO foo SELECT number, number%5 FROM numbers(100000); +INSERT INTO foo1 SELECT number, 1 FROM numbers(100000); + +CREATE TABLE foo_merge as foo ENGINE=Merge(currentDatabase(), '^foo'); + +CREATE TABLE t2 (Id Int32, Val Int64, X UInt256) Engine=Memory; +INSERT INTO t2 values (4, 3, 4); + +SELECT * FROM foo_merge WHERE Val = 3 AND Id = 3; +SELECT count(), X FROM foo_merge JOIN t2 USING Val WHERE Val = 3 AND Id = 3 AND t2.X == 4 GROUP BY X; +SELECT count(), X FROM foo_merge JOIN t2 USING Val WHERE Val = 3 AND (Id = 3 AND t2.X == 4) GROUP BY X; +SELECT count(), X FROM foo_merge JOIN t2 USING Val WHERE Val = 3 AND Id = 3 GROUP BY X; +SELECT count(), X FROM (SELECT * FROM foo_merge) f JOIN t2 USING Val WHERE Val = 3 AND Id = 3 GROUP BY X; + +SELECT 7, count(1000.0001), -9223372036854775807 FROM foo_merge INNER JOIN t2 USING (Val) WHERE (((NULL AND -2 AND (Val = NULL)) AND (Id = NULL) AND (Val = NULL) AND (Id = NULL)) AND (Id = NULL) AND Val AND NULL) AND ((3 AND NULL AND -2147483648 AND (Val = NULL)) AND (Id = NULL) AND (Val = NULL)) AND ((NULL AND -2 AND (Val = NULL)) AND (Id = NULL) AND (Val = NULL)) AND 2147483647 WITH TOTALS; + +DROP TABLE IF EXISTS foo; +DROP TABLE IF EXISTS foo1; +DROP TABLE IF EXISTS foo_merge; +DROP TABLE IF EXISTS t2; \ No newline at end of file diff --git a/parser/testdata/02527_storage_merge_prewhere_different_type/ast.json b/parser/testdata/02527_storage_merge_prewhere_different_type/ast.json new file mode 100644 index 000000000..6d7e39195 --- /dev/null +++ b/parser/testdata/02527_storage_merge_prewhere_different_type/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery foo (children 1)" + }, + { + "explain": " Identifier foo" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001357286, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02527_storage_merge_prewhere_different_type/metadata.json b/parser/testdata/02527_storage_merge_prewhere_different_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02527_storage_merge_prewhere_different_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02527_storage_merge_prewhere_different_type/query.sql b/parser/testdata/02527_storage_merge_prewhere_different_type/query.sql new file mode 100644 index 000000000..a0732de4b --- /dev/null +++ b/parser/testdata/02527_storage_merge_prewhere_different_type/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS foo; +DROP TABLE IF EXISTS merge1; +DROP TABLE IF EXISTS merge2; + +CREATE TABLE foo(Id Int32, Val Nullable(Int32)) Engine=MergeTree ORDER BY Id; +INSERT INTO foo VALUES (1, 2), (3, 4); + +CREATE TABLE merge1(Id Int32, Val Int32) Engine=Merge(currentDatabase(), '^foo'); +SELECT Val FROM merge1 PREWHERE Val = 65536 OR Val = 2; -- { serverError ILLEGAL_PREWHERE } + +CREATE TABLE merge2(Id Int32, Val Nullable(Int32)) Engine=Merge(currentDatabase(), '^foo'); +SELECT Val FROM merge2 PREWHERE Val = 65536 OR Val = 2; + +DROP TABLE merge2; +DROP TABLE merge1; +DROP TABLE foo; diff --git a/parser/testdata/02530_ip_part_id/ast.json b/parser/testdata/02530_ip_part_id/ast.json new file mode 100644 index 000000000..e3f6908bf --- /dev/null +++ b/parser/testdata/02530_ip_part_id/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ip_part_test (children 1)" + }, + { + "explain": " Identifier ip_part_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001187918, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/02530_ip_part_id/metadata.json b/parser/testdata/02530_ip_part_id/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02530_ip_part_id/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02530_ip_part_id/query.sql b/parser/testdata/02530_ip_part_id/query.sql new file mode 100644 index 000000000..bf704eaa1 --- /dev/null +++ b/parser/testdata/02530_ip_part_id/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS ip_part_test; + +CREATE TABLE ip_part_test ( ipv4 IPv4, ipv6 IPv6 ) ENGINE = MergeTree PARTITION BY ipv4 ORDER BY ipv4 AS SELECT '1.2.3.4', '::ffff:1.2.3.4'; + +SELECT *, _part FROM ip_part_test; + +DROP TABLE IF EXISTS ip_part_test; + +CREATE TABLE ip_part_test ( ipv4 IPv4, ipv6 IPv6 ) ENGINE = MergeTree PARTITION BY ipv6 ORDER BY ipv6 AS SELECT '1.2.3.4', '::ffff:1.2.3.4'; + +SELECT *, _part FROM ip_part_test; + +DROP TABLE IF EXISTS ip_part_test; + diff --git a/parser/testdata/02531_ipv4_arithmetic/ast.json b/parser/testdata/02531_ipv4_arithmetic/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02531_ipv4_arithmetic/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02531_ipv4_arithmetic/metadata.json b/parser/testdata/02531_ipv4_arithmetic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02531_ipv4_arithmetic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02531_ipv4_arithmetic/query.sql b/parser/testdata/02531_ipv4_arithmetic/query.sql new file mode 100644 index 000000000..88c8cf936 --- /dev/null +++ b/parser/testdata/02531_ipv4_arithmetic/query.sql @@ -0,0 +1,4 @@ +-- { echoOn } +SELECT number, ip, ip % number FROM (SELECT number, toIPv4('1.2.3.4') as ip FROM numbers(10, 20)); +SELECT number, ip, number % ip FROM (SELECT number, toIPv4OrNull('0.0.0.3') as ip FROM numbers(10, 20)); + diff --git a/parser/testdata/02531_semi_join_null_const_bug/ast.json b/parser/testdata/02531_semi_join_null_const_bug/ast.json new file mode 100644 index 000000000..06d05d390 
--- /dev/null +++ b/parser/testdata/02531_semi_join_null_const_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001106727, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02531_semi_join_null_const_bug/metadata.json b/parser/testdata/02531_semi_join_null_const_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02531_semi_join_null_const_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02531_semi_join_null_const_bug/query.sql b/parser/testdata/02531_semi_join_null_const_bug/query.sql new file mode 100644 index 000000000..6f7412ad4 --- /dev/null +++ b/parser/testdata/02531_semi_join_null_const_bug/query.sql @@ -0,0 +1,11 @@ +SET join_use_nulls = 1; + +SELECT b.id +FROM ( + SELECT toLowCardinality(0 :: UInt32) AS id + GROUP BY [] +) AS a +SEMI LEFT JOIN ( + SELECT toLowCardinality(1 :: UInt64) AS id +) AS b +USING (id); diff --git a/parser/testdata/02531_storage_join_null_44940/ast.json b/parser/testdata/02531_storage_join_null_44940/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02531_storage_join_null_44940/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02531_storage_join_null_44940/metadata.json b/parser/testdata/02531_storage_join_null_44940/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02531_storage_join_null_44940/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02531_storage_join_null_44940/query.sql b/parser/testdata/02531_storage_join_null_44940/query.sql new file mode 100644 index 000000000..136fc8bbe --- /dev/null +++ b/parser/testdata/02531_storage_join_null_44940/query.sql @@ -0,0 +1,18 @@ + +SET allow_suspicious_low_cardinality_types = 1; + +DROP TABLE IF EXISTS t1__fuzz_8; +DROP TABLE IF EXISTS full_join__fuzz_4; + +CREATE TABLE t1__fuzz_8 (`x` LowCardinality(UInt32), `str` Nullable(Int16)) ENGINE = Memory; +INSERT INTO t1__fuzz_8 VALUES (1, 1), (2, 2); + +CREATE TABLE full_join__fuzz_4 (`x` LowCardinality(UInt32), `s` LowCardinality(String)) ENGINE = Join(`ALL`, FULL, x) SETTINGS join_use_nulls = 1; +INSERT INTO full_join__fuzz_4 VALUES (1, '1'), (2, '2'), (3, '3'); + +SET join_use_nulls = 1; + +SELECT * FROM t1__fuzz_8 FULL OUTER JOIN full_join__fuzz_4 USING (x) ORDER BY x DESC, str ASC, s ASC NULLS LAST; + +DROP TABLE IF EXISTS t1__fuzz_8; +DROP TABLE IF EXISTS full_join__fuzz_4; diff --git a/parser/testdata/02532_analyzer_aggregation_with_rollup/ast.json b/parser/testdata/02532_analyzer_aggregation_with_rollup/ast.json new file mode 100644 index 000000000..53751e752 --- /dev/null +++ b/parser/testdata/02532_analyzer_aggregation_with_rollup/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001105988, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02532_analyzer_aggregation_with_rollup/metadata.json b/parser/testdata/02532_analyzer_aggregation_with_rollup/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02532_analyzer_aggregation_with_rollup/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02532_analyzer_aggregation_with_rollup/query.sql 
b/parser/testdata/02532_analyzer_aggregation_with_rollup/query.sql new file mode 100644 index 000000000..587ef71df --- /dev/null +++ b/parser/testdata/02532_analyzer_aggregation_with_rollup/query.sql @@ -0,0 +1,20 @@ +SET enable_analyzer = 1; + +SELECT + sum(a.number) AS total, + c.number AS cn, + b.number AS bn, + grouping(c.number) + grouping(b.number) AS l, + rank() OVER (PARTITION BY grouping(c.number) + grouping(b.number), multiIf(grouping(c.number) = 0, b.number, NULL) ORDER BY sum(a.number) DESC) AS r +FROM numbers(10) AS a, numbers(10) AS b, numbers(10) AS c +GROUP BY + cn, + bn + WITH ROLLUP +ORDER BY + total ASC, + cn ASC, + bn ASC, + l ASC, + r ASC +LIMIT 10; diff --git a/parser/testdata/02532_profileevents_server_startup_time/ast.json b/parser/testdata/02532_profileevents_server_startup_time/ast.json new file mode 100644 index 000000000..a0092d31e --- /dev/null +++ b/parser/testdata/02532_profileevents_server_startup_time/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier event" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.events" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier event" + }, + { + "explain": " Literal 'ServerStartupMilliseconds'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001215353, + "rows_read": 13, + "bytes_read": 510 + } +} diff --git a/parser/testdata/02532_profileevents_server_startup_time/metadata.json b/parser/testdata/02532_profileevents_server_startup_time/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02532_profileevents_server_startup_time/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02532_profileevents_server_startup_time/query.sql b/parser/testdata/02532_profileevents_server_startup_time/query.sql new file mode 100644 index 000000000..d7c97d08c --- /dev/null +++ b/parser/testdata/02532_profileevents_server_startup_time/query.sql @@ -0,0 +1 @@ +SELECT event FROM system.events WHERE event = 'ServerStartupMilliseconds' \ No newline at end of file diff --git a/parser/testdata/02533_generate_random_schema_inference/ast.json b/parser/testdata/02533_generate_random_schema_inference/ast.json new file mode 100644 index 000000000..c57c5959e --- /dev/null +++ b/parser/testdata/02533_generate_random_schema_inference/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000872577, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02533_generate_random_schema_inference/metadata.json b/parser/testdata/02533_generate_random_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02533_generate_random_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02533_generate_random_schema_inference/query.sql b/parser/testdata/02533_generate_random_schema_inference/query.sql new file mode 100644 index 000000000..9ca435374 --- /dev/null +++ b/parser/testdata/02533_generate_random_schema_inference/query.sql @@ -0,0 +1,6 @@ +drop table if exists test; +create table test (x UInt32, y String) engine=Memory; +insert into test select * from generateRandom() limit 10; +select count() from test; +drop table test; + diff --git a/parser/testdata/02534_analyzer_grouping_function/ast.json b/parser/testdata/02534_analyzer_grouping_function/ast.json new file mode 100644 index 000000000..79acfbe58 --- /dev/null +++ b/parser/testdata/02534_analyzer_grouping_function/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001149035, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02534_analyzer_grouping_function/metadata.json b/parser/testdata/02534_analyzer_grouping_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02534_analyzer_grouping_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02534_analyzer_grouping_function/query.sql b/parser/testdata/02534_analyzer_grouping_function/query.sql new file mode 100644 index 000000000..ee1cc1d88 --- /dev/null +++ b/parser/testdata/02534_analyzer_grouping_function/query.sql @@ -0,0 +1,41 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0, 'Value'); + +-- { echoOn } + +EXPLAIN QUERY TREE SELECT grouping(id), grouping(value) FROM test_table GROUP BY id, value; + +SELECT grouping(id) AS grouping_id, grouping(value) AS grouping_value, id, value FROM test_table +GROUP BY id, value ORDER BY grouping_id, grouping_value; + +EXPLAIN QUERY TREE SELECT grouping(id), grouping(value) FROM test_table GROUP BY ROLLUP (id, value); + +SELECT grouping(id) AS grouping_id, grouping(value) AS grouping_value, id, value FROM test_table +GROUP BY ROLLUP (id, value) ORDER BY grouping_id, grouping_value; + +EXPLAIN QUERY TREE SELECT grouping(id), grouping(value) FROM test_table GROUP BY CUBE (id, value); + +SELECT grouping(id) AS grouping_id, grouping(value) AS grouping_value, id, value FROM test_table +GROUP BY CUBE (id, value) ORDER BY grouping_id, grouping_value; + +EXPLAIN QUERY TREE SELECT grouping(id), grouping(value) FROM test_table GROUP BY GROUPING SETS (id, value); + +SELECT grouping(id) AS grouping_id, grouping(value) AS grouping_value, id, value FROM test_table +GROUP BY GROUPING SETS (id, value) ORDER BY grouping_id, grouping_value; + +EXPLAIN QUERY TREE SELECT grouping(id), grouping(value) FROM test_table GROUP BY GROUPING SETS ((id), (value)); + +SELECT grouping(id) AS grouping_id, grouping(value) AS grouping_value, id, value FROM test_table +GROUP BY GROUPING SETS ((id), (value)) ORDER BY grouping_id, grouping_value; + +-- { echoOff } + +DROP TABLE test_table; diff --git a/parser/testdata/02534_default_granularity/ast.json b/parser/testdata/02534_default_granularity/ast.json new file mode 100644 index 000000000..0c064dfbf --- /dev/null +++ b/parser/testdata/02534_default_granularity/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 
users_02534 (children 1)" + }, + { + "explain": " Identifier users_02534" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001599355, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/02534_default_granularity/metadata.json b/parser/testdata/02534_default_granularity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02534_default_granularity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02534_default_granularity/query.sql b/parser/testdata/02534_default_granularity/query.sql new file mode 100644 index 000000000..e3de5fce7 --- /dev/null +++ b/parser/testdata/02534_default_granularity/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS users_02534; +CREATE TABLE users_02534 (id Int16, name String, INDEX bf_idx(name) TYPE minmax) ENGINE=MergeTree ORDER BY id; +SHOW CREATE TABLE users_02534; +DROP TABLE users_02534; + +CREATE TABLE users_02534 (id Int16, name String) ENGINE=MergeTree ORDER BY id; +ALTER TABLE users_02534 ADD INDEX bf_idx(name) TYPE minmax; +SHOW CREATE TABLE users_02534; +DROP TABLE users_02534; diff --git a/parser/testdata/02534_join_prewhere_bug/ast.json b/parser/testdata/02534_join_prewhere_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02534_join_prewhere_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02534_join_prewhere_bug/metadata.json b/parser/testdata/02534_join_prewhere_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02534_join_prewhere_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02534_join_prewhere_bug/query.sql b/parser/testdata/02534_join_prewhere_bug/query.sql new file mode 100644 index 000000000..016c92597 --- /dev/null +++ b/parser/testdata/02534_join_prewhere_bug/query.sql @@ -0,0 +1,51 @@ + +DROP TABLE IF EXISTS test1; +DROP TABLE IF EXISTS test2; + +CREATE TABLE test1 ( `col1` UInt64, `col2` Int8 ) ENGINE = MergeTree ORDER BY col1; +CREATE TABLE test2 ( `col1` UInt64, `col3` Int16 ) ENGINE = MergeTree ORDER BY col1; + +INSERT INTO test1 VALUES (123, 123), (12321, -30), (321, -32); +INSERT INTO test2 VALUES (123, 5600), (321, 5601); + +SET join_use_nulls = 1; + +-- { echoOn } + +SELECT * FROM test1 LEFT JOIN test2 ON test1.col1 = test2.col1 +WHERE test2.col1 IS NULL +ORDER BY test2.col1 +; + +SELECT * FROM test2 RIGHT JOIN test1 ON test2.col1 = test1.col1 +WHERE test2.col1 IS NULL +ORDER BY test2.col1 +; + +SELECT * FROM test1 LEFT JOIN test2 ON test1.col1 = test2.col1 +WHERE test2.col1 IS NOT NULL +ORDER BY test2.col1 +; + +SELECT * FROM test2 RIGHT JOIN test1 ON test2.col1 = test1.col1 +WHERE test2.col1 IS NOT NULL +ORDER BY test2.col1 +; + +SELECT test2.col1, test1.* FROM test2 RIGHT JOIN test1 ON test2.col1 = test1.col1 +WHERE test2.col1 IS NOT NULL +ORDER BY test2.col1 +; + +SELECT test2.col3, test1.* FROM test2 RIGHT JOIN test1 ON test2.col1 = test1.col1 +WHERE test2.col1 IS NOT NULL +ORDER BY test2.col1 +; + +SELECT col2, col2 + 1 FROM test1 +FULL OUTER JOIN test2 USING (col1) +PREWHERE (col2 * 2) :: UInt8 +; + +DROP TABLE IF EXISTS test1; +DROP TABLE IF EXISTS test2; diff --git a/parser/testdata/02534_keyed_siphash/ast.json b/parser/testdata/02534_keyed_siphash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02534_keyed_siphash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02534_keyed_siphash/metadata.json 
b/parser/testdata/02534_keyed_siphash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02534_keyed_siphash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02534_keyed_siphash/query.sql b/parser/testdata/02534_keyed_siphash/query.sql new file mode 100644 index 000000000..d771669dc --- /dev/null +++ b/parser/testdata/02534_keyed_siphash/query.sql @@ -0,0 +1,359 @@ +-- Test Vectors from the SipHash reference C implementation: +-- Written in 2012 by +-- Jean-Philippe Aumasson <jeanphilippe.aumasson@gmail.com> +-- Daniel J. Bernstein <djb@cr.yp.to> +-- Released under CC0 + +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + '')); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 
21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 
45, 46, 47, 48, 49, 50))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60))); +select hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61))); +select 
hex(sipHash64Keyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62))); + +-- CH tests +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0)) == sipHash64(char(0)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1)) == sipHash64(char(0, 1)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2)) == sipHash64(char(0, 1, 2)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3)) == sipHash64(char(0, 1, 2, 3)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4)) == sipHash64(char(0, 1, 2, 3, 4)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5)) == sipHash64(char(0, 1, 2, 3, 4, 5)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 
17, 18, 19, 20, 21)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33)); +select 
sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 
7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62)); +select sipHash64Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63)) == sipHash64(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0)) == sipHash128(char(0)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1)) == sipHash128(char(0, 1)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2)) == sipHash128(char(0, 1, 2)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3)) == sipHash128(char(0, 1, 2, 3)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4)) == sipHash128(char(0, 1, 2, 3, 4)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5)) == sipHash128(char(0, 1, 2, 3, 4, 5)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6)) == sipHash128(char(0, 1, 2, 
3, 4, 5, 6)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 
22, 23, 24, 25)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 
32, 33, 34, 35, 36)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 
45)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60)) == 
sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62)); +select sipHash128Keyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63)) == sipHash128(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63)); + +select sipHash64Keyed((0, 0), '1'); -- { serverError BAD_ARGUMENTS } +select sipHash128Keyed((0, 0), '1'); -- { serverError BAD_ARGUMENTS } +select sipHash64Keyed(toUInt64(0), '1'); -- { serverError BAD_ARGUMENTS } +select sipHash128Keyed(toUInt64(0), '1'); -- { serverError BAD_ARGUMENTS } + +select hex(sipHash64()); +SELECT hex(sipHash128()); +select hex(sipHash64Keyed()); +SELECT hex(sipHash128Keyed()); + +SELECT 'Check bug with hashing of const integer values'; +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (key Tuple(UInt64, UInt64), val UInt64) ENGINE=Memory; +INSERT INTO tab VALUES ((2, 2), 4); +-- these two statements must produce the same result +SELECT sipHash64Keyed(key, val) FROM tab; +SELECT sipHash64Keyed(key, 4::UInt64) FROM tab; +SELECT hex(sipHash128Keyed(key, val)) FROM tab; +SELECT hex(sipHash128Keyed(key, 4::UInt64)) FROM tab; +DROP TABLE tab; + +SELECT 'Check memsan bug'; +SELECT sipHash64Keyed((2::UInt64, toUInt64(2)), 4) GROUP BY toUInt64(2); +SELECT hex(sipHash64Keyed((toUInt64(9223372036854775806), toUInt64(-9223372036854775808)), char(2147483646, -2147483648, 1, 3, 4, 7, 2147483647))) GROUP BY toUInt64(257), (toUInt64(9223372036854775806), toUInt64(2147483646)); +SELECT sipHash64Keyed((toUInt64(9223372036854775806), 9223372036854775808::UInt64), char(2)) GROUP BY toUInt64(9223372036854775806); + +SELECT 'Check const columns'; +DROP TABLE IF EXISTS sipHashKeyed_test; +CREATE TABLE sipHashKeyed_test ENGINE = Memory() AS SELECT 1 a, 'test' b; +SELECT sipHash64Keyed((toUInt64(0), toUInt64(0)), 1, 'test'); +SELECT sipHash64(tuple(*)) FROM sipHashKeyed_test; +SELECT 
sipHash64Keyed((toUInt64(0), toUInt64(0)), tuple(*)) FROM sipHashKeyed_test; +SELECT sipHash64Keyed((toUInt64(0), toUInt64(0)), a, b) FROM sipHashKeyed_test; +SELECT hex(sipHash128Keyed((toUInt64(0), toUInt64(0)), 1, 'test')); +SELECT hex(sipHash128(tuple(*))) FROM sipHashKeyed_test; +SELECT hex(sipHash128Keyed((toUInt64(0), toUInt64(0)), tuple(*))) FROM sipHashKeyed_test; +SELECT hex(sipHash128Keyed((toUInt64(0), toUInt64(0)), a, b)) FROM sipHashKeyed_test; +DROP TABLE sipHashKeyed_test; + +SELECT 'Check multiple keys as tuple from a table'; +DROP TABLE IF EXISTS sipHashKeyed_keys; +CREATE TABLE sipHashKeyed_keys (key Tuple(UInt64, UInt64), val UInt64) ENGINE=Memory; +INSERT INTO sipHashKeyed_keys VALUES ((2, 2), 4); +INSERT INTO sipHashKeyed_keys VALUES ((4, 4), 4); +SELECT sipHash64Keyed(key, val) FROM sipHashKeyed_keys ORDER by key; +SELECT hex(sipHash128Keyed(key, val)) FROM sipHashKeyed_keys ORDER by key; +DROP TABLE sipHashKeyed_keys; + +SELECT 'Check multiple keys as separate ints from a table'; +DROP TABLE IF EXISTS sipHashKeyed_keys; +CREATE TABLE sipHashKeyed_keys (key0 UInt64, key1 UInt64, val UInt64) ENGINE=Memory; +INSERT INTO sipHashKeyed_keys VALUES (2, 2, 4); +INSERT INTO sipHashKeyed_keys VALUES (4, 4, 4); +SELECT sipHash64Keyed((key0, key1), val) FROM sipHashKeyed_keys ORDER by key0; +SELECT hex(sipHash128Keyed((key0, key1), val)) FROM sipHashKeyed_keys ORDER by key0; +SELECT 'Check constant key and data from a table'; +SELECT sipHash64Keyed((2::UInt64, 2::UInt64), val) FROM sipHashKeyed_keys ORDER by val; +SELECT hex(sipHash128Keyed((2::UInt64, 2::UInt64), val)) FROM sipHashKeyed_keys ORDER by val; +DROP TABLE sipHashKeyed_keys; + +SELECT 'Check multiple keys as separate ints from a table with constant data'; +DROP TABLE IF EXISTS sipHashKeyed_keys; +CREATE TABLE sipHashKeyed_keys (key0 UInt64, key1 UInt64) ENGINE=Memory; +INSERT INTO sipHashKeyed_keys VALUES (2, 2); +INSERT INTO sipHashKeyed_keys VALUES (4, 4); +SELECT sipHash64Keyed((key0, key1), 4::UInt64) FROM sipHashKeyed_keys ORDER by key0; +SELECT hex(sipHash128Keyed((key0, key1), 4::UInt64)) FROM sipHashKeyed_keys ORDER by key0; +DROP TABLE sipHashKeyed_keys; + +SELECT 'Check asan bug'; +SELECT sipHash128((toUInt64(9223372036854775806), 1)) = sipHash128(1) GROUP BY sipHash128(1::UInt8), toUInt64(9223372036854775806); + +SELECT 'Check bug found fuzzing'; +SELECT [(255, 1048575)], sipHash128ReferenceKeyed((toUInt64(2147483646), toUInt64(9223372036854775807)), ([(NULL, 100), (NULL, NULL), (1024, 10)], toUInt64(2), toUInt64(1024)), ''), hex(sipHash128ReferenceKeyed((-9223372036854775807, 1.), '-1', NULL)), ('', toUInt64(65535), [(9223372036854775807, 9223372036854775806)], toUInt64(65536)), arrayJoin((NULL, 65537, 255), [(NULL, NULL)]) GROUP BY tupleElement((NULL, NULL, NULL, -1), toUInt64(2), 2) = NULL SETTINGS enable_analyzer=1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT hex(sipHash128ReferenceKeyed((0::UInt64, 0::UInt64), ([1, 1]))); + +SELECT 'Test arrays and maps'; +DROP TABLE IF EXISTS sipHashKeyed_keys; +CREATE TABLE sipHashKeyed_keys (`a` Map(String, String)) ENGINE = Memory; +INSERT INTO sipHashKeyed_keys FORMAT VALUES ({'a':'b', 'c':'d'}), ({'e':'f', 'g':'h'}); + +SELECT hex(sipHash128ReferenceKeyed((0::UInt64, materialize(0::UInt64)), a)) FROM sipHashKeyed_keys ORDER BY a; +DROP TABLE sipHashKeyed_keys; + +SELECT 'Test empty arrays and maps'; +SELECT sipHash64Keyed((1::UInt64, 2::UInt64), []); +SELECT hex(sipHash128Keyed((1::UInt64, 2::UInt64), [])); +SELECT sipHash64Keyed((1::UInt64, 
2::UInt64), mapFromArrays([], [])); +SELECT hex(sipHash128Keyed((1::UInt64, 2::UInt64), mapFromArrays([], []))); +SELECT 'Test maps with arrays as keys'; +SELECT sipHash64Keyed((1::UInt64, 2::UInt64), map([0], 1, [2], 3)); +SELECT hex(sipHash128Keyed((1::UInt64, 2::UInt64), map([0], 1, [2], 3))); +SELECT sipHash64Keyed((materialize(1::UInt64), 2::UInt64), map([0], 1, [2], 3)) FROM numbers(2); +SELECT hex(sipHash128Keyed((materialize(1::UInt64), 2::UInt64), map([0], 1, [2], 3))) FROM numbers(2); diff --git a/parser/testdata/02534_s3_cluster_insert_select_schema_inference/ast.json b/parser/testdata/02534_s3_cluster_insert_select_schema_inference/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02534_s3_cluster_insert_select_schema_inference/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02534_s3_cluster_insert_select_schema_inference/metadata.json b/parser/testdata/02534_s3_cluster_insert_select_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02534_s3_cluster_insert_select_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02534_s3_cluster_insert_select_schema_inference/query.sql b/parser/testdata/02534_s3_cluster_insert_select_schema_inference/query.sql new file mode 100644 index 000000000..41278b0c1 --- /dev/null +++ b/parser/testdata/02534_s3_cluster_insert_select_schema_inference/query.sql @@ -0,0 +1,9 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +drop table if exists test; +create table test (x UInt32, y UInt32, z UInt32) engine=Memory(); +insert into test select * from s3Cluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/a.tsv'); +select * from test; +drop table test; + diff --git a/parser/testdata/02534_s3_heap_use_after_free/ast.json b/parser/testdata/02534_s3_heap_use_after_free/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02534_s3_heap_use_after_free/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02534_s3_heap_use_after_free/metadata.json b/parser/testdata/02534_s3_heap_use_after_free/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02534_s3_heap_use_after_free/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02534_s3_heap_use_after_free/query.sql b/parser/testdata/02534_s3_heap_use_after_free/query.sql new file mode 100644 index 000000000..b9f815e5a --- /dev/null +++ b/parser/testdata/02534_s3_heap_use_after_free/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +select * from s3('http://localhost:11111/test/a.tsv', CustomSeparated); + diff --git a/parser/testdata/02535_analyzer_group_by_use_nulls/ast.json b/parser/testdata/02535_analyzer_group_by_use_nulls/ast.json new file mode 100644 index 000000000..4d7389516 --- /dev/null +++ b/parser/testdata/02535_analyzer_group_by_use_nulls/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001129359, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02535_analyzer_group_by_use_nulls/metadata.json b/parser/testdata/02535_analyzer_group_by_use_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02535_analyzer_group_by_use_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02535_analyzer_group_by_use_nulls/query.sql b/parser/testdata/02535_analyzer_group_by_use_nulls/query.sql new file mode 100644 index 000000000..ca89db9c1 --- /dev/null +++ b/parser/testdata/02535_analyzer_group_by_use_nulls/query.sql @@ -0,0 +1,130 @@ +SET enable_analyzer=1; + +-- { echoOn } +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY ROLLUP(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; + +set optimize_group_by_function_keys = 0; + +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY ROLLUP(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; + +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY ROLLUP(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=0; + +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY CUBE(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; + +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY CUBE(number, number % 2) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=0; + +SELECT + number, + number % 2, + sum(number) AS val +FROM numbers(10) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls = 1; + +SELECT + number, + number % 2, + sum(number) AS val +FROM numbers(10) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls = 0; + +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY ROLLUP(number, number % 2) WITH TOTALS +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; + +SELECT number, number % 2, sum(number) AS val +FROM numbers(10) +GROUP BY CUBE(number, number % 2) WITH TOTALS +ORDER BY (number, number % 2, val) +SETTINGS group_by_use_nulls=1; + +SELECT + number, + number % 2, + sum(number) AS val +FROM numbers(10) +GROUP BY + GROUPING SETS ( + (number), + (number % 2) + ) +ORDER BY 1, tuple(val) +SETTINGS group_by_use_nulls = 1, max_bytes_before_external_sort=10, max_bytes_ratio_before_external_sort=0; + +CREATE TABLE test +ENGINE = ReplacingMergeTree +PRIMARY KEY id +AS SELECT number AS id FROM numbers(100); + +SELECT id +FROM test +GROUP BY id + WITH CUBE +HAVING id IN ( + SELECT id + FROM test +) +FORMAT `NUll` +SETTINGS enable_analyzer = 1, group_by_use_nulls = true; + +SELECT id +FROM test +FINAL +GROUP BY id + WITH CUBE +HAVING id IN ( + SELECT DISTINCT id + FROM test + FINAL +) +FORMAT `NUll` +SETTINGS enable_analyzer = 1, group_by_use_nulls = true; + +SELECT id +FROM test +FINAL +GROUP BY + GROUPING SETS ((id)) +ORDER BY + id IN ( + SELECT DISTINCT id + FROM test + FINAL + LIMIT 4 + ) ASC +LIMIT 256 BY id +FORMAT `NUll` +SETTINGS enable_analyzer = 1, group_by_use_nulls=true; diff --git a/parser/testdata/02535_analyzer_limit_offset/ast.json b/parser/testdata/02535_analyzer_limit_offset/ast.json new file mode 100644 index 000000000..bf2ae5b15 --- /dev/null +++ b/parser/testdata/02535_analyzer_limit_offset/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000844992, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git 
a/parser/testdata/02535_analyzer_limit_offset/metadata.json b/parser/testdata/02535_analyzer_limit_offset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02535_analyzer_limit_offset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02535_analyzer_limit_offset/query.sql b/parser/testdata/02535_analyzer_limit_offset/query.sql new file mode 100644 index 000000000..96aef9557 --- /dev/null +++ b/parser/testdata/02535_analyzer_limit_offset/query.sql @@ -0,0 +1,3 @@ +SET enable_analyzer = 1; + +SELECT number FROM numbers(100) LIMIT 10 OFFSET 10; diff --git a/parser/testdata/02535_ip_parser_not_whole/ast.json b/parser/testdata/02535_ip_parser_not_whole/ast.json new file mode 100644 index 000000000..e238a29e2 --- /dev/null +++ b/parser/testdata/02535_ip_parser_not_whole/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier CSVWithNamesAndTypes" + }, + { + "explain": " Literal 'ip,port\\nIPv6,UInt16\\n::1,42\\n'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001436187, + "rows_read": 12, + "bytes_read": 491 + } +} diff --git a/parser/testdata/02535_ip_parser_not_whole/metadata.json b/parser/testdata/02535_ip_parser_not_whole/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02535_ip_parser_not_whole/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02535_ip_parser_not_whole/query.sql b/parser/testdata/02535_ip_parser_not_whole/query.sql new file mode 100644 index 000000000..675707d19 --- /dev/null +++ b/parser/testdata/02535_ip_parser_not_whole/query.sql @@ -0,0 +1,3 @@ +SELECT * FROM format(CSVWithNamesAndTypes, 'ip,port\nIPv6,UInt16\n::1,42\n'); +SELECT * FROM format(TSVWithNamesAndTypes, 'ip\tport\nIPv6\tUInt16\n::1\t42\n'); +SELECT * FROM format(JSONCompactEachRowWithNamesAndTypes, '["ip","port"]\n["IPv6","UInt16"]\n["::1",42]\n'); diff --git a/parser/testdata/02536_date_from_number_inference_fix/ast.json b/parser/testdata/02536_date_from_number_inference_fix/ast.json new file mode 100644 index 000000000..499efc35f --- /dev/null +++ b/parser/testdata/02536_date_from_number_inference_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001332623, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02536_date_from_number_inference_fix/metadata.json b/parser/testdata/02536_date_from_number_inference_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02536_date_from_number_inference_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02536_date_from_number_inference_fix/query.sql b/parser/testdata/02536_date_from_number_inference_fix/query.sql new file mode 100644 index 000000000..b32ea4ebe --- 
/dev/null +++ b/parser/testdata/02536_date_from_number_inference_fix/query.sql @@ -0,0 +1,5 @@ +set input_format_json_try_infer_numbers_from_strings=1; +desc format(JSONEachRow, '{"x" : "20000101"}'); +select * from format(JSONEachRow, '{"x" : "20000101"}'); +select * from format(JSONEachRow, '{"x" : "19000101"}'); + diff --git a/parser/testdata/02536_delta_gorilla_corruption/ast.json b/parser/testdata/02536_delta_gorilla_corruption/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02536_delta_gorilla_corruption/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02536_delta_gorilla_corruption/metadata.json b/parser/testdata/02536_delta_gorilla_corruption/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02536_delta_gorilla_corruption/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02536_delta_gorilla_corruption/query.sql b/parser/testdata/02536_delta_gorilla_corruption/query.sql new file mode 100644 index 000000000..3accc726d --- /dev/null +++ b/parser/testdata/02536_delta_gorilla_corruption/query.sql @@ -0,0 +1,39 @@ +-- Tags: no-asan +-- no-asan: the flaky check complains that the test sometimes runs > 60 sec on asan builds + +set allow_suspicious_codecs=1; + +select 'Original bug: the same query executed multiple times yielded different results.'; +select 'For unclear reasons this happened only in Release builds, not in Debug builds.'; + +drop table if exists bug_delta_gorilla; + +create table bug_delta_gorilla +(value_bug UInt64 codec (Delta, Gorilla)) +engine = MergeTree +order by tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi' +as (select 0 from numbers(20000000)); + +select count(*) +from bug_delta_gorilla +where 0 <> value_bug; + +select count(*) +from bug_delta_gorilla +where 0 <> value_bug; + +select count(*) +from bug_delta_gorilla +where 0 <> value_bug; + +drop table if exists bug_delta_gorilla; + +select 'The same issue in a much smaller repro happens also in Debug builds'; + +create table bug_delta_gorilla (val UInt64 codec (Delta, Gorilla)) +engine = MergeTree +order by val SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into bug_delta_gorilla values (0)(1)(3); +select * from bug_delta_gorilla; + +drop table if exists bug_delta_gorilla; diff --git a/parser/testdata/02536_distributed_detach_table/ast.json b/parser/testdata/02536_distributed_detach_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02536_distributed_detach_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02536_distributed_detach_table/metadata.json b/parser/testdata/02536_distributed_detach_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02536_distributed_detach_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02536_distributed_detach_table/query.sql b/parser/testdata/02536_distributed_detach_table/query.sql new file mode 100644 index 000000000..92bee1ee5 --- /dev/null +++ b/parser/testdata/02536_distributed_detach_table/query.sql @@ -0,0 +1,16 @@ +-- test detach distributed table with pending files +CREATE TABLE test_02536 (n Int8) ENGINE=MergeTree() ORDER BY tuple(); +CREATE TABLE test_dist_02536 (n Int8) ENGINE=Distributed(test_cluster_two_shards, currentDatabase(), test_02536, rand()); +SYSTEM STOP DISTRIBUTED SENDS test_dist_02536; + +INSERT INTO 
test_dist_02536 SELECT number FROM numbers(5) SETTINGS prefer_localhost_replica=0; +SELECT count(n), sum(n) FROM test_dist_02536; -- 0 0 + +DETACH TABLE test_dist_02536; +ATTACH TABLE test_dist_02536; + +SYSTEM FLUSH DISTRIBUTED test_dist_02536; + +SELECT count(n), sum(n) FROM test_dist_02536; -- 10 20 +DROP TABLE test_02536; +DROP TABLE test_dist_02536; diff --git a/parser/testdata/02536_replace_with_nonconst_needle_and_replacement/ast.json b/parser/testdata/02536_replace_with_nonconst_needle_and_replacement/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02536_replace_with_nonconst_needle_and_replacement/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02536_replace_with_nonconst_needle_and_replacement/metadata.json b/parser/testdata/02536_replace_with_nonconst_needle_and_replacement/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02536_replace_with_nonconst_needle_and_replacement/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02536_replace_with_nonconst_needle_and_replacement/query.sql b/parser/testdata/02536_replace_with_nonconst_needle_and_replacement/query.sql new file mode 100644 index 000000000..26473521b --- /dev/null +++ b/parser/testdata/02536_replace_with_nonconst_needle_and_replacement/query.sql @@ -0,0 +1,99 @@ +-- Tests that functions replaceOne(), replaceAll(), replaceRegexpOne(), replaceRegexpAll() work with non-const pattern and replacement arguments + +DROP TABLE IF EXISTS test_tab; + +CREATE TABLE test_tab + (id UInt32, haystack String, needle String, replacement String) + engine = MergeTree() + ORDER BY id; + +INSERT INTO test_tab VALUES (1, 'Hello World', 'l', 'xx') (2, 'Hello World', 'll', 'x') (3, 'Hello World', 'not_found', 'x') (4, 'Hello World', '[eo]', 'x') (5, 'Hello World', '.', 'x'); + + +SELECT '** replaceAll() **'; + +SELECT '- non-const needle, const replacement'; +SELECT id, haystack, needle, 'x', replaceAll(haystack, needle, 'x') FROM test_tab ORDER BY id; +SELECT id, haystack, needle, 'x', replaceAll('Hello World', needle, 'x') FROM test_tab ORDER BY id; + +SELECT '- const needle, non-const replacement'; +SELECT id, haystack, 'l', replacement, replaceAll(haystack, 'l', replacement) FROM test_tab ORDER BY id; +SELECT id, haystack, 'l', replacement, replaceAll('Hello World', 'l', replacement) FROM test_tab ORDER BY id; + +SELECT '- non-const needle, non-const replacement'; +SELECT id, haystack, needle, replacement, replaceAll(haystack, needle, replacement) FROM test_tab ORDER BY id; +SELECT id, haystack, needle, replacement, replaceAll('Hello World', needle, replacement) FROM test_tab ORDER BY id; + + +SELECT '** replaceOne() **'; + +SELECT '- non-const needle, const replacement'; +SELECT id, haystack, needle, 'x', replaceOne(haystack, needle, 'x') FROM test_tab ORDER BY id; +SELECT id, haystack, needle, 'x', replaceOne('Hello World', needle, 'x') FROM test_tab ORDER BY id; + +SELECT '- const needle, non-const replacement'; +SELECT id, haystack, 'l', replacement, replaceOne(haystack, 'l', replacement) FROM test_tab ORDER BY id; +SELECT id, haystack, 'l', replacement, replaceOne('Hello World', 'l', replacement) FROM test_tab ORDER BY id; + +SELECT '- non-const needle, non-const replacement'; +SELECT id, haystack, needle, replacement, replaceOne(haystack, needle, replacement) FROM test_tab ORDER BY id; +SELECT id, haystack, needle, replacement, replaceOne('Hello World', needle, replacement) FROM test_tab ORDER BY
id; + +SELECT '** replaceRegexpAll() **'; + +SELECT '- non-const needle, const replacement'; +SELECT id, haystack, needle, 'x', replaceRegexpAll(haystack, needle, 'x') FROM test_tab ORDER BY id; +SELECT id, haystack, needle, 'x', replaceRegexpAll('Hello World', needle, 'x') FROM test_tab ORDER BY id; + +SELECT '- const needle, non-const replacement'; +SELECT id, haystack, 'l', replacement, replaceRegexpAll(haystack, 'l', replacement) FROM test_tab ORDER BY id; +SELECT id, haystack, 'l', replacement, replaceRegexpAll('Hello World', 'l', replacement) FROM test_tab ORDER BY id; + +SELECT '- non-const needle, non-const replacement'; +SELECT id, haystack, needle, replacement, replaceRegexpAll(haystack, needle, replacement) FROM test_tab ORDER BY id; +SELECT id, haystack, needle, replacement, replaceRegexpAll('Hello World', needle, replacement) FROM test_tab ORDER BY id; + +SELECT '** replaceRegexpOne() **'; + +SELECT '- non-const needle, const replacement'; +SELECT id, haystack, needle, 'x', replaceRegexpOne(haystack, needle, 'x') FROM test_tab ORDER BY id; +SELECT id, haystack, needle, 'x', replaceRegexpOne('Hello World', needle, 'x') FROM test_tab ORDER BY id; + +SELECT '- const needle, non-const replacement'; +SELECT id, haystack, 'l', replacement, replaceRegexpOne(haystack, 'l', replacement) FROM test_tab ORDER BY id; +SELECT id, haystack, 'l', replacement, replaceRegexpOne('Hello World', 'l', replacement) FROM test_tab ORDER BY id; + +SELECT '- non-const needle, non-const replacement'; +SELECT id, haystack, needle, replacement, replaceRegexpOne(haystack, needle, replacement) FROM test_tab ORDER BY id; +SELECT id, haystack, needle, replacement, replaceRegexpOne('Hello World', needle, replacement) FROM test_tab ORDER BY id; + +DROP TABLE IF EXISTS test_tab; + +SELECT 'Empty needles do not throw an exception'; + +CREATE TABLE test_tab + (id UInt32, haystack String, needle String, replacement String) + engine = MergeTree() + ORDER BY id; + +INSERT INTO test_tab VALUES (1, 'Hello World', 'l', 'x') (2, 'Hello World', '', 'y'); + +SELECT '- non-const needle, const replacement'; +SELECT replaceAll(haystack, needle, 'x') FROM test_tab; +SELECT replaceOne(haystack, needle, 'x') FROM test_tab; +SELECT replaceRegexpAll(haystack, needle, 'x') FROM test_tab; +SELECT replaceRegexpOne(haystack, needle, 'x') FROM test_tab; + +SELECT '- const needle, non-const replacement'; +SELECT replaceAll(haystack, '', replacement) FROM test_tab; +SELECT replaceOne(haystack, '', replacement) FROM test_tab; +SELECT replaceRegexpAll(haystack, '', replacement) FROM test_tab; +SELECT replaceRegexpOne(haystack, '', replacement) FROM test_tab; + +SELECT '- non-const needle, non-const replacement'; +SELECT replaceAll(haystack, needle, replacement) FROM test_tab; +SELECT replaceOne(haystack, needle, replacement) FROM test_tab; +SELECT replaceRegexpAll(haystack, needle, replacement) FROM test_tab; +SELECT replaceRegexpOne(haystack, needle, replacement) FROM test_tab; + +DROP TABLE IF EXISTS test_tab; diff --git a/parser/testdata/02536_system_sync_file_cache/ast.json b/parser/testdata/02536_system_sync_file_cache/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02536_system_sync_file_cache/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02536_system_sync_file_cache/metadata.json b/parser/testdata/02536_system_sync_file_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02536_system_sync_file_cache/metadata.json @@ 
-0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02536_system_sync_file_cache/query.sql b/parser/testdata/02536_system_sync_file_cache/query.sql new file mode 100644 index 000000000..8a5ee31ec --- /dev/null +++ b/parser/testdata/02536_system_sync_file_cache/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest, no-parallel +-- no-fasttest: Will perform 'sync' syscall (it can take time) +system sync file cache; diff --git a/parser/testdata/02537_system_formats/ast.json b/parser/testdata/02537_system_formats/ast.json new file mode 100644 index 000000000..5a66c6dc3 --- /dev/null +++ b/parser/testdata/02537_system_formats/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.formats" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier name" + }, + { + "explain": " Literal Tuple_('CSV', 'Native')" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier name" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001206885, + "rows_read": 16, + "bytes_read": 599 + } +} diff --git a/parser/testdata/02537_system_formats/metadata.json b/parser/testdata/02537_system_formats/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02537_system_formats/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02537_system_formats/query.sql b/parser/testdata/02537_system_formats/query.sql new file mode 100644 index 000000000..7a09daf32 --- /dev/null +++ b/parser/testdata/02537_system_formats/query.sql @@ -0,0 +1 @@ +SELECT * FROM system.formats WHERE name IN ('CSV', 'Native') ORDER BY name; diff --git a/parser/testdata/02538_alter_rename_sequence/ast.json b/parser/testdata/02538_alter_rename_sequence/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02538_alter_rename_sequence/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02538_alter_rename_sequence/metadata.json b/parser/testdata/02538_alter_rename_sequence/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02538_alter_rename_sequence/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02538_alter_rename_sequence/query.sql b/parser/testdata/02538_alter_rename_sequence/query.sql new file mode 100644 index 000000000..d5e987951 --- /dev/null +++ b/parser/testdata/02538_alter_rename_sequence/query.sql @@ -0,0 +1,61 @@ +-- Tags: no-shared-merge-tree +-- Stop replication queues +DROP TABLE IF EXISTS wrong_metadata; + +CREATE TABLE wrong_metadata( + column1 UInt64, + column2 UInt64, + column3 UInt64 +) +ENGINE ReplicatedMergeTree('/test/{database}/tables/wrong_metadata', '1') +ORDER BY tuple(); + +INSERT INTO wrong_metadata VALUES (1, 2, 3); + +SYSTEM STOP REPLICATION QUEUES wrong_metadata; + +ALTER TABLE wrong_metadata RENAME COLUMN column1 TO column1_renamed SETTINGS 
replication_alter_partitions_sync = 0; + +INSERT INTO wrong_metadata VALUES (4, 5, 6); + +SELECT * FROM wrong_metadata ORDER BY column1; + +SYSTEM START REPLICATION QUEUES wrong_metadata; + +SYSTEM SYNC REPLICA wrong_metadata; + +ALTER TABLE wrong_metadata RENAME COLUMN column2 to column2_renamed SETTINGS replication_alter_partitions_sync = 2; + +SELECT * FROM wrong_metadata ORDER BY column1_renamed FORMAT JSONEachRow; + +DROP TABLE IF EXISTS wrong_metadata; + + +CREATE TABLE wrong_metadata_wide( + column1 UInt64, + column2 UInt64, + column3 UInt64 +) +ENGINE ReplicatedMergeTree('/test/{database}/tables/wrong_metadata_wide', '1') +ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO wrong_metadata_wide VALUES (1, 2, 3); + +SYSTEM STOP REPLICATION QUEUES wrong_metadata_wide; + +ALTER TABLE wrong_metadata_wide RENAME COLUMN column1 TO column1_renamed SETTINGS replication_alter_partitions_sync = 0; + +INSERT INTO wrong_metadata_wide VALUES (4, 5, 6); + +SELECT * FROM wrong_metadata_wide ORDER by column1; + +SYSTEM START REPLICATION QUEUES wrong_metadata_wide; + +SYSTEM SYNC REPLICA wrong_metadata_wide; + +ALTER TABLE wrong_metadata_wide RENAME COLUMN column2 to column2_renamed SETTINGS replication_alter_partitions_sync = 2; + +SELECT * FROM wrong_metadata_wide ORDER BY column1_renamed FORMAT JSONEachRow; + +DROP TABLE IF EXISTS wrong_metadata_wide; diff --git a/parser/testdata/02538_analyzer_create_table_as_select/ast.json b/parser/testdata/02538_analyzer_create_table_as_select/ast.json new file mode 100644 index 000000000..85e9748da --- /dev/null +++ b/parser/testdata/02538_analyzer_create_table_as_select/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001539211, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02538_analyzer_create_table_as_select/metadata.json b/parser/testdata/02538_analyzer_create_table_as_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02538_analyzer_create_table_as_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02538_analyzer_create_table_as_select/query.sql b/parser/testdata/02538_analyzer_create_table_as_select/query.sql new file mode 100644 index 000000000..16634e996 --- /dev/null +++ b/parser/testdata/02538_analyzer_create_table_as_select/query.sql @@ -0,0 +1,18 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table_data; +CREATE TABLE test_table_data +( + id UInt64, + value String +) ENGINE=MergeTree() ORDER BY id; + +INSERT INTO test_table_data VALUES (0, 'Value'); + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table ENGINE=MergeTree() ORDER BY tuple() AS SELECT * FROM test_table_data; + +SELECT * FROM test_table; + +DROP TABLE test_table_data; +DROP TABLE test_table; diff --git a/parser/testdata/02538_ngram_bf_index_with_null/ast.json b/parser/testdata/02538_ngram_bf_index_with_null/ast.json new file mode 100644 index 000000000..eaa55e6ea --- /dev/null +++ b/parser/testdata/02538_ngram_bf_index_with_null/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02538_bf_ngrambf_map_values_test (children 1)" + }, + { + "explain": " Identifier 02538_bf_ngrambf_map_values_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001411937, + "rows_read": 2, + "bytes_read": 116 + } +} 
diff --git a/parser/testdata/02538_ngram_bf_index_with_null/metadata.json b/parser/testdata/02538_ngram_bf_index_with_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02538_ngram_bf_index_with_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02538_ngram_bf_index_with_null/query.sql b/parser/testdata/02538_ngram_bf_index_with_null/query.sql new file mode 100644 index 000000000..b53c219ff --- /dev/null +++ b/parser/testdata/02538_ngram_bf_index_with_null/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS 02538_bf_ngrambf_map_values_test; + +CREATE TABLE 02538_bf_ngrambf_map_values_test (`row_id` Int128, `map` Map(String, String), `map_fixed` Map(FixedString(2), String), +INDEX map_values_ngrambf mapKeys(map) TYPE ngrambf_v1(4, 256, 2, 0) GRANULARITY 1, +INDEX map_fixed_values_ngrambf mapKeys(map_fixed) TYPE ngrambf_v1(4, 256, 2, 0) GRANULARITY 1) +ENGINE = MergeTree +ORDER BY row_id +SETTINGS index_granularity = 1; + +INSERT INTO 02538_bf_ngrambf_map_values_test VALUES (1, {'a': 'a'}, {'b': 'b'}); + +SELECT * FROM 02538_bf_ngrambf_map_values_test PREWHERE (map['']) = 'V2V\0V2V2V2V2V2V2' WHERE (map[NULL]) = 'V2V\0V2V2V2V2V2V2V2V\0V2V2V2V2V2V2V2V\0V2V2V2V2V2V2V2V\0V2V2V2V2V2V2' SETTINGS force_data_skipping_indices = 'map_values_ngrambf'; + +DROP TABLE 02538_bf_ngrambf_map_values_test; diff --git a/parser/testdata/02538_nullable_array_tuple_timeseries/ast.json b/parser/testdata/02538_nullable_array_tuple_timeseries/ast.json new file mode 100644 index 000000000..fd85559ba --- /dev/null +++ b/parser/testdata/02538_nullable_array_tuple_timeseries/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tbl (children 1)" + }, + { + "explain": " Identifier tbl" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001226682, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02538_nullable_array_tuple_timeseries/metadata.json b/parser/testdata/02538_nullable_array_tuple_timeseries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02538_nullable_array_tuple_timeseries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02538_nullable_array_tuple_timeseries/query.sql b/parser/testdata/02538_nullable_array_tuple_timeseries/query.sql new file mode 100644 index 000000000..26451c93e --- /dev/null +++ b/parser/testdata/02538_nullable_array_tuple_timeseries/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS tbl; + +-- Checks that (floating-point) time series codecs can be combined +-- with Nullable and +-- with composite types Array and Tuple + +CREATE TABLE tbl ( + -- Nullable + v1_gor Nullable(Float64) CODEC(Gorilla), + v1_fpc Nullable(Float64) CODEC(FPC), + -- Array + v2_gor Array(Float64) CODEC(Gorilla), + v2_fpc Array(Float64) CODEC(FPC), + v3_gor Array(Array(Float64)) CODEC(Gorilla), + v3_fpc Array(Array(Float64)) CODEC(FPC), + v4_gor Array(Nullable(Float64)) CODEC(Gorilla), + v4_fpc Array(Nullable(Float64)) CODEC(FPC), + v5_gor Array(Tuple(Float64)) CODEC(Gorilla), + v5_fpc Array(Tuple(Float64)) CODEC(FPC), + -- Tuple + v6_gor Tuple(Float64) CODEC(Gorilla), + v6_fpc Tuple(Float64) CODEC(FPC), + v7_gor Tuple(Tuple(Float64)) CODEC(Gorilla), + v7_fpc Tuple(Tuple(Float64)) CODEC(FPC), + v8_gor Tuple(Nullable(Float64)) CODEC(Gorilla), + v8_fpc Tuple(Nullable(Float64)) CODEC(FPC), + v9_gor Tuple(Array(Float64)) CODEC(Gorilla), + v9_fpc 
Tuple(Array(Float64)) CODEC(FPC), +) Engine = MergeTree ORDER BY tuple(); + +DROP TABLE IF EXISTS tbl; diff --git a/parser/testdata/02539_generate_random_ip/ast.json b/parser/testdata/02539_generate_random_ip/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02539_generate_random_ip/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02539_generate_random_ip/metadata.json b/parser/testdata/02539_generate_random_ip/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02539_generate_random_ip/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02539_generate_random_ip/query.sql b/parser/testdata/02539_generate_random_ip/query.sql new file mode 100644 index 000000000..597b3a5be --- /dev/null +++ b/parser/testdata/02539_generate_random_ip/query.sql @@ -0,0 +1,2 @@ +-- Check that the function works for IPv4 and IPv6 and gives at least something plausible: +SELECT uniq(v4) > 1000, uniq(v6) > 1000 FROM (SELECT * FROM generateRandom('v4 IPv4, v6 IPv6') LIMIT 100000); diff --git a/parser/testdata/02539_generate_random_low_cardinality/ast.json b/parser/testdata/02539_generate_random_low_cardinality/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02539_generate_random_low_cardinality/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02539_generate_random_low_cardinality/metadata.json b/parser/testdata/02539_generate_random_low_cardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02539_generate_random_low_cardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02539_generate_random_low_cardinality/query.sql b/parser/testdata/02539_generate_random_low_cardinality/query.sql new file mode 100644 index 000000000..c524d2ea5 --- /dev/null +++ b/parser/testdata/02539_generate_random_low_cardinality/query.sql @@ -0,0 +1,2 @@ +-- Check that the function works for LowCardinality and gives at least something plausible: +SELECT uniq(x) > 1000 FROM (SELECT * FROM generateRandom('x Array(LowCardinality(Nullable(String)))') LIMIT 100000); diff --git a/parser/testdata/02539_generate_random_map/ast.json b/parser/testdata/02539_generate_random_map/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02539_generate_random_map/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02539_generate_random_map/metadata.json b/parser/testdata/02539_generate_random_map/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02539_generate_random_map/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02539_generate_random_map/query.sql b/parser/testdata/02539_generate_random_map/query.sql new file mode 100644 index 000000000..a4b25ea30 --- /dev/null +++ b/parser/testdata/02539_generate_random_map/query.sql @@ -0,0 +1,17 @@ +-- Check that max length works + +SELECT max(length(mapKeys(a))) +FROM +( + SELECT a + FROM generateRandom('a Map(String, String)', 20, 5, 1) + LIMIT 1000 +); + +SELECT max(length(mapKeys(a))) +FROM +( + SELECT a + FROM generateRandom('a Map(String, String)', 20, 5, 20) + LIMIT 1000 +); diff --git a/parser/testdata/02539_vertical_merge_compact_parts/ast.json b/parser/testdata/02539_vertical_merge_compact_parts/ast.json new file mode 100644 index 000000000..a21c8e2f5 --- /dev/null +++
b/parser/testdata/02539_vertical_merge_compact_parts/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_compact_vertical_merge (children 1)" + }, + { + "explain": " Identifier t_compact_vertical_merge" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001419872, + "rows_read": 2, + "bytes_read": 100 + } +} diff --git a/parser/testdata/02539_vertical_merge_compact_parts/metadata.json b/parser/testdata/02539_vertical_merge_compact_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02539_vertical_merge_compact_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02539_vertical_merge_compact_parts/query.sql b/parser/testdata/02539_vertical_merge_compact_parts/query.sql new file mode 100644 index 000000000..6cb9faeb5 --- /dev/null +++ b/parser/testdata/02539_vertical_merge_compact_parts/query.sql @@ -0,0 +1,45 @@ +DROP TABLE IF EXISTS t_compact_vertical_merge; + +CREATE TABLE t_compact_vertical_merge (id UInt64, s LowCardinality(String), arr Array(UInt64)) +ENGINE MergeTree ORDER BY id +SETTINGS + index_granularity = 16, + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 100, + vertical_merge_algorithm_min_rows_to_activate = 1, + vertical_merge_algorithm_min_columns_to_activate = 1, + allow_vertical_merges_from_compact_to_wide_parts = 1, + min_bytes_for_full_part_storage = 0; + +INSERT INTO t_compact_vertical_merge SELECT number, toString(number), range(number % 10) FROM numbers(40); +INSERT INTO t_compact_vertical_merge SELECT number, toString(number), range(number % 10) FROM numbers(40); + +OPTIMIZE TABLE t_compact_vertical_merge FINAL; +SYSTEM FLUSH LOGS part_log; + +WITH splitByChar('_', part_name) AS name_parts, + name_parts[2]::UInt64 AS min_block, + name_parts[3]::UInt64 AS max_block +SELECT min_block, max_block, event_type, merge_algorithm, part_type FROM system.part_log +WHERE + database = currentDatabase() AND + table = 't_compact_vertical_merge' AND + min_block = 1 AND max_block = 2 +ORDER BY event_time_microseconds; + +INSERT INTO t_compact_vertical_merge SELECT number, toString(number), range(number % 10) FROM numbers(40); + +OPTIMIZE TABLE t_compact_vertical_merge FINAL; +SYSTEM FLUSH LOGS part_log; + +WITH splitByChar('_', part_name) AS name_parts, + name_parts[2]::UInt64 AS min_block, + name_parts[3]::UInt64 AS max_block +SELECT min_block, max_block, event_type, merge_algorithm, part_type FROM system.part_log +WHERE + database = currentDatabase() AND + table = 't_compact_vertical_merge' AND + min_block = 1 AND max_block = 3 +ORDER BY event_time_microseconds; + +DROP TABLE t_compact_vertical_merge; diff --git a/parser/testdata/02540_analyzer_matcher_alias_materialized_columns/ast.json b/parser/testdata/02540_analyzer_matcher_alias_materialized_columns/ast.json new file mode 100644 index 000000000..53929658f --- /dev/null +++ b/parser/testdata/02540_analyzer_matcher_alias_materialized_columns/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00149705, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02540_analyzer_matcher_alias_materialized_columns/metadata.json b/parser/testdata/02540_analyzer_matcher_alias_materialized_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02540_analyzer_matcher_alias_materialized_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02540_analyzer_matcher_alias_materialized_columns/query.sql b/parser/testdata/02540_analyzer_matcher_alias_materialized_columns/query.sql new file mode 100644 index 000000000..58840796c --- /dev/null +++ b/parser/testdata/02540_analyzer_matcher_alias_materialized_columns/query.sql @@ -0,0 +1,33 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value_alias ALIAS concat('AliasValue_', toString(id)), + value_materialized MATERIALIZED concat('MaterializedValue_', toString(id)) +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0); + +-- { echoOn } + +SELECT * FROM test_table AS test_table_alias; + +SELECT test_table_alias.* FROM test_table AS test_table_alias; + +SELECT * FROM test_table AS test_table_alias SETTINGS asterisk_include_alias_columns = 1; + +SELECT test_table_alias.* FROM test_table AS test_table_alias SETTINGS asterisk_include_alias_columns = 1; + +SELECT * FROM test_table AS test_table_alias SETTINGS asterisk_include_materialized_columns = 1; + +SELECT test_table_alias.* FROM test_table AS test_table_alias SETTINGS asterisk_include_materialized_columns = 1; + +SELECT * FROM test_table AS test_table_alias SETTINGS asterisk_include_alias_columns = 1, asterisk_include_materialized_columns = 1; + +SELECT test_table_alias.* FROM test_table AS test_table_alias SETTINGS asterisk_include_alias_columns = 1, asterisk_include_materialized_columns = 1; + +-- { echoOff } + +DROP TABLE test_table; diff --git a/parser/testdata/02540_date_column_consistent_insert_behaviour/ast.json b/parser/testdata/02540_date_column_consistent_insert_behaviour/ast.json new file mode 100644 index 000000000..91ce20a01 --- /dev/null +++ b/parser/testdata/02540_date_column_consistent_insert_behaviour/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02540_date (children 1)" + }, + { + "explain": " Identifier 02540_date" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001140568, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02540_date_column_consistent_insert_behaviour/metadata.json b/parser/testdata/02540_date_column_consistent_insert_behaviour/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02540_date_column_consistent_insert_behaviour/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02540_date_column_consistent_insert_behaviour/query.sql b/parser/testdata/02540_date_column_consistent_insert_behaviour/query.sql new file mode 100644 index 000000000..8a5c88fca --- /dev/null +++ b/parser/testdata/02540_date_column_consistent_insert_behaviour/query.sql @@ -0,0 +1,99 @@ +DROP TABLE IF EXISTS 02540_date; +CREATE TABLE 02540_date (txt String, x Date) engine=Memory; + +-- Date: Supported range of values: [1970-01-01, 2149-06-06]. 
+-- ^----closed interval---^ + +INSERT INTO 02540_date VALUES('65535', 65535); +INSERT INTO 02540_date VALUES('toUInt16(65535)', toUInt16(65535)); -- #43370 weird one -> used to be 1970-01-01 +INSERT INTO 02540_date VALUES('toInt32(65535)', toInt32(65535)); +INSERT INTO 02540_date VALUES('toUInt32(65535)', toUInt32(65535)); +INSERT INTO 02540_date VALUES('toDate(65535)', toDate(65535)); + +INSERT INTO 02540_date VALUES('CAST(65535 as UInt16)', CAST(65535 as UInt16)); +INSERT INTO 02540_date VALUES('CAST(65535 as Int32)', CAST(65535 as Int32)); +INSERT INTO 02540_date VALUES('CAST(65535 as UInt32)', CAST(65535 as UInt32)); +INSERT INTO 02540_date VALUES('CAST(65535 as Date)', CAST(65535 as Date)); + +INSERT INTO 02540_date VALUES('65534', 65534); +INSERT INTO 02540_date VALUES('toUInt16(65534)', toUInt16(65534)); +INSERT INTO 02540_date VALUES('toInt32(65534)', toInt32(65534)); +INSERT INTO 02540_date VALUES('toUInt32(65534)', toUInt32(65534)); +INSERT INTO 02540_date VALUES('toDate(65534)', toDate(65534)); + +INSERT INTO 02540_date VALUES('CAST(65534 as UInt16)', CAST(65534 as UInt16)); +INSERT INTO 02540_date VALUES('CAST(65534 as Int32)', CAST(65534 as Int32)); +INSERT INTO 02540_date VALUES('CAST(65534 as UInt32)', CAST(65534 as UInt32)); +INSERT INTO 02540_date VALUES('CAST(65534 as Date)', CAST(65534 as Date)); + +INSERT INTO 02540_date VALUES('0', 0); +INSERT INTO 02540_date VALUES('toUInt16(0)', toUInt16(0)); +INSERT INTO 02540_date VALUES('toInt32(0)', toInt32(0)); +INSERT INTO 02540_date VALUES('toUInt32(0)', toUInt32(0)); +INSERT INTO 02540_date VALUES('toDate(0)', toDate(0)); + +INSERT INTO 02540_date VALUES('CAST(0 as UInt16)', CAST(0 as UInt16)); +INSERT INTO 02540_date VALUES('CAST(0 as Int32)', CAST(0 as Int32)); +INSERT INTO 02540_date VALUES('CAST(0 as UInt32)', CAST(0 as UInt32)); +INSERT INTO 02540_date VALUES('CAST(0 as Date)', CAST(0 as Date)); + + +-- 65536 will be done using the TZ settings (comments in #45914) +-- We can expect either 1970-01-01 or 1970-01-02 +-- time_zone.toDayNum(std::min(time_t(from), time_t(0xFFFFFFFF))) +INSERT INTO 02540_date VALUES('65536', 65536); +INSERT INTO 02540_date VALUES('toUInt16(65536)', toUInt16(65536)); -- Narrowing conversion 65536 ==> 0 +INSERT INTO 02540_date VALUES('toInt32(65536)', toInt32(65536)); +INSERT INTO 02540_date VALUES('toUInt32(65536)', toUInt32(65536)); +INSERT INTO 02540_date VALUES('toDate(65536)', toDate(65536)); + +INSERT INTO 02540_date VALUES('CAST(65536 as UInt16)', CAST(65536 as UInt16)); -- Narrowing conversion 65536 ==> 0 +INSERT INTO 02540_date VALUES('CAST(65536 as Int32)', CAST(65536 as Int32)); +INSERT INTO 02540_date VALUES('CAST(65536 as UInt32)', CAST(65536 as UInt32)); +INSERT INTO 02540_date VALUES('CAST(65536 as Date)', CAST(65536 as Date)); + + +SELECT x, txt FROM 02540_date WHERE txt == '65535'; +SELECT x, txt FROM 02540_date WHERE txt == 'toUInt16(65535)'; +SELECT x, txt FROM 02540_date WHERE txt == 'toInt32(65535)'; +SELECT x, txt FROM 02540_date WHERE txt == 'toUInt32(65535)'; +SELECT x, txt FROM 02540_date WHERE txt == 'toDate(65535)'; + +SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65535 as UInt16)'; +SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65535 as Int32)'; +SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65535 as UInt32)'; +SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65535 as Date)'; + +SELECT x, txt FROM 02540_date WHERE txt == '65534'; +SELECT x, txt FROM 02540_date WHERE txt == 'toUInt16(65534)'; +SELECT x, txt FROM 02540_date WHERE txt == 'toInt32(65534)'; 
+SELECT x, txt FROM 02540_date WHERE txt == 'toUInt32(65534)'; +SELECT x, txt FROM 02540_date WHERE txt == 'toDate(65534)'; + +SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65534 as UInt16)'; +SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65534 as Int32)'; +SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65534 as UInt32)'; +SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65534 as Date)'; + +SELECT x, txt FROM 02540_date WHERE txt == '0'; +SELECT x, txt FROM 02540_date WHERE txt == 'toUInt16(0)'; +SELECT x, txt FROM 02540_date WHERE txt == 'toInt32(0)'; +SELECT x, txt FROM 02540_date WHERE txt == 'toUInt32(0)'; +SELECT x, txt FROM 02540_date WHERE txt == 'toDate(0)'; + +SELECT x, txt FROM 02540_date WHERE txt == 'CAST(0 as UInt16)'; +SELECT x, txt FROM 02540_date WHERE txt == 'CAST(0 as Int32)'; +SELECT x, txt FROM 02540_date WHERE txt == 'CAST(0 as UInt32)'; +SELECT x, txt FROM 02540_date WHERE txt == 'CAST(0 as Date)'; + +SELECT (x == CAST(65536 as Date)), txt FROM 02540_date WHERE txt == '65536'; +SELECT (x == CAST(65536 as Date)), txt FROM 02540_date WHERE txt == 'toInt32(65536)'; +SELECT (x == CAST(65536 as Date)), txt FROM 02540_date WHERE txt == 'toUInt32(65536)'; +SELECT (x == CAST(65536 as Date)), txt FROM 02540_date WHERE txt == 'toDate(65536)'; + +SELECT (x == CAST(65536 as Date)), txt FROM 02540_date WHERE txt == 'CAST(65536 as Int32)'; +SELECT (x == CAST(65536 as Date)), txt FROM 02540_date WHERE txt == 'CAST(65536 as UInt32)'; +SELECT (x == CAST(65536 as Date)), txt FROM 02540_date WHERE txt == 'CAST(65536 as Date)'; + +SELECT x, txt FROM 02540_date WHERE txt == 'toUInt16(65536)'; +SELECT x, txt FROM 02540_date WHERE txt == 'CAST(65536 as UInt16)'; diff --git a/parser/testdata/02540_duplicate_primary_key/ast.json b/parser/testdata/02540_duplicate_primary_key/ast.json new file mode 100644 index 000000000..5b69e2c46 --- /dev/null +++ b/parser/testdata/02540_duplicate_primary_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001303255, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02540_duplicate_primary_key/metadata.json b/parser/testdata/02540_duplicate_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02540_duplicate_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02540_duplicate_primary_key/query.sql b/parser/testdata/02540_duplicate_primary_key/query.sql new file mode 100644 index 000000000..6905c9d51 --- /dev/null +++ b/parser/testdata/02540_duplicate_primary_key/query.sql @@ -0,0 +1,89 @@ +drop table if exists test; + +set allow_suspicious_low_cardinality_types = 1; + +CREATE TABLE test +( + `coverage` DateTime, + `haunt` Nullable(Float32) CODEC(Gorilla, ZSTD(1)), + `sail` Nullable(Float32) CODEC(Gorilla, ZSTD(1)), + `empowerment_turnstile` UInt8, + `empowerment_haversack` Nullable(Int16), + `empowerment_function` Nullable(Int16), + `empowerment_guidance` Nullable(Int32), + `empowerment_high` Nullable(Int32), + `trading_id` Nullable(Int32), + `guidance` Nullable(Int32), + `empowerment_rawhide` Int32, + `memo` Nullable(Int16), + `oeuvre` Nullable(Int16), + `bun` Nullable(Int16), + `tramp` String, + `anthropology_total` Nullable(Float32), + `situation_name` String, + `timing` Nullable(String), + `NAME_cockroach` String, + `NAME_toe` 
String, + `business_error_methane` FixedString(110), + `business_instrumentation_methane` FixedString(15), + `market` UInt8, + `crew_memo` Nullable(Int16), + `crew_oeuvre` Nullable(Int16), + `crew_fortnight` Nullable(Int16), + `princess_memo` Nullable(Int16), + `princess_oeuvre` Nullable(Int16), + `princess_fortnight` Nullable(Int16), + `emerald` Nullable(Float32), + `cannon_crate` Nullable(String), + `thinking` String, + `SectorMen` String, + `rage_name` Nullable(String), + `DevelopmentalLigandName` String, + `chard_heavy_quadrant` UInt64, + `poster_effective` Nullable(String), + PROJECTION chrysalis_trapezium_ham + ( + SELECT + empowerment_turnstile, + toStartOfInterval(coverage, toIntervalMonth(1)), + toStartOfWeek(coverage, 10), + toStartOfInterval(coverage, toIntervalDay(1)), + NAME_toe, + NAME_cockroach, + situation_name, + memo, + oeuvre, + crew_memo, + crew_oeuvre, + bun, + sum(multiIf(crew_memo IS NULL, 0, 1)), + sum(multiIf(crew_oeuvre IS NULL, 0, 1)), + sum(multiIf(crew_fortnight IS NULL, 0, 1)), + max(toStartOfInterval(coverage, toIntervalDay(1))), + max(CAST(CAST(toStartOfInterval(coverage, toIntervalDay(1)), 'Nullable(DATE)'), 'Nullable(TIMESTAMP)')), + min(toStartOfInterval(coverage, toIntervalDay(1))), + min(CAST(CAST(toStartOfInterval(coverage, toIntervalDay(1)), 'Nullable(DATE)'), 'Nullable(TIMESTAMP)')), + count(), + sum(1) + GROUP BY + empowerment_turnstile, + toStartOfInterval(coverage, toIntervalMonth(1)), + toStartOfWeek(coverage, 10), + toStartOfInterval(coverage, toIntervalDay(1)), + empowerment_turnstile, + toStartOfInterval(coverage, toIntervalMonth(1)), + toStartOfWeek(coverage, 10), + toStartOfInterval(coverage, toIntervalDay(1)), + NAME_toe, + NAME_cockroach, + situation_name, + memo, + oeuvre, + crew_memo, + crew_oeuvre, + bun + ) +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(coverage) +ORDER BY (coverage, situation_name, NAME_toe, NAME_cockroach); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02540_duplicate_primary_key2/ast.json b/parser/testdata/02540_duplicate_primary_key2/ast.json new file mode 100644 index 000000000..91ca53b58 --- /dev/null +++ b/parser/testdata/02540_duplicate_primary_key2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001136329, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02540_duplicate_primary_key2/metadata.json b/parser/testdata/02540_duplicate_primary_key2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02540_duplicate_primary_key2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02540_duplicate_primary_key2/query.sql b/parser/testdata/02540_duplicate_primary_key2/query.sql new file mode 100644 index 000000000..53800c955 --- /dev/null +++ b/parser/testdata/02540_duplicate_primary_key2/query.sql @@ -0,0 +1,91 @@ +drop table if exists test; + +set allow_suspicious_low_cardinality_types = 1; + +CREATE TABLE test +( + `timestamp` DateTime, + `latitude` Nullable(Float32) CODEC(Gorilla, ZSTD(1)), + `longitude` Nullable(Float32) CODEC(Gorilla, ZSTD(1)), + `xxxx1` LowCardinality(UInt8), + `xxxx2` LowCardinality(Nullable(Int16)), + `xxxx3` LowCardinality(Nullable(Int16)), + `xxxx4` Nullable(Int32), + `xxxx5` LowCardinality(Nullable(Int32)), + `xxxx6` Nullable(Int32), + `xxxx7` Nullable(Int32), + `xxxx8` 
LowCardinality(Int32), + `xxxx9` LowCardinality(Nullable(Int16)), + `xxxx10` LowCardinality(Nullable(Int16)), + `xxxx11` LowCardinality(Nullable(Int16)), + `xxxx12` LowCardinality(String), + `xxxx13` Nullable(Float32), + `xxxx14` LowCardinality(String), + `xxxx15` LowCardinality(Nullable(String)), + `xxxx16` LowCardinality(String), + `xxxx17` LowCardinality(String), + `xxxx18` FixedString(19), + `xxxx19` FixedString(17), + `xxxx20` LowCardinality(UInt8), + `xxxx21` LowCardinality(Nullable(Int16)), + `xxxx22` LowCardinality(Nullable(Int16)), + `xxxx23` LowCardinality(Nullable(Int16)), + `xxxx24` LowCardinality(Nullable(Int16)), + `xxxx25` LowCardinality(Nullable(Int16)), + `xxxx26` LowCardinality(Nullable(Int16)), + `xxxx27` Nullable(Float32), + `xxxx28` LowCardinality(Nullable(String)), + `xxxx29` LowCardinality(String), + `xxxx30` LowCardinality(String), + `xxxx31` LowCardinality(Nullable(String)), + `xxxx32` UInt64, + PROJECTION cumsum_projection_simple + ( + SELECT + xxxx1, + toStartOfInterval(timestamp, toIntervalMonth(1)), + toStartOfWeek(timestamp, 8), + toStartOfInterval(timestamp, toIntervalDay(1)), + xxxx17, + xxxx16, + xxxx14, + xxxx9, + xxxx10, + xxxx21, + xxxx22, + xxxx11, + sum(multiIf(xxxx21 IS NULL, 0, 1)), + sum(multiIf(xxxx22 IS NULL, 0, 1)), + sum(multiIf(xxxx23 IS NULL, 0, 1)), + max(toStartOfInterval(timestamp, toIntervalDay(1))), + max(CAST(CAST(toStartOfInterval(timestamp, toIntervalDay(1)), 'Nullable(DATE)'), 'Nullable(TIMESTAMP)')), + min(toStartOfInterval(timestamp, toIntervalDay(1))), + min(CAST(CAST(toStartOfInterval(timestamp, toIntervalDay(1)), 'Nullable(DATE)'), 'Nullable(TIMESTAMP)')), + count(), + sum(1), + COUNTDistinct(xxxx16), + COUNTDistinct(xxxx31), + COUNTDistinct(xxxx14), + COUNTDistinct(CAST(toStartOfInterval(timestamp, toIntervalDay(1)), 'Nullable(DATE)')) + GROUP BY + xxxx1, + toStartOfInterval(timestamp, toIntervalMonth(1)), + toStartOfWeek(timestamp, 8), + toStartOfInterval(timestamp, toIntervalDay(1)), + xxxx1, + toStartOfInterval(timestamp, toIntervalMonth(1)), + toStartOfWeek(timestamp, 8), + toStartOfInterval(timestamp, toIntervalDay(1)), + xxxx17, + xxxx16, + xxxx14, + xxxx9, + xxxx10, + xxxx21, + xxxx22, + xxxx11 + ) +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(timestamp) +ORDER BY (xxxx17, xxxx14, xxxx16, toStartOfDay(timestamp), left(xxxx19, 10), timestamp); -- { serverError BAD_ARGUMENTS} diff --git a/parser/testdata/02541_analyzer_grouping_sets_crash_fix/ast.json b/parser/testdata/02541_analyzer_grouping_sets_crash_fix/ast.json new file mode 100644 index 000000000..26a020712 --- /dev/null +++ b/parser/testdata/02541_analyzer_grouping_sets_crash_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001346106, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02541_analyzer_grouping_sets_crash_fix/metadata.json b/parser/testdata/02541_analyzer_grouping_sets_crash_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02541_analyzer_grouping_sets_crash_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02541_analyzer_grouping_sets_crash_fix/query.sql b/parser/testdata/02541_analyzer_grouping_sets_crash_fix/query.sql new file mode 100644 index 000000000..b9aa251bc --- /dev/null +++ b/parser/testdata/02541_analyzer_grouping_sets_crash_fix/query.sql @@ -0,0 +1,5 @@ +SET enable_analyzer = 1; + +WITH 
pow(NULL, 256) AS four SELECT NULL AS two GROUP BY GROUPING SETS ((pow(two, 65536))); + +WITH (SELECT pow(two, 1) GROUP BY GROUPING SETS ((pow(1, 9)))) AS four SELECT 2 AS two GROUP BY pow(1, two); diff --git a/parser/testdata/02541_empty_function_support_ip/ast.json b/parser/testdata/02541_empty_function_support_ip/ast.json new file mode 100644 index 000000000..f98af63e5 --- /dev/null +++ b/parser/testdata/02541_empty_function_support_ip/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function empty (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toIPv6 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '::'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001559958, + "rows_read": 9, + "bytes_read": 340 + } +} diff --git a/parser/testdata/02541_empty_function_support_ip/metadata.json b/parser/testdata/02541_empty_function_support_ip/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02541_empty_function_support_ip/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02541_empty_function_support_ip/query.sql b/parser/testdata/02541_empty_function_support_ip/query.sql new file mode 100644 index 000000000..9362ff10d --- /dev/null +++ b/parser/testdata/02541_empty_function_support_ip/query.sql @@ -0,0 +1,9 @@ +SELECT empty(toIPv6('::')); +SELECT notEmpty(toIPv6('::')); +SELECT empty(toIPv6('::1')); +SELECT notEmpty(toIPv6('::1')); + +SELECT empty(toIPv4('0.0.0.0')); +SELECT notEmpty(toIPv4('0.0.0.0')); +SELECT empty(toIPv4('127.0.0.1')); +SELECT notEmpty(toIPv4('127.0.0.1')); diff --git a/parser/testdata/02541_lightweight_delete_on_cluster/ast.json b/parser/testdata/02541_lightweight_delete_on_cluster/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02541_lightweight_delete_on_cluster/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02541_lightweight_delete_on_cluster/metadata.json b/parser/testdata/02541_lightweight_delete_on_cluster/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02541_lightweight_delete_on_cluster/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02541_lightweight_delete_on_cluster/query.sql b/parser/testdata/02541_lightweight_delete_on_cluster/query.sql new file mode 100644 index 000000000..bac686ff9 --- /dev/null +++ b/parser/testdata/02541_lightweight_delete_on_cluster/query.sql @@ -0,0 +1,21 @@ +-- Tags: distributed, no-replicated-database +-- Tag no-replicated-database: ON CLUSTER is not allowed + +SET distributed_ddl_output_mode='throw'; + +CREATE TABLE t1_local ON CLUSTER test_shard_localhost(partition_col_1 String, tc1 int,tc2 int) ENGINE=MergeTree() PARTITION BY partition_col_1 ORDER BY tc1; + +INSERT INTO t1_local VALUES('partition1', 1,1); +INSERT INTO t1_local VALUES('partition2', 1,2); +INSERT INTO t1_local VALUES('partition1', 2,3); +INSERT INTO t1_local VALUES('partition2', 2,4); + +-- { echoOn } + +SELECT * FROM t1_local ORDER BY tc1, tc2; + +DELETE FROM t1_local ON CLUSTER test_shard_localhost WHERE tc1 = 1; + +SELECT * FROM t1_local ORDER BY tc1, tc2; 
+ +-- { echoOff } diff --git a/parser/testdata/02541_multiple_ignore_with_nested_select/ast.json b/parser/testdata/02541_multiple_ignore_with_nested_select/ast.json new file mode 100644 index 000000000..debd5b752 --- /dev/null +++ b/parser/testdata/02541_multiple_ignore_with_nested_select/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001248008, + "rows_read": 5, + "bytes_read": 169 + } +} diff --git a/parser/testdata/02541_multiple_ignore_with_nested_select/metadata.json b/parser/testdata/02541_multiple_ignore_with_nested_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02541_multiple_ignore_with_nested_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02541_multiple_ignore_with_nested_select/query.sql b/parser/testdata/02541_multiple_ignore_with_nested_select/query.sql new file mode 100644 index 000000000..fe2d99805 --- /dev/null +++ b/parser/testdata/02541_multiple_ignore_with_nested_select/query.sql @@ -0,0 +1,20 @@ +SELECT DISTINCT * +FROM + ( + SELECT DISTINCT * + FROM + ( + SELECT DISTINCT + 0.5, + number % 65536 AS number + FROM numbers(2) + ORDER BY + ignore(ignore(-1, 10.0001)) DESC NULLS LAST, + ignore(2147483648) DESC NULLS FIRST, + ignore(255, 0.0001) ASC, + number ASC + ) + ORDER BY number ASC NULLS FIRST + ) +WHERE ignore(2147483648) +ORDER BY number DESC \ No newline at end of file diff --git a/parser/testdata/02541_tuple_element_with_null/ast.json b/parser/testdata/02541_tuple_element_with_null/ast.json new file mode 100644 index 000000000..e2b4ad1c1 --- /dev/null +++ b/parser/testdata/02541_tuple_element_with_null/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_tuple_element (children 1)" + }, + { + "explain": " Identifier test_tuple_element" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001278197, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/02541_tuple_element_with_null/metadata.json b/parser/testdata/02541_tuple_element_with_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02541_tuple_element_with_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02541_tuple_element_with_null/query.sql b/parser/testdata/02541_tuple_element_with_null/query.sql new file mode 100644 index 000000000..e1581ce37 --- /dev/null +++ b/parser/testdata/02541_tuple_element_with_null/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS test_tuple_element; +CREATE TABLE test_tuple_element +( + tuple Tuple(k1 Nullable(UInt64), k2 UInt64) +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 8192; + +INSERT INTO test_tuple_element VALUES (tuple(1,2)), (tuple(NULL, 3)); + +SELECT + tupleElement(tuple, 'k1', 0) fine_k1_with_0, + tupleElement(tuple, 'k1', NULL) k1_with_null, + tupleElement(tuple, 'k2', 0) k2_with_0, + tupleElement(tuple, 'k2', NULL) k2_with_null +FROM test_tuple_element; + +DROP TABLE test_tuple_element; diff --git a/parser/testdata/02542_case_no_else/ast.json b/parser/testdata/02542_case_no_else/ast.json new 
file mode 100644 index 000000000..cd6b7a706 --- /dev/null +++ b/parser/testdata/02542_case_no_else/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function caseWithExpression (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal NULL" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.00141623, + "rows_read": 10, + "bytes_read": 357 + } +} diff --git a/parser/testdata/02542_case_no_else/metadata.json b/parser/testdata/02542_case_no_else/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02542_case_no_else/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02542_case_no_else/query.sql b/parser/testdata/02542_case_no_else/query.sql new file mode 100644 index 000000000..0c7975a75 --- /dev/null +++ b/parser/testdata/02542_case_no_else/query.sql @@ -0,0 +1,14 @@ +SELECT CASE 1 WHEN 1 THEN 2 END; + +SELECT id, + CASE id + WHEN 1 THEN 'Z' + END x +FROM (SELECT 1 as id); + +SELECT id, + CASE id + WHEN 1 THEN 'Z' + ELSE 'X' + END x +FROM (SELECT 1 as id); diff --git a/parser/testdata/02542_table_function_format/ast.json b/parser/testdata/02542_table_function_format/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02542_table_function_format/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02542_table_function_format/metadata.json b/parser/testdata/02542_table_function_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02542_table_function_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02542_table_function_format/query.sql b/parser/testdata/02542_table_function_format/query.sql new file mode 100644 index 000000000..e32e9001b --- /dev/null +++ b/parser/testdata/02542_table_function_format/query.sql @@ -0,0 +1,36 @@ +desc format(JSONEachRow, +$$ +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +$$); + +desc format(JSONEachRow, 'a String, b Int64', +$$ +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +$$); + +select * from format(JSONEachRow, 'a String, b Int64', +$$ +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +$$); + +desc format(CSV, '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"'); +desc format(CSV, 'a1 Int32, a2 UInt64, a3 Array(Int32), a4 Array(Array(String))', '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"'); +select * from format(CSV, '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"'); +select * from format(CSV, 'a1 Int32, a2 UInt64, a3 Array(Int32), a4 Array(Array(String))', '1,2,"[1,2,3]","[[\'abc\'], [], [\'d\', \'e\']]"'); + +drop table if exists test; + +create table test as format(TSV, 'cust_id UInt128', '20210129005809043707\n123456789\n987654321'); + +select * from test; +desc table test; +drop table test; diff --git a/parser/testdata/02542_transform_new/ast.json b/parser/testdata/02542_transform_new/ast.json new file mode 100644 
index 000000000..d7d4537cb --- /dev/null +++ b/parser/testdata/02542_transform_new/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function transform (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2]" + }, + { + "explain": " Literal Array_[UInt64_9, UInt64_1]" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal NULL" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.002305429, + "rows_read": 12, + "bytes_read": 475 + } +} diff --git a/parser/testdata/02542_transform_new/metadata.json b/parser/testdata/02542_transform_new/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02542_transform_new/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02542_transform_new/query.sql b/parser/testdata/02542_transform_new/query.sql new file mode 100644 index 000000000..f3475d615 --- /dev/null +++ b/parser/testdata/02542_transform_new/query.sql @@ -0,0 +1,48 @@ +select transform(2, [1,2], [9,1], materialize(null)); +select transform(2, [1,2], [9,1], materialize(7)); +select transform(2, [1,2], [9,1], null); +select transform(2, [1,2], [9,1], 7); +select transform(1, [1,2], [9,1], null); +select transform(1, [1,2], [9,1], 7); +select transform(5, [1,2], [9,1], null); +select transform(5, [1,2], [9,1], 7); +select transform(2, [1,2], [9,1]); +select transform(1, [1,2], [9,1]); +select transform(7, [1,2], [9,1]); + +select transform(2, [1,2], ['a','b'], materialize(null)); +select transform(2, [1,2], ['a','b'], materialize('c')); +select transform(2, [1,2], ['a','b'], null); +select transform(2, [1,2], ['a','b'], 'c'); +select transform(1, [1,2], ['a','b'], null); +select transform(1, [1,2], ['a','b'], 'c'); +select transform(5, [1,2], ['a','b'], null); +select transform(5, [1,2], ['a','b'], 'c'); + +select 'sep1'; +SELECT transform(number, [2], [toDecimal32(1, 1)], materialize(80000)) as x FROM numbers(2); +select 'sep2'; +SELECT transform(number, [2], [toDecimal32(1, 1)], 80000) as x FROM numbers(2); +select 'sep3'; +SELECT transform(toDecimal32(2, 1), [toDecimal32(2, 1)], [1]); +select 'sep4'; +SELECT transform(8000, [1], [toDecimal32(2, 1)]); +select 'sep5'; +SELECT transform(toDecimal32(8000,0), [1], [toDecimal32(2, 1)]); +select 'sep6'; +SELECT transform(-9223372036854775807, [-1], [toDecimal32(1024, 3)]) FROM system.numbers LIMIT 7; -- { serverError BAD_ARGUMENTS } +SELECT [NULL, NULL, NULL, NULL], transform(number, [2147483648], [toDecimal32(1, 2)]) AS x FROM numbers(257) WHERE materialize(10); -- { serverError BAD_ARGUMENTS } +SELECT transform(-2147483649, [1], [toDecimal32(1, 2)]) GROUP BY [1] WITH TOTALS; -- { serverError BAD_ARGUMENTS } + +SELECT 'issue #53187'; +SELECT + CAST(number, 'String') AS v2, + caseWithExpression('x', 'y', 0, cond2) AS cond1, + toNullable('0' = v2) AS cond2 +FROM numbers(2); +SELECT '-'; +SELECT + CAST(number, 'String') AS v2, + caseWithExpression('x', 'y', 0, cond2) AS cond1, + toNullable('1' = v2) AS cond2 +FROM numbers(2); diff --git a/parser/testdata/02542_transform_old/ast.json 
b/parser/testdata/02542_transform_old/ast.json new file mode 100644 index 000000000..76af424e7 --- /dev/null +++ b/parser/testdata/02542_transform_old/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function transform (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal Array_[UInt64_2, UInt64_4, UInt64_6]" + }, + { + "explain": " Literal Array_['google', 'yandex', 'yahoo']" + }, + { + "explain": " Literal 'other'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.00159689, + "rows_read": 21, + "bytes_read": 834 + } +} diff --git a/parser/testdata/02542_transform_old/metadata.json b/parser/testdata/02542_transform_old/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02542_transform_old/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02542_transform_old/query.sql b/parser/testdata/02542_transform_old/query.sql new file mode 100644 index 000000000..01a960ec3 --- /dev/null +++ b/parser/testdata/02542_transform_old/query.sql @@ -0,0 +1,25 @@ +SELECT transform(number, [2, 4, 6], ['google', 'yandex', 'yahoo'], 'other') as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '#1'; +SELECT transform(number, [2, 4, 6], [29, 20, 21], 22) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '#2'; +SELECT transform(number, [2, 4, 6], [29, 20, 21]) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '#3'; +SELECT transform(toString(number), ['2', '4', '6'], [29, 20, 21], 22) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '#4'; +SELECT transform(toString(number), ['2', '4', '6'], ['google', 'yandex', 'yahoo'], 'other') as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '#5'; +SELECT transform(toString(number), ['2', '4', '6'], ['google', 'yandex', 'yahoo']) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '----'; +SELECT transform(number, [2, 4, 6], ['google', 'yandex', 'yahoo'], materialize('other')) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '#1'; +SELECT transform(number, [2, 4, 6], [29, 20, 21], materialize(22)) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '#3'; +SELECT transform(toString(number), ['2', '4', '6'], [29, 20, 21], materialize(22)) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '#4'; +SELECT transform(toString(number), ['2', '4', '6'], ['google', 'yandex', 'yahoo'], materialize('other')) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '----'; +SELECT transform(number, [2, 4, 6], [2900, 2000, 2100], 2200) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '#1'; +SELECT transform(number, [2, 4, 6], [2900, 2000, 2100], 
materialize(2200)) as x FROM numbers(10) GROUP BY x ORDER BY x; +SELECT '----'; +SELECT transform(number, [1], [null]) FROM system.numbers LIMIT 1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/02551_ipv4_implicit_uint64/ast.json b/parser/testdata/02551_ipv4_implicit_uint64/ast.json new file mode 100644 index 000000000..50baae10f --- /dev/null +++ b/parser/testdata/02551_ipv4_implicit_uint64/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery ip4test (children 3)" + }, + { + "explain": " Identifier ip4test" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration ip (children 1)" + }, + { + "explain": " DataType IPv4" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001149118, + "rows_read": 8, + "bytes_read": 278 + } +} diff --git a/parser/testdata/02551_ipv4_implicit_uint64/metadata.json b/parser/testdata/02551_ipv4_implicit_uint64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02551_ipv4_implicit_uint64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02551_ipv4_implicit_uint64/query.sql b/parser/testdata/02551_ipv4_implicit_uint64/query.sql new file mode 100644 index 000000000..ff04f5538 --- /dev/null +++ b/parser/testdata/02551_ipv4_implicit_uint64/query.sql @@ -0,0 +1,4 @@ +CREATE TABLE ip4test (ip IPv4) ENGINE=Memory; +INSERT INTO ip4test VALUES (22906492245), (2319771222); +SELECT * FROM ip4test; +DROP TABLE ip4test; diff --git a/parser/testdata/02552_analyzer_optimize_group_by_function_keys_crash/ast.json b/parser/testdata/02552_analyzer_optimize_group_by_function_keys_crash/ast.json new file mode 100644 index 000000000..2de6e9751 --- /dev/null +++ b/parser/testdata/02552_analyzer_optimize_group_by_function_keys_crash/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001043714, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02552_analyzer_optimize_group_by_function_keys_crash/metadata.json b/parser/testdata/02552_analyzer_optimize_group_by_function_keys_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02552_analyzer_optimize_group_by_function_keys_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02552_analyzer_optimize_group_by_function_keys_crash/query.sql b/parser/testdata/02552_analyzer_optimize_group_by_function_keys_crash/query.sql new file mode 100644 index 000000000..85740cd85 --- /dev/null +++ b/parser/testdata/02552_analyzer_optimize_group_by_function_keys_crash/query.sql @@ -0,0 +1,3 @@ +SET enable_analyzer = 1; + +SELECT NULL GROUP BY tuple('0.0000000007'), count(NULL) OVER (ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) -- { serverError ILLEGAL_AGGREGATION }; diff --git a/parser/testdata/02552_check_referential_table_dependencies/ast.json b/parser/testdata/02552_check_referential_table_dependencies/ast.json new file mode 100644 index 000000000..73e8e707b --- /dev/null +++ b/parser/testdata/02552_check_referential_table_dependencies/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + 
], + + "data": + [ + { + "explain": "DropQuery mv (children 1)" + }, + { + "explain": " Identifier mv" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001191227, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02552_check_referential_table_dependencies/metadata.json b/parser/testdata/02552_check_referential_table_dependencies/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02552_check_referential_table_dependencies/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02552_check_referential_table_dependencies/query.sql b/parser/testdata/02552_check_referential_table_dependencies/query.sql new file mode 100644 index 000000000..e83006eca --- /dev/null +++ b/parser/testdata/02552_check_referential_table_dependencies/query.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS mv; +DROP TABLE IF EXISTS src; +DROP TABLE IF EXISTS dst; + +CREATE TABLE src (x UInt8) ENGINE = Memory; +CREATE TABLE dst (x UInt8) ENGINE = Memory; +CREATE MATERIALIZED VIEW mv TO dst AS SELECT x FROM src; + +SET check_referential_table_dependencies = 1; + +-- Can't drop because of referential dependencies +DROP TABLE src; -- { serverError HAVE_DEPENDENT_OBJECTS } +DROP TABLE dst; -- { serverError HAVE_DEPENDENT_OBJECTS } + +-- Ok to drop in the correct order +DROP TABLE mv; +DROP TABLE src; +DROP TABLE dst; + +-- Check again with check_referential_table_dependencies = 0 +CREATE TABLE src (x UInt8) ENGINE = Memory; +CREATE TABLE dst (x UInt8) ENGINE = Memory; +CREATE MATERIALIZED VIEW mv TO dst AS SELECT x FROM src; + +SET check_referential_table_dependencies = 0; + +DROP TABLE src; +DROP TABLE dst; +DROP TABLE mv; diff --git a/parser/testdata/02552_client_format_settings/ast.json b/parser/testdata/02552_client_format_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02552_client_format_settings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02552_client_format_settings/metadata.json b/parser/testdata/02552_client_format_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02552_client_format_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02552_client_format_settings/query.sql b/parser/testdata/02552_client_format_settings/query.sql new file mode 100644 index 000000000..6a63a421d --- /dev/null +++ b/parser/testdata/02552_client_format_settings/query.sql @@ -0,0 +1,5 @@ +-- Test from https://github.com/ClickHouse/ClickHouse/issues/45880 + +-- { echo } +SELECT number FROM numbers(5) SETTINGS output_format_json_array_of_rows = 1 FORMAT JSONEachRow; +SELECT number FROM numbers(5) FORMAT JSONEachRow SETTINGS output_format_json_array_of_rows = 1; diff --git a/parser/testdata/02552_inner_join_with_where_true/ast.json b/parser/testdata/02552_inner_join_with_where_true/ast.json new file mode 100644 index 000000000..85e5752d8 --- /dev/null +++ b/parser/testdata/02552_inner_join_with_where_true/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t0 (children 3)" + }, + { + "explain": " Identifier t0" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration c0 (children 1)" + }, + { + "explain": " DataType Int32" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " 
Function Memory" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001260157, + "rows_read": 8, + "bytes_read": 269 + } +} diff --git a/parser/testdata/02552_inner_join_with_where_true/metadata.json b/parser/testdata/02552_inner_join_with_where_true/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02552_inner_join_with_where_true/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02552_inner_join_with_where_true/query.sql b/parser/testdata/02552_inner_join_with_where_true/query.sql new file mode 100644 index 000000000..223fafb53 --- /dev/null +++ b/parser/testdata/02552_inner_join_with_where_true/query.sql @@ -0,0 +1,9 @@ +CREATE TABLE t0 (c0 Int32) ENGINE = Memory; +CREATE TABLE t1 (c1 Int32) ENGINE = Memory; + +INSERT INTO t0(c0) VALUES (1), (2); +INSERT INTO t1(c1) VALUES (1); + +SELECT max(1), count() FROM t0 AS t0 LEFT JOIN t1 ON true WHERE 1; +SELECT max(1), count() FROM t0 AS t0 INNER JOIN t1 ON t0.c0 = t1.c1 WHERE 1; +SELECT max(1), count() FROM t0 AS t0 INNER JOIN t1 ON true WHERE 0; diff --git a/parser/testdata/02552_regression_crash/ast.json b/parser/testdata/02552_regression_crash/ast.json new file mode 100644 index 000000000..438729f67 --- /dev/null +++ b/parser/testdata/02552_regression_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery store_sales (children 1)" + }, + { + "explain": " Identifier store_sales" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001312123, + "rows_read": 2, + "bytes_read": 75 + } +} diff --git a/parser/testdata/02552_regression_crash/metadata.json b/parser/testdata/02552_regression_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02552_regression_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02552_regression_crash/query.sql b/parser/testdata/02552_regression_crash/query.sql new file mode 100644 index 000000000..af272dbce --- /dev/null +++ b/parser/testdata/02552_regression_crash/query.sql @@ -0,0 +1,14 @@ +CREATE TABLE store_sales +( + `ss_sold_date_sk` Float64, + `ss_sold_time_sk` Float64, + `ss_customer_sk` Float64, + `ss_cdemo_sk` Float64, + `ss_hdemo_sk` Float64 +) +ENGINE = Memory; + +insert into store_sales values (-3.273, -1.452, 4.267, 20.0, 40.0),(0.121, -0.615, 4.290, 20.0, 40.0), (-1.099, 2.755, -3.060, 20.0, 40.0),(1.090, 2.945, -2.346, 20.0, 40.0), (0.305, 2.179, -1.205, 20.0, 40.0),(-0.925, 0.702, 1.134, 20.0, 40.0), (3.178, -1.316, 7.221, 20.0, 40.0),(-2.756, -0.473, 2.569, 20.0, 40.0), (3.665, 2.303, 0.226, 20.0, 40.0),(1.662, 1.951, -0.070, 20.0, 40.0), (2.869, 0.593, 3.249, 20.0, 40.0),(0.818, -0.593, 4.594, 20.0, 40.0), (-1.917, 0.916, 0.209, 20.0, 40.0),(2.706, 1.523, 1.307, 20.0, 40.0), (0.219, 2.162, -1.214, 20.0, 40.0),(-4.510, 1.376, -2.007, 20.0, 40.0), (4.284, -0.515, 6.173, 20.0, 40.0),(-1.101, 2.810, -3.170, 20.0, 40.0), (-1.810, -1.117, 4.329, 20.0, 40.0),(0.055, 1.115, 0.797, 20.0, 40.0), (-2.178, 2.904, -3.898, 20.0, 40.0),(-3.494, -1.814, 4.882, 20.0, 40.0), (3.027, 0.476, 3.562, 20.0, 40.0),(-1.434, 1.151, -0.018, 20.0, 40.0), (1.180, 0.992, 1.606, 20.0, 40.0),(0.015, 0.971, 1.067, 20.0, 40.0), (-0.511, -0.875, 4.495, 20.0, 40.0),(0.961, 2.348, -1.216, 20.0, 40.0), (-2.279, 0.038, 1.785, 20.0, 40.0),(-1.568, -0.248, 2.712, 20.0, 40.0), (-0.496, 0.366, 2.020, 20.0, 40.0),(1.177, -1.401, 6.390, 20.0, 40.0), (2.882, -1.442, 7.325, 20.0, 
40.0),(-1.066, 1.817, -1.167, 20.0, 40.0), (-2.144, 2.791, -3.655, 20.0, 40.0),(-4.370, 2.228, -3.642, 20.0, 40.0), (3.996, 2.775, -0.553, 20.0, 40.0),(0.289, 2.055, -0.965, 20.0, 40.0), (-0.588, -1.601, 5.908, 20.0, 40.0),(-1.801, 0.417, 1.265, 20.0, 40.0), (4.375, -1.499, 8.186, 20.0, 40.0),(-2.618, 0.038, 1.615, 20.0, 40.0), (3.616, -0.833, 6.475, 20.0, 40.0),(-4.045, -1.558, 4.094, 20.0, 40.0), (-3.962, 0.636, -0.253, 20.0, 40.0),(3.505, 2.625, -0.497, 20.0, 40.0), (3.029, -0.523, 5.560, 20.0, 40.0),(-3.520, -0.474, 2.188, 20.0, 40.0), (2.430, -1.469, 7.154, 20.0, 40.0),(1.547, -1.654, 7.082, 20.0, 40.0), (-1.370, 0.575, 1.165, 20.0, 40.0),(-1.869, -1.555, 5.176, 20.0, 40.0), (3.536, 2.841, -0.913, 20.0, 40.0),(-3.810, 1.220, -1.344, 20.0, 40.0), (-1.971, 1.462, -0.910, 20.0, 40.0),(-0.243, 0.167, 2.545, 20.0, 40.0), (-1.403, 2.645, -2.991, 20.0, 40.0),(0.532, -0.114, 3.494, 20.0, 40.0), (-1.678, 0.975, 0.212, 20.0, 40.0),(-0.656, 2.140, -1.609, 20.0, 40.0), (1.743, 2.631, -1.390, 20.0, 40.0),(2.586, 2.943, -1.593, 20.0, 40.0), (-0.512, 2.969, -3.195, 20.0, 40.0),(2.283, -0.100, 4.342, 20.0, 40.0), (-4.293, 0.872, -0.890, 20.0, 40.0),(3.411, 1.300, 2.106, 20.0, 40.0), (-0.281, 2.951, -3.042, 20.0, 40.0),(-4.442, 0.384, 0.012, 20.0, 40.0), (1.194, 1.746, 0.104, 20.0, 40.0),(-1.152, 1.862, -1.300, 20.0, 40.0), (1.362, -1.341, 6.363, 20.0, 40.0),(-4.488, 2.618, -4.481, 20.0, 40.0), (3.419, -0.564, 5.837, 20.0, 40.0),(-3.392, 0.396, 0.512, 20.0, 40.0), (-1.629, -0.909, 4.003, 20.0, 40.0),(4.447, -1.088, 7.399, 20.0, 40.0), (-1.232, 1.699, -1.014, 20.0, 40.0),(-1.286, -0.609, 3.575, 20.0, 40.0), (2.437, 2.796, -1.374, 20.0, 40.0),(-4.864, 1.989, -3.410, 20.0, 40.0), (-1.716, -1.399, 4.940, 20.0, 40.0),(-3.084, 1.858, -2.259, 20.0, 40.0), (2.828, -0.319, 5.053, 20.0, 40.0),(-1.226, 2.586, -2.786, 20.0, 40.0), (2.456, 0.092, 4.044, 20.0, 40.0),(-0.989, 2.375, -2.245, 20.0, 40.0), (3.268, 0.935, 2.765, 20.0, 40.0),(-4.128, -1.995, 4.927, 20.0, 40.0), (-1.083, 2.197, -1.935, 20.0, 40.0),(-3.471, -1.198, 3.660, 20.0, 40.0), (4.617, -1.136, 7.579, 20.0, 40.0),(2.054, -1.675, 7.378, 20.0, 40.0), (4.106, 2.326, 0.402, 20.0, 40.0),(1.558, 0.310, 3.158, 20.0, 40.0), (0.792, 0.900, 1.596, 20.0, 40.0),(-3.229, 0.300, 0.785, 20.0, 40.0), (3.787, -0.793, 6.479, 20.0, 40.0),(1.786, 2.288, -0.684, 20.0, 40.0), (2.643, 0.223, 3.875, 20.0, 40.0),(-3.592, 2.122, -3.040, 20.0, 40.0), (4.519, -1.760, 8.779, 20.0, 40.0),(3.221, 2.255, 0.101, 20.0, 40.0), (4.151, 1.788, 1.500, 20.0, 40.0),(-1.033, -1.195, 4.874, 20.0, 40.0), (-1.636, -1.037, 4.257, 20.0, 40.0),(-3.548, 1.911, -2.596, 20.0, 40.0), (4.829, -0.293, 6.001, 20.0, 40.0),(-4.684, -1.664, 3.986, 20.0, 40.0), (4.531, -0.503, 6.271, 20.0, 40.0),(-3.503, -1.606, 4.460, 20.0, 40.0), (-2.036, -1.522, 5.027, 20.0, 40.0),(-0.473, -0.617, 3.997, 20.0, 40.0), (-1.554, -1.630, 5.483, 20.0, 40.0),(-3.567, -1.043, 3.302, 20.0, 40.0), (-2.038, 0.579, 0.823, 20.0, 40.0),(-3.040, 0.857, -0.233, 20.0, 40.0), (4.610, 0.562, 4.181, 20.0, 40.0),(-3.323, -1.938, 5.215, 20.0, 40.0), (4.314, 1.720, 1.717, 20.0, 40.0),(-1.220, 0.615, 1.161, 20.0, 40.0), (-2.556, 1.120, -0.519, 20.0, 40.0),(-3.717, -0.108, 1.358, 20.0, 40.0), (4.689, -1.826, 8.996, 20.0, 40.0),(3.452, 0.506, 3.713, 20.0, 40.0), (2.472, 0.612, 3.012, 20.0, 40.0),(3.452, 0.450, 3.826, 20.0, 40.0), (1.207, 2.585, -1.567, 20.0, 40.0),(-4.826, 1.090, -1.593, 20.0, 40.0), (3.116, -1.118, 6.794, 20.0, 40.0),(0.448, 2.732, -2.240, 20.0, 40.0), (-1.096, -0.525, 3.503, 20.0, 40.0),(-4.680, -0.238, 1.137, 20.0, 40.0), 
(2.552, -1.403, 7.082, 20.0, 40.0),(0.719, 2.997, -2.635, 20.0, 40.0), (0.347, -1.966, 7.105, 20.0, 40.0),(2.958, -0.404, 5.288, 20.0, 40.0), (0.722, -1.950, 7.261, 20.0, 40.0),(-2.851, -0.986, 3.546, 20.0, 40.0), (-4.316, -0.439, 1.721, 20.0, 40.0),(-1.685, -0.201, 2.560, 20.0, 40.0), (1.856, 0.190, 3.549, 20.0, 40.0),(-2.052, 0.206, 1.562, 20.0, 40.0), (-2.504, -0.646, 3.041, 20.0, 40.0),(3.235, 0.882, 2.854, 20.0, 40.0), (-1.366, -1.573, 5.463, 20.0, 40.0),(-3.447, 2.419, -3.562, 20.0, 40.0), (4.155, 2.092, 0.893, 20.0, 40.0),(-0.935, 0.209, 2.116, 20.0, 40.0), (3.117, -1.821, 8.201, 20.0, 40.0),(3.759, 0.577, 3.725, 20.0, 40.0), (-0.938, 2.992, -3.453, 20.0, 40.0),(-0.525, 2.341, -1.945, 20.0, 40.0), (4.540, 2.625, 0.019, 20.0, 40.0),(-2.097, 1.190, -0.429, 20.0, 40.0), (-2.672, 1.983, -2.302, 20.0, 40.0),(-3.038, -1.490, 4.460, 20.0, 40.0), (-0.943, 2.149, -1.770, 20.0, 40.0),(0.739, 1.598, 0.174, 20.0, 40.0), (1.828, 1.853, 0.208, 20.0, 40.0),(4.856, 0.137, 5.153, 20.0, 40.0), (-1.617, 0.468, 1.255, 20.0, 40.0),(-1.972, 2.053, -2.092, 20.0, 40.0), (-4.633, 1.389, -2.094, 20.0, 40.0),(-3.628, -1.156, 3.498, 20.0, 40.0), (3.597, 1.034, 2.731, 20.0, 40.0),(-1.488, -0.002, 2.261, 20.0, 40.0), (0.749, 1.921, -0.468, 20.0, 40.0),(1.304, -1.371, 6.394, 20.0, 40.0), (4.587, 2.936, -0.579, 20.0, 40.0),(-2.241, 1.791, -1.703, 20.0, 40.0), (-2.945, 1.372, -1.216, 20.0, 40.0),(1.375, 0.395, 2.898, 20.0, 40.0), (-1.281, -0.641, 3.642, 20.0, 40.0),(2.178, 0.895, 2.299, 20.0, 40.0), (3.031, -0.786, 6.087, 20.0, 40.0),(-1.385, -0.375, 3.058, 20.0, 40.0), (4.041, -0.431, 5.882, 20.0, 40.0),(0.480, -0.507, 4.254, 20.0, 40.0), (-3.797, 0.140, 0.822, 20.0, 40.0),(2.355, 2.502, -0.827, 20.0, 40.0), (1.376, -1.583, 6.854, 20.0, 40.0),(0.164, 1.405, 0.273, 20.0, 40.0), (-1.273, 1.471, -0.579, 20.0, 40.0),(0.770, 2.246, -1.107, 20.0, 40.0), (4.552, 2.904, -0.533, 20.0, 40.0),(4.259, -1.772, 8.674, 20.0, 40.0), (-0.309, 1.159, 0.528, 20.0, 40.0),(3.581, 2.700, -0.610, 20.0, 40.0), (-3.202, 0.346, 0.707, 20.0, 40.0),(-1.575, 1.242, -0.271, 20.0, 40.0), (-1.584, -0.493, 3.194, 20.0, 40.0),(-3.778, 0.150, 0.810, 20.0, 40.0), (-4.675, 1.749, -2.835, 20.0, 40.0),(3.567, -0.792, 6.367, 20.0, 40.0), (-0.417, 1.399, -0.006, 20.0, 40.0),(-4.672, 2.007, -3.349, 20.0, 40.0), (-1.034, 0.196, 2.090, 20.0, 40.0),(-3.796, 2.496, -3.890, 20.0, 40.0), (3.532, -0.497, 5.759, 20.0, 40.0),(4.868, -1.359, 8.151, 20.0, 40.0), (-0.769, 0.302, 2.011, 20.0, 40.0),(4.475, 2.612, 0.014, 20.0, 40.0), (-3.532, -0.395, 2.024, 20.0, 40.0),(0.322, 0.675, 1.812, 20.0, 40.0), (-2.028, -1.942, 5.870, 20.0, 40.0),(1.810, -1.244, 6.392, 20.0, 40.0), (-0.783, 1.242, 0.124, 20.0, 40.0),(-4.745, -1.300, 3.227, 20.0, 40.0), (1.902, 1.973, 0.005, 20.0, 40.0),(-3.453, -1.429, 4.132, 20.0, 40.0), (1.559, 0.986, 1.808, 20.0, 40.0),(0.128, 2.754, -2.443, 20.0, 40.0), (2.759, 1.727, 0.926, 20.0, 40.0),(-4.468, 1.690, -2.614, 20.0, 40.0), (-2.368, -1.922, 5.659, 20.0, 40.0),(-2.766, 2.128, -2.640, 20.0, 40.0), (0.967, -1.825, 7.133, 20.0, 40.0),(-2.854, 2.855, -4.136, 20.0, 40.0), (-2.944, 1.875, -2.222, 20.0, 40.0),(-2.632, -0.983, 3.649, 20.0, 40.0), (2.427, 2.239, -0.266, 20.0, 40.0),(-1.726, -0.838, 3.812, 20.0, 40.0), (0.007, -0.903, 4.809, 20.0, 40.0),(-2.013, 1.092, -0.191, 20.0, 40.0), (-0.449, 0.970, 0.836, 20.0, 40.0),(1.396, 0.411, 2.876, 20.0, 40.0), (-1.115, -1.790, 6.023, 20.0, 40.0),(3.748, 1.917, 1.039, 20.0, 40.0), (2.978, 1.043, 2.404, 20.0, 40.0),(-3.969, 2.514, -4.013, 20.0, 40.0), (4.455, -0.050, 5.328, 20.0, 40.0),(-3.065, -0.846, 
3.160, 20.0, 40.0), (-1.069, 2.167, -1.869, 20.0, 40.0),(3.016, -1.393, 7.294, 20.0, 40.0), (0.045, -1.928, 6.879, 20.0, 40.0),(-2.555, -0.984, 3.690, 20.0, 40.0), (-1.995, -0.054, 2.111, 20.0, 40.0),(4.600, -0.509, 6.318, 20.0, 40.0), (-1.942, 1.215, -0.402, 20.0, 40.0),(1.262, 2.765, -1.899, 20.0, 40.0), (2.617, -1.106, 6.521, 20.0, 40.0),(1.737, 0.554, 2.761, 20.0, 40.0), (-2.197, 0.632, 0.638, 20.0, 40.0),(4.768, 2.618, 0.147, 20.0, 40.0), (-3.737, -0.939, 3.010, 20.0, 40.0),(-2.623, 0.595, 0.499, 20.0, 40.0), (4.752, -0.340, 6.057, 20.0, 40.0),(2.333, -1.037, 6.240, 20.0, 40.0), (4.234, -1.882, 8.881, 20.0, 40.0),(-3.393, -0.812, 2.927, 20.0, 40.0), (0.885, 1.383, 0.678, 20.0, 40.0),(0.123, 2.937, -2.812, 20.0, 40.0), (2.969, 0.760, 2.964, 20.0, 40.0),(-4.929, 1.251, -1.967, 20.0, 40.0), (1.916, 2.223, -0.488, 20.0, 40.0),(-0.020, -1.740, 6.469, 20.0, 40.0), (0.702, -1.272, 5.895, 20.0, 40.0),(2.496, 2.648, -1.048, 20.0, 40.0), (4.067, -1.475, 7.984, 20.0, 40.0),(-3.717, 1.851, -2.561, 20.0, 40.0), (1.678, -0.624, 5.088, 20.0, 40.0),(1.073, 0.695, 2.146, 20.0, 40.0), (1.842, -0.749, 5.419, 20.0, 40.0),(-3.518, 1.909, -2.578, 20.0, 40.0), (2.229, 1.189, 1.737, 20.0, 40.0),(4.987, 2.893, -0.292, 20.0, 40.0), (-4.809, 1.043, -1.490, 20.0, 40.0),(-0.241, -0.728, 4.334, 20.0, 40.0), (-3.331, 0.590, 0.156, 20.0, 40.0),(-0.455, 2.621, -2.470, 20.0, 40.0), (1.492, 1.223, 1.301, 20.0, 40.0),(3.948, 2.841, -0.709, 20.0, 40.0), (0.732, 0.446, 2.475, 20.0, 40.0),(2.400, 2.390, -0.579, 20.0, 40.0), (-2.718, 1.427, -1.213, 20.0, 40.0),(-1.826, 1.451, -0.815, 20.0, 40.0), (1.125, 0.438, 2.686, 20.0, 40.0),(-4.918, 1.880, -3.219, 20.0, 40.0), (3.068, -0.442, 5.418, 20.0, 40.0),(1.982, 1.201, 1.589, 20.0, 40.0), (0.701, -1.709, 6.768, 20.0, 40.0),(-1.496, 2.564, -2.877, 20.0, 40.0), (-3.812, 0.974, -0.853, 20.0, 40.0),(-3.405, 2.018, -2.739, 20.0, 40.0), (2.211, 2.889, -1.674, 20.0, 40.0),(-2.481, 2.931, -4.103, 20.0, 40.0), (-3.721, 2.765, -4.391, 20.0, 40.0),(-1.768, -1.292, 4.699, 20.0, 40.0), (-4.462, 1.058, -1.347, 20.0, 40.0),(-3.516, -1.942, 5.126, 20.0, 40.0), (0.485, 2.420, -1.597, 20.0, 40.0),(-0.492, 0.242, 2.270, 20.0, 40.0), (4.245, 1.689, 1.744, 20.0, 40.0),(2.234, 0.364, 3.389, 20.0, 40.0), (2.629, 2.224, -0.134, 20.0, 40.0),(-4.375, 1.221, -1.630, 20.0, 40.0), (-0.618, 1.374, -0.057, 20.0, 40.0),(-2.580, -1.604, 4.918, 20.0, 40.0), (0.159, 1.104, 0.871, 20.0, 40.0),(-3.597, 0.975, -0.749, 20.0, 40.0); +INSERT INTO store_sales (ss_sold_time_sk) VALUES (1); +INSERT INTO store_sales (ss_cdemo_sk) VALUES (0.1); +select stochasticLinearRegressionState(0.03, 0.00001, 2, 'Momentum')(ss_sold_time_sk, ss_sold_time_sk, ss_sold_time_sk) as ss_wholesale_cost from store_sales format Null; diff --git a/parser/testdata/02552_siphash128_reference/ast.json b/parser/testdata/02552_siphash128_reference/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02552_siphash128_reference/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02552_siphash128_reference/metadata.json b/parser/testdata/02552_siphash128_reference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02552_siphash128_reference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02552_siphash128_reference/query.sql b/parser/testdata/02552_siphash128_reference/query.sql new file mode 100644 index 000000000..46f292d66 --- /dev/null +++ b/parser/testdata/02552_siphash128_reference/query.sql @@ -0,0 +1,254 @@ +-- Test 
Vectors from the SipHash reference C implementation: +-- Written by +-- Jean-Philippe Aumasson <jeanphilippe.aumasson@gmail.com> +-- Daniel J. Bernstein <djb@cr.yp.to> +-- Released under CC0 +-- https://github.com/veorq/SipHash/blob/eee7d0d84dc7731df2359b243aa5e75d85f6eaef/vectors.h#L645 +
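The two UInt64 halves of the key in the statements below are the reference key bytes 00 01 02 ... 0e 0f packed as little-endian words: 506097522914230528 is 0x0706050403020100 and 1084818905618843912 is 0x0F0E0D0C0B0A0908. A minimal spot-check, not part of the test file, assuming reinterpretAsString keeps all eight little-endian bytes (the high byte of each half is non-zero, so nothing is trimmed):

-- recover the key bytes from the two UInt64 halves (hypothetical sanity check)
select hex(reinterpretAsString(toUInt64(506097522914230528)));  -- expected: 0001020304050607
select hex(reinterpretAsString(toUInt64(1084818905618843912))); -- expected: 08090A0B0C0D0E0F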
+select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + '')); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), 
+ char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), 
toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61))); +select hex(sipHash128ReferenceKeyed((toUInt64(506097522914230528), toUInt64(1084818905618843912)), + char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62))); + +-- CH tests +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0)) == sipHash128Reference(char(0)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1)) == sipHash128Reference(char(0, 1)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2)) == sipHash128Reference(char(0, 1, 2)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3)) == sipHash128Reference(char(0, 1, 2, 3)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4)) == sipHash128Reference(char(0, 1, 2, 3, 4)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 
11, 12, 13, 14, 15, 16, 17, 18)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 
31, 32, 33, 34, 35, 36, 37, 38, 39, 40)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48)); 
+select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55)); +select 
sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 
50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62)); +select sipHash128ReferenceKeyed((toUInt64(0),toUInt64(0)),char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63)) == sipHash128Reference(char(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63)); + +select sipHash128ReferenceKeyed((0, 0), '1'); -- { serverError BAD_ARGUMENTS } +select sipHash128ReferenceKeyed(toUInt64(0), '1'); -- { serverError BAD_ARGUMENTS } + +SELECT hex(sipHash128Reference()) = hex(reverse(unhex('1CE422FEE7BD8DE20000000000000000'))) or hex(sipHash128()) = '1CE422FEE7BD8DE20000000000000000'; +SELECT hex(sipHash128ReferenceKeyed()) = hex(reverse(unhex('1CE422FEE7BD8DE20000000000000000'))) or hex(sipHash128Keyed()) = '1CE422FEE7BD8DE20000000000000000'; + +SELECT 'Check bug with hashing of const integer values'; +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (key Tuple(UInt64, UInt64), val UInt64) ENGINE=Memory; +INSERT INTO tab VALUES ((2, 2), 4); +-- these two statements must produce the same result +SELECT hex(sipHash128ReferenceKeyed(key, val)) FROM tab; +SELECT hex(sipHash128ReferenceKeyed(key, 4::UInt64)) FROM tab; +DROP TABLE tab; + +SELECT 'Check memsan bug'; +SELECT hex(sipHash128ReferenceKeyed((toUInt64(2), toUInt64(-9223372036854775807)))) GROUP BY (toUInt64(506097522914230528), toUInt64(now64(2, NULL + NULL), 1084818905618843912)), toUInt64(2), NULL + NULL, char(-2147483649, 1); + +SELECT 'Check const columns'; +DROP TABLE IF EXISTS sipHashKeyed_test; +CREATE TABLE sipHashKeyed_test ENGINE = Memory() AS SELECT 1 a, 'test' b; +SELECT hex(sipHash128ReferenceKeyed((toUInt64(0), toUInt64(0)), 1, 'test')); +SELECT hex(sipHash128Reference(tuple(*))) FROM sipHashKeyed_test; +SELECT hex(sipHash128ReferenceKeyed((toUInt64(0), toUInt64(0)), tuple(*))) FROM sipHashKeyed_test; +SELECT hex(sipHash128ReferenceKeyed((toUInt64(0), toUInt64(0)), a, b)) FROM sipHashKeyed_test; +DROP TABLE sipHashKeyed_test; + +SELECT 'Check multiple keys as tuple from a table'; +DROP TABLE IF EXISTS sipHashKeyed_keys; +CREATE TABLE sipHashKeyed_keys (key Tuple(UInt64, UInt64), val UInt64) ENGINE=Memory; +INSERT INTO sipHashKeyed_keys VALUES ((2, 2), 4); +INSERT INTO sipHashKeyed_keys VALUES ((4, 4), 4); +SELECT hex(sipHash128ReferenceKeyed(key, val)) FROM sipHashKeyed_keys ORDER by key; +DROP TABLE sipHashKeyed_keys; + +SELECT 'Check multiple keys as separate ints from a table'; +DROP TABLE IF EXISTS sipHashKeyed_keys; +CREATE TABLE sipHashKeyed_keys (key0 UInt64, key1 UInt64, val UInt64) ENGINE=Memory; +INSERT INTO sipHashKeyed_keys VALUES (2, 2, 4); +INSERT INTO sipHashKeyed_keys VALUES (4, 4, 4); +SELECT hex(sipHash128ReferenceKeyed((key0, key1), val)) FROM sipHashKeyed_keys ORDER by key0; +SELECT 'Check constant key and data from a table'; +SELECT hex(sipHash128ReferenceKeyed((2::UInt64, 2::UInt64), val)) FROM sipHashKeyed_keys ORDER by val; +DROP TABLE sipHashKeyed_keys; + +SELECT 'Check 
multiple keys as separate ints from a table with constant data'; +DROP TABLE IF EXISTS sipHashKeyed_keys; +CREATE TABLE sipHashKeyed_keys (key0 UInt64, key1 UInt64) ENGINE=Memory; +INSERT INTO sipHashKeyed_keys VALUES (2, 2); +INSERT INTO sipHashKeyed_keys VALUES (4, 4); +SELECT hex(sipHash128ReferenceKeyed((key0, key1), 4::UInt64)) FROM sipHashKeyed_keys ORDER by key0; +DROP TABLE sipHashKeyed_keys; diff --git a/parser/testdata/02552_sparse_columns_intersect/ast.json b/parser/testdata/02552_sparse_columns_intersect/ast.json new file mode 100644 index 000000000..73cdf15c0 --- /dev/null +++ b/parser/testdata/02552_sparse_columns_intersect/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_sparse_intersect (children 1)" + }, + { + "explain": " Identifier t_sparse_intersect" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001231926, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/02552_sparse_columns_intersect/metadata.json b/parser/testdata/02552_sparse_columns_intersect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02552_sparse_columns_intersect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02552_sparse_columns_intersect/query.sql b/parser/testdata/02552_sparse_columns_intersect/query.sql new file mode 100644 index 000000000..cdad50583 --- /dev/null +++ b/parser/testdata/02552_sparse_columns_intersect/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS t_sparse_intersect; + +CREATE TABLE t_sparse_intersect (a UInt64, c Int64) ENGINE = MergeTree +ORDER BY tuple() SETTINGS ratio_of_defaults_for_sparse_serialization = 0.8; + +SYSTEM STOP MERGES t_sparse_intersect; + +INSERT INTO t_sparse_intersect SELECT if (number % 10 = 0, number, 0), number FROM numbers(1000); +INSERT INTO t_sparse_intersect SELECT number, number FROM numbers(1000); + +SELECT count() FROM (SELECT * FROM t_sparse_intersect EXCEPT SELECT * FROM t_sparse_intersect); +SELECT count() FROM (SELECT * FROM t_sparse_intersect INTERSECT SELECT * FROM t_sparse_intersect); + +DROP TABLE t_sparse_intersect; diff --git a/parser/testdata/02553_new_type_json_attach_partition/ast.json b/parser/testdata/02553_new_type_json_attach_partition/ast.json new file mode 100644 index 000000000..a48275fb9 --- /dev/null +++ b/parser/testdata/02553_new_type_json_attach_partition/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00098565, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02553_new_type_json_attach_partition/metadata.json b/parser/testdata/02553_new_type_json_attach_partition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02553_new_type_json_attach_partition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02553_new_type_json_attach_partition/query.sql b/parser/testdata/02553_new_type_json_attach_partition/query.sql new file mode 100644 index 000000000..c7f3a42f7 --- /dev/null +++ b/parser/testdata/02553_new_type_json_attach_partition/query.sql @@ -0,0 +1,15 @@ +SET enable_json_type = 1; + +DROP TABLE IF EXISTS t_json_attach_partition; + +CREATE TABLE t_json_attach_partition(b UInt64, c JSON) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_json_attach_partition FORMAT JSONEachRow {"b": 1, "c" 
: {"k1": 1}}; + +ALTER TABLE t_json_attach_partition DETACH PARTITION tuple(); +INSERT INTO t_json_attach_partition FORMAT JSONEachRow {"b": 1, "c" : {"k1": [1, 2]}}; + +ALTER TABLE t_json_attach_partition ATTACH PARTITION tuple(); +SELECT * FROM t_json_attach_partition ORDER BY toString(c) FORMAT JSONEachRow; + +DROP TABLE t_json_attach_partition; diff --git a/parser/testdata/02554_fix_grouping_sets_predicate_push_down/ast.json b/parser/testdata/02554_fix_grouping_sets_predicate_push_down/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02554_fix_grouping_sets_predicate_push_down/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02554_fix_grouping_sets_predicate_push_down/metadata.json b/parser/testdata/02554_fix_grouping_sets_predicate_push_down/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02554_fix_grouping_sets_predicate_push_down/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02554_fix_grouping_sets_predicate_push_down/query.sql b/parser/testdata/02554_fix_grouping_sets_predicate_push_down/query.sql new file mode 100644 index 000000000..19261acbc --- /dev/null +++ b/parser/testdata/02554_fix_grouping_sets_predicate_push_down/query.sql @@ -0,0 +1,159 @@ +-- Tags: no-object-storage + +-- Specific value doesn't matter, we just need it to be fixed, because it is a part of `EXPLAIN PIPELINE` output. +SET max_threads = 8; + +DROP TABLE IF EXISTS test_grouping_sets_predicate; + +CREATE TABLE test_grouping_sets_predicate +( + day_ Date, + type_1 String +) +ENGINE=MergeTree +ORDER BY day_; + +INSERT INTO test_grouping_sets_predicate SELECT + toDate('2023-01-05') AS day_, + 'hello, world' +FROM numbers (10); + +SELECT '---Explain Syntax---'; +EXPLAIN SYNTAX +SELECT * +FROM +( + SELECT + day_, + if(type_1 = '', 'all', type_1) AS type_1 + FROM + ( + SELECT + day_, + type_1 + FROM test_grouping_sets_predicate + WHERE day_ = '2023-01-05' + GROUP BY + GROUPING SETS ( + (day_, type_1), + (day_)) + ) AS t +) +WHERE type_1 = 'all'; + +SELECT ''; +SELECT '---Explain Pipeline---'; +EXPLAIN PIPELINE +SELECT * +FROM +( + SELECT + day_, + if(type_1 = '', 'all', type_1) AS type_1 + FROM + ( + SELECT + day_, + type_1 + FROM test_grouping_sets_predicate + WHERE day_ = '2023-01-05' + GROUP BY + GROUPING SETS ( + (day_, type_1), + (day_)) + ) AS t +) +WHERE type_1 = 'all' settings enable_analyzer=0; + +-- Query plan with analyzer has less Filter steps (which is more optimal) +EXPLAIN PIPELINE +SELECT * +FROM +( + SELECT + day_, + if(type_1 = '', 'all', type_1) AS type_1 + FROM + ( + SELECT + day_, + type_1 + FROM test_grouping_sets_predicate + WHERE day_ = '2023-01-05' + GROUP BY + GROUPING SETS ( + (day_, type_1), + (day_)) + ) AS t +) +WHERE type_1 = 'all' settings enable_analyzer=1; + +SELECT ''; +SELECT '---Result---'; +SELECT * +FROM +( + SELECT + day_, + if(type_1 = '', 'all', type_1) AS type_1 + FROM + ( + SELECT + day_, + type_1 + FROM test_grouping_sets_predicate + WHERE day_ = '2023-01-05' + GROUP BY + GROUPING SETS ( + (day_, type_1), + (day_)) + ) AS t +) +WHERE type_1 = 'all'; + +SELECT ''; +SELECT '---Explain Pipeline---'; +EXPLAIN PIPELINE +SELECT * +FROM +( + SELECT + day_, + if(type_1 = '', 'all', type_1) AS type_1 + FROM + ( + SELECT + day_, + type_1 + FROM test_grouping_sets_predicate + GROUP BY + GROUPING SETS ( + (day_, type_1), + (day_)) + ) AS t +) +WHERE day_ = '2023-01-05' settings enable_analyzer=0; + +-- Query plan with analyzer has less Filter 
+-- Query plan with analyzer has fewer Filter steps (which is more optimal) +EXPLAIN PIPELINE +SELECT * +FROM +( + SELECT + day_, + if(type_1 = '', 'all', type_1) AS type_1 + FROM + ( + SELECT + day_, + type_1 + FROM test_grouping_sets_predicate + GROUP BY + GROUPING SETS ( + (day_, type_1), + (day_)) + ) AS t +) +WHERE day_ = '2023-01-05' settings enable_analyzer=1; + +DROP TABLE test_grouping_sets_predicate; diff --git a/parser/testdata/02554_format_json_columns_for_empty/ast.json b/parser/testdata/02554_format_json_columns_for_empty/ast.json new file mode 100644 index 000000000..037d5187e --- /dev/null +++ b/parser/testdata/02554_format_json_columns_for_empty/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery json_columns (children 1)" + }, + { + "explain": " Identifier json_columns" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001028292, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/02554_format_json_columns_for_empty/metadata.json b/parser/testdata/02554_format_json_columns_for_empty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02554_format_json_columns_for_empty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02554_format_json_columns_for_empty/query.sql b/parser/testdata/02554_format_json_columns_for_empty/query.sql new file mode 100644 index 000000000..55179ebac --- /dev/null +++ b/parser/testdata/02554_format_json_columns_for_empty/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS json_columns; + +CREATE TABLE json_columns (n UInt32, s String) ENGINE = MergeTree order by n; + +SELECT * FROM json_columns FORMAT JSONColumns; diff --git a/parser/testdata/02554_invalid_create_view_syntax/ast.json b/parser/testdata/02554_invalid_create_view_syntax/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02554_invalid_create_view_syntax/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02554_invalid_create_view_syntax/metadata.json b/parser/testdata/02554_invalid_create_view_syntax/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02554_invalid_create_view_syntax/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02554_invalid_create_view_syntax/query.sql b/parser/testdata/02554_invalid_create_view_syntax/query.sql new file mode 100644 index 000000000..ad6c83cde --- /dev/null +++ b/parser/testdata/02554_invalid_create_view_syntax/query.sql @@ -0,0 +1 @@ +CREATE VIEW X TO Y AS SELECT 1; -- { clientError SYNTAX_ERROR }
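The syntax error above is deliberate: the TO clause names a target table, which ClickHouse accepts only for materialized views, so the parser must reject it on a plain view. A sketch of the form that does parse (assuming a target table Y with a matching schema; not part of the test file):

CREATE TABLE Y (x UInt8) ENGINE = MergeTree ORDER BY x;
CREATE MATERIALIZED VIEW X TO Y AS SELECT 1 AS x;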
diff --git a/parser/testdata/02554_log_faminy_support_storage_policy/ast.json b/parser/testdata/02554_log_faminy_support_storage_policy/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02554_log_faminy_support_storage_policy/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02554_log_faminy_support_storage_policy/metadata.json b/parser/testdata/02554_log_faminy_support_storage_policy/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02554_log_faminy_support_storage_policy/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02554_log_faminy_support_storage_policy/query.sql b/parser/testdata/02554_log_faminy_support_storage_policy/query.sql new file mode 100644 index 000000000..bdddf5e27 --- /dev/null +++ b/parser/testdata/02554_log_faminy_support_storage_policy/query.sql @@ -0,0 +1,27 @@ +-- Tags: no-fasttest, log-engine + +DROP TABLE IF EXISTS test_2554_log; +CREATE TABLE test_2554_log (n UInt32) ENGINE = Log SETTINGS storage_policy = 'default'; + +INSERT INTO test_2554_log SELECT 1; +SELECT * FROM test_2554_log; + +DROP TABLE test_2554_log; + +DROP TABLE IF EXISTS test_2554_tinylog; +CREATE TABLE test_2554_tinylog (n UInt32) ENGINE = TinyLog SETTINGS storage_policy = 'default'; + +INSERT INTO test_2554_tinylog SELECT 1; +SELECT * FROM test_2554_tinylog; + +DROP TABLE test_2554_tinylog; + +DROP TABLE IF EXISTS test_2554_stripelog; +CREATE TABLE test_2554_stripelog (n UInt32) ENGINE = StripeLog SETTINGS storage_policy = 's3_cache'; + +INSERT INTO test_2554_stripelog SELECT 1; +SELECT * FROM test_2554_stripelog; + +DROP TABLE test_2554_stripelog; + +CREATE TABLE test_2554_error (n UInt32) ENGINE = Log SETTINGS disk = 'default', storage_policy = 'default'; -- { serverError INVALID_SETTING_VALUE } diff --git a/parser/testdata/02554_rewrite_count_distinct_if_with_count_distinct_implementation/ast.json b/parser/testdata/02554_rewrite_count_distinct_if_with_count_distinct_implementation/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02554_rewrite_count_distinct_if_with_count_distinct_implementation/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02554_rewrite_count_distinct_if_with_count_distinct_implementation/metadata.json b/parser/testdata/02554_rewrite_count_distinct_if_with_count_distinct_implementation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02554_rewrite_count_distinct_if_with_count_distinct_implementation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02554_rewrite_count_distinct_if_with_count_distinct_implementation/query.sql b/parser/testdata/02554_rewrite_count_distinct_if_with_count_distinct_implementation/query.sql new file mode 100644 index 000000000..a81f53c1e --- /dev/null +++ b/parser/testdata/02554_rewrite_count_distinct_if_with_count_distinct_implementation/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-parallel +SELECT countDistinctIf(number % 10, number % 5 = 2) FROM numbers(1000); +EXPLAIN SYNTAX SELECT countDistinctIf(number % 10, number % 5 = 2) FROM numbers(1000); + +-- disabled by default; enable it explicitly +SET rewrite_count_distinct_if_with_count_distinct_implementation = 1; +SELECT countDistinctIf(number % 10, number % 5 = 2) FROM numbers(1000); +EXPLAIN SYNTAX SELECT countDistinctIf(number % 10, number % 5 = 2) FROM numbers(1000);
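For context on what the second EXPLAIN SYNTAX is expected to show: with the setting enabled, ClickHouse rewrites countDistinctIf into the If-combinator form of the configured count_distinct_implementation, which defaults to uniqExact. A hedged sketch of the before/after shapes (illustrative only, not part of the test file):

-- setting disabled: the query keeps its original form
SELECT countDistinctIf(number % 10, number % 5 = 2) FROM numbers(1000);
-- setting enabled: EXPLAIN SYNTAX should print the rewritten aggregate
SELECT uniqExactIf(number % 10, number % 5 = 2) FROM numbers(1000);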
diff --git a/parser/testdata/02559_add_parts/ast.json b/parser/testdata/02559_add_parts/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02559_add_parts/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02559_add_parts/metadata.json b/parser/testdata/02559_add_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02559_add_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02559_add_parts/query.sql b/parser/testdata/02559_add_parts/query.sql new file mode 100644 index 000000000..9f4e85a32 --- /dev/null +++ b/parser/testdata/02559_add_parts/query.sql @@ -0,0 +1,20 @@ +-- Check MergeTree declaration in new format +CREATE TABLE check_system_tables + ( + name1 UInt8, + name2 UInt8, + name3 UInt8 + ) ENGINE = MergeTree() + ORDER BY name1 + PARTITION BY name2 + SAMPLE BY name1 + SETTINGS min_bytes_for_wide_part = 0, compress_marks=false, compress_primary_key=false; + +SELECT parts, active_parts, total_marks FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +INSERT INTO check_system_tables VALUES (1, 1, 1); +SELECT parts, active_parts, total_marks FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +INSERT INTO check_system_tables VALUES (1, 2, 1); +SELECT parts, active_parts, total_marks FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +ALTER TABLE check_system_tables DETACH PARTITION 1; +SELECT parts, active_parts, total_marks FROM system.tables WHERE name = 'check_system_tables' AND database = currentDatabase(); +DROP TABLE IF EXISTS check_system_tables; diff --git a/parser/testdata/02559_ip_types_bloom/ast.json b/parser/testdata/02559_ip_types_bloom/ast.json new file mode 100644 index 000000000..b0f257277 --- /dev/null +++ b/parser/testdata/02559_ip_types_bloom/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ip_bloom (children 1)" + }, + { + "explain": " Identifier ip_bloom" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001097204, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/02559_ip_types_bloom/metadata.json b/parser/testdata/02559_ip_types_bloom/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02559_ip_types_bloom/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02559_ip_types_bloom/query.sql b/parser/testdata/02559_ip_types_bloom/query.sql new file mode 100644 index 000000000..b3fc16deb --- /dev/null +++ b/parser/testdata/02559_ip_types_bloom/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS ip_bloom; + +CREATE TABLE ip_bloom +( + `a` UInt32, + `ip4` Nullable(IPv4), + `ip6` Nullable(IPv6), + INDEX x4 ip4 TYPE bloom_filter(0.1) GRANULARITY 3, + INDEX x6 ip6 TYPE bloom_filter(0.1) GRANULARITY 3 +) +ENGINE = MergeTree +ORDER BY a; + +INSERT INTO ip_bloom VALUES (1, '1.1.1.1', '::1'); + +SELECT * FROM ip_bloom; + +DROP TABLE ip_bloom; diff --git a/parser/testdata/02559_multiple_read_steps_in_prewhere/ast.json b/parser/testdata/02559_multiple_read_steps_in_prewhere/ast.json new file mode 100644 index 000000000..9c635f22d --- /dev/null +++ b/parser/testdata/02559_multiple_read_steps_in_prewhere/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_02559 (children 1)" + }, + { + "explain": " Identifier test_02559" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001196255, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02559_multiple_read_steps_in_prewhere/metadata.json b/parser/testdata/02559_multiple_read_steps_in_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02559_multiple_read_steps_in_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02559_multiple_read_steps_in_prewhere/query.sql b/parser/testdata/02559_multiple_read_steps_in_prewhere/query.sql new file mode 100644 index 000000000..020b80cda --- /dev/null +++ b/parser/testdata/02559_multiple_read_steps_in_prewhere/query.sql @@ -0,0 +1,59 @@ +DROP TABLE IF EXISTS test_02559; + +CREATE TABLE test_02559 (id1 UInt64, id2 UInt64) ENGINE=MergeTree ORDER BY id1 SETTINGS min_bytes_for_wide_part =
0; + +INSERT INTO test_02559 SELECT number, number FROM numbers(10); + +DROP ROW POLICY IF EXISTS 02559_filter_1 ON test_02559; +DROP ROW POLICY IF EXISTS 02559_filter_2 ON test_02559; + +SET enable_multiple_prewhere_read_steps=true, move_all_conditions_to_prewhere=true; + +-- { echoOn } + +SELECT cast(id1 as UInt16) AS id16 FROM test_02559 PREWHERE id16 and (id2 % 40000) LIMIT 10; + +SELECT cast(id1 as UInt16) AS cond1, (id2 % 40000) AS cond2, (cond1 AND cond2) AS cond FROM test_02559 PREWHERE cond LIMIT 10; + +SELECT cast(id1 as UInt16) AS cond1, (if(id2 > 3, id2, NULL) % 40000) AS cond2, (cond1 AND cond2) AS cond FROM test_02559 PREWHERE cond LIMIT 10; + +SELECT cast(id1 as UInt16) AS cond1, (id2 % 40000) AS cond2, (cond1 AND cond2) AS cond FROM test_02559 PREWHERE cond AND id2 > 4 LIMIT 10; + +SELECT cast(id1 as UInt16) AS cond1, (id2 % 40000) AS cond2, (cond1 AND cond2) AS cond FROM test_02559 PREWHERE id2 > 5 AND cond LIMIT 10; + +SELECT cast(id1 as UInt16) AS cond1, (id2 % 40000) AS cond2, (cond1 AND cond2) AS cond FROM test_02559 PREWHERE cond1 AND id2 > 6 AND cond2 LIMIT 10; + +SELECT cast(id1 as UInt16) AS cond1 FROM test_02559 PREWHERE cond1 LIMIT 10; + +SELECT * FROM test_02559 PREWHERE id1 <= 3 AND id2 > 0 WHERE (id1 + id2 < 15) LIMIT 10; + +SELECT count() FROM test_02559 PREWHERE id2>=0 AND (1 OR ignore(id1)) WHERE ignore(id1)=0; + +SELECT count() FROM test_02559 PREWHERE ignore(id1); + +SELECT count() FROM test_02559 PREWHERE 1 OR ignore(id1); + +SELECT count() FROM test_02559 PREWHERE ignore(id1) AND id2 > 0; + +SELECT count() FROM test_02559 PREWHERE (1 OR ignore(id1)) AND id2 > 0; + +SELECT count() FROM test_02559 PREWHERE (id1 <= 10 AND id2 > 0) AND ignore(id1); + +SELECT count() FROM test_02559 PREWHERE ignore(id1) AND (id1 <= 10 AND id2 > 0); + +SELECT count() FROM test_02559 PREWHERE (id1 <= 10 AND id2 > 0) AND (1 OR ignore(id1)); + +SELECT count() FROM test_02559 PREWHERE (1 OR ignore(id1)) AND (id1 <= 10 AND id2 > 0); + +CREATE ROW POLICY 02559_filter_1 ON test_02559 USING id2=2 AS permissive TO ALL; +SELECT * FROM test_02559; + +CREATE ROW POLICY 02559_filter_2 ON test_02559 USING id2<=2 AS restrictive TO ALL; +SELECT * FROM test_02559; + +-- { echoOff } + +DROP ROW POLICY IF EXISTS 02559_filter_1 ON test_02559; +DROP ROW POLICY IF EXISTS 02559_filter_2 ON test_02559; + +DROP TABLE test_02559; diff --git a/parser/testdata/02559_multiple_read_steps_in_prewhere_fuzz/ast.json b/parser/testdata/02559_multiple_read_steps_in_prewhere_fuzz/ast.json new file mode 100644 index 000000000..508538e20 --- /dev/null +++ b/parser/testdata/02559_multiple_read_steps_in_prewhere_fuzz/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test_02559__fuzz_20 (children 3)" + }, + { + "explain": " Identifier test_02559__fuzz_20" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration id1 (children 1)" + }, + { + "explain": " DataType Int16" + }, + { + "explain": " ColumnDeclaration id2 (children 1)" + }, + { + "explain": " DataType Decimal (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_18" + }, + { + "explain": " Literal UInt64_14" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier id1" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001357018, + 
"rows_read": 14, + "bytes_read": 519 + } +} diff --git a/parser/testdata/02559_multiple_read_steps_in_prewhere_fuzz/metadata.json b/parser/testdata/02559_multiple_read_steps_in_prewhere_fuzz/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02559_multiple_read_steps_in_prewhere_fuzz/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02559_multiple_read_steps_in_prewhere_fuzz/query.sql b/parser/testdata/02559_multiple_read_steps_in_prewhere_fuzz/query.sql new file mode 100644 index 000000000..20f159cbe --- /dev/null +++ b/parser/testdata/02559_multiple_read_steps_in_prewhere_fuzz/query.sql @@ -0,0 +1,7 @@ +CREATE TABLE test_02559__fuzz_20(`id1` Int16, `id2` Decimal(18, 14)) ENGINE = MergeTree ORDER BY id1; + +INSERT INTO test_02559__fuzz_20 SELECT number, number FROM numbers(10); + +SET enable_multiple_prewhere_read_steps=true, move_all_conditions_to_prewhere=true; + +SELECT count() FROM test_02559__fuzz_20 PREWHERE (id2 >= 104) AND ((-9223372036854775808 OR (inf OR -2147483649 OR NULL) OR NULL) OR 1 OR ignore(ignore(id1) OR NULL, id1)) WHERE ignore(id1) = 0; diff --git a/parser/testdata/02559_multiple_read_steps_in_prewhere_missing_columns/ast.json b/parser/testdata/02559_multiple_read_steps_in_prewhere_missing_columns/ast.json new file mode 100644 index 000000000..d7a91bba9 --- /dev/null +++ b/parser/testdata/02559_multiple_read_steps_in_prewhere_missing_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_02559 (children 1)" + }, + { + "explain": " Identifier test_02559" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001015066, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02559_multiple_read_steps_in_prewhere_missing_columns/metadata.json b/parser/testdata/02559_multiple_read_steps_in_prewhere_missing_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02559_multiple_read_steps_in_prewhere_missing_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02559_multiple_read_steps_in_prewhere_missing_columns/query.sql b/parser/testdata/02559_multiple_read_steps_in_prewhere_missing_columns/query.sql new file mode 100644 index 000000000..f6299122e --- /dev/null +++ b/parser/testdata/02559_multiple_read_steps_in_prewhere_missing_columns/query.sql @@ -0,0 +1,30 @@ +DROP TABLE IF EXISTS test_02559; +CREATE TABLE test_02559 (x UInt8, s String) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO test_02559 VALUES (1, 'Hello, world!'); + +ALTER TABLE test_02559 ADD COLUMN y UInt8 DEFAULT 0; +INSERT INTO test_02559 VALUES (2, 'Goodbye.', 3); +SELECT * FROM test_02559 ORDER BY x; + +SET enable_multiple_prewhere_read_steps=true, move_all_conditions_to_prewhere=true; + +-- { echoOn } +SELECT s FROM test_02559 PREWHERE x AND y ORDER BY s; +SELECT s, y FROM test_02559 PREWHERE y ORDER BY s; +SELECT s, y FROM test_02559 PREWHERE NOT y ORDER BY s; +SELECT s, y FROM test_02559 PREWHERE x AND y ORDER BY s; +SELECT s, y FROM test_02559 PREWHERE x AND NOT y ORDER BY s; +SELECT s, y FROM test_02559 PREWHERE y AND x ORDER BY s; +SELECT s, y FROM test_02559 PREWHERE (NOT y) AND x ORDER BY s; + +ALTER TABLE test_02559 ADD COLUMN z UInt8 DEFAULT 10; +INSERT INTO test_02559 VALUES (3, 'So long, and thanks for all the fish.', 42, 0); +SELECT * FROM test_02559 ORDER BY x; + +SELECT s FROM test_02559 PREWHERE z ORDER BY s; +SELECT s 
FROM test_02559 PREWHERE y AND z ORDER BY s; +SELECT s, z FROM test_02559 PREWHERE NOT y AND z ORDER BY s; +-- { echoOff } + +DROP TABLE test_02559; diff --git a/parser/testdata/02559_multiple_read_steps_in_prewhere_missing_columns_2/ast.json b/parser/testdata/02559_multiple_read_steps_in_prewhere_missing_columns_2/ast.json new file mode 100644 index 000000000..b15e3a9e0 --- /dev/null +++ b/parser/testdata/02559_multiple_read_steps_in_prewhere_missing_columns_2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_02559 (children 1)" + }, + { + "explain": " Identifier t_02559" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001229552, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/02559_multiple_read_steps_in_prewhere_missing_columns_2/metadata.json b/parser/testdata/02559_multiple_read_steps_in_prewhere_missing_columns_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02559_multiple_read_steps_in_prewhere_missing_columns_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02559_multiple_read_steps_in_prewhere_missing_columns_2/query.sql b/parser/testdata/02559_multiple_read_steps_in_prewhere_missing_columns_2/query.sql new file mode 100644 index 000000000..0791d3a53 --- /dev/null +++ b/parser/testdata/02559_multiple_read_steps_in_prewhere_missing_columns_2/query.sql @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS t_02559; + +CREATE TABLE t_02559 ( + key UInt64, + value Array(String)) +ENGINE = MergeTree +ORDER BY key +SETTINGS index_granularity=400, min_bytes_for_wide_part=0; + +INSERT INTO t_02559 SELECT number, +if (number < 100 OR number > 1000, + [toString(number)], + emptyArrayString()) + FROM numbers(2000); + +SET enable_multiple_prewhere_read_steps=1, move_all_conditions_to_prewhere=1; + +SELECT * FROM t_02559 +WHERE (key < 5 OR key > 500) + AND NOT has(value, toString(key)) + AND length(value) == 1 +LIMIT 10 +SETTINGS max_block_size = 81, + max_threads = 1; + +DROP TABLE IF EXISTS t_02559; diff --git a/parser/testdata/02559_multiple_read_steps_in_prewhere_reuse_computation/ast.json b/parser/testdata/02559_multiple_read_steps_in_prewhere_reuse_computation/ast.json new file mode 100644 index 000000000..839489d25 --- /dev/null +++ b/parser/testdata/02559_multiple_read_steps_in_prewhere_reuse_computation/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_02559 (children 1)" + }, + { + "explain": " Identifier t_02559" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001271582, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/02559_multiple_read_steps_in_prewhere_reuse_computation/metadata.json b/parser/testdata/02559_multiple_read_steps_in_prewhere_reuse_computation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02559_multiple_read_steps_in_prewhere_reuse_computation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02559_multiple_read_steps_in_prewhere_reuse_computation/query.sql b/parser/testdata/02559_multiple_read_steps_in_prewhere_reuse_computation/query.sql new file mode 100644 index 000000000..544f5f03c --- /dev/null +++ b/parser/testdata/02559_multiple_read_steps_in_prewhere_reuse_computation/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS t_02559; +CREATE TABLE t_02559 (a Int64, b Int64, c 
Int64) ENGINE = MergeTree ORDER BY a; + +INSERT INTO t_02559 SELECT number, number, number FROM numbers(3); + +SET enable_multiple_prewhere_read_steps = 1; + +-- { echoOn } + +SELECT a FROM t_02559 PREWHERE sin(a) < b AND sin(a) < c; +SELECT sin(a) > 2 FROM t_02559 PREWHERE sin(a) < b AND sin(a) < c; +SELECT sin(a) < a FROM t_02559 PREWHERE sin(a) < b AND sin(a) < c AND sin(a) > -a; +SELECT sin(a) < a FROM t_02559 PREWHERE sin(a) < b AND a <= c AND sin(a) > -a; + +-- {echoOff} + +DROP TABLE t_02559; diff --git a/parser/testdata/02559_nested_multiple_levels_default/ast.json b/parser/testdata/02559_nested_multiple_levels_default/ast.json new file mode 100644 index 000000000..fd96ed89d --- /dev/null +++ b/parser/testdata/02559_nested_multiple_levels_default/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_compact (children 1)" + }, + { + "explain": " Identifier data_compact" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00132537, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/02559_nested_multiple_levels_default/metadata.json b/parser/testdata/02559_nested_multiple_levels_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02559_nested_multiple_levels_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02559_nested_multiple_levels_default/query.sql b/parser/testdata/02559_nested_multiple_levels_default/query.sql new file mode 100644 index 000000000..9dcdab82a --- /dev/null +++ b/parser/testdata/02559_nested_multiple_levels_default/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS data_compact; +DROP TABLE IF EXISTS data_memory; +DROP TABLE IF EXISTS data_wide; + +-- compact +DROP TABLE IF EXISTS data_compact; +CREATE TABLE data_compact +( + `root.array` Array(UInt8), +) +ENGINE = MergeTree() +ORDER BY tuple() +SETTINGS min_rows_for_wide_part=100, min_bytes_for_wide_part=1e9; +INSERT INTO data_compact VALUES ([0]); +ALTER TABLE data_compact ADD COLUMN root.nested_array Array(Array(UInt8)); +SELECT table, part_type FROM system.parts WHERE table = 'data_compact' AND database = currentDatabase(); +SELECT root.nested_array FROM data_compact; + +-- wide +DROP TABLE IF EXISTS data_wide; +CREATE TABLE data_wide +( + `root.array` Array(UInt8), +) +ENGINE = MergeTree() +ORDER BY tuple() +SETTINGS min_rows_for_wide_part=0, min_bytes_for_wide_part=0; +INSERT INTO data_wide VALUES ([0]); +ALTER TABLE data_wide ADD COLUMN root.nested_array Array(Array(UInt8)); +SELECT table, part_type FROM system.parts WHERE table = 'data_wide' AND database = currentDatabase(); +SELECT root.nested_array FROM data_wide; diff --git a/parser/testdata/02560_agg_state_deserialization_hash_table_crash/ast.json b/parser/testdata/02560_agg_state_deserialization_hash_table_crash/ast.json new file mode 100644 index 000000000..a36aef173 --- /dev/null +++ b/parser/testdata/02560_agg_state_deserialization_hash_table_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00128596, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02560_agg_state_deserialization_hash_table_crash/metadata.json b/parser/testdata/02560_agg_state_deserialization_hash_table_crash/metadata.json new file mode 100644 
index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02560_agg_state_deserialization_hash_table_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02560_agg_state_deserialization_hash_table_crash/query.sql b/parser/testdata/02560_agg_state_deserialization_hash_table_crash/query.sql new file mode 100644 index 000000000..c9777ed31 --- /dev/null +++ b/parser/testdata/02560_agg_state_deserialization_hash_table_crash/query.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS tab; +create table tab (d Int64, s AggregateFunction(groupUniqArrayArray, Array(UInt64)), c SimpleAggregateFunction(groupUniqArrayArray, Array(UInt64))) engine = SummingMergeTree() order by d; +INSERT INTO tab VALUES (1, 'このコー'); -- { error TOO_LARGE_ARRAY_SIZE } +DROP TABLE tab; diff --git a/parser/testdata/02560_analyzer_materialized_view/ast.json b/parser/testdata/02560_analyzer_materialized_view/ast.json new file mode 100644 index 000000000..6d7834aa2 --- /dev/null +++ b/parser/testdata/02560_analyzer_materialized_view/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001306266, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02560_analyzer_materialized_view/metadata.json b/parser/testdata/02560_analyzer_materialized_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02560_analyzer_materialized_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02560_analyzer_materialized_view/query.sql b/parser/testdata/02560_analyzer_materialized_view/query.sql new file mode 100644 index 000000000..3fdef366d --- /dev/null +++ b/parser/testdata/02560_analyzer_materialized_view/query.sql @@ -0,0 +1,41 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id; + +DROP VIEW IF EXISTS test_materialized_view; +CREATE MATERIALIZED VIEW test_materialized_view +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id AS SELECT id, value FROM test_table; + +INSERT INTO test_table VALUES (0, 'Value_0'); +SELECT id, value FROM test_materialized_view ORDER BY id; + +SELECT '--'; + +INSERT INTO test_table VALUES (1, 'Value_1'); +SELECT id, value FROM test_materialized_view ORDER BY id; + +DROP TABLE IF EXISTS test_table_data; +CREATE TABLE test_table_data +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table_data VALUES (2, 'Value_2'), (3, 'Value_3'); + +SELECT '--'; + +INSERT INTO test_table SELECT id, value FROM test_table_data; +SELECT id, value FROM test_materialized_view ORDER BY id; + +DROP TABLE test_table_data; +DROP VIEW test_materialized_view; +DROP TABLE test_table; diff --git a/parser/testdata/02560_count_digits/ast.json b/parser/testdata/02560_count_digits/ast.json new file mode 100644 index 000000000..b751dc74b --- /dev/null +++ b/parser/testdata/02560_count_digits/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function countDigits (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 
UInt64_0" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001282067, + "rows_read": 7, + "bytes_read": 264 + } +} diff --git a/parser/testdata/02560_count_digits/metadata.json b/parser/testdata/02560_count_digits/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02560_count_digits/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02560_count_digits/query.sql b/parser/testdata/02560_count_digits/query.sql new file mode 100644 index 000000000..19f5403bd --- /dev/null +++ b/parser/testdata/02560_count_digits/query.sql @@ -0,0 +1,22 @@ +SELECT countDigits(0); +SELECT countDigits(1); +SELECT countDigits(-1); +SELECT countDigits(12345); +SELECT countDigits(-12345); +SELECT countDigits(0xFFFFFFFFFFFFFFFF); +SELECT countDigits(CAST(0x8000000000000000 AS Int64)); +SELECT countDigits(CAST(-1 AS UInt128)); +SELECT countDigits(CAST(-1 AS UInt256)); +SELECT countDigits(CAST(CAST(-1 AS UInt128) DIV 2 + 1 AS Int128)); +SELECT countDigits(CAST(CAST(-1 AS UInt256) DIV 2 + 1 AS Int256)); + +SELECT countDigits(-123.45678::Decimal32(5)); +SELECT countDigits(-123.456789::Decimal64(6)); +SELECT countDigits(-123.4567890::Decimal128(7)); +SELECT countDigits(-123.45678901::Decimal256(8)); + +-- this behavior can be surprising, but actually reasonable: +SELECT countDigits(-123.456::Decimal32(5)); +SELECT countDigits(-123.4567::Decimal64(6)); +SELECT countDigits(-123.45678::Decimal128(7)); +SELECT countDigits(-123.456789::Decimal256(8)); diff --git a/parser/testdata/02560_null_as_default/ast.json b/parser/testdata/02560_null_as_default/ast.json new file mode 100644 index 000000000..d092d84f2 --- /dev/null +++ b/parser/testdata/02560_null_as_default/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001198217, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02560_null_as_default/metadata.json b/parser/testdata/02560_null_as_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02560_null_as_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02560_null_as_default/query.sql b/parser/testdata/02560_null_as_default/query.sql new file mode 100644 index 000000000..1aaa2c41a --- /dev/null +++ b/parser/testdata/02560_null_as_default/query.sql @@ -0,0 +1,12 @@ +drop table if exists test; +create table test (x UInt64) engine=Memory(); +set insert_null_as_default=1; +insert into test select number % 2 ? NULL : 42 as x from numbers(2); +select * from test order by x; +drop table test; + +create table test (x LowCardinality(String) default 'Hello') engine=Memory(); +insert into test select (number % 2 ? 
NULL : 'World')::LowCardinality(Nullable(String)) from numbers(2); +select * from test order by x; +drop table test; + diff --git a/parser/testdata/02560_quantile_min_max/ast.json b/parser/testdata/02560_quantile_min_max/ast.json new file mode 100644 index 000000000..ace64eda9 --- /dev/null +++ b/parser/testdata/02560_quantile_min_max/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nums (children 1)" + }, + { + "explain": " Identifier nums" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001320352, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02560_quantile_min_max/metadata.json b/parser/testdata/02560_quantile_min_max/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02560_quantile_min_max/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02560_quantile_min_max/query.sql b/parser/testdata/02560_quantile_min_max/query.sql new file mode 100644 index 000000000..94a16508a --- /dev/null +++ b/parser/testdata/02560_quantile_min_max/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS nums; + +CREATE TABLE nums(n UInt32) ENGINE = Memory; + +INSERT INTO nums VALUES (4),(2),(1),(3); + +SELECT quantilesExactExclusive(0.1, 0.9)(n) FROM nums; +SELECT quantilesExactInclusive(0, 1)(n) FROM nums; + +DROP TABLE nums; diff --git a/parser/testdata/02560_regexp_denial_of_service/ast.json b/parser/testdata/02560_regexp_denial_of_service/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02560_regexp_denial_of_service/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02560_regexp_denial_of_service/metadata.json b/parser/testdata/02560_regexp_denial_of_service/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02560_regexp_denial_of_service/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02560_regexp_denial_of_service/query.sql b/parser/testdata/02560_regexp_denial_of_service/query.sql new file mode 100644 index 000000000..3a02c12c6 --- /dev/null +++ b/parser/testdata/02560_regexp_denial_of_service/query.sql @@ -0,0 +1,58 @@ +-- Tags: no-fasttest, use-vectorscan + +DROP TABLE IF EXISTS t; + +-- test that the check which rejects hyperscan regexes with too big bounded repeats works + +-- {n} +SELECT multiMatchAny('test', ['.{51}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny('test', ['.{ 51}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny('test', ['.{51 }']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny('test', ['prefix.{51}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny('test', ['.{51}.suffix']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny('test', ['.{4,4}midfix{51}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } + +-- {n,} +SELECT multiMatchAny('test', ['.{51,}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny('test', ['.{ 51,}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny('test', ['.{51 ,}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny('test', ['.{51, }']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny('test', ['prefix.{51,}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny('test', ['.{51,}.suffix']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } 
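+-- All of the rejected patterns in this file use a bounded repeat reaching 51, which suggests the cutoff sits at 50 (an assumption, not spelled out here); the accepted side of the boundary would look like: +-- SELECT multiMatchAny('test', ['.{50}']); -- presumably passes the check and returns 0, since 'test' is shorter than 50 characters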
+SELECT multiMatchAny('test', ['.{4,4}midfix{51,}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } + +-- {n,m} +SELECT multiMatchAny('test', ['.{1,51}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny('test', ['.{51,52}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny('test', ['.{ 51,52}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny('test', ['.{51 ,52}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny('test', ['.{51, 52}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny('test', ['.{51,52 }']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny('test', ['prefix.{1,51}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny('test', ['.{1,51}.suffix']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny('test', ['.{4,4}midfix{1,51}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } + +-- test that the check is implemented in all functions which use vectorscan + +CREATE TABLE t(c String) Engine=MergeTree() ORDER BY c; +INSERT INTO t VALUES('Hallo Welt'); + +SELECT multiMatchAny('Hallo Welt', ['.{51}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAny(c, ['.{51}']) FROM t; -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } + +SELECT multiMatchAnyIndex('Hallo Welt', ['.{51}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAnyIndex(c, ['.{51}']) FROM t; -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } + +SELECT multiMatchAllIndices('Hallo Welt', ['.{51}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiMatchAllIndices(c, ['.{51}']) FROM t; -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } + +SELECT multiFuzzyMatchAny('Hallo Welt', 1, ['.{51}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiFuzzyMatchAny(c, 1, ['.{51}']) FROM t; -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } + +SELECT multiFuzzyMatchAnyIndex('Hallo Welt', 1, ['.{51}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiFuzzyMatchAnyIndex(c, 1, ['.{51}']) FROM t; -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } + +SELECT multiFuzzyMatchAllIndices('Hallo Welt', 1, ['.{51}']); -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } +SELECT multiFuzzyMatchAllIndices(c, 1, ['.{51}']) FROM t; -- { serverError HYPERSCAN_CANNOT_SCAN_TEXT } + +DROP TABLE t; diff --git a/parser/testdata/02560_vertical_merge_memory_usage/ast.json b/parser/testdata/02560_vertical_merge_memory_usage/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02560_vertical_merge_memory_usage/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02560_vertical_merge_memory_usage/metadata.json b/parser/testdata/02560_vertical_merge_memory_usage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02560_vertical_merge_memory_usage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02560_vertical_merge_memory_usage/query.sql b/parser/testdata/02560_vertical_merge_memory_usage/query.sql new file mode 100644 index 000000000..f6763f1e5 --- /dev/null +++ b/parser/testdata/02560_vertical_merge_memory_usage/query.sql @@ -0,0 +1,13 @@ +-- Tags: no-object-storage +drop table if exists tvm; +create table tvm (c0 UInt64, c1 UInt64, c2 UInt64, c3 UInt64, c4 UInt64, c5 UInt64, c6 UInt64, c7 UInt64, c8 UInt64, c9 UInt64, c10 UInt64, c11 UInt64, c12 UInt64, c13 UInt64, c14 UInt64, c15 UInt64, c16 UInt64, c17 UInt64, c18 UInt64, c19 UInt64, c20 UInt64, c21 
UInt64, c22 UInt64, c23 UInt64, c24 UInt64, c25 UInt64, c26 UInt64, c27 UInt64, c28 UInt64, c29 UInt64, c30 UInt64, c31 UInt64, c32 UInt64, c33 UInt64, c34 UInt64, c35 UInt64, c36 UInt64, c37 UInt64, c38 UInt64, c39 UInt64, c40 UInt64, c41 UInt64, c42 UInt64, c43 UInt64, c44 UInt64, c45 UInt64, c46 UInt64, c47 UInt64, c48 UInt64, c49 UInt64, c50 UInt64, c51 UInt64, c52 UInt64, c53 UInt64, c54 UInt64, c55 UInt64, c56 UInt64, c57 UInt64, c58 UInt64, c59 UInt64, c60 UInt64, c61 UInt64, c62 UInt64, c63 UInt64, c64 UInt64, c65 UInt64, c66 UInt64, c67 UInt64, c68 UInt64, c69 UInt64, c70 UInt64, c71 UInt64, c72 UInt64, c73 UInt64, c74 UInt64, c75 UInt64, c76 UInt64, c77 UInt64, c78 UInt64, c79 UInt64, c80 UInt64, c81 UInt64, c82 UInt64, c83 UInt64, c84 UInt64, c85 UInt64, c86 UInt64, c87 UInt64, c88 UInt64, c89 UInt64, c90 UInt64, c91 UInt64, c92 UInt64, c93 UInt64, c94 UInt64, c95 UInt64, c96 UInt64, c97 UInt64, c98 UInt64, c99 UInt64, c100 UInt64, c101 UInt64, c102 UInt64, c103 UInt64, c104 UInt64, c105 UInt64, c106 UInt64, c107 UInt64, c108 UInt64, c109 UInt64, c110 UInt64, c111 UInt64, c112 UInt64, c113 UInt64, c114 UInt64, c115 UInt64, c116 UInt64, c117 UInt64, c118 UInt64, c119 UInt64, c120 UInt64, c121 UInt64, c122 UInt64, c123 UInt64, c124 UInt64, c125 UInt64, c126 UInt64, c127 UInt64, c128 UInt64, c129 UInt64, c130 UInt64, c131 UInt64, c132 UInt64, c133 UInt64, c134 UInt64, c135 UInt64, c136 UInt64, c137 UInt64, c138 UInt64, c139 UInt64, c140 UInt64, c141 UInt64, c142 UInt64, c143 UInt64, c144 UInt64, c145 UInt64, c146 UInt64, c147 UInt64, c148 UInt64, c149 UInt64, c150 UInt64, c151 UInt64, c152 UInt64, c153 UInt64, c154 UInt64, c155 UInt64, c156 UInt64, c157 UInt64, c158 UInt64, c159 UInt64, c160 UInt64, c161 UInt64, c162 UInt64, c163 UInt64, c164 UInt64, c165 UInt64, c166 UInt64, c167 UInt64, c168 UInt64, c169 UInt64, c170 UInt64, c171 UInt64, c172 UInt64, c173 UInt64, c174 UInt64, c175 UInt64, c176 UInt64, c177 UInt64, c178 UInt64, c179 UInt64, c180 UInt64, c181 UInt64, c182 UInt64, c183 UInt64, c184 UInt64, c185 UInt64, c186 UInt64, c187 UInt64, c188 UInt64, c189 UInt64, c190 UInt64, c191 UInt64, c192 UInt64, c193 UInt64, c194 UInt64, c195 UInt64, c196 UInt64, c197 UInt64, c198 UInt64, c199 UInt64, c200 UInt64, c201 UInt64, c202 UInt64, c203 UInt64, c204 UInt64, c205 UInt64, c206 UInt64, c207 UInt64, c208 UInt64, c209 UInt64, c210 UInt64, c211 UInt64, c212 UInt64, c213 UInt64, c214 UInt64, c215 UInt64, c216 UInt64, c217 UInt64, c218 UInt64, c219 UInt64, c220 UInt64, c221 UInt64, c222 UInt64, c223 UInt64, c224 UInt64, c225 UInt64, c226 UInt64, c227 UInt64, c228 UInt64, c229 UInt64, c230 UInt64, c231 UInt64, c232 UInt64, c233 UInt64, c234 UInt64, c235 UInt64, c236 UInt64, c237 UInt64, c238 UInt64, c239 UInt64, c240 UInt64, c241 UInt64, c242 UInt64, c243 UInt64, c244 UInt64, c245 UInt64, c246 UInt64, c247 UInt64, c248 UInt64, c249 UInt64, c250 UInt64, c251 UInt64, c252 UInt64, c253 UInt64, c254 UInt64, c255 UInt64, c256 UInt64, c257 UInt64, c258 UInt64, c259 UInt64, c260 UInt64, c261 UInt64, c262 UInt64, c263 UInt64, c264 UInt64, c265 UInt64, c266 UInt64, c267 UInt64, c268 UInt64, c269 UInt64, c270 UInt64, c271 UInt64, c272 UInt64, c273 UInt64, c274 UInt64, c275 UInt64, c276 UInt64, c277 UInt64, c278 UInt64, c279 UInt64, c280 UInt64, c281 UInt64, c282 UInt64, c283 UInt64, c284 UInt64, c285 UInt64, c286 UInt64, c287 UInt64, c288 UInt64, c289 UInt64, c290 UInt64, c291 UInt64, c292 UInt64, c293 UInt64, c294 UInt64, c295 UInt64, c296 UInt64, c297 UInt64, c298 UInt64, c299 UInt64) engine = 
MergeTree order by tuple() settings min_rows_for_wide_part = 10, min_bytes_for_wide_part=0, vertical_merge_algorithm_min_rows_to_activate=1, max_merge_delayed_streams_for_parallel_write=1000; + +insert into tvm select number + 0, number + 1, number + 2, number + 3, number + 4, number + 5, number + 6, number + 7, number + 8, number + 9, number + 10, number + 11, number + 12, number + 13, number + 14, number + 15, number + 16, number + 17, number + 18, number + 19, number + 20, number + 21, number + 22, number + 23, number + 24, number + 25, number + 26, number + 27, number + 28, number + 29, number + 30, number + 31, number + 32, number + 33, number + 34, number + 35, number + 36, number + 37, number + 38, number + 39, number + 40, number + 41, number + 42, number + 43, number + 44, number + 45, number + 46, number + 47, number + 48, number + 49, number + 50, number + 51, number + 52, number + 53, number + 54, number + 55, number + 56, number + 57, number + 58, number + 59, number + 60, number + 61, number + 62, number + 63, number + 64, number + 65, number + 66, number + 67, number + 68, number + 69, number + 70, number + 71, number + 72, number + 73, number + 74, number + 75, number + 76, number + 77, number + 78, number + 79, number + 80, number + 81, number + 82, number + 83, number + 84, number + 85, number + 86, number + 87, number + 88, number + 89, number + 90, number + 91, number + 92, number + 93, number + 94, number + 95, number + 96, number + 97, number + 98, number + 99, number + 100, number + 101, number + 102, number + 103, number + 104, number + 105, number + 106, number + 107, number + 108, number + 109, number + 110, number + 111, number + 112, number + 113, number + 114, number + 115, number + 116, number + 117, number + 118, number + 119, number + 120, number + 121, number + 122, number + 123, number + 124, number + 125, number + 126, number + 127, number + 128, number + 129, number + 130, number + 131, number + 132, number + 133, number + 134, number + 135, number + 136, number + 137, number + 138, number + 139, number + 140, number + 141, number + 142, number + 143, number + 144, number + 145, number + 146, number + 147, number + 148, number + 149, number + 150, number + 151, number + 152, number + 153, number + 154, number + 155, number + 156, number + 157, number + 158, number + 159, number + 160, number + 161, number + 162, number + 163, number + 164, number + 165, number + 166, number + 167, number + 168, number + 169, number + 170, number + 171, number + 172, number + 173, number + 174, number + 175, number + 176, number + 177, number + 178, number + 179, number + 180, number + 181, number + 182, number + 183, number + 184, number + 185, number + 186, number + 187, number + 188, number + 189, number + 190, number + 191, number + 192, number + 193, number + 194, number + 195, number + 196, number + 197, number + 198, number + 199, number + 200, number + 201, number + 202, number + 203, number + 204, number + 205, number + 206, number + 207, number + 208, number + 209, number + 210, number + 211, number + 212, number + 213, number + 214, number + 215, number + 216, number + 217, number + 218, number + 219, number + 220, number + 221, number + 222, number + 223, number + 224, number + 225, number + 226, number + 227, number + 228, number + 229, number + 230, number + 231, number + 232, number + 233, number + 234, number + 235, number + 236, number + 237, number + 238, number + 239, number + 240, number + 241, number + 242, number + 243, number + 244, number + 245, 
number + 246, number + 247, number + 248, number + 249, number + 250, number + 251, number + 252, number + 253, number + 254, number + 255, number + 256, number + 257, number + 258, number + 259, number + 260, number + 261, number + 262, number + 263, number + 264, number + 265, number + 266, number + 267, number + 268, number + 269, number + 270, number + 271, number + 272, number + 273, number + 274, number + 275, number + 276, number + 277, number + 278, number + 279, number + 280, number + 281, number + 282, number + 283, number + 284, number + 285, number + 286, number + 287, number + 288, number + 289, number + 290, number + 291, number + 292, number + 293, number + 294, number + 295, number + 296, number + 297, number + 298, number + 299 from numbers(20); + +optimize table tvm final; + +system flush logs part_log; +-- should be about 4MB +select formatReadableSize(peak_memory_usage), * from system.part_log where table = 'tvm' and database = currentDatabase() and event_date >= today() - 1 and event_type = 'MergeParts' and peak_memory_usage > 100_000_000 format Vertical; + +drop table tvm; diff --git a/parser/testdata/02560_window_ntile/ast.json b/parser/testdata/02560_window_ntile/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02560_window_ntile/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02560_window_ntile/metadata.json b/parser/testdata/02560_window_ntile/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02560_window_ntile/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02560_window_ntile/query.sql b/parser/testdata/02560_window_ntile/query.sql new file mode 100644 index 000000000..d0e4d557e --- /dev/null +++ b/parser/testdata/02560_window_ntile/query.sql @@ -0,0 +1,28 @@ +-- { echo } + +-- Normal cases +select a, b, ntile(3) over (partition by a order by b rows between unbounded preceding and unbounded following) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); +select a, b, ntile(3) over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); +select a, b, ntile(2) over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); +select a, b, ntile(1) over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); +select a, b, ntile(100) over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); +select a, b, ntile(65535) over (partition by a order by b) from (select 1 as a, number as b from numbers(65535)) limit 100; + + + +-- Bad arguments +select a, b, ntile(3.0) over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError BAD_ARGUMENTS } +select a, b, ntile('2') over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError BAD_ARGUMENTS } +select a, b, ntile(0) over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError BAD_ARGUMENTS } +select a, b, ntile(-2) over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError BAD_ARGUMENTS } +select a, b, ntile(b + 1) over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError BAD_ARGUMENTS } +select a, b, ntile() over 
(partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select a, b, ntile(3, 2) over (partition by a order by b) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +-- Bad window type +select a, b, ntile(2) over (partition by a) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError BAD_ARGUMENTS } +select a, b, ntile(2) over (partition by a order by b rows between 4 preceding and unbounded following) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError BAD_ARGUMENTS } +select a, b, ntile(2) over (partition by a order by b rows between unbounded preceding and 4 following) from(select intDiv(number,10) as a, number%10 as b from numbers(20)); -- { serverError BAD_ARGUMENTS } +select a, b, ntile(2) over (partition by a order by b rows between 4 preceding and 4 following) from(select intDiv(number,10) as a, number%10 as b from numbers(20));; -- { serverError BAD_ARGUMENTS } +select a, b, ntile(2) over (partition by a order by b rows between current row and 4 following) from(select intDiv(number,10) as a, number%10 as b from numbers(20));; -- { serverError BAD_ARGUMENTS } +select a, b, ntile(2) over (partition by a order by b range unbounded preceding) from(select intDiv(number,10) as a, number%10 as b from numbers(20));; -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02560_with_fill_int256_int/ast.json b/parser/testdata/02560_with_fill_int256_int/ast.json new file mode 100644 index 000000000..b85ff9547 --- /dev/null +++ b/parser/testdata/02560_with_fill_int256_int/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal 'Int128'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 3)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_8" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001442098, + "rows_read": 22, + "bytes_read": 825 + } +} diff --git a/parser/testdata/02560_with_fill_int256_int/metadata.json b/parser/testdata/02560_with_fill_int256_int/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02560_with_fill_int256_int/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02560_with_fill_int256_int/query.sql b/parser/testdata/02560_with_fill_int256_int/query.sql new file mode 100644 index 000000000..42647f109 --- /dev/null +++ 
b/parser/testdata/02560_with_fill_int256_int/query.sql @@ -0,0 +1,9 @@ +SELECT (number * 2)::Int128 FROM numbers(10) ORDER BY 1 ASC WITH FILL FROM 3 TO 8; +SELECT (number * 2)::Int256 FROM numbers(10) ORDER BY 1 ASC WITH FILL FROM 3 TO 8; +SELECT (number * 2)::UInt128 FROM numbers(10) ORDER BY 1 ASC WITH FILL FROM 3 TO 8; +SELECT (number * 2)::UInt256 FROM numbers(10) ORDER BY 1 ASC WITH FILL FROM 3 TO 8; + +SELECT (number * 2)::Int128 FROM numbers(10) ORDER BY 1 ASC WITH FILL FROM -3 TO 5; +SELECT (number * 2)::Int256 FROM numbers(10) ORDER BY 1 ASC WITH FILL FROM -3 TO 5; +SELECT (number * 2)::UInt128 FROM numbers(10) ORDER BY 1 ASC WITH FILL FROM -3 TO 5; -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT (number * 2)::UInt256 FROM numbers(10) ORDER BY 1 ASC WITH FILL FROM -3 TO 5; -- { serverError ARGUMENT_OUT_OF_BOUND } diff --git a/parser/testdata/02561_sorting_constants_and_distinct_crash/ast.json b/parser/testdata/02561_sorting_constants_and_distinct_crash/ast.json new file mode 100644 index 000000000..53f45fb18 --- /dev/null +++ b/parser/testdata/02561_sorting_constants_and_distinct_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001396672, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02561_sorting_constants_and_distinct_crash/metadata.json b/parser/testdata/02561_sorting_constants_and_distinct_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02561_sorting_constants_and_distinct_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02561_sorting_constants_and_distinct_crash/query.sql b/parser/testdata/02561_sorting_constants_and_distinct_crash/query.sql new file mode 100644 index 000000000..93c10dce5 --- /dev/null +++ b/parser/testdata/02561_sorting_constants_and_distinct_crash/query.sql @@ -0,0 +1,28 @@ +drop table if exists test_table; +CREATE TABLE test_table (string_value String) ENGINE = MergeTree ORDER BY string_value SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +system stop merges test_table; +insert into test_table select * from ( + select 'test_value_1' + from numbers_mt(250000) + union all + select 'test_value_2' + from numbers_mt(2000000) +) +order by rand(); + +select distinct + 'constant_1' as constant_value, + count(*) over(partition by constant_value, string_value) as value_cnt +from ( + select string_value + from test_table +) +order by all; + +select distinct + 'constant_1' as constant_value, * + from (select string_value from test_table) + ORDER BY constant_value, string_value settings max_threads=1; + +system start merges test_table; +drop table test_table; diff --git a/parser/testdata/02561_with_fill_date_datetime_incompatible/ast.json b/parser/testdata/02561_with_fill_date_datetime_incompatible/ast.json new file mode 100644 index 000000000..827629b1f --- /dev/null +++ b/parser/testdata/02561_with_fill_date_datetime_incompatible/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function today (alias a) (children 1)" + 
}, + { + "explain": " ExpressionList" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.00127604, + "rows_read": 6, + "bytes_read": 225 + } +} diff --git a/parser/testdata/02561_with_fill_date_datetime_incompatible/metadata.json b/parser/testdata/02561_with_fill_date_datetime_incompatible/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02561_with_fill_date_datetime_incompatible/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02561_with_fill_date_datetime_incompatible/query.sql b/parser/testdata/02561_with_fill_date_datetime_incompatible/query.sql new file mode 100644 index 000000000..ed634d66c --- /dev/null +++ b/parser/testdata/02561_with_fill_date_datetime_incompatible/query.sql @@ -0,0 +1,2 @@ +SELECT today() AS a +ORDER BY a ASC WITH FILL FROM now() - toIntervalMonth(1) TO now() + toIntervalDay(1) STEP 82600; -- { serverError INVALID_WITH_FILL_EXPRESSION } diff --git a/parser/testdata/02562_regexp_extract/ast.json b/parser/testdata/02562_regexp_extract/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02562_regexp_extract/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02562_regexp_extract/metadata.json b/parser/testdata/02562_regexp_extract/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02562_regexp_extract/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02562_regexp_extract/query.sql b/parser/testdata/02562_regexp_extract/query.sql new file mode 100644 index 000000000..1ebc9d46c --- /dev/null +++ b/parser/testdata/02562_regexp_extract/query.sql @@ -0,0 +1,62 @@ +-- { echoOn } +select regexpExtract('100-200', '(\\d+)-(\\d+)', 1); +select regexpExtract('100-200', '(\\d+)-(\\d+)'); +select regexpExtract('100-200', '(\\d+)-(\\d+)', 2); +select regexpExtract('100-200', '(\\d+)-(\\d+)', 0); +select regexpExtract('100-200', '(\\d+).*', 1); +select regexpExtract('100-200', '([a-z])', 1); +select regexpExtract(null, '([a-z])', 1); +select regexpExtract('100-200', null, 1); +select regexpExtract('100-200', '([a-z])', null); + +select REGEXP_EXTRACT('100-200', '(\\d+)-(\\d+)', 1); +select REGEXP_EXTRACT('100-200', '(\\d+)-(\\d+)'); +select REGEXP_EXTRACT('100-200', '(\\d+)-(\\d+)', 0); + +select regexpExtract('0123456789', '(\d+)(\d+)', 0); +select regexpExtract('0123456789', '(\d+)(\d+)', 1); +select regexpExtract('0123456789', '(\d+)(\d+)', 2); + +select regexpExtract(materialize('100-200'), '(\\d+)-(\\d+)'); +select regexpExtract(materialize('100-200'), '(\\d+)-(\\d+)', 1); +select regexpExtract(materialize('100-200'), '(\\d+)-(\\d+)', 2); +select regexpExtract(materialize('100-200'), '(\\d+).*', 1); +select regexpExtract(materialize('100-200'), '([a-z])', 1); +select regexpExtract(materialize(null), '([a-z])', 1); +select regexpExtract(materialize('100-200'), null, 1); +select regexpExtract(materialize('100-200'), '([a-z])', null); + +select regexpExtract('100-200', '(\\d+)-(\\d+)', materialize(1)); +select regexpExtract('100-200', '(\\d+)-(\\d+)', materialize(2)); +select regexpExtract('100-200', '(\\d+).*', materialize(1)); +select regexpExtract('100-200', '([a-z])', materialize(1)); +select regexpExtract(null, '([a-z])', materialize(1)); +select regexpExtract('100-200', null, materialize(1)); +select regexpExtract('100-200', '([a-z])', materialize(null)); + +select regexpExtract(materialize('100-200'), '(\\d+)-(\\d+)', materialize(1)); +select 
regexpExtract(materialize('100-200'), '(\\d+)-(\\d+)', materialize(2)); +select regexpExtract(materialize('100-200'), '(\\d+).*', materialize(1)); +select regexpExtract(materialize('100-200'), '([a-z])', materialize(1)); +select regexpExtract(materialize(null), '([a-z])', materialize(1)); +select regexpExtract(materialize('100-200'), null, materialize(1)); +select regexpExtract(materialize('100-200'), '([a-z])', materialize(null)); +select regexpExtract('100-200', '(\\d+)-(\\d+)', number) from numbers(3); +select regexpExtract(materialize('100-200'), '(\\d+)-(\\d+)', number) from numbers(3); +select regexpExtract(number::String || '-' || (2*number)::String, '(\\d+)-(\\d+)', 1) from numbers(3); +select regexpExtract(number::String || '-' || (2*number)::String, '(\\d+)-(\\d+)', number%3) from numbers(5); +select regexpExtract('100-200100-200', '(\\d+)-(\\d+)(\\d+)-(\\d+)', materialize(3)); + +select regexpExtract('100-200'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select regexpExtract('100-200', '(\\d+)-(\\d+)', 1, 2); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select regexpExtract(cast('100-200' as FixedString(10)), '(\\d+)-(\\d+)', 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select regexpExtract('100-200', cast('(\\d+)-(\\d+)' as FixedString(20)), 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select regexpExtract('100-200', '(\\d+)-(\\d+)', 'a'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select regexpExtract(100, '(\\d+)-(\\d+)', 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select regexpExtract('100-200', 1, 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select regexpExtract('100-200', materialize('(\\d+)-(\\d+)'), 1); -- { serverError ILLEGAL_COLUMN } +select regexpExtract('100-200', '(\\d+)-(\\d+)', 3); -- { serverError INDEX_OF_POSITIONAL_ARGUMENT_IS_OUT_OF_RANGE } +select regexpExtract('100-200', '(\\d+)-(\\d+)', -1); -- { serverError INDEX_OF_POSITIONAL_ARGUMENT_IS_OUT_OF_RANGE } +select regexpExtract('100-200', '\\d+-\\d+', 0); +select regexpExtract('100-200', '\\d+-\\d+', 1);-- { serverError INDEX_OF_POSITIONAL_ARGUMENT_IS_OUT_OF_RANGE } +-- { echoOff } diff --git a/parser/testdata/02562_with_fill_nullable/ast.json b/parser/testdata/02562_with_fill_nullable/ast.json new file mode 100644 index 000000000..388ed7131 --- /dev/null +++ b/parser/testdata/02562_with_fill_nullable/ast.json @@ -0,0 +1,97 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toNullable (alias d) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2023-02-09'" + }, + { + "explain": " Literal 'Date'" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " 
ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier d" + } + ], + + "rows": 25, + + "statistics": + { + "elapsed": 0.001264225, + "rows_read": 25, + "bytes_read": 995 + } +} diff --git a/parser/testdata/02562_with_fill_nullable/metadata.json b/parser/testdata/02562_with_fill_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02562_with_fill_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02562_with_fill_nullable/query.sql b/parser/testdata/02562_with_fill_nullable/query.sql new file mode 100644 index 000000000..d2ca09e1a --- /dev/null +++ b/parser/testdata/02562_with_fill_nullable/query.sql @@ -0,0 +1,4 @@ +SELECT toNullable('2023-02-09'::Date + number * 10) AS d FROM numbers(2) ORDER BY d WITH FILL; +SELECT '---'; +SELECT number % 2 ? NULL : toNullable('2023-02-09'::Date + number) AS d FROM numbers(5) ORDER BY d ASC NULLS LAST WITH FILL; +-- TODO: NULLS FIRST does not work correctly with FILL. diff --git a/parser/testdata/02563_analyzer_merge/ast.json b/parser/testdata/02563_analyzer_merge/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02563_analyzer_merge/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02563_analyzer_merge/metadata.json b/parser/testdata/02563_analyzer_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02563_analyzer_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02563_analyzer_merge/query.sql b/parser/testdata/02563_analyzer_merge/query.sql new file mode 100644 index 000000000..6c252c227 --- /dev/null +++ b/parser/testdata/02563_analyzer_merge/query.sql @@ -0,0 +1,83 @@ +-- Tags: no-parallel + +SET enable_analyzer = 1; + +DROP DATABASE IF EXISTS 02563_db; +CREATE DATABASE 02563_db; + +DROP TABLE IF EXISTS 02563_db.test_merge_table_1; +CREATE TABLE 02563_db.test_merge_table_1 +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO 02563_db.test_merge_table_1 VALUES (0, 'Value_0'); + +DROP TABLE IF EXISTS 02563_db.test_merge_table_2; +CREATE TABLE 02563_db.test_merge_table_2 +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO 02563_db.test_merge_table_2 VALUES (1, 'Value_1'); + +DROP TABLE IF EXISTS 02563_db.test_merge_table; +CREATE TABLE 02563_db.test_merge_table +( + id UInt64, + value String +) ENGINE=Merge(02563_db, '^test_merge_table'); + +SELECT id, value, _database, _table FROM 02563_db.test_merge_table ORDER BY id; + +DROP TABLE 02563_db.test_merge_table; +DROP TABLE 02563_db.test_merge_table_1; +DROP TABLE 02563_db.test_merge_table_2; + +CREATE TABLE 02563_db.t_1 +( + timestamp DateTime64(9), + a String, + b String +) +ENGINE = MergeTree +PARTITION BY formatDateTime(toStartOfMinute(timestamp), '%Y%m%d%H', 'UTC') +ORDER BY (timestamp, a, b); + +CREATE TABLE 02563_db.dist_t_1 (timestamp DateTime64(9), a String, b String) ENGINE = Distributed('test_shard_localhost', '02563_db', 't_1'); + +CREATE TABLE 02563_db.m ENGINE = Merge('02563_db', '^dist_'); + +INSERT INTO 02563_db.t_1 (timestamp, a, b) +select + addMinutes(toDateTime64('2024-07-13 22:00:00', 9, 'UTC'), number), + randomString(5), + randomString(5) +from numbers(30); + +INSERT INTO 02563_db.t_1 (timestamp, a, b) +select + addMinutes(toDateTime64('2024-07-13 
23:00:00', 9, 'UTC'), number), + randomString(5), + randomString(5) +from numbers(30); + +INSERT INTO 02563_db.t_1 (timestamp, a, b) +select + addMinutes(toDateTime64('2024-07-14 00:00:00', 9, 'UTC'), number), + randomString(5), + randomString(5) +from numbers(100); + + +SELECT '91138316-5127-45ac-9c25-4ad8779777b4', + count() +FROM 02563_db.m; + +DROP TABLE 02563_db.t_1; +DROP TABLE 02563_db.dist_t_1; +DROP TABLE 02563_db.m; + +DROP DATABASE 02563_db; diff --git a/parser/testdata/02564_analyzer_cross_to_inner/ast.json b/parser/testdata/02564_analyzer_cross_to_inner/ast.json new file mode 100644 index 000000000..ea3bd927f --- /dev/null +++ b/parser/testdata/02564_analyzer_cross_to_inner/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001329537, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02564_analyzer_cross_to_inner/metadata.json b/parser/testdata/02564_analyzer_cross_to_inner/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02564_analyzer_cross_to_inner/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02564_analyzer_cross_to_inner/query.sql b/parser/testdata/02564_analyzer_cross_to_inner/query.sql new file mode 100644 index 000000000..09cba098f --- /dev/null +++ b/parser/testdata/02564_analyzer_cross_to_inner/query.sql @@ -0,0 +1,92 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; +DROP TABLE IF EXISTS t5; + +CREATE TABLE t1 (a UInt64, b UInt64) ENGINE = Log; +INSERT INTO t1 VALUES (1, 2), (3, 4), (5, 6); + +CREATE TABLE t2 (a UInt64, b UInt64) ENGINE = Log; +INSERT INTO t2 VALUES (3, 4), (5, 6), (7, 8); + +CREATE TABLE t3 (a UInt64, b UInt64) ENGINE = Log; +INSERT INTO t3 VALUES (5, 6), (7, 8), (9, 10); + +CREATE TABLE t4 (a UInt64, b UInt64) ENGINE = Log; +INSERT INTO t4 VALUES (7, 8), (9, 10), (11, 12); + +CREATE TABLE t5 (a UInt64, b UInt64) ENGINE = Log; +INSERT INTO t5 VALUES (9, 10), (11, 12), (13, 14); + +SET cross_to_inner_join_rewrite = 1; + +SELECT * FROM t1, t2, (SELECT a as x from t3 where a + 1 = b ) as t3 +WHERE t1.a = if(t2.b > 0, t2.a, 0) AND t2.a = t3.x AND 1 +; + +SELECT * FROM t1, t2, (SELECT a as x from t3 where a + 1 = b ) as t3 +WHERE t1.a = if(t2.b > 0, t2.a, 0) +ORDER BY t1.a, t2.a, t3.x +; + +SELECT * FROM t1, t2, t3, t4, t5 +WHERE t2.a = t3.a AND t1.b = t5.b; + +SELECT * FROM t1, t2, t3, t4, t5 +WHERE t2.a = t3.a AND t1.b = t5.b AND t4.a = t5.a; + +SELECT * FROM t1, t2, t3, t4, t5 +WHERE t1.a = t3.a AND t3.b = t4.b AND t1.a = t4.a AND t2.a = t5.a; + +SELECT * FROM t1, t2, t3, t4, t5 +WHERE t1.a = t2.a AND t1.a = t3.a AND t1.a = t4.a AND t1.a = t5.a +AND t2.a = t3.a AND t2.a = t4.a AND t2.a = t5.a +AND t3.a = t4.a AND t3.a = t5.a +AND t4.a = t5.a; + +-- { echoOn } + +EXPLAIN QUERY TREE +SELECT * FROM t1, t2, (SELECT a as x from t3 where a + 1 = b ) as t3 +WHERE t1.a = if(t2.b > 0, t2.a, 0) AND t2.a = t3.x AND 1; + +EXPLAIN QUERY TREE +SELECT * FROM t1, t2, (SELECT a as x from t3 where a + 1 = b ) as t3 +WHERE t1.a = if(t2.b > 0, t2.a, 0) AND t2.a = t3.x AND 1 +SETTINGS cross_to_inner_join_rewrite = 0; + +EXPLAIN QUERY TREE +SELECT * FROM t1, t2, (SELECT a as x from t3 where a + 1 = b ) as t3 +WHERE t1.a = if(t2.b > 0, t2.a, 0); + +EXPLAIN QUERY TREE dump_ast=1 +SELECT * FROM t1, t2, t3, t4, t5 +WHERE t2.a = t3.a AND t1.b = t5.b; + 
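+-- For context: cross_to_inner_join_rewrite = 1 rewrites comma/CROSS joins into INNER JOINs when WHERE supplies a usable equality, and = 2 forces the rewrite (erroring when that is impossible); an illustrative sketch: +-- SELECT * FROM t1, t2 WHERE t1.a = t2.a -- is planned roughly as +-- SELECT * FROM t1 INNER JOIN t2 ON t1.a = t2.a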
+EXPLAIN QUERY TREE dump_ast=1 +SELECT * FROM t1, t2, t3, t4, t5 +WHERE t2.a = t3.a AND t1.b = t5.b AND t4.a = t5.a; + +EXPLAIN QUERY TREE dump_ast=1 +SELECT * FROM t1, t2, t3, t4, t5 +WHERE t1.a = t3.a AND t3.b = t4.b AND t1.a = t4.a AND t2.a = t5.a; + +EXPLAIN QUERY TREE dump_ast=1 +SELECT * FROM t1, t2, t3, t4, t5 +WHERE t1.a = t2.a AND t1.a = t3.a AND t1.a = t4.a AND t1.a = t5.a +AND t2.a = t3.a AND t2.a = t4.a AND t2.a = t5.a +AND t3.a = t4.a AND t3.a = t5.a +AND t4.a = t5.a; + +-- { echoOff } + +SELECT * FROM t1, t2, (SELECT a as x from t3 where a + 1 = b ) as t3 +WHERE t1.a = if(t2.b > 0, t2.a, 0) +SETTINGS cross_to_inner_join_rewrite = 2; -- { serverError INCORRECT_QUERY } + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; diff --git a/parser/testdata/02564_analyzer_ssb_cross_to_inner/ast.json b/parser/testdata/02564_analyzer_ssb_cross_to_inner/ast.json new file mode 100644 index 000000000..50c341788 --- /dev/null +++ b/parser/testdata/02564_analyzer_ssb_cross_to_inner/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00117163, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02564_analyzer_ssb_cross_to_inner/metadata.json b/parser/testdata/02564_analyzer_ssb_cross_to_inner/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02564_analyzer_ssb_cross_to_inner/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02564_analyzer_ssb_cross_to_inner/query.sql b/parser/testdata/02564_analyzer_ssb_cross_to_inner/query.sql new file mode 100644 index 000000000..f09b6a1d6 --- /dev/null +++ b/parser/testdata/02564_analyzer_ssb_cross_to_inner/query.sql @@ -0,0 +1,106 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS customer; +DROP TABLE IF EXISTS part; +DROP TABLE IF EXISTS supplier; +DROP TABLE IF EXISTS lineorder; +DROP TABLE IF EXISTS date; + +CREATE TABLE customer +( + C_CUSTKEY UInt32, + C_NAME String, + C_ADDRESS String, + C_CITY LowCardinality(String), + C_NATION LowCardinality(String), + C_REGION LowCardinality(String), + C_PHONE String, + C_MKTSEGMENT LowCardinality(String) +) +ENGINE = MergeTree ORDER BY (C_CUSTKEY); + +CREATE TABLE lineorder +( + LO_ORDERKEY UInt32, + LO_LINENUMBER UInt8, + LO_CUSTKEY UInt32, + LO_PARTKEY UInt32, + LO_SUPPKEY UInt32, + LO_ORDERDATE Date, + LO_ORDERPRIORITY LowCardinality(String), + LO_SHIPPRIORITY UInt8, + LO_QUANTITY UInt8, + LO_EXTENDEDPRICE UInt32, + LO_ORDTOTALPRICE UInt32, + LO_DISCOUNT UInt8, + LO_REVENUE UInt32, + LO_SUPPLYCOST UInt32, + LO_TAX UInt8, + LO_COMMITDATE Date, + LO_SHIPMODE LowCardinality(String) +) +ENGINE = MergeTree PARTITION BY toYear(LO_ORDERDATE) ORDER BY (LO_ORDERDATE, LO_ORDERKEY); + +CREATE TABLE part +( + P_PARTKEY UInt32, + P_NAME String, + P_MFGR LowCardinality(String), + P_CATEGORY LowCardinality(String), + P_BRAND LowCardinality(String), + P_COLOR LowCardinality(String), + P_TYPE LowCardinality(String), + P_SIZE UInt8, + P_CONTAINER LowCardinality(String) +) +ENGINE = MergeTree ORDER BY P_PARTKEY; + +CREATE TABLE supplier +( + S_SUPPKEY UInt32, + S_NAME String, + S_ADDRESS String, + S_CITY LowCardinality(String), + S_NATION LowCardinality(String), + S_REGION LowCardinality(String), + S_PHONE String +) +ENGINE = MergeTree ORDER BY S_SUPPKEY; + +CREATE TABLE date +( + D_DATEKEY Date, + D_DATE FixedString(18), + D_DAYOFWEEK LowCardinality(String), + 
D_MONTH LowCardinality(String), + D_YEAR UInt16, + D_YEARMONTHNUM UInt32, + D_YEARMONTH LowCardinality(FixedString(7)), + D_DAYNUMINWEEK UInt8, + D_DAYNUMINMONTH UInt8, + D_DAYNUMINYEAR UInt16, + D_MONTHNUMINYEAR UInt8, + D_WEEKNUMINYEAR UInt8, + D_SELLINGSEASON String, + D_LASTDAYINWEEKFL UInt8, + D_LASTDAYINMONTHFL UInt8, + D_HOLIDAYFL UInt8, + D_WEEKDAYFL UInt8 +) +ENGINE = MergeTree ORDER BY D_DATEKEY; + +set cross_to_inner_join_rewrite = 2; + +EXPLAIN QUERY TREE dump_ast=1 +select D_YEARMONTHNUM, S_CITY, P_BRAND, sum(LO_REVENUE - LO_SUPPLYCOST) as profit +from date, customer, supplier, part, lineorder +where LO_CUSTKEY = C_CUSTKEY + and LO_SUPPKEY = S_SUPPKEY + and LO_PARTKEY = P_PARTKEY + and LO_ORDERDATE = D_DATEKEY + and S_NATION = 'UNITED KINGDOM' + and P_CATEGORY = 'MFGR#21' + and (LO_QUANTITY between 34 and 44) + and (LO_ORDERDATE between toDate('1996-01-01') and toDate('1996-12-31')) +group by D_YEARMONTHNUM, S_CITY, P_BRAND +order by D_YEARMONTHNUM, S_CITY, P_BRAND; diff --git a/parser/testdata/02564_date_format/ast.json b/parser/testdata/02564_date_format/ast.json new file mode 100644 index 000000000..53108a5e5 --- /dev/null +++ b/parser/testdata/02564_date_format/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function DATE_FORMAT (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2018-01-02 22:33:44'" + }, + { + "explain": " Literal '%a'" + }, + { + "explain": " Function DATE_FORMAT (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDate32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2018-01-02'" + }, + { + "explain": " Literal '%a'" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001448259, + "rows_read": 16, + "bytes_read": 626 + } +} diff --git a/parser/testdata/02564_date_format/metadata.json b/parser/testdata/02564_date_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02564_date_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02564_date_format/query.sql b/parser/testdata/02564_date_format/query.sql new file mode 100644 index 000000000..1a1a1b7aa --- /dev/null +++ b/parser/testdata/02564_date_format/query.sql @@ -0,0 +1,30 @@ +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%a'), DATE_FORMAT(toDate32('2018-01-02'), '%a'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%b'), DATE_FORMAT(toDate32('2018-01-02'), '%b'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%c'), DATE_FORMAT(toDate32('2018-01-02'), '%c'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%C'), DATE_FORMAT(toDate32('2018-01-02'), '%C'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%d'), DATE_FORMAT(toDate32('2018-01-02'), '%d'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%D'), DATE_FORMAT(toDate32('2018-01-02'), '%D'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%e'), DATE_FORMAT(toDate32('2018-01-02'), '%e'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%F'), 
DATE_FORMAT(toDate32('2018-01-02'), '%F'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%h'), DATE_FORMAT(toDate32('2018-01-02'), '%h'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%H'), DATE_FORMAT(toDate32('2018-01-02'), '%H'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 02:33:44'), '%H'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%i'), DATE_FORMAT(toDate32('2018-01-02'), '%i'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%I'), DATE_FORMAT(toDate32('2018-01-02'), '%I'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 11:33:44'), '%I'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 00:33:44'), '%I'); +SELECT DATE_FORMAT(toDateTime('2018-01-01 00:33:44'), '%j'), DATE_FORMAT(toDate32('2018-01-01'), '%j'); +SELECT DATE_FORMAT(toDateTime('2000-12-31 00:33:44'), '%j'), DATE_FORMAT(toDate32('2000-12-31'), '%j'); +SELECT DATE_FORMAT(toDateTime('2000-12-31 00:33:44'), '%k'), DATE_FORMAT(toDate32('2000-12-31'), '%k'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%m'), DATE_FORMAT(toDate32('2018-01-02'), '%m'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%M'), DATE_FORMAT(toDate32('2018-01-02'), '%M'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%n'), DATE_FORMAT(toDate32('2018-01-02'), '%n'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 00:33:44'), '%p'), DATE_FORMAT(toDate32('2018-01-02'), '%p'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 11:33:44'), '%p'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 12:33:44'), '%p'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%r'), DATE_FORMAT(toDate32('2018-01-02'), '%r'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%R'), DATE_FORMAT(toDate32('2018-01-02'), '%R'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%S'), DATE_FORMAT(toDate32('2018-01-02'), '%S'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%t'), DATE_FORMAT(toDate32('2018-01-02'), '%t'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%T'), DATE_FORMAT(toDate32('2018-01-02'), '%T'); +SELECT DATE_FORMAT(toDateTime('2018-01-02 22:33:44'), '%W'), DATE_FORMAT(toDate32('2018-01-02'), '%W'); diff --git a/parser/testdata/02564_read_in_order_final_desc/ast.json b/parser/testdata/02564_read_in_order_final_desc/ast.json new file mode 100644 index 000000000..5354c5307 --- /dev/null +++ b/parser/testdata/02564_read_in_order_final_desc/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001132385, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02564_read_in_order_final_desc/metadata.json b/parser/testdata/02564_read_in_order_final_desc/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02564_read_in_order_final_desc/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02564_read_in_order_final_desc/query.sql b/parser/testdata/02564_read_in_order_final_desc/query.sql new file mode 100644 index 000000000..c1a8ba907 --- /dev/null +++ b/parser/testdata/02564_read_in_order_final_desc/query.sql @@ -0,0 +1,39 @@ +SET optimize_read_in_order = 1; +DROP TABLE IF EXISTS mytable; + +CREATE TABLE mytable +( + timestamp UInt64, + insert_timestamp UInt64, + key UInt64, + value Float64 +) ENGINE = ReplacingMergeTree(insert_timestamp) + PRIMARY KEY (key, timestamp) + ORDER BY (key, timestamp); + +INSERT INTO mytable (timestamp, insert_timestamp, key, value) VALUES 
(1900000010000, 1675159000000, 5, 555), (1900000010000, 1675159770000, 5, -1), (1900000020000, 1675159770000, 5, -0.0002), (1900000030000, 1675159770000, 5, 0), (1900000020000, 1675159700000, 5, 555), (1900000040000, 1675159770000, 5, 0.05), (1900000050000, 1675159770000, 5, 1); + +SELECT timestamp, value +FROM mytable FINAL +WHERE key = 5 +ORDER BY timestamp DESC; + +SELECT if(explain like '%ReadType: InOrder%', 'Ok', 'Error: ' || explain) FROM ( + EXPLAIN PLAN actions = 1 + SELECT timestamp, value + FROM mytable FINAL + WHERE key = 5 + ORDER BY timestamp SETTINGS enable_vertical_final = 0 +) WHERE explain like '%ReadType%'; + + +SELECT if(explain like '%ReadType: Default%', 'Ok', 'Error: ' || explain) FROM ( + EXPLAIN PLAN actions = 1 + SELECT timestamp, value + FROM mytable FINAL + WHERE key = 5 + ORDER BY timestamp DESC +) WHERE explain like '%ReadType%'; + + +DROP TABLE IF EXISTS mytable; diff --git a/parser/testdata/02565_analyzer_limit_settings/ast.json b/parser/testdata/02565_analyzer_limit_settings/ast.json new file mode 100644 index 000000000..15c5a74ec --- /dev/null +++ b/parser/testdata/02565_analyzer_limit_settings/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001108047, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02565_analyzer_limit_settings/metadata.json b/parser/testdata/02565_analyzer_limit_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02565_analyzer_limit_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02565_analyzer_limit_settings/query.sql b/parser/testdata/02565_analyzer_limit_settings/query.sql new file mode 100644 index 000000000..1dd6735e6 --- /dev/null +++ b/parser/testdata/02565_analyzer_limit_settings/query.sql @@ -0,0 +1,30 @@ +SET enable_analyzer = 1; + +-- { echoOn } +SET limit = 0; + +SELECT * FROM numbers(10); +SELECT * FROM numbers(10) SETTINGS limit=5, offset=2; +SELECT count(*) FROM (SELECT * FROM numbers(10)); +SELECT count(*) FROM (SELECT * FROM numbers(10) SETTINGS limit=5); +SELECT count(*) FROM (SELECT * FROM numbers(10)) SETTINGS limit=5; +SELECT count(*) FROM view(SELECT * FROM numbers(10)); +SELECT count(*) FROM view(SELECT * FROM numbers(10) SETTINGS limit=5); +SELECT count(*) FROM view(SELECT * FROM numbers(10)) SETTINGS limit=5; + +SET limit = 3; +SELECT * FROM numbers(10); +SELECT * FROM numbers(10) SETTINGS limit=5, offset=2; +SELECT count(*) FROM (SELECT * FROM numbers(10)); +SELECT count(*) FROM (SELECT * FROM numbers(10) SETTINGS limit=5); +SELECT count(*) FROM (SELECT * FROM numbers(10)) SETTINGS limit=5; +SELECT count(*) FROM view(SELECT * FROM numbers(10)); +SELECT count(*) FROM view(SELECT * FROM numbers(10) SETTINGS limit=5); +SELECT count(*) FROM view(SELECT * FROM numbers(10)) SETTINGS limit=5; + +SET limit = 4; +SET offset = 1; +SELECT * FROM numbers(10); +SELECT * FROM numbers(10) LIMIT 3 OFFSET 2; +SELECT * FROM numbers(10) LIMIT 5 OFFSET 2; +-- { echoOff } diff --git a/parser/testdata/02565_update_empty_nested/ast.json b/parser/testdata/02565_update_empty_nested/ast.json new file mode 100644 index 000000000..23077e91e --- /dev/null +++ b/parser/testdata/02565_update_empty_nested/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_update_empty_nested (children 1)" + }, + 
{ + "explain": " Identifier t_update_empty_nested" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00105935, + "rows_read": 2, + "bytes_read": 94 + } +} diff --git a/parser/testdata/02565_update_empty_nested/metadata.json b/parser/testdata/02565_update_empty_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02565_update_empty_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02565_update_empty_nested/query.sql b/parser/testdata/02565_update_empty_nested/query.sql new file mode 100644 index 000000000..333168476 --- /dev/null +++ b/parser/testdata/02565_update_empty_nested/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS t_update_empty_nested; + +CREATE TABLE t_update_empty_nested +( + `id` UInt32, + `nested.arr1` Array(UInt64), +) +ENGINE = MergeTree +ORDER BY id +SETTINGS min_bytes_for_wide_part = 0, index_granularity = 8192, index_granularity_bytes = '10Mi'; + +SET mutations_sync = 2; + +INSERT INTO t_update_empty_nested SELECT 1, range(number % 10) FROM numbers(100000); +ALTER TABLE t_update_empty_nested ADD COLUMN `nested.arr2` Array(UInt64); + +ALTER TABLE t_update_empty_nested UPDATE `nested.arr2` = `nested.arr1` WHERE 1; + +SELECT * FROM t_update_empty_nested FORMAT Null; +SELECT sum(length(nested.arr1)), sum(length(nested.arr2)) FROM t_update_empty_nested; + +DROP TABLE t_update_empty_nested; diff --git a/parser/testdata/02566_analyzer_limit_settings_distributed/ast.json b/parser/testdata/02566_analyzer_limit_settings_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02566_analyzer_limit_settings_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02566_analyzer_limit_settings_distributed/metadata.json b/parser/testdata/02566_analyzer_limit_settings_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02566_analyzer_limit_settings_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02566_analyzer_limit_settings_distributed/query.sql b/parser/testdata/02566_analyzer_limit_settings_distributed/query.sql new file mode 100644 index 000000000..a2620f436 --- /dev/null +++ b/parser/testdata/02566_analyzer_limit_settings_distributed/query.sql @@ -0,0 +1,34 @@ +-- Tags: distributed + +SET enable_analyzer = 1; + +SELECT 'limit', * FROM remote('127.1', view(SELECT * FROM numbers(10))) SETTINGS limit=5; +SELECT 'offset', * FROM remote('127.1', view(SELECT * FROM numbers(10))) SETTINGS offset=5; + +SELECT + 'limit w/ GROUP BY', + count(), + number +FROM remote('127.{1,2}', view( + SELECT intDiv(number, 2) AS number + FROM numbers(10) +)) +GROUP BY number +ORDER BY + count() ASC, + number DESC +SETTINGS limit=2; + +SELECT + 'limit/offset w/ GROUP BY', + count(), + number +FROM remote('127.{1,2}', view( + SELECT intDiv(number, 2) AS number + FROM numbers(10) +)) +GROUP BY number +ORDER BY + count() ASC, + number DESC +SETTINGS limit=2, offset=2; diff --git a/parser/testdata/02567_and_consistency/ast.json b/parser/testdata/02567_and_consistency/ast.json new file mode 100644 index 000000000..2bc8cf35d --- /dev/null +++ b/parser/testdata/02567_and_consistency/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + 
"explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toBool (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function SUM (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001317147, + "rows_read": 11, + "bytes_read": 440 + } +} diff --git a/parser/testdata/02567_and_consistency/metadata.json b/parser/testdata/02567_and_consistency/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02567_and_consistency/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02567_and_consistency/query.sql b/parser/testdata/02567_and_consistency/query.sql new file mode 100644 index 000000000..0442a6dad --- /dev/null +++ b/parser/testdata/02567_and_consistency/query.sql @@ -0,0 +1,94 @@ +SELECT toBool(sin(SUM(number))) AS x +FROM +( + SELECT 1 AS number +) +GROUP BY number +HAVING 1 AND sin(sum(number)) +ORDER BY ALL +SETTINGS enable_optimize_predicate_expression = 0; + +SELECT '====='; + +SELECT toBool(sin(SUM(number))) AS x +FROM +( + SELECT 1 AS number +) +GROUP BY number +HAVING 1 AND sin(1) +ORDER BY ALL +SETTINGS enable_optimize_predicate_expression = 0; + +SELECT '====='; + +SELECT toBool(sin(SUM(number))) AS x +FROM +( + SELECT 1 AS number +) +GROUP BY number +HAVING x AND sin(sum(number)) +ORDER BY ALL +SETTINGS enable_optimize_predicate_expression = 1; + +SELECT '====='; + +SELECT toBool(sin(SUM(number))) AS x +FROM +( + SELECT 1 AS number +) +GROUP BY number +HAVING 1 AND sin(sum(number)) +ORDER BY ALL +SETTINGS enable_optimize_predicate_expression = 0; + +SELECT '====='; + +SELECT 1 and sin(1); + +SELECT '====='; + +SELECT 'enable_analyzer'; + +SET enable_analyzer = 1; + +SELECT toBool(sin(SUM(number))) AS x +FROM +( + SELECT 1 AS number +) +GROUP BY number +HAVING 1 AND sin(sum(number)) +ORDER BY ALL +SETTINGS enable_optimize_predicate_expression = 1; + +select '#45440'; + +DROP TABLE IF EXISTS t2; +CREATE TABLE t2(c0 Int32) ENGINE = MergeTree ORDER BY c0; +INSERT INTO t2 VALUES (928386547), (1541944097), (2086579505), (1990427322), (-542998757), (390253678), (554855248), (203290629), (1504693323); + +SELECT + MAX(left.c0), + min2(left.c0, -(-left.c0) * (radians(left.c0) - radians(left.c0))) AS g, + (((-1925024212 IS NOT NULL) IS NOT NULL) != radians(tan(1216286224))) AND cos(lcm(MAX(left.c0), -1966575216) OR (MAX(left.c0) * 1180517420)) AS h, + NOT h, + h IS NULL +FROM t2 AS left +GROUP BY g +ORDER BY g DESC; + +SELECT '='; + +SELECT MAX(left.c0), min2(left.c0, -(-left.c0) * (radians(left.c0) - radians(left.c0))) as g, (((-1925024212 IS NOT NULL) IS NOT NULL) != radians(tan(1216286224))) AND cos(lcm(MAX(left.c0), -1966575216) OR (MAX(left.c0) * 1180517420)) as h, not h, h is null + FROM t2 AS left + GROUP BY g HAVING h ORDER BY g DESC SETTINGS enable_optimize_predicate_expression = 0; +SELECT '='; + +SELECT MAX(left.c0), min2(left.c0, -(-left.c0) * (radians(left.c0) - radians(left.c0))) as g, (((-1925024212 IS NOT NULL) IS NOT NULL) != radians(tan(1216286224))) AND cos(lcm(MAX(left.c0), -1966575216) OR (MAX(left.c0) * 1180517420)) as h, not h, h is null + FROM t2 AS left + GROUP BY g HAVING h ORDER BY g DESC SETTINGS enable_optimize_predicate_expression = 1; + +DROP TABLE IF EXISTS t2; diff --git 
a/parser/testdata/02568_and_consistency/ast.json b/parser/testdata/02568_and_consistency/ast.json new file mode 100644 index 000000000..3e2f605f6 --- /dev/null +++ b/parser/testdata/02568_and_consistency/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001045475, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02568_and_consistency/metadata.json b/parser/testdata/02568_and_consistency/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02568_and_consistency/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02568_and_consistency/query.sql b/parser/testdata/02568_and_consistency/query.sql new file mode 100644 index 000000000..4e76da784 --- /dev/null +++ b/parser/testdata/02568_and_consistency/query.sql @@ -0,0 +1,42 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (c0 Int32, PRIMARY KEY (c0)) ENGINE=MergeTree; +INSERT INTO t1 VALUES (1554690688); + +select '='; + +SELECT MIN(t1.c0) +FROM t1 +GROUP BY + (-sign(cos(t1.c0))) * (-max2(t1.c0, t1.c0 / t1.c0)), + t1.c0 * t1.c0, + sign(-exp(-t1.c0)) +HAVING -(-(MIN(t1.c0) + MIN(t1.c0))) AND (pow('{b' > '-657301241', log(-1004522121)) IS NOT NULL) +UNION ALL +SELECT MIN(t1.c0) +FROM t1 +GROUP BY + (-sign(cos(t1.c0))) * (-max2(t1.c0, t1.c0 / t1.c0)), + t1.c0 * t1.c0, + sign(-exp(-t1.c0)) +HAVING NOT (-(-(MIN(t1.c0) + MIN(t1.c0))) AND (pow('{b' > '-657301241', log(-1004522121)) IS NOT NULL)) +UNION ALL +SELECT MIN(t1.c0) +FROM t1 +GROUP BY + (-sign(cos(t1.c0))) * (-max2(t1.c0, t1.c0 / t1.c0)), + t1.c0 * t1.c0, + sign(-exp(-t1.c0)) +HAVING (-(-(MIN(t1.c0) + MIN(t1.c0))) AND (pow('{b' > '-657301241', log(-1004522121)) IS NOT NULL)) IS NULL +SETTINGS aggregate_functions_null_for_empty = 1, enable_optimize_predicate_expression = 0; + +select '='; + +SELECT MIN(t1.c0) +FROM t1 +GROUP BY t1.c0 +HAVING and(MIN(t1.c0) + MIN(t1.c0), 1) +SETTINGS aggregate_functions_null_for_empty = 1, enable_optimize_predicate_expression = 0; + +select '='; + +DROP TABLE IF EXISTS t1; diff --git a/parser/testdata/02568_array_map_const_low_cardinality/ast.json b/parser/testdata/02568_array_map_const_low_cardinality/ast.json new file mode 100644 index 000000000..e4b198bf4 --- /dev/null +++ b/parser/testdata/02568_array_map_const_low_cardinality/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + 
"explain": " Literal '1'" + }, + { + "explain": " Literal 'Nullable(UInt8)'" + }, + { + "explain": " Literal Array_[UInt64_1]" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001649634, + "rows_read": 21, + "bytes_read": 860 + } +} diff --git a/parser/testdata/02568_array_map_const_low_cardinality/metadata.json b/parser/testdata/02568_array_map_const_low_cardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02568_array_map_const_low_cardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02568_array_map_const_low_cardinality/query.sql b/parser/testdata/02568_array_map_const_low_cardinality/query.sql new file mode 100644 index 000000000..ed3fca5e0 --- /dev/null +++ b/parser/testdata/02568_array_map_const_low_cardinality/query.sql @@ -0,0 +1 @@ +SELECT arrayMap(x -> (toLowCardinality(1) + 1::Nullable(UInt8)), [1]); diff --git a/parser/testdata/02568_json_array_length/ast.json b/parser/testdata/02568_json_array_length/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02568_json_array_length/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02568_json_array_length/metadata.json b/parser/testdata/02568_json_array_length/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02568_json_array_length/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02568_json_array_length/query.sql b/parser/testdata/02568_json_array_length/query.sql new file mode 100644 index 000000000..4f2127b9c --- /dev/null +++ b/parser/testdata/02568_json_array_length/query.sql @@ -0,0 +1,16 @@ +-- { echoOn } +select JSONArrayLength(null); +select JSONArrayLength(''); +select JSONArrayLength('[]'); +select JSONArrayLength('[1,2,3]'); +select JSONArrayLength('[[1,2],[5,6,7]]'); +select JSONArrayLength('[{"a":123},{"b":"hello"}]'); +select JSONArrayLength('[1,2,3,[33,44],{"key":[2,3,4]}]'); +select JSONArrayLength('{"key":"not a json array"}'); +select JSONArrayLength('[1,2,3,4,5'); + +select JSON_ARRAY_LENGTH('[1,2,3,4,5'); +select JSON_ARRAY_LENGTH('[1,2,3,4,5]'); + +select JSONArrayLength(2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select JSONArrayLength(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/parser/testdata/02569_order_by_aggregation_result/ast.json b/parser/testdata/02569_order_by_aggregation_result/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02569_order_by_aggregation_result/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02569_order_by_aggregation_result/metadata.json b/parser/testdata/02569_order_by_aggregation_result/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02569_order_by_aggregation_result/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02569_order_by_aggregation_result/query.sql b/parser/testdata/02569_order_by_aggregation_result/query.sql new file mode 100644 index 000000000..3fef0374d --- /dev/null +++ b/parser/testdata/02569_order_by_aggregation_result/query.sql @@ -0,0 +1,60 @@ +-- Github issues: +-- - https://github.com/ClickHouse/ClickHouse/issues/46268 +-- - https://github.com/ClickHouse/ClickHouse/issues/46273 + +-- Queries that the original PR (https://github.com/ClickHouse/ClickHouse/pull/42827) tried to fix +SELECT (number = 1) AND (number = 2) AS value, sum(value) OVER () FROM numbers(1) WHERE 1; 
+SELECT time, round(exp_smooth, 10), bar(exp_smooth, -9223372036854775807, 1048575, 50) AS bar FROM (SELECT 2 OR (number = 0) OR (number >= 1) AS value, number AS time, exponentialTimeDecayedSum(2147483646)(value, time) OVER (RANGE BETWEEN CURRENT ROW AND CURRENT ROW) AS exp_smooth FROM numbers(1) WHERE 10) WHERE 25; + +CREATE TABLE ttttttt +( + `timestamp` DateTime, + `col1` Float64, + `col2` Float64, + `col3` Float64 +) +ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO ttttttt VALUES ('2023-02-20 00:00:00', 1, 2, 3); + +-- Query that https://github.com/ClickHouse/ClickHouse/pull/42827 broke +SELECT + argMax(col1, timestamp) AS col1, + argMax(col2, timestamp) AS col2, + col1 / col2 AS final_col +FROM ttttttt +GROUP BY + col3 +ORDER BY final_col DESC; + +SELECT + argMax(col1, timestamp) AS col1, + col1 / 10 AS final_col, + final_col + 1 AS final_col2 +FROM ttttttt +GROUP BY col3; + +-- https://github.com/ClickHouse/ClickHouse/issues/46724 + +CREATE TABLE table1 +( + id String, + device UUID +) +ENGINE = MergeTree() ORDER BY tuple(); + +INSERT INTO table1 VALUES ('notEmpty', '417ddc5d-e556-4d27-95dd-a34d84e46a50'); +INSERT INTO table1 VALUES ('', '417ddc5d-e556-4d27-95dd-a34d84e46a50'); +INSERT INTO table1 VALUES ('', '00000000-0000-0000-0000-000000000000'); + +SELECT + if(empty(id), toString(device), id) AS device, + multiIf( + notEmpty(id),'a', + device == '00000000-0000-0000-0000-000000000000', 'b', + 'c' ) AS device_id_type, + count() +FROM table1 +GROUP BY device, device_id_type +ORDER BY device; diff --git a/parser/testdata/02572_materialized_views_ignore_errors/ast.json b/parser/testdata/02572_materialized_views_ignore_errors/ast.json new file mode 100644 index 000000000..de44fd72a --- /dev/null +++ b/parser/testdata/02572_materialized_views_ignore_errors/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001414467, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02572_materialized_views_ignore_errors/metadata.json b/parser/testdata/02572_materialized_views_ignore_errors/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02572_materialized_views_ignore_errors/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02572_materialized_views_ignore_errors/query.sql b/parser/testdata/02572_materialized_views_ignore_errors/query.sql new file mode 100644 index 000000000..7990a2594 --- /dev/null +++ b/parser/testdata/02572_materialized_views_ignore_errors/query.sql @@ -0,0 +1,40 @@ +set prefer_localhost_replica=1; + +drop table if exists data_02572; +drop table if exists proxy_02572; +drop table if exists push_to_proxy_mv_02572; +drop table if exists receiver_02572; + +create table data_02572 (key Int) engine=Memory(); + +create table proxy_02572 (key Int) engine=Distributed('test_shard_localhost', currentDatabase(), 'receiver_02572'); +-- ensure that insert fails +insert into proxy_02572 values (1); -- { serverError UNKNOWN_TABLE } + +-- proxy data with MV +create materialized view push_to_proxy_mv_02572 to proxy_02572 as select * from data_02572; + +-- { echoOn } +select * from data_02572 order by key; + +insert into data_02572 settings materialized_views_ignore_errors=1 values (2); +select * from data_02572 order by key; +-- check system.query_views_log +system flush logs query_views_log; +-- lower(status) to pass through clickhouse-test "exception" 
check +select lower(status::String), errorCodeToName(exception_code) +from system.query_views_log where + view_name = concatWithSeparator('.', currentDatabase(), 'push_to_proxy_mv_02572') and + view_target = concatWithSeparator('.', currentDatabase(), 'proxy_02572') + order by event_date, event_time +; + +-- materialized_views_ignore_errors=0 +insert into data_02572 values (1); -- { serverError UNKNOWN_TABLE } +select * from data_02572 order by key; + +create table receiver_02572 as data_02572; + +insert into data_02572 values (3); +select * from data_02572 order by key; +select * from receiver_02572 order by key; diff --git a/parser/testdata/02572_max_intersections/ast.json b/parser/testdata/02572_max_intersections/ast.json new file mode 100644 index 000000000..afbed8749 --- /dev/null +++ b/parser/testdata/02572_max_intersections/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function hex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function maxIntersectionsState (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function VALUES (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_3)" + }, + { + "explain": " Literal Tuple_(UInt64_3, UInt64_5)" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001290562, + "rows_read": 16, + "bytes_read": 668 + } +} diff --git a/parser/testdata/02572_max_intersections/metadata.json b/parser/testdata/02572_max_intersections/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02572_max_intersections/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02572_max_intersections/query.sql b/parser/testdata/02572_max_intersections/query.sql new file mode 100644 index 000000000..5ac865222 --- /dev/null +++ b/parser/testdata/02572_max_intersections/query.sql @@ -0,0 +1 @@ +SELECT hex(maxIntersectionsState(*)) FROM VALUES((1, 3), (3, 5)); diff --git a/parser/testdata/02572_system_logs_materialized_views_ignore_errors/ast.json b/parser/testdata/02572_system_logs_materialized_views_ignore_errors/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02572_system_logs_materialized_views_ignore_errors/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02572_system_logs_materialized_views_ignore_errors/metadata.json b/parser/testdata/02572_system_logs_materialized_views_ignore_errors/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02572_system_logs_materialized_views_ignore_errors/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02572_system_logs_materialized_views_ignore_errors/query.sql b/parser/testdata/02572_system_logs_materialized_views_ignore_errors/query.sql new file mode 100644 index 000000000..3f05fd7f7 --- /dev/null +++ b/parser/testdata/02572_system_logs_materialized_views_ignore_errors/query.sql @@ 
-0,0 +1,32 @@ +-- Tags: no-parallel, no-replicated-database +-- Tag no-parallel: due to attaching to system.query_log +-- Tag no-replicated-database: Replicated database will have extra queries + +-- Attach MV to system.query_log and check that writing query_log will not fail + +set log_queries=1; + +drop table if exists log_proxy_02572; +drop table if exists push_to_logs_proxy_mv_02572; + +-- create log tables +system flush logs query_log; +create table log_proxy_02572 as system.query_log engine=Distributed('test_shard_localhost', currentDatabase(), 'receiver_02572'); +create materialized view push_to_logs_proxy_mv_02572 to log_proxy_02572 as select * from system.query_log; + +select 1 format Null; +system flush logs query_log; +system flush logs query_log; + +drop table log_proxy_02572; +drop table push_to_logs_proxy_mv_02572; + +set log_queries=0; + +system flush logs query_log; +-- lower() to pass through clickhouse-test "exception" check +select replaceAll(query, '\n', '\\n'), lower(type::String), errorCodeToName(exception_code) + from system.query_log + where current_database = currentDatabase() + order by event_time_microseconds + format CSV; diff --git a/parser/testdata/02573_insert_null_as_default_null_as_empty_nested/ast.json b/parser/testdata/02573_insert_null_as_default_null_as_empty_nested/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02573_insert_null_as_default_null_as_empty_nested/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02573_insert_null_as_default_null_as_empty_nested/metadata.json b/parser/testdata/02573_insert_null_as_default_null_as_empty_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02573_insert_null_as_default_null_as_empty_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02573_insert_null_as_default_null_as_empty_nested/query.sql b/parser/testdata/02573_insert_null_as_default_null_as_empty_nested/query.sql new file mode 100644 index 000000000..3afe585c9 --- /dev/null +++ b/parser/testdata/02573_insert_null_as_default_null_as_empty_nested/query.sql @@ -0,0 +1,27 @@ +-- { echo } +--- ensure that input_format_null_as_default allows writes to Nullable columns too +select * from format(JSONEachRow, 'payload Tuple(pull_request Tuple(merged_by Tuple(login Nullable(String))))', '{"payload" : {"pull_request": {"merged_by": {"login": "root"}}}}') settings input_format_null_as_default=1; +select * from format(JSONEachRow, 'payload Tuple(pull_request Tuple(merged_by Tuple(login Nullable(String))))', '{"payload" : {"pull_request": {"merged_by": null}}}') settings input_format_null_as_default=1; +--- tuple +select * from format(JSONEachRow, 'payload Tuple(pull_request Tuple(merged_by Tuple(login String)))', '{"payload" : {"pull_request": {"merged_by": {"login": "root"}}}}') settings input_format_null_as_default=0; +select * from format(JSONEachRow, 'payload Tuple(pull_request Tuple(merged_by Tuple(login String)))', '{"payload" : {"pull_request": {"merged_by": {"login": "root"}}}}') settings input_format_null_as_default=1; +select * from format(JSONEachRow, 'payload Tuple(pull_request Tuple(merged_by Tuple(login String)))', '{"payload" : {}}') settings input_format_null_as_default=0; +select * from format(JSONEachRow, 'payload Tuple(pull_request Tuple(merged_by Tuple(login String)))', '{"payload" : {}}') settings input_format_null_as_default=1; +select * from format(JSONEachRow, 'payload Tuple(pull_request
Tuple(merged_by Tuple(login String)))', '{"payload" : {"pull_request": {"merged_by": null}}}') settings input_format_null_as_default=0; -- { serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED } +select * from format(JSONEachRow, 'payload Tuple(pull_request Tuple(merged_by Tuple(login String)))', '{"payload" : {"pull_request": {"merged_by": null}}}') settings input_format_null_as_default=1; +--- map +set input_format_json_try_infer_named_tuples_from_objects=0; +set input_format_json_read_objects_as_strings=0; +select * from format(JSONEachRow, '{"payload" : {"pull_request": {"merged_by": {"login": "root"}}}}') settings input_format_null_as_default=0; +select * from format(JSONEachRow, '{"payload" : {"pull_request": {"merged_by": {"login": "root"}}}}') settings input_format_null_as_default=1; +select * from format(JSONEachRow, 'payload Map(String, String)', '{"payload" : {}}') settings input_format_null_as_default=0; +select * from format(JSONEachRow, 'payload Map(String, String)', '{"payload" : {}}') settings input_format_null_as_default=1; +select * from format(JSONEachRow, 'payload Map(String, Map(String, Map(String, String)))', '{"payload" : {"pull_request": {"merged_by": null}}}') settings input_format_null_as_default=0; -- { serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED } +select * from format(JSONEachRow, 'payload Map(String, Map(String, Map(String, String)))', '{"payload" : {"pull_request": {"merged_by": null}}}') settings input_format_null_as_default=1; +--- array +select * from format(JSONEachRow, 'payload Array(String)', '{"payload" : ["root"]}') settings input_format_null_as_default=0; +select * from format(JSONEachRow, 'payload Array(String)', '{"payload" : ["root"]}') settings input_format_null_as_default=1; +select * from format(JSONEachRow, 'payload Array(String)', '{"payload" : []}') settings input_format_null_as_default=0; +select * from format(JSONEachRow, 'payload Array(String)', '{"payload" : []}') settings input_format_null_as_default=1; +select * from format(JSONEachRow, 'payload Array(String)', '{"payload" : null}') settings input_format_null_as_default=0; -- { serverError CANNOT_READ_ARRAY_FROM_TEXT } +select * from format(JSONEachRow, 'payload Array(String)', '{"payload" : null}') settings input_format_null_as_default=1; diff --git a/parser/testdata/02573_quantile_fuse_msan/ast.json b/parser/testdata/02573_quantile_fuse_msan/ast.json new file mode 100644 index 000000000..24888544b --- /dev/null +++ b/parser/testdata/02573_quantile_fuse_msan/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001366188, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02573_quantile_fuse_msan/metadata.json b/parser/testdata/02573_quantile_fuse_msan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02573_quantile_fuse_msan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02573_quantile_fuse_msan/query.sql b/parser/testdata/02573_quantile_fuse_msan/query.sql new file mode 100644 index 000000000..efeef0b0e --- /dev/null +++ b/parser/testdata/02573_quantile_fuse_msan/query.sql @@ -0,0 +1,5 @@ +SET optimize_syntax_fuse_functions=1; +CREATE TEMPORARY TABLE datetime (`d` DateTime('UTC')); +SELECT quantile(0.1)(d), quantile(0.5)(d) FROM datetime; +INSERT INTO datetime SELECT * FROM generateRandom() LIMIT 10; +SELECT max(cityHash64(*)) > 0 FROM (SELECT 
quantile(0.1)(d), quantile(0.5)(d) FROM datetime); diff --git a/parser/testdata/02574_suspicious_low_cardinality_msan/ast.json b/parser/testdata/02574_suspicious_low_cardinality_msan/ast.json new file mode 100644 index 000000000..c32f403be --- /dev/null +++ b/parser/testdata/02574_suspicious_low_cardinality_msan/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table1__fuzz_19 (children 1)" + }, + { + "explain": " Identifier table1__fuzz_19" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001409259, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02574_suspicious_low_cardinality_msan/metadata.json b/parser/testdata/02574_suspicious_low_cardinality_msan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02574_suspicious_low_cardinality_msan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02574_suspicious_low_cardinality_msan/query.sql b/parser/testdata/02574_suspicious_low_cardinality_msan/query.sql new file mode 100644 index 000000000..f2841ac88 --- /dev/null +++ b/parser/testdata/02574_suspicious_low_cardinality_msan/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS table1__fuzz_19; + +SET allow_suspicious_low_cardinality_types = 1; +CREATE TABLE table1__fuzz_19 (`id` LowCardinality(UInt16), `v` DateTime64(3, 'UTC')) ENGINE = ReplacingMergeTree(v) PARTITION BY id % 200 ORDER BY id; +INSERT INTO table1__fuzz_19 SELECT number - 205, number FROM numbers(10); +INSERT INTO table1__fuzz_19 SELECT number - 205, number FROM numbers(400, 10); + +SELECT 1023, (((id % -9223372036854775807) = NULL) OR ((id % NULL) = 100) OR ((id % NULL) = 65537)) = ((id % inf) = 9223372036854775806), (id % NULL) = NULL, (id % 3.4028234663852886e38) = 1023, 2147483646 FROM table1__fuzz_19 ORDER BY (((id % 1048577) = 1024) % id) = 1023 DESC NULLS FIRST, id % 2147483646 ASC NULLS FIRST, ((id % 1) = 9223372036854775807) OR ((id % NULL) = 257) DESC NULLS FIRST; + +DROP TABLE table1__fuzz_19; diff --git a/parser/testdata/02575_map_hashing_msan/ast.json b/parser/testdata/02575_map_hashing_msan/ast.json new file mode 100644 index 000000000..526bfa7a3 --- /dev/null +++ b/parser/testdata/02575_map_hashing_msan/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function cityHash64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function map (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'Hello'" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'World'" + }, + { + "explain": " Literal 'LowCardinality(String)'" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001141277, + "rows_read": 16, + "bytes_read": 633 + } +} diff --git a/parser/testdata/02575_map_hashing_msan/metadata.json b/parser/testdata/02575_map_hashing_msan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02575_map_hashing_msan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02575_map_hashing_msan/query.sql b/parser/testdata/02575_map_hashing_msan/query.sql new file mode 100644 index 000000000..2fe3620e6 --- /dev/null +++ b/parser/testdata/02575_map_hashing_msan/query.sql @@ -0,0 +1,7 @@ +SELECT cityHash64(map(1, 'Hello'), CAST(materialize('World') AS LowCardinality(String))); +SELECT cityHash64(map(), CAST(materialize('') AS LowCardinality(Nullable(String)))); +SELECT materialize(42) as last_element, cityHash64(map(), CAST(materialize('') AS LowCardinality(Nullable(String))), last_element) from numbers(3); + +SET allow_suspicious_low_cardinality_types = 1; +CREATE TEMPORARY TABLE datetime__fuzz_14 (`d` LowCardinality(Nullable(UInt128))); +SELECT max(mapPopulateSeries(mapPopulateSeries(map(toInt64(1048), toInt64(9223), 3, -2147))), toInt64(1048), map('11', 257, '', NULL), cityHash64(*)) > NULL FROM (SELECT max(cityHash64(mapPopulateSeries(mapPopulateSeries(map(toInt64(1048), toInt64(2147), 655, -2147))), *)) > NULL, map(toInt64(-2147), toInt64(100.0001), -2147, NULL), mapPopulateSeries(map(toInt64(1024), toInt64(1048), 1048, -1)), map(toInt64(256), toInt64(NULL), -1, NULL), quantile(0.0001)(d) FROM datetime__fuzz_14 WITH TOTALS); diff --git a/parser/testdata/02575_merge_prewhere_default_expression/ast.json b/parser/testdata/02575_merge_prewhere_default_expression/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02575_merge_prewhere_default_expression/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02575_merge_prewhere_default_expression/metadata.json b/parser/testdata/02575_merge_prewhere_default_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02575_merge_prewhere_default_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02575_merge_prewhere_default_expression/query.sql b/parser/testdata/02575_merge_prewhere_default_expression/query.sql new file mode 100644 index 000000000..83c1d5126 --- /dev/null +++ b/parser/testdata/02575_merge_prewhere_default_expression/query.sql @@ -0,0 +1,38 @@ +-- Allow PREWHERE when Merge() and MergeTree have different DEFAULT expressions + +DROP TABLE IF EXISTS m; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE m +( + `a` String, + `f` UInt8 DEFAULT 0 +) +ENGINE = Merge(currentDatabase(), '^(t1|t2)$'); + +CREATE TABLE t1 +( + a String, + f UInt8 DEFAULT 1 +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 8192; +INSERT INTO t1 (a) VALUES ('OK'); + +CREATE TABLE t2 +( + a String, + f UInt8 DEFAULT 2 +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 8192; +INSERT INTO t2 (a) VALUES ('OK'); + +-- { echoOn } +SELECT * FROM m PREWHERE a = 'OK' ORDER BY a, f; +SELECT * FROM m PREWHERE f = 1 ORDER BY a, f; +SELECT * FROM m WHERE f = 0 SETTINGS optimize_move_to_prewhere=0; +SELECT * FROM m WHERE f = 0 SETTINGS optimize_move_to_prewhere=1; diff --git a/parser/testdata/02575_merge_prewhere_different_default_kind/ast.json b/parser/testdata/02575_merge_prewhere_different_default_kind/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02575_merge_prewhere_different_default_kind/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02575_merge_prewhere_different_default_kind/metadata.json b/parser/testdata/02575_merge_prewhere_different_default_kind/metadata.json
new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02575_merge_prewhere_different_default_kind/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02575_merge_prewhere_different_default_kind/query.sql b/parser/testdata/02575_merge_prewhere_different_default_kind/query.sql new file mode 100644 index 000000000..88c7923a5 --- /dev/null +++ b/parser/testdata/02575_merge_prewhere_different_default_kind/query.sql @@ -0,0 +1,47 @@ +-- Prohibit PREWHERE when Merge and MergeTree have different default types for the column + +DROP TABLE IF EXISTS m; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE m +( + a String, + date Date, + f UInt8 +) +ENGINE = Merge(currentDatabase(), '^(t1|t2)$'); + +CREATE TABLE t1 +( + a String, + date Date, + f UInt8 ALIAS 0 +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 8192; +INSERT INTO t1 (a) VALUES ('OK'); + +-- { echoOn } +-- For pure PREWHERE it is not addressed yet. +SELECT * FROM m PREWHERE a = 'OK'; +SELECT * FROM m PREWHERE f = 0; -- { serverError ILLEGAL_PREWHERE } +SELECT * FROM m WHERE f = 0 SETTINGS optimize_move_to_prewhere=0; +SELECT * FROM m WHERE f = 0 SETTINGS optimize_move_to_prewhere=1; +-- { echoOff } + +CREATE TABLE t2 +( + a String, + date Date, + f UInt8, +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 8192; +INSERT INTO t2 (a) VALUES ('OK'); + +-- { echoOn } +SELECT * FROM m WHERE f = 0 SETTINGS optimize_move_to_prewhere=1; +-- { echoOff } diff --git a/parser/testdata/02575_merge_prewhere_ephemeral/ast.json b/parser/testdata/02575_merge_prewhere_ephemeral/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02575_merge_prewhere_ephemeral/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02575_merge_prewhere_ephemeral/metadata.json b/parser/testdata/02575_merge_prewhere_ephemeral/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02575_merge_prewhere_ephemeral/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02575_merge_prewhere_ephemeral/query.sql b/parser/testdata/02575_merge_prewhere_ephemeral/query.sql new file mode 100644 index 000000000..85e03647d --- /dev/null +++ b/parser/testdata/02575_merge_prewhere_ephemeral/query.sql @@ -0,0 +1,38 @@ +-- You cannot query EPHEMERAL + +DROP TABLE IF EXISTS m; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE m +( + `a` String, + `f` UInt8 EPHEMERAL 0 +) +ENGINE = Merge(currentDatabase(), '^(t1|t2)$'); + +CREATE TABLE t1 +( + a String, + f UInt8 DEFAULT 1 +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 8192; +INSERT INTO t1 (a) VALUES ('OK'); + +CREATE TABLE t2 +( + a String, + f UInt8 DEFAULT 2 +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 8192; +INSERT INTO t2 (a) VALUES ('OK'); + +-- { echoOn } +SELECT * FROM m PREWHERE a = 'OK' ORDER BY a; +SELECT * FROM m PREWHERE f = 1 ORDER BY a; -- { serverError ILLEGAL_PREWHERE } +SELECT * FROM m WHERE a = 'OK' SETTINGS optimize_move_to_prewhere=0; +SELECT * FROM m WHERE a = 'OK' SETTINGS optimize_move_to_prewhere=1; diff --git a/parser/testdata/02575_merge_prewhere_materialized/ast.json b/parser/testdata/02575_merge_prewhere_materialized/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02575_merge_prewhere_materialized/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git
a/parser/testdata/02575_merge_prewhere_materialized/metadata.json b/parser/testdata/02575_merge_prewhere_materialized/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02575_merge_prewhere_materialized/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02575_merge_prewhere_materialized/query.sql b/parser/testdata/02575_merge_prewhere_materialized/query.sql new file mode 100644 index 000000000..eae72274c --- /dev/null +++ b/parser/testdata/02575_merge_prewhere_materialized/query.sql @@ -0,0 +1,43 @@ +-- Allow PREWHERE when Merge has DEFAULT and MergeTree has MATERIALIZED + +DROP TABLE IF EXISTS m; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE m +( + `a` String, + `f` UInt8 DEFAULT 0 +) +ENGINE = Merge(currentDatabase(), '^(t1|t2)$'); + +CREATE TABLE t1 +( + a String, + f UInt8 MATERIALIZED 1 +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 8192; +INSERT INTO t1 (a) VALUES ('OK'); + +CREATE TABLE t2 +( + a String, + f UInt8 DEFAULT 2 +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 8192; +INSERT INTO t2 (a) VALUES ('OK'); + +-- { echoOn } +SELECT * FROM m PREWHERE a = 'OK' ORDER BY a, f; +SELECT * FROM m PREWHERE f = 1 ORDER BY a, f; +SELECT * FROM m WHERE f = 0 SETTINGS optimize_move_to_prewhere=0; +SELECT * FROM m WHERE f = 0 SETTINGS optimize_move_to_prewhere=1; +-- { echoOff } + +DROP TABLE m; +DROP TABLE t1; +DROP TABLE t2; diff --git a/parser/testdata/02576_predicate_push_down_sorting_fix/ast.json b/parser/testdata/02576_predicate_push_down_sorting_fix/ast.json new file mode 100644 index 000000000..cb735f095 --- /dev/null +++ b/parser/testdata/02576_predicate_push_down_sorting_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001231983, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02576_predicate_push_down_sorting_fix/metadata.json b/parser/testdata/02576_predicate_push_down_sorting_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02576_predicate_push_down_sorting_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02576_predicate_push_down_sorting_fix/query.sql b/parser/testdata/02576_predicate_push_down_sorting_fix/query.sql new file mode 100644 index 000000000..486a26613 --- /dev/null +++ b/parser/testdata/02576_predicate_push_down_sorting_fix/query.sql @@ -0,0 +1,3 @@ +SET enable_analyzer = 1; + +EXPLAIN header = 1, actions = 1 SELECT number FROM (SELECT number FROM numbers(2) ORDER BY ignore(2)) WHERE ignore(2); diff --git a/parser/testdata/02576_rewrite_array_exists_to_has/ast.json b/parser/testdata/02576_rewrite_array_exists_to_has/ast.json new file mode 100644 index 000000000..07d2c143b --- /dev/null +++ b/parser/testdata/02576_rewrite_array_exists_to_has/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001349433, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02576_rewrite_array_exists_to_has/metadata.json b/parser/testdata/02576_rewrite_array_exists_to_has/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02576_rewrite_array_exists_to_has/metadata.json @@ -0,0 +1 @@ 
+{"todo": true} diff --git a/parser/testdata/02576_rewrite_array_exists_to_has/query.sql b/parser/testdata/02576_rewrite_array_exists_to_has/query.sql new file mode 100644 index 000000000..b5a123e37 --- /dev/null +++ b/parser/testdata/02576_rewrite_array_exists_to_has/query.sql @@ -0,0 +1,19 @@ +set enable_analyzer = true; + +set optimize_rewrite_array_exists_to_has = false; +EXPLAIN QUERY TREE run_passes = 1 select arrayExists(x -> x = 5 , materialize(range(10))) from numbers(10); +EXPLAIN QUERY TREE run_passes = 1 select arrayExists(x -> 5 = x , materialize(range(10))) from numbers(10); + +set optimize_rewrite_array_exists_to_has = true; +EXPLAIN QUERY TREE run_passes = 1 select arrayExists(x -> x = 5 , materialize(range(10))) from numbers(10); +EXPLAIN QUERY TREE run_passes = 1 select arrayExists(x -> 5 = x , materialize(range(10))) from numbers(10); + +set enable_analyzer = false; + +set optimize_rewrite_array_exists_to_has = false; +EXPLAIN SYNTAX select arrayExists(x -> x = 5 , materialize(range(10))) from numbers(10); +EXPLAIN SYNTAX select arrayExists(x -> 5 = x , materialize(range(10))) from numbers(10); + +set optimize_rewrite_array_exists_to_has = true; +EXPLAIN SYNTAX select arrayExists(x -> x = 5 , materialize(range(10))) from numbers(10); +EXPLAIN SYNTAX select arrayExists(x -> 5 = x , materialize(range(10))) from numbers(10); diff --git a/parser/testdata/02577_analyzer_array_join_calc_twice/ast.json b/parser/testdata/02577_analyzer_array_join_calc_twice/ast.json new file mode 100644 index 000000000..bd47159f8 --- /dev/null +++ b/parser/testdata/02577_analyzer_array_join_calc_twice/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001329406, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02577_analyzer_array_join_calc_twice/metadata.json b/parser/testdata/02577_analyzer_array_join_calc_twice/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02577_analyzer_array_join_calc_twice/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02577_analyzer_array_join_calc_twice/query.sql b/parser/testdata/02577_analyzer_array_join_calc_twice/query.sql new file mode 100644 index 000000000..0b281dd4f --- /dev/null +++ b/parser/testdata/02577_analyzer_array_join_calc_twice/query.sql @@ -0,0 +1,5 @@ +SET enable_analyzer = 1; + +SELECT 1 + arrayJoin(a) AS m FROM (SELECT [1, 2, 3] AS a) GROUP BY m; + +SELECT 1 + arrayJoin(a) AS m FROM (SELECT [1, 2, 3] AS a) GROUP BY 1 + arrayJoin(a); diff --git a/parser/testdata/02577_keepermap_delete_update/ast.json b/parser/testdata/02577_keepermap_delete_update/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02577_keepermap_delete_update/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02577_keepermap_delete_update/metadata.json b/parser/testdata/02577_keepermap_delete_update/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02577_keepermap_delete_update/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02577_keepermap_delete_update/query.sql b/parser/testdata/02577_keepermap_delete_update/query.sql new file mode 100644 index 000000000..ae80e6ead --- /dev/null +++ b/parser/testdata/02577_keepermap_delete_update/query.sql @@ -0,0 +1,44 @@ +-- Tags: no-ordinary-database, no-fasttest + +DROP 
TABLE IF EXISTS 02577_keepermap_delete_update; + +CREATE TABLE 02577_keepermap_delete_update (key UInt64, value String, value2 UInt64) ENGINE=KeeperMap('/' || currentDatabase() || '/test02577_keepermap_delete_update') PRIMARY KEY(key); + +INSERT INTO 02577_keepermap_delete_update VALUES (1, 'Some string', 0), (2, 'Some other string', 0), (3, 'random', 0), (4, 'random2', 0); + +SELECT *, _version FROM 02577_keepermap_delete_update ORDER BY key; +SELECT '-----------'; + +DELETE FROM 02577_keepermap_delete_update WHERE value LIKE 'Some%string'; + +SELECT *, _version FROM 02577_keepermap_delete_update ORDER BY key; +SELECT '-----------'; + +ALTER TABLE 02577_keepermap_delete_update DELETE WHERE key >= 4; + +SELECT *, _version FROM 02577_keepermap_delete_update ORDER BY key; +SELECT '-----------'; + +DELETE FROM 02577_keepermap_delete_update WHERE 1 = 1; +SELECT count() FROM 02577_keepermap_delete_update; +SELECT '-----------'; + +INSERT INTO 02577_keepermap_delete_update VALUES (1, 'String', 10), (2, 'String', 20), (3, 'String', 30), (4, 'String', 40); +SELECT *, _version FROM 02577_keepermap_delete_update ORDER BY key; +SELECT '-----------'; + +ALTER TABLE 02577_keepermap_delete_update UPDATE value = 'Another' WHERE key > 2; +SELECT *, _version FROM 02577_keepermap_delete_update ORDER BY key; +SELECT '-----------'; + +ALTER TABLE 02577_keepermap_delete_update UPDATE key = key * 10 WHERE 1 = 1; -- { serverError BAD_ARGUMENTS } +SELECT *, _version FROM 02577_keepermap_delete_update ORDER BY key; +SELECT '-----------'; + +ALTER TABLE 02577_keepermap_delete_update UPDATE value2 = value2 * 10 + 2 WHERE value2 < 100; +SELECT *, _version FROM 02577_keepermap_delete_update ORDER BY key; +SELECT '-----------'; + +ALTER TABLE 02577_keepermap_delete_update ON CLUSTER test_shard_localhost UPDATE value2 = value2 * 10 + 2 WHERE value2 < 100; -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS 02577_keepermap_delete_update; diff --git a/parser/testdata/02578_ipv4_codec_t64/ast.json b/parser/testdata/02578_ipv4_codec_t64/ast.json new file mode 100644 index 000000000..eeb895414 --- /dev/null +++ b/parser/testdata/02578_ipv4_codec_t64/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ipv4_t64 (children 1)" + }, + { + "explain": " Identifier ipv4_t64" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001011743, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/02578_ipv4_codec_t64/metadata.json b/parser/testdata/02578_ipv4_codec_t64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02578_ipv4_codec_t64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02578_ipv4_codec_t64/query.sql b/parser/testdata/02578_ipv4_codec_t64/query.sql new file mode 100644 index 000000000..63a19cba5 --- /dev/null +++ b/parser/testdata/02578_ipv4_codec_t64/query.sql @@ -0,0 +1,3 @@ +DROP TABLE IF EXISTS ipv4_t64; +CREATE TABLE ipv4_t64 (uid Int16, ip IPv4 CODEC(T64), INDEX ip_idx ip TYPE bloom_filter GRANULARITY 4) ENGINE=MergeTree ORDER BY uid; +DROP TABLE IF EXISTS ipv4_t64; diff --git a/parser/testdata/02578_parameterized_rename_queries/ast.json b/parser/testdata/02578_parameterized_rename_queries/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02578_parameterized_rename_queries/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
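-- The 02576 test above drives ClickHouse's optimize_rewrite_array_exists_to_has pass,
-- which (on my reading of these queries, stated as an assumption rather than verified
-- against the server source) turns a constant-equality arrayExists lambda into a has()
-- lookup. A minimal sketch of the equivalence the test relies on:
SELECT arrayExists(x -> x = 5, [1, 5, 9]); -- returns 1: some element equals 5
SELECT has([1, 5, 9], 5);                  -- returns 1: the rewritten, cheaper form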
a/parser/testdata/02578_parameterized_rename_queries/metadata.json b/parser/testdata/02578_parameterized_rename_queries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02578_parameterized_rename_queries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02578_parameterized_rename_queries/query.sql b/parser/testdata/02578_parameterized_rename_queries/query.sql new file mode 100644 index 000000000..de36f8ae3 --- /dev/null +++ b/parser/testdata/02578_parameterized_rename_queries/query.sql @@ -0,0 +1,53 @@ +-- Tags: no-parallel + +-- Case 1: RENAME DATABASE + +DROP DATABASE IF EXISTS 02661_db; +DROP DATABASE IF EXISTS 02661_db1; + +SET param_old_db_name = 02661_db; +SET param_new_db_name = 02661_db1; + +CREATE DATABASE {old_db_name:Identifier}; +RENAME DATABASE {old_db_name:Identifier} TO {new_db_name:Identifier}; + +SELECT name FROM system.databases WHERE name = {new_db_name:String}; + +-- Case 2: RENAME TABLE + +DROP TABLE IF EXISTS 02661_t; +DROP TABLE IF EXISTS 02661_t1; + +SET param_old_tbl_name = 02661_t; +SET param_new_tbl_name = 02661_t1; + +CREATE TABLE {new_db_name:Identifier}.{old_tbl_name:Identifier} (a UInt64) ENGINE = MergeTree ORDER BY tuple(); +RENAME TABLE {new_db_name:Identifier}.{old_tbl_name:Identifier} TO {new_db_name:Identifier}.{new_tbl_name:Identifier}; + +-- NOTE: no 'database = currentDatabase()' on purpose +SELECT name FROM system.tables WHERE name = {new_tbl_name:String}; + +-- Case 3: RENAME DICTIONARY + +DROP DICTIONARY IF EXISTS 02661_d; +DROP DICTIONARY IF EXISTS 02661_d1; + +SET param_old_dict_name = 02661_d; +SET param_new_dict_name = 02661_d1; + +CREATE DICTIONARY {new_db_name:Identifier}.{old_dict_name:Identifier} (id UInt64, val UInt8) PRIMARY KEY id SOURCE(NULL()) LAYOUT(FLAT()) LIFETIME(0); +RENAME DICTIONARY {new_db_name:Identifier}.{old_dict_name:Identifier} TO {new_db_name:Identifier}.{new_dict_name:Identifier}; + +SELECT name FROM system.dictionaries WHERE name = {new_dict_name:String}; + +-- Case 4: EXCHANGE TABLES + +CREATE TABLE {new_db_name:Identifier}.{old_tbl_name:Identifier} (a UInt64) ENGINE = MergeTree ORDER BY tuple(); +EXCHANGE TABLES {new_db_name:Identifier}.{old_tbl_name:Identifier} AND {new_db_name:Identifier}.{new_tbl_name:Identifier}; + +-- Case 5: EXCHANGE DICTIONARIES + +CREATE DICTIONARY {new_db_name:Identifier}.{old_dict_name:Identifier} (id UInt64, val UInt8) PRIMARY KEY id SOURCE(NULL()) LAYOUT(FLAT()) LIFETIME(0); +EXCHANGE DICTIONARIES {new_db_name:Identifier}.{old_dict_name:Identifier} AND {new_db_name:Identifier}.{new_dict_name:Identifier}; + +DROP DATABASE {new_db_name:Identifier}; diff --git a/parser/testdata/02579_fill_empty_chunk/ast.json b/parser/testdata/02579_fill_empty_chunk/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02579_fill_empty_chunk/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02579_fill_empty_chunk/metadata.json b/parser/testdata/02579_fill_empty_chunk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02579_fill_empty_chunk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02579_fill_empty_chunk/query.sql b/parser/testdata/02579_fill_empty_chunk/query.sql new file mode 100644 index 000000000..aeae98df7 --- /dev/null +++ b/parser/testdata/02579_fill_empty_chunk/query.sql @@ -0,0 +1,13 @@ +-- this SELECT produces empty chunk in FillingTransform + +SET enable_positional_arguments = 0; +SET 
enable_analyzer = 0; + +SELECT + 2 AS x, + arrayJoin([NULL, NULL, NULL]) +GROUP BY + GROUPING SETS ( + (0), + ([NULL, NULL, NULL])) +ORDER BY x ASC WITH FILL FROM 1 TO 10; diff --git a/parser/testdata/02579_fill_empty_chunk_analyzer/ast.json b/parser/testdata/02579_fill_empty_chunk_analyzer/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02579_fill_empty_chunk_analyzer/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02579_fill_empty_chunk_analyzer/metadata.json b/parser/testdata/02579_fill_empty_chunk_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02579_fill_empty_chunk_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02579_fill_empty_chunk_analyzer/query.sql b/parser/testdata/02579_fill_empty_chunk_analyzer/query.sql new file mode 100644 index 000000000..144640149 --- /dev/null +++ b/parser/testdata/02579_fill_empty_chunk_analyzer/query.sql @@ -0,0 +1,14 @@ +-- this SELECT produces empty chunk in FillingTransform + +SET enable_positional_arguments = 0; +SET enable_analyzer = 1; + +-- With analyzer this special query has correct output +SELECT + 2 AS x, + arrayJoin([NULL, NULL, NULL]) +GROUP BY + GROUPING SETS ( + (0), + ([NULL, NULL, NULL])) +ORDER BY x ASC WITH FILL FROM 1 TO 10; diff --git a/parser/testdata/02579_parameterized_replace/ast.json b/parser/testdata/02579_parameterized_replace/ast.json new file mode 100644 index 000000000..8035de5d6 --- /dev/null +++ b/parser/testdata/02579_parameterized_replace/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001051185, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02579_parameterized_replace/metadata.json b/parser/testdata/02579_parameterized_replace/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02579_parameterized_replace/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02579_parameterized_replace/query.sql b/parser/testdata/02579_parameterized_replace/query.sql new file mode 100644 index 000000000..b710a6cae --- /dev/null +++ b/parser/testdata/02579_parameterized_replace/query.sql @@ -0,0 +1,2 @@ +SET param_test_a=30; +SELECT * REPLACE({test_a:UInt32} as number) FROM numbers(2); diff --git a/parser/testdata/02580_like_substring_search_bug/ast.json b/parser/testdata/02580_like_substring_search_bug/ast.json new file mode 100644 index 000000000..e6ecf16d9 --- /dev/null +++ b/parser/testdata/02580_like_substring_search_bug/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function like (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'Win\\\\Sys'" + }, + { + "explain": " Literal '%Win\\\\Sys%'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.000952613, + "rows_read": 8, + "bytes_read": 293 + } +} diff --git a/parser/testdata/02580_like_substring_search_bug/metadata.json b/parser/testdata/02580_like_substring_search_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- 
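-- The two 02579 tests above feed an empty chunk into FillingTransform via WITH FILL.
-- A minimal sketch of the fill behaviour they depend on, assuming standard ClickHouse
-- semantics (missing ORDER BY key values in [FROM, TO) are synthesized as extra rows):
SELECT number AS x FROM numbers(3) ORDER BY x WITH FILL FROM 0 TO 6;
-- expected: x = 0, 1, 2 from the source plus filled rows x = 3, 4, 5 (TO is exclusive)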
/dev/null +++ b/parser/testdata/02580_like_substring_search_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02580_like_substring_search_bug/query.sql b/parser/testdata/02580_like_substring_search_bug/query.sql new file mode 100644 index 000000000..10ce3cdde --- /dev/null +++ b/parser/testdata/02580_like_substring_search_bug/query.sql @@ -0,0 +1 @@ +SELECT 'Win\Sys' LIKE '%Win\Sys%'; diff --git a/parser/testdata/02581_share_big_sets_between_multiple_mutations_tasks_long/ast.json b/parser/testdata/02581_share_big_sets_between_multiple_mutations_tasks_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02581_share_big_sets_between_multiple_mutations_tasks_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02581_share_big_sets_between_multiple_mutations_tasks_long/metadata.json b/parser/testdata/02581_share_big_sets_between_multiple_mutations_tasks_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02581_share_big_sets_between_multiple_mutations_tasks_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02581_share_big_sets_between_multiple_mutations_tasks_long/query.sql b/parser/testdata/02581_share_big_sets_between_multiple_mutations_tasks_long/query.sql new file mode 100644 index 000000000..8290b7e34 --- /dev/null +++ b/parser/testdata/02581_share_big_sets_between_multiple_mutations_tasks_long/query.sql @@ -0,0 +1,53 @@ +-- Tags: long, no-debug, no-tsan, no-asan, no-ubsan, no-msan, no-parallel, no-sanitize-coverage + +-- no-parallel because the sets use a lot of memory, which may interfere with other tests + +DROP TABLE IF EXISTS 02581_trips; + +CREATE TABLE 02581_trips(id UInt32, description String, id2 UInt32, PRIMARY KEY id) ENGINE=MergeTree ORDER BY id; + +-- Make multiple parts +INSERT INTO 02581_trips SELECT number, '', number FROM numbers(10000); +INSERT INTO 02581_trips SELECT number+10000000, '', number FROM numbers(10000); +INSERT INTO 02581_trips SELECT number+20000000, '', number FROM numbers(10000); +INSERT INTO 02581_trips SELECT number+30000000, '', number FROM numbers(10000); + +SELECT count() from 02581_trips WHERE description = ''; + +SELECT name FROM system.parts WHERE database=currentDatabase() AND table = '02581_trips' AND active ORDER BY name; + +-- Start multiple mutations simultaneously +SYSTEM STOP MERGES 02581_trips; +ALTER TABLE 02581_trips UPDATE description='5' WHERE id IN (SELECT (number*10 + 5)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=0; +ALTER TABLE 02581_trips UPDATE description='6' WHERE id IN (SELECT (number*10 + 6)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=0; +ALTER TABLE 02581_trips DELETE WHERE id IN (SELECT (number*10 + 7)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=0; +ALTER TABLE 02581_trips UPDATE description='8' WHERE id IN (SELECT (number*10 + 8)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=0; +SYSTEM START MERGES 02581_trips; + +-- Wait for mutations to finish +SELECT count() FROM 02581_trips SETTINGS select_sequential_consistency = 1; + +DELETE FROM 02581_trips WHERE id IN (SELECT (number*10 + 9)::UInt32 FROM numbers(10000000)) SETTINGS lightweight_deletes_sync = 2; +SELECT count(), _part from 02581_trips WHERE description = '' GROUP BY _part ORDER BY _part SETTINGS select_sequential_consistency=1; + +SET max_rows_to_read = 0; -- system.text_log can be really big +SYSTEM FLUSH LOGS text_log; +-- Check that in every 
mutation there were parts that built sets (log messages like 'Created Set with 10000000 entries from 10000000 rows in 0.388989187 sec.' ) +-- and parts that shared sets (log messages like 'Got set from cache in 0.388930505 sec.' ) +WITH ( + SELECT uuid + FROM system.tables + WHERE (database = currentDatabase()) AND (name = '02581_trips') + ) AS table_uuid +SELECT + CAST(splitByChar('_', query_id)[5], 'UInt64') AS mutation_version, -- '5521485f-8a40-4aba-87a2-00342c369563::all_3_3_0_6' + sum(message LIKE 'Created Set with % entries%') >= 1 AS has_parts_for_which_set_was_built, + sum(message LIKE 'Got set from cache%') >= 1 AS has_parts_that_shared_set +FROM system.text_log +WHERE + query_id LIKE concat(CAST(table_uuid, 'String'), '::all\\_%') + AND (event_date >= yesterday()) + AND (message LIKE 'Created Set with % entries%' OR message LIKE 'Got set from cache%') +GROUP BY mutation_version ORDER BY mutation_version FORMAT TSVWithNames; + +DROP TABLE 02581_trips; diff --git a/parser/testdata/02581_share_big_sets_between_mutation_tasks/ast.json b/parser/testdata/02581_share_big_sets_between_mutation_tasks/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02581_share_big_sets_between_mutation_tasks/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02581_share_big_sets_between_mutation_tasks/metadata.json b/parser/testdata/02581_share_big_sets_between_mutation_tasks/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02581_share_big_sets_between_mutation_tasks/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02581_share_big_sets_between_mutation_tasks/query.sql b/parser/testdata/02581_share_big_sets_between_mutation_tasks/query.sql new file mode 100644 index 000000000..ad8bef7fb --- /dev/null +++ b/parser/testdata/02581_share_big_sets_between_mutation_tasks/query.sql @@ -0,0 +1,64 @@ +-- Tags: no-tsan, no-asan, no-ubsan, no-msan, no-fasttest +-- no-fasttest: Slow test +-- no sanitizers: too slow sometimes + +DROP TABLE IF EXISTS 02581_trips; + +CREATE TABLE 02581_trips(id UInt32, id2 UInt32, description String) ENGINE=MergeTree ORDER BY id SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +-- Make multiple parts +INSERT INTO 02581_trips SELECT number, number, '' FROM numbers(10000); +INSERT INTO 02581_trips SELECT number+10000, number+10000, '' FROM numbers(10000); +INSERT INTO 02581_trips SELECT number+20000, number+20000, '' FROM numbers(10000); +INSERT INTO 02581_trips SELECT number+30000, number+30000, '' FROM numbers(10000); + +-- { echoOn } +SELECT count(), _part FROM 02581_trips GROUP BY _part ORDER BY _part; + +-- Run mutation with an 'IN big subquery' +ALTER TABLE 02581_trips UPDATE description='1' WHERE id IN (SELECT (number*10+1)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=2; +SELECT count(), _part FROM 02581_trips WHERE description = '' GROUP BY _part ORDER BY _part; +ALTER TABLE 02581_trips UPDATE description='2' WHERE id IN (SELECT (number*10+2)::UInt32 FROM numbers(10000)) SETTINGS mutations_sync=2; +SELECT count(), _part FROM 02581_trips WHERE description = '' GROUP BY _part ORDER BY _part; + +-- Run mutation with `id` IN a big subquery +ALTER TABLE 02581_trips UPDATE description='a' WHERE id IN (SELECT (number*10)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=2; +SELECT count() from 02581_trips WHERE description = ''; + +ALTER TABLE 02581_trips UPDATE description='a' WHERE id IN (SELECT (number*10 +
1)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=2, max_rows_in_set=1000; +SELECT count() from 02581_trips WHERE description = ''; + +-- Run mutation with func(`id`) IN big subquery +ALTER TABLE 02581_trips UPDATE description='b' WHERE id::UInt64 IN (SELECT (number*10 + 2)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=2; +SELECT count() from 02581_trips WHERE description = ''; + +-- Run mutation with non-PK `id2` IN big subquery +ALTER TABLE 02581_trips UPDATE description='c' WHERE id2 IN (SELECT (number*10 + 3)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=2; +SELECT count() from 02581_trips WHERE description = ''; + +-- Run mutation with PK and non-PK IN big subquery +ALTER TABLE 02581_trips UPDATE description='c' +WHERE + (id IN (SELECT (number*10 + 4)::UInt32 FROM numbers(10000000))) OR + (id2 IN (SELECT (number*10 + 4)::UInt32 FROM numbers(10000000))) +SETTINGS mutations_sync=2; +SELECT count() from 02581_trips WHERE description = ''; + +-- Run mutation with PK and non-PK IN big subquery +ALTER TABLE 02581_trips UPDATE description='c' +WHERE + (id::UInt64 IN (SELECT (number*10 + 5)::UInt32 FROM numbers(10000000))) OR + (id2::UInt64 IN (SELECT (number*10 + 5)::UInt32 FROM numbers(10000000))) +SETTINGS mutations_sync=2; +SELECT count() from 02581_trips WHERE description = ''; + +-- Run mutation with PK and non-PK IN big subquery +ALTER TABLE 02581_trips UPDATE description='c' +WHERE + (id::UInt32 IN (SELECT (number*10 + 6)::UInt32 FROM numbers(10000000))) OR + ((id2+1)::String IN (SELECT (number*10 + 6)::UInt32 FROM numbers(10000000))) +SETTINGS mutations_sync=2; +SELECT count() from 02581_trips WHERE description = ''; +-- { echoOff } + +DROP TABLE 02581_trips; diff --git a/parser/testdata/02581_share_big_sets_between_mutation_tasks_long/ast.json b/parser/testdata/02581_share_big_sets_between_mutation_tasks_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02581_share_big_sets_between_mutation_tasks_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02581_share_big_sets_between_mutation_tasks_long/metadata.json b/parser/testdata/02581_share_big_sets_between_mutation_tasks_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02581_share_big_sets_between_mutation_tasks_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02581_share_big_sets_between_mutation_tasks_long/query.sql b/parser/testdata/02581_share_big_sets_between_mutation_tasks_long/query.sql new file mode 100644 index 000000000..2d94714c7 --- /dev/null +++ b/parser/testdata/02581_share_big_sets_between_mutation_tasks_long/query.sql @@ -0,0 +1,81 @@ +-- Tags: long, no-debug, no-tsan, no-asan, no-ubsan, no-msan, no-parallel + +-- no-parallel because the sets use a lot of memory, which may interfere with other tests + +DROP TABLE IF EXISTS 02581_trips; + +CREATE TABLE 02581_trips(id UInt32, description String, id2 UInt32, PRIMARY KEY id) ENGINE=MergeTree ORDER BY id; + +-- Make multiple parts +INSERT INTO 02581_trips SELECT number, '', number FROM numbers(10000); +INSERT INTO 02581_trips SELECT number+10000000, '', number FROM numbers(10000); +INSERT INTO 02581_trips SELECT number+20000000, '', number FROM numbers(10000); +INSERT INTO 02581_trips SELECT number+30000000, '', number FROM numbers(10000); + +SELECT count() from 02581_trips WHERE description = ''; + + +SELECT name FROM system.parts WHERE database=currentDatabase() AND table = '02581_trips' AND 
active ORDER BY name; + +-- Run mutation with `id` IN a big subquery +ALTER TABLE 02581_trips UPDATE description='a' WHERE id IN (SELECT (number*10)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=2; +SELECT count() from 02581_trips WHERE description = ''; + +ALTER TABLE 02581_trips UPDATE description='a' WHERE id IN (SELECT (number*10 + 1)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=2, max_rows_in_set=1000; +SELECT count() from 02581_trips WHERE description = ''; + +-- Run mutation with func(`id`) IN big subquery +ALTER TABLE 02581_trips UPDATE description='b' WHERE id::UInt64 IN (SELECT (number*10 + 2)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=2; +SELECT count() from 02581_trips WHERE description = ''; + +-- Run mutation with non-PK `id2` IN big subquery +--SELECT count(), _part FROM 02581_trips WHERE id2 IN (SELECT (number*10 + 3)::UInt32 FROM numbers(10000000)) GROUP BY _part ORDER BY _part; +--EXPLAIN SELECT (), _part FROM 02581_trips WHERE id2 IN (SELECT (number*10 + 3)::UInt32 FROM numbers(10000000)); +ALTER TABLE 02581_trips UPDATE description='c' WHERE id2 IN (SELECT (number*10 + 3)::UInt32 FROM numbers(10000000)) SETTINGS mutations_sync=2; +SELECT count() from 02581_trips WHERE description = ''; + +-- Run mutation with PK and non-PK IN big subquery +ALTER TABLE 02581_trips UPDATE description='c' +WHERE + (id IN (SELECT (number*10 + 4)::UInt32 FROM numbers(10000000))) OR + (id2 IN (SELECT (number*10 + 4)::UInt32 FROM numbers(10000000))) +SETTINGS mutations_sync=2; +SELECT count() from 02581_trips WHERE description = ''; + +-- Run mutation with PK and non-PK IN big subquery +ALTER TABLE 02581_trips UPDATE description='c' +WHERE + (id::UInt64 IN (SELECT (number*10 + 5)::UInt32 FROM numbers(10000000))) OR + (id2::UInt64 IN (SELECT (number*10 + 5)::UInt32 FROM numbers(10000000))) +SETTINGS mutations_sync=2; +SELECT count() from 02581_trips WHERE description = ''; + +-- Run mutation with PK and non-PK IN big subquery +ALTER TABLE 02581_trips UPDATE description='c' +WHERE + (id::UInt32 IN (SELECT (number*10 + 6)::UInt32 FROM numbers(10000000))) OR + ((id2+1)::String IN (SELECT (number*10 + 6)::UInt32 FROM numbers(10000000))) +SETTINGS mutations_sync=2; +SELECT count() from 02581_trips WHERE description = ''; + +SET max_rows_to_read = 0; -- system.text_log can be really big +SYSTEM FLUSH LOGS text_log; +-- Check that in every mutation there were parts that built sets (log messages like 'Created Set with 10000000 entries from 10000000 rows in 0.388989187 sec.' ) +-- and parts that shared sets (log messages like 'Got set from cache in 0.388930505 sec.'
) +WITH ( + SELECT uuid + FROM system.tables + WHERE (database = currentDatabase()) AND (name = '02581_trips') + ) AS table_uuid +SELECT + CAST(splitByChar('_', query_id)[5], 'UInt64') AS mutation_version, -- '5521485f-8a40-4aba-87a2-00342c369563::all_3_3_0_6' + sum(message LIKE 'Created Set with % entries%') >= 1 AS has_parts_for_which_set_was_built, + sum(message LIKE 'Got set from cache%') >= 1 AS has_parts_that_shared_set +FROM system.text_log +WHERE + query_id LIKE concat(CAST(table_uuid, 'String'), '::all\\_%') + AND (event_date >= yesterday()) + AND (message LIKE 'Created Set with % entries%' OR message LIKE 'Got set from cache%') +GROUP BY mutation_version ORDER BY mutation_version FORMAT TSVWithNames; + +DROP TABLE 02581_trips; diff --git a/parser/testdata/02581_share_big_sets_between_mutation_tasks_with_storage_set/ast.json b/parser/testdata/02581_share_big_sets_between_mutation_tasks_with_storage_set/ast.json new file mode 100644 index 000000000..5120ee5fd --- /dev/null +++ b/parser/testdata/02581_share_big_sets_between_mutation_tasks_with_storage_set/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02581_trips (children 1)" + }, + { + "explain": " Identifier 02581_trips" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00125558, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/02581_share_big_sets_between_mutation_tasks_with_storage_set/metadata.json b/parser/testdata/02581_share_big_sets_between_mutation_tasks_with_storage_set/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02581_share_big_sets_between_mutation_tasks_with_storage_set/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02581_share_big_sets_between_mutation_tasks_with_storage_set/query.sql b/parser/testdata/02581_share_big_sets_between_mutation_tasks_with_storage_set/query.sql new file mode 100644 index 000000000..9a14f7862 --- /dev/null +++ b/parser/testdata/02581_share_big_sets_between_mutation_tasks_with_storage_set/query.sql @@ -0,0 +1,32 @@ +DROP TABLE IF EXISTS 02581_trips; + +CREATE TABLE 02581_trips(id UInt32, description String, id2 UInt32, PRIMARY KEY id) ENGINE=MergeTree ORDER BY id; + +-- Make multiple parts +INSERT INTO 02581_trips SELECT number, '', number FROM numbers(10000); +INSERT INTO 02581_trips SELECT number+10000000, '', number FROM numbers(10000); +INSERT INTO 02581_trips SELECT number+20000000, '', number FROM numbers(10000); +INSERT INTO 02581_trips SELECT number+30000000, '', number FROM numbers(10000); + +SELECT count() from 02581_trips WHERE description = ''; + + +SELECT name FROM system.parts WHERE database=currentDatabase() AND table = '02581_trips' AND active ORDER BY name; + +CREATE TABLE 02581_set (id UInt32) ENGINE = Set; + +INSERT INTO 02581_set SELECT number*10+7 FROM numbers(10000000); + +-- Run mutation with PK `id` IN big set +ALTER TABLE 02581_trips UPDATE description='d' WHERE id IN 02581_set SETTINGS mutations_sync=2; +SELECT count() from 02581_trips WHERE description = ''; + +INSERT INTO 02581_set SELECT number*10+8 FROM numbers(10000000); + +-- Run mutation with PK `id` IN big set after it is updated +ALTER TABLE 02581_trips UPDATE description='d' WHERE id IN 02581_set SETTINGS mutations_sync=2; +SELECT count() from 02581_trips WHERE description = ''; + + +DROP TABLE 02581_set; +DROP TABLE 02581_trips; diff --git a/parser/testdata/02581_width_bucket/ast.json 
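-- The text_log checks in the 02581 tests above recover a mutation's version from a
-- query_id shaped like '<table_uuid>::all_3_3_0_6' (the UUID below comes from the
-- tests' own inline comment, not from a real run). A worked example of that extraction:
SELECT splitByChar('_', '5521485f-8a40-4aba-87a2-00342c369563::all_3_3_0_6')[5];
-- the split yields ['5521485f-8a40-4aba-87a2-00342c369563::all', '3', '3', '0', '6'],
-- so element 5 is '6', which CAST(..., 'UInt64') turns into mutation_version = 6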
b/parser/testdata/02581_width_bucket/ast.json new file mode 100644 index 000000000..74b0675ca --- /dev/null +++ b/parser/testdata/02581_width_bucket/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery mytable (children 1)" + }, + { + "explain": " Identifier mytable" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00110742, + "rows_read": 2, + "bytes_read": 67 + } +} diff --git a/parser/testdata/02581_width_bucket/metadata.json b/parser/testdata/02581_width_bucket/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02581_width_bucket/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02581_width_bucket/query.sql b/parser/testdata/02581_width_bucket/query.sql new file mode 100644 index 000000000..b37562324 --- /dev/null +++ b/parser/testdata/02581_width_bucket/query.sql @@ -0,0 +1,62 @@ +CREATE TABLE mytable +( + operand Float64, + low Float64, + high Float64, + count UInt64, + PRIMARY KEY (operand, low, high, count) +) ENGINE = MergeTree(); + +INSERT INTO mytable VALUES (3, -100, 200, 10), (0, 0, 10, 4), (3, 0, 10, 3), (4.333, 1, 11, 3), (4.34, 1, 11, 3), (-7.6, -10, 0, 4), (-6, -5, -1, 2), (1, 3, 0, 1), (3, 2, 5, 0); + +SELECT operand, low, high, count, WIDTH_BUCKET(operand, low, high, count) FROM mytable WHERE count != 0; +SELECT '----------'; +-- zero is not valid for count +SELECT operand, low, high, count, WIDTH_BUCKET(operand, low, high, count) FROM mytable WHERE count = 0; -- { serverError BAD_ARGUMENTS } +-- operand, low and high cannot be NaN +SELECT WIDTH_BUCKET(0, 10, NaN, 10); -- { serverError BAD_ARGUMENTS } +SELECT WIDTH_BUCKET(NaN, 0, 10, 10); -- { serverError BAD_ARGUMENTS } +SELECT WIDTH_BUCKET(0, NaN, 10, 10); -- { serverError BAD_ARGUMENTS } +-- low and high cannot be Inf +SELECT WIDTH_BUCKET(1, -Inf, 10, 10); -- { serverError BAD_ARGUMENTS } +-- low and high cannot be Inf +SELECT WIDTH_BUCKET(1, 0, Inf, 10); -- { serverError BAD_ARGUMENTS } +-- operand can be Inf +SELECT WIDTH_BUCKET(-Inf, 0, 10, 10); +SELECT WIDTH_BUCKET(Inf, 0, 10, 10); +SELECT '----------'; +-- IntXX types +SELECT toInt64(operand) AS operand, toInt32(low) AS low, toInt16(high) AS high, count, WIDTH_BUCKET(operand, low, high, count) FROM mytable WHERE count != 0; +SELECT '----------'; +-- UIntXX types +SELECT toUInt8(toInt8(operand)) AS operand, toUInt16(toInt16(low)) AS low, toUInt32(toInt32(high)) AS high, count, WIDTH_BUCKET(operand, low, high, count) FROM mytable WHERE count != 0; +SELECT '----------'; +SELECT WIDTH_BUCKET(1, 2, 3, -1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT WIDTH_BUCKET(1, 2, 3, 1.3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT WIDTH_BUCKET('a', 1, 2, 3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT WIDTH_BUCKET(1, toUInt128(42), 2, 3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT WIDTH_BUCKET(1, 2, toInt128(42), 3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT WIDTH_BUCKET(1, 2, 3, toInt256(42)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT '----------'; +-- Return type checks +SELECT toTypeName(WIDTH_BUCKET(1, 2, 3, toUInt8(1))); +SELECT toTypeName(WIDTH_BUCKET(1, 2, 3, toUInt16(1))); +SELECT toTypeName(WIDTH_BUCKET(1, 2, 3, toUInt32(1))); +SELECT toTypeName(WIDTH_BUCKET(1, 2, 3, toUInt64(1))); +SELECT '----------'; +-- Test handling ColumnConst +SELECT WIDTH_BUCKET(1, low, high, count) FROM mytable WHERE count != 0; +SELECT WIDTH_BUCKET(operand, 2, high, count) 
FROM mytable WHERE count != 0; +SELECT WIDTH_BUCKET(3, 3, high, count) FROM mytable WHERE count != 0; +SELECT WIDTH_BUCKET(operand, low, 4, count) FROM mytable WHERE count != 0; +SELECT WIDTH_BUCKET(5, low, 5, count) FROM mytable WHERE count != 0; +SELECT WIDTH_BUCKET(operand, 6, 6, count) FROM mytable WHERE count != 0; +SELECT WIDTH_BUCKET(7, 7, 7, count) FROM mytable WHERE count != 0; +SELECT WIDTH_BUCKET(operand, low, high, 8) FROM mytable WHERE count != 0; +SELECT WIDTH_BUCKET(9, low, high, 9) FROM mytable WHERE count != 0; +SELECT WIDTH_BUCKET(operand, 10, high, 10) FROM mytable WHERE count != 0; +SELECT WIDTH_BUCKET(11, 11, high, 11) FROM mytable WHERE count != 0; +SELECT WIDTH_BUCKET(operand, low, 12, 12) FROM mytable WHERE count != 0; +SELECT WIDTH_BUCKET(13, low, 13, 13) FROM mytable WHERE count != 0; +SELECT WIDTH_BUCKET(operand, 14, 14, 14) FROM mytable WHERE count != 0; +SELECT WIDTH_BUCKET(15, 15, 15, 15) FROM mytable WHERE count != 0; \ No newline at end of file diff --git a/parser/testdata/02582_analyzer_join_subquery_empty_column_list/ast.json b/parser/testdata/02582_analyzer_join_subquery_empty_column_list/ast.json new file mode 100644 index 000000000..52ea22dbe --- /dev/null +++ b/parser/testdata/02582_analyzer_join_subquery_empty_column_list/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001324526, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02582_analyzer_join_subquery_empty_column_list/metadata.json b/parser/testdata/02582_analyzer_join_subquery_empty_column_list/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02582_analyzer_join_subquery_empty_column_list/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02582_analyzer_join_subquery_empty_column_list/query.sql b/parser/testdata/02582_analyzer_join_subquery_empty_column_list/query.sql new file mode 100644 index 000000000..33c9296a0 --- /dev/null +++ b/parser/testdata/02582_analyzer_join_subquery_empty_column_list/query.sql @@ -0,0 +1,12 @@ +SET enable_analyzer = 1; +-- { echoOn } + +SELECT a FROM ( select 1 AS a ) AS t1, ( select 2 AS b, 3 AS c) AS t2; +SELECT a FROM ( select 1 AS a UNION ALL select 1 as a ) AS t1, ( select 2 AS b, 3 AS c) AS t2; +SELECT a FROM ( select 1 AS a ) AS t1, ( select 2 AS b, 3 AS c UNION ALL select 2 as b, 3 as c) AS t2; +SELECT a FROM ( select 1 AS a UNION ALL select 1 as a ) AS t1, ( select 2 AS b, 3 AS c UNION ALL select 2 as b, 3 as c) AS t2; +SELECT a FROM ( select * from ( select 1 AS a UNION ALL select 1 as a) ) AS t1, ( select * from ( select 2 AS b, 3 AS c UNION ALL select 2 as b, 3 as c )) AS t2; +SELECT b FROM ( select 1 AS a UNION ALL select 1 as a ) AS t1, ( select 2 AS b, 3 AS c UNION ALL select 2 as b, 3 as c) AS t2; +SELECT c FROM ( select 1 AS a UNION ALL select 1 as a ) AS t1, ( select 2 AS b, 3 AS c UNION ALL select 2 as b, 3 as c) AS t2; +SELECT 42 FROM ( select 1 AS a UNION ALL select 1 as a ) AS t1, ( select 2 AS b, 3 AS c UNION ALL select 2 as b, 3 as c) AS t2; +SELECT count() FROM ( select 1 AS a UNION ALL select 1 as a ) AS t1, ( select 2 AS b, 3 AS c UNION ALL select 2 as b, 3 as c) AS t2; diff --git a/parser/testdata/02582_async_reading_with_small_limit/ast.json b/parser/testdata/02582_async_reading_with_small_limit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
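-- The 02581_width_bucket test above leans on the standard SQL WIDTH_BUCKET contract,
-- assumed here rather than verified against the server implementation:
--   0                                                   when operand < low
--   count + 1                                           when operand >= high
--   floor(count * (operand - low) / (high - low)) + 1   otherwise
-- A worked instance of the middle case:
SELECT WIDTH_BUCKET(3, 0, 10, 5); -- floor(5 * (3 - 0) / (10 - 0)) + 1 = 2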
b/parser/testdata/02582_async_reading_with_small_limit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02582_async_reading_with_small_limit/metadata.json b/parser/testdata/02582_async_reading_with_small_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02582_async_reading_with_small_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02582_async_reading_with_small_limit/query.sql b/parser/testdata/02582_async_reading_with_small_limit/query.sql new file mode 100644 index 000000000..406cab821 --- /dev/null +++ b/parser/testdata/02582_async_reading_with_small_limit/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-object-storage + +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +drop table if exists t; + +create table t(a UInt64) engine=MergeTree order by tuple(); + +system stop merges t; + +insert into t select * from numbers_mt(1e3); +insert into t select * from numbers_mt(1e3); +insert into t select * from numbers_mt(1e3); + +set allow_asynchronous_read_from_io_pool_for_merge_tree = 1; +set max_streams_for_merge_tree_reading = 64; +set max_block_size = 65409; + +-- slightly different transforms will be generated by the reading steps if we let settings randomisation change this setting's value -- +set read_in_order_two_level_merge_threshold = 1000; + +-- for fairly simple queries (no filter, aggregation and so on) with a limit smaller than `max_block_size`, we request reading with only a single stream for better performance -- +explain pipeline select * from t limit 100; diff --git a/parser/testdata/02583_map_literal_cast/ast.json b/parser/testdata/02583_map_literal_cast/ast.json new file mode 100644 index 000000000..2c956524d --- /dev/null +++ b/parser/testdata/02583_map_literal_cast/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_('a', UInt64_1)" + }, + { + "explain": " Literal Tuple_('b', UInt64_2)" + }, + { + "explain": " Literal 'Map(String, UInt8)'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001305607, + "rows_read": 11, + "bytes_read": 442 + } +} diff --git a/parser/testdata/02583_map_literal_cast/metadata.json b/parser/testdata/02583_map_literal_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02583_map_literal_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02583_map_literal_cast/query.sql b/parser/testdata/02583_map_literal_cast/query.sql new file mode 100644 index 000000000..23bb88088 --- /dev/null +++ b/parser/testdata/02583_map_literal_cast/query.sql @@ -0,0 +1,9 @@ +SELECT CAST([('a', 1), ('b', 2)], 'Map(String, UInt8)'); +SELECT CAST([('abc', 22), ('def', 33)], 'Map(String, UInt8)'); +SELECT CAST([(10, [11, 12]), (13, [14, 15])], 'Map(UInt8, Array(UInt8))'); +SELECT CAST([('ghj', [('klm', [16, 17])]), ('nop', [('rst', [18])])], 'Map(String, Map(String, Array(UInt8)))'); + +SELECT CAST((('a', 1), ('b', 2)),
'Map(String, UInt8)'); -- { serverError TYPE_MISMATCH } +SELECT CAST((('abc', 22), ('def', 33)), 'Map(String, UInt8)'); -- { serverError TYPE_MISMATCH } +SELECT CAST(((10, [11, 12]), (13, [14, 15])), 'Map(UInt8, Array(UInt8))'); -- { serverError TYPE_MISMATCH } +SELECT CAST((('ghj', (('klm', [16, 17]))), ('nop', (('rst', [18])))), 'Map(String, Map(String, Array(UInt8)))'); -- { serverError TYPE_MISMATCH } diff --git a/parser/testdata/02584_range_ipv4/ast.json b/parser/testdata/02584_range_ipv4/ast.json new file mode 100644 index 000000000..a5d9a16c5 --- /dev/null +++ b/parser/testdata/02584_range_ipv4/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toIPv4 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '172.31.0.0'" + }, + { + "explain": " Function toIPv4 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '172.31.0.10'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.0013367, + "rows_read": 12, + "bytes_read": 469 + } +} diff --git a/parser/testdata/02584_range_ipv4/metadata.json b/parser/testdata/02584_range_ipv4/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02584_range_ipv4/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02584_range_ipv4/query.sql b/parser/testdata/02584_range_ipv4/query.sql new file mode 100644 index 000000000..1241b7270 --- /dev/null +++ b/parser/testdata/02584_range_ipv4/query.sql @@ -0,0 +1,3 @@ +SELECT range(toIPv4('172.31.0.0'), toIPv4('172.31.0.10')); +SELECT range(2887712768, toIPv4('172.31.0.10')); +SELECT range(toIPv4('172.31.0.0'), 2887712778); diff --git a/parser/testdata/02586_generate_random_structure/ast.json b/parser/testdata/02586_generate_random_structure/ast.json new file mode 100644 index 000000000..d7980b8d4 --- /dev/null +++ b/parser/testdata/02586_generate_random_structure/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function generateRandomStructure (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Literal UInt64_42" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001255775, + "rows_read": 8, + "bytes_read": 307 + } +} diff --git a/parser/testdata/02586_generate_random_structure/metadata.json b/parser/testdata/02586_generate_random_structure/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02586_generate_random_structure/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02586_generate_random_structure/query.sql b/parser/testdata/02586_generate_random_structure/query.sql new file mode 100644 index 000000000..e2e8409b3 --- /dev/null +++ b/parser/testdata/02586_generate_random_structure/query.sql @@ 
-0,0 +1,20 @@ +select generateRandomStructure(5, 42); +select toTypeName(generateRandomStructure(5, 42)); +select toColumnTypeName(generateRandomStructure(5, 42)); +SELECT * FROM generateRandom(generateRandomStructure(5, 42), 42) LIMIT 1; + +select generateRandomStructure(5, 42, 42); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +select generateRandomStructure('5'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select generateRandomStructure(5, '42'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select generateRandomStructure(materialize(5), 42); -- {serverError ILLEGAL_COLUMN} +select generateRandomStructure(5, materialize(42)); -- {serverError ILLEGAL_COLUMN} + +desc generateRandom(10000000); +select * from generateRandom(10000000) limit 1; +select * from generateRandom(10000000, 2) limit 1; +select * from generateRandom(10000000, 2, 2) limit 1; +select * from generateRandom(10000000, 2, 2, 2) limit 1; -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +set allow_suspicious_low_cardinality_types=1; +select generateRandomStructure(5, 4); + diff --git a/parser/testdata/02587_csv_big_numbers_inference/ast.json b/parser/testdata/02587_csv_big_numbers_inference/ast.json new file mode 100644 index 000000000..ede964aa3 --- /dev/null +++ b/parser/testdata/02587_csv_big_numbers_inference/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DescribeQuery (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'CSV'" + }, + { + "explain": " Literal '100000000000000000000'" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.00122292, + "rows_read": 6, + "bytes_read": 215 + } +} diff --git a/parser/testdata/02587_csv_big_numbers_inference/metadata.json b/parser/testdata/02587_csv_big_numbers_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02587_csv_big_numbers_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02587_csv_big_numbers_inference/query.sql b/parser/testdata/02587_csv_big_numbers_inference/query.sql new file mode 100644 index 000000000..45a930345 --- /dev/null +++ b/parser/testdata/02587_csv_big_numbers_inference/query.sql @@ -0,0 +1,5 @@ +desc format('CSV', '100000000000000000000'); +select * from format('CSV', '100000000000000000000'); +desc format('CSV', '-100000000000000000000'); +select * from format('CSV', '-100000000000000000000'); + diff --git a/parser/testdata/02589_bson_invalid_document_size/ast.json b/parser/testdata/02589_bson_invalid_document_size/ast.json new file mode 100644 index 000000000..8fba635a5 --- /dev/null +++ b/parser/testdata/02589_bson_invalid_document_size/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001156144, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02589_bson_invalid_document_size/metadata.json b/parser/testdata/02589_bson_invalid_document_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02589_bson_invalid_document_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02589_bson_invalid_document_size/query.sql b/parser/testdata/02589_bson_invalid_document_size/query.sql new file 
mode 100644 index 000000000..b536b8d5c --- /dev/null +++ b/parser/testdata/02589_bson_invalid_document_size/query.sql @@ -0,0 +1,4 @@ +set input_format_parallel_parsing=1; +set max_threads=0; +select * from format(BSONEachRow, 'x UInt32', x'00000000'); -- {serverError INCORRECT_DATA} + diff --git a/parser/testdata/02590_bson_duplicate_column/ast.json b/parser/testdata/02590_bson_duplicate_column/ast.json new file mode 100644 index 000000000..a01e57c64 --- /dev/null +++ b/parser/testdata/02590_bson_duplicate_column/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier BSONEachRow" + }, + { + "explain": " Literal 'x UInt32, y UInt32'" + }, + { + "explain": " Literal '\u001A\\0\\0\\0\u0010x\\0*\\0\\0\\0\u0010x\\0*\\0\\0\\0\u0010y\\0*\\0\\0\\0\\0'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.00104124, + "rows_read": 13, + "bytes_read": 538 + } +} diff --git a/parser/testdata/02590_bson_duplicate_column/metadata.json b/parser/testdata/02590_bson_duplicate_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02590_bson_duplicate_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02590_bson_duplicate_column/query.sql b/parser/testdata/02590_bson_duplicate_column/query.sql new file mode 100644 index 000000000..ea70fb9ba --- /dev/null +++ b/parser/testdata/02590_bson_duplicate_column/query.sql @@ -0,0 +1 @@ +select * from format(BSONEachRow, 'x UInt32, y UInt32', x'1a0000001078002a0000001078002a0000001079002a00000000'); -- {serverError INCORRECT_DATA} diff --git a/parser/testdata/02591_bson_long_tuple/ast.json b/parser/testdata/02591_bson_long_tuple/ast.json new file mode 100644 index 000000000..56ad924b2 --- /dev/null +++ b/parser/testdata/02591_bson_long_tuple/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 11)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Literal UInt64_6" + }, + { + "explain": " Literal UInt64_7" + }, + { + "explain": " Literal UInt64_8" + }, + { + "explain": " Literal UInt64_9" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_11" + }, + { + "explain": " Identifier BSONEachRow" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001343064, + "rows_read": 18, + "bytes_read": 602 + } +} diff --git a/parser/testdata/02591_bson_long_tuple/metadata.json 
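-- A byte-level decoding of the 02590 payload above, following the public BSON spec:
-- 1a000000 is the little-endian document length (26 bytes); each 10 <name> 00 2a000000
-- group is an int32 field whose value is 42 (0x2a), with name bytes 78 = 'x' and
-- 79 = 'y'. The document therefore carries x=42, x=42, y=42, and the repeated 'x' is
-- what format() rejects with INCORRECT_DATA. Dropping the duplicate group and fixing
-- the length to 19 (0x13) should, under the same layout, parse as one row (42, 42):
select * from format(BSONEachRow, 'x UInt32, y UInt32', x'130000001078002a0000001079002a00000000');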
b/parser/testdata/02591_bson_long_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02591_bson_long_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02591_bson_long_tuple/query.sql b/parser/testdata/02591_bson_long_tuple/query.sql new file mode 100644 index 000000000..e24150c8e --- /dev/null +++ b/parser/testdata/02591_bson_long_tuple/query.sql @@ -0,0 +1,2 @@ +select tuple(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11) as x format BSONEachRow; + diff --git a/parser/testdata/02596_build_set_and_remote/ast.json b/parser/testdata/02596_build_set_and_remote/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02596_build_set_and_remote/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02596_build_set_and_remote/metadata.json b/parser/testdata/02596_build_set_and_remote/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02596_build_set_and_remote/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02596_build_set_and_remote/query.sql b/parser/testdata/02596_build_set_and_remote/query.sql new file mode 100644 index 000000000..4785446c1 --- /dev/null +++ b/parser/testdata/02596_build_set_and_remote/query.sql @@ -0,0 +1,20 @@ +-- {echoOn} +SELECT arrayExists(x -> (x IN (SELECT '2')), [2]) FROM system.one; + +SELECT arrayExists(x -> (x IN (SELECT '2')), [2]) FROM remote('127.0.0.{2,3}', system.one); +SELECT arrayExists(x -> (x IN (SELECT '2')), [2]) FROM remote('127.0.0.{2,3}'); + +SELECT arrayExists(x -> (x IN (SELECT '2')), [2]) FROM remote('127.0.0.{2,3}', system.one) GROUP BY NULL; +SELECT arrayExists(x -> (x IN (SELECT '2')), [2]) FROM remote('127.0.0.{2,3}') GROUP BY NULL; + +SELECT arrayExists(x -> (x IN (SELECT '2')), [2]) FROM remote('127.0.0.{2,3}', system.one) GROUP BY 1; +SELECT arrayExists(x -> (x IN (SELECT '2')), [2]) FROM remote('127.0.0.{2,3}') GROUP BY 1; + +SELECT arrayExists(x -> (x IN (SELECT '2')), [2]) FROM remote('127.0.0.{2,3}', system.one) GROUP BY 'A'; +SELECT arrayExists(x -> (x IN (SELECT '2')), [2]) FROM remote('127.0.0.{2,3}') GROUP BY 'A'; + +SELECT 1 IN ( SELECT 1 ) FROM remote('127.0.0.{1,2}', system.one) GROUP BY dummy; +SELECT 1 IN ( SELECT 1 ) FROM remote('127.0.0.{1,2}') GROUP BY dummy; + +SELECT 1000.0001, toUInt64(arrayJoin([NULL, 257, 65536, NULL])), arrayExists(x -> (x IN (SELECT '2.55')), [-9223372036854775808]) FROM remote('127.0.0.{1,2}', system.one) GROUP BY NULL, NULL, NULL, NULL; +SELECT 1000.0001, toUInt64(arrayJoin([NULL, 257, 65536, NULL])), arrayExists(x -> (x IN (SELECT '2.55')), [-9223372036854775808]) FROM remote('127.0.0.{1,2}') GROUP BY NULL, NULL, NULL, NULL; diff --git a/parser/testdata/02597_column_delete_and_replication/ast.json b/parser/testdata/02597_column_delete_and_replication/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02597_column_delete_and_replication/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02597_column_delete_and_replication/metadata.json b/parser/testdata/02597_column_delete_and_replication/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02597_column_delete_and_replication/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02597_column_delete_and_replication/query.sql b/parser/testdata/02597_column_delete_and_replication/query.sql new file mode 100644 index 000000000..bede52a99 --- 
/dev/null +++ b/parser/testdata/02597_column_delete_and_replication/query.sql @@ -0,0 +1,35 @@ +-- Tags: no-shared-merge-tree +-- Tag no-shared-merge-tree - in SMT this works differently + +-- Test for MergeTreeData::checkDropCommandDoesntAffectInProgressMutations() basically + +CREATE TABLE test ( + `c_id` String, + `p_id` String, + `d` String +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/test_table', '1') +ORDER BY (c_id, p_id); + +set mutations_sync=0; + +INSERT INTO test SELECT '1', '11', '111' FROM numbers(3); +INSERT INTO test SELECT '2', '22', '22' FROM numbers(3); + +-- this mutation will run in the background and will block the next mutation +ALTER TABLE test UPDATE d = d || throwIf(1) where 1; + +-- this mutation cannot be started until the previous ALTER finishes (in the background), and will cause the DROP COLUMN to fail with BAD_ARGUMENTS +ALTER TABLE test ADD COLUMN x UInt32 default 0; +ALTER TABLE test UPDATE d = d || '1' where x = 42; +ALTER TABLE test DROP COLUMN x SETTINGS mutations_sync = 2; --{serverError BAD_ARGUMENTS} + +-- unblock +KILL MUTATION WHERE database = currentDatabase() AND command LIKE '%throwIf%' SYNC FORMAT Null; +ALTER TABLE test UPDATE x = x + 1 where 1 SETTINGS mutations_sync = 2; + +ALTER TABLE test DROP COLUMN x SETTINGS mutations_sync = 2; + +select * from test format Null; + +DROP TABLE test; diff --git a/parser/testdata/02597_column_update_and_replication/ast.json b/parser/testdata/02597_column_update_and_replication/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02597_column_update_and_replication/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02597_column_update_and_replication/metadata.json b/parser/testdata/02597_column_update_and_replication/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02597_column_update_and_replication/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02597_column_update_and_replication/query.sql b/parser/testdata/02597_column_update_and_replication/query.sql new file mode 100644 index 000000000..cb58d3e9f --- /dev/null +++ b/parser/testdata/02597_column_update_and_replication/query.sql @@ -0,0 +1,33 @@ +-- Tags: no-shared-merge-tree +-- Tag no-shared-merge-tree - in SMT this works differently + +-- Test for MergeTreeData::checkDropCommandDoesntAffectInProgressMutations() basically + +CREATE TABLE test ( + `c_id` String, + `p_id` String, + `d` String +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/test_table', '1') +ORDER BY (c_id, p_id); + +INSERT INTO test SELECT '1', '11', '111' FROM numbers(3); + +INSERT INTO test SELECT '2', '22', '22' FROM numbers(3); + +set mutations_sync=0; + +ALTER TABLE test UPDATE d = d || throwIf(1) where 1; + +ALTER TABLE test ADD COLUMN x UInt32 default 0; +ALTER TABLE test UPDATE x = x + 1 where 1; +ALTER TABLE test DROP COLUMN x SETTINGS mutations_sync = 2; --{serverError BAD_ARGUMENTS} + +KILL MUTATION WHERE database = currentDatabase() AND command LIKE '%throwIf%' SYNC FORMAT Null; +ALTER TABLE test UPDATE x = x + 1 where 1 SETTINGS mutations_sync = 2; + +ALTER TABLE test DROP COLUMN x SETTINGS mutations_sync = 2; + +select * from test format Null; + +DROP TABLE test; diff --git a/parser/testdata/02597_column_update_tricky_expression_and_replication/ast.json b/parser/testdata/02597_column_update_tricky_expression_and_replication/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++
b/parser/testdata/02597_column_update_tricky_expression_and_replication/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02597_column_update_tricky_expression_and_replication/metadata.json b/parser/testdata/02597_column_update_tricky_expression_and_replication/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02597_column_update_tricky_expression_and_replication/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02597_column_update_tricky_expression_and_replication/query.sql b/parser/testdata/02597_column_update_tricky_expression_and_replication/query.sql new file mode 100644 index 000000000..d86a4464c --- /dev/null +++ b/parser/testdata/02597_column_update_tricky_expression_and_replication/query.sql @@ -0,0 +1,27 @@ +-- Tags: no-shared-merge-tree +-- Tag no-shared-merge-tree - in SMT this works differently + +-- Test for MergeTreeData::checkDropCommandDoesntAffectInProgressMutations() basically + +DROP TABLE IF EXISTS test SYNC; +CREATE TABLE test +( + c_id String, + p_id String, + d UInt32, +) +Engine = ReplicatedMergeTree('/clickhouse/tables/{database}/test/test_table', '1') +ORDER BY (c_id, p_id); + +INSERT INTO test SELECT '1', '11', '111' FROM numbers(5); +ALTER TABLE test UPDATE d = d + throwIf(1) where 1 SETTINGS mutations_sync=0; +ALTER TABLE test ADD COLUMN x UInt32 default 0 SETTINGS mutations_sync=0; +ALTER TABLE test UPDATE d = x + 1 where 1 SETTINGS mutations_sync=0; + +ALTER TABLE test DROP COLUMN x SETTINGS mutations_sync=2; -- { serverError BAD_ARGUMENTS } +KILL MUTATION WHERE database = currentDatabase() AND command LIKE '%throwIf%' SYNC FORMAT Null; + +ALTER TABLE test UPDATE x = x + 1 where 1 SETTINGS mutations_sync=2; +ALTER TABLE test DROP COLUMN x SETTINGS mutations_sync=2; +SELECT * from test format Null; +DROP TABLE test; diff --git a/parser/testdata/02597_projection_materialize_and_replication/ast.json b/parser/testdata/02597_projection_materialize_and_replication/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02597_projection_materialize_and_replication/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02597_projection_materialize_and_replication/metadata.json b/parser/testdata/02597_projection_materialize_and_replication/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02597_projection_materialize_and_replication/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02597_projection_materialize_and_replication/query.sql b/parser/testdata/02597_projection_materialize_and_replication/query.sql new file mode 100644 index 000000000..0f3e6f7c9 --- /dev/null +++ b/parser/testdata/02597_projection_materialize_and_replication/query.sql @@ -0,0 +1,28 @@ +CREATE TABLE test ( + `c_id` String, + `p_id` String, + `d` String +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/test_table', '1') +ORDER BY (c_id, p_id); + +INSERT INTO test SELECT '1', '11', '111' FROM numbers(30); + +INSERT INTO test SELECT '2', '22', '22' FROM numbers(30); + +set mutations_sync=0; + +ALTER TABLE test UPDATE d = d || toString(sleepEachRow(0.1)) where 1; + +ALTER TABLE test ADD PROJECTION d_order ( SELECT min(c_id) GROUP BY `d`); +ALTER TABLE test MATERIALIZE PROJECTION d_order; +ALTER TABLE test DROP PROJECTION d_order SETTINGS mutations_sync = 2; --{serverError BAD_ARGUMENTS} + +-- just to wait for the previous mutation +ALTER TABLE test DELETE where d = 'Hello' SETTINGS
mutations_sync = 2; + +ALTER TABLE test DROP PROJECTION d_order SETTINGS mutations_sync = 2; + +select * from test format Null; + +DROP TABLE test; diff --git a/parser/testdata/02661_quantile_approx/ast.json b/parser/testdata/02661_quantile_approx/ast.json new file mode 100644 index 000000000..0ad4caadd --- /dev/null +++ b/parser/testdata/02661_quantile_approx/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001350074, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02661_quantile_approx/metadata.json b/parser/testdata/02661_quantile_approx/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02661_quantile_approx/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02661_quantile_approx/query.sql b/parser/testdata/02661_quantile_approx/query.sql new file mode 100644 index 000000000..732ce645c --- /dev/null +++ b/parser/testdata/02661_quantile_approx/query.sql @@ -0,0 +1,54 @@ +set enable_analyzer = 1; + +-- { echoOn } +with arrayJoin([0, 1, 2, 10]) as x select quantilesGK(100, 0.5, 0.4, 0.1)(x); +with arrayJoin([0, 6, 7, 9, 10]) as x select quantileGK(100, 0.5)(x); + +select quantilesGK(10000, 0.25, 0.5, 0.75, 0.0, 1.0, 0, 1)(number + 1) from numbers(1000); +select quantilesGK(10000, 0.01, 0.1, 0.11)(number + 1) from numbers(10); + +with number + 1 as col select quantilesGK(10000, 0.25, 0.5, 0.75)(col), count(col), quantilesGK(10000, 0.0, 1.0)(col), sum(col) from numbers(1000); + +select quantilesGK(1, 100/1000, 200/1000, 250/1000, 314/1000, 777/1000)(number + 1) from numbers(1000); +select quantilesGK(10, 100/1000, 200/1000, 250/1000, 314/1000, 777/1000)(number + 1) from numbers(1000); +select quantilesGK(100, 100/1000, 200/1000, 250/1000, 314/1000, 777/1000)(number + 1) from numbers(1000); +select quantilesGK(1000, 100/1000, 200/1000, 250/1000, 314/1000, 777/1000)(number + 1) from numbers(1000); +select quantilesGK(10000, 100/1000, 200/1000, 250/1000, 314/1000, 777/1000)(number + 1) from numbers(1000); + +SELECT quantileGKMerge(100, 0.5)(x) +FROM +( + SELECT quantileGKState(100, 0.5)(number + 1) AS x + FROM numbers(49999) +); + +SELECT quantilesGKMerge(100, 0.5, 0.9, 0.99)(x) +FROM +( + SELECT quantilesGKState(100, 0.5, 0.9, 0.99)(number + 1) AS x + FROM numbers(49999) +); + +select medianGK()(number) from numbers(10) SETTINGS enable_analyzer = 0; -- { serverError BAD_ARGUMENTS } +select medianGK()(number) from numbers(10) SETTINGS enable_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +select quantileGK()(number) from numbers(10) SETTINGS enable_analyzer = 0; -- { serverError BAD_ARGUMENTS } +select quantileGK()(number) from numbers(10) SETTINGS enable_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +select medianGK(100)(number) from numbers(10); +select quantileGK(100)(number) from numbers(10); +select quantileGK(100, 0.5)(number) from numbers(10); +select quantileGK(100, 0.5, 0.75)(number) from numbers(10); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select quantileGK('abc', 0.5)(number) from numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select quantileGK(1.23, 0.5)(number) from numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select quantileGK(-100, 0.5)(number) from numbers(10); -- { serverError BAD_ARGUMENTS } + +select quantilesGK()(number) from numbers(10) SETTINGS 
enable_analyzer = 0; -- { serverError BAD_ARGUMENTS } +select quantilesGK()(number) from numbers(10) SETTINGS enable_analyzer = 1; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +select quantilesGK(100)(number) from numbers(10); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select quantilesGK(100, 0.5)(number) from numbers(10); +select quantilesGK('abc', 0.5, 0.75)(number) from numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select quantilesGK(1.23, 0.5, 0.75)(number) from numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select quantilesGK(-100, 0.5, 0.75)(number) from numbers(10); -- { serverError BAD_ARGUMENTS } +-- { echoOff } diff --git a/parser/testdata/02662_first_last_value/ast.json b/parser/testdata/02662_first_last_value/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02662_first_last_value/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02662_first_last_value/metadata.json b/parser/testdata/02662_first_last_value/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02662_first_last_value/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02662_first_last_value/query.sql b/parser/testdata/02662_first_last_value/query.sql new file mode 100644 index 000000000..16768bd6f --- /dev/null +++ b/parser/testdata/02662_first_last_value/query.sql @@ -0,0 +1,28 @@ +-- { echo } + +-- create table +drop table if exists test; +create table test(`a` Nullable(Int32), `b` Nullable(Int32)) ENGINE = Memory; +insert into test (a,b) values (1,null), (2,3), (4, 5), (6,null); + +-- first value +select first_value(b) from test; +select first_value(b) ignore nulls from test; +select first_value(b) respect nulls from test; + +-- last value +select last_value(b) from test; +select last_value(b) ignore nulls from test; +select last_value(b) respect nulls from test; + +SET enable_analyzer = 1; + +-- first value +select first_value(b) from test; +select first_value(b) ignore nulls from test; +select first_value(b) respect nulls from test; + +-- last value +select last_value(b) from test; +select last_value(b) ignore nulls from test; +select last_value(b) respect nulls from test; diff --git a/parser/testdata/02662_sparse_columns_mutations_1/ast.json b/parser/testdata/02662_sparse_columns_mutations_1/ast.json new file mode 100644 index 000000000..60044b6b5 --- /dev/null +++ b/parser/testdata/02662_sparse_columns_mutations_1/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001592513, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02662_sparse_columns_mutations_1/metadata.json b/parser/testdata/02662_sparse_columns_mutations_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02662_sparse_columns_mutations_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02662_sparse_columns_mutations_1/query.sql b/parser/testdata/02662_sparse_columns_mutations_1/query.sql new file mode 100644 index 000000000..3bf37e8e6 --- /dev/null +++ b/parser/testdata/02662_sparse_columns_mutations_1/query.sql @@ -0,0 +1,49 @@ +SET mutations_sync = 2; + +DROP TABLE IF EXISTS t_sparse_mutations_1; + +CREATE TABLE t_sparse_mutations_1 (key UInt8, id UInt64, s String) +ENGINE = MergeTree ORDER BY id PARTITION BY key +SETTINGS 
ratio_of_defaults_for_sparse_serialization = 0.9; + +INSERT INTO t_sparse_mutations_1 SELECT 1, number, if (number % 21 = 0, 'foo', '') FROM numbers (10000); + +SELECT name, type, serialization_kind FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_mutations_1' AND column = 's' AND active +ORDER BY name; + +SELECT countIf(s = 'foo'), arraySort(groupUniqArray(s)) FROM t_sparse_mutations_1; + +ALTER TABLE t_sparse_mutations_1 MODIFY COLUMN s Nullable(String); + +SELECT name, type, serialization_kind FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_mutations_1' AND column = 's' AND active +ORDER BY name; + +SELECT countIf(s = 'foo'), arraySort(groupUniqArray(s)) FROM t_sparse_mutations_1; + +INSERT INTO t_sparse_mutations_1 SELECT 2, number, if (number % 21 = 0, 'foo', '') FROM numbers (10000); + +SELECT name, type, serialization_kind FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_mutations_1' AND column = 's' AND active +ORDER BY name; + +SELECT countIf(s = 'foo'), arraySort(groupUniqArray(s)) FROM t_sparse_mutations_1; + +ALTER TABLE t_sparse_mutations_1 MODIFY COLUMN s String; + +SELECT name, type, serialization_kind FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_mutations_1' AND column = 's' AND active +ORDER BY name; + +SELECT countIf(s = 'foo'), arraySort(groupUniqArray(s)) FROM t_sparse_mutations_1; + +OPTIMIZE TABLE t_sparse_mutations_1 FINAL; + +SELECT name, type, serialization_kind FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_mutations_1' AND column = 's' AND active +ORDER BY name; + +SELECT countIf(s = 'foo'), arraySort(groupUniqArray(s)) FROM t_sparse_mutations_1; + +DROP TABLE t_sparse_mutations_1; diff --git a/parser/testdata/02662_sparse_columns_mutations_2/ast.json b/parser/testdata/02662_sparse_columns_mutations_2/ast.json new file mode 100644 index 000000000..4a91c5d5b --- /dev/null +++ b/parser/testdata/02662_sparse_columns_mutations_2/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001358825, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02662_sparse_columns_mutations_2/metadata.json b/parser/testdata/02662_sparse_columns_mutations_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02662_sparse_columns_mutations_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02662_sparse_columns_mutations_2/query.sql b/parser/testdata/02662_sparse_columns_mutations_2/query.sql new file mode 100644 index 000000000..561bd1642 --- /dev/null +++ b/parser/testdata/02662_sparse_columns_mutations_2/query.sql @@ -0,0 +1,33 @@ +SET mutations_sync = 2; + +DROP TABLE IF EXISTS t_sparse_mutations_2; + +CREATE TABLE t_sparse_mutations_2 (key UInt8, id UInt64, s String) +ENGINE = MergeTree ORDER BY id PARTITION BY key +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9; + +INSERT INTO t_sparse_mutations_2 SELECT 1, number, toString(number) FROM numbers (10000); + +SELECT type, serialization_kind FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_mutations_2' AND column = 's' AND active +ORDER BY name; + +SELECT count(), sum(s::UInt64) FROM t_sparse_mutations_2 WHERE s != ''; + +ALTER TABLE t_sparse_mutations_2 UPDATE s = '' WHERE id % 13 
!= 0; + +SELECT type, serialization_kind FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_mutations_2' AND column = 's' AND active +ORDER BY name; + +SELECT count(), sum(s::UInt64) FROM t_sparse_mutations_2 WHERE s != ''; + +OPTIMIZE TABLE t_sparse_mutations_2 FINAL; + +SELECT type, serialization_kind FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_mutations_2' AND column = 's' AND active +ORDER BY name; + +SELECT count(), sum(s::UInt64) FROM t_sparse_mutations_2 WHERE s != ''; + +DROP TABLE t_sparse_mutations_2; diff --git a/parser/testdata/02662_sparse_columns_mutations_3/ast.json b/parser/testdata/02662_sparse_columns_mutations_3/ast.json new file mode 100644 index 000000000..6f5a52801 --- /dev/null +++ b/parser/testdata/02662_sparse_columns_mutations_3/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000952246, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02662_sparse_columns_mutations_3/metadata.json b/parser/testdata/02662_sparse_columns_mutations_3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02662_sparse_columns_mutations_3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02662_sparse_columns_mutations_3/query.sql b/parser/testdata/02662_sparse_columns_mutations_3/query.sql new file mode 100644 index 000000000..6976d350d --- /dev/null +++ b/parser/testdata/02662_sparse_columns_mutations_3/query.sql @@ -0,0 +1,86 @@ +SET mutations_sync = 2; + +DROP TABLE IF EXISTS t_sparse_mutations_3; + +CREATE TABLE t_sparse_mutations_3 (key UInt8, id UInt64, s String) +ENGINE = MergeTree ORDER BY id PARTITION BY key +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9, serialization_info_version = 'basic'; + +INSERT INTO t_sparse_mutations_3 SELECT 1, number, toString(tuple(1, 0, '1', '0', '')) FROM numbers (10000); + +SELECT type, serialization_kind FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_mutations_3' AND column = 's' AND active +ORDER BY name; + +ALTER TABLE t_sparse_mutations_3 MODIFY COLUMN s Tuple(UInt64, UInt64, String, String, String); + +SELECT + type, + serialization_kind, + subcolumns.names, + subcolumns.types, + subcolumns.serializations +FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_mutations_3' AND column = 's' AND active +ORDER BY name; + +SELECT sum(s.1), sum(s.2), groupUniqArray(s.3), groupUniqArray(s.4), groupUniqArray(s.5) FROM t_sparse_mutations_3; + +OPTIMIZE TABLE t_sparse_mutations_3 FINAL; + +SELECT + type, + serialization_kind, + subcolumns.names, + subcolumns.types, + subcolumns.serializations +FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_mutations_3' AND column = 's' AND active +ORDER BY name; + +SELECT sum(s.1), sum(s.2), groupUniqArray(s.3), groupUniqArray(s.4), groupUniqArray(s.5) FROM t_sparse_mutations_3; + +ALTER TABLE t_sparse_mutations_3 MODIFY COLUMN s Tuple(UInt64, UInt64, UInt64, UInt64, String); + +SELECT + type, + serialization_kind, + subcolumns.names, + subcolumns.types, + subcolumns.serializations +FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_mutations_3' AND column = 's' AND active +ORDER BY name; + +SELECT sum(s.1), sum(s.2), sum(s.3), sum(s.4), groupUniqArray(s.5) FROM 
t_sparse_mutations_3; + +OPTIMIZE TABLE t_sparse_mutations_3 FINAL; + +SELECT + type, + serialization_kind, + subcolumns.names, + subcolumns.types, + subcolumns.serializations +FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_mutations_3' AND column = 's' AND active +ORDER BY name; + +SELECT sum(s.1), sum(s.2), sum(s.3), sum(s.4), groupUniqArray(s.5) FROM t_sparse_mutations_3; + +SET mutations_sync=2; +ALTER TABLE t_sparse_mutations_3 MODIFY COLUMN s Tuple(Nullable(UInt64), Nullable(UInt64), Nullable(UInt64), Nullable(UInt64), Nullable(String)); + +SELECT + type, + serialization_kind, + subcolumns.names, + subcolumns.types, + subcolumns.serializations +FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_mutations_3' AND column = 's' AND active +ORDER BY name; + +SELECT sum(s.1), sum(s.2), sum(s.3), sum(s.4), groupUniqArray(s.5) FROM t_sparse_mutations_3; + +DROP TABLE t_sparse_mutations_3; diff --git a/parser/testdata/02662_sparse_columns_mutations_4/ast.json b/parser/testdata/02662_sparse_columns_mutations_4/ast.json new file mode 100644 index 000000000..a7592af00 --- /dev/null +++ b/parser/testdata/02662_sparse_columns_mutations_4/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001439441, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02662_sparse_columns_mutations_4/metadata.json b/parser/testdata/02662_sparse_columns_mutations_4/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02662_sparse_columns_mutations_4/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02662_sparse_columns_mutations_4/query.sql b/parser/testdata/02662_sparse_columns_mutations_4/query.sql new file mode 100644 index 000000000..039af6584 --- /dev/null +++ b/parser/testdata/02662_sparse_columns_mutations_4/query.sql @@ -0,0 +1,21 @@ +SET mutations_sync = 2; + +DROP TABLE IF EXISTS t_sparse_mutations_4; + +CREATE TABLE t_sparse_mutations_4 (k UInt64, v UInt64) +ENGINE = MergeTree ORDER BY k +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9; + +INSERT INTO t_sparse_mutations_4 SELECT number, 0 FROM numbers(10000); + +SELECT type, serialization_kind FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_mutations_4' AND column = 'v' AND active +ORDER BY name; + +ALTER TABLE t_sparse_mutations_4 MODIFY COLUMN v String; + +SELECT type, serialization_kind FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_mutations_4' AND column = 'v' AND active +ORDER BY name; + +DROP TABLE t_sparse_mutations_4; diff --git a/parser/testdata/02662_sparse_columns_mutations_5/ast.json b/parser/testdata/02662_sparse_columns_mutations_5/ast.json new file mode 100644 index 000000000..3361d1ee4 --- /dev/null +++ b/parser/testdata/02662_sparse_columns_mutations_5/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001228865, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02662_sparse_columns_mutations_5/metadata.json b/parser/testdata/02662_sparse_columns_mutations_5/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02662_sparse_columns_mutations_5/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02662_sparse_columns_mutations_5/query.sql b/parser/testdata/02662_sparse_columns_mutations_5/query.sql new file mode 100644 index 000000000..6543598b4 --- /dev/null +++ b/parser/testdata/02662_sparse_columns_mutations_5/query.sql @@ -0,0 +1,21 @@ +SET mutations_sync = 2; + +DROP TABLE IF EXISTS t_sparse_mutations_5; + +CREATE TABLE t_sparse_mutations_5 (k UInt64, t Tuple(UInt64, UInt64)) +ENGINE = MergeTree ORDER BY k +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9, serialization_info_version = 'basic'; + +INSERT INTO t_sparse_mutations_5 SELECT number, (0, 0) FROM numbers(10000); + +SELECT type, serialization_kind, subcolumns.names, subcolumns.types, subcolumns.serializations FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_mutations_5' AND column = 't' AND active +ORDER BY name; + +ALTER TABLE t_sparse_mutations_5 MODIFY COLUMN t Tuple(UInt64, String); + +SELECT type, serialization_kind, subcolumns.names, subcolumns.types, subcolumns.serializations FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_mutations_5' AND column = 't' AND active +ORDER BY name; + +DROP TABLE t_sparse_mutations_5; diff --git a/parser/testdata/02668_column_block_number/ast.json b/parser/testdata/02668_column_block_number/ast.json new file mode 100644 index 000000000..5e7e2fe3c --- /dev/null +++ b/parser/testdata/02668_column_block_number/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001117868, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02668_column_block_number/metadata.json b/parser/testdata/02668_column_block_number/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02668_column_block_number/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02668_column_block_number/query.sql b/parser/testdata/02668_column_block_number/query.sql new file mode 100644 index 000000000..bc4c815e7 --- /dev/null +++ b/parser/testdata/02668_column_block_number/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test (id UInt32, a UInt32) ENGINE = MergeTree ORDER BY id +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 0; + +INSERT INTO test(id,a) VALUES (1,1),(2,2),(3,3); +INSERT INTO test(id,a) VALUES (4,4),(5,5),(6,6); + +SELECT '*** BEFORE MUTATION BEFORE MERGE ***'; +SELECT id,a,_block_number,_part from test ORDER BY id; + +set mutations_sync=1; +ALTER TABLE test UPDATE a=0 WHERE id<4; + +SELECT '*** AFTER MUTATION BEFORE MERGE ***'; +SELECT id,a,_block_number,_part from test ORDER BY id; + +OPTIMIZE TABLE test FINAL; + +SELECT '*** AFTER MUTATION AFTER MERGE ***'; +SELECT *,_block_number,_part from test ORDER BY id; + +INSERT INTO test(id,a) VALUES (7,7),(8,8),(9,9); + +SELECT '*** AFTER MUTATION AFTER MERGE , NEW BLOCK ***'; +SELECT *,_block_number,_part from test ORDER BY id; + +OPTIMIZE TABLE test FINAL; + +SELECT '*** AFTER MUTATION AFTER MERGE , NEW BLOCK MERGED ***'; +SELECT *,_block_number,_part from test ORDER BY id; + +DROP TABLE test; \ No newline at end of file diff --git a/parser/testdata/02668_column_block_number_vertical_merge/ast.json 
b/parser/testdata/02668_column_block_number_vertical_merge/ast.json new file mode 100644 index 000000000..3f1a633bc --- /dev/null +++ b/parser/testdata/02668_column_block_number_vertical_merge/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001143553, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02668_column_block_number_vertical_merge/metadata.json b/parser/testdata/02668_column_block_number_vertical_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02668_column_block_number_vertical_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02668_column_block_number_vertical_merge/query.sql b/parser/testdata/02668_column_block_number_vertical_merge/query.sql new file mode 100644 index 000000000..c5361a55c --- /dev/null +++ b/parser/testdata/02668_column_block_number_vertical_merge/query.sql @@ -0,0 +1,39 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test (id UInt32, a UInt32) ENGINE = MergeTree ORDER BY id +SETTINGS + enable_block_number_column = 1, + enable_block_offset_column = 0, + vertical_merge_algorithm_min_rows_to_activate = 1, + vertical_merge_algorithm_min_columns_to_activate = 0, + min_rows_for_wide_part = 1, + min_bytes_for_wide_part = 1; + +INSERT INTO test(id,a) VALUES (1,1),(2,2),(3,3); +INSERT INTO test(id,a) VALUES (4,4),(5,5),(6,6); + +SELECT '*** BEFORE MUTATION BEFORE MERGE ***'; +SELECT id,a,_block_number,_part from test ORDER BY id; + +set mutations_sync=1; +ALTER TABLE test UPDATE a=0 WHERE id<4; + +SELECT '*** AFTER MUTATION BEFORE MERGE ***'; +SELECT id,a,_block_number,_part from test ORDER BY id; + +OPTIMIZE TABLE test FINAL; + +SELECT '*** AFTER MUTATION AFTER MERGE ***'; +SELECT *,_block_number,_part from test ORDER BY id; + +INSERT INTO test(id,a) VALUES (7,7),(8,8),(9,9); + +SELECT '*** AFTER MUTATION AFTER MERGE , NEW BLOCK ***'; +SELECT *,_block_number,_part from test ORDER BY id; + +OPTIMIZE TABLE test FINAL; + +SELECT '*** AFTER MUTATION AFTER MERGE , NEW BLOCK MERGED ***'; +SELECT *,_block_number,_part from test ORDER BY id; + +DROP TABLE test; \ No newline at end of file diff --git a/parser/testdata/02668_column_block_number_with_projections/ast.json b/parser/testdata/02668_column_block_number_with_projections/ast.json new file mode 100644 index 000000000..146d738d8 --- /dev/null +++ b/parser/testdata/02668_column_block_number_with_projections/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001216402, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02668_column_block_number_with_projections/metadata.json b/parser/testdata/02668_column_block_number_with_projections/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02668_column_block_number_with_projections/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02668_column_block_number_with_projections/query.sql b/parser/testdata/02668_column_block_number_with_projections/query.sql new file mode 100644 index 000000000..25f5e094f --- /dev/null +++ 
b/parser/testdata/02668_column_block_number_with_projections/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS t; +CREATE TABLE t (x UInt8, PROJECTION p (SELECT x GROUP BY x)) ENGINE = MergeTree ORDER BY () SETTINGS enable_block_number_column = 1; + +INSERT INTO t VALUES (0); +INSERT INTO t VALUES (1),(1); +INSERT INTO t VALUES (2),(3); + +SELECT x FROM t GROUP BY x; +OPTIMIZE TABLE t FINAL; + +SELECT '*** AFTER FIRST OPTIMIZE ***'; +SELECT x,_block_number FROM t; + +INSERT INTO t VALUES (4), (5), (6); +OPTIMIZE TABLE t FINAL; +SELECT '*** AFTER SECOND OPTIMIZE ***'; +SELECT x,_block_number FROM t; + +DROP TABLE t; \ No newline at end of file diff --git a/parser/testdata/02668_logical_optimizer_removing_redundant_checks/ast.json b/parser/testdata/02668_logical_optimizer_removing_redundant_checks/ast.json new file mode 100644 index 000000000..099a3d86b --- /dev/null +++ b/parser/testdata/02668_logical_optimizer_removing_redundant_checks/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000996514, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02668_logical_optimizer_removing_redundant_checks/metadata.json b/parser/testdata/02668_logical_optimizer_removing_redundant_checks/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02668_logical_optimizer_removing_redundant_checks/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02668_logical_optimizer_removing_redundant_checks/query.sql b/parser/testdata/02668_logical_optimizer_removing_redundant_checks/query.sql new file mode 100644 index 000000000..dabdcfd55 --- /dev/null +++ b/parser/testdata/02668_logical_optimizer_removing_redundant_checks/query.sql @@ -0,0 +1,41 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS 02668_logical_optimizer; + +CREATE TABLE 02668_logical_optimizer +(a Int32, b LowCardinality(String)) +ENGINE=Memory; + +INSERT INTO 02668_logical_optimizer VALUES (1, 'test'), (2, 'test2'), (3, 'another'); + +-- Chain of OR equals +SET optimize_min_equality_disjunction_chain_length = 2; + +SELECT * FROM 02668_logical_optimizer WHERE a = 1 OR 3 = a OR 1 = a; +EXPLAIN QUERY TREE SELECT * FROM 02668_logical_optimizer WHERE a = 1 OR 3 = a OR 1 = a; + +SELECT * FROM 02668_logical_optimizer WHERE a = 1 OR 1 = a; +EXPLAIN QUERY TREE SELECT * FROM 02668_logical_optimizer WHERE a = 1 OR 1 = a; + +-- Chain of AND equals +SELECT * FROM 02668_logical_optimizer WHERE a = 1 AND 2 = a; +EXPLAIN QUERY TREE SELECT * FROM 02668_logical_optimizer WHERE a = 1 AND 2 = a; + +SELECT * FROM 02668_logical_optimizer WHERE 3 = a AND b = 'another' AND a = 3; +EXPLAIN QUERY TREE SELECT * FROM 02668_logical_optimizer WHERE a = 3 AND b = 'another' AND a = 3; + +SELECT * FROM 02668_logical_optimizer WHERE a = 2 AND 2 = a; +EXPLAIN QUERY TREE SELECT * FROM 02668_logical_optimizer WHERE a = 2 AND 2 = a; + +-- Chain of AND notEquals +SET optimize_min_inequality_conjunction_chain_length = 2; + +SELECT * FROM 02668_logical_optimizer WHERE a <> 1 AND 3 <> a AND 1 <> a; +EXPLAIN QUERY TREE SELECT * FROM 02668_logical_optimizer WHERE a <> 1 AND 3 <> a AND 1 <> a; + +SELECT * FROM 02668_logical_optimizer WHERE a <> 1 AND 1 <> a; +EXPLAIN QUERY TREE SELECT * FROM 02668_logical_optimizer WHERE a <> 1 AND 1 <> a; + +SELECT a FROM 02668_logical_optimizer WHERE (b = 'test') AND ('test' = b); + +SELECT (k = 3) OR ( (k = 1) OR (k = 2) OR ( 
(NULL OR 1) = k ) ) FROM ( SELECT materialize(1) AS k ); diff --git a/parser/testdata/02668_parse_datetime/ast.json b/parser/testdata/02668_parse_datetime/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02668_parse_datetime/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02668_parse_datetime/metadata.json b/parser/testdata/02668_parse_datetime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02668_parse_datetime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02668_parse_datetime/query.sql b/parser/testdata/02668_parse_datetime/query.sql new file mode 100644 index 000000000..96443b1a8 --- /dev/null +++ b/parser/testdata/02668_parse_datetime/query.sql @@ -0,0 +1,247 @@ +-- { echoOn } +-- year +select parseDateTime('2020', '%Y', 'UTC') = toDateTime('2020-01-01', 'UTC'); + +-- month +select parseDateTime('02', '%m', 'UTC') = toDateTime('2000-02-01', 'UTC'); +select parseDateTime('07', '%m', 'UTC') = toDateTime('2000-07-01', 'UTC'); +select parseDateTime('11-', '%m-', 'UTC') = toDateTime('2000-11-01', 'UTC'); +select parseDateTime('00', '%m'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('13', '%m'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('12345', '%m'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('02', '%c', 'UTC') = toDateTime('2000-02-01', 'UTC'); +select parseDateTime('07', '%c', 'UTC') = toDateTime('2000-07-01', 'UTC'); +select parseDateTime('11-', '%c-', 'UTC') = toDateTime('2000-11-01', 'UTC'); +select parseDateTime('00', '%c'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('13', '%c'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('12345', '%c'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('jun', '%b', 'UTC') = toDateTime('2000-06-01', 'UTC'); +select parseDateTime('JUN', '%b', 'UTC') = toDateTime('2000-06-01', 'UTC'); +select parseDateTime('abc', '%b'); -- { serverError CANNOT_PARSE_DATETIME } +set formatdatetime_parsedatetime_m_is_month_name = 1; +select parseDateTime('may', '%M', 'UTC') = toDateTime('2000-05-01', 'UTC'); +select parseDateTime('MAY', '%M', 'UTC') = toDateTime('2000-05-01', 'UTC'); +select parseDateTime('september', '%M', 'UTC') = toDateTime('2000-09-01', 'UTC'); +select parseDateTime('summer', '%M'); -- { serverError CANNOT_PARSE_DATETIME } +set formatdatetime_parsedatetime_m_is_month_name = 0; +select parseDateTime('08', '%M', 'UTC') = toDateTime('1970-01-01 00:08:00', 'UTC'); +select parseDateTime('59', '%M', 'UTC') = toDateTime('1970-01-01 00:59:00', 'UTC'); +select parseDateTime('00/', '%M/', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC'); +select parseDateTime('60', '%M', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('-1', '%M', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('123456789', '%M', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +set formatdatetime_parsedatetime_m_is_month_name = 1; + +-- day of month +select parseDateTime('07', '%d', 'UTC') = toDateTime('2000-01-07', 'UTC'); +select parseDateTime('01', '%d', 'UTC') = toDateTime('2000-01-01', 'UTC'); +select parseDateTime('/11', '/%d', 'UTC') = toDateTime('2000-01-11', 'UTC'); +select parseDateTime('00', '%d'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('32', '%d'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('12345', '%d'); -- { serverError 
CANNOT_PARSE_DATETIME } +select parseDateTime('02-31', '%m-%d'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('04-31', '%m-%d'); -- { serverError CANNOT_PARSE_DATETIME } +-- The last one is chosen if multiple months of year are supplied +select parseDateTime('01 31 20 02', '%m %d %d %m', 'UTC') = toDateTime('2000-02-20', 'UTC'); +select parseDateTime('02 31 20 04', '%m %d %d %m', 'UTC') = toDateTime('2000-04-20', 'UTC'); +select parseDateTime('02 31 01', '%m %d %m', 'UTC') = toDateTime('2000-01-31', 'UTC'); +select parseDateTime('2000-02-29', '%Y-%m-%d', 'UTC') = toDateTime('2000-02-29', 'UTC'); +select parseDateTime('2001-02-29', '%Y-%m-%d'); -- { serverError CANNOT_PARSE_DATETIME } + +-- day of year +select parseDateTime('001', '%j', 'UTC') = toDateTime('2000-01-01', 'UTC'); +select parseDateTime('007', '%j', 'UTC') = toDateTime('2000-01-07', 'UTC'); +select parseDateTime('/031/', '/%j/', 'UTC') = toDateTime('2000-01-31', 'UTC'); +select parseDateTime('032', '%j', 'UTC') = toDateTime('2000-02-01', 'UTC'); +select parseDateTime('060', '%j', 'UTC') = toDateTime('2000-02-29', 'UTC'); +select parseDateTime('365', '%j', 'UTC') = toDateTime('2000-12-30', 'UTC'); +select parseDateTime('366', '%j', 'UTC') = toDateTime('2000-12-31', 'UTC'); +select parseDateTime('1980 001', '%Y %j', 'UTC') = toDateTime('1980-01-01', 'UTC'); +select parseDateTime('1980 007', '%Y %j', 'UTC') = toDateTime('1980-01-07', 'UTC'); +select parseDateTime('1980 /007', '%Y /%j', 'UTC') = toDateTime('1980-01-07', 'UTC'); +select parseDateTime('1980 /031/', '%Y /%j/', 'UTC') = toDateTime('1980-01-31', 'UTC'); +select parseDateTime('1980 032', '%Y %j', 'UTC') = toDateTime('1980-02-01', 'UTC'); +select parseDateTime('1980 060', '%Y %j', 'UTC') = toDateTime('1980-02-29', 'UTC'); +select parseDateTime('1980 366', '%Y %j', 'UTC') = toDateTime('1980-12-31', 'UTC'); +select parseDateTime('1981 366', '%Y %j'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('367', '%j'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('000', '%j'); -- { serverError CANNOT_PARSE_DATETIME } +-- The last one is chosen if multiple days of year are supplied.
+select parseDateTime('2000 366 2001', '%Y %j %Y'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('2001 366 2000', '%Y %j %Y', 'UTC') = toDateTime('2000-12-31', 'UTC'); + +-- hour of day +select parseDateTime('07', '%H', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC'); +select parseDateTime('23', '%H', 'UTC') = toDateTime('1970-01-01 23:00:00', 'UTC'); +select parseDateTime('00', '%H', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC'); +select parseDateTime('10', '%H', 'UTC') = toDateTime('1970-01-01 10:00:00', 'UTC'); +select parseDateTime('24', '%H', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('-1', '%H', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('1234567', '%H', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('07', '%k', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC'); +select parseDateTime('23', '%k', 'UTC') = toDateTime('1970-01-01 23:00:00', 'UTC'); +select parseDateTime('00', '%k', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC'); +select parseDateTime('10', '%k', 'UTC') = toDateTime('1970-01-01 10:00:00', 'UTC'); +select parseDateTime('24', '%k', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('-1', '%k', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('1234567', '%k', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } + +-- hour of half day +select parseDateTime('07', '%h', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC'); +select parseDateTime('12', '%h', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC'); +select parseDateTime('01', '%h', 'UTC') = toDateTime('1970-01-01 01:00:00', 'UTC'); +select parseDateTime('10', '%h', 'UTC') = toDateTime('1970-01-01 10:00:00', 'UTC'); +select parseDateTime('00', '%h', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('13', '%h', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('123456789', '%h', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('07', '%I', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC'); +select parseDateTime('12', '%I', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC'); +select parseDateTime('01', '%I', 'UTC') = toDateTime('1970-01-01 01:00:00', 'UTC'); +select parseDateTime('10', '%I', 'UTC') = toDateTime('1970-01-01 10:00:00', 'UTC'); +select parseDateTime('00', '%I', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('13', '%I', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('123456789', '%I', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('07', '%l', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC'); +select parseDateTime('12', '%l', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC'); +select parseDateTime('01', '%l', 'UTC') = toDateTime('1970-01-01 01:00:00', 'UTC'); +select parseDateTime('10', '%l', 'UTC') = toDateTime('1970-01-01 10:00:00', 'UTC'); +select parseDateTime('00', '%l', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('13', '%l', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('123456789', '%l', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } + +-- half of day +select parseDateTime('07 PM', '%H %p', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC'); +select parseDateTime('07 AM', '%H %p', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC'); +select parseDateTime('07 pm', '%H %p', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC'); +select parseDateTime('07 am', '%H %p', 'UTC') = 
toDateTime('1970-01-01 07:00:00', 'UTC'); +select parseDateTime('00 AM', '%H %p', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC'); +select parseDateTime('00 PM', '%H %p', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC'); +select parseDateTime('00 am', '%H %p', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC'); +select parseDateTime('00 pm', '%H %p', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC'); +select parseDateTime('01 PM', '%h %p', 'UTC') = toDateTime('1970-01-01 13:00:00', 'UTC'); +select parseDateTime('01 AM', '%h %p', 'UTC') = toDateTime('1970-01-01 01:00:00', 'UTC'); +select parseDateTime('06 PM', '%h %p', 'UTC') = toDateTime('1970-01-01 18:00:00', 'UTC'); +select parseDateTime('06 AM', '%h %p', 'UTC') = toDateTime('1970-01-01 06:00:00', 'UTC'); +select parseDateTime('12 PM', '%h %p', 'UTC') = toDateTime('1970-01-01 12:00:00', 'UTC'); +select parseDateTime('12 AM', '%h %p', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC'); + +-- minute +select parseDateTime('08', '%i', 'UTC') = toDateTime('1970-01-01 00:08:00', 'UTC'); +select parseDateTime('59', '%i', 'UTC') = toDateTime('1970-01-01 00:59:00', 'UTC'); +select parseDateTime('00/', '%i/', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC'); +select parseDateTime('60', '%i', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('-1', '%i', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('123456789', '%i', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } + +-- second +select parseDateTime('09', '%s', 'UTC') = toDateTime('1970-01-01 00:00:09', 'UTC'); +select parseDateTime('58', '%s', 'UTC') = toDateTime('1970-01-01 00:00:58', 'UTC'); +select parseDateTime('00/', '%s/', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC'); +select parseDateTime('60', '%s', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('-1', '%s', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('123456789', '%s', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } + +-- microsecond +select parseDateTime('000000', '%f', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC'); +select parseDateTime('456789', '%f', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC'); +select parseDateTime('42', '%f', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC'); -- { serverError NOT_ENOUGH_SPACE } +select parseDateTime('12ABCD', '%f', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } + +-- mixed YMD format +select parseDateTime('2021-01-04+23:00:00.654321', '%Y-%m-%d+%H:%i:%s.%f', 'UTC') = toDateTime('2021-01-04 23:00:00', 'UTC'); +select parseDateTime('2019-07-03 11:04:10.975319', '%Y-%m-%d %H:%i:%s.%f', 'UTC') = toDateTime('2019-07-03 11:04:10', 'UTC'); +select parseDateTime('10:04:11 03-07-2019.242424', '%s:%i:%H %d-%m-%Y.%f', 'UTC') = toDateTime('2019-07-03 11:04:10', 'UTC'); + +-- *OrZero, *OrNull, str_to_date +select parseDateTimeOrZero('10:04:11 03-07-2019', '%s:%i:%H %d-%m-%Y', 'UTC') = toDateTime('2019-07-03 11:04:10', 'UTC'); +select parseDateTimeOrZero('10:04:11 invalid 03-07-2019', '%s:%i:%H %d-%m-%Y', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC'); +select parseDateTimeOrNull('10:04:11 03-07-2019', '%s:%i:%H %d-%m-%Y', 'UTC') = toDateTime('2019-07-03 11:04:10', 'UTC'); +select parseDateTimeOrNull('10:04:11 invalid 03-07-2019', '%s:%i:%H %d-%m-%Y', 'UTC') IS NULL; +select str_to_date('10:04:11 03-07-2019', '%s:%i:%H %d-%m-%Y', 'UTC') = toDateTime('2019-07-03 11:04:10', 'UTC'); +select sTr_To_DaTe('10:04:11 03-07-2019', '%s:%i:%H %d-%m-%Y', 'UTC') = 
toDateTime('2019-07-03 11:04:10', 'UTC'); +select str_to_date('10:04:11 invalid 03-07-2019', '%s:%i:%H %d-%m-%Y', 'UTC') IS NULL; + +-- Error handling +select parseDateTime(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select parseDateTime('12 AM', '%h %p', 'UTC', 'a fourth argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +-- Fuzzer crash bug #53715 +select parseDateTime('', '', toString(number)) from numbers(13); -- { serverError ILLEGAL_COLUMN } + +-- %h +select parseDateTime('Aug 13, 2022, 7:58:32 PM', '%b %e, %G, %h:%i:%s %p', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('Aug 13, 2022, 07:58:32 PM', '%b %e, %G, %h:%i:%s %p', 'UTC'); +-- %l accepts single- or double-digit inputs +select parseDateTime('Aug 13, 2022, 7:58:32 PM', '%b %e, %G, %l:%i:%s %p', 'UTC'); +select parseDateTime('Aug 13, 2022, 07:58:32 PM', '%b %e, %G, %l:%i:%s %p', 'UTC'); +-- %H +select parseDateTime('Aug 13, 2022, 7:58:32', '%b %e, %G, %H:%i:%s', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('Aug 13, 2022, 07:58:32', '%b %e, %G, %H:%i:%s', 'UTC'); +-- %k accepts single- or double-digit inputs +select parseDateTime('Aug 13, 2022, 7:58:32', '%b %e, %G, %k:%i:%s', 'UTC'); +select parseDateTime('Aug 13, 2022, 07:58:32', '%b %e, %G, %k:%i:%s', 'UTC'); +-- %m +select parseDateTime('8 13, 2022, 7:58:32', '%m %e, %G, %k:%i:%s', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime('08 13, 2022, 07:58:32', '%m %e, %G, %k:%i:%s', 'UTC'); +-- %c accepts single- or double-digit inputs +select parseDateTime('8 13, 2022, 7:58:32', '%c %e, %G, %k:%i:%s', 'UTC'); +select parseDateTime('08 13, 2022, 07:58:32', '%c %e, %G, %k:%i:%s', 'UTC'); + +-- The format string argument is optional +set session_timezone = 'UTC'; -- don't randomize the session timezone +select parseDateTime('2021-01-04 23:12:34') = toDateTime('2021-01-04 23:12:34'); +select parseDateTime(''); -- { serverError NOT_ENOUGH_SPACE } + +-- Test setting 'parsedatetime_e_requires_space_padding' +-- In the default behavior, leading spaces for %e are optional +select parseDateTime(' 1/12/2024', '%e/%m/%Y') settings parsedatetime_e_requires_space_padding = 0; +select parseDateTime(' 1/12/2024', '%e/%m/%Y') settings parsedatetime_e_requires_space_padding = 0; +select parseDateTime('1/12/2024', '%e/%m/%Y') settings parsedatetime_e_requires_space_padding = 0; +-- If we enable the legacy behavior, leading spaces for %e are mandatory +select parseDateTime(' 1/12/2024', '%e/%m/%Y') settings parsedatetime_e_requires_space_padding = 1; -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime(' 1/12/2024', '%e/%m/%Y') settings parsedatetime_e_requires_space_padding = 1; +select parseDateTime('1/12/2024', '%e/%m/%Y') settings parsedatetime_e_requires_space_padding = 1; -- { serverError CANNOT_PARSE_DATETIME } + +-- ------------------------------------------------------------------------------------------------------------------------- +-- Tests for parseDateTime64; these are not systematic + +select parseDateTime64(''); -- { serverError NOT_ENOUGH_SPACE } +select parseDateTime64('2021-01-04 23:12:34.118'); -- { serverError NOT_ENOUGH_SPACE } +select parseDateTime64('2177-10-09 10:30:10.123'); -- { serverError NOT_ENOUGH_SPACE } +select parseDateTime64('2021-01-04 23:12:34.118112') = toDateTime64('2021-01-04 23:12:34.118112', 6); +select parseDateTime64('2021-01-04 23:12:34.118112', '%Y-%m-%d %H:%i:%s.%f') = toDateTime64('2021-01-04 23:12:34.118112', 6); +select
parseDateTime64('2021-01-04 23:12:34.118', '%Y-%m-%d %H:%i:%s.%f'); -- { serverError NOT_ENOUGH_SPACE } +select parseDateTime64('2021-01-04 23:12:34.11811235', '%Y-%m-%d %H:%i:%s.%f'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime64('2021-01-04 23:12:34.118112', '%Y-%m-%d %H:%i:%s'); -- { serverError CANNOT_PARSE_DATETIME } +-- leap vs non-leap years +select parseDateTime64('2024-02-29 11:23:34.123433', '%Y-%m-%d %H:%i:%s.%f') = toDateTime64('2024-02-29 11:23:34.123433', 6); +select parseDateTime64('2023-02-29 11:22:33.123433', '%Y-%m-%d %H:%i:%s.%f'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime64('2024-02-28 23:22:33.123433', '%Y-%m-%d %H:%i:%s.%f') = toDateTime64('2024-02-28 23:22:33.123433', 6); +select parseDateTime64('2023-02-28 23:22:33.123433', '%Y-%m-%d %H:%i:%s.%f') = toDateTime64('2023-02-28 23:22:33.123433', 6); +-- parseDateTime64OrNull +select parseDateTime64OrNull('2021-01-04 23:12:34.118') IS NULL; +select parseDateTime64OrNull('2021-01-04 23:12:34.118', '%Y-%m-%d %H:%i:%s.%f') IS NULL; +select parseDateTime64OrNull('2021-01-04 23:12:34.118112', '%Y-%m-%d %H:%i:%s') IS NULL; +select parseDateTime64OrNull('2021-01-04 23:12:34.11811235', '%Y-%m-%d %H:%i:%s.%f') IS NULL; +-- parseDateTime64OrZero +select parseDateTime64OrZero('2021-01-04 23:12:34.118') = toDateTime64('1970-01-01 00:00:00', 6); +select parseDateTime64OrZero('2021-01-04 23:12:34.118', '%Y-%m-%d %H:%i:%s.%f') = toDateTime64('1970-01-01 00:00:00', 6); +select parseDateTime64OrZero('2021-01-04 23:12:34.118112', '%Y-%m-%d %H:%i:%s') = toDateTime64('1970-01-01 00:00:00', 6); +select parseDateTime64OrZero('2021-01-04 23:12:34.11811235', '%Y-%m-%d %H:%i:%s.%f') = toDateTime64('1970-01-01 00:00:00', 6); + +-- Test that parseDateTime64 supports the range 1900 - 2299 +select parseDateTime64('1899-12-31 23:59:59', '%Y-%m-%d %H:%i:%s'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTime64('1900-01-01 00:00:00', '%Y-%m-%d %H:%i:%s') = toDateTime64('1900-01-01 00:00:00', 0); +select parseDateTime64('1920-06-06 00:00:01', '%Y-%m-%d %H:%i:%s') = toDateTime64('1920-06-06 00:00:01', 0); +select parseDateTime64('1970-01-01 00:00:00', '%Y-%m-%d %H:%i:%s') = toDateTime64('1970-01-01 00:00:00', 0); +select parseDateTime64('1971-02-03 04:05:06', '%Y-%m-%d %H:%i:%s') = toDateTime64('1971-02-03 04:05:06', 0); +select parseDateTime64('2105-02-03 04:05:06', '%Y-%m-%d %H:%i:%s') = toDateTime64('2105-02-03 04:05:06', 0); +select parseDateTime64('2106-02-07 06:28:15', '%Y-%m-%d %H:%i:%s') = toDateTime64('2106-02-07 06:28:15', 0); +select parseDateTime64('2106-02-08 06:28:15', '%Y-%m-%d %H:%i:%s') = toDateTime64('2106-02-08 06:28:15', 0); +select parseDateTime64('2299-12-31 23:59:59', '%Y-%m-%d %H:%i:%s') = toDateTime64('2299-12-31 23:59:59', 0); +select parseDateTime64('2300-01-01 00:00:00', '%Y-%m-%d %H:%i:%s'); -- { serverError CANNOT_PARSE_DATETIME } + +-- ------------------------------------------------------------------------------------------------------------------------- + + +-- { echoOff } diff --git a/parser/testdata/02668_parse_datetime_in_joda_syntax/ast.json b/parser/testdata/02668_parse_datetime_in_joda_syntax/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02668_parse_datetime_in_joda_syntax/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02668_parse_datetime_in_joda_syntax/metadata.json b/parser/testdata/02668_parse_datetime_in_joda_syntax/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null
+++ b/parser/testdata/02668_parse_datetime_in_joda_syntax/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02668_parse_datetime_in_joda_syntax/query.sql b/parser/testdata/02668_parse_datetime_in_joda_syntax/query.sql new file mode 100644 index 000000000..c6e0d8cf1 --- /dev/null +++ b/parser/testdata/02668_parse_datetime_in_joda_syntax/query.sql @@ -0,0 +1,345 @@ +-- { echoOn } +-- empty +select parseDateTimeInJodaSyntax(' ', ' ', 'UTC') = toDateTime('1970-01-01', 'UTC'); + +-- era +select parseDateTimeInJodaSyntax('AD 1999', 'G YYYY', 'UTC') = toDateTime('1999-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('ad 1999', 'G YYYY', 'UTC') = toDateTime('1999-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('Ad 1999', 'G YYYY', 'UTC') = toDateTime('1999-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('AD 1999', 'G yyyy', 'UTC') = toDateTime('1999-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('AD 1999 2000', 'G YYYY yyyy', 'UTC') = toDateTime('2000-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('AD 1999 2000', 'G yyyy YYYY', 'UTC') = toDateTime('2000-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('AD 1999', 'G Y'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('AD 1999', 'G YY'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('AD 1999', 'G YYY', 'UTC'); +select parseDateTimeInJodaSyntax('BC', 'G'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('AB', 'G'); -- { serverError CANNOT_PARSE_DATETIME } + +-- year of era +select parseDateTimeInJodaSyntax('2106', 'YYYY', 'UTC') = toDateTime('2106-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('1970', 'YYYY', 'UTC') = toDateTime('1970-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('1969', 'YYYY', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('2107', 'YYYY', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('+1999', 'YYYY', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } + +select parseDateTimeInJodaSyntax('12', 'YY', 'UTC') = toDateTime('2012-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('69', 'YY', 'UTC') = toDateTime('2069-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('70', 'YY', 'UTC') = toDateTime('1970-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('99', 'YY', 'UTC') = toDateTime('1999-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('01', 'YY', 'UTC') = toDateTime('2001-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('1', 'YY', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } + +select parseDateTimeInJodaSyntax('99 98 97', 'YY YY YY', 'UTC') = toDateTime('1997-01-01', 'UTC'); + +-- year +select parseDateTimeInJodaSyntax('12', 'yy', 'UTC') = toDateTime('2012-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('69', 'yy', 'UTC') = toDateTime('2069-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('70', 'yy', 'UTC') = toDateTime('1970-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('99', 'yy', 'UTC') = toDateTime('1999-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('+99', 'yy', 'UTC') = toDateTime('1999-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('+99 02', 'yy MM', 'UTC') = toDateTime('1999-02-01', 'UTC'); +select parseDateTimeInJodaSyntax('10 +10', 'MM yy', 'UTC') = toDateTime('2010-10-01', 'UTC'); +select parseDateTimeInJodaSyntax('10+2001', 'MMyyyy', 'UTC') = toDateTime('2001-10-01', 'UTC'); +select parseDateTimeInJodaSyntax('+200110', 'yyyyMM', 'UTC') = toDateTime('2001-10-01', 'UTC'); +select parseDateTimeInJodaSyntax('1970', 
'yyyy', 'UTC') = toDateTime('1970-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('2106', 'yyyy', 'UTC') = toDateTime('2106-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('1969', 'yyyy', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('2107', 'yyyy', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } + +-- week year +select parseDateTimeInJodaSyntax('2106', 'xxxx', 'UTC') = toDateTime('2106-01-04', 'UTC'); +select parseDateTimeInJodaSyntax('1971', 'xxxx', 'UTC') = toDateTime('1971-01-04', 'UTC'); +select parseDateTimeInJodaSyntax('2025', 'xxxx', 'UTC') = toDateTime('2024-12-30', 'UTC'); +select parseDateTimeInJodaSyntax('12', 'xx', 'UTC') = toDateTime('2012-01-02', 'UTC'); +select parseDateTimeInJodaSyntax('69', 'xx', 'UTC') = toDateTime('2068-12-31', 'UTC'); +select parseDateTimeInJodaSyntax('99', 'xx', 'UTC') = toDateTime('1999-01-04', 'UTC'); +select parseDateTimeInJodaSyntax('01', 'xx', 'UTC') = toDateTime('2001-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('+10', 'xx', 'UTC') = toDateTime('2010-01-04', 'UTC'); +select parseDateTimeInJodaSyntax('+99 01', 'xx ww', 'UTC') = toDateTime('1999-01-04', 'UTC'); +select parseDateTimeInJodaSyntax('+99 02', 'xx ww', 'UTC') = toDateTime('1999-01-11', 'UTC'); +select parseDateTimeInJodaSyntax('10 +10', 'ww xx', 'UTC') = toDateTime('2010-03-08', 'UTC'); +select parseDateTimeInJodaSyntax('2+10', 'wwxx', 'UTC') = toDateTime('2010-01-11', 'UTC'); +select parseDateTimeInJodaSyntax('+102', 'xxM', 'UTC') = toDateTime('2010-02-01', 'UTC'); +select parseDateTimeInJodaSyntax('+20102', 'xxxxM', 'UTC') = toDateTime('2010-02-01', 'UTC'); +select parseDateTimeInJodaSyntax('1970', 'xxxx', 'UTC'); -- { serverError VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE } +select parseDateTimeInJodaSyntax('1969', 'xxxx', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('2107', 'xxxx', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } + +-- century of era +select parseDateTimeInJodaSyntax('20', 'CC', 'UTC') = toDateTime('2000-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('21', 'CC', 'UTC') = toDateTime('2100-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('19', 'CC', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('22', 'CC', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } + +-- month +select parseDateTimeInJodaSyntax('1', 'M', 'UTC') = toDateTime('2000-01-01', 'UTC'); +select parseDateTimeInJodaSyntax(' 7', ' MM', 'UTC') = toDateTime('2000-07-01', 'UTC'); +select parseDateTimeInJodaSyntax('11', 'M', 'UTC') = toDateTime('2000-11-01', 'UTC'); +select parseDateTimeInJodaSyntax('10-', 'M-', 'UTC') = toDateTime('2000-10-01', 'UTC'); +select parseDateTimeInJodaSyntax('-12-', '-M-', 'UTC') = toDateTime('2000-12-01', 'UTC'); +select parseDateTimeInJodaSyntax('0', 'M', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('13', 'M', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('12345', 'M', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +--- Ensure MMM and MMMM specifiers consume both short- and long-form month names +select parseDateTimeInJodaSyntax('Aug', 'MMM', 'UTC') = toDateTime('2000-08-01', 'UTC'); +select parseDateTimeInJodaSyntax('AuG', 'MMM', 'UTC') = toDateTime('2000-08-01', 'UTC'); +select parseDateTimeInJodaSyntax('august', 'MMM', 'UTC') = toDateTime('2000-08-01', 'UTC'); +select parseDateTimeInJodaSyntax('Aug', 'MMMM', 'UTC') = toDateTime('2000-08-01', 'UTC'); +select 
parseDateTimeInJodaSyntax('AuG', 'MMMM', 'UTC') = toDateTime('2000-08-01', 'UTC'); +select parseDateTimeInJodaSyntax('august', 'MMMM', 'UTC') = toDateTime('2000-08-01', 'UTC'); +--- invalid month names +select parseDateTimeInJodaSyntax('Decembr', 'MMM', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('Decembr', 'MMMM', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('Decemberary', 'MMM', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('Decemberary', 'MMMM', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('asdf', 'MMM', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('asdf', 'MMMM', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } + +-- day of month +select parseDateTimeInJodaSyntax('1', 'd', 'UTC') = toDateTime('2000-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('7 ', 'dd ', 'UTC') = toDateTime('2000-01-07', 'UTC'); +select parseDateTimeInJodaSyntax('/11', '/dd', 'UTC') = toDateTime('2000-01-11', 'UTC'); +select parseDateTimeInJodaSyntax('/31/', '/d/', 'UTC') = toDateTime('2000-01-31', 'UTC'); +select parseDateTimeInJodaSyntax('0', 'd', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('32', 'd', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('12345', 'd', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('02-31', 'M-d', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +select parseDateTimeInJodaSyntax('04-31', 'M-d', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +-- The last one is chosen if multiple days of month are supplied. +select parseDateTimeInJodaSyntax('2 31 1', 'M d M', 'UTC') = toDateTime('2000-01-31', 'UTC'); +select parseDateTimeInJodaSyntax('1 31 20 2', 'M d d M', 'UTC') = toDateTime('2000-02-20', 'UTC'); +select parseDateTimeInJodaSyntax('2 31 20 4', 'M d d M', 'UTC') = toDateTime('2000-04-20', 'UTC'); +--- Leap year +select parseDateTimeInJodaSyntax('2020-02-29', 'YYYY-M-d', 'UTC') = toDateTime('2020-02-29', 'UTC'); +select parseDateTimeInJodaSyntax('2001-02-29', 'YYYY-M-d', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } + +-- day of year +select parseDateTimeInJodaSyntax('1', 'D', 'UTC') = toDateTime('2000-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('7 ', 'DD ', 'UTC') = toDateTime('2000-01-07', 'UTC'); +select parseDateTimeInJodaSyntax('/11', '/DD', 'UTC') = toDateTime('2000-01-11', 'UTC'); +select parseDateTimeInJodaSyntax('/31/', '/DDD/', 'UTC') = toDateTime('2000-01-31', 'UTC'); +select parseDateTimeInJodaSyntax('32', 'D', 'UTC') = toDateTime('2000-02-01', 'UTC'); +select parseDateTimeInJodaSyntax('60', 'D', 'UTC') = toDateTime('2000-02-29', 'UTC'); +select parseDateTimeInJodaSyntax('365', 'D', 'UTC') = toDateTime('2000-12-30', 'UTC'); +select parseDateTimeInJodaSyntax('366', 'D', 'UTC') = toDateTime('2000-12-31', 'UTC'); +select parseDateTimeInJodaSyntax('1999 1', 'yyyy D', 'UTC') = toDateTime('1999-01-01', 'UTC'); +select parseDateTimeInJodaSyntax('1999 7 ', 'yyyy DD ', 'UTC') = toDateTime('1999-01-07', 'UTC'); +select parseDateTimeInJodaSyntax('1999 /11', 'yyyy /DD', 'UTC') = toDateTime('1999-01-11', 'UTC'); +select parseDateTimeInJodaSyntax('1999 /31/', 'yyyy /DD/', 'UTC') = toDateTime('1999-01-31', 'UTC'); +select parseDateTimeInJodaSyntax('1999 32', 'yyyy D', 'UTC') = toDateTime('1999-02-01', 'UTC'); +select parseDateTimeInJodaSyntax('1999 60', 'yyyy D', 'UTC') =
+select parseDateTimeInJodaSyntax('1999 365', 'yyyy D', 'UTC') = toDateTime('1999-12-31', 'UTC');
+select parseDateTimeInJodaSyntax('1999 366', 'yyyy D', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+--- Ensure all days of year are checked against final selected year
+select parseDateTimeInJodaSyntax('2001 366 2000', 'yyyy D yyyy', 'UTC') = toDateTime('2000-12-31', 'UTC');
+select parseDateTimeInJodaSyntax('2000 366 2001', 'yyyy D yyyy', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTimeInJodaSyntax('0', 'D', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTimeInJodaSyntax('367', 'D', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+
+-- hour of day
+select parseDateTimeInJodaSyntax('7', 'H', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('23', 'HH', 'UTC') = toDateTime('1970-01-01 23:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('0', 'HHH', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('10', 'HHHHHHHH', 'UTC') = toDateTime('1970-01-01 10:00:00', 'UTC');
+--- invalid hour of day
+select parseDateTimeInJodaSyntax('24', 'H', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTimeInJodaSyntax('-1', 'H', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTimeInJodaSyntax('123456789', 'H', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+
+-- clock hour of day
+select parseDateTimeInJodaSyntax('7', 'k', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('24', 'kk', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('1', 'kkk', 'UTC') = toDateTime('1970-01-01 01:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('10', 'kkkkkkkk', 'UTC') = toDateTime('1970-01-01 10:00:00', 'UTC');
+-- invalid clock hour of day
+select parseDateTimeInJodaSyntax('25', 'k', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTimeInJodaSyntax('0', 'k', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTimeInJodaSyntax('123456789', 'k', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+
+-- hour of half day
+select parseDateTimeInJodaSyntax('7', 'K', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('11', 'KK', 'UTC') = toDateTime('1970-01-01 11:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('0', 'KKK', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('10', 'KKKKKKKK', 'UTC') = toDateTime('1970-01-01 10:00:00', 'UTC');
+-- invalid hour of half day
+select parseDateTimeInJodaSyntax('12', 'K', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTimeInJodaSyntax('-1', 'K', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTimeInJodaSyntax('123456789', 'K', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+
+-- clock hour of half day
+select parseDateTimeInJodaSyntax('7', 'h', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('12', 'hh', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('1', 'hhh', 'UTC') = toDateTime('1970-01-01 01:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('10', 'hhhhhhhh', 'UTC') = toDateTime('1970-01-01 10:00:00', 'UTC');
+-- invalid clock hour of half day
+select parseDateTimeInJodaSyntax('13', 'h', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTimeInJodaSyntax('0', 'h', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTimeInJodaSyntax('123456789', 'h', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
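+--- Illustrative recap, added for clarity and not part of the upstream suite: the four hour
+--- specifiers differ only in their accepted range, H in 0-23, k in 1-24 (24 wraps to 0),
+--- K in 0-11 and h in 1-12 (12 wraps to 0), so the two parses below agree on midnight.
+select parseDateTimeInJodaSyntax('12', 'h', 'UTC') = parseDateTimeInJodaSyntax('0', 'H', 'UTC');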
+
+-- half of day
+--- Half of day has no effect if hour or clockhour of day is provided
+select parseDateTimeInJodaSyntax('7 PM', 'H a', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('7 AM', 'H a', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('7 pm', 'H a', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('7 am', 'H a', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('0 PM', 'H a', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('0 AM', 'H a', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('0 pm', 'H a', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('0 am', 'H a', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('7 PM', 'k a', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('7 AM', 'k a', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('7 pm', 'k a', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('7 am', 'k a', 'UTC') = toDateTime('1970-01-01 07:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('24 PM', 'k a', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('24 AM', 'k a', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('24 pm', 'k a', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('24 am', 'k a', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC');
+-- Half of day has an effect if hour or clockhour of halfday is provided
+select parseDateTimeInJodaSyntax('0 PM', 'K a', 'UTC') = toDateTime('1970-01-01 12:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('0 AM', 'K a', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('6 PM', 'K a', 'UTC') = toDateTime('1970-01-01 18:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('6 AM', 'K a', 'UTC') = toDateTime('1970-01-01 06:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('11 PM', 'K a', 'UTC') = toDateTime('1970-01-01 23:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('11 AM', 'K a', 'UTC') = toDateTime('1970-01-01 11:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('1 PM', 'h a', 'UTC') = toDateTime('1970-01-01 13:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('1 AM', 'h a', 'UTC') = toDateTime('1970-01-01 01:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('6 PM', 'h a', 'UTC') = toDateTime('1970-01-01 18:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('6 AM', 'h a', 'UTC') = toDateTime('1970-01-01 06:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('12 PM', 'h a', 'UTC') = toDateTime('1970-01-01 12:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('12 AM', 'h a', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC');
+-- time gives precedence to the most recent time specifier
+select parseDateTimeInJodaSyntax('0 1 AM', 'H h a', 'UTC') = toDateTime('1970-01-01 01:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('12 1 PM', 'H h a', 'UTC') = toDateTime('1970-01-01 13:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('1 AM 0', 'h a H', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('1 AM 12', 'h a H', 'UTC') = toDateTime('1970-01-01 12:00:00', 'UTC');
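+--- Illustrative round trip, added for clarity and not part of the upstream suite: the AM/PM
+--- marker combined with clockhour-of-halfday 'h' resolves to the expected 24-hour wall time.
+select formatDateTimeInJodaSyntax(parseDateTimeInJodaSyntax('6 PM', 'h a', 'UTC'), 'HH:mm:ss') = '18:00:00';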
+
+-- minute
+select parseDateTimeInJodaSyntax('8', 'm', 'UTC') = toDateTime('1970-01-01 00:08:00', 'UTC');
+select parseDateTimeInJodaSyntax('59', 'mm', 'UTC') = toDateTime('1970-01-01 00:59:00', 'UTC');
+select parseDateTimeInJodaSyntax('0/', 'mmm/', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('60', 'm', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTimeInJodaSyntax('-1', 'm', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTimeInJodaSyntax('123456789', 'm', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+
+-- second
+select parseDateTimeInJodaSyntax('9', 's', 'UTC') = toDateTime('1970-01-01 00:00:09', 'UTC');
+select parseDateTimeInJodaSyntax('58', 'ss', 'UTC') = toDateTime('1970-01-01 00:00:58', 'UTC');
+select parseDateTimeInJodaSyntax('0/', 's/', 'UTC') = toDateTime('1970-01-01 00:00:00', 'UTC');
+select parseDateTimeInJodaSyntax('60', 's', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTimeInJodaSyntax('-1', 's', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTimeInJodaSyntax('123456789', 's', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+
+-- integer overflow in AST Fuzzer
+select parseDateTimeInJodaSyntax('19191919191919191919191919191919', 'CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC', 'UTC'); -- { serverError CANNOT_PARSE_DATETIME }
+
+-- *OrZero, *OrNull
+select parseDateTimeInJodaSyntaxOrZero('2001 366 2000', 'yyyy D yyyy', 'UTC') = toDateTime('2000-12-31', 'UTC');
+select parseDateTimeInJodaSyntaxOrZero('2001 invalid 366 2000', 'yyyy D yyyy', 'UTC') = toDateTime('1970-01-01', 'UTC');
+select parseDateTimeInJodaSyntaxOrNull('2001 366 2000', 'yyyy D yyyy', 'UTC') = toDateTime('2000-12-31', 'UTC');
+select parseDateTimeInJodaSyntaxOrNull('2001 invalid 366 2000', 'yyyy D yyyy', 'UTC') IS NULL;
+
+-- Error handling
+select parseDateTimeInJodaSyntax(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
+select parseDateTimeInJodaSyntax('12 AM', 'h a', 'UTC', 'a fourth argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
+
+-- The format string argument is optional
+set session_timezone = 'UTC'; -- don't randomize the session timezone
+select parseDateTimeInJodaSyntax('2021-01-04 23:12:34') = toDateTime('2021-01-04 23:12:34');
+select parseDateTimeInJodaSyntax('2024-10-09 10:30:10-0812'); -- { serverError CANNOT_PARSE_DATETIME }
+
+-- timezone and timezone offset
+select parseDateTimeInJodaSyntax('2024-10-09 10:30:10-0812', 'yyyy-MM-dd HH:mm:ssZ') = toDateTime64('2024-10-09 18:42:10', 6);
+select parseDateTimeInJodaSyntax('2024-10-09 10:30:10-08123', 'yyyy-MM-dd HH:mm:ssZZZ'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTimeInJodaSyntax('2024-10-09 10:30:10EST', 'yyyy-MM-dd HH:mm:ssz') = toDateTime64('2024-10-09 15:30:10', 6);
+select parseDateTimeInJodaSyntax('2024-10-09 10:30:10EST', 'yyyy-MM-dd HH:mm:sszzz') = toDateTime64('2024-10-09 15:30:10', 6);
+-- incorrect timezone offset and timezone
+select parseDateTimeInJodaSyntax('2024-10-09 10:30:10-8000', 'yyyy-MM-dd HH:mm:ssZ'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTimeInJodaSyntax('2024-10-09 10:30:10ABCD', 'yyyy-MM-dd HH:mm:ssz'); -- { serverError BAD_ARGUMENTS }
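+--- Worked example of the offset arithmetic in this section, added for clarity and not part of
+--- the upstream suite: '-0812' denotes UTC-08:12, so a local 10:30:10 is 10:30:10 + 08:12 =
+--- 18:42:10 in UTC, while a zero offset leaves the value unchanged.
+select parseDateTimeInJodaSyntax('2024-10-09 10:30:10+0000', 'yyyy-MM-dd HH:mm:ssZ') = toDateTime64('2024-10-09 10:30:10', 6);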
+
+-- -------------------------------------------------------------------------------------------------------------------------
+-- Tests for parseDateTime64InJodaSyntax, these are not systematic
+
+select parseDateTime64InJodaSyntax('', '') = toDateTime64('1970-01-01 00:00:00', 0);
+select parseDateTime64InJodaSyntax('2177-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS');
+select parseDateTime64InJodaSyntax('+0000', 'Z') = toDateTime64('1970-01-01 00:00:00', 0);
+select parseDateTime64InJodaSyntax('08:01', 'HH:ss') = toDateTime64('1970-01-01 08:00:01', 0);
+select parseDateTime64InJodaSyntax('2024-01-02', 'yyyy-MM-dd') = toDateTime64('2024-01-02 00:00:00', 0);
+select parseDateTime64InJodaSyntax('10:30:50', 'HH:mm:ss') = toDateTime64('1970-01-01 10:30:50', 0);
+select parseDateTime64InJodaSyntax('2024-12-31 23:30:10.123456-0800', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ') = toDateTime64('2025-01-01 07:30:10.123456', 6);
+select parseDateTime64InJodaSyntax('2024-01-01 00:00:01.123456+0800', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ') = toDateTime64('2023-12-31 16:00:01.123456', 6);
+select parseDateTime64InJodaSyntax('2021-01-04 23:12:34') = toDateTime64('2021-01-04 23:12:34', 0);
+select parseDateTime64InJodaSyntax('2021-01-04 23:12:34.331', 'yyyy-MM-dd HH:mm:ss.SSS') = toDateTime64('2021-01-04 23:12:34.331', 3);
+select parseDateTime64InJodaSyntax('2021/01/04 23:12:34.331', 'yyyy/MM/dd HH:mm:ss.SSS') = toDateTime64('2021-01-04 23:12:34.331', 3);
+select parseDateTime64InJodaSyntax('2021-01-04 23:12:34.331'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTime64InJodaSyntax('2021-01-04 23:12:34.331', 'yyyy-MM-dd HH:mm:ss.SSSS') = toDateTime64('2021-01-04 23:12:34.0331', 4);
+select parseDateTime64InJodaSyntax('2021-01-04 23:12:34.331', 'yyyy-MM-dd HH:mm:ss.SS'); -- { serverError CANNOT_PARSE_DATETIME }
+
+-- Timezone and timezone offset
+select parseDateTime64InJodaSyntax('2024-10-09 10:30:10-0812'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456-0812', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ') = toDateTime64('2024-10-09 18:42:10.123456', 6);
+select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456-08123', 'yyyy-MM-dd HH:mm:ss.SSSSSSZZZ'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456EST', 'yyyy-MM-dd HH:mm:ss.SSSSSSz') = toDateTime64('2024-10-09 15:30:10.123456', 6);
+select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456EST', 'yyyy-MM-dd HH:mm:ss.SSSSSSzzz') = toDateTime64('2024-10-09 15:30:10.123456', 6);
+select parseDateTime64InJodaSyntax('2024-11-05-0800 01:02:03.123456', 'yyyy-MM-ddZ HH:mm:ss.SSSSSS') = toDateTime64('2024-11-05 09:02:03.123456', 6);
+select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456America/Los_Angeles', 'yyyy-MM-dd HH:mm:ss.SSSSSSz') = toDateTime64('2024-10-09 17:30:10.123456', 6);
+select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456Australia/Adelaide', 'yyyy-MM-dd HH:mm:ss.SSSSSSz') = toDateTime64('2024-10-09 00:00:10.123456', 6);
+select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123', 'yyyy-dd-MM HH:mm:ss.SSS') = toDateTime64('2024-09-10 10:30:10.123', 3);
+select parseDateTime64InJodaSyntax('999999 10-09-202410:30:10', 'SSSSSSSSS dd-MM-yyyyHH:mm:ss'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456-0845', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ') = toDateTime64('2024-10-09 19:15:10.123456', 6);
+
+-- incorrect timezone offset and timezone
+select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456-8000', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTime64InJodaSyntax('2024-10-09 10:30:10.123456ABCD', 'yyyy-MM-dd HH:mm:ss.SSSSSSz'); -- { serverError BAD_ARGUMENTS }
+select parseDateTime64InJodaSyntax('2023-02-29 11:22:33Not/Timezone', 'yyyy-MM-dd HH:mm:ssz'); -- { serverError BAD_ARGUMENTS }
+
+-- leap vs non-leap years
+select parseDateTime64InJodaSyntax('2024-02-29 11:23:34America/Los_Angeles', 'yyyy-MM-dd HH:mm:ssz') = toDateTime64('2024-02-29 19:23:34', 0);
+select parseDateTime64InJodaSyntax('2023-02-29 11:22:33America/Los_Angeles', 'yyyy-MM-dd HH:mm:ssz'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTime64InJodaSyntax('2024-02-28 23:22:33America/Los_Angeles', 'yyyy-MM-dd HH:mm:ssz') = toDateTime64('2024-02-29 07:22:33', 0);
+select parseDateTime64InJodaSyntax('2023-02-28 23:22:33America/Los_Angeles', 'yyyy-MM-dd HH:mm:ssz') = toDateTime64('2023-03-01 07:22:33', 0);
+select parseDateTime64InJodaSyntax('2024-03-01 00:22:33-8000', 'yyyy-MM-dd HH:mm:ssZ'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTime64InJodaSyntax('2023-03-01 00:22:33-8000', 'yyyy-MM-dd HH:mm:ssZ'); -- { serverError CANNOT_PARSE_DATETIME }
+
+-- parseDateTime64InJodaSyntaxOrNull
+select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS') = toDateTime64('2024-10-09 10:30:10.123', 3);
+select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456', 'yyyy-MM-dd HH:mm:ss.SSSSSS') = toDateTime64('2024-10-09 10:30:10.123456', 6);
+select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456789', 'yyyy-MM-dd HH:mm:ss.SSSSSSSSS'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456-0800', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ') = toDateTime64('2024-10-09 18:30:10.123456', 6);
+select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123456America/Los_Angeles', 'yyyy-MM-dd HH:mm:ss.SSSSSSz') = toDateTime64('2024-10-09 17:30:10.123456', 6);
+select parseDateTime64InJodaSyntaxOrNull('2024-10-09 10:30:10.123', 'yyyy-dd-MM HH:mm:ss.SSS') = toDateTime64('2024-09-10 10:30:10.123', 3);
+select parseDateTime64InJodaSyntaxOrNull('2023-02-29 11:22:33America/Los_Angeles', 'yyyy-MM-dd HH:mm:ssz') IS NULL;
+select parseDateTime64InJodaSyntaxOrNull('', '') = toDateTime64('1970-01-01 00:00:00', 0);
+select parseDateTime64InJodaSyntaxOrNull('2177-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS');
+
+-- parseDateTime64InJodaSyntaxOrZero
+select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS') = toDateTime64('2024-10-09 10:30:10.123', 3);
+select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456', 'yyyy-MM-dd HH:mm:ss.SSSSSS') = toDateTime64('2024-10-09 10:30:10.123456', 6);
+select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456789', 'yyyy-MM-dd HH:mm:ss.SSSSSSSSS'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456-0800', 'yyyy-MM-dd HH:mm:ss.SSSSSSZ') = toDateTime64('2024-10-09 18:30:10.123456', 6);
+select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123456America/Los_Angeles', 'yyyy-MM-dd HH:mm:ss.SSSSSSz') = toDateTime64('2024-10-09 17:30:10.123456', 6);
+select parseDateTime64InJodaSyntaxOrZero('2024-10-09 10:30:10.123', 'yyyy-dd-MM HH:mm:ss.SSS') = toDateTime64('2024-09-10 10:30:10.123', 3);
+select parseDateTime64InJodaSyntaxOrZero('wrong value', 'yyyy-dd-MM HH:mm:ss.SSS') = toDateTime64('1970-01-01 00:00:00.000', 3);
+select parseDateTime64InJodaSyntaxOrZero('2023-02-29 11:22:33America/Los_Angeles', 'yyyy-MM-dd HH:mm:ssz') = toDateTime64('1970-01-01 00:00:00', 0);
+select parseDateTime64InJodaSyntaxOrZero('', '') = toDateTime64('1970-01-01 00:00:00', 0);
+select parseDateTime64InJodaSyntaxOrZero('2177-10-09 10:30:10.123', 'yyyy-MM-dd HH:mm:ss.SSS') = toDateTime64('2177-10-09 10:30:10.123', 3);
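+--- Illustrative contrast of the three variants on the same malformed input, added for clarity
+--- and not part of the upstream suite: the plain function throws, OrZero yields the zero value,
+--- and OrNull yields NULL.
+select parseDateTime64InJodaSyntax('invalid', 'yyyy-MM-dd'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTime64InJodaSyntaxOrZero('invalid', 'yyyy-MM-dd') = toDateTime64('1970-01-01 00:00:00', 0);
+select parseDateTime64InJodaSyntaxOrNull('invalid', 'yyyy-MM-dd') IS NULL;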
+
+-- Test parseDateTime64InJodaSyntax supports the range 1900 - 2299
+select parseDateTime64InJodaSyntax('1899-12-31 23:59:59', 'yyyy-MM-dd HH:mm:ss'); -- { serverError CANNOT_PARSE_DATETIME }
+select parseDateTime64InJodaSyntax('1900-01-01 00:00:00', 'yyyy-MM-dd HH:mm:ss') = toDateTime64('1900-01-01 00:00:00', 0);
+select parseDateTime64InJodaSyntax('1920-06-06 00:00:01', 'yyyy-MM-dd HH:mm:ss') = toDateTime64('1920-06-06 00:00:01', 0);
+select parseDateTime64InJodaSyntax('1970-01-01 00:00:00', 'yyyy-MM-dd HH:mm:ss') = toDateTime64('1970-01-01 00:00:00', 0);
+select parseDateTime64InJodaSyntax('1971-02-03 04:05:06', 'yyyy-MM-dd HH:mm:ss') = toDateTime64('1971-02-03 04:05:06', 0);
+select parseDateTime64InJodaSyntax('2105-02-03 04:05:06', 'yyyy-MM-dd HH:mm:ss') = toDateTime64('2105-02-03 04:05:06', 0);
+select parseDateTime64InJodaSyntax('2106-02-07 06:28:15', 'yyyy-MM-dd HH:mm:ss') = toDateTime64('2106-02-07 06:28:15', 0);
+select parseDateTime64InJodaSyntax('2106-02-07 16:28:15', 'yyyy-MM-dd HH:mm:ss') = toDateTime64('2106-02-07 16:28:15', 0);
+select parseDateTime64InJodaSyntax('2299-12-31 23:59:59', 'yyyy-MM-dd HH:mm:ss') = toDateTime64('2299-12-31 23:59:59', 0);
+select parseDateTime64InJodaSyntax('2300-01-01 00:00:00', 'yyyy-MM-dd HH:mm:ss'); -- { serverError CANNOT_PARSE_DATETIME }
+
+-- Test parseDateTimeInJodaSyntax with 3 repetitions in format and a 4-digit year
+select parseDateTimeInJodaSyntax('2025', 'YYY', 'UTC') = toDateTime('2025-01-01', 'UTC');
+select parseDateTimeInJodaSyntax('2025', 'xxx', 'UTC') = toDateTime('2024-12-30', 'UTC');
+select parseDateTimeInJodaSyntax('2025', 'yyy', 'UTC') = toDateTime('2025-01-01', 'UTC');
+
+-- -------------------------------------------------------------------------------------------------------------------------
+
+-- { echoOff }
diff --git a/parser/testdata/02668_ulid_decoding/ast.json b/parser/testdata/02668_ulid_decoding/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02668_ulid_decoding/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02668_ulid_decoding/metadata.json b/parser/testdata/02668_ulid_decoding/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02668_ulid_decoding/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02668_ulid_decoding/query.sql b/parser/testdata/02668_ulid_decoding/query.sql
new file mode 100644
index 000000000..85344bdf4
--- /dev/null
+++ b/parser/testdata/02668_ulid_decoding/query.sql
@@ -0,0 +1,10 @@
+-- Tags: no-fasttest
+
+SELECT dateDiff('minute', ULIDStringToDateTime(generateULID()), now()) <= 1;
+SELECT toTimezone(ULIDStringToDateTime('01GWJWKW30MFPQJRYEAF4XFZ9E'), 'America/Costa_Rica');
+SELECT ULIDStringToDateTime('01GWJWKW30MFPQJRYEAF4XFZ9E', 'America/Costa_Rica');
+SELECT ULIDStringToDateTime('01GWJWKW30MFPQJRYEAF4XFZ9', 'America/Costa_Rica'); -- { serverError ILLEGAL_COLUMN }
+SELECT ULIDStringToDateTime('01GWJWKW30MFPQJRYEAF4XFZ9E', 'America/Costa_Ric'); -- { serverError BAD_ARGUMENTS }
+SELECT ULIDStringToDateTime('01GWJWKW30MFPQJRYEAF4XFZ9E0'); -- { serverError ILLEGAL_COLUMN }
+SELECT ULIDStringToDateTime(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT ULIDStringToDateTime(1, 2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
diff --git a/parser/testdata/02669_alter_modify_to_nullable/ast.json
b/parser/testdata/02669_alter_modify_to_nullable/ast.json new file mode 100644 index 000000000..f2c3aba4d --- /dev/null +++ b/parser/testdata/02669_alter_modify_to_nullable/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_modify_to_nullable (children 1)" + }, + { + "explain": " Identifier t_modify_to_nullable" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00125552, + "rows_read": 2, + "bytes_read": 92 + } +} diff --git a/parser/testdata/02669_alter_modify_to_nullable/metadata.json b/parser/testdata/02669_alter_modify_to_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02669_alter_modify_to_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02669_alter_modify_to_nullable/query.sql b/parser/testdata/02669_alter_modify_to_nullable/query.sql new file mode 100644 index 000000000..e7545e4c4 --- /dev/null +++ b/parser/testdata/02669_alter_modify_to_nullable/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS t_modify_to_nullable; + +CREATE TABLE t_modify_to_nullable (key UInt64, id UInt64, s String) +ENGINE = MergeTree ORDER BY id PARTITION BY key +SETTINGS min_bytes_for_wide_part = 0, ratio_of_defaults_for_sparse_serialization = 0.9; + +INSERT INTO t_modify_to_nullable SELECT 1, number, 'foo' FROM numbers(10000); +INSERT INTO t_modify_to_nullable SELECT 2, number, if (number % 23 = 0, 'bar', '') FROM numbers(10000); + +SELECT name, type, serialization_kind FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_modify_to_nullable' AND column = 's' AND active +ORDER BY name; + +SELECT count(s), countIf(s != ''), arraySort(groupUniqArray(s)) FROM t_modify_to_nullable; + +SET mutations_sync = 2; +ALTER TABLE t_modify_to_nullable MODIFY COLUMN s Nullable(String); + +SELECT name, type, serialization_kind FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_modify_to_nullable' AND column = 's' AND active +ORDER BY name; + +SELECT count(s), countIf(s != ''), arraySort(groupUniqArray(s)) FROM t_modify_to_nullable; + +SYSTEM FLUSH LOGS part_log; + +SELECT part_name, read_rows FROM system.part_log +WHERE database = currentDatabase() AND table = 't_modify_to_nullable' AND event_type = 'MutatePart' +ORDER BY part_name; + +DROP TABLE t_modify_to_nullable; diff --git a/parser/testdata/02670_constant_skip_index/ast.json b/parser/testdata/02670_constant_skip_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02670_constant_skip_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02670_constant_skip_index/metadata.json b/parser/testdata/02670_constant_skip_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02670_constant_skip_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02670_constant_skip_index/query.sql b/parser/testdata/02670_constant_skip_index/query.sql new file mode 100644 index 000000000..97dd2ab33 --- /dev/null +++ b/parser/testdata/02670_constant_skip_index/query.sql @@ -0,0 +1,25 @@ + +DROP TABLE IF EXISTS t_constant_index; + +CREATE TABLE t_constant_index +( + id UInt64, + INDEX t_constant_index 'foo' TYPE set(2) GRANULARITY 1 +) ENGINE = MergeTree +ORDER BY id; -- { serverError INCORRECT_QUERY } + +CREATE TABLE t_constant_index +( + id UInt64, + INDEX t_constant_index id + rand() TYPE set(2) GRANULARITY 1 +) 
ENGINE = MergeTree +ORDER BY id; -- { serverError BAD_ARGUMENTS } + +CREATE TABLE t_constant_index +( + id UInt64, + INDEX t_constant_index id * 2 TYPE set(2) GRANULARITY 1 +) ENGINE = MergeTree +ORDER BY id; + +DROP TABLE t_constant_index; diff --git a/parser/testdata/02674_and_consistency/ast.json b/parser/testdata/02674_and_consistency/ast.json new file mode 100644 index 000000000..afa9db6d7 --- /dev/null +++ b/parser/testdata/02674_and_consistency/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function SUM (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001257891, + "rows_read": 7, + "bytes_read": 257 + } +} diff --git a/parser/testdata/02674_and_consistency/metadata.json b/parser/testdata/02674_and_consistency/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02674_and_consistency/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02674_and_consistency/query.sql b/parser/testdata/02674_and_consistency/query.sql new file mode 100644 index 000000000..5988832ba --- /dev/null +++ b/parser/testdata/02674_and_consistency/query.sql @@ -0,0 +1,21 @@ +SELECT SUM(number) +FROM +( + SELECT 10 AS number +) +GROUP BY number +HAVING 1 AND sin(SUMOrNull(number)) +SETTINGS enable_optimize_predicate_expression = 0; + +select '#45218'; + +SELECT SUM(number) +FROM +( + SELECT 10 AS number +) +GROUP BY cos(min2(number, number) % number) - number +HAVING ((-sign(-233841197)) IS NOT NULL) AND sin(lcm(SUM(number), SUM(number)) >= ('372497213' IS NOT NULL)) +SETTINGS aggregate_functions_null_for_empty = 1, enable_optimize_predicate_expression = 0; + +select '='; diff --git a/parser/testdata/02674_date_int_string_json_inference/ast.json b/parser/testdata/02674_date_int_string_json_inference/ast.json new file mode 100644 index 000000000..609bda68c --- /dev/null +++ b/parser/testdata/02674_date_int_string_json_inference/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DescribeQuery (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier JSONEachRow" + }, + { + "explain": " Literal '{\"x\" : \"2020-01-01\"}, {\"x\" : \"1000\"}'" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001309971, + "rows_read": 6, + "bytes_read": 239 + } +} diff --git a/parser/testdata/02674_date_int_string_json_inference/metadata.json b/parser/testdata/02674_date_int_string_json_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02674_date_int_string_json_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02674_date_int_string_json_inference/query.sql b/parser/testdata/02674_date_int_string_json_inference/query.sql new file mode 100644 index 000000000..21abf763c --- /dev/null +++ b/parser/testdata/02674_date_int_string_json_inference/query.sql @@ -0,0 +1,2 @@ +desc format(JSONEachRow, '{"x" : "2020-01-01"}, {"x" : "1000"}') + 
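+-- Illustrative follow-up, added for clarity and not part of the upstream test: with the two
+-- candidate interpretations swapped, inference should still settle on a common type for 'x'
+-- rather than Date, since '1000' is not a valid date.
+desc format(JSONEachRow, '{"x" : "1000"}, {"x" : "2020-01-01"}')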
diff --git a/parser/testdata/02674_null_default_structure/ast.json b/parser/testdata/02674_null_default_structure/ast.json new file mode 100644 index 000000000..7397b1a76 --- /dev/null +++ b/parser/testdata/02674_null_default_structure/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function null (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001254367, + "rows_read": 10, + "bytes_read": 373 + } +} diff --git a/parser/testdata/02674_null_default_structure/metadata.json b/parser/testdata/02674_null_default_structure/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02674_null_default_structure/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02674_null_default_structure/query.sql b/parser/testdata/02674_null_default_structure/query.sql new file mode 100644 index 000000000..fcc5af216 --- /dev/null +++ b/parser/testdata/02674_null_default_structure/query.sql @@ -0,0 +1,3 @@ +SELECT * FROM null(); +DESCRIBE null(); +insert into table function null() select 1, 'str'; diff --git a/parser/testdata/02674_trivial_count_analyzer/ast.json b/parser/testdata/02674_trivial_count_analyzer/ast.json new file mode 100644 index 000000000..cb8f33e89 --- /dev/null +++ b/parser/testdata/02674_trivial_count_analyzer/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery m3 (children 1)" + }, + { + "explain": " Identifier m3" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001294784, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02674_trivial_count_analyzer/metadata.json b/parser/testdata/02674_trivial_count_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02674_trivial_count_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02674_trivial_count_analyzer/query.sql b/parser/testdata/02674_trivial_count_analyzer/query.sql new file mode 100644 index 000000000..c13a9dc68 --- /dev/null +++ b/parser/testdata/02674_trivial_count_analyzer/query.sql @@ -0,0 +1,45 @@ +drop table if exists m3; +drop table if exists replacing_m3; + +-- { echoOn } +set enable_analyzer=1; +set optimize_trivial_count_query=1; + +create table m3(a Int64, b UInt64) Engine=MergeTree order by tuple(); + +select count() from m3; + +insert into m3 values (0,0); +insert into m3 values (-1,1); + +select trimBoth(explain) from (explain select count() from m3) where explain like '%ReadFromPreparedSource (Optimized trivial count)%'; +select count() from m3; +select count(*) from m3; +select count(a) from m3; +select count(b) from m3; +select count() + 1 from m3; + +drop table m3; + +-- checking queries with FINAL +create table replacing_m3(a Int64, b UInt64) Engine=ReplacingMergeTree() order by (a, b); +SYSTEM STOP MERGES replacing_m3; + +select count() from replacing_m3; + +insert into replacing_m3 values 
(0,0); +insert into replacing_m3 values (0,0); +insert into replacing_m3 values (-1,1); +insert into replacing_m3 values (-2,2); + +select trimBoth(explain) from (explain select count() from replacing_m3) where explain like '%ReadFromPreparedSource (Optimized trivial count)%'; +select count() from replacing_m3; +select count(*) from replacing_m3; +select count(a) from replacing_m3; +select count(b) from replacing_m3; + +select count() from replacing_m3 FINAL; +select count(a) from replacing_m3 FINAL; +select count(b) from replacing_m3 FINAL; + +drop table replacing_m3; diff --git a/parser/testdata/02675_is_ipv6_function_fix/ast.json b/parser/testdata/02675_is_ipv6_function_fix/ast.json new file mode 100644 index 000000000..51cc5470a --- /dev/null +++ b/parser/testdata/02675_is_ipv6_function_fix/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function isIPv6String (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '1234::1234:'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001250467, + "rows_read": 7, + "bytes_read": 270 + } +} diff --git a/parser/testdata/02675_is_ipv6_function_fix/metadata.json b/parser/testdata/02675_is_ipv6_function_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02675_is_ipv6_function_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02675_is_ipv6_function_fix/query.sql b/parser/testdata/02675_is_ipv6_function_fix/query.sql new file mode 100644 index 000000000..c28b4a5dc --- /dev/null +++ b/parser/testdata/02675_is_ipv6_function_fix/query.sql @@ -0,0 +1 @@ +SELECT isIPv6String('1234::1234:'); \ No newline at end of file diff --git a/parser/testdata/02675_predicate_push_down_filled_join_fix/ast.json b/parser/testdata/02675_predicate_push_down_filled_join_fix/ast.json new file mode 100644 index 000000000..4048be4d3 --- /dev/null +++ b/parser/testdata/02675_predicate_push_down_filled_join_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000983357, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02675_predicate_push_down_filled_join_fix/metadata.json b/parser/testdata/02675_predicate_push_down_filled_join_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02675_predicate_push_down_filled_join_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02675_predicate_push_down_filled_join_fix/query.sql b/parser/testdata/02675_predicate_push_down_filled_join_fix/query.sql new file mode 100644 index 000000000..d9a97112c --- /dev/null +++ b/parser/testdata/02675_predicate_push_down_filled_join_fix/query.sql @@ -0,0 +1,29 @@ +SET enable_analyzer = 1; +SET single_join_prefer_left_table = 0; +SET optimize_move_to_prewhere = 0; +SET query_plan_optimize_join_order_limit = 0; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0, 'Value'); + +DROP TABLE IF EXISTS test_table_join; +CREATE TABLE 
test_table_join +( + id UInt64, + value String +) ENGINE = Join(All, inner, id); + +INSERT INTO test_table_join VALUES (0, 'JoinValue'); + +EXPLAIN header = 1, actions = 1 SELECT t1.id, t1.value, t2.value FROM test_table AS t1 INNER JOIN test_table_join AS t2 ON t1.id = t2.id WHERE t1.id = 0; + +SELECT t1.id, t1.value, t2.value FROM test_table AS t1 INNER JOIN test_table_join AS t2 ON t1.id = t2.id WHERE t1.id = 0; + +DROP TABLE test_table_join; +DROP TABLE test_table; diff --git a/parser/testdata/02675_sparse_columns_clear_column/ast.json b/parser/testdata/02675_sparse_columns_clear_column/ast.json new file mode 100644 index 000000000..6aba5d4af --- /dev/null +++ b/parser/testdata/02675_sparse_columns_clear_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_sparse_columns_clear (children 1)" + }, + { + "explain": " Identifier t_sparse_columns_clear" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000799723, + "rows_read": 2, + "bytes_read": 96 + } +} diff --git a/parser/testdata/02675_sparse_columns_clear_column/metadata.json b/parser/testdata/02675_sparse_columns_clear_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02675_sparse_columns_clear_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02675_sparse_columns_clear_column/query.sql b/parser/testdata/02675_sparse_columns_clear_column/query.sql new file mode 100644 index 000000000..0403eadc6 --- /dev/null +++ b/parser/testdata/02675_sparse_columns_clear_column/query.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS t_sparse_columns_clear; + +CREATE TABLE t_sparse_columns_clear (arr Array(UInt64), v UInt64) +ENGINE = MergeTree ORDER BY tuple() +SETTINGS + ratio_of_defaults_for_sparse_serialization = 0.9, + min_bytes_for_wide_part = 0, + enable_block_number_column = 0, + enable_block_offset_column = 0; + +INSERT INTO t_sparse_columns_clear SELECT [number], 0 FROM numbers(1000); + +SELECT column, serialization_kind FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_columns_clear' AND active +ORDER BY column; + +SET mutations_sync = 2; +SET alter_sync = 2; + +ALTER TABLE t_sparse_columns_clear CLEAR COLUMN v; + +SELECT column, serialization_kind FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_columns_clear' AND active +ORDER BY column; + +OPTIMIZE TABLE t_sparse_columns_clear FINAL; + +SELECT column, serialization_kind FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_sparse_columns_clear' AND active +ORDER BY column; + +DROP TABLE t_sparse_columns_clear SYNC; + +SYSTEM FLUSH LOGS text_log; +SET max_rows_to_read = 0; -- system.text_log can be really big +SELECT count(), groupArray(message) FROM system.text_log WHERE logger_name LIKE '%' || currentDatabase() || '.t_sparse_columns_clear' || '%' AND level = 'Error'; diff --git a/parser/testdata/02676_analyzer_limit_offset/ast.json b/parser/testdata/02676_analyzer_limit_offset/ast.json new file mode 100644 index 000000000..d490b3a19 --- /dev/null +++ b/parser/testdata/02676_analyzer_limit_offset/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000982611, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02676_analyzer_limit_offset/metadata.json 
b/parser/testdata/02676_analyzer_limit_offset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02676_analyzer_limit_offset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02676_analyzer_limit_offset/query.sql b/parser/testdata/02676_analyzer_limit_offset/query.sql new file mode 100644 index 000000000..09f3d180b --- /dev/null +++ b/parser/testdata/02676_analyzer_limit_offset/query.sql @@ -0,0 +1,34 @@ +set enable_analyzer=1; + +DROP TABLE IF EXISTS test; +CREATE TABLE test (i UInt64) Engine = MergeTree() order by i; +INSERT INTO test SELECT number FROM numbers(100); +INSERT INTO test SELECT number FROM numbers(10,100); +OPTIMIZE TABLE test FINAL; + +-- Only set limit +SET limit = 5; +SELECT * FROM test ORDER BY i; -- 5 rows +SELECT * FROM test ORDER BY i OFFSET 20; -- 5 rows +SELECT * FROM (SELECT i FROM test LIMIT 10 OFFSET 50) TMP ORDER BY i; -- 5 rows +SELECT * FROM test ORDER BY i LIMIT 4 OFFSET 192; -- 4 rows +SELECT * FROM test ORDER BY i LIMIT 10 OFFSET 195; -- 5 rows + +-- Only set offset +SET limit = 0; +SET offset = 195; +SELECT * FROM test ORDER BY i; -- 5 rows +SELECT * FROM test ORDER BY i OFFSET 20; -- no result +SELECT * FROM test ORDER BY i LIMIT 100; -- no result +SET offset = 10; +SELECT * FROM test ORDER BY i LIMIT 20 OFFSET 100; -- 10 rows +SELECT * FROM test ORDER BY i LIMIT 11 OFFSET 100; -- 1 row + +-- offset and limit together +SET limit = 10; +SELECT * FROM test ORDER BY i LIMIT 50 OFFSET 50; -- 10 rows +SELECT * FROM test ORDER BY i LIMIT 50 OFFSET 190; -- 0 rows +SELECT * FROM test ORDER BY i LIMIT 50 OFFSET 185; -- 5 rows +SELECT * FROM test ORDER BY i LIMIT 18 OFFSET 5; -- 8 rows + +DROP TABLE test; diff --git a/parser/testdata/02676_distinct_reading_in_order_analyzer/ast.json b/parser/testdata/02676_distinct_reading_in_order_analyzer/ast.json new file mode 100644 index 000000000..fd6ff48d0 --- /dev/null +++ b/parser/testdata/02676_distinct_reading_in_order_analyzer/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001109638, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02676_distinct_reading_in_order_analyzer/metadata.json b/parser/testdata/02676_distinct_reading_in_order_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02676_distinct_reading_in_order_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02676_distinct_reading_in_order_analyzer/query.sql b/parser/testdata/02676_distinct_reading_in_order_analyzer/query.sql new file mode 100644 index 000000000..6a219cd37 --- /dev/null +++ b/parser/testdata/02676_distinct_reading_in_order_analyzer/query.sql @@ -0,0 +1,9 @@ +drop table if exists t; + +set enable_analyzer=1; + +create table t (a UInt64, b UInt64) engine=MergeTree() order by (a); +insert into t select number % 2, number from numbers(10); + +set optimize_distinct_in_order=1; +select trimBoth(explain) from (explain pipeline select distinct a from t) where explain like '%InOrder%'; diff --git a/parser/testdata/02676_kafka_murmur_hash/ast.json b/parser/testdata/02676_kafka_murmur_hash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02676_kafka_murmur_hash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/02676_kafka_murmur_hash/metadata.json b/parser/testdata/02676_kafka_murmur_hash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02676_kafka_murmur_hash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02676_kafka_murmur_hash/query.sql b/parser/testdata/02676_kafka_murmur_hash/query.sql new file mode 100644 index 000000000..d2847b757 --- /dev/null +++ b/parser/testdata/02676_kafka_murmur_hash/query.sql @@ -0,0 +1,8 @@ +-- Tests are taken from: https://github.com/apache/kafka/blob/139f7709bd3f5926901a21e55043388728ccca78/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java#L93 +-- and the reference is generated with: https://pastila.nl/?06465d36/87f8ab2c9f6501c54f1c0879a13c8626 + +SELECT kafkaMurmurHash('21'); +SELECT kafkaMurmurHash('foobar'); +SELECT kafkaMurmurHash('a-little-bit-long-string'); +SELECT kafkaMurmurHash('a-little-bit-longer-string'); +SELECT kafkaMurmurHash('lkjh234lh9fiuh90y23oiuhsafujhadof229phr9h19h89h8'); diff --git a/parser/testdata/02676_to_decimal_string/ast.json b/parser/testdata/02676_to_decimal_string/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02676_to_decimal_string/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02676_to_decimal_string/metadata.json b/parser/testdata/02676_to_decimal_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02676_to_decimal_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02676_to_decimal_string/query.sql b/parser/testdata/02676_to_decimal_string/query.sql new file mode 100644 index 000000000..d24deb91b --- /dev/null +++ b/parser/testdata/02676_to_decimal_string/query.sql @@ -0,0 +1,47 @@ +-- Regular types +SELECT toDecimalString(2, 77); -- more digits required than exist +SELECT toDecimalString(2.123456, 2); -- rounding +SELECT toDecimalString(-2, 77); -- more digits required than exist +SELECT toDecimalString(-2.123456, 2); -- rounding + +SELECT toDecimalString(2.9876, 60); -- more digits required than exist (took 60 as it is float by default) +SELECT toDecimalString(2.1456, 2); -- rounding +SELECT toDecimalString(-2.9876, 60); -- more digits required than exist +SELECT toDecimalString(-2.1456, 2); -- rounding + +-- Float32 and Float64 tests. No sense to test big float precision -- the result will be a mess anyway. +SELECT toDecimalString(64.123::Float32, 10); +SELECT toDecimalString(64.234::Float64, 10); +SELECT toDecimalString(-64.123::Float32, 10); +SELECT toDecimalString(-64.234::Float64, 10); + +-- Decimals +SELECT toDecimalString(-32.345::Decimal32(3), 3); +SELECT toDecimalString(32.345::Decimal32(3), 77); -- more digits required than exist +SELECT toDecimalString(32.456::Decimal32(3), 2); -- rounding +SELECT toDecimalString('-64.5671232345'::Decimal64(10), 10); +SELECT toDecimalString('128.78932312332132985464'::Decimal128(20), 20); +SELECT toDecimalString('-128.78932312332132985464123123'::Decimal128(26), 20); -- rounding +SELECT toDecimalString('128.78932312332132985464'::Decimal128(20), 77); -- more digits required than exist +SELECT toDecimalString('128.789323123321329854641231237893231233213298546'::Decimal256(45), 10); -- rounding +SELECT toDecimalString('-128.789323123321329854641231237893231233213298546'::Decimal256(45), 77); -- more digits required than exist + +-- Max number of decimal fractional digits is defined as 77 for Int/UInt/Decimal and 60 for Float. 
+-- These values shall not work. +SELECT toDecimalString('32.32'::Float32, 61); -- {serverError CANNOT_PRINT_FLOAT_OR_DOUBLE_NUMBER} +SELECT toDecimalString('64.64'::Float64, 61); -- {serverError CANNOT_PRINT_FLOAT_OR_DOUBLE_NUMBER} +SELECT toDecimalString('88'::UInt8, 78); -- {serverError CANNOT_PRINT_FLOAT_OR_DOUBLE_NUMBER} +SELECT toDecimalString('646464'::Int256, 78); -- {serverError CANNOT_PRINT_FLOAT_OR_DOUBLE_NUMBER} +SELECT toDecimalString('-128.789323123321329854641231237893231233213298546'::Decimal256(45), 78); -- {serverError CANNOT_PRINT_FLOAT_OR_DOUBLE_NUMBER} + +-- wrong types: #52407 and similar +SELECT toDecimalString('256.256'::Decimal256(45), *); -- {serverError ILLEGAL_COLUMN} +SELECT toDecimalString('128.128'::Decimal128(30), 'str'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toDecimalString('64.64'::Decimal64(10)); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT toDecimalString('64.64'::Decimal64(10), 3, 3); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +-- Zero precision checks +SELECT toDecimalString(1, 0); +SELECT toDecimalString(1.123456, 0); -- rounding +SELECT toDecimalString(-1, 0); +SELECT toDecimalString(-1.123456, 0); -- rounding diff --git a/parser/testdata/02676_trailing_commas/ast.json b/parser/testdata/02676_trailing_commas/ast.json new file mode 100644 index 000000000..691fa97b1 --- /dev/null +++ b/parser/testdata/02676_trailing_commas/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.00108839, + "rows_read": 5, + "bytes_read": 177 + } +} diff --git a/parser/testdata/02676_trailing_commas/metadata.json b/parser/testdata/02676_trailing_commas/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02676_trailing_commas/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02676_trailing_commas/query.sql b/parser/testdata/02676_trailing_commas/query.sql new file mode 100644 index 000000000..7fb64bb57 --- /dev/null +++ b/parser/testdata/02676_trailing_commas/query.sql @@ -0,0 +1,9 @@ +SELECT 1,; +SELECT 1, FROM numbers(1); +WITH 1 as a SELECT a, FROM numbers(1); +WITH 1 as from SELECT from, from + from, from in [0], FROM numbers(1); +SELECT n, FROM (SELECT 1 AS n); +SELECT (1, 'foo')::Tuple(a Int, b String,); +SELECT (1, 'foo')::Tuple(a Int, b String,,); -- { clientError SYNTAX_ERROR } +SELECT (1, 'foo')::Tuple(Int, String,); +SELECT (1, (2,'foo'))::Tuple(Int, Tuple(Int, String,),); diff --git a/parser/testdata/02677_analyzer_bitmap_has_any/ast.json b/parser/testdata/02677_analyzer_bitmap_has_any/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02677_analyzer_bitmap_has_any/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02677_analyzer_bitmap_has_any/metadata.json b/parser/testdata/02677_analyzer_bitmap_has_any/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02677_analyzer_bitmap_has_any/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02677_analyzer_bitmap_has_any/query.sql b/parser/testdata/02677_analyzer_bitmap_has_any/query.sql new file mode 100644 index 
000000000..dc906a92f --- /dev/null +++ b/parser/testdata/02677_analyzer_bitmap_has_any/query.sql @@ -0,0 +1,35 @@ +SELECT + bitmapHasAny(bitmapBuild([toUInt8(1)]), ( + SELECT groupBitmapState(toUInt8(1)) + )) has1, + bitmapHasAny(bitmapBuild([toUInt64(1)]), ( + SELECT groupBitmapState(toUInt64(2)) + )) has2; + +SELECT '--------------'; + +SELECT * +FROM +( + SELECT + bitmapHasAny(bitmapBuild([toUInt8(1)]), ( + SELECT groupBitmapState(toUInt8(1)) + )) has1, + bitmapHasAny(bitmapBuild([toUInt64(1)]), ( + SELECT groupBitmapState(toUInt64(2)) + )) has2 +) SETTINGS enable_analyzer = 0; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT '--------------'; + +SELECT * +FROM +( + SELECT + bitmapHasAny(bitmapBuild([toUInt8(1)]), ( + SELECT groupBitmapState(toUInt8(1)) + )) has1, + bitmapHasAny(bitmapBuild([toUInt64(1)]), ( + SELECT groupBitmapState(toUInt64(2)) + )) has2 +) SETTINGS enable_analyzer = 1; diff --git a/parser/testdata/02677_analyzer_compound_expressions/ast.json b/parser/testdata/02677_analyzer_compound_expressions/ast.json new file mode 100644 index 000000000..633296a9a --- /dev/null +++ b/parser/testdata/02677_analyzer_compound_expressions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001136076, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02677_analyzer_compound_expressions/metadata.json b/parser/testdata/02677_analyzer_compound_expressions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02677_analyzer_compound_expressions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02677_analyzer_compound_expressions/query.sql b/parser/testdata/02677_analyzer_compound_expressions/query.sql new file mode 100644 index 000000000..90781f701 --- /dev/null +++ b/parser/testdata/02677_analyzer_compound_expressions/query.sql @@ -0,0 +1,44 @@ +SET enable_analyzer = 1; + +WITH ('a', 'b')::Tuple(c1 String, c2 String) AS t +SELECT t.c1, t.c2; + +WITH materialize(('a', 'b')::Tuple(c1 String, c2 String)) AS t +SELECT t.c1, t.c2; + +WITH (1, ('a', 'b'))::Tuple(c1 UInt64, t1 Tuple(c1 String, c2 String)) AS t +SELECT t.c1, t.t1.c1, t.t1.c2; + +WITH materialize((1, ('a', 'b'))::Tuple(c1 UInt64, t1 Tuple(c1 String, c2 String))) AS t +SELECT t.c1, t.t1.c1, t.t1.c2; + +WITH [1, 2, 3] AS arr SELECT arr.size0; +WITH materialize([1, 2, 3]) AS arr SELECT arr.size0; + +WITH [1, 2, NULL] AS arr SELECT arr.null; +WITH materialize([1, 2, NULL]) AS arr SELECT arr.null; + +WITH [[1, 2], [], [3]] AS arr SELECT arr.size0, arr.size1; +WITH materialize([[1, 2], [], [3]]) AS arr SELECT arr.size0, arr.size1; + +WITH map('foo', 1, 'bar', 2) AS m SELECT m.keys, m.values; +WITH materialize(map('foo', 1, 'bar', 2)) AS m SELECT m.keys, m.values; +WITH map('foo', 1, 'bar', 2) AS m SELECT m.*; + +WITH map('foo', (1, 2), 'bar', (3, 4))::Map(String, Tuple(a UInt64, b UInt64)) AS m +SELECT m.keys, m.values, m.values.a, m.values.b; + +WITH materialize(map('foo', (1, 2), 'bar', (3, 4))::Map(String, Tuple(a UInt64, b UInt64))) AS m +SELECT m.keys, m.values, m.values.a, m.values.b; + +WITH map('foo', (1, 2), 'bar', (3, 4))::Map(String, Tuple(a UInt64, b UInt64)) AS m +SELECT m.keys, m.values, m.values.*; + +WITH materialize(map('foo', (1, 2), 'bar', (3, 4))::Map(String, Tuple(a UInt64, b UInt64))) AS m +SELECT m.keys, m.values, m.values.*; + +WITH [1, 2, 3] AS arr SELECT arr.*; -- { 
serverError UNSUPPORTED_METHOD } + +SELECT getSubcolumn([1, 2, 3], 'size0'); +SELECT getSubcolumn([1, 2, 3], materialize('size0')); -- { serverError ILLEGAL_COLUMN } +SELECT getSubcolumn([1, 2, 3], 'aaa'); -- { serverError ILLEGAL_COLUMN } diff --git a/parser/testdata/02677_decode_url_component/ast.json b/parser/testdata/02677_decode_url_component/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02677_decode_url_component/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02677_decode_url_component/metadata.json b/parser/testdata/02677_decode_url_component/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02677_decode_url_component/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02677_decode_url_component/query.sql b/parser/testdata/02677_decode_url_component/query.sql new file mode 100644 index 000000000..68345b5de --- /dev/null +++ b/parser/testdata/02677_decode_url_component/query.sql @@ -0,0 +1,5 @@ +SELECT + encodeURLComponent('кликхаус') AS encoded, + decodeURLComponent(encoded) = 'кликхаус' AS expected_EQ; + +SELECT DISTINCT decodeURLComponent(encodeURLComponent(randomString(100) AS x)) = x FROM numbers(100000); diff --git a/parser/testdata/02677_get_subcolumn_array_of_tuples/ast.json b/parser/testdata/02677_get_subcolumn_array_of_tuples/ast.json new file mode 100644 index 000000000..3a644f307 --- /dev/null +++ b/parser/testdata/02677_get_subcolumn_array_of_tuples/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001006087, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02677_get_subcolumn_array_of_tuples/metadata.json b/parser/testdata/02677_get_subcolumn_array_of_tuples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02677_get_subcolumn_array_of_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02677_get_subcolumn_array_of_tuples/query.sql b/parser/testdata/02677_get_subcolumn_array_of_tuples/query.sql new file mode 100644 index 000000000..956659798 --- /dev/null +++ b/parser/testdata/02677_get_subcolumn_array_of_tuples/query.sql @@ -0,0 +1,13 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS t_get_subcolumn; + +CREATE TABLE t_get_subcolumn (id UInt64, n Nested(u UInt64, s String)) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_get_subcolumn VALUES (1, [42], ['foo']); + +SELECT getSubcolumn(n, 'u') FROM t_get_subcolumn; +SELECT getSubcolumn(n, 's') FROM t_get_subcolumn; +SELECT getSubcolumn(n, 'size0') FROM t_get_subcolumn; + +DROP TABLE t_get_subcolumn; diff --git a/parser/testdata/02677_grace_hash_limit_race/ast.json b/parser/testdata/02677_grace_hash_limit_race/ast.json new file mode 100644 index 000000000..5618d1c4a --- /dev/null +++ b/parser/testdata/02677_grace_hash_limit_race/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_grace_hash (children 1)" + }, + { + "explain": " Identifier test_grace_hash" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001215682, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02677_grace_hash_limit_race/metadata.json b/parser/testdata/02677_grace_hash_limit_race/metadata.json new file mode 100644 index 000000000..ef120d978 --- 
/dev/null +++ b/parser/testdata/02677_grace_hash_limit_race/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02677_grace_hash_limit_race/query.sql b/parser/testdata/02677_grace_hash_limit_race/query.sql new file mode 100644 index 000000000..55262ab24 --- /dev/null +++ b/parser/testdata/02677_grace_hash_limit_race/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS test_grace_hash; + +CREATE TABLE test_grace_hash (id UInt32, value UInt64) ENGINE = MergeTree ORDER BY id; + +INSERT INTO test_grace_hash SELECT number, number % 100 = 0 FROM numbers(100000); + +SET join_algorithm = 'grace_hash'; + +SELECT count() FROM ( + SELECT f.id FROM test_grace_hash AS f + LEFT JOIN test_grace_hash AS d + ON f.id = d.id + LIMIT 1000 +); + +DROP TABLE test_grace_hash; diff --git a/parser/testdata/02678_explain_pipeline_graph_with_projection/ast.json b/parser/testdata/02678_explain_pipeline_graph_with_projection/ast.json new file mode 100644 index 000000000..f1f6b28f9 --- /dev/null +++ b/parser/testdata/02678_explain_pipeline_graph_with_projection/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001352575, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02678_explain_pipeline_graph_with_projection/metadata.json b/parser/testdata/02678_explain_pipeline_graph_with_projection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02678_explain_pipeline_graph_with_projection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02678_explain_pipeline_graph_with_projection/query.sql b/parser/testdata/02678_explain_pipeline_graph_with_projection/query.sql new file mode 100644 index 000000000..e8b7405d6 --- /dev/null +++ b/parser/testdata/02678_explain_pipeline_graph_with_projection/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1(ID UInt64, name String) engine=MergeTree order by ID; + +insert into t1(ID, name) values (1, 'abc'), (2, 'bbb'); + +-- The returned node order is uncertain +explain pipeline graph=1 select count(ID) from t1 FORMAT Null; +explain pipeline graph=1 select sum(1) from t1 FORMAT Null; +explain pipeline graph=1 select min(ID) from t1 FORMAT Null; +explain pipeline graph=1 select max(ID) from t1 FORMAT Null; + +DROP TABLE t1; diff --git a/parser/testdata/02679_explain_merge_tree_prewhere_row_policy/ast.json b/parser/testdata/02679_explain_merge_tree_prewhere_row_policy/ast.json new file mode 100644 index 000000000..131204cb7 --- /dev/null +++ b/parser/testdata/02679_explain_merge_tree_prewhere_row_policy/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001494771, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02679_explain_merge_tree_prewhere_row_policy/metadata.json b/parser/testdata/02679_explain_merge_tree_prewhere_row_policy/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02679_explain_merge_tree_prewhere_row_policy/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02679_explain_merge_tree_prewhere_row_policy/query.sql 
b/parser/testdata/02679_explain_merge_tree_prewhere_row_policy/query.sql new file mode 100644 index 000000000..491197939 --- /dev/null +++ b/parser/testdata/02679_explain_merge_tree_prewhere_row_policy/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0, 'Value'); + +DROP ROW POLICY IF EXISTS test_row_policy ON test_table; +CREATE ROW POLICY test_row_policy ON test_table USING id >= 5 TO ALL; + +EXPLAIN header = 1, actions = 1 SELECT id, value FROM test_table PREWHERE id = 5 settings enable_analyzer=0; +EXPLAIN header = 1, actions = 1 SELECT id, value FROM test_table PREWHERE id = 5 settings enable_analyzer=1; + +DROP ROW POLICY test_row_policy ON test_table; +DROP TABLE test_table; diff --git a/parser/testdata/02679_query_parameters_dangling_pointer/ast.json b/parser/testdata/02679_query_parameters_dangling_pointer/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02679_query_parameters_dangling_pointer/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02679_query_parameters_dangling_pointer/metadata.json b/parser/testdata/02679_query_parameters_dangling_pointer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02679_query_parameters_dangling_pointer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02679_query_parameters_dangling_pointer/query.sql b/parser/testdata/02679_query_parameters_dangling_pointer/query.sql new file mode 100644 index 000000000..b835ecd2c --- /dev/null +++ b/parser/testdata/02679_query_parameters_dangling_pointer/query.sql @@ -0,0 +1,4 @@ +-- There is no use-after-free in the following query: + +SET param_o = 'a'; +CREATE TABLE test.xxx (a Int64) ENGINE=MergeTree ORDER BY ({o:String}); -- { serverError ILLEGAL_COLUMN } diff --git a/parser/testdata/02680_datetime64_monotonic_check/ast.json b/parser/testdata/02680_datetime64_monotonic_check/ast.json new file mode 100644 index 000000000..972da9f0e --- /dev/null +++ b/parser/testdata/02680_datetime64_monotonic_check/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02680_datetime64_monotonic_check (children 1)" + }, + { + "explain": " Identifier 02680_datetime64_monotonic_check" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001521431, + "rows_read": 2, + "bytes_read": 116 + } +} diff --git a/parser/testdata/02680_datetime64_monotonic_check/metadata.json b/parser/testdata/02680_datetime64_monotonic_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02680_datetime64_monotonic_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02680_datetime64_monotonic_check/query.sql b/parser/testdata/02680_datetime64_monotonic_check/query.sql new file mode 100644 index 000000000..6036831d0 --- /dev/null +++ b/parser/testdata/02680_datetime64_monotonic_check/query.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS 02680_datetime64_monotonic_check; +DROP TABLE IF EXISTS 02680_datetime_monotonic_check_lc; + +CREATE TABLE 02680_datetime64_monotonic_check (`t` DateTime64(3), `x` Nullable(Decimal(18, 14))) +ENGINE = MergeTree +PARTITION BY toYYYYMMDD(t) +ORDER BY x SETTINGS allow_nullable_key = 1; + +INSERT INTO 02680_datetime64_monotonic_check VALUES (toDateTime64('2023-03-13 00:00:00', 3, 
'Asia/Jerusalem'), 123); + +SELECT toHour(toTimeZone(t, 'UTC')) AS toHour_UTC, toHour(toTimeZone(t, 'Asia/Jerusalem')) AS toHour_Israel, count() +FROM 02680_datetime64_monotonic_check +WHERE toHour_Israel = 0 +GROUP BY toHour_UTC, toHour_Israel; + +DROP TABLE 02680_datetime64_monotonic_check; + +SET allow_suspicious_low_cardinality_types = 1; +CREATE TABLE 02680_datetime_monotonic_check_lc (`timestamp` LowCardinality(UInt32)) +ENGINE = MergeTree +ORDER BY timestamp +SETTINGS index_granularity = 1; + +INSERT INTO 02680_datetime_monotonic_check_lc VALUES (2); + +SELECT toDateTime(timestamp, 'Asia/Jerusalem') FROM 02680_datetime_monotonic_check_lc WHERE toHour(toDateTime(timestamp, 'Asia/Jerusalem')) = 2; + +DROP TABLE 02680_datetime_monotonic_check_lc diff --git a/parser/testdata/02680_default_star/ast.json b/parser/testdata/02680_default_star/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02680_default_star/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02680_default_star/metadata.json b/parser/testdata/02680_default_star/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02680_default_star/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02680_default_star/query.sql b/parser/testdata/02680_default_star/query.sql new file mode 100644 index 000000000..d560bd01e --- /dev/null +++ b/parser/testdata/02680_default_star/query.sql @@ -0,0 +1,6 @@ +-- These queries yield syntax error, not logical error. + +CREATE TEMPORARY TABLE test (ad DEFAULT *); -- { clientError SYNTAX_ERROR } +CREATE TEMPORARY TABLE test (ad INT DEFAULT *); -- { clientError SYNTAX_ERROR } +CREATE TEMPORARY TABLE test (ad DEFAULT * NOT NULL); -- { clientError SYNTAX_ERROR } +CREATE TEMPORARY TABLE test (ad DEFAULT t.* NOT NULL); -- { clientError SYNTAX_ERROR } diff --git a/parser/testdata/02680_illegal_type_of_filter_projection/ast.json b/parser/testdata/02680_illegal_type_of_filter_projection/ast.json new file mode 100644 index 000000000..ebd9c1da8 --- /dev/null +++ b/parser/testdata/02680_illegal_type_of_filter_projection/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test_tuple (children 3)" + }, + { + "explain": " Identifier test_tuple" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration p (children 1)" + }, + { + "explain": " DataType DateTime" + }, + { + "explain": " ColumnDeclaration i (children 1)" + }, + { + "explain": " DataType int" + }, + { + "explain": " ColumnDeclaration j (children 1)" + }, + { + "explain": " DataType int" + }, + { + "explain": " Storage definition (children 4)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier p" + }, + { + "explain": " Identifier i" + }, + { + "explain": " Identifier j" + }, + { + "explain": " Set" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001325489, + "rows_read": 20, + "bytes_read": 664 + } +} diff --git a/parser/testdata/02680_illegal_type_of_filter_projection/metadata.json b/parser/testdata/02680_illegal_type_of_filter_projection/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02680_illegal_type_of_filter_projection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02680_illegal_type_of_filter_projection/query.sql b/parser/testdata/02680_illegal_type_of_filter_projection/query.sql new file mode 100644 index 000000000..cfd39dcb7 --- /dev/null +++ b/parser/testdata/02680_illegal_type_of_filter_projection/query.sql @@ -0,0 +1,4 @@ +CREATE TABLE test_tuple (`p` DateTime, `i` int, `j` int) ENGINE = MergeTree PARTITION BY (toDate(p), i) ORDER BY j SETTINGS index_granularity = 1; +insert into test_tuple values (1, 1, 1); +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; +SELECT count() FROM test_tuple PREWHERE sipHash64(sipHash64(p, toString(toDate(p))), toString(toDate(p))) % -0. WHERE i > NULL settings optimize_trivial_count_query=0, optimize_use_implicit_projections=1; -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER } diff --git a/parser/testdata/02680_instr_alias_for_position_case_insensitive/ast.json b/parser/testdata/02680_instr_alias_for_position_case_insensitive/ast.json new file mode 100644 index 000000000..e9fea63d7 --- /dev/null +++ b/parser/testdata/02680_instr_alias_for_position_case_insensitive/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function INSTR (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'hello'" + }, + { + "explain": " Literal 'e'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001301772, + "rows_read": 8, + "bytes_read": 282 + } +} diff --git a/parser/testdata/02680_instr_alias_for_position_case_insensitive/metadata.json b/parser/testdata/02680_instr_alias_for_position_case_insensitive/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02680_instr_alias_for_position_case_insensitive/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02680_instr_alias_for_position_case_insensitive/query.sql b/parser/testdata/02680_instr_alias_for_position_case_insensitive/query.sql new file mode 100644 index 000000000..c1c55c2c9 --- /dev/null +++ b/parser/testdata/02680_instr_alias_for_position_case_insensitive/query.sql @@ -0,0 +1,2 @@ +select INSTR('hello', 'e'); +select INSTR('hELlo', 'L'); diff --git a/parser/testdata/02680_lc_null_as_default/ast.json b/parser/testdata/02680_lc_null_as_default/ast.json new file mode 100644 index 000000000..078316c05 --- /dev/null +++ b/parser/testdata/02680_lc_null_as_default/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_null_as_default__fuzz_46 (children 1)" + }, + { + "explain": " Identifier test_null_as_default__fuzz_46" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001313285, + "rows_read": 2, + "bytes_read": 110 + } +} diff --git a/parser/testdata/02680_lc_null_as_default/metadata.json b/parser/testdata/02680_lc_null_as_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02680_lc_null_as_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02680_lc_null_as_default/query.sql b/parser/testdata/02680_lc_null_as_default/query.sql new file mode 100644 index 000000000..f6bfad377 --- /dev/null +++ b/parser/testdata/02680_lc_null_as_default/query.sql @@ -0,0 +1,6 @@ +drop table if exists test_null_as_default__fuzz_46; +SET allow_suspicious_low_cardinality_types = 1; +CREATE TABLE test_null_as_default__fuzz_46 (a Nullable(DateTime64(3)), b LowCardinality(Float32) DEFAULT a + 1000) ENGINE = Memory; +INSERT INTO test_null_as_default__fuzz_46 SELECT 1, NULL UNION ALL SELECT 2, NULL; +drop table test_null_as_default__fuzz_46; + diff --git a/parser/testdata/02680_mysql_ast_logical_err/ast.json b/parser/testdata/02680_mysql_ast_logical_err/ast.json new file mode 100644 index 000000000..6ef890d71 --- /dev/null +++ b/parser/testdata/02680_mysql_ast_logical_err/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery foo (children 3)" + }, + { + "explain": " Identifier foo" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " ColumnDeclaration key (children 1)" + }, + { + "explain": " DataType UInt32" + }, + { + "explain": " ColumnDeclaration a (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration b (children 1)" + }, + { + "explain": " DataType Int64" + }, + { + "explain": " ColumnDeclaration c (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function TinyLog" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001092809, + "rows_read": 14, + "bytes_read": 483 + } +} diff --git a/parser/testdata/02680_mysql_ast_logical_err/metadata.json b/parser/testdata/02680_mysql_ast_logical_err/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02680_mysql_ast_logical_err/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02680_mysql_ast_logical_err/query.sql b/parser/testdata/02680_mysql_ast_logical_err/query.sql new file mode 100644 index 000000000..78ce1b68b --- /dev/null +++ b/parser/testdata/02680_mysql_ast_logical_err/query.sql @@ -0,0 +1,10 @@ +CREATE TABLE foo (key UInt32, a String, b Int64, c String) ENGINE = TinyLog; + +SELECT count() FROM mysql( + mysql('127.0.0.1:9004', currentDatabase(), 'foo', 'default', ''), + '127.0.0.1:9004', currentDatabase(), 'foo', '', '', + SETTINGS connect_timeout = 100, connection_wait_timeout = 100, read_write_timeout = 300); -- { serverError UNKNOWN_FUNCTION } +SELECT count() FROM mysql( + mysql('127.0.0.1:9004', currentDatabase(), 'foo', 'default', '', SETTINGS connection_pool_size = 1), + '127.0.0.1:9004', currentDatabase(), 'foo', '', '', + SETTINGS connect_timeout = 100, connection_wait_timeout = 100, read_write_timeout = 300); -- { serverError UNKNOWN_FUNCTION, UNSUPPORTED_METHOD } diff --git a/parser/testdata/02681_aggregation_by_partitions_bug/ast.json b/parser/testdata/02681_aggregation_by_partitions_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02681_aggregation_by_partitions_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02681_aggregation_by_partitions_bug/metadata.json b/parser/testdata/02681_aggregation_by_partitions_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02681_aggregation_by_partitions_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02681_aggregation_by_partitions_bug/query.sql b/parser/testdata/02681_aggregation_by_partitions_bug/query.sql new file mode 100644 index 000000000..32b4b5507 --- /dev/null +++ b/parser/testdata/02681_aggregation_by_partitions_bug/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-random-merge-tree-settings + +set max_threads = 16; + +create table t(a UInt32) engine=MergeTree order by tuple() partition by a % 16; + +insert into t select * from numbers_mt(1e6); + +set allow_aggregate_partitions_independently=1, force_aggregate_partitions_independently=1; +select count(distinct a) from t; diff --git a/parser/testdata/02681_comparsion_tuple_elimination_ast/ast.json b/parser/testdata/02681_comparsion_tuple_elimination_ast/ast.json new file mode 100644 index 000000000..69acf6c5f --- /dev/null +++ b/parser/testdata/02681_comparsion_tuple_elimination_ast/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001568615, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02681_comparsion_tuple_elimination_ast/metadata.json b/parser/testdata/02681_comparsion_tuple_elimination_ast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02681_comparsion_tuple_elimination_ast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02681_comparsion_tuple_elimination_ast/query.sql b/parser/testdata/02681_comparsion_tuple_elimination_ast/query.sql new file mode 100644 index 000000000..7f36b0568 --- /dev/null +++ b/parser/testdata/02681_comparsion_tuple_elimination_ast/query.sql @@ -0,0 +1,8 @@ +SET optimize_move_to_prewhere = 1; -- works only for PREWHERE + +CREATE TABLE t1 (a UInt64, b UInt64, c UInt64, d UInt64) ENGINE = Memory; +INSERT INTO t1 SELECT number, number * 10, number * 100, number * 1000 FROM numbers(1000000); + +EXPLAIN SYNTAX +SELECT * FROM t1 +WHERE (a, b) = (1, 2) AND (c, d, a) = (3, 4, 5) OR (a, b, 1000) = (c, 10, d) OR ((a, b), 1000) = ((c, 10), d); diff --git a/parser/testdata/02681_group_array_too_large_size/ast.json b/parser/testdata/02681_group_array_too_large_size/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02681_group_array_too_large_size/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02681_group_array_too_large_size/metadata.json b/parser/testdata/02681_group_array_too_large_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02681_group_array_too_large_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02681_group_array_too_large_size/query.sql b/parser/testdata/02681_group_array_too_large_size/query.sql new file mode 100644 index 000000000..7b09f9b46 --- /dev/null +++ b/parser/testdata/02681_group_array_too_large_size/query.sql @@ -0,0 +1,8 @@ +-- This query throws a high-level exception instead of a low-level "too large size passed to allocator": + +SELECT * FROM format(CSV, 'entitypArray AggregateFunction(groupArray, String)', +'295TMiews.viewN""""""TabSeparated +d St"" + + +r'); -- { serverError TOO_LARGE_ARRAY_SIZE } diff --git a/parser/testdata/02681_undrop_query/ast.json b/parser/testdata/02681_undrop_query/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/02681_undrop_query/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02681_undrop_query/metadata.json b/parser/testdata/02681_undrop_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02681_undrop_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02681_undrop_query/query.sql b/parser/testdata/02681_undrop_query/query.sql new file mode 100644 index 000000000..c33b74d49 --- /dev/null +++ b/parser/testdata/02681_undrop_query/query.sql @@ -0,0 +1,89 @@ +-- Tags: no-ordinary-database, no-replicated-database, distributed, zookeeper + +set database_atomic_wait_for_drop_and_detach_synchronously = 0; + +select 'test MergeTree undrop'; +drop table if exists 02681_undrop_mergetree sync; +create table 02681_undrop_mergetree (id Int32) Engine=MergeTree() order by id; +insert into 02681_undrop_mergetree values (1),(2),(3); +drop table 02681_undrop_mergetree; +select table from system.dropped_tables where table = '02681_undrop_mergetree' limit 1; +undrop table 02681_undrop_mergetree; +select * from 02681_undrop_mergetree order by id; +drop table 02681_undrop_mergetree sync; + +select 'test detach'; +drop table if exists 02681_undrop_detach sync; +create table 02681_undrop_detach (id Int32, num Int32) Engine=MergeTree() order by id; +insert into 02681_undrop_detach values (1, 1); +detach table 02681_undrop_detach sync; +undrop table 02681_undrop_detach; -- { serverError TABLE_ALREADY_EXISTS } +attach table 02681_undrop_detach; +alter table 02681_undrop_detach update num = 2 where id = 1; +select command from system.mutations where table='02681_undrop_detach' and database=currentDatabase() limit 1; +drop table 02681_undrop_detach sync; + +select 'test MergeTree with cluster'; +drop table if exists 02681_undrop_uuid_on_cluster on cluster test_shard_localhost sync format Null; +create table 02681_undrop_uuid_on_cluster on cluster test_shard_localhost (id Int32) Engine=MergeTree() order by id format Null; +insert into 02681_undrop_uuid_on_cluster values (1),(2),(3); +drop table 02681_undrop_uuid_on_cluster on cluster test_shard_localhost format Null; +select table from system.dropped_tables where table = '02681_undrop_uuid_on_cluster' limit 1; +undrop table 02681_undrop_uuid_on_cluster on cluster test_shard_localhost format Null; +select * from 02681_undrop_uuid_on_cluster order by id; +drop table 02681_undrop_uuid_on_cluster sync; + +select 'test MergeTree without uuid on cluster'; +drop table if exists 02681_undrop_no_uuid_on_cluster on cluster test_shard_localhost sync format Null; +create table 02681_undrop_no_uuid_on_cluster on cluster test_shard_localhost (id Int32) Engine=MergeTree() order by id format Null; +insert into 02681_undrop_no_uuid_on_cluster values (1),(2),(3); +drop table 02681_undrop_no_uuid_on_cluster on cluster test_shard_localhost format Null; +select table from system.dropped_tables where table = '02681_undrop_no_uuid_on_cluster' limit 1; +undrop table 02681_undrop_no_uuid_on_cluster on cluster test_shard_localhost format Null; +select * from 02681_undrop_no_uuid_on_cluster order by id; +drop table 02681_undrop_no_uuid_on_cluster on cluster test_shard_localhost sync format Null; + +select 'test ReplicatedMergeTree undrop'; +drop table if exists 02681_undrop_replicatedmergetree sync; +create table 02681_undrop_replicatedmergetree (id Int32) Engine=ReplicatedMergeTree('/clickhouse/tables/{database}/02681_undrop_replicatedmergetree', 'test_undrop') order by id; +insert 
into 02681_undrop_replicatedmergetree values (1),(2),(3); +drop table 02681_undrop_replicatedmergetree; +select table from system.dropped_tables where table = '02681_undrop_replicatedmergetree' limit 1; +undrop table 02681_undrop_replicatedmergetree; +select * from 02681_undrop_replicatedmergetree order by id; +drop table 02681_undrop_replicatedmergetree sync; + +select 'test Log undrop'; +drop table if exists 02681_undrop_log sync; +create table 02681_undrop_log (id Int32) Engine=Log(); +insert into 02681_undrop_log values (1),(2),(3); +drop table 02681_undrop_log; +select table from system.dropped_tables where table = '02681_undrop_log' limit 1; +undrop table 02681_undrop_log; +select * from 02681_undrop_log order by id; +drop table 02681_undrop_log sync; + +select 'test Distributed undrop'; +drop table if exists 02681_undrop_distributed sync; +create table 02681_undrop_distributed (id Int32) Engine = Distributed(test_shard_localhost, currentDatabase(), 02681_undrop, rand()); +drop table 02681_undrop_distributed; +select table from system.dropped_tables where table = '02681_undrop_distributed' limit 1; +undrop table 02681_undrop_distributed; +drop table 02681_undrop_distributed sync; + +select 'test MergeTree drop and undrop multiple times'; +drop table if exists 02681_undrop_multiple sync; +create table 02681_undrop_multiple (id Int32) Engine=MergeTree() order by id; +insert into 02681_undrop_multiple values (1); +drop table 02681_undrop_multiple; +create table 02681_undrop_multiple (id Int32) Engine=MergeTree() order by id; +insert into 02681_undrop_multiple values (2); +drop table 02681_undrop_multiple; +create table 02681_undrop_multiple (id Int32) Engine=MergeTree() order by id; +insert into 02681_undrop_multiple values (3); +drop table 02681_undrop_multiple; +select table from system.dropped_tables where table = '02681_undrop_multiple' limit 1; +undrop table 02681_undrop_multiple; +select * from 02681_undrop_multiple order by id; +undrop table 02681_undrop_multiple; -- { serverError TABLE_ALREADY_EXISTS } +drop table 02681_undrop_multiple sync; diff --git a/parser/testdata/02682_quantiles_too_large_size/ast.json b/parser/testdata/02682_quantiles_too_large_size/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02682_quantiles_too_large_size/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02682_quantiles_too_large_size/metadata.json b/parser/testdata/02682_quantiles_too_large_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02682_quantiles_too_large_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02682_quantiles_too_large_size/query.sql b/parser/testdata/02682_quantiles_too_large_size/query.sql new file mode 100644 index 000000000..fff98f667 Binary files /dev/null and b/parser/testdata/02682_quantiles_too_large_size/query.sql differ diff --git a/parser/testdata/02683_native_too_large_size/ast.json b/parser/testdata/02683_native_too_large_size/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02683_native_too_large_size/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02683_native_too_large_size/metadata.json b/parser/testdata/02683_native_too_large_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02683_native_too_large_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02683_native_too_large_size/query.sql b/parser/testdata/02683_native_too_large_size/query.sql new file mode 100644 index 000000000..e8752477f Binary files /dev/null and b/parser/testdata/02683_native_too_large_size/query.sql differ diff --git a/parser/testdata/02684_bson/ast.json b/parser/testdata/02684_bson/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02684_bson/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02684_bson/metadata.json b/parser/testdata/02684_bson/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02684_bson/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02684_bson/query.sql b/parser/testdata/02684_bson/query.sql new file mode 100644 index 000000000..cab5600ef Binary files /dev/null and b/parser/testdata/02684_bson/query.sql differ diff --git a/parser/testdata/02685_bson2/ast.json b/parser/testdata/02685_bson2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02685_bson2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02685_bson2/metadata.json b/parser/testdata/02685_bson2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02685_bson2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02685_bson2/query.sql b/parser/testdata/02685_bson2/query.sql new file mode 100644 index 000000000..fc65d2952 Binary files /dev/null and b/parser/testdata/02685_bson2/query.sql differ diff --git a/parser/testdata/02685_decimal256_various/ast.json b/parser/testdata/02685_decimal256_various/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02685_decimal256_various/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02685_decimal256_various/metadata.json b/parser/testdata/02685_decimal256_various/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02685_decimal256_various/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02685_decimal256_various/query.sql b/parser/testdata/02685_decimal256_various/query.sql new file mode 100644 index 000000000..545eaefe3 --- /dev/null +++ b/parser/testdata/02685_decimal256_various/query.sql @@ -0,0 +1,65 @@ +-- { echoOn } + +SELECT 1.1::Decimal(60, 30); +SELECT round(1.1::Decimal(60, 30)); +SELECT round(1.1::Decimal(60, 30), 1); +SELECT round(1.234567890123456789012345678901::Decimal(60, 30), 1); +SELECT round(1.234567890123456789012345678901::Decimal(60, 30), 30); +SELECT round(1.234567890123456789012345678901::Decimal(60, 30), 31); +SELECT round(1.234567890123456789012345678901::Decimal(60, 30), 20); + +SELECT hex(1.234567890123456789012345678901::Decimal(60, 30)); +SELECT bin(1.234567890123456789012345678901::Decimal(60, 30)); +SELECT reinterpret(unhex(hex(1.234567890123456789012345678901::Decimal(60, 30))), 'Decimal(60, 30)'); + +SELECT arraySum([1.2::Decimal(60, 30), 3.45::Decimal(61, 29)]); +SELECT arraySum([1.2::Decimal(60, 30), 3.45::Decimal(3, 2)]); + +SELECT arrayMin([1.2::Decimal(60, 30), 3.45::Decimal(61, 29)]); +SELECT arrayMax([1.2::Decimal(60, 30), 3.45::Decimal(61, 29)]); +SELECT arrayAvg([1.2::Decimal(60, 30), 3.45::Decimal(61, 29)]); + +SELECT round(arrayProduct([1.2::Decimal(60, 30), 3.45::Decimal(61, 29)]), 6); +SELECT toTypeName(arrayProduct([1.2::Decimal(60, 30), 3.45::Decimal(61, 29)])); + +SELECT arrayCumSum([1.2::Decimal(60, 
30), 3.45::Decimal(61, 29)]); +SELECT arrayCumSumNonNegative([1.2::Decimal(60, 30), 3.45::Decimal(61, 29)]); +SELECT arrayDifference([1.2::Decimal(60, 30), 3.45::Decimal(61, 29)]); + +SELECT arrayCompact([1.2::Decimal(60, 30) AS x, x, x, x, 3.45::Decimal(3, 2) AS y, y, x, x]); + +SELECT 1.2::Decimal(2, 1) IN (1.2::Decimal(60, 30), 3.4::Decimal(60, 30)); +SELECT 1.23::Decimal(3, 2) IN (1.2::Decimal(60, 30), 3.4::Decimal(60, 30)); +SELECT 1.2::Decimal(60, 30) IN (1.2::Decimal(2, 1)); + +SELECT toTypeName([1.2::Decimal(60, 30), 3.45::Decimal(3, 2)]); +SELECT toTypeName(arraySum([1.2::Decimal(60, 30), 3.45::Decimal(3, 2)])); + +SELECT arrayJoin(sumMap(x)) FROM (SELECT [('Hello', 1.2::Decimal256(30)), ('World', 3.4::Decimal256(30))]::Map(String, Decimal256(30)) AS x UNION ALL SELECT [('World', 5.6::Decimal256(30)), ('GoodBye', -111.222::Decimal256(30))]::Map(String, Decimal256(30))) ORDER BY 1; + +SELECT mapAdd(map('Hello', 1.2::Decimal128(30), 'World', 3.4::Decimal128(30)), map('World', 5.6::Decimal128(30), 'GoodBye', -111.222::Decimal128(30))); +SELECT mapSubtract(map('Hello', 1.2::Decimal128(30), 'World', 3.4::Decimal128(30)), map('World', 5.6::Decimal128(30), 'GoodBye', -111.222::Decimal128(30))); + +SELECT arraySort(arrayIntersect([1, 2, 3]::Array(UInt256), [2, 3, 4]::Array(UInt256))); +SELECT toTypeName(arraySort(arrayIntersect([1, 2, 3]::Array(UInt256), [2, 3, 4]::Array(UInt128)))); +SELECT toTypeName(arraySort(arrayIntersect([1, 2, 3]::Array(UInt256), [2, 3, 4]::Array(Int128)))); +SELECT arraySort(arrayIntersect([1, 2, 3]::Array(UInt256), [2, 3, 4]::Array(Int128))); +SELECT arraySort(arrayIntersect([1, 2, 3]::Array(UInt256), [2, 3, 4]::Array(Int8))); +SELECT toTypeName(arraySort(arrayIntersect([1, 2, 3]::Array(UInt256), [2, 3, 4]::Array(Int8)))); + +SELECT arraySort(arrayIntersect([1.1::Decimal256(70), 2.34::Decimal256(60), 3.456::Decimal256(50)], [2.34::Decimal256(65), 3.456::Decimal256(55), 4.5678::Decimal256(45)])); +SELECT arraySort(arrayIntersect([1.1::Decimal256(1)], [1.12::Decimal256(2)])); -- Note: this is correct but the semantics has to be clarified in the docs. +SELECT arraySort(arrayIntersect([1.1::Decimal256(2)], [1.12::Decimal256(2)])); +SELECT arraySort(arrayIntersect([1.1::Decimal128(1)], [1.12::Decimal128(2)])); -- Note: this is correct but the semantics has to be clarified in the docs. 
+SELECT arraySort(arrayIntersect([1.1::Decimal128(2)], [1.12::Decimal128(2)])); + +select coalesce(cast('123', 'Nullable(Decimal(20, 10))'), 0); +select coalesce(cast('123', 'Nullable(Decimal(40, 10))'), 0); +select coalesce(cast('123', 'Decimal(40, 10)'), 0); + +DROP TABLE IF EXISTS decimal_insert_cast_issue; +create table decimal_insert_cast_issue (a Decimal(76, 0)) engine = TinyLog; +SET param_param = 1; +INSERT INTO decimal_insert_cast_issue VALUES ({param:Nullable(Decimal(41, 0))}); +SELECT * FROM decimal_insert_cast_issue; +DROP TABLE decimal_insert_cast_issue; diff --git a/parser/testdata/02686_bson3/ast.json b/parser/testdata/02686_bson3/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02686_bson3/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02686_bson3/metadata.json b/parser/testdata/02686_bson3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02686_bson3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02686_bson3/query.sql b/parser/testdata/02686_bson3/query.sql new file mode 100644 index 000000000..05a73e814 Binary files /dev/null and b/parser/testdata/02686_bson3/query.sql differ diff --git a/parser/testdata/02687_native_fuzz/ast.json b/parser/testdata/02687_native_fuzz/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02687_native_fuzz/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02687_native_fuzz/metadata.json b/parser/testdata/02687_native_fuzz/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02687_native_fuzz/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02687_native_fuzz/query.sql b/parser/testdata/02687_native_fuzz/query.sql new file mode 100644 index 000000000..0cd113909 Binary files /dev/null and b/parser/testdata/02687_native_fuzz/query.sql differ diff --git a/parser/testdata/02688_aggregate_states/ast.json b/parser/testdata/02688_aggregate_states/ast.json new file mode 100644 index 000000000..ddf91e260 --- /dev/null +++ b/parser/testdata/02688_aggregate_states/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '\u0001\\0'" + }, + { + "explain": " Literal 'AggregateFunction(groupBitmap, UInt32)'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001183012, + "rows_read": 8, + "bytes_read": 316 + } +} diff --git a/parser/testdata/02688_aggregate_states/metadata.json b/parser/testdata/02688_aggregate_states/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02688_aggregate_states/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02688_aggregate_states/query.sql b/parser/testdata/02688_aggregate_states/query.sql new file mode 100644 index 000000000..575f4b1bb --- /dev/null +++ b/parser/testdata/02688_aggregate_states/query.sql @@ -0,0 +1,7 @@ +SELECT '\x01\x00'::AggregateFunction(groupBitmap, UInt32); -- { serverError INCORRECT_DATA } +SELECT '\x01\x01\x01'::AggregateFunction(groupBitmap, UInt64); -- { serverError 
STD_EXCEPTION } +SELECT '\x02\x00\x0d'::AggregateFunction(topK, UInt256); -- { serverError CANNOT_READ_ALL_DATA } +SELECT unhex('bebebebebebebebebebebebebebebebebebebebebebebebebebebebebebebe0c0c3131313131313131313131313173290aee00b300')::AggregateFunction(minDistinct, Int8); -- { serverError TOO_LARGE_ARRAY_SIZE } +SELECT unhex('01000b0b0b0d0d0d0d7175616e74696c6554696d696e672c20496e743332000300')::AggregateFunction(quantileTiming, Int32); -- { serverError INCORRECT_DATA } +SELECT unhex('010001')::AggregateFunction(quantileTiming, Int32); -- { serverError INCORRECT_DATA } +SELECT unhex('0a00797979797979797979790a0a6e')::AggregateFunction(minForEach, Ring); -- { serverError TOO_LARGE_ARRAY_SIZE } diff --git a/parser/testdata/02688_long_aggregate_function_names/ast.json b/parser/testdata/02688_long_aggregate_function_names/ast.json new file mode 100644 index 000000000..635578b43 --- /dev/null +++ b/parser/testdata/02688_long_aggregate_function_names/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function minOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNu
llOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullO
rNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNull (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001590086, + "rows_read": 7, + "bytes_read": 6256 + } +} diff --git a/parser/testdata/02688_long_aggregate_function_names/metadata.json b/parser/testdata/02688_long_aggregate_function_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02688_long_aggregate_function_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02688_long_aggregate_function_names/query.sql b/parser/testdata/02688_long_aggregate_function_names/query.sql new file mode 100644 index 000000000..266bbd629 --- /dev/null +++ b/parser/testdata/02688_long_aggregate_function_names/query.sql @@ -0,0 +1 @@ +SELECT minOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNul
lOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNullOrNull(1); -- { serverError TOO_LARGE_STRING_SIZE } diff --git 
a/parser/testdata/02689_meaningless_data_types/ast.json b/parser/testdata/02689_meaningless_data_types/ast.json new file mode 100644 index 000000000..017062145 --- /dev/null +++ b/parser/testdata/02689_meaningless_data_types/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '0'" + }, + { + "explain": " Literal 'Bool(Upyachka)'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001204324, + "rows_read": 8, + "bytes_read": 290 + } +} diff --git a/parser/testdata/02689_meaningless_data_types/metadata.json b/parser/testdata/02689_meaningless_data_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02689_meaningless_data_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02689_meaningless_data_types/query.sql b/parser/testdata/02689_meaningless_data_types/query.sql new file mode 100644 index 000000000..8ae702d66 --- /dev/null +++ b/parser/testdata/02689_meaningless_data_types/query.sql @@ -0,0 +1,3 @@ +SELECT 0::Bool(Upyachka); -- { serverError DATA_TYPE_CANNOT_HAVE_ARGUMENTS } +SELECT [(1, 2), (3, 4)]::Ring(Upyachka); -- { serverError DATA_TYPE_CANNOT_HAVE_ARGUMENTS } +SELECT '1.1.1.1'::IPv4('Hello, world!'); -- { serverError DATA_TYPE_CANNOT_HAVE_ARGUMENTS } diff --git a/parser/testdata/02690_subquery_identifiers/ast.json b/parser/testdata/02690_subquery_identifiers/ast.json new file mode 100644 index 000000000..02d30de17 --- /dev/null +++ b/parser/testdata/02690_subquery_identifiers/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_str (children 1)" + }, + { + "explain": " Identifier t_str" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001286, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/02690_subquery_identifiers/metadata.json b/parser/testdata/02690_subquery_identifiers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02690_subquery_identifiers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02690_subquery_identifiers/query.sql b/parser/testdata/02690_subquery_identifiers/query.sql new file mode 100644 index 000000000..07fcb0fff --- /dev/null +++ b/parser/testdata/02690_subquery_identifiers/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS t_str; + +CREATE TABLE t_str +( + `creation_time` String +) +ENGINE = MergeTree +PARTITION BY creation_time +ORDER BY creation_time; + +insert into t_str values ('2020-02-02'); + +select 1 as x from t_str where cast('1970-01-01' as date) <= cast((select max('1970-01-01') from numbers(1)) as date); +select * from ( select 1 as x from t_str where cast('1970-01-01' as date) <= cast((select max('1970-01-01') from numbers(1)) as date)); +SELECT * FROM (SELECT * FROM t_str WHERE (SELECT any('1970-01-01'))::Date > today()); + +DROP TABLE t_str; diff --git a/parser/testdata/02691_drop_column_with_projections_replicated/ast.json b/parser/testdata/02691_drop_column_with_projections_replicated/ast.json new file mode 100644 index 000000000..e22cd345d --- /dev/null +++ 
b/parser/testdata/02691_drop_column_with_projections_replicated/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02691_drop_column_replicated (children 1)" + }, + { + "explain": " Identifier 02691_drop_column_replicated" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001284511, + "rows_read": 2, + "bytes_read": 108 + } +} diff --git a/parser/testdata/02691_drop_column_with_projections_replicated/metadata.json b/parser/testdata/02691_drop_column_with_projections_replicated/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02691_drop_column_with_projections_replicated/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02691_drop_column_with_projections_replicated/query.sql b/parser/testdata/02691_drop_column_with_projections_replicated/query.sql new file mode 100644 index 000000000..c28c2f233 --- /dev/null +++ b/parser/testdata/02691_drop_column_with_projections_replicated/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS 02691_drop_column_replicated; + +CREATE TABLE 02691_drop_column_replicated (col1 Int64, col2 Int64, PROJECTION 02691_drop_column_replicated (SELECT * ORDER BY col1 )) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/02691_drop_column', 'r1') +ORDER BY col1; + +INSERT INTO 02691_drop_column_replicated VALUES (1, 2); + +ALTER TABLE 02691_drop_column_replicated DROP COLUMN col2 SETTINGS alter_sync = 2; + +DROP TABLE 02691_drop_column_replicated; diff --git a/parser/testdata/02691_multiple_joins_backtick_identifiers/ast.json b/parser/testdata/02691_multiple_joins_backtick_identifiers/ast.json new file mode 100644 index 000000000..326bb2ed7 --- /dev/null +++ b/parser/testdata/02691_multiple_joins_backtick_identifiers/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001096114, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02691_multiple_joins_backtick_identifiers/metadata.json b/parser/testdata/02691_multiple_joins_backtick_identifiers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02691_multiple_joins_backtick_identifiers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02691_multiple_joins_backtick_identifiers/query.sql b/parser/testdata/02691_multiple_joins_backtick_identifiers/query.sql new file mode 100644 index 000000000..4a56c1286 --- /dev/null +++ b/parser/testdata/02691_multiple_joins_backtick_identifiers/query.sql @@ -0,0 +1,49 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; + +CREATE TABLE t1 (`1a` Nullable(Int64), `2b` Nullable(String)) engine = Memory; +CREATE TABLE t2 (`3c` Nullable(Int64), `4d` Nullable(String)) engine = Memory; +CREATE TABLE t3 (`5e` Nullable(Int64), `6f` Nullable(String)) engine = Memory; + +SELECT + `1a`, + `2b` +FROM t1 AS tt1 +INNER JOIN +( + SELECT `3c` + FROM t2 +) AS tt2 ON tt1.`1a` = tt2.`3c` +INNER JOIN +( + SELECT `6f` + FROM t3 +) AS tt3 ON tt1.`2b` = tt3.`6f`; + +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; + +CREATE TABLE t1 (`a` Nullable(Int64), `b` Nullable(String)) engine = Memory; +CREATE TABLE t2 (`c` Nullable(Int64), `d` Nullable(String)) engine = Memory; +CREATE TABLE t3 (`e` Nullable(Int64), `f` 
Nullable(String)) engine = Memory; + +SELECT + a, + b +FROM t1 AS tt1 +INNER JOIN +( + SELECT c + FROM t2 +) AS tt2 ON tt1.a = tt2.c +INNER JOIN +( + SELECT f + FROM t3 +) AS tt3 ON tt1.b = tt3.f; + +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; diff --git a/parser/testdata/02692_multiple_joins_unicode/ast.json b/parser/testdata/02692_multiple_joins_unicode/ast.json new file mode 100644 index 000000000..964d5c6a7 --- /dev/null +++ b/parser/testdata/02692_multiple_joins_unicode/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery store (children 1)" + }, + { + "explain": " Identifier store" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001113946, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/02692_multiple_joins_unicode/metadata.json b/parser/testdata/02692_multiple_joins_unicode/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02692_multiple_joins_unicode/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02692_multiple_joins_unicode/query.sql b/parser/testdata/02692_multiple_joins_unicode/query.sql new file mode 100644 index 000000000..d622c556e --- /dev/null +++ b/parser/testdata/02692_multiple_joins_unicode/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS store; +DROP TABLE IF EXISTS location; +DROP TABLE IF EXISTS sales; + +CREATE TABLE store (id UInt32, "名称" String, "状态" String) ENGINE=MergeTree() Order by id; +CREATE TABLE location (id UInt32, name String) ENGINE=MergeTree() Order by id; +CREATE TABLE sales ("日期" Date, "店铺" UInt32, "地址" UInt32, "销售额" Float32) ENGINE=MergeTree() Order by "日期"; + +INSERT INTO store VALUES (1,'店铺1','启用'),(2,'店铺2','停用'); +INSERT INTO location VALUES (1,'上海市'),(2,'北京市'); +INSERT INTO sales VALUES ('2021-01-01',1,1,10),('2021-01-02',2,2,20); + +SELECT + `日期`, + location.name, + store.`状态` +FROM sales +LEFT JOIN store ON store.id = `店铺` +LEFT JOIN location ON location.id = `地址` +ORDER BY 1, 2, 3; + +DROP TABLE store; +DROP TABLE location; +DROP TABLE sales; diff --git a/parser/testdata/02693_multiple_joins_in/ast.json b/parser/testdata/02693_multiple_joins_in/ast.json new file mode 100644 index 000000000..fc25adc8d --- /dev/null +++ b/parser/testdata/02693_multiple_joins_in/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery temp_table3 (children 3)" + }, + { + "explain": " Identifier temp_table3" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration val0 (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001076911, + "rows_read": 9, + "bytes_read": 328 + } +} diff --git a/parser/testdata/02693_multiple_joins_in/metadata.json b/parser/testdata/02693_multiple_joins_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02693_multiple_joins_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02693_multiple_joins_in/query.sql b/parser/testdata/02693_multiple_joins_in/query.sql new file mode 100644 index 000000000..8be52948d --- /dev/null +++ b/parser/testdata/02693_multiple_joins_in/query.sql @@ 
-0,0 +1,3 @@ +create temporary table temp_table3(val0 UInt64) ENGINE=Memory(); +select * from (select 1 as id) t1 inner join (select 1 as id) t2 on t1.id=t2.id inner join (select 1 as id) t3 on t1.id=t3.id where t1.id in temp_table3; +select * from (select 1 as id) t1 inner join (select 1 as id) t2 on t1.id=t2.id where t1.id in temp_table3; diff --git a/parser/testdata/02694_wrong_identifier_shouldnt_be_accepted/ast.json b/parser/testdata/02694_wrong_identifier_shouldnt_be_accepted/ast.json new file mode 100644 index 000000000..bf2d49e82 --- /dev/null +++ b/parser/testdata/02694_wrong_identifier_shouldnt_be_accepted/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001536714, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02694_wrong_identifier_shouldnt_be_accepted/metadata.json b/parser/testdata/02694_wrong_identifier_shouldnt_be_accepted/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02694_wrong_identifier_shouldnt_be_accepted/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02694_wrong_identifier_shouldnt_be_accepted/query.sql b/parser/testdata/02694_wrong_identifier_shouldnt_be_accepted/query.sql new file mode 100644 index 000000000..e929b1e62 --- /dev/null +++ b/parser/testdata/02694_wrong_identifier_shouldnt_be_accepted/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS s; + +CREATE TABLE t1 ( k Int64, x Int64) ENGINE = Memory; +CREATE TABLE t2 ( x Int64 ) ENGINE = Memory; + +create table s (k Int64, d DateTime) Engine=Memory; + +SELECT * FROM t1 +INNER JOIN s ON t1.k = s.k +INNER JOIN t2 ON t2.x = t1.x +WHERE (t1.d >= now()); -- { serverError UNKNOWN_IDENTIFIER } + +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE s; diff --git a/parser/testdata/02695_logical_optimizer_alias_bug/ast.json b/parser/testdata/02695_logical_optimizer_alias_bug/ast.json new file mode 100644 index 000000000..1461fa447 --- /dev/null +++ b/parser/testdata/02695_logical_optimizer_alias_bug/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test_local (children 3)" + }, + { + "explain": " Identifier test_local" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration id (children 1)" + }, + { + "explain": " DataType UInt32" + }, + { + "explain": " ColumnDeclaration path (children 1)" + }, + { + "explain": " DataType LowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier id" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001192626, + "rows_read": 13, + "bytes_read": 475 + } +} diff --git a/parser/testdata/02695_logical_optimizer_alias_bug/metadata.json b/parser/testdata/02695_logical_optimizer_alias_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02695_logical_optimizer_alias_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02695_logical_optimizer_alias_bug/query.sql 
b/parser/testdata/02695_logical_optimizer_alias_bug/query.sql new file mode 100644 index 000000000..5b13eea5e --- /dev/null +++ b/parser/testdata/02695_logical_optimizer_alias_bug/query.sql @@ -0,0 +1,2 @@ +create table test_local (id UInt32, path LowCardinality(String)) engine = MergeTree order by id; +WITH ((position(path, '/a') > 0) AND (NOT (position(path, 'a') > 0))) OR (path = '/b') OR (path = '/b/') as alias1 SELECT max(alias1) FROM remote('127.0.0.{1,2}', currentDatabase(), test_local) WHERE (id = 299386662); diff --git a/parser/testdata/02695_storage_join_insert_select_deadlock/ast.json b/parser/testdata/02695_storage_join_insert_select_deadlock/ast.json new file mode 100644 index 000000000..fb45911ed --- /dev/null +++ b/parser/testdata/02695_storage_join_insert_select_deadlock/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table_join (children 1)" + }, + { + "explain": " Identifier test_table_join" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001135535, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02695_storage_join_insert_select_deadlock/metadata.json b/parser/testdata/02695_storage_join_insert_select_deadlock/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02695_storage_join_insert_select_deadlock/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02695_storage_join_insert_select_deadlock/query.sql b/parser/testdata/02695_storage_join_insert_select_deadlock/query.sql new file mode 100644 index 000000000..595285113 --- /dev/null +++ b/parser/testdata/02695_storage_join_insert_select_deadlock/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS test_table_join; + +CREATE TABLE test_table_join +( + id UInt64, + value String +) ENGINE = Join(Any, Left, id); + +INSERT INTO test_table_join VALUES (1, 'q'); + +INSERT INTO test_table_join SELECT * from test_table_join; -- { serverError DEADLOCK_AVOIDED } + +INSERT INTO test_table_join SELECT * FROM (SELECT 1 as id) AS t1 ANY LEFT JOIN test_table_join USING (id); -- { serverError DEADLOCK_AVOIDED } +INSERT INTO test_table_join SELECT id, toString(id) FROM (SELECT 1 as id) AS t1 ANY LEFT JOIN (SELECT id FROM test_table_join) AS t2 USING (id); -- { serverError DEADLOCK_AVOIDED } + +DROP TABLE IF EXISTS test_table_join; diff --git a/parser/testdata/02696_ignore_inacc_tables_mat_view_atttach/ast.json b/parser/testdata/02696_ignore_inacc_tables_mat_view_atttach/ast.json new file mode 100644 index 000000000..5b71b74c2 --- /dev/null +++ b/parser/testdata/02696_ignore_inacc_tables_mat_view_atttach/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001349038, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02696_ignore_inacc_tables_mat_view_atttach/metadata.json b/parser/testdata/02696_ignore_inacc_tables_mat_view_atttach/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02696_ignore_inacc_tables_mat_view_atttach/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02696_ignore_inacc_tables_mat_view_atttach/query.sql b/parser/testdata/02696_ignore_inacc_tables_mat_view_atttach/query.sql new file mode 100644 index 000000000..25e0ddf2e --- /dev/null +++ 
b/parser/testdata/02696_ignore_inacc_tables_mat_view_atttach/query.sql @@ -0,0 +1,23 @@ +SET send_logs_level = 'fatal'; + +CREATE TABLE test_table (n Int32, s String) ENGINE MergeTree PARTITION BY n ORDER BY n; + +CREATE TABLE mview_backend (n Int32, n2 Int64) ENGINE MergeTree PARTITION BY n ORDER BY n; + +CREATE MATERIALIZED VIEW mview TO mview_backend AS SELECT n, n * n AS "n2" FROM test_table; + +DROP TABLE test_table; + +DETACH TABLE mview; + +/* Check that we don't get an exception with the option. */ +ATTACH TABLE mview; + +/* Check that the data in the materialized view is updated after the restore. */ +CREATE TABLE test_table (n Int32, s String) ENGINE MergeTree PARTITION BY n ORDER BY n; + +INSERT INTO test_table VALUES (3,'some_val'); + +SELECT n,s FROM test_table ORDER BY n; +SELECT n,n2 FROM mview ORDER BY n; + diff --git a/parser/testdata/02697_alter_dependencies/ast.json b/parser/testdata/02697_alter_dependencies/ast.json new file mode 100644 index 000000000..e7648db1d --- /dev/null +++ b/parser/testdata/02697_alter_dependencies/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery mv_source (children 3)" + }, + { + "explain": " Identifier mv_source" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration a (children 1)" + }, + { + "explain": " DataType Int64" + }, + { + "explain": " ColumnDeclaration insert_time (children 1)" + }, + { + "explain": " DataType DateTime" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier insert_time" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001651042, + "rows_read": 12, + "bytes_read": 437 + } +} diff --git a/parser/testdata/02697_alter_dependencies/metadata.json b/parser/testdata/02697_alter_dependencies/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02697_alter_dependencies/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02697_alter_dependencies/query.sql b/parser/testdata/02697_alter_dependencies/query.sql new file mode 100644 index 000000000..fbde3495e --- /dev/null +++ b/parser/testdata/02697_alter_dependencies/query.sql @@ -0,0 +1,16 @@ +CREATE TABLE mv_source (a Int64, insert_time DateTime) ENGINE = MergeTree() ORDER BY insert_time; +CREATE TABLE mv_target (a Int64, insert_time DateTime) ENGINE = MergeTree() ORDER BY insert_time; +CREATE MATERIALIZED VIEW source_to_target to mv_target as Select * from mv_source where a not in (Select sleepEachRow(0.1) from numbers(50)); + +ALTER TABLE mv_source MODIFY TTL insert_time + toIntervalDay(1); +SYSTEM FLUSH LOGS query_log; +-- This is a fancy way to check that the MV hasn't been called (no functions executed by ALTER) +SELECT + ProfileEvents['FunctionExecute'], + ProfileEvents['TableFunctionExecute'] +FROM system.query_log +WHERE + type = 'QueryFinish' AND + query like '%ALTER TABLE mv_source%' AND + current_database = currentDatabase() AND + event_time > now() - INTERVAL 10 minute; diff --git a/parser/testdata/02698_marked_dropped_tables/ast.json b/parser/testdata/02698_marked_dropped_tables/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02698_marked_dropped_tables/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git
a/parser/testdata/02698_marked_dropped_tables/metadata.json b/parser/testdata/02698_marked_dropped_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02698_marked_dropped_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02698_marked_dropped_tables/query.sql b/parser/testdata/02698_marked_dropped_tables/query.sql new file mode 100644 index 000000000..3a1160395 --- /dev/null +++ b/parser/testdata/02698_marked_dropped_tables/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-ordinary-database + +SET database_atomic_wait_for_drop_and_detach_synchronously = 0; +DROP TABLE IF EXISTS 25400_dropped_tables; + +CREATE TABLE 25400_dropped_tables (id Int32) Engine=MergeTree() ORDER BY id; +INSERT INTO 25400_dropped_tables VALUES (1),(2); +INSERT INTO 25400_dropped_tables VALUES (3),(4); +DROP TABLE 25400_dropped_tables; + +SELECT table, engine FROM system.dropped_tables WHERE database = currentDatabase() LIMIT 1; +SELECT database, table, name FROM system.dropped_tables_parts WHERE database = currentDatabase() and table = '25400_dropped_tables'; diff --git a/parser/testdata/02699_polygons_sym_difference_rollup/ast.json b/parser/testdata/02699_polygons_sym_difference_rollup/ast.json new file mode 100644 index 000000000..06ff512bf --- /dev/null +++ b/parser/testdata/02699_polygons_sym_difference_rollup/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function polygonsSymDifferenceCartesian (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001555551, + "rows_read": 18, + "bytes_read": 753 + } +} diff --git a/parser/testdata/02699_polygons_sym_difference_rollup/metadata.json b/parser/testdata/02699_polygons_sym_difference_rollup/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02699_polygons_sym_difference_rollup/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02699_polygons_sym_difference_rollup/query.sql b/parser/testdata/02699_polygons_sym_difference_rollup/query.sql new file mode 100644 index 000000000..680b98fb1 --- /dev/null +++ b/parser/testdata/02699_polygons_sym_difference_rollup/query.sql @@ -0,0 +1,4 @@ +SELECT polygonsSymDifferenceCartesian([[[(1., 1.)]] AS x], [x]) GROUP BY x WITH ROLLUP; +SELECT [[(2147483647, 0.), (10.0001, 65535), (1, 255), (1023, 2147483646)]], polygonsSymDifferenceCartesian([[[(2147483647, 0.), (10.0001, 65535), (1023, 2147483646)]]], [[[(1000.0001, 10.0001)]]]) GROUP BY [[(2147483647, 0.), (10.0001, 65535), (1023, 2147483646)]] WITH ROLLUP 
SETTINGS enable_analyzer=0; +SELECT [[(2147483647, 0.), (10.0001, 65535), (1, 255), (1023, 2147483646)]], polygonsSymDifferenceCartesian([[[(2147483647, 0.), (10.0001, 65535), (1023, 2147483646)]]], [[[(1000.0001, 10.0001)]]]) GROUP BY [[(2147483647, 0.), (10.0001, 65535), (1023, 2147483646)]] WITH ROLLUP SETTINGS enable_analyzer=1; +SELECT polygonsSymDifferenceCartesian([[[(100.0001, 1000.0001), (-20., 20.), (10., 10.), (20., 20.), (20., -20.), (1000.0001, 1.1920928955078125e-7)]],[[(0.0001, 100000000000000000000.)]] AS x],[x]) GROUP BY x WITH ROLLUP; diff --git a/parser/testdata/02699_polygons_sym_difference_total/ast.json b/parser/testdata/02699_polygons_sym_difference_total/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02699_polygons_sym_difference_total/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02699_polygons_sym_difference_total/metadata.json b/parser/testdata/02699_polygons_sym_difference_total/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02699_polygons_sym_difference_total/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02699_polygons_sym_difference_total/query.sql b/parser/testdata/02699_polygons_sym_difference_total/query.sql new file mode 100644 index 000000000..53d0a3bb5 --- /dev/null +++ b/parser/testdata/02699_polygons_sym_difference_total/query.sql @@ -0,0 +1,2 @@ +SET enable_analyzer=0; +SELECT [(9223372036854775807, 1.1754943508222875e-38)], x, NULL, polygonsSymDifferenceCartesian([[[(1.1754943508222875e-38, 1.1920928955078125e-7), (0.5, 0.5)]], [[(1.1754943508222875e-38, 1.1920928955078125e-7), (1.1754943508222875e-38, 1.1920928955078125e-7)], [(0., 1.0001)]], [[(1., 1.0001)]] AS x], [[[(3.4028234663852886e38, 0.9999)]]]) GROUP BY GROUPING SETS ((x)) WITH TOTALS diff --git a/parser/testdata/02699_polygons_sym_difference_total_analyzer/ast.json b/parser/testdata/02699_polygons_sym_difference_total_analyzer/ast.json new file mode 100644 index 000000000..40e424df7 --- /dev/null +++ b/parser/testdata/02699_polygons_sym_difference_total_analyzer/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001289405, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02699_polygons_sym_difference_total_analyzer/metadata.json b/parser/testdata/02699_polygons_sym_difference_total_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02699_polygons_sym_difference_total_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02699_polygons_sym_difference_total_analyzer/query.sql b/parser/testdata/02699_polygons_sym_difference_total_analyzer/query.sql new file mode 100644 index 000000000..40f610ae5 --- /dev/null +++ b/parser/testdata/02699_polygons_sym_difference_total_analyzer/query.sql @@ -0,0 +1,2 @@ +SET enable_analyzer=1; +SELECT [(9223372036854775807, 1.1754943508222875e-38)], x, NULL, polygonsSymDifferenceCartesian([[[(1.1754943508222875e-38, 1.1920928955078125e-7), (0.5, 0.5)]], [[(1.1754943508222875e-38, 1.1920928955078125e-7), (1.1754943508222875e-38, 1.1920928955078125e-7)], [(0., 1.0001)]], [[(1., 1.0001)]] AS x], [[[(3.4028234663852886e38, 0.9999)]]]) GROUP BY GROUPING SETS ((x)) WITH TOTALS diff --git a/parser/testdata/02700_regexp_operator/ast.json 
b/parser/testdata/02700_regexp_operator/ast.json new file mode 100644 index 000000000..636980893 --- /dev/null +++ b/parser/testdata/02700_regexp_operator/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function match (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'ab'" + }, + { + "explain": " Literal 'a.*b'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001204834, + "rows_read": 8, + "bytes_read": 282 + } +} diff --git a/parser/testdata/02700_regexp_operator/metadata.json b/parser/testdata/02700_regexp_operator/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02700_regexp_operator/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02700_regexp_operator/query.sql b/parser/testdata/02700_regexp_operator/query.sql new file mode 100644 index 000000000..5a5275bf1 --- /dev/null +++ b/parser/testdata/02700_regexp_operator/query.sql @@ -0,0 +1 @@ +SELECT 'ab' REGEXP 'a.*b'; diff --git a/parser/testdata/02701_invalid_having_NOT_AN_AGGREGATE/ast.json b/parser/testdata/02701_invalid_having_NOT_AN_AGGREGATE/ast.json new file mode 100644 index 000000000..00a7adfae --- /dev/null +++ b/parser/testdata/02701_invalid_having_NOT_AN_AGGREGATE/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 5)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier b" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1 (alias a)" + }, + { + "explain": " Literal UInt64_1 (alias b)" + }, + { + "explain": " Literal UInt64_0 (alias c)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Identifier c" + }, + { + "explain": " Set" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001385154, + "rows_read": 23, + "bytes_read": 857 + } +} diff --git a/parser/testdata/02701_invalid_having_NOT_AN_AGGREGATE/metadata.json b/parser/testdata/02701_invalid_having_NOT_AN_AGGREGATE/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02701_invalid_having_NOT_AN_AGGREGATE/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02701_invalid_having_NOT_AN_AGGREGATE/query.sql b/parser/testdata/02701_invalid_having_NOT_AN_AGGREGATE/query.sql new file mode 100644 index 000000000..9cfc4d830 --- /dev/null +++ b/parser/testdata/02701_invalid_having_NOT_AN_AGGREGATE/query.sql @@ -0,0 +1 
@@ +SELECT a, sum(b) FROM (SELECT 1 AS a, 1 AS b, 0 AS c) GROUP BY a HAVING c SETTINGS enable_analyzer=1 -- { serverError NOT_AN_AGGREGATE } diff --git a/parser/testdata/02701_non_parametric_function/ast.json b/parser/testdata/02701_non_parametric_function/ast.json new file mode 100644 index 000000000..087eb0fa7 --- /dev/null +++ b/parser/testdata/02701_non_parametric_function/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function toUInt64 (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001015939, + "rows_read": 18, + "bytes_read": 674 + } +} diff --git a/parser/testdata/02701_non_parametric_function/metadata.json b/parser/testdata/02701_non_parametric_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02701_non_parametric_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02701_non_parametric_function/query.sql b/parser/testdata/02701_non_parametric_function/query.sql new file mode 100644 index 000000000..6c708d9ac --- /dev/null +++ b/parser/testdata/02701_non_parametric_function/query.sql @@ -0,0 +1 @@ +SELECT * FROM system.numbers WHERE number > toUInt64(10)(number) LIMIT 10; -- { serverError FUNCTION_CANNOT_HAVE_PARAMETERS } diff --git a/parser/testdata/02702_logical_optimizer_with_nulls/ast.json b/parser/testdata/02702_logical_optimizer_with_nulls/ast.json new file mode 100644 index 000000000..d0fae8b4d --- /dev/null +++ b/parser/testdata/02702_logical_optimizer_with_nulls/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001331177, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02702_logical_optimizer_with_nulls/metadata.json b/parser/testdata/02702_logical_optimizer_with_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02702_logical_optimizer_with_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02702_logical_optimizer_with_nulls/query.sql b/parser/testdata/02702_logical_optimizer_with_nulls/query.sql new file mode 100644 index 000000000..9e2927334 --- /dev/null +++ b/parser/testdata/02702_logical_optimizer_with_nulls/query.sql @@ -0,0 +1,35 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS 02702_logical_optimizer; + +CREATE TABLE 02702_logical_optimizer +(a Int32, b LowCardinality(String)) +ENGINE=Memory; + +INSERT INTO 02702_logical_optimizer VALUES (1, 'test'), (2, 'test2'), (3, 
'another'); + +SET optimize_min_equality_disjunction_chain_length = 3; + +SELECT * FROM 02702_logical_optimizer WHERE a = 1 OR 3 = a OR NULL = a; +EXPLAIN QUERY TREE SELECT * FROM 02702_logical_optimizer WHERE a = 1 OR 3 = a OR NULL = a; + +SELECT * FROM 02702_logical_optimizer WHERE a = 1 OR 3 = a OR 2 = a OR a = NULL; +EXPLAIN QUERY TREE SELECT * FROM 02702_logical_optimizer WHERE a = 1 OR 3 = a OR 2 = a OR a = NULL; + +DROP TABLE 02702_logical_optimizer; + +DROP TABLE IF EXISTS 02702_logical_optimizer_with_null_column; + +CREATE TABLE 02702_logical_optimizer_with_null_column +(a Nullable(Int32), b LowCardinality(String)) +ENGINE=Memory; + +INSERT INTO 02702_logical_optimizer_with_null_column VALUES (1, 'test'), (2, 'test2'), (3, 'another'); + +SELECT * FROM 02702_logical_optimizer_with_null_column WHERE a = 1 OR 3 = a OR 2 = a; +EXPLAIN QUERY TREE SELECT * FROM 02702_logical_optimizer_with_null_column WHERE a = 1 OR 3 = a OR 2 = a; + +SELECT materialize(1) AS k WHERE NULL OR (0 OR (k = 2) OR (k = CAST(1, 'Nullable(UInt8)') OR k = 3)); +SELECT (k = 2) OR (k = 1) OR ((NULL OR 1) = k) FROM (SELECT 1 AS k); + +DROP TABLE 02702_logical_optimizer_with_null_column; diff --git a/parser/testdata/02703_explain_query_tree_is_forbidden_with_old_analyzer/ast.json b/parser/testdata/02703_explain_query_tree_is_forbidden_with_old_analyzer/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02703_explain_query_tree_is_forbidden_with_old_analyzer/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02703_explain_query_tree_is_forbidden_with_old_analyzer/metadata.json b/parser/testdata/02703_explain_query_tree_is_forbidden_with_old_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02703_explain_query_tree_is_forbidden_with_old_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02703_explain_query_tree_is_forbidden_with_old_analyzer/query.sql b/parser/testdata/02703_explain_query_tree_is_forbidden_with_old_analyzer/query.sql new file mode 100644 index 000000000..c028e74f1 --- /dev/null +++ b/parser/testdata/02703_explain_query_tree_is_forbidden_with_old_analyzer/query.sql @@ -0,0 +1,2 @@ +set enable_analyzer=0; +EXPLAIN QUERY TREE run_passes = true, dump_passes = true SELECT 1; -- { serverError NOT_IMPLEMENTED } diff --git a/parser/testdata/02704_storage_merge_explain_graph_crash/ast.json b/parser/testdata/02704_storage_merge_explain_graph_crash/ast.json new file mode 100644 index 000000000..6539b21e1 --- /dev/null +++ b/parser/testdata/02704_storage_merge_explain_graph_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery foo (children 1)" + }, + { + "explain": " Identifier foo" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001482141, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02704_storage_merge_explain_graph_crash/metadata.json b/parser/testdata/02704_storage_merge_explain_graph_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02704_storage_merge_explain_graph_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02704_storage_merge_explain_graph_crash/query.sql b/parser/testdata/02704_storage_merge_explain_graph_crash/query.sql new file mode 100644 index 000000000..db5eddf2a --- /dev/null +++ 
b/parser/testdata/02704_storage_merge_explain_graph_crash/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS foo; +DROP TABLE IF EXISTS foo2; +DROP TABLE IF EXISTS foo2_dist; +DROP TABLE IF EXISTS merge1; + +CREATE TABLE foo (`Id` Int32, `Val` Int32) ENGINE = MergeTree ORDER BY Id; +INSERT INTO foo SELECT number, number FROM numbers(100); + +CREATE TABLE foo2 (`Id` Int32, `Val` Int32) ENGINE = MergeTree ORDER BY Id; +INSERT INTO foo2 SELECT number, number FROM numbers(100); +CREATE TABLE foo2_dist (`Id` UInt32, `Val` String) ENGINE = Distributed(test_shard_localhost, currentDatabase(), foo2); + +CREATE TABLE merge1 AS foo ENGINE = Merge(currentDatabase(), '^(foo|foo2_dist)$'); + +EXPLAIN PIPELINE graph = 1, compact = 1 SELECT * FROM merge1 FORMAT Null; +EXPLAIN PIPELINE graph = 1, compact = 1 SELECT * FROM merge1 FORMAT Null SETTINGS enable_analyzer=1; diff --git a/parser/testdata/02705_grouping_keys_equal_keys/ast.json b/parser/testdata/02705_grouping_keys_equal_keys/ast.json new file mode 100644 index 000000000..03b154730 --- /dev/null +++ b/parser/testdata/02705_grouping_keys_equal_keys/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001245825, + "rows_read": 6, + "bytes_read": 215 + } +} diff --git a/parser/testdata/02705_grouping_keys_equal_keys/metadata.json b/parser/testdata/02705_grouping_keys_equal_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02705_grouping_keys_equal_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02705_grouping_keys_equal_keys/query.sql b/parser/testdata/02705_grouping_keys_equal_keys/query.sql new file mode 100644 index 000000000..fcf5b4d2c --- /dev/null +++ b/parser/testdata/02705_grouping_keys_equal_keys/query.sql @@ -0,0 +1,7 @@ +SELECT count() +FROM numbers(2) +GROUP BY +GROUPING SETS ( + (number, number + 0, number + 1), + (number % 1048576, number % -9223372036854775808), + (number / 2, number / 2)); diff --git a/parser/testdata/02705_projection_and_ast_optimizations_bug/ast.json b/parser/testdata/02705_projection_and_ast_optimizations_bug/ast.json new file mode 100644 index 000000000..b8b35236f --- /dev/null +++ b/parser/testdata/02705_projection_and_ast_optimizations_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001495343, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02705_projection_and_ast_optimizations_bug/metadata.json b/parser/testdata/02705_projection_and_ast_optimizations_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02705_projection_and_ast_optimizations_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02705_projection_and_ast_optimizations_bug/query.sql b/parser/testdata/02705_projection_and_ast_optimizations_bug/query.sql new file mode 100644 index 000000000..5589fbeeb --- /dev/null +++ 
b/parser/testdata/02705_projection_and_ast_optimizations_bug/query.sql @@ -0,0 +1,6 @@ +drop table if exists t1; +CREATE TABLE t1 (c0 Int32) ENGINE = MergeTree() ORDER BY c0 PARTITION BY (- (c0)); +insert into t1 values(1); +SELECT (- ((((tan (t1.c0)))+(t1.c0)))), (cos ((sin (pow(t1.c0,t1.c0))))), ((gcd((- (t1.c0)),((t1.c0)+(t1.c0))))*((- ((- (t1.c0)))))) FROM t1 GROUP BY (sqrt ((- (t1.c0)))), t1.c0, pow((erf ((- (t1.c0)))),t1.c0); +drop table t1; + diff --git a/parser/testdata/02705_settings_check_changed_flag/ast.json b/parser/testdata/02705_settings_check_changed_flag/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02705_settings_check_changed_flag/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02705_settings_check_changed_flag/metadata.json b/parser/testdata/02705_settings_check_changed_flag/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02705_settings_check_changed_flag/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02705_settings_check_changed_flag/query.sql b/parser/testdata/02705_settings_check_changed_flag/query.sql new file mode 100644 index 000000000..151e7a66b --- /dev/null +++ b/parser/testdata/02705_settings_check_changed_flag/query.sql @@ -0,0 +1,82 @@ +---SettingFieldNumber +SELECT changed from system.settings where name = 'mysql_max_rows_to_insert'; +SET mysql_max_rows_to_insert = 123123; + +select changed from system.settings where name = 'mysql_max_rows_to_insert'; +set mysql_max_rows_to_insert = 123123; +select changed from system.settings where name = 'mysql_max_rows_to_insert'; +set mysql_max_rows_to_insert = 65536; +select changed from system.settings where name = 'mysql_max_rows_to_insert'; + +---SettingAutoWrapper + +select changed from system.settings where name = 'insert_quorum'; +set insert_quorum = 123123; +select changed from system.settings where name = 'insert_quorum'; +set insert_quorum = 123123; +select changed from system.settings where name = 'insert_quorum'; +set insert_quorum = 0; +select changed from system.settings where name = 'insert_quorum'; + +---SettingFieldMaxThreads + +select changed from system.settings where name = 'max_alter_threads'; +set max_alter_threads = 123123; +select changed from system.settings where name = 'max_alter_threads'; +set max_alter_threads = 123123; +select changed from system.settings where name = 'max_alter_threads'; +set max_alter_threads = 0; +select changed from system.settings where name = 'max_alter_threads'; + +---SettingFieldTimespanUnit + +select changed from system.settings where name = 'drain_timeout'; +set drain_timeout = 123123; +select changed from system.settings where name = 'drain_timeout'; +set drain_timeout = 123123; +select changed from system.settings where name = 'drain_timeout'; +set drain_timeout = 3; +select changed from system.settings where name = 'drain_timeout'; + + +---SettingFieldChar + +select changed from system.settings where name = 'format_csv_delimiter'; +set format_csv_delimiter = ','; +select changed from system.settings where name = 'format_csv_delimiter'; +set format_csv_delimiter = ','; +select changed from system.settings where name = 'format_csv_delimiter'; +set format_csv_delimiter = ','; +select changed from system.settings where name = 'format_csv_delimiter'; + + +---SettingFieldURI + +select changed from system.settings where name = 'format_avro_schema_registry_url'; +set format_avro_schema_registry_url = 
'https://github.com/ClickHouse/ClickHouse/tree/master/src/Core'; +select changed from system.settings where name = 'format_avro_schema_registry_url'; +set format_avro_schema_registry_url = 'https://github.com/ClickHouse/ClickHouse/tree/master/src/Core'; +select changed from system.settings where name = 'format_avro_schema_registry_url'; +set format_avro_schema_registry_url = ''; +select changed from system.settings where name = 'format_avro_schema_registry_url'; + + +--- SettingFieldEnum + +select changed from system.settings where name = 'output_format_orc_compression_method'; +set output_format_orc_compression_method = 'none'; +select changed from system.settings where name = 'output_format_orc_compression_method'; +set output_format_orc_compression_method = 'none'; +select changed from system.settings where name = 'output_format_orc_compression_method'; +set output_format_orc_compression_method = 'lz4'; +select changed from system.settings where name = 'output_format_orc_compression_method'; + +--- SettingFieldMultiEnum + +select changed from system.settings where name = 'join_algorithm'; +set join_algorithm = 'auto,direct'; +select changed from system.settings where name = 'join_algorithm'; +set join_algorithm = 'auto,direct'; +select changed from system.settings where name = 'join_algorithm'; +set join_algorithm = 'default'; +select changed from system.settings where name = 'join_algorithm'; diff --git a/parser/testdata/02706_array_map_tuples/ast.json b/parser/testdata/02706_array_map_tuples/ast.json new file mode 100644 index 000000000..2d25b8bc9 --- /dev/null +++ b/parser/testdata/02706_array_map_tuples/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (alias arr1) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier arr1" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001563587, + "rows_read": 21, + "bytes_read": 816 + } +} diff --git a/parser/testdata/02706_array_map_tuples/metadata.json b/parser/testdata/02706_array_map_tuples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02706_array_map_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02706_array_map_tuples/query.sql b/parser/testdata/02706_array_map_tuples/query.sql new file mode 100644 index 000000000..205e15c7d --- /dev/null +++ b/parser/testdata/02706_array_map_tuples/query.sql @@ -0,0 +1,6 @@ +WITH [(1, 2)] AS arr1 SELECT arrayMap((x, y) -> (y, x), arr1); +WITH [(1, 2)] AS arr1 SELECT arrayMap(x 
-> x.1, arr1); +WITH [(1, 2)] AS arr1, [(3, 4)] AS arr2 SELECT arrayMap((x, y) -> (y.1, x.2), arr1, arr2); + +WITH [(1, 2)] AS arr1 SELECT arrayMap((x, y, z) -> (y, x, z), arr1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +WITH [1, 2] AS arr1 SELECT arrayMap((x, y) -> (y, x), arr1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/02706_keeper_map_insert_strict/ast.json b/parser/testdata/02706_keeper_map_insert_strict/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02706_keeper_map_insert_strict/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02706_keeper_map_insert_strict/metadata.json b/parser/testdata/02706_keeper_map_insert_strict/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02706_keeper_map_insert_strict/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02706_keeper_map_insert_strict/query.sql b/parser/testdata/02706_keeper_map_insert_strict/query.sql new file mode 100644 index 000000000..97c801ec4 --- /dev/null +++ b/parser/testdata/02706_keeper_map_insert_strict/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-ordinary-database, no-fasttest + +DROP TABLE IF EXISTS 02706_keeper_map_insert_strict SYNC; + +CREATE TABLE 02706_keeper_map_insert_strict (key UInt64, value Float64) Engine=KeeperMap('/' || currentDatabase() || '/test_02706_keeper_map_insert_strict') PRIMARY KEY(key); + +INSERT INTO 02706_keeper_map_insert_strict VALUES (1, 1.1), (2, 2.2); +SELECT * FROM 02706_keeper_map_insert_strict WHERE key = 1; + +SET keeper_map_strict_mode = false; + +INSERT INTO 02706_keeper_map_insert_strict VALUES (1, 2.1); +SELECT * FROM 02706_keeper_map_insert_strict WHERE key = 1; + +SET keeper_map_strict_mode = true; + +INSERT INTO 02706_keeper_map_insert_strict VALUES (1, 2.1); -- { serverError KEEPER_EXCEPTION } +SELECT * FROM 02706_keeper_map_insert_strict WHERE key = 1; + +DROP TABLE 02706_keeper_map_insert_strict; diff --git a/parser/testdata/02706_kolmogorov_smirnov_test/ast.json b/parser/testdata/02706_kolmogorov_smirnov_test/ast.json new file mode 100644 index 000000000..96bff6e87 --- /dev/null +++ b/parser/testdata/02706_kolmogorov_smirnov_test/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery kstest (children 1)" + }, + { + "explain": " Identifier kstest" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001463861, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/02706_kolmogorov_smirnov_test/metadata.json b/parser/testdata/02706_kolmogorov_smirnov_test/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02706_kolmogorov_smirnov_test/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02706_kolmogorov_smirnov_test/query.sql b/parser/testdata/02706_kolmogorov_smirnov_test/query.sql new file mode 100644 index 000000000..3199b6968 --- /dev/null +++ b/parser/testdata/02706_kolmogorov_smirnov_test/query.sql @@ -0,0 +1,107 @@ +DROP TABLE IF EXISTS kstest; + +CREATE TABLE kstest (left Float64, right Float64) ENGINE = Memory; + +INSERT INTO kstest VALUES (0.010268, 0), (0.000167, 0), (0.000167, 0), (0.159258, 1), (0.136278, 1), (0.122389, 1); + +SELECT +roundBankers(kolmogorovSmirnovTest(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('two-sided')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('less')(left, right).2, 6), 
+roundBankers(kolmogorovSmirnovTest('greater')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('two-sided','auto')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('less','auto')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('greater','auto')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('two-sided','exact')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('less','exact')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('greater','exact')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('two-sided','asymp')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('less','asymp')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('greater','asymp')(left, right).2, 6) , +roundBankers(kolmogorovSmirnovTest(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('two-sided')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('less')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('greater')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('two-sided','auto')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('less','auto')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('greater','auto')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('two-sided','exact')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('less','exact')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('greater','exact')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('two-sided','asymp')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('less','asymp')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('greater','asymp')(left, right).1, 6) +from kstest; + +DROP TABLE IF EXISTS kstest; + +CREATE TABLE kstest (left Float64, right Float64) ENGINE = Memory; + +INSERT INTO kstest VALUES (14.72789, 0), (9.61661, 0), (13.57615, 0), (3.98392, 0), (11.98889, 0), (10.99422, 0), (5.44792, 0), (20.29346, 0), (7.05926, 0), (9.22732, 0), (12.06847, 0), (13.52612, 0), (8.24597, 0), (9.35245, 0), (10.12297, 0), (15.80624, 0), (13.68613, 0), (10.72729, 0), (5.62078, 0), (6.12229, 0), (6.03801, 0), (8.95585, 0), (24.04613, 0), (9.04757, 0), (2.68263, 0), (15.43935, 0), (2.89423, 0), (4.01423, 0), (4.30568, 0), (11.99948, 0), (8.40574, 0), (10.86642, 0), (9.4266, 0), (-8.12752, 0), (7.91634, 0), (7.3967, 0), (2.26431, 0), (14.20118, 0), (6.68233, 0), (15.46221, 0), (7.88467, 0), (11.20011, 0), (8.92027, 0), (10.27926, 0), (5.14395, 0), (5.62178, 0), (12.84383, 0), (9.98009, 0), (-0.69789, 0), (11.41386, 0), (7.76863, 0), (7.21743, 0), (1.81176, 0), (9.43762, 0), (19.22117, 0), (2.97128, 0), (14.32851, 0), (7.54959, 0), (3.81545, 0), (10.1281, 0), (2.48596, 0), (10.0461, 0), (3.59714, 0), (9.73522, 0), (18.8077, 0), (3.15148, 0), (12.26062, 0), (5.66707, 0), (6.58623, 0), (17.30902, 0), (9.91391, 0), (5.36946, 0), (15.73637, 0), (16.96281, 0), (11.54063, 0), (18.37358, 0), (11.38255, 0), (10.53256, 0), (8.08833, 0), (16.27556, 0), (2.42969, 0), (9.56127, 0), (7.32998, 0), (9.19511, 0), (9.66903, 0), (4.15029, 0), (8.83511, 0), (14.60617, 0), (14.06143, 0), (5.39556, 0), (10.11871, 0), (10.56619, 0), (14.4462, 0), (10.42106, 0), (7.75551, 0), (11.00418, 0), (4.47226, 0), (16.35461, 0), (18.55174, 0), (11.82044, 0), (7.39454, 0), (11.27767, 0), (6.83827, 0), (7.76858, 0), (15.97614, 0), (14.53781, 0), (12.99546, 0), (16.91151, 0), (9.65012, 0), (14.25487, 0), (14.03618, 0), (2.57382, 0), (2.50779, 0), (14.24787, 0), (13.34666, 0), (7.31102, 0), (10.22981, 0), (17.4435, 0), (21.2074, 0), (6.64191, 0), (18.7086, 0), 
(14.78686, 0), (9.85287, 0), (4.48263, 0), (14.17469, 0), (14.4342, 0), (19.2481, 0), (3.47165, 0), (8.28712, 0), (8.81657, 0), (0.92319, 0), (20.41106, 0), (6.76127, 0), (22.00242, 0), (8.66129, 0), (10.9929, 0), (17.95494, 0), (17.20996, 0), (12.18888, 0), (12.14257, 0), (15.81243, 0), (4.43362, 0), (1.17567, 0), (15.60881, 0), (9.34833, 0), (6.33513, 0), (-0.83095, 0), (12.43268, 0), (6.63207, 0), (11.96877, 0), (14.81029, 0), (21.84876, 0), (3.75896, 0), (6.91307, 0), (13.73015, 0), (8.63753, 0), (15.71679, 0), (1.74565, 0), (9.16895, 0), (5.70685, 0), (5.00117, 0), (13.06888, 0), (7.51204, 0), (15.34885, 0), (5.20264, 0), (8.59043, 0), (6.45619, 0), (14.61979, 0), (11.7075, 0), (14.04901, 0), (4.20525, 0), (15.1733, 0), (3.12934, 0), (8.08049, 0), (15.41273, 0), (16.90751, 0), (5.86893, 0), (7.1086, 0), (4.418, 0), (12.0614, 0), (7.07887, 0), (3.61585, 0), (11.73001, 0), (10.80449, 0), (8.40311, 0), (9.91276, 0), (16.4164, 0), (5.25034, 0), (15.20283, 0), (10.42909, 0), (9.53888, 0), (14.68939, 0), (6.60007, 0), (18.31058, 0), (7.01885, 0), (18.71631, 0), (10.50002, 0), (10.7517, 0), (4.23224, 0), (2.28924, 0), (8.56059, 0), (8.25095, 0), (9.15673, 0), (13.28409, 0), (8.4513, 0), (2.83911, 0), (2.79676, 0), (9.11055, 0), (7.18529, 0), (-4.1258, 0), (5.28306, 0), (6.82757, 0), (10.89035, 0), (5.24822, 0), (11.935, 0), (6.45675, 0), (10.18088, 0), (4.9932, 0), (18.09939, 0), (8.11738, 0), (5.37883, 0), (10.50339, 0), (16.64093, 0), (14.77263, 0), (13.71385, 0), (6.98746, 0), (10.74635, 0), (5.49432, 0), (13.46078, 0), (10.67565, 0), (9.0291, 0), (11.51417, 0), (13.07118, 0), (9.5049, 0), (8.50611, 0), (6.47606, 0), (13.06526, 0), (19.08658, 0), (9.49741, 0), (10.60865, 0), (2.28996, 0), (8.12846, 0), (5.62241, 0), (4.07712, 0), (17.98526, 0), (9.466, 0), (11.38904, 0), (5.91826, 0), (1.52059, 0), (18.79161, 0), (18.20669, 0), (-1.67829, 0), (18.01586, 0), (16.31577, 0), (7.88281, 0), (8.46179, 0), (10.31113, 0), (14.88377, 0), (1.31835, 0), (2.53176, 0), (9.48625, 0), (3.97936, 0), (11.52319, 0), (13.24178, 0), (7.58739, 0), (10.00959, 0), (9.73361, 0), (8.35716, 0), (1.65491, 0), (11.11521, 0), (6.08355, 0), (10.04582, 0), (11.58237, 0), (16.40249, 0), (1.9691, 0), (13.22776, 0), (2.67059, 0), (9.83651, 0), (2.12539, 0), (9.27114, 0), (9.0699, 0), (2.78179, 0), (12.49311, 0), (12.97662, 0), (15.06359, 0), (16.91565, 0), (5.92011, 0), (5.81304, 0), (8.46425, 0), (9.48705, 0), (4.68191, 0), (5.70028, 0), (-0.78798, 0), (10.03442, 0), (15.45433, 0), (9.43845, 0), (3.05825, 0), (6.92126, 0), (14.05905, 0), (19.71579, 0), (15.0131, 0), (4.50386, 0), (1.31061, 0), (10.81197, 0), (14.32942, 0), (9.26469, 0), (7.27679, 0), (22.69295, 0), (12.03763, 0), (7.34876, 0), (16.60689, 0), (7.48786, 0), (15.78602, 0), (17.21048, 0), (13.93482, 0), (9.69911, 0), (12.24315, 0), (10.58131, 0), (19.57006, 0), (9.8856, 0), (11.70302, 0), (7.89864, 0), (12.24831, 0), (16.93707, 0), (9.65467, 0), (4.221, 0), (15.45229, 0), (12.83088, 0), (7.58313, 0), (12.895, 0), (10.02471, 0), (13.36059, 0), (5.07864, 0), (9.72017, 0), (11.05809, 0), (15.28528, 0), (13.99834, 0), (19.26989, 0), (9.41846, 0), (11.65425, 0), (8.49638, 0), (6.38592, 0), (-4.69837, 0), (12.22061, 0), (9.41331, 0), (13.2075, 0), (12.97005, 0), (11.44352, 0), (9.79805, 0), (6.93116, 0), (10.07691, 0), (22.05892, 0), (7.80353, 0), (-2.17276, 0), (0.61509, 0), (8.35842, 0), (17.77108, 0), (14.70841, 0), (1.27992, 0), (15.62699, 0), (9.32914, 0), (15.41866, 0), (10.82009, 0), (3.29902, 0), (9.21998, 0), (7.93845, 0), (10.33344, 0), (12.06399, 0), 
(5.5308, 0), (8.38727, 0), (18.11104, 0), (8.86565, 0), (19.41825, 0), (9.52376, 0), (3.94552, 0), (9.37587, 0), (15.44954, 0), (15.90527, 0), (13.18927, 0), (7.01646, 0), (9.06005, 0), (9.06431, 0), (5.76006, 0), (9.18705, 0), (-3.48446, 0), (15.89817, 0), (12.94719, 0), (23.69426, 0), (17.47755, 0), (15.61528, 0), (0.54832, 0), (14.32916, 0), (9.55305, 0), (13.79891, 0), (0.82544, 0), (13.34875, 0), (9.07614, 0), (5.19621, 0), (2.1451, 0), (9.87726, 0), (8.45439, 0), (-1.41842, 0), (7.93598, 0), (11.23151, 0), (17.84458, 0), (7.02237, 0), (10.7842, 0), (4.42832, 0), (4.45044, 0), (1.50938, 0), (21.21651, 0), (6.2097, 0), (6.84354, 0), (18.53804, 0), (12.01072, 0), (4.8345, 0), (20.41587, 0), (14.48353, 0), (8.71116, 0), (12.42818, 0), (14.89244, 0), (8.03033, 0), (5.25917, 0), (2.30092, 0), (10.22504, 0), (15.37573, 0), (7.13666, 0), (4.45018, 0), (10.18405, 0), (3.91025, 0), (14.52304, 0), (13.14771, 0), (11.99219, 0), (9.21345, 0), (8.85106, 0), (12.91887, 0), (15.62308, 0), (11.88034, 0), (15.12097, 0), (11.58168, 0), (16.83051, 0), (5.25405, 0), (2.19976, 0), (4.56716, 0), (16.46053, 0), (5.61995, 0), (8.67704, 0), (5.62789, 0), (9.84815, 0), (13.05834, 0), (11.74205, 0), (3.88393, 0), (16.15321, 0), (4.83925, 0), (13.00334, 0), (4.4028, 0), (4.35794, 0), (4.47478, 0), (2.38713, 0), (4.25235, 0), (10.87509, 0), (9.82411, 0), (13.61518, 0), (10.25507, 0), (4.0335, 0), (10.69881, 0), (5.70321, 0), (6.96244, 0), (9.35874, 0), (6.28076, 0), (8.29015, 0), (6.88653, 0), (7.70687, 0), (8.2001, 0), (6.73415, 0), (3.82052, 0), (3.94469, 0), (15.82384, 0), (2.54004, 0), (10.74876, 0), (12.60517, 0), (17.7024, 0), (4.6722, 0), (13.67341, 0), (6.4565, 0), (12.95699, 0), (4.56912, 0), (5.58464, 0), (4.0638, 0), (13.05559, 0), (5.38269, 0), (0.16354, 0), (7.23962, 0), (7.38577, 0), (8.50951, 0), (13.72574, 0), (17.80421, 0), (3.01135, 0), (8.02608, 0), (14.23847, 0), (-8.65656, 1), (22.98234, 1), (23.80821, 1), (13.33939, 1), (-4.05537, 1), (23.5155, 1), (-6.45272, 1), (17.7903, 1), (11.463, 1), (5.28021, 1), (8.39157, 1), (6.02464, 1), (14.43732, 1), (15.76584, 1), (1.54391, 1), (1.24897, 1), (27.1507, 1), (7.71091, 1), (15.71846, 1), (32.97808, 1), (-1.79334, 1), (-9.23439, 1), (11.27838, 1), (0.72703, 1), (18.51557, 1), (9.16619, 1), (17.29624, 1), (-1.30208, 1), (-3.48018, 1), (10.12082, 1), (-8.01318, 1), (-14.22264, 1), (16.58174, 1), (-0.55975, 1), (5.61449, 1), (1.44626, 1), (7.89158, 1), (1.13369, 1), (-0.82609, 1), (12.23365, 1), (12.45443, 1), (14.46915, 1), (13.72627, 1), (18.41459, 1), (29.66702, 1), (1.51619, 1), (10.40078, 1), (3.33266, 1), (6.12036, 1), (11.86553, 1), (6.59422, 1), (22.0948, 1), (1.79623, 1), (14.29513, 1), (19.69162, 1), (-7.98033, 1), (5.48433, 1), (-2.28474, 1), (9.91876, 1), (10.64097, 1), (0.22523, 1), (17.01773, 1), (22.37388, 1), (14.04215, 1), (23.1244, 1), (18.96958, 1), (8.42663, 1), (3.7165, 1), (14.29366, 1), (23.50886, 1), (26.33722, 1), (26.72396, 1), (13.26287, 1), (12.97607, 1), (17.41838, 1), (8.63875, 1), (17.08943, 1), (23.15356, 1), (-4.4965, 1), (7.58895, 1), (26.04074, 1), (6.84245, 1), (20.56287, 1), (3.84735, 1), (-2.76304, 1), (13.1615, 1), (8.21954, 1), (-3.49943, 1), (22.12419, 1), (7.08323, 1), (16.12937, 1), (-0.32672, 1), (16.5942, 1), (7.68977, 1), (11.39484, 1), (-5.11987, 1), (20.87404, 1), (8.01007, 1), (3.26497, 1), (5.61253, 1), (20.69182, 1), (0.0296, 1), (21.904, 1), (22.46572, 1), (3.63685, 1), (-5.10846, 1), (14.86389, 1), (5.47188, 1), (18.44095, 1), (16.71368, 1), (6.36704, 1), (8.82663, 1), (14.6727, 1), (7.98383, 1), 
(2.65568, 1), (21.45827, 1), (11.77948, 1), (4.71979, 1), (3.17951, 1), (13.90226, 1), (15.50578, 1), (10.8026, 1), (16.91369, 1), (9.90552, 1), (13.87322, 1), (4.12366, 1), (-3.78985, 1), (1.7599, 1), (3.43715, 1), (-3.45246, 1), (23.64571, 1), (-4.96877, 1), (3.93514, 1), (1.49914, 1), (12.71519, 1), (5.11521, 1), (4.79872, 1), (20.89391, 1), (5.363, 1), (8.02765, 1), (14.30804, 1), (11.49002, 1), (14.25281, 1), (7.6573, 1), (15.49686, 1), (3.29327, 1), (2.27236, 1), (12.58104, 1), (19.19128, 1), (15.25901, 1), (6.5221, 1), (10.10965, 1), (12.75249, 1), (16.50977, 1), (-8.6697, 1), (8.28553, 1), (1.44315, 1), (4.65869, 1), (0.98149, 1), (0.16623, 1), (17.66332, 1), (4.35346, 1), (6.52742, 1), (-1.06631, 1), (-5.28454, 1), (14.25583, 1), (8.74058, 1), (1.89553, 1), (-0.92959, 1), (10.30289, 1), (-6.3744, 1), (-8.1706, 1), (10.95369, 1), (4.94384, 1), (28.40568, 1), (3.7004, 1), (2.52363, 1), (4.07997, 1), (7.8849, 1), (17.95409, 1), (16.67021, 1), (11.34377, 1), (-0.07446, 1), (22.00223, 1), (3.31778, 1), (18.50719, 1), (-3.58655, 1), (6.5394, 1), (12.40459, 1), (16.59866, 1), (7.54176, 1), (-1.51044, 1), (12.69758, 1), (2.9842, 1), (2.49187, 1), (2.04113, 1), (-2.46544, 1), (15.18368, 1), (-0.04058, 1), (-0.4127, 1), (10.5526, 1), (12.03982, 1), (12.10923, 1), (11.54954, 1), (-1.18613, 1), (11.30984, 1), (23.54105, 1), (10.67321, 1), (24.09196, 1), (7.5008, 1), (12.52233, 1), (4.30673, 1), (9.35793, 1), (4.44472, 1), (-7.00679, 1), (8.56241, 1), (23.73891, 1), (15.62708, 1), (16.09205, 1), (12.52074, 1), (14.58927, 1), (-4.80187, 1), (8.47964, 1), (7.75477, 1), (12.6893, 1), (7.14147, 1), (12.12654, 1), (12.32334, 1), (7.98909, 1), (3.26652, 1), (20.53684, 1), (32.3369, 1), (19.74911, 1), (-4.62897, 1), (8.26483, 1), (20.88451, 1), (-2.12982, 1), (25.61459, 1), (5.32091, 1), (-4.1196, 1), (7.57937, 1), (21.15847, 1), (6.46355, 1), (7.74846, 1), (19.62636, 1), (28.34629, 1), (26.73919, 1), (20.40427, 1), (3.03378, 1), (10.2537, 1), (7.47745, 1), (10.79184, 1), (3.91962, 1), (19.97973, 1), (18.87711, 1), (12.56157, 1), (11.46033, 1), (3.78661, 1), (-9.45748, 1), (12.06033, 1), (-0.74615, 1), (13.2815, 1), (24.78052, 1), (5.83337, 1), (17.4111, 1), (19.70331, 1), (11.78446, 1), (-1.366, 1), (1.37458, 1), (16.31483, 1), (32.63464, 1), (-3.79736, 1), (19.17984, 1), (-0.27705, 1), (-3.69456, 1), (28.38058, 1), (-1.36876, 1), (-25.63301, 1), (3.58644, 1), (-6.85667, 1), (13.42225, 1), (12.04671, 1), (28.99468, 1), (7.87662, 1), (2.61119, 1), (-3.56022, 1), (1.50022, 1), (14.55836, 1), (9.35831, 1), (16.9366, 1), (29.23126, 1), (15.31386, 1), (13.46112, 1), (7.39667, 1), (11.15599, 1), (9.80499, 1), (22.64923, 1), (8.67693, 1), (18.67335, 1), (-3.19127, 1), (22.94716, 1), (17.86834, 1), (16.98267, 1), (15.91653, 1), (11.79718, 1), (18.50208, 1), (8.90755, 1), (10.44843, 1), (4.67433, 1), (6.82287, 1), (10.82228, 1), (-4.18631, 1), (20.3872, 1), (11.84735, 1), (21.25376, 1), (10.55032, 1), (12.19023, 1), (0.63369, 1), (7.92381, 1), (17.90933, 1), (15.30781, 1), (10.01877, 1), (0.88744, 1), (22.20967, 1), (-4.23117, 1), (21.50819, 1), (11.27421, 1), (-16.23179, 1), (33.43085, 1), (5.15093, 1), (1.34505, 1), (6.027, 1), (-10.43035, 1), (27.45998, 1), (19.24886, 1), (-4.44761, 1), (5.453, 1), (12.73758, 1), (11.2897, 1), (31.032, 1), (7.39168, 1), (11.95245, 1), (26.279, 1), (-1.0255, 1), (10.36675, 1), (11.58439, 1), (27.8405, 1), (13.1707, 1), (31.39133, 1), (27.08301, 1), (-2.14368, 1), (4.08476, 1), (21.5573, 1), (16.69822, 1), (7.69955, 1), (8.32793, 1), (6.49235, 1), (-7.3284, 1), (10.58264, 
1), (-6.17006, 1), (34.55782, 1), (10.93221, 1), (44.24299, 1), (14.6224, 1), (-7.42798, 1), (15.52351, 1), (11.33982, 1), (10.46716, 1), (13.0986, 1), (-4.25988, 1), (9.55316, 1), (0.75489, 1), (25.99212, 1), (-0.81401, 1), (3.49551, 1), (22.99402, 1), (10.99628, 1), (23.70223, 1), (2.71482, 1), (22.82309, 1), (31.25686, 1), (4.86318, 1), (-1.06476, 1), (15.10298, 1), (-0.61015, 1), (17.81246, 1), (-1.55788, 1), (18.09709, 1), (9.11271, 1), (9.94682, 1), (-7.33194, 1), (-4.67293, 1), (21.81717, 1), (7.16318, 1), (13.25649, 1), (13.88776, 1), (4.95793, 1), (17.65303, 1), (14.47382, 1), (13.19373, 1), (31.86093, 1), (5.73161, 1), (10.96492, 1), (6.97951, 1), (1.75136, 1), (10.96144, 1), (15.08137, 1), (9.95311, 1), (7.07729, 1), (3.08148, 1), (22.37954, 1), (8.51951, 1), (2.88746, 1), (26.73509, 1), (-2.88939, 1), (-2.82367, 1), (-0.35783, 1), (14.22076, 1), (11.50295, 1), (7.10171, 1), (8.28488, 1), (0.54178, 1), (13.8022, 1), (15.62157, 1), (10.79173, 1), (28.18946, 1), (30.43524, 1), (2.54914, 1), (9.89421, 1), (13.08631, 1), (4.68761, 1), (5.61516, 1), (22.88072, 1), (7.4735, 1), (11.27382, 1), (2.39559, 1), (-3.31889, 1), (9.61957, 1), (23.01381, 1), (-1.23467, 1), (9.07691, 1), (15.78056, 1), (12.28421, 1), (9.44888, 1), (13.16928, 1), (4.33357, 1), (2.21737, 1), (33.17833, 1), (13.25407, 1), (-2.47961, 1), (6.41401, 1), (18.8439, 1), (-4.63375, 1), (-8.2909, 1), (12.18221, 1), (-2.95356, 1), (19.61659, 1), (12.45056, 1), (-4.17198, 1), (21.9641, 1), (11.96416, 1), (12.74573, 1), (10.47873, 1), (12.73295, 1), (11.31373, 1), (9.9827, 1), (5.87138, 1), (4.24372, 1), (-23.72256, 1), (28.41337, 1), (4.88103, 1), (3.61902, 1), (8.93586, 1), (16.40759, 1), (27.84494, 1), (5.6001, 1), (14.51379, 1), (13.5576, 1), (12.92213, 1), (3.90686, 1), (17.07104, 1), (15.84268, 1), (17.38777, 1), (16.54766, 1), (5.94487, 1), (17.02804, 1), (7.66386, 1), (10.43088, 1), (6.16059, 1), (20.46178, 1), (20.02888, 1), (20.95949, 1), (6.50808, 1), (7.22366, 1), (8.06659, 1), (16.08241, 1), (13.83514, 1), (-0.33454, 1), (12.98848, 1), (12.99024, 1); + +SELECT +roundBankers(kolmogorovSmirnovTest(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('two-sided')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('less')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('greater')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('two-sided','auto')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('less','auto')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('greater','auto')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('two-sided','exact')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('less','exact')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('greater','exact')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('two-sided','asymp')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('less','asymp')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('greater','asymp')(left, right).2, 6) , +roundBankers(kolmogorovSmirnovTest(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('two-sided')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('less')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('greater')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('two-sided','auto')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('less','auto')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('greater','auto')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('two-sided','exact')(left, right).1, 6), 
+roundBankers(kolmogorovSmirnovTest('less','exact')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('greater','exact')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('two-sided','asymp')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('less','asymp')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('greater','asymp')(left, right).1, 6) +from kstest; + +DROP TABLE IF EXISTS kstest; + + +CREATE TABLE kstest (left Float64, right Float64) ENGINE = Memory; + +INSERT INTO kstest VALUES (4.82025, 0), (6.13896, 0), (15.20277, 0), (14.15351, 0), (7.21338, 0), (8.55506, 0), (13.80816, 0), (11.28411, 0), (7.4612, 0), (7.43759, 0), (12.9832, 0), (-5.74783, 0), (12.47114, 0), (15.14223, 0), (3.40603, 0), (9.27323, 0), (7.88547, 0), (8.56456, 0), (4.59731, 0), (7.91213, 0), (7.33894, 0), (21.74811, 0), (11.92111, 0), (0.18828, 0), (10.47314, 0), (20.37396, 0), (11.04991, 0), (13.30083, 0), (14.28065, 0), (2.86942, 0), (24.96072, 0), (14.20164, 0), (18.28769, 0), (10.50949, 0), (9.22273, 0), (11.77608, 0), (8.56872, 0), (13.74535, 0), (11.65209, 0), (12.51894, 0), (17.76256, 0), (13.52122, 0), (8.70796, 0), (6.04749, 0), (16.33064, 0), (8.35636, 0), (14.03496, 0), (11.05834, 0), (14.49261, 0), (2.59383, 0), (8.01022, 0), (4.05458, 0), (13.26384, 0), (14.62058, 0), (10.52489, 0), (8.46357, 0), (6.4147, 0), (9.70071, 0), (12.47581, 0), (4.38333, 0), (17.54172, 0), (10.12109, 0), (7.73186, 0), (14.0279, 0), (11.6621, 0), (17.47045, 0), (15.50223, 0), (15.46034, 0), (13.39964, 0), (14.98025, 0), (15.87912, 0), (17.67374, 0), (9.64073, 0), (12.84904, 0), (7.70278, 0), (13.03156, 0), (9.04512, 0), (15.97014, 0), (8.96389, 0), (11.48009, 0), (9.71153, 0), (13.00084, 0), (12.39803, 0), (13.08188, 0), (5.82244, 0), (10.81871, 0), (8.2539, 0), (7.52114, 0), (9.11488, 0), (8.37482, 0), (14.48652, 0), (11.42152, 0), (16.03111, 0), (13.14057, 0), (-2.26351, 0), (15.50394, 0), (14.88603, 0), (13.37257, 0), (11.84026, 0), (7.66558, 0), (6.24584, 0), (3.6312, 0), (2.7018, 0), (5.63656, 0), (5.82643, 0), (10.06745, 0), (-0.5831, 0), (14.84202, 0), (9.5524, 0), (19.71713, 0), (14.23109, 0), (8.69105, 0), (5.33742, 0), (7.30372, 0), (7.93342, 0), (15.20884, 0), (7.53839, 0), (13.45311, 0), (11.04473, 0), (10.76673, 0), (15.44145, 0), (14.06596, 0), (9.14873, 0), (12.88372, 0), (8.74994, 0), (10.53263, 0), (16.16694, 0), (8.37197, 0), (3.43739, 0), (4.72799, 0), (9.08802, 0), (11.2531, 0), (5.16115, 0), (10.20895, 0), (18.70884, 0), (15.88924, 0), (3.38758, 0), (6.46449, 0), (10.21088, 0), (14.08458, 0), (15.74508, 0), (19.31896, 0), (13.19641, 0), (11.95409, 0), (10.70718, 0), (1.05245, 0), (10.04772, 0), (17.01369, 0), (10.2286, 0), (19.58323, 0), (7.02892, 0), (4.16866, 0), (8.94326, 0), (4.99854, 0), (8.88352, 0), (18.65422, 0), (17.32328, 0), (9.33492, 0), (14.94788, 0), (8.05863, 0), (14.6737, 0), (10.93801, 0), (0.54036, 0), (-0.34242, 0), (5.89076, 0), (3.15189, 0), (1.94421, 0), (6.38698, 0), (10.50654, 0), (8.95362, 0), (6.23711, 0), (11.75359, 0), (12.42155, 0), (-1.55472, 0), (4.6688, 0), (10.48087, 0), (11.74615, 0), (9.26822, 0), (7.55517, 0), (12.76005, 0), (16.47102, 0), (11.31297, 0), (14.37437, 0), (2.38799, 0), (6.44577, 0), (5.07471, 0), (11.55123, 0), (7.76795, 0), (10.60116, 0), (14.40885, 0), (11.58158, 0), (8.81648, 0), (12.92299, 0), (11.26939, 0), (17.95014, 0), (2.95002, 0), (17.41959, 0), (11.12455, 0), (8.78541, 0), (14.36413, 0), (12.98554, 0), (12.58505, 0), (15.49789, 0), (11.70999, 0), (0.65596, 0), (11.08202, 0), (14.75752, 0), (6.84385, 0), (9.27245, 0), 
(13.78243, 0), (17.4863, 0), (4.01777, 0), (11.82861, 0), (13.86551, 0), (6.16591, 0), (8.71589, 0), (16.77195, 0), (17.23243, 0), (-2.12941, 0), (5.66629, 0), (12.45153, 0), (1.63971, 0), (13.84031, 0), (4.6144, 0), (5.26169, 0), (9.27769, 0), (9.14288, 0), (9.71953, 0), (9.38446, 0), (1.64788, 0), (11.72922, 0), (13.68926, 0), (9.42952, 0), (12.05574, 0), (9.09148, 0), (5.32273, 0), (20.25258, 0), (10.14599, 0), (10.82156, 0), (5.75736, 0), (7.13567, 0), (9.29746, 0), (5.1618, 0), (10.076, 0), (21.65669, 0), (13.35486, 0), (6.79957, 0), (8.76243, 0), (14.59294, 0), (16.90609, 0), (10.50337, 0), (-0.07923, 0), (13.51648, 0), (12.0676, 0), (0.86482, 0), (9.03563, 0), (5.38751, 0), (17.16866, 0), (2.78702, 0), (11.15548, 0), (12.30843, 0), (8.04897, 0), (9.95814, 0), (11.29308, 0), (14.13032, 0), (21.05877, 0), (3.57386, 0), (7.96631, 0), (3.30484, 0), (18.61856, 0), (16.35184, 0), (7.65236, 0), (18.02895, 0), (9.79458, 0), (16.7274, 0), (8.84453, 0), (13.05709, 0), (10.91447, 0), (8.40171, 0), (16.95211, 0), (11.82194, 0), (19.87978, 0), (12.88455, 0), (-0.00947, 0), (12.28109, 0), (6.96462, 0), (13.75282, 0), (14.39141, 0), (11.07193, 0), (12.88039, 0), (11.38253, 0), (21.02707, 0), (7.51955, 0), (6.31984, 0), (15.6543, 0), (14.80315, 0), (8.38024, 0), (21.7516, 0), (14.31336, 0), (15.04703, 0), (5.73787, 0), (13.16911, 0), (12.40695, 0), (9.88968, 0), (8.46703, 0), (8.70637, 0), (8.03551, 0), (5.9757, 0), (12.22951, 0), (3.14736, 0), (10.51266, 0), (18.593, 0), (10.82213, 0), (7.14216, 0), (6.81154, 0), (-0.6486, 0), (20.56136, 0), (11.35367, 0), (11.38205, 0), (17.14, 0), (14.91215, 0), (15.50207, 0), (5.93162, 0), (3.74869, 0), (14.11532, 0), (7.38954, 0), (5.45764, 0), (18.33733, 0), (9.91923, 0), (2.38991, 0), (14.16756, 0), (2.39791, 0), (6.92586, 0), (5.32474, 0), (2.28812, 0), (5.71718, 0), (5.84197, 0), (2.76206, 0), (19.05928, 0), (11.51788, 0), (6.56648, 0), (3.35735, 0), (7.55948, 0), (19.99908, 0), (13.00634, 0), (18.36886, 0), (11.14675, 0), (16.72931, 0), (12.50106, 0), (6.00605, 0), (23.06653, 0), (5.39694, 0), (9.53167, 0), (12.76944, 0), (7.20604, 0), (13.25391, 0), (13.7341, 0), (10.85292, 0), (-7.75835, 0), (10.29728, 0), (13.70099, 0), (10.17959, 0), (9.98399, 0), (12.69389, 0), (-0.28848, 0), (-2.18319, 0), (13.36378, 0), (10.09232, 0), (5.49489, 0), (5.46156, 0), (0.94225, 0), (12.79205, 0), (10.09593, 0), (6.06218, 0), (0.89463, 0), (11.88986, 0), (10.79733, 0), (1.51371, 0), (2.20967, 0), (15.45732, 0), (16.5262, 0), (5.99724, 0), (8.3613, 0), (15.68183, 0), (15.32117, 0), (14.15674, 0), (6.64553, 0), (4.20777, 0), (-0.10521, 0), (-0.88169, 0), (1.85913, 0), (9.73673, 0), (0.30926, 0), (6.17559, 0), (11.76602, 0), (5.68385, 0), (14.57088, 0), (12.81509, 0), (9.85682, 0), (12.06376, 0), (6.08874, 0), (11.63921, 0), (14.86722, 0), (10.41035, 0), (2.93794, 0), (12.21841, 0), (0.23804, 0), (3.14845, 0), (7.29748, 0), (3.06134, 0), (13.77684, 0), (16.21992, 0), (5.33511, 0), (9.68959, 0), (9.44169, 0), (18.08012, 0), (4.04224, 0), (8.77918, 0), (10.18324, 0), (9.38914, 0), (11.76995, 0), (14.19963, 0), (6.88817, 0), (16.56123, 0), (15.39885, 0), (5.21241, 0), (4.44408, 0), (17.87587, 0), (12.53337, 0), (13.60916, 0), (6.60104, 0), (7.35453, 0), (18.61572, 0), (6.10437, 0), (13.08682, 0), (12.15404, 0), (4.90789, 0), (2.13353, 0), (12.49593, 0), (11.93056, 0), (13.29408, 0), (5.70038, 0), (8.40271, 0), (5.19456, 0), (-5.51028, 0), (14.0329, 0), (10.38365, 0), (6.56812, 0), (4.21129, 0), (9.7157, 0), (9.88553, 0), (13.45346, 0), (4.97752, 0), (12.77595, 0), (8.56465, 0), 
(4.27703, 0), (18.12502, 0), (12.45735, 0), (12.42912, 0), (12.08125, 0), (10.85779, 0), (4.36013, 0), (11.85062, 0), (8.47776, 0), (9.60822, 0), (11.3069, 0), (14.25525, 0), (1.55168, 0), (14.57782, 0), (7.84786, 0), (9.87774, 0), (14.75575, 0), (3.68774, 0), (9.37667, 0), (20.28676, 0), (12.10027, 0), (8.01819, 0), (18.78158, 0), (20.85402, 0), (18.98069, 0), (16.1429, 0), (9.24047, 0), (14.12487, 0), (10.18841, 0), (-3.04478, 0), (5.7552, 0), (9.30376, 0), (11.42837, 0), (6.02364, 0), (8.86984, 0), (10.91177, 0), (10.04418, 0), (18.10774, 0), (7.49384, 0), (9.11556, 0), (9.7051, 0), (5.23268, 0), (9.04647, 0), (8.81547, 0), (2.65098, 0), (-2.69857, 1), (15.80943, 1), (7.31555, 1), (3.96517, 1), (4.77809, 1), (9.6472, 1), (-26.41717, 1), (-10.85635, 1), (-1.4376, 1), (-0.96308, 1), (2.84315, 1), (5.79467, 1), (-3.06091, 1), (-14.62902, 1), (22.08022, 1), (-2.11982, 1), (-4.84824, 1), (-10.50447, 1), (2.4891, 1), (9.90324, 1), (-22.66866, 1), (-0.97103, 1), (-16.57608, 1), (-3.78749, 1), (25.84511, 1), (5.30797, 1), (-18.19466, 1), (11.72708, 1), (0.2891, 1), (-9.83474, 1), (6.69942, 1), (18.09604, 1), (18.52651, 1), (1.38201, 1), (7.64615, 1), (17.66598, 1), (-2.44141, 1), (-9.01598, 1), (27.69142, 1), (4.06946, 1), (-15.0077, 1), (-10.49648, 1), (-4.88322, 1), (-25.09805, 1), (-4.64024, 1), (20.94434, 1), (24.12126, 1), (-14.10962, 1), (10.6512, 1), (14.50687, 1), (-19.88081, 1), (-11.55271, 1), (13.16921, 1), (16.63864, 1), (-24.08114, 1), (-9.09949, 1), (-10.54702, 1), (0.20813, 1), (8.19066, 1), (-2.70523, 1), (-0.23954, 1), (7.19398, 1), (-7.1618, 1), (-7.44322, 1), (-17.92031, 1), (-1.58146, 1), (9.18338, 1), (3.25838, 1), (-14.30234, 1), (1.84695, 1), (31.13794, 1), (-0.85067, 1), (19.02787, 1), (-3.09594, 1), (13.45584, 1), (-5.48104, 1), (-22.74928, 1), (-8.03697, 1), (17.31143, 1), (-16.65231, 1), (-18.58713, 1), (-16.52641, 1), (14.95261, 1), (12.56762, 1), (15.00188, 1), (1.85858, 1), (2.1926, 1), (-2.4095, 1), (21.56873, 1), (3.35509, 1), (-4.98672, 1), (35.08603, 1), (-10.01602, 1), (-3.85153, 1), (-6.81974, 1), (19.56525, 1), (-9.35488, 1), (0.24268, 1), (-3.51488, 1), (-0.37066, 1), (24.20888, 1), (-11.73537, 1), (0.01282, 1), (0.03963, 1), (-9.65589, 1), (-0.37429, 1), (5.61255, 1), (0.49984, 1), (-10.15066, 1), (-14.54314, 1), (16.56889, 1), (-7.73873, 1), (-3.76422, 1), (1.40722, 1), (2.28818, 1), (-13.12643, 1), (5.17082, 1), (4.79089, 1), (-17.42643, 1), (8.72548, 1), (-3.70285, 1), (16.77893, 1), (13.382, 1), (19.98418, 1), (0.00483, 1), (-4.75951, 1), (2.35391, 1), (21.65809, 1), (-9.2714, 1), (-18.38253, 1), (7.23097, 1), (14.97927, 1), (-4.02197, 1), (-29.8189, 1), (-12.8554, 1), (-7.60124, 1), (-14.90158, 1), (-3.31486, 1), (31.38144, 1), (-8.61288, 1), (15.31895, 1), (-10.19488, 1), (13.796, 1), (-0.32912, 1), (-0.0684, 1), (-30.06834, 1), (24.93912, 1), (-3.26506, 1), (-8.29751, 1), (-5.39189, 1), (-25.08603, 1), (-1.45318, 1), (16.72724, 1), (-3.38467, 1), (-26.00478, 1), (7.28369, 1), (16.96226, 1), (16.5858, 1), (10.46583, 1), (3.84345, 1), (-2.99382, 1), (1.42078, 1), (-11.0123, 1), (2.09909, 1), (1.21064, 1), (15.36079, 1), (-21.61349, 1), (22.7726, 1), (10.50512, 1), (-6.95825, 1), (9.20036, 1), (15.66902, 1), (3.28098, 1), (-9.05692, 1), (0.32882, 1), (-1.64934, 1), (-4.81406, 1), (-5.06006, 1), (19.97493, 1), (2.88646, 1), (-0.34552, 1), (7.55186, 1), (-22.96115, 1), (31.29166, 1), (6.18798, 1), (-2.52715, 1), (-11.58799, 1), (14.13596, 1), (13.45069, 1), (12.15179, 1), (3.44491, 1), (-8.78006, 1), (18.32087, 1), (11.91757, 1), (-2.00179, 1), 
(10.88411, 1), (9.09327, 1), (6.62484, 1), (8.87178, 1), (11.52254, 1), (-14.15988, 1), (-17.19515, 1), (14.03089, 1), (-2.4095, 1), (-16.83575, 1), (2.71469, 1), (4.84351, 1), (-1.17651, 1), (-3.37529, 1), (-19.92137, 1), (4.48952, 1), (-12.4906, 1), (-5.65277, 1), (8.50819, 1), (-19.61261, 1), (12.54156, 1), (11.06784, 1), (-12.59285, 1), (3.43683, 1), (-3.00325, 1), (12.49082, 1), (7.20955, 1), (17.6547, 1), (15.8619, 1), (24.3048, 1), (-8.05434, 1), (-6.06901, 1), (-15.69515, 1), (-11.13917, 1), (-3.90757, 1), (-2.57038, 1), (5.14065, 1), (17.8497, 1), (-8.64665, 1), (-18.68331, 1), (5.8567, 1), (-20.93884, 1), (4.40583, 1), (14.35985, 1), (4.18134, 1), (4.3635, 1), (9.35428, 1), (2.8908, 1), (16.01017, 1), (-1.48499, 1), (-9.97949, 1), (1.03055, 1), (-2.79697, 1), (6.85977, 1), (4.73213, 1), (2.7815, 1), (-2.46866, 1), (18.39425, 1), (-0.80378, 1), (-0.22982, 1), (-16.11608, 1), (3.0862, 1), (3.20779, 1), (10.50146, 1), (-0.21305, 1), (11.21012, 1), (-0.99825, 1), (18.39633, 1), (-3.39003, 1), (-0.64411, 1), (-1.39932, 1), (15.45319, 1), (-0.66044, 1), (-15.2223, 1), (-34.39907, 1), (-3.57836, 1), (16.82828, 1), (1.66624, 1), (15.43475, 1), (8.17776, 1), (5.50486, 1), (10.43082, 1), (-6.63332, 1), (2.28008, 1), (16.37203, 1), (5.16313, 1), (-8.85281, 1), (13.26692, 1), (-7.46842, 1), (8.43091, 1), (-13.18172, 1), (-0.72401, 1), (22.3881, 1), (10.65448, 1), (2.81289, 1), (10.92405, 1), (-8.95358, 1), (19.80653, 1), (-12.86527, 1), (5.38826, 1), (-6.83501, 1), (-15.7647, 1), (-27.67412, 1), (8.6499, 1), (-4.89542, 1), (16.76167, 1), (12.84284, 1), (-17.27324, 1), (-4.18726, 1), (-14.62366, 1), (-5.49863, 1), (-16.22846, 1), (10.60329, 1), (6.46781, 1), (1.70458, 1), (10.77448, 1), (0.8463, 1), (13.0482, 1), (-4.36264, 1), (3.22647, 1), (2.38828, 1), (6.7946, 1), (-0.25254, 1), (1.2497, 1), (1.6544, 1), (4.1019, 1), (11.27839, 1), (-5.04127, 1), (18.11674, 1), (0.51231, 1), (-0.51029, 1), (13.52556, 1), (16.10171, 1), (5.68197, 1), (-2.85904, 1), (-8.89167, 1), (6.24489, 1), (10.85319, 1), (-0.39816, 1), (3.87079, 1), (-3.1867, 1), (1.55322, 1), (16.86779, 1), (-14.60321, 1), (-1.81952, 1), (-3.11624, 1), (1.24193, 1), (10.18179, 1), (4.69796, 1), (0.69032, 1), (11.7723, 1), (7.62896, 1), (9.89741, 1), (9.11484, 1), (-3.84676, 1), (-0.4777, 1), (0.95958, 1), (-7.95056, 1), (-10.97474, 1), (-6.54861, 1), (34.74933, 1), (27.39463, 1), (4.18299, 1), (6.02476, 1), (-1.99397, 1), (1.26478, 1), (23.37106, 1), (10.49682, 1), (-11.04354, 1), (-12.22284, 1), (-9.87635, 1), (28.90511, 1), (6.77613, 1), (0.55352, 1), (0.37031, 1), (7.1418, 1), (3.24897, 1), (-1.60918, 1), (3.1675, 1), (-17.97072, 1), (-5.61743, 1), (14.1422, 1), (14.87695, 1), (-4.65961, 1), (-0.99174, 1), (-2.96623, 1), (-9.02263, 1), (-17.2088, 1), (2.78608, 1), (6.74239, 1), (4.8524, 1), (7.46731, 1), (1.04894, 1), (-12.8023, 1), (-17.18188, 1), (-5.08801, 1), (22.13942, 1), (-0.36384, 1), (17.80564, 1), (7.67504, 1), (1.59779, 1), (4.10942, 1), (0.61074, 1), (-14.40767, 1), (10.59906, 1), (16.57017, 1), (-15.17526, 1), (-6.98549, 1), (-0.64548, 1), (3.23756, 1), (14.65504, 1), (4.583, 1), (12.72378, 1), (5.26547, 1), (0.81781, 1), (9.38273, 1), (10.37636, 1), (10.70325, 1), (-0.83043, 1), (-7.53149, 1), (-9.09147, 1), (-19.51381, 1), (-28.44508, 1), (6.44392, 1), (11.10201, 1), (-2.86184, 1), (8.30673, 1), (8.8797, 1), (10.68053, 1), (15.62919, 1), (8.00579, 1), (6.4651, 1), (-4.50029, 1), (18.04514, 1), (11.12996, 1), (-5.14007, 1), (9.43857, 1), (3.13476, 1), (4.9772, 1), (-17.45782, 1), (0.05552, 1), (-1.90283, 1), (2.67908, 
1), (-2.62243, 1), (-3.22767, 1), (-8.70222, 1), (-23.11605, 1), (21.6757, 1), (12.70076, 1), (4.4322, 1), (11.69344, 1), (9.18052, 1), (-2.2549, 1), (-2.15615, 1), (20.29765, 1), (-0.29536, 1), (15.50109, 1), (8.79187, 1), (5.11533, 1), (-20.44436, 1), (-3.00909, 1), (-4.48291, 1), (21.84462, 1), (1.94225, 1), (-2.81908, 1), (17.19418, 1), (-9.33528, 1), (-0.17346, 1), (0.03958, 1), (-35.17786, 1), (8.36887, 1), (-9.02292, 1), (-10.98804, 1), (0.29335, 1), (4.29634, 1), (3.87718, 1), (-9.08532, 1), (7.13922, 1), (-7.62463, 1), (-10.5666, 1), (4.68165, 1), (-3.30172, 1), (13.04852, 1), (13.45616, 1), (2.41043, 1), (-0.36501, 1), (-15.67383, 1), (17.92217, 1), (8.42106, 1), (3.22063, 1), (-7.31753, 1), (21.99596, 1), (-36.8273, 1), (-20.46391, 1), (5.74179, 1), (-15.83178, 1), (14.90454, 1), (-8.84645, 1), (3.72036, 1), (4.6877, 1), (16.35418, 1), (3.15441, 1), (2.39907, 1), (-17.58664, 1), (-13.18269, 1); + +SELECT +roundBankers(kolmogorovSmirnovTest(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('two-sided')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('less')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('greater')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('two-sided','auto')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('less','auto')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('greater','auto')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('two-sided','exact')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('less','exact')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('greater','exact')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('two-sided','asymp')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('less','asymp')(left, right).2, 6), +roundBankers(kolmogorovSmirnovTest('greater','asymp')(left, right).2, 6) , +roundBankers(kolmogorovSmirnovTest(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('two-sided')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('less')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('greater')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('two-sided','auto')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('less','auto')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('greater','auto')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('two-sided','exact')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('less','exact')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('greater','exact')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('two-sided','asymp')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('less','asymp')(left, right).1, 6), +roundBankers(kolmogorovSmirnovTest('greater','asymp')(left, right).1, 6) +from kstest; + +DROP TABLE IF EXISTS kstest; diff --git a/parser/testdata/02706_show_columns/ast.json b/parser/testdata/02706_show_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02706_show_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02706_show_columns/metadata.json b/parser/testdata/02706_show_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02706_show_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02706_show_columns/query.sql b/parser/testdata/02706_show_columns/query.sql new file mode 100644 index 000000000..57d80a1c5 --- /dev/null +++ b/parser/testdata/02706_show_columns/query.sql @@ 
-0,0 +1,97 @@ +-- Tags: no-parallel +-- no-parallel: creates a custom database schema and expects to use it exclusively + +-- Create a test table and verify that the output of SHOW COLUMNS is sane. +-- The matching of actual/expected results relies on the fact that the output of SHOW COLUMNS is sorted. +DROP TABLE IF EXISTS tab; +CREATE TABLE tab +( + `uint64` UInt64, + `int32` Nullable(Int32) COMMENT 'example comment', + `str` String, + INDEX idx str TYPE set(1000) +) +ENGINE = MergeTree +PRIMARY KEY (uint64) +ORDER BY (uint64, str); + +SELECT '--- Aliases of SHOW COLUMNS'; +SHOW COLUMNS FROM tab; +SHOW FIELDS FROM tab; + +SELECT '--- EXTENDED'; +SHOW EXTENDED COLUMNS FROM tab; + +SELECT '--- FULL'; +SHOW FULL COLUMNS FROM tab; + +SELECT '--- LIKE'; +SHOW COLUMNS FROM tab LIKE '%int%'; + +SELECT '--- NOT LIKE'; +SHOW COLUMNS FROM tab NOT LIKE '%int%'; + +SELECT '--- ILIKE'; +SHOW COLUMNS FROM tab ILIKE '%INT%'; + +SELECT '--- NOT ILIKE'; +SHOW COLUMNS FROM tab NOT ILIKE '%INT%'; + +SELECT '--- WHERE'; +SHOW COLUMNS FROM tab WHERE field LIKE '%int%'; + +SELECT '--- LIMIT'; +SHOW COLUMNS FROM tab LIMIT 1; + +SELECT '--- Check with weird table names'; + +DROP TABLE IF EXISTS `$4@^7`; +CREATE TABLE `$4@^7` (c String) ENGINE = MergeTree ORDER BY c; +SHOW COLUMNS FROM `$4@^7`; +DROP TABLE `$4@^7`; + +DROP TABLE IF EXISTS NULL; +CREATE TABLE NULL (c String) ENGINE = MergeTree ORDER BY c; +SHOW COLUMNS FROM NULL; +DROP TABLE NULL; + +DROP TABLE IF EXISTS `tab.with.dots`; +CREATE TABLE `tab.with.dots` (c String) ENGINE = MergeTree ORDER BY c; +SHOW COLUMNS FROM `tab.with.dots`; +DROP TABLE `tab.with.dots`; + +DROP DATABASE IF EXISTS `'`; +CREATE DATABASE `'`; +CREATE TABLE `'`.`'` (c String) ENGINE = MergeTree ORDER BY c; +SHOW COLUMNS FROM `'` FROM `'`; +SHOW COLUMNS FROM `'`.`'`; -- abbreviated form +DROP TABLE `'`.`'`; +DROP DATABASE `'`; + +-- Create a table in a different database. Intentionally using the same table/column names as above so +-- we notice if something is buggy in the implementation of SHOW COLUMNS.
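+-- Both lookup forms are checked below: `SHOW COLUMNS FROM tab FROM <db>` and the abbreviated
+-- `SHOW COLUMNS FROM <db>.tab` name the same table, so they should produce identical output.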
+DROP DATABASE IF EXISTS database_123456789abcde; +CREATE DATABASE database_123456789abcde; -- pseudo-random database name + +DROP TABLE IF EXISTS database_123456789abcde.tab; +CREATE TABLE database_123456789abcde.tab +( + `uint64` UInt64, + `int32` Int32, + `str` String +) +ENGINE = MergeTree +ORDER BY uint64; + +SELECT '--- Original table'; +SHOW COLUMNS FROM tab; + +SELECT '--- Equally named table in other database'; +SHOW COLUMNS FROM tab FROM database_123456789abcde; + +SELECT '--- Short form'; +SHOW COLUMNS FROM database_123456789abcde.tab; + +DROP DATABASE database_123456789abcde; + +DROP TABLE tab; diff --git a/parser/testdata/02707_analyzer_nested_lambdas_types/ast.json b/parser/testdata/02707_analyzer_nested_lambdas_types/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02707_analyzer_nested_lambdas_types/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02707_analyzer_nested_lambdas_types/metadata.json b/parser/testdata/02707_analyzer_nested_lambdas_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02707_analyzer_nested_lambdas_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02707_analyzer_nested_lambdas_types/query.sql b/parser/testdata/02707_analyzer_nested_lambdas_types/query.sql new file mode 100644 index 000000000..320e1111e --- /dev/null +++ b/parser/testdata/02707_analyzer_nested_lambdas_types/query.sql @@ -0,0 +1,24 @@ +SELECT + range(1), + arrayMap(x -> arrayMap(x -> x, range(x)), [1]) +SETTINGS enable_analyzer = 0; + +SELECT + range(1), + arrayMap(x -> arrayMap(x -> x, range(x)), [1]) +SETTINGS enable_analyzer = 1; + +SELECT + range(1), + arrayMap(x -> arrayMap(x -> 1, range(x)), [1]) +SETTINGS enable_analyzer = 0; + +SELECT + range(1), + arrayMap(x -> arrayMap(x -> 1, range(x)), [1]) +SETTINGS enable_analyzer = 1; + +SELECT + range(1), + arrayMap(x -> arrayMap(y -> 1, range(x)), [1]) +SETTINGS enable_analyzer = 1; diff --git a/parser/testdata/02707_keeper_map_delete_update_strict/ast.json b/parser/testdata/02707_keeper_map_delete_update_strict/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02707_keeper_map_delete_update_strict/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02707_keeper_map_delete_update_strict/metadata.json b/parser/testdata/02707_keeper_map_delete_update_strict/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02707_keeper_map_delete_update_strict/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02707_keeper_map_delete_update_strict/query.sql b/parser/testdata/02707_keeper_map_delete_update_strict/query.sql new file mode 100644 index 000000000..cc5990353 --- /dev/null +++ b/parser/testdata/02707_keeper_map_delete_update_strict/query.sql @@ -0,0 +1,44 @@ +-- Tags: no-ordinary-database, no-fasttest + +DROP TABLE IF EXISTS 02707_keepermap_delete_update; + +SET keeper_map_strict_mode = 1; + +CREATE TABLE 02707_keepermap_delete_update (key UInt64, value String, value2 UInt64) ENGINE=KeeperMap('/' || currentDatabase() || '/test02707_keepermap_delete_update') PRIMARY KEY(key); + +INSERT INTO 02707_keepermap_delete_update VALUES (1, 'Some string', 0), (2, 'Some other string', 0), (3, 'random', 0), (4, 'random2', 0); + +SELECT *, _version FROM 02707_keepermap_delete_update ORDER BY key; +SELECT '-----------'; + +DELETE FROM 02707_keepermap_delete_update WHERE value
LIKE 'Some%string'; + +SELECT *, _version FROM 02707_keepermap_delete_update ORDER BY key; +SELECT '-----------'; + +ALTER TABLE 02707_keepermap_delete_update DELETE WHERE key >= 4; + +SELECT *, _version FROM 02707_keepermap_delete_update ORDER BY key; +SELECT '-----------'; + +DELETE FROM 02707_keepermap_delete_update WHERE 1 = 1; +SELECT count() FROM 02707_keepermap_delete_update; +SELECT '-----------'; + +INSERT INTO 02707_keepermap_delete_update VALUES (1, 'String', 10), (2, 'String', 20), (3, 'String', 30), (4, 'String', 40); +SELECT *, _version FROM 02707_keepermap_delete_update ORDER BY key; +SELECT '-----------'; + +ALTER TABLE 02707_keepermap_delete_update UPDATE value = 'Another' WHERE key > 2; +SELECT *, _version FROM 02707_keepermap_delete_update ORDER BY key; +SELECT '-----------'; + +ALTER TABLE 02707_keepermap_delete_update UPDATE key = key * 10 WHERE 1 = 1; -- { serverError BAD_ARGUMENTS } +SELECT *, _version FROM 02707_keepermap_delete_update ORDER BY key; +SELECT '-----------'; + +ALTER TABLE 02707_keepermap_delete_update UPDATE value2 = value2 * 10 + 2 WHERE value2 < 100; +SELECT *, _version FROM 02707_keepermap_delete_update ORDER BY key; +SELECT '-----------'; + +DROP TABLE IF EXISTS 02707_keepermap_delete_update; diff --git a/parser/testdata/02707_skip_index_with_in/ast.json b/parser/testdata/02707_skip_index_with_in/ast.json new file mode 100644 index 000000000..692155952 --- /dev/null +++ b/parser/testdata/02707_skip_index_with_in/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_skip_index_in (children 1)" + }, + { + "explain": " Identifier t_skip_index_in" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001072486, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02707_skip_index_with_in/metadata.json b/parser/testdata/02707_skip_index_with_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02707_skip_index_with_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02707_skip_index_with_in/query.sql b/parser/testdata/02707_skip_index_with_in/query.sql new file mode 100644 index 000000000..4767619ce --- /dev/null +++ b/parser/testdata/02707_skip_index_with_in/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS t_skip_index_in; + +CREATE TABLE t_skip_index_in +( + a String, + b String, + c String, + INDEX idx_c c TYPE bloom_filter GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY (a, b); + +INSERT INTO t_skip_index_in VALUES ('a', 'b', 'c'); + +-- This query checks that set is not being built if indexes are not used, +-- because with EXPLAIN the set will be built only for analysis of indexes. 
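+-- Sketch of the expected behavior, assuming the bloom_filter index on `c` declared above:
+-- with use_skip_indexes = 0 the IN-subquery set is never materialized, so `throwIf(1)` is not
+-- evaluated and the first EXPLAIN succeeds; with use_skip_indexes = 1 index analysis must build
+-- the set, `throwIf(1)` fires, and the second EXPLAIN fails with the annotated server error.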
+EXPLAIN SELECT count() FROM t_skip_index_in WHERE c IN (SELECT throwIf(1)) SETTINGS use_skip_indexes = 0 FORMAT Null; +EXPLAIN SELECT count() FROM t_skip_index_in WHERE c IN (SELECT throwIf(1)) SETTINGS use_skip_indexes = 1; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } + +DROP TABLE t_skip_index_in; diff --git a/parser/testdata/02708_dotProduct/ast.json b/parser/testdata/02708_dotProduct/ast.json new file mode 100644 index 000000000..340c77bde --- /dev/null +++ b/parser/testdata/02708_dotProduct/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '-- Negative tests'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001352616, + "rows_read": 5, + "bytes_read": 188 + } +} diff --git a/parser/testdata/02708_dotProduct/metadata.json b/parser/testdata/02708_dotProduct/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02708_dotProduct/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02708_dotProduct/query.sql b/parser/testdata/02708_dotProduct/query.sql new file mode 100644 index 000000000..05c66777d --- /dev/null +++ b/parser/testdata/02708_dotProduct/query.sql @@ -0,0 +1,66 @@ +SELECT '-- Negative tests'; + +SELECT arrayDotProduct([1, 2]); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT arrayDotProduct([1, 2], 'abc'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayDotProduct('abc', [1, 2]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayDotProduct([1, 2], ['abc', 'def']); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayDotProduct([1, 2], [3, 4, 5]); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } +SELECT dotProduct([1, 2], (3, 4, 5)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT '-- Tests'; +SELECT ' -- Array'; +SELECT [1, 2, 3]::Array(UInt8) AS x, [4, 5, 6]::Array(UInt8) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT [1, 2, 3]::Array(UInt16) AS x, [4, 5, 6]::Array(UInt16) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT [1, 2, 3]::Array(UInt32) AS x, [4, 5, 6]::Array(UInt32) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT [1, 2, 3]::Array(UInt64) AS x, [4, 5, 6]::Array(UInt64) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT [-1, -2, -3]::Array(Int8) AS x, [4, 5, 6]::Array(Int8) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT [-1, -2, -3]::Array(Int16) AS x, [4, 5, 6]::Array(Int16) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT [-1, -2, -3]::Array(Int32) AS x, [4, 5, 6]::Array(Int32) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT [-1, -2, -3]::Array(Int64) AS x, [4, 5, 6]::Array(Int64) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT [1, 2, 3]::Array(Float32) AS x, [4, 5, 6]::Array(Float32) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT [1, 2, 3]::Array(Float64) AS x, [4, 5, 6]::Array(Float64) AS y, dotProduct(x, y) AS res, toTypeName(res); +-- empty arrays +SELECT []::Array(Float32) AS x, []::Array(Float32) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT []::Array(UInt8) AS x, []::Array(UInt8) AS y, dotProduct(x, y) AS res, toTypeName(res); + +SELECT ' -- Tuple'; +SELECT (1::UInt8, 2::UInt8, 3::UInt8) AS x, (4::UInt8, 5::UInt8, 6::UInt8) AS y, dotProduct(x, y) AS res, 
toTypeName(res); +SELECT (1::UInt16, 2::UInt16, 3::UInt16) AS x, (4::UInt16, 5::UInt16, 6::UInt16) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT (1::UInt32, 2::UInt32, 3::UInt32) AS x, (4::UInt32, 5::UInt32, 6::UInt32) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT (1::UInt64, 2::UInt64, 3::UInt64) AS x, (4::UInt64, 5::UInt64, 6::UInt64) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT (-1::Int8, -2::Int8, -3::Int8) AS x, (4::Int8, 5::Int8, 6::Int8) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT (-1::Int16, -2::Int16, -3::Int16) AS x, (4::Int16, 5::Int16, 6::Int16) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT (-1::Int32, -2::Int32, -3::Int32) AS x, (4::Int32, 5::Int32, 6::Int32) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT (-1::Int64, -2::Int64, -3::Int64) AS x, (4::Int64, 5::Int64, 6::Int64) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT (1::Float32, 2::Float32, 3::Float32) AS x, (4::Float32, 5::Float32, 6::Float32) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT (1::Float64, 2::Float64, 3::Float64) AS x, (4::Float64, 5::Float64, 6::Float64) AS y, dotProduct(x, y) AS res, toTypeName(res); + +SELECT '-- Non-const argument'; +SELECT materialize([1::UInt8, 2::UInt8, 3::UInt8]) AS x, [4::UInt8, 5::UInt8, 6::UInt8] AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT materialize([]::Array(Float32)) AS x, []::Array(Float32) AS y, dotProduct(x, y) AS res, toTypeName(res); +SELECT materialize([]::Array(UInt8)) AS x, []::Array(UInt8) AS y, dotProduct(x, y) AS res, toTypeName(res); + +SELECT ' -- Array with mixed element arguments types (result type is the supertype)'; +SELECT [1::UInt16, 2::UInt8, 3::Float32] AS x, [4::Int16, 5::Float32, 6::UInt8] AS y, dotProduct(x, y) AS res, toTypeName(res); + +SELECT ' -- Tuple with mixed element arguments types'; +SELECT (1::UInt16, 2::UInt8, 3::Float32) AS x, (4::Int16, 5::Float32, 6::UInt8) AS y, dotProduct(x, y) AS res, toTypeName(res); + +SELECT '-- Aliases'; +SELECT scalarProduct([1, 2, 3], [4, 5, 6]); +SELECT scalarProduct((1, 2, 3), (4, 5, 6)); +SELECT arrayDotProduct([1, 2, 3], [4, 5, 6]); -- actually no alias but the internal function for arrays + +SELECT '-- Tests that trigger special paths'; +DROP TABLE IF EXISTS tab; +CREATE TABLE tab(id UInt64, vec Array(Float32)) ENGINE = MergeTree ORDER BY id; +INSERT INTO tab VALUES (0, [0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0, 3.0, 0.0, 1.0, 2.0]) (1, [5.0, 1.0, 2.0, 3.0, 5.0, 1.0, 2.0, 3.0, 5.0, 1.0, 2.0, 3.0, 5.0, 1.0, 2.0, 3.0, 5.0, 1.0, 2.0]); +SELECT ' -- non-const / non-const'; +SELECT id, arrayDotProduct(vec, vec) FROM tab ORDER BY id; +SELECT id, arrayDotProduct(vec::Array(Float64), vec::Array(Float64)) FROM tab ORDER BY id; +SELECT id, arrayDotProduct(vec::Array(UInt32), vec::Array(UInt32)) FROM tab ORDER BY id; +SELECT ' -- const / non-const'; +SELECT id, arrayDotProduct([5.0, 2.0, 2.0, 3.0, 5.0, 1.0, 2.0, 3.0, 5.0, 1.0, 2.0, 3.0, 5.0, 1.0, 2.0, 3.0, 5.0, 1.0, 2.0]::Array(Float32), vec) FROM tab ORDER BY id; +SELECT id, arrayDotProduct([5.0, 2.0, 2.0, 3.0, 5.0, 1.0, 2.0, 3.0, 5.0, 1.0, 2.0, 3.0, 5.0, 1.0, 2.0, 3.0, 5.0, 1.0, 2.0]::Array(Float64), vec) FROM tab ORDER BY id; +SELECT id, arrayDotProduct([5, 2, 2, 3, 5, 1, 2, 3, 5, 1, 2, 3, 5, 1, 2, 3, 5, 1, 2]::Array(UInt32), vec) FROM tab ORDER BY id; +DROP TABLE tab; diff --git a/parser/testdata/02708_parallel_replicas_not_found_column/ast.json b/parser/testdata/02708_parallel_replicas_not_found_column/ast.json new file mode 100644 index 
000000000..35148b619 --- /dev/null +++ b/parser/testdata/02708_parallel_replicas_not_found_column/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t_02708 (children 3)" + }, + { + "explain": " Identifier t_02708" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType DateTime" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001140318, + "rows_read": 10, + "bytes_read": 346 + } +} diff --git a/parser/testdata/02708_parallel_replicas_not_found_column/metadata.json b/parser/testdata/02708_parallel_replicas_not_found_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02708_parallel_replicas_not_found_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02708_parallel_replicas_not_found_column/query.sql b/parser/testdata/02708_parallel_replicas_not_found_column/query.sql new file mode 100644 index 000000000..4179b0491 --- /dev/null +++ b/parser/testdata/02708_parallel_replicas_not_found_column/query.sql @@ -0,0 +1,4 @@ +CREATE TABLE IF NOT EXISTS t_02708(x DateTime) ENGINE = MergeTree ORDER BY tuple(); +SET send_logs_level='error'; +SELECT count() FROM t_02708 SETTINGS enable_parallel_replicas=1; +DROP TABLE t_02708; diff --git a/parser/testdata/02709_generate_random_valid_decimals_and_bools/ast.json b/parser/testdata/02709_generate_random_valid_decimals_and_bools/ast.json new file mode 100644 index 000000000..fa7bd2564 --- /dev/null +++ b/parser/testdata/02709_generate_random_valid_decimals_and_bools/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal 'Decimal(6, 3)'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function generateRandom (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'x Decimal(6, 3)'" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Literal UInt64_5" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001289144, + "rows_read": 18, + "bytes_read": 706 + } +} diff --git a/parser/testdata/02709_generate_random_valid_decimals_and_bools/metadata.json b/parser/testdata/02709_generate_random_valid_decimals_and_bools/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02709_generate_random_valid_decimals_and_bools/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02709_generate_random_valid_decimals_and_bools/query.sql 
b/parser/testdata/02709_generate_random_valid_decimals_and_bools/query.sql new file mode 100644 index 000000000..c290ce483 --- /dev/null +++ b/parser/testdata/02709_generate_random_valid_decimals_and_bools/query.sql @@ -0,0 +1,5 @@ +select toString(x)::Decimal(6, 3) from generateRandom('x Decimal(6, 3)', 42) limit 5; +select toString(x)::Decimal(15, 9) from generateRandom('x Decimal(15, 9)', 42) limit 5; +select toString(x)::Decimal(30, 20) from generateRandom('x Decimal(30, 20)', 42) limit 5; +select toString(x)::Decimal(60, 40) from generateRandom('x Decimal(60, 40)', 42) limit 5; +select reinterpret(x, 'UInt8') from generateRandom('x Bool', 42) limit 5; diff --git a/parser/testdata/02709_storage_memory_compressed/ast.json b/parser/testdata/02709_storage_memory_compressed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02709_storage_memory_compressed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02709_storage_memory_compressed/metadata.json b/parser/testdata/02709_storage_memory_compressed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02709_storage_memory_compressed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02709_storage_memory_compressed/query.sql b/parser/testdata/02709_storage_memory_compressed/query.sql new file mode 100644 index 000000000..ab7e267e5 --- /dev/null +++ b/parser/testdata/02709_storage_memory_compressed/query.sql @@ -0,0 +1,12 @@ +-- Tags: memory-engine +DROP TABLE IF EXISTS t_memory_compressed; + +CREATE TABLE t_memory_compressed (id UInt64, s String, arr Array(LowCardinality(String)), m Map(String, String)) +ENGINE = Memory SETTINGS compress = 1; + +INSERT INTO t_memory_compressed VALUES (1, 'foo', range(5), map('k1', 'v1')); +INSERT INTO t_memory_compressed VALUES (2, 'bar', range(5), map('k2', 'v2')); + +SELECT * FROM t_memory_compressed ORDER BY id; + +DROP TABLE t_memory_compressed; diff --git a/parser/testdata/02710_aggregation_nested_map_ip_uuid/ast.json b/parser/testdata/02710_aggregation_nested_map_ip_uuid/ast.json new file mode 100644 index 000000000..d2bdd039e --- /dev/null +++ b/parser/testdata/02710_aggregation_nested_map_ip_uuid/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery summing_table (children 1)" + }, + { + "explain": " Identifier summing_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001335259, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/02710_aggregation_nested_map_ip_uuid/metadata.json b/parser/testdata/02710_aggregation_nested_map_ip_uuid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02710_aggregation_nested_map_ip_uuid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02710_aggregation_nested_map_ip_uuid/query.sql b/parser/testdata/02710_aggregation_nested_map_ip_uuid/query.sql new file mode 100644 index 000000000..456e8723d --- /dev/null +++ b/parser/testdata/02710_aggregation_nested_map_ip_uuid/query.sql @@ -0,0 +1,32 @@ +DROP TABLE IF EXISTS summing_table; +CREATE TABLE summing_table +( + id UInt32, + `ip4Map.value` Array(IPv4), `ip4Map.total` Array(UInt32), + `ip6Map.value` Array(IPv6), `ip6Map.total` Array(UInt32), + `uuidMap.value` Array(UUID), `uuidMap.total` Array(UInt32) +) ENGINE = SummingMergeTree ORDER BY id; + +INSERT INTO summing_table(id, ip4Map.value, ip4Map.total, 
ip6Map.value, ip6Map.total, uuidMap.value, uuidMap.total) + values (1, ['1.2.3.4'], [1], ['::1'], [2], ['00130949-0cd4-4c3d-84c4-cc421eff480f'], [3]); +INSERT INTO summing_table(id, ip4Map.value, ip4Map.total, ip6Map.value, ip6Map.total, uuidMap.value, uuidMap.total) + values(1, ['1.2.3.4'], [4], ['::1'], [5], ['00130949-0cd4-4c3d-84c4-cc421eff480f'], [6]); +OPTIMIZE TABLE summing_table FINAL; +SELECT * FROM summing_table ORDER BY id, ip4Map.value, ip4Map.total, ip6Map.value, ip6Map.total, uuidMap.value, uuidMap.total; + +INSERT INTO summing_table(id, ip4Map.value, ip4Map.total, ip6Map.value, ip6Map.total, uuidMap.value, uuidMap.total) + values(2, ['1.2.3.4'], [7], ['::1'], [8], ['00130949-0cd4-4c3d-84c4-cc421eff480f'], [9]); +INSERT INTO summing_table(id, ip4Map.value, ip4Map.total, ip6Map.value, ip6Map.total, uuidMap.value, uuidMap.total) + values(1, ['1.2.3.4'], [10], ['::1'], [11], ['00130949-0cd4-4c3d-84c4-cc421eff480f'], [12]); +INSERT INTO summing_table(id, ip4Map.value, ip4Map.total, ip6Map.value, ip6Map.total, uuidMap.value, uuidMap.total) + values(1, ['2.3.4.5'], [13], ['::2'], [14], ['00000000-0cd4-4c3d-84c4-cc421eff480f'], [15]); +INSERT INTO summing_table(id, ip4Map.value, ip4Map.total, ip6Map.value, ip6Map.total, uuidMap.value, uuidMap.total) + values(2, ['2.3.4.5'], [16], ['::1'], [17], ['00130949-0cd4-4c3d-84c4-cc421eff480f'], [18]); +INSERT INTO summing_table(id, ip4Map.value, ip4Map.total, ip6Map.value, ip6Map.total, uuidMap.value, uuidMap.total) + values(2, ['1.2.3.4'], [19], ['::2'], [20], ['00130949-0cd4-4c3d-84c4-cc421eff480f'], [21]); +INSERT INTO summing_table(id, ip4Map.value, ip4Map.total, ip6Map.value, ip6Map.total, uuidMap.value, uuidMap.total) + values(1, ['1.2.3.4'], [22], ['::1'], [23], ['00000000-0cd4-4c3d-84c4-cc421eff480f'], [24]); +OPTIMIZE TABLE summing_table FINAL; +SELECT * FROM summing_table ORDER BY id, ip4Map.value, ip4Map.total, ip6Map.value, ip6Map.total, uuidMap.value, uuidMap.total; + +DROP TABLE summing_table; diff --git a/parser/testdata/02710_allow_suspicious_indices/ast.json b/parser/testdata/02710_allow_suspicious_indices/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02710_allow_suspicious_indices/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02710_allow_suspicious_indices/metadata.json b/parser/testdata/02710_allow_suspicious_indices/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02710_allow_suspicious_indices/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02710_allow_suspicious_indices/query.sql b/parser/testdata/02710_allow_suspicious_indices/query.sql new file mode 100644 index 000000000..78d52f7bc --- /dev/null +++ b/parser/testdata/02710_allow_suspicious_indices/query.sql @@ -0,0 +1,22 @@ +-- Check CREATE TABLE + +DROP TABLE IF EXISTS tbl; +CREATE TABLE tbl (id UInt32) ENGINE = MergeTree() ORDER BY (id + 1, id + 1); -- { serverError BAD_ARGUMENTS } +CREATE TABLE tbl (id UInt32) ENGINE = MergeTree() ORDER BY (id + 1, id + 1) SETTINGS allow_suspicious_indices = 1; + +DROP TABLE IF EXISTS tbl; +CREATE TABLE tbl (id UInt32, INDEX idx (id + 1, id + 1) TYPE minmax) ENGINE = MergeTree() ORDER BY id; -- { serverError BAD_ARGUMENTS } +CREATE TABLE tbl (id UInt32, INDEX idx (id + 1, id + 1) TYPE minmax) ENGINE = MergeTree() ORDER BY id SETTINGS allow_suspicious_indices = 1; + +-- Check ALTER TABLE + +DROP TABLE IF EXISTS tbl; +CREATE TABLE tbl (id1 UInt32) ENGINE = MergeTree() ORDER BY id1; +ALTER 
TABLE tbl ADD COLUMN `id2` UInt32, MODIFY ORDER BY (id1, id2, id2); -- { serverError BAD_ARGUMENTS } +ALTER TABLE tbl ADD COLUMN `id2` UInt32, MODIFY ORDER BY (id1, id2, id1); -- { serverError BAD_ARGUMENTS } +ALTER TABLE tbl ADD COLUMN `id2` UInt32, MODIFY ORDER BY (id1, id2, id2) SETTINGS allow_suspicious_indices = 1; + +DROP TABLE IF EXISTS tbl; +CREATE TABLE tbl (id UInt32) ENGINE = MergeTree() ORDER BY id; +ALTER TABLE tbl ADD INDEX idx (id+1, id, id+1) TYPE minmax; -- { serverError BAD_ARGUMENTS } +ALTER TABLE tbl ADD INDEX idx (id+1, id, id+1) TYPE minmax SETTINGS allow_suspicious_indices = 1; diff --git a/parser/testdata/02710_date_diff_aliases/ast.json b/parser/testdata/02710_date_diff_aliases/ast.json new file mode 100644 index 000000000..7521828fb --- /dev/null +++ b/parser/testdata/02710_date_diff_aliases/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier name" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.functions" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001410531, + "rows_read": 9, + "bytes_read": 357 + } +} diff --git a/parser/testdata/02710_date_diff_aliases/metadata.json b/parser/testdata/02710_date_diff_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02710_date_diff_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02710_date_diff_aliases/query.sql b/parser/testdata/02710_date_diff_aliases/query.sql new file mode 100644 index 000000000..c6b31c44f --- /dev/null +++ b/parser/testdata/02710_date_diff_aliases/query.sql @@ -0,0 +1,7 @@ +SELECT name FROM system.functions +WHERE name = 'date_diff' + OR name = 'DATE_DIFF' + OR name = 'timestampDiff' + OR name = 'timestamp_diff' + OR name = 'TIMESTAMP_DIFF' +ORDER BY name; diff --git a/parser/testdata/02710_default_replicated_parameters/ast.json b/parser/testdata/02710_default_replicated_parameters/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02710_default_replicated_parameters/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02710_default_replicated_parameters/metadata.json b/parser/testdata/02710_default_replicated_parameters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02710_default_replicated_parameters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02710_default_replicated_parameters/query.sql b/parser/testdata/02710_default_replicated_parameters/query.sql new file mode 100644 index 000000000..faeea6cdf --- /dev/null +++ b/parser/testdata/02710_default_replicated_parameters/query.sql @@ -0,0 +1,11 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS replicated_database_params; + +CREATE DATABASE replicated_database_params ENGINE = Replicated('some/path/' || currentDatabase() || '/replicated_database_params'); +SHOW CREATE DATABASE replicated_database_params; +DROP DATABASE replicated_database_params; + +CREATE DATABASE replicated_database_params ENGINE = Replicated('some/path/' || currentDatabase() 
|| '/replicated_database_params', 'shard_1'); +SHOW CREATE DATABASE replicated_database_params; +DROP DATABASE replicated_database_params; diff --git a/parser/testdata/02710_show_table/ast.json b/parser/testdata/02710_show_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02710_show_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02710_show_table/metadata.json b/parser/testdata/02710_show_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02710_show_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02710_show_table/query.sql b/parser/testdata/02710_show_table/query.sql new file mode 100644 index 000000000..52682ce83 --- /dev/null +++ b/parser/testdata/02710_show_table/query.sql @@ -0,0 +1,16 @@ +-- Tags: no-parallel +DROP TABLE IF EXISTS t_2710_show_table; + +CREATE TABLE t_2710_show_table(n1 UInt32, s String) engine=Log; +SHOW TABLE t_2710_show_table; +SHOW CREATE TABLE t_2710_show_table; +SHOW CREATE t_2710_show_table; + +DROP TABLE t_2710_show_table; + +DROP DATABASE IF EXISTS t_2710_db; +CREATE DATABASE t_2710_db engine=Atomic; +SHOW DATABASE t_2710_db; +SHOW CREATE DATABASE t_2710_db; + +DROP DATABASE t_2710_db; diff --git a/parser/testdata/02710_topk_with_empty_array/ast.json b/parser/testdata/02710_topk_with_empty_array/ast.json new file mode 100644 index 000000000..af5a9420f --- /dev/null +++ b/parser/testdata/02710_topk_with_empty_array/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function topK (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function emptyArrayInt16 (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001384968, + "rows_read": 8, + "bytes_read": 307 + } +} diff --git a/parser/testdata/02710_topk_with_empty_array/metadata.json b/parser/testdata/02710_topk_with_empty_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02710_topk_with_empty_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02710_topk_with_empty_array/query.sql b/parser/testdata/02710_topk_with_empty_array/query.sql new file mode 100644 index 000000000..7de066e9a --- /dev/null +++ b/parser/testdata/02710_topk_with_empty_array/query.sql @@ -0,0 +1 @@ +SELECT topK(emptyArrayInt16()); diff --git a/parser/testdata/02711_server_uuid_macro/ast.json b/parser/testdata/02711_server_uuid_macro/ast.json new file mode 100644 index 000000000..01263450b --- /dev/null +++ b/parser/testdata/02711_server_uuid_macro/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001241357, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02711_server_uuid_macro/metadata.json b/parser/testdata/02711_server_uuid_macro/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02711_server_uuid_macro/metadata.json @@ -0,0 +1 @@ 
+{"todo": true} diff --git a/parser/testdata/02711_server_uuid_macro/query.sql b/parser/testdata/02711_server_uuid_macro/query.sql new file mode 100644 index 000000000..ebaab28dc --- /dev/null +++ b/parser/testdata/02711_server_uuid_macro/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS test; + +-- You can create a table with the {server_uuid} substituted. +CREATE TABLE test (x UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/{shard}', 'replica-{server_uuid}') ORDER BY x; + +-- The server UUID is correctly substituted. +SELECT engine_full LIKE ('%replica-{server_uuid}%') FROM system.tables WHERE database = currentDatabase() AND name = 'test'; +SELECT count() > 0 FROM system.zookeeper WHERE path = '/clickhouse/tables/' || currentDatabase() || '/test/s1/replicas/' AND name LIKE 'replica-' || serverUUID()::String || '%'; + +-- An attempt to create a second table with the same UUID results in error. +CREATE TABLE test2 (x UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test/{shard}', 'replica-{server_uuid}') ORDER BY x; -- { serverError REPLICA_ALREADY_EXISTS } + +-- The macro {server_uuid} is special, not a configuration-type macro. It's normal that it is inaccessible with the getMacro function. +SELECT getMacro('server_uuid'); -- { serverError NO_ELEMENTS_IN_CONFIG } + +DROP TABLE test SYNC; diff --git a/parser/testdata/02711_soundex_function/ast.json b/parser/testdata/02711_soundex_function/ast.json new file mode 100644 index 000000000..1467079f5 --- /dev/null +++ b/parser/testdata/02711_soundex_function/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function soundex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001020521, + "rows_read": 7, + "bytes_read": 254 + } +} diff --git a/parser/testdata/02711_soundex_function/metadata.json b/parser/testdata/02711_soundex_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02711_soundex_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02711_soundex_function/query.sql b/parser/testdata/02711_soundex_function/query.sql new file mode 100644 index 000000000..d2fe374b3 --- /dev/null +++ b/parser/testdata/02711_soundex_function/query.sql @@ -0,0 +1,28 @@ +SELECT soundex(''); +SELECT soundex('12345'); +SELECT soundex('341Jons54326ton'); +SELECT soundex('A2222222'); +SELECT soundex('Fairdale'); +SELECT soundex('Faredale'); +SELECT soundex('Jon1s2o3n'); +SELECT soundex('Jonson'); +SELECT soundex('Jonston'); +SELECT soundex('M\acDonald22321'); +SELECT soundex('MacDonald'); +SELECT soundex('S3344mith0000'); +SELECT soundex('Smith'); + +SELECT '---'; + +-- same input strings but in a table +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (col String) Engine=MergeTree ORDER BY col; +INSERT INTO tab VALUES ('') ('12345') ('341Jons54326ton') ('A2222222') ('Fairdale') ('Faredale') ('Jon1s2o3n') ('Jonson') ('Jonston') ('M\acDonald22321') ('MacDonald') ('S3344mith0000') ('Smith'); + +SELECT soundex(col) FROM tab; + +DROP TABLE tab; + +-- negative tests +SELECT soundex(toFixedString('Smith', 5)); -- { serverError ILLEGAL_COLUMN } 
+SELECT soundex(5); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/02711_trim_aliases/ast.json b/parser/testdata/02711_trim_aliases/ast.json new file mode 100644 index 000000000..e61cc27ce --- /dev/null +++ b/parser/testdata/02711_trim_aliases/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier name" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.functions" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.00118295, + "rows_read": 9, + "bytes_read": 357 + } +} diff --git a/parser/testdata/02711_trim_aliases/metadata.json b/parser/testdata/02711_trim_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02711_trim_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02711_trim_aliases/query.sql b/parser/testdata/02711_trim_aliases/query.sql new file mode 100644 index 000000000..d0d739805 --- /dev/null +++ b/parser/testdata/02711_trim_aliases/query.sql @@ -0,0 +1,5 @@ +SELECT name FROM system.functions +WHERE name = 'ltrim' + OR name = 'rtrim' + OR name = 'trim' +ORDER BY name; diff --git a/parser/testdata/02713_array_low_cardinality_string/ast.json b/parser/testdata/02713_array_low_cardinality_string/ast.json new file mode 100644 index 000000000..c5b9d14e7 --- /dev/null +++ b/parser/testdata/02713_array_low_cardinality_string/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001262037, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02713_array_low_cardinality_string/metadata.json b/parser/testdata/02713_array_low_cardinality_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02713_array_low_cardinality_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02713_array_low_cardinality_string/query.sql b/parser/testdata/02713_array_low_cardinality_string/query.sql new file mode 100644 index 000000000..964e82da9 --- /dev/null +++ b/parser/testdata/02713_array_low_cardinality_string/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + foo Array(LowCardinality(String)), + INDEX idx foo TYPE bloom_filter +) +ENGINE = MergeTree +PRIMARY KEY tuple(); + +INSERT INTO tab VALUES (['a', 'b']); + +SELECT '---'; + +SELECT table, name, type +FROM system.data_skipping_indices +WHERE database = currentDatabase() AND table = 'tab'; + +SELECT '---'; + +EXPLAIN indexes = 1, description = 0 SELECT * FROM tab WHERE has(foo, 'b'); + +DROP TABLE tab; diff --git a/parser/testdata/02713_ip4_uint_compare/ast.json b/parser/testdata/02713_ip4_uint_compare/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02713_ip4_uint_compare/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02713_ip4_uint_compare/metadata.json 
b/parser/testdata/02713_ip4_uint_compare/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02713_ip4_uint_compare/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02713_ip4_uint_compare/query.sql b/parser/testdata/02713_ip4_uint_compare/query.sql new file mode 100644 index 000000000..ec8d65843 --- /dev/null +++ b/parser/testdata/02713_ip4_uint_compare/query.sql @@ -0,0 +1,9 @@ +WITH toIPv4('127.0.0.10') AS ip +SELECT + ip = 2130706442::UInt32, + ip = 0::UInt32, + ip < 2130706443::UInt32, + ip > 2130706441::UInt32, + ip <= 2130706442::UInt32, + ip >= 2130706442::UInt32, + ip != 2130706442::UInt32; diff --git a/parser/testdata/02713_sequence_match_serialization_fix/ast.json b/parser/testdata/02713_sequence_match_serialization_fix/ast.json new file mode 100644 index 000000000..2f4679f2a --- /dev/null +++ b/parser/testdata/02713_sequence_match_serialization_fix/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02713_seqt (children 1)" + }, + { + "explain": " Identifier 02713_seqt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001148961, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02713_sequence_match_serialization_fix/metadata.json b/parser/testdata/02713_sequence_match_serialization_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02713_sequence_match_serialization_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02713_sequence_match_serialization_fix/query.sql b/parser/testdata/02713_sequence_match_serialization_fix/query.sql new file mode 100644 index 000000000..3521cb847 --- /dev/null +++ b/parser/testdata/02713_sequence_match_serialization_fix/query.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS 02713_seqt; +DROP TABLE IF EXISTS 02713_seqt_distr; + +SELECT + 'serialized state is not used', sequenceMatch('(?1)(?2)')(time, number_ = 1, number_ = 0) AS seq +FROM +( + SELECT + number AS time, + number % 2 AS number_ + FROM numbers_mt(100) +); + + +CREATE TABLE 02713_seqt +ENGINE = MergeTree +ORDER BY n AS +SELECT + sequenceMatchState('(?1)(?2)')(time, number_ = 1, number_ = 0) AS seq, + 1 AS n +FROM +( + SELECT + number AS time, + number % 2 AS number_ + FROM numbers_mt(100) +); + + +SELECT 'serialized state is used', sequenceMatchMerge('(?1)(?2)')(seq) AS seq +FROM 02713_seqt; + + +CREATE TABLE 02713_seqt_distr ( seq AggregateFunction(sequenceMatch('(?1)(?2)'), UInt64, UInt8, UInt8) , n UInt8) ENGINE = Distributed(test_shard_localhost, currentDatabase(), '02713_seqt'); + +SELECT 'via Distributed', sequenceMatchMerge('(?1)(?2)')(seq) AS seq FROM 02713_seqt_distr; diff --git a/parser/testdata/02714_date_date32_in/ast.json b/parser/testdata/02714_date_date32_in/ast.json new file mode 100644 index 000000000..9771ec8a7 --- /dev/null +++ b/parser/testdata/02714_date_date32_in/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDate32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": 
" Literal '2020-01-01'" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2020-01-01'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.000992776, + "rows_read": 12, + "bytes_read": 467 + } +} diff --git a/parser/testdata/02714_date_date32_in/metadata.json b/parser/testdata/02714_date_date32_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02714_date_date32_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02714_date_date32_in/query.sql b/parser/testdata/02714_date_date32_in/query.sql new file mode 100644 index 000000000..69a087eff --- /dev/null +++ b/parser/testdata/02714_date_date32_in/query.sql @@ -0,0 +1,4 @@ +select toDate32('2020-01-01') in (toDate('2020-01-01')); +select toDate('2020-01-01') in (toDate32('2020-01-01')); +select toDate('2020-01-01') in 1::Int64; +select toDate32('2020-01-01') in 1::UInt64; diff --git a/parser/testdata/02714_read_bytes_aggregateFunction/ast.json b/parser/testdata/02714_read_bytes_aggregateFunction/ast.json new file mode 100644 index 000000000..2b5f55d79 --- /dev/null +++ b/parser/testdata/02714_read_bytes_aggregateFunction/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001078664, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02714_read_bytes_aggregateFunction/metadata.json b/parser/testdata/02714_read_bytes_aggregateFunction/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02714_read_bytes_aggregateFunction/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02714_read_bytes_aggregateFunction/query.sql b/parser/testdata/02714_read_bytes_aggregateFunction/query.sql new file mode 100644 index 000000000..cabdb69d3 --- /dev/null +++ b/parser/testdata/02714_read_bytes_aggregateFunction/query.sql @@ -0,0 +1,61 @@ +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +CREATE TABLE test (id UInt64, `amax` AggregateFunction(argMax, String, DateTime)) +ENGINE=MergeTree() +ORDER BY id +SETTINGS ratio_of_defaults_for_sparse_serialization=1 -- Sparse columns will take more bytes for a single row +AS + SELECT number, argMaxState(number::String, '2023-04-12 16:23:01'::DateTime) + FROM numbers(1) + GROUP BY number; + +SELECT sum(id) FROM test FORMAT Null; +SELECT argMaxMerge(amax) FROM test FORMAT Null; + +INSERT INTO test + SELECT number, argMaxState(number::String, '2023-04-12 16:23:01'::DateTime) + FROM numbers(9) + GROUP BY number; + +SELECT sum(id) FROM test FORMAT Null; +SELECT argMaxMerge(amax) FROM test FORMAT Null; + +INSERT INTO test +SELECT number, argMaxState(number::String, '2023-04-12 16:23:01'::DateTime) +FROM numbers(990) +GROUP BY number; + +SELECT sum(id) FROM test FORMAT Null; +SELECT argMaxMerge(amax) FROM test FORMAT Null; + +SYSTEM FLUSH LOGS query_log; + +SELECT 'UInt64', + read_rows, + read_bytes +FROM system.query_log +WHERE + current_database = currentDatabase() AND + query = 'SELECT sum(id) FROM test FORMAT Null;' AND + type = 2 AND event_date >= yesterday() +ORDER BY event_time_microseconds; + +-- Size of ColumnAggregateFunction: Number of pointers * pointer size + arena size +-- 1 * 8 + AggregateFunction(argMax, String, DateTime) +-- +-- Size of AggregateFunction(argMax, 
String, DateTime): +-- 1 Base class + 1 specific/value class: +-- Base class: MAX(sizeOf(SingleValueDataFixed<T>), sizeOf(SingleValueDataString), sizeOf(SingleValueDataGeneric)) = 64 +-- Specific class: SingleValueDataFixed(DateTime) = 4 + 1. With padding = 8 +-- Total: 8 + 64 + 8 = 80 +-- +-- ColumnAggregateFunction total: 8 + 2 * 64 = 136 +SELECT 'AggregateFunction(argMax, String, DateTime)', + read_rows, + read_bytes +FROM system.query_log +WHERE + current_database = currentDatabase() AND + query = 'SELECT argMaxMerge(amax) FROM test FORMAT Null;' AND + type = 2 AND event_date >= yesterday() +ORDER BY event_time_microseconds; diff --git a/parser/testdata/02715_bit_operations_float/ast.json b/parser/testdata/02715_bit_operations_float/ast.json new file mode 100644 index 000000000..2a4a456a3 --- /dev/null +++ b/parser/testdata/02715_bit_operations_float/ast.json @@ -0,0 +1,106 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function notEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function bitNot (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_-inf" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function notEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function bitNot (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_inf" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function notEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function bitNot (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_3.40282e38" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function notEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function bitNot (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_nan" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 28, + + "statistics": + { + "elapsed": 0.001207878, + "rows_read": 28, + "bytes_read": 1085 + } +} diff --git a/parser/testdata/02715_bit_operations_float/metadata.json b/parser/testdata/02715_bit_operations_float/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02715_bit_operations_float/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02715_bit_operations_float/query.sql b/parser/testdata/02715_bit_operations_float/query.sql new file mode 100644 index 000000000..6dc0a14e9 --- /dev/null +++ b/parser/testdata/02715_bit_operations_float/query.sql @@ -0,0 +1,8 @@ +SELECT bitNot(-inf) != 0, bitNot(inf) != 0, bitNot(3.40282e+38) != 0, bitNot(nan) != 0; +SELECT bitCount(-inf), bitCount(inf), bitCount(3.40282e+38), bitCount(nan); + +SELECT bitAnd(1.0, 1.0); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT bitOr(1.0, 1.0); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT bitRotateLeft(1.0, 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT bitShiftLeft(1.0, 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT bitTest(1.0, 
1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/02715_or_null/ast.json b/parser/testdata/02715_or_null/ast.json new file mode 100644 index 000000000..4d408e055 --- /dev/null +++ b/parser/testdata/02715_or_null/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function argMaxOrNull (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier id" + }, + { + "explain": " Identifier timestamp" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001113822, + "rows_read": 8, + "bytes_read": 296 + } +} diff --git a/parser/testdata/02715_or_null/metadata.json b/parser/testdata/02715_or_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02715_or_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02715_or_null/query.sql b/parser/testdata/02715_or_null/query.sql new file mode 100644 index 000000000..f020dd2c7 --- /dev/null +++ b/parser/testdata/02715_or_null/query.sql @@ -0,0 +1,33 @@ +SELECT argMaxOrNull(id, timestamp) +FROM +( + SELECT + CAST(NULL, 'Nullable(UInt32)') AS id, + 2 AS timestamp +); + +SELECT + argMax(id, timestamp), + argMaxOrNull(id, timestamp) +FROM +( + SELECT + CAST(NULL, 'Nullable(UInt32)') AS id, + 2 AS timestamp + UNION ALL + SELECT + 1 AS id, + 1 AS timestamp +); + +SELECT argMaxIfOrNull(id, timestamp, id IS NOT NULL) +FROM +( + SELECT + CAST(NULL, 'Nullable(UInt32)') AS id, + 2 AS timestamp + UNION ALL + SELECT + 1 AS id, + 1 AS timestamp +); diff --git a/parser/testdata/02716_create_direct_dict_with_lifetime_throws/ast.json b/parser/testdata/02716_create_direct_dict_with_lifetime_throws/ast.json new file mode 100644 index 000000000..d3ce4ea36 --- /dev/null +++ b/parser/testdata/02716_create_direct_dict_with_lifetime_throws/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery dict_source (children 3)" + }, + { + "explain": " Identifier dict_source" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration key (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " ColumnDeclaration value (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier key" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001354779, + "rows_read": 11, + "bytes_read": 390 + } +} diff --git a/parser/testdata/02716_create_direct_dict_with_lifetime_throws/metadata.json b/parser/testdata/02716_create_direct_dict_with_lifetime_throws/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02716_create_direct_dict_with_lifetime_throws/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02716_create_direct_dict_with_lifetime_throws/query.sql b/parser/testdata/02716_create_direct_dict_with_lifetime_throws/query.sql new file mode 100644 index 000000000..763f74fb3 --- /dev/null +++ b/parser/testdata/02716_create_direct_dict_with_lifetime_throws/query.sql @@ 
-0,0 +1,3 @@ +CREATE TABLE IF NOT EXISTS dict_source (key UInt64, value String) ENGINE=MergeTree ORDER BY key; + +CREATE DICTIONARY dict(`key` UInt64,`value` String) PRIMARY KEY key SOURCE(CLICKHOUSE(table 'dict_source')) LAYOUT(DIRECT()) LIFETIME(0); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02716_drop_if_empty/ast.json b/parser/testdata/02716_drop_if_empty/ast.json new file mode 100644 index 000000000..4c94337b4 --- /dev/null +++ b/parser/testdata/02716_drop_if_empty/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_02716_1 (children 1)" + }, + { + "explain": " Identifier data_02716_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001450018, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/02716_drop_if_empty/metadata.json b/parser/testdata/02716_drop_if_empty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02716_drop_if_empty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02716_drop_if_empty/query.sql b/parser/testdata/02716_drop_if_empty/query.sql new file mode 100644 index 000000000..6cea90a6d --- /dev/null +++ b/parser/testdata/02716_drop_if_empty/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS data_02716_1; +DROP TABLE IF EXISTS data_02716_2; +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE_1:Identifier}.data_02716_3; +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE_1:Identifier}; + +CREATE TABLE data_02716_1 (v UInt64) ENGINE = MergeTree ORDER BY v; +CREATE TABLE data_02716_2 (v UInt64) ENGINE = MergeTree ORDER BY v; + +CREATE DATABASE {CLICKHOUSE_DATABASE_1:Identifier}; +CREATE TABLE {CLICKHOUSE_DATABASE_1:Identifier}.data_02716_3 (v UInt64) ENGINE = MergeTree ORDER BY v; + +INSERT INTO data_02716_1 SELECT * FROM system.numbers LIMIT 1; + +-- { echoOn } +DROP TABLE IF EMPTY data_02716_2; +DROP TABLE IF EMPTY data_02716_1; -- { serverError TABLE_NOT_EMPTY } +TRUNCATE TABLE data_02716_1; +DROP TABLE IF EMPTY data_02716_1; +DROP DATABASE IF EMPTY {CLICKHOUSE_DATABASE_1:Identifier}; -- { serverError NOT_IMPLEMENTED } + +SELECT count() FROM system.tables WHERE database = {CLICKHOUSE_DATABASE_1:String}; +SELECT count() FROM system.tables WHERE database = 'default' AND name IN ('data_02716_1', 'data_02716_2'); diff --git a/parser/testdata/02716_int256_arrayfunc/ast.json b/parser/testdata/02716_int256_arrayfunc/ast.json new file mode 100644 index 000000000..ff02a38d5 --- /dev/null +++ b/parser/testdata/02716_int256_arrayfunc/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayDifference (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toUInt128 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayDifference (children 1)" + }, + { + "explain": " ExpressionList (children 1)" 
+ }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toUInt128 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_3" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001506844, + "rows_read": 22, + "bytes_read": 907 + } +} diff --git a/parser/testdata/02716_int256_arrayfunc/metadata.json b/parser/testdata/02716_int256_arrayfunc/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02716_int256_arrayfunc/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02716_int256_arrayfunc/query.sql b/parser/testdata/02716_int256_arrayfunc/query.sql new file mode 100644 index 000000000..779a3168e --- /dev/null +++ b/parser/testdata/02716_int256_arrayfunc/query.sql @@ -0,0 +1,22 @@ +SELECT arrayDifference([toUInt128(1), 3]), toTypeName(arrayDifference([toUInt128(1), 3])); +SELECT arrayDifference([toInt128(1), 3]), toTypeName(arrayDifference([toInt128(1), 3])); +SELECT arrayDifference([toUInt256(1), 3]), toTypeName(arrayDifference([toUInt256(1), 3])); +SELECT arrayDifference([toInt256(1), 3]), toTypeName(arrayDifference([toInt256(1), 3])); + +SELECT '---'; + +SELECT arrayCumSum([toUInt128(1), 2]), toTypeName(arrayCumSum([toUInt128(1), 2])); +SELECT arrayCumSum([toInt128(1), 2]), toTypeName(arrayCumSum([toInt128(1), 2])); +SELECT arrayCumSum([toUInt256(1), 2]), toTypeName(arrayCumSum([toUInt256(1), 2])); +SELECT arrayCumSum([toInt256(1), 2]), toTypeName(arrayCumSum([toInt256(1), 2])); + +SELECT arrayCumSum([3, toInt128(1), toInt256(1)]), toTypeName(arrayCumSum([toUInt256(1), toUInt128(1)])); +SELECT arrayCumSum([toInt256(1), toInt128(1)]), toTypeName(arrayCumSum([toInt256(1), toInt128(1)])); + +SELECT '---'; + +SELECT arrayCumSumNonNegative([toUInt128(1), 2]), toTypeName(arrayCumSumNonNegative([toUInt128(1), 2])); +SELECT arrayCumSumNonNegative([toInt128(1), -2]), toTypeName(arrayCumSumNonNegative([toInt128(1), -2])); +SELECT arrayCumSumNonNegative([toUInt256(1), 2]), toTypeName(arrayCumSumNonNegative([toUInt256(1), 2])); +SELECT arrayCumSumNonNegative([toInt256(1), -2]), toTypeName(arrayCumSumNonNegative([toInt256(1), -2])); + diff --git a/parser/testdata/02717_pretty_json/ast.json b/parser/testdata/02717_pretty_json/ast.json new file mode 100644 index 000000000..dddf37ac9 --- /dev/null +++ b/parser/testdata/02717_pretty_json/ast.json @@ -0,0 +1,211 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 8)" + }, + { + "explain": " Literal UInt64_42 (alias num)" + }, + { + "explain": " Literal Array_[UInt64_42, UInt64_42] (alias arr)" + }, + { + "explain": " Literal Array_[Array_[Array_[UInt64_42, UInt64_42], Array_[UInt64_42, UInt64_42]], Array_[Array_[UInt64_42, UInt64_42]]] (alias nested_arr)" + }, + { + "explain": " Function CAST (alias tuple) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Literal 'Tuple(a UInt32, b UInt32)'" + }, + { + "explain": " Function CAST (alias 
nested_tuple) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Literal 'Tuple(a Tuple(b Tuple(c UInt32, d UInt32), e UInt32), f UInt32)'" + }, + { + "explain": " Function map (alias map) (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Literal UInt64_24" + }, + { + "explain": " Literal UInt64_24" + }, + { + "explain": " Function map (alias nested_map) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Function map (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Function map (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Function CAST (alias nested_types) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function map (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Literal Array_[UInt64_42, UInt64_42]" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function map (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Literal Array_[UInt64_42, UInt64_42]" + }, + { + "explain": " Literal 'Array(Tuple(Map(UInt32, UInt32), Array(UInt32)))'" + }, + { + "explain": " Identifier PrettyNDJSON" + } + ], + + "rows": 63, + + "statistics": + { + "elapsed": 0.001816975, + "rows_read": 63, + "bytes_read": 2783 + } +} diff --git a/parser/testdata/02717_pretty_json/metadata.json b/parser/testdata/02717_pretty_json/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02717_pretty_json/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02717_pretty_json/query.sql b/parser/testdata/02717_pretty_json/query.sql new file mode 100644 index 000000000..d227f2987 --- /dev/null +++ b/parser/testdata/02717_pretty_json/query.sql @@ -0,0 +1,2 @@ +select 42 as num, [42, 42] as arr, [[[42, 42], [42, 42]], [[42, 42]]] as nested_arr, tuple(42, 42)::Tuple(a UInt32, b UInt32) as tuple, tuple(tuple(tuple(42, 42), 42), 42)::Tuple(a Tuple(b Tuple(c UInt32, d UInt32), e UInt32), f UInt32) as nested_tuple, map(42, 42, 24, 24) as map, map(42, map(42, map(42, 42))) as nested_map, [tuple(map(42, 42), [42, 42]), tuple(map(42, 42), [42, 
42])]::Array(Tuple(Map(UInt32, UInt32), Array(UInt32))) as nested_types format PrettyNDJSON; + diff --git a/parser/testdata/02718_array_fold/ast.json b/parser/testdata/02718_array_fold/ast.json new file mode 100644 index 000000000..98f41aafd --- /dev/null +++ b/parser/testdata/02718_array_fold/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '-- Negative tests'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001415199, + "rows_read": 5, + "bytes_read": 188 + } +} diff --git a/parser/testdata/02718_array_fold/metadata.json b/parser/testdata/02718_array_fold/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02718_array_fold/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02718_array_fold/query.sql b/parser/testdata/02718_array_fold/query.sql new file mode 100644 index 000000000..e59eae87f --- /dev/null +++ b/parser/testdata/02718_array_fold/query.sql @@ -0,0 +1,60 @@ +SELECT '-- Negative tests'; +SELECT arrayFold(); -- { serverError TOO_FEW_ARGUMENTS_FOR_FUNCTION } +SELECT arrayFold(1); -- { serverError TOO_FEW_ARGUMENTS_FOR_FUNCTION } +SELECT arrayFold(1, toUInt64(0)); -- { serverError TOO_FEW_ARGUMENTS_FOR_FUNCTION } +SELECT arrayFold(1, emptyArrayUInt64(), toUInt64(0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayFold( acc,x -> x, emptyArrayString(), toInt8(0)); -- { serverError TYPE_MISMATCH } +SELECT arrayFold( acc,x -> x, 'not an array', toUInt8(0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayFold( acc,x,y -> x, [0, 1], 'not an array', toUInt8(0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayFold( acc,x -> x, [0, 1], [2, 3], toUInt8(0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayFold( acc,x,y -> x, [0, 1], [2, 3, 4], toUInt8(0)); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } + +SELECT '-- Const arrays'; +SELECT arrayFold( acc,x -> acc+x*2, [1, 2, 3, 4], toInt64(3)); +SELECT arrayFold( acc,x -> acc+x*2, emptyArrayInt64(), toInt64(3)); +SELECT arrayFold( acc,x,y -> acc+x*2+y*3, [1, 2, 3, 4], [5, 6, 7, 8], toInt64(3)); +SELECT arrayFold( acc,x -> arrayPushBack(acc, x), [1, 2, 3, 4], emptyArrayInt64()); +SELECT arrayFold( acc,x -> arrayPushFront(acc, x), [1, 2, 3, 4], emptyArrayInt64()); +SELECT arrayFold( acc,x -> (arrayPushFront(acc.1, x),arrayPushBack(acc.2, x)), [1, 2, 3, 4], (emptyArrayInt64(), emptyArrayInt64())); +SELECT arrayFold( acc,x -> x%2 ? (arrayPushBack(acc.1, x), acc.2): (acc.1, arrayPushBack(acc.2, x)), [1, 2, 3, 4, 5, 6], (emptyArrayInt64(), emptyArrayInt64())); + +SELECT '-- Non-const arrays'; +SELECT arrayFold( acc,x -> acc+x, range(number), number) FROM system.numbers LIMIT 5; +SELECT arrayFold( acc,x -> arrayPushFront(acc,x), range(number), emptyArrayUInt64()) FROM system.numbers LIMIT 5; +SELECT arrayFold( acc,x -> x%2 ? 
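+-- (odd elements are prepended to the accumulator, even elements appended)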
arrayPushFront(acc,x) : arrayPushBack(acc,x), range(number), emptyArrayUInt64()) FROM system.numbers LIMIT 5; + +SELECT '-- Bug 57458'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab (line String, patterns Array(String)) ENGINE = MergeTree ORDER BY line; +INSERT INTO tab VALUES ('abcdef', ['c']), ('ghijkl', ['h', 'k']), ('mnopqr', ['n']); + +SELECT +    line, +    patterns, +    arrayFold(acc, pat -> position(line, pat), patterns, 0::UInt64) +FROM tab +ORDER BY line; + +DROP TABLE tab; + +CREATE TABLE tab (line String) ENGINE = Memory(); +INSERT INTO tab VALUES ('xxx..yyy..'), ('..........'), ('..xx..yyy.'), ('..........'), ('xxx.......'); + +SELECT +    line, +    splitByNonAlpha(line), +    arrayFold( +        (acc, str) -> position(line, str), +        splitByNonAlpha(line), +        0::UInt64 +    ) +FROM +    tab; + +DROP TABLE tab; + +SELECT ' -- Bug 57816'; + +SELECT arrayFold(acc, x -> arrayIntersect(acc, x), [['qwe', 'asd'], ['qwe','asde']], []); diff --git a/parser/testdata/02718_insert_meet_hardware_error/ast.json b/parser/testdata/02718_insert_meet_hardware_error/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02718_insert_meet_hardware_error/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02718_insert_meet_hardware_error/metadata.json b/parser/testdata/02718_insert_meet_hardware_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02718_insert_meet_hardware_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02718_insert_meet_hardware_error/query.sql b/parser/testdata/02718_insert_meet_hardware_error/query.sql new file mode 100644 index 000000000..ab90bbecc --- /dev/null +++ b/parser/testdata/02718_insert_meet_hardware_error/query.sql @@ -0,0 +1,20 @@ +-- Tags: zookeeper, no-parallel + +DROP TABLE IF EXISTS t_hardware_error NO DELAY; + +CREATE TABLE t_hardware_error ( +    KeyID UInt32 +) Engine = ReplicatedMergeTree('/clickhouse/tables/{shard}/{database}/t_async_insert_dedup', '{replica}') +ORDER BY (KeyID); + +insert into t_hardware_error values (1), (2), (3), (4), (5); + +system enable failpoint replicated_merge_tree_commit_zk_fail_after_op; + +insert into t_hardware_error values (6), (7), (8), (9), (10); + +select count() from t_hardware_error; + +system disable failpoint replicated_merge_tree_commit_zk_fail_after_op; + +DROP TABLE t_hardware_error NO DELAY; diff --git a/parser/testdata/02719_aggregate_with_empty_string_key/ast.json b/parser/testdata/02719_aggregate_with_empty_string_key/ast.json new file mode 100644 index 000000000..5688e55b6 --- /dev/null +++ b/parser/testdata/02719_aggregate_with_empty_string_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00123902, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02719_aggregate_with_empty_string_key/metadata.json b/parser/testdata/02719_aggregate_with_empty_string_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++
b/parser/testdata/02719_aggregate_with_empty_string_key/query.sql @@ -0,0 +1,7 @@ +drop table if exists test ; +create table test(str Nullable(String), i Int64) engine=Memory(); +insert into test values(null, 1),('', 2),('s', 1); +select '-----------String------------'; +select str, max(i) from test group by str order by str nulls first; + +drop table test; diff --git a/parser/testdata/02720_row_policy_column_with_dots/ast.json b/parser/testdata/02720_row_policy_column_with_dots/ast.json new file mode 100644 index 000000000..acd1a0bb3 --- /dev/null +++ b/parser/testdata/02720_row_policy_column_with_dots/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery table_with_dot_column (children 3)" + }, + { + "explain": " Identifier table_with_dot_column" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration date (children 1)" + }, + { + "explain": " DataType Date" + }, + { + "explain": " ColumnDeclaration regular_column (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration other_column.2 (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier date" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001523509, + "rows_read": 14, + "bytes_read": 540 + } +} diff --git a/parser/testdata/02720_row_policy_column_with_dots/metadata.json b/parser/testdata/02720_row_policy_column_with_dots/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02720_row_policy_column_with_dots/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02720_row_policy_column_with_dots/query.sql b/parser/testdata/02720_row_policy_column_with_dots/query.sql new file mode 100644 index 000000000..fcb0bf628 --- /dev/null +++ b/parser/testdata/02720_row_policy_column_with_dots/query.sql @@ -0,0 +1,6 @@ +CREATE TABLE IF NOT EXISTS table_with_dot_column (date Date, regular_column String, `other_column.2` String) ENGINE = MergeTree() ORDER BY date; +INSERT INTO table_with_dot_column SELECT '2020-01-01', 'Hello', 'World'; +INSERT INTO table_with_dot_column SELECT toDate(now() + 48*3600), 'Hello', 'World'; +CREATE ROW POLICY IF NOT EXISTS row_policy ON table_with_dot_column USING toDate(date) >= today() - 30 TO ALL; +SELECT count(*) FROM table_with_dot_column; +DROP TABLE table_with_dot_column; diff --git a/parser/testdata/02721_url_cluster/ast.json b/parser/testdata/02721_url_cluster/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02721_url_cluster/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02721_url_cluster/metadata.json b/parser/testdata/02721_url_cluster/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02721_url_cluster/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02721_url_cluster/query.sql b/parser/testdata/02721_url_cluster/query.sql new file mode 100644 index 000000000..c30b03495 --- /dev/null +++ b/parser/testdata/02721_url_cluster/query.sql @@ -0,0 +1,40 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +select * from urlCluster('test_cluster_two_shards_localhost', 
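+-- the 2- to 5-argument forms of urlCluster are exercised below; omitted format, structure and compression are inferred: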
'http://localhost:11111/test/{a,b,c}.tsv') ORDER BY c1, c2, c3; +select * from urlCluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'TSV') ORDER BY c1, c2, c3; +select * from urlCluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64') ORDER BY c1, c2, c3; +select * from urlCluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64', 'auto') ORDER BY c1, c2, c3; + +desc urlCluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv'); +desc urlCluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'TSV'); +desc urlCluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64'); +desc urlCluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv', 'TSV', 'c1 UInt64, c2 UInt64, c3 UInt64', 'auto'); + +select COUNT() from urlCluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv'); +select COUNT(*) from urlCluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/{a,b,c}.tsv'); + +desc urlCluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv'); +desc urlCluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'TSV'); +desc urlCluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'auto'); +desc urlCluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'TSV', 'auto'); +desc urlCluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'TSV', 'auto', 'auto'); + +desc urlCluster('test_cluster_one_shard_three_replicas_localhost', headers('X-ClickHouse-Database'='default'), 'http://localhost:11111/test/{a,b}.tsv'); +desc urlCluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', headers('X-ClickHouse-Database'='default'), 'TSV'); +desc urlCluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'auto', headers('X-ClickHouse-Database'='default')); +desc urlCluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'TSV', 'auto', headers('X-ClickHouse-Database'='default')); +desc urlCluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'TSV', headers('X-ClickHouse-Database'='default'), 'auto', 'auto'); + +select * from urlCluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv') order by c1, c2, c3; +select * from urlCluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'TSV') order by c1, c2, c3; +select * from urlCluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'auto') order by c1, c2, c3; +select * from urlCluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'TSV', 'auto') order by c1, c2, c3; +select * from urlCluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/{a,b}.tsv', 'TSV', 'auto', 'auto') order by c1, c2, c3; + +drop table if exists test; +create table test (x UInt32, y UInt32, z UInt32) engine=Memory(); +insert 
into test select * from urlCluster('test_cluster_one_shard_three_replicas_localhost', 'http://localhost:11111/test/a.tsv', 'TSV'); +select * from test; +drop table test; + diff --git a/parser/testdata/02722_log_profile_events/ast.json b/parser/testdata/02722_log_profile_events/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02722_log_profile_events/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02722_log_profile_events/metadata.json b/parser/testdata/02722_log_profile_events/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02722_log_profile_events/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02722_log_profile_events/query.sql b/parser/testdata/02722_log_profile_events/query.sql new file mode 100644 index 000000000..07e2d524d --- /dev/null +++ b/parser/testdata/02722_log_profile_events/query.sql @@ -0,0 +1,5 @@ +-- There are no fatal errors: +SELECT count() FROM system.events WHERE event = 'LogFatal'; + +-- It counts the trace log messages: +SELECT count() > 0 FROM system.events WHERE event = 'LogTrace'; diff --git a/parser/testdata/02723_jit_aggregation_bug_48120/ast.json b/parser/testdata/02723_jit_aggregation_bug_48120/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02723_jit_aggregation_bug_48120/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02723_jit_aggregation_bug_48120/metadata.json b/parser/testdata/02723_jit_aggregation_bug_48120/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02723_jit_aggregation_bug_48120/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02723_jit_aggregation_bug_48120/query.sql b/parser/testdata/02723_jit_aggregation_bug_48120/query.sql new file mode 100644 index 000000000..1c714e341 --- /dev/null +++ b/parser/testdata/02723_jit_aggregation_bug_48120/query.sql @@ -0,0 +1,17 @@ +-- Tags: no-fasttest, no-msan + +drop table if exists dummy; +CREATE TABLE dummy ( num1 Int32, num2 Enum8('foo' = 0, 'bar' = 1, 'tar' = 2) ) +ENGINE = MergeTree ORDER BY num1 as select 5, 'bar'; + +set compile_aggregate_expressions=1; +set min_count_to_compile_aggregate_expression=0; + +-- { echoOn } +SYSTEM DROP COMPILED EXPRESSION CACHE; +SELECT minIf(num1, num1 < 5) FROM dummy GROUP BY num2; +SYSTEM DROP COMPILED EXPRESSION CACHE; +SELECT minIf(num1, num1 >= 5) FROM dummy GROUP BY num2; +-- { echoOff } + +drop table dummy; diff --git a/parser/testdata/02723_parallelize_output_setting/ast.json b/parser/testdata/02723_parallelize_output_setting/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02723_parallelize_output_setting/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02723_parallelize_output_setting/metadata.json b/parser/testdata/02723_parallelize_output_setting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02723_parallelize_output_setting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02723_parallelize_output_setting/query.sql b/parser/testdata/02723_parallelize_output_setting/query.sql new file mode 100644 index 000000000..86e6d4b4e --- /dev/null +++ b/parser/testdata/02723_parallelize_output_setting/query.sql @@ -0,0 +1,15 @@ +-- Tags: no-parallel, no-fasttest + +insert into function file(data_02723.csv) select number from numbers(5) settings 
engine_file_truncate_on_insert=1; + +set max_threads=2; +-- { echoOn } +set parallelize_output_from_storages=1; +select startsWith(trimLeft(explain),'Resize') as resize from (explain pipeline select * from file(data_02723.csv)) where resize; +-- no Resize in pipeline +set parallelize_output_from_storages=0; +select startsWith(trimLeft(explain),'Resize') as resize from (explain pipeline select * from file(data_02723.csv)) where resize; + +-- Data from URL source is immediately resized to max_threads streams, before any ExpressionTransform. +set parallelize_output_from_storages=1; +select match(arrayStringConcat(groupArray(explain), ''), '.*Resize 1 → 2 *URL 0 → 1 *$') from (explain pipeline select x, count() from url('https://example.com', Parquet, 'x Int64') group by x order by count() limit 10); \ No newline at end of file diff --git a/parser/testdata/02723_zookeeper_name/ast.json b/parser/testdata/02723_zookeeper_name/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02723_zookeeper_name/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02723_zookeeper_name/metadata.json b/parser/testdata/02723_zookeeper_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02723_zookeeper_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02723_zookeeper_name/query.sql b/parser/testdata/02723_zookeeper_name/query.sql new file mode 100644 index 000000000..e63747679 --- /dev/null +++ b/parser/testdata/02723_zookeeper_name/query.sql @@ -0,0 +1,23 @@ +-- Tags: zookeeper, replica + +SELECT 'Create Tables'; +CREATE TABLE t1(k UInt32, v UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_02723/zookeeper_name/t1', '1') ORDER BY k; + +CREATE TABLE t2(k UInt32, v UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_02723/zookeeper_name/t2', '1') ORDER BY k; + +SELECT 'Insert Data'; + +INSERT INTO t1 SELECT * FROM generateRandom('k UInt32, v UInt32') LIMIT 1; +INSERT INTO t2 SELECT * FROM generateRandom('k UInt32, v UInt32') LIMIT 1; + +SELECT +    table, zookeeper_name, count() +FROM system.replicas +INNER JOIN system.parts USING (database, table) +WHERE database = currentDatabase() +GROUP BY table, zookeeper_name +ORDER BY table, zookeeper_name +FORMAT CSV; + +DROP TABLE t1; +DROP TABLE t2; diff --git a/parser/testdata/02724_function_in_left_table_clause_asof_join/ast.json b/parser/testdata/02724_function_in_left_table_clause_asof_join/ast.json new file mode 100644 index 000000000..946edb52a --- /dev/null +++ b/parser/testdata/02724_function_in_left_table_clause_asof_join/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 1)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Function count (children 1)" + }, + { + "explain": "     ExpressionList (children 1)" + }, + { + "explain": "      Asterisk" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.00121864, + "rows_read": 7, + "bytes_read": 250 + } +} diff --git a/parser/testdata/02724_function_in_left_table_clause_asof_join/metadata.json b/parser/testdata/02724_function_in_left_table_clause_asof_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02724_function_in_left_table_clause_asof_join/metadata.json
@@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02724_function_in_left_table_clause_asof_join/query.sql b/parser/testdata/02724_function_in_left_table_clause_asof_join/query.sql new file mode 100644 index 000000000..6aa70a379 --- /dev/null +++ b/parser/testdata/02724_function_in_left_table_clause_asof_join/query.sql @@ -0,0 +1,20 @@ +select count(*) +from ( + select 1 as id, [1, 2, 3] as arr +) as sessions +ASOF LEFT JOIN ( + select 1 as session_id, 4 as id +) as visitors +ON visitors.session_id <= sessions.id AND arrayFirst(a -> a, arrayMap((a) -> a, sessions.arr)) = visitors.id +; + +select count(*) +from ( + select 1 as id, [1, 2, 3] as arr +) as sessions +ASOF LEFT JOIN ( + select 1 as session_id, 4 as id +) as visitors +ON visitors.session_id <= sessions.id AND arrayFirst(a -> a, arrayMap((a) -> a, sessions.arr)) = visitors.id +SETTINGS join_algorithm = 'full_sorting_merge' +; diff --git a/parser/testdata/02724_jit_logical_functions/ast.json b/parser/testdata/02724_jit_logical_functions/ast.json new file mode 100644 index 000000000..b3f3eb9a9 --- /dev/null +++ b/parser/testdata/02724_jit_logical_functions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001190384, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02724_jit_logical_functions/metadata.json b/parser/testdata/02724_jit_logical_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02724_jit_logical_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02724_jit_logical_functions/query.sql b/parser/testdata/02724_jit_logical_functions/query.sql new file mode 100644 index 000000000..fe6646337 --- /dev/null +++ b/parser/testdata/02724_jit_logical_functions/query.sql @@ -0,0 +1,21 @@ +SET compile_expressions = 1; +SET min_count_to_compile_expression = 0; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table (a UInt8, b UInt8) ENGINE = TinyLog; +INSERT INTO test_table VALUES (0, 0), (0, 1), (1, 0), (1, 1); + +SELECT 'Logical functions not null'; +SELECT a, b, and(a, b), or(a, b), xor(a, b) FROM test_table; + +DROP TABLE test_table; + +DROP TABLE IF EXISTS test_table_nullable; +CREATE TABLE test_table_nullable (a UInt8, b Nullable(UInt8)) ENGINE = TinyLog; +INSERT INTO test_table_nullable VALUES (0, 0), (0, 1), (1, 0), (1, 1), (0, NULL), (1, NULL); + +SELECT 'Logical functions nullable'; +SELECT a, b, and(a, b), or(a, b), xor(a, b) FROM test_table_nullable; +SELECT and(b, b), or(b, b), xor(b, b) FROM test_table_nullable; + +DROP TABLE test_table_nullable; diff --git a/parser/testdata/02724_mutliple_storage_join/ast.json b/parser/testdata/02724_mutliple_storage_join/ast.json new file mode 100644 index 000000000..61778d1d7 --- /dev/null +++ b/parser/testdata/02724_mutliple_storage_join/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery user (children 3)" + }, + { + "explain": " Identifier user" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration id (children 1)" + }, + { + "explain": " DataType UInt32" + }, + { + "explain": " ColumnDeclaration name (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 1)" + }, + { + 
"explain": " Function Join (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier ANY" + }, + { + "explain": " Identifier LEFT" + }, + { + "explain": " Identifier id" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001275904, + "rows_read": 14, + "bytes_read": 474 + } +} diff --git a/parser/testdata/02724_mutliple_storage_join/metadata.json b/parser/testdata/02724_mutliple_storage_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02724_mutliple_storage_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02724_mutliple_storage_join/query.sql b/parser/testdata/02724_mutliple_storage_join/query.sql new file mode 100644 index 000000000..286e86770 --- /dev/null +++ b/parser/testdata/02724_mutliple_storage_join/query.sql @@ -0,0 +1,21 @@ +CREATE TABLE user(id UInt32, name String) ENGINE = Join(ANY, LEFT, id); +INSERT INTO user VALUES (1,'U1')(2,'U2')(3,'U3'); + +CREATE TABLE product(id UInt32, name String, cate String) ENGINE = Join(ANY, LEFT, id); +INSERT INTO product VALUES (1,'P1','C1')(2,'P2','C1')(3,'P3','C2'); + +CREATE TABLE order(id UInt32, pId UInt32, uId UInt32) ENGINE = TinyLog; +INSERT INTO order VALUES (1,1,1)(2,1,2)(3,2,3); + +SELECT ignore(*) FROM ( + SELECT + uId, + user.id as `uuu` + FROM order + LEFT ANY JOIN user + ON uId = `uuu` +); + +SELECT ignore(*) FROM order +LEFT ANY JOIN user ON uId = user.id +LEFT ANY JOIN product ON pId = product.id; diff --git a/parser/testdata/02724_persist_interval_type/ast.json b/parser/testdata/02724_persist_interval_type/ast.json new file mode 100644 index 000000000..f8d79013b --- /dev/null +++ b/parser/testdata/02724_persist_interval_type/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery saved_intervals_tmp (children 1)" + }, + { + "explain": " Identifier saved_intervals_tmp" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001161976, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/02724_persist_interval_type/metadata.json b/parser/testdata/02724_persist_interval_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02724_persist_interval_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02724_persist_interval_type/query.sql b/parser/testdata/02724_persist_interval_type/query.sql new file mode 100644 index 000000000..3acce003c --- /dev/null +++ b/parser/testdata/02724_persist_interval_type/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS saved_intervals_tmp; +create table saved_intervals_tmp Engine=Memory as SELECT number as EventID, toIntervalSecond(number+1) as v1, toIntervalHour(number+2) as v2, toIntervalNanosecond(number+3) as v3 from numbers(2); +with toDateTime64('2023-01-01 00:00:00.000000001', 9, 'US/Eastern') as c select c+v1 as c_v1, c+v2 as c_v2, c+v3 as c_v3, date_diff(second, c, c_v1), date_diff(hour, c, c_v2), date_diff(second, c, c_v3) from saved_intervals_tmp; +DROP TABLE IF EXISTS saved_intervals_tmp; + +DROP TABLE IF EXISTS saved_intervals_mgt; +create table saved_intervals_mgt Engine=MergeTree() ORDER BY EventID as SELECT number as EventID, toIntervalSecond(number+1) as v1, toIntervalHour(number+2) as v2, toIntervalNanosecond(number+3) as v3 from numbers(2); +with toDateTime64('2023-01-01 00:00:00.000000001', 9, 'US/Eastern') as c select c+v1 as c_v1, c+v2 as c_v2, c+v3 as c_v3, 
date_diff(second, c, c_v1), date_diff(hour, c, c_v2), date_diff(second, c, c_v3) from saved_intervals_mgt; +DROP TABLE IF EXISTS saved_intervals_mgt; + +DROP TABLE IF EXISTS t1; +CREATE table t1 (v1 IntervalMinute) ENGINE = Memory; +INSERT INTO t1 with toDateTime64('2023-01-01 00:00:00.000000001', 9, 'US/Eastern') as c SELECT EXTRACT(MINUTE FROM c+toIntervalSecond(number * 60)) from numbers(2); +select * from t1; +DROP TABLE IF EXISTS t1; \ No newline at end of file diff --git a/parser/testdata/02724_show_indexes/ast.json b/parser/testdata/02724_show_indexes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02724_show_indexes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02724_show_indexes/metadata.json b/parser/testdata/02724_show_indexes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02724_show_indexes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02724_show_indexes/query.sql b/parser/testdata/02724_show_indexes/query.sql new file mode 100644 index 000000000..a8d699ddb --- /dev/null +++ b/parser/testdata/02724_show_indexes/query.sql @@ -0,0 +1,98 @@ +-- Tags: no-parallel +-- no-parallel: creates a custom database schema and expects to use it exclusively + +-- Create a test table and verify that the output of SHOW INDEXES is sane. +-- The matching of actual/expected results relies on the fact that the output of SHOW INDEX is sorted. +DROP TABLE IF EXISTS tbl; +CREATE TABLE tbl +( + a UInt64, + b UInt64, + c UInt64, + d UInt64, + e UInt64, + INDEX mm1_idx (a, c, d) TYPE minmax, + INDEX mm2_idx (c, d, e) TYPE minmax, + INDEX set_idx (e) TYPE set(100), + INDEX blf_idx (d, b) TYPE bloom_filter(0.8) +) +ENGINE = MergeTree +PRIMARY KEY (c, a); + +SELECT '--- Aliases of SHOW INDEX'; +SHOW INDEX FROM tbl; +SHOW INDEXES FROM tbl; +SHOW INDICES FROM tbl; +SHOW KEYS FROM tbl; + +SELECT '--- EXTENDED'; +SHOW EXTENDED INDEX FROM tbl; +-- +SELECT '--- WHERE'; +SHOW INDEX FROM tbl WHERE index_type LIKE '%minmax%'; + +SELECT '--- Check with weird table names'; + +DROP TABLE IF EXISTS `$4@^7`; +CREATE TABLE `$4@^7` (c String) ENGINE = MergeTree ORDER BY c; +SHOW INDEX FROM `$4@^7`; +DROP TABLE `$4@^7`; + +DROP TABLE IF EXISTS NULL; +CREATE TABLE NULL (c String) ENGINE = MergeTree ORDER BY c; +SHOW INDEX FROM NULL; +DROP TABLE NULL; + +DROP TABLE IF EXISTS `tab.with.dots`; +CREATE TABLE `tab.with.dots` +( + a UInt64, + b UInt64, + c UInt64, + d UInt64, + e UInt64, + INDEX mm1_idx (a, c, d) TYPE minmax, + INDEX mm2_idx (c, d, e) TYPE minmax, + INDEX set_idx (e) TYPE set(100), + INDEX blf_idx (d, b) TYPE bloom_filter(0.8) +) +ENGINE = MergeTree +PRIMARY KEY (c, a); +SHOW INDEX FROM `tab.with.dots`; +DROP TABLE `tab.with.dots`; + +DROP DATABASE IF EXISTS `'`; +CREATE DATABASE `'`; +CREATE TABLE `'`.`'` (c String) ENGINE = MergeTree ORDER BY c; +SHOW INDEX FROM `'` FROM `'`; +SHOW INDEX FROM `'`.`'`; -- abbreviated form +DROP TABLE `'`.`'`; +DROP DATABASE `'`; + +-- Create a table in a different database. Intentionally using the same table/column names as above so +-- we notice if something is buggy in the implementation of SHOW INDEX. 
+DROP DATABASE IF EXISTS database_123456789abcde; +CREATE DATABASE database_123456789abcde; -- pseudo-random database name + +DROP TABLE IF EXISTS database_123456789abcde.tbl; +CREATE TABLE database_123456789abcde.tbl +( + a UInt64, + b UInt64, + INDEX mmi_idx b TYPE minmax +) +ENGINE = MergeTree +PRIMARY KEY a; + +SELECT '--- Original table'; +SHOW INDEX FROM tbl; + +SELECT '--- Equally named table in other database'; +SHOW INDEX FROM tbl FROM database_123456789abcde; + +SELECT '--- Short form'; +SHOW INDEX FROM database_123456789abcde.tbl; + +DROP DATABASE database_123456789abcde; + +DROP TABLE tbl; diff --git a/parser/testdata/02725_agg_projection_respect_PK/ast.json b/parser/testdata/02725_agg_projection_respect_PK/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02725_agg_projection_respect_PK/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02725_agg_projection_respect_PK/metadata.json b/parser/testdata/02725_agg_projection_respect_PK/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02725_agg_projection_respect_PK/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02725_agg_projection_respect_PK/query.sql b/parser/testdata/02725_agg_projection_respect_PK/query.sql new file mode 100644 index 000000000..21ababebf --- /dev/null +++ b/parser/testdata/02725_agg_projection_respect_PK/query.sql @@ -0,0 +1,34 @@ +-- Tags: no-random-merge-tree-settings + +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 +( + c1 Int64, + c2 Int64, + c3 Int64, + PROJECTION p1 + ( + SELECT + c1, + c2, + sum(c3) + GROUP BY + c2, + c1 + ) +) +ENGINE = MergeTree ORDER BY (c1, c2) settings min_bytes_for_wide_part = 10485760, min_rows_for_wide_part = 0; + +SET optimize_trivial_insert_select = 1; +INSERT INTO t0 SELECT + number, + -number, + number +FROM numbers_mt(1e5); + +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; +select trimLeft(*) from (EXPLAIN indexes = 1 SELECT c1, sum(c3) FROM t0 GROUP BY c1) where explain like '%ReadFromMergeTree%'; +select trimLeft(*) from (EXPLAIN indexes = 1 SELECT c1, sum(c3) FROM t0 WHERE c1 = 100 GROUP BY c1) where explain like '%Granules%'; + +DROP TABLE t0; diff --git a/parser/testdata/02725_alias_columns_should_not_allow_compression_codec/ast.json b/parser/testdata/02725_alias_columns_should_not_allow_compression_codec/ast.json new file mode 100644 index 000000000..5d0b34410 --- /dev/null +++ b/parser/testdata/02725_alias_columns_should_not_allow_compression_codec/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alias_column_should_not_allow_compression (children 1)" + }, + { + "explain": " Identifier alias_column_should_not_allow_compression" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001166516, + "rows_read": 2, + "bytes_read": 134 + } +} diff --git a/parser/testdata/02725_alias_columns_should_not_allow_compression_codec/metadata.json b/parser/testdata/02725_alias_columns_should_not_allow_compression_codec/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02725_alias_columns_should_not_allow_compression_codec/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02725_alias_columns_should_not_allow_compression_codec/query.sql b/parser/testdata/02725_alias_columns_should_not_allow_compression_codec/query.sql new 
file mode 100644 index 000000000..083a3aefd --- /dev/null +++ b/parser/testdata/02725_alias_columns_should_not_allow_compression_codec/query.sql @@ -0,0 +1,7 @@ +drop table if exists alias_column_should_not_allow_compression; +create table if not exists alias_column_should_not_allow_compression ( user_id UUID, user_id_hashed ALIAS (cityHash64(user_id))) engine=MergeTree() order by tuple(); +create table if not exists alias_column_should_not_allow_compression_fail ( user_id UUID, user_id_hashed ALIAS (cityHash64(user_id)) codec(LZ4HC(1))) engine=MergeTree() order by tuple(); -- { serverError BAD_ARGUMENTS } +alter table alias_column_should_not_allow_compression modify column user_id codec(LZ4HC(1)); +alter table alias_column_should_not_allow_compression modify column user_id_hashed codec(LZ4HC(1)); -- { serverError BAD_ARGUMENTS } +alter table alias_column_should_not_allow_compression add column user_id_hashed_1 UInt64 ALIAS (cityHash64(user_id)) codec(LZ4HC(1)); -- { serverError BAD_ARGUMENTS } +drop table if exists alias_column_should_not_allow_compression; diff --git a/parser/testdata/02725_alias_with_restricted_keywords/ast.json b/parser/testdata/02725_alias_with_restricted_keywords/ast.json new file mode 100644 index 000000000..29ab2b667 --- /dev/null +++ b/parser/testdata/02725_alias_with_restricted_keywords/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias array)" + }, + { + "explain": " Literal UInt64_2 (alias union)" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001108841, + "rows_read": 6, + "bytes_read": 233 + } +} diff --git a/parser/testdata/02725_alias_with_restricted_keywords/metadata.json b/parser/testdata/02725_alias_with_restricted_keywords/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02725_alias_with_restricted_keywords/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02725_alias_with_restricted_keywords/query.sql b/parser/testdata/02725_alias_with_restricted_keywords/query.sql new file mode 100644 index 000000000..6df0e8560 --- /dev/null +++ b/parser/testdata/02725_alias_with_restricted_keywords/query.sql @@ -0,0 +1 @@ +SELECT 1 `array`, 2 "union"; diff --git a/parser/testdata/02725_any_join_single_row/ast.json b/parser/testdata/02725_any_join_single_row/ast.json new file mode 100644 index 000000000..6ac60c058 --- /dev/null +++ b/parser/testdata/02725_any_join_single_row/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery join_test (children 1)" + }, + { + "explain": " Identifier join_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001077122, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02725_any_join_single_row/metadata.json b/parser/testdata/02725_any_join_single_row/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02725_any_join_single_row/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02725_any_join_single_row/query.sql b/parser/testdata/02725_any_join_single_row/query.sql new file mode 100644 index 000000000..f7ddd2f40 --- /dev/null +++ 
b/parser/testdata/02725_any_join_single_row/query.sql @@ -0,0 +1,41 @@ +DROP TABLE IF EXISTS join_test; +DROP TABLE IF EXISTS join_test_right; + +CREATE TABLE join_test ( `key` UInt64, `value` UInt64 ) ENGINE = Join(ANY, LEFT, key); + +-- Save table size before inserting any rows +CREATE TEMPORARY TABLE initial_table_size AS + SELECT engine_full, total_rows, total_bytes FROM system.tables WHERE (name = 'join_test') AND (database = currentDatabase()); + +-- Check that table size is less than 100K +SELECT engine_full, total_rows, total_bytes < 100_000 FROM initial_table_size; + +INSERT INTO join_test (key, value) SELECT 1, number FROM numbers(1); + +-- Save table size after inserting one row +CREATE TEMPORARY TABLE one_row_table_size AS + SELECT engine_full, total_rows, total_bytes FROM system.tables WHERE (name = 'join_test') AND (database = currentDatabase()); + +-- Check that the table size is less than 2x the initial size after inserting one row +SELECT engine_full, total_rows, total_bytes < 2 * (SELECT total_bytes FROM initial_table_size) FROM one_row_table_size; + +-- Insert some more rows with the same key +INSERT INTO join_test (key, value) SELECT 1, number FROM numbers(1); +INSERT INTO join_test (key, value) SELECT 1, number FROM numbers(10_000); + +-- Check that rows with the same key are not duplicated +SELECT engine_full, total_rows, total_bytes == (SELECT total_bytes FROM one_row_table_size) FROM system.tables WHERE (name = 'join_test') AND (database = currentDatabase()); + +-- For RIGHT join we save all rows from the right table +CREATE TABLE join_test_right ( `key` UInt64, `value` UInt64 ) ENGINE = Join(ANY, RIGHT, key); + +INSERT INTO join_test_right (key, value) SELECT 1, number FROM numbers(1); +INSERT INTO join_test_right (key, value) SELECT 1, number FROM numbers(1); +INSERT INTO join_test_right (key, value) SELECT 1, number FROM numbers(1); +SELECT count() == 3 FROM (SELECT 1 as key) t1 ANY RIGHT JOIN join_test_right ON t1.key = join_test_right.key; +INSERT INTO join_test_right (key, value) SELECT 1, number FROM numbers(7); +SELECT count() == 10 FROM (SELECT 1 as key) t1 ANY RIGHT JOIN join_test_right ON t1.key = join_test_right.key; +SELECT count() == 10 FROM (SELECT 2 as key) t1 ANY RIGHT JOIN join_test_right ON t1.key = join_test_right.key; + +DROP TABLE IF EXISTS join_test; +DROP TABLE IF EXISTS join_test_right; diff --git a/parser/testdata/02725_cnf_large_check/ast.json b/parser/testdata/02725_cnf_large_check/ast.json new file mode 100644 index 000000000..e186ce713 --- /dev/null +++ b/parser/testdata/02725_cnf_large_check/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02725_cnf (children 1)" + }, + { + "explain": " Identifier 02725_cnf" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001403515, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02725_cnf_large_check/metadata.json b/parser/testdata/02725_cnf_large_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02725_cnf_large_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02725_cnf_large_check/query.sql b/parser/testdata/02725_cnf_large_check/query.sql new file mode 100644 index 000000000..2567636c0 --- /dev/null +++ b/parser/testdata/02725_cnf_large_check/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS 02725_cnf; + +CREATE TABLE 02725_cnf (c0 UInt8, c1 UInt8, c2 UInt8, c3 UInt8, c4 UInt8, c5 UInt8, c6 UInt8, c7 UInt8, c8
UInt8, c9 UInt8) ENGINE = Memory; + +INSERT INTO 02725_cnf VALUES (0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 1), (0, 0, 0, 0, 0, 0, 0, 0, 1, 0), (0, 0, 0, 0, 0, 0, 0, 0, 1, 1), (0, 0, 0, 0, 0, 0, 0, 1, 0, 0), (0, 0, 0, 0, 0, 0, 0, 1, 0, 1), (0, 0, 0, 0, 0, 0, 0, 1, 1, 0), (0, 0, 0, 0, 0, 0, 0, 1, 1, 1); + +SELECT count() +FROM 02725_cnf +WHERE (c5 AND (NOT c0)) OR ((NOT c3) AND (NOT c6) AND (NOT c1) AND (NOT c6)) OR (c7 AND (NOT c3) AND (NOT c5) AND (NOT c7)) OR ((NOT c8) AND c5) OR ((NOT c0)) OR ((NOT c8) AND (NOT c5) AND c1 AND c6 AND c3) OR (c7 AND (NOT c0) AND c6 AND c1 AND (NOT c2)) OR (c3 AND (NOT c9) AND c1) +SETTINGS convert_query_to_cnf = 1, enable_analyzer = 1; + +SELECT count() +FROM 02725_cnf +WHERE (c5 AND (NOT c0)) OR ((NOT c3) AND (NOT c6) AND (NOT c1) AND (NOT c6)) OR (c7 AND (NOT c3) AND (NOT c5) AND (NOT c7)) OR ((NOT c8) AND c5) OR ((NOT c0)) OR ((NOT c8) AND (NOT c5) AND c1 AND c6 AND c3) OR (c7 AND (NOT c0) AND c6 AND c1 AND (NOT c2)) OR (c3 AND (NOT c9) AND c1) +SETTINGS convert_query_to_cnf = 1, enable_analyzer = 0; + +SELECT count() +FROM 02725_cnf +WHERE ((NOT c2) AND c2 AND (NOT c1)) OR ((NOT c2) AND c3 AND (NOT c5)) OR ((NOT c7) AND (NOT c8)) OR (c9 AND c6 AND c8 AND (NOT c8) AND (NOT c7)) +SETTINGS convert_query_to_cnf = 1, enable_analyzer = 1; + +SELECT count() +FROM 02725_cnf +WHERE ((NOT c2) AND c2 AND (NOT c1)) OR ((NOT c2) AND c3 AND (NOT c5)) OR ((NOT c7) AND (NOT c8)) OR (c9 AND c6 AND c8 AND (NOT c8) AND (NOT c7)) +SETTINGS convert_query_to_cnf = 1, enable_analyzer = 0; + +DROP TABLE 02725_cnf; diff --git a/parser/testdata/02725_memory-for-merges/ast.json b/parser/testdata/02725_memory-for-merges/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02725_memory-for-merges/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02725_memory-for-merges/metadata.json b/parser/testdata/02725_memory-for-merges/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02725_memory-for-merges/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02725_memory-for-merges/query.sql b/parser/testdata/02725_memory-for-merges/query.sql new file mode 100644 index 000000000..8c7856834 --- /dev/null +++ b/parser/testdata/02725_memory-for-merges/query.sql @@ -0,0 +1,26 @@ +-- Tags: no-object-storage, no-random-merge-tree-settings, no-fasttest +-- We allocate a lot of memory for buffers when reading or writing to S3 + +DROP TABLE IF EXISTS 02725_memory_for_merges SYNC; + +CREATE TABLE 02725_memory_for_merges +( n UInt64, + s String +) +ENGINE = MergeTree +ORDER BY n +SETTINGS merge_max_block_size_bytes=1024, index_granularity_bytes=1024; + +INSERT INTO 02725_memory_for_merges SELECT number, randomPrintableASCII(1000000) FROM numbers(100); +INSERT INTO 02725_memory_for_merges SELECT number, randomPrintableASCII(1000000) FROM numbers(100); +INSERT INTO 02725_memory_for_merges SELECT number, randomPrintableASCII(1000000) FROM numbers(100); +INSERT INTO 02725_memory_for_merges SELECT number, randomPrintableASCII(1000000) FROM numbers(100); +INSERT INTO 02725_memory_for_merges SELECT number, randomPrintableASCII(1000000) FROM numbers(100); + +OPTIMIZE TABLE 02725_memory_for_merges FINAL; + +SYSTEM FLUSH LOGS part_log; + +SELECT (sum(peak_memory_usage) < 1024 * 1024 * 200 AS x) ? 
x : sum(peak_memory_usage) from system.part_log where database=currentDatabase() and table='02725_memory_for_merges' and event_type='MergeParts'; + +DROP TABLE IF EXISTS 02725_memory_for_merges SYNC; diff --git a/parser/testdata/02725_null_group_key_with_rollup/ast.json b/parser/testdata/02725_null_group_key_with_rollup/ast.json new file mode 100644 index 000000000..44bcbd1d4 --- /dev/null +++ b/parser/testdata/02725_null_group_key_with_rollup/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001417984, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02725_null_group_key_with_rollup/metadata.json b/parser/testdata/02725_null_group_key_with_rollup/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02725_null_group_key_with_rollup/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02725_null_group_key_with_rollup/query.sql b/parser/testdata/02725_null_group_key_with_rollup/query.sql new file mode 100644 index 000000000..98f354e29 --- /dev/null +++ b/parser/testdata/02725_null_group_key_with_rollup/query.sql @@ -0,0 +1,13 @@ +set allow_suspicious_low_cardinality_types=1; +DROP TABLE IF EXISTS group_by_null_key; +CREATE TABLE group_by_null_key (c1 Nullable(Int32), c2 LowCardinality(Nullable(Int32))) ENGINE = Memory(); +INSERT INTO group_by_null_key VALUES (null, null), (null, null); + +select c1, count(*) from group_by_null_key group by c1 WITH TOTALS; +select c2, count(*) from group_by_null_key group by c2 WITH TOTALS; + +select c1, count(*) from group_by_null_key group by ROLLUP(c1); +select c2, count(*) from group_by_null_key group by ROLLUP(c2); + + +DROP TABLE group_by_null_key; diff --git a/parser/testdata/02725_sleep_max_time/ast.json b/parser/testdata/02725_sleep_max_time/ast.json new file mode 100644 index 000000000..16d0f093f --- /dev/null +++ b/parser/testdata/02725_sleep_max_time/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Function sleepEachRow (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_0.05" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001124576, + "rows_read": 13, + "bytes_read": 493 + } +} diff --git a/parser/testdata/02725_sleep_max_time/metadata.json b/parser/testdata/02725_sleep_max_time/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02725_sleep_max_time/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02725_sleep_max_time/query.sql b/parser/testdata/02725_sleep_max_time/query.sql new file mode 100644 index 000000000..b8378aee1 --- /dev/null +++ b/parser/testdata/02725_sleep_max_time/query.sql @@ -0,0 +1 @@ +SELECT * FROM system.numbers WHERE sleepEachRow(0.05) LIMIT 10; -- { serverError 
TOO_SLOW } diff --git a/parser/testdata/02725_url_support_virtual_column/ast.json b/parser/testdata/02725_url_support_virtual_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02725_url_support_virtual_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02725_url_support_virtual_column/metadata.json b/parser/testdata/02725_url_support_virtual_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02725_url_support_virtual_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02725_url_support_virtual_column/query.sql b/parser/testdata/02725_url_support_virtual_column/query.sql new file mode 100644 index 000000000..5ba4e45df --- /dev/null +++ b/parser/testdata/02725_url_support_virtual_column/query.sql @@ -0,0 +1,7 @@ +-- Tags: no-parallel + +select _path from url('http://127.0.0.1:8123/?query=select+1&user=default', LineAsString, 's String'); +select _file from url('http://127.0.0.1:8123/?query=select+1&user=default', LineAsString, 's String'); +select _file, count() from url('http://127.0.0.1:8123/?query=select+1&user=default', LineAsString, 's String') group by _file; +select _path, _file, s from url('http://127.0.0.1:8123/?query=select+1&user=default', LineAsString, 's String'); +select _path, _file, s from url('http://127.0.0.1:8123/?query=select+1&user=default&password=wrong', LineAsString, 's String'); -- { serverError RECEIVED_ERROR_FROM_REMOTE_IO_SERVER } diff --git a/parser/testdata/02726_async_insert_flush_queue/ast.json b/parser/testdata/02726_async_insert_flush_queue/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02726_async_insert_flush_queue/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02726_async_insert_flush_queue/metadata.json b/parser/testdata/02726_async_insert_flush_queue/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02726_async_insert_flush_queue/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02726_async_insert_flush_queue/query.sql b/parser/testdata/02726_async_insert_flush_queue/query.sql new file mode 100644 index 000000000..3648285a4 --- /dev/null +++ b/parser/testdata/02726_async_insert_flush_queue/query.sql @@ -0,0 +1,42 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS t_async_inserts_flush; + +CREATE TABLE t_async_inserts_flush (a UInt64) ENGINE = Memory; + +-- { echo ON } + +SET async_insert = 1; +SET wait_for_async_insert = 0; +-- Disable the adaptive timeout to prevent an immediate push of the first message (if the queue's last push was long ago) +SET async_insert_use_adaptive_busy_timeout=0; +SET async_insert_busy_timeout_max_ms = 10000000; + +INSERT INTO t_async_inserts_flush VALUES (1) (2); + +INSERT INTO t_async_inserts_flush FORMAT JSONEachRow {"a": 10} {"a": 20}; + +INSERT INTO t_async_inserts_flush FORMAT JSONEachRow {"a": "str"}; + +INSERT INTO t_async_inserts_flush FORMAT JSONEachRow {"a": 100} {"a": 200}; + +INSERT INTO t_async_inserts_flush VALUES (3) (4) (5); + +SELECT sleep(1) FORMAT Null; + +SELECT format, length(entries.query_id) FROM system.asynchronous_inserts +WHERE database = currentDatabase() AND table = 't_async_inserts_flush' +ORDER BY format; + +SELECT count() FROM t_async_inserts_flush; + +SYSTEM FLUSH ASYNC INSERT QUEUE; + +SELECT count() FROM system.asynchronous_inserts +WHERE database = currentDatabase() AND table = 't_async_inserts_flush'; + +SELECT
count() FROM t_async_inserts_flush; + +SELECT * FROM t_async_inserts_flush ORDER BY a; + +DROP TABLE t_async_inserts_flush; diff --git a/parser/testdata/02730_dictionary_hashed_load_factor_element_count/ast.json b/parser/testdata/02730_dictionary_hashed_load_factor_element_count/ast.json new file mode 100644 index 000000000..b0ca08d8c --- /dev/null +++ b/parser/testdata/02730_dictionary_hashed_load_factor_element_count/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery dict_sharded (children 1)" + }, + { + "explain": " Identifier dict_sharded" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00114493, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/02730_dictionary_hashed_load_factor_element_count/metadata.json b/parser/testdata/02730_dictionary_hashed_load_factor_element_count/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02730_dictionary_hashed_load_factor_element_count/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02730_dictionary_hashed_load_factor_element_count/query.sql b/parser/testdata/02730_dictionary_hashed_load_factor_element_count/query.sql new file mode 100644 index 000000000..1e42f5688 --- /dev/null +++ b/parser/testdata/02730_dictionary_hashed_load_factor_element_count/query.sql @@ -0,0 +1,17 @@ +DROP DICTIONARY IF EXISTS dict_sharded; +DROP DICTIONARY IF EXISTS dict_sharded_multi; +DROP TABLE IF EXISTS dict_data; + +CREATE TABLE dict_data (key UInt64, v0 UInt16, v1 UInt16, v2 UInt16, v3 UInt16, v4 UInt16) engine=Memory() AS SELECT number, number%65535, number%65535, number%6553, number%655355, number%65535 FROM numbers(1e6); + +CREATE DICTIONARY dict_sharded (key UInt64, v0 UInt16) PRIMARY KEY key SOURCE(CLICKHOUSE(TABLE 'dict_data')) LIFETIME(MIN 0 MAX 0) LAYOUT(HASHED(SHARDS 32)); +SYSTEM RELOAD DICTIONARY dict_sharded; +SELECT name, length(attribute.names), element_count, round(load_factor, 4) FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict_sharded'; +DROP DICTIONARY dict_sharded; + +CREATE DICTIONARY dict_sharded_multi (key UInt64, v0 UInt16, v1 UInt16, v2 UInt16, v3 UInt16, v4 UInt16) PRIMARY KEY key SOURCE(CLICKHOUSE(TABLE 'dict_data')) LIFETIME(MIN 0 MAX 0) LAYOUT(HASHED(SHARDS 32)); +SYSTEM RELOAD DICTIONARY dict_sharded_multi; +SELECT name, length(attribute.names), element_count, round(load_factor, 4) FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict_sharded_multi'; +DROP DICTIONARY dict_sharded_multi; + +DROP TABLE dict_data; diff --git a/parser/testdata/02730_with_fill_by_sorting_prefix/ast.json b/parser/testdata/02730_with_fill_by_sorting_prefix/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02730_with_fill_by_sorting_prefix/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02730_with_fill_by_sorting_prefix/metadata.json b/parser/testdata/02730_with_fill_by_sorting_prefix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02730_with_fill_by_sorting_prefix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02730_with_fill_by_sorting_prefix/query.sql b/parser/testdata/02730_with_fill_by_sorting_prefix/query.sql new file mode 100644 index 000000000..e2f1ce29d --- /dev/null +++ b/parser/testdata/02730_with_fill_by_sorting_prefix/query.sql @@ -0,0 +1,60 @@ +-- { echoOn } +set 
use_with_fill_by_sorting_prefix=1; + +-- corner case with constant sort prefix +SELECT number +FROM numbers(1) +ORDER BY 10 ASC, number DESC WITH FILL FROM 1 +SETTINGS enable_positional_arguments=0; + +-- sensor table +drop table if exists ts; +create table ts (sensor_id UInt64, timestamp UInt64, value Float64) ENGINE=MergeTree() ORDER BY (sensor_id, timestamp); +insert into ts VALUES (1, 10, 1), (1, 12, 2), (3, 5, 1), (3, 7, 3), (5, 1, 1), (5, 3, 1); +-- FillingTransform: 6 rows will be processed in 1 chunk +select * from ts order by sensor_id, timestamp with fill step 1; + +drop table if exists ts; +create table ts (sensor_id UInt64, timestamp UInt64, value Float64) ENGINE=MergeTree() ORDER BY (sensor_id, timestamp); +system stop merges ts; +-- FillingTransform: 6 rows will be processed in 3 chunks with 2 rows each +insert into ts VALUES (1, 10, 1), (1, 12, 1); +insert into ts VALUES (3, 5, 1), (3, 7, 1); +insert into ts VALUES (5, 1, 1), (5, 3, 1); +select * from ts order by sensor_id, timestamp with fill step 1 settings max_block_size=2; + +drop table if exists ts; +create table ts (sensor_id UInt64, timestamp UInt64, value Float64) ENGINE=MergeTree() ORDER BY (sensor_id, timestamp); +system stop merges ts; +-- FillingTransform: 6 rows will be processed in 2 chunks with 3 rows each +insert into ts VALUES (1, 10, 1), (1, 12, 1), (3, 5, 1); +insert into ts VALUES (3, 7, 1), (5, 1, 1), (5, 3, 1); +select * from ts order by sensor_id, timestamp with fill step 1 settings max_block_size=3; + +-- FROM and TO +-- ASC order in sorting prefix +select * from ts order by sensor_id, timestamp with fill from 6 to 10 step 1 interpolate (value as 9999); +select * from ts order by sensor_id, timestamp with fill from 6 to 10 step 1 interpolate (value as 9999) settings use_with_fill_by_sorting_prefix=0; + +-- DESC order in sorting prefix +select * from ts order by sensor_id DESC, timestamp with fill from 6 to 10 step 1 interpolate (value as 9999); +select * from ts order by sensor_id DESC, timestamp with fill from 6 to 10 step 1 interpolate (value as 9999) settings use_with_fill_by_sorting_prefix=0; + +-- without TO +-- ASC order in sorting prefix +select * from ts order by sensor_id, timestamp with fill from 6 step 1 interpolate (value as 9999); +select * from ts order by sensor_id, timestamp with fill from 6 step 1 interpolate (value as 9999) settings use_with_fill_by_sorting_prefix=0; +-- DESC order in sorting prefix +select * from ts order by sensor_id DESC, timestamp with fill from 6 step 1 interpolate (value as 9999); +select * from ts order by sensor_id DESC, timestamp with fill from 6 step 1 interpolate (value as 9999) settings use_with_fill_by_sorting_prefix=0; + +-- without FROM +-- ASC order in sorting prefix +select * from ts order by sensor_id, timestamp with fill to 10 step 1 interpolate (value as 9999); +select * from ts order by sensor_id, timestamp with fill to 10 step 1 interpolate (value as 9999) settings use_with_fill_by_sorting_prefix=0; +-- DESC order in sorting prefix +select * from ts order by sensor_id DESC, timestamp with fill to 10 step 1 interpolate (value as 9999); +select * from ts order by sensor_id DESC, timestamp with fill to 10 step 1 interpolate (value as 9999) settings use_with_fill_by_sorting_prefix=0; + +-- checking that sorting prefix columns can't be used in INTERPOLATE +SELECT * FROM ts ORDER BY sensor_id, value, timestamp WITH FILL FROM 6 TO 10 INTERPOLATE ( value AS 1 ); -- { serverError INVALID_WITH_FILL_EXPRESSION } diff --git
a/parser/testdata/02731_auto_convert_dictionary_layout_to_complex_by_complex_keys/ast.json b/parser/testdata/02731_auto_convert_dictionary_layout_to_complex_by_complex_keys/ast.json new file mode 100644 index 000000000..2b8ef48cd --- /dev/null +++ b/parser/testdata/02731_auto_convert_dictionary_layout_to_complex_by_complex_keys/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery dict_flat_simple (children 1)" + }, + { + "explain": " Identifier dict_flat_simple" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001165015, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/02731_auto_convert_dictionary_layout_to_complex_by_complex_keys/metadata.json b/parser/testdata/02731_auto_convert_dictionary_layout_to_complex_by_complex_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02731_auto_convert_dictionary_layout_to_complex_by_complex_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02731_auto_convert_dictionary_layout_to_complex_by_complex_keys/query.sql b/parser/testdata/02731_auto_convert_dictionary_layout_to_complex_by_complex_keys/query.sql new file mode 100644 index 000000000..753b9f663 --- /dev/null +++ b/parser/testdata/02731_auto_convert_dictionary_layout_to_complex_by_complex_keys/query.sql @@ -0,0 +1,35 @@ +DROP DICTIONARY IF EXISTS dict_flat_simple; +DROP DICTIONARY IF EXISTS dict_hashed_simple_Decimal128; +DROP DICTIONARY IF EXISTS dict_hashed_simple_Float32; +DROP DICTIONARY IF EXISTS dict_hashed_simple_String; +DROP DICTIONARY IF EXISTS dict_hashed_simple_auto_convert; +DROP TABLE IF EXISTS dict_data; + +CREATE TABLE dict_data (v0 UInt16, v1 Int16, v2 Float32, v3 Decimal128(10), v4 String) engine=Memory() AS SELECT number, number%65535, number*1.1, number*1.1, 'foo' FROM numbers(10); + +CREATE DICTIONARY dict_flat_simple (v0 UInt16, v1 UInt16, v2 UInt16) PRIMARY KEY v0 SOURCE(CLICKHOUSE(TABLE 'dict_data')) LIFETIME(0) LAYOUT(flat()); +SYSTEM RELOAD DICTIONARY dict_flat_simple; +SELECT name, type FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict_flat_simple'; +DROP DICTIONARY dict_flat_simple; + +CREATE DICTIONARY dict_hashed_simple_Decimal128 (v3 Decimal128(10), v1 UInt16, v2 Float32) PRIMARY KEY v3 SOURCE(CLICKHOUSE(TABLE 'dict_data')) LIFETIME(0) LAYOUT(hashed()); +SYSTEM RELOAD DICTIONARY dict_hashed_simple_Decimal128; +SELECT name, type FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict_hashed_simple_Decimal128'; +DROP DICTIONARY dict_hashed_simple_Decimal128; + +CREATE DICTIONARY dict_hashed_simple_Float32 (v2 Float32, v3 Decimal128(10), v4 String) PRIMARY KEY v2 SOURCE(CLICKHOUSE(TABLE 'dict_data')) LIFETIME(0) LAYOUT(hashed()); +SYSTEM RELOAD DICTIONARY dict_hashed_simple_Float32; +SELECT name, type FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict_hashed_simple_Float32'; +DROP DICTIONARY dict_hashed_simple_Float32; + +CREATE DICTIONARY dict_hashed_simple_String (v4 String, v3 Decimal128(10), v2 Float32) PRIMARY KEY v4 SOURCE(CLICKHOUSE(TABLE 'dict_data')) LIFETIME(0) LAYOUT(hashed()); +SYSTEM RELOAD DICTIONARY dict_hashed_simple_String; +SELECT name, type FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict_hashed_simple_String'; +DROP DICTIONARY dict_hashed_simple_String; + +CREATE DICTIONARY dict_hashed_simple_auto_convert (v0 UInt16, v1 Int16, v2 UInt16) PRIMARY KEY
v0,v1 SOURCE(CLICKHOUSE(TABLE 'dict_data')) LIFETIME(0) LAYOUT(hashed()); +SYSTEM RELOAD DICTIONARY dict_hashed_simple_auto_convert; +SELECT name, type FROM system.dictionaries WHERE database = currentDatabase() AND name = 'dict_hashed_simple_auto_convert'; +DROP DICTIONARY dict_hashed_simple_auto_convert; + +DROP TABLE dict_data; diff --git a/parser/testdata/02731_formats_s3/ast.json b/parser/testdata/02731_formats_s3/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02731_formats_s3/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02731_formats_s3/metadata.json b/parser/testdata/02731_formats_s3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02731_formats_s3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02731_formats_s3/query.sql b/parser/testdata/02731_formats_s3/query.sql new file mode 100644 index 000000000..e54a1c10b --- /dev/null +++ b/parser/testdata/02731_formats_s3/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +-- Reading a parquet file of between ~1 MB and ~2 MB from s3 was broken at some point +-- (bug in CachedOnDiskReadBufferFromFile). +select sum(*) from s3(s3_conn, filename='02731.parquet') settings remote_filesystem_read_method='threadpool', remote_filesystem_read_prefetch=1; + +-- Reading arrow files of ~40 MB (max_download_buffer_size * 4) from s3 was broken at some point +-- (bug in ParallelReadBuffer). +select sum(*) from s3(s3_conn, filename='02731.arrow') settings remote_filesystem_read_method='read', max_download_buffer_size = 1048576; diff --git a/parser/testdata/02731_in_operator_with_one_size_tuple/ast.json b/parser/testdata/02731_in_operator_with_one_size_tuple/ast.json new file mode 100644 index 000000000..a4cd7535a --- /dev/null +++ b/parser/testdata/02731_in_operator_with_one_size_tuple/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 3)" + }, + { + "explain": " Identifier test" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration report_date (children 1)" + }, + { + "explain": " DataType Date" + }, + { + "explain": " ColumnDeclaration sspid (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 3)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier report_date" + }, + { + "explain": " Identifier report_date" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001547825, + "rows_read": 12, + "bytes_read": 422 + } +} diff --git a/parser/testdata/02731_in_operator_with_one_size_tuple/metadata.json b/parser/testdata/02731_in_operator_with_one_size_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02731_in_operator_with_one_size_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02731_in_operator_with_one_size_tuple/query.sql b/parser/testdata/02731_in_operator_with_one_size_tuple/query.sql new file mode 100644 index 000000000..eab7d24a9 --- /dev/null +++ b/parser/testdata/02731_in_operator_with_one_size_tuple/query.sql @@ -0,0 +1,10 @@ +CREATE TABLE test(`report_date` Date, `sspid` UInt64) ENGINE MergeTree PARTITION BY report_date ORDER BY report_date; + +INSERT INTO test SELECT toDate('2023-04-20'), 0;
+INSERT INTO test SELECT toDate('2023-04-19'), 0; +INSERT INTO test SELECT toDate('2023-04-17'), 1; +INSERT INTO test SELECT toDate('2023-04-17'), 1; + + +SELECT * FROM test WHERE tuple(report_date) IN tuple(toDate('2023-04-17')); +DROP TABLE test; \ No newline at end of file diff --git a/parser/testdata/02731_nothing_deserialization/ast.json b/parser/testdata/02731_nothing_deserialization/ast.json new file mode 100644 index 000000000..c49567831 --- /dev/null +++ b/parser/testdata/02731_nothing_deserialization/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '\u0001\\0'" + }, + { + "explain": " Literal 'AggregateFunction(nothingArrayIf, Array(Nullable(Nothing)), Nullable(Nothing))'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001373771, + "rows_read": 8, + "bytes_read": 356 + } +} diff --git a/parser/testdata/02731_nothing_deserialization/metadata.json b/parser/testdata/02731_nothing_deserialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02731_nothing_deserialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02731_nothing_deserialization/query.sql b/parser/testdata/02731_nothing_deserialization/query.sql new file mode 100644 index 000000000..7526bce35 --- /dev/null +++ b/parser/testdata/02731_nothing_deserialization/query.sql @@ -0,0 +1 @@ +SELECT CAST('\x01\x00' AS AggregateFunction(nothingArrayIf, Array(Nullable(Nothing)), Nullable(Nothing))); -- { serverError INCORRECT_DATA } diff --git a/parser/testdata/02731_parallel_replicas_join_subquery/ast.json b/parser/testdata/02731_parallel_replicas_join_subquery/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02731_parallel_replicas_join_subquery/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02731_parallel_replicas_join_subquery/metadata.json b/parser/testdata/02731_parallel_replicas_join_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02731_parallel_replicas_join_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02731_parallel_replicas_join_subquery/query.sql b/parser/testdata/02731_parallel_replicas_join_subquery/query.sql new file mode 100644 index 000000000..774b371bc --- /dev/null +++ b/parser/testdata/02731_parallel_replicas_join_subquery/query.sql @@ -0,0 +1,254 @@ +-- Tags: zookeeper + +DROP TABLE IF EXISTS join_inner_table SYNC; + +CREATE TABLE join_inner_table +( + id UUID, + key String, + number Int64, + value1 String, + value2 String, + time Int64 +) +ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/join_inner_table', 'r1') +ORDER BY (id, number, key); + +INSERT INTO join_inner_table +SELECT + '833c9e22-c245-4eb5-8745-117a9a1f26b1'::UUID as id, + rowNumberInAllBlocks()::String as key, + * FROM generateRandom('number Int64, value1 String, value2 String, time Int64', 1, 10, 2) +LIMIT 100; + +SET max_parallel_replicas = 3; +SET cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; +SET joined_subquery_requires_alias = 0; + +SELECT 
'=============== INNER QUERY (NO PARALLEL) ==============='; + +SELECT + key, + value1, + value2, + toUInt64(min(time)) AS start_ts +FROM join_inner_table + PREWHERE (id = '833c9e22-c245-4eb5-8745-117a9a1f26b1') AND (number > toUInt64('1610517366120')) +GROUP BY key, value1, value2 +ORDER BY key, value1, value2 +LIMIT 10; + +SELECT '=============== no-analyzer: INNER QUERY (PARALLEL), QUERIES EXECUTED BY PARALLEL INNER QUERY ALONE ==============='; + +-- Parallel inner query alone without analyzer +SELECT + key, + value1, + value2, + toUInt64(min(time)) AS start_ts +FROM join_inner_table +PREWHERE (id = '833c9e22-c245-4eb5-8745-117a9a1f26b1') AND (number > toUInt64('1610517366120')) +GROUP BY key, value1, value2 +ORDER BY key, value1, value2 +LIMIT 10 +SETTINGS enable_parallel_replicas = 1, enable_analyzer=0, parallel_replicas_only_with_analyzer=0; + +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['ParallelReplicasQueryCount'], replaceRegexpAll(query, '_data_(\d+)_(\d+)', '_data_') as query +FROM system.query_log +WHERE + event_date >= yesterday() + AND type = 'QueryFinish' + AND query_id IN + ( + SELECT query_id + FROM system.query_log + WHERE + current_database = currentDatabase() + AND event_date >= yesterday() + AND type = 'QueryFinish' + AND query LIKE '-- Parallel inner query alone without analyzer%' + ); + +SELECT '=============== analyzer: INNER QUERY (PARALLEL), QUERIES EXECUTED BY PARALLEL INNER QUERY ALONE ==============='; + +-- Parallel inner query alone with analyzer +SELECT + key, + value1, + value2, + toUInt64(min(time)) AS start_ts +FROM join_inner_table +PREWHERE (id = '833c9e22-c245-4eb5-8745-117a9a1f26b1') AND (number > toUInt64('1610517366120')) +GROUP BY key, value1, value2 +ORDER BY key, value1, value2 +LIMIT 10 +SETTINGS enable_parallel_replicas = 1, enable_analyzer=1; + +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['ParallelReplicasQueryCount'], replaceRegexpAll(query, '_data_(\d+)_(\d+)', '_data_') as query +FROM system.query_log +WHERE + event_date >= yesterday() + AND type = 'QueryFinish' + AND query_id IN + ( + SELECT query_id + FROM system.query_log + WHERE + current_database = currentDatabase() + AND event_date >= yesterday() + AND type = 'QueryFinish' + AND query LIKE '-- Parallel inner query alone with analyzer%' + ); + +---- Query with JOIN + +DROP TABLE IF EXISTS join_outer_table SYNC; + +CREATE TABLE join_outer_table +( + id UUID, + key String, + otherValue1 String, + otherValue2 String, + time Int64 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/join_outer_table', 'r1') +ORDER BY (id, time, key); + +INSERT INTO join_outer_table +SELECT + '833c9e22-c245-4eb5-8745-117a9a1f26b1'::UUID as id, + (rowNumberInAllBlocks() % 10)::String as key, + * FROM generateRandom('otherValue1 String, otherValue2 String, time Int64', 1, 10, 2) +LIMIT 100; + + +SELECT '=============== OUTER QUERY (NO PARALLEL) ==============='; + +SELECT + value1, + value2, + avg(count) AS avg +FROM +( + SELECT + key, + value1, + value2, + count() AS count + FROM join_outer_table + INNER JOIN + ( + SELECT + key, + value1, + value2, + toUInt64(min(time)) AS start_ts + FROM join_inner_table + PREWHERE (id = '833c9e22-c245-4eb5-8745-117a9a1f26b1') AND (number > toUInt64('1610517366120')) + GROUP BY key, value1, value2 + ) USING (key) + GROUP BY key, value1, value2 +) +GROUP BY value1, value2 +ORDER BY value1, value2; + +SELECT '=============== no-analyzer: OUTER QUERY (PARALLEL) ==============='; + +-- Parallel full query without analyzer +SELECT + value1, + value2, + 
avg(count) AS avg +FROM + ( + SELECT + key, + value1, + value2, + count() AS count + FROM join_outer_table + INNER JOIN + ( + SELECT + key, + value1, + value2, + toUInt64(min(time)) AS start_ts + FROM join_inner_table + PREWHERE (id = '833c9e22-c245-4eb5-8745-117a9a1f26b1') AND (number > toUInt64('1610517366120')) + GROUP BY key, value1, value2 + ) USING (key) + GROUP BY key, value1, value2 + ) +GROUP BY value1, value2 +ORDER BY value1, value2 +SETTINGS enable_parallel_replicas = 1, enable_analyzer=0, parallel_replicas_only_with_analyzer=0; + +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['ParallelReplicasQueryCount'], replaceRegexpAll(query, '_data_(\d+)_(\d+)', '_data_') as query +FROM system.query_log +WHERE + event_date >= yesterday() + AND type = 'QueryFinish' + AND query_id IN + ( + SELECT query_id + FROM system.query_log + WHERE + current_database = currentDatabase() + AND event_date >= yesterday() + AND type = 'QueryFinish' + AND query LIKE '-- Parallel full query without analyzer%' + ); + +SELECT '=============== analyzer: OUTER QUERY (PARALLEL) ==============='; + +-- Parallel full query with analyzer +SELECT + value1, + value2, + avg(count) AS avg +FROM + ( + SELECT + key, + value1, + value2, + count() AS count + FROM join_outer_table + INNER JOIN + ( + SELECT + key, + value1, + value2, + toUInt64(min(time)) AS start_ts + FROM join_inner_table + PREWHERE (id = '833c9e22-c245-4eb5-8745-117a9a1f26b1') AND (number > toUInt64('1610517366120')) + GROUP BY key, value1, value2 + ) USING (key) + GROUP BY key, value1, value2 + ) +GROUP BY value1, value2 +ORDER BY value1, value2 +SETTINGS enable_parallel_replicas = 1, enable_analyzer=1; + +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['ParallelReplicasQueryCount'], replaceRegexpAll(query, '_data_(\d+)_(\d+)', '_data_') as query +FROM system.query_log +WHERE + event_date >= yesterday() + AND type = 'QueryFinish' + AND query_id IN + ( + SELECT query_id + FROM system.query_log + WHERE + current_database = currentDatabase() + AND event_date >= yesterday() + AND type = 'QueryFinish' + AND query LIKE '-- Parallel full query with analyzer%' + ); diff --git a/parser/testdata/02731_replace_partition_from_temporary_table/ast.json b/parser/testdata/02731_replace_partition_from_temporary_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02731_replace_partition_from_temporary_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02731_replace_partition_from_temporary_table/metadata.json b/parser/testdata/02731_replace_partition_from_temporary_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02731_replace_partition_from_temporary_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02731_replace_partition_from_temporary_table/query.sql b/parser/testdata/02731_replace_partition_from_temporary_table/query.sql new file mode 100644 index 000000000..db0cbb3fa --- /dev/null +++ b/parser/testdata/02731_replace_partition_from_temporary_table/query.sql @@ -0,0 +1,49 @@ +-- Tags: no-replicated-database, no-shared-merge-tree +-- SharedMergeTree doesn't support replace partition from MergeTree engine + +DROP TEMPORARY TABLE IF EXISTS src; +DROP TABLE IF EXISTS dst; +DROP TABLE IF EXISTS rdst; + +CREATE TEMPORARY TABLE src (p UInt64, k String, d UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY k; +CREATE TABLE dst (p UInt64, k String, d UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY k; +CREATE TABLE 
rdst (p UInt64, k String, d UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_alter_attach_00626_rdst', 'r1') PARTITION BY p ORDER BY k; + +SELECT 'Initial'; +INSERT INTO src VALUES (0, '0', 1); +INSERT INTO src VALUES (1, '0', 1); +INSERT INTO src VALUES (1, '1', 1); +INSERT INTO src VALUES (2, '0', 1); +INSERT INTO src VALUES (3, '0', 1); +INSERT INTO src VALUES (3, '1', 1); + +INSERT INTO dst VALUES (0, '1', 2); +INSERT INTO dst VALUES (1, '1', 2), (1, '2', 2); +INSERT INTO dst VALUES (2, '1', 2); +INSERT INTO dst VALUES (3, '1', 2), (3, '2', 2); + +INSERT INTO rdst VALUES (0, '1', 2); +INSERT INTO rdst VALUES (1, '1', 2), (1, '2', 2); +INSERT INTO rdst VALUES (2, '1', 2); +INSERT INTO rdst VALUES (3, '1', 2), (3, '2', 2); + +SELECT count(), sum(d) FROM dst; +SELECT count(), sum(d) FROM rdst; + +SELECT 'REPLACE simple'; +ALTER TABLE dst REPLACE PARTITION 1 FROM src; +SELECT count(), sum(d) FROM dst; +ALTER TABLE rdst REPLACE PARTITION 3 FROM src; +SELECT count(), sum(d) FROM rdst; + +SELECT 'ATTACH FROM'; +ALTER TABLE dst DROP PARTITION 1; +ALTER TABLE dst ATTACH PARTITION 1 FROM src; +SELECT count(), sum(d) FROM dst; +ALTER TABLE rdst DROP PARTITION 3; +ALTER TABLE rdst ATTACH PARTITION 1 FROM src; +SELECT count(), sum(d) FROM rdst; + +DROP TEMPORARY TABLE IF EXISTS src; +DROP TABLE IF EXISTS dst; +DROP TABLE IF EXISTS rdst; diff --git a/parser/testdata/02732_transform_fuzz/ast.json b/parser/testdata/02732_transform_fuzz/ast.json new file mode 100644 index 000000000..b274969f0 --- /dev/null +++ b/parser/testdata/02732_transform_fuzz/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function caseWithExpr (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayReduce (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001445514, + "rows_read": 13, + "bytes_read": 496 + } +} diff --git a/parser/testdata/02732_transform_fuzz/metadata.json b/parser/testdata/02732_transform_fuzz/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02732_transform_fuzz/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02732_transform_fuzz/query.sql b/parser/testdata/02732_transform_fuzz/query.sql new file mode 100644 index 000000000..872cf3a65 --- /dev/null +++ b/parser/testdata/02732_transform_fuzz/query.sql @@ -0,0 +1 @@ +SELECT caseWithExpr(arrayReduce(NULL, []), []); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/02733_distinct/ast.json b/parser/testdata/02733_distinct/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02733_distinct/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02733_distinct/metadata.json b/parser/testdata/02733_distinct/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02733_distinct/metadata.json @@ -0,0 +1 @@ +{"todo": true} 
diff --git a/parser/testdata/02733_distinct/query.sql b/parser/testdata/02733_distinct/query.sql new file mode 100644 index 000000000..bbb26b17d --- /dev/null +++ b/parser/testdata/02733_distinct/query.sql @@ -0,0 +1,19 @@ +-- Tags: no-random-settings +-- there is a bug if `optimize_distinct_in_order` is true + +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + c1 String, + c2 String, + c3 String +) +ENGINE = ReplacingMergeTree +ORDER BY (c1, c3); + +INSERT INTO test(c1, c2, c3) VALUES ('', '', '1'), ('', '', '2'),('v1', 'v2', '3'),('v1', 'v2', '4'),('v1', 'v2', '5'); + +SELECT c1, c2, c3 FROM test GROUP BY c1, c2, c3 ORDER BY c1, c2, c3; +SELECT DISTINCT c1, c2, c3 FROM test; + +DROP TABLE test; diff --git a/parser/testdata/02733_fix_distinct_in_order_bug_49622/ast.json b/parser/testdata/02733_fix_distinct_in_order_bug_49622/ast.json new file mode 100644 index 000000000..ddce1e3c7 --- /dev/null +++ b/parser/testdata/02733_fix_distinct_in_order_bug_49622/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001208002, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02733_fix_distinct_in_order_bug_49622/metadata.json b/parser/testdata/02733_fix_distinct_in_order_bug_49622/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02733_fix_distinct_in_order_bug_49622/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02733_fix_distinct_in_order_bug_49622/query.sql b/parser/testdata/02733_fix_distinct_in_order_bug_49622/query.sql new file mode 100644 index 000000000..9501a2c07 --- /dev/null +++ b/parser/testdata/02733_fix_distinct_in_order_bug_49622/query.sql @@ -0,0 +1,15 @@ +set optimize_distinct_in_order=1; + +DROP TABLE IF EXISTS test_string; + +CREATE TABLE test_string +( + `c1` String, + `c2` String +) +ENGINE = MergeTree +ORDER BY c1; + +INSERT INTO test_string(c1, c2) VALUES ('1', ''), ('2', ''); + +SELECT DISTINCT c2, c1 FROM test_string; diff --git a/parser/testdata/02733_sparse_columns_reload/ast.json b/parser/testdata/02733_sparse_columns_reload/ast.json new file mode 100644 index 000000000..8607c4c69 --- /dev/null +++ b/parser/testdata/02733_sparse_columns_reload/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_sparse_reload (children 1)" + }, + { + "explain": " Identifier t_sparse_reload" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001192307, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02733_sparse_columns_reload/metadata.json b/parser/testdata/02733_sparse_columns_reload/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02733_sparse_columns_reload/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02733_sparse_columns_reload/query.sql b/parser/testdata/02733_sparse_columns_reload/query.sql new file mode 100644 index 000000000..d4b482741 --- /dev/null +++ b/parser/testdata/02733_sparse_columns_reload/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t_sparse_reload; + +CREATE TABLE t_sparse_reload (id UInt64, v UInt64) +ENGINE = MergeTree ORDER BY id +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.95; + +INSERT INTO t_sparse_reload SELECT number, 0 FROM numbers(100000); + +SELECT count() FROM t_sparse_reload WHERE NOT ignore(*); + +ALTER 
TABLE t_sparse_reload MODIFY SETTING ratio_of_defaults_for_sparse_serialization = 1.0; + +DETACH TABLE t_sparse_reload; +ATTACH TABLE t_sparse_reload; + +SELECT count() FROM t_sparse_reload WHERE NOT ignore(*); + +DROP TABLE t_sparse_reload; diff --git a/parser/testdata/02734_big_int_from_float_ubsan/ast.json b/parser/testdata/02734_big_int_from_float_ubsan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02734_big_int_from_float_ubsan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02734_big_int_from_float_ubsan/metadata.json b/parser/testdata/02734_big_int_from_float_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02734_big_int_from_float_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02734_big_int_from_float_ubsan/query.sql b/parser/testdata/02734_big_int_from_float_ubsan/query.sql new file mode 100644 index 000000000..9fbf54c1a --- /dev/null +++ b/parser/testdata/02734_big_int_from_float_ubsan/query.sql @@ -0,0 +1,9 @@ +WITH + 18 AS precision, + toUInt256(-1) AS int, + toUInt256(toFloat64(int)) AS converted, + toString(int) AS int_str, + toString(converted) AS converted_str +SELECT + length(int_str) = length(converted_str) AS have_same_length, + substring(int_str, 1, precision) = substring(converted_str, 1, precision) AS have_same_prefix diff --git a/parser/testdata/02734_optimize_group_by/ast.json b/parser/testdata/02734_optimize_group_by/ast.json new file mode 100644 index 000000000..82708e826 --- /dev/null +++ b/parser/testdata/02734_optimize_group_by/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'a' (alias key)" + }, + { + "explain": " Literal 'b' (alias value)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier key" + }, + { + "explain": " Set" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001279969, + "rows_read": 9, + "bytes_read": 299 + } +} diff --git a/parser/testdata/02734_optimize_group_by/metadata.json b/parser/testdata/02734_optimize_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02734_optimize_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02734_optimize_group_by/query.sql b/parser/testdata/02734_optimize_group_by/query.sql new file mode 100644 index 000000000..626805d02 --- /dev/null +++ b/parser/testdata/02734_optimize_group_by/query.sql @@ -0,0 +1,7 @@ +SELECT 'a' AS key, 'b' as value GROUP BY key WITH CUBE SETTINGS enable_analyzer = 0; +SELECT 'a' AS key, 'b' as value GROUP BY key WITH CUBE SETTINGS enable_analyzer = 1; + +SELECT 'a' AS key, 'b' as value GROUP BY ignore(1) WITH CUBE; + +SELECT 'a' AS key, 'b' as value GROUP BY ignore(1); +SELECT 'a' AS key, 'b' as value GROUP BY key; diff --git a/parser/testdata/02734_sparse_columns_mutation/ast.json b/parser/testdata/02734_sparse_columns_mutation/ast.json new file mode 100644 index 000000000..4d807919b --- /dev/null +++ b/parser/testdata/02734_sparse_columns_mutation/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 
t_sparse_mutation (children 1)" + }, + { + "explain": " Identifier t_sparse_mutation" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001064854, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/02734_sparse_columns_mutation/metadata.json b/parser/testdata/02734_sparse_columns_mutation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02734_sparse_columns_mutation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02734_sparse_columns_mutation/query.sql b/parser/testdata/02734_sparse_columns_mutation/query.sql new file mode 100644 index 000000000..6fdb5b5f4 --- /dev/null +++ b/parser/testdata/02734_sparse_columns_mutation/query.sql @@ -0,0 +1,30 @@ +DROP TABLE IF EXISTS t_sparse_mutation; + +CREATE TABLE t_sparse_mutation (id UInt64, v UInt64) +ENGINE = MergeTree ORDER BY id +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9; + +INSERT INTO t_sparse_mutation select number, if (number % 21 = 0, number, 0) FROM numbers(10000); + +SET mutations_sync = 2; + +DELETE FROM t_sparse_mutation WHERE id % 2 = 0; + +SELECT count(), sum(v) FROM t_sparse_mutation; + +SELECT sum(has_lightweight_delete) FROM system.parts +WHERE database = currentDatabase() AND table = 't_sparse_mutation' AND active; + +ALTER TABLE t_sparse_mutation UPDATE v = v * 2 WHERE id % 5 = 0; +ALTER TABLE t_sparse_mutation DELETE WHERE id % 3 = 0; + +SELECT count(), sum(v) FROM t_sparse_mutation; + +OPTIMIZE TABLE t_sparse_mutation FINAL; + +SELECT sum(has_lightweight_delete) FROM system.parts +WHERE database = currentDatabase() AND table = 't_sparse_mutation' AND active; + +SELECT count(), sum(v) FROM t_sparse_mutation; + +DROP TABLE t_sparse_mutation; diff --git a/parser/testdata/02734_sparse_columns_short_circuit/ast.json b/parser/testdata/02734_sparse_columns_short_circuit/ast.json new file mode 100644 index 000000000..49b759a07 --- /dev/null +++ b/parser/testdata/02734_sparse_columns_short_circuit/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_sparse_short_circuit (children 1)" + }, + { + "explain": " Identifier t_sparse_short_circuit" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001172674, + "rows_read": 2, + "bytes_read": 96 + } +} diff --git a/parser/testdata/02734_sparse_columns_short_circuit/metadata.json b/parser/testdata/02734_sparse_columns_short_circuit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02734_sparse_columns_short_circuit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02734_sparse_columns_short_circuit/query.sql b/parser/testdata/02734_sparse_columns_short_circuit/query.sql new file mode 100644 index 000000000..da8de22a8 --- /dev/null +++ b/parser/testdata/02734_sparse_columns_short_circuit/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS t_sparse_short_circuit; + +SET short_circuit_function_evaluation = 'force_enable'; + +CREATE TABLE t_sparse_short_circuit (a UInt64, b UInt64) +ENGINE = MergeTree ORDER BY tuple() +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9; + +INSERT INTO t_sparse_short_circuit select number, if (number % 21 = 0, number % 10 + 1, 0) FROM numbers(100000); + +SELECT sum(if(a % 10 = 0, CAST(b, 'UInt8'), 0)) FROM t_sparse_short_circuit; + +DROP TABLE t_sparse_short_circuit; diff --git a/parser/testdata/02735_array_map_array_of_tuples/ast.json 
b/parser/testdata/02735_array_map_array_of_tuples/ast.json new file mode 100644 index 000000000..dad2552c9 --- /dev/null +++ b/parser/testdata/02735_array_map_array_of_tuples/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001248443, + "rows_read": 17, + "bytes_read": 664 + } +} diff --git a/parser/testdata/02735_array_map_array_of_tuples/metadata.json b/parser/testdata/02735_array_map_array_of_tuples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02735_array_map_array_of_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02735_array_map_array_of_tuples/query.sql b/parser/testdata/02735_array_map_array_of_tuples/query.sql new file mode 100644 index 000000000..51d60aa0c --- /dev/null +++ b/parser/testdata/02735_array_map_array_of_tuples/query.sql @@ -0,0 +1,4 @@ +SELECT arrayMap((x) -> x, [tuple(1)]); +SELECT arrayMap((x) -> x.1, [tuple(1)]); +SELECT arrayMap((x) -> x.1 + x.2, [tuple(1, 2)]); +SELECT arrayMap((x, y) -> x + y, [tuple(1, 2)]); diff --git a/parser/testdata/02735_asof_join_right_null/ast.json b/parser/testdata/02735_asof_join_right_null/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02735_asof_join_right_null/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02735_asof_join_right_null/metadata.json b/parser/testdata/02735_asof_join_right_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02735_asof_join_right_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02735_asof_join_right_null/query.sql b/parser/testdata/02735_asof_join_right_null/query.sql new file mode 100644 index 000000000..997d33a05 --- /dev/null +++ b/parser/testdata/02735_asof_join_right_null/query.sql @@ -0,0 +1,32 @@ + +CREATE TABLE t1 (a Int, b Int) ENGINE = Memory; +INSERT INTO t1 VALUES (1, -1), (1, 0), (1, 1), (1, 2), (1, 3), (1, 4); + +CREATE TABLE t2 (a Int, b Nullable(Int)) ENGINE = Memory; +INSERT INTO t2 VALUES (1, 1), (1, NULL), (1, 2); + +-- { echoOn } +SELECT * FROM t1 ASOF JOIN t2 ON t1.a = t2.a AND t1.b < t2.b ORDER BY t1.b; +SELECT * FROM t1 ASOF JOIN t2 ON t1.a = t2.a AND t1.b <= t2.b ORDER BY t1.b; +SELECT * FROM t1 ASOF JOIN t2 ON t1.a = t2.a AND t1.b > t2.b ORDER BY t1.b; +SELECT * FROM t1 ASOF JOIN t2 ON t1.a = t2.a AND t1.b >= t2.b ORDER BY t1.b; + +SELECT * FROM t1 ASOF LEFT JOIN t2 ON t1.a = t2.a AND t1.b < t2.b ORDER BY t1.b; +SELECT * FROM t1 ASOF LEFT JOIN t2 ON t1.a = t2.a AND t1.b <= t2.b ORDER 
BY t1.b; +SELECT * FROM t1 ASOF LEFT JOIN t2 ON t1.a = t2.a AND t1.b > t2.b ORDER BY t1.b; +SELECT * FROM t1 ASOF LEFT JOIN t2 ON t1.a = t2.a AND t1.b >= t2.b ORDER BY t1.b; + +SET join_use_nulls = 1; + +SELECT * FROM t1 ASOF JOIN t2 ON t1.a = t2.a AND t1.b < t2.b ORDER BY t1.b; +SELECT * FROM t1 ASOF JOIN t2 ON t1.a = t2.a AND t1.b <= t2.b ORDER BY t1.b; +SELECT * FROM t1 ASOF JOIN t2 ON t1.a = t2.a AND t1.b > t2.b ORDER BY t1.b; +SELECT * FROM t1 ASOF JOIN t2 ON t1.a = t2.a AND t1.b >= t2.b ORDER BY t1.b; + +SELECT * FROM t1 ASOF LEFT JOIN t2 ON t1.a = t2.a AND t1.b < t2.b ORDER BY t1.b; +SELECT * FROM t1 ASOF LEFT JOIN t2 ON t1.a = t2.a AND t1.b <= t2.b ORDER BY t1.b; +SELECT * FROM t1 ASOF LEFT JOIN t2 ON t1.a = t2.a AND t1.b > t2.b ORDER BY t1.b; +SELECT * FROM t1 ASOF LEFT JOIN t2 ON t1.a = t2.a AND t1.b >= t2.b ORDER BY t1.b; + +DROP TABLE t1; + diff --git a/parser/testdata/02735_parquet_encoder/ast.json b/parser/testdata/02735_parquet_encoder/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02735_parquet_encoder/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02735_parquet_encoder/metadata.json b/parser/testdata/02735_parquet_encoder/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02735_parquet_encoder/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02735_parquet_encoder/query.sql b/parser/testdata/02735_parquet_encoder/query.sql new file mode 100644 index 000000000..45498cbc1 --- /dev/null +++ b/parser/testdata/02735_parquet_encoder/query.sql @@ -0,0 +1,197 @@ +-- Tags: long, no-fasttest, no-parallel, no-tsan, no-msan, no-asan + +set output_format_parquet_use_custom_encoder = 1; +set output_format_parquet_row_group_size = 1000; +set output_format_parquet_data_page_size = 800; +set output_format_parquet_batch_size = 100; +set output_format_parquet_row_group_size_bytes = 1000000000; +set engine_file_truncate_on_insert = 1; +set allow_suspicious_low_cardinality_types = 1; +set output_format_parquet_enum_as_byte_array=0; + +-- Write random data to parquet file, then read from it and check that it matches what we wrote. +-- Do this for all kinds of data types: primitive, Nullable(primitive), Array(primitive), +-- Array(Nullable(primitive)), Array(Array(primitive)), Map(primitive, primitive), etc. 
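+-- The check pattern used throughout this file, sketched here for readability
+-- (<name> and <table> are placeholders for the identifiers defined below):
+--   insert into function file(<name>.parquet) select * from <table>;
+--   select (select sum(cityHash64(*)) from <table>)
+--        - (select sum(cityHash64(*)) from file(<name>.parquet));
+-- A result of 0 means the data survived the roundtrip through the encoder.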
+ +drop table if exists basic_types_02735; +create temporary table basic_types_02735 as select * from generateRandom(' + u8 UInt8, + u16 UInt16, + u32 UInt32, + u64 UInt64, + i8 Int8, + i16 Int16, + i32 Int32, + i64 Int64, + date Date, + date32 Date32, + datetime DateTime, + datetime64 DateTime64, + enum8 Enum8(''x'' = 1, ''y'' = 2, ''z'' = 3), + enum16 Enum16(''xx'' = 1000, ''yy'' = 2000, ''zz'' = 3000), + float32 Float32, + float64 Float64, + str String, + fstr FixedString(12), + u128 UInt128, + u256 UInt256, + i128 Int128, + i256 Int256, + decimal32 Decimal32(3), + decimal64 Decimal64(10), + decimal128 Decimal128(20), + decimal256 Decimal256(40), + ipv4 IPv4, + ipv6 IPv6') limit 1011; +insert into function file(basic_types_02735.parquet) select * from basic_types_02735 settings output_format_parquet_datetime_as_uint32 = 1; +desc file(basic_types_02735.parquet); +select (select sum(cityHash64(*)) from basic_types_02735) - (select sum(cityHash64(*)) from file(basic_types_02735.parquet)); +drop table basic_types_02735; + +-- DateTime values don't roundtrip (without output_format_parquet_datetime_as_uint32) because we +-- write them as DateTime64(3) (the closest type supported by Parquet). +drop table if exists datetime_02735; +create temporary table datetime_02735 as select * from generateRandom('datetime DateTime') limit 1011; +insert into function file(datetime_02735.parquet) select * from datetime_02735; +desc file(datetime_02735.parquet); +select (select sum(cityHash64(toDateTime64(datetime, 3))) from datetime_02735) - (select sum(cityHash64(*)) from file(datetime_02735.parquet)); +select (select sum(cityHash64(*)) from datetime_02735) - (select sum(cityHash64(*)) from file(datetime_02735.parquet, Parquet, 'datetime DateTime')); +drop table datetime_02735; + +drop table if exists nullables_02735; +create temporary table nullables_02735 as select * from generateRandom(' + u16 Nullable(UInt16), + i64 Nullable(Int64), + datetime64 Nullable(DateTime64), + enum8 Nullable(Enum8(''x'' = 1, ''y'' = 2, ''z'' = 3)), + float64 Nullable(Float64), + str Nullable(String), + fstr Nullable(FixedString(12)), + i256 Nullable(Int256), + decimal256 Nullable(Decimal256(40)), + ipv6 Nullable(IPv6)') limit 1000; +insert into function file(nullables_02735.parquet) select * from nullables_02735; +select (select sum(cityHash64(*)) from nullables_02735) - (select sum(cityHash64(*)) from file(nullables_02735.parquet)); +drop table nullables_02735; + + +-- TODO: When cityHash64() fully supports Nullable: https://github.com/ClickHouse/ClickHouse/pull/58754 +-- the next two blocks can be simplified: arrays_out_02735 intermediate table is not needed, +-- a.csv and b.csv are not needed. 
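+-- (Until then, the two blocks below work around it: the first funnels file() output
+-- through an intermediate table of the same schema before hashing, and the second
+-- serializes both sides to CSV and checksums the raw lines via LineAsString.)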
+ +drop table if exists arrays_02735; +drop table if exists arrays_out_02735; +create table arrays_02735 engine = Memory as select * from generateRandom(' + u32 Array(UInt32), + i8 Array(Int8), + datetime Array(DateTime), + enum16 Array(Enum16(''xx'' = 1000, ''yy'' = 2000, ''zz'' = 3000)), + float32 Array(Float32), + str Array(String), + fstr Array(FixedString(12)), + u128 Array(UInt128), + decimal64 Array(Decimal64(10)), + ipv4 Array(IPv4), + msi Map(String, Int16), + tup Tuple(FixedString(3), Array(String), Map(Int8, Date))') limit 1000; +insert into function file(arrays_02735.parquet) select * from arrays_02735; +create temporary table arrays_out_02735 as arrays_02735; +insert into arrays_out_02735 select * from file(arrays_02735.parquet); +select (select sum(cityHash64(*)) from arrays_02735) - (select sum(cityHash64(*)) from arrays_out_02735); +--select (select sum(cityHash64(*)) from arrays_02735) - +-- (select sum(cityHash64(u32, i8, datetime, enum16, float32, str, fstr, arrayMap(x->reinterpret(x, 'UInt128'), u128), decimal64, ipv4, msi, tup)) from file(arrays_02735.parquet)); +drop table arrays_02735; +drop table arrays_out_02735; + + +drop table if exists madness_02735; +create temporary table madness_02735 as select * from generateRandom(' + aa Array(Array(UInt32)), + aaa Array(Array(Array(UInt32))), + an Array(Nullable(String)), + aan Array(Array(Nullable(FixedString(10)))), + l LowCardinality(String), + ln LowCardinality(Nullable(FixedString(11))), + al Array(LowCardinality(UInt128)), + aaln Array(Array(LowCardinality(Nullable(String)))), + mln Map(LowCardinality(String), Nullable(Int8)), + t Tuple(Map(FixedString(5), Tuple(Array(UInt16), Nullable(UInt16), Array(Tuple(Int8, Decimal64(10))))), Tuple(kitchen UInt64, sink String)), + n Nested(hello UInt64, world Tuple(first String, second FixedString(1))) + ') limit 1000; +insert into function file(madness_02735.parquet) select * from madness_02735; +insert into function file(a.csv) select * from madness_02735 order by tuple(*); +insert into function file(b.csv) select aa, aaa, an, aan, l, ln, arrayMap(x->reinterpret(x, 'UInt128'), al) as al_, aaln, mln, t, n.hello, n.world from file(madness_02735.parquet) order by tuple(aa, aaa, an, aan, l, ln, al_, aaln, mln, t, n.hello, n.world); +select (select sum(cityHash64(*)) from file(a.csv, LineAsString)) - (select sum(cityHash64(*)) from file(b.csv, LineAsString)); +--select (select sum(cityHash64(*)) from madness_02735) - +-- (select sum(cityHash64(aa, aaa, an, aan, l, ln, map(x->reinterpret(x, 'UInt128'), al), aaln, mln, t, n.hello, n.world)) from file(madness_02735.parquet)); +drop table madness_02735; + + +-- Merging input blocks into bigger row groups. +insert into function file(squash_02735.parquet) select '012345' union all select '543210' settings max_block_size = 1; +select num_columns, num_rows, num_row_groups from file(squash_02735.parquet, ParquetMetadata); + +-- Row group size limit in bytes. +insert into function file(row_group_bytes_02735.parquet) select '012345' union all select '543210' settings max_block_size = 1, output_format_parquet_row_group_size_bytes = 5; +select num_columns, num_rows, num_row_groups from file(row_group_bytes_02735.parquet, ParquetMetadata); + +-- Row group size limit in rows. 
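+-- (With output_format_parquet_row_group_size = 1, the 3 rows below are expected to
+-- land in 3 separate row groups.)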
+insert into function file(tiny_row_groups_02735.parquet) select * from numbers(3) settings output_format_parquet_row_group_size = 1; +select num_columns, num_rows, num_row_groups from file(tiny_row_groups_02735.parquet, ParquetMetadata); + +-- 1M unique 8-byte values should exceed dictionary_size_limit (1 MB). +insert into function file(big_column_chunk_02735.parquet) select number from numbers(1000000) settings output_format_parquet_row_group_size = 1000000; +select num_columns, num_rows, num_row_groups from file(big_column_chunk_02735.parquet, ParquetMetadata); +select sum(cityHash64(number)) from file(big_column_chunk_02735.parquet); + +-- Check statistics: signed vs unsigned, null count. Use enough rows to produce multiple pages. +insert into function file(statistics_02735.parquet) select 100 + number%200 as a, toUInt32(number * 3000) as u, toInt32(number * 3000) as i, if(number % 10 == 9, toString(number), null) as s from numbers(1000000) settings output_format_parquet_row_group_size = 1000000; +select num_columns, num_rows, num_row_groups from file(statistics_02735.parquet, ParquetMetadata); +select tupleElement(c, 'statistics') from file(statistics_02735.parquet, ParquetMetadata) array join tupleElement(row_groups[1], 'columns') as c; + +-- Statistics string length limit (max_statistics_size). +insert into function file(long_string_02735.parquet) select toString(range(number * 2000)) from numbers(2); +select tupleElement(tupleElement(row_groups[1], 'columns'), 'statistics') from file(long_string_02735.parquet, ParquetMetadata); + +-- Compression setting. +insert into function file(compressed_02735.parquet) select concat('aaaaaaaaaaaaaaaa', toString(number)) as s from numbers(1000) settings output_format_parquet_row_group_size = 10000, output_format_parquet_compression_method='zstd'; +select total_compressed_size < 10000, total_uncompressed_size > 15000 from file(compressed_02735.parquet, ParquetMetadata); +insert into function file(compressed_02735.parquet) select concat('aaaaaaaaaaaaaaaa', toString(number)) as s from numbers(1000) settings output_format_parquet_row_group_size = 10000, output_format_parquet_compression_method='none'; +select total_compressed_size < 10000, total_uncompressed_size > 15000 from file(compressed_02735.parquet, ParquetMetadata); +insert into function file(compressed_02735.parquet) select if(number%3==1, NULL, 42) as x from numbers(70) settings output_format_parquet_compression_method='zstd'; +select sum(cityHash64(*)) from file(compressed_02735.parquet); + +-- Single-threaded encoding and Arrow encoder. +drop table if exists other_encoders_02735; +create temporary table other_encoders_02735 as select number, number*2 from numbers(10000); +insert into function file(single_thread_02735.parquet) select * from other_encoders_02735 settings max_threads = 1; +select sum(cityHash64(*)) from file(single_thread_02735.parquet); +insert into function file(arrow_02735.parquet) select * from other_encoders_02735 settings output_format_parquet_use_custom_encoder = 0; +select sum(cityHash64(*)) from file(arrow_02735.parquet); + +-- String -> binary vs string; FixedString -> fixed-length-binary vs binary vs string. 
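+-- (output_format_parquet_string_as_string toggles the Parquet String logical annotation
+-- on BYTE_ARRAY columns, and output_format_parquet_fixed_string_as_fixed_byte_array maps
+-- FixedString(N) to FIXED_LEN_BYTE_ARRAY(N); columns.5/columns.6 read from ParquetMetadata
+-- below appear to be the physical and logical types that the checks assert.)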
+insert into function file(strings1_02735.parquet) select 'never', toFixedString('gonna', 5) settings output_format_parquet_string_as_string = 1, output_format_parquet_fixed_string_as_fixed_byte_array = 1; +select columns.5, columns.6 from file(strings1_02735.parquet, ParquetMetadata) array join columns; +insert into function file(strings2_02735.parquet) select 'give', toFixedString('you', 3) settings output_format_parquet_string_as_string = 0, output_format_parquet_fixed_string_as_fixed_byte_array = 0; +select columns.5, columns.6 from file(strings2_02735.parquet, ParquetMetadata) array join columns; +insert into function file(strings3_02735.parquet) select toFixedString('up', 2) settings output_format_parquet_string_as_string = 1, output_format_parquet_fixed_string_as_fixed_byte_array = 0; +select columns.5, columns.6 from file(strings3_02735.parquet, ParquetMetadata) array join columns; +select * from file(strings1_02735.parquet); +select * from file(strings2_02735.parquet); +select * from file(strings3_02735.parquet); + +-- DateTime64 with different units. +insert into function file(datetime64_02735.parquet) select + toDateTime64(number / 1e3, 3) as ms, + toDateTime64(number / 1e6, 6) as us, + toDateTime64(number / 1e9, 9) as ns, + toDateTime64(number / 1e2, 2) as cs, + toDateTime64(number, 0) as s, + toDateTime64(number / 1e7, 7) as dus + from numbers(2000); +desc file(datetime64_02735.parquet); +select sum(cityHash64(*)) from file(datetime64_02735.parquet); + +insert into function file(date_as_uint16.parquet) select toDate('2025-08-12') as d settings output_format_parquet_date_as_uint16 = 1; +select * from file(date_as_uint16.parquet); +desc file(date_as_uint16.parquet); diff --git a/parser/testdata/02735_system_zookeeper_auxiliary/ast.json b/parser/testdata/02735_system_zookeeper_auxiliary/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02735_system_zookeeper_auxiliary/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02735_system_zookeeper_auxiliary/metadata.json b/parser/testdata/02735_system_zookeeper_auxiliary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02735_system_zookeeper_auxiliary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02735_system_zookeeper_auxiliary/query.sql b/parser/testdata/02735_system_zookeeper_auxiliary/query.sql new file mode 100644 index 000000000..fee2d6016 --- /dev/null +++ b/parser/testdata/02735_system_zookeeper_auxiliary/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-fasttest, no-replicated-database, no-shared-merge-tree +-- no-shared-merge-tree -- smt doesn't support aux zookeepers + +DROP TABLE IF EXISTS test_system_zookeeper_auxiliary; + +CREATE TABLE test_system_zookeeper_auxiliary ( + key UInt64 +) +ENGINE ReplicatedMergeTree('zookeeper2:/clickhouse/{database}/02731_test_system_zookeeper_auxiliary/{shard}', '{replica}') +ORDER BY tuple(); + +SELECT DISTINCT zookeeperName FROM system.zookeeper WHERE path = '/' AND zookeeperName = 'default'; +SELECT DISTINCT zookeeperName FROM system.zookeeper WHERE path = '/' AND zookeeperName = 'zookeeper2'; + +SELECT count() FROM system.zookeeper WHERE path IN '/' AND zookeeperName = 'zookeeper3'; -- { serverError BAD_ARGUMENTS } + +SELECT count() = 0 FROM system.zookeeper WHERE path IN '/' AND zookeeperName = 'default' AND zookeeperName = 'zookeeper2'; +SELECT count() > 0 FROM system.zookeeper WHERE path IN '/' AND zookeeperName = 'zookeeper2' AND zookeeperName = 'zookeeper2'; 
+ +DROP TABLE IF EXISTS test_system_zookeeper_auxiliary; diff --git a/parser/testdata/02735_system_zookeeper_connection/ast.json b/parser/testdata/02735_system_zookeeper_connection/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02735_system_zookeeper_connection/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02735_system_zookeeper_connection/metadata.json b/parser/testdata/02735_system_zookeeper_connection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02735_system_zookeeper_connection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02735_system_zookeeper_connection/query.sql b/parser/testdata/02735_system_zookeeper_connection/query.sql new file mode 100644 index 000000000..2ea40eddd --- /dev/null +++ b/parser/testdata/02735_system_zookeeper_connection/query.sql @@ -0,0 +1,25 @@ +-- Tags: no-fasttest, no-replicated-database, no-shared-merge-tree +-- no-shared-merge-tree -- smt doesn't support aux zookeepers + +DROP TABLE IF EXISTS test_zk_connection_table; + +CREATE TABLE test_zk_connection_table ( + key UInt64 +) +ENGINE ReplicatedMergeTree('zookeeper2:/clickhouse/{database}/02731_zk_connection/{shard}', '{replica}') +ORDER BY tuple(); + +SET session_timezone = 'UTC'; + +-- NOTE: During the query execution, now() can be evaluated a bit earlier than connected_time +select name, host, port, index, is_expired, keeper_api_version, (connected_time between yesterday() and now() + interval 3 seconds), + (abs(session_uptime_elapsed_seconds - zookeeperSessionUptime()) < 10), enabled_feature_flags +from system.zookeeper_connection where name='default'; + +-- keeper_api_version will be 0 for auxiliary_zookeeper2, because we fail to get /api_version due to chroot +-- I'm not sure if it's a bug or a useful trick to fall back to the basic API +-- Also, auxiliary zookeeper is created lazily +select name, host, port, index, is_expired, keeper_api_version, (connected_time between yesterday() and now() + interval 3 seconds) +from system.zookeeper_connection where name!='default'; + +DROP TABLE IF EXISTS test_zk_connection_table; diff --git a/parser/testdata/02736_bit_count_big_int/ast.json b/parser/testdata/02736_bit_count_big_int/ast.json new file mode 100644 index 000000000..c9f80c097 --- /dev/null +++ b/parser/testdata/02736_bit_count_big_int/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 1)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Function bitCount (children 1)" + }, + { + "explain": "     ExpressionList (children 1)" + }, + { + "explain": "      Function CAST (children 1)" + }, + { + "explain": "       ExpressionList (children 2)" + }, + { + "explain": "        Literal Int64_-1" + }, + { + "explain": "        Literal 'UInt128'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001070317, + "rows_read": 10, + "bytes_read": 378 + } +} diff --git a/parser/testdata/02736_bit_count_big_int/metadata.json b/parser/testdata/02736_bit_count_big_int/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02736_bit_count_big_int/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02736_bit_count_big_int/query.sql b/parser/testdata/02736_bit_count_big_int/query.sql new file mode 
100644 index 000000000..35a4a6416 --- /dev/null +++ b/parser/testdata/02736_bit_count_big_int/query.sql @@ -0,0 +1,19 @@ +SELECT bitCount(CAST(-1 AS UInt128)); +SELECT bitCount(CAST(-1 AS UInt256)); + +SELECT bitCount(CAST(-1 AS Int128)); +SELECT bitCount(CAST(-1 AS Int256)); + +SELECT bitCount(CAST(-1 AS UInt128) - 1); +SELECT bitCount(CAST(-1 AS UInt256) - 2); + +SELECT bitCount(CAST(-1 AS Int128) - 3); +SELECT bitCount(CAST(-1 AS Int256) - 4); + +SELECT bitCount(CAST(0xFFFFFFFFFFFFFFFF AS Int256)); + +SELECT toTypeName(bitCount(1::UInt128)); +SELECT toTypeName(bitCount(1::UInt256)); + +SELECT toTypeName(bitCount(1::Int128)); +SELECT toTypeName(bitCount(1::Int256)); diff --git a/parser/testdata/02737_arrayJaccardIndex/ast.json b/parser/testdata/02737_arrayJaccardIndex/ast.json new file mode 100644 index 000000000..9750939b9 --- /dev/null +++ b/parser/testdata/02737_arrayJaccardIndex/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'negative tests'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001248661, + "rows_read": 5, + "bytes_read": 185 + } +} diff --git a/parser/testdata/02737_arrayJaccardIndex/metadata.json b/parser/testdata/02737_arrayJaccardIndex/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02737_arrayJaccardIndex/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02737_arrayJaccardIndex/query.sql b/parser/testdata/02737_arrayJaccardIndex/query.sql new file mode 100644 index 000000000..499debd94 --- /dev/null +++ b/parser/testdata/02737_arrayJaccardIndex/query.sql @@ -0,0 +1,30 @@ +SELECT 'negative tests'; + +SELECT 'a' AS arr1, 2 AS arr2, round(arrayJaccardIndex(arr1, arr2), 2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT [] AS arr1, [] AS arr2, round(arrayJaccardIndex(arr1, arr2), 2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT ['1', '2'] AS arr1, [1,2] AS arr2, round(arrayJaccardIndex(arr1, arr2), 2); -- { serverError NO_COMMON_TYPE } + +SELECT 'const arguments'; + +SELECT [1,2] AS arr1, [1,2,3,4] AS arr2, round(arrayJaccardIndex(arr1, arr2), 2); +SELECT [1, 1.1, 2.2] AS arr1, [2.2, 3.3, 444] AS arr2, round(arrayJaccardIndex(arr1, arr2), 2); +SELECT [toUInt16(1)] AS arr1, [toUInt32(1)] AS arr2, round(arrayJaccardIndex(arr1, arr2), 2); +SELECT ['a'] AS arr1, ['a', 'aa', 'aaa'] AS arr2, round(arrayJaccardIndex(arr1, arr2), 2); +SELECT [[1,2], [3,4]] AS arr1, [[1,2], [3,5]] AS arr2, round(arrayJaccardIndex(arr1, arr2), 2); + +SELECT 'non-const arguments'; + +DROP TABLE IF EXISTS array_jaccard_index; + +CREATE TABLE array_jaccard_index (arr Array(UInt8)) engine = MergeTree ORDER BY arr; +INSERT INTO array_jaccard_index values ([1,2,3]); +INSERT INTO array_jaccard_index values ([1,2]); +INSERT INTO array_jaccard_index values ([1]); + +SELECT arr, [1,2] AS other, round(arrayJaccardIndex(arr, other), 2) FROM array_jaccard_index ORDER BY arr; +SELECT arr, [] AS other, round(arrayJaccardIndex(arr, other), 2) FROM array_jaccard_index ORDER BY arr; +SELECT [1,2] AS other, arr, round(arrayJaccardIndex(other, arr), 2) FROM array_jaccard_index ORDER BY arr; +SELECT [] AS other, arr, round(arrayJaccardIndex(other, arr), 2) FROM array_jaccard_index ORDER BY arr; +SELECT arr, arr, 
round(arrayJaccardIndex(arr, arr), 2) FROM array_jaccard_index ORDER BY arr; + +DROP TABLE array_jaccard_index; diff --git a/parser/testdata/02737_session_timezone/ast.json b/parser/testdata/02737_session_timezone/ast.json new file mode 100644 index 000000000..53cfb60c7 --- /dev/null +++ b/parser/testdata/02737_session_timezone/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00108908, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02737_session_timezone/metadata.json b/parser/testdata/02737_session_timezone/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02737_session_timezone/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02737_session_timezone/query.sql b/parser/testdata/02737_session_timezone/query.sql new file mode 100644 index 000000000..1afadbde6 --- /dev/null +++ b/parser/testdata/02737_session_timezone/query.sql @@ -0,0 +1,29 @@ +SET session_timezone = 'Абырвалг'; -- { serverError BAD_ARGUMENTS} + +SELECT timezone(), timezoneOf(now()) SETTINGS session_timezone = 'Pacific/Pitcairn'; + +SET session_timezone = 'Asia/Novosibirsk'; +SELECT timezone(), timezoneOf(now()); + +-- test simple queries +SELECT toDateTime(toDateTime('2022-12-12 23:23:23'), 'Europe/Zurich'); +SELECT toDateTime64(toDateTime64('2022-12-12 23:23:23.123', 3), 3, 'Europe/Zurich') SETTINGS session_timezone = 'America/Denver'; + +-- subquery shall use main query's session_timezone +SELECT toDateTime(toDateTime('2022-12-12 23:23:23'), 'Europe/Zurich'), (SELECT toDateTime(toDateTime('2022-12-12 23:23:23'), 'Europe/Zurich') SETTINGS session_timezone = 'Europe/Helsinki') SETTINGS session_timezone = 'America/Denver'; + +-- test proper serialization +SELECT toDateTime('2002-12-12 23:23:23') AS dt, toString(dt) SETTINGS session_timezone = 'Asia/Phnom_Penh'; +SELECT toDateTime64('2002-12-12 23:23:23.123', 3) AS dt64, toString(dt64) SETTINGS session_timezone = 'Asia/Phnom_Penh'; + +-- Create a table and test that DateTimes are processed correctly on insert +CREATE TABLE test_tz_setting (d DateTime('UTC')) Engine=Memory AS SELECT toDateTime('2000-01-01 00:00:00'); +INSERT INTO test_tz_setting VALUES ('2000-01-01 01:00:00'); -- this is parsed using timezone from `d` column +INSERT INTO test_tz_setting VALUES (toDateTime('2000-01-02 02:00:00')); -- this is parsed using `session_timezone` + +-- Test parsing in WHERE filter, shall have the same logic as insert +SELECT d FROM test_tz_setting WHERE d == '2000-01-01 01:00:00'; -- 1 row expected +SELECT d FROM test_tz_setting WHERE d == toDateTime('2000-01-01 02:00:00'); -- 0 rows expected + +-- Cleanup table +DROP TABLE test_tz_setting SYNC; diff --git a/parser/testdata/02737_sql_auto_is_null/ast.json b/parser/testdata/02737_sql_auto_is_null/ast.json new file mode 100644 index 000000000..6b6eec47e --- /dev/null +++ b/parser/testdata/02737_sql_auto_is_null/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001076141, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02737_sql_auto_is_null/metadata.json b/parser/testdata/02737_sql_auto_is_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02737_sql_auto_is_null/metadata.json 
@@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02737_sql_auto_is_null/query.sql b/parser/testdata/02737_sql_auto_is_null/query.sql new file mode 100644 index 000000000..22f1a9524 --- /dev/null +++ b/parser/testdata/02737_sql_auto_is_null/query.sql @@ -0,0 +1,2 @@ +SET SQL_AUTO_IS_NULL = 0; +SELECT getSetting('SQL_AUTO_IS_NULL'); diff --git a/parser/testdata/02740_hashed_dictionary_load_factor_smoke/ast.json b/parser/testdata/02740_hashed_dictionary_load_factor_smoke/ast.json new file mode 100644 index 000000000..4eda06113 --- /dev/null +++ b/parser/testdata/02740_hashed_dictionary_load_factor_smoke/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001348464, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02740_hashed_dictionary_load_factor_smoke/metadata.json b/parser/testdata/02740_hashed_dictionary_load_factor_smoke/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02740_hashed_dictionary_load_factor_smoke/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02740_hashed_dictionary_load_factor_smoke/query.sql b/parser/testdata/02740_hashed_dictionary_load_factor_smoke/query.sql new file mode 100644 index 000000000..d4bb9a1b1 --- /dev/null +++ b/parser/testdata/02740_hashed_dictionary_load_factor_smoke/query.sql @@ -0,0 +1,107 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + key UInt64, + value UInt16 +) ENGINE=Memory() AS SELECT number, number FROM numbers(1e5); + +DROP TABLE IF EXISTS test_table_nullable; +CREATE TABLE test_table_nullable +( + key UInt64, + value Nullable(UInt16) +) ENGINE=Memory() AS SELECT number, number % 2 == 0 ? 
NULL : number FROM numbers(1e5); + +DROP TABLE IF EXISTS test_table_string; +CREATE TABLE test_table_string +( + key String, + value UInt16 +) ENGINE=Memory() AS SELECT 'foo' || number::String, number FROM numbers(1e5); + +DROP TABLE IF EXISTS test_table_complex; +CREATE TABLE test_table_complex +( + key_1 UInt64, + key_2 UInt64, + value UInt16 +) ENGINE=Memory() AS SELECT number, number, number FROM numbers(1e5); + +DROP DICTIONARY IF EXISTS test_sparse_dictionary_load_factor; +CREATE DICTIONARY test_sparse_dictionary_load_factor +( + key UInt64, + value UInt16 +) PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE test_table)) +LAYOUT(SPARSE_HASHED(MAX_LOAD_FACTOR 0.90)) +LIFETIME(0); +SHOW CREATE test_sparse_dictionary_load_factor; +SYSTEM RELOAD DICTIONARY test_sparse_dictionary_load_factor; +SELECT element_count FROM system.dictionaries WHERE database = currentDatabase() AND name = 'test_sparse_dictionary_load_factor'; +SELECT count() FROM test_table WHERE dictGet('test_sparse_dictionary_load_factor', 'value', key) != value; +DROP DICTIONARY test_sparse_dictionary_load_factor; + +DROP DICTIONARY IF EXISTS test_dictionary_load_factor; +CREATE DICTIONARY test_dictionary_load_factor +( + key UInt64, + value UInt16 +) PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE test_table)) +LAYOUT(HASHED(MAX_LOAD_FACTOR 0.90)) +LIFETIME(0); +SHOW CREATE test_dictionary_load_factor; +SYSTEM RELOAD DICTIONARY test_dictionary_load_factor; +SELECT element_count FROM system.dictionaries WHERE database = currentDatabase() AND name = 'test_dictionary_load_factor'; +SELECT count() FROM test_table WHERE dictGet('test_dictionary_load_factor', 'value', key) != value; +DROP DICTIONARY test_dictionary_load_factor; + +DROP DICTIONARY IF EXISTS test_dictionary_load_factor_nullable; +CREATE DICTIONARY test_dictionary_load_factor_nullable +( + key UInt64, + value Nullable(UInt16) +) PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE test_table_nullable)) +LAYOUT(HASHED(MAX_LOAD_FACTOR 0.90)) +LIFETIME(0); +SHOW CREATE test_dictionary_load_factor_nullable; +SYSTEM RELOAD DICTIONARY test_dictionary_load_factor_nullable; +SELECT element_count FROM system.dictionaries WHERE database = currentDatabase() AND name = 'test_dictionary_load_factor_nullable'; +SELECT count() FROM test_table_nullable WHERE dictGet('test_dictionary_load_factor_nullable', 'value', key) != value; +DROP DICTIONARY test_dictionary_load_factor_nullable; + +DROP DICTIONARY IF EXISTS test_complex_dictionary_load_factor; +CREATE DICTIONARY test_complex_dictionary_load_factor +( + key_1 UInt64, + key_2 UInt64, + value UInt16 +) PRIMARY KEY key_1, key_2 +SOURCE(CLICKHOUSE(TABLE test_table_complex)) +LAYOUT(COMPLEX_KEY_HASHED(MAX_LOAD_FACTOR 0.90)) +LIFETIME(0); +SYSTEM RELOAD DICTIONARY test_complex_dictionary_load_factor; +SHOW CREATE test_complex_dictionary_load_factor; +SELECT element_count FROM system.dictionaries WHERE database = currentDatabase() and name = 'test_complex_dictionary_load_factor'; +SELECT count() FROM test_table_complex WHERE dictGet('test_complex_dictionary_load_factor', 'value', (key_1, key_2)) != value; +DROP DICTIONARY test_complex_dictionary_load_factor; + +DROP DICTIONARY IF EXISTS test_dictionary_load_factor_string; +CREATE DICTIONARY test_dictionary_load_factor_string +( + key String, + value UInt16 +) PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE test_table_string)) +LAYOUT(HASHED(MAX_LOAD_FACTOR 1)) +LIFETIME(0); +-- should fail because MAX_LOAD_FACTOR is 1 (maximum allowed value is 0.99) +SYSTEM RELOAD DICTIONARY test_dictionary_load_factor_string; -- { 
serverError BAD_ARGUMENTS } +DROP DICTIONARY test_dictionary_load_factor_string; + +DROP TABLE test_table; +DROP TABLE test_table_nullable; +DROP TABLE test_table_string; +DROP TABLE test_table_complex; diff --git a/parser/testdata/02746_index_analysis_binary_operator_with_null/ast.json b/parser/testdata/02746_index_analysis_binary_operator_with_null/ast.json new file mode 100644 index 000000000..4df8fb29a --- /dev/null +++ b/parser/testdata/02746_index_analysis_binary_operator_with_null/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001396623, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02746_index_analysis_binary_operator_with_null/metadata.json b/parser/testdata/02746_index_analysis_binary_operator_with_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02746_index_analysis_binary_operator_with_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02746_index_analysis_binary_operator_with_null/query.sql b/parser/testdata/02746_index_analysis_binary_operator_with_null/query.sql new file mode 100644 index 000000000..f9613735b --- /dev/null +++ b/parser/testdata/02746_index_analysis_binary_operator_with_null/query.sql @@ -0,0 +1,12 @@ +drop table if exists tab; + +create table tab (x DateTime) engine MergeTree order by x; + +SELECT toDateTime(65537, toDateTime(NULL), NULL) +FROM tab +WHERE ((x + CAST('1', 'Nullable(UInt8)')) <= 2) AND ((x + CAST('', 'Nullable(UInt8)')) <= 256) +ORDER BY + toDateTime(toDateTime(-2, NULL, NULL) + 100.0001, NULL, -2, NULL) DESC NULLS LAST, + x ASC NULLS LAST; + +drop table tab; diff --git a/parser/testdata/02751_match_constant_needle/ast.json b/parser/testdata/02751_match_constant_needle/ast.json new file mode 100644 index 000000000..dff1aa6e2 --- /dev/null +++ b/parser/testdata/02751_match_constant_needle/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function match (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'default\/k8s1'" + }, + { + "explain": " Literal '\\\\A(?:(?:[-0-9_a-z]+(?:\\\\.[-0-9_a-z]+)*)\/k8s1)\\\\z'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001732223, + "rows_read": 8, + "bytes_read": 337 + } +} diff --git a/parser/testdata/02751_match_constant_needle/metadata.json b/parser/testdata/02751_match_constant_needle/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02751_match_constant_needle/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02751_match_constant_needle/query.sql b/parser/testdata/02751_match_constant_needle/query.sql new file mode 100644 index 000000000..9980c3760 --- /dev/null +++ b/parser/testdata/02751_match_constant_needle/query.sql @@ -0,0 +1,2 @@ +select match('default/k8s1', '\\A(?:(?:[-0-9_a-z]+(?:\\.[-0-9_a-z]+)*)/k8s1)\\z'); +select match('abc123', '[a-zA-Z]+(?P<num>\\d+)'); diff --git a/parser/testdata/02751_multiif_to_if_crash/ast.json 
b/parser/testdata/02751_multiif_to_if_crash/ast.json new file mode 100644 index 000000000..40987142a --- /dev/null +++ b/parser/testdata/02751_multiif_to_if_crash/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier A" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function multiIf (alias A) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal NULL" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001538583, + "rows_read": 20, + "bytes_read": 811 + } +} diff --git a/parser/testdata/02751_multiif_to_if_crash/metadata.json b/parser/testdata/02751_multiif_to_if_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02751_multiif_to_if_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02751_multiif_to_if_crash/query.sql b/parser/testdata/02751_multiif_to_if_crash/query.sql new file mode 100644 index 000000000..6b9c221c9 --- /dev/null +++ b/parser/testdata/02751_multiif_to_if_crash/query.sql @@ -0,0 +1,3 @@ +SELECT sum(A) FROM (SELECT multiIf(1, 1, NULL) as A); +SELECT sum(multiIf(number = NULL, 65536, 3)) FROM numbers(3); +SELECT multiIf(NULL, 65536 :: UInt32, 3 :: Int32); diff --git a/parser/testdata/02751_parallel_replicas_bug_chunkinfo_not_set/ast.json b/parser/testdata/02751_parallel_replicas_bug_chunkinfo_not_set/ast.json new file mode 100644 index 000000000..30a70b28e --- /dev/null +++ b/parser/testdata/02751_parallel_replicas_bug_chunkinfo_not_set/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery join_inner_table__fuzz_1 (children 1)" + }, + { + "explain": " Identifier join_inner_table__fuzz_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001478712, + "rows_read": 2, + "bytes_read": 101 + } +} diff --git a/parser/testdata/02751_parallel_replicas_bug_chunkinfo_not_set/metadata.json b/parser/testdata/02751_parallel_replicas_bug_chunkinfo_not_set/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02751_parallel_replicas_bug_chunkinfo_not_set/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02751_parallel_replicas_bug_chunkinfo_not_set/query.sql b/parser/testdata/02751_parallel_replicas_bug_chunkinfo_not_set/query.sql new file mode 100644 index 000000000..ca22f324f --- /dev/null +++ b/parser/testdata/02751_parallel_replicas_bug_chunkinfo_not_set/query.sql @@ -0,0 +1,43 @@ +CREATE TABLE join_inner_table__fuzz_1 +( + `id` UUID, + `key` Nullable(Date), + `number` Int64, + `value1` 
LowCardinality(String), + `value2` LowCardinality(String), + `time` Int128 +) +ENGINE = MergeTree +ORDER BY (id, number, key) +SETTINGS allow_nullable_key = 1; + +INSERT INTO join_inner_table__fuzz_1 SELECT + CAST('833c9e22-c245-4eb5-8745-117a9a1f26b1', 'UUID') AS id, + CAST(rowNumberInAllBlocks(), 'String') AS key, + * +FROM generateRandom('number Int64, value1 String, value2 String, time Int64', 1, 10, 2) +LIMIT 100; + +SET max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_parallel_replicas = 1, parallel_replicas_for_non_replicated_merge_tree=1; + +-- SELECT query will write a Warning to the logs +SET send_logs_level='error'; + +SELECT + key, + value1, + value2, + toUInt64(min(time)) AS start_ts +FROM join_inner_table__fuzz_1 +PREWHERE (id = '833c9e22-c245-4eb5-8745-117a9a1f26b1') AND (number > toUInt64('1610517366120')) +GROUP BY + key, + value1, + value2 + WITH ROLLUP +ORDER BY + key ASC, + value1 ASC, + value2 ASC NULLS LAST +LIMIT 10 +FORMAT Null; diff --git a/parser/testdata/02751_query_log_test_partitions/ast.json b/parser/testdata/02751_query_log_test_partitions/ast.json new file mode 100644 index 000000000..ef61a63bf --- /dev/null +++ b/parser/testdata/02751_query_log_test_partitions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001160658, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02751_query_log_test_partitions/metadata.json b/parser/testdata/02751_query_log_test_partitions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02751_query_log_test_partitions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02751_query_log_test_partitions/query.sql b/parser/testdata/02751_query_log_test_partitions/query.sql new file mode 100644 index 000000000..455724fce --- /dev/null +++ b/parser/testdata/02751_query_log_test_partitions/query.sql @@ -0,0 +1,20 @@ +set log_queries=1; +set log_queries_min_type='QUERY_FINISH'; + +DROP TABLE IF EXISTS 02751_query_log_test_partitions; +CREATE TABLE 02751_query_log_test_partitions (a Int64, b Int64) ENGINE = MergeTree PARTITION BY a ORDER BY b; + +INSERT INTO 02751_query_log_test_partitions SELECT number, number FROM numbers(10); + +SELECT * FROM 02751_query_log_test_partitions WHERE a = 3; + +SYSTEM FLUSH LOGS query_log; + +SELECT + --Remove the prefix string which is a mutable database name. 
+ arrayStringConcat(arrayPopFront(splitByString('.', partitions[1])), '.') +FROM + system.query_log +WHERE + current_database=currentDatabase() and + query = 'SELECT * FROM 02751_query_log_test_partitions WHERE a = 3;' diff --git a/parser/testdata/02752_custom_separated_ignore_spaces_bug/ast.json b/parser/testdata/02752_custom_separated_ignore_spaces_bug/ast.json new file mode 100644 index 000000000..a62a1e9b2 --- /dev/null +++ b/parser/testdata/02752_custom_separated_ignore_spaces_bug/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier CustomSeparatedIgnoreSpaces" + }, + { + "explain": " Literal 'x String'" + }, + { + "explain": " Literal ' unquoted_string\\n'" + }, + { + "explain": " Set" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001143408, + "rows_read": 14, + "bytes_read": 534 + } +} diff --git a/parser/testdata/02752_custom_separated_ignore_spaces_bug/metadata.json b/parser/testdata/02752_custom_separated_ignore_spaces_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02752_custom_separated_ignore_spaces_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02752_custom_separated_ignore_spaces_bug/query.sql b/parser/testdata/02752_custom_separated_ignore_spaces_bug/query.sql new file mode 100644 index 000000000..62047a704 --- /dev/null +++ b/parser/testdata/02752_custom_separated_ignore_spaces_bug/query.sql @@ -0,0 +1 @@ +select * from format(CustomSeparatedIgnoreSpaces, 'x String', ' unquoted_string\n') settings format_custom_escaping_rule='CSV'; diff --git a/parser/testdata/02752_forbidden_headers/ast.json b/parser/testdata/02752_forbidden_headers/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02752_forbidden_headers/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02752_forbidden_headers/metadata.json b/parser/testdata/02752_forbidden_headers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02752_forbidden_headers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02752_forbidden_headers/query.sql b/parser/testdata/02752_forbidden_headers/query.sql new file mode 100644 index 000000000..dd0dcb53b --- /dev/null +++ b/parser/testdata/02752_forbidden_headers/query.sql @@ -0,0 +1,26 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +SELECT * FROM url('http://localhost:8123/', LineAsString, headers('exact_header' = 'value')); -- { serverError BAD_ARGUMENTS } +SELECT * FROM url('http://localhost:8123/', LineAsString, headers('cAsE_INSENSITIVE_header' = 'value')); -- { serverError BAD_ARGUMENTS } +SELECT * FROM url('http://localhost:8123/', LineAsString, headers('bad_header_name: test\nexact_header' = 'value')); -- { serverError BAD_ARGUMENTS } +SELECT * FROM url('http://localhost:8123/', LineAsString, headers('bad_header_value' = 
'test\nexact_header: value')); -- { serverError BAD_ARGUMENTS } +SELECT * FROM url('http://localhost:8123/', LineAsString, headers('random_header' = 'value')) FORMAT Null; + +SELECT * FROM urlCluster('test_cluster_two_shards_localhost', 'http://localhost:8123/', LineAsString, headers('exact_header' = 'value')); -- { serverError BAD_ARGUMENTS } +SELECT * FROM urlCluster('test_cluster_two_shards_localhost', 'http://localhost:8123/', LineAsString, headers('cAsE_INSENSITIVE_header' = 'value')); -- { serverError BAD_ARGUMENTS } +SELECT * FROM urlCluster('test_cluster_two_shards_localhost', 'http://localhost:8123/', LineAsString, headers('bad_header_name: test\nexact_header' = 'value')); -- { serverError BAD_ARGUMENTS } +SELECT * FROM urlCluster('test_cluster_two_shards_localhost', 'http://localhost:8123/', LineAsString, headers('bad_header_value' = 'test\nexact_header: value')); -- { serverError BAD_ARGUMENTS } +SELECT * FROM urlCluster('test_cluster_two_shards_localhost', 'http://localhost:8123/', LineAsString, headers('random_header' = 'value')) FORMAT Null; + +SELECT * FROM s3('http://localhost:8123/123/4', LineAsString, headers('exact_header' = 'value')); -- { serverError BAD_ARGUMENTS } +SELECT * FROM s3('http://localhost:8123/123/4', LineAsString, headers('cAsE_INSENSITIVE_header' = 'value')); -- { serverError BAD_ARGUMENTS } +SELECT * FROM s3('http://localhost:8123/123/4', LineAsString, headers('bad_header_name: test\nexact_header' = 'value')); -- { serverError BAD_ARGUMENTS } +SELECT * FROM s3('http://localhost:8123/123/4', LineAsString, headers('bad_header_value' = 'test\nexact_header: value')); -- { serverError BAD_ARGUMENTS } +SELECT * FROM s3('http://localhost:8123/123/4', LineAsString, headers('random_header' = 'value')); -- { serverError S3_ERROR } + +SELECT * FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:8123/123/4', LineAsString, headers('exact_header' = 'value')); -- { serverError BAD_ARGUMENTS } +SELECT * FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:8123/123/4', LineAsString, headers('cAsE_INSENSITIVE_header' = 'value')); -- { serverError BAD_ARGUMENTS } +SELECT * FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:8123/123/4', LineAsString, headers('bad_header_name: test\nexact_header' = 'value')); -- { serverError BAD_ARGUMENTS } +SELECT * FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:8123/123/4', LineAsString, headers('bad_header_value' = 'test\nexact_header: value')); -- { serverError BAD_ARGUMENTS } +SELECT * FROM s3Cluster('test_cluster_two_shards_localhost', 'http://localhost:8123/123/4', LineAsString, headers('random_header' = 'value')); -- { serverError S3_ERROR } diff --git a/parser/testdata/02752_is_null_priority/ast.json b/parser/testdata/02752_is_null_priority/ast.json new file mode 100644 index 000000000..417f7d872 --- /dev/null +++ b/parser/testdata/02752_is_null_priority/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Explain EXPLAIN AST (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function isNull (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + 
"explain": " Identifier a" + }, + { + "explain": " Identifier b" + }, + { + "explain": " Function isNotNull (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Identifier b" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001645388, + "rows_read": 17, + "bytes_read": 652 + } +} diff --git a/parser/testdata/02752_is_null_priority/metadata.json b/parser/testdata/02752_is_null_priority/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02752_is_null_priority/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02752_is_null_priority/query.sql b/parser/testdata/02752_is_null_priority/query.sql new file mode 100644 index 000000000..a0a9741e7 --- /dev/null +++ b/parser/testdata/02752_is_null_priority/query.sql @@ -0,0 +1 @@ +EXPLAIN AST SELECT a * b IS NULL, a * b IS NOT NULL; diff --git a/parser/testdata/02752_space_function/ast.json b/parser/testdata/02752_space_function/ast.json new file mode 100644 index 000000000..cee2d3c43 --- /dev/null +++ b/parser/testdata/02752_space_function/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'const, uint'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001076365, + "rows_read": 5, + "bytes_read": 182 + } +} diff --git a/parser/testdata/02752_space_function/metadata.json b/parser/testdata/02752_space_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02752_space_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02752_space_function/query.sql b/parser/testdata/02752_space_function/query.sql new file mode 100644 index 000000000..b12906927 --- /dev/null +++ b/parser/testdata/02752_space_function/query.sql @@ -0,0 +1,64 @@ +SELECT 'const, uint'; +SELECT space(3::UInt8), length(space(3::UInt8)); +SELECT space(3::UInt16), length(space(3::UInt16)); +SELECT space(3::UInt32), length(space(3::UInt32)); +SELECT space(3::UInt64), length(space(3::UInt64)); +SELECT 'const, int'; +SELECT space(3::Int8), length(space(3::Int8)); +SELECT space(3::Int16), length(space(3::Int16)); +SELECT space(3::Int32), length(space(3::Int32)); +SELECT space(3::Int64), length(space(3::Int64)); + +SELECT 'const, int, negative'; +SELECT space(-3::Int8), length(space(-3::Int8)); +SELECT space(-3::Int16), length(space(-3::Int16)); +SELECT space(-3::Int32), length(space(-3::Int32)); +SELECT space(-3::Int64), length(space(-3::Int64)); + +SELECT 'negative tests'; +SELECT space('abc'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT space(['abc']); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT space(('abc')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT space(30303030303030303030303030303030::UInt64); -- { serverError TOO_LARGE_STRING_SIZE } + +SELECT 'null'; +SELECT space(NULL); + +DROP TABLE IF EXISTS defaults; +CREATE TABLE defaults +( + u8 UInt8, + u16 UInt16, + u32 UInt32, + u64 UInt64, + i8 Int8, + i16 Int16, + i32 Int32, + i64 Int64 +) ENGINE = Memory(); + +INSERT INTO defaults values (3, 12, 4, 56, 3, 12, -4, 56) 
(2, 10, 21, 20, 2, 10, -21, 20) (1, 4, 9, 5, 1, 4, -9, 5) (0, 5, 7, 7, 0, 5, -7, 7); + +SELECT 'const, uint, multiple'; +SELECT space(30::UInt8) FROM defaults; +SELECT space(30::UInt16) FROM defaults; +SELECT space(30::UInt32) FROM defaults; +SELECT space(30::UInt64) FROM defaults; +SELECT 'const int, multiple'; +SELECT space(30::Int8) FROM defaults; +SELECT space(30::Int16) FROM defaults; +SELECT space(30::Int32) FROM defaults; +SELECT space(30::Int64) FROM defaults; + +SELECT 'non-const, uint'; +SELECT space(u8), length(space(u8)) FROM defaults; +SELECT space(u16), length(space(u16)) FROM defaults; +SELECT space(u32), length(space(u32)) from defaults; +SELECT space(u64), length(space(u64)) FROM defaults; +SELECT 'non-const, int'; +SELECT space(i8), length(space(i8)) FROM defaults; +SELECT space(i16), length(space(i16)) FROM defaults; +SELECT space(i32), length(space(i32)) from defaults; +SELECT space(i64), length(space(i64)) FROM defaults; + +DROP TABLE defaults; diff --git a/parser/testdata/02762_replicated_database_no_args/ast.json b/parser/testdata/02762_replicated_database_no_args/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02762_replicated_database_no_args/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02762_replicated_database_no_args/metadata.json b/parser/testdata/02762_replicated_database_no_args/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02762_replicated_database_no_args/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02762_replicated_database_no_args/query.sql b/parser/testdata/02762_replicated_database_no_args/query.sql new file mode 100644 index 000000000..b811bb7d4 --- /dev/null +++ b/parser/testdata/02762_replicated_database_no_args/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-parallel + +create database replicated_db_no_args engine=Replicated; -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02763_jit_compare_functions_nan/ast.json b/parser/testdata/02763_jit_compare_functions_nan/ast.json new file mode 100644 index 000000000..478f563d1 --- /dev/null +++ b/parser/testdata/02763_jit_compare_functions_nan/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001030228, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02763_jit_compare_functions_nan/metadata.json b/parser/testdata/02763_jit_compare_functions_nan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02763_jit_compare_functions_nan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02763_jit_compare_functions_nan/query.sql b/parser/testdata/02763_jit_compare_functions_nan/query.sql new file mode 100644 index 000000000..61d165139 --- /dev/null +++ b/parser/testdata/02763_jit_compare_functions_nan/query.sql @@ -0,0 +1,25 @@ +SET compile_expressions = 1; +SET min_count_to_compile_expression = 0; + +DROP TABLE IF EXISTS test_table_1; +DROP TABLE IF EXISTS test_table_2; + +CREATE TABLE test_table_1 (id UInt32) ENGINE = MergeTree ORDER BY (id); +create table test_table_2 (id UInt32) ENGINE = MergeTree ORDER BY (id); +INSERT INTO test_table_1 VALUES (2); +INSERT INTO test_table_2 VALUES (2); + +select t1.id, t2.id FROM test_table_1 AS t1 RIGHT JOIN test_table_2 AS t2 ON (t1.id = t2.id) +WHERE (acos(t2.id) <> atan(t1.id)) and (not 
(acos(t2.id) <> atan(t1.id))); + +DROP TABLE test_table_1; +DROP TABLE test_table_2; + +SELECT '--'; + +SELECT (acos(a) <> atan(b)) and (not (acos(a) <> atan(b))) r FROM (SELECT 2 a, 2 b); +SELECT (acos(a) <> atan(b)) and (not (acos(a) <> atan(b))) r FROM (SELECT 2 a, 2 b); +SELECT (acos(a) <> atan(b)) and (not (acos(a) <> atan(b))) r FROM (SELECT 2 a, 2 b); +SELECT (acos(a) <> atan(b)) and (not (acos(a) <> atan(b))) r FROM (SELECT 2 a, 2 b); +SELECT (acos(a) <> atan(b)) and (not (acos(a) <> atan(b))) r FROM (SELECT 2 a, 2 b); +SELECT (acos(a) <> atan(b)) and (not (acos(a) <> atan(b))) r FROM (SELECT 2 a, 2 b); diff --git a/parser/testdata/02763_mutate_compact_part_with_skip_indices_and_projections/ast.json b/parser/testdata/02763_mutate_compact_part_with_skip_indices_and_projections/ast.json new file mode 100644 index 000000000..63fe21ece --- /dev/null +++ b/parser/testdata/02763_mutate_compact_part_with_skip_indices_and_projections/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00159152, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02763_mutate_compact_part_with_skip_indices_and_projections/metadata.json b/parser/testdata/02763_mutate_compact_part_with_skip_indices_and_projections/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02763_mutate_compact_part_with_skip_indices_and_projections/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02763_mutate_compact_part_with_skip_indices_and_projections/query.sql b/parser/testdata/02763_mutate_compact_part_with_skip_indices_and_projections/query.sql new file mode 100644 index 000000000..bb9825fe5 --- /dev/null +++ b/parser/testdata/02763_mutate_compact_part_with_skip_indices_and_projections/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test ( col1 Int64, dt Date ) ENGINE = MergeTree PARTITION BY dt ORDER BY tuple(); + +INSERT INTO test FORMAT Values (1, today()); + +ALTER TABLE test ADD COLUMN col2 String; + +ALTER TABLE test ADD INDEX i1 (col1, col2) TYPE set(100) GRANULARITY 1; + +ALTER TABLE test MATERIALIZE INDEX i1; + +ALTER TABLE test ADD COLUMN col3 String; + +ALTER TABLE test DROP COLUMN col3; + +DROP TABLE IF EXISTS test; + +CREATE TABLE test ( col1 Int64, dt Date ) ENGINE = MergeTree PARTITION BY dt ORDER BY tuple(); + +INSERT INTO test FORMAT Values (1, today()); + +ALTER TABLE test ADD COLUMN col2 String; + +ALTER TABLE test ADD PROJECTION p1 ( SELECT col2, sum(col1) GROUP BY col2 ); + +ALTER TABLE test MATERIALIZE PROJECTION p1; + +ALTER TABLE test ADD COLUMN col3 String; + +ALTER TABLE test DROP COLUMN col3; diff --git a/parser/testdata/02764_index_analysis_fix/ast.json b/parser/testdata/02764_index_analysis_fix/ast.json new file mode 100644 index 000000000..bac72f9fd --- /dev/null +++ b/parser/testdata/02764_index_analysis_fix/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery x (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001232636, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02764_index_analysis_fix/metadata.json b/parser/testdata/02764_index_analysis_fix/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02764_index_analysis_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02764_index_analysis_fix/query.sql b/parser/testdata/02764_index_analysis_fix/query.sql new file mode 100644 index 000000000..541a3444e --- /dev/null +++ b/parser/testdata/02764_index_analysis_fix/query.sql @@ -0,0 +1,9 @@ +drop table if exists x; + +create table x (dt String) engine MergeTree partition by toYYYYMM(toDate(dt)) order by tuple(); + +insert into x values ('2022-10-01 10:10:10'); + +select * from x where dt like '2022-10-01%'; + +drop table x; diff --git a/parser/testdata/02764_parallel_replicas_plain_merge_tree/ast.json b/parser/testdata/02764_parallel_replicas_plain_merge_tree/ast.json new file mode 100644 index 000000000..c92f2262d --- /dev/null +++ b/parser/testdata/02764_parallel_replicas_plain_merge_tree/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery parallel_replicas_plain (children 1)" + }, + { + "explain": " Identifier parallel_replicas_plain" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001151403, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/02764_parallel_replicas_plain_merge_tree/metadata.json b/parser/testdata/02764_parallel_replicas_plain_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02764_parallel_replicas_plain_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02764_parallel_replicas_plain_merge_tree/query.sql b/parser/testdata/02764_parallel_replicas_plain_merge_tree/query.sql new file mode 100644 index 000000000..b26cbc3cf --- /dev/null +++ b/parser/testdata/02764_parallel_replicas_plain_merge_tree/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS parallel_replicas_plain; +CREATE TABLE parallel_replicas_plain (x String) ENGINE=MergeTree() ORDER BY x; +INSERT INTO parallel_replicas_plain SELECT toString(number) FROM numbers(10); + +SET max_parallel_replicas=3, enable_parallel_replicas=1, cluster_for_parallel_replicas='parallel_replicas'; +SET send_logs_level='error'; +SET parallel_replicas_for_non_replicated_merge_tree = 0; + +SELECT x FROM parallel_replicas_plain LIMIT 1 FORMAT Null; +SELECT max(length(x)) FROM parallel_replicas_plain FORMAT Null; + +SET parallel_replicas_for_non_replicated_merge_tree = 1; + +SELECT x FROM parallel_replicas_plain LIMIT 1 FORMAT Null; +SELECT max(length(x)) FROM parallel_replicas_plain FORMAT Null; + +DROP TABLE parallel_replicas_plain; diff --git a/parser/testdata/02765_parallel_replicas_final_modifier/ast.json b/parser/testdata/02765_parallel_replicas_final_modifier/ast.json new file mode 100644 index 000000000..f1c1ee6a5 --- /dev/null +++ b/parser/testdata/02765_parallel_replicas_final_modifier/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery parallel_replicas_final (children 3)" + }, + { + "explain": " Identifier parallel_replicas_final" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function ReplacingMergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier x" + } + ], + 
+ "rows": 10, + + "statistics": + { + "elapsed": 0.001073362, + "rows_read": 10, + "bytes_read": 383 + } +} diff --git a/parser/testdata/02765_parallel_replicas_final_modifier/metadata.json b/parser/testdata/02765_parallel_replicas_final_modifier/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02765_parallel_replicas_final_modifier/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02765_parallel_replicas_final_modifier/query.sql b/parser/testdata/02765_parallel_replicas_final_modifier/query.sql new file mode 100644 index 000000000..cf80e8c71 --- /dev/null +++ b/parser/testdata/02765_parallel_replicas_final_modifier/query.sql @@ -0,0 +1,15 @@ +CREATE TABLE IF NOT EXISTS parallel_replicas_final (x String) ENGINE=ReplacingMergeTree() ORDER BY x; + +INSERT INTO parallel_replicas_final SELECT toString(number) FROM numbers(10); + +SET max_parallel_replicas=3, enable_parallel_replicas=1, cluster_for_parallel_replicas='parallel_replicas'; +SET parallel_replicas_for_non_replicated_merge_tree = 1; +SET parallel_replicas_only_with_analyzer = 0; -- necessary for CI run with disabled analyzer + +SELECT * FROM parallel_replicas_final FINAL FORMAT Null; + +SET enable_parallel_replicas=2; + +SELECT * FROM parallel_replicas_final FINAL FORMAT Null; -- { serverError SUPPORT_IS_DISABLED } + +DROP TABLE IF EXISTS parallel_replicas_final; diff --git a/parser/testdata/02766_bitshift_with_const_arguments/ast.json b/parser/testdata/02766_bitshift_with_const_arguments/ast.json new file mode 100644 index 000000000..9e228918a --- /dev/null +++ b/parser/testdata/02766_bitshift_with_const_arguments/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitShiftLeft (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal '14342'" + }, + { + "explain": " Literal '4242348'" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001486185, + "rows_read": 21, + "bytes_read": 814 + } +} diff --git a/parser/testdata/02766_bitshift_with_const_arguments/metadata.json b/parser/testdata/02766_bitshift_with_const_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02766_bitshift_with_const_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02766_bitshift_with_const_arguments/query.sql b/parser/testdata/02766_bitshift_with_const_arguments/query.sql new file mode 100644 index 000000000..6b2961f05 --- /dev/null +++ b/parser/testdata/02766_bitshift_with_const_arguments/query.sql @@ -0,0 
+1,22 @@ +SELECT bitShiftLeft(if(number = NULL, '14342', '4242348'), 1) FROM numbers(1); +SELECT bitShiftLeft(if(number = NULL, '14342', '4242348'), 1) FROM numbers(3); +SELECT bitShiftLeft(if(materialize(NULL), '14342', '4242348'), 1) FROM numbers(1); +SELECT bitShiftLeft(if(materialize(1), '123', '123'), 1) from numbers(1); + + +-- The next queries are from the fuzzer that found the bug: +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; +CREATE TABLE t0 (vkey UInt32, pkey UInt32, c0 UInt32) engine = TinyLog; +CREATE TABLE t1 (vkey UInt32) ENGINE = AggregatingMergeTree ORDER BY vkey; +INSERT INTO t0 VALUES (15, 25000, 58); +SELECT ref_5.pkey AS c_2_c2392_6 FROM t0 AS ref_5 WHERE 'J[' < multiIf(ref_5.pkey IN ( SELECT 1 ), bitShiftLeft(multiIf(ref_5.c0 > NULL, '1', ')'), 40), NULL); +DROP TABLE t0; +DROP TABLE t1; + +DROP TABLE IF EXISTS t5; +CREATE TABLE t5 (vkey UInt32, pkey UInt32, c18 Float32, c19 UInt32) ENGINE = Log; +INSERT INTO t5 VALUES (3, 13000, 73.90, 83); +SELECT subq_0.pkey as c_1_c1193_15 FROM t5 AS subq_0 WHERE sipHash128(0, subq_0.c18, bitShiftRight(case when false then (sipHash128(subq_0.pkey, subq_0.c18, 'S')) else '1' end, 0)) is not null; +DROP TABLE t5; + diff --git a/parser/testdata/02768_cse_nested_distributed/ast.json b/parser/testdata/02768_cse_nested_distributed/ast.json new file mode 100644 index 000000000..873cc045d --- /dev/null +++ b/parser/testdata/02768_cse_nested_distributed/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 2)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Subquery (alias s) (children 1)" + }, + { + "explain": "     SelectWithUnionQuery (children 1)" + }, + { + "explain": "      ExpressionList (children 1)" + }, + { + "explain": "       SelectQuery (children 2)" + }, + { + "explain": "        ExpressionList (children 1)" + }, + { + "explain": "         Function greater (children 1)" + }, + { + "explain": "          ExpressionList (children 2)" + }, + { + "explain": "           Function count (children 1)" + }, + { + "explain": "            ExpressionList" + }, + { + "explain": "           Literal UInt64_0" + }, + { + "explain": "        TablesInSelectQuery (children 1)" + }, + { + "explain": "         TablesInSelectQueryElement (children 1)" + }, + { + "explain": "          TableExpression (children 1)" + }, + { + "explain": "           Function remote (children 1)" + }, + { + "explain": "            ExpressionList (children 2)" + }, + { + "explain": "             Literal '127.2'" + }, + { + "explain": "             Identifier system.settings" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Identifier s" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001377777, + "rows_read": 23, + "bytes_read": 956 + } +} diff --git a/parser/testdata/02768_cse_nested_distributed/metadata.json b/parser/testdata/02768_cse_nested_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02768_cse_nested_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02768_cse_nested_distributed/query.sql b/parser/testdata/02768_cse_nested_distributed/query.sql new file mode 100644 index 000000000..90e526c0d --- /dev/null +++ b/parser/testdata/02768_cse_nested_distributed/query.sql @@ -0,0 +1,5 @@ +with (select count() > 0 from remote('127.2', system.settings)) as s select s; +-- nested +with (select count() > 0 from remote('127.2',
remote('127.2', system.settings))) as s select s; +-- nested via view() +with (select count() > 0 from remote('127.2', view(select count() from remote('127.2', system.settings)))) as s select s; diff --git a/parser/testdata/02769_compare_functions_nan/ast.json b/parser/testdata/02769_compare_functions_nan/ast.json new file mode 100644 index 000000000..77e208dbb --- /dev/null +++ b/parser/testdata/02769_compare_functions_nan/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001165837, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02769_compare_functions_nan/metadata.json b/parser/testdata/02769_compare_functions_nan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02769_compare_functions_nan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02769_compare_functions_nan/query.sql b/parser/testdata/02769_compare_functions_nan/query.sql new file mode 100644 index 000000000..1e1a9df9c --- /dev/null +++ b/parser/testdata/02769_compare_functions_nan/query.sql @@ -0,0 +1,60 @@ +SET compile_expressions = 1; +SET min_count_to_compile_expression = 0; + +SELECT nan AS value, value = value, value = materialize(value), materialize(value) = value, materialize(value) = materialize(value); +SELECT cast(nan, 'Float32') AS value, value = value, value = materialize(value), materialize(value) = value, materialize(value) = materialize(value); +SELECT nan AS lhs, cast(nan, 'Float32') AS rhs, lhs = rhs, lhs = materialize(rhs), materialize(lhs) = rhs, materialize(lhs) = materialize(rhs); + +SELECT '--'; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt32, + value UInt32 +) ENGINE = MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (76, 57); + +SELECT value FROM (SELECT stddevSamp(id) AS value FROM test_table) as subquery +WHERE ((value = value) AND (NOT (value = value))); + +DROP TABLE test_table; + +SELECT '--'; + +SELECT nan AS value, value != value, value != materialize(value), materialize(value) != value, materialize(value) != materialize(value); +SELECT cast(nan, 'Float32') AS value, value != value, value != materialize(value), materialize(value) != value, materialize(value) != materialize(value); +SELECT nan AS lhs, cast(nan, 'Float32') AS rhs, lhs != rhs, lhs != materialize(rhs), materialize(lhs) != rhs, materialize(lhs) != materialize(rhs); + +SELECT '--'; + +CREATE TABLE test_table +( + id UInt32, + value_1 UInt32, + value_2 Float32 +) ENGINE = MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (12000, 36, 77.94); + +SELECT value +FROM (SELECT (corr(value_1, value_1) OVER test_window) AS value FROM test_table WINDOW test_window AS (PARTITION BY value_2 ORDER BY id ASC)) as subquery +WHERE not (not (value <> value)); + +DROP TABLE test_table; + +SELECT '--'; + +CREATE TABLE test_table +( + id Float32, + value Float32 +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (-10.75, 95.57); + +SELECT * FROM (SELECT corr(id, id) as corr_value FROM test_table GROUP BY value) AS subquery LEFT ANTI JOIN test_table ON (subquery.corr_value = test_table.id) +WHERE (test_table.id >= test_table.id) AND (NOT (test_table.id >= test_table.id)); + +DROP TABLE test_table; diff --git a/parser/testdata/02769_parallel_replicas_unavailable_shards/ast.json b/parser/testdata/02769_parallel_replicas_unavailable_shards/ast.json new file mode 
100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02769_parallel_replicas_unavailable_shards/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02769_parallel_replicas_unavailable_shards/metadata.json b/parser/testdata/02769_parallel_replicas_unavailable_shards/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02769_parallel_replicas_unavailable_shards/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02769_parallel_replicas_unavailable_shards/query.sql b/parser/testdata/02769_parallel_replicas_unavailable_shards/query.sql new file mode 100644 index 000000000..06783cda6 --- /dev/null +++ b/parser/testdata/02769_parallel_replicas_unavailable_shards/query.sql @@ -0,0 +1,22 @@ +-- Tags: no-parallel +-- - no-parallel - due to usage of fail points + +DROP TABLE IF EXISTS test_parallel_replicas_unavailable_shards; +CREATE TABLE test_parallel_replicas_unavailable_shards (n UInt64) ENGINE=MergeTree() ORDER BY tuple(); +INSERT INTO test_parallel_replicas_unavailable_shards SELECT * FROM numbers(10); + +SET enable_parallel_replicas=2, max_parallel_replicas=11, cluster_for_parallel_replicas='parallel_replicas', parallel_replicas_for_non_replicated_merge_tree=1; +SET send_logs_level='error'; +SET parallel_replicas_only_with_analyzer = 0; -- necessary for CI run with disabled analyzer + +-- with a local plan for the initiator, the query can be executed fast on the initiator, and we may simply never reach the point where an unavailable replica can be detected +-- therefore disable the local plan for now +SYSTEM ENABLE FAILPOINT parallel_replicas_wait_for_unused_replicas; +SELECT count() FROM test_parallel_replicas_unavailable_shards WHERE NOT ignore(*) SETTINGS log_comment = '02769_7b513191-5082-4073-8568-53b86a49da79', parallel_replicas_local_plan=0; + +SYSTEM FLUSH LOGS query_log; + +SET enable_parallel_replicas=0; +SELECT ProfileEvents['ParallelReplicasUnavailableCount'] FROM system.query_log WHERE yesterday() <= event_date AND query_id in (select query_id from system.query_log where log_comment = '02769_7b513191-5082-4073-8568-53b86a49da79' and current_database = currentDatabase()) and type = 'QueryFinish' and query_id == initial_query_id; + +DROP TABLE test_parallel_replicas_unavailable_shards; diff --git a/parser/testdata/02770_jit_aggregation_nullable_key_fix/ast.json b/parser/testdata/02770_jit_aggregation_nullable_key_fix/ast.json new file mode 100644 index 000000000..868362ad1 --- /dev/null +++ b/parser/testdata/02770_jit_aggregation_nullable_key_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001129886, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02770_jit_aggregation_nullable_key_fix/metadata.json b/parser/testdata/02770_jit_aggregation_nullable_key_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02770_jit_aggregation_nullable_key_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02770_jit_aggregation_nullable_key_fix/query.sql b/parser/testdata/02770_jit_aggregation_nullable_key_fix/query.sql new file mode 100644 index 000000000..e4ce789f4 --- /dev/null +++ b/parser/testdata/02770_jit_aggregation_nullable_key_fix/query.sql @@ -0,0 +1,39 @@ +SET compile_aggregate_expressions = 1; +SET min_count_to_compile_aggregate_expression = 0; +SET
group_by_use_nulls = 0; + +SELECT count() FROM +( + SELECT + count([NULL, NULL]), + count([2147483646, -2147483647, 3, 3]), + uniqExact(if(number >= 1048577, number, NULL), NULL) + FROM numbers(1048577) + GROUP BY if(number >= 2., number, NULL) +); + +SELECT count() FROM +( + SELECT count() + FROM numbers(65411) + GROUP BY if(number < 1, NULL, number) +); + +SET group_by_use_nulls = 1; + +SELECT count() FROM +( + SELECT + count([NULL, NULL]), + count([2147483646, -2147483647, 3, 3]), + uniqExact(if(number >= 1048577, number, NULL), NULL) + FROM numbers(1048577) + GROUP BY if(number >= 2., number, NULL) +); + +SELECT count() FROM +( + SELECT count() + FROM numbers(65411) + GROUP BY if(number < 1, NULL, number) +); diff --git a/parser/testdata/02771_if_constant_folding/ast.json b/parser/testdata/02771_if_constant_folding/ast.json new file mode 100644 index 000000000..ee1fdf21b --- /dev/null +++ b/parser/testdata/02771_if_constant_folding/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'UInt64'" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_5" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001419729, + "rows_read": 23, + "bytes_read": 903 + } +} diff --git a/parser/testdata/02771_if_constant_folding/metadata.json b/parser/testdata/02771_if_constant_folding/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02771_if_constant_folding/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02771_if_constant_folding/query.sql b/parser/testdata/02771_if_constant_folding/query.sql new file mode 100644 index 000000000..161046502 --- /dev/null +++ b/parser/testdata/02771_if_constant_folding/query.sql @@ -0,0 +1 @@ +SELECT cast(number, if(1 = 1, 'UInt64', toString(number))) FROM numbers(5); diff --git a/parser/testdata/02771_ignore_data_skipping_indices/ast.json b/parser/testdata/02771_ignore_data_skipping_indices/ast.json new file mode 100644 index 000000000..c19f8a817 --- /dev/null +++ b/parser/testdata/02771_ignore_data_skipping_indices/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_02771 (children 1)" + }, + { + "explain": " Identifier data_02771" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00163824, + "rows_read": 2, + "bytes_read": 72 + } +} diff 
--git a/parser/testdata/02771_ignore_data_skipping_indices/metadata.json b/parser/testdata/02771_ignore_data_skipping_indices/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02771_ignore_data_skipping_indices/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02771_ignore_data_skipping_indices/query.sql b/parser/testdata/02771_ignore_data_skipping_indices/query.sql new file mode 100644 index 000000000..8f7bccf7b --- /dev/null +++ b/parser/testdata/02771_ignore_data_skipping_indices/query.sql @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS data_02771; + +CREATE TABLE data_02771 +( + key Int, + x Int, + y Int, + INDEX x_idx x TYPE minmax GRANULARITY 1, + INDEX y_idx y TYPE minmax GRANULARITY 1, + INDEX xy_idx (x,y) TYPE minmax GRANULARITY 1 +) +Engine=MergeTree() +ORDER BY key; + +INSERT INTO data_02771 VALUES (1, 2, 3); + +SELECT * FROM data_02771; +SELECT * FROM data_02771 SETTINGS ignore_data_skipping_indices=''; -- { serverError CANNOT_PARSE_TEXT } +SELECT * FROM data_02771 SETTINGS ignore_data_skipping_indices='x_idx'; +SELECT * FROM data_02771 SETTINGS ignore_data_skipping_indices='na_idx'; + +SELECT * FROM data_02771 WHERE x = 1 AND y = 1 SETTINGS ignore_data_skipping_indices='xy_idx',force_data_skipping_indices='xy_idx' ; -- { serverError INDEX_NOT_USED } +SELECT * FROM data_02771 WHERE x = 1 AND y = 2 SETTINGS ignore_data_skipping_indices='xy_idx'; + +SET enable_analyzer = 0; + +SELECT * from ( EXPLAIN indexes = 1 SELECT * FROM data_02771 WHERE x = 1 AND y = 2 ) WHERE explain NOT LIKE '%Expression%' AND explain NOT LIKE '%Filter%'; +SELECT * from ( EXPLAIN indexes = 1 SELECT * FROM data_02771 WHERE x = 1 AND y = 2 SETTINGS ignore_data_skipping_indices='xy_idx' ) WHERE explain NOT LIKE '%Expression%' AND explain NOT LIKE '%Filter%'; + +SET enable_analyzer = 1; + +SELECT * from ( EXPLAIN indexes = 1 SELECT * FROM data_02771 WHERE x = 1 AND y = 2 ) WHERE explain NOT LIKE '%Expression%' AND explain NOT LIKE '%Filter%'; +SELECT * from ( EXPLAIN indexes = 1 SELECT * FROM data_02771 WHERE x = 1 AND y = 2 SETTINGS ignore_data_skipping_indices='xy_idx' ) WHERE explain NOT LIKE '%Expression%' AND explain NOT LIKE '%Filter%'; + +DROP TABLE data_02771; diff --git a/parser/testdata/02771_jit_functions_comparison_crash/ast.json b/parser/testdata/02771_jit_functions_comparison_crash/ast.json new file mode 100644 index 000000000..8cf76c98a --- /dev/null +++ b/parser/testdata/02771_jit_functions_comparison_crash/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001163952, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02771_jit_functions_comparison_crash/metadata.json b/parser/testdata/02771_jit_functions_comparison_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02771_jit_functions_comparison_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02771_jit_functions_comparison_crash/query.sql b/parser/testdata/02771_jit_functions_comparison_crash/query.sql new file mode 100644 index 000000000..e02f1a338 --- /dev/null +++ b/parser/testdata/02771_jit_functions_comparison_crash/query.sql @@ -0,0 +1,36 @@ +SET compile_expressions = 1; +SET min_count_to_compile_expression = 0; + +DROP TABLE IF EXISTS test_table_1; +CREATE TABLE test_table_1 +( + pkey UInt32, + c8 UInt32, + c9 String, + c10 
Float32, + c11 String +) ENGINE = MergeTree ORDER BY pkey; + +DROP TABLE IF EXISTS test_table_2; +CREATE TABLE test_table_2 +( + vkey UInt32, + pkey UInt32, + c15 UInt32 +) ENGINE = MergeTree ORDER BY vkey; + +WITH test_cte AS +( + SELECT + ref_10.c11 as c_2_c2350_1, + ref_9.c9 as c_2_c2351_2 + FROM + test_table_1 as ref_9 + RIGHT OUTER JOIN test_table_1 as ref_10 ON (ref_9.c11 = ref_10.c9) + INNER JOIN test_table_2 as ref_11 ON (ref_10.c8 = ref_11.vkey) + WHERE ((ref_10.pkey + ref_11.pkey) BETWEEN ref_11.vkey AND (CASE WHEN (-30.87 >= ref_9.c10) THEN ref_11.c15 ELSE ref_11.pkey END)) +) +SELECT ref_13.c_2_c2350_1 as c_2_c2357_3 FROM test_cte as ref_13 WHERE (ref_13.c_2_c2351_2) in (select ref_14.c_2_c2351_2 as c_5_c2352_0 FROM test_cte as ref_14); + +DROP TABLE test_table_1; +DROP TABLE test_table_2; diff --git a/parser/testdata/02771_log_faminy_truncate_count/ast.json b/parser/testdata/02771_log_faminy_truncate_count/ast.json new file mode 100644 index 000000000..9b4c09a23 --- /dev/null +++ b/parser/testdata/02771_log_faminy_truncate_count/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_log (children 1)" + }, + { + "explain": " Identifier test_log" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001208555, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/02771_log_faminy_truncate_count/metadata.json b/parser/testdata/02771_log_faminy_truncate_count/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02771_log_faminy_truncate_count/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02771_log_faminy_truncate_count/query.sql b/parser/testdata/02771_log_faminy_truncate_count/query.sql new file mode 100644 index 000000000..3fb22837f --- /dev/null +++ b/parser/testdata/02771_log_faminy_truncate_count/query.sql @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS test_log; +CREATE TABLE test_log +( + `crypto_name` String, + `trade_date` Date +) +ENGINE = Log; + +INSERT INTO test_log (crypto_name, trade_date) VALUES ('abc', '2021-01-01'), ('def', '2022-02-02'); + +TRUNCATE TABLE test_log; +SELECT count() FROM test_log; + +DROP TABLE IF EXISTS test_log; +CREATE TABLE test_log +( + `crypto_name` String, + `trade_date` Date +) +ENGINE = StripeLog; + +INSERT INTO test_log (crypto_name, trade_date) VALUES ('abc', '2021-01-01'), ('def', '2022-02-02'); + +TRUNCATE TABLE test_log; +SELECT count() FROM test_log; +DROP TABLE test_log; diff --git a/parser/testdata/02771_parallel_replicas_analyzer/ast.json b/parser/testdata/02771_parallel_replicas_analyzer/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02771_parallel_replicas_analyzer/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02771_parallel_replicas_analyzer/metadata.json b/parser/testdata/02771_parallel_replicas_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02771_parallel_replicas_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02771_parallel_replicas_analyzer/query.sql b/parser/testdata/02771_parallel_replicas_analyzer/query.sql new file mode 100644 index 000000000..6886b65fd --- /dev/null +++ b/parser/testdata/02771_parallel_replicas_analyzer/query.sql @@ -0,0 +1,44 @@ +-- Tags: zookeeper +DROP TABLE IF EXISTS join_inner_table__fuzz_146_replicated SYNC; +CREATE TABLE join_inner_table__fuzz_146_replicated 
+( + `id` UUID, + `key` String, + `number` Int64, + `value1` String, + `value2` String, + `time` Nullable(Int64) +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/join_inner_table__fuzz_146_replicated', '{replica}') +ORDER BY (id, number, key) +SETTINGS index_granularity = 8192; + +INSERT INTO join_inner_table__fuzz_146_replicated + SELECT CAST('833c9e22-c245-4eb5-8745-117a9a1f26b1', 'UUID') AS id, CAST(rowNumberInAllBlocks(), 'String') AS key, * + FROM generateRandom('number Int64, value1 String, value2 String, time Int64', 1, 10, 2) LIMIT 10; + +-- Simple query with analyzer and pure parallel replicas +SELECT number +FROM join_inner_table__fuzz_146_replicated + SETTINGS + enable_analyzer = 1, + max_parallel_replicas = 2, + cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', + enable_parallel_replicas = 1; + +SYSTEM FLUSH LOGS query_log; +SELECT is_initial_query, ProfileEvents['ParallelReplicasQueryCount'] as c, query +FROM system.query_log +WHERE event_date >= yesterday() + AND type = 'QueryFinish' + AND query_id = + ( + SELECT query_id + FROM system.query_log + WHERE current_database = currentDatabase() + AND event_date >= yesterday() + AND type = 'QueryFinish' + AND query LIKE '-- Simple query with analyzer and pure parallel replicas%' + ); + +DROP TABLE join_inner_table__fuzz_146_replicated SYNC; diff --git a/parser/testdata/02771_resolve_compound_identifier/ast.json b/parser/testdata/02771_resolve_compound_identifier/ast.json new file mode 100644 index 000000000..6e5a1749f --- /dev/null +++ b/parser/testdata/02771_resolve_compound_identifier/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_02771 (children 1)" + }, + { + "explain": " Identifier test_02771" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001420468, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02771_resolve_compound_identifier/metadata.json b/parser/testdata/02771_resolve_compound_identifier/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02771_resolve_compound_identifier/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02771_resolve_compound_identifier/query.sql b/parser/testdata/02771_resolve_compound_identifier/query.sql new file mode 100644 index 000000000..db4d44337 --- /dev/null +++ b/parser/testdata/02771_resolve_compound_identifier/query.sql @@ -0,0 +1,11 @@ +DROP DATABASE IF EXISTS test_02771; + +CREATE DATABASE test_02771; + +CREATE TABLE test_02771.t (x UInt8) ENGINE = MergeTree() ORDER BY x; + +INSERT INTO test_02771.t SELECT number FROM numbers(10); + +SELECT t.x FROM test_02771.t ORDER BY t.x; + +DROP DATABASE IF EXISTS test_02771; diff --git a/parser/testdata/02771_tsv_csv_custom_skip_trailing_empty_lines/ast.json b/parser/testdata/02771_tsv_csv_custom_skip_trailing_empty_lines/ast.json new file mode 100644 index 000000000..dd8ef8883 --- /dev/null +++ b/parser/testdata/02771_tsv_csv_custom_skip_trailing_empty_lines/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " 
TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier TSV" + }, + { + "explain": " Literal 'x UInt32, y UInt32'" + }, + { + "explain": " Literal '1\\t2\\n\\n'" + }, + { + "explain": " Set" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001072834, + "rows_read": 14, + "bytes_read": 510 + } +} diff --git a/parser/testdata/02771_tsv_csv_custom_skip_trailing_empty_lines/metadata.json b/parser/testdata/02771_tsv_csv_custom_skip_trailing_empty_lines/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02771_tsv_csv_custom_skip_trailing_empty_lines/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02771_tsv_csv_custom_skip_trailing_empty_lines/query.sql b/parser/testdata/02771_tsv_csv_custom_skip_trailing_empty_lines/query.sql new file mode 100644 index 000000000..917a434cd --- /dev/null +++ b/parser/testdata/02771_tsv_csv_custom_skip_trailing_empty_lines/query.sql @@ -0,0 +1,12 @@ +select * from format(TSV, 'x UInt32, y UInt32', '1\t2\n\n') settings input_format_tsv_skip_trailing_empty_lines=0; -- {serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED} +select * from format(TSV, 'x UInt32, y UInt32', '1\t2\n\n') settings input_format_tsv_skip_trailing_empty_lines=1; +select * from format(TSV, 'x UInt32, y UInt32', '1\t2\n\n1\t2\n') settings input_format_tsv_skip_trailing_empty_lines=1; -- {serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED} + +select * from format(CSV, 'x UInt32, y UInt32', '1,2\n\n') settings input_format_csv_skip_trailing_empty_lines=0; -- {serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED} +select * from format(CSV, 'x UInt32, y UInt32', '1,2\n\n') settings input_format_csv_skip_trailing_empty_lines=1; +select * from format(CSV, 'x UInt32, y UInt32', '1,2\n\n1,2\n') settings input_format_csv_skip_trailing_empty_lines=1; -- {serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED} + +select * from format(CustomSeparated, 'x UInt32, y UInt32', '1\t2\n\n\n') settings input_format_custom_skip_trailing_empty_lines=0; -- {serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED} +select * from format(CustomSeparated, 'x UInt32, y UInt32', '1\t2\n\n\n') settings input_format_custom_skip_trailing_empty_lines=1; +select * from format(CustomSeparated, 'x UInt32, y UInt32', '1\t2\n\n\n1\t2\n\n\n') settings input_format_custom_skip_trailing_empty_lines=1; -- {serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED} + diff --git a/parser/testdata/02772_jit_date_time_add/ast.json b/parser/testdata/02772_jit_date_time_add/ast.json new file mode 100644 index 000000000..9c80d77c8 --- /dev/null +++ b/parser/testdata/02772_jit_date_time_add/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.0012345, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02772_jit_date_time_add/metadata.json b/parser/testdata/02772_jit_date_time_add/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02772_jit_date_time_add/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02772_jit_date_time_add/query.sql b/parser/testdata/02772_jit_date_time_add/query.sql new file mode 100644 index 000000000..0ba994580 --- /dev/null +++ 
b/parser/testdata/02772_jit_date_time_add/query.sql @@ -0,0 +1,6 @@ +SET compile_expressions = 1; +SET min_count_to_compile_expression = 0; + +SELECT DISTINCT result FROM (SELECT toStartOfFifteenMinutes(toDateTime(toStartOfFifteenMinutes(toDateTime(1000.0001220703125) + (number * 65536))) + (number * 9223372036854775807)) AS result FROM system.numbers LIMIT 1048576) ORDER BY result DESC NULLS FIRST FORMAT Null; -- { serverError DECIMAL_OVERFLOW } +SELECT DISTINCT result FROM (SELECT toStartOfFifteenMinutes(toDateTime(toStartOfFifteenMinutes(toDateTime(1000.0001220703125) + (number * 65536))) + toInt64(number * 9223372036854775807)) AS result FROM system.numbers LIMIT 1048576) ORDER BY result DESC NULLS FIRST FORMAT Null; +SELECT round(round(round(round(round(100)), round(round(round(round(NULL), round(65535)), toTypeName(now() + 9223372036854775807) LIKE 'DateTime%DateTime%DateTime%DateTime%', round(-2)), 255), round(NULL)))); diff --git a/parser/testdata/02772_s3_crash/ast.json b/parser/testdata/02772_s3_crash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02772_s3_crash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02772_s3_crash/metadata.json b/parser/testdata/02772_s3_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02772_s3_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02772_s3_crash/query.sql b/parser/testdata/02772_s3_crash/query.sql new file mode 100644 index 000000000..ba198716e --- /dev/null +++ b/parser/testdata/02772_s3_crash/query.sql @@ -0,0 +1,7 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +SELECT * FROM s3(headers('random_header' = 'value')); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT * FROM s3Cluster('test_cluster_two_shards_localhost', headers('random_header' = 'value')); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SET enable_analyzer = 1; +EXPLAIN QUERY TREE SELECT 1 FROM s3('a', 1, CSV); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02775_show_columns_called_from_clickhouse/ast.json b/parser/testdata/02775_show_columns_called_from_clickhouse/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02775_show_columns_called_from_clickhouse/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02775_show_columns_called_from_clickhouse/metadata.json b/parser/testdata/02775_show_columns_called_from_clickhouse/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02775_show_columns_called_from_clickhouse/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02775_show_columns_called_from_clickhouse/query.sql b/parser/testdata/02775_show_columns_called_from_clickhouse/query.sql new file mode 100644 index 000000000..73595c3cb --- /dev/null +++ b/parser/testdata/02775_show_columns_called_from_clickhouse/query.sql @@ -0,0 +1,81 @@ +-- Tags: no-fasttest, no-parallel +-- no-fasttest: json type needs rapidjson library, geo types need s2 geometry +-- no-parallel: can't provide currentDatabase() to SHOW COLUMNS + +-- Tests the output of SHOW COLUMNS when called through the ClickHouse protocol. 
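+-- A minimal sketch of the expected shape (illustrative only; the table `mini` below is hypothetical and not part of this test):
+-- SHOW COLUMNS emits one row per column with the MySQL-compatible fields field, type, null, key, default and extra, so given
+--   CREATE TABLE mini (id UInt64, name Nullable(String)) ENGINE = Memory;
+--   SHOW COLUMNS FROM mini;
+-- one would expect `null` = NO for id and YES for name, with key/default/extra left empty here.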
+ +-- ----------------------------------------------------------------------------------- +-- Please keep this test in-sync with 02775_show_columns_called_from_clickhouse.expect +-- ----------------------------------------------------------------------------------- + +DROP TABLE IF EXISTS tab; + +SET allow_suspicious_low_cardinality_types=1; +SET enable_json_type=1; + +CREATE TABLE tab +( + i8 Int8, + i16 Int16, + i32 Int32, + i64 Int64, + i128 Int128, + i256 Int256, + ui8 UInt8, + ui16 UInt16, + ui32 UInt32, + ui64 UInt64, + ui128 UInt128, + ui256 UInt256, + f32 Float32, + f64 Float64, + dec32 Decimal32(2), + dec64 Decimal64(2), + dec128 Decimal128(2), + dec128_native Decimal(35, 30), + dec128_text Decimal(35, 31), + dec256 Decimal256(2), + dec256_native Decimal(65, 2), + dec256_text Decimal(66, 2), + p Point, + r Ring, + pg Polygon, + mpg MultiPolygon, + b Bool, + s String, + fs FixedString(3), + uuid UUID, + d Date, + d32 Date32, + dt DateTime, + dt_tz1 DateTime('UTC'), + dt_tz2 DateTime('Europe/Amsterdam'), + dt64 DateTime64(3), + dt64_3_tz1 DateTime64(3, 'UTC'), + dt64_3_tz2 DateTime64(3, 'Asia/Shanghai'), + dt64_6 DateTime64(6, 'UTC'), + dt64_9 DateTime64(9, 'UTC'), + enm Enum('hallo' = 1, 'welt' = 2), + agg AggregateFunction(uniq, UInt64), + sagg SimpleAggregateFunction(sum, Double), + a Array(String), + o JSON, + t Tuple(Int32, String, Nullable(String), LowCardinality(String), LowCardinality(Nullable(String)), Tuple(Int32, String)), + m Map(Int32, String), + m_complex Map(Int32, Map(Int32, LowCardinality(Nullable(String)))), + nested Nested (col1 String, col2 UInt32), + ip4 IPv4, + ip6 IPv6, + ns Nullable(String), + nfs Nullable(FixedString(3)), + ndt64 Nullable(DateTime64(3)), + ndt64_tz Nullable(DateTime64(3, 'Asia/Shanghai')), + ls LowCardinality(String), + lfs LowCardinality(FixedString(3)), + lns LowCardinality(Nullable(String)), + lnfs LowCardinality(Nullable(FixedString(3))), +) ENGINE Memory; + +SHOW COLUMNS FROM tab; + +DROP TABLE tab; diff --git a/parser/testdata/02780_final_streams_data_skipping_index/ast.json b/parser/testdata/02780_final_streams_data_skipping_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02780_final_streams_data_skipping_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02780_final_streams_data_skipping_index/metadata.json b/parser/testdata/02780_final_streams_data_skipping_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02780_final_streams_data_skipping_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02780_final_streams_data_skipping_index/query.sql b/parser/testdata/02780_final_streams_data_skipping_index/query.sql new file mode 100644 index 000000000..56bc073ec --- /dev/null +++ b/parser/testdata/02780_final_streams_data_skipping_index/query.sql @@ -0,0 +1,29 @@ +-- Tags: no-random-merge-tree-settings, no-random-settings + +DROP TABLE IF EXISTS data; + +CREATE TABLE data +( + key Int, + v1 DateTime, + INDEX v1_index v1 TYPE minmax GRANULARITY 1 +) ENGINE=AggregatingMergeTree() +ORDER BY key +SETTINGS index_granularity=8192, min_bytes_for_wide_part=0, min_rows_for_wide_part=0; + +SYSTEM STOP MERGES data; +SET optimize_on_insert = 0; + +-- generate 50% of marks that cannot be skipped with v1_index +-- this will create a gap in marks +INSERT INTO data SELECT number, if(number/8192 % 2 == 0, now(), now() - INTERVAL 200 DAY) FROM numbers(1e6); +INSERT INTO data SELECT number+1e6, 
if(number/8192 % 2 == 0, now(), now() - INTERVAL 200 DAY) FROM numbers(1e6); + +-- { echoOn } +EXPLAIN PIPELINE SELECT * FROM data FINAL WHERE v1 >= now() - INTERVAL 180 DAY +SETTINGS max_threads=2, max_final_threads=2, force_data_skipping_indices='v1_index', use_skip_indexes_if_final=1 +FORMAT LineAsString; + +EXPLAIN PIPELINE SELECT * FROM data FINAL WHERE v1 >= now() - INTERVAL 180 DAY +SETTINGS max_threads=2, max_final_threads=2, force_data_skipping_indices='v1_index', use_skip_indexes_if_final=0 +FORMAT LineAsString; diff --git a/parser/testdata/02781_data_skipping_index_merge_tree_min_for_seek/ast.json b/parser/testdata/02781_data_skipping_index_merge_tree_min_for_seek/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02781_data_skipping_index_merge_tree_min_for_seek/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02781_data_skipping_index_merge_tree_min_for_seek/metadata.json b/parser/testdata/02781_data_skipping_index_merge_tree_min_for_seek/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02781_data_skipping_index_merge_tree_min_for_seek/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02781_data_skipping_index_merge_tree_min_for_seek/query.sql b/parser/testdata/02781_data_skipping_index_merge_tree_min_for_seek/query.sql new file mode 100644 index 000000000..aafaa66fb --- /dev/null +++ b/parser/testdata/02781_data_skipping_index_merge_tree_min_for_seek/query.sql @@ -0,0 +1,24 @@ +-- Tags: no-random-merge-tree-settings, no-random-settings + +DROP TABLE IF EXISTS data; + +CREATE TABLE data +( + key Int, + v1 DateTime, + INDEX v1_index v1 TYPE minmax GRANULARITY 1 +) ENGINE=AggregatingMergeTree() +ORDER BY key +SETTINGS index_granularity=8192; + +SYSTEM STOP MERGES data; + +-- generate 50% of marks that cannot be skipped with v1_index +-- this will create a gap in marks +INSERT INTO data SELECT number, if(number/8192 % 2 == 0, now(), now() - INTERVAL 200 DAY) FROM numbers(1e6); +INSERT INTO data SELECT number+1e6, if(number/8192 % 2 == 0, now(), now() - INTERVAL 200 DAY) FROM numbers(1e6); + +-- Set `parallel_replicas_index_analysis_only_on_coordinator = 0` to prevent remote replicas from skipping index analysis in Parallel Replicas. +-- Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. 
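+-- A sketch of the expectation below (our reading of the test; the exact granule accounting is an assumption):
+-- with merge_tree_min_rows_for_seek=0 every gap pruned by v1_index is seeked over, so only about half of the
+-- 2e6 inserted rows are read and max_rows_to_read=1999999 holds; with merge_tree_min_rows_for_seek=1 the reader
+-- presumably merges adjacent mark ranges across those gaps, the pruned granules are read anyway, and the limit
+-- trips with TOO_MANY_ROWS. The pruning itself can be inspected with, e.g.:
+--   EXPLAIN indexes = 1 SELECT * FROM data WHERE v1 >= now() - INTERVAL 180 DAY;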
+SELECT * FROM data WHERE v1 >= now() - INTERVAL 180 DAY FORMAT Null SETTINGS max_threads=1, max_final_threads=1, force_data_skipping_indices='v1_index', merge_tree_min_rows_for_seek=0, max_rows_to_read=1999999, parallel_replicas_index_analysis_only_on_coordinator=0; +SELECT * FROM data WHERE v1 >= now() - INTERVAL 180 DAY FORMAT Null SETTINGS max_threads=1, max_final_threads=1, force_data_skipping_indices='v1_index', merge_tree_min_rows_for_seek=1, max_rows_to_read=1999999, parallel_replicas_index_analysis_only_on_coordinator=0; -- { serverError TOO_MANY_ROWS } diff --git a/parser/testdata/02782_inconsistent_formatting_and_constant_folding/ast.json b/parser/testdata/02782_inconsistent_formatting_and_constant_folding/ast.json new file mode 100644 index 000000000..0b09a32f3 --- /dev/null +++ b/parser/testdata/02782_inconsistent_formatting_and_constant_folding/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 6)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Literal Float64_-0" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_-0" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001124719, + "rows_read": 16, + "bytes_read": 579 + } +} diff --git a/parser/testdata/02782_inconsistent_formatting_and_constant_folding/metadata.json b/parser/testdata/02782_inconsistent_formatting_and_constant_folding/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02782_inconsistent_formatting_and_constant_folding/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02782_inconsistent_formatting_and_constant_folding/query.sql b/parser/testdata/02782_inconsistent_formatting_and_constant_folding/query.sql new file mode 100644 index 000000000..31278862a --- /dev/null +++ b/parser/testdata/02782_inconsistent_formatting_and_constant_folding/query.sql @@ -0,0 +1,41 @@ +SELECT -0, toTypeName(-0), -1, toTypeName(-1), -0., toTypeName(-0.); + +DROP TABLE IF EXISTS t4; +DROP TABLE IF EXISTS t7; + +create table t4 (c26 String) engine = Log; +create view t7 as select max(ref_3.c26) as c_2_c46_1 from t4 as ref_3; + +select + c_7_c4585_14 as c_4_c4593_5 + from + (select + avg(0) as c_7_c4572_1, + max(-0) as c_7_c4585_14 + from + t7 as ref_0 + group by ref_0.c_2_c46_1) as subq_0 +where c_4_c4593_5 <= multiIf(true, 1, exp10(c_4_c4593_5) <= 1, 1, 1); + +select x as c + from + (select 1 AS k, + max(0) as a, + max(-0) as x + from + t7 GROUP BY k) +where NOT ignore(c); + +SELECT x +FROM +( + SELECT + avg(0) AS c_7_c4572_1, + max(-0) AS x + FROM t7 AS ref_0 + GROUP BY ref_0.c_2_c46_1 +) +WHERE x <= multiIf(true, 1, exp10(x) <= 1, 1, 1); + +DROP TABLE t7; +DROP TABLE t4; diff --git a/parser/testdata/02782_values_null_to_lc_nullable/ast.json b/parser/testdata/02782_values_null_to_lc_nullable/ast.json new file mode 100644 index 
000000000..8a69e9681 --- /dev/null +++ b/parser/testdata/02782_values_null_to_lc_nullable/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function values (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 's LowCardinality(Nullable(String))'" + }, + { + "explain": " Literal NULL" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001191335, + "rows_read": 12, + "bytes_read": 476 + } +} diff --git a/parser/testdata/02782_values_null_to_lc_nullable/metadata.json b/parser/testdata/02782_values_null_to_lc_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02782_values_null_to_lc_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02782_values_null_to_lc_nullable/query.sql b/parser/testdata/02782_values_null_to_lc_nullable/query.sql new file mode 100644 index 000000000..250fe6b75 --- /dev/null +++ b/parser/testdata/02782_values_null_to_lc_nullable/query.sql @@ -0,0 +1,2 @@ +select * from values('s LowCardinality(Nullable(String))', (NULL)); + diff --git a/parser/testdata/02783_date_predicate_optimizations/ast.json b/parser/testdata/02783_date_predicate_optimizations/ast.json new file mode 100644 index 000000000..4bf703194 --- /dev/null +++ b/parser/testdata/02783_date_predicate_optimizations/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery source (children 1)" + }, + { + "explain": " Identifier source" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001248337, + "rows_read": 2, + "bytes_read": 65 + } +} diff --git a/parser/testdata/02783_date_predicate_optimizations/metadata.json b/parser/testdata/02783_date_predicate_optimizations/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02783_date_predicate_optimizations/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02783_date_predicate_optimizations/query.sql b/parser/testdata/02783_date_predicate_optimizations/query.sql new file mode 100644 index 000000000..b127af677 --- /dev/null +++ b/parser/testdata/02783_date_predicate_optimizations/query.sql @@ -0,0 +1,139 @@ +CREATE TABLE source +( + `ts` DateTime('UTC'), + `n` Int32 +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(ts) +ORDER BY tuple(); + +INSERT INTO source values ('2021-12-31 23:00:00', 0); + +SELECT * FROM source WHERE toYYYYMM(ts) = 202112; +SELECT * FROM source WHERE toYear(ts) = 2021; +SELECT * FROM source WHERE toYYYYMM(ts) = 202112 SETTINGS enable_analyzer=1; +SELECT * FROM source WHERE toYear(ts) = 2021 SETTINGS enable_analyzer=1; + +DROP TABLE IF EXISTS source; +CREATE TABLE source +( + `dt` Date, + `ts` DateTime, + `dt_32` Date32, + `ts_64` DateTime64(3), + `n` Int32 +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(ts) +ORDER BY tuple(); + +INSERT INTO source values ('2022-12-31', '2022-12-31 23:59:59', '2022-12-31', '2022-12-31 23:59:59.123', 0); +INSERT INTO source 
values ('2023-01-01', '2023-01-01 00:00:00', '2023-01-01', '2023-01-01 00:00:00.000', 1); +INSERT INTO source values ('2023-12-01', '2023-12-01 00:00:00', '2023-12-01', '2023-12-01 00:00:00.000', 2); +INSERT INTO source values ('2023-12-31', '2023-12-31 23:59:59', '2023-12-31', '2023-12-31 23:59:59.123', 3); +INSERT INTO source values ('2024-01-01', '2024-01-01 00:00:00', '2024-01-01', '2024-01-01 00:00:00.000', 4); + +SELECT 'Date'; +SELECT count(*) FROM source WHERE toYYYYMM(dt) = 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt) <> 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt) < 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt) <= 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt) > 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt) >= 202312; +SELECT count(*) FROM source WHERE toYear(dt) = 2023; +SELECT count(*) FROM source WHERE toYear(dt) <> 2023; +SELECT count(*) FROM source WHERE toYear(dt) < 2023; +SELECT count(*) FROM source WHERE toYear(dt) <= 2023; +SELECT count(*) FROM source WHERE toYear(dt) > 2023; +SELECT count(*) FROM source WHERE toYear(dt) >= 2023; +SELECT count(*) FROM source WHERE toYYYYMM(dt) = 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt) <> 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt) < 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt) <= 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt) > 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt) >= 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt) = 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt) <> 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt) < 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt) <= 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt) > 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt) >= 2023 SETTINGS enable_analyzer=1; + +SELECT 'DateTime'; +SELECT count(*) FROM source WHERE toYYYYMM(ts) = 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts) <> 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts) < 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts) <= 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts) > 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts) >= 202312; +SELECT count(*) FROM source WHERE toYear(ts) = 2023; +SELECT count(*) FROM source WHERE toYear(ts) <> 2023; +SELECT count(*) FROM source WHERE toYear(ts) < 2023; +SELECT count(*) FROM source WHERE toYear(ts) <= 2023; +SELECT count(*) FROM source WHERE toYear(ts) > 2023; +SELECT count(*) FROM source WHERE toYear(ts) >= 2023; +SELECT count(*) FROM source WHERE toYYYYMM(ts) = 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts) <> 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts) < 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts) <= 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts) > 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts) >= 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts) = 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts) <> 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts) < 2023 
SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts) <= 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts) > 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts) >= 2023 SETTINGS enable_analyzer=1; + +SELECT 'Date32'; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) = 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) <> 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) < 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) <= 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) > 202312; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) >= 202312; +SELECT count(*) FROM source WHERE toYear(dt_32) = 2023; +SELECT count(*) FROM source WHERE toYear(dt_32) <> 2023; +SELECT count(*) FROM source WHERE toYear(dt_32) < 2023; +SELECT count(*) FROM source WHERE toYear(dt_32) <= 2023; +SELECT count(*) FROM source WHERE toYear(dt_32) > 2023; +SELECT count(*) FROM source WHERE toYear(dt_32) >= 2023; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) = 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) <> 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) < 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) <= 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) > 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(dt_32) >= 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt_32) = 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt_32) <> 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt_32) < 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt_32) <= 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt_32) > 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(dt_32) >= 2023 SETTINGS enable_analyzer=1; + +SELECT 'DateTime64'; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) = 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) <> 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) < 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) <= 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) > 202312; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) >= 202312; +SELECT count(*) FROM source WHERE toYear(ts_64) = 2023; +SELECT count(*) FROM source WHERE toYear(ts_64) <> 2023; +SELECT count(*) FROM source WHERE toYear(ts_64) < 2023; +SELECT count(*) FROM source WHERE toYear(ts_64) <= 2023; +SELECT count(*) FROM source WHERE toYear(ts_64) > 2023; +SELECT count(*) FROM source WHERE toYear(ts_64) >= 2023; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) = 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) <> 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) < 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) <= 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) > 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYYYYMM(ts_64) >= 202312 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts_64) = 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts_64) <> 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts_64) < 2023 SETTINGS 
enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts_64) <= 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts_64) > 2023 SETTINGS enable_analyzer=1; +SELECT count(*) FROM source WHERE toYear(ts_64) >= 2023 SETTINGS enable_analyzer=1; +DROP TABLE source; diff --git a/parser/testdata/02783_max_bytes_to_read_in_schema_inference/ast.json b/parser/testdata/02783_max_bytes_to_read_in_schema_inference/ast.json new file mode 100644 index 000000000..2a0632f8e --- /dev/null +++ b/parser/testdata/02783_max_bytes_to_read_in_schema_inference/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000823619, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02783_max_bytes_to_read_in_schema_inference/metadata.json b/parser/testdata/02783_max_bytes_to_read_in_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02783_max_bytes_to_read_in_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02783_max_bytes_to_read_in_schema_inference/query.sql b/parser/testdata/02783_max_bytes_to_read_in_schema_inference/query.sql new file mode 100644 index 000000000..ef0381df1 --- /dev/null +++ b/parser/testdata/02783_max_bytes_to_read_in_schema_inference/query.sql @@ -0,0 +1,5 @@ +set input_format_max_rows_to_read_for_schema_inference=2; +set input_format_json_infer_incomplete_types_as_strings=0; +desc format('JSONEachRow', '{"a" : null}, {"a" : 42}') settings input_format_max_bytes_to_read_for_schema_inference=10; -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} +desc format('JSONEachRow', '{"a" : null}, {"a" : 42}') settings input_format_max_bytes_to_read_for_schema_inference=20; + diff --git a/parser/testdata/02783_parsedatetimebesteffort_syslog/ast.json b/parser/testdata/02783_parsedatetimebesteffort_syslog/ast.json new file mode 100644 index 000000000..565fdf3de --- /dev/null +++ b/parser/testdata/02783_parsedatetimebesteffort_syslog/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001219518, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02783_parsedatetimebesteffort_syslog/metadata.json b/parser/testdata/02783_parsedatetimebesteffort_syslog/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02783_parsedatetimebesteffort_syslog/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02783_parsedatetimebesteffort_syslog/query.sql b/parser/testdata/02783_parsedatetimebesteffort_syslog/query.sql new file mode 100644 index 000000000..94c4462d5 --- /dev/null +++ b/parser/testdata/02783_parsedatetimebesteffort_syslog/query.sql @@ -0,0 +1,55 @@ +SET session_timezone = 'UTC'; +SET formatdatetime_e_with_space_padding = 1; + +SELECT 'The reference time point is 2023-06-30 23:59:30'; +SELECT '───────────────────────────────────────────────'; +SELECT 'The argument is before the reference time point'; +SELECT '───────────────────────────────────────────────'; + +WITH + toDateTime('2023-06-30 23:59:30') AS dt_ref, + now() AS dt_now, + date_sub(DAY, 1, dt_now) as dt_before, + dateDiff(SECOND, dt_ref, dt_now) AS time_shift, + formatDateTime(dt_before, '%b %e %T') AS syslog_before +SELECT + 
formatDateTime(dt_before - time_shift, '%b %e %T') AS syslog_arg, + parseDateTimeBestEffort(syslog_before) - time_shift AS res, + parseDateTimeBestEffortOrNull(syslog_before) - time_shift AS res_null, + parseDateTimeBestEffortOrZero(syslog_before) - time_shift AS res_zero, + parseDateTimeBestEffortUS(syslog_before) - time_shift AS res_us, + parseDateTimeBestEffortUSOrNull(syslog_before) - time_shift AS res_us_null, + parseDateTimeBestEffortUSOrZero(syslog_before) - time_shift AS res_us_zero, + parseDateTime64BestEffort(syslog_before) - time_shift AS res64, + parseDateTime64BestEffortOrNull(syslog_before) - time_shift AS res64_null, + parseDateTime64BestEffortOrZero(syslog_before) - time_shift AS res64_zero, + parseDateTime64BestEffortUS(syslog_before) - time_shift AS res64_us, + parseDateTime64BestEffortUSOrNull(syslog_before) - time_shift AS res64_us_null, + parseDateTime64BestEffortUSOrZero(syslog_before) - time_shift AS res64_us_zero +FORMAT Vertical; + +SELECT '──────────────────────────────────────────────'; +SELECT 'The argument is after the reference time point'; +SELECT '──────────────────────────────────────────────'; + +WITH + toDateTime('2023-06-30 23:59:30') AS dt_ref, + now() AS dt_now, + date_add(DAY, 1, dt_now) as dt_after, + dateDiff(SECOND, dt_ref, dt_now) AS time_shift, + formatDateTime(dt_after, '%b %e %T') AS syslog_after +SELECT + formatDateTime(dt_after - time_shift, '%b %e %T') AS syslog_arg, + parseDateTimeBestEffort(syslog_after) - time_shift AS res, + parseDateTimeBestEffortOrNull(syslog_after) - time_shift AS res_null, + parseDateTimeBestEffortOrZero(syslog_after) - time_shift AS res_zero, + parseDateTimeBestEffortUS(syslog_after) - time_shift AS res_us, + parseDateTimeBestEffortUSOrNull(syslog_after) - time_shift AS res_us_null, + parseDateTimeBestEffortUSOrZero(syslog_after) - time_shift AS res_us_zero, + parseDateTime64BestEffort(syslog_after) - time_shift AS res64, + parseDateTime64BestEffortOrNull(syslog_after) - time_shift AS res64_null, + parseDateTime64BestEffortOrZero(syslog_after) - time_shift AS res64_zero, + parseDateTime64BestEffortUS(syslog_after) - time_shift AS res64_us, + parseDateTime64BestEffortUSOrNull(syslog_after) - time_shift AS res64_us_null, + parseDateTime64BestEffortUSOrZero(syslog_after) - time_shift AS res64_us_zero +FORMAT Vertical; diff --git a/parser/testdata/02784_move_all_conditions_to_prewhere_analyzer_asan/ast.json b/parser/testdata/02784_move_all_conditions_to_prewhere_analyzer_asan/ast.json new file mode 100644 index 000000000..36d5ef7ba --- /dev/null +++ b/parser/testdata/02784_move_all_conditions_to_prewhere_analyzer_asan/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_02784 (children 1)" + }, + { + "explain": " Identifier t_02784" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001459392, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/02784_move_all_conditions_to_prewhere_analyzer_asan/metadata.json b/parser/testdata/02784_move_all_conditions_to_prewhere_analyzer_asan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02784_move_all_conditions_to_prewhere_analyzer_asan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02784_move_all_conditions_to_prewhere_analyzer_asan/query.sql b/parser/testdata/02784_move_all_conditions_to_prewhere_analyzer_asan/query.sql new file mode 100644 index 000000000..3766e5b0c --- /dev/null +++ 
b/parser/testdata/02784_move_all_conditions_to_prewhere_analyzer_asan/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS t_02784; + +CREATE TABLE t_02784 (c1 UInt64, c2 UInt64) ENGINE=MergeTree() ORDER BY c1 SETTINGS min_bytes_for_wide_part=1; + +INSERT INTO t_02784 SELECT number, number FROM numbers(1); + +SET enable_analyzer=1; +SET move_all_conditions_to_prewhere=1; + +SELECT c1, c2 FROM t_02784 WHERE c1 = 0 AND c2 = 0; +SELECT c1, c2 FROM t_02784 WHERE c2 = 0 AND c1 = 0; +SELECT c2, c1 FROM t_02784 WHERE c1 = 0 AND c2 = 0; +SELECT c2, c1 FROM t_02784 WHERE c2 = 0 AND c1 = 0; + +DROP TABLE t_02784; diff --git a/parser/testdata/02784_projections_read_in_order_bug/ast.json b/parser/testdata/02784_projections_read_in_order_bug/ast.json new file mode 100644 index 000000000..d49864271 --- /dev/null +++ b/parser/testdata/02784_projections_read_in_order_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery events (children 1)" + }, + { + "explain": " Identifier events" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001476549, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/02784_projections_read_in_order_bug/metadata.json b/parser/testdata/02784_projections_read_in_order_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02784_projections_read_in_order_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02784_projections_read_in_order_bug/query.sql b/parser/testdata/02784_projections_read_in_order_bug/query.sql new file mode 100644 index 000000000..6f1939452 --- /dev/null +++ b/parser/testdata/02784_projections_read_in_order_bug/query.sql @@ -0,0 +1,48 @@ +DROP TABLE IF EXISTS events; + +create table events ( + `organisation_id` UUID, + `session_id` UUID, + `id` UUID DEFAULT generateUUIDv4(), + `timestamp` UInt64, + `payload` String, + `customer_id` UUID, + `call_id` String, + PROJECTION events_by_session_and_org + ( + SELECT * + ORDER BY + organisation_id, + session_id, + timestamp + ), + PROJECTION events_by_session + ( + SELECT * + ORDER BY + session_id, + timestamp + ), + PROJECTION events_by_session_and_customer + ( + SELECT * + ORDER BY + customer_id, + session_id, + timestamp + ), + PROJECTION events_by_call_id + ( + SELECT * + ORDER BY + call_id, + timestamp + )) engine = MergeTree order by (organisation_id, session_id, timestamp) settings index_granularity = 3; + +insert into events values (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)); +insert into events values (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 
'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(0), reinterpretAsUUID(1), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(1), reinterpretAsUUID(0), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)), (reinterpretAsUUID(3), reinterpretAsUUID(2), reinterpretAsUUID(0), toDateTime('2022-02-02', 'UTC'), toString(0), reinterpretAsUUID(0), toString(0)); + +set read_in_order_two_level_merge_threshold=1; +SELECT id, timestamp, payload FROM events WHERE (organisation_id = reinterpretAsUUID(1)) AND (session_id = reinterpretAsUUID(0)) ORDER BY timestamp, payload, id ASC; + +DROP TABLE events; diff --git a/parser/testdata/02784_schema_inference_null_as_default/ast.json b/parser/testdata/02784_schema_inference_null_as_default/ast.json new file mode 100644 index 000000000..35e1d9d41 --- /dev/null +++ b/parser/testdata/02784_schema_inference_null_as_default/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DescribeQuery (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier JSONEachRow" + }, + { + "explain": " Literal '{\"x\" : null}, {\"x\" : 42}'" + }, + { + "explain": " Set" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001310391, + "rows_read": 7, + "bytes_read": 239 + } +} diff --git a/parser/testdata/02784_schema_inference_null_as_default/metadata.json b/parser/testdata/02784_schema_inference_null_as_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02784_schema_inference_null_as_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02784_schema_inference_null_as_default/query.sql b/parser/testdata/02784_schema_inference_null_as_default/query.sql new file mode 100644 index 000000000..571e3ab4f --- /dev/null +++ b/parser/testdata/02784_schema_inference_null_as_default/query.sql @@ -0,0 +1,7 @@ +desc format(JSONEachRow, '{"x" : null}, {"x" : 42}') settings schema_inference_make_columns_nullable=1; +select * from format(JSONEachRow, '{"x" : null}, {"x" : 42}') settings schema_inference_make_columns_nullable=1; +desc format(JSONEachRow, '{"x" : null}, {"x" : 42}') settings schema_inference_make_columns_nullable='auto', input_format_null_as_default=0; +select * from format(JSONEachRow, '{"x" : null}, {"x" : 42}') settings schema_inference_make_columns_nullable='auto', input_format_null_as_default=0; +desc format(JSONEachRow, '{"x" : null}, {"x" : 42}') settings schema_inference_make_columns_nullable=0, input_format_null_as_default=1; +select * from format(JSONEachRow, '{"x" : null}, {"x" : 42}') settings schema_inference_make_columns_nullable=0, input_format_null_as_default=1; + diff --git a/parser/testdata/02785_date_predicate_optimizations_ast_query_tree_rewrite/ast.json b/parser/testdata/02785_date_predicate_optimizations_ast_query_tree_rewrite/ast.json new file mode 100644 index 000000000..91316bd5e --- /dev/null +++ 
b/parser/testdata/02785_date_predicate_optimizations_ast_query_tree_rewrite/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery date_t (children 1)" + }, + { + "explain": " Identifier date_t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001398633, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/02785_date_predicate_optimizations_ast_query_tree_rewrite/metadata.json b/parser/testdata/02785_date_predicate_optimizations_ast_query_tree_rewrite/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02785_date_predicate_optimizations_ast_query_tree_rewrite/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02785_date_predicate_optimizations_ast_query_tree_rewrite/query.sql b/parser/testdata/02785_date_predicate_optimizations_ast_query_tree_rewrite/query.sql new file mode 100644 index 000000000..5ff62cb4b --- /dev/null +++ b/parser/testdata/02785_date_predicate_optimizations_ast_query_tree_rewrite/query.sql @@ -0,0 +1,75 @@ +DROP TABLE IF EXISTS date_t; +CREATE TABLE date_t (id UInt32, value1 String, date1 Date) ENGINE ReplacingMergeTree() ORDER BY id; + +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) <> 1993 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) <> 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) < 1993 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) < 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) > 1993 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) > 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) <= 1993 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) <= 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) >= 1993 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) >= 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYear(date1) BETWEEN 1993 AND 1997 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYear(date1) BETWEEN 1993 AND 1997 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE (toYear(date1) = 1993 OR toYear(date1) = 1994) AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE (toYear(date1) = 1993 OR toYear(date1) = 1994) AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1, toYear(date1) as year1 FROM date_t WHERE year1 = 1993 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1, toYear(date1) as year1 FROM date_t WHERE year1 = 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE 1993 > toYear(date1) AND id BETWEEN 1 AND 3; +EXPLAIN 
QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE 1993 > toYear(date1) AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t PREWHERE toYear(date1) = 1993 WHERE id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t PREWHERE toYear(date1) = 1993 WHERE id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE id BETWEEN 1 AND 3 HAVING toYear(date1) = 1993; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE id BETWEEN 1 AND 3 HAVING toYear(date1) = 1993 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199300 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199300 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199313 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199313 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199203 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) = 199203 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) <> 199203 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) <> 199203 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) < 199203 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) < 199203 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) > 199203 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) > 199203 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) <= 199203 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) <= 199203 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE toYYYYMM(date1) >= 199203 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE toYYYYMM(date1) >= 199203 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date_t WHERE (toYYYYMM(date1) >= 199203 OR toYear(date1) = 1993) AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date_t WHERE (toYYYYMM(date1) >= 199203 OR toYear(date1) = 1993) AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +DROP TABLE date_t; + +DROP TABLE IF EXISTS datetime_t; +CREATE TABLE datetime_t (id UInt32, value1 String, date1 Datetime) ENGINE ReplacingMergeTree() ORDER BY id; + +EXPLAIN SYNTAX SELECT value1 FROM datetime_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM datetime_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM datetime_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3; 
+EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM datetime_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +DROP TABLE datetime_t; + +DROP TABLE IF EXISTS date32_t; +CREATE TABLE date32_t (id UInt32, value1 String, date1 Date32) ENGINE ReplacingMergeTree() ORDER BY id; + +EXPLAIN SYNTAX SELECT value1 FROM date32_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date32_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM date32_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM date32_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +DROP TABLE date32_t; + +DROP TABLE IF EXISTS datetime64_t; +CREATE TABLE datetime64_t (id UInt32, value1 String, date1 Datetime64) ENGINE ReplacingMergeTree() ORDER BY id; + +EXPLAIN SYNTAX SELECT value1 FROM datetime64_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM datetime64_t WHERE toYear(date1) = 1993 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +EXPLAIN SYNTAX SELECT value1 FROM datetime64_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3; +EXPLAIN QUERY TREE run_passes=1 SELECT value1 FROM datetime64_t WHERE toYYYYMM(date1) = 199312 AND id BETWEEN 1 AND 3 SETTINGS enable_analyzer=1; +DROP TABLE datetime64_t; diff --git a/parser/testdata/02785_global_join_too_many_columns/ast.json b/parser/testdata/02785_global_join_too_many_columns/ast.json new file mode 100644 index 000000000..03ab19c00 --- /dev/null +++ b/parser/testdata/02785_global_join_too_many_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery local (children 1)" + }, + { + "explain": " Identifier local" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001174454, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/02785_global_join_too_many_columns/metadata.json b/parser/testdata/02785_global_join_too_many_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02785_global_join_too_many_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02785_global_join_too_many_columns/query.sql b/parser/testdata/02785_global_join_too_many_columns/query.sql new file mode 100644 index 000000000..a49aae25f --- /dev/null +++ b/parser/testdata/02785_global_join_too_many_columns/query.sql @@ -0,0 +1,14 @@ +drop table if exists local; +drop table if exists distr; + +create table local (a UInt64, b UInt64, c UInt64, d UInt64, e UInt64, f UInt64, g UInt64, h UInt64) engine = Log; +create table distr as local engine = Distributed('test_cluster_two_shards', currentDatabase(), local); + +insert into local (a) select number from numbers(10); + +set max_columns_to_read=1; +select count() from distr as l global all left join distr as r on l.a = r.a; + +drop table if exists local; +drop table if exists distr; + diff --git a/parser/testdata/02785_left_anti_join_bug/ast.json b/parser/testdata/02785_left_anti_join_bug/ast.json new file mode 100644 index 000000000..0ba6bd201 --- /dev/null +++ b/parser/testdata/02785_left_anti_join_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + 
"elapsed": 0.000984987, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02785_left_anti_join_bug/metadata.json b/parser/testdata/02785_left_anti_join_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02785_left_anti_join_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02785_left_anti_join_bug/query.sql b/parser/testdata/02785_left_anti_join_bug/query.sql new file mode 100644 index 000000000..107672ace --- /dev/null +++ b/parser/testdata/02785_left_anti_join_bug/query.sql @@ -0,0 +1,15 @@ +SET allow_suspicious_low_cardinality_types=1; + +DROP TABLE IF EXISTS test_table; +DROP TABLE IF EXISTS test_table__fuzz_3; + +CREATE TABLE test_table (`id` Float32, `value` Float32) ENGINE = MergeTree ORDER BY id; +INSERT INTO test_table VALUES (-10.75, 95.57); + +CREATE TABLE test_table__fuzz_3 (`id` LowCardinality(Nullable(Float32)), `value` Float32) ENGINE = MergeTree ORDER BY id SETTINGS allow_nullable_key=1; + +insert into test_table__fuzz_3 select * from generateRandom() limit 10; +SELECT * FROM (SELECT CAST('104857.5', 'Float32'), corr(NULL, id, id) AS corr_value FROM test_table__fuzz_3 GROUP BY value) AS subquery ANTI LEFT JOIN test_table ON subquery.corr_value = test_table.id format Null; + +DROP TABLE IF EXISTS test_table; +DROP TABLE IF EXISTS test_table__fuzz_3; diff --git a/parser/testdata/02785_summing_merge_tree_datetime64/ast.json b/parser/testdata/02785_summing_merge_tree_datetime64/ast.json new file mode 100644 index 000000000..3b284d97a --- /dev/null +++ b/parser/testdata/02785_summing_merge_tree_datetime64/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery summing_merge_tree_datetime64 (children 1)" + }, + { + "explain": " Identifier summing_merge_tree_datetime64" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001104319, + "rows_read": 2, + "bytes_read": 110 + } +} diff --git a/parser/testdata/02785_summing_merge_tree_datetime64/metadata.json b/parser/testdata/02785_summing_merge_tree_datetime64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02785_summing_merge_tree_datetime64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02785_summing_merge_tree_datetime64/query.sql b/parser/testdata/02785_summing_merge_tree_datetime64/query.sql new file mode 100644 index 000000000..db00f1893 --- /dev/null +++ b/parser/testdata/02785_summing_merge_tree_datetime64/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS summing_merge_tree_datetime64; + +CREATE TABLE summing_merge_tree_datetime64 ( `pk` UInt64, `timestamp` DateTime64(3), `value` UInt64 ) +ENGINE = SummingMergeTree() ORDER BY pk; + +INSERT INTO summing_merge_tree_datetime64 SELECT 1 pk, '2023-05-01 23:55:55.100' timestamp, 1 value; +INSERT INTO summing_merge_tree_datetime64 SELECT 1 pk, '2023-05-01 23:55:55.100' timestamp, 2 value; +INSERT INTO summing_merge_tree_datetime64 SELECT 1 pk, '2023-05-01 23:55:55.100' timestamp, 3 value; +INSERT INTO summing_merge_tree_datetime64 SELECT 1 pk, '2023-05-01 23:55:55.100' timestamp, 4 value; +INSERT INTO summing_merge_tree_datetime64 SELECT 1 pk, '2023-05-01 23:55:55.100' timestamp, 5 value; + +SELECT * FROM summing_merge_tree_datetime64 FINAL; +DROP TABLE summing_merge_tree_datetime64; diff --git a/parser/testdata/02786_max_execution_time_leaf/ast.json b/parser/testdata/02786_max_execution_time_leaf/ast.json new file mode 
100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02786_max_execution_time_leaf/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02786_max_execution_time_leaf/metadata.json b/parser/testdata/02786_max_execution_time_leaf/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02786_max_execution_time_leaf/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02786_max_execution_time_leaf/query.sql b/parser/testdata/02786_max_execution_time_leaf/query.sql new file mode 100644 index 000000000..1531990c0 --- /dev/null +++ b/parser/testdata/02786_max_execution_time_leaf/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest +SET max_rows_to_read = 0; +SELECT count() FROM cluster('test_cluster_two_shards', view( SELECT * FROM numbers(10000000000) )) SETTINGS max_execution_time_leaf = 1; -- { serverError TIMEOUT_EXCEEDED } +-- Can return partial result +SELECT count() FROM cluster('test_cluster_two_shards', view( SELECT * FROM numbers(10000000000) )) FORMAT Null SETTINGS max_execution_time_leaf = 1, timeout_overflow_mode_leaf = 'break'; diff --git a/parser/testdata/02786_transform_float/ast.json b/parser/testdata/02786_transform_float/ast.json new file mode 100644 index 000000000..4f9c01d6f --- /dev/null +++ b/parser/testdata/02786_transform_float/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function transform (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal Array_[UInt64_1]" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toFloat32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function toFloat32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001165165, + "rows_read": 22, + "bytes_read": 880 + } +} diff --git a/parser/testdata/02786_transform_float/metadata.json b/parser/testdata/02786_transform_float/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02786_transform_float/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02786_transform_float/query.sql b/parser/testdata/02786_transform_float/query.sql new file mode 100644 index 000000000..4229425b0 --- /dev/null +++ b/parser/testdata/02786_transform_float/query.sql @@ -0,0 +1,3 @@ +select transform(number, [1], [toFloat32(1)], toFloat32(1)) from numbers(3); +SELECT '---'; +select transform(number, [3], [toFloat32(1)], toFloat32(1)) from numbers(6); diff --git a/parser/testdata/02787_transform_null/ast.json b/parser/testdata/02787_transform_null/ast.json new file mode 100644 index 
000000000..8d234690a --- /dev/null +++ b/parser/testdata/02787_transform_null/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function transform (alias result) (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal Array_[UInt64_0, UInt64_1]" + }, + { + "explain": " Literal Array_['ZERO', 'ONE']" + }, + { + "explain": " Literal 'DEFAULT'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001435556, + "rows_read": 10, + "bytes_read": 399 + } +} diff --git a/parser/testdata/02787_transform_null/metadata.json b/parser/testdata/02787_transform_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02787_transform_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02787_transform_null/query.sql b/parser/testdata/02787_transform_null/query.sql new file mode 100644 index 000000000..64a771f0f --- /dev/null +++ b/parser/testdata/02787_transform_null/query.sql @@ -0,0 +1,40 @@ +SELECT transform(0, [0, 1], ['ZERO', 'ONE'], 'DEFAULT') AS result; +SELECT transform(0, [0, 1], ['ZERO', 'ONE'], NULL) AS result; + +SELECT CASE 1 + WHEN 0 THEN 'ZERO' + WHEN 1 THEN 'ONE' + ELSE 'NONE' +END AS result; + +SELECT CASE 1 + WHEN 0 THEN NULL + WHEN 1 THEN 'ONE' + ELSE 'NONE' +END AS result; + +select + case 1 + when 1 then 'a' + else 'b' + end value; + +select + case 1 + when 1 then 'a' + end value; + +SELECT + d, + toInt16OrNull(d), + caseWithExpression(d, 'a', 3, toInt16OrZero(d)) AS case_zero, + caseWithExpression(d, 'a', 3, toInt16OrNull(d)) AS case_null, + if(d = 'a', 3, toInt16OrZero(d)) AS if_zero, + if(d = 'a', 3, toInt16OrNull(d)) AS if_null +FROM +( + SELECT arrayJoin(['', '1', 'a']) AS d +) +ORDER BY + case_zero ASC, + d ASC; diff --git a/parser/testdata/02788_current_schemas_function/ast.json b/parser/testdata/02788_current_schemas_function/ast.json new file mode 100644 index 000000000..2a793f7e9 --- /dev/null +++ b/parser/testdata/02788_current_schemas_function/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function current_schemas (alias result) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Bool_1" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001186784, + "rows_read": 7, + "bytes_read": 281 + } +} diff --git a/parser/testdata/02788_current_schemas_function/metadata.json b/parser/testdata/02788_current_schemas_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02788_current_schemas_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02788_current_schemas_function/query.sql b/parser/testdata/02788_current_schemas_function/query.sql new file mode 100644 index 000000000..8cf03738d --- /dev/null +++ b/parser/testdata/02788_current_schemas_function/query.sql @@ -0,0 +1,4 @@ +SELECT 
current_schemas(true) AS result; +SELECT current_schemas(false) AS result; +SELECT current_schemas(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT current_schemas(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } \ No newline at end of file diff --git a/parser/testdata/02788_fix_logical_error_in_sorting/ast.json b/parser/testdata/02788_fix_logical_error_in_sorting/ast.json new file mode 100644 index 000000000..5030d8015 --- /dev/null +++ b/parser/testdata/02788_fix_logical_error_in_sorting/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001326359, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02788_fix_logical_error_in_sorting/metadata.json b/parser/testdata/02788_fix_logical_error_in_sorting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02788_fix_logical_error_in_sorting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02788_fix_logical_error_in_sorting/query.sql b/parser/testdata/02788_fix_logical_error_in_sorting/query.sql new file mode 100644 index 000000000..97741e6fc --- /dev/null +++ b/parser/testdata/02788_fix_logical_error_in_sorting/query.sql @@ -0,0 +1,85 @@ +SET allow_deprecated_error_prone_window_functions = 1; + +DROP TABLE IF EXISTS session_events; +DROP TABLE IF EXISTS event_types; + +CREATE TABLE session_events +( + clientId UInt64, + sessionId String, + pageId UInt64, + eventNumber UInt64, + timestamp UInt64, + type LowCardinality(String), + data String +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(toDate(pageId / 1000)) +ORDER BY (clientId, sessionId, pageId, timestamp); + +CREATE TABLE event_types +( + type String, + active Int16 +) +ENGINE = MergeTree +PARTITION BY substring(type, 1, 1) +ORDER BY (type, active); + +SYSTEM STOP MERGES session_events; +SYSTEM STOP MERGES event_types; + +INSERT INTO session_events SELECT + 141, + '693de636-6d9b-47b7-b52a-33bd303b6255', + 1686053240314, + number, + number, + toString(number % 10), + '' +FROM numbers_mt(100000); + +INSERT INTO session_events SELECT + 141, + '693de636-6d9b-47b7-b52a-33bd303b6255', + 1686053240314, + number, + number, + toString(number % 10), + '' +FROM numbers_mt(100000); + +INSERT INTO event_types SELECT + toString(number % 10), + number % 2 +FROM numbers(20); + +SET optimize_sorting_by_input_stream_properties = 1; + +-- We check only that no exception was thrown +EXPLAIN PIPELINE +SELECT + pageId, + [prev_active_ts, timestamp] AS inactivity_timestamps, + timestamp - prev_active_ts AS inactive_duration, + timestamp +FROM +( + SELECT + pageId, + timestamp, + neighbor(timestamp, -1) AS prev_active_ts + FROM session_events + WHERE (type IN ( + SELECT type + FROM event_types + WHERE active = 1 + )) AND (sessionId = '693de636-6d9b-47b7-b52a-33bd303b6255') AND (session_events.clientId = 141) AND (pageId = 1686053240314) + ORDER BY timestamp ASC +) +WHERE runningDifference(timestamp) >= 500 +ORDER BY timestamp ASC +FORMAT Null; + +DROP TABLE session_events; +DROP TABLE event_types; diff --git a/parser/testdata/02789_describe_table_settings/ast.json b/parser/testdata/02789_describe_table_settings/ast.json new file mode 100644 index 000000000..73b428db4 --- /dev/null +++ b/parser/testdata/02789_describe_table_settings/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"DescribeQuery (children 3)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier JSONEachRow" + }, + { + "explain": " Literal '{\"id\" : 1, \"age\" : 25, \"name\" : \"Josh\", \"status\" : null, \"hobbies\" : [\"football\", \"cooking\"]}'" + }, + { + "explain": " Set" + }, + { + "explain": " Identifier CSV" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001374269, + "rows_read": 8, + "bytes_read": 331 + } +} diff --git a/parser/testdata/02789_describe_table_settings/metadata.json b/parser/testdata/02789_describe_table_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02789_describe_table_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02789_describe_table_settings/query.sql b/parser/testdata/02789_describe_table_settings/query.sql new file mode 100644 index 000000000..64b5b21fe --- /dev/null +++ b/parser/testdata/02789_describe_table_settings/query.sql @@ -0,0 +1,3 @@ +DESC format(JSONEachRow, '{"id" : 1, "age" : 25, "name" : "Josh", "status" : null, "hobbies" : ["football", "cooking"]}') SETTINGS schema_inference_hints = 'age LowCardinality(UInt8), status Nullable(String)', allow_suspicious_low_cardinality_types=1 FORMAT CSV; +DESC format(JSONEachRow, '{"id" : 1, "age" : 25, "name" : "Josh", "status" : null, "hobbies" : ["football", "cooking"]}') FORMAT CSV SETTINGS schema_inference_hints = 'age LowCardinality(UInt8), status Nullable(String)', allow_suspicious_low_cardinality_types=1; +DESC format(JSONEachRow, '{"id" : 1, "age" : 25, "name" : "Josh", "status" : null, "hobbies" : ["football", "cooking"]}') FORMAT CSV SETTINGS schema_inference_hints = 'age LowCardinality(UInt8), status Nullable(String)', allow_suspicious_low_cardinality_types=1 SETTINGS max_threads=0; -- { clientError SYNTAX_ERROR } diff --git a/parser/testdata/02789_functions_after_sorting_and_columns_with_same_names_bug/ast.json b/parser/testdata/02789_functions_after_sorting_and_columns_with_same_names_bug/ast.json new file mode 100644 index 000000000..dcabf9b38 --- /dev/null +++ b/parser/testdata/02789_functions_after_sorting_and_columns_with_same_names_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000956301, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02789_functions_after_sorting_and_columns_with_same_names_bug/metadata.json b/parser/testdata/02789_functions_after_sorting_and_columns_with_same_names_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02789_functions_after_sorting_and_columns_with_same_names_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02789_functions_after_sorting_and_columns_with_same_names_bug/query.sql b/parser/testdata/02789_functions_after_sorting_and_columns_with_same_names_bug/query.sql new file mode 100644 index 000000000..4a9ede363 --- /dev/null +++ b/parser/testdata/02789_functions_after_sorting_and_columns_with_same_names_bug/query.sql @@ -0,0 +1,133 @@ +drop table if exists test; +drop table if exists test1; + +CREATE TABLE test +( + `pt` String, + `count_distinct_exposure_uv` AggregateFunction(uniqHLL12, Int64) +) +ENGINE 
= AggregatingMergeTree +ORDER BY pt; + +SELECT * +FROM +( + SELECT m0.pt AS pt + ,m0.`exposure_uv` AS exposure_uv + ,round(m2.exposure_uv,4) AS exposure_uv_hb_last_value + ,if(m2.exposure_uv IS NULL OR m2.exposure_uv = 0,NULL,round((m0.exposure_uv - m2.exposure_uv) * 1.0 / m2.exposure_uv,4)) AS exposure_uv_hb_diff_percent + ,round(m1.exposure_uv,4) AS exposure_uv_tb_last_value + ,if(m1.exposure_uv IS NULL OR m1.exposure_uv = 0,NULL,round((m0.exposure_uv - m1.exposure_uv) * 1.0 / m1.exposure_uv,4)) AS exposure_uv_tb_diff_percent + FROM + ( + SELECT m0.pt AS pt + ,`exposure_uv` AS `exposure_uv` + FROM + ( + SELECT pt AS pt + ,CASE WHEN COUNT(`exposure_uv`) > 0 THEN AVG(`exposure_uv`) ELSE 0 END AS `exposure_uv` + FROM + ( + SELECT pt AS pt + ,uniqHLL12Merge(count_distinct_exposure_uv) AS `exposure_uv` + FROM test + GROUP BY pt + ) m + GROUP BY pt + ) m0 + ) m0 + LEFT JOIN + ( + SELECT m0.pt AS pt + ,`exposure_uv` AS `exposure_uv` + FROM + ( + SELECT formatDateTime(addYears(parseDateTimeBestEffort(pt),1),'%Y%m%d') AS pt + ,CASE WHEN COUNT(`exposure_uv`) > 0 THEN AVG(`exposure_uv`) ELSE 0 END AS `exposure_uv` + FROM + ( + SELECT pt AS pt + ,uniqHLL12Merge(count_distinct_exposure_uv) AS `exposure_uv` + FROM test + GROUP BY pt + ) m + GROUP BY pt + ) m0 + ) m1 + ON m0.pt = m1.pt + LEFT JOIN + ( + SELECT m0.pt AS pt + ,`exposure_uv` AS `exposure_uv` + FROM + ( + SELECT formatDateTime(addDays(toDate(parseDateTimeBestEffort(pt)),1),'%Y%m%d') AS pt + ,CASE WHEN COUNT(`exposure_uv`) > 0 THEN AVG(`exposure_uv`) ELSE 0 END AS `exposure_uv` + FROM + ( + SELECT pt AS pt + ,uniqHLL12Merge(count_distinct_exposure_uv) AS `exposure_uv` + FROM test + GROUP BY pt + ) m + GROUP BY pt + ) m0 + ) m2 + ON m0.pt = m2.pt +) c0 +ORDER BY pt ASC, exposure_uv DESC +settings join_use_nulls = 1; + +CREATE TABLE test1 +( + `pt` String, + `exposure_uv` Float64 +) +ENGINE = Memory; + +SELECT * +FROM +( + SELECT m0.pt + ,m0.exposure_uv AS exposure_uv + ,round(m2.exposure_uv,4) + FROM + ( + SELECT pt + ,exposure_uv + FROM test1 + ) m0 + LEFT JOIN + ( + SELECT pt + ,exposure_uv + FROM test1 + ) m1 + ON m0.pt = m1.pt + LEFT JOIN + ( + SELECT pt + ,exposure_uv + FROM test1 + ) m2 + ON m0.pt = m2.pt +) c0 +ORDER BY exposure_uv +settings join_use_nulls = 1; + +SELECT + pt AS pt, + exposure_uv AS exposure_uv +FROM +( + SELECT + pt + FROM test1 +) AS m0 +FULL OUTER JOIN +( + SELECT + pt, + exposure_uv + FROM test1 +) AS m1 ON m0.pt = m1.pt; diff --git a/parser/testdata/02789_functions_after_sorting_and_columns_with_same_names_bug_2/ast.json b/parser/testdata/02789_functions_after_sorting_and_columns_with_same_names_bug_2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02789_functions_after_sorting_and_columns_with_same_names_bug_2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02789_functions_after_sorting_and_columns_with_same_names_bug_2/metadata.json b/parser/testdata/02789_functions_after_sorting_and_columns_with_same_names_bug_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02789_functions_after_sorting_and_columns_with_same_names_bug_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02789_functions_after_sorting_and_columns_with_same_names_bug_2/query.sql b/parser/testdata/02789_functions_after_sorting_and_columns_with_same_names_bug_2/query.sql new file mode 100644 index 000000000..b0221635f --- /dev/null +++ 
b/parser/testdata/02789_functions_after_sorting_and_columns_with_same_names_bug_2/query.sql @@ -0,0 +1,107 @@ +create table test1 ( + `pt` String, + `brand_name` String, + `total_indirect_order_cnt` Float64, + `total_indirect_gmv` Float64 +) ENGINE = Memory; + +create table test2 ( + `pt` String, + `brand_name` String, + `exposure_uv` Float64, + `click_uv` Float64 +) ENGINE = Memory; + +INSERT INTO test1 (`pt`, `brand_name`, `total_indirect_order_cnt`, `total_indirect_gmv`) VALUES ('20230625', 'LINING', 2232, 1008710), ('20230625', 'adidas', 125, 58820), ('20230625', 'Nike', 1291, 1033020), ('20230626', 'Nike', 1145, 938926), ('20230626', 'LINING', 1904, 853336), ('20230626', 'adidas', 133, 62546), ('20220626', 'LINING', 3747, 1855203), ('20220626', 'Nike', 2295, 1742665), ('20220626', 'adidas', 302, 122388); + +INSERT INTO test2 (`pt`, `brand_name`, `exposure_uv`, `click_uv`) VALUES ('20230625', 'Nike', 2012913, 612831), ('20230625', 'adidas', 480277, 96176), ('20230625', 'LINING', 2474234, 627814), ('20230626', 'Nike', 1934666, 610770), ('20230626', 'adidas', 469904, 91117), ('20230626', 'LINING', 2285142, 599765), ('20220626', 'Nike', 2979656, 937166), ('20220626', 'adidas', 704751, 124250), ('20220626', 'LINING', 3163884, 1010221); + +SELECT * FROM ( + SELECT m0.pt AS pt + ,m0.`uvctr` AS uvctr + ,round(m1.uvctr,4) AS uvctr_hb_last_value + ,round(m2.uvctr,4) AS uvctr_tb_last_value + FROM + ( + SELECT m0.pt AS pt + ,COALESCE(m0.brand_name,m1.brand_name) AS brand_name + ,if(isNaN(`click_uv` / `exposure_uv`) OR isInfinite(`click_uv` / `exposure_uv`),NULL,`click_uv` / `exposure_uv`) AS `uvctr` + FROM + ( + SELECT pt AS pt + ,brand_name AS `brand_name` + ,exposure_uv AS `exposure_uv` + ,click_uv AS `click_uv` + FROM test2 + WHERE pt = '20230626' + ) m0 + FULL JOIN + ( + SELECT pt AS pt + ,brand_name AS `brand_name` + ,total_indirect_order_cnt AS `total_indirect_order_cnt` + ,total_indirect_gmv AS `total_indirect_gmv` + FROM test1 + WHERE pt = '20230626' + ) m1 + ON m0.brand_name = m1.brand_name AND m0.pt = m1.pt + ) m0 + LEFT JOIN + ( + SELECT m0.pt AS pt + ,if(isNaN(`click_uv` / `exposure_uv`) OR isInfinite(`click_uv` / `exposure_uv`),NULL,`click_uv` / `exposure_uv`) AS `uvctr` + ,COALESCE(m0.brand_name,m1.brand_name) AS brand_name + ,`exposure_uv` AS `exposure_uv` + ,`click_uv` + FROM + ( + SELECT pt AS pt + ,brand_name AS `brand_name` + ,exposure_uv AS `exposure_uv` + ,click_uv AS `click_uv` + FROM test2 + WHERE pt = '20230625' + ) m0 + FULL JOIN + ( + SELECT pt AS pt + ,brand_name AS `brand_name` + ,total_indirect_order_cnt AS `total_indirect_order_cnt` + ,total_indirect_gmv AS `total_indirect_gmv` + FROM test1 + WHERE pt = '20230625' + ) m1 + ON m0.brand_name = m1.brand_name AND m0.pt = m1.pt + ) m1 + ON m0.brand_name = m1.brand_name AND m0.pt = m1.pt + LEFT JOIN + ( + SELECT m0.pt AS pt + ,if(isNaN(`click_uv` / `exposure_uv`) OR isInfinite(`click_uv` / `exposure_uv`),NULL,`click_uv` / `exposure_uv`) AS `uvctr` + ,COALESCE(m0.brand_name,m1.brand_name) AS brand_name + ,`exposure_uv` AS `exposure_uv` + ,`click_uv` + FROM + ( + SELECT pt AS pt + ,brand_name AS `brand_name` + ,exposure_uv AS `exposure_uv` + ,click_uv AS `click_uv` + FROM test2 + WHERE pt = '20220626' + ) m0 + FULL JOIN + ( + SELECT pt AS pt + ,brand_name AS `brand_name` + ,total_indirect_order_cnt AS `total_indirect_order_cnt` + ,total_indirect_gmv AS `total_indirect_gmv` + FROM test1 + WHERE pt = '20220626' + ) m1 + ON m0.brand_name = m1.brand_name AND m0.pt = m1.pt + ) m2 + ON m0.brand_name = m2.brand_name AND m0.pt = 
m2.pt +) c0 +ORDER BY pt ASC, uvctr DESC; + diff --git a/parser/testdata/02789_jit_cannot_convert_column/ast.json b/parser/testdata/02789_jit_cannot_convert_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02789_jit_cannot_convert_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02789_jit_cannot_convert_column/metadata.json b/parser/testdata/02789_jit_cannot_convert_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02789_jit_cannot_convert_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02789_jit_cannot_convert_column/query.sql b/parser/testdata/02789_jit_cannot_convert_column/query.sql new file mode 100644 index 000000000..f5e694a38 --- /dev/null +++ b/parser/testdata/02789_jit_cannot_convert_column/query.sql @@ -0,0 +1,11 @@ +SELECT + sum(c), + toInt32((h - null::Nullable(DateTime)) / 3600) + 1 AS a +FROM +( + SELECT count() AS c, h + FROM ( SELECT now() AS h ) + WHERE toInt32((h - null::Nullable(DateTime)) / 3600) + 1 = 1 + GROUP BY h +) +GROUP BY a settings min_count_to_compile_expression = 0; diff --git a/parser/testdata/02789_set_index_nullable_condition_bug/ast.json b/parser/testdata/02789_set_index_nullable_condition_bug/ast.json new file mode 100644 index 000000000..1dbaa436a --- /dev/null +++ b/parser/testdata/02789_set_index_nullable_condition_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001190088, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02789_set_index_nullable_condition_bug/metadata.json b/parser/testdata/02789_set_index_nullable_condition_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02789_set_index_nullable_condition_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02789_set_index_nullable_condition_bug/query.sql b/parser/testdata/02789_set_index_nullable_condition_bug/query.sql new file mode 100644 index 000000000..dee0418d1 --- /dev/null +++ b/parser/testdata/02789_set_index_nullable_condition_bug/query.sql @@ -0,0 +1,118 @@ +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + col1 String, + col2 String, + INDEX test_table_col2_idx col2 TYPE set(0) GRANULARITY 1 +) ENGINE = MergeTree() +ORDER BY col1 +AS SELECT 'v1', 'v2'; + +SELECT * FROM tab +WHERE 1 == 1 AND col1 == col1 OR + 0 AND col2 == NULL; + +DROP TABLE tab; + +-- Test for issue #75485 + +SELECT 'Bulk filtering enabled'; +set secondary_indices_enable_bulk_filtering = 1; + +CREATE TABLE tab +( + col Nullable(Boolean), + INDEX col_idx col TYPE set(0) +) +ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO tab VALUES + (DEFAULT), + (DEFAULT); + +SELECT count() FROM tab WHERE col OR col IS NULL; +DROP TABLE tab; + +CREATE TABLE tab +( + col Nullable(Boolean), + INDEX col_idx col TYPE set(0) +) +ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO tab VALUES + (DEFAULT), + (DEFAULT), + (TRUE); + +SELECT count() FROM tab WHERE col OR col IS NULL; +DROP TABLE tab; + +CREATE TABLE tab +( + col Nullable(Boolean), + INDEX col_idx col TYPE set(0) +) +ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO tab VALUES + (DEFAULT), + (DEFAULT), + (FALSE); + +SELECT count() FROM tab WHERE col OR col IS NULL; +DROP TABLE tab; + +SELECT 
'Bulk filtering disabled'; +set secondary_indices_enable_bulk_filtering = 0; + +CREATE TABLE tab +( + col Nullable(Boolean), + INDEX col_idx col TYPE set(0) +) +ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO tab VALUES + (DEFAULT), + (DEFAULT); + +SELECT count() FROM tab WHERE col OR col IS NULL; +DROP TABLE tab; + +CREATE TABLE tab +( + col Nullable(Boolean), + INDEX col_idx col TYPE set(0) +) +ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO tab VALUES + (DEFAULT), + (DEFAULT), + (TRUE); + +SELECT count() FROM tab WHERE col OR col IS NULL; +DROP TABLE tab; + +CREATE TABLE tab +( + col Nullable(Boolean), + INDEX col_idx col TYPE set(0) +) +ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO tab VALUES + (DEFAULT), + (DEFAULT), + (FALSE); + +SELECT count() FROM tab WHERE col OR col IS NULL; +DROP TABLE tab; diff --git a/parser/testdata/02790_fix_coredump_when_compile_expression/ast.json b/parser/testdata/02790_fix_coredump_when_compile_expression/ast.json new file mode 100644 index 000000000..2f99d9ec3 --- /dev/null +++ b/parser/testdata/02790_fix_coredump_when_compile_expression/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 3)" + }, + { + "explain": " Identifier test" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration col1 (children 1)" + }, + { + "explain": " DataType Nullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType DOUBLE" + }, + { + "explain": " ColumnDeclaration col2 (children 1)" + }, + { + "explain": " DataType Nullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType DOUBLE" + }, + { + "explain": " ColumnDeclaration col3 (children 1)" + }, + { + "explain": " DataType DOUBLE" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001604592, + "rows_read": 16, + "bytes_read": 590 + } +} diff --git a/parser/testdata/02790_fix_coredump_when_compile_expression/metadata.json b/parser/testdata/02790_fix_coredump_when_compile_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02790_fix_coredump_when_compile_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02790_fix_coredump_when_compile_expression/query.sql b/parser/testdata/02790_fix_coredump_when_compile_expression/query.sql new file mode 100644 index 000000000..90995da0c --- /dev/null +++ b/parser/testdata/02790_fix_coredump_when_compile_expression/query.sql @@ -0,0 +1,4 @@ +CREATE TABLE test (col1 Nullable(DOUBLE), col2 Nullable(DOUBLE), col3 DOUBLE) ENGINE=Memory; + +insert into test values(1.0 , 2.0, 3.0); +select multiIf(col1 > 2, col2/col3, 4.0) from test SETTINGS min_count_to_compile_expression=0; diff --git a/parser/testdata/02790_keyed_hash_bug/ast.json b/parser/testdata/02790_keyed_hash_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02790_keyed_hash_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02790_keyed_hash_bug/metadata.json b/parser/testdata/02790_keyed_hash_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02790_keyed_hash_bug/metadata.json @@ -0,0 +1 @@ +{"todo": 
true} diff --git a/parser/testdata/02790_keyed_hash_bug/query.sql b/parser/testdata/02790_keyed_hash_bug/query.sql new file mode 100644 index 000000000..409e284d0 --- /dev/null +++ b/parser/testdata/02790_keyed_hash_bug/query.sql @@ -0,0 +1,2 @@ +--- previously caused MemorySanitizer: use-of-uninitialized-value, because we tried to read hash key from empty tuple column during interpretation +SELECT sipHash64Keyed((1111111111111111111, toUInt64(222222222222223))) group by toUInt64(222222222222223); diff --git a/parser/testdata/02790_optimize_skip_unused_shards_join/ast.json b/parser/testdata/02790_optimize_skip_unused_shards_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02790_optimize_skip_unused_shards_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02790_optimize_skip_unused_shards_join/metadata.json b/parser/testdata/02790_optimize_skip_unused_shards_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02790_optimize_skip_unused_shards_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02790_optimize_skip_unused_shards_join/query.sql b/parser/testdata/02790_optimize_skip_unused_shards_join/query.sql new file mode 100644 index 000000000..0773e0a9a --- /dev/null +++ b/parser/testdata/02790_optimize_skip_unused_shards_join/query.sql @@ -0,0 +1,55 @@ +-- Issue: https://github.com/ClickHouse/ClickHouse/issues/15995 + +DROP TABLE IF EXISTS outer; +DROP TABLE IF EXISTS inner; + +DROP TABLE IF EXISTS outer_distributed; +DROP TABLE IF EXISTS inner_distributed; + +CREATE TABLE IF NOT EXISTS outer +( + `id` UInt64, + `organization_id` UInt64, + `version` UInt64 +) +ENGINE = ReplacingMergeTree(version) +PARTITION BY organization_id % 8 +ORDER BY (organization_id, id); + +CREATE TABLE inner +( + `id` UInt64, + `outer_id` UInt64, + `organization_id` UInt64, + `version` UInt64, + `date` Date +) +ENGINE = ReplacingMergeTree(version) +PARTITION BY toYYYYMM(date) +ORDER BY (organization_id, outer_id); + +CREATE TABLE inner_distributed AS inner +ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), 'inner', intHash64(organization_id)); + +CREATE TABLE outer_distributed AS outer +ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), 'outer', intHash64(organization_id)); + +SELECT + sum(if(inner_distributed.id != 0, 1, 0)) AS total, + inner_distributed.date AS date +FROM outer_distributed AS outer_distributed +FINAL +LEFT JOIN +( + SELECT + inner_distributed.outer_id AS outer_id, + inner_distributed.id AS id, + inner_distributed.date AS date + FROM inner_distributed AS inner_distributed + FINAL + WHERE inner_distributed.organization_id = 15078 +) AS inner_distributed ON inner_distributed.outer_id = outer_distributed.id +WHERE (outer_distributed.organization_id = 15078) AND (date != toDate('1970-01-01')) +GROUP BY date +ORDER BY date DESC +SETTINGS distributed_product_mode = 'local', optimize_skip_unused_shards = 1; diff --git a/parser/testdata/02790_sql_standard_fetch/ast.json b/parser/testdata/02790_sql_standard_fetch/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02790_sql_standard_fetch/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02790_sql_standard_fetch/metadata.json b/parser/testdata/02790_sql_standard_fetch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02790_sql_standard_fetch/metadata.json @@ -0,0 +1 @@ 
+{"todo": true} diff --git a/parser/testdata/02790_sql_standard_fetch/query.sql b/parser/testdata/02790_sql_standard_fetch/query.sql new file mode 100644 index 000000000..638cc6668 --- /dev/null +++ b/parser/testdata/02790_sql_standard_fetch/query.sql @@ -0,0 +1,34 @@ +-- https://antonz.org/sql-fetch/ + +CREATE TEMPORARY TABLE employees (id UInt64, name String, department String, salary UInt64); +INSERT INTO employees VALUES (23, 'Henry', 'it', 104), (24, 'Irene', 'it', 104), (25, 'Frank', 'it', 120), (31, 'Cindy', 'sales', 96), (33, 'Alice', 'sales', 100), (32, 'Dave', 'sales', 96), (22, 'Grace', 'it', 90), (21, 'Emma', 'it', 84); + +-- Determinism +SET max_threads = 1, parallelize_output_from_storages = 0; + +select transform(name, ['Henry', 'Irene', 'Dave', 'Cindy'], ['Henry or Irene', 'Henry or Irene', 'Dave or Cindy', 'Dave or Cindy']) AS name, department, salary from (SELECT * FROM employees ORDER BY id, name, department, salary) +order by salary desc +limit 5 +format PrettyCompactNoEscapes; + +select transform(name, ['Henry', 'Irene', 'Dave', 'Cindy'], ['Henry or Irene', 'Henry or Irene', 'Dave or Cindy', 'Dave or Cindy']) AS name, department, salary from (SELECT * FROM employees ORDER BY id, name, department, salary) +order by salary desc +fetch first 5 rows only +format PrettyCompactNoEscapes; + +select transform(name, ['Henry', 'Irene', 'Dave', 'Cindy'], ['Henry or Irene', 'Henry or Irene', 'Dave or Cindy', 'Dave or Cindy']) AS name, department, salary from (SELECT * FROM employees ORDER BY id, name, department, salary) +order by salary desc +fetch first 5 rows with ties +format PrettyCompactNoEscapes; + +select transform(name, ['Henry', 'Irene', 'Dave', 'Cindy'], ['Henry or Irene', 'Henry or Irene', 'Dave or Cindy', 'Dave or Cindy']) AS name, department, salary from (SELECT * FROM employees ORDER BY id, name, department, salary) +order by salary desc +offset 3 rows +fetch next 5 rows only +format PrettyCompactNoEscapes; + +select transform(name, ['Henry', 'Irene', 'Dave', 'Cindy'], ['Henry or Irene', 'Henry or Irene', 'Dave or Cindy', 'Dave or Cindy']) AS name, department, salary from (SELECT * FROM employees ORDER BY id, name, department, salary) +order by salary desc +offset 3 rows +fetch first 5 rows only +format PrettyCompactNoEscapes; diff --git a/parser/testdata/02790_url_multiple_tsv_files/ast.json b/parser/testdata/02790_url_multiple_tsv_files/ast.json new file mode 100644 index 000000000..5f4e70ec3 --- /dev/null +++ b/parser/testdata/02790_url_multiple_tsv_files/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " 
TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function url (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'http:\/\/127.0.0.1:8123?query=select+{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}+as+x+format+TSV'" + }, + { + "explain": " Literal 'TSV'" + }, + { + "explain": " Set" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001146961, + "rows_read": 24, + "bytes_read": 1054 + } +} diff --git a/parser/testdata/02790_url_multiple_tsv_files/metadata.json b/parser/testdata/02790_url_multiple_tsv_files/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02790_url_multiple_tsv_files/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02790_url_multiple_tsv_files/query.sql b/parser/testdata/02790_url_multiple_tsv_files/query.sql new file mode 100644 index 000000000..b71bd7c73 --- /dev/null +++ b/parser/testdata/02790_url_multiple_tsv_files/query.sql @@ -0,0 +1,5 @@ +select sum(*) from (select * from url('http://127.0.0.1:8123?query=select+{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}+as+x+format+TSV', 'TSV') settings max_threads=1, max_download_threads=1); +select sum(*) from (select * from url('http://127.0.0.1:8123?query=select+{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}+as+x+format+CSV', 'CSV') settings max_threads=1, max_download_threads=1); +select sum(*) from (select * from url('http://127.0.0.1:8123?query=select+{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}+as+x+format+JSONEachRow', 'JSONEachRow') settings max_threads=1, max_download_threads=1); +select sum(*) from (select * from url('http://127.0.0.1:8123?query=select+{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}+as+x+format+TSKV', 'TSKV') settings max_threads=1, max_download_threads=1); +select sum(*) from (select * from url('http://127.0.0.1:8123?query=select+{1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16}+as+x+format+Native', 'Native') settings max_threads=1, max_download_threads=1); diff --git a/parser/testdata/02791_final_block_structure_mismatch_bug/ast.json b/parser/testdata/02791_final_block_structure_mismatch_bug/ast.json new file mode 100644 index 000000000..2234145ea --- /dev/null +++ b/parser/testdata/02791_final_block_structure_mismatch_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001096331, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02791_final_block_structure_mismatch_bug/metadata.json b/parser/testdata/02791_final_block_structure_mismatch_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02791_final_block_structure_mismatch_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02791_final_block_structure_mismatch_bug/query.sql b/parser/testdata/02791_final_block_structure_mismatch_bug/query.sql new file mode 100644 index 000000000..394e3bff8 --- /dev/null +++ b/parser/testdata/02791_final_block_structure_mismatch_bug/query.sql @@ -0,0 +1,81 @@ +SET do_not_merge_across_partitions_select_final=1; + +CREATE TABLE test_block_mismatch +( + a UInt32, + b DateTime +) +ENGINE = ReplacingMergeTree +PARTITION BY toYYYYMM(b) +ORDER BY (toDate(b), a); + +INSERT INTO test_block_mismatch VALUES (1, toDateTime('2023-01-01 12:12:12')); +INSERT INTO test_block_mismatch VALUES (1, toDateTime('2023-01-01 
12:12:12')); +SELECT count(*) FROM test_block_mismatch FINAL; + +INSERT INTO test_block_mismatch VALUES (1, toDateTime('2023-02-02 12:12:12')); +INSERT INTO test_block_mismatch VALUES (1, toDateTime('2023-02-02 12:12:12')); +SELECT count(*) FROM test_block_mismatch FINAL; + +optimize table test_block_mismatch final; +system stop merges test_block_mismatch; + +INSERT INTO test_block_mismatch VALUES (2, toDateTime('2023-01-01 12:12:12')); +INSERT INTO test_block_mismatch VALUES (2, toDateTime('2023-01-01 12:12:12')); +-- one lonely part in the 2023-02-02 partition and 3 parts in the 2023-01-01 partition. +-- the lonely part will not be processed by PartsSplitter, while the 2023-01-01 parts will be; previously this led to the `Block structure mismatch in Pipe::unitePipes` exception. +SELECT count(*) FROM test_block_mismatch FINAL; + + +-- variations of the test above with slightly modified table definitions + +CREATE TABLE test_block_mismatch_sk1 +( + a UInt32, + b DateTime +) +ENGINE = ReplacingMergeTree +PARTITION BY toYYYYMM(b) +PRIMARY KEY (toDate(b)) +ORDER BY (toDate(b), a); + +INSERT INTO test_block_mismatch_sk1 VALUES (1, toDateTime('2023-01-01 12:12:12')); +INSERT INTO test_block_mismatch_sk1 VALUES (1, toDateTime('2023-01-01 12:12:12')); +SELECT count(*) FROM test_block_mismatch_sk1 FINAL; + +INSERT INTO test_block_mismatch_sk1 VALUES (1, toDateTime('2023-02-02 12:12:12')); +INSERT INTO test_block_mismatch_sk1 VALUES (1, toDateTime('2023-02-02 12:12:12')); +SELECT count(*) FROM test_block_mismatch_sk1 FINAL; + +optimize table test_block_mismatch_sk1 final; +system stop merges test_block_mismatch_sk1; + +INSERT INTO test_block_mismatch_sk1 VALUES (2, toDateTime('2023-01-01 12:12:12')); +INSERT INTO test_block_mismatch_sk1 VALUES (2, toDateTime('2023-01-01 12:12:12')); +SELECT count(*) FROM test_block_mismatch_sk1 FINAL; + + +CREATE TABLE test_block_mismatch_sk2 +( + a UInt32, + b DateTime +) +ENGINE = ReplacingMergeTree +PARTITION BY toYYYYMM(b) +PRIMARY KEY (a) +ORDER BY (a, toDate(b)); + +INSERT INTO test_block_mismatch_sk2 VALUES (1, toDateTime('2023-01-01 12:12:12')); +INSERT INTO test_block_mismatch_sk2 VALUES (1, toDateTime('2023-01-01 12:12:12')); +SELECT count(*) FROM test_block_mismatch_sk2 FINAL; + +INSERT INTO test_block_mismatch_sk2 VALUES (1, toDateTime('2023-02-02 12:12:12')); +INSERT INTO test_block_mismatch_sk2 VALUES (1, toDateTime('2023-02-02 12:12:12')); +SELECT count(*) FROM test_block_mismatch_sk2 FINAL; + +optimize table test_block_mismatch_sk2 final; +system stop merges test_block_mismatch_sk2; + +INSERT INTO test_block_mismatch_sk2 VALUES (2, toDateTime('2023-01-01 12:12:12')); +INSERT INTO test_block_mismatch_sk2 VALUES (2, toDateTime('2023-01-01 12:12:12')); +SELECT count(*) FROM test_block_mismatch_sk2 FINAL; diff --git a/parser/testdata/02791_predicate_pushdown_different_types/ast.json b/parser/testdata/02791_predicate_pushdown_different_types/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02791_predicate_pushdown_different_types/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02791_predicate_pushdown_different_types/metadata.json b/parser/testdata/02791_predicate_pushdown_different_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02791_predicate_pushdown_different_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02791_predicate_pushdown_different_types/query.sql
b/parser/testdata/02791_predicate_pushdown_different_types/query.sql new file mode 100644 index 000000000..121ffb27e --- /dev/null +++ b/parser/testdata/02791_predicate_pushdown_different_types/query.sql @@ -0,0 +1,7 @@ +# These queries triggered a crash in old ClickHouse versions: + +CREATE TEMPORARY TABLE a (key UInt32, ID LowCardinality(String)); +CREATE TEMPORARY TABLE b (key UInt32); +SELECT * FROM b JOIN a USING (key) WHERE ID = '1' HAVING ID = '1'; + +# PS. Predicate pushdown does not work for LowCardinality(String), but it's another problem. diff --git a/parser/testdata/02792_alter_table_modify_comment/ast.json b/parser/testdata/02792_alter_table_modify_comment/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02792_alter_table_modify_comment/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02792_alter_table_modify_comment/metadata.json b/parser/testdata/02792_alter_table_modify_comment/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02792_alter_table_modify_comment/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02792_alter_table_modify_comment/query.sql b/parser/testdata/02792_alter_table_modify_comment/query.sql new file mode 100644 index 000000000..ba3083390 --- /dev/null +++ b/parser/testdata/02792_alter_table_modify_comment/query.sql @@ -0,0 +1,72 @@ +-- Tags: no-replicated-database +-- Tag no-replicated-database: Unsupported type of ALTER query + +DROP TABLE IF EXISTS t; + +# Memory, MergeTree, and ReplicatedMergeTree + +CREATE TABLE t (x UInt8) ENGINE = Memory COMMENT 'Hello'; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +ALTER TABLE t MODIFY COMMENT 'World'; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +DROP TABLE t; + +CREATE TABLE t (x UInt8) ENGINE = MergeTree ORDER BY () COMMENT 'Hello'; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +ALTER TABLE t MODIFY COMMENT 'World'; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +DROP TABLE t; + +# The case when there are many operations in one ALTER + +CREATE TABLE t (x UInt8) ENGINE = MergeTree ORDER BY () COMMENT 'Hello'; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +ALTER TABLE t MODIFY COMMENT 'World', MODIFY COLUMN x UInt16; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +DROP TABLE t; + +# Note that the table comment is not replicated. We can implement it later. 
+ +CREATE TABLE t (x UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_comment_table1/t', '1') ORDER BY () COMMENT 'Hello'; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +ALTER TABLE t MODIFY COMMENT 'World'; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +DROP TABLE t SYNC; + +CREATE TABLE t (x UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_comment_table2/t', '1') ORDER BY () COMMENT 'Hello'; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +ALTER TABLE t MODIFY COMMENT 'World', MODIFY COLUMN x UInt16; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +DROP TABLE t SYNC; + +# The cases when there is no comment on creation + +CREATE TABLE t (x UInt8) ENGINE = Memory; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +ALTER TABLE t MODIFY COMMENT 'World'; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +DROP TABLE t; + +CREATE TABLE t (x UInt8) ENGINE = MergeTree ORDER BY (); +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +ALTER TABLE t MODIFY COMMENT 'World'; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +DROP TABLE t; + +CREATE TABLE t (x UInt8) ENGINE = MergeTree ORDER BY (); +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +ALTER TABLE t MODIFY COMMENT 'World', MODIFY COLUMN x UInt16; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +DROP TABLE t; + +CREATE TABLE t (x UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_comment_table3/t', '1') ORDER BY (); +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +ALTER TABLE t MODIFY COMMENT 'World'; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +DROP TABLE t SYNC; + +CREATE TABLE t (x UInt8) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_comment_table4/t', '1') ORDER BY (); +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +ALTER TABLE t MODIFY COMMENT 'World', MODIFY COLUMN x UInt16; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND table = 't'; +DROP TABLE t SYNC; diff --git a/parser/testdata/02792_drop_projection_lwd/ast.json b/parser/testdata/02792_drop_projection_lwd/ast.json new file mode 100644 index 000000000..fd67bf32d --- /dev/null +++ b/parser/testdata/02792_drop_projection_lwd/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00131092, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02792_drop_projection_lwd/metadata.json b/parser/testdata/02792_drop_projection_lwd/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02792_drop_projection_lwd/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02792_drop_projection_lwd/query.sql b/parser/testdata/02792_drop_projection_lwd/query.sql new file mode 100644 index 000000000..dad7f7cd0 --- /dev/null +++ b/parser/testdata/02792_drop_projection_lwd/query.sql @@ -0,0 +1,20 @@ +SET mutations_sync = 2; + +DROP TABLE IF EXISTS t_projections_lwd; + +CREATE TABLE 
t_projections_lwd (a UInt32, b UInt32, PROJECTION p (SELECT * ORDER BY b)) ENGINE = MergeTree ORDER BY a; + +INSERT INTO t_projections_lwd SELECT number, number FROM numbers(100); + +-- LWD does not work, as expected +DELETE FROM t_projections_lwd WHERE a = 1; -- { serverError SUPPORT_IS_DISABLED } +KILL MUTATION WHERE database = currentDatabase() AND table = 't_projections_lwd' SYNC FORMAT Null; + +-- drop projection +ALTER TABLE t_projections_lwd DROP projection p; + +DELETE FROM t_projections_lwd WHERE a = 2; + +SELECT count() FROM t_projections_lwd; + +DROP TABLE t_projections_lwd; diff --git a/parser/testdata/02794_pushdown_invalid_get/ast.json b/parser/testdata/02794_pushdown_invalid_get/ast.json new file mode 100644 index 000000000..b0c9de81f --- /dev/null +++ b/parser/testdata/02794_pushdown_invalid_get/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toInt128 (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toInt128 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-2" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.000941647, + "rows_read": 24, + "bytes_read": 1002 + } +} diff --git a/parser/testdata/02794_pushdown_invalid_get/metadata.json b/parser/testdata/02794_pushdown_invalid_get/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02794_pushdown_invalid_get/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02794_pushdown_invalid_get/query.sql b/parser/testdata/02794_pushdown_invalid_get/query.sql new file mode 100644 index 000000000..949f86825 --- /dev/null +++ b/parser/testdata/02794_pushdown_invalid_get/query.sql @@ -0,0 +1,2 @@ +SELECT * FROM (SELECT toInt128(NULL) AS x UNION ALL SELECT materialize(toInt128(-2))) WHERE x; -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER } +SELECT * FROM (SELECT toInt128(NULL) AS x UNION ALL SELECT materialize(toInt128(-2))) WHERE x != 0; diff --git a/parser/testdata/02795_full_join_assert_cast/ast.json b/parser/testdata/02795_full_join_assert_cast/ast.json new file mode 100644 index 000000000..140a2c158 --- /dev/null +++ b/parser/testdata/02795_full_join_assert_cast/ast.json @@ -0,0 +1,127 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + 
"explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function any (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier s" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias t1) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (alias s) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal 'String'" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias t2) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (alias s) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'b'" + }, + { + "explain": " Literal 'LowCardinality(String)'" + }, + { + "explain": " TableJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier s" + } + ], + + "rows": 35, + + "statistics": + { + "elapsed": 0.001389087, + "rows_read": 35, + "bytes_read": 1476 + } +} diff --git a/parser/testdata/02795_full_join_assert_cast/metadata.json b/parser/testdata/02795_full_join_assert_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02795_full_join_assert_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02795_full_join_assert_cast/query.sql b/parser/testdata/02795_full_join_assert_cast/query.sql new file mode 100644 index 000000000..d3f0060ac --- /dev/null +++ b/parser/testdata/02795_full_join_assert_cast/query.sql @@ -0,0 +1 @@ +SELECT any(toTypeName(s)) FROM (SELECT ('a' :: String) as s) t1 FULL JOIN (SELECT ('b' :: LowCardinality(String)) as s) t2 USING (s); diff --git a/parser/testdata/02796_calculate_text_stack_trace/ast.json b/parser/testdata/02796_calculate_text_stack_trace/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02796_calculate_text_stack_trace/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02796_calculate_text_stack_trace/metadata.json b/parser/testdata/02796_calculate_text_stack_trace/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02796_calculate_text_stack_trace/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02796_calculate_text_stack_trace/query.sql b/parser/testdata/02796_calculate_text_stack_trace/query.sql new file mode 100644 index 000000000..22a0a5ea2 --- /dev/null +++ b/parser/testdata/02796_calculate_text_stack_trace/query.sql @@ -0,0 +1,21 @@ +-- Tags: no-parallel + +SET max_rows_to_read = 0; -- system.text_log can be really big +SELECT 'Hello', throwIf(1); -- { serverError 
FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +SYSTEM FLUSH LOGS query_log, text_log; + +SELECT length(stack_trace) > 1000 FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE '%SELECT \'Hello\', throwIf(1)%' AND query NOT LIKE '%system%' ORDER BY event_time_microseconds DESC LIMIT 1; + +SELECT message LIKE '%Stack trace%' FROM system.text_log WHERE level = 'Error' AND message LIKE '%Exception%throwIf%' + AND query_id = (SELECT query_id FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE '%SELECT \'Hello\', throwIf(1)%' AND query NOT LIKE '%system%' ORDER BY event_time_microseconds DESC LIMIT 1) + ORDER BY event_time_microseconds DESC LIMIT 10; + +SET calculate_text_stack_trace = 0; +SELECT 'World', throwIf(1); -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } +SYSTEM FLUSH LOGS query_log, text_log; + +SELECT length(stack_trace) FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE '%SELECT \'World\', throwIf(1)%' AND query NOT LIKE '%system%' ORDER BY event_time_microseconds DESC LIMIT 1; + +SELECT message LIKE '%Stack trace%' FROM system.text_log WHERE level = 'Error' AND message LIKE '%Exception%throwIf%' + AND query_id = (SELECT query_id FROM system.query_log WHERE current_database = currentDatabase() AND query LIKE '%SELECT \'World\', throwIf(1)%' AND query NOT LIKE '%system%' ORDER BY event_time_microseconds DESC LIMIT 1) + ORDER BY event_time_microseconds DESC LIMIT 10; diff --git a/parser/testdata/02796_projection_date_filter_on_view/ast.json b/parser/testdata/02796_projection_date_filter_on_view/ast.json new file mode 100644 index 000000000..1a90d548f --- /dev/null +++ b/parser/testdata/02796_projection_date_filter_on_view/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery fx_1m (children 1)" + }, + { + "explain": " Identifier fx_1m" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001192355, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/02796_projection_date_filter_on_view/metadata.json b/parser/testdata/02796_projection_date_filter_on_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02796_projection_date_filter_on_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02796_projection_date_filter_on_view/query.sql b/parser/testdata/02796_projection_date_filter_on_view/query.sql new file mode 100644 index 000000000..cb26a6bce --- /dev/null +++ b/parser/testdata/02796_projection_date_filter_on_view/query.sql @@ -0,0 +1,70 @@ +DROP TABLE IF EXISTS fx_1m; +DROP TABLE IF EXISTS fx_5m; + +-- create source table +CREATE TABLE fx_1m ( + `symbol` LowCardinality(String) CODEC(ZSTD), + `dt_close` DateTime64(3, 'UTC') CODEC(DoubleDelta, ZSTD), + `open` Float32 CODEC(Delta, ZSTD), + `high` Float32 CODEC(Delta, ZSTD), + `low` Float32 CODEC(Delta, ZSTD), + `close` Float32 CODEC(Delta, ZSTD), + `volume` Float32 CODEC(Delta, ZSTD) +) +ENGINE = MergeTree() +PARTITION BY toYear(dt_close) +ORDER BY (symbol, dt_close) SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +-- add projection +ALTER TABLE fx_1m +ADD PROJECTION fx_5m ( + SELECT + symbol, + toStartOfInterval(dt_close, INTERVAL 300 SECOND) AS dt_close, + argMin(open, dt_close), + max(high), + min(low), + argMax(close, dt_close), + sum(volume) volume + GROUP BY symbol, dt_close +); + +-- materialize projection +ALTER TABLE fx_1m MATERIALIZE 
PROJECTION fx_5m SETTINGS mutations_sync = 2; + +-- create view using projection +CREATE VIEW fx_5m AS +SELECT + symbol, + toStartOfInterval(dt_close, INTERVAL 300 SECOND) AS dt_close, + argMin(open, dt_close) open, + max(high) high, + min(low) low, + argMax(close, dt_close) close, + sum(volume) volume +FROM fx_1m +GROUP BY symbol, dt_close; + +-- insert sample data +INSERT INTO fx_1m +SELECT + 'EURUSD', + toDateTime64('2022-12-12 12:00:00', 3, 'UTC') + number, + number + randCanonical(), + number + randCanonical(), + number + randCanonical(), + number + randCanonical(), + number + randCanonical() +FROM numbers(1000000); + +-- segmentation fault (filter on dt_close column) +SELECT + dt_close, + close +FROM fx_5m +where symbol = 'EURUSD' and dt_close between '2022-12-11' and '2022-12-13' +order by dt_close +format Null; + +DROP TABLE fx_5m; +DROP TABLE fx_1m; diff --git a/parser/testdata/02797_aggregator_huge_mem_usage_bug/ast.json b/parser/testdata/02797_aggregator_huge_mem_usage_bug/ast.json new file mode 100644 index 000000000..2020debe8 --- /dev/null +++ b/parser/testdata/02797_aggregator_huge_mem_usage_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery v (children 1)" + }, + { + "explain": " Identifier v" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001109442, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02797_aggregator_huge_mem_usage_bug/metadata.json b/parser/testdata/02797_aggregator_huge_mem_usage_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02797_aggregator_huge_mem_usage_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02797_aggregator_huge_mem_usage_bug/query.sql b/parser/testdata/02797_aggregator_huge_mem_usage_bug/query.sql new file mode 100644 index 000000000..3532f617e --- /dev/null +++ b/parser/testdata/02797_aggregator_huge_mem_usage_bug/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS v; + +create view v (s LowCardinality(String), n UInt8) as select 'test' as s, toUInt8(number) as n from numbers(10000000); + +-- this is what allows mem usage to go really high +set max_block_size=4294967296; + +set max_memory_usage = '420Mi'; + +select s, sum(n) from v group by s format Null; + +DROP TABLE v; diff --git a/parser/testdata/02797_range_nullable/ast.json b/parser/testdata/02797_range_nullable/ast.json new file mode 100644 index 000000000..8626bde0a --- /dev/null +++ b/parser/testdata/02797_range_nullable/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal NULL" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001162956, + "rows_read": 7, + "bytes_read": 254 + } +} diff --git a/parser/testdata/02797_range_nullable/metadata.json b/parser/testdata/02797_range_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02797_range_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02797_range_nullable/query.sql b/parser/testdata/02797_range_nullable/query.sql new file mode 
100644 index 000000000..ae35eb6fb --- /dev/null +++ b/parser/testdata/02797_range_nullable/query.sql @@ -0,0 +1,12 @@ +SELECT range(null); +SELECT range(10, null); +SELECT range(10, 2, null); +select range('string', Null); +SELECT range(toNullable(1)); +SELECT range(0::Nullable(UInt64), 10::Nullable(UInt64), 2::Nullable(UInt64)); +SELECT range(0::Nullable(Int64), 10::Nullable(Int64), 2::Nullable(Int64)); +SELECT range(materialize(0), 10::Nullable(UInt64), 2::Nullable(UInt64)); +SELECT range(Null::Nullable(UInt64), 10::Nullable(UInt64), 2::Nullable(UInt64)); -- { serverError BAD_ARGUMENTS } +SELECT range(0::Nullable(UInt64), Null::Nullable(UInt64), 2::Nullable(UInt64)); -- { serverError BAD_ARGUMENTS } +SELECT range(0::Nullable(UInt64), 10::Nullable(UInt64), Null::Nullable(UInt64)); -- { serverError BAD_ARGUMENTS } +SELECT range(Null::Nullable(UInt8), materialize(1)); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02797_transform_narrow_types/ast.json b/parser/testdata/02797_transform_narrow_types/ast.json new file mode 100644 index 000000000..1e680f351 --- /dev/null +++ b/parser/testdata/02797_transform_narrow_types/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function transform (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Literal Array_[Int64_-1, UInt64_2]" + }, + { + "explain": " Literal Array_['f', 's']" + }, + { + "explain": " Literal 'g'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001022365, + "rows_read": 10, + "bytes_read": 373 + } +} diff --git a/parser/testdata/02797_transform_narrow_types/metadata.json b/parser/testdata/02797_transform_narrow_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02797_transform_narrow_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02797_transform_narrow_types/query.sql b/parser/testdata/02797_transform_narrow_types/query.sql new file mode 100644 index 000000000..56a02fb8a --- /dev/null +++ b/parser/testdata/02797_transform_narrow_types/query.sql @@ -0,0 +1,6 @@ +SELECT transform(-1, [-1, 2], ['f', 's'], 'g'); +SELECT transform(2, [-1, 2], ['f', 's'], 'g'); +SELECT transform(-1, [-1, 2], [11, 22], 33); +SELECT transform(-1, [-1, 2], [11, 22]); +SELECT transform(3, [-1, 2], [11, 22], 33); +SELECT transform(3, [-1, 2], [11, 22]); diff --git a/parser/testdata/02798_explain_settings_not_applied_bug/ast.json b/parser/testdata/02798_explain_settings_not_applied_bug/ast.json new file mode 100644 index 000000000..ad84ba972 --- /dev/null +++ b/parser/testdata/02798_explain_settings_not_applied_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001263889, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02798_explain_settings_not_applied_bug/metadata.json b/parser/testdata/02798_explain_settings_not_applied_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02798_explain_settings_not_applied_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff 
--git a/parser/testdata/02798_explain_settings_not_applied_bug/query.sql b/parser/testdata/02798_explain_settings_not_applied_bug/query.sql new file mode 100644 index 000000000..7c48d62be --- /dev/null +++ b/parser/testdata/02798_explain_settings_not_applied_bug/query.sql @@ -0,0 +1,21 @@ +SET output_format_pretty_display_footer_column_names=0; +SET output_format_pretty_color=1; +SET read_in_order_two_level_merge_threshold=1000000; + +DROP TABLE IF EXISTS t; +CREATE TABLE t(a UInt64) +ENGINE = MergeTree +ORDER BY a +SETTINGS index_granularity = 8192; + +INSERT INTO t SELECT * FROM numbers_mt(1e3); +OPTIMIZE TABLE t FINAL; + +EXPLAIN PIPELINE +SELECT a +FROM t +GROUP BY a +FORMAT PrettySpace +SETTINGS optimize_aggregation_in_order = 1; + +DROP TABLE t; diff --git a/parser/testdata/02798_generic_transform/ast.json b/parser/testdata/02798_generic_transform/ast.json new file mode 100644 index 000000000..b8e69f7ce --- /dev/null +++ b/parser/testdata/02798_generic_transform/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function transform (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal Tuple_(UInt64_3, '3')" + }, + { + "explain": " Literal Tuple_(UInt64_5, '5')" + }, + { + "explain": " Literal Tuple_(UInt64_7, '7')" + }, + { + "explain": " Literal Array_['hello', 'world', 'abc!']" + }, + { + "explain": " Literal 'def'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.00112794, + "rows_read": 24, + "bytes_read": 979 + } +} diff --git a/parser/testdata/02798_generic_transform/metadata.json b/parser/testdata/02798_generic_transform/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02798_generic_transform/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02798_generic_transform/query.sql b/parser/testdata/02798_generic_transform/query.sql new file mode 100644 index 000000000..6317d83fb --- /dev/null +++ b/parser/testdata/02798_generic_transform/query.sql @@ -0,0 +1,12 @@ +SELECT transform((number, toString(number)), [(3, '3'), (5, '5'), (7, '7')], ['hello', 'world', 'abc!'], 'def') FROM system.numbers LIMIT 10; +SELECT transform(toNullable(toInt256(number)), [3, 5, 7], ['hello', 'world', 'abc'], '') FROM system.numbers LIMIT 10; +SELECT transform(toUInt256(number), [3, 5, 7], ['hello', 'world', 'abc'], '') FROM system.numbers LIMIT 10; + +select case 1::Nullable(Int32) when 1 then 123 else 0 end; + +SELECT transform(arrayJoin(['c', 'b', 'a']), ['a', 'b'], [toDateTime64('2023-01-01', 3), 
toDateTime64('2023-02-02', 3)], toDateTime64('2023-03-03', 3)); + +SELECT transform(1, [1], [toDecimal32(1, 2)]), toDecimal32(1, 2); +select transform(1, [1], [toDecimal32(42, 2)]), toDecimal32(42, 2); +SELECT transform(1, [1], [toDecimal32(42, 2)], 0); +SELECT transform(1, [1], [toDecimal32(42, 2)], toDecimal32(0, 2)); diff --git a/parser/testdata/02798_substring_index/ast.json b/parser/testdata/02798_substring_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02798_substring_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02798_substring_index/metadata.json b/parser/testdata/02798_substring_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02798_substring_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02798_substring_index/query.sql b/parser/testdata/02798_substring_index/query.sql new file mode 100644 index 000000000..520775e89 --- /dev/null +++ b/parser/testdata/02798_substring_index/query.sql @@ -0,0 +1,93 @@ +-- { echoOn } +select substringIndex('www.clickhouse.com', '.', -4); +select substringIndex('www.clickhouse.com', '.', -3); +select substringIndex('www.clickhouse.com', '.', -2); +select substringIndex('www.clickhouse.com', '.', -1); +select substringIndex('www.clickhouse.com', '.', 0); +select substringIndex('www.clickhouse.com', '.', 1); +select substringIndex('www.clickhouse.com', '.', 2); +select substringIndex('www.clickhouse.com', '.', 3); +select substringIndex('www.clickhouse.com', '.', 4); + +select substringIndex(materialize('www.clickhouse.com'), '.', -4); +select substringIndex(materialize('www.clickhouse.com'), '.', -3); +select substringIndex(materialize('www.clickhouse.com'), '.', -2); +select substringIndex(materialize('www.clickhouse.com'), '.', -1); +select substringIndex(materialize('www.clickhouse.com'), '.', 0); +select substringIndex(materialize('www.clickhouse.com'), '.', 1); +select substringIndex(materialize('www.clickhouse.com'), '.', 2); +select substringIndex(materialize('www.clickhouse.com'), '.', 3); +select substringIndex(materialize('www.clickhouse.com'), '.', 4); + +select substringIndex(materialize('www.clickhouse.com'), '.', materialize(-4)); +select substringIndex(materialize('www.clickhouse.com'), '.', materialize(-3)); +select substringIndex(materialize('www.clickhouse.com'), '.', materialize(-2)); +select substringIndex(materialize('www.clickhouse.com'), '.', materialize(-1)); +select substringIndex(materialize('www.clickhouse.com'), '.', materialize(0)); +select substringIndex(materialize('www.clickhouse.com'), '.', materialize(1)); +select substringIndex(materialize('www.clickhouse.com'), '.', materialize(2)); +select substringIndex(materialize('www.clickhouse.com'), '.', materialize(3)); +select substringIndex(materialize('www.clickhouse.com'), '.', materialize(4)); + +select substringIndex('www.clickhouse.com', '.', materialize(-4)); +select substringIndex('www.clickhouse.com', '.', materialize(-3)); +select substringIndex('www.clickhouse.com', '.', materialize(-2)); +select substringIndex('www.clickhouse.com', '.', materialize(-1)); +select substringIndex('www.clickhouse.com', '.', materialize(0)); +select substringIndex('www.clickhouse.com', '.', materialize(1)); +select substringIndex('www.clickhouse.com', '.', materialize(2)); +select substringIndex('www.clickhouse.com', '.', materialize(3)); +select substringIndex('www.clickhouse.com', '.', materialize(4)); + +select 
SUBSTRING_INDEX('www.clickhouse.com', '.', 2); + +select substringIndex('www.clickhouse.com', '..', 2); -- { serverError BAD_ARGUMENTS } +select substringIndex('www.clickhouse.com', '', 2); -- { serverError BAD_ARGUMENTS } +select substringIndex('www.clickhouse.com', materialize('.'), 2); -- { serverError ILLEGAL_COLUMN } +select substringIndex('www.clickhouse.com', '.', cast(2 as Int128)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +select substringIndexUTF8('富强,民主,文明', ',', -4); +select substringIndexUTF8('富强,民主,文明', ',', -3); +select substringIndexUTF8('富强,民主,文明', ',', -2); +select substringIndexUTF8('富强,民主,文明', ',', -1); +select substringIndexUTF8('富强,民主,文明', ',', 0); +select substringIndexUTF8('富强,民主,文明', ',', 1); +select substringIndexUTF8('富强,民主,文明', ',', 2); +select substringIndexUTF8('富强,民主,文明', ',', 3); +select substringIndexUTF8('富强,民主,文明', ',', 4); + +select substringIndexUTF8(materialize('富强,民主,文明'), ',', -4); +select substringIndexUTF8(materialize('富强,民主,文明'), ',', -3); +select substringIndexUTF8(materialize('富强,民主,文明'), ',', -2); +select substringIndexUTF8(materialize('富强,民主,文明'), ',', -1); +select substringIndexUTF8(materialize('富强,民主,文明'), ',', 0); +select substringIndexUTF8(materialize('富强,民主,文明'), ',', 1); +select substringIndexUTF8(materialize('富强,民主,文明'), ',', 2); +select substringIndexUTF8(materialize('富强,民主,文明'), ',', 3); +select substringIndexUTF8(materialize('富强,民主,文明'), ',', 4); + +select substringIndexUTF8('富强,民主,文明', ',', materialize(-4)); +select substringIndexUTF8('富强,民主,文明', ',', materialize(-3)); +select substringIndexUTF8('富强,民主,文明', ',', materialize(-2)); +select substringIndexUTF8('富强,民主,文明', ',', materialize(-1)); +select substringIndexUTF8('富强,民主,文明', ',', materialize(0)); +select substringIndexUTF8('富强,民主,文明', ',', materialize(1)); +select substringIndexUTF8('富强,民主,文明', ',', materialize(2)); +select substringIndexUTF8('富强,民主,文明', ',', materialize(3)); +select substringIndexUTF8('富强,民主,文明', ',', materialize(4)); + +select substringIndexUTF8(materialize('富强,民主,文明'), ',', materialize(-4)); +select substringIndexUTF8(materialize('富强,民主,文明'), ',', materialize(-3)); +select substringIndexUTF8(materialize('富强,民主,文明'), ',', materialize(-2)); +select substringIndexUTF8(materialize('富强,民主,文明'), ',', materialize(-1)); +select substringIndexUTF8(materialize('富强,民主,文明'), ',', materialize(0)); +select substringIndexUTF8(materialize('富强,民主,文明'), ',', materialize(1)); +select substringIndexUTF8(materialize('富强,民主,文明'), ',', materialize(2)); +select substringIndexUTF8(materialize('富强,民主,文明'), ',', materialize(3)); +select substringIndexUTF8(materialize('富强,民主,文明'), ',', materialize(4)); + +select substringIndexUTF8('富强,民主,文明', ',,', 2); -- { serverError BAD_ARGUMENTS } +select substringIndexUTF8('富强,民主,文明', '', 2); -- { serverError BAD_ARGUMENTS } +select substringIndexUTF8('富强,民主,文明', materialize(','), 2); -- { serverError ILLEGAL_COLUMN } +select substringIndexUTF8('富强,民主,文明', ',', cast(2 as Int128)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- { echoOff } diff --git a/parser/testdata/02799_transform_empty_arrays/ast.json b/parser/testdata/02799_transform_empty_arrays/ast.json new file mode 100644 index 000000000..a68450dfb --- /dev/null +++ b/parser/testdata/02799_transform_empty_arrays/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { 
+ "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function transform (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal Array_[UInt64_1]" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001444544, + "rows_read": 16, + "bytes_read": 624 + } +} diff --git a/parser/testdata/02799_transform_empty_arrays/metadata.json b/parser/testdata/02799_transform_empty_arrays/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02799_transform_empty_arrays/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02799_transform_empty_arrays/query.sql b/parser/testdata/02799_transform_empty_arrays/query.sql new file mode 100644 index 000000000..84e3e9d29 --- /dev/null +++ b/parser/testdata/02799_transform_empty_arrays/query.sql @@ -0,0 +1,3 @@ +SELECT transform(number, [], [1]) FROM numbers(10); +SELECT transform(number, [], [], 'Hello') FROM numbers(10); +SELECT transform(number, [], [], 'Hello ' || number::String) FROM numbers(10); diff --git a/parser/testdata/02800_transform_alter/ast.json b/parser/testdata/02800_transform_alter/ast.json new file mode 100644 index 000000000..ceb932c87 --- /dev/null +++ b/parser/testdata/02800_transform_alter/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_xy (children 1)" + }, + { + "explain": " Identifier test_xy" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001042448, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/02800_transform_alter/metadata.json b/parser/testdata/02800_transform_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02800_transform_alter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02800_transform_alter/query.sql b/parser/testdata/02800_transform_alter/query.sql new file mode 100644 index 000000000..7458f51c5 --- /dev/null +++ b/parser/testdata/02800_transform_alter/query.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS test_xy; +DROP TABLE IF EXISTS updates; + +CREATE TABLE test_xy +( + `x` Int32, + `y` String +) +ENGINE = MergeTree +ORDER BY x; + +CREATE TABLE updates +( + `x` Int32, + `y` String +) +ENGINE = MergeTree +ORDER BY x; + +INSERT INTO test_xy(x, y) VALUES (1, 'a1'), (2, 'a2'), (3, 'a3'); +INSERT INTO updates(x, y) VALUES (2, 'b2'), (3, 'b3'); + +SELECT x, y, + transform(x, + (select groupArray(x) from (select x, y from updates order by x) t1), + (select groupArray(y) from (select x, y from updates order by x) t2), + y) +FROM test_xy +WHERE 1 ORDER BY x, y; + +SET mutations_sync = 1; +ALTER table test_xy + UPDATE + y = transform(x, + (select groupArray(x) from (select x, y from updates order by x) t1), + (select groupArray(y) from (select x, y from updates order by x) t2), + y) + WHERE 1; + +SELECT * FROM test_xy ORDER BY x, y; + +DROP TABLE test_xy; +DROP TABLE updates; diff --git a/parser/testdata/02801_transform_nullable/ast.json 
b/parser/testdata/02801_transform_nullable/ast.json new file mode 100644 index 000000000..68b80b15c --- /dev/null +++ b/parser/testdata/02801_transform_nullable/ast.json @@ -0,0 +1,97 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function transform (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal Array_['a', 'b']" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal 'UTC'" + } + ], + + "rows": 25, + + "statistics": + { + "elapsed": 0.001383981, + "rows_read": 25, + "bytes_read": 947 + } +} diff --git a/parser/testdata/02801_transform_nullable/metadata.json b/parser/testdata/02801_transform_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02801_transform_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02801_transform_nullable/query.sql b/parser/testdata/02801_transform_nullable/query.sql new file mode 100644 index 000000000..95f3c1660 --- /dev/null +++ b/parser/testdata/02801_transform_nullable/query.sql @@ -0,0 +1,14 @@ +select transform('a', ['a', 'b'], [toDateTime64(1, 3, 'UTC'), toDateTime64(2, 3, 'UTC')], toDateTime64(0, 3, 'UTC')); +select transform(2, [1, 2], [toDateTime64(1, 3, 'UTC'), toDateTime64(2, 3, 'UTC')], toDateTime64(0, 3, 'UTC')); +select transform(null, [1, 2], [toDateTime64(1, 3, 'UTC'), toDateTime64(2, 3, 'UTC')], toDateTime64(0, 3, 'UTC')); + +SELECT transform(number, [3, 5, 7], ['hello', 'world', 'abc'], null) FROM system.numbers LIMIT 10; +SELECT transform(null, ['3', '5', '7'], ['hello', 'world', 'abc'], null) FROM system.numbers LIMIT 10; +SELECT transform(null, [null, null, null], [null, null, null], null) FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], [111, 222, null], -1) FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], [null, 222, 333], materialize(-1.1)) FROM system.numbers LIMIT 10; +SELECT transform(toString(number), ['3', '5', '7'], [null, null, null], materialize(1)) FROM system.numbers LIMIT 10; +SELECT transform(1, [2, 3], ['Meta.ua', null], materialize('Остальные')) AS title; +SELECT transform(2, [2, 3], [null, 'Google'], materialize('Остальные')) AS title; + +SELECT transform(number % 3 = 1 ? 
NULL : number, [2, 5, NULL], ['Hello', 'World', 'xyz'], '-') FROM numbers(10); diff --git a/parser/testdata/02802_with_cube_with_totals/ast.json b/parser/testdata/02802_with_cube_with_totals/ast.json new file mode 100644 index 000000000..ef3945e72 --- /dev/null +++ b/parser/testdata/02802_with_cube_with_totals/ast.json @@ -0,0 +1,118 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Tuple_(UInt64_2147483648, Tuple_(Float64_-0, Float64_1.1754943508222875e-38, UInt64_2147483646, '-9223372036854775808', NULL))" + }, + { + "explain": " Function toInt128 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_0.0001" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_256" + }, + { + "explain": " Function toInt64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_1.1754943508222875e-38" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal Float64_-0" + }, + { + "explain": " Literal Tuple_(Tuple_(UInt64_65535, '-92233720368547758.07'), Float64_0.9999)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_3.4028234663852886e38, '1', Float64_0.5)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '0.1'" + } + ], + + "rows": 32, + + "statistics": + { + "elapsed": 0.001122724, + "rows_read": 32, + "bytes_read": 1466 + } +} diff --git a/parser/testdata/02802_with_cube_with_totals/metadata.json b/parser/testdata/02802_with_cube_with_totals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02802_with_cube_with_totals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02802_with_cube_with_totals/query.sql b/parser/testdata/02802_with_cube_with_totals/query.sql new file mode 100644 index 000000000..77adb68eb --- /dev/null +++ b/parser/testdata/02802_with_cube_with_totals/query.sql @@ -0,0 +1,2 @@ +SELECT tuple((2147483648, (-0., 1.1754943508222875e-38, 2147483646, '-9223372036854775808', NULL))), toInt128(0.0001) GROUP BY ((256, toInt64(1.1754943508222875e-38), NULL), NULL, -0., ((65535, '-92233720368547758.07'), 0.9999), tuple(((1., 3.4028234663852886e38, '1', 0.5), NULL, tuple('0.1')))) WITH CUBE WITH TOTALS; +SELECT NULL GROUP BY toUUID(NULL, '0', NULL, '0.0000065535'), 1 WITH CUBE WITH TOTALS; diff --git a/parser/testdata/02803_remote_cannot_clone_block/ast.json b/parser/testdata/02803_remote_cannot_clone_block/ast.json new file mode 100644 index 000000000..d5878aacd --- /dev/null +++ 
b/parser/testdata/02803_remote_cannot_clone_block/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery numbers_10_00223 (children 1)" + }, + { + "explain": " Identifier numbers_10_00223" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001493964, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/02803_remote_cannot_clone_block/metadata.json b/parser/testdata/02803_remote_cannot_clone_block/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02803_remote_cannot_clone_block/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02803_remote_cannot_clone_block/query.sql b/parser/testdata/02803_remote_cannot_clone_block/query.sql new file mode 100644 index 000000000..dd72b9904 --- /dev/null +++ b/parser/testdata/02803_remote_cannot_clone_block/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS numbers_10_00223; + +CREATE TABLE numbers_10_00223 +ENGINE = Log AS +SELECT * +FROM system.numbers +LIMIT 10000; + +SET enable_analyzer = 0; + +SELECT * +FROM +( + SELECT 1 + FROM remote('127.0.0.{2,3}', currentDatabase(), numbers_10_00223) + WITH TOTALS +) +WHERE 1 +GROUP BY 1; + +DROP TABLE numbers_10_00223; diff --git a/parser/testdata/02804_clusterAllReplicas_insert/ast.json b/parser/testdata/02804_clusterAllReplicas_insert/ast.json new file mode 100644 index 000000000..f45bc569f --- /dev/null +++ b/parser/testdata/02804_clusterAllReplicas_insert/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data (children 1)" + }, + { + "explain": " Identifier data" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00153964, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02804_clusterAllReplicas_insert/metadata.json b/parser/testdata/02804_clusterAllReplicas_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02804_clusterAllReplicas_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02804_clusterAllReplicas_insert/query.sql b/parser/testdata/02804_clusterAllReplicas_insert/query.sql new file mode 100644 index 000000000..c39d9e7d7 --- /dev/null +++ b/parser/testdata/02804_clusterAllReplicas_insert/query.sql @@ -0,0 +1,6 @@ +drop table if exists data; +create table data (key Int) engine=Memory(); +-- NOTE: internal_replication is false, so INSERT will be done only into one shard +insert into function clusterAllReplicas(test_cluster_two_shards, currentDatabase(), data, rand()) values (2); +select * from data order by key; +drop table data; diff --git a/parser/testdata/02804_intersect_bad_cast/ast.json b/parser/testdata/02804_intersect_bad_cast/ast.json new file mode 100644 index 000000000..07f57704a --- /dev/null +++ b/parser/testdata/02804_intersect_bad_cast/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Float64_2" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, 
+ { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectIntersectExceptQuery (children 2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1024" + }, + { + "explain": " Literal UInt64_256" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Literal Float64_inf" + }, + { + "explain": " Literal UInt64_256" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001446424, + "rows_read": 24, + "bytes_read": 980 + } +} diff --git a/parser/testdata/02804_intersect_bad_cast/metadata.json b/parser/testdata/02804_intersect_bad_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02804_intersect_bad_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02804_intersect_bad_cast/query.sql b/parser/testdata/02804_intersect_bad_cast/query.sql new file mode 100644 index 000000000..c7eb8fdd3 --- /dev/null +++ b/parser/testdata/02804_intersect_bad_cast/query.sql @@ -0,0 +1 @@ +SELECT 2., * FROM (SELECT 1024, 256 INTERSECT SELECT 100 AND inf, 256); diff --git a/parser/testdata/02805_distributed_queries_timeouts/ast.json b/parser/testdata/02805_distributed_queries_timeouts/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02805_distributed_queries_timeouts/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02805_distributed_queries_timeouts/metadata.json b/parser/testdata/02805_distributed_queries_timeouts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02805_distributed_queries_timeouts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02805_distributed_queries_timeouts/query.sql b/parser/testdata/02805_distributed_queries_timeouts/query.sql new file mode 100644 index 000000000..98aeac362 --- /dev/null +++ b/parser/testdata/02805_distributed_queries_timeouts/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest +-- no-fasttest: Timeouts are slow +create table dist as system.one engine=Distributed(test_shard_localhost, system, one); +select sleep(8) from dist settings function_sleep_max_microseconds_per_block=8e9, prefer_localhost_replica=0, receive_timeout=7, async_socket_for_remote=0, use_hedged_requests=1 format Null; +select sleep(8) from dist settings function_sleep_max_microseconds_per_block=8e9, prefer_localhost_replica=0, receive_timeout=7, async_socket_for_remote=1, use_hedged_requests=0 format Null; +select sleep(8) from dist settings function_sleep_max_microseconds_per_block=8e9, prefer_localhost_replica=0, receive_timeout=7, async_socket_for_remote=0, use_hedged_requests=0 format Null; diff --git a/parser/testdata/02806_cte_block_cannot_be_empty/ast.json b/parser/testdata/02806_cte_block_cannot_be_empty/ast.json new file mode 100644 index 000000000..8a1e375bb --- /dev/null +++ b/parser/testdata/02806_cte_block_cannot_be_empty/ast.json @@ -0,0 +1,187 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + 
"explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " WithElement (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1 (alias ID)" + }, + { + "explain": " Function toDate (alias dt) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2023-06-24'" + }, + { + "explain": " Literal UInt64_0 (alias p)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function multiIf (alias params) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t.ID" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function formatRowNoNewline (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'JSONEachRow'" + }, + { + "explain": " Identifier dd" + }, + { + "explain": " Literal ''" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias t) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier ID" + }, + { + "explain": " Function multiIf (alias dd) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier p" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier dt" + }, + { + "explain": " Function toIntervalHour (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier p" + }, + { + "explain": " Literal '2022-01-01'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier c" + } + ], + + "rows": 55, + + "statistics": + { + "elapsed": 0.001687076, + "rows_read": 55, + "bytes_read": 2384 + } +} diff --git a/parser/testdata/02806_cte_block_cannot_be_empty/metadata.json b/parser/testdata/02806_cte_block_cannot_be_empty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02806_cte_block_cannot_be_empty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02806_cte_block_cannot_be_empty/query.sql b/parser/testdata/02806_cte_block_cannot_be_empty/query.sql new file mode 100644 index 000000000..688dc1017 --- /dev/null +++ b/parser/testdata/02806_cte_block_cannot_be_empty/query.sql @@ -0,0 +1,18 @@ +with c as ( select 1 ID, toDate('2023-06-24') dt, 0 p ) select multiIf(t.ID = 1, formatRowNoNewline('JSONEachRow', 
dd), '') AS params from (select ID, case when p = 0 then toString(date_add(hour, p, dt)) else '2022-01-01' end as dd from c) t; +with c as ( select 1 ID, toDate('2023-06-24') dt, 0 p ) select multiIf(t.ID = 1, formatRowNoNewline('JSONEachRow', dd), '') AS params, dd from (select ID, case when p = 0 then toString(date_add(hour, p, dt)) else '2022-01-01' end as dd from c) t; + +select + if( + outer_table.condition_value = 1, + formatRowNoNewline('JSONEachRow', outer_table.result_date), + '' + ) as json +from ( + select + 1 as condition_value, + date_add(month, inner_table.offset, toDate('2023-06-24')) as result_date + from ( + select + 2 as offset + ) inner_table + ) outer_table; diff --git a/parser/testdata/02807_default_date_time_nullable/ast.json b/parser/testdata/02807_default_date_time_nullable/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02807_default_date_time_nullable/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02807_default_date_time_nullable/metadata.json b/parser/testdata/02807_default_date_time_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02807_default_date_time_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02807_default_date_time_nullable/query.sql b/parser/testdata/02807_default_date_time_nullable/query.sql new file mode 100644 index 000000000..9152f1987 --- /dev/null +++ b/parser/testdata/02807_default_date_time_nullable/query.sql @@ -0,0 +1,18 @@ +create temporary table test ( + data int, + default Nullable(DateTime) DEFAULT '1977-01-01 00:00:00' +) engine = Memory(); + +insert into test (data) select 1; + +select * from test; + +drop temporary table test; + +create temporary table test ( + data int, + default DateTime DEFAULT '1977-01-01 00:00:00' +) engine = Memory(); +insert into test (data) select 1; + +select * from test; diff --git a/parser/testdata/02807_lower_utf8_msan/ast.json b/parser/testdata/02807_lower_utf8_msan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02807_lower_utf8_msan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02807_lower_utf8_msan/metadata.json b/parser/testdata/02807_lower_utf8_msan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02807_lower_utf8_msan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02807_lower_utf8_msan/query.sql b/parser/testdata/02807_lower_utf8_msan/query.sql new file mode 100644 index 000000000..95f224577 --- /dev/null +++ b/parser/testdata/02807_lower_utf8_msan/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + +SELECT lowerUTF8(arrayJoin(['©--------------------------------------', '©--------------------'])) ORDER BY 1; +SELECT upperUTF8(materialize('aaaaАБВГaaaaaaaaaaaaАБВГAAAAaaAA')) FROM numbers(2); diff --git a/parser/testdata/02807_math_unary_crash/ast.json b/parser/testdata/02807_math_unary_crash/ast.json new file mode 100644 index 000000000..8a95bc6e9 --- /dev/null +++ b/parser/testdata/02807_math_unary_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t10 (children 1)" + }, + { + "explain": " Identifier t10" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001305321, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git 
a/parser/testdata/02807_math_unary_crash/metadata.json b/parser/testdata/02807_math_unary_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02807_math_unary_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02807_math_unary_crash/query.sql b/parser/testdata/02807_math_unary_crash/query.sql new file mode 100644 index 000000000..fb693ac70 --- /dev/null +++ b/parser/testdata/02807_math_unary_crash/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS t10; +CREATE TABLE t10 (`c0` Int32) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t10 (c0) FORMAT Values (-1); +SELECT 1 FROM t10 GROUP BY erf(-sign(t10.c0)); +SELECT 1 FROM t10 GROUP BY -sign(t10.c0); +DROP TABLE t10; diff --git a/parser/testdata/02808_aliases_inside_case/ast.json b/parser/testdata/02808_aliases_inside_case/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02808_aliases_inside_case/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02808_aliases_inside_case/metadata.json b/parser/testdata/02808_aliases_inside_case/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02808_aliases_inside_case/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02808_aliases_inside_case/query.sql b/parser/testdata/02808_aliases_inside_case/query.sql new file mode 100644 index 000000000..0da45416e --- /dev/null +++ b/parser/testdata/02808_aliases_inside_case/query.sql @@ -0,0 +1,10 @@ +# We support specifying aliases in any place in the query, including CASE expression: + +with arrayJoin([1,2]) as arg +select arg, + (case + when arg = 1 + then 1 as one + when arg = 2 + then one / 2 + end) as imposible; diff --git a/parser/testdata/02809_has_subsequence/ast.json b/parser/testdata/02809_has_subsequence/ast.json new file mode 100644 index 000000000..5f23c4b95 --- /dev/null +++ b/parser/testdata/02809_has_subsequence/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'hasSubsequence'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001457811, + "rows_read": 5, + "bytes_read": 185 + } +} diff --git a/parser/testdata/02809_has_subsequence/metadata.json b/parser/testdata/02809_has_subsequence/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02809_has_subsequence/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02809_has_subsequence/query.sql b/parser/testdata/02809_has_subsequence/query.sql new file mode 100644 index 000000000..13b92164c --- /dev/null +++ b/parser/testdata/02809_has_subsequence/query.sql @@ -0,0 +1,68 @@ +select 'hasSubsequence'; +select hasSubsequence('garbage', ''); +select hasSubsequence('garbage', 'g'); +select hasSubsequence('garbage', 'G'); +select hasSubsequence('garbage', 'a'); +select hasSubsequence('garbage', 'e'); +select hasSubsequence('garbage', 'gr'); +select hasSubsequence('garbage', 'ab'); +select hasSubsequence('garbage', 'be'); +select hasSubsequence('garbage', 'arg'); +select hasSubsequence('garbage', 'gra'); +select hasSubsequence('garbage', 'rga'); +select hasSubsequence('garbage', 'garbage'); +select hasSubsequence('garbage', 
'garbage1'); +select hasSubsequence('garbage', 'arbw'); +select hasSubsequence('garbage', 'ARG'); +select hasSubsequence('garbage', materialize('')); +select hasSubsequence('garbage', materialize('arg')); +select hasSubsequence('garbage', materialize('arbw')); +select hasSubsequence(materialize('garbage'), ''); +select hasSubsequence(materialize('garbage'), 'arg'); +select hasSubsequence(materialize('garbage'), 'arbw'); +select hasSubsequence(materialize('garbage'), materialize('')); +select hasSubsequence(materialize('garbage'), materialize('arg')); +select hasSubsequence(materialize('garbage'), materialize('garbage1')); + +select 'hasSubsequenceCaseInsensitive'; +select hasSubsequenceCaseInsensitive('garbage', 'w'); +select hasSubsequenceCaseInsensitive('garbage', 'ARG'); +select hasSubsequenceCaseInsensitive('GARGAGE', 'arg'); +select hasSubsequenceCaseInsensitive(materialize('garbage'), materialize('w')); +select hasSubsequenceCaseInsensitive(materialize('garbage'), materialize('ARG')); +select hasSubsequenceCaseInsensitive(materialize('GARGAGE'), materialize('arg')); + +select 'hasSubsequenceUTF8'; +select hasSubsequence('ClickHouse - столбцовая система управления базами данных', ''); +select hasSubsequence('ClickHouse - столбцовая система управления базами данных', 'C'); -- eng +select hasSubsequence('ClickHouse - столбцовая система управления базами данных', 'С'); -- cyrillic +select hasSubsequence('ClickHouse - столбцовая система управления базами данных', 'House'); +select hasSubsequence('ClickHouse - столбцовая система управления базами данных', 'house'); +select hasSubsequence('ClickHouse - столбцовая система управления базами данных', 'система'); +select hasSubsequence('ClickHouse - столбцовая система управления базами данных', 'Система'); +select hasSubsequence('ClickHouse - столбцовая система управления базами данных', 'ссубд'); +select hasSubsequence(materialize('ClickHouse - столбцовая система управления базами данных'), 'субд'); +select hasSubsequence(materialize('ClickHouse - столбцовая система управления базами данных'), 'суббд'); +select hasSubsequence('ClickHouse - столбцовая система управления базами данных', materialize('стул')); +select hasSubsequence('ClickHouse - столбцовая система управления базами данных', materialize('два стула')); +select hasSubsequence(materialize('ClickHouse - столбцовая система управления базами данных'), materialize('орех')); +select hasSubsequence(materialize('ClickHouse - столбцовая система управления базами данных'), materialize('два ореха')); + +select 'hasSubsequenceCaseInsensitiveUTF8'; +select hasSubsequenceCaseInsensitiveUTF8('для онлайн обработки аналитических запросов (OLAP)', 'oltp'); +select hasSubsequenceCaseInsensitiveUTF8('для онлайн обработки аналитических запросов (OLAP)', 'оОоОоO'); +select hasSubsequenceCaseInsensitiveUTF8('для онлайн обработки аналитических запросов (OLAP)', 'я раб'); +select hasSubsequenceCaseInsensitiveUTF8(materialize('для онлайн обработки аналитических запросов (OLAP)'), 'работа'); +select hasSubsequenceCaseInsensitiveUTF8(materialize('для онлайн обработки аналитических запросов (OLAP)'), 'work'); +select hasSubsequenceCaseInsensitiveUTF8('для онлайн обработки аналитических запросов (OLAP)', materialize('добро)')); +select hasSubsequenceCaseInsensitiveUTF8('для онлайн обработки аналитических запросов (OLAP)', materialize('зло()')); +select hasSubsequenceCaseInsensitiveUTF8(materialize('для онлайн обработки аналитических запросов (OLAP)'), materialize('аналитика')); +select 
hasSubsequenceCaseInsensitiveUTF8(materialize('для онлайн обработки аналитических запросов (OLAP)'), materialize('аналитика для аналитиков')); + +select 'Nullable'; +select hasSubsequence(Null, Null); +select hasSubsequence(Null, 'a'); +select hasSubsequence(Null::Nullable(String), 'arg'::Nullable(String)); +select hasSubsequence('garbage'::Nullable(String), 'a'); +select hasSubsequence('garbage'::Nullable(String), 'arg'::Nullable(String)); +select hasSubsequence(materialize('garbage'::Nullable(String)), materialize('arg'::Nullable(String))); \ No newline at end of file diff --git a/parser/testdata/02809_has_token/ast.json b/parser/testdata/02809_has_token/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02809_has_token/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02809_has_token/metadata.json b/parser/testdata/02809_has_token/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02809_has_token/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02809_has_token/query.sql b/parser/testdata/02809_has_token/query.sql new file mode 100644 index 000000000..08edf3756 --- /dev/null +++ b/parser/testdata/02809_has_token/query.sql @@ -0,0 +1,3 @@ +-- in old versions of ClickHouse, the following query returned a wrong result: + +SELECT hasToken('quotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquota
quotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquota
quotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquota
quotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquota
quotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquota
quotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquota
quotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotaquotquota', 'quota') AS r; diff --git a/parser/testdata/02809_prewhere_and_in/ast.json b/parser/testdata/02809_prewhere_and_in/ast.json new file mode 100644 index 000000000..949b66359 --- /dev/null +++ b/parser/testdata/02809_prewhere_and_in/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_02809 (children 1)" + }, + { + "explain": " Identifier t_02809" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001281323, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/02809_prewhere_and_in/metadata.json b/parser/testdata/02809_prewhere_and_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02809_prewhere_and_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02809_prewhere_and_in/query.sql b/parser/testdata/02809_prewhere_and_in/query.sql new file mode 100644 index 000000000..448f9512c --- /dev/null +++ b/parser/testdata/02809_prewhere_and_in/query.sql @@ -0,0 +1,32 @@ +DROP TABLE IF EXISTS t_02809; + +CREATE TABLE t_02809(a Int64, b Int64, s String) +ENGINE=MergeTree order by tuple() +AS SELECT number, number%10, toString(arrayMap(i-> cityHash64(i*number), range(50))) FROM numbers(10000); + +CREATE TABLE t_02809_set(c Int64) +ENGINE=Set() +AS SELECT * FROM numbers(10); + +CREATE TABLE t_02809_aux(c Int64) +ENGINE=Memory() +AS SELECT * FROM numbers(10); + + +SET optimize_move_to_prewhere=1; + +-- Queries with 'IN' +SELECT * FROM (EXPLAIN actions=1 SELECT * FROM t_02809 WHERE a IN (SELECT * FROM system.one)) WHERE explain LIKE '%Prewhere filter'; +SELECT * FROM (EXPLAIN actions=1 SELECT * FROM t_02809 WHERE a IN (1,2,3)) WHERE explain LIKE '%Prewhere filter'; +SELECT * FROM (EXPLAIN actions=1 SELECT * FROM t_02809 WHERE a IN t_02809_set) WHERE explain LIKE '%Prewhere filter'; +SELECT * FROM (EXPLAIN actions=1 SELECT * FROM t_02809 WHERE a IN t_02809_aux) WHERE explain LIKE '%Prewhere filter'; + +-- Queries with 'NOT IN' +SELECT * FROM (EXPLAIN actions=1 SELECT * FROM t_02809 WHERE a NOT IN (SELECT * FROM system.one)) WHERE explain LIKE '%Prewhere filter'; +SELECT * FROM (EXPLAIN actions=1 SELECT * FROM t_02809 WHERE a NOT IN (1,2,3)) WHERE explain LIKE '%Prewhere filter'; +SELECT * FROM (EXPLAIN actions=1 SELECT * FROM t_02809 WHERE a NOT IN t_02809_set) WHERE 
explain LIKE '%Prewhere filter'; +SELECT * FROM (EXPLAIN actions=1 SELECT * FROM t_02809 WHERE a NOT IN t_02809_aux) WHERE explain LIKE '%Prewhere filter'; + +DROP TABLE t_02809; +DROP TABLE t_02809_set; +DROP TABLE t_02809_aux; diff --git a/parser/testdata/02809_storage_set_analysis_bug/ast.json b/parser/testdata/02809_storage_set_analysis_bug/ast.json new file mode 100644 index 000000000..76cf4c373 --- /dev/null +++ b/parser/testdata/02809_storage_set_analysis_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_set (children 1)" + }, + { + "explain": " Identifier test_set" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001746398, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/02809_storage_set_analysis_bug/metadata.json b/parser/testdata/02809_storage_set_analysis_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02809_storage_set_analysis_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02809_storage_set_analysis_bug/query.sql b/parser/testdata/02809_storage_set_analysis_bug/query.sql new file mode 100644 index 000000000..f71494673 --- /dev/null +++ b/parser/testdata/02809_storage_set_analysis_bug/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS test_set; +DROP TABLE IF EXISTS null_in__fuzz_6; + +set allow_suspicious_low_cardinality_types = 1; + +CREATE TABLE null_in__fuzz_6 (`dt` LowCardinality(UInt16), `idx` Int32, `i` Nullable(Int256), `s` Int32) ENGINE = MergeTree PARTITION BY dt ORDER BY idx; +insert into null_in__fuzz_6 select * from generateRandom() where i is not null limit 1; + +SET transform_null_in = 0; + +CREATE TABLE test_set (i Nullable(int)) ENGINE = Set(); +INSERT INTO test_set VALUES (1), (NULL); + +SELECT count() = 1 FROM null_in__fuzz_6 PREWHERE 71 WHERE i IN (test_set); -- { serverError CANNOT_CONVERT_TYPE } + +DROP TABLE test_set; +DROP TABLE null_in__fuzz_6; diff --git a/parser/testdata/02810_convert_uuid_to_uint128/ast.json b/parser/testdata/02810_convert_uuid_to_uint128/ast.json new file mode 100644 index 000000000..2eebaa25c --- /dev/null +++ b/parser/testdata/02810_convert_uuid_to_uint128/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUInt128 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUUID (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '00000000-0000-0000-0000-000000000000'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001318088, + "rows_read": 9, + "bytes_read": 378 + } +} diff --git a/parser/testdata/02810_convert_uuid_to_uint128/metadata.json b/parser/testdata/02810_convert_uuid_to_uint128/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02810_convert_uuid_to_uint128/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02810_convert_uuid_to_uint128/query.sql b/parser/testdata/02810_convert_uuid_to_uint128/query.sql new file mode 100644 index 000000000..155596dd1 --- /dev/null +++ b/parser/testdata/02810_convert_uuid_to_uint128/query.sql @@ -0,0 +1,8 
@@ +SELECT toUInt128(toUUID('00000000-0000-0000-0000-000000000000')); +SELECT toUInt128(toUUID('f82aef31-279e-431f-8b00-2899ad387aea')); +SELECT toUInt128(toUUID('ffffffff-ffff-ffff-ffff-ffffffffffff')); +SELECT toUInt64(toUUID('00000000-0000-0000-0000-000000000000')); -- { serverError NOT_IMPLEMENTED } +SELECT toInt128(toUUID('00000000-0000-0000-0000-000000000000')); -- { serverError NOT_IMPLEMENTED } +SELECT cast(toUUID('f82aef31-279e-431f-8b00-2899ad387aea'), 'UInt128'); +select accurateCast(toUUID('f82aef31-279e-431f-8b00-2899ad387aea'), 'UInt128'); +select toUUID('f82aef31-279e-431f-8b00-2899ad387aea')::UInt128; diff --git a/parser/testdata/02810_fix_remove_dedundant_distinct_view/ast.json b/parser/testdata/02810_fix_remove_dedundant_distinct_view/ast.json new file mode 100644 index 000000000..1c7fe090b --- /dev/null +++ b/parser/testdata/02810_fix_remove_dedundant_distinct_view/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab_v (children 1)" + }, + { + "explain": " Identifier tab_v" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001536537, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/02810_fix_remove_dedundant_distinct_view/metadata.json b/parser/testdata/02810_fix_remove_dedundant_distinct_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02810_fix_remove_dedundant_distinct_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02810_fix_remove_dedundant_distinct_view/query.sql b/parser/testdata/02810_fix_remove_dedundant_distinct_view/query.sql new file mode 100644 index 000000000..10a68721c --- /dev/null +++ b/parser/testdata/02810_fix_remove_dedundant_distinct_view/query.sql @@ -0,0 +1,28 @@ +drop table if exists tab_v; +drop table if exists tab; +create table tab (x UInt64, y UInt64) engine MergeTree() order by (x, y); +insert into tab values(1, 1); +insert into tab values(1, 2); +insert into tab values(2, 1); + +create view tab_v as select distinct(x) from tab; + +-- { echoOn } +set query_plan_remove_redundant_distinct=1; +-- DISTINCT has to be removed since the view already has DISTINCT on the same column +SELECT count() +FROM +( + EXPLAIN SELECT DISTINCT x FROM tab_v +) +WHERE explain ILIKE '%distinct%'; + +SELECT DISTINCT x FROM tab_v ORDER BY x; + +-- explicitly checking that materialize() doesn't affect the result: redundant DISTINCT is still removed +SELECT count() +FROM +( + EXPLAIN SELECT DISTINCT x FROM (SELECT materialize(x) as x FROM (select DISTINCT x from tab)) +) +WHERE explain ILIKE '%distinct%'; diff --git a/parser/testdata/02810_initcap/ast.json b/parser/testdata/02810_initcap/ast.json new file mode 100644 index 000000000..eb8f6870d --- /dev/null +++ b/parser/testdata/02810_initcap/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function initcap (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001242479, + "rows_read": 7, + "bytes_read": 254 + } +} diff --git a/parser/testdata/02810_initcap/metadata.json 
b/parser/testdata/02810_initcap/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02810_initcap/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02810_initcap/query.sql b/parser/testdata/02810_initcap/query.sql new file mode 100644 index 000000000..1a7300036 --- /dev/null +++ b/parser/testdata/02810_initcap/query.sql @@ -0,0 +1,14 @@ +select initcap(''); +select initcap('Hello'); +select initcap('hello'); +select initcap('hello world'); +select initcap('yeah, well, i`m gonna go build my own theme park'); +select initcap('CRC32IEEE is the best function'); +select initcap('42oK'); + +select initcapUTF8(''); +select initcapUTF8('Hello'); +select initcapUTF8('yeah, well, i`m gonna go build my own theme park'); +select initcapUTF8('привет, как дела?'); +select initcapUTF8('ätsch, bätsch'); +select initcapUTF8('We dont support cases when lowercase and uppercase characters occupy different number of bytes in UTF-8. As an example, this happens for ß and ẞ.'); \ No newline at end of file diff --git a/parser/testdata/02810_row_binary_with_defaults/ast.json b/parser/testdata/02810_row_binary_with_defaults/ast.json new file mode 100644 index 000000000..b55047896 --- /dev/null +++ b/parser/testdata/02810_row_binary_with_defaults/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'RowBinaryWithDefaults'" + }, + { + "explain": " Literal 'x UInt32 default 42'" + }, + { + "explain": " Literal '\u0001'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001374394, + "rows_read": 13, + "bytes_read": 507 + } +} diff --git a/parser/testdata/02810_row_binary_with_defaults/metadata.json b/parser/testdata/02810_row_binary_with_defaults/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02810_row_binary_with_defaults/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02810_row_binary_with_defaults/query.sql b/parser/testdata/02810_row_binary_with_defaults/query.sql new file mode 100644 index 000000000..73662352c --- /dev/null +++ b/parser/testdata/02810_row_binary_with_defaults/query.sql @@ -0,0 +1,7 @@ +select * from format('RowBinaryWithDefaults', 'x UInt32 default 42', x'01'); +select * from format('RowBinaryWithDefaults', 'x UInt32 default 42', x'0001000000'); +select * from format('RowBinaryWithDefaults', 'x Nullable(UInt32) default 42', x'01'); +select * from format('RowBinaryWithDefaults', 'x Nullable(UInt32) default 42', x'000001000000'); +select * from format('RowBinaryWithDefaults', 'x Nullable(UInt32) default 42', x'0001'); +select * from format('RowBinaryWithDefaults', 'x Array(Tuple(UInt32, UInt32)) default [(42, 42)]', x'01'); + diff --git a/parser/testdata/02810_system_jemalloc_bins/ast.json b/parser/testdata/02810_system_jemalloc_bins/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/02810_system_jemalloc_bins/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02810_system_jemalloc_bins/metadata.json b/parser/testdata/02810_system_jemalloc_bins/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02810_system_jemalloc_bins/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02810_system_jemalloc_bins/query.sql b/parser/testdata/02810_system_jemalloc_bins/query.sql new file mode 100644 index 000000000..03062e70a --- /dev/null +++ b/parser/testdata/02810_system_jemalloc_bins/query.sql @@ -0,0 +1,13 @@ +WITH + (SELECT value IN ('ON', '1') FROM system.build_options WHERE name = 'USE_JEMALLOC') AS jemalloc_enabled, + (SELECT count() FROM system.jemalloc_bins) AS total_bins, + (SELECT count() FROM system.jemalloc_bins WHERE large) AS large_bins, + (SELECT count() FROM system.jemalloc_bins WHERE NOT large) AS small_bins, + (SELECT sum(size * (allocations - deallocations)) FROM system.jemalloc_bins WHERE large) AS large_allocated_bytes, + (SELECT sum(size * (allocations - deallocations)) FROM system.jemalloc_bins WHERE NOT large) AS small_allocated_bytes +SELECT + (total_bins > 0) = jemalloc_enabled, + (large_bins > 0) = jemalloc_enabled, + (small_bins > 0) = jemalloc_enabled, + (large_allocated_bytes > 0) = jemalloc_enabled, + (small_allocated_bytes > 0) = jemalloc_enabled; diff --git a/parser/testdata/02811_insert_schema_inference/ast.json b/parser/testdata/02811_insert_schema_inference/ast.json new file mode 100644 index 000000000..6106b93a5 --- /dev/null +++ b/parser/testdata/02811_insert_schema_inference/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001055424, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02811_insert_schema_inference/metadata.json b/parser/testdata/02811_insert_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02811_insert_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02811_insert_schema_inference/query.sql b/parser/testdata/02811_insert_schema_inference/query.sql new file mode 100644 index 000000000..9de710047 --- /dev/null +++ b/parser/testdata/02811_insert_schema_inference/query.sql @@ -0,0 +1,9 @@ +drop table if exists test; +create table test +( + n1 UInt32, + n2 UInt32 alias murmurHash3_32(n1), + n3 UInt32 materialized n2 + 1 +)engine=MergeTree order by n1; +insert into test select * from generateRandom() limit 10; +drop table test; diff --git a/parser/testdata/02811_invalid_embedded_rocksdb_create/ast.json b/parser/testdata/02811_invalid_embedded_rocksdb_create/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02811_invalid_embedded_rocksdb_create/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02811_invalid_embedded_rocksdb_create/metadata.json b/parser/testdata/02811_invalid_embedded_rocksdb_create/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02811_invalid_embedded_rocksdb_create/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02811_invalid_embedded_rocksdb_create/query.sql b/parser/testdata/02811_invalid_embedded_rocksdb_create/query.sql new file mode 100644 
index 000000000..54e0c2522 --- /dev/null +++ b/parser/testdata/02811_invalid_embedded_rocksdb_create/query.sql @@ -0,0 +1,2 @@ +-- Tags: no-fasttest, use-rocksdb +CREATE TABLE dict (`k` String, `v` String) ENGINE = EmbeddedRocksDB(k) PRIMARY KEY k; -- {serverError BAD_ARGUMENTS} diff --git a/parser/testdata/02811_ip_dict_attribute/ast.json b/parser/testdata/02811_ip_dict_attribute/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02811_ip_dict_attribute/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02811_ip_dict_attribute/metadata.json b/parser/testdata/02811_ip_dict_attribute/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02811_ip_dict_attribute/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02811_ip_dict_attribute/query.sql b/parser/testdata/02811_ip_dict_attribute/query.sql new file mode 100644 index 000000000..0ffff6e4a --- /dev/null +++ b/parser/testdata/02811_ip_dict_attribute/query.sql @@ -0,0 +1,13 @@ +CREATE TABLE src ( id UInt64, ip4 IPv4, ip6 IPv6 ) Engine=Memory AS + SELECT * FROM VALUES( (1, '1.1.1.1', '::1.1.1.1'), (2, '2.2.2.2', '::2.2.2.2') ); + +CREATE DICTIONARY dict ( id UInt64, ip4 IPv4, ip6 IPv6 ) + PRIMARY KEY id + LAYOUT(HASHED()) + SOURCE (CLICKHOUSE ( table src)) + lifetime ( 10); + +SELECT dictGet('dict', ('ip6', 'ip4'), arrayJoin([2,1])); + +DROP DICTIONARY dict; +DROP TABLE src; diff --git a/parser/testdata/02811_parallel_replicas_prewhere_count/ast.json b/parser/testdata/02811_parallel_replicas_prewhere_count/ast.json new file mode 100644 index 000000000..cff93055a --- /dev/null +++ b/parser/testdata/02811_parallel_replicas_prewhere_count/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery users (children 1)" + }, + { + "explain": " Identifier users" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001240377, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/02811_parallel_replicas_prewhere_count/metadata.json b/parser/testdata/02811_parallel_replicas_prewhere_count/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02811_parallel_replicas_prewhere_count/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02811_parallel_replicas_prewhere_count/query.sql b/parser/testdata/02811_parallel_replicas_prewhere_count/query.sql new file mode 100644 index 000000000..b68faa850 --- /dev/null +++ b/parser/testdata/02811_parallel_replicas_prewhere_count/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS users; +CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=MergeTree() ORDER BY uid; + +INSERT INTO users VALUES (111, 'JFK', 33); +INSERT INTO users VALUES (6666, 'KLM', 48); +INSERT INTO users VALUES (88888, 'AMS', 50); + +SELECT '-- count() ------------------------------'; +SELECT count() FROM users PREWHERE uid > 2000; + +-- enable parallel replicas but with high rows threshold +SET +skip_unavailable_shards=1, +enable_parallel_replicas=1, +max_parallel_replicas=3, +cluster_for_parallel_replicas='parallel_replicas', +parallel_replicas_for_non_replicated_merge_tree=1, +parallel_replicas_min_number_of_rows_per_replica=1000; + +SELECT '-- count() with parallel replicas -------'; +SELECT count() FROM users PREWHERE uid > 2000; + +DROP TABLE users; diff --git a/parser/testdata/02811_primary_key_in_columns/ast.json 
b/parser/testdata/02811_primary_key_in_columns/ast.json new file mode 100644 index 000000000..447af38e6 --- /dev/null +++ b/parser/testdata/02811_primary_key_in_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery pk_test1 (children 1)" + }, + { + "explain": " Identifier pk_test1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00121769, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/02811_primary_key_in_columns/metadata.json b/parser/testdata/02811_primary_key_in_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02811_primary_key_in_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02811_primary_key_in_columns/query.sql b/parser/testdata/02811_primary_key_in_columns/query.sql new file mode 100644 index 000000000..0519f4c82 --- /dev/null +++ b/parser/testdata/02811_primary_key_in_columns/query.sql @@ -0,0 +1,83 @@ +DROP TABLE IF EXISTS pk_test1; +DROP TABLE IF EXISTS pk_test2; +DROP TABLE IF EXISTS pk_test3; +DROP TABLE IF EXISTS pk_test4; +DROP TABLE IF EXISTS pk_test5; +DROP TABLE IF EXISTS pk_test6; +DROP TABLE IF EXISTS pk_test7; +DROP TABLE IF EXISTS pk_test8; +DROP TABLE IF EXISTS pk_test9; +DROP TABLE IF EXISTS pk_test10; +DROP TABLE IF EXISTS pk_test11; +DROP TABLE IF EXISTS pk_test12; +DROP TABLE IF EXISTS pk_test12; +DROP TABLE IF EXISTS pk_test13; +DROP TABLE IF EXISTS pk_test14; +DROP TABLE IF EXISTS pk_test15; +DROP TABLE IF EXISTS pk_test16; +DROP TABLE IF EXISTS pk_test17; +DROP TABLE IF EXISTS pk_test18; +DROP TABLE IF EXISTS pk_test19; +DROP TABLE IF EXISTS pk_test20; +DROP TABLE IF EXISTS pk_test21; +DROP TABLE IF EXISTS pk_test22; +DROP TABLE IF EXISTS pk_test23; + +SET default_table_engine='MergeTree'; + +CREATE TABLE pk_test1 (a String PRIMARY KEY, b String, c String); +CREATE TABLE pk_test2 (a String PRIMARY KEY, b String PRIMARY KEY, c String); +CREATE TABLE pk_test3 (a String PRIMARY KEY, b String PRIMARY KEY, c String PRIMARY KEY); + +CREATE TABLE pk_test4 (a String, b String PRIMARY KEY, c String PRIMARY KEY); +CREATE TABLE pk_test5 (a String, b String PRIMARY KEY, c String); +CREATE TABLE pk_test6 (a String, b String, c String PRIMARY KEY); + +CREATE TABLE pk_test7 (a String PRIMARY KEY, b String, c String, PRIMARY KEY (a)); -- { clientError BAD_ARGUMENTS } +CREATE TABLE pk_test8 (a String PRIMARY KEY, b String PRIMARY KEY, c String, PRIMARY KEY (a)); -- { clientError BAD_ARGUMENTS } +CREATE TABLE pk_test9 (a String PRIMARY KEY, b String PRIMARY KEY, c String PRIMARY KEY, PRIMARY KEY (a)); -- { clientError BAD_ARGUMENTS } + +CREATE TABLE pk_test10 (a String, b String PRIMARY KEY, c String PRIMARY KEY, PRIMARY KEY (a)); -- { clientError BAD_ARGUMENTS } +CREATE TABLE pk_test11 (a String, b String PRIMARY KEY, c String, PRIMARY KEY (a)); -- { clientError BAD_ARGUMENTS } +CREATE TABLE pk_test12 (a String, b String, c String PRIMARY KEY, PRIMARY KEY (a)); -- { clientError BAD_ARGUMENTS } + +CREATE TABLE pk_test12 (a String PRIMARY KEY, b String, c String) PRIMARY KEY (a,b,c); -- { clientError BAD_ARGUMENTS } +CREATE TABLE pk_test13 (a String PRIMARY KEY, b String PRIMARY KEY, c String) PRIMARY KEY (a,b,c); -- { clientError BAD_ARGUMENTS } +CREATE TABLE pk_test14 (a String PRIMARY KEY, b String PRIMARY KEY, c String PRIMARY KEY) PRIMARY KEY (a,b,c); -- { clientError BAD_ARGUMENTS } + +CREATE TABLE pk_test15 (a String, b String PRIMARY KEY, c String 
PRIMARY KEY) PRIMARY KEY (a,b,c); -- { clientError BAD_ARGUMENTS } +CREATE TABLE pk_test16 (a String, b String PRIMARY KEY, c String) PRIMARY KEY (a,b,c); -- { clientError BAD_ARGUMENTS } +CREATE TABLE pk_test17 (a String, b String, c String PRIMARY KEY) PRIMARY KEY (a,b,c); -- { clientError BAD_ARGUMENTS } + +CREATE TABLE pk_test18 (a String PRIMARY KEY, b String, c String) ORDER BY (a,b,c); +CREATE TABLE pk_test19 (a String PRIMARY KEY, b String PRIMARY KEY, c String) ORDER BY (a,b,c); +CREATE TABLE pk_test20 (a String PRIMARY KEY, b String PRIMARY KEY, c String PRIMARY KEY) ORDER BY (a,b,c); + +CREATE TABLE pk_test21 (a String, b String PRIMARY KEY, c String PRIMARY KEY) ORDER BY (a,b,c); -- { serverError BAD_ARGUMENTS } +CREATE TABLE pk_test22 (a String, b String PRIMARY KEY, c String) ORDER BY (a,b,c); -- { serverError BAD_ARGUMENTS } +CREATE TABLE pk_test23 (a String, b String, c String PRIMARY KEY) ORDER BY (a,b,c); -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS pk_test1; +DROP TABLE IF EXISTS pk_test2; +DROP TABLE IF EXISTS pk_test3; +DROP TABLE IF EXISTS pk_test4; +DROP TABLE IF EXISTS pk_test5; +DROP TABLE IF EXISTS pk_test6; +DROP TABLE IF EXISTS pk_test7; +DROP TABLE IF EXISTS pk_test8; +DROP TABLE IF EXISTS pk_test9; +DROP TABLE IF EXISTS pk_test10; +DROP TABLE IF EXISTS pk_test11; +DROP TABLE IF EXISTS pk_test12; +DROP TABLE IF EXISTS pk_test12; +DROP TABLE IF EXISTS pk_test13; +DROP TABLE IF EXISTS pk_test14; +DROP TABLE IF EXISTS pk_test15; +DROP TABLE IF EXISTS pk_test16; +DROP TABLE IF EXISTS pk_test17; +DROP TABLE IF EXISTS pk_test18; +DROP TABLE IF EXISTS pk_test19; +DROP TABLE IF EXISTS pk_test20; +DROP TABLE IF EXISTS pk_test21; +DROP TABLE IF EXISTS pk_test22; +DROP TABLE IF EXISTS pk_test23; \ No newline at end of file diff --git a/parser/testdata/02811_read_in_order_and_array_join_bug/ast.json b/parser/testdata/02811_read_in_order_and_array_join_bug/ast.json new file mode 100644 index 000000000..aedb9e860 --- /dev/null +++ b/parser/testdata/02811_read_in_order_and_array_join_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_array_joins (children 1)" + }, + { + "explain": " Identifier test_array_joins" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001012663, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/02811_read_in_order_and_array_join_bug/metadata.json b/parser/testdata/02811_read_in_order_and_array_join_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02811_read_in_order_and_array_join_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02811_read_in_order_and_array_join_bug/query.sql b/parser/testdata/02811_read_in_order_and_array_join_bug/query.sql new file mode 100644 index 000000000..202f52d8b --- /dev/null +++ b/parser/testdata/02811_read_in_order_and_array_join_bug/query.sql @@ -0,0 +1,17 @@ +drop table if exists test_array_joins; +drop table if exists v4test_array_joins; +create table test_array_joins +( + id UInt64 default rowNumberInAllBlocks() + 1, + arr_1 Array(String), + arr_2 Array(String), + arr_3 Array(String), + arr_4 Array(String) +) engine = MergeTree order by id; + +insert into test_array_joins (id,arr_1, arr_2, arr_3, arr_4) +SELECT number,array(randomPrintableASCII(3)),array(randomPrintableASCII(3)),array(randomPrintableASCII(3)),array(randomPrintableASCII(3)) +from numbers(1000); +create view v4test_array_joins as 
SELECT * from test_array_joins where id != 10; +select * from v4test_array_joins array join arr_1, arr_2, arr_3, arr_4 where match(arr_4,'a') and id < 100 order by id format Null settings optimize_read_in_order = 1; + diff --git a/parser/testdata/02812_bug_with_unused_join_columns/ast.json b/parser/testdata/02812_bug_with_unused_join_columns/ast.json new file mode 100644 index 000000000..9914f13ad --- /dev/null +++ b/parser/testdata/02812_bug_with_unused_join_columns/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function concat (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier func.name" + }, + { + "explain": " Identifier comb.name" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.functions (alias func)" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.aggregate_function_combinators (alias comb)" + }, + { + "explain": " TableJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier name" + }, + { + "explain": " Identifier is_aggregate" + }, + { + "explain": " Set" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001040841, + "rows_read": 20, + "bytes_read": 827 + } +} diff --git a/parser/testdata/02812_bug_with_unused_join_columns/metadata.json b/parser/testdata/02812_bug_with_unused_join_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02812_bug_with_unused_join_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02812_bug_with_unused_join_columns/query.sql b/parser/testdata/02812_bug_with_unused_join_columns/query.sql new file mode 100644 index 000000000..d791b8f33 --- /dev/null +++ b/parser/testdata/02812_bug_with_unused_join_columns/query.sql @@ -0,0 +1 @@ +SELECT concat(func.name, comb.name) AS x FROM system.functions AS func JOIN system.aggregate_function_combinators AS comb using name WHERE is_aggregate settings enable_analyzer=1; diff --git a/parser/testdata/02812_csv_date_time_with_comma/ast.json b/parser/testdata/02812_csv_date_time_with_comma/ast.json new file mode 100644 index 000000000..6349e23d6 --- /dev/null +++ b/parser/testdata/02812_csv_date_time_with_comma/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier CSV" + }, + { + "explain": " Literal 'c1 DateTime, c2 String'" + }, + 
{ + "explain": " Literal '01-01-2000,abc'" + }, + { + "explain": " Set" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001048616, + "rows_read": 14, + "bytes_read": 520 + } +} diff --git a/parser/testdata/02812_csv_date_time_with_comma/metadata.json b/parser/testdata/02812_csv_date_time_with_comma/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02812_csv_date_time_with_comma/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02812_csv_date_time_with_comma/query.sql b/parser/testdata/02812_csv_date_time_with_comma/query.sql new file mode 100644 index 000000000..ecd3cff6a --- /dev/null +++ b/parser/testdata/02812_csv_date_time_with_comma/query.sql @@ -0,0 +1,3 @@ +select * from format(CSV, 'c1 DateTime, c2 String', '01-01-2000,abc') settings date_time_input_format='best_effort'; +select * from format(CSV, 'c1 DateTime64(3), c2 String', '01-01-2000,abc') settings date_time_input_format='best_effort'; + diff --git a/parser/testdata/02812_from_to_utc_timestamp/ast.json b/parser/testdata/02812_from_to_utc_timestamp/ast.json new file mode 100644 index 000000000..0709d7d1e --- /dev/null +++ b/parser/testdata/02812_from_to_utc_timestamp/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_tbl (children 1)" + }, + { + "explain": " Identifier test_tbl" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000990607, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/02812_from_to_utc_timestamp/metadata.json b/parser/testdata/02812_from_to_utc_timestamp/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02812_from_to_utc_timestamp/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02812_from_to_utc_timestamp/query.sql b/parser/testdata/02812_from_to_utc_timestamp/query.sql new file mode 100644 index 000000000..64fa208e1 --- /dev/null +++ b/parser/testdata/02812_from_to_utc_timestamp/query.sql @@ -0,0 +1,37 @@ +DROP TABLE IF EXISTS test_tbl; +CREATE TABLE test_tbl (x UInt32, y DateTime, z DateTime64) engine=MergeTree ORDER BY x; +INSERT INTO test_tbl values(1, '2023-03-16', '2023-03-16 11:22:33'); +INSERT INTO test_tbl values(2, '2023-03-16 11:22:33', '2023-03-16'); +INSERT INTO test_tbl values(3, '2023-03-16 11:22:33', '2023-03-16 11:22:33.123456'); +SELECT x, to_utc_timestamp(toDateTime('2023-03-16 11:22:33'), 'Etc/GMT+1'), from_utc_timestamp(toDateTime64('2023-03-16 11:22:33', 3), 'Etc/GMT+1'), to_utc_timestamp(y, 'Asia/Shanghai'), from_utc_timestamp(z, 'Asia/Shanghai') from test_tbl order by x; +-- timestamp convert between DST timezone and UTC +SELECT to_utc_timestamp(toDateTime('2024-02-24 11:22:33'), 'Europe/Madrid'), from_utc_timestamp(toDateTime('2024-02-24 11:22:33'), 'Europe/Madrid') SETTINGS session_timezone='Europe/Moscow'; +SELECT to_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'Europe/Madrid'), from_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'Europe/Madrid') SETTINGS session_timezone='Europe/Moscow'; +SELECT to_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-10-24 11:22:33'), 'EST') SETTINGS session_timezone='Europe/Moscow'; + +SELECT 'leap year:', to_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'EST') SETTINGS session_timezone='Europe/Moscow'; +SELECT 'non-leap year:', 
to_utc_timestamp(toDateTime('2023-02-29 11:22:33'), 'EST'), from_utc_timestamp(toDateTime('2023-02-29 11:22:33'), 'EST') SETTINGS session_timezone='Europe/Moscow'; +SELECT 'leap year:', to_utc_timestamp(toDateTime('2024-02-28 23:22:33'), 'EST'), from_utc_timestamp(toDateTime('2024-03-01 00:22:33'), 'EST') SETTINGS session_timezone='Europe/Moscow'; +SELECT 'non-leap year:', to_utc_timestamp(toDateTime('2023-02-28 23:22:33'), 'EST'), from_utc_timestamp(toDateTime('2023-03-01 00:22:33'), 'EST') SETTINGS session_timezone='Europe/Moscow'; +SELECT 'timezone with half-hour offset:', to_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'Australia/Adelaide'), from_utc_timestamp(toDateTime('2024-02-29 11:22:33'), 'Australia/Adelaide') SETTINGS session_timezone='Europe/Moscow'; +SELECT 'jump over a year:', to_utc_timestamp(toDateTime('2023-12-31 23:01:01'), 'EST'), from_utc_timestamp(toDateTime('2024-01-01 01:01:01'), 'EST') SETTINGS session_timezone='Europe/Moscow'; + +-- Test cases for dates before Unix epoch (1970-01-01) +SELECT 'before epoch 1:', to_utc_timestamp(toDateTime('1969-12-31 23:59:59'), 'UTC'), from_utc_timestamp(toDateTime('1969-12-31 23:59:59'), 'UTC') SETTINGS session_timezone='UTC'; +SELECT 'before epoch 2:', to_utc_timestamp(toDateTime('1969-01-01 00:00:00'), 'UTC'), from_utc_timestamp(toDateTime('1969-01-01 00:00:00'), 'UTC') SETTINGS session_timezone='UTC'; +SELECT 'before epoch 3:', to_utc_timestamp(toDateTime('1900-01-01 00:00:00'), 'UTC'), from_utc_timestamp(toDateTime('1900-01-01 00:00:00'), 'UTC') SETTINGS session_timezone='UTC'; + +-- Test cases for dates after maximum date (2106-02-07 06:28:15) +SELECT 'after max 1:', to_utc_timestamp(toDateTime('2106-02-07 06:28:16'), 'UTC'), from_utc_timestamp(toDateTime('2106-02-07 06:28:16'), 'UTC') SETTINGS session_timezone='UTC'; +SELECT 'after max 2:', to_utc_timestamp(toDateTime('2106-02-08 00:00:00'), 'UTC'), from_utc_timestamp(toDateTime('2106-02-08 00:00:00'), 'UTC') SETTINGS session_timezone='UTC'; +SELECT 'after max 3:', to_utc_timestamp(toDateTime('2107-01-01 00:00:00'), 'UTC'), from_utc_timestamp(toDateTime('2107-01-01 00:00:00'), 'UTC') SETTINGS session_timezone='UTC'; + +-- Test cases for dates before epoch with different timezones +SELECT 'before epoch with timezone 1:', to_utc_timestamp(toDateTime('1969-12-31 23:59:59'), 'America/New_York'), from_utc_timestamp(toDateTime('1969-12-31 23:59:59'), 'America/New_York') SETTINGS session_timezone='UTC'; +SELECT 'before epoch with timezone 2:', to_utc_timestamp(toDateTime('1969-12-31 23:59:59'), 'Asia/Tokyo'), from_utc_timestamp(toDateTime('1969-12-31 23:59:59'), 'Asia/Tokyo') SETTINGS session_timezone='UTC'; + +-- Test cases for dates after max with different timezones +SELECT 'after max with timezone 1:', to_utc_timestamp(toDateTime('2106-02-07 06:28:16'), 'America/New_York'), from_utc_timestamp(toDateTime('2106-02-07 06:28:16'), 'America/New_York') SETTINGS session_timezone='UTC'; +SELECT 'after max with timezone 2:', to_utc_timestamp(toDateTime('2106-02-07 06:28:16'), 'Asia/Tokyo'), from_utc_timestamp(toDateTime('2106-02-07 06:28:16'), 'Asia/Tokyo') SETTINGS session_timezone='UTC'; + +DROP TABLE test_tbl; diff --git a/parser/testdata/02812_large_varints/ast.json b/parser/testdata/02812_large_varints/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02812_large_varints/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02812_large_varints/metadata.json b/parser/testdata/02812_large_varints/metadata.json new file 
mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02812_large_varints/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02812_large_varints/query.sql b/parser/testdata/02812_large_varints/query.sql new file mode 100644 index 000000000..cfbebb729 --- /dev/null +++ b/parser/testdata/02812_large_varints/query.sql @@ -0,0 +1,4 @@ +-- 64-bit integers with the MSB set (i.e. values > (1ULL<<63) - 1) could, for historical/compat reasons, not be serialized as var-ints (issue #51486). +-- These two queries internally produce such big values; run them to be sure no bad things happen. +SELECT topKWeightedState(65535)(now(), -2) FORMAT Null; +SELECT number FROM numbers(toUInt64(-1)) limit 10 Format Null; diff --git a/parser/testdata/02812_pointwise_array_operations/ast.json b/parser/testdata/02812_pointwise_array_operations/ast.json new file mode 100644 index 000000000..cadfaf031 --- /dev/null +++ b/parser/testdata/02812_pointwise_array_operations/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_1]" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_4]" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001179026, + "rows_read": 12, + "bytes_read": 505 + } +} diff --git a/parser/testdata/02812_pointwise_array_operations/metadata.json b/parser/testdata/02812_pointwise_array_operations/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02812_pointwise_array_operations/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02812_pointwise_array_operations/query.sql b/parser/testdata/02812_pointwise_array_operations/query.sql new file mode 100644 index 000000000..c10332e4a --- /dev/null +++ b/parser/testdata/02812_pointwise_array_operations/query.sql @@ -0,0 +1,18 @@ +SELECT (materialize([1,1]) + materialize([1,4])); +SELECT ([1,2] + [1,4]); +SELECT ([2.5, 1, 3, 10.1] + [2, 4, 9, 0]); +SELECT ([(1,3), (2,9)] + [(10.1, 2.4), (4,12)]); +SELECT ([[1,1],[2]]+[[12,1],[1]]); +SELECT ([1,2]+[1,number]) from numbers(5); +SELECT ([1,2::UInt64]+[1,number]) from numbers(5); +SELECT ([materialize(1),materialize(2),materialize(3)]-[1,2,3]); +SELECT [(NULL, 256), (NULL, 256)] + [(1., 100000000000000000000.), (NULL, 1048577)]; +SELECT ([1,2::UInt64]+[1,number]) from numbers(5); +CREATE TABLE my_table (values Array(Int32)) ENGINE = MergeTree() ORDER BY values; +INSERT INTO my_table (values) VALUES ([12, 3, 1]); +SELECT values - [1,2,3] FROM my_table WHERE arrayExists(x -> x > 5, values); +SELECT ([12,13] % [5,6]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT ([2,3,4]-[1,-2,10,29]); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } +CREATE TABLE a ( x Array(UInt64), y Array(UInt64)) ENGINE = Memory; +INSERT INTO a VALUES ([2,3],[4,5]),([1,2,3], [4,5]),([6,7],[8,9,10]); +SELECT x, y, x+y FROM a; -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } diff --git
a/parser/testdata/02812_subquery_operators/ast.json b/parser/testdata/02812_subquery_operators/ast.json new file mode 100644 index 000000000..a273a21e0 --- /dev/null +++ b/parser/testdata/02812_subquery_operators/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function singleValueOrNull (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001499033, + "rows_read": 9, + "bytes_read": 354 + } +} diff --git a/parser/testdata/02812_subquery_operators/metadata.json b/parser/testdata/02812_subquery_operators/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02812_subquery_operators/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02812_subquery_operators/query.sql b/parser/testdata/02812_subquery_operators/query.sql new file mode 100644 index 000000000..b0638b43e --- /dev/null +++ b/parser/testdata/02812_subquery_operators/query.sql @@ -0,0 +1,6 @@ +SELECT singleValueOrNull(toNullable('')); +SELECT singleValueOrNull(toNullable('Hello')); +SELECT singleValueOrNull((SELECT 'Hello')); +SELECT singleValueOrNull(toNullable(123)); +SELECT '' = ALL (SELECT toNullable('')); +SELECT '', ['\0'], [], singleValueOrNull(( SELECT '\0' ) ), ['']; diff --git a/parser/testdata/02813_any_value/ast.json b/parser/testdata/02813_any_value/ast.json new file mode 100644 index 000000000..e4235e13e --- /dev/null +++ b/parser/testdata/02813_any_value/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001368149, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02813_any_value/metadata.json b/parser/testdata/02813_any_value/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02813_any_value/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02813_any_value/query.sql b/parser/testdata/02813_any_value/query.sql new file mode 100644 index 000000000..6bd2b66fd --- /dev/null +++ b/parser/testdata/02813_any_value/query.sql @@ -0,0 +1,3 @@ +SET max_block_size = 10, max_threads = 1; +select any_value(number) from numbers(10); +select aNy_VaLue(number) from numbers(10); diff --git a/parser/testdata/02813_array_agg/ast.json b/parser/testdata/02813_array_agg/ast.json new file mode 100644 index 000000000..a4130629f --- /dev/null +++ b/parser/testdata/02813_array_agg/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001206057, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02813_array_agg/metadata.json b/parser/testdata/02813_array_agg/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02813_array_agg/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff 
--git a/parser/testdata/02813_array_agg/query.sql b/parser/testdata/02813_array_agg/query.sql new file mode 100644 index 000000000..91d8d0774 --- /dev/null +++ b/parser/testdata/02813_array_agg/query.sql @@ -0,0 +1,10 @@ +drop table if exists t; +create table t (n Int32, s String) engine=MergeTree order by n; + +insert into t select number, 'hello, world!' from numbers (5); + +select array_agg(s) from t; + +select aRray_Agg(s) from t group by n; + +drop table t; diff --git a/parser/testdata/02813_array_concat_agg/ast.json b/parser/testdata/02813_array_concat_agg/ast.json new file mode 100644 index 000000000..8956353dc --- /dev/null +++ b/parser/testdata/02813_array_concat_agg/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001288685, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02813_array_concat_agg/metadata.json b/parser/testdata/02813_array_concat_agg/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02813_array_concat_agg/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02813_array_concat_agg/query.sql b/parser/testdata/02813_array_concat_agg/query.sql new file mode 100644 index 000000000..94fe133db --- /dev/null +++ b/parser/testdata/02813_array_concat_agg/query.sql @@ -0,0 +1,9 @@ +drop table if exists t; + +create table t (n UInt32, a Array(Int32)) engine=Memory; +insert into t values (1, [1,2,3]), (2, [4,5]), (3, [6]); + +select array_concat_agg(a) from t; +select ArrAy_cOncAt_aGg(a) from t; +select n, array_concat_agg(a) from t group by n order by n; +drop table t; diff --git a/parser/testdata/02813_create_index_noop/ast.json b/parser/testdata/02813_create_index_noop/ast.json new file mode 100644 index 000000000..e505b647a --- /dev/null +++ b/parser/testdata/02813_create_index_noop/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001321869, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02813_create_index_noop/metadata.json b/parser/testdata/02813_create_index_noop/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02813_create_index_noop/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02813_create_index_noop/query.sql b/parser/testdata/02813_create_index_noop/query.sql new file mode 100644 index 000000000..0f32dc6bd --- /dev/null +++ b/parser/testdata/02813_create_index_noop/query.sql @@ -0,0 +1,1000 @@ +SET allow_create_index_without_type=0; +CREATE INDEX idx_tab1_0 on tab1 (col0); -- { serverError INCORRECT_QUERY } +SET allow_create_index_without_type=1; +CREATE INDEX idx_tab1_0 on tab1 (col0); +CREATE INDEX idx_tab1_1 on tab1 (col1); +CREATE INDEX idx_tab1_3 on tab1 (col3); +CREATE INDEX idx_tab1_4 on tab1 (col4); +CREATE INDEX idx_tab2_0 ON tab2 (col0 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col0 DESC,col1 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col0 DESC,col1 DESC,col4 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col0 DESC,col1); +CREATE INDEX idx_tab2_0 ON tab2 (col0 DESC,col3); +CREATE INDEX idx_tab2_0 ON tab2 (col0 DESC,col4 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col0 DESC,col4); +CREATE INDEX idx_tab2_0 ON tab2 (col0); +CREATE 
INDEX idx_tab2_0 ON tab2 (col0,col1 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col0,col1 DESC,col3); +CREATE INDEX idx_tab2_0 ON tab2 (col0,col1); +CREATE INDEX idx_tab2_0 ON tab2 (col0,col3 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col0,col3); +CREATE INDEX idx_tab2_0 ON tab2 (col0,col3,col1 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col0,col3,col4 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col0,col4 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col0,col4); +CREATE INDEX idx_tab2_0 ON tab2 (col0,col4,col3 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC,col0 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC,col0); +CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC,col0,col3 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC,col0,col4 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC,col3 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC,col3 DESC,col0); +CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC,col3); +CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC,col4 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col1 DESC,col4); +CREATE INDEX idx_tab2_0 ON tab2 (col1); +CREATE INDEX idx_tab2_0 ON tab2 (col1,col0 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col1,col0 DESC,col4 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col1,col0,col4 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col1,col3 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col1,col3); +CREATE INDEX idx_tab2_0 ON tab2 (col1,col4 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col1,col4 DESC,col0 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col1,col4 DESC,col3); +CREATE INDEX idx_tab2_0 ON tab2 (col1,col4); +CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col0 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col0 DESC,col4 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col0); +CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col0,col1); +CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col0,col4); +CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col1 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col1); +CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col1,col4); +CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col4 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col4 DESC,col1 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col4 DESC,col1); +CREATE INDEX idx_tab2_0 ON tab2 (col3 DESC,col4); +CREATE INDEX idx_tab2_0 ON tab2 (col3); +CREATE INDEX idx_tab2_0 ON tab2 (col3,col0 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col3,col0 DESC,col1 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col3,col0 DESC,col1); +CREATE INDEX idx_tab2_0 ON tab2 (col3,col0); +CREATE INDEX idx_tab2_0 ON tab2 (col3,col1 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col3,col1 DESC,col0 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col3,col1); +CREATE INDEX idx_tab2_0 ON tab2 (col3,col4 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col3,col4); +CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col0 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col0); +CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col0,col1 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col0,col1); +CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col1 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col1 DESC,col0 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col1); +CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col3 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col3 DESC,col0); +CREATE INDEX idx_tab2_0 ON tab2 (col4 DESC,col3); +CREATE INDEX idx_tab2_0 ON tab2 (col4); +CREATE INDEX idx_tab2_0 ON tab2 (col4,col0 DESC); +CREATE INDEX idx_tab2_0 ON tab2 
(col4,col0 DESC,col1); +CREATE INDEX idx_tab2_0 ON tab2 (col4,col0); +CREATE INDEX idx_tab2_0 ON tab2 (col4,col1 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col4,col1 DESC,col3 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col4,col1 DESC,col3); +CREATE INDEX idx_tab2_0 ON tab2 (col4,col1); +CREATE INDEX idx_tab2_0 ON tab2 (col4,col3 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col4,col3 DESC,col0 DESC); +CREATE INDEX idx_tab2_0 ON tab2 (col4,col3); +CREATE INDEX idx_tab2_1 ON tab2 (col0 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col0 DESC,col1 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col0 DESC,col1); +CREATE INDEX idx_tab2_1 ON tab2 (col0 DESC,col3 DESC,col4); +CREATE INDEX idx_tab2_1 ON tab2 (col0 DESC,col3); +CREATE INDEX idx_tab2_1 ON tab2 (col0 DESC,col3,col1); +CREATE INDEX idx_tab2_1 ON tab2 (col0 DESC,col4 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col0 DESC,col4); +CREATE INDEX idx_tab2_1 ON tab2 (col0); +CREATE INDEX idx_tab2_1 ON tab2 (col0,col1 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col0,col1); +CREATE INDEX idx_tab2_1 ON tab2 (col0,col1,col4 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col0,col3 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col0,col3 DESC,col1); +CREATE INDEX idx_tab2_1 ON tab2 (col0,col3 DESC,col4); +CREATE INDEX idx_tab2_1 ON tab2 (col0,col3); +CREATE INDEX idx_tab2_1 ON tab2 (col0,col4 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col0,col4); +CREATE INDEX idx_tab2_1 ON tab2 (col1 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col1 DESC,col0 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col1 DESC,col0 DESC,col3); +CREATE INDEX idx_tab2_1 ON tab2 (col1 DESC,col0); +CREATE INDEX idx_tab2_1 ON tab2 (col1 DESC,col3 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col1 DESC,col3); +CREATE INDEX idx_tab2_1 ON tab2 (col1 DESC,col4 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col1 DESC,col4 DESC,col3,col0 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col1); +CREATE INDEX idx_tab2_1 ON tab2 (col1,col0 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col1,col0); +CREATE INDEX idx_tab2_1 ON tab2 (col1,col3 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col1,col3 DESC,col0 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col1,col3); +CREATE INDEX idx_tab2_1 ON tab2 (col1,col3,col4); +CREATE INDEX idx_tab2_1 ON tab2 (col1,col4); +CREATE INDEX idx_tab2_1 ON tab2 (col1,col4,col3); +CREATE INDEX idx_tab2_1 ON tab2 (col3 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col3 DESC,col0 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col3 DESC,col0 DESC,col1); +CREATE INDEX idx_tab2_1 ON tab2 (col3 DESC,col0 DESC,col4 DESC,col1); +CREATE INDEX idx_tab2_1 ON tab2 (col3 DESC,col0); +CREATE INDEX idx_tab2_1 ON tab2 (col3 DESC,col0,col1 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col3 DESC,col1 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col3 DESC,col1); +CREATE INDEX idx_tab2_1 ON tab2 (col3 DESC,col4 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col3); +CREATE INDEX idx_tab2_1 ON tab2 (col3,col0 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col3,col0); +CREATE INDEX idx_tab2_1 ON tab2 (col3,col1 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col3,col1); +CREATE INDEX idx_tab2_1 ON tab2 (col3,col4 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col3,col4 DESC,col0 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col3,col4); +CREATE INDEX idx_tab2_1 ON tab2 (col4 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col4 DESC,col0); +CREATE INDEX idx_tab2_1 ON tab2 (col4 DESC,col1 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col4 DESC,col1); +CREATE INDEX idx_tab2_1 ON tab2 (col4 DESC,col3 DESC,col1 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col4 DESC,col3); +CREATE INDEX idx_tab2_1 ON tab2 (col4); +CREATE INDEX idx_tab2_1 ON 
tab2 (col4,col0 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col4,col0); +CREATE INDEX idx_tab2_1 ON tab2 (col4,col1 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col4,col1); +CREATE INDEX idx_tab2_1 ON tab2 (col4,col3 DESC); +CREATE INDEX idx_tab2_1 ON tab2 (col4,col3 DESC,col1); +CREATE INDEX idx_tab2_2 ON tab2 (col0 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col0 DESC,col1 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col0 DESC,col1); +CREATE INDEX idx_tab2_2 ON tab2 (col0 DESC,col3); +CREATE INDEX idx_tab2_2 ON tab2 (col0 DESC,col3,col1); +CREATE INDEX idx_tab2_2 ON tab2 (col0 DESC,col4 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col0 DESC,col4); +CREATE INDEX idx_tab2_2 ON tab2 (col0); +CREATE INDEX idx_tab2_2 ON tab2 (col0,col1 DESC,col3); +CREATE INDEX idx_tab2_2 ON tab2 (col0,col1); +CREATE INDEX idx_tab2_2 ON tab2 (col0,col3); +CREATE INDEX idx_tab2_2 ON tab2 (col0,col4 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col0,col4 DESC,col1 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col0,col4 DESC,col3 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col0,col4); +CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col0 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col0 DESC,col3 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col0); +CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col0,col3 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col0,col4); +CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col3 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col3 DESC,col0); +CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col3); +CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col4 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col1 DESC,col4); +CREATE INDEX idx_tab2_2 ON tab2 (col1); +CREATE INDEX idx_tab2_2 ON tab2 (col1,col0 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col1,col0); +CREATE INDEX idx_tab2_2 ON tab2 (col1,col3 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col1,col3,col0); +CREATE INDEX idx_tab2_2 ON tab2 (col1,col4 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col1,col4); +CREATE INDEX idx_tab2_2 ON tab2 (col3 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col3 DESC,col0 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col3 DESC,col0); +CREATE INDEX idx_tab2_2 ON tab2 (col3 DESC,col1); +CREATE INDEX idx_tab2_2 ON tab2 (col3 DESC,col4 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col3 DESC,col4); +CREATE INDEX idx_tab2_2 ON tab2 (col3); +CREATE INDEX idx_tab2_2 ON tab2 (col3,col0 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col3,col0); +CREATE INDEX idx_tab2_2 ON tab2 (col3,col1 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col3,col1 DESC,col0); +CREATE INDEX idx_tab2_2 ON tab2 (col3,col1); +CREATE INDEX idx_tab2_2 ON tab2 (col3,col4); +CREATE INDEX idx_tab2_2 ON tab2 (col4 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col4 DESC,col0 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col4 DESC,col1 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col4 DESC,col1); +CREATE INDEX idx_tab2_2 ON tab2 (col4 DESC,col3 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col4 DESC,col3); +CREATE INDEX idx_tab2_2 ON tab2 (col4); +CREATE INDEX idx_tab2_2 ON tab2 (col4,col0 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col4,col0,col3); +CREATE INDEX idx_tab2_2 ON tab2 (col4,col1 DESC,col0); +CREATE INDEX idx_tab2_2 ON tab2 (col4,col1); +CREATE INDEX idx_tab2_2 ON tab2 (col4,col3 DESC); +CREATE INDEX idx_tab2_2 ON tab2 (col4,col3); +CREATE INDEX idx_tab2_3 ON tab2 (col0 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col0 DESC,col3); +CREATE INDEX idx_tab2_3 ON tab2 (col0 DESC,col4 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col0 DESC,col4 DESC,col3); +CREATE INDEX idx_tab2_3 ON tab2 
(col0); +CREATE INDEX idx_tab2_3 ON tab2 (col0,col1 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col0,col1); +CREATE INDEX idx_tab2_3 ON tab2 (col0,col3 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col0,col4 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col0,col4 DESC,col1 DESC,col3); +CREATE INDEX idx_tab2_3 ON tab2 (col0,col4 DESC,col3); +CREATE INDEX idx_tab2_3 ON tab2 (col0,col4); +CREATE INDEX idx_tab2_3 ON tab2 (col1 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col1 DESC,col0,col3); +CREATE INDEX idx_tab2_3 ON tab2 (col1 DESC,col3 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col1 DESC,col3); +CREATE INDEX idx_tab2_3 ON tab2 (col1 DESC,col4 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col1 DESC,col4 DESC,col0); +CREATE INDEX idx_tab2_3 ON tab2 (col1 DESC,col4 DESC,col3); +CREATE INDEX idx_tab2_3 ON tab2 (col1); +CREATE INDEX idx_tab2_3 ON tab2 (col1,col0 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col1,col3 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col1,col3); +CREATE INDEX idx_tab2_3 ON tab2 (col1,col4 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col1,col4); +CREATE INDEX idx_tab2_3 ON tab2 (col3 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col3 DESC,col0 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col3 DESC,col0 DESC,col4); +CREATE INDEX idx_tab2_3 ON tab2 (col3 DESC,col0); +CREATE INDEX idx_tab2_3 ON tab2 (col3 DESC,col1); +CREATE INDEX idx_tab2_3 ON tab2 (col3 DESC,col1,col4); +CREATE INDEX idx_tab2_3 ON tab2 (col3 DESC,col4 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col3 DESC,col4,col1); +CREATE INDEX idx_tab2_3 ON tab2 (col3); +CREATE INDEX idx_tab2_3 ON tab2 (col3,col0 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col3,col0); +CREATE INDEX idx_tab2_3 ON tab2 (col3,col1 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col3,col1 DESC,col4 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col3,col1); +CREATE INDEX idx_tab2_3 ON tab2 (col3,col4 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col3,col4); +CREATE INDEX idx_tab2_3 ON tab2 (col4 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col4 DESC,col0 DESC,col1 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col4 DESC,col0 DESC,col3 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col4 DESC,col0,col1 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col4 DESC,col1 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col4 DESC,col1); +CREATE INDEX idx_tab2_3 ON tab2 (col4 DESC,col3,col0); +CREATE INDEX idx_tab2_3 ON tab2 (col4); +CREATE INDEX idx_tab2_3 ON tab2 (col4,col0 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col4,col1 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col4,col3 DESC); +CREATE INDEX idx_tab2_3 ON tab2 (col4,col3 DESC,col0 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col0 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col0 DESC,col1 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col0 DESC,col1); +CREATE INDEX idx_tab2_4 ON tab2 (col0 DESC,col3 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col0 DESC,col4 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col0 DESC,col4 DESC,col3); +CREATE INDEX idx_tab2_4 ON tab2 (col0 DESC,col4); +CREATE INDEX idx_tab2_4 ON tab2 (col0); +CREATE INDEX idx_tab2_4 ON tab2 (col0,col3 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col0,col3); +CREATE INDEX idx_tab2_4 ON tab2 (col0,col4 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col1 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col1 DESC,col3 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col1 DESC,col4 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col1); +CREATE INDEX idx_tab2_4 ON tab2 (col1,col0 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col1,col4 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col1,col4); +CREATE INDEX idx_tab2_4 ON tab2 (col3 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col3 DESC,col0 DESC); +CREATE 
INDEX idx_tab2_4 ON tab2 (col3 DESC,col1); +CREATE INDEX idx_tab2_4 ON tab2 (col3 DESC,col4 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col3 DESC,col4); +CREATE INDEX idx_tab2_4 ON tab2 (col3); +CREATE INDEX idx_tab2_4 ON tab2 (col3,col0); +CREATE INDEX idx_tab2_4 ON tab2 (col3,col1); +CREATE INDEX idx_tab2_4 ON tab2 (col3,col4 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col3,col4); +CREATE INDEX idx_tab2_4 ON tab2 (col4 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col4 DESC,col0 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col4 DESC,col1); +CREATE INDEX idx_tab2_4 ON tab2 (col4 DESC,col3 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col4 DESC,col3 DESC,col1 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col4 DESC,col3); +CREATE INDEX idx_tab2_4 ON tab2 (col4 DESC,col3,col0 DESC,col1 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col4); +CREATE INDEX idx_tab2_4 ON tab2 (col4,col0 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col4,col0); +CREATE INDEX idx_tab2_4 ON tab2 (col4,col1 DESC); +CREATE INDEX idx_tab2_4 ON tab2 (col4,col1); +CREATE INDEX idx_tab2_4 ON tab2 (col4,col3); +CREATE INDEX idx_tab2_5 ON tab2 (col0 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col0 DESC,col3 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col0 DESC,col3 DESC,col1 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col0 DESC,col3); +CREATE INDEX idx_tab2_5 ON tab2 (col0); +CREATE INDEX idx_tab2_5 ON tab2 (col0,col1 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col0,col1); +CREATE INDEX idx_tab2_5 ON tab2 (col0,col4 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col1 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col1 DESC,col3 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col1 DESC,col3); +CREATE INDEX idx_tab2_5 ON tab2 (col1 DESC,col4 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col1 DESC,col4); +CREATE INDEX idx_tab2_5 ON tab2 (col1); +CREATE INDEX idx_tab2_5 ON tab2 (col1,col0); +CREATE INDEX idx_tab2_5 ON tab2 (col1,col3 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col1,col3); +CREATE INDEX idx_tab2_5 ON tab2 (col1,col4 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col3 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col3 DESC,col0 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col3 DESC,col1 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col3 DESC,col1); +CREATE INDEX idx_tab2_5 ON tab2 (col3 DESC,col4 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col3 DESC,col4,col1 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col3); +CREATE INDEX idx_tab2_5 ON tab2 (col3,col0); +CREATE INDEX idx_tab2_5 ON tab2 (col4 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col4 DESC,col0 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col4 DESC,col1); +CREATE INDEX idx_tab2_5 ON tab2 (col4 DESC,col3 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col4 DESC,col3); +CREATE INDEX idx_tab2_5 ON tab2 (col4); +CREATE INDEX idx_tab2_5 ON tab2 (col4,col0 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col4,col0); +CREATE INDEX idx_tab2_5 ON tab2 (col4,col0,col1 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col4,col1 DESC); +CREATE INDEX idx_tab2_5 ON tab2 (col4,col1); +CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col1 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col1 DESC,col4); +CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col1); +CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col1,col3); +CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col1,col4); +CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col3 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col3); +CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col3,col1 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col4 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col4 DESC,col1); +CREATE INDEX 
idx_tab3_0 ON tab3 (col0 DESC,col4 DESC,col3); +CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col4); +CREATE INDEX idx_tab3_0 ON tab3 (col0 DESC,col4,col1); +CREATE INDEX idx_tab3_0 ON tab3 (col0); +CREATE INDEX idx_tab3_0 ON tab3 (col0,col1 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col0,col1 DESC,col3 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col0,col1); +CREATE INDEX idx_tab3_0 ON tab3 (col0,col3 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col0,col3); +CREATE INDEX idx_tab3_0 ON tab3 (col0,col4 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col0,col4 DESC,col1 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col0,col4 DESC,col3); +CREATE INDEX idx_tab3_0 ON tab3 (col0,col4); +CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC,col0 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC,col0 DESC,col4); +CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC,col3 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC,col3); +CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC,col3,col4 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC,col4 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC,col4 DESC,col3 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC,col4); +CREATE INDEX idx_tab3_0 ON tab3 (col1 DESC,col4,col3 DESC,col0 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col1); +CREATE INDEX idx_tab3_0 ON tab3 (col1,col0 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col1,col0); +CREATE INDEX idx_tab3_0 ON tab3 (col1,col0,col3); +CREATE INDEX idx_tab3_0 ON tab3 (col1,col3 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col1,col3 DESC,col0 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col1,col3 DESC,col4 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col1,col4 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col1,col4 DESC,col0 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col1,col4); +CREATE INDEX idx_tab3_0 ON tab3 (col3 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col3 DESC,col0 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col3 DESC,col0 DESC,col1 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col3 DESC,col0); +CREATE INDEX idx_tab3_0 ON tab3 (col3 DESC,col1 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col3 DESC,col1); +CREATE INDEX idx_tab3_0 ON tab3 (col3 DESC,col1,col4); +CREATE INDEX idx_tab3_0 ON tab3 (col3 DESC,col4 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col3 DESC,col4); +CREATE INDEX idx_tab3_0 ON tab3 (col3); +CREATE INDEX idx_tab3_0 ON tab3 (col3,col0 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col3,col0); +CREATE INDEX idx_tab3_0 ON tab3 (col3,col1 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col3,col1 DESC,col4); +CREATE INDEX idx_tab3_0 ON tab3 (col3,col1); +CREATE INDEX idx_tab3_0 ON tab3 (col3,col1,col0 DESC,col4); +CREATE INDEX idx_tab3_0 ON tab3 (col3,col4 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col3,col4); +CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC,col0 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC,col0); +CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC,col0,col3); +CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC,col1 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC,col1); +CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC,col1,col3); +CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC,col3 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC,col3); +CREATE INDEX idx_tab3_0 ON tab3 (col4 DESC,col3,col1 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col4); +CREATE INDEX idx_tab3_0 ON tab3 (col4,col0 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col4,col0); +CREATE INDEX idx_tab3_0 ON tab3 (col4,col1 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col4,col1 DESC,col0); +CREATE INDEX idx_tab3_0 ON tab3 (col4,col1 DESC,col3 DESC); +CREATE 
INDEX idx_tab3_0 ON tab3 (col4,col1); +CREATE INDEX idx_tab3_0 ON tab3 (col4,col3 DESC); +CREATE INDEX idx_tab3_0 ON tab3 (col4,col3); +CREATE INDEX idx_tab3_0 ON tab3 (col4,col3,col1,col0); +CREATE INDEX idx_tab3_1 ON tab3 (col0 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col0 DESC,col1 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col0 DESC,col1); +CREATE INDEX idx_tab3_1 ON tab3 (col0 DESC,col3); +CREATE INDEX idx_tab3_1 ON tab3 (col0 DESC,col3,col1); +CREATE INDEX idx_tab3_1 ON tab3 (col0 DESC,col4 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col0 DESC,col4); +CREATE INDEX idx_tab3_1 ON tab3 (col0); +CREATE INDEX idx_tab3_1 ON tab3 (col0,col1 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col0,col1); +CREATE INDEX idx_tab3_1 ON tab3 (col0,col1,col3 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col0,col3 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col0,col3); +CREATE INDEX idx_tab3_1 ON tab3 (col0,col4 DESC,col1); +CREATE INDEX idx_tab3_1 ON tab3 (col0,col4); +CREATE INDEX idx_tab3_1 ON tab3 (col1 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col1 DESC,col0 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col1 DESC,col0); +CREATE INDEX idx_tab3_1 ON tab3 (col1 DESC,col0,col3 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col1 DESC,col3 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col1 DESC,col3); +CREATE INDEX idx_tab3_1 ON tab3 (col1 DESC,col3,col4 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col1 DESC,col4 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col1 DESC,col4); +CREATE INDEX idx_tab3_1 ON tab3 (col1); +CREATE INDEX idx_tab3_1 ON tab3 (col1,col0 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col1,col0 DESC,col4); +CREATE INDEX idx_tab3_1 ON tab3 (col1,col0); +CREATE INDEX idx_tab3_1 ON tab3 (col1,col3 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col1,col3 DESC,col0); +CREATE INDEX idx_tab3_1 ON tab3 (col1,col3); +CREATE INDEX idx_tab3_1 ON tab3 (col1,col4 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col1,col4 DESC,col3 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col3 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col3 DESC,col0 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col3 DESC,col0 DESC,col4); +CREATE INDEX idx_tab3_1 ON tab3 (col3 DESC,col0); +CREATE INDEX idx_tab3_1 ON tab3 (col3 DESC,col0,col1); +CREATE INDEX idx_tab3_1 ON tab3 (col3 DESC,col1); +CREATE INDEX idx_tab3_1 ON tab3 (col3 DESC,col4); +CREATE INDEX idx_tab3_1 ON tab3 (col3); +CREATE INDEX idx_tab3_1 ON tab3 (col3,col0 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col3,col0 DESC,col4); +CREATE INDEX idx_tab3_1 ON tab3 (col3,col0,col4 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col3,col1 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col3,col1 DESC,col0 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col3,col1); +CREATE INDEX idx_tab3_1 ON tab3 (col3,col4 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col3,col4); +CREATE INDEX idx_tab3_1 ON tab3 (col4 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col4 DESC,col0 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col4 DESC,col1 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col4 DESC,col1 DESC,col3 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col4 DESC,col1); +CREATE INDEX idx_tab3_1 ON tab3 (col4 DESC,col1,col0 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col4 DESC,col3 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col4); +CREATE INDEX idx_tab3_1 ON tab3 (col4,col0 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col4,col0); +CREATE INDEX idx_tab3_1 ON tab3 (col4,col1 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col4,col1); +CREATE INDEX idx_tab3_1 ON tab3 (col4,col1,col0); +CREATE INDEX idx_tab3_1 ON tab3 (col4,col3 DESC); +CREATE INDEX idx_tab3_1 ON tab3 (col4,col3 DESC,col0 DESC); +CREATE INDEX idx_tab3_1 ON tab3 
(col4,col3); +CREATE INDEX idx_tab3_2 ON tab3 (col0 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col0 DESC,col1 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col0 DESC,col3 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col0 DESC,col3); +CREATE INDEX idx_tab3_2 ON tab3 (col0 DESC,col4 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col0 DESC,col4); +CREATE INDEX idx_tab3_2 ON tab3 (col0); +CREATE INDEX idx_tab3_2 ON tab3 (col0,col1 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col0,col1); +CREATE INDEX idx_tab3_2 ON tab3 (col0,col3 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col0,col3); +CREATE INDEX idx_tab3_2 ON tab3 (col0,col4 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col0,col4); +CREATE INDEX idx_tab3_2 ON tab3 (col1 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col1 DESC,col0 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col1 DESC,col0); +CREATE INDEX idx_tab3_2 ON tab3 (col1 DESC,col3 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col1 DESC,col3); +CREATE INDEX idx_tab3_2 ON tab3 (col1 DESC,col4 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col1 DESC,col4); +CREATE INDEX idx_tab3_2 ON tab3 (col1); +CREATE INDEX idx_tab3_2 ON tab3 (col1,col0 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col1,col0); +CREATE INDEX idx_tab3_2 ON tab3 (col1,col3 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col1,col3); +CREATE INDEX idx_tab3_2 ON tab3 (col1,col4); +CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col0); +CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col1 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col1); +CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col1,col0 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col1,col4 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col4 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col4 DESC,col0 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col4 DESC,col0); +CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col4); +CREATE INDEX idx_tab3_2 ON tab3 (col3 DESC,col4,col0); +CREATE INDEX idx_tab3_2 ON tab3 (col3); +CREATE INDEX idx_tab3_2 ON tab3 (col3,col0 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col3,col1 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col3,col1); +CREATE INDEX idx_tab3_2 ON tab3 (col3,col4 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col3,col4); +CREATE INDEX idx_tab3_2 ON tab3 (col4 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col4 DESC,col0 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col4 DESC,col0 DESC,col3); +CREATE INDEX idx_tab3_2 ON tab3 (col4 DESC,col0); +CREATE INDEX idx_tab3_2 ON tab3 (col4 DESC,col0,col3 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col4 DESC,col1); +CREATE INDEX idx_tab3_2 ON tab3 (col4 DESC,col3 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col4 DESC,col3 DESC,col0 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col4 DESC,col3,col0); +CREATE INDEX idx_tab3_2 ON tab3 (col4); +CREATE INDEX idx_tab3_2 ON tab3 (col4,col0 DESC,col1); +CREATE INDEX idx_tab3_2 ON tab3 (col4,col0); +CREATE INDEX idx_tab3_2 ON tab3 (col4,col1 DESC); +CREATE INDEX idx_tab3_2 ON tab3 (col4,col1); +CREATE INDEX idx_tab3_2 ON tab3 (col4,col3 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col0 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col0 DESC,col1 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col0 DESC,col3 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col0 DESC,col3); +CREATE INDEX idx_tab3_3 ON tab3 (col0 DESC,col3,col4); +CREATE INDEX idx_tab3_3 ON tab3 (col0 DESC,col4); +CREATE INDEX idx_tab3_3 ON tab3 (col0); +CREATE INDEX idx_tab3_3 ON tab3 (col0,col1 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col0,col1); +CREATE INDEX idx_tab3_3 ON tab3 (col0,col3 DESC); +CREATE INDEX idx_tab3_3 ON tab3 
(col0,col3); +CREATE INDEX idx_tab3_3 ON tab3 (col0,col4 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col1 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col1 DESC,col0 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col1 DESC,col3 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col1 DESC,col3); +CREATE INDEX idx_tab3_3 ON tab3 (col1 DESC,col4 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col1 DESC,col4); +CREATE INDEX idx_tab3_3 ON tab3 (col1); +CREATE INDEX idx_tab3_3 ON tab3 (col1,col0); +CREATE INDEX idx_tab3_3 ON tab3 (col1,col3 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col1,col4 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col1,col4 DESC,col0 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col1,col4); +CREATE INDEX idx_tab3_3 ON tab3 (col3 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col3 DESC,col0); +CREATE INDEX idx_tab3_3 ON tab3 (col3 DESC,col1 DESC,col4); +CREATE INDEX idx_tab3_3 ON tab3 (col3 DESC,col1); +CREATE INDEX idx_tab3_3 ON tab3 (col3 DESC,col4 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col3 DESC,col4 DESC,col1 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col3 DESC,col4); +CREATE INDEX idx_tab3_3 ON tab3 (col3); +CREATE INDEX idx_tab3_3 ON tab3 (col3,col0 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col3,col0); +CREATE INDEX idx_tab3_3 ON tab3 (col3,col0,col4 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col3,col4,col1 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col4 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col4 DESC,col0); +CREATE INDEX idx_tab3_3 ON tab3 (col4 DESC,col1 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col4 DESC,col3); +CREATE INDEX idx_tab3_3 ON tab3 (col4); +CREATE INDEX idx_tab3_3 ON tab3 (col4,col0 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col4,col0); +CREATE INDEX idx_tab3_3 ON tab3 (col4,col1 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col4,col1); +CREATE INDEX idx_tab3_3 ON tab3 (col4,col3 DESC); +CREATE INDEX idx_tab3_3 ON tab3 (col4,col3); +CREATE INDEX idx_tab3_4 ON tab3 (col0 DESC); +CREATE INDEX idx_tab3_4 ON tab3 (col0 DESC,col1); +CREATE INDEX idx_tab3_4 ON tab3 (col0 DESC,col3 DESC); +CREATE INDEX idx_tab3_4 ON tab3 (col0 DESC,col3); +CREATE INDEX idx_tab3_4 ON tab3 (col0 DESC,col4); +CREATE INDEX idx_tab3_4 ON tab3 (col0); +CREATE INDEX idx_tab3_4 ON tab3 (col0,col1 DESC); +CREATE INDEX idx_tab3_4 ON tab3 (col0,col1); +CREATE INDEX idx_tab3_4 ON tab3 (col0,col3); +CREATE INDEX idx_tab3_4 ON tab3 (col0,col4 DESC); +CREATE INDEX idx_tab3_4 ON tab3 (col0,col4); +CREATE INDEX idx_tab3_4 ON tab3 (col1 DESC); +CREATE INDEX idx_tab3_4 ON tab3 (col1 DESC,col3 DESC); +CREATE INDEX idx_tab3_4 ON tab3 (col1 DESC,col4 DESC); +CREATE INDEX idx_tab3_4 ON tab3 (col1 DESC,col4 DESC,col0 DESC); +CREATE INDEX idx_tab3_4 ON tab3 (col1 DESC,col4 DESC,col0); +CREATE INDEX idx_tab3_4 ON tab3 (col1 DESC,col4); +CREATE INDEX idx_tab3_4 ON tab3 (col1 DESC,col4,col0 DESC); +CREATE INDEX idx_tab3_4 ON tab3 (col1); +CREATE INDEX idx_tab3_4 ON tab3 (col1,col0); +CREATE INDEX idx_tab3_4 ON tab3 (col1,col4 DESC); +CREATE INDEX idx_tab3_4 ON tab3 (col3 DESC); +CREATE INDEX idx_tab3_4 ON tab3 (col3 DESC,col1); +CREATE INDEX idx_tab3_4 ON tab3 (col3 DESC,col1,col0 DESC); +CREATE INDEX idx_tab3_4 ON tab3 (col3 DESC,col4,col0 DESC); +CREATE INDEX idx_tab3_4 ON tab3 (col3); +CREATE INDEX idx_tab3_4 ON tab3 (col3,col1); +CREATE INDEX idx_tab3_4 ON tab3 (col3,col4 DESC); +CREATE INDEX idx_tab3_4 ON tab3 (col4 DESC); +CREATE INDEX idx_tab3_4 ON tab3 (col4 DESC,col0 DESC); +CREATE INDEX idx_tab3_4 ON tab3 (col4 DESC,col0 DESC,col1); +CREATE INDEX idx_tab3_4 ON tab3 (col4 DESC,col0); +CREATE INDEX idx_tab3_4 ON tab3 (col4); +CREATE INDEX idx_tab3_4 ON 
tab3 (col4,col0); +CREATE INDEX idx_tab3_4 ON tab3 (col4,col0,col1); +CREATE INDEX idx_tab3_4 ON tab3 (col4,col1 DESC); +CREATE INDEX idx_tab3_4 ON tab3 (col4,col1); +CREATE INDEX idx_tab3_4 ON tab3 (col4,col1,col0 DESC); +CREATE INDEX idx_tab3_4 ON tab3 (col4,col3); +CREATE INDEX idx_tab3_5 ON tab3 (col0 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col0 DESC,col1); +CREATE INDEX idx_tab3_5 ON tab3 (col0 DESC,col3); +CREATE INDEX idx_tab3_5 ON tab3 (col0 DESC,col4 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col0); +CREATE INDEX idx_tab3_5 ON tab3 (col0,col1 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col0,col1); +CREATE INDEX idx_tab3_5 ON tab3 (col0,col1,col3); +CREATE INDEX idx_tab3_5 ON tab3 (col0,col3 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col0,col4); +CREATE INDEX idx_tab3_5 ON tab3 (col1 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col1 DESC,col0 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col1 DESC,col3 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col1 DESC,col3); +CREATE INDEX idx_tab3_5 ON tab3 (col1); +CREATE INDEX idx_tab3_5 ON tab3 (col1,col0); +CREATE INDEX idx_tab3_5 ON tab3 (col1,col3 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col1,col4 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col1,col4); +CREATE INDEX idx_tab3_5 ON tab3 (col3 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col3 DESC,col0 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col3 DESC,col1 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col3 DESC,col1); +CREATE INDEX idx_tab3_5 ON tab3 (col3 DESC,col1,col0); +CREATE INDEX idx_tab3_5 ON tab3 (col3 DESC,col4); +CREATE INDEX idx_tab3_5 ON tab3 (col3); +CREATE INDEX idx_tab3_5 ON tab3 (col3,col0 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col3,col0 DESC,col4); +CREATE INDEX idx_tab3_5 ON tab3 (col3,col0); +CREATE INDEX idx_tab3_5 ON tab3 (col3,col1 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col3,col4); +CREATE INDEX idx_tab3_5 ON tab3 (col4 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col4 DESC,col0 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col4 DESC,col0 DESC,col1 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col4 DESC,col1 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col4 DESC,col3 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col4); +CREATE INDEX idx_tab3_5 ON tab3 (col4,col0 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col4,col0); +CREATE INDEX idx_tab3_5 ON tab3 (col4,col1); +CREATE INDEX idx_tab3_5 ON tab3 (col4,col3 DESC); +CREATE INDEX idx_tab3_5 ON tab3 (col4,col3); +CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col1 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col1 DESC,col4 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col1); +CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col1,col4 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col1,col4); +CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col3 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col3); +CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col3,col4 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col4 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col4 DESC,col1 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col0 DESC,col4 DESC,col1); +CREATE INDEX idx_tab4_0 ON tab4 (col0); +CREATE INDEX idx_tab4_0 ON tab4 (col0,col1); +CREATE INDEX idx_tab4_0 ON tab4 (col0,col1,col4 DESC,col3); +CREATE INDEX idx_tab4_0 ON tab4 (col0,col3 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col0,col3); +CREATE INDEX idx_tab4_0 ON tab4 (col0,col3,col1); +CREATE INDEX idx_tab4_0 ON tab4 (col0,col3,col4); +CREATE INDEX idx_tab4_0 ON tab4 (col0,col4 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col0,col4); +CREATE INDEX idx_tab4_0 ON tab4 (col0,col4,col1,col3 
DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC,col0 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC,col0); +CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC,col3 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC,col3); +CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC,col3,col4 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC,col4 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC,col4 DESC,col0 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC,col4 DESC,col0); +CREATE INDEX idx_tab4_0 ON tab4 (col1 DESC,col4); +CREATE INDEX idx_tab4_0 ON tab4 (col1); +CREATE INDEX idx_tab4_0 ON tab4 (col1,col0 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col1,col0 DESC,col4 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col1,col0); +CREATE INDEX idx_tab4_0 ON tab4 (col1,col0,col3); +CREATE INDEX idx_tab4_0 ON tab4 (col1,col3 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col1,col3 DESC,col0); +CREATE INDEX idx_tab4_0 ON tab4 (col1,col3); +CREATE INDEX idx_tab4_0 ON tab4 (col1,col4 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col1,col4); +CREATE INDEX idx_tab4_0 ON tab4 (col1,col4,col0 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col3 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col3 DESC,col0 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col3 DESC,col0); +CREATE INDEX idx_tab4_0 ON tab4 (col3 DESC,col1 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col3 DESC,col1 DESC,col0); +CREATE INDEX idx_tab4_0 ON tab4 (col3 DESC,col1); +CREATE INDEX idx_tab4_0 ON tab4 (col3 DESC,col4 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col3 DESC,col4 DESC,col0); +CREATE INDEX idx_tab4_0 ON tab4 (col3 DESC,col4); +CREATE INDEX idx_tab4_0 ON tab4 (col3); +CREATE INDEX idx_tab4_0 ON tab4 (col3,col0 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col3,col0 DESC,col1,col4 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col3,col0); +CREATE INDEX idx_tab4_0 ON tab4 (col3,col0,col4 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col3,col1 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col3,col1 DESC,col4); +CREATE INDEX idx_tab4_0 ON tab4 (col3,col1); +CREATE INDEX idx_tab4_0 ON tab4 (col3,col4 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col3,col4); +CREATE INDEX idx_tab4_0 ON tab4 (col3,col4,col1); +CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col0 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col0); +CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col0,col1 DESC,col3); +CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col1 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col1 DESC,col3 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col1 DESC,col3); +CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col1); +CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col3 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col3 DESC,col1 DESC,col0 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col4 DESC,col3); +CREATE INDEX idx_tab4_0 ON tab4 (col4); +CREATE INDEX idx_tab4_0 ON tab4 (col4,col0 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col4,col0); +CREATE INDEX idx_tab4_0 ON tab4 (col4,col0,col1); +CREATE INDEX idx_tab4_0 ON tab4 (col4,col1 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col4,col1); +CREATE INDEX idx_tab4_0 ON tab4 (col4,col3 DESC); +CREATE INDEX idx_tab4_0 ON tab4 (col4,col3); +CREATE INDEX idx_tab4_1 ON tab4 (col0 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col0 DESC,col1); +CREATE INDEX idx_tab4_1 ON tab4 (col0 DESC,col3 DESC,col1 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col0 DESC,col3); +CREATE INDEX idx_tab4_1 ON tab4 (col0 DESC,col4 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col0 DESC,col4); +CREATE INDEX idx_tab4_1 ON tab4 (col0); 
+CREATE INDEX idx_tab4_1 ON tab4 (col0,col1 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col0,col1); +CREATE INDEX idx_tab4_1 ON tab4 (col0,col1,col4 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col0,col3 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col0,col3); +CREATE INDEX idx_tab4_1 ON tab4 (col0,col3,col4 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col0,col4 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col1 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col1 DESC,col0 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col1 DESC,col0); +CREATE INDEX idx_tab4_1 ON tab4 (col1 DESC,col3 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col1 DESC,col3 DESC,col0 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col1 DESC,col4 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col1 DESC,col4); +CREATE INDEX idx_tab4_1 ON tab4 (col1); +CREATE INDEX idx_tab4_1 ON tab4 (col1,col0 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col1,col0); +CREATE INDEX idx_tab4_1 ON tab4 (col1,col3 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col1,col3); +CREATE INDEX idx_tab4_1 ON tab4 (col1,col3,col4 DESC,col0); +CREATE INDEX idx_tab4_1 ON tab4 (col1,col4 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col1,col4 DESC,col0); +CREATE INDEX idx_tab4_1 ON tab4 (col1,col4); +CREATE INDEX idx_tab4_1 ON tab4 (col1,col4,col3 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col3 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col3 DESC,col0 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col3 DESC,col0); +CREATE INDEX idx_tab4_1 ON tab4 (col3 DESC,col1 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col3 DESC,col1); +CREATE INDEX idx_tab4_1 ON tab4 (col3 DESC,col4 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col3 DESC,col4); +CREATE INDEX idx_tab4_1 ON tab4 (col3 DESC,col4,col1); +CREATE INDEX idx_tab4_1 ON tab4 (col3); +CREATE INDEX idx_tab4_1 ON tab4 (col3,col0 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col3,col0 DESC,col1 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col3,col0); +CREATE INDEX idx_tab4_1 ON tab4 (col3,col1 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col3,col1 DESC,col0 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col3,col1 DESC,col0); +CREATE INDEX idx_tab4_1 ON tab4 (col3,col4 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col3,col4); +CREATE INDEX idx_tab4_1 ON tab4 (col4 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col4 DESC,col0 DESC,col3 DESC,col1); +CREATE INDEX idx_tab4_1 ON tab4 (col4 DESC,col0); +CREATE INDEX idx_tab4_1 ON tab4 (col4 DESC,col0,col3 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col4 DESC,col1 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col4 DESC,col1); +CREATE INDEX idx_tab4_1 ON tab4 (col4 DESC,col3 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col4 DESC,col3 DESC,col1 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col4); +CREATE INDEX idx_tab4_1 ON tab4 (col4,col0 DESC,col3 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col4,col1 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col4,col1); +CREATE INDEX idx_tab4_1 ON tab4 (col4,col3 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col4,col3 DESC,col0,col1 DESC); +CREATE INDEX idx_tab4_1 ON tab4 (col4,col3); +CREATE INDEX idx_tab4_2 ON tab4 (col0 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col0 DESC,col3); +CREATE INDEX idx_tab4_2 ON tab4 (col0 DESC,col4 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col0 DESC,col4); +CREATE INDEX idx_tab4_2 ON tab4 (col0); +CREATE INDEX idx_tab4_2 ON tab4 (col0,col3 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col0,col3); +CREATE INDEX idx_tab4_2 ON tab4 (col0,col4 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col0,col4); +CREATE INDEX idx_tab4_2 ON tab4 (col0,col4,col1 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col1 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col1 DESC,col0 DESC); 
+CREATE INDEX idx_tab4_2 ON tab4 (col1 DESC,col3 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col1 DESC,col3); +CREATE INDEX idx_tab4_2 ON tab4 (col1 DESC,col4 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col1 DESC,col4 DESC,col0 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col1); +CREATE INDEX idx_tab4_2 ON tab4 (col1,col0 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col1,col0,col3); +CREATE INDEX idx_tab4_2 ON tab4 (col1,col0,col4 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col1,col3 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col1,col3); +CREATE INDEX idx_tab4_2 ON tab4 (col1,col4 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col1,col4); +CREATE INDEX idx_tab4_2 ON tab4 (col3 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col3 DESC,col0 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col3 DESC,col0); +CREATE INDEX idx_tab4_2 ON tab4 (col3 DESC,col1 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col3 DESC,col1); +CREATE INDEX idx_tab4_2 ON tab4 (col3 DESC,col4 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col3 DESC,col4); +CREATE INDEX idx_tab4_2 ON tab4 (col3); +CREATE INDEX idx_tab4_2 ON tab4 (col3,col0 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col3,col0); +CREATE INDEX idx_tab4_2 ON tab4 (col3,col1 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col3,col4); +CREATE INDEX idx_tab4_2 ON tab4 (col4 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col4 DESC,col0 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col4 DESC,col0 DESC,col3 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col4 DESC,col1 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col4 DESC,col1); +CREATE INDEX idx_tab4_2 ON tab4 (col4 DESC,col3 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col4 DESC,col3); +CREATE INDEX idx_tab4_2 ON tab4 (col4 DESC,col3,col0 DESC); +CREATE INDEX idx_tab4_2 ON tab4 (col4); +CREATE INDEX idx_tab4_2 ON tab4 (col4,col0,col1); +CREATE INDEX idx_tab4_2 ON tab4 (col4,col0,col3); +CREATE INDEX idx_tab4_2 ON tab4 (col4,col1); +CREATE INDEX idx_tab4_2 ON tab4 (col4,col3 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col0 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col0 DESC,col1 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col0 DESC,col1); +CREATE INDEX idx_tab4_3 ON tab4 (col0 DESC,col3 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col0 DESC,col3); +CREATE INDEX idx_tab4_3 ON tab4 (col0); +CREATE INDEX idx_tab4_3 ON tab4 (col0,col1 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col0,col1); +CREATE INDEX idx_tab4_3 ON tab4 (col0,col3 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col0,col3,col4 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col0,col4 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col1 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col1 DESC,col0 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col1 DESC,col0); +CREATE INDEX idx_tab4_3 ON tab4 (col1 DESC,col3 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col1 DESC,col3); +CREATE INDEX idx_tab4_3 ON tab4 (col1 DESC,col4 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col1 DESC,col4); +CREATE INDEX idx_tab4_3 ON tab4 (col1); +CREATE INDEX idx_tab4_3 ON tab4 (col1,col0); +CREATE INDEX idx_tab4_3 ON tab4 (col1,col4); +CREATE INDEX idx_tab4_3 ON tab4 (col3 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col3 DESC,col1); +CREATE INDEX idx_tab4_3 ON tab4 (col3 DESC,col1,col0); +CREATE INDEX idx_tab4_3 ON tab4 (col3 DESC,col4 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col3); +CREATE INDEX idx_tab4_3 ON tab4 (col3,col0 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col3,col0); +CREATE INDEX idx_tab4_3 ON tab4 (col3,col1 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col3,col1 DESC,col4 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col3,col1); +CREATE INDEX idx_tab4_3 ON tab4 (col3,col4); +CREATE INDEX idx_tab4_3 ON tab4 (col4 
DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col4 DESC,col0); +CREATE INDEX idx_tab4_3 ON tab4 (col4 DESC,col1 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col4 DESC,col1); +CREATE INDEX idx_tab4_3 ON tab4 (col4 DESC,col3); +CREATE INDEX idx_tab4_3 ON tab4 (col4 DESC,col3,col1 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col4 DESC,col3,col1); +CREATE INDEX idx_tab4_3 ON tab4 (col4); +CREATE INDEX idx_tab4_3 ON tab4 (col4,col0 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col4,col0); +CREATE INDEX idx_tab4_3 ON tab4 (col4,col1 DESC); +CREATE INDEX idx_tab4_3 ON tab4 (col4,col1); +CREATE INDEX idx_tab4_3 ON tab4 (col4,col3); +CREATE INDEX idx_tab4_4 ON tab4 (col0 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col0 DESC,col1 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col0 DESC,col1); +CREATE INDEX idx_tab4_4 ON tab4 (col0 DESC,col3 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col0 DESC,col3); +CREATE INDEX idx_tab4_4 ON tab4 (col0); +CREATE INDEX idx_tab4_4 ON tab4 (col0,col1); +CREATE INDEX idx_tab4_4 ON tab4 (col0,col1,col3 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col0,col3); +CREATE INDEX idx_tab4_4 ON tab4 (col0,col4); +CREATE INDEX idx_tab4_4 ON tab4 (col1 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col1 DESC,col0 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col1 DESC,col0); +CREATE INDEX idx_tab4_4 ON tab4 (col1 DESC,col4 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col1); +CREATE INDEX idx_tab4_4 ON tab4 (col1,col0 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col1,col3); +CREATE INDEX idx_tab4_4 ON tab4 (col1,col4 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col3 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col3 DESC,col0); +CREATE INDEX idx_tab4_4 ON tab4 (col3 DESC,col1 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col3 DESC,col1); +CREATE INDEX idx_tab4_4 ON tab4 (col3 DESC,col4 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col3 DESC,col4 DESC,col0 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col3); +CREATE INDEX idx_tab4_4 ON tab4 (col3,col0); +CREATE INDEX idx_tab4_4 ON tab4 (col3,col1 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col3,col4 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col4 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col4 DESC,col0); +CREATE INDEX idx_tab4_4 ON tab4 (col4 DESC,col1 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col4 DESC,col1); +CREATE INDEX idx_tab4_4 ON tab4 (col4 DESC,col3 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col4 DESC,col3 DESC,col0 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col4 DESC,col3); +CREATE INDEX idx_tab4_4 ON tab4 (col4 DESC,col3,col0 DESC,col1 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col4 DESC,col3,col1); +CREATE INDEX idx_tab4_4 ON tab4 (col4); +CREATE INDEX idx_tab4_4 ON tab4 (col4,col0); +CREATE INDEX idx_tab4_4 ON tab4 (col4,col1); +CREATE INDEX idx_tab4_4 ON tab4 (col4,col3 DESC); +CREATE INDEX idx_tab4_4 ON tab4 (col4,col3 DESC,col0); +CREATE INDEX idx_tab4_5 ON tab4 (col0 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col0 DESC,col1 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col0 DESC,col1); +CREATE INDEX idx_tab4_5 ON tab4 (col0 DESC,col3 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col0 DESC,col3 DESC,col1 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col0 DESC,col4 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col0 DESC,col4 DESC,col3); +CREATE INDEX idx_tab4_5 ON tab4 (col0); +CREATE INDEX idx_tab4_5 ON tab4 (col0,col3 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col0,col3); +CREATE INDEX idx_tab4_5 ON tab4 (col0,col4 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col1 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col1 DESC,col0 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col1 DESC,col0); +CREATE INDEX idx_tab4_5 ON tab4 (col1 DESC,col3 
DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col1 DESC,col3); +CREATE INDEX idx_tab4_5 ON tab4 (col1 DESC,col4 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col1); +CREATE INDEX idx_tab4_5 ON tab4 (col1,col0 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col1,col3 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col1,col4); +CREATE INDEX idx_tab4_5 ON tab4 (col3 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col3 DESC,col1 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col3 DESC,col1 DESC,col4 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col3 DESC,col1); +CREATE INDEX idx_tab4_5 ON tab4 (col3 DESC,col1,col4); +CREATE INDEX idx_tab4_5 ON tab4 (col3 DESC,col4 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col3); +CREATE INDEX idx_tab4_5 ON tab4 (col3,col0); +CREATE INDEX idx_tab4_5 ON tab4 (col3,col1 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col3,col1); +CREATE INDEX idx_tab4_5 ON tab4 (col3,col4 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col4 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col4 DESC,col1 DESC,col0 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col4 DESC,col3); +CREATE INDEX idx_tab4_5 ON tab4 (col4); +CREATE INDEX idx_tab4_5 ON tab4 (col4,col0 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col4,col1 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col4,col1 DESC,col3); +CREATE INDEX idx_tab4_5 ON tab4 (col4,col3 DESC,col1 DESC); +CREATE INDEX idx_tab4_5 ON tab4 (col4,col3); diff --git a/parser/testdata/02813_float_parsing/ast.json b/parser/testdata/02813_float_parsing/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02813_float_parsing/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02813_float_parsing/metadata.json b/parser/testdata/02813_float_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02813_float_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02813_float_parsing/query.sql b/parser/testdata/02813_float_parsing/query.sql new file mode 100644 index 000000000..ba57b87f1 --- /dev/null +++ b/parser/testdata/02813_float_parsing/query.sql @@ -0,0 +1,21 @@ +SELECT + toFloat64('1.7091'), + toFloat64('1.5008753E7'), + toFloat64('6e-09'), + toFloat64('6.000000000000001e-9'), + toFloat32('1.7091'), + toFloat32('1.5008753E7'), + toFloat32('6e-09'), + toFloat32('6.000000000000001e-9') +SETTINGS precise_float_parsing = 0; + +SELECT + toFloat64('1.7091'), + toFloat64('1.5008753E7'), + toFloat64('6e-09'), + toFloat64('6.000000000000001e-9'), + toFloat32('1.7091'), + toFloat32('1.5008753E7'), + toFloat32('6e-09'), + toFloat32('6.000000000000001e-9') +SETTINGS precise_float_parsing = 1; diff --git a/parser/testdata/02813_func_now_and_alias/ast.json b/parser/testdata/02813_func_now_and_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02813_func_now_and_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02813_func_now_and_alias/metadata.json b/parser/testdata/02813_func_now_and_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02813_func_now_and_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02813_func_now_and_alias/query.sql b/parser/testdata/02813_func_now_and_alias/query.sql new file mode 100644 index 000000000..6a2acbc03 --- /dev/null +++ b/parser/testdata/02813_func_now_and_alias/query.sql @@ -0,0 +1,6 @@ +-- "Tests" current_timestamp() which is an alias of now(). 
+-- Since the function is non-deterministic, only check that no bad things happen (don't check the returned value). + +SELECT count() FROM (SELECT current_timestamp()); +SELECT count() FROM (SELECT CURRENT_TIMESTAMP()); +SELECT count() FROM (SELECT current_TIMESTAMP()); diff --git a/parser/testdata/02813_func_today_and_alias/ast.json b/parser/testdata/02813_func_today_and_alias/ast.json new file mode 100644 index 000000000..0a652ff2f --- /dev/null +++ b/parser/testdata/02813_func_today_and_alias/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function today (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function current_date (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001021094, + "rows_read": 10, + "bytes_read": 376 + } +} diff --git a/parser/testdata/02813_func_today_and_alias/metadata.json b/parser/testdata/02813_func_today_and_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02813_func_today_and_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02813_func_today_and_alias/query.sql b/parser/testdata/02813_func_today_and_alias/query.sql new file mode 100644 index 000000000..d379aa74f --- /dev/null +++ b/parser/testdata/02813_func_today_and_alias/query.sql @@ -0,0 +1,6 @@ +SELECT today() = current_date(); +SELECT today() = CURRENT_DATE(); +SELECT today() = current_DATE(); +SELECT today() = curdate(); +SELECT today() = CURDATE(); +SELECT today() = curDATE(); diff --git a/parser/testdata/02813_optimize_lazy_materialization/ast.json b/parser/testdata/02813_optimize_lazy_materialization/ast.json new file mode 100644 index 000000000..ebf864134 --- /dev/null +++ b/parser/testdata/02813_optimize_lazy_materialization/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001030545, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02813_optimize_lazy_materialization/metadata.json b/parser/testdata/02813_optimize_lazy_materialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02813_optimize_lazy_materialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02813_optimize_lazy_materialization/query.sql b/parser/testdata/02813_optimize_lazy_materialization/query.sql new file mode 100644 index 000000000..96bd0ae01 --- /dev/null +++ b/parser/testdata/02813_optimize_lazy_materialization/query.sql @@ -0,0 +1,515 @@ +SET query_plan_optimize_lazy_materialization = 1; +SET query_plan_max_limit_for_lazy_materialization = 10; +SET allow_experimental_variant_type = 1; +SET use_variant_as_common_type = 1; +SET allow_experimental_dynamic_type = 1; + +DROP TABLE IF EXISTS optimize_lazy_materialization; +CREATE TABLE optimize_lazy_materialization (a UInt64, b UInt64, c UInt64, d UInt64, n Nested(x String)) +ENGINE MergeTree() PARTITION BY b ORDER BY a; +INSERT INTO 
optimize_lazy_materialization SELECT number, number % 2, number, number % 3, ['a', 'b', 'c'] FROM numbers(0, 100); +INSERT INTO optimize_lazy_materialization SELECT number, number % 2, number, number % 3, ['a', 'b', 'c'] FROM numbers(100, 100); + +-- { echoOn } +SELECT * FROM optimize_lazy_materialization ORDER BY c LIMIT 3; +-- queries with _part_offset column in projection +SELECT a, b, c, d, _part_index, _part_offset FROM optimize_lazy_materialization ORDER BY c LIMIT 3; +SELECT _part_index, _part_offset FROM optimize_lazy_materialization ORDER BY c LIMIT 3; +-- queries with filter +SELECT * FROM optimize_lazy_materialization WHERE d > 1 ORDER BY c LIMIT 3; +SELECT * FROM optimize_lazy_materialization PREWHERE d > 1 ORDER BY c LIMIT 3; +-- queries with function in order by +SELECT * FROM optimize_lazy_materialization WHERE d > 1 ORDER BY -c LIMIT 3; +SELECT * FROM optimize_lazy_materialization WHERE d > 1 ORDER BY -toFloat64(c) LIMIT 3; +SELECT * FROM optimize_lazy_materialization WHERE d > 1 ORDER BY c + 1 LIMIT 3; +-- queries with function in filter +SELECT * FROM optimize_lazy_materialization WHERE d % 3 > 1 ORDER BY c LIMIT 3; +-- queries with aliases +SELECT a AS a, b AS b, c AS c, d AS d FROM optimize_lazy_materialization WHERE d > 1 ORDER BY c LIMIT 3; +SELECT a AS a, b AS b, c AS c, d AS d FROM optimize_lazy_materialization WHERE d > 1 ORDER BY c LIMIT 3; +SELECT a + 1 AS a, b AS b, c + 1 AS c, d + 1 AS d FROM optimize_lazy_materialization WHERE d > 1 ORDER BY c LIMIT 3; +SELECT a + 1 AS a, b AS b, c + 1 AS c, d + 1 AS d FROM optimize_lazy_materialization WHERE d > 1 ORDER BY c LIMIT 3; +-- queries with non-trivial action's chain in expression +SELECT y, z FROM (SELECT a as y, b as z FROM optimize_lazy_materialization WHERE d > 1 ORDER BY c LIMIT 3) ORDER BY y + 1; +-- queries with default value +ALTER TABLE optimize_lazy_materialization ADD COLUMN default1 UInt64; +SELECT * FROM optimize_lazy_materialization ORDER BY c LIMIT 3; +ALTER TABLE optimize_lazy_materialization ADD COLUMN default2 UInt64 ALIAS 2; +SELECT * FROM optimize_lazy_materialization ORDER BY c LIMIT 3; +ALTER TABLE optimize_lazy_materialization ADD COLUMN default3 UInt64 ALIAS a + c; +SELECT * FROM optimize_lazy_materialization ORDER BY c LIMIT 3; +-- { echoOff } +DROP TABLE IF EXISTS optimize_lazy_materialization; + +-- queries with compact merge tree +CREATE TABLE optimize_lazy_materialization_with_compact_mt (a UInt64, b UInt64, c UInt64, d UInt64, n Nested(x String)) +ENGINE MergeTree() PARTITION BY b ORDER BY a +settings min_rows_for_wide_part = 10000; +INSERT INTO optimize_lazy_materialization_with_compact_mt SELECT number, number % 2, number, number % 3, ['a', 'b', 'c'] FROM numbers(0, 100); +INSERT INTO optimize_lazy_materialization_with_compact_mt SELECT number, number % 2, number, number % 3, ['a', 'b', 'c'] FROM numbers(100, 100); + +-- { echoOn } +SELECT * FROM optimize_lazy_materialization_with_compact_mt ORDER BY c LIMIT 3; +-- queries with _part_offset column in projection +SELECT a, b, c, d, _part_index, _part_offset FROM optimize_lazy_materialization_with_compact_mt ORDER BY c LIMIT 3; +SELECT _part_index, _part_offset FROM optimize_lazy_materialization_with_compact_mt ORDER BY c LIMIT 3; +-- queries with filter +SELECT * FROM optimize_lazy_materialization_with_compact_mt WHERE d > 1 ORDER BY c LIMIT 3; +SELECT * FROM optimize_lazy_materialization_with_compact_mt PREWHERE d > 1 ORDER BY c LIMIT 3; +-- queries with default value +ALTER TABLE optimize_lazy_materialization_with_compact_mt ADD 
COLUMN default1 UInt64; +SELECT * FROM optimize_lazy_materialization_with_compact_mt ORDER BY c LIMIT 3; +ALTER TABLE optimize_lazy_materialization_with_compact_mt ADD COLUMN default2 UInt64 ALIAS 2; +SELECT * FROM optimize_lazy_materialization_with_compact_mt ORDER BY c LIMIT 3; +ALTER TABLE optimize_lazy_materialization_with_compact_mt ADD COLUMN default3 UInt64 ALIAS a + c; +SELECT * FROM optimize_lazy_materialization_with_compact_mt ORDER BY c LIMIT 3; +-- { echoOff } +DROP TABLE IF EXISTS optimize_lazy_materialization_with_compact_mt; + +-- queries with int data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_int_data_type; +CREATE TABLE optimize_lazy_materialization_with_int_data_type +( + a UInt64, + b UInt64, + c UInt256 +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_int_data_type +SELECT + number, + number % 2 ? 2000 : number, + number + 3 +FROM numbers(0, 1000); +-- { echoOn } +SELECT * FROM optimize_lazy_materialization_with_int_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with float data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_float_data_type; +CREATE TABLE optimize_lazy_materialization_with_float_data_type +( + a UInt64, + b UInt64, + c Float64 +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_float_data_type +SELECT + number, + number % 2 ? 2000 : number, + number + 3.1 FROM numbers(0, 1000); +-- { echoOn } +SELECT * FROM optimize_lazy_materialization_with_float_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with decimal data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_decimal_data_type; +CREATE TABLE optimize_lazy_materialization_with_decimal_data_type +( + a UInt64, + b UInt64, + c Decimal256(1) +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_decimal_data_type +SELECT + number, + number % 2 ? 2000 : number, + number + 4.12 +FROM numbers(0, 1000); +-- { echoOn } +SELECT * FROM optimize_lazy_materialization_with_decimal_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with string data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_string_data_type; +CREATE TABLE optimize_lazy_materialization_with_string_data_type +( + a UInt64, + b UInt64, + c String +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_string_data_type +SELECT + number, + number % 2 ? 2000 : number, + repeat('a', number) +FROM numbers(0, 1000); +-- { echoOn } +SELECT * FROM optimize_lazy_materialization_with_string_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with fixed string data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_fixed_string_data_type; +CREATE TABLE optimize_lazy_materialization_with_fixed_string_data_type +( + a UInt64, + b UInt64, + c FixedString(10) +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_fixed_string_data_type +SELECT + number, + number % 2 ? 2000 : number, + repeat('a', number % 10) +FROM numbers(0, 1000); +-- { echoOn } +SELECT * FROM optimize_lazy_materialization_with_fixed_string_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with date data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_date_data_type; +CREATE TABLE optimize_lazy_materialization_with_date_data_type +( + a UInt64, + b UInt64, + c Date +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_date_data_type +SELECT + number, + number % 2 ? 
2000 : number, + number +FROM numbers(0, 1000); +-- { echoOn } +SELECT * FROM optimize_lazy_materialization_with_date_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with date32 data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_date32_data_type; +CREATE TABLE optimize_lazy_materialization_with_date32_data_type +( + a UInt64, + b UInt64, + c Date32 +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_date32_data_type +SELECT + number, + number % 2 ? 2000 : number, + number +FROM numbers(0, 1000); +-- { echoOn } +SELECT * FROM optimize_lazy_materialization_with_date32_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with datetime data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_datetime_data_type; +CREATE TABLE optimize_lazy_materialization_with_datetime_data_type +( + a UInt64, + b UInt64, + c DateTime +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_datetime_data_type +SELECT + number, + number % 2 ? 2000 : number, + number +FROM numbers(0, 1000); +-- { echoOn } +SELECT a, b, toUInt64(c) FROM optimize_lazy_materialization_with_datetime_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with datetime64 data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_datetime64_data_type; +CREATE TABLE optimize_lazy_materialization_with_datetime64_data_type +( + a UInt64, + b UInt64, + c DateTime64 +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_datetime64_data_type +SELECT + number, + number % 2 ? 2000 : number, + number +FROM numbers(0, 1000); +-- { echoOn } +SELECT a, b, toUInt64(c) FROM optimize_lazy_materialization_with_datetime64_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with enum data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_enum_data_type; +CREATE TABLE optimize_lazy_materialization_with_enum_data_type +( + a UInt64, + b UInt64, + c Enum('hello', 'world') +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_enum_data_type +SELECT + number, + number % 2 ? 2000 : number, + number % 2 ? 'world' : 'hello' +FROM numbers(0, 1000); +-- { echoOn } +SELECT * FROM optimize_lazy_materialization_with_enum_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with bool data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_bool_data_type; +CREATE TABLE optimize_lazy_materialization_with_bool_data_type +( + a UInt64, + b UInt64, + c Bool +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_bool_data_type +SELECT + number, + number % 2 ? 2000 : number, + number % 2 +FROM numbers(0, 1000); +-- { echoOn } +SELECT * FROM optimize_lazy_materialization_with_bool_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with uuid data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_uuid_data_type; +CREATE TABLE optimize_lazy_materialization_with_uuid_data_type +( + a UInt64, + b UInt64, + c UUID) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_uuid_data_type +SELECT + number, + number % 2 ? 
2000 : number, + generateUUIDv4() +FROM numbers(0, 1000); +-- { echoOn } +SELECT a, b, length(toString(c)) FROM optimize_lazy_materialization_with_uuid_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with ipv4 data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_ipv4_data_type; +CREATE TABLE optimize_lazy_materialization_with_ipv4_data_type +( + a UInt64, + b UInt64, + c IPv4 +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_ipv4_data_type +SELECT + number, + number % 2 ? 2000 : number, + concat('1.2.3.', toString(number % 256)) +FROM numbers(0, 1000); +-- { echoOn } +SELECT * FROM optimize_lazy_materialization_with_ipv4_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with ipv6 data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_ipv6_data_type; +CREATE TABLE optimize_lazy_materialization_with_ipv6_data_type +( + a UInt64, + b UInt64, + c IPv6 +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_ipv6_data_type +SELECT + number, + number % 2 ? 2000 : number, + concat('1:2:3:4:5:6:7:', toString(number % 256)) +FROM numbers(0, 1000); +-- { echoOn } +SELECT * FROM optimize_lazy_materialization_with_ipv6_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with array data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_array_data_type; +CREATE TABLE optimize_lazy_materialization_with_array_data_type +( + a UInt64, + b UInt64, + c Array(Tuple(field1 UInt64, field2 String)) +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_array_data_type +SELECT + number, + number % 2 ? 2000 : number, + [(number, toString(number + 2)), (number + 1, toString(number + 4))] +FROM numbers(0, 1000); +-- { echoOn } +SELECT a, b, c, c.size0 FROM optimize_lazy_materialization_with_array_data_type ORDER BY b LIMIT 10; +SELECT a, b, c.field2 FROM optimize_lazy_materialization_with_array_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with tuple data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_tuple_data_type; +CREATE TABLE optimize_lazy_materialization_with_tuple_data_type +( + a UInt64, + b UInt64, + c Tuple(UInt64, String) +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_tuple_data_type +SELECT + number, + number % 2 ? 2000 : number, + (number, toString(number * 2)) +FROM numbers(0, 1000); +-- { echoOn } +SELECT * FROM optimize_lazy_materialization_with_tuple_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with map data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_map_data_type; +CREATE TABLE optimize_lazy_materialization_with_map_data_type +( + a UInt64, + b UInt64, + c Map(String, UInt64) +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_map_data_type +SELECT + number, + number % 2 ? 
2000 : number, + map('key1', number + 1, 'key2', number + 2) +FROM numbers(0, 1000); +-- { echoOn } +SELECT * FROM optimize_lazy_materialization_with_map_data_type ORDER BY b LIMIT 10; +SELECT a, b, c['key1'] FROM optimize_lazy_materialization_with_map_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with variant data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_variant_data_type; +CREATE TABLE optimize_lazy_materialization_with_variant_data_type +( + a UInt64, + b UInt64, + c Variant(UInt64, String, Array(UInt64)) +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_variant_data_type +SELECT + number, + number % 2 ? 2000 : number, + multiIf(number % 5 = 0, 666::Variant(UInt64, String, Array(UInt64)), number % 5 = 1, number::Variant(UInt64, String, Array(UInt64)), number % 5 = 2, [4, 4, 4]::Variant(UInt64, String, Array(UInt64)), NULL) +FROM numbers(0, 1000); +-- { echoOn } +SELECT * FROM optimize_lazy_materialization_with_variant_data_type ORDER BY b LIMIT 10; +-- queries with subcolumn of variant data type +SELECT a, b, c.UInt64 FROM optimize_lazy_materialization_with_variant_data_type ORDER BY b LIMIT 10; +SELECT a, b, c.UInt64.null FROM optimize_lazy_materialization_with_variant_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with low_cardinality data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_low_cardinality_data_type; +CREATE TABLE optimize_lazy_materialization_with_low_cardinality_data_type +( + a UInt64, + b UInt64, + c LowCardinality(String) +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_low_cardinality_data_type +SELECT + number, + number % 2 ? 2000 : number, + multiIf(number % 3 = 0, 'aa', number % 3 = 1, 'bbb', 'cccc') +FROM numbers(0, 1000); +-- { echoOn } +SELECT * FROM optimize_lazy_materialization_with_low_cardinality_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with nullable data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_nullable_data_type; +CREATE TABLE optimize_lazy_materialization_with_nullable_data_type +( + a UInt64, + b UInt64, + c Nullable(String) +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_nullable_data_type +SELECT + number, + number % 2 ? 2000 : number, + multiIf(number % 3 = 0, 'aa', NULL) +FROM numbers(0, 1000); +-- { echoOn } +SELECT * FROM optimize_lazy_materialization_with_nullable_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with nested data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_nested_data_type; +CREATE TABLE optimize_lazy_materialization_with_nested_data_type +( + a UInt64, + b UInt64, + c Nested + ( + id UInt32, + order String + ) +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_nested_data_type +SELECT + number, + number % 2 ? 2000 : number, + multiIf(number % 3 = 0, [1, 2, 3], [4, 5]), + multiIf(number % 3 = 0, ['1', '2', '3'], ['4', '5']) +FROM numbers(0, 1000); +-- { echoOn } +SELECT a, b, c.id, c.order FROM optimize_lazy_materialization_with_nested_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with dynamic data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_dynamic_data_type; +CREATE TABLE optimize_lazy_materialization_with_dynamic_data_type +( + a UInt64, + b UInt64, + c Dynamic +) +ENGINE MergeTree() ORDER BY a; +INSERT INTO optimize_lazy_materialization_with_dynamic_data_type +SELECT + number, + number % 2 ? 
2000 : number, + multiIf(number % 5 = 0, 1, number % 5 = 1, [2, 3], number % 5 = 2, '555', NULL), +FROM numbers(0, 1000); +-- { echoOn } +SELECT a, b, c FROM optimize_lazy_materialization_with_dynamic_data_type ORDER BY b LIMIT 10; +-- queries with subcolumn of dynamic data type +SELECT a, b, c.IPv4, c.String FROM optimize_lazy_materialization_with_dynamic_data_type ORDER BY b LIMIT 10; +-- { echoOff } + +-- queries with sparse data type +DROP TABLE IF EXISTS optimize_lazy_materialization_with_sparse_data_type; +CREATE TABLE optimize_lazy_materialization_with_sparse_data_type +( + a UInt64, + b UInt64, + c String +) +ENGINE MergeTree() ORDER BY a +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.01; +INSERT INTO optimize_lazy_materialization_with_sparse_data_type +SELECT + number, + number % 2 ? 2000 : number, + multiIf(number % 3 = 0, '', number % 3 = 1, 'aa', 'bb'), +FROM numbers(0, 1000); +-- { echoOn } +SELECT * FROM optimize_lazy_materialization_with_sparse_data_type ORDER BY b LIMIT 10; +-- { echoOff } \ No newline at end of file diff --git a/parser/testdata/02813_seriesDecomposeSTL/ast.json b/parser/testdata/02813_seriesDecomposeSTL/ast.json new file mode 100644 index 000000000..a35eba7fc --- /dev/null +++ b/parser/testdata/02813_seriesDecomposeSTL/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tb2 (children 1)" + }, + { + "explain": " Identifier tb2" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001123189, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02813_seriesDecomposeSTL/metadata.json b/parser/testdata/02813_seriesDecomposeSTL/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02813_seriesDecomposeSTL/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02813_seriesDecomposeSTL/query.sql b/parser/testdata/02813_seriesDecomposeSTL/query.sql new file mode 100644 index 000000000..496267f24 --- /dev/null +++ b/parser/testdata/02813_seriesDecomposeSTL/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS tb2; + +CREATE TABLE tb2 (`period` UInt32, `ts` Array(Float64)) ENGINE = Memory; +INSERT INTO tb2 VALUES (3,[10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34]); +INSERT INTO tb2 VALUES (14, [139, 87, 110, 68, 54, 50, 51, 53, 133, 86, 141, 97, 156, 94, 149, 95, 140, 77, 61, 50, 54, 47, 133, 72, 152, 94, 148, 105, 162, 101, 160, 87, 63, 53, 55, 54, 151, 103, 189, 108, 183, 113, 175, 113, 178, 90, 71, 62, 62, 65, 165, 109, 181, 115, 182, 121, 178, 114, 170]); + +SELECT seriesDecomposeSTL([10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34], 3); +SELECT seriesDecomposeSTL([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2], 0); +SELECT seriesDecomposeSTL(ts, period) FROM tb2 ORDER BY period; +DROP TABLE IF EXISTS tb2; +SELECT seriesDecomposeSTL([2,2,2,2,2,2,2,2,2,2,2,2,2,2], -5); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT seriesDecomposeSTL([2,2,2,2,2,2,2,2,2,2,2,2,2,2], -5.2); --{ serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT seriesDecomposeSTL(); --{ serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT seriesDecomposeSTL([]); --{ serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT seriesDecomposeSTL([1,2,3], 2); --{ serverError BAD_ARGUMENTS} +SELECT 
seriesDecomposeSTL([2,2,2,3,3,3]); --{ serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT seriesDecomposeSTL([2,2,2,3,3,3], 9272653446478); --{ serverError BAD_ARGUMENTS} +SELECT seriesDecomposeSTL([2,2,2,3,3,3], 7); --{ serverError BAD_ARGUMENTS} diff --git a/parser/testdata/02813_seriesOutliersDetectTukey/ast.json b/parser/testdata/02813_seriesOutliersDetectTukey/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02813_seriesOutliersDetectTukey/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02813_seriesOutliersDetectTukey/metadata.json b/parser/testdata/02813_seriesOutliersDetectTukey/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02813_seriesOutliersDetectTukey/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02813_seriesOutliersDetectTukey/query.sql b/parser/testdata/02813_seriesOutliersDetectTukey/query.sql new file mode 100644 index 000000000..0030929e6 --- /dev/null +++ b/parser/testdata/02813_seriesOutliersDetectTukey/query.sql @@ -0,0 +1,32 @@ +-- Tags: no-cpu-aarch64 +-- Tag no-cpu-aarch64: values generated are slightly different on aarch64 + +DROP TABLE IF EXISTS tb1; + +CREATE TABLE tb1 (n UInt32, a Array(Float64)) engine=Memory; +INSERT INTO tb1 VALUES (1, [-3, 2.40, 15, 3.90, 5, 6, 4.50, 5.20, 3, 4, 5, 16, 7, 5, 5, 4]), (2, [-3, 2.40, 15, 3.90, 5, 6, 4.50, 5.20, 12, 45, 12, 3.40, 3, 4, 5, 6]); + +-- non-const inputs +SELECT seriesOutliersDetectTukey(a) FROM tb1 ORDER BY n; +SELECT seriesOutliersDetectTukey(a,.10,.90,1.5) FROM tb1 ORDER BY n; +DROP TABLE IF EXISTS tb1; + +-- const inputs +SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4.50, 5, 12, 45, 12, 3.40, 3, 4, 5, 6]); +SELECT seriesOutliersDetectTukey([-3, 2.40, 15, 3.90, 5, 6, 4.50, 5.20, 12, 60, 12, 3.40, 3, 4, 5, 6, 3.40, 2.7]); + +-- const inputs with optional arguments +SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4.50, 5, 12, 45, 12, 3.40, 3, 4, 5, 6], .25, .75, 1.5); +SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4.50, 5, 12, 45, 12, 3.40, 3, 4, 5, 6], .10, .90, 1.5); +SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4.50, 5, 12, 45, 12, 3.40, 3, 4, 5, 6], .02, .98, 1.5); +SELECT seriesOutliersDetectTukey([-3, 2, 15, 3], 0.02, 0.98, 1.5); +SELECT seriesOutliersDetectTukey(arrayMap(x -> sin(x / 10), range(30))); +SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4, 5, 12, 45, 12, 3, 3, 4, 5, 6], .25, .75, 3); + +-- negative tests +SELECT seriesOutliersDetectTukey([-3, 2, 15, 3, 5, 6, 4, 5, 12, 45, 12, 3, 3, 4, 5, 6], .25, .75, -1); -- { serverError BAD_ARGUMENTS} +SELECT seriesOutliersDetectTukey([-3, 2, 15, 3], .33, .53); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT seriesOutliersDetectTukey([-3, 2, 15, 3], .33); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT seriesOutliersDetectTukey([-3, 2.4, 15, NULL]); -- { serverError ILLEGAL_COLUMN} +SELECT seriesOutliersDetectTukey([]); -- { serverError ILLEGAL_COLUMN} +SELECT seriesOutliersDetectTukey([-3, 2.4, 15]); -- { serverError BAD_ARGUMENTS}
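+-- Note (illustrative comment, not in the upstream test): seriesOutliersDetectTukey
+-- is commonly described in terms of Tukey's fences, where a value x is flagged
+-- when x < Q1 - k * (Q3 - Q1) or x > Q3 + k * (Q3 - Q1). Under that reading, the
+-- optional arguments above are the lower/upper quantile levels (defaults 0.25 and
+-- 0.75) and the fence multiplier k (default 1.5). For example, in
+-- SELECT seriesOutliersDetectTukey([1, 2, 100, 3, 4]); the value 100 lies far
+-- above the upper fence and would be expected to get a non-zero score.
diff --git a/parser/testdata/02813_series_period_detect/ast.json b/parser/testdata/02813_series_period_detect/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02813_series_period_detect/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02813_series_period_detect/metadata.json b/parser/testdata/02813_series_period_detect/metadata.json new file mode 100644 index 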
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02813_series_period_detect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02813_series_period_detect/query.sql b/parser/testdata/02813_series_period_detect/query.sql new file mode 100644 index 000000000..ef3479d32 --- /dev/null +++ b/parser/testdata/02813_series_period_detect/query.sql @@ -0,0 +1,22 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS tb1; + +CREATE TABLE tb1 (n UInt32, a Array(Int32)) engine=Memory; +INSERT INTO tb1 VALUES (1, [10, 20, 30, 10, 20, 30, 10, 20, 30, 10, 20, 30, 10, 20, 30, 10, 20, 30, 10, 20, 30]), (2, [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]), (3, [6, 3, 4]); + +SELECT seriesPeriodDetectFFT([139, 87, 110, 68, 54, 50, 51, 53, 133, 86, 141, 97, 156, 94, 149, 95, 140, 77, 61, 50, 54, 47, 133, 72, 152, 94, 148, 105, 162, 101, 160, 87, 63, 53, 55, 54, 151, 103, 189, 108, 183, 113, 175, 113, 178, 90, 71, 62, 62, 65, 165, 109, 181, 115, 182, 121, 178, 114, 170]); +SELECT seriesPeriodDetectFFT([10, 20, 30, 10, 20, 30, 10, 20, 30, 10, 20, 30, 10, 20, 30, 10, 20, 30, 10, 20, 30]); +SELECT seriesPeriodDetectFFT([10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34, 10.1, 20.45, 40.34]); +SELECT seriesPeriodDetectFFT([10.1, 10, 400, 10.1, 10, 400, 10.1, 10, 400, 10.1, 10, 400, 10.1, 10, 400, 10.1, 10, 400, 10.1, 10, 400, 10.1, 10, 400]); +SELECT seriesPeriodDetectFFT([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]); +SELECT seriesPeriodDetectFFT(arrayMap(x -> sin(x / 10), range(1000))); +SELECT seriesPeriodDetectFFT(arrayMap(x -> abs((x % 6) - 3), range(1000))); +SELECT seriesPeriodDetectFFT(arrayMap(x -> if((x % 6) < 3, 3, 0), range(1000))); +SELECT seriesPeriodDetectFFT([1,2,3]); +SELECT seriesPeriodDetectFFT(a) FROM tb1; +DROP TABLE IF EXISTS tb1; +SELECT seriesPeriodDetectFFT(); --{ serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT seriesPeriodDetectFFT([]); -- { serverError ILLEGAL_COLUMN} +SELECT seriesPeriodDetectFFT([NULL, NULL, NULL]); -- { serverError ILLEGAL_COLUMN} +SELECT seriesPeriodDetectFFT([10, 20, 30, 10, 202, 30, NULL]); -- { serverError ILLEGAL_COLUMN } \ No newline at end of file diff --git a/parser/testdata/02813_starting_in_text_log/ast.json b/parser/testdata/02813_starting_in_text_log/ast.json new file mode 100644 index 000000000..70e3bf8bb --- /dev/null +++ b/parser/testdata/02813_starting_in_text_log/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SYSTEM query" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001372994, + "rows_read": 1, + "bytes_read": 20 + } +} diff --git a/parser/testdata/02813_starting_in_text_log/metadata.json b/parser/testdata/02813_starting_in_text_log/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02813_starting_in_text_log/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02813_starting_in_text_log/query.sql b/parser/testdata/02813_starting_in_text_log/query.sql new file mode 100644 index 000000000..27dd4fddd --- /dev/null +++ b/parser/testdata/02813_starting_in_text_log/query.sql @@ -0,0 +1,3 @@ +SYSTEM FLUSH LOGS text_log; +SET max_rows_to_read = 0; -- system.text_log can be really big +SELECT count() > 0 FROM system.text_log WHERE event_date >= yesterday() AND message LIKE '%Starting ClickHouse%'; diff --git a/parser/testdata/02813_system_licenses_base/ast.json 
b/parser/testdata/02813_system_licenses_base/ast.json new file mode 100644 index 000000000..092fae917 --- /dev/null +++ b/parser/testdata/02813_system_licenses_base/ast.json @@ -0,0 +1,97 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk (children 1)" + }, + { + "explain": " ColumnsTransformerList (children 1)" + }, + { + "explain": " ColumnsReplaceTransformer (children 1)" + }, + { + "explain": " ColumnsReplaceTransformer::Replacement (children 1)" + }, + { + "explain": " Function substring (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier license_text" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function position (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier license_text" + }, + { + "explain": " Literal '\\n'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.licenses" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier library_name" + }, + { + "explain": " Literal 'poco'" + }, + { + "explain": " Identifier Vertical" + } + ], + + "rows": 25, + + "statistics": + { + "elapsed": 0.001416964, + "rows_read": 25, + "bytes_read": 1032 + } +} diff --git a/parser/testdata/02813_system_licenses_base/metadata.json b/parser/testdata/02813_system_licenses_base/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02813_system_licenses_base/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02813_system_licenses_base/query.sql b/parser/testdata/02813_system_licenses_base/query.sql new file mode 100644 index 000000000..e4b2ca3d3 --- /dev/null +++ b/parser/testdata/02813_system_licenses_base/query.sql @@ -0,0 +1 @@ +SELECT * REPLACE substring(license_text, 1, position(license_text, '\n')) AS license_text FROM system.licenses WHERE library_name = 'poco' FORMAT Vertical; diff --git a/parser/testdata/02814_ReplacingMergeTree_fix_select_final_on_single_partition/ast.json b/parser/testdata/02814_ReplacingMergeTree_fix_select_final_on_single_partition/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02814_ReplacingMergeTree_fix_select_final_on_single_partition/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02814_ReplacingMergeTree_fix_select_final_on_single_partition/metadata.json b/parser/testdata/02814_ReplacingMergeTree_fix_select_final_on_single_partition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02814_ReplacingMergeTree_fix_select_final_on_single_partition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02814_ReplacingMergeTree_fix_select_final_on_single_partition/query.sql b/parser/testdata/02814_ReplacingMergeTree_fix_select_final_on_single_partition/query.sql new file mode 100644 index 000000000..a89a1ff59 --- /dev/null +++ b/parser/testdata/02814_ReplacingMergeTree_fix_select_final_on_single_partition/query.sql @@ -0,0 +1,32 @@ 
+--- Based on https://github.com/ClickHouse/ClickHouse/issues/49685 +--- Verify that ReplacingMergeTree properly handles _is_deleted: +--- SELECT FINAL should take `_is_deleted` into consideration when there is only one partition. +-- { echoOn } + +DROP TABLE IF EXISTS t; +CREATE TABLE t +( + `account_id` UInt64, + `_is_deleted` UInt8, + `_version` UInt64 +) +ENGINE = ReplacingMergeTree(_version, _is_deleted) +ORDER BY (account_id); + +INSERT INTO t SELECT number, 0, 1 FROM numbers(1e3); +-- Mark the first 100 rows as deleted. +INSERT INTO t SELECT number, 1, 1 FROM numbers(1e2); + +-- Put everything in one partition +OPTIMIZE TABLE t FINAL; + +SELECT count() FROM t; +SELECT count() FROM t FINAL; + +-- Both should produce the same number of rows. +-- Previously, `do_not_merge_across_partitions_select_final = 1` showed more rows, +-- as if no rows were deleted. +SELECT count() FROM t FINAL SETTINGS do_not_merge_across_partitions_select_final = 1; +SELECT count() FROM t FINAL SETTINGS do_not_merge_across_partitions_select_final = 0; +
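+-- Note (illustrative comment, not in the upstream test): 1e3 rows are inserted
+-- and the first 1e2 of them are re-inserted with _is_deleted = 1, so each
+-- count() FROM t FINAL above would be expected to return 900 (1000 - 100) once
+-- _is_deleted is honored, regardless of the
+-- do_not_merge_across_partitions_select_final value.
+
+DROP TABLE t; diff --git a/parser/testdata/02814_age_datediff/ast.json b/parser/testdata/02814_age_datediff/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02814_age_datediff/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02814_age_datediff/metadata.json b/parser/testdata/02814_age_datediff/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02814_age_datediff/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02814_age_datediff/query.sql b/parser/testdata/02814_age_datediff/query.sql new file mode 100644 index 000000000..64e329b2f --- /dev/null +++ b/parser/testdata/02814_age_datediff/query.sql @@ -0,0 +1,83 @@ +-- { echo } + +-- DateTime64 vs DateTime64 with fractional part +SELECT age('nanosecond', toDateTime64('2015-08-18 20:30:36.100200005', 9, 'UTC'), toDateTime64('2015-08-18 20:30:41.200400005', 9, 'UTC')); +SELECT age('nanosecond', toDateTime64('2015-08-18 20:30:36.100200005', 9, 'UTC'), toDateTime64('2015-08-18 20:30:41.200400004', 9, 'UTC')); + +SELECT age('microsecond', toDateTime64('2015-08-18 20:30:36.100200005', 9, 'UTC'), toDateTime64('2015-08-18 20:30:41.200400005', 9, 'UTC')); +SELECT age('microsecond', toDateTime64('2015-08-18 20:30:36.100200005', 9, 'UTC'), toDateTime64('2015-08-18 20:30:41.200400004', 9, 'UTC')); + +SELECT age('millisecond', toDateTime64('2015-08-18 20:30:36.450299', 6, 'UTC'), toDateTime64('2015-08-18 20:30:41.550299', 6, 'UTC')); +SELECT age('millisecond', toDateTime64('2015-08-18 20:30:36.450299', 6, 'UTC'), toDateTime64('2015-08-18 20:30:41.550298', 6, 'UTC')); + +SELECT age('second', toDateTime64('2023-03-01 19:18:36.999003', 6, 'UTC'), toDateTime64('2023-03-01 19:18:41.999002', 6, 'UTC')); +SELECT age('second', toDateTime64('2023-03-01 19:18:36.999', 3, 'UTC'), toDateTime64('2023-03-01 19:18:41.001', 3, 'UTC')); + +SELECT age('minute', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-01 20:35:36.300', 3, 'UTC')); +SELECT age('minute', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-01 20:35:36.100', 3, 'UTC')); +SELECT age('minute', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-01 20:35:36.200100', 6, 'UTC')); + +SELECT age('hour', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC')); +SELECT age('hour', toDateTime64('2015-01-01 20:31:36.200', 3, 'UTC'), 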
toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC')); +SELECT age('hour', toDateTime64('2015-01-01 20:30:37.200', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC')); +SELECT age('hour', toDateTime64('2015-01-01 20:30:36.300', 3, 'UTC'), toDateTime64('2015-01-01 23:30:36.200', 3, 'UTC')); +SELECT age('hour', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-01 23:30:36.200100', 6, 'UTC')); + +SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:30:36.200', 3, 'UTC')); +SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 19:30:36.200', 3, 'UTC')); +SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:28:36.200', 3, 'UTC')); +SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:30:35.200', 3, 'UTC')); +SELECT age('day', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-04 20:30:36.199', 3, 'UTC')); +SELECT age('day', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-04 20:30:36.200100', 6, 'UTC')); + +SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:30:36.200', 3, 'UTC')); +SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 19:30:36.200', 3, 'UTC')); +SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:29:36.200', 3, 'UTC')); +SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:30:35.200', 3, 'UTC')); +SELECT age('week', toDateTime64('2015-01-01 20:30:36.200', 3, 'UTC'), toDateTime64('2015-01-15 20:30:36.100', 3, 'UTC')); +SELECT age('week', toDateTime64('2015-01-01 20:30:36.200101', 6, 'UTC'), toDateTime64('2015-01-15 20:30:36.200100', 6, 'UTC')); + +SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:30:36.200', 3, 'UTC')); +SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-01 20:30:36.200', 3, 'UTC')); +SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 19:30:36.200', 3, 'UTC')); +SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:29:36.200', 3, 'UTC')); +SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:30:35.200', 3, 'UTC')); +SELECT age('month', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-05-02 20:30:36.100', 3, 'UTC')); +SELECT age('month', toDateTime64('2015-01-02 20:30:36.200101', 6, 'UTC'), toDateTime64('2016-05-02 20:30:36.200100', 6, 'UTC')); + +SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:30:36.200', 3, 'UTC')); +SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-01 20:30:36.200', 3, 'UTC')); +SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 19:30:36.200', 3, 'UTC')); +SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:29:36.200', 3, 'UTC')); +SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:30:35.200', 3, 'UTC')); +SELECT age('quarter', toDateTime64('2015-01-02 20:30:36.200', 3, 'UTC'), toDateTime64('2016-04-02 20:30:36.100', 3, 'UTC')); +SELECT 
age('quarter', toDateTime64('2015-01-02 20:30:36.200101', 6, 'UTC'), toDateTime64('2016-04-02 20:30:36.200100', 6, 'UTC')); + +SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:30:36.200', 3, 'UTC')); +SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-01-02 20:30:36.200', 3, 'UTC')); +SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-01 20:30:36.200', 3, 'UTC')); +SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 19:30:36.200', 3, 'UTC')); +SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:29:36.200', 3, 'UTC')); +SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:30:35.200', 3, 'UTC')); +SELECT age('year', toDateTime64('2015-02-02 20:30:36.200', 3, 'UTC'), toDateTime64('2023-02-02 20:30:36.100', 3, 'UTC')); +SELECT age('year', toDateTime64('2015-02-02 20:30:36.200101', 6, 'UTC'), toDateTime64('2023-02-02 20:30:36.200100', 6, 'UTC')); + +-- DateTime64 vs DateTime64 with negative time +SELECT age('millisecond', toDateTime64('1969-12-31 23:59:58.001', 3, 'UTC'), toDateTime64('1970-01-01 00:00:00.350', 3, 'UTC')); +SELECT age('second', toDateTime64('1969-12-31 23:59:58.001', 3, 'UTC'), toDateTime64('1970-01-01 00:00:00.35', 3, 'UTC')); +SELECT age('second', toDateTime64('1969-12-31 23:59:50.001', 3, 'UTC'), toDateTime64('1969-12-31 23:59:55.002', 3, 'UTC')); +SELECT age('second', toDateTime64('1969-12-31 23:59:50.003', 3, 'UTC'), toDateTime64('1969-12-31 23:59:55.002', 3, 'UTC')); + +SELECT DATEDIFF(millisecond, '2021-01-01'::Date, '2021-01-02'::Date); +SELECT DATEDIFF(millisecond, '2021-01-01'::Date, '2021-01-03'::Date32); +SELECT DATEDIFF(millisecond, '2021-01-01'::Date, '2021-01-02 00:01:01'::DateTime); +SELECT DATEDIFF(millisecond, '2021-01-01'::Date, '2021-01-02 00:00:01.299'::DateTime64); +SELECT DATEDIFF(millisecond, '2021-01-01 23:59:59.299'::DateTime64, '2021-01-02'::Date); +SELECT DATEDIFF(millisecond, '2021-01-01 23:59:59.299999'::DateTime64(6), '2021-01-02'::Date); +SELECT DATEDIFF(millisecond, '2021-01-01 23:59:59.2'::DateTime64(1), '2021-01-02'::Date); +SELECT DATEDIFF(microsecond, '2021-01-01 23:59:59.899999'::DateTime64(6), '2021-01-02 00:01:00.100200300'::DateTime64(9)); + +SELECT DATEDIFF(microsecond, '1969-12-31 23:59:59.999950'::DateTime64(6, 'UTC'), '1970-01-01 00:00:00.000010'::DateTime64(6, 'UTC')); +SELECT DATEDIFF(second, '1969-12-31 23:59:59.123'::DateTime64(6, 'UTC'), '1970-01-01 00:00:09.123'::DateTime64(6, 'UTC')); + +SELECT toYYYYMMDDhhmmss(toDateTime64('1969-12-31 23:59:59.900', 3)); diff --git a/parser/testdata/02814_create_index_uniq_noop/ast.json b/parser/testdata/02814_create_index_uniq_noop/ast.json new file mode 100644 index 000000000..be042d63b --- /dev/null +++ b/parser/testdata/02814_create_index_uniq_noop/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001236984, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02814_create_index_uniq_noop/metadata.json b/parser/testdata/02814_create_index_uniq_noop/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02814_create_index_uniq_noop/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02814_create_index_uniq_noop/query.sql b/parser/testdata/02814_create_index_uniq_noop/query.sql new file mode 100644 index 000000000..127b3cbde --- /dev/null +++ b/parser/testdata/02814_create_index_uniq_noop/query.sql @@ -0,0 +1,3 @@ +SET allow_create_index_without_type=1; +SET create_index_ignore_unique=1; +CREATE UNIQUE INDEX idx_tab2_0 ON tab2 (col1); diff --git a/parser/testdata/02814_currentDatabase_for_table_functions/ast.json b/parser/testdata/02814_currentDatabase_for_table_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02814_currentDatabase_for_table_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02814_currentDatabase_for_table_functions/metadata.json b/parser/testdata/02814_currentDatabase_for_table_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02814_currentDatabase_for_table_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02814_currentDatabase_for_table_functions/query.sql b/parser/testdata/02814_currentDatabase_for_table_functions/query.sql new file mode 100644 index 000000000..8b1e3ba1e --- /dev/null +++ b/parser/testdata/02814_currentDatabase_for_table_functions/query.sql @@ -0,0 +1,25 @@ +-- Based on https://github.com/ClickHouse/ClickHouse/issues/52436 +-- Test that inserts performed via the Buffer table engine land in the destination table. +-- { echoOn } + +DROP TABLE IF EXISTS null_table; +DROP TABLE IF EXISTS null_table_buffer; +DROP TABLE IF EXISTS null_mv; +DROP VIEW IF EXISTS number_view; + +CREATE TABLE null_table (number UInt64) ENGINE = Null; +CREATE VIEW number_view as SELECT * FROM numbers(10) as tb; +CREATE MATERIALIZED VIEW null_mv Engine = Log AS SELECT * FROM null_table LEFT JOIN number_view as tb USING number; + +CREATE TABLE null_table_buffer (number UInt64) ENGINE = Buffer(currentDatabase(), null_table, 1, 1, 1, 100, 200, 10000, 20000); +INSERT INTO null_table_buffer VALUES (1); + +-- An OPTIMIZE query should flush the Buffer table, but this is still not guaranteed +-- (see the comment in StorageBuffer::optimize) +-- But the combination of OPTIMIZE + sleep + OPTIMIZE should be enough.
+OPTIMIZE TABLE null_table_buffer; +SELECT sleep(1) FORMAT Null; +OPTIMIZE TABLE null_table_buffer; + +-- Insert above should've landed into `null_mv` +SELECT count() FROM null_mv; diff --git a/parser/testdata/02814_order_by_tuple_window_function/ast.json b/parser/testdata/02814_order_by_tuple_window_function/ast.json new file mode 100644 index 000000000..e1835f17d --- /dev/null +++ b/parser/testdata/02814_order_by_tuple_window_function/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 2)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " WindowDefinition" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001236588, + "rows_read": 12, + "bytes_read": 439 + } +} diff --git a/parser/testdata/02814_order_by_tuple_window_function/metadata.json b/parser/testdata/02814_order_by_tuple_window_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02814_order_by_tuple_window_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02814_order_by_tuple_window_function/query.sql b/parser/testdata/02814_order_by_tuple_window_function/query.sql new file mode 100644 index 000000000..8ba54fc11 --- /dev/null +++ b/parser/testdata/02814_order_by_tuple_window_function/query.sql @@ -0,0 +1 @@ +SELECT 1 ORDER BY tuple(count() OVER ()); diff --git a/parser/testdata/02815_alias_to_length/ast.json b/parser/testdata/02815_alias_to_length/ast.json new file mode 100644 index 000000000..8dd8c9333 --- /dev/null +++ b/parser/testdata/02815_alias_to_length/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function OCTET_LENGTH (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '1234'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001383505, + "rows_read": 7, + "bytes_read": 263 + } +} diff --git a/parser/testdata/02815_alias_to_length/metadata.json b/parser/testdata/02815_alias_to_length/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02815_alias_to_length/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02815_alias_to_length/query.sql b/parser/testdata/02815_alias_to_length/query.sql new file mode 100644 index 000000000..780ac7dac --- /dev/null +++ b/parser/testdata/02815_alias_to_length/query.sql @@ -0,0 +1,6 @@ +SELECT OCTET_LENGTH('1234'); +SELECT OcTet_lenGtH('1234'); +SELECT OCTET_LENGTH('你好,世界'); + +-- This is an implementation-specific behavior of getting the length of an array.
+SELECT OCTET_LENGTH([1,2,3,4]); diff --git a/parser/testdata/02815_analyzer_aggregate_functions_of_group_by_keys/ast.json b/parser/testdata/02815_analyzer_aggregate_functions_of_group_by_keys/ast.json new file mode 100644 index 000000000..6b4f4908a --- /dev/null +++ b/parser/testdata/02815_analyzer_aggregate_functions_of_group_by_keys/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001015414, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02815_analyzer_aggregate_functions_of_group_by_keys/metadata.json b/parser/testdata/02815_analyzer_aggregate_functions_of_group_by_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02815_analyzer_aggregate_functions_of_group_by_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02815_analyzer_aggregate_functions_of_group_by_keys/query.sql b/parser/testdata/02815_analyzer_aggregate_functions_of_group_by_keys/query.sql new file mode 100644 index 000000000..dfb885f5a --- /dev/null +++ b/parser/testdata/02815_analyzer_aggregate_functions_of_group_by_keys/query.sql @@ -0,0 +1,39 @@ +set enable_analyzer = 1; +set optimize_move_functions_out_of_any = 0; + +SELECT 'set optimize_aggregators_of_group_by_keys = 1'; +set optimize_aggregators_of_group_by_keys = 1; + +SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +SELECT max((number % 5) * (number % 7)) AS a FROM numbers(10000000) GROUP BY number % 7, number % 5 ORDER BY a; +SELECT foo FROM (SELECT anyLast(number) AS foo FROM numbers(1) GROUP BY number); +SELECT anyLast(number) FROM numbers(1) GROUP BY number; + +EXPLAIN QUERY TREE SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +EXPLAIN QUERY TREE SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +EXPLAIN QUERY TREE SELECT max((number % 5) * (number % 7)) AS a FROM numbers(10000000) GROUP BY number % 7, number % 5 ORDER BY a; +EXPLAIN QUERY TREE SELECT foo FROM (SELECT anyLast(number) AS foo FROM numbers(1) GROUP BY number); + +EXPLAIN QUERY TREE +SELECT min(number) OVER (PARTITION BY number % 2) +FROM numbers(3) +GROUP BY number; + +SELECT 'set optimize_aggregators_of_group_by_keys = 0'; +set optimize_aggregators_of_group_by_keys = 0; + +SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +SELECT max((number % 5) * (number % 7)) AS a FROM numbers(10000000) GROUP BY number % 7, number % 5 ORDER BY a; +SELECT foo FROM (SELECT anyLast(number) AS foo FROM numbers(1) GROUP BY number); + +EXPLAIN QUERY TREE SELECT min(number % 2) AS a, max(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +EXPLAIN QUERY TREE SELECT any(number % 2) AS a, anyLast(number % 3) AS b FROM numbers(10000000) GROUP BY number % 2, number % 3 ORDER BY a, b; +EXPLAIN QUERY TREE SELECT max((number % 5) * (number % 7)) AS a FROM numbers(10000000) GROUP BY number % 7, number % 5 ORDER BY a; 
+EXPLAIN QUERY TREE SELECT foo FROM (SELECT anyLast(number) AS foo FROM numbers(1) GROUP BY number); + +EXPLAIN QUERY TREE +SELECT min(number) OVER (PARTITION BY number % 2) +FROM numbers(3) +GROUP BY number; diff --git a/parser/testdata/02815_empty_subquery_nullable_bug/ast.json b/parser/testdata/02815_empty_subquery_nullable_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02815_empty_subquery_nullable_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02815_empty_subquery_nullable_bug/metadata.json b/parser/testdata/02815_empty_subquery_nullable_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02815_empty_subquery_nullable_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02815_empty_subquery_nullable_bug/query.sql b/parser/testdata/02815_empty_subquery_nullable_bug/query.sql new file mode 100644 index 000000000..3f5d8441b --- /dev/null +++ b/parser/testdata/02815_empty_subquery_nullable_bug/query.sql @@ -0,0 +1,31 @@ +SELECT * FROM ( + SELECT ( + SELECT 0 AS x + FROM (SELECT 1 AS x) t1 + JOIN (SELECT 1 AS x) t2 USING (x) + ) AS x + FROM ( SELECT 1 AS x ) +) FORMAT Null; + +SELECT (x IN (111)) == 1 +FROM +( + SELECT ( SELECT 3 :: Nullable(UInt8) WHERE 0 ) AS x + FROM ( SELECT 2 AS x ) +) FORMAT Null; + +SELECT (x IN (111)) == 1 +FROM +( + SELECT ( SELECT 3 :: Nullable(UInt8) WHERE 1 ) AS x + FROM ( SELECT 2 AS x ) +) FORMAT Null; + +SELECT (x IN (111)) == 1 +FROM +( + SELECT ( SELECT 3 WHERE 0 ) AS x + FROM ( SELECT 2 AS x ) +) FORMAT Null; + +SELECT x, (SELECT 1 WHERE NULL) AS x FORMAT Null; diff --git a/parser/testdata/02815_first_line/ast.json b/parser/testdata/02815_first_line/ast.json new file mode 100644 index 000000000..e801143bf --- /dev/null +++ b/parser/testdata/02815_first_line/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function firstLine (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'foo\\nbar\\nbaz'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001056993, + "rows_read": 7, + "bytes_read": 269 + } +} diff --git a/parser/testdata/02815_first_line/metadata.json b/parser/testdata/02815_first_line/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02815_first_line/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02815_first_line/query.sql b/parser/testdata/02815_first_line/query.sql new file mode 100644 index 000000000..8c0affaeb --- /dev/null +++ b/parser/testdata/02815_first_line/query.sql @@ -0,0 +1,12 @@ +select firstLine('foo\nbar\nbaz'); +select firstLine('foo\rbar\rbaz'); +select firstLine('foo\r\nbar\r\nbaz'); +select firstLine('foobarbaz'); + +select '== vector'; + +drop table if exists 02815_first_line_vector; +create table 02815_first_line_vector (n Int32, text String) engine = MergeTree order by n; + +insert into 02815_first_line_vector values (1, 'foo\nbar\nbaz'), (2, 'quux\n'), (3, 'single line'), (4, 'windows\r\nline breaks'); +select n, firstLine(text) from 02815_first_line_vector order by n; diff --git a/parser/testdata/02815_fix_not_found_constants_col_in_block/ast.json 
b/parser/testdata/02815_fix_not_found_constants_col_in_block/ast.json new file mode 100644 index 000000000..9f4a327e7 --- /dev/null +++ b/parser/testdata/02815_fix_not_found_constants_col_in_block/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001483358, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02815_fix_not_found_constants_col_in_block/metadata.json b/parser/testdata/02815_fix_not_found_constants_col_in_block/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02815_fix_not_found_constants_col_in_block/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02815_fix_not_found_constants_col_in_block/query.sql b/parser/testdata/02815_fix_not_found_constants_col_in_block/query.sql new file mode 100644 index 000000000..fa784cf12 --- /dev/null +++ b/parser/testdata/02815_fix_not_found_constants_col_in_block/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (vkey UInt32, c0 Float32, primary key(c0)) engine = AggregatingMergeTree; +insert into t0 values (19000, 1); +select null as c_2_0, ref_2.c0 as c_2_1, ref_2.vkey as c_2_2 from t0 as ref_2 order by c_2_0 asc, c_2_1 asc, c_2_2 asc; +select null as c_2_0, ref_2.c0 as c_2_1, ref_2.vkey as c_2_2 from t0 as ref_2 order by c_2_0 asc, c_2_1 asc; +DROP TABLE t0; diff --git a/parser/testdata/02815_join_algorithm_setting/ast.json b/parser/testdata/02815_join_algorithm_setting/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02815_join_algorithm_setting/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02815_join_algorithm_setting/metadata.json b/parser/testdata/02815_join_algorithm_setting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02815_join_algorithm_setting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02815_join_algorithm_setting/query.sql b/parser/testdata/02815_join_algorithm_setting/query.sql new file mode 100644 index 000000000..f18f9fe8d --- /dev/null +++ b/parser/testdata/02815_join_algorithm_setting/query.sql @@ -0,0 +1,112 @@ +-- Tags: use-rocksdb + +DROP TABLE IF EXISTS rdb; +DROP TABLE IF EXISTS t2; + +CREATE TABLE rdb ( `key` UInt32, `value` String ) +ENGINE = EmbeddedRocksDB PRIMARY KEY key; +INSERT INTO rdb VALUES (1, 'a'), (2, 'b'), (3, 'c'), (4, 'd'), (5, 'e'); + +CREATE TABLE t2 ( `k` UInt16 ) ENGINE = TinyLog; +INSERT INTO t2 VALUES (4), (5), (6); + +SELECT value == 'direct,parallel_hash,hash' FROM system.settings WHERE name = 'join_algorithm'; + +SELECT countIf(explain like '%Algorithm: DirectKeyValueJoin%'), countIf(explain like '%Algorithm: HashJoin%') FROM ( + EXPLAIN PLAN actions = 1 + SELECT * FROM ( SELECT k AS key FROM t2 ) AS t2 + INNER JOIN rdb ON rdb.key = t2.key + ORDER BY key ASC +); + +SET join_algorithm = 'direct, hash'; + +SELECT value == 'direct,hash' FROM system.settings WHERE name = 'join_algorithm'; + +SELECT countIf(explain like '%Algorithm: DirectKeyValueJoin%'), countIf(explain like '%Algorithm: HashJoin%') FROM ( + EXPLAIN PLAN actions = 1 + SELECT * FROM ( SELECT k AS key FROM t2 ) AS t2 + INNER JOIN rdb ON rdb.key = t2.key + ORDER BY key ASC +); + +SET join_algorithm = 'hash, direct'; + +SELECT value == 'hash,direct' FROM system.settings WHERE 
name = 'join_algorithm'; + +SELECT countIf(explain like '%Algorithm: DirectKeyValueJoin%'), countIf(explain like '%Algorithm: HashJoin%') FROM ( + EXPLAIN PLAN actions = 1 + SELECT * FROM ( SELECT k AS key FROM t2 ) AS t2 + INNER JOIN rdb ON rdb.key = t2.key + ORDER BY key ASC +); + +SET join_algorithm = 'grace_hash,hash'; + +SELECT value == 'grace_hash,hash' FROM system.settings WHERE name = 'join_algorithm'; + +SELECT countIf(explain like '%Algorithm: GraceHashJoin%'), countIf(explain like '%Algorithm: HashJoin%') FROM ( + EXPLAIN PLAN actions = 1 + SELECT * FROM ( SELECT number AS key, number * 10 AS key2 FROM numbers_mt(10) ) AS t1 + JOIN ( SELECT k AS key, k + 100 AS key2 FROM t2 ) AS t2 ON t1.key = t2.key OR t1.key2 = t2.key2 +); + +SELECT countIf(explain like '%Algorithm: GraceHashJoin%'), countIf(explain like '%Algorithm: HashJoin%') FROM ( + EXPLAIN PLAN actions = 1 + SELECT * FROM ( SELECT number AS key, number * 10 AS key2 FROM numbers_mt(10) ) AS t1 + JOIN ( SELECT k AS key, k + 100 AS key2 FROM t2 ) AS t2 ON t1.key = t2.key +); + +SET join_algorithm = 'grace_hash, hash, auto'; + +SELECT value = 'grace_hash,hash,auto' FROM system.settings WHERE name = 'join_algorithm'; + + +DROP DICTIONARY IF EXISTS dict; +DROP TABLE IF EXISTS src; + +CREATE TABLE src (id UInt64, s String) ENGINE = MergeTree ORDER BY id +AS SELECT number, toString(number) FROM numbers(1000000); + +CREATE DICTIONARY dict( + id UInt64, + s String +) PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'src' DB currentDatabase())) +LIFETIME (MIN 0 MAX 0) +LAYOUT(HASHED()); + +SET join_algorithm = 'default'; + +SELECT countIf(explain like '%Algorithm: DirectKeyValueJoin%'), countIf(explain like '%Algorithm: HashJoin%') FROM ( + EXPLAIN actions = 1 + SELECT s FROM (SELECT toUInt64(9911) id) t1 INNER JOIN dict t2 USING (id) +); + +SET join_algorithm = 'direct,hash'; +SELECT countIf(explain like '%Algorithm: DirectKeyValueJoin%'), countIf(explain like '%Algorithm: HashJoin%') FROM ( + EXPLAIN actions = 1 + SELECT s FROM (SELECT toUInt64(9911) id) t1 INNER JOIN dict t2 USING (id) +); + +SET join_algorithm = 'hash,direct'; +SELECT countIf(explain like '%Algorithm: DirectKeyValueJoin%'), countIf(explain like '%Algorithm: HashJoin%') FROM ( + EXPLAIN actions = 1 + SELECT s FROM (SELECT toUInt64(9911) id) t1 INNER JOIN dict t2 USING (id) +); + +SET join_algorithm = 'grace_hash'; + +-- Cannot execute grace hash join with an OR condition +SELECT * FROM ( SELECT number AS key, number * 10 AS key2 FROM numbers_mt(10) ) AS t1 +JOIN ( SELECT k AS key, k + 100 AS key2 FROM t2 ) AS t2 ON t1.key = t2.key OR t1.key2 = t2.key2; -- { serverError NOT_IMPLEMENTED } + +-- But for CROSS JOIN, the `hash` algorithm is chosen even though it's not enabled +SELECT * FROM ( SELECT number AS key, number * 10 AS key2 FROM numbers_mt(10) ) AS t1 +CROSS JOIN ( SELECT k AS key, k + 100 AS key2 FROM t2 ) AS t2 FORMAT Null +SETTINGS enable_analyzer = 1; + +-- ...
(not for old analyzer) +SELECT * FROM ( SELECT number AS key, number * 10 AS key2 FROM numbers_mt(10) ) AS t1 +CROSS JOIN ( SELECT k AS key, k + 100 AS key2 FROM t2 ) AS t2 FORMAT Null +SETTINGS enable_analyzer = 0; -- { serverError NOT_IMPLEMENTED } diff --git a/parser/testdata/02815_logical_error_cannot_get_column_name_of_set/ast.json b/parser/testdata/02815_logical_error_cannot_get_column_name_of_set/ast.json new file mode 100644 index 000000000..afb2f3482 --- /dev/null +++ b/parser/testdata/02815_logical_error_cannot_get_column_name_of_set/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Set" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.00096869, + "rows_read": 11, + "bytes_read": 408 + } +} diff --git a/parser/testdata/02815_logical_error_cannot_get_column_name_of_set/metadata.json b/parser/testdata/02815_logical_error_cannot_get_column_name_of_set/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02815_logical_error_cannot_get_column_name_of_set/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02815_logical_error_cannot_get_column_name_of_set/query.sql b/parser/testdata/02815_logical_error_cannot_get_column_name_of_set/query.sql new file mode 100644 index 000000000..cd3ce3c9d --- /dev/null +++ b/parser/testdata/02815_logical_error_cannot_get_column_name_of_set/query.sql @@ -0,0 +1,3 @@ +SELECT * FROM numbers(SETTINGS x = 1); -- { serverError BAD_ARGUMENTS, UNSUPPORTED_METHOD } +SELECT * FROM numbers(numbers(SETTINGS x = 1)); -- { serverError UNKNOWN_FUNCTION, UNSUPPORTED_METHOD } +SELECT * FROM numbers(numbers(SETTINGS x = 1), SETTINGS x = 1); -- { serverError UNKNOWN_FUNCTION, UNSUPPORTED_METHOD } diff --git a/parser/testdata/02815_range_dict_no_direct_join/ast.json b/parser/testdata/02815_range_dict_no_direct_join/ast.json new file mode 100644 index 000000000..4c49d46e7 --- /dev/null +++ b/parser/testdata/02815_range_dict_no_direct_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery discounts (children 1)" + }, + { + "explain": " Identifier discounts" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001028352, + "rows_read": 2, + "bytes_read": 71 + } +} diff --git a/parser/testdata/02815_range_dict_no_direct_join/metadata.json b/parser/testdata/02815_range_dict_no_direct_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02815_range_dict_no_direct_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02815_range_dict_no_direct_join/query.sql b/parser/testdata/02815_range_dict_no_direct_join/query.sql new file mode 100644 index 000000000..6ed195cf2 --- /dev/null +++ b/parser/testdata/02815_range_dict_no_direct_join/query.sql @@ -0,0 +1,35 @@ +CREATE TABLE discounts +( + advertiser_id UInt64, + discount_start_date 
Date, + discount_end_date Nullable(Date), + amount Float64 +) +ENGINE = Memory; + +INSERT INTO discounts VALUES (1, '2015-01-01', Null, 0.1); +INSERT INTO discounts VALUES (1, '2015-01-15', Null, 0.2); +INSERT INTO discounts VALUES (2, '2015-01-01', '2015-01-15', 0.3); +INSERT INTO discounts VALUES (2, '2015-01-04', '2015-01-10', 0.4); +INSERT INTO discounts VALUES (3, '1970-01-01', '2015-01-15', 0.5); +INSERT INTO discounts VALUES (3, '1970-01-01', '2015-01-10', 0.6); + +CREATE DICTIONARY discounts_dict +( + advertiser_id UInt64, + discount_start_date Date, + discount_end_date Nullable(Date), + amount Float64 +) +PRIMARY KEY advertiser_id +SOURCE(CLICKHOUSE(TABLE discounts)) +LIFETIME(MIN 600 MAX 900) +LAYOUT(RANGE_HASHED(RANGE_LOOKUP_STRATEGY 'max')) +RANGE(MIN discount_start_date MAX discount_end_date); + +CREATE TABLE ids (id UInt64) ENGINE = Memory; +INSERT INTO ids SELECT * FROM numbers(10); + +SELECT id, amount FROM ids INNER JOIN discounts_dict ON id = advertiser_id ORDER BY id, amount SETTINGS join_algorithm = 'direct,hash'; +SELECT id, amount FROM ids INNER JOIN discounts_dict ON id = advertiser_id ORDER BY id, amount SETTINGS join_algorithm = 'default'; +SELECT id, amount FROM ids INNER JOIN discounts_dict ON id = advertiser_id ORDER BY id, amount SETTINGS join_algorithm = 'direct'; -- { serverError NOT_IMPLEMENTED } diff --git a/parser/testdata/02816_check_projection_metadata/ast.json b/parser/testdata/02816_check_projection_metadata/ast.json new file mode 100644 index 000000000..9fcc6e550 --- /dev/null +++ b/parser/testdata/02816_check_projection_metadata/ast.json @@ -0,0 +1,121 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery kek (children 3)" + }, + { + "explain": " Identifier kek" + }, + { + "explain": " Columns definition (children 2)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " ColumnDeclaration uuid (children 1)" + }, + { + "explain": " DataType FixedString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_16" + }, + { + "explain": " ColumnDeclaration id (children 1)" + }, + { + "explain": " DataType int" + }, + { + "explain": " ColumnDeclaration ns (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration dt (children 1)" + }, + { + "explain": " DataType DateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_6" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Projection (children 1)" + }, + { + "explain": " ProjectionSelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier ns" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier id" + }, + { + "explain": " Identifier dt" + }, + { + "explain": " Identifier uuid" + } + ], + + "rows": 33, + + "statistics": + { + "elapsed": 0.001465291, + "rows_read": 33, + "bytes_read": 1165 + } +} diff --git a/parser/testdata/02816_check_projection_metadata/metadata.json 
b/parser/testdata/02816_check_projection_metadata/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02816_check_projection_metadata/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02816_check_projection_metadata/query.sql b/parser/testdata/02816_check_projection_metadata/query.sql new file mode 100644 index 000000000..e7da043ad --- /dev/null +++ b/parser/testdata/02816_check_projection_metadata/query.sql @@ -0,0 +1,3 @@ +create table kek (uuid FixedString(16), id int, ns String, dt DateTime64(6), projection null_pk (select * order by ns, 1, 4)) engine=MergeTree order by (id, dt, uuid); -- {serverError ILLEGAL_COLUMN } +-- This query could previously segfault or throw LOGICAL_ERROR, when we did not check the projection PK +-- insert into kek select * from generateRandom(10000); diff --git a/parser/testdata/02816_has_token_empty/ast.json b/parser/testdata/02816_has_token_empty/ast.json new file mode 100644 index 000000000..a6a6a7e9c --- /dev/null +++ b/parser/testdata/02816_has_token_empty/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function hasTokenCaseInsensitive (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'K(G'" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001092909, + "rows_read": 8, + "bytes_read": 297 + } +} diff --git a/parser/testdata/02816_has_token_empty/metadata.json b/parser/testdata/02816_has_token_empty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02816_has_token_empty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02816_has_token_empty/query.sql b/parser/testdata/02816_has_token_empty/query.sql new file mode 100644 index 000000000..1d13e7d10 --- /dev/null +++ b/parser/testdata/02816_has_token_empty/query.sql @@ -0,0 +1,11 @@ +SELECT hasTokenCaseInsensitive('K(G', ''); +SELECT hasTokenCaseInsensitive('Hello', ''); +SELECT hasTokenCaseInsensitive('', ''); +SELECT hasTokenCaseInsensitive('', 'Hello'); +SELECT hasTokenCaseInsensitiveOrNull('Hello', ''); +SELECT hasTokenCaseInsensitiveOrNull('', ''); +SELECT hasToken('Hello', ''); +SELECT hasToken('', 'Hello'); +SELECT hasToken('', ''); +SELECT hasTokenOrNull('', ''); +SELECT hasTokenOrNull('Hello', ''); diff --git a/parser/testdata/02816_s2_invalid_point/ast.json b/parser/testdata/02816_s2_invalid_point/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02816_s2_invalid_point/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02816_s2_invalid_point/metadata.json b/parser/testdata/02816_s2_invalid_point/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02816_s2_invalid_point/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02816_s2_invalid_point/query.sql b/parser/testdata/02816_s2_invalid_point/query.sql new file mode 100644 index 000000000..590eb8b5e --- /dev/null +++ b/parser/testdata/02816_s2_invalid_point/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest + +SELECT geoToS2(toFloat64(toUInt64(-1)), toFloat64(toUInt64(-1))); -- { serverError BAD_ARGUMENTS }
diff --git a/parser/testdata/02817_group_array_moving_zero_window_size/ast.json b/parser/testdata/02817_group_array_moving_zero_window_size/ast.json new file mode 100644 index 000000000..b96bfc775 --- /dev/null +++ b/parser/testdata/02817_group_array_moving_zero_window_size/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function groupArrayMovingAvg (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toInt64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001130977, + "rows_read": 14, + "bytes_read": 552 + } +} diff --git a/parser/testdata/02817_group_array_moving_zero_window_size/metadata.json b/parser/testdata/02817_group_array_moving_zero_window_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02817_group_array_moving_zero_window_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02817_group_array_moving_zero_window_size/query.sql b/parser/testdata/02817_group_array_moving_zero_window_size/query.sql new file mode 100644 index 000000000..fcbcaf124 --- /dev/null +++ b/parser/testdata/02817_group_array_moving_zero_window_size/query.sql @@ -0,0 +1,2 @@ +SELECT groupArrayMovingAvg ( toInt64 ( 0 ) ) ( toDecimal32 ( 1 , 1 ) ); -- { serverError BAD_ARGUMENTS } + diff --git a/parser/testdata/02818_parameterized_view_with_cte_multiple_usage/ast.json b/parser/testdata/02818_parameterized_view_with_cte_multiple_usage/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02818_parameterized_view_with_cte_multiple_usage/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02818_parameterized_view_with_cte_multiple_usage/metadata.json b/parser/testdata/02818_parameterized_view_with_cte_multiple_usage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02818_parameterized_view_with_cte_multiple_usage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02818_parameterized_view_with_cte_multiple_usage/query.sql b/parser/testdata/02818_parameterized_view_with_cte_multiple_usage/query.sql new file mode 100644 index 000000000..d56d9c4e1 --- /dev/null +++ b/parser/testdata/02818_parameterized_view_with_cte_multiple_usage/query.sql @@ -0,0 +1,16 @@ +create view test_param_view as +with {param_test_val:UInt8} as param_test_val +select param_test_val, + arrayCount((a)->(a < param_test_val), t.arr) as cnt1 +from (select [1,2,3,4,5] as arr) t; + +select * from test_param_view(param_test_val = 3); + +create view test_param_view2 as +with {param_test_val:UInt8} as param_test_val +select param_test_val, + arrayCount((a)->(a < param_test_val), t.arr) as cnt1, + arrayCount((a)->(a < param_test_val+1), t.arr) as cnt2 +from (select [1,2,3,4,5] as arr) t; + +select * from test_param_view2(param_test_val = 3); \ No 
newline at end of file diff --git a/parser/testdata/02828_create_as_table_function_rename/ast.json b/parser/testdata/02828_create_as_table_function_rename/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02828_create_as_table_function_rename/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02828_create_as_table_function_rename/metadata.json b/parser/testdata/02828_create_as_table_function_rename/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02828_create_as_table_function_rename/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02828_create_as_table_function_rename/query.sql b/parser/testdata/02828_create_as_table_function_rename/query.sql new file mode 100644 index 000000000..7e24e485f --- /dev/null +++ b/parser/testdata/02828_create_as_table_function_rename/query.sql @@ -0,0 +1,7 @@ + +drop table if exists t1; +create table t1 as remote('localhost', 'system.one'); +rename table t1 to t2; +select * from t2; +rename table t2 to t1; +drop table t1; diff --git a/parser/testdata/02830_insert_values_time_interval/ast.json b/parser/testdata/02830_insert_values_time_interval/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02830_insert_values_time_interval/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02830_insert_values_time_interval/metadata.json b/parser/testdata/02830_insert_values_time_interval/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02830_insert_values_time_interval/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02830_insert_values_time_interval/query.sql b/parser/testdata/02830_insert_values_time_interval/query.sql new file mode 100644 index 000000000..f5d5d8a4c --- /dev/null +++ b/parser/testdata/02830_insert_values_time_interval/query.sql @@ -0,0 +1,25 @@ + +DROP TABLE IF EXISTS t1; + +CREATE TABLE t1 +( + c1 DateTime DEFAULT now() NOT NULL, + c2 DateTime DEFAULT now() NOT NULL, + c3 DateTime DEFAULT now() NOT NULL, + PRIMARY KEY(c1, c2, c3) +) ENGINE = MergeTree() +ORDER BY (c1, c2, c3); + +INSERT INTO t1 (c1,c2,c3) VALUES(now() + INTERVAL '1 day 1 hour 1 minute 1 second', now(), now()); + +DROP TABLE t1; + +CREATE TABLE t1 (n int, dt DateTime) ENGINE=Memory; + +SET input_format_values_interpret_expressions=0; +INSERT INTO t1 VALUES (1, toDateTime('2023-07-20 21:53:01') + INTERVAL '1 day 1 hour 1 minute 1 second'), (2, toDateTime('2023-07-20 21:53:01') + INTERVAL '1 day'); +INSERT INTO t1 VALUES (3, toDateTime('2023-07-20 21:53:01') + INTERVAL 1 DAY), (4, toDateTime('2023-07-20 21:53:01') + (toIntervalMinute(1), toIntervalSecond(1))); + +SELECT * FROM t1 ORDER BY n; + +DROP TABLE t1; diff --git a/parser/testdata/02831_ast_fuzz_asan_join/ast.json b/parser/testdata/02831_ast_fuzz_asan_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02831_ast_fuzz_asan_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02831_ast_fuzz_asan_join/metadata.json b/parser/testdata/02831_ast_fuzz_asan_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02831_ast_fuzz_asan_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02831_ast_fuzz_asan_join/query.sql b/parser/testdata/02831_ast_fuzz_asan_join/query.sql new file mode 100644 index 000000000..7c7bfd2df --- /dev/null +++ 
b/parser/testdata/02831_ast_fuzz_asan_join/query.sql @@ -0,0 +1,22 @@ +SELECT + '0', + toTypeName(materialize(js2.s)) +FROM +( + SELECT number AS k + FROM numbers(100) +) AS js1 +FULL OUTER JOIN +( + SELECT + toLowCardinality(2147483647 + 256) AS k, + '-0.0000000001', + 1024, + toString(number + 10) AS s + FROM numbers(1024) +) AS js2 ON js1.k = js2.k +ORDER BY + inf DESC NULLS FIRST, + js1.k ASC NULLS LAST, + js2.k ASC +FORMAT `Null` diff --git a/parser/testdata/02831_regexp_analyze_recursion/ast.json b/parser/testdata/02831_regexp_analyze_recursion/ast.json new file mode 100644 index 000000000..8833307b1 --- /dev/null +++ b/parser/testdata/02831_regexp_analyze_recursion/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function match (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Function repeat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '('" + }, + { + "explain": " Literal UInt64_100000" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.00156525, + "rows_read": 11, + "bytes_read": 400 + } +} diff --git a/parser/testdata/02831_regexp_analyze_recursion/metadata.json b/parser/testdata/02831_regexp_analyze_recursion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02831_regexp_analyze_recursion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02831_regexp_analyze_recursion/query.sql b/parser/testdata/02831_regexp_analyze_recursion/query.sql new file mode 100644 index 000000000..800b2c871 --- /dev/null +++ b/parser/testdata/02831_regexp_analyze_recursion/query.sql @@ -0,0 +1 @@ +SELECT match('', repeat('(', 100000)); -- { serverError CANNOT_COMPILE_REGEXP } diff --git a/parser/testdata/02831_trash/ast.json b/parser/testdata/02831_trash/ast.json new file mode 100644 index 000000000..98cdea4f9 --- /dev/null +++ b/parser/testdata/02831_trash/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CRC32IEEE (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sipHash128 (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.00134814, + "rows_read": 8, + "bytes_read": 307 + } +} diff --git a/parser/testdata/02831_trash/metadata.json b/parser/testdata/02831_trash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02831_trash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02831_trash/query.sql b/parser/testdata/02831_trash/query.sql new file mode 100644 index 000000000..600e2ad06 --- /dev/null +++ b/parser/testdata/02831_trash/query.sql @@ -0,0 +1,2 @@ +SELECT CRC32IEEE(sipHash128()); +SELECT CRC32(murmurHash3_128()); diff --git a/parser/testdata/02832_alter_delete_indexes_projections/ast.json 
b/parser/testdata/02832_alter_delete_indexes_projections/ast.json new file mode 100644 index 000000000..12c2e1efd --- /dev/null +++ b/parser/testdata/02832_alter_delete_indexes_projections/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001075694, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02832_alter_delete_indexes_projections/metadata.json b/parser/testdata/02832_alter_delete_indexes_projections/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02832_alter_delete_indexes_projections/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02832_alter_delete_indexes_projections/query.sql b/parser/testdata/02832_alter_delete_indexes_projections/query.sql new file mode 100644 index 000000000..399d0fba5 --- /dev/null +++ b/parser/testdata/02832_alter_delete_indexes_projections/query.sql @@ -0,0 +1,26 @@ +set mutations_sync = 2; + +drop table if exists t_delete_skip_index; + +create table t_delete_skip_index (x UInt32, y String, index i y type minmax granularity 3) engine = MergeTree order by tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into t_delete_skip_index select number, toString(number) from numbers(8192 * 10); + +select count() from t_delete_skip_index where y in (4, 5); +alter table t_delete_skip_index delete where x < 8192; +select count() from t_delete_skip_index where y in (4, 5); + +drop table if exists t_delete_skip_index; +drop table if exists t_delete_projection; + +create table t_delete_projection (x UInt32, y UInt64, projection p (select sum(y))) engine = MergeTree order by tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into t_delete_projection select number, toString(number) from numbers(8192 * 10); + +select sum(y) from t_delete_projection settings optimize_use_projections = 0; +select sum(y) from t_delete_projection settings optimize_use_projections = 0, force_optimize_projection = 1; + +alter table t_delete_projection delete where x < 8192; + +select sum(y) from t_delete_projection settings optimize_use_projections = 0; +select sum(y) from t_delete_projection settings optimize_use_projections = 0, force_optimize_projection = 1; + +drop table if exists t_delete_projection; diff --git a/parser/testdata/02832_integer_type_inference/ast.json b/parser/testdata/02832_integer_type_inference/ast.json new file mode 100644 index 000000000..f3cc7cc63 --- /dev/null +++ b/parser/testdata/02832_integer_type_inference/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[Int64_-4741124612489978151, Int64_-3236599669630092879, UInt64_5607475129431807682]" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001062924, + "rows_read": 5, + "bytes_read": 259 + } +} diff --git a/parser/testdata/02832_integer_type_inference/metadata.json b/parser/testdata/02832_integer_type_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02832_integer_type_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02832_integer_type_inference/query.sql b/parser/testdata/02832_integer_type_inference/query.sql new file mode 100644 index 000000000..c6e7c744f --- /dev/null +++ b/parser/testdata/02832_integer_type_inference/query.sql @@ -0,0 +1,11 @@ +select [-4741124612489978151, -3236599669630092879, 5607475129431807682]; +select [100, -100, 5607475129431807682, 5607475129431807683]; +select [[-4741124612489978151], [-3236599669630092879, 5607475129431807682]]; +select [[-4741124612489978151, -3236599669630092879], [5607475129431807682]]; +select [tuple(-4741124612489978151, 1), tuple(-3236599669630092879, 2), tuple(560747512943180768, 3)]; +select array(-4741124612489978151, 1, -3236599669630092879, 2, 560747512943180768, 3); +select map(-4741124612489978151, 1, -3236599669630092879, 2, 5607475129431807682, 3); +select [map(-4741124612489978151, 1, -3236599669630092879, 2, 5607475129431807682, 3), map(-1, 1)]; +select map(1, -4741124612489978151, 2, -3236599669630092879, 3, 5607475129431807682); +select [map(1, -4741124612489978151, 2, -3236599669630092879, 3, 5607475129431807682), map(-1, 1)]; +select if(materialize(1), -1234567890123456789, 1234567890123456789); diff --git a/parser/testdata/02832_transform_fixed_string_no_default/ast.json b/parser/testdata/02832_transform_fixed_string_no_default/ast.json new file mode 100644 index 000000000..555fe349d --- /dev/null +++ b/parser/testdata/02832_transform_fixed_string_no_default/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function transform (alias name) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier name" + }, + { + "explain": " Literal Array_['a', 'b']" + }, + { + "explain": " Literal Array_['', NULL]" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (alias name) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'test'" + }, + { + "explain": " Literal 'Nullable(FixedString(4))'" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.00143296, + "rows_read": 21, + "bytes_read": 892 + } +} diff --git a/parser/testdata/02832_transform_fixed_string_no_default/metadata.json b/parser/testdata/02832_transform_fixed_string_no_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02832_transform_fixed_string_no_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02832_transform_fixed_string_no_default/query.sql b/parser/testdata/02832_transform_fixed_string_no_default/query.sql new file mode 100644 index 000000000..0e58c716c --- /dev/null +++ b/parser/testdata/02832_transform_fixed_string_no_default/query.sql @@ -0,0 +1,3 @@ +SELECT transform(name, ['a', 'b'], ['', NULL]) AS name FROM (SELECT 'test'::Nullable(FixedString(4)) AS name); 
+SELECT transform(name, ['test', 'b'], ['', NULL]) AS name FROM (SELECT 'test'::Nullable(FixedString(4)) AS name); +SELECT transform(name, ['a', 'test'], ['', NULL]) AS name FROM (SELECT 'test'::Nullable(FixedString(4)) AS name); diff --git a/parser/testdata/02833_array_join_columns/ast.json b/parser/testdata/02833_array_join_columns/ast.json new file mode 100644 index 000000000..3f5f83799 --- /dev/null +++ b/parser/testdata/02833_array_join_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_array_joins (children 1)" + }, + { + "explain": " Identifier test_array_joins" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001165651, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/02833_array_join_columns/metadata.json b/parser/testdata/02833_array_join_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02833_array_join_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02833_array_join_columns/query.sql b/parser/testdata/02833_array_join_columns/query.sql new file mode 100644 index 000000000..3f9a33a39 --- /dev/null +++ b/parser/testdata/02833_array_join_columns/query.sql @@ -0,0 +1,19 @@ +drop table if exists test_array_joins; +drop table if exists v4test_array_joins; + +create table test_array_joins +( + id UInt64 default rowNumberInAllBlocks() + 1, + arr_1 Array(String), + arr_2 Array(String), + arr_3 Array(String), + arr_4 Array(String) +) engine = MergeTree order by id; + +insert into test_array_joins (id,arr_1, arr_2, arr_3, arr_4) +SELECT number,array(randomPrintableASCII(3)),array(randomPrintableASCII(3)),array(randomPrintableASCII(3)),array(randomPrintableASCII(3)) +from numbers(1000); + +create view v4test_array_joins as SELECT * from test_array_joins where id != 10; + +select * from v4test_array_joins array join columns('^arr') where match(arr_4,'a') and id < 100 order by id format Null settings optimize_read_in_order = 0; diff --git a/parser/testdata/02833_multiprewhere_extra_column/ast.json b/parser/testdata/02833_multiprewhere_extra_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02833_multiprewhere_extra_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02833_multiprewhere_extra_column/metadata.json b/parser/testdata/02833_multiprewhere_extra_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02833_multiprewhere_extra_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02833_multiprewhere_extra_column/query.sql b/parser/testdata/02833_multiprewhere_extra_column/query.sql new file mode 100644 index 000000000..949154b76 --- /dev/null +++ b/parser/testdata/02833_multiprewhere_extra_column/query.sql @@ -0,0 +1,25 @@ +-- Tags: no-parallel, no-random-settings, no-random-merge-tree-settings, no-object-storage + +drop table if exists t_multi_prewhere; +drop row policy if exists policy_02834 on t_multi_prewhere; + +create table t_multi_prewhere (a UInt64, b UInt64, c UInt8) +engine = MergeTree order by tuple() +settings min_bytes_for_wide_part = 0; + +create row policy policy_02834 on t_multi_prewhere using a > 2000 as permissive to all; +insert into t_multi_prewhere select number, number, number from numbers(10000); + +system drop mark cache; +select sum(b) from t_multi_prewhere prewhere a < 5000; + +system 
flush logs query_log; + +select ProfileEvents['FileOpen'] from system.query_log +where + type = 'QueryFinish' + and current_database = currentDatabase() + and query ilike '%select sum(b) from t_multi_prewhere prewhere a < 5000%'; + +drop table if exists t_multi_prewhere; +drop row policy if exists policy_02834 on t_multi_prewhere; diff --git a/parser/testdata/02833_sparse_columns_tuple_function/ast.json b/parser/testdata/02833_sparse_columns_tuple_function/ast.json new file mode 100644 index 000000000..58400ec0f --- /dev/null +++ b/parser/testdata/02833_sparse_columns_tuple_function/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_tuple_sparse (children 1)" + }, + { + "explain": " Identifier t_tuple_sparse" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001282549, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/02833_sparse_columns_tuple_function/metadata.json b/parser/testdata/02833_sparse_columns_tuple_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02833_sparse_columns_tuple_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02833_sparse_columns_tuple_function/query.sql b/parser/testdata/02833_sparse_columns_tuple_function/query.sql new file mode 100644 index 000000000..776dd35dd --- /dev/null +++ b/parser/testdata/02833_sparse_columns_tuple_function/query.sql @@ -0,0 +1,14 @@ +drop table if exists t_tuple_sparse; + +create table t_tuple_sparse (a UInt64, b UInt64) +ENGINE = MergeTree ORDER BY tuple() +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.0; + +insert into t_tuple_sparse values (0, 0); + +select (a, b) from t_tuple_sparse; +select (a, 0) from t_tuple_sparse; +select (a, 1) from t_tuple_sparse; +select (a, NULL) from t_tuple_sparse; + +drop table if exists t_tuple_sparse; diff --git a/parser/testdata/02833_starts_ends_with_utf8/ast.json b/parser/testdata/02833_starts_ends_with_utf8/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02833_starts_ends_with_utf8/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02833_starts_ends_with_utf8/metadata.json b/parser/testdata/02833_starts_ends_with_utf8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02833_starts_ends_with_utf8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02833_starts_ends_with_utf8/query.sql b/parser/testdata/02833_starts_ends_with_utf8/query.sql new file mode 100644 index 000000000..3a783dc28 --- /dev/null +++ b/parser/testdata/02833_starts_ends_with_utf8/query.sql @@ -0,0 +1,19 @@ +-- { echoOn } +select startsWithUTF8('富强民主文明和谐', '富强'); +select startsWithUTF8('富强民主文明和谐', '\xe5'); +select startsWithUTF8('富强民主文明和谐', ''); + +SELECT startsWithUTF8('123', '123'); +SELECT startsWithUTF8('123', '12'); +SELECT startsWithUTF8('123', '1234'); +SELECT startsWithUTF8('123', ''); + +select endsWithUTF8('富强民主文明和谐', '和谐'); +select endsWithUTF8('富强民主文明和谐', '\x90'); +select endsWithUTF8('富强民主文明和谐', ''); + +SELECT endsWithUTF8('123', '3'); +SELECT endsWithUTF8('123', '23'); +SELECT endsWithUTF8('123', '32'); +SELECT endsWithUTF8('123', ''); +-- { echoOff } diff --git a/parser/testdata/02833_std_alias/ast.json b/parser/testdata/02833_std_alias/ast.json new file mode 100644 index 000000000..590217427 --- /dev/null +++ b/parser/testdata/02833_std_alias/ast.json @@ -0,0 +1,28 
@@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery series (children 1)" + }, + { + "explain": " Identifier series" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001356113, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/02833_std_alias/metadata.json b/parser/testdata/02833_std_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02833_std_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02833_std_alias/query.sql b/parser/testdata/02833_std_alias/query.sql new file mode 100644 index 000000000..256990f3f --- /dev/null +++ b/parser/testdata/02833_std_alias/query.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS series; +CREATE TABLE series(i UInt32, x Float64, y Float64) ENGINE = Memory; +INSERT INTO series(i, x, y) VALUES (1, 5.6,-4.4),(2, -9.6,3),(3, -1.3,-4),(4, 5.3,9.7),(5, 4.4,0.037),(6, -8.6,-7.8),(7, 5.1,9.3),(8, 7.9,-3.6),(9, -8.2,0.62),(10, -3,7.3); + +SELECT std(x), std(y) FROM series; +SELECT stddevPop(x), stddevPop(y) FROM series; + +DROP TABLE series; diff --git a/parser/testdata/02833_tuple_concat/ast.json b/parser/testdata/02833_tuple_concat/ast.json new file mode 100644 index 000000000..2f9d53acf --- /dev/null +++ b/parser/testdata/02833_tuple_concat/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tupleConcat (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.00125201, + "rows_read": 6, + "bytes_read": 221 + } +} diff --git a/parser/testdata/02833_tuple_concat/metadata.json b/parser/testdata/02833_tuple_concat/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02833_tuple_concat/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02833_tuple_concat/query.sql b/parser/testdata/02833_tuple_concat/query.sql new file mode 100644 index 000000000..df43e08d5 --- /dev/null +++ b/parser/testdata/02833_tuple_concat/query.sql @@ -0,0 +1,23 @@ +SELECT tupleConcat(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT tupleConcat((1, 'y'), 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT tupleConcat((1, 'y'), (2, 'n')); +SELECT tupleConcat((1, 'y'), (2, 'n'), (3, 'n')); + +WITH (1,2,3) || ('a','b','c') || ('2020-10-08'::Date, '2020-11-08'::Date) AS t +SELECT t, t.1, t.2, t.3, t.4, t.5, t.6, t.7, t.8; + +DROP TABLE IF EXISTS t_02833; +CREATE TABLE t_02833 (tup Tuple(a UInt64, b UInt64)) ENGINE=Log; +INSERT INTO t_02833 VALUES ((1, 2)); + +WITH (tup || tup) AS res +SELECT res, res.1, res.2, res.3, res.4 FROM t_02833; + +WITH (tup || (3, 4)) AS res +SELECT res, res.1, res.2, res.3, res.4 FROM t_02833; + +WITH ((3, 4) || tup) AS res +SELECT res, res.1, res.2, res.3, res.4 FROM t_02833; + +DROP TABLE t_02833; diff --git a/parser/testdata/02833_window_func_range_offset/ast.json b/parser/testdata/02833_window_func_range_offset/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02833_window_func_range_offset/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02833_window_func_range_offset/metadata.json 
b/parser/testdata/02833_window_func_range_offset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02833_window_func_range_offset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02833_window_func_range_offset/query.sql b/parser/testdata/02833_window_func_range_offset/query.sql new file mode 100644 index 000000000..f1d26c5cb --- /dev/null +++ b/parser/testdata/02833_window_func_range_offset/query.sql @@ -0,0 +1,6 @@ +-- invalid start offset with RANGE +SELECT count() OVER (ORDER BY 3.4028234663852886e38 RANGE BETWEEN 0.0 PRECEDING AND UNBOUNDED FOLLOWING); -- { serverError BAD_ARGUMENTS } +SELECT count() OVER (ORDER BY 3.4028234663852886e38 RANGE BETWEEN nan PRECEDING AND UNBOUNDED FOLLOWING); -- { serverError BAD_ARGUMENTS } +-- invalid end offset with RANGE +SELECT count() OVER (ORDER BY 3.4028234663852886e38 RANGE BETWEEN UNBOUNDED PRECEDING AND 0.0 FOLLOWING); -- { serverError BAD_ARGUMENTS } +SELECT count() OVER (ORDER BY 3.4028234663852886e38 RANGE BETWEEN UNBOUNDED PRECEDING AND nan FOLLOWING); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02834_add_sub_date_functions/ast.json b/parser/testdata/02834_add_sub_date_functions/ast.json new file mode 100644 index 000000000..1a587439f --- /dev/null +++ b/parser/testdata/02834_add_sub_date_functions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001126404, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02834_add_sub_date_functions/metadata.json b/parser/testdata/02834_add_sub_date_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02834_add_sub_date_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02834_add_sub_date_functions/query.sql b/parser/testdata/02834_add_sub_date_functions/query.sql new file mode 100644 index 000000000..49ab30579 --- /dev/null +++ b/parser/testdata/02834_add_sub_date_functions/query.sql @@ -0,0 +1,29 @@ +SET session_timezone = 'UTC'; + +SELECT ADDDATE(materialize('2022-05-07'::Date), INTERVAL 5 MINUTE); + +SELECT addDate('2022-05-07'::Date, INTERVAL 5 MINUTE); +SELECT addDate('2022-05-07'::Date32, INTERVAL 5 MINUTE); +SELECT addDate('2022-05-07'::DateTime, INTERVAL 5 MINUTE); +SELECT addDate('2022-05-07'::DateTime64, INTERVAL 5 MINUTE); +SELECT addDate('2022-05-07', INTERVAL 5 MINUTE); + +SELECT addDate('2022-05-07'::Date); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT addDate('2022-05-07'::Date, INTERVAL 5 MINUTE, 5); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT addDate('2022-05-07'::Date, 10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT addDate(1.2, INTERVAL 5 MINUTE); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT '---'; + +SELECT SUBDATE(materialize('2022-05-07'::Date), INTERVAL 5 MINUTE); + +SELECT subDate('2022-05-07'::Date, INTERVAL 5 MINUTE); +SELECT subDate('2022-05-07'::Date32, INTERVAL 5 MINUTE); +SELECT subDate('2022-05-07'::DateTime, INTERVAL 5 MINUTE); +SELECT subDate('2022-05-07'::DateTime64, INTERVAL 5 MINUTE); +SELECT subDate('2022-05-07'::String, INTERVAL 5 MINUTE); + +SELECT subDate('2022-05-07'::Date); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT subDate('2022-05-07'::Date, INTERVAL 5 MINUTE, 5); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT subDate('2022-05-07'::Date, 10); 
-- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT subDate(1.2, INTERVAL 5 MINUTE); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/02834_alter_exception/ast.json b/parser/testdata/02834_alter_exception/ast.json new file mode 100644 index 000000000..b83c7f8f4 --- /dev/null +++ b/parser/testdata/02834_alter_exception/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alter_02834 (children 1)" + }, + { + "explain": " Identifier alter_02834" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001161461, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/02834_alter_exception/metadata.json b/parser/testdata/02834_alter_exception/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02834_alter_exception/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02834_alter_exception/query.sql b/parser/testdata/02834_alter_exception/query.sql new file mode 100644 index 000000000..d42f40fcb --- /dev/null +++ b/parser/testdata/02834_alter_exception/query.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS alter_02834; +CREATE TABLE alter_02834 (a UInt64) ENGINE=MergeTree() ORDER BY a; +ALTER TABLE alter_02834 MODIFY QUERY SELECT a FROM alter_02834; -- { serverError NOT_IMPLEMENTED } +DROP TABLE alter_02834; diff --git a/parser/testdata/02834_analyzer_with_statement_references/ast.json b/parser/testdata/02834_analyzer_with_statement_references/ast.json new file mode 100644 index 000000000..fcf560efb --- /dev/null +++ b/parser/testdata/02834_analyzer_with_statement_references/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001044406, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02834_analyzer_with_statement_references/metadata.json b/parser/testdata/02834_analyzer_with_statement_references/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02834_analyzer_with_statement_references/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02834_analyzer_with_statement_references/query.sql b/parser/testdata/02834_analyzer_with_statement_references/query.sql new file mode 100644 index 000000000..ce1eaa7ae --- /dev/null +++ b/parser/testdata/02834_analyzer_with_statement_references/query.sql @@ -0,0 +1,7 @@ +SET enable_analyzer = 1; + +WITH test_aliases AS (SELECT number FROM numbers(20)), alias2 AS (SELECT number FROM test_aliases) +SELECT number FROM alias2 SETTINGS enable_global_with_statement = 1; + +WITH test_aliases AS (SELECT number FROM numbers(20)), alias2 AS (SELECT number FROM test_aliases) +SELECT number FROM alias2 SETTINGS enable_global_with_statement = 0; -- { serverError UNKNOWN_TABLE } diff --git a/parser/testdata/02834_apache_arrow_abort/ast.json b/parser/testdata/02834_apache_arrow_abort/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02834_apache_arrow_abort/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02834_apache_arrow_abort/metadata.json b/parser/testdata/02834_apache_arrow_abort/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02834_apache_arrow_abort/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02834_apache_arrow_abort/query.sql b/parser/testdata/02834_apache_arrow_abort/query.sql new file mode 100644 index 000000000..cb0eaebe5 --- /dev/null +++ b/parser/testdata/02834_apache_arrow_abort/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest, no-tsan, no-asan, no-msan, no-ubsan +-- This test depends on internet access, but it does not matter, because it only has to check that there is no abort due to a bug in the Apache Arrow library. +SET optimize_trivial_insert_select=1; +INSERT INTO TABLE FUNCTION url('https://clickhouse-public-datasets.s3.amazonaws.com/hits_compatible/athena_partitioned/hits_9.parquet') SELECT * FROM url('https://clickhouse-public-datasets.s3.amazonaws.com/hits_compatible/athena_partitioned/hits_9.parquet'); -- { serverError CANNOT_WRITE_TO_OSTREAM, RECEIVED_ERROR_FROM_REMOTE_IO_SERVER, POCO_EXCEPTION } diff --git a/parser/testdata/02834_array_exists_segfault/ast.json b/parser/testdata/02834_array_exists_segfault/ast.json new file mode 100644 index 000000000..2c6bdd9f3 --- /dev/null +++ b/parser/testdata/02834_array_exists_segfault/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02834_t (children 1)" + }, + { + "explain": " Identifier 02834_t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00113146, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/02834_array_exists_segfault/metadata.json b/parser/testdata/02834_array_exists_segfault/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02834_array_exists_segfault/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02834_array_exists_segfault/query.sql b/parser/testdata/02834_array_exists_segfault/query.sql new file mode 100644 index 000000000..faf905891 --- /dev/null +++ b/parser/testdata/02834_array_exists_segfault/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS 02834_t; +CREATE TABLE 02834_t (id UInt64, arr Array(UInt64)) ENGINE = MergeTree ORDER BY id; +SET enable_analyzer = 0; +WITH subquery AS (SELECT []) SELECT t.* FROM 02834_t AS t JOIN subquery ON arrayExists(x -> x = 1, t.arr); -- { serverError INVALID_JOIN_ON_EXPRESSION } +SET enable_analyzer = 1; +WITH subquery AS (SELECT []) SELECT t.* FROM 02834_t AS t JOIN subquery ON arrayExists(x -> x = 1, t.arr); +INSERT INTO 02834_t VALUES (1, [1]), (2, [2]), (3, [1, 3]); +WITH subquery AS (SELECT []) SELECT t.* FROM 02834_t AS t JOIN subquery ON arrayExists(x -> x = 1, t.arr) ORDER BY t.id; +DROP TABLE 02834_t; diff --git a/parser/testdata/02834_formats_with_variable_number_of_columns/ast.json b/parser/testdata/02834_formats_with_variable_number_of_columns/ast.json new file mode 100644 index 000000000..860404f0b --- /dev/null +++ b/parser/testdata/02834_formats_with_variable_number_of_columns/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'CSV'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.000990934, + "rows_read": 5, + "bytes_read": 174 + } +} diff --git a/parser/testdata/02834_formats_with_variable_number_of_columns/metadata.json b/parser/testdata/02834_formats_with_variable_number_of_columns/metadata.json new file mode 100644 index
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02834_formats_with_variable_number_of_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02834_formats_with_variable_number_of_columns/query.sql b/parser/testdata/02834_formats_with_variable_number_of_columns/query.sql new file mode 100644 index 000000000..7c55cf2e9 --- /dev/null +++ b/parser/testdata/02834_formats_with_variable_number_of_columns/query.sql @@ -0,0 +1,24 @@ +select 'CSV'; +select * from format(CSV, 'x UInt32, y UInt32', '1,1\n2\n\n3,3,3,3') settings input_format_csv_allow_variable_number_of_columns=1; +select * from format(CSV, '1,1\n2\n\n3,3,3,3') settings input_format_csv_allow_variable_number_of_columns=1; +select * from format(CSVWithNames, '"x","y"\n1,1\n2\n\n3,3,3,3') settings input_format_csv_allow_variable_number_of_columns=1; +select * from format(CSVWithNames, 'x UInt32, z UInt32', '"x","y"\n1,1\n2\n\n3,3,3,3') settings input_format_csv_allow_variable_number_of_columns=1; +select 'TSV'; +select * from format(TSV, 'x UInt32, y UInt32', '1\t1\n2\n\n3\t3\t3\t3') settings input_format_tsv_allow_variable_number_of_columns=1; +select * from format(TSV, '1\t1\n2\n\n3\t3\t3\t3') settings input_format_tsv_allow_variable_number_of_columns=1; +select * from format(TSVWithNames, 'x\ty\n1\t1\n2\n\n3\t3\t3\t3') settings input_format_tsv_allow_variable_number_of_columns=1; +select * from format(TSVWithNames, 'x UInt32, z UInt32', 'x\ty\n1\t1\n2\n\n3\t3\t3\t3') settings input_format_tsv_allow_variable_number_of_columns=1; +select 'JSONCompactEachRow'; +select * from format(JSONCompactEachRow, 'x UInt32, y UInt32', '[1,1]\n[2]\n[]\n[3,3,3,3]') settings input_format_json_compact_allow_variable_number_of_columns=1; +select * from format(JSONCompactEachRow, 'x UInt32, y UInt32', '[1,1,[1,2,3]]\n[2]\n[]\n[3,3,3,3,[1,2,3]]') settings input_format_json_compact_allow_variable_number_of_columns=1; +select * from format(JSONCompactEachRow, 'x UInt32, y Array(UInt32)', '[1,[1,2,3],1]\n[2]\n[]\n[3,[3],3,3,[1,2,3]]') settings input_format_json_compact_allow_variable_number_of_columns=1; +select * from format(JSONCompactEachRow, '[1,1]\n[2]\n[]\n[3,3,3,3]') settings input_format_json_compact_allow_variable_number_of_columns=1; +select * from format(JSONCompactEachRowWithNames, '["x","y"]\n[1,1]\n[2]\n[]\n[3,3,3,3]') settings input_format_json_compact_allow_variable_number_of_columns=1; +select * from format(JSONCompactEachRowWithNames, 'x UInt32, z UInt32', '["x","y"]\n[1,1]\n[2]\n[]\n[3,3,3,3]') settings input_format_json_compact_allow_variable_number_of_columns=1; +select 'CustomSeparated'; +set format_custom_escaping_rule='CSV', format_custom_field_delimiter='<field_delimiter>', format_custom_row_before_delimiter='<row_before_delimiter>', format_custom_row_after_delimiter='<row_after_delimiter>', format_custom_row_between_delimiter='<row_between_delimiter>', format_custom_result_before_delimiter='<result_before_delimiter>', format_custom_result_after_delimiter='<result_after_delimiter>'; +select * from format(CustomSeparated, 'x UInt32, y UInt32', '<result_before_delimiter><row_before_delimiter>1<field_delimiter>1<row_after_delimiter><row_between_delimiter><row_before_delimiter>2<row_after_delimiter><row_between_delimiter><row_before_delimiter><row_after_delimiter><row_between_delimiter><row_before_delimiter>3<field_delimiter>3<field_delimiter>3<field_delimiter>3<row_after_delimiter><result_after_delimiter>') settings input_format_custom_allow_variable_number_of_columns=1; +select * from 
format(CustomSeparated, '<result_before_delimiter><row_before_delimiter>1<field_delimiter>1<row_after_delimiter><row_between_delimiter><row_before_delimiter>2<row_after_delimiter><row_between_delimiter><row_before_delimiter><row_after_delimiter><row_between_delimiter><row_before_delimiter>3<field_delimiter>3<field_delimiter>3<field_delimiter>3<row_after_delimiter><result_after_delimiter>') settings input_format_custom_allow_variable_number_of_columns=1; +select * from format(CustomSeparatedWithNames, '<result_before_delimiter><row_before_delimiter>"x"<field_delimiter>"y"<row_after_delimiter><row_between_delimiter><row_before_delimiter>1<field_delimiter>1<row_after_delimiter><row_between_delimiter><row_before_delimiter>2<row_after_delimiter><row_between_delimiter><row_before_delimiter><row_after_delimiter><row_between_delimiter><row_before_delimiter>3<field_delimiter>3<field_delimiter>3<field_delimiter>3<row_after_delimiter><result_after_delimiter>') settings input_format_custom_allow_variable_number_of_columns=1; +select * from format(CustomSeparatedWithNames, 'x UInt32, z UInt32', '<result_before_delimiter><row_before_delimiter>"x"<field_delimiter>"y"<row_after_delimiter><row_between_delimiter><row_before_delimiter>1<field_delimiter>1<row_after_delimiter><row_between_delimiter><row_before_delimiter>2<row_after_delimiter><row_between_delimiter><row_before_delimiter><row_after_delimiter><row_between_delimiter><row_before_delimiter>3<field_delimiter>3<field_delimiter>3<field_delimiter>3<row_after_delimiter><result_after_delimiter>') settings input_format_custom_allow_variable_number_of_columns=1; + diff --git a/parser/testdata/02834_nulls_first_sort/ast.json b/parser/testdata/02834_nulls_first_sort/ast.json new file mode 100644 index 000000000..d90856f80 --- /dev/null +++ b/parser/testdata/02834_nulls_first_sort/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nulls_first_sort_test (children 1)" + }, + { + "explain": " Identifier nulls_first_sort_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001145505, + "rows_read": 2, + "bytes_read": 94 + } +} diff --git a/parser/testdata/02834_nulls_first_sort/metadata.json b/parser/testdata/02834_nulls_first_sort/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02834_nulls_first_sort/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02834_nulls_first_sort/query.sql b/parser/testdata/02834_nulls_first_sort/query.sql new file mode 100644 index 000000000..e17a49baf --- /dev/null +++ b/parser/testdata/02834_nulls_first_sort/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS nulls_first_sort_test; +CREATE TABLE nulls_first_sort_test (a Nullable(Int32), b Nullable(Int32), c Nullable(Int32)) ENGINE = Memory; + +INSERT INTO nulls_first_sort_test VALUES (5,null,2), (5,null,1), (5,null,7), (5,null,3), (5,7,4), (5,7,6), (5,7,2), (5,7,1), (5,7,3), (5,7,9), (5,1,4), (5,1,6), (5,1,2), (5,1,1), (5,1,3), (5,1,9); + +SELECT * FROM nulls_first_sort_test ORDER BY a NULLS FIRST,b NULLS FIRST,c NULLS FIRST LIMIT 5; +DROP TABLE nulls_first_sort_test; diff --git a/parser/testdata/02834_sparse_columns_sort_with_limit/ast.json b/parser/testdata/02834_sparse_columns_sort_with_limit/ast.json new file mode 100644 index 000000000..07412ed7c --- /dev/null +++ b/parser/testdata/02834_sparse_columns_sort_with_limit/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + 
], + + "data": + [ + { + "explain": "DropQuery t_sparse_sort_limit (children 1)" + }, + { + "explain": " Identifier t_sparse_sort_limit" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001393674, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/02834_sparse_columns_sort_with_limit/metadata.json b/parser/testdata/02834_sparse_columns_sort_with_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02834_sparse_columns_sort_with_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02834_sparse_columns_sort_with_limit/query.sql b/parser/testdata/02834_sparse_columns_sort_with_limit/query.sql new file mode 100644 index 000000000..32bd9694b --- /dev/null +++ b/parser/testdata/02834_sparse_columns_sort_with_limit/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS t_sparse_sort_limit; + +CREATE TABLE t_sparse_sort_limit (date Date, i UInt64, v Int16) +ENGINE = MergeTree ORDER BY (date, i) +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9; + +INSERT INTO t_sparse_sort_limit SELECT '2020-10-10', number % 10, number FROM numbers(100000); +INSERT INTO t_sparse_sort_limit SELECT '2020-10-11', number % 10, number FROM numbers(100000); + +SELECT count() FROM (SELECT toStartOfMonth(date) AS d FROM t_sparse_sort_limit ORDER BY -i LIMIT 65536); + +DROP TABLE IF EXISTS t_sparse_sort_limit; diff --git a/parser/testdata/02834_timestamp_function/ast.json b/parser/testdata/02834_timestamp_function/ast.json new file mode 100644 index 000000000..64159975a --- /dev/null +++ b/parser/testdata/02834_timestamp_function/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001275384, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02834_timestamp_function/metadata.json b/parser/testdata/02834_timestamp_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02834_timestamp_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02834_timestamp_function/query.sql b/parser/testdata/02834_timestamp_function/query.sql new file mode 100644 index 000000000..c816b7d4b --- /dev/null +++ b/parser/testdata/02834_timestamp_function/query.sql @@ -0,0 +1,28 @@ +SET session_timezone = 'UTC'; + +SELECT timestamp('2013-12-31'); +SELECT timestamp('2013-12-31 12:00:00'); +SELECT timestamp('2013-12-31 12:00:00.111111'); +SELECT timestamp('2013-12-31 12:00:00.1111111'); -- ignore > 6 fractional parts +SELECT timestamp('2013-12-31 12:00:00', '12:01:02'); +SELECT timestamp('2013-12-31 12:00:00', '12:01:02.1'); +SELECT timestamp('2013-12-31 12:00:00', '12:01:02.11'); +SELECT timestamp('2013-12-31 12:00:00', '12:01:02.111'); +SELECT timestamp('2013-12-31 12:00:00', '12:01:02.1111'); +SELECT timestamp('2013-12-31 12:00:00', '12:01:02.11111'); +SELECT timestamp('2013-12-31 12:00:00', '12:01:02.111111'); +SELECT timestamp('2013-12-31 12:00:00', '-12:01:02.111111'); +SELECT timestamp('2013-12-31 12:00:00', '-1:01:02.111111'); +SELECT timestamp('2013-12-31 12:00:00', '-100:01:02.111111'); +SELECT timestamp('2013-12-31 12:00:00', '32767:01:02.111111'); +SELECT timestamp('2013-12-31 12:00:00', '32768:01:02.111111'); -- roll over + +SELECT timestamp(materialize('2013-12-31')); +SELECT timestamp(materialize('2013-12-31 12:00:00'), materialize('12:00:00')); + +SELECT TIMESTAMP('2013-12-31'); + 
+SELECT timestamp(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT timestamp('2013-12-31 12:00:00', '12:00:00', ''); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT timestamp(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timestamp(1, 2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/02835_fuzz_remove_redundant_sorting/ast.json b/parser/testdata/02835_fuzz_remove_redundant_sorting/ast.json new file mode 100644 index 000000000..82964be0b --- /dev/null +++ b/parser/testdata/02835_fuzz_remove_redundant_sorting/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery numbers500k (children 1)" + }, + { + "explain": " Identifier numbers500k" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001018302, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/02835_fuzz_remove_redundant_sorting/metadata.json b/parser/testdata/02835_fuzz_remove_redundant_sorting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02835_fuzz_remove_redundant_sorting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02835_fuzz_remove_redundant_sorting/query.sql b/parser/testdata/02835_fuzz_remove_redundant_sorting/query.sql new file mode 100644 index 000000000..bdbc55941 --- /dev/null +++ b/parser/testdata/02835_fuzz_remove_redundant_sorting/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS numbers500k; +CREATE TABLE numbers500k (`number` UInt32) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO numbers500k SELECT number FROM system.numbers LIMIT 500000; +SELECT intDiv(number, NULL) AS k FROM (SELECT * FROM remote('127.0.0.{2,3}', currentDatabase(), numbers500k) PREWHERE 31 WHERE 65537 > 0 ORDER BY number DESC NULLS FIRST) GROUP BY GROUPING SETS ((k)) WITH TOTALS ORDER BY k ASC NULLS LAST LIMIT 2147483648; +DROP TABLE IF EXISTS numbers500k; diff --git a/parser/testdata/02835_join_step_explain/ast.json b/parser/testdata/02835_join_step_explain/ast.json new file mode 100644 index 000000000..d6dfce779 --- /dev/null +++ b/parser/testdata/02835_join_step_explain/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00123655, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02835_join_step_explain/metadata.json b/parser/testdata/02835_join_step_explain/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02835_join_step_explain/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02835_join_step_explain/query.sql b/parser/testdata/02835_join_step_explain/query.sql new file mode 100644 index 000000000..aabc52bd9 --- /dev/null +++ b/parser/testdata/02835_join_step_explain/query.sql @@ -0,0 +1,34 @@ +SET enable_analyzer = 1; +SET parallel_hash_join_threshold = 0; + +DROP TABLE IF EXISTS test_table_1; +CREATE TABLE test_table_1 +( + id UInt64, + value_1 String, + value_2 UInt64 +) ENGINE=MergeTree ORDER BY id; + +DROP TABLE IF EXISTS test_table_2; +CREATE TABLE test_table_2 +( + id UInt64, + value_1 String, + value_2 UInt64 +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test_table_1 VALUES (0, 'Value', 0); +INSERT INTO test_table_2 VALUES (0, 'Value', 0); + +SET query_plan_join_swap_table = 'false'; + +EXPLAIN header = 1, actions = 1 SELECT lhs.id, lhs.value_1, 
rhs.id, rhs.value_1 +FROM test_table_1 AS lhs INNER JOIN test_table_2 AS rhs ON lhs.id = rhs.id; + +SELECT '--'; + +EXPLAIN header = 1, actions = 1 SELECT lhs.id, lhs.value_1, rhs.id, rhs.value_1 +FROM test_table_1 AS lhs ASOF JOIN test_table_2 AS rhs ON lhs.id = rhs.id AND lhs.value_2 < rhs.value_2; + +DROP TABLE test_table_1; +DROP TABLE test_table_2; diff --git a/parser/testdata/02835_nested_array_lowcardinality/ast.json b/parser/testdata/02835_nested_array_lowcardinality/ast.json new file mode 100644 index 000000000..225bc0637 --- /dev/null +++ b/parser/testdata/02835_nested_array_lowcardinality/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery cool_table (children 1)" + }, + { + "explain": " Identifier cool_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001063589, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02835_nested_array_lowcardinality/metadata.json b/parser/testdata/02835_nested_array_lowcardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02835_nested_array_lowcardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02835_nested_array_lowcardinality/query.sql b/parser/testdata/02835_nested_array_lowcardinality/query.sql new file mode 100644 index 000000000..36c1eb39c --- /dev/null +++ b/parser/testdata/02835_nested_array_lowcardinality/query.sql @@ -0,0 +1,49 @@ +DROP TABLE IF EXISTS cool_table; + +CREATE TABLE IF NOT EXISTS cool_table +( + id UInt64, + n Nested(n UInt64, lc1 LowCardinality(String)) +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO cool_table SELECT number, range(number), range(number) FROM numbers(10); + +ALTER TABLE cool_table ADD COLUMN IF NOT EXISTS `n.lc2` Array(LowCardinality(String)); + +SELECT n.lc1, n.lc2 FROM cool_table ORDER BY id; + +DROP TABLE IF EXISTS cool_table; + +CREATE TABLE IF NOT EXISTS cool_table +( + id UInt64, + n Nested(n UInt64, lc1 Array(LowCardinality(String))) +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO cool_table SELECT number, range(number), arrayMap(x -> range(x % 4), range(number)) FROM numbers(10); + +ALTER TABLE cool_table ADD COLUMN IF NOT EXISTS `n.lc2` Array(Array(LowCardinality(String))); + +SELECT n.lc1, n.lc2 FROM cool_table ORDER BY id; + +DROP TABLE IF EXISTS cool_table; + +CREATE TABLE IF NOT EXISTS cool_table +( + id UInt64, + n Nested(n UInt64, lc1 Map(LowCardinality(String), UInt64)) +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO cool_table SELECT number, range(number), arrayMap(x -> (arrayMap(y -> 'k' || toString(y), range(x % 4)), range(x % 4))::Map(LowCardinality(String), UInt64), range(number)) FROM numbers(10); + +ALTER TABLE cool_table ADD COLUMN IF NOT EXISTS `n.lc2` Array(Map(LowCardinality(String), UInt64)); + +SELECT n.lc1, n.lc2 FROM cool_table ORDER BY id; + +DROP TABLE IF EXISTS cool_table; diff --git a/parser/testdata/02835_parallel_replicas_over_distributed/ast.json b/parser/testdata/02835_parallel_replicas_over_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02835_parallel_replicas_over_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02835_parallel_replicas_over_distributed/metadata.json b/parser/testdata/02835_parallel_replicas_over_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02835_parallel_replicas_over_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02835_parallel_replicas_over_distributed/query.sql b/parser/testdata/02835_parallel_replicas_over_distributed/query.sql new file mode 100644 index 000000000..51cfb93f9 --- /dev/null +++ b/parser/testdata/02835_parallel_replicas_over_distributed/query.sql @@ -0,0 +1,46 @@ +-- 1 shard + +SELECT '-- 1 shard, 3 replicas'; +DROP TABLE IF EXISTS test_d; +DROP TABLE IF EXISTS test; +CREATE TABLE test (id UInt64, date Date) +ENGINE = MergeTree +ORDER BY id; + +CREATE TABLE IF NOT EXISTS test_d as test +ENGINE = Distributed(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test); + +insert into test select *, today() from numbers(100); + +SET enable_parallel_replicas = 2, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree=1; +SET parallel_replicas_only_with_analyzer = 0; -- necessary for CI run with disabled analyzer + +SELECT count(), min(id), max(id), avg(id) +FROM test_d; + +insert into test select *, today() from numbers(100); + +SELECT count(), min(id), max(id), avg(id) +FROM test_d; + +-- 2 shards + +SELECT '-- 2 shards, 3 replicas each'; +DROP TABLE IF EXISTS test2_d; +DROP TABLE IF EXISTS test2; +CREATE TABLE test2 (id UInt64, date Date) +ENGINE = MergeTree +ORDER BY id; + +CREATE TABLE IF NOT EXISTS test2_d as test2 +ENGINE = Distributed(test_cluster_two_shard_three_replicas_localhost, currentDatabase(), test2, id); + +insert into test2 select *, today() from numbers(100); + +SELECT count(), min(id), max(id), avg(id) +FROM test2_d; + +insert into test2 select *, today() from numbers(100); + +SELECT count(), min(id), max(id), avg(id) +FROM test2_d; diff --git a/parser/testdata/02840_grace_hash_join_structure_mismatch/ast.json b/parser/testdata/02840_grace_hash_join_structure_mismatch/ast.json new file mode 100644 index 000000000..2d91b8cb5 --- /dev/null +++ b/parser/testdata/02840_grace_hash_join_structure_mismatch/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001544466, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02840_grace_hash_join_structure_mismatch/metadata.json b/parser/testdata/02840_grace_hash_join_structure_mismatch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02840_grace_hash_join_structure_mismatch/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02840_grace_hash_join_structure_mismatch/query.sql b/parser/testdata/02840_grace_hash_join_structure_mismatch/query.sql new file mode 100644 index 000000000..03b930ef8 --- /dev/null +++ b/parser/testdata/02840_grace_hash_join_structure_mismatch/query.sql @@ -0,0 +1,9 @@ +set allow_suspicious_low_cardinality_types = 1; +CREATE TABLE t1__fuzz_17 (`a` LowCardinality(UInt8), `b` Nullable(UInt256)) ENGINE = Memory; +CREATE TABLE t2__fuzz_0 (`c` UInt32, `d` String) ENGINE = Memory; + +insert into t1__fuzz_17 select * from generateRandom() limit 1; +insert into t2__fuzz_0 select * from generateRandom() limit 1; + +set join_algorithm='grace_hash'; +SELECT * FROM t1__fuzz_17 INNER JOIN t2__fuzz_0 ON c = a WHERE a format Null; diff --git a/parser/testdata/02841_group_array_sorted/ast.json b/parser/testdata/02841_group_array_sorted/ast.json new file mode 100644 index 000000000..573c87d6b --- /dev/null +++ 
b/parser/testdata/02841_group_array_sorted/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function groupArraySorted (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001173324, + "rows_read": 15, + "bytes_read": 594 + } +} diff --git a/parser/testdata/02841_group_array_sorted/metadata.json b/parser/testdata/02841_group_array_sorted/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02841_group_array_sorted/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02841_group_array_sorted/query.sql b/parser/testdata/02841_group_array_sorted/query.sql new file mode 100644 index 000000000..a8cd6791f --- /dev/null +++ b/parser/testdata/02841_group_array_sorted/query.sql @@ -0,0 +1,41 @@ +SELECT groupArraySorted(5)(number) FROM numbers(100); + +SELECT groupArraySorted(10)(number) FROM numbers(5); + +SELECT groupArraySorted(100)(number) FROM numbers(1000); + +SELECT groupArraySorted(30)(str) FROM (SELECT toString(number) as str FROM numbers(30)); + +SELECT groupArraySorted(10)(toInt64(number/2)) FROM numbers(100); + +DROP TABLE IF EXISTS test; +CREATE TABLE test (a Array(UInt64)) engine=MergeTree ORDER BY a; +INSERT INTO test VALUES ([3,4,5,6]), ([1,2,3,4]), ([2,3,4,5]); +SELECT groupArraySorted(3)(a) FROM test; +DROP TABLE test; + +CREATE TABLE IF NOT EXISTS test (id Int32, data Tuple(Int32, Int32)) ENGINE = MergeTree() ORDER BY id; +INSERT INTO test (id, data) VALUES (1, (100, 200)), (2, (15, 25)), (3, (2, 1)), (4, (30, 60)); +SELECT groupArraySorted(4)(data) FROM test; +DROP TABLE test; + +CREATE TABLE IF NOT EXISTS test (id Int32, data Decimal32(2)) ENGINE = MergeTree() ORDER BY id; +INSERT INTO test (id, data) VALUES (1, 12.5), (2, 0.2), (3, 6.6), (4, 2.2); +SELECT groupArraySorted(4)(data) FROM test; +DROP TABLE test; + +CREATE TABLE IF NOT EXISTS test (id Int32, data FixedString(3)) ENGINE = MergeTree() ORDER BY id; +INSERT INTO test (id, data) VALUES (1, 'AAA'), (2, 'bbc'), (3, 'abc'), (4, 'aaa'), (5, 'Aaa'); +SELECT groupArraySorted(5)(data) FROM test; +DROP TABLE test; + +CREATE TABLE test (id Decimal(76, 53), str String) ENGINE = MergeTree ORDER BY id; +INSERT INTO test SELECT number, 'test' FROM numbers(1000000); +SELECT count(id) FROM test; +SELECT count(concat(toString(id), 'a')) FROM test; +DROP TABLE test; + +CREATE TABLE test (id UInt64, agg AggregateFunction(groupArraySorted(2), UInt64)) engine=MergeTree ORDER BY id; +INSERT INTO test SELECT 1, groupArraySortedState(2)(number) FROM numbers(10); +SELECT groupArraySortedMerge(2)(agg) FROM test; +DROP TABLE test; diff --git a/parser/testdata/02841_join_filter_set_sparse/ast.json b/parser/testdata/02841_join_filter_set_sparse/ast.json new file 
mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02841_join_filter_set_sparse/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02841_join_filter_set_sparse/metadata.json b/parser/testdata/02841_join_filter_set_sparse/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02841_join_filter_set_sparse/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02841_join_filter_set_sparse/query.sql b/parser/testdata/02841_join_filter_set_sparse/query.sql new file mode 100644 index 000000000..e1a33998d --- /dev/null +++ b/parser/testdata/02841_join_filter_set_sparse/query.sql @@ -0,0 +1,22 @@ + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (s String) ENGINE = MergeTree ORDER BY s +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.5; + +INSERT INTO t1 SELECT if (number % 13 = 0, toString(number), '') FROM numbers(2000); + +CREATE TABLE t2 (s String) ENGINE = MergeTree ORDER BY s +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.5; + +INSERT INTO t2 SELECT if (number % 14 = 0, toString(number), '') FROM numbers(2000); + +SELECT countIf(ignore(*) == 0) FROM t1 JOIN t2 ON t1.s = t2.s; + +SET join_algorithm = 'full_sorting_merge', max_rows_in_set_to_optimize_join = 100_000; + +SELECT countIf(ignore(*) == 0) FROM t1 JOIN t2 ON t1.s = t2.s; + +DROP TABLE t1; +DROP TABLE t2; diff --git a/parser/testdata/02841_not_ready_set_constraints/ast.json b/parser/testdata/02841_not_ready_set_constraints/ast.json new file mode 100644 index 000000000..d27a631d2 --- /dev/null +++ b/parser/testdata/02841_not_ready_set_constraints/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001206542, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02841_not_ready_set_constraints/metadata.json b/parser/testdata/02841_not_ready_set_constraints/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02841_not_ready_set_constraints/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02841_not_ready_set_constraints/query.sql b/parser/testdata/02841_not_ready_set_constraints/query.sql new file mode 100644 index 000000000..274940f50 --- /dev/null +++ b/parser/testdata/02841_not_ready_set_constraints/query.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 ( + `id` UInt64 +) +ENGINE = MergeTree ORDER BY id; + +INSERT INTO t1(id) VALUES (42); + +CREATE TABLE t2 ( + `conversation` UInt64, + CONSTRAINT constraint_conversation CHECK conversation IN (SELECT id FROM t1) +) +ENGINE = MergeTree ORDER BY conversation; + +INSERT INTO t2(conversation) VALUES (42); + +select * from t2; + +drop table t1; + +INSERT INTO t2(conversation) VALUES (42); -- { serverError UNKNOWN_TABLE } + +drop table t2; + +CREATE TABLE t2 ( + `conversation` UInt64, + CONSTRAINT constraint_conversation CHECK conversation IN (SELECT id FROM t1) +) +ENGINE = MergeTree ORDER BY conversation; + +INSERT INTO t2(conversation) VALUES (42); -- { serverError UNKNOWN_TABLE } + +CREATE TABLE t1 ( + `id` UInt64 +) +ENGINE = MergeTree ORDER BY id; + +INSERT INTO t1(id) VALUES (42); + +INSERT INTO t2(conversation) VALUES (42); +select * from t2; diff --git a/parser/testdata/02841_not_ready_set_join_on/ast.json 
b/parser/testdata/02841_not_ready_set_join_on/ast.json new file mode 100644 index 000000000..7cc64a031 --- /dev/null +++ b/parser/testdata/02841_not_ready_set_join_on/ast.json @@ -0,0 +1,166 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " WithElement (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_42 (alias key)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier l_t" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (alias r_t) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_50" + }, + { + "explain": " TableJoin (children 1)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier l_t.key" + }, + { + "explain": " Identifier r_t.number" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier r_t.number" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_1000" + }, + { + "explain": " Set" + } + ], + + "rows": 48, + + "statistics": + { + "elapsed": 0.001701012, + "rows_read": 48, + "bytes_read": 2044 + } +} diff --git a/parser/testdata/02841_not_ready_set_join_on/metadata.json b/parser/testdata/02841_not_ready_set_join_on/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02841_not_ready_set_join_on/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02841_not_ready_set_join_on/query.sql b/parser/testdata/02841_not_ready_set_join_on/query.sql new file mode 100644 index 000000000..0d580346c --- /dev/null +++ b/parser/testdata/02841_not_ready_set_join_on/query.sql @@ -0,0 +1,5 @@ +with l_t as (select 42 as key) select * from l_t inner join numbers(50) as r_t on l_t.key = r_t.number and r_t.number in 
(select number * 2 from numbers(1e3)) SETTINGS enable_analyzer=0; +with l_t as (select 42 as key) select * from l_t inner join numbers(50) as r_t on l_t.key = r_t.number and r_t.number in (select number * 2 from numbers(1e3)) SETTINGS enable_analyzer=1; + +with l_t as (select 42 as key) select * from l_t inner join numbers(50) as r_t on l_t.key = r_t.number and r_t.number global in (select number * 2 from numbers(1e3)) SETTINGS enable_analyzer=1; +with l_t as (select 42 as key) select * from l_t inner join numbers(50) as r_t on l_t.key = r_t.number and r_t.number global in (select number * 2 from numbers(1e3)) SETTINGS enable_analyzer=0; diff --git a/parser/testdata/02841_parallel_final_wrong_columns_order/ast.json b/parser/testdata/02841_parallel_final_wrong_columns_order/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02841_parallel_final_wrong_columns_order/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02841_parallel_final_wrong_columns_order/metadata.json b/parser/testdata/02841_parallel_final_wrong_columns_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02841_parallel_final_wrong_columns_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02841_parallel_final_wrong_columns_order/query.sql b/parser/testdata/02841_parallel_final_wrong_columns_order/query.sql new file mode 100644 index 000000000..db15abb28 --- /dev/null +++ b/parser/testdata/02841_parallel_final_wrong_columns_order/query.sql @@ -0,0 +1,9 @@ +-- Tags: no-random-merge-tree-settings +-- Because we insert one million rows, it shouldn't choose too low an index granularity. + +drop table if exists tab2; +create table tab2 (id String, version Int64, l String, accountCode String, z Int32) engine = ReplacingMergeTree(z) PRIMARY KEY (accountCode, id) ORDER BY (accountCode, id, version, l); +insert into tab2 select toString(number), number, toString(number), toString(number), 0 from numbers(1e6); +set max_threads=2; +select count() from tab2 final; +DROP TABLE tab2; diff --git a/parser/testdata/02841_parquet_filter_pushdown/ast.json b/parser/testdata/02841_parquet_filter_pushdown/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02841_parquet_filter_pushdown/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02841_parquet_filter_pushdown/metadata.json b/parser/testdata/02841_parquet_filter_pushdown/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02841_parquet_filter_pushdown/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02841_parquet_filter_pushdown/query.sql b/parser/testdata/02841_parquet_filter_pushdown/query.sql new file mode 100644 index 000000000..86319e07b --- /dev/null +++ b/parser/testdata/02841_parquet_filter_pushdown/query.sql @@ -0,0 +1,144 @@ +-- Tags: no-fasttest, no-parallel + +set output_format_parquet_row_group_size = 100; + +set input_format_null_as_default = 1; +set engine_file_truncate_on_insert = 1; +set optimize_or_like_chain = 0; +set max_block_size = 100000; +set max_insert_threads = 1; +set input_format_parquet_bloom_filter_push_down = 0; +set input_format_parquet_page_filter_push_down = 0; + +-- Try all the types. +insert into function file('02841.parquet') + -- Use negative numbers to test sign extension for signed types and lack of sign extension for
+ with 5000 - number as n select + + number, + + intDiv(n, 11)::UInt8 as u8, + n::UInt16 u16, + n::UInt32 as u32, + n::UInt64 as u64, + intDiv(n, 11)::Int8 as i8, + n::Int16 i16, + n::Int32 as i32, + n::Int64 as i64, + + toDate32(n*500000) as date32, + toDateTime64(n*1e6, 3) as dt64_ms, + toDateTime64(n*1e6, 6) as dt64_us, + toDateTime64(n*1e6, 9) as dt64_ns, + toDateTime64(n*1e6, 0) as dt64_s, + toDateTime64(n*1e6, 2) as dt64_cs, + + (n/1000)::Float32 as f32, + (n/1000)::Float64 as f64, + + n::String as s, + n::String::FixedString(9) as fs, + + n::Decimal32(3)/1234 as d32, + n::Decimal64(10)/12345678 as d64, + n::Decimal128(20)/123456789012345 as d128, + n::Decimal256(40)/123456789012345/678901234567890 as d256 + + from numbers(10000); + +desc file('02841.parquet'); + +-- To generate reference results, use a temporary table and GROUP BYs to simulate row group filtering: +-- create temporary table t as with [as above] select intDiv(number, 100) as group, [as above]; +-- then e.g. for a query that filters by `x BETWEEN a AND b`: +-- select sum(c), sum(h) from (select count() as c, sum(number) as h, min(x) as mn, max(x) as mx from t group by group) where a <= mx and b >= mn; + +select '# Go over all types individually'; +select count(), sum(number) from file('02841.parquet') where indexHint(u8 in (10, 15, 250)); +select count(), sum(number) from file('02841.parquet') where indexHint(i8 between -3 and 2); +select count(), sum(number) from file('02841.parquet') where indexHint(u16 between 4000 and 61000 or u16 == 42); +select count(), sum(number) from file('02841.parquet') where indexHint(i16 between -150 and 250); +select count(), sum(number) from file('02841.parquet') where indexHint(u32 in (42, 4294966296)); +select count(), sum(number) from file('02841.parquet') where indexHint(i32 between -150 and 250); +select count(), sum(number) from file('02841.parquet') where indexHint(u64 in (42, 18446744073709550616)); +select count(), sum(number) from file('02841.parquet') where indexHint(i64 between -150 and 250); +select count(), sum(number) from file('02841.parquet') where indexHint(date32 between '1992-01-01' and '2023-08-02'); +select count(), sum(number) from file('02841.parquet') where indexHint(dt64_ms between '2000-01-01' and '2005-01-01'); +select count(), sum(number) from file('02841.parquet') where indexHint(dt64_us between toDateTime64(900000000, 2) and '2005-01-01'); +select count(), sum(number) from file('02841.parquet') where indexHint(dt64_ns between '2000-01-01' and '2005-01-01'); +select count(), sum(number) from file('02841.parquet') where indexHint(dt64_s between toDateTime64('-2.01e8'::Decimal64(0), 0) and toDateTime64(1.5e8::Decimal64(0), 0)); +select count(), sum(number) from file('02841.parquet') where indexHint(dt64_cs between toDateTime64('-2.01e8'::Decimal64(1), 1) and toDateTime64(1.5e8::Decimal64(2), 2)); +select count(), sum(number) from file('02841.parquet') where indexHint(f32 between -0.11::Float32 and 0.06::Float32); +select count(), sum(number) from file('02841.parquet') where indexHint(f64 between -0.11 and 0.06); +select count(), sum(number) from file('02841.parquet') where indexHint(s between '-9' and '1!!!'); +select count(), sum(number) from file('02841.parquet') where indexHint(fs between '-9' and '1!!!'); +select count(), sum(number) from file('02841.parquet') where indexHint(d32 between '-0.011'::Decimal32(3) and 0.006::Decimal32(3)); +select count(), sum(number) from file('02841.parquet') where indexHint(d64 between '-0.0000011'::Decimal64(7) and 
0.0000006::Decimal64(9)); +select count(), sum(number) from file('02841.parquet') where indexHint(d128 between '-0.00000000000011'::Decimal128(20) and 0.00000000000006::Decimal128(20)); +select count(), sum(number) from file('02841.parquet') where indexHint(d256 between '-0.00000000000000000000000000011'::Decimal256(40) and 0.00000000000000000000000000006::Decimal256(35)); + +select '# Some random other cases'; +select count(), sum(number) from file('02841.parquet') where indexHint(0); +select count(), sum(number) from file('02841.parquet') where indexHint(s like '99%' or u64 == 2000); +select count(), sum(number) from file('02841.parquet') where indexHint(s like 'z%'); +select count(), sum(number) from file('02841.parquet') where indexHint(u8 == 10 or 1 == 1); +select count(), sum(number) from file('02841.parquet') where indexHint(u8 < 0); +select count(), sum(number) from file('02841.parquet') where indexHint(u64 + 1000000 == 1001000); +select count(), sum(number) from file('02841.parquet') where indexHint(u64 + 1000000 == 1001000) settings input_format_parquet_filter_push_down = 0; +select count(), sum(number) from file('02841.parquet') where indexHint(u32 + 1000000 == 999000); + +select '# Very long string, which makes the Parquet encoder omit the corresponding min/max stat'; +insert into function file('02841.parquet') + select arrayStringConcat(range(number*1000000)) as s from numbers(2); +select count() from file('02841.parquet') where indexHint(s > ''); + +select '# Nullable and LowCardinality'; +insert into function file('02841.parquet') select + number, + if(number%234 == 0, NULL, number) as sometimes_null, + toNullable(number) as never_null, + if(number%345 == 0, number::String, NULL) as mostly_null, + toLowCardinality(if(number%234 == 0, NULL, number)) as sometimes_null_lc, + toLowCardinality(toNullable(number)) as never_null_lc, + toLowCardinality(if(number%345 == 0, number::String, NULL)) as mostly_null_lc + from numbers(1000); + +desc file('02841.parquet'); +select count(), sum(number) from file('02841.parquet') where indexHint(sometimes_null is NULL); +select count(), sum(number) from file('02841.parquet') where indexHint(sometimes_null_lc is NULL); +select count(), sum(number) from file('02841.parquet') where indexHint(mostly_null is not NULL); +select count(), sum(number) from file('02841.parquet') where indexHint(mostly_null_lc is not NULL); +select count(), sum(number) from file('02841.parquet') where indexHint(sometimes_null > 850); +select count(), sum(number) from file('02841.parquet') where indexHint(sometimes_null_lc > 850); +select count(), sum(number) from file('02841.parquet') where indexHint(never_null > 850); +select count(), sum(number) from file('02841.parquet') where indexHint(never_null_lc > 850); +select count(), sum(number) from file('02841.parquet') where indexHint(never_null < 150); +select count(), sum(number) from file('02841.parquet') where indexHint(never_null_lc < 150); +-- Quirk with infinities: this reads too much because KeyCondition represents NULLs as infinities. 
+select count(), sum(number) from file('02841.parquet') where indexHint(sometimes_null < 150); +select count(), sum(number) from file('02841.parquet') where indexHint(sometimes_null_lc < 150); + +select '# Settings that affect the table schema or contents'; +insert into function file('02841.parquet') select + number, + if(number%234 == 0, NULL, number + 100) as positive_or_null, + if(number%234 == 0, NULL, -number - 100) as negative_or_null, + if(number%234 == 0, NULL, 'I am a string') as string_or_null + from numbers(1000); + +select count(), sum(number) from file('02841.parquet') where indexHint(positive_or_null < 50); -- quirk with infinities +select count(), sum(number) from file('02841.parquet', Parquet, 'number UInt64, positive_or_null UInt64') where indexHint(positive_or_null < 50); +select count(), sum(number) from file('02841.parquet') where indexHint(negative_or_null > -50); +select count(), sum(number) from file('02841.parquet', Parquet, 'number UInt64, negative_or_null Int64') where indexHint(negative_or_null > -50); +select count(), sum(number) from file('02841.parquet') where indexHint(string_or_null == ''); -- quirk with infinities + +-- Parquet index analysis doesn't support empty() function yet +select count(), sum(number) from file('02841.parquet', Parquet, 'number UInt64, string_or_null String') where indexHint(string_or_null == '') settings optimize_empty_string_comparisons = 0; +select count(), sum(number) from file('02841.parquet', Parquet, 'number UInt64, nEgAtIvE_oR_nUlL Int64') where indexHint(nEgAtIvE_oR_nUlL > -50) settings input_format_parquet_case_insensitive_column_matching = 1; + +select '# Bad type conversions'; +insert into function file('02841.parquet') select 42 as x; +select * from file('02841.parquet', Parquet, 'x Nullable(String)') where x not in (1); +insert into function file('t.parquet', Parquet, 'x String') values ('1'), ('100'), ('2'); +select * from file('t.parquet', Parquet, 'x Int64') where x >= 3; diff --git a/parser/testdata/02841_remote_parameter_parsing_error/ast.json b/parser/testdata/02841_remote_parameter_parsing_error/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02841_remote_parameter_parsing_error/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02841_remote_parameter_parsing_error/metadata.json b/parser/testdata/02841_remote_parameter_parsing_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02841_remote_parameter_parsing_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02841_remote_parameter_parsing_error/query.sql b/parser/testdata/02841_remote_parameter_parsing_error/query.sql new file mode 100644 index 000000000..9e467a1f6 --- /dev/null +++ b/parser/testdata/02841_remote_parameter_parsing_error/query.sql @@ -0,0 +1,14 @@ +-- Tags: shard + + +select * from remote('127.0.0.1', sys); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select * from remote('127.0.0.1', system); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select * from remote('127.0.0.1', system.o); -- { serverError UNKNOWN_TABLE } +select * from remote('127.0.0.1', system.one, default); -- { serverError UNKNOWN_IDENTIFIER } +select * from remote('127.0.0.1', system.one, default, ''); -- { serverError BAD_ARGUMENTS } +select * from remote('127.0.0.1', system.one, default, key1); -- { serverError BAD_ARGUMENTS } +select * from remote('127.0.0.1', system.one, 'default', '', key1); -- { serverError 
UNKNOWN_IDENTIFIER } +select * from remote('127.0.0.1', system.one, default, '', key1); -- { serverError BAD_ARGUMENTS } +select * from remote('127.0.0.1', system.one, 'default', pwd, key1); -- { serverError BAD_ARGUMENTS } +select * from remote('127.0.0.1', system.one, 'default', '', key1, key2); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select * from remote('127.0.0.1', system, one, 'default', '', key1, key2); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/parser/testdata/02841_tuple_modulo/ast.json b/parser/testdata/02841_tuple_modulo/ast.json new file mode 100644 index 000000000..354c88716 --- /dev/null +++ b/parser/testdata/02841_tuple_modulo/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(UInt64_5, UInt64_4)" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001419049, + "rows_read": 8, + "bytes_read": 307 + } +} diff --git a/parser/testdata/02841_tuple_modulo/metadata.json b/parser/testdata/02841_tuple_modulo/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02841_tuple_modulo/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02841_tuple_modulo/query.sql b/parser/testdata/02841_tuple_modulo/query.sql new file mode 100644 index 000000000..56bacf879 --- /dev/null +++ b/parser/testdata/02841_tuple_modulo/query.sql @@ -0,0 +1,4 @@ +SELECT (5,4) % 2; +SELECT intDiv((5,4), 2); +SELECT intDivOrZero((5,4), 2); +SELECT intDivOrZero((5,4), 0); diff --git a/parser/testdata/02841_with_clause_resolve/ast.json b/parser/testdata/02841_with_clause_resolve/ast.json new file mode 100644 index 000000000..afc1fad6f --- /dev/null +++ b/parser/testdata/02841_with_clause_resolve/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.0012432, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02841_with_clause_resolve/metadata.json b/parser/testdata/02841_with_clause_resolve/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02841_with_clause_resolve/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02841_with_clause_resolve/query.sql b/parser/testdata/02841_with_clause_resolve/query.sql new file mode 100644 index 000000000..fe94a2611 --- /dev/null +++ b/parser/testdata/02841_with_clause_resolve/query.sql @@ -0,0 +1,141 @@ +set enable_analyzer = 1; + +WITH + -- Input + 44100 AS sample_frequency + , number AS tick + , tick / sample_frequency AS time + + -- Delay + , (time, wave, delay_, decay, count) -> arraySum(n1 -> wave(time - delay_ * n1), range(count)) AS delay + + , delay(time, (time -> 0.5), 0.2, 0.5, 5) AS kick + +SELECT + + kick + +FROM system.numbers +LIMIT 5; + +WITH + -- Input + 44100 AS sample_frequency + , number AS tick + , tick / sample_frequency AS time + + -- Output control + , 1 AS master_volume + , level -> least(1.0, greatest(-1.0, level)) AS clamp + , level -> (clamp(level) * 0x7FFF * 
master_volume)::Int16 AS output + , x -> (x, x) AS mono + + -- Basic waves + , time -> sin(time * 2 * pi()) AS sine_wave + , time -> time::UInt64 % 2 * 2 - 1 AS square_wave + , time -> (time - floor(time)) * 2 - 1 AS sawtooth_wave + , time -> abs(sawtooth_wave(time)) * 2 - 1 AS triangle_wave + + -- Helpers + , (from, to, wave, time) -> from + ((wave(time) + 1) / 2) * (to - from) AS lfo + , (from, to, steps, time) -> from + floor((time - floor(time)) * steps) / steps * (to - from) AS step_lfo + , (from, to, steps, time) -> exp(step_lfo(log(from), log(to), steps, time)) AS exp_step_lfo + + -- Noise + , time -> cityHash64(time) / 0xFFFFFFFFFFFFFFFF AS uniform_noise + , time -> erf(uniform_noise(time)) AS white_noise + , time -> cityHash64(time) % 2 ? 1 : -1 AS bernoulli_noise + + -- Distortion + , (x, amount) -> clamp(x * amount) AS clipping + , (x, amount) -> clamp(x > 0 ? pow(x, amount) : -pow(-x, amount)) AS power_distortion + , (x, amount) -> round(x * exp2(amount)) / exp2(amount) AS bitcrush + , (time, sample_frequency) -> round(time * sample_frequency) / sample_frequency AS desample + , (time, wave, amount) -> (time - floor(time) < (1 - amount)) ? wave(time * (1 - amount)) : 0 AS thin + , (time, wave, amount) -> wave(floor(time) + pow(time - floor(time), amount)) AS skew + + -- Combining + , (a, b, weight) -> a * (1 - weight) + b * weight AS combine + + -- Envelopes + , (time, offset, attack, hold, release) -> + time < offset ? 0 + : (time < offset + attack ? ((time - offset) / attack) + : (time < offset + attack + hold ? 1 + : (time < offset + attack + hold + release ? (offset + attack + hold + release - time) / release + : 0))) AS envelope + + , (bpm, time, offset, attack, hold, release) -> + envelope( + time * (bpm / 60) - floor(time * (bpm / 60)), + offset, + attack, + hold, + release) AS running_envelope + + -- Sequencers + , (sequence, time) -> sequence[1 + time::UInt64 % length(sequence)] AS sequencer + + -- Delay + , (time, wave, delay, decay, count) -> arraySum(n -> wave(time - delay * n) * pow(decay, n), range(count)) AS delay + + + , delay(time, (time -> power_distortion(sine_wave(time * 80 + sine_wave(time * 2)), lfo(0.5, 1, sine_wave, time / 16)) + * running_envelope(60, time, 0, 0.0, 0.01, 0.1)), + 0.2, 0.5, 5) AS kick + +SELECT + + (output( + kick + + delay(time, (time -> + power_distortion( + sine_wave(time * 50 + 1 * sine_wave(time * 100 + 1/4)) + * running_envelope(60, time, 0, 0.01, 0.01, 0.1), + lfo(1, 0.75, triangle_wave, time / 8))), + 0.2, 0.5, 10) + * lfo(0.5, 1, triangle_wave, time / 7) + + + delay(time, (time -> + power_distortion( + sine_wave(time * sequencer([50, 100, 200, 400], time / 2) + 1 * sine_wave(time * sequencer([50, 100, 200], time / 4) + 1/4)) + * running_envelope(60, time, 0.5, 0.01, 0.01, 0.1), + lfo(1, 0.75, triangle_wave, time / 8))), + 0.2, 0.5, 10) + * lfo(0.5, 1, triangle_wave, 16 + time / 11) + + + delay(time, (time -> + white_noise(time) * running_envelope(60, time, 0.75, 0.01, 0.01, 0.1)), + 0.2, 0.5, 10) + * lfo(0.5, 1, triangle_wave, 24 + time / 13) + + + sine_wave(time * 100 + 1 * sine_wave(time * 10 + 1/4)) + * running_envelope(120, time, 0, 0.01, 0.01, 0.1) + ), + + output( + kick + + delay(time + 0.01, (time -> + power_distortion( + sine_wave(time * 50 + 1 * sine_wave(time * 100 + 1/4)) + * running_envelope(60, time, 0, 0.01, 0.01, 0.1), + lfo(1, 0.75, triangle_wave, time / 8))), + 0.2, 0.5, 10) + * lfo(0.5, 1, triangle_wave, time / 7) + + + delay(time - 0.01, (time -> + power_distortion( + sine_wave(time * sequencer([50, 100, 200, 
400], time / 2) + 1 * sine_wave(time * sequencer([50, 100, 200], time / 4) + 1/4)) + * running_envelope(60, time, 0.5, 0.01, 0.01, 0.1), + lfo(1, 0.75, triangle_wave, time / 8))), + 0.2, 0.5, 10) + * lfo(0.5, 1, triangle_wave, 16 + time / 11) + + + delay(time + 0.005, (time -> + white_noise(time) * running_envelope(60, time, 0.75, 0.01, 0.01, 0.1)), + 0.2, 0.5, 10) + * lfo(0.5, 1, triangle_wave, 24 + time / 13) + )) + +FROM system.numbers +LIMIT 10; diff --git a/parser/testdata/02842_filesystem_cache_validate_path/ast.json b/parser/testdata/02842_filesystem_cache_validate_path/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02842_filesystem_cache_validate_path/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02842_filesystem_cache_validate_path/metadata.json b/parser/testdata/02842_filesystem_cache_validate_path/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02842_filesystem_cache_validate_path/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02842_filesystem_cache_validate_path/query.sql b/parser/testdata/02842_filesystem_cache_validate_path/query.sql new file mode 100644 index 000000000..c33a5cf65 --- /dev/null +++ b/parser/testdata/02842_filesystem_cache_validate_path/query.sql @@ -0,0 +1,45 @@ +-- Tags: no-fasttest, no-replicated-database + +DROP TABLE IF EXISTS test; +DROP TABLE IF EXISTS test_1; +DROP TABLE IF EXISTS test_2; + +CREATE TABLE test (a Int32) +ENGINE = MergeTree() +ORDER BY tuple() +SETTINGS disk = disk(type = cache, + max_size = '1Mi', + path = '/kek', + disk = 'local_disk'); -- {serverError BAD_ARGUMENTS} + +CREATE TABLE test (a Int32) +ENGINE = MergeTree() +ORDER BY tuple() +SETTINGS disk = disk(type = cache, + max_size = '1Mi', + path = '/var/lib/clickhouse/filesystem_caches/../kek', + disk = 'local_disk'); -- {serverError BAD_ARGUMENTS} + +CREATE TABLE test (a Int32) +ENGINE = MergeTree() +ORDER BY tuple() +SETTINGS disk = disk(type = cache, + max_size = '1Mi', + path = '../kek', + disk = 'local_disk'); -- {serverError BAD_ARGUMENTS} + +CREATE TABLE test_1 (a Int32) +ENGINE = MergeTree() +ORDER BY tuple() +SETTINGS disk = disk(type = cache, + max_size = '1Mi', + path = '/var/lib/clickhouse/filesystem_caches/kek', + disk = 'local_disk'); + +CREATE TABLE test_2 (a Int32) +ENGINE = MergeTree() +ORDER BY tuple() +SETTINGS disk = disk(type = cache, + max_size = '1Mi', + path = 'kek2', + disk = 'local_disk'); diff --git a/parser/testdata/02842_largestTriangleThreeBuckets_aggregate_function/ast.json b/parser/testdata/02842_largestTriangleThreeBuckets_aggregate_function/ast.json new file mode 100644 index 000000000..125fa4bfa --- /dev/null +++ b/parser/testdata/02842_largestTriangleThreeBuckets_aggregate_function/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001019173, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02842_largestTriangleThreeBuckets_aggregate_function/metadata.json b/parser/testdata/02842_largestTriangleThreeBuckets_aggregate_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02842_largestTriangleThreeBuckets_aggregate_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02842_largestTriangleThreeBuckets_aggregate_function/query.sql 
b/parser/testdata/02842_largestTriangleThreeBuckets_aggregate_function/query.sql new file mode 100644 index 000000000..438302dc1 --- /dev/null +++ b/parser/testdata/02842_largestTriangleThreeBuckets_aggregate_function/query.sql @@ -0,0 +1,66 @@ +SET allow_deprecated_error_prone_window_functions = 1; +drop table if exists largestTriangleThreeBucketsTestFloat64Float64; + +CREATE TABLE largestTriangleThreeBucketsTestFloat64Float64 +( + x Float64, + y Float64 +) ENGINE = MergeTree order by (y,x); + +INSERT INTO largestTriangleThreeBucketsTestFloat64Float64 +VALUES (1.0, 10.0),(2.0, 20.0),(3.0, 15.0),(8.0, 60.0),(9.0, 55.0),(10.0, 70.0),(4.0, 30.0),(5.0, 40.0),(6.0, 35.0),(7.0, 50.0); + +select largestTriangleThreeBuckets(0)(x, y) FROM largestTriangleThreeBucketsTestFloat64Float64; + +select largestTriangleThreeBuckets(1)(x, y) FROM largestTriangleThreeBucketsTestFloat64Float64; + +select largestTriangleThreeBuckets(2)(x, y) FROM largestTriangleThreeBucketsTestFloat64Float64; + +SELECT largestTriangleThreeBuckets(4)(x, y) AS downsampled_data +FROM largestTriangleThreeBucketsTestFloat64Float64; + +drop table largestTriangleThreeBucketsTestFloat64Float64; + +drop table if exists largestTriangleThreeBucketsTestDecimal64Decimal64; + +CREATE TABLE largestTriangleThreeBucketsTestDecimal64Decimal64 +( + x Decimal64(2), + y Decimal64(2) +) ENGINE = MergeTree order by (y,x); + +INSERT INTO largestTriangleThreeBucketsTestDecimal64Decimal64(x, y) VALUES (0.63, 0.25), (0.02, 0.16), (0.29, 0.16), (0.2, 0.24), (0.41, 0.63), (0.06, 0.73), (0.36, 0.99), (0.57, 0.18), (0.98, 0.09), (0.73, 0.95), (0.45, 0.86), (0.37, 0.86), (0.6, 0.64), (0.11, 0.31), (0.7, 0.25), (0.85, 0.15), (0.68, 0.39), (0.9, 0.3), (0.25, 0.34), (0.09, 0.0), (0.91, 0.62), (0.47, 0.06), (0.08, 0.88), (0.48, 0.57), (0.55, 0.75), (0.19, 0.27), (0.87, 0.15), (0.15, 0.09), (0.77, 0.28), (0.5, 0.2), (0.39, 0.86), (0.52, 0.11), (0.38, 0.75), (0.71, 0.44), (0.21, 0.46), (0.88, 0.15), (0.83, 0.67), (0.23, 0.23); + +select largestTriangleThreeBuckets(20)(x, y) from largestTriangleThreeBucketsTestDecimal64Decimal64; + +drop table largestTriangleThreeBucketsTestDecimal64Decimal64; + +drop table if exists largestTriangleThreeBucketsTestDateTime64Float64; + +create table largestTriangleThreeBucketsTestDateTime64Float64 (x DateTime64(3), y Float64) engine = MergeTree order by (y,x); + +INSERT INTO largestTriangleThreeBucketsTestDateTime64Float64 (x, y) VALUES ('2023-09-06 00:00:00', 14.217481939467213), ('2023-09-11 00:00:00', 30.096113766096455), ('2023-01-31 00:00:00', 91.42364224984735), ('2023-12-14 00:00:00', 42.08543753438961), ('2023-10-31 00:00:00', 29.93227107709394), ('2023-12-31 00:00:00', 98.52375935588333), ('2023-07-07 00:00:00', 79.9367415060134), ('2023-08-02 00:00:00', 55.417182033825696), ('2023-03-15 00:00:00', 98.77709508458238), ('2023-09-05 00:00:00', 2.832505232031368), ('2023-06-05 00:00:00', 8.107958052612418), ('2023-02-08 00:00:00', 62.95788480328096), ('2023-02-17 00:00:00', 76.80522155552535), ('2023-11-13 00:00:00', 24.927527306242993), ('2023-02-03 00:00:00', 7.966981342350332), ('2023-05-31 00:00:00', 44.61922229800436), ('2023-09-21 00:00:00', 65.86974701469791), ('2023-01-14 00:00:00', 35.96528042030847), ('2023-02-19 00:00:00', 16.065599678978305), ('2023-05-24 00:00:00', 17.23630978966909), ('2023-11-15 00:00:00', 15.544172190379879), ('2023-12-03 00:00:00', 13.738382187690856), ('2023-10-09 00:00:00', 16.7137129521176), ('2023-11-19 00:00:00', 12.12866001303361), ('2023-06-10 00:00:00', 95.15764263905534), ('2023-07-06 
00:00:00', 18.87765798627088), ('2023-03-13 00:00:00', 44.82941460384813), ('2023-01-29 00:00:00', 36.0214717111606), ('2023-12-19 00:00:00', 90.30173319497655), ('2023-07-15 00:00:00', 12.67101467231364), ('2023-07-06 00:00:00', 88.13662733228512), ('2023-05-10 00:00:00', 34.18711141027026), ('2023-11-12 00:00:00', 75.58716684321973), ('2023-10-28 00:00:00', 35.79179186729331), ('2023-11-14 00:00:00', 0.9318182359137728), ('2023-09-29 00:00:00', 80.05338096818797), ('2023-09-13 00:00:00', 16.130217942056866), ('2023-07-28 00:00:00', 11.186638594914744), ('2023-02-12 00:00:00', 69.43690757793445), ('2023-12-18 00:00:00', 12.832032764204616), ('2023-05-21 00:00:00', 74.25002458036471), ('2023-04-03 00:00:00', 51.5662427420719), ('2023-11-27 00:00:00', 96.44359131281784), ('2023-03-29 00:00:00', 33.018594418113324), ('2023-02-07 00:00:00', 84.58945099939815), ('2023-11-16 00:00:00', 40.61531555527268), ('2023-04-21 00:00:00', 60.0545791577218), ('2023-01-31 00:00:00', 87.23185155362057), ('2023-05-19 00:00:00', 77.4095289464808), ('2023-08-26 00:00:00', 18.700816570182067); + +select largestTriangleThreeBuckets(5)(x, y) from largestTriangleThreeBucketsTestDateTime64Float64; + +select lttb(5)(x, y) from largestTriangleThreeBucketsTestDateTime64Float64; + +drop table largestTriangleThreeBucketsTestDateTime64Float64; + +CREATE TABLE largestTriangleTreeBucketsBucketSizeTest +( + x UInt32, + y UInt32 +) ENGINE = MergeTree ORDER BY x; + +INSERT INTO largestTriangleTreeBucketsBucketSizeTest (x, y) SELECT (number + 1) AS x, (x % 1000) AS y FROM numbers(9999); + +SELECT + arrayJoin(lttb(1000)(x, y)) AS point, + tupleElement(point, 1) AS point_x, + point_x - neighbor(point_x, -1) AS point_x_diff_with_previous_row +FROM largestTriangleTreeBucketsBucketSizeTest LIMIT 990, 10; + +SELECT largestTriangleThreeBuckets(1)(0, '1900-01-01 00:00:00'::DateTime64); + +DROP TABLE largestTriangleTreeBucketsBucketSizeTest; diff --git a/parser/testdata/02842_move_pk_to_end_of_prewhere/ast.json b/parser/testdata/02842_move_pk_to_end_of_prewhere/ast.json new file mode 100644 index 000000000..2eaf612f5 --- /dev/null +++ b/parser/testdata/02842_move_pk_to_end_of_prewhere/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001067745, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02842_move_pk_to_end_of_prewhere/metadata.json b/parser/testdata/02842_move_pk_to_end_of_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02842_move_pk_to_end_of_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02842_move_pk_to_end_of_prewhere/query.sql b/parser/testdata/02842_move_pk_to_end_of_prewhere/query.sql new file mode 100644 index 000000000..7eadd5f91 --- /dev/null +++ b/parser/testdata/02842_move_pk_to_end_of_prewhere/query.sql @@ -0,0 +1,36 @@ +SET optimize_move_to_prewhere = 1; +SET enable_multiple_prewhere_read_steps = 1; +SET optimize_functions_to_subcolumns = 0; +SET allow_statistics_optimize = 0; + +DROP TABLE IF EXISTS t_02848_mt1; +DROP TABLE IF EXISTS t_02848_mt2; + +CREATE TABLE t_02848_mt1 (k UInt32, v String) ENGINE = MergeTree ORDER BY k SETTINGS min_bytes_for_wide_part=0; + +INSERT INTO t_02848_mt1 SELECT number, toString(number) FROM numbers(100); + +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02848_mt1 
WHERE k = 3 AND notEmpty(v)) WHERE explain LIKE '%Prewhere filter%' OR explain LIKE '%Filter%'; +SELECT count() FROM t_02848_mt1 WHERE k = 3 AND notEmpty(v); + +CREATE TABLE t_02848_mt2 (a UInt32, b String, c Int32, d String) ENGINE = MergeTree ORDER BY (a,b,c) SETTINGS min_bytes_for_wide_part=0; + +INSERT INTO t_02848_mt2 SELECT number, toString(number), number, 'aaaabbbbccccddddtestxxxyyy' FROM numbers(100); + +-- the estimated column sizes are: {a: 428, b: 318, c: 428, d: 73} +-- these estimates are not correct; fixing them is left for the future. + +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_String', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02848_mt2 WHERE a = 3 AND b == '3' AND c < 20 AND d like '%es%') WHERE explain LIKE '%Prewhere filter%' OR explain LIKE '%Filter%'; +SELECT count() FROM t_02848_mt2 WHERE a = 3 AND b == '3' AND c < 20 AND d like '%es%'; + +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_String', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02848_mt2 WHERE a = 3 AND c < 20 AND c > 0 AND d like '%es%') WHERE explain LIKE '%Prewhere filter%' OR explain LIKE '%Filter%'; +SELECT count() FROM t_02848_mt2 WHERE a = 3 AND c < 20 AND c > 0 AND d like '%es%'; + +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_String', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02848_mt2 WHERE b == '3' AND c < 20 AND d like '%es%') WHERE explain LIKE '%Prewhere filter%' OR explain LIKE '%Filter%'; +SELECT count() FROM t_02848_mt2 WHERE b == '3' AND c < 20 AND d like '%es%'; + +SELECT replaceRegexpAll(explain, '__table1\.|_UInt8|_String', '') FROM (EXPLAIN actions=1 SELECT count() FROM t_02848_mt2 WHERE a = 3 AND b == '3' AND d like '%es%') WHERE explain LIKE '%Prewhere filter%' OR explain LIKE '%Filter%'; +SELECT count() FROM t_02848_mt2 WHERE a = 3 AND b == '3' AND d like '%es%'; + +DROP TABLE t_02848_mt1; +DROP TABLE t_02848_mt2; diff --git a/parser/testdata/02842_mutations_replace_non_deterministic/ast.json b/parser/testdata/02842_mutations_replace_non_deterministic/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02842_mutations_replace_non_deterministic/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02842_mutations_replace_non_deterministic/metadata.json b/parser/testdata/02842_mutations_replace_non_deterministic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02842_mutations_replace_non_deterministic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02842_mutations_replace_non_deterministic/query.sql b/parser/testdata/02842_mutations_replace_non_deterministic/query.sql new file mode 100644 index 000000000..d4f989fd1 --- /dev/null +++ b/parser/testdata/02842_mutations_replace_non_deterministic/query.sql @@ -0,0 +1,136 @@ +-- Tags: no-shared-merge-tree +-- With shared merge tree, non-deterministic mutations are allowed +DROP TABLE IF EXISTS t_mutations_nondeterministic SYNC; + +SET mutations_sync = 2; +SET mutations_execute_subqueries_on_initiator = 1; +SET mutations_execute_nondeterministic_on_initiator = 1; + +-- SELECT sum(...)
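+-- Illustrative note (not part of the original test): with mutations_execute_subqueries_on_initiator = 1, +-- the scalar subquery below should be evaluated once on the initiator and its result substituted into the +-- mutation command as a literal, e.g. `UPDATE v = 4950 WHERE 1` (sum of 0..99), which is what the later +-- SELECT of `command` from system.mutations verifies.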
+ +CREATE TABLE t_mutations_nondeterministic (id UInt64, v UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/02842_mutations_replace', '1') +ORDER BY id; + +INSERT INTO t_mutations_nondeterministic VALUES (10, 20); + +ALTER TABLE t_mutations_nondeterministic UPDATE v = (SELECT sum(number) FROM numbers(100)) WHERE 1; + +SELECT id, v FROM t_mutations_nondeterministic ORDER BY id; + +SELECT command FROM system.mutations +WHERE database = currentDatabase() AND table = 't_mutations_nondeterministic' AND is_done +ORDER BY command; + +DROP TABLE t_mutations_nondeterministic SYNC; + +-- SELECT groupArray(...) + +CREATE TABLE t_mutations_nondeterministic (id UInt64, v Array(UInt64)) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/02842_mutations_replace', '1') +ORDER BY id; + +INSERT INTO t_mutations_nondeterministic VALUES (10, [20]); + +ALTER TABLE t_mutations_nondeterministic UPDATE v = (SELECT groupArray(number) FROM numbers(10)) WHERE 1; + +SELECT id, v FROM t_mutations_nondeterministic ORDER BY id; + +-- Too big result. +ALTER TABLE t_mutations_nondeterministic UPDATE v = (SELECT groupArray(number) FROM numbers(10000)) WHERE 1; -- { serverError BAD_ARGUMENTS } + +SELECT command FROM system.mutations +WHERE database = currentDatabase() AND table = 't_mutations_nondeterministic' AND is_done +ORDER BY command; + +DROP TABLE t_mutations_nondeterministic SYNC; + +-- SELECT uniqExactState(...) + +CREATE TABLE t_mutations_nondeterministic (id UInt64, v AggregateFunction(uniqExact, UInt64)) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/02842_mutations_replace', '1') +ORDER BY id; + +INSERT INTO t_mutations_nondeterministic VALUES (10, initializeAggregation('uniqExactState', 1::UInt64)); + +ALTER TABLE t_mutations_nondeterministic UPDATE v = (SELECT uniqExactState(number) FROM numbers(5)) WHERE 1; + +SELECT id, finalizeAggregation(v) FROM t_mutations_nondeterministic ORDER BY id; + +SELECT command FROM system.mutations +WHERE database = currentDatabase() AND table = 't_mutations_nondeterministic' AND is_done +ORDER BY command; + +DROP TABLE t_mutations_nondeterministic SYNC; + +-- now() + +CREATE TABLE t_mutations_nondeterministic (id UInt64, v DateTime) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/02842_mutations_replace', '1') +ORDER BY id; + +INSERT INTO t_mutations_nondeterministic VALUES (10, '2020-10-10'); + +ALTER TABLE t_mutations_nondeterministic UPDATE v = now() WHERE 1; + +SELECT id, v BETWEEN now() - INTERVAL 10 MINUTE AND now() FROM t_mutations_nondeterministic; + +SELECT + replaceRegexpOne(command, '(\\d{10})', 'timestamp'), +FROM system.mutations +WHERE database = currentDatabase() AND table = 't_mutations_nondeterministic' AND is_done +ORDER BY command; + +DROP TABLE t_mutations_nondeterministic SYNC; + +-- filesystem(...) 
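+-- Illustrative note (not part of the original test): filesystemCapacity() depends on the local host, +-- so it presumably cannot be rewritten into a literal on the initiator; the mutation below is therefore +-- expected to be rejected with BAD_ARGUMENTS rather than executed.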
+ +CREATE TABLE t_mutations_nondeterministic (id UInt64, v UInt64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/02842_mutations_replace', '1') ORDER BY id; + +INSERT INTO t_mutations_nondeterministic VALUES (10, 10); + +ALTER TABLE t_mutations_nondeterministic UPDATE v = filesystemCapacity(materialize('default')) WHERE 1; -- { serverError BAD_ARGUMENTS } + +DROP TABLE t_mutations_nondeterministic SYNC; + +-- UPDATE SELECT randConstant() + +CREATE TABLE t_mutations_nondeterministic (id UInt64, v UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/02842_mutations_replace', '1') +ORDER BY id; + +INSERT INTO t_mutations_nondeterministic VALUES (10, 10); + +-- Check that function in subquery is not rewritten. +ALTER TABLE t_mutations_nondeterministic +UPDATE v = +( + SELECT sum(number) FROM numbers(1000) WHERE number > randConstant() +) WHERE 1 +SETTINGS mutations_execute_subqueries_on_initiator = 0, allow_nondeterministic_mutations = 1; + +SELECT command FROM system.mutations +WHERE database = currentDatabase() AND table = 't_mutations_nondeterministic' AND is_done +ORDER BY command; + +DROP TABLE t_mutations_nondeterministic SYNC; + +-- DELETE WHERE now() + +CREATE TABLE t_mutations_nondeterministic (id UInt64, d DateTime) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/02842_mutations_replace', '1') +ORDER BY id; + +INSERT INTO t_mutations_nondeterministic VALUES (10, '2000-10-10'), (20, '2100-10-10'); + +ALTER TABLE t_mutations_nondeterministic DELETE WHERE d < now(); + +SELECT + replaceRegexpOne(command, '(\\d{10})', 'timestamp'), +FROM system.mutations +WHERE database = currentDatabase() AND table = 't_mutations_nondeterministic' AND NOT is_done +ORDER BY command; + +SELECT id, d FROM t_mutations_nondeterministic ORDER BY id; + +DROP TABLE t_mutations_nondeterministic SYNC; diff --git a/parser/testdata/02842_truncate_database/ast.json b/parser/testdata/02842_truncate_database/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02842_truncate_database/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02842_truncate_database/metadata.json b/parser/testdata/02842_truncate_database/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02842_truncate_database/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02842_truncate_database/query.sql b/parser/testdata/02842_truncate_database/query.sql new file mode 100644 index 000000000..bcd818f55 --- /dev/null +++ b/parser/testdata/02842_truncate_database/query.sql @@ -0,0 +1,80 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS test_truncate_database; + +-- test TRUNCATE DATABASE operation. +-- create tables, views and dictionary and populate them. Then try truncating the database. 
+-- all tables, views and dictionaries should be removed leaving an empty database +CREATE DATABASE test_truncate_database; +USE test_truncate_database; + +-- create tables with several different types of table engines +CREATE TABLE source_table_memory (x UInt16) ENGINE = Memory; +CREATE TABLE source_table_log (x UInt16) ENGINE = Log; +CREATE TABLE source_table_tiny_log (x UInt16) ENGINE = TinyLog; +CREATE TABLE source_table_stripe_log (x UInt16) ENGINE = StripeLog; +CREATE TABLE source_table_merge_tree (x UInt16) ENGINE = MergeTree ORDER BY x PARTITION BY x; +-- create dictionary source table +CREATE TABLE source_table_dictionary +( + id UInt64, + value String +) ENGINE = Memory(); + +-- insert data into the tables +INSERT INTO source_table_memory SELECT * FROM system.numbers LIMIT 10; +INSERT INTO source_table_log SELECT * FROM system.numbers LIMIT 10; +INSERT INTO source_table_tiny_log SELECT * FROM system.numbers LIMIT 10; +INSERT INTO source_table_stripe_log SELECT * FROM system.numbers LIMIT 10; +INSERT INTO source_table_merge_tree SELECT * FROM system.numbers LIMIT 10; +INSERT INTO source_table_dictionary VALUES (1, 'First'); + + +-- create view based on the tables +CREATE VIEW dest_view_memory (x UInt64) AS SELECT * FROM source_table_memory; +CREATE VIEW dest_view_log (x UInt64) AS SELECT * FROM source_table_log; +CREATE VIEW dest_view_tiny_log (x UInt64) AS SELECT * FROM source_table_tiny_log; +CREATE VIEW dest_view_stripe_log (x UInt64) AS SELECT * FROM source_table_stripe_log; +CREATE VIEW dest_view_merge_tree (x UInt64) AS SELECT * FROM source_table_merge_tree; +-- create dictionary based on source table +CREATE DICTIONARY dest_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() DB 'test_truncate_database' TABLE 'source_table_dictionary')) +LAYOUT(FLAT()) +LIFETIME(MIN 0 MAX 1000); + + +SELECT * FROM dest_view_memory ORDER BY x LIMIT 1; +SELECT * FROM dest_view_log ORDER BY x LIMIT 1; +SELECT * FROM dest_view_tiny_log ORDER BY x LIMIT 1; +SELECT * FROM dest_view_stripe_log ORDER BY x LIMIT 1; +SELECT * FROM dest_view_merge_tree ORDER BY x LIMIT 1; +SELECT name, database, element_count FROM system.dictionaries WHERE database = 'test_truncate_database' AND name = 'dest_dictionary'; +SELECT * FROM dest_dictionary; +SELECT '=== TABLES IN test_truncate_database ==='; +SHOW TABLES FROM test_truncate_database; +SELECT '=== DICTIONARIES IN test_truncate_database ==='; +SHOW DICTIONARIES FROM test_truncate_database; + +TRUNCATE DATABASE test_truncate_database; + +SELECT * FROM dest_view_set ORDER BY x LIMIT 1; -- {serverError UNKNOWN_TABLE} +SELECT * FROM dest_view_memory ORDER BY x LIMIT 1; -- {serverError UNKNOWN_TABLE} +SELECT * FROM dest_view_log ORDER BY x LIMIT 1; -- {serverError UNKNOWN_TABLE} +SELECT * FROM dest_view_tiny_log ORDER BY x LIMIT 1; -- {serverError UNKNOWN_TABLE} +SELECT * FROM dest_view_stripe_log ORDER BY x LIMIT 1; -- {serverError UNKNOWN_TABLE} +SELECT * FROM dest_view_merge_tree ORDER BY x LIMIT 1; -- {serverError UNKNOWN_TABLE} +SELECT name, database, element_count FROM system.dictionaries WHERE database = 'test_truncate_database' AND name = 'dest_dictionary'; +SELECT * FROM dest_dictionary; -- {serverError UNKNOWN_TABLE} +SHOW TABLES FROM test_truncate_database; +SHOW DICTIONARIES FROM test_truncate_database; + +CREATE TABLE new_table (x UInt16) ENGINE = MergeTree ORDER BY x; +select 'new tables'; +SHOW TABLES FROM test_truncate_database; + +DROP DATABASE test_truncate_database; diff --git 
a/parser/testdata/02842_vertical_merge_after_add_drop_column/ast.json b/parser/testdata/02842_vertical_merge_after_add_drop_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02842_vertical_merge_after_add_drop_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02842_vertical_merge_after_add_drop_column/metadata.json b/parser/testdata/02842_vertical_merge_after_add_drop_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02842_vertical_merge_after_add_drop_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02842_vertical_merge_after_add_drop_column/query.sql b/parser/testdata/02842_vertical_merge_after_add_drop_column/query.sql new file mode 100644 index 000000000..0a06eb054 --- /dev/null +++ b/parser/testdata/02842_vertical_merge_after_add_drop_column/query.sql @@ -0,0 +1,25 @@ +-- In some versions, vertical merges after DROP COLUMN were broken in some cases + +drop table if exists data; + +create table data ( + key Int, + `legacy_features_Map.id` Array(UInt8), + `legacy_features_Map.count` Array(UInt32), +) engine=MergeTree() +order by key +settings + min_bytes_for_wide_part=0, + min_rows_for_wide_part=0, + vertical_merge_algorithm_min_rows_to_activate=0, + vertical_merge_algorithm_min_columns_to_activate=0; + +insert into data (key) values (1); +insert into data (key) values (2); + +alter table data add column `features_legacy_Map.id` Array(UInt8), add column `features_legacy_Map.count` Array(UInt32); + +alter table data drop column legacy_features_Map settings mutations_sync=2; + +optimize table data final; +DROP TABLE data; diff --git a/parser/testdata/02843_context_has_expired/ast.json b/parser/testdata/02843_context_has_expired/ast.json new file mode 100644 index 000000000..0cdbcd826 --- /dev/null +++ b/parser/testdata/02843_context_has_expired/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02843_dict (children 1)" + }, + { + "explain": " Identifier 02843_dict" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001088139, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02843_context_has_expired/metadata.json b/parser/testdata/02843_context_has_expired/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02843_context_has_expired/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02843_context_has_expired/query.sql b/parser/testdata/02843_context_has_expired/query.sql new file mode 100644 index 000000000..93204822f --- /dev/null +++ b/parser/testdata/02843_context_has_expired/query.sql @@ -0,0 +1,36 @@ +DROP DICTIONARY IF EXISTS 02843_dict; +DROP TABLE IF EXISTS 02843_source; +DROP TABLE IF EXISTS 02843_join; + +CREATE TABLE 02843_source +( + id UInt64, + value String +) +ENGINE=Memory; + +CREATE DICTIONARY 02843_dict +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE '02843_source')) +LAYOUT(DIRECT()); + +SELECT 1 IN (SELECT dictGet('02843_dict', 'value', materialize('1'))); + +CREATE TABLE 02843_join (id UInt8, value String) ENGINE Join(ANY, LEFT, id); +SELECT 1 IN (SELECT joinGet(02843_join, 'value', materialize(1))); +SELECT 1 IN (SELECT joinGetOrNull(02843_join, 'value', materialize(1))); + +SELECT 1 IN (SELECT materialize(connectionId())); +SELECT 1000000 IN (SELECT materialize(getSetting('max_threads')));
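+-- A hedged aside (not part of the original test): each probe above wraps a context-dependent function +-- (dictGet, joinGet, connectionId, getSetting) in a scalar subquery, so the subquery must be evaluated +-- while the query context is still alive. A hypothetical probe in the same spirit would be: +-- SELECT currentDatabase() IN (SELECT materialize(currentDatabase()));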
+SELECT 1 in (SELECT file(materialize('a'))); -- { serverError FILE_DOESNT_EXIST } + +EXPLAIN ESTIMATE SELECT 1 IN (SELECT dictGet('02843_dict', 'value', materialize('1'))); +EXPLAIN ESTIMATE SELECT 1 IN (SELECT joinGet(`02843_join`, 'value', materialize(1))); + +DROP DICTIONARY 02843_dict; +DROP TABLE 02843_source; +DROP TABLE 02843_join; diff --git a/parser/testdata/02843_date_predicate_optimizations_bugs/ast.json b/parser/testdata/02843_date_predicate_optimizations_bugs/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02843_date_predicate_optimizations_bugs/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02843_date_predicate_optimizations_bugs/metadata.json b/parser/testdata/02843_date_predicate_optimizations_bugs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02843_date_predicate_optimizations_bugs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02843_date_predicate_optimizations_bugs/query.sql b/parser/testdata/02843_date_predicate_optimizations_bugs/query.sql new file mode 100644 index 000000000..6e26a5166 --- /dev/null +++ b/parser/testdata/02843_date_predicate_optimizations_bugs/query.sql @@ -0,0 +1,9 @@ +select + toYYYYMM(date) as date_, + n +from (select + [toDate('20230815'), toDate('20230816')] as date, + [1, 2] as n +) as data +array join date, n +where date_ >= 202303; diff --git a/parser/testdata/02844_distributed_virtual_columns/ast.json b/parser/testdata/02844_distributed_virtual_columns/ast.json new file mode 100644 index 000000000..e9dbd0583 --- /dev/null +++ b/parser/testdata/02844_distributed_virtual_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_01072 (children 1)" + }, + { + "explain": " Identifier data_01072" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001199202, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02844_distributed_virtual_columns/metadata.json b/parser/testdata/02844_distributed_virtual_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02844_distributed_virtual_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02844_distributed_virtual_columns/query.sql b/parser/testdata/02844_distributed_virtual_columns/query.sql new file mode 100644 index 000000000..31a6780f1 --- /dev/null +++ b/parser/testdata/02844_distributed_virtual_columns/query.sql @@ -0,0 +1,5 @@ +drop table if exists data_01072; +drop table if exists dist_01072; +create table data_01072 (key Int) Engine=MergeTree() ORDER BY key; +create table dist_01072 (key Int) Engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01072, key); +select * from dist_01072 where key=0 and _part='0'; diff --git a/parser/testdata/02844_subquery_timeout_with_break/ast.json b/parser/testdata/02844_subquery_timeout_with_break/ast.json new file mode 100644 index 000000000..dc05deb3e --- /dev/null +++ b/parser/testdata/02844_subquery_timeout_with_break/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001146784, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02844_subquery_timeout_with_break/metadata.json 
b/parser/testdata/02844_subquery_timeout_with_break/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02844_subquery_timeout_with_break/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02844_subquery_timeout_with_break/query.sql b/parser/testdata/02844_subquery_timeout_with_break/query.sql new file mode 100644 index 000000000..00b527a93 --- /dev/null +++ b/parser/testdata/02844_subquery_timeout_with_break/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS t; +CREATE TABLE t (key UInt64, value UInt64, INDEX value_idx value TYPE bloom_filter GRANULARITY 1) ENGINE=MergeTree() ORDER BY key; + +INSERT INTO t SELECT number, rand()%1000 FROM numbers(10000); + +SET timeout_overflow_mode='break'; +SET max_execution_time=0.1, max_rows_to_read=0; +SELECT * FROM t WHERE value IN (SELECT number FROM numbers(1000000000)); + +DROP TABLE t; diff --git a/parser/testdata/02845_arrayShiftRotate/ast.json b/parser/testdata/02845_arrayShiftRotate/ast.json new file mode 100644 index 000000000..3d3830c3d --- /dev/null +++ b/parser/testdata/02845_arrayShiftRotate/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '== arrayRotateLeft'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001118915, + "rows_read": 5, + "bytes_read": 189 + } +} diff --git a/parser/testdata/02845_arrayShiftRotate/metadata.json b/parser/testdata/02845_arrayShiftRotate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02845_arrayShiftRotate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02845_arrayShiftRotate/query.sql b/parser/testdata/02845_arrayShiftRotate/query.sql new file mode 100644 index 000000000..bdb409c3f --- /dev/null +++ b/parser/testdata/02845_arrayShiftRotate/query.sql @@ -0,0 +1,78 @@ +select '== arrayRotateLeft'; +select arrayRotateLeft([1,2,3,4,5], 2); +select arrayRotateLeft([1,2,3,4,5], -2); +select arrayRotateLeft([1,2,3,4,5], 8); +select arrayRotateLeft(['H', 'e', 'l', 'l', 'o'], 2); +select arrayRotateLeft([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], 1); +select ''; + +select '== arrayRotateRight'; +select arrayRotateRight([1,2,3,4,5], 2); +select arrayRotateRight([1,2,3,4,5], -2); +select arrayRotateRight([1,2,3,4,5], 8); +select arrayRotateRight(['H', 'e', 'l', 'l', 'o'], 2); +select arrayRotateRight([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], 1); +select ''; + +select '== arrayShiftLeft'; +select arrayShiftLeft([1, 2, 3, 4, 5], 3); +select arrayShiftLeft([1, 2, 3, 4, 5], -3); +select arrayShiftLeft([1, 2, 3, 4, 5], 8); +select arrayShiftLeft(['a', 'b', 'c', 'd', 'e'], 3); +select arrayShiftLeft([[1, 2], [3, 4], [5, 6]], 2); +select arrayShiftLeft([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], 1); +select arrayShiftLeft([1, 2, 3, 4, 5], 3, 7); +select arrayShiftLeft(['a', 'b', 'c', 'd', 'e'], 3, 'foo'); +select arrayShiftLeft([[1, 2], [3, 4], [5, 6]], 2, [7, 8]); +select arrayShiftLeft(CAST('[1, 2, 3, 4, 5, 6]', 'Array(UInt16)'), 1, 1000); +select ''; + +select '== arrayShiftRight'; +select arrayShiftRight([1, 2, 3, 4, 5], 3); +select arrayShiftRight([1, 2, 3, 4, 5], -3); +select arrayShiftRight([1, 2, 3, 4, 5], 8); +select arrayShiftRight(['a', 'b', 'c', 'd', 'e'], 3); +select 
arrayShiftRight([[1, 2], [3, 4], [5, 6]], 2); +select arrayShiftRight([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], 1); +select arrayShiftRight([1, 2, 3, 4, 5], 3, 7); +select arrayShiftRight(['a', 'b', 'c', 'd', 'e'], 3, 'foo'); +select arrayShiftRight([[1, 2], [3, 4], [5, 6]], 2, [7, 8]); +select arrayShiftRight(CAST('[1, 2, 3, 4, 5, 6]', 'Array(UInt16)'), 1, 1000); +select ''; + +select '== table'; +drop table if exists t02845; +create table t02845 (a Array(UInt8), s Int16, d UInt8) engine = MergeTree order by d; +insert into t02845 values ([1,2,3,4,5,6], 2, 1),([1,2,3,4,5,6], 3, 2),([1,2,3,4], 3, 3),([4,8,15,16,23,42], 5, 4),([2, 7, 18, 28, 18, 28, 45, 90, 45], 7, 5),([3, 14, 159, 26, 5], 11, 6); + +select '== table with constants'; +select '-- arrayRotateLeft'; +select arrayRotateLeft(a, 2) from t02845; +select '-- arrayRotateRight'; +select arrayRotateRight(a, 2) from t02845; +select '-- arrayShiftLeft'; +select arrayShiftLeft(a, 3) from t02845; +select '-- arrayShiftRight'; +select arrayShiftRight(a, 3) from t02845; + +select '== table with constants and defaults'; +select '-- arrayShiftLeft'; +select arrayShiftLeft(a, 3, 7) from t02845; +select '-- arrayShiftRight'; +select arrayShiftRight(a, 3, 7) from t02845; + +select '== table values'; +select '-- arrayRotateLeft'; +select arrayRotateLeft(a, s) from t02845; +select '-- arrayRotateRight'; +select arrayRotateRight(a, s) from t02845; +select '-- arrayShiftLeft'; +select arrayShiftLeft(a, s, d) from t02845; +select '-- arrayShiftRight'; +select arrayShiftRight(a, s, d) from t02845; + +select '== problematic cast cases'; +select arrayShiftLeft([30000], 3, 5); +select arrayShiftLeft([[1]], 3, []); +select arrayShiftLeft(['foo'], 3, 3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select arrayShiftLeft([1], 3, 'foo'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/02845_domain_rfc_support_ipv6/ast.json b/parser/testdata/02845_domain_rfc_support_ipv6/ast.json new file mode 100644 index 000000000..513b37994 --- /dev/null +++ b/parser/testdata/02845_domain_rfc_support_ipv6/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function domainRFC (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'http:\/\/[2001:db8::1]:80'" + }, + { + "explain": " Identifier CSV" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001155425, + "rows_read": 8, + "bytes_read": 302 + } +} diff --git a/parser/testdata/02845_domain_rfc_support_ipv6/metadata.json b/parser/testdata/02845_domain_rfc_support_ipv6/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02845_domain_rfc_support_ipv6/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02845_domain_rfc_support_ipv6/query.sql b/parser/testdata/02845_domain_rfc_support_ipv6/query.sql new file mode 100644 index 000000000..e590064af --- /dev/null +++ b/parser/testdata/02845_domain_rfc_support_ipv6/query.sql @@ -0,0 +1,33 @@ +SELECT domainRFC('http://[2001:db8::1]:80') FORMAT CSV; +SELECT domainRFC('[2001:db8::1]:80') FORMAT CSV; +SELECT domainRFC('[::200]:80') FORMAT CSV; +SELECT domainRFC('[2001:db8::1]') FORMAT CSV; +-- Does not conform to the IPv6 format. 
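+-- Hedged expectation (inferred from the CSV reference format, not stated in the original test): for the +-- malformed bracketed hosts below, domainRFC should return an empty string rather than throw an exception.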
+SELECT domainRFC('[2001db81]:80') FORMAT CSV; +SELECT domainRFC('[20[01:db8::1]:80') FORMAT CSV; +SELECT domainRFC('[20[01:db]8::1]:80') FORMAT CSV; +SELECT domainRFC('[2001:db8::1') FORMAT CSV; +SELECT domainRFC('2001:db8::1]:80') FORMAT CSV; +SELECT domainRFC('[2001db81]:80') FORMAT CSV; +SELECT domainRFC('[2001::db.81]:80') FORMAT CSV; +SELECT domainRFC('[2001::db/81]:80') FORMAT CSV; +SELECT domainRFC('[2001::db?81]:80') FORMAT CSV; +SELECT domainRFC('[2001::db#81]:80') FORMAT CSV; +SELECT domainRFC('[2001::db@81]:80') FORMAT CSV; +SELECT domainRFC('[2001::db;81]:80') FORMAT CSV; +SELECT domainRFC('[2001::db=81]:80') FORMAT CSV; +SELECT domainRFC('[2001::db&81]:80') FORMAT CSV; +SELECT domainRFC('[2001::db~81]:80') FORMAT CSV; +SELECT domainRFC('[2001::db%81]:80') FORMAT CSV; +SELECT domainRFC('[2001::db<81]:80') FORMAT CSV; +SELECT domainRFC('[2001::db>81]:80') FORMAT CSV; +SELECT domainRFC('[2001::db{81]:80') FORMAT CSV; +SELECT domainRFC('[2001::db}81]:80') FORMAT CSV; +SELECT domainRFC('[2001::db|81]:80') FORMAT CSV; +SELECT domainRFC('[2001::db\81]:80') FORMAT CSV; +SELECT domainRFC('[2001::db^81]:80') FORMAT CSV; +SELECT domainRFC('[2001::db 81]:80') FORMAT CSV; +SELECT domainRFC('[[]:80') FORMAT CSV; +SELECT domainRFC('[]]:80') FORMAT CSV; +SELECT domainRFC('[]:80') FORMAT CSV; +SELECT domainRFC('[ ]:80') FORMAT CSV; diff --git a/parser/testdata/02845_group_by_constant_keys/ast.json b/parser/testdata/02845_group_by_constant_keys/ast.json new file mode 100644 index 000000000..2f3869980 --- /dev/null +++ b/parser/testdata/02845_group_by_constant_keys/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_1 (alias k1)" + }, + { + "explain": " Literal UInt64_2 (alias k2)" + }, + { + "explain": " Literal UInt64_3 (alias k3)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers_mt (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10000000" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier k1" + }, + { + "explain": " Identifier k2" + }, + { + "explain": " Identifier k3" + }, + { + "explain": " Set" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001081494, + "rows_read": 21, + "bytes_read": 765 + } +} diff --git a/parser/testdata/02845_group_by_constant_keys/metadata.json b/parser/testdata/02845_group_by_constant_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02845_group_by_constant_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02845_group_by_constant_keys/query.sql b/parser/testdata/02845_group_by_constant_keys/query.sql new file mode 100644 index 000000000..053ad3ecd --- /dev/null +++ b/parser/testdata/02845_group_by_constant_keys/query.sql @@ -0,0 +1,28 @@ +select count(number), 1 AS k1, 2 as k2, 3 as k3 from numbers_mt(10000000) group by k1, k2, k3 settings 
optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=0, compile_aggregate_expressions=0; +select count(number), 1 AS k1, 2 as k2, 3 as k3 from numbers_mt(10000000) group by k1, k2, k3 settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=1, compile_aggregate_expressions = 0; +select count(number), 1 AS k1, 2 as k2, 3 as k3 from numbers_mt(10000000) group by k1, k2, k3 settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=0, compile_aggregate_expressions = 1; +select count(number), 1 AS k1, 2 as k2, 3 as k3 from numbers_mt(10000000) group by k1, k2, k3 settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=1, compile_aggregate_expressions = 1; + +drop table if exists test; +create table test (x UInt64) engine=File(JSON); +set engine_file_allow_create_multiple_files = 1; +insert into test select * from numbers(10); +insert into test select * from numbers(10); +insert into test select * from numbers(10); + +select count() from test group by _file order by _file settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=0, compile_aggregate_expressions=0; +select count() from test group by _file order by _file settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=1, compile_aggregate_expressions=0; +select count() from test group by _file order by _file settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=0, compile_aggregate_expressions=1; +select count() from test group by _file order by _file settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=1, compile_aggregate_expressions=1; + +select count(), _file from test group by _file order by _file settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=0, compile_aggregate_expressions=0; +select count(), _file from test group by _file order by _file settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=1, compile_aggregate_expressions=0; +select count(), _file from test group by _file order by _file settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=0, compile_aggregate_expressions=1; +select count(), _file from test group by _file order by _file settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=1, compile_aggregate_expressions=1; + +select count() from test group by _file, _path order by _file, _path settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=0, compile_aggregate_expressions=0; +select count() from test group by _file, _path order by _file, _path settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=1, compile_aggregate_expressions=0; +select count() from test group by _file, _path order by _file, _path settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=0, compile_aggregate_expressions=1; +select count() from test group by _file, _path order by _file, _path settings optimize_group_by_constant_keys=1, enable_software_prefetch_in_aggregation=1, compile_aggregate_expressions=1; + +drop table test; diff --git a/parser/testdata/02845_join_on_cond_sparse/ast.json b/parser/testdata/02845_join_on_cond_sparse/ast.json new file mode 100644 index 000000000..4b2403207 --- /dev/null +++ b/parser/testdata/02845_join_on_cond_sparse/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": 
"String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001146111, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02845_join_on_cond_sparse/metadata.json b/parser/testdata/02845_join_on_cond_sparse/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02845_join_on_cond_sparse/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02845_join_on_cond_sparse/query.sql b/parser/testdata/02845_join_on_cond_sparse/query.sql new file mode 100644 index 000000000..b70419af0 --- /dev/null +++ b/parser/testdata/02845_join_on_cond_sparse/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 ( id UInt32, attr UInt32 ) ENGINE = MergeTree ORDER BY id +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.1; + +INSERT INTO t1 VALUES (0, 0); + +CREATE TABLE t2 ( id UInt32, attr UInt32 ) ENGINE = MergeTree ORDER BY id +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.1; + +INSERT INTO t2 VALUES (0, 0); + +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND t1.attr != 0; + +INSERT INTO t1 VALUES (0, 1); + +SELECT * FROM t1 JOIN t2 ON t1.id = t2.id AND t1.attr != 0; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; diff --git a/parser/testdata/02845_prewhere_preserve_column/ast.json b/parser/testdata/02845_prewhere_preserve_column/ast.json new file mode 100644 index 000000000..dce67c92c --- /dev/null +++ b/parser/testdata/02845_prewhere_preserve_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02845_prewhere (children 1)" + }, + { + "explain": " Identifier 02845_prewhere" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001229447, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/02845_prewhere_preserve_column/metadata.json b/parser/testdata/02845_prewhere_preserve_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02845_prewhere_preserve_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02845_prewhere_preserve_column/query.sql b/parser/testdata/02845_prewhere_preserve_column/query.sql new file mode 100644 index 000000000..8f791d8b9 --- /dev/null +++ b/parser/testdata/02845_prewhere_preserve_column/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS 02845_prewhere; + +SET move_all_conditions_to_prewhere = 1; + +CREATE TABLE 02845_prewhere ( e String, c String, q String ) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO 02845_prewhere SELECT number, number, number from numbers(10); + +SELECT * FROM (SELECT * FROM 02845_prewhere WHERE e = '5' OR q = '6') WHERE (q = '6'); diff --git a/parser/testdata/02860_distributed_flush_on_detach/ast.json b/parser/testdata/02860_distributed_flush_on_detach/ast.json new file mode 100644 index 000000000..f2eb13f03 --- /dev/null +++ b/parser/testdata/02860_distributed_flush_on_detach/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001000046, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02860_distributed_flush_on_detach/metadata.json b/parser/testdata/02860_distributed_flush_on_detach/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02860_distributed_flush_on_detach/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02860_distributed_flush_on_detach/query.sql b/parser/testdata/02860_distributed_flush_on_detach/query.sql new file mode 100644 index 000000000..104426504 --- /dev/null +++ b/parser/testdata/02860_distributed_flush_on_detach/query.sql @@ -0,0 +1,33 @@ +set prefer_localhost_replica=0; + +drop table if exists data; +drop table if exists dist; + +-- { echoOn } + +create table data (key Int) engine=Memory(); +create table dist (key Int) engine=Distributed(test_shard_localhost, currentDatabase(), data); +system stop distributed sends dist; + +-- check that FLUSH DISTRIBUTED does flushing anyway +insert into dist values (1); +select * from data; +system flush distributed dist; +select * from data; +truncate table data; + +-- check that flush_on_detach=1 by default +insert into dist values (1); +detach table dist; +select * from data; +attach table dist; +truncate table data; + +-- check flush_on_detach=0 +drop table dist; +create table dist (key Int) engine=Distributed(test_shard_localhost, currentDatabase(), data) settings flush_on_detach=0; +system stop distributed sends dist; +insert into dist values (1); +detach table dist; +select * from data; +attach table dist; diff --git a/parser/testdata/02861_filter_pushdown_const_bug/ast.json b/parser/testdata/02861_filter_pushdown_const_bug/ast.json new file mode 100644 index 000000000..2774dfa31 --- /dev/null +++ b/parser/testdata/02861_filter_pushdown_const_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001237055, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02861_filter_pushdown_const_bug/metadata.json b/parser/testdata/02861_filter_pushdown_const_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02861_filter_pushdown_const_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02861_filter_pushdown_const_bug/query.sql b/parser/testdata/02861_filter_pushdown_const_bug/query.sql new file mode 100644 index 000000000..a7880b340 --- /dev/null +++ b/parser/testdata/02861_filter_pushdown_const_bug/query.sql @@ -0,0 +1,24 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS t1; + +CREATE TABLE t1 (key UInt8) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t1 VALUES (1),(2); + +SET join_algorithm = 'full_sorting_merge'; + +SELECT key FROM ( SELECT key FROM t1 ) AS t1 JOIN ( SELECT key FROM t1 ) AS t2 ON t1.key = t2.key WHERE key; +SELECT key FROM ( SELECT 1 AS key ) AS t1 JOIN ( SELECT 1 AS key ) AS t2 ON t1.key = t2.key WHERE key; +SELECT * FROM ( SELECT 1 AS key GROUP BY NULL ) AS t1 INNER JOIN (SELECT 1 AS key) AS t2 ON t1.key = t2.key WHERE t1.key ORDER BY key; + +SET max_rows_in_set_to_optimize_join = 0; + +SELECT key FROM ( SELECT key FROM t1 ) AS t1 JOIN ( SELECT key FROM t1 ) AS t2 ON t1.key = t2.key WHERE key; +SELECT key FROM ( SELECT 1 AS key ) AS t1 JOIN ( SELECT 1 AS key ) AS t2 ON t1.key = t2.key WHERE key; +SELECT * FROM ( SELECT 1 AS key GROUP BY NULL ) AS t1 INNER JOIN (SELECT 1 AS key) AS t2 ON t1.key = t2.key WHERE t1.key ORDER BY key; + +SET join_algorithm = 'grace_hash'; + +SELECT * FROM (SELECT key AS a FROM t1 ) t1 INNER JOIN (SELECT key AS c FROM t1 ) t2 ON c = a WHERE a; + +DROP TABLE IF EXISTS t1; diff --git 
a/parser/testdata/02861_index_set_incorrect_args/ast.json b/parser/testdata/02861_index_set_incorrect_args/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02861_index_set_incorrect_args/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02861_index_set_incorrect_args/metadata.json b/parser/testdata/02861_index_set_incorrect_args/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02861_index_set_incorrect_args/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02861_index_set_incorrect_args/query.sql b/parser/testdata/02861_index_set_incorrect_args/query.sql new file mode 100644 index 000000000..985e2a17f --- /dev/null +++ b/parser/testdata/02861_index_set_incorrect_args/query.sql @@ -0,0 +1,6 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/52019 +DROP TABLE IF EXISTS set_index__fuzz_41; +CREATE TABLE set_index__fuzz_41 (`a` Date, `b` Nullable(DateTime64(3)), INDEX b_set b TYPE set(0) GRANULARITY 1) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO set_index__fuzz_41 (a) VALUES (today()); +SELECT b FROM set_index__fuzz_41 WHERE and(b = 256) SETTINGS force_data_skipping_indices = 'b_set', optimize_move_to_prewhere = 0, max_parallel_replicas=2, parallel_replicas_for_non_replicated_merge_tree=1, enable_parallel_replicas=2; -- { serverError TOO_FEW_ARGUMENTS_FOR_FUNCTION } +DROP TABLE set_index__fuzz_41; diff --git a/parser/testdata/02861_interpolate_alias_precedence/ast.json b/parser/testdata/02861_interpolate_alias_precedence/ast.json new file mode 100644 index 000000000..e89059359 --- /dev/null +++ b/parser/testdata/02861_interpolate_alias_precedence/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02861_interpolate (children 1)" + }, + { + "explain": " Identifier 02861_interpolate" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001233003, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/02861_interpolate_alias_precedence/metadata.json b/parser/testdata/02861_interpolate_alias_precedence/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02861_interpolate_alias_precedence/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02861_interpolate_alias_precedence/query.sql b/parser/testdata/02861_interpolate_alias_precedence/query.sql new file mode 100644 index 000000000..dc96b9c57 --- /dev/null +++ b/parser/testdata/02861_interpolate_alias_precedence/query.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS 02861_interpolate; + +CREATE TABLE 02861_interpolate (date Date, id String, f Int16) ENGINE=MergeTree() ORDER BY (date); +INSERT INTO 02861_interpolate VALUES ('2023-05-15', '1', 1), ('2023-05-22', '1', 15); + +SELECT date AS d, toNullable(f) AS f FROM 02861_interpolate WHERE id = '1' ORDER BY d ASC WITH FILL STEP toIntervalDay(1) INTERPOLATE (f); + +DROP TABLE 02861_interpolate; diff --git a/parser/testdata/02861_replacing_merge_tree_with_cleanup/ast.json b/parser/testdata/02861_replacing_merge_tree_with_cleanup/ast.json new file mode 100644 index 000000000..6c82b3597 --- /dev/null +++ b/parser/testdata/02861_replacing_merge_tree_with_cleanup/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + 
"statistics": + { + "elapsed": 0.001167273, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02861_replacing_merge_tree_with_cleanup/metadata.json b/parser/testdata/02861_replacing_merge_tree_with_cleanup/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02861_replacing_merge_tree_with_cleanup/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02861_replacing_merge_tree_with_cleanup/query.sql b/parser/testdata/02861_replacing_merge_tree_with_cleanup/query.sql new file mode 100644 index 000000000..4cd44a131 --- /dev/null +++ b/parser/testdata/02861_replacing_merge_tree_with_cleanup/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (uid String, version UInt32, is_deleted UInt8) ENGINE = ReplacingMergeTree(version, is_deleted) Order by (uid) SETTINGS vertical_merge_algorithm_min_rows_to_activate = 1, + vertical_merge_algorithm_min_columns_to_activate = 0, + min_rows_for_wide_part = 1, + min_bytes_for_wide_part = 1, + allow_experimental_replacing_merge_with_cleanup=1; + +-- Expect d6 to be version=3 is_deleted=false +INSERT INTO test (*) VALUES ('d1', 1, 0), ('d1', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d1', 5, 0), ('d2', 1, 0), ('d3', 1, 0), ('d4', 1, 0), ('d5', 1, 0), ('d6', 1, 0), ('d6', 3, 0); +-- Insert previous version of 'd6' but only v=3 is_deleted=false will remain +INSERT INTO test (*) VALUES ('d1', 1, 0), ('d1', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d1', 5, 0), ('d2', 1, 0), ('d3', 1, 0), ('d4', 1, 0), ('d5', 1, 0), ('d6', 1, 0), ('d6', 2, 1); +SELECT '== Only last version remains after OPTIMIZE W/ CLEANUP =='; +OPTIMIZE TABLE test FINAL CLEANUP; +select * from test order by uid; + +-- insert d6 v=3 is_deleted=true (timestamp more recent so this version should be the one take into acount) +INSERT INTO test (*) VALUES ('d1', 1, 0), ('d1', 2, 1), ('d1', 3, 0), ('d1', 4, 1), ('d1', 5, 0), ('d2', 1, 0), ('d3', 1, 0), ('d4', 1, 0), ('d5', 1, 0), ('d6', 1, 0), ('d6', 3, 1); + +SELECT '== OPTIMIZE W/ CLEANUP (remove d6) =='; +OPTIMIZE TABLE test FINAL CLEANUP; +-- No d6 anymore +select * from test order by uid; + +DROP TABLE IF EXISTS test; diff --git a/parser/testdata/02861_uuid_format_serialization/ast.json b/parser/testdata/02861_uuid_format_serialization/ast.json new file mode 100644 index 000000000..1601efa9b --- /dev/null +++ b/parser/testdata/02861_uuid_format_serialization/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_uuid (children 1)" + }, + { + "explain": " Identifier t_uuid" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001064572, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/02861_uuid_format_serialization/metadata.json b/parser/testdata/02861_uuid_format_serialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02861_uuid_format_serialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02861_uuid_format_serialization/query.sql b/parser/testdata/02861_uuid_format_serialization/query.sql new file mode 100644 index 000000000..e73ef2d51 --- /dev/null +++ b/parser/testdata/02861_uuid_format_serialization/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS t_uuid; +CREATE TABLE t_uuid (x UUID) ENGINE=MergeTree ORDER BY x; + +INSERT INTO t_uuid VALUES ('61f0c404-5cb3-11e7-907b-a6006ad3dba0'), ('992f6910-42b2-43cd-98bc-c812fbf9b683'), 
('417ddc5d-e556-4d27-95dd-a34d84e46a50'); + +SELECT * FROM t_uuid ORDER BY x LIMIT 1 FORMAT RowBinary; +SELECT * FROM t_uuid ORDER BY x FORMAT RowBinary; + +DROP TABLE IF EXISTS t_uuid; diff --git a/parser/testdata/02862_sorted_distinct_sparse_fix/ast.json b/parser/testdata/02862_sorted_distinct_sparse_fix/ast.json new file mode 100644 index 000000000..8043b034a --- /dev/null +++ b/parser/testdata/02862_sorted_distinct_sparse_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00103466, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02862_sorted_distinct_sparse_fix/metadata.json b/parser/testdata/02862_sorted_distinct_sparse_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02862_sorted_distinct_sparse_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02862_sorted_distinct_sparse_fix/query.sql b/parser/testdata/02862_sorted_distinct_sparse_fix/query.sql new file mode 100644 index 000000000..7873f1e5c --- /dev/null +++ b/parser/testdata/02862_sorted_distinct_sparse_fix/query.sql @@ -0,0 +1,27 @@ +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +DROP TABLE IF EXISTS t_sparse_distinct; + +CREATE TABLE t_sparse_distinct (id UInt32, v String) +ENGINE = MergeTree +ORDER BY id +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9; + +SYSTEM STOP MERGES t_sparse_distinct; + +INSERT INTO t_sparse_distinct SELECT number % 10, toString(number % 100 = 0) FROM numbers(100); +INSERT INTO t_sparse_distinct(id) SELECT number % 10 FROM numbers(100); + +-- { echoOn } +SELECT name, column, serialization_kind +FROM system.parts_columns +WHERE table = 't_sparse_distinct' AND database = currentDatabase() AND column = 'v' +ORDER BY name; + +set optimize_distinct_in_order=1; +set max_threads=1; + +select splitByString(' ', trimLeft(explain))[1] from (explain pipeline SELECT DISTINCT id, v FROM t_sparse_distinct) where explain ilike '%DistinctSortedStreamTransform%'; +SELECT DISTINCT id, v FROM t_sparse_distinct format Null; + +DROP TABLE t_sparse_distinct; diff --git a/parser/testdata/02862_uuid_reinterpret_as_numeric/ast.json b/parser/testdata/02862_uuid_reinterpret_as_numeric/ast.json new file mode 100644 index 000000000..336be1145 --- /dev/null +++ b/parser/testdata/02862_uuid_reinterpret_as_numeric/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_uuid (children 1)" + }, + { + "explain": " Identifier t_uuid" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001178058, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/02862_uuid_reinterpret_as_numeric/metadata.json b/parser/testdata/02862_uuid_reinterpret_as_numeric/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02862_uuid_reinterpret_as_numeric/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02862_uuid_reinterpret_as_numeric/query.sql b/parser/testdata/02862_uuid_reinterpret_as_numeric/query.sql new file mode 100644 index 000000000..d6369835f --- /dev/null +++ b/parser/testdata/02862_uuid_reinterpret_as_numeric/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS t_uuid; +CREATE TABLE t_uuid (x UUID) ENGINE=MergeTree ORDER BY x; + +INSERT INTO t_uuid VALUES 
('61f0c404-5cb3-11e7-907b-a6006ad3dba0'); + +SELECT reinterpretAsUUID(x) FROM t_uuid; +SELECT reinterpretAsFloat32(x), reinterpretAsFloat64(x) FROM t_uuid; +SELECT reinterpretAsInt8(x), reinterpretAsInt16(x), reinterpretAsInt32(x), reinterpretAsInt64(x), reinterpretAsInt128(x), reinterpretAsInt256(x) FROM t_uuid; +SELECT reinterpretAsUInt8(x), reinterpretAsUInt16(x), reinterpretAsUInt32(x), reinterpretAsUInt64(x), reinterpretAsUInt128(x), reinterpretAsUInt256(x) FROM t_uuid; + +SELECT reinterpretAsUUID(reinterpretAsUInt128(reinterpretAsUInt32(reinterpretAsUInt256(x)))) FROM t_uuid; + +DROP TABLE IF EXISTS t_uuid; diff --git a/parser/testdata/02863_decode_html_component/ast.json b/parser/testdata/02863_decode_html_component/ast.json new file mode 100644 index 000000000..1fa9db957 --- /dev/null +++ b/parser/testdata/02863_decode_html_component/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function decodeHTMLComponent (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Hello, "world"!'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001297769, + "rows_read": 7, + "bytes_read": 291 + } +} diff --git a/parser/testdata/02863_decode_html_component/metadata.json b/parser/testdata/02863_decode_html_component/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02863_decode_html_component/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02863_decode_html_component/query.sql b/parser/testdata/02863_decode_html_component/query.sql new file mode 100644 index 000000000..0eb4653e3 --- /dev/null +++ b/parser/testdata/02863_decode_html_component/query.sql @@ -0,0 +1,24 @@ +SELECT decodeHTMLComponent('Hello, "world"!'); +SELECT decodeHTMLComponent('<123>'); +SELECT decodeHTMLComponent('&clickhouse'); +SELECT decodeHTMLComponent(''foo''); +SELECT decodeHTMLComponent('Hello, && world'); +SELECT decodeHTMLComponent('Hello, &;& world'); +SELECT decodeHTMLComponent('Hello, &a;& world'); +SELECT decodeHTMLComponent('Hello, <t;& world'); +SELECT decodeHTMLComponent('Hello, <t& world'); +SELECT decodeHTMLComponent('Hello, &t;& world'); + +SELECT decodeHTMLComponent(' !"#$%&'()*+,-./012'); +SELECT decodeHTMLComponent(')*+,-./0123456789:;<'); +SELECT decodeHTMLComponent('=>?@ABCDEFGHIJKLMNOP'); +SELECT decodeHTMLComponent('为'); +SELECT decodeHTMLComponent('为'); +SELECT decodeHTMLComponent('�'123'); +SELECT decodeHTMLComponent('ЦЦЮЮЫㄱ'); +SELECT decodeHTMLComponent('C𝓁𝒾𝒸𝓀𝐻𝑜𝓊𝓈𝑒'); +SELECT decodeHTMLComponent('C𝓁𝒾𝒸𝓀𝐻𝑜𝓊𝓈𝑒'); +SELECT decodeHTMLComponent('C𝓁𝒾𝒸𝓀𝐻𝑜𝓊𝓈𝑒{'); +SELECT decodeHTMLComponent(''); +SELECT decodeHTMLComponent('C'); + diff --git a/parser/testdata/02863_delayed_source_with_totals_and_extremes/ast.json b/parser/testdata/02863_delayed_source_with_totals_and_extremes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02863_delayed_source_with_totals_and_extremes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02863_delayed_source_with_totals_and_extremes/metadata.json b/parser/testdata/02863_delayed_source_with_totals_and_extremes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02863_delayed_source_with_totals_and_extremes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02863_delayed_source_with_totals_and_extremes/query.sql b/parser/testdata/02863_delayed_source_with_totals_and_extremes/query.sql new file mode 100644 index 000000000..9269df8b5 --- /dev/null +++ b/parser/testdata/02863_delayed_source_with_totals_and_extremes/query.sql @@ -0,0 +1,17 @@ +-- Tags: no-parallel +-- Tag no-parallel: failpoint is used which can force DelayedSource on other tests + +DROP TABLE IF EXISTS 02863_delayed_source; + +CREATE TABLE 02863_delayed_source(a Int64) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/02863_delayed_source/{replica}', 'r1') ORDER BY a; +INSERT INTO 02863_delayed_source VALUES (1), (2); + +SYSTEM ENABLE FAILPOINT use_delayed_remote_source; + +SELECT sum(a) FROM remote('127.0.0.4', currentDatabase(), '02863_delayed_source') WITH TOTALS SETTINGS extremes = 1; +SELECT max(explain like '%Delayed%') FROM (EXPLAIN PIPELINE graph=1 SELECT sum(a) FROM remote('127.0.0.4', currentDatabase(), '02863_delayed_source') WITH TOTALS SETTINGS extremes = 1); +SELECT sum(a) FROM remote('127.0.0.4', currentDatabase(), '02863_delayed_source') GROUP BY a ORDER BY a LIMIT 1 FORMAT JSON settings output_format_write_statistics=0; + +SYSTEM DISABLE FAILPOINT use_delayed_remote_source; + +DROP TABLE 02863_delayed_source; \ No newline at end of file diff --git a/parser/testdata/02863_ignore_foreign_keys_in_tables_definition/ast.json b/parser/testdata/02863_ignore_foreign_keys_in_tables_definition/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02863_ignore_foreign_keys_in_tables_definition/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02863_ignore_foreign_keys_in_tables_definition/metadata.json b/parser/testdata/02863_ignore_foreign_keys_in_tables_definition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02863_ignore_foreign_keys_in_tables_definition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02863_ignore_foreign_keys_in_tables_definition/query.sql b/parser/testdata/02863_ignore_foreign_keys_in_tables_definition/query.sql new file mode 100644 index 000000000..2d8146431 --- /dev/null +++ b/parser/testdata/02863_ignore_foreign_keys_in_tables_definition/query.sql @@ -0,0 +1,29 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/53380 + + +drop table if exists parent; +drop table if exists child; + +create table parent (id int, primary key(id)) engine MergeTree; +create table child (id int, pid int, primary key(id), foreign key(pid)) engine MergeTree; -- { clientError SYNTAX_ERROR } +create table child (id int, pid int, primary key(id), foreign key(pid) references) engine MergeTree; -- { clientError SYNTAX_ERROR } +create table child (id int, pid int, primary key(id), foreign key(pid) references parent(pid)) engine MergeTree; + +show create table child; + +create table child2 (id int, pid int, primary key(id), + foreign key(pid) references parent(pid) on delete) engine MergeTree; -- { clientError SYNTAX_ERROR } +create table child2 (id int, pid int, primary key(id), + foreign key(pid) references parent(pid) on delete cascade) engine MergeTree; + +show create table child2; + +create table child3 (id int, pid int, primary key(id), + foreign key(pid) references parent(pid) on delete cascade on update restrict) engine MergeTree; + +show create table child3; + +drop table child3; +drop 
table child2; +drop table child; +drop table parent; \ No newline at end of file diff --git a/parser/testdata/02863_interpolate_subquery/ast.json b/parser/testdata/02863_interpolate_subquery/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02863_interpolate_subquery/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02863_interpolate_subquery/metadata.json b/parser/testdata/02863_interpolate_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02863_interpolate_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02863_interpolate_subquery/query.sql b/parser/testdata/02863_interpolate_subquery/query.sql new file mode 100644 index 000000000..4d8ba5f9c --- /dev/null +++ b/parser/testdata/02863_interpolate_subquery/query.sql @@ -0,0 +1,7 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/53640 +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (i UInt32, a UInt32) ENGINE=Memory; +SELECT i, col1 FROM ( + SELECT i, a AS col1, a AS col2 FROM tab ORDER BY i WITH FILL INTERPOLATE (col1 AS col1+col2, col2) +); +DROP TABLE tab; diff --git a/parser/testdata/02863_mutation_where_in_set_result_cache_pipeline_stuck_bug/ast.json b/parser/testdata/02863_mutation_where_in_set_result_cache_pipeline_stuck_bug/ast.json new file mode 100644 index 000000000..32ddd2c1f --- /dev/null +++ b/parser/testdata/02863_mutation_where_in_set_result_cache_pipeline_stuck_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001516552, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02863_mutation_where_in_set_result_cache_pipeline_stuck_bug/metadata.json b/parser/testdata/02863_mutation_where_in_set_result_cache_pipeline_stuck_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02863_mutation_where_in_set_result_cache_pipeline_stuck_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02863_mutation_where_in_set_result_cache_pipeline_stuck_bug/query.sql b/parser/testdata/02863_mutation_where_in_set_result_cache_pipeline_stuck_bug/query.sql new file mode 100644 index 000000000..4c3079575 --- /dev/null +++ b/parser/testdata/02863_mutation_where_in_set_result_cache_pipeline_stuck_bug/query.sql @@ -0,0 +1,10 @@ +drop table if exists tab; +create table tab (x UInt32, y UInt32) engine = MergeTree order by x; + +insert into tab select number, number from numbers(10); +insert into tab select number, number from numbers(20); + +set mutations_sync=2; + +alter table tab delete where x > 1000 and y in (select sum(number + 1) from numbers_mt(1e7) group by number % 2 with totals); +drop table if exists tab; diff --git a/parser/testdata/02863_non_const_timezone_check/ast.json b/parser/testdata/02863_non_const_timezone_check/ast.json new file mode 100644 index 000000000..08211e184 --- /dev/null +++ b/parser/testdata/02863_non_const_timezone_check/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery Dates (children 1)" + }, + { + "explain": " Identifier Dates" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001241884, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git 
a/parser/testdata/02863_non_const_timezone_check/metadata.json b/parser/testdata/02863_non_const_timezone_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02863_non_const_timezone_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02863_non_const_timezone_check/query.sql b/parser/testdata/02863_non_const_timezone_check/query.sql new file mode 100644 index 000000000..4cb5457ae --- /dev/null +++ b/parser/testdata/02863_non_const_timezone_check/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS Dates; + +CREATE TABLE Dates (date DateTime('UTC')) ENGINE = MergeTree() ORDER BY date; + +INSERT INTO Dates VALUES ('2023-08-25 15:30:00'); + +SELECT formatDateTime((SELECT date FROM Dates), '%H%i%S', number % 2 ? 'America/Los_Angeles' : 'Europe/Amsterdam') FROM numbers(5); + +SELECT formatDateTime((SELECT materialize(date) FROM Dates), '%H%i%S', number % 2 ? 'America/Los_Angeles' : 'Europe/Amsterdam') FROM numbers(5); + +SELECT formatDateTime((SELECT materialize(date) FROM Dates), '%H%i%S', number % 2 ? '' : 'Europe/Amsterdam') FROM numbers(5); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT toString((SELECT date FROM Dates), number % 2 ? 'America/Los_Angeles' : 'Europe/Amsterdam') FROM numbers(5); + +SELECT toString((SELECT materialize(date) FROM Dates), number % 2 ? 'America/Los_Angeles' : 'Europe/Amsterdam') FROM numbers(5); + +SELECT toString((SELECT materialize(date) FROM Dates), number % 2 ? 'America/Los_Angeles' : '') FROM numbers(5); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +DROP TABLE Dates; diff --git a/parser/testdata/02864_filtered_url_with_globs/ast.json b/parser/testdata/02864_filtered_url_with_globs/ast.json new file mode 100644 index 000000000..1035fe7a3 --- /dev/null +++ b/parser/testdata/02864_filtered_url_with_globs/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function url (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'http:\/\/127.0.0.1:8123?query=select+{1,2}+as+x+format+TSV'" + }, + { + "explain": " Literal 'TSV'" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001310589, + "rows_read": 13, + "bytes_read": 523 + } +} diff --git a/parser/testdata/02864_filtered_url_with_globs/metadata.json b/parser/testdata/02864_filtered_url_with_globs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02864_filtered_url_with_globs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02864_filtered_url_with_globs/query.sql b/parser/testdata/02864_filtered_url_with_globs/query.sql new file mode 100644 index 000000000..e952f63af --- /dev/null +++ b/parser/testdata/02864_filtered_url_with_globs/query.sql @@ -0,0 +1,3 @@ +SELECT * FROM url('http://127.0.0.1:8123?query=select+{1,2}+as+x+format+TSV', 'TSV') WHERE 0; +SELECT _path FROM url('http://127.0.0.1:8123?query=select+{1,2}+as+x+format+TSV', 'TSV') WHERE 0; + diff --git 
a/parser/testdata/02864_profile_event_part_lock/ast.json b/parser/testdata/02864_profile_event_part_lock/ast.json new file mode 100644 index 000000000..3bf2a33d1 --- /dev/null +++ b/parser/testdata/02864_profile_event_part_lock/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery random_mt (children 1)" + }, + { + "explain": " Identifier random_mt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001274601, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02864_profile_event_part_lock/metadata.json b/parser/testdata/02864_profile_event_part_lock/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02864_profile_event_part_lock/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02864_profile_event_part_lock/query.sql b/parser/testdata/02864_profile_event_part_lock/query.sql new file mode 100644 index 000000000..2b2ac7b55 --- /dev/null +++ b/parser/testdata/02864_profile_event_part_lock/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS random_mt; + +CREATE TABLE random_mt +( + key UInt64, + value String +) +ENGINE MergeTree() +ORDER BY tuple(); + +INSERT INTO random_mt VALUES (1, 'Hello'); + +SELECT any(value > 0) from system.events WHERE event = 'PartsLockHoldMicroseconds' or event = 'PartsLockWaitMicroseconds'; + +DROP TABLE IF EXISTS random_mt; + diff --git a/parser/testdata/02864_replace_regexp_string_fallback/ast.json b/parser/testdata/02864_replace_regexp_string_fallback/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02864_replace_regexp_string_fallback/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02864_replace_regexp_string_fallback/metadata.json b/parser/testdata/02864_replace_regexp_string_fallback/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02864_replace_regexp_string_fallback/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02864_replace_regexp_string_fallback/query.sql b/parser/testdata/02864_replace_regexp_string_fallback/query.sql new file mode 100644 index 000000000..917c11fe8 --- /dev/null +++ b/parser/testdata/02864_replace_regexp_string_fallback/query.sql @@ -0,0 +1,11 @@ +-- Tests functions replaceRegexpAll and replaceRegexpOne with trivial patterns. These internally trigger a fallback to simple string replacement. + +-- _materialize_ because the shortcut is only implemented for non-const haystack + const needle + const replacement strings + +SELECT 'Hello' AS haystack, 'l' AS needle, 'x' AS replacement, replaceRegexpOne(materialize(haystack), needle, replacement), replaceRegexpAll(materialize(haystack), needle, replacement); + +-- negative tests + +-- Even if the fallback is used, invalid substitutions must throw an exception.
+SELECT 'Hello' AS haystack, 'l' AS needle, '\\1' AS replacement, replaceRegexpOne(materialize(haystack), needle, replacement); -- { serverError BAD_ARGUMENTS } +SELECT 'Hello' AS haystack, 'l' AS needle, '\\1' AS replacement, replaceRegexpAll(materialize(haystack), needle, replacement); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02864_statistics_bug_67742/ast.json b/parser/testdata/02864_statistics_bug_67742/ast.json new file mode 100644 index 000000000..c4e221ff8 --- /dev/null +++ b/parser/testdata/02864_statistics_bug_67742/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001557076, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02864_statistics_bug_67742/metadata.json b/parser/testdata/02864_statistics_bug_67742/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02864_statistics_bug_67742/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02864_statistics_bug_67742/query.sql b/parser/testdata/02864_statistics_bug_67742/query.sql new file mode 100644 index 000000000..82b60c642 --- /dev/null +++ b/parser/testdata/02864_statistics_bug_67742/query.sql @@ -0,0 +1,27 @@ +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; +SET mutations_sync = 1; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (a Float64 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); +INSERT INTO tab SELECT number FROM system.numbers LIMIT 10000; +SELECT count(*) FROM tab WHERE a < '10'; +DROP TABLE tab; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (a Int32 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); +INSERT INTO tab SELECT number FROM system.numbers LIMIT 10000; +SELECT count(*) FROM tab WHERE a < '10.5'; -- { serverError TYPE_MISMATCH } +DROP TABLE tab; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (a Int32 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); +INSERT INTO tab SELECT number FROM system.numbers LIMIT 10000; +SELECT count(*) FROM tab WHERE a < 10.5; +DROP TABLE tab; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (a Int16 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); +INSERT INTO tab SELECT number FROM system.numbers LIMIT 10000; +SELECT count(*) FROM tab WHERE a < '9999999999999999999999999'; +DROP TABLE tab; diff --git a/parser/testdata/02864_statistics_bug_69589/ast.json b/parser/testdata/02864_statistics_bug_69589/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02864_statistics_bug_69589/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02864_statistics_bug_69589/metadata.json b/parser/testdata/02864_statistics_bug_69589/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02864_statistics_bug_69589/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02864_statistics_bug_69589/query.sql b/parser/testdata/02864_statistics_bug_69589/query.sql new file mode 100644 index 000000000..79ce23411 --- /dev/null +++ b/parser/testdata/02864_statistics_bug_69589/query.sql @@ -0,0 +1,13 @@ +-- Tags: no-fasttest +-- no-fasttest: 'countmin' sketches need a 3rd party library + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; + +CREATE TABLE tab (c Nullable(Int)) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO tab (c) VALUES (1); 
+DELETE FROM tab WHERE TRUE; +INSERT INTO tab (c) VALUES (2); +ALTER TABLE tab ADD STATISTICS c TYPE countmin; +OPTIMIZE TABLE tab; +SELECT 1 FROM tab WHERE tab.c = 0; diff --git a/parser/testdata/02864_statistics_create_materialize_drop/ast.json b/parser/testdata/02864_statistics_create_materialize_drop/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02864_statistics_create_materialize_drop/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02864_statistics_create_materialize_drop/metadata.json b/parser/testdata/02864_statistics_create_materialize_drop/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02864_statistics_create_materialize_drop/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02864_statistics_create_materialize_drop/query.sql b/parser/testdata/02864_statistics_create_materialize_drop/query.sql new file mode 100644 index 000000000..249e3c84a --- /dev/null +++ b/parser/testdata/02864_statistics_create_materialize_drop/query.sql @@ -0,0 +1,35 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS tab SYNC; + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; +SET allow_suspicious_low_cardinality_types=1; +SET mutations_sync = 2; + + +SELECT 'Test create statistics:'; + +CREATE TABLE tab +( + a LowCardinality(Int64) STATISTICS(countmin, minmax, tdigest, uniq), + b LowCardinality(Nullable(String)) STATISTICS(countmin, uniq), + c LowCardinality(Nullable(Int64)) STATISTICS(countmin, minmax, tdigest, uniq), + d DateTime STATISTICS(countmin, minmax, tdigest, uniq), + pk String, +) Engine = MergeTree() ORDER BY pk; + +INSERT INTO tab select number, number, number, toDateTime(number), generateUUIDv4() FROM system.numbers LIMIT 10000; +SHOW CREATE TABLE tab; + + +SELECT 'Test materialize and drop statistics:'; +ALTER TABLE tab DROP STATISTICS a, b, c, d; +ALTER TABLE tab ADD STATISTICS b TYPE countmin, uniq; +ALTER TABLE tab MATERIALIZE STATISTICS b; +SHOW CREATE TABLE tab; + +ALTER TABLE tab DROP STATISTICS b; +SHOW CREATE TABLE tab; + +DROP TABLE IF EXISTS tab SYNC; diff --git a/parser/testdata/02864_statistics_ddl/ast.json b/parser/testdata/02864_statistics_ddl/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02864_statistics_ddl/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02864_statistics_ddl/metadata.json b/parser/testdata/02864_statistics_ddl/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02864_statistics_ddl/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02864_statistics_ddl/query.sql b/parser/testdata/02864_statistics_ddl/query.sql new file mode 100644 index 000000000..8620aec15 --- /dev/null +++ b/parser/testdata/02864_statistics_ddl/query.sql @@ -0,0 +1,222 @@ +-- Tags: no-fasttest, long +-- no-fasttest: 'countmin' sketches need a 3rd party library + +-- Tests DDL statements which create / drop / materialize statistics + +SET mutations_sync = 1; + +DROP TABLE IF EXISTS tab; + +SET allow_experimental_statistics = 0; +-- Error case: Can't create statistics when allow_experimental_statistics = 0 +CREATE TABLE tab (col Float64 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +SET allow_experimental_statistics = 1; + +-- Error case: Unknown statistics types are rejected +CREATE TABLE tab (col Float64 STATISTICS(no_statistics_type))
Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +-- Error case: The same statistics type can't exist more than once on a column +CREATE TABLE tab (col Float64 STATISTICS(tdigest, tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + +SET allow_suspicious_low_cardinality_types = 1; + +-- Statistics can only be created on columns of specific data types (depending on the statistics kind), (*) + +-- tdigest requires data_type.isValueRepresentedByInteger +-- These types work: +CREATE TABLE tab (col UInt8 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col UInt256 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Float32 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Decimal32(3) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date32 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime64 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Nullable(UInt8) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +-- These types don't work: +CREATE TABLE tab (col String STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col FixedString(1) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Array(Float64) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col UUID STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col IPv4 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col IPv6 STATISTICS(tdigest)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } + +-- uniq requires data_type.isValueRepresentedByInteger or (Fixed)String +-- These types work: +CREATE TABLE tab (col UInt8 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col UInt256 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Float32 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Decimal32(3) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date STATISTICS(uniq)) Engine = 
MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date32 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime64 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col IPv4 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Nullable(UInt8) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col String STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col FixedString(1) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +-- These types don't work: +CREATE TABLE tab (col Array(Float64) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col UUID STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col IPv6 STATISTICS(uniq)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } + +-- countmin requires data_type.isValueRepresentedByInteger or data_type = (Fixed)String +-- These types work: +CREATE TABLE tab (col UInt8 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col UInt256 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Float32 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Decimal32(3) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date32 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime64 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col IPv4 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Nullable(UInt8) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col String STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col 
FixedString(1) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +-- These types don't work: +CREATE TABLE tab (col Array(Float64) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col UUID STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col IPv6 STATISTICS(countmin)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } + +-- minmax requires data_type.isValueRepresentedByInteger +-- These types work: +CREATE TABLE tab (col UInt8 STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col UInt256 STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Float32 STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Decimal32(3) STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Date32 STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col DateTime64 STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Enum('hello', 'world') STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col IPv4 STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col Nullable(UInt8) STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(UInt8) STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +CREATE TABLE tab (col LowCardinality(Nullable(UInt8)) STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); DROP TABLE tab; +-- These types don't work: +CREATE TABLE tab (col String STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col FixedString(1) STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Array(Float64) STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Tuple(Float64, Float64) STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col Map(UInt64, UInt64) STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col UUID STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } +CREATE TABLE tab (col IPv6 STATISTICS(minmax)) Engine = MergeTree() ORDER BY tuple(); -- { serverError ILLEGAL_STATISTICS } + +-- CREATE TABLE was easy, ALTER is more fun + +CREATE TABLE tab +( + f64 Float64, + f64_tdigest Float64 STATISTICS(tdigest), + f32 Float32, + s String, + a Array(Float64) +) +Engine = MergeTree() +ORDER BY tuple(); + +-- Error case: Unknown statistics types are rejected 
+-- (relevant for ADD and MODIFY) +ALTER TABLE tab ADD STATISTICS f64 TYPE no_statistics_type; -- { serverError INCORRECT_QUERY } +ALTER TABLE tab ADD STATISTICS IF NOT EXISTS f64 TYPE no_statistics_type; -- { serverError INCORRECT_QUERY } +ALTER TABLE tab MODIFY STATISTICS f64 TYPE no_statistics_type; -- { serverError INCORRECT_QUERY } +-- for some reason, ALTER TABLE tab MODIFY STATISTICS IF EXISTS is not supported + +-- Error case: The same statistics type can't exist more than once on a column +-- (relevant for ADD and MODIFY) +-- Create the same statistics object twice +ALTER TABLE tab ADD STATISTICS f64 TYPE tdigest, tdigest; -- { serverError INCORRECT_QUERY } +ALTER TABLE tab ADD STATISTICS IF NOT EXISTS f64 TYPE tdigest, tdigest; -- { serverError INCORRECT_QUERY } +ALTER TABLE tab MODIFY STATISTICS f64 TYPE tdigest, tdigest; -- { serverError INCORRECT_QUERY } +-- Create a statistics object which already exists +ALTER TABLE tab ADD STATISTICS f64_tdigest TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab ADD STATISTICS IF NOT EXISTS f64_tdigest TYPE tdigest; -- no-op +ALTER TABLE tab MODIFY STATISTICS f64_tdigest TYPE tdigest; -- no-op + +-- Error case: Column does not exist +-- (relevant for ADD, MODIFY, DROP, CLEAR, and MATERIALIZE) +-- Note that the results are unfortunately quite inconsistent ... +ALTER TABLE tab ADD STATISTICS no_such_column TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab ADD STATISTICS IF NOT EXISTS no_such_column TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS no_such_column TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab DROP STATISTICS no_such_column; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab DROP STATISTICS IF EXISTS no_such_column; -- no-op +ALTER TABLE tab CLEAR STATISTICS no_such_column; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab CLEAR STATISTICS IF EXISTS no_such_column; -- no-op +ALTER TABLE tab MATERIALIZE STATISTICS no_such_column; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MATERIALIZE STATISTICS IF EXISTS no_such_column; -- { serverError ILLEGAL_STATISTICS } + +-- Error case: Column exists but has no statistics +-- (relevant for MODIFY, DROP, CLEAR, and MATERIALIZE) +-- Note that the results are unfortunately quite inconsistent ... +ALTER TABLE tab MODIFY STATISTICS s TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab DROP STATISTICS s; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab DROP STATISTICS IF EXISTS s; -- no-op +ALTER TABLE tab CLEAR STATISTICS s; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab CLEAR STATISTICS IF EXISTS s; -- no-op + +-- We don't check systematically that statistics can only be created via ALTER ADD STATISTICS on columns of specific data types (the +-- internal type validation code is already tested above, (*)). Only do a rudimentary check for each statistics type with a data type that +-- works and one that doesn't work.
+-- tdigest +-- Works: +ALTER TABLE tab ADD STATISTICS f64 TYPE tdigest; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab MODIFY STATISTICS f64 TYPE tdigest; ALTER TABLE tab DROP STATISTICS f64; +-- Doesn't work: +ALTER TABLE tab ADD STATISTICS a TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS a TYPE tdigest; -- { serverError ILLEGAL_STATISTICS } +-- uniq +-- Works: +ALTER TABLE tab ADD STATISTICS f64 TYPE uniq; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab MODIFY STATISTICS f64 TYPE countmin; ALTER TABLE tab DROP STATISTICS f64; +-- Doesn't work: +ALTER TABLE tab ADD STATISTICS a TYPE uniq; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS a TYPE uniq; -- { serverError ILLEGAL_STATISTICS } +-- countmin +-- Works: +ALTER TABLE tab ADD STATISTICS f64 TYPE countmin; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab MODIFY STATISTICS f64 TYPE countmin; ALTER TABLE tab DROP STATISTICS f64; +-- Doesn't work: +ALTER TABLE tab ADD STATISTICS a TYPE countmin; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS a TYPE countmin; -- { serverError ILLEGAL_STATISTICS } +-- minmax +-- Works: +ALTER TABLE tab ADD STATISTICS f64 TYPE minmax; ALTER TABLE tab DROP STATISTICS f64; +ALTER TABLE tab MODIFY STATISTICS f64 TYPE minmax; ALTER TABLE tab DROP STATISTICS f64; +-- Doesn't work: +ALTER TABLE tab ADD STATISTICS a TYPE minmax; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY STATISTICS a TYPE minmax; -- { serverError ILLEGAL_STATISTICS } +ALTER TABLE tab MODIFY COLUMN f64_tdigest UInt64; + +-- Finally, do a full-circle test of a good case. Print table definition after each step. +-- Intentionally specifying _two_ columns and _two_ statistics types to have that also tested. +SHOW CREATE TABLE tab; +ALTER TABLE tab ADD STATISTICS f64, f32 TYPE tdigest, uniq; +SHOW CREATE TABLE tab; +ALTER TABLE tab MODIFY STATISTICS f64, f32 TYPE tdigest, uniq; +SHOW CREATE TABLE tab; +ALTER TABLE tab CLEAR STATISTICS f64, f32; +SHOW CREATE TABLE tab; +ALTER TABLE tab MATERIALIZE STATISTICS f64, f32; +SHOW CREATE TABLE tab; +ALTER TABLE tab DROP STATISTICS f64, f32; +SHOW CREATE TABLE tab; + +DROP TABLE tab; diff --git a/parser/testdata/02864_statistics_delayed_materialization_in_merge/ast.json b/parser/testdata/02864_statistics_delayed_materialization_in_merge/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02864_statistics_delayed_materialization_in_merge/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02864_statistics_delayed_materialization_in_merge/metadata.json b/parser/testdata/02864_statistics_delayed_materialization_in_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02864_statistics_delayed_materialization_in_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02864_statistics_delayed_materialization_in_merge/query.sql b/parser/testdata/02864_statistics_delayed_materialization_in_merge/query.sql new file mode 100644 index 000000000..d469a4c20 --- /dev/null +++ b/parser/testdata/02864_statistics_delayed_materialization_in_merge/query.sql @@ -0,0 +1,36 @@ +-- Tests delayed materialization of statistics in merge instead of during insert (setting 'materialize_statistics_on_insert = 0'). +-- (The concrete statistics type, column data type and predicate type don't matter) + +-- The check relies on the predicate evaluation order in EXPLAIN.
This is quite fragile; a better approach would be helpful (maybe 'send_logs_level'?) + +DROP TABLE IF EXISTS tab; + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; +SET enable_analyzer = 1; + +SET materialize_statistics_on_insert = 0; + +CREATE TABLE tab +( + a Int64 STATISTICS(tdigest), + b Int16 STATISTICS(tdigest), +) ENGINE = MergeTree() ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0, enable_vertical_merge_algorithm = 0; -- TODO: there is a bug in vertical merge with statistics. + +INSERT INTO tab SELECT number, -number FROM system.numbers LIMIT 10000; +SELECT 'After insert'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks b first, then a (statistics not used) + +OPTIMIZE TABLE tab FINAL; +SELECT 'After merge'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +TRUNCATE TABLE tab; +SET mutations_sync = 2; +INSERT INTO tab SELECT number, -number FROM system.numbers LIMIT 10000; +ALTER TABLE tab MATERIALIZE STATISTICS a, b; +SELECT 'After truncate, insert, and materialize'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +DROP TABLE tab; diff --git a/parser/testdata/02864_statistics_predicates/ast.json b/parser/testdata/02864_statistics_predicates/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02864_statistics_predicates/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02864_statistics_predicates/metadata.json b/parser/testdata/02864_statistics_predicates/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02864_statistics_predicates/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02864_statistics_predicates/query.sql b/parser/testdata/02864_statistics_predicates/query.sql new file mode 100644 index 000000000..d7afba12c --- /dev/null +++ b/parser/testdata/02864_statistics_predicates/query.sql @@ -0,0 +1,250 @@ +-- Tags: no-fasttest +-- no-fasttest: 'countmin' sketches need a 3rd party library + +-- Tests the cross product of all predicates with all right-hand sides on all data types and all statistics types.
+ +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + u64 UInt64, + u64_tdigest UInt64 STATISTICS(tdigest), + u64_minmax UInt64 STATISTICS(minmax), + u64_countmin UInt64 STATISTICS(countmin), + u64_uniq UInt64 STATISTICS(uniq), + f64 Float64, + f64_tdigest Float64 STATISTICS(tdigest), + f64_minmax Float64 STATISTICS(minmax), + f64_countmin Float64 STATISTICS(countmin), + f64_uniq Float64 STATISTICS(uniq), + dt DateTime, + dt_tdigest DateTime STATISTICS(tdigest), + dt_minmax DateTime STATISTICS(minmax), + dt_countmin DateTime STATISTICS(countmin), + dt_uniq DateTime STATISTICS(uniq), + b Bool, + b_tdigest Bool STATISTICS(tdigest), + b_minmax Bool STATISTICS(minmax), + b_countmin Bool STATISTICS(countmin), + b_uniq Bool STATISTICS(uniq), + s String, + -- s_tdigest String STATISTICS(tdigest), -- not supported by tdigest + -- s_minmax String STATISTICS(minmax), -- not supported by minmax + s_countmin String STATISTICS(countmin), + s_uniq String STATISTICS(uniq) +) Engine = MergeTree() ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO tab +-- SELECT number % 10000, number % 1000, -(number % 100) FROM system.numbers LIMIT 10000; +SELECT number % 1000, -- u64 + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, -- f64 + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 1000, -- dt + number % 1000, + number % 1000, + number % 1000, + number % 1000, + number % 2, -- b + number % 2, + number % 2, + number % 2, + number % 2, + toString(number % 1000), + toString(number % 1000), + toString(number % 1000) +FROM system.numbers LIMIT 10000; + +-- u64 ---------------------------------------------------- + +SELECT 'u64 and ='; + +SELECT count(*) FROM tab WHERE u64 = 7; +SELECT count(*) FROM tab WHERE u64_tdigest = 7; +SELECT count(*) FROM tab WHERE u64_minmax = 7; +SELECT count(*) FROM tab WHERE u64_countmin = 7; +SELECT count(*) FROM tab WHERE u64_uniq = 7; + +SELECT count(*) FROM tab WHERE u64 = 7.7; +SELECT count(*) FROM tab WHERE u64_tdigest = 7.7; +SELECT count(*) FROM tab WHERE u64_minmax = 7.7; +SELECT count(*) FROM tab WHERE u64_countmin = 7.7; +SELECT count(*) FROM tab WHERE u64_uniq = 7.7; + +SELECT count(*) FROM tab WHERE u64 = '7'; +SELECT count(*) FROM tab WHERE u64_tdigest = '7'; +SELECT count(*) FROM tab WHERE u64_minmax = '7'; +SELECT count(*) FROM tab WHERE u64_countmin = '7'; +SELECT count(*) FROM tab WHERE u64_uniq = '7'; + +SELECT count(*) FROM tab WHERE u64 = '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_tdigest = '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_minmax = '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_countmin = '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_uniq = '7.7'; -- { serverError TYPE_MISMATCH } + +SELECT 'u64 and <'; + +SELECT count(*) FROM tab WHERE u64 < 7; +SELECT count(*) FROM tab WHERE u64_tdigest < 7; +SELECT count(*) FROM tab WHERE u64_minmax < 7; +SELECT count(*) FROM tab WHERE u64_countmin < 7; +SELECT count(*) FROM tab WHERE u64_uniq < 7; + +SELECT count(*) FROM tab WHERE u64 < 7.7; +SELECT count(*) FROM tab WHERE u64_tdigest < 7.7; +SELECT count(*) FROM tab WHERE u64_minmax < 7.7; +SELECT count(*) FROM tab WHERE u64_countmin < 7.7; +SELECT count(*) FROM tab WHERE u64_uniq < 7.7; + +SELECT count(*) FROM tab WHERE u64 < '7'; +SELECT count(*) FROM tab WHERE u64_tdigest < '7'; +SELECT 
count(*) FROM tab WHERE u64_minmax < '7'; +SELECT count(*) FROM tab WHERE u64_countmin < '7'; +SELECT count(*) FROM tab WHERE u64_uniq < '7'; + +SELECT count(*) FROM tab WHERE u64 < '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_tdigest < '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_minmax < '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_countmin < '7.7'; -- { serverError TYPE_MISMATCH } +SELECT count(*) FROM tab WHERE u64_uniq < '7.7'; -- { serverError TYPE_MISMATCH } + +-- f64 ---------------------------------------------------- + +SELECT 'f64 and ='; + +SELECT count(*) FROM tab WHERE f64 = 7; +SELECT count(*) FROM tab WHERE f64_tdigest = 7; +SELECT count(*) FROM tab WHERE f64_minmax = 7; +SELECT count(*) FROM tab WHERE f64_countmin = 7; +SELECT count(*) FROM tab WHERE f64_uniq = 7; + +SELECT count(*) FROM tab WHERE f64 = 7.7; +SELECT count(*) FROM tab WHERE f64_tdigest = 7.7; +SELECT count(*) FROM tab WHERE f64_minmax = 7.7; +SELECT count(*) FROM tab WHERE f64_countmin = 7.7; +SELECT count(*) FROM tab WHERE f64_uniq = 7.7; + +SELECT count(*) FROM tab WHERE f64 = '7'; +SELECT count(*) FROM tab WHERE f64_tdigest = '7'; +SELECT count(*) FROM tab WHERE f64_minmax = '7'; +SELECT count(*) FROM tab WHERE f64_countmin = '7'; +SELECT count(*) FROM tab WHERE f64_uniq = '7'; + +SELECT count(*) FROM tab WHERE f64 = '7.7'; +SELECT count(*) FROM tab WHERE f64_tdigest = '7.7'; +SELECT count(*) FROM tab WHERE f64_minmax = '7.7'; +SELECT count(*) FROM tab WHERE f64_countmin = '7.7'; +SELECT count(*) FROM tab WHERE f64_uniq = '7.7'; + +SELECT 'f64 and <'; + +SELECT count(*) FROM tab WHERE f64 < 7; +SELECT count(*) FROM tab WHERE f64_tdigest < 7; +SELECT count(*) FROM tab WHERE f64_minmax < 7; +SELECT count(*) FROM tab WHERE f64_countmin < 7; +SELECT count(*) FROM tab WHERE f64_uniq < 7; + +SELECT count(*) FROM tab WHERE f64 < 7.7; +SELECT count(*) FROM tab WHERE f64_tdigest < 7.7; +SELECT count(*) FROM tab WHERE f64_minmax < 7.7; +SELECT count(*) FROM tab WHERE f64_countmin < 7.7; +SELECT count(*) FROM tab WHERE f64_uniq < 7.7; + +SELECT count(*) FROM tab WHERE f64 < '7'; +SELECT count(*) FROM tab WHERE f64_tdigest < '7'; +SELECT count(*) FROM tab WHERE f64_minmax < '7'; +SELECT count(*) FROM tab WHERE f64_countmin < '7'; +SELECT count(*) FROM tab WHERE f64_uniq < '7'; + +SELECT count(*) FROM tab WHERE f64 < '7.7'; +SELECT count(*) FROM tab WHERE f64_tdigest < '7.7'; +SELECT count(*) FROM tab WHERE f64_minmax < '7.7'; +SELECT count(*) FROM tab WHERE f64_countmin < '7.7'; +SELECT count(*) FROM tab WHERE f64_uniq < '7.7'; + +-- dt ---------------------------------------------------- + +SELECT 'dt and ='; + +SELECT count(*) FROM tab WHERE dt = '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_tdigest = '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_minmax = '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_countmin = '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_uniq = '2024-08-08 11:12:13'; + +SELECT count(*) FROM tab WHERE dt = 7; +SELECT count(*) FROM tab WHERE dt_tdigest = 7; +SELECT count(*) FROM tab WHERE dt_minmax = 7; +SELECT count(*) FROM tab WHERE dt_countmin = 7; +SELECT count(*) FROM tab WHERE dt_uniq = 7; + +SELECT 'dt and <'; + +SELECT count(*) FROM tab WHERE dt < '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_tdigest < '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_minmax < '2024-08-08 11:12:13'; +SELECT count(*) FROM tab WHERE dt_countmin < 
'2024-08-08 11:12:13';
+SELECT count(*) FROM tab WHERE dt_uniq < '2024-08-08 11:12:13';
+
+SELECT count(*) FROM tab WHERE dt < 7;
+SELECT count(*) FROM tab WHERE dt_tdigest < 7;
+SELECT count(*) FROM tab WHERE dt_minmax < 7;
+SELECT count(*) FROM tab WHERE dt_countmin < 7;
+SELECT count(*) FROM tab WHERE dt_uniq < 7;
+
+-- b ----------------------------------------------------
+
+SELECT 'b and =';
+
+SELECT count(*) FROM tab WHERE b = true;
+SELECT count(*) FROM tab WHERE b_tdigest = true;
+SELECT count(*) FROM tab WHERE b_minmax = true;
+SELECT count(*) FROM tab WHERE b_countmin = true;
+SELECT count(*) FROM tab WHERE b_uniq = true;
+
+SELECT count(*) FROM tab WHERE b = 'true';
+SELECT count(*) FROM tab WHERE b_tdigest = 'true';
+SELECT count(*) FROM tab WHERE b_minmax = 'true';
+SELECT count(*) FROM tab WHERE b_countmin = 'true';
+SELECT count(*) FROM tab WHERE b_uniq = 'true';
+
+SELECT count(*) FROM tab WHERE b = 1;
+SELECT count(*) FROM tab WHERE b_tdigest = 1;
+SELECT count(*) FROM tab WHERE b_minmax = 1;
+SELECT count(*) FROM tab WHERE b_countmin = 1;
+SELECT count(*) FROM tab WHERE b_uniq = 1;
+
+SELECT count(*) FROM tab WHERE b = 1.1;
+SELECT count(*) FROM tab WHERE b_tdigest = 1.1;
+SELECT count(*) FROM tab WHERE b_minmax = 1.1;
+SELECT count(*) FROM tab WHERE b_countmin = 1.1;
+SELECT count(*) FROM tab WHERE b_uniq = 1.1;
+
+-- s ----------------------------------------------------
+
+SELECT 's and =';
+
+SELECT count(*) FROM tab WHERE s = 7; -- { serverError NO_COMMON_TYPE }
+-- SELECT count(*) FROM tab WHERE s_tdigest = 7; -- not supported
+-- SELECT count(*) FROM tab WHERE s_minmax = 7; -- not supported
+SELECT count(*) FROM tab WHERE s_countmin = 7; -- { serverError NO_COMMON_TYPE }
+SELECT count(*) FROM tab WHERE s_uniq = 7; -- { serverError NO_COMMON_TYPE }
+
+SELECT count(*) FROM tab WHERE s = '7';
+-- SELECT count(*) FROM tab WHERE s_tdigest = '7'; -- not supported
+-- SELECT count(*) FROM tab WHERE s_minmax = '7'; -- not supported
+SELECT count(*) FROM tab WHERE s_countmin = '7';
+SELECT count(*) FROM tab WHERE s_uniq = '7';
+
+DROP TABLE tab;
diff --git a/parser/testdata/02864_statistics_usage/ast.json b/parser/testdata/02864_statistics_usage/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02864_statistics_usage/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02864_statistics_usage/metadata.json b/parser/testdata/02864_statistics_usage/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02864_statistics_usage/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02864_statistics_usage/query.sql b/parser/testdata/02864_statistics_usage/query.sql
new file mode 100644
index 000000000..8ad613d1e
--- /dev/null
+++ b/parser/testdata/02864_statistics_usage/query.sql
@@ -0,0 +1,44 @@
+-- Test that the optimizer picks up column statistics
+-- (The concrete statistics type, column data type and predicate type don't matter)
+
+-- Checks via the predicate evaluation order in EXPLAIN. This is quite fragile; a better approach would be helpful (maybe 'send_logs_level'?)
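+-- A rough sketch of the 'send_logs_level' idea mentioned above (hypothetical; this test does not use it):
+-- SET send_logs_level = 'trace';
+-- SELECT count(*) FROM tab WHERE b < 10 and a < 10;
+-- The server log would then reveal which column the PREWHERE optimization picked, without parsing EXPLAIN output.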
+ +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; +SET mutations_sync = 1; +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + a Float64 STATISTICS(tdigest), + b Int64 STATISTICS(tdigest) +) Engine = MergeTree() ORDER BY tuple() +SETTINGS auto_statistics_types = ''; + +INSERT INTO tab select number, -number FROM system.numbers LIMIT 10000; +SELECT 'After insert'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +ALTER TABLE tab DROP STATISTICS a, b; +SELECT 'After drop statistic'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks b first, then a (statistics not used) + +SELECT name, column, statistics from system.parts_columns where (database = currentDatabase()) AND (table = 'tab'); +ALTER TABLE tab ADD STATISTICS a, b TYPE tdigest; +ALTER TABLE tab MATERIALIZE STATISTICS ALL; +SELECT name, column, statistics from system.parts_columns where (database = currentDatabase()) AND (table = 'tab'); +INSERT INTO tab select number, -number FROM system.numbers LIMIT 10000; +SELECT 'After add and materialize statistic'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +OPTIMIZE TABLE tab FINAL; +SELECT 'After merge'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE b < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then b (statistics used) + +ALTER TABLE tab RENAME COLUMN b TO c; +SELECT 'After rename'; +SELECT replaceRegexpAll(explain, '__table1\.', '') FROM (EXPLAIN actions=1 SELECT count(*) FROM tab WHERE c < 10 and a < 10) WHERE explain LIKE '%Prewhere%'; -- checks a first, then c (statistics used) + +DROP TABLE IF EXISTS tab; diff --git a/parser/testdata/02864_test_ipv4_type_mismatch/ast.json b/parser/testdata/02864_test_ipv4_type_mismatch/ast.json new file mode 100644 index 000000000..70efe2b39 --- /dev/null +++ b/parser/testdata/02864_test_ipv4_type_mismatch/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00133741, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02864_test_ipv4_type_mismatch/metadata.json b/parser/testdata/02864_test_ipv4_type_mismatch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02864_test_ipv4_type_mismatch/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02864_test_ipv4_type_mismatch/query.sql b/parser/testdata/02864_test_ipv4_type_mismatch/query.sql new file mode 100644 index 000000000..20d0976af --- /dev/null +++ b/parser/testdata/02864_test_ipv4_type_mismatch/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + ip IPv4 Codec(ZSTD(6)), +) ENGINE MergeTree() order by ip; + +INSERT INTO test values ('1.1.1.1'); +INSERT INTO test values (toIPv4('8.8.8.8')); + +SELECT * FROM test ORDER BY ip; +SELECT ip IN IPv4StringToNum('1.1.1.1') FROM test order by ip; +SELECT ip IN ('1.1.1.1') 
FROM test order by ip; +SELECT ip IN IPv4StringToNum('8.8.8.8') FROM test order by ip; diff --git a/parser/testdata/02865_array_join_with_max_block_size/ast.json b/parser/testdata/02865_array_join_with_max_block_size/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02865_array_join_with_max_block_size/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02865_array_join_with_max_block_size/metadata.json b/parser/testdata/02865_array_join_with_max_block_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02865_array_join_with_max_block_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02865_array_join_with_max_block_size/query.sql b/parser/testdata/02865_array_join_with_max_block_size/query.sql new file mode 100644 index 000000000..bc8297826 --- /dev/null +++ b/parser/testdata/02865_array_join_with_max_block_size/query.sql @@ -0,0 +1,16 @@ +-- { echoOn } +set max_block_size = 10, enable_unaligned_array_join = true; +SELECT n, count(1) from (SELECT groupArray(number % 10) AS x FROM (SELECT * FROM numbers(100000))) ARRAY JOIN x as n group by n; +SELECT n % 10, count(1) from (SELECT range(0, number) as x FROM numbers(1000)) LEFT ARRAY JOIN x as n group by n % 10; +SELECT (m+n) % 10, count(1) from (SELECT range(0, number+1) as x, range(0, number+2) as y FROM numbers(100)) ARRAY JOIN x as m, y as n group by (m+n) % 10; + +set max_block_size = 1000, enable_unaligned_array_join = true; +SELECT n, count(1) from (SELECT groupArray(number % 10) AS x FROM (SELECT * FROM numbers(100000))) ARRAY JOIN x as n group by n; +SELECT n % 10, count(1) from (SELECT range(0, number) as x FROM numbers(1000)) LEFT ARRAY JOIN x as n group by n % 10; +SELECT (m+n) % 10, count(1) from (SELECT range(0, number+1) as x, range(0, number+2) as y FROM numbers(100)) ARRAY JOIN x as m, y as n group by (m+n) % 10; + +set max_block_size = 100000, enable_unaligned_array_join = true; +SELECT n, count(1) from (SELECT groupArray(number % 10) AS x FROM (SELECT * FROM numbers(100000))) ARRAY JOIN x as n group by n; +SELECT n % 10, count(1) from (SELECT range(0, number) as x FROM numbers(1000)) LEFT ARRAY JOIN x as n group by n % 10; +SELECT (m+n) % 10, count(1) from (SELECT range(0, number+1) as x, range(0, number+2) as y FROM numbers(100)) ARRAY JOIN x as m, y as n group by (m+n) % 10; +-- { echoOff } diff --git a/parser/testdata/02866_size_of_marks_skip_idx_explain/ast.json b/parser/testdata/02866_size_of_marks_skip_idx_explain/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02866_size_of_marks_skip_idx_explain/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02866_size_of_marks_skip_idx_explain/metadata.json b/parser/testdata/02866_size_of_marks_skip_idx_explain/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02866_size_of_marks_skip_idx_explain/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02866_size_of_marks_skip_idx_explain/query.sql b/parser/testdata/02866_size_of_marks_skip_idx_explain/query.sql new file mode 100644 index 000000000..b3adf3871 --- /dev/null +++ b/parser/testdata/02866_size_of_marks_skip_idx_explain/query.sql @@ -0,0 +1,24 @@ +-- Tags: no-random-merge-tree-settings + +SET optimize_move_to_prewhere = 1; +SET convert_query_to_cnf = 0; +SET optimize_read_in_order = 1; + +SET enable_analyzer = 1; -- slightly different operator names than 
w/o + +DROP TABLE IF EXISTS test_skip_idx; + +CREATE TABLE test_skip_idx ( + id UInt32, + INDEX name_idx_g2 id TYPE minmax GRANULARITY 2, + INDEX name_idx_g1 id TYPE minmax GRANULARITY 1) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 1, index_granularity_bytes = 0, min_bytes_for_wide_part = 0; + +INSERT INTO test_skip_idx SELECT number FROM system.numbers LIMIT 5 OFFSET 1; + +EXPLAIN indexes = 1 SELECT * FROM test_skip_idx WHERE id < 2; +EXPLAIN indexes = 1 SELECT * FROM test_skip_idx WHERE id < 3; + +DROP TABLE test_skip_idx; diff --git a/parser/testdata/02867_create_user_ssh/ast.json b/parser/testdata/02867_create_user_ssh/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02867_create_user_ssh/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02867_create_user_ssh/metadata.json b/parser/testdata/02867_create_user_ssh/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02867_create_user_ssh/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02867_create_user_ssh/query.sql b/parser/testdata/02867_create_user_ssh/query.sql new file mode 100644 index 000000000..3e3cb30a6 --- /dev/null +++ b/parser/testdata/02867_create_user_ssh/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-fasttest, no-parallel + +-- Tests user authentication with SSH public keys + +DROP USER IF EXISTS test_user_02867; + +-- negative tests +CREATE USER test_user_02867 IDENTIFIED WITH ssh_key BY KEY 'invalid_key' TYPE 'ssh-rsa'; -- { serverError LIBSSH_ERROR } +CREATE USER test_user_02867 IDENTIFIED WITH ssh_key BY KEY 'invalid_key' TYPE 'ssh-rsa', KEY 'invalid_key' TYPE 'ssh-rsa'; -- { serverError LIBSSH_ERROR } +CREATE USER test_user_02867 IDENTIFIED WITH ssh_key +BY KEY 'AAAAB3NzaC1yc2EAAAADAQABAAABgQCVTUso7/LQcBljfsHwyuL6fWfIvS3BaVpYB8lwf/ZylSOltBy6YlABtTU3mIb197d2DW99RcLKk174f5Zj5rUukXbV0fnufWvwd37fbb1eKM8zxBYvXs53EI5QBPZgKACIzMpYYZeJnAP0oZhUfWWtKXpy/SQ5CHiEIGD9RNYDL+uXZejMwC5r/+f2AmrATBo+Y+WJFZIvhj4uznFYvyvNTUz/YDvZCk+vwwIgiv4BpFCaZm2TeETTj6SvK567bZznLP5HXrkVbB5lhxjAkahc2w/Yjm//Fwto3xsMoJwROxJEU8L1kZ40QWPqjo7Tmr6C/hL2cKDNgWOEqrjLKQmh576s1+PfxwXpVPjLK4PHVSvuJLV88sn0iPdspLlKlDCdc7T9MqIrjJfxuhqnaoFQ7U+oBte8vkm1wGu76+WEC3iNWVAiIVZxLx9rUEsDqj3OovqfLiRsTmNLeY94p2asZjkx7rU48ZwuYN5XGafYsArPscj9Ve6RoRrof+5Q7cc=' +TYPE 'invalid_algorithm'; -- { serverError LIBSSH_ERROR } + +CREATE USER test_user_02867 IDENTIFIED WITH ssh_key +BY KEY 'AAAAB3NzaC1yc2EAAAADAQABAAABgQCVTUso7/LQcBljfsHwyuL6fWfIvS3BaVpYB8lwf/ZylSOltBy6YlABtTU3mIb197d2DW99RcLKk174f5Zj5rUukXbV0fnufWvwd37fbb1eKM8zxBYvXs53EI5QBPZgKACIzMpYYZeJnAP0oZhUfWWtKXpy/SQ5CHiEIGD9RNYDL+uXZejMwC5r/+f2AmrATBo+Y+WJFZIvhj4uznFYvyvNTUz/YDvZCk+vwwIgiv4BpFCaZm2TeETTj6SvK567bZznLP5HXrkVbB5lhxjAkahc2w/Yjm//Fwto3xsMoJwROxJEU8L1kZ40QWPqjo7Tmr6C/hL2cKDNgWOEqrjLKQmh576s1+PfxwXpVPjLK4PHVSvuJLV88sn0iPdspLlKlDCdc7T9MqIrjJfxuhqnaoFQ7U+oBte8vkm1wGu76+WEC3iNWVAiIVZxLx9rUEsDqj3OovqfLiRsTmNLeY94p2asZjkx7rU48ZwuYN5XGafYsArPscj9Ve6RoRrof+5Q7cc=' +TYPE 'ssh-rsa'; + +SHOW CREATE USER test_user_02867; + +DROP USER test_user_02867; diff --git a/parser/testdata/02867_null_lc_in_bug/ast.json b/parser/testdata/02867_null_lc_in_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02867_null_lc_in_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02867_null_lc_in_bug/metadata.json b/parser/testdata/02867_null_lc_in_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02867_null_lc_in_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02867_null_lc_in_bug/query.sql b/parser/testdata/02867_null_lc_in_bug/query.sql new file mode 100644 index 000000000..121b1447d --- /dev/null +++ b/parser/testdata/02867_null_lc_in_bug/query.sql @@ -0,0 +1,17 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/50570 + +DROP TABLE IF EXISTS tnul SYNC; +DROP TABLE IF EXISTS tlc SYNC; + +CREATE TABLE tnul (lc Nullable(String)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO tnul VALUES (NULL), ('qwe'); +SELECT 'pure nullable result:'; +SELECT lc FROM tnul WHERE notIn(lc, ('rty', 'uiop')); +DROP TABLE tnul SYNC; + + +CREATE TABLE tlc (lc LowCardinality(Nullable(String))) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO tlc VALUES (NULL), ('qwe'); +SELECT 'wrapping in LC:'; +SELECT lc FROM tlc WHERE notIn(lc, ('rty', 'uiop')); +DROP TABLE tlc SYNC; diff --git a/parser/testdata/02867_nullable_primary_key_final/ast.json b/parser/testdata/02867_nullable_primary_key_final/ast.json new file mode 100644 index 000000000..d0588bcdd --- /dev/null +++ b/parser/testdata/02867_nullable_primary_key_final/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001355981, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02867_nullable_primary_key_final/metadata.json b/parser/testdata/02867_nullable_primary_key_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02867_nullable_primary_key_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02867_nullable_primary_key_final/query.sql b/parser/testdata/02867_nullable_primary_key_final/query.sql new file mode 100644 index 000000000..773a6d35b --- /dev/null +++ b/parser/testdata/02867_nullable_primary_key_final/query.sql @@ -0,0 +1,65 @@ +DROP TABLE IF EXISTS t; + +CREATE TABLE t +( + `d` Nullable(Date), + `f1` Nullable(String), + `f2` Nullable(String), + `c` Nullable(Int64) +) +ENGINE = ReplacingMergeTree +ORDER BY (f1, f2, d) +SETTINGS allow_nullable_key = 1; + +INSERT INTO t SELECT + toDate('2023-09-10', 'UTC') AS d, + [number % 99999, NULL][number % 2] AS f1, + ['x', NULL][number % 2] AS f2, + [number, NULL][number % 2] AS c +FROM numbers(100000); + +SELECT + date_trunc('month', d), + SUM(c) +FROM t +FINAL +WHERE f2 = 'x' +GROUP BY 1; + +DROP TABLE t; + +CREATE TABLE t +( + `d` Nullable(Date), + `f1` Nullable(String), + `f2` Nullable(String), + `c` Nullable(Int64) +) +ENGINE = SummingMergeTree +ORDER BY (f1, f2, d) +SETTINGS allow_nullable_key = 1, index_granularity = 1; + +INSERT INTO t SELECT + toDate('2023-09-10', 'UTC') AS d, + NULL AS f1, + ['x', 'y', 'z'][number % 3] AS f2, + number AS c +FROM numbers(1000); + +SELECT + date_trunc('month', d), + SUM(c) +FROM t +FINAL +WHERE f2 = 'x' +GROUP BY 1; + +DROP TABLE t; + +CREATE TABLE t (o Nullable(String), p Nullable(String)) ENGINE = ReplacingMergeTree ORDER BY (p, o) SETTINGS allow_nullable_key = 1, index_granularity = 2; + +INSERT INTO t SELECT number, NULL FROM numbers(10); + +SELECT count() FROM t FINAL; + +DROP TABLE t; diff --git a/parser/testdata/02868_distinct_to_count_optimization/ast.json b/parser/testdata/02868_distinct_to_count_optimization/ast.json new file mode 100644 index 000000000..cecdeef2a --- /dev/null +++ 
b/parser/testdata/02868_distinct_to_count_optimization/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_rewrite_uniq_to_count (children 1)" + }, + { + "explain": " Identifier test_rewrite_uniq_to_count" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001431055, + "rows_read": 2, + "bytes_read": 104 + } +} diff --git a/parser/testdata/02868_distinct_to_count_optimization/metadata.json b/parser/testdata/02868_distinct_to_count_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02868_distinct_to_count_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02868_distinct_to_count_optimization/query.sql b/parser/testdata/02868_distinct_to_count_optimization/query.sql new file mode 100644 index 000000000..d30bade4d --- /dev/null +++ b/parser/testdata/02868_distinct_to_count_optimization/query.sql @@ -0,0 +1,68 @@ +drop table if exists test_rewrite_uniq_to_count; + +CREATE TABLE test_rewrite_uniq_to_count +( + `a` UInt8, + `b` UInt8, + `c` UInt8 +) ENGINE = MergeTree ORDER BY `a`; + + +INSERT INTO test_rewrite_uniq_to_count values ('1', '1', '1'), ('1', '1', '1'); +INSERT INTO test_rewrite_uniq_to_count values ('2', '2', '2'), ('2', '2', '2'); +INSERT INTO test_rewrite_uniq_to_count values ('3', '3', '3'), ('3', '3', '3'); + +set optimize_uniq_to_count=true; + + +SELECT '1. test simple distinct'; +SELECT uniq(a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) settings enable_analyzer=0; +EXPLAIN SYNTAX SELECT uniq(a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) settings enable_analyzer=0; +SELECT uniq(a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) settings enable_analyzer=1; +EXPLAIN QUERY TREE SELECT uniq(a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) settings enable_analyzer=1; + + +SELECT '2. test distinct with subquery alias'; +SELECT uniq(t.a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=0; +EXPLAIN SYNTAX SELECT uniq(a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=0; +SELECT uniq(t.a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=1; +EXPLAIN QUERY TREE SELECT uniq(t.a) FROM (SELECT DISTINCT a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=1; + +SELECT '3. test distinct with compound column name'; +SELECT uniq(a) FROM (SELECT DISTINCT test_rewrite_uniq_to_count.a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=0; +EXPLAIN SYNTAX SELECT uniq(a) FROM (SELECT DISTINCT test_rewrite_uniq_to_count.a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=0; +SELECT uniq(a) FROM (SELECT DISTINCT test_rewrite_uniq_to_count.a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=1; +EXPLAIN QUERY TREE SELECT uniq(a) FROM (SELECT DISTINCT test_rewrite_uniq_to_count.a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=1; + +SELECT '4. 
test distinct with select expression alias'; +SELECT uniq(alias_of_a) FROM (SELECT DISTINCT a as alias_of_a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=0; +EXPLAIN SYNTAX SELECT uniq(alias_of_a) FROM (SELECT DISTINCT a as alias_of_a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=0; +SELECT uniq(alias_of_a) FROM (SELECT DISTINCT a as alias_of_a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=1; +EXPLAIN QUERY TREE SELECT uniq(alias_of_a) FROM (SELECT DISTINCT a as alias_of_a FROM test_rewrite_uniq_to_count) t settings enable_analyzer=1; + + +SELECT '5. test simple group by'; +SELECT uniq(a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) settings enable_analyzer=0; +EXPLAIN SYNTAX SELECT uniq(a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) settings enable_analyzer=0; +SELECT uniq(a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) settings enable_analyzer=1; +EXPLAIN QUERY TREE SELECT uniq(a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) settings enable_analyzer=1; + +SELECT '6. test group by with subquery alias'; +SELECT uniq(t.a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings enable_analyzer=0; +EXPLAIN SYNTAX SELECT uniq(t.a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings enable_analyzer=0; +SELECT uniq(t.a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings enable_analyzer=1; +EXPLAIN QUERY TREE SELECT uniq(t.a) FROM (SELECT a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings enable_analyzer=1; + +SELECT '7. test group by with compound column name'; +SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings enable_analyzer=0; +EXPLAIN SYNTAX SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings enable_analyzer=0; +SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings enable_analyzer=1; +EXPLAIN QUERY TREE SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY a) t settings enable_analyzer=1; + +SELECT '8. 
test group by with select expression alias'; +SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY alias_of_a) t settings enable_analyzer=0; +EXPLAIN SYNTAX SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY alias_of_a) t settings enable_analyzer=0; +SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY alias_of_a) t settings enable_analyzer=1; +EXPLAIN QUERY TREE SELECT uniq(t.alias_of_a) FROM (SELECT a as alias_of_a, sum(b) FROM test_rewrite_uniq_to_count GROUP BY alias_of_a) t settings enable_analyzer=1; + +drop table if exists test_rewrite_uniq_to_count; diff --git a/parser/testdata/02868_operator_is_not_distinct_from_priority/ast.json b/parser/testdata/02868_operator_is_not_distinct_from_priority/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02868_operator_is_not_distinct_from_priority/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02868_operator_is_not_distinct_from_priority/metadata.json b/parser/testdata/02868_operator_is_not_distinct_from_priority/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02868_operator_is_not_distinct_from_priority/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02868_operator_is_not_distinct_from_priority/query.sql b/parser/testdata/02868_operator_is_not_distinct_from_priority/query.sql new file mode 100644 index 000000000..5afee6347 --- /dev/null +++ b/parser/testdata/02868_operator_is_not_distinct_from_priority/query.sql @@ -0,0 +1,32 @@ +-- EXPLAIN AST SELECT false IS NOT DISTINCT FROM (true IN (true, false)); +EXPLAIN AST SELECT false IS NOT DISTINCT FROM true IN (true, false); + +-- EXPLAIN AST SELECT 1 IS NOT DISTINCT FROM (1 + 1); +EXPLAIN AST SELECT 1 IS NOT DISTINCT FROM 1 + 1; + +-- EXPLAIN AST SELECT true IS NOT DISTINCT FROM ('x' LIKE 'a'); +EXPLAIN AST SELECT true IS NOT DISTINCT FROM 'x' LIKE 'a'; + +-- EXPLAIN AST SELECT 'x' IS NOT DISTINCT FROM ('x' || 'a'); +EXPLAIN AST SELECT 'x' IS NOT DISTINCT FROM 'x' || 'a'; + +-- EXPLAIN AST SELECT 1 IS NOT DISTINCT FROM (1 :: integer); +EXPLAIN AST SELECT 1 IS NOT DISTINCT FROM 1 :: integer; + +-- EXPLAIN AST SELECT NOT (1 IS NOT DISTINCT FROM 1); +EXPLAIN AST SELECT NOT 1 IS NOT DISTINCT FROM 1; + +-- EXPLAIN AST SELECT (- 1) IS NOT DISTINCT FROM 1 ; +EXPLAIN AST SELECT - 1 IS NOT DISTINCT FROM 1 ; + +-- EXPLAIN AST SELECT (false IS NOT DISTINCT FROM true) OR true; +EXPLAIN AST SELECT false IS NOT DISTINCT FROM true OR true; + +-- EXPLAIN AST SELECT (NULL IS NULL) IS NOT DISTINCT FROM NULL; +EXPLAIN AST SELECT NULL IS NULL IS NOT DISTINCT FROM NULL; + +-- EXPLAIN AST SELECT (1 <=> 1) == 1; +EXPLAIN AST SELECT 1 <=> 1 == 1; + +-- EXPLAIN AST SELECT (1 == 1) <=> 1; +EXPLAIN AST SELECT 1 == 1 <=> 1; diff --git a/parser/testdata/02868_select_support_from_keywords/ast.json b/parser/testdata/02868_select_support_from_keywords/ast.json new file mode 100644 index 000000000..8fe97f659 --- /dev/null +++ b/parser/testdata/02868_select_support_from_keywords/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test_table (children 3)" + }, + { + "explain": " Identifier test_table" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " ColumnDeclaration 
date (children 1)" + }, + { + "explain": " DataType Date" + }, + { + "explain": " ColumnDeclaration __sign (children 1)" + }, + { + "explain": " DataType Int8" + }, + { + "explain": " ColumnDeclaration from (children 1)" + }, + { + "explain": " DataType Float64" + }, + { + "explain": " ColumnDeclaration to (children 1)" + }, + { + "explain": " DataType Float64" + }, + { + "explain": " Storage definition (children 4)" + }, + { + "explain": " Function CollapsingMergeTree (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier __sign" + }, + { + "explain": " Function toYYYYMM (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier date" + }, + { + "explain": " Identifier date" + }, + { + "explain": " Set" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001115882, + "rows_read": 21, + "bytes_read": 741 + } +} diff --git a/parser/testdata/02868_select_support_from_keywords/metadata.json b/parser/testdata/02868_select_support_from_keywords/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02868_select_support_from_keywords/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02868_select_support_from_keywords/query.sql b/parser/testdata/02868_select_support_from_keywords/query.sql new file mode 100644 index 000000000..dc06651a8 --- /dev/null +++ b/parser/testdata/02868_select_support_from_keywords/query.sql @@ -0,0 +1,5 @@ +create table test_table ( `date` Date, `__sign` Int8, `from` Float64, `to` Float64 ) ENGINE = CollapsingMergeTree(__sign) PARTITION BY toYYYYMM(date) ORDER BY (date) SETTINGS index_granularity = 8192; +create VIEW test_view AS WITH cte AS (SELECT date, __sign, "from", "to" FROM test_table FINAL) SELECT date, __sign, "from", "to" FROM cte; +show create table test_view; +drop table test_view; +drop table test_table; diff --git a/parser/testdata/02869_insert_filenames_collisions/ast.json b/parser/testdata/02869_insert_filenames_collisions/ast.json new file mode 100644 index 000000000..21ef33802 --- /dev/null +++ b/parser/testdata/02869_insert_filenames_collisions/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_collisions (children 1)" + }, + { + "explain": " Identifier t_collisions" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001428402, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/02869_insert_filenames_collisions/metadata.json b/parser/testdata/02869_insert_filenames_collisions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02869_insert_filenames_collisions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02869_insert_filenames_collisions/query.sql b/parser/testdata/02869_insert_filenames_collisions/query.sql new file mode 100644 index 000000000..441355930 --- /dev/null +++ b/parser/testdata/02869_insert_filenames_collisions/query.sql @@ -0,0 +1,73 @@ +DROP TABLE IF EXISTS t_collisions; + +SELECT lower(hex(reverse(CAST(sipHash128('very_very_long_column_name_that_will_be_replaced_with_hash'), 'FixedString(16)')))); + +CREATE TABLE t_collisions +( + `very_very_long_column_name_that_will_be_replaced_with_hash` Int32, + `e798545eefc8b7a1c2c81ff00c064ad8` Int32 +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS replace_long_file_name_to_hash = 1, max_file_name_length = 42; -- { serverError 
BAD_ARGUMENTS } + +DROP TABLE IF EXISTS t_collisions; + +CREATE TABLE t_collisions +( + `col1` Int32, + `e798545eefc8b7a1c2c81ff00c064ad8` Int32 +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS replace_long_file_name_to_hash = 1, max_file_name_length = 42; + +ALTER TABLE t_collisions ADD COLUMN very_very_long_column_name_that_will_be_replaced_with_hash Int32; -- { serverError BAD_ARGUMENTS } +ALTER TABLE t_collisions RENAME COLUMN col1 TO very_very_long_column_name_that_will_be_replaced_with_hash; -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS t_collisions; + +CREATE TABLE t_collisions +( + `very_very_long_column_name_that_will_be_replaced_with_hash` Int32, + `e798545eefc8b7a1c2c81ff00c064ad8` Int32 +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS replace_long_file_name_to_hash = 0; + +INSERT INTO t_collisions VALUES (1, 1); + +ALTER TABLE t_collisions MODIFY SETTING replace_long_file_name_to_hash = 1, max_file_name_length = 42; -- { serverError BAD_ARGUMENTS } + +INSERT INTO t_collisions VALUES (2, 2); + +SELECT * FROM t_collisions ORDER BY e798545eefc8b7a1c2c81ff00c064ad8; + +DROP TABLE IF EXISTS t_collisions; + +CREATE TABLE t_collisions +( + `id` Int, + `col` Array(String), + `col.s` Array(LowCardinality(String)), + `col.u` Array(LowCardinality(String)) +) +ENGINE = MergeTree +ORDER BY id; -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS t_collisions; + +CREATE TABLE t_collisions +( + `id` Int, + `col` String, + `col.s` Array(LowCardinality(String)), + `col.u` Array(LowCardinality(String)) +) +ENGINE = MergeTree +ORDER BY id; + +ALTER TABLE t_collisions MODIFY COLUMN col Array(String); -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS t_collisions; diff --git a/parser/testdata/02869_parallel_replicas_read_from_several/ast.json b/parser/testdata/02869_parallel_replicas_read_from_several/ast.json new file mode 100644 index 000000000..eb0eba6be --- /dev/null +++ b/parser/testdata/02869_parallel_replicas_read_from_several/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001283146, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02869_parallel_replicas_read_from_several/metadata.json b/parser/testdata/02869_parallel_replicas_read_from_several/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02869_parallel_replicas_read_from_several/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02869_parallel_replicas_read_from_several/query.sql b/parser/testdata/02869_parallel_replicas_read_from_several/query.sql new file mode 100644 index 000000000..f5241d559 --- /dev/null +++ b/parser/testdata/02869_parallel_replicas_read_from_several/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS t1 SYNC; +DROP TABLE IF EXISTS t2 SYNC; +DROP TABLE IF EXISTS t3 SYNC; + +CREATE TABLE t1(k UInt32, v UInt32) ENGINE ReplicatedMergeTree('/parallel_replicas/{database}/test_tbl', 'r1') ORDER BY k settings index_granularity=10; +CREATE TABLE t2(k UInt32, v UInt32) ENGINE ReplicatedMergeTree('/parallel_replicas/{database}/test_tbl', 'r2') ORDER BY k settings index_granularity=10; +CREATE TABLE t3(k UInt32, v UInt32) ENGINE ReplicatedMergeTree('/parallel_replicas/{database}/test_tbl', 'r3') ORDER BY k settings index_granularity=10; + +insert into t1 select number, number from numbers(1000); +insert 
into t1 select number, number from numbers(1000, 1000);
+insert into t1 select number, number from numbers(2000, 1000);
+
+insert into t2 select number, number from numbers(3000, 1000);
+insert into t2 select number, number from numbers(4000, 1000);
+insert into t2 select number, number from numbers(5000, 1000);
+
+insert into t3 select number, number from numbers(6000, 1000);
+insert into t3 select number, number from numbers(7000, 1000);
+insert into t3 select number, number from numbers(8000, 1000);
+
+system sync replica t1;
+system sync replica t2;
+system sync replica t3;
+
+SELECT count(), min(k), max(k), avg(k)
+FROM t1
+SETTINGS enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost';
diff --git a/parser/testdata/02869_unicode_minus/ast.json b/parser/testdata/02869_unicode_minus/ast.json
new file mode 100644
index 000000000..f6b5fa82d
--- /dev/null
+++ b/parser/testdata/02869_unicode_minus/ast.json
@@ -0,0 +1,37 @@
+{
+ "meta":
+ [
+ {
+ "name": "explain",
+ "type": "String"
+ }
+ ],
+
+ "data":
+ [
+ {
+ "explain": "SelectWithUnionQuery (children 1)"
+ },
+ {
+ "explain": " ExpressionList (children 1)"
+ },
+ {
+ "explain": " SelectQuery (children 1)"
+ },
+ {
+ "explain": " ExpressionList (children 1)"
+ },
+ {
+ "explain": " Literal UInt64_1"
+ }
+ ],
+
+ "rows": 5,
+
+ "statistics":
+ {
+ "elapsed": 0.00121597,
+ "rows_read": 5,
+ "bytes_read": 177
+ }
+}
diff --git a/parser/testdata/02869_unicode_minus/metadata.json b/parser/testdata/02869_unicode_minus/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02869_unicode_minus/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02869_unicode_minus/query.sql b/parser/testdata/02869_unicode_minus/query.sql
new file mode 100644
index 000000000..15fe7b839
--- /dev/null
+++ b/parser/testdata/02869_unicode_minus/query.sql
@@ -0,0 +1,2 @@
+SELECT 1 − 2;
+SELECT −1;
diff --git a/parser/testdata/02870_move_partition_to_volume_io_throttling/ast.json b/parser/testdata/02870_move_partition_to_volume_io_throttling/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02870_move_partition_to_volume_io_throttling/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02870_move_partition_to_volume_io_throttling/metadata.json b/parser/testdata/02870_move_partition_to_volume_io_throttling/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02870_move_partition_to_volume_io_throttling/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02870_move_partition_to_volume_io_throttling/query.sql b/parser/testdata/02870_move_partition_to_volume_io_throttling/query.sql
new file mode 100644
index 000000000..f3362fd02
--- /dev/null
+++ b/parser/testdata/02870_move_partition_to_volume_io_throttling/query.sql
@@ -0,0 +1,14 @@
+-- Tags: no-random-merge-tree-settings, no-fasttest, no-replicated-database
+-- Tag: no-fasttest -- requires S3
+-- Tag: no-replicated-database -- ALTER MOVE PARTITION TO should not be replicated (will be fixed separately)
+
+SET optimize_trivial_insert_select = 1;
+
+CREATE TABLE test_move_partition_throttling (key UInt64 CODEC(NONE)) ENGINE = MergeTree ORDER BY tuple() SETTINGS storage_policy='local_remote';
+INSERT INTO test_move_partition_throttling SELECT number FROM numbers(1e6);
+SELECT disk_name, partition, rows FROM system.parts WHERE database = currentDatabase() AND table = 'test_move_partition_throttling' and active;
+ALTER TABLE test_move_partition_throttling MOVE PARTITION tuple() TO VOLUME 'remote' SETTINGS max_remote_write_network_bandwidth=1600000;
+SYSTEM FLUSH LOGS query_log;
+-- 1e6 UInt64 values = 8e6 bytes; throttled to 1600000 bytes/sec, the move needs at least (8e6-1600000)/1600000 = 4.0 seconds
+SELECT query_kind, query_duration_ms>4e3 FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND query_kind = 'Alter';
+SELECT disk_name, partition, rows FROM system.parts WHERE database = currentDatabase() AND table = 'test_move_partition_throttling' and active;
diff --git a/parser/testdata/02870_per_column_settings/ast.json b/parser/testdata/02870_per_column_settings/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02870_per_column_settings/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02870_per_column_settings/metadata.json b/parser/testdata/02870_per_column_settings/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02870_per_column_settings/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02870_per_column_settings/query.sql b/parser/testdata/02870_per_column_settings/query.sql
new file mode 100644
index 000000000..bdb0b54cc
--- /dev/null
+++ b/parser/testdata/02870_per_column_settings/query.sql
@@ -0,0 +1,70 @@
+-- Tags: no-random-merge-tree-settings, no-replicated-database
+-- Tag no-replicated-database: Old syntax is not allowed
+-- The test uses a replicated table to check that columns with a settings declaration are serialized and deserialized correctly via ZooKeeper
+-- Tests column-level settings for MergeTree* tables
+
+DROP TABLE IF EXISTS tab;
+
+CREATE TABLE tab
+(
+    id UInt64,
+    long_string String SETTINGS (min_compress_block_size = 163840, max_compress_block_size = 163840),
+    v1 String,
+    v2 UInt64,
+    v3 Float32,
+    v4 Float64
+)
+ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/tab/2870', 'r1')
+ORDER BY id
+SETTINGS min_bytes_for_wide_part = 1;
+
+SHOW CREATE tab;
+
+INSERT INTO TABLE tab SELECT number, randomPrintableASCII(1000), randomPrintableASCII(10), rand(number), rand(number+1), rand(number+2) FROM numbers(1000);
+SELECT count() FROM tab;
+
+SELECT formatQuery('ALTER TABLE tab MODIFY COLUMN long_string MODIFY SETTING min_compress_block_size = 8192;');
+ALTER TABLE tab MODIFY COLUMN long_string MODIFY SETTING min_compress_block_size = 8192;
+SHOW CREATE tab;
+
+SELECT formatQuery('ALTER TABLE tab MODIFY COLUMN long_string RESET SETTING min_compress_block_size;');
+ALTER TABLE tab MODIFY COLUMN long_string RESET SETTING min_compress_block_size;
+SHOW CREATE tab;
+
+SELECT formatQuery('ALTER TABLE tab MODIFY COLUMN long_string REMOVE SETTINGS;');
+ALTER TABLE tab MODIFY COLUMN long_string REMOVE SETTINGS;
+SHOW CREATE tab;
+
+SELECT formatQuery('ALTER TABLE tab MODIFY COLUMN long_string String SETTINGS (min_compress_block_size = 163840, max_compress_block_size = 163840);');
+ALTER TABLE tab MODIFY COLUMN long_string String SETTINGS (min_compress_block_size = 163840, max_compress_block_size = 163840);
+SHOW CREATE tab;
+
+DROP TABLE tab;
+
+SELECT '---';
+
+CREATE TABLE tab
+(
+    id UInt64,
+    tup Tuple(UInt64, UInt64) SETTINGS (min_compress_block_size = 81920, max_compress_block_size = 163840),
+)
+ENGINE = MergeTree
+ORDER BY id
+SETTINGS min_bytes_for_wide_part = 1;
+
+INSERT INTO TABLE tab SELECT number, tuple(number, number) FROM numbers(1000);
+SELECT tup FROM tab ORDER BY tup LIMIT 10;
+
+DROP TABLE tab;
+
+SELECT '---';
+
+-- Unsupported column-level
settings are rejected +CREATE TABLE tab +( + id UInt64, + long_string String SETTINGS (min_block_size = 81920, max_compress_block_size = 163840), +) +ENGINE = MergeTree +ORDER BY id +SETTINGS min_bytes_for_wide_part = 1; -- {serverError UNKNOWN_SETTING} diff --git a/parser/testdata/02871_join_on_system_errors/ast.json b/parser/testdata/02871_join_on_system_errors/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02871_join_on_system_errors/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02871_join_on_system_errors/metadata.json b/parser/testdata/02871_join_on_system_errors/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02871_join_on_system_errors/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02871_join_on_system_errors/query.sql b/parser/testdata/02871_join_on_system_errors/query.sql new file mode 100644 index 000000000..ae30ef8f7 --- /dev/null +++ b/parser/testdata/02871_join_on_system_errors/query.sql @@ -0,0 +1,13 @@ + +-- Unique table alias to distinguish between errors from different queries +SELECT * FROM (SELECT 1 as a) t +JOIN (SELECT 2 as a) `89467d35-77c2-4f82-ae7a-f093ff40f4cd` +ON t.a = `89467d35-77c2-4f82-ae7a-f093ff40f4cd`.a +; + +SELECT * +FROM system.errors +WHERE name = 'UNKNOWN_IDENTIFIER' +AND last_error_time > now() - 1 +AND last_error_message LIKE '%Missing columns%89467d35-77c2-4f82-ae7a-f093ff40f4cd%' +; diff --git a/parser/testdata/02871_multiple_joins_rewriter_v2_handle_last_table_columns/ast.json b/parser/testdata/02871_multiple_joins_rewriter_v2_handle_last_table_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02871_multiple_joins_rewriter_v2_handle_last_table_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02871_multiple_joins_rewriter_v2_handle_last_table_columns/metadata.json b/parser/testdata/02871_multiple_joins_rewriter_v2_handle_last_table_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02871_multiple_joins_rewriter_v2_handle_last_table_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02871_multiple_joins_rewriter_v2_handle_last_table_columns/query.sql b/parser/testdata/02871_multiple_joins_rewriter_v2_handle_last_table_columns/query.sql new file mode 100644 index 000000000..15a1d36c0 --- /dev/null +++ b/parser/testdata/02871_multiple_joins_rewriter_v2_handle_last_table_columns/query.sql @@ -0,0 +1,47 @@ +-- { echo } + +-- no clash name +SELECT + c + 1, + Z.c + 1 +FROM + (SELECT 10 a) X +CROSS JOIN + (SELECT 20 b) Y +CROSS JOIN + (SELECT 30 c) Z; + +-- alias clash +SELECT + (a + 1) AS c, + Z.c + 1 +FROM + (SELECT 10 a) X +CROSS JOIN + (SELECT 20 b) Y +CROSS JOIN + (SELECT 30 c) Z; + +-- column clash +SELECT + (X.c + 1) AS c, + Z.c + 1 +FROM + (SELECT 10 c) X +CROSS JOIN + (SELECT 20 b) Y +CROSS JOIN + (SELECT 30 c) Z; + +SELECT + (X.a + 1) AS a, + (Y.a + 1) AS Y_a, + (Z.a + 1) AS Z_a, + (Y.b + 1) AS b, + (Z.b + 1) AS Z_b +FROM + (SELECT 10 a) X +CROSS JOIN + (SELECT 20 a, 21 as b) Y +CROSS JOIN + (SELECT 30 a, 31 as b, 32 as c) Z; diff --git a/parser/testdata/02872_gcd_codec/ast.json b/parser/testdata/02872_gcd_codec/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02872_gcd_codec/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02872_gcd_codec/metadata.json 
b/parser/testdata/02872_gcd_codec/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02872_gcd_codec/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02872_gcd_codec/query.sql b/parser/testdata/02872_gcd_codec/query.sql
new file mode 100644
index 000000000..696b8f68d
--- /dev/null
+++ b/parser/testdata/02872_gcd_codec/query.sql
@@ -0,0 +1,126 @@
+-- GCD codec can't be used stand-alone
+CREATE TABLE table_gcd_codec (n UInt64 CODEC(GCD)) ENGINE = Memory; -- { serverError BAD_ARGUMENTS }
+
+-- GCD codec rejects non-integer/decimal/datetime types
+CREATE TABLE table_gcd_codec (str String CODEC(GCD, LZ4)) ENGINE = Memory; -- { serverError BAD_ARGUMENTS }
+
+-- Basic random-based correctness test
+CREATE TABLE table_lz4 (id UInt64, ui UInt256 CODEC(LZ4)) ENGINE = Memory;
+INSERT INTO table_lz4 SELECT * FROM generateRandom() LIMIT 50;
+
+CREATE TABLE table_gcd (id UInt64, ui UInt256 CODEC(GCD, LZ4)) ENGINE = Memory;
+INSERT INTO table_gcd SELECT * FROM table_lz4;
+
+SELECT COUNT(*)
+FROM (
+    SELECT table_lz4.id, table_lz4.ui AS ui1, table_gcd.id, table_gcd.ui AS ui2
+    FROM table_lz4 JOIN table_gcd
+    ON table_lz4.id = table_gcd.id
+)
+WHERE ui1 != ui2;
+
+-------------------------------------------------------------------------------------------
+-- Compression/decompression works for all data types supported by GCD codec
+
+-- UInt*
+CREATE TABLE table_gcd_codec_uint8 (n UInt8 CODEC(GCD, LZ4)) ENGINE = Memory;
+CREATE TABLE table_gcd_codec_uint16 (n UInt16 CODEC(GCD, LZ4)) ENGINE = Memory;
+CREATE TABLE table_gcd_codec_uint32 (n UInt32 CODEC(GCD, LZ4)) ENGINE = Memory;
+CREATE TABLE table_gcd_codec_uint64 (n UInt64 CODEC(GCD, LZ4)) ENGINE = Memory;
+CREATE TABLE table_gcd_codec_uint128 (n UInt128 CODEC(GCD, LZ4)) ENGINE = Memory;
+CREATE TABLE table_gcd_codec_uint256 (n UInt256 CODEC(GCD, LZ4)) ENGINE = Memory;
+
+INSERT INTO table_gcd_codec_uint8 SELECT number FROM system.numbers LIMIT 50;
+INSERT INTO table_gcd_codec_uint16 SELECT number FROM system.numbers LIMIT 50;
+INSERT INTO table_gcd_codec_uint32 SELECT number FROM system.numbers LIMIT 50;
+INSERT INTO table_gcd_codec_uint64 SELECT number FROM system.numbers LIMIT 50;
+INSERT INTO table_gcd_codec_uint128 SELECT number FROM system.numbers LIMIT 50;
+INSERT INTO table_gcd_codec_uint256 SELECT number FROM system.numbers LIMIT 50;
+
+SELECT * FROM table_gcd_codec_uint8;
+SELECT * FROM table_gcd_codec_uint16;
+SELECT * FROM table_gcd_codec_uint32;
+SELECT * FROM table_gcd_codec_uint64;
+SELECT * FROM table_gcd_codec_uint128;
+SELECT * FROM table_gcd_codec_uint256;
+
+-- Int*
+CREATE TABLE table_gcd_codec_int8 (n Int8 CODEC(GCD, LZ4)) ENGINE = Memory;
+CREATE TABLE table_gcd_codec_int16 (n Int16 CODEC(GCD, LZ4)) ENGINE = Memory;
+CREATE TABLE table_gcd_codec_int32 (n Int32 CODEC(GCD, LZ4)) ENGINE = Memory;
+CREATE TABLE table_gcd_codec_int64 (n Int64 CODEC(GCD, LZ4)) ENGINE = Memory;
+CREATE TABLE table_gcd_codec_int128 (n Int128 CODEC(GCD, LZ4)) ENGINE = Memory;
+CREATE TABLE table_gcd_codec_int256 (n Int256 CODEC(GCD, LZ4)) ENGINE = Memory;
+
+INSERT INTO table_gcd_codec_int8 SELECT number FROM system.numbers LIMIT 50;
+INSERT INTO table_gcd_codec_int16 SELECT number FROM system.numbers LIMIT 50;
+INSERT INTO table_gcd_codec_int32 SELECT number FROM system.numbers LIMIT 50;
+INSERT INTO table_gcd_codec_int64 SELECT number FROM system.numbers LIMIT 50;
+INSERT INTO table_gcd_codec_int128 SELECT number FROM system.numbers LIMIT 50;
+INSERT INTO table_gcd_codec_int256 SELECT number FROM system.numbers LIMIT 50;
+
+SELECT * FROM table_gcd_codec_int8;
+SELECT * FROM table_gcd_codec_int16;
+SELECT * FROM table_gcd_codec_int32;
+SELECT * FROM table_gcd_codec_int64;
+SELECT * FROM table_gcd_codec_int128;
+SELECT * FROM table_gcd_codec_int256;
+
+-- Decimal*
+CREATE TABLE table_gcd_codec_decimal32 (n Decimal32(1) CODEC(GCD, LZ4)) ENGINE = Memory;
+CREATE TABLE table_gcd_codec_decimal64 (n Decimal64(1) CODEC(GCD, LZ4)) ENGINE = Memory;
+CREATE TABLE table_gcd_codec_decimal128 (n Decimal128(1) CODEC(GCD, LZ4)) ENGINE = Memory;
+CREATE TABLE table_gcd_codec_decimal256 (n Decimal256(1) CODEC(GCD, LZ4)) ENGINE = Memory;
+
+INSERT INTO table_gcd_codec_decimal32 SELECT number FROM system.numbers LIMIT 50;
+INSERT INTO table_gcd_codec_decimal64 SELECT number FROM system.numbers LIMIT 50;
+INSERT INTO table_gcd_codec_decimal128 SELECT number FROM system.numbers LIMIT 50;
+INSERT INTO table_gcd_codec_decimal256 SELECT number FROM system.numbers LIMIT 50;
+
+SELECT * FROM table_gcd_codec_decimal32;
+SELECT * FROM table_gcd_codec_decimal64;
+SELECT * FROM table_gcd_codec_decimal128;
+SELECT * FROM table_gcd_codec_decimal256;
+
+-- Date[32]
+CREATE TABLE table_gcd_codec_date (n Date CODEC(GCD, LZ4)) ENGINE = Memory;
+CREATE TABLE table_gcd_codec_date32 (n Date32 CODEC(GCD, LZ4)) ENGINE = Memory;
+
+INSERT INTO table_gcd_codec_date SELECT number FROM system.numbers LIMIT 50;
+INSERT INTO table_gcd_codec_date32 SELECT number FROM system.numbers LIMIT 50;
+
+SELECT * FROM table_gcd_codec_date;
+SELECT * FROM table_gcd_codec_date32;
+
+-- DateTime[64]
+CREATE TABLE table_gcd_codec_datetime (n DateTime('Asia/Istanbul') CODEC(GCD, LZ4)) ENGINE = Memory;
+CREATE TABLE table_gcd_codec_datetime64 (n DateTime64(3, 'Asia/Istanbul') CODEC(GCD, LZ4)) ENGINE = Memory;
+
+INSERT INTO table_gcd_codec_datetime SELECT number FROM system.numbers LIMIT 50;
+INSERT INTO table_gcd_codec_datetime64 SELECT number FROM system.numbers LIMIT 50;
+
+SELECT * FROM table_gcd_codec_datetime;
+SELECT * FROM table_gcd_codec_datetime64;
+
+
+-- A column with all zero values can be compressed/decompressed
+
+CREATE TABLE table_gcd_codec_only_zeros (n UInt8 CODEC(GCD, LZ4)) ENGINE = Memory;
+INSERT INTO table_gcd_codec_only_zeros VALUES (0), (0), (0);
+SELECT * FROM table_gcd_codec_only_zeros;
+
+-- Tests for Bug #56672:
+
+DROP TABLE IF EXISTS table_gcd_codec_one_hundred_zeros;
+DROP TABLE IF EXISTS table_gcd_codec_one_hundred_ones;
+
+CREATE TABLE table_gcd_codec_one_hundred_zeros (a Nullable(Int64) CODEC (GCD,LZ4)) ENGINE=MergeTree ORDER BY ();
+INSERT INTO table_gcd_codec_one_hundred_zeros SELECT 0 FROM numbers(100);
+SELECT * FROM table_gcd_codec_one_hundred_zeros;
+
+CREATE TABLE table_gcd_codec_one_hundred_ones (a Nullable(Int64) CODEC (GCD,LZ4)) ENGINE=MergeTree ORDER BY ();
+INSERT INTO table_gcd_codec_one_hundred_ones SELECT 1 FROM numbers(100);
+SELECT * FROM table_gcd_codec_one_hundred_ones;
+
+DROP TABLE table_gcd_codec_one_hundred_zeros;
+DROP TABLE table_gcd_codec_one_hundred_ones;
diff --git a/parser/testdata/02872_prewhere_filter/ast.json b/parser/testdata/02872_prewhere_filter/ast.json
new file mode 100644
index 000000000..49184d057
--- /dev/null
+++ b/parser/testdata/02872_prewhere_filter/ast.json
@@ -0,0 +1,28 @@
+{
+ "meta":
+ [
+ {
+ "name": "explain",
+ "type": "String"
+ }
+ ],
+
+ "data":
+ [
+ {
+ "explain": "DropQuery data (children 1)"
+ },
+ {
+ "explain": " Identifier data"
+ }
+ ],
+
+ "rows": 2,
+
+ "statistics":
+ {
+ "elapsed": 0.001044206,
+ "rows_read": 2,
+ "bytes_read": 60
+ } +} diff --git a/parser/testdata/02872_prewhere_filter/metadata.json b/parser/testdata/02872_prewhere_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02872_prewhere_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02872_prewhere_filter/query.sql b/parser/testdata/02872_prewhere_filter/query.sql new file mode 100644 index 000000000..fdc3ec83a --- /dev/null +++ b/parser/testdata/02872_prewhere_filter/query.sql @@ -0,0 +1,9 @@ +drop table if exists data; + +create table data (key Int, val1 SimpleAggregateFunction(max, Nullable(Int)), val2 SimpleAggregateFunction(min, Int)) engine=AggregatingMergeTree() order by key; +system stop merges data; + +insert into data values (1,10,100); +insert into data values (1,20,10); + +select key, val1, val2, assumeNotNull(val1) > val2 x1, val1 > val2 x2 from data final prewhere assumeNotNull(val1) > 0 where x1 != x2 settings max_threads=1; diff --git a/parser/testdata/02873_s3_presigned_url_and_url_with_special_characters/ast.json b/parser/testdata/02873_s3_presigned_url_and_url_with_special_characters/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02873_s3_presigned_url_and_url_with_special_characters/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02873_s3_presigned_url_and_url_with_special_characters/metadata.json b/parser/testdata/02873_s3_presigned_url_and_url_with_special_characters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02873_s3_presigned_url_and_url_with_special_characters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02873_s3_presigned_url_and_url_with_special_characters/query.sql b/parser/testdata/02873_s3_presigned_url_and_url_with_special_characters/query.sql new file mode 100644 index 000000000..703ab10ee --- /dev/null +++ b/parser/testdata/02873_s3_presigned_url_and_url_with_special_characters/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest + +select * from s3('http://localhost:11111/test/MyPrefix/BU%20-%20UNIT%20-%201/*.parquet'); -- { serverError CANNOT_EXTRACT_TABLE_STRUCTURE } + +select * from s3('http://localhost:11111/test/MyPrefix/*.parquet?some_token=ABCD', NOSIGN); -- { serverError CANNOT_DETECT_FORMAT } diff --git a/parser/testdata/02874_analysis_of_variance_overflow/ast.json b/parser/testdata/02874_analysis_of_variance_overflow/ast.json new file mode 100644 index 000000000..038a5e120 --- /dev/null +++ b/parser/testdata/02874_analysis_of_variance_overflow/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function analysisOfVariance (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_18446744073709551615" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001062787, + "rows_read": 8, + "bytes_read": 320 + } +} diff --git a/parser/testdata/02874_analysis_of_variance_overflow/metadata.json b/parser/testdata/02874_analysis_of_variance_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02874_analysis_of_variance_overflow/metadata.json @@ -0,0 +1 @@ 
+{"todo": true} diff --git a/parser/testdata/02874_analysis_of_variance_overflow/query.sql b/parser/testdata/02874_analysis_of_variance_overflow/query.sql new file mode 100644 index 000000000..67fb4d28a --- /dev/null +++ b/parser/testdata/02874_analysis_of_variance_overflow/query.sql @@ -0,0 +1 @@ +SELECT analysisOfVariance(1, 18446744073709551615); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/02874_infer_objects_as_named_tuples/ast.json b/parser/testdata/02874_infer_objects_as_named_tuples/ast.json new file mode 100644 index 000000000..c83ddf700 --- /dev/null +++ b/parser/testdata/02874_infer_objects_as_named_tuples/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00122669, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02874_infer_objects_as_named_tuples/metadata.json b/parser/testdata/02874_infer_objects_as_named_tuples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02874_infer_objects_as_named_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02874_infer_objects_as_named_tuples/query.sql b/parser/testdata/02874_infer_objects_as_named_tuples/query.sql new file mode 100644 index 000000000..48f499e7d --- /dev/null +++ b/parser/testdata/02874_infer_objects_as_named_tuples/query.sql @@ -0,0 +1,24 @@ +set input_format_json_try_infer_named_tuples_from_objects = 1; +desc format(JSONEachRow, '{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}'); +select * from format(JSONEachRow, '{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}'); +desc format(JSONEachRow, '{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}, {"obj" : {"a" : 43, "b" : "World", "d" : "2020-01-01"}}'); +select * from format(JSONEachRow, '{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}, {"obj" : {"a" : 43, "b" : "World", "d" : "2020-01-01"}}'); +desc format(JSONEachRow, '{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}, {"obj" : {"a" : 43, "b" : "World", "d" : "2020-01-01"}}, {"obj" : {}}'); +select * from format(JSONEachRow, '{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}, {"obj" : {"a" : 43, "b" : "World", "d" : "2020-01-01"}}, {"obj" : {}}'); +desc format(JSONEachRow, '{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}, {"obj" : {"a" : 43, "b" : "World", "d" : "2020-01-01"}}, {"obj" : {}}, {"obj" : {"d" : "Hello", "b" : "2020-01-01"}}'); +select * from format(JSONEachRow, '{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}, {"obj" : {"a" : 43, "b" : "World", "d" : "2020-01-01"}}, {"obj" : {}}, {"obj" : {"d" : "Hello", "b" : "2020-01-01"}}'); +desc format(JSONEachRow, '{"obj" : [{"a" : 42, "b" : "Hello", "c" : [1,2,3]}, {"a" : 43, "b" : "World", "d" : "2020-01-01"}]}, {"obj" : [{}]}'); +select * from format(JSONEachRow, '{"obj" : [{"a" : 42, "b" : "Hello", "c" : [1,2,3]}, {"a" : 43, "b" : "World", "d" : "2020-01-01"}]}, {"obj" : [{}]}'); +desc format(JSONEachRow, '{"obj" : {"nested_obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}}, {"obj" : {"nested_obj" : {"a" : 43, "b" : "World", "d" : "2020-01-01"}}}, {"obj" : {"nested_obj" : {}}}'); +select * from format(JSONEachRow, '{"obj" : {"nested_obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}}, {"obj" : {"nested_obj" : {"a" : 43, "b" : "World", "d" : "2020-01-01"}}}, {"obj" : {"nested_obj" : {}}}'); +desc format(JSONEachRow, '{"obj" : {"a" : {"b" : 1}}}, {"obj" : {"a.b" : 2, "a.b.c" : "Hello"}}'); +select * 
from format(JSONEachRow, '{"obj" : {"a" : {"b" : 1}}}, {"obj" : {"a.b" : 2, "a.b.c" : "Hello"}}'); +desc format(JSONEachRow, '{"obj" : {"a" : {}}}, {"obj" : {"a" : {"b" : {"c" : 10}}}}'); +select * from format(JSONEachRow, '{"obj" : {"a" : {}}}, {"obj" : {"a" : {"b" : {"c" : 10}}}}'); +desc format(JSONEachRow, '{"obj" : {"a" : {}}}'); +select * from format(JSONEachRow, '{"obj" : {"a" : {}}}'); +desc format(JSONEachRow, '{"obj" : {}}'); +select * from format(JSONEachRow, '{"obj" : {}}'); +desc format(JSONEachRow, '{"obj" : {"a" : [{}, {"b" : null}, {"c" : {"d" : 10}}]}}, {"obj" : {"a" : [{"e" : "Hello", "b" : [1,2,3]}]}}'); +select * from format(JSONEachRow, '{"obj" : {"a" : [{}, {"b" : null}, {"c" : {"d" : 10}}]}}, {"obj" : {"a" : [{"e" : "Hello", "b" : [1,2,3]}]}}'); + diff --git a/parser/testdata/02874_json_merge_patch_function_test/ast.json b/parser/testdata/02874_json_merge_patch_function_test/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02874_json_merge_patch_function_test/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02874_json_merge_patch_function_test/metadata.json b/parser/testdata/02874_json_merge_patch_function_test/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02874_json_merge_patch_function_test/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02874_json_merge_patch_function_test/query.sql b/parser/testdata/02874_json_merge_patch_function_test/query.sql new file mode 100644 index 000000000..9f0c3fd48 --- /dev/null +++ b/parser/testdata/02874_json_merge_patch_function_test/query.sql @@ -0,0 +1,24 @@ +-- Tags: no-fasttest +select jsonMergePatch(null); +select jsonMergePatch('{"a":1}'); +select jsonMergePatch('{"a":1}', '{"b":1}'); +select jsonMergePatch('{"a":1}', '{"b":1}', '{"c":[1,2]}'); +select jsonMergePatch('{"a":1}', '{"b":1}', '{"c":[{"d":1},2]}'); +select jsonMergePatch('{"a":1}','{"name": "joey"}','{"name": "tom"}','{"name": "zoey"}'); +select jsonMergePatch('{"a": "1","b": 2,"c": [true,{"qrdzkzjvnos": true,"yxqhipj": false,"oesax": "33o8_6AyUy"}]}', '{"c": "1"}'); +select jsonMergePatch('{"a": {"b": 1, "c": 2}}', '{"a": {"b": [3, 4]}}'); +select jsonMergePatch('{ "a": 1, "b":2 }','{ "a": 3, "c":4 }','{ "a": 5, "d":6 }'); +select jsonMergePatch('{"a":1, "b":2}', '{"b":null}'); + +select jsonMergePatch('[1]'); -- { serverError BAD_ARGUMENTS } +select jsonMergePatch('{"a": "1","b": 2,"c": [true,"qrdzkzjvnos": true,"yxqhipj": false,"oesax": "33o8_6AyUy"}]}', '{"c": "1"}'); -- { serverError BAD_ARGUMENTS } + +drop table if exists t_json_merge; +create table t_json_merge (id UInt64, s1 String, s2 String) engine = Memory; + +insert into t_json_merge select number, format('{{ "k{0}": {0} }}', toString(number * 2)), format('{{ "k{0}": {0} }}', toString(number * 2 + 1)) from numbers(5); +insert into t_json_merge select number, format('{{ "k{0}": {0} }}', toString(number * 2)), format('{{ "k{0}": {0}, "k{1}": 222 }}', toString(number * 2 + 1), toString(number * 2)) from numbers(5, 5); + +select jsonMergePatch(s1, s2) from t_json_merge ORDER BY id; + +drop table t_json_merge; diff --git a/parser/testdata/02874_parse_json_as_json_each_row_on_no_metadata/ast.json b/parser/testdata/02874_parse_json_as_json_each_row_on_no_metadata/ast.json new file mode 100644 index 000000000..f73bbf773 --- /dev/null +++ b/parser/testdata/02874_parse_json_as_json_each_row_on_no_metadata/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + 
"type": "String" + } + ], + + "data": + [ + { + "explain": "DescribeQuery (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier JSON" + }, + { + "explain": " Literal '{\"a\" : 10, \"b\" : \"Hello\"}'" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001089515, + "rows_read": 6, + "bytes_read": 221 + } +} diff --git a/parser/testdata/02874_parse_json_as_json_each_row_on_no_metadata/metadata.json b/parser/testdata/02874_parse_json_as_json_each_row_on_no_metadata/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02874_parse_json_as_json_each_row_on_no_metadata/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02874_parse_json_as_json_each_row_on_no_metadata/query.sql b/parser/testdata/02874_parse_json_as_json_each_row_on_no_metadata/query.sql new file mode 100644 index 000000000..ba7b720e3 --- /dev/null +++ b/parser/testdata/02874_parse_json_as_json_each_row_on_no_metadata/query.sql @@ -0,0 +1,3 @@ +desc format(JSON, '{"a" : 10, "b" : "Hello"}'); +select * from format(JSON, '{"a" : 10, "b" : "Hello"}'); + diff --git a/parser/testdata/02874_toDaysSinceYearZero/ast.json b/parser/testdata/02874_toDaysSinceYearZero/ast.json new file mode 100644 index 000000000..307cb4432 --- /dev/null +++ b/parser/testdata/02874_toDaysSinceYearZero/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001101486, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02874_toDaysSinceYearZero/metadata.json b/parser/testdata/02874_toDaysSinceYearZero/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02874_toDaysSinceYearZero/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02874_toDaysSinceYearZero/query.sql b/parser/testdata/02874_toDaysSinceYearZero/query.sql new file mode 100644 index 000000000..a02591f79 --- /dev/null +++ b/parser/testdata/02874_toDaysSinceYearZero/query.sql @@ -0,0 +1,34 @@ +SET session_timezone = 'Europe/Amsterdam'; -- disable time zone randomization in CI + +SELECT 'Invalid parameters'; +SELECT toDaysSinceYearZero(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT toDaysSinceYearZero(toDate('2023-09-08'), 3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toDaysSinceYearZero('str'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toDaysSinceYearZero(42); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 'Const argument'; +SELECT toDaysSinceYearZero(toDate('1970-01-01')); +SELECT toDaysSinceYearZero(toDate('2023-09-08')); +SELECT toDaysSinceYearZero(toDate('2023-09-08'), 'America/Los_Angeles'); +SELECT toDaysSinceYearZero(toDate32('1900-01-01')); +SELECT toDaysSinceYearZero(toDate32('2023-09-08')); +SELECT toDaysSinceYearZero(toDate32('2023-09-08'), 'America/Los_Angeles'); +SELECT toDaysSinceYearZero(toDateTime('1970-01-01 00:00:00')); +SELECT toDaysSinceYearZero(toDateTime('2023-09-08 11:11:11')); +SELECT toDaysSinceYearZero(toDateTime('2023-09-08 11:11:11'), 'America/Los_Angeles'); +SELECT toDaysSinceYearZero(toDateTime64('1900-01-01 00:00:00.000', 3)); +SELECT toDaysSinceYearZero(toDateTime64('2023-09-08 11:11:11.123', 3)); +SELECT toDaysSinceYearZero(toDateTime64('2023-09-08 11:11:11.123', 3), 
'America/Los_Angeles'); +SELECT toDaysSinceYearZero(toDateTime64('2023-09-08 11:11:11.123123123', 9)); +SELECT toDaysSinceYearZero(NULL); + +SELECT 'Non-const argument'; +SELECT toDaysSinceYearZero(materialize(toDate('2023-09-08'))); +SELECT toDaysSinceYearZero(materialize(toDate32('2023-09-08'))); +SELECT toDaysSinceYearZero(materialize(toDateTime('2023-09-08 11:11:11'))); +SELECT toDaysSinceYearZero(materialize(toDateTime64('2023-09-08 11:11:11.123', 3))); +SELECT toDaysSinceYearZero(materialize(toDateTime64('2023-09-08 11:11:11.123123123', 9))); + +SELECT 'MySQL alias'; +SELECT to_days(toDate('2023-09-08')); +SELECT TO_DAYS(toDate('2023-09-08')); diff --git a/parser/testdata/02875_final_invalid_read_ranges_bug/ast.json b/parser/testdata/02875_final_invalid_read_ranges_bug/ast.json new file mode 100644 index 000000000..aedcb9527 --- /dev/null +++ b/parser/testdata/02875_final_invalid_read_ranges_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001317318, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02875_final_invalid_read_ranges_bug/metadata.json b/parser/testdata/02875_final_invalid_read_ranges_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02875_final_invalid_read_ranges_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02875_final_invalid_read_ranges_bug/query.sql b/parser/testdata/02875_final_invalid_read_ranges_bug/query.sql new file mode 100644 index 000000000..5557c5726 --- /dev/null +++ b/parser/testdata/02875_final_invalid_read_ranges_bug/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS t; +CREATE TABLE t +( + tid UInt64, + processed_at DateTime, + created_at DateTime, + amount Int64 +) +ENGINE = ReplacingMergeTree +PARTITION BY toStartOfQuarter(created_at) +PRIMARY KEY (toStartOfDay(created_at), toStartOfDay(processed_at)) +ORDER BY (toStartOfDay(created_at), toStartOfDay(processed_at), tid) +SETTINGS index_granularity = 1; + +INSERT INTO t VALUES (5879429,'2023-07-01 03:50:35','2023-07-01 03:50:35',-278) (5881397,'2023-07-01 06:22:26','2023-07-01 06:22:27',2807) (5925060,'2023-07-04 00:24:03','2023-07-04 00:24:02',-12) (5936591,'2023-07-04 07:37:19','2023-07-04 07:37:18',-12) (5940709,'2023-07-04 09:13:35','2023-07-04 09:13:35',2820) (5942342,'2023-07-04 09:58:00','2023-07-04 09:57:59',-12) (5952231,'2023-07-04 22:33:24','2023-07-04 22:33:24',1692) (5959449,'2023-07-05 04:32:55','2023-07-05 04:32:54',-12) (5963240,'2023-07-05 06:37:08','2023-07-05 06:37:09',1709) (5965742,'2023-07-05 07:27:01','2023-07-05 07:27:02',1709) (5969948,'2023-07-05 08:44:36','2023-07-05 08:44:37',2278) (5971673,'2023-07-05 09:14:09','2023-07-05 09:14:09',5695) (6012987,'2023-07-06 20:52:28','2023-07-06 20:52:27',-536); + +SELECT tid, processed_at, created_at, amount FROM t FINAL ORDER BY tid; + +SELECT sum(amount) FROM t FINAL WHERE (processed_at >= '2023-09-19 00:00:00') AND (processed_at <= '2023-09-20 01:00:00'); + +INSERT INTO t VALUES (5879429,'2023-07-01 03:50:35','2023-07-01 03:50:35',-278) (5881397,'2023-07-01 06:22:26','2023-07-01 06:22:27',2807) (5925060,'2023-07-04 00:24:03','2023-07-04 00:24:02',-12) (5936591,'2023-07-04 07:37:19','2023-07-04 07:37:18',-12) (5940709,'2023-07-04 09:13:35','2023-07-04 09:13:35',2820) (5942342,'2023-07-04 09:58:00','2023-07-04 09:57:59',-12) 
(5952231,'2023-07-04 22:33:24','2023-07-04 22:33:24',1692) (5959449,'2023-07-05 04:32:55','2023-07-05 04:32:54',-12) (5963240,'2023-07-05 06:37:08','2023-07-05 06:37:09',1709) (5965742,'2023-07-05 07:27:01','2023-07-05 07:27:02',1709) (5969948,'2023-07-05 08:44:36','2023-07-05 08:44:37',2278) (5971673,'2023-07-05 09:14:09','2023-07-05 09:14:09',5695) (6012987,'2023-07-06 20:52:28','2023-07-06 20:52:27',-536); + +SELECT tid, processed_at, created_at, amount FROM t FINAL ORDER BY tid; + +SELECT sum(amount) FROM t FINAL WHERE (processed_at >= '2023-09-19 00:00:00') AND (processed_at <= '2023-09-20 01:00:00'); + +DROP TABLE t; diff --git a/parser/testdata/02875_fix_column_decimal_serialization/ast.json b/parser/testdata/02875_fix_column_decimal_serialization/ast.json new file mode 100644 index 000000000..24f2b9f8f --- /dev/null +++ b/parser/testdata/02875_fix_column_decimal_serialization/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery max_length_alias_14053__fuzz_45 (children 1)" + }, + { + "explain": " Identifier max_length_alias_14053__fuzz_45" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001064901, + "rows_read": 2, + "bytes_read": 115 + } +} diff --git a/parser/testdata/02875_fix_column_decimal_serialization/metadata.json b/parser/testdata/02875_fix_column_decimal_serialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02875_fix_column_decimal_serialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02875_fix_column_decimal_serialization/query.sql b/parser/testdata/02875_fix_column_decimal_serialization/query.sql new file mode 100644 index 000000000..2e71e47e9 --- /dev/null +++ b/parser/testdata/02875_fix_column_decimal_serialization/query.sql @@ -0,0 +1,17 @@ +CREATE TABLE max_length_alias_14053__fuzz_45 +( + `a` Date, + `b` Nullable(Decimal(76, 45)), + `c.d` Array(Nullable(DateTime64(3))), + `dcount` Int8 ALIAS length(c.d) +) +ENGINE = MergeTree +PARTITION BY toMonday(a) +ORDER BY (a, b) +SETTINGS allow_nullable_key = 1, index_granularity = 8192; + +INSERT INTO max_length_alias_14053__fuzz_45 VALUES ('2020-10-06',7367,['2020-10-06','2020-10-06','2020-10-06','2020-10-06','2020-10-06']),('2020-10-06',7367,['2020-10-06','2020-10-06','2020-10-06']),('2020-10-06',7367,['2020-10-06','2020-10-06']),('2020-10-07',7367,['2020-10-07','2020-10-07','2020-10-07','2020-10-07','2020-10-07']),('2020-10-08',7367,['2020-10-08','2020-10-08','2020-10-08','2020-10-08']),('2020-10-11',7367,['2020-10-11','2020-10-11','2020-10-11','2020-10-11','2020-10-11','2020-10-11','2020-10-11','2020-10-11']),('2020-10-11',7367,['2020-10-11']),('2020-08-26',7367,['2020-08-26','2020-08-26']),('2020-08-28',7367,['2020-08-28','2020-08-28','2020-08-28']),('2020-08-29',7367,['2020-08-29']),('2020-09-22',7367,['2020-09-22','2020-09-22','2020-09-22','2020-09-22','2020-09-22','2020-09-22','2020-09-22']); + +SELECT count(), min(length(c.d)) AS minExpr, min(dcount) AS minAlias, max(length(c.d)) AS maxExpr, max(dcount) AS maxAlias, b FROM max_length_alias_14053__fuzz_45 GROUP BY b; + +DROP TABLE max_length_alias_14053__fuzz_45; diff --git a/parser/testdata/02875_json_array_as_string/ast.json b/parser/testdata/02875_json_array_as_string/ast.json new file mode 100644 index 000000000..2d9d1b5b3 --- /dev/null +++ b/parser/testdata/02875_json_array_as_string/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } 
+ ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.0010273, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02875_json_array_as_string/metadata.json b/parser/testdata/02875_json_array_as_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02875_json_array_as_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02875_json_array_as_string/query.sql b/parser/testdata/02875_json_array_as_string/query.sql new file mode 100644 index 000000000..b1e736b20 --- /dev/null +++ b/parser/testdata/02875_json_array_as_string/query.sql @@ -0,0 +1,2 @@ +set input_format_json_read_arrays_as_strings = 1; +select * from format(JSONEachRow, 'arr String', '{"arr" : [1, "Hello", [1,2,3]]}'); diff --git a/parser/testdata/02875_parallel_replicas_cluster_all_replicas/ast.json b/parser/testdata/02875_parallel_replicas_cluster_all_replicas/ast.json new file mode 100644 index 000000000..c28400c50 --- /dev/null +++ b/parser/testdata/02875_parallel_replicas_cluster_all_replicas/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tt (children 1)" + }, + { + "explain": " Identifier tt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001066724, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02875_parallel_replicas_cluster_all_replicas/metadata.json b/parser/testdata/02875_parallel_replicas_cluster_all_replicas/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02875_parallel_replicas_cluster_all_replicas/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02875_parallel_replicas_cluster_all_replicas/query.sql b/parser/testdata/02875_parallel_replicas_cluster_all_replicas/query.sql new file mode 100644 index 000000000..7f22965de --- /dev/null +++ b/parser/testdata/02875_parallel_replicas_cluster_all_replicas/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS tt; +CREATE TABLE tt (n UInt64) ENGINE=MergeTree() ORDER BY tuple(); +INSERT INTO tt SELECT * FROM numbers(10); + +SET parallel_replicas_only_with_analyzer = 0; -- necessary for CI run with disabled analyzer + +SET enable_parallel_replicas=1, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1; +SELECT count() FROM clusterAllReplicas('test_cluster_two_shard_three_replicas_localhost', currentDatabase(), tt) settings log_comment='02875_190aed82-2423-413b-ad4c-24dcca50f65b'; + +SYSTEM FLUSH LOGS query_log; + +SELECT countIf(ProfileEvents['ParallelReplicasQueryCount']>0) FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() +AND initial_query_id IN (select query_id from system.query_log where current_database = currentDatabase() AND type = 'QueryFinish' AND event_date >= yesterday() AND log_comment = '02875_190aed82-2423-413b-ad4c-24dcca50f65b') +SETTINGS parallel_replicas_for_non_replicated_merge_tree=0; + +DROP TABLE tt; diff --git a/parser/testdata/02875_parallel_replicas_remote/ast.json b/parser/testdata/02875_parallel_replicas_remote/ast.json new file mode 100644 index 000000000..2acbefa08 --- /dev/null +++ b/parser/testdata/02875_parallel_replicas_remote/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tt (children 1)" + }, + { + "explain": " Identifier tt" + } + ], + + "rows": 2, + + 
"statistics": + { + "elapsed": 0.001427787, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02875_parallel_replicas_remote/metadata.json b/parser/testdata/02875_parallel_replicas_remote/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02875_parallel_replicas_remote/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02875_parallel_replicas_remote/query.sql b/parser/testdata/02875_parallel_replicas_remote/query.sql new file mode 100644 index 000000000..d321108a1 --- /dev/null +++ b/parser/testdata/02875_parallel_replicas_remote/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS tt; +CREATE TABLE tt (n UInt64) ENGINE=MergeTree() ORDER BY tuple(); +INSERT INTO tt SELECT * FROM numbers(10); + +SET parallel_replicas_only_with_analyzer = 0; -- necessary for CI run with disabled analyzer + +SET enable_parallel_replicas=1, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1; +SELECT count() FROM remote('127.0.0.{1..6}', currentDatabase(), tt) settings log_comment='02875_89f3c39b-1919-48cb-b66e-ef9904e73146'; + +SYSTEM FLUSH LOGS query_log; + +SELECT countIf(ProfileEvents['ParallelReplicasQueryCount']>0) FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() +AND initial_query_id IN (select query_id from system.query_log where current_database = currentDatabase() AND type = 'QueryFinish' AND event_date >= yesterday() AND log_comment = '02875_89f3c39b-1919-48cb-b66e-ef9904e73146') +SETTINGS parallel_replicas_for_non_replicated_merge_tree=0; + +DROP TABLE tt; diff --git a/parser/testdata/02876_json_incomplete_types_as_strings_inference/ast.json b/parser/testdata/02876_json_incomplete_types_as_strings_inference/ast.json new file mode 100644 index 000000000..c40091472 --- /dev/null +++ b/parser/testdata/02876_json_incomplete_types_as_strings_inference/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001086574, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02876_json_incomplete_types_as_strings_inference/metadata.json b/parser/testdata/02876_json_incomplete_types_as_strings_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02876_json_incomplete_types_as_strings_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02876_json_incomplete_types_as_strings_inference/query.sql b/parser/testdata/02876_json_incomplete_types_as_strings_inference/query.sql new file mode 100644 index 000000000..bb48873b0 --- /dev/null +++ b/parser/testdata/02876_json_incomplete_types_as_strings_inference/query.sql @@ -0,0 +1,6 @@ +set input_format_json_infer_incomplete_types_as_strings=1; +desc format(JSONEachRow, '{"a" : null, "b" : {}, "c" : []}'); +select * from format(JSONEachRow, '{"a" : null, "b" : {}, "c" : []}'); +desc format(JSONEachRow, '{"a" : {"b" : null, "c" : [[], []]}, "d" : {"e" : [{}, {}], "f" : null}}'); +select * from format(JSONEachRow, '{"a" : {"b" : null, "c" : [[], []]}, "d" : {"e" : [{}, {}], "f" : null}}'); + diff --git a/parser/testdata/02876_s3_cluster_schema_inference_names_with_spaces/ast.json b/parser/testdata/02876_s3_cluster_schema_inference_names_with_spaces/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/02876_s3_cluster_schema_inference_names_with_spaces/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02876_s3_cluster_schema_inference_names_with_spaces/metadata.json b/parser/testdata/02876_s3_cluster_schema_inference_names_with_spaces/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02876_s3_cluster_schema_inference_names_with_spaces/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02876_s3_cluster_schema_inference_names_with_spaces/query.sql b/parser/testdata/02876_s3_cluster_schema_inference_names_with_spaces/query.sql new file mode 100644 index 000000000..8c7cf2120 --- /dev/null +++ b/parser/testdata/02876_s3_cluster_schema_inference_names_with_spaces/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +desc s3Cluster(test_cluster_one_shard_three_replicas_localhost, 'http://localhost:11111/test/02876.parquet'); +select * from s3Cluster(test_cluster_one_shard_three_replicas_localhost, 'http://localhost:11111/test/02876.parquet'); + diff --git a/parser/testdata/02876_sort_union_of_sorted/ast.json b/parser/testdata/02876_sort_union_of_sorted/ast.json new file mode 100644 index 000000000..0b46effd7 --- /dev/null +++ b/parser/testdata/02876_sort_union_of_sorted/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table1 (children 1)" + }, + { + "explain": " Identifier table1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000947413, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/02876_sort_union_of_sorted/metadata.json b/parser/testdata/02876_sort_union_of_sorted/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02876_sort_union_of_sorted/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02876_sort_union_of_sorted/query.sql b/parser/testdata/02876_sort_union_of_sorted/query.sql new file mode 100644 index 000000000..23d3772bc --- /dev/null +++ b/parser/testdata/02876_sort_union_of_sorted/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS table1; +DROP TABLE IF EXISTS table2; + +CREATE TABLE table1 (number UInt64) ENGINE=MergeTree ORDER BY tuple(); +CREATE TABLE table2 (number UInt64) ENGINE=MergeTree ORDER BY tuple(); + +INSERT INTO table1 SELECT number FROM numbers_mt(1, 10); +INSERT INTO table2 SELECT number FROM numbers_mt(11, 10); + +SELECT '1..20:'; +SELECT * FROM ((SELECT * FROM table1 ORDER BY number) UNION ALL (SELECT * FROM table2 ORDER BY number)) ORDER BY number; + +SELECT '20..1:'; +SELECT * FROM ((SELECT * FROM table1 ORDER BY number) UNION ALL (SELECT * FROM table2 ORDER BY number)) ORDER BY number DESC; + +SELECT '20..1:'; +SELECT * FROM ((SELECT * FROM table1 ORDER BY number DESC) UNION ALL (SELECT * FROM table2 ORDER BY number DESC)) ORDER BY number DESC; + +DROP TABLE table1; +DROP TABLE table2; diff --git a/parser/testdata/02876_yyyymmddhhmmsstodatetime/ast.json b/parser/testdata/02876_yyyymmddhhmmsstodatetime/ast.json new file mode 100644 index 000000000..b7862c1f5 --- /dev/null +++ b/parser/testdata/02876_yyyymmddhhmmsstodatetime/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000952671, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git 
a/parser/testdata/02876_yyyymmddhhmmsstodatetime/metadata.json b/parser/testdata/02876_yyyymmddhhmmsstodatetime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02876_yyyymmddhhmmsstodatetime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02876_yyyymmddhhmmsstodatetime/query.sql b/parser/testdata/02876_yyyymmddhhmmsstodatetime/query.sql new file mode 100644 index 000000000..6f072ade1 --- /dev/null +++ b/parser/testdata/02876_yyyymmddhhmmsstodatetime/query.sql @@ -0,0 +1,127 @@ +SET session_timezone = 'UTC'; -- no time zone randomization, please + +----------------------------------------------------------- +SELECT '--- YYYYMMDDToDateTime'; + +SELECT 'Invalid input types are rejected'; +SELECT YYYYMMDDhhmmssToDateTime(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT YYYYMMDDhhmmssToDateTime(toDate('2023-09-11')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDhhmmssToDateTime(toDate32('2023-09-11')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDhhmmssToDateTime(toDateTime('2023-09-11 12:18:00')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDhhmmssToDateTime(toDateTime64('2023-09-11 12:18:00', 3)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDhhmmssToDateTime('2023-09-11'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDhhmmssToDateTime(20230911134254, 3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDhhmmssToDateTime(20230911134254, 'invalid tz'); -- { serverError BAD_ARGUMENTS } +SELECT YYYYMMDDhhmmssToDateTime(20230911134254, 'Europe/Berlin', 'bad'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT 'Result type is DateTime'; +SELECT toTypeName(YYYYMMDDhhmmssToDateTime(19910824)); +SELECT toTypeName(YYYYMMDDhhmmssToDateTime(cast(19910824 AS Nullable(UInt64)))); +-- +SELECT 'Check correctness, integer arguments'; +SELECT YYYYMMDDhhmmssToDateTime(19691231595959); +SELECT YYYYMMDDhhmmssToDateTime(19700101000000); +SELECT YYYYMMDDhhmmssToDateTime(20200229111111); -- leap day +SELECT YYYYMMDDhhmmssToDateTime(20230911150505); +SELECT YYYYMMDDhhmmssToDateTime(21060207062815); +SELECT YYYYMMDDhhmmssToDateTime(21060207062816); +SELECT YYYYMMDDhhmmssToDateTime(9223372036854775807); -- huge value + +SELECT 'Check correctness, float arguments'; +SELECT YYYYMMDDhhmmssToDateTime(19691231595959.1); +SELECT YYYYMMDDhhmmssToDateTime(19700101000000.1); +SELECT YYYYMMDDhhmmssToDateTime(20200229111111.1); -- leap day +SELECT YYYYMMDDhhmmssToDateTime(20230911150505.1); +SELECT YYYYMMDDhhmmssToDateTime(21060207062815.1); +SELECT YYYYMMDDhhmmssToDateTime(21060207062816.1); +SELECT YYYYMMDDhhmmssToDateTime(NaN); -- { serverError BAD_ARGUMENTS } +SELECT YYYYMMDDhhmmssToDateTime(Inf); -- { serverError BAD_ARGUMENTS } +SELECT YYYYMMDDhhmmssToDateTime(-Inf); -- { serverError BAD_ARGUMENTS } + +SELECT 'Check correctness, decimal arguments'; +SELECT YYYYMMDDhhmmssToDateTime(toDecimal64(19691231595959.1, 5)); +SELECT YYYYMMDDhhmmssToDateTime(toDecimal64(19700101000000.1, 5)); +SELECT YYYYMMDDhhmmssToDateTime(toDecimal64(20200229111111.1, 5)); -- leap day +SELECT YYYYMMDDhhmmssToDateTime(toDecimal64(20230911150505.1, 5)); +SELECT YYYYMMDDhhmmssToDateTime(toDecimal64(21060207062815.1, 5)); +SELECT YYYYMMDDhhmmssToDateTime(toDecimal64(21060207062816.1, 5)); + +SELECT 'Special cases'; +SELECT YYYYMMDDhhmmssToDateTime(-20230911111111); -- negative +SELECT YYYYMMDDhhmmssToDateTime(110); -- invalid everything +SELECT 
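+-- NOTE (editorial addition, not part of the original test): the argument is decoded
+-- positionally as YYYYMMDDhhmmss, and this 'Special cases' block pins down that inputs
+-- which do not form a valid timestamp return a default value rather than raising an
+-- error (none of these queries carry a serverError annotation). A well-formed input
+-- for comparison, with session_timezone set to UTC at the top of this file:
+--     SELECT YYYYMMDDhhmmssToDateTime(20230911150505);  -- 2023-09-11 15:05:05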
YYYYMMDDhhmmssToDateTime(999999999999999999); -- huge value +SELECT YYYYMMDDhhmmssToDateTime(15001113111111); -- year out of range +SELECT YYYYMMDDhhmmssToDateTime(35001113111111); -- year out of range +SELECT YYYYMMDDhhmmssToDateTime(20231620111111); -- invalid month +SELECT YYYYMMDDhhmmssToDateTime(20230020111111); -- invalid month +SELECT YYYYMMDDhhmmssToDateTime(20230940111111); -- invalid day +SELECT YYYYMMDDhhmmssToDateTime(20230900111111); -- invalid day +SELECT YYYYMMDDhhmmssToDateTime(20230228111111); -- leap day when there is none +SELECT YYYYMMDDhhmmssToDateTime(True); +SELECT YYYYMMDDhhmmssToDateTime(False); +SELECT YYYYMMDDhhmmssToDateTime(NULL); +SELECT YYYYMMDDhhmmssToDateTime(yyyymmdd) FROM (SELECT 19840121 AS yyyymmdd UNION ALL SELECT 20230911 AS yyyymmdd) ORDER BY yyyymmdd; -- non-const + +----------------------------------------------------------- +SELECT '--- YYYYMMDDToDateTime64'; + +SELECT 'Invalid input types are rejected'; +SELECT YYYYMMDDhhmmssToDateTime64(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT YYYYMMDDhhmmssToDateTime64(toDate('2023-09-11')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDhhmmssToDateTime64(toDate32('2023-09-11')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDhhmmssToDateTime64(toDateTime('2023-09-11 12:18:00')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDhhmmssToDateTime64(toDateTime64('2023-09-11 12:18:00', 3)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDhhmmssToDateTime64('2023-09-11'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDhhmmssToDateTime64('2023-09-11', 'invalid precision'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDhhmmssToDateTime64(20230911134254, 3, 3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDhhmmssToDateTime64(20230911134254, 3, 'invalid tz'); -- { serverError BAD_ARGUMENTS } +SELECT YYYYMMDDhhmmssToDateTime64(20230911134254, 3, 'Europe/Berlin', 'no more args'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT 'Result type is DateTime'; +SELECT toTypeName(YYYYMMDDhhmmssToDateTime64(19910824)); +SELECT toTypeName(YYYYMMDDhhmmssToDateTime64(19910824, 5)); +SELECT toTypeName(YYYYMMDDhhmmssToDateTime64(cast(19910824 AS Nullable(UInt64)))); + +SELECT 'Check correctness, integer arguments'; +SELECT YYYYMMDDhhmmssToDateTime64(189912315959); +SELECT YYYYMMDDhhmmssToDateTime64(19000101000000); +SELECT YYYYMMDDhhmmssToDateTime64(20200229111111); -- leap day +SELECT YYYYMMDDhhmmssToDateTime64(20230911150505); +SELECT YYYYMMDDhhmmssToDateTime64(22991231235959); +SELECT YYYYMMDDhhmmssToDateTime64(23000101000000); +-- SELECT YYYYMMDDhhmmssToDateTime64(9223372036854775807); -- huge value, commented out because on ARM, the rounding is slightly different + +SELECT 'Check correctness, float arguments'; +SELECT YYYYMMDDhhmmssToDateTime64(189912315959.1); +SELECT YYYYMMDDhhmmssToDateTime64(19000101000000.1); +SELECT YYYYMMDDhhmmssToDateTime64(20200229111111.1); -- leap day +SELECT YYYYMMDDhhmmssToDateTime64(20230911150505.1); +SELECT YYYYMMDDhhmmssToDateTime64(22991231235959.1); +SELECT YYYYMMDDhhmmssToDateTime64(23000101000000.1); +SELECT YYYYMMDDhhmmssToDateTime64(NaN); -- { serverError BAD_ARGUMENTS } +SELECT YYYYMMDDhhmmssToDateTime64(Inf); -- { serverError BAD_ARGUMENTS } +SELECT YYYYMMDDhhmmssToDateTime64(-Inf); -- { serverError BAD_ARGUMENTS } + +SELECT 'Check correctness, decimal arguments'; +SELECT YYYYMMDDhhmmssToDateTime64(toDecimal64(189912315959.1, 5)); +SELECT 
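+-- NOTE (editorial addition, not part of the original test): the DateTime64 variant
+-- additionally accepts an optional precision argument, exercised by the toTypeName
+-- checks above (DateTime64 defaults to precision 3 when it is omitted), e.g.:
+--     SELECT YYYYMMDDhhmmssToDateTime64(20230911150505, 6);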
YYYYMMDDhhmmssToDateTime64(toDecimal64(19000101000000.1, 5)); +SELECT YYYYMMDDhhmmssToDateTime64(toDecimal64(20200229111111.1, 5)); -- leap day +SELECT YYYYMMDDhhmmssToDateTime64(toDecimal64(20230911150505.1, 5)); +SELECT YYYYMMDDhhmmssToDateTime64(toDecimal64(22991231235959.1, 5)); +SELECT YYYYMMDDhhmmssToDateTime64(toDecimal64(23000101000000.1, 5)); + +SELECT 'Special cases'; +SELECT YYYYMMDDhhmmssToDateTime64(-20230911111111); -- negative +SELECT YYYYMMDDhhmmssToDateTime64(110); -- invalid everything +SELECT YYYYMMDDhhmmssToDateTime64(999999999999999999); -- huge value +SELECT YYYYMMDDhhmmssToDateTime64(15001113111111); -- year out of range +SELECT YYYYMMDDhhmmssToDateTime64(35001113111111); -- year out of range +SELECT YYYYMMDDhhmmssToDateTime64(20231620111111); -- invalid month +SELECT YYYYMMDDhhmmssToDateTime64(20230020111111); -- invalid month +SELECT YYYYMMDDhhmmssToDateTime64(20230940111111); -- invalid day +SELECT YYYYMMDDhhmmssToDateTime64(20230900111111); -- invalid day +SELECT YYYYMMDDhhmmssToDateTime64(20230228111111); -- leap day when there is none +SELECT YYYYMMDDhhmmssToDateTime64(True); +SELECT YYYYMMDDhhmmssToDateTime64(False); +SELECT YYYYMMDDhhmmssToDateTime64(NULL); +SELECT YYYYMMDDhhmmssToDateTime64(yyyymmdd) FROM (SELECT 19840121 AS yyyymmdd UNION ALL SELECT 20230911 AS yyyymmdd) ORDER BY yyyymmdd; -- non-const diff --git a/parser/testdata/02876_yyyymmddtodate/ast.json b/parser/testdata/02876_yyyymmddtodate/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02876_yyyymmddtodate/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02876_yyyymmddtodate/metadata.json b/parser/testdata/02876_yyyymmddtodate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02876_yyyymmddtodate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02876_yyyymmddtodate/query.sql b/parser/testdata/02876_yyyymmddtodate/query.sql new file mode 100644 index 000000000..5bc7feae9 --- /dev/null +++ b/parser/testdata/02876_yyyymmddtodate/query.sql @@ -0,0 +1,120 @@ +----------------------------------------------------------- +SELECT '--- YYYYMMDDToDate'; + +SELECT 'Invalid input types are rejected'; +SELECT YYYYMMDDToDate(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT YYYYMMDDToDate(toDate('2023-09-11')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDToDate(toDate32('2023-09-11')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDToDate(toDateTime('2023-09-11 12:18:00')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDToDate(toDateTime64('2023-09-11 12:18:00', 3)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDToDate('2023-09-11'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDToDate(2023, 09, 11); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT YYYYMMDDToDate(2023, 110); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT 'Result type is Date'; +SELECT toTypeName(YYYYMMDDToDate(19910824)); +SELECT toTypeName(YYYYMMDDToDate(cast(19910824 AS Nullable(UInt64)))); + +SELECT 'Check correctness, integer arguments'; +SELECT YYYYMMDDToDate(19691231); +SELECT YYYYMMDDToDate(19700101); +SELECT YYYYMMDDToDate(20200229); -- leap day +SELECT YYYYMMDDToDate(20230911); +SELECT YYYYMMDDToDate(21490606); +SELECT YYYYMMDDToDate(21490607); +SELECT YYYYMMDDToDate(9223372036854775807); -- huge value + +SELECT 'Check correctness, float arguments'; +SELECT YYYYMMDDToDate(19691231.1); +SELECT 
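+-- NOTE (editorial addition, not part of the original test): for Float and Decimal
+-- inputs the fractional part is discarded before decoding, which this block verifies
+-- by reusing the integer test values with a .1 suffix, e.g.:
+--     SELECT YYYYMMDDToDate(20230911.1) = YYYYMMDDToDate(20230911);  -- 1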
YYYYMMDDToDate(19700101.1); +SELECT YYYYMMDDToDate(20200229.1); -- leap day +SELECT YYYYMMDDToDate(20230911.1); +SELECT YYYYMMDDToDate(21490606.1); +SELECT YYYYMMDDToDate(21490607.1); +SELECT YYYYMMDDToDate(NaN); -- { serverError BAD_ARGUMENTS } +SELECT YYYYMMDDToDate(Inf); -- { serverError BAD_ARGUMENTS } +SELECT YYYYMMDDToDate(-Inf); -- { serverError BAD_ARGUMENTS } + +SELECT 'Check correctness, decimal arguments'; +SELECT YYYYMMDDToDate(toDecimal64(19691231.1, 5)); +SELECT YYYYMMDDToDate(toDecimal64(19700101.1, 5)); +SELECT YYYYMMDDToDate(toDecimal64(20200229.1, 5)); -- leap day +SELECT YYYYMMDDToDate(toDecimal64(20230911.1, 5)); +SELECT YYYYMMDDToDate(toDecimal64(21490606.1, 5)); +SELECT YYYYMMDDToDate(toDecimal64(21490607.1, 5)); + +SELECT 'Special cases'; +SELECT YYYYMMDDToDate(-20230911); -- negative +SELECT YYYYMMDDToDate(110); -- invalid everything +SELECT YYYYMMDDToDate(9999999999999); -- huge value +SELECT YYYYMMDDToDate(15001113); -- year out of range +SELECT YYYYMMDDToDate(35001113); -- year out of range +SELECT YYYYMMDDToDate(20231620); -- invalid month +SELECT YYYYMMDDToDate(20230020); -- invalid month +SELECT YYYYMMDDToDate(20230940); -- invalid day +SELECT YYYYMMDDToDate(20230900); -- invalid day +SELECT YYYYMMDDToDate(20230228); -- leap day when there is none +SELECT YYYYMMDDToDate(True); +SELECT YYYYMMDDToDate(False); +SELECT YYYYMMDDToDate(NULL); +SELECT YYYYMMDDToDate(yyyymmdd) FROM (SELECT 19840121 AS yyyymmdd UNION ALL SELECT 20230911 AS yyyymmdd) ORDER BY yyyymmdd; -- non-const + +----------------------------------------------------------- +SELECT '--- YYYYMMDDToDate32'; + +SELECT 'Invalid input types are rejected'; +SELECT YYYYMMDDToDate32(toDate('2023-09-11')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDToDate32(toDate32('2023-09-11')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDToDate32(toDateTime('2023-09-11 12:18:00')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDToDate32(toDateTime64('2023-09-11 12:18:00', 3)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDToDate32('2023-09-11'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT YYYYMMDDToDate32(2023, 09, 11); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT YYYYMMDDToDate32(2023, 110); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT 'Result type is Date32'; +SELECT toTypeName(YYYYMMDDToDate32(19910824)); +SELECT toTypeName(YYYYMMDDToDate32(cast(19910824 AS Nullable(UInt64)))); + +SELECT 'Check correctness, integer arguments'; +SELECT YYYYMMDDToDate32(18991231); +SELECT YYYYMMDDToDate32(19000101); +SELECT YYYYMMDDToDate32(20200229); -- leap day +SELECT YYYYMMDDToDate32(20230911); +SELECT YYYYMMDDToDate32(22991231); +SELECT YYYYMMDDToDate32(23000101); +SELECT YYYYMMDDToDate32(9223372036854775807); -- huge value + +SELECT 'Check correctness, float arguments'; +SELECT YYYYMMDDToDate32(18991231.1); +SELECT YYYYMMDDToDate32(19000101.1); +SELECT YYYYMMDDToDate32(20200229.1); -- leap day +SELECT YYYYMMDDToDate32(20230911.1); +SELECT YYYYMMDDToDate32(22991231.1); +SELECT YYYYMMDDToDate32(23000101.1); +SELECT YYYYMMDDToDate32(NaN); -- { serverError BAD_ARGUMENTS } +SELECT YYYYMMDDToDate32(Inf); -- { serverError BAD_ARGUMENTS } +SELECT YYYYMMDDToDate32(-Inf); -- { serverError BAD_ARGUMENTS } + +SELECT 'Check correctness, decimal arguments'; +SELECT YYYYMMDDToDate32(toDecimal64(18991231.1, 5)); +SELECT YYYYMMDDToDate32(toDecimal64(19000101.1, 5)); +SELECT YYYYMMDDToDate32(toDecimal64(20200229.1, 5)); -- leap day +SELECT 
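+-- NOTE (editorial addition, not part of the original test): Date's supported range
+-- ends at 2149-06-06, hence the 21490606/21490607 boundary pair in the Date block
+-- above, while Date32 spans 1900-01-01 through 2299-12-31, probed here from both ends
+-- by 18991231/19000101 and 22991231/23000101:
+--     SELECT YYYYMMDDToDate(21490606), YYYYMMDDToDate32(22991231);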
YYYYMMDDToDate32(toDecimal64(20230911.1, 5)); +SELECT YYYYMMDDToDate32(toDecimal64(22991231.1, 5)); +SELECT YYYYMMDDToDate32(toDecimal64(23000101.1, 5)); + +SELECT 'Special cases'; +SELECT YYYYMMDDToDate32(-20230911); -- negative +SELECT YYYYMMDDToDate32(110); -- invalid everything +SELECT YYYYMMDDToDate32(9999999999999); -- huge value +SELECT YYYYMMDDToDate32(15001113); -- year out of range +SELECT YYYYMMDDToDate32(35001113); -- year out of range +SELECT YYYYMMDDToDate32(20231620); -- invalid month +SELECT YYYYMMDDToDate32(20230020); -- invalid month +SELECT YYYYMMDDToDate32(20230940); -- invalid day +SELECT YYYYMMDDToDate32(20230900); -- invalid day +SELECT YYYYMMDDToDate32(20230228); -- leap day when there is none +SELECT YYYYMMDDToDate32(True); +SELECT YYYYMMDDToDate32(False); +SELECT YYYYMMDDToDate32(NULL); +SELECT YYYYMMDDToDate32(yyyymmdd) FROM (SELECT 19840121 AS yyyymmdd UNION ALL SELECT 20230911 AS yyyymmdd) ORDER BY yyyymmdd; -- non-const diff --git a/parser/testdata/02880_indexHint__partition_id/ast.json b/parser/testdata/02880_indexHint__partition_id/ast.json new file mode 100644 index 000000000..6d8f058d7 --- /dev/null +++ b/parser/testdata/02880_indexHint__partition_id/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data (children 1)" + }, + { + "explain": " Identifier data" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001111692, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02880_indexHint__partition_id/metadata.json b/parser/testdata/02880_indexHint__partition_id/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02880_indexHint__partition_id/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02880_indexHint__partition_id/query.sql b/parser/testdata/02880_indexHint__partition_id/query.sql new file mode 100644 index 000000000..9d5dc7bcb --- /dev/null +++ b/parser/testdata/02880_indexHint__partition_id/query.sql @@ -0,0 +1,10 @@ +drop table if exists data; +create table data (part Int) engine=MergeTree() order by tuple() partition by part; +insert into data values (1)(2); + +-- { echoOn } +select * from data prewhere indexHint(_partition_id = '1'); +-- TODO: optimize_use_implicit_projections ignores indexHint (with analyzer) because source columns might be aliased. 
+select count() from data prewhere indexHint(_partition_id = '1') settings optimize_use_implicit_projections = 0; +select * from data where indexHint(_partition_id = '1'); +select count() from data where indexHint(_partition_id = '1') settings optimize_use_implicit_projections = 0; diff --git a/parser/testdata/02882_formatQuery/ast.json b/parser/testdata/02882_formatQuery/ast.json new file mode 100644 index 000000000..f28ac16b2 --- /dev/null +++ b/parser/testdata/02882_formatQuery/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery all_valid (children 1)" + }, + { + "explain": " Identifier all_valid" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001184512, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/02882_formatQuery/metadata.json b/parser/testdata/02882_formatQuery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02882_formatQuery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02882_formatQuery/query.sql b/parser/testdata/02882_formatQuery/query.sql new file mode 100644 index 000000000..c3b3f202c --- /dev/null +++ b/parser/testdata/02882_formatQuery/query.sql @@ -0,0 +1,51 @@ +DROP TABLE IF EXISTS all_valid; +CREATE TABLE all_valid (id UInt64, query String) ENGINE=MergeTree ORDER BY id; +INSERT INTO all_valid VALUES (1, 'SELECT 1') (2, 'SeLeCt 22') (3, 'InSerT into TAB values (\'\')'); + +DROP TABLE IF EXISTS some_invalid; +CREATE TABLE some_invalid (id UInt64, query String) ENGINE=MergeTree ORDER BY id; +INSERT INTO some_invalid VALUES (1, 'SELECT 1') (2, 'SeLeCt 2') (3, 'bad 3') (4, 'select 4') (5, 'bad 5') (6, '') (7, 'SELECT 7'); + +SELECT '-- formatQuery'; + +SELECT formatQuery('SELECT 1;'); +SELECT formatQuery('SELECT 1'); +SELECT formatQuery('SeLeCt 1;'); +SELECT formatQuery('select 1;') == formatQuery('SeLeCt 1'); +SELECT normalizedQueryHash(formatQuery('select 1')) = normalizedQueryHash(formatQuery('SELECT 1')); + +SELECT formatQuery('INSERT INTO tab VALUES (\'\') (\'test\')'); +SELECT formatQuery('CREATE TABLE default.no_prop_table(`some_column` UInt64) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192'); +SELECT formatQuery('EXPLAIN SYNTAX SELECT CAST(1 AS INT), CEIL(1), CEILING(1), CHAR(49), CHAR_LENGTH(\'1\'), CHARACTER_LENGTH(\'1\'), COALESCE(1), CONCAT(\'1\', \'1\'), CORR(1, 1), COS(1), COUNT(1), COVAR_POP(1, 1), COVAR_SAMP(1, 1), DATABASE(), SCHEMA(), DATEDIFF(\'DAY\', toDate(\'2020-10-24\'), toDate(\'2019-10-24\')), EXP(1), FLATTEN([[1]]), FLOOR(1), FQDN(), GREATEST(1), IF(1, 1, 1), IFNULL(1, 1), LCASE(\'A\'), LEAST(1), LENGTH(\'1\'), LN(1), LOCATE(\'1\', \'1\'), LOG(1), LOG10(1), LOG2(1), LOWER(\'A\'), MAX(1), MID(\'123\', 1, 1), MIN(1), MOD(1, 1), NOT(1), NOW(), NOW64(), NULLIF(1, 1), PI(), POSITION(\'123\', \'2\'), POW(1, 1), POWER(1, 1), RAND(), REPLACE(\'1\', \'1\', \'2\'), REVERSE(\'123\'), ROUND(1), SIN(1), SQRT(1), STDDEV_POP(1), STDDEV_SAMP(1), SUBSTR(\'123\', 2), SUBSTRING(\'123\', 2), SUM(1), TAN(1), TANH(1), TRUNC(1), TRUNCATE(1), UCASE(\'A\'), UPPER(\'A\'), USER(), VAR_POP(1), VAR_SAMP(1), WEEK(toDate(\'2020-10-24\')), YEARWEEK(toDate(\'2020-10-24\')) format TSVRaw;'); + +SELECT formatQuery(''); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('SEECTwrong'); -- { serverError SYNTAX_ERROR } + +SELECT id, query, formatQuery(query) FROM all_valid ORDER BY id; +SELECT id, query, formatQuery(query) FROM some_invalid ORDER BY id; -- { 
serverError SYNTAX_ERROR } +SELECT id, query, formatQueryOrNull(query) FROM all_valid ORDER BY id; +SELECT id, query, formatQueryOrNull(query) FROM some_invalid ORDER BY id; + +SELECT '-- formatQuerySingleLine'; + +SELECT formatQuerySingleLine('SELECT 1;'); +SELECT formatQuerySingleLine('SELECT 1'); +SELECT formatQuerySingleLine('SeLeCt 1;'); +SELECT formatQuerySingleLine('select 1;') == formatQuerySingleLine('SeLeCt 1'); +SELECT normalizedQueryHash(formatQuerySingleLine('select 1')) = normalizedQueryHash(formatQuerySingleLine('SELECT 1')); + +SELECT formatQuerySingleLine('INSERT INTO tab VALUES (\'\') (\'test\')'); + +SELECT formatQuerySingleLine('CREATE TABLE default.no_prop_table(`some_column` UInt64) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 8192'); +SELECT formatQuerySingleLine('EXPLAIN SYNTAX SELECT CAST(1 AS INT), CEIL(1), CEILING(1), CHAR(49), CHAR_LENGTH(\'1\'), CHARACTER_LENGTH(\'1\'), COALESCE(1), CONCAT(\'1\', \'1\'), CORR(1, 1), COS(1), COUNT(1), COVAR_POP(1, 1), COVAR_SAMP(1, 1), DATABASE(), SCHEMA(), DATEDIFF(\'DAY\', toDate(\'2020-10-24\'), toDate(\'2019-10-24\')), EXP(1), FLATTEN([[1]]), FLOOR(1), FQDN(), GREATEST(1), IF(1, 1, 1), IFNULL(1, 1), LCASE(\'A\'), LEAST(1), LENGTH(\'1\'), LN(1), LOCATE(\'1\', \'1\'), LOG(1), LOG10(1), LOG2(1), LOWER(\'A\'), MAX(1), MID(\'123\', 1, 1), MIN(1), MOD(1, 1), NOT(1), NOW(), NOW64(), NULLIF(1, 1), PI(), POSITION(\'123\', \'2\'), POW(1, 1), POWER(1, 1), RAND(), REPLACE(\'1\', \'1\', \'2\'), REVERSE(\'123\'), ROUND(1), SIN(1), SQRT(1), STDDEV_POP(1), STDDEV_SAMP(1), SUBSTR(\'123\', 2), SUBSTRING(\'123\', 2), SUM(1), TAN(1), TANH(1), TRUNC(1), TRUNCATE(1), UCASE(\'A\'), UPPER(\'A\'), USER(), VAR_POP(1), VAR_SAMP(1), WEEK(toDate(\'2020-10-24\')), YEARWEEK(toDate(\'2020-10-24\')) format TSVRaw;'); + +SELECT formatQuerySingleLine(''); -- { serverError SYNTAX_ERROR } +SELECT formatQuerySingleLine('SEECTwrong'); -- { serverError SYNTAX_ERROR } + +SELECT id, query, formatQuerySingleLine(query) FROM all_valid ORDER BY id; +SELECT id, query, formatQuerySingleLine(query) FROM some_invalid ORDER BY id; -- { serverError SYNTAX_ERROR } +SELECT id, query, formatQuerySingleLineOrNull(query) FROM all_valid ORDER BY id; +SELECT id, query, formatQuerySingleLineOrNull(query) FROM some_invalid ORDER BY id; + +DROP TABLE all_valid; +DROP TABLE some_invalid; diff --git a/parser/testdata/02882_primary_key_index_in_function_different_types/ast.json b/parser/testdata/02882_primary_key_index_in_function_different_types/ast.json new file mode 100644 index 000000000..aa031e7bc --- /dev/null +++ b/parser/testdata/02882_primary_key_index_in_function_different_types/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001394176, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02882_primary_key_index_in_function_different_types/metadata.json b/parser/testdata/02882_primary_key_index_in_function_different_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02882_primary_key_index_in_function_different_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02882_primary_key_index_in_function_different_types/query.sql b/parser/testdata/02882_primary_key_index_in_function_different_types/query.sql new file mode 100644 index 
000000000..83b389557 --- /dev/null +++ b/parser/testdata/02882_primary_key_index_in_function_different_types/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value UInt64 +) ENGINE=MergeTree ORDER BY (id, value) SETTINGS index_granularity = 8192, index_granularity_bytes = '1Mi'; + +INSERT INTO test_table SELECT number, number FROM numbers(10); + +set enable_analyzer = 0; + +EXPLAIN indexes = 1, description=0 SELECT id FROM test_table WHERE id <= 10 AND value IN (SELECT 5); +EXPLAIN indexes = 1, description=0 SELECT id FROM test_table WHERE id <= 10 AND value IN (SELECT '5'); +EXPLAIN indexes = 1, description=0 SELECT id FROM test_table WHERE id <= 10 AND value IN (SELECT toUInt8(number) FROM numbers(5)); +EXPLAIN indexes = 1, description=0 SELECT id FROM test_table WHERE id <= 10 AND value IN (SELECT toString(number) FROM numbers(5)); + +set enable_analyzer = 1; + +EXPLAIN indexes = 1, description=0 SELECT id FROM test_table WHERE id <= 10 AND value IN (SELECT 5); +EXPLAIN indexes = 1, description=0 SELECT id FROM test_table WHERE id <= 10 AND value IN (SELECT '5'); +EXPLAIN indexes = 1, description=0 SELECT id FROM test_table WHERE id <= 10 AND value IN (SELECT toUInt8(number) FROM numbers(5)); +EXPLAIN indexes = 1, description=0 SELECT id FROM test_table WHERE id <= 10 AND value IN (SELECT toString(number) FROM numbers(5)); + +DROP TABLE test_table; diff --git a/parser/testdata/02882_replicated_fetch_checksums_doesnt_match/ast.json b/parser/testdata/02882_replicated_fetch_checksums_doesnt_match/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02882_replicated_fetch_checksums_doesnt_match/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02882_replicated_fetch_checksums_doesnt_match/metadata.json b/parser/testdata/02882_replicated_fetch_checksums_doesnt_match/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02882_replicated_fetch_checksums_doesnt_match/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02882_replicated_fetch_checksums_doesnt_match/query.sql b/parser/testdata/02882_replicated_fetch_checksums_doesnt_match/query.sql new file mode 100644 index 000000000..c8cf5f9c8 --- /dev/null +++ b/parser/testdata/02882_replicated_fetch_checksums_doesnt_match/query.sql @@ -0,0 +1,45 @@ +-- Tags: no-shared-merge-tree + +DROP TABLE IF EXISTS checksums_r3; +DROP TABLE IF EXISTS checksums_r2; +DROP TABLE IF EXISTS checksums_r1; + +CREATE TABLE checksums_r1 (column1 UInt32, column2 String) Engine = ReplicatedMergeTree('/tables/{database}/checksums_table', 'r1') ORDER BY tuple(); + +CREATE TABLE checksums_r2 (column1 UInt32, column2 String) Engine = ReplicatedMergeTree('/tables/{database}/checksums_table', 'r2') ORDER BY tuple(); + +CREATE TABLE checksums_r3 (column1 UInt32, column2 String) Engine = ReplicatedMergeTree('/tables/{database}/checksums_table', 'r3') ORDER BY tuple(); + +SYSTEM STOP REPLICATION QUEUES checksums_r2; +SYSTEM STOP REPLICATION QUEUES checksums_r3; + +ALTER TABLE checksums_r1 MODIFY COLUMN column1 Int32 SETTINGS alter_sync=1; + +INSERT INTO checksums_r1 VALUES (1, 'hello'); + +INSERT INTO checksums_r3 VALUES (1, 'hello'); + +SYSTEM START REPLICATION QUEUES checksums_r2; + +SYSTEM SYNC REPLICA checksums_r2; + +SELECT count() FROM checksums_r1; +SELECT count() FROM checksums_r2; +SELECT count() FROM checksums_r3; + +SYSTEM START REPLICATION QUEUES checksums_r3; +SYSTEM SYNC REPLICA 
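+-- NOTE (editorial addition, not part of the original test): r3 built its own part from
+-- an insert made while its queue was stopped, before applying the ALTER, so the part it
+-- later exchanges with r1 can carry different checksums; the text_log probe at the end
+-- of the file then looks for CHECKSUM_DOESNT_MATCH errors. SYSTEM SYNC REPLICA blocks
+-- until the named replica has processed its replication queue, as in:
+--     SYSTEM SYNC REPLICA checksums_r2;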
checksums_r3; + +SELECT count() FROM checksums_r1; +SELECT count() FROM checksums_r2; +SELECT count() FROM checksums_r3; + +SYSTEM FLUSH LOGS text_log; + +SET max_rows_to_read = 0; -- system.text_log can be really big +SELECT * FROM system.text_log WHERE event_time >= now() - INTERVAL 120 SECOND and level == 'Error' and message like '%CHECKSUM_DOESNT_MATCH%' and logger_name like ('%' || currentDatabase() || '%checksums_r%'); + +DROP TABLE IF EXISTS checksums_r3; +DROP TABLE IF EXISTS checksums_r2; +DROP TABLE IF EXISTS checksums_r1; + diff --git a/parser/testdata/02883_array_scalar_mult_div_modulo/ast.json b/parser/testdata/02883_array_scalar_mult_div_modulo/ast.json new file mode 100644 index 000000000..e256b3d45 --- /dev/null +++ b/parser/testdata/02883_array_scalar_mult_div_modulo/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_2, UInt64_3, UInt64_5]" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_7" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.000924687, + "rows_read": 12, + "bytes_read": 501 + } +} diff --git a/parser/testdata/02883_array_scalar_mult_div_modulo/metadata.json b/parser/testdata/02883_array_scalar_mult_div_modulo/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02883_array_scalar_mult_div_modulo/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02883_array_scalar_mult_div_modulo/query.sql b/parser/testdata/02883_array_scalar_mult_div_modulo/query.sql new file mode 100644 index 000000000..287463003 --- /dev/null +++ b/parser/testdata/02883_array_scalar_mult_div_modulo/query.sql @@ -0,0 +1,25 @@ +SELECT materialize([2, 3, 5]) * materialize(7); +SELECT materialize(7) * materialize([2, 3, 5]); +SELECT [2, 3, 5] * materialize(7); +SELECT materialize(7) * [2, 3, 5]; +SELECT materialize([2, 3, 5]) * 7; +SELECT 7 * materialize([2, 3, 5]); +SELECT [2, 3, 5] * 7; +SELECT [[[2, 3, 5, 5]]] * 7; +SELECT 7 * [[[2, 3, 5, 5]]]; +SELECT [[[2, 3, 5, 5]]] / 2; +SELECT 2 / [[[2, 3, 5, 5]]]; +SELECT [(1, 2), (2, 2)] * 7; +SELECT [(NULL, 2), (2, NULL)] * 7; +SELECT [(NULL, 2), (2, NULL)] / 1; +SELECT [(1., 100000000000000000000.), (NULL, 1048577)] * 7; +SELECT [CAST('2', 'UInt64'), number] * 7 FROM numbers(5); +SELECT [2, 3, 5] * number FROM numbers(5); +SELECT range(number) * 42 FROM numbers(5); +CREATE TABLE my_table (values Array(Int32)) ENGINE = MergeTree() ORDER BY values; +INSERT INTO my_table (values) VALUES ([12, 3, 1]); +SELECT values * 5 FROM my_table WHERE arrayExists(x -> x > 5, values); +DROP TABLE my_table; +SELECT [6, 6, 3] % 2; +SELECT [6, 6, 3] / 2.5::Decimal(1, 1); +SELECT [1] / 'a'; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/02883_read_in_reverse_order_virtual_column/ast.json b/parser/testdata/02883_read_in_reverse_order_virtual_column/ast.json new file mode 100644 index 000000000..38d2911fd --- /dev/null +++ 
b/parser/testdata/02883_read_in_reverse_order_virtual_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_reverse_order_virt_col (children 1)" + }, + { + "explain": " Identifier t_reverse_order_virt_col" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001370046, + "rows_read": 2, + "bytes_read": 100 + } +} diff --git a/parser/testdata/02883_read_in_reverse_order_virtual_column/metadata.json b/parser/testdata/02883_read_in_reverse_order_virtual_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02883_read_in_reverse_order_virtual_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02883_read_in_reverse_order_virtual_column/query.sql b/parser/testdata/02883_read_in_reverse_order_virtual_column/query.sql new file mode 100644 index 000000000..76821c879 --- /dev/null +++ b/parser/testdata/02883_read_in_reverse_order_virtual_column/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS t_reverse_order_virt_col; + +CREATE TABLE t_reverse_order_virt_col (`order_0` Decimal(76, 53), `p_time` Date) +ENGINE = MergeTree PARTITION BY toYYYYMM(p_time) +ORDER BY order_0; + +INSERT INTO t_reverse_order_virt_col SELECT number, '1984-01-01' FROM numbers(1000000); +SELECT DISTINCT _part FROM (SELECT _part FROM t_reverse_order_virt_col ORDER BY order_0 DESC); + +DROP TABLE IF EXISTS t_reverse_order_virt_col; diff --git a/parser/testdata/02884_async_insert_skip_settings/ast.json b/parser/testdata/02884_async_insert_skip_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02884_async_insert_skip_settings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02884_async_insert_skip_settings/metadata.json b/parser/testdata/02884_async_insert_skip_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02884_async_insert_skip_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02884_async_insert_skip_settings/query.sql b/parser/testdata/02884_async_insert_skip_settings/query.sql new file mode 100644 index 000000000..f3fe3b8b2 --- /dev/null +++ b/parser/testdata/02884_async_insert_skip_settings/query.sql @@ -0,0 +1,47 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS t_async_insert_skip_settings SYNC; + +CREATE TABLE t_async_insert_skip_settings (id UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/tables/t_async_insert_skip_settings', '1') +ORDER BY id; + +SET async_insert = 1; +SET async_insert_deduplicate = 1; +SET wait_for_async_insert = 0; +-- Disable adaptive timeout to prevent immediate push of the first message (if the queue's last push was old) +SET async_insert_use_adaptive_busy_timeout=0; +SET async_insert_busy_timeout_max_ms = 1000000; + +SET insert_deduplication_token = '1'; +SET log_comment = 'async_insert_skip_settings_1'; +INSERT INTO t_async_insert_skip_settings VALUES (1); + +SET insert_deduplication_token = '2'; +SET log_comment = 'async_insert_skip_settings_2'; +INSERT INTO t_async_insert_skip_settings VALUES (1); + +SET insert_deduplication_token = '1'; +SET log_comment = 'async_insert_skip_settings_3'; +INSERT INTO t_async_insert_skip_settings VALUES (2); + +SET insert_deduplication_token = '3'; +SET log_comment = 'async_insert_skip_settings_4'; +INSERT INTO t_async_insert_skip_settings VALUES (2); + +SYSTEM FLUSH LOGS asynchronous_insert_log; + +SELECT
'pending to flush', length(entries.bytes) FROM system.asynchronous_inserts +WHERE database = currentDatabase() AND table = 't_async_insert_skip_settings' +ORDER BY first_update; + +SYSTEM FLUSH ASYNC INSERT QUEUE; + +SELECT * FROM t_async_insert_skip_settings ORDER BY id; + +SYSTEM FLUSH LOGS asynchronous_insert_log; + +SELECT 'flush queries', uniqExact(flush_query_id) FROM system.asynchronous_insert_log +WHERE database = currentDatabase() AND table = 't_async_insert_skip_settings'; + +DROP TABLE t_async_insert_skip_settings SYNC; diff --git a/parser/testdata/02884_duplicate_index_name/ast.json b/parser/testdata/02884_duplicate_index_name/ast.json new file mode 100644 index 000000000..838440b8d --- /dev/null +++ b/parser/testdata/02884_duplicate_index_name/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_dup_index (children 1)" + }, + { + "explain": " Identifier test_dup_index" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001231199, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/02884_duplicate_index_name/metadata.json b/parser/testdata/02884_duplicate_index_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02884_duplicate_index_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02884_duplicate_index_name/query.sql b/parser/testdata/02884_duplicate_index_name/query.sql new file mode 100644 index 000000000..4cd9ae6d2 --- /dev/null +++ b/parser/testdata/02884_duplicate_index_name/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS test_dup_index; + +CREATE TABLE test_dup_index +( + a Int64, + b Int64, + INDEX idx_a a TYPE minmax, + INDEX idx_a b TYPE minmax +) Engine = MergeTree() +ORDER BY a; -- { serverError ILLEGAL_INDEX } diff --git a/parser/testdata/02884_interval_operator_support_plural_literal/ast.json b/parser/testdata/02884_interval_operator_support_plural_literal/ast.json new file mode 100644 index 000000000..d756eb672 --- /dev/null +++ b/parser/testdata/02884_interval_operator_support_plural_literal/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toIntervalYear (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.0012007, + "rows_read": 7, + "bytes_read": 267 + } +} diff --git a/parser/testdata/02884_interval_operator_support_plural_literal/metadata.json b/parser/testdata/02884_interval_operator_support_plural_literal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02884_interval_operator_support_plural_literal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02884_interval_operator_support_plural_literal/query.sql b/parser/testdata/02884_interval_operator_support_plural_literal/query.sql new file mode 100644 index 000000000..41403cdf7 --- /dev/null +++ b/parser/testdata/02884_interval_operator_support_plural_literal/query.sql @@ -0,0 +1,23 @@ +SELECT INTERVAL 2 year; +SELECT INTERVAL 2 years; +SELECT INTERVAL '2 years'; +SELECT INTERVAL 2 month; +SELECT INTERVAL 2 months; +SELECT 
INTERVAL '2 months'; +SELECT INTERVAL 2 week; +SELECT INTERVAL 2 weeks; +SELECT INTERVAL '2 weeks'; +SELECT INTERVAL 2 day; +SELECT INTERVAL 2 days; +SELECT INTERVAL '2 days'; +SELECT INTERVAL 2 hour; +SELECT INTERVAL 2 hours; +SELECT INTERVAL '2 hours'; +SELECT INTERVAL 2 minute; +SELECT INTERVAL 2 minutes; +SELECT INTERVAL '2 minutes'; +SELECT DATE_ADD(hour, 2, toDateTime(1234567890, 'UTC')); +SELECT DATE_ADD(hours, 2, toDateTime(1234567890, 'UTC')); +SELECT DATE_ADD(toDateTime(1234567890, 'UTC'), INTERVAL 2 day); +SELECT DATE_ADD(toDateTime(1234567890, 'UTC'), INTERVAL 2 days); +SELECT DATE_ADD(toDateTime(1234567890, 'UTC'), INTERVAL '2 days'); diff --git a/parser/testdata/02884_parallel_window_functions/ast.json b/parser/testdata/02884_parallel_window_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02884_parallel_window_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02884_parallel_window_functions/metadata.json b/parser/testdata/02884_parallel_window_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02884_parallel_window_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02884_parallel_window_functions/query.sql b/parser/testdata/02884_parallel_window_functions/query.sql new file mode 100644 index 000000000..2207c90a4 --- /dev/null +++ b/parser/testdata/02884_parallel_window_functions/query.sql @@ -0,0 +1,123 @@ +-- Tags: long, no-tsan, no-asan, no-ubsan, no-msan, no-debug + +CREATE TABLE window_function_threading +Engine = MergeTree +ORDER BY (ac, nw) +AS SELECT + toUInt64(toFloat32(number % 2) % 20000000) as ac, + toFloat32(1) as wg, + toUInt16(toFloat32(number % 3) % 400) as nw +FROM numbers_mt(10000000); + +SELECT count() FROM (EXPLAIN PIPELINE SELECT + nw, + sum(WR) AS R, + sumIf(WR, uniq_rows = 1) AS UNR +FROM +( + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_function_threading + GROUP BY ac, nw +) +GROUP BY nw +ORDER BY nw ASC, R DESC +LIMIT 10) where explain ilike '%ScatterByPartitionTransform%' SETTINGS max_threads = 4; + +-- { echoOn } + +SELECT + nw, + sum(WR) AS R, + sumIf(WR, uniq_rows = 1) AS UNR +FROM +( + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_function_threading + GROUP BY ac, nw +) +GROUP BY nw +ORDER BY nw ASC, R DESC +LIMIT 10; + +SELECT + nw, + sum(WR) AS R, + sumIf(WR, uniq_rows = 1) AS UNR +FROM +( + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_function_threading + GROUP BY ac, nw +) +GROUP BY nw +ORDER BY nw ASC, R DESC +LIMIT 10 +SETTINGS max_threads = 1; + +SET max_rows_to_read = 40000000; + +SELECT + nw, + sum(WR) AS R, + sumIf(WR, uniq_rows = 1) AS UNR +FROM +( + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_function_threading + WHERE (ac % 4) = 0 + GROUP BY + ac, + nw + UNION ALL + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_function_threading + WHERE (ac % 4) = 1 + GROUP BY + ac, + nw + UNION ALL + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_function_threading + WHERE (ac % 4) = 2 + GROUP BY + ac, + nw + UNION ALL + SELECT + uniq(nw) OVER (PARTITION BY ac) AS uniq_rows, + AVG(wg) AS WR, + ac, + nw + FROM window_function_threading + WHERE (ac % 4) = 3 + GROUP BY + ac, + 
nw +) +GROUP BY nw +ORDER BY nw ASC, R DESC +LIMIT 10; diff --git a/parser/testdata/02884_parallel_window_functions_bug/ast.json b/parser/testdata/02884_parallel_window_functions_bug/ast.json new file mode 100644 index 000000000..717cddcb5 --- /dev/null +++ b/parser/testdata/02884_parallel_window_functions_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery posts (children 1)" + }, + { + "explain": " Identifier posts" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001382733, + "rows_read": 2, + "bytes_read": 63 + } +} diff --git a/parser/testdata/02884_parallel_window_functions_bug/metadata.json b/parser/testdata/02884_parallel_window_functions_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02884_parallel_window_functions_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02884_parallel_window_functions_bug/query.sql b/parser/testdata/02884_parallel_window_functions_bug/query.sql new file mode 100644 index 000000000..84bc69e23 --- /dev/null +++ b/parser/testdata/02884_parallel_window_functions_bug/query.sql @@ -0,0 +1,84 @@ +CREATE TABLE IF NOT EXISTS posts +( + `page_id` LowCardinality(String), + `post_id` String CODEC(LZ4), + `host_id` UInt32 CODEC(T64, LZ4), + `path_id` UInt32, + `created` DateTime CODEC(T64, LZ4), + `as_of` DateTime CODEC(T64, LZ4) +) +ENGINE = ReplacingMergeTree(as_of) +PARTITION BY toStartOfMonth(created) +ORDER BY (page_id, post_id); + +CREATE TABLE IF NOT EXISTS post_metrics +( + `page_id` LowCardinality(String), + `post_id` String CODEC(LZ4), + `created` DateTime CODEC(T64, LZ4), + `impressions` UInt32 CODEC(T64, LZ4), + `clicks` UInt32 CODEC(T64, LZ4), + `as_of` DateTime CODEC(T64, LZ4) +) +ENGINE = ReplacingMergeTree(as_of) +PARTITION BY toStartOfMonth(created) +ORDER BY (page_id, post_id); + +INSERT INTO posts SELECT + repeat('a', (number % 10) + 1), + toString(number), + number % 10, + number, + now() - toIntervalMinute(number), + now() +FROM numbers(100000); + +INSERT INTO post_metrics SELECT + repeat('a', (number % 10) + 1), + toString(number), + now() - toIntervalMinute(number), + number * 100, + number * 10, + now() +FROM numbers(100000); + +SELECT + host_id, + path_id, + max(rank) AS rank +FROM +( + WITH + as_of_posts AS + ( + SELECT + *, + row_number() OVER (PARTITION BY (page_id, post_id) ORDER BY as_of DESC) AS row_num + FROM posts + WHERE (created >= subtractHours(now(), 24)) AND (host_id > 0) + ), + as_of_post_metrics AS + ( + SELECT + *, + row_number() OVER (PARTITION BY (page_id, post_id) ORDER BY as_of DESC) AS row_num + FROM post_metrics + WHERE created >= subtractHours(now(), 24) + ) + SELECT + page_id, + post_id, + host_id, + path_id, + impressions, + clicks, + ntile(20) OVER (PARTITION BY page_id ORDER BY clicks ASC ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS rank + FROM as_of_posts + GLOBAL LEFT JOIN as_of_post_metrics USING (page_id, post_id, row_num) + WHERE (row_num = 1) AND (impressions > 0) +) AS t +WHERE t.rank > 18 +GROUP BY + host_id, + path_id +FORMAT Null; diff --git a/parser/testdata/02884_string_distance_function/ast.json b/parser/testdata/02884_string_distance_function/ast.json new file mode 100644 index 000000000..66e8682d2 --- /dev/null +++ b/parser/testdata/02884_string_distance_function/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '-- const arguments'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001339342, + "rows_read": 5, + "bytes_read": 189 + } +} diff --git a/parser/testdata/02884_string_distance_function/metadata.json b/parser/testdata/02884_string_distance_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02884_string_distance_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02884_string_distance_function/query.sql b/parser/testdata/02884_string_distance_function/query.sql new file mode 100644 index 000000000..482996e14 --- /dev/null +++ b/parser/testdata/02884_string_distance_function/query.sql @@ -0,0 +1,56 @@ +SELECT '-- const arguments'; +-- just to see it works +SELECT 'clickhouse' AS s1, 'mouse' AS s2, byteHammingDistance(s1, s2); +SELECT 'clickhouse' AS s1, 'mouse' AS s2, editDistance(s1, s2); +SELECT 'clickhouse' AS s1, 'mouse' AS s2, damerauLevenshteinDistance(s1, s2); +SELECT 'clickhouse' AS s1, 'mouse' AS s2, stringJaccardIndex(s1, s2); +SELECT 'clickhouse' AS s1, 'mouse' AS s2, stringJaccardIndexUTF8(s1, s2); +SELECT 'clickhouse' AS s1, 'mouse' AS s2, jaroSimilarity(s1, s2); +SELECT 'clickhouse' AS s1, 'mouse' AS s2, jaroWinklerSimilarity(s1, s2); + +SELECT '-- test aliases'; +SELECT 'clickhouse' AS s1, 'mouse' AS s2, mismatches(s1, s2); +SELECT 'clickhouse' AS s1, 'mouse' AS s2, levenshteinDistance(s1, s2); + +SELECT '-- Deny DoS using too large inputs'; +SELECT editDistance(randomString(power(2, 17)), 'abc'); -- { serverError TOO_LARGE_STRING_SIZE} +SELECT damerauLevenshteinDistance(randomString(power(2, 17)), 'abc'); -- { serverError TOO_LARGE_STRING_SIZE} +SELECT jaroSimilarity(randomString(power(2, 17)), 'abc'); -- { serverError TOO_LARGE_STRING_SIZE} +SELECT jaroWinklerSimilarity(randomString(power(2, 17)), 'abc'); -- { serverError TOO_LARGE_STRING_SIZE} + +DROP TABLE IF EXISTS t; +CREATE TABLE t +( + s1 String, + s2 String +) ENGINE = MergeTree ORDER BY s1; + +-- actual test cases +INSERT INTO t VALUES ('', '') ('abc', '') ('', 'abc') ('abc', 'abc') ('abc', 'ab') ('abc', 'bc') ('clickhouse', 'mouse') ('我是谁', 'Tom') ('Jerry', '我是谁') ('我是谁', '我是我'); + +SELECT '-- non-const arguments'; +SELECT 'byteHammingDistance', s1, s2, byteHammingDistance(s1, s2) FROM t ORDER BY ALL; +SELECT 'editDistance', s1, s2, editDistance(s1, s2) FROM t ORDER BY ALL; +SELECT 'editDistanceUTF8', s1, s2, editDistanceUTF8(s1, s2) FROM t ORDER BY ALL; +SELECT 'damerauLevenshteinDistance', s1, s2, damerauLevenshteinDistance(s1, s2) FROM t ORDER BY ALL; +SELECT 'stringJaccardIndex', s1, s2, stringJaccardIndex(s1, s2) FROM t ORDER BY ALL; +SELECT 'stringJaccardIndexUTF8', s1, s2, stringJaccardIndexUTF8(s1, s2) FROM t ORDER BY ALL; +SELECT 'jaroSimilarity', s1, s2, jaroSimilarity(s1, s2) FROM t ORDER BY ALL; +SELECT 'jaroWinklerSimilarity', s1, s2, jaroWinklerSimilarity(s1, s2) FROM t ORDER BY ALL; + +SELECT '-- Special UTF-8 tests'; +-- We do not perform full UTF8 validation, so sometimes it just returns some result +SELECT stringJaccardIndexUTF8(materialize('hello'), materialize('\x48\x65\x6C')); +SELECT stringJaccardIndexUTF8(materialize('hello'), materialize('\xFF\xFF\xFF\xFF')); +SELECT stringJaccardIndexUTF8(materialize('hello'), materialize('\x41\xE2\x82\xAC')); +SELECT 
stringJaccardIndexUTF8(materialize('hello'), materialize('\xF0\x9F\x99\x82')); +SELECT stringJaccardIndexUTF8(materialize('hello'), materialize('\xFF')); +SELECT stringJaccardIndexUTF8(materialize('hello'), materialize('\xC2\x01')); -- { serverError BAD_ARGUMENTS } +SELECT stringJaccardIndexUTF8(materialize('hello'), materialize('\xC1\x81')); -- { serverError BAD_ARGUMENTS } +SELECT stringJaccardIndexUTF8(materialize('hello'), materialize('\xF0\x80\x80\x41')); -- { serverError BAD_ARGUMENTS } +SELECT stringJaccardIndexUTF8(materialize('hello'), materialize('\xC0\x80')); -- { serverError BAD_ARGUMENTS } +SELECT stringJaccardIndexUTF8(materialize('hello'), materialize('\xD8\x00 ')); -- { serverError BAD_ARGUMENTS } +SELECT stringJaccardIndexUTF8(materialize('hello'), materialize('\xDC\x00')); -- { serverError BAD_ARGUMENTS } +SELECT stringJaccardIndexUTF8('😃🌍', '🙃😃🌑'), stringJaccardIndex('😃🌍', '🙃😃🌑'); + +DROP TABLE t; diff --git a/parser/testdata/02884_virtual_column_order_by/ast.json b/parser/testdata/02884_virtual_column_order_by/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02884_virtual_column_order_by/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02884_virtual_column_order_by/metadata.json b/parser/testdata/02884_virtual_column_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02884_virtual_column_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02884_virtual_column_order_by/query.sql b/parser/testdata/02884_virtual_column_order_by/query.sql new file mode 100644 index 000000000..3c73f8481 --- /dev/null +++ b/parser/testdata/02884_virtual_column_order_by/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest +insert into function file('02884_1.csv') select 1 as x settings engine_file_truncate_on_insert=1; +insert into function file('02884_2.csv') select 2 as x settings engine_file_truncate_on_insert=1; +select _file, * from file('02884_{1,2}.csv') order by _file settings max_threads=1; diff --git a/parser/testdata/02885_arg_min_max_combinator/ast.json b/parser/testdata/02885_arg_min_max_combinator/ast.json new file mode 100644 index 000000000..4d738651d --- /dev/null +++ b/parser/testdata/02885_arg_min_max_combinator/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function sumArgMin (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_20" + }, + { + "explain": " Function sumArgMax (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_20" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" 
+ }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001567353, + "rows_read": 24, + "bytes_read": 931 + } +} diff --git a/parser/testdata/02885_arg_min_max_combinator/metadata.json b/parser/testdata/02885_arg_min_max_combinator/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02885_arg_min_max_combinator/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02885_arg_min_max_combinator/query.sql b/parser/testdata/02885_arg_min_max_combinator/query.sql new file mode 100644 index 000000000..8502234ac --- /dev/null +++ b/parser/testdata/02885_arg_min_max_combinator/query.sql @@ -0,0 +1,10 @@ +select sumArgMin(number, number % 20), sumArgMax(number, number % 20) from numbers(100); +select sumArgMin(number, toString(number % 20)), sumArgMax(number, toString(number % 20)) from numbers(100); +select sumArgMinIf(number, number % 20, number % 2 = 0), sumArgMaxIf(number, number % 20, number % 2 = 0) from numbers(100); +select sumArgMin() from numbers(100); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +select sumArgMin(number) from numbers(100); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +-- Try passing a non-comparable type, for example an AggregationState +select sumArgMin(number, unhex('0000000000000000')::AggregateFunction(sum, UInt64)) from numbers(100); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +-- ASAN (data leak) +SELECT sumArgMax(number, tuple(number, repeat('a', (10 * (number % 100))::Int32))) FROM numbers(1000); diff --git a/parser/testdata/02885_create_distributed_table_without_as/ast.json b/parser/testdata/02885_create_distributed_table_without_as/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02885_create_distributed_table_without_as/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02885_create_distributed_table_without_as/metadata.json b/parser/testdata/02885_create_distributed_table_without_as/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02885_create_distributed_table_without_as/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02885_create_distributed_table_without_as/query.sql b/parser/testdata/02885_create_distributed_table_without_as/query.sql new file mode 100644 index 000000000..879742683 --- /dev/null +++ b/parser/testdata/02885_create_distributed_table_without_as/query.sql @@ -0,0 +1,16 @@ +-- Here a Distributed table created without AS must detect its structure.
+ +DROP TABLE IF EXISTS dist_tbl; +DROP TABLE IF EXISTS local_tbl; + +CREATE TABLE local_tbl (`key` UInt32, `value` UInt32 DEFAULT 42) ENGINE = MergeTree ORDER BY key; +CREATE TABLE dist_tbl ENGINE = Distributed('test_shard_localhost', currentDatabase(), 'local_tbl', rand()); +SHOW CREATE TABLE dist_tbl; +INSERT INTO dist_tbl (key) SETTINGS distributed_foreground_insert=1 VALUES (99); +SELECT 'local_tbl'; +SELECT * FROM local_tbl; +SELECT 'dist_tbl'; +SELECT * FROM dist_tbl; + +DROP TABLE dist_tbl; +DROP TABLE local_tbl; diff --git a/parser/testdata/02886_binary_like/ast.json b/parser/testdata/02886_binary_like/ast.json new file mode 100644 index 000000000..6aaa1cd87 --- /dev/null +++ b/parser/testdata/02886_binary_like/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function like (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'aяb'" + }, + { + "explain": " Literal 'a_b'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001254431, + "rows_read": 8, + "bytes_read": 282 + } +} diff --git a/parser/testdata/02886_binary_like/metadata.json b/parser/testdata/02886_binary_like/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02886_binary_like/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02886_binary_like/query.sql b/parser/testdata/02886_binary_like/query.sql new file mode 100644 index 000000000..ba11f1fc0 --- /dev/null +++ b/parser/testdata/02886_binary_like/query.sql @@ -0,0 +1,26 @@ +SELECT 'aяb' LIKE 'a_b'; +SELECT 'a\0b' LIKE 'a_b'; +SELECT 'a\0b' LIKE 'a\0b'; +SELECT 'a\0b' LIKE 'a%\0b'; +SELECT 'a\xFFb' LIKE 'a%\xFFb'; +SELECT 'a\xFFb' LIKE 'a%\xFF\xFEb'; +SELECT 'a\xFFb' LIKE '%a\xFF\xFEb'; +SELECT 'a\xFF\xFEb' LIKE '%a\xFF\xFEb'; + +SELECT materialize('aяb') LIKE 'a_b'; +SELECT materialize('a\0b') LIKE 'a_b'; +SELECT materialize('a\0b') LIKE 'a\0b'; +SELECT materialize('a\0b') LIKE 'a%\0b'; +SELECT materialize('a\xFFb') LIKE 'a%\xFFb'; +SELECT materialize('a\xFFb') LIKE 'a%\xFF\xFEb'; +SELECT materialize('a\xFFb') LIKE '%a\xFF\xFEb'; +SELECT materialize('a\xFF\xFEb') LIKE '%a\xFF\xFEb'; + +SELECT materialize('aяb') LIKE materialize('a_b'); +SELECT materialize('a\0b') LIKE materialize('a_b'); +SELECT materialize('a\0b') LIKE materialize('a\0b'); +SELECT materialize('a\0b') LIKE materialize('a%\0b'); +SELECT materialize('a\xFFb') LIKE materialize('a%\xFFb'); +SELECT materialize('a\xFFb') LIKE materialize('a%\xFF\xFEb'); +SELECT materialize('a\xFFb') LIKE materialize('%a\xFF\xFEb'); +SELECT materialize('a\xFF\xFEb') LIKE materialize('%a\xFF\xFEb'); diff --git a/parser/testdata/02887_byteswap/ast.json b/parser/testdata/02887_byteswap/ast.json new file mode 100644 index 000000000..e99a01932 --- /dev/null +++ b/parser/testdata/02887_byteswap/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function byteSwap (children 1)" + }, + { + "explain": " ExpressionList (children 1)" 
+ }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '0'" + }, + { + "explain": " Literal 'UInt8'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001275983, + "rows_read": 10, + "bytes_read": 371 + } +} diff --git a/parser/testdata/02887_byteswap/metadata.json b/parser/testdata/02887_byteswap/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02887_byteswap/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02887_byteswap/query.sql b/parser/testdata/02887_byteswap/query.sql new file mode 100644 index 000000000..e428be828 --- /dev/null +++ b/parser/testdata/02887_byteswap/query.sql @@ -0,0 +1,61 @@ +SELECT byteSwap(0::UInt8); +SELECT byteSwap(1::UInt8); +SELECT byteSwap(255::UInt8); + +SELECT byteSwap(256::UInt16); +SELECT byteSwap(4135::UInt16); +SELECT byteSwap(10000::UInt16); +SELECT byteSwap(65535::UInt16); + +SELECT byteSwap(65536::UInt32); +SELECT byteSwap(3351772109::UInt32); +SELECT byteSwap(3455829959::UInt32); +SELECT byteSwap(4294967295::UInt32); + +SELECT byteSwap(4294967296::UInt64); +SELECT byteSwap(123294967295::UInt64); +SELECT byteSwap(18439412204227788800::UInt64); +SELECT byteSwap(18446744073709551615::UInt64); + +SELECT byteSwap(-0::Int8); +SELECT byteSwap(-1::Int8); +SELECT byteSwap(-128::Int8); + +SELECT byteSwap(-129::Int16); +SELECT byteSwap(-4135::Int16); +SELECT byteSwap(-32768::Int16); + +SELECT byteSwap(-32769::Int32); +SELECT byteSwap(-3351772109::Int32); +SELECT byteSwap(-2147483648::Int32); + +SELECT byteSwap(-2147483649::Int64); +SELECT byteSwap(-1242525266376::Int64); +SELECT byteSwap(-9223372036854775808::Int64); + +SELECT byteSwap(18446744073709551616::UInt128); +SELECT byteSwap(-9223372036854775809::Int128); + +SELECT byteSwap(340282366920938463463374607431768211456::UInt256); +SELECT byteSwap(-170141183460469231731687303715884105729::Int256); + +-- Booleans are interpreted as UInt8 +SELECT byteSwap(false); +SELECT byteSwap(true); + +-- Number of arguments should equal 1 +SELECT byteSwap(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT byteSwap(128, 129); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +-- Input should be integral +SELECT byteSwap('abc'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT byteSwap(toFixedString('abc', 3)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT byteSwap(toDate('2019-01-01')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT byteSwap(toDate32('2019-01-01')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT byteSwap(toDateTime32(1546300800)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT byteSwap(toDateTime64(1546300800, 3)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT byteSwap(generateUUIDv4()); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT byteSwap(toDecimal32(2, 4)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT byteSwap(toFloat32(123.456)); -- { serverError NOT_IMPLEMENTED } +SELECT byteSwap(toFloat64(123.456)); -- { serverError NOT_IMPLEMENTED } + diff --git a/parser/testdata/02887_format_readable_timedelta_subseconds/ast.json b/parser/testdata/02887_format_readable_timedelta_subseconds/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02887_format_readable_timedelta_subseconds/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02887_format_readable_timedelta_subseconds/metadata.json 
b/parser/testdata/02887_format_readable_timedelta_subseconds/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02887_format_readable_timedelta_subseconds/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02887_format_readable_timedelta_subseconds/query.sql b/parser/testdata/02887_format_readable_timedelta_subseconds/query.sql new file mode 100644 index 000000000..0a8a76d54 --- /dev/null +++ b/parser/testdata/02887_format_readable_timedelta_subseconds/query.sql @@ -0,0 +1,30 @@ +-- max_unit bigger than second, min_unit omitted (and considered 'seconds') +WITH + 'hours' AS maximum_unit, + arrayJoin([1.12, 60.2, 123.33, 24.45, 35.57, 66.64, 67.79, 48.88, 99.96, 3600]) AS elapsed +SELECT + formatReadableTimeDelta(elapsed, maximum_unit) AS time_delta; + +-- max_unit smaller than second, min_unit omitted (and considered 'nanoseconds') +WITH + 'milliseconds' AS maximum_unit, + arrayJoin([1.12, 60.2, 123.33, 24.45, 35.57, 66.64, 67.79797979, 48.888888, 99.96, 3600]) AS elapsed +SELECT + formatReadableTimeDelta(elapsed, maximum_unit) AS time_delta; + +-- Check that an exception is thrown +SELECT formatReadableTimeDelta(1.1, 'seconds', 'hours'); -- { serverError BAD_ARGUMENTS } + +-- Check that empty units are omitted unless they are the only ones +WITH + 'hours' AS maximum_unit, + 'microseconds' as minimum_unit, + arrayJoin([0, 3601.000000003]) AS elapsed +SELECT + formatReadableTimeDelta(elapsed, maximum_unit, minimum_unit); + +WITH + 'milliseconds' AS maximum_unit, + arrayJoin([0, 1.0005]) AS elapsed +SELECT + formatReadableTimeDelta(elapsed, maximum_unit); diff --git a/parser/testdata/02887_insert_quorum_wo_keeper_retries/ast.json b/parser/testdata/02887_insert_quorum_wo_keeper_retries/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02887_insert_quorum_wo_keeper_retries/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02887_insert_quorum_wo_keeper_retries/metadata.json b/parser/testdata/02887_insert_quorum_wo_keeper_retries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02887_insert_quorum_wo_keeper_retries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02887_insert_quorum_wo_keeper_retries/query.sql b/parser/testdata/02887_insert_quorum_wo_keeper_retries/query.sql new file mode 100644 index 000000000..eb7f8798a --- /dev/null +++ b/parser/testdata/02887_insert_quorum_wo_keeper_retries/query.sql @@ -0,0 +1,23 @@ +-- Tags: zookeeper, no-parallel, no-shared-merge-tree +# no-shared-merge-tree: quorum logic is specific to replicated tables + +DROP TABLE IF EXISTS quorum1; +DROP TABLE IF EXISTS quorum2; + +CREATE TABLE quorum1(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_02887/quorum', '1') ORDER BY x; +CREATE TABLE quorum2(x UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_02887/quorum', '2') ORDER BY x; + +SET insert_keeper_fault_injection_probability=0; +SET insert_keeper_max_retries = 0; +SET insert_quorum = 2; + +system enable failpoint replicated_merge_tree_insert_quorum_fail_0; + +INSERT INTO quorum1 VALUES (1), (2), (3), (4), (5); -- {serverError UNKNOWN_STATUS_OF_INSERT} + +INSERT INTO quorum1 VALUES (6), (7), (8), (9), (10); + +SELECT count() FROM quorum1; + +DROP TABLE quorum1 NO DELAY; +DROP TABLE quorum2 NO DELAY; diff --git a/parser/testdata/02887_tuple_element_distributed/ast.json b/parser/testdata/02887_tuple_element_distributed/ast.json new
file mode 100644 index 000000000..43c821eb7 --- /dev/null +++ b/parser/testdata/02887_tuple_element_distributed/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '127.0.0.{1,2,3}'" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001153821, + "rows_read": 23, + "bytes_read": 920 + } +} diff --git a/parser/testdata/02887_tuple_element_distributed/metadata.json b/parser/testdata/02887_tuple_element_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02887_tuple_element_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02887_tuple_element_distributed/query.sql b/parser/testdata/02887_tuple_element_distributed/query.sql new file mode 100644 index 000000000..b9c5c856d --- /dev/null +++ b/parser/testdata/02887_tuple_element_distributed/query.sql @@ -0,0 +1 @@ +SELECT equals(tupleElement(tuple('a', 10) AS x, 1), 'a') FROM remote('127.0.0.{1,2,3}', numbers(2)); diff --git a/parser/testdata/02888_attach_partition_from_different_tables/ast.json b/parser/testdata/02888_attach_partition_from_different_tables/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02888_attach_partition_from_different_tables/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02888_attach_partition_from_different_tables/metadata.json b/parser/testdata/02888_attach_partition_from_different_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02888_attach_partition_from_different_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02888_attach_partition_from_different_tables/query.sql b/parser/testdata/02888_attach_partition_from_different_tables/query.sql new file mode 100644 index 000000000..ae930408b --- /dev/null +++ b/parser/testdata/02888_attach_partition_from_different_tables/query.sql @@ -0,0 +1,90 @@ +-- test different index type +CREATE TABLE attach_partition_t1 ( + a UInt32, + b String, + INDEX bf b TYPE tokenbf_v1(8192, 3, 0) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY a; + +INSERT INTO attach_partition_t1 SELECT number, toString(number) FROM numbers(10); + +CREATE TABLE attach_partition_t2 ( + a UInt32, + b String, + INDEX bf b TYPE bloom_filter GRANULARITY 1 +) 
+ENGINE = MergeTree +ORDER BY a; + +ALTER TABLE attach_partition_t2 ATTACH PARTITION tuple() FROM attach_partition_t1; -- { serverError BAD_ARGUMENTS } + +-- test different projection name +CREATE TABLE attach_partition_t3 ( + a UInt32, + b String, + PROJECTION proj + ( + SELECT + b, + sum(a) + GROUP BY b + ) +) +ENGINE = MergeTree +ORDER BY a; + +INSERT INTO attach_partition_t3 SELECT number, toString(number) FROM numbers(10); + +CREATE TABLE attach_partition_t4 ( + a UInt32, + b String, + PROJECTION differently_named_proj + ( + SELECT + b, + sum(a) + GROUP BY b + ) +) +ENGINE = MergeTree +ORDER BY a; + +ALTER TABLE attach_partition_t4 ATTACH PARTITION tuple() FROM attach_partition_t3; -- { serverError BAD_ARGUMENTS } + +-- check attach with same index and projection +CREATE TABLE attach_partition_t5 ( + a UInt32, + b String, + PROJECTION proj + ( + SELECT + b, + sum(a) + GROUP BY b + ) +) +ENGINE = MergeTree +ORDER BY a; + +INSERT INTO attach_partition_t5 SELECT number, toString(number) FROM numbers(10); + + +CREATE TABLE attach_partition_t6 ( + a UInt32, + b String, + PROJECTION proj + ( + SELECT + b, + sum(a) + GROUP BY b + ) +) +ENGINE = MergeTree +ORDER BY a; + +ALTER TABLE attach_partition_t6 ATTACH PARTITION tuple() FROM attach_partition_t5; + +SELECT * FROM attach_partition_t6 WHERE b = '1'; +SELECT b, sum(a) FROM attach_partition_t6 GROUP BY b ORDER BY b; diff --git a/parser/testdata/02888_integer_type_inference_in_if_function/ast.json b/parser/testdata/02888_integer_type_inference_in_if_function/ast.json new file mode 100644 index 000000000..1ea608043 --- /dev/null +++ b/parser/testdata/02888_integer_type_inference_in_if_function/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (alias res) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_9223372036854775806" + }, + { + "explain": " Literal Int64_-9223372036854775808" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001409538, + "rows_read": 18, + "bytes_read": 734 + } +} diff --git a/parser/testdata/02888_integer_type_inference_in_if_function/metadata.json b/parser/testdata/02888_integer_type_inference_in_if_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02888_integer_type_inference_in_if_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02888_integer_type_inference_in_if_function/query.sql b/parser/testdata/02888_integer_type_inference_in_if_function/query.sql new file mode 100644 index 000000000..065536e0e --- /dev/null +++ b/parser/testdata/02888_integer_type_inference_in_if_function/query.sql @@ -0,0 +1,13 @@ +SELECT if(number % 2, 9223372036854775806, 
-9223372036854775808) AS res FROM numbers(2); +SELECT if(number % 2, materialize(9223372036854775806), -9223372036854775808) AS res FROM numbers(2); +SELECT if(number % 2, 9223372036854775806, materialize(-9223372036854775808)) AS res FROM numbers(2); +SELECT if(number % 2, materialize(9223372036854775806), materialize(-9223372036854775808)) AS res FROM numbers(2); +SELECT if(number % 2, [9223372036854775806], [2, 65537, -9223372036854775808]) AS res FROM numbers(2); +SELECT if(number % 2, materialize([9223372036854775806]), [2, 65537, -9223372036854775808]) AS res FROM numbers(2); +SELECT if(number % 2, [9223372036854775806], materialize([2, 65537, -9223372036854775808])) AS res FROM numbers(2); +SELECT if(number % 2, materialize([9223372036854775806]), materialize([2, 65537, -9223372036854775808])) AS res FROM numbers(2); +SELECT if(number % 2, [[9223372036854775806]], [[2, 65537, -9223372036854775808]]) AS res FROM numbers(2); +SELECT if(number % 2, materialize([[9223372036854775806]]), [[2, 65537, -9223372036854775808]]) AS res FROM numbers(2); +SELECT if(number % 2, [[9223372036854775806]], materialize([[2, 65537, -9223372036854775808]])) AS res FROM numbers(2); +SELECT if(number % 2, materialize([[9223372036854775806]]), materialize([[2, 65537, -9223372036854775808]])) AS res FROM numbers(2); + diff --git a/parser/testdata/02888_obsolete_settings/ast.json b/parser/testdata/02888_obsolete_settings/ast.json new file mode 100644 index 000000000..dd92ec6de --- /dev/null +++ b/parser/testdata/02888_obsolete_settings/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '-- Obsolete server settings'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001189076, + "rows_read": 5, + "bytes_read": 198 + } +} diff --git a/parser/testdata/02888_obsolete_settings/metadata.json b/parser/testdata/02888_obsolete_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02888_obsolete_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02888_obsolete_settings/query.sql b/parser/testdata/02888_obsolete_settings/query.sql new file mode 100644 index 000000000..6e68bb1f8 --- /dev/null +++ b/parser/testdata/02888_obsolete_settings/query.sql @@ -0,0 +1,8 @@ +SELECT '-- Obsolete server settings'; +SELECT name FROM system.server_settings WHERE is_obsolete = 1 ORDER BY name; + +SELECT '-- Obsolete general settings'; +SELECT count() >= 10 FROM system.settings WHERE is_obsolete = 1; + +SELECT '-- Obsolete merge tree settings'; +SELECT count() >= 10 FROM system.merge_tree_settings WHERE is_obsolete = 1; diff --git a/parser/testdata/02888_single_state_nullable_type/ast.json b/parser/testdata/02888_single_state_nullable_type/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02888_single_state_nullable_type/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02888_single_state_nullable_type/metadata.json b/parser/testdata/02888_single_state_nullable_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02888_single_state_nullable_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02888_single_state_nullable_type/query.sql b/parser/testdata/02888_single_state_nullable_type/query.sql new file mode 100644 index 000000000..420090a0b --- /dev/null +++ b/parser/testdata/02888_single_state_nullable_type/query.sql @@ -0,0 +1,7 @@ +WITH minSimpleState(value) AS c +SELECT toTypeName(c), c +FROM ( + SELECT NULL as value + UNION ALL + SELECT 1 as value +); diff --git a/parser/testdata/02888_system_tables_with_inaccessible_table_function/ast.json b/parser/testdata/02888_system_tables_with_inaccessible_table_function/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02888_system_tables_with_inaccessible_table_function/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02888_system_tables_with_inaccessible_table_function/metadata.json b/parser/testdata/02888_system_tables_with_inaccessible_table_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02888_system_tables_with_inaccessible_table_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02888_system_tables_with_inaccessible_table_function/query.sql b/parser/testdata/02888_system_tables_with_inaccessible_table_function/query.sql new file mode 100644 index 000000000..1727ae3dc --- /dev/null +++ b/parser/testdata/02888_system_tables_with_inaccessible_table_function/query.sql @@ -0,0 +1,46 @@ +-- Tags: no-fasttest + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; + +CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier}; + + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc01 (x int) AS postgresql('127.121.0.1:5432', 'postgres_db', 'postgres_table', 'postgres_user', '124444'); +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc02 (x int) AS mysql('127.123.0.1:3306', 'mysql_db', 'mysql_table', 'mysql_user','123123'); +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc03 (a int) AS sqlite('db_path', 'table_name'); +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc04 (a int) AS mongodb('127.0.0.1:27017','test', 'my_collection', 'test_user', 'password', 'a Int'); +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc05 (a int) AS redis('127.0.0.1:6379', 'key', 'key UInt32'); +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc06 (a int) AS s3('http://some_addr:9000/cloud-storage-01/data.tsv', 'M9O7o0SX5I4udXhWxI12', '9ijqzmVN83fzD9XDkEAAAAAAAA', 'TSV'); + + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc01_without_schema AS postgresql('127.121.0.1:5432', 'postgres_db', 'postgres_table', 'postgres_user', '124444'); -- { serverError POSTGRESQL_CONNECTION_FAILURE } +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc02_without_schema AS mysql('127.123.0.1:3306', 'mysql_db', 'mysql_table', 'mysql_user','123123'); -- {serverError ALL_CONNECTION_TRIES_FAILED } + +SELECT name, engine, engine_full, create_table_query, data_paths, notEmpty([metadata_path]), notEmpty([uuid]) + FROM system.tables + WHERE name like '%tablefunc%' and database=currentDatabase() + ORDER BY name; + +DETACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc01; +DETACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc02; +DETACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc03; +DETACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc04; +DETACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc05; +DETACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc06; + +ATTACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc01; +ATTACH TABLE 
{CLICKHOUSE_DATABASE:Identifier}.tablefunc02; +ATTACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc03; +ATTACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc04; +ATTACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc05; +ATTACH TABLE {CLICKHOUSE_DATABASE:Identifier}.tablefunc06; + +SELECT name, engine, engine_full, create_table_query, data_paths, notEmpty([metadata_path]), notEmpty([uuid]) + FROM system.tables + WHERE name like '%tablefunc%' and database=currentDatabase() + ORDER BY name; + +SELECT count() FROM {CLICKHOUSE_DATABASE:Identifier}.tablefunc01; -- { serverError POSTGRESQL_CONNECTION_FAILURE } +SELECT engine FROM system.tables WHERE name = 'tablefunc01' and database=currentDatabase(); + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; diff --git a/parser/testdata/02889_datetime64_from_string/ast.json b/parser/testdata/02889_datetime64_from_string/ast.json new file mode 100644 index 000000000..7b4918a29 --- /dev/null +++ b/parser/testdata/02889_datetime64_from_string/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '-123'" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal 'UTC'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001193343, + "rows_read": 9, + "bytes_read": 320 + } +} diff --git a/parser/testdata/02889_datetime64_from_string/metadata.json b/parser/testdata/02889_datetime64_from_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02889_datetime64_from_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02889_datetime64_from_string/query.sql b/parser/testdata/02889_datetime64_from_string/query.sql new file mode 100644 index 000000000..99ace8a6e --- /dev/null +++ b/parser/testdata/02889_datetime64_from_string/query.sql @@ -0,0 +1,10 @@ +SELECT toDateTime64('-123', 3, 'UTC'); -- Allowed: no year starts with '-' +SELECT toDateTime64('23.9', 3, 'UTC'); -- Allowed: no year has a dot in notation +SELECT toDateTime64('-23.9', 3, 'UTC'); -- Allowed + +SELECT toDateTime64OrNull('0', 3, 'UTC'); +SELECT cast('0' as Nullable(DateTime64(3, 'UTC'))); + +SELECT toDateTime64('1234', 3, 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +SELECT toDateTime64('0', 3, 'UTC'); -- { serverError CANNOT_PARSE_DATETIME } +SELECT cast('0' as DateTime64(3, 'UTC')); -- { serverError CANNOT_PARSE_DATETIME } diff --git a/parser/testdata/02889_parts_columns_filenames/ast.json b/parser/testdata/02889_parts_columns_filenames/ast.json new file mode 100644 index 000000000..93ae51639 --- /dev/null +++ b/parser/testdata/02889_parts_columns_filenames/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_parts_columns_filenames (children 1)" + }, + { + "explain": " Identifier t_parts_columns_filenames" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001228079, + "rows_read": 2, + "bytes_read": 102 + } +} diff --git a/parser/testdata/02889_parts_columns_filenames/metadata.json b/parser/testdata/02889_parts_columns_filenames/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02889_parts_columns_filenames/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02889_parts_columns_filenames/query.sql b/parser/testdata/02889_parts_columns_filenames/query.sql new file mode 100644 index 000000000..488f0def3 --- /dev/null +++ b/parser/testdata/02889_parts_columns_filenames/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS t_parts_columns_filenames; + +CREATE TABLE t_parts_columns_filenames (id UInt64, v UInt64, long_v_name UInt64, long_arr_name Array(UInt64), arr_col Array(UInt64)) +ENGINE = MergeTree ORDER BY id +SETTINGS + min_bytes_for_wide_part = 0, + replace_long_file_name_to_hash = 1, + max_file_name_length = 8, + ratio_of_defaults_for_sparse_serialization = 0.9; + +INSERT INTO t_parts_columns_filenames SELECT number, 0, 0, range(number % 5), range(number % 5) FROM numbers(10); + +SELECT * FROM t_parts_columns_filenames ORDER BY id; + +SELECT name, column, type, serialization_kind, substreams, filenames +FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_parts_columns_filenames' +ORDER BY name, column; + +DROP TABLE IF EXISTS t_parts_columns_filenames; diff --git a/parser/testdata/02889_print_pretty_type_names/ast.json b/parser/testdata/02889_print_pretty_type_names/ast.json new file mode 100644 index 000000000..b137f46c5 --- /dev/null +++ b/parser/testdata/02889_print_pretty_type_names/ast.json @@ -0,0 +1,163 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 3)" + }, + { + "explain": " Identifier test" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration a (children 1)" + }, + { + "explain": " DataType Tuple (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " NameTypePair b (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " NameTypePair c (children 1)" + }, + { + "explain": " DataType Tuple (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " NameTypePair d (children 1)" + }, + { + "explain": " DataType Nullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " NameTypePair e (children 1)" + }, + { + "explain": " DataType Array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType UInt32" + }, + { + "explain": " NameTypePair f (children 1)" + }, + { + "explain": " DataType Array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType Tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " NameTypePair g (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " NameTypePair h (children 1)" + }, + { + "explain": " DataType Map (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " DataType String" + }, + { + "explain": " DataType Array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType Tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " NameTypePair i (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " NameTypePair j (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " 
NameTypePair k (children 1)" + }, + { + "explain": " DataType Date" + }, + { + "explain": " NameTypePair l (children 1)" + }, + { + "explain": " DataType Nullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 47, + + "statistics": + { + "elapsed": 0.001195679, + "rows_read": 47, + "bytes_read": 2022 + } +} diff --git a/parser/testdata/02889_print_pretty_type_names/metadata.json b/parser/testdata/02889_print_pretty_type_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02889_print_pretty_type_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02889_print_pretty_type_names/query.sql b/parser/testdata/02889_print_pretty_type_names/query.sql new file mode 100644 index 000000000..f8a207d35 --- /dev/null +++ b/parser/testdata/02889_print_pretty_type_names/query.sql @@ -0,0 +1,5 @@ +create table test (a Tuple(b String, c Tuple(d Nullable(UInt64), e Array(UInt32), f Array(Tuple(g String, h Map(String, Array(Tuple(i String, j UInt64))))), k Date), l Nullable(String))) engine=Memory; +insert into test select * from generateRandom(42) limit 1; +set print_pretty_type_names=1; +desc test format TSVRaw; +select toTypeName(a) from test limit 1 format TSVRaw; diff --git a/parser/testdata/02889_system_drop_format_schema/ast.json b/parser/testdata/02889_system_drop_format_schema/ast.json new file mode 100644 index 000000000..436f9c749 --- /dev/null +++ b/parser/testdata/02889_system_drop_format_schema/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Explain EXPLAIN SYNTAX (children 1)" + }, + { + "explain": " SYSTEM query" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001094258, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/02889_system_drop_format_schema/metadata.json b/parser/testdata/02889_system_drop_format_schema/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02889_system_drop_format_schema/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02889_system_drop_format_schema/query.sql b/parser/testdata/02889_system_drop_format_schema/query.sql new file mode 100644 index 000000000..2f17ae3d2 --- /dev/null +++ b/parser/testdata/02889_system_drop_format_schema/query.sql @@ -0,0 +1,2 @@ +EXPLAIN SYNTAX SYSTEM DROP FORMAT SCHEMA CACHE; +EXPLAIN SYNTAX SYSTEM DROP FORMAT SCHEMA CACHE FOR Protobuf; diff --git a/parser/testdata/02890_describe_table_options/ast.json b/parser/testdata/02890_describe_table_options/ast.json new file mode 100644 index 000000000..4f22c702d --- /dev/null +++ b/parser/testdata/02890_describe_table_options/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_describe_options (children 1)" + }, + { + "explain": " Identifier t_describe_options" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001314488, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/02890_describe_table_options/metadata.json b/parser/testdata/02890_describe_table_options/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02890_describe_table_options/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02890_describe_table_options/query.sql b/parser/testdata/02890_describe_table_options/query.sql new file mode 100644 index 000000000..74ecfa9a3 --- /dev/null +++ b/parser/testdata/02890_describe_table_options/query.sql @@ -0,0 +1,56 @@ +DROP TABLE IF EXISTS t_describe_options; + +SET print_pretty_type_names = 0; + +CREATE TABLE t_describe_options ( + id UInt64 COMMENT 'index column', + arr Array(UInt64) DEFAULT [10, 20] CODEC(ZSTD), + t Tuple(a String, b UInt64) DEFAULT ('foo', 0) CODEC(ZSTD)) +ENGINE = MergeTree +ORDER BY id; + +-- { echoOn } + +SET describe_compact_output = 0, describe_include_virtual_columns = 0, describe_include_subcolumns = 0; + +DESCRIBE TABLE t_describe_options; +DESCRIBE remote(test_shard_localhost, currentDatabase(), t_describe_options); + +SET describe_compact_output = 0, describe_include_virtual_columns = 0, describe_include_subcolumns = 1; + +DESCRIBE TABLE t_describe_options; +DESCRIBE remote(test_shard_localhost, currentDatabase(), t_describe_options); + +SET describe_compact_output = 0, describe_include_virtual_columns = 1, describe_include_subcolumns = 0; + +DESCRIBE TABLE t_describe_options; +DESCRIBE remote(test_shard_localhost, currentDatabase(), t_describe_options); + +SET describe_compact_output = 0, describe_include_virtual_columns = 1, describe_include_subcolumns = 1; + +DESCRIBE TABLE t_describe_options; +DESCRIBE remote(test_shard_localhost, currentDatabase(), t_describe_options); + +SET describe_compact_output = 1, describe_include_virtual_columns = 0, describe_include_subcolumns = 0; + +DESCRIBE TABLE t_describe_options; +DESCRIBE remote(test_shard_localhost, currentDatabase(), t_describe_options); + +SET describe_compact_output = 1, describe_include_virtual_columns = 0, describe_include_subcolumns = 1; + +DESCRIBE TABLE t_describe_options; +DESCRIBE remote(test_shard_localhost, currentDatabase(), t_describe_options); + +SET describe_compact_output = 1, describe_include_virtual_columns = 1, describe_include_subcolumns = 0; + +DESCRIBE TABLE t_describe_options; +DESCRIBE remote(test_shard_localhost, currentDatabase(), t_describe_options); + +SET describe_compact_output = 1, describe_include_virtual_columns = 1, describe_include_subcolumns = 1; + +DESCRIBE TABLE t_describe_options; +DESCRIBE remote(test_shard_localhost, currentDatabase(), t_describe_options); + +-- { echoOff } + +DROP TABLE t_describe_options; diff --git a/parser/testdata/02890_named_tuple_functions/ast.json b/parser/testdata/02890_named_tuple_functions/ast.json new file mode 100644 index 000000000..df2296f04 --- /dev/null +++ b/parser/testdata/02890_named_tuple_functions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001172361, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02890_named_tuple_functions/metadata.json b/parser/testdata/02890_named_tuple_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02890_named_tuple_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02890_named_tuple_functions/query.sql b/parser/testdata/02890_named_tuple_functions/query.sql new file mode 100644 index 000000000..672446256 --- /dev/null +++ b/parser/testdata/02890_named_tuple_functions/query.sql @@ -0,0 +1,34 @@ +set enable_named_columns_in_function_tuple = 1; +set enable_analyzer = 1; + +drop table if exists x; +create 
table x (i int, j int) engine MergeTree order by i; +insert into x values (1, 2); + +select toTypeName(tuple(i, j)) from x; +select tupleNames(tuple(i, j)) from x; + +select toTypeName(tuple(1, j)) from x; +select tupleNames(tuple(1, j)) from x; + +select toTypeName(tuple(1 as k, j)) from x; +select tupleNames(tuple(1 as k, j)) from x; + +select toTypeName(tuple(i, i, j, j)) from x; +select tupleNames(tuple(i, i, j, j)) from x; + +select tupleNames(1); -- { serverError 43 } + +drop table x; + +drop table if exists tbl; + +-- Make sure named tuple won't break Values insert +create table tbl (x Tuple(a Int32, b Int32, c Int32)) engine MergeTree order by (); +insert into tbl values (tuple(1, 2, 3)); -- without tuple it's interpreted differently inside values block. +select * from tbl; + +drop table tbl; + +-- Avoid generating named tuple for special keywords +select toTypeName(tuple(null)), toTypeName(tuple(true)), toTypeName(tuple(false)); diff --git a/parser/testdata/02890_partition_prune_in_extra_columns/ast.json b/parser/testdata/02890_partition_prune_in_extra_columns/ast.json new file mode 100644 index 000000000..4f0b7763d --- /dev/null +++ b/parser/testdata/02890_partition_prune_in_extra_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery e (children 1)" + }, + { + "explain": " Identifier e" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001224169, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02890_partition_prune_in_extra_columns/metadata.json b/parser/testdata/02890_partition_prune_in_extra_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02890_partition_prune_in_extra_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02890_partition_prune_in_extra_columns/query.sql b/parser/testdata/02890_partition_prune_in_extra_columns/query.sql new file mode 100644 index 000000000..29fd313b1 --- /dev/null +++ b/parser/testdata/02890_partition_prune_in_extra_columns/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS e; + +CREATE TABLE e (dt DateTime, t Int32) ENGINE = MergeTree() PARTITION BY (t, toYYYYMM(dt)) ORDER BY tuple(); + +INSERT INTO e SELECT toDateTime('2022-12-12 11:00:00') + number, 86 FROM numbers(10); + +SELECT COUNT(*) FROM e WHERE (t, dt) IN (86, '2022-12-12 11:00:00'); + +DROP TABLE e; diff --git a/parser/testdata/02890_untuple_column_names/ast.json b/parser/testdata/02890_untuple_column_names/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02890_untuple_column_names/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02890_untuple_column_names/metadata.json b/parser/testdata/02890_untuple_column_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02890_untuple_column_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02890_untuple_column_names/query.sql b/parser/testdata/02890_untuple_column_names/query.sql new file mode 100644 index 000000000..9773e2e53 --- /dev/null +++ b/parser/testdata/02890_untuple_column_names/query.sql @@ -0,0 +1,48 @@ +-- If the untuple() function has an alias, and if the tuple element has an explicit name, we want to use it to +-- generate the resulting column name. +-- Check all combinations of tuple element alias and untuple() alias. 
Also, to avoid generating the same +-- result column names and confusing query analysis (see #26179), test two untuple() calls in one SELECT +-- with the same types and aliases. + +SELECT '-- tuple element alias'; + +SELECT untuple(tuple(1)::Tuple(a Int)), untuple(tuple('s')::Tuple(a String)) FORMAT Vertical SETTINGS enable_analyzer = 0; +SELECT untuple(tuple(1)::Tuple(a Int)), untuple(tuple('s')::Tuple(a String)) FORMAT Vertical SETTINGS enable_analyzer = 1; + +SELECT untuple(tuple(1)::Tuple(a Int)), untuple(tuple(1)::Tuple(a Int)) FORMAT Vertical SETTINGS enable_analyzer = 0; -- { serverError DUPLICATE_COLUMN } +SELECT untuple(tuple(1)::Tuple(a Int)), untuple(tuple(1)::Tuple(a Int)) FORMAT Vertical SETTINGS enable_analyzer = 1; -- Bug: doesn't throw an exception + +SELECT '-- tuple element alias + untuple() alias'; + +SELECT untuple(tuple(1)::Tuple(a Int)) x, untuple(tuple('s')::Tuple(a String)) y FORMAT Vertical SETTINGS enable_analyzer = 0; +SELECT untuple(tuple(1)::Tuple(a Int)) x, untuple(tuple('s')::Tuple(a String)) y FORMAT Vertical SETTINGS enable_analyzer = 1; + +SELECT untuple(tuple(1)::Tuple(a Int)) x, untuple(tuple(1)::Tuple(a Int)) x FORMAT Vertical SETTINGS enable_analyzer = 0; -- { serverError DUPLICATE_COLUMN } +SELECT untuple(tuple(1)::Tuple(a Int)) x, untuple(tuple(1)::Tuple(a Int)) x FORMAT Vertical SETTINGS enable_analyzer = 1; -- Bug: doesn't throw an exception + +SELECT '-- untuple() alias'; + +SELECT untuple(tuple(1)::Tuple(Int)) x, untuple(tuple('s')::Tuple(String)) y FORMAT Vertical SETTINGS enable_analyzer = 0; +SELECT untuple(tuple(1)::Tuple(Int)) x, untuple(tuple('s')::Tuple(String)) y FORMAT Vertical SETTINGS enable_analyzer = 1; + +SELECT untuple(tuple(1)::Tuple(Int)) x, untuple(tuple(1)::Tuple(Int)) x FORMAT Vertical SETTINGS enable_analyzer = 0; -- { serverError DUPLICATE_COLUMN } +SELECT untuple(tuple(1)::Tuple(Int)) x, untuple(tuple(1)::Tuple(Int)) x FORMAT Vertical SETTINGS enable_analyzer = 1; -- Bug: doesn't throw an exception + +SELECT '-- no aliases'; + +SELECT untuple(tuple(1)::Tuple(Int)), untuple(tuple('s')::Tuple(String)) FORMAT Vertical SETTINGS enable_analyzer = 0; +SELECT untuple(tuple(1)::Tuple(Int)), untuple(tuple('s')::Tuple(String)) FORMAT Vertical SETTINGS enable_analyzer = 1; + +SELECT untuple(tuple(1)::Tuple(Int)), untuple(tuple(1)::Tuple(Int)) FORMAT Vertical SETTINGS enable_analyzer = 0; -- { serverError DUPLICATE_COLUMN } +SELECT untuple(tuple(1)::Tuple(Int)), untuple(tuple(1)::Tuple(Int)) FORMAT Vertical SETTINGS enable_analyzer = 1; -- Bug: doesn't throw an exception + +SELECT '-- tuple() loses the column names (would be good to fix, see #36773)'; +SELECT untuple(tuple(1 as a)) as t FORMAT Vertical SETTINGS enable_analyzer = 0, enable_named_columns_in_function_tuple = 0; +SELECT untuple(tuple(1 as a)) as t FORMAT Vertical SETTINGS enable_analyzer = 1, enable_named_columns_in_function_tuple = 0; + +SELECT '-- tuple() with enable_named_columns_in_function_tuple = 1 and enable_analyzer = 1 keeps the column names'; +SELECT untuple(tuple(1 as a)) as t FORMAT Vertical SETTINGS enable_analyzer = 1, enable_named_columns_in_function_tuple = 1; + +SELECT '-- thankfully JSONExtract() keeps them'; +SELECT untuple(JSONExtract('{"key": "value"}', 'Tuple(key String)')) x FORMAT Vertical SETTINGS enable_analyzer = 0; +SELECT untuple(JSONExtract('{"key": "value"}', 'Tuple(key String)')) x FORMAT Vertical SETTINGS enable_analyzer = 1; diff --git a/parser/testdata/02891_alter_update_adaptive_granularity/ast.json
b/parser/testdata/02891_alter_update_adaptive_granularity/ast.json new file mode 100644 index 000000000..cf5e33fd6 --- /dev/null +++ b/parser/testdata/02891_alter_update_adaptive_granularity/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery kv (children 1)" + }, + { + "explain": " Identifier kv" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001388928, + "rows_read": 2, + "bytes_read": 57 + } +} diff --git a/parser/testdata/02891_alter_update_adaptive_granularity/metadata.json b/parser/testdata/02891_alter_update_adaptive_granularity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02891_alter_update_adaptive_granularity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02891_alter_update_adaptive_granularity/query.sql b/parser/testdata/02891_alter_update_adaptive_granularity/query.sql new file mode 100644 index 000000000..09dfd6d8c --- /dev/null +++ b/parser/testdata/02891_alter_update_adaptive_granularity/query.sql @@ -0,0 +1,24 @@ +CREATE TABLE kv +( + `key` UInt64, + `value` UInt64, + `s` String, + INDEX value_idx value TYPE minmax GRANULARITY 1 +) +ENGINE = ReplacingMergeTree +ORDER BY key +SETTINGS index_granularity = 32, index_granularity_bytes = 1024; + +INSERT INTO kv SELECT + number, + number + 100, + toString(number) +FROM numbers(2048); + +ALTER TABLE kv + UPDATE s = 'The Containers library is a generic collection of class templates and algorithms that allow programmers to easily implement common data structures like queues, lists and stacks' WHERE 1 +SETTINGS mutations_sync = 2; + +SELECT * +FROM kv +WHERE value = 442; diff --git a/parser/testdata/02891_array_shingles/ast.json b/parser/testdata/02891_array_shingles/ast.json new file mode 100644 index 000000000..84cd3e6dd --- /dev/null +++ b/parser/testdata/02891_array_shingles/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '-- negative tests'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001398909, + "rows_read": 5, + "bytes_read": 188 + } +} diff --git a/parser/testdata/02891_array_shingles/metadata.json b/parser/testdata/02891_array_shingles/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02891_array_shingles/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02891_array_shingles/query.sql b/parser/testdata/02891_array_shingles/query.sql new file mode 100644 index 000000000..e2b5cde88 --- /dev/null +++ b/parser/testdata/02891_array_shingles/query.sql @@ -0,0 +1,23 @@ +SELECT '-- negative tests'; +SELECT arrayShingles(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT arrayShingles([1, 2, 3, 4, 5]); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT arrayShingles([1, 2, 3, 4, 5], 2, 3); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT arrayShingles([1, 2, 3, 4, 5], 'str'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayShingles((1, 2, 3, 4, 5), 0); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayShingles([1, 2, 3, 4, 5], 0); -- { serverError BAD_ARGUMENTS } +SELECT arrayShingles([1, 2, 3, 4, 5], -2); -- { 
serverError BAD_ARGUMENTS } +SELECT arrayShingles([1, 2, 3, 4, 5], 6); -- { serverError BAD_ARGUMENTS } +SELECT arrayShingles([], 1); -- { serverError BAD_ARGUMENTS } + +SELECT '-- const and non-const inputs'; +SELECT [1, 2, 3, 4, 5] AS arr, 1 AS len, arrayShingles(arr, len), arrayShingles(materialize(arr), len); +SELECT [1, 2, 3, 4, 5] AS arr, 3 AS len, arrayShingles(arr, len), arrayShingles(materialize(arr), len); +SELECT [1, 2 ,3, 4, 5] AS arr, 5 AS len, arrayShingles(arr, len), arrayShingles(materialize(arr), len); + +SELECT ['ab', 'c', 'de', '', 'hi'] AS arr, 1 AS len, arrayShingles(arr, len), arrayShingles(materialize(arr), len); +SELECT ['ab', 'c', 'de', '', 'hi'] AS arr, 3 AS len, arrayShingles(arr, len), arrayShingles(materialize(arr), len); +SELECT ['ab', 'c', 'de', '', 'hi'] AS arr, 5 AS len, arrayShingles(arr, len), arrayShingles(materialize(arr), len); + +SELECT '-- special cases'; +SELECT arrayShingles([toNullable(2), toNullable(1)], 1); +SELECT arrayShingles([toLowCardinality(2), toLowCardinality(1)], 1); diff --git a/parser/testdata/02891_empty_tuple/ast.json b/parser/testdata/02891_empty_tuple/ast.json new file mode 100644 index 000000000..cd9ae2a81 --- /dev/null +++ b/parser/testdata/02891_empty_tuple/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery x (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001278857, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02891_empty_tuple/metadata.json b/parser/testdata/02891_empty_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02891_empty_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02891_empty_tuple/query.sql b/parser/testdata/02891_empty_tuple/query.sql new file mode 100644 index 000000000..c8669d4a0 --- /dev/null +++ b/parser/testdata/02891_empty_tuple/query.sql @@ -0,0 +1,27 @@ +drop table if exists x; + +create table x engine MergeTree order by () as select () as a, () as b; + +insert into x values ((), ()); + +select count() from x; + +select * from x order by (); + +select (); + +drop table x; + +drop table if exists x; + +create table x (i Nullable(Tuple())) engine MergeTree order by (); -- { serverError 43 } +create table x (i LowCardinality(Tuple())) engine MergeTree order by (); -- { serverError 43 } +create table x (i Tuple(), j Array(Tuple())) engine MergeTree order by (); + +insert into x values ((), [(), ()]), ((), []); + +select count() from x; + +select * from x order by () settings max_threads = 1; + +drop table x; diff --git a/parser/testdata/02891_functions_over_sparse_columns/ast.json b/parser/testdata/02891_functions_over_sparse_columns/ast.json new file mode 100644 index 000000000..b08111c2f --- /dev/null +++ b/parser/testdata/02891_functions_over_sparse_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001150232, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02891_functions_over_sparse_columns/metadata.json b/parser/testdata/02891_functions_over_sparse_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02891_functions_over_sparse_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02891_functions_over_sparse_columns/query.sql b/parser/testdata/02891_functions_over_sparse_columns/query.sql new file mode 100644 index 000000000..14d5e0f98 --- /dev/null +++ b/parser/testdata/02891_functions_over_sparse_columns/query.sql @@ -0,0 +1,5 @@ +drop table if exists test; +create table test (key Int) engine=MergeTree() order by tuple() settings ratio_of_defaults_for_sparse_serialization=0.1; +insert into test select 0 from numbers(10); +select arrayMap(x -> (x <= key), [1]) from test; +drop table test; diff --git a/parser/testdata/02891_rename_table_without_keyword/ast.json b/parser/testdata/02891_rename_table_without_keyword/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02891_rename_table_without_keyword/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02891_rename_table_without_keyword/metadata.json b/parser/testdata/02891_rename_table_without_keyword/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02891_rename_table_without_keyword/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02891_rename_table_without_keyword/query.sql b/parser/testdata/02891_rename_table_without_keyword/query.sql new file mode 100644 index 000000000..2f32dc94d --- /dev/null +++ b/parser/testdata/02891_rename_table_without_keyword/query.sql @@ -0,0 +1,42 @@ +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; +CREATE DATABASE IF NOT EXISTS {CLICKHOUSE_DATABASE:Identifier}; + +CREATE TABLE IF NOT EXISTS {CLICKHOUSE_DATABASE:Identifier}.r1 (name String) Engine=Memory(); +SHOW TABLES FROM {CLICKHOUSE_DATABASE:Identifier}; + +RENAME TABLE {CLICKHOUSE_DATABASE:Identifier}.r1 TO {CLICKHOUSE_DATABASE:Identifier}.r1_bak; +SHOW TABLES FROM {CLICKHOUSE_DATABASE:Identifier}; + +RENAME {CLICKHOUSE_DATABASE:Identifier}.r1_bak TO {CLICKHOUSE_DATABASE:Identifier}.r1; +SHOW TABLES FROM {CLICKHOUSE_DATABASE:Identifier}; + +CREATE TABLE IF NOT EXISTS {CLICKHOUSE_DATABASE:Identifier}.r2 (name String) Engine=Memory(); +RENAME {CLICKHOUSE_DATABASE:Identifier}.r1 TO {CLICKHOUSE_DATABASE:Identifier}.r1_bak, + {CLICKHOUSE_DATABASE:Identifier}.r2 TO {CLICKHOUSE_DATABASE:Identifier}.r2_bak; +SHOW TABLES FROM {CLICKHOUSE_DATABASE:Identifier}; + +CREATE TABLE IF NOT EXISTS {CLICKHOUSE_DATABASE:Identifier}.source_table ( + id UInt64, + value String + ) ENGINE = Memory; + +CREATE DICTIONARY IF NOT EXISTS {CLICKHOUSE_DATABASE:Identifier}.test_dictionary +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE '{CLICKHOUSE_DATABASE:String}.dictionary_table')) +LAYOUT(FLAT()) +LIFETIME(MIN 0 MAX 1000); + +SHOW DICTIONARIES FROM {CLICKHOUSE_DATABASE:Identifier}; + +RENAME {CLICKHOUSE_DATABASE:Identifier}.test_dictionary TO {CLICKHOUSE_DATABASE:Identifier}.test_dictionary_2; +SHOW DICTIONARIES FROM {CLICKHOUSE_DATABASE:Identifier}; + +SHOW DATABASES LIKE '{CLICKHOUSE_DATABASE:String}'; +RENAME {CLICKHOUSE_DATABASE:Identifier} TO {CLICKHOUSE_DATABASE_1:Identifier}; -- { serverError UNKNOWN_TABLE } +SHOW DATABASES LIKE '{CLICKHOUSE_DATABASE:String}'; + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; diff --git a/parser/testdata/02892_SummingMergeTree_Nested/ast.json b/parser/testdata/02892_SummingMergeTree_Nested/ast.json new file mode 100644 index 000000000..22e6cc59c --- /dev/null +++ b/parser/testdata/02892_SummingMergeTree_Nested/ast.json 
@@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nested_smt (children 1)" + }, + { + "explain": " Identifier nested_smt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001343684, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02892_SummingMergeTree_Nested/metadata.json b/parser/testdata/02892_SummingMergeTree_Nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02892_SummingMergeTree_Nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02892_SummingMergeTree_Nested/query.sql b/parser/testdata/02892_SummingMergeTree_Nested/query.sql new file mode 100644 index 000000000..90a6fbd24 --- /dev/null +++ b/parser/testdata/02892_SummingMergeTree_Nested/query.sql @@ -0,0 +1,26 @@ +drop table if exists nested_smt; +create table nested_smt ( + date date, + val UInt64, + counters_Map Nested ( + id UInt8, + count Int32 + ) +) +ENGINE = SummingMergeTree() +ORDER BY (date); + +system stop merges nested_smt; + +insert into nested_smt values ('2023-10-05', 1, [1,2,3], [10,20,30]); +insert into nested_smt values ('2023-10-05', 2, [1,2,3], [1,1,1]); + +-- { echo } +select * from nested_smt order by val; +select * from nested_smt final; + +system start merges nested_smt; +optimize table nested_smt final; +select * from nested_smt; + +drop table nested_smt; diff --git a/parser/testdata/02892_orc_filter_pushdown/ast.json b/parser/testdata/02892_orc_filter_pushdown/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02892_orc_filter_pushdown/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02892_orc_filter_pushdown/metadata.json b/parser/testdata/02892_orc_filter_pushdown/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02892_orc_filter_pushdown/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02892_orc_filter_pushdown/query.sql b/parser/testdata/02892_orc_filter_pushdown/query.sql new file mode 100644 index 000000000..4f2e3ca23 --- /dev/null +++ b/parser/testdata/02892_orc_filter_pushdown/query.sql @@ -0,0 +1,221 @@ +-- Tags: no-fasttest, no-parallel + +set output_format_orc_string_as_string = 1; +set output_format_orc_row_index_stride = 100; +set input_format_orc_row_batch_size = 100; +set input_format_orc_filter_push_down = 1; +set input_format_null_as_default = 1; + +set engine_file_truncate_on_insert = 1; +set optimize_or_like_chain = 0; +set max_block_size = 100000; +set max_insert_threads = 1; +set max_execution_time = 300; + +SET session_timezone = 'UTC'; + + +-- Try all the types. 
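+-- (Orientation note, not part of the upstream test: n = 5000 - number runs from 5000 down to -4999
+-- over numbers(10000), so the typed columns below cover both signs where the type allows, and with
+-- output_format_orc_row_index_stride = 100 the file carries per-100-row index statistics that the
+-- indexHint() probes further down can prune against.)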
+insert into function file('02892.orc') + with 5000 - number as n +select + number, + intDiv(n, 11)::Int8 as i8, + n::Int16 i16, + n::Int32 as i32, + n::Int64 as i64, + + toDate32(n*500000) as date32, + toDateTime64(n*1e6, 3) as dt64_ms, + toDateTime64(n*1e6, 6) as dt64_us, + toDateTime64(n*1e6, 9) as dt64_ns, + toDateTime64(n*1e6, 0) as dt64_s, + toDateTime64(n*1e6, 2) as dt64_cs, + (n/1000)::Float32 as f32, + (n/1000)::Float64 as f64, + n::String as s, + n::String::FixedString(9) as fs, + n::Decimal32(3)/1234 as d32, + n::Decimal64(10)/12345678 as d64, + n::Decimal128(20)/123456789012345 as d128 + from numbers(10000); + +desc file('02892.orc'); + + +-- Go over all types individually +-- { echoOn } +select count(), sum(number) from file('02892.orc') where indexHint(i8 in (10, 15, -6)); +select count(1), min(i8), max(i8) from file('02892.orc') where i8 in (10, 15, -6); + +select count(), sum(number) from file('02892.orc') where indexHint(i8 between -3 and 2); +select count(1), min(i8), max(i8) from file('02892.orc') where i8 between -3 and 2; + +select count(), sum(number) from file('02892.orc') where indexHint(i16 between 4000 and 61000 or i16 == 42); +select count(1), min(i16), max(i16) from file('02892.orc') where i16 between 4000 and 61000 or i16 == 42; + +select count(), sum(number) from file('02892.orc') where indexHint(i16 between -150 and 250); +select count(1), min(i16), max(i16) from file('02892.orc') where i16 between -150 and 250; + +select count(), sum(number) from file('02892.orc') where indexHint(i32 in (42, -1000)); +select count(1), min(i32), max(i32) from file('02892.orc') where i32 in (42, -1000); + +select count(), sum(number) from file('02892.orc') where indexHint(i32 between -150 and 250); +select count(1), min(i32), max(i32) from file('02892.orc') where i32 between -150 and 250; + +select count(), sum(number) from file('02892.orc') where indexHint(i64 in (42, -1000)); +select count(1), min(i64), max(i64) from file('02892.orc') where i64 in (42, -1000); + +select count(), sum(number) from file('02892.orc') where indexHint(i64 between -150 and 250); +select count(1), min(i64), max(i64) from file('02892.orc') where i64 between -150 and 250; + +select count(), sum(number) from file('02892.orc') where indexHint(date32 between '1992-01-01' and '2023-08-02'); +select count(1), min(date32), max(date32) from file('02892.orc') where date32 between '1992-01-01' and '2023-08-02'; + +select count(), sum(number) from file('02892.orc') where indexHint(dt64_ms between '2000-01-01' and '2005-01-01'); +select count(1), min(dt64_ms), max(dt64_ms) from file('02892.orc') where dt64_ms between '2000-01-01' and '2005-01-01'; + +select count(), sum(number) from file('02892.orc') where indexHint(dt64_us between toDateTime64(900000000, 2) and '2005-01-01'); +select count(1), min(dt64_us), max(dt64_us) from file('02892.orc') where (dt64_us between toDateTime64(900000000, 2) and '2005-01-01'); + +select count(), sum(number) from file('02892.orc') where indexHint(dt64_ns between '2000-01-01' and '2005-01-01'); +select count(1), min(dt64_ns), max(dt64_ns) from file('02892.orc') where (dt64_ns between '2000-01-01' and '2005-01-01'); + +select count(), sum(number) from file('02892.orc') where indexHint(dt64_s between toDateTime64('-2.01e8'::Decimal64(0), 0) and toDateTime64(1.5e8::Decimal64(0), 0)); +select count(1), min(dt64_s), max(dt64_s) from file('02892.orc') where (dt64_s between toDateTime64('-2.01e8'::Decimal64(0), 0) and toDateTime64(1.5e8::Decimal64(0), 0)); + +select count(), sum(number) 
from file('02892.orc') where indexHint(dt64_cs between toDateTime64('-2.01e8'::Decimal64(1), 1) and toDateTime64(1.5e8::Decimal64(2), 2)); +select count(1), min(dt64_cs), max(dt64_cs) from file('02892.orc') where (dt64_cs between toDateTime64('-2.01e8'::Decimal64(1), 1) and toDateTime64(1.5e8::Decimal64(2), 2)); + +select count(), sum(number) from file('02892.orc') where indexHint(f32 between -0.11::Float32 and 0.06::Float32); +select count(1), min(f32), max(f32) from file('02892.orc') where (f32 between -0.11::Float32 and 0.06::Float32); + +select count(), sum(number) from file('02892.orc') where indexHint(f64 between -0.11 and 0.06); +select count(1), min(f64), max(f64) from file('02892.orc') where (f64 between -0.11 and 0.06); + +select count(), sum(number) from file('02892.orc') where indexHint(s between '-9' and '1!!!'); +select count(1), min(s), max(s) from file('02892.orc') where (s between '-9' and '1!!!'); + +select count(), sum(number) from file('02892.orc') where indexHint(fs between '-9' and '1!!!'); +select count(1), min(fs), max(fs) from file('02892.orc') where (fs between '-9' and '1!!!'); + +select count(), sum(number) from file('02892.orc') where indexHint(d32 between '-0.011'::Decimal32(3) and 0.006::Decimal32(3)); +select count(1), min(d32), max(d32) from file('02892.orc') where (d32 between '-0.011'::Decimal32(3) and 0.006::Decimal32(3)); + +select count(), sum(number) from file('02892.orc') where indexHint(d64 between '-0.0000011'::Decimal64(7) and 0.0000006::Decimal64(9)); +select count(1), min(d64), max(d64) from file('02892.orc') where (d64 between '-0.0000011'::Decimal64(7) and 0.0000006::Decimal64(9)); + +select count(), sum(number) from file('02892.orc') where indexHint(d128 between '-0.00000000000011'::Decimal128(20) and 0.00000000000006::Decimal128(20)); +select count(1), min(d128), max(d128) from file('02892.orc') where (d128 between '-0.00000000000011'::Decimal128(20) and 0.00000000000006::Decimal128(20)); + +-- Some random other cases. +select count(), sum(number) from file('02892.orc') where indexHint(0); +select count(), min(number), max(number) from file('02892.orc') where indexHint(0); + +select count(), sum(number) from file('02892.orc') where indexHint(s like '99%' or i64 == 2000); +select count(), min(s), max(s) from file('02892.orc') where (s like '99%' or i64 == 2000); + +select count(), sum(number) from file('02892.orc') where indexHint(s like 'z%'); +select count(), min(s), max(s) from file('02892.orc') where (s like 'z%'); + +select count(), sum(number) from file('02892.orc') where indexHint(i8 == 10 or 1 == 1); +select count(), min(i8), max(i8) from file('02892.orc') where (i8 == 10 or 1 == 1); + +select count(), sum(number) from file('02892.orc') where indexHint(i8 < 0); +select count(), min(i8), max(i8) from file('02892.orc') where (i8 < 0); +-- { echoOff } + +-- Nullable and LowCardinality. 
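+-- (Reading aid, an interpretation of the pattern rather than a spec: in each pair below,
+-- indexHint(cond) only steers row-group selection and filters nothing by itself, so
+-- count()/sum(number) shows how much data survived the pushdown, while the plain WHERE twin
+-- checks the values that actually come back; NULLs are the interesting case here because
+-- min/max statistics describe only the non-NULL values.)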
+insert into function file('02892.orc') select + number, + if(number%234 == 0, NULL, number) as sometimes_null, + toNullable(number) as never_null, + if(number%345 == 0, number::String, NULL) as mostly_null, + toLowCardinality(if(number%234 == 0, NULL, number)) as sometimes_null_lc, + toLowCardinality(toNullable(number)) as never_null_lc, + toLowCardinality(if(number%345 == 0, number::String, NULL)) as mostly_null_lc + from numbers(1000); + +-- { echoOn } +select count(), sum(number) from file('02892.orc') where indexHint(sometimes_null is NULL); +select count(), min(sometimes_null), max(sometimes_null) from file('02892.orc') where (sometimes_null is NULL); + +select count(), sum(number) from file('02892.orc') where indexHint(sometimes_null_lc is NULL); +select count(), min(sometimes_null_lc), max(sometimes_null_lc) from file('02892.orc') where (sometimes_null_lc is NULL); + +select count(), sum(number) from file('02892.orc') where indexHint(mostly_null is not NULL); +select count(), min(mostly_null), max(mostly_null) from file('02892.orc') where (mostly_null is not NULL); + +select count(), sum(number) from file('02892.orc') where indexHint(mostly_null_lc is not NULL); +select count(), min(mostly_null_lc), max(mostly_null_lc) from file('02892.orc') where (mostly_null_lc is not NULL); + +select count(), sum(number) from file('02892.orc') where indexHint(sometimes_null > 850); +select count(), min(sometimes_null), max(sometimes_null) from file('02892.orc') where (sometimes_null > 850); + +select count(), sum(number) from file('02892.orc') where indexHint(sometimes_null_lc > 850); +select count(), min(sometimes_null_lc), max(sometimes_null_lc) from file('02892.orc') where (sometimes_null_lc > 850); + +select count(), sum(number) from file('02892.orc') where indexHint(never_null > 850); +select count(), min(never_null), max(never_null) from file('02892.orc') where (never_null > 850); + +select count(), sum(number) from file('02892.orc') where indexHint(never_null_lc > 850); +select count(), min(never_null_lc), max(never_null_lc) from file('02892.orc') where (never_null_lc > 850); + +select count(), sum(number) from file('02892.orc') where indexHint(never_null < 150); +select count(), min(never_null), max(never_null) from file('02892.orc') where (never_null < 150); + +select count(), sum(number) from file('02892.orc') where indexHint(never_null_lc < 150); +select count(), min(never_null_lc), max(never_null_lc) from file('02892.orc') where (never_null_lc < 150); + +select count(), sum(number) from file('02892.orc') where indexHint(sometimes_null < 150); +select count(), min(sometimes_null), max(sometimes_null) from file('02892.orc') where (sometimes_null < 150); + +select count(), sum(number) from file('02892.orc') where indexHint(sometimes_null_lc < 150); +select count(), min(sometimes_null_lc), max(sometimes_null_lc) from file('02892.orc') where (sometimes_null_lc < 150); +-- { echoOff } + +-- Settings that affect the table schema or contents. 
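+-- (Assumption-flagged note: the explicit schemas below, e.g. 'number UInt64, positive_or_null UInt64',
+-- read a Nullable ORC column through a non-Nullable type; the '-- quirk with infinities' markers
+-- appear to point at how stripe min/max bounds line up with the filter in that case.)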
+insert into function file('02892.orc') select + number, + if(number%234 == 0, NULL, number + 100) as positive_or_null, + if(number%234 == 0, NULL, -number - 100) as negative_or_null, + if(number%234 == 0, NULL, 'I am a string') as string_or_null + from numbers(1000); + +-- { echoOn } +select count(), sum(number) from file('02892.orc') where indexHint(positive_or_null < 50); -- quirk with infinities +select count(), min(positive_or_null), max(positive_or_null) from file('02892.orc') where (positive_or_null < 50); + +select count(), sum(number) from file('02892.orc', ORC, 'number UInt64, positive_or_null UInt64') where indexHint(positive_or_null < 50); +select count(), min(positive_or_null), max(positive_or_null) from file('02892.orc', ORC, 'number UInt64, positive_or_null UInt64') where (positive_or_null < 50); + +select count(), sum(number) from file('02892.orc') where indexHint(negative_or_null > -50); +select count(), min(negative_or_null), max(negative_or_null) from file('02892.orc') where (negative_or_null > -50); + +select count(), sum(number) from file('02892.orc', ORC, 'number UInt64, negative_or_null Int64') where indexHint(negative_or_null > -50); +select count(), min(negative_or_null), max(negative_or_null) from file('02892.orc', ORC, 'number UInt64, negative_or_null Int64') where (negative_or_null > -50); + +select count(), sum(number) from file('02892.orc') where indexHint(string_or_null == ''); -- quirk with infinities +select count(), min(string_or_null), max(string_or_null) from file('02892.orc') where (string_or_null == ''); + +select count(), sum(number) from file('02892.orc', ORC, 'number UInt64, string_or_null String') where indexHint(string_or_null == ''); +select count(), min(string_or_null), max(string_or_null) from file('02892.orc', ORC, 'number UInt64, string_or_null String') where (string_or_null == ''); + +select count(), sum(number) from file('02892.orc', ORC, 'number UInt64, nEgAtIvE_oR_nUlL Int64') where indexHint(nEgAtIvE_oR_nUlL > -50) settings input_format_orc_case_insensitive_column_matching = 1; +select count(), min(nEgAtIvE_oR_nUlL), max(nEgAtIvE_oR_nUlL) from file('02892.orc', ORC, 'number UInt64, nEgAtIvE_oR_nUlL Int64') where (nEgAtIvE_oR_nUlL > -50) settings input_format_orc_case_insensitive_column_matching = 1; + +select count(), sum(number) from file('02892.orc', ORC, 'number UInt64, negative_or_null Int64') where indexHint(negative_or_null < -500); +select count(), min(negative_or_null), max(negative_or_null) from file('02892.orc', ORC, 'number UInt64, negative_or_null Int64') where (negative_or_null < -500); + +select count(), sum(number) from file('02892.orc', ORC, 'number UInt64, negative_or_null Int64') where indexHint(negative_or_null is null) SETTINGS enable_analyzer=1; +select count(), min(negative_or_null), max(negative_or_null) from file('02892.orc', ORC, 'number UInt64, negative_or_null Int64') where (negative_or_null is null); + +select count(), sum(number) from file('02892.orc', ORC, 'number UInt64, negative_or_null Int64') where indexHint(negative_or_null in (0, -1, -10, -100, -1000)); +select count(), min(negative_or_null), max(negative_or_null) from file('02892.orc', ORC, 'number UInt64, negative_or_null Int64') where (negative_or_null in (0, -1, -10, -100, -1000)); + +select count(), sum(number) from file('02892.orc', ORC, 'number UInt64, string_or_null LowCardinality(String)') where indexHint(string_or_null like 'I am%'); +select count(), min(string_or_null), max(string_or_null) from file('02892.orc', ORC, 'number UInt64, 
string_or_null LowCardinality(String)') where (string_or_null like 'I am%'); + +select count(), sum(number) from file('02892.orc', ORC, 'number UInt64, string_or_null LowCardinality(Nullable(String))') where indexHint(string_or_null like 'I am%'); +select count(), min(string_or_null), max(string_or_null) from file('02892.orc', ORC, 'number UInt64, string_or_null LowCardinality(Nullable(String))') where (string_or_null like 'I am%'); +-- { echoOff } diff --git a/parser/testdata/02892_rocksdb_trivial_count/ast.json b/parser/testdata/02892_rocksdb_trivial_count/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02892_rocksdb_trivial_count/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02892_rocksdb_trivial_count/metadata.json b/parser/testdata/02892_rocksdb_trivial_count/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02892_rocksdb_trivial_count/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02892_rocksdb_trivial_count/query.sql b/parser/testdata/02892_rocksdb_trivial_count/query.sql new file mode 100644 index 000000000..a770b1537 --- /dev/null +++ b/parser/testdata/02892_rocksdb_trivial_count/query.sql @@ -0,0 +1,12 @@ +-- Tags: use-rocksdb + +CREATE TABLE dict (key UInt64, value String) ENGINE = EmbeddedRocksDB PRIMARY KEY key; +INSERT INTO dict SELECT number, toString(number) FROM numbers(121); +-- { echoOn } +SELECT count() FROM dict SETTINGS optimize_trivial_approximate_count_query = 0, max_rows_to_read = 1; -- { serverError TOO_MANY_ROWS } +SELECT count() FROM dict SETTINGS optimize_trivial_approximate_count_query = 1, max_rows_to_read = 1; +SET optimize_trivial_approximate_count_query = 1; +-- needs more data to see total_bytes or just detach and attach the table +DETACH TABLE dict SYNC; +ATTACH TABLE dict; +SELECT total_rows, total_bytes > 0 FROM system.tables WHERE database = currentDatabase() AND name = 'dict' FORMAT CSV; diff --git a/parser/testdata/02893_array_enum_has_hasAny/ast.json b/parser/testdata/02893_array_enum_has_hasAny/ast.json new file mode 100644 index 000000000..e8642b595 --- /dev/null +++ b/parser/testdata/02893_array_enum_has_hasAny/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery v (children 1)" + }, + { + "explain": " Identifier v" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001174174, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02893_array_enum_has_hasAny/metadata.json b/parser/testdata/02893_array_enum_has_hasAny/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02893_array_enum_has_hasAny/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02893_array_enum_has_hasAny/query.sql b/parser/testdata/02893_array_enum_has_hasAny/query.sql new file mode 100644 index 000000000..976e0b3c4 --- /dev/null +++ b/parser/testdata/02893_array_enum_has_hasAny/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS v; +DROP TABLE IF EXISTS v2; + +CREATE TABLE IF NOT EXISTS v (value Array(Enum('foo' = 1, 'bar' = 2))) ENGINE = Memory; +INSERT INTO v VALUES (['foo', 'bar']), (['foo']), (['bar']); +SELECT * FROM v WHERE has(value, 'foo') ORDER BY value; +SELECT * FROM v WHERE hasAny(value, ['bar']) ORDER BY value; +SELECT * FROM v WHERE has(value, 'x') ORDER BY value; + +CREATE TABLE IF NOT EXISTS v2 (value Array(Array(Nullable(Enum('foo' = 1, 'bar' 
= 2))))) ENGINE = Memory; +INSERT INTO v2 VALUES ([['foo', 'bar']]), ([['foo']]), ([['bar']]); +SELECT * FROM v2 WHERE has(value, ['foo']) ORDER BY value; +SELECT * FROM v2 WHERE has(value, [NULL]) ORDER BY value; + +DROP TABLE v; +DROP TABLE v2; diff --git a/parser/testdata/02893_bad_sample_view/ast.json b/parser/testdata/02893_bad_sample_view/ast.json new file mode 100644 index 000000000..11a94ce9b --- /dev/null +++ b/parser/testdata/02893_bad_sample_view/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery view_without_sample (children 1)" + }, + { + "explain": " Identifier view_without_sample" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001293835, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/02893_bad_sample_view/metadata.json b/parser/testdata/02893_bad_sample_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02893_bad_sample_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02893_bad_sample_view/query.sql b/parser/testdata/02893_bad_sample_view/query.sql new file mode 100644 index 000000000..412caf52f --- /dev/null +++ b/parser/testdata/02893_bad_sample_view/query.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS view_without_sample; +CREATE VIEW view_without_sample AS SELECT 1 AS x; +SELECT * FROM merge(currentDatabase(), '^view_without_sample$') SAMPLE 1 / 100; +DROP TABLE view_without_sample; diff --git a/parser/testdata/02893_system_drop_schema_cache_format/ast.json b/parser/testdata/02893_system_drop_schema_cache_format/ast.json new file mode 100644 index 000000000..65f8e84cd --- /dev/null +++ b/parser/testdata/02893_system_drop_schema_cache_format/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Explain EXPLAIN SYNTAX (children 1)" + }, + { + "explain": " SYSTEM query" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001086825, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/02893_system_drop_schema_cache_format/metadata.json b/parser/testdata/02893_system_drop_schema_cache_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02893_system_drop_schema_cache_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02893_system_drop_schema_cache_format/query.sql b/parser/testdata/02893_system_drop_schema_cache_format/query.sql new file mode 100644 index 000000000..1efcb8161 --- /dev/null +++ b/parser/testdata/02893_system_drop_schema_cache_format/query.sql @@ -0,0 +1 @@ +explain syntax system drop schema cache for hdfs; diff --git a/parser/testdata/02893_trash_optimization/ast.json b/parser/testdata/02893_trash_optimization/ast.json new file mode 100644 index 000000000..9eb49244a --- /dev/null +++ b/parser/testdata/02893_trash_optimization/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001494994, + "rows_read": 5, + "bytes_read": 169 + } +} diff --git a/parser/testdata/02893_trash_optimization/metadata.json 
b/parser/testdata/02893_trash_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02893_trash_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02893_trash_optimization/query.sql b/parser/testdata/02893_trash_optimization/query.sql new file mode 100644 index 000000000..a61bc86ee --- /dev/null +++ b/parser/testdata/02893_trash_optimization/query.sql @@ -0,0 +1,3 @@ +SELECT * +FROM merge('system', '^one$') AS one +WHERE (one.dummy = 0) OR (one.dummy = 1); diff --git a/parser/testdata/02893_vertical_final_bugs/ast.json b/parser/testdata/02893_vertical_final_bugs/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02893_vertical_final_bugs/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02893_vertical_final_bugs/metadata.json b/parser/testdata/02893_vertical_final_bugs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02893_vertical_final_bugs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02893_vertical_final_bugs/query.sql b/parser/testdata/02893_vertical_final_bugs/query.sql new file mode 100644 index 000000000..e82ab674c --- /dev/null +++ b/parser/testdata/02893_vertical_final_bugs/query.sql @@ -0,0 +1,22 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/64543 +DROP TABLE IF EXISTS foo; +DROP TABLE IF EXISTS bar; +CREATE TABLE foo (id UInt64, seq UInt64) ENGINE = Memory; +CREATE TABLE bar (id UInt64, seq UInt64, name String) ENGINE = ReplacingMergeTree ORDER BY id; +INSERT INTO foo VALUES (1, 1); +INSERT INTO bar VALUES (1, 1, 'a') (2, 2, 'b'); +INSERT INTO bar VALUES (1, 2, 'b') (2, 3, 'c'); +SELECT * FROM bar INNER JOIN foo USING id WHERE bar.seq > foo.seq SETTINGS final = 1; + +-- The same problem can possibly happen with array join +DROP TABLE IF EXISTS t; +CREATE TABLE t (k1 UInt64, k2 UInt64, v UInt64) ENGINE = ReplacingMergeTree() ORDER BY (k1, k2); +SET optimize_on_insert = 0; +INSERT INTO t VALUES (1, 2, 3) (1, 2, 4) (2, 3, 4), (2, 3, 5); +-- { echo ON } +SELECT arrayJoin([(k1, v), (k2, v)]) AS row, row.1 as k FROM t FINAL WHERE k1 != 3 AND k = 1 ORDER BY row SETTINGS enable_vertical_final = 0; +SELECT arrayJoin([(k1, v), (k2, v)]) AS row, row.1 as k FROM t FINAL WHERE k1 != 3 AND k = 1 ORDER BY row SETTINGS enable_vertical_final = 1; +SELECT arrayJoin([(k1, v), (k2, v)]) AS row, row.1 as k FROM t FINAL WHERE k1 != 3 AND k = 2 ORDER BY row SETTINGS enable_vertical_final = 0; +SELECT arrayJoin([(k1, v), (k2, v)]) AS row, row.1 as k FROM t FINAL WHERE k1 != 3 AND k = 2 ORDER BY row SETTINGS enable_vertical_final = 1; +SELECT arrayJoin([(k1, v), (k2, v)]) AS row, row.1 as k FROM t FINAL WHERE k1 != 3 AND k = 3 ORDER BY row SETTINGS enable_vertical_final = 0; +SELECT arrayJoin([(k1, v), (k2, v)]) AS row, row.1 as k FROM t FINAL WHERE k1 != 3 AND k = 3 ORDER BY row SETTINGS enable_vertical_final = 1; diff --git a/parser/testdata/02895_cast_operator_bug/ast.json b/parser/testdata/02895_cast_operator_bug/ast.json new file mode 100644 index 000000000..6819baf09 --- /dev/null +++ b/parser/testdata/02895_cast_operator_bug/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " 
Literal Array_[UInt64_1]" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '0'" + }, + { + "explain": " Literal 'UInt16'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001450205, + "rows_read": 9, + "bytes_read": 318 + } +} diff --git a/parser/testdata/02895_cast_operator_bug/metadata.json b/parser/testdata/02895_cast_operator_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02895_cast_operator_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02895_cast_operator_bug/query.sql b/parser/testdata/02895_cast_operator_bug/query.sql new file mode 100644 index 000000000..39c5f6b2a --- /dev/null +++ b/parser/testdata/02895_cast_operator_bug/query.sql @@ -0,0 +1 @@ +SELECT [1], 0::UInt16; diff --git a/parser/testdata/02896_cyclic_aliases_crash/ast.json b/parser/testdata/02896_cyclic_aliases_crash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02896_cyclic_aliases_crash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02896_cyclic_aliases_crash/metadata.json b/parser/testdata/02896_cyclic_aliases_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02896_cyclic_aliases_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02896_cyclic_aliases_crash/query.sql b/parser/testdata/02896_cyclic_aliases_crash/query.sql new file mode 100644 index 000000000..04e2c254a --- /dev/null +++ b/parser/testdata/02896_cyclic_aliases_crash/query.sql @@ -0,0 +1,36 @@ + +SET max_ast_depth = 10_000_000; + +SELECT + val, + val + 1 as prev, + val + prev as val +FROM ( SELECT 1 as val ) +; -- { serverError CYCLIC_ALIASES, UNKNOWN_IDENTIFIER, TOO_DEEP_RECURSION } + + +SELECT + val, + val + 1 as prev, + val + prev as val2 +FROM ( SELECT 1 as val ) +; + +select number % 2 as number, count() from numbers(10) where number != 0 group by number % 2 as number; + +CREATE TABLE test_table (time_stamp_utc DateTime, impressions UInt32, clicks UInt32, revenue Float32) ENGINE = MergeTree ORDER BY time_stamp_utc; + +SELECT + toStartOfDay(toDateTime(time_stamp_utc)) AS time_stamp_utc, + sum(impressions) AS Impressions, + sum(clicks) AS Clicks, + sum(revenue) AS Revenue +FROM test_table +WHERE (time_stamp_utc >= toDateTime('2024-04-25 00:00:00')) AND (time_stamp_utc < toDateTime('2024-05-02 00:00:00')) +GROUP BY time_stamp_utc +ORDER BY Impressions DESC +LIMIT 1000; + +drop table test_table; +create table test_table engine MergeTree order by sum as select 100 as sum union all select 200 as sum; +select sum as sum from (select sum(sum) as sum from test_table); diff --git a/parser/testdata/02896_illegal_sampling/ast.json b/parser/testdata/02896_illegal_sampling/ast.json new file mode 100644 index 000000000..7483a293c --- /dev/null +++ b/parser/testdata/02896_illegal_sampling/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 2)" + }, + { + "explain": " 
Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " SampleRatio 1 \/ 2" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001093065, + "rows_read": 15, + "bytes_read": 582 + } +} diff --git a/parser/testdata/02896_illegal_sampling/metadata.json b/parser/testdata/02896_illegal_sampling/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02896_illegal_sampling/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02896_illegal_sampling/query.sql b/parser/testdata/02896_illegal_sampling/query.sql new file mode 100644 index 000000000..9799925e1 --- /dev/null +++ b/parser/testdata/02896_illegal_sampling/query.sql @@ -0,0 +1 @@ +SELECT * FROM (SELECT 1) SAMPLE 1 / 2; -- { serverError SAMPLING_NOT_SUPPORTED, UNSUPPORTED_METHOD } \ No newline at end of file diff --git a/parser/testdata/02896_leading_zeroes_no_octal/ast.json b/parser/testdata/02896_leading_zeroes_no_octal/ast.json new file mode 100644 index 000000000..eeaba4446 --- /dev/null +++ b/parser/testdata/02896_leading_zeroes_no_octal/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_leading_zeroes (children 1)" + }, + { + "explain": " Identifier t_leading_zeroes" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001120569, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/02896_leading_zeroes_no_octal/metadata.json b/parser/testdata/02896_leading_zeroes_no_octal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02896_leading_zeroes_no_octal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02896_leading_zeroes_no_octal/query.sql b/parser/testdata/02896_leading_zeroes_no_octal/query.sql new file mode 100644 index 000000000..4ad177022 --- /dev/null +++ b/parser/testdata/02896_leading_zeroes_no_octal/query.sql @@ -0,0 +1,223 @@ +DROP TABLE IF EXISTS t_leading_zeroes; +DROP TABLE IF EXISTS t_leading_zeroes_f; + +CREATE TABLE t_leading_zeroes(id Int64, input String, val Int64, expected Int64, comment String) ENGINE=MergeTree ORDER BY id; +CREATE TABLE t_leading_zeroes_f(id Int64, input String, val Float64, expected Float64, comment String) ENGINE=MergeTree ORDER BY id; + +SET input_format_values_interpret_expressions = 0; + +INSERT INTO t_leading_zeroes VALUES (1000, '0', 0, 0, 'Single zero'); +INSERT INTO t_leading_zeroes VALUES (1001, '00', 00, 0, 'Double zero'); +INSERT INTO t_leading_zeroes VALUES (1002, '000000000000000', 000000000000000, 0, 'Mutliple redundant zeroes'); +INSERT INTO t_leading_zeroes VALUES (1003, '01', 01, 1, 'Octal like, interpret as decimal'); +INSERT INTO t_leading_zeroes VALUES (1004, '08', 08, 8, 'Octal like, interpret as decimal'); +INSERT INTO t_leading_zeroes VALUES (1005, '0100', 0100, 100, 'Octal like, interpret as decimal'); +INSERT INTO t_leading_zeroes VALUES (1006, '0000000000100', 0000000000100, 100, 'Octal like, interpret as decimal, multiple leading zeroes'); + +INSERT INTO t_leading_zeroes VALUES (1010, '-0', -0, 0, 'Single zero negative'); +INSERT INTO t_leading_zeroes VALUES (1011, '-00', -00, 0, 'Double zero negative'); +INSERT INTO t_leading_zeroes VALUES (1012, '-000000000000000', 
-000000000000000, 0, 'Mutliple redundant zeroes negative'); +INSERT INTO t_leading_zeroes VALUES (1013, '-01', -01, -1, 'Octal like, interpret as decimal negative'); +INSERT INTO t_leading_zeroes VALUES (1014, '-08', -08, -8, 'Octal like, interpret as decimal negative'); +INSERT INTO t_leading_zeroes VALUES (1015, '-0100', -0100, -100, 'Octal like, interpret as decimal negative'); +INSERT INTO t_leading_zeroes VALUES (1016, '-0000000000100', -0000000000100, -100, 'Octal like, interpret as decimal, multiple leading zeroes negative'); + +INSERT INTO t_leading_zeroes VALUES (1020, '+0', +0, 0, 'Single zero positive'); +INSERT INTO t_leading_zeroes VALUES (1021, '+00', +00, 0, 'Double zero negpositiveative'); +INSERT INTO t_leading_zeroes VALUES (1022, '+000000000000000', +000000000000000, 0, 'Mutliple redundant zeroes positive'); +INSERT INTO t_leading_zeroes VALUES (1023, '+01', +01, 1, 'Octal like, interpret as decimal positive'); +INSERT INTO t_leading_zeroes VALUES (1024, '+08', +08, 8, 'Octal like, interpret as decimal positive'); +INSERT INTO t_leading_zeroes VALUES (1025, '+0100', +0100, 100, 'Octal like, interpret as decimal positive'); +INSERT INTO t_leading_zeroes VALUES (1026, '+0000000000100', +0000000000100, 100, 'Octal like, interpret as decimal, multiple leading zeroes positive'); + +INSERT INTO t_leading_zeroes VALUES (1030, '0000.008', 0000.008, 0, 'Floating point should work...'); +INSERT INTO t_leading_zeroes VALUES (1031, '-0000.008', -0000.008, 0, 'Floating point should work...'); +INSERT INTO t_leading_zeroes VALUES (1032, '+0000.008', +0000.008, 0, 'Floating point should work...'); +INSERT INTO t_leading_zeroes VALUES (1033, '0000.008e3', 0000.008e3, 8, 'Floating point should work...'); +INSERT INTO t_leading_zeroes VALUES (1034, '-0000.008e3', -0000.008e3, -8, 'Floating point should work...'); +INSERT INTO t_leading_zeroes VALUES (1035, '+0000.008e3', 0000.008e3, 8, 'Floating point should work...'); +INSERT INTO t_leading_zeroes VALUES (1036, '08000.008e-3', 08000.008e-3, 8, 'Floating point should work...'); +INSERT INTO t_leading_zeroes VALUES (1037, '-08000.008e-3', -08000.008e-3, -8, 'Floating point should work...'); +INSERT INTO t_leading_zeroes VALUES (1038, '+08000.008e-3', 08000.008e-3, 8, 'Floating point should work...'); + +INSERT INTO t_leading_zeroes VALUES (1060, '0x0abcd', 0x0abcd, 43981, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes VALUES (1061, '-0x0abcd', -0x0abcd, -43981, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes VALUES (1062, '+0x0abcd', +0x0abcd, 43981, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes VALUES (1063, '0x0abcdP1', 0x0abcdP1, 87962, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes VALUES (1064, '0x0abcdP+1', 0x0abcdP+1, 87962, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes VALUES (1065, '0x0abcdP-1', 0x0abcdP-1, 21990, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes VALUES (1066, '0x0abcdP01', 0x0abcdP01, 87962, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes VALUES (1067, '0x0abcdP+01', 0x0abcdP+01, 87962, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes VALUES (1068, '0x0abcdP-01', 0x0abcdP-01, 21990, 'Hex should be parsed'); + + +-- Floating point numbers go via readFloatTextFastImpl - so should not be affected + +INSERT INTO t_leading_zeroes_f VALUES (2000, '0', 0, 0, 'Single zero'); +INSERT INTO t_leading_zeroes_f VALUES (2001, '00', 00, 0, 'Double zero'); +INSERT INTO t_leading_zeroes_f VALUES (2002, '000000000000000', 000000000000000, 0, 'Mutliple redundant zeroes'); 
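+ +-- Editor's note (illustrative only, not part of the upstream test): a leading zero never switches ClickHouse literals to octal, +-- so 0100 is read as decimal 100 (octal would give 64) and 08 is an ordinary decimal 8, matching the `expected` column above. +-- The hex-float rows in this file use C-style p-notation: 0x0abcdP1 = 0xabcd * 2^1 = 43981 * 2 = 87962, +-- and 0x0abcdP-1 = 43981 / 2 = 21990.5, stored as 21990.5 in the Float64 table and truncated to 21990 in the Int64 table. +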
+INSERT INTO t_leading_zeroes_f VALUES (2003, '01', 01, 1, 'Octal like, interpret as decimal'); +INSERT INTO t_leading_zeroes_f VALUES (2004, '08', 08, 8, 'Octal like, interpret as decimal'); +INSERT INTO t_leading_zeroes_f VALUES (2005, '0100', 0100, 100, 'Octal like, interpret as decimal'); +INSERT INTO t_leading_zeroes_f VALUES (2006, '0000000000100', 0000000000100, 100, 'Octal like, interpret as decimal, multiple leading zeroes'); + +-- Float negative zero is machine/context dependent +-- INSERT INTO t_leading_zeroes_f VALUES (2010, '-0', -0, 0, 'Single zero negative'); +-- INSERT INTO t_leading_zeroes_f VALUES (2011, '-00', -00, 0, 'Double zero negative'); +-- INSERT INTO t_leading_zeroes_f VALUES (2012, '-000000000000000', -000000000000000, 0, 'Mutliple redundant zeroes negative'); +INSERT INTO t_leading_zeroes_f VALUES (2013, '-01', -01, -1, 'Octal like, interpret as decimal negative'); +INSERT INTO t_leading_zeroes_f VALUES (2014, '-08', -08, -8, 'Octal like, interpret as decimal negative'); +INSERT INTO t_leading_zeroes_f VALUES (2015, '-0100', -0100, -100, 'Octal like, interpret as decimal negative'); +INSERT INTO t_leading_zeroes_f VALUES (2016, '-0000000000100', -0000000000100, -100, 'Octal like, interpret as decimal, multiple leading zeroes negative'); + +INSERT INTO t_leading_zeroes_f VALUES (2020, '+0', +0, 0, 'Single zero positive'); +INSERT INTO t_leading_zeroes_f VALUES (2021, '+00', +00, 0, 'Double zero negpositiveative'); +INSERT INTO t_leading_zeroes_f VALUES (2022, '+000000000000000', +000000000000000, 0, 'Mutliple redundant zeroes positive'); +INSERT INTO t_leading_zeroes_f VALUES (2023, '+01', +01, 1, 'Octal like, interpret as decimal positive'); +INSERT INTO t_leading_zeroes_f VALUES (2024, '+08', +08, 8, 'Octal like, interpret as decimal positive'); +INSERT INTO t_leading_zeroes_f VALUES (2025, '+0100', +0100, 100, 'Octal like, interpret as decimal positive'); +INSERT INTO t_leading_zeroes_f VALUES (2026, '+0000000000100', +0000000000100, 100, 'Octal like, interpret as decimal, multiple leading zeroes positive'); + +INSERT INTO t_leading_zeroes_f VALUES (2030, '0000.008', 0000.008, 0.008, 'Floating point should work...'); +INSERT INTO t_leading_zeroes_f VALUES (2031, '-0000.008', -0000.008, -0.008, 'Floating point should work...'); +INSERT INTO t_leading_zeroes_f VALUES (2032, '+0000.008', +0000.008, 0.008, 'Floating point should work...'); +INSERT INTO t_leading_zeroes_f VALUES (2033, '0000.008e3', 0000.008e3, 8, 'Floating point should work...'); +INSERT INTO t_leading_zeroes_f VALUES (2034, '-0000.008e3', -0000.008e3, -8, 'Floating point should work...'); +INSERT INTO t_leading_zeroes_f VALUES (2035, '+0000.008e3', 0000.008e3, 8, 'Floating point should work...'); +INSERT INTO t_leading_zeroes_f VALUES (2036, '08.5e-3', 08.5e-3, 0.0085, 'Floating point should work...'); +INSERT INTO t_leading_zeroes_f VALUES (2037, '-08.5e-3', -08.5e-3, -0.0085, 'Floating point should work...'); +INSERT INTO t_leading_zeroes_f VALUES (2038, '+08.5e-3', 08.5e-3, 0.0085, 'Floating point should work...'); + +INSERT INTO t_leading_zeroes_f VALUES (2063, '0x0abcdP1', 0x0abcdP1, 87962, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes_f VALUES (2064, '0x0abcdP+1', 0x0abcdP+1, 87962, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes_f VALUES (2065, '0x0abcdP-1', 0x0abcdP-1, 21990.5, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes_f VALUES (2066, '0x0abcdP01', 0x0abcdP01, 87962, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes_f VALUES (2067, '0x0abcdP+01', 
0x0abcdP+01, 87962, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes_f VALUES (2068, '0x0abcdP-01', 0x0abcdP-01, 21990.5, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes_f VALUES (2069, '0x01P-01', 0x01P-01, 0.5, 'Hex should be parsed'); + +-- Coincidentally, the following results in 9 rather than 9e9 because of readFloatTextFastImpl +-- using readUIntTextUpToNSignificantDigits<4>(exponent, in) +-- INSERT INTO t_leading_zeroes_f VALUES (2070, '00009e00009', 00009e00009, 9e9, '???'); + +-- Binary should not work with input_format_values_interpret_expressions = 0 + +INSERT INTO t_leading_zeroes_f VALUES (2050, '0b10000', 0b10000, 16, 'Binary should not be parsed'); -- { error SYNTAX_ERROR } +INSERT INTO t_leading_zeroes_f VALUES (2051, '-0b10000', -0b10000, -16, 'Binary should not be parsed'); -- { error SYNTAX_ERROR } +INSERT INTO t_leading_zeroes_f VALUES (2052, '+0b10000', +0b10000, 16, 'Binary should not be parsed'); -- { error SYNTAX_ERROR } + +INSERT INTO t_leading_zeroes VALUES (1050, '0b10000', 0b10000, 16, 'Binary should not be parsed'); -- { error SYNTAX_ERROR } +INSERT INTO t_leading_zeroes VALUES (1051, '-0b10000', -0b10000, -16, 'Binary should not be parsed'); -- { error SYNTAX_ERROR } +INSERT INTO t_leading_zeroes VALUES (1052, '+0b10000', +0b10000, 16, 'Binary should not be parsed'); -- { error SYNTAX_ERROR } + + + +SET input_format_values_interpret_expressions = 1; + +INSERT INTO t_leading_zeroes VALUES (11000, '0', 0, 0, 'Single zero'); +INSERT INTO t_leading_zeroes VALUES (11001, '00', 00, 0, 'Double zero'); +INSERT INTO t_leading_zeroes VALUES (11002, '000000000000000', 000000000000000, 0, 'Mutliple redundant zeroes'); +INSERT INTO t_leading_zeroes VALUES (11003, '01', 01, 1, 'Octal like, interpret as decimal'); +INSERT INTO t_leading_zeroes VALUES (11004, '08', 08, 8, 'Octal like, interpret as decimal'); +INSERT INTO t_leading_zeroes VALUES (11005, '0100', 0100, 100, 'Octal like, interpret as decimal'); +INSERT INTO t_leading_zeroes VALUES (11006, '0000000000100', 0000000000100, 100, 'Octal like, interpret as decimal, multiple leading zeroes'); + +INSERT INTO t_leading_zeroes VALUES (11010, '-0', -0, 0, 'Single zero negative'); +INSERT INTO t_leading_zeroes VALUES (11011, '-00', -00, 0, 'Double zero negative'); +INSERT INTO t_leading_zeroes VALUES (11012, '-000000000000000', -000000000000000, 0, 'Mutliple redundant zeroes negative'); +INSERT INTO t_leading_zeroes VALUES (11013, '-01', -01, -1, 'Octal like, interpret as decimal negative'); +INSERT INTO t_leading_zeroes VALUES (11014, '-08', -08, -8, 'Octal like, interpret as decimal negative'); +INSERT INTO t_leading_zeroes VALUES (11015, '-0100', -0100, -100, 'Octal like, interpret as decimal negative'); +INSERT INTO t_leading_zeroes VALUES (11016, '-0000000000100', -0000000000100, -100, 'Octal like, interpret as decimal, multiple leading zeroes negative'); + +INSERT INTO t_leading_zeroes VALUES (11020, '+0', +0, 0, 'Single zero positive'); +INSERT INTO t_leading_zeroes VALUES (11021, '+00', +00, 0, 'Double zero negpositiveative'); +INSERT INTO t_leading_zeroes VALUES (11022, '+000000000000000', +000000000000000, 0, 'Mutliple redundant zeroes positive'); +INSERT INTO t_leading_zeroes VALUES (11023, '+01', +01, 1, 'Octal like, interpret as decimal positive'); +INSERT INTO t_leading_zeroes VALUES (11024, '+08', +08, 8, 'Octal like, interpret as decimal positive'); +INSERT INTO t_leading_zeroes VALUES (11025, '+0100', +0100, 100, 'Octal like, interpret as decimal positive'); +INSERT INTO t_leading_zeroes VALUES 
(11026, '+0000000000100', +0000000000100, 100, 'Octal like, interpret as decimal, multiple leading zeroes positive'); + +INSERT INTO t_leading_zeroes VALUES (11030, '0000.008', 0000.008, 0, 'Floating point should work...'); +INSERT INTO t_leading_zeroes VALUES (11031, '-0000.008', -0000.008, 0, 'Floating point should work...'); +INSERT INTO t_leading_zeroes VALUES (11032, '+0000.008', +0000.008, 0, 'Floating point should work...'); +INSERT INTO t_leading_zeroes VALUES (11033, '0000.008e3', 0000.008e3, 8, 'Floating point should work...'); +INSERT INTO t_leading_zeroes VALUES (11034, '-0000.008e3', -0000.008e3, -8, 'Floating point should work...'); +INSERT INTO t_leading_zeroes VALUES (11035, '+0000.008e3', 0000.008e3, 8, 'Floating point should work...'); +INSERT INTO t_leading_zeroes VALUES (11036, '08000.008e-3', 08000.008e-3, 8, 'Floating point should work...'); +INSERT INTO t_leading_zeroes VALUES (11037, '-08000.008e-3', -08000.008e-3, -8, 'Floating point should work...'); +INSERT INTO t_leading_zeroes VALUES (11038, '+08000.008e-3', 08000.008e-3, 8, 'Floating point should work...'); + +INSERT INTO t_leading_zeroes VALUES (11050, '0b10000', 0b10000, 16, 'Binary should be parsed'); +INSERT INTO t_leading_zeroes VALUES (11051, '-0b10000', -0b10000, -16, 'Binary should be parsed'); +INSERT INTO t_leading_zeroes VALUES (11052, '+0b10000', +0b10000, 16, 'Binary should be parsed'); + +INSERT INTO t_leading_zeroes VALUES (11060, '0x0abcd', 0x0abcd, 43981, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes VALUES (11061, '-0x0abcd', -0x0abcd, -43981, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes VALUES (11062, '+0x0abcd', +0x0abcd, 43981, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes VALUES (11063, '0x0abcdP1', 0x0abcdP1, 87962, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes VALUES (11064, '0x0abcdP+1', 0x0abcdP+1, 87962, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes VALUES (11065, '0x0abcdP-1', 0x0abcdP-1, 21990, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes VALUES (11066, '0x0abcdP01', 0x0abcdP01, 87962, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes VALUES (11067, '0x0abcdP+01', 0x0abcdP+01, 87962, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes VALUES (11068, '0x0abcdP-01', 0x0abcdP-01, 21990, 'Hex should be parsed'); + +-- Floating point numbers go via readFloatTextFastImpl - so should not be affected + +INSERT INTO t_leading_zeroes_f VALUES (12000, '0', 0, 0, 'Single zero'); +INSERT INTO t_leading_zeroes_f VALUES (12001, '00', 00, 0, 'Double zero'); +INSERT INTO t_leading_zeroes_f VALUES (12002, '000000000000000', 000000000000000, 0, 'Mutliple redundant zeroes'); +INSERT INTO t_leading_zeroes_f VALUES (12003, '01', 01, 1, 'Octal like, interpret as decimal'); +INSERT INTO t_leading_zeroes_f VALUES (12004, '08', 08, 8, 'Octal like, interpret as decimal'); +INSERT INTO t_leading_zeroes_f VALUES (12005, '0100', 0100, 100, 'Octal like, interpret as decimal'); +INSERT INTO t_leading_zeroes_f VALUES (12006, '0000000000100', 0000000000100, 100, 'Octal like, interpret as decimal, multiple leading zeroes'); + +-- Float negative zero is machine/context dependent +-- INSERT INTO t_leading_zeroes_f VALUES (12010, '-0', -0, 0, 'Single zero negative'); +-- INSERT INTO t_leading_zeroes_f VALUES (12011, '-00', -00, 0, 'Double zero negative'); +-- INSERT INTO t_leading_zeroes_f VALUES (12012, '-000000000000000', -000000000000000, 0, 'Mutliple redundant zeroes negative'); +INSERT INTO t_leading_zeroes_f VALUES (12013, '-01', -01, -1, 'Octal 
like, interpret as decimal negative'); +INSERT INTO t_leading_zeroes_f VALUES (12014, '-08', -08, -8, 'Octal like, interpret as decimal negative'); +INSERT INTO t_leading_zeroes_f VALUES (12015, '-0100', -0100, -100, 'Octal like, interpret as decimal negative'); +INSERT INTO t_leading_zeroes_f VALUES (12016, '-0000000000100', -0000000000100, -100, 'Octal like, interpret as decimal, multiple leading zeroes negative'); + +INSERT INTO t_leading_zeroes_f VALUES (12020, '+0', +0, 0, 'Single zero positive'); +INSERT INTO t_leading_zeroes_f VALUES (12021, '+00', +00, 0, 'Double zero negpositiveative'); +INSERT INTO t_leading_zeroes_f VALUES (12022, '+000000000000000', +000000000000000, 0, 'Mutliple redundant zeroes positive'); +INSERT INTO t_leading_zeroes_f VALUES (12023, '+01', +01, 1, 'Octal like, interpret as decimal positive'); +INSERT INTO t_leading_zeroes_f VALUES (12024, '+08', +08, 8, 'Octal like, interpret as decimal positive'); +INSERT INTO t_leading_zeroes_f VALUES (12025, '+0100', +0100, 100, 'Octal like, interpret as decimal positive'); +INSERT INTO t_leading_zeroes_f VALUES (12026, '+0000000000100', +0000000000100, 100, 'Octal like, interpret as decimal, multiple leading zeroes positive'); + +INSERT INTO t_leading_zeroes_f VALUES (12030, '0000.008', 0000.008, 0.008, 'Floating point should work...'); +INSERT INTO t_leading_zeroes_f VALUES (12031, '-0000.008', -0000.008, -0.008, 'Floating point should work...'); +INSERT INTO t_leading_zeroes_f VALUES (12032, '+0000.008', +0000.008, 0.008, 'Floating point should work...'); +INSERT INTO t_leading_zeroes_f VALUES (12033, '0000.008e3', 0000.008e3, 8, 'Floating point should work...'); +INSERT INTO t_leading_zeroes_f VALUES (12034, '-0000.008e3', -0000.008e3, -8, 'Floating point should work...'); +INSERT INTO t_leading_zeroes_f VALUES (12035, '+0000.008e3', 0000.008e3, 8, 'Floating point should work...'); +INSERT INTO t_leading_zeroes_f VALUES (12036, '08.5e-3', 08.5e-3, 0.0085, 'Floating point should work...'); +INSERT INTO t_leading_zeroes_f VALUES (12037, '-08.5e-3', -08.5e-3, -0.0085, 'Floating point should work...'); +INSERT INTO t_leading_zeroes_f VALUES (12038, '+08.5e-3', 08.5e-3, 0.0085, 'Floating point should work...'); + +INSERT INTO t_leading_zeroes_f VALUES (12050, '0b10000', 0b10000, 16, 'Binary should be parsed'); +INSERT INTO t_leading_zeroes_f VALUES (12051, '-0b10000', -0b10000, -16, 'Binary should be parsed'); +INSERT INTO t_leading_zeroes_f VALUES (12052, '+0b10000', +0b10000, 16, 'Binary should be parsed'); + +INSERT INTO t_leading_zeroes_f VALUES (12063, '0x0abcdP1', 0x0abcdP1, 87962, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes_f VALUES (12064, '0x0abcdP+1', 0x0abcdP+1, 87962, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes_f VALUES (12065, '0x0abcdP-1', 0x0abcdP-1, 21990.5, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes_f VALUES (12066, '0x0abcdP01', 0x0abcdP01, 87962, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes_f VALUES (12067, '0x0abcdP+01', 0x0abcdP+01, 87962, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes_f VALUES (12068, '0x0abcdP-01', 0x0abcdP-01, 21990.5, 'Hex should be parsed'); +INSERT INTO t_leading_zeroes_f VALUES (12069, '0x01P-01', 0x01P-01, 0.5, 'Hex should be parsed'); + +SELECT 'Leading zeroes into Int64 (1XXX without input_format_values_interpret_expressions and 1XXXX with)'; +SELECT t.val == t.expected AS ok, * FROM t_leading_zeroes t ORDER BY id; + + +SELECT 'Leading zeroes into Float64 (2XXX without input_format_values_interpret_expressions and 2XXXX 
with)'; +SELECT t.val == t.expected AS ok, * FROM t_leading_zeroes_f t ORDER BY id; + + +DROP TABLE IF EXISTS t_leading_zeroes; +DROP TABLE IF EXISTS t_leading_zeroes_f; diff --git a/parser/testdata/02896_max_execution_time_with_break_overflow_mode/ast.json b/parser/testdata/02896_max_execution_time_with_break_overflow_mode/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02896_max_execution_time_with_break_overflow_mode/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02896_max_execution_time_with_break_overflow_mode/metadata.json b/parser/testdata/02896_max_execution_time_with_break_overflow_mode/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02896_max_execution_time_with_break_overflow_mode/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02896_max_execution_time_with_break_overflow_mode/query.sql b/parser/testdata/02896_max_execution_time_with_break_overflow_mode/query.sql new file mode 100644 index 000000000..ecaad62b3 --- /dev/null +++ b/parser/testdata/02896_max_execution_time_with_break_overflow_mode/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-fasttest + +SET max_rows_to_read = 0, max_execution_time = 0, max_estimated_execution_time = 0; + +-- Query stops after timeout without an error +SELECT * FROM numbers(100000000) SETTINGS max_block_size=1, max_execution_time=2, timeout_overflow_mode='break' FORMAT Null; + +-- Query returns an error when runtime is estimated after timeout_before_checking_execution_speed has passed +SELECT * FROM numbers(100000000) SETTINGS max_block_size=1, timeout_before_checking_execution_speed=1, max_estimated_execution_time=2, timeout_overflow_mode='throw' FORMAT Null; -- { serverError TOO_SLOW } + +-- Query returns a timeout error before its full execution time is estimated +SELECT * FROM numbers(100000000) SETTINGS max_block_size=1, timeout_before_checking_execution_speed=1, max_execution_time=2, timeout_overflow_mode='throw' FORMAT Null; -- { serverError TIMEOUT_EXCEEDED } diff --git a/parser/testdata/02896_multiple_OR/ast.json b/parser/testdata/02896_multiple_OR/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02896_multiple_OR/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02896_multiple_OR/metadata.json b/parser/testdata/02896_multiple_OR/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02896_multiple_OR/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02896_multiple_OR/query.sql b/parser/testdata/02896_multiple_OR/query.sql new file mode 100644 index 000000000..653ddebca --- /dev/null +++ b/parser/testdata/02896_multiple_OR/query.sql @@ -0,0 +1,28 @@ +-- https://github.com/ClickHouse/ClickHouse/pull/52653 +DROP TABLE IF EXISTS or_bug; +CREATE TABLE or_bug (key UInt8) ENGINE=MergeTree ORDER BY key; +INSERT INTO TABLE or_bug VALUES (0), (1); + +-- { echoOn } +SELECT * FROM or_bug WHERE (key = 1) OR false OR false; +SELECT * FROM or_bug WHERE (key = 1) OR false; +SELECT * FROM or_bug WHERE (key = 1); +-- { echoOff } + +-- https://github.com/ClickHouse/ClickHouse/issues/55288 +DROP TABLE IF EXISTS forms; +CREATE TABLE forms +( + `form_id` FixedString(24), + `text_field` String +) +ENGINE = MergeTree +PRIMARY KEY form_id +ORDER BY form_id; +insert into forms values ('5840ead423829c1eab29fa97','this is a test'); + +-- { echoOn } +select * from forms where text_field like '%this%' or 0 = 1 or 
0 = 1; +select * from forms where text_field like '%this%' or 0 = 1; +select * from forms where text_field like '%this%'; +-- { echoOff } diff --git a/parser/testdata/02896_optimize_array_exists_to_has_with_date/ast.json b/parser/testdata/02896_optimize_array_exists_to_has_with_date/ast.json new file mode 100644 index 000000000..09a504ac6 --- /dev/null +++ b/parser/testdata/02896_optimize_array_exists_to_has_with_date/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayExists (alias date_exists) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier date" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier date" + }, + { + "explain": " Literal '2022-07-31'" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2022-07-31'" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001299519, + "rows_read": 20, + "bytes_read": 826 + } +} diff --git a/parser/testdata/02896_optimize_array_exists_to_has_with_date/metadata.json b/parser/testdata/02896_optimize_array_exists_to_has_with_date/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02896_optimize_array_exists_to_has_with_date/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02896_optimize_array_exists_to_has_with_date/query.sql b/parser/testdata/02896_optimize_array_exists_to_has_with_date/query.sql new file mode 100644 index 000000000..b87a154b4 --- /dev/null +++ b/parser/testdata/02896_optimize_array_exists_to_has_with_date/query.sql @@ -0,0 +1 @@ +SELECT arrayExists(date -> (date = '2022-07-31'), [toDate('2022-07-31')]) AS date_exists; diff --git a/parser/testdata/02897_alter_partition_parameters/ast.json b/parser/testdata/02897_alter_partition_parameters/ast.json new file mode 100644 index 000000000..626cae715 --- /dev/null +++ b/parser/testdata/02897_alter_partition_parameters/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00113904, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02897_alter_partition_parameters/metadata.json b/parser/testdata/02897_alter_partition_parameters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02897_alter_partition_parameters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02897_alter_partition_parameters/query.sql b/parser/testdata/02897_alter_partition_parameters/query.sql new file mode 100644 index 000000000..6150642f8 --- /dev/null +++ 
b/parser/testdata/02897_alter_partition_parameters/query.sql @@ -0,0 +1,170 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + EventDate Date +) +ENGINE = MergeTree +ORDER BY tuple() +PARTITION BY toMonday(EventDate); + +INSERT INTO test VALUES(toDate('2023-10-09')); + +ALTER TABLE test DROP PARTITION ('2023-10-09'); + +SELECT count() FROM test; + +INSERT INTO test VALUES(toDate('2023-10-09')); + +ALTER TABLE test DROP PARTITION (('2023-10-09')); + +SELECT count() FROM test; + +INSERT INTO test VALUES(toDate('2023-10-09')); + +ALTER TABLE test DROP PARTITION '2023-10-09'; + +SELECT count() FROM test; + +INSERT INTO test VALUES(toDate('2023-10-09')); + +SET param_partition='2023-10-09'; + +ALTER TABLE test DROP PARTITION {partition:String}; + +SELECT count() FROM test; + +INSERT INTO test VALUES(toDate('2023-10-09')); + +ALTER TABLE test DROP PARTITION tuple(toMonday({partition:Date})); + +SELECT count() FROM test; + +INSERT INTO test VALUES(toDate('2023-10-09')); + +-- for some reason only tuples are allowed as non-string arguments +ALTER TABLE test DROP PARTITION toMonday({partition:String}); --{clientError SYNTAX_ERROR} + +set param_partition_id = '20231009'; + +ALTER TABLE test DROP PARTITION ID {partition_id:String}; + +SELECT count() FROM test; + +INSERT INTO test VALUES(toDate('2023-10-09')); + +ALTER TABLE test DROP PARTITION {partition:Date}; +SELECT count() FROM test; + +DROP TABLE IF EXISTS test; + +DROP TABLE IF EXISTS test2; + +CREATE TABLE test2 +( + a UInt32, + b Int64 +) +ENGINE = MergeTree +ORDER BY tuple() +PARTITION BY (a * b, b * b); + +INSERT INTO test2 VALUES(1, 2); + +ALTER TABLE test2 DROP PARTITION tuple(2, 4); + +SELECT count() FROM test2; + +INSERT INTO test2 VALUES(1, 2); + +ALTER TABLE test2 DROP PARTITION (2, 4); + +SELECT count() FROM test2; + +INSERT INTO test2 VALUES(1, 2); + +SET param_first='2'; +SET param_second='4'; + +ALTER TABLE test2 DROP PARTITION tuple({first:UInt32},{second:Int64}); + +SELECT count() FROM test2; + +DROP TABLE IF EXISTS test2; +DROP TABLE IF EXISTS test3; + +CREATE TABLE test3 +( + a UInt32, + b Int64 +) +ENGINE = MergeTree +ORDER BY tuple() +PARTITION BY a; + +INSERT INTO test3 VALUES(1, 2); + +SET param_simple='1'; + +ALTER TABLE test3 DROP PARTITION {simple:String}; + +SELECT count() FROM test3; + +DROP TABLE IF EXISTS test3; + +DROP TABLE IF EXISTS test4; + +CREATE TABLE test4 (EventDate Date) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY EventDate; + +INSERT INTO test4 VALUES(toDate('2023-10-09')); + +SET param_partition='2023-10-09'; + +ALTER TABLE test4 ON CLUSTER 'test_shard_localhost' DROP PARTITION {partition:String} FORMAT Null; + +SELECT count() FROM test4; + +DROP TABLE IF EXISTS test4; + +DROP TABLE IF EXISTS test5; + +CREATE TABLE test5 +( + a UInt32, + b Int64 +) +ENGINE = MergeTree +ORDER BY tuple() +PARTITION BY (a, b); + +INSERT INTO test5 VALUES(1, 2); + +SET param_f='1'; +SET param_s='2'; + +ALTER TABLE test5 DROP PARTITION ({f:UInt32}, 2); + +SELECT count() FROM test5; + +DROP TABLE IF EXISTS test5; + +DROP TABLE IF EXISTS test6; + +CREATE TABLE test6 +( + a UInt32, + b Int64 +) +ENGINE = MergeTree +ORDER BY tuple() +PARTITION BY (a, b); + +INSERT INTO test6 VALUES(1, 2); + +SET param_tuple=(1, 2); + +ALTER TABLE test6 DROP PARTITION {tuple:Tuple(UInt32, Int64)}; + +SELECT count() FROM test6; + +DROP TABLE IF EXISTS test6; diff --git a/parser/testdata/02898_parallel_replicas_custom_key_final/ast.json b/parser/testdata/02898_parallel_replicas_custom_key_final/ast.json new file mode 100644 index 
000000000..850e0a1af --- /dev/null +++ b/parser/testdata/02898_parallel_replicas_custom_key_final/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02898_parallel_replicas_final (children 1)" + }, + { + "explain": " Identifier 02898_parallel_replicas_final" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001078494, + "rows_read": 2, + "bytes_read": 110 + } +} diff --git a/parser/testdata/02898_parallel_replicas_custom_key_final/metadata.json b/parser/testdata/02898_parallel_replicas_custom_key_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02898_parallel_replicas_custom_key_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02898_parallel_replicas_custom_key_final/query.sql b/parser/testdata/02898_parallel_replicas_custom_key_final/query.sql new file mode 100644 index 000000000..32e259243 --- /dev/null +++ b/parser/testdata/02898_parallel_replicas_custom_key_final/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS 02898_parallel_replicas_final; + +CREATE TABLE 02898_parallel_replicas_final (x String, y Int32) ENGINE = ReplacingMergeTree ORDER BY cityHash64(x); + +INSERT INTO 02898_parallel_replicas_final SELECT toString(number), number % 3 FROM numbers(1000); + +SELECT y, count() +FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), 02898_parallel_replicas_final) FINAL +GROUP BY y +ORDER BY y +SETTINGS max_parallel_replicas=3, enable_parallel_replicas=1, parallel_replicas_custom_key='cityHash64(y)', parallel_replicas_mode='custom_key_sampling'; + +SELECT y, count() +FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), 02898_parallel_replicas_final) FINAL +GROUP BY y +ORDER BY y +SETTINGS max_parallel_replicas=3, enable_parallel_replicas=1, parallel_replicas_custom_key='cityHash64(y)', parallel_replicas_mode='custom_key_range'; + +DROP TABLE 02898_parallel_replicas_final; diff --git a/parser/testdata/02898_parallel_replicas_progress_bar/ast.json b/parser/testdata/02898_parallel_replicas_progress_bar/ast.json new file mode 100644 index 000000000..326c2903a --- /dev/null +++ b/parser/testdata/02898_parallel_replicas_progress_bar/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001074418, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02898_parallel_replicas_progress_bar/metadata.json b/parser/testdata/02898_parallel_replicas_progress_bar/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02898_parallel_replicas_progress_bar/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02898_parallel_replicas_progress_bar/query.sql b/parser/testdata/02898_parallel_replicas_progress_bar/query.sql new file mode 100644 index 000000000..63566d84f --- /dev/null +++ b/parser/testdata/02898_parallel_replicas_progress_bar/query.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS t1 SYNC; +DROP TABLE IF EXISTS t2 SYNC; +DROP TABLE IF EXISTS t3 SYNC; + +CREATE TABLE t1(k UInt32, v String) ENGINE ReplicatedMergeTree('/02898_parallel_replicas/{database}/test_tbl', 'r1') ORDER BY k; +CREATE TABLE t2(k UInt32, v String) ENGINE 
ReplicatedMergeTree('/02898_parallel_replicas/{database}/test_tbl', 'r2') ORDER BY k; +CREATE TABLE t3(k UInt32, v String) ENGINE ReplicatedMergeTree('/02898_parallel_replicas/{database}/test_tbl', 'r3') ORDER BY k; + +insert into t1 select number, toString(number) from numbers(1000, 1000); +insert into t2 select number, toString(number) from numbers(2000, 1000); +insert into t3 select number, toString(number) from numbers(3000, 1000); + +system sync replica t1; +system sync replica t2; +system sync replica t3; + +SET enable_parallel_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; +SET parallel_replicas_local_plan=0; -- corresponding logs about total rows are written only during interaction with remote nodes + -- but with a local plan a query execution can be finished locally even before we get a response from the remote node +SET parallel_replicas_only_with_analyzer = 0; -- necessary for CI run with disabled analyzer + +-- default coordinator +SELECT count(), min(k), max(k), avg(k) FROM t1 SETTINGS log_comment='02898_default_190aed82-2423-413b-ad4c-24dcca50f65b'; + +-- check logs +SYSTEM FLUSH LOGS text_log, query_log; +SET max_rows_to_read = 0; -- system.text_log can be really big +SELECT count() > 0 FROM system.text_log +WHERE query_id in (select query_id from system.query_log where current_database = currentDatabase() AND log_comment='02898_default_190aed82-2423-413b-ad4c-24dcca50f65b' and event_date >= yesterday()) + AND message LIKE '%Total rows to read: 3000%' AND event_date >= yesterday(); + +-- reading in order coordinator +-- disable parallel_replicas_local_plan since the test relies on traces which are only present when there is no local plan +SELECT k, sipHash64(v) FROM t1 order by k limit 5 offset 998 SETTINGS optimize_read_in_order=1, log_comment='02898_inorder_190aed82-2423-413b-ad4c-24dcca50f65b'; + +SYSTEM FLUSH LOGS text_log, query_log; +SELECT count() > 0 FROM system.text_log +WHERE query_id in (select query_id from system.query_log where current_database = currentDatabase() AND log_comment='02898_inorder_190aed82-2423-413b-ad4c-24dcca50f65b' and event_date >= yesterday()) + AND message LIKE '%Updated total rows to read: added % rows, total 3000 rows%' AND event_date >= yesterday(); + +DROP TABLE t1 SYNC; +DROP TABLE t2 SYNC; +DROP TABLE t3 SYNC; diff --git a/parser/testdata/02899_indexing_by_space_filling_curves/ast.json b/parser/testdata/02899_indexing_by_space_filling_curves/ast.json new file mode 100644 index 000000000..a04a8fd15 --- /dev/null +++ b/parser/testdata/02899_indexing_by_space_filling_curves/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001101643, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02899_indexing_by_space_filling_curves/metadata.json b/parser/testdata/02899_indexing_by_space_filling_curves/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02899_indexing_by_space_filling_curves/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02899_indexing_by_space_filling_curves/query.sql b/parser/testdata/02899_indexing_by_space_filling_curves/query.sql new file mode 100644 index 000000000..dc72882c3 --- /dev/null +++ b/parser/testdata/02899_indexing_by_space_filling_curves/query.sql @@ -0,0 +1,40 @@ +SET 
merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. +SET parallel_replicas_index_analysis_only_on_coordinator = 0; + +DROP TABLE IF EXISTS test; + +CREATE TABLE test (x UInt32, y UInt32) ENGINE = MergeTree ORDER BY mortonEncode(x, y) SETTINGS index_granularity = 8192, index_granularity_bytes = '1Mi'; +INSERT INTO test SELECT number DIV 1024, number % 1024 FROM numbers(1048576); + +SET max_rows_to_read = 8192, force_primary_key = 1, analyze_index_with_space_filling_curves = 1; +SELECT count() FROM test WHERE x >= 10 AND x <= 20 AND y >= 20 AND y <= 30; + +SET max_rows_to_read = 8192, force_primary_key = 1, analyze_index_with_space_filling_curves = 0; +SELECT count() FROM test WHERE x >= 10 AND x <= 20 AND y >= 20 AND y <= 30; -- { serverError INDEX_NOT_USED } + +DROP TABLE test; + +-- The same, but with more precise index + +CREATE TABLE test (x UInt32, y UInt32) ENGINE = MergeTree ORDER BY mortonEncode(x, y) SETTINGS index_granularity = 1; +SET max_rows_to_read = 0; +INSERT INTO test SELECT number DIV 32, number % 32 FROM numbers(1024); + +SET max_rows_to_read = 200, force_primary_key = 1, analyze_index_with_space_filling_curves = 1; +SELECT count() FROM test WHERE x >= 10 AND x <= 20 AND y >= 20 AND y <= 30; + +-- Various other conditions + +SELECT count() FROM test WHERE x = 10 SETTINGS max_rows_to_read = 64; +SELECT count() FROM test WHERE x = 10 AND y > 10 SETTINGS max_rows_to_read = 42; +SELECT count() FROM test WHERE x = 10 AND y < 10 SETTINGS max_rows_to_read = 20; + +SELECT count() FROM test WHERE y = 10 SETTINGS max_rows_to_read = 48; +SELECT count() FROM test WHERE x >= 10 AND y = 10 SETTINGS max_rows_to_read = 33; +SELECT count() FROM test WHERE y = 10 AND x <= 10 SETTINGS max_rows_to_read = 17; + +SELECT count() FROM test PREWHERE x >= 10 WHERE x < 11 AND y = 10 SETTINGS max_rows_to_read = 3; + +DROP TABLE test; diff --git a/parser/testdata/02900_add_subtract_interval_with_string_date/ast.json b/parser/testdata/02900_add_subtract_interval_with_string_date/ast.json new file mode 100644 index 000000000..745179967 --- /dev/null +++ b/parser/testdata/02900_add_subtract_interval_with_string_date/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '-- const date, const delta'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.00133793, + "rows_read": 5, + "bytes_read": 197 + } +} diff --git a/parser/testdata/02900_add_subtract_interval_with_string_date/metadata.json b/parser/testdata/02900_add_subtract_interval_with_string_date/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02900_add_subtract_interval_with_string_date/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02900_add_subtract_interval_with_string_date/query.sql b/parser/testdata/02900_add_subtract_interval_with_string_date/query.sql new file mode 100644 index 000000000..bc4410dab --- /dev/null +++ b/parser/testdata/02900_add_subtract_interval_with_string_date/query.sql @@ -0,0 +1,139 @@ +SELECT '-- const date, const delta'; + +SELECT 
' -- add'; +SELECT addYears('2023-10-22', 1), addYears('2023-10-22 12:34:56.123', 1); +SELECT addQuarters('2023-10-22', 1), addQuarters('2023-10-22 12:34:56.123', 1); +SELECT addMonths('2023-10-22', 1), addMonths('2023-10-22 12:34:56.123', 1); +SELECT addWeeks('2023-10-22', 1), addWeeks('2023-10-22 12:34:56.123', 1); +SELECT addDays('2023-10-22', 1), addDays('2023-10-22 12:34:56.123', 1); +SELECT addHours('2023-10-22', 1), addHours('2023-10-22 12:34:56.123', 1); +SELECT addMinutes('2023-10-22', 1), addMinutes('2023-10-22 12:34:56.123', 1); +SELECT addSeconds('2023-10-22', 1), addSeconds('2023-10-22 12:34:56.123', 1); +SELECT addMilliseconds('2023-10-22', 1), addMilliseconds('2023-10-22 12:34:56.123', 1); +SELECT addMicroseconds('2023-10-22', 1), addMicroseconds('2023-10-22 12:34:56.123', 1); +SELECT addNanoseconds('2023-10-22', 1), addNanoseconds('2023-10-22 12:34:56.123', 1); + +SELECT ' -- subtract'; +SELECT subtractYears('2023-10-22', 1), subtractYears('2023-10-22 12:34:56.123', 1); +SELECT subtractQuarters('2023-10-22', 1), subtractQuarters('2023-10-22 12:34:56.123', 1); +SELECT subtractMonths('2023-10-22', 1), subtractMonths('2023-10-22 12:34:56.123', 1); +SELECT subtractWeeks('2023-10-22', 1), subtractWeeks('2023-10-22 12:34:56.123', 1); +SELECT subtractDays('2023-10-22', 1), subtractDays('2023-10-22 12:34:56.123', 1); +SELECT subtractHours('2023-10-22', 1), subtractHours('2023-10-22 12:34:56.123', 1); +SELECT subtractMinutes('2023-10-22', 1), subtractMinutes('2023-10-22 12:34:56.123', 1); +SELECT subtractSeconds('2023-10-22', 1), subtractSeconds('2023-10-22 12:34:56.123', 1); +SELECT subtractMilliseconds('2023-10-22', 1), subtractMilliseconds('2023-10-22 12:34:56.123', 1); +SELECT subtractMicroseconds('2023-10-22', 1), subtractMicroseconds('2023-10-22 12:34:56.123', 1); +SELECT subtractNanoseconds('2023-10-22', 1), subtractNanoseconds('2023-10-22 12:34:56.123', 1); + +SELECT '-- non-const date, const delta'; + +SELECT ' -- add'; +SELECT addYears(materialize('2023-10-22'), 1), addYears(materialize('2023-10-22 12:34:56.123'), 1); +SELECT addQuarters(materialize('2023-10-22'), 1), addQuarters(materialize('2023-10-22 12:34:56.123'), 1); +SELECT addMonths(materialize('2023-10-22'), 1), addMonths(materialize('2023-10-22 12:34:56.123'), 1); +SELECT addWeeks(materialize('2023-10-22'), 1), addWeeks(materialize('2023-10-22 12:34:56.123'), 1); +SELECT addDays(materialize('2023-10-22'), 1), addDays(materialize('2023-10-22 12:34:56.123'), 1); +SELECT addHours(materialize('2023-10-22'), 1), addHours(materialize('2023-10-22 12:34:56.123'), 1); +SELECT addMinutes(materialize('2023-10-22'), 1), addMinutes(materialize('2023-10-22 12:34:56.123'), 1); +SELECT addSeconds(materialize('2023-10-22'), 1), addSeconds(materialize('2023-10-22 12:34:56.123'), 1); +SELECT addMilliseconds(materialize('2023-10-22'), 1), addMilliseconds(materialize('2023-10-22 12:34:56.123'), 1); +SELECT addMicroseconds(materialize('2023-10-22'), 1), addMicroseconds(materialize('2023-10-22 12:34:56.123'), 1); +SELECT addNanoseconds(materialize('2023-10-22'), 1), addNanoseconds(materialize('2023-10-22 12:34:56.123'), 1); + +SELECT ' -- subtract'; +SELECT subtractYears(materialize('2023-10-22'), 1), subtractYears(materialize('2023-10-22 12:34:56.123'), 1); +SELECT subtractQuarters(materialize('2023-10-22'), 1), subtractQuarters(materialize('2023-10-22 12:34:56.123'), 1); +SELECT subtractMonths(materialize('2023-10-22'), 1), subtractMonths(materialize('2023-10-22 12:34:56.123'), 1); +SELECT subtractWeeks(materialize('2023-10-22'), 1), 
subtractWeeks(materialize('2023-10-22 12:34:56.123'), 1); +SELECT subtractDays(materialize('2023-10-22'), 1), subtractDays(materialize('2023-10-22 12:34:56.123'), 1); +SELECT subtractHours(materialize('2023-10-22'), 1), subtractHours(materialize('2023-10-22 12:34:56.123'), 1); +SELECT subtractMinutes(materialize('2023-10-22'), 1), subtractMinutes(materialize('2023-10-22 12:34:56.123'), 1); +SELECT subtractSeconds(materialize('2023-10-22'), 1), subtractSeconds(materialize('2023-10-22 12:34:56.123'), 1); +SELECT subtractMilliseconds(materialize('2023-10-22'), 1), subtractMilliseconds(materialize('2023-10-22 12:34:56.123'), 1); +SELECT subtractMicroseconds(materialize('2023-10-22'), 1), subtractMicroseconds(materialize('2023-10-22 12:34:56.123'), 1); +SELECT subtractNanoseconds(materialize('2023-10-22'), 1), subtractNanoseconds(materialize('2023-10-22 12:34:56.123'), 1); + +SELECT '-- const date, non-const delta'; + +SELECT ' -- add'; +SELECT addYears('2023-10-22', materialize(1)), addYears('2023-10-22 12:34:56.123', materialize(1)); +SELECT addQuarters('2023-10-22', materialize(1)), addQuarters('2023-10-22 12:34:56.123', materialize(1)); +SELECT addMonths('2023-10-22', materialize(1)), addMonths('2023-10-22 12:34:56.123', materialize(1)); +SELECT addWeeks('2023-10-22', materialize(1)), addWeeks('2023-10-22 12:34:56.123', materialize(1)); +SELECT addDays('2023-10-22', materialize(1)), addDays('2023-10-22 12:34:56.123', materialize(1)); +SELECT addHours('2023-10-22', materialize(1)), addHours('2023-10-22 12:34:56.123', materialize(1)); +SELECT addMinutes('2023-10-22', materialize(1)), addMinutes('2023-10-22 12:34:56.123', materialize(1)); +SELECT addSeconds('2023-10-22', materialize(1)), addSeconds('2023-10-22 12:34:56.123', materialize(1)); +SELECT addMilliseconds('2023-10-22', materialize(1)), addMilliseconds('2023-10-22 12:34:56.123', materialize(1)); +SELECT addMicroseconds('2023-10-22', materialize(1)), addMicroseconds('2023-10-22 12:34:56.123', materialize(1)); +SELECT addNanoseconds('2023-10-22', materialize(1)), addNanoseconds('2023-10-22 12:34:56.123', materialize(1)); + +SELECT ' -- subtract'; +SELECT subtractYears('2023-10-22', materialize(1)), subtractYears('2023-10-22 12:34:56.123', materialize(1)); +SELECT subtractQuarters('2023-10-22', materialize(1)), subtractQuarters('2023-10-22 12:34:56.123', materialize(1)); +SELECT subtractMonths('2023-10-22', materialize(1)), subtractMonths('2023-10-22 12:34:56.123', materialize(1)); +SELECT subtractWeeks('2023-10-22', materialize(1)), subtractWeeks('2023-10-22 12:34:56.123', materialize(1)); +SELECT subtractDays('2023-10-22', materialize(1)), subtractDays('2023-10-22 12:34:56.123', materialize(1)); +SELECT subtractHours('2023-10-22', materialize(1)), subtractHours('2023-10-22 12:34:56.123', materialize(1)); +SELECT subtractMinutes('2023-10-22', materialize(1)), subtractMinutes('2023-10-22 12:34:56.123', materialize(1)); +SELECT subtractSeconds('2023-10-22', materialize(1)), subtractSeconds('2023-10-22 12:34:56.123', materialize(1)); +SELECT subtractMilliseconds('2023-10-22', materialize(1)), subtractMilliseconds('2023-10-22 12:34:56.123', materialize(1)); +SELECT subtractMicroseconds('2023-10-22', materialize(1)), subtractMicroseconds('2023-10-22 12:34:56.123', materialize(1)); +SELECT subtractNanoseconds('2023-10-22', materialize(1)), subtractNanoseconds('2023-10-22 12:34:56.123', materialize(1)); + +SELECT '-- non-const date, non-const delta'; + +SELECT ' -- add'; +SELECT addYears(materialize('2023-10-22'), materialize(1)), 
addYears(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT addQuarters(materialize('2023-10-22'), materialize(1)), addQuarters(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT addMonths(materialize('2023-10-22'), materialize(1)), addMonths(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT addWeeks(materialize('2023-10-22'), materialize(1)), addWeeks(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT addDays(materialize('2023-10-22'), materialize(1)), addDays(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT addHours(materialize('2023-10-22'), materialize(1)), addHours(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT addMinutes(materialize('2023-10-22'), materialize(1)), addMinutes(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT addSeconds(materialize('2023-10-22'), materialize(1)), addSeconds(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT addMilliseconds(materialize('2023-10-22'), materialize(1)), addMilliseconds(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT addMicroseconds(materialize('2023-10-22'), materialize(1)), addMicroseconds(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT addNanoseconds(materialize('2023-10-22'), materialize(1)), addNanoseconds(materialize('2023-10-22 12:34:56.123'), materialize(1)); + +SELECT ' -- subtract'; +SELECT subtractYears(materialize('2023-10-22'), materialize(1)), subtractYears(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT subtractQuarters(materialize('2023-10-22'), materialize(1)), subtractQuarters(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT subtractMonths(materialize('2023-10-22'), materialize(1)), subtractMonths(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT subtractWeeks(materialize('2023-10-22'), materialize(1)), subtractWeeks(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT subtractDays(materialize('2023-10-22'), materialize(1)), subtractDays(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT subtractHours(materialize('2023-10-22'), materialize(1)), subtractHours(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT subtractMinutes(materialize('2023-10-22'), materialize(1)), subtractMinutes(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT subtractSeconds(materialize('2023-10-22'), materialize(1)), subtractSeconds(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT subtractMilliseconds(materialize('2023-10-22'), materialize(1)), subtractMilliseconds(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT subtractMicroseconds(materialize('2023-10-22'), materialize(1)), subtractMicroseconds(materialize('2023-10-22 12:34:56.123'), materialize(1)); +SELECT subtractNanoseconds(materialize('2023-10-22'), materialize(1)), subtractNanoseconds(materialize('2023-10-22 12:34:56.123'), 1); + +SELECT '-- plus operator'; + +SELECT '2023-10-23' + INTERVAL 1 YEAR, '2023-10-23 12:34:56.123' + INTERVAL 1 YEAR; +SELECT '2023-10-23' + INTERVAL 1 QUARTER, '2023-10-23 12:34:56.123' + INTERVAL 1 QUARTER; +SELECT '2023-10-23' + INTERVAL 1 MONTH,'2023-10-23 12:34:56.123' + INTERVAL 1 MONTH; +SELECT '2023-10-23' + INTERVAL 1 WEEK, '2023-10-23 12:34:56.123' + INTERVAL 1 WEEK; +SELECT '2023-10-23' + INTERVAL 1 DAY, '2023-10-23 12:34:56.123' + INTERVAL 1 DAY; +SELECT '2023-10-23' + INTERVAL 1 HOUR, '2023-10-23 12:34:56.123' + INTERVAL 1 HOUR; +SELECT '2023-10-23' + INTERVAL 1 MINUTE, 
'2023-10-23 12:34:56.123' + INTERVAL 1 MINUTE; +SELECT '2023-10-23' + INTERVAL 1 SECOND, '2023-10-23 12:34:56.123' + INTERVAL 1 SECOND; +SELECT '2023-10-23' + INTERVAL 1 MILLISECOND, '2023-10-23 12:34:56.123' + INTERVAL 1 MILLISECOND; +SELECT '2023-10-23' + INTERVAL 1 MICROSECOND, '2023-10-23 12:34:56.123' + INTERVAL 1 MICROSECOND; +SELECT '2023-10-23' + INTERVAL 1 NANOSECOND, '2023-10-23 12:34:56.123' + INTERVAL 1 NANOSECOND; + +SELECT '-- minus operator'; + +SELECT '2023-10-23' - INTERVAL 1 YEAR, '2023-10-23 12:34:56.123' - INTERVAL 1 YEAR; +SELECT '2023-10-23' - INTERVAL 1 QUARTER, '2023-10-23 12:34:56.123' - INTERVAL 1 QUARTER; +SELECT '2023-10-23' - INTERVAL 1 MONTH, '2023-10-23 12:34:56.123' - INTERVAL 1 MONTH; +SELECT '2023-10-23' - INTERVAL 1 WEEK, '2023-10-23 12:34:56.123' - INTERVAL 1 WEEK; +SELECT '2023-10-23' - INTERVAL 1 DAY, '2023-10-23 12:34:56.123' - INTERVAL 1 DAY; +SELECT '2023-10-23' - INTERVAL 1 HOUR, '2023-10-23 12:34:56.123' - INTERVAL 1 HOUR; +SELECT '2023-10-23' - INTERVAL 1 MINUTE, '2023-10-23 12:34:56.123' - INTERVAL 1 MINUTE; +SELECT '2023-10-23' - INTERVAL 1 SECOND, '2023-10-23 12:34:56.123' - INTERVAL 1 SECOND; +SELECT '2023-10-23' - INTERVAL 1 MILLISECOND, '2023-10-23 12:34:56.123' - INTERVAL 1 MILLISECOND; +SELECT '2023-10-23' - INTERVAL 1 MICROSECOND, '2023-10-23 12:34:56.123' - INTERVAL 1 MICROSECOND; +SELECT '2023-10-23' - INTERVAL 1 NANOSECOND, '2023-10-23 12:34:56.123' - INTERVAL 1 NANOSECOND; diff --git a/parser/testdata/02900_date_time_check_overflow/ast.json b/parser/testdata/02900_date_time_check_overflow/ast.json new file mode 100644 index 000000000..a7166d686 --- /dev/null +++ b/parser/testdata/02900_date_time_check_overflow/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00154248, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02900_date_time_check_overflow/metadata.json b/parser/testdata/02900_date_time_check_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02900_date_time_check_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02900_date_time_check_overflow/query.sql b/parser/testdata/02900_date_time_check_overflow/query.sql new file mode 100644 index 000000000..a814c4883 --- /dev/null +++ b/parser/testdata/02900_date_time_check_overflow/query.sql @@ -0,0 +1,73 @@ +SET session_timezone = 'UTC'; + +SELECT 'ignore'; +SET date_time_overflow_behavior = 'ignore'; +SELECT toDateTime(toDateTime64('1900-01-01 00:00:00.123', 3)); +SELECT toDateTime(toDateTime64('2299-12-31 23:59:59.999', 3)); + +SELECT toDateTime(toDate32('1900-01-01')); +SELECT toDateTime(toDate32('2299-12-31')); + +SELECT toDateTime(toDate('2149-06-06')); + +SELECT toDate(toDateTime64('1900-01-01 00:00:00.123', 3)); +SELECT toDate(toDateTime64('2149-06-07 00:00:00.123', 3)); +SELECT toDate(toDateTime64('2299-12-31 23:59:59.999', 3)); + +SELECT toDate(toDate32('1900-01-01')); +SELECT toDate(toDate32('2299-12-31')); + + +SELECT 'No output on `throw`'; +SET date_time_overflow_behavior = 'throw'; +SELECT toDateTime(toDateTime64('1900-01-01 00:00:00.123', 3)); -- { serverError VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE } +SELECT toDateTime(toDateTime64('2299-12-31 23:59:59.999', 3)); -- { serverError VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE } +SELECT toDateTime(toDate32('1900-01-01')); -- { serverError VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE } 
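+ +-- Editor's note (illustrative only, not part of the upstream test): the conversions in this block throw because the target types cannot represent the values: +-- DateTime covers 1970-01-01 00:00:00 .. 2106-02-07 06:28:15 (stored as UInt32 seconds) and Date covers 1970-01-01 .. 2149-06-06 (stored as UInt16 days), +-- while Date32 and DateTime64 cover the extended range 1900-01-01 .. 2299-12-31. +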
+SELECT toDateTime(toDate32('2299-12-31')); -- { serverError VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE } +SELECT toDateTime(toDate('2149-06-06')); -- { serverError VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE } +SELECT toDate(toDateTime64('1900-01-01 00:00:00.123', 3)); -- { serverError VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE } +SELECT toDate(toDateTime64('2299-12-31 23:59:59.999', 3)); -- { serverError VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE } +SELECT toDate(toDate32('1900-01-01')); -- { serverError VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE } +SELECT toDate(toDate32('2299-12-31')); -- { serverError VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE } + + +SELECT 'saturate'; +SET date_time_overflow_behavior = 'saturate'; + +SELECT toDateTime(toDateTime64('1900-01-01 00:00:00.123', 3)); +SELECT toDateTime(toDateTime64('2299-12-31 23:59:59.999', 3)); + +SELECT toDateTime(toDate32('1900-01-01')); +SELECT toDateTime(toDate32('2299-12-31')); + +SELECT toDateTime(toDate('2149-06-06')); + +SELECT toDate(toDateTime64('1900-01-01 00:00:00.123', 3)); +SELECT toDate(toDateTime64('2149-06-07 00:00:00.123', 3)); +SELECT toDate(toDateTime64('2299-12-31 23:59:59.999', 3)); + +SELECT toDate(toDate32('1900-01-01')); +SELECT toDate(toDate32('2299-12-31')); + + +-- Test DateTime64 to Date conversion with different timezones +SELECT 'DateTime64 to Date with timezones'; +SET date_time_overflow_behavior = 'saturate'; + +-- Test with UTC +SELECT toDate(toDateTime64('2245-12-31 23:59:59', 0, 'UTC')); +SELECT toDate(toDateTime64('1900-01-01 00:00:00', 0, 'UTC')); + +-- Test with Europe/Berlin (UTC+1/+2) +SELECT toDate(toDateTime64('2245-12-31 23:59:59', 0, 'Europe/Berlin')); +SELECT toDate(toDateTime64('1900-01-01 00:00:00', 0, 'Europe/Berlin')); + +-- Test with America/New_York (UTC-5/-4) +SELECT toDate(toDateTime64('2245-12-31 23:59:59', 0, 'America/New_York')); +SELECT toDate(toDateTime64('1900-01-01 00:00:00', 0, 'America/New_York')); + +-- Test edge cases around max date with timezone +SELECT toDate(toDateTime64('2149-06-06 23:59:59', 0, 'UTC')); +SELECT toDate(toDateTime64('2149-06-07 00:00:00', 0, 'UTC')); +SELECT toDate(toDateTime64('2149-06-06 23:59:59', 0, 'Europe/Berlin')); +SELECT toDate(toDateTime64('2149-06-07 00:00:00', 0, 'Europe/Berlin')); diff --git a/parser/testdata/02900_decimal_sort_with_multiple_columns/ast.json b/parser/testdata/02900_decimal_sort_with_multiple_columns/ast.json new file mode 100644 index 000000000..9f5ad55cb --- /dev/null +++ b/parser/testdata/02900_decimal_sort_with_multiple_columns/ast.json @@ -0,0 +1,106 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (alias i) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function negate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function toDecimal32 (alias j) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_20" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + 
"explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_600" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier i" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier j" + } + ], + + "rows": 28, + + "statistics": + { + "elapsed": 0.001193803, + "rows_read": 28, + "bytes_read": 1081 + } +} diff --git a/parser/testdata/02900_decimal_sort_with_multiple_columns/metadata.json b/parser/testdata/02900_decimal_sort_with_multiple_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02900_decimal_sort_with_multiple_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02900_decimal_sort_with_multiple_columns/query.sql b/parser/testdata/02900_decimal_sort_with_multiple_columns/query.sql new file mode 100644 index 000000000..bc74add25 --- /dev/null +++ b/parser/testdata/02900_decimal_sort_with_multiple_columns/query.sql @@ -0,0 +1 @@ +select -number % 2 as i, toDecimal32(number % 20, 3) as j from numbers(600) order by i, j; diff --git a/parser/testdata/02900_issue_55858/ast.json b/parser/testdata/02900_issue_55858/ast.json new file mode 100644 index 000000000..c0563cf82 --- /dev/null +++ b/parser/testdata/02900_issue_55858/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001224234, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02900_issue_55858/metadata.json b/parser/testdata/02900_issue_55858/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02900_issue_55858/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02900_issue_55858/query.sql b/parser/testdata/02900_issue_55858/query.sql new file mode 100644 index 000000000..af01442ce --- /dev/null +++ b/parser/testdata/02900_issue_55858/query.sql @@ -0,0 +1,10 @@ +set precise_float_parsing = 1; + +select cast('2023-01-01' as Float64); -- { serverError CANNOT_PARSE_TEXT } +select cast('2023-01-01' as Float32); -- { serverError CANNOT_PARSE_TEXT } +select toFloat32('2023-01-01'); -- { serverError CANNOT_PARSE_TEXT } +select toFloat64('2023-01-01'); -- { serverError CANNOT_PARSE_TEXT } +select toFloat32OrZero('2023-01-01'); +select toFloat64OrZero('2023-01-01'); +select toFloat32OrNull('2023-01-01'); +select toFloat64OrNull('2023-01-01'); diff --git a/parser/testdata/02900_window_function_with_sparse_column/ast.json b/parser/testdata/02900_window_function_with_sparse_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02900_window_function_with_sparse_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02900_window_function_with_sparse_column/metadata.json b/parser/testdata/02900_window_function_with_sparse_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02900_window_function_with_sparse_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02900_window_function_with_sparse_column/query.sql b/parser/testdata/02900_window_function_with_sparse_column/query.sql new file 
mode 100644 index 000000000..6919e23ad --- /dev/null +++ b/parser/testdata/02900_window_function_with_sparse_column/query.sql @@ -0,0 +1,45 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/55843 +-- These tests pass without the fix when either of +-- - optimize_read_in_window_order = 0 and optimize_read_in_order = 0 +-- - ratio_of_defaults_for_sparse_serialization = 1 +-- However it is better to leave the settings as randomized because we run +-- stateless tests quite a few times during a PR, so if a bug is introduced +-- then there is a big chance of catching it. Furthermore, randomized settings +-- might identify new bugs. + +CREATE TABLE test1 +( + id String, + time DateTime64(9), + key Int64, + value Bool, +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(time) +ORDER BY (key, id, time); + +INSERT INTO test1 VALUES ('id0', now(), 3, false); + +SELECT last_value(value) OVER (PARTITION BY id ORDER BY time ASC) as last_value +FROM test1 +WHERE (key = 3); + +SELECT last_value(value) OVER (ORDER BY time ASC) as last_value +FROM test1 +WHERE (key = 3); + +SELECT last_value(value) OVER (PARTITION BY id ORDER BY time ASC) as last_value +FROM test1; + + + +CREATE TABLE test2 +( + time DateTime, + value String +) +ENGINE = MergeTree +ORDER BY (time) AS SELECT 0, ''; + +SELECT any(value) OVER (ORDER BY time ASC) FROM test2; +SELECT last_value(value) OVER (ORDER BY time ASC) FROM test2; diff --git a/parser/testdata/02901_analyzer_recursive_window/ast.json b/parser/testdata/02901_analyzer_recursive_window/ast.json new file mode 100644 index 000000000..8c114e649 --- /dev/null +++ b/parser/testdata/02901_analyzer_recursive_window/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " WindowListElement" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001206496, + "rows_read": 7, + "bytes_read": 244 + } +} diff --git a/parser/testdata/02901_analyzer_recursive_window/metadata.json b/parser/testdata/02901_analyzer_recursive_window/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02901_analyzer_recursive_window/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02901_analyzer_recursive_window/query.sql b/parser/testdata/02901_analyzer_recursive_window/query.sql new file mode 100644 index 000000000..49feb897b --- /dev/null +++ b/parser/testdata/02901_analyzer_recursive_window/query.sql @@ -0,0 +1,4 @@ +SELECT 1 WINDOW x AS (PARTITION BY x); -- { serverError UNKNOWN_IDENTIFIER } +SELECT 1 WINDOW x AS (PARTITION BY dummy); +SELECT 1 WINDOW dummy AS (PARTITION BY dummy); +SELECT count() OVER dummy WINDOW dummy AS (PARTITION BY dummy); diff --git a/parser/testdata/02901_predicate_pushdown_cte_stateful/ast.json b/parser/testdata/02901_predicate_pushdown_cte_stateful/ast.json new file mode 100644 index 000000000..eb34309ea --- /dev/null +++ b/parser/testdata/02901_predicate_pushdown_cte_stateful/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000966727, + "rows_read": 1, + "bytes_read": 11 + } +} diff 
--git a/parser/testdata/02901_predicate_pushdown_cte_stateful/metadata.json b/parser/testdata/02901_predicate_pushdown_cte_stateful/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02901_predicate_pushdown_cte_stateful/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02901_predicate_pushdown_cte_stateful/query.sql b/parser/testdata/02901_predicate_pushdown_cte_stateful/query.sql new file mode 100644 index 000000000..d65b0da42 --- /dev/null +++ b/parser/testdata/02901_predicate_pushdown_cte_stateful/query.sql @@ -0,0 +1,22 @@ +SET allow_deprecated_error_prone_window_functions = 1; + +CREATE TABLE t +( + `rDate` String, + `cpu_total` Int64 +) +ENGINE = Log; + +insert into t values ('2022-03-06', 22442 ), ('2022-03-05', 22382 ), ('2022-03-04', 22395 ), ('2022-03-03', 22306 ), ('2022-03-02', 22095 ), ('2022-03-01', 22065 ), ('2022-02-28', 21949 ), ('2022-02-27', 21884 ), ('2022-02-26', 21875 ), ('2022-02-25', 21858 ), ('2022-02-24', 21775 ), ('2022-02-23', 21639 ), ('2022-02-22', 21557 ), ('2022-02-21', 21381 ), ('2022-02-20', 21794 ), ('2022-02-19', 21808 ), ('2022-02-18', 21695 ), ('2022-02-17', 20874 ), ('2022-02-16', 20911 ), ('2022-02-15', 20898 ), ('2022-02-14', 20768 ), ('2022-02-13', 20588 ), ('2022-02-12', 20516 ), ('2022-02-11', 20501 ), ('2022-02-10', 20429 ), ('2022-02-09', 20208 ), ('2022-02-08', 20186 ), ('2022-02-07', 20192 ), ('2022-02-06', 20192 ), ('2022-02-05', 20175 ), ('2022-02-04', 20191 ), ('2022-02-03', 20214 ), ('2022-02-02', 20215 ), ('2022-02-01', 20220 ), ('2022-01-31', 20146 ), ('2022-01-30', 20137 ), ('2022-01-29', 20162 ), ('2022-01-28', 20164 ), ('2022-01-27', 20128 ), ('2022-01-26', 20139 ), ('2022-01-25', 20000 ), ('2022-01-24', 19778 ), ('2022-01-23', 19789 ), ('2022-01-22', 19628 ), ('2022-01-21', 19631 ), ('2022-01-20', 19386 ), ('2022-01-19', 19439 ), ('2022-01-18', 19477 ), ('2022-01-17', 19386 ), ('2022-01-16', 20013 ), ('2022-01-15', 19359 ), ('2022-01-14', 19356 ), ('2022-01-13', 19300 ), ('2022-01-12', 19237 ), ('2022-01-11', 19159 ), ('2022-01-10', 18970 ), ('2022-01-09', 18804 ), ('2022-01-08', 18816 ), ('2022-01-07', 18808 ), ('2022-01-06', 18693 ), ('2022-01-05', 18639 ), ('2022-01-04', 18579 ), ('2022-01-03', 18450 ), ('2022-01-02', 18458 ), ('2022-01-01', 18445 ), ('2021-12-31', 18443 ), ('2021-12-30', 18388 ), ('2021-12-29', 18348 ), ('2021-12-28', 18042 ), ('2021-12-26', 18049 ), ('2021-12-22', 17962 ); + +SELECT cpu_total_week +FROM + ( + WITH neighbor(cpu_total, 7) AS cpu_total_7 + SELECT + rDate, + floor(multiIf(cpu_total_7 = 0, 0, cpu_total - cpu_total_7), 2) AS cpu_total_week + FROM t + ) AS t_table_471873 +WHERE (rDate >= '2022-03-06') AND (rDate <= '2022-03-06') +SETTINGS enable_optimize_predicate_expression = 1; diff --git a/parser/testdata/02901_remove_nullable_crash_analyzer/ast.json b/parser/testdata/02901_remove_nullable_crash_analyzer/ast.json new file mode 100644 index 000000000..a9d5506b7 --- /dev/null +++ b/parser/testdata/02901_remove_nullable_crash_analyzer/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function 
multiIf (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function isNotNull (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Asterisk" + }, + { + "explain": " Literal NULL" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001325814, + "rows_read": 17, + "bytes_read": 647 + } +} diff --git a/parser/testdata/02901_remove_nullable_crash_analyzer/metadata.json b/parser/testdata/02901_remove_nullable_crash_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02901_remove_nullable_crash_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02901_remove_nullable_crash_analyzer/query.sql b/parser/testdata/02901_remove_nullable_crash_analyzer/query.sql new file mode 100644 index 000000000..96f88a23a --- /dev/null +++ b/parser/testdata/02901_remove_nullable_crash_analyzer/query.sql @@ -0,0 +1,6 @@ +SELECT 1 % ( CASE WHEN 1 THEN (1 IS NOT NULL + *) ELSE NULL END ); +SELECT CASE 1 WHEN FALSE THEN 1 ELSE CASE WHEN 1 THEN 1 - (CASE 1 WHEN 1 THEN 1 ELSE 1 END) END % 1 END; + +SELECT 1 % if(1, dummy, NULL); -- { serverError ILLEGAL_DIVISION } +SELECT sum(multiIf(1, dummy, NULL)); +SELECT sum(multiIf(1, dummy, NULL)) OVER (); diff --git a/parser/testdata/02902_add_scalar_in_all_case/ast.json b/parser/testdata/02902_add_scalar_in_all_case/ast.json new file mode 100644 index 000000000..e74792f66 --- /dev/null +++ b/parser/testdata/02902_add_scalar_in_all_case/ast.json @@ -0,0 +1,121 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier TSVRaw" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayStringConcat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function groupArray (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'some long string'" + }, + { + "explain": " Literal '\\n'" + }, + { + "explain": " Literal 'LowCardinality(String)'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10000" + }, 
+ { + "explain": " Identifier TSVRaw" + } + ], + + "rows": 33, + + "statistics": + { + "elapsed": 0.001437613, + "rows_read": 33, + "bytes_read": 1489 + } +} diff --git a/parser/testdata/02902_add_scalar_in_all_case/metadata.json b/parser/testdata/02902_add_scalar_in_all_case/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02902_add_scalar_in_all_case/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02902_add_scalar_in_all_case/query.sql b/parser/testdata/02902_add_scalar_in_all_case/query.sql new file mode 100644 index 000000000..047d2f4cd --- /dev/null +++ b/parser/testdata/02902_add_scalar_in_all_case/query.sql @@ -0,0 +1 @@ +SELECT count() FROM format(TSVRaw, (SELECT cast(arrayStringConcat(groupArray('some long string'), '\n'), 'LowCardinality(String)') FROM numbers(10000))) FORMAT TSVRaw; diff --git a/parser/testdata/02902_diable_apply_deleted_mask/ast.json b/parser/testdata/02902_diable_apply_deleted_mask/ast.json new file mode 100644 index 000000000..74d55a4c7 --- /dev/null +++ b/parser/testdata/02902_diable_apply_deleted_mask/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_apply_deleted_mask (children 1)" + }, + { + "explain": " Identifier test_apply_deleted_mask" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001164704, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/02902_diable_apply_deleted_mask/metadata.json b/parser/testdata/02902_diable_apply_deleted_mask/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02902_diable_apply_deleted_mask/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02902_diable_apply_deleted_mask/query.sql b/parser/testdata/02902_diable_apply_deleted_mask/query.sql new file mode 100644 index 000000000..81d5714f0 --- /dev/null +++ b/parser/testdata/02902_diable_apply_deleted_mask/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS test_apply_deleted_mask; + +CREATE TABLE test_apply_deleted_mask(id Int64, value String) ENGINE = MergeTree ORDER BY id; + +INSERT INTO test_apply_deleted_mask SELECT number, number::String FROM numbers(5); + +DELETE FROM test_apply_deleted_mask WHERE id % 2 = 0; + +SELECT 'Normal SELECT does not see deleted rows'; +SELECT *, _row_exists FROM test_apply_deleted_mask; + +SELECT 'With the setting disabled the deleted rows are visible'; +SELECT *, _row_exists FROM test_apply_deleted_mask SETTINGS apply_deleted_mask = 0; + +SELECT 'With the setting disabled the deleted rows are visible but still can be filtered out'; +SELECT * FROM test_apply_deleted_mask WHERE _row_exists SETTINGS apply_deleted_mask = 0; + +INSERT INTO test_apply_deleted_mask SELECT number, number::String FROM numbers(5, 1); + +OPTIMIZE TABLE test_apply_deleted_mask FINAL SETTINGS mutations_sync=2; + +SELECT 'Read the data after OPTIMIZE, all deleted rows should be physically removed now'; +SELECT *, _row_exists FROM test_apply_deleted_mask SETTINGS apply_deleted_mask = 0; + +DROP TABLE test_apply_deleted_mask; \ No newline at end of file diff --git a/parser/testdata/02902_json_skip_null_values/ast.json b/parser/testdata/02902_json_skip_null_values/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02902_json_skip_null_values/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02902_json_skip_null_values/metadata.json
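+-- Sketch of the mechanism behind 02902_diable_apply_deleted_mask above, assuming
+-- lightweight-delete semantics: DELETE FROM writes a _row_exists mask (0 = deleted)
+-- instead of rewriting parts, and normal SELECTs filter on it implicitly, so disabling
+-- the mask exposes the masked rows to count() as well:
+SELECT count() FROM test_apply_deleted_mask SETTINGS apply_deleted_mask = 0;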
b/parser/testdata/02902_json_skip_null_values/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02902_json_skip_null_values/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02902_json_skip_null_values/query.sql b/parser/testdata/02902_json_skip_null_values/query.sql new file mode 100644 index 000000000..24b162216 --- /dev/null +++ b/parser/testdata/02902_json_skip_null_values/query.sql @@ -0,0 +1,39 @@ +-- Tags: no-fasttest + +create table test_02902 engine File(JSONEachRow) + settings output_format_json_named_tuples_as_objects = 1, output_format_json_skip_null_value_in_named_tuples = 1 + as select cast((number::String, null, (number::String, null)), 'Tuple(a Nullable(String), b Nullable(Int64), c Tuple(x Nullable(String), y Nullable(Float64)))') as c + from numbers(3); + +select * from test_02902 format JSONEachRow settings output_format_json_named_tuples_as_objects = 1, output_format_json_skip_null_value_in_named_tuples = 1; +select * from test_02902 format JSONEachRow settings output_format_json_named_tuples_as_objects = 1, output_format_json_skip_null_value_in_named_tuples = 0; + +drop table test_02902; + +select + toJSONString(c) +from + ( + select + cast( + (number:: String, null, (number:: String, null)), + 'Tuple(a Nullable(String), b Nullable(Int64), c Tuple(x Nullable(String), y Nullable(Float64)))' + ) as c + from + numbers(3) + ) +settings output_format_json_named_tuples_as_objects = 1, output_format_json_skip_null_value_in_named_tuples = 0; + +select + toJSONString(c) +from + ( + select + cast( + (number:: String, null, (number:: String, null)), + 'Tuple(a Nullable(String), b Nullable(Int64), c Tuple(x Nullable(String), y Nullable(Float64)))' + ) as c + from + numbers(3) + ) +settings output_format_json_named_tuples_as_objects = 1, output_format_json_skip_null_value_in_named_tuples = 1; diff --git a/parser/testdata/02902_select_subcolumns_from_engine_null/ast.json b/parser/testdata/02902_select_subcolumns_from_engine_null/ast.json new file mode 100644 index 000000000..84daa8141 --- /dev/null +++ b/parser/testdata/02902_select_subcolumns_from_engine_null/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery null_02902 (children 3)" + }, + { + "explain": " Identifier null_02902" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration t (children 1)" + }, + { + "explain": " DataType Tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " NameTypePair num (children 1)" + }, + { + "explain": " DataType Int64" + }, + { + "explain": " NameTypePair str (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Null" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001138626, + "rows_read": 13, + "bytes_read": 480 + } +} diff --git a/parser/testdata/02902_select_subcolumns_from_engine_null/metadata.json b/parser/testdata/02902_select_subcolumns_from_engine_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02902_select_subcolumns_from_engine_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02902_select_subcolumns_from_engine_null/query.sql b/parser/testdata/02902_select_subcolumns_from_engine_null/query.sql new file 
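+-- The effect tested in 02902_json_skip_null_values above, reduced to a minimal sketch:
+-- with the skip-null setting enabled, a NULL field of a named tuple is omitted from the
+-- JSON object entirely (expected shape here: {"a":"0"}):
+SELECT toJSONString(CAST(('0', NULL), 'Tuple(a Nullable(String), b Nullable(Int64))'))
+SETTINGS output_format_json_named_tuples_as_objects = 1, output_format_json_skip_null_value_in_named_tuples = 1;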
mode 100644 index 000000000..4cd6e68fd --- /dev/null +++ b/parser/testdata/02902_select_subcolumns_from_engine_null/query.sql @@ -0,0 +1,6 @@ +CREATE TABLE null_02902 (t Tuple(num Int64, str String)) ENGINE = Null; +SELECT t FROM null_02902; +SELECT tupleElement(t, 'num') FROM null_02902; +SELECT t.num, t.str FROM null_02902; + +DROP TABLE null_02902; diff --git a/parser/testdata/02902_show_databases_limit/ast.json b/parser/testdata/02902_show_databases_limit/ast.json new file mode 100644 index 000000000..7cdd3b775 --- /dev/null +++ b/parser/testdata/02902_show_databases_limit/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "ShowTables" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001205574, + "rows_read": 1, + "bytes_read": 18 + } +} diff --git a/parser/testdata/02902_show_databases_limit/metadata.json b/parser/testdata/02902_show_databases_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02902_show_databases_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02902_show_databases_limit/query.sql b/parser/testdata/02902_show_databases_limit/query.sql new file mode 100644 index 000000000..e13ae5a7e --- /dev/null +++ b/parser/testdata/02902_show_databases_limit/query.sql @@ -0,0 +1 @@ +SHOW DATABASES LIMIT 0; diff --git a/parser/testdata/02902_topKGeneric_deserialization_memory/ast.json b/parser/testdata/02902_topKGeneric_deserialization_memory/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02902_topKGeneric_deserialization_memory/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02902_topKGeneric_deserialization_memory/metadata.json b/parser/testdata/02902_topKGeneric_deserialization_memory/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02902_topKGeneric_deserialization_memory/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02902_topKGeneric_deserialization_memory/query.sql b/parser/testdata/02902_topKGeneric_deserialization_memory/query.sql new file mode 100644 index 000000000..3228810e0 --- /dev/null +++ b/parser/testdata/02902_topKGeneric_deserialization_memory/query.sql @@ -0,0 +1,9 @@ +-- Tags: no-fasttest + +-- https://github.com/ClickHouse/ClickHouse/issues/49706 +-- Using format Parquet for convenience so it errors out without output (but still deserializes the output) +-- Without the fix this would OOM the client when deserializing the state +SELECT + topKResampleState(1048576, 257, 65536, 10)(toString(number), number) +FROM numbers(3) +FORMAT Parquet; -- { clientError UNKNOWN_TYPE } diff --git a/parser/testdata/02903_bug_43644/ast.json b/parser/testdata/02903_bug_43644/ast.json new file mode 100644 index 000000000..c9a56a97e --- /dev/null +++ b/parser/testdata/02903_bug_43644/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001096386, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/02903_bug_43644/metadata.json b/parser/testdata/02903_bug_43644/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02903_bug_43644/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02903_bug_43644/query.sql b/parser/testdata/02903_bug_43644/query.sql new file mode 100644 index 000000000..c86988f83 --- /dev/null +++ b/parser/testdata/02903_bug_43644/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + `machine_id` UInt64, + `name` String, + `timestamp` DateTime +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(timestamp) +ORDER BY machine_id; + +insert into tab(machine_id, name, timestamp) +select 1, 'a_name', '2022-11-24 12:00:00'; + +SELECT + toStartOfInterval(timestamp, INTERVAL 300 SECOND) AS ts +FROM tab +WHERE ts > '2022-11-24 11:19:00' +GROUP BY ts; + +DROP TABLE tab; diff --git a/parser/testdata/02903_parameterized_view_explain_ast/ast.json b/parser/testdata/02903_parameterized_view_explain_ast/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02903_parameterized_view_explain_ast/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02903_parameterized_view_explain_ast/metadata.json b/parser/testdata/02903_parameterized_view_explain_ast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02903_parameterized_view_explain_ast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02903_parameterized_view_explain_ast/query.sql b/parser/testdata/02903_parameterized_view_explain_ast/query.sql new file mode 100644 index 000000000..6af6dab2f --- /dev/null +++ b/parser/testdata/02903_parameterized_view_explain_ast/query.sql @@ -0,0 +1,3 @@ +EXPLAIN AST +CREATE VIEW numbers_pv AS +SELECT * FROM numbers LIMIT {amount:UInt8}; \ No newline at end of file diff --git a/parser/testdata/02905_show_setting_query/ast.json b/parser/testdata/02905_show_setting_query/ast.json new file mode 100644 index 000000000..0c69a11fd --- /dev/null +++ b/parser/testdata/02905_show_setting_query/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001254057, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02905_show_setting_query/metadata.json b/parser/testdata/02905_show_setting_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02905_show_setting_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02905_show_setting_query/query.sql b/parser/testdata/02905_show_setting_query/query.sql new file mode 100644 index 000000000..bbbb1a7e2 --- /dev/null +++ b/parser/testdata/02905_show_setting_query/query.sql @@ -0,0 +1,7 @@ +SET max_threads = 1; +SHOW SETTING max_threads; + +SET max_threads = 2; +SHOW SETTING max_threads; + +SHOW SETTING `max_threads' OR name = 'max_memory_usage`; diff --git a/parser/testdata/02905_system_logs_hostname/ast.json b/parser/testdata/02905_system_logs_hostname/ast.json new file mode 100644 index 000000000..b35901c81 --- /dev/null +++ b/parser/testdata/02905_system_logs_hostname/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'test hostname in system log tables'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001322547, + "rows_read": 5, + "bytes_read": 205 
+ } +} diff --git a/parser/testdata/02905_system_logs_hostname/metadata.json b/parser/testdata/02905_system_logs_hostname/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02905_system_logs_hostname/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02905_system_logs_hostname/query.sql b/parser/testdata/02905_system_logs_hostname/query.sql new file mode 100644 index 000000000..3d0f5466c --- /dev/null +++ b/parser/testdata/02905_system_logs_hostname/query.sql @@ -0,0 +1,26 @@ +SELECT 'test hostname in system log tables'; + +set log_query_threads=1; +set log_queries_min_type='QUERY_FINISH'; +set log_queries=1; +select '02095_system_logs_hostname' from system.one format Null; +set log_queries=0; +set log_query_threads=0; + +system flush logs query_log, query_thread_log; + +select hostname +from system.query_log +where + query like 'select \'02095_system_logs_hostname%' + and current_database = currentDatabase() + and event_date >= yesterday() LIMIT 1 FORMAT Null; + + +select hostName(), hostname +from system.query_thread_log +where + query like 'select \'02095_system_logs_hostname%' + and current_database = currentDatabase() + and event_date >= yesterday() LIMIT 1 FORMAT Null; + diff --git a/parser/testdata/02906_flatten_only_true_nested/ast.json b/parser/testdata/02906_flatten_only_true_nested/ast.json new file mode 100644 index 000000000..b4eb4df82 --- /dev/null +++ b/parser/testdata/02906_flatten_only_true_nested/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001095833, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02906_flatten_only_true_nested/metadata.json b/parser/testdata/02906_flatten_only_true_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02906_flatten_only_true_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02906_flatten_only_true_nested/query.sql b/parser/testdata/02906_flatten_only_true_nested/query.sql new file mode 100644 index 000000000..e930b46bd --- /dev/null +++ b/parser/testdata/02906_flatten_only_true_nested/query.sql @@ -0,0 +1,9 @@ +set flatten_nested = 1; +drop table if exists test_nested; +create table test_nested (data Nested(x UInt32, y UInt32)) engine=Memory; +desc test_nested; +drop table test_nested; +drop table if exists test_array_tuple; +create table test_array_tuple (data Array(Tuple(x UInt64, y UInt64))) engine=Memory; +desc test_array_tuple; +drop table test_array_tuple; diff --git a/parser/testdata/02906_force_optimize_projection_name/ast.json b/parser/testdata/02906_force_optimize_projection_name/ast.json new file mode 100644 index 000000000..497bb064f --- /dev/null +++ b/parser/testdata/02906_force_optimize_projection_name/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001251179, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02906_force_optimize_projection_name/metadata.json b/parser/testdata/02906_force_optimize_projection_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02906_force_optimize_projection_name/metadata.json @@ -0,0 +1 
@@ +{"todo": true} diff --git a/parser/testdata/02906_force_optimize_projection_name/query.sql b/parser/testdata/02906_force_optimize_projection_name/query.sql new file mode 100644 index 000000000..aa9c9a5e0 --- /dev/null +++ b/parser/testdata/02906_force_optimize_projection_name/query.sql @@ -0,0 +1,37 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + `id` UInt64, + `name` String, + PROJECTION projection_name + ( + SELECT sum(id) GROUP BY id, name + ) +) +ENGINE = MergeTree() +ORDER BY id +SETTINGS index_granularity_bytes = 10000; + +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +INSERT INTO test SELECT number, 'test' FROM numbers(1, 100); + +SELECT name FROM test GROUP BY name SETTINGS force_optimize_projection_name='projection_name'; + +SELECT name FROM test GROUP BY name SETTINGS force_optimize_projection_name='non_existing_projection'; -- { serverError INCORRECT_DATA } + +SELECT name FROM test SETTINGS force_optimize_projection_name='projection_name'; -- { serverError INCORRECT_DATA } + +INSERT INTO test SELECT number, 'test' FROM numbers(1, 100) SETTINGS force_optimize_projection_name='projection_name'; +SELECT 1 SETTINGS force_optimize_projection_name='projection_name'; + +SYSTEM FLUSH LOGS query_log; + +SELECT read_rows FROM system.query_log +WHERE current_database = currentDatabase() + AND query LIKE '%SELECT name FROM test%' + AND Settings['force_optimize_projection_name'] = 'projection_name' + AND type = 'ExceptionBeforeStart'; + +DROP TABLE test; diff --git a/parser/testdata/02906_interval_comparison/ast.json b/parser/testdata/02906_interval_comparison/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02906_interval_comparison/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02906_interval_comparison/metadata.json b/parser/testdata/02906_interval_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02906_interval_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02906_interval_comparison/query.sql b/parser/testdata/02906_interval_comparison/query.sql new file mode 100644 index 000000000..feaf40326 --- /dev/null +++ b/parser/testdata/02906_interval_comparison/query.sql @@ -0,0 +1,7 @@ +-- Comparing the same types is ok: +SELECT INTERVAL 1 SECOND = INTERVAL 1 SECOND; +-- It is reasonable to not give an answer for this: +SELECT INTERVAL 30 DAY < INTERVAL 1 MONTH; -- { serverError NO_COMMON_TYPE } +-- This we could change in the future: +SELECT INTERVAL 1 SECOND = INTERVAL 1 YEAR; -- { serverError NO_COMMON_TYPE } +SELECT INTERVAL 1 SECOND <= INTERVAL 1 YEAR; -- { serverError NO_COMMON_TYPE } diff --git a/parser/testdata/02906_orc_tuple_field_prune/ast.json b/parser/testdata/02906_orc_tuple_field_prune/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02906_orc_tuple_field_prune/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02906_orc_tuple_field_prune/metadata.json b/parser/testdata/02906_orc_tuple_field_prune/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02906_orc_tuple_field_prune/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02906_orc_tuple_field_prune/query.sql b/parser/testdata/02906_orc_tuple_field_prune/query.sql new file mode 100644 index 000000000..5428abc40 --- /dev/null +++ 
b/parser/testdata/02906_orc_tuple_field_prune/query.sql @@ -0,0 +1,40 @@ +-- Tags: no-fasttest, no-parallel + +set engine_file_truncate_on_insert = 1; +set flatten_nested = 0; + +insert into function file('02906.orc', 'ORC') +select + number::Int64 as int64_column, + number::String as string_column, + number::Float64 as float64_column, + cast(if(number % 10 = 0, tuple(null, null, null), tuple(number::String, number::Float64, number::Int64)) as Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64))) as tuple_column, + cast(if(number % 10 = 0, array(tuple(null, null, null)), array(tuple(number::String, number::Float64, number::Int64))) as Array(Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64)))) as array_tuple_column, + cast(if(number % 10 = 0, map(number::String, tuple(null, null, null)), map(number::String, tuple(number::String, number::Float64, number::Int64))) as Map(String, Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64)))) as map_tuple_column + from numbers(100); + +desc file('02906.orc'); + +-- { echoOn } +-- Test primitive types +select int64_column, string_column, float64_column from file('02906.orc') where int64_column % 15 = 0; + +-- Test tuple type with names +select tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, tuple_column Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64))') where int64_column % 15 = 0; +select tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, tuple_column Tuple(c Nullable(Int64))') where int64_column % 15 = 0; +select tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, tuple_column Tuple(c Nullable(Int64), d Nullable(String))') where int64_column % 15 = 0; + +-- Test tuple type without names +select tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, tuple_column Tuple(Nullable(String), Nullable(Float64), Nullable(Int64))') where int64_column % 15 = 0; +select tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, tuple_column Tuple(Nullable(String), Nullable(Float64))') where int64_column % 15 = 0; + +-- Test tuple nested in array +select array_tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, array_tuple_column Array(Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64)))') where int64_column % 15 = 0; +select array_tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, array_tuple_column Array(Tuple(b Nullable(Float64), c Nullable(Int64)))') where int64_column % 15 = 0; +select array_tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, array_tuple_column Array(Tuple(b Nullable(Float64), c Nullable(Int64), d Nullable(String)))') where int64_column % 15 = 0; + +-- Test tuple nested in map +select map_tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, map_tuple_column Map(String, Tuple(a Nullable(String), b Nullable(Float64), c Nullable(Int64)))') where int64_column % 15 = 0; +select map_tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, map_tuple_column Map(String, Tuple(b Nullable(Float64), c Nullable(Int64)))') where int64_column % 15 = 0; +select map_tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, map_tuple_column Map(String, Tuple(b Nullable(Float64), c Nullable(Int64), d Nullable(String)))') where int64_column % 15 = 0; +-- { echoOff } diff --git a/parser/testdata/02907_filter_pushdown_crash/ast.json b/parser/testdata/02907_filter_pushdown_crash/ast.json new file mode 100644 index 000000000..71d4898a8 --- /dev/null +++ 
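+-- The ORC pruning pattern used in 02906_orc_tuple_field_prune above, in one sketch:
+-- naming only a subset of the tuple's fields reads just those columns from the file,
+-- so this should return the 'a' field alone without touching 'b' or 'c':
+select tuple_column from file('02906.orc', 'ORC', 'int64_column Int64, tuple_column Tuple(a Nullable(String))') where int64_column % 15 = 0;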
b/parser/testdata/02907_filter_pushdown_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001379528, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02907_filter_pushdown_crash/metadata.json b/parser/testdata/02907_filter_pushdown_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02907_filter_pushdown_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02907_filter_pushdown_crash/query.sql b/parser/testdata/02907_filter_pushdown_crash/query.sql new file mode 100644 index 000000000..eb881823f --- /dev/null +++ b/parser/testdata/02907_filter_pushdown_crash/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (key UInt8) ENGINE = MergeTree ORDER BY key; +INSERT INTO t1 VALUES (1),(2); + +CREATE TABLE t2 (key UInt32) ENGINE = MergeTree ORDER BY key; +INSERT INTO t2 VALUES (1),(2); + +SELECT a FROM ( SELECT key + 1 as a, key FROM t1 GROUP BY key ) WHERE key FORMAT Null; + +SET join_algorithm = 'full_sorting_merge'; +SET max_rows_in_set_to_optimize_join = 0; + +SELECT key FROM ( SELECT key FROM t1 GROUP BY key ) t1 JOIN (SELECT key FROM t2) t2 ON t1.key = t2.key WHERE key FORMAT Null; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; diff --git a/parser/testdata/02907_fromDaysSinceYearZero/ast.json b/parser/testdata/02907_fromDaysSinceYearZero/ast.json new file mode 100644 index 000000000..60bf5a0d9 --- /dev/null +++ b/parser/testdata/02907_fromDaysSinceYearZero/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001436329, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02907_fromDaysSinceYearZero/metadata.json b/parser/testdata/02907_fromDaysSinceYearZero/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02907_fromDaysSinceYearZero/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02907_fromDaysSinceYearZero/query.sql b/parser/testdata/02907_fromDaysSinceYearZero/query.sql new file mode 100644 index 000000000..9f356080f --- /dev/null +++ b/parser/testdata/02907_fromDaysSinceYearZero/query.sql @@ -0,0 +1,41 @@ +SET session_timezone = 'Europe/Amsterdam'; -- disable time zone randomization in CI + +SELECT '-- negative tests'; +SELECT fromDaysSinceYearZero(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT fromDaysSinceYearZero32(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT fromDaysSinceYearZero(1, 2); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT fromDaysSinceYearZero32(1, 2); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT fromDaysSinceYearZero('needs a number'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT fromDaysSinceYearZero32('needs a number'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT fromDaysSinceYearZero(-3); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT fromDaysSinceYearZero32(-3); -- { serverError ARGUMENT_OUT_OF_BOUND } + +SELECT '-- UInt32 and Int32 arguments, both const and non-const'; +SELECT 719527 AS u, toInt32(u) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero(materialize(u)), fromDaysSinceYearZero(s), 
fromDaysSinceYearZero(materialize(s)); -- outside Date's range +SELECT 719528 AS u, toInt32(u) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero(materialize(u)), fromDaysSinceYearZero(s), fromDaysSinceYearZero(materialize(s)); +SELECT 719529 AS u, toInt32(u) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero(materialize(u)), fromDaysSinceYearZero(s), fromDaysSinceYearZero(materialize(s)); +SELECT 785062 AS u, toInt32(u) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero(materialize(u)), fromDaysSinceYearZero(s), fromDaysSinceYearZero(materialize(s)); +SELECT 785063 AS u, toInt32(u) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero(materialize(u)), fromDaysSinceYearZero(s), fromDaysSinceYearZero(materialize(s)); +SELECT 785064 AS u, toInt32(u) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero(materialize(u)), fromDaysSinceYearZero(s), fromDaysSinceYearZero(materialize(s)); -- outside Date's range + +SELECT 693960 AS u, toInt32(u) AS s, fromDaysSinceYearZero32(u), fromDaysSinceYearZero32(materialize(u)), fromDaysSinceYearZero32(s), fromDaysSinceYearZero32(materialize(s)); -- outside Date32's range +SELECT 693961 AS u, toInt32(u) AS s, fromDaysSinceYearZero32(u), fromDaysSinceYearZero32(materialize(u)), fromDaysSinceYearZero32(s), fromDaysSinceYearZero32(materialize(s)); +SELECT 693962 AS u, toInt32(u) AS s, fromDaysSinceYearZero32(u), fromDaysSinceYearZero32(materialize(u)), fromDaysSinceYearZero32(s), fromDaysSinceYearZero32(materialize(s)); +SELECT 840056 AS u, toInt32(u) AS s, fromDaysSinceYearZero32(u), fromDaysSinceYearZero32(materialize(u)), fromDaysSinceYearZero32(s), fromDaysSinceYearZero32(materialize(s)); +SELECT 840057 AS u, toInt32(u) AS s, fromDaysSinceYearZero32(u), fromDaysSinceYearZero32(materialize(u)), fromDaysSinceYearZero32(s), fromDaysSinceYearZero32(materialize(s)); +SELECT 840058 AS u, toInt32(u) AS s, fromDaysSinceYearZero32(u), fromDaysSinceYearZero32(materialize(u)), fromDaysSinceYearZero32(s), fromDaysSinceYearZero32(materialize(s)); -- outside Date32's range + +SELECT '-- integer types != (U)Int32'; +SELECT toUInt8(255) AS u, toInt8(127) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero32(u), fromDaysSinceYearZero(s), fromDaysSinceYearZero32(s); -- outside Date's range for all (U)Int8-s +SELECT toUInt16(65535) AS u, toInt16(32767) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero32(u), fromDaysSinceYearZero(s), fromDaysSinceYearZero32(s); -- outside Date's range for all (U)Int16-s +SELECT toUInt64(719529) AS u, toInt64(719529) AS s, fromDaysSinceYearZero(u), fromDaysSinceYearZero32(u), fromDaysSinceYearZero(s), fromDaysSinceYearZero32(s); -- something useful + +SELECT '-- NULL handling'; +SELECT fromDaysSinceYearZero(NULL), fromDaysSinceYearZero32(NULL); + +SELECT '-- ubsan bugs'; +SELECT fromDaysSinceYearZero32(2147483648); +SELECT fromDaysSinceYearZero32(3); + +SELECT '-- Alias'; +SELECT FROM_DAYS(1); diff --git a/parser/testdata/02907_read_buffer_content_is_cached_multiple_blobs/ast.json b/parser/testdata/02907_read_buffer_content_is_cached_multiple_blobs/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02907_read_buffer_content_is_cached_multiple_blobs/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02907_read_buffer_content_is_cached_multiple_blobs/metadata.json b/parser/testdata/02907_read_buffer_content_is_cached_multiple_blobs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
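+-- Where the boundary constants in 02907_fromDaysSinceYearZero above come from (sketch):
+-- Date stores days since 1970-01-01 in a UInt16, so the valid inputs are 719528..785063
+-- and indeed 785063 - 719528 = 65535; Date32 covers 1900-01-01..2299-12-31, i.e. 693961..840057:
+SELECT fromDaysSinceYearZero(719528) = toDate('1970-01-01'), fromDaysSinceYearZero32(693961) = toDate32('1900-01-01');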
b/parser/testdata/02907_read_buffer_content_is_cached_multiple_blobs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02907_read_buffer_content_is_cached_multiple_blobs/query.sql b/parser/testdata/02907_read_buffer_content_is_cached_multiple_blobs/query.sql new file mode 100644 index 000000000..e8216c3df --- /dev/null +++ b/parser/testdata/02907_read_buffer_content_is_cached_multiple_blobs/query.sql @@ -0,0 +1,17 @@ +-- Tags: no-fasttest + +-- We want to test `isContentCached(offset, size)` method implementation in ReadBufferFromRemoteFSGather and CachedOnDiskReadBufferFromFile +-- Specifically, how they handle `offset` parameter when we have multiple S3 objects representing a single ClickHouse file +-- Log engine table files will be represented by multiple objects on S3 +CREATE TABLE t(a UInt64) +ENGINE = Log +SETTINGS disk = 's3_cache'; + +INSERT INTO t SELECT number FROM numbers_mt(1e6); +INSERT INTO t SELECT number FROM numbers_mt(1e6); + +-- First of all, the cache should be warmed up +SELECT * FROM t FORMAT Null; + +-- Now we can do the actual test. All we need is successful completion w/o exceptions +SELECT * FROM t FORMAT Null; diff --git a/parser/testdata/02908_alter_column_alias/ast.json b/parser/testdata/02908_alter_column_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02908_alter_column_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02908_alter_column_alias/metadata.json b/parser/testdata/02908_alter_column_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02908_alter_column_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02908_alter_column_alias/query.sql b/parser/testdata/02908_alter_column_alias/query.sql new file mode 100644 index 000000000..fd98339e8 --- /dev/null +++ b/parser/testdata/02908_alter_column_alias/query.sql @@ -0,0 +1,8 @@ +CREATE TABLE t ( + c0 DateTime, + c1 DateTime, + a DateTime alias toStartOfFifteenMinutes(c0) +) ENGINE = MergeTree() ORDER BY tuple(); + +ALTER TABLE t MODIFY COLUMN a DateTime ALIAS c1; +SHOW CREATE t; diff --git a/parser/testdata/02908_empty_named_collection/ast.json b/parser/testdata/02908_empty_named_collection/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02908_empty_named_collection/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02908_empty_named_collection/metadata.json b/parser/testdata/02908_empty_named_collection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02908_empty_named_collection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02908_empty_named_collection/query.sql b/parser/testdata/02908_empty_named_collection/query.sql new file mode 100644 index 000000000..6aab83858 --- /dev/null +++ b/parser/testdata/02908_empty_named_collection/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-parallel + +CREATE NAMED COLLECTION foobar03 AS a = 1; +ALTER NAMED COLLECTION foobar03 DELETE b; -- { serverError BAD_ARGUMENTS } +DROP NAMED COLLECTION foobar03; diff --git a/parser/testdata/02908_filesystem_cache_as_collection/ast.json b/parser/testdata/02908_filesystem_cache_as_collection/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02908_filesystem_cache_as_collection/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git
a/parser/testdata/02908_filesystem_cache_as_collection/metadata.json b/parser/testdata/02908_filesystem_cache_as_collection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02908_filesystem_cache_as_collection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02908_filesystem_cache_as_collection/query.sql b/parser/testdata/02908_filesystem_cache_as_collection/query.sql new file mode 100644 index 000000000..a86d97bca --- /dev/null +++ b/parser/testdata/02908_filesystem_cache_as_collection/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-fasttest, no-replicated-database + +CREATE NAMED COLLECTION IF NOT EXISTS cache_collection_sql AS path = 'collection_sql', max_size = '1Mi'; +DROP TABLE IF EXISTS test; +CREATE TABLE test (a Int32, b String) +ENGINE = MergeTree() ORDER BY a SETTINGS disk = disk(type = cache, disk = 'local_disk', name = 'cache_with_sql_collection', cache_name='cache_collection_sql', load_metadata_asynchronously = 0); +select path from system.filesystem_cache_settings where cache_name = 'cache_with_sql_collection'; +CREATE TABLE test2 (a Int32, b String) +ENGINE = MergeTree() ORDER BY a SETTINGS disk = disk(type = cache, disk = 'local_disk', name = 'cache_with_collection', cache_name='cache_collection', load_metadata_asynchronously = 0); +select path from system.filesystem_cache_settings where cache_name = 'cache_with_collection'; diff --git a/parser/testdata/02910_nullable_enum_cast/ast.json b/parser/testdata/02910_nullable_enum_cast/ast.json new file mode 100644 index 000000000..76d0227fd --- /dev/null +++ b/parser/testdata/02910_nullable_enum_cast/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal 'Nullable(Enum(\\'A\\' = 1, \\'B\\' = 2))'" + }, + { + "explain": " Literal 'Nullable(String)'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001466929, + "rows_read": 13, + "bytes_read": 536 + } +} diff --git a/parser/testdata/02910_nullable_enum_cast/metadata.json b/parser/testdata/02910_nullable_enum_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02910_nullable_enum_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02910_nullable_enum_cast/query.sql b/parser/testdata/02910_nullable_enum_cast/query.sql new file mode 100644 index 000000000..09189539c --- /dev/null +++ b/parser/testdata/02910_nullable_enum_cast/query.sql @@ -0,0 +1,4 @@ +SELECT CAST(materialize(CAST(NULL, 'Nullable(Enum(\'A\' = 1, \'B\' = 2))')), 'Nullable(String)'); +SELECT CAST(CAST(NULL, 'Nullable(Enum(\'A\' = 1, \'B\' = 2))'), 'Nullable(String)'); +SELECT CAST(materialize(CAST(1, 'Nullable(Enum(\'A\' = 1, \'B\' = 2))')), 'Nullable(String)'); +SELECT CAST(CAST(1, 'Nullable(Enum(\'A\' = 1, \'B\' = 2))'), 'Nullable(String)'); diff --git a/parser/testdata/02910_prefetch_unexpceted_exception/ast.json 
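+-- Expected behaviour of the casts in 02910_nullable_enum_cast above, as a sketch:
+-- NULL survives both casts unchanged, and a valid enum value converts to its name:
+SELECT CAST(CAST(1, 'Nullable(Enum(\'A\' = 1, \'B\' = 2))'), 'Nullable(String)') = 'A';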
b/parser/testdata/02910_prefetch_unexpceted_exception/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02910_prefetch_unexpceted_exception/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02910_prefetch_unexpceted_exception/metadata.json b/parser/testdata/02910_prefetch_unexpceted_exception/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02910_prefetch_unexpceted_exception/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02910_prefetch_unexpceted_exception/query.sql b/parser/testdata/02910_prefetch_unexpceted_exception/query.sql new file mode 100644 index 000000000..d03acf7c7 --- /dev/null +++ b/parser/testdata/02910_prefetch_unexpceted_exception/query.sql @@ -0,0 +1,24 @@ +-- Tags: no-parallel, no-random-settings, no-random-merge-tree-settings +-- no-parallel -- enables failpoint +-- no-random-settings -- depends on the type of part; should always fail +drop table if exists prefetched_table; + +CREATE TABLE prefetched_table(key UInt64, s String) Engine = MergeTree() order by key; + +INSERT INTO prefetched_table SELECT rand(), randomString(5) from numbers(1000); +INSERT INTO prefetched_table SELECT rand(), randomString(5) from numbers(1000); +INSERT INTO prefetched_table SELECT rand(), randomString(5) from numbers(1000); +INSERT INTO prefetched_table SELECT rand(), randomString(5) from numbers(1000); +INSERT INTO prefetched_table SELECT rand(), randomString(5) from numbers(1000); + +SET local_filesystem_read_prefetch=1; +SET allow_prefetched_read_pool_for_remote_filesystem=1; +SET allow_prefetched_read_pool_for_local_filesystem=1; + +SYSTEM ENABLE FAILPOINT prefetched_reader_pool_failpoint; + +SELECT * FROM prefetched_table FORMAT Null; --{serverError BAD_ARGUMENTS} + +SYSTEM DISABLE FAILPOINT prefetched_reader_pool_failpoint; + +drop table if exists prefetched_table; diff --git a/parser/testdata/02910_replicated_merge_parameters_must_consistent/ast.json b/parser/testdata/02910_replicated_merge_parameters_must_consistent/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02910_replicated_merge_parameters_must_consistent/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02910_replicated_merge_parameters_must_consistent/metadata.json b/parser/testdata/02910_replicated_merge_parameters_must_consistent/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02910_replicated_merge_parameters_must_consistent/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02910_replicated_merge_parameters_must_consistent/query.sql b/parser/testdata/02910_replicated_merge_parameters_must_consistent/query.sql new file mode 100644 index 000000000..ec19e54e9 --- /dev/null +++ b/parser/testdata/02910_replicated_merge_parameters_must_consistent/query.sql @@ -0,0 +1,135 @@ +-- Tags: zookeeper, no-replicated-database, no-shared-merge-tree + +CREATE TABLE t +( + `id` UInt64, + `val` String, + `legacy_ver` UInt64, +) +ENGINE = ReplicatedReplacingMergeTree('/tables/{database}/t/', 'r1', legacy_ver) +ORDER BY id; + +CREATE TABLE t_r_ok +( + `id` UInt64, + `val` String, + `legacy_ver` UInt64, +) +ENGINE = ReplicatedReplacingMergeTree('/tables/{database}/t/', 'r2', legacy_ver) +ORDER BY id; + +CREATE TABLE t_r_error +( + `id` UInt64, + `val` String, + `legacy_ver` UInt64 +) +ENGINE = ReplicatedReplacingMergeTree('/tables/{database}/t/', 'r3') +ORDER BY id; -- {
serverError METADATA_MISMATCH } + +CREATE TABLE t2 +( + `id` UInt64, + `val` String, + `legacy_ver` UInt64, + `deleted` UInt8 +) +ENGINE = ReplicatedReplacingMergeTree('/tables/{database}/t2/', 'r1', legacy_ver) +ORDER BY id; + +CREATE TABLE t2_r_ok +( + `id` UInt64, + `val` String, + `legacy_ver` UInt64, + `deleted` UInt8 +) +ENGINE = ReplicatedReplacingMergeTree('/tables/{database}/t2/', 'r2', legacy_ver) +ORDER BY id; + +CREATE TABLE t2_r_error +( + `id` UInt64, + `val` String, + `legacy_ver` UInt64, + `deleted` UInt8 +) +ENGINE = ReplicatedReplacingMergeTree('/tables/{database}/t2/', 'r3', legacy_ver, deleted) +ORDER BY id; -- { serverError METADATA_MISMATCH } + +CREATE TABLE t3 +( + `key` UInt64, + `metrics1` UInt64, + `metrics2` UInt64 +) +ENGINE = ReplicatedSummingMergeTree('/tables/{database}/t3/', 'r1', metrics1) +ORDER BY key; + +CREATE TABLE t3_r_ok +( + `key` UInt64, + `metrics1` UInt64, + `metrics2` UInt64 +) +ENGINE = ReplicatedSummingMergeTree('/tables/{database}/t3/', 'r2', metrics1) +ORDER BY key; + + +CREATE TABLE t3_r_error +( + `key` UInt64, + `metrics1` UInt64, + `metrics2` UInt64 +) +ENGINE = ReplicatedSummingMergeTree('/tables/{database}/t3/', 'r3', metrics2) +ORDER BY key; -- { serverError METADATA_MISMATCH } + +CREATE TABLE t4 +( + `key` UInt32, + `Path` String, + `Time` DateTime('UTC'), + `Value` Float64, + `Version` UInt32, + `col` UInt64 +) +ENGINE = ReplicatedGraphiteMergeTree('/tables/{database}/t4/', 'r1', 'graphite_rollup') +ORDER BY key; + +CREATE TABLE t4_r_ok +( + `key` UInt32, + `Path` String, + `Time` DateTime('UTC'), + `Value` Float64, + `Version` UInt32, + `col` UInt64 +) +ENGINE = ReplicatedGraphiteMergeTree('/tables/{database}/t4/', 'r2', 'graphite_rollup') +ORDER BY key; + +CREATE TABLE t4_r_error +( + `key` UInt32, + `Path` String, + `Time` DateTime('UTC'), + `Value` Float64, + `Version` UInt32, + `col` UInt64 +) +ENGINE = ReplicatedGraphiteMergeTree('/tables/{database}/t4/', 'r3', 'graphite_rollup_alternative') +ORDER BY key; -- { serverError METADATA_MISMATCH } + +-- https://github.com/ClickHouse/ClickHouse/issues/58451 +CREATE TABLE t4_r_error_2 +( + `key` UInt32, + `Path` String, + `Time` DateTime('UTC'), + `Value` Float64, + `Version` UInt32, + `col` UInt64 +) +ENGINE = ReplicatedGraphiteMergeTree('/tables/{database}/t4/', 'r4', 'graphite_rollup_alternative_no_function') +ORDER BY key; -- { serverError METADATA_MISMATCH } \ No newline at end of file diff --git a/parser/testdata/02910_rocksdb_optimize/ast.json b/parser/testdata/02910_rocksdb_optimize/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02910_rocksdb_optimize/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02910_rocksdb_optimize/metadata.json b/parser/testdata/02910_rocksdb_optimize/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02910_rocksdb_optimize/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02910_rocksdb_optimize/query.sql b/parser/testdata/02910_rocksdb_optimize/query.sql new file mode 100644 index 000000000..575ba6db2 --- /dev/null +++ b/parser/testdata/02910_rocksdb_optimize/query.sql @@ -0,0 +1,5 @@ +-- Tags: use-rocksdb + +CREATE TABLE dict (key UInt64, value String) ENGINE = EmbeddedRocksDB PRIMARY KEY key; +INSERT INTO dict SELECT number, toString(number) FROM numbers(1e3); +OPTIMIZE TABLE dict; diff --git a/parser/testdata/02911_add_index_and_materialize_index/ast.json 
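+-- The invariant behind the METADATA_MISMATCH cases in
+-- 02910_replicated_merge_parameters_must_consistent above, as a sketch: merge
+-- parameters such as the version column are part of the shared ZooKeeper metadata,
+-- so every replica of one path must declare them identically, while a distinct path
+-- is an independent table and may differ freely:
+CREATE TABLE t3_other (`key` UInt64, `metrics1` UInt64, `metrics2` UInt64)
+ENGINE = ReplicatedSummingMergeTree('/tables/{database}/t3_other/', 'r1', metrics2)
+ORDER BY key;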
b/parser/testdata/02911_add_index_and_materialize_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02911_add_index_and_materialize_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02911_add_index_and_materialize_index/metadata.json b/parser/testdata/02911_add_index_and_materialize_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02911_add_index_and_materialize_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02911_add_index_and_materialize_index/query.sql b/parser/testdata/02911_add_index_and_materialize_index/query.sql new file mode 100644 index 000000000..f8785ec9a --- /dev/null +++ b/parser/testdata/02911_add_index_and_materialize_index/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-replicated-database + +DROP TABLE IF EXISTS index_test; + +CREATE TABLE index_test +( + x UInt32, + y UInt32, + z UInt32 +) ENGINE = MergeTree order by x; + +ALTER TABLE index_test + ADD INDEX i_x mortonDecode(2, z).1 TYPE minmax GRANULARITY 1, + ADD INDEX i_y mortonDecode(2, z).2 TYPE minmax GRANULARITY 1, + MATERIALIZE INDEX i_x, + MATERIALIZE INDEX i_y; + +drop table index_test; diff --git a/parser/testdata/02911_analyzer_explain_estimate/ast.json b/parser/testdata/02911_analyzer_explain_estimate/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02911_analyzer_explain_estimate/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02911_analyzer_explain_estimate/metadata.json b/parser/testdata/02911_analyzer_explain_estimate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02911_analyzer_explain_estimate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02911_analyzer_explain_estimate/query.sql b/parser/testdata/02911_analyzer_explain_estimate/query.sql new file mode 100644 index 000000000..77f30ba82 --- /dev/null +++ b/parser/testdata/02911_analyzer_explain_estimate/query.sql @@ -0,0 +1,5 @@ +-- Tags: distributed + +SET enable_analyzer = 1; + +EXPLAIN ESTIMATE SELECT 0 = 1048577, NULL, groupBitmapOr(bitmapBuild([toInt32(65537)])) FROM cluster(test_cluster_two_shards) WHERE NULL = 1048575; diff --git a/parser/testdata/02911_analyzer_order_by_read_in_order_query_plan/ast.json b/parser/testdata/02911_analyzer_order_by_read_in_order_query_plan/ast.json new file mode 100644 index 000000000..63d6b07d3 --- /dev/null +++ b/parser/testdata/02911_analyzer_order_by_read_in_order_query_plan/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001382884, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02911_analyzer_order_by_read_in_order_query_plan/metadata.json b/parser/testdata/02911_analyzer_order_by_read_in_order_query_plan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02911_analyzer_order_by_read_in_order_query_plan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02911_analyzer_order_by_read_in_order_query_plan/query.sql b/parser/testdata/02911_analyzer_order_by_read_in_order_query_plan/query.sql new file mode 100644 index 000000000..2c1941f2a --- /dev/null +++ b/parser/testdata/02911_analyzer_order_by_read_in_order_query_plan/query.sql @@ -0,0 +1,156 @@ +SET optimize_read_in_order = 1, 
query_plan_read_in_order = 1, enable_analyzer = 1; + +drop table if exists tab; +drop table if exists tab2; +drop table if exists tab3; +drop table if exists tab4; +drop table if exists tab5; + +create table tab (a UInt32, b UInt32, c UInt32, d UInt32) engine = MergeTree order by ((a + b) * c, sin(a / b)); +insert into tab select number, number, number, number from numbers(5); +insert into tab select number, number, number, number from numbers(5); + +-- { echoOn } + +-- Exact match, single key +select * from tab order by (a + b) * c; +select * from (explain plan actions = 1 select * from tab order by (a + b) * c) where explain like '%sort description%'; + +select * from tab order by (a + b) * c desc; +select * from (explain plan actions = 1 select * from tab order by (a + b) * c desc) where explain like '%sort description%'; + +-- Exact match, full key +select * from tab order by (a + b) * c, sin(a / b); +select * from (explain plan actions = 1 select * from tab order by (a + b) * c, sin(a / b)) where explain like '%sort description%'; + +select * from tab order by (a + b) * c desc, sin(a / b) desc nulls first; +select * from (explain plan actions = 1 select * from tab order by (a + b) * c desc, sin(a / b) desc nulls first) where explain like '%sort description%'; + +-- Exact match, mixed direction +select * from tab order by (a + b) * c desc, sin(a / b); +select * from (explain plan actions = 1 select * from tab order by (a + b) * c desc, sin(a / b)) where explain like '%sort description%'; + +select * from tab order by (a + b) * c, sin(a / b) desc; +select * from (explain plan actions = 1 select * from tab order by (a + b) * c, sin(a / b) desc) where explain like '%sort description%'; + +-- Wrong order, full sort +select * from tab order by sin(a / b), (a + b) * c; +select * from (explain plan actions = 1 select * from tab order by sin(a / b), (a + b) * c) where explain ilike '%sort description%'; + +-- Fixed point +select * from tab where (a + b) * c = 8 order by sin(a / b); +select * from (explain plan actions = 1 select * from tab where (a + b) * c = 8 order by sin(a / b)) where explain ilike '%sort description%'; + +select * from tab where d + 1 = 2 order by (d + 1) * 4, (a + b) * c; +select * from (explain plan actions = 1 select * from tab where d + 1 = 2 order by (d + 1) * 4, (a + b) * c) where explain ilike '%sort description%'; + +select * from tab where d + 1 = 3 and (a + b) = 4 and c = 2 order by (d + 1) * 4, sin(a / b); +select * from (explain plan actions = 1 select * from tab where d + 1 = 3 and (a + b) = 4 and c = 2 order by (d + 1) * 4, sin(a / b)) where explain ilike '%sort description%'; + +-- Wrong order with fixed point +select * from tab where (a + b) * c = 8 order by sin(b / a); +select * from (explain plan actions = 1 select * from tab where (a + b) * c = 8 order by sin(b / a)) where explain ilike '%sort description%'; + +-- Monotonicity +select * from tab order by intDiv((a + b) * c, 2); +select * from (explain plan actions = 1 select * from tab order by intDiv((a + b) * c, 2)) where explain like '%sort description%'; + +select * from tab order by intDiv((a + b) * c, 2), sin(a / b); +select * from (explain plan actions = 1 select * from tab order by intDiv((a + b) * c, 2), sin(a / b)) where explain like '%sort description%'; + +-- select * from tab order by (a + b) * c, intDiv(sin(a / b), 2); +select * from (explain plan actions = 1 select * from tab order by (a + b) * c, intDiv(sin(a / b), 2)) where explain like '%sort description%'; + +-- select * from tab order 
by (a + b) * c desc , intDiv(sin(a / b), 2); +select * from (explain plan actions = 1 select * from tab order by (a + b) * c desc , intDiv(sin(a / b), 2)) where explain like '%sort description%'; + +-- select * from tab order by (a + b) * c, intDiv(sin(a / b), 2) desc; +select * from (explain plan actions = 1 select * from tab order by (a + b) * c, intDiv(sin(a / b), 2) desc) where explain like '%sort description%'; + +-- select * from tab order by (a + b) * c desc, intDiv(sin(a / b), 2) desc; +select * from (explain plan actions = 1 select * from tab order by (a + b) * c desc, intDiv(sin(a / b), 2) desc nulls first) where explain like '%sort description%'; + +-- select * from tab order by (a + b) * c desc, intDiv(sin(a / b), -2); +select * from (explain plan actions = 1 select * from tab order by (a + b) * c desc, intDiv(sin(a / b), -2)) where explain like '%sort description%'; + +-- select * from tab order by (a + b) * c desc, intDiv(intDiv(sin(a / b), -2), -3); +select * from (explain plan actions = 1 select * from tab order by (a + b) * c desc, intDiv(intDiv(sin(a / b), -2), -3)) where explain like '%sort description%'; + +-- select * from tab order by (a + b) * c, intDiv(intDiv(sin(a / b), -2), -3); +select * from (explain plan actions = 1 select * from tab order by (a + b) * c, intDiv(intDiv(sin(a / b), -2), -3)) where explain like '%sort description%'; + +-- Aliases +select * from (select *, a + b as x from tab) order by x * c; +select * from (explain plan actions = 1 select * from (select *, a + b as x from tab) order by x * c) where explain like '%sort description%'; + +select * from (select *, a + b as x, a / b as y from tab) order by x * c, sin(y); +select * from (explain plan actions = 1 select * from (select *, a + b as x, a / b as y from tab) order by x * c, sin(y)) where explain like '%sort description%'; + +select * from (select *, a / b as y from (select *, a + b as x from tab)) order by x * c, sin(y); +select * from (explain plan actions = 1 select * from (select *, a / b as y from (select *, a + b as x from tab)) order by x * c, sin(y)) where explain like '%sort description%'; + +-- { echoOff } + +create table tab2 (x DateTime, y UInt32, z UInt32) engine = MergeTree order by (x, y); +insert into tab2 select toDate('2020-02-02') + number, number, number from numbers(4); +insert into tab2 select toDate('2020-02-02') + number, number, number from numbers(4); + +-- { echoOn } + +select * from tab2 order by toTimeZone(toTimezone(x, 'UTC'), 'CET'), intDiv(intDiv(y, -2), -3); +select * from (explain plan actions = 1 select * from tab2 order by toTimeZone(toTimezone(x, 'UTC'), 'CET'), intDiv(intDiv(y, -2), -3)) where explain like '%sort description%'; + +select * from tab2 order by toStartOfDay(x), intDiv(intDiv(y, -2), -3); +select * from (explain plan actions = 1 select * from tab2 order by toStartOfDay(x), intDiv(intDiv(y, -2), -3)) where explain like '%sort description%'; + +-- select * from tab2 where toTimezone(x, 'CET') = '2020-02-03 01:00:00' order by intDiv(intDiv(y, -2), -3); +select * from (explain plan actions = 1 select * from tab2 where toTimezone(x, 'CET') = '2020-02-03 01:00:00' order by intDiv(intDiv(y, -2), -3)) where explain like '%sort description%'; + +-- { echoOff } + +create table tab3 (a UInt32, b UInt32, c UInt32, d UInt32) engine = MergeTree order by ((a + b) * c, sin(a / b)); +insert into tab3 select number, number, number, number from numbers(5); +insert into tab3 select number, number, number, number from numbers(5); + +create table tab4 (a UInt32, b 
UInt32, c UInt32, d UInt32) engine = MergeTree order by sin(a / b); +insert into tab4 select number, number, number, number from numbers(5); +insert into tab4 select number, number, number, number from numbers(5); + +create table tab5 (a UInt32, b UInt32, c UInt32, d UInt32) engine = MergeTree order by (a + b) * c; +insert into tab5 select number, number, number, number from numbers(5); +insert into tab5 select number, number, number, number from numbers(5); + +-- { echoOn } + +-- Union (not fully supported) +select * from (select * from tab union all select * from tab3) order by (a + b) * c, sin(a / b); +select * from (explain plan actions = 1 select * from (select * from tab union all select * from tab3) order by (a + b) * c, sin(a / b)) where explain like '%sort description%' or explain like '%ReadType%'; + +select * from (select * from tab where (a + b) * c = 8 union all select * from tab3 where (a + b) * c = 18) order by sin(a / b); +select * from (explain plan actions = 1 select * from (select * from tab where (a + b) * c = 8 union all select * from tab3 where (a + b) * c = 18) order by sin(a / b)) where explain like '%sort description%' or explain like '%ReadType%'; + +select * from (select * from tab where (a + b) * c = 8 union all select * from tab4) order by sin(a / b); +select * from (explain plan actions = 1 select * from (select * from tab where (a + b) * c = 8 union all select * from tab4) order by sin(a / b)) where explain like '%sort description%' or explain like '%ReadType%'; + +select * from (select * from tab union all select * from tab5) order by (a + b) * c; +select * from (explain plan actions = 1 select * from (select * from tab union all select * from tab5) order by (a + b) * c) where explain like '%sort description%' or explain like '%ReadType%'; + +select * from (select * from tab union all select * from tab5) order by (a + b) * c, sin(a / b); +select * from (explain plan actions = 1 select * from (select * from tab union all select * from tab5) order by (a + b) * c, sin(a / b)) where explain like '%sort description%' or explain like '%ReadType%'; + +-- Union with limit +select * from (select * from tab union all select * from tab5) order by (a + b) * c, sin(a / b) limit 3; +select * from (explain plan actions = 1 select * from (select * from tab union all select * from tab5) order by (a + b) * c, sin(a / b) limit 3) where explain ilike '%sort description%' or explain like '%ReadType%' or explain like '%Limit%'; + +-- In this example, we read-in-order from tab up to ((a + b) * c, sin(a / b)) and from tab5 up to ((a + b) * c). +-- In case of tab5, there would be two finish sorting transforms: ((a + b) * c) -> ((a + b) * c, sin(a / b)) -> ((a + b) * c, sin(a / b), d). +-- It's important that ((a + b) * c) -> ((a + b) * c, sin(a / b)) does not have LIMIT. We can add LIMIT WITH TIES later, once the sorting algorithm supports it. +-- In case of tab4, we do full sorting by ((a + b) * c, sin(a / b), d) with LIMIT. We can replace it with sorting by ((a + b) * c, sin(a / b)) and LIMIT WITH TIES, once the sorting algorithm supports it.
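+-- Illustrative sketch (not part of the original test): LIMIT n WITH TIES extends the result +-- with every row whose sort key equals the sort key of the n-th row, e.g. +-- select x from (select arrayJoin([1, 1, 2, 2, 3]) as x) order by x limit 3 with ties; -- would yield 1, 1, 2, 2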
+select * from (select * from tab union all select * from tab5 union all select * from tab4) order by (a + b) * c, sin(a / b), d limit 3; +select * from (explain plan actions = 1 select * from (select * from tab union all select * from tab5 union all select * from tab4) order by (a + b) * c, sin(a / b), d limit 3) where explain ilike '%sort description%' or explain like '%ReadType%' or explain like '%Limit%'; + +drop table if exists tab; +drop table if exists tab2; +drop table if exists tab3; +drop table if exists tab4; +drop table if exists tab5; diff --git a/parser/testdata/02911_analyzer_remove_unused_projection_columns/ast.json b/parser/testdata/02911_analyzer_remove_unused_projection_columns/ast.json new file mode 100644 index 000000000..b11f115a6 --- /dev/null +++ b/parser/testdata/02911_analyzer_remove_unused_projection_columns/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001023594, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02911_analyzer_remove_unused_projection_columns/metadata.json b/parser/testdata/02911_analyzer_remove_unused_projection_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02911_analyzer_remove_unused_projection_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02911_analyzer_remove_unused_projection_columns/query.sql b/parser/testdata/02911_analyzer_remove_unused_projection_columns/query.sql new file mode 100644 index 000000000..d567ac6c8 --- /dev/null +++ b/parser/testdata/02911_analyzer_remove_unused_projection_columns/query.sql @@ -0,0 +1,22 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE = MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0, 'Value_0'); + +SET max_columns_to_read = 1; + +SELECT id FROM (SELECT * FROM test_table); +SELECT id FROM (SELECT * FROM (SELECT * FROM test_table)); +SELECT id FROM (SELECT * FROM test_table UNION ALL SELECT * FROM test_table); + +SELECT id FROM (SELECT id, value FROM test_table); +SELECT id FROM (SELECT id, value FROM (SELECT id, value FROM test_table)); +SELECT id FROM (SELECT id, value FROM test_table UNION ALL SELECT id, value FROM test_table); + +DROP TABLE test_table; diff --git a/parser/testdata/02911_cte_invalid_query_analysis/ast.json b/parser/testdata/02911_cte_invalid_query_analysis/ast.json new file mode 100644 index 000000000..2d5576f03 --- /dev/null +++ b/parser/testdata/02911_cte_invalid_query_analysis/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001507491, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02911_cte_invalid_query_analysis/metadata.json b/parser/testdata/02911_cte_invalid_query_analysis/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02911_cte_invalid_query_analysis/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02911_cte_invalid_query_analysis/query.sql b/parser/testdata/02911_cte_invalid_query_analysis/query.sql new file mode 100644 index 000000000..dcf21831e --- /dev/null +++ 
b/parser/testdata/02911_cte_invalid_query_analysis/query.sql @@ -0,0 +1,34 @@ +drop table if exists t0; +drop table if exists t1; +drop table if exists t3; + +create table t0 (pkey UInt32, c1 UInt32, primary key(pkey)) engine = MergeTree; +create table t1 (vkey UInt32, primary key(vkey)) engine = MergeTree; +create table t3 (c17 String, primary key(c17)) engine = MergeTree; +insert into t1 values (3); + +WITH +cte_1 AS (select + subq_1.c_5_c1698_16 as c_2_c1702_3, + subq_1.c_5_c1694_12 as c_2_c1703_4 + from + (select + covarPop(-0, 74) as c_5_c1686_4, + sumWithOverflow(0) as c_5_c1694_12, + covarPop(-53.64, 92.63) as c_5_c1698_16 + from + t3 as ref_8 + group by ref_8.c17) as subq_1) +select + ref_15.c_2_c1703_4 as c_2_c1723_6, + ref_15.c_2_c1702_3 as c_2_c1724_7 + from + t0 as ref_14 + RIGHT outer join cte_1 as ref_15 + on (ref_14.c1 = ref_15.c_2_c1702_3) + RIGHT outer join t1 as ref_16 + on (ref_14.pkey = ref_16.vkey); + +drop table t0; +drop table t1; +drop table t3; diff --git a/parser/testdata/02911_join_on_nullsafe_optimization/ast.json b/parser/testdata/02911_join_on_nullsafe_optimization/ast.json new file mode 100644 index 000000000..f86f9f927 --- /dev/null +++ b/parser/testdata/02911_join_on_nullsafe_optimization/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001321556, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02911_join_on_nullsafe_optimization/metadata.json b/parser/testdata/02911_join_on_nullsafe_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02911_join_on_nullsafe_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02911_join_on_nullsafe_optimization/query.sql b/parser/testdata/02911_join_on_nullsafe_optimization/query.sql new file mode 100644 index 000000000..164977a98 --- /dev/null +++ b/parser/testdata/02911_join_on_nullsafe_optimization/query.sql @@ -0,0 +1,77 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t1n; +DROP TABLE IF EXISTS t2n; + +CREATE TABLE t1 (x Nullable(Int64), y Nullable(UInt64)) ENGINE = TinyLog; +CREATE TABLE t2 (x Nullable(Int64), y Nullable(UInt64)) ENGINE = TinyLog; + +INSERT INTO t1 VALUES (1,42), (2,2), (3,3), (NULL,NULL); +INSERT INTO t2 VALUES (NULL,NULL), (2,2), (3,33), (4,42); + +CREATE TABLE t1n (x Int64, y UInt64) ENGINE = TinyLog; +CREATE TABLE t2n (x Int64, y UInt64) ENGINE = TinyLog; + +INSERT INTO t1n VALUES (1,42), (2,2), (3,3); +INSERT INTO t2n VALUES (2,2), (3,33), (4,42); + +SET enable_analyzer = 1; + +-- { echoOn } +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR (t1.x IS NULL AND t2.x IS NULL)) ORDER BY t1.x NULLS LAST; + +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR t1.x IS NULL AND t2.x IS NULL) OR t1.y <=> t2.y ORDER BY t1.x NULLS LAST; + +SELECT * FROM t1 JOIN t2 ON (t1.x = t2.x OR t1.x IS NULL AND t2.x IS NULL) ORDER BY t1.x; +SELECT * FROM t1 JOIN t2 ON t1.x <=> t2.x AND ((t1.x = t1.y) OR t1.x IS NULL AND t1.y IS NULL) ORDER BY t1.x; + +SELECT * FROM t1 JOIN t2 ON (t1.x = t2.x OR t1.x IS NULL AND t2.x IS NULL) AND t1.y <=> t2.y ORDER BY t1.x NULLS LAST; + +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR t1.y <=> t2.y OR (t1.x IS NULL AND t2.x IS NULL) OR (t1.y IS NULL AND t2.y IS NULL)) ORDER BY t1.x NULLS LAST; + +SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR (t1.x IS 
NULL AND t2.x IS NULL)) AND (t1.y == t2.y OR (t1.y IS NULL AND t2.y IS NULL)) AND COALESCE(t1.x, 0) != 2 ORDER BY t1.x NULLS LAST; + +SELECT x = y OR (x IS NULL AND y IS NULL) FROM t1 ORDER BY x NULLS LAST; + +SELECT * FROM t1 JOIN t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( (t2.x IS NULL) AND (t1.x IS NULL) ) ORDER BY t1.x NULLS LAST; +SELECT * FROM t1 JOIN t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( t2.x <> t1.x AND (t2.x IS NULL) AND (t1.x IS NULL) ) ORDER BY t1.x NULLS LAST; +SELECT * FROM t1 JOIN t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( t2.x <> t1.x AND t2.x <> t1.x ) ORDER BY ALL NULLS LAST SETTINGS query_plan_use_new_logical_join_step = 0; +SELECT * FROM t1 JOIN t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( t2.x <> t1.x AND (t2.x IS NULL) AND (t2.x IS NULL) ) ORDER BY t1.x NULLS LAST SETTINGS query_plan_use_new_logical_join_step = 0, use_join_disjunctions_push_down = 0; + +-- aliases defined in the join condition are valid +SELECT *, e, e2 FROM t1 FULL JOIN t2 ON ( ( ((t1.x == t2.x) AS e) AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( (t2.x IS NULL) AND (t1.x IS NULL) ) AS e2 ) ORDER BY t1.x NULLS LAST, t2.x NULLS LAST; +SELECT *, e, e2 FROM t1 FULL JOIN t2 ON ( ( ((t1.x == t2.x) AS e) AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) AS e2 ) ORDER BY t1.x NULLS LAST, t2.x NULLS LAST; + +-- check for non-nullable columns for which `is null` is replaced with constant +SELECT * FROM t1n as t1 JOIN t2n as t2 ON (t1.x == t2.x AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL)) ) OR ( (t2.x IS NULL) AND (t1.x IS NULL) ) ORDER BY t1.x NULLS LAST; + +-- { echoOff } + +SELECT '--'; + +-- IS NOT NULL and constants are optimized out +SELECT count() FROM ( EXPLAIN QUERY TREE + SELECT * FROM t1 JOIN t2 ON ( (t1.x = t2.x) AND (t1.x IS NOT NULL) AND true AND (t2.x IS NOT NULL) ) +) WHERE explain like '%CONSTANT%' OR explain ilike '%is%null%'; + +SELECT count() FROM ( EXPLAIN QUERY TREE + SELECT * FROM t1 JOIN t2 ON ( (t1.x = t2.x) AND true ) +) WHERE explain like '%CONSTANT%' OR explain ilike '%is%null%'; + +-- this is not optimized out +SELECT count() FROM ( EXPLAIN QUERY TREE + SELECT * FROM t1 JOIN t2 ON (t1.x <=> t2.x OR t1.x IS NULL AND t1.y <=> t2.y AND t2.x IS NULL) +) WHERE explain like '%CONSTANT%' OR explain ilike '%is%null%'; + +SELECT count() FROM ( EXPLAIN QUERY TREE + SELECT * FROM t1 JOIN t2 ON t1.x <=> t2.x AND (t1.x = t1.y OR t1.x IS NULL AND t1.y IS NULL) +) WHERE explain like '%CONSTANT%' OR explain ilike '%is%null%'; + +SELECT count() FROM ( EXPLAIN QUERY TREE + SELECT * FROM t1 JOIN t2 ON t1.x = t2.x AND NOT (t1.x = 1 OR t1.x IS NULL) +) WHERE explain ilike '%function_name: isNull%'; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t1n; +DROP TABLE IF EXISTS t2n; diff --git a/parser/testdata/02911_row_policy_on_cluster/ast.json b/parser/testdata/02911_row_policy_on_cluster/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02911_row_policy_on_cluster/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02911_row_policy_on_cluster/metadata.json b/parser/testdata/02911_row_policy_on_cluster/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02911_row_policy_on_cluster/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02911_row_policy_on_cluster/query.sql 
b/parser/testdata/02911_row_policy_on_cluster/query.sql new file mode 100644 index 000000000..0c60bb5a6 --- /dev/null +++ b/parser/testdata/02911_row_policy_on_cluster/query.sql @@ -0,0 +1,11 @@ +-- Tags: no-parallel, zookeeper, no-replicated-database +-- Tag no-replicated-database: distributed_ddl_output_mode is none + +DROP ROW POLICY IF EXISTS 02911_rowpolicy ON default.* ON CLUSTER test_shard_localhost; +DROP USER IF EXISTS 02911_user ON CLUSTER test_shard_localhost; + +CREATE USER 02911_user ON CLUSTER test_shard_localhost; +CREATE ROW POLICY 02911_rowpolicy ON CLUSTER test_shard_localhost ON default.* USING 1 TO 02911_user; + +DROP ROW POLICY 02911_rowpolicy ON default.* ON CLUSTER test_shard_localhost; +DROP USER 02911_user ON CLUSTER test_shard_localhost; diff --git a/parser/testdata/02911_support_alias_column_in_indices/ast.json b/parser/testdata/02911_support_alias_column_in_indices/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02911_support_alias_column_in_indices/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02911_support_alias_column_in_indices/metadata.json b/parser/testdata/02911_support_alias_column_in_indices/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02911_support_alias_column_in_indices/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02911_support_alias_column_in_indices/query.sql b/parser/testdata/02911_support_alias_column_in_indices/query.sql new file mode 100644 index 000000000..5ab50044e --- /dev/null +++ b/parser/testdata/02911_support_alias_column_in_indices/query.sql @@ -0,0 +1,40 @@ +-- Tags: no-parallel + +drop database if exists 02911_support_alias_column_in_indices; +create database 02911_support_alias_column_in_indices; +use 02911_support_alias_column_in_indices; + +create table test1 +( + c UInt32, + a alias c + 1, + index i (a) type minmax +) +engine = MergeTree +order by c +settings index_granularity = 8192, min_index_granularity_bytes = 1024, index_granularity_bytes = 10485760; -- default settings, prevent randomization in tests + +insert into test1 select * from numbers(10); +insert into test1 select * from numbers(11, 20); + +explain indexes = 1 select * from test1 where a > 10 settings enable_analyzer = 0; +explain indexes = 1 select * from test1 where a > 10 settings enable_analyzer = 1; + +create table test2 +( + c UInt32, + a1 alias c + 1, + a2 alias a1 + 1, + index i (a2) type minmax +) +engine = MergeTree +order by c +settings index_granularity = 8192, min_index_granularity_bytes = 1024, index_granularity_bytes = 10485760; -- default settings, prevent randomization in tests + +insert into test2 select * from numbers(10); +insert into test2 select * from numbers(11, 20); + +explain indexes = 1 select * from test2 where a2 > 15 settings enable_analyzer = 0; +explain indexes = 1 select * from test2 where a2 > 15 settings enable_analyzer = 1; + +drop database 02911_support_alias_column_in_indices; diff --git a/parser/testdata/02911_system_symbols/ast.json b/parser/testdata/02911_system_symbols/ast.json new file mode 100644 index 000000000..fbbebd000 --- /dev/null +++ b/parser/testdata/02911_system_symbols/ast.json @@ -0,0 +1,121 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 6)" + }, + { + "explain": " 
ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function demangle (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier symbol" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.symbols" + }, + { + "explain": " Function like (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier symbol" + }, + { + "explain": " Literal '%StorageSystemSymbols%'" + }, + { + "explain": " Function like (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal '%DB::StorageSystemSymbols::StorageSystemSymbols%'" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Set" + } + ], + + "rows": 33, + + "statistics": + { + "elapsed": 0.001402026, + "rows_read": 33, + "bytes_read": 1365 + } +} diff --git a/parser/testdata/02911_system_symbols/metadata.json b/parser/testdata/02911_system_symbols/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02911_system_symbols/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02911_system_symbols/query.sql b/parser/testdata/02911_system_symbols/query.sql new file mode 100644 index 000000000..d4195c193 --- /dev/null +++ b/parser/testdata/02911_system_symbols/query.sql @@ -0,0 +1 @@ +SELECT x FROM (SELECT demangle(symbol) AS x FROM system.symbols WHERE symbol LIKE '%StorageSystemSymbols%') WHERE x LIKE '%DB::StorageSystemSymbols::StorageSystemSymbols%' ORDER BY x LIMIT 1 SETTINGS allow_introspection_functions = 1; diff --git a/parser/testdata/02912_group_array_sample/ast.json b/parser/testdata/02912_group_array_sample/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02912_group_array_sample/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02912_group_array_sample/metadata.json b/parser/testdata/02912_group_array_sample/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02912_group_array_sample/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02912_group_array_sample/query.sql b/parser/testdata/02912_group_array_sample/query.sql new file mode 100644 index 000000000..6cf9d4517 --- /dev/null +++ b/parser/testdata/02912_group_array_sample/query.sql @@ -0,0 +1,2 @@ +-- Checks that the random seed is different for multiple states of aggregation: +SELECT uniq(x) > 50 FROM (SELECT number, groupArraySample(10)(arrayJoin(range(1000))) AS x FROM numbers(100) GROUP BY number); diff --git a/parser/testdata/02912_ingestion_mv_deduplication/ast.json b/parser/testdata/02912_ingestion_mv_deduplication/ast.json new file 
mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02912_ingestion_mv_deduplication/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02912_ingestion_mv_deduplication/metadata.json b/parser/testdata/02912_ingestion_mv_deduplication/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02912_ingestion_mv_deduplication/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02912_ingestion_mv_deduplication/query.sql b/parser/testdata/02912_ingestion_mv_deduplication/query.sql new file mode 100644 index 000000000..a2378fd8f --- /dev/null +++ b/parser/testdata/02912_ingestion_mv_deduplication/query.sql @@ -0,0 +1,205 @@ +-- Tags: zookeeper +SET session_timezone = 'UTC'; + +SELECT '-- Original issue with max_insert_delayed_streams_for_parallel_write <= 1'; +/* + + This is the expected behavior when mv deduplication is set to false. + + - 1st insert works for landing and mv tables + - 2nd insert gets its first block (20220901) deduplicated and the second one inserted in the landing table + - 2nd insert gets both blocks inserted in the mv table + +*/ +SET deduplicate_blocks_in_dependent_materialized_views = 0, max_insert_delayed_streams_for_parallel_write = 0; + +CREATE TABLE landing +( + time DateTime, + number Int64 +) +Engine=ReplicatedReplacingMergeTree('/clickhouse/' || currentDatabase() || '/landing/{shard}/', '{replica}') +PARTITION BY toYYYYMMDD(time) +ORDER BY time; + +CREATE MATERIALIZED VIEW mv +ENGINE = ReplicatedSummingMergeTree('/clickhouse/' || currentDatabase() || '/mv/{shard}/', '{replica}') +PARTITION BY toYYYYMMDD(hour) ORDER BY hour +AS SELECT + toStartOfHour(time) AS hour, + sum(number) AS sum_amount +FROM landing +GROUP BY hour; + +INSERT INTO landing VALUES ('2022-09-01 12:23:34', 42); +INSERT INTO landing VALUES ('2022-09-01 12:23:34', 42),('2023-09-01 12:23:34', 42); + +SELECT '-- Landing'; +SELECT * FROM landing FINAL ORDER BY time; +SELECT '-- MV'; +SELECT * FROM mv FINAL ORDER BY hour; + +DROP TABLE IF EXISTS landing SYNC; +DROP TABLE IF EXISTS mv SYNC; + +SELECT '-- Original issue with deduplicate_blocks_in_dependent_materialized_views = 0 AND max_insert_delayed_streams_for_parallel_write > 1'; +/* + + This is the unexpected behavior caused by setting max_insert_delayed_streams_for_parallel_write > 1. + + This unexpected behavior had been present since version 21.9 or earlier, but it was mitigated by https://github.com/ClickHouse/ClickHouse/pull/34780, + which disabled max_insert_delayed_streams_for_parallel_write by default. + + This is what happens: + + - 1st insert works for landing and mv tables + - 2nd insert gets its first block (20220901) deduplicated and the second one inserted in the landing table + - 2nd insert used to insert nothing into the mv table due to a bug in computing the blocks to be discarded; now the block is inserted because deduplicate_blocks_in_dependent_materialized_views = 0 + + Now it is fixed.
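+ + For reference, a sketch of the mechanism (not from the original comment): replicated tables + deduplicate inserts by hashing each inserted block and checking the hash against recently + inserted block hashes kept in ZooKeeper, so re-running the exact same insert, e.g. + INSERT INTO landing VALUES ('2022-09-01 12:23:34', 42); + would be skipped the second time.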
+*/ +SET deduplicate_blocks_in_dependent_materialized_views = 0, max_insert_delayed_streams_for_parallel_write = 1000; + +CREATE TABLE landing +( + time DateTime, + number Int64 +) +Engine=ReplicatedReplacingMergeTree('/clickhouse/' || currentDatabase() || '/landing/{shard}/', '{replica}') +PARTITION BY toYYYYMMDD(time) +ORDER BY time; + +CREATE MATERIALIZED VIEW mv +ENGINE = ReplicatedSummingMergeTree('/clickhouse/' || currentDatabase() || '/mv/{shard}/', '{replica}') +PARTITION BY toYYYYMMDD(hour) ORDER BY hour +AS SELECT + toStartOfHour(time) AS hour, + sum(number) AS sum_amount +FROM landing +GROUP BY hour; + +INSERT INTO landing VALUES ('2022-09-01 12:23:34', 42); +INSERT INTO landing VALUES ('2022-09-01 12:23:34', 42),('2023-09-01 12:23:34', 42); + +SELECT '-- Landing'; +SELECT * FROM landing FINAL ORDER BY time; +SELECT '-- MV'; +SELECT * FROM mv FINAL ORDER BY hour; + +DROP TABLE IF EXISTS landing SYNC; +DROP TABLE IF EXISTS mv SYNC; + +SELECT '-- Original issue with deduplicate_blocks_in_dependent_materialized_views = 1 AND max_insert_delayed_streams_for_parallel_write > 1'; +/* + + By setting deduplicate_blocks_in_dependent_materialized_views = 1 we can make the code go through a different path, restoring the expected + behavior again, even with max_insert_delayed_streams_for_parallel_write > 1. + + This is what happens now: + + - 1st insert works for landing and mv tables + - 2nd insert gets its first block (20220901) deduplicated for the landing table, and both rows are inserted into the mv table + +*/ +SET deduplicate_blocks_in_dependent_materialized_views = 1, max_insert_delayed_streams_for_parallel_write = 1000; + +CREATE TABLE landing +( + time DateTime, + number Int64 +) +Engine=ReplicatedReplacingMergeTree('/clickhouse/' || currentDatabase() || '/landing/{shard}/', '{replica}') +PARTITION BY toYYYYMMDD(time) +ORDER BY time; + +CREATE MATERIALIZED VIEW mv +ENGINE = ReplicatedSummingMergeTree('/clickhouse/' || currentDatabase() || '/mv/{shard}/', '{replica}') +PARTITION BY toYYYYMMDD(hour) ORDER BY hour +AS SELECT + toStartOfHour(time) AS hour, + sum(number) AS sum_amount +FROM landing +GROUP BY hour; + +INSERT INTO landing VALUES ('2022-09-01 12:23:34', 42); +INSERT INTO landing VALUES ('2022-09-01 12:23:34', 42),('2023-09-01 12:23:34', 42); + +SELECT '-- Landing'; +SELECT * FROM landing FINAL ORDER BY time; +SELECT '-- MV'; +SELECT * FROM mv FINAL ORDER BY hour; + +DROP TABLE IF EXISTS landing SYNC; +DROP TABLE IF EXISTS mv SYNC; + +SELECT '-- Regression introduced in https://github.com/ClickHouse/ClickHouse/pull/54184'; +/* + + This is a test to prevent the regression introduced in https://github.com/ClickHouse/ClickHouse/pull/54184 from happening again. + + The PR was trying to fix the unexpected behavior when deduplicate_blocks_in_dependent_materialized_views = 0 AND + max_insert_delayed_streams_for_parallel_write > 1 but it ended up adding a new regression.
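+ + Context, an editor's illustration based on the test body below: the materialized view writes + countState() partial aggregation states into the AggregateFunction(count) column of ds, so + readers must finalize them with countMerge(), e.g. + SELECT pk1, countMerge(occurences) FROM ds GROUP BY pk1;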
+ +*/ +SET deduplicate_blocks_in_dependent_materialized_views = 0, max_insert_delayed_streams_for_parallel_write = 0; + +CREATE TABLE landing +( + `time` DateTime, + `pk1` LowCardinality(String), + `pk2` LowCardinality(String), + `pk3` LowCardinality(String), + `pk4` String +) +ENGINE = ReplicatedReplacingMergeTree('/clickhouse/' || currentDatabase() || '/landing/{shard}/', '{replica}') +ORDER BY (pk1, pk2, pk3, pk4); + +CREATE TABLE ds +( + `pk1` LowCardinality(String), + `pk2` LowCardinality(String), + `pk3` LowCardinality(String), + `pk4` LowCardinality(String), + `occurences` AggregateFunction(count) +) +ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/' || currentDatabase() || '/ds/{shard}/', '{replica}') +ORDER BY (pk1, pk2, pk3, pk4); + +CREATE MATERIALIZED VIEW mv TO ds AS +SELECT + pk1, + pk2, + pk4, + pk3, + countState() AS occurences +FROM landing +GROUP BY pk1, pk2, pk4, pk3; + +INSERT INTO landing (time, pk1, pk2, pk4, pk3) +VALUES ('2023-01-01 00:00:00','org-1','prod','login','user'),('2023-01-01 00:00:00','org-1','prod','login','user'),('2023-01-01 00:00:00','org-1','prod','login','user'),('2023-02-01 00:00:00','org-1','stage','login','user'),('2023-02-01 00:00:00','org-1','prod','login','account'),('2023-02-01 00:00:00','org-1','prod','checkout','user'),('2023-03-01 00:00:00','org-1','prod','login','account'),('2023-03-01 00:00:00','org-1','prod','login','account'); + +SELECT '-- Landing (Agg/Replacing)MergeTree'; +SELECT + pk1, + pk2, + pk4, + pk3, + count() as occurences +FROM landing +GROUP BY pk1, pk2, pk4, pk3 +ORDER BY pk1, pk2, pk4, pk3; + +SELECT '--- MV'; +SELECT + pk1, + pk2, + pk4, + pk3, + countMerge(occurences) AS occurences +FROM ds +GROUP BY pk1, pk2, pk4, pk3 +ORDER BY pk1, pk2, pk4, pk3; + +DROP TABLE IF EXISTS landing SYNC; +DROP TABLE IF EXISTS ds SYNC; +DROP TABLE IF EXISTS mv SYNC; diff --git a/parser/testdata/02913_sum_map_state/ast.json b/parser/testdata/02913_sum_map_state/ast.json new file mode 100644 index 000000000..55621f5da --- /dev/null +++ b/parser/testdata/02913_sum_map_state/ast.json @@ -0,0 +1,106 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function hex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumMappedArraysState (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '0.1'" + }, + { + "explain": " Literal 'Decimal(3)'" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1'" + }, + { + "explain": " Literal 'Decimal(3)'" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1.2'" + }, + { + "explain": " Literal 'Decimal(3)'" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2'" + }, + { + "explain": 
" Literal 'Decimal(3)'" + } + ], + + "rows": 28, + + "statistics": + { + "elapsed": 0.001219227, + "rows_read": 28, + "bytes_read": 1146 + } +} diff --git a/parser/testdata/02913_sum_map_state/metadata.json b/parser/testdata/02913_sum_map_state/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02913_sum_map_state/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02913_sum_map_state/query.sql b/parser/testdata/02913_sum_map_state/query.sql new file mode 100644 index 000000000..9f4fd27bb --- /dev/null +++ b/parser/testdata/02913_sum_map_state/query.sql @@ -0,0 +1 @@ +SELECT hex(sumMappedArraysState([CAST('0.1', 'Decimal(3)'), CAST('1', 'Decimal(3)')], [CAST('1.2', 'Decimal(3)'), CAST('2', 'Decimal(3)')])); diff --git a/parser/testdata/02915_analyzer_fuzz_1/ast.json b/parser/testdata/02915_analyzer_fuzz_1/ast.json new file mode 100644 index 000000000..eaa6b87f5 --- /dev/null +++ b/parser/testdata/02915_analyzer_fuzz_1/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001299704, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02915_analyzer_fuzz_1/metadata.json b/parser/testdata/02915_analyzer_fuzz_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02915_analyzer_fuzz_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02915_analyzer_fuzz_1/query.sql b/parser/testdata/02915_analyzer_fuzz_1/query.sql new file mode 100644 index 000000000..f1d606ab1 --- /dev/null +++ b/parser/testdata/02915_analyzer_fuzz_1/query.sql @@ -0,0 +1,2 @@ +set enable_analyzer=1; +SELECT concat('With ', materialize(_CAST('ba\0', 'LowCardinality(FixedString(3))'))) AS `concat('With ', materialize(CAST('ba\\0', 'LowCardinality(FixedString(3))')))` FROM system.one GROUP BY 'With '; diff --git a/parser/testdata/02915_analyzer_fuzz_2/ast.json b/parser/testdata/02915_analyzer_fuzz_2/ast.json new file mode 100644 index 000000000..f6a84b7e5 --- /dev/null +++ b/parser/testdata/02915_analyzer_fuzz_2/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001180094, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02915_analyzer_fuzz_2/metadata.json b/parser/testdata/02915_analyzer_fuzz_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02915_analyzer_fuzz_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02915_analyzer_fuzz_2/query.sql b/parser/testdata/02915_analyzer_fuzz_2/query.sql new file mode 100644 index 000000000..8921d36c5 --- /dev/null +++ b/parser/testdata/02915_analyzer_fuzz_2/query.sql @@ -0,0 +1,4 @@ +SET aggregate_functions_null_for_empty = 1; +--set enable_analyzer=1; +create table t_delete_projection (x UInt32, y UInt64, projection p (select sum(y))) engine = MergeTree order by tuple() SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into t_delete_projection select number, toString(number) from numbers(8192 * 10); diff --git a/parser/testdata/02915_analyzer_fuzz_5/ast.json b/parser/testdata/02915_analyzer_fuzz_5/ast.json new file mode 100644 index 000000000..b3e4218dc --- /dev/null +++ b/parser/testdata/02915_analyzer_fuzz_5/ast.json @@ -0,0 
+1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.0012796, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02915_analyzer_fuzz_5/metadata.json b/parser/testdata/02915_analyzer_fuzz_5/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02915_analyzer_fuzz_5/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02915_analyzer_fuzz_5/query.sql b/parser/testdata/02915_analyzer_fuzz_5/query.sql new file mode 100644 index 000000000..d75d4f4eb --- /dev/null +++ b/parser/testdata/02915_analyzer_fuzz_5/query.sql @@ -0,0 +1,6 @@ +set enable_analyzer=1; +SET max_block_size = 1000; +SET max_threads = 4; +SET max_rows_to_group_by = 3000, group_by_overflow_mode = 'any'; +SELECT 'limit w/ GROUP BY', count(NULL), number FROM remote('127.{1,2}', view(SELECT intDiv(number, 2147483647) + AS number FROM numbers(10))) GROUP BY number WITH ROLLUP ORDER BY count() ASC, number DESC NULLS LAST SETTINGS limit = 2; diff --git a/parser/testdata/02915_analyzer_fuzz_6/ast.json b/parser/testdata/02915_analyzer_fuzz_6/ast.json new file mode 100644 index 000000000..e5f50392d --- /dev/null +++ b/parser/testdata/02915_analyzer_fuzz_6/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001246944, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02915_analyzer_fuzz_6/metadata.json b/parser/testdata/02915_analyzer_fuzz_6/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02915_analyzer_fuzz_6/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02915_analyzer_fuzz_6/query.sql b/parser/testdata/02915_analyzer_fuzz_6/query.sql new file mode 100644 index 000000000..cc276ec40 --- /dev/null +++ b/parser/testdata/02915_analyzer_fuzz_6/query.sql @@ -0,0 +1,19 @@ +set allow_suspicious_low_cardinality_types=1; +set enable_analyzer=1; + +create table tab (x LowCardinality(Nullable(Float64))) engine = MergeTree order by x settings allow_nullable_key=1; +insert into tab select number from numbers(2); +SELECT [(arrayJoin([x]), x)] AS row FROM tab; + + +CREATE TABLE t__fuzz_307 (`k1` DateTime, `k2` LowCardinality(Nullable(Float64)), `v` Nullable(UInt32)) ENGINE = + ReplacingMergeTree ORDER BY (k1, k2) settings allow_nullable_key=1; + insert into t__fuzz_307 select * from generateRandom() limit 10; + SELECT arrayJoin([tuple([(toNullable(NULL), -9223372036854775808, toNullable(3.4028234663852886e38), arrayJoin( +[tuple([(toNullable(NULL), 2147483647, toNullable(0.5), k2)])]), k2)])]) AS row, arrayJoin([(1024, k2)]), -9223372036854775807, 256, tupleElement(row, 1048576, 1024) AS k FROM t__fuzz_307 FINAL ORDER BY (toNullable('655.36'), 2, toNullable +('0.2147483648'), k2) ASC, toNullable('102.3') DESC NULLS FIRST, '10.25' DESC, k ASC NULLS FIRST format Null; + +CREATE TABLE t__fuzz_282 (`k1` DateTime, `k2` LowCardinality(Nullable(Float64)), `v` Nullable(UInt32)) ENGINE = ReplacingMergeTree ORDER BY (k1, k2) SETTINGS allow_nullable_key = 1; +INSERT INTO t__fuzz_282 VALUES (1, 2, 3) (1, 2, 4) (2, 3, 4), (2, 3, 5); + +SELECT arrayJoin([tuple([(toNullable(NULL), -9223372036854775808, toNullable(3.4028234663852886e38), arrayJoin([tuple([(toNullable(NULL), 2147483647, toNullable(0.5), k2)])]), k2)])]) AS 
row, arrayJoin([(1024, k2)]), -9223372036854775807, 256, tupleElement(row, 1048576, 1024) AS k FROM t__fuzz_282 FINAL ORDER BY (toNullable('655.36'), 2, toNullable('0.2147483648'), k2) ASC, toNullable('102.3') DESC NULLS FIRST, '10.25' DESC, k ASC NULLS FIRST format Null; diff --git a/parser/testdata/02915_move_partition_inactive_replica/ast.json b/parser/testdata/02915_move_partition_inactive_replica/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02915_move_partition_inactive_replica/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02915_move_partition_inactive_replica/metadata.json b/parser/testdata/02915_move_partition_inactive_replica/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02915_move_partition_inactive_replica/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02915_move_partition_inactive_replica/query.sql b/parser/testdata/02915_move_partition_inactive_replica/query.sql new file mode 100644 index 000000000..81eabce4f --- /dev/null +++ b/parser/testdata/02915_move_partition_inactive_replica/query.sql @@ -0,0 +1,63 @@ +-- Tags: no-parallel + +create database if not exists shard_0; +create database if not exists shard_1; + +drop table if exists shard_0.from_0; +drop table if exists shard_1.from_0; +drop table if exists shard_0.from_1; +drop table if exists shard_1.from_1; +drop table if exists shard_0.to; +drop table if exists shard_1.to; + +create table shard_0.from_0 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_0_' || currentDatabase(), '0') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1; +create table shard_1.from_0 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_0_' || currentDatabase(), '1') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1; + +create table shard_0.from_1 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_1_' || currentDatabase(), '0') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1; +create table shard_1.from_1 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_1_' || currentDatabase(), '1') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1; + +insert into shard_0.from_0 select number from numbers(10); +insert into shard_0.from_0 select number + 10 from numbers(10); + +insert into shard_0.from_1 select number + 20 from numbers(10); +insert into shard_0.from_1 select number + 30 from numbers(10); + +system sync replica shard_1.from_0; +system sync replica shard_1.from_1; + + +create table shard_0.to (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/to_' || currentDatabase(), '0') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1; + +create table shard_1.to (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/to_' || currentDatabase(), '1') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1; + +detach table shard_1.to; + +alter table shard_0.from_0 on cluster test_cluster_two_shards_different_databases move partition tuple() to table shard_0.to format Null settings distributed_ddl_output_mode='never_throw', distributed_ddl_task_timeout = 1; + +alter table shard_0.from_1 on cluster test_cluster_two_shards_different_databases move partition tuple() to table shard_0.to format Null 
settings distributed_ddl_output_mode='never_throw', distributed_ddl_task_timeout = 1; + +OPTIMIZE TABLE shard_0.from_0; +OPTIMIZE TABLE shard_1.from_0; +OPTIMIZE TABLE shard_0.from_1; +OPTIMIZE TABLE shard_1.from_1; + +OPTIMIZE TABLE shard_0.to; + +-- If moved parts are not merged by OPTIMIZE or a background merge, restart +-- can log a Warning about the metadata version on disk. It's a normal situation, +-- and the test shouldn't occasionally fail because of it. +set send_logs_level = 'error'; + +system restart replica shard_0.to; + +-- Doesn't lead to test flakiness, because we don't check anything after it +select sleep(2); + +attach table shard_1.to; + +drop table if exists shard_0.from_0; +drop table if exists shard_1.from_0; +drop table if exists shard_0.from_1; +drop table if exists shard_1.from_1; +drop table if exists shard_0.to; +drop table if exists shard_1.to; diff --git a/parser/testdata/02915_sleep_large_uint/ast.json b/parser/testdata/02915_sleep_large_uint/ast.json new file mode 100644 index 000000000..60bed0f6c --- /dev/null +++ b/parser/testdata/02915_sleep_large_uint/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sleep (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_3.40282e44" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001113827, + "rows_read": 7, + "bytes_read": 268 + } +} diff --git a/parser/testdata/02915_sleep_large_uint/metadata.json b/parser/testdata/02915_sleep_large_uint/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02915_sleep_large_uint/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02915_sleep_large_uint/query.sql b/parser/testdata/02915_sleep_large_uint/query.sql new file mode 100644 index 000000000..08b6c580a --- /dev/null +++ b/parser/testdata/02915_sleep_large_uint/query.sql @@ -0,0 +1,8 @@ +SELECT sleep(3.40282e+44); -- { serverError BAD_ARGUMENTS } +SELECT sleep((pow(2, 64) / 1000000) - 1); -- { serverError BAD_ARGUMENTS } +SELECT sleepEachRow(184467440737095516) from numbers(10000); -- { serverError BAD_ARGUMENTS } +SET max_rows_to_read = 0; +SELECT sleepEachRow(pow(2, 31)) from numbers(9007199254740992) settings function_sleep_max_microseconds_per_block = 8589934592000000000; -- { serverError TOO_SLOW } + +-- Another corner case, but it requires lots of memory to run (huge block size) +-- SELECT sleepEachRow(pow(2, 31)) from numbers(17179869184) settings max_block_size = 17179869184, function_sleep_max_microseconds_per_block = 8589934592000000000; -- { serverError TOO_SLOW } diff --git a/parser/testdata/02916_addcolumn_nested/ast.json b/parser/testdata/02916_addcolumn_nested/ast.json new file mode 100644 index 000000000..861123c9c --- /dev/null +++ b/parser/testdata/02916_addcolumn_nested/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001489845, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02916_addcolumn_nested/metadata.json b/parser/testdata/02916_addcolumn_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++
b/parser/testdata/02916_addcolumn_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02916_addcolumn_nested/query.sql b/parser/testdata/02916_addcolumn_nested/query.sql new file mode 100644 index 000000000..1e64fca6a --- /dev/null +++ b/parser/testdata/02916_addcolumn_nested/query.sql @@ -0,0 +1,22 @@ +SET flatten_nested = 0; + +DROP TABLE IF EXISTS nested_table; +CREATE TABLE nested_table (id UInt64, first Nested(a Int8, b String)) ENGINE = MergeTree() ORDER BY id; +SHOW CREATE nested_table; + +SET flatten_nested = 1; + +ALTER TABLE nested_table ADD COLUMN second Nested(c Int8, d String) AFTER id; +SHOW CREATE nested_table; + +SET flatten_nested = 0; + +ALTER TABLE nested_table ADD COLUMN third Nested(e Int8, f String) FIRST; +SHOW CREATE nested_table; + +SET flatten_nested = 1; + +ALTER TABLE nested_table ADD COLUMN fourth Nested(g Int8, h String); +SHOW CREATE nested_table; + +DROP TABLE nested_table; diff --git a/parser/testdata/02916_analyzer_set_in_join/ast.json b/parser/testdata/02916_analyzer_set_in_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02916_analyzer_set_in_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02916_analyzer_set_in_join/metadata.json b/parser/testdata/02916_analyzer_set_in_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02916_analyzer_set_in_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02916_analyzer_set_in_join/query.sql b/parser/testdata/02916_analyzer_set_in_join/query.sql new file mode 100644 index 000000000..cae17d74a --- /dev/null +++ b/parser/testdata/02916_analyzer_set_in_join/query.sql @@ -0,0 +1,11 @@ + +SELECT 1, b +FROM numbers(1) +ARRAY JOIN [materialize(3) IN (SELECT 42)] AS b +; + +SELECT * +FROM (SELECT materialize(42) as a) as t1 +JOIN (SELECT materialize(1) as a) as t2 +ON t1.a IN (SELECT 42) = t2.a +; diff --git a/parser/testdata/02916_another_move_partition_inactive_replica/ast.json b/parser/testdata/02916_another_move_partition_inactive_replica/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02916_another_move_partition_inactive_replica/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02916_another_move_partition_inactive_replica/metadata.json b/parser/testdata/02916_another_move_partition_inactive_replica/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02916_another_move_partition_inactive_replica/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02916_another_move_partition_inactive_replica/query.sql b/parser/testdata/02916_another_move_partition_inactive_replica/query.sql new file mode 100644 index 000000000..fa5b63578 --- /dev/null +++ b/parser/testdata/02916_another_move_partition_inactive_replica/query.sql @@ -0,0 +1,54 @@ +-- Tags: no-parallel + +create database if not exists shard_0; +create database if not exists shard_1; + +drop table if exists shard_0.from_1; +drop table if exists shard_1.from_1; +drop table if exists shard_0.to; +drop table if exists shard_1.to; + +create table shard_0.from_1 (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/from_1_' || currentDatabase(), '0') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1, shared_merge_tree_disable_merges_and_mutations_assignment=1; +create table shard_1.from_1 (x UInt32) engine = 
ReplicatedMergeTree('/clickhouse/tables/from_1_' || currentDatabase(), '1') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1, shared_merge_tree_disable_merges_and_mutations_assignment=1; + +system stop merges shard_0.from_1; +system stop merges shard_1.from_1; +insert into shard_0.from_1 select number + 20 from numbers(10); +insert into shard_0.from_1 select number + 30 from numbers(10); + +insert into shard_0.from_1 select number + 40 from numbers(10); +insert into shard_0.from_1 select number + 50 from numbers(10); + +system sync replica shard_1.from_1; + +create table shard_0.to (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/to_' || currentDatabase(), '0') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1, max_parts_to_merge_at_once=2, shared_merge_tree_disable_merges_and_mutations_assignment=1; + +create table shard_1.to (x UInt32) engine = ReplicatedMergeTree('/clickhouse/tables/to_' || currentDatabase(), '1') order by x settings old_parts_lifetime=1, max_cleanup_delay_period=1, cleanup_delay_period=1, max_parts_to_merge_at_once=2; + +detach table shard_1.to; + +alter table shard_0.from_1 on cluster test_cluster_two_shards_different_databases move partition tuple() to table shard_0.to format Null settings distributed_ddl_output_mode='never_throw', distributed_ddl_task_timeout = 1; + +drop table if exists shard_0.from_1; +drop table if exists shard_1.from_1; +OPTIMIZE TABLE shard_0.to; +OPTIMIZE TABLE shard_0.to; +select name, active from system.parts where database='shard_0' and table='to' and active order by name; + +-- If moved parts are not merged by OPTIMIZE or a background merge, restart +-- can log a Warning about the metadata version on disk. It's a normal situation, +-- and the test shouldn't occasionally fail because of it.
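+-- Note (illustrative, not part of the original test): send_logs_level sets the minimum severity +-- of server log messages streamed back to the client session; 'error' below hides that Warning. +-- set send_logs_level = 'warning'; -- would surface it instead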
+set send_logs_level = 'error'; + +system restart replica shard_0.to; + +-- Doesn't lead to test flakiness, because the content we check in the table +-- doesn't depend on any background operation +select sleep(3); + +attach table shard_1.to; +system sync replica shard_1.to; +select count(), sum(x) from shard_1.to; + +drop table if exists shard_0.to; +drop table if exists shard_1.to; diff --git a/parser/testdata/02916_csv_infer_numbers_from_strings/ast.json b/parser/testdata/02916_csv_infer_numbers_from_strings/ast.json new file mode 100644 index 000000000..22a91dcd8 --- /dev/null +++ b/parser/testdata/02916_csv_infer_numbers_from_strings/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001787363, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02916_csv_infer_numbers_from_strings/metadata.json b/parser/testdata/02916_csv_infer_numbers_from_strings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02916_csv_infer_numbers_from_strings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02916_csv_infer_numbers_from_strings/query.sql b/parser/testdata/02916_csv_infer_numbers_from_strings/query.sql new file mode 100644 index 000000000..713d3d719 --- /dev/null +++ b/parser/testdata/02916_csv_infer_numbers_from_strings/query.sql @@ -0,0 +1,4 @@ +set input_format_csv_try_infer_numbers_from_strings=1; +desc format(CSV, '"42","42.42","True"'); +desc format(CSV, '"42","42.42","True"\n"abc","def","ghk"'); + diff --git a/parser/testdata/02916_date_text_parsing/ast.json b/parser/testdata/02916_date_text_parsing/ast.json new file mode 100644 index 000000000..4d5e35742 --- /dev/null +++ b/parser/testdata/02916_date_text_parsing/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier CSV" + }, + { + "explain": " Literal 'd Date, s String'" + }, + { + "explain": " Literal 'abcdefgh,SomeString'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.00131426, + "rows_read": 13, + "bytes_read": 505 + } +} diff --git a/parser/testdata/02916_date_text_parsing/metadata.json b/parser/testdata/02916_date_text_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02916_date_text_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02916_date_text_parsing/query.sql b/parser/testdata/02916_date_text_parsing/query.sql new file mode 100644 index 000000000..d895ccece --- /dev/null +++ b/parser/testdata/02916_date_text_parsing/query.sql @@ -0,0 +1,25 @@ +select * from format(CSV, 'd Date, s String', 'abcdefgh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2bcdefgh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV,
'd Date, s String', '20cdefgh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '202defgh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020efgh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '20200fgh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '202001gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020010h,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '20200102,SomeString'); +select * from format(CSV, 'd Date, s String', 'abcd-ef-gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2bcd-ef-gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '20cd-ef-gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '202d-ef-gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020-ef-gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020-f-gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020-f-g,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020-0f-gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020-01-gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020-01-h,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020-1-gh,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020-1-h,SomeString'); -- {serverError CANNOT_PARSE_DATE} +select * from format(CSV, 'd Date, s String', '2020-01-02,SomeString'); +select * from format(CSV, 'd Date, s String', '2020-01-2,SomeString'); +select * from format(CSV, 'd Date, s String', '2020-1-2,SomeString'); +select * from format(CSV, 'd Date, s String', '2020-1-02,SomeString'); diff --git a/parser/testdata/02916_distributed_skip_unavailable_shards/ast.json b/parser/testdata/02916_distributed_skip_unavailable_shards/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02916_distributed_skip_unavailable_shards/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02916_distributed_skip_unavailable_shards/metadata.json b/parser/testdata/02916_distributed_skip_unavailable_shards/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02916_distributed_skip_unavailable_shards/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02916_distributed_skip_unavailable_shards/query.sql b/parser/testdata/02916_distributed_skip_unavailable_shards/query.sql new file mode 100644 index 000000000..48a129498 --- /dev/null +++ b/parser/testdata/02916_distributed_skip_unavailable_shards/query.sql @@ -0,0 +1,28 @@ +-- Tags: shard, no-fasttest + +DROP TABLE IF EXISTS table_02916; +DROP TABLE IF EXISTS table_02916_distributed; + +CREATE TABLE table_02916 +( + `ID` UInt32, + `Name` String +) +ENGINE = MergeTree +ORDER BY ID; + +INSERT INTO table_02916 VALUES (1234, 'abcd'); + +CREATE TABLE table_02916_distributed +( + `ID` UInt32, + `Name` String +) +ENGINE = 
Distributed(test_unavailable_shard, currentDatabase(), table_02916, rand()) +SETTINGS skip_unavailable_shards = 1; + +SET send_logs_level='fatal'; +SELECT *, _shard_num FROM table_02916_distributed; + +DROP TABLE table_02916_distributed; +DROP TABLE table_02916; diff --git a/parser/testdata/02916_glogal_in_cancel/ast.json b/parser/testdata/02916_glogal_in_cancel/ast.json new file mode 100644 index 000000000..165d618b2 --- /dev/null +++ b/parser/testdata/02916_glogal_in_cancel/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001192263, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02916_glogal_in_cancel/metadata.json b/parser/testdata/02916_glogal_in_cancel/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02916_glogal_in_cancel/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02916_glogal_in_cancel/query.sql b/parser/testdata/02916_glogal_in_cancel/query.sql new file mode 100644 index 000000000..dd6179594 --- /dev/null +++ b/parser/testdata/02916_glogal_in_cancel/query.sql @@ -0,0 +1,2 @@ +set max_execution_time = 0.5, timeout_overflow_mode = 'break', max_rows_to_read = 0; +SELECT number FROM remote('127.0.0.{3|2}', numbers(1)) WHERE number GLOBAL IN (SELECT number FROM numbers(10000000000.)) format Null; diff --git a/parser/testdata/02916_replication_protocol_wait_for_part/ast.json b/parser/testdata/02916_replication_protocol_wait_for_part/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02916_replication_protocol_wait_for_part/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02916_replication_protocol_wait_for_part/metadata.json b/parser/testdata/02916_replication_protocol_wait_for_part/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02916_replication_protocol_wait_for_part/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02916_replication_protocol_wait_for_part/query.sql b/parser/testdata/02916_replication_protocol_wait_for_part/query.sql new file mode 100644 index 000000000..fc47dfee4 --- /dev/null +++ b/parser/testdata/02916_replication_protocol_wait_for_part/query.sql @@ -0,0 +1,24 @@ +-- Tags: no-replicated-database, no-fasttest, no-shared-merge-tree +-- Tag no-replicated-database: different number of replicas + +create table tableIn (n int) + engine=ReplicatedMergeTree('/test/02916/{database}/table', '1') + order by tuple() + settings + storage_policy='s3_cache', + sleep_before_commit_local_part_in_replicated_table_ms=5000; +create table tableOut (n int) + engine=ReplicatedMergeTree('/test/02916/{database}/table', '2') + order by tuple() + settings + storage_policy='s3_cache'; + +SET send_logs_level='error'; + +insert into tableIn values(1); +insert into tableIn values(2); +system sync replica tableOut; +select count() from tableOut; + +drop table tableIn; +drop table tableOut; diff --git a/parser/testdata/02916_set_formatting/ast.json b/parser/testdata/02916_set_formatting/ast.json new file mode 100644 index 000000000..f2ef84702 --- /dev/null +++ b/parser/testdata/02916_set_formatting/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, 
+ { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function formatQuerySingleLine (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'set additional_table_filters = {\\'kjsnckjn\\': \\'ksanmn\\', \\'dkm\\': \\'dd\\'}'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001265814, + "rows_read": 7, + "bytes_read": 342 + } +} diff --git a/parser/testdata/02916_set_formatting/metadata.json b/parser/testdata/02916_set_formatting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02916_set_formatting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02916_set_formatting/query.sql b/parser/testdata/02916_set_formatting/query.sql new file mode 100644 index 000000000..10b875293 --- /dev/null +++ b/parser/testdata/02916_set_formatting/query.sql @@ -0,0 +1,13 @@ +SELECT formatQuerySingleLine('set additional_table_filters = {\'kjsnckjn\': \'ksanmn\', \'dkm\': \'dd\'}'); +SELECT formatQuerySingleLine('SELECT v FROM t1 SETTINGS additional_table_filters = {\'default.t1\': \'s\'}'); + +DROP TABLE IF EXISTS t1; +DROP VIEW IF EXISTS v1; + +CREATE TABLE t1 (v UInt64, s String) ENGINE=MergeTree() ORDER BY v; +CREATE VIEW v1 (v UInt64) AS SELECT v FROM t1 SETTINGS additional_table_filters = {'default.t1': 's != \'s1%\''}; + +SHOW CREATE TABLE v1 FORMAT Vertical; + +DROP VIEW v1; +DROP TABLE t1; diff --git a/parser/testdata/02916_to_start_of_interval_with_origin/ast.json b/parser/testdata/02916_to_start_of_interval_with_origin/ast.json new file mode 100644 index 000000000..0324e6255 --- /dev/null +++ b/parser/testdata/02916_to_start_of_interval_with_origin/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001085281, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02916_to_start_of_interval_with_origin/metadata.json b/parser/testdata/02916_to_start_of_interval_with_origin/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02916_to_start_of_interval_with_origin/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02916_to_start_of_interval_with_origin/query.sql b/parser/testdata/02916_to_start_of_interval_with_origin/query.sql new file mode 100644 index 000000000..b03ccae31 --- /dev/null +++ b/parser/testdata/02916_to_start_of_interval_with_origin/query.sql @@ -0,0 +1,95 @@ +set session_timezone = 'UTC'; + +SELECT '-- Negative tests'; + +-- time and origin arguments must have the same type +SELECT toStartOfInterval(toDate('2023-01-02 14:45:50'), toIntervalSecond(5), toDate32('2023-01-02 14:44:30')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate('2023-01-02 14:45:50'), toIntervalMillisecond(12), toDateTime('2023-01-02 14:44:30')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate32('2023-01-02 14:45:50'), toIntervalHour(5), toDate('2023-01-02 14:44:30')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalMinute(1), toDateTime64('2023-01-02 14:44:30', 2)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDateTime64('2023-01-02 14:45:50', 2), toIntervalMinute(1), toDate('2023-01-02 14:44:30')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- 
the origin must be before the time +SELECT toStartOfInterval(toDateTime('2023-01-02 14:42:50'), toIntervalMinute(1), toDateTime('2023-01-02 14:44:30')); -- { serverError BAD_ARGUMENTS } + +-- the origin must be constant +SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalMinute(1), number % 2 == 0 ? toDateTime('2023-02-01 15:55:00') : toDateTime('2023-01-01 15:55:00')) from numbers(1); -- { serverError ILLEGAL_COLUMN } +SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalHour(1), materialize(toDateTime('2023-01-02 14:44:30')), 'Europe/Amsterdam'); -- { serverError ILLEGAL_COLUMN } + +-- with 4 arguments, the 3rd one must not be a string or an integer +SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalYear(1), 'Europe/Amsterdam', 'Europe/Amsterdam'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalYear(1), 5, 'Europe/Amsterdam'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- too many arguments +SELECT toStartOfInterval(toDateTime('2023-01-02 14:45:50'), toIntervalYear(1), toDateTime('2020-01-02 14:44:30'), 'Europe/Amsterdam', 5); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT 'Time and origin as Date'; +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalYear(1), toDate('2022-02-01')); +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalQuarter(1), toDate('2022-02-01')); +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalMonth(1), toDate('2023-09-08')); +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalWeek(1), toDate('2023-10-01')); +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalDay(1), toDate('2023-10-08')); +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalHour(1), toDate('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalMinute(1), toDate('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalSecond(1), toDate('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalMillisecond(1), toDate('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalMicrosecond(1), toDate('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate('2023-10-09'), toIntervalNanosecond(1), toDate('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 'Time and origin as Date32'; +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalYear(1), toDate32('2022-02-01')); +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalQuarter(1), toDate32('2022-02-01')); +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalMonth(1), toDate32('2023-09-08')); +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalWeek(1), toDate32('2023-10-01')); +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalDay(1), toDate32('2023-10-08')); +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalHour(1), toDate32('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalMinute(1), toDate32('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalSecond(1), toDate32('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalMillisecond(1), 
toDate32('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalMicrosecond(1), toDate32('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDate32('2023-10-09'), toIntervalNanosecond(1), toDate32('2023-10-09')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 'Time and origin as DateTime'; +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalYear(1), toDateTime('2022-02-01 09:08:07')); +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalQuarter(1), toDateTime('2022-02-01 09:08:07')); +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalMonth(1), toDateTime('2023-09-08 09:08:07')); +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalWeek(1), toDateTime('2023-10-01 09:08:07')); +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalDay(1), toDateTime('2023-10-08 09:08:07')); +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalHour(1), toDateTime('2023-10-09 09:10:07')); +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalMinute(1), toDateTime('2023-10-09 09:10:07')); +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalSecond(1), toDateTime('2023-10-09 09:10:07')); +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalMillisecond(1), toDateTime('2023-10-09 10:11:12')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalMicrosecond(1), toDateTime('2023-10-09 10:11:12')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toStartOfInterval(toDateTime('2023-10-09 10:11:12'), toIntervalNanosecond(1), toDateTime('2023-10-09 10:11:12')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 'Time and origin as DateTime64(9)'; +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalYear(1), toDateTime64('2022-02-01 09:08:07.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalQuarter(1), toDateTime64('2022-02-01 09:08:07.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalMonth(1), toDateTime64('2023-09-10 09:08:07.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalWeek(1), toDateTime64('2023-10-01 09:08:07.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalDay(1), toDateTime64('2023-10-08 09:08:07.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalHour(1), toDateTime64('2023-10-09 09:10:07.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalMinute(1), toDateTime64('2023-10-09 09:10:11.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalSecond(1), toDateTime64('2023-10-09 10:11:10.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalMillisecond(1), toDateTime64('2023-10-09 10:11:12.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalMicrosecond(1), toDateTime64('2023-10-09 10:11:12.123456789', 9)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987654321', 9), toIntervalNanosecond(1), toDateTime64('2023-10-09 10:11:12.123456789', 9)); + +SELECT 'Time and origin as 
DateTime64(3)'; +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalYear(1), toDateTime64('2022-02-01 09:08:07.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalQuarter(1), toDateTime64('2022-02-01 09:08:07.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalMonth(1), toDateTime64('2023-09-08 09:08:07.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalWeek(1), toDateTime64('2023-10-01 09:08:07.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalDay(1), toDateTime64('2023-10-08 09:08:07.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalHour(1), toDateTime64('2023-10-09 09:10:07.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalMinute(1), toDateTime64('2023-10-09 10:10:11.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalSecond(1), toDateTime64('2023-10-09 10:11:10.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalMillisecond(1), toDateTime64('2023-10-09 10:11:12.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalMicrosecond(1), toDateTime64('2023-10-09 10:11:12.123', 3)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.987', 3), toIntervalNanosecond(1), toDateTime64('2023-10-09 10:11:12.123', 3)); + +SELECT 'Non-const arguments'; +SELECT toStartOfInterval(number % 2 == 0 ? toDateTime64('2023-03-01 15:55:00', 2) : toDateTime64('2023-02-01 15:55:00', 2), toIntervalMinute(1), toDateTime64('2023-01-01 13:55:00', 2), 'Europe/Amsterdam') from numbers(5); +SELECT toStartOfInterval(number % 2 == 0 ? 
toDateTime('2023-03-01 15:55:00') : toDateTime('2023-02-01 15:55:00'), toIntervalHour(1), toDateTime('2023-01-01 13:55:00'), 'Europe/Amsterdam') from numbers(5); +SELECT toStartOfInterval(materialize(toDateTime('2023-01-02 14:45:50')), toIntervalHour(1), toDateTime('2023-01-02 14:44:30'), 'Europe/Amsterdam'); +SELECT toStartOfInterval(materialize(toDateTime64('2023-02-01 15:45:50', 2)), toIntervalHour(1), toDateTime64('2023-01-02 14:44:30', 2), 'Europe/Amsterdam'); diff --git a/parser/testdata/02917_transform_tsan/ast.json b/parser/testdata/02917_transform_tsan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02917_transform_tsan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02917_transform_tsan/metadata.json b/parser/testdata/02917_transform_tsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02917_transform_tsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02917_transform_tsan/query.sql b/parser/testdata/02917_transform_tsan/query.sql new file mode 100644 index 000000000..dac79f83d --- /dev/null +++ b/parser/testdata/02917_transform_tsan/query.sql @@ -0,0 +1,2 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/56815 +SELECT transform(arrayJoin([NULL, NULL]), [NULL, NULL], [NULL]) GROUP BY GROUPING SETS (('0.1'), ('-0.2147483647')); diff --git a/parser/testdata/02918_alter_temporary_table/ast.json b/parser/testdata/02918_alter_temporary_table/ast.json new file mode 100644 index 000000000..69a01e27e --- /dev/null +++ b/parser/testdata/02918_alter_temporary_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alter_test (children 1)" + }, + { + "explain": " Identifier alter_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001023087, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02918_alter_temporary_table/metadata.json b/parser/testdata/02918_alter_temporary_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02918_alter_temporary_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02918_alter_temporary_table/query.sql b/parser/testdata/02918_alter_temporary_table/query.sql new file mode 100644 index 000000000..58840a20e --- /dev/null +++ b/parser/testdata/02918_alter_temporary_table/query.sql @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS alter_test; + +CREATE TEMPORARY TABLE alter_test (CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32, NestedColumn Nested(A UInt8, S String), ToDrop UInt32); + +INSERT INTO alter_test VALUES (1, '2014-01-01', 2, 3, [1,2,3], ['a','b','c'], 4); + +ALTER TABLE alter_test ADD COLUMN Added0 UInt32; +ALTER TEMPORARY TABLE alter_test ADD COLUMN Added2 UInt32; +ALTER TABLE alter_test ADD COLUMN Added1 UInt32 AFTER Added0; + +ALTER TABLE alter_test ADD COLUMN AddedNested1 Nested(A UInt32, B UInt64) AFTER Added2; +ALTER TABLE alter_test ADD COLUMN AddedNested1.C Array(String) AFTER AddedNested1.B; +ALTER TABLE alter_test ADD COLUMN AddedNested2 Nested(A UInt32, B UInt64) AFTER AddedNested1; + +DESC TABLE alter_test; + +ALTER TABLE alter_test DROP COLUMN ToDrop; + +ALTER TABLE alter_test MODIFY COLUMN Added0 String; + +ALTER TABLE alter_test DROP COLUMN NestedColumn.A; +ALTER TABLE alter_test DROP COLUMN NestedColumn.S; + +ALTER TABLE alter_test DROP COLUMN AddedNested1.B; + +ALTER TABLE alter_test 
ADD COLUMN IF NOT EXISTS Added0 UInt32; +ALTER TABLE alter_test ADD COLUMN IF NOT EXISTS AddedNested1 Nested(A UInt32, B UInt64); +ALTER TABLE alter_test ADD COLUMN IF NOT EXISTS AddedNested1.C Array(String); +ALTER TABLE alter_test MODIFY COLUMN IF EXISTS ToDrop UInt64; +ALTER TABLE alter_test DROP COLUMN IF EXISTS ToDrop; +ALTER TABLE alter_test COMMENT COLUMN IF EXISTS ToDrop 'new comment'; +ALTER TABLE alter_test RENAME COLUMN Added0 to RenamedColumn; + +DESC TABLE alter_test; + +SELECT * FROM alter_test; + +DROP TABLE alter_test; diff --git a/parser/testdata/02918_analyzer_to_ast_crash/ast.json b/parser/testdata/02918_analyzer_to_ast_crash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02918_analyzer_to_ast_crash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02918_analyzer_to_ast_crash/metadata.json b/parser/testdata/02918_analyzer_to_ast_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02918_analyzer_to_ast_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02918_analyzer_to_ast_crash/query.sql b/parser/testdata/02918_analyzer_to_ast_crash/query.sql new file mode 100644 index 000000000..274f74d6a --- /dev/null +++ b/parser/testdata/02918_analyzer_to_ast_crash/query.sql @@ -0,0 +1,5 @@ +WITH + x AS (SELECT in((SELECT * FROM y))), + y AS (SELECT 1) +SELECT * FROM x; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + diff --git a/parser/testdata/02918_fuzzjson_table_function/ast.json b/parser/testdata/02918_fuzzjson_table_function/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02918_fuzzjson_table_function/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02918_fuzzjson_table_function/metadata.json b/parser/testdata/02918_fuzzjson_table_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02918_fuzzjson_table_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02918_fuzzjson_table_function/query.sql b/parser/testdata/02918_fuzzjson_table_function/query.sql new file mode 100644 index 000000000..7ce064834 --- /dev/null +++ b/parser/testdata/02918_fuzzjson_table_function/query.sql @@ -0,0 +1,92 @@ +-- Tags: no-parallel, no-replicated-database: Named collection is used +-- + +DROP NAMED COLLECTION IF EXISTS 02918_json_fuzzer; +CREATE NAMED COLLECTION 02918_json_fuzzer AS json_str='{}'; + +SELECT * FROM fuzzJSON(02918_json_fuzzer, random_seed=54321) LIMIT 10; +SELECT * FROM fuzzJSON(02918_json_fuzzer, json_str='{"ClickHouse":"Is Fast"}', random_seed=1337) LIMIT 20; +SELECT * FROM fuzzJSON(02918_json_fuzzer, json_str='{"students":[{"name":"Alice"}, {"name":"Bob"}]}', random_seed=1337) LIMIT 20; +SELECT * FROM fuzzJSON(02918_json_fuzzer, json_str='{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]}', random_seed=123456, reuse_output=true) LIMIT 20; +SELECT * FROM fuzzJSON(02918_json_fuzzer, json_str='{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]}', random_seed=123456, reuse_output=false) LIMIT 20; +SELECT * FROM fuzzJSON(02918_json_fuzzer, + json_str='{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]}', + random_seed=123456, + reuse_output=0, + max_output_length=128) LIMIT 20; + +SELECT * FROM fuzzJSON(02918_json_fuzzer, + json_str='{"schedule":[{"breakfast":"7am"}, {"lunch":"12pm"}]}', + random_seed=123456, + reuse_output=0, + max_output_length=65536, + max_nesting_level=10, + 
max_array_size=20) LIMIT 20; + +SELECT * FROM fuzzJSON(02918_json_fuzzer, + random_seed=6667, + max_nesting_level=0) LIMIT 10; + +SELECT * FROM fuzzJSON(02918_json_fuzzer, + random_seed=6667, + max_object_size=0, + max_array_size=0) LIMIT 10; + +-- +DROP TABLE IF EXISTS 02918_table_str; +CREATE TABLE 02918_table_str (json_str String) Engine=Memory; + +INSERT INTO 02918_table_str SELECT * FROM fuzzJSON(02918_json_fuzzer) limit 10; +INSERT INTO 02918_table_str SELECT * FROM fuzzJSON(02918_json_fuzzer) limit 10; +INSERT INTO 02918_table_str SELECT * FROM fuzzJSON(02918_json_fuzzer, random_seed=123, reuse_output=true) limit 10; +INSERT INTO 02918_table_str SELECT * FROM fuzzJSON( + 02918_json_fuzzer, + json_str='{"name": "John Doe", "age": 30, "address": {"city": "Citiville", "zip": "12345"}, "hobbies": ["reading", "traveling", "coding"]}', + random_seed=6666) LIMIT 200; + +INSERT INTO 02918_table_str SELECT * FROM fuzzJSON( + 02918_json_fuzzer, + json_str='{"name": "John Doe", "age": 30, "address": {"city": "Citiville", "zip": "12345"}, "hobbies": ["reading", "traveling", "coding"]}', + random_seed=6666, + min_key_length=1, + max_key_length=5) LIMIT 200; + +INSERT INTO 02918_table_str SELECT * FROM fuzzJSON( + 02918_json_fuzzer, + json_str='{"name": "John Doe", "age": 30, "address": {"city": "Citiville", "zip": "12345"}, "hobbies": ["reading", "traveling", "coding"]}', + max_nesting_level=128, + reuse_output=true, + random_seed=6666, + min_key_length=5, + max_key_length=5) LIMIT 200; + +INSERT INTO 02918_table_str SELECT * FROM fuzzJSON( + 02918_json_fuzzer, + json_str='{"name": "John Doe", "age": 30, "address": {"city": "Citiville", "zip": "12345"}, "hobbies": ["reading", "traveling", "coding"]}', + random_seed=6666, + reuse_output=1, + probability=0.5, + max_output_length=65536, + max_nesting_level=18446744073709551615, + max_array_size=18446744073709551615, + max_object_size=18446744073709551615, + max_key_length=65536, + max_string_value_length=65536) LIMIT 100; + +SELECT count() FROM 02918_table_str; + +DROP TABLE IF EXISTS 02918_table_str; + +-- +SELECT * FROM fuzzJSON(02918_json_fuzzer, max_output_length="Hello") LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, max_output_length=65537) LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, probability=10) LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, probability=-0.1) LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, probability=1.1) LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, probability=1.1) LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, max_string_value_length=65537) LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, max_key_length=65537) LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, max_key_length=10, min_key_length=0) LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, max_key_length=10, min_key_length=11) LIMIT 10; -- { serverError BAD_ARGUMENTS } +SELECT * FROM fuzzJSON(02918_json_fuzzer, equals(random_seed, viewExplain('EXPLAIN', 'actions = 1', (SELECT count(*) FROM numbers(10))), 54321)) LIMIT 10; -- { serverError BAD_ARGUMENTS } + diff --git a/parser/testdata/02918_join_pm_lc_crash/ast.json b/parser/testdata/02918_join_pm_lc_crash/ast.json new file mode 100644 index 000000000..490a2e17e --- 
/dev/null +++ b/parser/testdata/02918_join_pm_lc_crash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02918_join_pm_lc_crash/metadata.json b/parser/testdata/02918_join_pm_lc_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02918_join_pm_lc_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02918_join_pm_lc_crash/query.sql b/parser/testdata/02918_join_pm_lc_crash/query.sql new file mode 100644 index 000000000..0326acff4 --- /dev/null +++ b/parser/testdata/02918_join_pm_lc_crash/query.sql @@ -0,0 +1,30 @@ + +SET joined_subquery_requires_alias = 0, join_algorithm = 'partial_merge'; + +SET enable_analyzer = 0, join_use_nulls = 0; + +SELECT * FROM (SELECT dummy AS val FROM system.one) +JOIN (SELECT toLowCardinality(toNullable(dummy)) AS val +FROM system.one GROUP BY val WITH TOTALS) +USING (val); + +SET enable_analyzer = 0, join_use_nulls = 1; + +SELECT * FROM (SELECT dummy AS val FROM system.one) +JOIN (SELECT toLowCardinality(toNullable(dummy)) AS val +FROM system.one GROUP BY val WITH TOTALS) +USING (val); + +SET enable_analyzer = 1, join_use_nulls = 0; + +SELECT * FROM (SELECT dummy AS val FROM system.one) +JOIN (SELECT toLowCardinality(toNullable(dummy)) AS val +FROM system.one GROUP BY val WITH TOTALS) +USING (val); + +SET enable_analyzer = 1, join_use_nulls = 1; + +SELECT * FROM (SELECT dummy AS val FROM system.one) +JOIN (SELECT toLowCardinality(toNullable(dummy)) AS val +FROM system.one GROUP BY val WITH TOTALS) +USING (val); diff --git a/parser/testdata/02918_optimize_count_for_merge_tables/ast.json b/parser/testdata/02918_optimize_count_for_merge_tables/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02918_optimize_count_for_merge_tables/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02918_optimize_count_for_merge_tables/metadata.json b/parser/testdata/02918_optimize_count_for_merge_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02918_optimize_count_for_merge_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02918_optimize_count_for_merge_tables/query.sql b/parser/testdata/02918_optimize_count_for_merge_tables/query.sql new file mode 100644 index 000000000..752129105 --- /dev/null +++ b/parser/testdata/02918_optimize_count_for_merge_tables/query.sql @@ -0,0 +1,35 @@ +-- Tests that Merge-engine (not: MergeTree!) 
tables support the trivial count +-- optimization if all underlying tables support it + +DROP TABLE IF EXISTS mt1; +DROP TABLE IF EXISTS mt2; +DROP TABLE IF EXISTS merge; + +CREATE TABLE mt1 (id UInt64) ENGINE = MergeTree ORDER BY id; +CREATE TABLE mt2 (id UInt64) ENGINE = MergeTree ORDER BY id; +CREATE TABLE merge (id UInt64) ENGINE = Merge(currentDatabase(), '^mt[0-9]+$'); + +INSERT INTO mt1 VALUES (1); +INSERT INTO mt2 VALUES (1); + +SET apply_mutations_on_fly = 0; +SET apply_patch_parts = 0; + +SELECT count() FROM merge; + +-- can use the trivial count optimization +EXPLAIN SELECT count() FROM merge settings enable_analyzer=0; + +CREATE TABLE mt3 (id UInt64) ENGINE = TinyLog; + +INSERT INTO mt2 VALUES (2); + +SELECT count() FROM merge; + +-- can't use the trivial count optimization as TinyLog doesn't support it +EXPLAIN SELECT count() FROM merge settings enable_analyzer=0; + +DROP TABLE IF EXISTS mt1; +DROP TABLE IF EXISTS mt2; +DROP TABLE IF EXISTS mt3; +DROP TABLE IF EXISTS merge; diff --git a/parser/testdata/02918_parallel_replicas_custom_key_unavailable_replica/ast.json b/parser/testdata/02918_parallel_replicas_custom_key_unavailable_replica/ast.json new file mode 100644 index 000000000..1deaef0f6 --- /dev/null +++ b/parser/testdata/02918_parallel_replicas_custom_key_unavailable_replica/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02918_parallel_replicas (children 1)" + }, + { + "explain": " Identifier 02918_parallel_replicas" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001210695, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/02918_parallel_replicas_custom_key_unavailable_replica/metadata.json b/parser/testdata/02918_parallel_replicas_custom_key_unavailable_replica/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02918_parallel_replicas_custom_key_unavailable_replica/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02918_parallel_replicas_custom_key_unavailable_replica/query.sql b/parser/testdata/02918_parallel_replicas_custom_key_unavailable_replica/query.sql new file mode 100644 index 000000000..763a4530b --- /dev/null +++ b/parser/testdata/02918_parallel_replicas_custom_key_unavailable_replica/query.sql @@ -0,0 +1,34 @@ +DROP TABLE IF EXISTS 02918_parallel_replicas; + +CREATE TABLE 02918_parallel_replicas (x String, y Int32) ENGINE = MergeTree ORDER BY cityHash64(x); + +INSERT INTO 02918_parallel_replicas SELECT toString(number), number % 4 FROM numbers(1000); + +SET prefer_localhost_replica=0; + +--- if we try to query unavailable replica, connection will be retried +--- but a warning log message will be printed out +SET send_logs_level='error'; +-- { echoOn } +SELECT y, count() +FROM cluster(test_cluster_1_shard_3_replicas_1_unavailable, currentDatabase(), 02918_parallel_replicas) +GROUP BY y +ORDER BY y +SETTINGS max_parallel_replicas=3, enable_parallel_replicas=1, parallel_replicas_custom_key='cityHash64(y)', parallel_replicas_mode='custom_key_sampling'; + +SELECT y, count() +FROM cluster(test_cluster_1_shard_3_replicas_1_unavailable, currentDatabase(), 02918_parallel_replicas) +GROUP BY y +ORDER BY y +SETTINGS max_parallel_replicas=3, enable_parallel_replicas=1, parallel_replicas_custom_key='cityHash64(y)', parallel_replicas_mode='custom_key_range'; + +SET use_hedged_requests=0; +SELECT y, count() +FROM cluster(test_cluster_1_shard_3_replicas_1_unavailable,
currentDatabase(), 02918_parallel_replicas) +GROUP BY y +ORDER BY y +SETTINGS max_parallel_replicas=3, enable_parallel_replicas=1, parallel_replicas_custom_key='cityHash64(y)', parallel_replicas_mode='custom_key_sampling'; +-- { echoOff } +SET send_logs_level='warning'; + +DROP TABLE 02918_parallel_replicas; diff --git a/parser/testdata/02918_wrong_dictionary_source/ast.json b/parser/testdata/02918_wrong_dictionary_source/ast.json new file mode 100644 index 000000000..c4dd4382e --- /dev/null +++ b/parser/testdata/02918_wrong_dictionary_source/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery id_value_dictionary (children 1)" + }, + { + "explain": " Identifier id_value_dictionary" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001268113, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/02918_wrong_dictionary_source/metadata.json b/parser/testdata/02918_wrong_dictionary_source/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02918_wrong_dictionary_source/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02918_wrong_dictionary_source/query.sql b/parser/testdata/02918_wrong_dictionary_source/query.sql new file mode 100644 index 000000000..e729ef74c --- /dev/null +++ b/parser/testdata/02918_wrong_dictionary_source/query.sql @@ -0,0 +1,11 @@ +DROP DICTIONARY IF EXISTS id_value_dictionary; +DROP TABLE IF EXISTS source_table; + +CREATE TABLE source_table(id UInt64, value String) ENGINE = MergeTree ORDER BY tuple(); + +-- There is no "CLICKHOUSEX" dictionary source, so the next query must fail even if `dictionaries_lazy_load` is enabled. +CREATE DICTIONARY id_value_dictionary(id UInt64, value String) PRIMARY KEY id SOURCE(CLICKHOUSEX(TABLE 'source_table')) LIFETIME(MIN 0 MAX 1000) LAYOUT(FLAT()); -- { serverError UNKNOWN_ELEMENT_IN_CONFIG } + +SELECT count() FROM system.dictionaries WHERE name=='id_value_dictionary' AND database==currentDatabase(); + +DROP TABLE source_table; diff --git a/parser/testdata/02919_alter_temporary_table_with_nondefault_engine/ast.json b/parser/testdata/02919_alter_temporary_table_with_nondefault_engine/ast.json new file mode 100644 index 000000000..6a562efcb --- /dev/null +++ b/parser/testdata/02919_alter_temporary_table_with_nondefault_engine/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alter_test (children 1)" + }, + { + "explain": " Identifier alter_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001162245, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02919_alter_temporary_table_with_nondefault_engine/metadata.json b/parser/testdata/02919_alter_temporary_table_with_nondefault_engine/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02919_alter_temporary_table_with_nondefault_engine/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02919_alter_temporary_table_with_nondefault_engine/query.sql b/parser/testdata/02919_alter_temporary_table_with_nondefault_engine/query.sql new file mode 100644 index 000000000..8b22c082e --- /dev/null +++ b/parser/testdata/02919_alter_temporary_table_with_nondefault_engine/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS alter_test; + +CREATE TEMPORARY TABLE alter_test (a UInt32, b UInt8) ENGINE=MergeTree ORDER BY a; +INSERT INTO 
alter_test VALUES (1, 2); +ALTER TEMPORARY TABLE alter_test MODIFY COLUMN b UInt8 FIRST; +DESC TABLE alter_test; + +DROP TABLE IF EXISTS alter_test; + +CREATE TEMPORARY TABLE alter_test (a UInt32, b UInt8) ENGINE=Log; +INSERT INTO alter_test VALUES (1, 2); +ALTER TEMPORARY TABLE alter_test COMMENT COLUMN b 'this is comment for log engine'; +DESC TABLE alter_test; + +DROP TABLE IF EXISTS alter_test; + +CREATE TEMPORARY TABLE alter_test (a UInt32, b UInt8) ENGINE=Null; +INSERT INTO alter_test VALUES (1, 2); +ALTER TEMPORARY TABLE alter_test MODIFY COLUMN b UInt8 FIRST; +DESC TABLE alter_test; + diff --git a/parser/testdata/02919_ddsketch_quantile/ast.json b/parser/testdata/02919_ddsketch_quantile/ast.json new file mode 100644 index 000000000..9d80f84cc --- /dev/null +++ b/parser/testdata/02919_ddsketch_quantile/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '1'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001221864, + "rows_read": 5, + "bytes_read": 172 + } +} diff --git a/parser/testdata/02919_ddsketch_quantile/metadata.json b/parser/testdata/02919_ddsketch_quantile/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02919_ddsketch_quantile/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02919_ddsketch_quantile/query.sql b/parser/testdata/02919_ddsketch_quantile/query.sql new file mode 100644 index 000000000..d98978c11 --- /dev/null +++ b/parser/testdata/02919_ddsketch_quantile/query.sql @@ -0,0 +1,73 @@ +SELECT '1'; -- simple test +SELECT round(quantileDD(0.01, 0.5)(number), 2) FROM numbers(200); +SELECT round(quantileDD(0.0001, 0.69)(number), 2) FROM numbers(500); +SELECT round(quantileDD(0.003, 0.42)(number), 2) FROM numbers(200); +SELECT round(quantileDD(0.02, 0.99)(number), 2) FROM numbers(500); + +SELECT '2'; -- median is close to 0 +SELECT round(quantileDD(0.01, 0.5)(number), 2) +FROM +( + SELECT arrayJoin([toInt64(number), number - 10]) AS number + FROM numbers(0, 10) +); +SELECT round(quantileDD(0.01, 0.5)(number - 10), 2) FROM numbers(21); + +SELECT '3'; -- all values are negative +SELECT round(quantileDD(0.01, 0.99)(-number), 2) FROM numbers(1, 500); + +SELECT '4'; -- min and max values of integer types (-2^63, 2^63-1) +SELECT round(quantileDD(0.01, 0.5)(number), 2) +FROM +( + SELECT arrayJoin([toInt64(number), number - 9223372036854775808, toInt64(number + 9223372036854775798)]) AS number + FROM numbers(0, 10) +); + +SELECT '5'; -- min and max values of floating point types +SELECT round(quantileDD(0.01, 0.42)(number), 2) +FROM +( + SELECT arrayJoin([toFloat32(number), number - 3.4028235e+38, toFloat32(number + 3.4028235e+38)]) AS number + FROM numbers(0, 10) +); + +SELECT '6'; -- denormalized floats +SELECT round(quantileDD(0.01, 0.69)(number), 2) +FROM +( + SELECT arrayJoin([toFloat32(number), number - 1.1754944e-38, toFloat32(number + 1.1754944e-38)]) AS number + FROM numbers(0, 10) +); + +SELECT '7'; -- NaNs +SELECT round(quantileDD(0.01, 0.5)(number), 2) +FROM +( + SELECT arrayJoin([toFloat32(number), NaN * number]) AS number + FROM numbers(0, 10) +); + +SELECT '8'; -- sparse sketch + +SELECT round(quantileDD(0.01, 0.75)(number), 2) +FROM +( + SELECT number * 1e7 AS number + FROM 
numbers(20) +); + +SELECT '9'; -- ser/deser + +DROP TABLE IF EXISTS `02919_ddsketch_quantile`; + +CREATE TABLE `02919_ddsketch_quantile` +ENGINE = Log AS +SELECT quantilesDDState(0.001, 0.9)(number) AS sketch +FROM numbers(1000); + +INSERT INTO `02919_ddsketch_quantile` SELECT quantilesDDState(0.001, 0.9)(number + 1000) +FROM numbers(1000); + +SELECT arrayMap(a -> round(a, 2), (quantilesDDMerge(0.001, 0.9)(sketch))) +FROM `02919_ddsketch_quantile`; diff --git a/parser/testdata/02919_insert_meet_eternal_hardware_error/ast.json b/parser/testdata/02919_insert_meet_eternal_hardware_error/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02919_insert_meet_eternal_hardware_error/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02919_insert_meet_eternal_hardware_error/metadata.json b/parser/testdata/02919_insert_meet_eternal_hardware_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02919_insert_meet_eternal_hardware_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02919_insert_meet_eternal_hardware_error/query.sql b/parser/testdata/02919_insert_meet_eternal_hardware_error/query.sql new file mode 100644 index 000000000..b04b22ac9 --- /dev/null +++ b/parser/testdata/02919_insert_meet_eternal_hardware_error/query.sql @@ -0,0 +1,27 @@ +-- Tags: zookeeper, no-parallel, no-shared-merge-tree +-- no-shared-merge-tree: This failure injection is only RMT specific + +DROP TABLE IF EXISTS t_hardware_error NO DELAY; + +CREATE TABLE t_hardware_error ( + KeyID UInt32 +) Engine = ReplicatedMergeTree('/clickhouse/tables/{shard}/{database}/t_async_insert_dedup', '{replica}') +ORDER BY (KeyID); + +insert into t_hardware_error values (1), (2), (3), (4), (5); + +-- Data is written to ZK but the connection fails right after and we can't recover it +system enable failpoint replicated_merge_tree_commit_zk_fail_after_op; +system enable failpoint replicated_merge_tree_commit_zk_fail_when_recovering_from_hw_fault; + +insert into t_hardware_error values (6), (7), (8), (9), (10); -- {serverError UNKNOWN_STATUS_OF_INSERT} + +system disable failpoint replicated_merge_tree_commit_zk_fail_after_op; +system disable failpoint replicated_merge_tree_commit_zk_fail_when_recovering_from_hw_fault; + +insert into t_hardware_error values (11), (12), (13), (14), (15); + +-- All 3 commits have been written correctly.
The unknown status is ok (since it failed after the operation) +Select arraySort(groupArray(KeyID)) FROM t_hardware_error; + +DROP TABLE t_hardware_error NO DELAY; diff --git a/parser/testdata/02919_segfault_nullable_materialized_update/ast.json b/parser/testdata/02919_segfault_nullable_materialized_update/ast.json new file mode 100644 index 000000000..1e8bde81a --- /dev/null +++ b/parser/testdata/02919_segfault_nullable_materialized_update/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery crash_02919 (children 1)" + }, + { + "explain": " Identifier crash_02919" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001132939, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/02919_segfault_nullable_materialized_update/metadata.json b/parser/testdata/02919_segfault_nullable_materialized_update/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02919_segfault_nullable_materialized_update/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02919_segfault_nullable_materialized_update/query.sql b/parser/testdata/02919_segfault_nullable_materialized_update/query.sql new file mode 100644 index 000000000..f531ec031 --- /dev/null +++ b/parser/testdata/02919_segfault_nullable_materialized_update/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS crash_02919; + +CREATE TABLE crash_02919 ( + b Int64, + c Nullable(Int64) MATERIALIZED b, + d Nullable(Bool) MATERIALIZED b +) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO crash_02919 VALUES (0); +SELECT b, c, d FROM crash_02919; +ALTER TABLE crash_02919 UPDATE b = 1 WHERE 1=1 SETTINGS mutations_sync = 1; +SELECT b, c, d FROM crash_02919; +ALTER TABLE crash_02919 UPDATE b = 0.1 WHERE 1=1 SETTINGS mutations_sync = 1; +SELECT b, c, d FROM crash_02919; + +DROP TABLE crash_02919; diff --git a/parser/testdata/02919_storage_fuzzjson/ast.json b/parser/testdata/02919_storage_fuzzjson/ast.json new file mode 100644 index 000000000..46b85e5ef --- /dev/null +++ b/parser/testdata/02919_storage_fuzzjson/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02919_test_table_noarg (children 1)" + }, + { + "explain": " Identifier 02919_test_table_noarg" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001182163, + "rows_read": 2, + "bytes_read": 96 + } +} diff --git a/parser/testdata/02919_storage_fuzzjson/metadata.json b/parser/testdata/02919_storage_fuzzjson/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02919_storage_fuzzjson/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02919_storage_fuzzjson/query.sql b/parser/testdata/02919_storage_fuzzjson/query.sql new file mode 100644 index 000000000..bf473f4b6 --- /dev/null +++ b/parser/testdata/02919_storage_fuzzjson/query.sql @@ -0,0 +1,65 @@ +DROP TABLE IF EXISTS 02919_test_table_noarg; +CREATE TABLE 02919_test_table_noarg(str String) ENGINE = FuzzJSON('{}'); + +SELECT count() FROM (SELECT * FROM 02919_test_table_noarg LIMIT 100); + +DROP TABLE IF EXISTS 02919_test_table_noarg; + +-- +DROP TABLE IF EXISTS 02919_test_table_valid_args; +CREATE TABLE 02919_test_table_valid_args(str String) ENGINE = FuzzJSON( + '{"pet":"rat"}', NULL); + +SELECT count() FROM (SELECT * FROM 02919_test_table_valid_args LIMIT 100); + +DROP TABLE IF EXISTS 02919_test_table_valid_args; + +-- 
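+-- The next table reuses the same FuzzJSON(json_str[, random_seed]) signature exercised +-- above, this time with a nested source document and a fixed seed. As a minimal sketch +-- of that argument form (hypothetical table name, not part of the checked output): +-- CREATE TABLE 02919_sketch(str String) ENGINE = FuzzJSON('{"pet":"rat"}', 12345);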
+DROP TABLE IF EXISTS 02919_test_table_reuse_args; +CREATE TABLE 02919_test_table_reuse_args(str String) ENGINE = FuzzJSON( + '{ + "name": "Jane Doe", + "age": 30, + "city": "New York", + "contacts": { + "email": "jane@example.com", + "phone": "+1234567890" + }, + "skills": [ + "JavaScript", + "Python", + { + "frameworks": ["React", "Django"] + } + ], + "projects": [ + {"name": "Project A", "status": "completed"}, + {"name": "Project B", "status": "in-progress"} + ] + }', + 12345); + +SELECT count() FROM (SELECT * FROM 02919_test_table_reuse_args LIMIT 100); + +DROP TABLE IF EXISTS 02919_test_table_reuse_args; + +-- +DROP TABLE IF EXISTS 02919_test_table_invalid_col_type; +CREATE TABLE 02919_test_table_invalid_col_type +( + str Nullable(Int64) +) +ENGINE = FuzzJSON('{"pet":"rat"}', NULL); -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS 02919_test_table_invalid_col_type; + +-- +DROP TABLE IF EXISTS 02919_test_multi_col; +CREATE TABLE 02919_test_multi_col +( + str1 String, + str2 String +) ENGINE = FuzzJSON('{"pet":"rat"}', 999); + +SELECT count(str1), count(str2) FROM (SELECT str1, str2 FROM 02919_test_multi_col LIMIT 100); +DROP TABLE IF EXISTS 02919_test_multi_col; diff --git a/parser/testdata/02920_alter_column_of_projections/ast.json b/parser/testdata/02920_alter_column_of_projections/ast.json new file mode 100644 index 000000000..15d92bd63 --- /dev/null +++ b/parser/testdata/02920_alter_column_of_projections/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001436427, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02920_alter_column_of_projections/metadata.json b/parser/testdata/02920_alter_column_of_projections/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02920_alter_column_of_projections/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02920_alter_column_of_projections/query.sql b/parser/testdata/02920_alter_column_of_projections/query.sql new file mode 100644 index 000000000..ab7b79783 --- /dev/null +++ b/parser/testdata/02920_alter_column_of_projections/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS t; + +CREATE TABLE t (uid Int16, name String, age Nullable(Int8), i Int16, j Int16, projection p1 (select name, age, uniq(i), count(j) group by name, age)) ENGINE=MergeTree order by uid settings index_granularity = 1; + +INSERT INTO t VALUES (1231, 'John', 11, 1, 1), (6666, 'Ksenia', 1, 2, 2), (8888, 'Alice', 1, 3, 3), (6667, 'Ksenia', null, 4, 4); + +-- Cannot ALTER, which breaks key column of projection. +ALTER TABLE t MODIFY COLUMN age Nullable(Int32); -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +-- Cannot ALTER, uniq(Int16) is not compatible with uniq(Int32). +ALTER TABLE t MODIFY COLUMN i Int32; -- { serverError CANNOT_CONVERT_TYPE } + +SYSTEM STOP MERGES t; + +SET alter_sync = 0; + +-- Can ALTER, count(Int16) is compatible with count(Int32). +ALTER TABLE t MODIFY COLUMN j Int32; + +-- Projection query works without mutation applied. +SELECT count(j) FROM t GROUP BY name, age; + +SYSTEM START MERGES t; + +SET alter_sync = 1; + +-- Another ALTER to wait for. +ALTER TABLE t MODIFY COLUMN j Int64 SETTINGS mutations_sync = 2; + +-- Projection query works with mutation applied. 
+SELECT count(j) FROM t GROUP BY name, age; + +DROP TABLE t; diff --git a/parser/testdata/02920_fix_json_merge_patch/ast.json b/parser/testdata/02920_fix_json_merge_patch/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02920_fix_json_merge_patch/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02920_fix_json_merge_patch/metadata.json b/parser/testdata/02920_fix_json_merge_patch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02920_fix_json_merge_patch/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02920_fix_json_merge_patch/query.sql b/parser/testdata/02920_fix_json_merge_patch/query.sql new file mode 100644 index 000000000..d51a7833d --- /dev/null +++ b/parser/testdata/02920_fix_json_merge_patch/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest + +select '{"id":1,"foo":["bar"]}' as a, jsonMergePatch(a,toJSONString(map('foo',arrayPushBack(arrayMap(x->JSONExtractString(x),JSONExtractArrayRaw(a, 'foo')),'baz')))) as b; diff --git a/parser/testdata/02920_rename_column_of_skip_indices/ast.json b/parser/testdata/02920_rename_column_of_skip_indices/ast.json new file mode 100644 index 000000000..7e7c07930 --- /dev/null +++ b/parser/testdata/02920_rename_column_of_skip_indices/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001082218, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02920_rename_column_of_skip_indices/metadata.json b/parser/testdata/02920_rename_column_of_skip_indices/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02920_rename_column_of_skip_indices/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02920_rename_column_of_skip_indices/query.sql b/parser/testdata/02920_rename_column_of_skip_indices/query.sql new file mode 100644 index 000000000..cb48d8b50 --- /dev/null +++ b/parser/testdata/02920_rename_column_of_skip_indices/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS t; + +CREATE TABLE t +( + key1 UInt64, + value1 String, + value2 String, + INDEX idx (value1) TYPE set(10) GRANULARITY 1 +) +ENGINE MergeTree ORDER BY key1 SETTINGS index_granularity = 1; + +INSERT INTO t SELECT toDate('2019-10-01') + number % 3, toString(number), toString(number) from numbers(9); + +SYSTEM STOP MERGES t; + +SET alter_sync = 0; + +ALTER TABLE t RENAME COLUMN value1 TO value11; + +-- Index works without mutation applied. +SELECT * FROM t WHERE value11 = '000' SETTINGS max_rows_to_read = 0; + +SYSTEM START MERGES t; + +-- Another ALTER to wait for. +ALTER TABLE t RENAME COLUMN value11 TO value12 SETTINGS mutations_sync = 2; + +-- Index works with mutation applied. 
+SELECT * FROM t WHERE value12 = '000' SETTINGS max_rows_to_read = 0; + +DROP TABLE t; diff --git a/parser/testdata/02920_unary_operators_functions/ast.json b/parser/testdata/02920_unary_operators_functions/ast.json new file mode 100644 index 000000000..66fce7727 --- /dev/null +++ b/parser/testdata/02920_unary_operators_functions/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function not (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function not (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001014036, + "rows_read": 12, + "bytes_read": 453 + } +} diff --git a/parser/testdata/02920_unary_operators_functions/metadata.json b/parser/testdata/02920_unary_operators_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02920_unary_operators_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02920_unary_operators_functions/query.sql b/parser/testdata/02920_unary_operators_functions/query.sql new file mode 100644 index 000000000..3f3c3a161 --- /dev/null +++ b/parser/testdata/02920_unary_operators_functions/query.sql @@ -0,0 +1 @@ +SELECT NOT (0) + NOT (0); \ No newline at end of file diff --git a/parser/testdata/02921_bit_hamming_distance_big_int/ast.json b/parser/testdata/02921_bit_hamming_distance_big_int/ast.json new file mode 100644 index 000000000..8f30b2958 --- /dev/null +++ b/parser/testdata/02921_bit_hamming_distance_big_int/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function CAST (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '314776434768051644139306697240981192872'" + }, + { + "explain": " Literal 'UInt128'" + }, + { + "explain": " Function CAST (alias y) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '0'" + }, + { + "explain": " Literal 'UInt128'" + }, + { + "explain": " Function bitCount (alias a) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitXor (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Function bitHammingDistance (alias b) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001441567, + "rows_read": 22, + "bytes_read": 861 + } +} diff --git a/parser/testdata/02921_bit_hamming_distance_big_int/metadata.json 
b/parser/testdata/02921_bit_hamming_distance_big_int/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02921_bit_hamming_distance_big_int/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02921_bit_hamming_distance_big_int/query.sql b/parser/testdata/02921_bit_hamming_distance_big_int/query.sql new file mode 100644 index 000000000..6f241e104 --- /dev/null +++ b/parser/testdata/02921_bit_hamming_distance_big_int/query.sql @@ -0,0 +1,12 @@ +SELECT 314776434768051644139306697240981192872::UInt128 AS x, 0::UInt128 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; +SELECT 14776434768051644139306697240981192872314776434768051644139306697240981192872::UInt256 AS x, 0::UInt128 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; +SELECT 314776434768051644139306697240981192872::UInt128 AS x, 14776434768051644139306697240981192872314776434768051644139306697240981192872::UInt256 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; + +SELECT 314776434768051644139306697240981192872::Int128 AS x, 0::UInt128 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; +SELECT 14776434768051644139306697240981192872314776434768051644139306697240981192872::Int256 AS x, 0::UInt128 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; +SELECT 314776434768051644139306697240981192872::Int128 AS x, 14776434768051644139306697240981192872314776434768051644139306697240981192872::UInt256 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; + +SELECT 314776434768051644139306697240981192872::UInt128 AS x, 0::Int128 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; +SELECT 14776434768051644139306697240981192872314776434768051644139306697240981192872::UInt256 AS x, 0::Int128 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; +SELECT 314776434768051644139306697240981192872::UInt128 AS x, 14776434768051644139306697240981192872314776434768051644139306697240981192872::Int256 AS y, bitCount(bitXor(x, y)) AS a, bitHammingDistance(x, y) AS b; + diff --git a/parser/testdata/02921_database_filesystem_path_check/ast.json b/parser/testdata/02921_database_filesystem_path_check/ast.json new file mode 100644 index 000000000..6fee0bd8e --- /dev/null +++ b/parser/testdata/02921_database_filesystem_path_check/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery db_filesystem (children 2)" + }, + { + "explain": " Identifier db_filesystem" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Filesystem (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '\/etc'" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001029975, + "rows_read": 6, + "bytes_read": 226 + } +} diff --git a/parser/testdata/02921_database_filesystem_path_check/metadata.json b/parser/testdata/02921_database_filesystem_path_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02921_database_filesystem_path_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02921_database_filesystem_path_check/query.sql b/parser/testdata/02921_database_filesystem_path_check/query.sql new file mode 100644 index 000000000..d62b629df --- /dev/null +++ b/parser/testdata/02921_database_filesystem_path_check/query.sql @@ -0,0 +1,2 @@ +create database db_filesystem 
ENGINE=Filesystem('/etc'); -- { serverError BAD_ARGUMENTS } +create database db_filesystem ENGINE=Filesystem('../../../../../../../../etc'); -- { serverError BAD_ARGUMENTS } \ No newline at end of file diff --git a/parser/testdata/02921_fuzzbits_with_array_join/ast.json b/parser/testdata/02921_fuzzbits_with_array_join/ast.json new file mode 100644 index 000000000..fc4253f60 --- /dev/null +++ b/parser/testdata/02921_fuzzbits_with_array_join/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function length (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function fuzzBits (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'stringstring'" + }, + { + "explain": " Literal Float64_0.5" + }, + { + "explain": " Identifier a" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " ArrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2] (alias a)" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001401183, + "rows_read": 21, + "bytes_read": 850 + } +} diff --git a/parser/testdata/02921_fuzzbits_with_array_join/metadata.json b/parser/testdata/02921_fuzzbits_with_array_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02921_fuzzbits_with_array_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02921_fuzzbits_with_array_join/query.sql b/parser/testdata/02921_fuzzbits_with_array_join/query.sql new file mode 100644 index 000000000..5d80a5fbe --- /dev/null +++ b/parser/testdata/02921_fuzzbits_with_array_join/query.sql @@ -0,0 +1,2 @@ +SELECT length(fuzzBits('stringstring', 0.5)), a FROM numbers(1) ARRAY JOIN [1, 2] AS a; +SELECT length(fuzzBits('stringstring'::FixedString(100), 0.5)), a FROM numbers(1) ARRAY JOIN [1, 2] AS a \ No newline at end of file diff --git a/parser/testdata/02921_parameterized_view_except_queries/ast.json b/parser/testdata/02921_parameterized_view_except_queries/ast.json new file mode 100644 index 000000000..918d6f63a --- /dev/null +++ b/parser/testdata/02921_parameterized_view_except_queries/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '--- Data ---'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001563786, + "rows_read": 5, + "bytes_read": 183 + } +} diff --git a/parser/testdata/02921_parameterized_view_except_queries/metadata.json b/parser/testdata/02921_parameterized_view_except_queries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null 
+++ b/parser/testdata/02921_parameterized_view_except_queries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02921_parameterized_view_except_queries/query.sql b/parser/testdata/02921_parameterized_view_except_queries/query.sql new file mode 100644 index 000000000..8212a022d --- /dev/null +++ b/parser/testdata/02921_parameterized_view_except_queries/query.sql @@ -0,0 +1,70 @@ +select '--- Data ---'; + +DROP VIEW IF EXISTS V_DELTA; +DROP TABLE IF EXISTS users; + +CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=Memory; + +INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users VALUES (6666, 'Ksenia', 48); +INSERT INTO users VALUES (8888, 'Alice', 50); + +SELECT * FROM users order by uid; + +select '--- Params ---'; + +-- set params +set param_a1 = '10'; +set param_a2 = '50'; +set param_a3 = '10'; +set param_a4 = '40'; +-- check +select {a1: Int32}, {a2: Int32}, {a3: Int32}, {a4: Int32}; + +select '--- First view ---'; +-- using 4 params in the select of the view works here. +CREATE OR REPLACE VIEW V_DELTA AS +select distinct uid from +( +select uid, name, age from users +where age >= {a1: Int32} +OR age >= {a2: Int32} +OR age >= {a3: Int32} +OR age >= {a4: Int32} +) +order by uid; + +SELECT * FROM V_DELTA(a1=10, a2=50, a3=10, a4=40); + + +select '--- Second query result ---'; + +-- check individual query before the next part +select distinct uid from +( +select uid, name, age from users +where age >= {a1: Int32} AND age <= {a2: Int32} +EXCEPT +select uid, name, age from users +where age >= {a3: Int32} AND age <= {a4: Int32} +) +order by uid; + +select '--- Second view result ---'; + +-- using 4 params in the select of the view does not work here. +CREATE OR REPLACE VIEW V_DELTA AS +select distinct uid from +( +select uid, name, age from users +where age >= {a1: Int32} AND age <= {a2: Int32} +EXCEPT +select uid, name, age from users +where age >= {a3: Int32} AND age <= {a4: Int32} +) +order by uid; + +SELECT * FROM V_DELTA(a1=10, a2=50, a3=10, a4=40); + +DROP VIEW V_DELTA; +DROP TABLE users; diff --git a/parser/testdata/02922_analyzer_aggregate_nothing_type/ast.json b/parser/testdata/02922_analyzer_aggregate_nothing_type/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02922_analyzer_aggregate_nothing_type/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02922_analyzer_aggregate_nothing_type/metadata.json b/parser/testdata/02922_analyzer_aggregate_nothing_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02922_analyzer_aggregate_nothing_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02922_analyzer_aggregate_nothing_type/query.sql b/parser/testdata/02922_analyzer_aggregate_nothing_type/query.sql new file mode 100644 index 000000000..c855e01a2 --- /dev/null +++ b/parser/testdata/02922_analyzer_aggregate_nothing_type/query.sql @@ -0,0 +1,104 @@ + +select sum(NULL); +select quantile(0.5)(NULL); +select quantiles(0.1, 0.2)(NULL :: Nullable(UInt32)); +select quantile(0.5)(NULL), quantiles(0.1, 0.2)(NULL :: Nullable(UInt32)), count(NULL), sum(NULL); + +SELECT count(NULL) FROM remote('127.0.0.{1,2}', numbers(3)) GROUP BY number % 2 WITH TOTALS; +SELECT quantile(0.5)(NULL) FROM remote('127.0.0.{1,2}', numbers(3)) GROUP BY number % 2 WITH TOTALS; +SELECT quantiles(0.1, 0.2)(NULL :: Nullable(UInt32)) FROM remote('127.0.0.{1,2}', numbers(3)) GROUP BY number % 2 WITH TOTALS; + +SELECT '-- nothings:'; +SELECT nothing() 
as n, toTypeName(n); +SELECT nothing(1) as n, toTypeName(n); +SELECT nothing(NULL) as n, toTypeName(n); +SELECT nothingUInt64() as n, toTypeName(n); +SELECT nothingUInt64(1) as n, toTypeName(n); +SELECT nothingUInt64(NULL) as n, toTypeName(n); +SELECT nothingNull() as n, toTypeName(n); +SELECT nothingNull(1) as n, toTypeName(n); +SELECT nothingNull(NULL) as n, toTypeName(n); + +SELECT '-- quantile:'; +SELECT quantileArray(0.5)([NULL, NULL]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT quantileArrayIf(0.5)([NULL], 1) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT quantileArrayIf(0.5)([NULL], 0) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT quantileIfArray(0.5)([NULL, NULL], [1, 0]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT quantileIfArray(0.5)([1, NULL], [1, 0]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT quantileIfArrayIf(0.5)([1, NULL], [1, 0], 1) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT quantileIfArrayArray(0.5)([[1, NULL]], [[1, 0]]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); + +SELECT '-- quantiles:'; +select quantilesArray(0.5, 0.9)([NULL :: Nullable(UInt64), NULL]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT quantilesArrayIf(0.5, 0.9)([NULL :: Nullable(UInt64)], 1) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT quantilesArrayIf(0.5, 0.9)([NULL :: Nullable(UInt64)], 0) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT quantilesIfArray(0.5, 0.9)([NULL :: Nullable(UInt64), NULL], [1, 0]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT quantilesIfArray(0.5, 0.9)([1, NULL], [1, 0]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT quantilesIfArrayIf(0.5, 0.9)([1, NULL], [1, 0], 1) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT quantilesIfArrayArray(0.5, 0.9)([[1, NULL]], [[1, 0]]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); + +SELECT '-- nothing:'; +SELECT nothingArray([NULL, NULL]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT nothingArrayIf([NULL], 1) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT nothingArrayIf([NULL], 0) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT nothingIfArray([NULL, NULL], [1, 0]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT nothingIfArray([1, NULL], [1, 0]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT nothingIfArrayIf([1, NULL], [1, 0], 1) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT nothingIfArrayArray([[1, NULL]], [[1, 0]]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); + +SELECT '-- nothing(UInt64):'; +SELECT nothingUInt64Array([NULL, NULL]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT nothingUInt64ArrayIf([NULL], 1) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT nothingUInt64ArrayIf([NULL], 0) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT nothingUInt64IfArray([NULL, NULL], [1, 0]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT nothingUInt64IfArray([1, NULL], [1, 0]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT nothingUInt64IfArrayIf([1, NULL], [1, 0], 1) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT nothingUInt64IfArrayArray([[1, NULL]], [[1, 0]]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); + +SELECT '-- nothing(Nullable(Nothing)):'; +SELECT nothingNullArray([NULL, NULL]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT nothingNullArrayIf([NULL], 1) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT nothingNullArrayIf([NULL], 0) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT nothingNullIfArray([NULL, NULL], [1, 0]) 
AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT nothingNullIfArray([1, NULL], [1, 0]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT nothingNullIfArrayIf([1, NULL], [1, 0], 1) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT nothingNullIfArrayArray([[1, NULL]], [[1, 0]]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); + +SELECT '-- sum:'; +SELECT sumArray([NULL, NULL]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT sumArrayIf([NULL], 1) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT sumArrayIf([NULL], 0) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT sumIfArray([NULL, NULL], [1, 0]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT sumIfArray([1, NULL], [1, 0]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT sumIfArrayIf([1, NULL], [1, 0], 1) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT sumIfArrayArray([[1, NULL]], [[1, 0]]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); + +SELECT '-- count:'; +SELECT countArray([NULL, NULL]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT countArrayIf([NULL], 1) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT countArrayIf([NULL], 0) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT countIfArray([NULL, NULL], [1, 0]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT countIfArray([1, NULL], [1, 0]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT countIfArrayIf([1, NULL], [1, 0], 1) AS x FROM remote('127.0.0.{1,2}', numbers(3)); +SELECT countIfArrayArray([[1, NULL]], [[1, 0]]) AS x FROM remote('127.0.0.{1,2}', numbers(3)); + + +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (`n` UInt64) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t1 SELECT * FROM numbers(10); + +SET +enable_parallel_replicas=1, + max_parallel_replicas=2, + use_hedged_requests=0, + cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost', + parallel_replicas_for_non_replicated_merge_tree=1 +; + +SELECT count(NULL) FROM t1 WITH TOTALS; +SELECT count(NULL as a), a FROM t1 WITH TOTALS; + +SELECT count(NULL as a), sum(a) FROM t1 WITH TOTALS; + +SELECT uniq(NULL) FROM t1 WITH TOTALS; +SELECT quantile(0.5)(NULL), quantile(0.9)(NULL), quantiles(0.1, 0.2)(NULL :: Nullable(UInt32)) FROM t1 WITH TOTALS; diff --git a/parser/testdata/02922_respect_nulls_Nullable/ast.json b/parser/testdata/02922_respect_nulls_Nullable/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02922_respect_nulls_Nullable/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02922_respect_nulls_Nullable/metadata.json b/parser/testdata/02922_respect_nulls_Nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02922_respect_nulls_Nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02922_respect_nulls_Nullable/query.sql b/parser/testdata/02922_respect_nulls_Nullable/query.sql new file mode 100644 index 000000000..d2d3ba294 --- /dev/null +++ b/parser/testdata/02922_respect_nulls_Nullable/query.sql @@ -0,0 +1,77 @@ +SELECT + *, + * APPLY (toTypeName) +FROM +( + SELECT + bl, + anyIf(n, cond) IGNORE NULLS AS any_ignore, + anyIf(n, cond) RESPECT NULLS AS any_respect, + anyLastIf(n, cond) IGNORE NULLS AS last_ignore, + anyLastIf(n, cond) RESPECT NULLS AS last_respect, + anyIf(nullable_n, cond) IGNORE NULLS AS any_nullable_ignore, + anyIf(nullable_n, cond) RESPECT NULLS AS any_nullable_respect, + anyLastIf(nullable_n, cond) IGNORE NULLS AS last_nullable_ignore, + anyLastIf(nullable_n, 
cond) RESPECT NULLS AS last_nullable_respect + FROM + ( + SELECT + number AS n, + rand() > pow(2, 31) as cond, + if(cond, NULL, n) as nullable_n, + blockNumber() AS bl + FROM numbers(10000) + ) + GROUP BY bl +) +WHERE + any_ignore != any_respect + OR toTypeName(any_ignore) != toTypeName(any_respect) + OR last_ignore != last_respect + OR toTypeName(last_ignore) != toTypeName(last_respect) + OR any_nullable_ignore != any_nullable_respect + OR toTypeName(any_nullable_ignore) != toTypeName(any_nullable_respect) + OR last_nullable_ignore != last_nullable_respect + OR toTypeName(last_nullable_ignore) != toTypeName(last_nullable_respect); + +-- { echoOn } +Select anyOrNull(tp) FROM (Select (number, number) as tp from numbers(10)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +Select anyOrNull(tp) FROM (Select (number, number) as tp from numbers(10)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT + any(tp) AS default, + toTypeName(default) as default_type, + any(tp) RESPECT NULLS AS respect, + toTypeName(respect) as respect_type +FROM +( + SELECT (toLowCardinality(number), number) AS tp + FROM numbers(10) +); + +SELECT first_value_respect_nullsMerge(t) FROM (Select first_value_respect_nullsState(number) as t FROM numbers(0)); +SELECT first_value_respect_nullsMerge(t) FROM (Select first_value_respect_nullsState(number::Nullable(UInt8)) as t FROM numbers(0)); +SELECT first_value_respect_nullsMerge(t) FROM (Select first_value_respect_nullsState(number::LowCardinality(Nullable(UInt8))) as t FROM numbers(0)) settings allow_suspicious_low_cardinality_types=1; + +SELECT first_value_respect_nullsOrNullMerge(t) FROM (Select first_value_respect_nullsOrNullState(number) as t FROM numbers(0)); +SELECT first_value_respect_nullsMerge(t) FROM (Select first_value_respect_nullsOrNullState(number) as t FROM numbers(0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT first_value_respect_nullsOrNullMerge(t) FROM (Select first_value_respect_nullsState(number) as t FROM numbers(0)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + + +SELECT first_value_respect_nullsMerge(t) FROM (Select first_value_respect_nullsState(dummy) as t FROM system.one); +SELECT first_value_respect_nullsMerge(t) FROM (Select first_value_respect_nullsState(dummy::Nullable(UInt8)) as t FROM system.one); +SELECT first_value_respect_nullsMerge(t) FROM (Select first_value_respect_nullsState(NULL) as t FROM system.one); +SELECT first_value_respect_nullsMerge(t) FROM (Select first_value_respect_nullsState(NULL::Nullable(UInt8)) as t FROM system.one); + +-- Assert sanitizer: passing NULL (not Nullable()) with different values is accepted and ignored +SELECT + anyLastIf(n, cond) RESPECT NULLS, + anyLastIf(nullable_n, cond) RESPECT NULLS +FROM +( + SELECT + number AS n, + NULL as cond, + number::Nullable(Int64) as nullable_n + FROM numbers(10000) +); diff --git a/parser/testdata/02922_respect_nulls_extensive/ast.json b/parser/testdata/02922_respect_nulls_extensive/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02922_respect_nulls_extensive/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02922_respect_nulls_extensive/metadata.json b/parser/testdata/02922_respect_nulls_extensive/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02922_respect_nulls_extensive/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02922_respect_nulls_extensive/query.sql b/parser/testdata/02922_respect_nulls_extensive/query.sql new file 
mode 100644 index 000000000..4ab46cf65 --- /dev/null +++ b/parser/testdata/02922_respect_nulls_extensive/query.sql @@ -0,0 +1,63 @@ +-- { echoOn } + +-- The function name is case insensitive, with or without respect nulls and using any of the aliases +Select number, first_value (number) RESPECT NULLS over (order by number) from numbers(1); +Select number, First_value (number) RESPECT NULLS over (order by number) from numbers(1); +Select number, FIRST_VALUE (number) RESPECT NULLS over (order by number) from numbers(1); +Select number, FIRST_VALUE (number) over (order by number) from numbers(1); +Select number, first_value_respect_nulls (number) over (order by number) from numbers(1); +Select number, any (number) RESPECT NULLS over (order by number) from numbers(1); +Select number, any_value (number) RESPECT NULLS over (order by number) from numbers(1); + +Select number, last_value (number) RESPECT NULLS over (order by number) from numbers(1); +Select number, Last_value (number) RESPECT NULLS over (order by number) from numbers(1); +Select number, LAST_VALUE (number) RESPECT NULLS over (order by number) from numbers(1); +Select number, LAST_VALUE (number) over (order by number) from numbers(1); +Select number, last_value_respect_nulls (number) over (order by number) from numbers(1); +Select number, anyLast (number) RESPECT NULLS over (order by number) from numbers(1); + +-- IGNORE NULLS should be accepted too +Select number, FIRST_VALUE (number) IGNORE NULLS over (order by number) from numbers(1); +Select number, LAST_VALUE (number) IGNORE NULLS over (order by number) from numbers(1); + +-- When applying IGNORE NULLs to first_value_respect_nulls we go back to the original function (any) +Select first_value_respect_nulls (number) IGNORE NULLS from (SELECT if(number < 2, NULL, number) as number FROM numbers(10)); +Select FIRST_VALUE_respect_nulls (number) IGNORE NULLS from (SELECT if(number < 2, NULL, number) as number FROM numbers(10)); +Select last_value_respect_nulls (number) IGNORE NULLS from (SELECT if(number < 2, NULL, number) as number FROM numbers(10)); +Select LAST_VALUE_respect_nulls (number) IGNORE NULLS from (SELECT if(number < 2, NULL, number) as number FROM numbers(10)); + +-- IGNORE/RESPECT NULLS should work with combinators because we can do it +SELECT first_valueIf (number, NOT isNull(number) AND (assumeNotNull(number) > 5)) RESPECT NULLS from (SELECT if(number < 2, NULL, number) as number FROM numbers(10)); +SELECT last_valueIf (number, NOT isNull(number) AND (assumeNotNull(number) > 5)) RESPECT NULLS from (SELECT if(number < 2, NULL, number) as number FROM numbers(10)); +SELECT first_valueIf (number, isNull(number)) RESPECT NULLS from (SELECT if(number > 8, NULL, number) as number FROM numbers(10)); +SELECT last_valueIf (number, isNull(number)) RESPECT NULLS from (SELECT if(number > 8, NULL, number) as number FROM numbers(10)); +SELECT toTypeName(first_valueIfState(number, isNull(number)) RESPECT NULLS) from (SELECT if(number > 8, NULL, number) as number FROM numbers(10)); +SELECT toTypeName(last_valueIfState(number, isNull(number)) RESPECT NULLS) from (SELECT if(number > 8, NULL, number) as number FROM numbers(10)); +SELECT FIRST_VALUEIf (number, NOT isNull(number) AND (assumeNotNull(number) > 5)) RESPECT NULLS from (SELECT if(number < 2, NULL, number) as number FROM numbers(10)); +SELECT LAST_VALUEIf (number, NOT isNull(number) AND (assumeNotNull(number) > 5)) RESPECT NULLS from (SELECT if(number < 2, NULL, number) as number FROM numbers(10)); +SELECT anyIf (number, 
isNull(number)) RESPECT NULLS from (SELECT if(number > 8, NULL, number) as number FROM numbers(10)); +SELECT anyLastIf (number, isNull(number)) RESPECT NULLS from (SELECT if(number > 8, NULL, number) as number FROM numbers(10)); +SELECT toTypeName(FIRST_VALUEIfState(number, isNull(number)) RESPECT NULLS) from (SELECT if(number > 8, NULL, number) as number FROM numbers(10)); +SELECT toTypeName(LAST_VALUEIfState(number, isNull(number)) RESPECT NULLS) from (SELECT if(number > 8, NULL, number) as number FROM numbers(10)); + +-- Unsupported functions should throw in the server +SELECT number, sum (number) RESPECT NULLS over (order by number) from numbers(1); -- { serverError NOT_IMPLEMENTED } +SELECT number, avgIf (number) RESPECT NULLS over (order by number) from numbers(1); -- { serverError NOT_IMPLEMENTED } +-- Same for double RESPECT NULLS +SELECT number, first_value_respect_nulls (number) RESPECT NULLS over (order by number) from numbers(1); -- { serverError NOT_IMPLEMENTED } +SELECT number, last_value_respect_nulls (number) RESPECT NULLS over (order by number) from numbers(1); -- { serverError NOT_IMPLEMENTED } + +-- Aggregate_functions_null_for_empty should work the same way +SELECT toTypeName(any(number) RESPECT NULLS) from numbers(1); +SELECT toTypeName(anyOrNull(number) RESPECT NULLS) from numbers(1); +SELECT any(number) RESPECT NULLS from numbers(0); +SELECT anyOrNull(number) RESPECT NULLS from numbers(0); +SELECT any(number) RESPECT NULLS from (Select NULL::Nullable(UInt8) as number FROM numbers(10)); +SELECT anyOrNull(number) RESPECT NULLS from (Select NULL::Nullable(UInt8) as number FROM numbers(10)); +SELECT any(number) RESPECT NULLS from (Select if(number > 8, NULL, number) as number FROM numbers(10)); +SELECT anyOrNull(number) RESPECT NULLS from (Select if(number > 8, NULL, number) as number FROM numbers(10)); +SELECT any(number) RESPECT NULLS from (Select if(number < 8, NULL, number) as number FROM numbers(10)); +SELECT anyOrNull(number) RESPECT NULLS from (Select if(number < 8, NULL, number) as number FROM numbers(10)); + +SELECT toTypeName(any(number) RESPECT NULLS) from numbers(1) SETTINGS aggregate_functions_null_for_empty = 1; +SELECT any(number) RESPECT NULLS from numbers(0) SETTINGS aggregate_functions_null_for_empty = 1; diff --git a/parser/testdata/02922_respect_nulls_parser/ast.json b/parser/testdata/02922_respect_nulls_parser/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02922_respect_nulls_parser/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02922_respect_nulls_parser/metadata.json b/parser/testdata/02922_respect_nulls_parser/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02922_respect_nulls_parser/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02922_respect_nulls_parser/query.sql b/parser/testdata/02922_respect_nulls_parser/query.sql new file mode 100644 index 000000000..ccd67dbe6 --- /dev/null +++ b/parser/testdata/02922_respect_nulls_parser/query.sql @@ -0,0 +1,19 @@ +SELECT first_value(number) RESPECT NULLS IGNORE NULLS from numbers(1); -- { clientError SYNTAX_ERROR } + +SELECT formatQuery('SELECT first_value(number) RESPECT NULLS from numbers(1)'); +SELECT formatQuery('SELECT first_value(number) IGNORE NULLS from numbers(1)'); +SELECT formatQuery('SELECT any (number) RESPECT NULLS from numbers(1)'); +SELECT formatQuery('SELECT LAST_VALUE(number) RESPECT NULLS from numbers(1)'); + +-- The parser doesn't know if this 
function supports "RESPECT/IGNORE" NULLS +SELECT formatQuery('SELECT sum(number) RESPECT NULLS from numbers(1)'); + +-- Normal functions should throw in the server +SELECT toDateTimeNonExistingFunction(now()) RESPECT NULLS b; -- { serverError UNKNOWN_FUNCTION } +SELECT toDateTime(now()) RESPECT NULLS b; -- { serverError SYNTAX_ERROR } +SELECT count() from numbers(10) where in(number, (0)) RESPECT NULLS; -- { serverError SYNTAX_ERROR } +SELECT if(number > 0, number, 0) respect nulls from numbers(0); -- { serverError SYNTAX_ERROR } +WITH (x -> x + 1) AS lambda SELECT lambda(number) RESPECT NULLS FROM numbers(10) SETTINGS enable_analyzer = 1; -- { serverError SYNTAX_ERROR } +SELECT * from system.one WHERE indexHint(dummy = 1) RESPECT NULLS; -- { serverError SYNTAX_ERROR } +SELECT arrayJoin([[3,4,5], [6,7], [2], [1,1]]) IGNORE NULLS; -- { serverError SYNTAX_ERROR } +SELECT number, grouping(number % 2, number) RESPECT NULLS AS gr FROM numbers(10) GROUP BY GROUPING SETS ((number), (number % 2)) SETTINGS force_grouping_standard_compatibility = 0; -- { serverError SYNTAX_ERROR } diff --git a/parser/testdata/02922_respect_nulls_states/ast.json b/parser/testdata/02922_respect_nulls_states/ast.json new file mode 100644 index 000000000..a969d5054 --- /dev/null +++ b/parser/testdata/02922_respect_nulls_states/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function first_value_respect_nullsState (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier dummy" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function last_value_respect_nullsState (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier dummy" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001599699, + "rows_read": 18, + "bytes_read": 771 + } +} diff --git a/parser/testdata/02922_respect_nulls_states/metadata.json b/parser/testdata/02922_respect_nulls_states/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02922_respect_nulls_states/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02922_respect_nulls_states/query.sql b/parser/testdata/02922_respect_nulls_states/query.sql new file mode 100644 index 000000000..b4c82ddbb --- /dev/null +++ b/parser/testdata/02922_respect_nulls_states/query.sql @@ -0,0 +1,29 @@ +SELECT toTypeName(first_value_respect_nullsState(dummy)), toTypeName(last_value_respect_nullsState(dummy)) from system.one; + +SELECT first_value_respect_nullsMerge(t) FROM (Select first_value_respect_nullsState(dummy) as t FROM system.one); +SELECT first_value_respect_nullsMerge(t) FROM (Select first_value_respect_nullsState(NULL::Nullable(UInt8)) as t FROM system.one); +SELECT first_value_respect_nullsMerge(t) FROM (Select 
first_value_respect_nullsState(number) as t FROM numbers(5)); +SELECT first_value_respect_nullsMerge(t) FROM (Select first_value_respect_nullsState(NULL::Nullable(UInt8)) as t FROM numbers(5)); + +SELECT last_value_respect_nullsMerge(t) FROM (Select last_value_respect_nullsState(dummy) as t FROM system.one); +SELECT last_value_respect_nullsMerge(t) FROM (Select last_value_respect_nullsState(NULL::Nullable(UInt8)) as t FROM system.one); +SELECT last_value_respect_nullsMerge(t) FROM (Select last_value_respect_nullsState(number) as t FROM numbers(5)); +SELECT last_value_respect_nullsMerge(t) FROM (Select last_value_respect_nullsState(NULL::Nullable(UInt8)) as t FROM numbers(5)); + +SELECT first_value_respect_nullsMerge(t) FROM (Select first_valueState(number) as t from numbers(1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT first_value_respect_nullsMerge(t) FROM (Select last_value_respect_nullsState(number) as t from numbers(1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT last_value_respect_nullsMerge(t) FROM (Select first_value_respect_nullsState(number) as t from numbers(1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT first_value_respect_nullsMerge(CAST(unhex('00'), 'AggregateFunction(any, UInt64)')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- Invalid (starts at 1) +SELECT first_value_respect_nullsMerge(CAST(unhex('00'), 'AggregateFunction(any_respect_nulls, UInt64)')); -- { serverError INCORRECT_DATA } +-- Not set (Default value) +SELECT first_value_respect_nullsMerge(CAST(unhex('01'), 'AggregateFunction(any_respect_nulls, UInt64)')); +SELECT finalizeAggregation(CAST(unhex('01'), 'AggregateFunction(any_respect_nulls, UInt64)')); +-- Set to NULL +SELECT first_value_respect_nullsMerge(CAST(unhex('02'), 'AggregateFunction(any_respect_nulls, UInt64)')); -- { serverError INCORRECT_DATA } +SELECT first_value_respect_nullsMerge(CAST(unhex('02'), 'AggregateFunction(any_respect_nulls, Nullable(UInt64))')); +SELECT finalizeAggregation(CAST(unhex('02'), 'AggregateFunction(any_respect_nulls, Nullable(UInt64))')); + +-- Set to other value, but without providing value +SELECT first_value_respect_nullsMerge(CAST(unhex('03'), 'AggregateFunction(any_respect_nulls, UInt64)')); -- { serverError CANNOT_READ_ALL_DATA } diff --git a/parser/testdata/02923_cte_equality_disjunction/ast.json b/parser/testdata/02923_cte_equality_disjunction/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02923_cte_equality_disjunction/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02923_cte_equality_disjunction/metadata.json b/parser/testdata/02923_cte_equality_disjunction/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02923_cte_equality_disjunction/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02923_cte_equality_disjunction/query.sql b/parser/testdata/02923_cte_equality_disjunction/query.sql new file mode 100644 index 000000000..288bed9e4 --- /dev/null +++ b/parser/testdata/02923_cte_equality_disjunction/query.sql @@ -0,0 +1,12 @@ +--https://github.com/ClickHouse/ClickHouse/issues/5323 +CREATE TABLE test_bug_optimization +( + `path` String +) +ENGINE = MergeTree +ORDER BY path; + +WITH (path = 'test1') OR match(path, 'test2') OR (match(path, 'test3') AND match(path, 'test2')) OR match(path, 'test4') OR (path = 'test5') OR (path = 'test6') AS alias_in_error +SELECT count(1) +FROM test_bug_optimization +WHERE alias_in_error; diff --git 
a/parser/testdata/02923_explain_expired_context/ast.json b/parser/testdata/02923_explain_expired_context/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02923_explain_expired_context/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02923_explain_expired_context/metadata.json b/parser/testdata/02923_explain_expired_context/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02923_explain_expired_context/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02923_explain_expired_context/query.sql b/parser/testdata/02923_explain_expired_context/query.sql new file mode 100644 index 000000000..68277508e --- /dev/null +++ b/parser/testdata/02923_explain_expired_context/query.sql @@ -0,0 +1,3 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/51321 +EXPLAIN ESTIMATE SELECT any(toTypeName(s)) FROM (SELECT 'bbbbbbbb', toTypeName(s), CAST('', 'LowCardinality(String)'), NULL, CAST('\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0', 'String') AS s) AS t1 FULL OUTER JOIN (SELECT CAST('bbbbb\0\0bbb\0bb\0bb', 'LowCardinality(String)'), CAST(CAST('a', 'String'), 'LowCardinality(String)') AS s GROUP BY CoNnEcTiOn_Id()) AS t2 USING (s) WITH TOTALS; +EXPLAIN ESTIMATE SELECT any(s) FROM (SELECT '' AS s) AS t1 JOIN (SELECT '' AS s GROUP BY connection_id()) AS t2 USING (s); diff --git a/parser/testdata/02923_join_use_nulls_modulo/ast.json b/parser/testdata/02923_join_use_nulls_modulo/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02923_join_use_nulls_modulo/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02923_join_use_nulls_modulo/metadata.json b/parser/testdata/02923_join_use_nulls_modulo/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02923_join_use_nulls_modulo/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02923_join_use_nulls_modulo/query.sql b/parser/testdata/02923_join_use_nulls_modulo/query.sql new file mode 100644 index 000000000..4134a42c5 --- /dev/null +++ b/parser/testdata/02923_join_use_nulls_modulo/query.sql @@ -0,0 +1,22 @@ +--https://github.com/ClickHouse/ClickHouse/issues/47366 +SELECT + id % 255, + toTypeName(d.id) +FROM +( + SELECT + toLowCardinality(1048577) AS id, + toLowCardinality(9223372036854775807) AS value + GROUP BY + GROUPING SETS ( + (toLowCardinality(1024)), + (id % 10.0001), + ((id % 2147483646) != -9223372036854775807), + ((id % -1) != 255)) + ) AS a + SEMI LEFT JOIN +( + SELECT toLowCardinality(9223372036854775807) AS id + WHERE (id % 2147483646) != NULL +) AS d USING (id) +SETTINGS join_use_nulls=1; diff --git a/parser/testdata/02931_alter_materialized_view_query_inconsistent/ast.json b/parser/testdata/02931_alter_materialized_view_query_inconsistent/ast.json new file mode 100644 index 000000000..4ae351998 --- /dev/null +++ b/parser/testdata/02931_alter_materialized_view_query_inconsistent/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery pipe (children 1)" + }, + { + "explain": " Identifier pipe" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001413711, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02931_alter_materialized_view_query_inconsistent/metadata.json b/parser/testdata/02931_alter_materialized_view_query_inconsistent/metadata.json new file mode 
100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02931_alter_materialized_view_query_inconsistent/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02931_alter_materialized_view_query_inconsistent/query.sql b/parser/testdata/02931_alter_materialized_view_query_inconsistent/query.sql new file mode 100644 index 000000000..895d5b3ab --- /dev/null +++ b/parser/testdata/02931_alter_materialized_view_query_inconsistent/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS pipe; +DROP TABLE IF EXISTS src; +DROP TABLE IF EXISTS dest; + +CREATE TABLE src(v UInt64) ENGINE = Null; +CREATE TABLE dest(v UInt64) Engine = MergeTree() ORDER BY v; +CREATE MATERIALIZED VIEW pipe TO dest AS SELECT v FROM src; + +ALTER TABLE dest ADD COLUMN v2 UInt64; + +ALTER TABLE pipe MODIFY QUERY SELECT v * 2 as v, 1 as v2 FROM src; + +DESCRIBE TABLE pipe; +SHOW CREATE TABLE pipe; + +DROP TABLE IF EXISTS pipe; +DROP TABLE IF EXISTS src; +DROP TABLE IF EXISTS dest; diff --git a/parser/testdata/02931_max_num_to_warn/ast.json b/parser/testdata/02931_max_num_to_warn/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02931_max_num_to_warn/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02931_max_num_to_warn/metadata.json b/parser/testdata/02931_max_num_to_warn/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02931_max_num_to_warn/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02931_max_num_to_warn/query.sql b/parser/testdata/02931_max_num_to_warn/query.sql new file mode 100644 index 000000000..ed2c0a99f --- /dev/null +++ b/parser/testdata/02931_max_num_to_warn/query.sql @@ -0,0 +1,86 @@ +-- Tags: no-parallel + +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_02931; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_1 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_2 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_3 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_4 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_5 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_6 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_7 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_8 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_9 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_10 (id Int32, str String) Engine=Memory; +CREATE TABLE IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_11 (id Int32, str String) Engine=Memory; + +CREATE VIEW IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_view_1 AS SELECT * FROM test_max_num_to_warn_02931.test_max_num_to_warn_1; +CREATE VIEW IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_view_2 AS SELECT * FROM test_max_num_to_warn_02931.test_max_num_to_warn_2; +CREATE VIEW IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_view_3 AS SELECT * FROM 
test_max_num_to_warn_02931.test_max_num_to_warn_3; +CREATE VIEW IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_view_4 AS SELECT * FROM test_max_num_to_warn_02931.test_max_num_to_warn_4; +CREATE VIEW IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_view_5 AS SELECT * FROM test_max_num_to_warn_02931.test_max_num_to_warn_5; +CREATE VIEW IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_view_6 AS SELECT * FROM test_max_num_to_warn_02931.test_max_num_to_warn_6; +CREATE VIEW IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_view_7 AS SELECT * FROM test_max_num_to_warn_02931.test_max_num_to_warn_7; +CREATE VIEW IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_view_8 AS SELECT * FROM test_max_num_to_warn_02931.test_max_num_to_warn_8; +CREATE VIEW IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_view_9 AS SELECT * FROM test_max_num_to_warn_02931.test_max_num_to_warn_9; +CREATE VIEW IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_view_10 AS SELECT * FROM test_max_num_to_warn_02931.test_max_num_to_warn_10; +CREATE VIEW IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_view_11 AS SELECT * FROM test_max_num_to_warn_02931.test_max_num_to_warn_11; + +CREATE DICTIONARY IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_dict_1 (id Int32, str String) PRIMARY KEY id +SOURCE(CLICKHOUSE(DB 'test_max_num_to_warn_02931' TABLE 'test_max_num_to_warn_1'))LAYOUT(FLAT()) LIFETIME(MIN 0 MAX 1000); +CREATE DICTIONARY IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_dict_2 (id Int32, str String) PRIMARY KEY id +SOURCE(CLICKHOUSE(DB 'test_max_num_to_warn_02931' TABLE 'test_max_num_to_warn_2'))LAYOUT(FLAT()) LIFETIME(MIN 0 MAX 1000); +CREATE DICTIONARY IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_dict_3 (id Int32, str String) PRIMARY KEY id +SOURCE(CLICKHOUSE(DB 'test_max_num_to_warn_02931' TABLE 'test_max_num_to_warn_3'))LAYOUT(FLAT()) LIFETIME(MIN 0 MAX 1000); +CREATE DICTIONARY IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_dict_4 (id Int32, str String) PRIMARY KEY id +SOURCE(CLICKHOUSE(DB 'test_max_num_to_warn_02931' TABLE 'test_max_num_to_warn_4'))LAYOUT(FLAT()) LIFETIME(MIN 0 MAX 1000); +CREATE DICTIONARY IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_dict_5 (id Int32, str String) PRIMARY KEY id +SOURCE(CLICKHOUSE(DB 'test_max_num_to_warn_02931' TABLE 'test_max_num_to_warn_5'))LAYOUT(FLAT()) LIFETIME(MIN 0 MAX 1000); +CREATE DICTIONARY IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_dict_6 (id Int32, str String) PRIMARY KEY id +SOURCE(CLICKHOUSE(DB 'test_max_num_to_warn_02931' TABLE 'test_max_num_to_warn_6'))LAYOUT(FLAT()) LIFETIME(MIN 0 MAX 1000); +CREATE DICTIONARY IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_dict_7 (id Int32, str String) PRIMARY KEY id +SOURCE(CLICKHOUSE(DB 'test_max_num_to_warn_02931' TABLE 'test_max_num_to_warn_7'))LAYOUT(FLAT()) LIFETIME(MIN 0 MAX 1000); +CREATE DICTIONARY IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_dict_8 (id Int32, str String) PRIMARY KEY id +SOURCE(CLICKHOUSE(DB 'test_max_num_to_warn_02931' TABLE 'test_max_num_to_warn_8'))LAYOUT(FLAT()) LIFETIME(MIN 0 MAX 1000); +CREATE DICTIONARY IF NOT EXISTS test_max_num_to_warn_02931.test_max_num_to_warn_dict_9 (id Int32, str String) PRIMARY KEY id +SOURCE(CLICKHOUSE(DB 'test_max_num_to_warn_02931' TABLE 'test_max_num_to_warn_9'))LAYOUT(FLAT()) LIFETIME(MIN 0 MAX 1000); +CREATE DICTIONARY IF NOT EXISTS 
test_max_num_to_warn_02931.test_max_num_to_warn_dict_10 (id Int32, str String) PRIMARY KEY id +SOURCE(CLICKHOUSE(DB 'test_max_num_to_warn_02931' TABLE 'test_max_num_to_warn_10'))LAYOUT(FLAT()) LIFETIME(MIN 0 MAX 1000); + +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_1; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_2; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_3; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_4; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_5; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_6; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_7; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_8; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_9; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_10; +CREATE DATABASE IF NOT EXISTS test_max_num_to_warn_11; + +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_1 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_2 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_3 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_4 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_5 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_6 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_7 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_8 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_9 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_10 VALUES (1, 'Hello'); +INSERT INTO test_max_num_to_warn_02931.test_max_num_to_warn_11 VALUES (1, 'Hello'); + +SELECT replaceRegexpAll(message, '\(\d+\)', '_'), message_format_string FROM system.warnings WHERE message LIKE 'The number of%' ORDER BY message; + +DROP DATABASE IF EXISTS test_max_num_to_warn_02931; +DROP DATABASE IF EXISTS test_max_num_to_warn_1; +DROP DATABASE IF EXISTS test_max_num_to_warn_2; +DROP DATABASE IF EXISTS test_max_num_to_warn_3; +DROP DATABASE IF EXISTS test_max_num_to_warn_4; +DROP DATABASE IF EXISTS test_max_num_to_warn_5; +DROP DATABASE IF EXISTS test_max_num_to_warn_6; +DROP DATABASE IF EXISTS test_max_num_to_warn_7; +DROP DATABASE IF EXISTS test_max_num_to_warn_8; +DROP DATABASE IF EXISTS test_max_num_to_warn_9; +DROP DATABASE IF EXISTS test_max_num_to_warn_10; +DROP DATABASE IF EXISTS test_max_num_to_warn_11; diff --git a/parser/testdata/02931_rewrite_sum_column_and_constant/ast.json b/parser/testdata/02931_rewrite_sum_column_and_constant/ast.json new file mode 100644 index 000000000..e720e6f27 --- /dev/null +++ b/parser/testdata/02931_rewrite_sum_column_and_constant/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001277653, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02931_rewrite_sum_column_and_constant/metadata.json b/parser/testdata/02931_rewrite_sum_column_and_constant/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02931_rewrite_sum_column_and_constant/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02931_rewrite_sum_column_and_constant/query.sql b/parser/testdata/02931_rewrite_sum_column_and_constant/query.sql new file mode 100644 index 000000000..fdf892617 --- /dev/null +++ 
b/parser/testdata/02931_rewrite_sum_column_and_constant/query.sql @@ -0,0 +1,216 @@ +set enable_analyzer = 1; +-- { echoOn } +Select sum(number + 1) from numbers(10); +Select sum(1 + number) from numbers(10); +Select sum(number - 1) from numbers(10); +Select sum(1 - number) from numbers(10); +EXPLAIN SYNTAX (Select sum(number + 1) from numbers(10)); +EXPLAIN SYNTAX (Select sum(1 + number) from numbers(10)); +EXPLAIN SYNTAX (Select sum(number - 1) from numbers(10)); +EXPLAIN SYNTAX (Select sum(1 - number) from numbers(10)); + +WITH 1::Nullable(UInt64) as my_literal Select sum(number + my_literal) from numbers(0); +WITH 1::Nullable(UInt64) as my_literal Select sum(number) + my_literal * count() from numbers(0); +EXPLAIN SYNTAX (WITH 1::Nullable(UInt64) as my_literal Select sum(number + my_literal) from numbers(0)); +EXPLAIN SYNTAX (WITH 1::Nullable(UInt64) as my_literal Select sum(number) + my_literal * count() from numbers(0)); +-- { echoOff } + +DROP TABLE IF EXISTS test_table; + +CREATE TABLE test_table +( + uint64 UInt64, + float64 Float64, + decimal32 Decimal32(5), +) ENGINE=MergeTree ORDER BY uint64; + +-- Use Float64 numbers divisible by 1/16 (or some other small power of two), so that their sum doesn't depend on summation order. +INSERT INTO test_table VALUES (1, 1.125, 1.11); +INSERT INTO test_table VALUES (2, 2.250, 2.22); +INSERT INTO test_table VALUES (3, 3.375, 3.33); +INSERT INTO test_table VALUES (4, 4.500, 4.44); +INSERT INTO test_table VALUES (5, 5.625, 5.55); + +-- { echoOn } +SELECT sum(uint64 + 1 AS i) from test_table where i > 0; +SELECT sum(uint64 + 1) AS j from test_table having j > 0; +SELECT sum(uint64 + 1 AS i) j from test_table where i > 0 having j > 0; +SELECT sum((uint64 AS m) + (1 AS n)) j from test_table where m > 0 and n > 0 having j > 0; +SELECT sum(((uint64 AS m) + (1 AS n)) AS i) j from test_table where m > 0 and n > 0 and i > 0 having j > 0; +EXPLAIN SYNTAX (SELECT sum(uint64 + 1 AS i) from test_table where i > 0); +EXPLAIN SYNTAX (SELECT sum(uint64 + 1) AS j from test_table having j > 0); +EXPLAIN SYNTAX (SELECT sum(uint64 + 1 AS i) j from test_table where i > 0 having j > 0); +EXPLAIN SYNTAX (SELECT sum((uint64 AS m) + (1 AS n)) j from test_table where m > 0 and n > 0 having j > 0); +EXPLAIN SYNTAX (SELECT sum(((uint64 AS m) + (1 AS n)) AS i) j from test_table where m > 0 and n > 0 and i > 0 having j > 0); + +SELECT sum(1 + uint64 AS i) from test_table where i > 0; +SELECT sum(1 + uint64) AS j from test_table having j > 0; +SELECT sum(1 + uint64 AS i) j from test_table where i > 0 having j > 0; +SELECT sum((1 AS m) + (uint64 AS n)) j from test_table where m > 0 and n > 0 having j > 0; +SELECT sum(((1 AS m) + (uint64 AS n)) AS i) j from test_table where m > 0 and n > 0 and i > 0 having j > 0; +EXPLAIN SYNTAX (SELECT sum(1 + uint64 AS i) from test_table where i > 0); +EXPLAIN SYNTAX (SELECT sum(1 + uint64) AS j from test_table having j > 0); +EXPLAIN SYNTAX (SELECT sum(1 + uint64 AS i) j from test_table where i > 0 having j > 0); +EXPLAIN SYNTAX (SELECT sum((1 AS m) + (uint64 AS n)) j from test_table where m > 0 and n > 0 having j > 0); +EXPLAIN SYNTAX (SELECT sum(((1 AS m) + (uint64 AS n)) AS i) j from test_table where m > 0 and n > 0 and i > 0 having j > 0); + +SELECT sum(uint64 - 1 AS i) from test_table where i > 0; +SELECT sum(uint64 - 1) AS j from test_table having j > 0; +SELECT sum(uint64 - 1 AS i) j from test_table where i > 0 having j > 0; +SELECT sum((uint64 AS m) - (1 AS n)) j from test_table where m > 0 and n > 0 having j > 0; +SELECT 
sum(((uint64 AS m) - (1 AS n)) AS i) j from test_table where m > 0 and n > 0 and i > 0 having j > 0; +EXPLAIN SYNTAX (SELECT sum(uint64 - 1 AS i) from test_table where i > 0); +EXPLAIN SYNTAX (SELECT sum(uint64 - 1) AS j from test_table having j > 0); +EXPLAIN SYNTAX (SELECT sum(uint64 - 1 AS i) j from test_table where i > 0 having j > 0); +EXPLAIN SYNTAX (SELECT sum((uint64 AS m) - (1 AS n)) j from test_table where m > 0 and n > 0 having j > 0); +EXPLAIN SYNTAX (SELECT sum(((uint64 AS m) - (1 AS n)) AS i) j from test_table where m > 0 and n > 0 and i > 0 having j > 0); + +SELECT sum(1 - uint64 AS i) from test_table; +SELECT sum(1 - uint64) AS j from test_table; +SELECT sum(1 - uint64 AS i) j from test_table; +SELECT sum((1 AS m) - (uint64 AS n)) j from test_table; +SELECT sum(((1 AS m) - (uint64 AS n)) AS i) j from test_table; +EXPLAIN SYNTAX (SELECT sum(1 - uint64 AS i) from test_table where i > 0); +EXPLAIN SYNTAX (SELECT sum(1 - uint64) AS j from test_table having j < 0); +EXPLAIN SYNTAX (SELECT sum(1 - uint64 AS i) j from test_table where i > 0 having j < 0); +EXPLAIN SYNTAX (SELECT sum((1 AS m) - (uint64 AS n)) j from test_table where m > 0 and n > 0 having j < 0); +EXPLAIN SYNTAX (SELECT sum(((1 AS m) - (uint64 AS n)) AS i) j from test_table where m > 0 and n > 0 and i < 0 having j < 0); + +SELECT sum(uint64 + 2.11) From test_table; +SELECT sum(2.11 + uint64) From test_table; +SELECT sum(uint64 - 2.11) From test_table; +SELECT sum(2.11 - uint64) From test_table; +SELECT sum(uint64) + 2.11 * count(uint64) From test_table; +SELECT 2.11 * count(uint64) + sum(uint64) From test_table; +SELECT sum(uint64) - 2.11 * count(uint64) From test_table; +SELECT 2.11 * count(uint64) - sum(uint64) From test_table; +EXPLAIN SYNTAX (SELECT sum(uint64 + 2.11) From test_table); +EXPLAIN SYNTAX (SELECT sum(2.11 + uint64) From test_table); +EXPLAIN SYNTAX (SELECT sum(uint64 - 2.11) From test_table); +EXPLAIN SYNTAX (SELECT sum(2.11 - uint64) From test_table); +EXPLAIN SYNTAX (SELECT sum(uint64) + 2.11 * count(uint64) From test_table); +EXPLAIN SYNTAX (SELECT 2.11 * count(uint64) + sum(uint64) From test_table); +EXPLAIN SYNTAX (SELECT sum(uint64) - 2.11 * count(uint64) From test_table); +EXPLAIN SYNTAX (SELECT 2.11 * count(uint64) - sum(uint64) From test_table); + +SELECT sum(uint64 + 2) From test_table; +SELECT sum(2 + uint64) From test_table; +SELECT sum(uint64 - 2) From test_table; +SELECT sum(2 - uint64) From test_table; +SELECT sum(uint64) + 2 * count(uint64) From test_table; +SELECT 2 * count(uint64) + sum(uint64) From test_table; +SELECT sum(uint64) - 2 * count(uint64) From test_table; +SELECT 2 * count(uint64) - sum(uint64) From test_table; +EXPLAIN SYNTAX (SELECT sum(uint64 + 2) From test_table); +EXPLAIN SYNTAX (SELECT sum(2 + uint64) From test_table); +EXPLAIN SYNTAX (SELECT sum(uint64 - 2) From test_table); +EXPLAIN SYNTAX (SELECT sum(2 - uint64) From test_table); +EXPLAIN SYNTAX (SELECT sum(uint64) + 2 * count(uint64) From test_table); +EXPLAIN SYNTAX (SELECT 2 * count(uint64) + sum(uint64) From test_table); +EXPLAIN SYNTAX (SELECT sum(uint64) - 2 * count(uint64) From test_table); +EXPLAIN SYNTAX (SELECT 2 * count(uint64) - sum(uint64) From test_table); + +SELECT sum(float64 + 2) From test_table; +SELECT sum(2 + float64) From test_table; +SELECT sum(float64 - 2) From test_table; +SELECT sum(2 - float64) From test_table; +SELECT sum(float64) + 2 * count(float64) From test_table; +SELECT 2 * count(float64) + sum(float64) From test_table; +SELECT sum(float64) - 2 * count(float64) From test_table; 
+SELECT 2 * count(float64) - sum(float64) From test_table; +EXPLAIN SYNTAX (SELECT sum(float64 + 2) From test_table); +EXPLAIN SYNTAX (SELECT sum(2 + float64) From test_table); +EXPLAIN SYNTAX (SELECT sum(float64 - 2) From test_table); +EXPLAIN SYNTAX (SELECT sum(2 - float64) From test_table); +EXPLAIN SYNTAX (SELECT sum(float64) + 2 * count(float64) From test_table); +EXPLAIN SYNTAX (SELECT 2 * count(float64) + sum(float64) From test_table); +EXPLAIN SYNTAX (SELECT sum(float64) - 2 * count(float64) From test_table); +EXPLAIN SYNTAX (SELECT 2 * count(float64) - sum(float64) From test_table); + +SELECT sum(decimal32 + 2) From test_table; +SELECT sum(2 + decimal32) From test_table; +SELECT sum(decimal32 - 2) From test_table; +SELECT sum(2 - decimal32) From test_table; +SELECT sum(decimal32) + 2 * count(decimal32) From test_table; +SELECT 2 * count(decimal32) + sum(decimal32) From test_table; +SELECT sum(decimal32) - 2 * count(decimal32) From test_table; +SELECT 2 * count(decimal32) - sum(decimal32) From test_table; +EXPLAIN SYNTAX (SELECT sum(decimal32 + 2) From test_table); +EXPLAIN SYNTAX (SELECT sum(2 + decimal32) From test_table); +EXPLAIN SYNTAX (SELECT sum(decimal32 - 2) From test_table); +EXPLAIN SYNTAX (SELECT sum(2 - decimal32) From test_table); +EXPLAIN SYNTAX (SELECT sum(decimal32) + 2 * count(decimal32) From test_table); +EXPLAIN SYNTAX (SELECT 2 * count(decimal32) + sum(decimal32) From test_table); +EXPLAIN SYNTAX (SELECT sum(decimal32) - 2 * count(decimal32) From test_table); +EXPLAIN SYNTAX (SELECT 2 * count(decimal32) - sum(decimal32) From test_table); + +SELECT sum(uint64 + 2) + sum(uint64 + 3) From test_table; +SELECT sum(uint64 + 2) - sum(uint64 + 3) From test_table; +SELECT sum(uint64 - 2) + sum(uint64 - 3) From test_table; +SELECT sum(uint64 - 2) - sum(uint64 - 3) From test_table; +SELECT sum(2 - uint64) - sum(3 - uint64) From test_table; +SELECT (sum(uint64) + 2 * count(uint64)) + (sum(uint64) + 3 * count(uint64)) From test_table; +SELECT (sum(uint64) + 2 * count(uint64)) - (sum(uint64) + 3 * count(uint64)) From test_table; +SELECT (sum(uint64) - 2 * count(uint64)) + (sum(uint64) - 3 * count(uint64)) From test_table; +SELECT (sum(uint64) - 2 * count(uint64)) - (sum(uint64) - 3 * count(uint64)) From test_table; +SELECT (2 * count(uint64) - sum(uint64)) + (3 * count(uint64) - sum(uint64)) From test_table; +EXPLAIN SYNTAX (SELECT sum(uint64 + 2) + sum(uint64 + 3) From test_table); +EXPLAIN SYNTAX (SELECT sum(uint64 + 2) - sum(uint64 + 3) From test_table); +EXPLAIN SYNTAX (SELECT sum(uint64 - 2) + sum(uint64 - 3) From test_table); +EXPLAIN SYNTAX (SELECT sum(uint64 - 2) - sum(uint64 - 3) From test_table); +EXPLAIN SYNTAX (SELECT sum(2 - uint64) - sum(3 - uint64) From test_table); +EXPLAIN SYNTAX (SELECT (sum(uint64) + 2 * count(uint64)) + (sum(uint64) + 3 * count(uint64)) From test_table); +EXPLAIN SYNTAX (SELECT (sum(uint64) + 2 * count(uint64)) - (sum(uint64) + 3 * count(uint64)) From test_table); +EXPLAIN SYNTAX (SELECT (sum(uint64) - 2 * count(uint64)) + (sum(uint64) - 3 * count(uint64)) From test_table); +EXPLAIN SYNTAX (SELECT (sum(uint64) - 2 * count(uint64)) - (sum(uint64) - 3 * count(uint64)) From test_table); +EXPLAIN SYNTAX (SELECT (2 * count(uint64) - sum(uint64)) + (3 * count(uint64) - sum(uint64)) From test_table); + +SELECT sum(float64 + 2) + sum(float64 + 3) From test_table; +SELECT sum(float64 + 2) - sum(float64 + 3) From test_table; +SELECT sum(float64 - 2) + sum(float64 - 3) From test_table; +SELECT sum(float64 - 2) - sum(float64 - 3) From test_table; 
+SELECT sum(2 - float64) - sum(3 - float64) From test_table; +SELECT (sum(float64) + 2 * count(float64)) + (sum(float64) + 3 * count(float64)) From test_table; +SELECT (sum(float64) + 2 * count(float64)) - (sum(float64) + 3 * count(float64)) From test_table; +SELECT (sum(float64) - 2 * count(float64)) + (sum(float64) - 3 * count(float64)) From test_table; +SELECT (sum(float64) - 2 * count(float64)) - (sum(float64) - 3 * count(float64)) From test_table; +SELECT (2 * count(float64) - sum(float64)) + (3 * count(float64) - sum(float64)) From test_table; +EXPLAIN SYNTAX (SELECT sum(float64 + 2) + sum(float64 + 3) From test_table); +EXPLAIN SYNTAX (SELECT sum(float64 + 2) - sum(float64 + 3) From test_table); +EXPLAIN SYNTAX (SELECT sum(float64 - 2) + sum(float64 - 3) From test_table); +EXPLAIN SYNTAX (SELECT sum(float64 - 2) - sum(float64 - 3) From test_table); +EXPLAIN SYNTAX (SELECT sum(2 - float64) - sum(3 - float64) From test_table); +EXPLAIN SYNTAX (SELECT (sum(float64) + 2 * count(float64)) + (sum(float64) + 3 * count(float64)) From test_table); +EXPLAIN SYNTAX (SELECT (sum(float64) + 2 * count(float64)) - (sum(float64) + 3 * count(float64)) From test_table); +EXPLAIN SYNTAX (SELECT (sum(float64) - 2 * count(float64)) + (sum(float64) - 3 * count(float64)) From test_table); +EXPLAIN SYNTAX (SELECT (sum(float64) - 2 * count(float64)) - (sum(float64) - 3 * count(float64)) From test_table); +EXPLAIN SYNTAX (SELECT (2 * count(float64) - sum(float64)) + (3 * count(float64) - sum(float64)) From test_table); + +SELECT sum(decimal32 + 2) + sum(decimal32 + 3) From test_table; +SELECT sum(decimal32 + 2) - sum(decimal32 + 3) From test_table; +SELECT sum(decimal32 - 2) + sum(decimal32 - 3) From test_table; +SELECT sum(decimal32 - 2) - sum(decimal32 - 3) From test_table; +SELECT sum(2 - decimal32) - sum(3 - decimal32) From test_table; +SELECT (sum(decimal32) + 2 * count(decimal32)) + (sum(decimal32) + 3 * count(decimal32)) From test_table; +SELECT (sum(decimal32) + 2 * count(decimal32)) - (sum(decimal32) + 3 * count(decimal32)) From test_table; +SELECT (sum(decimal32) - 2 * count(decimal32)) + (sum(decimal32) - 3 * count(decimal32)) From test_table; +SELECT (sum(decimal32) - 2 * count(decimal32)) - (sum(decimal32) - 3 * count(decimal32)) From test_table; +SELECT (2 * count(decimal32) - sum(decimal32)) + (3 * count(decimal32) - sum(decimal32)) From test_table; +EXPLAIN SYNTAX (SELECT sum(decimal32 + 2) + sum(decimal32 + 3) From test_table); +EXPLAIN SYNTAX (SELECT sum(decimal32 + 2) - sum(decimal32 + 3) From test_table); +EXPLAIN SYNTAX (SELECT sum(decimal32 - 2) + sum(decimal32 - 3) From test_table); +EXPLAIN SYNTAX (SELECT sum(decimal32 - 2) - sum(decimal32 - 3) From test_table); +EXPLAIN SYNTAX (SELECT sum(2 - decimal32) - sum(3 - decimal32) From test_table); +EXPLAIN SYNTAX (SELECT (sum(decimal32) + 2 * count(decimal32)) + (sum(decimal32) + 3 * count(decimal32)) From test_table); +EXPLAIN SYNTAX (SELECT (sum(decimal32) + 2 * count(decimal32)) - (sum(decimal32) + 3 * count(decimal32)) From test_table); +EXPLAIN SYNTAX (SELECT (sum(decimal32) - 2 * count(decimal32)) + (sum(decimal32) - 3 * count(decimal32)) From test_table); +EXPLAIN SYNTAX (SELECT (sum(decimal32) - 2 * count(decimal32)) - (sum(decimal32) - 3 * count(decimal32)) From test_table); +EXPLAIN SYNTAX (SELECT (2 * count(decimal32) - sum(decimal32)) + (3 * count(decimal32) - sum(decimal32)) From test_table); + +-- https://github.com/ClickHouse/ClickHouse/issues/59414 +SELECT sum(uint64 + 2) as j, j + 5 as t from test_table; +EXPLAIN SYNTAX 
SELECT sum(uint64 + 2) as j, j + 5 as t from test_table; +-- { echoOff } + + +DROP TABLE IF EXISTS test_table; diff --git a/parser/testdata/02931_ubsan_error_arena_aligned_alloc/ast.json b/parser/testdata/02931_ubsan_error_arena_aligned_alloc/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02931_ubsan_error_arena_aligned_alloc/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02931_ubsan_error_arena_aligned_alloc/metadata.json b/parser/testdata/02931_ubsan_error_arena_aligned_alloc/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02931_ubsan_error_arena_aligned_alloc/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02931_ubsan_error_arena_aligned_alloc/query.sql b/parser/testdata/02931_ubsan_error_arena_aligned_alloc/query.sql new file mode 100644 index 000000000..c140d7d42 --- /dev/null +++ b/parser/testdata/02931_ubsan_error_arena_aligned_alloc/query.sql @@ -0,0 +1,2 @@ +-- previously it caused `runtime error: applying non-zero offset 7 to null pointer` +SELECT sumResample(65535, 20, 1)(number, number % 20) FROM numbers(200); diff --git a/parser/testdata/02932_analyzer_rewrite_sum_column_and_constant/ast.json b/parser/testdata/02932_analyzer_rewrite_sum_column_and_constant/ast.json new file mode 100644 index 000000000..2f3ba7fd3 --- /dev/null +++ b/parser/testdata/02932_analyzer_rewrite_sum_column_and_constant/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001078057, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02932_analyzer_rewrite_sum_column_and_constant/metadata.json b/parser/testdata/02932_analyzer_rewrite_sum_column_and_constant/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02932_analyzer_rewrite_sum_column_and_constant/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02932_analyzer_rewrite_sum_column_and_constant/query.sql b/parser/testdata/02932_analyzer_rewrite_sum_column_and_constant/query.sql new file mode 100644 index 000000000..b6fa097ab --- /dev/null +++ b/parser/testdata/02932_analyzer_rewrite_sum_column_and_constant/query.sql @@ -0,0 +1,211 @@ +SET enable_analyzer=1; + +-- { echoOn } +Select sum(number + 1) from numbers(10); +Select sum(1 + number) from numbers(10); +Select sum(number - 1) from numbers(10); +Select sum(1 - number) from numbers(10); +EXPLAIN QUERY TREE (Select sum(number + 1) from numbers(10)); +EXPLAIN QUERY TREE (Select sum(1 + number) from numbers(10)); +EXPLAIN QUERY TREE (Select sum(number - 1) from numbers(10)); +EXPLAIN QUERY TREE (Select sum(1 - number) from numbers(10)); + +WITH 1::Nullable(UInt64) as my_literal Select sum(number + my_literal) from numbers(0); +WITH 1::Nullable(UInt64) as my_literal Select sum(number) + my_literal * count() from numbers(0); +EXPLAIN QUERY TREE (WITH 1::Nullable(UInt64) as my_literal Select sum(number + my_literal) from numbers(0)); +EXPLAIN QUERY TREE (WITH 1::Nullable(UInt64) as my_literal Select sum(number) + my_literal * count() from numbers(0)); +-- { echoOff } + +DROP TABLE IF EXISTS test_table; + +CREATE TABLE test_table +( + uint64 UInt64, + float64 Float64, + decimal32 Decimal32(5), +) ENGINE=MergeTree ORDER BY uint64; + +-- Use Float64 numbers divisible by 1/16 (or some other small power of two), so that their sum doesn't 
depend on summation order. +INSERT INTO test_table VALUES (1, 1.125, 1.11); +INSERT INTO test_table VALUES (2, 2.250, 2.22); +INSERT INTO test_table VALUES (3, 3.375, 3.33); +INSERT INTO test_table VALUES (4, 4.500, 4.44); +INSERT INTO test_table VALUES (5, 5.625, 5.55); + +-- { echoOn } +SELECT sum(uint64 + 1 AS i) from test_table where i > 0; +SELECT sum(uint64 + 1) AS j from test_table having j > 0; +SELECT sum(uint64 + 1 AS i) j from test_table where i > 0 having j > 0; +SELECT sum((uint64 AS m) + (1 AS n)) j from test_table where m > 0 and n > 0 having j > 0; +SELECT sum(((uint64 AS m) + (1 AS n)) AS i) j from test_table where m > 0 and n > 0 and i > 0 having j > 0; +EXPLAIN QUERY TREE (SELECT sum(uint64 + 1 AS i) from test_table where i > 0); +EXPLAIN QUERY TREE (SELECT sum(uint64 + 1) AS j from test_table having j > 0); +EXPLAIN QUERY TREE (SELECT sum(uint64 + 1 AS i) j from test_table where i > 0 having j > 0); +EXPLAIN QUERY TREE (SELECT sum((uint64 AS m) + (1 AS n)) j from test_table where m > 0 and n > 0 having j > 0); +EXPLAIN QUERY TREE (SELECT sum(((uint64 AS m) + (1 AS n)) AS i) j from test_table where m > 0 and n > 0 and i > 0 having j > 0); + +SELECT sum(1 + uint64 AS i) from test_table where i > 0; +SELECT sum(1 + uint64) AS j from test_table having j > 0; +SELECT sum(1 + uint64 AS i) j from test_table where i > 0 having j > 0; +SELECT sum((1 AS m) + (uint64 AS n)) j from test_table where m > 0 and n > 0 having j > 0; +SELECT sum(((1 AS m) + (uint64 AS n)) AS i) j from test_table where m > 0 and n > 0 and i > 0 having j > 0; +EXPLAIN QUERY TREE (SELECT sum(1 + uint64 AS i) from test_table where i > 0); +EXPLAIN QUERY TREE (SELECT sum(1 + uint64) AS j from test_table having j > 0); +EXPLAIN QUERY TREE (SELECT sum(1 + uint64 AS i) j from test_table where i > 0 having j > 0); +EXPLAIN QUERY TREE (SELECT sum((1 AS m) + (uint64 AS n)) j from test_table where m > 0 and n > 0 having j > 0); +EXPLAIN QUERY TREE (SELECT sum(((1 AS m) + (uint64 AS n)) AS i) j from test_table where m > 0 and n > 0 and i > 0 having j > 0); + +SELECT sum(uint64 - 1 AS i) from test_table where i > 0; +SELECT sum(uint64 - 1) AS j from test_table having j > 0; +SELECT sum(uint64 - 1 AS i) j from test_table where i > 0 having j > 0; +SELECT sum((uint64 AS m) - (1 AS n)) j from test_table where m > 0 and n > 0 having j > 0; +SELECT sum(((uint64 AS m) - (1 AS n)) AS i) j from test_table where m > 0 and n > 0 and i > 0 having j > 0; +EXPLAIN QUERY TREE (SELECT sum(uint64 - 1 AS i) from test_table where i > 0); +EXPLAIN QUERY TREE (SELECT sum(uint64 - 1) AS j from test_table having j > 0); +EXPLAIN QUERY TREE (SELECT sum(uint64 - 1 AS i) j from test_table where i > 0 having j > 0); +EXPLAIN QUERY TREE (SELECT sum((uint64 AS m) - (1 AS n)) j from test_table where m > 0 and n > 0 having j > 0); +EXPLAIN QUERY TREE (SELECT sum(((uint64 AS m) - (1 AS n)) AS i) j from test_table where m > 0 and n > 0 and i > 0 having j > 0); + +SELECT sum(1 - uint64 AS i) from test_table; +SELECT sum(1 - uint64) AS j from test_table; +SELECT sum(1 - uint64 AS i) j from test_table; +SELECT sum((1 AS m) - (uint64 AS n)) j from test_table; +SELECT sum(((1 AS m) - (uint64 AS n)) AS i) j from test_table; +EXPLAIN QUERY TREE (SELECT sum(1 - uint64 AS i) from test_table where i > 0); +EXPLAIN QUERY TREE (SELECT sum(1 - uint64) AS j from test_table having j < 0); +EXPLAIN QUERY TREE (SELECT sum(1 - uint64 AS i) j from test_table where i > 0 having j < 0); +EXPLAIN QUERY TREE (SELECT sum((1 AS m) - (uint64 AS n)) j from test_table where m 
> 0 and n > 0 having j < 0); +EXPLAIN QUERY TREE (SELECT sum(((1 AS m) - (uint64 AS n)) AS i) j from test_table where m > 0 and n > 0 and i < 0 having j < 0); + +SELECT sum(uint64 + 2.11) From test_table; +SELECT sum(2.11 + uint64) From test_table; +SELECT sum(uint64 - 2.11) From test_table; +SELECT sum(2.11 - uint64) From test_table; +SELECT sum(uint64) + 2.11 * count(uint64) From test_table; +SELECT 2.11 * count(uint64) + sum(uint64) From test_table; +SELECT sum(uint64) - 2.11 * count(uint64) From test_table; +SELECT 2.11 * count(uint64) - sum(uint64) From test_table; +EXPLAIN QUERY TREE (SELECT sum(uint64 + 2.11) From test_table); +EXPLAIN QUERY TREE (SELECT sum(2.11 + uint64) From test_table); +EXPLAIN QUERY TREE (SELECT sum(uint64 - 2.11) From test_table); +EXPLAIN QUERY TREE (SELECT sum(2.11 - uint64) From test_table); +EXPLAIN QUERY TREE (SELECT sum(uint64) + 2.11 * count(uint64) From test_table); +EXPLAIN QUERY TREE (SELECT 2.11 * count(uint64) + sum(uint64) From test_table); +EXPLAIN QUERY TREE (SELECT sum(uint64) - 2.11 * count(uint64) From test_table); +EXPLAIN QUERY TREE (SELECT 2.11 * count(uint64) - sum(uint64) From test_table); + +SELECT sum(uint64 + 2) From test_table; +SELECT sum(2 + uint64) From test_table; +SELECT sum(uint64 - 2) From test_table; +SELECT sum(2 - uint64) From test_table; +SELECT sum(uint64) + 2 * count(uint64) From test_table; +SELECT 2 * count(uint64) + sum(uint64) From test_table; +SELECT sum(uint64) - 2 * count(uint64) From test_table; +SELECT 2 * count(uint64) - sum(uint64) From test_table; +EXPLAIN QUERY TREE (SELECT sum(uint64 + 2) From test_table); +EXPLAIN QUERY TREE (SELECT sum(2 + uint64) From test_table); +EXPLAIN QUERY TREE (SELECT sum(uint64 - 2) From test_table); +EXPLAIN QUERY TREE (SELECT sum(2 - uint64) From test_table); +EXPLAIN QUERY TREE (SELECT sum(uint64) + 2 * count(uint64) From test_table); +EXPLAIN QUERY TREE (SELECT 2 * count(uint64) + sum(uint64) From test_table); +EXPLAIN QUERY TREE (SELECT sum(uint64) - 2 * count(uint64) From test_table); +EXPLAIN QUERY TREE (SELECT 2 * count(uint64) - sum(uint64) From test_table); + +SELECT sum(float64 + 2) From test_table; +SELECT sum(2 + float64) From test_table; +SELECT sum(float64 - 2) From test_table; +SELECT sum(2 - float64) From test_table; +SELECT sum(float64) + 2 * count(float64) From test_table; +SELECT 2 * count(float64) + sum(float64) From test_table; +SELECT sum(float64) - 2 * count(float64) From test_table; +SELECT 2 * count(float64) - sum(float64) From test_table; +EXPLAIN QUERY TREE (SELECT sum(float64 + 2) From test_table); +EXPLAIN QUERY TREE (SELECT sum(2 + float64) From test_table); +EXPLAIN QUERY TREE (SELECT sum(float64 - 2) From test_table); +EXPLAIN QUERY TREE (SELECT sum(2 - float64) From test_table); +EXPLAIN QUERY TREE (SELECT sum(float64) + 2 * count(float64) From test_table); +EXPLAIN QUERY TREE (SELECT 2 * count(float64) + sum(float64) From test_table); +EXPLAIN QUERY TREE (SELECT sum(float64) - 2 * count(float64) From test_table); +EXPLAIN QUERY TREE (SELECT 2 * count(float64) - sum(float64) From test_table); + +SELECT sum(decimal32 + 2) From test_table; +SELECT sum(2 + decimal32) From test_table; +SELECT sum(decimal32 - 2) From test_table; +SELECT sum(2 - decimal32) From test_table; +SELECT sum(decimal32) + 2 * count(decimal32) From test_table; +SELECT 2 * count(decimal32) + sum(decimal32) From test_table; +SELECT sum(decimal32) - 2 * count(decimal32) From test_table; +SELECT 2 * count(decimal32) - sum(decimal32) From test_table; +EXPLAIN QUERY TREE (SELECT 
sum(decimal32 + 2) From test_table); +EXPLAIN QUERY TREE (SELECT sum(2 + decimal32) From test_table); +EXPLAIN QUERY TREE (SELECT sum(decimal32 - 2) From test_table); +EXPLAIN QUERY TREE (SELECT sum(2 - decimal32) From test_table); +EXPLAIN QUERY TREE (SELECT sum(decimal32) + 2 * count(decimal32) From test_table); +EXPLAIN QUERY TREE (SELECT 2 * count(decimal32) + sum(decimal32) From test_table); +EXPLAIN QUERY TREE (SELECT sum(decimal32) - 2 * count(decimal32) From test_table); +EXPLAIN QUERY TREE (SELECT 2 * count(decimal32) - sum(decimal32) From test_table); + +SELECT sum(uint64 + 2) + sum(uint64 + 3) From test_table; +SELECT sum(uint64 + 2) - sum(uint64 + 3) From test_table; +SELECT sum(uint64 - 2) + sum(uint64 - 3) From test_table; +SELECT sum(uint64 - 2) - sum(uint64 - 3) From test_table; +SELECT sum(2 - uint64) - sum(3 - uint64) From test_table; +SELECT (sum(uint64) + 2 * count(uint64)) + (sum(uint64) + 3 * count(uint64)) From test_table; +SELECT (sum(uint64) + 2 * count(uint64)) - (sum(uint64) + 3 * count(uint64)) From test_table; +SELECT (sum(uint64) - 2 * count(uint64)) + (sum(uint64) - 3 * count(uint64)) From test_table; +SELECT (sum(uint64) - 2 * count(uint64)) - (sum(uint64) - 3 * count(uint64)) From test_table; +SELECT (2 * count(uint64) - sum(uint64)) + (3 * count(uint64) - sum(uint64)) From test_table; +EXPLAIN QUERY TREE (SELECT sum(uint64 + 2) + sum(uint64 + 3) From test_table); +EXPLAIN QUERY TREE (SELECT sum(uint64 + 2) - sum(uint64 + 3) From test_table); +EXPLAIN QUERY TREE (SELECT sum(uint64 - 2) + sum(uint64 - 3) From test_table); +EXPLAIN QUERY TREE (SELECT sum(uint64 - 2) - sum(uint64 - 3) From test_table); +EXPLAIN QUERY TREE (SELECT sum(2 - uint64) - sum(3 - uint64) From test_table); +EXPLAIN QUERY TREE (SELECT (sum(uint64) + 2 * count(uint64)) + (sum(uint64) + 3 * count(uint64)) From test_table); +EXPLAIN QUERY TREE (SELECT (sum(uint64) + 2 * count(uint64)) - (sum(uint64) + 3 * count(uint64)) From test_table); +EXPLAIN QUERY TREE (SELECT (sum(uint64) - 2 * count(uint64)) + (sum(uint64) - 3 * count(uint64)) From test_table); +EXPLAIN QUERY TREE (SELECT (sum(uint64) - 2 * count(uint64)) - (sum(uint64) - 3 * count(uint64)) From test_table); +EXPLAIN QUERY TREE (SELECT (2 * count(uint64) - sum(uint64)) + (3 * count(uint64) - sum(uint64)) From test_table); + +SELECT sum(float64 + 2) + sum(float64 + 3) From test_table; +SELECT sum(float64 + 2) - sum(float64 + 3) From test_table; +SELECT sum(float64 - 2) + sum(float64 - 3) From test_table; +SELECT sum(float64 - 2) - sum(float64 - 3) From test_table; +SELECT sum(2 - float64) - sum(3 - float64) From test_table; +SELECT (sum(float64) + 2 * count(float64)) + (sum(float64) + 3 * count(float64)) From test_table; +SELECT (sum(float64) + 2 * count(float64)) - (sum(float64) + 3 * count(float64)) From test_table; +SELECT (sum(float64) - 2 * count(float64)) + (sum(float64) - 3 * count(float64)) From test_table; +SELECT (sum(float64) - 2 * count(float64)) - (sum(float64) - 3 * count(float64)) From test_table; +SELECT (2 * count(float64) - sum(float64)) + (3 * count(float64) - sum(float64)) From test_table; +EXPLAIN QUERY TREE (SELECT sum(float64 + 2) + sum(float64 + 3) From test_table); +EXPLAIN QUERY TREE (SELECT sum(float64 + 2) - sum(float64 + 3) From test_table); +EXPLAIN QUERY TREE (SELECT sum(float64 - 2) + sum(float64 - 3) From test_table); +EXPLAIN QUERY TREE (SELECT sum(float64 - 2) - sum(float64 - 3) From test_table); +EXPLAIN QUERY TREE (SELECT sum(2 - float64) - sum(3 - float64) From test_table); +EXPLAIN QUERY TREE (SELECT (sum(float64) + 2 * count(float64)) + (sum(float64) +
3 * count(float64)) From test_table); +EXPLAIN QUERY TREE (SELECT (sum(float64) + 2 * count(float64)) - (sum(float64) + 3 * count(float64)) From test_table); +EXPLAIN QUERY TREE (SELECT (sum(float64) - 2 * count(float64)) + (sum(float64) - 3 * count(float64)) From test_table); +EXPLAIN QUERY TREE (SELECT (sum(float64) - 2 * count(float64)) - (sum(float64) - 3 * count(float64)) From test_table); +EXPLAIN QUERY TREE (SELECT (2 * count(float64) - sum(float64)) + (3 * count(float64) - sum(float64)) From test_table); + +SELECT sum(decimal32 + 2) + sum(decimal32 + 3) From test_table; +SELECT sum(decimal32 + 2) - sum(decimal32 + 3) From test_table; +SELECT sum(decimal32 - 2) + sum(decimal32 - 3) From test_table; +SELECT sum(decimal32 - 2) - sum(decimal32 - 3) From test_table; +SELECT sum(2 - decimal32) - sum(3 - decimal32) From test_table; +SELECT (sum(decimal32) + 2 * count(decimal32)) + (sum(decimal32) + 3 * count(decimal32)) From test_table; +SELECT (sum(decimal32) + 2 * count(decimal32)) - (sum(decimal32) + 3 * count(decimal32)) From test_table; +SELECT (sum(decimal32) - 2 * count(decimal32)) + (sum(decimal32) - 3 * count(decimal32)) From test_table; +SELECT (sum(decimal32) - 2 * count(decimal32)) - (sum(decimal32) - 3 * count(decimal32)) From test_table; +SELECT (2 * count(decimal32) - sum(decimal32)) + (3 * count(decimal32) - sum(decimal32)) From test_table; +EXPLAIN QUERY TREE (SELECT sum(decimal32 + 2) + sum(decimal32 + 3) From test_table); +EXPLAIN QUERY TREE (SELECT sum(decimal32 + 2) - sum(decimal32 + 3) From test_table); +EXPLAIN QUERY TREE (SELECT sum(decimal32 - 2) + sum(decimal32 - 3) From test_table); +EXPLAIN QUERY TREE (SELECT sum(decimal32 - 2) - sum(decimal32 - 3) From test_table); +EXPLAIN QUERY TREE (SELECT sum(2 - decimal32) - sum(3 - decimal32) From test_table); +EXPLAIN QUERY TREE (SELECT (sum(decimal32) + 2 * count(decimal32)) + (sum(decimal32) + 3 * count(decimal32)) From test_table); +EXPLAIN QUERY TREE (SELECT (sum(decimal32) + 2 * count(decimal32)) - (sum(decimal32) + 3 * count(decimal32)) From test_table); +EXPLAIN QUERY TREE (SELECT (sum(decimal32) - 2 * count(decimal32)) + (sum(decimal32) - 3 * count(decimal32)) From test_table); +EXPLAIN QUERY TREE (SELECT (sum(decimal32) - 2 * count(decimal32)) - (sum(decimal32) - 3 * count(decimal32)) From test_table); +EXPLAIN QUERY TREE (SELECT (2 * count(decimal32) - sum(decimal32)) + (3 * count(decimal32) - sum(decimal32)) From test_table); +-- { echoOff } + +DROP TABLE IF EXISTS test_table; diff --git a/parser/testdata/02932_apply_deleted_mask/ast.json b/parser/testdata/02932_apply_deleted_mask/ast.json new file mode 100644 index 000000000..000c93b52 --- /dev/null +++ b/parser/testdata/02932_apply_deleted_mask/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_materialize_delete (children 1)" + }, + { + "explain": " Identifier t_materialize_delete" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001246173, + "rows_read": 2, + "bytes_read": 92 + } +} diff --git a/parser/testdata/02932_apply_deleted_mask/metadata.json b/parser/testdata/02932_apply_deleted_mask/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02932_apply_deleted_mask/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02932_apply_deleted_mask/query.sql b/parser/testdata/02932_apply_deleted_mask/query.sql new file mode 100644 index 000000000..0ada0640a --- /dev/null +++ 
b/parser/testdata/02932_apply_deleted_mask/query.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS t_materialize_delete; + +CREATE TABLE t_materialize_delete (id UInt64, v UInt64) +ENGINE = MergeTree ORDER BY id PARTITION BY id % 10; + +SET mutations_sync = 2; + +INSERT INTO t_materialize_delete SELECT number, number FROM numbers(100); + +SELECT 'Inserted'; + +SELECT count(), sum(v) FROM t_materialize_delete; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_materialize_delete' AND active; + +SELECT 'Lightweight deleted'; + +DELETE FROM t_materialize_delete WHERE id % 7 = 3; + +SELECT count(), sum(v) FROM t_materialize_delete; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_materialize_delete' AND active; + +SELECT 'Mask applied'; + +ALTER TABLE t_materialize_delete APPLY DELETED MASK; + +SELECT count(), sum(v) FROM t_materialize_delete; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_materialize_delete' AND active; + +SELECT 'Lightweight deleted'; + +DELETE FROM t_materialize_delete WHERE id % 7 = 4; + +SELECT count(), sum(v) FROM t_materialize_delete; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_materialize_delete' AND active; + +SELECT 'Mask applied in partition'; + +ALTER TABLE t_materialize_delete APPLY DELETED MASK IN PARTITION 5; + +SELECT count(), sum(v) FROM t_materialize_delete; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_materialize_delete' AND active; + +DROP TABLE t_materialize_delete; diff --git a/parser/testdata/02932_group_by_null_fuzzer/ast.json b/parser/testdata/02932_group_by_null_fuzzer/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02932_group_by_null_fuzzer/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02932_group_by_null_fuzzer/metadata.json b/parser/testdata/02932_group_by_null_fuzzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02932_group_by_null_fuzzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02932_group_by_null_fuzzer/query.sql b/parser/testdata/02932_group_by_null_fuzzer/query.sql new file mode 100644 index 000000000..603c7783e --- /dev/null +++ b/parser/testdata/02932_group_by_null_fuzzer/query.sql @@ -0,0 +1,6 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/43202 +-- Queries are generated by the fuzzer, so don't expect them to make sense +SET enable_positional_arguments=0; +SELECT NULL, '' FROM (SELECT toNullable(''), NULL AS key GROUP BY GROUPING SETS ((NULL))) AS s1 ALL LEFT JOIN (SELECT '' AS key, NULL AS value GROUP BY GROUPING SETS (('')) WITH TOTALS UNION ALL SELECT NULL AS key, toNullable(NULL) AS value GROUP BY '', NULL, '' WITH TOTALS) AS s2 USING (key); +SELECT NULL GROUP BY NULL WITH TOTALS; +SELECT 1048575, NULL, b FROM (SELECT '25.5' AS a, NULL, NULL AS b GROUP BY GROUPING SETS ((0.0001)) WITH TOTALS) AS js1 ANY RIGHT JOIN (SELECT NULL AS a, NULL AS b WHERE NULL GROUP BY NULL, -9223372036854775807 WITH CUBE WITH TOTALS UNION ALL SELECT NULL AS a, NULL AS b GROUP BY 1, '21474836.46' WITH TOTALS) AS js2 USING (a, b) ORDER BY nan DESC NULLS LAST, '9223372036854775807' DESC NULLS LAST, a ASC NULLS LAST; diff --git
a/parser/testdata/02932_idna/ast.json b/parser/testdata/02932_idna/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02932_idna/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02932_idna/metadata.json b/parser/testdata/02932_idna/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02932_idna/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02932_idna/query.sql b/parser/testdata/02932_idna/query.sql new file mode 100644 index 000000000..db7688064 --- /dev/null +++ b/parser/testdata/02932_idna/query.sql @@ -0,0 +1,124 @@ +-- Tags: no-fasttest +-- no-fasttest: requires idna library + +-- See also 02932_punycode.sql + +SELECT '-- Negative tests'; + +SELECT idnaEncode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT tryIdnaEncode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT idnaDecode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT idnaEncode(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT tryIdnaEncode(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT idnaDecode(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT idnaEncode('two', 'strings'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT tryIdnaEncode('two', 'strings'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT idnaDecode('two', 'strings'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT idnaEncode(toFixedString('two', 3)); -- { serverError NOT_IMPLEMENTED } +SELECT tryIdnaEncode(toFixedString('two', 3)); -- { serverError NOT_IMPLEMENTED } +SELECT idnaDecode(toFixedString('two', 3)); -- { serverError NOT_IMPLEMENTED } + +SELECT '-- Regular cases'; + +-- The test cases originate from the ada idna unit tests: +-- - https://github.com/ada-url/idna/blob/8cd03ef867dbd06be87bd61df9cf69aa1182ea21/tests/fixtures/to_ascii_alternating.txt +-- - https://github.com/ada-url/idna/blob/8cd03ef867dbd06be87bd61df9cf69aa1182ea21/tests/fixtures/to_unicode_alternating.txt +-- +SELECT 'straße.de' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; +SELECT '2001:4860:4860::8888' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; +SELECT 'AMAZON' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; +SELECT 'aa--' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; +SELECT 'a†--' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; +SELECT 'ab--c' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; +SELECT '-†' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; +SELECT '-x.xn--zca' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; +SELECT 'x-.xn--zca' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; +SELECT 'x-.ß' AS idna, idnaEncode(idna) 
AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; +SELECT 'x..ß' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; +SELECT '128.0,0.1' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; +SELECT 'xn--zca.xn--zca' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; +SELECT 'xn--zca.ß' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; +SELECT 'x01234567890123456789012345678901234567890123456789012345678901x' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; +SELECT 'x01234567890123456789012345678901234567890123456789012345678901x.xn--zca' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; +SELECT 'x01234567890123456789012345678901234567890123456789012345678901x.ß' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; +SELECT '01234567890123456789012345678901234567890123456789.01234567890123456789012345678901234567890123456789.01234567890123456789012345678901234567890123456789.01234567890123456789012345678901234567890123456789.0123456789012345678901234567890123456789012345678.x' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; +SELECT '≠' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(idna) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try; + +SELECT 'aa--' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'ab--c' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT '-x' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT '' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--1ch' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--dqd20apc' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--gdh' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--80aaa0ahbbeh4c' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--3bs854c' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--mgb9awbf' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--mgbaam7a8h' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--mgbbh1a71e' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS 
original_try; +SELECT 'xn--s7y.com' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--55qx5d.xn--tckwe' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--4dbrk0ce' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--zckzah' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--p1ai.com' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--mxahbxey0c.gr' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--h2brj9c' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--d1acpjx3f.xn--p1ai' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--q9jyb4c' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--sterreich-z7a.at' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--h2breg3eve.xn--h2brj9c' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'ejemplo.xn--q9jyb4c' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--9t4b11yi5a.com' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--gk3at1e.com' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--42c2d9a' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT '1xn--' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--bih.com' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--4gbrim.xn----rmckbbajlc6dj7bxne2c.xn--wgbh1c' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--mgbb9fbpob' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'xn--55qw42g.xn--55qw42g' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT '≠' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +SELECT 'ファッション.biz' AS ascii, idnaDecode(ascii) AS unicode, idnaEncode(unicode) AS original, tryIdnaEncode(unicode) AS original_try; +-- +SELECT '-- Special cases'; + +SELECT '---- Empty input'; +SELECT idnaEncode(''); +SELECT tryIdnaEncode(''); +SELECT idnaDecode(''); + +SELECT '---- NULL input'; +SELECT idnaEncode(NULL); +SELECT tryIdnaEncode(NULL); +SELECT idnaDecode(NULL); + +SELECT '---- Garbage inputs for idnaEncode'; +-- - https://github.com/ada-url/idna/blob/8cd03ef867dbd06be87bd61df9cf69aa1182ea21/tests/fixtures/to_ascii_invalid.txt +SELECT idnaEncode('xn--'); -- { serverError 
BAD_ARGUMENTS } +SELECT tryIdnaEncode('xn--'); +SELECT idnaEncode('ﻱa'); -- { serverError BAD_ARGUMENTS } +SELECT tryIdnaEncode('ﻱa'); +SELECT idnaEncode('xn--a-yoc'); -- { serverError BAD_ARGUMENTS } +SELECT tryIdnaEncode('xn--a-yoc'); +SELECT idnaEncode('xn--tešla'); -- { serverError BAD_ARGUMENTS } +SELECT tryIdnaEncode('xn--tešla'); + +SELECT '---- Long input'; +SELECT 'Wenn Sie ... vom Hauptbahnhof in München ... mit zehn Minuten, ohne, dass Sie am Flughafen noch einchecken müssen, dann starten Sie im Grunde genommen am Flughafen ... am ... am Hauptbahnhof in München starten Sie Ihren Flug. Zehn Minuten. Schauen Sie sich mal die großen Flughäfen an, wenn Sie in Heathrow in London oder sonst wo, meine se ... Charles de Gaulle äh in Frankreich oder in ...äh... in ... in...äh...in Rom. Wenn Sie sich mal die Entfernungen ansehen, wenn Sie Frankfurt sich ansehen, dann werden Sie feststellen, dass zehn Minuten... Sie jederzeit locker in Frankfurt brauchen, um ihr Gate zu finden. Wenn Sie vom Flug ... vom ... vom Hauptbahnhof starten - Sie steigen in den Hauptbahnhof ein, Sie fahren mit dem Transrapid in zehn Minuten an den Flughafen in ... an den Flughafen Franz Josef Strauß. Dann starten Sie praktisch hier am Hauptbahnhof in München. Das bedeutet natürlich, dass der Hauptbahnhof im Grunde genommen näher an Bayern ... an die bayerischen Städte heranwächst, weil das ja klar ist, weil auf dem Hauptbahnhof viele Linien aus Bayern zusammenlaufen.' AS idna, idnaEncode(idna) AS ascii, tryIdnaEncode(ascii) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try FORMAT Vertical; + +SELECT '---- Non-const input'; +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (idna String) ENGINE=MergeTree ORDER BY idna; +INSERT INTO tab VALUES ('straße.münchen.de') ('') ('münchen'); +SELECT idna, idnaEncode(idna) AS ascii, tryIdnaEncode(ascii) AS ascii_try, idnaDecode(ascii) AS original, idnaDecode(ascii_try) AS original_try FROM tab; +DROP TABLE tab; + +SELECT '---- Non-const input with invalid values sprinkled in'; +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (idna String) ENGINE=MergeTree ORDER BY idna; +INSERT INTO tab VALUES ('xn--') ('london.co.uk') ('ytraße.münchen.de') ('xn--tešla') ('microsoft.com') ('xn--'); +SELECT idna, idnaEncode(idna) AS ascii FROM tab; -- { serverError BAD_ARGUMENTS } +SELECT idna, tryIdnaEncode(idna) AS ascii, idnaDecode(ascii) AS original FROM tab; +DROP TABLE tab; diff --git a/parser/testdata/02932_lwd_and_mutations/ast.json b/parser/testdata/02932_lwd_and_mutations/ast.json new file mode 100644 index 000000000..a67455c81 --- /dev/null +++ b/parser/testdata/02932_lwd_and_mutations/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lwd_mutations (children 1)" + }, + { + "explain": " Identifier t_lwd_mutations" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001681718, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02932_lwd_and_mutations/metadata.json b/parser/testdata/02932_lwd_and_mutations/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02932_lwd_and_mutations/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02932_lwd_and_mutations/query.sql b/parser/testdata/02932_lwd_and_mutations/query.sql new file mode 100644 index 000000000..a68aca917 --- /dev/null +++ b/parser/testdata/02932_lwd_and_mutations/query.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS 
t_lwd_mutations; + +CREATE TABLE t_lwd_mutations(id UInt64, v UInt64) ENGINE = MergeTree ORDER BY id; +INSERT INTO t_lwd_mutations SELECT number, 0 FROM numbers(1000); + +SET mutations_sync = 2; + +DELETE FROM t_lwd_mutations WHERE id % 10 = 0; + +SELECT count(), sum(v), arraySort(groupUniqArray(id % 10)) FROM t_lwd_mutations; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_lwd_mutations' AND active; + +ALTER TABLE t_lwd_mutations UPDATE v = 1 WHERE id % 4 = 0, DELETE WHERE id % 10 = 1; + +SELECT count(), sum(v), arraySort(groupUniqArray(id % 10)) FROM t_lwd_mutations; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_lwd_mutations' AND active; + +DELETE FROM t_lwd_mutations WHERE id % 10 = 2; + +SELECT count(), sum(v), arraySort(groupUniqArray(id % 10)) FROM t_lwd_mutations; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_lwd_mutations' AND active; + +ALTER TABLE t_lwd_mutations UPDATE v = 1 WHERE id % 4 = 1, DELETE WHERE id % 10 = 3; + +SELECT count(), sum(v), arraySort(groupUniqArray(id % 10)) FROM t_lwd_mutations; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_lwd_mutations' AND active; + +ALTER TABLE t_lwd_mutations UPDATE _row_exists = 0 WHERE id % 10 = 4, DELETE WHERE id % 10 = 5; + +SELECT count(), sum(v), arraySort(groupUniqArray(id % 10)) FROM t_lwd_mutations; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_lwd_mutations' AND active; + +ALTER TABLE t_lwd_mutations DELETE WHERE id % 10 = 6, UPDATE _row_exists = 0 WHERE id % 10 = 7; + +SELECT count(), sum(v), arraySort(groupUniqArray(id % 10)) FROM t_lwd_mutations; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_lwd_mutations' AND active; + +ALTER TABLE t_lwd_mutations APPLY DELETED MASK; + +SELECT count(), sum(v), arraySort(groupUniqArray(id % 10)) FROM t_lwd_mutations; +SELECT count(), sum(rows), sum(has_lightweight_delete) FROM system.parts WHERE database = currentDatabase() AND table = 't_lwd_mutations' AND active; + +DROP TABLE IF EXISTS t_lwd_mutations; diff --git a/parser/testdata/02932_materialized_view_with_dropped_target_table_no_exception/ast.json b/parser/testdata/02932_materialized_view_with_dropped_target_table_no_exception/ast.json new file mode 100644 index 000000000..23b009322 --- /dev/null +++ b/parser/testdata/02932_materialized_view_with_dropped_target_table_no_exception/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001383268, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02932_materialized_view_with_dropped_target_table_no_exception/metadata.json b/parser/testdata/02932_materialized_view_with_dropped_target_table_no_exception/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02932_materialized_view_with_dropped_target_table_no_exception/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02932_materialized_view_with_dropped_target_table_no_exception/query.sql 
b/parser/testdata/02932_materialized_view_with_dropped_target_table_no_exception/query.sql new file mode 100644 index 000000000..4c807716e --- /dev/null +++ b/parser/testdata/02932_materialized_view_with_dropped_target_table_no_exception/query.sql @@ -0,0 +1,21 @@ +set ignore_materialized_views_with_dropped_target_table = 1; +set send_logs_level='error'; +drop table if exists from_table; +drop table if exists to_table; +drop table if exists mv; + +create table from_table (x UInt32) engine=MergeTree order by x; +create table to_table (x UInt32) engine=MergeTree order by x; +create materialized view mv to to_table as select * from from_table; + +insert into from_table select 42; +select * from from_table; +select * from to_table; + +drop table to_table; + +insert into from_table select 42; +select * from from_table; + +drop table from_table; +drop view mv; diff --git a/parser/testdata/02932_non_ready_set_stuck/ast.json b/parser/testdata/02932_non_ready_set_stuck/ast.json new file mode 100644 index 000000000..c39895784 --- /dev/null +++ b/parser/testdata/02932_non_ready_set_stuck/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery tab (children 3)" + }, + { + "explain": " Identifier tab" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration item_id (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " ColumnDeclaration price_sold (children 1)" + }, + { + "explain": " DataType Nullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType Float32" + }, + { + "explain": " ColumnDeclaration date (children 1)" + }, + { + "explain": " DataType Date" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier item_id" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001515553, + "rows_read": 15, + "bytes_read": 543 + } +} diff --git a/parser/testdata/02932_non_ready_set_stuck/metadata.json b/parser/testdata/02932_non_ready_set_stuck/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02932_non_ready_set_stuck/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02932_non_ready_set_stuck/query.sql b/parser/testdata/02932_non_ready_set_stuck/query.sql new file mode 100644 index 000000000..c04f8f187 --- /dev/null +++ b/parser/testdata/02932_non_ready_set_stuck/query.sql @@ -0,0 +1,2 @@ +CREATE TABLE tab (item_id UInt64, price_sold Nullable(Float32), date Date) ENGINE = MergeTree ORDER BY item_id; +SELECT * FROM (SELECT item_id FROM tab GROUP BY item_id WITH TOTALS ORDER BY '922337203.6854775806' IN (SELECT NULL)) AS l RIGHT JOIN (SELECT item_id FROM tab) AS r ON l.item_id = r.item_id WHERE NULL; diff --git a/parser/testdata/02932_parallel_replicas_fuzzer/ast.json b/parser/testdata/02932_parallel_replicas_fuzzer/ast.json new file mode 100644 index 000000000..756470ef4 --- /dev/null +++ b/parser/testdata/02932_parallel_replicas_fuzzer/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001502325, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02932_parallel_replicas_fuzzer/metadata.json 
b/parser/testdata/02932_parallel_replicas_fuzzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02932_parallel_replicas_fuzzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02932_parallel_replicas_fuzzer/query.sql b/parser/testdata/02932_parallel_replicas_fuzzer/query.sql new file mode 100644 index 000000000..038f5c1c9 --- /dev/null +++ b/parser/testdata/02932_parallel_replicas_fuzzer/query.sql @@ -0,0 +1,38 @@ +SET parallel_replicas_for_non_replicated_merge_tree=1; + +-- https://github.com/ClickHouse/ClickHouse/issues/49559 +CREATE TABLE join_inner_table__fuzz_146 (`id` UUID, `key` String, `number` Int64, `value1` String, `value2` String, `time` Nullable(Int64)) ENGINE = MergeTree ORDER BY (id, number, key); +INSERT INTO join_inner_table__fuzz_146 SELECT CAST('833c9e22-c245-4eb5-8745-117a9a1f26b1', 'UUID') AS id, CAST(rowNumberInAllBlocks(), 'String') AS key, * FROM generateRandom('number Int64, value1 String, value2 String, time Int64', 1, 10, 2) LIMIT 100; +SELECT key, value1, value2, toUInt64(min(time)) AS start_ts FROM join_inner_table__fuzz_146 GROUP BY key, value1, value2 WITH CUBE ORDER BY key ASC NULLS LAST, value2 DESC NULLS LAST LIMIT 9223372036854775806 + FORMAT Null + SETTINGS + max_parallel_replicas = 3, + prefer_localhost_replica = 1, + cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', + enable_parallel_replicas = 1, + use_hedged_requests = 0; + + +-- https://github.com/ClickHouse/ClickHouse/issues/48496 +CREATE TABLE t_02709__fuzz_23 (`key` Nullable(UInt8), `sign` Int8, `date` DateTime64(3)) ENGINE = CollapsingMergeTree(sign) PARTITION BY date ORDER BY key SETTINGS allow_nullable_key=1; +INSERT INTO t_02709__fuzz_23 values (1, 1, '2023-12-01 00:00:00.000'); +SELECT NULL FROM t_02709__fuzz_23 FINAL +GROUP BY sign, '1023' +ORDER BY nan DESC, [0, NULL, NULL, NULL, NULL] DESC +FORMAT Null +SETTINGS + max_parallel_replicas = 3, + enable_parallel_replicas = 1, + use_hedged_requests = 0, + cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; + +SELECT _CAST(NULL, 'Nullable(Nothing)') AS `NULL` +FROM t_02709__fuzz_23 FINAL +GROUP BY + t_02709__fuzz_23.sign, + '1023' +ORDER BY + nan DESC, + _CAST([0, NULL, NULL, NULL, NULL], 'Array(Nullable(UInt8))') DESC +FORMAT Null +SETTINGS receive_timeout = 10., receive_data_timeout_ms = 10000, use_hedged_requests = 0, allow_suspicious_low_cardinality_types = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_parallel_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, log_queries = 1, table_function_remote_max_addresses = 200, enable_analyzer = 1; diff --git a/parser/testdata/02932_punycode/ast.json b/parser/testdata/02932_punycode/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02932_punycode/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02932_punycode/metadata.json b/parser/testdata/02932_punycode/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02932_punycode/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02932_punycode/query.sql b/parser/testdata/02932_punycode/query.sql new file mode 100644 index 000000000..b9bcf9336 --- /dev/null +++ b/parser/testdata/02932_punycode/query.sql @@ -0,0 +1,86 @@ +-- Tags: no-fasttest +-- no-fasttest: requires idna library + +-- See also 
02932_idna.sql + +SELECT '-- Negative tests'; + +SELECT punycodeEncode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT punycodeDecode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT tryPunycodeDecode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT punycodeEncode(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT punycodeDecode(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT tryPunycodeDecode(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT punycodeEncode('two', 'strings'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT punycodeDecode('two', 'strings'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT tryPunycodeDecode('two', 'strings'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT punycodeEncode(toFixedString('two', 3)); -- { serverError NOT_IMPLEMENTED } +SELECT punycodeDecode(toFixedString('two', 3)); -- { serverError NOT_IMPLEMENTED } +SELECT tryPunycodeDecode(toFixedString('two', 3)); -- { serverError NOT_IMPLEMENTED } + +SELECT '-- Regular cases'; + +-- The test cases originate from the ada idna unit tests: +-- - https://github.com/ada-url/idna/blob/8cd03ef867dbd06be87bd61df9cf69aa1182ea21/tests/fixtures/utf8_punycode_alternating.txt + +SELECT 'a' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT 'A' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT '--' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT 'London' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT 'Lloyd-Atkinson' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT 'This has spaces' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT '-> $1.00 <-' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT 'а' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT 'ü' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT 'α' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT '例' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT '😉' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT 'αβγ' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT 'München' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT 'Mnchen-3ya' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT 'München-Ost' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT 'Bahnhof München-Ost' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT 'abæcdöef' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, 
tryPunycodeDecode(puny) AS original_try; +SELECT 'правда' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT 'ยจฆฟคฏข' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT 'ドメイン名例' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT 'MajiでKoiする5秒前' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT '「bücher」' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +SELECT '团淄' AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try; +-- +SELECT '-- Special cases'; + +SELECT '---- Empty input'; +SELECT punycodeEncode(''); +SELECT punycodeDecode(''); +SELECT tryPunycodeDecode(''); + +SELECT '---- NULL input'; +SELECT punycodeEncode(NULL); +SELECT punycodeDecode(NULL); +SELECT tryPunycodeDecode(NULL); + +SELECT '---- Garbage Punycode-encoded input'; +SELECT punycodeDecode('no punycode'); -- { serverError BAD_ARGUMENTS } +SELECT tryPunycodeDecode('no punycode'); + +SELECT '---- Long input'; +SELECT 'Wenn Sie ... vom Hauptbahnhof in München ... mit zehn Minuten, ohne, dass Sie am Flughafen noch einchecken müssen, dann starten Sie im Grunde genommen am Flughafen ... am ... am Hauptbahnhof in München starten Sie Ihren Flug. Zehn Minuten. Schauen Sie sich mal die großen Flughäfen an, wenn Sie in Heathrow in London oder sonst wo, meine se ... Charles de Gaulle äh in Frankreich oder in ...äh... in ... in...äh...in Rom. Wenn Sie sich mal die Entfernungen ansehen, wenn Sie Frankfurt sich ansehen, dann werden Sie feststellen, dass zehn Minuten... Sie jederzeit locker in Frankfurt brauchen, um ihr Gate zu finden. Wenn Sie vom Flug ... vom ... vom Hauptbahnhof starten - Sie steigen in den Hauptbahnhof ein, Sie fahren mit dem Transrapid in zehn Minuten an den Flughafen in ... an den Flughafen Franz Josef Strauß. Dann starten Sie praktisch hier am Hauptbahnhof in München. Das bedeutet natürlich, dass der Hauptbahnhof im Grunde genommen näher an Bayern ... an die bayerischen Städte heranwächst, weil das ja klar ist, weil auf dem Hauptbahnhof viele Linien aus Bayern zusammenlaufen.' 
AS str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try FORMAT Vertical; + +SELECT '---- Non-const values'; +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (str String) ENGINE=MergeTree ORDER BY str; +INSERT INTO tab VALUES ('abc') ('aäoöuü') ('München'); +SELECT str, punycodeEncode(str) AS puny, punycodeDecode(puny) AS original, tryPunycodeDecode(puny) AS original_try FROM tab; +DROP TABLE tab; + +SELECT '---- Non-const values with invalid values sprinkled in'; +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (puny String) ENGINE=MergeTree ORDER BY puny; +INSERT INTO tab VALUES ('Also no punycode') ('London-') ('Mnchen-3ya') ('No punycode') ('Rtting-3ya') ('XYZ no punycode'); +SELECT puny, punycodeDecode(puny) AS original FROM tab; -- { serverError BAD_ARGUMENTS } +SELECT puny, tryPunycodeDecode(puny) AS original FROM tab; +DROP TABLE tab; diff --git a/parser/testdata/02932_query_settings_max_size_drop/ast.json b/parser/testdata/02932_query_settings_max_size_drop/ast.json new file mode 100644 index 000000000..60b26a1ff --- /dev/null +++ b/parser/testdata/02932_query_settings_max_size_drop/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test_max_size_drop (children 1)" + }, + { + "explain": " Identifier test_max_size_drop" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001024977, + "rows_read": 2, + "bytes_read": 89 + } +} diff --git a/parser/testdata/02932_query_settings_max_size_drop/metadata.json b/parser/testdata/02932_query_settings_max_size_drop/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02932_query_settings_max_size_drop/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02932_query_settings_max_size_drop/query.sql b/parser/testdata/02932_query_settings_max_size_drop/query.sql new file mode 100644 index 000000000..b3535ae3f --- /dev/null +++ b/parser/testdata/02932_query_settings_max_size_drop/query.sql @@ -0,0 +1,31 @@ +CREATE TABLE test_max_size_drop +Engine = MergeTree() +ORDER BY number +AS SELECT number +FROM numbers(1000) +; + +DROP TABLE test_max_size_drop SETTINGS max_table_size_to_drop = 1; -- { serverError TABLE_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT } +DROP TABLE test_max_size_drop; + +CREATE TABLE test_max_size_drop +Engine = MergeTree() +ORDER BY number +AS SELECT number +FROM numbers(1000) +; + +ALTER TABLE test_max_size_drop DROP PARTITION tuple() SETTINGS max_partition_size_to_drop = 1; -- { serverError TABLE_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT } +ALTER TABLE test_max_size_drop DROP PARTITION tuple(); +DROP TABLE test_max_size_drop; + +CREATE TABLE test_max_size_drop +Engine = MergeTree() +ORDER BY number +AS SELECT number +FROM numbers(1000) +; + +ALTER TABLE test_max_size_drop DROP PART 'all_1_1_0' SETTINGS max_partition_size_to_drop = 1; -- { serverError TABLE_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT } +ALTER TABLE test_max_size_drop DROP PART 'all_1_1_0'; +DROP TABLE test_max_size_drop; diff --git a/parser/testdata/02932_query_settings_max_size_drop_rmt/ast.json b/parser/testdata/02932_query_settings_max_size_drop_rmt/ast.json new file mode 100644 index 000000000..2d2de3aca --- /dev/null +++ b/parser/testdata/02932_query_settings_max_size_drop_rmt/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_max_size_drop (children 1)" + }, + { + "explain": " Identifier 
test_max_size_drop" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001097301, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/02932_query_settings_max_size_drop_rmt/metadata.json b/parser/testdata/02932_query_settings_max_size_drop_rmt/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02932_query_settings_max_size_drop_rmt/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02932_query_settings_max_size_drop_rmt/query.sql b/parser/testdata/02932_query_settings_max_size_drop_rmt/query.sql new file mode 100644 index 000000000..58ea35780 --- /dev/null +++ b/parser/testdata/02932_query_settings_max_size_drop_rmt/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS test_max_size_drop SYNC; +SET insert_keeper_fault_injection_probability = 0.0; + +CREATE TABLE test_max_size_drop (number UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_max_size_drop', '1') +ORDER BY number; + +INSERT INTO test_max_size_drop SELECT number FROM numbers(1000); + +DROP TABLE test_max_size_drop SETTINGS max_table_size_to_drop = 1; -- { serverError TABLE_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT } +DROP TABLE test_max_size_drop SYNC; + +CREATE TABLE test_max_size_drop (number UInt64) +Engine = ReplicatedMergeTree('/clickhouse/tables/{database}/test_max_size_drop', '1') +ORDER BY number; + +INSERT INTO test_max_size_drop SELECT number FROM numbers(1000); + +ALTER TABLE test_max_size_drop DROP PARTITION tuple() SETTINGS max_partition_size_to_drop = 1; -- { serverError TABLE_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT } +ALTER TABLE test_max_size_drop DROP PARTITION tuple(); +DROP TABLE test_max_size_drop SYNC; diff --git a/parser/testdata/02932_set_ttl_where/ast.json b/parser/testdata/02932_set_ttl_where/ast.json new file mode 100644 index 000000000..3099a9e7d --- /dev/null +++ b/parser/testdata/02932_set_ttl_where/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_temp (children 1)" + }, + { + "explain": " Identifier t_temp" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001668572, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/02932_set_ttl_where/metadata.json b/parser/testdata/02932_set_ttl_where/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02932_set_ttl_where/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02932_set_ttl_where/query.sql b/parser/testdata/02932_set_ttl_where/query.sql new file mode 100644 index 000000000..80e8b9c48 --- /dev/null +++ b/parser/testdata/02932_set_ttl_where/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS t_temp; +create table t_temp ( + a UInt32, + timestamp DateTime +) +engine = MergeTree +order by a +TTL timestamp + INTERVAL 2 SECOND WHERE a in (select number from system.numbers limit 10_000); + +select sleep(1); +insert into t_temp select rand(), now() from system.numbers limit 100_000; +select sleep(1); +insert into t_temp select rand(), now() from system.numbers limit 100_000; +select sleep(1); +optimize table t_temp final; + +DROP TABLE t_temp; diff --git a/parser/testdata/02933_compare_with_bool_as_string/ast.json b/parser/testdata/02933_compare_with_bool_as_string/ast.json new file mode 100644 index 000000000..38fe8eb21 --- /dev/null +++ b/parser/testdata/02933_compare_with_bool_as_string/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": 
"String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Bool_1" + }, + { + "explain": " Literal 'true'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001634977, + "rows_read": 8, + "bytes_read": 285 + } +} diff --git a/parser/testdata/02933_compare_with_bool_as_string/metadata.json b/parser/testdata/02933_compare_with_bool_as_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02933_compare_with_bool_as_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02933_compare_with_bool_as_string/query.sql b/parser/testdata/02933_compare_with_bool_as_string/query.sql new file mode 100644 index 000000000..5dbacd5fb --- /dev/null +++ b/parser/testdata/02933_compare_with_bool_as_string/query.sql @@ -0,0 +1 @@ +select true = 'true'; diff --git a/parser/testdata/02933_ephemeral_mv/ast.json b/parser/testdata/02933_ephemeral_mv/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02933_ephemeral_mv/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02933_ephemeral_mv/metadata.json b/parser/testdata/02933_ephemeral_mv/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02933_ephemeral_mv/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02933_ephemeral_mv/query.sql b/parser/testdata/02933_ephemeral_mv/query.sql new file mode 100644 index 000000000..3e2f0bf0d --- /dev/null +++ b/parser/testdata/02933_ephemeral_mv/query.sql @@ -0,0 +1,31 @@ + +CREATE TABLE raw +( + name String, + num String +) ENGINE = MergeTree +ORDER BY (name); + +CREATE TABLE parsed_eph +( + name String, + num_ephemeral UInt32 EPHEMERAL, + num UInt32 MATERIALIZED num_ephemeral, +) ENGINE = MergeTree +ORDER BY (name); + +CREATE MATERIALIZED VIEW parse_mv_eph +TO parsed_eph +AS +SELECT + name, + toUInt32(num) as num_ephemeral +FROM raw; + +INSERT INTO raw VALUES ('3', '3'), ('42', '42'); + +SELECT name, num FROM parsed_eph; + +DROP VIEW parse_mv_eph; +DROP TABLE parsed_eph; +DROP TABLE raw; diff --git a/parser/testdata/02933_paste_join/ast.json b/parser/testdata/02933_paste_join/ast.json new file mode 100644 index 000000000..1f734aedd --- /dev/null +++ b/parser/testdata/02933_paste_join/ast.json @@ -0,0 +1,127 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias t1) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number (alias a)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + 
"explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias t2) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number (alias a)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " TableJoin" + } + ], + + "rows": 35, + + "statistics": + { + "elapsed": 0.001435099, + "rows_read": 35, + "bytes_read": 1546 + } +} diff --git a/parser/testdata/02933_paste_join/metadata.json b/parser/testdata/02933_paste_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02933_paste_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02933_paste_join/query.sql b/parser/testdata/02933_paste_join/query.sql new file mode 100644 index 000000000..6c5a923d0 --- /dev/null +++ b/parser/testdata/02933_paste_join/query.sql @@ -0,0 +1,55 @@ +select * from (SELECT number as a FROM numbers(10)) t1 PASTE JOIN (select number as a from numbers(10)) t2; +select * from (SELECT number as a FROM numbers(10)) t1 PASTE JOIN (select number as a from numbers(10) order by a desc) t2; +create table if not exists test (number UInt64) engine=Memory; +insert into test select number from numbers(6); +insert into test select number from numbers(5); +SELECT * FROM (SELECT 1) t1 PASTE JOIN (SELECT 2) SETTINGS joined_subquery_requires_alias=0; +select * from (SELECT number as a FROM numbers(11)) t1 PASTE JOIN test t2 SETTINGS max_threads=1; +select * from (SELECT number as a FROM numbers(11)) t1 PASTE JOIN (select * from test limit 2) t2 SETTINGs max_threads=1; +CREATE TABLE t1 (a UInt64, b UInt64) ENGINE = Memory; +INSERT INTO t1 SELECT number, number FROM numbers(0, 3); +INSERT INTO t1 SELECT number, number FROM numbers(3, 2); +INSERT INTO t1 SELECT number, number FROM numbers(5, 7); +INSERT INTO t1 SELECT number, number FROM numbers(12, 2); +INSERT INTO t1 SELECT number, number FROM numbers(14, 1); +INSERT INTO t1 SELECT number, number FROM numbers(15, 2); +INSERT INTO t1 SELECT number, number FROM numbers(17, 1); +INSERT INTO t1 SELECT number, number FROM numbers(18, 2); +INSERT INTO t1 SELECT number, number FROM numbers(20, 2); +INSERT INTO t1 SELECT number, number FROM numbers(22, 2); +INSERT INTO t1 SELECT number, number FROM numbers(24, 2); +INSERT INTO t1 SELECT number, number FROM numbers(26, 2); +INSERT INTO t1 SELECT number, number FROM numbers(28, 2); + + +CREATE TABLE t2 (a UInt64, b UInt64) ENGINE = Memory; +INSERT INTO t2 SELECT number, number FROM numbers(0, 2); +INSERT INTO t2 SELECT number, number FROM numbers(2, 3); +INSERT INTO t2 SELECT number, number FROM numbers(5, 5); +INSERT INTO t2 SELECT number, number FROM numbers(10, 5); +INSERT INTO t2 SELECT number, number FROM numbers(15, 15); + 
+SELECT * FROM ( SELECT * from t1 ) t1 PASTE JOIN ( SELECT * from t2 ) t2 SETTINGS max_threads = 1; +SELECT toTypeName(a) FROM (SELECT number as a FROM numbers(11)) t1 PASTE JOIN (select number as a from numbers(10)) t2 SETTINGS join_use_nulls = 1; +SET max_threads = 2; +select * from (SELECT number as a FROM numbers_mt(10)) t1 PASTE JOIN (select number as a from numbers(10) ORDER BY a DESC) t2 SETTINGS max_block_size=10; +select * from (SELECT number as a FROM numbers(10)) t1 ANY PASTE JOIN (select number as a from numbers(10)) t2; -- { clientError SYNTAX_ERROR } +select * from (SELECT number as a FROM numbers(10)) t1 ALL PASTE JOIN (select number as a from numbers(10)) t2; -- { clientError SYNTAX_ERROR } + +TRUNCATE TABLE test; +INSERT INTO test SELECT number from numbers(6); +SELECT * FROM (SELECT number FROM test) PASTE JOIN (SELECT number FROM numbers(6) ORDER BY number) SETTINGS joined_subquery_requires_alias = 0; +SELECT * FROM (SELECT number FROM test PASTE JOIN (Select number FROM numbers(7))) PASTE JOIN (SELECT number FROM numbers(6) PASTE JOIN (SELECT number FROM test)) SETTINGS joined_subquery_requires_alias = 0; +SELECT * FROM (SELECT number FROM test PASTE JOIN (SELECT number FROM test PASTE JOIN (Select number FROM numbers(7)))) PASTE JOIN (SELECT number FROM numbers(6) PASTE JOIN (SELECT number FROM test)) SETTINGS joined_subquery_requires_alias = 0; +SELECT * FROM (SELECT 1 AS a) PASTE JOIN (SELECT 2 AS b) PASTE JOIN (SELECT 3 AS c) SETTINGS enable_analyzer = 1; +SELECT * FROM (SELECT 1 AS a) PASTE JOIN (SELECT 2 AS b) PASTE JOIN (SELECT 3 AS a) SETTINGS enable_analyzer = 1; -- { serverError AMBIGUOUS_COLUMN_NAME } + +SET enable_analyzer = 1; +CREATE TABLE test1 (a Int32) engine=MergeTree order by a; +INSERT INTO test1 SELECT * FROM numbers(2); +CREATE TABLE test2 (a Int32) engine=MergeTree order by a; +INSERT INTO test2 SELECT * FROM numbers(2); +SELECT * FROM test1 PASTE JOIN (SELECT * FROM test2); +SELECT a `test2.a` FROM test1 PASTE JOIN test2; +SELECT * FROM test1 `test2.a` PASTE JOIN test2 `test2.a`; -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } +SELECT * FROM test1 PASTE JOIN (SELECT number AS a FROM numbers(2) ORDER BY number DESC); -- { serverError AMBIGUOUS_COLUMN_NAME } diff --git a/parser/testdata/02933_sqid/ast.json b/parser/testdata/02933_sqid/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02933_sqid/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02933_sqid/metadata.json b/parser/testdata/02933_sqid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02933_sqid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02933_sqid/query.sql b/parser/testdata/02933_sqid/query.sql new file mode 100644 index 000000000..3dcca13a8 --- /dev/null +++ b/parser/testdata/02933_sqid/query.sql @@ -0,0 +1,37 @@ +-- Tags: no-fasttest + +SET allow_suspicious_low_cardinality_types = 1; + +SELECT '-- negative tests'; +SELECT sqidEncode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT sqidDecode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT sqidEncode('1'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT sqidDecode(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT '-- const UInt*'; +SELECT sqidEncode(1) AS sqid, sqidDecode(sqid); +SELECT sqidEncode(1, 2) AS sqid, sqidDecode(sqid); +SELECT sqidEncode(1, 2, 3) AS sqid, sqidDecode(sqid); +SELECT sqidEncode(1::UInt8, 2::UInt16, 3::UInt32, 4::UInt64) AS sqid, 
sqidDecode(sqid); +SELECT sqidEncode(toNullable(1), toLowCardinality(2)) AS sqid; +SELECT sqidDecode('1'); + +SELECT '-- non-const UInt*'; +SELECT sqidEncode(materialize(1)) AS sqid, sqidDecode(sqid); +SELECT sqidEncode(materialize(1), materialize(2)) AS sqid, sqidDecode(sqid); +SELECT sqidEncode(materialize(1), materialize(2), materialize(3)) AS sqid, sqidDecode(sqid); +SELECT sqidEncode(materialize(1::UInt8), materialize(2::UInt16), materialize(3::UInt32), materialize(4::UInt64)) AS sqid, sqidDecode(sqid); +SELECT sqidEncode(toNullable(materialize(1)), toLowCardinality(materialize(2))); + +SELECT '-- invalid sqid'; +SELECT sqidDecode('invalid sqid'); + +SELECT '-- bug 69450'; +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (id String) ENGINE = MergeTree ORDER BY id; +INSERT INTO tab SELECT * FROM generateRandom() LIMIT 1000000; +SELECT sqidDecode(id) FROM tab FORMAT Null; +DROP TABLE tab; + +SELECT '-- alias'; +SELECT sqid(1, 2); diff --git a/parser/testdata/02934_merge_tree_max_projections/ast.json b/parser/testdata/02934_merge_tree_max_projections/ast.json new file mode 100644 index 000000000..ac496dcdd --- /dev/null +++ b/parser/testdata/02934_merge_tree_max_projections/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_max_mt_projections_alter (children 1)" + }, + { + "explain": " Identifier test_max_mt_projections_alter" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001128101, + "rows_read": 2, + "bytes_read": 110 + } +} diff --git a/parser/testdata/02934_merge_tree_max_projections/metadata.json b/parser/testdata/02934_merge_tree_max_projections/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02934_merge_tree_max_projections/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02934_merge_tree_max_projections/query.sql b/parser/testdata/02934_merge_tree_max_projections/query.sql new file mode 100644 index 000000000..6ed3bc442 --- /dev/null +++ b/parser/testdata/02934_merge_tree_max_projections/query.sql @@ -0,0 +1,39 @@ +DROP TABLE IF EXISTS test_max_mt_projections_alter; +CREATE TABLE test_max_mt_projections_alter (c1 UInt32, c2 UInt32, c3 UInt32) + ENGINE = MergeTree ORDER BY c1 + SETTINGS max_projections = 3; + +ALTER TABLE test_max_mt_projections_alter ADD PROJECTION p1 (SELECT c2 ORDER BY c2); +ALTER TABLE test_max_mt_projections_alter ADD PROJECTION p2 (SELECT c3 ORDER BY c3); +ALTER TABLE test_max_mt_projections_alter ADD PROJECTION p3 (SELECT c1, c2 ORDER BY c1, c2); + +ALTER TABLE test_max_mt_projections_alter + ADD PROJECTION p4 (SELECT c2, c3 ORDER BY c2, c3); -- { serverError LIMIT_EXCEEDED } + +ALTER TABLE test_max_mt_projections_alter DROP PROJECTION p3; + +ALTER TABLE test_max_mt_projections_alter ADD PROJECTION p4 (SELECT c2, c3 ORDER BY c2, c3); + +DROP TABLE IF EXISTS test_max_mt_projections_alter; + +DROP TABLE IF EXISTS test_max_mt_projections_create; +CREATE TABLE test_max_mt_projections_create (c1 UInt32, c2 UInt32, + PROJECTION p1 (SELECT c1, c2 ORDER BY c2), + PROJECTION p2 (SELECT c2 ORDER BY c2)) + ENGINE = MergeTree ORDER BY c1 + SETTINGS max_projections = 1; -- { serverError LIMIT_EXCEEDED } + +CREATE TABLE test_max_mt_projections_create (c1 UInt32, c2 UInt32, + PROJECTION p (SELECT c1, c2 ORDER BY c2)) + ENGINE = MergeTree ORDER BY c1 + SETTINGS max_projections = 0; -- { serverError LIMIT_EXCEEDED } + +CREATE TABLE test_max_mt_projections_create (c1 UInt32, c2 UInt32, + PROJECTION 
p (SELECT c1, c2 ORDER BY c2)) + ENGINE = MergeTree ORDER BY c1 + SETTINGS max_projections = 1; + +ALTER TABLE test_max_mt_projections_create + ADD PROJECTION p2 (SELECT c2 ORDER BY c2); -- { serverError LIMIT_EXCEEDED } + +DROP TABLE IF EXISTS test_max_mt_projections_create; diff --git a/parser/testdata/02935_date_trunc_case_unsensitiveness/ast.json b/parser/testdata/02935_date_trunc_case_unsensitiveness/ast.json new file mode 100644 index 000000000..0b21f7900 --- /dev/null +++ b/parser/testdata/02935_date_trunc_case_unsensitiveness/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function dateTrunc (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'DAY'" + }, + { + "explain": " Function toDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2022-03-01 12:55:55'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001124537, + "rows_read": 10, + "bytes_read": 392 + } +} diff --git a/parser/testdata/02935_date_trunc_case_unsensitiveness/metadata.json b/parser/testdata/02935_date_trunc_case_unsensitiveness/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02935_date_trunc_case_unsensitiveness/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02935_date_trunc_case_unsensitiveness/query.sql b/parser/testdata/02935_date_trunc_case_unsensitiveness/query.sql new file mode 100644 index 000000000..df290f3de --- /dev/null +++ b/parser/testdata/02935_date_trunc_case_unsensitiveness/query.sql @@ -0,0 +1,27 @@ +SELECT dateTrunc('DAY', toDateTime('2022-03-01 12:55:55')); +SELECT dateTrunc('MONTH', toDateTime64('2022-03-01 12:55:55', 2)); +SELECT dateTrunc('WEEK', toDate('2022-03-01')); +SELECT dateTrunc('Day', toDateTime('2022-03-01 12:55:55')); +SELECT dateTrunc('Month', toDateTime64('2022-03-01 12:55:55', 2)); +SELECT dateTrunc('Week', toDate('2022-03-01')); +SELECT dateTrunc('day', toDateTime('2022-03-01 12:55:55')); +SELECT dateTrunc('month', toDateTime64('2022-03-01 12:55:55', 2)); +SELECT dateTrunc('week', toDate('2022-03-01')); +SELECT dateTrunc('Nanosecond', toDateTime64('2022-03-01 12:12:12.0123', 3)); +SELECT dateTrunc('MicroSecond', toDateTime64('2022-03-01 12:12:12.0123456', 7)); +SELECT dateTrunc('MILLISECOND', toDateTime64('2022-03-01 12:12:12.012324251', 9)); +SELECT dateTrunc('mICROsECOND', toDateTime64('2022-03-01 12:12:12.0123', 4)); +SELECT dateTrunc('mIllISecoNd', toDateTime64('2022-03-01 12:12:12.0123456', 6)); +SELECT dateTrunc('NANoSecoND', toDateTime64('2022-03-01 12:12:12.012345678', 8)); +SELECT dateTrunc('Nanosecond', toDateTime64('1950-03-01 12:12:12.0123', 3)); +SELECT dateTrunc('MicroSecond', toDateTime64('1951-03-01 12:12:12.0123456', 7)); +SELECT dateTrunc('MILLISECOND', toDateTime64('1952-03-01 12:12:12.012324251', 9)); +SELECT dateTrunc('mICROsECOND', toDateTime64('1965-03-01 12:12:12.0123', 4)); +SELECT dateTrunc('mIllISecoNd', toDateTime64('1966-03-01 12:12:12.0123456', 6)); +SELECT dateTrunc('NANoSecoND', toDateTime64('1967-03-01 12:12:12.012345678', 8)); +SELECT dateTrunc('Nanosecond', toDateTime('2022-03-01')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT dateTrunc('MicroSecond', 
toDateTime('2022-03-01')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT dateTrunc('MILLISECOND', toDateTime('2022-03-01')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT dateTrunc('Nanosecond', toDate('2022-03-01')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT dateTrunc('MicroSecond', toDate('2022-03-01')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT dateTrunc('MILLISECOND', toDate('2022-03-01')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/02935_format_with_arbitrary_types/ast.json b/parser/testdata/02935_format_with_arbitrary_types/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02935_format_with_arbitrary_types/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02935_format_with_arbitrary_types/metadata.json b/parser/testdata/02935_format_with_arbitrary_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02935_format_with_arbitrary_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02935_format_with_arbitrary_types/query.sql b/parser/testdata/02935_format_with_arbitrary_types/query.sql new file mode 100644 index 000000000..b0a02127f --- /dev/null +++ b/parser/testdata/02935_format_with_arbitrary_types/query.sql @@ -0,0 +1,85 @@ + +-- Tags: no-fasttest +-- no-fasttest: json type needs rapidjson library, geo types need s2 geometry + +SET enable_json_type = 1; +SET allow_suspicious_low_cardinality_types=1; + +SELECT '-- Const string + non-const arbitrary type'; +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(42 :: Int8)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(43 :: Int16)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(44 :: Int32)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(45 :: Int64)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(46 :: Int128)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(47 :: Int256)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(48 :: UInt8)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(49 :: UInt16)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(50 :: UInt32)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(51 :: UInt64)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(52 :: UInt128)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(53 :: UInt256)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(42.42 :: Float32)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(43.43 :: Float64)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(44.44 :: Decimal(2))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(true :: Bool)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(false :: Bool)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('foo' :: String)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('bar' :: FixedString(3))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('foo' :: Nullable(String))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('bar' :: Nullable(FixedString(3)))); +SELECT format('The {0} to all questions 
is {1}.', 'answer', materialize('foo' :: LowCardinality(String))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('bar' :: LowCardinality(FixedString(3)))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('foo' :: LowCardinality(Nullable(String)))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('bar' :: LowCardinality(Nullable(FixedString(3))))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(42 :: LowCardinality(Nullable(UInt32)))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(42 :: LowCardinality(UInt32))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('fae310ca-d52a-4923-9e9b-02bf67f4b009' :: UUID)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('2023-11-14' :: Date)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('2123-11-14' :: Date32)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('2023-11-14 05:50:12' :: DateTime('Europe/Amsterdam'))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('2023-11-14 05:50:12.123' :: DateTime64(3, 'Europe/Amsterdam'))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('hallo' :: Enum('hallo' = 1))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(['foo', 'bar'] :: Array(String))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('{"foo": "bar"}' :: JSON)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize((42, 'foo') :: Tuple(Int32, String))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize(map(42, 'foo') :: Map(Int32, String))); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('122.233.64.201' :: IPv4)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize('2001:0001:130F:0002:0003:09C0:876A:130B' :: IPv6)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize((42, 43) :: Point)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize([(0,0),(10,0),(10,10),(0,10)] :: Ring)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize([[(20, 20), (50, 20), (50, 50), (20, 50)], [(30, 30), (50, 50), (50, 30)]] :: Polygon)); +SELECT format('The {0} to all questions is {1}.', 'answer', materialize([[[(0, 0), (10, 0), (10, 10), (0, 10)]], [[(20, 20), (50, 20), (50, 50), (20, 50)],[(30, 30), (50, 50), (50, 30)]]] :: MultiPolygon)); + +SELECT '-- Nested'; +DROP TABLE IF EXISTS format_nested; +CREATE TABLE format_nested(attrs Nested(k String, v String)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO format_nested VALUES (['foo', 'bar'], ['qaz', 'qux']); +SELECT format('The {0} to all questions is {1}.', attrs.k, attrs.v) FROM format_nested; +DROP TABLE format_nested; + +SELECT '-- NULL arguments'; +SELECT format('The {0} to all questions is {1}', NULL, NULL); +SELECT format('The {0} to all questions is {1}', NULL, materialize(NULL :: Nullable(UInt64))); +SELECT format('The {0} to all questions is {1}', materialize(NULL :: Nullable(UInt64)), materialize(NULL :: Nullable(UInt64))); +SELECT format('The {0} to all questions is {1}', 42, materialize(NULL :: Nullable(UInt64))); +SELECT format('The {0} to all questions is {1}', '42', materialize(NULL :: Nullable(UInt64))); +SELECT format('The {0} to all questions is {1}', 42, materialize(NULL :: Nullable(UInt64)), materialize(NULL :: 
Nullable(UInt64))); +SELECT format('The {0} to all questions is {1}', '42', materialize(NULL :: Nullable(UInt64)), materialize(NULL :: Nullable(UInt64))); + +SELECT '-- Various arguments tests'; +SELECT format('The {0} to all questions is {1}', materialize('Non-const'), materialize(' strings')); +SELECT format('The {0} to all questions is {1}', 'Two arguments ', 'test'); +SELECT format('The {0} to all questions is {1} and {2}', 'Three ', 'arguments', ' test'); +SELECT format('The {0} to all questions is {1} and {2}', materialize(3 :: Int64), ' arguments test', ' with int type'); +SELECT format('The {0} to all questions is {1}', materialize(42 :: Int32), materialize(144 :: UInt64)); +SELECT format('The {0} to all questions is {1} and {2}', materialize(42 :: Int32), materialize(144 :: UInt64), materialize(255 :: UInt32)); +SELECT format('The {0} to all questions is {1}', 42, 144); +SELECT format('The {0} to all questions is {1} and {2}', 42, 144, 255); + +SELECT '-- Single argument tests'; +SELECT format('The answer to all questions is {0}.', 42); +SELECT format('The answer to all questions is {0}.', materialize(42)); +SELECT format('The answer to all questions is {0}.', 'foo'); +SELECT format('The answer to all questions is {0}.', materialize('foo')); +SELECT format('The answer to all questions is {0}.', NULL); +SELECT format('The answer to all questions is {0}.', materialize(NULL :: Nullable(UInt64))); diff --git a/parser/testdata/02935_ipv6_bit_operations/ast.json b/parser/testdata/02935_ipv6_bit_operations/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02935_ipv6_bit_operations/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02935_ipv6_bit_operations/metadata.json b/parser/testdata/02935_ipv6_bit_operations/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02935_ipv6_bit_operations/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02935_ipv6_bit_operations/query.sql b/parser/testdata/02935_ipv6_bit_operations/query.sql new file mode 100644 index 000000000..6598c2ac5 --- /dev/null +++ b/parser/testdata/02935_ipv6_bit_operations/query.sql @@ -0,0 +1,7 @@ +WITH toIPv6('FFFF:0000:FFFF:0000:FFFF:0000:FFFF:0000') AS ip1, toIPv6('0000:FFFF:0000:FFFF:0000:FFFF:0000:FFFF') AS ip2, + CAST('226854911280625642308916404954512140970', 'UInt128') AS n1, CAST('113427455640312821154458202477256070485', 'UInt128') AS n2 +SELECT bin(ip1), bin(ip2), bin(n1), bin(n2), + bin(bitAnd(ip1, n1)), bin(bitAnd(n1, ip1)), bin(bitAnd(ip2, n1)), bin(bitAnd(n1, ip2)), + bin(bitAnd(ip1, n2)), bin(bitAnd(n2, ip1)), bin(bitAnd(ip2, n2)), bin(bitAnd(n2, ip2)), + bin(bitOr(ip1, n1)), bin(bitOr(n1, ip1)), bin(bitOr(ip2, n1)), bin(bitOr(n1, ip2)), + bin(bitOr(ip1, n2)), bin(bitOr(n2, ip1)), bin(bitOr(ip2, n2)), bin(bitOr(n2, ip2)); diff --git a/parser/testdata/02935_ipv6_from_uint128_equality/ast.json b/parser/testdata/02935_ipv6_from_uint128_equality/ast.json new file mode 100644 index 000000000..0cb864a9d --- /dev/null +++ b/parser/testdata/02935_ipv6_from_uint128_equality/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 
2)" + }, + { + "explain": " Function toIPv6 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1512366075204170938810683009357704959'" + }, + { + "explain": " Literal 'UInt128'" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '0123:4567:89ab:cdef:8899:aabb:ccdd:eeff'" + }, + { + "explain": " Literal 'IPv6'" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001340624, + "rows_read": 16, + "bytes_read": 676 + } +} diff --git a/parser/testdata/02935_ipv6_from_uint128_equality/metadata.json b/parser/testdata/02935_ipv6_from_uint128_equality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02935_ipv6_from_uint128_equality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02935_ipv6_from_uint128_equality/query.sql b/parser/testdata/02935_ipv6_from_uint128_equality/query.sql new file mode 100644 index 000000000..559dfbd0f --- /dev/null +++ b/parser/testdata/02935_ipv6_from_uint128_equality/query.sql @@ -0,0 +1 @@ +SELECT toIPv6(1512366075204170938810683009357704959::UInt128) = '0123:4567:89ab:cdef:8899:aabb:ccdd:eeff'::IPv6 diff --git a/parser/testdata/02935_ipv6_from_uint128_one/ast.json b/parser/testdata/02935_ipv6_from_uint128_one/ast.json new file mode 100644 index 000000000..d9610c26f --- /dev/null +++ b/parser/testdata/02935_ipv6_from_uint128_one/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toIPv6 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUInt128 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001062952, + "rows_read": 9, + "bytes_read": 348 + } +} diff --git a/parser/testdata/02935_ipv6_from_uint128_one/metadata.json b/parser/testdata/02935_ipv6_from_uint128_one/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02935_ipv6_from_uint128_one/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02935_ipv6_from_uint128_one/query.sql b/parser/testdata/02935_ipv6_from_uint128_one/query.sql new file mode 100644 index 000000000..0e0b69d52 --- /dev/null +++ b/parser/testdata/02935_ipv6_from_uint128_one/query.sql @@ -0,0 +1 @@ +SELECT toIPv6(toUInt128(1)); diff --git a/parser/testdata/02935_ipv6_from_uint128_two/ast.json b/parser/testdata/02935_ipv6_from_uint128_two/ast.json new file mode 100644 index 000000000..3c76497cc --- /dev/null +++ b/parser/testdata/02935_ipv6_from_uint128_two/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toIPv6 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " 
Function toUInt128 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_4335" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001354392, + "rows_read": 9, + "bytes_read": 351 + } +} diff --git a/parser/testdata/02935_ipv6_from_uint128_two/metadata.json b/parser/testdata/02935_ipv6_from_uint128_two/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02935_ipv6_from_uint128_two/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02935_ipv6_from_uint128_two/query.sql b/parser/testdata/02935_ipv6_from_uint128_two/query.sql new file mode 100644 index 000000000..3c0bc5876 --- /dev/null +++ b/parser/testdata/02935_ipv6_from_uint128_two/query.sql @@ -0,0 +1 @@ +SELECT toIPv6(toUInt128(4335)); diff --git a/parser/testdata/02935_ipv6_from_uint128_with_bit_and/ast.json b/parser/testdata/02935_ipv6_from_uint128_with_bit_and/ast.json new file mode 100644 index 000000000..d687ed551 --- /dev/null +++ b/parser/testdata/02935_ipv6_from_uint128_with_bit_and/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toIPv6 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function bitAnd (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toIPv6 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'ffff:fff3:1fff:2fff:0000:0000:0000:0000'" + }, + { + "explain": " Function toIPv6 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'f5f6:ffff:f32f:61ff:0000:0000:0000:0000'" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001194058, + "rows_read": 14, + "bytes_read": 623 + } +} diff --git a/parser/testdata/02935_ipv6_from_uint128_with_bit_and/metadata.json b/parser/testdata/02935_ipv6_from_uint128_with_bit_and/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02935_ipv6_from_uint128_with_bit_and/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02935_ipv6_from_uint128_with_bit_and/query.sql b/parser/testdata/02935_ipv6_from_uint128_with_bit_and/query.sql new file mode 100644 index 000000000..c8e12d559 --- /dev/null +++ b/parser/testdata/02935_ipv6_from_uint128_with_bit_and/query.sql @@ -0,0 +1 @@ +SELECT toIPv6(bitAnd(toIPv6('ffff:fff3:1fff:2fff:0000:0000:0000:0000'), toIPv6('f5f6:ffff:f32f:61ff:0000:0000:0000:0000'))); diff --git a/parser/testdata/02935_ipv6_to_and_from_uint128/ast.json b/parser/testdata/02935_ipv6_to_and_from_uint128/ast.json new file mode 100644 index 000000000..02e47948d --- /dev/null +++ b/parser/testdata/02935_ipv6_to_and_from_uint128/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toIPv6 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUInt128 (children 
1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toIPv6 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '1234:5678:9abc:def0:fedc:9abc:4321:8765'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001483903, + "rows_read": 11, + "bytes_read": 471 + } +} diff --git a/parser/testdata/02935_ipv6_to_and_from_uint128/metadata.json b/parser/testdata/02935_ipv6_to_and_from_uint128/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02935_ipv6_to_and_from_uint128/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02935_ipv6_to_and_from_uint128/query.sql b/parser/testdata/02935_ipv6_to_and_from_uint128/query.sql new file mode 100644 index 000000000..c54b7503c --- /dev/null +++ b/parser/testdata/02935_ipv6_to_and_from_uint128/query.sql @@ -0,0 +1 @@ +SELECT toIPv6(toUInt128(toIPv6('1234:5678:9abc:def0:fedc:9abc:4321:8765'))); diff --git a/parser/testdata/02935_parallel_replicas_settings/ast.json b/parser/testdata/02935_parallel_replicas_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02935_parallel_replicas_settings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02935_parallel_replicas_settings/metadata.json b/parser/testdata/02935_parallel_replicas_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02935_parallel_replicas_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02935_parallel_replicas_settings/query.sql b/parser/testdata/02935_parallel_replicas_settings/query.sql new file mode 100644 index 000000000..a9201ebcc --- /dev/null +++ b/parser/testdata/02935_parallel_replicas_settings/query.sql @@ -0,0 +1,38 @@ +-- Tags: no-random-settings + +DROP TABLE IF EXISTS test_parallel_replicas_settings; +CREATE TABLE test_parallel_replicas_settings (n UInt64) ENGINE=MergeTree() ORDER BY tuple(); +INSERT INTO test_parallel_replicas_settings SELECT * FROM numbers(10); + +SET enable_parallel_replicas=2, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1; +SET parallel_replicas_only_with_analyzer = 0; -- necessary for CI run with disabled analyzer + +SET cluster_for_parallel_replicas=''; +SELECT count() FROM test_parallel_replicas_settings WHERE NOT ignore(*); -- { serverError CLUSTER_DOESNT_EXIST } + +SET cluster_for_parallel_replicas='parallel_replicas'; +SELECT count() FROM test_parallel_replicas_settings WHERE NOT ignore(*) settings log_comment='0_f621c4f2-4da7-4a7c-bb6d-052c442d0f7f'; + +SYSTEM FLUSH LOGS text_log, query_log; +SET max_rows_to_read = 0; -- system.text_log can be really big +SELECT count() > 0 FROM system.text_log +WHERE yesterday() <= event_date + AND query_id in (select query_id from system.query_log where current_database=currentDatabase() AND log_comment='0_f621c4f2-4da7-4a7c-bb6d-052c442d0f7f') + AND level = 'Information' + AND message ILIKE '%Disabling ''use_hedged_requests'' in favor of ''enable_parallel_replicas''%' +SETTINGS enable_parallel_replicas=0; + +SET use_hedged_requests=1; +SELECT count() FROM test_parallel_replicas_settings WHERE NOT ignore(*) settings log_comment='1_f621c4f2-4da7-4a7c-bb6d-052c442d0f7f'; + +SYSTEM FLUSH LOGS text_log, query_log; + +SET enable_parallel_replicas=0; +SELECT count() > 0 FROM system.text_log +WHERE yesterday() <= event_date + AND query_id in (select query_id from system.query_log where 
current_database = currentDatabase() AND log_comment = '1_f621c4f2-4da7-4a7c-bb6d-052c442d0f7f') + AND level = 'Warning' + AND message ILIKE '%Setting ''use_hedged_requests'' explicitly with enabled ''enable_parallel_replicas'' has no effect%' +SETTINGS enable_parallel_replicas=0; + +DROP TABLE test_parallel_replicas_settings; diff --git a/parser/testdata/02940_json_array_of_unnamed_tuples_inference/ast.json b/parser/testdata/02940_json_array_of_unnamed_tuples_inference/ast.json new file mode 100644 index 000000000..9664ade9e --- /dev/null +++ b/parser/testdata/02940_json_array_of_unnamed_tuples_inference/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001430764, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02940_json_array_of_unnamed_tuples_inference/metadata.json b/parser/testdata/02940_json_array_of_unnamed_tuples_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02940_json_array_of_unnamed_tuples_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02940_json_array_of_unnamed_tuples_inference/query.sql b/parser/testdata/02940_json_array_of_unnamed_tuples_inference/query.sql new file mode 100644 index 000000000..716d82f05 --- /dev/null +++ b/parser/testdata/02940_json_array_of_unnamed_tuples_inference/query.sql @@ -0,0 +1,3 @@ +set input_format_json_infer_array_of_dynamic_from_array_of_different_types=0; +desc format(JSONEachRow, '{"data" : [[1, null, 3, null], [null, {"a" : 12, "b" : 12}, null, "string"], [null, null, 4, "string"]]}'); + diff --git a/parser/testdata/02940_variant_text_deserialization/ast.json b/parser/testdata/02940_variant_text_deserialization/ast.json new file mode 100644 index 000000000..c588a9925 --- /dev/null +++ b/parser/testdata/02940_variant_text_deserialization/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001520206, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02940_variant_text_deserialization/metadata.json b/parser/testdata/02940_variant_text_deserialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02940_variant_text_deserialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02940_variant_text_deserialization/query.sql b/parser/testdata/02940_variant_text_deserialization/query.sql new file mode 100644 index 000000000..291feaf61 --- /dev/null +++ b/parser/testdata/02940_variant_text_deserialization/query.sql @@ -0,0 +1,267 @@ +set allow_experimental_variant_type = 1; +set allow_suspicious_variant_types = 1; +set session_timezone = 'UTC'; + +select 'JSON'; +select 'String'; +select v, variantElement(v, 'String') from format(JSONEachRow, 'v Variant(String, UInt64)', '{"v" : null}, {"v" : "string"}, {"v" : 42}') format JSONEachRow; + +select 'FixedString'; +select v, variantElement(v, 'FixedString(4)') from format(JSONEachRow, 'v Variant(String, FixedString(4))', '{"v" : null}, {"v" : "string"}, {"v" : "abcd"}') format JSONEachRow; + +select 'Bool'; +select v, variantElement(v, 'Bool') from format(JSONEachRow, 'v Variant(String, Bool)', '{"v" : null}, {"v" : "string"}, {"v" : true}') format JSONEachRow; + +select 'Integers'; +select v, 
variantElement(v, 'Int8') from format(JSONEachRow, 'v Variant(String, Int8, UInt64)', '{"v" : null}, {"v" : "string"}, {"v" : -1}, {"v" : 0}, {"v" : 10000000000}') format JSONEachRow; +select v, variantElement(v, 'UInt8') from format(JSONEachRow, 'v Variant(String, UInt8, Int64)', '{"v" : null}, {"v" : "string"}, {"v" : -1}, {"v" : 0}, {"v" : 10000000000}') format JSONEachRow; +select v, variantElement(v, 'Int16') from format(JSONEachRow, 'v Variant(String, Int16, Int64)', '{"v" : null}, {"v" : "string"}, {"v" : -1}, {"v" : 0}, {"v" : 10000000000}') format JSONEachRow; +select v, variantElement(v, 'UInt16') from format(JSONEachRow, 'v Variant(String, UInt16, Int64)', '{"v" : null}, {"v" : "string"}, {"v" : -1}, {"v" : 0}, {"v" : 10000000000}') format JSONEachRow; +select v, variantElement(v, 'Int32') from format(JSONEachRow, 'v Variant(String, Int32, Int64)', '{"v" : null}, {"v" : "string"}, {"v" : -1}, {"v" : 0}, {"v" : 10000000000}') format JSONEachRow; +select v, variantElement(v, 'UInt32') from format(JSONEachRow, 'v Variant(String, UInt32, Int64)', '{"v" : null}, {"v" : "string"}, {"v" : -1}, {"v" : 0}, {"v" : 10000000000}') format JSONEachRow; +select v, variantElement(v, 'Int64') from format(JSONEachRow, 'v Variant(String, Int64, Int128)', '{"v" : null}, {"v" : "string"}, {"v" : -1}, {"v" : 0}, {"v" : 10000000000000000000000}') format JSONEachRow; +select v, variantElement(v, 'UInt64') from format(JSONEachRow, 'v Variant(String, UInt64, Int128)', '{"v" : null}, {"v" : "string"}, {"v" : -1}, {"v" : 0}, {"v" : 10000000000000000000000}') format JSONEachRow; +select v, variantElement(v, 'Int128') from format(JSONEachRow, 'v Variant(String, Int128, Int256)', '{"v" : null}, {"v" : "string"}, {"v" : -1}, {"v" : 0}') format JSONEachRow; +select v, variantElement(v, 'UInt128') from format(JSONEachRow, 'v Variant(String, UInt128, Int256)', '{"v" : null}, {"v" : "string"}, {"v" : -1}, {"v" : 0}') format JSONEachRow; + +select 'Floats'; +select v, variantElement(v, 'Float32') from format(JSONEachRow, 'v Variant(String, Float32)', '{"v" : null}, {"v" : "string"}, {"v" : 42.42}') format JSONEachRow; +select v, variantElement(v, 'Float64') from format(JSONEachRow, 'v Variant(String, Float64)', '{"v" : null}, {"v" : "string"}, {"v" : 42.42}') format JSONEachRow; + +select 'Decimals'; +select v, variantElement(v, 'Decimal32(6)') from format(JSONEachRow, 'v Variant(String, Decimal32(6))', '{"v" : null}, {"v" : "string"}, {"v" : 42.42}, {"v" : 4242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242.424242424242424242}') format JSONEachRow; +select v, variantElement(v, 'Decimal64(6)') from format(JSONEachRow, 'v Variant(String, Decimal64(6))', '{"v" : null}, {"v" : "string"}, {"v" : 42.42}, {"v" : 4242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242.424242424242424242}') format JSONEachRow; +select v, variantElement(v, 'Decimal128(6)') from format(JSONEachRow, 'v Variant(String, Decimal128(6))', '{"v" : null}, {"v" : "string"}, {"v" : 42.42}, {"v" : 4242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242.424242424242424242}') format JSONEachRow; +select v, variantElement(v, 'Decimal256(6)') from format(JSONEachRow, 'v Variant(String, Decimal256(6))', '{"v" : null}, {"v" : "string"}, {"v" : 42.42}, {"v" : 4242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242.424242424242424242}') format JSONEachRow; + +select 'Dates and DateTimes'; +select v, 
variantElement(v, 'Date') from format(JSONEachRow, 'v Variant(String, Date, DateTime64)', '{"v" : null}, {"v" : "string"}, {"v" : "2020-01-01"}, {"v" : "2020-01-01 00:00:00.999"}') format JSONEachRow; +select v, variantElement(v, 'Date32') from format(JSONEachRow, 'v Variant(String, Date32, DateTime64)', '{"v" : null}, {"v" : "string"}, {"v" : "1900-01-01"}, {"v" : "2020-01-01 00:00:00.999"}') format JSONEachRow; +select v, variantElement(v, 'DateTime') from format(JSONEachRow, 'v Variant(String, DateTime, DateTime64)', '{"v" : null}, {"v" : "string"}, {"v" : "2020-01-01 00:00:00"}, {"v" : "2020-01-01 00:00:00.999"}') format JSONEachRow; +select v, variantElement(v, 'DateTime64') from format(JSONEachRow, 'v Variant(String, DateTime64)', '{"v" : null}, {"v" : "string"}, {"v" : "2020-01-01 00:00:00.999"}, {"v" : "2020-01-01 00:00:00.999999999 ABC"}') format JSONEachRow; + +select 'UUID'; +select v, variantElement(v, 'UUID') from format(JSONEachRow, 'v Variant(String, UUID)', '{"v" : null}, {"v" : "string"}, {"v" : "c8619cca-0caa-445e-ae76-1d4f6e0b3927"}') format JSONEachRow; + +select 'IPv4'; +select v, variantElement(v, 'IPv4') from format(JSONEachRow, 'v Variant(String, IPv4)', '{"v" : null}, {"v" : "string"}, {"v" : "127.0.0.1"}') format JSONEachRow; + +select 'IPv6'; +select v, variantElement(v, 'IPv6') from format(JSONEachRow, 'v Variant(String, IPv6)', '{"v" : null}, {"v" : "string"}, {"v" : "2001:0db8:85a3:0000:0000:8a2e:0370:7334"}') format JSONEachRow; + +select 'Enum'; +select v, variantElement(v, 'Enum(''a'' = 1)') from format(JSONEachRow, 'v Variant(String, UInt32, Enum(''a'' = 1))', '{"v" : null}, {"v" : "string"}, {"v" : "a"}, {"v" : 1}, {"v" : 2}') format JSONEachRow; + +select 'Map'; +select v, variantElement(v, 'Map(String, UInt64)') from format(JSONEachRow, 'v Variant(String, Map(String, UInt64))', '{"v" : null}, {"v" : "string"}, {"v" : {"a" : 42, "b" : 43, "c" : null}}, {"v" : {"c" : 44, "d" : [1,2,3]}}') format JSONEachRow; + +select 'Tuple'; +select v, variantElement(v, 'Tuple(a UInt64, b UInt64)') from format(JSONEachRow, 'v Variant(String, Tuple(a UInt64, b UInt64))', '{"v" : null}, {"v" : "string"}, {"v" : {"a" : 42, "b" : null}}, {"v" : {"a" : 44, "d" : 32}}') format JSONEachRow; +select v, variantElement(v, 'Tuple(a UInt64, b UInt64)') from format(JSONEachRow, 'v Variant(String, Tuple(a UInt64, b UInt64))', '{"v" : null}, {"v" : "string"}, {"v" : {"a" : 42, "b" : null}}, {"v" : {"a" : 44, "d" : 32}}') settings input_format_json_defaults_for_missing_elements_in_named_tuple=0; + +select 'Array'; +select v, variantElement(v, 'Array(UInt64)') from format(JSONEachRow, 'v Variant(String, Array(UInt64))', '{"v" : null}, {"v" : "string"}, {"v" : [1, 2, 3]}, {"v" : [null, null, null]} {"v" : [1, 2, "hello"]}') format JSONEachRow; + +select 'LowCardinality'; +select v, variantElement(v, 'LowCardinality(String)') from format(JSONEachRow, 'v Variant(LowCardinality(String), UInt64)', '{"v" : null}, {"v" : "string"}, {"v" : 42}') format JSONEachRow; +select v, variantElement(v, 'Array(LowCardinality(Nullable(String)))') from format(JSONEachRow, 'v Variant(Array(LowCardinality(Nullable(String))), UInt64)', '{"v" : null}, {"v" : ["string", null]}, {"v" : 42}') format JSONEachRow; + +select 'Nullable'; +select v, variantElement(v, 'Array(Nullable(String))') from format(JSONEachRow, 'v Variant(String, Array(Nullable(String)))', '{"v" : null}, {"v" : "string"}, {"v" : ["hello", null, "world"]}') format JSONEachRow; + +select repeat('-', 80) format JSONEachRow; + +select 'CSV'; 
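+-- Same type matrix as the JSON section above, now fed through the CSV parser.
+-- Values that cannot be parsed as the typed variant (e.g. truee, 42d42) are
+-- expected to fall back to the String variant, and \N deserializes to NULL.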
+select 'String'; +select v, variantElement(v, 'String') from format(CSV, 'v Variant(String, UInt64)', '\\N\n"string"\nstring\n42') format CSV; + +select 'FixedString'; +select v, variantElement(v, 'FixedString(4)') from format(CSV, 'v Variant(String, FixedString(4))', '\\N\n"string"\nstring\n"abcd"') format CSV; + +select 'Bool'; +select v, variantElement(v, 'Bool') from format(CSV, 'v Variant(String, Bool)', '\\N\ntruee\ntrue') format CSV; + +select 'Integers'; +select v, variantElement(v, 'Int8') from format(CSV, 'v Variant(String, Int8, UInt64)', '\n"string"\n-1\n0\n10000000000\n42d42') format CSV; +select v, variantElement(v, 'UInt8') from format(CSV, 'v Variant(String, UInt8, Int64)', '\\N\n"string"\n-1\n0\n10000000000\n42d42') format CSV; +select v, variantElement(v, 'Int16') from format(CSV, 'v Variant(String, Int16, Int64)', '\\N\n"string"\n-1\n0\n10000000000\n42d42') format CSV; +select v, variantElement(v, 'UInt16') from format(CSV, 'v Variant(String, UInt16, Int64)', '\\N\n"string"\n-1\n0\n10000000000\n42d42') format CSV; +select v, variantElement(v, 'Int32') from format(CSV, 'v Variant(String, Int32, Int64)', '\\N\n"string"\n-1\n0\n10000000000\n42d42') format CSV; +select v, variantElement(v, 'UInt32') from format(CSV, 'v Variant(String, UInt32, Int64)', '\\N\n"string"\n-1\n0\n10000000000\n42d42') format CSV; +select v, variantElement(v, 'Int64') from format(CSV, 'v Variant(String, Int64, Int128)', '\\N\n"string"\n-1\n0\n10000000000000000000000\n42d42') format CSV; +select v, variantElement(v, 'UInt64') from format(CSV, 'v Variant(String, UInt64, Int128)', '\\N\n"string"\n-1\n0\n10000000000000000000000\n42d42') format CSV; +select v, variantElement(v, 'Int128') from format(CSV, 'v Variant(String, Int128, Int256)', '\\N\n"string"\n-1\n0\n42d42') format CSV; +select v, variantElement(v, 'UInt128') from format(CSV, 'v Variant(String, UInt128, Int256)', '\\N\n"string"\n-1\n0\n42d42') format CSV; + +select 'Floats'; +select v, variantElement(v, 'Float32') from format(CSV, 'v Variant(String, Float32)', '\\N\n"string"\n42.42\n42.d42') format CSV; +select v, variantElement(v, 'Float64') from format(CSV, 'v Variant(String, Float64)', '\\N\n"string"\n42.42\n42.d42') format CSV; + +select 'Decimals'; +select v, variantElement(v, 'Decimal32(6)') from format(CSV, 'v Variant(String, Decimal32(6))', '\\N\n"string"\n42.42\n42d42\n4242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242.424242424242424242') format CSV; +select v, variantElement(v, 'Decimal64(6)') from format(CSV, 'v Variant(String, Decimal64(6))', '\\N\n"string"\n42.42\n42d42\n4242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242.424242424242424242') format CSV; +select v, variantElement(v, 'Decimal128(6)') from format(CSV, 'v Variant(String, Decimal128(6))', '\\N\n"string"\n42.42\n42d42\n4242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242.424242424242424242') format CSV; +select v, variantElement(v, 'Decimal256(6)') from format(CSV, 'v Variant(String, Decimal256(6))', '\\N\n"string"\n42.42\n42d42\n4242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242.424242424242424242') format CSV; + +select 'Dates and DateTimes'; +select v, variantElement(v, 'Date') from format(CSV, 'v Variant(String, Date, DateTime64)', '\\N\n"string"\n"2020-01-d1"\n"2020-01-01"\n"2020-01-01 00:00:00.999"') format CSV; +select v, variantElement(v, 'Date32') from format(CSV, 'v Variant(String, 
Date32, DateTime64)', '\\N\n"string"\n"2020-01-d1"\n"1900-01-01"\n"2020-01-01 00:00:00.999"') format CSV; +select v, variantElement(v, 'DateTime') from format(CSV, 'v Variant(String, DateTime, DateTime64)', '\\N\n"string"\n"2020-01-d1"\n"2020-01-01 00:00:00"\n"2020-01-01 00:00:00.999"') format CSV; +select v, variantElement(v, 'DateTime64') from format(CSV, 'v Variant(String, DateTime64)', '\\N\n"string"\n"2020-01-d1"\n"2020-01-01 00:00:00.999"\n"2020-01-01 00:00:00.999999999 ABC"') format CSV; + +select 'UUID'; +select v, variantElement(v, 'UUID') from format(CSV, 'v Variant(String, UUID)', '\\N\n"string"\n"c8619cca-0caa-445e-ae76-1d4f6e0b3927"\nc8619cca-0caa-445e-ae76-1d4f6e0b3927AAA') format CSV; + +select 'IPv4'; +select v, variantElement(v, 'IPv4') from format(CSV, 'v Variant(String, IPv4)', '\\N\n"string"\n"127.0.0.1"\n"127.0.0.1AAA"') format CSV; + +select 'IPv6'; +select v, variantElement(v, 'IPv6') from format(CSV, 'v Variant(String, IPv6)', '\\N\n"string"\n"2001:0db8:85a3:0000:0000:8a2e:0370:7334"\n2001:0db8:85a3:0000:0000:8a2e:0370:7334AAA') format CSV; + +select 'Enum'; +select v, variantElement(v, 'Enum(''a'' = 1)') from format(CSV, 'v Variant(String, UInt32, Enum(''a'' = 1))', '\\N\n"string"\n"a"\n1\n2\naa') format CSV; + +select 'Map'; +select v, variantElement(v, 'Map(String, UInt64)') from format(CSV, 'v Variant(String, Map(String, UInt64))', '\\N\n"string"\n"{''a'' : 42, ''b'' : 43, ''c'' : null}"\n"{''c'' : 44, ''d'' : [1,2,3]}"\n"{''c'' : 44"') format CSV; + +select 'Array'; +select v, variantElement(v, 'Array(UInt64)') from format(CSV, 'v Variant(String, Array(UInt64))', '\\N\n"string"\n"[1, 2, 3]"\n"[null, null, null]"\n"[1, 2, ''hello'']"\n"[1, 2"') format CSV; + +select 'LowCardinality'; +select v, variantElement(v, 'LowCardinality(String)') from format(CSV, 'v Variant(LowCardinality(String), UInt64)', '\\N\n"string"\n42') format CSV; +select v, variantElement(v, 'Array(LowCardinality(Nullable(String)))') from format(CSV, 'v Variant(Array(LowCardinality(Nullable(String))), UInt64, String)', '\\N\n"[''string'', null]"\n"[''string'', nul]"\n42') format CSV; + +select 'Nullable'; +select v, variantElement(v, 'Array(Nullable(String))') from format(CSV, 'v Variant(String, Array(Nullable(String)))', '\\N\n"string"\n"[''hello'', null, ''world'']"\n"[''hello'', nul]"') format CSV; + +select repeat('-', 80) format JSONEachRow; + +select 'TSV'; +select 'String'; +select v, variantElement(v, 'String') from format(TSV, 'v Variant(String, UInt64)', '\\N\nstring\n42') format TSV; + +select 'FixedString'; +select v, variantElement(v, 'FixedString(4)') from format(TSV, 'v Variant(String, FixedString(4))', '\\N\nstring\nabcd') format TSV; + +select 'Bool'; +select v, variantElement(v, 'Bool') from format(TSV, 'v Variant(String, Bool)', '\\N\ntruee\ntrue') format TSV; + +select 'Integers'; +select v, variantElement(v, 'Int8') from format(TSV, 'v Variant(String, Int8, UInt64)', '\\N\nstring\n-1\n0\n10000000000\n42d42') format TSV; +select v, variantElement(v, 'UInt8') from format(TSV, 'v Variant(String, UInt8, Int64)', '\\N\nstring\n-1\n0\n10000000000\n42d42') format TSV; +select v, variantElement(v, 'Int16') from format(TSV, 'v Variant(String, Int16, Int64)', '\\N\nstring\n-1\n0\n10000000000\n42d42') format TSV; +select v, variantElement(v, 'UInt16') from format(TSV, 'v Variant(String, UInt16, Int64)', '\\N\nstring\n-1\n0\n10000000000\n42d42') format TSV; +select v, variantElement(v, 'Int32') from format(TSV, 'v Variant(String, Int32, Int64)', 
'\\N\nstring\n-1\n0\n10000000000\n42d42') format TSV; +select v, variantElement(v, 'UInt32') from format(TSV, 'v Variant(String, UInt32, Int64)', '\\N\nstring\n-1\n0\n10000000000\n42d42') format TSV; +select v, variantElement(v, 'Int64') from format(TSV, 'v Variant(String, Int64, Int128)', '\\N\nstring\n-1\n0\n10000000000000000000000\n42d42') format TSV; +select v, variantElement(v, 'UInt64') from format(TSV, 'v Variant(String, UInt64, Int128)', '\\N\nstring\n-1\n0\n10000000000000000000000\n42d42') format TSV; +select v, variantElement(v, 'Int128') from format(TSV, 'v Variant(String, Int128, Int256)', '\\N\nstring\n-1\n0\n42d42') format TSV; +select v, variantElement(v, 'UInt128') from format(TSV, 'v Variant(String, UInt128, Int256)', '\\N\nstring\n-1\n0\n42d42') format TSV; + +select 'Floats'; +select v, variantElement(v, 'Float32') from format(TSV, 'v Variant(String, Float32)', '\\N\nstring\n42.42\n42.d42') format TSV; +select v, variantElement(v, 'Float64') from format(TSV, 'v Variant(String, Float64)', '\\N\nstring\n42.42\n42.d42') format TSV; + +select 'Decimals'; +select v, variantElement(v, 'Decimal32(6)') from format(TSV, 'v Variant(String, Decimal32(6))', '\\N\nstring\n42.42\n42d42\n4242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242.424242424242424242') format TSV; +select v, variantElement(v, 'Decimal64(6)') from format(TSV, 'v Variant(String, Decimal64(6))', '\\N\nstring\n42.42\n42d42\n4242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242.424242424242424242') format TSV; +select v, variantElement(v, 'Decimal128(6)') from format(TSV, 'v Variant(String, Decimal128(6))', '\\N\nstring\n42.42\n42d42\n4242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242.424242424242424242') format TSV; +select v, variantElement(v, 'Decimal256(6)') from format(TSV, 'v Variant(String, Decimal256(6))', '\\N\nstring\n42.42\n42d42\n4242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242.424242424242424242') format TSV; + +select 'Dates and DateTimes'; +select v, variantElement(v, 'Date') from format(TSV, 'v Variant(String, Date, DateTime64)', '\\N\nstring\n2020-01-d1\n2020-01-01\n2020-01-01 00:00:00.999') format TSV; +select v, variantElement(v, 'Date32') from format(TSV, 'v Variant(String, Date32, DateTime64)', '\\N\nstring\n2020-01-d1\n1900-01-01\n2020-01-01 00:00:00.999') format TSV; +select v, variantElement(v, 'DateTime') from format(TSV, 'v Variant(String, DateTime, DateTime64)', '\\N\nstring\n2020-01-d1\n2020-01-01 00:00:00\n2020-01-01 00:00:00.999') format TSV; +select v, variantElement(v, 'DateTime64') from format(TSV, 'v Variant(String, DateTime64)', '\\N\nstring\n2020-01-d1\n2020-01-01 00:00:00.999\n2020-01-01 00:00:00.999999999 ABC') format TSV; + +select 'UUID'; +select v, variantElement(v, 'UUID') from format(TSV, 'v Variant(String, UUID)', '\\N\nstring\nc8619cca-0caa-445e-ae76-1d4f6e0b3927\nc8619cca-0caa-445e-ae76-1d4f6e0b3927AAA') format TSV; + +select 'IPv4'; +select v, variantElement(v, 'IPv4') from format(TSV, 'v Variant(String, IPv4)', '\\N\nstring\n127.0.0.1\n127.0.0.1AAA') format TSV; + +select 'IPv6'; +select v, variantElement(v, 'IPv6') from format(TSV, 'v Variant(String, IPv6)', '\\N\nstring\n2001:0db8:85a3:0000:0000:8a2e:0370:7334\n2001:0db8:85a3:0000:0000:8a2e:0370:7334AAA') format TSV; + +select 'Enum'; +select v, variantElement(v, 'Enum(''a'' = 1)') from format(TSV, 'v Variant(String, UInt32, Enum(''a'' = 1))', 
'\\N\nstring\na\n1\n2\naa') format TSV; + +select 'Map'; +select v, variantElement(v, 'Map(String, UInt64)') from format(TSV, 'v Variant(String, Map(String, UInt64))', '\\N\nstring\n{''a'' : 42, ''b'' : 43, ''c'' : null}\n{''c'' : 44, ''d'' : [1,2,3]}\n{''c'' : 44') format TSV; + +select 'Array'; +select v, variantElement(v, 'Array(UInt64)') from format(TSV, 'v Variant(String, Array(UInt64))', '\\N\nstring\n[1, 2, 3]\n[null, null, null]\n[1, 2, ''hello'']\n[1, 2') format TSV; + +select 'LowCardinality'; +select v, variantElement(v, 'LowCardinality(String)') from format(TSV, 'v Variant(LowCardinality(String), UInt64)', '\\N\nstring\n42') format TSV; +select v, variantElement(v, 'Array(LowCardinality(Nullable(String)))') from format(TSV, 'v Variant(Array(LowCardinality(Nullable(String))), UInt64, String)', '\\N\n[''string'', null]\n[''string'', nul]\n42') format TSV; + +select 'Nullable'; +select v, variantElement(v, 'Array(Nullable(String))') from format(TSV, 'v Variant(String, Array(Nullable(String)))', '\\N\nstring\n[''hello'', null, ''world'']\n[''hello'', nul]') format TSV; + +select repeat('-', 80) format JSONEachRow; + +select 'Values'; +select 'String'; +select v, variantElement(v, 'String') from format(Values, 'v Variant(String, UInt64)', '(NULL), (''string''), (42)') format Values; + +select 'FixedString'; +select v, variantElement(v, 'FixedString(4)') from format(Values, 'v Variant(String, FixedString(4))', '(NULL), (''string''), (''abcd'')') format Values; + +select 'Bool'; +select v, variantElement(v, 'Bool') from format(Values, 'v Variant(String, Bool)', '(NULL), (true)') format Values; + +select 'Integers'; +select v, variantElement(v, 'Int8') from format(Values, 'v Variant(String, Int8, UInt64)', '(NULL), (''string''), (-1), (0), (10000000000)') format Values; +select v, variantElement(v, 'UInt8') from format(Values, 'v Variant(String, UInt8, Int64)', '(NULL), (''string''), (-1), (0), (10000000000)') format Values; +select v, variantElement(v, 'Int16') from format(Values, 'v Variant(String, Int16, Int64)', '(NULL), (''string''), (-1), (0), (10000000000)') format Values; +select v, variantElement(v, 'UInt16') from format(Values, 'v Variant(String, UInt16, Int64)', '(NULL), (''string''), (-1), (0), (10000000000)') format Values; +select v, variantElement(v, 'Int32') from format(Values, 'v Variant(String, Int32, Int64)', '(NULL), (''string''), (-1), (0), (10000000000)') format Values; +select v, variantElement(v, 'UInt32') from format(Values, 'v Variant(String, UInt32, Int64)', '(NULL), (''string''), (-1), (0), (10000000000)') format Values; +select v, variantElement(v, 'Int64') from format(Values, 'v Variant(String, Int64, Int128)', '(NULL), (''string''), (-1), (0), (10000000000000000000000)') format Values; +select v, variantElement(v, 'UInt64') from format(Values, 'v Variant(String, UInt64, Int128)', '(NULL), (''string''), (-1), (0), (10000000000000000000000)') format Values; +select v, variantElement(v, 'Int128') from format(Values, 'v Variant(String, Int128, Int256)', '(NULL), (''string''), (-1), (0)') format Values; +select v, variantElement(v, 'UInt128') from format(Values, 'v Variant(String, UInt128, Int256)', '(NULL), (''string''), (-1), (0)') format Values; + +select 'Floats'; +select v, variantElement(v, 'Float32') from format(Values, 'v Variant(String, Float32)', '(NULL), (''string''), (42.42)') format Values; +select v, variantElement(v, 'Float64') from format(Values, 'v Variant(String, Float64)', '(NULL), (''string''), (42.42)') format Values; + +select 'Decimals'; 
+select v, variantElement(v, 'Decimal32(6)') from format(Values, 'v Variant(String, Decimal32(6))', '(NULL), (''string''), (42.42)') format Values; +select v, variantElement(v, 'Decimal64(6)') from format(Values, 'v Variant(String, Decimal64(6))', '(NULL), (''string''), (42.42)') format Values; +select v, variantElement(v, 'Decimal128(6)') from format(Values, 'v Variant(String, Decimal128(6))', '(NULL), (''string''), (42.42)') format Values; +select v, variantElement(v, 'Decimal256(6)') from format(Values, 'v Variant(String, Decimal256(6))', '(NULL), (''string''), (42.42)') format Values; + +select 'Dates and DateTimes'; +select v, variantElement(v, 'Date') from format(Values, 'v Variant(String, Date, DateTime64)', '(NULL), (''string''), (''2020-01-d1''), (''2020-01-01''), (''2020-01-01 00:00:00.999'')') format Values; +select v, variantElement(v, 'Date32') from format(Values, 'v Variant(String, Date32, DateTime64)', '(NULL), (''string''), (''2020-01-d1''), (''1900-01-01''), (''2020-01-01 00:00:00.999'')') format Values; +select v, variantElement(v, 'DateTime') from format(Values, 'v Variant(String, DateTime, DateTime64)', '(NULL), (''string''), (''2020-01-d1''), (''2020-01-01 00:00:00''), (''2020-01-01 00:00:00.999'')') format Values; +select v, variantElement(v, 'DateTime64') from format(Values, 'v Variant(String, DateTime64)', '(NULL), (''string''), (''2020-01-d1''), (''2020-01-01 00:00:00.999''), (''2020-01-01 00:00:00.999999999 ABC'')') format Values; + +select 'UUID'; +select v, variantElement(v, 'UUID') from format(Values, 'v Variant(String, UUID)', '(NULL), (''string''), (''c8619cca-0caa-445e-ae76-1d4f6e0b3927''), (''c8619cca-0caa-445e-ae76-1d4f6e0b3927AAA'')') format Values; + +select 'IPv4'; +select v, variantElement(v, 'IPv4') from format(Values, 'v Variant(String, IPv4)', '(NULL), (''string''), (''127.0.0.1''), (''127.0.0.1AAA'')') format Values; + +select 'IPv6'; +select v, variantElement(v, 'IPv6') from format(Values, 'v Variant(String, IPv6)', '(NULL), (''string''), (''2001:0db8:85a3:0000:0000:8a2e:0370:7334''), (''2001:0db8:85a3:0000:0000:8a2e:0370:7334AAA'')') format Values; + +select 'Enum'; +select v, variantElement(v, 'Enum(''a'' = 1)') from format(Values, 'v Variant(String, UInt32, Enum(''a'' = 1))', '(NULL), (''string''), (''a''), (1), (2), (''aa'')') format Values; + +select 'Map'; +select v, variantElement(v, 'Map(String, UInt64)') from format(Values, 'v Variant(String, Map(String, UInt64))', '(NULL), (''string''), ({''a'' : 42, ''b'' : 43, ''c'' : null})') format Values; + +select 'Array'; +select v, variantElement(v, 'Array(UInt64)') from format(Values, 'v Variant(String, Array(UInt64))', '(NULL), (''string''), ([1, 2, 3]), ([null, null, null])') format Values; + +select 'LowCardinality'; +select v, variantElement(v, 'LowCardinality(String)') from format(Values, 'v Variant(LowCardinality(String), UInt64)', '(NULL), (''string''), (42)') format Values; +select v, variantElement(v, 'Array(LowCardinality(Nullable(String)))') from format(Values, 'v Variant(Array(LowCardinality(Nullable(String))), UInt64, String)', '(NULL), ([''string'', null]), (42)') format Values; + +select 'Nullable'; +select v, variantElement(v, 'Array(Nullable(String))') from format(Values, 'v Variant(String, Array(Nullable(String)))', '(NULL), (''string''), ([''hello'', null, ''world''])') format Values; + +select ''; diff --git a/parser/testdata/02941_any_RESPECT_NULL_sparse_column/ast.json b/parser/testdata/02941_any_RESPECT_NULL_sparse_column/ast.json new file mode 100644 index 
000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02941_any_RESPECT_NULL_sparse_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02941_any_RESPECT_NULL_sparse_column/metadata.json b/parser/testdata/02941_any_RESPECT_NULL_sparse_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02941_any_RESPECT_NULL_sparse_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02941_any_RESPECT_NULL_sparse_column/query.sql b/parser/testdata/02941_any_RESPECT_NULL_sparse_column/query.sql new file mode 100644 index 000000000..df86b740c --- /dev/null +++ b/parser/testdata/02941_any_RESPECT_NULL_sparse_column/query.sql @@ -0,0 +1,5 @@ +-- Regression test for the case when an aggregate function is called with from==to for a sparse column +DROP TABLE IF EXISTS data_sparse_column; +CREATE TABLE data_sparse_column (`key` Int64, `value` Int32) ENGINE = MergeTree ORDER BY key; +INSERT INTO data_sparse_column VALUES (1, 0); +SELECT any(value) RESPECT NULLS FROM data_sparse_column; diff --git a/parser/testdata/02941_projections_external_aggregation/ast.json b/parser/testdata/02941_projections_external_aggregation/ast.json new file mode 100644 index 000000000..4a811401e --- /dev/null +++ b/parser/testdata/02941_projections_external_aggregation/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_proj_external (children 1)" + }, + { + "explain": " Identifier t_proj_external" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001240228, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02941_projections_external_aggregation/metadata.json b/parser/testdata/02941_projections_external_aggregation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02941_projections_external_aggregation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02941_projections_external_aggregation/query.sql b/parser/testdata/02941_projections_external_aggregation/query.sql new file mode 100644 index 000000000..e1ba5cd5c --- /dev/null +++ b/parser/testdata/02941_projections_external_aggregation/query.sql @@ -0,0 +1,66 @@ +DROP TABLE IF EXISTS t_proj_external; + +CREATE TABLE t_proj_external +( + k1 UInt32, + k2 UInt32, + k3 UInt32, + value UInt32 +) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO t_proj_external SELECT 1, number%2, number%4, number FROM numbers(50000); + +SYSTEM STOP MERGES t_proj_external; + +ALTER TABLE t_proj_external ADD PROJECTION aaaa ( + SELECT + k1, + k2, + k3, + sum(value) + GROUP BY k1, k2, k3 +); + +INSERT INTO t_proj_external SELECT 1, number%2, number%4, number FROM numbers(100000) LIMIT 50000, 100000; + +SELECT '*** correct aggregation ***'; + +SELECT k1, k2, k3, sum(value) v FROM t_proj_external GROUP BY k1, k2, k3 ORDER BY k1, k2, k3 SETTINGS optimize_use_projections = 0; + +SELECT '*** correct aggregation with projection ***'; + +SELECT k1, k2, k3, sum(value) v FROM t_proj_external GROUP BY k1, k2, k3 ORDER BY k1, k2, k3; + +SELECT '*** optimize_aggregation_in_order = 0, max_bytes_before_external_group_by = 1, group_by_two_level_threshold = 1 ***'; + +SELECT k1, k2, k3, sum(value) v FROM t_proj_external GROUP BY k1, k2, k3 ORDER BY k1, k2, k3 SETTINGS optimize_aggregation_in_order = 0, max_bytes_before_external_group_by = 1, max_bytes_ratio_before_external_group_by = 0, group_by_two_level_threshold = 1; + +SELECT
'*** optimize_aggregation_in_order = 1, max_bytes_before_external_group_by = 1, group_by_two_level_threshold = 1 ***'; + +SELECT k1, k2, k3, sum(value) v FROM t_proj_external GROUP BY k1, k2, k3 ORDER BY k1, k2, k3 SETTINGS optimize_aggregation_in_order = 1, max_bytes_before_external_group_by = 1, max_bytes_ratio_before_external_group_by = 0, group_by_two_level_threshold = 1; + +SYSTEM START MERGES t_proj_external; + +ALTER TABLE t_proj_external MATERIALIZE PROJECTION aaaa SETTINGS mutations_sync = 2; + +SELECT '*** after materialization ***'; + +SELECT '*** correct aggregation ***'; + +SELECT k1, k2, k3, sum(value) v FROM t_proj_external GROUP BY k1, k2, k3 ORDER BY k1, k2, k3 SETTINGS optimize_use_projections = 0; + +SELECT '*** correct aggregation with projection ***'; + +SELECT k1, k2, k3, sum(value) v FROM t_proj_external GROUP BY k1, k2, k3 ORDER BY k1, k2, k3; + +SELECT '*** optimize_aggregation_in_order = 0, max_bytes_before_external_group_by = 1, group_by_two_level_threshold = 1 ***'; + +SELECT k1, k2, k3, sum(value) v FROM t_proj_external GROUP BY k1, k2, k3 ORDER BY k1, k2, k3 SETTINGS optimize_aggregation_in_order = 0, max_bytes_before_external_group_by = 1, max_bytes_ratio_before_external_group_by = 0, group_by_two_level_threshold = 1; + +SELECT '*** optimize_aggregation_in_order = 1, max_bytes_before_external_group_by = 1, group_by_two_level_threshold = 1 ***'; + +SELECT k1, k2, k3, sum(value) v FROM t_proj_external GROUP BY k1, k2, k3 ORDER BY k1, k2, k3 SETTINGS optimize_aggregation_in_order = 1, max_bytes_before_external_group_by = 1, max_bytes_ratio_before_external_group_by = 0, group_by_two_level_threshold = 1; + +DROP TABLE IF EXISTS t_proj_external; diff --git a/parser/testdata/02942_variant_cast/ast.json b/parser/testdata/02942_variant_cast/ast.json new file mode 100644 index 000000000..ccacce049 --- /dev/null +++ b/parser/testdata/02942_variant_cast/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000952611, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02942_variant_cast/metadata.json b/parser/testdata/02942_variant_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02942_variant_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02942_variant_cast/query.sql b/parser/testdata/02942_variant_cast/query.sql new file mode 100644 index 000000000..33587e3e4 --- /dev/null +++ b/parser/testdata/02942_variant_cast/query.sql @@ -0,0 +1,23 @@ +set allow_experimental_variant_type=1; + +select NULL::Variant(String, UInt64); +select 42::UInt64::Variant(String, UInt64); +select 42::UInt32::Variant(String, UInt64); -- {serverError CANNOT_CONVERT_TYPE} +select now()::Variant(String, UInt64); -- {serverError CANNOT_CONVERT_TYPE} +select CAST(number % 2 ? NULL : number, 'Variant(String, UInt64)') from numbers(4); +select 'Hello'::LowCardinality(String)::Variant(LowCardinality(String), UInt64); +select 'Hello'::LowCardinality(Nullable(String))::Variant(LowCardinality(String), UInt64); +select 'NULL'::LowCardinality(Nullable(String))::Variant(LowCardinality(String), UInt64); +select 'Hello'::LowCardinality(Nullable(String))::Variant(LowCardinality(String), UInt64); +select CAST(CAST(number % 2 ? 
NULL : 'Hello', 'LowCardinality(Nullable(String))'), 'Variant(LowCardinality(String), UInt64)') from numbers(4); + +select NULL::Variant(String, UInt64)::UInt64; +select NULL::Variant(String, UInt64)::Nullable(UInt64); +select '42'::Variant(String, UInt64)::UInt64; +select 'str'::Variant(String, UInt64)::UInt64; -- {serverError CANNOT_PARSE_TEXT} +select CAST(multiIf(number % 3 == 0, NULL::Variant(String, UInt64), number % 3 == 1, 'Hello'::Variant(String, UInt64), number::Variant(String, UInt64)), 'Nullable(String)') from numbers(6); +select CAST(multiIf(number == 1, NULL::Variant(String, UInt64), number == 2, 'Hello'::Variant(String, UInt64), number::Variant(String, UInt64)), 'UInt64') from numbers(6); -- {serverError CANNOT_PARSE_TEXT} + + +select number::Variant(UInt64)::Variant(String, UInt64)::Variant(Array(String), String, UInt64) from numbers(2); +select 'str'::Variant(String, UInt64)::Variant(String, Array(UInt64)); -- {serverError CANNOT_CONVERT_TYPE} diff --git a/parser/testdata/02943_create_query_interpreter_sample_block_fix/ast.json b/parser/testdata/02943_create_query_interpreter_sample_block_fix/ast.json new file mode 100644 index 000000000..14b9f1b5a --- /dev/null +++ b/parser/testdata/02943_create_query_interpreter_sample_block_fix/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001121597, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02943_create_query_interpreter_sample_block_fix/metadata.json b/parser/testdata/02943_create_query_interpreter_sample_block_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02943_create_query_interpreter_sample_block_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02943_create_query_interpreter_sample_block_fix/query.sql b/parser/testdata/02943_create_query_interpreter_sample_block_fix/query.sql new file mode 100644 index 000000000..0262393fd --- /dev/null +++ b/parser/testdata/02943_create_query_interpreter_sample_block_fix/query.sql @@ -0,0 +1,52 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + number UInt64 +) +ENGINE=MergeTree ORDER BY number; + +DROP VIEW IF EXISTS test_mv; +CREATE MATERIALIZED VIEW test_mv ENGINE=MergeTree ORDER BY arr +AS +WITH (SELECT '\d[a-z]') AS constant_value +SELECT extractAll(concat(toString(number), 'a'), assumeNotNull(constant_value)) AS arr +FROM test_table; + +INSERT INTO test_table VALUES (0); +SELECT * FROM test_mv ORDER BY arr; + +SELECT '--'; + +INSERT INTO test_table VALUES (1); +SELECT * FROM test_mv ORDER BY arr; + +SELECT '--'; + +TRUNCATE test_table; + +DROP TABLE IF EXISTS regex_test_table; +CREATE TABLE regex_test_table +( + regex String +) +ENGINE = MergeTree ORDER BY regex; + +INSERT INTO regex_test_table VALUES ('\d[a-z]'); + +DROP VIEW test_mv; +CREATE MATERIALIZED VIEW test_mv ENGINE=MergeTree ORDER BY arr +AS +WITH (SELECT regex FROM regex_test_table) AS constant_value +SELECT extractAll(concat(toString(number), 'a'), assumeNotNull(constant_value)) AS arr +FROM test_table; + +INSERT INTO test_table VALUES (0); +SELECT * FROM test_mv ORDER BY arr; + +SELECT '--'; + +INSERT INTO test_table VALUES (1); +SELECT * FROM test_mv ORDER BY arr; + +DROP VIEW test_mv; +DROP TABLE test_table; diff --git 
a/parser/testdata/02943_exprs_order_in_group_by_with_rollup/ast.json b/parser/testdata/02943_exprs_order_in_group_by_with_rollup/ast.json new file mode 100644 index 000000000..807148e88 --- /dev/null +++ b/parser/testdata/02943_exprs_order_in_group_by_with_rollup/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_group_by_with_rollup_order (children 1)" + }, + { + "explain": " Identifier test_group_by_with_rollup_order" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001021884, + "rows_read": 2, + "bytes_read": 114 + } +} diff --git a/parser/testdata/02943_exprs_order_in_group_by_with_rollup/metadata.json b/parser/testdata/02943_exprs_order_in_group_by_with_rollup/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02943_exprs_order_in_group_by_with_rollup/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02943_exprs_order_in_group_by_with_rollup/query.sql b/parser/testdata/02943_exprs_order_in_group_by_with_rollup/query.sql new file mode 100644 index 000000000..03bb7f9e7 --- /dev/null +++ b/parser/testdata/02943_exprs_order_in_group_by_with_rollup/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS test_group_by_with_rollup_order; + +CREATE TABLE test_group_by_with_rollup_order (id Int64, a Nullable(Int64), b Nullable(String)) ENGINE = MergeTree ORDER BY id; + +insert into test_group_by_with_rollup_order values(1,1,'a'); +insert into test_group_by_with_rollup_order values(2,2,'a'); +insert into test_group_by_with_rollup_order values(3,3,'b'); +insert into test_group_by_with_rollup_order values(4,4,'b'); + +SELECT toString(a) as r1, b, count() FROM test_group_by_with_rollup_order GROUP BY r1, b WITH ROLLUP ORDER BY b,r1; + +DROP TABLE IF EXISTS test_group_by_with_rollup_order; diff --git a/parser/testdata/02943_order_by_all/ast.json b/parser/testdata/02943_order_by_all/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02943_order_by_all/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02943_order_by_all/metadata.json b/parser/testdata/02943_order_by_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02943_order_by_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02943_order_by_all/query.sql b/parser/testdata/02943_order_by_all/query.sql new file mode 100644 index 000000000..4ce59e84e --- /dev/null +++ b/parser/testdata/02943_order_by_all/query.sql @@ -0,0 +1,115 @@ +-- Tests the ORDER BY ALL sort expression + +DROP TABLE IF EXISTS order_by_all; + +CREATE TABLE order_by_all +( + a String, + b Nullable(Int32) +) +ENGINE = Memory; + +INSERT INTO order_by_all VALUES ('B', 3), ('C', NULL), ('D', 1), ('A', 2); + +SELECT '-- no modifiers'; + +SET enable_analyzer = 0; +SELECT a, b FROM order_by_all ORDER BY ALL; +SELECT b, a FROM order_by_all ORDER BY ALL; + +SET enable_analyzer = 1; +SELECT a, b FROM order_by_all ORDER BY ALL; +SELECT b, a FROM order_by_all ORDER BY ALL; + +SELECT '-- with ASC/DESC modifiers'; + +SET enable_analyzer = 0; +SELECT a, b FROM order_by_all ORDER BY ALL ASC; +SELECT a, b FROM order_by_all ORDER BY ALL DESC; + +SET enable_analyzer = 1; +SELECT a, b FROM order_by_all ORDER BY ALL ASC; +SELECT a, b FROM order_by_all ORDER BY ALL DESC; + +SELECT '-- with NULLS FIRST/LAST modifiers'; + +SET enable_analyzer = 0; +SELECT b, a FROM order_by_all ORDER BY ALL NULLS
FIRST; +SELECT b, a FROM order_by_all ORDER BY ALL NULLS LAST; + +SET enable_analyzer = 1; +SELECT b, a FROM order_by_all ORDER BY ALL NULLS FIRST; +SELECT b, a FROM order_by_all ORDER BY ALL NULLS LAST; + +SELECT '-- SELECT *'; + +SET enable_analyzer = 0; +SELECT * FROM order_by_all ORDER BY all; + +SET enable_analyzer = 1; +SELECT * FROM order_by_all ORDER BY all; + +DROP TABLE order_by_all; + +SELECT '-- the trouble starts when "order by all" is ambiguous'; + +CREATE TABLE order_by_all +( + a String, + b Nullable(Int32), + all UInt64 +) +ENGINE = Memory; + +INSERT INTO order_by_all VALUES ('B', 3, 10), ('C', NULL, 40), ('D', 1, 20), ('A', 2, 30); + +SELECT ' -- columns'; + +SET enable_analyzer = 0; +SELECT a, b, all FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } +SELECT a, b, all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; +SELECT a FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } +SELECT a FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; +SELECT * FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } +SELECT * FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; + +SET enable_analyzer = 1; +SELECT a, b, all FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } +SELECT a, b, all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; +SELECT a FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; +-- SELECT * FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } -- (*) see below +SELECT * FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; +-- SELECT a FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } -- (*) see below + +-- (*) These queries show the expected behavior for the analyzer. Unfortunately, it is not implemented that way yet, +-- which is not wrong but a bit unintuitive (some may say a landmine). Keeping the queries for now for reference.
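+
+-- For reference, a sketch of the special meaning exercised above, kept in the same commented-out
+-- style as the (*) queries (these two lines are an added illustration, not queries from the
+-- original test): for a table with no column or alias named "all", ORDER BY ALL is expected to
+-- expand to the whole SELECT list, so the two queries below should be equivalent:
+-- SELECT a, b FROM order_by_all ORDER BY ALL;
+-- SELECT a, b FROM order_by_all ORDER BY a, b;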
+ +SELECT ' -- column aliases'; + +SET enable_analyzer = 0; +SELECT a, b AS all FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } +SELECT a, b AS all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; + +SET enable_analyzer = 1; +SELECT a, b AS all FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } +SELECT a, b AS all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; + +SELECT ' -- expressions'; + +SET enable_analyzer = 0; +SELECT format('{} {}', a, b) AS all FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } +SELECT format('{} {}', a, b) AS all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; + +SET enable_analyzer = 1; +SELECT format('{} {}', a, b) AS all FROM order_by_all ORDER BY all; -- { serverError UNEXPECTED_EXPRESSION } +SELECT format('{} {}', a, b) AS all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = false; + +SELECT ' -- ORDER BY ALL loses its special meaning when used in conjunction with other columns'; + +SET enable_analyzer = 0; +SELECT a, b, all FROM order_by_all ORDER BY all, a; + +SET enable_analyzer = 1; +SELECT a, b, all FROM order_by_all ORDER BY all, a; + +DROP TABLE order_by_all; diff --git a/parser/testdata/02943_positional_arguments_bugs/ast.json b/parser/testdata/02943_positional_arguments_bugs/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02943_positional_arguments_bugs/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02943_positional_arguments_bugs/metadata.json b/parser/testdata/02943_positional_arguments_bugs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02943_positional_arguments_bugs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02943_positional_arguments_bugs/query.sql b/parser/testdata/02943_positional_arguments_bugs/query.sql new file mode 100644 index 000000000..9b1b872ae --- /dev/null +++ b/parser/testdata/02943_positional_arguments_bugs/query.sql @@ -0,0 +1,26 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/46628 +DROP TABLE IF EXISTS t; +CREATE TABLE t +( + `n` int, + `__unused_group_by_column` int +) +ENGINE = MergeTree +ORDER BY n AS +SELECT number, number +FROM numbers(10); + +SELECT + sum(n), + __unused_group_by_column +FROM t +GROUP BY __unused_group_by_column ORDER BY __unused_group_by_column; + +SELECT sum(n), 1 as x from t group by x; + +SELECT + 'processed' AS type, + max(number) AS max_date, + min(number) AS min_date +FROM numbers(100) +GROUP BY type; diff --git a/parser/testdata/02943_tokenbf_and_ngrambf_indexes_support_match_function/ast.json b/parser/testdata/02943_tokenbf_and_ngrambf_indexes_support_match_function/ast.json new file mode 100644 index 000000000..3396f93e2 --- /dev/null +++ b/parser/testdata/02943_tokenbf_and_ngrambf_indexes_support_match_function/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tokenbf_tab (children 1)" + }, + { + "explain": " Identifier tokenbf_tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001072814, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/02943_tokenbf_and_ngrambf_indexes_support_match_function/metadata.json b/parser/testdata/02943_tokenbf_and_ngrambf_indexes_support_match_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02943_tokenbf_and_ngrambf_indexes_support_match_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02943_tokenbf_and_ngrambf_indexes_support_match_function/query.sql b/parser/testdata/02943_tokenbf_and_ngrambf_indexes_support_match_function/query.sql new file mode 100644 index 000000000..5ad54c872 --- /dev/null +++ b/parser/testdata/02943_tokenbf_and_ngrambf_indexes_support_match_function/query.sql @@ -0,0 +1,188 @@ +DROP TABLE IF EXISTS tokenbf_tab; +DROP TABLE IF EXISTS ngrambf_tab; + +CREATE TABLE tokenbf_tab +( + id UInt32, + str String, + INDEX idx str TYPE tokenbf_v1(256, 2, 0) +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity = 1; + +CREATE TABLE ngrambf_tab +( + id UInt32, + str String, + INDEX idx str TYPE ngrambf_v1(3, 256, 2, 0) +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity = 1; + +INSERT INTO tokenbf_tab VALUES (1, 'Well, Hello ClickHouse !'), (2, 'Well, Hello World !'), (3, 'Good Weather !'), (4, 'Say Hello !'), (5, 'Its An OLAP Database'), (6, 'True World Champion'); +INSERT INTO ngrambf_tab VALUES (1, 'Hello ClickHouse'), (2, 'Hello World'), (3, 'Good Weather'), (4, 'Say Hello'), (5, 'OLAP Database'), (6, 'World Champion'); + +SELECT * FROM tokenbf_tab WHERE match(str, ' Hello (ClickHouse|World) ') ORDER BY id; +SELECT * FROM ngrambf_tab WHERE match(str, 'Hello (ClickHouse|World)') ORDER BY id; + +-- Read 2/6 granules +-- Required string: 'Hello ' +-- Alternatives: 'Hello ClickHouse', 'Hello World' +-- Surrounded by spaces for tokenbf + +SELECT * +FROM +( + EXPLAIN PLAN indexes=1 + SELECT * FROM tokenbf_tab WHERE match(str, ' Hello (ClickHouse|World) ') ORDER BY id +) +WHERE + explain LIKE '%Granules: %' +SETTINGS + enable_analyzer = 0; + +SELECT * +FROM +( + EXPLAIN PLAN indexes=1 + SELECT * FROM tokenbf_tab WHERE match(str, ' Hello (ClickHouse|World) ') ORDER BY id +) +WHERE + explain LIKE '%Granules: %' +SETTINGS + enable_analyzer = 1; + +SELECT * +FROM +( + EXPLAIN PLAN indexes=1 + SELECT * FROM ngrambf_tab WHERE match(str, 'Hello (ClickHouse|World)') ORDER BY id +) +WHERE + explain LIKE '%Granules: %' +SETTINGS + enable_analyzer = 0; + +SELECT * +FROM +( + EXPLAIN PLAN indexes=1 + SELECT * FROM ngrambf_tab WHERE match(str, 'Hello (ClickHouse|World)') ORDER BY id +) +WHERE + explain LIKE '%Granules: %' +SETTINGS + enable_analyzer = 1; + + +SELECT '---'; + +SELECT * FROM tokenbf_tab WHERE match(str, '.* (ClickHouse|World) ') ORDER BY id; +SELECT * FROM ngrambf_tab WHERE match(str, '.*(ClickHouse|World)') ORDER BY id; + +-- Read 3/6 granules +-- Required string: - +-- Alternatives: 'ClickHouse', 'World' +-- Surrounded by spaces for tokenbf + +SELECT * +FROM +( + EXPLAIN PLAN indexes = 1 + SELECT * FROM tokenbf_tab WHERE match(str, '.* (ClickHouse|World) ') ORDER BY id +) +WHERE + explain LIKE '%Granules: %' +SETTINGS + enable_analyzer = 0; + +SELECT * +FROM +( + EXPLAIN PLAN indexes = 1 + SELECT * FROM tokenbf_tab WHERE match(str, '.* (ClickHouse|World) ') ORDER BY id +) +WHERE + explain LIKE '%Granules: %' +SETTINGS + enable_analyzer = 1; + +SELECT * +FROM +( + EXPLAIN PLAN indexes = 1 + SELECT * FROM ngrambf_tab WHERE match(str, '.*(ClickHouse|World)') ORDER BY id +) +WHERE + explain LIKE '%Granules: %' +SETTINGS + enable_analyzer = 0; + +SELECT * +FROM +( + EXPLAIN PLAN indexes = 1 + SELECT * FROM ngrambf_tab WHERE match(str, '.*(ClickHouse|World)') ORDER BY id +) +WHERE + explain LIKE '%Granules: %' +SETTINGS + enable_analyzer = 1; + +SELECT '---'; + +SELECT * FROM tokenbf_tab WHERE 
match(str, ' OLAP .*') ORDER BY id; +SELECT * FROM ngrambf_tab WHERE match(str, 'OLAP.*') ORDER BY id; + +-- Read 1/6 granules +-- Required string: 'OLAP' +-- Alternatives: - +-- Surrounded by spaces for tokenbf + +SELECT * +FROM +( + EXPLAIN PLAN indexes = 1 + SELECT * FROM tokenbf_tab WHERE match(str, ' OLAP (.*?)*') ORDER BY id +) +WHERE + explain LIKE '%Granules: %' +SETTINGS + enable_analyzer = 0; +SELECT * +FROM +( + EXPLAIN PLAN indexes = 1 + SELECT * FROM tokenbf_tab WHERE match(str, ' OLAP (.*?)*') ORDER BY id +) +WHERE + explain LIKE '%Granules: %' +SETTINGS + enable_analyzer = 1; + +SELECT * +FROM +( + EXPLAIN PLAN indexes = 1 + SELECT * FROM ngrambf_tab WHERE match(str, 'OLAP (.*?)*') ORDER BY id +) +WHERE + explain LIKE '%Granules: %' +SETTINGS + enable_analyzer = 0; + +SELECT * +FROM +( + EXPLAIN PLAN indexes = 1 + SELECT * FROM ngrambf_tab WHERE match(str, 'OLAP (.*?)*') ORDER BY id +) +WHERE + explain LIKE '%Granules: %' +SETTINGS + enable_analyzer = 1; + +DROP TABLE tokenbf_tab; +DROP TABLE ngrambf_tab; diff --git a/parser/testdata/02943_use_full_text_skip_index_with_has_any/ast.json b/parser/testdata/02943_use_full_text_skip_index_with_has_any/ast.json new file mode 100644 index 000000000..8f324eca4 --- /dev/null +++ b/parser/testdata/02943_use_full_text_skip_index_with_has_any/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tokenbf_v1_hasany_test (children 1)" + }, + { + "explain": " Identifier tokenbf_v1_hasany_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001120443, + "rows_read": 2, + "bytes_read": 96 + } +} diff --git a/parser/testdata/02943_use_full_text_skip_index_with_has_any/metadata.json b/parser/testdata/02943_use_full_text_skip_index_with_has_any/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02943_use_full_text_skip_index_with_has_any/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02943_use_full_text_skip_index_with_has_any/query.sql b/parser/testdata/02943_use_full_text_skip_index_with_has_any/query.sql new file mode 100644 index 000000000..b489ecd47 --- /dev/null +++ b/parser/testdata/02943_use_full_text_skip_index_with_has_any/query.sql @@ -0,0 +1,53 @@ +DROP TABLE IF EXISTS tokenbf_v1_hasany_test; +DROP TABLE IF EXISTS ngrambf_v1_hasany_test; + +CREATE TABLE tokenbf_v1_hasany_test +( + id UInt32, + array Array(String), + INDEX idx_array_tokenbf_v1 array TYPE tokenbf_v1(512,3,0) GRANULARITY 1, +) Engine=MergeTree() ORDER BY id SETTINGS index_granularity = 1; + +CREATE TABLE ngrambf_v1_hasany_test +( + id UInt32, + array Array(String), + INDEX idx_array_ngrambf_v1 array TYPE ngrambf_v1(3,512,3,0) GRANULARITY 1, +) Engine=MergeTree() ORDER BY id SETTINGS index_granularity = 1; + +INSERT INTO tokenbf_v1_hasany_test VALUES (1, ['this is a test', 'example.com']), (2, ['another test', 'another example']); +INSERT INTO ngrambf_v1_hasany_test VALUES (1, ['this is a test', 'example.com']), (2, ['another test', 'another example']); + +SELECT * FROM tokenbf_v1_hasany_test WHERE hasAny(array, ['this is a test']) SETTINGS force_data_skipping_indices='idx_array_tokenbf_v1'; +SELECT '--'; +SELECT * FROM tokenbf_v1_hasany_test WHERE hasAny(array, ['example.com']) SETTINGS force_data_skipping_indices='idx_array_tokenbf_v1'; +SELECT '--'; +SELECT * FROM tokenbf_v1_hasany_test WHERE hasAny(array, ['another test']) SETTINGS force_data_skipping_indices='idx_array_tokenbf_v1'; +SELECT '--'; 
+SELECT * FROM tokenbf_v1_hasany_test WHERE hasAny(array, ['another example', 'example.com']) ORDER BY id ASC SETTINGS force_data_skipping_indices='idx_array_tokenbf_v1'; +SELECT '--'; + +SELECT * FROM ngrambf_v1_hasany_test WHERE hasAny(array, ['this is a test']) SETTINGS force_data_skipping_indices='idx_array_ngrambf_v1'; +SELECT '--'; +SELECT * FROM ngrambf_v1_hasany_test WHERE hasAny(array, ['example.com']) SETTINGS force_data_skipping_indices='idx_array_ngrambf_v1'; +SELECT '--'; +SELECT * FROM ngrambf_v1_hasany_test WHERE hasAny(array, ['another test']) SETTINGS force_data_skipping_indices='idx_array_ngrambf_v1'; +SELECT '--'; +SELECT * FROM ngrambf_v1_hasany_test WHERE hasAny(array, ['another example', 'example.com']) ORDER BY id ASC SETTINGS force_data_skipping_indices='idx_array_ngrambf_v1'; +SELECT '--'; + +SELECT * FROM tokenbf_v1_hasany_test WHERE hasAll(array, ['this is a test', 'example.com']) SETTINGS force_data_skipping_indices='idx_array_tokenbf_v1'; +SELECT '--'; +SELECT * FROM tokenbf_v1_hasany_test WHERE hasAll(array, ['another test']) SETTINGS force_data_skipping_indices='idx_array_tokenbf_v1'; +SELECT '--'; +SELECT * FROM tokenbf_v1_hasany_test WHERE hasAll(array, ['another example', 'example.com']) ORDER BY id ASC SETTINGS force_data_skipping_indices='idx_array_tokenbf_v1'; +SELECT '--'; + +SELECT * FROM ngrambf_v1_hasany_test WHERE hasAll(array, ['this is a test', 'example.com']) SETTINGS force_data_skipping_indices='idx_array_ngrambf_v1'; +SELECT '--'; +SELECT * FROM ngrambf_v1_hasany_test WHERE hasAll(array, ['another test']) SETTINGS force_data_skipping_indices='idx_array_ngrambf_v1'; +SELECT '--'; +SELECT * FROM ngrambf_v1_hasany_test WHERE hasAll(array, ['another example', 'example.com']) ORDER BY id ASC SETTINGS force_data_skipping_indices='idx_array_ngrambf_v1'; + +DROP TABLE tokenbf_v1_hasany_test; +DROP TABLE ngrambf_v1_hasany_test; diff --git a/parser/testdata/02943_variant_element/ast.json b/parser/testdata/02943_variant_element/ast.json new file mode 100644 index 000000000..34b93f3e4 --- /dev/null +++ b/parser/testdata/02943_variant_element/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001313608, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02943_variant_element/metadata.json b/parser/testdata/02943_variant_element/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02943_variant_element/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02943_variant_element/query.sql b/parser/testdata/02943_variant_element/query.sql new file mode 100644 index 000000000..556c0147e --- /dev/null +++ b/parser/testdata/02943_variant_element/query.sql @@ -0,0 +1,16 @@ +set allow_experimental_variant_type=1; +set use_variant_as_common_type=1; + +select variantElement(NULL::Variant(String, UInt64), 'UInt64') from numbers(4); +select variantElement(number::Variant(String, UInt64), 'UInt64') from numbers(4); +select variantElement(number::Variant(String, UInt64), 'String') from numbers(4); +select variantElement((number % 2 ? NULL : number)::Variant(String, UInt64), 'UInt64') from numbers(4); +select variantElement((number % 2 ? NULL : number)::Variant(String, UInt64), 'String') from numbers(4); +select variantElement((number % 2 ? 
NULL : 'str_' || toString(number))::LowCardinality(Nullable(String))::Variant(LowCardinality(String), UInt64), 'LowCardinality(String)') from numbers(4); +select variantElement(NULL::LowCardinality(Nullable(String))::Variant(LowCardinality(String), UInt64), 'LowCardinality(String)') from numbers(4); +select variantElement((number % 2 ? NULL : number)::Variant(Array(UInt64), UInt64), 'Array(UInt64)') from numbers(4); +select variantElement(NULL::Variant(Array(UInt64), UInt64), 'Array(UInt64)') from numbers(4); +select variantElement(number % 2 ? NULL : range(number + 1), 'Array(UInt64)') from numbers(4); + +select variantElement([[(number % 2 ? NULL : number)::Variant(String, UInt64)]], 'UInt64') from numbers(4); + diff --git a/parser/testdata/02944_variant_as_common_type_analyzer/ast.json b/parser/testdata/02944_variant_as_common_type_analyzer/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02944_variant_as_common_type_analyzer/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02944_variant_as_common_type_analyzer/metadata.json b/parser/testdata/02944_variant_as_common_type_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02944_variant_as_common_type_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02944_variant_as_common_type_analyzer/query.sql b/parser/testdata/02944_variant_as_common_type_analyzer/query.sql new file mode 100644 index 000000000..7947c8a0c --- /dev/null +++ b/parser/testdata/02944_variant_as_common_type_analyzer/query.sql @@ -0,0 +1,78 @@ +-- this test is just like 02944_variant_as_common_type, but with different expected output, because +-- analyzer changes some return types. Specifically, if(c, x, y) always casts to the common type of +-- x and y, even if c is constant. 
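+
+-- A minimal sketch of the difference described above, kept as a comment so it does not run as
+-- part of the test (the exact type name is an assumption, based on [1,2,3] being inferred as
+-- Array(UInt8)):
+--     select toTypeName(if(1, [1,2,3], 'str'));
+-- Under the analyzer with use_variant_as_common_type = 1 this should return
+-- 'Variant(Array(UInt8), String)'; the old interpreter folds the constant condition and returns
+-- the type of the taken branch, 'Array(UInt8)'.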
+set enable_analyzer=1; + +set allow_experimental_variant_type=1; +set use_variant_as_common_type=1; + +select toTypeName(res), if(1, [1,2,3], 'str_1') as res; +select toTypeName(res), if(1, [1,2,3], 'str_1'::Nullable(String)) as res; + +select toTypeName(res), if(0, [1,2,3], 'str_1') as res; +select toTypeName(res), if(0, [1,2,3], 'str_1'::Nullable(String)) as res; + +select toTypeName(res), if(NULL, [1,2,3], 'str_1') as res; +select toTypeName(res), if(NULL, [1,2,3], 'str_1'::Nullable(String)) as res; + +select toTypeName(res), if(materialize(NULL::Nullable(UInt8)), [1,2,3], 'str_1') as res; +select toTypeName(res), if(materialize(NULL::Nullable(UInt8)), [1,2,3], 'str_1'::Nullable(String)) as res; + +select toTypeName(res), if(1, materialize([1,2,3]), 'str_1') as res; +select toTypeName(res), if(1, materialize([1,2,3]), 'str_1'::Nullable(String)) as res; + +select toTypeName(res), if(0, materialize([1,2,3]), 'str_1') as res; +select toTypeName(res), if(0, materialize([1,2,3]), 'str_1'::Nullable(String)) as res; + +select toTypeName(res), if(NULL, materialize([1,2,3]), 'str_1') as res; +select toTypeName(res), if(NULL, materialize([1,2,3]), 'str_1'::Nullable(String)) as res; + +select toTypeName(res), if(materialize(NULL::Nullable(UInt8)), materialize([1,2,3]), 'str_1') as res; +select toTypeName(res), if(materialize(NULL::Nullable(UInt8)), materialize([1,2,3]), 'str_1'::Nullable(String)) as res; + +select toTypeName(res), if(1, [1,2,3], materialize('str_1')) as res; +select toTypeName(res), if(1, [1,2,3], materialize('str_1')::Nullable(String)) as res; + +select toTypeName(res), if(0, [1,2,3], materialize('str_1')) as res; +select toTypeName(res), if(0, [1,2,3], materialize('str_1')::Nullable(String)) as res; + +select toTypeName(res), if(NULL, [1,2,3], materialize('str_1')) as res; +select toTypeName(res), if(NULL, [1,2,3], materialize('str_1')::Nullable(String)) as res; + +select toTypeName(res), if(materialize(NULL::Nullable(UInt8)), [1,2,3], materialize('str_1')) as res; +select toTypeName(res), if(materialize(NULL::Nullable(UInt8)), [1,2,3], materialize('str_1')::Nullable(String)) as res; + + +select toTypeName(res), if(0, range(number + 1), 'str_' || toString(number)) as res from numbers(4); +select toTypeName(res), if(0, range(number + 1), ('str_' || toString(number))::Nullable(String)) as res from numbers(4); + +select toTypeName(res), if(1, range(number + 1), 'str_' || toString(number)) as res from numbers(4); +select toTypeName(res), if(1, range(number + 1), ('str_' || toString(number))::Nullable(String)) as res from numbers(4); + +select toTypeName(res), if(NULL, range(number + 1), 'str_' || toString(number)) as res from numbers(4); +select toTypeName(res), if(NULL, range(number + 1), ('str_' || toString(number))::Nullable(String)) as res from numbers(4); + +select toTypeName(res), if(materialize(NULL::Nullable(UInt8)), range(number + 1), 'str_' || toString(number)) as res from numbers(4); +select toTypeName(res), if(materialize(NULL::Nullable(UInt8)), range(number + 1), ('str_' || toString(number))::Nullable(String)) as res from numbers(4); + +select toTypeName(res), if(number % 2, range(number + 1), 'str_' || toString(number)) as res from numbers(4); +select toTypeName(res), if(number % 2, range(number + 1), ('str_' || toString(number))::Nullable(String)) as res from numbers(4); + +select toTypeName(res), if(number % 2, range(number + 1), ('str_' || toString(number))::LowCardinality(String)) as res from numbers(4); +select toTypeName(res), if(number % 2, range(number + 1), 
('str_' || toString(number))::LowCardinality(Nullable(String))) as res from numbers(4); + + +select toTypeName(res), multiIf(number % 3 == 0, range(number + 1), number % 3 == 1, number, 'str_' || toString(number)) as res from numbers(6); +select toTypeName(res), multiIf(number % 3 == 0, range(number + 1), number % 3 == 1, number, ('str_' || toString(number))::Nullable(String)) as res from numbers(6); +select toTypeName(res), multiIf(number % 3 == 0, range(number + 1), number % 3 == 1, number, ('str_' || toString(number))::LowCardinality(String)) as res from numbers(6); +select toTypeName(res), multiIf(number % 3 == 0, range(number + 1), number % 3 == 1, number, ('str_' || toString(number))::LowCardinality(Nullable(String))) as res from numbers(6); + + +select toTypeName(res), array(1, 'str_1', 2, 'str_2') as res; +select toTypeName(res), array([1, 2, 3], ['str_1', 'str_2', 'str_3']) as res; +select toTypeName(res), array(array([1, 2, 3], ['str_1', 'str_2', 'str_3']), [1, 2, 3]) as res; +select toTypeName(res), array([1, 2, 3], [[1, 2, 3]]) as res; + +select toTypeName(res), map('a', 1, 'b', 'str_1') as res; +select toTypeName(res), map('a', 1, 'b', map('c', 2, 'd', 'str_1')) as res; +select toTypeName(res), map('a', 1, 'b', [1, 2, 3], 'c', [[4, 5, 6]]) as res; diff --git a/parser/testdata/02945_blake3_msan/ast.json b/parser/testdata/02945_blake3_msan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02945_blake3_msan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02945_blake3_msan/metadata.json b/parser/testdata/02945_blake3_msan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02945_blake3_msan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02945_blake3_msan/query.sql b/parser/testdata/02945_blake3_msan/query.sql new file mode 100644 index 000000000..cad9b6292 --- /dev/null +++ b/parser/testdata/02945_blake3_msan/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest +-- https://github.com/ClickHouse/ClickHouse/issues/57810 +SELECT hex(BLAKE3(BLAKE3('a'))); diff --git a/parser/testdata/02946_literal_alias_misclassification/ast.json b/parser/testdata/02946_literal_alias_misclassification/ast.json new file mode 100644 index 000000000..c3d53bdf9 --- /dev/null +++ b/parser/testdata/02946_literal_alias_misclassification/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery literal_alias_misclassification (children 1)" + }, + { + "explain": " Identifier literal_alias_misclassification" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001204359, + "rows_read": 2, + "bytes_read": 114 + } +} diff --git a/parser/testdata/02946_literal_alias_misclassification/metadata.json b/parser/testdata/02946_literal_alias_misclassification/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02946_literal_alias_misclassification/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02946_literal_alias_misclassification/query.sql b/parser/testdata/02946_literal_alias_misclassification/query.sql new file mode 100644 index 000000000..0d001bf1e --- /dev/null +++ b/parser/testdata/02946_literal_alias_misclassification/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS literal_alias_misclassification; + +CREATE TABLE literal_alias_misclassification +( + `id` Int64, + `a` Nullable(String), + `b` Nullable(Int64) +) +ENGINE = 
MergeTree +ORDER BY id; + + +INSERT INTO literal_alias_misclassification values(1, 'a', 1); +INSERT INTO literal_alias_misclassification values(2, 'b', 2); + +SELECT 'const' AS r, b +FROM + ( SELECT a AS r, b FROM literal_alias_misclassification ) AS t1 + LEFT JOIN + ( SELECT a AS r FROM literal_alias_misclassification ) AS t2 + ON t1.r = t2.r +ORDER BY b; + +DROP TABLE IF EXISTS literal_alias_misclassification; diff --git a/parser/testdata/02946_materialize_column_must_not_override_past_values/ast.json b/parser/testdata/02946_materialize_column_must_not_override_past_values/ast.json new file mode 100644 index 000000000..2fc85b46d --- /dev/null +++ b/parser/testdata/02946_materialize_column_must_not_override_past_values/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001075176, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02946_materialize_column_must_not_override_past_values/metadata.json b/parser/testdata/02946_materialize_column_must_not_override_past_values/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02946_materialize_column_must_not_override_past_values/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02946_materialize_column_must_not_override_past_values/query.sql b/parser/testdata/02946_materialize_column_must_not_override_past_values/query.sql new file mode 100644 index 000000000..cfdde2877 --- /dev/null +++ b/parser/testdata/02946_materialize_column_must_not_override_past_values/query.sql @@ -0,0 +1,85 @@ +SET mutations_sync = 2; + +DROP TABLE IF EXISTS tab; + +-- Tests that existing parts which contain a non-default value in columns with a DEFAULT expression remain unchanged by MATERIALIZE COLUMN. +SELECT 'DEFAULT expressions'; + +SELECT '-- Compact parts'; + +CREATE TABLE tab (id Int64, dflt Int64 DEFAULT 54321) ENGINE MergeTree ORDER BY id; +INSERT INTO tab (id, dflt) VALUES (1, 1); +INSERT INTO tab (id) VALUES (2); +SELECT 'Before materialize'; +SELECT * FROM tab ORDER BY id; +ALTER TABLE tab MATERIALIZE COLUMN dflt; +SELECT 'After materialize'; +SELECT * FROM tab ORDER BY id; +DROP TABLE tab; + +SELECT '-- Wide parts'; + +CREATE TABLE tab (id Int64, dflt Int64 DEFAULT 54321) ENGINE MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part = 1; +INSERT INTO tab (id, dflt) VALUES (1, 1); +INSERT INTO tab (id) VALUES (2); +SELECT 'Before materialize'; +SELECT * FROM tab ORDER BY id; +ALTER TABLE tab MATERIALIZE COLUMN dflt; +SELECT 'After materialize'; +SELECT * FROM tab ORDER BY id; +DROP TABLE tab; + +SELECT '-- Nullable column != physically absent'; + +CREATE TABLE tab (id Int64, dflt Nullable(Int64) DEFAULT 54321) ENGINE MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part = 1; +INSERT INTO tab (id, dflt) VALUES (1, 1); +INSERT INTO tab (id, dflt) VALUES (2, NULL); +INSERT INTO tab (id) VALUES (3); +SELECT 'Before materialize'; +SELECT * FROM tab ORDER BY id; +ALTER TABLE tab MATERIALIZE COLUMN dflt; +SELECT 'After materialize'; +SELECT * FROM tab ORDER BY id; +DROP TABLE tab; + +SELECT '-- Parts with renamed column'; + +CREATE TABLE tab (id Int64, dflt Int64 DEFAULT 54321) ENGINE MergeTree ORDER BY id; +INSERT INTO tab (id, dflt) VALUES (1, 1); +INSERT INTO tab (id) VALUES (2); +SELECT 'Before materialize'; +SELECT * FROM tab ORDER BY id; +ALTER TABLE tab RENAME COLUMN dflt TO dflt2; +SELECT 'After rename'; +SELECT * FROM tab
ORDER BY id; +ALTER TABLE tab MATERIALIZE COLUMN dflt2; +SELECT 'After materialize'; +SELECT * FROM tab ORDER BY id; +DROP TABLE tab; + +-- But for columns with a MATERIALIZED expression, all existing parts should be rewritten in case a new expression was set in the meantime. +SELECT 'MATERIALIZED expressions'; + +SELECT '-- Compact parts'; + +CREATE TABLE tab (id Int64, mtrl Int64 MATERIALIZED 54321) ENGINE MergeTree ORDER BY id; +INSERT INTO tab (id) VALUES (1); +SELECT 'Before materialize'; +SELECT id, mtrl FROM tab ORDER BY id; +ALTER TABLE tab MODIFY COLUMN mtrl Int64 MATERIALIZED 65432; +ALTER TABLE tab MATERIALIZE COLUMN mtrl; +SELECT 'After materialize'; +SELECT id, mtrl FROM tab ORDER BY id; +DROP TABLE tab; + +SELECT '-- Wide parts'; + +CREATE TABLE tab (id Int64, mtrl Int64 MATERIALIZED 54321) ENGINE MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part = 1; +INSERT INTO tab (id) VALUES (1); +SELECT 'Before materialize'; +SELECT id, mtrl FROM tab ORDER BY id; +ALTER TABLE tab MODIFY COLUMN mtrl Int64 MATERIALIZED 65432; +ALTER TABLE tab MATERIALIZE COLUMN mtrl; +SELECT 'After materialize'; +SELECT id, mtrl FROM tab ORDER BY id; +DROP TABLE tab; diff --git a/parser/testdata/02946_merge_tree_final_split_ranges_by_primary_key/ast.json b/parser/testdata/02946_merge_tree_final_split_ranges_by_primary_key/ast.json new file mode 100644 index 000000000..4a037be7c --- /dev/null +++ b/parser/testdata/02946_merge_tree_final_split_ranges_by_primary_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001176433, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02946_merge_tree_final_split_ranges_by_primary_key/metadata.json b/parser/testdata/02946_merge_tree_final_split_ranges_by_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02946_merge_tree_final_split_ranges_by_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02946_merge_tree_final_split_ranges_by_primary_key/query.sql b/parser/testdata/02946_merge_tree_final_split_ranges_by_primary_key/query.sql new file mode 100644 index 000000000..780ed5b79 --- /dev/null +++ b/parser/testdata/02946_merge_tree_final_split_ranges_by_primary_key/query.sql @@ -0,0 +1,34 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=ReplacingMergeTree ORDER BY id SETTINGS index_granularity = 2; + +INSERT INTO test_table SELECT 0, '0'; +INSERT INTO test_table SELECT number + 1, number + 1 FROM numbers(15); +OPTIMIZE TABLE test_table; + +SELECT COUNT() FROM system.parts WHERE database = currentDatabase() AND table = 'test_table' AND active = 1; +SYSTEM STOP MERGES test_table; + +SELECT '--'; + +SELECT id, value FROM test_table FINAL ORDER BY id; + +SELECT '--'; + +INSERT INTO test_table SELECT 5, '5'; +SELECT id, value FROM test_table FINAL ORDER BY id; + +SELECT '--'; + +INSERT INTO test_table SELECT number + 8, number + 8 FROM numbers(8); +SELECT id, value FROM test_table FINAL ORDER BY id; + +SELECT '--'; + +INSERT INTO test_table SELECT number, number FROM numbers(32); +SELECT id, value FROM test_table FINAL ORDER BY id; + +DROP TABLE test_table; diff --git a/parser/testdata/02946_parallel_replicas_distributed/ast.json
b/parser/testdata/02946_parallel_replicas_distributed/ast.json new file mode 100644 index 000000000..3803f9948 --- /dev/null +++ b/parser/testdata/02946_parallel_replicas_distributed/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00120041, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02946_parallel_replicas_distributed/metadata.json b/parser/testdata/02946_parallel_replicas_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02946_parallel_replicas_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02946_parallel_replicas_distributed/query.sql b/parser/testdata/02946_parallel_replicas_distributed/query.sql new file mode 100644 index 000000000..c151feff8 --- /dev/null +++ b/parser/testdata/02946_parallel_replicas_distributed/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS test; +DROP TABLE IF EXISTS test_d; + +CREATE TABLE test (id UInt64, date Date) +ENGINE = MergeTree +ORDER BY id +AS select *, '2023-12-25' from numbers(100); + +CREATE TABLE IF NOT EXISTS test_d as test +ENGINE = Distributed(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test); + +SET parallel_replicas_only_with_analyzer = 0; -- necessary for CI run with disabled analyzer + +SELECT count(), sum(id) +FROM test_d +SETTINGS enable_parallel_replicas = 2, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree=1; + +DROP TABLE test_d; +DROP TABLE test; diff --git a/parser/testdata/02946_parallel_replicas_force_primary_key/ast.json b/parser/testdata/02946_parallel_replicas_force_primary_key/ast.json new file mode 100644 index 000000000..583872d91 --- /dev/null +++ b/parser/testdata/02946_parallel_replicas_force_primary_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00110794, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02946_parallel_replicas_force_primary_key/metadata.json b/parser/testdata/02946_parallel_replicas_force_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02946_parallel_replicas_force_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02946_parallel_replicas_force_primary_key/query.sql b/parser/testdata/02946_parallel_replicas_force_primary_key/query.sql new file mode 100644 index 000000000..b30793241 --- /dev/null +++ b/parser/testdata/02946_parallel_replicas_force_primary_key/query.sql @@ -0,0 +1,49 @@ +DROP TABLE IF EXISTS t1 SYNC; +DROP TABLE IF EXISTS t2 SYNC; +DROP TABLE IF EXISTS t3 SYNC; + +CREATE TABLE t1(k UInt32, v String) ENGINE ReplicatedMergeTree('/02946_parallel_replicas/{database}/test_tbl', 'r1') ORDER BY k; +CREATE TABLE t2(k UInt32, v String) ENGINE ReplicatedMergeTree('/02946_parallel_replicas/{database}/test_tbl', 'r2') ORDER BY k; +CREATE TABLE t3(k UInt32, v String) ENGINE ReplicatedMergeTree('/02946_parallel_replicas/{database}/test_tbl', 'r3') ORDER BY k; + +insert into t1 select number % 4, toString(number) from numbers(1000, 1000); +insert into t2 select number % 4, toString(number) from 
numbers(2000, 1000); +insert into t3 select number % 4, toString(number) from numbers(3000, 1000); + +system sync replica t1; +system sync replica t2; +system sync replica t3; + +-- w/o parallel replicas +SELECT + k, + count() +FROM t1 +WHERE k > 0 +GROUP BY k +ORDER BY k +SETTINGS force_primary_key = 1, enable_parallel_replicas = 0; + +-- parallel replicas, primary key is used +SET enable_parallel_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; +SELECT + k, + count() +FROM t1 +WHERE k > 0 +GROUP BY k +ORDER BY k +SETTINGS force_primary_key = 1; + +-- parallel replicas, primary key is NOT used +SELECT + k, + count() +FROM t1 +GROUP BY k +ORDER BY k +SETTINGS force_primary_key = 1; -- { serverError INDEX_NOT_USED } + +DROP TABLE t1 SYNC; +DROP TABLE t2 SYNC; +DROP TABLE t3 SYNC; diff --git a/parser/testdata/02947_dropped_tables_parts/ast.json b/parser/testdata/02947_dropped_tables_parts/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02947_dropped_tables_parts/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02947_dropped_tables_parts/metadata.json b/parser/testdata/02947_dropped_tables_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02947_dropped_tables_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02947_dropped_tables_parts/query.sql b/parser/testdata/02947_dropped_tables_parts/query.sql new file mode 100644 index 000000000..554a19ca6 --- /dev/null +++ b/parser/testdata/02947_dropped_tables_parts/query.sql @@ -0,0 +1,14 @@ + +DROP TABLE IF EXISTS 02947_table_1; +DROP TABLE IF EXISTS 02947_table_2; + +CREATE TABLE 02947_table_1 (id Int32) Engine=MergeTree() ORDER BY id; +CREATE TABLE 02947_table_2 (id Int32) Engine=MergeTree() ORDER BY id; +INSERT INTO 02947_table_1 VALUES (1),(2); +INSERT INTO 02947_table_2 VALUES (3),(4); + +SELECT database, table, name FROM system.parts WHERE database = currentDatabase() AND startsWith(table, '02947_table_'); +select * from system.dropped_tables_parts format Null; + +DROP TABLE 02947_table_1; +DROP TABLE 02947_table_2; diff --git a/parser/testdata/02947_merge_tree_index_table_1/ast.json b/parser/testdata/02947_merge_tree_index_table_1/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02947_merge_tree_index_table_1/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02947_merge_tree_index_table_1/metadata.json b/parser/testdata/02947_merge_tree_index_table_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02947_merge_tree_index_table_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02947_merge_tree_index_table_1/query.sql b/parser/testdata/02947_merge_tree_index_table_1/query.sql new file mode 100644 index 000000000..ab3be8eb6 --- /dev/null +++ b/parser/testdata/02947_merge_tree_index_table_1/query.sql @@ -0,0 +1,43 @@ +-- Tags: no-random-settings + +DROP TABLE IF EXISTS t_merge_tree_index; + +CREATE TABLE t_merge_tree_index (a UInt64 CODEC(LZ4), b UInt64 CODEC(LZ4), s String CODEC(LZ4)) +ENGINE = MergeTree ORDER BY (a, b) +SETTINGS + index_granularity = 3, + min_bytes_for_wide_part = 0, + ratio_of_defaults_for_sparse_serialization = 1.0, + serialization_info_version = 'basic', + compact_parts_max_granules_to_buffer = 1; + +SYSTEM STOP MERGES t_merge_tree_index; + +INSERT INTO t_merge_tree_index 
SELECT number % 5, number, 'v' || toString(number * number) FROM numbers(10); +INSERT INTO t_merge_tree_index SELECT number % 5, number, 'v' || toString(number * number) FROM numbers(10, 10); + +SELECT * FROM t_merge_tree_index ORDER BY _part, a, b; +SELECT * FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index) ORDER BY part_name, mark_number FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT * FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true) ORDER BY part_name, mark_number FORMAT PrettyCompactNoEscapesMonoBlock; + +DROP TABLE t_merge_tree_index; + +CREATE TABLE t_merge_tree_index (a UInt64 CODEC(LZ4), b UInt64 CODEC(LZ4), s String CODEC(LZ4)) +ENGINE = MergeTree ORDER BY (a, b) +SETTINGS + index_granularity = 3, + min_bytes_for_wide_part = '1G', + ratio_of_defaults_for_sparse_serialization = 1.0, + serialization_info_version = 'basic', + compact_parts_max_granules_to_buffer = 1; + +SYSTEM STOP MERGES t_merge_tree_index; + +INSERT INTO t_merge_tree_index SELECT number % 4, number, 'v' || toString(number * number) FROM numbers(10); +INSERT INTO t_merge_tree_index SELECT number % 4, number, 'v' || toString(number * number) FROM numbers(10, 10); + +SELECT * FROM t_merge_tree_index ORDER BY _part, a, b; +SELECT * FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index) ORDER BY part_name, mark_number FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT * FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true) ORDER BY part_name, mark_number FORMAT PrettyCompactNoEscapesMonoBlock; + +DROP TABLE t_merge_tree_index; diff --git a/parser/testdata/02947_merge_tree_index_table_2/ast.json b/parser/testdata/02947_merge_tree_index_table_2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02947_merge_tree_index_table_2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02947_merge_tree_index_table_2/metadata.json b/parser/testdata/02947_merge_tree_index_table_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02947_merge_tree_index_table_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02947_merge_tree_index_table_2/query.sql b/parser/testdata/02947_merge_tree_index_table_2/query.sql new file mode 100644 index 000000000..04ce88c3b --- /dev/null +++ b/parser/testdata/02947_merge_tree_index_table_2/query.sql @@ -0,0 +1,44 @@ +-- Tags: no-random-settings + +DROP TABLE IF EXISTS t_merge_tree_index; + +SET output_format_pretty_row_numbers = 0; +SET print_pretty_type_names = 0; + +CREATE TABLE t_merge_tree_index +( + `a` UInt64, + `b` UInt64, + `sp` UInt64, + `arr` Array(LowCardinality(String)), + `n` Nested(c1 String, c2 UInt64), + `t` Tuple(c1 UInt64, c2 UInt64), + `column.with.dots` UInt64 +) +ENGINE = MergeTree +ORDER BY (a, b, sipHash64(sp) % 100) +SETTINGS + index_granularity = 3, + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 6, + ratio_of_defaults_for_sparse_serialization = 0.9, + write_marks_for_substreams_in_compact_parts=0, + serialization_info_version = 'basic', + compact_parts_max_granules_to_buffer = 1; + +SYSTEM STOP MERGES t_merge_tree_index; + +INSERT INTO t_merge_tree_index SELECT number % 5, number, 0, ['foo', 'bar'], ['aaa', 'bbb', 'ccc'], [11, 22, 33], (number, number), number FROM numbers(10); + +ALTER TABLE t_merge_tree_index ADD COLUMN c UInt64 AFTER b; + +INSERT INTO t_merge_tree_index SELECT number % 5, number, number, 10, ['foo', 'bar'], ['aaa', 'bbb', 'ccc'], [11, 22, 33], (number, 
number), number FROM numbers(5); +INSERT INTO t_merge_tree_index SELECT number % 5, number, number, 10, ['foo', 'bar'], ['aaa', 'bbb', 'ccc'], [11, 22, 33], (number, number), number FROM numbers(10); + +SELECT * FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index) ORDER BY part_name, mark_number FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT * FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true) ORDER BY part_name, mark_number FORMAT PrettyCompactNoEscapesMonoBlock; + +SET describe_compact_output = 1; +DESCRIBE mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true); + +DROP TABLE t_merge_tree_index; diff --git a/parser/testdata/02947_merge_tree_index_table_4/ast.json b/parser/testdata/02947_merge_tree_index_table_4/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02947_merge_tree_index_table_4/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02947_merge_tree_index_table_4/metadata.json b/parser/testdata/02947_merge_tree_index_table_4/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02947_merge_tree_index_table_4/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02947_merge_tree_index_table_4/query.sql b/parser/testdata/02947_merge_tree_index_table_4/query.sql new file mode 100644 index 000000000..4bcee87b9 --- /dev/null +++ b/parser/testdata/02947_merge_tree_index_table_4/query.sql @@ -0,0 +1,44 @@ +-- Tags: no-random-settings + +DROP TABLE IF EXISTS t_merge_tree_index; + +SET output_format_pretty_row_numbers = 0; +SET print_pretty_type_names = 0; + +CREATE TABLE t_merge_tree_index +( + `a` UInt64, + `b` UInt64, + `sp` UInt64, + `arr` Array(LowCardinality(String)), + `n` Nested(c1 String, c2 UInt64), + `t` Tuple(c1 UInt64, c2 UInt64), + `column.with.dots` UInt64 +) +ENGINE = MergeTree +ORDER BY (a, b, sipHash64(sp) % 100) +SETTINGS + index_granularity = 3, + min_bytes_for_wide_part = 0, + min_rows_for_wide_part = 6, + ratio_of_defaults_for_sparse_serialization = 0.9, + serialization_info_version = 'basic', + write_marks_for_substreams_in_compact_parts=1; + +SYSTEM STOP MERGES t_merge_tree_index; + +INSERT INTO t_merge_tree_index SELECT number % 5, number, 0, ['foo', 'bar'], ['aaa', 'bbb', 'ccc'], [11, 22, 33], (number, number), number FROM numbers(10); + +ALTER TABLE t_merge_tree_index ADD COLUMN c UInt64 AFTER b; + +INSERT INTO t_merge_tree_index SELECT number % 5, number, number, 10, ['foo', 'bar'], ['aaa', 'bbb', 'ccc'], [11, 22, 33], (number, number), number FROM numbers(5); +INSERT INTO t_merge_tree_index SELECT number % 5, number, number, 10, ['foo', 'bar'], ['aaa', 'bbb', 'ccc'], [11, 22, 33], (number, number), number FROM numbers(10); + +SELECT * FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index) ORDER BY part_name, mark_number FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT * FROM mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true) ORDER BY part_name, mark_number FORMAT PrettyCompactNoEscapesMonoBlock; + +SET describe_compact_output = 1; +DESCRIBE mergeTreeIndex(currentDatabase(), t_merge_tree_index, with_marks = true); + +DROP TABLE t_merge_tree_index; + diff --git a/parser/testdata/02947_parallel_replicas_remote/ast.json b/parser/testdata/02947_parallel_replicas_remote/ast.json new file mode 100644 index 000000000..0a97b2c02 --- /dev/null +++ b/parser/testdata/02947_parallel_replicas_remote/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": 
"String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001546932, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02947_parallel_replicas_remote/metadata.json b/parser/testdata/02947_parallel_replicas_remote/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02947_parallel_replicas_remote/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02947_parallel_replicas_remote/query.sql b/parser/testdata/02947_parallel_replicas_remote/query.sql new file mode 100644 index 000000000..ae8ee2c63 --- /dev/null +++ b/parser/testdata/02947_parallel_replicas_remote/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test (id UInt64, date Date) +ENGINE = MergeTree +ORDER BY id +AS select *, '2023-12-25' from numbers(100); + +SET parallel_replicas_only_with_analyzer = 0; -- necessary for CI run with disabled analyzer + +-- when the query plan is serialized for distributed query, parallel replicas are not enabled because +-- (with prefer_localhost_replica) because all reading steps are ReadFromTable instead of ReadFromMergeTree +SET serialize_query_plan = 0; + +SELECT count(), sum(id) +FROM remote('127.0.0.1|127.0.0.2|127.0.0.3|127.0.0.4', currentDatabase(), test) +SETTINGS enable_parallel_replicas = 1, max_parallel_replicas = 4, prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree = 1; + +DROP TABLE test; diff --git a/parser/testdata/02949_parallel_replicas_in_subquery/ast.json b/parser/testdata/02949_parallel_replicas_in_subquery/ast.json new file mode 100644 index 000000000..dcfeff5e2 --- /dev/null +++ b/parser/testdata/02949_parallel_replicas_in_subquery/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery merge_tree_in_subqueries (children 1)" + }, + { + "explain": " Identifier merge_tree_in_subqueries" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001123028, + "rows_read": 2, + "bytes_read": 100 + } +} diff --git a/parser/testdata/02949_parallel_replicas_in_subquery/metadata.json b/parser/testdata/02949_parallel_replicas_in_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02949_parallel_replicas_in_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02949_parallel_replicas_in_subquery/query.sql b/parser/testdata/02949_parallel_replicas_in_subquery/query.sql new file mode 100644 index 000000000..2ba438bbe --- /dev/null +++ b/parser/testdata/02949_parallel_replicas_in_subquery/query.sql @@ -0,0 +1,32 @@ +DROP TABLE IF EXISTS merge_tree_in_subqueries; +CREATE TABLE merge_tree_in_subqueries (id UInt64, name String, num UInt64) ENGINE = MergeTree ORDER BY (id, name); +INSERT INTO merge_tree_in_subqueries VALUES(1, 'test1', 42); +INSERT INTO merge_tree_in_subqueries VALUES(2, 'test2', 8); +INSERT INTO merge_tree_in_subqueries VALUES(3, 'test3', 8); +INSERT INTO merge_tree_in_subqueries VALUES(4, 'test4', 1985); +INSERT INTO merge_tree_in_subqueries VALUES(5, 'test5', 0); + +SET parallel_replicas_only_with_analyzer = 0; -- necessary for CI run with disabled analyzer + +SET max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost', parallel_replicas_for_non_replicated_merge_tree=1; + +SELECT * FROM 
merge_tree_in_subqueries WHERE id IN (SELECT * FROM system.numbers LIMIT 0) SETTINGS enable_parallel_replicas=2, parallel_replicas_allow_in_with_subquery=0; -- { serverError SUPPORT_IS_DISABLED } +SELECT * FROM merge_tree_in_subqueries WHERE id IN (SELECT * FROM system.numbers LIMIT 0) SETTINGS enable_parallel_replicas=2, parallel_replicas_allow_in_with_subquery=1; +SELECT * FROM merge_tree_in_subqueries WHERE id IN (SELECT * FROM system.numbers LIMIT 0) SETTINGS enable_parallel_replicas=1; + +SELECT '---'; +SELECT * FROM merge_tree_in_subqueries WHERE id IN (SELECT * FROM system.numbers LIMIT 2, 3) ORDER BY id SETTINGS enable_parallel_replicas=2, parallel_replicas_allow_in_with_subquery=0; -- { serverError SUPPORT_IS_DISABLED }; +SELECT * FROM merge_tree_in_subqueries WHERE id IN (SELECT * FROM system.numbers LIMIT 2, 3) ORDER BY id SETTINGS enable_parallel_replicas=2, parallel_replicas_allow_in_with_subquery=1; +SELECT * FROM merge_tree_in_subqueries WHERE id IN (SELECT * FROM system.numbers LIMIT 2, 3) ORDER BY id SETTINGS enable_parallel_replicas=1; + +SELECT '---'; +SELECT * FROM merge_tree_in_subqueries WHERE id IN (SELECT 1) ORDER BY id SETTINGS enable_parallel_replicas=2, parallel_replicas_allow_in_with_subquery=0; -- { serverError SUPPORT_IS_DISABLED }; +SELECT * FROM merge_tree_in_subqueries WHERE id IN (SELECT 1) ORDER BY id SETTINGS enable_parallel_replicas=2, parallel_replicas_allow_in_with_subquery=1; +SELECT * FROM merge_tree_in_subqueries WHERE id IN (SELECT 1) ORDER BY id SETTINGS enable_parallel_replicas=1; + +-- IN with tuples is allowed +SELECT '---'; +SELECT id, name FROM merge_tree_in_subqueries WHERE (id, name) IN (3, 'test3') SETTINGS enable_parallel_replicas=2, parallel_replicas_allow_in_with_subquery=0; +SELECT id, name FROM merge_tree_in_subqueries WHERE (id, name) IN (3, 'test3') SETTINGS enable_parallel_replicas=2, parallel_replicas_allow_in_with_subquery=1; + +DROP TABLE IF EXISTS merge_tree_in_subqueries; diff --git a/parser/testdata/02949_parallel_replicas_scalar_subquery_big_integer/ast.json b/parser/testdata/02949_parallel_replicas_scalar_subquery_big_integer/ast.json new file mode 100644 index 000000000..e6e355b42 --- /dev/null +++ b/parser/testdata/02949_parallel_replicas_scalar_subquery_big_integer/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00124687, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02949_parallel_replicas_scalar_subquery_big_integer/metadata.json b/parser/testdata/02949_parallel_replicas_scalar_subquery_big_integer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02949_parallel_replicas_scalar_subquery_big_integer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02949_parallel_replicas_scalar_subquery_big_integer/query.sql b/parser/testdata/02949_parallel_replicas_scalar_subquery_big_integer/query.sql new file mode 100644 index 000000000..3b5f6277d --- /dev/null +++ b/parser/testdata/02949_parallel_replicas_scalar_subquery_big_integer/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (x UInt8) ENGINE = MergeTree ORDER BY x; +INSERT INTO test VALUES (1), (2), (3); + +SET enable_parallel_replicas = 1, max_parallel_replicas = 2, cluster_for_parallel_replicas = 
'test_cluster_one_shard_three_replicas_localhost', prefer_localhost_replica = 0, parallel_replicas_for_non_replicated_merge_tree = 1; + +WITH (SELECT '111111111111111111111111111111111111111'::UInt128) AS v SELECT sum(x), max(v) FROM test; + +DROP TABLE test; diff --git a/parser/testdata/02949_ttl_group_by_bug/ast.json b/parser/testdata/02949_ttl_group_by_bug/ast.json new file mode 100644 index 000000000..b81ffa209 --- /dev/null +++ b/parser/testdata/02949_ttl_group_by_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00149461, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02949_ttl_group_by_bug/metadata.json b/parser/testdata/02949_ttl_group_by_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02949_ttl_group_by_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02949_ttl_group_by_bug/query.sql b/parser/testdata/02949_ttl_group_by_bug/query.sql new file mode 100644 index 000000000..83776cc9d --- /dev/null +++ b/parser/testdata/02949_ttl_group_by_bug/query.sql @@ -0,0 +1,31 @@ +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +DROP TABLE IF EXISTS ttl_group_by_bug; + +CREATE TABLE ttl_group_by_bug +(key UInt32, ts DateTime, value UInt32, min_value UInt32 default value, max_value UInt32 default value) +ENGINE = MergeTree() +ORDER BY (key, toStartOfInterval(ts, toIntervalMinute(3)), ts) +TTL ts + INTERVAL 5 MINUTE GROUP BY key, toStartOfInterval(ts, toIntervalMinute(3)) +SET value = sum(value), min_value = min(min_value), max_value = max(max_value), ts=min(toStartOfInterval(ts, toIntervalMinute(3))); + +INSERT INTO ttl_group_by_bug(key, ts, value) SELECT number%5 as key, now() - interval 10 minute + number, 0 FROM numbers(1000); + +OPTIMIZE TABLE ttl_group_by_bug FINAL; + +SELECT * +FROM +( + SELECT + _part, + rowNumberInAllBlocks(), + (key, toStartOfInterval(ts, toIntervalMinute(3)), ts) AS cur, + lagInFrame((key, toStartOfInterval(ts, toIntervalMinute(3)), ts), 1) OVER () AS prev, + 1 + FROM ttl_group_by_bug +) +WHERE cur < prev +LIMIT 2 +SETTINGS max_threads = 1; + +DROP TABLE IF EXISTS ttl_group_by_bug; diff --git a/parser/testdata/02950_dictionary_short_circuit/ast.json b/parser/testdata/02950_dictionary_short_circuit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02950_dictionary_short_circuit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02950_dictionary_short_circuit/metadata.json b/parser/testdata/02950_dictionary_short_circuit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02950_dictionary_short_circuit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02950_dictionary_short_circuit/query.sql b/parser/testdata/02950_dictionary_short_circuit/query.sql new file mode 100644 index 000000000..c613709a9 --- /dev/null +++ b/parser/testdata/02950_dictionary_short_circuit/query.sql @@ -0,0 +1,270 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS dictionary_source_table; +CREATE TABLE dictionary_source_table +( + id UInt64, + v1 String, + v2 Nullable(String), + v3 Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO dictionary_source_table VALUES (0, 'zero', 'zero', 0), (1, 'one', NULL, 1); + +DROP DICTIONARY IF EXISTS flat_dictionary; +CREATE 
DICTIONARY flat_dictionary +( + id UInt64, + v1 String, + v2 Nullable(String) DEFAULT NULL, + v3 Nullable(UInt64) +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'dictionary_source_table')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(FLAT()); + +SELECT 'Flat dictionary'; +SELECT dictGetOrDefault('flat_dictionary', ('v1', 'v2'), 0, (intDiv(1, id), intDiv(1, id))) +FROM dictionary_source_table; +SELECT dictGetOrDefault('flat_dictionary', 'v2', id+1, intDiv(NULL, id)) +FROM dictionary_source_table; +SELECT dictGetOrDefault('flat_dictionary', 'v3', id+1, intDiv(NULL, id)) +FROM dictionary_source_table; +DROP DICTIONARY flat_dictionary; + + +DROP DICTIONARY IF EXISTS hashed_dictionary; +CREATE DICTIONARY hashed_dictionary +( + id UInt64, + v1 String, + v2 Nullable(String) DEFAULT NULL, + v3 Nullable(UInt64) +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'dictionary_source_table')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(HASHED()); + +SELECT 'Hashed dictionary'; +SELECT dictGetOrDefault('hashed_dictionary', ('v1', 'v2'), 0, (intDiv(1, id), intDiv(1, id))) +FROM dictionary_source_table; +SELECT dictGetOrDefault('hashed_dictionary', 'v2', id+1, intDiv(NULL, id)) +FROM dictionary_source_table; +SELECT dictGetOrDefault('hashed_dictionary', 'v3', id+1, intDiv(NULL, id)) +FROM dictionary_source_table; +SELECT dictGetOrDefault('hashed_dictionary', 'v2', 1, intDiv(1, id)) +FROM dictionary_source_table; +DROP DICTIONARY hashed_dictionary; + + +DROP DICTIONARY IF EXISTS hashed_array_dictionary; +CREATE DICTIONARY hashed_array_dictionary +( + id UInt64, + v1 String, + v2 Nullable(String) DEFAULT NULL, + v3 Nullable(UInt64) +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'dictionary_source_table')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(HASHED_ARRAY()); + +SELECT 'Hashed array dictionary'; +SELECT dictGetOrDefault('hashed_array_dictionary', ('v1', 'v2'), 0, (intDiv(1, id), intDiv(1, id))) +FROM dictionary_source_table; +SELECT dictGetOrDefault('hashed_array_dictionary', 'v2', id+1, intDiv(NULL, id)) +FROM dictionary_source_table; +SELECT dictGetOrDefault('hashed_array_dictionary', 'v3', id+1, intDiv(NULL, id)) +FROM dictionary_source_table; +-- Fuzzer +SELECT dictGetOrDefault('hashed_array_dictionary', ('v1', 'v2'), toUInt128(0), (materialize(toNullable(NULL)), intDiv(1, id), intDiv(1, id))) FROM dictionary_source_table; -- { serverError TYPE_MISMATCH } +SELECT materialize(materialize(toLowCardinality(15))), dictGetOrDefault('hashed_array_dictionary', ('v1', 'v2'), 0, (intDiv(materialize(NULL), id), intDiv(1, id), intDiv(1, id))) FROM dictionary_source_table; -- { serverError TYPE_MISMATCH } +SELECT dictGetOrDefault('hashed_array_dictionary', ('v1', 'v2'), 0, (toNullable(NULL), intDiv(1, id), intDiv(1, id))) FROM dictionary_source_table; -- { serverError TYPE_MISMATCH } +DROP DICTIONARY hashed_array_dictionary; + + +DROP TABLE IF EXISTS range_dictionary_source_table; +CREATE TABLE range_dictionary_source_table +( + id UInt64, + start Date, + end Nullable(Date), + val Nullable(UInt64) +) ENGINE=TinyLog; + +INSERT INTO range_dictionary_source_table VALUES (0, '2023-01-01', Null, Null), (1, '2022-11-09', '2022-12-08', 1); + +DROP DICTIONARY IF EXISTS range_hashed_dictionary; +CREATE DICTIONARY range_hashed_dictionary +( + id UInt64, + start Date, + end Nullable(Date), + val Nullable(UInt64) +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'range_dictionary_source_table')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(RANGE_HASHED()) +RANGE(MIN start MAX end); + +SELECT 'Range hashed dictionary'; +SELECT dictGetOrDefault('range_hashed_dictionary', 'val', id, 
toDate('2023-01-02'), intDiv(NULL, id)) +FROM range_dictionary_source_table; +DROP DICTIONARY range_hashed_dictionary; +DROP TABLE range_dictionary_source_table; + + +DROP DICTIONARY IF EXISTS cache_dictionary; +CREATE DICTIONARY cache_dictionary +( + id UInt64, + v1 String, + v2 Nullable(String) DEFAULT NULL, + v3 Nullable(UInt64) +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'dictionary_source_table')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(CACHE(SIZE_IN_CELLS 10)); + +SELECT 'Cache dictionary'; +SELECT dictGetOrDefault('cache_dictionary', ('v1', 'v2'), 0, (intDiv(1, id), intDiv(1, id))) +FROM dictionary_source_table; +SELECT dictGetOrDefault('cache_dictionary', 'v2', id+1, intDiv(NULL, id)) +FROM dictionary_source_table; +SELECT dictGetOrDefault('cache_dictionary', 'v3', id+1, intDiv(NULL, id)) +FROM dictionary_source_table; +DROP DICTIONARY cache_dictionary; + + +DROP DICTIONARY IF EXISTS direct_dictionary; +CREATE DICTIONARY direct_dictionary +( + id UInt64, + v1 String, + v2 Nullable(String) DEFAULT NULL, + v3 Nullable(UInt64) +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'dictionary_source_table')) +LAYOUT(DIRECT()); + +SELECT 'Direct dictionary'; +SELECT dictGetOrDefault('direct_dictionary', ('v1', 'v2'), 0, (intDiv(1, id), intDiv(1, id))) +FROM dictionary_source_table; +SELECT dictGetOrDefault('direct_dictionary', 'v2', id+1, intDiv(NULL, id)) +FROM dictionary_source_table; +SELECT dictGetOrDefault('direct_dictionary', 'v3', id+1, intDiv(NULL, id)) +FROM dictionary_source_table; +DROP DICTIONARY direct_dictionary; + + +DROP TABLE dictionary_source_table; + + +DROP TABLE IF EXISTS ip_dictionary_source_table; +CREATE TABLE ip_dictionary_source_table +( + id UInt64, + prefix String, + asn UInt32, + cca2 String +) ENGINE=TinyLog; + +INSERT INTO ip_dictionary_source_table VALUES (0, '202.79.32.0/20', 17501, 'NP'), (1, '2620:0:870::/48', 3856, 'US'), (2, '2a02:6b8:1::/48', 13238, 'RU'); + +DROP DICTIONARY IF EXISTS ip_dictionary; +CREATE DICTIONARY ip_dictionary +( + id UInt64, + prefix String, + asn UInt32, + cca2 String +) +PRIMARY KEY prefix +SOURCE(CLICKHOUSE(TABLE 'ip_dictionary_source_table')) +LAYOUT(IP_TRIE) +LIFETIME(3600); + +SELECT 'IP TRIE dictionary'; +SELECT dictGetOrDefault('ip_dictionary', 'cca2', toIPv4('202.79.32.10'), intDiv(0, id)) +FROM ip_dictionary_source_table; +SELECT dictGetOrDefault('ip_dictionary', ('asn', 'cca2'), IPv6StringToNum('2a02:6b8:1::1'), +(intDiv(1, id), intDiv(1, id))) FROM ip_dictionary_source_table; +DROP DICTIONARY ip_dictionary; + + +DROP TABLE IF EXISTS polygon_dictionary_source_table; +CREATE TABLE polygon_dictionary_source_table +( + key Array(Array(Array(Tuple(Float64, Float64)))), + name Nullable(String) +) ENGINE=TinyLog; + +INSERT INTO polygon_dictionary_source_table VALUES([[[(3, 1), (0, 1), (0, -1), (3, -1)]]], 'East'), ([[[(-3, 1), (-3, -1), (0, -1), (0, 1)]]], 'West'); + +DROP DICTIONARY IF EXISTS polygon_dictionary; +CREATE DICTIONARY polygon_dictionary +( + key Array(Array(Array(Tuple(Float64, Float64)))), + name Nullable(String) +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(TABLE 'polygon_dictionary_source_table')) +LIFETIME(0) +LAYOUT(POLYGON()); + +DROP TABLE IF EXISTS points; +CREATE TABLE points (x Float64, y Float64) ENGINE=TinyLog; +INSERT INTO points VALUES (0.5, 0), (-0.5, 0), (10,10); + +SELECT 'POLYGON dictionary'; +SELECT tuple(x, y) as key, dictGetOrDefault('polygon_dictionary', 'name', key, intDiv(1, y)) +FROM points; + +DROP TABLE points; +DROP DICTIONARY polygon_dictionary; +DROP TABLE polygon_dictionary_source_table; + + +DROP TABLE 
IF EXISTS regexp_dictionary_source_table; +CREATE TABLE regexp_dictionary_source_table +( + id UInt64, + parent_id UInt64, + regexp String, + keys Array(String), + values Array(String), +) ENGINE=TinyLog; + +INSERT INTO regexp_dictionary_source_table VALUES (1, 0, 'Linux/(\d+[\.\d]*).+tlinux', ['name', 'version'], ['TencentOS', '\1']); +INSERT INTO regexp_dictionary_source_table VALUES (2, 0, '(\d+)/tclwebkit(\d+[\.\d]*)', ['name', 'version', 'comment'], ['Android', '$1', 'test $1 and $2']); +INSERT INTO regexp_dictionary_source_table VALUES (3, 2, '33/tclwebkit', ['version'], ['13']); +INSERT INTO regexp_dictionary_source_table VALUES (4, 2, '3[12]/tclwebkit', ['version'], ['12']); +INSERT INTO regexp_dictionary_source_table VALUES (5, 2, '3[12]/tclwebkit', ['version'], ['11']); +INSERT INTO regexp_dictionary_source_table VALUES (6, 2, '3[12]/tclwebkit', ['version'], ['10']); + +DROP DICTIONARY IF EXISTS regexp_dict; +create dictionary regexp_dict +( + regexp String, + name String, + version Nullable(UInt64), + comment String default 'nothing' +) +PRIMARY KEY(regexp) +SOURCE(CLICKHOUSE(TABLE 'regexp_dictionary_source_table')) +LIFETIME(0) +LAYOUT(regexp_tree); + +SELECT 'Regular Expression Tree dictionary'; +SELECT dictGetOrDefault('regexp_dict', 'name', concat(toString(number), '/tclwebkit', toString(number)), +intDiv(1,number)) FROM numbers(2); +-- Fuzzer +SELECT dictGetOrDefault('regexp_dict', 'name', concat('/tclwebkit', toString(number)), intDiv(1, number)) FROM numbers(2); -- { serverError ILLEGAL_DIVISION } +DROP DICTIONARY regexp_dict; +DROP TABLE regexp_dictionary_source_table; diff --git a/parser/testdata/02950_parallel_replicas_used_count/ast.json b/parser/testdata/02950_parallel_replicas_used_count/ast.json new file mode 100644 index 000000000..a1b291bf7 --- /dev/null +++ b/parser/testdata/02950_parallel_replicas_used_count/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001064135, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02950_parallel_replicas_used_count/metadata.json b/parser/testdata/02950_parallel_replicas_used_count/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02950_parallel_replicas_used_count/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02950_parallel_replicas_used_count/query.sql b/parser/testdata/02950_parallel_replicas_used_count/query.sql new file mode 100644 index 000000000..8bdf40076 --- /dev/null +++ b/parser/testdata/02950_parallel_replicas_used_count/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test (k UInt64, v String) +ENGINE = MergeTree +ORDER BY k +SETTINGS index_granularity=1; + +INSERT INTO test SELECT number, toString(number) FROM numbers(10_000); + +SET parallel_replicas_only_with_analyzer = 0; -- necessary for CI run with disabled analyzer + +SET enable_parallel_replicas = 2, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree=1, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; +-- default coordinator +SELECT count(), sum(k) +FROM test +SETTINGS log_comment = '02950_parallel_replicas_used_replicas_count'; + +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['ParallelReplicasUsedCount'] > 0 FROM system.query_log WHERE type = 'QueryFinish' AND query_id 
IN (SELECT query_id FROM system.query_log WHERE current_database = currentDatabase() AND log_comment = '02950_parallel_replicas_used_replicas_count' AND type = 'QueryFinish' AND initial_query_id = query_id) SETTINGS enable_parallel_replicas=0; + +-- In order coordinator +SELECT k FROM test order by k limit 5 offset 89 SETTINGS optimize_read_in_order=1, log_comment='02950_parallel_replicas_used_replicas_count_2', merge_tree_min_rows_for_concurrent_read=1, max_threads=1; + +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['ParallelReplicasUsedCount'] > 0 FROM system.query_log WHERE type = 'QueryFinish' AND query_id IN (SELECT query_id FROM system.query_log WHERE current_database = currentDatabase() AND log_comment = '02950_parallel_replicas_used_replicas_count_2' AND type = 'QueryFinish' AND initial_query_id = query_id) SETTINGS enable_parallel_replicas=0; + +-- In reverse order coordinator +SELECT k FROM test order by k desc limit 5 offset 9906 SETTINGS optimize_read_in_order=1, log_comment='02950_parallel_replicas_used_replicas_count_3', merge_tree_min_rows_for_concurrent_read=1, max_threads=1; + +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['ParallelReplicasUsedCount'] > 0 FROM system.query_log WHERE type = 'QueryFinish' AND query_id IN (SELECT query_id FROM system.query_log WHERE current_database = currentDatabase() AND log_comment = '02950_parallel_replicas_used_replicas_count_3' AND type = 'QueryFinish' AND initial_query_id = query_id) SETTINGS enable_parallel_replicas=0; + +DROP TABLE test; diff --git a/parser/testdata/02950_part_log_bytes_uncompressed/ast.json b/parser/testdata/02950_part_log_bytes_uncompressed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02950_part_log_bytes_uncompressed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02950_part_log_bytes_uncompressed/metadata.json b/parser/testdata/02950_part_log_bytes_uncompressed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02950_part_log_bytes_uncompressed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02950_part_log_bytes_uncompressed/query.sql b/parser/testdata/02950_part_log_bytes_uncompressed/query.sql new file mode 100644 index 000000000..a520e4a6a --- /dev/null +++ b/parser/testdata/02950_part_log_bytes_uncompressed/query.sql @@ -0,0 +1,29 @@ +-- Tags: no-random-merge-tree-settings, no-random-settings +-- Because we compare part sizes, and they could be affected by index granularity and index compression settings. + +CREATE TABLE part_log_bytes_uncompressed ( + key UInt8, + value UInt8 +) +Engine=MergeTree() +ORDER BY key; + +INSERT INTO part_log_bytes_uncompressed SELECT 1, 1 FROM numbers(1000); +INSERT INTO part_log_bytes_uncompressed SELECT 2, 1 FROM numbers(1000); + +OPTIMIZE TABLE part_log_bytes_uncompressed FINAL; + +ALTER TABLE part_log_bytes_uncompressed UPDATE value = 3 WHERE 1 = 1 SETTINGS mutations_sync=2; + +INSERT INTO part_log_bytes_uncompressed SELECT 3, 1 FROM numbers(1000); +ALTER TABLE part_log_bytes_uncompressed DROP PART 'all_4_4_0' SETTINGS mutations_sync=2; + +SYSTEM FLUSH LOGS part_log; + +SELECT event_type, table, part_name, bytes_uncompressed > 0, (bytes_uncompressed > 0 ? (size_in_bytes < bytes_uncompressed ? 
'1' : toString((size_in_bytes, bytes_uncompressed))) : '0') +FROM system.part_log +WHERE event_date >= yesterday() AND database = currentDatabase() AND table = 'part_log_bytes_uncompressed' + AND (event_type != 'RemovePart' OR part_name = 'all_4_4_0') -- ignore removal of other parts +ORDER BY part_name, event_type; + +DROP TABLE part_log_bytes_uncompressed; diff --git a/parser/testdata/02950_part_offset_as_primary_key/ast.json b/parser/testdata/02950_part_offset_as_primary_key/ast.json new file mode 100644 index 000000000..0120aa17a --- /dev/null +++ b/parser/testdata/02950_part_offset_as_primary_key/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001224389, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02950_part_offset_as_primary_key/metadata.json b/parser/testdata/02950_part_offset_as_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02950_part_offset_as_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02950_part_offset_as_primary_key/query.sql b/parser/testdata/02950_part_offset_as_primary_key/query.sql new file mode 100644 index 000000000..bd19e8e36 --- /dev/null +++ b/parser/testdata/02950_part_offset_as_primary_key/query.sql @@ -0,0 +1,45 @@ +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +drop table if exists a; + +create table a (i int) engine MergeTree order by i settings index_granularity = 2; +insert into a select -number from numbers(5); + +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. 
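+-- A hypothetical illustration of that failure mode (assumes the setting's default value of 1, i.e. only the
+-- coordinator analyzes the index): a remote replica that skips index analysis marks the whole 5-row part as
+-- a full range, so a query like
+--   select i from a where _part_offset = 0 settings max_rows_to_read = 2;
+-- could fail with TOO_MANY_ROWS on that replica, even though index analysis narrows it to a single granule.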
+SET parallel_replicas_index_analysis_only_on_coordinator = 0; + +-- nothing to read +select i from a where _part_offset >= 5 order by i settings max_bytes_to_read = 1; + +-- one granule +select i from a where _part_offset = 0 order by i settings max_rows_to_read = 2; +select i from a where _part_offset = 1 order by i settings max_rows_to_read = 2; +select i from a where _part_offset = 2 order by i settings max_rows_to_read = 2; +select i from a where _part_offset = 3 order by i settings max_rows_to_read = 2; +select i from a where _part_offset = 4 order by i settings max_rows_to_read = 1; + +-- other predicates +select i from a where _part_offset in (1, 4) order by i settings max_rows_to_read = 3; +select i from a where _part_offset not in (1, 4) order by i settings max_rows_to_read = 4; + +-- the force_primary_key check still works +select i from a where _part_offset = 4 order by i settings force_primary_key = 1; -- { serverError INDEX_NOT_USED } + +-- combining with other primary keys doesn't work (makes no sense) +select i from a where i = -3 or _part_offset = 4 order by i settings force_primary_key = 1; -- { serverError INDEX_NOT_USED } + +drop table a; + +drop table if exists b; + +create table b (i int) engine MergeTree order by tuple() settings index_granularity = 2; + +-- all_1_1_0 +insert into b select number * 10 from numbers(5); +-- all_2_2_0 +insert into b select number * 100 from numbers(5); + +-- multiple parts with _part predicate +select i from b where (_part = 'all_1_1_0' and _part_offset in (1, 4)) or (_part = 'all_2_2_0' and _part_offset in (0, 4)) order by i settings max_rows_to_read = 6; + +drop table b; diff --git a/parser/testdata/02950_reading_array_tuple_subcolumns/ast.json b/parser/testdata/02950_reading_array_tuple_subcolumns/ast.json new file mode 100644 index 000000000..9c3fd299f --- /dev/null +++ b/parser/testdata/02950_reading_array_tuple_subcolumns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001047174, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02950_reading_array_tuple_subcolumns/metadata.json b/parser/testdata/02950_reading_array_tuple_subcolumns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02950_reading_array_tuple_subcolumns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02950_reading_array_tuple_subcolumns/query.sql b/parser/testdata/02950_reading_array_tuple_subcolumns/query.sql new file mode 100644 index 000000000..85bf16a88 --- /dev/null +++ b/parser/testdata/02950_reading_array_tuple_subcolumns/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + `id` UInt64, + `t` Tuple(a UInt64, b Array(Tuple(c UInt64, d UInt64))) +) +ENGINE = MergeTree +ORDER BY id +SETTINGS min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1, index_granularity = 8192; +INSERT INTO test SELECT number, tuple(number, arrayMap(x -> tuple(number + 1, number + 2), range(number % 10))) FROM numbers(100000); +INSERT INTO test SELECT number, tuple(number, arrayMap(x -> tuple(number + 1, number + 2), range(number % 10))) FROM numbers(100000); +INSERT INTO test SELECT number, tuple(number, arrayMap(x -> tuple(number + 1, number + 2), range(number % 10))) FROM numbers(100000); +SELECT t.b, t.b.c FROM test ORDER BY id FORMAT Null; +DROP 
TABLE test; + diff --git a/parser/testdata/02952_conjunction_optimization/ast.json b/parser/testdata/02952_conjunction_optimization/ast.json new file mode 100644 index 000000000..1d0901a59 --- /dev/null +++ b/parser/testdata/02952_conjunction_optimization/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001154946, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02952_conjunction_optimization/metadata.json b/parser/testdata/02952_conjunction_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02952_conjunction_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02952_conjunction_optimization/query.sql b/parser/testdata/02952_conjunction_optimization/query.sql new file mode 100644 index 000000000..e1904c5d1 --- /dev/null +++ b/parser/testdata/02952_conjunction_optimization/query.sql @@ -0,0 +1,28 @@ +SET enable_analyzer = 1; + +SET optimize_empty_string_comparisons = 0; + +DROP TABLE IF EXISTS 02952_disjunction_optimization; + +CREATE TABLE 02952_disjunction_optimization +(a Int32, b String) +ENGINE=Memory; + +INSERT INTO 02952_disjunction_optimization VALUES (1, 'test'), (2, 'test2'), (3, 'another'), (3, ''), (4, ''); + +SELECT * FROM 02952_disjunction_optimization WHERE a <> 1 AND a <> 2 AND a <> 4; +EXPLAIN QUERY TREE SELECT * FROM 02952_disjunction_optimization WHERE a <> 1 AND a <> 2 AND a <> 4; + +SELECT * FROM 02952_disjunction_optimization WHERE a <> 1 AND a <> 2 AND a <> 4 AND true; +EXPLAIN QUERY TREE SELECT * FROM 02952_disjunction_optimization WHERE a <> 1 AND a <> 2 AND a <> 4 AND true; + +SELECT * FROM 02952_disjunction_optimization WHERE a <> 1 AND a <> 2 AND a <> 4 AND b <> ''; +EXPLAIN QUERY TREE SELECT * FROM 02952_disjunction_optimization WHERE a <> 1 AND a <> 2 AND a <> 4 AND b <> ''; + +SELECT * FROM 02952_disjunction_optimization WHERE a <> 1 AND a <> 2 AND b = '' AND a <> 4; +EXPLAIN QUERY TREE SELECT * FROM 02952_disjunction_optimization WHERE a <> 1 AND a <> 2 AND b = '' AND a <> 4; + +SELECT * FROM 02952_disjunction_optimization WHERE (a <> 1 AND a <> 2 AND a <> 4) OR b = ''; +EXPLAIN QUERY TREE SELECT * FROM 02952_disjunction_optimization WHERE (a <> 1 AND a <> 2 AND a <> 4) OR b = ''; + +DROP TABLE 02952_disjunction_optimization; diff --git a/parser/testdata/02953_slow_create_view/ast.json b/parser/testdata/02953_slow_create_view/ast.json new file mode 100644 index 000000000..ea676c84f --- /dev/null +++ b/parser/testdata/02953_slow_create_view/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery slow_view1 (children 1)" + }, + { + "explain": " Identifier slow_view1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001040693, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02953_slow_create_view/metadata.json b/parser/testdata/02953_slow_create_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02953_slow_create_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02953_slow_create_view/query.sql b/parser/testdata/02953_slow_create_view/query.sql new file mode 100644 index 000000000..7824bd97b --- /dev/null +++ b/parser/testdata/02953_slow_create_view/query.sql @@ -0,0 +1,44 @@ +drop view if exists 
slow_view1; + +create view slow_view1 as +with c1 as (select 1 as a), + c2 as (select a from c1), + c3 as (select a from c2), + c4 as (select a from c3), + c5 as (select a from c4), + c6 as (select a from c5), + c7 as (select a from c6), + c8 as (select a from c7), + c9 as (select a from c8), + c10 as (select a from c9), + c11 as (select a from c10), + c12 as (select a from c11), + c13 as (select a from c12), + c14 as (select a from c13), + c15 as (select a from c14), + c16 as (select a from c15), + c17 as (select a from c16), + c18 as (select a from c17), + c19 as (select a from c18), + c20 as (select a from c19), + c21 as (select a from c20), + c22 as (select a from c21), + c23 as (select a from c22), + c24 as (select a from c23), + c25 as (select a from c24), + c26 as (select a from c25), + c27 as (select a from c26), + c28 as (select a from c27), + c29 as (select a from c28), + c30 as (select a from c29), + c31 as (select a from c30), + c32 as (select a from c31), + c33 as (select a from c32), + c34 as (select a from c33), + c35 as (select a from c34), + c36 as (select a from c35), + c37 as (select a from c36), + c38 as (select a from c37), + c39 as (select a from c38), + c40 as (select a from c39) +select a from c21; diff --git a/parser/testdata/02954_analyzer_fuzz_i57086/ast.json b/parser/testdata/02954_analyzer_fuzz_i57086/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02954_analyzer_fuzz_i57086/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02954_analyzer_fuzz_i57086/metadata.json b/parser/testdata/02954_analyzer_fuzz_i57086/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02954_analyzer_fuzz_i57086/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02954_analyzer_fuzz_i57086/query.sql b/parser/testdata/02954_analyzer_fuzz_i57086/query.sql new file mode 100644 index 000000000..886944e30 --- /dev/null +++ b/parser/testdata/02954_analyzer_fuzz_i57086/query.sql @@ -0,0 +1,15 @@ +--https://github.com/ClickHouse/ClickHouse/issues/57086 +SELECT + 'limit w/ GROUP BY', + count(NULL), + number +FROM remote('127.{1,2}', view( + SELECT intDiv(number, 2147483647) AS number + FROM numbers(10) + )) +GROUP BY number +WITH ROLLUP +ORDER BY + count() ASC, + number DESC NULLS LAST + SETTINGS limit = 2, enable_analyzer = 1; diff --git a/parser/testdata/02955_avro_format_zstd_encode_support/ast.json b/parser/testdata/02955_avro_format_zstd_encode_support/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02955_avro_format_zstd_encode_support/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02955_avro_format_zstd_encode_support/metadata.json b/parser/testdata/02955_avro_format_zstd_encode_support/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02955_avro_format_zstd_encode_support/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02955_avro_format_zstd_encode_support/query.sql b/parser/testdata/02955_avro_format_zstd_encode_support/query.sql new file mode 100644 index 000000000..b88e1dbcc --- /dev/null +++ b/parser/testdata/02955_avro_format_zstd_encode_support/query.sql @@ -0,0 +1,16 @@ +-- Tags: no-fasttest +DROP TABLE IF EXISTS t; +CREATE TABLE t +( + `n1` Int32 +) +ENGINE = File(Avro) +SETTINGS output_format_avro_codec = 'zstd'; + +INSERT INTO t SELECT * +FROM numbers(10); + +SELECT sum(n1) +FROM t; + +DROP TABLE t; diff --git 
a/parser/testdata/02955_sparkBar_alias_sparkbar/ast.json b/parser/testdata/02955_sparkBar_alias_sparkbar/ast.json new file mode 100644 index 000000000..248575e33 --- /dev/null +++ b/parser/testdata/02955_sparkBar_alias_sparkbar/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001307692, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02955_sparkBar_alias_sparkbar/metadata.json b/parser/testdata/02955_sparkBar_alias_sparkbar/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02955_sparkBar_alias_sparkbar/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02955_sparkBar_alias_sparkbar/query.sql b/parser/testdata/02955_sparkBar_alias_sparkbar/query.sql new file mode 100644 index 000000000..0f658379f --- /dev/null +++ b/parser/testdata/02955_sparkBar_alias_sparkbar/query.sql @@ -0,0 +1,11 @@ +SET enable_analyzer = 1; +DROP TABLE IF EXISTS spark_bar_test; + +CREATE TABLE spark_bar_test (`value` Int64, `event_date` Date) ENGINE = MergeTree ORDER BY event_date; + +INSERT INTO spark_bar_test VALUES (1,'2020-01-01'), (3,'2020-01-02'), (4,'2020-01-02'), (-3,'2020-01-02'), (5,'2020-01-03'), (2,'2020-01-04'), (3,'2020-01-05'), (7,'2020-01-06'), (6,'2020-01-07'), (8,'2020-01-08'), (2,'2020-01-11'); + +SELECT sparkbar(9)(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_test GROUP BY event_date); +SELECT sparkBar(9)(event_date,cnt) FROM (SELECT sum(value) as cnt, event_date FROM spark_bar_test GROUP BY event_date); + +DROP TABLE IF EXISTS spark_bar_test; diff --git a/parser/testdata/02956_fix_to_start_of_milli_microsecond/ast.json b/parser/testdata/02956_fix_to_start_of_milli_microsecond/ast.json new file mode 100644 index 000000000..4bc4ec2ee --- /dev/null +++ b/parser/testdata/02956_fix_to_start_of_milli_microsecond/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toStartOfInterval (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2023-10-09 10:11:12.000999'" + }, + { + "explain": " Literal UInt64_6" + }, + { + "explain": " Function toIntervalMillisecond (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001143385, + "rows_read": 13, + "bytes_read": 545 + } +} diff --git a/parser/testdata/02956_fix_to_start_of_milli_microsecond/metadata.json b/parser/testdata/02956_fix_to_start_of_milli_microsecond/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02956_fix_to_start_of_milli_microsecond/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02956_fix_to_start_of_milli_microsecond/query.sql b/parser/testdata/02956_fix_to_start_of_milli_microsecond/query.sql new file mode 100644 index 000000000..15753d453 --- /dev/null +++ b/parser/testdata/02956_fix_to_start_of_milli_microsecond/query.sql @@ -0,0 +1,7 
@@ +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.000999', 6), toIntervalMillisecond(1)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.000500', 6), toIntervalMillisecond(1)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.000499', 6), toIntervalMillisecond(1)); +SELECT toStartOfInterval(toDateTime64('2023-10-09 10:11:12.000999', 6), toIntervalMillisecond(10)); +select toStartOfInterval(toDateTime64('2023-10-09 00:01:34', 9), toIntervalMicrosecond(100000000)); +select toStartOfInterval(toDateTime64('2023-10-09 00:01:34', 9), toIntervalMillisecond(100000)); +select toStartOfInterval(toDateTime64('2023-10-09 00:01:34', 9), toIntervalSecond(100)); \ No newline at end of file diff --git a/parser/testdata/02956_format_constexpr/ast.json b/parser/testdata/02956_format_constexpr/ast.json new file mode 100644 index 000000000..4aa2702dd --- /dev/null +++ b/parser/testdata/02956_format_constexpr/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function isConstant (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '{}, world'" + }, + { + "explain": " Literal 'Hello'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.00108324, + "rows_read": 10, + "bytes_read": 383 + } +} diff --git a/parser/testdata/02956_format_constexpr/metadata.json b/parser/testdata/02956_format_constexpr/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02956_format_constexpr/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02956_format_constexpr/query.sql b/parser/testdata/02956_format_constexpr/query.sql new file mode 100644 index 000000000..32c614363 --- /dev/null +++ b/parser/testdata/02956_format_constexpr/query.sql @@ -0,0 +1 @@ +SELECT isConstant(format('{}, world', 'Hello')); diff --git a/parser/testdata/02956_rocksdb_with_ttl/ast.json b/parser/testdata/02956_rocksdb_with_ttl/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02956_rocksdb_with_ttl/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02956_rocksdb_with_ttl/metadata.json b/parser/testdata/02956_rocksdb_with_ttl/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02956_rocksdb_with_ttl/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02956_rocksdb_with_ttl/query.sql b/parser/testdata/02956_rocksdb_with_ttl/query.sql new file mode 100644 index 000000000..01efe19cf --- /dev/null +++ b/parser/testdata/02956_rocksdb_with_ttl/query.sql @@ -0,0 +1,13 @@ +-- Tags: no-ordinary-database, use-rocksdb + +-- TTL = 2s +CREATE TABLE dict_with_ttl (key UInt64, value String) ENGINE = EmbeddedRocksDB(2) PRIMARY KEY (key); +INSERT INTO dict_with_ttl VALUES (0, 'foo'); +-- Data inserted correctly +SELECT * FROM dict_with_ttl; +-- If possible, we should test that even if we execute OPTIMIZE TABLE, the data is still there if the TTL has not expired yet. +-- Nevertheless, query time is unpredictable with different builds, so we can't test it.
So we only test that after 3s +-- we execute OPTIMIZE and the data should be gone. +SELECT sleep(3); +OPTIMIZE TABLE dict_with_ttl; +SELECT * FROM dict_with_ttl; diff --git a/parser/testdata/02958_transform_enum/ast.json b/parser/testdata/02958_transform_enum/ast.json new file mode 100644 index 000000000..14af59e53 --- /dev/null +++ b/parser/testdata/02958_transform_enum/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['Hello', 'world']" + }, + { + "explain": " Literal 'Enum(\\'Hello\\', \\'world\\')'" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function transform (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal Array_['Hello', 'world']" + }, + { + "explain": " Literal Array_[UInt64_123, UInt64_456]" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.00149965, + "rows_read": 18, + "bytes_read": 721 + } +} diff --git a/parser/testdata/02958_transform_enum/metadata.json b/parser/testdata/02958_transform_enum/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02958_transform_enum/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02958_transform_enum/query.sql b/parser/testdata/02958_transform_enum/query.sql new file mode 100644 index 000000000..3b0fd40a2 --- /dev/null +++ b/parser/testdata/02958_transform_enum/query.sql @@ -0,0 +1,3 @@ +WITH arrayJoin(['Hello', 'world'])::Enum('Hello', 'world') AS x SELECT x, transform(x, ['Hello', 'world'], [123, 456], 0); +WITH arrayJoin(['Hello', 'world'])::Enum('Hello', 'world') AS x SELECT x, transform(x, ['Hello', 'world', 'goodbye'], [123, 456], 0); -- { serverError UNKNOWN_ELEMENT_OF_ENUM } +WITH arrayJoin(['Hello', 'world'])::Enum('Hello', 'world') AS x SELECT x, transform(x, ['Hello', 'world'], ['test', 'best']::Array(Enum('test' = 123, 'best' = 456, '' = 0)), ''::Enum('test' = 123, 'best' = 456, '' = 0)) AS y; diff --git a/parser/testdata/02959_system_database_engines/ast.json b/parser/testdata/02959_system_database_engines/ast.json new file mode 100644 index 000000000..a0cc9d761 --- /dev/null +++ b/parser/testdata/02959_system_database_engines/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.database_engines" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + 
"explain": " Identifier name" + }, + { + "explain": " Literal Tuple_('Atomic', 'Lazy', 'Ordinary')" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier name" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001157845, + "rows_read": 16, + "bytes_read": 621 + } +} diff --git a/parser/testdata/02959_system_database_engines/metadata.json b/parser/testdata/02959_system_database_engines/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02959_system_database_engines/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02959_system_database_engines/query.sql b/parser/testdata/02959_system_database_engines/query.sql new file mode 100644 index 000000000..67cb20f04 --- /dev/null +++ b/parser/testdata/02959_system_database_engines/query.sql @@ -0,0 +1 @@ +SELECT * FROM system.database_engines WHERE name IN ('Atomic', 'Lazy', 'Ordinary') ORDER BY name; diff --git a/parser/testdata/02960_alter_table_part_query_parameter/ast.json b/parser/testdata/02960_alter_table_part_query_parameter/ast.json new file mode 100644 index 000000000..f771215c1 --- /dev/null +++ b/parser/testdata/02960_alter_table_part_query_parameter/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data (children 1)" + }, + { + "explain": " Identifier data" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000985986, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02960_alter_table_part_query_parameter/metadata.json b/parser/testdata/02960_alter_table_part_query_parameter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02960_alter_table_part_query_parameter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02960_alter_table_part_query_parameter/query.sql b/parser/testdata/02960_alter_table_part_query_parameter/query.sql new file mode 100644 index 000000000..31d3ae102 --- /dev/null +++ b/parser/testdata/02960_alter_table_part_query_parameter/query.sql @@ -0,0 +1,15 @@ +drop table if exists data; +create table data (key Int) engine=MergeTree() order by key; + +insert into data values (1); + +set param_part='all_1_1_0'; +alter table data detach part {part:String}; +alter table data attach part {part:String}; +set param_part='all_2_2_0'; +alter table data detach part {part:String}; +alter table data drop detached part {part:String} settings allow_drop_detached=1; + +insert into data values (2); +set param_part='all_3_3_0'; +alter table data drop part {part:String}; diff --git a/parser/testdata/02960_partition_by_udf/ast.json b/parser/testdata/02960_partition_by_udf/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02960_partition_by_udf/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02960_partition_by_udf/metadata.json b/parser/testdata/02960_partition_by_udf/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02960_partition_by_udf/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02960_partition_by_udf/query.sql b/parser/testdata/02960_partition_by_udf/query.sql new file mode 100644 index 000000000..3a5b74916 --- /dev/null +++ b/parser/testdata/02960_partition_by_udf/query.sql @@ -0,0 +1,19 @@ +-- Tags: no-parallel + +DROP FUNCTION IF EXISTS f1; 
+CREATE FUNCTION f1 AS (x) -> x; + +CREATE TABLE hit +( + `UserID` UInt32, + `URL` String, + `EventTime` DateTime +) +ENGINE = MergeTree +partition by f1(URL) +ORDER BY (EventTime); + +INSERT INTO hit SELECT * FROM generateRandom() LIMIT 10; +SELECT count() FROM hit; + +DROP TABLE hit; diff --git a/parser/testdata/02960_validate_database_engines/ast.json b/parser/testdata/02960_validate_database_engines/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02960_validate_database_engines/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02960_validate_database_engines/metadata.json b/parser/testdata/02960_validate_database_engines/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02960_validate_database_engines/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02960_validate_database_engines/query.sql b/parser/testdata/02960_validate_database_engines/query.sql new file mode 100644 index 000000000..5d39a7686 --- /dev/null +++ b/parser/testdata/02960_validate_database_engines/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS test2960_valid_database_engine; + +-- create database with valid engine. Should succeed. +CREATE DATABASE test2960_valid_database_engine ENGINE = Atomic; + +-- create database with valid engine but arguments are not allowed. Should fail. +CREATE DATABASE test2960_database_engine_args_not_allowed ENGINE = Atomic('foo', 'bar'); -- { serverError BAD_ARGUMENTS } + +-- create database with an invalid engine. Should fail. +CREATE DATABASE test2960_invalid_database_engine ENGINE = Foo; -- { serverError UNKNOWN_DATABASE_ENGINE } + +DROP DATABASE IF EXISTS test2960_valid_database_engine; diff --git a/parser/testdata/02961_analyzer_low_cardinality_fuzzer/ast.json b/parser/testdata/02961_analyzer_low_cardinality_fuzzer/ast.json new file mode 100644 index 000000000..e5bac6346 --- /dev/null +++ b/parser/testdata/02961_analyzer_low_cardinality_fuzzer/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001128839, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02961_analyzer_low_cardinality_fuzzer/metadata.json b/parser/testdata/02961_analyzer_low_cardinality_fuzzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02961_analyzer_low_cardinality_fuzzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02961_analyzer_low_cardinality_fuzzer/query.sql b/parser/testdata/02961_analyzer_low_cardinality_fuzzer/query.sql new file mode 100644 index 000000000..8836d10b8 --- /dev/null +++ b/parser/testdata/02961_analyzer_low_cardinality_fuzzer/query.sql @@ -0,0 +1,19 @@ +set allow_suspicious_low_cardinality_types = true; + +CREATE TABLE test_tuple_filter__fuzz_2 +( + `id` Nullable(UInt32), + `value` LowCardinality(String), + `log_date` LowCardinality(Date) +) +ENGINE = MergeTree +PARTITION BY log_date +ORDER BY id +SETTINGS allow_nullable_key = 1; + +INSERT INTO test_tuple_filter__fuzz_2 SELECT number, toString(number), toDate('2024-01-01') + number FROM numbers(10); + +SELECT + (tuple(log_date) = tuple('2021-01-01'), log_date) +FROM test_tuple_filter__fuzz_2 +ORDER BY log_date; diff --git a/parser/testdata/02961_drop_tables/ast.json b/parser/testdata/02961_drop_tables/ast.json new file mode 100644 index 
000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02961_drop_tables/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02961_drop_tables/metadata.json b/parser/testdata/02961_drop_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02961_drop_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02961_drop_tables/query.sql b/parser/testdata/02961_drop_tables/query.sql new file mode 100644 index 000000000..f84fffbef --- /dev/null +++ b/parser/testdata/02961_drop_tables/query.sql @@ -0,0 +1,46 @@ +-- Tags: no-parallel +DROP DATABASE IF EXISTS 02961_db1; +CREATE DATABASE IF NOT EXISTS 02961_db1; +DROP DATABASE IF EXISTS 02961_db2; +CREATE DATABASE IF NOT EXISTS 02961_db2; + + +CREATE TABLE IF NOT EXISTS 02961_db1.02961_tb1 (id UInt32) Engine=Memory(); +CREATE TABLE IF NOT EXISTS 02961_db1.02961_tb2 (id UInt32) Engine=Memory(); + +CREATE TABLE IF NOT EXISTS 02961_db2.02961_tb3 (id UInt32) Engine=Memory(); +CREATE TABLE IF NOT EXISTS 02961_db2.02961_tb4 (id UInt32) Engine=Memory(); +CREATE TABLE IF NOT EXISTS 02961_db2.02961_tb5 (id UInt32) Engine=Memory(); + +DROP TABLE 02961_db1.02961_tb1, 02961_db1.02961_tb2, 02961_db2.02961_tb3; + +SELECT '-- check which tables exist in 02961_db1'; +SHOW TABLES FROM 02961_db1; +SELECT '-- check which tables exist in 02961_db2'; +SHOW TABLES FROM 02961_db2; + +SELECT 'Test when deletion of existing table fails'; +DROP TABLE 02961_db2.02961_tb4, 02961_db1.02961_tb1, 02961_db2.02961_tb5; -- { serverError UNKNOWN_TABLE } + +SELECT '-- check which tables exist in 02961_db1'; +SHOW TABLES FROM 02961_db1; +SELECT '-- check which tables exist in 02961_db2'; +SHOW TABLES FROM 02961_db2; + +DROP TABLE IF EXISTS tab1, tab2, tab3; +CREATE TABLE IF NOT EXISTS tab1 (id UInt32) Engine=Memory(); +CREATE TABLE IF NOT EXISTS tab2 (id UInt32) Engine=Memory(); +CREATE TABLE IF NOT EXISTS tab3 (id UInt32) Engine=Memory(); + +INSERT INTO tab2 SELECT number FROM system.numbers limit 10; + +DROP TABLE IF EMPTY tab1, tab2, tab3; -- { serverError TABLE_NOT_EMPTY } +SELECT 'Test when deletion of not empty table fails'; +SHOW TABLES; + +TRUNCATE TABLE tab2, tab3; -- { clientError SYNTAX_ERROR } + +DROP TABLE IF EXISTS tab1, tab2, tab3; + +DROP DATABASE IF EXISTS 02961_db1; +DROP DATABASE IF EXISTS 02961_db2; diff --git a/parser/testdata/02961_higher_order_constant_expressions/ast.json b/parser/testdata/02961_higher_order_constant_expressions/ast.json new file mode 100644 index 000000000..c5387b288 --- /dev/null +++ b/parser/testdata/02961_higher_order_constant_expressions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001226554, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02961_higher_order_constant_expressions/metadata.json b/parser/testdata/02961_higher_order_constant_expressions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02961_higher_order_constant_expressions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02961_higher_order_constant_expressions/query.sql b/parser/testdata/02961_higher_order_constant_expressions/query.sql new file mode 100644 index 000000000..23b0b72f4 --- /dev/null +++ b/parser/testdata/02961_higher_order_constant_expressions/query.sql @@ -0,0 +1,13 @@ +SET enable_analyzer = 1; + +SELECT arrayMap(x -> x, [1, 
2, 3]) AS x, isConstant(x); +SELECT arrayMap(x -> x + 1, [1, 2, 3]) AS x, isConstant(x); +SELECT arrayMap(x -> x + x, [1, 2, 3]) AS x, isConstant(x); +SELECT arrayMap((x, y) -> x + y, [1, 2, 3], [4, 5, 6]) AS x, isConstant(x); +SELECT arrayMap(x -> 1, [1, 2, 3]) AS x, isConstant(x); +SELECT arrayMap(x -> x + number, [1, 2, 3]) AS x, isConstant(x) FROM numbers(1); +SELECT arrayMap(x -> number, [1, 2, 3]) AS x, isConstant(x) FROM numbers(1); +SELECT arrayMax([1, 2, 3]) AS x, isConstant(x); + +-- Does not work yet: +-- SELECT [1, 2, 3] IN arrayMap(x -> x, [1, 2, 3]); diff --git a/parser/testdata/02961_read_bool_as_string_json/ast.json b/parser/testdata/02961_read_bool_as_string_json/ast.json new file mode 100644 index 000000000..fde276dc5 --- /dev/null +++ b/parser/testdata/02961_read_bool_as_string_json/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001163898, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02961_read_bool_as_string_json/metadata.json b/parser/testdata/02961_read_bool_as_string_json/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02961_read_bool_as_string_json/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02961_read_bool_as_string_json/query.sql b/parser/testdata/02961_read_bool_as_string_json/query.sql new file mode 100644 index 000000000..b9f4a7926 --- /dev/null +++ b/parser/testdata/02961_read_bool_as_string_json/query.sql @@ -0,0 +1,9 @@ +set input_format_json_read_bools_as_strings=1; +select * from format(JSONEachRow, 'x String', '{"x" : true}, {"x" : false}, {"x" : "str"}'); +select * from format(JSONEachRow, '{"x" : true}, {"x" : false}, {"x" : "str"}'); +select * from format(JSONEachRow, 'x String', '{"x" : tru}'); -- {serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED} +select * from format(JSONEachRow, 'x String', '{"x" : fals}'); -- {serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED} +select * from format(JSONEachRow, 'x String', '{"x" : atru}'); -- {serverError INCORRECT_DATA} +select * from format(JSONEachRow, 'x Array(String)', '{"x" : [true, false]}, {"x" : [false, true]}, {"x" : ["str1", "str2"]}'); +select * from format(JSONEachRow, '{"x" : [true, false]}, {"x" : [false, true]}, {"x" : ["str1", "str2"]}'); + diff --git a/parser/testdata/02961_sumMapFiltered_keepKey/ast.json b/parser/testdata/02961_sumMapFiltered_keepKey/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02961_sumMapFiltered_keepKey/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02961_sumMapFiltered_keepKey/metadata.json b/parser/testdata/02961_sumMapFiltered_keepKey/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02961_sumMapFiltered_keepKey/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02961_sumMapFiltered_keepKey/query.sql b/parser/testdata/02961_sumMapFiltered_keepKey/query.sql new file mode 100644 index 000000000..dc5aa743b --- /dev/null +++ b/parser/testdata/02961_sumMapFiltered_keepKey/query.sql @@ -0,0 +1,3 @@ + +SELECT sumMapFiltered([1,2,3])(a,b) FROM values('a Array(Int64), b Array(Int64)',([1, 2, 3], [10, 10, 10]), ([3, 4, 5], [10, 10, 10]),([4, 5, 6], [10, 10, 10]),([6, 7, 8], [10, 10, 10])); +SELECT sumMapFiltered([1,2,3,toInt8(-3)])(a,b) FROM values('a Array(UInt64), b Array(Int64)',([1, 2, 3], 
[10, 10, 10]), ([3, 4, 5], [10, 10, 10]),([4, 5, 6], [10, 10, 10]),([6, 7, 8], [10, 10, 10])); diff --git a/parser/testdata/02962_analyzer_const_in_count_distinct/ast.json b/parser/testdata/02962_analyzer_const_in_count_distinct/ast.json new file mode 100644 index 000000000..0a67cb11a --- /dev/null +++ b/parser/testdata/02962_analyzer_const_in_count_distinct/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001197062, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02962_analyzer_const_in_count_distinct/metadata.json b/parser/testdata/02962_analyzer_const_in_count_distinct/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02962_analyzer_const_in_count_distinct/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02962_analyzer_const_in_count_distinct/query.sql b/parser/testdata/02962_analyzer_const_in_count_distinct/query.sql new file mode 100644 index 000000000..669018a13 --- /dev/null +++ b/parser/testdata/02962_analyzer_const_in_count_distinct/query.sql @@ -0,0 +1,8 @@ +set count_distinct_optimization = 1; + +SELECT uniqExact('257') +FROM + (SELECT + number, CAST(number / 9223372036854775806, 'UInt64') AS m + FROM numbers(3) + ); diff --git a/parser/testdata/02962_analyzer_constant_set/ast.json b/parser/testdata/02962_analyzer_constant_set/ast.json new file mode 100644 index 000000000..8dadec64d --- /dev/null +++ b/parser/testdata/02962_analyzer_constant_set/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_parallel_index (children 1)" + }, + { + "explain": " Identifier test_parallel_index" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001023744, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/02962_analyzer_constant_set/metadata.json b/parser/testdata/02962_analyzer_constant_set/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02962_analyzer_constant_set/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02962_analyzer_constant_set/query.sql b/parser/testdata/02962_analyzer_constant_set/query.sql new file mode 100644 index 000000000..aae2f1c01 --- /dev/null +++ b/parser/testdata/02962_analyzer_constant_set/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS test_parallel_index; + +CREATE TABLE test_parallel_index +( + z UInt64, + INDEX i z TYPE set(8) +) +ENGINE = MergeTree +ORDER BY (); + +insert into test_parallel_index select number from numbers(10); + +select sum(z) from test_parallel_index where z = 2 or z = 7 or z = 13 or z = 17 or z = 19 or z = 23; + +DROP TABLE test_parallel_index; diff --git a/parser/testdata/02962_analyzer_resolve_group_by_on_shards/ast.json b/parser/testdata/02962_analyzer_resolve_group_by_on_shards/ast.json new file mode 100644 index 000000000..860156880 --- /dev/null +++ b/parser/testdata/02962_analyzer_resolve_group_by_on_shards/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " 
ExpressionList (children 2)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function and (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Int64_-2147483647" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal NULL" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001035004, + "rows_read": 14, + "bytes_read": 522 + } +} diff --git a/parser/testdata/02962_analyzer_resolve_group_by_on_shards/metadata.json b/parser/testdata/02962_analyzer_resolve_group_by_on_shards/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02962_analyzer_resolve_group_by_on_shards/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02962_analyzer_resolve_group_by_on_shards/query.sql b/parser/testdata/02962_analyzer_resolve_group_by_on_shards/query.sql new file mode 100644 index 000000000..00a800679 --- /dev/null +++ b/parser/testdata/02962_analyzer_resolve_group_by_on_shards/query.sql @@ -0,0 +1,20 @@ +SELECT NULL AND (toDate(-2147483647, NULL) AND NULL) +FROM remote('127.0.0.{1,2}', view( + SELECT + NULL AND NULL, + NULL, + toDate(toDate('0.0001048577', toDate(NULL, 10 AND (toDate(257, 9223372036854775807, NULL) AND NULL AND NULL) AND NULL, 7, NULL), NULL, NULL) AND NULL AND -2147483648, NULL, NULL) AND NULL + FROM system.one + WHERE toDate(toDate(NULL, NULL, NULL), NULL) + GROUP BY + GROUPING SETS ((NULL)) +)); + +SELECT NULL AND (toDate(-2147483647, NULL) AND NULL) +FROM remote('127.0.0.{1,2}', view( + SELECT NULL + FROM system.one + WHERE toDate(toDate(NULL, NULL, NULL), NULL) + GROUP BY + GROUPING SETS (('')) +)); diff --git a/parser/testdata/02962_indexHint_rpn_construction/ast.json b/parser/testdata/02962_indexHint_rpn_construction/ast.json new file mode 100644 index 000000000..818470b27 --- /dev/null +++ b/parser/testdata/02962_indexHint_rpn_construction/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000966501, + "rows_read": 2, + "bytes_read": 59 + } +} diff --git a/parser/testdata/02962_indexHint_rpn_construction/metadata.json b/parser/testdata/02962_indexHint_rpn_construction/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02962_indexHint_rpn_construction/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02962_indexHint_rpn_construction/query.sql b/parser/testdata/02962_indexHint_rpn_construction/query.sql new file mode 100644 index 000000000..3532bea57 --- /dev/null +++ b/parser/testdata/02962_indexHint_rpn_construction/query.sql @@ -0,0 +1,20 @@ +CREATE TABLE tab +( + `foo` Array(LowCardinality(String)), + INDEX idx foo TYPE bloom_filter GRANULARITY 1 +) +ENGINE = MergeTree +PRIMARY KEY tuple(); + +INSERT INTO tab SELECT if(number % 2, ['value'], []) +FROM system.numbers +LIMIT 10000; + +SELECT * +FROM tab +PREWHERE indexHint(indexHint(-1, 0.)) +WHERE has(foo, 'b'); + +SELECT * +FROM tab +PREWHERE indexHint(0); diff --git a/parser/testdata/02962_join_using_bug_57894/ast.json b/parser/testdata/02962_join_using_bug_57894/ast.json new file mode 100644 index 000000000..3cdaf2217 --- /dev/null +++ b/parser/testdata/02962_join_using_bug_57894/ast.json @@ -0,0 
+1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001016993, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02962_join_using_bug_57894/metadata.json b/parser/testdata/02962_join_using_bug_57894/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02962_join_using_bug_57894/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02962_join_using_bug_57894/query.sql b/parser/testdata/02962_join_using_bug_57894/query.sql new file mode 100644 index 000000000..eb95f618f --- /dev/null +++ b/parser/testdata/02962_join_using_bug_57894/query.sql @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS r; +SET allow_suspicious_low_cardinality_types = 1; + +CREATE TABLE t (`x` UInt32, `s` LowCardinality(String)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t SELECT number, toString(number) FROM numbers(5); + +CREATE TABLE r (`x` LowCardinality(Nullable(UInt32)), `s` Nullable(String)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO r SELECT number, toString(number) FROM numbers(2, 8); +INSERT INTO r VALUES (NULL, NULL); + +SET enable_analyzer = 0; + +SELECT x FROM t FULL JOIN r USING (x) ORDER BY ALL +; + + +SELECT x FROM t FULL JOIN r USING (x) ORDER BY ALL +SETTINGS join_algorithm = 'partial_merge'; + +SELECT x FROM t FULL JOIN r USING (x) ORDER BY ALL +SETTINGS join_algorithm = 'full_sorting_merge'; + +SELECT '--- analyzer ---'; + +SET enable_analyzer = 1; + +SELECT x FROM t FULL JOIN r USING (x) ORDER BY ALL +; + +SELECT x FROM t FULL JOIN r USING (x) ORDER BY ALL +SETTINGS join_algorithm = 'partial_merge'; + +SELECT x FROM t FULL JOIN r USING (x) ORDER BY ALL +SETTINGS join_algorithm = 'full_sorting_merge'; diff --git a/parser/testdata/02962_max_joined_block_rows/ast.json b/parser/testdata/02962_max_joined_block_rows/ast.json new file mode 100644 index 000000000..01a49acb4 --- /dev/null +++ b/parser/testdata/02962_max_joined_block_rows/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000978919, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02962_max_joined_block_rows/metadata.json b/parser/testdata/02962_max_joined_block_rows/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02962_max_joined_block_rows/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02962_max_joined_block_rows/query.sql b/parser/testdata/02962_max_joined_block_rows/query.sql new file mode 100644 index 000000000..175206114 --- /dev/null +++ b/parser/testdata/02962_max_joined_block_rows/query.sql @@ -0,0 +1,40 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE table t1 (a UInt64, b UInt64) ENGINE = Memory; +INSERT INTO t1 SELECT number % 2, number FROM numbers(10); + +CREATE table t2 (a UInt64) ENGINE = Memory; + +INSERT INTO t2 SELECT number % 2 FROM numbers(10); + +SET min_joined_block_size_rows = 0, min_joined_block_size_bytes = 0; + +-- block size is always a multiple of 5 because we have 5 rows for each key in the right table +-- we do not split rows corresponding to the same key + +SET join_algorithm = 'hash'; + +SELECT max(bs) <=
5, b FROM ( + SELECT blockSize() as bs, * FROM t1 JOIN t2 ON t1.a = t2.a +) GROUP BY b +ORDER BY b +SETTINGS max_joined_block_size_rows = 5; + +SELECT '--'; + +SELECT max(bs) <= 10, b FROM ( + SELECT blockSize() as bs, * FROM t1 JOIN t2 ON t1.a = t2.a +) GROUP BY b +ORDER BY b +SETTINGS max_joined_block_size_rows = 10; + +SELECT '--'; + +SET join_algorithm = 'parallel_hash'; + +SELECT max(bs) <= 10, b FROM ( + SELECT blockSize() as bs, * FROM t1 JOIN t2 ON t1.a = t2.a +) GROUP BY b +ORDER BY b +SETTINGS max_joined_block_size_rows = 10; diff --git a/parser/testdata/02962_parallel_window_functions_different_partitioning/ast.json b/parser/testdata/02962_parallel_window_functions_different_partitioning/ast.json new file mode 100644 index 000000000..f199dd7f3 --- /dev/null +++ b/parser/testdata/02962_parallel_window_functions_different_partitioning/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery empsalary (children 1)" + }, + { + "explain": " Identifier empsalary" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001371771, + "rows_read": 2, + "bytes_read": 71 + } +} diff --git a/parser/testdata/02962_parallel_window_functions_different_partitioning/metadata.json b/parser/testdata/02962_parallel_window_functions_different_partitioning/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02962_parallel_window_functions_different_partitioning/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02962_parallel_window_functions_different_partitioning/query.sql b/parser/testdata/02962_parallel_window_functions_different_partitioning/query.sql new file mode 100644 index 000000000..90af415c5 --- /dev/null +++ b/parser/testdata/02962_parallel_window_functions_different_partitioning/query.sql @@ -0,0 +1,32 @@ +CREATE TABLE empsalary +( + `depname` LowCardinality(String), + `empno` UInt64, + `salary` Int32, + `enroll_date` Date +) +ENGINE = Memory; + +insert into empsalary values ('sales',3,4800,'2007-08-01'), ('sales',1,5000,'2006-10-01'), ('sales',4,4800,'2007-08-08'); + + +insert into empsalary values ('sales',3,4800,'2007-08-01'), ('sales',1,5000,'2006-10-01'), ('sales',4,4800,'2007-08-08'); + +insert into empsalary values ('sales',3,4800,'2007-08-01'), ('sales',1,5000,'2006-10-01'), ('sales',4,4800,'2007-08-08'); + +-- 1 window function + +SELECT depname, + sum(salary) OVER (PARTITION BY depname order by empno) AS depsalary +FROM empsalary +order by depsalary; + + +-- 2 window functions with different windows, +-- but the result should be the same for depsalary + +SELECT depname, + sum(salary) OVER (PARTITION BY depname order by empno) AS depsalary, + min(salary) OVER (PARTITION BY depname, empno order by enroll_date) AS depminsalary +FROM empsalary +order by depsalary; diff --git a/parser/testdata/02963_invalid_identifier/ast.json b/parser/testdata/02963_invalid_identifier/ast.json new file mode 100644 index 000000000..a8f4a9798 --- /dev/null +++ b/parser/testdata/02963_invalid_identifier/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " QualifiedAsterisk (children 1)" + }, + { + "explain": " Identifier t.t.t" + }, + { + "explain": "
TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.tables" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier database" + }, + { + "explain": " Function currentDatabase (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001146229, + "rows_read": 15, + "bytes_read": 586 + } +} diff --git a/parser/testdata/02963_invalid_identifier/metadata.json b/parser/testdata/02963_invalid_identifier/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02963_invalid_identifier/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02963_invalid_identifier/query.sql b/parser/testdata/02963_invalid_identifier/query.sql new file mode 100644 index 000000000..64a52364b --- /dev/null +++ b/parser/testdata/02963_invalid_identifier/query.sql @@ -0,0 +1 @@ +SELECT t.t.t.* FROM system.tables WHERE database = currentDatabase(); --{serverError INVALID_IDENTIFIER} diff --git a/parser/testdata/02963_msan_agg_addBatchLookupTable8/ast.json b/parser/testdata/02963_msan_agg_addBatchLookupTable8/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02963_msan_agg_addBatchLookupTable8/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02963_msan_agg_addBatchLookupTable8/metadata.json b/parser/testdata/02963_msan_agg_addBatchLookupTable8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02963_msan_agg_addBatchLookupTable8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02963_msan_agg_addBatchLookupTable8/query.sql b/parser/testdata/02963_msan_agg_addBatchLookupTable8/query.sql new file mode 100644 index 000000000..a3a8bd062 --- /dev/null +++ b/parser/testdata/02963_msan_agg_addBatchLookupTable8/query.sql @@ -0,0 +1,2 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/58727 +SELECT number % 2 AS even, aggThrow(number) FROM numbers(10) GROUP BY even; -- { serverError AGGREGATE_FUNCTION_THROW} diff --git a/parser/testdata/02963_single_value_destructor/ast.json b/parser/testdata/02963_single_value_destructor/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02963_single_value_destructor/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02963_single_value_destructor/metadata.json b/parser/testdata/02963_single_value_destructor/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02963_single_value_destructor/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02963_single_value_destructor/query.sql b/parser/testdata/02963_single_value_destructor/query.sql new file mode 100644 index 000000000..ee8f9164a --- /dev/null +++ b/parser/testdata/02963_single_value_destructor/query.sql @@ -0,0 +1,8 @@ +-- When we use SingleValueDataBaseMemoryBlock we must ensure we call the class destructor on destroy + +Select argMax((number, number), (number, number)) FROM numbers(100000) format Null; +Select argMin((number, number), (number, number)) FROM numbers(100000) format Null; +Select anyHeavy((number, number)) FROM numbers(100000) format Null; +Select singleValueOrNull(number::Date32) FROM numbers(100000) format Null; +Select 
anyArgMax(number, (number, number)) FROM numbers(100000) format Null; +Select anyArgMin(number, (number, number)) FROM numbers(100000) format Null; diff --git a/parser/testdata/02963_test_flexible_disk_configuration/ast.json b/parser/testdata/02963_test_flexible_disk_configuration/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02963_test_flexible_disk_configuration/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02963_test_flexible_disk_configuration/metadata.json b/parser/testdata/02963_test_flexible_disk_configuration/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02963_test_flexible_disk_configuration/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02963_test_flexible_disk_configuration/query.sql b/parser/testdata/02963_test_flexible_disk_configuration/query.sql new file mode 100644 index 000000000..6b285d423 --- /dev/null +++ b/parser/testdata/02963_test_flexible_disk_configuration/query.sql @@ -0,0 +1,98 @@ +-- Tags: no-fasttest + +drop table if exists test; +create table test (a Int32) engine = MergeTree() order by tuple() +settings disk=disk(name='02963_custom_disk', type = object_storage, object_storage_type = local_blob_storage, path='./02963_test1/'); + +drop table if exists test; +create table test (a Int32) engine = MergeTree() order by tuple() +settings disk=disk(name='02963_custom_disk', type = object_storage, object_storage_type = local_blob_storage, path='./02963_test2/'); -- { serverError BAD_ARGUMENTS } + +drop table if exists test; +create table test (a Int32) engine = MergeTree() order by tuple() +settings disk=disk(name='02963_custom_disk'); -- { serverError BAD_ARGUMENTS } + +drop table if exists test; +create table test (a Int32) engine = MergeTree() order by tuple() +settings disk='02963_custom_disk'; -- { serverError BAD_ARGUMENTS } + +drop table if exists test; +create table test (a Int32) engine = MergeTree() order by tuple() +settings disk=disk(name='s3_disk_02963'); -- { serverError BAD_ARGUMENTS } + +drop table if exists test; +create table test (a Int32) engine = MergeTree() order by tuple() +settings disk='s3_disk_02963'; + +drop table if exists test; +create table test (a Int32) engine = MergeTree() order by tuple() +settings disk=disk(name='s3_disk_02963', type = object_storage, object_storage_type = local_blob_storage, path='./02963_test2/'); -- { serverError BAD_ARGUMENTS } + +drop table if exists test; +create table test (a Int32) engine = MergeTree() order by tuple() +settings disk=disk(name='test1', + type = object_storage, + object_storage_type = s3, + endpoint = 'http://localhost:11111/test/common/', + access_key_id = clickhouse, + secret_access_key = clickhouse); + +drop table if exists test; +create table test (a Int32) engine = MergeTree() order by tuple() +settings disk=disk(name='test2', + type = object_storage, + object_storage_type = s3, + metadata_type = local, + endpoint = 'http://localhost:11111/test/common/', + access_key_id = clickhouse, + secret_access_key = clickhouse); + +drop table if exists test; +create table test (a Int32) engine = MergeTree() order by tuple() +settings disk=disk(name='test3', + type = object_storage, + object_storage_type = s3, + metadata_type = local, + metadata_keep_free_space_bytes = 1024, + endpoint = 'http://localhost:11111/test/common/', + access_key_id = clickhouse, + secret_access_key = clickhouse); + +drop table if exists test; +create table test (a Int32) engine 
= MergeTree() order by tuple() +settings disk=disk(name='test4', + type = object_storage, + object_storage_type = s3, + metadata_type = local, + metadata_keep_free_space_bytes = 0, + endpoint = 'http://localhost:11111/test/common/', + access_key_id = clickhouse, + secret_access_key = clickhouse); + +drop table if exists test; +create table test (a Int32) engine = MergeTree() order by tuple() +settings disk=disk(name='test5', + type = object_storage, + object_storage_type = s3, + metadata_type = lll, + endpoint = 'http://localhost:11111/test/common/', + access_key_id = clickhouse, + secret_access_key = clickhouse); -- { serverError UNKNOWN_ELEMENT_IN_CONFIG } + +create table test (a Int32) engine = MergeTree() order by tuple() +settings disk=disk(name='test6', + type = object_storage, + object_storage_type = kkk, + metadata_type = local, + endpoint = 'http://localhost:11111/test/common/', + access_key_id = clickhouse, + secret_access_key = clickhouse); -- { serverError UNKNOWN_ELEMENT_IN_CONFIG } + +create table test (a Int32) engine = MergeTree() order by tuple() +settings disk=disk(name='test7', + type = kkk, + object_storage_type = s3, + metadata_type = local, + endpoint = 'http://localhost:11111/test/common/', + access_key_id = clickhouse, + secret_access_key = clickhouse); -- { serverError UNKNOWN_ELEMENT_IN_CONFIG } diff --git a/parser/testdata/02965_projection_with_partition_pruning/ast.json b/parser/testdata/02965_projection_with_partition_pruning/ast.json new file mode 100644 index 000000000..4048efa3d --- /dev/null +++ b/parser/testdata/02965_projection_with_partition_pruning/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery a (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001166498, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02965_projection_with_partition_pruning/metadata.json b/parser/testdata/02965_projection_with_partition_pruning/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02965_projection_with_partition_pruning/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02965_projection_with_partition_pruning/query.sql b/parser/testdata/02965_projection_with_partition_pruning/query.sql new file mode 100644 index 000000000..92f7cc067 --- /dev/null +++ b/parser/testdata/02965_projection_with_partition_pruning/query.sql @@ -0,0 +1,9 @@ +drop table if exists a; + +create table a (i int, j int, projection p (select * order by j)) engine MergeTree partition by i order by tuple() settings index_granularity = 1; + +insert into a values (1, 2), (0, 5), (3, 4); + +select * from a where i > 0 and j = 4 settings force_index_by_date = 1; + +drop table a; diff --git a/parser/testdata/02966_float32_promotion/ast.json b/parser/testdata/02966_float32_promotion/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02966_float32_promotion/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02966_float32_promotion/metadata.json b/parser/testdata/02966_float32_promotion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02966_float32_promotion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02966_float32_promotion/query.sql b/parser/testdata/02966_float32_promotion/query.sql new file mode 100644 index 
000000000..df687ca53 --- /dev/null +++ b/parser/testdata/02966_float32_promotion/query.sql @@ -0,0 +1,6 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/58680 +DROP TABLE IF EXISTS f32_table; +CREATE TABLE f32_table (my_field Float32) ENGINE=Memory(); +INSERT INTO f32_table values ('49.9'); +SELECT * FROM f32_table where my_field = '49.9'; +DROP TABLE f32_table; diff --git a/parser/testdata/02966_topk_counts_approx_count_sum/ast.json b/parser/testdata/02966_topk_counts_approx_count_sum/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02966_topk_counts_approx_count_sum/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02966_topk_counts_approx_count_sum/metadata.json b/parser/testdata/02966_topk_counts_approx_count_sum/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02966_topk_counts_approx_count_sum/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02966_topk_counts_approx_count_sum/query.sql b/parser/testdata/02966_topk_counts_approx_count_sum/query.sql new file mode 100644 index 000000000..e68805f91 --- /dev/null +++ b/parser/testdata/02966_topk_counts_approx_count_sum/query.sql @@ -0,0 +1,29 @@ +WITH + arraySlice(arrayReverseSort(x -> (x.2, x.1), arrayZip(untuple(sumMap(([k], [1]))))), 1, 5) AS topKExact, + arraySlice(arrayReverseSort(x -> (x.2, x.1), arrayZip(untuple(sumMap(([k], [w]))))), 1, 5) AS topKWeightedExact +SELECT + topKExact, + topKWeightedExact, + topK(3, 1, 'counts')(k) AS topK_counts, + topKWeighted(3, 1, 'counts')(k, w) AS topKWeighted_counts, + approx_top_count(3, 6)(k) AS approx_top_count, + approx_top_k(3, 4)(k) AS approx_top_k, + approx_top_sum(3, 4)(k, w) AS approx_top_sum +FROM +( + SELECT + concat(countDigits(number * number), '_', intDiv((number % 10), 7)) AS k, + number AS w + FROM numbers(1000) +) +FORMAT Vertical; + +SELECT topKMerge(4, 2, 'counts')(state) FROM ( SELECT topKState(4, 2, 'counts')(countDigits(number * number)) AS state FROM numbers(1000)); + +SELECT topKMerge(4, 3, 'counts')(state) FROM ( SELECT topKState(4, 2, 'counts')(countDigits(number * number)) AS state FROM numbers(1000)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT topKMerge(4, 2)(state) FROM ( SELECT topKState(4, 2, 'counts')(countDigits(number * number)) AS state FROM numbers(1000)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT topKMerge(state) FROM ( SELECT topKState(4, 2, 'counts')(countDigits(number * number)) AS state FROM numbers(1000)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT length(approx_top_k(50)(number)), length(approx_top_k(100)(number)), length(approx_top_k(35)(number)) FROM numbers(200); diff --git a/parser/testdata/02967_analyzer_fuzz/ast.json b/parser/testdata/02967_analyzer_fuzz/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02967_analyzer_fuzz/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02967_analyzer_fuzz/metadata.json b/parser/testdata/02967_analyzer_fuzz/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02967_analyzer_fuzz/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02967_analyzer_fuzz/query.sql b/parser/testdata/02967_analyzer_fuzz/query.sql new file mode 100644 index 000000000..dab6ec2af --- /dev/null +++ b/parser/testdata/02967_analyzer_fuzz/query.sql @@ -0,0 +1,20 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/57193 +SELECT + 2147483647, 
+ count(pow(NULL, 1.0001)) +FROM remote(test_cluster_two_shards, system, one) +GROUP BY + makeDateTime64(NULL, NULL, pow(NULL, '257') - '-1', '0.2147483647', 257), + makeDateTime64(pow(pow(NULL, '21474836.46') - '0.0000065535', 1048577), '922337203685477580.6', NULL, NULL, pow(NULL, 1.0001) - 65536, NULL) +WITH CUBE + SETTINGS enable_analyzer = 1; + + +CREATE TABLE data_01223 (`key` Int) ENGINE = Memory; +CREATE TABLE dist_layer_01223 AS data_01223 ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), data_01223); +CREATE TABLE dist_01223 AS data_01223 ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), dist_layer_01223); +SELECT count(round('92233720368547758.07', '-0.01', NULL, nan, '25.7', '-92233720368547758.07', NULL)) +FROM dist_01223 +WHERE round(NULL, 1025, 1.1754943508222875e-38, NULL) +WITH TOTALS + SETTINGS enable_analyzer = 1; diff --git a/parser/testdata/02967_fuzz_bad_cast/ast.json b/parser/testdata/02967_fuzz_bad_cast/ast.json new file mode 100644 index 000000000..d5d4046b8 --- /dev/null +++ b/parser/testdata/02967_fuzz_bad_cast/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1__fuzz_4 (children 1)" + }, + { + "explain": " Identifier t1__fuzz_4" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001154965, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02967_fuzz_bad_cast/metadata.json b/parser/testdata/02967_fuzz_bad_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02967_fuzz_bad_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02967_fuzz_bad_cast/query.sql b/parser/testdata/02967_fuzz_bad_cast/query.sql new file mode 100644 index 000000000..8b34cc6e4 --- /dev/null +++ b/parser/testdata/02967_fuzz_bad_cast/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS t1__fuzz_4; +DROP TABLE IF EXISTS t0__fuzz_29; + +SET allow_suspicious_low_cardinality_types = 1, join_algorithm = 'partial_merge', join_use_nulls = 1; +CREATE TABLE t1__fuzz_4 (`x` Nullable(UInt32), `y` Int64) ENGINE = MergeTree ORDER BY (x, y) SETTINGS allow_nullable_key = 1; +CREATE TABLE t0__fuzz_29 (`x` LowCardinality(UInt256), `y` Array(Array(Date))) ENGINE = MergeTree ORDER BY (x, y); +SELECT sum(0), NULL FROM t0__fuzz_29 FULL OUTER JOIN t1__fuzz_4 USING (x) PREWHERE NULL; + +DROP TABLE t1__fuzz_4; +DROP TABLE t0__fuzz_29; diff --git a/parser/testdata/02967_index_hint_crash/ast.json b/parser/testdata/02967_index_hint_crash/ast.json new file mode 100644 index 000000000..cf1f98418 --- /dev/null +++ b/parser/testdata/02967_index_hint_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001199047, + "rows_read": 2, + "bytes_read": 59 + } +} diff --git a/parser/testdata/02967_index_hint_crash/metadata.json b/parser/testdata/02967_index_hint_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02967_index_hint_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02967_index_hint_crash/query.sql b/parser/testdata/02967_index_hint_crash/query.sql new file mode 100644 index 000000000..e33a4992c --- /dev/null +++ b/parser/testdata/02967_index_hint_crash/query.sql @@ -0,0 +1,16 @@ +CREATE TABLE tab +( + `foo` 
Array(LowCardinality(String)), + INDEX idx foo TYPE bloom_filter GRANULARITY 1 +) +ENGINE = MergeTree +PRIMARY KEY tuple(); + +INSERT INTO tab SELECT if(number % 2, ['value'], []) +FROM system.numbers +LIMIT 10000; + +SELECT * +FROM tab +PREWHERE indexHint() +FORMAT Null; diff --git a/parser/testdata/02968_adaptive_async_insert_timeout/ast.json b/parser/testdata/02968_adaptive_async_insert_timeout/ast.json new file mode 100644 index 000000000..1b94e78c2 --- /dev/null +++ b/parser/testdata/02968_adaptive_async_insert_timeout/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery async_insert_mt_test (children 1)" + }, + { + "explain": " Identifier async_insert_mt_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001048067, + "rows_read": 2, + "bytes_read": 92 + } +} diff --git a/parser/testdata/02968_adaptive_async_insert_timeout/metadata.json b/parser/testdata/02968_adaptive_async_insert_timeout/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02968_adaptive_async_insert_timeout/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02968_adaptive_async_insert_timeout/query.sql b/parser/testdata/02968_adaptive_async_insert_timeout/query.sql new file mode 100644 index 000000000..f9606cace --- /dev/null +++ b/parser/testdata/02968_adaptive_async_insert_timeout/query.sql @@ -0,0 +1,51 @@ +DROP TABLE IF EXISTS async_insert_mt_test; +CREATE TABLE async_insert_mt_test (a UInt64, b Array(UInt64)) ENGINE=MergeTree() ORDER BY a; + +SET async_insert_use_adaptive_busy_timeout = 1; + +INSERT INTO async_insert_mt_test + SETTINGS + async_insert=1, + wait_for_async_insert=1, + async_insert_busy_timeout_min_ms=10, + async_insert_busy_timeout_max_ms=500, + async_insert_busy_timeout_increase_rate=1.0, + async_insert_busy_timeout_decrease_rate=1.0 + VALUES (3, []), (1, [1, 3]), (2, [7, 8]), (4, [5, 9]), (5, [2, 6]); + + +INSERT INTO async_insert_mt_test + SETTINGS + async_insert=1, + wait_for_async_insert=1, + async_insert_busy_timeout_ms=500, + async_insert_busy_timeout_min_ms=500 + VALUES (3, []), (1, [1, 3]), (2, [7, 8]), (4, [5, 9]), (5, [2, 6]); + + +INSERT INTO async_insert_mt_test + SETTINGS + async_insert=1, + wait_for_async_insert=1, + async_insert_busy_timeout_ms=100, + async_insert_busy_timeout_min_ms=500 + VALUES (3, []), (1, [1, 3]), (2, [7, 8]), (4, [5, 9]), (5, [2, 6]); + + +INSERT INTO async_insert_mt_test + SETTINGS + async_insert=1, + wait_for_async_insert=1, + async_insert_busy_timeout_increase_rate=-1.0 + VALUES (3, []), (1, [1, 3]), (2, [7, 8]), (4, [5, 9]), (5, [2, 6]); -- { serverError INVALID_SETTING_VALUE } + + +INSERT INTO async_insert_mt_test + SETTINGS + async_insert=1, + wait_for_async_insert=1, + async_insert_busy_timeout_decrease_rate=-1.0 + VALUES (3, []), (1, [1, 3]), (2, [7, 8]), (4, [5, 9]), (5, [2, 6]); -- { serverError INVALID_SETTING_VALUE } + + +DROP TABLE IF EXISTS async_insert_mt_test; diff --git a/parser/testdata/02968_analyzer_join_column_not_found/ast.json b/parser/testdata/02968_analyzer_join_column_not_found/ast.json new file mode 100644 index 000000000..d6f9a9216 --- /dev/null +++ b/parser/testdata/02968_analyzer_join_column_not_found/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery im (children 1)" + }, + { + "explain": " Identifier im" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 
0.001196638, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02968_analyzer_join_column_not_found/metadata.json b/parser/testdata/02968_analyzer_join_column_not_found/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02968_analyzer_join_column_not_found/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02968_analyzer_join_column_not_found/query.sql b/parser/testdata/02968_analyzer_join_column_not_found/query.sql new file mode 100644 index 000000000..e7c12aac8 --- /dev/null +++ b/parser/testdata/02968_analyzer_join_column_not_found/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS im; +CREATE TABLE im (id Int32, dd Int32) ENGINE = Memory(); +INSERT INTO im VALUES (1, 1); + +DROP TABLE IF EXISTS ts; +CREATE TABLE ts (tid Int32, id Int32) ENGINE = Memory(); +INSERT INTO ts VALUES (1, 1); + +SELECT * +FROM im AS m +INNER JOIN ( + SELECT tid, dd, t.id + FROM im AS m + INNER JOIN ts AS t ON m.id = t.id +) AS t ON m.dd = t.dd +; diff --git a/parser/testdata/02968_full_sorting_join_fuzz/ast.json b/parser/testdata/02968_full_sorting_join_fuzz/ast.json new file mode 100644 index 000000000..5bbef5579 --- /dev/null +++ b/parser/testdata/02968_full_sorting_join_fuzz/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001232952, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02968_full_sorting_join_fuzz/metadata.json b/parser/testdata/02968_full_sorting_join_fuzz/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02968_full_sorting_join_fuzz/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02968_full_sorting_join_fuzz/query.sql b/parser/testdata/02968_full_sorting_join_fuzz/query.sql new file mode 100644 index 000000000..85ca740ce --- /dev/null +++ b/parser/testdata/02968_full_sorting_join_fuzz/query.sql @@ -0,0 +1,15 @@ +SET max_bytes_in_join = 0, join_algorithm = 'full_sorting_merge', max_block_size = 10240; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (`key` UInt32, `s` String) ENGINE = MergeTree ORDER BY key; +CREATE TABLE t2 (`key` UInt32, `s` String) ENGINE = MergeTree ORDER BY key; + +INSERT INTO t1 SELECT (sipHash64(number, 'x') % 10000000) + 1 AS key, concat('val', toString(number)) AS s FROM numbers_mt(10000000); +INSERT INTO t2 SELECT (sipHash64(number, 'y') % 1000000) + 1 AS key, concat('val', toString(number)) AS s FROM numbers_mt(1000000); + +SELECT materialize([NULL]), [], 100, count(materialize(NULL)) FROM t1 ALL INNER JOIN t2 ON t1.key = t2.key PREWHERE 10 WHERE t2.key != 0 WITH TOTALS; + +DROP TABLE t1; +DROP TABLE t2; diff --git a/parser/testdata/02968_projection_merge/ast.json b/parser/testdata/02968_projection_merge/ast.json new file mode 100644 index 000000000..35b798d54 --- /dev/null +++ b/parser/testdata/02968_projection_merge/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'ReplacingMergeTree'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001027996, + "rows_read": 5, + "bytes_read": 189 + } +} diff --git 
a/parser/testdata/02968_projection_merge/metadata.json b/parser/testdata/02968_projection_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02968_projection_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02968_projection_merge/query.sql b/parser/testdata/02968_projection_merge/query.sql new file mode 100644 index 000000000..05b4a39ea --- /dev/null +++ b/parser/testdata/02968_projection_merge/query.sql @@ -0,0 +1,118 @@ +SELECT 'ReplacingMergeTree'; +DROP TABLE IF EXISTS tp; +CREATE TABLE tp +( + `type` Int32, + `eventcnt` UInt64, + PROJECTION p + ( + SELECT type,sum(eventcnt) + GROUP BY type + ) +) +ENGINE = ReplacingMergeTree +ORDER BY type +SETTINGS deduplicate_merge_projection_mode = 'rebuild'; + +INSERT INTO tp SELECT number%3, 1 FROM numbers(3); +INSERT INTO tp SELECT number%3, 2 FROM numbers(3); + +OPTIMIZE TABLE tp FINAL; + +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +SELECT type,sum(eventcnt) FROM tp GROUP BY type ORDER BY type +SETTINGS optimize_use_projections = 0, force_optimize_projection = 0; + +SELECT type,sum(eventcnt) FROM tp GROUP BY type ORDER BY type +SETTINGS optimize_use_projections = 1, force_optimize_projection = 1; + + +SELECT 'CollapsingMergeTree'; +DROP TABLE IF EXISTS tp; +CREATE TABLE tp +( + `type` Int32, + `eventcnt` UInt64, + `sign` Int8, + PROJECTION p + ( + SELECT type,sum(eventcnt) + GROUP BY type + ) +) +ENGINE = CollapsingMergeTree(sign) +ORDER BY type +SETTINGS deduplicate_merge_projection_mode = 'rebuild'; + +INSERT INTO tp SELECT number % 3, 1, 1 FROM numbers(3); +INSERT INTO tp SELECT number % 3, 1, -1 FROM numbers(3); +INSERT INTO tp SELECT number % 3, 2, 1 FROM numbers(3); + +OPTIMIZE TABLE tp FINAL; + +SELECT type,sum(eventcnt) FROM tp GROUP BY type ORDER BY type +SETTINGS optimize_use_projections = 0, force_optimize_projection = 0; + +SELECT type,sum(eventcnt) FROM tp GROUP BY type ORDER BY type +SETTINGS optimize_use_projections = 1, force_optimize_projection = 1; + +-- Actually we don't need to test all 3 engines (Replacing/Collapsing/VersionedCollapsing), +-- because they share the same logic of reducing the number of rows during merges +SELECT 'VersionedCollapsingMergeTree'; +DROP TABLE IF EXISTS tp; +CREATE TABLE tp +( + `type` Int32, + `eventcnt` UInt64, + `sign` Int8, + `version` UInt8, + PROJECTION p + ( + SELECT type,sum(eventcnt) + GROUP BY type + ) +) +ENGINE = VersionedCollapsingMergeTree(sign,version) +ORDER BY type +SETTINGS deduplicate_merge_projection_mode = 'rebuild'; + +INSERT INTO tp SELECT number % 3, 1, -1, 0 FROM numbers(3); +INSERT INTO tp SELECT number % 3, 2, 1, 1 FROM numbers(3); +INSERT INTO tp SELECT number % 3, 1, 1, 0 FROM numbers(3); + +OPTIMIZE TABLE tp FINAL; + +SELECT type,sum(eventcnt) FROM tp GROUP BY type ORDER BY type +SETTINGS optimize_use_projections = 0, force_optimize_projection = 0; + +SELECT type,sum(eventcnt) FROM tp GROUP BY type ORDER BY type +SETTINGS optimize_use_projections = 1, force_optimize_projection = 1; + +SELECT 'DEDUPLICATE ON MergeTree'; +DROP TABLE IF EXISTS tp; +CREATE TABLE tp +( + `type` Int32, + `eventcnt` UInt64, + PROJECTION p + ( + SELECT type,sum(eventcnt) + GROUP BY type + ) +) +ENGINE = MergeTree +ORDER BY type +SETTINGS deduplicate_merge_projection_mode = 'rebuild'; + +INSERT INTO tp SELECT number % 3, 1 FROM numbers(3); +INSERT INTO tp SELECT number % 3, 2 FROM numbers(3); + +OPTIMIZE TABLE tp FINAL DEDUPLICATE BY type; +
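+-- A hedged note on the mechanism exercised above: with
+-- deduplicate_merge_projection_mode = 'rebuild', merges that drop rows
+-- (deduplication, collapsing) rebuild projection p so it stays consistent with
+-- the parent part, which is why each pair of SELECTs with projections disabled
+-- and enabled must agree. Assuming the system.projection_parts table is
+-- available, the rebuilt projection parts could be inspected with a
+-- commented-out helper such as:
+--   SELECT parent_name, name, rows FROM system.projection_parts
+--   WHERE database = currentDatabase() AND table = 'tp' AND active;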
+SELECT type,sum(eventcnt) FROM tp GROUP BY type ORDER BY type +SETTINGS optimize_use_projections = 0, force_optimize_projection = 0; + +SELECT type,sum(eventcnt) FROM tp GROUP BY type ORDER BY type +SETTINGS optimize_use_projections = 1, force_optimize_projection = 1; + diff --git a/parser/testdata/02968_sumMap_with_nan/ast.json b/parser/testdata/02968_sumMap_with_nan/ast.json new file mode 100644 index 000000000..a29fed389 --- /dev/null +++ b/parser/testdata/02968_sumMap_with_nan/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumMapFiltered (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[Float64_6.7]" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001169418, + "rows_read": 14, + "bytes_read": 540 + } +} diff --git a/parser/testdata/02968_sumMap_with_nan/metadata.json b/parser/testdata/02968_sumMap_with_nan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02968_sumMap_with_nan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02968_sumMap_with_nan/query.sql b/parser/testdata/02968_sumMap_with_nan/query.sql new file mode 100644 index 000000000..330da94cf --- /dev/null +++ b/parser/testdata/02968_sumMap_with_nan/query.sql @@ -0,0 +1,4 @@ +SELECT sumMapFiltered([6.7])([x], [y]) +FROM values('x Float64, y Float64', (0, 1), (1, 2.3), (nan, inf), (6.7, 3), (4, 4), (5, 1)); + +SELECT sumMap([x],[y]) FROM values('x Float64, y Float64', (4, 1), (1, 2.3), (nan,inf), (6.7,3), (4,4), (5, 1)); diff --git a/parser/testdata/02968_url_args/ast.json b/parser/testdata/02968_url_args/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02968_url_args/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02968_url_args/metadata.json b/parser/testdata/02968_url_args/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02968_url_args/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02968_url_args/query.sql b/parser/testdata/02968_url_args/query.sql new file mode 100644 index 000000000..a9ac96970 --- /dev/null +++ b/parser/testdata/02968_url_args/query.sql @@ -0,0 +1,19 @@ +-- Tags: no-fasttest + +create table a (x Int64) engine URL('https://example.com/', CSV, headers('foo' = 'bar', 'a' = '13')); +show create a; +create table b (x Int64) engine URL('https://example.com/', CSV, headers()); +show create b; +create table c (x Int64) engine S3('https://example.s3.amazonaws.com/a.csv', NOSIGN, CSV, headers('foo' = 'bar')); +show create c; +create table d (x Int64) engine S3('https://example.s3.amazonaws.com/a.csv', NOSIGN, headers('foo' = 'bar')); +show create d; + +create view e (x Int64) as select count() from url('https://example.com/', CSV, headers('foo' = 'bar', 'a' = '13')); +show create e; +create view f (x 
Int64) as select count() from url('https://example.com/', CSV, headers()); +show create f; +create view g (x Int64) as select count() from s3('https://example.s3.amazonaws.com/a.csv', CSV, headers('foo' = 'bar')); +show create g; +create view h (x Int64) as select count() from s3('https://example.s3.amazonaws.com/a.csv', headers('foo' = 'bar')); +show create h; diff --git a/parser/testdata/02969_analyzer_eliminate_injective_functions/ast.json b/parser/testdata/02969_analyzer_eliminate_injective_functions/ast.json new file mode 100644 index 000000000..ad94c5ac1 --- /dev/null +++ b/parser/testdata/02969_analyzer_eliminate_injective_functions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001302521, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02969_analyzer_eliminate_injective_functions/metadata.json b/parser/testdata/02969_analyzer_eliminate_injective_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02969_analyzer_eliminate_injective_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02969_analyzer_eliminate_injective_functions/query.sql b/parser/testdata/02969_analyzer_eliminate_injective_functions/query.sql new file mode 100644 index 000000000..a7d0c7793 --- /dev/null +++ b/parser/testdata/02969_analyzer_eliminate_injective_functions/query.sql @@ -0,0 +1,31 @@ +set enable_analyzer = 1; + +EXPLAIN QUERY TREE +SELECT toString(toString(number + 1)) as val, count() +FROM numbers(2) +GROUP BY val +ORDER BY val; + +SELECT toString(toString(number + 1)) as val, count() +FROM numbers(2) +GROUP BY ALL +ORDER BY val; + +EXPLAIN QUERY TREE +SELECT toString(toString(number + 1)) as val, count() +FROM numbers(2) +GROUP BY ALL +ORDER BY val; + +SELECT 'CHECK WITH TOTALS'; + +EXPLAIN QUERY TREE +SELECT toString(toString(number + 1)) as val, count() +FROM numbers(2) +GROUP BY val WITH TOTALS +ORDER BY val; + +SELECT toString(toString(number + 1)) as val, count() +FROM numbers(2) +GROUP BY val WITH TOTALS +ORDER BY val; diff --git a/parser/testdata/02969_functions_to_subcolumns_if_null/ast.json b/parser/testdata/02969_functions_to_subcolumns_if_null/ast.json new file mode 100644 index 000000000..16da088e9 --- /dev/null +++ b/parser/testdata/02969_functions_to_subcolumns_if_null/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_subcolumns_if (children 1)" + }, + { + "explain": " Identifier t_subcolumns_if" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00134051, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/02969_functions_to_subcolumns_if_null/metadata.json b/parser/testdata/02969_functions_to_subcolumns_if_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02969_functions_to_subcolumns_if_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02969_functions_to_subcolumns_if_null/query.sql b/parser/testdata/02969_functions_to_subcolumns_if_null/query.sql new file mode 100644 index 000000000..859d26af1 --- /dev/null +++ b/parser/testdata/02969_functions_to_subcolumns_if_null/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS t_subcolumns_if; + +CREATE TABLE t_subcolumns_if (id Nullable(Int64)) ENGINE=MergeTree ORDER BY tuple(); + +INSERT 
INTO t_subcolumns_if SELECT number::Nullable(Int64) as number FROM numbers(10000); + +SELECT + sum(multiIf(id IS NOT NULL, 1, 0)) +FROM t_subcolumns_if +SETTINGS enable_analyzer = 1, optimize_functions_to_subcolumns = 1; + +SELECT + sum(multiIf(id IS NULL, 1, 0)) +FROM t_subcolumns_if +SETTINGS enable_analyzer = 0, optimize_functions_to_subcolumns = 1; + +SELECT + sum(multiIf(id IS NULL, 1, 0)) +FROM t_subcolumns_if +SETTINGS enable_analyzer = 1, optimize_functions_to_subcolumns = 0; + +SELECT + sum(multiIf(id IS NULL, 1, 0)) +FROM t_subcolumns_if +SETTINGS enable_analyzer = 1, optimize_functions_to_subcolumns = 1; + +DROP TABLE IF EXISTS t_subcolumns_if; diff --git a/parser/testdata/02969_mysql_cast_type_aliases/ast.json b/parser/testdata/02969_mysql_cast_type_aliases/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02969_mysql_cast_type_aliases/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02969_mysql_cast_type_aliases/metadata.json b/parser/testdata/02969_mysql_cast_type_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02969_mysql_cast_type_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02969_mysql_cast_type_aliases/query.sql b/parser/testdata/02969_mysql_cast_type_aliases/query.sql new file mode 100644 index 000000000..fdc9b6a3c --- /dev/null +++ b/parser/testdata/02969_mysql_cast_type_aliases/query.sql @@ -0,0 +1,46 @@ +-- See https://dev.mysql.com/doc/refman/8.0/en/cast-functions.html#function_cast +-- Tests are in the order the types appear in the docs + +SET enable_json_type = 1; + +SELECT '-- Uppercase tests'; +-- Not supported as it is translated to FixedString without arguments +-- SELECT 'Binary' AS mysql_type, CAST('' AS BINARY) AS result, toTypeName(result) AS native_type; +SELECT 'Binary(N)' AS mysql_type, CAST('foo' AS BINARY(3)) AS result, toTypeName(result) AS native_type; +SELECT 'Char' AS mysql_type, CAST(44 AS CHAR) AS result, toTypeName(result) AS native_type; +SELECT 'Date' AS mysql_type, CAST('2021-02-03' AS DATE) AS result, toTypeName(result) AS native_type; +SELECT 'DateTime' AS mysql_type, CAST('2021-02-03 12:01:02' AS DATETIME) AS result, toTypeName(result) AS native_type; +SELECT 'Decimal' AS mysql_type, CAST(45.1 AS DECIMAL) AS result, toTypeName(result) AS native_type; +SELECT 'Decimal(M)' AS mysql_type, CAST(46.2 AS DECIMAL(4)) AS result, toTypeName(result) AS native_type; +SELECT 'Decimal(M, D)' AS mysql_type, CAST(47.21 AS DECIMAL(4, 2)) AS result, toTypeName(result) AS native_type; +SELECT 'Double' AS mysql_type, CAST(48.11 AS DOUBLE) AS result, toTypeName(result) AS native_type; +SELECT 'JSON' AS mysql_type, CAST('{\"foo\":\"bar\"}' AS JSON) AS result, toTypeName(result) AS native_type; +SELECT 'Real' AS mysql_type, CAST(49.22 AS REAL) AS result, toTypeName(result) AS native_type; +SELECT 'Signed' AS mysql_type, CAST(50 AS SIGNED) AS result, toTypeName(result) AS native_type; +SELECT 'Unsigned' AS mysql_type, CAST(52 AS UNSIGNED) AS result, toTypeName(result) AS native_type; +-- Could be added as an alias, but SIGNED INTEGER in CAST context means Int64, +-- while INTEGER SIGNED as a column definition means Int32.
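+-- For example (illustrative, not part of the test output): CAST(51 AS SIGNED INTEGER) would yield an Int64, whereas a column declared as x INTEGER SIGNED is created as Int32, so a single alias cannot serve both contexts.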
+-- SELECT 'Signed integer' AS mysql_type, CAST(51 AS SIGNED INTEGER) AS result, toTypeName(result) AS native_type; +-- SELECT 'Unsigned integer' AS mysql_type, CAST(53 AS UNSIGNED INTEGER) AS result, toTypeName(result) AS native_type; +SELECT 'Year' AS mysql_type, CAST(2007 AS YEAR) AS result, toTypeName(result) AS native_type; +-- Currently, expects UInt64 as an argument +-- SELECT 'Time' AS mysql_type, CAST('12:45' AS TIME) AS result, toTypeName(result) AS native_type; + +SELECT '-- Lowercase tests'; +-- select 'Binary' as mysql_type, cast('' as binary) as result, toTypeName(result) as native_type; +select 'Binary(N)' as mysql_type, cast('foo' as binary(3)) as result, toTypeName(result) as native_type; +select 'Char' as mysql_type, cast(44 as char) as result, toTypeName(result) as native_type; +select 'Date' as mysql_type, cast('2021-02-03' as date) as result, toTypeName(result) as native_type; +select 'DateTime' as mysql_type, cast('2021-02-03 12:01:02' as datetime) as result, toTypeName(result) as native_type; +select 'Decimal' as mysql_type, cast(45.1 as decimal) as result, toTypeName(result) as native_type; +select 'Decimal(M)' as mysql_type, cast(46.2 as decimal(4)) as result, toTypeName(result) as native_type; +select 'Decimal(M, D)' as mysql_type, cast(47.21 as decimal(4, 2)) as result, toTypeName(result) as native_type; +select 'Double' as mysql_type, cast(48.11 as double) as result, toTypeName(result) as native_type; +select 'JSON' as mysql_type, cast('{\"foo\":\"bar\"}' as json) as result, toTypeName(result) as native_type; +select 'Real' as mysql_type, cast(49.22 as real) as result, toTypeName(result) as native_type; +select 'Signed' as mysql_type, cast(50 as signed) as result, toTypeName(result) as native_type; +select 'Unsigned' as mysql_type, cast(52 as unsigned) as result, toTypeName(result) as native_type; +-- select 'Signed integer' as mysql_type, cast(51 as signed integer) as result, toTypeName(result) as native_type; +-- select 'Unsigned integer' as mysql_type, cast(53 as unsigned integer) as result, toTypeName(result) as native_type; +select 'Year' as mysql_type, cast(2007 as year) as result, toTypeName(result) as native_type; +-- select 'Time' as mysql_type, cast('12:45' as time) as result, toTypeName(result) as native_type; diff --git a/parser/testdata/02970_generate_series/ast.json b/parser/testdata/02970_generate_series/ast.json new file mode 100644 index 000000000..cd7bd171a --- /dev/null +++ b/parser/testdata/02970_generate_series/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function generate_series (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Literal UInt64_4" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001150485, + "rows_read": 13, + "bytes_read": 507 + } +} diff --git a/parser/testdata/02970_generate_series/metadata.json b/parser/testdata/02970_generate_series/metadata.json new file mode 100644 
index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02970_generate_series/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02970_generate_series/query.sql b/parser/testdata/02970_generate_series/query.sql new file mode 100644 index 000000000..edae884a5 --- /dev/null +++ b/parser/testdata/02970_generate_series/query.sql @@ -0,0 +1,15 @@ +SELECT count() FROM generate_series(5, 4); +SELECT count() FROM generate_series(0, 0); +SELECT count() FROM generate_series(10, 20, 3); +SELECT count() FROM generate_series(7, 77, 10); +SELECT count() FROM generate_series(0, 1000, 2); +SELECT count() FROM generate_series(0, 999, 20); +SELECT sum(generate_series) FROM generate_series(4, 1008, 4) WHERE generate_series % 7 = 1; +SELECT sum(generate_series) FROM generate_series(4, 1008, 4) WHERE generate_series % 7 = 1 SETTINGS max_block_size = 71; + +SELECT * FROM generate_series(5, 4); +SELECT * FROM generate_series(0, 0); +SELECT * FROM generate_series(10, 20, 3); +SELECT * FROM generate_series(7, 77, 10); +SELECT * FROM generate_series(7, 52, 5) WHERE generate_series >= 13; + diff --git a/parser/testdata/02970_visible_width_behavior/ast.json b/parser/testdata/02970_visible_width_behavior/ast.json new file mode 100644 index 000000000..f6bf50e0a --- /dev/null +++ b/parser/testdata/02970_visible_width_behavior/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function visibleWidth (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'ClickHouse是一个很好的数据库'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001377035, + "rows_read": 7, + "bytes_read": 296 + } +} diff --git a/parser/testdata/02970_visible_width_behavior/metadata.json b/parser/testdata/02970_visible_width_behavior/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02970_visible_width_behavior/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02970_visible_width_behavior/query.sql b/parser/testdata/02970_visible_width_behavior/query.sql new file mode 100644 index 000000000..efaa8852c --- /dev/null +++ b/parser/testdata/02970_visible_width_behavior/query.sql @@ -0,0 +1,6 @@ +SELECT visibleWidth('ClickHouse是一个很好的数据库'); +SELECT visibleWidth('ClickHouse是一个很好的数据库') SETTINGS function_visible_width_behavior = 0; +SELECT visibleWidth('ClickHouse是一个很好的数据库') SETTINGS function_visible_width_behavior = 1; +SELECT visibleWidth('ClickHouse是一个很好的数据库') SETTINGS function_visible_width_behavior = 2; -- { serverError BAD_ARGUMENTS } +SELECT visibleWidth('ClickHouse是一个很好的数据库') SETTINGS compatibility = '23.12'; +SELECT visibleWidth('ClickHouse是一个很好的数据库') SETTINGS compatibility = '24.1'; diff --git a/parser/testdata/02971_functions_to_subcolumns_column_names/ast.json b/parser/testdata/02971_functions_to_subcolumns_column_names/ast.json new file mode 100644 index 000000000..2db5265bc --- /dev/null +++ b/parser/testdata/02971_functions_to_subcolumns_column_names/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_column_names (children 1)" + }, + { + "explain": " Identifier t_column_names" + } + ], + + "rows": 2, + + "statistics": + { + 
"elapsed": 0.000911503, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/02971_functions_to_subcolumns_column_names/metadata.json b/parser/testdata/02971_functions_to_subcolumns_column_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02971_functions_to_subcolumns_column_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02971_functions_to_subcolumns_column_names/query.sql b/parser/testdata/02971_functions_to_subcolumns_column_names/query.sql new file mode 100644 index 000000000..6df2f27db --- /dev/null +++ b/parser/testdata/02971_functions_to_subcolumns_column_names/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS t_column_names; + +CREATE TABLE t_column_names (arr Array(UInt64), n Nullable(String)) ENGINE = Memory; + +INSERT INTO t_column_names VALUES ([1, 2, 3], 'foo'); + +SET optimize_functions_to_subcolumns = 1; +SET enable_analyzer = 1; + +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT length(arr), isNull(n) FROM t_column_names; +SELECT length(arr), isNull(n) FROM t_column_names FORMAT JSONEachRow; + +DROP TABLE t_column_names; diff --git a/parser/testdata/02971_functions_to_subcolumns_map/ast.json b/parser/testdata/02971_functions_to_subcolumns_map/ast.json new file mode 100644 index 000000000..01ae7a02f --- /dev/null +++ b/parser/testdata/02971_functions_to_subcolumns_map/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_func_to_subcolumns_map (children 1)" + }, + { + "explain": " Identifier t_func_to_subcolumns_map" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001430477, + "rows_read": 2, + "bytes_read": 100 + } +} diff --git a/parser/testdata/02971_functions_to_subcolumns_map/metadata.json b/parser/testdata/02971_functions_to_subcolumns_map/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02971_functions_to_subcolumns_map/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02971_functions_to_subcolumns_map/query.sql b/parser/testdata/02971_functions_to_subcolumns_map/query.sql new file mode 100644 index 000000000..c53a03b8c --- /dev/null +++ b/parser/testdata/02971_functions_to_subcolumns_map/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS t_func_to_subcolumns_map; + +CREATE TABLE t_func_to_subcolumns_map (id UInt64, m Map(String, UInt64)) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_func_to_subcolumns_map VALUES (1, map('aaa', 1, 'bbb', 2)) (2, map('ccc', 3)); + +SET optimize_functions_to_subcolumns = 1; +SET enable_analyzer = 1; + +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT length(m) FROM t_func_to_subcolumns_map; +SELECT length(m) FROM t_func_to_subcolumns_map; + +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT empty(m) FROM t_func_to_subcolumns_map; +SELECT empty(m) FROM t_func_to_subcolumns_map; + +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT notEmpty(m) FROM t_func_to_subcolumns_map; +SELECT notEmpty(m) FROM t_func_to_subcolumns_map; + +DROP TABLE t_func_to_subcolumns_map; diff --git a/parser/testdata/02971_functions_to_subcolumns_variant/ast.json b/parser/testdata/02971_functions_to_subcolumns_variant/ast.json new file mode 100644 index 000000000..ba43febbf --- /dev/null +++ b/parser/testdata/02971_functions_to_subcolumns_variant/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"DropQuery t_func_to_subcolumns_variant (children 1)" + }, + { + "explain": " Identifier t_func_to_subcolumns_variant" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001371511, + "rows_read": 2, + "bytes_read": 108 + } +} diff --git a/parser/testdata/02971_functions_to_subcolumns_variant/metadata.json b/parser/testdata/02971_functions_to_subcolumns_variant/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02971_functions_to_subcolumns_variant/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02971_functions_to_subcolumns_variant/query.sql b/parser/testdata/02971_functions_to_subcolumns_variant/query.sql new file mode 100644 index 000000000..2612664a7 --- /dev/null +++ b/parser/testdata/02971_functions_to_subcolumns_variant/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS t_func_to_subcolumns_variant; + +SET allow_experimental_variant_type = 1; + +CREATE TABLE t_func_to_subcolumns_variant (id UInt64, v Variant(String, UInt64)) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_func_to_subcolumns_variant VALUES (1, 'foo') (2, 111); + +SET optimize_functions_to_subcolumns = 1; +SET enable_analyzer = 1; + +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT variantElement(v, 'String') FROM t_func_to_subcolumns_variant; +SELECT variantElement(v, 'String') FROM t_func_to_subcolumns_variant; + +DROP TABLE t_func_to_subcolumns_variant; diff --git a/parser/testdata/02971_limit_by_distributed/ast.json b/parser/testdata/02971_limit_by_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02971_limit_by_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02971_limit_by_distributed/metadata.json b/parser/testdata/02971_limit_by_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02971_limit_by_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02971_limit_by_distributed/query.sql b/parser/testdata/02971_limit_by_distributed/query.sql new file mode 100644 index 000000000..66a85137f --- /dev/null +++ b/parser/testdata/02971_limit_by_distributed/query.sql @@ -0,0 +1,25 @@ +-- Tags: shard + +drop table if exists tlb; +create table tlb (k UInt64) engine MergeTree order by k; + +INSERT INTO tlb (k) SELECT 0 FROM numbers(100); +INSERT INTO tlb (k) SELECT 1; + +-- { echoOn } +-- with limit +SELECT k +FROM remote('127.0.0.{2,3}', currentDatabase(), tlb) +ORDER BY k ASC +LIMIT 1 BY k +LIMIT 100; + +-- w/o limit +SELECT k +FROM remote('127.0.0.{2,3}', currentDatabase(), tlb) +ORDER BY k ASC +LIMIT 1 BY k; + +-- { echoOff } + +DROP TABLE tlb; diff --git a/parser/testdata/02972_insert_deduplication_token_hierarchical_inserts/ast.json b/parser/testdata/02972_insert_deduplication_token_hierarchical_inserts/ast.json new file mode 100644 index 000000000..5fb16018f --- /dev/null +++ b/parser/testdata/02972_insert_deduplication_token_hierarchical_inserts/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001450434, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02972_insert_deduplication_token_hierarchical_inserts/metadata.json b/parser/testdata/02972_insert_deduplication_token_hierarchical_inserts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02972_insert_deduplication_token_hierarchical_inserts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02972_insert_deduplication_token_hierarchical_inserts/query.sql b/parser/testdata/02972_insert_deduplication_token_hierarchical_inserts/query.sql new file mode 100644 index 000000000..a0a58fc34 --- /dev/null +++ b/parser/testdata/02972_insert_deduplication_token_hierarchical_inserts/query.sql @@ -0,0 +1,140 @@ +SET insert_deduplicate = 1; +SET deduplicate_blocks_in_dependent_materialized_views = 1; +SET insert_deduplication_token = 'test'; + +DROP TABLE IF EXISTS landing; +CREATE TABLE landing +( + timestamp UInt64, + value UInt64 +) +ENGINE = MergeTree ORDER BY tuple() SETTINGS non_replicated_deduplication_window = 1000; + + +DROP TABLE IF EXISTS ds_1_1; +CREATE TABLE ds_1_1 +( + t UInt64, + v UInt64 +) +ENGINE = MergeTree ORDER BY tuple() SETTINGS non_replicated_deduplication_window = 1000; + +DROP VIEW IF EXISTS mv_1_1; +CREATE MATERIALIZED VIEW mv_1_1 TO ds_1_1 as +SELECT + timestamp t, sum(value) v +FROM landing +GROUP BY t; + +DROP TABLE IF EXISTS ds_1_2; +CREATE TABLE ds_1_2 +( + t UInt64, + v UInt64 +) +ENGINE = MergeTree ORDER BY tuple() SETTINGS non_replicated_deduplication_window = 1000; + +DROP VIEW IF EXISTS mv_1_2; +CREATE MATERIALIZED VIEW mv_1_2 TO ds_1_2 as +SELECT + timestamp t, sum(value) v +FROM landing +GROUP BY t; + + +DROP TABLE IF EXISTS ds_2_1; +CREATE TABLE ds_2_1 +( + l String, + t DateTime, + v UInt64 +) +ENGINE = MergeTree ORDER BY tuple() SETTINGS non_replicated_deduplication_window = 1000; + +DROP VIEW IF EXISTS mv_2_1; +CREATE MATERIALIZED VIEW mv_2_1 TO ds_2_1 as +SELECT '2_1' l, t, v +FROM ds_1_1; + +DROP VIEW IF EXISTS mv_2_2; +CREATE MATERIALIZED VIEW mv_2_2 TO ds_2_1 as +SELECT '2_2' l, t, v +FROM ds_1_2; + + +DROP TABLE IF EXISTS ds_3_1; +CREATE TABLE ds_3_1 +( + l String, + t DateTime, + v UInt64 +) +ENGINE = MergeTree ORDER BY tuple() SETTINGS non_replicated_deduplication_window = 1000; + +DROP VIEW IF EXISTS mv_3_1; +CREATE MATERIALIZED VIEW mv_3_1 TO ds_3_1 as +SELECT '3_1' l, t, v +FROM ds_2_1; + + +DROP TABLE IF EXISTS ds_4_1; +CREATE TABLE ds_4_1 +( + l String, + t DateTime, + v UInt64 +) +ENGINE = MergeTree ORDER BY tuple() SETTINGS non_replicated_deduplication_window = 1000; + +DROP VIEW IF EXISTS mv_4_1; +CREATE MATERIALIZED VIEW mv_4_1 TO ds_4_1 as +SELECT '4_1' l, t, v +FROM mv_3_1; + +DROP TABLE IF EXISTS ds_4_2; +CREATE TABLE ds_4_2 +( + l String, + t DateTime, + v UInt64 +) +ENGINE = MergeTree ORDER BY tuple() SETTINGS non_replicated_deduplication_window = 1000; + +DROP VIEW IF EXISTS mv_4_2; +CREATE MATERIALIZED VIEW mv_4_2 TO ds_4_2 as +SELECT '4_2' l, t, v +FROM mv_3_1; + +INSERT INTO landing SELECT 1 as timestamp, 1 AS value FROM numbers(10); + +SELECT sleep(3); + +INSERT INTO landing SELECT 1 as timestamp, 1 AS value FROM numbers(10); + +SYSTEM FLUSH LOGS part_log; +SELECT table, name, error FROM system.part_log +WHERE database = currentDatabase() +ORDER BY table, name; + +SELECT count() FROM landing; + +DROP TABLE landing; + +DROP TABLE ds_1_1; +DROP VIEW mv_1_1; + +DROP TABLE ds_1_2; +DROP VIEW mv_1_2; + +DROP TABLE ds_2_1; +DROP VIEW mv_2_1; +DROP VIEW mv_2_2; + +DROP TABLE ds_3_1; +DROP VIEW mv_3_1; + +DROP TABLE ds_4_1; +DROP VIEW mv_4_1; + +DROP TABLE ds_4_2; +DROP VIEW mv_4_2; diff --git a/parser/testdata/02972_insert_deduplication_token_hierarchical_inserts_views/ast.json b/parser/testdata/02972_insert_deduplication_token_hierarchical_inserts_views/ast.json new file mode 100644 
index 000000000..2c9ebfefb --- /dev/null +++ b/parser/testdata/02972_insert_deduplication_token_hierarchical_inserts_views/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001110098, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02972_insert_deduplication_token_hierarchical_inserts_views/metadata.json b/parser/testdata/02972_insert_deduplication_token_hierarchical_inserts_views/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02972_insert_deduplication_token_hierarchical_inserts_views/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02972_insert_deduplication_token_hierarchical_inserts_views/query.sql b/parser/testdata/02972_insert_deduplication_token_hierarchical_inserts_views/query.sql new file mode 100644 index 000000000..e6e01752c --- /dev/null +++ b/parser/testdata/02972_insert_deduplication_token_hierarchical_inserts_views/query.sql @@ -0,0 +1,53 @@ +SET insert_deduplicate = 1; +SET deduplicate_blocks_in_dependent_materialized_views = 1; +SET update_insert_deduplication_token_in_dependent_materialized_views = 1; +SET insert_deduplication_token = 'test'; + +DROP TABLE IF EXISTS landing; +CREATE TABLE landing +( + timestamp UInt64, + value UInt64 +) +ENGINE = MergeTree ORDER BY tuple() SETTINGS non_replicated_deduplication_window = 1000; + +DROP TABLE IF EXISTS ds_1_1; +CREATE TABLE ds_1_1 +( + t UInt64, + v UInt64 +) +ENGINE = MergeTree ORDER BY tuple() SETTINGS non_replicated_deduplication_window = 1000; + +DROP VIEW IF EXISTS mv_1_1; +CREATE MATERIALIZED VIEW mv_1_1 TO ds_1_1 as +SELECT + timestamp t, sum(value) v +FROM landing +GROUP BY t; + +DROP VIEW IF EXISTS mv_1_2; +CREATE MATERIALIZED VIEW mv_1_2 TO ds_1_1 as +SELECT + timestamp t, sum(value) v +FROM landing +GROUP BY t; + +INSERT INTO landing SELECT 1 as timestamp, 1 AS value FROM numbers(10); + +SELECT sleep(3); + +INSERT INTO landing SELECT 1 as timestamp, 1 AS value FROM numbers(10); + +SYSTEM FLUSH LOGS part_log; +SELECT table, name, error FROM system.part_log +WHERE database = currentDatabase() +ORDER BY table, name; + +SELECT count() FROM landing; + +DROP TABLE landing; + +DROP TABLE ds_1_1; +DROP VIEW mv_1_1; +DROP VIEW mv_1_2; diff --git a/parser/testdata/02972_parallel_replicas_cte/ast.json b/parser/testdata/02972_parallel_replicas_cte/ast.json new file mode 100644 index 000000000..b9b8fa0bc --- /dev/null +++ b/parser/testdata/02972_parallel_replicas_cte/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery pr_1 (children 1)" + }, + { + "explain": " Identifier pr_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001000991, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02972_parallel_replicas_cte/metadata.json b/parser/testdata/02972_parallel_replicas_cte/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02972_parallel_replicas_cte/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02972_parallel_replicas_cte/query.sql b/parser/testdata/02972_parallel_replicas_cte/query.sql new file mode 100644 index 000000000..effedf319 --- /dev/null +++ b/parser/testdata/02972_parallel_replicas_cte/query.sql @@ -0,0 +1,83 @@ +DROP TABLE IF EXISTS pr_1; +DROP TABLE IF EXISTS pr_2; +DROP TABLE IF 
EXISTS numbers_1e6; + +CREATE TABLE pr_1 (`a` UInt32) ENGINE = MergeTree ORDER BY a PARTITION BY a % 10 AS +SELECT 10 * intDiv(number, 10) + 1 FROM numbers(1_000); + +CREATE TABLE pr_2 (`a` UInt32) ENGINE = MergeTree ORDER BY a AS +SELECT * FROM numbers(1_000); + +WITH filtered_groups AS (SELECT a FROM pr_1 WHERE a >= 100) +SELECT count() FROM pr_2 INNER JOIN filtered_groups ON pr_2.a = filtered_groups.a; + +WITH filtered_groups AS (SELECT a FROM pr_1 WHERE a >= 100) +SELECT count() FROM pr_2 INNER JOIN filtered_groups ON pr_2.a = filtered_groups.a +SETTINGS enable_parallel_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3; + +-- Testing that it is disabled for enable_analyzer=0. With the analyzer it will be supported (with a correct result) +WITH filtered_groups AS (SELECT a FROM pr_1 WHERE a >= 100) +SELECT count() FROM pr_2 INNER JOIN filtered_groups ON pr_2.a = filtered_groups.a +SETTINGS enable_analyzer = 0, parallel_replicas_only_with_analyzer=0, +enable_parallel_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3; -- { serverError SUPPORT_IS_DISABLED } + +-- Disabled for any value of enable_parallel_replicas != 1, not just 2 +WITH filtered_groups AS (SELECT a FROM pr_1 WHERE a >= 100) +SELECT count() FROM pr_2 INNER JOIN filtered_groups ON pr_2.a = filtered_groups.a +SETTINGS enable_analyzer = 0, parallel_replicas_only_with_analyzer=0, +enable_parallel_replicas = 512, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3; -- { serverError SUPPORT_IS_DISABLED } + +-- Sanitizer +SELECT count() FROM pr_2 JOIN numbers(10) as pr_1 ON pr_2.a = pr_1.number +SETTINGS enable_parallel_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3; + +-- Parallel replicas detection should work inside subqueries +SELECT * +FROM +( + WITH filtered_groups AS (SELECT a FROM pr_1 WHERE a >= 100) + SELECT count() FROM pr_2 INNER JOIN filtered_groups ON pr_2.a = filtered_groups.a +) +SETTINGS enable_parallel_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3; + +-- Subquery + subquery +SELECT count() +FROM +( + SELECT c + 1 + FROM + ( + WITH filtered_groups AS (SELECT a FROM pr_1 WHERE a >= 100) + SELECT count() as c FROM pr_2 INNER JOIN filtered_groups ON pr_2.a = filtered_groups.a + ) +) +SETTINGS enable_parallel_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3; + +CREATE TABLE numbers_1e3 +( + `n` UInt64 +) +ENGINE = MergeTree +ORDER BY n +AS SELECT * FROM numbers(1_000); + +-- Same but with nested CTEs +WITH + cte1 AS + ( + SELECT n + FROM numbers_1e3 + ), + cte2 AS + ( + SELECT n + FROM numbers_1e3 + WHERE n IN (cte1) + ) +SELECT count() +FROM cte2 +SETTINGS enable_parallel_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3; + +DROP TABLE IF EXISTS numbers_1e6; +DROP TABLE 
IF EXISTS pr_1; +DROP TABLE IF EXISTS pr_2; diff --git a/parser/testdata/02972_to_string_nullable_timezone/ast.json b/parser/testdata/02972_to_string_nullable_timezone/ast.json new file mode 100644 index 000000000..520b3761a --- /dev/null +++ b/parser/testdata/02972_to_string_nullable_timezone/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001262591, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02972_to_string_nullable_timezone/metadata.json b/parser/testdata/02972_to_string_nullable_timezone/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02972_to_string_nullable_timezone/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02972_to_string_nullable_timezone/query.sql b/parser/testdata/02972_to_string_nullable_timezone/query.sql new file mode 100644 index 000000000..d8cff4f3c --- /dev/null +++ b/parser/testdata/02972_to_string_nullable_timezone/query.sql @@ -0,0 +1,4 @@ +SET session_timezone='Europe/Amsterdam'; +SELECT toString(toDateTime('2022-01-01 12:13:14'), CAST('UTC', 'Nullable(String)')); +SELECT toString(toDateTime('2022-01-01 12:13:14'), materialize(CAST('UTC', 'Nullable(String)'))); +SELECT toString(CAST(toDateTime('2022-01-01 12:13:14'), 'Nullable(DateTime)'), materialize(CAST('UTC', 'Nullable(String)'))); diff --git a/parser/testdata/02973_analyzer_join_use_nulls_column_not_found/ast.json b/parser/testdata/02973_analyzer_join_use_nulls_column_not_found/ast.json new file mode 100644 index 000000000..eea1942df --- /dev/null +++ b/parser/testdata/02973_analyzer_join_use_nulls_column_not_found/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001285625, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02973_analyzer_join_use_nulls_column_not_found/metadata.json b/parser/testdata/02973_analyzer_join_use_nulls_column_not_found/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02973_analyzer_join_use_nulls_column_not_found/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02973_analyzer_join_use_nulls_column_not_found/query.sql b/parser/testdata/02973_analyzer_join_use_nulls_column_not_found/query.sql new file mode 100644 index 000000000..5d1afa2a2 --- /dev/null +++ b/parser/testdata/02973_analyzer_join_use_nulls_column_not_found/query.sql @@ -0,0 +1,81 @@ +SET join_use_nulls = 1; + +SELECT '--'; + +select c FROM ( + select + d2.c + from ( select 1 as a, 2 as b ) d1 + FULL join ( select 1 as a, 3 as c ) d2 + on (d1.a = d2.a) +) +; + +SELECT '--'; + +with d1 as ( + select + 1 as a, + 2 as b +), +d2 as ( + select + 1 as a, + 3 as c +), +joined as ( + select + d1.*, + d2.c + from d1 + inner join d2 + on (d1.a = d2.a) +) +select c +from joined; + +SELECT '--'; + +WITH + a AS ( SELECT 0 AS key, 'a' AS acol ), + b AS ( SELECT 2 AS key ) +SELECT a.key +FROM b +LEFT JOIN a ON 1 +LEFT JOIN a AS a1 ON 1 +; + +SELECT '--'; + +WITH + a AS ( SELECT 0 AS key, 'a' AS acol ), + b AS ( SELECT 2 AS key ) +SELECT a.acol, a1.acol +FROM b +LEFT JOIN a ON a.key = b.key +LEFT JOIN a AS a1 ON a1.key = a.key +; +SELECT '--'; + +WITH + a AS ( SELECT 0 AS key, 'a' AS acol ), + b AS ( SELECT 2 AS key ) +SELECT a.acol, a1.acol +FROM b 
+FULL JOIN a ON a.key = b.key +FULL JOIN a AS a1 ON a1.key = a.key +ORDER BY 1 +SETTINGS join_use_nulls = 0 +; + +SELECT '--'; + +WITH + a AS ( SELECT 0 AS key, 'a' AS acol ), + b AS ( SELECT 2 AS key ) +SELECT a.acol, a1.acol +FROM b +FULL JOIN a ON a.key = b.key +FULL JOIN a AS a1 ON a1.key = a.key +ORDER BY 1 +; diff --git a/parser/testdata/02973_block_number_sparse_serialization_and_mutation/ast.json b/parser/testdata/02973_block_number_sparse_serialization_and_mutation/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02973_block_number_sparse_serialization_and_mutation/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02973_block_number_sparse_serialization_and_mutation/metadata.json b/parser/testdata/02973_block_number_sparse_serialization_and_mutation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02973_block_number_sparse_serialization_and_mutation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02973_block_number_sparse_serialization_and_mutation/query.sql b/parser/testdata/02973_block_number_sparse_serialization_and_mutation/query.sql new file mode 100644 index 000000000..7a1de2897 --- /dev/null +++ b/parser/testdata/02973_block_number_sparse_serialization_and_mutation/query.sql @@ -0,0 +1,39 @@ +-- Tags: zookeeper + +-- we need exact block-numbers +SET insert_keeper_fault_injection_probability=0; + +DROP TABLE IF EXISTS table_with_some_columns; + +CREATE TABLE table_with_some_columns( + key UInt64, + value0 UInt8 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/table_with_some_columns', '1') +ORDER BY key +SETTINGS allow_experimental_block_number_column=1, +ratio_of_defaults_for_sparse_serialization=0.0001, +min_bytes_for_wide_part = 0, +replace_long_file_name_to_hash=0; -- simpler to debug + +INSERT INTO table_with_some_columns SELECT rand(), number + 10 from numbers(100000); + +INSERT INTO table_with_some_columns SELECT rand(), number + 10 from numbers(1); + +OPTIMIZE TABLE table_with_some_columns FINAL; + +INSERT INTO table_with_some_columns SELECT rand(), number+222222222 from numbers(1); + +OPTIMIZE TABLE table_with_some_columns FINAL; + +set alter_sync = 2; + +ALTER TABLE table_with_some_columns DROP COLUMN value0; + +INSERT INTO table_with_some_columns SELECT rand() from numbers(1); + +OPTIMIZE TABLE table_with_some_columns FINAL; + +SELECT *, _block_number FROM table_with_some_columns where not ignore(*) Format Null; + +DROP TABLE IF EXISTS table_with_some_columns; diff --git a/parser/testdata/02973_dictionary_table_exception_fix/ast.json b/parser/testdata/02973_dictionary_table_exception_fix/ast.json new file mode 100644 index 000000000..4a9788098 --- /dev/null +++ b/parser/testdata/02973_dictionary_table_exception_fix/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test_table (children 3)" + }, + { + "explain": " Identifier test_table" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration i (children 1)" + }, + { + "explain": " DataType Int64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier i" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001358708, + "rows_read": 9, + "bytes_read": 309 + } +} diff --git 
a/parser/testdata/02973_dictionary_table_exception_fix/metadata.json b/parser/testdata/02973_dictionary_table_exception_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02973_dictionary_table_exception_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02973_dictionary_table_exception_fix/query.sql b/parser/testdata/02973_dictionary_table_exception_fix/query.sql new file mode 100644 index 000000000..f8061b426 --- /dev/null +++ b/parser/testdata/02973_dictionary_table_exception_fix/query.sql @@ -0,0 +1,6 @@ +CREATE TABLE test_table (i Int64) engine=MergeTree order by i; +CREATE DICTIONARY test_dict (y String, value UInt64 DEFAULT 0) PRIMARY KEY y SOURCE(CLICKHOUSE(TABLE 'test_table')) LAYOUT(DIRECT()); +CREATE TABLE test_dict (y Int64) engine=MergeTree order by y; -- { serverError DICTIONARY_ALREADY_EXISTS } +CREATE DICTIONARY test_table (y String, value UInt64 DEFAULT 0) PRIMARY KEY y SOURCE(CLICKHOUSE(TABLE 'test_table')) LAYOUT(DIRECT()); -- { serverError TABLE_ALREADY_EXISTS } +CREATE DICTIONARY test_dict (y String, value UInt64 DEFAULT 0) PRIMARY KEY y SOURCE(CLICKHOUSE(TABLE 'test_table')) LAYOUT(DIRECT()); -- { serverError DICTIONARY_ALREADY_EXISTS } +CREATE TABLE test_table (y Int64) engine=MergeTree order by y; -- { serverError TABLE_ALREADY_EXISTS } diff --git a/parser/testdata/02974_analyzer_array_join_subcolumn/ast.json b/parser/testdata/02974_analyzer_array_join_subcolumn/ast.json new file mode 100644 index 000000000..fefd1de84 --- /dev/null +++ b/parser/testdata/02974_analyzer_array_join_subcolumn/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t2 (children 1)" + }, + { + "explain": " Identifier t2" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000932339, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/02974_analyzer_array_join_subcolumn/metadata.json b/parser/testdata/02974_analyzer_array_join_subcolumn/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02974_analyzer_array_join_subcolumn/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02974_analyzer_array_join_subcolumn/query.sql b/parser/testdata/02974_analyzer_array_join_subcolumn/query.sql new file mode 100644 index 000000000..1fd103d0b --- /dev/null +++ b/parser/testdata/02974_analyzer_array_join_subcolumn/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; + +CREATE TABLE t2 (id Int32, pe Map(String, Tuple(a UInt64, b UInt64))) ENGINE = MergeTree ORDER BY id; +INSERT INTO t2 VALUES (1, {'a': (1, 2), 'b': (2, 3)}),; + +CREATE TABLE t3 (id Int32, c Tuple(v String, pe Map(String, Tuple(a UInt64, b UInt64)))) ENGINE = MergeTree ORDER BY id; +INSERT INTO t3 VALUES (1, ('A', {'a':(1, 2),'b':(2, 3)})); + +SELECT pe, pe.values.a FROM (SELECT * FROM t2) ARRAY JOIN pe SETTINGS enable_analyzer = 1; +SELECT p, p.values.a FROM (SELECT * FROM t2) ARRAY JOIN pe AS p SETTINGS enable_analyzer = 1; + +SELECT pe, pe.values.a FROM t2 ARRAY JOIN pe; +SELECT p, p.values.a FROM t2 ARRAY JOIN pe AS p; + +SELECT c.pe, c.pe.values.a FROM (SELECT * FROM t3) ARRAY JOIN c.pe SETTINGS enable_analyzer = 1; +SELECT p, p.values.a FROM (SELECT * FROM t3) ARRAY JOIN c.pe as p SETTINGS enable_analyzer = 1; + +SELECT c.pe, c.pe.values.a FROM t3 ARRAY JOIN c.pe SETTINGS enable_analyzer = 1; +SELECT p, p.values.a FROM t3 ARRAY JOIN c.pe as p; + + +DROP 
TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; diff --git a/parser/testdata/02974_if_with_map/ast.json b/parser/testdata/02974_if_with_map/ast.json new file mode 100644 index 000000000..703884442 --- /dev/null +++ b/parser/testdata/02974_if_with_map/ast.json @@ -0,0 +1,115 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function map (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Function map (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Literal UInt64_6" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 31, + + "statistics": + { + "elapsed": 0.001215777, + "rows_read": 31, + "bytes_read": 1168 + } +} diff --git a/parser/testdata/02974_if_with_map/metadata.json b/parser/testdata/02974_if_with_map/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02974_if_with_map/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02974_if_with_map/query.sql b/parser/testdata/02974_if_with_map/query.sql new file mode 100644 index 000000000..2387cffd4 --- /dev/null +++ b/parser/testdata/02974_if_with_map/query.sql @@ -0,0 +1,20 @@ +select if(number % 2 = 0, map(1,2,3,4), map(3,4,5,6)) from numbers(2); +select if(number % 2 = 0, materialize(map(1,2,3,4)), map(3,4,5,6)) from numbers(2); +select if(number % 2 = 0, map(3,4,5,6), materialize(map(1,2,3,4))) from numbers(2); +select if(number % 2 = 0, materialize(map(3,4,5,6)), materialize(map(1,2,3,4))) from numbers(2); +select if(number % 2 = 0, map(1,2,3,4), map(3,4)) from numbers(2); +select if(number % 2 = 0, materialize(map(1,2,3,4)), map(3,4)) from numbers(2); +select if(number % 2 = 0, map(3,4,5,6), materialize(map(1,2))) from numbers(2); +select if(number % 2 = 0, materialize(map(3,4,5,6)), materialize(map(1,2))) from numbers(2); +select if(1, map(1,2,3,4), map(3,4,5,6)) from numbers(2); +select if(0, map(1,2,3,4), map(3,4,5,6)) from numbers(2); +select if(null, map(1,2,3,4), map(3,4,5,6)) from numbers(2); +select if(1, materialize(map(1,2,3,4)), map(3,4,5,6)) from numbers(2); +select if(0, materialize(map(1,2,3,4)), map(3,4,5,6)) from numbers(2); +select if(null, materialize(map(1,2,3,4)), map(3,4,5,6)) from 
numbers(2); +select if(1, map(3,4,5,6), materialize(map(1,2,3,4))) from numbers(2); +select if(0, map(3,4,5,6), materialize(map(1,2,3,4))) from numbers(2); +select if(null, map(3,4,5,6), materialize(map(1,2,3,4))) from numbers(2); +select if(1, materialize(map(3,4,5,6)), materialize(map(1,2,3,4))) from numbers(2); +select if(0, materialize(map(3,4,5,6)), materialize(map(1,2,3,4))) from numbers(2); +select if(null, materialize(map(3,4,5,6)), materialize(map(1,2,3,4))) from numbers(2); diff --git a/parser/testdata/02975_intdiv_with_decimal/ast.json b/parser/testdata/02975_intdiv_with_decimal/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02975_intdiv_with_decimal/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02975_intdiv_with_decimal/metadata.json b/parser/testdata/02975_intdiv_with_decimal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02975_intdiv_with_decimal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02975_intdiv_with_decimal/query.sql b/parser/testdata/02975_intdiv_with_decimal/query.sql new file mode 100644 index 000000000..0911a4812 --- /dev/null +++ b/parser/testdata/02975_intdiv_with_decimal/query.sql @@ -0,0 +1,70 @@ +--intDiv-- +SELECT intDiv(4,2); +SELECT intDiv(toDecimal32(4.4, 2), 2); +SELECT intDiv(4, toDecimal32(2.2, 2)); +SELECT intDiv(toDecimal32(4.4, 2), 2); +SELECT intDiv(toDecimal32(4.4, 2), toDecimal32(2.2, 2)); +SELECT intDiv(toDecimal64(4.4, 3), 2); +SELECT intDiv(toDecimal64(4.4, 3), toDecimal32(2.2, 2)); +SELECT intDiv(toDecimal128(4.4, 4), 2); +SELECT intDiv(toDecimal128(4.4, 4), toDecimal32(2.2, 2)); +SELECT intDiv(toDecimal256(4.4, 5), 2); +SELECT intDiv(toDecimal256(4.4, 5), toDecimal32(2.2, 2)); +SELECT intDiv(4, toDecimal64(2.2, 2)); +SELECT intDiv(toDecimal32(4.4, 2), toDecimal64(2.2, 2)); +SELECT intDiv(4, toDecimal128(2.2, 3)); +SELECT intDiv(toDecimal32(4.4, 2), toDecimal128(2.2, 2)); +SELECT intDiv(4, toDecimal256(2.2, 4)); +SELECT intDiv(toDecimal32(4.4, 2), toDecimal256(2.2, 2)); +SELECT intDiv(toDecimal64(4.4, 2), toDecimal64(2.2, 2)); +SELECT intDiv(toDecimal128(4.4, 2), toDecimal64(2.2, 2)); +SELECT intDiv(toDecimal256(4.4, 2), toDecimal64(2.2, 2)); +SELECT intDiv(toDecimal64(4.4, 2), toDecimal128(2.2, 2)); +SELECT intDiv(toDecimal128(4.4, 2), toDecimal128(2.2, 2)); +SELECT intDiv(toDecimal256(4.4, 2), toDecimal128(2.2, 2)); +SELECT intDiv(toDecimal64(4.4, 2), toDecimal256(2.2, 2)); +SELECT intDiv(toDecimal128(4.4, 2), toDecimal256(2.2, 2)); +SELECT intDiv(toDecimal256(4.4, 2), toDecimal256(2.2, 2)); +SELECT intDiv(4.2, toDecimal32(2.2, 2)); +SELECT intDiv(4.2, toDecimal64(2.2, 2)); +SELECT intDiv(4.2, toDecimal128(2.2, 2)); +SELECT intDiv(4.2, toDecimal256(2.2, 2)); +SELECT intDiv(toDecimal32(4.4, 2), 2.2); +SELECT intDiv(toDecimal64(4.4, 2), 2.2); +SELECT intDiv(toDecimal128(4.4, 2), 2.2); +SELECT intDiv(toDecimal256(4.4, 2), 2.2); +--intDivOrZero-- +SELECT intDivOrZero(4,2); +SELECT intDivOrZero(toDecimal32(4.4, 2), 2); +SELECT intDivOrZero(4, toDecimal32(2.2, 2)); +SELECT intDivOrZero(toDecimal32(4.4, 2), 2); +SELECT intDivOrZero(toDecimal32(4.4, 2), toDecimal32(2.2, 2)); +SELECT intDivOrZero(toDecimal64(4.4, 3), 2); +SELECT intDivOrZero(toDecimal64(4.4, 3), toDecimal32(2.2, 2)); +SELECT intDivOrZero(toDecimal128(4.4, 4), 2); +SELECT intDivOrZero(toDecimal128(4.4, 4), toDecimal32(2.2, 2)); +SELECT intDivOrZero(toDecimal256(4.4, 5), 2); +SELECT intDivOrZero(toDecimal256(4.4, 5), toDecimal32(2.2, 2)); 
+SELECT intDivOrZero(4, toDecimal64(2.2, 2)); +SELECT intDivOrZero(toDecimal32(4.4, 2), toDecimal64(2.2, 2)); +SELECT intDivOrZero(4, toDecimal128(2.2, 3)); +SELECT intDivOrZero(toDecimal32(4.4, 2), toDecimal128(2.2, 2)); +SELECT intDivOrZero(4, toDecimal256(2.2, 4)); +SELECT intDivOrZero(toDecimal32(4.4, 2), toDecimal256(2.2, 2)); +SELECT intDivOrZero(toDecimal64(4.4, 2), toDecimal64(2.2, 2)); +SELECT intDivOrZero(toDecimal128(4.4, 2), toDecimal64(2.2, 2)); +SELECT intDivOrZero(toDecimal256(4.4, 2), toDecimal64(2.2, 2)); +SELECT intDivOrZero(toDecimal64(4.4, 2), toDecimal128(2.2, 2)); +SELECT intDivOrZero(toDecimal128(4.4, 2), toDecimal128(2.2, 2)); +SELECT intDivOrZero(toDecimal256(4.4, 2), toDecimal128(2.2, 2)); +SELECT intDivOrZero(toDecimal64(4.4, 2), toDecimal256(2.2, 2)); +SELECT intDivOrZero(toDecimal128(4.4, 2), toDecimal256(2.2, 2)); +SELECT intDivOrZero(toDecimal256(4.4, 2), toDecimal256(2.2, 2)); +SELECT intDivOrZero(4.2, toDecimal32(2.2, 2)); +SELECT intDivOrZero(4.2, toDecimal64(2.2, 2)); +SELECT intDivOrZero(4.2, toDecimal128(2.2, 2)); +SELECT intDivOrZero(4.2, toDecimal256(2.2, 2)); +SELECT intDivOrZero(toDecimal32(4.4, 2), 2.2); +SELECT intDivOrZero(toDecimal64(4.4, 2), 2.2); +SELECT intDivOrZero(toDecimal128(4.4, 2), 2.2); +SELECT intDivOrZero(toDecimal256(4.4, 2), 2.2); diff --git a/parser/testdata/02975_system_zookeeper_retries/ast.json b/parser/testdata/02975_system_zookeeper_retries/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02975_system_zookeeper_retries/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02975_system_zookeeper_retries/metadata.json b/parser/testdata/02975_system_zookeeper_retries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02975_system_zookeeper_retries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02975_system_zookeeper_retries/query.sql b/parser/testdata/02975_system_zookeeper_retries/query.sql new file mode 100644 index 000000000..1b01399e0 --- /dev/null +++ b/parser/testdata/02975_system_zookeeper_retries/query.sql @@ -0,0 +1,22 @@ +-- Tags: zookeeper, no-parallel, no-fasttest + +SELECT path, name +FROM system.zookeeper +WHERE path = '/keeper' +ORDER BY path, name +SETTINGS + insert_keeper_retry_initial_backoff_ms = 1, + insert_keeper_retry_max_backoff_ms = 20, + insert_keeper_fault_injection_probability=0.3, + insert_keeper_fault_injection_seed=4, + log_comment='02975_system_zookeeper_retries'; + + +SYSTEM FLUSH LOGS query_log; + +-- Check that there were zk session failures +SELECT ProfileEvents['ZooKeeperHardwareExceptions'] > 0 +FROM system.query_log +WHERE current_database = currentDatabase() AND type = 'QueryFinish' AND log_comment='02975_system_zookeeper_retries' +ORDER BY event_time_microseconds DESC +LIMIT 1; diff --git a/parser/testdata/02976_system_zookeeper_filters/ast.json b/parser/testdata/02976_system_zookeeper_filters/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02976_system_zookeeper_filters/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02976_system_zookeeper_filters/metadata.json b/parser/testdata/02976_system_zookeeper_filters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02976_system_zookeeper_filters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02976_system_zookeeper_filters/query.sql 
b/parser/testdata/02976_system_zookeeper_filters/query.sql new file mode 100644 index 000000000..508f224b2 --- /dev/null +++ b/parser/testdata/02976_system_zookeeper_filters/query.sql @@ -0,0 +1,22 @@ +-- Tags: zookeeper, no-parallel, no-fasttest, long + +SET allow_unrestricted_reads_from_keeper = 'false'; + +SELECT count() > 0 FROM system.zookeeper; -- { serverError BAD_ARGUMENTS } +SELECT count() > 0 FROM system.zookeeper WHERE name LIKE '%_%'; -- { serverError BAD_ARGUMENTS } +SELECT count() > 0 FROM system.zookeeper WHERE value LIKE '%'; -- { serverError BAD_ARGUMENTS } +SELECT count() > 0 FROM system.zookeeper WHERE path LIKE '/%'; -- { serverError BAD_ARGUMENTS } +SELECT count() > 0 FROM system.zookeeper WHERE path = '/'; + +SET allow_unrestricted_reads_from_keeper = 'true'; + +SELECT count() > 0 FROM system.zookeeper; +SELECT count() > 0 FROM system.zookeeper WHERE name LIKE '%_%'; +SELECT count() > 0 FROM system.zookeeper WHERE value LIKE '%'; +SELECT count() > 0 FROM system.zookeeper WHERE path LIKE '/%'; +SELECT count() > 0 FROM system.zookeeper WHERE path = '/'; + +SET allow_unrestricted_reads_from_keeper = DEFAULT; + +SELECT count() > 0 FROM system.zookeeper WHERE path = '/' AND zookeeperName = 'unknown'; -- { serverError BAD_ARGUMENTS } +SELECT count() > 0 FROM system.zookeeper WHERE path = '/' AND zookeeperName = 'default'; diff --git a/parser/testdata/02977_csv_format_support_tuple/ast.json b/parser/testdata/02977_csv_format_support_tuple/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02977_csv_format_support_tuple/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02977_csv_format_support_tuple/metadata.json b/parser/testdata/02977_csv_format_support_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02977_csv_format_support_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02977_csv_format_support_tuple/query.sql b/parser/testdata/02977_csv_format_support_tuple/query.sql new file mode 100644 index 000000000..f30e217ca --- /dev/null +++ b/parser/testdata/02977_csv_format_support_tuple/query.sql @@ -0,0 +1,9 @@ +-- Tags: no-parallel + +SET output_format_csv_serialize_tuple_into_separate_columns = false; +SET input_format_csv_deserialize_separate_columns_into_tuple = false; +SET input_format_csv_try_infer_strings_from_quoted_tuples = false; + +insert into function file('02977_1.csv') select '20240305', 1, ['s', 'd'], map('a', 2), tuple('222', 33, map('abc', 5)) SETTINGS engine_file_truncate_on_insert=1; +desc file('02977_1.csv'); +select * from file('02977_1.csv') settings max_threads=1; diff --git a/parser/testdata/02981_nested_bad_types/ast.json b/parser/testdata/02981_nested_bad_types/ast.json new file mode 100644 index 000000000..fc1c38572 --- /dev/null +++ b/parser/testdata/02981_nested_bad_types/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001008793, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02981_nested_bad_types/metadata.json b/parser/testdata/02981_nested_bad_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02981_nested_bad_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02981_nested_bad_types/query.sql b/parser/testdata/02981_nested_bad_types/query.sql new 
file mode 100644 index 000000000..affd4eacd --- /dev/null +++ b/parser/testdata/02981_nested_bad_types/query.sql @@ -0,0 +1,42 @@ +set allow_suspicious_low_cardinality_types=0; +set allow_suspicious_fixed_string_types=0; + +select [42]::Array(LowCardinality(UInt64)); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +select [[[42]]]::Array(Array(Array(LowCardinality(UInt64)))); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +select map('a', 42)::Map(String, LowCardinality(UInt64)); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +select map('a', map('b', [42]))::Map(String, Map(String, Array(LowCardinality(UInt64)))); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +select tuple('a', 42)::Tuple(String, LowCardinality(UInt64)); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +select tuple('a', [map('b', 42)])::Tuple(String, Array(Map(String, LowCardinality(UInt64)))); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} + +create table test (x Array(LowCardinality(UInt64))) engine=Memory; -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +create table test (x Array(Array(LowCardinality(UInt64)))) engine=Memory; -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +create table test (x Map(String, LowCardinality(UInt64))) engine=Memory; -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +create table test (x Map(String, Map(String, LowCardinality(UInt64)))) engine=Memory; -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +create table test (x Tuple(String, LowCardinality(UInt64))) engine=Memory; -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +create table test (x Tuple(String, Array(Map(String, LowCardinality(UInt64))))) engine=Memory; -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} + + +select ['42']::Array(FixedString(1000000)); -- {serverError ILLEGAL_COLUMN} +select ['42']::Array(FixedString(1000000)); -- {serverError ILLEGAL_COLUMN} +select [[['42']]]::Array(Array(Array(FixedString(1000000)))); -- {serverError ILLEGAL_COLUMN} +select map('a', '42')::Map(String, FixedString(1000000)); -- {serverError ILLEGAL_COLUMN} +select map('a', map('b', ['42']))::Map(String, Map(String, Array(FixedString(1000000)))); -- {serverError ILLEGAL_COLUMN} +select tuple('a', '42')::Tuple(String, FixedString(1000000)); -- {serverError ILLEGAL_COLUMN} +select tuple('a', [map('b', '42')])::Tuple(String, Array(Map(String, FixedString(1000000)))); -- {serverError ILLEGAL_COLUMN} + +create table test (x Array(FixedString(1000000))) engine=Memory; -- {serverError ILLEGAL_COLUMN} +create table test (x Array(Array(FixedString(1000000)))) engine=Memory; -- {serverError ILLEGAL_COLUMN} +create table test (x Map(String, FixedString(1000000))) engine=Memory; -- {serverError ILLEGAL_COLUMN} +create table test (x Map(String, Map(String, FixedString(1000000)))) engine=Memory; -- {serverError ILLEGAL_COLUMN} +create table test (x Tuple(String, FixedString(1000000))) engine=Memory; -- {serverError ILLEGAL_COLUMN} +create table test (x Tuple(String, Array(Map(String, FixedString(1000000))))) engine=Memory; -- {serverError ILLEGAL_COLUMN} + +select 42::Variant(String, LowCardinality(UInt64)) settings allow_experimental_variant_type=1; -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +select tuple('a', [map('b', 42)])::Tuple(String, Array(Map(String, Variant(LowCardinality(UInt64), String)))); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +create table test (x Variant(LowCardinality(UInt64), String)) engine=Memory; -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +create 
table test (x Tuple(String, Array(Map(String, Variant(LowCardinality(UInt64), String))))) engine=Memory; -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} + +select '42'::Variant(UInt64, FixedString(1000000)); -- {serverError ILLEGAL_COLUMN} +select tuple('a', [map('b', '42')])::Tuple(String, Array(Map(String, Variant(UInt32, FixedString(1000000))))); -- {serverError ILLEGAL_COLUMN} +create table test (x Variant(UInt64, FixedString(1000000))) engine=Memory; -- {serverError ILLEGAL_COLUMN} +create table test (x Tuple(String, Array(Map(String, FixedString(1000000))))) engine=Memory; -- {serverError ILLEGAL_COLUMN} diff --git a/parser/testdata/02981_translate_fixedstring/ast.json b/parser/testdata/02981_translate_fixedstring/ast.json new file mode 100644 index 000000000..8b4971484 --- /dev/null +++ b/parser/testdata/02981_translate_fixedstring/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function translate (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'aaa'" + }, + { + "explain": " Literal 'FixedString(10)'" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal 'A'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001230713, + "rows_read": 12, + "bytes_read": 434 + } +} diff --git a/parser/testdata/02981_translate_fixedstring/metadata.json b/parser/testdata/02981_translate_fixedstring/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02981_translate_fixedstring/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02981_translate_fixedstring/query.sql b/parser/testdata/02981_translate_fixedstring/query.sql new file mode 100644 index 000000000..209efa4ba --- /dev/null +++ b/parser/testdata/02981_translate_fixedstring/query.sql @@ -0,0 +1,2 @@ +SELECT translate('aaa'::FixedString(10), 'a','A'); +SELECT translate(number::String::FixedString(1), '0','A') from numbers(4); diff --git a/parser/testdata/02981_variant_type_function/ast.json b/parser/testdata/02981_variant_type_function/ast.json new file mode 100644 index 000000000..0bdca62b5 --- /dev/null +++ b/parser/testdata/02981_variant_type_function/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001382217, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02981_variant_type_function/metadata.json b/parser/testdata/02981_variant_type_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02981_variant_type_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02981_variant_type_function/query.sql b/parser/testdata/02981_variant_type_function/query.sql new file mode 100644 index 000000000..cba653d73 --- /dev/null +++ b/parser/testdata/02981_variant_type_function/query.sql @@ -0,0 +1,13 @@ +SET allow_experimental_variant_type = 1; +CREATE TABLE test (v Variant(UInt64, String, Array(UInt64))) ENGINE = Memory; +INSERT INTO test VALUES (NULL), (42), ('Hello, 
World!'), ([1, 2, 3]); +SELECT variantType(v) as type FROM test; +SELECT toTypeName(variantType(v)) from test limit 1; + +SELECT variantType() FROM test; -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT variantType(v, v) FROM test; -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT variantType(v.String) FROM test; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT variantType(v::Variant(UInt64, String, Array(UInt64), Date)) as type FROM test; +SELECT toTypeName(variantType(v::Variant(UInt64, String, Array(UInt64), Date))) from test limit 1; + diff --git a/parser/testdata/02981_vertical_merges_memory_usage/ast.json b/parser/testdata/02981_vertical_merges_memory_usage/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02981_vertical_merges_memory_usage/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02981_vertical_merges_memory_usage/metadata.json b/parser/testdata/02981_vertical_merges_memory_usage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02981_vertical_merges_memory_usage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02981_vertical_merges_memory_usage/query.sql b/parser/testdata/02981_vertical_merges_memory_usage/query.sql new file mode 100644 index 000000000..c5171defd --- /dev/null +++ b/parser/testdata/02981_vertical_merges_memory_usage/query.sql @@ -0,0 +1,37 @@ +-- Tags: long, no-random-merge-tree-settings + +DROP TABLE IF EXISTS t_vertical_merge_memory; + +CREATE TABLE t_vertical_merge_memory (id UInt64, arr Array(String)) +ENGINE = MergeTree ORDER BY id +SETTINGS + min_bytes_for_wide_part = 0, + vertical_merge_algorithm_min_rows_to_activate = 1, + vertical_merge_algorithm_min_columns_to_activate = 1, + index_granularity = 8192, + index_granularity_bytes = '10M', + merge_max_block_size = 8192, + merge_max_block_size_bytes = '10M'; + +INSERT INTO t_vertical_merge_memory SELECT number, arrayMap(x -> repeat('a', 50), range(1000)) FROM numbers(3000); +-- Why 3001 and not 3000 again? An identical insert would be deduplicated: deduplication is off by default for normal MergeTree but on for ReplicatedMergeTree and SharedMergeTree. +-- MergeTree is automatically replaced with SharedMergeTree in ClickHouse Cloud. +INSERT INTO t_vertical_merge_memory SELECT number, arrayMap(x -> repeat('a', 50), range(1000)) FROM numbers(3001); + +OPTIMIZE TABLE t_vertical_merge_memory FINAL; + +SYSTEM FLUSH LOGS part_log; + +SELECT + merge_algorithm, + peak_memory_usage < 500 * 1024 * 1024 + ? 
'OK' + : format('FAIL: memory usage: {}', formatReadableSize(peak_memory_usage)) +FROM system.part_log +WHERE + database = currentDatabase() + AND table = 't_vertical_merge_memory' + AND event_type = 'MergeParts' + AND length(merged_from) = 2; + +DROP TABLE IF EXISTS t_vertical_merge_memory; diff --git a/parser/testdata/02982_changeDate/ast.json b/parser/testdata/02982_changeDate/ast.json new file mode 100644 index 000000000..cd2716dc8 --- /dev/null +++ b/parser/testdata/02982_changeDate/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Negative tests'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.00116292, + "rows_read": 5, + "bytes_read": 185 + } +} diff --git a/parser/testdata/02982_changeDate/metadata.json b/parser/testdata/02982_changeDate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02982_changeDate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02982_changeDate/query.sql b/parser/testdata/02982_changeDate/query.sql new file mode 100644 index 000000000..2bc9aa955 --- /dev/null +++ b/parser/testdata/02982_changeDate/query.sql @@ -0,0 +1,185 @@ +SELECT 'Negative tests'; +-- as changeYear, changeMonth, changeDay, changeMinute, changeSecond share the same implementation, just testing one of them +SELECT changeYear(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT changeYear(toDate('2000-01-01')); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT changeYear(toDate('2000-01-01'), 2000, 1); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT changeYear(1999, 2000); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT changeYear(toDate('2000-01-01'), 'abc'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT changeYear(toDate('2000-01-01'), 1.5); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- Disable timezone randomization +SET session_timezone='CET'; + +SELECT 'changeYear'; +SELECT '-- Date'; +SELECT changeYear(toDate('2000-01-01'), 2001); +SELECT changeYear(toDate('2000-01-01'), 1800); -- out-of-bounds +SELECT changeYear(toDate('2000-01-01'), -5000); -- out-of-bounds +SELECT changeYear(toDate('2000-01-01'), 2500); -- out-of-bounds +SELECT '-- Date32'; +SELECT changeYear(toDate32('2000-01-01'), 2001); +SELECT changeYear(toDate32('2000-01-01'), 1800); -- out-of-bounds +SELECT changeYear(toDate32('2000-01-01'), -5000); -- out-of-bounds +SELECT changeYear(toDate32('2000-01-01'), 2500); -- out-of-bounds +SELECT '-- DateTime'; +SELECT changeYear(toDateTime('2000-01-01 11:22:33'), 2001); +SELECT changeYear(toDateTime('2000-01-01 11:22:33'), 1800); -- out-of-bounds +SELECT changeYear(toDateTime('2000-01-01 11:22:33'), -5000); -- out-of-bounds +SELECT changeYear(toDateTime('2000-01-01 11:22:33'), 2500); -- out-of-bounds +SELECT '-- DateTime64'; +SELECT changeYear(toDateTime64('2000-01-01 11:22:33.4444', 4), 2001); +SELECT changeYear(toDateTime64('2000-01-01 11:22:33.4444', 4), 1800); -- out-of-bounds +SELECT changeYear(toDateTime64('2000-01-01 11:22:33.4444', 4), -5000); -- out-of-bounds +SELECT changeYear(toDateTime64('2000-01-01 11:22:33.4444', 4), 2500); -- out-of-bounds + +SELECT 'changeMonth'; +SELECT '-- Date'; +SELECT changeMonth(toDate('2000-01-01'), 1); +SELECT 
changeMonth(toDate('2000-01-01'), 2); +SELECT changeMonth(toDate('2000-01-01'), 12); +SELECT changeMonth(toDate('2000-01-01'), 0); -- out-of-bounds +SELECT changeMonth(toDate('2000-01-01'), -1); -- out-of-bounds +SELECT changeMonth(toDate('2000-01-01'), 13); -- out-of-bounds +SELECT '-- Date32'; +SELECT changeMonth(toDate32('2000-01-01'), 1); +SELECT changeMonth(toDate32('2000-01-01'), 2); +SELECT changeMonth(toDate32('2000-01-01'), 12); +SELECT changeMonth(toDate32('2000-01-01'), 0); -- out-of-bounds +SELECT changeMonth(toDate32('2000-01-01'), -1); -- out-of-bounds +SELECT changeMonth(toDate32('2000-01-01'), 13); -- out-of-bounds +SELECT '-- DateTime'; +SELECT changeMonth(toDateTime('2000-01-01 11:22:33'), 1); +SELECT changeMonth(toDateTime('2000-01-01 11:22:33'), 2); +SELECT changeMonth(toDateTime('2000-01-01 11:22:33'), 12); +SELECT changeMonth(toDateTime('2000-01-01 11:22:33'), 0); -- out-of-bounds +SELECT changeMonth(toDateTime('2000-01-01 11:22:33'), -1); -- out-of-bounds +SELECT changeMonth(toDateTime('2000-01-01 11:22:33'), 13); -- out-of-bounds +SELECT '-- DateTime64'; +SELECT changeMonth(toDateTime64('2000-01-01 11:22:33.4444', 4), 1); +SELECT changeMonth(toDateTime64('2000-01-01 11:22:33.4444', 4), 2); +SELECT changeMonth(toDateTime64('2000-01-01 11:22:33.4444', 4), 12); +SELECT changeMonth(toDateTime64('2000-01-01 11:22:33.4444', 4), 0); -- out-of-bounds +SELECT changeMonth(toDateTime64('2000-01-01 11:22:33.4444', 4), -1); -- out-of-bounds +SELECT changeMonth(toDateTime64('2000-01-01 11:22:33.4444', 4), 13); -- out-of-bounds + +SELECT 'changeDay'; +SELECT '-- Date'; +SELECT changeDay(toDate('2000-01-01'), 1); +SELECT changeDay(toDate('2000-01-01'), 2); +SELECT changeDay(toDate('2000-01-01'), 31); +SELECT changeDay(toDate('2000-01-01'), 0); -- out-of-bounds +SELECT changeDay(toDate('2000-01-01'), -1); -- out-of-bounds +SELECT changeDay(toDate('2000-01-01'), 32); -- out-of-bounds +SELECT '-- Date32'; +SELECT changeDay(toDate32('2000-01-01'), 1); +SELECT changeDay(toDate32('2000-01-01'), 2); +SELECT changeDay(toDate32('2000-01-01'), 31); +SELECT changeDay(toDate32('2000-01-01'), 0); -- out-of-bounds +SELECT changeDay(toDate32('2000-01-01'), -1); -- out-of-bounds +SELECT changeDay(toDate32('2000-01-01'), 32); -- out-of-bounds +SELECT '-- DateTime'; +SELECT changeDay(toDateTime('2000-01-01 11:22:33'), 1); +SELECT changeDay(toDateTime('2000-01-01 11:22:33'), 2); +SELECT changeDay(toDateTime('2000-01-01 11:22:33'), 31); +SELECT changeDay(toDateTime('2000-01-01 11:22:33'), 0); -- out-of-bounds +SELECT changeDay(toDateTime('2000-01-01 11:22:33'), -1); -- out-of-bounds +SELECT changeDay(toDateTime('2000-01-01 11:22:33'), 32); -- out-of-bounds +SELECT '-- DateTime64'; +SELECT changeDay(toDateTime64('2000-01-01 11:22:33.4444', 4), 1); +SELECT changeDay(toDateTime64('2000-01-01 11:22:33.4444', 4), 2); +SELECT changeDay(toDateTime64('2000-01-01 11:22:33.4444', 4), 31); +SELECT changeDay(toDateTime64('2000-01-01 11:22:33.4444', 4), 0); -- out-of-bounds +SELECT changeDay(toDateTime64('2000-01-01 11:22:33.4444', 4), -1); -- out-of-bounds +SELECT changeDay(toDateTime64('2000-01-01 11:22:33.4444', 4), 32); -- out-of-bounds +SELECT '-- Special case: change to 29 Feb in a leap year'; +SELECT changeDay(toDate('2000-02-28'), 29); +SELECT changeDay(toDate32('2000-02-01'), 29); +SELECT changeDay(toDateTime('2000-02-01 11:22:33'), 29); +SELECT changeDay(toDateTime64('2000-02-01 11:22:33.4444', 4), 29); + +SELECT 'changeHour'; +SELECT '-- Date'; +SELECT changeHour(toDate('2000-01-01'), 0); +SELECT 
changeHour(toDate('2000-01-01'), 2); +SELECT changeHour(toDate('2000-01-01'), 23); +SELECT changeHour(toDate('2000-01-01'), -1); -- out-of-bounds +SELECT changeHour(toDate('2000-01-01'), 24); -- out-of-bounds +SELECT '-- Date32'; +SELECT changeHour(toDate32('2000-01-01'), 0); +SELECT changeHour(toDate32('2000-01-01'), 2); +SELECT changeHour(toDate32('2000-01-01'), 23); +SELECT changeHour(toDate32('2000-01-01'), -1); -- out-of-bounds +SELECT changeHour(toDate32('2000-01-01'), 24); -- out-of-bounds +SELECT '-- DateTime'; +SELECT changeHour(toDateTime('2000-01-01 11:22:33'), 0); +SELECT changeHour(toDateTime('2000-01-01 11:22:33'), 2); +SELECT changeHour(toDateTime('2000-01-01 11:22:33'), 23); +SELECT changeHour(toDateTime('2000-01-01 11:22:33'), -1); -- out-of-bounds +SELECT changeHour(toDateTime('2000-01-01 11:22:33'), 24); -- out-of-bounds +SELECT '-- DateTime64'; +SELECT changeHour(toDateTime64('2000-01-01 11:22:33.4444', 4), 0); +SELECT changeHour(toDateTime64('2000-01-01 11:22:33.4444', 4), 2); +SELECT changeHour(toDateTime64('2000-01-01 11:22:33.4444', 4), 23); +SELECT changeHour(toDateTime64('2000-01-01 11:22:33.4444', 4), -1); -- out-of-bounds +SELECT changeHour(toDateTime64('2000-01-01 11:22:33.4444', 4), 24); -- out-of-bounds +SELECT '-- With different timezone'; +SELECT changeHour(toDate('2000-01-01'), -1) SETTINGS session_timezone = 'Asia/Novosibirsk'; +SELECT changeHour(toDate('2000-01-01'), 24) SETTINGS session_timezone = 'Asia/Novosibirsk'; + +SELECT 'changeMinute'; +SELECT '-- Date'; +SELECT changeMinute(toDate('2000-01-01'), 0); +SELECT changeMinute(toDate('2000-01-01'), 2); +SELECT changeMinute(toDate('2000-01-01'), 59); +SELECT changeMinute(toDate('2000-01-01'), -1); -- out-of-bounds +SELECT changeMinute(toDate('2000-01-01'), 60); -- out-of-bounds +SELECT '-- Date32'; +SELECT changeMinute(toDate32('2000-01-01'), 0); +SELECT changeMinute(toDate32('2000-01-01'), 2); +SELECT changeMinute(toDate32('2000-01-01'), 59); +SELECT changeMinute(toDate32('2000-01-01'), -1); -- out-of-bounds +SELECT changeMinute(toDate32('2000-01-01'), 60); -- out-of-bounds +SELECT '-- DateTime'; +SELECT changeMinute(toDateTime('2000-01-01 11:22:33'), 0); +SELECT changeMinute(toDateTime('2000-01-01 11:22:33'), 2); +SELECT changeMinute(toDateTime('2000-01-01 11:22:33'), 59); +SELECT changeMinute(toDateTime('2000-01-01 11:22:33'), -1); -- out-of-bounds +SELECT changeMinute(toDateTime('2000-01-01 11:22:33'), 60); -- out-of-bounds +SELECT '-- DateTime64'; +SELECT changeMinute(toDateTime64('2000-01-01 11:22:33.4444', 4), 0); +SELECT changeMinute(toDateTime64('2000-01-01 11:22:33.4444', 4), 2); +SELECT changeMinute(toDateTime64('2000-01-01 11:22:33.4444', 4), 59); +SELECT changeMinute(toDateTime64('2000-01-01 11:22:33.4444', 4), -1); -- out-of-bounds +SELECT changeMinute(toDateTime64('2000-01-01 11:22:33.4444', 4), 60); -- out-of-bounds +SELECT '-- With different timezone'; +SELECT changeMinute(toDate('2000-01-01'), -1) SETTINGS session_timezone = 'Asia/Novosibirsk'; +SELECT changeMinute(toDate('2000-01-01'), 60) SETTINGS session_timezone = 'Asia/Novosibirsk'; + +SELECT 'changeSecond'; +SELECT '-- Date'; +SELECT changeSecond(toDate('2000-01-01'), 0); +SELECT changeSecond(toDate('2000-01-01'), 2); +SELECT changeSecond(toDate('2000-01-01'), 59); +SELECT changeSecond(toDate('2000-01-01'), -1); -- out-of-bounds +SELECT changeSecond(toDate('2000-01-01'), 60); -- out-of-bounds +SELECT '-- Date32'; +SELECT changeSecond(toDate32('2000-01-01'), 0); +SELECT changeSecond(toDate32('2000-01-01'), 2); +SELECT 
changeSecond(toDate32('2000-01-01'), 59); +SELECT changeSecond(toDate32('2000-01-01'), -1); -- out-of-bounds +SELECT changeSecond(toDate32('2000-01-01'), 60); -- out-of-bounds +SELECT '-- DateTime'; +SELECT changeSecond(toDateTime('2000-01-01 11:22:33'), 0); +SELECT changeSecond(toDateTime('2000-01-01 11:22:33'), 2); +SELECT changeSecond(toDateTime('2000-01-01 11:22:33'), 59); +SELECT changeSecond(toDateTime('2000-01-01 11:22:33'), -1); -- out-of-bounds +SELECT changeSecond(toDateTime('2000-01-01 11:22:33'), 60); -- out-of-bounds +SELECT '-- DateTime64'; +SELECT changeSecond(toDateTime64('2000-01-01 11:22:33.4444', 4), 0); +SELECT changeSecond(toDateTime64('2000-01-01 11:22:33.4444', 4), 2); +SELECT changeSecond(toDateTime64('2000-01-01 11:22:33.4444', 4), 59); +SELECT changeSecond(toDateTime64('2000-01-01 11:22:33.4444', 4), -1); -- out-of-bounds +SELECT changeSecond(toDateTime64('2000-01-01 11:22:33.4444', 4), 60); -- out-of-bounds +SELECT '-- With different timezone'; +SELECT changeSecond(toDate('2000-01-01'), -1) SETTINGS session_timezone = 'Asia/Novosibirsk'; +SELECT changeSecond(toDate('2000-01-01'), 60) SETTINGS session_timezone = 'Asia/Novosibirsk'; diff --git a/parser/testdata/02982_create_mv_inner_extra/ast.json b/parser/testdata/02982_create_mv_inner_extra/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02982_create_mv_inner_extra/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02982_create_mv_inner_extra/metadata.json b/parser/testdata/02982_create_mv_inner_extra/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02982_create_mv_inner_extra/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02982_create_mv_inner_extra/query.sql b/parser/testdata/02982_create_mv_inner_extra/query.sql new file mode 100644 index 000000000..372d61c16 --- /dev/null +++ b/parser/testdata/02982_create_mv_inner_extra/query.sql @@ -0,0 +1,58 @@ +-- Tags: no-random-merge-tree-settings + +DROP TABLE IF EXISTS data; +DROP TABLE IF EXISTS mv_indexes; +DROP TABLE IF EXISTS mv_no_indexes; +DROP TABLE IF EXISTS mv_projections; +DROP TABLE IF EXISTS mv_primary_key; +DROP TABLE IF EXISTS mv_primary_key_from_column; + +CREATE TABLE data +( + key String, +) +ENGINE = MergeTree +ORDER BY key; + +CREATE MATERIALIZED VIEW mv_indexes +( + key String, + INDEX idx key TYPE bloom_filter GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY key +AS SELECT * FROM data; + +CREATE MATERIALIZED VIEW mv_no_indexes +( + key String, + INDEX idx key TYPE bloom_filter GRANULARITY 1 +) +ENGINE = Null +AS SELECT * FROM data; + +CREATE MATERIALIZED VIEW mv_projections +( + key String, + projection p (SELECT uniqCombined(key)) +) +ENGINE = MergeTree +ORDER BY key +AS SELECT * FROM data; + +CREATE MATERIALIZED VIEW mv_primary_key +( + key String, + PRIMARY KEY key +) +ENGINE = MergeTree +AS SELECT * FROM data; + +CREATE MATERIALIZED VIEW mv_primary_key_from_column +( + key String PRIMARY KEY +) +ENGINE = MergeTree +AS SELECT * FROM data; + +SELECT replaceRegexpOne(create_table_query, 'CREATE TABLE [^ ]*', 'CREATE TABLE x') FROM system.tables WHERE database = currentDatabase() and table LIKE '.inner%' ORDER BY 1 FORMAT LineAsString; diff --git a/parser/testdata/02982_dont_infer_exponent_floats/ast.json b/parser/testdata/02982_dont_infer_exponent_floats/ast.json new file mode 100644 index 000000000..2dcc0868a --- /dev/null +++ b/parser/testdata/02982_dont_infer_exponent_floats/ast.json @@ -0,0 +1,43 @@ +{ + 
"meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DescribeQuery (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier CSV" + }, + { + "explain": " Literal '1E20\\n1.1E20'" + }, + { + "explain": " Set" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001179077, + "rows_read": 7, + "bytes_read": 219 + } +} diff --git a/parser/testdata/02982_dont_infer_exponent_floats/metadata.json b/parser/testdata/02982_dont_infer_exponent_floats/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02982_dont_infer_exponent_floats/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02982_dont_infer_exponent_floats/query.sql b/parser/testdata/02982_dont_infer_exponent_floats/query.sql new file mode 100644 index 000000000..4f78855f5 --- /dev/null +++ b/parser/testdata/02982_dont_infer_exponent_floats/query.sql @@ -0,0 +1,5 @@ +DESC format(CSV, '1E20\n1.1E20') settings input_format_try_infer_exponent_floats = 0; +DESC format(CSV, '1E20\n1.1E20') settings input_format_try_infer_exponent_floats = 1; +-- This setting should not take affect on JSON formats +DESC format(JSONEachRow, '{"x" : 1.1e20}') settings input_format_try_infer_exponent_floats = 0; + diff --git a/parser/testdata/02982_minmax_nan_null_order/ast.json b/parser/testdata/02982_minmax_nan_null_order/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02982_minmax_nan_null_order/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02982_minmax_nan_null_order/metadata.json b/parser/testdata/02982_minmax_nan_null_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02982_minmax_nan_null_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02982_minmax_nan_null_order/query.sql b/parser/testdata/02982_minmax_nan_null_order/query.sql new file mode 100644 index 000000000..ad9e40874 --- /dev/null +++ b/parser/testdata/02982_minmax_nan_null_order/query.sql @@ -0,0 +1,28 @@ +-- { echoOn } +-- Tuples with NaN +SELECT min((c1, c2)), max((c1, c2)) FROM values((nan, 0.), (0., 0.), (5., 5.)); +SELECT minIf((c1, c2), c2 >= 0.0), maxIf((c1, c2), c2 >= 0.0) FROM values((nan, 0.), (0., 0.), (5., 5.)); +SELECT (c1, c2) as t FROM values((nan, 0.), (0., 0.), (5., 5.)) ORDER BY t ASC LIMIT 1; +SELECT (c1, c2) as t FROM values((nan, 0.), (0., 0.), (5., 5.)) ORDER BY t DESC LIMIT 1; + +SELECT min((c1, c2)), max((c1, c2)) FROM values((-5, 0), (nan, 0.), (0., 0.), (5., 5.)); +SELECT minIf((c1, c2), c2 >= 0.0), maxIf((c1, c2), c2 >= 0.0) FROM values((-5, 0), (nan, 0.), (0., 0.), (5., 5.)); +SELECT (c1, c2) as t FROM values((-5, 0), (nan, 0.), (0., 0.), (5., 5.)) ORDER BY t ASC LIMIT 1; +SELECT (c1, c2) as t FROM values((-5, 0), (nan, 0.), (0., 0.), (5., 5.)) ORDER BY t DESC LIMIT 1; + +-- Tuples with NULL +SELECT min((c1, c2)), max((c1, c2)) FROM values((NULL, 0.), (0., 0.), (5., 5.)); +SELECT minIf((c1, c2), c2 >= 0), maxIf((c1, c2), c2 >= 0) FROM values((NULL, 0.), (0., 0.), (5., 5.)); +SELECT (c1, c2) as t FROM values((NULL, 0.), (0., 0.), (5., 5.)) ORDER BY t ASC LIMIT 1; +SELECT (c1, c2) as t FROM values((NULL, 0.), (0., 0.), (5., 5.)) ORDER BY t DESC LIMIT 1; + +SELECT min((c1, c2)), max((c1, c2)) FROM values((0., 0.), (5., 5.), (NULL, 0.)); +SELECT minIf((c1, 
c2), c2 >= 0), maxIf((c1, c2), c2 >= 0) FROM values((0., 0.), (5., 5.), (NULL, 0.)); +SELECT (c1, c2) as t FROM values((NULL, 0.), (0., 0.), (5., 5.), (NULL, 0.)) ORDER BY t ASC LIMIT 1; +SELECT (c1, c2) as t FROM values((NULL, 0.), (0., 0.), (5., 5.), (NULL, 0.)) ORDER BY t DESC LIMIT 1; + +-- Map with NULL +SELECT min(map(0, c1)), max(map(0, c1)) FROM values(NULL, 0, 5., 5.); +SELECT minIf(map(0, c1), assumeNotNull(c1) >= 0), maxIf(map(0, c1), assumeNotNull(c1) >= 0) FROM values(NULL, 0, 5., 5.); +SELECT map(0, c1) as t FROM values(NULL, 0, 5., 5.) ORDER BY t ASC LIMIT 1; +SELECT map(0, c1) as t FROM values(NULL, 0, 5., 5.) ORDER BY t DESC LIMIT 1; diff --git a/parser/testdata/02982_parallel_replicas_unexpected_cluster/ast.json b/parser/testdata/02982_parallel_replicas_unexpected_cluster/ast.json new file mode 100644 index 000000000..7f57263a9 --- /dev/null +++ b/parser/testdata/02982_parallel_replicas_unexpected_cluster/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_unexpected_cluster (children 1)" + }, + { + "explain": " Identifier test_unexpected_cluster" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00108229, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/02982_parallel_replicas_unexpected_cluster/metadata.json b/parser/testdata/02982_parallel_replicas_unexpected_cluster/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02982_parallel_replicas_unexpected_cluster/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02982_parallel_replicas_unexpected_cluster/query.sql b/parser/testdata/02982_parallel_replicas_unexpected_cluster/query.sql new file mode 100644 index 000000000..89ed25129 --- /dev/null +++ b/parser/testdata/02982_parallel_replicas_unexpected_cluster/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS test_unexpected_cluster; +CREATE TABLE test_unexpected_cluster (n UInt64) ENGINE=MergeTree() ORDER BY tuple(); +INSERT INTO test_unexpected_cluster SELECT * FROM numbers(10); + +SET parallel_replicas_only_with_analyzer = 0; -- necessary for CI run with disabled analyzer + +SET enable_parallel_replicas=2, max_parallel_replicas=2, cluster_for_parallel_replicas='test_cluster_two_shards', parallel_replicas_for_non_replicated_merge_tree=1; +SELECT count() FROM test_unexpected_cluster WHERE NOT ignore(*); -- { serverError UNEXPECTED_CLUSTER } + +DROP TABLE test_unexpected_cluster; diff --git a/parser/testdata/02982_unambiguous_alter_commands/ast.json b/parser/testdata/02982_unambiguous_alter_commands/ast.json new file mode 100644 index 000000000..a91838013 --- /dev/null +++ b/parser/testdata/02982_unambiguous_alter_commands/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '--- Alter commands in parens'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001373517, + "rows_read": 5, + "bytes_read": 199 + } +} diff --git a/parser/testdata/02982_unambiguous_alter_commands/metadata.json b/parser/testdata/02982_unambiguous_alter_commands/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02982_unambiguous_alter_commands/metadata.json 
@@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02982_unambiguous_alter_commands/query.sql b/parser/testdata/02982_unambiguous_alter_commands/query.sql new file mode 100644 index 000000000..d25bccb65 --- /dev/null +++ b/parser/testdata/02982_unambiguous_alter_commands/query.sql @@ -0,0 +1,9 @@ +SELECT '--- Alter commands in parens'; +SELECT formatQuery('ALTER TABLE a (MODIFY TTL expr GROUP BY some_key), (ADD COLUMN a Int64)'); +SELECT formatQuery('ALTER TABLE a (MODIFY TTL expr TO VOLUME \'vol1\', expr2 + INTERVAL 2 YEAR TO VOLUME \'vol2\'), (DROP COLUMN c)'); + +SELECT '--- Check only consistent parens around alter commands are accepted'; +SELECT formatQuery('ALTER TABLE a (DROP COLUMN b), DROP COLUMN c'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a DROP COLUMN b, (DROP COLUMN c)'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a (DROP COLUMN b), (DROP COLUMN c)'); +SELECT formatQuery('ALTER TABLE a DROP COLUMN b, DROP COLUMN c'); -- Make sure it is backward compatible diff --git a/parser/testdata/02983_const_sharding_key/ast.json b/parser/testdata/02983_const_sharding_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02983_const_sharding_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02983_const_sharding_key/metadata.json b/parser/testdata/02983_const_sharding_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02983_const_sharding_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02983_const_sharding_key/query.sql b/parser/testdata/02983_const_sharding_key/query.sql new file mode 100644 index 000000000..339293b8b --- /dev/null +++ b/parser/testdata/02983_const_sharding_key/query.sql @@ -0,0 +1,26 @@ +-- Tags: distributed, no-parallel + +DROP DATABASE IF EXISTS shard_0; +DROP DATABASE IF EXISTS shard_1; +DROP TABLE IF EXISTS t_distr; + +CREATE DATABASE IF NOT EXISTS shard_0; +CREATE DATABASE IF NOT EXISTS shard_1; + +CREATE TABLE shard_0.t_local (a Int) ENGINE = Memory; +CREATE TABLE shard_1.t_local (a Int) ENGINE = Memory; +CREATE TABLE t_distr (a Int) ENGINE = Distributed(test_cluster_two_shards_different_databases, '', 't_local', 1000); + +SET distributed_foreground_insert=0; +INSERT INTO t_distr VALUES (1), (2), (3); + +SET distributed_foreground_insert=1; +INSERT INTO t_distr VALUES (4), (5), (6), (7); + +SYSTEM FLUSH DISTRIBUTED t_distr; + +SELECT * FROM t_distr ORDER BY a; + +DROP TABLE t_distr; +DROP DATABASE shard_0; +DROP DATABASE shard_1; diff --git a/parser/testdata/02983_empty_map/ast.json b/parser/testdata/02983_empty_map/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02983_empty_map/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02983_empty_map/metadata.json b/parser/testdata/02983_empty_map/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02983_empty_map/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02983_empty_map/query.sql b/parser/testdata/02983_empty_map/query.sql new file mode 100644 index 000000000..78bc5d873 --- /dev/null +++ b/parser/testdata/02983_empty_map/query.sql @@ -0,0 +1,21 @@ +--https://github.com/ClickHouse/ClickHouse/issues/59402 +CREATE TABLE t1 +( + f1 Int32, + f2 Map(LowCardinality(String),LowCardinality(String)), + f3 Map(String,String), + f4 Map(Int32,Int32) +) +ENGINE=Memory AS +SELECT 1 as f1, + 
map(number%2,number%10) as f2, + f2 as f3, + f2 as f4 +from numbers(1000111); + +SET max_block_size=10; + +-- { echoOn } +SELECT f1, f2['2'], count() FROM t1 GROUP BY 1,2 order by 1,2; +SELECT f1, f3['2'], count() FROM t1 GROUP BY 1,2 order by 1,2; +SELECT f1, f4[2], count() FROM t1 GROUP BY 1,2 order by 1,2; diff --git a/parser/testdata/02983_empty_map_hasToken/ast.json b/parser/testdata/02983_empty_map_hasToken/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02983_empty_map_hasToken/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02983_empty_map_hasToken/metadata.json b/parser/testdata/02983_empty_map_hasToken/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02983_empty_map_hasToken/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02983_empty_map_hasToken/query.sql b/parser/testdata/02983_empty_map_hasToken/query.sql new file mode 100644 index 000000000..6d146150a --- /dev/null +++ b/parser/testdata/02983_empty_map_hasToken/query.sql @@ -0,0 +1,27 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/60223 + +CREATE TABLE test +( + t String, + id String, + h Map(String, String) +) +ENGINE = MergeTree +ORDER BY (t, id) SETTINGS index_granularity = 4096 ; + +insert into test values ('xxx', 'x', {'content-type':'text/plain','user-agent':'bulk-tests'}); +insert into test values ('xxx', 'y', {'content-type':'application/json','user-agent':'bulk-tests'}); +insert into test select 'xxx', number, map('content-type', 'x' ) FROM numbers(1e2); + +optimize table test final; + +SELECT count() FROM test PREWHERE hasToken(h['user-agent'], 'bulk') WHERE hasToken(h['user-agent'], 'tests') and t = 'xxx'; +SELECT count() FROM test PREWHERE hasToken(h['user-agent'], 'tests') WHERE hasToken(h['user-agent'], 'bulk') and t = 'xxx'; +SELECT count() FROM test WHERE hasToken(h['user-agent'], 'bulk') and hasToken(h['user-agent'], 'tests') and t = 'xxx'; +SELECT count() FROM test PREWHERE hasToken(h['user-agent'], 'bulk') and hasToken(h['user-agent'], 'tests') and t = 'xxx'; +SELECT count() FROM test PREWHERE hasToken(h['user-agent'], 'bulk') and hasToken(h['user-agent'], 'tests') WHERE t = 'xxx'; +SELECT count() FROM test PREWHERE hasToken(h['user-agent'], 'tests') and hasToken(h['user-agent'], 'bulk') WHERE t = 'xxx'; +SELECT count() FROM test PREWHERE hasToken(h['user-agent'], 'tests') and hasToken(h['user-agent'], 'bulk'); +SELECT count() FROM test PREWHERE hasToken(h['user-agent'], 'bulk') and hasToken(h['user-agent'], 'tests'); +SELECT count() FROM test WHERE hasToken(h['user-agent'], 'tests') and hasToken(h['user-agent'], 'bulk'); +SELECT count() FROM test WHERE hasToken(h['user-agent'], 'bulk') and hasToken(h['user-agent'], 'tests'); diff --git a/parser/testdata/02984_topk_empty_merge/ast.json b/parser/testdata/02984_topk_empty_merge/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02984_topk_empty_merge/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02984_topk_empty_merge/metadata.json b/parser/testdata/02984_topk_empty_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02984_topk_empty_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02984_topk_empty_merge/query.sql b/parser/testdata/02984_topk_empty_merge/query.sql new file mode 100644 index 000000000..754b0cb26 --- /dev/null +++ 
b/parser/testdata/02984_topk_empty_merge/query.sql @@ -0,0 +1,2 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/59107 +SELECT topK('102.4') FROM remote('127.0.0.{1,2}', view(SELECT NULL FROM system.one WHERE dummy = 1)); diff --git a/parser/testdata/02985_dialects_with_distributed_tables/ast.json b/parser/testdata/02985_dialects_with_distributed_tables/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02985_dialects_with_distributed_tables/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02985_dialects_with_distributed_tables/metadata.json b/parser/testdata/02985_dialects_with_distributed_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02985_dialects_with_distributed_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02985_dialects_with_distributed_tables/query.sql b/parser/testdata/02985_dialects_with_distributed_tables/query.sql new file mode 100644 index 000000000..e9e6934f1 --- /dev/null +++ b/parser/testdata/02985_dialects_with_distributed_tables/query.sql @@ -0,0 +1,33 @@ +-- Tags: no-fasttest, distributed + +SET allow_experimental_prql_dialect = 1; +SET allow_experimental_kusto_dialect = 1; + +DROP TABLE IF EXISTS shared_test_table; +DROP TABLE IF EXISTS distributed_test_table; + +CREATE TABLE shared_test_table (id UInt64) +ENGINE = MergeTree +ORDER BY (id); + +CREATE TABLE distributed_test_table +ENGINE = Distributed(test_cluster_two_shard_three_replicas_localhost, currentDatabase(), shared_test_table); + +INSERT INTO shared_test_table VALUES (123), (651), (446), (315), (234), (764); + +SELECT id FROM distributed_test_table LIMIT 3; + +SET dialect = 'kusto'; + +distributed_test_table | take 3; + +SET dialect = 'prql'; + +from distributed_test_table +select {id} +take 1..3; + +SET dialect = 'clickhouse'; + +DROP TABLE distributed_test_table; +DROP TABLE shared_test_table; diff --git a/parser/testdata/02985_if_over_big_int_decimal/ast.json b/parser/testdata/02985_if_over_big_int_decimal/ast.json new file mode 100644 index 000000000..62f135888 --- /dev/null +++ b/parser/testdata/02985_if_over_big_int_decimal/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumIf (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal 'Int128'" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1000" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 
0.001663515, + "rows_read": 23, + "bytes_read": 904 + } +} diff --git a/parser/testdata/02985_if_over_big_int_decimal/metadata.json b/parser/testdata/02985_if_over_big_int_decimal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02985_if_over_big_int_decimal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02985_if_over_big_int_decimal/query.sql b/parser/testdata/02985_if_over_big_int_decimal/query.sql new file mode 100644 index 000000000..0295a64a0 --- /dev/null +++ b/parser/testdata/02985_if_over_big_int_decimal/query.sql @@ -0,0 +1,14 @@ +select sumIf(number::Int128, number % 10 == 0) from numbers(1000); +select sumIf(number::UInt128, number % 10 == 0) from numbers(1000); +select sumIf(number::Int256, number % 10 == 0) from numbers(1000); +select sumIf(number::UInt256, number % 10 == 0) from numbers(1000); +select sumIf(number::Decimal128(3), number % 10 == 0) from numbers(1000); +select sumIf(number::Decimal256(3), number % 10 == 0) from numbers(1000); + +-- Test when the condition is neither 0 nor 1 +select sumIf(number::Int128, number % 10) from numbers(1000); +select sumIf(number::UInt128, number % 10) from numbers(1000); +select sumIf(number::Int256, number % 10) from numbers(1000); +select sumIf(number::UInt256, number % 10) from numbers(1000); +select sumIf(number::Decimal128(3), number % 10) from numbers(1000); +select sumIf(number::Decimal256(3), number % 10) from numbers(1000); diff --git a/parser/testdata/02985_minmax_index_aggregate_function/ast.json b/parser/testdata/02985_minmax_index_aggregate_function/ast.json new file mode 100644 index 000000000..db262bd5a --- /dev/null +++ b/parser/testdata/02985_minmax_index_aggregate_function/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_index_agg_func (children 1)" + }, + { + "explain": " Identifier t_index_agg_func" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001402133, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/02985_minmax_index_aggregate_function/metadata.json b/parser/testdata/02985_minmax_index_aggregate_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02985_minmax_index_aggregate_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02985_minmax_index_aggregate_function/query.sql b/parser/testdata/02985_minmax_index_aggregate_function/query.sql new file mode 100644 index 000000000..7d35c1b31 --- /dev/null +++ b/parser/testdata/02985_minmax_index_aggregate_function/query.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS t_index_agg_func; + +CREATE TABLE t_index_agg_func +( + id UInt64, + v AggregateFunction(avg, UInt64), + INDEX idx_v v TYPE minmax GRANULARITY 1 +) +ENGINE = AggregatingMergeTree ORDER BY id +SETTINGS index_granularity = 4; -- { serverError BAD_ARGUMENTS } + +CREATE TABLE t_index_agg_func +( + id UInt64, + v AggregateFunction(avg, UInt64), +) +ENGINE = AggregatingMergeTree ORDER BY id +SETTINGS index_granularity = 4; + +ALTER TABLE t_index_agg_func ADD INDEX idx_v v TYPE minmax GRANULARITY 1; -- { serverError BAD_ARGUMENTS } + +ALTER TABLE t_index_agg_func ADD INDEX idx_v finalizeAggregation(v) TYPE minmax GRANULARITY 1; + +INSERT INTO t_index_agg_func SELECT number % 10, initializeAggregation('avgState', toUInt64(number % 20)) FROM numbers(1000); +INSERT INTO t_index_agg_func SELECT number % 10, 
initializeAggregation('avgState', toUInt64(number % 20)) FROM numbers(1000, 1000); + +OPTIMIZE TABLE t_index_agg_func FINAL; + +SELECT count() FROM system.parts WHERE table = 't_index_agg_func' AND database = currentDatabase() AND active; + +SET force_data_skipping_indices = 'idx_v'; +SET use_skip_indexes_if_final = 1; + +SELECT id, finalizeAggregation(v) AS vv FROM t_index_agg_func FINAL WHERE vv >= 10 ORDER BY id; + +DROP TABLE t_index_agg_func; diff --git a/parser/testdata/02985_shard_query_start_time/ast.json b/parser/testdata/02985_shard_query_start_time/ast.json new file mode 100644 index 000000000..bfdd40ef9 --- /dev/null +++ b/parser/testdata/02985_shard_query_start_time/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery sharded_table (children 1)" + }, + { + "explain": " Identifier sharded_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001174994, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/02985_shard_query_start_time/metadata.json b/parser/testdata/02985_shard_query_start_time/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02985_shard_query_start_time/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02985_shard_query_start_time/query.sql b/parser/testdata/02985_shard_query_start_time/query.sql new file mode 100644 index 000000000..832e21bca --- /dev/null +++ b/parser/testdata/02985_shard_query_start_time/query.sql @@ -0,0 +1,34 @@ +DROP TABLE IF EXISTS sharded_table; +CREATE TABLE sharded_table (dummy UInt8) ENGINE = Distributed('test_cluster_two_shards', 'system', 'one'); + +SET prefer_localhost_replica=0; +SELECT * FROM sharded_table FORMAT Null SETTINGS log_comment='02985_shard_query_start_time_query_1'; + +SYSTEM FLUSH LOGS query_log; + +-- Check that there are 2 queries to shards and for each one query_start_time_microseconds is more recent +-- than initial_query_start_time_microseconds, and initial_query_start_time_microseconds matches the original query's +-- query_start_time_microseconds +WITH +( + SELECT + (query_id, query_start_time, query_start_time_microseconds) + FROM + system.query_log + WHERE + event_date >= yesterday() + AND current_database = currentDatabase() + AND log_comment = '02985_shard_query_start_time_query_1' + AND type = 'QueryFinish' +) AS id_and_start_tuple +SELECT + type, + countIf(query_start_time >= initial_query_start_time), -- Using >= because it's comparing seconds + countIf(query_start_time_microseconds > initial_query_start_time_microseconds), + countIf(initial_query_start_time = id_and_start_tuple.2), + countIf(initial_query_start_time_microseconds = id_and_start_tuple.3) +FROM + system.query_log +WHERE + NOT is_initial_query AND initial_query_id = id_and_start_tuple.1 +GROUP BY type; diff --git a/parser/testdata/02986_leftpad_fixedstring/ast.json b/parser/testdata/02986_leftpad_fixedstring/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02986_leftpad_fixedstring/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02986_leftpad_fixedstring/metadata.json b/parser/testdata/02986_leftpad_fixedstring/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02986_leftpad_fixedstring/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02986_leftpad_fixedstring/query.sql 
b/parser/testdata/02986_leftpad_fixedstring/query.sql new file mode 100644 index 000000000..eaed9b3ad --- /dev/null +++ b/parser/testdata/02986_leftpad_fixedstring/query.sql @@ -0,0 +1,41 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/59604 +SELECT leftPad(toFixedString('abc', 3), 0), leftPad('abc', CAST('0', 'Int32')); +SELECT leftPad(toFixedString('abc343243424324', 15), 1) as a, toTypeName(a); + +SELECT rightPad(toFixedString('abc', 3), 0), rightPad('abc', CAST('0', 'Int32')); +SELECT rightPad(toFixedString('abc343243424324', 15), 1) as a, toTypeName(a); + +SELECT + hex(leftPad(toFixedString('abc34324' as s, 8), number)) as result, + hex(leftPad(s, number)) = result, + hex(leftPadUTF8(toFixedString(s, 8), number)) = result, + hex(leftPadUTF8(s, number)) = result +FROM numbers(20); + +SELECT + hex(rightPad(toFixedString('abc34324' as s, 8), number)) as result, + hex(rightPad(s, number)) = result, + hex(rightPadUTF8(toFixedString(s, 8), number)) = result, + hex(rightPadUTF8(s, number)) = result +FROM numbers(20); + +-- I'm not confident the behaviour should be like this. I'm only testing memory problems +SELECT + hex(leftPadUTF8(toFixedString('abc34324' as s, 8), number, '🇪🇸')) as result, + hex(leftPadUTF8(s, number, '🇪🇸')) = result +FROM numbers(20); + +SELECT + hex(rightPadUTF8(toFixedString('abc34324' as s, 8), number, '🇪🇸')) as result, + hex(rightPadUTF8(s, number, '🇪🇸')) = result +FROM numbers(20); + +SELECT + hex(leftPadUTF8(toFixedString('🇪🇸' as s, 8), number, 'Ñ')) as result, + hex(leftPadUTF8(s, number, 'Ñ')) = result +FROM numbers(20); + +SELECT + hex(rightPadUTF8(toFixedString('🇪🇸' as s, 8), number, 'Ñ')) as result, + hex(rightPadUTF8(s, number, 'Ñ')) = result +FROM numbers(20); diff --git a/parser/testdata/02987_group_array_intersect/ast.json b/parser/testdata/02987_group_array_intersect/ast.json new file mode 100644 index 000000000..ea6ef2b1f --- /dev/null +++ b/parser/testdata/02987_group_array_intersect/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00119666, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02987_group_array_intersect/metadata.json b/parser/testdata/02987_group_array_intersect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02987_group_array_intersect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02987_group_array_intersect/query.sql b/parser/testdata/02987_group_array_intersect/query.sql new file mode 100644 index 000000000..89a9dcb8d --- /dev/null +++ b/parser/testdata/02987_group_array_intersect/query.sql @@ -0,0 +1,93 @@ +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.0; + +DROP TABLE IF EXISTS test_empty; +CREATE TABLE test_empty (a Array(Int64)) engine=MergeTree ORDER BY a; +INSERT INTO test_empty VALUES ([]); +SELECT arraySort(groupArrayIntersect(*)) FROM test_empty; +INSERT INTO test_empty VALUES ([1]); +SELECT arraySort(groupArrayIntersect(*)) FROM test_empty; +DROP TABLE test_empty; + +DROP TABLE IF EXISTS test_null; +CREATE TABLE test_null (a Array(Nullable(Int64))) engine=MergeTree ORDER BY a SETTINGS allow_nullable_key=1; +INSERT INTO test_null VALUES ([NULL, NULL]); +SELECT arraySort(groupArrayIntersect(*)) FROM test_null; +INSERT INTO test_null VALUES ([NULL]); +SELECT arraySort(groupArrayIntersect(*)) FROM test_null; +INSERT INTO 
test_null VALUES ([1,2]); +SELECT arraySort(groupArrayIntersect(*)) FROM test_null; +DROP TABLE test_null; + +DROP TABLE IF EXISTS test_nested_arrays; +CREATE TABLE test_nested_arrays (a Array(Array(Int64))) engine=MergeTree ORDER BY a; +INSERT INTO test_nested_arrays VALUES ([[1,2,3,4,5,6], [1,2,4,5]]); +INSERT INTO test_nested_arrays VALUES ([[1,2,4,5]]); +SELECT arraySort(groupArrayIntersect(*)) FROM test_nested_arrays; +INSERT INTO test_nested_arrays VALUES ([[1,4,3,0,5,5,5]]); +SELECT arraySort(groupArrayIntersect(*)) FROM test_nested_arrays; +DROP TABLE test_nested_arrays; + +DROP TABLE IF EXISTS test_numbers; +CREATE TABLE test_numbers (a Array(Int64)) engine=MergeTree ORDER BY a; +INSERT INTO test_numbers VALUES ([1,2,3,4,5,6]); +INSERT INTO test_numbers VALUES ([1,2,4,5]); +INSERT INTO test_numbers VALUES ([1,4,3,0,5,5,5]); +SELECT arraySort(groupArrayIntersect(*)) FROM test_numbers; +INSERT INTO test_numbers VALUES ([9]); +SELECT arraySort(groupArrayIntersect(*)) FROM test_numbers; +DROP TABLE test_numbers; + +DROP TABLE IF EXISTS test_big_numbers_sep; +CREATE TABLE test_big_numbers_sep (a Array(Int64)) engine=MergeTree ORDER BY a; +INSERT INTO test_big_numbers_sep SELECT array(number) FROM numbers_mt(100000); +SELECT arraySort(groupArrayIntersect(*)) FROM test_big_numbers_sep; +DROP TABLE test_big_numbers_sep; + +DROP TABLE IF EXISTS test_big_numbers; +CREATE TABLE test_big_numbers (a Array(Int64)) engine=MergeTree ORDER BY a; +INSERT INTO test_big_numbers SELECT range(100000); +SELECT length(groupArrayIntersect(*)) FROM test_big_numbers; +INSERT INTO test_big_numbers SELECT range(99999); +SELECT length(groupArrayIntersect(*)) FROM test_big_numbers; +INSERT INTO test_big_numbers VALUES ([9]); +SELECT arraySort(groupArrayIntersect(*)) FROM test_big_numbers; +DROP TABLE test_big_numbers; + +DROP TABLE IF EXISTS test_string; +CREATE TABLE test_string (a Array(String)) engine=MergeTree ORDER BY a; +INSERT INTO test_string VALUES (['a', 'b', 'c', 'd', 'e', 'f']); +INSERT INTO test_string VALUES (['a', 'aa', 'b', 'bb', 'c', 'cc', 'd', 'dd', 'f', 'ff']); +INSERT INTO test_string VALUES (['ae', 'ab', 'a', 'bb', 'c']); +SELECT arraySort(groupArrayIntersect(*)) FROM test_string; +DROP TABLE test_string; + +DROP TABLE IF EXISTS test_big_string; +CREATE TABLE test_big_string (a Array(String)) engine=MergeTree ORDER BY a; +INSERT INTO test_big_string SELECT groupArray(toString(number)) FROM numbers_mt(50000); +SELECT length(groupArrayIntersect(*)) FROM test_big_string; +INSERT INTO test_big_string SELECT groupArray(toString(number)) FROM numbers_mt(49999); +SELECT length(groupArrayIntersect(*)) FROM test_big_string; +INSERT INTO test_big_string VALUES (['1']); +SELECT arraySort(groupArrayIntersect(*)) FROM test_big_string; +INSERT INTO test_big_string VALUES (['a']); +SELECT arraySort(groupArrayIntersect(*)) FROM test_big_string; +DROP TABLE test_big_string; + +DROP TABLE IF EXISTS test_datetime; +CREATE TABLE test_datetime (a Array(DateTime)) engine=MergeTree ORDER BY a; +INSERT INTO test_datetime VALUES ([toDateTime('2023-01-01 00:00:00'), toDateTime('2023-01-01 01:02:03'), toDateTime('2023-01-01 02:03:04')]); +INSERT INTO test_datetime VALUES ([toDateTime('2023-01-01 00:00:00'), toDateTime('2023-01-01 01:02:04'), toDateTime('2023-01-01 02:03:05')]); +SELECT arraySort(groupArrayIntersect(*)) from test_datetime; +DROP TABLE test_datetime; + +DROP TABLE IF EXISTS test_date32; +CREATE TABLE test_date32 (a Array(Date32)) engine=MergeTree ORDER BY a; +INSERT INTO test_date32 VALUES 
([toDate32('2023-01-01 00:00:00'), toDate32('2023-01-01 00:00:01')]); +SELECT arraySort(groupArrayIntersect(*)) from test_date32; +DROP TABLE test_date32; + +DROP TABLE IF EXISTS test_date; +CREATE TABLE test_date (a Array(Date)) engine=MergeTree ORDER BY a; +INSERT INTO test_date VALUES ([toDate('2023-01-01 00:00:00'), toDate('2023-01-01 00:00:01')]); +SELECT arraySort(groupArrayIntersect(*)) from test_date; +DROP TABLE test_date; diff --git a/parser/testdata/02987_logical_optimizer_pass_lowcardinality/ast.json b/parser/testdata/02987_logical_optimizer_pass_lowcardinality/ast.json new file mode 100644 index 000000000..ae8ca4c75 --- /dev/null +++ b/parser/testdata/02987_logical_optimizer_pass_lowcardinality/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery 02987_logical_optimizer_table (children 3)" + }, + { + "explain": " Identifier 02987_logical_optimizer_table" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration key (children 1)" + }, + { + "explain": " DataType Int" + }, + { + "explain": " ColumnDeclaration value (children 1)" + }, + { + "explain": " DataType Int" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001171512, + "rows_read": 11, + "bytes_read": 431 + } +} diff --git a/parser/testdata/02987_logical_optimizer_pass_lowcardinality/metadata.json b/parser/testdata/02987_logical_optimizer_pass_lowcardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02987_logical_optimizer_pass_lowcardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02987_logical_optimizer_pass_lowcardinality/query.sql b/parser/testdata/02987_logical_optimizer_pass_lowcardinality/query.sql new file mode 100644 index 000000000..266270562 --- /dev/null +++ b/parser/testdata/02987_logical_optimizer_pass_lowcardinality/query.sql @@ -0,0 +1,5 @@ +CREATE TABLE 02987_logical_optimizer_table (key Int, value Int) ENGINE=Memory(); +CREATE VIEW v1 AS SELECT * FROM 02987_logical_optimizer_table; +CREATE TABLE 02987_logical_optimizer_merge AS v1 ENGINE=Merge(currentDatabase(), 'v1'); + +SELECT _table, key FROM 02987_logical_optimizer_merge WHERE (_table = toFixedString(toFixedString(toFixedString('v1', toNullable(2)), 2), 2)) OR ((value = toLowCardinality(toNullable(10))) AND (_table = toFixedString(toNullable('v3'), 2))) OR ((value = 20) AND (_table = toFixedString(toFixedString(toFixedString('v1', 2), 2), 2)) AND (_table = toFixedString(toLowCardinality(toFixedString('v3', 2)), 2))) SETTINGS enable_analyzer = true, join_use_nulls = true, convert_query_to_cnf = true; diff --git a/parser/testdata/02988_join_using_prewhere_pushdown/ast.json b/parser/testdata/02988_join_using_prewhere_pushdown/ast.json new file mode 100644 index 000000000..f2c5d2745 --- /dev/null +++ b/parser/testdata/02988_join_using_prewhere_pushdown/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00090594, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git 
a/parser/testdata/02988_join_using_prewhere_pushdown/metadata.json b/parser/testdata/02988_join_using_prewhere_pushdown/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02988_join_using_prewhere_pushdown/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02988_join_using_prewhere_pushdown/query.sql b/parser/testdata/02988_join_using_prewhere_pushdown/query.sql new file mode 100644 index 000000000..db49f155d --- /dev/null +++ b/parser/testdata/02988_join_using_prewhere_pushdown/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS t; + +SET allow_suspicious_low_cardinality_types = 1; + + +CREATE TABLE t (`id` UInt16, `u` LowCardinality(Int32), `s` LowCardinality(String)) +ENGINE = MergeTree ORDER BY id; + +INSERT INTO t VALUES (1,1,'a'),(2,2,'b'); + +SELECT u, s FROM t +INNER JOIN ( SELECT number :: Int32 AS u FROM numbers(10) ) AS t1 +USING (u) +WHERE u != 2 +; + +SELECT u, s, toTypeName(u) FROM t +FULL JOIN ( SELECT number :: UInt32 AS u FROM numbers(10) ) AS t1 +USING (u) +WHERE u == 2 +ORDER BY 1 +; + +DROP TABLE IF EXISTS t; diff --git a/parser/testdata/02988_ordinary_database_warning/ast.json b/parser/testdata/02988_ordinary_database_warning/ast.json new file mode 100644 index 000000000..ed41df54d --- /dev/null +++ b/parser/testdata/02988_ordinary_database_warning/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 02988_ordinary (children 1)" + }, + { + "explain": " Identifier 02988_ordinary" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001390942, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/02988_ordinary_database_warning/metadata.json b/parser/testdata/02988_ordinary_database_warning/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02988_ordinary_database_warning/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02988_ordinary_database_warning/query.sql b/parser/testdata/02988_ordinary_database_warning/query.sql new file mode 100644 index 000000000..2a9756d6f --- /dev/null +++ b/parser/testdata/02988_ordinary_database_warning/query.sql @@ -0,0 +1,10 @@ +DROP DATABASE IF EXISTS 02988_ordinary; + +SET send_logs_level = 'fatal'; +SET allow_deprecated_database_ordinary = 1; +-- Creation of a database with Ordinary engine emits a warning. +CREATE DATABASE 02988_ordinary ENGINE=Ordinary; + +SELECT DISTINCT 'Ok.' 
FROM system.warnings WHERE message ILIKE '%Ordinary%' and message ILIKE '%deprecated%'; + +DROP DATABASE IF EXISTS 02988_ordinary; diff --git a/parser/testdata/02989_group_by_tuple/ast.json b/parser/testdata/02989_group_by_tuple/ast.json new file mode 100644 index 000000000..1b4445465 --- /dev/null +++ b/parser/testdata/02989_group_by_tuple/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier number" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001119151, + "rows_read": 22, + "bytes_read": 834 + } +} diff --git a/parser/testdata/02989_group_by_tuple/metadata.json b/parser/testdata/02989_group_by_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02989_group_by_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02989_group_by_tuple/query.sql b/parser/testdata/02989_group_by_tuple/query.sql new file mode 100644 index 000000000..d0a205f5e --- /dev/null +++ b/parser/testdata/02989_group_by_tuple/query.sql @@ -0,0 +1 @@ +SELECT number FROM numbers(3) GROUP BY (number, number % 2) ORDER BY number; diff --git a/parser/testdata/02989_join_using_parent_scope/ast.json b/parser/testdata/02989_join_using_parent_scope/ast.json new file mode 100644 index 000000000..c63305f6a --- /dev/null +++ b/parser/testdata/02989_join_using_parent_scope/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tabc (children 1)" + }, + { + "explain": " Identifier tabc" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000985315, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/02989_join_using_parent_scope/metadata.json b/parser/testdata/02989_join_using_parent_scope/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02989_join_using_parent_scope/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02989_join_using_parent_scope/query.sql b/parser/testdata/02989_join_using_parent_scope/query.sql new file mode 100644 index 000000000..dc5f6adfb --- /dev/null +++ b/parser/testdata/02989_join_using_parent_scope/query.sql @@ -0,0 +1,90 @@ +DROP TABLE IF EXISTS tabc; +CREATE TABLE tabc (a UInt32, b UInt32 ALIAS a + 1, c UInt32 ALIAS b + 1, s String) ENGINE = MergeTree ORDER BY a; +INSERT INTO tabc 
(a, s) SELECT number, 'abc' || toString(number) FROM numbers(4);
+
+DROP TABLE IF EXISTS ta;
+CREATE TABLE ta (a Int32) ENGINE = MergeTree ORDER BY tuple();
+INSERT INTO ta SELECT number FROM numbers(4);
+
+DROP TABLE IF EXISTS tb;
+CREATE TABLE tb (b Int32) ENGINE = MergeTree ORDER BY tuple();
+INSERT INTO tb SELECT number FROM numbers(4);
+
+SET join_use_nulls = 1;
+
+SET analyzer_compatibility_join_using_top_level_identifier = 1;
+
+-- { echoOn }
+SELECT 1 AS c0 FROM (SELECT 1 AS c1) t0 JOIN (SELECT 1 AS c0) t1 USING (c0);
+SELECT 1 AS c0 FROM (SELECT 1 AS c0) t0 JOIN (SELECT 1 AS c0) t1 USING (c0);
+
+SELECT 1 AS a FROM tb JOIN tabc USING (a) ORDER BY ALL;
+SELECT a + 2 AS b FROM ta JOIN tabc USING (b) ORDER BY ALL;
+SELECT b + 2 AS a FROM tb JOIN tabc USING (a) ORDER BY ALL;
+SELECT a + 2 AS c FROM ta JOIN tabc USING (c) ORDER BY ALL;
+
+SELECT b AS a, a FROM tb JOIN tabc USING (a) ORDER BY ALL;
+SELECT 1 AS b FROM tb JOIN ta USING (b); -- { serverError UNKNOWN_IDENTIFIER }
+
+-- SELECT * returns all columns from both tables in the new analyzer
+SELECT 3 AS a, a, * FROM tb FULL JOIN tabc USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1;
+SELECT b + 1 AS a, * FROM tb JOIN tabc USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1;
+
+SELECT b + 1 AS a, * FROM tb JOIN tabc USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1;
+SELECT b + 1 AS a, * FROM tb LEFT JOIN tabc USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1;
+SELECT b + 1 AS a, * FROM tb RIGHT JOIN tabc USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1;
+SELECT b + 1 AS a, * FROM tb FULL JOIN tabc USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1;
+SELECT b + 1 AS a, * FROM tb FULL JOIN tabc USING (a) ORDER BY ALL SETTINGS asterisk_include_alias_columns = 1, enable_analyzer = 1;
+
+SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1;
+SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 LEFT JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1;
+SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 RIGHT JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1;
+SELECT b + 1 AS a, * FROM (SELECT b FROM tb) t1 FULL JOIN (SELECT a, b FROM tabc) t2 USING (a) ORDER BY ALL SETTINGS enable_analyzer = 1;
+
+SELECT b + 1 AS a, s FROM tb FULL OUTER JOIN tabc USING (a) PREWHERE a > 2 ORDER BY ALL SETTINGS enable_analyzer = 1;
+
+EXPLAIN PIPELINE SELECT (SELECT 1) AS c0 FROM (SELECT 1 AS c0, 1 AS c1) tx JOIN (SELECT 0 AS c0, 1 AS c1) ty USING (c0, c1) FORMAT Null SETTINGS enable_analyzer = 1;
+
+-- This is the default behavior for the old analyzer, and for the new analyzer with analyzer_compatibility_join_using_top_level_identifier.
+-- Column `b` actually exists in the left table, but `b` from USING is resolved to `a + 2`, and `a` is not in the left table,
+-- so we get an UNKNOWN_IDENTIFIER error (a short sketch of this rule follows).
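A minimal sketch of the resolution rule described in the comment above, reusing only the `ta`, `tb`, and `tabc` tables created earlier in this file (the test's own queries continue below):

-- With analyzer_compatibility_join_using_top_level_identifier = 1, `b` in USING (b)
-- is bound to the top-level alias `a + 2`, so the left table must supply a column `a`.
SELECT a + 2 AS b FROM ta JOIN tabc USING (b); -- ok: `ta` has a column `a`
SELECT a + 2 AS b FROM tb JOIN tabc USING (b); -- fails: `tb` has no column `a`, hence UNKNOWN_IDENTIFIER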
+SELECT a + 2 AS b FROM tb JOIN tabc USING (b) ORDER BY ALL
+SETTINGS analyzer_compatibility_join_using_top_level_identifier = 1; -- { serverError UNKNOWN_IDENTIFIER }
+
+-- In the new analyzer with `analyzer_compatibility_join_using_top_level_identifier = 0` we get `b` from the left table
+SELECT a + 2 AS b FROM tb JOIN tabc USING (b) ORDER BY ALL
+SETTINGS analyzer_compatibility_join_using_top_level_identifier = 0, enable_analyzer = 1;
+
+-- This is an example where a query may return different results depending on `analyzer_compatibility_join_using_top_level_identifier`
+
+DROP TABLE IF EXISTS users;
+CREATE TABLE users (uid Int16, name String, spouse_name String) ENGINE=Memory;
+
+INSERT INTO users VALUES (1231, 'John', 'Ksenia');
+INSERT INTO users VALUES (6666, 'Ksenia', '');
+
+SELECT u1.uid, u1.spouse_name as name, u2.uid, u2.name
+FROM users u1 JOIN users u2 USING (name)
+ORDER BY u1.uid
+FORMAT TSVWithNamesAndTypes
+SETTINGS enable_analyzer = 1, analyzer_compatibility_join_using_top_level_identifier = 1;
+
+SELECT u1.uid, u1.spouse_name as name, u2.uid, u2.name
+FROM users u1 JOIN users u2 USING (name)
+ORDER BY u1.uid
+FORMAT TSVWithNamesAndTypes
+SETTINGS enable_analyzer = 1, analyzer_compatibility_join_using_top_level_identifier = 0;
+
+SELECT u1.uid, u1.spouse_name as name, u2.uid, u2.name
+FROM users u1 JOIN users u2 USING (name)
+ORDER BY u1.uid
+FORMAT TSVWithNamesAndTypes
+SETTINGS enable_analyzer = 0;
+
+DROP TABLE IF EXISTS users;
+
+
+DROP TABLE IF EXISTS tabc;
+DROP TABLE IF EXISTS ta;
+DROP TABLE IF EXISTS tb;
+DROP TABLE IF EXISTS tc;
diff --git a/parser/testdata/02989_replicated_merge_tree_invalid_metadata_version/ast.json b/parser/testdata/02989_replicated_merge_tree_invalid_metadata_version/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/02989_replicated_merge_tree_invalid_metadata_version/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/02989_replicated_merge_tree_invalid_metadata_version/metadata.json b/parser/testdata/02989_replicated_merge_tree_invalid_metadata_version/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02989_replicated_merge_tree_invalid_metadata_version/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02989_replicated_merge_tree_invalid_metadata_version/query.sql b/parser/testdata/02989_replicated_merge_tree_invalid_metadata_version/query.sql
new file mode 100644
index 000000000..15633586a
--- /dev/null
+++ b/parser/testdata/02989_replicated_merge_tree_invalid_metadata_version/query.sql
@@ -0,0 +1,40 @@
+-- Tags: zookeeper
+
+DROP TABLE IF EXISTS test_table_replicated;
+CREATE TABLE test_table_replicated
+(
+    id UInt64,
+    value String
+) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_table_replicated', '1_replica') ORDER BY id;
+
+ALTER TABLE test_table_replicated ADD COLUMN insert_time DateTime;
+
+SELECT name, version FROM system.zookeeper
+WHERE path = (SELECT zookeeper_path FROM system.replicas WHERE database = currentDatabase() AND table = 'test_table_replicated')
+AND name = 'metadata' FORMAT Vertical;
+
+DROP TABLE IF EXISTS test_table_replicated_second;
+CREATE TABLE test_table_replicated_second
+(
+    id UInt64,
+    value String,
+    insert_time DateTime
+) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_table_replicated', '2_replica') ORDER BY id;
+
+DROP TABLE test_table_replicated;
+
+SELECT '--';
+
+SELECT name, value FROM system.zookeeper
+WHERE path = (SELECT replica_path FROM
system.replicas WHERE database = currentDatabase() AND table = 'test_table_replicated_second') +AND name = 'metadata_version' FORMAT Vertical; + +SYSTEM RESTART REPLICA test_table_replicated_second; + +ALTER TABLE test_table_replicated_second ADD COLUMN insert_time_updated DateTime; + +SELECT '--'; + +DESCRIBE test_table_replicated_second; + +DROP TABLE test_table_replicated_second; diff --git a/parser/testdata/02989_system_tables_metadata_version/ast.json b/parser/testdata/02989_system_tables_metadata_version/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02989_system_tables_metadata_version/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02989_system_tables_metadata_version/metadata.json b/parser/testdata/02989_system_tables_metadata_version/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02989_system_tables_metadata_version/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02989_system_tables_metadata_version/query.sql b/parser/testdata/02989_system_tables_metadata_version/query.sql new file mode 100644 index 000000000..9534b1f2e --- /dev/null +++ b/parser/testdata/02989_system_tables_metadata_version/query.sql @@ -0,0 +1,50 @@ +-- Tags: zookeeper, no-parallel + +DROP TABLE IF EXISTS test_temporary_table_02989; +CREATE TEMPORARY TABLE test_temporary_table_02989 +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id; + +SELECT name, metadata_version FROM system.tables WHERE name = 'test_temporary_table_02989' AND is_temporary; + +DROP TABLE test_temporary_table_02989; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id; + +SELECT '--'; + +SELECT name, metadata_version FROM system.tables WHERE database = currentDatabase() AND name = 'test_table'; + +DROP TABLE test_table; + +DROP TABLE IF EXISTS test_table_replicated; +CREATE TABLE test_table_replicated +( + id UInt64, + value String +) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_table_replicated', '1_replica') ORDER BY id; + +SELECT '--'; + +SELECT name, metadata_version FROM system.tables WHERE database = currentDatabase() AND name = 'test_table_replicated'; + +ALTER TABLE test_table_replicated ADD COLUMN insert_time DateTime; + +SELECT '--'; + +SELECT name, metadata_version FROM system.tables WHERE database = currentDatabase() AND name = 'test_table_replicated'; + +ALTER TABLE test_table_replicated ADD COLUMN insert_time_updated DateTime; + +SELECT '--'; + +SELECT name, metadata_version FROM system.tables WHERE database = currentDatabase() AND name = 'test_table_replicated'; + +DROP TABLE test_table_replicated; diff --git a/parser/testdata/02989_variant_comparison/ast.json b/parser/testdata/02989_variant_comparison/ast.json new file mode 100644 index 000000000..649e8498b --- /dev/null +++ b/parser/testdata/02989_variant_comparison/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001025055, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02989_variant_comparison/metadata.json b/parser/testdata/02989_variant_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02989_variant_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02989_variant_comparison/query.sql b/parser/testdata/02989_variant_comparison/query.sql
new file mode 100644
index 000000000..4d09933fb
--- /dev/null
+++ b/parser/testdata/02989_variant_comparison/query.sql
@@ -0,0 +1,80 @@
+set allow_experimental_variant_type=1;
+set allow_suspicious_types_in_order_by=1;
+
+create table test (v1 Variant(String, UInt64, Array(UInt32)), v2 Variant(String, UInt64, Array(UInt32))) engine=Memory;
+
+insert into test values (42, 42);
+insert into test values (42, 43);
+insert into test values (43, 42);
+
+insert into test values ('abc', 'abc');
+insert into test values ('abc', 'abd');
+insert into test values ('abd', 'abc');
+
+insert into test values ([1,2,3], [1,2,3]);
+insert into test values ([1,2,3], [1,2,4]);
+insert into test values ([1,2,4], [1,2,3]);
+
+insert into test values (NULL, NULL);
+
+insert into test values (42, 'abc');
+insert into test values ('abc', 42);
+
+insert into test values (42, [1,2,3]);
+insert into test values ([1,2,3], 42);
+
+insert into test values (42, NULL);
+insert into test values (NULL, 42);
+
+insert into test values ('abc', [1,2,3]);
+insert into test values ([1,2,3], 'abc');
+
+insert into test values ('abc', NULL);
+insert into test values (NULL, 'abc');
+
+insert into test values ([1,2,3], NULL);
+insert into test values (NULL, [1,2,3]);
+
+
+select 'order by v1 nulls first';
+select v1 from test order by v1 nulls first;
+
+select 'order by v1 nulls last';
+select v1 from test order by v1 nulls last;
+
+select 'order by v2 nulls first';
+select v2 from test order by v2 nulls first;
+
+select 'order by v2 nulls last';
+select v2 from test order by v2 nulls last;
+
+
+select 'order by v1, v2 nulls first';
+select * from test order by v1, v2 nulls first;
+
+select 'order by v1, v2 nulls last';
+select * from test order by v1, v2 nulls last;
+
+select 'order by v2, v1 nulls first';
+select * from test order by v2, v1 nulls first;
+
+select 'order by v2, v1 nulls last';
+select * from test order by v2, v1 nulls last;
+
+select 'v1 = v2';
+select v1, v2, v1 = v2 from test order by v1, v2;
+
+select 'v1 < v2';
+select v1, v2, v1 < v2 from test order by v1, v2;
+
+select 'v1 <= v2';
+select v1, v2, v1 <= v2 from test order by v1, v2;
+
+select 'v1 > v2';
+select v1, v2, v1 > v2 from test order by v1, v2;
+
+select 'v1 >= v2';
+select v1, v2, v1 >= v2 from test order by v1, v2;
+
+drop table test;
+
diff --git a/parser/testdata/02990_arrayFold_nullable_lc/ast.json b/parser/testdata/02990_arrayFold_nullable_lc/ast.json
new file mode 100644
index 000000000..eaa8df5e5
--- /dev/null
+++ b/parser/testdata/02990_arrayFold_nullable_lc/ast.json
@@ -0,0 +1,25 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "Set"
+        }
+    ],
+
+    "rows": 1,
+
+    "statistics":
+    {
+        "elapsed": 0.001068537,
+        "rows_read": 1,
+        "bytes_read": 11
+    }
+}
diff --git a/parser/testdata/02990_arrayFold_nullable_lc/metadata.json b/parser/testdata/02990_arrayFold_nullable_lc/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02990_arrayFold_nullable_lc/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02990_arrayFold_nullable_lc/query.sql b/parser/testdata/02990_arrayFold_nullable_lc/query.sql
new file mode 100644
index 000000000..280defdfb
--- /dev/null
+++ b/parser/testdata/02990_arrayFold_nullable_lc/query.sql
@@ -0,0 +1,35 @@
+SET allow_suspicious_low_cardinality_types=1;
+
+SELECT arrayFold((acc, x) -> (acc + (x * 2)),
[1, 2, 3, 4], toInt64(3)); +SELECT arrayFold((acc, x) -> (acc + (x * 2)), [1, 2, 3, 4], toInt64(toNullable(3))); +SELECT arrayFold((acc, x) -> (acc + (x * 2)), [1, 2, 3, 4], materialize(toInt64(toNullable(3)))); + +SELECT arrayFold((acc, x) -> (acc + (x * 2)), [1, 2, 3, 4]::Array(Nullable(Int64)), toInt64(3)); -- { serverError TYPE_MISMATCH } +SELECT arrayFold((acc, x) -> (acc + (x * 2)), [1, 2, 3, 4]::Array(Nullable(Int64)), toInt64(toNullable(3))); + +SELECT arrayFold((acc, x) -> (acc + (x * 2)), []::Array(Int64), toInt64(3)); +SELECT arrayFold((acc, x) -> (acc + (x * 2)), []::Array(Nullable(Int64)), toInt64(toNullable(3))); +SELECT arrayFold((acc, x) -> (acc + (x * 2)), []::Array(Nullable(Int64)), toInt64(NULL)); + +SELECT arrayFold((acc, x) -> x, materialize(CAST('[0, 1]', 'Array(Nullable(UInt8))')), toUInt8(toNullable(0))); +SELECT arrayFold((acc, x) -> x, materialize(CAST([NULL], 'Array(Nullable(UInt8))')), toUInt8(toNullable(0))); +SELECT arrayFold((acc, x) -> acc + x, materialize(CAST([NULL], 'Array(Nullable(UInt8))')), toUInt64(toNullable(0))); +SELECT arrayFold((acc, x) -> acc + x, materialize(CAST([1, 2, NULL], 'Array(Nullable(UInt8))')), toUInt64(toNullable(0))); + +SELECT arrayFold((acc, x) -> toNullable(acc + (x * 2)), [1, 2, 3, 4], toInt64(3)); -- { serverError TYPE_MISMATCH } +SELECT arrayFold((acc, x) -> toNullable(acc + (x * 2)), [1, 2, 3, 4], toNullable(toInt64(3))); + +SELECT arrayFold((acc, x) -> (acc + (x * 2)), [1, 2, 3, 4], toLowCardinality(toInt64(3))); -- { serverError TYPE_MISMATCH } +SELECT arrayFold((acc, x) -> toLowCardinality(acc + (x * 2)), [1, 2, 3, 4], toLowCardinality(toInt64(3))); +SELECT arrayFold((acc, x) -> (acc + (x * 2)), [1, 2, 3, 4]::Array(LowCardinality(Int64)), toInt64(toLowCardinality(3))); -- { serverError TYPE_MISMATCH } +SELECT arrayFold((acc, x) -> toLowCardinality(acc + (x * 2)), [1, 2, 3, 4]::Array(LowCardinality(Int64)), toInt64(toLowCardinality(3))); + +SELECT arrayFold((acc, x) -> acc + (x * 2), [1, 2, 3, 4]::Array(Nullable(Int64)), toInt64(toLowCardinality(3))); -- { serverError TYPE_MISMATCH } +SELECT arrayFold((acc, x) -> toLowCardinality(acc + (x * 2)), [1, 2, 3, 4]::Array(Nullable(Int64)), toInt64(toLowCardinality(3))); -- { serverError TYPE_MISMATCH } +SELECT arrayFold((acc, x) -> toLowCardinality(acc + (x * 2)), [1, 2, 3, 4]::Array(Nullable(Int64)), toInt64(toNullable(3))); -- { serverError TYPE_MISMATCH } + +SELECT arrayFold((acc, x) -> (acc + (x * 2)), [1, 2, 3, 4], NULL); +-- It's debatable which one of the following 2 queries should work, but considering the return type must match the +-- accumulator type it makes sense to be the second one +SELECT arrayFold((acc, x) -> (acc + (x * 2)), [1, 2, 3, 4], NULL::LowCardinality(Nullable(Int64))); -- { serverError TYPE_MISMATCH } +SELECT arrayFold((acc, x) -> (acc + (x * 2))::LowCardinality(Nullable(Int64)), [1, 2, 3, 4], NULL::LowCardinality(Nullable(Int64))); diff --git a/parser/testdata/02990_format_not_precedence/ast.json b/parser/testdata/02990_format_not_precedence/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02990_format_not_precedence/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02990_format_not_precedence/metadata.json b/parser/testdata/02990_format_not_precedence/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02990_format_not_precedence/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02990_format_not_precedence/query.sql 
b/parser/testdata/02990_format_not_precedence/query.sql new file mode 100644 index 000000000..98ef2c9e7 --- /dev/null +++ b/parser/testdata/02990_format_not_precedence/query.sql @@ -0,0 +1,7 @@ +-- { echoOn } +SELECT NOT 0 + NOT 0; +SELECT NOT (0 + (NOT 0)); +SELECT (NOT 0) + (NOT 0); +SELECT formatQuery('SELECT NOT 0 + NOT 0'); +SELECT formatQuery('SELECT NOT (0 + (NOT 0))'); +SELECT formatQuery('SELECT (NOT 0) + (NOT 0)'); diff --git a/parser/testdata/02990_optimize_uniq_to_count_alias/ast.json b/parser/testdata/02990_optimize_uniq_to_count_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02990_optimize_uniq_to_count_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02990_optimize_uniq_to_count_alias/metadata.json b/parser/testdata/02990_optimize_uniq_to_count_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02990_optimize_uniq_to_count_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02990_optimize_uniq_to_count_alias/query.sql b/parser/testdata/02990_optimize_uniq_to_count_alias/query.sql new file mode 100644 index 000000000..54d19264c --- /dev/null +++ b/parser/testdata/02990_optimize_uniq_to_count_alias/query.sql @@ -0,0 +1,52 @@ +--https://github.com/ClickHouse/ClickHouse/issues/59999 +DROP TABLE IF EXISTS tags; +CREATE TABLE tags (dev_tag String) ENGINE = Memory AS SELECT '1'; + +SELECT * +FROM +( + SELECT countDistinct(dev_tag) AS total_devtags + FROM + ( + SELECT dev_tag + FROM + ( + SELECT * + FROM tags + ) AS t + GROUP BY dev_tag + ) AS t +) SETTINGS optimize_uniq_to_count=0; + +SELECT * +FROM +( + SELECT countDistinct(dev_tag) AS total_devtags + FROM + ( + SELECT dev_tag + FROM + ( + SELECT * + FROM tags + ) AS t + GROUP BY dev_tag + ) AS t +) SETTINGS optimize_uniq_to_count=1; + +-- https://github.com/ClickHouse/ClickHouse/issues/62298 +DROP TABLE IF EXISTS users; +CREATE TABLE users +( + `id` Int64, + `name` String +) +ENGINE = ReplacingMergeTree +ORDER BY (id, name); + +INSERT INTO users VALUES (1, 'pufit'), (1, 'pufit2'), (1, 'pufit3'); + +SELECT uniqExact(id) FROM ( SELECT id FROM users WHERE id = 1 GROUP BY id, name ); + +DROP TABLE IF EXISTS users; +DROP TABLE IF EXISTS tags; diff --git a/parser/testdata/02990_parts_splitter_invalid_ranges/ast.json b/parser/testdata/02990_parts_splitter_invalid_ranges/ast.json new file mode 100644 index 000000000..5e136c445 --- /dev/null +++ b/parser/testdata/02990_parts_splitter_invalid_ranges/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001290263, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/02990_parts_splitter_invalid_ranges/metadata.json b/parser/testdata/02990_parts_splitter_invalid_ranges/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02990_parts_splitter_invalid_ranges/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02990_parts_splitter_invalid_ranges/query.sql b/parser/testdata/02990_parts_splitter_invalid_ranges/query.sql new file mode 100644 index 000000000..e19c23acc --- /dev/null +++ b/parser/testdata/02990_parts_splitter_invalid_ranges/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + `eventType` 
String, + `timestamp` UInt64, + `key` UInt64 +) +ENGINE = ReplacingMergeTree +PRIMARY KEY (eventType, timestamp) +ORDER BY (eventType, timestamp, key) +SETTINGS index_granularity = 1; + +SYSTEM STOP MERGES test_table; + +INSERT INTO test_table VALUES ('1', 1704472004759, 1), ('3', 1704153600000, 2), ('3', 1704153600000, 3), ('5', 1700161822134, 4); + +INSERT INTO test_table VALUES ('1', 1704468357009, 1), ('3', 1704153600000, 2), ('3', 1704153600000, 3), ('5', 1701458520878, 4); + +INSERT INTO test_table VALUES ('1', 1704470704762, 1), ('3', 1704153600000, 2), ('3', 1704153600000, 3), ('5', 1702609856302, 4); + +SELECT eventType, timestamp, key FROM test_table +WHERE (eventType IN ('2', '4')) AND + ((timestamp >= max2(toInt64('1698938519999'), toUnixTimestamp64Milli(now64() - toIntervalDay(90)))) AND + (timestamp <= (toInt64('1707143315452') - 1))); + +SELECT eventType, timestamp, key FROM test_table FINAL +WHERE (eventType IN ('2', '4')) AND + ((timestamp >= max2(toInt64('1698938519999'), toUnixTimestamp64Milli(now64() - toIntervalDay(90)))) AND + (timestamp <= (toInt64('1707143315452') - 1))); + +DROP TABLE test_table; diff --git a/parser/testdata/02990_rmt_replica_path_uuid/ast.json b/parser/testdata/02990_rmt_replica_path_uuid/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02990_rmt_replica_path_uuid/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02990_rmt_replica_path_uuid/metadata.json b/parser/testdata/02990_rmt_replica_path_uuid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02990_rmt_replica_path_uuid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02990_rmt_replica_path_uuid/query.sql b/parser/testdata/02990_rmt_replica_path_uuid/query.sql new file mode 100644 index 000000000..4fcdff291 --- /dev/null +++ b/parser/testdata/02990_rmt_replica_path_uuid/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-parallel, no-ordinary-database, no-replicated-database +-- Tag no-parallel: static UUID +-- Tag no-ordinary-database: requires UUID +-- Tag no-replicated-database: executes with ON CLUSTER anyway + +-- Ignore "ATTACH TABLE query with full table definition is not recommended" +-- Ignore BAD_ARGUMENTS +SET send_logs_level='fatal'; + +DROP TABLE IF EXISTS x; + +ATTACH TABLE x UUID 'aaaaaaaa-1111-2222-3333-aaaaaaaaaaaa' (key Int) ENGINE = ReplicatedMergeTree('/tables/{database}/{uuid}', 'r1') ORDER BY tuple(); +SELECT uuid FROM system.tables WHERE database = currentDatabase() and table = 'x'; +SELECT replica_path FROM system.replicas WHERE database = currentDatabase() and table = 'x'; +DROP TABLE x; + +-- {uuid} macro forbidden for CREATE TABLE without explicit UUID +CREATE TABLE x (key Int) ENGINE = ReplicatedMergeTree('/tables/{database}/{uuid}', 'r1') ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } + +CREATE TABLE x UUID 'aaaaaaaa-1111-2222-3333-aaaaaaaaaaaa' (key Int) ENGINE = ReplicatedMergeTree('/tables/{database}/{uuid}', 'r1') ORDER BY tuple(); +SELECT uuid FROM system.tables WHERE database = currentDatabase() and table = 'x'; +SELECT replica_path FROM system.replicas WHERE database = currentDatabase() and table = 'x'; +DROP TABLE x; diff --git a/parser/testdata/02990_variant_where_cond/ast.json b/parser/testdata/02990_variant_where_cond/ast.json new file mode 100644 index 000000000..5fc21f03d --- /dev/null +++ b/parser/testdata/02990_variant_where_cond/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], 
+ + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001096667, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02990_variant_where_cond/metadata.json b/parser/testdata/02990_variant_where_cond/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02990_variant_where_cond/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02990_variant_where_cond/query.sql b/parser/testdata/02990_variant_where_cond/query.sql new file mode 100644 index 000000000..eca6e7e84 --- /dev/null +++ b/parser/testdata/02990_variant_where_cond/query.sql @@ -0,0 +1,11 @@ +set allow_experimental_variant_type=1; + +create table test (v Variant(String, UInt64)) engine=MergeTree ORDER BY tuple(); +insert into test values (42), ('Hello'), (NULL); + +select * from test where v = 'Hello'; +select * from test where v = 42; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select * from test where v = 42::UInt64::Variant(String, UInt64); + +drop table test; + diff --git a/parser/testdata/02991_count_rewrite_analyzer/ast.json b/parser/testdata/02991_count_rewrite_analyzer/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02991_count_rewrite_analyzer/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02991_count_rewrite_analyzer/metadata.json b/parser/testdata/02991_count_rewrite_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02991_count_rewrite_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02991_count_rewrite_analyzer/query.sql b/parser/testdata/02991_count_rewrite_analyzer/query.sql new file mode 100644 index 000000000..bb0d3a1a9 --- /dev/null +++ b/parser/testdata/02991_count_rewrite_analyzer/query.sql @@ -0,0 +1,7 @@ +-- Regression test for https://github.com/ClickHouse/ClickHouse/issues/59919 +SET enable_analyzer=1; + +SELECT toTypeName(sum(toNullable('a') IN toNullable('a'))) AS x; +SELECT toTypeName(count(toNullable('a') IN toNullable('a'))) AS x; +SELECT toTypeName(sum(toFixedString('a', toLowCardinality(toNullable(1))) IN toFixedString('a', 1))) AS x; +SELECT toTypeName(count(toFixedString('a', toLowCardinality(toNullable(1))) IN toFixedString('a', 1))) AS x; diff --git a/parser/testdata/02992_all_columns_should_have_comment/ast.json b/parser/testdata/02992_all_columns_should_have_comment/ast.json new file mode 100644 index 000000000..74ef218c4 --- /dev/null +++ b/parser/testdata/02992_all_columns_should_have_comment/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SYSTEM query" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000915868, + "rows_read": 1, + "bytes_read": 20 + } +} diff --git a/parser/testdata/02992_all_columns_should_have_comment/metadata.json b/parser/testdata/02992_all_columns_should_have_comment/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02992_all_columns_should_have_comment/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02992_all_columns_should_have_comment/query.sql b/parser/testdata/02992_all_columns_should_have_comment/query.sql new file mode 100644 index 000000000..4a4af313a --- /dev/null +++ b/parser/testdata/02992_all_columns_should_have_comment/query.sql @@ -0,0 +1,8 @@ +SYSTEM FLUSH LOGS /* all tables */; +SELECT 'Column ' || name || ' 
from table ' || concat(database, '.', table) || ' should have a comment' +FROM system.columns +WHERE (database = 'system') AND + (comment = '') AND + (table NOT ILIKE '%log%') AND + (table NOT IN ('numbers', 'numbers_mt', 'one', 'generate_series', 'generateSeries', 'coverage_log', 'filesystem_read_prefetches_log', 'custom_metrics', 'custom_metrics_refresher', 'prometheus_metrics', 'unicode')) AND + (default_kind != 'ALIAS'); diff --git a/parser/testdata/02992_analyzer_group_by_const/ast.json b/parser/testdata/02992_analyzer_group_by_const/ast.json new file mode 100644 index 000000000..4a679f852 --- /dev/null +++ b/parser/testdata/02992_analyzer_group_by_const/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001215518, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02992_analyzer_group_by_const/metadata.json b/parser/testdata/02992_analyzer_group_by_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02992_analyzer_group_by_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02992_analyzer_group_by_const/query.sql b/parser/testdata/02992_analyzer_group_by_const/query.sql new file mode 100644 index 000000000..efe18918c --- /dev/null +++ b/parser/testdata/02992_analyzer_group_by_const/query.sql @@ -0,0 +1,75 @@ +SET enable_analyzer=1; + +-- Illegal column String of first argument of function concatWithSeparator. Must be a constant String. +SELECT concatWithSeparator('a', 'b') GROUP BY 'a'; +-- use-of-uninitialized-value +SELECT concatWithSeparator('|', 'a', concatWithSeparator('|', CAST('a', 'LowCardinality(String)'))) GROUP BY 'a'; +SELECT concatWithSeparator('|', 'a', concatWithSeparator('|', CAST('x', 'LowCardinality(String)'))) GROUP BY 'a'; +-- should be const like for the query w/o GROUP BY +select dumpColumnStructure('x') GROUP BY 'x'; +select dumpColumnStructure('x'); +-- from https://github.com/ClickHouse/ClickHouse/pull/60046 +SELECT cityHash64('limit', _CAST(materialize('World'), 'LowCardinality(String)')) FROM system.one GROUP BY GROUPING SETS ('limit'); + +WITH ( + SELECT dummy AS x + FROM system.one + ) AS y +SELECT + y, + min(dummy) +FROM remote('127.0.0.{1,2}', system.one) +GROUP BY y; + +WITH ( + SELECT dummy AS x + FROM system.one + ) AS y +SELECT + y, + min(dummy) +FROM remote('127.0.0.{2,3}', system.one) +GROUP BY y; + +CREATE TABLE ttt (hr DateTime, ts DateTime) ENGINE=Memory +as select '2000-01-01' d, d; + +SELECT + count(), + now() AS c1 +FROM remote('127.0.0.{1,2}', currentDatabase(), ttt) +GROUP BY c1 FORMAT Null; + +SELECT + count(), + now() AS c1 +FROM remote('127.0.0.{3,2}', currentDatabase(), ttt) +GROUP BY c1 FORMAT Null; + +SELECT + count(), + now() AS c1 +FROM remote('127.0.0.{1,2}', currentDatabase(), ttt) +GROUP BY c1 + 1 FORMAT Null; + +SELECT + count(), + now() AS c1 +FROM remote('127.0.0.{3,2}', currentDatabase(), ttt) +GROUP BY c1 + 1 FORMAT Null; + +SELECT + count(), + tuple(nullIf(toDateTime(formatDateTime(hr, '%F %T', 'America/Los_Angeles'), 'America/Los_Angeles'), toDateTime(0))) as c1, + defaultValueOfArgumentType(toTimeZone(ts, 'America/Los_Angeles')) as c2, + formatDateTime(hr, '%F %T', 'America/Los_Angeles') as c3 +FROM remote('127.0.0.{1,2}', currentDatabase(), ttt) +GROUP BY c1, c2, c3 FORMAT Null; + +SELECT + count(), + tuple(nullIf(toDateTime(formatDateTime(hr, '%F %T', 'America/Los_Angeles'), 
'America/Los_Angeles'), toDateTime(0))) as c1,
+    defaultValueOfArgumentType(toTimeZone(ts, 'America/Los_Angeles')) as c2,
+    formatDateTime(hr, '%F %T', 'America/Los_Angeles') as c3
+FROM remote('127.0.0.{3,2}', currentDatabase(), ttt)
+GROUP BY c1, c2, c3 FORMAT Null;
diff --git a/parser/testdata/02992_settings_overflow/ast.json b/parser/testdata/02992_settings_overflow/ast.json
new file mode 100644
index 000000000..34aab635c
--- /dev/null
+++ b/parser/testdata/02992_settings_overflow/ast.json
@@ -0,0 +1,25 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "Set"
+        }
+    ],
+
+    "rows": 1,
+
+    "statistics":
+    {
+        "elapsed": 0.000961562,
+        "rows_read": 1,
+        "bytes_read": 11
+    }
+}
diff --git a/parser/testdata/02992_settings_overflow/metadata.json b/parser/testdata/02992_settings_overflow/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02992_settings_overflow/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02992_settings_overflow/query.sql b/parser/testdata/02992_settings_overflow/query.sql
new file mode 100644
index 000000000..d120c3400
--- /dev/null
+++ b/parser/testdata/02992_settings_overflow/query.sql
@@ -0,0 +1 @@
+SET max_threads = -1; -- { serverError CANNOT_CONVERT_TYPE }
diff --git a/parser/testdata/02993_lazy_index_loading/ast.json b/parser/testdata/02993_lazy_index_loading/ast.json
new file mode 100644
index 000000000..9561cf707
--- /dev/null
+++ b/parser/testdata/02993_lazy_index_loading/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery test (children 1)"
+        },
+        {
+            "explain": " Identifier test"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.000967774,
+        "rows_read": 2,
+        "bytes_read": 60
+    }
+}
diff --git a/parser/testdata/02993_lazy_index_loading/metadata.json b/parser/testdata/02993_lazy_index_loading/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/02993_lazy_index_loading/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/02993_lazy_index_loading/query.sql b/parser/testdata/02993_lazy_index_loading/query.sql
new file mode 100644
index 000000000..0c3b0ac5a
--- /dev/null
+++ b/parser/testdata/02993_lazy_index_loading/query.sql
@@ -0,0 +1,29 @@
+DROP TABLE IF EXISTS test;
+CREATE TABLE test (s String) ENGINE = MergeTree ORDER BY s SETTINGS index_granularity = 1, use_primary_key_cache = 0;
+
+SET optimize_trivial_insert_select = 1;
+INSERT INTO test SELECT randomString(1000) FROM numbers(100000);
+SELECT round(primary_key_bytes_in_memory, -7), round(primary_key_bytes_in_memory_allocated, -7) FROM system.parts WHERE database = currentDatabase() AND table = 'test' FORMAT Vertical;
+
+DETACH TABLE test;
+SET max_memory_usage = '50M';
+ATTACH TABLE test;
+
+SELECT primary_key_bytes_in_memory, primary_key_bytes_in_memory_allocated FROM system.parts WHERE database = currentDatabase() AND table = 'test' FORMAT Vertical;
+
+SET max_memory_usage = '200M';
+
+-- Run a query that doesn't use indexes
+SELECT s != '' FROM test LIMIT 1;
+
+-- Check that index was not loaded
+SELECT primary_key_bytes_in_memory, primary_key_bytes_in_memory_allocated FROM system.parts WHERE database = currentDatabase() AND table = 'test' FORMAT Vertical;
+
+-- Run a query that uses the PK index
+SET max_execution_time = 300;
+SELECT s != '' FROM test WHERE s < '9999999999' LIMIT 1;
+
+-- Check that index was loaded (the note below explains these counters)
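A short note on what the two checks in this test observe, with a minimal standalone probe; it reuses the same system.parts columns the test queries, and the reported byte counts are installation-dependent:

-- With lazy index loading, a part's primary index is read into memory on first use,
-- so both counters stay at 0 for a part until some query actually consults the PK.
-- `active` filters out outdated parts.
SELECT name, primary_key_bytes_in_memory, primary_key_bytes_in_memory_allocated
FROM system.parts
WHERE database = currentDatabase() AND table = 'test' AND active;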
+SELECT round(primary_key_bytes_in_memory, -7), round(primary_key_bytes_in_memory_allocated, -7) FROM system.parts WHERE database = currentDatabase() AND table = 'test' FORMAT Vertical; + +DROP TABLE test; diff --git a/parser/testdata/02993_values_escape_quote/ast.json b/parser/testdata/02993_values_escape_quote/ast.json new file mode 100644 index 000000000..ff07411ad --- /dev/null +++ b/parser/testdata/02993_values_escape_quote/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'foo'" + }, + { + "explain": " Identifier Values" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001348067, + "rows_read": 6, + "bytes_read": 200 + } +} diff --git a/parser/testdata/02993_values_escape_quote/metadata.json b/parser/testdata/02993_values_escape_quote/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02993_values_escape_quote/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02993_values_escape_quote/query.sql b/parser/testdata/02993_values_escape_quote/query.sql new file mode 100644 index 000000000..e6fc5f1b2 --- /dev/null +++ b/parser/testdata/02993_values_escape_quote/query.sql @@ -0,0 +1,12 @@ +select 'foo' format Values; +select 'foo\'bar' format Values; +select 'foo\'\'bar' format Values; + +select '\noutput_format_values_escape_quote_with_quote=1' format LineAsString; +set output_format_values_escape_quote_with_quote=1; + +select 'foo' format Values; +select 'foo\'bar' format Values; +select 'foo\'\'bar' format Values; +-- fix no newline at end of file +select '' format LineAsString; diff --git a/parser/testdata/02994_cosineDistanceNullable/ast.json b/parser/testdata/02994_cosineDistanceNullable/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02994_cosineDistanceNullable/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02994_cosineDistanceNullable/metadata.json b/parser/testdata/02994_cosineDistanceNullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02994_cosineDistanceNullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02994_cosineDistanceNullable/query.sql b/parser/testdata/02994_cosineDistanceNullable/query.sql new file mode 100644 index 000000000..a62216982 --- /dev/null +++ b/parser/testdata/02994_cosineDistanceNullable/query.sql @@ -0,0 +1,3 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/59596 +SELECT cosineDistance((1, 1), (toNullable(0.5), 0.1)); +SELECT cosineDistance((1, 1), (toNullable(0.5), 0.1)) from numbers(10); diff --git a/parser/testdata/02994_inconsistent_formatting/ast.json b/parser/testdata/02994_inconsistent_formatting/ast.json new file mode 100644 index 000000000..39bf8e1b0 --- /dev/null +++ b/parser/testdata/02994_inconsistent_formatting/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery table (children 2)" + }, + { + "explain": " Identifier table" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType 
UInt8" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001448644, + "rows_read": 6, + "bytes_read": 209 + } +} diff --git a/parser/testdata/02994_inconsistent_formatting/metadata.json b/parser/testdata/02994_inconsistent_formatting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02994_inconsistent_formatting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02994_inconsistent_formatting/query.sql b/parser/testdata/02994_inconsistent_formatting/query.sql new file mode 100644 index 000000000..f22f81513 --- /dev/null +++ b/parser/testdata/02994_inconsistent_formatting/query.sql @@ -0,0 +1,10 @@ +CREATE TEMPORARY TABLE table (x UInt8); +INSERT INTO `table` FORMAT Values (1); +INSERT INTO TABLE `table` FORMAT Values (2); +INSERT INTO TABLE table FORMAT Values (3); +SELECT * FROM table ORDER BY x; +DROP TABLE table; + +CREATE TEMPORARY TABLE FORMAT (x UInt8); +INSERT INTO table FORMAT Values (1); +SELECT * FROM FORMAT FORMAT Values; diff --git a/parser/testdata/02994_sanity_check_settings/ast.json b/parser/testdata/02994_sanity_check_settings/ast.json new file mode 100644 index 000000000..f16017880 --- /dev/null +++ b/parser/testdata/02994_sanity_check_settings/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000869826, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02994_sanity_check_settings/metadata.json b/parser/testdata/02994_sanity_check_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02994_sanity_check_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02994_sanity_check_settings/query.sql b/parser/testdata/02994_sanity_check_settings/query.sql new file mode 100644 index 000000000..1a7a27ceb --- /dev/null +++ b/parser/testdata/02994_sanity_check_settings/query.sql @@ -0,0 +1,30 @@ +SET send_logs_level = 'error'; +CREATE TABLE data_02052_1_wide0__fuzz_48 +( + `key` Nullable(Int64), + `value` UInt8 +) + ENGINE = MergeTree + ORDER BY key + SETTINGS min_bytes_for_wide_part = 0, allow_nullable_key = 1 AS +SELECT + number, + repeat(toString(number), 5) +FROM numbers(1); + +-- Disabled because even after reducing internally to "256 * getNumberOfPhysicalCPUCores()" threads it's too much for CI (or for anything running this many times in parallel) +-- SELECT * APPLY max +-- FROM data_02052_1_wide0__fuzz_48 +-- GROUP BY key +-- WITH CUBE +-- SETTINGS max_read_buffer_size = 7, max_threads = 9223372036854775807; + +SELECT zero + 1 AS x +FROM system.zeros LIMIT 10 + SETTINGS max_block_size = 9223372036854775806, max_rows_to_read = 20, read_overflow_mode = 'break'; + +EXPLAIN PIPELINE SELECT zero + 1 AS x FROM system.zeros LIMIT 10 SETTINGS max_block_size = 9223372036854775806, max_rows_to_read = 20, read_overflow_mode = 'break'; + +-- Verify that we clamp odd values to something slightly saner +SET max_block_size = 9223372036854775806; +SELECT value FROM system.settings WHERE name = 'max_block_size'; diff --git a/parser/testdata/02995_bad_formatting_union_intersect/ast.json b/parser/testdata/02995_bad_formatting_union_intersect/ast.json new file mode 100644 index 000000000..256cc6a58 --- /dev/null +++ b/parser/testdata/02995_bad_formatting_union_intersect/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + 
"data": + [ + { + "explain": "CreateQuery t1 (children 3)" + }, + { + "explain": " Identifier t1" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier c" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectIntersectExceptQuery (children 2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias c)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias c)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2 (alias c)" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001243929, + "rows_read": 20, + "bytes_read": 759 + } +} diff --git a/parser/testdata/02995_bad_formatting_union_intersect/metadata.json b/parser/testdata/02995_bad_formatting_union_intersect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02995_bad_formatting_union_intersect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02995_bad_formatting_union_intersect/query.sql b/parser/testdata/02995_bad_formatting_union_intersect/query.sql new file mode 100644 index 000000000..227f407fc --- /dev/null +++ b/parser/testdata/02995_bad_formatting_union_intersect/query.sql @@ -0,0 +1,2 @@ +create temporary table t1 engine=MergeTree() order by c as ( select 1 as c intersect (select 1 as c union all select 2 as c ) ); +SELECT * FROM t1; diff --git a/parser/testdata/02995_preliminary_filters_duplicated_columns/ast.json b/parser/testdata/02995_preliminary_filters_duplicated_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02995_preliminary_filters_duplicated_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02995_preliminary_filters_duplicated_columns/metadata.json b/parser/testdata/02995_preliminary_filters_duplicated_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02995_preliminary_filters_duplicated_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02995_preliminary_filters_duplicated_columns/query.sql b/parser/testdata/02995_preliminary_filters_duplicated_columns/query.sql new file mode 100644 index 000000000..7be7df097 --- /dev/null +++ b/parser/testdata/02995_preliminary_filters_duplicated_columns/query.sql @@ -0,0 +1,7 @@ +-- It is special because actions cannot be reused for SimpleAggregateFunction (see https://github.com/ClickHouse/ClickHouse/pull/54436) +SET allow_suspicious_primary_key = 1; +drop table if exists data; +create table data (key Int) engine=AggregatingMergeTree() order by tuple(); +insert into data values (0); +select * from data final prewhere indexHint(_partition_id = 'all') or indexHint(_partition_id = 'all'); +select * from data final prewhere indexHint(_partition_id = 'all') or indexHint(_partition_id = 'all') or indexHint(_partition_id = 'all'); diff --git a/parser/testdata/02995_preliminary_filters_duplicated_columns_SimpleAggregateFunction/ast.json 
b/parser/testdata/02995_preliminary_filters_duplicated_columns_SimpleAggregateFunction/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02995_preliminary_filters_duplicated_columns_SimpleAggregateFunction/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02995_preliminary_filters_duplicated_columns_SimpleAggregateFunction/metadata.json b/parser/testdata/02995_preliminary_filters_duplicated_columns_SimpleAggregateFunction/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02995_preliminary_filters_duplicated_columns_SimpleAggregateFunction/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02995_preliminary_filters_duplicated_columns_SimpleAggregateFunction/query.sql b/parser/testdata/02995_preliminary_filters_duplicated_columns_SimpleAggregateFunction/query.sql new file mode 100644 index 000000000..964775811 --- /dev/null +++ b/parser/testdata/02995_preliminary_filters_duplicated_columns_SimpleAggregateFunction/query.sql @@ -0,0 +1,6 @@ +-- It is special because actions cannot be reused for SimpleAggregateFunction (see https://github.com/ClickHouse/ClickHouse/pull/54436) +set allow_suspicious_primary_key = 1; +drop table if exists data; +create table data (key SimpleAggregateFunction(max, Int)) engine=AggregatingMergeTree() order by tuple(); +insert into data values (0); +select * from data final prewhere indexHint(_partition_id = 'all') and key >= -1 where key >= 0; diff --git a/parser/testdata/02996_analyzer_prewhere_projection/ast.json b/parser/testdata/02996_analyzer_prewhere_projection/ast.json new file mode 100644 index 000000000..cd331664a --- /dev/null +++ b/parser/testdata/02996_analyzer_prewhere_projection/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000947938, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02996_analyzer_prewhere_projection/metadata.json b/parser/testdata/02996_analyzer_prewhere_projection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02996_analyzer_prewhere_projection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02996_analyzer_prewhere_projection/query.sql b/parser/testdata/02996_analyzer_prewhere_projection/query.sql new file mode 100644 index 000000000..66f7c37a8 --- /dev/null +++ b/parser/testdata/02996_analyzer_prewhere_projection/query.sql @@ -0,0 +1,7 @@ +SET allow_suspicious_low_cardinality_types=1; + +CREATE TABLE t__fuzz_0 (`i` LowCardinality(Int32), `j` Int32, `k` Int32, PROJECTION p (SELECT * ORDER BY j)) ENGINE = MergeTree ORDER BY i SETTINGS index_granularity = 1; +INSERT INTO t__fuzz_0 Select number, number, number FROM numbers(100); + +SELECT * FROM t__fuzz_0 PREWHERE 7 AND (i < 2147483647) AND (j IN (2147483646, -2, 1)) +SETTINGS enable_analyzer = true; diff --git a/parser/testdata/02996_index_compaction_counterexample/ast.json b/parser/testdata/02996_index_compaction_counterexample/ast.json new file mode 100644 index 000000000..bf6f770ab --- /dev/null +++ b/parser/testdata/02996_index_compaction_counterexample/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery b (children 1)" + }, + { + "explain": " Identifier b" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 
0.001238963, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/02996_index_compaction_counterexample/metadata.json b/parser/testdata/02996_index_compaction_counterexample/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02996_index_compaction_counterexample/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02996_index_compaction_counterexample/query.sql b/parser/testdata/02996_index_compaction_counterexample/query.sql new file mode 100644 index 000000000..1545f83c7 --- /dev/null +++ b/parser/testdata/02996_index_compaction_counterexample/query.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS b; +create table b (x Int64, y String) engine MergeTree order by (x, y) settings index_granularity=2; +insert into b values (0, 'a'), (1, 'b'), (1, 'c'); +select count() from b where x = 1 and y = 'b'; +detach table b; +attach table b; +select count() from b where x = 1 and y = 'b'; +DROP TABLE b; diff --git a/parser/testdata/02996_nullable_arrayReduce/ast.json b/parser/testdata/02996_nullable_arrayReduce/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02996_nullable_arrayReduce/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02996_nullable_arrayReduce/metadata.json b/parser/testdata/02996_nullable_arrayReduce/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02996_nullable_arrayReduce/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02996_nullable_arrayReduce/query.sql b/parser/testdata/02996_nullable_arrayReduce/query.sql new file mode 100644 index 000000000..8f69296db --- /dev/null +++ b/parser/testdata/02996_nullable_arrayReduce/query.sql @@ -0,0 +1,17 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/59600 +SELECT arrayReduce(toNullable('stddevSampOrNull'), [1]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayReduce(toNullable('median'), [toDecimal32OrNull(toFixedString('1', 1), 2), 8]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toFixedString('--- Int Empty ---', toLowCardinality(17)), arrayReduce(toNullable('avgOrNull'), [1]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayReduce('any', toNullable(3)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayReduce(toLowCardinality('median'), [toLowCardinality(toNullable(8))]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- { echoOn } +SELECT arrayReduce('sum', []::Array(UInt8)) as a, toTypeName(a); +SELECT arrayReduce('sumOrNull', []::Array(UInt8)) as a, toTypeName(a); +SELECT arrayReduce('sum', [NULL]::Array(Nullable(UInt8))) as a, toTypeName(a); +SELECT arrayReduce('sum', [NULL, 10]::Array(Nullable(UInt8))) as a, toTypeName(a); +SELECT arrayReduce('any_respect_nulls', [NULL, 10]::Array(Nullable(UInt8))) as a, toTypeName(a); +SELECT arrayReduce('any_respect_nulls', [10, NULL]::Array(Nullable(UInt8))) as a, toTypeName(a); + +SELECT arrayReduce('median', [toLowCardinality(toNullable(8))]) as t, toTypeName(t); +-- { echoOff } diff --git a/parser/testdata/02997_fix_datetime64_scale_conversion/ast.json b/parser/testdata/02997_fix_datetime64_scale_conversion/ast.json new file mode 100644 index 000000000..ef045d554 --- /dev/null +++ b/parser/testdata/02997_fix_datetime64_scale_conversion/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_0 (children 1)" + }, + { + "explain": " Identifier test_0" + } 
+ ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001224353, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/02997_fix_datetime64_scale_conversion/metadata.json b/parser/testdata/02997_fix_datetime64_scale_conversion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02997_fix_datetime64_scale_conversion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02997_fix_datetime64_scale_conversion/query.sql b/parser/testdata/02997_fix_datetime64_scale_conversion/query.sql new file mode 100644 index 000000000..b905ef2b9 --- /dev/null +++ b/parser/testdata/02997_fix_datetime64_scale_conversion/query.sql @@ -0,0 +1,124 @@ +DROP TABLE IF EXISTS test_0; +CREATE TABLE IF NOT EXISTS test_0 (a DateTime64(0)) engine = MergeTree order by a; +INSERT INTO test_0 VALUES (toDateTime64('2023-01-01 00:00:00', 0)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-01 00:00:00.123456789', 0)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-01 01:01:01', 1)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-01 01:01:01.123456789', 1)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-02 02:02:02', 2)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-02 02:02:02.123456789', 2)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-03 03:03:03', 3)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-03 03:03:03.123456789', 3)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-04 04:04:04', 4)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-04 04:04:04.123456789', 4)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-05 05:05:05', 5)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-05 05:05:05.123456789', 5)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-06 06:06:06', 6)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-06 06:06:06.123456789', 6)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-07 07:07:07', 7)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-07 07:07:07.123456789', 7)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-08 08:08:08', 8)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-08 08:08:08.123456789', 8)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-09 09:09:09', 9)); +INSERT INTO test_0 VALUES (toDateTime64('2023-01-09 09:09:09.123456789', 9)); +SELECT * FROM test_0 ORDER BY a; +DROP TABLE test_0; + +DROP TABLE IF EXISTS test_2; +CREATE TABLE IF NOT EXISTS test_2 (a DateTime64(2)) engine = MergeTree order by a; +INSERT INTO test_2 VALUES (toDateTime64('2023-01-01 00:00:00', 0)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-01 00:00:00.123456789', 0)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-01 01:01:01', 1)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-01 01:01:01.123456789', 1)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-02 02:02:02', 2)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-02 02:02:02.123456789', 2)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-03 03:03:03', 3)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-03 03:03:03.123456789', 3)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-04 04:04:04', 4)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-04 04:04:04.123456789', 4)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-05 05:05:05', 5)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-05 05:05:05.123456789', 5)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-06 06:06:06', 6)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-06 06:06:06.123456789', 6)); +INSERT INTO test_2 VALUES 
(toDateTime64('2023-01-07 07:07:07', 7)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-07 07:07:07.123456789', 7)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-08 08:08:08', 8)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-08 08:08:08.123456789', 8)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-09 09:09:09', 9)); +INSERT INTO test_2 VALUES (toDateTime64('2023-01-09 09:09:09.123456789', 9)); +SELECT * FROM test_2 ORDER BY a; +DROP TABLE test_2; + +DROP TABLE IF EXISTS test_3; +CREATE TABLE IF NOT EXISTS test_3 (a DateTime64(3)) engine = MergeTree order by a; +INSERT INTO test_3 VALUES (toDateTime64('2023-01-01 00:00:00', 0)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-01 00:00:00.123456789', 0)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-01 01:01:01', 1)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-01 01:01:01.123456789', 1)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-02 02:02:02', 2)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-02 02:02:02.123456789', 2)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-03 03:03:03', 3)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-03 03:03:03.123456789', 3)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-04 04:04:04', 4)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-04 04:04:04.123456789', 4)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-05 05:05:05', 5)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-05 05:05:05.123456789', 5)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-06 06:06:06', 6)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-06 06:06:06.123456789', 6)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-07 07:07:07', 7)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-07 07:07:07.123456789', 7)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-08 08:08:08', 8)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-08 08:08:08.123456789', 8)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-09 09:09:09', 9)); +INSERT INTO test_3 VALUES (toDateTime64('2023-01-09 09:09:09.123456789', 9)); +SELECT * FROM test_3 ORDER BY a; +DROP TABLE test_3; + +DROP TABLE IF EXISTS test_6; +CREATE TABLE IF NOT EXISTS test_6 (a DateTime64(6)) engine = MergeTree order by a; +INSERT INTO test_6 VALUES (toDateTime64('2023-01-01 00:00:00', 0)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-01 00:00:00.123456789', 0)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-01 01:01:01', 1)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-01 01:01:01.123456789', 1)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-02 02:02:02', 2)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-02 02:02:02.123456789', 2)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-03 03:03:03', 3)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-03 03:03:03.123456789', 3)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-04 04:04:04', 4)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-04 04:04:04.123456789', 4)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-05 05:05:05', 5)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-05 05:05:05.123456789', 5)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-06 06:06:06', 6)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-06 06:06:06.123456789', 6)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-07 07:07:07', 7)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-07 07:07:07.123456789', 7)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-08 08:08:08', 8)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-08 
08:08:08.123456789', 8)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-09 09:09:09', 9)); +INSERT INTO test_6 VALUES (toDateTime64('2023-01-09 09:09:09.123456789', 9)); +SELECT * FROM test_6 ORDER BY a; +DROP TABLE test_6; + +DROP TABLE IF EXISTS test_9; +CREATE TABLE IF NOT EXISTS test_9 (a DateTime64(9)) engine = MergeTree order by a; +INSERT INTO test_9 VALUES (toDateTime64('2023-01-01 00:00:00', 0)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-01 00:00:00.123456789', 0)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-01 01:01:01', 1)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-01 01:01:01.123456789', 1)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-02 02:02:02', 2)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-02 02:02:02.123456789', 2)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-03 03:03:03', 3)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-03 03:03:03.123456789', 3)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-04 04:04:04', 4)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-04 04:04:04.123456789', 4)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-05 05:05:05', 5)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-05 05:05:05.123456789', 5)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-06 06:06:06', 6)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-06 06:06:06.123456789', 6)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-07 07:07:07', 7)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-07 07:07:07.123456789', 7)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-08 08:08:08', 8)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-08 08:08:08.123456789', 8)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-09 09:09:09', 9)); +INSERT INTO test_9 VALUES (toDateTime64('2023-01-09 09:09:09.123456789', 9)); +SELECT * FROM test_9 ORDER BY a; +DROP TABLE test_9; diff --git a/parser/testdata/02997_fix_datetime64_scale_conversion/../02997_projections_formatting/ast.json b/parser/testdata/02997_projections_formatting/ast.json new file mode 100644 index 000000000..12ba51a1e --- /dev/null +++ b/parser/testdata/02997_projections_formatting/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t_proj (children 3)" + }, + { + "explain": " Identifier t_proj" + }, + { + "explain": " Columns definition (children 2)" + }, + { + "explain": "  ExpressionList (children 2)" + }, + { + "explain": "   ColumnDeclaration t (children 1)" + }, + { + "explain": "    DataType DateTime" + }, + { + "explain": "   ColumnDeclaration id (children 1)" + }, + { + "explain": "    DataType UInt64" + }, + { + "explain": "  ExpressionList (children 1)" + }, + { + "explain": "   Projection (children 1)" + }, + { + "explain": "    ProjectionSelectQuery (children 2)" + }, + { + "explain": "     ExpressionList (children 2)" + }, + { + "explain": "      Identifier id" + }, + { + "explain": "      Identifier t" + }, + { + "explain": "     Function toStartOfDay (children 1)" + }, + { + "explain": "      ExpressionList (children 1)" + }, + { + "explain": "       Identifier t" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": "  Function MergeTree" + }, + { + "explain": "  Identifier id" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.00133247, + "rows_read": 20, + "bytes_read": 701 + } +} diff --git a/parser/testdata/02997_projections_formatting/metadata.json b/parser/testdata/02997_projections_formatting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/02997_projections_formatting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02997_projections_formatting/query.sql b/parser/testdata/02997_projections_formatting/query.sql new file mode 100644 index 000000000..b593c2576 --- /dev/null +++ b/parser/testdata/02997_projections_formatting/query.sql @@ -0,0 +1,5 @@ +CREATE TEMPORARY TABLE t_proj (t DateTime, id UInt64, PROJECTION p (SELECT id, t ORDER BY toStartOfDay(t))) ENGINE = MergeTree ORDER BY id; +SHOW CREATE TEMPORARY TABLE t_proj FORMAT TSVRaw; + +CREATE TEMPORARY TABLE t_proj2 (a UInt32, b UInt32, PROJECTION p (SELECT a ORDER BY b * 2)) ENGINE = MergeTree ORDER BY a; +SHOW CREATE TEMPORARY TABLE t_proj2 FORMAT TSVRaw; diff --git a/parser/testdata/02998_analyzer_prewhere_report/ast.json b/parser/testdata/02998_analyzer_prewhere_report/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02998_analyzer_prewhere_report/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02998_analyzer_prewhere_report/metadata.json b/parser/testdata/02998_analyzer_prewhere_report/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02998_analyzer_prewhere_report/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02998_analyzer_prewhere_report/query.sql b/parser/testdata/02998_analyzer_prewhere_report/query.sql new file mode 100644 index 000000000..5a7cab854 --- /dev/null +++ b/parser/testdata/02998_analyzer_prewhere_report/query.sql @@ -0,0 +1,18 @@ +--https://github.com/ClickHouse/ClickHouse/issues/60232 +CREATE TABLE hits +( + `date` Date, + `data` Array(UInt32) +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(date) +ORDER BY date; + +INSERT INTO hits values('2024-01-01', [1, 2, 3]); + +SELECT + hits.date, + arrayFilter(x -> (x IN (2, 3)), data) AS filtered +FROM hits +WHERE arrayExists(x -> (x IN (2, 3)), data) +SETTINGS enable_analyzer = 1; diff --git a/parser/testdata/02998_analyzer_secret_args_tree_node/ast.json b/parser/testdata/02998_analyzer_secret_args_tree_node/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02998_analyzer_secret_args_tree_node/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02998_analyzer_secret_args_tree_node/metadata.json b/parser/testdata/02998_analyzer_secret_args_tree_node/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02998_analyzer_secret_args_tree_node/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02998_analyzer_secret_args_tree_node/query.sql b/parser/testdata/02998_analyzer_secret_args_tree_node/query.sql new file mode 100644 index 000000000..a216f886f --- /dev/null +++ b/parser/testdata/02998_analyzer_secret_args_tree_node/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-fasttest +-- encrypt function doesn't exist in the fasttest build + +-- { echoOn } +SET enable_analyzer = 1; + +EXPLAIN QUERY TREE SELECT encrypt('aes-256-ofb', (SELECT 'qwerty'), '12345678901234567890123456789012'), encrypt('aes-256-ofb', (SELECT 'asdf'), '12345678901234567890123456789012'); + +SET format_display_secrets_in_show_and_select = 1; + +EXPLAIN QUERY TREE SELECT encrypt('aes-256-ofb', (SELECT 'qwerty'), '12345678901234567890123456789012'), encrypt('aes-256-ofb', (SELECT 'asdf'), '12345678901234567890123456789012'); +-- { echoOff } diff --git 
a/parser/testdata/02998_attach_partition_not_allowed_if_structure_differs_due_to_materialized_column/ast.json b/parser/testdata/02998_attach_partition_not_allowed_if_structure_differs_due_to_materialized_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02998_attach_partition_not_allowed_if_structure_differs_due_to_materialized_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02998_attach_partition_not_allowed_if_structure_differs_due_to_materialized_column/metadata.json b/parser/testdata/02998_attach_partition_not_allowed_if_structure_differs_due_to_materialized_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02998_attach_partition_not_allowed_if_structure_differs_due_to_materialized_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02998_attach_partition_not_allowed_if_structure_differs_due_to_materialized_column/query.sql b/parser/testdata/02998_attach_partition_not_allowed_if_structure_differs_due_to_materialized_column/query.sql new file mode 100644 index 000000000..c92d71893 --- /dev/null +++ b/parser/testdata/02998_attach_partition_not_allowed_if_structure_differs_due_to_materialized_column/query.sql @@ -0,0 +1,21 @@ +CREATE TABLE attach_partition_t7 ( + a UInt32, + b UInt32 +) + ENGINE = MergeTree +PARTITION BY a ORDER BY a; + +ALTER TABLE attach_partition_t7 + ADD COLUMN mat_column + UInt32 MATERIALIZED a+b; + +insert into attach_partition_t7 values (1, 2); + +CREATE TABLE attach_partition_t8 ( + a UInt32, + b UInt32 +) + ENGINE = MergeTree +PARTITION BY a ORDER BY a; + +ALTER TABLE attach_partition_t8 ATTACH PARTITION ID '1' FROM attach_partition_t7; -- {serverError INCOMPATIBLE_COLUMNS}; diff --git a/parser/testdata/02998_ipv6_hashing/ast.json b/parser/testdata/02998_ipv6_hashing/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02998_ipv6_hashing/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02998_ipv6_hashing/metadata.json b/parser/testdata/02998_ipv6_hashing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02998_ipv6_hashing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02998_ipv6_hashing/query.sql b/parser/testdata/02998_ipv6_hashing/query.sql new file mode 100644 index 000000000..a83679274 --- /dev/null +++ b/parser/testdata/02998_ipv6_hashing/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest + +SELECT toIPv6(materialize(toLowCardinality('fe80::62:5aff:fed1:daf0'))) AS ipv6, SHA256(ipv6) from numbers(10); +SELECT toIPv6(materialize('fe80::62:5aff:fed1:daf0')) AS ipv6, SHA256(ipv6) from numbers(10); + diff --git a/parser/testdata/02998_operator_respect_nulls/ast.json b/parser/testdata/02998_operator_respect_nulls/ast.json new file mode 100644 index 000000000..8ba1564df --- /dev/null +++ b/parser/testdata/02998_operator_respect_nulls/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 8, + + "statistics": + 
{ + "elapsed": 0.001643108, + "rows_read": 8, + "bytes_read": 287 + } +} diff --git a/parser/testdata/02998_operator_respect_nulls/metadata.json b/parser/testdata/02998_operator_respect_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02998_operator_respect_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02998_operator_respect_nulls/query.sql b/parser/testdata/02998_operator_respect_nulls/query.sql new file mode 100644 index 000000000..240992e1f --- /dev/null +++ b/parser/testdata/02998_operator_respect_nulls/query.sql @@ -0,0 +1 @@ +SELECT plus(1, 1) RESPECT NULLS; -- { serverError SYNTAX_ERROR } diff --git a/parser/testdata/02998_pretty_format_print_readable_number_on_single_value/ast.json b/parser/testdata/02998_pretty_format_print_readable_number_on_single_value/ast.json new file mode 100644 index 000000000..b103a7515 --- /dev/null +++ b/parser/testdata/02998_pretty_format_print_readable_number_on_single_value/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001255562, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02998_pretty_format_print_readable_number_on_single_value/metadata.json b/parser/testdata/02998_pretty_format_print_readable_number_on_single_value/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02998_pretty_format_print_readable_number_on_single_value/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02998_pretty_format_print_readable_number_on_single_value/query.sql b/parser/testdata/02998_pretty_format_print_readable_number_on_single_value/query.sql new file mode 100644 index 000000000..46d6bb657 --- /dev/null +++ b/parser/testdata/02998_pretty_format_print_readable_number_on_single_value/query.sql @@ -0,0 +1,97 @@ +SET output_format_pretty_display_footer_column_names=0; +SELECT 1_000_000 as a FORMAT Pretty; +SELECT 1_000_000 as a FORMAT PrettyNoEscapes; +SELECT 1_000_000 as a FORMAT PrettyMonoBlock; +SELECT 1_000_000 as a FORMAT PrettyNoEscapesMonoBlock; +SELECT 1_000_000 as a FORMAT PrettyCompact; +SELECT 1_000_000 as a FORMAT PrettyCompactNoEscapes; +SELECT 1_000_000 as a FORMAT PrettyCompactMonoBlock; +SELECT 1_000_000 as a FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT 1_000_000 as a FORMAT PrettySpace; +SELECT 1_000_000 as a FORMAT PrettySpaceNoEscapes; +SELECT 1_000_000 as a FORMAT PrettySpaceMonoBlock; +SELECT 1_000_000 as a FORMAT PrettySpaceNoEscapesMonoBlock; + + +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT Pretty; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettyNoEscapes; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettyMonoBlock; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettyNoEscapesMonoBlock; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettyCompact; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettyCompactNoEscapes; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettyCompactMonoBlock; +SELECT 1_000_000 as a SETTINGS 
output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettySpace; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettySpaceNoEscapes; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettySpaceMonoBlock; +SELECT 1_000_000 as a SETTINGS output_format_pretty_single_large_number_tip_threshold = 1000 FORMAT PrettySpaceNoEscapesMonoBlock; + +SELECT 1_000_001 as a FORMAT Pretty; +SELECT 1_000_001 as a FORMAT PrettyNoEscapes; +SELECT 1_000_001 as a FORMAT PrettyMonoBlock; +SELECT 1_000_001 as a FORMAT PrettyNoEscapesMonoBlock; +SELECT 1_000_001 as a FORMAT PrettyCompact; +SELECT 1_000_001 as a FORMAT PrettyCompactNoEscapes; +SELECT 1_000_001 as a FORMAT PrettyCompactMonoBlock; +SELECT 1_000_001 as a FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT 1_000_001 as a FORMAT PrettySpace; +SELECT 1_000_001 as a FORMAT PrettySpaceNoEscapes; +SELECT 1_000_001 as a FORMAT PrettySpaceMonoBlock; +SELECT 1_000_001 as a FORMAT PrettySpaceNoEscapesMonoBlock; + +SELECT 1_000_000_000 as a FORMAT Pretty; +SELECT 1_000_000_000 as a FORMAT PrettyNoEscapes; +SELECT 1_000_000_000 as a FORMAT PrettyMonoBlock; +SELECT 1_000_000_000 as a FORMAT PrettyNoEscapesMonoBlock; +SELECT 1_000_000_000 as a FORMAT PrettyCompact; +SELECT 1_000_000_000 as a FORMAT PrettyCompactNoEscapes; +SELECT 1_000_000_000 as a FORMAT PrettyCompactMonoBlock; +SELECT 1_000_000_000 as a FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT 1_000_000_000 as a FORMAT PrettySpace; +SELECT 1_000_000_000 as a FORMAT PrettySpaceNoEscapes; +SELECT 1_000_000_000 as a FORMAT PrettySpaceMonoBlock; +SELECT 1_000_000_000 as a FORMAT PrettySpaceNoEscapesMonoBlock; + +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT Pretty; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettyNoEscapes; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettyMonoBlock; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettyNoEscapesMonoBlock; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettyCompact; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettyCompactNoEscapes; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettyCompactMonoBlock; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettySpace; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettySpaceNoEscapes; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettySpaceMonoBlock; +SELECT 1_000_000_000 as a, 1_000_000_000 as b FORMAT PrettySpaceNoEscapesMonoBlock; + +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT Pretty; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettyNoEscapes; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettyMonoBlock; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettyNoEscapesMonoBlock; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettyCompact; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettyCompactNoEscapes; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettyCompactMonoBlock; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettySpace; +SELECT 1_000_000_000 as a FROM 
system.numbers LIMIT 2 FORMAT PrettySpaceNoEscapes; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettySpaceMonoBlock; +SELECT 1_000_000_000 as a FROM system.numbers LIMIT 2 FORMAT PrettySpaceNoEscapesMonoBlock; + +SET output_format_pretty_single_large_number_tip_threshold=1; +SELECT '2024-02-29'::Date FORMAT Pretty; +SELECT '2024-02-29'::Date FORMAT PrettyNoEscapes; +SELECT '2024-02-29'::Date FORMAT PrettyMonoBlock; +SELECT '2024-02-29'::Date FORMAT PrettyNoEscapesMonoBlock; +SELECT '2024-02-29'::Date32 FORMAT Pretty; +SELECT '2024-02-29'::Date32 FORMAT PrettyNoEscapes; +SELECT '2024-02-29'::Date32 FORMAT PrettyMonoBlock; +SELECT '2024-02-29'::Date32 FORMAT PrettyNoEscapesMonoBlock; +SELECT '2024-02-29 00:00:00'::DateTime FORMAT Pretty; +SELECT '2024-02-29 00:00:00'::DateTime FORMAT PrettyNoEscapes; +SELECT '2024-02-29 00:00:00'::DateTime FORMAT PrettyMonoBlock; +SELECT '2024-02-29 00:00:00'::DateTime FORMAT PrettyNoEscapesMonoBlock; +SELECT '2024-02-29 00:00:00'::DateTime::DateTime64 FORMAT Pretty; +SELECT '2024-02-29 00:00:00'::DateTime::DateTime64 FORMAT PrettyNoEscapes; +SELECT '2024-02-29 00:00:00'::DateTime::DateTime64 FORMAT PrettyMonoBlock; +SELECT '2024-02-29 00:00:00'::DateTime::DateTime64 FORMAT PrettyNoEscapesMonoBlock; diff --git a/parser/testdata/02998_primary_key_skip_columns/ast.json b/parser/testdata/02998_primary_key_skip_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02998_primary_key_skip_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02998_primary_key_skip_columns/metadata.json b/parser/testdata/02998_primary_key_skip_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02998_primary_key_skip_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02998_primary_key_skip_columns/query.sql b/parser/testdata/02998_primary_key_skip_columns/query.sql new file mode 100644 index 000000000..1abe692a7 --- /dev/null +++ b/parser/testdata/02998_primary_key_skip_columns/query.sql @@ -0,0 +1,36 @@ +-- Tags: no-asan, no-tsan, no-msan, no-ubsan, no-random-settings, no-debug, no-fasttest +-- no-fasttest: Low index granularity and too many parts makes the test slow + +DROP TABLE IF EXISTS test; + +CREATE TABLE test (a UInt64, b UInt64, c UInt64) ENGINE = MergeTree ORDER BY (a, b, c) SETTINGS index_granularity = 1, primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns = 1; +INSERT INTO test SELECT sipHash64(number, 1), sipHash64(number, 2), sipHash64(number, 3) FROM numbers(100000); + +SELECT count() FROM test; +SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760; +SELECT count() FROM test WHERE b > 7898976344263989848 AND b < 8040320939819153137; +SELECT count() FROM test WHERE c > 13239894303140990071 AND c < 16179795840886947236; +SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760 AND b > 7898976344263989848 AND b < 8040320939819153137; +SELECT count() FROM test WHERE b > 7898976344263989848 AND b < 8040320939819153137 AND c > 13239894303140990071 AND c < 16179795840886947236; +SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760 AND c > 13239894303140990071 AND c < 16179795840886947236; +SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760 AND b > 7898976344263989848 AND b < 8040320939819153137 AND c > 13239894303140990071 AND c < 16179795840886947236; + +SELECT 'Key size: 
', round(sum(primary_key_bytes_in_memory), -5) FROM system.parts WHERE database = currentDatabase() AND table = 'test'; + +ALTER TABLE test MODIFY SETTING primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns = 0.9; + +DETACH TABLE test; +ATTACH TABLE test; + +SELECT count() FROM test; +SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760; +SELECT count() FROM test WHERE b > 7898976344263989848 AND b < 8040320939819153137; +SELECT count() FROM test WHERE c > 13239894303140990071 AND c < 16179795840886947236; +SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760 AND b > 7898976344263989848 AND b < 8040320939819153137; +SELECT count() FROM test WHERE b > 7898976344263989848 AND b < 8040320939819153137 AND c > 13239894303140990071 AND c < 16179795840886947236; +SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760 AND c > 13239894303140990071 AND c < 16179795840886947236; +SELECT count() FROM test WHERE a > 1849813033528774208 AND a < 4594276315503201760 AND b > 7898976344263989848 AND b < 8040320939819153137 AND c > 13239894303140990071 AND c < 16179795840886947236; + +SELECT 'Key size: ', round(sum(primary_key_bytes_in_memory), -5) FROM system.parts WHERE database = currentDatabase() AND table = 'test'; + +DROP TABLE test; diff --git a/parser/testdata/02998_projection_after_attach_partition/ast.json b/parser/testdata/02998_projection_after_attach_partition/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02998_projection_after_attach_partition/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02998_projection_after_attach_partition/metadata.json b/parser/testdata/02998_projection_after_attach_partition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02998_projection_after_attach_partition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02998_projection_after_attach_partition/query.sql b/parser/testdata/02998_projection_after_attach_partition/query.sql new file mode 100644 index 000000000..2ae0af0e8 --- /dev/null +++ b/parser/testdata/02998_projection_after_attach_partition/query.sql @@ -0,0 +1,42 @@ +-- { echoOn } +DROP TABLE IF EXISTS visits_order; +DROP TABLE IF EXISTS visits_order_dst; + +CREATE TABLE visits_order +( + user_id UInt64, + user_name String, + some_int UInt64 +) ENGINE = MergeTree() PRIMARY KEY user_id PARTITION BY user_id SETTINGS index_granularity = 1; + +CREATE TABLE visits_order_dst +( + user_id UInt64, + user_name String, + some_int UInt64 +) ENGINE = MergeTree() PRIMARY KEY user_id PARTITION BY user_id SETTINGS index_granularity = 1; + +ALTER TABLE visits_order ADD PROJECTION user_name_projection (SELECT * ORDER BY user_name); +ALTER TABLE visits_order_dst ADD PROJECTION user_name_projection (SELECT * ORDER BY user_name); + +INSERT INTO visits_order SELECT 2, 'user2', number from numbers(1, 10); +INSERT INTO visits_order SELECT 2, 'another_user2', number*2 from numbers(1, 10); +INSERT INTO visits_order SELECT 2, 'yet_another_user2', number*3 from numbers(1, 10); + +-- Merge all parts so that projections can no longer help filter them, +-- which will result in projections not being used. 
+OPTIMIZE TABLE visits_order FINAL; + +ALTER TABLE visits_order_dst ATTACH PARTITION ID '2' FROM visits_order; + +SET enable_analyzer=0; + +EXPLAIN SELECT * FROM visits_order_dst WHERE user_name='another_user2'; + +SET enable_analyzer=1, enable_parallel_replicas=0; + +EXPLAIN SELECT * FROM visits_order_dst WHERE user_name='another_user2'; + +SET enable_analyzer=1, enable_parallel_replicas=1, parallel_replicas_local_plan=1, parallel_replicas_support_projection=1, optimize_aggregation_in_order = 0; + +SELECT trimLeft(*) FROM (EXPLAIN SELECT * FROM visits_order_dst WHERE user_name='another_user2') where explain like '%ReadFromPreparedSource%' or explain like '%ReadFromMergeTree%'; diff --git a/parser/testdata/02998_system_dns_cache_table/ast.json b/parser/testdata/02998_system_dns_cache_table/ast.json new file mode 100644 index 000000000..e0eafab02 --- /dev/null +++ b/parser/testdata/02998_system_dns_cache_table/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Identifier hostname" + }, + { + "explain": " Identifier ip_address" + }, + { + "explain": " Identifier ip_family" + }, + { + "explain": " Identifier cached_at" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.dns_cache" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001101498, + "rows_read": 12, + "bytes_read": 458 + } +} diff --git a/parser/testdata/02998_system_dns_cache_table/metadata.json b/parser/testdata/02998_system_dns_cache_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02998_system_dns_cache_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02998_system_dns_cache_table/query.sql b/parser/testdata/02998_system_dns_cache_table/query.sql new file mode 100644 index 000000000..0ceb3d8a9 --- /dev/null +++ b/parser/testdata/02998_system_dns_cache_table/query.sql @@ -0,0 +1,3 @@ +SELECT hostname, ip_address, ip_family, cached_at FROM system.dns_cache +LIMIT 0 +FORMAT TSVWithNamesAndTypes; diff --git a/parser/testdata/02998_to_milliseconds/ast.json b/parser/testdata/02998_to_milliseconds/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02998_to_milliseconds/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02998_to_milliseconds/metadata.json b/parser/testdata/02998_to_milliseconds/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02998_to_milliseconds/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02998_to_milliseconds/query.sql b/parser/testdata/02998_to_milliseconds/query.sql new file mode 100644 index 000000000..f159f6aab --- /dev/null +++ b/parser/testdata/02998_to_milliseconds/query.sql @@ -0,0 +1,17 @@ +-- Negative tests +SELECT toMillisecond(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT toMillisecond('string'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toMillisecond(toDate('2024-02-28')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toMillisecond(toDate32('2024-02-28')); -- { serverError 
ILLEGAL_TYPE_OF_ARGUMENT } + +-- Tests with constant and non-constant arguments +SELECT toDateTime('2023-04-21 10:20:30') AS dt, toMillisecond(dt), toMillisecond(materialize(dt)); +SELECT toDateTime64('2023-04-21 10:20:30', 0) AS dt64, toMillisecond(dt64), toMillisecond(materialize(dt64)); +SELECT toDateTime64('2023-04-21 10:20:30.123', 3) AS dt64, toMillisecond(dt64), toMillisecond(materialize(dt64)); +SELECT toDateTime64('2023-04-21 10:20:30.123456', 6) AS dt64, toMillisecond(dt64), toMillisecond(materialize(dt64)); +SELECT toDateTime64('2023-04-21 10:20:30.123456789', 9) AS dt64, toMillisecond(dt64), toMillisecond(materialize(dt64)); + +-- Special cases +SELECT MILLISECOND(toDateTime64('2023-04-21 10:20:30.123456', 2)); -- Alias +SELECT toNullable(toDateTime('2023-04-21 10:20:30')) AS dt, toMillisecond(dt); -- Nullable +SELECT toLowCardinality(toDateTime('2023-04-21 10:20:30')) AS dt, toMillisecond(dt); -- LowCardinality diff --git a/parser/testdata/02999_analyzer_preimage_null/ast.json b/parser/testdata/02999_analyzer_preimage_null/ast.json new file mode 100644 index 000000000..f773dfc5d --- /dev/null +++ b/parser/testdata/02999_analyzer_preimage_null/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001328075, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02999_analyzer_preimage_null/metadata.json b/parser/testdata/02999_analyzer_preimage_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02999_analyzer_preimage_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02999_analyzer_preimage_null/query.sql b/parser/testdata/02999_analyzer_preimage_null/query.sql new file mode 100644 index 000000000..0fc61cf08 --- /dev/null +++ b/parser/testdata/02999_analyzer_preimage_null/query.sql @@ -0,0 +1,20 @@ +SET enable_analyzer=1; +SET optimize_time_filter_with_preimage=1; + +CREATE TABLE date_t__fuzz_0 (`id` UInt32, `value1` String, `date1` Date) ENGINE = ReplacingMergeTree ORDER BY id SETTINGS allow_nullable_key=1; + +-- { echoOn } +EXPLAIN QUERY TREE run_passes = 1 +SELECT * +FROM date_t__fuzz_0 +WHERE ((toYear(date1) AS b) != toNullable(1993)) AND (id <= b); + +EXPLAIN QUERY TREE run_passes = 1 +SELECT * +FROM date_t__fuzz_0 +WHERE ((toYear(date1) AS b) != 1993) AND (id <= b) SETTINGS optimize_time_filter_with_preimage=0; + +EXPLAIN QUERY TREE run_passes = 1 +SELECT * +FROM date_t__fuzz_0 +WHERE ((toYear(date1) AS b) != 1993) AND (id <= b) SETTINGS optimize_time_filter_with_preimage=1; diff --git a/parser/testdata/02999_scalar_subqueries_bug_1/ast.json b/parser/testdata/02999_scalar_subqueries_bug_1/ast.json new file mode 100644 index 000000000..3f0bd956e --- /dev/null +++ b/parser/testdata/02999_scalar_subqueries_bug_1/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_table_select (children 1)" + }, + { + "explain": " Identifier t_table_select" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001084288, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/02999_scalar_subqueries_bug_1/metadata.json b/parser/testdata/02999_scalar_subqueries_bug_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02999_scalar_subqueries_bug_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/02999_scalar_subqueries_bug_1/query.sql b/parser/testdata/02999_scalar_subqueries_bug_1/query.sql new file mode 100644 index 000000000..88bcdeb7f --- /dev/null +++ b/parser/testdata/02999_scalar_subqueries_bug_1/query.sql @@ -0,0 +1,8 @@ +drop table if exists t_table_select; +CREATE TABLE t_table_select (id UInt32) ENGINE = MergeTree ORDER BY id; +INSERT INTO t_table_select (id) SELECT number FROM numbers(30); + +CREATE TEMPORARY TABLE t_test (x UInt32, y Nullable(UInt32)) AS SELECT a.id, b.id FROM remote('127.0.0.{1,2}', currentDatabase(), t_table_select) AS a GLOBAL LEFT JOIN (SELECT id FROM remote('127.0.0.{1,2}', currentDatabase(), t_table_select) AS b WHERE (b.id % 10) = 0) AS b ON b.id = a.id SETTINGS join_use_nulls = 1; + +select * from t_test order by x; + diff --git a/parser/testdata/02999_scalar_subqueries_bug_2/ast.json b/parser/testdata/02999_scalar_subqueries_bug_2/ast.json new file mode 100644 index 000000000..51ff623f0 --- /dev/null +++ b/parser/testdata/02999_scalar_subqueries_bug_2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery source (children 1)" + }, + { + "explain": " Identifier source" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001356783, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/02999_scalar_subqueries_bug_2/metadata.json b/parser/testdata/02999_scalar_subqueries_bug_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02999_scalar_subqueries_bug_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02999_scalar_subqueries_bug_2/query.sql b/parser/testdata/02999_scalar_subqueries_bug_2/query.sql new file mode 100644 index 000000000..03ac91e40 --- /dev/null +++ b/parser/testdata/02999_scalar_subqueries_bug_2/query.sql @@ -0,0 +1,18 @@ +drop table if exists source; +drop table if exists target1; +drop table if exists target2; +drop table if exists v_heavy; + + +create table source(type String) engine=MergeTree order by type; + +create view v_heavy as +with nums as (select number from numbers(1e5)) +select count(*) n from (select number from numbers(1e5) n1 cross join nums); + +create table target1(type String) engine=MergeTree order by type; +create table target2(type String) engine=MergeTree order by type; + +set max_execution_time=2; +-- we should not execute scalar subquery here +create materialized view vm_target2 to target2 as select * from source where type='two' and (select sum(sleepEachRow(0.1)) from numbers(30)); diff --git a/parser/testdata/02999_ulid_short_circuit/ast.json b/parser/testdata/02999_ulid_short_circuit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/02999_ulid_short_circuit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/02999_ulid_short_circuit/metadata.json b/parser/testdata/02999_ulid_short_circuit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02999_ulid_short_circuit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02999_ulid_short_circuit/query.sql b/parser/testdata/02999_ulid_short_circuit/query.sql new file mode 100644 index 000000000..4453d9dbe --- /dev/null +++ b/parser/testdata/02999_ulid_short_circuit/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest + +SET session_timezone='Europe/Madrid'; -- disable time zone randomization in CI +SELECT if(length(x) = 26, 
ULIDStringToDateTime(x, 'Europe/Madrid'), toDateTime('2024-02-21 12:00:00', 'Europe/Madrid')) AS datetime +FROM values('x String', '01HQ3KJJKHRWP357YVYBX32WHY', '01HQ3KJJKH') diff --git a/parser/testdata/02999_variant_suspicious_types/ast.json b/parser/testdata/02999_variant_suspicious_types/ast.json new file mode 100644 index 000000000..c67aec665 --- /dev/null +++ b/parser/testdata/02999_variant_suspicious_types/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001262684, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/02999_variant_suspicious_types/metadata.json b/parser/testdata/02999_variant_suspicious_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/02999_variant_suspicious_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/02999_variant_suspicious_types/query.sql b/parser/testdata/02999_variant_suspicious_types/query.sql new file mode 100644 index 000000000..8cdbfc13a --- /dev/null +++ b/parser/testdata/02999_variant_suspicious_types/query.sql @@ -0,0 +1,7 @@ +set allow_suspicious_variant_types=0; +select 42::Variant(UInt32, Int64); -- {serverError ILLEGAL_COLUMN} +select [42]::Variant(Array(UInt32), Array(Int64)); -- {serverError ILLEGAL_COLUMN} +select 'Hello'::Variant(String, LowCardinality(String)); -- {serverError ILLEGAL_COLUMN} +select (1, 'Hello')::Variant(Tuple(UInt32, String), Tuple(Int64, String)); -- {serverError ILLEGAL_COLUMN} +select map(42, 42)::Variant(Map(UInt64, UInt32), Map(UInt64, Int64)); -- {serverError ILLEGAL_COLUMN} + diff --git a/parser/testdata/03000_minmax_index_first/ast.json b/parser/testdata/03000_minmax_index_first/ast.json new file mode 100644 index 000000000..3b727919f --- /dev/null +++ b/parser/testdata/03000_minmax_index_first/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery skip_table (children 1)" + }, + { + "explain": " Identifier skip_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001276131, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03000_minmax_index_first/metadata.json b/parser/testdata/03000_minmax_index_first/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03000_minmax_index_first/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03000_minmax_index_first/query.sql b/parser/testdata/03000_minmax_index_first/query.sql new file mode 100644 index 000000000..e9a542459 --- /dev/null +++ b/parser/testdata/03000_minmax_index_first/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS skip_table; + +CREATE TABLE skip_table +( + k UInt64, + v UInt64, + INDEX v_set v TYPE set(100) GRANULARITY 2, -- set index is declared before minmax intentionally + INDEX v_mm v TYPE minmax GRANULARITY 2 +) +ENGINE = MergeTree +PRIMARY KEY k +SETTINGS index_granularity = 8192; + +INSERT INTO skip_table SELECT number, intDiv(number, 4096) FROM numbers(100000); +SELECT trim(explain) FROM ( EXPLAIN indexes = 1 SELECT * FROM skip_table WHERE v = 125 SETTINGS per_part_index_stats=1) WHERE explain like '%Name%'; + +DROP TABLE skip_table; diff --git a/parser/testdata/03000_too_big_max_execution_time_setting/ast.json b/parser/testdata/03000_too_big_max_execution_time_setting/ast.json new file mode 100644 index 
000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03000_too_big_max_execution_time_setting/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03000_too_big_max_execution_time_setting/metadata.json b/parser/testdata/03000_too_big_max_execution_time_setting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03000_too_big_max_execution_time_setting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03000_too_big_max_execution_time_setting/query.sql b/parser/testdata/03000_too_big_max_execution_time_setting/query.sql new file mode 100644 index 000000000..7aa86891b --- /dev/null +++ b/parser/testdata/03000_too_big_max_execution_time_setting/query.sql @@ -0,0 +1,2 @@ +select 1 settings max_execution_time = 9223372036854775808; -- {clientError BAD_ARGUMENTS} + diff --git a/parser/testdata/03000_traverse_shadow_system_data_paths/ast.json b/parser/testdata/03000_traverse_shadow_system_data_paths/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03000_traverse_shadow_system_data_paths/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03000_traverse_shadow_system_data_paths/metadata.json b/parser/testdata/03000_traverse_shadow_system_data_paths/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03000_traverse_shadow_system_data_paths/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03000_traverse_shadow_system_data_paths/query.sql b/parser/testdata/03000_traverse_shadow_system_data_paths/query.sql new file mode 100644 index 000000000..be4b16f12 --- /dev/null +++ b/parser/testdata/03000_traverse_shadow_system_data_paths/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-replicated-database, no-fasttest, no-parallel, no-random-settings, no-random-merge-tree-settings + +DROP TABLE IF EXISTS 03000_traverse_shadow_system_data_path_table; + +CREATE TABLE 03000_traverse_shadow_system_data_path_table ( + id Int64, + data String +) ENGINE=MergeTree() +ORDER BY id +SETTINGS storage_policy='s3_cache'; + +INSERT INTO 03000_traverse_shadow_system_data_path_table VALUES (0, 'data'); +ALTER TABLE 03000_traverse_shadow_system_data_path_table FREEZE WITH NAME '03000_traverse_shadow_system_data_path_table_backup'; + +SELECT count() > 0 +FROM system.remote_data_paths +WHERE disk_name = 's3_cache' AND local_path LIKE '%shadow/03000_traverse_shadow_system_data_path_table_backup%' +SETTINGS traverse_shadow_remote_data_paths=1; + +DROP TABLE IF EXISTS 03000_traverse_shadow_system_data_path_table; diff --git a/parser/testdata/03000_virtual_columns_in_prewhere/ast.json b/parser/testdata/03000_virtual_columns_in_prewhere/ast.json new file mode 100644 index 000000000..8b789a1fd --- /dev/null +++ b/parser/testdata/03000_virtual_columns_in_prewhere/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001050572, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03000_virtual_columns_in_prewhere/metadata.json b/parser/testdata/03000_virtual_columns_in_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03000_virtual_columns_in_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03000_virtual_columns_in_prewhere/query.sql 
b/parser/testdata/03000_virtual_columns_in_prewhere/query.sql new file mode 100644 index 000000000..c1e6eba6b --- /dev/null +++ b/parser/testdata/03000_virtual_columns_in_prewhere/query.sql @@ -0,0 +1,12 @@ +SET optimize_trivial_insert_select = 1; + +drop table if exists x; + +create table x (i int, j int, k int) engine MergeTree order by tuple() settings index_granularity=8192, index_granularity_bytes = '10Mi', min_bytes_for_wide_part=0, min_rows_for_wide_part=0, ratio_of_defaults_for_sparse_serialization=1; + +insert into x select number, number * 2, number * 3 from numbers(100000); + +-- One granule, (_part_offset (8 bytes) + <one minimal physical column> (4 bytes)) * 8192 + <other two physical columns>(8 bytes) * 1 = 98312 +select * from x prewhere _part_offset = 0 settings max_bytes_to_read = 98312; + +drop table x; diff --git a/parser/testdata/03001_analyzer_nullable_nothing/ast.json b/parser/testdata/03001_analyzer_nullable_nothing/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03001_analyzer_nullable_nothing/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03001_analyzer_nullable_nothing/metadata.json b/parser/testdata/03001_analyzer_nullable_nothing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03001_analyzer_nullable_nothing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03001_analyzer_nullable_nothing/query.sql b/parser/testdata/03001_analyzer_nullable_nothing/query.sql new file mode 100644 index 000000000..c1c7ca87b --- /dev/null +++ b/parser/testdata/03001_analyzer_nullable_nothing/query.sql @@ -0,0 +1,6 @@ +--https://github.com/ClickHouse/ClickHouse/issues/58906 +SELECT + count(_CAST(NULL, 'Nullable(Nothing)')), + round(avg(_CAST(NULL, 'Nullable(Nothing)'))) AS k +FROM numbers(256) + SETTINGS enable_analyzer = 1; diff --git a/parser/testdata/03001_block_offset_column_2/ast.json b/parser/testdata/03001_block_offset_column_2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03001_block_offset_column_2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03001_block_offset_column_2/metadata.json b/parser/testdata/03001_block_offset_column_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03001_block_offset_column_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03001_block_offset_column_2/query.sql b/parser/testdata/03001_block_offset_column_2/query.sql new file mode 100644 index 000000000..56e3b5c1f --- /dev/null +++ b/parser/testdata/03001_block_offset_column_2/query.sql @@ -0,0 +1,25 @@ + +DROP TABLE IF EXISTS t_block_offset; + +CREATE TABLE t_block_offset (id UInt32) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 3, enable_block_number_column = 0, enable_block_offset_column = 0; + +INSERT INTO t_block_offset SELECT number * 2 FROM numbers(8); + +INSERT INTO t_block_offset SELECT number * 2 FROM numbers(8, 8); + +OPTIMIZE TABLE t_block_offset FINAL; + +SELECT _part, _block_number, _block_offset, _part_offset, id FROM t_block_offset ORDER BY _block_number, _block_offset; + +ALTER TABLE t_block_offset MODIFY SETTING enable_block_number_column = 1; +ALTER TABLE t_block_offset MODIFY SETTING enable_block_offset_column = 1; + +INSERT INTO t_block_offset SELECT number * 2 + 1 FROM numbers(16); + +OPTIMIZE TABLE t_block_offset FINAL; + +SELECT '==========='; +SELECT _part, _block_number, 
_block_offset, _part_offset, id FROM t_block_offset ORDER BY _block_number, _block_offset; + + +DROP TABLE t_block_offset; diff --git a/parser/testdata/03001_consider_lwd_when_merge/ast.json b/parser/testdata/03001_consider_lwd_when_merge/ast.json new file mode 100644 index 000000000..95ee14296 --- /dev/null +++ b/parser/testdata/03001_consider_lwd_when_merge/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery lwd_merge (children 1)" + }, + { + "explain": " Identifier lwd_merge" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00096716, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/03001_consider_lwd_when_merge/metadata.json b/parser/testdata/03001_consider_lwd_when_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03001_consider_lwd_when_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03001_consider_lwd_when_merge/query.sql b/parser/testdata/03001_consider_lwd_when_merge/query.sql new file mode 100644 index 000000000..ab3920aac --- /dev/null +++ b/parser/testdata/03001_consider_lwd_when_merge/query.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS lwd_merge; + +CREATE TABLE lwd_merge (id UInt64 CODEC(NONE)) + ENGINE = MergeTree ORDER BY id +SETTINGS max_bytes_to_merge_at_min_space_in_pool = 80000, max_bytes_to_merge_at_max_space_in_pool = 80000, exclude_deleted_rows_for_part_size_in_merge = 0; + +INSERT INTO lwd_merge SELECT number FROM numbers(10000); +INSERT INTO lwd_merge SELECT number FROM numbers(10000, 10000); + +SET optimize_throw_if_noop = 1; + +OPTIMIZE TABLE lwd_merge; -- { serverError CANNOT_ASSIGN_OPTIMIZE } +SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_merge' AND active = 1; + +DELETE FROM lwd_merge WHERE id % 10 > 0; + +OPTIMIZE TABLE lwd_merge; -- { serverError CANNOT_ASSIGN_OPTIMIZE } +SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_merge' AND active = 1; + +ALTER TABLE lwd_merge MODIFY SETTING exclude_deleted_rows_for_part_size_in_merge = 1; + +-- delete again because deleted rows will be counted in mutation +DELETE FROM lwd_merge WHERE id % 100 == 0; + +OPTIMIZE TABLE lwd_merge; +SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = 'lwd_merge' AND active = 1; + +DROP TABLE IF EXISTS lwd_merge; diff --git a/parser/testdata/03001_data_version_column/ast.json b/parser/testdata/03001_data_version_column/ast.json new file mode 100644 index 000000000..6b62125fb --- /dev/null +++ b/parser/testdata/03001_data_version_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_data_version (children 1)" + }, + { + "explain": " Identifier t_data_version" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001051215, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/03001_data_version_column/metadata.json b/parser/testdata/03001_data_version_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03001_data_version_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03001_data_version_column/query.sql b/parser/testdata/03001_data_version_column/query.sql new file mode 100644 index 000000000..4e3377ebf --- /dev/null +++ b/parser/testdata/03001_data_version_column/query.sql @@ -0,0 
+1,20 @@ +DROP TABLE IF EXISTS t_data_version; + +CREATE TABLE t_data_version (a UInt64, b UInt64) ENGINE = MergeTree ORDER BY a; + +INSERT INTO t_data_version VALUES (1, 1); +INSERT INTO t_data_version VALUES (2, 2); + +SELECT _part, _part_data_version, * FROM t_data_version ORDER BY a; + +ALTER TABLE t_data_version UPDATE b = a * 100 WHERE 1 SETTINGS mutations_sync = 2; + +SELECT _part, _part_data_version, * FROM t_data_version ORDER BY a; + +INSERT INTO t_data_version VALUES (3, 3); + +-- Check parts pruning. +SELECT _part, _part_data_version, * FROM t_data_version WHERE _part_data_version = 4 ORDER BY a SETTINGS max_rows_to_read = 1; + +DROP TABLE t_data_version; + diff --git a/parser/testdata/03001_insert_threads_deduplication/ast.json b/parser/testdata/03001_insert_threads_deduplication/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03001_insert_threads_deduplication/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03001_insert_threads_deduplication/metadata.json b/parser/testdata/03001_insert_threads_deduplication/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03001_insert_threads_deduplication/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03001_insert_threads_deduplication/query.sql b/parser/testdata/03001_insert_threads_deduplication/query.sql new file mode 100644 index 000000000..093d2b318 --- /dev/null +++ b/parser/testdata/03001_insert_threads_deduplication/query.sql @@ -0,0 +1,69 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS landing SYNC; +DROP TABLE IF EXISTS landing_dist SYNC; +DROP TABLE IF EXISTS ds SYNC; + +CREATE TABLE landing +( + timestamp DateTime64(3), + status String, + id String +) +ENGINE = MergeTree() +ORDER BY timestamp; + +CREATE TABLE landing_dist +( + timestamp DateTime64(3), + status String, + id String +) +ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), 'landing', rand()); + +SYSTEM STOP MERGES landing; -- Stopping merges to force 3 parts + +INSERT INTO landing (status, id, timestamp) SELECT * FROM generateRandom() LIMIT 1; +INSERT INTO landing (status, id, timestamp) SELECT * FROM generateRandom() LIMIT 1; +INSERT INTO landing (status, id, timestamp) SELECT * FROM generateRandom() LIMIT 1; + +CREATE TABLE ds +( + timestamp DateTime64(3), + status String, + id String +) +ENGINE = MergeTree() +ORDER BY timestamp +SETTINGS non_replicated_deduplication_window=1000; + +INSERT INTO ds SELECT * FROM landing +SETTINGS insert_deduplicate=1, insert_deduplication_token='token1', + max_insert_threads=5; + +SELECT count() FROM ds; + +INSERT INTO ds SELECT * FROM landing +SETTINGS insert_deduplicate=1, insert_deduplication_token='token2', + max_insert_threads=1; + +SELECT count() FROM ds; + +-- When reading from the distributed table, 6 rows are going to be retrieved +-- due to the query using the two-shard cluster + +INSERT INTO ds SELECT * FROM landing_dist +SETTINGS insert_deduplicate=1, insert_deduplication_token='token3', + max_insert_threads=5; + +SELECT count() FROM ds; + +INSERT INTO ds SELECT * FROM landing_dist +SETTINGS insert_deduplicate=1, insert_deduplication_token='token4', + max_insert_threads=1; + +SELECT count() FROM ds; + +DROP TABLE IF EXISTS landing SYNC; +DROP TABLE IF EXISTS landing_dist SYNC; +DROP TABLE IF EXISTS ds SYNC; diff --git a/parser/testdata/03001_max_parallel_replicas_zero_value/ast.json b/parser/testdata/03001_max_parallel_replicas_zero_value/ast.json new file mode 100644 
index 000000000..5b9df90d1 --- /dev/null +++ b/parser/testdata/03001_max_parallel_replicas_zero_value/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_d (children 1)" + }, + { + "explain": " Identifier test_d" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001196111, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/03001_max_parallel_replicas_zero_value/metadata.json b/parser/testdata/03001_max_parallel_replicas_zero_value/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03001_max_parallel_replicas_zero_value/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03001_max_parallel_replicas_zero_value/query.sql b/parser/testdata/03001_max_parallel_replicas_zero_value/query.sql new file mode 100644 index 000000000..499486713 --- /dev/null +++ b/parser/testdata/03001_max_parallel_replicas_zero_value/query.sql @@ -0,0 +1,5 @@ +drop table if exists test_d; +create table test_d engine=Distributed(test_cluster_two_shard_three_replicas_localhost, system, numbers); +select * from test_d limit 10 settings max_parallel_replicas = 0, prefer_localhost_replica = 0; --{clientError BAD_ARGUMENTS} +drop table test_d; + diff --git a/parser/testdata/03002_analyzer_prewhere/ast.json b/parser/testdata/03002_analyzer_prewhere/ast.json new file mode 100644 index 000000000..e7fdc8ce7 --- /dev/null +++ b/parser/testdata/03002_analyzer_prewhere/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001836538, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03002_analyzer_prewhere/metadata.json b/parser/testdata/03002_analyzer_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03002_analyzer_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03002_analyzer_prewhere/query.sql b/parser/testdata/03002_analyzer_prewhere/query.sql new file mode 100644 index 000000000..ca00d588c --- /dev/null +++ b/parser/testdata/03002_analyzer_prewhere/query.sql @@ -0,0 +1,12 @@ +SET max_threads = 16, allow_suspicious_low_cardinality_types = true, enable_positional_arguments = false, log_queries = true, table_function_remote_max_addresses = 200, any_join_distinct_right_table_keys = true, joined_subquery_requires_alias = false, enable_analyzer = true, max_memory_usage = 10000000000, log_comment = '/workspace/ch/tests/queries/0_stateless/01710_projection_in_index.sql', send_logs_level = 'fatal', enable_optimize_predicate_expression = false, prefer_localhost_replica = true, allow_introspection_functions = true, optimize_functions_to_subcolumns = false, transform_null_in = true, optimize_use_projections = true, allow_deprecated_syntax_for_merge_tree = true, parallelize_output_from_storages = false; + +CREATE TABLE t__fuzz_0 (`i` Int32, `j` Nullable(Int32), `k` Int32, PROJECTION p (SELECT * ORDER BY j)) ENGINE = MergeTree ORDER BY i SETTINGS index_granularity = 1, allow_nullable_key=1; + +INSERT INTO t__fuzz_0 SELECT * FROM generateRandom() LIMIT 3; +INSERT INTO t__fuzz_0 SELECT * FROM generateRandom() LIMIT 3; +INSERT INTO t__fuzz_0 SELECT * FROM generateRandom() LIMIT 3; +INSERT INTO t__fuzz_0 SELECT * FROM generateRandom() LIMIT 3; +INSERT INTO t__fuzz_0 SELECT * FROM generateRandom() LIMIT 
3; + +SELECT * FROM t__fuzz_0 PREWHERE (i < 5) AND (j IN (1, 2)) WHERE i < 5; +DROP TABLE t__fuzz_0; diff --git a/parser/testdata/03002_filter_skip_virtual_columns_with_non_deterministic_functions/ast.json b/parser/testdata/03002_filter_skip_virtual_columns_with_non_deterministic_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03002_filter_skip_virtual_columns_with_non_deterministic_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03002_filter_skip_virtual_columns_with_non_deterministic_functions/metadata.json b/parser/testdata/03002_filter_skip_virtual_columns_with_non_deterministic_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03002_filter_skip_virtual_columns_with_non_deterministic_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03002_filter_skip_virtual_columns_with_non_deterministic_functions/query.sql b/parser/testdata/03002_filter_skip_virtual_columns_with_non_deterministic_functions/query.sql new file mode 100644 index 000000000..6714a0692 --- /dev/null +++ b/parser/testdata/03002_filter_skip_virtual_columns_with_non_deterministic_functions/query.sql @@ -0,0 +1,7 @@ +-- Tags: long +SET max_rows_to_read = 0; +create table test (number UInt64) engine=MergeTree order by number; +insert into test select * from numbers(50000000); +select ignore(number) from test where RAND() > 4292390314 limit 10; +select count() > 0 from test where RAND() > 4292390314; +drop table test; diff --git a/parser/testdata/03002_int_div_decimal_with_date_bug/ast.json b/parser/testdata/03002_int_div_decimal_with_date_bug/ast.json new file mode 100644 index 000000000..53837519e --- /dev/null +++ b/parser/testdata/03002_int_div_decimal_with_date_bug/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function intDiv (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1.0'" + }, + { + "explain": " Literal 'Decimal256(3)'" + }, + { + "explain": " Function today (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.00135134, + "rows_read": 12, + "bytes_read": 449 + } +} diff --git a/parser/testdata/03002_int_div_decimal_with_date_bug/metadata.json b/parser/testdata/03002_int_div_decimal_with_date_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03002_int_div_decimal_with_date_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03002_int_div_decimal_with_date_bug/query.sql b/parser/testdata/03002_int_div_decimal_with_date_bug/query.sql new file mode 100644 index 000000000..166882120 --- /dev/null +++ b/parser/testdata/03002_int_div_decimal_with_date_bug/query.sql @@ -0,0 +1,5 @@ +SELECT intDiv(CAST('1.0', 'Decimal256(3)'), today()); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT intDiv(CAST('1.0', 'Decimal256(3)'), toDate('2023-01-02')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT intDiv(CAST('1.0', 'Decimal256(2)'), toDate32('2023-01-02 12:12:12')); 
-- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT intDiv(CAST('1.0', 'Decimal256(2)'), toDateTime('2023-01-02 12:12:12')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT intDiv(CAST('1.0', 'Decimal256(2)'), toDateTime64('2023-01-02 12:12:12.002', 3)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/03002_map_array_functions_with_low_cardinality/ast.json b/parser/testdata/03002_map_array_functions_with_low_cardinality/ast.json new file mode 100644 index 000000000..62746a371 --- /dev/null +++ b/parser/testdata/03002_map_array_functions_with_low_cardinality/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function mapContainsKeyLike (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function map (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal 'aa'" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'bb'" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'a%'" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001320327, + "rows_read": 19, + "bytes_read": 764 + } +} diff --git a/parser/testdata/03002_map_array_functions_with_low_cardinality/metadata.json b/parser/testdata/03002_map_array_functions_with_low_cardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03002_map_array_functions_with_low_cardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03002_map_array_functions_with_low_cardinality/query.sql b/parser/testdata/03002_map_array_functions_with_low_cardinality/query.sql new file mode 100644 index 000000000..8820a433d --- /dev/null +++ b/parser/testdata/03002_map_array_functions_with_low_cardinality/query.sql @@ -0,0 +1 @@ +SELECT mapContainsKeyLike(map('aa', toLowCardinality(1), 'bb', toLowCardinality(2)), toLowCardinality('a%')); diff --git a/parser/testdata/03002_modify_query_cte/ast.json b/parser/testdata/03002_modify_query_cte/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03002_modify_query_cte/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03002_modify_query_cte/metadata.json b/parser/testdata/03002_modify_query_cte/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03002_modify_query_cte/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03002_modify_query_cte/query.sql b/parser/testdata/03002_modify_query_cte/query.sql new file mode 100644 index 000000000..3a36ce7e7 --- /dev/null +++ b/parser/testdata/03002_modify_query_cte/query.sql @@ -0,0 +1,15 @@ + +CREATE TABLE table_03002 (ts DateTime, event_type String) ENGINE = MergeTree ORDER BY (event_type, ts); + +CREATE MATERIALIZED VIEW mv_03002 TO table_03002 AS SELECT ts FROM table_03002; + +SHOW 
CREATE TABLE mv_03002; + +ALTER TABLE mv_03002 MODIFY QUERY +WITH MY_CTE AS (SELECT ts FROM table_03002) +SELECT * FROM MY_CTE; + +SHOW CREATE TABLE mv_03002; + +DROP TABLE mv_03002; +DROP TABLE table_03002; diff --git a/parser/testdata/03002_sample_factor_where/ast.json b/parser/testdata/03002_sample_factor_where/ast.json new file mode 100644 index 000000000..e0cc3b0b9 --- /dev/null +++ b/parser/testdata/03002_sample_factor_where/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_sample_factor (children 1)" + }, + { + "explain": " Identifier t_sample_factor" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001336323, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/03002_sample_factor_where/metadata.json b/parser/testdata/03002_sample_factor_where/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03002_sample_factor_where/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03002_sample_factor_where/query.sql b/parser/testdata/03002_sample_factor_where/query.sql new file mode 100644 index 000000000..643003434 --- /dev/null +++ b/parser/testdata/03002_sample_factor_where/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS t_sample_factor; + +CREATE TABLE t_sample_factor(a UInt64, b UInt64) ENGINE = MergeTree ORDER BY (a, b) SAMPLE BY b; +INSERT INTO t_sample_factor(a, b) VALUES (1, 2), (3, 4); + +SELECT uniq(b) * any(_sample_factor) FROM t_sample_factor SAMPLE 200000; + +SELECT uniq(b) * any(_sample_factor) FROM t_sample_factor SAMPLE 200000 WHERE a < -1; +SELECT uniq(b) * any(_sample_factor) FROM t_sample_factor SAMPLE 200000 PREWHERE a < -1; + +DROP TABLE t_sample_factor; diff --git a/parser/testdata/03003_analyzer_setting/ast.json b/parser/testdata/03003_analyzer_setting/ast.json new file mode 100644 index 000000000..a40b2ea08 --- /dev/null +++ b/parser/testdata/03003_analyzer_setting/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 3)" + }, + { + "explain": " Identifier test" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration dummy (children 1)" + }, + { + "explain": " DataType Int8" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Distributed (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier test_cluster_two_shards" + }, + { + "explain": " Literal 'system'" + }, + { + "explain": " Literal 'one'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001345757, + "rows_read": 12, + "bytes_read": 430 + } +} diff --git a/parser/testdata/03003_analyzer_setting/metadata.json b/parser/testdata/03003_analyzer_setting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03003_analyzer_setting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03003_analyzer_setting/query.sql b/parser/testdata/03003_analyzer_setting/query.sql new file mode 100644 index 000000000..3dbdaed4a --- /dev/null +++ b/parser/testdata/03003_analyzer_setting/query.sql @@ -0,0 +1,9 @@ +CREATE TABLE test (dummy Int8) ENGINE = Distributed(test_cluster_two_shards, 'system', 'one'); + +SET enable_analyzer = 0; + +SELECT * FROM (SELECT * FROM test SETTINGS 
enable_analyzer = 1); -- { serverError INCORRECT_QUERY } + +SET enable_analyzer = 1; + +SELECT * FROM (SELECT * FROM test SETTINGS enable_analyzer = 0); -- { serverError INCORRECT_QUERY } diff --git a/parser/testdata/03003_arrayEnumerate_crash/ast.json b/parser/testdata/03003_arrayEnumerate_crash/ast.json new file mode 100644 index 000000000..85e768b5c --- /dev/null +++ b/parser/testdata/03003_arrayEnumerate_crash/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayEnumerateUniqRanked (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayEnumerateUniqRanked (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[Array_[UInt64_1, UInt64_2, UInt64_3], Array_[UInt64_2, UInt64_2, UInt64_1], Array_[UInt64_3]]" + }, + { + "explain": " Function or (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias x)" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-9223372036854775808" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001250982, + "rows_read": 17, + "bytes_read": 840 + } +} diff --git a/parser/testdata/03003_arrayEnumerate_crash/metadata.json b/parser/testdata/03003_arrayEnumerate_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03003_arrayEnumerate_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03003_arrayEnumerate_crash/query.sql b/parser/testdata/03003_arrayEnumerate_crash/query.sql new file mode 100644 index 000000000..21102ddbb --- /dev/null +++ b/parser/testdata/03003_arrayEnumerate_crash/query.sql @@ -0,0 +1,2 @@ +SELECT arrayEnumerateUniqRanked(arrayEnumerateUniqRanked([[1, 2, 3], [2, 2, 1], [3]]), materialize(1 AS x) OR toLowCardinality(-9223372036854775808)); -- { serverError BAD_ARGUMENTS } +SELECT arrayEnumerateUniqRanked([[1, 2, 3], [2, 2, 1], [3]], number) FROM numbers(10); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03003_compatibility_setting_bad_value/ast.json b/parser/testdata/03003_compatibility_setting_bad_value/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03003_compatibility_setting_bad_value/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03003_compatibility_setting_bad_value/metadata.json b/parser/testdata/03003_compatibility_setting_bad_value/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03003_compatibility_setting_bad_value/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03003_compatibility_setting_bad_value/query.sql b/parser/testdata/03003_compatibility_setting_bad_value/query.sql new file mode 100644 index 000000000..3a09eec74 --- /dev/null +++ b/parser/testdata/03003_compatibility_setting_bad_value/query.sql @@ -0,0 +1 @@ +select 42 settings compatibility=NULL; -- {clientError BAD_GET} diff --git 
a/parser/testdata/03003_count_asterisk_filter/ast.json b/parser/testdata/03003_count_asterisk_filter/ast.json new file mode 100644 index 000000000..1a5232de6 --- /dev/null +++ b/parser/testdata/03003_count_asterisk_filter/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery users (children 3)" + }, + { + "explain": " Identifier users" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration uid (children 1)" + }, + { + "explain": " DataType Int16" + }, + { + "explain": " ColumnDeclaration name (children 1)" + }, + { + "explain": " DataType Nullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration age (children 1)" + }, + { + "explain": " DataType Int16" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001224075, + "rows_read": 14, + "bytes_read": 504 + } +} diff --git a/parser/testdata/03003_count_asterisk_filter/metadata.json b/parser/testdata/03003_count_asterisk_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03003_count_asterisk_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03003_count_asterisk_filter/query.sql b/parser/testdata/03003_count_asterisk_filter/query.sql new file mode 100644 index 000000000..9bd10dfae --- /dev/null +++ b/parser/testdata/03003_count_asterisk_filter/query.sql @@ -0,0 +1,13 @@ +CREATE TABLE users (uid Int16, name Nullable(String), age Int16) ENGINE=Memory; + +INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users VALUES (6666, Null, 48); +INSERT INTO users VALUES (8888, 'Alice', 50); + +SELECT count(name) FILTER (WHERE uid > 2000) FROM users; +SELECT countIf(name, uid > 2000) FROM users; + +SELECT count(*) FILTER (WHERE uid > 2000) FROM users; +SELECT countIf(uid > 2000) FROM users; + +DROP TABLE users; diff --git a/parser/testdata/03003_enum_and_string_compatible/ast.json b/parser/testdata/03003_enum_and_string_compatible/ast.json new file mode 100644 index 000000000..2bfbfdf57 --- /dev/null +++ b/parser/testdata/03003_enum_and_string_compatible/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (alias enum1) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'Hello'" + }, + { + "explain": " Literal 'Enum8(\\'Hello\\', \\'World\\')'" + }, + { + "explain": " Function CAST (alias enum2) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'test'" + }, + { + "explain": " Literal 'Enum8(\\'test\\', \\'best\\')'" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier enum1" + }, + { + "explain": " Literal 'Goodbye'" + }, + { + "explain": " Identifier enum2" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001121748, + "rows_read": 18, 
+ "bytes_read": 698 + } +} diff --git a/parser/testdata/03003_enum_and_string_compatible/metadata.json b/parser/testdata/03003_enum_and_string_compatible/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03003_enum_and_string_compatible/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03003_enum_and_string_compatible/query.sql b/parser/testdata/03003_enum_and_string_compatible/query.sql new file mode 100644 index 000000000..0abba6741 --- /dev/null +++ b/parser/testdata/03003_enum_and_string_compatible/query.sql @@ -0,0 +1 @@ +WITH 'Hello'::Enum8('Hello', 'World') AS enum1, 'test'::Enum8('test', 'best') AS enum2 SELECT [enum1, 'Goodbye', enum2]; diff --git a/parser/testdata/03003_functions_to_subcolumns_final/ast.json b/parser/testdata/03003_functions_to_subcolumns_final/ast.json new file mode 100644 index 000000000..801433d69 --- /dev/null +++ b/parser/testdata/03003_functions_to_subcolumns_final/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_length_1 (children 1)" + }, + { + "explain": " Identifier t_length_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001402544, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03003_functions_to_subcolumns_final/metadata.json b/parser/testdata/03003_functions_to_subcolumns_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03003_functions_to_subcolumns_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03003_functions_to_subcolumns_final/query.sql b/parser/testdata/03003_functions_to_subcolumns_final/query.sql new file mode 100644 index 000000000..b2ca478da --- /dev/null +++ b/parser/testdata/03003_functions_to_subcolumns_final/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS t_length_1; +DROP TABLE IF EXISTS t_length_2; + +SET optimize_functions_to_subcolumns = 1; +SET enable_analyzer = 1; +SET optimize_on_insert = 0; + +CREATE TABLE t_length_1 (id UInt64, arr Array(UInt64)) ENGINE = ReplacingMergeTree ORDER BY id; +CREATE TABLE t_length_2 (id UInt64, arr_length UInt64) ENGINE = ReplacingMergeTree ORDER BY id; + +INSERT INTO t_length_1 VALUES (1, [1, 2, 3]), (2, [4, 5]); +INSERT INTO t_length_2 VALUES (1, 3), (1, 2), (2, 2); + +SELECT length(arr) FROM t_length_1 WHERE length(arr) in (SELECT arr_length FROM t_length_2); +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT length(arr) FROM t_length_1 WHERE length(arr) in (SELECT arr_length FROM t_length_2); + +SELECT length(arr) FROM t_length_1 WHERE length(arr) in (SELECT arr_length FROM t_length_2 FINAL); +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT length(arr) FROM t_length_1 WHERE length(arr) in (SELECT arr_length FROM t_length_2 FINAL); + +SELECT length(arr) FROM t_length_1 FINAL WHERE length(arr) in (SELECT arr_length FROM t_length_2 FINAL); +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT length(arr) FROM t_length_1 FINAL WHERE length(arr) in (SELECT arr_length FROM t_length_2 FINAL); + +DROP TABLE t_length_1; +DROP TABLE t_length_2; diff --git a/parser/testdata/03003_sql_json_nonsense/ast.json b/parser/testdata/03003_sql_json_nonsense/ast.json new file mode 100644 index 000000000..0a1c82558 --- /dev/null +++ b/parser/testdata/03003_sql_json_nonsense/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function JSON_QUERY (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '{\"x\":1}'" + }, + { + "explain": " Literal '$[\\'hello\\']'" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'x'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001244912, + "rows_read": 13, + "bytes_read": 516 + } +} diff --git a/parser/testdata/03003_sql_json_nonsense/metadata.json b/parser/testdata/03003_sql_json_nonsense/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03003_sql_json_nonsense/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03003_sql_json_nonsense/query.sql b/parser/testdata/03003_sql_json_nonsense/query.sql new file mode 100644 index 000000000..9b7beb42c --- /dev/null +++ b/parser/testdata/03003_sql_json_nonsense/query.sql @@ -0,0 +1 @@ +SELECT JSON_QUERY('{"x":1}', '$[\'hello\']', materialize(toLowCardinality('x'))); diff --git a/parser/testdata/03004_force_null_for_omitted/ast.json b/parser/testdata/03004_force_null_for_omitted/ast.json new file mode 100644 index 000000000..63bd75315 --- /dev/null +++ b/parser/testdata/03004_force_null_for_omitted/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001186578, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03004_force_null_for_omitted/metadata.json b/parser/testdata/03004_force_null_for_omitted/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03004_force_null_for_omitted/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03004_force_null_for_omitted/query.sql b/parser/testdata/03004_force_null_for_omitted/query.sql new file mode 100644 index 000000000..43ba2568a --- /dev/null +++ b/parser/testdata/03004_force_null_for_omitted/query.sql @@ -0,0 +1,36 @@ +set allow_suspicious_low_cardinality_types = 1; +insert into function file(concat(currentDatabase(), '.03004_data.bsonEachRow'), auto, 'null Nullable(UInt32)') select number % 2 ? 
NULL : number from numbers(5) settings engine_file_truncate_on_insert=1; +select * from file(concat(currentDatabase(), '.03004_data.bsonEachRow'), auto, 'null UInt32, foo UInt32'); +select * from file(concat(currentDatabase(), '.03004_data.bsonEachRow'), auto, 'null UInt32, foo UInt32') settings input_format_force_null_for_omitted_fields = 1; -- { serverError TYPE_MISMATCH } +select * from file(concat(currentDatabase(), '.03004_data.bsonEachRow'), auto, 'null UInt32, foo Nullable(UInt32)'); +select * from file(concat(currentDatabase(), '.03004_data.bsonEachRow'), auto, 'null UInt32, foo Nullable(UInt32)') settings input_format_force_null_for_omitted_fields = 1; +select * from file(concat(currentDatabase(), '.03004_data.bsonEachRow'), auto, 'null UInt32, foo LowCardinality(Nullable(UInt32))'); +select * from file(concat(currentDatabase(), '.03004_data.bsonEachRow'), auto, 'null UInt32, foo LowCardinality(Nullable(UInt32))') settings input_format_force_null_for_omitted_fields = 1; + +select * from format(JSONEachRow, 'foo UInt32', '{}'); +select * from format(JSONEachRow, 'foo UInt32', '{}') settings input_format_force_null_for_omitted_fields = 1; -- { serverError TYPE_MISMATCH } +select * from format(JSONEachRow, 'foo UInt32, bar Nullable(UInt32)', '{}'); +select * from format(JSONEachRow, 'foo UInt32, bar Nullable(UInt32)', '{\"foo\":1}'); +select * from format(JSONEachRow, 'foo UInt32, bar Nullable(UInt32)', '{}') settings input_format_force_null_for_omitted_fields = 1; -- { serverError TYPE_MISMATCH } +select * from format(JSONEachRow, 'foo UInt32, bar Nullable(UInt32)', '{\"foo\":1}') settings input_format_force_null_for_omitted_fields = 1; +select * from format(JSONEachRow, 'foo UInt32, bar LowCardinality(Nullable(UInt32))', '{\"foo\":1}'); +select * from format(JSONEachRow, 'foo UInt32, bar LowCardinality(Nullable(UInt32))', '{\"foo\":1}') settings input_format_force_null_for_omitted_fields = 1; + +select * from format(CSVWithNamesAndTypes, 'foo UInt32, bar UInt32', 'foo\nUInt32\n1'); +select * from format(CSVWithNamesAndTypes, 'foo UInt32, bar UInt32', 'foo\nUInt32\n1') settings input_format_force_null_for_omitted_fields = 1; -- { serverError TYPE_MISMATCH } +select * from format(CSVWithNamesAndTypes, 'foo UInt32, bar Nullable(UInt32)', 'foo\nUInt32\n1') settings input_format_force_null_for_omitted_fields = 1; +select * from format(CSVWithNamesAndTypes, 'foo UInt32, bar LowCardinality(Nullable(UInt32))', 'foo\nUInt32\n1') settings input_format_force_null_for_omitted_fields = 1; +select * from format(CSVWithNamesAndTypes, 'foo UInt32, bar UInt32', 'foo,bar\nUInt32,UInt32\n1,2\n3\n') settings input_format_csv_allow_variable_number_of_columns = 1; +select * from format(CSVWithNamesAndTypes, 'foo UInt32, bar UInt32', 'foo,bar\nUInt32,UInt32\n1,2\n3\n') settings input_format_csv_allow_variable_number_of_columns = 1, input_format_force_null_for_omitted_fields = 1; -- { serverError TYPE_MISMATCH } + +select * from format(TSVWithNamesAndTypes, 'foo UInt32, bar UInt32', 'foo\nUInt32\n1'); +select * from format(TSVWithNamesAndTypes, 'foo UInt32, bar UInt32', 'foo\nUInt32\n1') settings input_format_force_null_for_omitted_fields = 1; -- { serverError TYPE_MISMATCH } +select * from format(TSVWithNamesAndTypes, 'foo UInt32, bar Nullable(UInt32)', 'foo\nUInt32\n1') settings input_format_force_null_for_omitted_fields = 1; +select * from format(TSVWithNamesAndTypes, 'foo UInt32, bar LowCardinality(Nullable(UInt32))', 'foo\nUInt32\n1') settings input_format_force_null_for_omitted_fields = 1; +select * 
from format(TSVWithNamesAndTypes, 'foo UInt32, bar UInt32', 'foo\tbar\nUInt32\tUInt32\n1\t2\n3\n') settings input_format_tsv_allow_variable_number_of_columns = 1; +select * from format(TSVWithNamesAndTypes, 'foo UInt32, bar UInt32', 'foo\tbar\nUInt32\tUInt32\n1\t2\n3\n') settings input_format_tsv_allow_variable_number_of_columns = 1, input_format_force_null_for_omitted_fields = 1; -- { serverError TYPE_MISMATCH } + +select * from format(TSKV, 'foo UInt32, bar UInt32', 'foo=1\n'); +select * from format(TSKV, 'foo UInt32, bar UInt32', 'foo=1\n') settings input_format_force_null_for_omitted_fields = 1; -- { serverError TYPE_MISMATCH } +select * from format(TSKV, 'foo UInt32, bar Nullable(UInt32)', 'foo=1\n') settings input_format_force_null_for_omitted_fields = 1; +select * from format(TSKV, 'foo UInt32, bar LowCardinality(Nullable(UInt32))', 'foo=1\n') settings input_format_force_null_for_omitted_fields = 1; diff --git a/parser/testdata/03004_json_named_tuples_inference_ambiguous_paths_as_string/ast.json b/parser/testdata/03004_json_named_tuples_inference_ambiguous_paths_as_string/ast.json new file mode 100644 index 000000000..f7c372434 --- /dev/null +++ b/parser/testdata/03004_json_named_tuples_inference_ambiguous_paths_as_string/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001188267, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03004_json_named_tuples_inference_ambiguous_paths_as_string/metadata.json b/parser/testdata/03004_json_named_tuples_inference_ambiguous_paths_as_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03004_json_named_tuples_inference_ambiguous_paths_as_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03004_json_named_tuples_inference_ambiguous_paths_as_string/query.sql b/parser/testdata/03004_json_named_tuples_inference_ambiguous_paths_as_string/query.sql new file mode 100644 index 000000000..4b986c948 --- /dev/null +++ b/parser/testdata/03004_json_named_tuples_inference_ambiguous_paths_as_string/query.sql @@ -0,0 +1,4 @@ +set input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects=1; +desc format(JSONEachRow, '{"obj" : {"a" : 42}}, {"obj" : {"a" : {"b" : 42}}}'); +select * from format(JSONEachRow, '{"obj" : {"a" : 42}}, {"obj" : {"a" : {"b" : 42}}}'); + diff --git a/parser/testdata/03005_input_function_in_join/ast.json b/parser/testdata/03005_input_function_in_join/ast.json new file mode 100644 index 000000000..3e6efe16f --- /dev/null +++ b/parser/testdata/03005_input_function_in_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001339363, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03005_input_function_in_join/metadata.json b/parser/testdata/03005_input_function_in_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03005_input_function_in_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03005_input_function_in_join/query.sql b/parser/testdata/03005_input_function_in_join/query.sql new file mode 100644 index 000000000..a6fc27cd8 --- /dev/null +++ 
b/parser/testdata/03005_input_function_in_join/query.sql @@ -0,0 +1,14 @@ +drop table if exists test; +create table test (a Int8) engine = MergeTree order by tuple(); +INSERT INTO test +SELECT x.number FROM ( + SELECT number + FROM system.numbers + LIMIT 10 +) AS x +INNER JOIN input('a UInt64') AS y ON x.number = y.a +Format CSV 2 + +select * from test; +drop table test; + diff --git a/parser/testdata/03006_analyzer_executable_table_function/ast.json b/parser/testdata/03006_analyzer_executable_table_function/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03006_analyzer_executable_table_function/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03006_analyzer_executable_table_function/metadata.json b/parser/testdata/03006_analyzer_executable_table_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03006_analyzer_executable_table_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03006_analyzer_executable_table_function/query.sql b/parser/testdata/03006_analyzer_executable_table_function/query.sql new file mode 100644 index 000000000..4edbcc97f --- /dev/null +++ b/parser/testdata/03006_analyzer_executable_table_function/query.sql @@ -0,0 +1,4 @@ +SELECT + toFixedString(toFixedString(toLowCardinality(toFixedString('--------------------', toNullable(20))), toLowCardinality(20)), 20), + * +FROM executable('data String', SETTINGS max_command_execution_time = 100); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} diff --git a/parser/testdata/03006_buffer_overflow_join/ast.json b/parser/testdata/03006_buffer_overflow_join/ast.json new file mode 100644 index 000000000..29c02d0f7 --- /dev/null +++ b/parser/testdata/03006_buffer_overflow_join/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery 03006_buffer_overflow_l (children 3)" + }, + { + "explain": " Identifier 03006_buffer_overflow_l" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration a (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration b (children 1)" + }, + { + "explain": " DataType Tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " DataType String" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001215173, + "rows_read": 13, + "bytes_read": 491 + } +} diff --git a/parser/testdata/03006_buffer_overflow_join/metadata.json b/parser/testdata/03006_buffer_overflow_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03006_buffer_overflow_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03006_buffer_overflow_join/query.sql b/parser/testdata/03006_buffer_overflow_join/query.sql new file mode 100644 index 000000000..8c1fa3cec --- /dev/null +++ b/parser/testdata/03006_buffer_overflow_join/query.sql @@ -0,0 +1,6 @@ +CREATE TABLE 03006_buffer_overflow_l (`a` String, `b` Tuple(String, String)) ENGINE = Memory; +INSERT INTO 03006_buffer_overflow_l SELECT * FROM generateRandom() limit 1000; +CREATE TABLE 03006_buffer_overflow_r (`a` LowCardinality(Nullable(String)), `c` Tuple(LowCardinality(String), 
LowCardinality(String))) ENGINE = Memory; +INSERT INTO 03006_buffer_overflow_r SELECT * FROM generateRandom() limit 1000; + +SELECT a FROM 03006_buffer_overflow_l RIGHT JOIN 03006_buffer_overflow_r USING (a) ORDER BY a ASC NULLS FIRST FORMAT Null; diff --git a/parser/testdata/03006_mv_deduplication_throw_if_async_insert/ast.json b/parser/testdata/03006_mv_deduplication_throw_if_async_insert/ast.json new file mode 100644 index 000000000..a3ef963e8 --- /dev/null +++ b/parser/testdata/03006_mv_deduplication_throw_if_async_insert/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 03006_test (children 1)" + }, + { + "explain": " Identifier 03006_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001127664, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03006_mv_deduplication_throw_if_async_insert/metadata.json b/parser/testdata/03006_mv_deduplication_throw_if_async_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03006_mv_deduplication_throw_if_async_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03006_mv_deduplication_throw_if_async_insert/query.sql b/parser/testdata/03006_mv_deduplication_throw_if_async_insert/query.sql new file mode 100644 index 000000000..808317c91 --- /dev/null +++ b/parser/testdata/03006_mv_deduplication_throw_if_async_insert/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS 03006_test; + +SET async_insert = 1; +SET deduplicate_blocks_in_dependent_materialized_views = 1; + +CREATE TABLE 03006_test +( + d Date, + value UInt64 +) +ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO 03006_test VALUES ('2024-03-05', 1), ('2024-03-05', 2), ('2024-03-05', 1); -- { serverError SUPPORT_IS_DISABLED } +INSERT INTO 03006_test SETTINGS compatibility='24.1' VALUES ('2024-03-05', 1), ('2024-03-05', 2), ('2024-03-05', 1); +INSERT INTO 03006_test SETTINGS async_insert=0 VALUES ('2024-03-05', 1), ('2024-03-05', 2), ('2024-03-05', 1); +INSERT INTO 03006_test SETTINGS deduplicate_blocks_in_dependent_materialized_views=0 VALUES ('2024-03-05', 1), ('2024-03-05', 2), ('2024-03-05', 1); +INSERT INTO 03006_test SETTINGS throw_if_deduplication_in_dependent_materialized_views_enabled_with_async_insert=0 VALUES ('2024-03-05', 1), ('2024-03-05', 2), ('2024-03-05', 1); + +DROP TABLE IF EXISTS 03006_test; diff --git a/parser/testdata/03006_parallel_replicas_cte_explain_syntax_crash/ast.json b/parser/testdata/03006_parallel_replicas_cte_explain_syntax_crash/ast.json new file mode 100644 index 000000000..cb6590a8d --- /dev/null +++ b/parser/testdata/03006_parallel_replicas_cte_explain_syntax_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery numbers_1e6__fuzz_34 (children 1)" + }, + { + "explain": " Identifier numbers_1e6__fuzz_34" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001279267, + "rows_read": 2, + "bytes_read": 92 + } +} diff --git a/parser/testdata/03006_parallel_replicas_cte_explain_syntax_crash/metadata.json b/parser/testdata/03006_parallel_replicas_cte_explain_syntax_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03006_parallel_replicas_cte_explain_syntax_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03006_parallel_replicas_cte_explain_syntax_crash/query.sql
b/parser/testdata/03006_parallel_replicas_cte_explain_syntax_crash/query.sql new file mode 100644 index 000000000..ff56540b3 --- /dev/null +++ b/parser/testdata/03006_parallel_replicas_cte_explain_syntax_crash/query.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS numbers_1e6__fuzz_34; +DROP TABLE IF EXISTS numbers_1e6__fuzz_33; + +CREATE TABLE numbers_1e6__fuzz_34 +( + n UInt64 +) +ENGINE = MergeTree +ORDER BY n +AS SELECT * +FROM numbers(10); + + +CREATE TABLE numbers_1e6__fuzz_33 +( + n UInt64 +) +ENGINE = MergeTree +ORDER BY n +AS SELECT * +FROM numbers(10); + +SET enable_analyzer = 1; +SET enable_parallel_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3, parallel_replicas_min_number_of_rows_per_replica=0; + +EXPLAIN SYNTAX +WITH + cte1 AS + ( + SELECT n + FROM numbers_1e6__fuzz_34 + ), + cte2 AS + ( + SELECT n + FROM numbers_1e6__fuzz_33 + PREWHERE n IN (cte1) + ) +SELECT count() +FROM cte2; + +DROP TABLE numbers_1e6__fuzz_34; +DROP TABLE numbers_1e6__fuzz_33; diff --git a/parser/testdata/03006_parallel_replicas_prewhere/ast.json b/parser/testdata/03006_parallel_replicas_prewhere/ast.json new file mode 100644 index 000000000..66b4590fd --- /dev/null +++ b/parser/testdata/03006_parallel_replicas_prewhere/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DROP ROW POLICY query" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001414931, + "rows_read": 1, + "bytes_read": 29 + } +} diff --git a/parser/testdata/03006_parallel_replicas_prewhere/metadata.json b/parser/testdata/03006_parallel_replicas_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03006_parallel_replicas_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03006_parallel_replicas_prewhere/query.sql b/parser/testdata/03006_parallel_replicas_prewhere/query.sql new file mode 100644 index 000000000..14e0e77d1 --- /dev/null +++ b/parser/testdata/03006_parallel_replicas_prewhere/query.sql @@ -0,0 +1,42 @@ +DROP POLICY IF EXISTS url_na_log_policy0 ON url_na_log; +DROP TABLE IF EXISTS url_na_log; + +CREATE TABLE url_na_log +( + `SiteId` UInt32, + `DateVisit` Date +) +ENGINE = MergeTree +PRIMARY KEY SiteId +ORDER BY (SiteId, DateVisit) +SETTINGS index_granularity_bytes = 1000000, index_granularity = 1000, min_bytes_for_wide_part = 0; + +CREATE ROW POLICY url_na_log_policy0 ON url_na_log FOR SELECT USING (DateVisit < '2022-08-11') OR (DateVisit > '2022-08-19') TO default; + +INSERT INTO url_na_log +SETTINGS max_insert_block_size = 200000 +SELECT + 209, + CAST('2022-08-09', 'Date') + toIntervalDay(intDiv(number, 10000)) +FROM numbers(130000) +SETTINGS max_insert_block_size = 200000; + +SET max_block_size = 1048576, max_threads = 1, enable_parallel_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3, parallel_replicas_min_number_of_rows_per_replica=10000; +SET parallel_replicas_only_with_analyzer = 0; -- necessary for CI run with disabled analyzer + +EXPLAIN ESTIMATE +SELECT count() +FROM url_na_log +PREWHERE (DateVisit >= toFixedString('2022-08-10', 10)) AND (DateVisit <= '2022-08-20') +SETTINGS parallel_replicas_local_plan=0; + +-- here parallel replicas use the local snapshot as the working set +-- so the estimation can be 
done +EXPLAIN ESTIMATE +SELECT count() +FROM url_na_log +PREWHERE (DateVisit >= toFixedString('2022-08-10', 10)) AND (DateVisit <= '2022-08-20') +SETTINGS enable_analyzer=1, parallel_replicas_local_plan=1; + +DROP POLICY url_na_log_policy0 ON url_na_log; +DROP TABLE url_na_log; diff --git a/parser/testdata/03007_column_nullable_uninitialzed_value/ast.json b/parser/testdata/03007_column_nullable_uninitialzed_value/ast.json new file mode 100644 index 000000000..af76ddba3 --- /dev/null +++ b/parser/testdata/03007_column_nullable_uninitialzed_value/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function avg (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDecimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1000" + }, + { + "explain": " Set" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001342737, + "rows_read": 23, + "bytes_read": 880 + } +} diff --git a/parser/testdata/03007_column_nullable_uninitialzed_value/metadata.json b/parser/testdata/03007_column_nullable_uninitialzed_value/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03007_column_nullable_uninitialzed_value/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03007_column_nullable_uninitialzed_value/query.sql b/parser/testdata/03007_column_nullable_uninitialzed_value/query.sql new file mode 100644 index 000000000..44f6642d2 --- /dev/null +++ b/parser/testdata/03007_column_nullable_uninitialzed_value/query.sql @@ -0,0 +1 @@ +SELECT count(NULL) IGNORE NULLS > avg(toDecimal32(NULL)) IGNORE NULLS, count() FROM numbers(1000) WITH TOTALS SETTINGS enable_analyzer = 1; diff --git a/parser/testdata/03008_deduplication_cases_from_docs/ast.json b/parser/testdata/03008_deduplication_cases_from_docs/ast.json new file mode 100644 index 000000000..4c2e4a4e0 --- /dev/null +++ b/parser/testdata/03008_deduplication_cases_from_docs/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Different materialized views insert equal data into one underlying table.'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001208402, + "rows_read": 5, + "bytes_read": 243 + } +} diff --git 
a/parser/testdata/03008_deduplication_cases_from_docs/metadata.json b/parser/testdata/03008_deduplication_cases_from_docs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03008_deduplication_cases_from_docs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03008_deduplication_cases_from_docs/query.sql b/parser/testdata/03008_deduplication_cases_from_docs/query.sql new file mode 100644 index 000000000..1bd7ef09c --- /dev/null +++ b/parser/testdata/03008_deduplication_cases_from_docs/query.sql @@ -0,0 +1,322 @@ +select 'Different materialized views insert equal data into one underlying table.'; + +DROP TABLE IF EXISTS dst; +DROP TABLE IF EXISTS mv_dst; +DROP TABLE IF EXISTS mv_first; +DROP TABLE IF EXISTS mv_second; + +CREATE TABLE dst +( + `key` Int64, + `value` String +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS non_replicated_deduplication_window=1000; + +CREATE TABLE mv_dst +( + `key` Int64, + `value` String +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS non_replicated_deduplication_window=1000; + +CREATE MATERIALIZED VIEW mv_first +TO mv_dst +AS SELECT + 0 AS key, + value AS value +FROM dst; + +CREATE MATERIALIZED VIEW mv_second +TO mv_dst +AS SELECT + 0 AS key, + value AS value +FROM dst; + +SET deduplicate_blocks_in_dependent_materialized_views=1; + +select 'first attempt'; + +INSERT INTO dst VALUES (1, 'A'); + +SELECT + 'from dst', + *, + _part +FROM dst +ORDER by all; + +SELECT + 'from mv_dst', + *, + _part +FROM mv_dst +ORDER by all; + +select 'second attempt'; + +INSERT INTO dst VALUES (1, 'A'); + +SELECT + 'from dst', + *, + _part +FROM dst +ORDER by all; + +SELECT + 'from mv_dst', + *, + _part +FROM mv_dst +ORDER by all; + +DROP TABLE mv_second; +DROP TABLE mv_first; +DROP TABLE mv_dst; +DROP TABLE dst; + +select 'Different insert operations generate the same data after transformation in the underlying table of the materialized view.'; + +DROP TABLE IF EXISTS dst; +DROP TABLE IF EXISTS mv_dst; + +CREATE TABLE dst +( + `key` Int64, + `value` String +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS non_replicated_deduplication_window=1000; + +CREATE MATERIALIZED VIEW mv_dst +( + `key` Int64, + `value` String +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS non_replicated_deduplication_window=1000 +AS SELECT + 0 AS key, + value AS value +FROM dst; + +SET deduplicate_blocks_in_dependent_materialized_views=1; + +select 'first attempt'; + +INSERT INTO dst VALUES (1, 'A'); + +SELECT + 'from dst', + *, + _part +FROM dst +ORDER by all; + +SELECT + 'from mv_dst', + *, + _part +FROM mv_dst +ORDER by all; + +select 'second attempt'; + +INSERT INTO dst VALUES (2, 'A'); + +SELECT + 'from dst', + *, + _part +FROM dst +ORDER by all; + +SELECT + 'from mv_dst', + *, + _part +FROM mv_dst +ORDER by all; + +DROP TABLE mv_dst; +DROP TABLE dst; + +select 'Identical blocks in insertion with `insert_deduplication_token`'; + +DROP TABLE IF EXISTS dst; + +CREATE TABLE dst +( + `key` Int64, + `value` String +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS non_replicated_deduplication_window=1000; + +SET max_block_size=1; +SET min_insert_block_size_rows=0; +SET min_insert_block_size_bytes=0; + +select 'first attempt'; + +INSERT INTO dst SELECT + 0 AS key, + 'A' AS value +FROM numbers(2) +SETTINGS insert_deduplication_token='some_user_token'; + +SELECT + 'from dst', + *, + _part +FROM dst +ORDER by all; + +select 'second attempt'; + +INSERT INTO dst SELECT + 0 AS key, + 'A' AS value +FROM numbers(2) +SETTINGS 
insert_deduplication_token='some_user_token'; + +SELECT + 'from dst', + *, + _part +FROM dst +ORDER by all; + +select 'third attempt'; + +INSERT INTO dst SELECT + 1 AS key, + 'b' AS value +FROM numbers(2) +SETTINGS insert_deduplication_token='some_user_token'; + +SELECT + 'from dst', + *, + _part +FROM dst +ORDER by all; + +DROP TABLE dst; + +select 'Identical blocks in insertion'; + +DROP TABLE IF EXISTS dst; + +CREATE TABLE dst +( + `key` Int64, + `value` String +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS non_replicated_deduplication_window=1000; + +SET max_block_size=1; +SET min_insert_block_size_rows=0; +SET min_insert_block_size_bytes=0; + +INSERT INTO dst SELECT + 0 AS key, + 'A' AS value +FROM numbers(2); + +SELECT + 'from dst', + *, + _part +FROM dst +ORDER by all; + +DROP TABLE dst; + +select 'Identical blocks after materialized view`s transformation'; + +DROP TABLE IF EXISTS dst; +DROP TABLE IF EXISTS mv_dst; + +CREATE TABLE dst +( + `key` Int64, + `value` String +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS non_replicated_deduplication_window=1000; + +CREATE MATERIALIZED VIEW mv_dst +( + `key` Int64, + `value` String +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS non_replicated_deduplication_window=1000 +AS SELECT + 0 AS key, + value AS value +FROM dst; + +SET max_block_size=1; +SET min_insert_block_size_rows=0; +SET min_insert_block_size_bytes=0; + +SET deduplicate_blocks_in_dependent_materialized_views=1; + +select 'first attempt'; + +INSERT INTO dst SELECT + number + 1 AS key, + IF(key = 0, 'A', 'B') AS value +FROM numbers(2); + +SELECT + 'from dst', + *, + _part +FROM dst +ORDER by all; + +SELECT + 'from mv_dst', + *, + _part +FROM mv_dst +ORDER by all; + +select 'second attempt'; + +INSERT INTO dst SELECT + number + 1 AS key, + IF(key = 0, 'A', 'B') AS value +FROM numbers(2); + +SELECT + 'from dst', + *, + _part +FROM dst +ORDER by all; + +SELECT + 'from mv_dst', + *, + _part +FROM mv_dst +ORDER by all; + +DROP TABLE mv_dst; +DROP TABLE dst; diff --git a/parser/testdata/03008_deduplication_insert_into_partitioned_table/ast.json b/parser/testdata/03008_deduplication_insert_into_partitioned_table/ast.json new file mode 100644 index 000000000..63ce9ebb2 --- /dev/null +++ b/parser/testdata/03008_deduplication_insert_into_partitioned_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery partitioned_table (children 1)" + }, + { + "explain": " Identifier partitioned_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00112768, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/03008_deduplication_insert_into_partitioned_table/metadata.json b/parser/testdata/03008_deduplication_insert_into_partitioned_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03008_deduplication_insert_into_partitioned_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03008_deduplication_insert_into_partitioned_table/query.sql b/parser/testdata/03008_deduplication_insert_into_partitioned_table/query.sql new file mode 100644 index 000000000..1447b3f84 --- /dev/null +++ b/parser/testdata/03008_deduplication_insert_into_partitioned_table/query.sql @@ -0,0 +1,83 @@ +DROP TABLE IF EXISTS partitioned_table; +DROP TABLE IF EXISTS mv_table; + + +SET deduplicate_blocks_in_dependent_materialized_views = 1; + + +SELECT 'no user deduplication token'; + +CREATE TABLE partitioned_table + (key 
Int64, value String) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/03008_deduplication_insert_into_partitioned_table', '{replica}') + partition by key % 10 + order by tuple(); + +CREATE MATERIALIZED VIEW mv_table (key Int64, value String) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/03008_deduplication_insert_into_partitioned_table_mv', '{replica}') + ORDER BY tuple() + AS SELECT key, value FROM partitioned_table; + +INSERT INTO partitioned_table VALUES (1, 'A'), (2, 'B'); +INSERT INTO partitioned_table VALUES (1, 'A'), (2, 'C'); +INSERT INTO partitioned_table VALUES (1, 'D'), (2, 'B'); + +SELECT 'partitioned_table is deduplicated because deduplication works within the scope of one partition:'; +SELECT * FROM partitioned_table ORDER BY ALL; +SELECT 'mv_table is not deduplicated because the inserted blocks were different:'; +SELECT * FROM mv_table ORDER BY ALL; + +DROP TABLE partitioned_table; +DROP TABLE mv_table; + + +SELECT 'with user deduplication token'; + +CREATE TABLE partitioned_table + (key Int64, value String) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/03008_deduplication_insert_into_partitioned_table', '{replica}') + partition by key % 10 + order by tuple(); + +CREATE MATERIALIZED VIEW mv_table (key Int64, value String) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/03008_deduplication_insert_into_partitioned_table_mv', '{replica}') + ORDER BY tuple() + AS SELECT key, value FROM partitioned_table; + +INSERT INTO partitioned_table SETTINGS insert_deduplication_token='token_1' VALUES (1, 'A'), (2, 'B'); +INSERT INTO partitioned_table SETTINGS insert_deduplication_token='token_2' VALUES (1, 'A'), (2, 'C'); +INSERT INTO partitioned_table SETTINGS insert_deduplication_token='token_3' VALUES (1, 'D'), (2, 'B'); + +SELECT 'partitioned_table is not deduplicated because of different tokens:'; +SELECT * FROM partitioned_table ORDER BY ALL; +SELECT 'mv_table is not deduplicated because of different tokens:'; +SELECT * FROM mv_table ORDER BY ALL; + +DROP TABLE partitioned_table; +DROP TABLE mv_table; + + +SELECT 'with incorrect usage of user deduplication token'; + +CREATE TABLE partitioned_table + (key Int64, value String) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/03008_deduplication_insert_into_partitioned_table', '{replica}') + partition by key % 10 + order by tuple(); + +CREATE MATERIALIZED VIEW mv_table (key Int64, value String) + ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/03008_deduplication_insert_into_partitioned_table_mv', '{replica}') + ORDER BY tuple() + AS SELECT key, value FROM partitioned_table; + +INSERT INTO partitioned_table SETTINGS insert_deduplication_token='token_0' VALUES (1, 'A'), (2, 'B'); +INSERT INTO partitioned_table SETTINGS insert_deduplication_token='token_0' VALUES (1, 'A'), (2, 'C'); +INSERT INTO partitioned_table SETTINGS insert_deduplication_token='token_0' VALUES (1, 'D'), (2, 'B'); + +SELECT 'partitioned_table is deduplicated because of equal tokens:'; +SELECT * FROM partitioned_table ORDER BY ALL; +SELECT 'mv_table is deduplicated because of equal tokens:'; +SELECT * FROM mv_table ORDER BY ALL; + +DROP TABLE partitioned_table; +DROP TABLE mv_table; diff --git a/parser/testdata/03008_deduplication_remote_insert_select/ast.json b/parser/testdata/03008_deduplication_remote_insert_select/ast.json new file mode 100644 index 000000000..cb03cd773 --- /dev/null +++ b/parser/testdata/03008_deduplication_remote_insert_select/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": 
"explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery src (children 1)" + }, + { + "explain": " Identifier src" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001445479, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/03008_deduplication_remote_insert_select/metadata.json b/parser/testdata/03008_deduplication_remote_insert_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03008_deduplication_remote_insert_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03008_deduplication_remote_insert_select/query.sql b/parser/testdata/03008_deduplication_remote_insert_select/query.sql new file mode 100644 index 000000000..c8e092822 --- /dev/null +++ b/parser/testdata/03008_deduplication_remote_insert_select/query.sql @@ -0,0 +1,51 @@ +DROP TABLE IF EXISTS src; + +CREATE TABLE src (a UInt64, b UInt64) + ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/03008_deduplication_remote_insert_select/src', '{replica}') + ORDER BY tuple(); + +INSERT INTO src SELECT number % 10 as a, number as b FROM numbers(100); + +SET allow_experimental_parallel_reading_from_replicas=1; +SET max_parallel_replicas=3; +SET parallel_replicas_for_non_replicated_merge_tree=1; +SET cluster_for_parallel_replicas='parallel_replicas'; + +-- { echoOn } +SELECT count() FROM src; +SELECT a, sum(b), uniq(b), FROM src GROUP BY a ORDER BY a; +SELECT count() FROM remote('127.0.0.{1..2}', currentDatabase(), src); +-- { echoOff } + +DROP TABLE IF EXISTS dst_null; +CREATE TABLE dst_null(a UInt64, b UInt64) + ENGINE = Null; + +DROP TABLE IF EXISTS mv_dst; +CREATE MATERIALIZED VIEW mv_dst + ENGINE = AggregatingMergeTree() + ORDER BY a + AS SELECT + a, + sumState(b) AS sum_b, + uniqState(b) AS uniq_b + FROM dst_null + GROUP BY a; + +-- { echoOn } +INSERT INTO dst_null + SELECT a, b FROM src; + +SELECT + a, + sumMerge(sum_b) AS sum_b, + uniqMerge(uniq_b) AS uniq_b +FROM mv_dst +GROUP BY a +ORDER BY a; +-- { echoOff } + +DROP TABLE src; +DROP TABLE mv_dst; +DROP TABLE dst_null; + diff --git a/parser/testdata/03008_deduplication_wrong_mv/ast.json b/parser/testdata/03008_deduplication_wrong_mv/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03008_deduplication_wrong_mv/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03008_deduplication_wrong_mv/metadata.json b/parser/testdata/03008_deduplication_wrong_mv/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03008_deduplication_wrong_mv/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03008_deduplication_wrong_mv/query.sql b/parser/testdata/03008_deduplication_wrong_mv/query.sql new file mode 100644 index 000000000..00f3c498d --- /dev/null +++ b/parser/testdata/03008_deduplication_wrong_mv/query.sql @@ -0,0 +1,22 @@ +-- Tags: memory-engine +DROP TABLE IF EXISTS mv; +DROP TABLE IF EXISTS src; +DROP TABLE IF EXISTS dst; + +-- { echo ON } +CREATE TABLE src (x UInt8) ENGINE = Memory; +CREATE TABLE dst (x UInt8) ENGINE = Memory; +CREATE MATERIALIZED VIEW mv1 TO dst AS SELECT * FROM src; + +INSERT INTO src VALUES (0); +SELECT * from dst; + +TRUNCATE TABLE dst; + +--DROP TABLE src SYNC; +--CREATE TABLE src (y String) ENGINE = MergeTree order by tuple(); +ALTER TABLE src ADD COLUMN y UInt8; +ALTER TABLE src DROP COLUMN x; + +INSERT INTO src VALUES (0); +SELECT * from dst; diff --git 
a/parser/testdata/03008_filter_projections_non_deterministoc_functions/ast.json b/parser/testdata/03008_filter_projections_non_deterministoc_functions/ast.json new file mode 100644 index 000000000..b51aa5ee7 --- /dev/null +++ b/parser/testdata/03008_filter_projections_non_deterministoc_functions/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 3)" + }, + { + "explain": " Identifier test" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration number (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier number" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001437634, + "rows_read": 9, + "bytes_read": 308 + } +} diff --git a/parser/testdata/03008_filter_projections_non_deterministoc_functions/metadata.json b/parser/testdata/03008_filter_projections_non_deterministoc_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03008_filter_projections_non_deterministoc_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03008_filter_projections_non_deterministoc_functions/query.sql b/parser/testdata/03008_filter_projections_non_deterministoc_functions/query.sql new file mode 100644 index 000000000..3be9bc398 --- /dev/null +++ b/parser/testdata/03008_filter_projections_non_deterministoc_functions/query.sql @@ -0,0 +1,28 @@ +create table test (number UInt64) engine=MergeTree order by number; +system stop merges test; +INSERT INTO test select number from numbers(100000); +INSERT INTO test select number from numbers(100000); +INSERT INTO test select number from numbers(100000); +INSERT INTO test select number from numbers(100000); +INSERT INTO test select number from numbers(100000); +INSERT INTO test select number from numbers(100000); +INSERT INTO test select number from numbers(100000); +INSERT INTO test select number from numbers(100000); +INSERT INTO test select number from numbers(100000); +INSERT INTO test select number from numbers(100000); + +select '-- count'; +SELECT count(), _part FROM test GROUP BY _part ORDER BY _part; + +select '-- rand()%2=0:'; +SELECT count() > 0 AND count() < 100000, _part FROM test WHERE rand(1)%2=1 GROUP BY _part ORDER BY _part; + +select '-- optimize_use_implicit_projections=0'; +SELECT count() > 0 AND count() < 100000, _part FROM test WHERE rand(2)%2=1 GROUP BY _part ORDER BY _part settings optimize_use_implicit_projections=0; + +select '-- optimize_trivial_count_query=0'; +SELECT count() > 0 AND count() < 100000, _part FROM test WHERE rand(3)%2=1 GROUP BY _part ORDER BY _part settings optimize_trivial_count_query=0; + +select '-- optimize_trivial_count_query=0, optimize_use_implicit_projections=0'; +SELECT count() > 0 AND count() < 100000, _part FROM test WHERE rand(4)%2=1 GROUP BY _part ORDER BY _part settings optimize_trivial_count_query=0,optimize_use_implicit_projections=0; + diff --git a/parser/testdata/03008_groupSortedArray_field/ast.json b/parser/testdata/03008_groupSortedArray_field/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03008_groupSortedArray_field/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03008_groupSortedArray_field/metadata.json 
b/parser/testdata/03008_groupSortedArray_field/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03008_groupSortedArray_field/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03008_groupSortedArray_field/query.sql b/parser/testdata/03008_groupSortedArray_field/query.sql new file mode 100644 index 000000000..6d2aea641 --- /dev/null +++ b/parser/testdata/03008_groupSortedArray_field/query.sql @@ -0,0 +1,6 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/61186 +SELECT hex(CAST(unhex('0A01003C79A557B3C43400C4865AA84C3B4B01000650BC18F7DE0B00FAAF43E708213401008ED706EA0A9F13007228F915F5602C0100C692CA8FB81405003A6D357047EB1A01008416B7C3239EE3FF7BE9483CDC61DC01003E133A7C081AF5FFC1ECC583F7E5EA01000000000000000000000000000000000100C4865AA84C3BCBFF3B79A557B3C4B4010024C46EF500F1ECFFDB3B910AFF0ED301005E2FC14EBAEAE5FFA1D03EB14515DA'), + 'AggregateFunction(groupArraySorted(10), Decimal(38, 38))')); + +Select hex(groupArraySortedState(10)((number < 3 ? NULL : number)::Nullable(Decimal(3))) as t), toTypeName(t) from numbers(10); +Select finalizeAggregation(unhex('070109000000010600000001080000000103000000010500000001040000000107000000')::AggregateFunction(groupArraySorted(10), Nullable(Decimal(3, 0)))); diff --git a/parser/testdata/03008_index_small/ast.json b/parser/testdata/03008_index_small/ast.json new file mode 100644 index 000000000..3433209c6 --- /dev/null +++ b/parser/testdata/03008_index_small/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001020857, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03008_index_small/metadata.json b/parser/testdata/03008_index_small/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03008_index_small/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03008_index_small/query.sql b/parser/testdata/03008_index_small/query.sql new file mode 100644 index 000000000..72213ed4f --- /dev/null +++ b/parser/testdata/03008_index_small/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test (a UInt8, b UInt8) ENGINE = MergeTree ORDER BY (a, b) +SETTINGS index_granularity = 1, primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns = 0.01; + +SET optimize_move_to_prewhere = 0; + +INSERT INTO test +SELECT number DIV 2, number +FROM numbers(3); + +SELECT count() FROM test WHERE b >= 0; + +DETACH TABLE test; +ATTACH TABLE test; + +SELECT count() FROM test WHERE b >= 0; + +DROP TABLE test; diff --git a/parser/testdata/03008_optimize_equal_ranges/ast.json b/parser/testdata/03008_optimize_equal_ranges/ast.json new file mode 100644 index 000000000..8177d89cf --- /dev/null +++ b/parser/testdata/03008_optimize_equal_ranges/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_optimize_equal_ranges (children 1)" + }, + { + "explain": " Identifier t_optimize_equal_ranges" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001276364, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/03008_optimize_equal_ranges/metadata.json b/parser/testdata/03008_optimize_equal_ranges/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03008_optimize_equal_ranges/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03008_optimize_equal_ranges/query.sql b/parser/testdata/03008_optimize_equal_ranges/query.sql new file mode 100644 index 000000000..73fffafbf --- /dev/null +++ b/parser/testdata/03008_optimize_equal_ranges/query.sql @@ -0,0 +1,30 @@ +DROP TABLE IF EXISTS t_optimize_equal_ranges; + +CREATE TABLE t_optimize_equal_ranges (a UInt64, b String, c UInt64) ENGINE = MergeTree ORDER BY a; + +SET max_block_size = 1024; +SET max_bytes_before_external_group_by = 0; +SET max_bytes_ratio_before_external_group_by = 0; +SET optimize_aggregation_in_order = 0; +SET optimize_use_projections = 0; + +INSERT INTO t_optimize_equal_ranges SELECT 0, toString(number), number FROM numbers(30000); +INSERT INTO t_optimize_equal_ranges SELECT 1, toString(number), number FROM numbers(30000); +INSERT INTO t_optimize_equal_ranges SELECT 2, toString(number), number FROM numbers(30000); + +SELECT a, uniqExact(b) FROM t_optimize_equal_ranges GROUP BY a ORDER BY a SETTINGS max_threads = 16; +SELECT a, uniqExact(b) FROM t_optimize_equal_ranges GROUP BY a ORDER BY a SETTINGS max_threads = 1; +SELECT a, sum(c) FROM t_optimize_equal_ranges GROUP BY a ORDER BY a SETTINGS max_threads = 16; +SELECT a, sum(c) FROM t_optimize_equal_ranges GROUP BY a ORDER BY a SETTINGS max_threads = 1; + +SYSTEM FLUSH LOGS query_log; + +SELECT + used_aggregate_functions[1] AS func, + Settings['max_threads'] AS threads, + ProfileEvents['AggregationOptimizedEqualRangesOfKeys'] > 0 +FROM system.query_log +WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND query LIKE '%SELECT%FROM%t_optimize_equal_ranges%' +ORDER BY func, threads; + +DROP TABLE t_optimize_equal_ranges; diff --git a/parser/testdata/03008_uniq_exact_equal_ranges/ast.json b/parser/testdata/03008_uniq_exact_equal_ranges/ast.json new file mode 100644 index 000000000..5fa74b4c0 --- /dev/null +++ b/parser/testdata/03008_uniq_exact_equal_ranges/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_uniq_exact (children 1)" + }, + { + "explain": " Identifier t_uniq_exact" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001622475, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/03008_uniq_exact_equal_ranges/metadata.json b/parser/testdata/03008_uniq_exact_equal_ranges/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03008_uniq_exact_equal_ranges/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03008_uniq_exact_equal_ranges/query.sql b/parser/testdata/03008_uniq_exact_equal_ranges/query.sql new file mode 100644 index 000000000..fe73a068b --- /dev/null +++ b/parser/testdata/03008_uniq_exact_equal_ranges/query.sql @@ -0,0 +1,37 @@ +DROP TABLE IF EXISTS t_uniq_exact; + +CREATE TABLE t_uniq_exact (a UInt64, b String, c UInt64) ENGINE = MergeTree ORDER BY a; + +SET group_by_two_level_threshold_bytes = 1; +SET group_by_two_level_threshold = 1; +SET max_threads = 4; +SET max_bytes_before_external_group_by = 0; +SET max_bytes_ratio_before_external_group_by = 0; +SET optimize_aggregation_in_order = 0; + +INSERT INTO t_uniq_exact SELECT 0, randomPrintableASCII(5), rand() FROM numbers(300000); +INSERT INTO t_uniq_exact SELECT 1, randomPrintableASCII(5), rand() FROM numbers(300000); +INSERT INTO t_uniq_exact SELECT 2, randomPrintableASCII(5), rand() FROM numbers(300000); +INSERT 
INTO t_uniq_exact SELECT 3, randomPrintableASCII(5), rand() FROM numbers(300000); +INSERT INTO t_uniq_exact SELECT 4, randomPrintableASCII(5), rand() FROM numbers(300000); +INSERT INTO t_uniq_exact SELECT 5, randomPrintableASCII(5), rand() FROM numbers(300000); +INSERT INTO t_uniq_exact SELECT 6, randomPrintableASCII(5), rand() FROM numbers(300000); +INSERT INTO t_uniq_exact SELECT 7, randomPrintableASCII(5), rand() FROM numbers(300000); +INSERT INTO t_uniq_exact SELECT 8, randomPrintableASCII(5), rand() FROM numbers(300000); +INSERT INTO t_uniq_exact SELECT 9, randomPrintableASCII(5), rand() FROM numbers(300000); + +OPTIMIZE TABLE t_uniq_exact FINAL; + +SELECT a, uniqExact(b) FROM t_uniq_exact GROUP BY a ORDER BY a +SETTINGS min_hit_rate_to_use_consecutive_keys_optimization = 1.0 +EXCEPT +SELECT a, uniqExact(b) FROM t_uniq_exact GROUP BY a ORDER BY a +SETTINGS min_hit_rate_to_use_consecutive_keys_optimization = 0.5; + +SELECT a, sum(c) FROM t_uniq_exact GROUP BY a ORDER BY a +SETTINGS min_hit_rate_to_use_consecutive_keys_optimization = 1.0 +EXCEPT +SELECT a, sum(c) FROM t_uniq_exact GROUP BY a ORDER BY a +SETTINGS min_hit_rate_to_use_consecutive_keys_optimization = 0.5; + +DROP TABLE t_uniq_exact; diff --git a/parser/testdata/03009_consecutive_keys_nullable/ast.json b/parser/testdata/03009_consecutive_keys_nullable/ast.json new file mode 100644 index 000000000..160eba06a --- /dev/null +++ b/parser/testdata/03009_consecutive_keys_nullable/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_nullable_keys_1 (children 1)" + }, + { + "explain": " Identifier t_nullable_keys_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001313874, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/03009_consecutive_keys_nullable/metadata.json b/parser/testdata/03009_consecutive_keys_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03009_consecutive_keys_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03009_consecutive_keys_nullable/query.sql b/parser/testdata/03009_consecutive_keys_nullable/query.sql new file mode 100644 index 000000000..bd1a34e0a --- /dev/null +++ b/parser/testdata/03009_consecutive_keys_nullable/query.sql @@ -0,0 +1,56 @@ +DROP TABLE IF EXISTS t_nullable_keys_1; + +CREATE TABLE t_nullable_keys_1 (x Nullable(Int64)) ENGINE = Memory; +INSERT INTO t_nullable_keys_1 VALUES (1), (1), (NULL); +SELECT x, count(), countIf(x IS NULL) FROM t_nullable_keys_1 GROUP BY x ORDER BY x; + +DROP TABLE t_nullable_keys_1; + +DROP TABLE IF EXISTS t_nullable_keys_2; + +CREATE TABLE t_nullable_keys_2 (x Nullable(Int64)) ENGINE = Memory; +INSERT INTO t_nullable_keys_2 VALUES (NULL), (1), (1); +SELECT x, count(), countIf(x IS NULL) FROM t_nullable_keys_2 GROUP BY x ORDER BY x; + +DROP TABLE t_nullable_keys_2; + +DROP TABLE IF EXISTS t_nullable_keys_3; + +CREATE TABLE t_nullable_keys_3 (x Nullable(Int64)) ENGINE = Memory; +INSERT INTO t_nullable_keys_3 VALUES (NULL), (NULL), (NULL); +SELECT x, count(), countIf(x IS NULL) FROM t_nullable_keys_3 GROUP BY x ORDER BY x; + +DROP TABLE t_nullable_keys_3; + +DROP TABLE IF EXISTS t_nullable_keys_4; + +CREATE TABLE t_nullable_keys_4 (x Nullable(Int64)) ENGINE = Memory; +INSERT INTO t_nullable_keys_4 VALUES (1), (1), (1); +SELECT x, count(), countIf(x IS NULL) FROM t_nullable_keys_4 GROUP BY x ORDER BY x; + +DROP TABLE t_nullable_keys_4; + +DROP TABLE IF EXISTS 
t_nullable_keys_5; + +CREATE TABLE t_nullable_keys_5 (x Nullable(Int64)) ENGINE = Memory; +INSERT INTO t_nullable_keys_5 VALUES (1), (NULL), (1); +SELECT x, count(), countIf(x IS NULL) FROM t_nullable_keys_5 GROUP BY x ORDER BY x; + +DROP TABLE t_nullable_keys_5; + +DROP TABLE IF EXISTS t_nullable_keys_6; + +CREATE TABLE t_nullable_keys_6 (x Nullable(Int64)) ENGINE = Memory; +INSERT INTO t_nullable_keys_6 VALUES (NULL), (1), (NULL); +SELECT x, count(), countIf(x IS NULL) FROM t_nullable_keys_6 GROUP BY x ORDER BY x; + +DROP TABLE t_nullable_keys_6; + +SYSTEM FLUSH LOGS query_log; + +SELECT + splitByChar('.', tables[1])[2] AS table, + ProfileEvents['AggregationOptimizedEqualRangesOfKeys'] > 0 +FROM system.query_log +WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND query LIKE '%SELECT%FROM%t_nullable_keys_%' +ORDER BY table; diff --git a/parser/testdata/03009_range_dict_get_or_default/ast.json b/parser/testdata/03009_range_dict_get_or_default/ast.json new file mode 100644 index 000000000..8358b3914 --- /dev/null +++ b/parser/testdata/03009_range_dict_get_or_default/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery range_dictionary (children 1)" + }, + { + "explain": " Identifier range_dictionary" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001668591, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/03009_range_dict_get_or_default/metadata.json b/parser/testdata/03009_range_dict_get_or_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03009_range_dict_get_or_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03009_range_dict_get_or_default/query.sql b/parser/testdata/03009_range_dict_get_or_default/query.sql new file mode 100644 index 000000000..1f4b4073b --- /dev/null +++ b/parser/testdata/03009_range_dict_get_or_default/query.sql @@ -0,0 +1,34 @@ +DROP DICTIONARY IF EXISTS range_dictionary; +DROP TABLE IF EXISTS range_dictionary_nullable_source_table; + + +CREATE TABLE range_dictionary_nullable_source_table +( + key UInt64, + start_date Date, + end_date Date, + value Nullable(UInt64) +) +ENGINE = TinyLog; + +INSERT INTO range_dictionary_nullable_source_table VALUES (0, toDate('2019-05-05'), toDate('2019-05-20'), 0), (1, toDate('2019-05-05'), toDate('2019-05-20'), NULL); + +CREATE DICTIONARY range_dictionary +( + key UInt64, + start_date Date, + end_date Date, + value Nullable(UInt64) DEFAULT NULL +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(HOST 'localhost' PORT tcpPort() TABLE 'range_dictionary_nullable_source_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(RANGE_HASHED()) +RANGE(MIN start_date MAX end_date); + +SELECT dictGetOrDefault('range_dictionary', 'value', toUInt64(2), toDate(toLowCardinality(materialize('2019-05-15'))), 2); + + +DROP DICTIONARY IF EXISTS range_dictionary; +DROP TABLE IF EXISTS range_dictionary_nullable_source_table; + diff --git a/parser/testdata/03009_storage_memory_circ_buffer_usage/ast.json b/parser/testdata/03009_storage_memory_circ_buffer_usage/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03009_storage_memory_circ_buffer_usage/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03009_storage_memory_circ_buffer_usage/metadata.json b/parser/testdata/03009_storage_memory_circ_buffer_usage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03009_storage_memory_circ_buffer_usage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03009_storage_memory_circ_buffer_usage/query.sql b/parser/testdata/03009_storage_memory_circ_buffer_usage/query.sql new file mode 100644 index 000000000..1324e1d0d --- /dev/null +++ b/parser/testdata/03009_storage_memory_circ_buffer_usage/query.sql @@ -0,0 +1,64 @@ +-- Tags: memory-engine +SET max_block_size = 65409; -- Default value + +DROP TABLE IF EXISTS memory; +CREATE TABLE memory (i UInt32) ENGINE = Memory SETTINGS min_bytes_to_keep = 4096, max_bytes_to_keep = 16384; + +SELECT 'TESTING BYTES'; +/* 1. testing oldest block doesn't get deleted because of min-threshold */ +INSERT INTO memory SELECT * FROM numbers(0, 1600); +SELECT total_bytes FROM system.tables WHERE name = 'memory' and database = currentDatabase(); + +/* 2. adding block that doesn't get deleted */ +INSERT INTO memory SELECT * FROM numbers(1000, 100); +SELECT total_bytes FROM system.tables WHERE name = 'memory' and database = currentDatabase(); + +/* 3. testing oldest block gets deleted - 9216 bytes - 1100 */ +INSERT INTO memory SELECT * FROM numbers(9000, 1000); +SELECT total_bytes FROM system.tables WHERE name = 'memory' and database = currentDatabase(); + +/* 4. check large block over-writes all bytes / rows */ +INSERT INTO memory SELECT * FROM numbers(9000, 10000); +SELECT total_bytes FROM system.tables WHERE name = 'memory' and database = currentDatabase(); + +DROP TABLE IF EXISTS memory; +CREATE TABLE memory (i UInt32) ENGINE = Memory SETTINGS min_rows_to_keep = 100, max_rows_to_keep = 1000; + +SELECT 'TESTING ROWS'; +/* 1. add normal number of rows */ +INSERT INTO memory SELECT * FROM numbers(0, 50); +SELECT total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase(); + +/* 2. table should have 1000 */ +INSERT INTO memory SELECT * FROM numbers(50, 950); +SELECT total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase(); + +/* 3. table should have 1020 - removed first 50 */ +INSERT INTO memory SELECT * FROM numbers(2000, 70); +SELECT total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase(); + +/* 4. 
check large block over-writes all rows */ +INSERT INTO memory SELECT * FROM numbers(3000, 1100); +SELECT total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase(); + +SELECT 'TESTING NO CIRCULAR-BUFFER'; +DROP TABLE IF EXISTS memory; +CREATE TABLE memory (i UInt32) ENGINE = Memory; + +INSERT INTO memory SELECT * FROM numbers(0, 1600); +SELECT total_bytes FROM system.tables WHERE name = 'memory' and database = currentDatabase(); + +INSERT INTO memory SELECT * FROM numbers(1000, 100); +SELECT total_bytes FROM system.tables WHERE name = 'memory' and database = currentDatabase(); + +INSERT INTO memory SELECT * FROM numbers(9000, 1000); +SELECT total_bytes FROM system.tables WHERE name = 'memory' and database = currentDatabase(); + +INSERT INTO memory SELECT * FROM numbers(9000, 10000); +SELECT total_bytes FROM system.tables WHERE name = 'memory' and database = currentDatabase(); + +SELECT 'TESTING INVALID SETTINGS'; +CREATE TABLE faulty_memory (i UInt32) ENGINE = Memory SETTINGS min_rows_to_keep = 100; -- { serverError SETTING_CONSTRAINT_VIOLATION } +CREATE TABLE faulty_memory (i UInt32) ENGINE = Memory SETTINGS min_bytes_to_keep = 100; -- { serverError SETTING_CONSTRAINT_VIOLATION } + +DROP TABLE memory; diff --git a/parser/testdata/03010_file_log_large_poll_batch_size/ast.json b/parser/testdata/03010_file_log_large_poll_batch_size/ast.json new file mode 100644 index 000000000..11e97358a --- /dev/null +++ b/parser/testdata/03010_file_log_large_poll_batch_size/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 3)" + }, + { + "explain": " Identifier test" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration number (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function FileLog (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '.\/user_files\/data.jsonl'" + }, + { + "explain": " Literal 'JSONEachRow'" + }, + { + "explain": " Set" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.000836003, + "rows_read": 12, + "bytes_read": 421 + } +} diff --git a/parser/testdata/03010_file_log_large_poll_batch_size/metadata.json b/parser/testdata/03010_file_log_large_poll_batch_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03010_file_log_large_poll_batch_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03010_file_log_large_poll_batch_size/query.sql b/parser/testdata/03010_file_log_large_poll_batch_size/query.sql new file mode 100644 index 000000000..2663011f2 --- /dev/null +++ b/parser/testdata/03010_file_log_large_poll_batch_size/query.sql @@ -0,0 +1,2 @@ +create table test (number UInt64) engine=FileLog('./user_files/data.jsonl', 'JSONEachRow') settings poll_max_batch_size=18446744073709; -- {serverError INVALID_SETTING_VALUE} + diff --git a/parser/testdata/03010_read_system_parts_table_test/ast.json b/parser/testdata/03010_read_system_parts_table_test/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03010_read_system_parts_table_test/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03010_read_system_parts_table_test/metadata.json 
b/parser/testdata/03010_read_system_parts_table_test/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03010_read_system_parts_table_test/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03010_read_system_parts_table_test/query.sql b/parser/testdata/03010_read_system_parts_table_test/query.sql new file mode 100644 index 000000000..1f72c2a3d --- /dev/null +++ b/parser/testdata/03010_read_system_parts_table_test/query.sql @@ -0,0 +1,13 @@ +-- Tags: no-shared-merge-tree +-- The parts might named differently with SMT +DROP TABLE IF EXISTS users; +CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=MergeTree ORDER BY uid PARTITION BY uid; + +INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users VALUES (6666, 'Ksenia', 48); + +SELECT uuid, name from system.parts WHERE database = currentDatabase() AND table = 'users'; + +SELECT uuid, name, table from system.parts WHERE database = currentDatabase() AND table = 'users' AND uuid = '00000000-0000-0000-0000-000000000000'; +SELECT uuid, name, table, column from system.parts_columns WHERE database = currentDatabase() AND table = 'users' AND uuid = '00000000-0000-0000-0000-000000000000'; +DROP TABLE IF EXISTS users; diff --git a/parser/testdata/03010_sum_to_to_count_if_nullable/ast.json b/parser/testdata/03010_sum_to_to_count_if_nullable/ast.json new file mode 100644 index 000000000..aebf6bad7 --- /dev/null +++ b/parser/testdata/03010_sum_to_to_count_if_nullable/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001302106, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03010_sum_to_to_count_if_nullable/metadata.json b/parser/testdata/03010_sum_to_to_count_if_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03010_sum_to_to_count_if_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03010_sum_to_to_count_if_nullable/query.sql b/parser/testdata/03010_sum_to_to_count_if_nullable/query.sql new file mode 100644 index 000000000..5ec6ee5a9 --- /dev/null +++ b/parser/testdata/03010_sum_to_to_count_if_nullable/query.sql @@ -0,0 +1,14 @@ +SET optimize_rewrite_sum_if_to_count_if = 1; + +SET enable_analyzer = 0; +SELECT (sumIf(toNullable(1), (number % 2) = 0), NULL) FROM numbers(10); +SELECT (sum(if((number % 2) = 0, toNullable(1), 0)), NULL) FROM numbers(10); +SELECT (tuple(sum(if((number % 2) = 0, toNullable(0), 123)) IGNORE NULLS), toUInt8(3)) FROM numbers(100); + +SET enable_analyzer = 1; +SELECT (sumIf(toNullable(1), (number % 2) = 0), NULL) FROM numbers(10); +EXPLAIN QUERY TREE SELECT (sumIf(toNullable(1), (number % 2) = 0), NULL) FROM numbers(10); +SELECT (sum(if((number % 2) = 0, toNullable(1), 0)), NULL) FROM numbers(10); +EXPLAIN QUERY TREE SELECT (sum(if((number % 2) = 0, toNullable(1), 0)), NULL) FROM numbers(10); +SELECT (tuple(sum(if((number % 2) = 0, toNullable(0), 123)) IGNORE NULLS), toUInt8(3)) FROM numbers(100); +EXPLAIN QUERY TREE SELECT (tuple(sum(if((number % 2) = 0, toNullable(0), 123)) IGNORE NULLS), toUInt8(3)) FROM numbers(100); diff --git a/parser/testdata/03010_view_prewhere_in/ast.json b/parser/testdata/03010_view_prewhere_in/ast.json new file mode 100644 index 000000000..ccbdf55f9 --- /dev/null +++ b/parser/testdata/03010_view_prewhere_in/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + 
"name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery v (children 1)" + }, + { + "explain": " Identifier v" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001475692, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03010_view_prewhere_in/metadata.json b/parser/testdata/03010_view_prewhere_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03010_view_prewhere_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03010_view_prewhere_in/query.sql b/parser/testdata/03010_view_prewhere_in/query.sql new file mode 100644 index 000000000..799c07f31 --- /dev/null +++ b/parser/testdata/03010_view_prewhere_in/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS v; +CREATE VIEW v (`date` UInt32,`value` UInt8) AS +WITH + data AS (SELECT '' id LIMIT 0), + r AS (SELECT'' as id, 1::UInt8 as value) +SELECT + now() as date, + value AND (data.id IN (SELECT '' as d from system.one)) AS value +FROM data + LEFT JOIN r ON data.id = r.id; + +SELECT 1; +SELECT date, value FROM v; +SELECT 2; +SELECT date, value FROM v ORDER BY date; +SELECT 3; +DROP TABLE v; diff --git a/parser/testdata/03010_virtual_memory_mappings_asynchronous_metrics/ast.json b/parser/testdata/03010_virtual_memory_mappings_asynchronous_metrics/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03010_virtual_memory_mappings_asynchronous_metrics/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03010_virtual_memory_mappings_asynchronous_metrics/metadata.json b/parser/testdata/03010_virtual_memory_mappings_asynchronous_metrics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03010_virtual_memory_mappings_asynchronous_metrics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03010_virtual_memory_mappings_asynchronous_metrics/query.sql b/parser/testdata/03010_virtual_memory_mappings_asynchronous_metrics/query.sql new file mode 100644 index 000000000..e9aced83d --- /dev/null +++ b/parser/testdata/03010_virtual_memory_mappings_asynchronous_metrics/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-replicated-database + +SELECT least(value, 0) FROM system.asynchronous_metrics WHERE metric = 'VMMaxMapCount'; +SELECT least(value, 0) FROM system.asynchronous_metrics WHERE metric = 'VMNumMaps'; diff --git a/parser/testdata/03011_adaptative_timeout_compatibility/ast.json b/parser/testdata/03011_adaptative_timeout_compatibility/ast.json new file mode 100644 index 000000000..0bc8c35f6 --- /dev/null +++ b/parser/testdata/03011_adaptative_timeout_compatibility/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier value" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.settings" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier name" + }, + { + "explain": " Literal 'async_insert_use_adaptive_busy_timeout'" + }, + { + "explain": " Set" + } + ], + 
+ "rows": 14, + + "statistics": + { + "elapsed": 0.002840925, + "rows_read": 14, + "bytes_read": 538 + } +} diff --git a/parser/testdata/03011_adaptative_timeout_compatibility/metadata.json b/parser/testdata/03011_adaptative_timeout_compatibility/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03011_adaptative_timeout_compatibility/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03011_adaptative_timeout_compatibility/query.sql b/parser/testdata/03011_adaptative_timeout_compatibility/query.sql new file mode 100644 index 000000000..cdd2da0f6 --- /dev/null +++ b/parser/testdata/03011_adaptative_timeout_compatibility/query.sql @@ -0,0 +1 @@ +SELECT value from system.settings where name = 'async_insert_use_adaptive_busy_timeout' SETTINGS compatibility = '23.12'; diff --git a/parser/testdata/03011_definitive_guide_to_cast/ast.json b/parser/testdata/03011_definitive_guide_to_cast/ast.json new file mode 100644 index 000000000..cb9878261 --- /dev/null +++ b/parser/testdata/03011_definitive_guide_to_cast/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001498367, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03011_definitive_guide_to_cast/metadata.json b/parser/testdata/03011_definitive_guide_to_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03011_definitive_guide_to_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03011_definitive_guide_to_cast/query.sql b/parser/testdata/03011_definitive_guide_to_cast/query.sql new file mode 100644 index 000000000..708db0adc --- /dev/null +++ b/parser/testdata/03011_definitive_guide_to_cast/query.sql @@ -0,0 +1,356 @@ +SET session_timezone = 'Europe/Amsterdam'; + +-- Type conversion functions and operators. + + +-- 1. SQL standard CAST operator: `CAST(value AS Type)`. + +SELECT CAST(123 AS String); + +-- It converts between various data types, including parameterized data types + +SELECT CAST(1234567890 AS DateTime('Europe/Amsterdam')); + +-- and composite data types: + +SELECT CAST('[1, 2, 3]' AS Array(UInt8)); + +-- Its return type depends on the setting `cast_keep_nullable`. If it is enabled, if the source argument type is Nullable, the resulting data type will be also Nullable, even if it is not written explicitly: + +SET cast_keep_nullable = 1; +SELECT CAST(x AS UInt8) AS y, toTypeName(y) FROM VALUES('x Nullable(String)', ('123'), ('NULL')); + +SET cast_keep_nullable = 0; +SELECT CAST(x AS UInt8) AS y, toTypeName(y) FROM VALUES('x Nullable(String)', ('123'), ('NULL')); -- { serverError CANNOT_PARSE_TEXT } + +-- There are various type conversion rules, some worth noting. 
+ +-- Conversion between numeric types can involve implementation-defined overflow: + +SELECT CAST(257 AS UInt8); +SELECT CAST(-1 AS UInt8); + +-- Conversion from string acts like parsing, and for composite data types like Array, Tuple, it works in the same way as from the `Values` data format: + +SELECT CAST($$['Hello', 'wo\'rld\\']$$ AS Array(String)); + +-- ' +-- While for simple data types, it does not interpret escape sequences: + +SELECT arrayJoin(CAST($$['Hello', 'wo\'rld\\']$$ AS Array(String))) AS x, CAST($$wo\'rld\\$$ AS FixedString(9)) AS y; + +-- As conversion from String is similar to direct parsing rather than conversion from other types, +-- it can be stricter for numbers by not tolerating overflows in some cases: + +SELECT CAST(-123 AS UInt8), CAST(1234 AS UInt8); + +SELECT CAST('-123' AS UInt8); -- { serverError CANNOT_PARSE_NUMBER } + +-- In some cases it still allows overflows, but it is implementation defined: + +SELECT CAST('1234' AS UInt8); + +-- Parsing from a string does not tolerate extra whitespace characters: + +SELECT CAST(' 123' AS UInt8); -- { serverError CANNOT_PARSE_TEXT } +SELECT CAST('123 ' AS UInt8); -- { serverError CANNOT_PARSE_TEXT } + +-- But for composite data types, it involves a more featured parser that takes care of whitespace inside the data structures: + +SELECT CAST('[ 123 ,456, ]' AS Array(UInt16)); + +-- Conversion from a floating point value to an integer will involve truncation towards zero: + +SELECT CAST(1.9, 'Int64'), CAST(-1.9, 'Int64'); + +-- Conversion from NULL into a non-Nullable type will throw an exception, as well as conversions from non-finite floating point numbers (NaN, inf, -inf) to an integer, or conversion between arrays of different dimensions. + +-- However, you might find it amusing that an empty array of Nothing data type can be converted to arrays of any dimensions: + +SELECT [] AS x, CAST(x AS Array(Array(Array(Tuple(UInt64, String))))) AS y, toTypeName(x), toTypeName(y); + +-- Conversion between numbers and DateTime/Date data types interprets the number as the number of seconds/days from the Unix epoch, +-- where Unix epoch starts from 1970-01-01T00:00:00Z (the midnight of Gregorian year 1970 in UTC), +-- and the number of seconds does not count leap seconds, as in Unix time. 
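+ +-- A quick round-trip sketch of this rule (the reverse direction also holds: casting a DateTime back to a number yields the same Unix timestamp; 'UTC' here is an arbitrary example time zone): + +SELECT CAST(CAST(1234567890 AS DateTime('UTC')) AS UInt32);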
+ +-- For example, it is 1 AM in Amsterdam: + +SELECT CAST(0 AS DateTime('Europe/Amsterdam')); + +-- The numbers can be fractional and negative (for DateTime64): + +SELECT CAST(1234567890.123456 AS DateTime64(6, 'Europe/Amsterdam')); +SELECT CAST(-0.111111 AS DateTime64(6, 'Europe/Amsterdam')); + +-- If the result does not fit in the range of the corresponding time data types, it is truncated and saturated to the boundaries: + +SELECT CAST(1234567890.123456 AS DateTime('Europe/Amsterdam')); +SELECT CAST(-1 AS DateTime('Europe/Amsterdam')); + +SELECT CAST(1e20 AS DateTime64(6, 'Europe/Amsterdam')); + +-- A special case is DateTime64(9) - the maximum resolution, where it does not cover the usual range, +-- and in this case, it throws an exception on overflow (I don't mind if we change this behavior in the future): + + SELECT CAST(1e20 AS DateTime64(9, 'Europe/Amsterdam')); -- { serverError DECIMAL_OVERFLOW } + +-- If a number is converted to a Date data type, the value is interpreted as the number of days since the Unix epoch, +-- but if the number is larger than the range of the data type, it is interpreted as a unix timestamp +-- (the number of seconds since the Unix epoch), similarly to how it is done for the DateTime data type, +-- for convenience (while the internal representation of Date is the number of days, +-- often people want the unix timestamp to also be parsed into the Date data type): + +SELECT CAST(14289 AS Date); +SELECT CAST(1234567890 AS Date); + +-- When converting to a FixedString, if the length of the result data type is larger than the value, the result is padded with zero bytes: + +SELECT CAST('123' AS FixedString(5)) FORMAT TSV; + +-- But if it does not fit, an exception is thrown: + +SELECT CAST('12345' AS FixedString(3)) FORMAT TSV; -- { serverError TOO_LARGE_STRING_SIZE } + +-- The operator is case-insensitive: + +SELECT CAST(123 AS String); +SELECT cast(123 AS String); +SELECT Cast(123 AS String); + + +-- 2. The functional form of this operator: `CAST(value, 'Type')`: + +SELECT CAST(123, 'String'); + +-- This form is equivalent. Keep in mind that the type has to be a constant expression: + +SELECT CAST(123, 'Str'||'ing'); -- this works. + +-- This does not work: SELECT materialize('String') AS type, CAST(123, type); + +-- It is also case-insensitive: + +SELECT CasT(123, 'String'); + +-- The functional form exists for the consistency of implementation (as every operator also exists in the functional form and the functional form is represented in the query's Abstract Syntax Tree). In addition, the functional form makes sense for users when they need to construct a data type name from a constant expression, or when they want to generate a query programmatically. + +-- It's worth noting that the operator form does not allow specifying the type name as a string literal: + +-- This does not work: SELECT CAST(123 AS 'String'); + +-- The type name is only allowed as an identifier, either a bare word: + +SELECT CAST(123 AS String); + +-- Or as MySQL- or PostgreSQL-style quoted identifiers: + +SELECT CAST(123 AS `String`); +SELECT CAST(123 AS "String"); + +-- While the functional form only allows the type name as a string literal: + +SELECT CAST(123, 'String'); -- works +SELECT CAST(123, String); -- { serverError UNKNOWN_IDENTIFIER } + +-- However, you can cheat: + +SELECT 'String' AS String, CAST(123, String); + + +-- 3. The internal function `_CAST`, which differs from `CAST` only in that it does not depend on the value of the `cast_keep_nullable` setting and other settings. 
+ +-- This is needed when ClickHouse has to persist an expression for future use, like in table definitions, including primary and partition keys and other indices. + +-- The function is not intended to be used directly. When a user uses a regular `CAST` operator or function in a table definition, it is transparently converted to `_CAST` to persist its behavior. However, the user can still use the internal version directly: + +SELECT _CAST(x, 'UInt8') AS y, toTypeName(y) FROM VALUES('x Nullable(String)', ('123'), ('456')); + +-- There is no operator form of this function: + +-- does not work, here UInt8 is interpreted as an alias for the value: +SELECT _CAST(123 AS UInt8); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT CAST(123 AS UInt8); -- works + + +-- 4. PostgreSQL-style cast syntax `::` + +SELECT 123::String; + +-- It differs from the `CAST` operator: if it is applied to a simple literal value, instead of performing a type conversion, it invokes the SQL parser directly on the corresponding text fragment of the query. The most important cases are the floating-point and decimal types. + +-- In this example, we parse `1.1` as Decimal and do not involve any type conversion: + +SELECT 1.1::Decimal(30, 20); + +-- In this example, `1.1` is first parsed as usual, yielding a Float64 value, and then converted to Decimal, producing a wrong result: + +SELECT CAST(1.1 AS Decimal(30, 20)); + +-- We can change this behavior in the future. + +-- Another example: + +SELECT -1::UInt64; -- { serverError CANNOT_PARSE_NUMBER } + +SELECT CAST(-1 AS UInt64); -- conversion with overflow + +-- For composite data types, if a value is a literal, it is parsed directly: + +SELECT [1.1, 2.3]::Array(Decimal(30, 20)); + +-- But if the value contains expressions, the usage of the `::` operator will be equivalent to invoking the CAST operator on the expression: + +SELECT [1.1, 2.3 + 0]::Array(Decimal(30, 20)); + +-- The automatic column name for the result of the `::` operator may be the same as for the result of applying the CAST operator to a string containing the corresponding fragment of the query or to a corresponding expression: + +SELECT 1.1::Decimal(30, 20), CAST('1.1' AS Decimal(30, 20)), (1+1)::UInt8 FORMAT Vertical; + +-- The operator has the highest precedence among operators: + +SELECT 1-1::String; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- But one interesting example is the unary minus. Here the minus is not an operator but part of the numeric literal: + +SELECT -1::String; + +-- Here it is an operator: + +SELECT 1 AS x, -x::String; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + + +-- 5. Accurate casting functions: `accurateCast`, `accurateCastOrNull`, `accurateCastOrDefault`. + +-- These functions check if the value is exactly representable in the target data type. 
+ +-- The function `accurateCast` performs the conversion or throws an exception if the value is not exactly representable: + +SELECT accurateCast(1.123456789, 'Float32'); -- { serverError CANNOT_CONVERT_TYPE } + +-- The function `accurateCastOrNull` always wraps the target type into Nullable, and returns NULL if the value is not exactly representable: + +SELECT accurateCastOrNull(1.123456789, 'Float32'); + +-- The function `accurateCastOrDefault` takes an additional parameter, which must be of the target type, and returns it if the value is not exactly representable: + +SELECT accurateCastOrDefault(-1, 'UInt64', 0::UInt64); + +-- If this parameter is omitted, it is assumed to be the default value of the corresponding data type: + +SELECT accurateCastOrDefault(-1, 'UInt64'); +SELECT accurateCastOrDefault(-1, 'DateTime'); + +-- Unfortunately, this does not work as expected: SELECT accurateCastOrDefault(-1, $$Enum8('None' = 1, 'Hello' = 2, 'World' = 3)$$); +-- https://github.com/ClickHouse/ClickHouse/issues/61495 + +-- These functions are case-sensitive, and there are no corresponding operators: + +SELECT ACCURATECAST(1, 'String'); -- { serverError UNKNOWN_FUNCTION }. + + +-- 6. Explicit conversion functions: + +-- `toString`, `toFixedString`, +-- `toUInt8`, `toUInt16`, `toUInt32`, `toUInt64`, `toUInt128`, `toUInt256`, +-- `toInt8`, `toInt16`, `toInt32`, `toInt64`, `toInt128`, `toInt256`, +-- `toFloat32`, `toFloat64`, +-- `toDecimal32`, `toDecimal64`, `toDecimal128`, `toDecimal256`, +-- `toDate`, `toDate32`, `toDateTime`, `toDateTime64`, +-- `toUUID`, `toIPv4`, `toIPv6`, +-- `toIntervalNanosecond`, `toIntervalMicrosecond`, `toIntervalMillisecond`, +-- `toIntervalSecond`, `toIntervalMinute`, `toIntervalHour`, +-- `toIntervalDay`, `toIntervalWeek`, `toIntervalMonth`, `toIntervalQuarter`, `toIntervalYear` + +-- These functions work under the same rules as the CAST operator and can be thought of as elementary implementation parts of that operator. They allow implementation-defined overflow while converting between numeric types. + +SELECT toUInt8(-1); + +-- These are ClickHouse-native conversion functions. They take an argument with the input value, and for some of the data types (`FixedString`, `DateTime`, `DateTime64`, `Decimal`s), the subsequent arguments are constant expressions, defining the parameters of these data types, or the rules to interpret the source value. + +SELECT toFloat64(123); -- no arguments +SELECT toFixedString('Hello', 10) FORMAT TSV; -- the parameter of the FixedString data type, the function returns FixedString(10) +SELECT toFixedString('Hello', 5 + 5) FORMAT TSV; -- it can be a constant expression + +SELECT toDecimal32('123.456', 2); -- the scale of the Decimal data type + +SELECT toDateTime('2024-04-25 01:02:03', 'Europe/Amsterdam'); -- the time zone of DateTime +SELECT toDateTime64('2024-04-25 01:02:03', 6, 'Europe/Amsterdam'); -- the scale of DateTime64 and its time zone + +-- The length of FixedString and the scale of Decimal and DateTime64 types are mandatory arguments, while the time zone of the DateTime data type is optional. + +-- If the time zone is not specified, the time zone of the argument's data type is used, and if the argument is not a date-time value, the session time zone is used. 
+ +SELECT toDateTime('2024-04-25 01:02:03'); +SELECT toDateTime64('2024-04-25 01:02:03.456789', 6); + +-- Here, the time zone can be specified as the rule of interpretation of the value during conversion: + +SELECT toString(1710612085::DateTime, 'America/Los_Angeles'); +SELECT toString(1710612085::DateTime); + +-- In the case when the time zone is not part of the resulting data type, but a rule of interpretation of the source value, +-- it can be non-constant. Let's clarify: in this example, the resulting data type is a String; it does not have a time zone parameter: + +SELECT toString(1710612085::DateTime, tz) FROM Values('tz String', 'Europe/Amsterdam', 'America/Los_Angeles'); + +-- Functions converting to numeric types, date and datetime, IP and UUID, also have versions with -OrNull, -OrZero, and -OrDefault fallbacks, +-- that don't throw exceptions on parsing errors. +-- They use the same rules as the accurateCast function: + +SELECT toUInt8OrNull('123'), toUInt8OrNull('-123'), toUInt8OrNull('1234'), toUInt8OrNull(' 123'); +SELECT toUInt8OrZero('123'), toUInt8OrZero('-123'), toUInt8OrZero('1234'), toUInt8OrZero(' 123'); +SELECT toUInt8OrDefault('123', 10), toUInt8OrDefault('-123', 10), toUInt8OrDefault('1234', 10), toUInt8OrDefault(' 123', 10); +SELECT toUInt8OrDefault('123'), toUInt8OrDefault('-123'), toUInt8OrDefault('1234'), toUInt8OrDefault(' 123'); + +SELECT toTypeName(toUInt8OrNull('123')), toTypeName(toUInt8OrZero('123')); + +-- These functions are only applicable to string data types. +-- Although there is room for extension: + +SELECT toUInt8OrNull(123); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- String and FixedString work: + +SELECT toUInt8OrNull(123::FixedString(3)); + +-- For the FixedString data type, trailing zero bytes are allowed, because they are the padding for FixedString: + +SELECT toUInt8OrNull('123'::FixedString(4)); +SELECT toUInt8OrNull('123\0'::FixedString(4)); + +-- While for String, they are not: + +SELECT toUInt8OrNull('123\0'); + + +-- 7. SQL-compatibility type-defining operators: + +SELECT DATE '2024-04-25', TIMESTAMP '2024-01-01 02:03:04', INTERVAL 1 MINUTE, INTERVAL '12 hour'; + +-- These operators are interpreted as the corresponding explicit conversion functions. + + +-- 8. SQL-compatibility aliases for explicit conversion functions: + +SELECT DATE('2024-04-25'), TIMESTAMP('2024-01-01 02:03:04'), FROM_UNIXTIME(1234567890); + +-- These functions exist for compatibility with MySQL. They are case-insensitive. + +SELECT date '2024-04-25', timeSTAMP('2024-01-01 02:03:04'), From_Unixtime(1234567890); + + +-- 9. Specialized conversion functions: + +-- `parseDateTimeBestEffort`, `parseDateTimeBestEffortUS`, `parseDateTime64BestEffort`, `parseDateTime64BestEffortUS`, `toUnixTimestamp` + +-- These functions are similar to explicit conversion functions but provide special rules on how the conversion is performed. + +SELECT parseDateTimeBestEffort('25 Apr 1986 1pm'); + + +-- 10. Functions for converting between different components of date and time data types, or for rounding them. + +SELECT toDayOfMonth(toDateTime(1234567890)); + +-- These functions are covered in a separate topic. 
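+ +-- Nevertheless, a minimal sketch of both groups (toUnixTimestamp from group 9 parses a string into a Unix timestamp using the session time zone; toStartOfHour from group 10 rounds a date-time value down to the hour): + +SELECT toUnixTimestamp('2024-04-25 01:02:03'); +SELECT toStartOfHour(toDateTime(1234567890));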
diff --git a/parser/testdata/03012_prewhere_merge_distributed/ast.json b/parser/testdata/03012_prewhere_merge_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03012_prewhere_merge_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03012_prewhere_merge_distributed/metadata.json b/parser/testdata/03012_prewhere_merge_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03012_prewhere_merge_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03012_prewhere_merge_distributed/query.sql b/parser/testdata/03012_prewhere_merge_distributed/query.sql new file mode 100644 index 000000000..81ec38109 --- /dev/null +++ b/parser/testdata/03012_prewhere_merge_distributed/query.sql @@ -0,0 +1,36 @@ +-- Tags: log-engine +DROP TABLE IF EXISTS test_local; +DROP TABLE IF EXISTS test_distributed; + +CREATE TABLE test_local ( name String, date Date, sign Int8 ) ENGINE MergeTree PARTITION BY date ORDER BY name SETTINGS index_granularity = 8192; + +CREATE TABLE test_distributed ( name String, date Date, sign Int8 ) ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), test_local, rand64()); + +SET insert_distributed_sync = 1; + +INSERT INTO test_distributed (name, date, sign) VALUES ('1', '2024-01-01', 1),('2', '2024-01-02', 1),('3', '2024-01-03', 1),('4', '2024-01-04', 1),('5', '2024-01-05', 1),('6', '2024-01-06', 1),('7', '2024-01-07', 1),('8', '2024-01-08', 1),('9', '2024-01-09', 1),('10', '2024-01-10', 1),('11', '2024-01-11', 1); + +SELECT count() FROM test_distributed WHERE name GLOBAL IN ( SELECT name FROM test_distributed ); + +SET prefer_localhost_replica = 1; + +SELECT count() FROM merge(currentDatabase(), '^test_distributed$') WHERE name GLOBAL IN ( SELECT name FROM test_distributed ); +SELECT count() FROM merge(currentDatabase(), '^test_distributed$') PREWHERE name GLOBAL IN ( SELECT name FROM test_distributed ); + +SET prefer_localhost_replica = 0; + +SELECT count() FROM merge(currentDatabase(), '^test_distributed$') WHERE name GLOBAL IN ( SELECT name FROM test_distributed ); +SELECT count() FROM merge(currentDatabase(), '^test_distributed$') PREWHERE name GLOBAL IN ( SELECT name FROM test_distributed ); + +DROP TABLE test_local; +DROP TABLE test_distributed; + +DROP TABLE IF EXISTS test_log; + +CREATE TABLE test_log ( a int, b int ) ENGINE Log; + +INSERT INTO test_log values (1, 2); + +SELECT count() FROM merge(currentDatabase(), '^test_log$') PREWHERE a = 3; -- { serverError 182 } + +DROP TABLE test_log; diff --git a/parser/testdata/03013_addDays_with_timezone/ast.json b/parser/testdata/03013_addDays_with_timezone/ast.json new file mode 100644 index 000000000..0f441a4ec --- /dev/null +++ b/parser/testdata/03013_addDays_with_timezone/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function addDays (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '2024-01-01'" + }, + { + "explain": " Literal UInt64_6" + }, + { + "explain": " Literal 'Asia\/Shanghai'" + }, + { + 
"explain": " Literal UInt64_10" + }, + { + "explain": " Literal 'Asia\/Shanghai'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001675744, + "rows_read": 13, + "bytes_read": 495 + } +} diff --git a/parser/testdata/03013_addDays_with_timezone/metadata.json b/parser/testdata/03013_addDays_with_timezone/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03013_addDays_with_timezone/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03013_addDays_with_timezone/query.sql b/parser/testdata/03013_addDays_with_timezone/query.sql new file mode 100644 index 000000000..eb822d538 --- /dev/null +++ b/parser/testdata/03013_addDays_with_timezone/query.sql @@ -0,0 +1 @@ +select addDays(toDateTime64('2024-01-01', 6, 'Asia/Shanghai'), 10, 'Asia/Shanghai'); diff --git a/parser/testdata/03013_fuzz_arrayPartialReverseSort/ast.json b/parser/testdata/03013_fuzz_arrayPartialReverseSort/ast.json new file mode 100644 index 000000000..73726d3f2 --- /dev/null +++ b/parser/testdata/03013_fuzz_arrayPartialReverseSort/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier res" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001603684, + "rows_read": 5, + "bytes_read": 175 + } +} diff --git a/parser/testdata/03013_fuzz_arrayPartialReverseSort/metadata.json b/parser/testdata/03013_fuzz_arrayPartialReverseSort/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03013_fuzz_arrayPartialReverseSort/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03013_fuzz_arrayPartialReverseSort/query.sql b/parser/testdata/03013_fuzz_arrayPartialReverseSort/query.sql new file mode 100644 index 000000000..f575ae952 --- /dev/null +++ b/parser/testdata/03013_fuzz_arrayPartialReverseSort/query.sql @@ -0,0 +1,19 @@ +SELECT res +FROM +( + SELECT + arrayPartialReverseSort(2, if(number % 2, emptyArrayUInt64(), range(number))) AS arr, + arrayResize(arr, if(empty(arr), 0, 2)) AS res + FROM system.numbers + LIMIT 7 +); + +SELECT res +FROM +( + SELECT + arrayPartialReverseSort(materialize(2), if(number % 2, emptyArrayUInt64(), range(number))) AS arr, + arrayResize(arr, if(empty(arr), 0, 2)) AS res + FROM system.numbers + LIMIT 7 +); diff --git a/parser/testdata/03013_group_by_use_nulls_with_materialize_and_analyzer/ast.json b/parser/testdata/03013_group_by_use_nulls_with_materialize_and_analyzer/ast.json new file mode 100644 index 000000000..39c7d0e84 --- /dev/null +++ b/parser/testdata/03013_group_by_use_nulls_with_materialize_and_analyzer/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001184005, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03013_group_by_use_nulls_with_materialize_and_analyzer/metadata.json b/parser/testdata/03013_group_by_use_nulls_with_materialize_and_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03013_group_by_use_nulls_with_materialize_and_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03013_group_by_use_nulls_with_materialize_and_analyzer/query.sql b/parser/testdata/03013_group_by_use_nulls_with_materialize_and_analyzer/query.sql new file mode 100644 index 000000000..7b57dbd80 --- /dev/null +++ b/parser/testdata/03013_group_by_use_nulls_with_materialize_and_analyzer/query.sql @@ -0,0 +1,10 @@ +set enable_analyzer = 1; +set group_by_use_nulls = 1; +set optimize_group_by_function_keys = 1; +set optimize_injective_functions_in_group_by = 1; + +SELECT 3 + 3 from numbers(10) GROUP BY GROUPING SETS (('str'), (3 + 3)) order by all; +SELECT materialize(3) from numbers(10) GROUP BY GROUPING SETS (('str'), (materialize(3))) order by all; +SELECT ignore(3) from numbers(10) GROUP BY GROUPING SETS (('str'), (ignore(3))) order by all; +SELECT materialize(ignore(3)) from numbers(10) GROUP BY GROUPING SETS (('str'), (materialize(ignore(3)))) order by all; +SELECT ignore(materialize(3)) from numbers(10) GROUP BY GROUPING SETS (('str'), (ignore(materialize(3)))) order by all; diff --git a/parser/testdata/03013_ignore_drop_queries_probability/ast.json b/parser/testdata/03013_ignore_drop_queries_probability/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03013_ignore_drop_queries_probability/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03013_ignore_drop_queries_probability/metadata.json b/parser/testdata/03013_ignore_drop_queries_probability/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03013_ignore_drop_queries_probability/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03013_ignore_drop_queries_probability/query.sql b/parser/testdata/03013_ignore_drop_queries_probability/query.sql new file mode 100644 index 000000000..0533dc7ad --- /dev/null +++ b/parser/testdata/03013_ignore_drop_queries_probability/query.sql @@ -0,0 +1,19 @@ +-- Tags: memory-engine +create table test_memory (number UInt64) engine=Memory; +insert into test_memory select 42; +drop table test_memory settings ignore_drop_queries_probability=1; +select * from test_memory; +drop table test_memory; + +create table test_merge_tree (number UInt64) engine=MergeTree order by number; +insert into test_merge_tree select 42; +drop table test_merge_tree settings ignore_drop_queries_probability=1; +select * from test_merge_tree; +drop table test_merge_tree; + +create table test_join (number UInt64) engine=Join(ALL, LEFT, number); +insert into test_join select 42; +drop table test_join settings ignore_drop_queries_probability=1; +select * from test_join; +drop table test_join; + diff --git a/parser/testdata/03013_position_const_start_pos/ast.json b/parser/testdata/03013_position_const_start_pos/ast.json new file mode 100644 index 000000000..a7e6dc62a --- /dev/null +++ b/parser/testdata/03013_position_const_start_pos/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery 03013_position_const_start_pos (children 3)" + }, + { + "explain": " Identifier 03013_position_const_start_pos" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration n (children 1)" + }, + { + "explain": " DataType Int16" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001510678, + "rows_read": 8, + "bytes_read": 324 + 
} +} diff --git a/parser/testdata/03013_position_const_start_pos/metadata.json b/parser/testdata/03013_position_const_start_pos/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03013_position_const_start_pos/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03013_position_const_start_pos/query.sql b/parser/testdata/03013_position_const_start_pos/query.sql new file mode 100644 index 000000000..0c6da694b --- /dev/null +++ b/parser/testdata/03013_position_const_start_pos/query.sql @@ -0,0 +1,3 @@ +CREATE TABLE 03013_position_const_start_pos (n Int16) ENGINE = Memory; +INSERT INTO 03013_position_const_start_pos SELECT * FROM generateRandom() LIMIT 1000; +SELECT position(concat(NULLIF(1, 1), materialize(3)), 'ca', 2) FROM 03013_position_const_start_pos FORMAT Null; diff --git a/parser/testdata/03013_repeat_with_nonnative_integers/ast.json b/parser/testdata/03013_repeat_with_nonnative_integers/ast.json new file mode 100644 index 000000000..d33fc25b1 --- /dev/null +++ b/parser/testdata/03013_repeat_with_nonnative_integers/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function repeat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function toUInt256 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_12" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001210639, + "rows_read": 18, + "bytes_read": 720 + } +} diff --git a/parser/testdata/03013_repeat_with_nonnative_integers/metadata.json b/parser/testdata/03013_repeat_with_nonnative_integers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03013_repeat_with_nonnative_integers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03013_repeat_with_nonnative_integers/query.sql b/parser/testdata/03013_repeat_with_nonnative_integers/query.sql new file mode 100644 index 000000000..0dbe98994 --- /dev/null +++ b/parser/testdata/03013_repeat_with_nonnative_integers/query.sql @@ -0,0 +1,4 @@ +SELECT repeat(toString(number), toUInt256(12)) FROM numbers(1); +SELECT repeat(toString(number), toUInt128(12)) FROM numbers(1); +SELECT repeat(toString(number), toInt256(12)) FROM numbers(1); +SELECT repeat(toString(number), toInt128(12)) FROM numbers(1); diff --git a/parser/testdata/03013_test_part_level_is_reset_attach_from_disk_mt/ast.json b/parser/testdata/03013_test_part_level_is_reset_attach_from_disk_mt/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03013_test_part_level_is_reset_attach_from_disk_mt/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/03013_test_part_level_is_reset_attach_from_disk_mt/metadata.json b/parser/testdata/03013_test_part_level_is_reset_attach_from_disk_mt/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03013_test_part_level_is_reset_attach_from_disk_mt/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03013_test_part_level_is_reset_attach_from_disk_mt/query.sql b/parser/testdata/03013_test_part_level_is_reset_attach_from_disk_mt/query.sql new file mode 100644 index 000000000..eb05dfea8 --- /dev/null +++ b/parser/testdata/03013_test_part_level_is_reset_attach_from_disk_mt/query.sql @@ -0,0 +1,21 @@ +-- Tags: no-shared-merge-tree +SET alter_sync = 2; +-- {echoOn} +DROP TABLE IF EXISTS test; +CREATE TABLE test (a Int) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO test VALUES (1), (2), (3); +OPTIMIZE TABLE test FINAL; +SELECT part_name FROM system.parts where table='test' and active and database = currentDatabase(); +ALTER TABLE test DETACH PART 'all_1_1_1'; +ALTER TABLE test ATTACH PART 'all_1_1_1'; +SELECT part_name FROM system.parts where table='test' and active and database = currentDatabase(); + +-- Same as above, but with attach partition (different code path, should be tested as well) +DROP TABLE IF EXISTS test; +CREATE TABLE test (a Int) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO test VALUES (1), (2), (3); +OPTIMIZE TABLE test FINAL; +SELECT part_name FROM system.parts where table='test' and active and database = currentDatabase(); +ALTER TABLE test DETACH PART 'all_1_1_1'; +ALTER TABLE test ATTACH PARTITION tuple(); +SELECT part_name FROM system.parts where table='test' and active and database = currentDatabase(); diff --git a/parser/testdata/03014_analyzer_group_by_use_nulls/ast.json b/parser/testdata/03014_analyzer_group_by_use_nulls/ast.json new file mode 100644 index 000000000..ae34a8828 --- /dev/null +++ b/parser/testdata/03014_analyzer_group_by_use_nulls/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'a' (alias key)" + }, + { + "explain": " Literal 'b' (alias value)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier key" + }, + { + "explain": " Set" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001315628, + "rows_read": 9, + "bytes_read": 299 + } +} diff --git a/parser/testdata/03014_analyzer_group_by_use_nulls/metadata.json b/parser/testdata/03014_analyzer_group_by_use_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03014_analyzer_group_by_use_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03014_analyzer_group_by_use_nulls/query.sql b/parser/testdata/03014_analyzer_group_by_use_nulls/query.sql new file mode 100644 index 000000000..a1c302465 --- /dev/null +++ b/parser/testdata/03014_analyzer_group_by_use_nulls/query.sql @@ -0,0 +1 @@ +SELECT 'a' AS key, 'b' as value GROUP BY key WITH CUBE SETTINGS group_by_use_nulls = 1; diff --git a/parser/testdata/03014_analyzer_groupby_fuzz_60317/ast.json b/parser/testdata/03014_analyzer_groupby_fuzz_60317/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/03014_analyzer_groupby_fuzz_60317/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03014_analyzer_groupby_fuzz_60317/metadata.json b/parser/testdata/03014_analyzer_groupby_fuzz_60317/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03014_analyzer_groupby_fuzz_60317/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03014_analyzer_groupby_fuzz_60317/query.sql b/parser/testdata/03014_analyzer_groupby_fuzz_60317/query.sql new file mode 100644 index 000000000..295f89c5a --- /dev/null +++ b/parser/testdata/03014_analyzer_groupby_fuzz_60317/query.sql @@ -0,0 +1,27 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/60317 +SELECT + toNullable(materialize(_CAST(30, 'LowCardinality(UInt8)'))) as a, + _CAST(30, 'LowCardinality(UInt8)') as b, + makeDate(materialize(_CAST(30, 'LowCardinality(UInt8)')), 10, _CAST(30, 'Nullable(UInt8)')) as c +FROM system.one +GROUP BY + _CAST(30, 'Nullable(UInt8)') +SETTINGS enable_analyzer = 1; + +-- WITH CUBE (note that the result is different with the analyzer; the analyzer is correct, including all combinations) +SELECT + toNullable(toNullable(materialize(_CAST(30, 'LowCardinality(UInt8)')))) AS `toNullable(toNullable(materialize(toLowCardinality(30))))`, + _CAST(0, 'Date') AS `makeDate(-1980.1, -1980.1, 10)`, + _CAST(30, 'LowCardinality(UInt8)') AS `toLowCardinality(30)`, + 30 AS `30`, + makeDate(materialize(_CAST(30, 'LowCardinality(UInt8)')), 10, _CAST(30, 'Nullable(UInt8)')) AS `makeDate(materialize(toLowCardinality(30)), 10, toNullable(toNullable(30)))`, + -1980.1 AS `-1980.1` +FROM system.one AS __table1 +GROUP BY + _CAST(30, 'Nullable(UInt8)'), + -1980.1, + materialize(30), + _CAST(30, 'Nullable(UInt8)') +WITH CUBE +WITH TOTALS +SETTINGS enable_analyzer = 1; diff --git a/parser/testdata/03014_async_with_dedup_part_log_rmt/ast.json b/parser/testdata/03014_async_with_dedup_part_log_rmt/ast.json new file mode 100644 index 000000000..0f5686ddd --- /dev/null +++ b/parser/testdata/03014_async_with_dedup_part_log_rmt/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery 03014_async_with_dedup_part_log (children 2)" + }, + { + "explain": " Identifier 03014_async_with_dedup_part_log" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType UInt64" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001204565, + "rows_read": 6, + "bytes_read": 262 + } +} diff --git a/parser/testdata/03014_async_with_dedup_part_log_rmt/metadata.json b/parser/testdata/03014_async_with_dedup_part_log_rmt/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03014_async_with_dedup_part_log_rmt/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03014_async_with_dedup_part_log_rmt/query.sql b/parser/testdata/03014_async_with_dedup_part_log_rmt/query.sql new file mode 100644 index 000000000..23ddc642c --- /dev/null +++ b/parser/testdata/03014_async_with_dedup_part_log_rmt/query.sql @@ -0,0 +1,24 @@ +CREATE TABLE 03014_async_with_dedup_part_log (x UInt64) +ENGINE=ReplicatedMergeTree('/clickhouse/table/{database}/03014_async_with_dedup_part_log', 'r1') ORDER BY tuple(); + +SET async_insert = 1; +SET wait_for_async_insert = 1; +SET async_insert_deduplicate = 1; + +SELECT '-- Inserted part
--'; +INSERT INTO 03014_async_with_dedup_part_log VALUES (2); + +SYSTEM FLUSH LOGS part_log; +SELECT error, count() FROM system.part_log +WHERE table = '03014_async_with_dedup_part_log' AND database = currentDatabase() AND event_type = 'NewPart' +GROUP BY error +ORDER BY error; + +SELECT '-- Deduplicated part --'; +INSERT INTO 03014_async_with_dedup_part_log VALUES (2); + +SYSTEM FLUSH LOGS part_log; +SELECT error, count() FROM system.part_log +WHERE table = '03014_async_with_dedup_part_log' AND database = currentDatabase() AND event_type = 'NewPart' +GROUP BY error +ORDER BY error; diff --git a/parser/testdata/03014_group_by_use_nulls_injective_functions_and_analyzer/ast.json b/parser/testdata/03014_group_by_use_nulls_injective_functions_and_analyzer/ast.json new file mode 100644 index 000000000..28db424f7 --- /dev/null +++ b/parser/testdata/03014_group_by_use_nulls_injective_functions_and_analyzer/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001217336, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03014_group_by_use_nulls_injective_functions_and_analyzer/metadata.json b/parser/testdata/03014_group_by_use_nulls_injective_functions_and_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03014_group_by_use_nulls_injective_functions_and_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03014_group_by_use_nulls_injective_functions_and_analyzer/query.sql b/parser/testdata/03014_group_by_use_nulls_injective_functions_and_analyzer/query.sql new file mode 100644 index 000000000..d700f9ba3 --- /dev/null +++ b/parser/testdata/03014_group_by_use_nulls_injective_functions_and_analyzer/query.sql @@ -0,0 +1,4 @@ +set enable_analyzer=1, group_by_use_nulls=1, optimize_injective_functions_in_group_by=1; +SELECT bitNot(bitNot(number)) + 3 FROM numbers(10) GROUP BY GROUPING SETS (('str', bitNot(bitNot(number))), ('str')) order by all; +SELECT tuple(tuple(tuple(number))) FROM numbers(10) GROUP BY GROUPING SETS (('str', tuple(tuple(number))), ('str')) order by all; +SELECT materialize(3) + 3 FROM numbers(10) GROUP BY GROUPING SETS (('str', materialize(materialize(3))), ('str')) order by all; diff --git a/parser/testdata/03014_msan_parse_date_time/ast.json b/parser/testdata/03014_msan_parse_date_time/ast.json new file mode 100644 index 000000000..319de47a5 --- /dev/null +++ b/parser/testdata/03014_msan_parse_date_time/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function parseDateTimeBestEffort (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toFixedString (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '01\/12\/2017,'" + }, + { + "explain": " Literal UInt64_11" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.000878563, + "rows_read": 10, + "bytes_read": 407 + } +} diff --git a/parser/testdata/03014_msan_parse_date_time/metadata.json b/parser/testdata/03014_msan_parse_date_time/metadata.json new file mode 100644 index 000000000..ef120d978 --- 
/dev/null +++ b/parser/testdata/03014_msan_parse_date_time/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03014_msan_parse_date_time/query.sql b/parser/testdata/03014_msan_parse_date_time/query.sql new file mode 100644 index 000000000..d6daea69c --- /dev/null +++ b/parser/testdata/03014_msan_parse_date_time/query.sql @@ -0,0 +1 @@ +SELECT parseDateTimeBestEffort(toFixedString('01/12/2017,', 11)); -- { serverError CANNOT_PARSE_DATETIME } diff --git a/parser/testdata/03015_aggregator_empty_data_multiple_blocks/ast.json b/parser/testdata/03015_aggregator_empty_data_multiple_blocks/ast.json new file mode 100644 index 000000000..cf5ab74a1 --- /dev/null +++ b/parser/testdata/03015_aggregator_empty_data_multiple_blocks/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery 03015_aggregator_empty_data_multiple_blocks (children 3)" + }, + { + "explain": " Identifier 03015_aggregator_empty_data_multiple_blocks" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration c0 (children 1)" + }, + { + "explain": " DataType Int32" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.00129394, + "rows_read": 9, + "bytes_read": 389 + } +} diff --git a/parser/testdata/03015_aggregator_empty_data_multiple_blocks/metadata.json b/parser/testdata/03015_aggregator_empty_data_multiple_blocks/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03015_aggregator_empty_data_multiple_blocks/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03015_aggregator_empty_data_multiple_blocks/query.sql b/parser/testdata/03015_aggregator_empty_data_multiple_blocks/query.sql new file mode 100644 index 000000000..d2c1816f7 --- /dev/null +++ b/parser/testdata/03015_aggregator_empty_data_multiple_blocks/query.sql @@ -0,0 +1,3 @@ +CREATE TABLE 03015_aggregator_empty_data_multiple_blocks (c0 Int32) ENGINE = Memory(); +INSERT INTO 03015_aggregator_empty_data_multiple_blocks SELECT * FROM generateRandom() LIMIT 1000; +SELECT radians(t1.c0) FROM 03015_aggregator_empty_data_multiple_blocks AS t1 RIGHT ANTI JOIN 03015_aggregator_empty_data_multiple_blocks AS right_0 ON t1.c0=right_0.c0 GROUP BY t1.c0; diff --git a/parser/testdata/03015_analyzer_groupby_fuzz_60772/ast.json b/parser/testdata/03015_analyzer_groupby_fuzz_60772/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03015_analyzer_groupby_fuzz_60772/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03015_analyzer_groupby_fuzz_60772/metadata.json b/parser/testdata/03015_analyzer_groupby_fuzz_60772/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03015_analyzer_groupby_fuzz_60772/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03015_analyzer_groupby_fuzz_60772/query.sql b/parser/testdata/03015_analyzer_groupby_fuzz_60772/query.sql new file mode 100644 index 000000000..5190b8635 --- /dev/null +++ b/parser/testdata/03015_analyzer_groupby_fuzz_60772/query.sql @@ -0,0 +1,23 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/60772 +SELECT toFixedString(toFixedString(toFixedString(toFixedString(toFixedString(toFixedString('%W', 
2), 2), 2),toLowCardinality(toLowCardinality(toNullable(2)))), 2), 2), + toFixedString(toFixedString('2018-01-02 22:33:44', 19), 19), + hasSubsequence(toNullable(materialize(toLowCardinality('garbage'))), 'gr') +GROUP BY + '2018-01-02 22:33:44', + toFixedString(toFixedString('2018-01-02 22:33:44', 19), 19), + 'gr', + '2018-01-02 22:33:44' +SETTINGS enable_analyzer = 1; + +-- WITH CUBE (note that the result is different with the analyzer; the analyzer is correct, including all combinations) +SELECT + toFixedString(toFixedString(toFixedString(toFixedString(toFixedString(toFixedString('%W', 2), 2), 2), toLowCardinality(toLowCardinality(toNullable(2)))), 2), 2), + toFixedString(toFixedString('2018-01-02 22:33:44', 19), 19), + hasSubsequence(toNullable(materialize(toLowCardinality('garbage'))), 'gr') +GROUP BY + '2018-01-02 22:33:44', + toFixedString(toFixedString('2018-01-02 22:33:44', 19), 19), + 'gr', + '2018-01-02 22:33:44' +WITH CUBE +SETTINGS enable_analyzer = 1; diff --git a/parser/testdata/03015_peder1001/ast.json b/parser/testdata/03015_peder1001/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03015_peder1001/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03015_peder1001/metadata.json b/parser/testdata/03015_peder1001/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03015_peder1001/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03015_peder1001/query.sql b/parser/testdata/03015_peder1001/query.sql new file mode 100644 index 000000000..df8e4db15 --- /dev/null +++ b/parser/testdata/03015_peder1001/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + +DROP TABLE IF EXISTS test_data; + +CREATE TABLE test_data +( + ShipmentDate Date +) +ENGINE = Memory; + +INSERT INTO test_data (ShipmentDate) Values ('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'),
('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'), ('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), 
('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), 
('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), 
('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), 
('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), 
('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), 
('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), 
('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), 
('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), 
('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), 
('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), 
('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), 
('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), 
('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), 
('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), 
('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), 
('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), 
('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), 
('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), 
('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), 
('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), 
('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), 
('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), 
('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), 
('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), 
('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), 
('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), 
('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), 
('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), 
('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), 
('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), 
('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), 
('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), 
('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), 
('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), ('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-02-07'), ('2022-02-11'), ('2022-02-15'), ('2022-02-16'), ('2022-02-18'), ('2022-02-22'), ('2022-02-24'), 
('2022-03-02'), ('2022-03-04'), ('2022-03-07'), ('2022-03-09'), ('2022-03-10'), ('2022-03-11'), ('2022-03-14'), ('2022-03-15'), ('2022-03-17'), ('2022-03-18'), ('2022-03-23'), ('2022-04-28'), ('2022-05-24'), ('2022-03-31'), ('2022-04-19'), ('2022-04-25'), ('2022-04-26'), ('2022-05-02'), ('2022-05-04'), ('2022-05-05'), ('2022-05-11'), ('2022-05-12'), ('2022-05-13'), ('2022-05-16'), ('2022-05-18'), ('2022-05-20'), ('2022-05-23'), ('2022-05-27'), ('2022-05-31'), ('2022-05-10'), ('2022-02-17'), ('2022-03-24'), ('2022-05-09'), ('2022-05-30'), ('2022-02-21'), ('2022-01-11'), ('2022-01-28'), ('2022-04-27'), ('2022-05-25'), ('2022-04-18'), ('2022-01-21'), ('2022-03-22'), ('2022-04-01'), ('2022-04-06'), ('2022-04-11'), ('2022-05-19'), ('2022-02-01'), ('2022-02-23'), ('2022-02-09'), ('2022-03-03'), ('2022-04-04'), ('2022-04-05'), ('2022-04-12'), ('2022-04-29'), ('2022-01-06'), ('2022-03-01'), ('2022-03-26'), ('2022-01-10'), ('2022-01-03'), ('2022-05-01'), ('2022-03-21'), ('2022-03-27'), ('2022-01-31'), ('2022-04-13'), ('2022-03-29'), ('2022-02-20'), ('2022-02-06'), ('2022-03-13'), ('2022-02-27'), ('2022-03-20'), ('2022-04-24'), ('2022-05-15'), ('2022-05-22'), ('2022-01-09'), ('2022-04-03'), ('2022-03-12'), ('2022-01-23'), ('2022-05-08'), ('2022-05-29'), ('2022-02-19'), ('2022-05-07'), ('2022-05-26'), ('2022-01-30'), ('2022-03-05'), ('2022-05-21'), ('2022-02-26'), ('2022-01-16'), ('2022-05-17'), ('2022-01-29'), ('2022-02-12'), ('2022-01-02'), ('2022-02-05'),('2022-04-22'), ('2022-02-14'), ('2022-02-28'), ('2022-02-04'), ('2022-02-08'), ('2022-03-16'), ('2022-03-25'), ('2022-02-25'), ('2022-03-08'), ('2022-05-03'), ('2022-05-06'), ('2022-02-10'), ('2022-02-13'), ('2022-03-06'), ('2022-04-07'), ('2022-04-08'), ('2022-04-20'), ('2022-04-21'), ('2022-03-28'), ('2022-03-30'), ('2022-01-04'), ('2022-01-05'), ('2022-01-07'), ('2022-01-12'), ('2022-01-13'), ('2022-01-14'), ('2022-01-17'), ('2022-01-18'), ('2022-01-19'), ('2022-01-20'), ('2022-01-24'), ('2022-01-25'), ('2022-01-26'), ('2022-01-27'), ('2022-02-02'), ('2022-02-03'), ('2022-01-08'); + +SELECT + toDayOfWeek(ShipmentDate) AS c +FROM test_data +WHERE c IS NOT NULL AND lowerUTF8(formatDateTime(date_add(DAY, toInt32(c) - 1, toDate('2024-01-01')), '%W')) LIKE '%m%' +GROUP BY c +ORDER BY c ASC +LIMIT 62 +OFFSET 0; + +DROP TABLE test_data; diff --git a/parser/testdata/03015_with_fill_invalid_expression/ast.json b/parser/testdata/03015_with_fill_invalid_expression/ast.json new file mode 100644 index 000000000..a5d9354ee --- /dev/null +++ b/parser/testdata/03015_with_fill_invalid_expression/ast.json @@ -0,0 +1,121 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number (alias x)" + }, + { + "explain": " Function plus (alias y) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " 
ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " OrderByElement (children 3)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " OrderByElement (children 3)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_5" + } + ], + + "rows": 33, + + "statistics": + { + "elapsed": 0.001222461, + "rows_read": 33, + "bytes_read": 1187 + } +} diff --git a/parser/testdata/03015_with_fill_invalid_expression/metadata.json b/parser/testdata/03015_with_fill_invalid_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03015_with_fill_invalid_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03015_with_fill_invalid_expression/query.sql b/parser/testdata/03015_with_fill_invalid_expression/query.sql new file mode 100644 index 000000000..dbb63c029 --- /dev/null +++ b/parser/testdata/03015_with_fill_invalid_expression/query.sql @@ -0,0 +1,2 @@ +select number as x, number + 1 as y from numbers(5) where number % 3 == 1 order by y, x with fill from 1 to 4, y with fill from 2 to 5; -- {serverError INVALID_WITH_FILL_EXPRESSION} + diff --git a/parser/testdata/03016_analyzer_groupby_fuzz_59796/ast.json b/parser/testdata/03016_analyzer_groupby_fuzz_59796/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03016_analyzer_groupby_fuzz_59796/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03016_analyzer_groupby_fuzz_59796/metadata.json b/parser/testdata/03016_analyzer_groupby_fuzz_59796/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03016_analyzer_groupby_fuzz_59796/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03016_analyzer_groupby_fuzz_59796/query.sql b/parser/testdata/03016_analyzer_groupby_fuzz_59796/query.sql new file mode 100644 index 000000000..6c926c328 --- /dev/null +++ b/parser/testdata/03016_analyzer_groupby_fuzz_59796/query.sql @@ -0,0 +1,6 @@ +SELECT + concat(concat(unhex('00'), concat(unhex('00'), concat(unhex(toFixedString('00', 2)), toFixedString(toFixedString(' key="v" ', 9), 9), concat(unhex('00'), toFixedString(' key="v" ', 9)), toFixedString(materialize(toLowCardinality(' key="v" ')), 9)), toFixedString(' key="v" ', 9)), toFixedString(' key="v" ', 9)), unhex('00'), ' key="v" ') AS haystack +GROUP BY + concat(unhex('00'), toFixedString(materialize(toFixedString(' key="v" ', 9)), 9), toFixedString(toFixedString('00', 2), toNullable(2)), toFixedString(toFixedString(toFixedString(' key="v" ', 9), 9), 9)), + concat(' key="v" ') +SETTINGS enable_analyzer = 1; diff --git a/parser/testdata/03017_analyzer_groupby_fuzz_61600/ast.json b/parser/testdata/03017_analyzer_groupby_fuzz_61600/ast.json new file mode 100644 index 000000000..7b925cb18 --- /dev/null +++ b/parser/testdata/03017_analyzer_groupby_fuzz_61600/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery 
set_index_not__fuzz_0 (children 2)" + }, + { + "explain": " Identifier set_index_not__fuzz_0" + }, + { + "explain": " Columns definition (children 2)" + }, + { + "explain": "  ExpressionList (children 2)" + }, + { + "explain": "   ColumnDeclaration name (children 1)" + }, + { + "explain": "    DataType String" + }, + { + "explain": "   ColumnDeclaration status (children 1)" + }, + { + "explain": "    DataType Enum8 (children 1)" + }, + { + "explain": "     ExpressionList (children 2)" + }, + { + "explain": "      Function equals (children 1)" + }, + { + "explain": "       ExpressionList (children 2)" + }, + { + "explain": "        Literal 'alive'" + }, + { + "explain": "        Literal UInt64_0" + }, + { + "explain": "      Function equals (children 1)" + }, + { + "explain": "       ExpressionList (children 2)" + }, + { + "explain": "        Literal 'rip'" + }, + { + "explain": "        Literal UInt64_1" + }, + { + "explain": "  ExpressionList (children 1)" + }, + { + "explain": "   Index (children 2)" + }, + { + "explain": "    Identifier status" + }, + { + "explain": "    Function set (children 1)" + }, + { + "explain": "     ExpressionList (children 1)" + }, + { + "explain": "      Literal UInt64_2" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001496397, + "rows_read": 23, + "bytes_read": 866 + } +} diff --git a/parser/testdata/03017_analyzer_groupby_fuzz_61600/metadata.json b/parser/testdata/03017_analyzer_groupby_fuzz_61600/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03017_analyzer_groupby_fuzz_61600/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03017_analyzer_groupby_fuzz_61600/query.sql b/parser/testdata/03017_analyzer_groupby_fuzz_61600/query.sql new file mode 100644 index 000000000..b22ea42b6 --- /dev/null +++ b/parser/testdata/03017_analyzer_groupby_fuzz_61600/query.sql @@ -0,0 +1,25 @@ +CREATE TABLE set_index_not__fuzz_0 (`name` String, `status` Enum8('alive' = 0, 'rip' = 1), INDEX idx_status status TYPE set(2) GRANULARITY 1) +ENGINE = MergeTree ORDER BY name +SETTINGS index_granularity = 8192; + +INSERT INTO set_index_not__fuzz_0 SELECT * from generateRandom() limit 1; + +SELECT + 38, + concat(position(concat(concat(position(concat(toUInt256(3)), 'ca', 2), 3),NULLIF(1, materialize(toLowCardinality(1)))), toLowCardinality(toNullable('ca'))), concat(NULLIF(1, 1), concat(3), toNullable(3))) +FROM set_index_not__fuzz_0 +GROUP BY + toNullable(3), + concat(concat(NULLIF(1, 1), toNullable(toNullable(3)))) +SETTINGS enable_analyzer = 1; + +-- WITH ROLLUP (note that the result differs with the analyzer; the analyzer is correct, including all combinations) +SELECT + 38, + concat(position(concat(concat(position(concat(toUInt256(3)), 'ca', 2), 3), NULLIF(1, materialize(toLowCardinality(1)))), toLowCardinality(toNullable('ca'))), concat(NULLIF(1, 1), concat(3), toNullable(3))) +FROM set_index_not__fuzz_0 +GROUP BY + toNullable(3), + concat(concat(NULLIF(1, 1), toNullable(toNullable(3)))) +WITH ROLLUP +SETTINGS enable_analyzer = 1; diff --git a/parser/testdata/03018_analyzer_distributed_query_with_positional_arguments/ast.json b/parser/testdata/03018_analyzer_distributed_query_with_positional_arguments/ast.json new file mode 100644 index 000000000..fdc946014 --- /dev/null +++ b/parser/testdata/03018_analyzer_distributed_query_with_positional_arguments/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + 
"explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0 (alias x)" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001146135, + "rows_read": 5, + "bytes_read": 187 + } +} diff --git a/parser/testdata/03018_analyzer_distributed_query_with_positional_arguments/metadata.json b/parser/testdata/03018_analyzer_distributed_query_with_positional_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03018_analyzer_distributed_query_with_positional_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03018_analyzer_distributed_query_with_positional_arguments/query.sql b/parser/testdata/03018_analyzer_distributed_query_with_positional_arguments/query.sql new file mode 100644 index 000000000..16ba3b155 --- /dev/null +++ b/parser/testdata/03018_analyzer_distributed_query_with_positional_arguments/query.sql @@ -0,0 +1,7 @@ +select 0 as x +from remote('127.0.0.{1,2}', system.one) +group by x; + +select 0 as x +from remote('127.0.0.{1,2}', system.one) +order by x; diff --git a/parser/testdata/03018_analyzer_greater_null/ast.json b/parser/testdata/03018_analyzer_greater_null/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03018_analyzer_greater_null/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03018_analyzer_greater_null/metadata.json b/parser/testdata/03018_analyzer_greater_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03018_analyzer_greater_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03018_analyzer_greater_null/query.sql b/parser/testdata/03018_analyzer_greater_null/query.sql new file mode 100644 index 000000000..66ea53e45 --- /dev/null +++ b/parser/testdata/03018_analyzer_greater_null/query.sql @@ -0,0 +1,4 @@ +SELECT + max(NULL > 255) > NULL AS a, + count(NULL > 1.) 
> 1048577 +FROM numbers(10); diff --git a/parser/testdata/03019_numbers_pretty/ast.json b/parser/testdata/03019_numbers_pretty/ast.json new file mode 100644 index 000000000..b09dfde8a --- /dev/null +++ b/parser/testdata/03019_numbers_pretty/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001236713, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03019_numbers_pretty/metadata.json b/parser/testdata/03019_numbers_pretty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03019_numbers_pretty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03019_numbers_pretty/query.sql b/parser/testdata/03019_numbers_pretty/query.sql new file mode 100644 index 000000000..91efbe61d --- /dev/null +++ b/parser/testdata/03019_numbers_pretty/query.sql @@ -0,0 +1,7 @@ +SET output_format_pretty_row_numbers = 0; +SELECT 1.23e9 FORMAT Pretty; +SELECT -1.23e9 FORMAT Pretty; +SELECT inf FORMAT Pretty; +SELECT -inf FORMAT Pretty; +SELECT nan FORMAT Pretty; +SELECT 1e111 FORMAT Pretty; diff --git a/parser/testdata/03020_order_by_SimpleAggregateFunction/ast.json b/parser/testdata/03020_order_by_SimpleAggregateFunction/ast.json new file mode 100644 index 000000000..b4b07c97f --- /dev/null +++ b/parser/testdata/03020_order_by_SimpleAggregateFunction/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001302306, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03020_order_by_SimpleAggregateFunction/metadata.json b/parser/testdata/03020_order_by_SimpleAggregateFunction/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03020_order_by_SimpleAggregateFunction/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03020_order_by_SimpleAggregateFunction/query.sql b/parser/testdata/03020_order_by_SimpleAggregateFunction/query.sql new file mode 100644 index 000000000..fee42d1ab --- /dev/null +++ b/parser/testdata/03020_order_by_SimpleAggregateFunction/query.sql @@ -0,0 +1,33 @@ +set allow_suspicious_primary_key = 0; + +drop table if exists data; + +create table data (key Int, value AggregateFunction(sum, UInt64)) engine=AggregatingMergeTree() order by (key, value); -- { serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY } +create table data (key Int, value SimpleAggregateFunction(sum, UInt64)) engine=AggregatingMergeTree() order by (key, value); -- { serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY } + +create table data (key Int, value AggregateFunction(sum, UInt64)) engine=AggregatingMergeTree() primary key value; -- { serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY } +create table data (key Int, value SimpleAggregateFunction(sum, UInt64)) engine=AggregatingMergeTree() primary key value; -- { serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY } + +create table data (key Int, value AggregateFunction(sum, UInt64)) engine=AggregatingMergeTree() primary key value order by (value, key); -- { serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY } +create table data (key Int, value SimpleAggregateFunction(sum, UInt64)) engine=AggregatingMergeTree() primary key value order by (value, key); -- { serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY } + +set allow_suspicious_primary_key = 1; +create table data 
(key Int, value SimpleAggregateFunction(sum, UInt64)) engine=AggregatingMergeTree() primary key value order by (value, key); + +-- ATTACH should work regardless of allow_suspicious_primary_key +set allow_suspicious_primary_key = 0; +detach table data; +attach table data; +drop table data; + +-- ALTER AggregatingMergeTree +create table data (key Int) engine=AggregatingMergeTree() order by (key); +alter table data add column value SimpleAggregateFunction(sum, UInt64), modify order by (key, value); -- { serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY } +alter table data add column value SimpleAggregateFunction(sum, UInt64), modify order by (key, value) settings allow_suspicious_primary_key=1; +drop table data; + +-- ALTER ReplicatedAggregatingMergeTree +create table data_rep (key Int) engine=ReplicatedAggregatingMergeTree('/tables/{database}', 'r1') order by (key); +alter table data_rep add column value SimpleAggregateFunction(sum, UInt64), modify order by (key, value); -- { serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY } +alter table data_rep add column value SimpleAggregateFunction(sum, UInt64), modify order by (key, value) settings allow_suspicious_primary_key=1; +drop table data_rep; diff --git a/parser/testdata/03022_alter_materialized_view_query_has_inner_table/ast.json b/parser/testdata/03022_alter_materialized_view_query_has_inner_table/ast.json new file mode 100644 index 000000000..4087e3643 --- /dev/null +++ b/parser/testdata/03022_alter_materialized_view_query_has_inner_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery src_table (children 1)" + }, + { + "explain": " Identifier src_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000972299, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/03022_alter_materialized_view_query_has_inner_table/metadata.json b/parser/testdata/03022_alter_materialized_view_query_has_inner_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03022_alter_materialized_view_query_has_inner_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03022_alter_materialized_view_query_has_inner_table/query.sql b/parser/testdata/03022_alter_materialized_view_query_has_inner_table/query.sql new file mode 100644 index 000000000..cd36be085 --- /dev/null +++ b/parser/testdata/03022_alter_materialized_view_query_has_inner_table/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS src_table; +DROP TABLE IF EXISTS mv; + +CREATE TABLE src_table (`a` UInt32, `b` UInt32) ENGINE = MergeTree ORDER BY a; +CREATE MATERIALIZED VIEW mv (`a` UInt32) ENGINE = MergeTree ORDER BY a AS SELECT a FROM src_table; + +INSERT INTO src_table (a, b) VALUES (1, 1), (2, 2); + +SELECT * FROM mv; + +SET allow_experimental_alter_materialized_view_structure = 1; +ALTER TABLE mv MODIFY QUERY SELECT a, b FROM src_table; -- { serverError NO_SUCH_COLUMN_IN_TABLE } + +DROP TABLE src_table; +DROP TABLE mv; diff --git a/parser/testdata/03022_highlight_digit_groups/ast.json b/parser/testdata/03022_highlight_digit_groups/ast.json new file mode 100644 index 000000000..d0492970a --- /dev/null +++ b/parser/testdata/03022_highlight_digit_groups/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001018782, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git 
a/parser/testdata/03022_highlight_digit_groups/metadata.json b/parser/testdata/03022_highlight_digit_groups/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03022_highlight_digit_groups/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03022_highlight_digit_groups/query.sql b/parser/testdata/03022_highlight_digit_groups/query.sql new file mode 100644 index 000000000..8c371c409 --- /dev/null +++ b/parser/testdata/03022_highlight_digit_groups/query.sql @@ -0,0 +1,9 @@ +SET output_format_pretty_display_footer_column_names=0; +SET output_format_pretty_row_numbers = 0; + +SELECT exp10(number) * (number % 2 ? 1 : -1) FROM numbers(30) FORMAT PrettySpace SETTINGS output_format_pretty_color = 1; + +SELECT exp10(number) FROM numbers(10) FORMAT PrettySpace SETTINGS output_format_pretty_color = 1, output_format_pretty_highlight_digit_groups = 0; +SELECT exp10(number) FROM numbers(10) FORMAT PrettySpace; + +SELECT exp10(number) + exp10(-number) FROM numbers(10) FORMAT PrettySpace SETTINGS output_format_pretty_color = 1; diff --git a/parser/testdata/03023_analyzer_optimize_group_by_function_keys_with_nulls/ast.json b/parser/testdata/03023_analyzer_optimize_group_by_function_keys_with_nulls/ast.json new file mode 100644 index 000000000..686f072f5 --- /dev/null +++ b/parser/testdata/03023_analyzer_optimize_group_by_function_keys_with_nulls/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00133759, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03023_analyzer_optimize_group_by_function_keys_with_nulls/metadata.json b/parser/testdata/03023_analyzer_optimize_group_by_function_keys_with_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03023_analyzer_optimize_group_by_function_keys_with_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03023_analyzer_optimize_group_by_function_keys_with_nulls/query.sql b/parser/testdata/03023_analyzer_optimize_group_by_function_keys_with_nulls/query.sql new file mode 100644 index 000000000..0d0a81c91 --- /dev/null +++ b/parser/testdata/03023_analyzer_optimize_group_by_function_keys_with_nulls/query.sql @@ -0,0 +1,4 @@ +set enable_analyzer=1; +set group_by_use_nulls=1; +set optimize_group_by_function_keys=1; +SELECT ignore(toLowCardinality(number)) FROM numbers(10) GROUP BY GROUPING SETS ((ignore(toLowCardinality(number)), toLowCardinality(number))); diff --git a/parser/testdata/03023_group_by_use_nulls_analyzer_crashes/ast.json b/parser/testdata/03023_group_by_use_nulls_analyzer_crashes/ast.json new file mode 100644 index 000000000..d1527cf77 --- /dev/null +++ b/parser/testdata/03023_group_by_use_nulls_analyzer_crashes/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001136191, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03023_group_by_use_nulls_analyzer_crashes/metadata.json b/parser/testdata/03023_group_by_use_nulls_analyzer_crashes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03023_group_by_use_nulls_analyzer_crashes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03023_group_by_use_nulls_analyzer_crashes/query.sql b/parser/testdata/03023_group_by_use_nulls_analyzer_crashes/query.sql new file mode 100644 index 000000000..d3d6ecaad --- /dev/null +++ b/parser/testdata/03023_group_by_use_nulls_analyzer_crashes/query.sql @@ -0,0 +1,59 @@ +set enable_analyzer = 1, group_by_use_nulls = 1; + +SELECT tuple(tuple(number)) as x FROM numbers(10) GROUP BY (number, tuple(number)) with cube order by x; + +select tuple(array(number)) as x FROM numbers(10) GROUP BY number, array(number) WITH ROLLUP order by x; + +SELECT tuple(number) AS x FROM numbers(10) GROUP BY GROUPING SETS (number) order by x; + +SELECT ignore(toFixedString('Lambda as function parameter', 28), toNullable(28), ignore(8)), sum(marks) FROM system.parts WHERE database = currentDatabase() GROUP BY GROUPING SETS ((2)) FORMAT Null settings optimize_injective_functions_in_group_by=1, optimize_group_by_function_keys=1, group_by_use_nulls=1; -- { serverError ILLEGAL_AGGREGATION } + +SELECT toLowCardinality(materialize('a' AS key)), 'b' AS value GROUP BY key WITH CUBE SETTINGS group_by_use_nulls = 1; + +SELECT tuple(tuple(number)) AS x +FROM numbers(10) +GROUP BY (number, (toString(x), number)) + WITH CUBE +SETTINGS group_by_use_nulls = 1 FORMAT Null; + +SELECT tuple(number + 1) AS x FROM numbers(10) GROUP BY number + 1, toString(x) WITH CUBE settings group_by_use_nulls=1 FORMAT Null; + +SELECT tuple(tuple(number)) AS x FROM numbers(10) WHERE toString(toUUID(tuple(number), NULL), x) GROUP BY number, (toString(x), number) WITH CUBE SETTINGS group_by_use_nulls = 1 FORMAT Null; + +SELECT materialize('a'), 'a' AS key GROUP BY key WITH CUBE WITH TOTALS SETTINGS group_by_use_nulls = 1; + +EXPLAIN QUERY TREE +SELECT a, b +FROM numbers(3) +GROUP BY number as a, (number + number) as b WITH CUBE +ORDER BY a, b format Null; + +SELECT a, b +FROM numbers(3) +GROUP BY number as a, (number + number) as b WITH CUBE +ORDER BY a, b; + +SELECT + a, + b, + cramersVBiasCorrected(a, b) +FROM numbers(3) +GROUP BY + number AS a, + number + number AS b + WITH CUBE +SETTINGS group_by_use_nulls = 1; + +SELECT arrayMap(x -> '.', range(number % 10)) AS k FROM remote('127.0.0.{2,3}', numbers(10)) GROUP BY GROUPING SETS ((k)) ORDER BY k settings group_by_use_nulls=1; + +SELECT count('Lambda as function parameter') AS c FROM (SELECT ignore(ignore('Lambda as function parameter', 28, 28, 28, 28, 28, 28), 28), materialize('Lambda as function parameter'), 28, 28, 'world', 5 FROM system.numbers WHERE ignore(materialize('Lambda as function parameter'), materialize(toLowCardinality(28)), 28, 28, 28, 28, toUInt128(28)) LIMIT 2) GROUP BY GROUPING SETS ((toLowCardinality(0)), (toLowCardinality(toNullable(28))), (1)) HAVING nullIf(c, 10) < 50 ORDER BY c ASC NULLS FIRST settings group_by_use_nulls=1; -- { serverError ILLEGAL_AGGREGATION } + +SELECT arraySplit(x -> 0, []) WHERE materialize(1) GROUP BY (0, ignore('a')) WITH ROLLUP SETTINGS group_by_use_nulls = 1; + +SELECT arraySplit(x -> toUInt8(number), []) from numbers(1) GROUP BY toUInt8(number) WITH ROLLUP SETTINGS group_by_use_nulls = 1; + +SELECT arraySplit(number -> toUInt8(number), []) from numbers(1) GROUP BY toUInt8(number) WITH ROLLUP SETTINGS group_by_use_nulls = 1; + +SELECT count(arraySplit(number -> toUInt8(number), [arraySplit(x -> toUInt8(number), [])])) FROM numbers(10) GROUP BY number, [number] WITH ROLLUP settings group_by_use_nulls=1; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT count(arraySplit(x -> toUInt8(number), [])) FROM numbers(10) GROUP BY 
number, [number] WITH ROLLUP settings group_by_use_nulls=1; diff --git a/parser/testdata/03023_remove_unused_column_distinct/ast.json b/parser/testdata/03023_remove_unused_column_distinct/ast.json new file mode 100644 index 000000000..a0c0fadbb --- /dev/null +++ b/parser/testdata/03023_remove_unused_column_distinct/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier product_id" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001082461, + "rows_read": 5, + "bytes_read": 182 + } +} diff --git a/parser/testdata/03023_remove_unused_column_distinct/metadata.json b/parser/testdata/03023_remove_unused_column_distinct/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03023_remove_unused_column_distinct/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03023_remove_unused_column_distinct/query.sql b/parser/testdata/03023_remove_unused_column_distinct/query.sql new file mode 100644 index 000000000..af8756f1f --- /dev/null +++ b/parser/testdata/03023_remove_unused_column_distinct/query.sql @@ -0,0 +1,15 @@ +SELECT product_id +FROM +( + SELECT DISTINCT + product_id, + section_id + FROM + ( + SELECT + concat('product_', number % 2) AS product_id, + concat('section_', number % 3) AS section_id + FROM numbers(10) + ) +) +SETTINGS enable_analyzer = 1; diff --git a/parser/testdata/03024_total_rows_approx_is_set_for_system_zeros_and_generate_random/ast.json b/parser/testdata/03024_total_rows_approx_is_set_for_system_zeros_and_generate_random/ast.json new file mode 100644 index 000000000..5d26f9746 --- /dev/null +++ b/parser/testdata/03024_total_rows_approx_is_set_for_system_zeros_and_generate_random/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001206626, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03024_total_rows_approx_is_set_for_system_zeros_and_generate_random/metadata.json b/parser/testdata/03024_total_rows_approx_is_set_for_system_zeros_and_generate_random/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03024_total_rows_approx_is_set_for_system_zeros_and_generate_random/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03024_total_rows_approx_is_set_for_system_zeros_and_generate_random/query.sql b/parser/testdata/03024_total_rows_approx_is_set_for_system_zeros_and_generate_random/query.sql new file mode 100644 index 000000000..0db09ead2 --- /dev/null +++ b/parser/testdata/03024_total_rows_approx_is_set_for_system_zeros_and_generate_random/query.sql @@ -0,0 +1,9 @@ +SET max_rows_to_read = 1e11; + +SELECT * FROM system.numbers LIMIT 1e12 FORMAT Null; -- { serverError TOO_MANY_ROWS } +SELECT * FROM system.numbers_mt LIMIT 1e12 FORMAT Null; -- { serverError TOO_MANY_ROWS } + +SELECT * FROM system.zeros LIMIT 1e12 FORMAT Null; -- { serverError TOO_MANY_ROWS } +SELECT * FROM system.zeros_mt LIMIT 1e12 FORMAT Null; -- { serverError TOO_MANY_ROWS } + +SELECT * FROM generateRandom() LIMIT 1e12 FORMAT Null; -- { serverError TOO_MANY_ROWS } diff --git 
a/parser/testdata/03030_system_flush_distributed_settings/ast.json b/parser/testdata/03030_system_flush_distributed_settings/ast.json new file mode 100644 index 000000000..355277c18 --- /dev/null +++ b/parser/testdata/03030_system_flush_distributed_settings/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ephemeral (children 1)" + }, + { + "explain": " Identifier ephemeral" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001313493, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/03030_system_flush_distributed_settings/metadata.json b/parser/testdata/03030_system_flush_distributed_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03030_system_flush_distributed_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03030_system_flush_distributed_settings/query.sql b/parser/testdata/03030_system_flush_distributed_settings/query.sql new file mode 100644 index 000000000..fac673a4f --- /dev/null +++ b/parser/testdata/03030_system_flush_distributed_settings/query.sql @@ -0,0 +1,22 @@ +drop table if exists ephemeral; +drop table if exists dist_in; +drop table if exists data; +drop table if exists mv; +drop table if exists dist_out; + +create table ephemeral (key Int, value Int) engine=Null(); +create table dist_in as ephemeral engine=Distributed(test_shard_localhost, currentDatabase(), ephemeral, key) settings background_insert_batch=1; +create table data (key Int, uniq_values Int) engine=TinyLog(); +create materialized view mv to data as select key, uniqExact(value::String) uniq_values from ephemeral group by key; +system stop distributed sends dist_in; +create table dist_out as data engine=Distributed(test_shard_localhost, currentDatabase(), data); + +set prefer_localhost_replica=0; +SET optimize_trivial_insert_select = 1; + +-- Due to pushing to the MV with aggregation, the query needs ~300MiB, +-- but it will be done in the background via "system flush distributed". +insert into dist_in select number/100, number from system.numbers limit 3e6 settings max_block_size=3e6, max_memory_usage='100Mi'; +system flush distributed dist_in; -- { serverError MEMORY_LIMIT_EXCEEDED } +system flush distributed dist_in settings max_memory_usage=0; +select count() from dist_out; diff --git a/parser/testdata/03031_distinguish_bool_and_int_in_settings/ast.json b/parser/testdata/03031_distinguish_bool_and_int_in_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03031_distinguish_bool_and_int_in_settings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03031_distinguish_bool_and_int_in_settings/metadata.json b/parser/testdata/03031_distinguish_bool_and_int_in_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03031_distinguish_bool_and_int_in_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03031_distinguish_bool_and_int_in_settings/query.sql b/parser/testdata/03031_distinguish_bool_and_int_in_settings/query.sql new file mode 100644 index 000000000..33be34a40 --- /dev/null +++ b/parser/testdata/03031_distinguish_bool_and_int_in_settings/query.sql @@ -0,0 +1,65 @@ +-- Custom settings must remember their types - whether it's a boolean or an integer. + +-- Different ways to set a boolean. 
+SET custom_f1 = false; +SET custom_f2 = False; +SET custom_f3 = FALSE; + +SET custom_n0 = 0; +SET custom_n1 = 1; + +SET custom_t1 = true; +SET custom_t2 = True; +SET custom_t3 = TRUE; + +SELECT '-- Custom settings from system.settings'; + +SELECT name, value, type FROM system.settings WHERE startsWith(name, 'custom_') ORDER BY name; + +SELECT '-- Custom settings via getSetting()'; + +SELECT 'custom_f1' AS name, getSetting(name) AS value, toTypeName(value); +SELECT 'custom_f2' AS name, getSetting(name) AS value, toTypeName(value); +SELECT 'custom_f3' AS name, getSetting(name) AS value, toTypeName(value); + +SELECT 'custom_n0' AS name, getSetting(name) AS value, toTypeName(value); +SELECT 'custom_n1' AS name, getSetting(name) AS value, toTypeName(value); + +SELECT 'custom_t1' AS name, getSetting(name) AS value, toTypeName(value); +SELECT 'custom_t2' AS name, getSetting(name) AS value, toTypeName(value); +SELECT 'custom_t3' AS name, getSetting(name) AS value, toTypeName(value); + +-- Built-in settings have hardcoded types. +SELECT '-- Built-in settings'; + +SET async_insert = false; +SELECT name, value, type FROM system.settings WHERE name = 'async_insert'; +SELECT 'async_insert' AS name, getSetting(name) AS value, toTypeName(value); + +SET async_insert = False; +SELECT name, value, type FROM system.settings WHERE name = 'async_insert'; +SELECT 'async_insert' AS name, getSetting(name) AS value, toTypeName(value); + +SET async_insert = FALSE; +SELECT name, value, type FROM system.settings WHERE name = 'async_insert'; +SELECT 'async_insert' AS name, getSetting(name) AS value, toTypeName(value); + +SET async_insert = 0; +SELECT name, value, type FROM system.settings WHERE name = 'async_insert'; +SELECT 'async_insert' AS name, getSetting(name) AS value, toTypeName(value); + +SET async_insert = 1; +SELECT name, value, type FROM system.settings WHERE name = 'async_insert'; +SELECT 'async_insert' AS name, getSetting(name) AS value, toTypeName(value); + +SET async_insert = true; +SELECT name, value, type FROM system.settings WHERE name = 'async_insert'; +SELECT 'async_insert' AS name, getSetting(name) AS value, toTypeName(value); + +SET async_insert = True; +SELECT name, value, type FROM system.settings WHERE name = 'async_insert'; +SELECT 'async_insert' AS name, getSetting(name) AS value, toTypeName(value); + +SET async_insert = TRUE; +SELECT name, value, type FROM system.settings WHERE name = 'async_insert'; +SELECT 'async_insert' AS name, getSetting(name) AS value, toTypeName(value); diff --git a/parser/testdata/03031_filter_float64_logical_error/ast.json b/parser/testdata/03031_filter_float64_logical_error/ast.json new file mode 100644 index 000000000..0291e8bbf --- /dev/null +++ b/parser/testdata/03031_filter_float64_logical_error/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery 03031_test (children 1)" + }, + { + "explain": " Identifier 03031_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001344972, + "rows_read": 2, + "bytes_read": 73 + } +} diff --git a/parser/testdata/03031_filter_float64_logical_error/metadata.json b/parser/testdata/03031_filter_float64_logical_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03031_filter_float64_logical_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03031_filter_float64_logical_error/query.sql b/parser/testdata/03031_filter_float64_logical_error/query.sql new 
file mode 100644 index 000000000..261cc61e4 --- /dev/null +++ b/parser/testdata/03031_filter_float64_logical_error/query.sql @@ -0,0 +1,58 @@ +CREATE TABLE 03031_test +( + `id` UInt64, + `value_1` String, + `value_2` String, + `value_3` String, + INDEX value_1_idx value_1 TYPE bloom_filter GRANULARITY 1, + INDEX value_2_idx value_2 TYPE ngrambf_v1(3, 512, 2, 0) GRANULARITY 1, + INDEX value_3_idx value_3 TYPE tokenbf_v1(512, 3, 0) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO 03031_test SELECT + number, + toString(number), + toString(number), + toString(number) +FROM numbers(10); + +SELECT + count('9223372036854775806'), + 7 +FROM 03031_test +PREWHERE (id = NULL) AND 1024 +WHERE 0.0001 +GROUP BY '0.03' +SETTINGS force_primary_key = 1, force_data_skipping_indices = 'value_1_idx, value_2_idx', enable_analyzer=0; + +SELECT + count('9223372036854775806'), + 7 +FROM 03031_test +PREWHERE (id = NULL) AND 1024 +WHERE 0.0001 +GROUP BY '0.03' + WITH ROLLUP +SETTINGS force_primary_key = 1, force_data_skipping_indices = 'value_1_idx, value_2_idx', enable_analyzer=1; + +-- Distributed queries currently return one row with count()==0 +SELECT + count('9223372036854775806'), + 7 +FROM remote('127.0.0.{1,2}', currentDatabase(), 03031_test) +PREWHERE (id = NULL) AND 1024 +WHERE 0.0001 +GROUP BY '0.03' +SETTINGS force_primary_key = 1, force_data_skipping_indices = 'value_1_idx, value_2_idx', enable_analyzer=0; + +SELECT + count('9223372036854775806'), + 7 +FROM 03031_test +PREWHERE (id = NULL) AND 1024 +WHERE 0.0001 +GROUP BY '0.03' +SETTINGS force_primary_key = 1, force_data_skipping_indices = 'value_1_idx, value_2_idx', enable_analyzer=0, parallel_replicas_only_with_analyzer=0, +allow_experimental_parallel_reading_from_replicas=1, cluster_for_parallel_replicas='parallel_replicas', max_parallel_replicas=100, parallel_replicas_for_non_replicated_merge_tree=1; diff --git a/parser/testdata/03031_input_format_allow_errors_num_bad_escape_sequence/ast.json b/parser/testdata/03031_input_format_allow_errors_num_bad_escape_sequence/ast.json new file mode 100644 index 000000000..f108396e6 --- /dev/null +++ b/parser/testdata/03031_input_format_allow_errors_num_bad_escape_sequence/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier JSONEachRow" + }, + { + "explain": " Literal '{\"item\" : \"some string\"}, {\"item\":\"\\\\\\\\ \\\\ud83d\"}'" + }, + { + "explain": " Set" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001350074, + "rows_read": 13, + "bytes_read": 515 + } +} diff --git a/parser/testdata/03031_input_format_allow_errors_num_bad_escape_sequence/metadata.json b/parser/testdata/03031_input_format_allow_errors_num_bad_escape_sequence/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03031_input_format_allow_errors_num_bad_escape_sequence/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03031_input_format_allow_errors_num_bad_escape_sequence/query.sql b/parser/testdata/03031_input_format_allow_errors_num_bad_escape_sequence/query.sql new file mode 100644 index 000000000..d551a449a --- /dev/null +++ b/parser/testdata/03031_input_format_allow_errors_num_bad_escape_sequence/query.sql @@ -0,0 +1,2 @@ +select * from format(JSONEachRow, '{"item" : "some string"}, {"item":"\\\\ \ud83d"}') settings input_format_allow_errors_num=1; + diff --git a/parser/testdata/03031_low_cardinality_logical_error/ast.json b/parser/testdata/03031_low_cardinality_logical_error/ast.json new file mode 100644 index 000000000..c28bbd386 --- /dev/null +++ b/parser/testdata/03031_low_cardinality_logical_error/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001214658, + "rows_read": 5, + "bytes_read": 169 + } +} diff --git a/parser/testdata/03031_low_cardinality_logical_error/metadata.json b/parser/testdata/03031_low_cardinality_logical_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03031_low_cardinality_logical_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03031_low_cardinality_logical_error/query.sql b/parser/testdata/03031_low_cardinality_logical_error/query.sql new file mode 100644 index 000000000..02ef0585b --- /dev/null +++ b/parser/testdata/03031_low_cardinality_logical_error/query.sql @@ -0,0 +1,14 @@ +SELECT * +FROM ( + SELECT + ([toString(number % 2)] :: Array(LowCardinality(String))) AS item_id, + count() + FROM numbers(3) + GROUP BY item_id WITH TOTALS +) AS l FULL JOIN ( + SELECT + ([toString((number % 2) * 2)] :: Array(String)) AS item_id + FROM numbers(3) +) AS r +ON l.item_id = r.item_id +ORDER BY 1,2,3; diff --git a/parser/testdata/03031_read_in_order_optimization_with_virtual_row/ast.json b/parser/testdata/03031_read_in_order_optimization_with_virtual_row/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03031_read_in_order_optimization_with_virtual_row/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03031_read_in_order_optimization_with_virtual_row/metadata.json b/parser/testdata/03031_read_in_order_optimization_with_virtual_row/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03031_read_in_order_optimization_with_virtual_row/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03031_read_in_order_optimization_with_virtual_row/query.sql b/parser/testdata/03031_read_in_order_optimization_with_virtual_row/query.sql new file mode 100644 index 000000000..8c5422550 --- /dev/null +++ b/parser/testdata/03031_read_in_order_optimization_with_virtual_row/query.sql @@ -0,0 +1,221 @@ +-- Tags: no-parallel-replicas +-- ^ because we are using query_log + +SET read_in_order_use_virtual_row = 1; +SET use_query_condition_cache = 0; + +DROP TABLE IF EXISTS t; + +CREATE TABLE t +( + `x` UInt64, + `y` UInt64, + `z` UInt64, + `k` UInt64 +) +ENGINE = MergeTree +ORDER BY (x, y, z) +SETTINGS index_granularity = 8192, +index_granularity_bytes = 10485760; + +SYSTEM STOP MERGES t; + +INSERT INTO t SELECT + number, + 
number, + number, + number +FROM numbers(8192 * 3); + +INSERT INTO t SELECT + number + (8192 * 3), + number + (8192 * 3), + number + (8192 * 3), + number +FROM numbers(8192 * 3); + +-- Expecting 2 virtual rows + one chunk (8192) for result + one extra chunk for next consumption in merge transform (8192), +-- both chunks come from the same part. +SELECT x +FROM t +ORDER BY x ASC +LIMIT 4 +SETTINGS max_block_size = 8192, +read_in_order_two_level_merge_threshold = 0, --force preliminary merge +max_threads = 1, +optimize_read_in_order = 1, +log_comment = 'preliminary merge, no filter'; + +SYSTEM FLUSH LOGS query_log; + +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() +AND log_comment = 'preliminary merge, no filter' +AND type = 'QueryFinish' +ORDER BY query_start_time DESC +limit 1; + +SELECT '========'; +-- Expecting 2 virtual rows + two chunks (8192*2) get filtered out + one chunk for result (8192), +-- all chunks come from the same part. +SELECT k +FROM t +WHERE k > 8192 * 2 +ORDER BY x ASC +LIMIT 4 +SETTINGS max_block_size = 8192, +read_in_order_two_level_merge_threshold = 0, --force preliminary merge +max_threads = 1, +optimize_read_in_order = 1, +log_comment = 'preliminary merge with filter'; + +SYSTEM FLUSH LOGS query_log; + +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() +AND log_comment = 'preliminary merge with filter' +AND type = 'QueryFinish' +ORDER BY query_start_time DESC +LIMIT 1; + +SELECT '========'; +-- Expecting 2 virtual rows + one chunk (8192) for result + one extra chunk for next consumption in merge transform (8192), +-- both chunks come from the same part. +SELECT x +FROM t +ORDER BY x ASC +LIMIT 4 +SETTINGS max_block_size = 8192, +read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge +max_threads = 1, +optimize_read_in_order = 1, +log_comment = 'no preliminary merge, no filter'; + +SYSTEM FLUSH LOGS query_log; + +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() +AND log_comment = 'no preliminary merge, no filter' +AND type = 'QueryFinish' +ORDER BY query_start_time DESC +LIMIT 1; + +SELECT '========'; +-- Expecting 2 virtual rows + two chunks (8192*2) get filtered out + one chunk for result (8192), +-- all chunks come from the same part. 
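+-- (Illustrative arithmetic, assuming the block layout above: that expectation works out to
+-- 2 + 8192 * 2 + 8192 = 24578 rows read, which is what the query_log check below should
+-- report for this log_comment.)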
+SELECT k +FROM t +WHERE k > 8192 * 2 +ORDER BY x ASC +LIMIT 4 +SETTINGS max_block_size = 8192, +read_in_order_two_level_merge_threshold = 5, --avoid preliminary merge +max_threads = 1, +optimize_read_in_order = 1, +log_comment = 'no preliminary merge, with filter'; + +SYSTEM FLUSH LOGS query_log; + +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() +AND log_comment = 'no preliminary merge, with filter' +AND type = 'QueryFinish' +ORDER BY query_start_time DESC +LIMIT 1; + +DROP TABLE t; + +SELECT '========'; +-- from 02149_read_in_order_fixed_prefix +DROP TABLE IF EXISTS fixed_prefix; + +CREATE TABLE fixed_prefix(a UInt32, b UInt32) +ENGINE = MergeTree ORDER BY (a, b) +SETTINGS index_granularity = 3; + +SYSTEM STOP MERGES fixed_prefix; + +INSERT INTO fixed_prefix VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5); +INSERT INTO fixed_prefix VALUES (0, 100), (1, 2), (1, 3), (1, 4), (2, 5); + +SELECT a, b +FROM fixed_prefix +WHERE a = 1 +ORDER BY b +SETTINGS max_threads = 1, +optimize_read_in_order = 1, +read_in_order_two_level_merge_threshold = 0; --force preliminary merge + +SELECT a, b +FROM fixed_prefix +WHERE a = 1 +ORDER BY b +SETTINGS max_threads = 1, +optimize_read_in_order = 1, +read_in_order_two_level_merge_threshold = 5; --avoid preliminary merge + +DROP TABLE fixed_prefix; + +SELECT '========'; +DROP TABLE IF EXISTS function_pk; + +CREATE TABLE function_pk +( + `A` Int64, + `B` Int64 +) +ENGINE = MergeTree ORDER BY (A, -B) +SETTINGS index_granularity = 1; + +SYSTEM STOP MERGES function_pk; + +INSERT INTO function_pk values(1,1); +INSERT INTO function_pk values(1,3); +INSERT INTO function_pk values(1,2); + +SELECT * +FROM function_pk +ORDER BY (A,-B) ASC +limit 3 +SETTINGS max_threads = 1, +optimize_read_in_order = 1, +read_in_order_two_level_merge_threshold = 5; --avoid preliminary merge + +DROP TABLE function_pk; + +-- modified from 02317_distinct_in_order_optimization +SELECT '-- test distinct ----'; + +DROP TABLE IF EXISTS distinct_in_order SYNC; + +CREATE TABLE distinct_in_order +( + `a` int, + `b` int, + `c` int +) +ENGINE = MergeTree +ORDER BY (a, b) +SETTINGS index_granularity = 8192, +index_granularity_bytes = '10Mi'; + +SYSTEM STOP MERGES distinct_in_order; + +INSERT INTO distinct_in_order SELECT + number % number, + number % 5, + number % 10 +FROM numbers(1, 1000000); + +SELECT DISTINCT a +FROM distinct_in_order +ORDER BY a ASC +SETTINGS read_in_order_two_level_merge_threshold = 0, +optimize_read_in_order = 1, +max_threads = 2; + +DROP TABLE distinct_in_order; diff --git a/parser/testdata/03031_read_in_order_optimization_with_virtual_row_explain/ast.json b/parser/testdata/03031_read_in_order_optimization_with_virtual_row_explain/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03031_read_in_order_optimization_with_virtual_row_explain/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03031_read_in_order_optimization_with_virtual_row_explain/metadata.json b/parser/testdata/03031_read_in_order_optimization_with_virtual_row_explain/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03031_read_in_order_optimization_with_virtual_row_explain/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03031_read_in_order_optimization_with_virtual_row_explain/query.sql b/parser/testdata/03031_read_in_order_optimization_with_virtual_row_explain/query.sql new file mode 100644 index 000000000..8e3f37b37 --- /dev/null +++ 
b/parser/testdata/03031_read_in_order_optimization_with_virtual_row_explain/query.sql @@ -0,0 +1,26 @@ +-- Tags: no-random-merge-tree-settings, no-object-storage + +SET optimize_read_in_order = 1, merge_tree_min_rows_for_concurrent_read = 1000, read_in_order_use_virtual_row = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + `t` DateTime +) +ENGINE = MergeTree +ORDER BY t +SETTINGS index_granularity = 1; + +SYSTEM STOP MERGES tab; + +INSERT INTO tab SELECT toDateTime('2024-01-10') + number FROM numbers(10000); +INSERT INTO tab SELECT toDateTime('2024-01-30') + number FROM numbers(10000); +INSERT INTO tab SELECT toDateTime('2024-01-20') + number FROM numbers(10000); + +EXPLAIN PIPELINE +SELECT * +FROM tab +ORDER BY t ASC +SETTINGS read_in_order_two_level_merge_threshold = 0, max_threads = 4, read_in_order_use_buffering = 0 +FORMAT tsv; \ No newline at end of file diff --git a/parser/testdata/03031_read_in_order_optimization_with_virtual_row_special/ast.json b/parser/testdata/03031_read_in_order_optimization_with_virtual_row_special/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03031_read_in_order_optimization_with_virtual_row_special/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03031_read_in_order_optimization_with_virtual_row_special/metadata.json b/parser/testdata/03031_read_in_order_optimization_with_virtual_row_special/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03031_read_in_order_optimization_with_virtual_row_special/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03031_read_in_order_optimization_with_virtual_row_special/query.sql b/parser/testdata/03031_read_in_order_optimization_with_virtual_row_special/query.sql new file mode 100644 index 000000000..52aa71437 --- /dev/null +++ b/parser/testdata/03031_read_in_order_optimization_with_virtual_row_special/query.sql @@ -0,0 +1,21 @@ +-- Tags: no-parallel + +-- modified from test_01155_ordinary, to test special optimization path for virtual row +DROP DATABASE IF EXISTS test_03031; + +CREATE DATABASE test_03031; + +USE test_03031; + +SET read_in_order_use_virtual_row = 1; + +CREATE TABLE src (s String) ENGINE = MergeTree() ORDER BY s; +INSERT INTO src(s) VALUES ('before moving tables'); +CREATE TABLE dist (s String) ENGINE = Distributed(test_shard_localhost, test_03031, src); + +SET enable_analyzer=0; +SELECT _table FROM merge('test_03031', '') ORDER BY _table, s; + +DROP TABLE src; +DROP TABLE dist; +DROP DATABASE test_03031; \ No newline at end of file diff --git a/parser/testdata/03031_table_function_fuzzquery/ast.json b/parser/testdata/03031_table_function_fuzzquery/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03031_table_function_fuzzquery/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03031_table_function_fuzzquery/metadata.json b/parser/testdata/03031_table_function_fuzzquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03031_table_function_fuzzquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03031_table_function_fuzzquery/query.sql b/parser/testdata/03031_table_function_fuzzquery/query.sql new file mode 100644 index 000000000..b26096f7f --- /dev/null +++ b/parser/testdata/03031_table_function_fuzzquery/query.sql @@ -0,0 +1,18 @@ + +SELECT * FROM fuzzQuery('SELECT 1', 500, 8956) LIMIT 0 FORMAT TSVWithNamesAndTypes; + +SELECT * 
FROM fuzzQuery('SELECT * +FROM ( + SELECT + ([toString(number % 2)] :: Array(LowCardinality(String))) AS item_id, + count() + FROM numbers(3) + GROUP BY item_id WITH TOTALS +) AS l FULL JOIN ( + SELECT + ([toString((number % 2) * 2)] :: Array(String)) AS item_id + FROM numbers(3) +) AS r +ON l.item_id = r.item_id +ORDER BY 1,2,3; +', 500, 8956) LIMIT 10 FORMAT NULL; diff --git a/parser/testdata/03031_tuple_elimination_analyzer/ast.json b/parser/testdata/03031_tuple_elimination_analyzer/ast.json new file mode 100644 index 000000000..45569cfa9 --- /dev/null +++ b/parser/testdata/03031_tuple_elimination_analyzer/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001230538, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03031_tuple_elimination_analyzer/metadata.json b/parser/testdata/03031_tuple_elimination_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03031_tuple_elimination_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03031_tuple_elimination_analyzer/query.sql b/parser/testdata/03031_tuple_elimination_analyzer/query.sql new file mode 100644 index 000000000..42bd50049 --- /dev/null +++ b/parser/testdata/03031_tuple_elimination_analyzer/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS test; + +SET allow_suspicious_low_cardinality_types = true, enable_analyzer = true; + +CREATE TABLE test (`id` LowCardinality(UInt32)) ENGINE = MergeTree ORDER BY id AS SELECT 0; + +SELECT tuple(tuple(id) = tuple(1048576)) FROM test; + +DROP TABLE test; diff --git a/parser/testdata/03032_multi_search_const_low_cardinality/ast.json b/parser/testdata/03032_multi_search_const_low_cardinality/ast.json new file mode 100644 index 000000000..12aae7d31 --- /dev/null +++ b/parser/testdata/03032_multi_search_const_low_cardinality/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function multiSearchFirstIndex (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001255946, + "rows_read": 14, + "bytes_read": 573 + } +} diff --git a/parser/testdata/03032_multi_search_const_low_cardinality/metadata.json b/parser/testdata/03032_multi_search_const_low_cardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03032_multi_search_const_low_cardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03032_multi_search_const_low_cardinality/query.sql b/parser/testdata/03032_multi_search_const_low_cardinality/query.sql new file mode 100644 index 
000000000..bc5e5cff1 --- /dev/null +++ b/parser/testdata/03032_multi_search_const_low_cardinality/query.sql @@ -0,0 +1 @@ +SELECT multiSearchFirstIndex(toLowCardinality(''), [toLowCardinality('')]) diff --git a/parser/testdata/03032_numbers_zeros/ast.json b/parser/testdata/03032_numbers_zeros/ast.json new file mode 100644 index 000000000..930a69c47 --- /dev/null +++ b/parser/testdata/03032_numbers_zeros/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '------numbers_0-argument-----'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001118549, + "rows_read": 5, + "bytes_read": 200 + } +} diff --git a/parser/testdata/03032_numbers_zeros/metadata.json b/parser/testdata/03032_numbers_zeros/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03032_numbers_zeros/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03032_numbers_zeros/query.sql b/parser/testdata/03032_numbers_zeros/query.sql new file mode 100644 index 000000000..0c60ed933 --- /dev/null +++ b/parser/testdata/03032_numbers_zeros/query.sql @@ -0,0 +1,28 @@ +SELECT '------numbers_0-argument-----'; +SELECT number FROM numbers() LIMIT 10; +SELECT '------system.numbers---------'; +SELECT number FROM system.numbers LIMIT 10; +SELECT '------numbers_1-argument-----'; +SELECT number FROM numbers(10); +SELECT '------numbers_2-arguments----'; +SELECT number FROM numbers(10,10); +SELECT '------numbers_3-arguments----'; +SELECT number FROM numbers(10,10,2); +SELECT '------numbers_mt_0-argument-----'; +SELECT number FROM numbers_mt() LIMIT 10; +SELECT '------numbers_mt_1-argument-----'; +SELECT number FROM numbers_mt(10); +SELECT '------numbers_mt_2-arguments----'; +SELECT number FROM numbers_mt(10,10); +SELECT '------numbers_mt_3-arguments----'; +SELECT number FROM numbers_mt(10,10,2); +SELECT '------zeros_0-argument-------'; +SELECT zero FROM zeros() LIMIT 10; +SELECT '------system.zeros-----------'; +SELECT zero FROM system.zeros LIMIT 10; +SELECT '------zeros_1-argument-------'; +SELECT zero FROM zeros(10); +SELECT '------zeros_mt_0-argument-------'; +SELECT zero FROM zeros_mt() LIMIT 10; +SELECT '------zeros_mt_1-argument-------'; +SELECT zero FROM zeros_mt(10); diff --git a/parser/testdata/03032_redundant_equals/ast.json b/parser/testdata/03032_redundant_equals/ast.json new file mode 100644 index 000000000..d2d7fd727 --- /dev/null +++ b/parser/testdata/03032_redundant_equals/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001251084, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03032_redundant_equals/metadata.json b/parser/testdata/03032_redundant_equals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03032_redundant_equals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03032_redundant_equals/query.sql b/parser/testdata/03032_redundant_equals/query.sql new file mode 100644 index 000000000..cf402f1b1 --- /dev/null +++ 
b/parser/testdata/03032_redundant_equals/query.sql @@ -0,0 +1,92 @@ +DROP TABLE IF EXISTS test_table; + +CREATE TABLE test_table +( + k UInt64, +) +ENGINE = MergeTree +ORDER BY k SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO test_table SELECT number FROM numbers(100000); + +SET enable_analyzer = 1; + +SELECT * FROM test_table WHERE k in (100) = 1; +SELECT * FROM test_table WHERE k = (100) = 1; +SELECT * FROM test_table WHERE k not in (100) = 0; +SELECT * FROM test_table WHERE k != (100) = 0; +SELECT * FROM test_table WHERE 1 = (k = 100); +SELECT * FROM test_table WHERE 0 = (k not in (100)); +SELECT * FROM test_table WHERE k < 1 = 1; +SELECT * FROM test_table WHERE k >= 1 = 0; +SELECT * FROM test_table WHERE k > 1 = 0; +SELECT * FROM test_table WHERE ((k not in (101) = 0) OR (k in (100) = 1)) = 1; +SELECT * FROM test_table WHERE (NOT ((k not in (100) = 0) OR (k in (100) = 1))) = 0; +SELECT * FROM test_table WHERE (NOT ((k in (101) = 0) OR (k in (100) = 1))) = 1; +SELECT * FROM test_table WHERE ((k not in (101) = 0) OR (k in (100) = 1)) = 1; +SELECT * FROM test_table WHERE ((k not in (99) = 1) AND (k in (100) = 1)) = 1; +-- we skip optimizing queries with toNullable(0 or 1) but let's make sure they still work +SELECT * FROM test_table WHERE (k = 101) = toLowCardinality(toNullable(1)); +SELECT * FROM test_table WHERE (k = 101) = toNullable(1); +SELECT * FROM test_table WHERE (k = 101) = toLowCardinality(1); +SELECT * FROM test_table WHERE ((k not in (101) = toNullable(0)) OR (k in (100) = toNullable(1))) = toNullable(1); +SELECT * FROM test_table WHERE (((k NOT IN toLowCardinality(toNullable(101))) = toLowCardinality(toNullable(0))) OR ((k IN (toLowCardinality(100))) = toNullable(1))); +SELECT * FROM test_table WHERE (((k IN toLowCardinality(toNullable(101))) = toLowCardinality(toNullable(0))) AND ((k NOT IN (toLowCardinality(100))) = toNullable(1))) = toNullable(toLowCardinality(0)); + +SELECT count() +FROM +( + EXPLAIN PLAN indexes=1 + SELECT * FROM test_table WHERE k in (100) = 1 +) +WHERE + explain LIKE '%Granules: 1/%'; + +SELECT count() +FROM +( + EXPLAIN PLAN indexes=1 + SELECT * FROM test_table WHERE k >= 1 = 0 +) +WHERE + explain LIKE '%Granules: 1/%'; + +SELECT count() +FROM +( + EXPLAIN PLAN indexes=1 + SELECT * FROM test_table WHERE k not in (100) = 0 +) +WHERE + explain LIKE '%Granules: 1/%'; + +SELECT count() +FROM +( + EXPLAIN PLAN indexes=1 + SELECT * FROM test_table WHERE k > 1 = 0 +) +WHERE + explain LIKE '%Granules: 1/%'; + +SELECT count() +FROM +( + EXPLAIN PLAN indexes=1 + SELECT * FROM test_table WHERE (NOT ((k not in (100) = 0) OR (k in (100) = 1))) = 0 +) +WHERE + explain LIKE '%Granules: 1/%'; + + +SELECT count() +FROM +( + EXPLAIN PLAN indexes=1 + SELECT * FROM test_table WHERE (NOT ((k in (101) = 0) OR (k in (100) = 1))) = 1 +) +WHERE + explain LIKE '%Granules: 1/%'; + + +DROP TABLE test_table; diff --git a/parser/testdata/03032_rmt_create_columns_from_replica/ast.json b/parser/testdata/03032_rmt_create_columns_from_replica/ast.json new file mode 100644 index 000000000..e3c33292e --- /dev/null +++ b/parser/testdata/03032_rmt_create_columns_from_replica/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery data_r1 (children 1)" + }, + { + "explain": " Identifier data_r1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001133983, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git
a/parser/testdata/03032_rmt_create_columns_from_replica/metadata.json b/parser/testdata/03032_rmt_create_columns_from_replica/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03032_rmt_create_columns_from_replica/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03032_rmt_create_columns_from_replica/query.sql b/parser/testdata/03032_rmt_create_columns_from_replica/query.sql new file mode 100644 index 000000000..e08db0bda --- /dev/null +++ b/parser/testdata/03032_rmt_create_columns_from_replica/query.sql @@ -0,0 +1,5 @@ +drop table if exists data_r1; +drop table if exists data_r2; +create table data_r1 (key Int) engine=ReplicatedMergeTree('/tables/{database}', 'r1') order by tuple(); +create table data_r2 engine=ReplicatedMergeTree('/tables/{database}', 'r2') order by tuple(); +show create data_r2 format LineAsString; diff --git a/parser/testdata/03032_save_bad_json_escape_sequences/ast.json b/parser/testdata/03032_save_bad_json_escape_sequences/ast.json new file mode 100644 index 000000000..8afb770a5 --- /dev/null +++ b/parser/testdata/03032_save_bad_json_escape_sequences/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001404901, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03032_save_bad_json_escape_sequences/metadata.json b/parser/testdata/03032_save_bad_json_escape_sequences/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03032_save_bad_json_escape_sequences/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03032_save_bad_json_escape_sequences/query.sql b/parser/testdata/03032_save_bad_json_escape_sequences/query.sql new file mode 100644 index 000000000..cdec0f078 --- /dev/null +++ b/parser/testdata/03032_save_bad_json_escape_sequences/query.sql @@ -0,0 +1,16 @@ +set input_format_json_throw_on_bad_escape_sequence=0; + +select * from format(JSONEachRow, $$ +{"key" : "\u"} +{"key" : "\ud"} +{"key" : "\ud8"} +{"key" : "\ud80"} +{"key" : "\ud800"} +{"key" : "\ud800\"} +{"key" : "\ud800\u"} +{"key" : "\ud800\u1"} +{"key" : "\ud800\u12"} +{"key" : "\ud800\u123"} +{"key" : "\ud800\u1234"} +$$); + diff --git a/parser/testdata/03032_scalars_create_as_select/ast.json b/parser/testdata/03032_scalars_create_as_select/ast.json new file mode 100644 index 000000000..61b37f4b9 --- /dev/null +++ b/parser/testdata/03032_scalars_create_as_select/ast.json @@ -0,0 +1,115 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery query_run_metric_arrays (children 3)" + }, + { + "explain": " Identifier query_run_metric_arrays" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Subquery (alias all_metrics) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Subquery (alias all_names) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + 
}, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function groupUniqArrayArray (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['a', 'b']" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier all_names" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier all_metrics" + } + ], + + "rows": 31, + + "statistics": + { + "elapsed": 0.001557868, + "rows_read": 31, + "bytes_read": 1432 + } +} diff --git a/parser/testdata/03032_scalars_create_as_select/metadata.json b/parser/testdata/03032_scalars_create_as_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03032_scalars_create_as_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03032_scalars_create_as_select/query.sql b/parser/testdata/03032_scalars_create_as_select/query.sql new file mode 100644 index 000000000..ae75a30ad --- /dev/null +++ b/parser/testdata/03032_scalars_create_as_select/query.sql @@ -0,0 +1,2 @@ +create table query_run_metric_arrays engine Memory as with (with (select groupUniqArrayArray(['a', 'b']) from numbers(1)) as all_names select all_names) as all_metrics select all_metrics; +select * from query_run_metric_arrays; diff --git a/parser/testdata/03032_storage_memory_modify_settings/ast.json b/parser/testdata/03032_storage_memory_modify_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03032_storage_memory_modify_settings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03032_storage_memory_modify_settings/metadata.json b/parser/testdata/03032_storage_memory_modify_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03032_storage_memory_modify_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03032_storage_memory_modify_settings/query.sql b/parser/testdata/03032_storage_memory_modify_settings/query.sql new file mode 100644 index 000000000..32c29446c --- /dev/null +++ b/parser/testdata/03032_storage_memory_modify_settings/query.sql @@ -0,0 +1,77 @@ +-- Tags: memory-engine +SET max_block_size = 65409; -- Default value + +SELECT 'TESTING MODIFY SMALLER BYTES'; +DROP TABLE IF EXISTS memory; +CREATE TABLE memory (i UInt32) ENGINE = Memory SETTINGS min_bytes_to_keep = 8192, max_bytes_to_keep = 32768; + +INSERT INTO memory SELECT * FROM numbers(0, 100); -- 1024 bytes +INSERT INTO memory SELECT * FROM numbers(0, 3000); -- 16384 bytes +SELECT total_bytes FROM system.tables WHERE name = 'memory' and database = currentDatabase(); -- 17408 in total + +ALTER TABLE memory MODIFY SETTING min_bytes_to_keep = 4096, max_bytes_to_keep = 16384; +SELECT total_bytes FROM system.tables WHERE name = 'memory' and database = currentDatabase(); -- 16384 in total after deleting + +INSERT INTO memory SELECT * FROM numbers(3000, 10000); -- 65536 bytes +SELECT total_bytes FROM system.tables WHERE name = 'memory' and database = currentDatabase(); 
+ +SELECT 'TESTING MODIFY SMALLER ROWS'; +DROP TABLE IF EXISTS memory; +CREATE TABLE memory (i UInt32) ENGINE = Memory SETTINGS min_rows_to_keep = 200, max_rows_to_keep = 2000; + +INSERT INTO memory SELECT * FROM numbers(0, 100); -- 100 rows +INSERT INTO memory SELECT * FROM numbers(100, 1000); -- 1000 rows +SELECT total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase(); -- 1100 in total + +ALTER TABLE memory MODIFY SETTING min_rows_to_keep = 100, max_rows_to_keep = 1000; +SELECT total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase(); -- 1000 in total after deleting + +INSERT INTO memory SELECT * FROM numbers(1000, 500); -- 500 rows +SELECT total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase(); -- 500 in total after deleting + +SELECT 'TESTING ADD SETTINGS'; +DROP TABLE IF EXISTS memory; +CREATE TABLE memory (i UInt32) ENGINE = Memory; + +INSERT INTO memory SELECT * FROM numbers(0, 50); -- 50 rows +SELECT total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase(); -- 50 in total + +INSERT INTO memory SELECT * FROM numbers(50, 950); -- 950 rows +SELECT total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase(); -- 1000 in total + +INSERT INTO memory SELECT * FROM numbers(2000, 70); -- 70 rows +SELECT total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase(); -- 1070 in total + +ALTER TABLE memory MODIFY SETTING min_rows_to_keep = 100, max_rows_to_keep = 1000; +SELECT total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase(); -- 1020 in total after deleting + +INSERT INTO memory SELECT * FROM numbers(3000, 1100); -- 1100 rows +SELECT total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase(); -- 1100 in total after deleting + +SELECT 'TESTING ADD SETTINGS'; +DROP TABLE IF EXISTS memory; +CREATE TABLE memory (i UInt32) ENGINE = Memory; +ALTER TABLE memory MODIFY SETTING min_rows_to_keep = 100, max_rows_to_keep = 1000; + +INSERT INTO memory SELECT * FROM numbers(0, 50); -- 50 rows +SELECT total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase(); -- 50 in total + +INSERT INTO memory SELECT * FROM numbers(50, 950); -- 950 rows +SELECT total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase(); -- 1000 in total + +INSERT INTO memory SELECT * FROM numbers(2000, 70); -- 70 rows +SELECT total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase(); -- 1020 in total after deleting + +INSERT INTO memory SELECT * FROM numbers(3000, 1100); -- 1100 rows +SELECT total_rows FROM system.tables WHERE name = 'memory' and database = currentDatabase(); -- 1100 in total after deleting + +SELECT 'TESTING INVALID SETTINGS'; +DROP TABLE IF EXISTS memory; +CREATE TABLE memory (i UInt32) ENGINE = Memory; +ALTER TABLE memory MODIFY SETTING min_rows_to_keep = 100; -- { serverError SETTING_CONSTRAINT_VIOLATION } +ALTER TABLE memory MODIFY SETTING min_bytes_to_keep = 100; -- { serverError SETTING_CONSTRAINT_VIOLATION } +ALTER TABLE memory MODIFY SETTING max_rows_to_keep = 1000; +ALTER TABLE memory MODIFY SETTING max_bytes_to_keep = 1000; + +DROP TABLE memory; + diff --git a/parser/testdata/03032_string_to_variant_cast/ast.json b/parser/testdata/03032_string_to_variant_cast/ast.json new file mode 100644 index 000000000..5eeddceec --- /dev/null +++ b/parser/testdata/03032_string_to_variant_cast/ast.json @@ -0,0 +1,25 @@ +{ + 
"meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001212945, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03032_string_to_variant_cast/metadata.json b/parser/testdata/03032_string_to_variant_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03032_string_to_variant_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03032_string_to_variant_cast/query.sql b/parser/testdata/03032_string_to_variant_cast/query.sql new file mode 100644 index 000000000..67a501b96 --- /dev/null +++ b/parser/testdata/03032_string_to_variant_cast/query.sql @@ -0,0 +1,17 @@ +set allow_experimental_variant_type=1; +select CAST('42', 'Variant(String, UInt64)') as v, variantType(v); +select CAST('abc', 'Variant(String, UInt64)') as v, variantType(v); +select CAST('null', 'Variant(String, UInt64)') as v, variantType(v); +select CAST('[1, 2, 3]', 'Variant(String, Array(UInt64))') as v, variantType(v); +select CAST('[1, 2, 3', 'Variant(String, Array(UInt64))') as v, variantType(v); +select CAST('42', 'Variant(Date)') as v, variantType(v); -- {serverError INCORRECT_DATA} +select accurateCastOrNull('42', 'Variant(Date)') as v, variantType(v); + +select CAST('42'::FixedString(2), 'Variant(String, UInt64)') as v, variantType(v); +select CAST('42'::LowCardinality(String), 'Variant(String, UInt64)') as v, variantType(v); +select CAST('42'::Nullable(String), 'Variant(String, UInt64)') as v, variantType(v); +select CAST(NULL::Nullable(String), 'Variant(String, UInt64)') as v, variantType(v); +select CAST('42'::LowCardinality(Nullable(String)), 'Variant(String, UInt64)') as v, variantType(v); +select CAST(NULL::LowCardinality(Nullable(String)), 'Variant(String, UInt64)') as v, variantType(v); +select CAST(NULL::LowCardinality(Nullable(FixedString(2))), 'Variant(String, UInt64)') as v, variantType(v); + diff --git a/parser/testdata/03032_variant_bool_number_not_suspicious/ast.json b/parser/testdata/03032_variant_bool_number_not_suspicious/ast.json new file mode 100644 index 000000000..409c6bd30 --- /dev/null +++ b/parser/testdata/03032_variant_bool_number_not_suspicious/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001201103, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03032_variant_bool_number_not_suspicious/metadata.json b/parser/testdata/03032_variant_bool_number_not_suspicious/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03032_variant_bool_number_not_suspicious/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03032_variant_bool_number_not_suspicious/query.sql b/parser/testdata/03032_variant_bool_number_not_suspicious/query.sql new file mode 100644 index 000000000..b7609d34f --- /dev/null +++ b/parser/testdata/03032_variant_bool_number_not_suspicious/query.sql @@ -0,0 +1,4 @@ +set allow_experimental_variant_type=1; +set allow_suspicious_variant_types=0; +select 'true'::Bool::Variant(UInt32, Bool); + diff --git a/parser/testdata/03033_analyzer_merge_engine_filter_push_down/ast.json b/parser/testdata/03033_analyzer_merge_engine_filter_push_down/ast.json new file mode 100644 index 000000000..f2d0e85c8 --- /dev/null +++ 
b/parser/testdata/03033_analyzer_merge_engine_filter_push_down/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001225023, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03033_analyzer_merge_engine_filter_push_down/metadata.json b/parser/testdata/03033_analyzer_merge_engine_filter_push_down/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_analyzer_merge_engine_filter_push_down/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03033_analyzer_merge_engine_filter_push_down/query.sql b/parser/testdata/03033_analyzer_merge_engine_filter_push_down/query.sql new file mode 100644 index 000000000..d01e458a5 --- /dev/null +++ b/parser/testdata/03033_analyzer_merge_engine_filter_push_down/query.sql @@ -0,0 +1,7 @@ +set allow_suspicious_low_cardinality_types=1; +drop table if exists test; +create table test (`x` LowCardinality(Nullable(UInt32)), `y` String) engine = MergeTree order by tuple(); +insert into test values (1, 'a'), (2, 'bb'), (3, 'ccc'), (4, 'dddd'); +create table m_table (x UInt32, y String) engine = Merge(currentDatabase(), 'test*'); +select toTypeName(x), x FROM m_table SETTINGS additional_table_filters = {'m_table':'x != 4'}, optimize_move_to_prewhere=1, enable_analyzer=1; +drop table test; diff --git a/parser/testdata/03033_analyzer_parametrized_view_alias/ast.json b/parser/testdata/03033_analyzer_parametrized_view_alias/ast.json new file mode 100644 index 000000000..ad44982ab --- /dev/null +++ b/parser/testdata/03033_analyzer_parametrized_view_alias/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery raw_data (children 1)" + }, + { + "explain": " Identifier raw_data" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001547302, + "rows_read": 2, + "bytes_read": 69 + } +} diff --git a/parser/testdata/03033_analyzer_parametrized_view_alias/metadata.json b/parser/testdata/03033_analyzer_parametrized_view_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_analyzer_parametrized_view_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03033_analyzer_parametrized_view_alias/query.sql b/parser/testdata/03033_analyzer_parametrized_view_alias/query.sql new file mode 100644 index 000000000..3c0e3b4a6 --- /dev/null +++ b/parser/testdata/03033_analyzer_parametrized_view_alias/query.sql @@ -0,0 +1,20 @@ +CREATE TABLE raw_data +( + `id` UInt8, + `data` String +) +ENGINE = MergeTree +ORDER BY id; + + +INSERT INTO raw_data SELECT number, number +FROM numbers(10); + +CREATE VIEW raw_data_parameterized AS +SELECT * +FROM raw_data +WHERE (id >= {id_from:UInt8}) AND (id <= {id_to:UInt8}); + +SELECT t1.id +FROM raw_data_parameterized(id_from = 0, id_to = 50000) t1 +ORDER BY t1.id; diff --git a/parser/testdata/03033_analyzer_resolve_from_parent_scope/ast.json b/parser/testdata/03033_analyzer_resolve_from_parent_scope/ast.json new file mode 100644 index 000000000..7b1cbe640 --- /dev/null +++ b/parser/testdata/03033_analyzer_resolve_from_parent_scope/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery vecs_Float32 (children 3)" + }, + { + "explain": " Identifier vecs_Float32" + }, + { + 
"explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration v (children 1)" + }, + { + "explain": " DataType Array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType Float32" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001516062, + "rows_read": 10, + "bytes_read": 371 + } +} diff --git a/parser/testdata/03033_analyzer_resolve_from_parent_scope/metadata.json b/parser/testdata/03033_analyzer_resolve_from_parent_scope/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_analyzer_resolve_from_parent_scope/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03033_analyzer_resolve_from_parent_scope/query.sql b/parser/testdata/03033_analyzer_resolve_from_parent_scope/query.sql new file mode 100644 index 000000000..739ecbf8d --- /dev/null +++ b/parser/testdata/03033_analyzer_resolve_from_parent_scope/query.sql @@ -0,0 +1,59 @@ +CREATE TABLE vecs_Float32 (v Array(Float32)) ENGINE=Memory; +INSERT INTO vecs_Float32 +SELECT v FROM ( + SELECT + number AS n, + [ + rand(n*10), rand(n*10+1), rand(n*10+2), rand(n*10+3), rand(n*10+4), rand(n*10+5), rand(n*10+6), rand(n*10+7), rand(n*10+8), rand(n*10+9), + rand(n*10+10), rand(n*10+11), rand(n*10+12), rand(n*10+13), rand(n*10+14), rand(n*10+15), rand(n*10+16), rand(n*10+17), rand(n*10+18), rand(n*10+19), + rand(n*10+20), rand(n*10+21), rand(n*10+22), rand(n*10+23), rand(n*10+24), rand(n*10+25), rand(n*10+26), rand(n*10+27), rand(n*10+28), rand(n*10+29), + rand(n*10+30), rand(n*10+31), rand(n*10+32), rand(n*10+33), rand(n*10+34), rand(n*10+35), rand(n*10+36), rand(n*10+37), rand(n*10+38), rand(n*10+39), + rand(n*10+40), rand(n*10+41), rand(n*10+42), rand(n*10+43), rand(n*10+44), rand(n*10+45), rand(n*10+46), rand(n*10+47), rand(n*10+48), rand(n*10+49), + rand(n*10+50), rand(n*10+51), rand(n*10+52), rand(n*10+53), rand(n*10+54), rand(n*10+55), rand(n*10+56), rand(n*10+57), rand(n*10+58), rand(n*10+59), + rand(n*10+60), rand(n*10+61), rand(n*10+62), rand(n*10+63), rand(n*10+64), rand(n*10+65), rand(n*10+66), rand(n*10+67), rand(n*10+68), rand(n*10+69), + rand(n*10+70), rand(n*10+71), rand(n*10+72), rand(n*10+73), rand(n*10+74), rand(n*10+75), rand(n*10+76), rand(n*10+77), rand(n*10+78), rand(n*10+79), + rand(n*10+80), rand(n*10+81), rand(n*10+82), rand(n*10+83), rand(n*10+84), rand(n*10+85), rand(n*10+86), rand(n*10+87), rand(n*10+88), rand(n*10+89), + rand(n*10+90), rand(n*10+91), rand(n*10+92), rand(n*10+93), rand(n*10+94), rand(n*10+95), rand(n*10+96), rand(n*10+97), rand(n*10+98), rand(n*10+99), + rand(n*10+100), rand(n*10+101), rand(n*10+102), rand(n*10+103), rand(n*10+104), rand(n*10+105), rand(n*10+106), rand(n*10+107), rand(n*10+108), rand(n*10+109), + rand(n*10+110), rand(n*10+111), rand(n*10+112), rand(n*10+113), rand(n*10+114), rand(n*10+115), rand(n*10+116), rand(n*10+117), rand(n*10+118), rand(n*10+119), + rand(n*10+120), rand(n*10+121), rand(n*10+122), rand(n*10+123), rand(n*10+124), rand(n*10+125), rand(n*10+126), rand(n*10+127), rand(n*10+128), rand(n*10+129), + rand(n*10+130), rand(n*10+131), rand(n*10+132), rand(n*10+133), rand(n*10+134), rand(n*10+135), rand(n*10+136), rand(n*10+137), rand(n*10+138), rand(n*10+139), + rand(n*10+140), rand(n*10+141), rand(n*10+142), rand(n*10+143), rand(n*10+144), rand(n*10+145), rand(n*10+146), 
rand(n*10+147), rand(n*10+148), rand(n*10+149) + ] AS v + FROM system.numbers + LIMIT 10 +); + +WITH (SELECT v FROM vecs_Float32 limit 1) AS a SELECT count(dp) FROM (SELECT dotProduct(a, v) AS dp FROM vecs_Float32); + +WITH + t as (SELECT number + a as x FROM numbers(5)) +SELECT 0 as a, x FROM t +UNION ALL +SELECT 5 as a, x FROM t +ORDER BY a, x +FORMAT Null +SETTINGS enable_analyzer = 1; + +WITH t AS + ( + SELECT number + a AS x + FROM numbers(5) + ) +SELECT * +FROM +( + SELECT + 0 AS a, + x + FROM t + UNION ALL + SELECT + 5 AS a, + x + FROM t +) +ORDER BY + a ASC, + x ASC +SETTINGS enable_analyzer = 1; diff --git a/parser/testdata/03033_create_as_copies_comment/ast.json b/parser/testdata/03033_create_as_copies_comment/ast.json new file mode 100644 index 000000000..92956e048 --- /dev/null +++ b/parser/testdata/03033_create_as_copies_comment/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery base (children 1)" + }, + { + "explain": " Identifier base" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001202469, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03033_create_as_copies_comment/metadata.json b/parser/testdata/03033_create_as_copies_comment/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_create_as_copies_comment/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03033_create_as_copies_comment/query.sql b/parser/testdata/03033_create_as_copies_comment/query.sql new file mode 100644 index 000000000..0de42f6cc --- /dev/null +++ b/parser/testdata/03033_create_as_copies_comment/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS base; +DROP TABLE IF EXISTS copy_without_comment; +DROP TABLE IF EXISTS copy_with_comment; + +CREATE TABLE base (a Int32) ENGINE = TinyLog COMMENT 'original comment'; +CREATE TABLE copy_without_comment AS base; +CREATE TABLE copy_with_comment AS base COMMENT 'new comment'; + +SELECT comment FROM system.tables WHERE database = currentDatabase() AND name = 'base'; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND name = 'copy_without_comment'; +SELECT comment FROM system.tables WHERE database = currentDatabase() AND name = 'copy_with_comment'; \ No newline at end of file diff --git a/parser/testdata/03033_cte_numbers_memory/ast.json b/parser/testdata/03033_cte_numbers_memory/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03033_cte_numbers_memory/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03033_cte_numbers_memory/metadata.json b/parser/testdata/03033_cte_numbers_memory/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_cte_numbers_memory/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03033_cte_numbers_memory/query.sql b/parser/testdata/03033_cte_numbers_memory/query.sql new file mode 100644 index 000000000..b362f42f8 --- /dev/null +++ b/parser/testdata/03033_cte_numbers_memory/query.sql @@ -0,0 +1,16 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/61238 +SET enable_analyzer=1; + +WITH +(SELECT number FROM system.numbers LIMIT 1) as w1, +(SELECT number FROM system.numbers LIMIT 1) as w2, +(SELECT number FROM system.numbers LIMIT 1) as w3, +(SELECT number FROM system.numbers LIMIT 1) as w4, +(SELECT number FROM system.numbers LIMIT 1) as w5, +(SELECT number FROM system.numbers LIMIT 1) as w6 
+SELECT number FROM ( + SELECT number FROM system.numbers LIMIT 10 + UNION ALL + SELECT number FROM system.numbers LIMIT 10 +) +WHERE number < 5; diff --git a/parser/testdata/03033_dist_settings.optimize_skip_unused_shards_rewrite_in_composite_sharding_key/ast.json b/parser/testdata/03033_dist_settings.optimize_skip_unused_shards_rewrite_in_composite_sharding_key/ast.json new file mode 100644 index 000000000..a8868f319 --- /dev/null +++ b/parser/testdata/03033_dist_settings.optimize_skip_unused_shards_rewrite_in_composite_sharding_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000996486, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03033_dist_settings.optimize_skip_unused_shards_rewrite_in_composite_sharding_key/metadata.json b/parser/testdata/03033_dist_settings.optimize_skip_unused_shards_rewrite_in_composite_sharding_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_dist_settings.optimize_skip_unused_shards_rewrite_in_composite_sharding_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03033_dist_settings.optimize_skip_unused_shards_rewrite_in_composite_sharding_key/query.sql b/parser/testdata/03033_dist_settings.optimize_skip_unused_shards_rewrite_in_composite_sharding_key/query.sql new file mode 100644 index 000000000..b68fc2772 --- /dev/null +++ b/parser/testdata/03033_dist_settings.optimize_skip_unused_shards_rewrite_in_composite_sharding_key/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS dt; + +CREATE TABLE t (tag_id UInt64, tag_name String) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE dt AS t ENGINE = Distributed('test_cluster_two_shards_localhost', currentDatabase(), 't', cityHash64(concat(tag_id, tag_name))); + +INSERT INTO dt SETTINGS distributed_foreground_insert=1 VALUES (1, 'foo1'); -- shard0 +INSERT INTO dt SETTINGS distributed_foreground_insert=1 VALUES (1, 'foo2'); -- shard1 + +SET optimize_skip_unused_shards=1, optimize_skip_unused_shards_rewrite_in=1; +-- { echoOn } +SELECT shardNum(), count() FROM dt WHERE (tag_id, tag_name) IN ((1, 'foo1'), (1, 'foo2')) GROUP BY 1 ORDER BY 1; +SELECT shardNum(), count() FROM dt WHERE tag_id IN (1, 1) AND tag_name IN ('foo1', 'foo2') GROUP BY 1 ORDER BY 1; +SELECT shardNum(), count() FROM dt WHERE tag_id = 1 AND tag_name IN ('foo1', 'foo2') GROUP BY 1 ORDER BY 1; diff --git a/parser/testdata/03033_distinct_transform_const_columns/ast.json b/parser/testdata/03033_distinct_transform_const_columns/ast.json new file mode 100644 index 000000000..7e455ce8e --- /dev/null +++ b/parser/testdata/03033_distinct_transform_const_columns/ast.json @@ -0,0 +1,256 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function COALESCE (children 1)" + }, + { + "explain": " ExpressionList (children 13)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function COALESCE (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " 
Literal ''" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'b3'" + }, + { + "explain": " Function toUInt128 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function toUInt128 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function toUInt128 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function COALESCE (children 1)" + }, + { + "explain": " ExpressionList (children 13)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function COALESCE (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function COALESCE (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal 'b3'" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function toUInt128 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUInt256 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100000" + } + ], + + "rows": 78, + + "statistics": + { + "elapsed": 0.001247639, + "rows_read": 78, + "bytes_read": 3056 + } +} diff --git a/parser/testdata/03033_distinct_transform_const_columns/metadata.json 
b/parser/testdata/03033_distinct_transform_const_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_distinct_transform_const_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03033_distinct_transform_const_columns/query.sql b/parser/testdata/03033_distinct_transform_const_columns/query.sql new file mode 100644 index 000000000..41df19ab6 --- /dev/null +++ b/parser/testdata/03033_distinct_transform_const_columns/query.sql @@ -0,0 +1 @@ +SELECT DISTINCT COALESCE(COALESCE('') = toNullable('b3'), toUInt128(toNullable(2)), 2, 2, toLowCardinality(2), 2, 2, 2, toUInt128(toNullable(2)), materialize(2), toUInt128(2), 2, 2), COALESCE(COALESCE(COALESCE(materialize(''))) = 'b3', 2, 2, 2, toLowCardinality(2), toUInt128(2), 2, 2, 2, materialize(toUInt256(2)), 2, 2, 2) FROM numbers(100000); diff --git a/parser/testdata/03033_dynamic_text_serialization/ast.json b/parser/testdata/03033_dynamic_text_serialization/ast.json new file mode 100644 index 000000000..51f552e87 --- /dev/null +++ b/parser/testdata/03033_dynamic_text_serialization/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00147302, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03033_dynamic_text_serialization/metadata.json b/parser/testdata/03033_dynamic_text_serialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_dynamic_text_serialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03033_dynamic_text_serialization/query.sql b/parser/testdata/03033_dynamic_text_serialization/query.sql new file mode 100644 index 000000000..e11a4000f --- /dev/null +++ b/parser/testdata/03033_dynamic_text_serialization/query.sql @@ -0,0 +1,75 @@ +set allow_experimental_dynamic_type = 1; +set input_format_json_infer_array_of_dynamic_from_array_of_different_types=0; + +select 'JSON'; +select d, dynamicType(d) from format(JSONEachRow, 'd Dynamic', $$ +{"d" : 42} +{"d" : 42.42} +{"d" : "str"} +{"d" : [1, 2, 3]} +{"d" : "2020-01-01"} +{"d" : "2020-01-01 10:00:00"} +{"d" : {"a" : 42, "b" : "str"}} +{"d" : {"a" : 43}} +{"d" : {"a" : 44, "c" : [1, 2, 3]}} +{"d" : [1, "str", [1, 2, 3]]} +{"d" : null} +{"d" : true} +$$) format JSONEachRow; + +select d, dynamicType(d), isDynamicElementInSharedData(d) from format(JSONEachRow, 'd Dynamic(max_types=2)', $$ +{"d" : 42} +{"d" : 42.42} +{"d" : "str"} +{"d" : null} +{"d" : true} +$$) format JSONEachRow; + +select 'CSV'; +select d, dynamicType(d) from format(CSV, 'd Dynamic', +$$42 +42.42 +"str" +"[1, 2, 3]" +"2020-01-01" +"2020-01-01 10:00:00" +"[1, 'str', [1, 2, 3]]" +\N +true +$$) format CSV; + +select 'TSV'; +select d, dynamicType(d) from format(TSV, 'd Dynamic', +$$42 +42.42 +str +[1, 2, 3] +2020-01-01 +2020-01-01 10:00:00 +[1, 'str', [1, 2, 3]] +\N +true +$$) format TSV; + +select 'Values'; +select d, dynamicType(d) from format(Values, 'd Dynamic', $$ +(42) +(42.42) +('str') +([1, 2, 3]) +('2020-01-01') +('2020-01-01 10:00:00') +(NULL) +(true) +$$) format Values; +select ''; + +select 'Cast using parsing'; +drop table if exists test; +create table test (s String) engine=Memory; +insert into test values ('42'), ('42.42'), ('[1, 2, 3]'), ('2020-01-01'), ('2020-01-01 10:00:00'), ('NULL'), ('true'); +set cast_string_to_dynamic_use_inference=1; +select s::Dynamic as d, 
dynamicType(d) from test; +select s::Dynamic(max_types=3) as d, dynamicType(d), isDynamicElementInSharedData(d) from test; +drop table test; + diff --git a/parser/testdata/03033_final_undefined_last_mark/ast.json b/parser/testdata/03033_final_undefined_last_mark/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03033_final_undefined_last_mark/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03033_final_undefined_last_mark/metadata.json b/parser/testdata/03033_final_undefined_last_mark/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_final_undefined_last_mark/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03033_final_undefined_last_mark/query.sql b/parser/testdata/03033_final_undefined_last_mark/query.sql new file mode 100644 index 000000000..25a30a365 --- /dev/null +++ b/parser/testdata/03033_final_undefined_last_mark/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-random-settings, no-random-merge-tree-settings + +DROP TABLE IF EXISTS account_test; + +CREATE TABLE account_test +( + `id` UInt64, + `row_ver` UInt64, +) +ENGINE = ReplacingMergeTree(row_ver) +ORDER BY id +SETTINGS index_granularity = 16, index_granularity_bytes = 0, + min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, + min_rows_for_compact_part = 0, min_bytes_for_compact_part = 0; + +SYSTEM STOP MERGES account_test; + +INSERT INTO account_test VALUES (11338881281426660955,717769962224129342),(12484100559155738267,7950971667203174918),(7603729260199571867,3255798127676911942),(7023543111808724827,911615979861855126),(10293135086416484571,3264379259750736572),(15561193439904316763,8419819469587131454),(17632407413882870235,7252071832370181502),(17009726455991851227,7525297506591593939),(12392078953873778779,8473049173389293961),(15283366022689446555,11692491360262171467),(9087459014730986523,2783662960221838603),(293823584550906267,4847630088179732782),(15693186194430465755,8163804880526285623),(7353080168325584795,17315892478487497859),(5980311238303466523,6943353798059390089),(14242621660019578011,8684624667957352769),(8241843507567433563,15731952080102886438); +INSERT INTO account_test VALUES (11338881281426660955, 14765404159170880511); + +SELECT 'Disabled', * FROM account_test FINAL WHERE id = 11338881281426660955 SETTINGS split_parts_ranges_into_intersecting_and_non_intersecting_final = 0; +SELECT 'Enabled', * FROM account_test FINAL WHERE id = 11338881281426660955 SETTINGS split_parts_ranges_into_intersecting_and_non_intersecting_final = 1; + diff --git a/parser/testdata/03033_from_unixtimestamp_joda_by_int64/ast.json b/parser/testdata/03033_from_unixtimestamp_joda_by_int64/ast.json new file mode 100644 index 000000000..d3125dafa --- /dev/null +++ b/parser/testdata/03033_from_unixtimestamp_joda_by_int64/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function fromUnixTimestampInJodaSyntax (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_10262736196" + }, + { + "explain": " Literal 'YYYY-MM-dd HH:mm:ss'" + }, + { + "explain": " Literal 'Asia\/Shanghai'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001062985, + 
"rows_read": 9, + "bytes_read": 372 + } +} diff --git a/parser/testdata/03033_from_unixtimestamp_joda_by_int64/metadata.json b/parser/testdata/03033_from_unixtimestamp_joda_by_int64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_from_unixtimestamp_joda_by_int64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03033_from_unixtimestamp_joda_by_int64/query.sql b/parser/testdata/03033_from_unixtimestamp_joda_by_int64/query.sql new file mode 100644 index 000000000..69d898d30 --- /dev/null +++ b/parser/testdata/03033_from_unixtimestamp_joda_by_int64/query.sql @@ -0,0 +1 @@ +select fromUnixTimestampInJodaSyntax(10262736196, 'YYYY-MM-dd HH:mm:ss', 'Asia/Shanghai'); \ No newline at end of file diff --git a/parser/testdata/03033_index_definition_sql_udf_bug/ast.json b/parser/testdata/03033_index_definition_sql_udf_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03033_index_definition_sql_udf_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03033_index_definition_sql_udf_bug/metadata.json b/parser/testdata/03033_index_definition_sql_udf_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_index_definition_sql_udf_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03033_index_definition_sql_udf_bug/query.sql b/parser/testdata/03033_index_definition_sql_udf_bug/query.sql new file mode 100644 index 000000000..84ab1d33c --- /dev/null +++ b/parser/testdata/03033_index_definition_sql_udf_bug/query.sql @@ -0,0 +1,21 @@ +-- Tags: no-parallel + +DROP FUNCTION IF EXISTS test_func_1; +CREATE FUNCTION test_func_1 AS (a, b, c) -> ((a + b) + c); + +DROP TABLE IF EXISTS t4_2; +CREATE TABLE t4_2 +( + `col1` Int64 NOT NULL COMMENT 'test', + `col2` Float64 NOT NULL, + `col3` Int64 NOT NULL, + INDEX ind4 test_func_1(col1, col3, col1) TYPE set(51) GRANULARITY 5 +) +ENGINE = MergeTree +ORDER BY col1 +; + +INSERT INTO t4_2 (col1, col2, col3) SELECT number, number, number FROM numbers(10); + +SELECT * FROM t4_2 WHERE test_func_1(col1, col3, col1) = 6 +SETTINGS force_data_skipping_indices = 'ind4'; diff --git a/parser/testdata/03033_lightweight_deletes_sync/ast.json b/parser/testdata/03033_lightweight_deletes_sync/ast.json new file mode 100644 index 000000000..e84f45b97 --- /dev/null +++ b/parser/testdata/03033_lightweight_deletes_sync/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lightweight_deletes (children 1)" + }, + { + "explain": " Identifier t_lightweight_deletes" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001663277, + "rows_read": 2, + "bytes_read": 94 + } +} diff --git a/parser/testdata/03033_lightweight_deletes_sync/metadata.json b/parser/testdata/03033_lightweight_deletes_sync/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_lightweight_deletes_sync/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03033_lightweight_deletes_sync/query.sql b/parser/testdata/03033_lightweight_deletes_sync/query.sql new file mode 100644 index 000000000..bb4bb6dfa --- /dev/null +++ b/parser/testdata/03033_lightweight_deletes_sync/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t_lightweight_deletes; + +CREATE TABLE t_lightweight_deletes (a UInt64) ENGINE = MergeTree ORDER BY a; + +INSERT INTO t_lightweight_deletes 
VALUES (1) (2) (3); + +DELETE FROM t_lightweight_deletes WHERE a = 1 SETTINGS lightweight_deletes_sync = 2; + +SELECT count() FROM t_lightweight_deletes; +SELECT count() FROM system.mutations WHERE database = currentDatabase() AND table = 't_lightweight_deletes' AND NOT is_done; + +SYSTEM STOP MERGES t_lightweight_deletes; +DELETE FROM t_lightweight_deletes WHERE a = 2 SETTINGS lightweight_deletes_sync = 0; + +SELECT count() FROM t_lightweight_deletes; +SELECT count() FROM system.mutations WHERE database = currentDatabase() AND table = 't_lightweight_deletes' AND NOT is_done; + +DROP TABLE t_lightweight_deletes; diff --git a/parser/testdata/03033_parts_splitter_bug_and_index_loading/ast.json b/parser/testdata/03033_parts_splitter_bug_and_index_loading/ast.json new file mode 100644 index 000000000..3cc233bcb --- /dev/null +++ b/parser/testdata/03033_parts_splitter_bug_and_index_loading/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 3)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": "  ExpressionList (children 2)" + }, + { + "explain": "   ColumnDeclaration a (children 1)" + }, + { + "explain": "    DataType UInt32" + }, + { + "explain": "   ColumnDeclaration b (children 1)" + }, + { + "explain": "    DataType UInt32" + }, + { + "explain": " Storage definition (children 3)" + }, + { + "explain": "  Function MergeTree" + }, + { + "explain": "  Function tuple (children 1)" + }, + { + "explain": "   ExpressionList (children 2)" + }, + { + "explain": "    Identifier a" + }, + { + "explain": "    Identifier b" + }, + { + "explain": "  Set" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001259852, + "rows_read": 15, + "bytes_read": 476 + } +} diff --git a/parser/testdata/03033_parts_splitter_bug_and_index_loading/metadata.json b/parser/testdata/03033_parts_splitter_bug_and_index_loading/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_parts_splitter_bug_and_index_loading/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03033_parts_splitter_bug_and_index_loading/query.sql b/parser/testdata/03033_parts_splitter_bug_and_index_loading/query.sql new file mode 100644 index 000000000..25ec1c8fd --- /dev/null +++ b/parser/testdata/03033_parts_splitter_bug_and_index_loading/query.sql @@ -0,0 +1,19 @@ +create table t(a UInt32, b UInt32) engine=MergeTree order by (a, b) settings index_granularity=1; + +system stop merges t; + +-- for this part the first column is useless, so we have to use both +insert into t select 42, number from numbers_mt(100); + +-- for this part the first column is enough +insert into t select number, number from numbers_mt(100); + +-- force reloading index +detach table t; +attach table t; + +set merge_tree_min_bytes_for_concurrent_read=1, merge_tree_min_rows_for_concurrent_read=1, merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=1.0, max_threads=4; + +-- the bug happened when we used (a, b) index values for one part and only (a) for another in PartsSplitter. Even a simple count query is enough, +-- because some granules were assigned to wrong layers and hence not returned from the reading step (because they were filtered out by `FilterSortedStreamByRange`) +select count() from t where not ignore(*);
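+-- Illustrative sketch, not part of the original test: to check which parts and granule ranges
+-- the planner actually selects for the count query above, EXPLAIN can print index details:
+--   EXPLAIN indexes = 1 SELECT count() FROM t WHERE not ignore(*);
+-- the output shows how many parts and granules survive primary-key analysis.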
diff --git a/parser/testdata/03033_recursive_cte_basic/ast.json b/parser/testdata/03033_recursive_cte_basic/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03033_recursive_cte_basic/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03033_recursive_cte_basic/metadata.json b/parser/testdata/03033_recursive_cte_basic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_recursive_cte_basic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03033_recursive_cte_basic/query.sql b/parser/testdata/03033_recursive_cte_basic/query.sql new file mode 100644 index 000000000..63014e9cc --- /dev/null +++ b/parser/testdata/03033_recursive_cte_basic/query.sql @@ -0,0 +1,43 @@ +-- { echoOn } + +SET enable_analyzer = 1; + +WITH RECURSIVE recursive_cte AS (SELECT 1 AS n UNION ALL SELECT n + 1 FROM recursive_cte WHERE n < 10) +SELECT n FROM recursive_cte; + +SELECT '--'; + +WITH RECURSIVE recursive_cte AS (SELECT toUInt8(1) AS n UNION ALL SELECT toUInt8(n + 1) FROM recursive_cte WHERE n < 10) +SELECT n FROM recursive_cte; + +SELECT '--'; + +WITH RECURSIVE recursive_cte AS (SELECT toUInt16(1) AS n UNION ALL SELECT toUInt8(n + 1) FROM recursive_cte WHERE n < 10) +SELECT n FROM recursive_cte; + +SELECT '--'; + +WITH RECURSIVE recursive_cte AS (SELECT materialize(toUInt16(1)) AS n UNION ALL SELECT toUInt8(n + 1) FROM recursive_cte WHERE n < 10) +SELECT n FROM recursive_cte; + +SELECT '--'; + +WITH RECURSIVE recursive_cte AS (SELECT toUInt16(1) AS n UNION ALL SELECT materialize(toUInt8(n + 1)) FROM recursive_cte WHERE n < 10) +SELECT n FROM recursive_cte; + +SELECT '--'; + +WITH RECURSIVE recursive_cte AS (SELECT toUInt16(1) AS n, '1' AS concat UNION ALL SELECT materialize(toUInt8(n + 1)), concat || toString(n + 1) FROM recursive_cte WHERE n < 10) +SELECT n, concat FROM recursive_cte; + +SELECT '--'; + +WITH RECURSIVE recursive_cte AS (SELECT 1 AS n UNION ALL SELECT n + 1 FROM recursive_cte) +SELECT n FROM recursive_cte LIMIT 5; + +SELECT '--'; + +WITH RECURSIVE recursive_cte AS (SELECT materialize(toUInt8(1)) AS n UNION ALL SELECT materialize(toUInt8(n + 1)) FROM recursive_cte WHERE n < 10) +SELECT n FROM recursive_cte FORMAT Null SETTINGS max_recursive_cte_evaluation_depth = 5; -- { serverError TOO_DEEP_RECURSION } + +-- { echoOff } diff --git a/parser/testdata/03033_scalars_context_data_race/ast.json b/parser/testdata/03033_scalars_context_data_race/ast.json new file mode 100644 index 000000000..e9c28886a --- /dev/null +++ b/parser/testdata/03033_scalars_context_data_race/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001131487, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03033_scalars_context_data_race/metadata.json b/parser/testdata/03033_scalars_context_data_race/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_scalars_context_data_race/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git
a/parser/testdata/03033_scalars_context_data_race/query.sql b/parser/testdata/03033_scalars_context_data_race/query.sql new file mode 100644 index 000000000..8c72bb53c --- /dev/null +++ b/parser/testdata/03033_scalars_context_data_race/query.sql @@ -0,0 +1,104 @@ +DROP TABLE IF EXISTS test; +DROP TABLE IF EXISTS test_tmp; +DROP TABLE IF EXISTS dst; +DROP TABLE IF EXISTS view; + +CREATE TABLE test +( + `address` FixedString(20), + `deployer` FixedString(20), + `block_number` UInt256, + `block_hash` FixedString(32), + `block_timestamp` DateTime('UTC'), + `insertion_time` DateTime('UTC') +) +ENGINE = MergeTree +ORDER BY address +SETTINGS index_granularity = 8192; + +CREATE TABLE test_tmp as test; + +CREATE TABLE dst +( + `block_timestamp` AggregateFunction(max, Nullable(DateTime('UTC'))), + `block_hash` AggregateFunction(argMax, Nullable(FixedString(32)), DateTime('UTC')), + `block_number` AggregateFunction(argMax, Nullable(UInt256), DateTime('UTC')), + `deployer` AggregateFunction(argMax, Nullable(FixedString(20)), DateTime('UTC')), + `address` FixedString(20), + `name` AggregateFunction(argMax, Nullable(String), DateTime('UTC')), + `symbol` AggregateFunction(argMax, Nullable(String), DateTime('UTC')), + `decimals` AggregateFunction(argMax, Nullable(UInt8), DateTime('UTC')), + `is_proxy` AggregateFunction(argMax, Nullable(Bool), DateTime('UTC')), + `blacklist_flags` AggregateFunction(argMax, Array(Nullable(String)), DateTime('UTC')), + `whitelist_flags` AggregateFunction(argMax, Array(Nullable(String)), DateTime('UTC')), + `detected_standards` AggregateFunction(argMax, Array(Nullable(String)), DateTime('UTC')), + `amended_type` AggregateFunction(argMax, Nullable(String), DateTime('UTC')), + `comment` AggregateFunction(argMax, Nullable(String), DateTime('UTC')), + `_sources` AggregateFunction(groupUniqArray, String), + `_updated_at` AggregateFunction(max, DateTime('UTC')), + `_active` AggregateFunction(argMax, Bool, DateTime('UTC')) +) +ENGINE = MergeTree +ORDER BY address +SETTINGS index_granularity = 8192; + +CREATE MATERIALIZED VIEW view TO dst +( + `block_timestamp` AggregateFunction(max, Nullable(DateTime('UTC'))), + `block_hash` AggregateFunction(argMax, Nullable(FixedString(32)), DateTime('UTC')), + `block_number` AggregateFunction(argMax, Nullable(UInt256), DateTime('UTC')), + `deployer` AggregateFunction(argMax, Nullable(FixedString(20)), DateTime('UTC')), + `address` FixedString(20), + `name` AggregateFunction(argMax, Nullable(String), DateTime('UTC')), + `symbol` AggregateFunction(argMax, Nullable(String), DateTime('UTC')), + `decimals` AggregateFunction(argMax, Nullable(UInt8), DateTime('UTC')), + `is_proxy` AggregateFunction(argMax, Nullable(Bool), DateTime('UTC')), + `blacklist_flags` AggregateFunction(argMax, Array(Nullable(String)), DateTime('UTC')), + `whitelist_flags` AggregateFunction(argMax, Array(Nullable(String)), DateTime('UTC')), + `detected_standards` AggregateFunction(argMax, Array(Nullable(String)), DateTime('UTC')), + `amended_type` AggregateFunction(argMax, Nullable(String), DateTime('UTC')), + `comment` AggregateFunction(argMax, Nullable(String), DateTime('UTC')), + `_sources` AggregateFunction(groupUniqArray, String), + `_updated_at` AggregateFunction(max, DateTime('UTC')), + `_active` AggregateFunction(argMax, Bool, DateTime('UTC')) +) AS +(WITH ( + SELECT toDateTime('1970-01-01 00:00:00') + ) AS default_timestamp +SELECT + maxState(CAST(block_timestamp, 'Nullable(DateTime(\'UTC\'))')) AS block_timestamp, + argMaxState(CAST(block_hash, 
'Nullable(FixedString(32))'), insertion_time) AS block_hash, + argMaxState(CAST(block_number, 'Nullable(UInt256)'), insertion_time) AS block_number, + argMaxState(CAST(deployer, 'Nullable(FixedString(20))'), insertion_time) AS deployer, + address, + argMaxState(CAST(NULL, 'Nullable(String)'), CAST(default_timestamp, 'DateTime(\'UTC\')')) AS name, + argMaxState(CAST(NULL, 'Nullable(String)'), CAST(default_timestamp, 'DateTime(\'UTC\')')) AS symbol, + argMaxState(CAST(NULL, 'Nullable(UInt8)'), CAST(default_timestamp, 'DateTime(\'UTC\')')) AS decimals, + argMaxState(CAST(true, 'Nullable(Boolean)'), insertion_time) AS is_proxy, + argMaxState(CAST('[]', 'Array(Nullable(String))'), CAST(default_timestamp, 'DateTime(\'UTC\')')) AS blacklist_flags, + argMaxState(CAST('[]', 'Array(Nullable(String))'), CAST(default_timestamp, 'DateTime(\'UTC\')')) AS whitelist_flags, + argMaxState(CAST('[]', 'Array(Nullable(String))'), CAST(default_timestamp, 'DateTime(\'UTC\')')) AS detected_standards, + argMaxState(CAST(NULL, 'Nullable(String)'), CAST(default_timestamp, 'DateTime(\'UTC\')')) AS amended_type, + argMaxState(CAST(NULL, 'Nullable(String)'), CAST(default_timestamp, 'DateTime(\'UTC\')')) AS comment, + groupUniqArrayState('tokens_proxy_deployments') AS _sources, + maxState(insertion_time) AS _updated_at, + argMaxState(true, CAST(default_timestamp, 'DateTime(\'UTC\')')) AS _active +FROM test +WHERE insertion_time > toDateTime('2024-03-14 11:38:09') +GROUP BY address); + +set max_insert_threads=4; +insert into test_tmp select * from generateRandom() limit 24; +insert into test_tmp select * from generateRandom() limit 25; +insert into test_tmp select * from generateRandom() limit 26; +insert into test_tmp select * from generateRandom() limit 30; + +INSERT INTO test(address, deployer, block_number, block_hash, block_timestamp, insertion_time) SELECT * FROM test_tmp; + +select count() from test; + +DROP TABLE test; +DROP TABLE test_tmp; +DROP TABLE dst; +DROP TABLE view; + diff --git a/parser/testdata/03033_set_index_in/ast.json b/parser/testdata/03033_set_index_in/ast.json new file mode 100644 index 000000000..9ad8f3032 --- /dev/null +++ b/parser/testdata/03033_set_index_in/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001345662, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03033_set_index_in/metadata.json b/parser/testdata/03033_set_index_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_set_index_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03033_set_index_in/query.sql b/parser/testdata/03033_set_index_in/query.sql new file mode 100644 index 000000000..bc0676fc5 --- /dev/null +++ b/parser/testdata/03033_set_index_in/query.sql @@ -0,0 +1,11 @@ +SET optimize_trivial_insert_select = 1; + +create table a (k UInt64, v UInt64, index i (v) type set(100) granularity 2) engine MergeTree order by k settings index_granularity=8192, index_granularity_bytes=1000000000, min_index_granularity_bytes=0; +insert into a select number, intDiv(number, 4096) from numbers(1000000); +select sum(1+ignore(*)) from a where indexHint(v in (20, 40)); +select sum(1+ignore(*)) from a where indexHint(v in (select 20 union all select 40 union all select 60)); + +SELECT 1 FROM a PREWHERE v IN (SELECT 1) WHERE v IN (SELECT 2); + +select 1 from a where 
indexHint(indexHint(materialize(0))); +select sum(1+ignore(*)) from a where indexHint(indexHint(v in (20, 40))); diff --git a/parser/testdata/03033_tupleIntXYZ_and_tupleModulo/ast.json b/parser/testdata/03033_tupleIntXYZ_and_tupleModulo/ast.json new file mode 100644 index 000000000..c468aee4a --- /dev/null +++ b/parser/testdata/03033_tupleIntXYZ_and_tupleModulo/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tupleIntDiv (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Tuple_(UInt64_15, UInt64_10, UInt64_5)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_0, UInt64_0)" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001591775, + "rows_read": 8, + "bytes_read": 352 + } +} diff --git a/parser/testdata/03033_tupleIntXYZ_and_tupleModulo/metadata.json b/parser/testdata/03033_tupleIntXYZ_and_tupleModulo/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_tupleIntXYZ_and_tupleModulo/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03033_tupleIntXYZ_and_tupleModulo/query.sql b/parser/testdata/03033_tupleIntXYZ_and_tupleModulo/query.sql new file mode 100644 index 000000000..2cb7e726a --- /dev/null +++ b/parser/testdata/03033_tupleIntXYZ_and_tupleModulo/query.sql @@ -0,0 +1,13 @@ +SELECT tupleIntDiv((15, 10, 5), (0, 0, 0)); -- { serverError ILLEGAL_DIVISION } +SELECT tupleIntDiv((15, 10, 5), (5, 5, 5)); +SELECT tupleIntDiv((15, 10, 5), (5.5, 5.5, 5.5)); +SELECT tupleIntDivOrZero((5, 10, 15), (0, 0, 0)); -- no error thrown for zero divisors +SELECT tupleIntDivByNumber((15, 10, 5), 0); -- { serverError ILLEGAL_DIVISION } +SELECT tupleIntDivByNumber((15, 10, 5), 5); +SELECT tupleIntDivByNumber((15.2, 10.7, 5.5), 5.8); +SELECT tupleIntDivOrZeroByNumber((15, 10, 5), 5); +SELECT tupleIntDivOrZeroByNumber((15, 10, 5), 0); -- no error thrown for zero divisors +SELECT tupleModulo((15, 10, 5), (0, 3, 2)); -- { serverError ILLEGAL_DIVISION } +SELECT tupleModulo((15, 10, 5), (5, 3, 2)); +SELECT tupleModuloByNumber((15, 10, 5), 0); -- { serverError ILLEGAL_DIVISION } +SELECT tupleModuloByNumber((15, 10, 5), 2); \ No newline at end of file diff --git a/parser/testdata/03033_virtual_column_override/ast.json b/parser/testdata/03033_virtual_column_override/ast.json new file mode 100644 index 000000000..a1f27176d --- /dev/null +++ b/parser/testdata/03033_virtual_column_override/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery override_test (children 1)" + }, + { + "explain": " Identifier override_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001415024, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/03033_virtual_column_override/metadata.json b/parser/testdata/03033_virtual_column_override/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_virtual_column_override/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03033_virtual_column_override/query.sql b/parser/testdata/03033_virtual_column_override/query.sql new file mode 100644 index 000000000..49258bbb5 --- 
/dev/null +++ b/parser/testdata/03033_virtual_column_override/query.sql @@ -0,0 +1,3 @@ +DROP TABLE IF EXISTS override_test; +CREATE TABLE override_test (_part UInt32) ENGINE = MergeTree ORDER BY tuple() AS SELECT 1; +SELECT _part FROM override_test; diff --git a/parser/testdata/03033_with_fill_interpolate/ast.json b/parser/testdata/03033_with_fill_interpolate/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03033_with_fill_interpolate/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03033_with_fill_interpolate/metadata.json b/parser/testdata/03033_with_fill_interpolate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03033_with_fill_interpolate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03033_with_fill_interpolate/query.sql b/parser/testdata/03033_with_fill_interpolate/query.sql new file mode 100644 index 000000000..48457341e --- /dev/null +++ b/parser/testdata/03033_with_fill_interpolate/query.sql @@ -0,0 +1,28 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/55794 +SET enable_analyzer=1; +DROP TABLE IF EXISTS 03033_example_table; + +CREATE TABLE 03033_example_table +( + ColumnA Int64, + ColumnB Int64, + ColumnC Int64 +) +ENGINE = MergeTree() +ORDER BY ColumnA; + +WITH +helper AS ( + SELECT + * + FROM + 03033_example_table + ORDER BY + ColumnA WITH FILL INTERPOLATE ( + ColumnB AS ColumnC, + ColumnC AS ColumnA + ) +) +SELECT ColumnB FROM helper; + +DROP TABLE IF EXISTS 03033_example_table; diff --git a/parser/testdata/03034_ddls_and_merges_with_unusual_maps/ast.json b/parser/testdata/03034_ddls_and_merges_with_unusual_maps/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03034_ddls_and_merges_with_unusual_maps/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03034_ddls_and_merges_with_unusual_maps/metadata.json b/parser/testdata/03034_ddls_and_merges_with_unusual_maps/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03034_ddls_and_merges_with_unusual_maps/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03034_ddls_and_merges_with_unusual_maps/query.sql b/parser/testdata/03034_ddls_and_merges_with_unusual_maps/query.sql new file mode 100644 index 000000000..f0187a4a0 --- /dev/null +++ b/parser/testdata/03034_ddls_and_merges_with_unusual_maps/query.sql @@ -0,0 +1,33 @@ +-- Tests maps with "unusual" key types (Float32, Nothing, LowCardinality(String)) + +SET mutations_sync = 2; + +DROP TABLE IF EXISTS tab; + +SELECT 'Map(Nothing, ...) is non-comparable --> not usable as primary key'; +CREATE TABLE tab (m1 Map(String, AggregateFunction(sum, UInt32))) ENGINE = MergeTree ORDER BY m1; -- { serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY } + +SELECT 'But Map(Nothing, ...) can be a non-primary-key, it is quite useless though ...'; +CREATE TABLE tab (m3 Map(Nothing, String)) ENGINE = MergeTree ORDER BY tuple(); +-- INSERT INTO tab VALUES (map('', 'd')); -- { serverError NOT_IMPLEMENTED } -- The client can't serialize the data and fails. The query + -- doesn't reach the server and we can't check via 'serverError' :-/ +DROP TABLE tab; + +SELECT 'Map(Float32, ...) 
and Map(LC(String)) are okay as primary key'; +CREATE TABLE tab (m1 Map(Float32, String), m2 Map(LowCardinality(String), String)) ENGINE = MergeTree ORDER BY (m1, m2); +INSERT INTO tab VALUES (map(1.0, 'a'), map('b', 'b')); +INSERT INTO tab VALUES (map(2.0, 'aa'), map('bb', 'bb')); + +-- Test merge +OPTIMIZE TABLE tab FINAL; +SELECT * FROM tab ORDER BY m1, m2; + +DROP TABLE tab; + +SELECT 'Map(Float32, ...) and Map(LC(String)) as non-primary-key'; +CREATE TABLE tab (m1 Map(Float32, String), m2 Map(LowCardinality(String), String)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO tab VALUES (map(1.0, 'a'), map('b', 'b')), (map(2.0, 'aa'), map('bb', 'bb')); +ALTER TABLE tab UPDATE m1 = map(3.0, 'aaa') WHERE m1 = map(2.0, 'aa'); +SELECT * FROM tab ORDER BY m1, m2; + +DROP TABLE tab; diff --git a/parser/testdata/03034_dynamic_conversions/ast.json b/parser/testdata/03034_dynamic_conversions/ast.json new file mode 100644 index 000000000..80f8b5365 --- /dev/null +++ b/parser/testdata/03034_dynamic_conversions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001549108, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03034_dynamic_conversions/metadata.json b/parser/testdata/03034_dynamic_conversions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03034_dynamic_conversions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03034_dynamic_conversions/query.sql b/parser/testdata/03034_dynamic_conversions/query.sql new file mode 100644 index 000000000..c0b470f29 --- /dev/null +++ b/parser/testdata/03034_dynamic_conversions/query.sql @@ -0,0 +1,34 @@ +set allow_experimental_dynamic_type=1; +set allow_experimental_variant_type=1; +set use_variant_as_common_type=1; + +select number::Dynamic as d, dynamicType(d) from numbers(3); +select number::Dynamic(max_types=0) as d, dynamicType(d) from numbers(3); +select number::Dynamic::UInt64 as v from numbers(3); +select number::Dynamic::String as v from numbers(3); +select number::Dynamic::Date as v from numbers(3); +select number::Dynamic::Array(UInt64) as v from numbers(3); -- {serverError TYPE_MISMATCH} +select number::Dynamic::Variant(UInt64, String) as v, variantType(v) from numbers(3); +select (number % 2 ? 
NULL : number)::Dynamic as d, dynamicType(d) from numbers(3); + +select multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, range(number), NULL)::Dynamic as d, dynamicType(d) from numbers(6); +select multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, range(number), NULL)::Dynamic(max_types=0) as d, dynamicType(d) from numbers(6); +select multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, range(number), NULL)::Dynamic(max_types=1) as d, dynamicType(d) from numbers(6); +select multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, range(number), NULL)::Dynamic(max_types=2) as d, dynamicType(d) from numbers(6); + +select number::Dynamic(max_types=2)::Dynamic(max_types=3) as d from numbers(3); +select number::Dynamic(max_types=2)::Dynamic(max_types=1) as d from numbers(3); +select multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, range(number), NULL)::Dynamic(max_types=2)::Dynamic(max_types=1) as d, dynamicType(d) from numbers(6); +select multiIf(number % 4 == 0, number, number % 4 == 1, toDate(number), number % 4 == 2, range(number), NULL)::Dynamic(max_types=4)::Dynamic(max_types=3) as d, dynamicType(d) from numbers(6); + + +create table test (d Dynamic) engine = Memory; +insert into test values (NULL), (42), ('42.42'), (true), ('e10'); +select d::Float64 from test; +select d::Nullable(Float64) from test; +select d::String from test; +select d::Nullable(String) from test; +select d::UInt64 from test; -- {serverError CANNOT_PARSE_TEXT} +select d::Nullable(UInt64) from test; +select d::Date from test; -- {serverError CANNOT_PARSE_DATE} + diff --git a/parser/testdata/03034_json_extract_variant/ast.json b/parser/testdata/03034_json_extract_variant/ast.json new file mode 100644 index 000000000..3189fdfa9 --- /dev/null +++ b/parser/testdata/03034_json_extract_variant/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function JSONExtract (alias v) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '{\"a\" : 42}'" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal 'Variant(String, UInt32)'" + }, + { + "explain": " Function variantType (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier v" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001309906, + "rows_read": 12, + "bytes_read": 461 + } +} diff --git a/parser/testdata/03034_json_extract_variant/metadata.json b/parser/testdata/03034_json_extract_variant/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03034_json_extract_variant/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03034_json_extract_variant/query.sql b/parser/testdata/03034_json_extract_variant/query.sql new file mode 100644 index 000000000..54d5bed95 --- /dev/null +++ b/parser/testdata/03034_json_extract_variant/query.sql @@ -0,0 +1,6 @@ +select JSONExtract('{"a" : 42}', 'a', 'Variant(String, UInt32)') as v, variantType(v); +select JSONExtract('{"a" : "Hello"}', 'a', 'Variant(String, 
UInt32)') as v, variantType(v); +select JSONExtract('{"a" : [1, 2, 3]}', 'a', 'Variant(String, Array(UInt32))') as v, variantType(v); +select JSONExtract('{"obj" : {"a" : 42, "b" : "Hello", "c" : [1,2,3]}}', 'obj', 'Map(String, Variant(UInt32, String, Array(UInt32)))'); +select JSONExtractKeysAndValues('{"a" : 42, "b" : "Hello", "c" : [1,2,3]}', 'Variant(UInt32, String, Array(UInt32))') as v, toTypeName(v); + diff --git a/parser/testdata/03034_normalized_ast/ast.json b/parser/testdata/03034_normalized_ast/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03034_normalized_ast/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03034_normalized_ast/metadata.json b/parser/testdata/03034_normalized_ast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03034_normalized_ast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03034_normalized_ast/query.sql b/parser/testdata/03034_normalized_ast/query.sql new file mode 100644 index 000000000..8b518d6d1 --- /dev/null +++ b/parser/testdata/03034_normalized_ast/query.sql @@ -0,0 +1,9 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/49472 +SET enable_analyzer=1; +SELECT + concat(database, table) AS name, + count() +FROM clusterAllReplicas(test_shard_localhost, system.tables) +WHERE database=currentDatabase() +GROUP BY name +FORMAT Null; diff --git a/parser/testdata/03034_recursive_cte_tree/ast.json b/parser/testdata/03034_recursive_cte_tree/ast.json new file mode 100644 index 000000000..b4c07190d --- /dev/null +++ b/parser/testdata/03034_recursive_cte_tree/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001787314, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03034_recursive_cte_tree/metadata.json b/parser/testdata/03034_recursive_cte_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03034_recursive_cte_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03034_recursive_cte_tree/query.sql b/parser/testdata/03034_recursive_cte_tree/query.sql new file mode 100644 index 000000000..fa6229893 --- /dev/null +++ b/parser/testdata/03034_recursive_cte_tree/query.sql @@ -0,0 +1,37 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS tree; +CREATE TABLE tree +( + id UInt64, + link Nullable(UInt64), + data String +) ENGINE=TinyLog; + +INSERT INTO tree VALUES (0, NULL, 'ROOT'), (1, 0, 'Child_1'), (2, 0, 'Child_2'), (3, 1, 'Child_1_1'); + +WITH RECURSIVE search_tree AS ( + SELECT id, link, data + FROM tree t + WHERE t.id = 0 + UNION ALL + SELECT t.id, t.link, t.data + FROM tree t, search_tree st + WHERE t.link = st.id +) +SELECT * FROM search_tree; + +SELECT '--'; + +WITH RECURSIVE search_tree AS ( + SELECT id, link, data, [t.id] AS path + FROM tree t + WHERE t.id = 0 + UNION ALL + SELECT t.id, t.link, t.data, arrayConcat(path, [t.id]) + FROM tree t, search_tree st + WHERE t.link = st.id +) +SELECT * FROM search_tree; + +DROP TABLE tree; diff --git a/parser/testdata/03034_recursive_cte_tree_fuzz_crash_fix/ast.json b/parser/testdata/03034_recursive_cte_tree_fuzz_crash_fix/ast.json new file mode 100644 index 000000000..adbbdaa85 --- /dev/null +++ b/parser/testdata/03034_recursive_cte_tree_fuzz_crash_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": 
"String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001373721, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03034_recursive_cte_tree_fuzz_crash_fix/metadata.json b/parser/testdata/03034_recursive_cte_tree_fuzz_crash_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03034_recursive_cte_tree_fuzz_crash_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03034_recursive_cte_tree_fuzz_crash_fix/query.sql b/parser/testdata/03034_recursive_cte_tree_fuzz_crash_fix/query.sql new file mode 100644 index 000000000..1e26c5376 --- /dev/null +++ b/parser/testdata/03034_recursive_cte_tree_fuzz_crash_fix/query.sql @@ -0,0 +1,50 @@ +SET enable_analyzer = 1; +SET enable_global_with_statement=1; +SET session_timezone = 'Etc/UTC'; + +DROP TABLE IF EXISTS department__fuzz_1; +CREATE TABLE department__fuzz_1 (`id` DateTime, `parent_department` UInt128, `name` String) ENGINE = TinyLog; + +INSERT INTO department__fuzz_1 VALUES (0, NULL, 'ROOT'); +INSERT INTO department__fuzz_1 VALUES (1, 0, 'A'); +INSERT INTO department__fuzz_1 VALUES (2, 1, 'B'); +INSERT INTO department__fuzz_1 VALUES (3, 2, 'C'); +INSERT INTO department__fuzz_1 VALUES (4, 2, 'D'); +INSERT INTO department__fuzz_1 VALUES (5, 0, 'E'); +INSERT INTO department__fuzz_1 VALUES (6, 4, 'F'); +INSERT INTO department__fuzz_1 VALUES (7, 5, 'G'); + +DROP TABLE IF EXISTS department__fuzz_3; +CREATE TABLE department__fuzz_3 (`id` Date, `parent_department` UInt128, `name` LowCardinality(String)) ENGINE = TinyLog; + +INSERT INTO department__fuzz_3 VALUES (0, NULL, 'ROOT'); +INSERT INTO department__fuzz_3 VALUES (1, 0, 'A'); +INSERT INTO department__fuzz_3 VALUES (2, 1, 'B'); +INSERT INTO department__fuzz_3 VALUES (3, 2, 'C'); +INSERT INTO department__fuzz_3 VALUES (4, 2, 'D'); +INSERT INTO department__fuzz_3 VALUES (5, 0, 'E'); +INSERT INTO department__fuzz_3 VALUES (6, 4, 'F'); +INSERT INTO department__fuzz_3 VALUES (7, 5, 'G'); + +SELECT * FROM +( + WITH RECURSIVE q AS + ( + SELECT * FROM department__fuzz_3 + UNION ALL + ( + WITH RECURSIVE x AS + ( + SELECT * FROM department__fuzz_1 + UNION ALL + (SELECT * FROM q UNION ALL SELECT * FROM x) + ) + SELECT * FROM x + ) + ) + SELECT * FROM q LIMIT 32 +) +ORDER BY id ASC, parent_department DESC, name ASC; + +DROP TABLE department__fuzz_1; +DROP TABLE department__fuzz_3; diff --git a/parser/testdata/03034_recursive_cte_tree_merge_tree/ast.json b/parser/testdata/03034_recursive_cte_tree_merge_tree/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03034_recursive_cte_tree_merge_tree/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03034_recursive_cte_tree_merge_tree/metadata.json b/parser/testdata/03034_recursive_cte_tree_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03034_recursive_cte_tree_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03034_recursive_cte_tree_merge_tree/query.sql b/parser/testdata/03034_recursive_cte_tree_merge_tree/query.sql new file mode 100644 index 000000000..231aae296 --- /dev/null +++ b/parser/testdata/03034_recursive_cte_tree_merge_tree/query.sql @@ -0,0 +1,179 @@ +-- { echoOn } + +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS tree; +CREATE TABLE tree +( + id UInt64, + link Nullable(UInt64), + data String +) ENGINE=MergeTree ORDER BY (); + +INSERT INTO tree 
VALUES (0, NULL, 'ROOT'), (1, 0, 'Child_1'), (2, 0, 'Child_2'), (3, 1, 'Child_1_1'); + +WITH RECURSIVE search_tree AS ( + SELECT id, link, data + FROM tree t + WHERE t.id = 0 + UNION ALL + SELECT t.id, t.link, t.data + FROM tree t, search_tree st + WHERE t.link = st.id +) +SELECT * FROM search_tree; + +SELECT '--'; + +WITH RECURSIVE search_tree AS ( + SELECT id, link, data, [t.id] AS path + FROM tree t + WHERE t.id = 0 + UNION ALL + SELECT t.id, t.link, t.data, arrayConcat(path, [t.id]) + FROM tree t, search_tree st + WHERE t.link = st.id +) +SELECT * FROM search_tree; + +DROP TABLE tree; + +/** + * Based on https://github.com/postgres/postgres/blob/master/src/test/regress/sql/with.sql, license: + * + * PostgreSQL Database Management System + * (formerly known as Postgres, then as Postgres95) + * + * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group + * + * Portions Copyright (c) 1994, The Regents of the University of California + * + * Permission to use, copy, modify, and distribute this software and its + * documentation for any purpose, without fee, and without a written agreement + * is hereby granted, provided that the above copyright notice and this + * paragraph and the following two paragraphs appear in all copies. + * + * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR + * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING + * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS + * DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO + *PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. + */ + +-- +-- Tests for common table expressions (WITH query, ... SELECT ...) +-- + +-- +-- Some examples with a tree +-- +-- department structure represented here is as follows: +-- +-- ROOT-+->A-+->B-+->C +-- | | +-- | +->D-+->F +-- +->E-+->G + +DROP TABLE IF EXISTS department; +CREATE TABLE department ( + id UInt64, -- department ID + parent_department UInt64, -- upper department ID + name String -- department name +) +ENGINE=MergeTree ORDER BY (); + +INSERT INTO department VALUES (0, NULL, 'ROOT'); +INSERT INTO department VALUES (1, 0, 'A'); +INSERT INTO department VALUES (2, 1, 'B'); +INSERT INTO department VALUES (3, 2, 'C'); +INSERT INTO department VALUES (4, 2, 'D'); +INSERT INTO department VALUES (5, 0, 'E'); +INSERT INTO department VALUES (6, 4, 'F'); +INSERT INTO department VALUES (7, 5, 'G'); + + +-- extract all departments under 'A'. 
Result should be A, B, C, D and F +WITH RECURSIVE subdepartment AS +( + -- non recursive term + SELECT name as root_name, * FROM department WHERE name = 'A' + + UNION ALL + + -- recursive term + SELECT sd.root_name, d.* FROM department AS d, subdepartment AS sd + WHERE d.parent_department = sd.id +) +SELECT * FROM subdepartment ORDER BY name; + +-- extract all departments under 'A' with "level" number +WITH RECURSIVE subdepartment AS +( + -- non recursive term + SELECT 1 AS level, * FROM department WHERE name = 'A' + + UNION ALL + + -- recursive term + SELECT sd.level + 1, d.* FROM department AS d, subdepartment AS sd + WHERE d.parent_department = sd.id +) +SELECT * FROM subdepartment ORDER BY name; + +-- extract all departments under 'A' with "level" number. +-- Only shows level 2 or more +WITH RECURSIVE subdepartment AS +( + -- non recursive term + SELECT 1 AS level, * FROM department WHERE name = 'A' + + UNION ALL + + -- recursive term + SELECT sd.level + 1, d.* FROM department AS d, subdepartment AS sd + WHERE d.parent_department = sd.id +) +SELECT * FROM subdepartment WHERE level >= 2 ORDER BY name; + +-- "RECURSIVE" is ignored if the query has no self-reference +WITH RECURSIVE subdepartment AS +( + -- note lack of recursive UNION structure + SELECT * FROM department WHERE name = 'A' +) +SELECT * FROM subdepartment ORDER BY name; + +-- corner case in which sub-WITH gets initialized first +SELECT * FROM +( + WITH RECURSIVE q AS ( + SELECT * FROM department + UNION ALL + (WITH x AS (SELECT * FROM q) + SELECT * FROM x) + ) + SELECT * FROM q LIMIT 24 +) ORDER BY id, parent_department, name; + +SELECT * FROM +( + WITH RECURSIVE q AS ( + SELECT * FROM department + UNION ALL + (WITH RECURSIVE x AS ( + SELECT * FROM department + UNION ALL + (SELECT * FROM q UNION ALL SELECT * FROM x) + ) + SELECT * FROM x) + ) + SELECT * FROM q LIMIT 32 +) ORDER BY id, parent_department, name; + +-- { echoOff } diff --git a/parser/testdata/03035_alias_column_bug_distributed/ast.json b/parser/testdata/03035_alias_column_bug_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03035_alias_column_bug_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03035_alias_column_bug_distributed/metadata.json b/parser/testdata/03035_alias_column_bug_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03035_alias_column_bug_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03035_alias_column_bug_distributed/query.sql b/parser/testdata/03035_alias_column_bug_distributed/query.sql new file mode 100644 index 000000000..8f60808d7 --- /dev/null +++ b/parser/testdata/03035_alias_column_bug_distributed/query.sql @@ -0,0 +1,44 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/44414 +SET enable_analyzer=1; +DROP TABLE IF EXISTS alias_bug; +DROP TABLE IF EXISTS alias_bug_dist; +CREATE TABLE alias_bug +( + `src` String, + `theAlias` String ALIAS trimBoth(src) +) +ENGINE = MergeTree() +ORDER BY src; + +CREATE TABLE alias_bug_dist +AS alias_bug +ENGINE = Distributed('test_shard_localhost', currentDatabase(), 'alias_bug', rand()); + +INSERT INTO alias_bug VALUES ('SOURCE1'); + +-- OK +SELECT theAlias,CAST(NULL, 'Nullable(String)') AS src FROM alias_bug LIMIT 1 FORMAT Null; + +-- Not OK +SELECT theAlias,CAST(NULL, 'Nullable(String)') AS src FROM alias_bug_dist LIMIT 1 FORMAT Null; + +DROP TABLE IF EXISTS alias_bug; +DROP TABLE IF EXISTS alias_bug_dist; +CREATE 
TABLE alias_bug +( + `s` String, + `src` String, + `theAlias` String ALIAS trimBoth(src) +) +ENGINE = MergeTree() +ORDER BY src; + +CREATE TABLE alias_bug_dist +AS alias_bug +ENGINE = Distributed('test_shard_localhost', currentDatabase(), 'alias_bug', rand()); + +-- Unknown identifier +SELECT CAST(123, 'String') AS src,theAlias FROM alias_bug_dist LIMIT 1 FORMAT Null; + +DROP TABLE IF EXISTS alias_bug; +DROP TABLE IF EXISTS alias_bug_dist; diff --git a/parser/testdata/03035_argMinMax_numeric_non_extreme_bug/ast.json b/parser/testdata/03035_argMinMax_numeric_non_extreme_bug/ast.json new file mode 100644 index 000000000..fe46c0add --- /dev/null +++ b/parser/testdata/03035_argMinMax_numeric_non_extreme_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001621471, + "rows_read": 2, + "bytes_read": 61 + } +} diff --git a/parser/testdata/03035_argMinMax_numeric_non_extreme_bug/metadata.json b/parser/testdata/03035_argMinMax_numeric_non_extreme_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03035_argMinMax_numeric_non_extreme_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03035_argMinMax_numeric_non_extreme_bug/query.sql b/parser/testdata/03035_argMinMax_numeric_non_extreme_bug/query.sql new file mode 100644 index 000000000..deb580b90 --- /dev/null +++ b/parser/testdata/03035_argMinMax_numeric_non_extreme_bug/query.sql @@ -0,0 +1,26 @@ +CREATE TABLE IF NOT EXISTS test +( + `value` Float64 CODEC(Delta, LZ4), + `uuid` LowCardinality(String), + `time` DateTime64(3, 'UTC') CODEC(DoubleDelta, LZ4) +) +ENGINE = MergeTree() +ORDER BY uuid; + + +INSERT INTO test (uuid, time, value) +VALUES ('a1000000-0000-0000-0000-0000000000a1','2021-01-01 00:00:00.000',0), ('a1000000-0000-0000-0000-0000000000a1','2021-01-01 00:00:09.000',1), ('a1000000-0000-0000-0000-0000000000a1','2021-01-01 00:00:10.000',2), ('a1000000-0000-0000-0000-0000000000a1','2021-01-01 00:00:19.000',3), ('a1000000-0000-0000-0000-0000000000a1','2021-01-01 00:00:20.000',2), ('a1000000-0000-0000-0000-0000000000a1','2021-01-01 00:00:29.000',1), ('a1000000-0000-0000-0000-0000000000a1','2021-01-01 00:00:30.000',0), ('a1000000-0000-0000-0000-0000000000a1','2021-01-01 00:00:39.000',-1), ('a1000000-0000-0000-0000-0000000000a1','2021-01-01 00:00:40.000',-2), ('a1000000-0000-0000-0000-0000000000a1','2021-01-01 00:00:49.000',-3), ('a1000000-0000-0000-0000-0000000000a1','2021-01-01 00:00:50.000',-2), ('a1000000-0000-0000-0000-0000000000a1','2021-01-01 00:00:59.000',-1), ('a1000000-0000-0000-0000-0000000000a1','2021-01-01 00:01:00.000',0); + +SELECT + max(time), + max(toNullable(time)), + min(time), + min(toNullable(time)), + argMax(value, time), + argMax(value, toNullable(time)), + argMin(value, time), + argMin(value, toNullable(time)), + argMinIf(value, toNullable(time), time != '2021-01-01 00:00:00.000'), + argMaxIf(value, toNullable(time), time != '2021-01-01 00:00:59.000'), +FROM test +WHERE (time >= fromUnixTimestamp64Milli(1609459200000, 'UTC')) AND (time < fromUnixTimestamp64Milli(1609459260000, 'UTC')) FORMAT Vertical; diff --git a/parser/testdata/03035_dynamic_sorting/ast.json b/parser/testdata/03035_dynamic_sorting/ast.json new file mode 100644 index 000000000..193064967 --- /dev/null +++ b/parser/testdata/03035_dynamic_sorting/ast.json @@ -0,0 +1,25 
@@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001067735, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03035_dynamic_sorting/metadata.json b/parser/testdata/03035_dynamic_sorting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03035_dynamic_sorting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03035_dynamic_sorting/query.sql b/parser/testdata/03035_dynamic_sorting/query.sql new file mode 100644 index 000000000..43d6568a1 --- /dev/null +++ b/parser/testdata/03035_dynamic_sorting/query.sql @@ -0,0 +1,41 @@ +set allow_experimental_dynamic_type = 1; +set allow_suspicious_types_in_order_by=1; + +drop table if exists test; +create table test (d1 Dynamic(max_types=2), d2 Dynamic(max_types=2)) engine=Memory; + +insert into test values (42, 42), (42, 43), (43, 42), ('abc', 'abc'), ('abc', 'abd'), ('abd', 'abc'), +([1,2,3], [1,2,3]), ([1,2,3], [1,2,4]), ([1,2,4], [1,2,3]), +('2020-01-01', '2020-01-01'), ('2020-01-01', '2020-01-02'), ('2020-01-02', '2020-01-01'), +(NULL, NULL), (42, 'abc'), ('abc', 42), (42, [1,2,3]), ([1,2,3], 42), (42, NULL), (NULL, 42), +('abc', [1,2,3]), ([1,2,3], 'abc'), ('abc', NULL), (NULL, 'abc'), ([1,2,3], NULL), (NULL, [1,2,3]), +(42, '2020-01-01'), ('2020-01-01', 42), ('2020-01-01', 'abc'), ('abc', '2020-01-01'), +('2020-01-01', [1,2,3]), ([1,2,3], '2020-01-01'), ('2020-01-01', NULL), (NULL, '2020-01-01'); + +select 'order by d1 nulls first'; +select d1, dynamicType(d1), isDynamicElementInSharedData(d1) from test order by d1 nulls first; + +select 'order by d1 nulls last'; +select d1, dynamicType(d1), isDynamicElementInSharedData(d1) from test order by d1 nulls last; + +select 'order by d2 nulls first'; +select d2, dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d2 nulls first; + +select 'order by d2 nulls last'; +select d2, dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d2 nulls last; + + +select 'order by d1, d2 nulls first'; +select d1, d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d1, d2 nulls first; + +select 'order by d1, d2 nulls last'; +select d1, d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d1, d2 nulls last; + +select 'order by d2, d1 nulls first'; +select d1, d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d2, d1 nulls first; + +select 'order by d2, d1 nulls last'; +select d1, d2, dynamicType(d1), isDynamicElementInSharedData(d1), dynamicType(d2), isDynamicElementInSharedData(d2) from test order by d2, d1 nulls last; + +drop table test; + diff --git a/parser/testdata/03035_internal_functions_direct_call/ast.json b/parser/testdata/03035_internal_functions_direct_call/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03035_internal_functions_direct_call/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03035_internal_functions_direct_call/metadata.json b/parser/testdata/03035_internal_functions_direct_call/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03035_internal_functions_direct_call/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03035_internal_functions_direct_call/query.sql b/parser/testdata/03035_internal_functions_direct_call/query.sql new file mode 100644 index 000000000..e358e4983 --- /dev/null +++ b/parser/testdata/03035_internal_functions_direct_call/query.sql @@ -0,0 +1,23 @@ +-- These functions should not be called directly; they are only for internal use. +-- However, we cannot completely forbid it (because a query can come from another server, for example) +-- Check that usage of these functions does not lead to a crash or logical error + +SELECT __actionName(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT __actionName('aaa', 'aaa', 'aaa'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT __actionName('aaa', '') SETTINGS enable_analyzer = 1; -- { serverError BAD_ARGUMENTS } +SELECT __actionName('aaa', materialize('aaa')); -- { serverError BAD_ARGUMENTS,ILLEGAL_COLUMN } +SELECT __actionName(materialize('aaa'), 'aaa'); -- { serverError ILLEGAL_COLUMN } +SELECT __actionName('aaa', 'aaa'); + +SELECT concat(__actionName('aaa', toNullable('x')), '1') GROUP BY __actionName('aaa', 'x'); -- { serverError BAD_ARGUMENTS } + +SELECT __getScalar('aaa'); -- { serverError BAD_ARGUMENTS } +SELECT __getScalar(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT __getScalar(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT __getScalar(materialize('1')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT __scalarSubqueryResult('1'); +SELECT 'a' || __scalarSubqueryResult(a), materialize('1') as a; +SELECT __scalarSubqueryResult(a, a), materialize('1') as a; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT 1 as `__grouping_set`; diff --git a/parser/testdata/03035_materialized_primary_key/ast.json b/parser/testdata/03035_materialized_primary_key/ast.json new file mode 100644 index 000000000..db45a9f21 --- /dev/null +++ b/parser/testdata/03035_materialized_primary_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001223946, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03035_materialized_primary_key/metadata.json b/parser/testdata/03035_materialized_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03035_materialized_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03035_materialized_primary_key/query.sql b/parser/testdata/03035_materialized_primary_key/query.sql new file mode 100644 index 000000000..928aebc34 --- /dev/null +++ b/parser/testdata/03035_materialized_primary_key/query.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id; + +INSERT INTO test VALUES (1, 'Alice'), (2, 'Bob'); + +DROP VIEW IF EXISTS test_mv; +CREATE MATERIALIZED VIEW test_mv +( + id UInt64, + value String +) ENGINE=MergeTree +ORDER BY id AS SELECT id, value FROM test; + +DROP VIEW IF EXISTS test_mv_pk; +CREATE MATERIALIZED VIEW test_mv_pk +( + value String, + id UInt64 +) ENGINE=MergeTree PRIMARY KEY value +POPULATE AS SELECT value, id FROM test; + +SELECT name, primary_key +FROM system.tables +WHERE database = currentDatabase() AND name LIKE 'test%'; \ No newline at end of file diff --git a/parser/testdata/03035_morton_encode_no_rows/ast.json
b/parser/testdata/03035_morton_encode_no_rows/ast.json new file mode 100644 index 000000000..a89c6d74e --- /dev/null +++ b/parser/testdata/03035_morton_encode_no_rows/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function mortonEncode (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_1)" + }, + { + "explain": " Literal UInt64_65534" + }, + { + "explain": " Literal UInt64_65533" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001200331, + "rows_read": 11, + "bytes_read": 442 + } +} diff --git a/parser/testdata/03035_morton_encode_no_rows/metadata.json b/parser/testdata/03035_morton_encode_no_rows/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03035_morton_encode_no_rows/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03035_morton_encode_no_rows/query.sql b/parser/testdata/03035_morton_encode_no_rows/query.sql new file mode 100644 index 000000000..2663b1ac2 --- /dev/null +++ b/parser/testdata/03035_morton_encode_no_rows/query.sql @@ -0,0 +1,2 @@ +SELECT mortonEncode(materialize((1, 1)), 65534, 65533); +SELECT mortonEncode((1, 1), 65534, 65533); diff --git a/parser/testdata/03035_recursive_cte_postgres_1/ast.json b/parser/testdata/03035_recursive_cte_postgres_1/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03035_recursive_cte_postgres_1/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03035_recursive_cte_postgres_1/metadata.json b/parser/testdata/03035_recursive_cte_postgres_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03035_recursive_cte_postgres_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03035_recursive_cte_postgres_1/query.sql b/parser/testdata/03035_recursive_cte_postgres_1/query.sql new file mode 100644 index 000000000..9a4e313ce --- /dev/null +++ b/parser/testdata/03035_recursive_cte_postgres_1/query.sql @@ -0,0 +1,95 @@ +/** + * Based on https://github.com/postgres/postgres/blob/master/src/test/regress/sql/with.sql, license: + * + * PostgreSQL Database Management System + * (formerly known as Postgres, then as Postgres95) + * + * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group + * + * Portions Copyright (c) 1994, The Regents of the University of California + * + * Permission to use, copy, modify, and distribute this software and its + * documentation for any purpose, without fee, and without a written agreement + * is hereby granted, provided that the above copyright notice and this + * paragraph and the following two paragraphs appear in all copies. + * + * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR + * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING + * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS + * DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO + *PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. + */ + +-- +-- Tests for common table expressions (WITH query, ... SELECT ...) +-- + +-- { echoOn } + +SET enable_analyzer = 1; + +-- WITH RECURSIVE + +-- sum of 1..100 +WITH RECURSIVE t AS ( + SELECT 1 AS n +UNION ALL + SELECT n+1 FROM t WHERE n < 100 +) +SELECT sum(n) FROM t; + +WITH RECURSIVE t AS ( + SELECT 1 AS n +UNION ALL + SELECT n+1 FROM t WHERE n < 5 +) +SELECT * FROM t; + +-- This'd be an infinite loop, but outside query reads only as much as needed +WITH RECURSIVE t AS ( + SELECT 1 AS n +UNION ALL + SELECT n+1 FROM t) +SELECT * FROM t LIMIT 10; + +WITH RECURSIVE t AS ( + SELECT 'foo' AS n +UNION ALL + SELECT n || ' bar' FROM t WHERE length(n) < 20 +) +SELECT n, toTypeName(n) FROM t; + +WITH RECURSIVE t AS ( + SELECT '7' AS n +UNION ALL + SELECT n+1 FROM t WHERE n < 10 +) +SELECT n, toTypeName(n) FROM t; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- Deeply nested WITH caused a list-munging problem in v13 +-- Detection of cross-references and self-references +WITH RECURSIVE w1 AS + (WITH w2 AS + (WITH w3 AS + (WITH w4 AS + (WITH w5 AS + (WITH RECURSIVE w6 AS + (WITH w7 AS + (WITH w8 AS + (SELECT 1) + SELECT * FROM w8) + SELECT * FROM w7) + SELECT * FROM w6) + SELECT * FROM w5) + SELECT * FROM w4) + SELECT * FROM w3) + SELECT * FROM w2) +SELECT * FROM w1; + +-- { echoOff } diff --git a/parser/testdata/03036_clamp/ast.json b/parser/testdata/03036_clamp/ast.json new file mode 100644 index 000000000..5997e971a --- /dev/null +++ b/parser/testdata/03036_clamp/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function clamp (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_20" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001530721, + "rows_read": 9, + "bytes_read": 320 + } +} diff --git a/parser/testdata/03036_clamp/metadata.json b/parser/testdata/03036_clamp/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03036_clamp/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03036_clamp/query.sql b/parser/testdata/03036_clamp/query.sql new file mode 100644 index 000000000..9973265c1 --- /dev/null +++ b/parser/testdata/03036_clamp/query.sql @@ -0,0 +1,15 @@ +SELECT clamp(1, 10, 20); +SELECT clamp(30, 10, 20); +SELECT clamp(15, 10, 20); +SELECT clamp('a', 'b', 'c'); +SELECT clamp(today(), yesterday() - 10, yesterday() + 10) - today(); +SELECT clamp([], ['hello'], ['world']); +SELECT clamp(-1., -1000., 18446744073709551615.); +SELECT clamp(toNullable(123), 234, 456); +select clamp(1, null, 5); +select clamp(1, 6, null); +select clamp(1, 5, nan); +select clamp(toInt64(number), toInt64(number-1), toInt64(number+1)) from numbers(3); +select clamp(number, number-1, number+1) from numbers(3); -- { 
serverError NO_COMMON_TYPE } +select clamp(1, 3, 2); -- { serverError BAD_ARGUMENTS } +select clamp(1, data[1], data[2]) from (select arrayJoin([[1, 2], [2,3], [3,2], [4, 4]]) as data); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03036_dynamic_read_shared_subcolumns_compact_merge_tree/ast.json b/parser/testdata/03036_dynamic_read_shared_subcolumns_compact_merge_tree/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03036_dynamic_read_shared_subcolumns_compact_merge_tree/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03036_dynamic_read_shared_subcolumns_compact_merge_tree/metadata.json b/parser/testdata/03036_dynamic_read_shared_subcolumns_compact_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03036_dynamic_read_shared_subcolumns_compact_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03036_dynamic_read_shared_subcolumns_compact_merge_tree/query.sql b/parser/testdata/03036_dynamic_read_shared_subcolumns_compact_merge_tree/query.sql new file mode 100644 index 000000000..bff28fb5c --- /dev/null +++ b/parser/testdata/03036_dynamic_read_shared_subcolumns_compact_merge_tree/query.sql @@ -0,0 +1,43 @@ +-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; + +drop table if exists test; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; + +insert into test select number, number from numbers(100000) settings min_insert_block_size_rows=50000; +insert into test select number, 'str_' || toString(number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(200000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, NULL from numbers(300000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(400000, 400000) settings min_insert_block_size_rows=50000; +insert into test select number, if (number % 5 == 1, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)), number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, if (number % 5 == 1, ('str_' || number)::LowCardinality(String)::Dynamic, number::Dynamic) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; + +select distinct dynamicType(d) as type from test order by type; +select count() from test where dynamicType(d) == 'UInt64'; +select count() from test where d.UInt64 is not NULL; +select count() from test where dynamicType(d) == 'String'; +select count() from test where d.String is not NULL; +select count() from test where dynamicType(d) == 'Date'; +select count() from test where d.Date is not NULL; +select count() from test where dynamicType(d) == 'LowCardinality(String)'; +select count() from test where d.`LowCardinality(String)` is not NULL; +select count() from test where
dynamicType(d) == 'Array(Variant(String, UInt64))'; +select count() from test where not empty(d.`Array(Variant(String, UInt64))`); +select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'; +select count() from test where not empty(d.`Array(Array(Dynamic))`); +select count() from test where d is NULL; +select count() from test where not empty(d.`Tuple(a Array(Dynamic))`.a.String); + +select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.Int8, d.Date, d.`LowCardinality(String)`, d.`Array(String)` from test format Null; +select d, d.UInt64, d.Date, d.`LowCardinality(String)`, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.UInt64, d.Date, d.`LowCardinality(String)`, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, UInt64))`.String from test format Null; +select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test format Null; + +drop table test; diff --git a/parser/testdata/03036_dynamic_read_shared_subcolumns_memory/ast.json b/parser/testdata/03036_dynamic_read_shared_subcolumns_memory/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03036_dynamic_read_shared_subcolumns_memory/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03036_dynamic_read_shared_subcolumns_memory/metadata.json b/parser/testdata/03036_dynamic_read_shared_subcolumns_memory/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03036_dynamic_read_shared_subcolumns_memory/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03036_dynamic_read_shared_subcolumns_memory/query.sql b/parser/testdata/03036_dynamic_read_shared_subcolumns_memory/query.sql new file mode 100644 index 000000000..4eed3d155 --- /dev/null +++ b/parser/testdata/03036_dynamic_read_shared_subcolumns_memory/query.sql @@ -0,0 +1,43 @@ +-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; + +drop table if exists test; +create table test (id UInt64, d Dynamic(max_types=2)) engine=Memory; + +insert into test select number, number from numbers(100000) settings min_insert_block_size_rows=50000; +insert into test select number, 'str_' || toString(number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(200000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, NULL from numbers(300000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, 
number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(400000, 400000) settings min_insert_block_size_rows=50000; +insert into test select number, if (number % 5 == 1, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)), number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, if (number % 5 == 1, ('str_' || number)::LowCardinality(String)::Dynamic, number::Dynamic) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; + +select distinct dynamicType(d) as type from test order by type; +select count() from test where dynamicType(d) == 'UInt64'; +select count() from test where d.UInt64 is not NULL; +select count() from test where dynamicType(d) == 'String'; +select count() from test where d.String is not NULL; +select count() from test where dynamicType(d) == 'Date'; +select count() from test where d.Date is not NULL; +select count() from test where dynamicType(d) == 'LowCardinality(String)'; +select count() from test where d.`LowCardinality(String)` is not NULL; +select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'; +select count() from test where not empty(d.`Array(Variant(String, UInt64))`); +select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'; +select count() from test where not empty(d.`Array(Array(Dynamic))`); +select count() from test where d is NULL; +select count() from test where not empty(d.`Tuple(a Array(Dynamic))`.a.String); + +select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.Int8, d.Date, d.`Array(String)` from test format Null; +select d, d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, UInt64))`.String from test format Null; +select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test format Null; + +drop table test; diff --git a/parser/testdata/03036_dynamic_read_shared_subcolumns_wide_merge_tree/ast.json b/parser/testdata/03036_dynamic_read_shared_subcolumns_wide_merge_tree/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03036_dynamic_read_shared_subcolumns_wide_merge_tree/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03036_dynamic_read_shared_subcolumns_wide_merge_tree/metadata.json b/parser/testdata/03036_dynamic_read_shared_subcolumns_wide_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03036_dynamic_read_shared_subcolumns_wide_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03036_dynamic_read_shared_subcolumns_wide_merge_tree/query.sql 
b/parser/testdata/03036_dynamic_read_shared_subcolumns_wide_merge_tree/query.sql new file mode 100644 index 000000000..9e6e06521 --- /dev/null +++ b/parser/testdata/03036_dynamic_read_shared_subcolumns_wide_merge_tree/query.sql @@ -0,0 +1,45 @@ +-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; + +set min_bytes_to_use_direct_io = 0; -- min_bytes_to_use_direct_io > 0 is broken + +drop table if exists test; +create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; + +insert into test select number, number from numbers(100000) settings min_insert_block_size_rows=50000; +insert into test select number, 'str_' || toString(number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(200000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, NULL from numbers(300000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(400000, 400000) settings min_insert_block_size_rows=50000; +insert into test select number, if (number % 5 == 1, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)), number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, if (number % 5 == 1, ('str_' || number)::LowCardinality(String)::Dynamic, number::Dynamic) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; + +select distinct dynamicType(d) as type from test order by type; +select count() from test where dynamicType(d) == 'UInt64'; +select count() from test where d.UInt64 is not NULL; +select count() from test where dynamicType(d) == 'String'; +select count() from test where d.String is not NULL; +select count() from test where dynamicType(d) == 'Date'; +select count() from test where d.Date is not NULL; +select count() from test where dynamicType(d) == 'LowCardinality(String)'; +select count() from test where d.`LowCardinality(String)` is not NULL; +select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'; +select count() from test where not empty(d.`Array(Variant(String, UInt64))`); +select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'; +select count() from test where not empty(d.`Array(Array(Dynamic))`); +select count() from test where d is NULL; +select count() from test where not empty(d.`Tuple(a Array(Dynamic))`.a.String); + +select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.Int8, d.Date, d.`Array(String)` from test format Null; +select d, d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, 
UInt64))`.String from test format Null; +select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test format Null; + +drop table test; diff --git a/parser/testdata/03036_dynamic_read_subcolumns_compact_merge_tree/ast.json b/parser/testdata/03036_dynamic_read_subcolumns_compact_merge_tree/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03036_dynamic_read_subcolumns_compact_merge_tree/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03036_dynamic_read_subcolumns_compact_merge_tree/metadata.json b/parser/testdata/03036_dynamic_read_subcolumns_compact_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03036_dynamic_read_subcolumns_compact_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03036_dynamic_read_subcolumns_compact_merge_tree/query.sql b/parser/testdata/03036_dynamic_read_subcolumns_compact_merge_tree/query.sql new file mode 100644 index 000000000..822393d3c --- /dev/null +++ b/parser/testdata/03036_dynamic_read_subcolumns_compact_merge_tree/query.sql @@ -0,0 +1,41 @@ +-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan +-- Random settings limits: index_granularity=(100, None) + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; + +drop table if exists test; +create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; + +insert into test select number, number from numbers(100000) settings min_insert_block_size_rows=50000; +insert into test select number, 'str_' || toString(number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(200000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, NULL from numbers(300000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(400000, 400000) settings min_insert_block_size_rows=50000; +insert into test select number, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; + +select distinct dynamicType(d) as type from test order by type; +select count() from test where dynamicType(d) == 'UInt64'; +select count() from test where d.UInt64 is not NULL; +select count() from test where dynamicType(d) == 'String'; +select count() from test where d.String is not NULL; +select count() from test where dynamicType(d) == 'Date'; +select count() from test where d.Date is not NULL; +select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'; +select count() from test where 
not empty(d.`Array(Variant(String, UInt64))`); +select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'; +select count() from test where not empty(d.`Array(Array(Dynamic))`); +select count() from test where d is NULL; +select count() from test where not empty(d.`Tuple(a Array(Dynamic))`.a.String); + +select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.Int8, d.Date, d.`Array(String)` from test format Null; +select d, d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, UInt64))`.String from test format Null; +select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test format Null; + +drop table test; diff --git a/parser/testdata/03036_dynamic_read_subcolumns_memory/ast.json b/parser/testdata/03036_dynamic_read_subcolumns_memory/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03036_dynamic_read_subcolumns_memory/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03036_dynamic_read_subcolumns_memory/metadata.json b/parser/testdata/03036_dynamic_read_subcolumns_memory/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03036_dynamic_read_subcolumns_memory/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03036_dynamic_read_subcolumns_memory/query.sql b/parser/testdata/03036_dynamic_read_subcolumns_memory/query.sql new file mode 100644 index 000000000..c446c31fc --- /dev/null +++ b/parser/testdata/03036_dynamic_read_subcolumns_memory/query.sql @@ -0,0 +1,40 @@ +-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; + +drop table if exists test; +create table test (id UInt64, d Dynamic) engine=Memory; + +insert into test select number, number from numbers(100000) settings min_insert_block_size_rows=50000; +insert into test select number, 'str_' || toString(number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(200000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, NULL from numbers(300000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(400000, 400000) settings min_insert_block_size_rows=50000; +insert into test select number, 
[range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; + +select distinct dynamicType(d) as type from test order by type; +select count() from test where dynamicType(d) == 'UInt64'; +select count() from test where d.UInt64 is not NULL; +select count() from test where dynamicType(d) == 'String'; +select count() from test where d.String is not NULL; +select count() from test where dynamicType(d) == 'Date'; +select count() from test where d.Date is not NULL; +select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'; +select count() from test where not empty(d.`Array(Variant(String, UInt64))`); +select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'; +select count() from test where not empty(d.`Array(Array(Dynamic))`); +select count() from test where d is NULL; +select count() from test where not empty(d.`Tuple(a Array(Dynamic))`.a.String); + +select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.Int8, d.Date, d.`Array(String)` from test format Null; +select d, d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, UInt64))`.String from test format Null; +select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test format Null; + +drop table test; diff --git a/parser/testdata/03036_dynamic_read_subcolumns_wide_merge_tree/ast.json b/parser/testdata/03036_dynamic_read_subcolumns_wide_merge_tree/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03036_dynamic_read_subcolumns_wide_merge_tree/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03036_dynamic_read_subcolumns_wide_merge_tree/metadata.json b/parser/testdata/03036_dynamic_read_subcolumns_wide_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03036_dynamic_read_subcolumns_wide_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03036_dynamic_read_subcolumns_wide_merge_tree/query.sql b/parser/testdata/03036_dynamic_read_subcolumns_wide_merge_tree/query.sql new file mode 100644 index 000000000..85f5cc57e --- /dev/null +++ b/parser/testdata/03036_dynamic_read_subcolumns_wide_merge_tree/query.sql @@ -0,0 +1,43 @@ +-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan +-- Random settings limits: index_granularity=(100, None) + +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; + +set min_bytes_to_use_direct_io = 0; -- min_bytes_to_use_direct_io > 0 is broken + +drop table if exists test; +create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; 
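+-- Note: min_rows_for_wide_part=1 and min_bytes_for_wide_part=1 should force every inserted part into the Wide format, +-- so the Dynamic subcolumn reads below presumably go through separate per-subcolumn streams (the case this test targets).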
+ +insert into test select number, number from numbers(100000) settings min_insert_block_size_rows=50000; +insert into test select number, 'str_' || toString(number) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1)) from numbers(200000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, NULL from numbers(300000, 100000) settings min_insert_block_size_rows=50000; +insert into test select number, multiIf(number % 4 == 3, 'str_' || toString(number), number % 4 == 2, NULL, number % 4 == 1, number, arrayMap(x -> multiIf(number % 9 == 0, NULL, number % 9 == 3, 'str_' || toString(number), number), range(number % 10 + 1))) from numbers(400000, 400000) settings min_insert_block_size_rows=50000; +insert into test select number, [range((number % 10 + 1)::UInt64)]::Array(Array(Dynamic)) from numbers(100000, 100000) settings min_insert_block_size_rows=50000; + +select distinct dynamicType(d) as type from test order by type; +select count() from test where dynamicType(d) == 'UInt64'; +select count() from test where d.UInt64 is not NULL; +select count() from test where dynamicType(d) == 'String'; +select count() from test where d.String is not NULL; +select count() from test where dynamicType(d) == 'Date'; +select count() from test where d.Date is not NULL; +select count() from test where dynamicType(d) == 'Array(Variant(String, UInt64))'; +select count() from test where not empty(d.`Array(Variant(String, UInt64))`); +select count() from test where dynamicType(d) == 'Array(Array(Dynamic))'; +select count() from test where not empty(d.`Array(Array(Dynamic))`); +select count() from test where d is NULL; +select count() from test where not empty(d.`Tuple(a Array(Dynamic))`.a.String); + +select d, d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.UInt64, d.String, d.`Array(Variant(String, UInt64))` from test format Null; +select d.Int8, d.Date, d.`Array(String)` from test format Null; +select d, d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.UInt64, d.Date, d.`Array(Variant(String, UInt64))`, d.`Array(Variant(String, UInt64))`.size0, d.`Array(Variant(String, UInt64))`.UInt64, d.`Array(Variant(String, UInt64))`.String from test format Null; +select d, d.`Tuple(a UInt64, b String)`.a, d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Dynamic)`.`Variant(String, UInt64)`.UInt64, d.`Array(Dynamic)`.size0, d.`Array(Variant(String, UInt64))`.UInt64 from test format Null; +select d.`Array(Array(Dynamic))`.size1, d.`Array(Array(Dynamic))`.UInt64, d.`Array(Array(Dynamic))`.`Map(String, Tuple(a UInt64))`.values.a from test format Null; + +drop table test; diff --git a/parser/testdata/03036_join_filter_push_down_equivalent_sets/ast.json b/parser/testdata/03036_join_filter_push_down_equivalent_sets/ast.json new file mode 100644 index 000000000..0e541671c --- /dev/null +++ b/parser/testdata/03036_join_filter_push_down_equivalent_sets/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001417872, + "rows_read": 1, + "bytes_read": 11 
+ } +} diff --git a/parser/testdata/03036_join_filter_push_down_equivalent_sets/metadata.json b/parser/testdata/03036_join_filter_push_down_equivalent_sets/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03036_join_filter_push_down_equivalent_sets/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03036_join_filter_push_down_equivalent_sets/query.sql b/parser/testdata/03036_join_filter_push_down_equivalent_sets/query.sql new file mode 100644 index 000000000..f7e222d05 --- /dev/null +++ b/parser/testdata/03036_join_filter_push_down_equivalent_sets/query.sql @@ -0,0 +1,153 @@ +SET enable_analyzer = 1; +SET optimize_move_to_prewhere = 0; +SET query_plan_convert_outer_join_to_inner_join = 0; +SET parallel_hash_join_threshold = 0; + +DROP TABLE IF EXISTS test_table_1; +CREATE TABLE test_table_1 +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +CREATE TABLE test_table_2 +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO test_table_1 SELECT number, number FROM numbers(10); +INSERT INTO test_table_2 SELECT number, number FROM numbers(10); + +-- { echoOn } + +EXPLAIN header = 1, actions = 1 +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs INNER JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE lhs.id = 5 +SETTINGS query_plan_join_swap_table = 'false' +; + +SELECT '--'; + +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs INNER JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE lhs.id = 5; + +SELECT '--'; + +EXPLAIN header = 1, actions = 1 +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs INNER JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE rhs.id = 5 +SETTINGS query_plan_join_swap_table = 'false'; +; + +SELECT '--'; + +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs INNER JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE rhs.id = 5; + +SELECT '--'; + +EXPLAIN header = 1, actions = 1 +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs INNER JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE lhs.id = 5 AND rhs.id = 6 +SETTINGS query_plan_join_swap_table = 'false' +; + +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs INNER JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE lhs.id = 5 AND rhs.id = 6; + +SELECT '--'; + +EXPLAIN header = 1, actions = 1 +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs LEFT JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE lhs.id = 5 +SETTINGS query_plan_join_swap_table = 'false' +; + +SELECT '--'; + +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs LEFT JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE lhs.id = 5; + +SELECT '--'; + +EXPLAIN header = 1, actions = 1 +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs LEFT JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE rhs.id = 5 +SETTINGS query_plan_join_swap_table = 'false' +; + +SELECT '--'; + +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs LEFT JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE rhs.id = 5; + +SELECT '--'; + +EXPLAIN header = 1, actions = 1 +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs RIGHT JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE lhs.id = 5 +SETTINGS query_plan_join_swap_table = 'false' +; + +SELECT '--'; + 
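+-- RIGHT JOIN case: the preserved side flips relative to LEFT JOIN, so which of the equivalent filters can be +-- pushed to each join input presumably changes; the EXPLAIN above and the plain query below check this pair.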
+SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs RIGHT JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE lhs.id = 5; + +SELECT '--'; + +EXPLAIN header = 1, actions = 1 +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs RIGHT JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE rhs.id = 5 +SETTINGS query_plan_join_swap_table = 'false' +; + +SELECT '--'; + +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs RIGHT JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE rhs.id = 5; + +SELECT '--'; + +EXPLAIN header = 1, actions = 1 +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs FULL JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE lhs.id = 5 +SETTINGS query_plan_join_swap_table = 'false' +; + +SELECT '--'; + +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs FULL JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE lhs.id = 5; + +SELECT '--'; + +EXPLAIN header = 1, actions = 1 +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs FULL JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE rhs.id = 5 +SETTINGS query_plan_join_swap_table = 'false' +; + +SELECT '--'; + +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs FULL JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE rhs.id = 5; + +SELECT '--'; + +EXPLAIN header = 1, actions = 1 +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs FULL JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE lhs.id = 5 AND rhs.id = 6 +SETTINGS query_plan_join_swap_table = 'false' +; + +SELECT '--'; + +SELECT lhs.id, rhs.id, lhs.value, rhs.value FROM test_table_1 AS lhs FULL JOIN test_table_2 AS rhs ON lhs.id = rhs.id +WHERE lhs.id = 5 AND rhs.id = 6; + +-- { echoOff } + +DROP TABLE test_table_1; +DROP TABLE test_table_2; diff --git a/parser/testdata/03036_prewhere_lambda_function/ast.json b/parser/testdata/03036_prewhere_lambda_function/ast.json new file mode 100644 index 000000000..69709e6e0 --- /dev/null +++ b/parser/testdata/03036_prewhere_lambda_function/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001118011, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03036_prewhere_lambda_function/metadata.json b/parser/testdata/03036_prewhere_lambda_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03036_prewhere_lambda_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03036_prewhere_lambda_function/query.sql b/parser/testdata/03036_prewhere_lambda_function/query.sql new file mode 100644 index 000000000..8b9ebb775 --- /dev/null +++ b/parser/testdata/03036_prewhere_lambda_function/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS t; +CREATE TABLE t (A Array(Int64)) Engine = MergeTree ORDER BY tuple(); +INSERT INTO t VALUES ([1,2,3]), ([4,5,6]), ([7,8,9]); + +SELECT * FROM t PREWHERE arrayExists(x -> x = 5, A); + +DROP TABLE t; diff --git a/parser/testdata/03036_reading_s3_archives/ast.json b/parser/testdata/03036_reading_s3_archives/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03036_reading_s3_archives/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03036_reading_s3_archives/metadata.json b/parser/testdata/03036_reading_s3_archives/metadata.json 
new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03036_reading_s3_archives/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03036_reading_s3_archives/query.sql b/parser/testdata/03036_reading_s3_archives/query.sql new file mode 100644 index 000000000..43bda4ee7 --- /dev/null +++ b/parser/testdata/03036_reading_s3_archives/query.sql @@ -0,0 +1,22 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +SELECT id, data, _size, _file, _path FROM s3(s3_conn, filename='03036_archive1.zip :: example1.csv') ORDER BY (id, _file, _path); +SELECT id, data, _size, _file, _path FROM s3(s3_conn, filename='03036_archive2.zip :: example*.csv') ORDER BY (id, _file, _path); +SELECT id, data, _size, _file, _path FROM s3(s3_conn, filename='03036_archive*.zip :: example2.csv') ORDER BY (id, _file, _path); +SELECT id, data, _size, _file, _path FROM s3(s3_conn, filename='03036_archive*.zip :: example*') ORDER BY (id, _file, _path); +SELECT id, data, _size, _file, _path FROM s3(s3_conn, filename='03036_archive1.tar :: example1.csv') ORDER BY (id, _file, _path); +SELECT id, data, _size, _file, _path FROM s3(s3_conn, filename='03036_archive*.tar :: example4.csv') ORDER BY (id, _file, _path); +SELECT id, data, _size, _file, _path FROM s3(s3_conn, filename='03036_archive2.tar :: example*.csv') ORDER BY (id, _file, _path); +SELECT id, data, _size, _file, _path FROM s3(s3_conn, filename='03036_archive*.tar.gz :: example*.csv') ORDER BY (id, _file, _path); +SELECT id, data, _size, _file, _path FROM s3(s3_conn, filename='03036_archive*.tar* :: example{2..3}.csv') ORDER BY (id, _file, _path); +select id, data, _size, _file, _path from s3(s3_conn, filename='03036_archive2.zip :: nonexistent.csv'); -- { serverError CANNOT_EXTRACT_TABLE_STRUCTURE } +select id, data, _size, _file, _path from s3(s3_conn, filename='03036_archive2.zip :: nonexistent{2..3}.csv'); -- { serverError CANNOT_EXTRACT_TABLE_STRUCTURE } +CREATE TABLE table_zip22 Engine S3(s3_conn, filename='03036_archive2.zip :: example2.csv'); +select id, data, _size, _file, _path from table_zip22 ORDER BY (id, _file, _path); +CREATE table table_tar2star Engine S3(s3_conn, filename='03036_archive2.tar :: example*.csv'); +SELECT id, data, _size, _file, _path FROM table_tar2star ORDER BY (id, _file, _path); +CREATE table table_tarstarglobs Engine S3(s3_conn, filename='03036_archive*.tar* :: example{2..3}.csv'); +SELECT id, data, _size, _file, _path FROM table_tarstarglobs ORDER BY (id, _file, _path); +CREATE table table_noexist Engine s3(s3_conn, filename='03036_archive2.zip :: nonexistent.csv'); -- { serverError UNKNOWN_STORAGE } +SELECT id, data, _size, _file, _path FROM s3(s3_conn, filename='03036_compressed_file_archive.zip :: example7.csv', format='CSV', structure='auto', compression_method='gz') ORDER BY (id, _file, _path) diff --git a/parser/testdata/03036_reading_s3_cluster_archives/ast.json b/parser/testdata/03036_reading_s3_cluster_archives/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03036_reading_s3_cluster_archives/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03036_reading_s3_cluster_archives/metadata.json b/parser/testdata/03036_reading_s3_cluster_archives/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03036_reading_s3_cluster_archives/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03036_reading_s3_cluster_archives/query.sql 
b/parser/testdata/03036_reading_s3_cluster_archives/query.sql new file mode 100644 index 000000000..8502b517a --- /dev/null +++ b/parser/testdata/03036_reading_s3_cluster_archives/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +select 's3, single archive'; +SELECT id, data, _size, _file, _path FROM s3(s3_conn, filename='03036_archive2.zip :: example*.csv') ORDER BY (id, _file, _path); +select 's3Cluster: single archive'; +SELECT id, data, _size, _file, _path FROM s3Cluster('test_cluster_two_shards', s3_conn, filename='03036_archive2.zip :: example*.csv') ORDER BY (id, _file, _path); + +select 's3: many archives'; +SELECT id, data, _size, _file, _path FROM s3(s3_conn, filename='03036_archive*.zip :: example*.csv') ORDER BY (id, _file, _path); +select 's3Cluster: many archives'; +SELECT id, data, _size, _file, _path FROM s3Cluster('test_cluster_two_shards', s3_conn, filename='03036_archive*.zip :: example*.csv') ORDER BY (id, _file, _path); diff --git a/parser/testdata/03036_recursive_cte_postgres_2/ast.json b/parser/testdata/03036_recursive_cte_postgres_2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03036_recursive_cte_postgres_2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03036_recursive_cte_postgres_2/metadata.json b/parser/testdata/03036_recursive_cte_postgres_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03036_recursive_cte_postgres_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03036_recursive_cte_postgres_2/query.sql b/parser/testdata/03036_recursive_cte_postgres_2/query.sql new file mode 100644 index 000000000..b8e850b47 --- /dev/null +++ b/parser/testdata/03036_recursive_cte_postgres_2/query.sql @@ -0,0 +1,162 @@ +/** + * Based on https://github.com/postgres/postgres/blob/master/src/test/regress/sql/with.sql, license: + * + * PostgreSQL Database Management System + * (formerly known as Postgres, then as Postgres95) + * + * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group + * + * Portions Copyright (c) 1994, The Regents of the University of California + * + * Permission to use, copy, modify, and distribute this software and its + * documentation for any purpose, without fee, and without a written agreement + * is hereby granted, provided that the above copyright notice and this + * paragraph and the following two paragraphs appear in all copies. + * + * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR + * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING + * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS + * DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO + *PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. + */ + +-- +-- Tests for common table expressions (WITH query, ... SELECT ...) 
+-- + +-- { echoOn } + +SET enable_analyzer = 1; + +-- +-- Some examples with a tree +-- +-- department structure represented here is as follows: +-- +-- ROOT-+->A-+->B-+->C +-- | | +-- | +->D-+->F +-- +->E-+->G + +DROP TABLE IF EXISTS department; +CREATE TABLE department ( + id UInt64, -- department ID + parent_department UInt64, -- upper department ID + name String -- department name +) +ENGINE=TinyLog; + +INSERT INTO department VALUES (0, NULL, 'ROOT'); +INSERT INTO department VALUES (1, 0, 'A'); +INSERT INTO department VALUES (2, 1, 'B'); +INSERT INTO department VALUES (3, 2, 'C'); +INSERT INTO department VALUES (4, 2, 'D'); +INSERT INTO department VALUES (5, 0, 'E'); +INSERT INTO department VALUES (6, 4, 'F'); +INSERT INTO department VALUES (7, 5, 'G'); + + +-- extract all departments under 'A'. Result should be A, B, C, D and F +WITH RECURSIVE subdepartment AS +( + -- non recursive term + SELECT name as root_name, * FROM department WHERE name = 'A' + + UNION ALL + + -- recursive term + SELECT sd.root_name, d.* FROM department AS d, subdepartment AS sd + WHERE d.parent_department = sd.id +) +SELECT * FROM subdepartment ORDER BY name; + +-- extract all departments under 'A' with "level" number +WITH RECURSIVE subdepartment AS +( + -- non recursive term + SELECT 1 AS level, * FROM department WHERE name = 'A' + + UNION ALL + + -- recursive term + SELECT sd.level + 1, d.* FROM department AS d, subdepartment AS sd + WHERE d.parent_department = sd.id +) +SELECT * FROM subdepartment ORDER BY name; + +-- extract all departments under 'A' with "level" number. +-- Only shows level 2 or more +WITH RECURSIVE subdepartment AS +( + -- non recursive term + SELECT 1 AS level, * FROM department WHERE name = 'A' + + UNION ALL + + -- recursive term + SELECT sd.level + 1, d.* FROM department AS d, subdepartment AS sd + WHERE d.parent_department = sd.id +) +SELECT * FROM subdepartment WHERE level >= 2 ORDER BY name; + +-- "RECURSIVE" is ignored if the query has no self-reference +WITH RECURSIVE subdepartment AS +( + -- note lack of recursive UNION structure + SELECT * FROM department WHERE name = 'A' +) +SELECT * FROM subdepartment ORDER BY name; + +-- inside subqueries +SELECT count(*) FROM +( + WITH RECURSIVE t AS ( + SELECT toUInt64(1) AS n UNION ALL SELECT n + 1 FROM t WHERE n < 500 + ) + SELECT * FROM t +) AS t WHERE n < ( + SELECT count(*) FROM ( + WITH RECURSIVE t AS ( + SELECT toUInt64(1) AS n UNION ALL SELECT n + 1 FROM t WHERE n < 100 + ) + SELECT * FROM t WHERE n < 50000 + ) AS t WHERE n < 100); + +-- corner case in which sub-WITH gets initialized first +WITH RECURSIVE q AS ( + SELECT * FROM department + UNION ALL + (WITH x AS (SELECT * FROM q) + SELECT * FROM x) + ) +SELECT * FROM q LIMIT 24; + +WITH RECURSIVE q AS ( + SELECT * FROM department + UNION ALL + (WITH RECURSIVE x AS ( + SELECT * FROM department + UNION ALL + (SELECT * FROM q UNION ALL SELECT * FROM x) + ) + SELECT * FROM x) + ) +SELECT * FROM q LIMIT 32; + +-- recursive term has sub-UNION +WITH RECURSIVE t AS ( + SELECT 1 AS i, 2 AS j + UNION ALL + SELECT t2.i, t.j+1 FROM + (SELECT 2 AS i UNION ALL SELECT 3 AS i) AS t2 + JOIN t ON (t2.i = t.i+1)) + + SELECT * FROM t; + +-- { echoOff } diff --git a/parser/testdata/03036_schema_inference_cache_s3_archives/ast.json b/parser/testdata/03036_schema_inference_cache_s3_archives/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03036_schema_inference_cache_s3_archives/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03036_schema_inference_cache_s3_archives/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03036_schema_inference_cache_s3_archives/query.sql b/parser/testdata/03036_schema_inference_cache_s3_archives/query.sql
new file mode 100644
index 000000000..61b3e1d6f
--- /dev/null
+++ b/parser/testdata/03036_schema_inference_cache_s3_archives/query.sql
@@ -0,0 +1,9 @@
+-- Tags: no-fasttest
+-- Tag no-fasttest: Depends on AWS
+
+SELECT * FROM s3(s3_conn, filename='03036_archive1.zip :: example{1,2}.csv') ORDER BY tuple(*);
+SELECT schema_inference_mode, splitByChar('/', source)[-1] as file, schema FROM system.schema_inference_cache WHERE file = '03036_archive1.zip::example1.csv' ORDER BY file;
+
+SET schema_inference_mode = 'union';
+SELECT * FROM s3(s3_conn, filename='03036_json_archive.zip :: example{11,12}.jsonl') ORDER BY tuple(*);
+SELECT schema_inference_mode, splitByChar('/', source)[-1] as file, schema FROM system.schema_inference_cache WHERE startsWith(file, '03036_json_archive.zip') ORDER BY file;
\ No newline at end of file
diff --git a/parser/testdata/03036_with_numbers/ast.json b/parser/testdata/03036_with_numbers/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03036_with_numbers/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03036_with_numbers/metadata.json b/parser/testdata/03036_with_numbers/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03036_with_numbers/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03036_with_numbers/query.sql b/parser/testdata/03036_with_numbers/query.sql
new file mode 100644
index 000000000..bd0f6b617
--- /dev/null
+++ b/parser/testdata/03036_with_numbers/query.sql
@@ -0,0 +1,9 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/13843
+SET enable_analyzer=1;
+WITH 10 AS n
+SELECT *
+FROM numbers(n);
+
+WITH cast(10, 'UInt64') AS n
+SELECT *
+FROM numbers(n);
diff --git a/parser/testdata/03037_dot_product_overflow/ast.json b/parser/testdata/03037_dot_product_overflow/ast.json
new file mode 100644
index 000000000..e84612551
--- /dev/null
+++ b/parser/testdata/03037_dot_product_overflow/ast.json
@@ -0,0 +1,64 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 1)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Function ignore (children 1)"
+        },
+        {
+            "explain": "     ExpressionList (children 1)"
+        },
+        {
+            "explain": "      Function dotProduct (children 1)"
+        },
+        {
+            "explain": "       ExpressionList (children 2)"
+        },
+        {
+            "explain": "        Function materialize (children 1)"
+        },
+        {
+            "explain": "         ExpressionList (children 1)"
+        },
+        {
+            "explain": "          Literal Array_[UInt64_9223372036854775807, UInt64_1]"
+        },
+        {
+            "explain": "        Function materialize (children 1)"
+        },
+        {
+            "explain": "         ExpressionList (children 1)"
+        },
+        {
+            "explain": "          Literal Array_[Int64_-3, UInt64_1]"
+        }
+    ],
+
+    "rows": 14,
+
+    "statistics":
+    {
+        "elapsed": 0.001465398,
+        "rows_read": 14,
+        "bytes_read": 625
+    }
+}
diff --git a/parser/testdata/03037_dot_product_overflow/metadata.json b/parser/testdata/03037_dot_product_overflow/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03037_dot_product_overflow/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03037_dot_product_overflow/query.sql b/parser/testdata/03037_dot_product_overflow/query.sql
new file mode 100644
index 000000000..94d5eba62
--- /dev/null
+++ b/parser/testdata/03037_dot_product_overflow/query.sql
@@ -0,0 +1,2 @@
+select ignore(dotProduct(materialize([9223372036854775807, 1]), materialize([-3, 1])));
+
diff --git a/parser/testdata/03037_dynamic_merges_1_horizontal_compact_merge_tree/ast.json b/parser/testdata/03037_dynamic_merges_1_horizontal_compact_merge_tree/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_1_horizontal_compact_merge_tree/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03037_dynamic_merges_1_horizontal_compact_merge_tree/metadata.json b/parser/testdata/03037_dynamic_merges_1_horizontal_compact_merge_tree/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_1_horizontal_compact_merge_tree/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03037_dynamic_merges_1_horizontal_compact_merge_tree/query.sql b/parser/testdata/03037_dynamic_merges_1_horizontal_compact_merge_tree/query.sql
new file mode 100644
index 000000000..9bd2aee06
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_1_horizontal_compact_merge_tree/query.sql
@@ -0,0 +1,50 @@
+-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan
+-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None)
+
+set allow_experimental_dynamic_type=1;
+
+drop table if exists test;
+create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600;
+
+system stop merges test;
+insert into test select number, number from numbers(100000);
+insert into test select number, 'str_' || toString(number) from numbers(80000);
+insert into test select number, range(number % 10 + 1) from numbers(70000);
+insert into test select number, toDate(number) from numbers(60000);
+insert into test select number, toDateTime(number) from numbers(50000);
+insert into test select number, NULL from numbers(100000);
+
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+system start merges test; optimize table test final;;
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+
+system stop merges test;
+insert into test select number, map(number, number) from numbers(200000);
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+
+system stop merges test;
+insert into test select number, tuple(number, number) from numbers(10000);
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+
+system stop merges test;
+insert into test select number, 'str_' || number from numbers(30000);
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+
+
+drop table test;
diff --git a/parser/testdata/03037_dynamic_merges_1_horizontal_compact_wide_tree/ast.json b/parser/testdata/03037_dynamic_merges_1_horizontal_compact_wide_tree/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_1_horizontal_compact_wide_tree/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03037_dynamic_merges_1_horizontal_compact_wide_tree/metadata.json b/parser/testdata/03037_dynamic_merges_1_horizontal_compact_wide_tree/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_1_horizontal_compact_wide_tree/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03037_dynamic_merges_1_horizontal_compact_wide_tree/query.sql b/parser/testdata/03037_dynamic_merges_1_horizontal_compact_wide_tree/query.sql
new file mode 100644
index 000000000..ee2dadd30
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_1_horizontal_compact_wide_tree/query.sql
@@ -0,0 +1,49 @@
+-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan
+-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None)
+
+set allow_experimental_dynamic_type=1;
+
+drop table if exists test;
+create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_columns_to_activate=10, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600;
+
+system stop merges test;
+insert into test select number, number from numbers(100000);
+insert into test select number, 'str_' || toString(number) from numbers(80000);
+insert into test select number, range(number % 10 + 1) from numbers(70000);
+insert into test select number, toDate(number) from numbers(60000);
+insert into test select number, toDateTime(number) from numbers(50000);
+insert into test select number, NULL from numbers(100000);
+
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+system start merges test; optimize table test final;;
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+
+system stop merges test;
+insert into test select number, map(number, number) from numbers(200000);
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+
+system stop merges test;
+insert into test select number, tuple(number, number) from numbers(10000);
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+
+system stop merges test;
+insert into test select number, 'str_' || number from numbers(30000);
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+
+drop table test;
diff --git a/parser/testdata/03037_dynamic_merges_1_vertical_compact_merge_tree/ast.json b/parser/testdata/03037_dynamic_merges_1_vertical_compact_merge_tree/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_1_vertical_compact_merge_tree/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03037_dynamic_merges_1_vertical_compact_merge_tree/metadata.json b/parser/testdata/03037_dynamic_merges_1_vertical_compact_merge_tree/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_1_vertical_compact_merge_tree/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03037_dynamic_merges_1_vertical_compact_merge_tree/query.sql b/parser/testdata/03037_dynamic_merges_1_vertical_compact_merge_tree/query.sql
new file mode 100644
index 000000000..6c2ce8f9e
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_1_vertical_compact_merge_tree/query.sql
@@ -0,0 +1,49 @@
+-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan
+-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None)
+
+set allow_experimental_dynamic_type=1;
+
+drop table if exists test;
+create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600;
+
+system stop merges test;
+insert into test select number, number from numbers(100000);
+insert into test select number, 'str_' || toString(number) from numbers(80000);
+insert into test select number, range(number % 10 + 1) from numbers(70000);
+insert into test select number, toDate(number) from numbers(60000);
+insert into test select number, toDateTime(number) from numbers(50000);
+insert into test select number, NULL from numbers(100000);
+
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+system start merges test; optimize table test final;;
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+
+system stop merges test;
+insert into test select number, map(number, number) from numbers(200000);
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+
+system stop merges test;
+insert into test select number, tuple(number, number) from numbers(10000);
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+
+system stop merges test;
+insert into test select number, 'str_' || number from numbers(30000);
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+
+drop table test;
diff --git a/parser/testdata/03037_dynamic_merges_1_vertical_wide_merge_tree/ast.json b/parser/testdata/03037_dynamic_merges_1_vertical_wide_merge_tree/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_1_vertical_wide_merge_tree/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03037_dynamic_merges_1_vertical_wide_merge_tree/metadata.json b/parser/testdata/03037_dynamic_merges_1_vertical_wide_merge_tree/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_1_vertical_wide_merge_tree/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03037_dynamic_merges_1_vertical_wide_merge_tree/query.sql b/parser/testdata/03037_dynamic_merges_1_vertical_wide_merge_tree/query.sql
new file mode 100644
index 000000000..2350cddd2
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_1_vertical_wide_merge_tree/query.sql
@@ -0,0 +1,49 @@
+-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan
+-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None)
+
+set allow_experimental_dynamic_type=1;
+
+drop table if exists test;
+create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=10485760, index_granularity=8192, merge_max_block_size=8192, merge_max_block_size_bytes=10485760, lock_acquire_timeout_for_background_operations=600;
+
+system stop merges test;
+insert into test select number, number from numbers(100000);
+insert into test select number, 'str_' || toString(number) from numbers(80000);
+insert into test select number, range(number % 10 + 1) from numbers(70000);
+insert into test select number, toDate(number) from numbers(60000);
+insert into test select number, toDateTime(number) from numbers(50000);
+insert into test select number, NULL from numbers(100000);
+
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+system start merges test; optimize table test final;;
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+
+system stop merges test;
+insert into test select number, map(number, number) from numbers(200000);
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+
+system stop merges test;
+insert into test select number, tuple(number, number) from numbers(10000);
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+
+system stop merges test;
+insert into test select number, 'str_' || number from numbers(30000);
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+
+drop table test;
diff --git a/parser/testdata/03037_dynamic_merges_2_horizontal_compact_merge_tree/ast.json b/parser/testdata/03037_dynamic_merges_2_horizontal_compact_merge_tree/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_2_horizontal_compact_merge_tree/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03037_dynamic_merges_2_horizontal_compact_merge_tree/metadata.json b/parser/testdata/03037_dynamic_merges_2_horizontal_compact_merge_tree/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_2_horizontal_compact_merge_tree/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03037_dynamic_merges_2_horizontal_compact_merge_tree/query.sql b/parser/testdata/03037_dynamic_merges_2_horizontal_compact_merge_tree/query.sql
new file mode 100644
index 000000000..7f1934091
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_2_horizontal_compact_merge_tree/query.sql
@@ -0,0 +1,15 @@
+-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan
+-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None)
+
+set allow_experimental_dynamic_type = 1;
+
+drop table if exists test;
+create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, lock_acquire_timeout_for_background_operations=600;
+system stop merges test;
+insert into test select number, number from numbers(200000);
+insert into test select number, 'str_' || toString(number) from numbers(200000, 200000);
+insert into test select number, range(number % 10 + 1) from numbers(400000, 200000);
+system start merges test;
+optimize table test final;
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+drop table test;
diff --git a/parser/testdata/03037_dynamic_merges_2_horizontal_wide_merge_tree/ast.json b/parser/testdata/03037_dynamic_merges_2_horizontal_wide_merge_tree/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_2_horizontal_wide_merge_tree/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03037_dynamic_merges_2_horizontal_wide_merge_tree/metadata.json b/parser/testdata/03037_dynamic_merges_2_horizontal_wide_merge_tree/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_2_horizontal_wide_merge_tree/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03037_dynamic_merges_2_horizontal_wide_merge_tree/query.sql b/parser/testdata/03037_dynamic_merges_2_horizontal_wide_merge_tree/query.sql
new file mode 100644
index 000000000..f1f387fae
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_2_horizontal_wide_merge_tree/query.sql
@@ -0,0 +1,15 @@
+-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan
+-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None)
+
+set allow_experimental_dynamic_type = 1;
+
+drop table if exists test;
+create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, lock_acquire_timeout_for_background_operations=600;
+system stop merges test;
+insert into test select number, number from numbers(200000);
+insert into test select number, 'str_' || toString(number) from numbers(200000, 200000);
+insert into test select number, range(number % 10 + 1) from numbers(400000, 200000);
+system start merges test;
+optimize table test final;
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+drop table test;
diff --git a/parser/testdata/03037_dynamic_merges_2_vertical_compact_merge_tree/ast.json b/parser/testdata/03037_dynamic_merges_2_vertical_compact_merge_tree/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_2_vertical_compact_merge_tree/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03037_dynamic_merges_2_vertical_compact_merge_tree/metadata.json b/parser/testdata/03037_dynamic_merges_2_vertical_compact_merge_tree/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_2_vertical_compact_merge_tree/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03037_dynamic_merges_2_vertical_compact_merge_tree/query.sql b/parser/testdata/03037_dynamic_merges_2_vertical_compact_merge_tree/query.sql
new file mode 100644
index 000000000..cc11c454d
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_2_vertical_compact_merge_tree/query.sql
@@ -0,0 +1,15 @@
+-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan
+-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None)
+
+set allow_experimental_dynamic_type = 1;
+
+drop table if exists test;
+create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600;
+system stop merges test;
+insert into test select number, number from numbers(200000);
+insert into test select number, 'str_' || toString(number) from numbers(200000, 200000);
+insert into test select number, range(number % 10 + 1) from numbers(400000, 200000);
+system start merges test;
+optimize table test final;
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+drop table test;
diff --git a/parser/testdata/03037_dynamic_merges_2_vertical_wide_merge_tree/ast.json b/parser/testdata/03037_dynamic_merges_2_vertical_wide_merge_tree/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_2_vertical_wide_merge_tree/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03037_dynamic_merges_2_vertical_wide_merge_tree/metadata.json b/parser/testdata/03037_dynamic_merges_2_vertical_wide_merge_tree/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_2_vertical_wide_merge_tree/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03037_dynamic_merges_2_vertical_wide_merge_tree/query.sql b/parser/testdata/03037_dynamic_merges_2_vertical_wide_merge_tree/query.sql
new file mode 100644
index 000000000..ffb2aca8b
--- /dev/null
+++ b/parser/testdata/03037_dynamic_merges_2_vertical_wide_merge_tree/query.sql
@@ -0,0 +1,15 @@
+-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan
+-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None)
+
+set allow_experimental_dynamic_type = 1;
+
+drop table if exists test;
+create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600;
+system stop merges test;
+insert into test select number, number from numbers(200000);
+insert into test select number, 'str_' || toString(number) from numbers(200000, 200000);
+insert into test select number, range(number % 10 + 1) from numbers(400000, 200000);
+system start merges test;
+optimize table test final;
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+drop table test;
diff --git a/parser/testdata/03037_precent_rank/ast.json b/parser/testdata/03037_precent_rank/ast.json
new file mode 100644
index 000000000..1799c54e7
--- /dev/null
+++ b/parser/testdata/03037_precent_rank/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery product_groups (children 1)"
+        },
+        {
+            "explain": " Identifier product_groups"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001157563,
+        "rows_read": 2,
+        "bytes_read": 80
+    }
+}
diff --git a/parser/testdata/03037_precent_rank/metadata.json b/parser/testdata/03037_precent_rank/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03037_precent_rank/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03037_precent_rank/query.sql b/parser/testdata/03037_precent_rank/query.sql
new file mode 100644
index 000000000..b0f83fa33
--- /dev/null
+++ b/parser/testdata/03037_precent_rank/query.sql
@@ -0,0 +1,52 @@
+drop table if exists product_groups;
+drop table if exists products;
+
+CREATE TABLE product_groups (
+    group_id Int64,
+    group_name String
+) Engine = Memory;
+
+
+CREATE TABLE products (
+    product_id Int64,
+    product_name String,
+    price DECIMAL(11, 2),
+    group_id Int64
+) Engine = Memory;
+
+INSERT INTO product_groups VALUES (1, 'Smartphone'),(2, 'Laptop'),(3, 'Tablet');
+
+INSERT INTO products (product_id,product_name, group_id,price) VALUES (1, 'Microsoft Lumia', 1, 200), (2, 'HTC One', 1, 400), (3, 'Nexus', 1, 500), (4, 'iPhone', 1, 900),(5, 'HP Elite', 2, 1200),(6, 'Lenovo Thinkpad', 2, 700),(7, 'Sony VAIO', 2, 700),(8, 'Dell Vostro', 2, 800),(9, 'iPad', 3, 700),(10, 'Kindle Fire', 3, 150),(11, 'Samsung Galaxy Tab', 3, 200);
+
+INSERT INTO product_groups VALUES (4, 'Unknow');
+INSERT INTO products (product_id,product_name, group_id,price) VALUES (12, 'Others', 4, 200);
+
+SELECT *
+FROM
+(
+    SELECT
+        product_name,
+        group_name,
+        price,
+        rank() OVER (PARTITION BY group_name ORDER BY price ASC) AS rank,
+        percent_rank() OVER (PARTITION BY group_name ORDER BY price ASC) AS percent
+    FROM products
+    INNER JOIN product_groups USING (group_id)
+) AS t
+ORDER BY
+    group_name ASC,
+    price ASC,
+    product_name ASC;
+
+drop table product_groups;
+drop table products;
+
+select number, row_number, cast(percent_rank * 10000 as Int32) as percent_rank
+from (
+    select number, row_number() over () as row_number, percent_rank() over (order by number) as percent_rank
+    from numbers(10000)
+    order by number
+    limit 10
+)
+settings max_block_size=100;
+
diff --git a/parser/testdata/03037_recursive_cte_postgres_3/ast.json b/parser/testdata/03037_recursive_cte_postgres_3/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03037_recursive_cte_postgres_3/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03037_recursive_cte_postgres_3/metadata.json b/parser/testdata/03037_recursive_cte_postgres_3/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03037_recursive_cte_postgres_3/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03037_recursive_cte_postgres_3/query.sql b/parser/testdata/03037_recursive_cte_postgres_3/query.sql
new file mode 100644
index 000000000..3413185fd
--- /dev/null
+++ b/parser/testdata/03037_recursive_cte_postgres_3/query.sql
@@ -0,0 +1,90 @@
+/**
+ * Based on https://github.com/postgres/postgres/blob/master/src/test/regress/sql/with.sql, license:
+ *
+ * PostgreSQL Database Management System
+ * (formerly known as Postgres, then as Postgres95)
+ *
+ * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
+ *
+ * Portions Copyright (c) 1994, The Regents of the University of California
+ *
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation for any purpose, without fee, and without a written agreement
+ * is hereby granted, provided that the above copyright notice and this
+ * paragraph and the following two paragraphs appear in all copies.
+ *
+ * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
+ * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
+ * DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO
+ * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ */
+
+--
+-- Tests for common table expressions (WITH query, ... SELECT ...)
+--
+
+-- { echoOn }
+
+SET enable_analyzer = 1;
+SET join_algorithm = 'hash';
+
+--
+-- different tree example
+--
+DROP TABLE IF EXISTS tree;
+CREATE TABLE tree(
+    id UInt64,
+    parent_id Nullable(UInt64)
+)
+ENGINE=TinyLog;
+
+INSERT INTO tree
+VALUES (1, NULL), (2, 1), (3,1), (4,2), (5,2), (6,2), (7,3), (8,3), (9,4), (10,4), (11,7), (12,7), (13,7), (14, 9), (15,11), (16,11);
+
+--
+-- get all paths from "second level" nodes to leaf nodes
+--
+WITH RECURSIVE t AS (
+    SELECT 1 AS id, []::Array(UInt64) AS path
+UNION ALL
+    SELECT tree.id, arrayConcat(t.path, [tree.id])
+    FROM tree JOIN t ON (tree.parent_id = t.id)
+)
+SELECT t1.*, t2.* FROM t AS t1 JOIN t AS t2 ON
+    (t1.path[1] = t2.path[1] AND
+    length(t1.path) = 1 AND
+    length(t2.path) > 1)
+    ORDER BY t1.id, t2.id;
+
+-- just count 'em
+WITH RECURSIVE t AS (
+    SELECT 1 AS id, []::Array(UInt64) AS path
+UNION ALL
+    SELECT tree.id, arrayConcat(t.path, [tree.id])
+    FROM tree JOIN t ON (tree.parent_id = t.id)
+)
+SELECT t1.id, count(t2.path) FROM t AS t1 JOIN t AS t2 ON
+    (t1.path[1] = t2.path[1] AND
+    length(t1.path) = 1 AND
+    length(t2.path) > 1)
+    GROUP BY t1.id
+    ORDER BY t1.id;
+
+-- -- this variant tickled a whole-row-variable bug in 8.4devel
+WITH RECURSIVE t AS (
+    SELECT 1 AS id, []::Array(UInt64) AS path
+UNION ALL
+    SELECT tree.id, arrayConcat(t.path, [tree.id])
+    FROM tree JOIN t ON (tree.parent_id = t.id)
+)
+SELECT t1.id, t2.path, tuple(t2.*) FROM t AS t1 JOIN t AS t2 ON
+(t1.id=t2.id);
+
+-- { echoOff }
diff --git a/parser/testdata/03037_s3_write_to_globbed_partitioned_path/ast.json b/parser/testdata/03037_s3_write_to_globbed_partitioned_path/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03037_s3_write_to_globbed_partitioned_path/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03037_s3_write_to_globbed_partitioned_path/metadata.json b/parser/testdata/03037_s3_write_to_globbed_partitioned_path/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03037_s3_write_to_globbed_partitioned_path/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03037_s3_write_to_globbed_partitioned_path/query.sql b/parser/testdata/03037_s3_write_to_globbed_partitioned_path/query.sql
new file mode 100644
index 000000000..1de89a593
--- /dev/null
+++ b/parser/testdata/03037_s3_write_to_globbed_partitioned_path/query.sql
@@ -0,0 +1,4 @@
+-- Tags: no-fasttest
+
+insert into function s3('http://localhost:11111/test/data_*_{_partition_id}.csv') partition by number % 3 select * from numbers(10); -- {serverError DATABASE_ACCESS_DENIED}
+
diff --git a/parser/testdata/03037_union_view/ast.json b/parser/testdata/03037_union_view/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03037_union_view/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03037_union_view/metadata.json b/parser/testdata/03037_union_view/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03037_union_view/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03037_union_view/query.sql b/parser/testdata/03037_union_view/query.sql
new file mode 100644
index 000000000..d963444fd
--- /dev/null
+++ b/parser/testdata/03037_union_view/query.sql
@@ -0,0 +1,31 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/55803
+SET enable_analyzer=1;
+DROP TABLE IF EXISTS broken_table;
+DROP TABLE IF EXISTS broken_view;
+
+CREATE TABLE broken_table
+(
+    start DateTime64(6),
+    end DateTime64(6),
+)
+ENGINE = ReplacingMergeTree(start)
+ORDER BY (start);
+
+CREATE VIEW broken_view as
+SELECT
+    t.start as start,
+    t.end as end,
+    cast(datediff('second', t.start, t.end) as float) as total_sec
+FROM broken_table t FINAL
+UNION ALL
+SELECT
+    null as start,
+    null as end,
+    null as total_sec;
+
+SELECT v.start, v.total_sec
+FROM broken_view v FINAL
+WHERE v.start IS NOT NULL;
+
+DROP TABLE IF EXISTS broken_table;
+DROP TABLE IF EXISTS broken_view;
diff --git a/parser/testdata/03037_zero_step_in_numbers_table_function/ast.json b/parser/testdata/03037_zero_step_in_numbers_table_function/ast.json
new file mode 100644
index 000000000..c76dde526
--- /dev/null
+++ b/parser/testdata/03037_zero_step_in_numbers_table_function/ast.json
@@ -0,0 +1,61 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 2)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Asterisk"
+        },
+        {
+            "explain": "   TablesInSelectQuery (children 1)"
+        },
+        {
+            "explain": "    TablesInSelectQueryElement (children 1)"
+        },
+        {
+            "explain": "     TableExpression (children 1)"
+        },
+        {
+            "explain": "      Function numbers (children 1)"
+        },
+        {
+            "explain": "       ExpressionList (children 3)"
+        },
+        {
+            "explain": "        Literal UInt64_1"
+        },
+        {
+            "explain": "        Literal UInt64_10"
+        },
+        {
+            "explain": "        Literal UInt64_0"
+        }
+    ],
+
+    "rows": 13,
+
+    "statistics":
+    {
+        "elapsed": 0.001433546,
+        "rows_read": 13,
+        "bytes_read": 486
+    }
+}
diff --git a/parser/testdata/03037_zero_step_in_numbers_table_function/metadata.json b/parser/testdata/03037_zero_step_in_numbers_table_function/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03037_zero_step_in_numbers_table_function/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03037_zero_step_in_numbers_table_function/query.sql b/parser/testdata/03037_zero_step_in_numbers_table_function/query.sql
new file mode 100644
index 000000000..08fafd6dd
--- /dev/null
+++ b/parser/testdata/03037_zero_step_in_numbers_table_function/query.sql
@@ -0,0 +1,2 @@
+select * from numbers(1, 10, 0); -- {serverError BAD_ARGUMENTS}
+
diff --git a/parser/testdata/03038_ambiguous_column/ast.json b/parser/testdata/03038_ambiguous_column/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03038_ambiguous_column/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03038_ambiguous_column/metadata.json b/parser/testdata/03038_ambiguous_column/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03038_ambiguous_column/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03038_ambiguous_column/query.sql b/parser/testdata/03038_ambiguous_column/query.sql
new file mode 100644
index 000000000..131bc552f
--- /dev/null
+++ b/parser/testdata/03038_ambiguous_column/query.sql
@@ -0,0 +1,42 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/48308
+SET enable_analyzer=1;
+DROP TABLE IF EXISTS 03038_table;
+
+CREATE TABLE 03038_table
+(
+    `time` DateTime
+)
+ENGINE = MergeTree
+ORDER BY time;
+
+SELECT *
+FROM
+(
+    SELECT
+        toUInt64(time) AS time,
+        toHour(03038_table.time)
+    FROM 03038_table
+)
+ORDER BY time ASC;
+
+WITH subquery AS (
+    SELECT
+        toUInt64(time) AS time,
+        toHour(03038_table.time)
+    FROM 03038_table
+)
+SELECT *
+FROM subquery
+ORDER BY subquery.time ASC;
+
+SELECT *
+FROM
+(
+    SELECT
+        toUInt64(time) AS time,
+        toHour(03038_table.time) AS hour
+    FROM 03038_table
+)
+ORDER BY time ASC, hour;
+
+DROP TABLE IF EXISTS 03038_table;
diff --git a/parser/testdata/03038_move_partition_to_oneself_deadlock/ast.json b/parser/testdata/03038_move_partition_to_oneself_deadlock/ast.json
new file mode 100644
index 000000000..deb9d2d94
--- /dev/null
+++ b/parser/testdata/03038_move_partition_to_oneself_deadlock/ast.json
@@ -0,0 +1,25 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "Set"
+        }
+    ],
+
+    "rows": 1,
+
+    "statistics":
+    {
+        "elapsed": 0.001318795,
+        "rows_read": 1,
+        "bytes_read": 11
+    }
+}
diff --git a/parser/testdata/03038_move_partition_to_oneself_deadlock/metadata.json b/parser/testdata/03038_move_partition_to_oneself_deadlock/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03038_move_partition_to_oneself_deadlock/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03038_move_partition_to_oneself_deadlock/query.sql b/parser/testdata/03038_move_partition_to_oneself_deadlock/query.sql
new file mode 100644
index 000000000..f3072fb35
--- /dev/null
+++ b/parser/testdata/03038_move_partition_to_oneself_deadlock/query.sql
@@ -0,0 +1,8 @@
+SET optimize_trivial_insert_select = 1;
+
+DROP TABLE IF EXISTS move_partition_to_oneself;
+CREATE TABLE move_partition_to_oneself (key UInt64 CODEC(NONE)) ENGINE = MergeTree ORDER BY tuple();
+INSERT INTO move_partition_to_oneself SELECT number FROM numbers(1e6);
+SELECT partition, rows FROM system.parts WHERE database = currentDatabase() AND table = 'move_partition_to_oneself' and active;
+ALTER TABLE move_partition_to_oneself MOVE PARTITION tuple() TO TABLE move_partition_to_oneself;
+SELECT partition, rows FROM system.parts WHERE database = currentDatabase() AND table = 'move_partition_to_oneself' and active;
diff --git a/parser/testdata/03038_nested_dynamic_merges_compact_horizontal/ast.json b/parser/testdata/03038_nested_dynamic_merges_compact_horizontal/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03038_nested_dynamic_merges_compact_horizontal/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03038_nested_dynamic_merges_compact_horizontal/metadata.json b/parser/testdata/03038_nested_dynamic_merges_compact_horizontal/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03038_nested_dynamic_merges_compact_horizontal/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03038_nested_dynamic_merges_compact_horizontal/query.sql b/parser/testdata/03038_nested_dynamic_merges_compact_horizontal/query.sql
new file mode 100644
index 000000000..e3b8ea635
--- /dev/null
+++ b/parser/testdata/03038_nested_dynamic_merges_compact_horizontal/query.sql
@@ -0,0 +1,45 @@
+-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan
+-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None)
+
+set allow_experimental_variant_type = 1;
+set use_variant_as_common_type = 1;
+set allow_experimental_dynamic_type = 1;
+set enable_named_columns_in_function_tuple = 0;
+
+drop table if exists test;;
+create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, lock_acquire_timeout_for_background_operations=600;
+
+system stop merges test;
+insert into test select number, number from numbers(100000);
+insert into test select number, tuple(if(number % 3 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=2)) from numbers(100000);
+insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=2)) from numbers(50000);
+insert into test select number, multiIf(number % 5 == 0, tuple(if(number % 3 == 0, toDateTime(number), toIPv4(number)))::Tuple(a Dynamic(max_types=2)), number % 5 == 1 or number % 5 == 2, number, 'str_' || number) from numbers(100000);
+
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+
+system stop merges test;
+insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(50000);
+insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(200000);
+
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+
+system stop merges test;
+insert into test select number, tuple(toDateTime(number))::Tuple(a Dynamic(max_types=2)) from numbers(40000);
+
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+
+drop table test;
diff --git a/parser/testdata/03038_nested_dynamic_merges_compact_vertical/ast.json b/parser/testdata/03038_nested_dynamic_merges_compact_vertical/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03038_nested_dynamic_merges_compact_vertical/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03038_nested_dynamic_merges_compact_vertical/metadata.json b/parser/testdata/03038_nested_dynamic_merges_compact_vertical/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03038_nested_dynamic_merges_compact_vertical/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03038_nested_dynamic_merges_compact_vertical/query.sql b/parser/testdata/03038_nested_dynamic_merges_compact_vertical/query.sql
new file mode 100644
index 000000000..db11dfc93
--- /dev/null
+++ b/parser/testdata/03038_nested_dynamic_merges_compact_vertical/query.sql
@@ -0,0 +1,45 @@
+-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan
+-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None)
+
+set allow_experimental_variant_type = 1;
+set use_variant_as_common_type = 1;
+set allow_experimental_dynamic_type = 1;
+set enable_named_columns_in_function_tuple = 0;
+
+drop table if exists test;;
+create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600;
+
+system stop merges test;
+insert into test select number, number from numbers(100000);
+insert into test select number, tuple(if(number % 3 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=2)) from numbers(100000);
+insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=2)) from numbers(50000);
+insert into test select number, multiIf(number % 5 == 0, tuple(if(number % 3 == 0, toDateTime(number), toIPv4(number)))::Tuple(a Dynamic(max_types=2)), number % 5 == 1 or number % 5 == 2, number, 'str_' || number) from numbers(100000);
+
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+
+system stop merges test;
+insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(50000);
+insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(200000);
+
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+
+system stop merges test;
+insert into test select number, tuple(toDateTime(number))::Tuple(a Dynamic(max_types=2)) from numbers(40000);
+
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+
+drop table test;
diff --git a/parser/testdata/03038_nested_dynamic_merges_wide_horizontal/ast.json b/parser/testdata/03038_nested_dynamic_merges_wide_horizontal/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03038_nested_dynamic_merges_wide_horizontal/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03038_nested_dynamic_merges_wide_horizontal/metadata.json b/parser/testdata/03038_nested_dynamic_merges_wide_horizontal/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03038_nested_dynamic_merges_wide_horizontal/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03038_nested_dynamic_merges_wide_horizontal/query.sql b/parser/testdata/03038_nested_dynamic_merges_wide_horizontal/query.sql
new file mode 100644
index 000000000..4ed4d00fe
--- /dev/null
+++ b/parser/testdata/03038_nested_dynamic_merges_wide_horizontal/query.sql
@@ -0,0 +1,45 @@
+-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan
+-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None)
+
+set allow_experimental_variant_type = 1;
+set use_variant_as_common_type = 1;
+set allow_experimental_dynamic_type = 1;
+set enable_named_columns_in_function_tuple = 0;
+
+drop table if exists test;;
+create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, lock_acquire_timeout_for_background_operations=600;
+
+system stop merges test;
+insert into test select number, number from numbers(100000);
+insert into test select number, tuple(if(number % 3 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=2)) from numbers(100000);
+insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=2)) from numbers(50000);
+insert into test select number, multiIf(number % 5 == 0, tuple(if(number % 3 == 0, toDateTime(number), toIPv4(number)))::Tuple(a Dynamic(max_types=2)), number % 5 == 1 or number % 5 == 2, number, 'str_' || number) from numbers(100000);
+
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+
+system stop merges test;
+insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(50000);
+insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(200000);
+
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+
+system stop merges test;
+insert into test select number, tuple(toDateTime(number))::Tuple(a Dynamic(max_types=2)) from numbers(40000);
+
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+
+drop table test;
diff --git a/parser/testdata/03038_nested_dynamic_merges_wide_vertical/ast.json b/parser/testdata/03038_nested_dynamic_merges_wide_vertical/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03038_nested_dynamic_merges_wide_vertical/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03038_nested_dynamic_merges_wide_vertical/metadata.json b/parser/testdata/03038_nested_dynamic_merges_wide_vertical/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03038_nested_dynamic_merges_wide_vertical/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03038_nested_dynamic_merges_wide_vertical/query.sql b/parser/testdata/03038_nested_dynamic_merges_wide_vertical/query.sql
new file mode 100644
index 000000000..2f8b258ba
--- /dev/null
+++ b/parser/testdata/03038_nested_dynamic_merges_wide_vertical/query.sql
@@ -0,0 +1,45 @@
+-- Tags: long, no-tsan, no-msan, no-ubsan, no-asan
+-- Random settings limits: index_granularity=(100, None); merge_max_block_size=(100, None)
+
+set allow_experimental_variant_type = 1;
+set use_variant_as_common_type = 1;
+set allow_experimental_dynamic_type = 1;
+set enable_named_columns_in_function_tuple = 0;
+
+drop table if exists test;;
+create table test (id UInt64, d Dynamic(max_types=2)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, lock_acquire_timeout_for_background_operations=600;
+
+system stop merges test;
+insert into test select number, number from numbers(100000);
+insert into test select number, tuple(if(number % 3 == 0, number, 'str_' || toString(number)))::Tuple(a Dynamic(max_types=2)) from numbers(100000);
+insert into test select number, tuple(if(number % 3 == 0, toDate(number), range(number % 10)))::Tuple(a Dynamic(max_types=2)) from numbers(50000);
+insert into test select number, multiIf(number % 5 == 0, tuple(if(number % 3 == 0, toDateTime(number), toIPv4(number)))::Tuple(a Dynamic(max_types=2)), number % 5 == 1 or number % 5 == 2, number, 'str_' || number) from numbers(100000);
+
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+
+system stop merges test;
+insert into test select number, tuple(if(number % 3 == 0, toDateTime(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(50000);
+insert into test select number, tuple(if(number % 2 == 0, tuple(number), NULL))::Tuple(a Dynamic(max_types=2)) from numbers(200000);
+
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+
+system stop merges test;
+insert into test select number, tuple(toDateTime(number))::Tuple(a Dynamic(max_types=2)) from numbers(40000);
+
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+system start merges test;
+optimize table test final;
+select '---------------------';
+select count(), dynamicType(d) || ':' || dynamicType(d.`Tuple(a Dynamic(max_types=2))`.a) as type, isDynamicElementInSharedData(d.`Tuple(a Dynamic(max_types=2))`.a) as flag from test group by type, flag order by count(), type;
+
+drop table test;
diff --git a/parser/testdata/03038_recursive_cte_postgres_4/ast.json b/parser/testdata/03038_recursive_cte_postgres_4/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03038_recursive_cte_postgres_4/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03038_recursive_cte_postgres_4/metadata.json b/parser/testdata/03038_recursive_cte_postgres_4/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03038_recursive_cte_postgres_4/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03038_recursive_cte_postgres_4/query.sql b/parser/testdata/03038_recursive_cte_postgres_4/query.sql
new file mode 100644
index 000000000..1fb340102
--- /dev/null
+++ b/parser/testdata/03038_recursive_cte_postgres_4/query.sql
@@ -0,0 +1,73 @@
+/**
+ * Based on https://github.com/postgres/postgres/blob/master/src/test/regress/sql/with.sql, license:
+ *
+ * PostgreSQL Database Management System
+ * (formerly known as Postgres, then as Postgres95)
+ *
+ * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
+ *
+ * Portions Copyright (c) 1994, The Regents of the University of California
+ *
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation for any purpose, without fee, and without a written agreement
+ * is hereby granted, provided that the above copyright notice and this
+ * paragraph and the following two paragraphs appear in all copies.
+ *
+ * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
+ * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
+ * DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO
+ *PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ */
+
+--
+-- Tests for common table expressions (WITH query, ... SELECT ...)
+--
+
+-- { echoOn }
+
+SET enable_analyzer = 1;
+SET join_algorithm = 'hash';
+
+--
+-- test cycle detection
+--
+
+DROP TABLE IF EXISTS graph;
+CREATE TABLE graph(
+ f UInt64,
+ t UInt64,
+ label String
+)
+ENGINE = TinyLog;
+
+INSERT INTO graph VALUES (1, 2, 'arc 1 -> 2'), (1, 3, 'arc 1 -> 3'), (2, 3, 'arc 2 -> 3'), (1, 4, 'arc 1 -> 4'), (4, 5, 'arc 4 -> 5'), (5, 1, 'arc 5 -> 1');
+
+WITH RECURSIVE search_graph AS (
+ SELECT *, false AS is_cycle, [tuple(g.f, g.t)] AS path FROM graph g
+ UNION ALL
+ SELECT g.*, has(path, tuple(g.f, g.t)), arrayConcat(sg.path, [tuple(g.f, g.t)])
+ FROM graph g, search_graph sg
+ WHERE g.f = sg.t AND NOT is_cycle
+)
+SELECT * FROM search_graph
+SETTINGS query_plan_join_swap_table = 'false'
+;
+
+-- ordering by the path column has same effect as SEARCH DEPTH FIRST
+WITH RECURSIVE search_graph AS (
+ SELECT *, false AS is_cycle, [tuple(g.f, g.t)] AS path FROM graph g
+ UNION ALL
+ SELECT g.*, has(path, tuple(g.f, g.t)), arrayConcat(sg.path, [tuple(g.f, g.t)])
+ FROM graph g, search_graph sg
+ WHERE g.f = sg.t AND NOT is_cycle
+)
+SELECT * FROM search_graph ORDER BY path;
+
+-- { echoOff }
diff --git a/parser/testdata/03039_recursive_cte_postgres_5/ast.json b/parser/testdata/03039_recursive_cte_postgres_5/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03039_recursive_cte_postgres_5/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03039_recursive_cte_postgres_5/metadata.json b/parser/testdata/03039_recursive_cte_postgres_5/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03039_recursive_cte_postgres_5/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03039_recursive_cte_postgres_5/query.sql b/parser/testdata/03039_recursive_cte_postgres_5/query.sql
new file mode 100644
index 000000000..eb4043cca
--- /dev/null
+++ b/parser/testdata/03039_recursive_cte_postgres_5/query.sql
@@ -0,0 +1,84 @@
+
+/**
+ * Based on https://github.com/postgres/postgres/blob/master/src/test/regress/sql/with.sql, license:
+ *
+ * PostgreSQL Database Management System
+ * (formerly known as Postgres, then as Postgres95)
+ *
+ * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
+ *
+ * Portions Copyright (c) 1994, The Regents of the University of California
+ *
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation for any purpose, without fee, and without a written agreement
+ * is hereby granted, provided that the above copyright notice and this
+ * paragraph and the following two paragraphs appear in all copies.
+ *
+ * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
+ * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
+ * DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO
+ *PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ */
+
+--
+-- Tests for common table expressions (WITH query, ... SELECT ...)
+--
+
+-- { echoOn }
+
+SET enable_analyzer = 1;
+
+--
+-- test multiple WITH queries
+--
+WITH RECURSIVE
+ y AS (SELECT 1 AS id),
+ x AS (SELECT * FROM y UNION ALL SELECT id+1 FROM x WHERE id < 5)
+SELECT * FROM x ORDER BY id;
+
+-- forward reference OK
+WITH RECURSIVE
+ x AS (SELECT * FROM y UNION ALL SELECT id+1 FROM x WHERE id < 5),
+ y AS (SELECT 1 AS id)
+ SELECT * FROM x ORDER BY id;
+
+WITH RECURSIVE
+ x AS
+ (SELECT 1 AS id UNION ALL SELECT id+1 FROM x WHERE id < 5),
+ y AS
+ (SELECT 1 AS id UNION ALL SELECT id+1 FROM y WHERE id < 10)
+ SELECT y.*, x.* FROM y LEFT JOIN x USING (id) ORDER BY y.id;
+
+WITH RECURSIVE
+ x AS
+ (SELECT 1 AS id UNION ALL SELECT id+1 FROM x WHERE id < 5),
+ y AS
+ (SELECT 1 AS id UNION ALL SELECT id+1 FROM x WHERE id < 10)
+ SELECT y.*, x.* FROM y LEFT JOIN x USING (id) ORDER BY y.id;
+
+WITH RECURSIVE
+ x AS
+ (SELECT 1 AS id UNION ALL SELECT id+1 FROM x WHERE id < 3 ),
+ y AS
+ (SELECT * FROM x UNION ALL SELECT * FROM x),
+ z AS
+ (SELECT * FROM x UNION ALL SELECT id+1 FROM z WHERE id < 10)
+ SELECT * FROM z ORDER BY id;
+
+WITH RECURSIVE
+ x AS
+ (SELECT 1 AS id UNION ALL SELECT id+1 FROM x WHERE id < 3 ),
+ y AS
+ (SELECT * FROM x UNION ALL SELECT * FROM x),
+ z AS
+ (SELECT * FROM y UNION ALL SELECT id+1 FROM z WHERE id < 10)
+ SELECT * FROM z ORDER BY id;
+
+-- { echoOff }
diff --git a/parser/testdata/03039_unknown_identifier_window_function/ast.json b/parser/testdata/03039_unknown_identifier_window_function/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03039_unknown_identifier_window_function/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03039_unknown_identifier_window_function/metadata.json b/parser/testdata/03039_unknown_identifier_window_function/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03039_unknown_identifier_window_function/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03039_unknown_identifier_window_function/query.sql b/parser/testdata/03039_unknown_identifier_window_function/query.sql
new file mode 100644
index 000000000..652085d9f
--- /dev/null
+++ b/parser/testdata/03039_unknown_identifier_window_function/query.sql
@@ -0,0 +1,35 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/45535
+SET enable_analyzer=1;
+
+SELECT
+ *,
+ count() OVER () AS c
+FROM numbers(10)
+ORDER BY toString(number);
+
+
+WITH
+ toString(number) as str
+SELECT
+ *,
+ count() OVER () AS c
+FROM numbers(10)
+ORDER BY str;
+
+SELECT
+ *,
+ count() OVER () AS c,
+ toString(number) as str
+FROM numbers(10)
+ORDER BY str;
+
+
+WITH
+ test AS (
+ SELECT
+ *,
+ count() OVER () AS c
+ FROM numbers(10)
+ )
+SELECT * FROM test
+ORDER BY toString(number);
diff --git a/parser/testdata/03040_alias_column_join/ast.json b/parser/testdata/03040_alias_column_join/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03040_alias_column_join/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03040_alias_column_join/metadata.json b/parser/testdata/03040_alias_column_join/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03040_alias_column_join/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03040_alias_column_join/query.sql b/parser/testdata/03040_alias_column_join/query.sql
new file mode 100644
index 000000000..6ffd749a6
--- /dev/null
+++ b/parser/testdata/03040_alias_column_join/query.sql
@@ -0,0 +1,14 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/44365
+SET enable_analyzer=1;
+DROP TABLE IF EXISTS 03040_test;
+
+CREATE TABLE 03040_test
+(
+ id UInt64,
+ val String alias 'value: '||toString(id)
+) ENGINE = MergeTree
+ORDER BY tuple();
+
+SELECT val FROM 03040_test t GROUP BY val;
+
+DROP TABLE IF EXISTS 03040_test;
diff --git a/parser/testdata/03040_array_sum_and_join/ast.json b/parser/testdata/03040_array_sum_and_join/ast.json
new file mode 100644
index 000000000..b56a50884
--- /dev/null
+++ b/parser/testdata/03040_array_sum_and_join/ast.json
@@ -0,0 +1,25 @@
+{
+ "meta":
+ [
+ {
+ "name": "explain",
+ "type": "String"
+ }
+ ],
+
+ "data":
+ [
+ {
+ "explain": "Set"
+ }
+ ],
+
+ "rows": 1,
+
+ "statistics":
+ {
+ "elapsed": 0.001515397,
+ "rows_read": 1,
+ "bytes_read": 11
+ }
+}
diff --git a/parser/testdata/03040_array_sum_and_join/metadata.json b/parser/testdata/03040_array_sum_and_join/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03040_array_sum_and_join/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03040_array_sum_and_join/query.sql b/parser/testdata/03040_array_sum_and_join/query.sql
new file mode 100644
index 000000000..90d3d83c9
--- /dev/null
+++ b/parser/testdata/03040_array_sum_and_join/query.sql
@@ -0,0 +1,27 @@
+SET enable_analyzer=1;
+
+select t.1 as cnt,
+ t.2 as name,
+ t.3 as percent
+from (
+ select arrayJoin(result) as t
+ from (
+ select [
+ (79, 'name1'),
+ (62, 'name2'),
+ (44, 'name3')
+ ] as data,
+ arraySum(arrayMap(t -> t.1, data)) as total,
+ arrayMap(t ->
+ tuple(t.1, t.2,
+ multiIf(total = 0, 0, t.1 > 0 and t.1 < 10, -1.0,
+ (toFloat32(t.1) / toFloat32(total)) * 100)
+ ),
+ data
+ ) as result
+ )
+ );
+
+SELECT arrayMap(x -> arrayMap(x -> (x.1), [(1, 1), (2, 2)]), [(3, 3), (4, 4)]);
+
+SELECT arrayMap(x -> (x.1, arrayMap(x -> (x.1), [(1, 1), (2, 2)])), [(3, 3), (4, 4)]);
diff --git a/parser/testdata/03040_dynamic_type_alters_1_compact_merge_tree/ast.json b/parser/testdata/03040_dynamic_type_alters_1_compact_merge_tree/ast.json
new file mode 100644
index 000000000..0a5af329f
--- /dev/null
+++ b/parser/testdata/03040_dynamic_type_alters_1_compact_merge_tree/ast.json
@@ -0,0 +1,25 @@
+{
+ "meta":
+ [
+ {
+ "name": "explain",
+ "type": "String"
+ }
+ ],
+
+ "data":
+ [
+ {
+ "explain": "Set"
+ }
+ ],
+
+ "rows": 1,
+
+ "statistics":
+ {
+ "elapsed": 0.001351668,
+ "rows_read": 1,
+ "bytes_read": 11
+ }
+}
diff --git a/parser/testdata/03040_dynamic_type_alters_1_compact_merge_tree/metadata.json b/parser/testdata/03040_dynamic_type_alters_1_compact_merge_tree/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03040_dynamic_type_alters_1_compact_merge_tree/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03040_dynamic_type_alters_1_compact_merge_tree/query.sql b/parser/testdata/03040_dynamic_type_alters_1_compact_merge_tree/query.sql
new file mode 100644
index 000000000..de05ba361
--- /dev/null
+++ b/parser/testdata/03040_dynamic_type_alters_1_compact_merge_tree/query.sql
@@ -0,0 +1,53 @@
+set allow_experimental_dynamic_type = 1;
+set allow_experimental_variant_type = 1;
+set use_variant_as_common_type = 1;
+
+drop table if exists test;
+create table test (x UInt64, y UInt64) engine=MergeTree order by x settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000;
+select 'initial insert';
+insert into test select number, number from numbers(3);
+
+select 'alter add column 1';
+alter table test add column d Dynamic(max_types=3) settings mutations_sync=1;
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'insert after alter add column 1';
+insert into test select number, number, number from numbers(3, 3);
+insert into test select number, number, 'str_' || toString(number) from numbers(6, 3);
+insert into test select number, number, NULL from numbers(9, 3);
+insert into test select number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) from numbers(12, 3);
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'alter modify column 1';
+alter table test modify column d Dynamic(max_types=0) settings mutations_sync=1;
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'insert after alter modify column 1';
+insert into test select number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) from numbers(15, 4);
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'alter modify column 2';
+alter table test modify column d Dynamic(max_types=2) settings mutations_sync=1;
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'insert after alter modify column 2';
+insert into test select number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) from numbers(19, 4);
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'alter modify column 3';
+alter table test modify column y Dynamic settings mutations_sync=1;
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+select x, y, y.UInt64, y.String, y.`Tuple(a UInt64)`.a, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'insert after alter modify column 3';
+insert into test select number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL), NULL from numbers(23, 3);
+select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(), dynamicType(d);
+select x, y, y.UInt64, y.String, y.`Tuple(a UInt64)`.a, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+drop table test;
\ No newline at end of file
diff --git a/parser/testdata/03040_dynamic_type_alters_1_memory/ast.json b/parser/testdata/03040_dynamic_type_alters_1_memory/ast.json
new file mode 100644
index 000000000..f02292e61
--- /dev/null
+++ b/parser/testdata/03040_dynamic_type_alters_1_memory/ast.json
@@ -0,0 +1,25 @@
+{
+ "meta":
+ [
+ {
+ "name": "explain",
+ "type": "String"
+ }
+ ],
+
+ "data":
+ [
+ {
+ "explain": "Set"
+ }
+ ],
+
+ "rows": 1,
+
+ "statistics":
+ {
+ "elapsed": 0.001294005,
+ "rows_read": 1,
+ "bytes_read": 11
+ }
+}
diff --git a/parser/testdata/03040_dynamic_type_alters_1_memory/metadata.json b/parser/testdata/03040_dynamic_type_alters_1_memory/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03040_dynamic_type_alters_1_memory/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03040_dynamic_type_alters_1_memory/query.sql b/parser/testdata/03040_dynamic_type_alters_1_memory/query.sql
new file mode 100644
index 000000000..e802fd034
--- /dev/null
+++ b/parser/testdata/03040_dynamic_type_alters_1_memory/query.sql
@@ -0,0 +1,53 @@
+set allow_experimental_dynamic_type = 1;
+set allow_experimental_variant_type = 1;
+set use_variant_as_common_type = 1;
+
+drop table if exists test;
+create table test (x UInt64, y UInt64) engine=Memory;
+select 'initial insert';
+insert into test select number, number from numbers(3);
+
+select 'alter add column 1';
+alter table test add column d Dynamic(max_types=3) settings mutations_sync=1;
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'insert after alter add column 1';
+insert into test select number, number, number from numbers(3, 3);
+insert into test select number, number, 'str_' || toString(number) from numbers(6, 3);
+insert into test select number, number, NULL from numbers(9, 3);
+insert into test select number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) from numbers(12, 3);
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'alter modify column 1';
+alter table test modify column d Dynamic(max_types=1) settings mutations_sync=1;
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'insert after alter modify column 1';
+insert into test select number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) from numbers(15, 4);
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'alter modify column 2';
+alter table test modify column d Dynamic(max_types=3) settings mutations_sync=1;
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'insert after alter modify column 2';
+insert into test select number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) from numbers(19, 4);
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'alter modify column 3';
+alter table test modify column y Dynamic settings mutations_sync=1;
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, y.UInt64, y.String, y.`Tuple(a UInt64)`.a, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'insert after alter modify column 3';
+insert into test select number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL), NULL from numbers(23, 3);
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, y.UInt64, y.String, y.`Tuple(a UInt64)`.a, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+drop table test;
\ No newline at end of file
diff --git a/parser/testdata/03040_dynamic_type_alters_1_wide_merge_tree/ast.json b/parser/testdata/03040_dynamic_type_alters_1_wide_merge_tree/ast.json
new file mode 100644
index 000000000..9d5d25022
--- /dev/null
+++ b/parser/testdata/03040_dynamic_type_alters_1_wide_merge_tree/ast.json
@@ -0,0 +1,25 @@
+{
+ "meta":
+ [
+ {
+ "name": "explain",
+ "type": "String"
+ }
+ ],
+
+ "data":
+ [
+ {
+ "explain": "Set"
+ }
+ ],
+
+ "rows": 1,
+
+ "statistics":
+ {
+ "elapsed": 0.001111514,
+ "rows_read": 1,
+ "bytes_read": 11
+ }
+}
diff --git a/parser/testdata/03040_dynamic_type_alters_1_wide_merge_tree/metadata.json b/parser/testdata/03040_dynamic_type_alters_1_wide_merge_tree/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03040_dynamic_type_alters_1_wide_merge_tree/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03040_dynamic_type_alters_1_wide_merge_tree/query.sql b/parser/testdata/03040_dynamic_type_alters_1_wide_merge_tree/query.sql
new file mode 100644
index 000000000..55c4f0b5f
--- /dev/null
+++ b/parser/testdata/03040_dynamic_type_alters_1_wide_merge_tree/query.sql
@@ -0,0 +1,53 @@
+set allow_experimental_dynamic_type = 1;
+set allow_experimental_variant_type = 1;
+set use_variant_as_common_type = 1;
+
+drop table if exists test;
+create table test (x UInt64, y UInt64) engine=MergeTree order by x settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;
+select 'initial insert';
+insert into test select number, number from numbers(3);
+
+select 'alter add column 1';
+alter table test add column d Dynamic(max_types=3) settings mutations_sync=1;
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'insert after alter add column 1';
+insert into test select number, number, number from numbers(3, 3);
+insert into test select number, number, 'str_' || toString(number) from numbers(6, 3);
+insert into test select number, number, NULL from numbers(9, 3);
+insert into test select number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) from numbers(12, 3);
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'alter modify column 1';
+alter table test modify column d Dynamic(max_types=1) settings mutations_sync=1;
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'insert after alter modify column 1';
+insert into test select number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) from numbers(15, 4);
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'alter modify column 2';
+alter table test modify column d Dynamic(max_types=3) settings mutations_sync=1;
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'insert after alter modify column 2';
+insert into test select number, number, multiIf(number % 4 == 0, number, number % 4 == 1, 'str_' || toString(number), number % 4 == 2, toDate(number), NULL) from numbers(19, 4);
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'alter modify column 3';
+alter table test modify column y Dynamic settings mutations_sync=1;
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, y.UInt64, y.String, y.`Tuple(a UInt64)`.a, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'insert after alter modify column 3';
+insert into test select number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL), NULL from numbers(23, 3);
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, y.UInt64, y.String, y.`Tuple(a UInt64)`.a, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+drop table test;
\ No newline at end of file
diff --git a/parser/testdata/03040_dynamic_type_alters_2_compact_merge_tree/ast.json b/parser/testdata/03040_dynamic_type_alters_2_compact_merge_tree/ast.json
new file mode 100644
index 000000000..5a8c8d0df
--- /dev/null
+++ b/parser/testdata/03040_dynamic_type_alters_2_compact_merge_tree/ast.json
@@ -0,0 +1,25 @@
+{
+ "meta":
+ [
+ {
+ "name": "explain",
+ "type": "String"
+ }
+ ],
+
+ "data":
+ [
+ {
+ "explain": "Set"
+ }
+ ],
+
+ "rows": 1,
+
+ "statistics":
+ {
+ "elapsed": 0.001310641,
+ "rows_read": 1,
+ "bytes_read": 11
+ }
+}
diff --git a/parser/testdata/03040_dynamic_type_alters_2_compact_merge_tree/metadata.json b/parser/testdata/03040_dynamic_type_alters_2_compact_merge_tree/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03040_dynamic_type_alters_2_compact_merge_tree/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03040_dynamic_type_alters_2_compact_merge_tree/query.sql b/parser/testdata/03040_dynamic_type_alters_2_compact_merge_tree/query.sql
new file mode 100644
index 000000000..cead110dd
--- /dev/null
+++ b/parser/testdata/03040_dynamic_type_alters_2_compact_merge_tree/query.sql
@@ -0,0 +1,39 @@
+set allow_experimental_dynamic_type = 1;
+set allow_experimental_variant_type = 1;
+set use_variant_as_common_type = 1;
+
+drop table if exists test;
+create table test (x UInt64, y UInt64) engine=MergeTree order by x settings min_rows_for_wide_part=100000000, min_bytes_for_wide_part=1000000000;
+
+select 'initial insert';
+insert into test select number, number from numbers(3);
+
+select 'alter add column';
+alter table test add column d Dynamic settings mutations_sync=1;
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'insert after alter add column 1';
+insert into test select number, number, number from numbers(3, 3);
+insert into test select number, number, 'str_' || toString(number) from numbers(6, 3);
+insert into test select number, number, NULL from numbers(9, 3);
+insert into test select number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) from numbers(12, 3);
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'alter rename column 1';
+alter table test rename column d to d1 settings mutations_sync=1;
+select count(), dynamicType(d1) from test group by dynamicType(d1) order by count(), dynamicType(d1);
+select x, y, d1, d1.String, d1.UInt64, d1.Date, d1.`Tuple(a UInt64)`.a from test order by x;
+
+select 'insert nested dynamic';
+insert into test select number, number, [number % 2 ? number : 'str_' || toString(number)]::Array(Dynamic) from numbers(15, 3);
+select count(), dynamicType(d1) from test group by dynamicType(d1) order by count(), dynamicType(d1);
+select x, y, d1, d1.String, d1.UInt64, d1.Date, d1.`Tuple(a UInt64)`.a, d1.`Array(Dynamic)`.UInt64, d1.`Array(Dynamic)`.String, d1.`Array(Dynamic)`.Date from test order by x;
+
+select 'alter rename column 2';
+alter table test rename column d1 to d2 settings mutations_sync=1;
+select count(), dynamicType(d2) from test group by dynamicType(d2) order by count(), dynamicType(d2);
+select x, y, d2, d2.String, d2.UInt64, d2.Date, d2.`Tuple(a UInt64)`.a, d2.`Array(Dynamic)`.UInt64, d2.`Array(Dynamic)`.String, d2.`Array(Dynamic)`.Date, from test order by x;
+
+drop table test;
diff --git a/parser/testdata/03040_dynamic_type_alters_2_wide_merge_tree/ast.json b/parser/testdata/03040_dynamic_type_alters_2_wide_merge_tree/ast.json
new file mode 100644
index 000000000..12248ec89
--- /dev/null
+++ b/parser/testdata/03040_dynamic_type_alters_2_wide_merge_tree/ast.json
@@ -0,0 +1,25 @@
+{
+ "meta":
+ [
+ {
+ "name": "explain",
+ "type": "String"
+ }
+ ],
+
+ "data":
+ [
+ {
+ "explain": "Set"
+ }
+ ],
+
+ "rows": 1,
+
+ "statistics":
+ {
+ "elapsed": 0.001219399,
+ "rows_read": 1,
+ "bytes_read": 11
+ }
+}
diff --git a/parser/testdata/03040_dynamic_type_alters_2_wide_merge_tree/metadata.json b/parser/testdata/03040_dynamic_type_alters_2_wide_merge_tree/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03040_dynamic_type_alters_2_wide_merge_tree/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03040_dynamic_type_alters_2_wide_merge_tree/query.sql b/parser/testdata/03040_dynamic_type_alters_2_wide_merge_tree/query.sql
new file mode 100644
index 000000000..16f5e8499
--- /dev/null
+++ b/parser/testdata/03040_dynamic_type_alters_2_wide_merge_tree/query.sql
@@ -0,0 +1,41 @@
+set allow_experimental_dynamic_type = 1;
+set allow_experimental_variant_type = 1;
+set use_variant_as_common_type = 1;
+
+drop table if exists test;
+create table test (x UInt64, y UInt64) engine=MergeTree order by x settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;
+
+select 'initial insert';
+insert into test select number, number from numbers(3);
+
+select 'alter add column';
+alter table test add column d Dynamic settings mutations_sync=1;
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'insert after alter add column 1';
+insert into test select number, number, number from numbers(3, 3);
+insert into test select number, number, 'str_' || toString(number) from numbers(6, 3);
+insert into test select number, number, NULL from numbers(9, 3);
+insert into test select number, number, multiIf(number % 3 == 0, number, number % 3 == 1, 'str_' || toString(number), NULL) from numbers(12, 3);
+optimize table test final;
+select count(), dynamicType(d) from test group by dynamicType(d) order by count(), dynamicType(d);
+select x, y, d, d.String, d.UInt64, d.Date, d.`Tuple(a UInt64)`.a from test order by x;
+
+select 'alter rename column 1';
+alter table test rename column d to d1 settings mutations_sync=1;
+select count(), dynamicType(d1) from test group by dynamicType(d1) order by count(), dynamicType(d1);
+select x, y, d1, d1.String, d1.UInt64, d1.Date, d1.`Tuple(a UInt64)`.a from test order by x;
+
+select 'insert nested dynamic';
+insert into test select number, number, [number % 2 ? number : 'str_' || toString(number)]::Array(Dynamic) from numbers(15, 3);
+optimize table test final;
+select count(), dynamicType(d1) from test group by dynamicType(d1) order by count(), dynamicType(d1);
+select x, y, d1, d1.String, d1.UInt64, d1.Date, d1.`Tuple(a UInt64)`.a, d1.`Array(Dynamic)`.UInt64, d1.`Array(Dynamic)`.String, d1.`Array(Dynamic)`.Date from test order by x;
+
+select 'alter rename column 2';
+alter table test rename column d1 to d2 settings mutations_sync=1;
+select count(), dynamicType(d2) from test group by dynamicType(d2) order by count(), dynamicType(d2);
+select x, y, d2, d2.String, d2.UInt64, d2.Date, d2.`Tuple(a UInt64)`.a, d2.`Array(Dynamic)`.UInt64, d2.`Array(Dynamic)`.String, d2.`Array(Dynamic)`.Date, from test order by x;
+
+drop table test;
diff --git a/parser/testdata/03040_recursive_cte_postgres_6/ast.json b/parser/testdata/03040_recursive_cte_postgres_6/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03040_recursive_cte_postgres_6/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03040_recursive_cte_postgres_6/metadata.json b/parser/testdata/03040_recursive_cte_postgres_6/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03040_recursive_cte_postgres_6/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03040_recursive_cte_postgres_6/query.sql b/parser/testdata/03040_recursive_cte_postgres_6/query.sql
new file mode 100644
index 000000000..33f06266d
--- /dev/null
+++ b/parser/testdata/03040_recursive_cte_postgres_6/query.sql
@@ -0,0 +1,116 @@
+/**
+ * Based on https://github.com/postgres/postgres/blob/master/src/test/regress/sql/with.sql, license:
+ *
+ * PostgreSQL Database Management System
+ * (formerly known as Postgres, then as Postgres95)
+ *
+ * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
+ *
+ * Portions Copyright (c) 1994, The Regents of the University of California
+ *
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation for any purpose, without fee, and without a written agreement
+ * is hereby granted, provided that the above copyright notice and this
+ * paragraph and the following two paragraphs appear in all copies.
+ *
+ * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
+ * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
+ * DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO
+ *PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ */
+
+--
+-- Tests for common table expressions (WITH query, ... SELECT ...)
+--
+
+-- { echoOn }
+
+SET enable_analyzer = 1;
+
+--
+-- error cases
+--
+
+-- INTERSECT
+WITH RECURSIVE x AS (SELECT 1 AS n INTERSECT SELECT n+1 FROM x)
+ SELECT * FROM x; -- {serverError UNSUPPORTED_METHOD}
+
+WITH RECURSIVE x AS (SELECT 1 AS n INTERSECT ALL SELECT n+1 FROM x)
+ SELECT * FROM x; -- {serverError UNSUPPORTED_METHOD}
+
+-- EXCEPT
+WITH RECURSIVE x AS (SELECT 1 AS n EXCEPT SELECT n+1 FROM x)
+ SELECT * FROM x; -- {serverError UNSUPPORTED_METHOD}
+
+WITH RECURSIVE x AS (SELECT 1 AS n EXCEPT ALL SELECT n+1 FROM x)
+ SELECT * FROM x; -- {serverError UNSUPPORTED_METHOD}
+
+-- no non-recursive term
+WITH RECURSIVE x AS (SELECT n FROM x)
+ SELECT * FROM x; -- {serverError UNKNOWN_TABLE}
+
+-- recursive term in the left hand side (strictly speaking, should allow this)
+WITH RECURSIVE x AS (SELECT n FROM x UNION ALL SELECT 1 AS n)
+ SELECT * FROM x; -- {serverError UNKNOWN_TABLE}
+
+DROP TABLE IF EXISTS y;
+CREATE TABLE y (a UInt64) ENGINE=TinyLog;
+INSERT INTO y SELECT * FROM numbers(1, 10);
+
+-- LEFT JOIN
+
+WITH RECURSIVE x AS (SELECT a AS n FROM y WHERE a = 1
+ UNION ALL
+ SELECT x.n+1 FROM y LEFT JOIN x ON x.n = y.a WHERE n < 10)
+SELECT * FROM x FORMAT NULL SETTINGS max_recursive_cte_evaluation_depth = 5; -- { serverError TOO_DEEP_RECURSION }
+
+-- RIGHT JOIN
+WITH RECURSIVE x AS (SELECT a AS n FROM y WHERE a = 1
+ UNION ALL
+ SELECT x.n+1 FROM x RIGHT JOIN y ON x.n = y.a WHERE n < 10)
+SELECT * FROM x FORMAT NULL SETTINGS max_recursive_cte_evaluation_depth = 5; -- { serverError TOO_DEEP_RECURSION }
+
+-- FULL JOIN
+WITH RECURSIVE x AS (SELECT a AS n FROM y WHERE a = 1
+ UNION ALL
+ SELECT x.n+1 FROM x FULL JOIN y ON x.n = y.a WHERE n < 10)
+SELECT * FROM x FORMAT NULL SETTINGS max_recursive_cte_evaluation_depth = 5; -- { serverError TOO_DEEP_RECURSION }
+
+-- subquery
+WITH RECURSIVE x AS (SELECT 1 AS n UNION ALL SELECT n+1 FROM x
+ WHERE n IN (SELECT * FROM x))
+ SELECT * FROM x FORMAT NULL SETTINGS max_recursive_cte_evaluation_depth = 5; -- { serverError TOO_DEEP_RECURSION }
+
+-- aggregate functions
+WITH RECURSIVE x AS (SELECT 1 AS n UNION ALL SELECT count(*) FROM x)
+ SELECT * FROM x FORMAT NULL SETTINGS max_recursive_cte_evaluation_depth = 5; -- { serverError TOO_DEEP_RECURSION }
+
+WITH RECURSIVE x AS (SELECT 1 AS n UNION ALL SELECT sum(n) FROM x)
+ SELECT * FROM x FORMAT NULL SETTINGS max_recursive_cte_evaluation_depth = 5; -- { serverError TOO_DEEP_RECURSION }
+
+-- ORDER BY
+WITH RECURSIVE x AS (SELECT 1 AS n UNION ALL SELECT n+1 FROM x ORDER BY 1)
+ SELECT * FROM x FORMAT NULL SETTINGS max_recursive_cte_evaluation_depth = 5; -- { serverError TOO_DEEP_RECURSION }
+
+-- FIXME: indeterministic results
+--
+-- target list has a recursive query name
+-- WITH RECURSIVE x AS (SELECT 1 AS id
+-- UNION ALL
+-- SELECT (SELECT * FROM x) FROM x WHERE id < 5
+-- ) SELECT * FROM x; -- { serverError UNKNOWN_TABLE, CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN }
+
+-- mutual recursive query (not implemented)
+WITH RECURSIVE
+ x AS (SELECT 1 AS id UNION ALL SELECT id+1 FROM y WHERE id < 5),
+ y AS (SELECT 1 AS id UNION ALL SELECT id+1 FROM x WHERE id < 5)
+SELECT * FROM x FORMAT NULL SETTINGS max_recursive_cte_evaluation_depth = 5; -- { serverError TOO_DEEP_RECURSION }
+
+-- { echoOff }
diff --git a/parser/testdata/03041_analyzer_gigachad_join/ast.json b/parser/testdata/03041_analyzer_gigachad_join/ast.json
new file mode 100644
index 000000000..f5d00aa38
--- /dev/null
+++ b/parser/testdata/03041_analyzer_gigachad_join/ast.json
@@ -0,0 +1,25 @@
+{
+ "meta":
+ [
+ {
+ "name": "explain",
+ "type": "String"
+ }
+ ],
+
+ "data":
+ [
+ {
+ "explain": "Set"
+ }
+ ],
+
+ "rows": 1,
+
+ "statistics":
+ {
+ "elapsed": 0.001444246,
+ "rows_read": 1,
+ "bytes_read": 11
+ }
+}
diff --git a/parser/testdata/03041_analyzer_gigachad_join/metadata.json b/parser/testdata/03041_analyzer_gigachad_join/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03041_analyzer_gigachad_join/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03041_analyzer_gigachad_join/query.sql b/parser/testdata/03041_analyzer_gigachad_join/query.sql
new file mode 100644
index 000000000..88f7fc562
--- /dev/null
+++ b/parser/testdata/03041_analyzer_gigachad_join/query.sql
@@ -0,0 +1,15 @@
+SET enable_analyzer=1;
+CREATE TABLE IF NOT EXISTS first engine = MergeTree PARTITION BY (inn, toYYYYMM(received)) ORDER BY (inn, sessionId)
+AS SELECT now() AS received, '123456789' AS inn, '42' AS sessionId;
+
+CREATE TABLE IF NOT EXISTS second engine = MergeTree PARTITION BY (inn, toYYYYMM(received)) ORDER BY (inn, sessionId)
+AS SELECT now() AS received, '123456789' AS inn, '42' AS sessionId, '111' AS serial, '222' AS reg;
+
+SELECT alias_first.inn, arrayFirst(t -> isNotNull(t), regInfo.1), arrayFirst(t -> isNotNull(t), regInfo.2)
+ FROM first AS alias_first
+ INNER JOIN (
+ SELECT alias_second.inn, alias_second.sessionId, groupArray((serial, reg)) AS regInfo
+ FROM second AS alias_second
+ GROUP BY inn, sessionId
+ ) AS resp ON (alias_first.inn = resp.inn) AND (alias_first.sessionId = resp.sessionId)
+WHERE if('123456789' IS NOT NULL, alias_first.inn = '123456789', 1)
diff --git a/parser/testdata/03041_recursive_cte_postgres_7/ast.json b/parser/testdata/03041_recursive_cte_postgres_7/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03041_recursive_cte_postgres_7/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03041_recursive_cte_postgres_7/metadata.json b/parser/testdata/03041_recursive_cte_postgres_7/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03041_recursive_cte_postgres_7/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03041_recursive_cte_postgres_7/query.sql b/parser/testdata/03041_recursive_cte_postgres_7/query.sql
new file mode 100644
index 000000000..5f4455efc
--- /dev/null
+++ b/parser/testdata/03041_recursive_cte_postgres_7/query.sql
@@ -0,0 +1,115 @@
+/**
+ * Based on https://github.com/postgres/postgres/blob/master/src/test/regress/sql/with.sql, license:
+ *
+ * PostgreSQL Database Management System
+ * (formerly known as Postgres, then as Postgres95)
+ *
+ * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
+ *
+ * Portions Copyright (c) 1994, The Regents of the University of California
+ *
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation for any purpose, without fee, and without a written agreement
+ * is hereby granted, provided that the above copyright notice and this
+ * paragraph and the following two paragraphs appear in all copies.
+ *
+ * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
+ * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
+ * DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ * ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO
+ *PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ */
+
+--
+-- Tests for common table expressions (WITH query, ... SELECT ...)
+--
+
+-- { echoOn }
+
+SET enable_analyzer = 1;
+
+WITH RECURSIVE foo AS
+ (SELECT 1 AS i
+ UNION ALL
+ (SELECT i+1 FROM foo WHERE i < 10
+ UNION ALL
+ SELECT i+1 FROM foo WHERE i < 5)
+) SELECT * FROM foo;
+
+WITH RECURSIVE foo AS
+ (SELECT 1 AS i
+ UNION ALL
+ SELECT * FROM
+ (SELECT i+1 FROM foo WHERE i < 10
+ UNION ALL
+ SELECT i+1 FROM foo WHERE i < 5) AS t
+) SELECT * FROM foo;
+
+WITH RECURSIVE foo AS
+ (SELECT 1 AS i
+ UNION ALL
+ (SELECT i+1 FROM foo WHERE i < 10
+ EXCEPT
+ SELECT i+1 FROM foo WHERE i < 5)
+) SELECT * FROM foo;
+
+WITH RECURSIVE foo AS
+ (SELECT 1 AS i
+ UNION ALL
+ (SELECT i+1 FROM foo WHERE i < 10
+ INTERSECT
+ SELECT i+1 FROM foo WHERE i < 5)
+) SELECT * FROM foo;
+
+--
+-- test for nested-recursive-WITH bug
+--
+WITH RECURSIVE t AS (
+ WITH RECURSIVE s AS (
+ SELECT toUInt64(1) AS i
+ UNION ALL
+ SELECT i+1 FROM s WHERE i < 10
+ )
+ SELECT i AS j FROM s
+ UNION ALL
+ SELECT j+1 FROM t WHERE j < 10
+)
+SELECT * FROM t;
+
+--
+-- Test CTEs read in non-initialization orders
+--
+
+WITH RECURSIVE
+ tab AS (SELECT * FROM values('id_key UInt64, link UInt64', (1,17), (2,17), (3,17), (4,17), (6,17), (5,17))),
+ iter AS (
+ SELECT 0 AS id_key, 'base' AS row_type, 17 AS link
+ UNION ALL (
+ WITH remaining AS (
+ SELECT tab.id_key AS id_key, 'true'::text AS row_type, iter.link AS link, MIN(tab.id_key) OVER () AS min
+ FROM tab INNER JOIN iter USING (link)
+ WHERE tab.id_key > iter.id_key
+ ),
+ first_remaining AS (
+ SELECT id_key, row_type, link
+ FROM remaining
+ WHERE id_key=min
+ ),
+ effect AS (
+ SELECT tab.id_key AS id_key, 'new'::text AS row_type, tab.link AS link
+ FROM first_remaining e INNER JOIN tab ON e.id_key=tab.id_key
+ WHERE e.row_type = 'false'
+ )
+ SELECT * FROM first_remaining
+ UNION ALL SELECT * FROM effect
+ )
+ )
+SELECT * FROM iter;
+
+-- { echoOff }
diff --git a/parser/testdata/03041_select_with_query_result/ast.json b/parser/testdata/03041_select_with_query_result/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03041_select_with_query_result/ast.json
@@ -0,0 +1 @@
+{"error": true} diff --git a/parser/testdata/03041_select_with_query_result/metadata.json b/parser/testdata/03041_select_with_query_result/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03041_select_with_query_result/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03041_select_with_query_result/query.sql b/parser/testdata/03041_select_with_query_result/query.sql new file mode 100644 index 000000000..e5897ea12 --- /dev/null +++ b/parser/testdata/03041_select_with_query_result/query.sql @@ -0,0 +1,42 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/44153 +SET enable_analyzer=1; +DROP TABLE IF EXISTS parent; +DROP TABLE IF EXISTS join_table_1; +DROP TABLE IF EXISTS join_table_2; + +CREATE TABLE parent( + a_id Int64, + b_id Int64, + c_id Int64, + created_at Int64 +) +ENGINE=MergeTree() +ORDER BY (a_id, b_id, c_id, created_at); + +CREATE TABLE join_table_1( + a_id Int64, + b_id Int64 +) +ENGINE=MergeTree() +ORDER BY (a_id, b_id); + +CREATE TABLE join_table_2( + c_id Int64, + created_at Int64 +) +ENGINE=MergeTree() +ORDER BY (c_id, created_at); + +WITH with_table as ( + SELECT p.a_id, p.b_id, p.c_id FROM parent p + LEFT JOIN join_table_1 jt1 ON jt1.a_id = p.a_id AND jt1.b_id = p.b_id + LEFT JOIN join_table_2 jt2 ON jt2.c_id = p.c_id + WHERE + p.a_id = 0 AND (jt2.c_id = 0 OR p.created_at = 0) +) +SELECT p.a_id, p.b_id, COUNT(*) as f_count FROM with_table +GROUP BY p.a_id, p.b_id; + +DROP TABLE IF EXISTS parent; +DROP TABLE IF EXISTS join_table_1; +DROP TABLE IF EXISTS join_table_2; diff --git a/parser/testdata/03042_analyzer_alias_join/ast.json b/parser/testdata/03042_analyzer_alias_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03042_analyzer_alias_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03042_analyzer_alias_join/metadata.json b/parser/testdata/03042_analyzer_alias_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03042_analyzer_alias_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03042_analyzer_alias_join/query.sql b/parser/testdata/03042_analyzer_alias_join/query.sql new file mode 100644 index 000000000..d9a8d8b4c --- /dev/null +++ b/parser/testdata/03042_analyzer_alias_join/query.sql @@ -0,0 +1,21 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/14978 +SET enable_analyzer=1; +CREATE TABLE test1(id UInt64, t1value UInt64) ENGINE=MergeTree ORDER BY tuple(); +CREATE TABLE test2(id UInt64, t2value String) ENGINE=MergeTree ORDER BY tuple(); + +SELECT NULL AS t2value +FROM test1 t1 +LEFT JOIN ( + SELECT id, t2value FROM test2 +) t2 +ON t1.id=t2.id +WHERE t2.t2value='test'; + +-- workaround should work too +SELECT NULL AS _svalue +FROM test1 t1 +LEFT JOIN ( + SELECT id, t2value FROM test2 +) t2 +ON t1.id=t2.id +WHERE t2.t2value='test'; diff --git a/parser/testdata/03042_not_found_column_c1/ast.json b/parser/testdata/03042_not_found_column_c1/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03042_not_found_column_c1/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03042_not_found_column_c1/metadata.json b/parser/testdata/03042_not_found_column_c1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03042_not_found_column_c1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03042_not_found_column_c1/query.sql 
new file mode 100644
index 000000000..08202dc0d
--- /dev/null
+++ b/parser/testdata/03042_not_found_column_c1/query.sql
@@ -0,0 +1,9 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/42399
+SET enable_analyzer=1;
+
+CREATE TABLE IF NOT EXISTS t0 (c0 Int32) ENGINE = Memory() ;
+CREATE TABLE t1 (c0 Int32, c1 Int32, c2 Int32) ENGINE = Memory() ;
+CREATE TABLE t2 (c0 String, c1 String MATERIALIZED (c2), c2 Int32) ENGINE = Memory() ;
+CREATE TABLE t3 (c0 String, c1 String, c2 String) ENGINE = Log() ;
+CREATE TABLE IF NOT EXISTS t4 (c0 Int32) ENGINE = Log() ;
+SELECT t3.c1, t3.c2, t1.c1, t1.c0, t2.c2, t0.c0, t1.c2, t2.c1, t4.c0 FROM t3, t0, t1, t2, t4;
diff --git a/parser/testdata/03043_group_array_result_is_expected/ast.json b/parser/testdata/03043_group_array_result_is_expected/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03043_group_array_result_is_expected/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03043_group_array_result_is_expected/metadata.json b/parser/testdata/03043_group_array_result_is_expected/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03043_group_array_result_is_expected/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03043_group_array_result_is_expected/query.sql b/parser/testdata/03043_group_array_result_is_expected/query.sql
new file mode 100644
index 000000000..e2c79e5c4
--- /dev/null
+++ b/parser/testdata/03043_group_array_result_is_expected/query.sql
@@ -0,0 +1,45 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/27115
+SET enable_analyzer=1;
+drop table if exists fill_ex;
+
+create table fill_ex (
+ eventDate Date ,
+ storeId String
+)
+engine = ReplacingMergeTree()
+partition by toYYYYMM(eventDate)
+order by (storeId,eventDate);
+
+insert into fill_ex (eventDate,storeId) values ('2021-07-16','s') ('2021-07-17','ee');
+
+select
+ groupArray(key) as keys,
+ count() as c
+from
+ (
+ select
+ *,
+ eventDate as key
+ from
+ (
+ select
+ eventDate
+ from
+ (
+ select
+ eventDate
+ from
+ fill_ex final
+ where
+ eventDate >= toDate('2021-07-01')
+ and eventDate<toDate('2021-07-30')
+ order by
+ eventDate )
+ order by
+ eventDate with fill
+ from
+ toDate('2021-07-01') to toDate('2021-07-30') )
+ order by
+ eventDate );
+
+drop table if exists fill_ex;
diff --git a/parser/testdata/03044_analyzer_alias_join/ast.json b/parser/testdata/03044_analyzer_alias_join/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03044_analyzer_alias_join/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03044_analyzer_alias_join/metadata.json b/parser/testdata/03044_analyzer_alias_join/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03044_analyzer_alias_join/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03044_analyzer_alias_join/query.sql b/parser/testdata/03044_analyzer_alias_join/query.sql
new file mode 100644
index 000000000..6876584a7
--- /dev/null
+++ b/parser/testdata/03044_analyzer_alias_join/query.sql
@@ -0,0 +1,18 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/17319
+SET enable_analyzer=1;
+CREATE TABLE hits (date Date, data Float64) engine=Memory();
+
+SELECT
+ subquery1.period AS period,
+ if(1=1, 0, subquery1.data1) AS data,
+ if(1=1, 0, subquery2.data) AS other_data
+FROM
+(
+ SELECT date AS period, data AS data1
+ FROM hits
+) AS subquery1
+LEFT JOIN
+(
+ SELECT date AS period, data AS data
+ FROM hits
+) AS subquery2 ON (subquery1.period = subquery2.period)
diff --git a/parser/testdata/03044_array_join_columns_in_nested_table/ast.json b/parser/testdata/03044_array_join_columns_in_nested_table/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03044_array_join_columns_in_nested_table/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03044_array_join_columns_in_nested_table/metadata.json b/parser/testdata/03044_array_join_columns_in_nested_table/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03044_array_join_columns_in_nested_table/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03044_array_join_columns_in_nested_table/query.sql b/parser/testdata/03044_array_join_columns_in_nested_table/query.sql
new file mode 100644
index 000000000..4885b7e3f
--- /dev/null
+++ b/parser/testdata/03044_array_join_columns_in_nested_table/query.sql
@@ -0,0 +1,3 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/11813
+SET enable_analyzer=1;
+select 1 from (select 1 x) l join (select 1 y, [1] a) r on l.x = r.y array join r.a;
diff --git a/parser/testdata/03045_analyzer_alias_join_with_if/ast.json b/parser/testdata/03045_analyzer_alias_join_with_if/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03045_analyzer_alias_join_with_if/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03045_analyzer_alias_join_with_if/metadata.json b/parser/testdata/03045_analyzer_alias_join_with_if/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03045_analyzer_alias_join_with_if/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03045_analyzer_alias_join_with_if/query.sql b/parser/testdata/03045_analyzer_alias_join_with_if/query.sql
new file mode 100644
index 000000000..cbc467264
--- /dev/null
+++ b/parser/testdata/03045_analyzer_alias_join_with_if/query.sql
@@ -0,0 +1,34 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/13210
+SET enable_analyzer=1;
+CREATE TABLE test_a_table (
+ name String,
+ a_col String
+)
+Engine = MergeTree()
+ORDER BY name;
+
+CREATE TABLE test_b_table (
+ name String,
+ b_col String,
+ some_val String
+)
+Engine = MergeTree()
+ORDER BY name;
+
+SELECT
+ b.name name,
+ a.a_col a_col,
+ b.b_col b_col,
+ 'N' some_val
+from test_a_table a
+join test_b_table b on a.name = b.name
+where b.some_val = 'Y';
+
+SELECT
+ b.name name,
+ a.a_col a_col,
+ b.b_col b_col,
+ if(1,'N',b.some_val) some_val
+from test_a_table a
+join test_b_table b on a.name = b.name
+where b.some_val = 'Y';
diff --git a/parser/testdata/03045_unknown_identifier_alias_substitution/ast.json b/parser/testdata/03045_unknown_identifier_alias_substitution/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03045_unknown_identifier_alias_substitution/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03045_unknown_identifier_alias_substitution/metadata.json b/parser/testdata/03045_unknown_identifier_alias_substitution/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03045_unknown_identifier_alias_substitution/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03045_unknown_identifier_alias_substitution/query.sql b/parser/testdata/03045_unknown_identifier_alias_substitution/query.sql
new file mode 100644
index 000000000..967b7b247
--- /dev/null
+++ b/parser/testdata/03045_unknown_identifier_alias_substitution/query.sql
@@ -0,0 +1,21 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/23053
+SET enable_analyzer=1;
+DROP TABLE IF EXISTS repl_tbl;
+
+CREATE TEMPORARY TABLE repl_tbl
+(
+ `key` UInt32,
+ `val_1` UInt32,
+ `val_2` String,
+ `val_3` String,
+ `val_4` String,
+ `val_5` UUID,
+ `ts` DateTime
+)
+ENGINE = ReplacingMergeTree(ts)
+ORDER BY `key`;
+set prefer_column_name_to_alias = 1;
+INSERT INTO repl_tbl (key) SELECT number FROM numbers(10);
+WITH 10 as k SELECT k as key, * FROM repl_tbl WHERE key = k;
+
+DROP TABLE IF EXISTS repl_tbl;
diff --git a/parser/testdata/03046_column_in_block_array_join/ast.json b/parser/testdata/03046_column_in_block_array_join/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03046_column_in_block_array_join/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03046_column_in_block_array_join/metadata.json b/parser/testdata/03046_column_in_block_array_join/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03046_column_in_block_array_join/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03046_column_in_block_array_join/query.sql b/parser/testdata/03046_column_in_block_array_join/query.sql
new file mode 100644
index 000000000..d7e932edd
--- /dev/null
+++ b/parser/testdata/03046_column_in_block_array_join/query.sql
@@ -0,0 +1,39 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/37729
+SET enable_analyzer=1;
+
+DROP TABLE IF EXISTS nested_test;
+DROP TABLE IF EXISTS join_test;
+
+CREATE TABLE nested_test
+(
+ s String,
+ nest Nested
+ (
+ x UInt64,
+ y UInt64
+ )
+) ENGINE = MergeTree
+ORDER BY s;
+
+CREATE TABLE join_test
+(
+ id Int64,
+ y UInt64
+)
+ENGINE = MergeTree
+ORDER BY id;
+
+INSERT INTO nested_test
+VALUES ('Hello', [1,2], [10,20]), ('World', [3,4,5], [30,40,50]), ('Goodbye', [], []);
+
+INSERT INTO join_test
+VALUES (1,1),(2,4),(3,20),(4,40);
+
+SELECT s
+FROM nested_test AS t1
+ARRAY JOIN nest
+INNER JOIN join_test AS t2 ON nest.y = t2.y
+ORDER BY ALL;
+
+DROP TABLE IF EXISTS nested_test;
+DROP TABLE IF EXISTS join_test;
diff --git a/parser/testdata/03047_analyzer_alias_join/ast.json b/parser/testdata/03047_analyzer_alias_join/ast.json
new file mode 100644
index 000000000..54233924c
--- /dev/null
+++ b/parser/testdata/03047_analyzer_alias_join/ast.json
@@ -0,0 +1,25 @@
+{
+ "meta":
+ [
+ {
+ "name": "explain",
+ "type": "String"
+ }
+ ],
+
+ "data":
+ [
+ {
+ "explain": "Set"
+ }
+ ],
+
+ "rows": 1,
+
+ "statistics":
+ {
+ "elapsed": 0.000991885,
+ "rows_read": 1,
+ "bytes_read": 11
+ }
+}
diff --git a/parser/testdata/03047_analyzer_alias_join/metadata.json b/parser/testdata/03047_analyzer_alias_join/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03047_analyzer_alias_join/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03047_analyzer_alias_join/query.sql b/parser/testdata/03047_analyzer_alias_join/query.sql
new file mode 100644
index 000000000..29fc711aa
--- /dev/null
+++ b/parser/testdata/03047_analyzer_alias_join/query.sql
@@ -0,0 +1,30 @@
+SET enable_analyzer=1;
+SELECT
+ 1 AS value,
+ *
+FROM
+(
+ SELECT 1 AS key
+) AS l
+LEFT JOIN
+(
+ SELECT
+ 2 AS key,
+ 1 AS value
+) AS r USING (key)
+SETTINGS join_use_nulls = 1;
+
+SELECT
+ 1 AS value,
value, + * +FROM +( + SELECT 2 AS key +) AS l +LEFT JOIN +( + SELECT + 2 AS key, + 1 AS value +) AS r USING (key) +SETTINGS join_use_nulls = 1 diff --git a/parser/testdata/03047_group_by_field_identified_aggregation/ast.json b/parser/testdata/03047_group_by_field_identified_aggregation/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03047_group_by_field_identified_aggregation/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03047_group_by_field_identified_aggregation/metadata.json b/parser/testdata/03047_group_by_field_identified_aggregation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03047_group_by_field_identified_aggregation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03047_group_by_field_identified_aggregation/query.sql b/parser/testdata/03047_group_by_field_identified_aggregation/query.sql new file mode 100644 index 000000000..d12e85ca0 --- /dev/null +++ b/parser/testdata/03047_group_by_field_identified_aggregation/query.sql @@ -0,0 +1,4 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/32639 +SET enable_analyzer=1; + +SELECT 0 AND id ? 1 : 2 AS a, sum(id) FROM (SELECT 1 AS id) GROUP BY a; diff --git a/parser/testdata/03047_on_fly_mutations_events/ast.json b/parser/testdata/03047_on_fly_mutations_events/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_events/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03047_on_fly_mutations_events/metadata.json b/parser/testdata/03047_on_fly_mutations_events/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_events/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03047_on_fly_mutations_events/query.sql b/parser/testdata/03047_on_fly_mutations_events/query.sql new file mode 100644 index 000000000..107b5d797 --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_events/query.sql @@ -0,0 +1,53 @@ +-- Tags: no-shared-catalog, no-parallel-replicas +-- FIXME no-shared-catalog: STOP MERGES will only stop them on the current replica, the second one will continue to merge +-- no-parallel-replicas: profile events may differ with parallel replicas. 
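+-- Descriptive note (added): the SELECTs below are verified at the end via the ReadTasksWithAppliedMutationsOnFly and MutationsAppliedOnFlyInAllReadTasks profile events in system.query_log.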
+ +DROP TABLE IF EXISTS t_lightweight_mut_7; + +SET apply_mutations_on_fly = 1; +SET max_streams_for_merge_tree_reading = 1; + +CREATE TABLE t_lightweight_mut_7 (id UInt64, v UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/t_lightweight_mut_7', '1') ORDER BY id; + +SYSTEM STOP MERGES t_lightweight_mut_7; + +INSERT INTO t_lightweight_mut_7 SELECT number, number FROM numbers(100000); + +ALTER TABLE t_lightweight_mut_7 UPDATE v = 3 WHERE id % 5 = 0; +ALTER TABLE t_lightweight_mut_7 DELETE WHERE v % 3 = 0; + +SYSTEM SYNC REPLICA t_lightweight_mut_7 PULL; + +SELECT count() FROM t_lightweight_mut_7; + +SYSTEM START MERGES t_lightweight_mut_7; + +ALTER TABLE t_lightweight_mut_7 UPDATE v = v WHERE 1 SETTINGS mutations_sync = 2; + +SYSTEM STOP MERGES t_lightweight_mut_7; + +ALTER TABLE t_lightweight_mut_7 UPDATE v = v * v WHERE 1; + +SYSTEM SYNC REPLICA t_lightweight_mut_7 PULL; + +SELECT 1, sum(v) FROM t_lightweight_mut_7; +SELECT 2, sum(v) FROM t_lightweight_mut_7 SETTINGS apply_mutations_on_fly = 0; + +SYSTEM START MERGES t_lightweight_mut_7; + +ALTER TABLE t_lightweight_mut_7 UPDATE v = v WHERE 1 SETTINGS mutations_sync = 2; + +SELECT 3, sum(v) FROM t_lightweight_mut_7; + +SYSTEM FLUSH LOGS query_log; + +SELECT + query, + ProfileEvents['ReadTasksWithAppliedMutationsOnFly'], + ProfileEvents['MutationsAppliedOnFlyInAllReadTasks'] +FROM system.query_log +WHERE current_database = currentDatabase() AND query ILIKE 'SELECT%FROM%t_lightweight_mut_7%' AND type = 'QueryFinish' +ORDER BY event_time_microseconds; + +DROP TABLE t_lightweight_mut_7; diff --git a/parser/testdata/03047_on_fly_mutations_materialized/ast.json b/parser/testdata/03047_on_fly_mutations_materialized/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_materialized/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03047_on_fly_mutations_materialized/metadata.json b/parser/testdata/03047_on_fly_mutations_materialized/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_materialized/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03047_on_fly_mutations_materialized/query.sql b/parser/testdata/03047_on_fly_mutations_materialized/query.sql new file mode 100644 index 000000000..268402af7 --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_materialized/query.sql @@ -0,0 +1,26 @@ +-- Tags: no-shared-catalog +-- FIXME no-shared-catalog: STOP MERGES will only stop them on the current replica, the second one will continue to merge + +DROP TABLE IF EXISTS t_update_materialized; + +SET apply_mutations_on_fly = 1; + +CREATE TABLE t_update_materialized (id UInt64, c1 UInt64, c2 UInt64 MATERIALIZED c1 * 2) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/t_update_materialized', '1') ORDER BY id; + +SYSTEM STOP MERGES t_update_materialized; + +INSERT INTO t_update_materialized (id, c1) VALUES (1, 1); + +SELECT id, c2 FROM t_update_materialized ORDER BY id; +SELECT id, c1, c2 FROM t_update_materialized ORDER BY id; + +ALTER TABLE t_update_materialized UPDATE c1 = 2 WHERE id = 1; + +SYSTEM SYNC REPLICA t_update_materialized PULL; + +SELECT id, c2 FROM t_update_materialized; +SELECT id, c1, c2 FROM t_update_materialized; + +SELECT count() FROM system.mutations +WHERE database = currentDatabase() AND table = 't_update_materialized' AND NOT is_done; diff --git a/parser/testdata/03047_on_fly_mutations_multiple_updates/ast.json 
b/parser/testdata/03047_on_fly_mutations_multiple_updates/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_multiple_updates/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03047_on_fly_mutations_multiple_updates/metadata.json b/parser/testdata/03047_on_fly_mutations_multiple_updates/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_multiple_updates/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03047_on_fly_mutations_multiple_updates/query.sql b/parser/testdata/03047_on_fly_mutations_multiple_updates/query.sql new file mode 100644 index 000000000..15f7e72ea --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_multiple_updates/query.sql @@ -0,0 +1,38 @@ +-- Tags: no-random-merge-tree-settings, no-random-settings, no-parallel +-- no-parallel: SYSTEM DROP MARK CACHE is used. + +DROP TABLE IF EXISTS t_lightweight_mut_5; + +SET apply_mutations_on_fly = 1; + +CREATE TABLE t_lightweight_mut_5 (id UInt64, s1 String, s2 String) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0, + serialization_info_version = 'basic', + storage_policy = 'default'; + +SYSTEM STOP MERGES t_lightweight_mut_5; + +INSERT INTO t_lightweight_mut_5 VALUES (1, 'a', 'b'); +ALTER TABLE t_lightweight_mut_5 UPDATE s1 = 'x', s2 = 'y' WHERE id = 1; + +SYSTEM DROP MARK CACHE; +SELECT s1 FROM t_lightweight_mut_5 ORDER BY id; + +SYSTEM DROP MARK CACHE; +SELECT s2 FROM t_lightweight_mut_5 ORDER BY id; + +SYSTEM DROP MARK CACHE; +SELECT s1, s2 FROM t_lightweight_mut_5 ORDER BY id; + +SYSTEM FLUSH LOGS query_log; + +SELECT query, ProfileEvents['FileOpen'] FROM system.query_log +WHERE + current_database = currentDatabase() + AND query ILIKE 'SELECT%FROM t_lightweight_mut_5%' + AND type = 'QueryFinish' +ORDER BY event_time_microseconds; + +DROP TABLE t_lightweight_mut_5; diff --git a/parser/testdata/03047_on_fly_mutations_multiple_updates_rmt/ast.json b/parser/testdata/03047_on_fly_mutations_multiple_updates_rmt/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_multiple_updates_rmt/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03047_on_fly_mutations_multiple_updates_rmt/metadata.json b/parser/testdata/03047_on_fly_mutations_multiple_updates_rmt/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_multiple_updates_rmt/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03047_on_fly_mutations_multiple_updates_rmt/query.sql b/parser/testdata/03047_on_fly_mutations_multiple_updates_rmt/query.sql new file mode 100644 index 000000000..26a7badd0 --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_multiple_updates_rmt/query.sql @@ -0,0 +1,44 @@ +-- Tags: no-random-merge-tree-settings, no-random-settings, no-fasttest, no-parallel-replicas, no-parallel +-- no-parallel-replicas: reading from s3 ('S3GetObject' event) can happen on any "replica", so we may see no 'S3GetObject' on the initiator +-- no-parallel: SYSTEM DROP MARK CACHE is used.
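+-- Descriptive note (added): same multiple-updates scenario as above, but on ReplicatedMergeTree over s3; ProfileEvents['S3GetObject'] plays the role that ProfileEvents['FileOpen'] plays in the non-replicated test.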
+ +DROP TABLE IF EXISTS t_lightweight_mut_5; + +SET apply_mutations_on_fly = 1; +SET enable_filesystem_cache = 0; +SET read_through_distributed_cache=0; + +CREATE TABLE t_lightweight_mut_5 (id UInt64, s1 String, s2 String) +ENGINE = ReplicatedMergeTree('/clickhouse/zktest/tables/{database}/t_lightweight_mut_1', '1') ORDER BY id +SETTINGS min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0, + primary_key_lazy_load = 0, + serialization_info_version = 'basic', + storage_policy = 's3_cache'; + +SYSTEM STOP MERGES t_lightweight_mut_5; + +INSERT INTO t_lightweight_mut_5 VALUES (1, 'a', 'b'); +ALTER TABLE t_lightweight_mut_5 UPDATE s1 = 'x', s2 = 'y' WHERE id = 1; + +SYSTEM SYNC REPLICA t_lightweight_mut_5 PULL; + +SYSTEM DROP MARK CACHE; +SELECT s1 FROM t_lightweight_mut_5 ORDER BY id; + +SYSTEM DROP MARK CACHE; +SELECT s2 FROM t_lightweight_mut_5 ORDER BY id; + +SYSTEM DROP MARK CACHE; +SELECT s1, s2 FROM t_lightweight_mut_5 ORDER BY id; + +SYSTEM FLUSH LOGS query_log; + +SELECT query, ProfileEvents['S3GetObject'] FROM system.query_log +WHERE + current_database = currentDatabase() + AND query ILIKE 'SELECT%FROM t_lightweight_mut_5%' + AND type = 'QueryFinish' +ORDER BY event_time_microseconds; + +DROP TABLE t_lightweight_mut_5; diff --git a/parser/testdata/03047_on_fly_mutations_non_deterministic/ast.json b/parser/testdata/03047_on_fly_mutations_non_deterministic/ast.json new file mode 100644 index 000000000..0d8b389dd --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_non_deterministic/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lightweight_mut_2 (children 1)" + }, + { + "explain": " Identifier t_lightweight_mut_2" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001271568, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/03047_on_fly_mutations_non_deterministic/metadata.json b/parser/testdata/03047_on_fly_mutations_non_deterministic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_non_deterministic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03047_on_fly_mutations_non_deterministic/query.sql b/parser/testdata/03047_on_fly_mutations_non_deterministic/query.sql new file mode 100644 index 000000000..7e95907e2 --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_non_deterministic/query.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS t_lightweight_mut_2; + +SET apply_mutations_on_fly = 1; + +CREATE TABLE t_lightweight_mut_2 (id UInt64, v UInt64) ENGINE = MergeTree ORDER BY id; + +SYSTEM STOP MERGES t_lightweight_mut_2; +INSERT INTO t_lightweight_mut_2 VALUES (10, 20); + +ALTER TABLE t_lightweight_mut_2 UPDATE v = rand() WHERE 1; + +SELECT * FROM t_lightweight_mut_2; -- { serverError BAD_ARGUMENTS } +SELECT * FROM t_lightweight_mut_2 SETTINGS apply_mutations_on_fly = 0; + +SELECT count() FROM system.mutations WHERE database = currentDatabase() AND table = 't_lightweight_mut_2' AND NOT is_done AND NOT is_killed; +KILL MUTATION WHERE database = currentDatabase() AND table = 't_lightweight_mut_2' FORMAT Null; +SELECT count() FROM system.mutations WHERE database = currentDatabase() AND table = 't_lightweight_mut_2' AND NOT is_done AND NOT is_killed; + +ALTER TABLE t_lightweight_mut_2 UPDATE v = (SELECT sum(number) FROM numbers(100)) WHERE 1; + +SELECT * FROM t_lightweight_mut_2; -- { serverError BAD_ARGUMENTS } +SELECT * FROM t_lightweight_mut_2 
SETTINGS apply_mutations_on_fly = 0; + +SELECT count() FROM system.mutations WHERE database = currentDatabase() AND table = 't_lightweight_mut_2' AND NOT is_done AND NOT is_killed; +KILL MUTATION WHERE database = currentDatabase() AND table = 't_lightweight_mut_2' FORMAT Null; +SELECT count() FROM system.mutations WHERE database = currentDatabase() AND table = 't_lightweight_mut_2' AND NOT is_done AND NOT is_killed; + +DROP TABLE t_lightweight_mut_2; diff --git a/parser/testdata/03047_on_fly_mutations_non_deterministic_replace/ast.json b/parser/testdata/03047_on_fly_mutations_non_deterministic_replace/ast.json new file mode 100644 index 000000000..6eb8646c1 --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_non_deterministic_replace/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lightweight_mut_5 (children 1)" + }, + { + "explain": " Identifier t_lightweight_mut_5" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001107896, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/03047_on_fly_mutations_non_deterministic_replace/metadata.json b/parser/testdata/03047_on_fly_mutations_non_deterministic_replace/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_non_deterministic_replace/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03047_on_fly_mutations_non_deterministic_replace/query.sql b/parser/testdata/03047_on_fly_mutations_non_deterministic_replace/query.sql new file mode 100644 index 000000000..fbb403ee1 --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_non_deterministic_replace/query.sql @@ -0,0 +1,144 @@ +DROP TABLE IF EXISTS t_lightweight_mut_5; + +SET apply_mutations_on_fly = 1; +SET mutations_execute_subqueries_on_initiator = 1; +SET mutations_execute_nondeterministic_on_initiator = 1; + +-- SELECT sum(...) + +CREATE TABLE t_lightweight_mut_5 (id UInt64, v UInt64) ENGINE = MergeTree ORDER BY id; + +SYSTEM STOP MERGES t_lightweight_mut_5; +INSERT INTO t_lightweight_mut_5 VALUES (10, 20); + +ALTER TABLE t_lightweight_mut_5 UPDATE v = (SELECT sum(number) FROM numbers(100)) WHERE 1; + +SELECT id, v FROM t_lightweight_mut_5 ORDER BY id; + +SELECT command FROM system.mutations +WHERE database = currentDatabase() AND table = 't_lightweight_mut_5' AND NOT is_done +ORDER BY command; + +DROP TABLE t_lightweight_mut_5; + +-- SELECT groupArray(...) + +CREATE TABLE t_lightweight_mut_5 (id UInt64, v Array(UInt64)) ENGINE = MergeTree ORDER BY id; + +SYSTEM STOP MERGES t_lightweight_mut_5; +INSERT INTO t_lightweight_mut_5 VALUES (10, [20]); + +ALTER TABLE t_lightweight_mut_5 UPDATE v = (SELECT groupArray(number) FROM numbers(10)) WHERE 1; + +SELECT id, v FROM t_lightweight_mut_5 ORDER BY id; + +ALTER TABLE t_lightweight_mut_5 UPDATE v = (SELECT groupArray(number) FROM numbers(10000)) WHERE 1; + +SELECT id, length(v) FROM t_lightweight_mut_5 ORDER BY id; -- { serverError BAD_ARGUMENTS } + +SELECT command FROM system.mutations +WHERE database = currentDatabase() AND table = 't_lightweight_mut_5' AND NOT is_done +ORDER BY command; + +SYSTEM START MERGES t_lightweight_mut_5; + +-- Force waiting for the previous mutations to finish +ALTER TABLE t_lightweight_mut_5 UPDATE v = v WHERE 1 SETTINGS mutations_sync = 2; + +SELECT id, length(v) FROM t_lightweight_mut_5 ORDER BY id; + +DROP TABLE t_lightweight_mut_5; + +-- SELECT uniqExactState(...)
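+-- Descriptive note (added): the aggregate-function state is computed by the subquery on the initiator (see the SET above), so the finalized value below should reflect uniqExactState over numbers(5).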
+ +CREATE TABLE t_lightweight_mut_5 (id UInt64, v AggregateFunction(uniqExact, UInt64)) ENGINE = MergeTree ORDER BY id; + +SYSTEM STOP MERGES t_lightweight_mut_5; +INSERT INTO t_lightweight_mut_5 VALUES (10, initializeAggregation('uniqExactState', 1::UInt64)); + +ALTER TABLE t_lightweight_mut_5 UPDATE v = (SELECT uniqExactState(number) FROM numbers(5)) WHERE 1; + +SELECT id, finalizeAggregation(v) FROM t_lightweight_mut_5 ORDER BY id; + +SELECT command FROM system.mutations +WHERE database = currentDatabase() AND table = 't_lightweight_mut_5' AND NOT is_done +ORDER BY command; + +DROP TABLE t_lightweight_mut_5; + +-- now() + +CREATE TABLE t_lightweight_mut_5 (id UInt64, v DateTime) ENGINE = MergeTree ORDER BY id; + +SYSTEM STOP MERGES t_lightweight_mut_5; +INSERT INTO t_lightweight_mut_5 VALUES (10, '2020-10-10'); + +ALTER TABLE t_lightweight_mut_5 UPDATE v = now() WHERE 1; + +SELECT id, v BETWEEN now() - INTERVAL 10 MINUTE AND now() FROM t_lightweight_mut_5; + +SELECT + replaceRegexpOne(command, '(\\d{10})', 'timestamp'), +FROM system.mutations +WHERE database = currentDatabase() AND table = 't_lightweight_mut_5' AND NOT is_done +ORDER BY command; + +DROP TABLE t_lightweight_mut_5; + +-- filesystem(...) + +CREATE TABLE t_lightweight_mut_5 (id UInt64, v UInt64) ENGINE = MergeTree ORDER BY id; + +SYSTEM STOP MERGES t_lightweight_mut_5; +INSERT INTO t_lightweight_mut_5 VALUES (10, 10); + +ALTER TABLE t_lightweight_mut_5 UPDATE v = filesystemCapacity(materialize('default')) WHERE 1; + +SELECT * FROM t_lightweight_mut_5 ORDER BY id; -- { serverError BAD_ARGUMENTS } +SELECT * FROM t_lightweight_mut_5 ORDER BY id SETTINGS apply_mutations_on_fly = 0; + +SELECT command FROM system.mutations +WHERE database = currentDatabase() AND table = 't_lightweight_mut_5' AND NOT is_done +ORDER BY command; + +DROP TABLE t_lightweight_mut_5; + +-- UPDATE SELECT randConstant() + +CREATE TABLE t_lightweight_mut_5 (id UInt64, v UInt64) ENGINE = MergeTree ORDER BY id; + +SYSTEM STOP MERGES t_lightweight_mut_5; +INSERT INTO t_lightweight_mut_5 VALUES (10, 10); + +-- Check that function in subquery is not rewritten. 
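+-- Descriptive note (added): with mutations_execute_subqueries_on_initiator = 0, the randConstant() call should still appear verbatim in system.mutations.command below.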
+ALTER TABLE t_lightweight_mut_5 +UPDATE v = +( + SELECT sum(number) FROM numbers(1000) WHERE number > randConstant() +) WHERE 1 SETTINGS mutations_execute_subqueries_on_initiator = 0; + +SELECT command FROM system.mutations +WHERE database = currentDatabase() AND table = 't_lightweight_mut_5' AND NOT is_done +ORDER BY command; + +DROP TABLE t_lightweight_mut_5; + +-- DELETE WHERE now() + +CREATE TABLE t_lightweight_mut_5 (id UInt64, d DateTime) ENGINE = MergeTree ORDER BY id; + +SYSTEM STOP MERGES t_lightweight_mut_5; +INSERT INTO t_lightweight_mut_5 VALUES (10, '2000-10-10'), (20, '2100-10-10'); + +ALTER TABLE t_lightweight_mut_5 DELETE WHERE d < now(); + +SELECT * FROM t_lightweight_mut_5 ORDER BY id SETTINGS apply_mutations_on_fly = 0; +SELECT * FROM t_lightweight_mut_5 ORDER BY id SETTINGS apply_mutations_on_fly = 1; + +SELECT + replaceRegexpOne(command, '(\\d{10})', 'timestamp'), +FROM system.mutations +WHERE database = currentDatabase() AND table = 't_lightweight_mut_5' AND NOT is_done +ORDER BY command; + +DROP TABLE t_lightweight_mut_5; diff --git a/parser/testdata/03047_on_fly_mutations_prewhere/ast.json b/parser/testdata/03047_on_fly_mutations_prewhere/ast.json new file mode 100644 index 000000000..c382aed74 --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_prewhere/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_update_prewhere (children 1)" + }, + { + "explain": " Identifier t_update_prewhere" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001542635, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/03047_on_fly_mutations_prewhere/metadata.json b/parser/testdata/03047_on_fly_mutations_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03047_on_fly_mutations_prewhere/query.sql b/parser/testdata/03047_on_fly_mutations_prewhere/query.sql new file mode 100644 index 000000000..b3e03d921 --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_prewhere/query.sql @@ -0,0 +1,49 @@ +DROP TABLE IF EXISTS t_update_prewhere; + +SET mutations_sync = 2; +SET apply_mutations_on_fly = 0; + +CREATE TABLE t_update_prewhere (id UInt64, c1 UInt64, c2 UInt64, c3 UInt64) +ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_update_prewhere SELECT number, number, number, number FROM numbers(100000); + +ALTER TABLE t_update_prewhere UPDATE c1 = 0 WHERE id % 2 = 0; + +SELECT count() FROM t_update_prewhere PREWHERE c1 != 0 WHERE c2 % 3 = 0; + +ALTER TABLE t_update_prewhere UPDATE c3 = c2 * 2 WHERE c1 != 0; + +SELECT sum(c3) FROM t_update_prewhere PREWHERE c3 % 3 = 0 WHERE c1 != 0; + +ALTER TABLE t_update_prewhere UPDATE c2 = c3 - c1 WHERE c3 < 10000; + +SELECT sum(c2) FROM t_update_prewhere PREWHERE c1 != 0 WHERE c2 % 5 = 1; + +DROP TABLE IF EXISTS t_update_prewhere; + +SELECT '++++++++++++++++++++'; + +SET mutations_sync = 0; +SET apply_mutations_on_fly = 1; + +CREATE TABLE t_update_prewhere (id UInt64, c1 UInt64, c2 UInt64, c3 UInt64) +ENGINE = MergeTree ORDER BY id; + +SYSTEM STOP MERGES t_update_prewhere; + +INSERT INTO t_update_prewhere SELECT number, number, number, number FROM numbers(100000); + +ALTER TABLE t_update_prewhere UPDATE c1 = 0 WHERE id % 2 = 0; + +SELECT count() FROM t_update_prewhere PREWHERE c1 != 0 WHERE c2 % 3 = 0; + +ALTER TABLE t_update_prewhere UPDATE c3 = c2 * 2 WHERE c1 != 0; + 
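+-- Descriptive note (added): the on-the-fly update of c3 above should be visible through the PREWHERE below, matching the synchronous run in the first half of this test.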
+SELECT sum(c3) FROM t_update_prewhere PREWHERE c3 % 3 = 0 WHERE c1 != 0; + +ALTER TABLE t_update_prewhere UPDATE c2 = c3 - c1 WHERE c3 < 10000; + +SELECT sum(c2) FROM t_update_prewhere PREWHERE c1 != 0 WHERE c2 % 5 = 1; + +DROP TABLE IF EXISTS t_update_prewhere; diff --git a/parser/testdata/03047_on_fly_mutations_projections/ast.json b/parser/testdata/03047_on_fly_mutations_projections/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_projections/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03047_on_fly_mutations_projections/metadata.json b/parser/testdata/03047_on_fly_mutations_projections/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_projections/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03047_on_fly_mutations_projections/query.sql b/parser/testdata/03047_on_fly_mutations_projections/query.sql new file mode 100644 index 000000000..500f576e7 --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_projections/query.sql @@ -0,0 +1,22 @@ + +DROP TABLE IF EXISTS t_update_projections; + +SET apply_mutations_on_fly = 1; +SET parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +CREATE TABLE t_update_projections (id UInt64, v UInt64, PROJECTION proj (SELECT sum(v))) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/t_update_projections', '1') ORDER BY tuple(); + +SYSTEM STOP MERGES t_update_projections; + +INSERT INTO t_update_projections SELECT number, number FROM numbers(100000); +SELECT sum(v) FROM t_update_projections SETTINGS force_optimize_projection = 1; + +ALTER TABLE t_update_projections UPDATE v = v * v WHERE id % 2 = 1; + +SYSTEM SYNC REPLICA t_update_projections PULL; + +SELECT sum(v) FROM t_update_projections; +SELECT sum(v) FROM t_update_projections SETTINGS force_optimize_projection = 1; -- { serverError PROJECTION_NOT_USED } + +DROP TABLE t_update_projections; diff --git a/parser/testdata/03047_on_fly_mutations_skip_index/ast.json b/parser/testdata/03047_on_fly_mutations_skip_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_skip_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03047_on_fly_mutations_skip_index/metadata.json b/parser/testdata/03047_on_fly_mutations_skip_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_skip_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03047_on_fly_mutations_skip_index/query.sql b/parser/testdata/03047_on_fly_mutations_skip_index/query.sql new file mode 100644 index 000000000..a609ae59a --- /dev/null +++ b/parser/testdata/03047_on_fly_mutations_skip_index/query.sql @@ -0,0 +1,46 @@ +-- Tags: no-shared-catalog, no-parallel-replicas +-- no-shared-catalog: STOP MERGES will only stop them on the current replica, the second one will continue to merge +-- no-parallel-replicas: the result of EXPLAIN differs with parallel replicas + +SET use_skip_indexes_on_data_read = 0; +SET use_query_condition_cache = 0; + +DROP TABLE IF EXISTS t_lightweight_mut_3; + +SET mutations_sync = 0; + +CREATE TABLE t_lightweight_mut_3 (id UInt64, v UInt64, INDEX idx v TYPE minmax GRANULARITY 1) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/t_lightweight_mut_3', '1') ORDER BY id; + +SYSTEM 
STOP MERGES t_lightweight_mut_3; + +INSERT INTO t_lightweight_mut_3 VALUES (1, 1); +INSERT INTO t_lightweight_mut_3 VALUES (2, 2000); + +SELECT id, v FROM t_lightweight_mut_3 WHERE v > 100 ORDER BY id SETTINGS force_data_skipping_indices = 'idx'; + +SELECT trim(explain) AS s FROM ( + EXPLAIN indexes = 1 + SELECT id, v FROM t_lightweight_mut_3 WHERE v > 100 ORDER BY id SETTINGS force_data_skipping_indices = 'idx' +) WHERE s LIKE 'Granules: %'; + +ALTER TABLE t_lightweight_mut_3 UPDATE v = 1000 WHERE id = 1; +INSERT INTO t_lightweight_mut_3 VALUES (3, 3); + +SYSTEM SYNC REPLICA t_lightweight_mut_3 PULL; + +SELECT id, v FROM t_lightweight_mut_3 WHERE v > 100 ORDER BY id SETTINGS apply_mutations_on_fly = 1; + +SELECT trim(explain) AS s FROM ( + EXPLAIN indexes = 1 + SELECT id, v FROM t_lightweight_mut_3 WHERE v > 100 ORDER BY id SETTINGS apply_mutations_on_fly = 1 +) WHERE s LIKE 'Granules: %'; + +SELECT id, v FROM t_lightweight_mut_3 WHERE v > 100 ORDER BY id SETTINGS apply_mutations_on_fly = 0; + +SELECT trim(explain) AS s FROM ( + EXPLAIN indexes = 1 + SELECT id, v FROM t_lightweight_mut_3 WHERE v > 100 ORDER BY id SETTINGS apply_mutations_on_fly = 0 +) WHERE s LIKE 'Granules: %'; + +DROP TABLE t_lightweight_mut_3; diff --git a/parser/testdata/03047_on_fly_update_delete/ast.json b/parser/testdata/03047_on_fly_update_delete/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03047_on_fly_update_delete/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03047_on_fly_update_delete/metadata.json b/parser/testdata/03047_on_fly_update_delete/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03047_on_fly_update_delete/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03047_on_fly_update_delete/query.sql b/parser/testdata/03047_on_fly_update_delete/query.sql new file mode 100644 index 000000000..fb81a9735 --- /dev/null +++ b/parser/testdata/03047_on_fly_update_delete/query.sql @@ -0,0 +1,52 @@ +-- Tags: no-shared-catalog +-- no-shared-catalog: STOP MERGES will only stop them on the current replica, the second one will continue to merge + +DROP TABLE IF EXISTS t_lightweight_mut_6; + +SET apply_mutations_on_fly = 1; + +CREATE TABLE t_lightweight_mut_6 (id UInt64, v UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/t_lightweight_mut_6', '1') +ORDER BY id +SETTINGS ratio_of_defaults_for_sparse_serialization = 1.0; -- There is a bug. 
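+-- Descriptive note (added): ratio_of_defaults_for_sparse_serialization = 1.0 effectively disables sparse serialization, presumably working around the bug mentioned above.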
+ +INSERT INTO t_lightweight_mut_6 select number, number FROM numbers(10000); + +SET mutations_sync = 2; +DELETE FROM t_lightweight_mut_6 WHERE id % 2 = 0; + +SELECT count(), sum(v) FROM t_lightweight_mut_6; + +SELECT sum(has_lightweight_delete) FROM system.parts +WHERE database = currentDatabase() AND table = 't_lightweight_mut_6' AND active; + +SET mutations_sync = 0; +SYSTEM STOP MERGES t_lightweight_mut_6; + +ALTER TABLE t_lightweight_mut_6 UPDATE v = v * 2 WHERE id % 5 = 0; +ALTER TABLE t_lightweight_mut_6 DELETE WHERE id % 3 = 0; + +SYSTEM SYNC REPLICA t_lightweight_mut_6 PULL; + +SELECT count(), sum(v) FROM t_lightweight_mut_6; +SELECT count(), sum(v) FROM t_lightweight_mut_6 SETTINGS apply_mutations_on_fly = 0; + +SELECT count(), sum(v) FROM t_lightweight_mut_6 PREWHERE id % 5 = 0; +SELECT count(), sum(v) FROM t_lightweight_mut_6 PREWHERE id % 5 = 0 SETTINGS apply_mutations_on_fly = 0; + +SYSTEM START MERGES t_lightweight_mut_6; + +SET mutations_sync = 2; +ALTER TABLE t_lightweight_mut_6 UPDATE v = v WHERE 1; +OPTIMIZE TABLE t_lightweight_mut_6 FINAL; + +SELECT count() FROM system.mutations +WHERE database = currentDatabase() AND table = 't_lightweight_mut_6' AND NOT is_done; + +SELECT sum(has_lightweight_delete) FROM system.parts +WHERE database = currentDatabase() AND table = 't_lightweight_mut_6' AND active; + +SELECT count(), sum(v) FROM t_lightweight_mut_6; +SELECT count(), sum(v) FROM t_lightweight_mut_6 PREWHERE id % 5 = 0; + +DROP TABLE t_lightweight_mut_6; diff --git a/parser/testdata/03048_not_found_column_xxx_in_block/ast.json b/parser/testdata/03048_not_found_column_xxx_in_block/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03048_not_found_column_xxx_in_block/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03048_not_found_column_xxx_in_block/metadata.json b/parser/testdata/03048_not_found_column_xxx_in_block/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03048_not_found_column_xxx_in_block/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03048_not_found_column_xxx_in_block/query.sql b/parser/testdata/03048_not_found_column_xxx_in_block/query.sql new file mode 100644 index 000000000..f511ea81e --- /dev/null +++ b/parser/testdata/03048_not_found_column_xxx_in_block/query.sql @@ -0,0 +1,67 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/41964 +SET enable_analyzer=1; + +DROP TABLE IF EXISTS ab_12_aaa; +DROP TABLE IF EXISTS ab_12_bbb; + +CREATE TABLE ab_12_aaa +( + `id` String, + `subid` Int32, + `prodcat` String, + `prodtype` String, + `quality` String, + `m1` Float64, + `m2` Float64, + `r1` Float64, + `r2` Float64, + `d1` Float64, + `d2` Float64, + `pcs` Float64, + `qty` Float64, + `amt` Float64, + `amts` Float64, + `prc` Float64, + `prcs` Float64, + `suqty` Float64, + `suamt` Float64, + `_year` String +) +ENGINE = MergeTree +ORDER BY (_year, prodcat, prodtype, quality, d1, id) +SETTINGS index_granularity = 8192; + +CREATE TABLE ab_12_bbb +( + `id` String, + `sales_type` String, + `date` Date32, + `o1` String, + `o2` String, + `o3` String, + `o4` String, + `o5` String, + `short` String, + `a1` String, + `a2` String, + `a3` String, + `idx` String, + `a4` String, + `ctx` String, + `_year` String, + `theyear` UInt16 MATERIALIZED toYear(`date`), + `themonth` UInt8 MATERIALIZED toMonth(`date`), + `theweek` UInt8 MATERIALIZED toISOWeek(`date`) +) +ENGINE = MergeTree +ORDER BY (theyear, themonth, _year, id, sales_type, date) 
+SETTINGS index_granularity = 8192; + +SELECT * +FROM ab_12_aaa aa +LEFT JOIN ab_12_bbb bb +ON bb.id = aa.id AND bb.`_year` = aa.`_year` +WHERE bb.theyear >= 2019; + +DROP TABLE IF EXISTS ab_12_aaa; +DROP TABLE IF EXISTS ab_12_bbb; diff --git a/parser/testdata/03049_analyzer_group_by_alias/ast.json b/parser/testdata/03049_analyzer_group_by_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03049_analyzer_group_by_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03049_analyzer_group_by_alias/metadata.json b/parser/testdata/03049_analyzer_group_by_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03049_analyzer_group_by_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03049_analyzer_group_by_alias/query.sql b/parser/testdata/03049_analyzer_group_by_alias/query.sql new file mode 100644 index 000000000..712d1c27e --- /dev/null +++ b/parser/testdata/03049_analyzer_group_by_alias/query.sql @@ -0,0 +1,23 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/7520 +SET enable_analyzer=1; +CREATE TABLE test (`a` UInt32, `b` UInt32) ENGINE = Memory; + +INSERT INTO test VALUES (1,2), (1,3), (2,4); + +-- 1 5 +-- 2 4 + +WITH + a as key +SELECT + a as k1, + sum(b) as k2 +FROM + test +GROUP BY + key +ORDER BY k1, k2; + +WITH a as key SELECT key as k1 FROM test GROUP BY key ORDER BY key; + +WITH a as key SELECT key as k1 FROM test ORDER BY key; diff --git a/parser/testdata/03049_unknown_identifier_materialized_column/ast.json b/parser/testdata/03049_unknown_identifier_materialized_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03049_unknown_identifier_materialized_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03049_unknown_identifier_materialized_column/metadata.json b/parser/testdata/03049_unknown_identifier_materialized_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03049_unknown_identifier_materialized_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03049_unknown_identifier_materialized_column/query.sql b/parser/testdata/03049_unknown_identifier_materialized_column/query.sql new file mode 100644 index 000000000..0efe59a1f --- /dev/null +++ b/parser/testdata/03049_unknown_identifier_materialized_column/query.sql @@ -0,0 +1,14 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/54317 +SET enable_analyzer=1; +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; + +CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier}; +USE {CLICKHOUSE_DATABASE:Identifier}; + +CREATE TABLE l (y String) Engine Memory; +CREATE TABLE r (d Date, y String, ty UInt16 MATERIALIZED toYear(d)) Engine Memory; +select * from l L left join r R on L.y = R.y where R.ty >= 2019; +select * from l left join r on l.y = r.y where r.ty >= 2019; +select * from {CLICKHOUSE_DATABASE:Identifier}.l left join {CLICKHOUSE_DATABASE:Identifier}.r on l.y = r.y where r.ty >= 2019; + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; diff --git a/parser/testdata/03050_select_one_one_one/ast.json b/parser/testdata/03050_select_one_one_one/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03050_select_one_one_one/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03050_select_one_one_one/metadata.json b/parser/testdata/03050_select_one_one_one/metadata.json new 
file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03050_select_one_one_one/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03050_select_one_one_one/query.sql b/parser/testdata/03050_select_one_one_one/query.sql new file mode 100644 index 000000000..09f3f20c3 --- /dev/null +++ b/parser/testdata/03050_select_one_one_one/query.sql @@ -0,0 +1,4 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/36973 +SET enable_analyzer=1; +SELECT 1, 1, 1; +SELECT * FROM (SELECT 1, 1, 1); diff --git a/parser/testdata/03051_many_ctes/ast.json b/parser/testdata/03051_many_ctes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03051_many_ctes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03051_many_ctes/metadata.json b/parser/testdata/03051_many_ctes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03051_many_ctes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03051_many_ctes/query.sql b/parser/testdata/03051_many_ctes/query.sql new file mode 100644 index 000000000..e442813b6 --- /dev/null +++ b/parser/testdata/03051_many_ctes/query.sql @@ -0,0 +1,6 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/40955 +SET enable_analyzer=1; +WITH toInt64(2) AS new_x SELECT new_x AS x FROM (SELECT 1 AS x) t; +WITH toInt64(2) AS new_x SELECT * replace(new_x as x) FROM (SELECT 1 AS x) t; +SELECT 2 AS x FROM (SELECT 1 AS x) t; +SELECT * replace(2 as x) FROM (SELECT 1 AS x) t; diff --git a/parser/testdata/03052_query_hash_includes_aliases/ast.json b/parser/testdata/03052_query_hash_includes_aliases/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03052_query_hash_includes_aliases/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03052_query_hash_includes_aliases/metadata.json b/parser/testdata/03052_query_hash_includes_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03052_query_hash_includes_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03052_query_hash_includes_aliases/query.sql b/parser/testdata/03052_query_hash_includes_aliases/query.sql new file mode 100644 index 000000000..55993175b --- /dev/null +++ b/parser/testdata/03052_query_hash_includes_aliases/query.sql @@ -0,0 +1,32 @@ +-- https://github.com/ClickHouse/ClickHouse/pull/40065 +SET enable_analyzer=1; + +SELECT +( + SELECT + 1 AS number, + number + FROM numbers(1) +) AS s, +( + SELECT + 1, + number + FROM numbers(1) +) AS s2; + +SELECT +( + SELECT + 1 + 2 AS number, + 1 + number AS b + FROM system.numbers + LIMIT 10, 1 +), +( + SELECT + 1 + 2 AS number2, + 1 + number AS b + FROM system.numbers + LIMIT 10, 1 +); diff --git a/parser/testdata/03053_analyzer_join_alias/ast.json b/parser/testdata/03053_analyzer_join_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03053_analyzer_join_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03053_analyzer_join_alias/metadata.json b/parser/testdata/03053_analyzer_join_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03053_analyzer_join_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03053_analyzer_join_alias/query.sql b/parser/testdata/03053_analyzer_join_alias/query.sql new file mode 100644 index 000000000..677cf9d4d --- 
/dev/null +++ b/parser/testdata/03053_analyzer_join_alias/query.sql @@ -0,0 +1,44 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/23104 +SET enable_analyzer=1; +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; +CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier}; + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.base +( +`id` UInt64, +`id2` UInt64, +`d` UInt64, +`value` UInt64 +) +ENGINE=MergeTree() +PARTITION BY d +ORDER BY (id,id2,d); + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.derived1 +( + `id1` UInt64, + `d1` UInt64, + `value1` UInt64 +) +ENGINE = MergeTree() +PARTITION BY d1 +ORDER BY (id1, d1); + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.derived2 +( + `id2` UInt64, + `d2` UInt64, + `value2` UInt64 +) +ENGINE = MergeTree() +PARTITION BY d2 +ORDER BY (id2, d2); + +SELECT + base.id AS `base.id`, + derived2.id2 AS `derived2.id2`, + derived2.value2 AS `derived2.value2`, + derived1.value1 AS `derived1.value1` +FROM {CLICKHOUSE_DATABASE:Identifier}.base AS base +LEFT JOIN {CLICKHOUSE_DATABASE:Identifier}.derived2 AS derived2 ON base.id2 = derived2.id2 +LEFT JOIN {CLICKHOUSE_DATABASE:Identifier}.derived1 AS derived1 ON base.id = derived1.id1; diff --git a/parser/testdata/03054_analyzer_join_alias/ast.json b/parser/testdata/03054_analyzer_join_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03054_analyzer_join_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03054_analyzer_join_alias/metadata.json b/parser/testdata/03054_analyzer_join_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03054_analyzer_join_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03054_analyzer_join_alias/query.sql b/parser/testdata/03054_analyzer_join_alias/query.sql new file mode 100644 index 000000000..f018f57cc --- /dev/null +++ b/parser/testdata/03054_analyzer_join_alias/query.sql @@ -0,0 +1,13 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/21584 +SET enable_analyzer=1; +SELECT count() +FROM +( + SELECT number AS key_1 + FROM numbers(15) +) AS x +ALL INNER JOIN +( + SELECT number AS key_1 + FROM numbers(10) +) AS z ON key_1 = z.key_1; diff --git a/parser/testdata/03055_analyzer_subquery_group_array/ast.json b/parser/testdata/03055_analyzer_subquery_group_array/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03055_analyzer_subquery_group_array/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03055_analyzer_subquery_group_array/metadata.json b/parser/testdata/03055_analyzer_subquery_group_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03055_analyzer_subquery_group_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03055_analyzer_subquery_group_array/query.sql b/parser/testdata/03055_analyzer_subquery_group_array/query.sql new file mode 100644 index 000000000..29ba1dd7c --- /dev/null +++ b/parser/testdata/03055_analyzer_subquery_group_array/query.sql @@ -0,0 +1,6 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/23344 +SET enable_analyzer=1; +SELECT logTrace(repeat('Hello', 100)), ignore(*) +FROM ( + SELECT ignore((SELECT groupArrayState(([number], [number])) FROM numbers(19000))) +) diff --git a/parser/testdata/03057_analyzer_subquery_alias_join/ast.json b/parser/testdata/03057_analyzer_subquery_alias_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null 
+++ b/parser/testdata/03057_analyzer_subquery_alias_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03057_analyzer_subquery_alias_join/metadata.json b/parser/testdata/03057_analyzer_subquery_alias_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03057_analyzer_subquery_alias_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03057_analyzer_subquery_alias_join/query.sql b/parser/testdata/03057_analyzer_subquery_alias_join/query.sql new file mode 100644 index 000000000..92f603ed5 --- /dev/null +++ b/parser/testdata/03057_analyzer_subquery_alias_join/query.sql @@ -0,0 +1,13 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/10276 +SET enable_analyzer=1; +SELECT + sum(x.n) as n, + sum(z.n) as n2 +FROM +( + SELECT 1000 AS n,1 as id +) AS x +join (select 10000 as n,1 as id) as y +on x.id = y.id +left join (select 100000 as n,1 as id) as z +on x.id = z.id; diff --git a/parser/testdata/03058_analyzer_ambiguous_columns/ast.json b/parser/testdata/03058_analyzer_ambiguous_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03058_analyzer_ambiguous_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03058_analyzer_ambiguous_columns/metadata.json b/parser/testdata/03058_analyzer_ambiguous_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03058_analyzer_ambiguous_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03058_analyzer_ambiguous_columns/query.sql b/parser/testdata/03058_analyzer_ambiguous_columns/query.sql new file mode 100644 index 000000000..ef3c0e5f6 --- /dev/null +++ b/parser/testdata/03058_analyzer_ambiguous_columns/query.sql @@ -0,0 +1,25 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/4567 +SET enable_analyzer=1; +DROP TABLE IF EXISTS fact; +DROP TABLE IF EXISTS animals; +DROP TABLE IF EXISTS colors; + +create table fact(id Int64, animal_key Int64, color_key Int64) Engine = MergeTree order by tuple(); +insert into fact values (1,1,1),(2,2,2); + +create table animals(animal_key UInt64, animal_name String) Engine = MergeTree order by tuple(); +insert into animals values (0, 'unknown'); + +create table colors(color_key UInt64, color_name String) Engine = MergeTree order by tuple(); +insert into colors values (0, 'unknown'); + + +select id, animal_name, a.animal_key, color_name, color_key +from fact a + left join (select toInt64(animal_key) animal_key, animal_name from animals) b on (a.animal_key = b.animal_key) + left join (select toInt64(color_key) color_key, color_name from colors) c on (a.color_key = c.color_key); -- { serverError AMBIGUOUS_IDENTIFIER } + +select id, animal_name, animal_key, color_name, color_key +from fact a + left join (select toInt64(animal_key) animal_key, animal_name from animals) b on (a.animal_key = b.animal_key) + left join (select toInt64(color_key) color_key, color_name from colors) c on (a.color_key = c.color_key); -- { serverError AMBIGUOUS_IDENTIFIER } diff --git a/parser/testdata/03059_analyzer_join_engine_missing_column/ast.json b/parser/testdata/03059_analyzer_join_engine_missing_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03059_analyzer_join_engine_missing_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03059_analyzer_join_engine_missing_column/metadata.json 
b/parser/testdata/03059_analyzer_join_engine_missing_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03059_analyzer_join_engine_missing_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03059_analyzer_join_engine_missing_column/query.sql b/parser/testdata/03059_analyzer_join_engine_missing_column/query.sql new file mode 100644 index 000000000..164a42e5b --- /dev/null +++ b/parser/testdata/03059_analyzer_join_engine_missing_column/query.sql @@ -0,0 +1,9 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/17710 +SET enable_analyzer=1; +CREATE TABLE id_val(id UInt32, val UInt32) ENGINE = Memory; +CREATE TABLE id_val_join0(id UInt32, val UInt8) ENGINE = Join(ANY, LEFT, id) SETTINGS join_use_nulls = 0; +CREATE TABLE id_val_join1(id UInt32, val UInt8) ENGINE = Join(ANY, LEFT, id) SETTINGS join_use_nulls = 1; + +SELECT * FROM id_val ANY LEFT JOIN id_val_join0 USING (id) SETTINGS join_use_nulls = 0; + +SELECT * FROM id_val ANY LEFT JOIN id_val_join1 USING (id) SETTINGS join_use_nulls = 1; diff --git a/parser/testdata/03060_analyzer_regular_view_alias/ast.json b/parser/testdata/03060_analyzer_regular_view_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03060_analyzer_regular_view_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03060_analyzer_regular_view_alias/metadata.json b/parser/testdata/03060_analyzer_regular_view_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03060_analyzer_regular_view_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03060_analyzer_regular_view_alias/query.sql b/parser/testdata/03060_analyzer_regular_view_alias/query.sql new file mode 100644 index 000000000..0556683b9 --- /dev/null +++ b/parser/testdata/03060_analyzer_regular_view_alias/query.sql @@ -0,0 +1,16 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/11068 +SET enable_analyzer=1; +create table vt(datetime_value DateTime, value Float64) Engine=Memory; + +create view computed_datum_hours as +SELECT + toStartOfHour(b.datetime_value) AS datetime_desc, + sum(b.value) AS value +FROM vt AS b +GROUP BY toStartOfHour(b.datetime_value); + +SELECT + toStartOfHour(b.datetime_value) AS datetime_desc, + sum(b.value) AS value +FROM vt AS b +GROUP BY toStartOfHour(b.datetime_value); diff --git a/parser/testdata/03061_analyzer_alias_as_right_key_in_join/ast.json b/parser/testdata/03061_analyzer_alias_as_right_key_in_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03061_analyzer_alias_as_right_key_in_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03061_analyzer_alias_as_right_key_in_join/metadata.json b/parser/testdata/03061_analyzer_alias_as_right_key_in_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03061_analyzer_alias_as_right_key_in_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03061_analyzer_alias_as_right_key_in_join/query.sql b/parser/testdata/03061_analyzer_alias_as_right_key_in_join/query.sql new file mode 100644 index 000000000..c9d1e8e95 --- /dev/null +++ b/parser/testdata/03061_analyzer_alias_as_right_key_in_join/query.sql @@ -0,0 +1,9 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/24395 +SET enable_analyzer=1; +CREATE TABLE xxxx_yyy (key UInt32, key_b ALIAS key) ENGINE=MergeTree() ORDER BY key; 
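+-- Descriptive note (added): key_b is an ALIAS column; the self-join below uses it as the right-hand join key.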
+INSERT INTO xxxx_yyy SELECT number FROM numbers(10); + +SELECT * +FROM xxxx_yyy AS a +INNER JOIN xxxx_yyy AS b ON a.key = b.key_b +ORDER BY ALL; diff --git a/parser/testdata/03062_analyzer_join_engine_missing_column/ast.json b/parser/testdata/03062_analyzer_join_engine_missing_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03062_analyzer_join_engine_missing_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03062_analyzer_join_engine_missing_column/metadata.json b/parser/testdata/03062_analyzer_join_engine_missing_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03062_analyzer_join_engine_missing_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03062_analyzer_join_engine_missing_column/query.sql b/parser/testdata/03062_analyzer_join_engine_missing_column/query.sql new file mode 100644 index 000000000..487d74b33 --- /dev/null +++ b/parser/testdata/03062_analyzer_join_engine_missing_column/query.sql @@ -0,0 +1,13 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/23416 +SET enable_analyzer=1; +create table test (TOPIC String, PARTITION UInt64, OFFSET UInt64, ID UInt64) ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03062', 'r2') ORDER BY (TOPIC, PARTITION, OFFSET); + +create table test_join (TOPIC String, PARTITION UInt64, OFFSET UInt64) ENGINE = Join(ANY, LEFT, `TOPIC`, `PARTITION`) SETTINGS join_any_take_last_row = 1; + +insert into test values('abc',0,0,0); + +insert into test_join values('abc',0,1); + +select *, joinGet('test_join', 'OFFSET', TOPIC, PARTITION) from test; + +select * from test any left join test_join using (TOPIC, PARTITION); diff --git a/parser/testdata/03063_analyzer_multi_join_wrong_table_specifier/ast.json b/parser/testdata/03063_analyzer_multi_join_wrong_table_specifier/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03063_analyzer_multi_join_wrong_table_specifier/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03063_analyzer_multi_join_wrong_table_specifier/metadata.json b/parser/testdata/03063_analyzer_multi_join_wrong_table_specifier/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03063_analyzer_multi_join_wrong_table_specifier/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03063_analyzer_multi_join_wrong_table_specifier/query.sql b/parser/testdata/03063_analyzer_multi_join_wrong_table_specifier/query.sql new file mode 100644 index 000000000..5655d4a01 --- /dev/null +++ b/parser/testdata/03063_analyzer_multi_join_wrong_table_specifier/query.sql @@ -0,0 +1,16 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/23162 +SET enable_analyzer=1; +CREATE TABLE t1 ( k Int64, x Int64) ENGINE = Memory; + +CREATE TABLE t2( x Int64 ) ENGINE = Memory; + +create table s (k Int64, d DateTime) Engine=Memory; + +SELECT * FROM t1 +INNER JOIN s ON t1.k = s.k +INNER JOIN t2 ON t2.x = t1.x +WHERE (t1.d >= now()); -- { serverError UNKNOWN_IDENTIFIER } + +SELECT * FROM t1 +INNER JOIN s ON t1.k = s.k +WHERE (t1.d >= now()); -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/03064_analyzer_named_subqueries/ast.json b/parser/testdata/03064_analyzer_named_subqueries/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03064_analyzer_named_subqueries/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/03064_analyzer_named_subqueries/metadata.json b/parser/testdata/03064_analyzer_named_subqueries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03064_analyzer_named_subqueries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03064_analyzer_named_subqueries/query.sql b/parser/testdata/03064_analyzer_named_subqueries/query.sql new file mode 100644 index 000000000..d56964110 --- /dev/null +++ b/parser/testdata/03064_analyzer_named_subqueries/query.sql @@ -0,0 +1,6 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/25655 +SET enable_analyzer=1; +SELECT + sum(t.b) / 1 a, + sum(t.a) +FROM ( SELECT 1 a, 2 b ) t; diff --git a/parser/testdata/03065_analyzer_cross_join_and_array_join/ast.json b/parser/testdata/03065_analyzer_cross_join_and_array_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03065_analyzer_cross_join_and_array_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03065_analyzer_cross_join_and_array_join/metadata.json b/parser/testdata/03065_analyzer_cross_join_and_array_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03065_analyzer_cross_join_and_array_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03065_analyzer_cross_join_and_array_join/query.sql b/parser/testdata/03065_analyzer_cross_join_and_array_join/query.sql new file mode 100644 index 000000000..5034e2eed --- /dev/null +++ b/parser/testdata/03065_analyzer_cross_join_and_array_join/query.sql @@ -0,0 +1,3 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/11757 +SET enable_analyzer=1; +select * from (select [1, 2] a) aa cross join (select [3, 4] b) bb array join aa.a, bb.b; diff --git a/parser/testdata/03066_analyzer_global_with_statement/ast.json b/parser/testdata/03066_analyzer_global_with_statement/ast.json new file mode 100644 index 000000000..5449e0f41 --- /dev/null +++ b/parser/testdata/03066_analyzer_global_with_statement/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001326382, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03066_analyzer_global_with_statement/metadata.json b/parser/testdata/03066_analyzer_global_with_statement/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03066_analyzer_global_with_statement/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03066_analyzer_global_with_statement/query.sql b/parser/testdata/03066_analyzer_global_with_statement/query.sql new file mode 100644 index 000000000..2b879ed73 --- /dev/null +++ b/parser/testdata/03066_analyzer_global_with_statement/query.sql @@ -0,0 +1,8 @@ +SET enable_analyzer=1; +WITH 0 AS test +SELECT * +FROM +( + SELECT 1 AS test +) +SETTINGS enable_global_with_statement = 1 diff --git a/parser/testdata/03067_analyzer_complex_alias_join/ast.json b/parser/testdata/03067_analyzer_complex_alias_join/ast.json new file mode 100644 index 000000000..b226148be --- /dev/null +++ b/parser/testdata/03067_analyzer_complex_alias_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001078592, + "rows_read": 1, + "bytes_read": 11 + 
} +} diff --git a/parser/testdata/03067_analyzer_complex_alias_join/metadata.json b/parser/testdata/03067_analyzer_complex_alias_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03067_analyzer_complex_alias_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03067_analyzer_complex_alias_join/query.sql b/parser/testdata/03067_analyzer_complex_alias_join/query.sql new file mode 100644 index 000000000..58845b937 --- /dev/null +++ b/parser/testdata/03067_analyzer_complex_alias_join/query.sql @@ -0,0 +1,10 @@ +SET enable_analyzer=1; +with d as (select 'key'::Varchar(255) c, 'x'::Varchar(255) s) +SELECT r1, c as r2 +FROM ( + SELECT t as s, c as r1 + FROM ( SELECT 'y'::Varchar(255) as t, 'x'::Varchar(255) as s) t1 + LEFT JOIN d USING (s) + ) t2 +LEFT JOIN d using (s) +SETTINGS join_use_nulls=1; diff --git a/parser/testdata/03068_analyzer_distributed_join/ast.json b/parser/testdata/03068_analyzer_distributed_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03068_analyzer_distributed_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03068_analyzer_distributed_join/metadata.json b/parser/testdata/03068_analyzer_distributed_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03068_analyzer_distributed_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03068_analyzer_distributed_join/query.sql b/parser/testdata/03068_analyzer_distributed_join/query.sql new file mode 100644 index 000000000..4ea0a985d --- /dev/null +++ b/parser/testdata/03068_analyzer_distributed_join/query.sql @@ -0,0 +1,58 @@ +-- Tags: no-replicated-database, shard +-- Closes: https://github.com/ClickHouse/ClickHouse/issues/6571 + +SET enable_analyzer=1; +CREATE TABLE LINEITEM_shard ON CLUSTER test_shard_localhost +( + L_ORDERKEY UInt64, + L_COMMITDATE UInt32, + L_RECEIPTDATE UInt32 +) +ENGINE = MergeTree() +ORDER BY L_ORDERKEY; + +CREATE TABLE LINEITEM AS LINEITEM_shard +ENGINE = Distributed('test_shard_localhost', currentDatabase(), LINEITEM_shard, rand()); + +CREATE TABLE ORDERS_shard ON CLUSTER test_shard_localhost +( + O_ORDERKEY UInt64, + O_ORDERPRIORITY UInt32 +) +ENGINE = MergeTree() +ORDER BY O_ORDERKEY; + +CREATE TABLE ORDERS AS ORDERS_shard +ENGINE = Distributed('test_shard_localhost', currentDatabase(), ORDERS_shard, rand()); + +SET joined_subquery_requires_alias=0; + +select + O_ORDERPRIORITY, + count(*) as order_count +from ORDERS JOIN ( + select L_ORDERKEY + from + LINEITEM_shard + group by L_ORDERKEY + having any(L_COMMITDATE < L_RECEIPTDATE) +) on O_ORDERKEY=L_ORDERKEY +group by O_ORDERPRIORITY +order by O_ORDERPRIORITY +limit 1; + +SET joined_subquery_requires_alias=1; + +select + O_ORDERPRIORITY, + count(*) as order_count +from ORDERS JOIN ( + select L_ORDERKEY + from + LINEITEM_shard + group by L_ORDERKEY + having any(L_COMMITDATE < L_RECEIPTDATE) +) AS x on O_ORDERKEY=L_ORDERKEY +group by O_ORDERPRIORITY +order by O_ORDERPRIORITY +limit 1; diff --git a/parser/testdata/03069_analyzer_with_alias_in_array_join/ast.json b/parser/testdata/03069_analyzer_with_alias_in_array_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03069_analyzer_with_alias_in_array_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03069_analyzer_with_alias_in_array_join/metadata.json 
b/parser/testdata/03069_analyzer_with_alias_in_array_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03069_analyzer_with_alias_in_array_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03069_analyzer_with_alias_in_array_join/query.sql b/parser/testdata/03069_analyzer_with_alias_in_array_join/query.sql new file mode 100644 index 000000000..5ec04cbc0 --- /dev/null +++ b/parser/testdata/03069_analyzer_with_alias_in_array_join/query.sql @@ -0,0 +1,6 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/4432 +SET enable_analyzer=1; +WITH [1, 2] AS zz +SELECT x +FROM system.one +ARRAY JOIN zz AS x diff --git a/parser/testdata/03070_analyzer_CTE_scalar_as_numbers/ast.json b/parser/testdata/03070_analyzer_CTE_scalar_as_numbers/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03070_analyzer_CTE_scalar_as_numbers/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03070_analyzer_CTE_scalar_as_numbers/metadata.json b/parser/testdata/03070_analyzer_CTE_scalar_as_numbers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03070_analyzer_CTE_scalar_as_numbers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03070_analyzer_CTE_scalar_as_numbers/query.sql b/parser/testdata/03070_analyzer_CTE_scalar_as_numbers/query.sql new file mode 100644 index 000000000..a94ae8114 --- /dev/null +++ b/parser/testdata/03070_analyzer_CTE_scalar_as_numbers/query.sql @@ -0,0 +1,6 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/8259 +SET enable_analyzer=1; +with + (select 25) as something +select *, something +from numbers(toUInt64(assumeNotNull(something))); diff --git a/parser/testdata/03071_analyzer_array_join_forbid_non_existing_columns/ast.json b/parser/testdata/03071_analyzer_array_join_forbid_non_existing_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03071_analyzer_array_join_forbid_non_existing_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03071_analyzer_array_join_forbid_non_existing_columns/metadata.json b/parser/testdata/03071_analyzer_array_join_forbid_non_existing_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03071_analyzer_array_join_forbid_non_existing_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03071_analyzer_array_join_forbid_non_existing_columns/query.sql b/parser/testdata/03071_analyzer_array_join_forbid_non_existing_columns/query.sql new file mode 100644 index 000000000..211fa2a31 --- /dev/null +++ b/parser/testdata/03071_analyzer_array_join_forbid_non_existing_columns/query.sql @@ -0,0 +1,12 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/9233 +SET enable_analyzer=1; +SELECT * +FROM +( + SELECT + [1, 2, 3] AS x, + [4, 5, 6] AS y +) +ARRAY JOIN + x, + Y; -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/03071_fix_short_circuit_logic/ast.json b/parser/testdata/03071_fix_short_circuit_logic/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03071_fix_short_circuit_logic/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03071_fix_short_circuit_logic/metadata.json b/parser/testdata/03071_fix_short_circuit_logic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03071_fix_short_circuit_logic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03071_fix_short_circuit_logic/query.sql b/parser/testdata/03071_fix_short_circuit_logic/query.sql new file mode 100644 index 000000000..dc5fb5085 --- /dev/null +++ b/parser/testdata/03071_fix_short_circuit_logic/query.sql @@ -0,0 +1,62 @@ + + +CREATE FUNCTION IF NOT EXISTS unhexPrefixed AS value -> unhex(substring(value, 3)); +CREATE FUNCTION IF NOT EXISTS hex2bytes AS address -> CAST(unhexPrefixed(address), 'FixedString(20)'); +CREATE FUNCTION IF NOT EXISTS bytes2hex AS address -> concat('0x', lower(hex(address))); + +CREATE TABLE test +( + `transfer_id` String, + `address` FixedString(20), + `value` UInt256, + `block_timestamp` DateTime('UTC'), + `token_address` FixedString(20) +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(block_timestamp) +PRIMARY KEY (address, block_timestamp) +ORDER BY (address, block_timestamp); + +INSERT INTO test SELECT 'token-transfer-0x758f1bbabb160683e1c80ed52dcd24a32b599d40edf1cec91b5f1199c0e392a2-56', hex2bytes('0xd387a6e4e84a6c86bd90c158c6028a58cc8ac459'), 3000000000000000000000, '2024-01-02 16:54:59', 'abc'; + +CREATE TABLE token_data +( + token_address_hex String, + chain String, + is_blacklisted Bool +) +ENGINE = TinyLog; + +INSERT INTO token_data SELECT bytes2hex('abc'), 'zksync', false; + +CREATE DICTIONARY token_data_map +( + token_address_hex String, + chain String, + is_blacklisted Bool +) +PRIMARY KEY token_address_hex, chain +SOURCE(Clickhouse(table token_data)) +LIFETIME(MIN 200 MAX 300) +LAYOUT(COMPLEX_KEY_HASHED_ARRAY()); + +SELECT block_timestamp +FROM +( + SELECT + block_timestamp, + bytes2hex(token_address) AS token_address_hex + FROM + ( + SELECT + transfer_id, + address, + value, + block_timestamp, + token_address, + 'zksync' AS chain + FROM test + ) + WHERE (address = hex2bytes('0xd387a6e4e84a6c86bd90c158c6028a58cc8ac459')) AND (transfer_id NOT LIKE 'gas%') AND (value > 0) AND (dictGetOrDefault(token_data_map, 'is_blacklisted', (token_address_hex, 'zksync'), true)) +) +SETTINGS max_threads = 1, short_circuit_function_evaluation = 'enable', enable_analyzer = 0; diff --git a/parser/testdata/03072_analyzer_missing_columns_from_subquery/ast.json b/parser/testdata/03072_analyzer_missing_columns_from_subquery/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03072_analyzer_missing_columns_from_subquery/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03072_analyzer_missing_columns_from_subquery/metadata.json b/parser/testdata/03072_analyzer_missing_columns_from_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03072_analyzer_missing_columns_from_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03072_analyzer_missing_columns_from_subquery/query.sql b/parser/testdata/03072_analyzer_missing_columns_from_subquery/query.sql new file mode 100644 index 000000000..ec3b067cb --- /dev/null +++ b/parser/testdata/03072_analyzer_missing_columns_from_subquery/query.sql @@ -0,0 +1,3 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/14699 +SET enable_analyzer=1; +select * from (select number from numbers(1)) where not ignore(*); diff --git a/parser/testdata/03073_analyzer_alias_as_column_name/ast.json b/parser/testdata/03073_analyzer_alias_as_column_name/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/03073_analyzer_alias_as_column_name/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03073_analyzer_alias_as_column_name/metadata.json b/parser/testdata/03073_analyzer_alias_as_column_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03073_analyzer_alias_as_column_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03073_analyzer_alias_as_column_name/query.sql b/parser/testdata/03073_analyzer_alias_as_column_name/query.sql new file mode 100644 index 000000000..bba51e28b --- /dev/null +++ b/parser/testdata/03073_analyzer_alias_as_column_name/query.sql @@ -0,0 +1,9 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/27068 +SET enable_analyzer=1; +CREATE TABLE test ( id String, create_time DateTime ) ENGINE = MergeTree ORDER BY id; + +insert into test values(1,'1970-02-01 00:00:00'); +insert into test values(2,'1970-02-01 00:00:00'); +insert into test values(3,'1970-03-01 00:00:00'); + +select id,'1997-02-01' as create_time from test where test.create_time='1970-02-01 00:00:00' ORDER BY id diff --git a/parser/testdata/03074_analyzer_alias_column_in_view/ast.json b/parser/testdata/03074_analyzer_alias_column_in_view/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03074_analyzer_alias_column_in_view/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03074_analyzer_alias_column_in_view/metadata.json b/parser/testdata/03074_analyzer_alias_column_in_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03074_analyzer_alias_column_in_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03074_analyzer_alias_column_in_view/query.sql b/parser/testdata/03074_analyzer_alias_column_in_view/query.sql new file mode 100644 index 000000000..314b6c0e8 --- /dev/null +++ b/parser/testdata/03074_analyzer_alias_column_in_view/query.sql @@ -0,0 +1,7 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/28687 +SET enable_analyzer=1; +create view alias (dummy int, n alias dummy) as select * from system.one; + +select n from alias; + +select * from alias where n=0; diff --git a/parser/testdata/03075_analyzer_subquery_alias/ast.json b/parser/testdata/03075_analyzer_subquery_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03075_analyzer_subquery_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03075_analyzer_subquery_alias/metadata.json b/parser/testdata/03075_analyzer_subquery_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03075_analyzer_subquery_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03075_analyzer_subquery_alias/query.sql b/parser/testdata/03075_analyzer_subquery_alias/query.sql new file mode 100644 index 000000000..4f097350d --- /dev/null +++ b/parser/testdata/03075_analyzer_subquery_alias/query.sql @@ -0,0 +1,11 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/28777 +SET enable_analyzer=1; +SELECT + sum(q0.a2) AS a1, + sum(q0.a1) AS a9 +FROM +( + SELECT + 1 AS a1, + 2 AS a2 +) AS q0; diff --git a/parser/testdata/03076_analyzer_multiple_joins_alias/ast.json b/parser/testdata/03076_analyzer_multiple_joins_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03076_analyzer_multiple_joins_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff 
--git a/parser/testdata/03076_analyzer_multiple_joins_alias/metadata.json b/parser/testdata/03076_analyzer_multiple_joins_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03076_analyzer_multiple_joins_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03076_analyzer_multiple_joins_alias/query.sql b/parser/testdata/03076_analyzer_multiple_joins_alias/query.sql new file mode 100644 index 000000000..894e3bc56 --- /dev/null +++ b/parser/testdata/03076_analyzer_multiple_joins_alias/query.sql @@ -0,0 +1,51 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/29734 +SET enable_analyzer=1; +SELECT * +FROM +( + SELECT 1 AS x +) AS a +INNER JOIN +( + SELECT + 1 AS x, + 2 AS y +) AS b ON (a.x = b.x) AND (a.y = b.y); -- { serverError UNKNOWN_IDENTIFIER } + + + +SELECT * +FROM +( + SELECT 1 AS x +) AS a +INNER JOIN +( + SELECT + 1 AS x, + 2 AS y +) AS b ON (a.x = b.x) AND (a.y = b.y) +INNER JOIN +( + SELECT 3 AS x +) AS c ON a.x = c.x; -- { serverError UNKNOWN_IDENTIFIER } + + +SELECT * +FROM +( + SELECT number AS x + FROM numbers(10) +) AS a +INNER JOIN +( + SELECT + number AS x, + number AS y + FROM numbers(10) +) AS b ON (a.x = b.x) AND (a.y = b.y) +INNER JOIN +( + SELECT number AS x + FROM numbers(10) +) AS c ON a.x = c.x; -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/03077_analyzer_multi_scalar_subquery_aliases/ast.json b/parser/testdata/03077_analyzer_multi_scalar_subquery_aliases/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03077_analyzer_multi_scalar_subquery_aliases/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03077_analyzer_multi_scalar_subquery_aliases/metadata.json b/parser/testdata/03077_analyzer_multi_scalar_subquery_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03077_analyzer_multi_scalar_subquery_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03077_analyzer_multi_scalar_subquery_aliases/query.sql b/parser/testdata/03077_analyzer_multi_scalar_subquery_aliases/query.sql new file mode 100644 index 000000000..d4335d35e --- /dev/null +++ b/parser/testdata/03077_analyzer_multi_scalar_subquery_aliases/query.sql @@ -0,0 +1,23 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/33825 +SET enable_analyzer=1; +CREATE TABLE t1 (i Int64, j Int64) ENGINE = Memory; +INSERT INTO t1 SELECT number, number FROM system.numbers LIMIT 10; +SELECT + (SELECT max(i) FROM t1) as i, + (SELECT max(i) FROM t1) as j, + (SELECT max(i) FROM t1) as k, + (SELECT max(i) FROM t1) as l +FROM t1; + +SELECT 1; + +WITH ( + SELECT max(i) + FROM t1 + ) AS value +SELECT + value AS i, + value AS j, + value AS k, + value AS l +FROM t1; diff --git a/parser/testdata/03078_analyzer_multi_scalar_subquery_aliases/ast.json b/parser/testdata/03078_analyzer_multi_scalar_subquery_aliases/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03078_analyzer_multi_scalar_subquery_aliases/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03078_analyzer_multi_scalar_subquery_aliases/metadata.json b/parser/testdata/03078_analyzer_multi_scalar_subquery_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03078_analyzer_multi_scalar_subquery_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03078_analyzer_multi_scalar_subquery_aliases/query.sql b/parser/testdata/03078_analyzer_multi_scalar_subquery_aliases/query.sql new file mode 100644 index 000000000..b9b850619 --- /dev/null +++ b/parser/testdata/03078_analyzer_multi_scalar_subquery_aliases/query.sql @@ -0,0 +1,17 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/33825 +SET enable_analyzer=1; +CREATE TABLE t2 (first_column Int64, second_column Int64) ENGINE = Memory; +INSERT INTO t2 SELECT number, number FROM system.numbers LIMIT 10; + + +SELECT ( + SELECT 111111111111 + ) AS first_column +FROM t2; + +SELECT 1; + +SELECT ( + SELECT 2222222222 + ) AS second_column +FROM t2; diff --git a/parser/testdata/03079_analyzer_numeric_literals_as_column_names/ast.json b/parser/testdata/03079_analyzer_numeric_literals_as_column_names/ast.json new file mode 100644 index 000000000..d9e2eb451 --- /dev/null +++ b/parser/testdata/03079_analyzer_numeric_literals_as_column_names/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001431067, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03079_analyzer_numeric_literals_as_column_names/metadata.json b/parser/testdata/03079_analyzer_numeric_literals_as_column_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03079_analyzer_numeric_literals_as_column_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03079_analyzer_numeric_literals_as_column_names/query.sql b/parser/testdata/03079_analyzer_numeric_literals_as_column_names/query.sql new file mode 100644 index 000000000..80e681c07 --- /dev/null +++ b/parser/testdata/03079_analyzer_numeric_literals_as_column_names/query.sql @@ -0,0 +1,13 @@ +SET enable_analyzer=1; +CREATE TABLE testdata (`1` String) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO testdata VALUES ('testdata'); + +SELECT * +FROM ( + SELECT if(isValidUTF8(`1`), NULL, 'error!') AS error_message, + if(error_message IS NULL, 1, 0) AS valid + FROM testdata +) +WHERE valid; + +select * from (select 'str' as `1`) where 1; diff --git a/parser/testdata/03080_analyzer_prefer_column_name_to_alias__virtual_columns/ast.json b/parser/testdata/03080_analyzer_prefer_column_name_to_alias__virtual_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03080_analyzer_prefer_column_name_to_alias__virtual_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03080_analyzer_prefer_column_name_to_alias__virtual_columns/metadata.json b/parser/testdata/03080_analyzer_prefer_column_name_to_alias__virtual_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03080_analyzer_prefer_column_name_to_alias__virtual_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03080_analyzer_prefer_column_name_to_alias__virtual_columns/query.sql b/parser/testdata/03080_analyzer_prefer_column_name_to_alias__virtual_columns/query.sql new file mode 100644 index 000000000..2138828cd --- /dev/null +++ b/parser/testdata/03080_analyzer_prefer_column_name_to_alias__virtual_columns/query.sql @@ -0,0 +1,28 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/35652 +SET enable_analyzer=1; +CREATE TABLE test ( + id UInt64 +) +ENGINE = MergeTree() +SAMPLE BY intHash32(id) +ORDER BY intHash32(id); + +SELECT + any(id), + 
any(id) AS id +FROM test +SETTINGS prefer_column_name_to_alias = 1; + +SELECT + any(_sample_factor), + any(_sample_factor) AS _sample_factor +FROM test +SETTINGS prefer_column_name_to_alias = 1; + +SELECT + any(_partition_id), + any(_sample_factor), + any(_partition_id) AS _partition_id, + any(_sample_factor) AS _sample_factor +FROM test +SETTINGS prefer_column_name_to_alias = 1; diff --git a/parser/testdata/03080_incorrect_join_with_merge/ast.json b/parser/testdata/03080_incorrect_join_with_merge/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03080_incorrect_join_with_merge/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03080_incorrect_join_with_merge/metadata.json b/parser/testdata/03080_incorrect_join_with_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03080_incorrect_join_with_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03080_incorrect_join_with_merge/query.sql b/parser/testdata/03080_incorrect_join_with_merge/query.sql new file mode 100644 index 000000000..a743c5bdf --- /dev/null +++ b/parser/testdata/03080_incorrect_join_with_merge/query.sql @@ -0,0 +1,73 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/29838 +SET enable_analyzer=1; +SET distributed_foreground_insert=1; + +DROP TABLE IF EXISTS first_table_lr SYNC; +CREATE TABLE first_table_lr +( + id String, + id2 String +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_03080/alter', 'r1') +ORDER BY id; + + +DROP TABLE IF EXISTS first_table; +CREATE TABLE first_table +( + id String, + id2 String +) +ENGINE = Distributed('test_shard_localhost', currentDatabase(), 'first_table_lr'); + + +DROP TABLE IF EXISTS second_table_lr; +CREATE TABLE second_table_lr +( + id String, + id2 String +) ENGINE = MergeTree() +ORDER BY id; + +DROP TABLE IF EXISTS second_table; +CREATE TABLE second_table +( + id String, + id2 String +) +ENGINE = Distributed('test_shard_localhost', currentDatabase(), 'second_table_lr'); + +INSERT INTO first_table VALUES ('1', '2'), ('3', '4'); +INSERT INTO second_table VALUES ('1', '2'), ('3', '4'); + +DROP TABLE IF EXISTS two_tables; +CREATE TABLE two_tables +( + id String, + id2 String +) +ENGINE = Merge(currentDatabase(), '^(first_table)$'); + +SELECT + count() +FROM first_table as s +GLOBAL ANY JOIN second_table as f USING (id) +WHERE + f.id2 GLOBAL IN ( + SELECT + id2 + FROM second_table + GROUP BY id2 + ); + +SELECT + count() +FROM two_tables as s +GLOBAL ANY JOIN second_table as f USING (id) +WHERE + f.id2 GLOBAL IN ( + SELECT + id2 + FROM second_table + GROUP BY id2 + ); diff --git a/parser/testdata/03081_analyzer_agg_func_CTE/ast.json b/parser/testdata/03081_analyzer_agg_func_CTE/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03081_analyzer_agg_func_CTE/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03081_analyzer_agg_func_CTE/metadata.json b/parser/testdata/03081_analyzer_agg_func_CTE/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03081_analyzer_agg_func_CTE/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03081_analyzer_agg_func_CTE/query.sql b/parser/testdata/03081_analyzer_agg_func_CTE/query.sql new file mode 100644 index 000000000..3cb02512a --- /dev/null +++ b/parser/testdata/03081_analyzer_agg_func_CTE/query.sql @@ -0,0 +1,19 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/36189 
+SET enable_analyzer=1; +CREATE TABLE test +( + `dt` Date, + `text` String +) +ENGINE = MergeTree +ORDER BY dt; + +insert into test values ('2020-01-01', 'text1'), ('2019-01-01', 'text2'), ('1900-01-01', 'text3'); + +WITH max(dt) AS maxDt +SELECT maxDt +FROM test; + +WITH max(number) AS maxDt +SELECT maxDt +FROM numbers(10); diff --git a/parser/testdata/03082_analyzer_left_join_correct_column/ast.json b/parser/testdata/03082_analyzer_left_join_correct_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03082_analyzer_left_join_correct_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03082_analyzer_left_join_correct_column/metadata.json b/parser/testdata/03082_analyzer_left_join_correct_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03082_analyzer_left_join_correct_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03082_analyzer_left_join_correct_column/query.sql b/parser/testdata/03082_analyzer_left_join_correct_column/query.sql new file mode 100644 index 000000000..3b83f9783 --- /dev/null +++ b/parser/testdata/03082_analyzer_left_join_correct_column/query.sql @@ -0,0 +1,31 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/39634 +SET enable_analyzer=1; +CREATE TABLE test1 +( + `pk` String, + `x.y` Decimal(18, 4) +) +ENGINE = MergeTree() +ORDER BY (pk); + +CREATE TABLE test2 +( + `pk` String, + `x.y` Decimal(18, 4) +) +ENGINE = MergeTree() +ORDER BY (pk); + +INSERT INTO test1 SELECT 'pk1', 1; + +INSERT INTO test2 SELECT 'pk1', 2; + +SELECT t1.pk, t2.x.y +FROM test1 t1 +LEFT JOIN test2 t2 + on t1.pk = t2.pk; + +SELECT t1.pk, t2.`x.y` +FROM test1 t1 +LEFT JOIN test2 t2 + on t1.pk = t2.pk; diff --git a/parser/testdata/03084_analyzer_join_column_alias/ast.json b/parser/testdata/03084_analyzer_join_column_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03084_analyzer_join_column_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03084_analyzer_join_column_alias/metadata.json b/parser/testdata/03084_analyzer_join_column_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03084_analyzer_join_column_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03084_analyzer_join_column_alias/query.sql b/parser/testdata/03084_analyzer_join_column_alias/query.sql new file mode 100644 index 000000000..8a7258f58 --- /dev/null +++ b/parser/testdata/03084_analyzer_join_column_alias/query.sql @@ -0,0 +1,24 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/47432 +SET enable_analyzer=1; +create table t1 +engine = MergeTree() +order by tuple() +as +select 1 as user_id, 2 as level; + + +create table t2 +engine = MergeTree() +order by tuple() +as +select 1 as user_id, 'website' as event_source, '2023-01-01 00:00:00'::DateTime as timestamp; + + +alter table t2 +add column date Date alias toDate(timestamp); + +SELECT + any(t2.date) as any_val +FROM t1 AS t1 +LEFT JOIN t2 as t2 + ON (t1.user_id = t2.user_id); diff --git a/parser/testdata/03085_analyzer_alias_column_group_by/ast.json b/parser/testdata/03085_analyzer_alias_column_group_by/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03085_analyzer_alias_column_group_by/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03085_analyzer_alias_column_group_by/metadata.json 
b/parser/testdata/03085_analyzer_alias_column_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03085_analyzer_alias_column_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03085_analyzer_alias_column_group_by/query.sql b/parser/testdata/03085_analyzer_alias_column_group_by/query.sql new file mode 100644 index 000000000..c360e8619 --- /dev/null +++ b/parser/testdata/03085_analyzer_alias_column_group_by/query.sql @@ -0,0 +1,5 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/54910 +SET enable_analyzer=1; +SELECT toTypeName(stat_standard_id) AS stat_standard_id_1, count(1) AS value +FROM ( SELECT 'string value' AS stat_standard_id ) +GROUP BY stat_standard_id_1 LIMIT 1 diff --git a/parser/testdata/03086_analyzer_window_func_part_of_group_by/ast.json b/parser/testdata/03086_analyzer_window_func_part_of_group_by/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03086_analyzer_window_func_part_of_group_by/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03086_analyzer_window_func_part_of_group_by/metadata.json b/parser/testdata/03086_analyzer_window_func_part_of_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03086_analyzer_window_func_part_of_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03086_analyzer_window_func_part_of_group_by/query.sql b/parser/testdata/03086_analyzer_window_func_part_of_group_by/query.sql new file mode 100644 index 000000000..7e44b37f8 --- /dev/null +++ b/parser/testdata/03086_analyzer_window_func_part_of_group_by/query.sql @@ -0,0 +1,13 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/57321 +SET enable_analyzer=1; +SELECT + ver, + max(ver) OVER () AS ver_max +FROM +( + SELECT 1 AS ver + UNION ALL + SELECT 2 AS ver +) +GROUP BY ver +ORDER BY ver; diff --git a/parser/testdata/03087_analyzer_subquery_with_alias/ast.json b/parser/testdata/03087_analyzer_subquery_with_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03087_analyzer_subquery_with_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03087_analyzer_subquery_with_alias/metadata.json b/parser/testdata/03087_analyzer_subquery_with_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03087_analyzer_subquery_with_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03087_analyzer_subquery_with_alias/query.sql b/parser/testdata/03087_analyzer_subquery_with_alias/query.sql new file mode 100644 index 000000000..a00ca4960 --- /dev/null +++ b/parser/testdata/03087_analyzer_subquery_with_alias/query.sql @@ -0,0 +1,16 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/59154 +SET enable_analyzer=1; +SELECT * +FROM +( + WITH + assumeNotNull(( + SELECT 0.9 + )) AS TUNING, + ELEMENT_QUERY AS + ( + SELECT quantiles(TUNING)(1) + ) + SELECT * + FROM ELEMENT_QUERY +); diff --git a/parser/testdata/03088_analyzer_ambiguous_column_multi_call/ast.json b/parser/testdata/03088_analyzer_ambiguous_column_multi_call/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03088_analyzer_ambiguous_column_multi_call/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03088_analyzer_ambiguous_column_multi_call/metadata.json 
b/parser/testdata/03088_analyzer_ambiguous_column_multi_call/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03088_analyzer_ambiguous_column_multi_call/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03088_analyzer_ambiguous_column_multi_call/query.sql b/parser/testdata/03088_analyzer_ambiguous_column_multi_call/query.sql new file mode 100644 index 000000000..3670404d1 --- /dev/null +++ b/parser/testdata/03088_analyzer_ambiguous_column_multi_call/query.sql @@ -0,0 +1,13 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/61014 +SET enable_analyzer=1; + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; +create database {CLICKHOUSE_DATABASE:Identifier}; + +create table {CLICKHOUSE_DATABASE:Identifier}.a (i int) engine = Log(); + +select + {CLICKHOUSE_DATABASE:Identifier}.a.i +from + {CLICKHOUSE_DATABASE:Identifier}.a, + {CLICKHOUSE_DATABASE:Identifier}.a as x; diff --git a/parser/testdata/03089_analyzer_alias_replacement/ast.json b/parser/testdata/03089_analyzer_alias_replacement/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03089_analyzer_alias_replacement/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03089_analyzer_alias_replacement/metadata.json b/parser/testdata/03089_analyzer_alias_replacement/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03089_analyzer_alias_replacement/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03089_analyzer_alias_replacement/query.sql b/parser/testdata/03089_analyzer_alias_replacement/query.sql new file mode 100644 index 000000000..5526e1aaf --- /dev/null +++ b/parser/testdata/03089_analyzer_alias_replacement/query.sql @@ -0,0 +1,9 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/61950 +SET enable_analyzer=1; + +with dummy + 1 as dummy select dummy from system.one; + +WITH dummy + 3 AS dummy +SELECT dummy + 1 AS y +FROM system.one +SETTINGS enable_global_with_statement = 1; diff --git a/parser/testdata/03090_analyzer_multiple_using_statements/ast.json b/parser/testdata/03090_analyzer_multiple_using_statements/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03090_analyzer_multiple_using_statements/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03090_analyzer_multiple_using_statements/metadata.json b/parser/testdata/03090_analyzer_multiple_using_statements/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03090_analyzer_multiple_using_statements/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03090_analyzer_multiple_using_statements/query.sql b/parser/testdata/03090_analyzer_multiple_using_statements/query.sql new file mode 100644 index 000000000..08ea103d3 --- /dev/null +++ b/parser/testdata/03090_analyzer_multiple_using_statements/query.sql @@ -0,0 +1,17 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/55647 +SET enable_analyzer=1; + +SELECT +* +FROM ( + SELECT * + FROM system.one +) a +JOIN ( + SELECT * + FROM system.one +) b USING dummy +JOIN ( + SELECT * + FROM system.one +) c USING dummy diff --git a/parser/testdata/03091_analyzer_same_table_name_in_different_databases/ast.json b/parser/testdata/03091_analyzer_same_table_name_in_different_databases/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/03091_analyzer_same_table_name_in_different_databases/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03091_analyzer_same_table_name_in_different_databases/metadata.json b/parser/testdata/03091_analyzer_same_table_name_in_different_databases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03091_analyzer_same_table_name_in_different_databases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03091_analyzer_same_table_name_in_different_databases/query.sql b/parser/testdata/03091_analyzer_same_table_name_in_different_databases/query.sql new file mode 100644 index 000000000..11984aec4 --- /dev/null +++ b/parser/testdata/03091_analyzer_same_table_name_in_different_databases/query.sql @@ -0,0 +1,31 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/61947 +SET enable_analyzer=1; + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE_1:Identifier}; + +CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier}; +CREATE DATABASE {CLICKHOUSE_DATABASE_1:Identifier}; +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.`1-1` (field Int8) ENGINE = Memory; +CREATE TABLE {CLICKHOUSE_DATABASE_1:Identifier}.`1-1` (field Int8) ENGINE = Memory; +CREATE TABLE {CLICKHOUSE_DATABASE_1:Identifier}.`2-1` (field Int8) ENGINE = Memory; + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.`1-1` VALUES (1); + +SELECT * +FROM {CLICKHOUSE_DATABASE:Identifier}.`1-1` +LEFT JOIN {CLICKHOUSE_DATABASE_1:Identifier}.`1-1` ON {CLICKHOUSE_DATABASE:Identifier}.`1-1`.field = {CLICKHOUSE_DATABASE_1:Identifier}.`1-1`.field; + +SELECT ''; + +SELECT * FROM +( +SELECT 'using asterisk', {CLICKHOUSE_DATABASE:Identifier}.`1-1`.*, {CLICKHOUSE_DATABASE_1:Identifier}.`1-1`.* +FROM {CLICKHOUSE_DATABASE:Identifier}.`1-1` +LEFT JOIN {CLICKHOUSE_DATABASE_1:Identifier}.`1-1` USING field +UNION ALL +SELECT 'using field name', {CLICKHOUSE_DATABASE:Identifier}.`1-1`.field, {CLICKHOUSE_DATABASE_1:Identifier}.`1-1`.field +FROM {CLICKHOUSE_DATABASE:Identifier}.`1-1` +LEFT JOIN {CLICKHOUSE_DATABASE_1:Identifier}.`1-1` USING field +) +ORDER BY ALL; diff --git a/parser/testdata/03092_analyzer_same_table_name_in_different_databases/ast.json b/parser/testdata/03092_analyzer_same_table_name_in_different_databases/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03092_analyzer_same_table_name_in_different_databases/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03092_analyzer_same_table_name_in_different_databases/metadata.json b/parser/testdata/03092_analyzer_same_table_name_in_different_databases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03092_analyzer_same_table_name_in_different_databases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03092_analyzer_same_table_name_in_different_databases/query.sql b/parser/testdata/03092_analyzer_same_table_name_in_different_databases/query.sql new file mode 100644 index 000000000..83b1a9027 --- /dev/null +++ b/parser/testdata/03092_analyzer_same_table_name_in_different_databases/query.sql @@ -0,0 +1,18 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/61947 +SET enable_analyzer=1; + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE_1:Identifier}; + +CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier}; +CREATE DATABASE {CLICKHOUSE_DATABASE_1:Identifier}; +CREATE TABLE 
{CLICKHOUSE_DATABASE:Identifier}.`1-1` (field Int8) ENGINE = Memory; +CREATE TABLE {CLICKHOUSE_DATABASE_1:Identifier}.`2-1` (field Int8) ENGINE = Memory; +CREATE TABLE {CLICKHOUSE_DATABASE_1:Identifier}.`3-1` (field Int8) ENGINE = Memory; + +INSERT INTO {CLICKHOUSE_DATABASE:Identifier}.`1-1` VALUES (1); + +SELECT {CLICKHOUSE_DATABASE:Identifier}.`1-1`.* +FROM {CLICKHOUSE_DATABASE:Identifier}.`1-1` +LEFT JOIN {CLICKHOUSE_DATABASE_1:Identifier}.`2-1` ON {CLICKHOUSE_DATABASE:Identifier}.`1-1`.field = {CLICKHOUSE_DATABASE_1:Identifier}.`2-1`.field +LEFT JOIN {CLICKHOUSE_DATABASE_1:Identifier}.`3-1` ON {CLICKHOUSE_DATABASE_1:Identifier}.`2-1`.field = {CLICKHOUSE_DATABASE_1:Identifier}.`3-1`.field; diff --git a/parser/testdata/03093_analyzer_column_alias/ast.json b/parser/testdata/03093_analyzer_column_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03093_analyzer_column_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03093_analyzer_column_alias/metadata.json b/parser/testdata/03093_analyzer_column_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03093_analyzer_column_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03093_analyzer_column_alias/query.sql b/parser/testdata/03093_analyzer_column_alias/query.sql new file mode 100644 index 000000000..edf89108b --- /dev/null +++ b/parser/testdata/03093_analyzer_column_alias/query.sql @@ -0,0 +1,21 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/26674 +SET enable_analyzer = true; + +SELECT + Carrier, + sum(toFloat64(C3)) AS C1, + sum(toFloat64(C1)) AS C2, + sum(toFloat64(C2)) AS C3 +FROM + ( + SELECT + 1 AS Carrier, + count(CAST(1, 'Nullable(Int32)')) AS C1, + max(number) AS C2, + min(number) AS C3 + FROM numbers(10) + GROUP BY Carrier + ) AS ITBL +GROUP BY Carrier +LIMIT 1000001 +SETTINGS prefer_column_name_to_alias=1; diff --git a/parser/testdata/03093_analyzer_miel_test/ast.json b/parser/testdata/03093_analyzer_miel_test/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03093_analyzer_miel_test/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03093_analyzer_miel_test/metadata.json b/parser/testdata/03093_analyzer_miel_test/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03093_analyzer_miel_test/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03093_analyzer_miel_test/query.sql b/parser/testdata/03093_analyzer_miel_test/query.sql new file mode 100644 index 000000000..4915864bb --- /dev/null +++ b/parser/testdata/03093_analyzer_miel_test/query.sql @@ -0,0 +1,16 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/58985 + +DROP TABLE IF EXISTS test_03093; + +CREATE TABLE test_03093 (app String, c UInt64, k Map(String, String)) ENGINE=MergeTree ORDER BY app; + +INSERT INTO test_03093 VALUES ('x1', 123, {'k1': ''}); +INSERT INTO test_03093 VALUES ('x1', 123, {'k1': '', 'k11': ''}); +INSERT INTO test_03093 VALUES ('x1', 12, {'k1': ''}); + +SET enable_analyzer=1; + +select app, arrayZip(untuple(sumMap(k.keys, replicate(1, k.keys)))) from test_03093 PREWHERE c > 1 group by app; +select app, arrayZip(untuple(sumMap(k.keys, replicate(1, k.keys)))) from test_03093 WHERE c > 1 group by app; + +DROP TABLE IF EXISTS test_03093; diff --git a/parser/testdata/03093_bug37909_query_does_not_finish/ast.json 
b/parser/testdata/03093_bug37909_query_does_not_finish/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03093_bug37909_query_does_not_finish/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03093_bug37909_query_does_not_finish/metadata.json b/parser/testdata/03093_bug37909_query_does_not_finish/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03093_bug37909_query_does_not_finish/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03093_bug37909_query_does_not_finish/query.sql b/parser/testdata/03093_bug37909_query_does_not_finish/query.sql new file mode 100644 index 000000000..90f0a8a3b --- /dev/null +++ b/parser/testdata/03093_bug37909_query_does_not_finish/query.sql @@ -0,0 +1,78 @@ +-- Bug 37909 + +SELECT + v_date AS vDate, + round(sum(v_share)) AS v_sum +FROM +( + WITH + ( + SELECT rand() % 10000 + ) AS dummy_1, + ( + SELECT rand() % 10000 + ) AS dummy_2, + ( + SELECT rand() % 10000 + ) AS dummy_3, + _v AS + ( + SELECT + xxHash64(rand()) % 100000 AS d_id, + toDate(parseDateTimeBestEffort('2022-01-01') + (rand() % 2600000)) AS v_date + FROM numbers(1000000) + ORDER BY d_id ASC + ), + _i AS + ( + SELECT xxHash64(rand()) % 40000 AS d_id + FROM numbers(1000000) + ), + not_i AS + ( + SELECT + NULL AS v_date, + d_id, + 0 AS v_share + FROM _i + LIMIT 100 + ) + SELECT * + FROM + ( + SELECT + d_id, + v_date, + v_share + FROM not_i + UNION ALL + SELECT + d_id, + v_date, + 1 AS v_share + FROM + ( + SELECT + d_id, + arrayJoin(groupArray(v_date)) AS v_date + FROM + ( + SELECT + v_date, + d_id + FROM _v + UNION ALL + SELECT + NULL AS v_date, + d_id + FROM _i + ) + GROUP BY d_id + ) + ) + WHERE (v_date >= '2022-05-08') AND (v_date <= '2022-06-07') +) +/* placing the condition WHERE (v_date >= '2022-05-08') AND (v_date <= '2022-06-07') here has the same effect */ +GROUP BY vDate +ORDER BY vDate ASC +SETTINGS enable_analyzer = 1; -- the query times out if enable_analyzer = 0 diff --git a/parser/testdata/03093_bug_gcd_codec/ast.json b/parser/testdata/03093_bug_gcd_codec/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03093_bug_gcd_codec/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03093_bug_gcd_codec/metadata.json b/parser/testdata/03093_bug_gcd_codec/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03093_bug_gcd_codec/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03093_bug_gcd_codec/query.sql b/parser/testdata/03093_bug_gcd_codec/query.sql new file mode 100644 index 000000000..4a8370c6b --- /dev/null +++ b/parser/testdata/03093_bug_gcd_codec/query.sql @@ -0,0 +1,17 @@ +-- Tags: long + +CREATE TABLE test_gcd(test_col UInt32 CODEC(GCD, LZ4)) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 8192, index_granularity_bytes = 1024; + +INSERT INTO test_gcd SELECT floor(randUniform(1, 3)) FROM numbers(150000); +OPTIMIZE TABLE test_gcd FINAL; + +CREATE TABLE test_gcd2(test_col UInt32 CODEC(GCD, LZ4)) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 8192, index_granularity_bytes = 1024, min_bytes_for_wide_part = 0, max_compress_block_size = 1024, min_compress_block_size = 1024; + +INSERT INTO test_gcd2 SELECT floor(randUniform(1, 3)) FROM numbers(150000); +OPTIMIZE TABLE test_gcd2 FINAL; diff --git a/parser/testdata/03093_reading_bug_with_parallel_replicas/ast.json
b/parser/testdata/03093_reading_bug_with_parallel_replicas/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03093_reading_bug_with_parallel_replicas/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03093_reading_bug_with_parallel_replicas/metadata.json b/parser/testdata/03093_reading_bug_with_parallel_replicas/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03093_reading_bug_with_parallel_replicas/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03093_reading_bug_with_parallel_replicas/query.sql b/parser/testdata/03093_reading_bug_with_parallel_replicas/query.sql new file mode 100644 index 000000000..00dd17c49 --- /dev/null +++ b/parser/testdata/03093_reading_bug_with_parallel_replicas/query.sql @@ -0,0 +1,19 @@ + +set max_threads = 16; +set use_hedged_requests = 0; +set max_parallel_replicas = 3; +set cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; +set enable_parallel_replicas = 1; +set parallel_replicas_for_non_replicated_merge_tree = 1; +set allow_aggregate_partitions_independently = 1; + +drop table if exists t2; + +create table t2(a Int16) engine=MergeTree order by tuple() partition by a % 8 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +system stop merges t2; + +insert into t2 select number from numbers_mt(1e6); +insert into t2 select number from numbers_mt(1e6); + +select a from t2 group by a format Null; diff --git a/parser/testdata/03093_special_column_errors/ast.json b/parser/testdata/03093_special_column_errors/ast.json new file mode 100644 index 000000000..6afd45bb0 --- /dev/null +++ b/parser/testdata/03093_special_column_errors/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery replacing_wrong (children 3)" + }, + { + "explain": " Identifier replacing_wrong" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration key (children 1)" + }, + { + "explain": " DataType Int64" + }, + { + "explain": " ColumnDeclaration ver (children 1)" + }, + { + "explain": " DataType Int64" + }, + { + "explain": " ColumnDeclaration is_deleted (children 1)" + }, + { + "explain": " DataType UInt16" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function ReplacingMergeTree (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier ver" + }, + { + "explain": " Identifier is_deleted" + }, + { + "explain": " Identifier key" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001433794, + "rows_read": 16, + "bytes_read": 592 + } +} diff --git a/parser/testdata/03093_special_column_errors/metadata.json b/parser/testdata/03093_special_column_errors/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03093_special_column_errors/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03093_special_column_errors/query.sql b/parser/testdata/03093_special_column_errors/query.sql new file mode 100644 index 000000000..5daf72d22 --- /dev/null +++ b/parser/testdata/03093_special_column_errors/query.sql @@ -0,0 +1,28 @@ +CREATE TABLE replacing_wrong (key Int64, ver Int64, is_deleted UInt16) ENGINE = ReplacingMergeTree(ver, is_deleted) ORDER BY key; -- { serverError BAD_TYPE_OF_FIELD } +CREATE 
TABLE replacing_wrong (key Int64, ver String, is_deleted UInt8) ENGINE = ReplacingMergeTree(ver, is_deleted) ORDER BY key; -- { serverError BAD_TYPE_OF_FIELD } +CREATE TABLE replacing_wrong (key Int64, ver Int64, is_deleted UInt8) ENGINE = ReplacingMergeTree(is_deleted, is_deleted) ORDER BY key; -- { serverError BAD_ARGUMENTS } + +CREATE TABLE replacing (key Int64, ver Int64, is_deleted UInt8) ENGINE = ReplacingMergeTree(ver, is_deleted) ORDER BY key; +ALTER TABLE replacing MODIFY COLUMN ver String; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE replacing MODIFY COLUMN ver Int128; +ALTER TABLE replacing MODIFY COLUMN is_deleted String; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE replacing MODIFY COLUMN is_deleted UInt16; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE replacing MODIFY COLUMN is_deleted Int8; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE replacing DROP COLUMN ver; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE replacing DROP COLUMN is_deleted; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE replacing RENAME COLUMN ver TO ver2; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE replacing RENAME COLUMN is_deleted TO is_deleted2; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +CREATE TABLE collapsing_wrong (key Int64, sign Int16) ENGINE = CollapsingMergeTree(sign) ORDER BY key; -- { serverError BAD_TYPE_OF_FIELD } +CREATE TABLE collapsing_wrong (key Int64, sign UInt8) ENGINE = CollapsingMergeTree(sign) ORDER BY key; -- { serverError BAD_TYPE_OF_FIELD } +CREATE TABLE collapsing_wrong (key Int64, sign UInt8) ENGINE = CollapsingMergeTree(not_existing) ORDER BY key; -- { serverError NO_SUCH_COLUMN_IN_TABLE } + +CREATE TABLE collapsing (key Int64, sign Int8) ENGINE = CollapsingMergeTree(sign) ORDER BY key; +ALTER TABLE collapsing MODIFY COLUMN sign String; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE collapsing DROP COLUMN sign; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE collapsing RENAME COLUMN sign TO sign2; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE collapsing MODIFY COLUMN sign MODIFY SETTING max_compress_block_size = 123456; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +CREATE TABLE versioned_collapsing_wrong (key Int64, version UInt8, sign Int8) ENGINE = VersionedCollapsingMergeTree(sign, sign) ORDER BY key; -- { serverError BAD_ARGUMENTS } + +CREATE TABLE versioned_collapsing (key Int64, version UInt8, sign Int8) ENGINE = VersionedCollapsingMergeTree(sign, version) ORDER BY key; diff --git a/parser/testdata/03093_virtual_column_override_group_by/ast.json b/parser/testdata/03093_virtual_column_override_group_by/ast.json new file mode 100644 index 000000000..f81f5d3f2 --- /dev/null +++ b/parser/testdata/03093_virtual_column_override_group_by/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery override_test__fuzz_45 (children 4)" + }, + { + "explain": " Identifier override_test__fuzz_45" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration _part (children 1)" + }, + { + "explain": " DataType Float32" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " SelectWithUnionQuery 
(children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001280038, + "rows_read": 15, + "bytes_read": 561 + } +} diff --git a/parser/testdata/03093_virtual_column_override_group_by/metadata.json b/parser/testdata/03093_virtual_column_override_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03093_virtual_column_override_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03093_virtual_column_override_group_by/query.sql b/parser/testdata/03093_virtual_column_override_group_by/query.sql new file mode 100644 index 000000000..168d38a15 --- /dev/null +++ b/parser/testdata/03093_virtual_column_override_group_by/query.sql @@ -0,0 +1,2 @@ +CREATE TABLE override_test__fuzz_45 (`_part` Float32) ENGINE = MergeTree ORDER BY tuple() AS SELECT 1; +SELECT _part FROM override_test__fuzz_45 GROUP BY materialize(6), 1; diff --git a/parser/testdata/03093_with_fill_support_constant_expression/ast.json b/parser/testdata/03093_with_fill_support_constant_expression/ast.json new file mode 100644 index 000000000..4f74f0f37 --- /dev/null +++ b/parser/testdata/03093_with_fill_support_constant_expression/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0 (alias l)" + }, + { + "explain": " Literal UInt64_10 (alias r)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 3)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Identifier l" + }, + { + "explain": " Identifier r" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001405965, + "rows_read": 22, + "bytes_read": 815 + } +} diff --git a/parser/testdata/03093_with_fill_support_constant_expression/metadata.json b/parser/testdata/03093_with_fill_support_constant_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03093_with_fill_support_constant_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03093_with_fill_support_constant_expression/query.sql b/parser/testdata/03093_with_fill_support_constant_expression/query.sql new file mode 100644 index 000000000..2e1cf612d --- /dev/null +++ b/parser/testdata/03093_with_fill_support_constant_expression/query.sql @@ -0,0 +1,2 @@ +WITH 0 AS l, 10 AS r SELECT number * 2 FROM numbers(5) ORDER BY 1 WITH FILL FROM l TO r; +WITH 0 AS l, 10 AS r SELECT number * 2 FROM numbers(5) ORDER BY 1 WITH FILL 
FROM l TO l + r; diff --git a/parser/testdata/03094_analyzer_fiddle_multiif/ast.json b/parser/testdata/03094_analyzer_fiddle_multiif/ast.json new file mode 100644 index 000000000..86fa31feb --- /dev/null +++ b/parser/testdata/03094_analyzer_fiddle_multiif/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery users_03094 (children 1)" + }, + { + "explain": " Identifier users_03094" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001288126, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/03094_analyzer_fiddle_multiif/metadata.json b/parser/testdata/03094_analyzer_fiddle_multiif/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03094_analyzer_fiddle_multiif/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03094_analyzer_fiddle_multiif/query.sql b/parser/testdata/03094_analyzer_fiddle_multiif/query.sql new file mode 100644 index 000000000..842674991 --- /dev/null +++ b/parser/testdata/03094_analyzer_fiddle_multiif/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS users_03094; + +CREATE TABLE users_03094 (name String, age Int16) ENGINE=Memory; +INSERT INTO users_03094 VALUES ('John', 33); +INSERT INTO users_03094 VALUES ('Ksenia', 48); +INSERT INTO users_03094 VALUES ('Alice', 50); + +SET enable_analyzer=1; + +SELECT + multiIf((age > 30) or (true), '1', '2') AS a, + max(name) +FROM users_03094 +GROUP BY a; + +DROP TABLE IF EXISTS users_03094; diff --git a/parser/testdata/03094_grouparraysorted_memory/ast.json b/parser/testdata/03094_grouparraysorted_memory/ast.json new file mode 100644 index 000000000..357f124f8 --- /dev/null +++ b/parser/testdata/03094_grouparraysorted_memory/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery 03094_grouparrysorted_dest (children 1)" + }, + { + "explain": " Identifier 03094_grouparrysorted_dest" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001207638, + "rows_read": 2, + "bytes_read": 105 + } +} diff --git a/parser/testdata/03094_grouparraysorted_memory/metadata.json b/parser/testdata/03094_grouparraysorted_memory/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03094_grouparraysorted_memory/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03094_grouparraysorted_memory/query.sql b/parser/testdata/03094_grouparraysorted_memory/query.sql new file mode 100644 index 000000000..c062d97e4 --- /dev/null +++ b/parser/testdata/03094_grouparraysorted_memory/query.sql @@ -0,0 +1,36 @@ +CREATE TABLE 03094_grouparrysorted_dest +( + ServiceName LowCardinality(String) CODEC(ZSTD(1)), + -- aggregates + SlowSpans AggregateFunction(groupArraySorted(100), + Tuple(NegativeDurationNs Int64, Timestamp DateTime64(9), TraceId String, SpanId String) + ) CODEC(ZSTD(1)) +) +ENGINE = AggregatingMergeTree() +ORDER BY (ServiceName); + +CREATE TABLE 03094_grouparrysorted_src +( + ServiceName String, + Duration Int64, + Timestamp DateTime64(9), + TraceId String, + SpanId String +) +ENGINE = MergeTree() +ORDER BY (); + +CREATE MATERIALIZED VIEW 03094_grouparrysorted_mv TO 03094_grouparrysorted_dest +AS SELECT + ServiceName, + groupArraySortedState(100)( + CAST( + tuple(-Duration, Timestamp, TraceId, SpanId), + 'Tuple(NegativeDurationNs Int64, Timestamp DateTime64(9), TraceId String, SpanId String)' + )) as SlowSpans +FROM 
03094_grouparrysorted_src +GROUP BY + ServiceName; + + +INSERT INTO 03094_grouparrysorted_src SELECT * FROM generateRandom() LIMIT 500000; diff --git a/parser/testdata/03094_named_tuple_bug24607/ast.json b/parser/testdata/03094_named_tuple_bug24607/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03094_named_tuple_bug24607/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03094_named_tuple_bug24607/metadata.json b/parser/testdata/03094_named_tuple_bug24607/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03094_named_tuple_bug24607/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03094_named_tuple_bug24607/query.sql b/parser/testdata/03094_named_tuple_bug24607/query.sql new file mode 100644 index 000000000..698c339e5 --- /dev/null +++ b/parser/testdata/03094_named_tuple_bug24607/query.sql @@ -0,0 +1,4 @@ +SELECT + JSONExtract('{"a":1, "b":"test"}', 'Tuple(a UInt8, b String)') AS x, + x.a +SETTINGS enable_analyzer = 1; diff --git a/parser/testdata/03094_one_thousand_joins/ast.json b/parser/testdata/03094_one_thousand_joins/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03094_one_thousand_joins/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03094_one_thousand_joins/metadata.json b/parser/testdata/03094_one_thousand_joins/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03094_one_thousand_joins/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03094_one_thousand_joins/query.sql b/parser/testdata/03094_one_thousand_joins/query.sql new file mode 100644 index 000000000..2c2d6e7dd --- /dev/null +++ b/parser/testdata/03094_one_thousand_joins/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-fasttest, no-tsan, no-asan, no-msan, long +-- (no-tsan because it has a small maximum stack size and the test would fail with TOO_DEEP_RECURSION) + +SET join_algorithm = 'default'; -- for 'full_sorting_merge' the query is 10x slower +SET enable_analyzer = 1; -- old analyzer returns TOO_DEEP_SUBQUERIES +SET query_plan_join_swap_table = 'auto'; -- 'true' is slower +SET max_threads = 8; + +-- Bug 33446, marked as 'long' because it still runs around 10 sec +SELECT * FROM (SELECT 1 AS x) t1 JOIN (SELECT 1 AS x) t2 ON t1.x = t2.x JOIN (SELECT 1 AS x) t3 ON t1.x = t3.x JOIN (SELECT 1 AS x) t4 ON t1.x = t4.x JOIN (SELECT 1 AS x) t5 ON t1.x = t5.x JOIN (SELECT 1 AS x) t6 ON t1.x = t6.x JOIN (SELECT 1 AS x) t7 ON t1.x = t7.x JOIN (SELECT 1 AS x) t8 ON t1.x = t8.x JOIN (SELECT 1 AS x) t9 ON t1.x = t9.x JOIN (SELECT 1 AS x) t10 ON t1.x = t10.x JOIN (SELECT 1 AS x) t11 ON t1.x = t11.x JOIN (SELECT 1 AS x) t12 ON t1.x = t12.x JOIN (SELECT 1 AS x) t13 ON t1.x = t13.x JOIN (SELECT 1 AS x) t14 ON t1.x = t14.x JOIN (SELECT 1 AS x) t15 ON t1.x = t15.x JOIN (SELECT 1 AS x) t16 ON t1.x = t16.x JOIN (SELECT 1 AS x) t17 ON t1.x = t17.x JOIN (SELECT 1 AS x) t18 ON t1.x = t18.x JOIN (SELECT 1 AS x) t19 ON t1.x = t19.x JOIN (SELECT 1 AS x) t20 ON t1.x = t20.x JOIN (SELECT 1 AS x) t21 ON t1.x = t21.x JOIN (SELECT 1 AS x) t22 ON t1.x = t22.x JOIN (SELECT 1 AS x) t23 ON t1.x = t23.x JOIN (SELECT 1 AS x) t24 ON t1.x = t24.x JOIN (SELECT 1 AS x) t25 ON t1.x = t25.x JOIN (SELECT 1 AS x) t26 ON t1.x = t26.x JOIN (SELECT 1 AS x) t27 ON t1.x = t27.x JOIN (SELECT 1 AS x) t28 ON t1.x = t28.x JOIN (SELECT 1 AS x) t29 ON t1.x = t29.x JOIN (SELECT 1 AS x) t30 ON t1.x = t30.x JOIN (SELECT 1 
AS x) t31 ON t1.x = t31.x JOIN (SELECT 1 AS x) t32 ON t1.x = t32.x JOIN (SELECT 1 AS x) t33 ON t1.x = t33.x JOIN (SELECT 1 AS x) t34 ON t1.x = t34.x JOIN (SELECT 1 AS x) t35 ON t1.x = t35.x JOIN (SELECT 1 AS x) t36 ON t1.x = t36.x JOIN (SELECT 1 AS x) t37 ON t1.x = t37.x JOIN (SELECT 1 AS x) t38 ON t1.x = t38.x JOIN (SELECT 1 AS x) t39 ON t1.x = t39.x JOIN (SELECT 1 AS x) t40 ON t1.x = t40.x JOIN (SELECT 1 AS x) t41 ON t1.x = t41.x JOIN (SELECT 1 AS x) t42 ON t1.x = t42.x JOIN (SELECT 1 AS x) t43 ON t1.x = t43.x JOIN (SELECT 1 AS x) t44 ON t1.x = t44.x JOIN (SELECT 1 AS x) t45 ON t1.x = t45.x JOIN (SELECT 1 AS x) t46 ON t1.x = t46.x JOIN (SELECT 1 AS x) t47 ON t1.x = t47.x JOIN (SELECT 1 AS x) t48 ON t1.x = t48.x JOIN (SELECT 1 AS x) t49 ON t1.x = t49.x JOIN (SELECT 1 AS x) t50 ON t1.x = t50.x JOIN (SELECT 1 AS x) t51 ON t1.x = t51.x JOIN (SELECT 1 AS x) t52 ON t1.x = t52.x JOIN (SELECT 1 AS x) t53 ON t1.x = t53.x JOIN (SELECT 1 AS x) t54 ON t1.x = t54.x JOIN (SELECT 1 AS x) t55 ON t1.x = t55.x JOIN (SELECT 1 AS x) t56 ON t1.x = t56.x JOIN (SELECT 1 AS x) t57 ON t1.x = t57.x JOIN (SELECT 1 AS x) t58 ON t1.x = t58.x JOIN (SELECT 1 AS x) t59 ON t1.x = t59.x JOIN (SELECT 1 AS x) t60 ON t1.x = t60.x JOIN (SELECT 1 AS x) t61 ON t1.x = t61.x JOIN (SELECT 1 AS x) t62 ON t1.x = t62.x JOIN (SELECT 1 AS x) t63 ON t1.x = t63.x JOIN (SELECT 1 AS x) t64 ON t1.x = t64.x JOIN (SELECT 1 AS x) t65 ON t1.x = t65.x JOIN (SELECT 1 AS x) t66 ON t1.x = t66.x JOIN (SELECT 1 AS x) t67 ON t1.x = t67.x JOIN (SELECT 1 AS x) t68 ON t1.x = t68.x JOIN (SELECT 1 AS x) t69 ON t1.x = t69.x JOIN (SELECT 1 AS x) t70 ON t1.x = t70.x JOIN (SELECT 1 AS x) t71 ON t1.x = t71.x JOIN (SELECT 1 AS x) t72 ON t1.x = t72.x JOIN (SELECT 1 AS x) t73 ON t1.x = t73.x JOIN (SELECT 1 AS x) t74 ON t1.x = t74.x JOIN (SELECT 1 AS x) t75 ON t1.x = t75.x JOIN (SELECT 1 AS x) t76 ON t1.x = t76.x JOIN (SELECT 1 AS x) t77 ON t1.x = t77.x JOIN (SELECT 1 AS x) t78 ON t1.x = t78.x JOIN (SELECT 1 AS x) t79 ON t1.x = t79.x JOIN (SELECT 1 AS x) t80 ON t1.x = t80.x JOIN (SELECT 1 AS x) t81 ON t1.x = t81.x JOIN (SELECT 1 AS x) t82 ON t1.x = t82.x JOIN (SELECT 1 AS x) t83 ON t1.x = t83.x JOIN (SELECT 1 AS x) t84 ON t1.x = t84.x JOIN (SELECT 1 AS x) t85 ON t1.x = t85.x JOIN (SELECT 1 AS x) t86 ON t1.x = t86.x JOIN (SELECT 1 AS x) t87 ON t1.x = t87.x JOIN (SELECT 1 AS x) t88 ON t1.x = t88.x JOIN (SELECT 1 AS x) t89 ON t1.x = t89.x JOIN (SELECT 1 AS x) t90 ON t1.x = t90.x JOIN (SELECT 1 AS x) t91 ON t1.x = t91.x JOIN (SELECT 1 AS x) t92 ON t1.x = t92.x JOIN (SELECT 1 AS x) t93 ON t1.x = t93.x JOIN (SELECT 1 AS x) t94 ON t1.x = t94.x JOIN (SELECT 1 AS x) t95 ON t1.x = t95.x JOIN (SELECT 1 AS x) t96 ON t1.x = t96.x JOIN (SELECT 1 AS x) t97 ON t1.x = t97.x JOIN (SELECT 1 AS x) t98 ON t1.x = t98.x JOIN (SELECT 1 AS x) t99 ON t1.x = t99.x JOIN (SELECT 1 AS x) t100 ON t1.x = t100.x JOIN (SELECT 1 AS x) t101 ON t1.x = t101.x JOIN (SELECT 1 AS x) t102 ON t1.x = t102.x JOIN (SELECT 1 AS x) t103 ON t1.x = t103.x JOIN (SELECT 1 AS x) t104 ON t1.x = t104.x JOIN (SELECT 1 AS x) t105 ON t1.x = t105.x JOIN (SELECT 1 AS x) t106 ON t1.x = t106.x JOIN (SELECT 1 AS x) t107 ON t1.x = t107.x JOIN (SELECT 1 AS x) t108 ON t1.x = t108.x JOIN (SELECT 1 AS x) t109 ON t1.x = t109.x JOIN (SELECT 1 AS x) t110 ON t1.x = t110.x JOIN (SELECT 1 AS x) t111 ON t1.x = t111.x JOIN (SELECT 1 AS x) t112 ON t1.x = t112.x JOIN (SELECT 1 AS x) t113 ON t1.x = t113.x JOIN (SELECT 1 AS x) t114 ON t1.x = t114.x JOIN (SELECT 1 AS x) t115 ON t1.x = t115.x JOIN (SELECT 1 AS x) t116 ON t1.x = t116.x JOIN 
(SELECT 1 AS x) t117 ON t1.x = t117.x JOIN (SELECT 1 AS x) t118 ON t1.x = t118.x JOIN (SELECT 1 AS x) t119 ON t1.x = t119.x JOIN (SELECT 1 AS x) t120 ON t1.x = t120.x JOIN (SELECT 1 AS x) t121 ON t1.x = t121.x JOIN (SELECT 1 AS x) t122 ON t1.x = t122.x JOIN (SELECT 1 AS x) t123 ON t1.x = t123.x JOIN (SELECT 1 AS x) t124 ON t1.x = t124.x JOIN (SELECT 1 AS x) t125 ON t1.x = t125.x JOIN (SELECT 1 AS x) t126 ON t1.x = t126.x JOIN (SELECT 1 AS x) t127 ON t1.x = t127.x JOIN (SELECT 1 AS x) t128 ON t1.x = t128.x JOIN (SELECT 1 AS x) t129 ON t1.x = t129.x JOIN (SELECT 1 AS x) t130 ON t1.x = t130.x JOIN (SELECT 1 AS x) t131 ON t1.x = t131.x JOIN (SELECT 1 AS x) t132 ON t1.x = t132.x JOIN (SELECT 1 AS x) t133 ON t1.x = t133.x JOIN (SELECT 1 AS x) t134 ON t1.x = t134.x JOIN (SELECT 1 AS x) t135 ON t1.x = t135.x JOIN (SELECT 1 AS x) t136 ON t1.x = t136.x JOIN (SELECT 1 AS x) t137 ON t1.x = t137.x JOIN (SELECT 1 AS x) t138 ON t1.x = t138.x JOIN (SELECT 1 AS x) t139 ON t1.x = t139.x JOIN (SELECT 1 AS x) t140 ON t1.x = t140.x JOIN (SELECT 1 AS x) t141 ON t1.x = t141.x JOIN (SELECT 1 AS x) t142 ON t1.x = t142.x JOIN (SELECT 1 AS x) t143 ON t1.x = t143.x JOIN (SELECT 1 AS x) t144 ON t1.x = t144.x JOIN (SELECT 1 AS x) t145 ON t1.x = t145.x JOIN (SELECT 1 AS x) t146 ON t1.x = t146.x JOIN (SELECT 1 AS x) t147 ON t1.x = t147.x JOIN (SELECT 1 AS x) t148 ON t1.x = t148.x JOIN (SELECT 1 AS x) t149 ON t1.x = t149.x JOIN (SELECT 1 AS x) t150 ON t1.x = t150.x JOIN (SELECT 1 AS x) t151 ON t1.x = t151.x JOIN (SELECT 1 AS x) t152 ON t1.x = t152.x JOIN (SELECT 1 AS x) t153 ON t1.x = t153.x JOIN (SELECT 1 AS x) t154 ON t1.x = t154.x JOIN (SELECT 1 AS x) t155 ON t1.x = t155.x JOIN (SELECT 1 AS x) t156 ON t1.x = t156.x JOIN (SELECT 1 AS x) t157 ON t1.x = t157.x JOIN (SELECT 1 AS x) t158 ON t1.x = t158.x JOIN (SELECT 1 AS x) t159 ON t1.x = t159.x JOIN (SELECT 1 AS x) t160 ON t1.x = t160.x JOIN (SELECT 1 AS x) t161 ON t1.x = t161.x JOIN (SELECT 1 AS x) t162 ON t1.x = t162.x JOIN (SELECT 1 AS x) t163 ON t1.x = t163.x JOIN (SELECT 1 AS x) t164 ON t1.x = t164.x JOIN (SELECT 1 AS x) t165 ON t1.x = t165.x JOIN (SELECT 1 AS x) t166 ON t1.x = t166.x JOIN (SELECT 1 AS x) t167 ON t1.x = t167.x JOIN (SELECT 1 AS x) t168 ON t1.x = t168.x JOIN (SELECT 1 AS x) t169 ON t1.x = t169.x JOIN (SELECT 1 AS x) t170 ON t1.x = t170.x JOIN (SELECT 1 AS x) t171 ON t1.x = t171.x JOIN (SELECT 1 AS x) t172 ON t1.x = t172.x JOIN (SELECT 1 AS x) t173 ON t1.x = t173.x JOIN (SELECT 1 AS x) t174 ON t1.x = t174.x JOIN (SELECT 1 AS x) t175 ON t1.x = t175.x JOIN (SELECT 1 AS x) t176 ON t1.x = t176.x JOIN (SELECT 1 AS x) t177 ON t1.x = t177.x JOIN (SELECT 1 AS x) t178 ON t1.x = t178.x JOIN (SELECT 1 AS x) t179 ON t1.x = t179.x JOIN (SELECT 1 AS x) t180 ON t1.x = t180.x JOIN (SELECT 1 AS x) t181 ON t1.x = t181.x JOIN (SELECT 1 AS x) t182 ON t1.x = t182.x JOIN (SELECT 1 AS x) t183 ON t1.x = t183.x JOIN (SELECT 1 AS x) t184 ON t1.x = t184.x JOIN (SELECT 1 AS x) t185 ON t1.x = t185.x JOIN (SELECT 1 AS x) t186 ON t1.x = t186.x JOIN (SELECT 1 AS x) t187 ON t1.x = t187.x JOIN (SELECT 1 AS x) t188 ON t1.x = t188.x JOIN (SELECT 1 AS x) t189 ON t1.x = t189.x JOIN (SELECT 1 AS x) t190 ON t1.x = t190.x JOIN (SELECT 1 AS x) t191 ON t1.x = t191.x JOIN (SELECT 1 AS x) t192 ON t1.x = t192.x JOIN (SELECT 1 AS x) t193 ON t1.x = t193.x JOIN (SELECT 1 AS x) t194 ON t1.x = t194.x JOIN (SELECT 1 AS x) t195 ON t1.x = t195.x JOIN (SELECT 1 AS x) t196 ON t1.x = t196.x JOIN (SELECT 1 AS x) t197 ON t1.x = t197.x JOIN (SELECT 1 AS x) t198 ON t1.x = t198.x JOIN (SELECT 1 AS x) t199 ON t1.x 
= t199.x JOIN (SELECT 1 AS x) t200 ON t1.x = t200.x JOIN (SELECT 1 AS x) t201 ON t1.x = t201.x JOIN (SELECT 1 AS x) t202 ON t1.x = t202.x JOIN (SELECT 1 AS x) t203 ON t1.x = t203.x JOIN (SELECT 1 AS x) t204 ON t1.x = t204.x JOIN (SELECT 1 AS x) t205 ON t1.x = t205.x JOIN (SELECT 1 AS x) t206 ON t1.x = t206.x JOIN (SELECT 1 AS x) t207 ON t1.x = t207.x JOIN (SELECT 1 AS x) t208 ON t1.x = t208.x JOIN (SELECT 1 AS x) t209 ON t1.x = t209.x JOIN (SELECT 1 AS x) t210 ON t1.x = t210.x JOIN (SELECT 1 AS x) t211 ON t1.x = t211.x JOIN (SELECT 1 AS x) t212 ON t1.x = t212.x JOIN (SELECT 1 AS x) t213 ON t1.x = t213.x JOIN (SELECT 1 AS x) t214 ON t1.x = t214.x JOIN (SELECT 1 AS x) t215 ON t1.x = t215.x JOIN (SELECT 1 AS x) t216 ON t1.x = t216.x JOIN (SELECT 1 AS x) t217 ON t1.x = t217.x JOIN (SELECT 1 AS x) t218 ON t1.x = t218.x JOIN (SELECT 1 AS x) t219 ON t1.x = t219.x JOIN (SELECT 1 AS x) t220 ON t1.x = t220.x JOIN (SELECT 1 AS x) t221 ON t1.x = t221.x JOIN (SELECT 1 AS x) t222 ON t1.x = t222.x JOIN (SELECT 1 AS x) t223 ON t1.x = t223.x JOIN (SELECT 1 AS x) t224 ON t1.x = t224.x JOIN (SELECT 1 AS x) t225 ON t1.x = t225.x JOIN (SELECT 1 AS x) t226 ON t1.x = t226.x JOIN (SELECT 1 AS x) t227 ON t1.x = t227.x JOIN (SELECT 1 AS x) t228 ON t1.x = t228.x JOIN (SELECT 1 AS x) t229 ON t1.x = t229.x JOIN (SELECT 1 AS x) t230 ON t1.x = t230.x JOIN (SELECT 1 AS x) t231 ON t1.x = t231.x JOIN (SELECT 1 AS x) t232 ON t1.x = t232.x JOIN (SELECT 1 AS x) t233 ON t1.x = t233.x JOIN (SELECT 1 AS x) t234 ON t1.x = t234.x JOIN (SELECT 1 AS x) t235 ON t1.x = t235.x JOIN (SELECT 1 AS x) t236 ON t1.x = t236.x JOIN (SELECT 1 AS x) t237 ON t1.x = t237.x JOIN (SELECT 1 AS x) t238 ON t1.x = t238.x JOIN (SELECT 1 AS x) t239 ON t1.x = t239.x JOIN (SELECT 1 AS x) t240 ON t1.x = t240.x JOIN (SELECT 1 AS x) t241 ON t1.x = t241.x JOIN (SELECT 1 AS x) t242 ON t1.x = t242.x JOIN (SELECT 1 AS x) t243 ON t1.x = t243.x JOIN (SELECT 1 AS x) t244 ON t1.x = t244.x JOIN (SELECT 1 AS x) t245 ON t1.x = t245.x JOIN (SELECT 1 AS x) t246 ON t1.x = t246.x JOIN (SELECT 1 AS x) t247 ON t1.x = t247.x JOIN (SELECT 1 AS x) t248 ON t1.x = t248.x JOIN (SELECT 1 AS x) t249 ON t1.x = t249.x JOIN (SELECT 1 AS x) t250 ON t1.x = t250.x JOIN (SELECT 1 AS x) t251 ON t1.x = t251.x JOIN (SELECT 1 AS x) t252 ON t1.x = t252.x JOIN (SELECT 1 AS x) t253 ON t1.x = t253.x JOIN (SELECT 1 AS x) t254 ON t1.x = t254.x JOIN (SELECT 1 AS x) t255 ON t1.x = t255.x JOIN (SELECT 1 AS x) t256 ON t1.x = t256.x JOIN (SELECT 1 AS x) t257 ON t1.x = t257.x JOIN (SELECT 1 AS x) t258 ON t1.x = t258.x JOIN (SELECT 1 AS x) t259 ON t1.x = t259.x JOIN (SELECT 1 AS x) t260 ON t1.x = t260.x JOIN (SELECT 1 AS x) t261 ON t1.x = t261.x JOIN (SELECT 1 AS x) t262 ON t1.x = t262.x JOIN (SELECT 1 AS x) t263 ON t1.x = t263.x JOIN (SELECT 1 AS x) t264 ON t1.x = t264.x JOIN (SELECT 1 AS x) t265 ON t1.x = t265.x JOIN (SELECT 1 AS x) t266 ON t1.x = t266.x JOIN (SELECT 1 AS x) t267 ON t1.x = t267.x JOIN (SELECT 1 AS x) t268 ON t1.x = t268.x JOIN (SELECT 1 AS x) t269 ON t1.x = t269.x JOIN (SELECT 1 AS x) t270 ON t1.x = t270.x JOIN (SELECT 1 AS x) t271 ON t1.x = t271.x JOIN (SELECT 1 AS x) t272 ON t1.x = t272.x JOIN (SELECT 1 AS x) t273 ON t1.x = t273.x JOIN (SELECT 1 AS x) t274 ON t1.x = t274.x JOIN (SELECT 1 AS x) t275 ON t1.x = t275.x JOIN (SELECT 1 AS x) t276 ON t1.x = t276.x JOIN (SELECT 1 AS x) t277 ON t1.x = t277.x JOIN (SELECT 1 AS x) t278 ON t1.x = t278.x JOIN (SELECT 1 AS x) t279 ON t1.x = t279.x JOIN (SELECT 1 AS x) t280 ON t1.x = t280.x JOIN (SELECT 1 AS x) t281 ON t1.x = t281.x JOIN (SELECT 1 AS 
x) t282 ON t1.x = t282.x JOIN (SELECT 1 AS x) t283 ON t1.x = t283.x JOIN (SELECT 1 AS x) t284 ON t1.x = t284.x JOIN (SELECT 1 AS x) t285 ON t1.x = t285.x JOIN (SELECT 1 AS x) t286 ON t1.x = t286.x JOIN (SELECT 1 AS x) t287 ON t1.x = t287.x JOIN (SELECT 1 AS x) t288 ON t1.x = t288.x JOIN (SELECT 1 AS x) t289 ON t1.x = t289.x JOIN (SELECT 1 AS x) t290 ON t1.x = t290.x JOIN (SELECT 1 AS x) t291 ON t1.x = t291.x JOIN (SELECT 1 AS x) t292 ON t1.x = t292.x JOIN (SELECT 1 AS x) t293 ON t1.x = t293.x JOIN (SELECT 1 AS x) t294 ON t1.x = t294.x JOIN (SELECT 1 AS x) t295 ON t1.x = t295.x JOIN (SELECT 1 AS x) t296 ON t1.x = t296.x JOIN (SELECT 1 AS x) t297 ON t1.x = t297.x JOIN (SELECT 1 AS x) t298 ON t1.x = t298.x JOIN (SELECT 1 AS x) t299 ON t1.x = t299.x JOIN (SELECT 1 AS x) t300 ON t1.x = t300.x JOIN (SELECT 1 AS x) t301 ON t1.x = t301.x JOIN (SELECT 1 AS x) t302 ON t1.x = t302.x JOIN (SELECT 1 AS x) t303 ON t1.x = t303.x JOIN (SELECT 1 AS x) t304 ON t1.x = t304.x JOIN (SELECT 1 AS x) t305 ON t1.x = t305.x JOIN (SELECT 1 AS x) t306 ON t1.x = t306.x JOIN (SELECT 1 AS x) t307 ON t1.x = t307.x JOIN (SELECT 1 AS x) t308 ON t1.x = t308.x JOIN (SELECT 1 AS x) t309 ON t1.x = t309.x JOIN (SELECT 1 AS x) t310 ON t1.x = t310.x JOIN (SELECT 1 AS x) t311 ON t1.x = t311.x JOIN (SELECT 1 AS x) t312 ON t1.x = t312.x JOIN (SELECT 1 AS x) t313 ON t1.x = t313.x JOIN (SELECT 1 AS x) t314 ON t1.x = t314.x JOIN (SELECT 1 AS x) t315 ON t1.x = t315.x JOIN (SELECT 1 AS x) t316 ON t1.x = t316.x JOIN (SELECT 1 AS x) t317 ON t1.x = t317.x JOIN (SELECT 1 AS x) t318 ON t1.x = t318.x JOIN (SELECT 1 AS x) t319 ON t1.x = t319.x JOIN (SELECT 1 AS x) t320 ON t1.x = t320.x JOIN (SELECT 1 AS x) t321 ON t1.x = t321.x JOIN (SELECT 1 AS x) t322 ON t1.x = t322.x JOIN (SELECT 1 AS x) t323 ON t1.x = t323.x JOIN (SELECT 1 AS x) t324 ON t1.x = t324.x JOIN (SELECT 1 AS x) t325 ON t1.x = t325.x JOIN (SELECT 1 AS x) t326 ON t1.x = t326.x JOIN (SELECT 1 AS x) t327 ON t1.x = t327.x JOIN (SELECT 1 AS x) t328 ON t1.x = t328.x JOIN (SELECT 1 AS x) t329 ON t1.x = t329.x JOIN (SELECT 1 AS x) t330 ON t1.x = t330.x JOIN (SELECT 1 AS x) t331 ON t1.x = t331.x JOIN (SELECT 1 AS x) t332 ON t1.x = t332.x JOIN (SELECT 1 AS x) t333 ON t1.x = t333.x JOIN (SELECT 1 AS x) t334 ON t1.x = t334.x JOIN (SELECT 1 AS x) t335 ON t1.x = t335.x JOIN (SELECT 1 AS x) t336 ON t1.x = t336.x JOIN (SELECT 1 AS x) t337 ON t1.x = t337.x JOIN (SELECT 1 AS x) t338 ON t1.x = t338.x JOIN (SELECT 1 AS x) t339 ON t1.x = t339.x JOIN (SELECT 1 AS x) t340 ON t1.x = t340.x JOIN (SELECT 1 AS x) t341 ON t1.x = t341.x JOIN (SELECT 1 AS x) t342 ON t1.x = t342.x JOIN (SELECT 1 AS x) t343 ON t1.x = t343.x JOIN (SELECT 1 AS x) t344 ON t1.x = t344.x JOIN (SELECT 1 AS x) t345 ON t1.x = t345.x JOIN (SELECT 1 AS x) t346 ON t1.x = t346.x JOIN (SELECT 1 AS x) t347 ON t1.x = t347.x JOIN (SELECT 1 AS x) t348 ON t1.x = t348.x JOIN (SELECT 1 AS x) t349 ON t1.x = t349.x JOIN (SELECT 1 AS x) t350 ON t1.x = t350.x JOIN (SELECT 1 AS x) t351 ON t1.x = t351.x JOIN (SELECT 1 AS x) t352 ON t1.x = t352.x JOIN (SELECT 1 AS x) t353 ON t1.x = t353.x JOIN (SELECT 1 AS x) t354 ON t1.x = t354.x JOIN (SELECT 1 AS x) t355 ON t1.x = t355.x JOIN (SELECT 1 AS x) t356 ON t1.x = t356.x JOIN (SELECT 1 AS x) t357 ON t1.x = t357.x JOIN (SELECT 1 AS x) t358 ON t1.x = t358.x JOIN (SELECT 1 AS x) t359 ON t1.x = t359.x JOIN (SELECT 1 AS x) t360 ON t1.x = t360.x JOIN (SELECT 1 AS x) t361 ON t1.x = t361.x JOIN (SELECT 1 AS x) t362 ON t1.x = t362.x JOIN (SELECT 1 AS x) t363 ON t1.x = t363.x JOIN (SELECT 1 AS x) t364 ON t1.x = t364.x 
JOIN (SELECT 1 AS x) t365 ON t1.x = t365.x JOIN (SELECT 1 AS x) t366 ON t1.x = t366.x JOIN (SELECT 1 AS x) t367 ON t1.x = t367.x JOIN (SELECT 1 AS x) t368 ON t1.x = t368.x JOIN (SELECT 1 AS x) t369 ON t1.x = t369.x JOIN (SELECT 1 AS x) t370 ON t1.x = t370.x JOIN (SELECT 1 AS x) t371 ON t1.x = t371.x JOIN (SELECT 1 AS x) t372 ON t1.x = t372.x JOIN (SELECT 1 AS x) t373 ON t1.x = t373.x JOIN (SELECT 1 AS x) t374 ON t1.x = t374.x JOIN (SELECT 1 AS x) t375 ON t1.x = t375.x JOIN (SELECT 1 AS x) t376 ON t1.x = t376.x JOIN (SELECT 1 AS x) t377 ON t1.x = t377.x JOIN (SELECT 1 AS x) t378 ON t1.x = t378.x JOIN (SELECT 1 AS x) t379 ON t1.x = t379.x JOIN (SELECT 1 AS x) t380 ON t1.x = t380.x JOIN (SELECT 1 AS x) t381 ON t1.x = t381.x JOIN (SELECT 1 AS x) t382 ON t1.x = t382.x JOIN (SELECT 1 AS x) t383 ON t1.x = t383.x JOIN (SELECT 1 AS x) t384 ON t1.x = t384.x JOIN (SELECT 1 AS x) t385 ON t1.x = t385.x JOIN (SELECT 1 AS x) t386 ON t1.x = t386.x JOIN (SELECT 1 AS x) t387 ON t1.x = t387.x JOIN (SELECT 1 AS x) t388 ON t1.x = t388.x JOIN (SELECT 1 AS x) t389 ON t1.x = t389.x JOIN (SELECT 1 AS x) t390 ON t1.x = t390.x JOIN (SELECT 1 AS x) t391 ON t1.x = t391.x JOIN (SELECT 1 AS x) t392 ON t1.x = t392.x JOIN (SELECT 1 AS x) t393 ON t1.x = t393.x JOIN (SELECT 1 AS x) t394 ON t1.x = t394.x JOIN (SELECT 1 AS x) t395 ON t1.x = t395.x JOIN (SELECT 1 AS x) t396 ON t1.x = t396.x JOIN (SELECT 1 AS x) t397 ON t1.x = t397.x JOIN (SELECT 1 AS x) t398 ON t1.x = t398.x JOIN (SELECT 1 AS x) t399 ON t1.x = t399.x JOIN (SELECT 1 AS x) t400 ON t1.x = t400.x JOIN (SELECT 1 AS x) t401 ON t1.x = t401.x JOIN (SELECT 1 AS x) t402 ON t1.x = t402.x JOIN (SELECT 1 AS x) t403 ON t1.x = t403.x JOIN (SELECT 1 AS x) t404 ON t1.x = t404.x JOIN (SELECT 1 AS x) t405 ON t1.x = t405.x JOIN (SELECT 1 AS x) t406 ON t1.x = t406.x JOIN (SELECT 1 AS x) t407 ON t1.x = t407.x JOIN (SELECT 1 AS x) t408 ON t1.x = t408.x JOIN (SELECT 1 AS x) t409 ON t1.x = t409.x JOIN (SELECT 1 AS x) t410 ON t1.x = t410.x JOIN (SELECT 1 AS x) t411 ON t1.x = t411.x JOIN (SELECT 1 AS x) t412 ON t1.x = t412.x JOIN (SELECT 1 AS x) t413 ON t1.x = t413.x JOIN (SELECT 1 AS x) t414 ON t1.x = t414.x JOIN (SELECT 1 AS x) t415 ON t1.x = t415.x JOIN (SELECT 1 AS x) t416 ON t1.x = t416.x JOIN (SELECT 1 AS x) t417 ON t1.x = t417.x JOIN (SELECT 1 AS x) t418 ON t1.x = t418.x JOIN (SELECT 1 AS x) t419 ON t1.x = t419.x JOIN (SELECT 1 AS x) t420 ON t1.x = t420.x JOIN (SELECT 1 AS x) t421 ON t1.x = t421.x JOIN (SELECT 1 AS x) t422 ON t1.x = t422.x JOIN (SELECT 1 AS x) t423 ON t1.x = t423.x JOIN (SELECT 1 AS x) t424 ON t1.x = t424.x JOIN (SELECT 1 AS x) t425 ON t1.x = t425.x JOIN (SELECT 1 AS x) t426 ON t1.x = t426.x JOIN (SELECT 1 AS x) t427 ON t1.x = t427.x JOIN (SELECT 1 AS x) t428 ON t1.x = t428.x JOIN (SELECT 1 AS x) t429 ON t1.x = t429.x JOIN (SELECT 1 AS x) t430 ON t1.x = t430.x JOIN (SELECT 1 AS x) t431 ON t1.x = t431.x JOIN (SELECT 1 AS x) t432 ON t1.x = t432.x JOIN (SELECT 1 AS x) t433 ON t1.x = t433.x JOIN (SELECT 1 AS x) t434 ON t1.x = t434.x JOIN (SELECT 1 AS x) t435 ON t1.x = t435.x JOIN (SELECT 1 AS x) t436 ON t1.x = t436.x JOIN (SELECT 1 AS x) t437 ON t1.x = t437.x JOIN (SELECT 1 AS x) t438 ON t1.x = t438.x JOIN (SELECT 1 AS x) t439 ON t1.x = t439.x JOIN (SELECT 1 AS x) t440 ON t1.x = t440.x JOIN (SELECT 1 AS x) t441 ON t1.x = t441.x JOIN (SELECT 1 AS x) t442 ON t1.x = t442.x JOIN (SELECT 1 AS x) t443 ON t1.x = t443.x JOIN (SELECT 1 AS x) t444 ON t1.x = t444.x JOIN (SELECT 1 AS x) t445 ON t1.x = t445.x JOIN (SELECT 1 AS x) t446 ON t1.x = t446.x JOIN (SELECT 1 AS x) t447 ON 
t1.x = t447.x JOIN (SELECT 1 AS x) t448 ON t1.x = t448.x JOIN (SELECT 1 AS x) t449 ON t1.x = t449.x JOIN (SELECT 1 AS x) t450 ON t1.x = t450.x JOIN (SELECT 1 AS x) t451 ON t1.x = t451.x JOIN (SELECT 1 AS x) t452 ON t1.x = t452.x JOIN (SELECT 1 AS x) t453 ON t1.x = t453.x JOIN (SELECT 1 AS x) t454 ON t1.x = t454.x JOIN (SELECT 1 AS x) t455 ON t1.x = t455.x JOIN (SELECT 1 AS x) t456 ON t1.x = t456.x JOIN (SELECT 1 AS x) t457 ON t1.x = t457.x JOIN (SELECT 1 AS x) t458 ON t1.x = t458.x JOIN (SELECT 1 AS x) t459 ON t1.x = t459.x JOIN (SELECT 1 AS x) t460 ON t1.x = t460.x JOIN (SELECT 1 AS x) t461 ON t1.x = t461.x JOIN (SELECT 1 AS x) t462 ON t1.x = t462.x JOIN (SELECT 1 AS x) t463 ON t1.x = t463.x JOIN (SELECT 1 AS x) t464 ON t1.x = t464.x JOIN (SELECT 1 AS x) t465 ON t1.x = t465.x JOIN (SELECT 1 AS x) t466 ON t1.x = t466.x JOIN (SELECT 1 AS x) t467 ON t1.x = t467.x JOIN (SELECT 1 AS x) t468 ON t1.x = t468.x JOIN (SELECT 1 AS x) t469 ON t1.x = t469.x JOIN (SELECT 1 AS x) t470 ON t1.x = t470.x JOIN (SELECT 1 AS x) t471 ON t1.x = t471.x JOIN (SELECT 1 AS x) t472 ON t1.x = t472.x JOIN (SELECT 1 AS x) t473 ON t1.x = t473.x JOIN (SELECT 1 AS x) t474 ON t1.x = t474.x JOIN (SELECT 1 AS x) t475 ON t1.x = t475.x JOIN (SELECT 1 AS x) t476 ON t1.x = t476.x JOIN (SELECT 1 AS x) t477 ON t1.x = t477.x JOIN (SELECT 1 AS x) t478 ON t1.x = t478.x JOIN (SELECT 1 AS x) t479 ON t1.x = t479.x JOIN (SELECT 1 AS x) t480 ON t1.x = t480.x JOIN (SELECT 1 AS x) t481 ON t1.x = t481.x JOIN (SELECT 1 AS x) t482 ON t1.x = t482.x JOIN (SELECT 1 AS x) t483 ON t1.x = t483.x JOIN (SELECT 1 AS x) t484 ON t1.x = t484.x JOIN (SELECT 1 AS x) t485 ON t1.x = t485.x JOIN (SELECT 1 AS x) t486 ON t1.x = t486.x JOIN (SELECT 1 AS x) t487 ON t1.x = t487.x JOIN (SELECT 1 AS x) t488 ON t1.x = t488.x JOIN (SELECT 1 AS x) t489 ON t1.x = t489.x JOIN (SELECT 1 AS x) t490 ON t1.x = t490.x JOIN (SELECT 1 AS x) t491 ON t1.x = t491.x JOIN (SELECT 1 AS x) t492 ON t1.x = t492.x JOIN (SELECT 1 AS x) t493 ON t1.x = t493.x JOIN (SELECT 1 AS x) t494 ON t1.x = t494.x JOIN (SELECT 1 AS x) t495 ON t1.x = t495.x JOIN (SELECT 1 AS x) t496 ON t1.x = t496.x JOIN (SELECT 1 AS x) t497 ON t1.x = t497.x JOIN (SELECT 1 AS x) t498 ON t1.x = t498.x JOIN (SELECT 1 AS x) t499 ON t1.x = t499.x JOIN (SELECT 1 AS x) t500 ON t1.x = t500.x JOIN (SELECT 1 AS x) t501 ON t1.x = t501.x JOIN (SELECT 1 AS x) t502 ON t1.x = t502.x JOIN (SELECT 1 AS x) t503 ON t1.x = t503.x JOIN (SELECT 1 AS x) t504 ON t1.x = t504.x JOIN (SELECT 1 AS x) t505 ON t1.x = t505.x JOIN (SELECT 1 AS x) t506 ON t1.x = t506.x JOIN (SELECT 1 AS x) t507 ON t1.x = t507.x JOIN (SELECT 1 AS x) t508 ON t1.x = t508.x JOIN (SELECT 1 AS x) t509 ON t1.x = t509.x JOIN (SELECT 1 AS x) t510 ON t1.x = t510.x JOIN (SELECT 1 AS x) t511 ON t1.x = t511.x JOIN (SELECT 1 AS x) t512 ON t1.x = t512.x JOIN (SELECT 1 AS x) t513 ON t1.x = t513.x JOIN (SELECT 1 AS x) t514 ON t1.x = t514.x JOIN (SELECT 1 AS x) t515 ON t1.x = t515.x JOIN (SELECT 1 AS x) t516 ON t1.x = t516.x JOIN (SELECT 1 AS x) t517 ON t1.x = t517.x JOIN (SELECT 1 AS x) t518 ON t1.x = t518.x JOIN (SELECT 1 AS x) t519 ON t1.x = t519.x JOIN (SELECT 1 AS x) t520 ON t1.x = t520.x JOIN (SELECT 1 AS x) t521 ON t1.x = t521.x JOIN (SELECT 1 AS x) t522 ON t1.x = t522.x JOIN (SELECT 1 AS x) t523 ON t1.x = t523.x JOIN (SELECT 1 AS x) t524 ON t1.x = t524.x JOIN (SELECT 1 AS x) t525 ON t1.x = t525.x JOIN (SELECT 1 AS x) t526 ON t1.x = t526.x JOIN (SELECT 1 AS x) t527 ON t1.x = t527.x JOIN (SELECT 1 AS x) t528 ON t1.x = t528.x JOIN (SELECT 1 AS x) t529 ON t1.x = t529.x JOIN (SELECT 1 
AS x) t530 ON t1.x = t530.x JOIN (SELECT 1 AS x) t531 ON t1.x = t531.x JOIN (SELECT 1 AS x) t532 ON t1.x = t532.x JOIN (SELECT 1 AS x) t533 ON t1.x = t533.x JOIN (SELECT 1 AS x) t534 ON t1.x = t534.x JOIN (SELECT 1 AS x) t535 ON t1.x = t535.x JOIN (SELECT 1 AS x) t536 ON t1.x = t536.x JOIN (SELECT 1 AS x) t537 ON t1.x = t537.x JOIN (SELECT 1 AS x) t538 ON t1.x = t538.x JOIN (SELECT 1 AS x) t539 ON t1.x = t539.x JOIN (SELECT 1 AS x) t540 ON t1.x = t540.x JOIN (SELECT 1 AS x) t541 ON t1.x = t541.x JOIN (SELECT 1 AS x) t542 ON t1.x = t542.x JOIN (SELECT 1 AS x) t543 ON t1.x = t543.x JOIN (SELECT 1 AS x) t544 ON t1.x = t544.x JOIN (SELECT 1 AS x) t545 ON t1.x = t545.x JOIN (SELECT 1 AS x) t546 ON t1.x = t546.x JOIN (SELECT 1 AS x) t547 ON t1.x = t547.x JOIN (SELECT 1 AS x) t548 ON t1.x = t548.x JOIN (SELECT 1 AS x) t549 ON t1.x = t549.x JOIN (SELECT 1 AS x) t550 ON t1.x = t550.x JOIN (SELECT 1 AS x) t551 ON t1.x = t551.x JOIN (SELECT 1 AS x) t552 ON t1.x = t552.x JOIN (SELECT 1 AS x) t553 ON t1.x = t553.x JOIN (SELECT 1 AS x) t554 ON t1.x = t554.x JOIN (SELECT 1 AS x) t555 ON t1.x = t555.x JOIN (SELECT 1 AS x) t556 ON t1.x = t556.x JOIN (SELECT 1 AS x) t557 ON t1.x = t557.x JOIN (SELECT 1 AS x) t558 ON t1.x = t558.x JOIN (SELECT 1 AS x) t559 ON t1.x = t559.x JOIN (SELECT 1 AS x) t560 ON t1.x = t560.x JOIN (SELECT 1 AS x) t561 ON t1.x = t561.x JOIN (SELECT 1 AS x) t562 ON t1.x = t562.x JOIN (SELECT 1 AS x) t563 ON t1.x = t563.x JOIN (SELECT 1 AS x) t564 ON t1.x = t564.x JOIN (SELECT 1 AS x) t565 ON t1.x = t565.x JOIN (SELECT 1 AS x) t566 ON t1.x = t566.x JOIN (SELECT 1 AS x) t567 ON t1.x = t567.x JOIN (SELECT 1 AS x) t568 ON t1.x = t568.x JOIN (SELECT 1 AS x) t569 ON t1.x = t569.x JOIN (SELECT 1 AS x) t570 ON t1.x = t570.x JOIN (SELECT 1 AS x) t571 ON t1.x = t571.x JOIN (SELECT 1 AS x) t572 ON t1.x = t572.x JOIN (SELECT 1 AS x) t573 ON t1.x = t573.x JOIN (SELECT 1 AS x) t574 ON t1.x = t574.x JOIN (SELECT 1 AS x) t575 ON t1.x = t575.x JOIN (SELECT 1 AS x) t576 ON t1.x = t576.x JOIN (SELECT 1 AS x) t577 ON t1.x = t577.x JOIN (SELECT 1 AS x) t578 ON t1.x = t578.x JOIN (SELECT 1 AS x) t579 ON t1.x = t579.x JOIN (SELECT 1 AS x) t580 ON t1.x = t580.x JOIN (SELECT 1 AS x) t581 ON t1.x = t581.x JOIN (SELECT 1 AS x) t582 ON t1.x = t582.x JOIN (SELECT 1 AS x) t583 ON t1.x = t583.x JOIN (SELECT 1 AS x) t584 ON t1.x = t584.x JOIN (SELECT 1 AS x) t585 ON t1.x = t585.x JOIN (SELECT 1 AS x) t586 ON t1.x = t586.x JOIN (SELECT 1 AS x) t587 ON t1.x = t587.x JOIN (SELECT 1 AS x) t588 ON t1.x = t588.x JOIN (SELECT 1 AS x) t589 ON t1.x = t589.x JOIN (SELECT 1 AS x) t590 ON t1.x = t590.x JOIN (SELECT 1 AS x) t591 ON t1.x = t591.x JOIN (SELECT 1 AS x) t592 ON t1.x = t592.x JOIN (SELECT 1 AS x) t593 ON t1.x = t593.x JOIN (SELECT 1 AS x) t594 ON t1.x = t594.x JOIN (SELECT 1 AS x) t595 ON t1.x = t595.x JOIN (SELECT 1 AS x) t596 ON t1.x = t596.x JOIN (SELECT 1 AS x) t597 ON t1.x = t597.x JOIN (SELECT 1 AS x) t598 ON t1.x = t598.x JOIN (SELECT 1 AS x) t599 ON t1.x = t599.x JOIN (SELECT 1 AS x) t600 ON t1.x = t600.x JOIN (SELECT 1 AS x) t601 ON t1.x = t601.x JOIN (SELECT 1 AS x) t602 ON t1.x = t602.x JOIN (SELECT 1 AS x) t603 ON t1.x = t603.x JOIN (SELECT 1 AS x) t604 ON t1.x = t604.x JOIN (SELECT 1 AS x) t605 ON t1.x = t605.x JOIN (SELECT 1 AS x) t606 ON t1.x = t606.x JOIN (SELECT 1 AS x) t607 ON t1.x = t607.x JOIN (SELECT 1 AS x) t608 ON t1.x = t608.x JOIN (SELECT 1 AS x) t609 ON t1.x = t609.x JOIN (SELECT 1 AS x) t610 ON t1.x = t610.x JOIN (SELECT 1 AS x) t611 ON t1.x = t611.x JOIN (SELECT 1 AS x) t612 ON t1.x = t612.x 
JOIN (SELECT 1 AS x) t613 ON t1.x = t613.x JOIN (SELECT 1 AS x) t614 ON t1.x = t614.x JOIN (SELECT 1 AS x) t615 ON t1.x = t615.x JOIN (SELECT 1 AS x) t616 ON t1.x = t616.x JOIN (SELECT 1 AS x) t617 ON t1.x = t617.x JOIN (SELECT 1 AS x) t618 ON t1.x = t618.x JOIN (SELECT 1 AS x) t619 ON t1.x = t619.x JOIN (SELECT 1 AS x) t620 ON t1.x = t620.x JOIN (SELECT 1 AS x) t621 ON t1.x = t621.x JOIN (SELECT 1 AS x) t622 ON t1.x = t622.x JOIN (SELECT 1 AS x) t623 ON t1.x = t623.x JOIN (SELECT 1 AS x) t624 ON t1.x = t624.x JOIN (SELECT 1 AS x) t625 ON t1.x = t625.x JOIN (SELECT 1 AS x) t626 ON t1.x = t626.x JOIN (SELECT 1 AS x) t627 ON t1.x = t627.x JOIN (SELECT 1 AS x) t628 ON t1.x = t628.x JOIN (SELECT 1 AS x) t629 ON t1.x = t629.x JOIN (SELECT 1 AS x) t630 ON t1.x = t630.x JOIN (SELECT 1 AS x) t631 ON t1.x = t631.x JOIN (SELECT 1 AS x) t632 ON t1.x = t632.x JOIN (SELECT 1 AS x) t633 ON t1.x = t633.x JOIN (SELECT 1 AS x) t634 ON t1.x = t634.x JOIN (SELECT 1 AS x) t635 ON t1.x = t635.x JOIN (SELECT 1 AS x) t636 ON t1.x = t636.x JOIN (SELECT 1 AS x) t637 ON t1.x = t637.x JOIN (SELECT 1 AS x) t638 ON t1.x = t638.x JOIN (SELECT 1 AS x) t639 ON t1.x = t639.x JOIN (SELECT 1 AS x) t640 ON t1.x = t640.x JOIN (SELECT 1 AS x) t641 ON t1.x = t641.x JOIN (SELECT 1 AS x) t642 ON t1.x = t642.x JOIN (SELECT 1 AS x) t643 ON t1.x = t643.x JOIN (SELECT 1 AS x) t644 ON t1.x = t644.x JOIN (SELECT 1 AS x) t645 ON t1.x = t645.x JOIN (SELECT 1 AS x) t646 ON t1.x = t646.x JOIN (SELECT 1 AS x) t647 ON t1.x = t647.x JOIN (SELECT 1 AS x) t648 ON t1.x = t648.x JOIN (SELECT 1 AS x) t649 ON t1.x = t649.x JOIN (SELECT 1 AS x) t650 ON t1.x = t650.x JOIN (SELECT 1 AS x) t651 ON t1.x = t651.x JOIN (SELECT 1 AS x) t652 ON t1.x = t652.x JOIN (SELECT 1 AS x) t653 ON t1.x = t653.x JOIN (SELECT 1 AS x) t654 ON t1.x = t654.x JOIN (SELECT 1 AS x) t655 ON t1.x = t655.x JOIN (SELECT 1 AS x) t656 ON t1.x = t656.x JOIN (SELECT 1 AS x) t657 ON t1.x = t657.x JOIN (SELECT 1 AS x) t658 ON t1.x = t658.x JOIN (SELECT 1 AS x) t659 ON t1.x = t659.x JOIN (SELECT 1 AS x) t660 ON t1.x = t660.x JOIN (SELECT 1 AS x) t661 ON t1.x = t661.x JOIN (SELECT 1 AS x) t662 ON t1.x = t662.x JOIN (SELECT 1 AS x) t663 ON t1.x = t663.x JOIN (SELECT 1 AS x) t664 ON t1.x = t664.x JOIN (SELECT 1 AS x) t665 ON t1.x = t665.x JOIN (SELECT 1 AS x) t666 ON t1.x = t666.x diff --git a/parser/testdata/03094_transform_return_first/ast.json b/parser/testdata/03094_transform_return_first/ast.json new file mode 100644 index 000000000..786183121 --- /dev/null +++ b/parser/testdata/03094_transform_return_first/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function transform (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_1, UInt64_1]" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_4, UInt64_5]" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001549756, + "rows_read": 9, + "bytes_read": 378 + } +} diff --git a/parser/testdata/03094_transform_return_first/metadata.json b/parser/testdata/03094_transform_return_first/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03094_transform_return_first/metadata.json @@ 
-0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03094_transform_return_first/query.sql b/parser/testdata/03094_transform_return_first/query.sql new file mode 100644 index 000000000..fa18440f7 --- /dev/null +++ b/parser/testdata/03094_transform_return_first/query.sql @@ -0,0 +1,7 @@ +SELECT transform(1, [1, 1, 1], [1, 4, 5]); +SELECT transform('1', ['1', '1', '1'], ['1', '4', '5']); +SELECT transform((0, 0), [(0, 0), (0, 0), (0, 0)], [(2, 2), (5, 5), (10, 10)]); + +-- https://github.com/ClickHouse/ClickHouse/issues/62183 +-- Case is turned into caseWithExpression, which then it's turned into transform +select case 1 when 1 then 2 when 1 then 4 end; diff --git a/parser/testdata/03094_virtual_column_table_name/ast.json b/parser/testdata/03094_virtual_column_table_name/ast.json new file mode 100644 index 000000000..44c87cda4 --- /dev/null +++ b/parser/testdata/03094_virtual_column_table_name/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001237603, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03094_virtual_column_table_name/metadata.json b/parser/testdata/03094_virtual_column_table_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03094_virtual_column_table_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03094_virtual_column_table_name/query.sql b/parser/testdata/03094_virtual_column_table_name/query.sql new file mode 100644 index 000000000..19c77f44b --- /dev/null +++ b/parser/testdata/03094_virtual_column_table_name/query.sql @@ -0,0 +1,119 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS m0; +DROP TABLE IF EXISTS m1; +DROP TABLE IF EXISTS m2; +DROP TABLE IF EXISTS m3; +DROP TABLE IF EXISTS m4; +DROP TABLE IF EXISTS m5; +DROP TABLE IF EXISTS d1; +DROP TABLE IF EXISTS d2; +DROP TABLE IF EXISTS d3; +DROP TABLE IF EXISTS d4; +DROP TABLE IF EXISTS d5; +DROP TABLE IF EXISTS d6; +DROP TABLE IF EXISTS d7; +DROP TABLE IF EXISTS d8; +DROP TABLE IF EXISTS temp1; +DROP TABLE IF EXISTS temp2; +DROP TABLE IF EXISTS buffer1; +DROP VIEW IF EXISTS view1; +DROP VIEW IF EXISTS view2; +DROP VIEW IF EXISTS mv1; +DROP VIEW IF EXISTS mv2; +DROP TABLE IF EXISTS dist5; +DROP TABLE IF EXISTS dist6; + +CREATE TABLE d1 (key Int, value Int) ENGINE=Memory(); +CREATE TABLE d2 (key Int, value Int) ENGINE=MergeTree() ORDER BY key; +CREATE TABLE d3 (_table Int, value Int) ENGINE=Memory(); +CREATE TABLE d8 (key Int, value Int) ENGINE=Memory(); + +CREATE TEMPORARY TABLE temp1(key Int); +CREATE TEMPORARY TABLE temp2(key Int); + +CREATE TABLE m0 ENGINE=Merge(currentDatabase(), '^(d1|d2)$'); +CREATE TABLE d4 ENGINE=Distributed('test_shard_localhost', currentDatabase(), d8, rand()); +CREATE TABLE dist5 ENGINE=Distributed('test_shard_localhost', currentDatabase(), d4, rand()); +CREATE TABLE dist6 ENGINE=Distributed('test_shard_localhost', currentDatabase(), m0, rand()); + +INSERT INTO d1 VALUES (1, 10); +INSERT INTO d1 VALUES (2, 20); + +INSERT INTO d2 VALUES (3, 30); +INSERT INTO d2 VALUES (4, 40); + +INSERT INTO d3 VALUES (5, 50); +INSERT INTO d3 VALUES (6, 60); + +INSERT INTO d8 VALUES (5, 50); +INSERT INTO d8 VALUES (6, 60); + +INSERT INTO temp1 VALUES (1); +INSERT INTO temp2 VALUES (2); + +CREATE TABLE m1 ENGINE=Merge(currentDatabase(), '^(d1|d2)$'); +CREATE TABLE m2 ENGINE=Merge(currentDatabase(), '^(d1|d4)$'); +CREATE TABLE m3 
ENGINE=Merge(currentDatabase(), '^(m1|d2)$'); +CREATE TABLE m4 ENGINE=Merge(currentDatabase(), '^(m2|d2)$'); +CREATE TABLE m5 ENGINE=Merge(currentDatabase(), '^(m1|m2)$'); + +CREATE VIEW view1 AS SELECT key, _table FROM d1; +CREATE VIEW view2 AS SELECT key FROM d1; + +CREATE TABLE d5 (key Int, value Int) ENGINE=MergeTree() ORDER BY key; +INSERT INTO d5 VALUES (7, 70); +INSERT INTO d5 VALUES (8, 80); +CREATE TABLE buffer1 AS d5 ENGINE = Buffer(currentDatabase(), d5, 1, 10000, 10000, 10000, 10000, 100000000, 100000000); +INSERT INTO buffer1 VALUES (9, 90); + +CREATE TABLE d6 (key Int, value Int) ENGINE = MergeTree ORDER BY value; +CREATE TABLE d7 (key Int, value Int) ENGINE = SummingMergeTree ORDER BY key; +CREATE MATERIALIZED VIEW mv1 TO d7 AS SELECT key, count(value) AS value FROM d6 GROUP BY key; +CREATE MATERIALIZED VIEW mv2 ENGINE = SummingMergeTree ORDER BY key AS SELECT key, count(value) AS value FROM d6 GROUP BY key; +INSERT INTO d6 VALUES (10, 100), (10, 110); + +-- { echoOn } +SELECT _table FROM d1; +SELECT count(_table) FROM d1 WHERE _table = 'd1' GROUP BY _table; +SELECT _table, key, value FROM d1 WHERE value = 10; + +SELECT _table FROM d2; +SELECT count(_table) FROM d2 WHERE _table = 'd2' GROUP BY _table; +SELECT _table, key, value FROM d2 WHERE value = 40; + +SELECT _table, value FROM d3 WHERE _table = 6; + +SELECT _table FROM d4; +SELECT count(_table) FROM d4 WHERE _table = 'd8' GROUP BY _table; +SELECT _table, key, value FROM d4 WHERE value = 60; + +SELECT _table FROM m1 ORDER BY _table ASC; +SELECT count(_table) FROM m1 WHERE _table = 'd1' GROUP BY _table; +SELECT _table, key, value FROM m1 WHERE _table = 'd2' and value <= 30; + +SELECT _table FROM m2 ORDER BY _table ASC; +SELECT count(_table) FROM m2 WHERE _table = 'd1' GROUP BY _table; +SELECT _table, key, value FROM m2 WHERE _table = 'd8' and value >= 60; + +SELECT _table, key, value FROM (SELECT _table, key, value FROM d1 UNION ALL SELECT _table, key, value FROM d2) ORDER BY key ASC; + +SELECT _table, key FROM view1 ORDER BY key ASC; +SELECT _table, key FROM view2 ORDER BY key ASC; + +SELECT _table, key, value FROM buffer1 ORDER BY key ASC; + +SELECT _table, key, value FROM mv1 ORDER BY key ASC; +SELECT _table, key, value FROM mv2 ORDER BY key ASC; + +SELECT _table, * FROM dist5 ORDER BY key ASC; +SELECT _table, * FROM dist6 ORDER BY key ASC; +SELECT _table, * FROM m3 ORDER BY key ASC; +SELECT _table, * FROM m4 WHERE _table = 'd8' ORDER BY key ASC; +SELECT _table, * FROM m5 WHERE _table = 'd8' ORDER BY key ASC; + +SELECT _table, * FROM temp1 ORDER BY key ASC; +SELECT _table, * FROM temp2 ORDER BY key ASC; +SELECT *,_table FROM (SELECT *, _table FROM temp1 UNION ALL SELECT *, _table FROM temp2) ORDER BY key ASC; + +SELECT * FROM d1 PREWHERE _table = 'd1'; -- { serverError ILLEGAL_PREWHERE } diff --git a/parser/testdata/03095_group_by_server_constants_bug/ast.json b/parser/testdata/03095_group_by_server_constants_bug/ast.json new file mode 100644 index 000000000..354466e3a --- /dev/null +++ b/parser/testdata/03095_group_by_server_constants_bug/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function serverUUID (alias s) (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function count (children 1)" + }, + 
{ + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '127.0.0.{1,2}'" + }, + { + "explain": " Identifier system.one" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier s" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001723547, + "rows_read": 18, + "bytes_read": 677 + } +} diff --git a/parser/testdata/03095_group_by_server_constants_bug/metadata.json b/parser/testdata/03095_group_by_server_constants_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03095_group_by_server_constants_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03095_group_by_server_constants_bug/query.sql b/parser/testdata/03095_group_by_server_constants_bug/query.sql new file mode 100644 index 000000000..9f9fda1ef --- /dev/null +++ b/parser/testdata/03095_group_by_server_constants_bug/query.sql @@ -0,0 +1,5 @@ +SELECT serverUUID() AS s, count() FROM remote('127.0.0.{1,2}', system.one) GROUP BY s format Null; + +select getMacro('replica') as s, count() from remote('127.0.0.{1,2}', system.one) group by s; + +select uptime() as s, count() FROM remote('127.0.0.{1,2}', system.one) group by s format Null; diff --git a/parser/testdata/03095_join_filter_push_down_right_stream_filled/ast.json b/parser/testdata/03095_join_filter_push_down_right_stream_filled/ast.json new file mode 100644 index 000000000..262cf5de9 --- /dev/null +++ b/parser/testdata/03095_join_filter_push_down_right_stream_filled/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1__fuzz_0 (children 1)" + }, + { + "explain": " Identifier t1__fuzz_0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00112833, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03095_join_filter_push_down_right_stream_filled/metadata.json b/parser/testdata/03095_join_filter_push_down_right_stream_filled/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03095_join_filter_push_down_right_stream_filled/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03095_join_filter_push_down_right_stream_filled/query.sql b/parser/testdata/03095_join_filter_push_down_right_stream_filled/query.sql new file mode 100644 index 000000000..4ce7657e1 --- /dev/null +++ b/parser/testdata/03095_join_filter_push_down_right_stream_filled/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS t1__fuzz_0; +CREATE TABLE t1__fuzz_0 +( + `x` UInt8, + `str` String +) +ENGINE = MergeTree ORDER BY x; + +INSERT INTO t1__fuzz_0 SELECT number, toString(number) FROM numbers(10); + +DROP TABLE IF EXISTS left_join__fuzz_2; +CREATE TABLE left_join__fuzz_2 +( + `x` UInt32, + `s` LowCardinality(String) +) ENGINE = Join(`ALL`, LEFT, x); + +INSERT INTO left_join__fuzz_2 SELECT number, toString(number) FROM numbers(10); + +SELECT 14 FROM t1__fuzz_0 LEFT JOIN left_join__fuzz_2 USING (x) +WHERE pointInPolygon(materialize((-inf, 1023)), [(5, 0.9998999834060669), (1.1920928955078125e-7, 100.0000991821289), (1.000100016593933, 100.0000991821289)]) +ORDER BY toNullable('202.79.32.10') DESC 
NULLS LAST, toNullable(toLowCardinality(toUInt256(14))) ASC, x DESC NULLS LAST; + +DROP TABLE t1__fuzz_0; +DROP TABLE left_join__fuzz_2; diff --git a/parser/testdata/03095_merge_and_buffer_tables/ast.json b/parser/testdata/03095_merge_and_buffer_tables/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03095_merge_and_buffer_tables/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03095_merge_and_buffer_tables/metadata.json b/parser/testdata/03095_merge_and_buffer_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03095_merge_and_buffer_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03095_merge_and_buffer_tables/query.sql b/parser/testdata/03095_merge_and_buffer_tables/query.sql new file mode 100644 index 000000000..81018a9ce --- /dev/null +++ b/parser/testdata/03095_merge_and_buffer_tables/query.sql @@ -0,0 +1,29 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/36963 + +DROP TABLE IF EXISTS mt1; +DROP TABLE IF EXISTS mt2; +DROP TABLE IF EXISTS b; + +create table mt1 (f1 Int32, f2 Int32) engine = MergeTree() order by f1; + +create table mt2 as mt1 engine = MergeTree() order by f1; +create table b as mt1 engine = Buffer(currentDatabase(), mt2, 16, 1, 1, 10000, 1000000, 10000000, 100000000); + +create table m as mt1 engine = Merge(currentDatabase(), '^(mt1|b)$'); + +-- insert some data +insert into mt1 values(1, 1), (2, 2); +insert into b values(3, 3), (4, 4); + +OPTIMIZE TABLE b; +OPTIMIZE TABLE mt1; +OPTIMIZE TABLE mt2; + +-- do select +select f1, f2 +from m +where f1 = 1 and f2 = 1; + +DROP TABLE IF EXISTS mt1; +DROP TABLE IF EXISTS mt2; +DROP TABLE IF EXISTS b; diff --git a/parser/testdata/03095_msan_uuid_string_to_num/ast.json b/parser/testdata/03095_msan_uuid_string_to_num/ast.json new file mode 100644 index 000000000..8d6e0e5c5 --- /dev/null +++ b/parser/testdata/03095_msan_uuid_string_to_num/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function UUIDStringToNum (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '00112233-4455-6677-8899-aabbccddeeff'" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001346495, + "rows_read": 8, + "bytes_read": 328 + } +} diff --git a/parser/testdata/03095_msan_uuid_string_to_num/metadata.json b/parser/testdata/03095_msan_uuid_string_to_num/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03095_msan_uuid_string_to_num/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03095_msan_uuid_string_to_num/query.sql b/parser/testdata/03095_msan_uuid_string_to_num/query.sql new file mode 100644 index 000000000..779ee1ad2 --- /dev/null +++ b/parser/testdata/03095_msan_uuid_string_to_num/query.sql @@ -0,0 +1,4 @@ +SELECT UUIDStringToNum('00112233-4455-6677-8899-aabbccddeeff', 2); +SELECT UUIDStringToNum('00112233-4455-6677-8899-aabbccddeeff', materialize(2)); -- { serverError ILLEGAL_COLUMN } +SELECT 'a/<@];!~p{jTj={)' AS bytes, UUIDNumToString(toFixedString(bytes, 16), 2) AS uuid; +SELECT 'a/<@];!~p{jTj={)' AS bytes, 
UUIDNumToString(toFixedString(bytes, 16), materialize(2)) AS uuid; -- { serverError ILLEGAL_COLUMN } diff --git a/parser/testdata/03095_window_functions_qualify/ast.json b/parser/testdata/03095_window_functions_qualify/ast.json new file mode 100644 index 000000000..f82a553b0 --- /dev/null +++ b/parser/testdata/03095_window_functions_qualify/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001142992, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03095_window_functions_qualify/metadata.json b/parser/testdata/03095_window_functions_qualify/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03095_window_functions_qualify/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03095_window_functions_qualify/query.sql b/parser/testdata/03095_window_functions_qualify/query.sql new file mode 100644 index 000000000..729039927 --- /dev/null +++ b/parser/testdata/03095_window_functions_qualify/query.sql @@ -0,0 +1,36 @@ +SET enable_analyzer = 1; + +SELECT number, COUNT() OVER (PARTITION BY number % 3) AS partition_count FROM numbers(10) QUALIFY partition_count = 4 ORDER BY number; + +SELECT '--'; + +SELECT number FROM numbers(10) QUALIFY (COUNT() OVER (PARTITION BY number % 3) AS partition_count) = 4 ORDER BY number; + +SELECT '--'; + +SELECT number FROM numbers(10) QUALIFY number > 5 ORDER BY number; + +SELECT '--'; + +SELECT (number % 2) AS key, count() FROM numbers(10) GROUP BY key HAVING key = 0 QUALIFY key == 0; + +SELECT '--'; + +SELECT (number % 2) AS key, count() FROM numbers(10) GROUP BY key QUALIFY key == 0; + +SELECT '--'; + +SELECT number, COUNT() OVER (PARTITION BY number % 3) AS partition_count FROM numbers(10) QUALIFY COUNT() OVER (PARTITION BY number % 3) = 4 ORDER BY number; + +SELECT '--'; + +EXPLAIN header = 1, actions = 1 +SELECT number, COUNT() OVER (PARTITION BY number % 3) AS partition_count FROM numbers(10) QUALIFY COUNT() OVER (PARTITION BY number % 3) = 4 ORDER BY number; + +SELECT number % toUInt256(2) AS key, count() FROM numbers(10) GROUP BY key WITH CUBE WITH TOTALS QUALIFY key = toNullable(toNullable(0)); -- { serverError NOT_IMPLEMENTED } + +SELECT number % 2 AS key, count(materialize(5)) IGNORE NULLS FROM numbers(10) WHERE toLowCardinality(toLowCardinality(materialize(2))) GROUP BY key WITH CUBE WITH TOTALS QUALIFY key = 0; -- { serverError NOT_IMPLEMENTED } + +SELECT 4, count(4) IGNORE NULLS, number % 2 AS key FROM numbers(10) GROUP BY key WITH ROLLUP WITH TOTALS QUALIFY key = materialize(0); -- { serverError NOT_IMPLEMENTED } + +SELECT 3, number % toLowCardinality(2) AS key, count() IGNORE NULLS FROM numbers(10) GROUP BY key WITH ROLLUP WITH TOTALS QUALIFY key = 0; -- { serverError NOT_IMPLEMENTED } diff --git a/parser/testdata/03096_largest_triangle_3b_crash/ast.json b/parser/testdata/03096_largest_triangle_3b_crash/ast.json new file mode 100644 index 000000000..1c3c9bb45 --- /dev/null +++ b/parser/testdata/03096_largest_triangle_3b_crash/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function largestTriangleThreeBuckets (children 2)" + }, 
+ { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal Float64_nan" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001619624, + "rows_read": 10, + "bytes_read": 383 + } +} diff --git a/parser/testdata/03096_largest_triangle_3b_crash/metadata.json b/parser/testdata/03096_largest_triangle_3b_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03096_largest_triangle_3b_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03096_largest_triangle_3b_crash/query.sql b/parser/testdata/03096_largest_triangle_3b_crash/query.sql new file mode 100644 index 000000000..b1a072940 --- /dev/null +++ b/parser/testdata/03096_largest_triangle_3b_crash/query.sql @@ -0,0 +1 @@ +SELECT largestTriangleThreeBuckets(1)(1, nan); \ No newline at end of file diff --git a/parser/testdata/03096_order_by_system_tables/ast.json b/parser/testdata/03096_order_by_system_tables/ast.json new file mode 100644 index 000000000..3560cd692 --- /dev/null +++ b/parser/testdata/03096_order_by_system_tables/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SYSTEM query" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001850675, + "rows_read": 1, + "bytes_read": 20 + } +} diff --git a/parser/testdata/03096_order_by_system_tables/metadata.json b/parser/testdata/03096_order_by_system_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03096_order_by_system_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03096_order_by_system_tables/query.sql b/parser/testdata/03096_order_by_system_tables/query.sql new file mode 100644 index 000000000..c6c1bb3b6 --- /dev/null +++ b/parser/testdata/03096_order_by_system_tables/query.sql @@ -0,0 +1,10 @@ +SYSTEM FLUSH LOGS /* all tables */; + +-- Check for system tables which have non-default sorting key +WITH + ['asynchronous_metric_log', 'asynchronous_insert_log', 'opentelemetry_span_log', 'coverage_log'] AS known_tables, + 'event_date, event_time' as default_sorting_key +SELECT + 'Table ' || name || ' has non-default sorting key: ' || sorting_key +FROM system.tables +WHERE (database = 'system') AND (engine = 'MergeTree') AND name not like 'minio%' AND (NOT arraySum(arrayMap(x -> position(name, x), known_tables))) AND (sorting_key != default_sorting_key); diff --git a/parser/testdata/03096_update_non_indexed_columns/ast.json b/parser/testdata/03096_update_non_indexed_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03096_update_non_indexed_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03096_update_non_indexed_columns/metadata.json b/parser/testdata/03096_update_non_indexed_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03096_update_non_indexed_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03096_update_non_indexed_columns/query.sql b/parser/testdata/03096_update_non_indexed_columns/query.sql new file mode 100644 index 000000000..283547d89 --- /dev/null +++ b/parser/testdata/03096_update_non_indexed_columns/query.sql @@ -0,0 +1,31 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/39453 + +DROP TABLE IF 
EXISTS test_03096; + +CREATE TABLE test_03096 +( + `a` UInt32, + `b` UInt32, + `c` UInt32, + `d` UInt32 MATERIALIZED 0, + `sum` UInt32 MATERIALIZED (a + b) + c, + INDEX idx (c, d) TYPE minmax GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY a +SETTINGS index_granularity = 8192; + +INSERT INTO test_03096 SELECT number, number % 42, number % 123 FROM numbers(10000); + +select count() from test_03096; +select count() from test_03096 where b = 0; + +alter table test_03096 update b = 100 where b = 0 SETTINGS mutations_sync=2; + +select latest_fail_reason == '', is_done == 1 from system.mutations where table='test_03096' and database = currentDatabase(); + +alter table test_03096 update b = 123 where c = 0 SETTINGS mutations_sync=2; + +select latest_fail_reason == '', is_done == 1 from system.mutations where table='test_03096' and database = currentDatabase(); + +DROP TABLE IF EXISTS test_03096; diff --git a/parser/testdata/03097_query_log_join_processes/ast.json b/parser/testdata/03097_query_log_join_processes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03097_query_log_join_processes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03097_query_log_join_processes/metadata.json b/parser/testdata/03097_query_log_join_processes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03097_query_log_join_processes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03097_query_log_join_processes/query.sql b/parser/testdata/03097_query_log_join_processes/query.sql new file mode 100644 index 000000000..9584124a9 --- /dev/null +++ b/parser/testdata/03097_query_log_join_processes/query.sql @@ -0,0 +1,7 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/56521 + +SYSTEM FLUSH LOGS query_log; + +SET enable_analyzer=1; + +SELECT count(1) as num, hostName() as hostName FROM system.query_log as a INNER JOIN system.processes as b on a.query_id = b.query_id and type = 'QueryStart' and dateDiff('second', event_time, now()) > 5 and current_database = currentDatabase() FORMAT Null; diff --git a/parser/testdata/03098_prefer_column_to_alias_subquery/ast.json b/parser/testdata/03098_prefer_column_to_alias_subquery/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03098_prefer_column_to_alias_subquery/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03098_prefer_column_to_alias_subquery/metadata.json b/parser/testdata/03098_prefer_column_to_alias_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03098_prefer_column_to_alias_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03098_prefer_column_to_alias_subquery/query.sql b/parser/testdata/03098_prefer_column_to_alias_subquery/query.sql new file mode 100644 index 000000000..cb41151b9 --- /dev/null +++ b/parser/testdata/03098_prefer_column_to_alias_subquery/query.sql @@ -0,0 +1,155 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/47552 + +DROP TABLE IF EXISTS clickhouse_alias_issue_1; +DROP TABLE IF EXISTS clickhouse_alias_issue_2; + +CREATE TABLE clickhouse_alias_issue_1 ( + id bigint, + column_1 Nullable(Float32) +) Engine=Memory; + +CREATE TABLE clickhouse_alias_issue_2 ( + id bigint, + column_2 Nullable(Float32) +) Engine=Memory; + +SET enable_analyzer = 1; + +INSERT INTO `clickhouse_alias_issue_1` +VALUES (1, 100), (2, 200), (3, 300); + +INSERT INTO `clickhouse_alias_issue_2` 
+VALUES (1, 10), (2, 20), (3, 30); + +-- This query returns the expected result +-- 300 \N 3 +-- 200 \N 2 +-- 100 \N 1 +-- \N 30 3 +-- \N 20 2 +-- \N 10 1 +SELECT * +FROM +( +SELECT + max(`column_1`) AS `column_1`, + NULL AS `column_2`, + `id` +FROM `clickhouse_alias_issue_1` +GROUP BY + `id` +UNION ALL +SELECT + NULL AS `column_1`, + max(`column_2`) AS `column_2`, + `id` +FROM `clickhouse_alias_issue_2` +GROUP BY + `id` +SETTINGS prefer_column_name_to_alias=1 +) +ORDER BY ALL DESC NULLS LAST; + +SELECT '-------------------------'; + +-- This query also returns the expected result +-- 300 30 3 +-- 200 20 2 +-- 100 10 1 +SELECT + max(`column_1`) AS `column_1`, + max(`column_2`) AS `column_2`, + `id` +FROM ( + SELECT + max(`column_1`) AS `column_1`, + NULL AS `column_2`, + `id` + FROM `clickhouse_alias_issue_1` + GROUP BY + `id` + UNION ALL + SELECT + NULL AS `column_1`, + max(`column_2`) AS `column_2`, + `id` + FROM `clickhouse_alias_issue_2` + GROUP BY + `id` + SETTINGS prefer_column_name_to_alias=1 +) as T1 +GROUP BY `id` +ORDER BY `id` DESC +SETTINGS prefer_column_name_to_alias=1; + +SELECT '-------------------------'; + +-- Expected result: +-- 10 3 +-- 10 2 +-- 10 1 +SELECT `column_1` / `column_2`, `id` +FROM ( + SELECT + max(`column_1`) AS `column_1`, + max(`column_2`) AS `column_2`, + `id` + FROM ( + SELECT + max(`column_1`) AS `column_1`, + NULL AS `column_2`, + `id` + FROM `clickhouse_alias_issue_1` + GROUP BY + `id` + UNION ALL + SELECT + NULL AS `column_1`, + max(`column_2`) AS `column_2`, + `id` + FROM `clickhouse_alias_issue_2` + GROUP BY + `id` + SETTINGS prefer_column_name_to_alias=1 + ) as T1 + GROUP BY `id` + ORDER BY `id` DESC + SETTINGS prefer_column_name_to_alias=1 +) as T2 +WHERE `column_1` IS NOT NULL AND `column_2` IS NOT NULL +SETTINGS prefer_column_name_to_alias=1; + +SELECT '-------------------------'; + +-- Without the setting, the expected result is the same +-- and the actual result isn't wrong +SELECT `column_1` / `column_2`, `id` +FROM ( + SELECT + max(`column_1`) AS `column_1`, + max(`column_2`) AS `column_2`, + `id` + FROM ( + SELECT + max(`column_1`) AS `column_1`, + NULL AS `column_2`, + `id` + FROM `clickhouse_alias_issue_1` + GROUP BY + `id` + UNION ALL + SELECT + NULL AS `column_1`, + max(`column_2`) AS `column_2`, + `id` + FROM `clickhouse_alias_issue_2` + GROUP BY + `id` + ) as T1 + GROUP BY `id` + ORDER BY `id` DESC +) as T2 +WHERE `column_1` IS NOT NULL AND `column_2` IS NOT NULL; + +DROP TABLE IF EXISTS clickhouse_alias_issue_1; +DROP TABLE IF EXISTS clickhouse_alias_issue_2; diff --git a/parser/testdata/03099_analyzer_multi_join/ast.json b/parser/testdata/03099_analyzer_multi_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03099_analyzer_multi_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03099_analyzer_multi_join/metadata.json b/parser/testdata/03099_analyzer_multi_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03099_analyzer_multi_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03099_analyzer_multi_join/query.sql b/parser/testdata/03099_analyzer_multi_join/query.sql new file mode 100644 index 000000000..dfdaeeea7 --- /dev/null +++ b/parser/testdata/03099_analyzer_multi_join/query.sql @@ -0,0 +1,27 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/56503 +SET enable_analyzer = 1; + +SELECT + tb1.owner_id AS owner_id, + type +FROM +( + SELECT number AS owner_id + FROM numbers(100)
+) AS tb1 +CROSS JOIN values('type varchar', 'type1', 'type2', 'type3') AS pt +LEFT JOIN +( + SELECT tb2.owner_id AS owner_id + FROM + ( + SELECT number AS owner_id + FROM numbers(100) + GROUP BY owner_id + ) AS tb2 +) AS merged USING (owner_id) +WHERE tb1.owner_id = merged.owner_id +GROUP BY + tb1.owner_id, + type +FORMAT `Null`; diff --git a/parser/testdata/03100_analyzer_constants_in_multiif/ast.json b/parser/testdata/03100_analyzer_constants_in_multiif/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03100_analyzer_constants_in_multiif/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03100_analyzer_constants_in_multiif/metadata.json b/parser/testdata/03100_analyzer_constants_in_multiif/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_analyzer_constants_in_multiif/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_analyzer_constants_in_multiif/query.sql b/parser/testdata/03100_analyzer_constants_in_multiif/query.sql new file mode 100644 index 000000000..c9a4000d2 --- /dev/null +++ b/parser/testdata/03100_analyzer_constants_in_multiif/query.sql @@ -0,0 +1,13 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/59101 +SET enable_analyzer = 1; + +CREATE TABLE users (name String, age Int16) ENGINE=Memory; +INSERT INTO users VALUES ('John', 33); +INSERT INTO users VALUES ('Ksenia', 48); +INSERT INTO users VALUES ('Alice', 50); + +SELECT + multiIf((age > 30) or (true), '1', '2') AS a, + max(name) +FROM users +GROUP BY a; diff --git a/parser/testdata/03100_lwu_01_basics/ast.json b/parser/testdata/03100_lwu_01_basics/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03100_lwu_01_basics/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03100_lwu_01_basics/metadata.json b/parser/testdata/03100_lwu_01_basics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_01_basics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_01_basics/query.sql b/parser/testdata/03100_lwu_01_basics/query.sql new file mode 100644 index 000000000..9ce909556 --- /dev/null +++ b/parser/testdata/03100_lwu_01_basics/query.sql @@ -0,0 +1,46 @@ +-- Tags: no-parallel-replicas, no-replicated-database +-- no-parallel-replicas: profile events may differ with parallel replicas. +-- no-replicated-database: fails due to additional shard. 
+ +SET insert_keeper_fault_injection_probability = 0.0; +SET enable_lightweight_update = 1; + +DROP TABLE IF EXISTS t_shared SYNC; + +CREATE TABLE t_shared (id UInt64, c1 UInt64, c2 Int16) +ENGINE = ReplicatedMergeTree('/zookeeper/{database}/t_shared/', '1') +ORDER BY id +SETTINGS + enable_block_number_column = true, + enable_block_offset_column = true; + +INSERT INTO t_shared SELECT number, number, number FROM numbers(20); +INSERT INTO t_shared SELECT number, number, number FROM numbers(100, 10); + +SET apply_patch_parts = 1; +SET max_threads = 1; + +UPDATE t_shared SET c2 = c1 * c1 WHERE id % 2 = 0; + +SELECT * FROM t_shared ORDER BY id; +SELECT name, rows FROM system.parts WHERE database = currentDatabase() AND table = 't_shared' ORDER BY name; + +DETACH TABLE t_shared; +ATTACH TABLE t_shared; + +SELECT * FROM t_shared ORDER BY id; +SELECT name, rows FROM system.parts WHERE database = currentDatabase() AND table = 't_shared' ORDER BY name; + +ALTER TABLE t_shared APPLY PATCHES SETTINGS mutations_sync = 2; + +SELECT * FROM t_shared ORDER BY id; +SELECT name, rows FROM system.parts WHERE database = currentDatabase() AND table = 't_shared' ORDER BY name; + +SYSTEM FLUSH LOGS query_log; + +SELECT ProfileEvents['ReadTasksWithAppliedPatches'] +FROM system.query_log +WHERE current_database = currentDatabase() AND query = 'SELECT * FROM t_shared ORDER BY id;' AND type = 'QueryFinish' +ORDER BY event_time_microseconds; + +DROP TABLE t_shared SYNC; diff --git a/parser/testdata/03100_lwu_02_basics/ast.json b/parser/testdata/03100_lwu_02_basics/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03100_lwu_02_basics/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03100_lwu_02_basics/metadata.json b/parser/testdata/03100_lwu_02_basics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_02_basics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_02_basics/query.sql b/parser/testdata/03100_lwu_02_basics/query.sql new file mode 100644 index 000000000..99b4fd16a --- /dev/null +++ b/parser/testdata/03100_lwu_02_basics/query.sql @@ -0,0 +1,30 @@ +-- Tags: no-replicated-database +-- no-replicated-database: SYSTEM STOP MERGES works only on one replica. 
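+--
+-- Merges are stopped so that each patch part stays separate; the final
+-- SELECT checks that patches from the interleaved INSERTs and UPDATEs
+-- are still applied on read.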
+ +SET insert_keeper_fault_injection_probability = 0.0; +SET enable_lightweight_update = 1; + +DROP TABLE IF EXISTS t_shared SYNC; + +CREATE TABLE t_shared (id UInt64, c1 UInt64) +ENGINE = ReplicatedMergeTree('/zookeeper/{database}/t_shared/', '1') +ORDER BY id +SETTINGS + enable_block_number_column = true, + enable_block_offset_column = true; + +SYSTEM STOP MERGES t_shared; + +INSERT INTO t_shared SELECT number, number FROM numbers(10); + +UPDATE t_shared SET c1 = 111 WHERE id % 2 = 1; + +INSERT INTO t_shared SELECT number, number FROM numbers(50, 10); + +UPDATE t_shared SET c1 = 222 WHERE id % 2 = 0; + +SELECT name, rows FROM system.parts WHERE database = currentDatabase() AND table = 't_shared' ORDER BY name; + +SELECT * FROM t_shared ORDER BY id; + +DROP TABLE t_shared SYNC; diff --git a/parser/testdata/03100_lwu_03_join/ast.json b/parser/testdata/03100_lwu_03_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03100_lwu_03_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03100_lwu_03_join/metadata.json b/parser/testdata/03100_lwu_03_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_03_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_03_join/query.sql b/parser/testdata/03100_lwu_03_join/query.sql new file mode 100644 index 000000000..d89687237 --- /dev/null +++ b/parser/testdata/03100_lwu_03_join/query.sql @@ -0,0 +1,47 @@ +-- Tags: no-parallel-replicas, no-replicated-database, long +-- no-parallel-replicas: profile events may differ with parallel replicas. +-- no-replicated-database: fails due to additional shard. + +SET insert_keeper_fault_injection_probability = 0.0; +SET enable_lightweight_update = 1; + +DROP TABLE IF EXISTS t_shared SYNC; + +CREATE TABLE t_shared (id UInt64, c1 UInt64) +ENGINE = ReplicatedMergeTree('/zookeeper/{database}/t_shared/', '1') +ORDER BY id +SETTINGS + enable_block_number_column = 1, + enable_block_offset_column = 1, + apply_patches_on_merge = 0; + +INSERT INTO t_shared VALUES (1, 2) (3, 4); + +UPDATE t_shared SET c1 = 100 WHERE id = 1; + +SELECT name, rows from system.parts WHERE database = currentDatabase() AND table = 't_shared' AND active ORDER BY name; + +SELECT * FROM t_shared ORDER BY id; + +INSERT INTO t_shared VALUES (5, 6); + +UPDATE t_shared SET c1 = 200 WHERE id = 5; + +SELECT name, rows from system.parts WHERE database = currentDatabase() AND table = 't_shared' AND active ORDER BY name; + +SELECT * FROM t_shared ORDER BY id; + +OPTIMIZE TABLE t_shared PARTITION ID 'all' FINAL; + +SELECT name, rows from system.parts WHERE database = currentDatabase() AND table = 't_shared' AND active ORDER BY name; + +SELECT * FROM t_shared ORDER BY id; + +DROP TABLE t_shared SYNC; + +SYSTEM FLUSH LOGS query_log; + +SELECT mapSort(mapFilter((k, v) -> k IN ('ReadTasksWithAppliedPatches', 'PatchesAppliedInAllReadTasks', 'PatchesMergeAppliedInAllReadTasks', 'PatchesJoinAppliedInAllReadTasks'), ProfileEvents)) +FROM system.query_log +WHERE current_database = currentDatabase() AND query LIKE '%SELECT * FROM t_shared ORDER BY id%' AND type = 'QueryFinish' +ORDER BY event_time_microseconds; diff --git a/parser/testdata/03100_lwu_04_prewhere/ast.json b/parser/testdata/03100_lwu_04_prewhere/ast.json new file mode 100644 index 000000000..968d0843f --- /dev/null +++ b/parser/testdata/03100_lwu_04_prewhere/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], 
+ + "data": + [ + { + "explain": "DropQuery t_shared (children 1)" + }, + { + "explain": " Identifier t_shared" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001425151, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/03100_lwu_04_prewhere/metadata.json b/parser/testdata/03100_lwu_04_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_04_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_04_prewhere/query.sql b/parser/testdata/03100_lwu_04_prewhere/query.sql new file mode 100644 index 000000000..0cf29cb6b --- /dev/null +++ b/parser/testdata/03100_lwu_04_prewhere/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS t_shared SYNC; +SET enable_lightweight_update = 1; + +CREATE TABLE t_shared (id UInt64, c1 UInt64, c2 String) +ENGINE = ReplicatedMergeTree('/zookeeper/{database}/t_shared/', '1') +ORDER BY id +SETTINGS + enable_block_number_column = 1, + enable_block_offset_column = 1; + +INSERT INTO t_shared VALUES (1, 11, 'foo') (2, 22, 'bar') (3, 33, 'sss'); + +UPDATE t_shared SET c1 = 111, c2 = 'aaa' WHERE id = 1; +UPDATE t_shared SET c2 = 'aaa' WHERE id = 3; + +SELECT * FROM t_shared ORDER BY id SETTINGS apply_patch_parts = 0; +SELECT * FROM t_shared ORDER BY id SETTINGS apply_patch_parts = 1; + +SELECT * FROM t_shared PREWHERE c2 = 'aaa' AND c1 = 111; +SELECT * FROM t_shared WHERE c2 = 'aaa' AND c1 = 111; +SELECT * FROM t_shared PREWHERE c2 = 'aaa' WHERE c1 = 111; + +DROP TABLE t_shared SYNC; diff --git a/parser/testdata/03100_lwu_05_basics/ast.json b/parser/testdata/03100_lwu_05_basics/ast.json new file mode 100644 index 000000000..61fc43bd5 --- /dev/null +++ b/parser/testdata/03100_lwu_05_basics/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lightweight (children 1)" + }, + { + "explain": " Identifier t_lightweight" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001454949, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/03100_lwu_05_basics/metadata.json b/parser/testdata/03100_lwu_05_basics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_05_basics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_05_basics/query.sql b/parser/testdata/03100_lwu_05_basics/query.sql new file mode 100644 index 000000000..b4cb1ab1b --- /dev/null +++ b/parser/testdata/03100_lwu_05_basics/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS t_lightweight; +SET enable_lightweight_update = 1; + +CREATE TABLE t_lightweight (d Date, e Enum8('foo' = 1, 'bar' = 2)) +ENGINE = MergeTree ORDER BY d +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +INSERT INTO t_lightweight (d, e) VALUES ('2018-01-01', 'foo'); +INSERT INTO t_lightweight (d, e) VALUES ('2018-01-02', 'bar'); + +UPDATE t_lightweight SET e = CAST('foo', 'Enum8(\'foo\' = 1, \'bar\' = 2)') WHERE d = '2018-01-02'; + +SELECT e FROM t_lightweight ORDER BY d; +SELECT name, rows FROM system.parts WHERE database = currentDatabase() AND table = 't_lightweight' ORDER BY name; + +DROP TABLE t_lightweight; diff --git a/parser/testdata/03100_lwu_06_apply_patches/ast.json b/parser/testdata/03100_lwu_06_apply_patches/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03100_lwu_06_apply_patches/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/03100_lwu_06_apply_patches/metadata.json b/parser/testdata/03100_lwu_06_apply_patches/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_06_apply_patches/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_06_apply_patches/query.sql b/parser/testdata/03100_lwu_06_apply_patches/query.sql new file mode 100644 index 000000000..1dca1e12b --- /dev/null +++ b/parser/testdata/03100_lwu_06_apply_patches/query.sql @@ -0,0 +1,44 @@ +-- Tags: no-replicated-database +-- no-replicated-database: OPTIMIZE is replicated which affects the part names. + +SET insert_keeper_fault_injection_probability = 0.0; +SET enable_lightweight_update = 1; + +DROP TABLE IF EXISTS t_shared SYNC; + +CREATE TABLE t_shared (id UInt64, c1 UInt64) +ENGINE = ReplicatedMergeTree('/zookeeper/{database}/t_shared/', '1') +ORDER BY id +SETTINGS + enable_block_number_column = 1, + enable_block_offset_column = 1, + apply_patches_on_merge = 0, + cleanup_delay_period = 1000, + max_cleanup_delay_period = 1000; + +INSERT INTO t_shared SELECT number, number FROM numbers(20); +INSERT INTO t_shared SELECT number, number FROM numbers(20, 10); + +UPDATE t_shared SET c1 = id + 100 WHERE id % 2 = 0; + +SET mutations_sync = 2; +ALTER TABLE t_shared APPLY PATCHES, UPDATE c1 = 2000 WHERE id % 10 = 0; + +UPDATE t_shared SET c1 = id + 1000 WHERE id % 3 = 0; + +SELECT '*** with patches ***'; +SELECT * FROM t_shared ORDER BY id SETTINGS apply_patch_parts = 1; +SELECT '*** without patches ***'; +SELECT * FROM t_shared ORDER BY id SETTINGS apply_patch_parts = 0; + +SELECT name, rows FROM system.parts WHERE database = currentDatabase() AND table = 't_shared' AND active ORDER BY name; + +SELECT * FROM t_shared ORDER BY id SETTINGS apply_patch_parts = 1; +SELECT name, rows FROM system.parts WHERE database = currentDatabase() AND table = 't_shared' AND active ORDER BY name; + +OPTIMIZE TABLE t_shared PARTITION ID 'all' FINAL; + +SELECT * FROM t_shared ORDER BY id SETTINGS apply_patch_parts = 1; +SELECT name, rows FROM system.parts WHERE database = currentDatabase() AND table = 't_shared' AND active ORDER BY name; + +DROP TABLE t_shared SYNC; diff --git a/parser/testdata/03100_lwu_07_merge_patches/ast.json b/parser/testdata/03100_lwu_07_merge_patches/ast.json new file mode 100644 index 000000000..b5a73d59f --- /dev/null +++ b/parser/testdata/03100_lwu_07_merge_patches/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lightweight (children 1)" + }, + { + "explain": " Identifier t_lightweight" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001277315, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/03100_lwu_07_merge_patches/metadata.json b/parser/testdata/03100_lwu_07_merge_patches/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_07_merge_patches/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_07_merge_patches/query.sql b/parser/testdata/03100_lwu_07_merge_patches/query.sql new file mode 100644 index 000000000..c2060f071 --- /dev/null +++ b/parser/testdata/03100_lwu_07_merge_patches/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS t_lightweight SYNC; +SET enable_lightweight_update = 1; + +CREATE TABLE t_lightweight (id UInt64, c1 UInt64) +ENGINE = MergeTree ORDER BY id +SETTINGS enable_block_number_column = 1, enable_block_offset_column 
= 1;
+
+INSERT INTO t_lightweight SELECT number, number FROM numbers(20);
+
+UPDATE t_lightweight SET c1 = c1 + 100 WHERE id % 2 = 0;
+UPDATE t_lightweight SET c1 = c1 + 1000 WHERE id % 3 = 0;
+UPDATE t_lightweight SET c1 = 10000 WHERE id = 10;
+UPDATE t_lightweight SET c1 = 13000 WHERE id = 10;
+UPDATE t_lightweight SET c1 = 15000 WHERE id = 15;
+
+SELECT * FROM t_lightweight ORDER BY id SETTINGS apply_patch_parts = 1;
+SELECT name, rows FROM system.parts WHERE database = currentDatabase() AND table = 't_lightweight' AND active ORDER BY min_block_number;
+
+OPTIMIZE TABLE t_lightweight PARTITION ID 'patch-3e1a7650697c132eb044cc6f1d82bc92-all' FINAL;
+
+SELECT * FROM t_lightweight ORDER BY id SETTINGS apply_patch_parts = 1;
+SELECT name, rows FROM system.parts WHERE database = currentDatabase() AND table = 't_lightweight' AND active ORDER BY min_block_number;
+SELECT count() FROM t_lightweight WHERE c1 != id;
+
+DROP TABLE t_lightweight SYNC;
diff --git a/parser/testdata/03100_lwu_08_multiple_blocks/ast.json b/parser/testdata/03100_lwu_08_multiple_blocks/ast.json
new file mode 100644
index 000000000..b781ef151
--- /dev/null
+++ b/parser/testdata/03100_lwu_08_multiple_blocks/ast.json
@@ -0,0 +1,28 @@
+{
+ "meta":
+ [
+ {
+ "name": "explain",
+ "type": "String"
+ }
+ ],
+
+ "data":
+ [
+ {
+ "explain": "DropQuery t_lightweight_8 (children 1)"
+ },
+ {
+ "explain": " Identifier t_lightweight_8"
+ }
+ ],
+
+ "rows": 2,
+
+ "statistics":
+ {
+ "elapsed": 0.001416197,
+ "rows_read": 2,
+ "bytes_read": 82
+ }
+}
diff --git a/parser/testdata/03100_lwu_08_multiple_blocks/metadata.json b/parser/testdata/03100_lwu_08_multiple_blocks/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03100_lwu_08_multiple_blocks/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03100_lwu_08_multiple_blocks/query.sql b/parser/testdata/03100_lwu_08_multiple_blocks/query.sql
new file mode 100644
index 000000000..fde6341bc
--- /dev/null
+++ b/parser/testdata/03100_lwu_08_multiple_blocks/query.sql
@@ -0,0 +1,15 @@
+DROP TABLE IF EXISTS t_lightweight_8;
+SET enable_lightweight_update = 1;
+
+CREATE TABLE t_lightweight_8 (id UInt64, v UInt64) ENGINE = MergeTree ORDER BY id
+SETTINGS index_granularity = 1024, enable_block_number_column = 1, enable_block_offset_column = 1;
+
+INSERT INTO t_lightweight_8 SELECT number, 0 FROM numbers(100000);
+
+UPDATE t_lightweight_8 SET v = 1 WHERE id % 20000 = 0;
+
+SET max_block_size = 8192;
+
+SELECT sum(v) FROM t_lightweight_8;
+
+DROP TABLE t_lightweight_8;
diff --git a/parser/testdata/03100_lwu_09_different_structure/ast.json b/parser/testdata/03100_lwu_09_different_structure/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03100_lwu_09_different_structure/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03100_lwu_09_different_structure/metadata.json b/parser/testdata/03100_lwu_09_different_structure/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03100_lwu_09_different_structure/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03100_lwu_09_different_structure/query.sql b/parser/testdata/03100_lwu_09_different_structure/query.sql
new file mode 100644
index 000000000..c5f9e66a0
--- /dev/null
+++ b/parser/testdata/03100_lwu_09_different_structure/query.sql
@@ -0,0 +1,33 @@
+-- Tags: no-replicated-database
+-- no-replicated-database: same reason as in 03100_lwu_03_join (fails due to additional shard).
+
+SET
insert_keeper_fault_injection_probability = 0.0; +SET enable_lightweight_update = 1; + +DROP TABLE IF EXISTS t_shared SYNC; + +CREATE TABLE t_shared (id UInt64, c1 UInt64, s String) +ENGINE = ReplicatedMergeTree('/zookeeper/{database}/t_shared/', '1') +ORDER BY id +SETTINGS + enable_block_number_column = true, + enable_block_offset_column = true, + shared_merge_tree_disable_merges_and_mutations_assignment = 1, + apply_patches_on_merge = 0; + +INSERT INTO t_shared SELECT number, number, 's' || toString(number) FROM numbers(20); + +UPDATE t_shared SET c1 = c1 * 10 WHERE id % 2 = 0; +UPDATE t_shared SET s = s || '_foo' WHERE id % 2 = 1; +UPDATE t_shared SET c1 = c1 + 1000 WHERE id % 3 = 0; + +OPTIMIZE TABLE t_shared FINAL SETTINGS optimize_throw_if_noop = 1; +OPTIMIZE TABLE t_shared PARTITION ID 'patch-3e1a7650697c132eb044cc6f1d82bc92-all' FINAL SETTINGS optimize_throw_if_noop = 1; +OPTIMIZE TABLE t_shared PARTITION ID 'patch-8feeedf7588c601fd7f38da7fe68712b-all' FINAL SETTINGS optimize_throw_if_noop = 1; + +SET apply_patch_parts = 1; +SELECT * FROM t_shared ORDER BY id; + +SELECT name, rows FROM system.parts WHERE database = currentDatabase() AND table = 't_shared' AND active ORDER BY name; + +DROP TABLE t_shared SYNC; diff --git a/parser/testdata/03100_lwu_10_apply_on_merges/ast.json b/parser/testdata/03100_lwu_10_apply_on_merges/ast.json new file mode 100644 index 000000000..0b8dcc005 --- /dev/null +++ b/parser/testdata/03100_lwu_10_apply_on_merges/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lwu_merges (children 1)" + }, + { + "explain": " Identifier t_lwu_merges" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001289542, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/03100_lwu_10_apply_on_merges/metadata.json b/parser/testdata/03100_lwu_10_apply_on_merges/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_10_apply_on_merges/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_10_apply_on_merges/query.sql b/parser/testdata/03100_lwu_10_apply_on_merges/query.sql new file mode 100644 index 000000000..f2656292f --- /dev/null +++ b/parser/testdata/03100_lwu_10_apply_on_merges/query.sql @@ -0,0 +1,45 @@ +DROP TABLE IF EXISTS t_lwu_merges SYNC; +SET enable_lightweight_update = 1; + +CREATE TABLE t_lwu_merges (id UInt64, u UInt64, s String) +ENGINE = ReplicatedMergeTree('/zookeeper/{database}/t_lwu_merges/', '1') +ORDER BY id PARTITION BY id % 2 +SETTINGS + enable_block_number_column = 1, + enable_block_offset_column = 1, + apply_patches_on_merge = 1, + shared_merge_tree_disable_merges_and_mutations_assignment = 1, + max_replicated_mutations_in_queue = 0; + +INSERT INTO t_lwu_merges SELECT number, number, 'c' || number FROM numbers(10000); + +UPDATE t_lwu_merges SET s = s || '_foo' WHERE id % 3 = 0; +UPDATE t_lwu_merges SET u = id * 10 WHERE id % 3 = 1; + +SYSTEM SYNC REPLICA t_lwu_merges; + +OPTIMIZE TABLE t_lwu_merges PARTITION 0 FINAL SETTINGS optimize_throw_if_noop = 1; + +SELECT sum(u), countIf(endsWith(s, '_foo')) FROM t_lwu_merges SETTINGS apply_patch_parts = 0; +SELECT sum(u), countIf(endsWith(s, '_foo')) FROM t_lwu_merges SETTINGS apply_patch_parts = 1; + +OPTIMIZE TABLE t_lwu_merges PARTITION 1 FINAL SETTINGS optimize_throw_if_noop = 1; + +SELECT sum(u), countIf(endsWith(s, '_foo')) FROM t_lwu_merges SETTINGS apply_patch_parts = 0; +SELECT sum(u), countIf(endsWith(s, 
'_foo')) FROM t_lwu_merges SETTINGS apply_patch_parts = 1; + +UPDATE t_lwu_merges SET u = 0 WHERE id % 3 = 1; +-- Add a barrier mutation between patch parts. +ALTER TABLE t_lwu_merges DELETE WHERE id = 0 SETTINGS mutations_sync = 0; +-- The second patch shouldn't be applied on merge until mutation is done. +UPDATE t_lwu_merges SET u = 0 WHERE id % 3 = 2; + +SYSTEM SYNC REPLICA t_lwu_merges; + +OPTIMIZE TABLE t_lwu_merges PARTITION 0 FINAL SETTINGS optimize_throw_if_noop = 1; +OPTIMIZE TABLE t_lwu_merges PARTITION 1 FINAL SETTINGS optimize_throw_if_noop = 1; + +SELECT sum(u), countIf(endsWith(s, '_foo')) FROM t_lwu_merges SETTINGS apply_patch_parts = 0; +SELECT sum(u), countIf(endsWith(s, '_foo')) FROM t_lwu_merges SETTINGS apply_patch_parts = 1; + +DROP TABLE t_lwu_merges SYNC; diff --git a/parser/testdata/03100_lwu_12_sequential_consistency/ast.json b/parser/testdata/03100_lwu_12_sequential_consistency/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03100_lwu_12_sequential_consistency/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03100_lwu_12_sequential_consistency/metadata.json b/parser/testdata/03100_lwu_12_sequential_consistency/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_12_sequential_consistency/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_12_sequential_consistency/query.sql b/parser/testdata/03100_lwu_12_sequential_consistency/query.sql new file mode 100644 index 000000000..bb35794c3 --- /dev/null +++ b/parser/testdata/03100_lwu_12_sequential_consistency/query.sql @@ -0,0 +1,33 @@ +-- Tags: replica + +DROP TABLE IF EXISTS t_lwu_sequential_1 SYNC; +DROP TABLE IF EXISTS t_lwu_sequential_2 SYNC; + +SET enable_lightweight_update = 1; + +CREATE TABLE t_lwu_sequential_1 (id UInt64, s FixedString(3)) +ENGINE = ReplicatedMergeTree('/zookeeper/{database}/t_lwu_sequential/', '1') +ORDER BY id +SETTINGS + enable_block_number_column = 1, + enable_block_offset_column = 1; + +CREATE TABLE t_lwu_sequential_2 (id UInt64, s FixedString(3)) +ENGINE = ReplicatedMergeTree('/zookeeper/{database}/t_lwu_sequential/', '2') +ORDER BY id +SETTINGS + enable_block_number_column = 1, + enable_block_offset_column = 1; + +SET update_sequential_consistency = 1; +SET select_sequential_consistency = 0; + +INSERT INTO t_lwu_sequential_1 VALUES (1, 'abc'), (2, 'def'); + +UPDATE t_lwu_sequential_2 SET s = 'foo' WHERE id = 1; + +SET select_sequential_consistency = 1; +SYSTEM SYNC REPLICA t_lwu_sequential_1 LIGHTWEIGHT; + +SELECT * FROM t_lwu_sequential_1 ORDER BY id SETTINGS apply_patch_parts = 0; +SELECT * FROM t_lwu_sequential_1 ORDER BY id SETTINGS apply_patch_parts = 1; diff --git a/parser/testdata/03100_lwu_18_sequence/ast.json b/parser/testdata/03100_lwu_18_sequence/ast.json new file mode 100644 index 000000000..8a7a89721 --- /dev/null +++ b/parser/testdata/03100_lwu_18_sequence/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lwu_sequence (children 1)" + }, + { + "explain": " Identifier t_lwu_sequence" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001255324, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/03100_lwu_18_sequence/metadata.json b/parser/testdata/03100_lwu_18_sequence/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_18_sequence/metadata.json @@ 
-0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_18_sequence/query.sql b/parser/testdata/03100_lwu_18_sequence/query.sql new file mode 100644 index 000000000..40b1a641e --- /dev/null +++ b/parser/testdata/03100_lwu_18_sequence/query.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS t_lwu_sequence; +SET enable_lightweight_update = 1; + +CREATE TABLE t_lwu_sequence (a UInt64, b UInt64) +ENGINE = MergeTree ORDER BY a +SETTINGS + enable_block_number_column = 1, + enable_block_offset_column = 1, + apply_patches_on_merge = 1, + shared_merge_tree_disable_merges_and_mutations_assignment = 1; + +INSERT INTO t_lwu_sequence SELECT number, number FROM numbers(10); + +UPDATE t_lwu_sequence SET b = 500 WHERE a = 5; +UPDATE t_lwu_sequence SET b = 501 WHERE a = 5; +UPDATE t_lwu_sequence SET b = 502 WHERE a = 5; +UPDATE t_lwu_sequence SET b = 503 WHERE a = 5; + +SELECT b FROM t_lwu_sequence WHERE a = 5 SETTINGS apply_patch_parts = 0; +SELECT b FROM t_lwu_sequence WHERE a = 5 SETTINGS apply_patch_parts = 1; + +SELECT count(), sum(rows) FROM system.parts WHERE database = currentDatabase() AND table = 't_lwu_sequence' AND startsWith(name, 'patch') AND active; + +OPTIMIZE TABLE t_lwu_sequence PARTITION ID 'patch-d9dff7d4cface4172f96b0bae7cb2e83-all' FINAL; + +SELECT b FROM t_lwu_sequence WHERE a = 5 SETTINGS apply_patch_parts = 0; +SELECT b FROM t_lwu_sequence WHERE a = 5 SETTINGS apply_patch_parts = 1; + +SELECT count(), sum(rows) FROM system.parts WHERE database = currentDatabase() AND table = 't_lwu_sequence' AND startsWith(name, 'patch') AND active; + +OPTIMIZE TABLE t_lwu_sequence PARTITION ID 'all' FINAL; + +SELECT b FROM t_lwu_sequence WHERE a = 5 SETTINGS apply_patch_parts = 0; +SELECT b FROM t_lwu_sequence WHERE a = 5 SETTINGS apply_patch_parts = 1; + +DROP TABLE t_lwu_sequence; diff --git a/parser/testdata/03100_lwu_19_nullable/ast.json b/parser/testdata/03100_lwu_19_nullable/ast.json new file mode 100644 index 000000000..5a8225188 --- /dev/null +++ b/parser/testdata/03100_lwu_19_nullable/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mutation_table (children 1)" + }, + { + "explain": " Identifier mutation_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001421828, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/03100_lwu_19_nullable/metadata.json b/parser/testdata/03100_lwu_19_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_19_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_19_nullable/query.sql b/parser/testdata/03100_lwu_19_nullable/query.sql new file mode 100644 index 000000000..3cbbe3048 --- /dev/null +++ b/parser/testdata/03100_lwu_19_nullable/query.sql @@ -0,0 +1,52 @@ +DROP TABLE IF EXISTS mutation_table; +SET enable_lightweight_update = 1; + +CREATE TABLE mutation_table +( + id int, + price Nullable(Int32) +) +ENGINE = MergeTree() +PARTITION BY id +ORDER BY id +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +INSERT INTO mutation_table (id, price) VALUES (1, 100); + +UPDATE mutation_table SET price = 150 WHERE id = 1; + +SELECT * FROM mutation_table; + +DROP TABLE IF EXISTS mutation_table; + +create table mutation_table (dt Nullable(Date), name Nullable(String)) +engine MergeTree order by tuple() +settings enable_block_number_column = 1, enable_block_offset_column = 1; + +insert into mutation_table (name, dt) 
values ('car', '2020-02-28'); +insert into mutation_table (name, dt) values ('dog', '2020-03-28'); + +select * from mutation_table order by dt, name; + +update mutation_table set dt = toDateOrNull('2020-08-02') where name = 'car'; + +select * from mutation_table order by dt, name; + +insert into mutation_table (name, dt) values ('car', null); +insert into mutation_table (name, dt) values ('cat', null); + +update mutation_table set dt = toDateOrNull('2020-08-03') where name = 'car' and dt is null; + +select * from mutation_table order by dt, name; + +update mutation_table set dt = toDateOrNull('2020-08-04') where name = 'car' or dt is null; + +select * from mutation_table order by dt, name; + +insert into mutation_table (name, dt) values (null, '2020-08-05'); + +update mutation_table set dt = null where name is not null; + +select * from mutation_table order by dt, name; + +DROP TABLE IF EXISTS mutation_table; diff --git a/parser/testdata/03100_lwu_20_different_structure/ast.json b/parser/testdata/03100_lwu_20_different_structure/ast.json new file mode 100644 index 000000000..c47e514e5 --- /dev/null +++ b/parser/testdata/03100_lwu_20_different_structure/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery testing (children 1)" + }, + { + "explain": " Identifier testing" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001380841, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/03100_lwu_20_different_structure/metadata.json b/parser/testdata/03100_lwu_20_different_structure/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_20_different_structure/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_20_different_structure/query.sql b/parser/testdata/03100_lwu_20_different_structure/query.sql new file mode 100644 index 000000000..fe3f8bcaf --- /dev/null +++ b/parser/testdata/03100_lwu_20_different_structure/query.sql @@ -0,0 +1,50 @@ +DROP TABLE IF EXISTS testing; +SET enable_lightweight_update = 1; + +CREATE TABLE testing +( + a String, + b String, + c Int32, + d Int32, + e Int32, +) +ENGINE = MergeTree PRIMARY KEY (a) +SETTINGS min_bytes_for_wide_part = 0, enable_block_number_column = 1, enable_block_offset_column = 1; + +INSERT INTO testing SELECT number, number, number, number, number % 2 FROM numbers(5); + +-- { echoOn } + +OPTIMIZE TABLE testing FINAL; + +SELECT c FROM testing ORDER BY d; +SELECT c FROM testing ORDER BY e, d; + +-- update all columns used by proj_1 +UPDATE testing SET c = c+1, d = d+2 WHERE 1; + +SELECT * FROM system.mutations WHERE database = currentDatabase() AND table = 'testing' AND not is_done; + +SELECT c FROM testing ORDER BY d; +SELECT c FROM testing ORDER BY e, d; + +-- update only one column +UPDATE testing SET d = d-1 WHERE 1; + +SELECT * FROM system.mutations WHERE database = currentDatabase() AND table = 'testing' AND not is_done; + +SELECT c FROM testing ORDER BY d; +SELECT c FROM testing ORDER BY e, d; + +-- update only another one column +UPDATE testing SET c = c-1 WHERE 1; + +SELECT * FROM system.mutations WHERE database = currentDatabase() AND table = 'testing' AND not is_done; + +SELECT c FROM testing ORDER BY d; +SELECT c FROM testing ORDER BY e, d; + +-- { echoOff } + +DROP TABLE testing; diff --git a/parser/testdata/03100_lwu_22_detach_attach_patches/ast.json b/parser/testdata/03100_lwu_22_detach_attach_patches/ast.json new file mode 100644 
index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03100_lwu_22_detach_attach_patches/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03100_lwu_22_detach_attach_patches/metadata.json b/parser/testdata/03100_lwu_22_detach_attach_patches/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_22_detach_attach_patches/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_22_detach_attach_patches/query.sql b/parser/testdata/03100_lwu_22_detach_attach_patches/query.sql new file mode 100644 index 000000000..132ff4898 --- /dev/null +++ b/parser/testdata/03100_lwu_22_detach_attach_patches/query.sql @@ -0,0 +1,72 @@ +-- Tags: no-replicated-database +-- no-replicated-database: fails due to additional shard. + +DROP TABLE IF EXISTS t_detach_attach_patches SYNC; +DROP TABLE IF EXISTS t_detach_attach_patches_dst SYNC; + +SET enable_lightweight_update = 1; + +CREATE TABLE t_detach_attach_patches (id UInt64, a UInt64, b UInt64, c UInt64) +ENGINE = ReplicatedMergeTree('/zookeeper/{database}/t_lwu_on_fly/', '1') +ORDER BY a PARTITION BY id +SETTINGS + enable_block_number_column = 1, + enable_block_offset_column = 1; + +CREATE TABLE t_detach_attach_patches_dst AS t_detach_attach_patches +ENGINE = ReplicatedMergeTree('/zookeeper/{database}/t_lwu_on_fly_dst/', '1') +ORDER BY a PARTITION BY id +SETTINGS + enable_block_number_column = 1, + enable_block_offset_column = 1; + +SET apply_patch_parts = 1; +SET mutations_sync = 2; +SET insert_keeper_fault_injection_probability = 0.0; + +INSERT INTO t_detach_attach_patches VALUES (0, 1, 1, 1) (0, 2, 2, 2); +INSERT INTO t_detach_attach_patches VALUES (1, 1, 1, 1) (1, 2, 2, 2); +INSERT INTO t_detach_attach_patches VALUES (2, 1, 1, 1) (2, 2, 2, 2); +INSERT INTO t_detach_attach_patches VALUES (3, 1, 1, 1) (3, 2, 2, 2); +INSERT INTO t_detach_attach_patches VALUES (4, 1, 1, 1) (4, 2, 2, 2); +INSERT INTO t_detach_attach_patches VALUES (5, 1, 1, 1) (5, 2, 2, 2); + +UPDATE t_detach_attach_patches SET b = b + 1 WHERE a = 1; +UPDATE t_detach_attach_patches SET c = c + 2 WHERE a = 2; +UPDATE t_detach_attach_patches SET b = b + 3, c = c + 3 WHERE 1; + +SELECT '=========='; +SELECT * FROM t_detach_attach_patches ORDER BY ALL; + +ALTER TABLE t_detach_attach_patches DETACH PARTITION 0; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE t_detach_attach_patches APPLY PATCHES IN PARTITION 0; +ALTER TABLE t_detach_attach_patches DETACH PARTITION 0; + +ALTER TABLE t_detach_attach_patches DETACH PART '1_0_0_0'; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE t_detach_attach_patches APPLY PATCHES IN PARTITION 1; +ALTER TABLE t_detach_attach_patches DETACH PART '1_0_0_0_4'; + +ALTER TABLE t_detach_attach_patches MOVE PARTITION 2 TO TABLE t_detach_attach_patches_dst; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE t_detach_attach_patches APPLY PATCHES IN PARTITION 2; +ALTER TABLE t_detach_attach_patches MOVE PARTITION 2 TO TABLE t_detach_attach_patches_dst; + +ALTER TABLE t_detach_attach_patches_dst REPLACE PARTITION 3 FROM t_detach_attach_patches; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE t_detach_attach_patches APPLY PATCHES IN PARTITION 3; +ALTER TABLE t_detach_attach_patches_dst REPLACE PARTITION 3 FROM t_detach_attach_patches; + +ALTER TABLE t_detach_attach_patches DROP PARTITION 4; +ALTER TABLE t_detach_attach_patches DROP PART '5_0_0_0'; + +SELECT '=========='; +SELECT * FROM t_detach_attach_patches ORDER BY ALL; + +ALTER TABLE t_detach_attach_patches 
ATTACH PARTITION 0; +ALTER TABLE t_detach_attach_patches ATTACH PART '1_0_0_0_4'; + +SET apply_patch_parts = 0; + +SELECT '=========='; +SELECT * FROM t_detach_attach_patches ORDER BY ALL; + +DROP TABLE t_detach_attach_patches SYNC; +DROP TABLE t_detach_attach_patches_dst SYNC; diff --git a/parser/testdata/03100_lwu_23_apply_patches/ast.json b/parser/testdata/03100_lwu_23_apply_patches/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03100_lwu_23_apply_patches/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03100_lwu_23_apply_patches/metadata.json b/parser/testdata/03100_lwu_23_apply_patches/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_23_apply_patches/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_23_apply_patches/query.sql b/parser/testdata/03100_lwu_23_apply_patches/query.sql new file mode 100644 index 000000000..3977e3290 --- /dev/null +++ b/parser/testdata/03100_lwu_23_apply_patches/query.sql @@ -0,0 +1,74 @@ +-- Tags: no-replicated-database, long +-- Tag no-replicated-database: profile events for mutations may differ because of additional replicas. + +DROP TABLE IF EXISTS t_apply_patches SYNC; +DROP TABLE IF EXISTS t_apply_patches_smt SYNC; + +SET enable_lightweight_update = 1; + +CREATE TABLE t_apply_patches (a UInt64, b UInt64, c UInt64, d UInt64) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0, + ratio_of_defaults_for_sparse_serialization = 1.0, + enable_block_number_column = 1, + enable_block_offset_column = 1; + +SET mutations_sync = 2; + +INSERT INTO t_apply_patches SELECT number, 0, 0, 0 FROM numbers(10000); + +UPDATE t_apply_patches SET b = 1 WHERE a % 4 = 0; +UPDATE t_apply_patches SET c = 2 WHERE a % 4 = 0; +UPDATE t_apply_patches SET b = 3, c = 4 WHERE a % 4 = 1; + +SELECT b, c, count() FROM t_apply_patches GROUP BY b, c ORDER BY b, c; + +ALTER TABLE t_apply_patches APPLY PATCHES; + +SELECT b, c, count() FROM t_apply_patches GROUP BY b, c ORDER BY b, c SETTINGS apply_patch_parts = 0; + +SYSTEM FLUSH LOGS part_log; + +SELECT + ProfileEvents['MutationSomePartColumns'], + ProfileEvents['MutatedUncompressedBytes'] -- 2 * 8 * 10000 = 160000, because only 2 columns must be affected. 
+FROM system.part_log WHERE database = currentDatabase() AND table = 't_apply_patches' AND event_type = 'MutatePart'
+ORDER BY ALL;
+
+CREATE TABLE t_apply_patches_smt (a UInt64, b UInt64, c UInt64, d UInt64)
+ENGINE = ReplicatedMergeTree('/zookeeper/{database}/t_apply_patches_smt/', '1')
+ORDER BY tuple()
+SETTINGS
+ min_bytes_for_wide_part = 0,
+ min_bytes_for_full_part_storage = 0,
+ ratio_of_defaults_for_sparse_serialization = 1.0,
+ enable_block_number_column = 1,
+ enable_block_offset_column = 1;
+
+SET mutations_sync = 2;
+
+INSERT INTO t_apply_patches_smt SELECT number, 0, 0, 0 FROM numbers(10000);
+
+UPDATE t_apply_patches_smt SET b = 1 WHERE a % 4 = 0;
+UPDATE t_apply_patches_smt SET c = 2 WHERE a % 4 = 0;
+UPDATE t_apply_patches_smt SET b = 3, c = 4 WHERE a % 4 = 1;
+
+SELECT b, c, count() FROM t_apply_patches_smt GROUP BY b, c ORDER BY b, c;
+
+ALTER TABLE t_apply_patches_smt APPLY PATCHES;
+
+SELECT b, c, count() FROM t_apply_patches_smt GROUP BY b, c ORDER BY b, c SETTINGS apply_patch_parts = 0;
+
+SYSTEM FLUSH LOGS part_log;
+
+SELECT
+ ProfileEvents['MutationSomePartColumns'],
+ ProfileEvents['MutatedUncompressedBytes'] -- 2 * 8 * 10000 = 160000, because only 2 columns must be affected.
+FROM system.part_log WHERE database = currentDatabase() AND table = 't_apply_patches_smt' AND event_type = 'MutatePart'
+ORDER BY ALL;
+
+DROP TABLE IF EXISTS t_apply_patches SYNC;
+DROP TABLE IF EXISTS t_apply_patches_smt SYNC;
diff --git a/parser/testdata/03100_lwu_26_subcolumns/ast.json b/parser/testdata/03100_lwu_26_subcolumns/ast.json
new file mode 100644
index 000000000..3c210b499
--- /dev/null
+++ b/parser/testdata/03100_lwu_26_subcolumns/ast.json
@@ -0,0 +1,28 @@
+{
+ "meta":
+ [
+ {
+ "name": "explain",
+ "type": "String"
+ }
+ ],
+
+ "data":
+ [
+ {
+ "explain": "DropQuery t_lwu_subcolumns (children 1)"
+ },
+ {
+ "explain": " Identifier t_lwu_subcolumns"
+ }
+ ],
+
+ "rows": 2,
+
+ "statistics":
+ {
+ "elapsed": 0.001217908,
+ "rows_read": 2,
+ "bytes_read": 84
+ }
+}
diff --git a/parser/testdata/03100_lwu_26_subcolumns/metadata.json b/parser/testdata/03100_lwu_26_subcolumns/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03100_lwu_26_subcolumns/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03100_lwu_26_subcolumns/query.sql b/parser/testdata/03100_lwu_26_subcolumns/query.sql
new file mode 100644
index 000000000..9d8c89383
--- /dev/null
+++ b/parser/testdata/03100_lwu_26_subcolumns/query.sql
@@ -0,0 +1,39 @@
+DROP TABLE IF EXISTS t_lwu_subcolumns;
+
+SET enable_json_type = 1;
+SET enable_lightweight_update = 1;
+
+CREATE TABLE t_lwu_subcolumns(data JSON, arr Array(UInt32), n Nullable(String))
+ENGINE = MergeTree ORDER BY tuple()
+SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1;
+
+INSERT INTO t_lwu_subcolumns VALUES ('{"a": 1, "b": "foo"}', [1, 2, 3, 4], NULL);
+
+UPDATE t_lwu_subcolumns SET data = '{"a": "qqww", "c": 2}' WHERE 1;
+UPDATE t_lwu_subcolumns SET arr = [100, 200], n = 'aaa' WHERE 1;
+
+SET apply_patch_parts = 0;
+
+SELECT * FROM t_lwu_subcolumns;
+SELECT data.a, data.b, data.c FROM t_lwu_subcolumns;
+SELECT arr.size0 FROM t_lwu_subcolumns;
+SELECT n.null FROM t_lwu_subcolumns;
+
+SET apply_patch_parts = 1;
+
+SELECT * FROM t_lwu_subcolumns;
+SELECT data.a, data.b, data.c FROM t_lwu_subcolumns;
+SELECT arr.size0 FROM t_lwu_subcolumns;
+SELECT n.null FROM t_lwu_subcolumns;
+
+SET optimize_throw_if_noop = 1;
+OPTIMIZE TABLE t_lwu_subcolumns FINAL;
+
+SET apply_patch_parts =
0; + +SELECT * FROM t_lwu_subcolumns; +SELECT data.a, data.b, data.c FROM t_lwu_subcolumns; +SELECT arr.size0 FROM t_lwu_subcolumns; +SELECT n.null FROM t_lwu_subcolumns; + +DROP TABLE t_lwu_subcolumns; diff --git a/parser/testdata/03100_lwu_27_update_after_on_fly_mutations/ast.json b/parser/testdata/03100_lwu_27_update_after_on_fly_mutations/ast.json new file mode 100644 index 000000000..c950f0e2a --- /dev/null +++ b/parser/testdata/03100_lwu_27_update_after_on_fly_mutations/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lwu_on_fly (children 1)" + }, + { + "explain": " Identifier t_lwu_on_fly" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001267784, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/03100_lwu_27_update_after_on_fly_mutations/metadata.json b/parser/testdata/03100_lwu_27_update_after_on_fly_mutations/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_27_update_after_on_fly_mutations/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_27_update_after_on_fly_mutations/query.sql b/parser/testdata/03100_lwu_27_update_after_on_fly_mutations/query.sql new file mode 100644 index 000000000..2cec2b163 --- /dev/null +++ b/parser/testdata/03100_lwu_27_update_after_on_fly_mutations/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS t_lwu_on_fly SYNC; +SET enable_lightweight_update = 1; + +CREATE TABLE t_lwu_on_fly (id UInt64, a UInt64, b UInt64, c UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/t_lwu_on_fly', '1') ORDER BY id +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +SYSTEM STOP MERGES t_lwu_on_fly; + +INSERT INTO t_lwu_on_fly (id) VALUES (1) (2) (3); + +SET apply_patch_parts = 1; +SET apply_mutations_on_fly = 1; + +UPDATE t_lwu_on_fly SET a = 2 WHERE id = 2; + +ALTER TABLE t_lwu_on_fly UPDATE b = 20 WHERE a = 2 SETTINGS mutations_sync = 0; + +UPDATE t_lwu_on_fly SET c = 200 WHERE b = 20; + +SELECT * FROM t_lwu_on_fly ORDER BY id; + +DROP TABLE t_lwu_on_fly SYNC; diff --git a/parser/testdata/03100_lwu_30_join_cache/ast.json b/parser/testdata/03100_lwu_30_join_cache/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03100_lwu_30_join_cache/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03100_lwu_30_join_cache/metadata.json b/parser/testdata/03100_lwu_30_join_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_30_join_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_30_join_cache/query.sql b/parser/testdata/03100_lwu_30_join_cache/query.sql new file mode 100644 index 000000000..62e3d8358 --- /dev/null +++ b/parser/testdata/03100_lwu_30_join_cache/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-tsan, no-asan, no-msan, no-parallel, no-debug + +DROP TABLE IF EXISTS t_patch_join_cache; + +CREATE TABLE t_patch_join_cache (a UInt64, s String) +ENGINE = ReplicatedMergeTree('/zookeeper/{database}/t_patch_join_cache/', '1') ORDER BY a +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +SET enable_lightweight_update = 1; + +INSERT INTO t_patch_join_cache SELECT number, '' FROM numbers(3000000); + +UPDATE t_patch_join_cache SET s = 'foo' WHERE 1; +OPTIMIZE TABLE t_patch_join_cache PARTITION ID 'all' FINAL SETTINGS optimize_throw_if_noop = 1; + 
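+-- The count below must see the value written by the UPDATE while staying
+-- within the 1Gi memory limit across 8 threads.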
+SELECT count() FROM t_patch_join_cache WHERE s = 'foo' SETTINGS max_threads = 8, max_memory_usage = '1Gi'; + +DROP TABLE t_patch_join_cache; diff --git a/parser/testdata/03100_lwu_31_merge_memory_usage/ast.json b/parser/testdata/03100_lwu_31_merge_memory_usage/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03100_lwu_31_merge_memory_usage/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03100_lwu_31_merge_memory_usage/metadata.json b/parser/testdata/03100_lwu_31_merge_memory_usage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_31_merge_memory_usage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_31_merge_memory_usage/query.sql b/parser/testdata/03100_lwu_31_merge_memory_usage/query.sql new file mode 100644 index 000000000..25f77dd0b --- /dev/null +++ b/parser/testdata/03100_lwu_31_merge_memory_usage/query.sql @@ -0,0 +1,22 @@ +-- Tags: no-tsan, no-asan, no-msan, no-debug, no-random-settings, no-replicated-database +-- memory usage can differ with sanitizers and in debug mode +-- no-replicated-database because test may fail due to adding additional shard + +SET enable_lightweight_update = 1; + +DROP TABLE IF EXISTS t_lwu_memory SYNC; + +CREATE TABLE t_lwu_memory (id UInt64, value String) +ENGINE = ReplicatedMergeTree('/zookeeper/{database}/t_lwu_memory/', '1') ORDER BY id +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +INSERT INTO t_lwu_memory SELECT number, '' FROM numbers(5000000); +OPTIMIZE TABLE t_lwu_memory FINAL; + +UPDATE t_lwu_memory SET value = toString(id) WHERE 1; +OPTIMIZE TABLE t_lwu_memory PARTITION ID 'patch-193eaced72cfb2f63d65ea2798b72338-all' FINAL; + +SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = 't_lwu_memory' AND active = 1; +SELECT sum(id), sum(toUInt64(value)) FROM t_lwu_memory SETTINGS max_memory_usage = '150M', max_threads = 4; + +DROP TABLE t_lwu_memory SYNC; diff --git a/parser/testdata/03100_lwu_32_on_fly_filter/ast.json b/parser/testdata/03100_lwu_32_on_fly_filter/ast.json new file mode 100644 index 000000000..3f9a385f9 --- /dev/null +++ b/parser/testdata/03100_lwu_32_on_fly_filter/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00143499, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03100_lwu_32_on_fly_filter/metadata.json b/parser/testdata/03100_lwu_32_on_fly_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_32_on_fly_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_32_on_fly_filter/query.sql b/parser/testdata/03100_lwu_32_on_fly_filter/query.sql new file mode 100644 index 000000000..749bce040 --- /dev/null +++ b/parser/testdata/03100_lwu_32_on_fly_filter/query.sql @@ -0,0 +1,27 @@ +SET enable_lightweight_update=1; + +DROP TABLE IF EXISTS lwu_on_fly; + +CREATE TABLE lwu_on_fly (id UInt64, u UInt64, s String) +ENGINE = MergeTree +ORDER BY id PARTITION BY id % 2 +SETTINGS + enable_block_number_column = 1, + enable_block_offset_column = 1, + apply_patches_on_merge = 1; + +SYSTEM STOP MERGES lwu_on_fly; + +INSERT INTO lwu_on_fly SELECT number, number, 'c' || number FROM numbers(10); +UPDATE lwu_on_fly SET u = 0 WHERE id % 3 = 2; + +ALTER TABLE lwu_on_fly DELETE WHERE 
id IN (4, 5) SETTINGS mutations_sync = 0; +UPDATE lwu_on_fly SET u = 0 WHERE id % 3 = 1; + +SELECT * FROM lwu_on_fly ORDER BY id settings apply_mutations_on_fly = 1, apply_patch_parts = 1; + +ALTER TABLE lwu_on_fly DELETE WHERE 1 SETTINGS mutations_sync = 0; + +SELECT count() FROM lwu_on_fly WHERE NOT ignore(*) SETTINGS apply_mutations_on_fly = 1, apply_patch_parts = 1; + +DROP TABLE IF EXISTS lwu_on_fly; diff --git a/parser/testdata/03100_lwu_33_add_column/ast.json b/parser/testdata/03100_lwu_33_add_column/ast.json new file mode 100644 index 000000000..fed0ce2b1 --- /dev/null +++ b/parser/testdata/03100_lwu_33_add_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lwu_add_column (children 1)" + }, + { + "explain": " Identifier t_lwu_add_column" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001497993, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/03100_lwu_33_add_column/metadata.json b/parser/testdata/03100_lwu_33_add_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_33_add_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_33_add_column/query.sql b/parser/testdata/03100_lwu_33_add_column/query.sql new file mode 100644 index 000000000..bf2ac957f --- /dev/null +++ b/parser/testdata/03100_lwu_33_add_column/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS t_lwu_add_column; +SET enable_lightweight_update = 1; + +CREATE TABLE t_lwu_add_column(a UInt64) +ENGINE = MergeTree ORDER BY tuple() +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +INSERT INTO t_lwu_add_column (a) SELECT number FROM numbers(100000); + +ALTER TABLE t_lwu_add_column ADD COLUMN b UInt64; + +UPDATE t_lwu_add_column SET b = 1 WHERE a % 2 = 0; + +ALTER TABLE t_lwu_add_column ADD COLUMN c Array(String); + +UPDATE t_lwu_add_column SET b = 2, c = ['a', 'b', 'c'] WHERE a % 3 = 0; + +SELECT a % 6 AS n, sum(b), groupUniqArray(c) FROM t_lwu_add_column GROUP BY n ORDER BY n; +SELECT * FROM t_lwu_add_column ORDER BY a LIMIT 10; + +DROP TABLE IF EXISTS t_lwu_add_column; diff --git a/parser/testdata/03100_lwu_34_multistep_prewhere/ast.json b/parser/testdata/03100_lwu_34_multistep_prewhere/ast.json new file mode 100644 index 000000000..cdb2dc885 --- /dev/null +++ b/parser/testdata/03100_lwu_34_multistep_prewhere/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lwu_multistep (children 1)" + }, + { + "explain": " Identifier t_lwu_multistep" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001285256, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/03100_lwu_34_multistep_prewhere/metadata.json b/parser/testdata/03100_lwu_34_multistep_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_34_multistep_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_34_multistep_prewhere/query.sql b/parser/testdata/03100_lwu_34_multistep_prewhere/query.sql new file mode 100644 index 000000000..f22e1e3f2 --- /dev/null +++ b/parser/testdata/03100_lwu_34_multistep_prewhere/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS t_lwu_multistep; + +SET enable_multiple_prewhere_read_steps = 1; +SET enable_lightweight_update = 1; +SET move_all_conditions_to_prewhere = 1; + 
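+-- With all conditions moved to PREWHERE and multi-step PREWHERE enabled,
+-- the count queries below must read the patched values of a, b and c
+-- across several filtering steps.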
+CREATE TABLE t_lwu_multistep(a UInt64, b UInt64, c UInt64, d UInt64, e UInt64) +ENGINE = MergeTree ORDER BY tuple() +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +INSERT INTO t_lwu_multistep SELECT number % 2, number, number, number, number % 2 FROM numbers(100000); + +UPDATE t_lwu_multistep SET a = a + 1 WHERE 1; +UPDATE t_lwu_multistep SET b = b + 1 WHERE b < 50000; +UPDATE t_lwu_multistep SET c = c + 1000000 WHERE c < 50000; + +SELECT count() FROM t_lwu_multistep WHERE a = 1 AND b > 10000 AND c < 100000; +SELECT count() FROM t_lwu_multistep WHERE a = 0 AND b > 10000 AND c < 100000; +SELECT count() FROM t_lwu_multistep WHERE a = 1 AND b > 10000 AND c > 200000; +SELECT count() FROM t_lwu_multistep WHERE a = 0 AND b > 10000 AND c > 200000; + +DROP TABLE IF EXISTS t_lwu_multistep; diff --git a/parser/testdata/03100_lwu_35_lock_profile_events/ast.json b/parser/testdata/03100_lwu_35_lock_profile_events/ast.json new file mode 100644 index 000000000..6730ab8cc --- /dev/null +++ b/parser/testdata/03100_lwu_35_lock_profile_events/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lwu_lock_profile_events (children 1)" + }, + { + "explain": " Identifier t_lwu_lock_profile_events" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001201433, + "rows_read": 2, + "bytes_read": 102 + } +} diff --git a/parser/testdata/03100_lwu_35_lock_profile_events/metadata.json b/parser/testdata/03100_lwu_35_lock_profile_events/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_35_lock_profile_events/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_35_lock_profile_events/query.sql b/parser/testdata/03100_lwu_35_lock_profile_events/query.sql new file mode 100644 index 000000000..731232916 --- /dev/null +++ b/parser/testdata/03100_lwu_35_lock_profile_events/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS t_lwu_lock_profile_events SYNC; + +CREATE TABLE t_lwu_lock_profile_events (id UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/t_lwu_lock_profile_events', '1') +ORDER BY id +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +SET enable_lightweight_update = 1; +SET lightweight_delete_mode = 'lightweight_update_force'; + +INSERT INTO t_lwu_lock_profile_events SELECT number FROM numbers(100000); +DELETE FROM t_lwu_lock_profile_events WHERE id < 10000; + +SELECT count() FROM t_lwu_lock_profile_events; +SYSTEM FLUSH LOGS query_log; + +SELECT ProfileEvents['PatchesAcquireLockTries'], ProfileEvents['PatchesAcquireLockMicroseconds'] > 0 +FROM system.query_log +WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND query LIKE '%DELETE FROM t_lwu_lock_profile_events WHERE id < 10000%'; + +DROP TABLE t_lwu_lock_profile_events SYNC; diff --git a/parser/testdata/03100_lwu_36_json_skip_indexes/ast.json b/parser/testdata/03100_lwu_36_json_skip_indexes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03100_lwu_36_json_skip_indexes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03100_lwu_36_json_skip_indexes/metadata.json b/parser/testdata/03100_lwu_36_json_skip_indexes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_36_json_skip_indexes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03100_lwu_36_json_skip_indexes/query.sql b/parser/testdata/03100_lwu_36_json_skip_indexes/query.sql
new file mode 100644
index 000000000..38936956b
--- /dev/null
+++ b/parser/testdata/03100_lwu_36_json_skip_indexes/query.sql
@@ -0,0 +1,74 @@
+-- Tags: no-parallel-replicas
+-- no-parallel-replicas: the result of EXPLAIN differs with parallel replicas
+
+SET use_skip_indexes_on_data_read = 0;
+
+DROP TABLE IF EXISTS test;
+
+CREATE TABLE test (
+ id UInt64,
+ document JSON(name String, age UInt16),
+ INDEX ix_name document.name TYPE bloom_filter(0.01) GRANULARITY 1,
+ INDEX ix_country document.country::String TYPE bloom_filter(0.01) GRANULARITY 1
+)
+ENGINE = MergeTree()
+ORDER BY (id)
+SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1, index_granularity = 1;
+
+INSERT INTO test VALUES (1, '{"name":"foo", "age":15}');
+INSERT INTO test VALUES (2, '{"name":"boo", "age":15}');
+INSERT INTO test VALUES (3, '{"name":"bar", "age":15}');
+
+SET enable_lightweight_update = 1;
+
+UPDATE test SET document = '{"name":"aaa", "age":15, "country": "USA"}' WHERE id = 1;
+
+SELECT * FROM test
+WHERE document.name = 'aaa' OR document.name = 'boo'
+ORDER BY id
+SETTINGS apply_patch_parts = 1;
+
+SELECT trim(explain) AS s FROM (
+ EXPLAIN indexes = 1
+ SELECT * FROM test
+ WHERE document.name = 'aaa' OR document.name = 'boo'
+ ORDER BY id
+ SETTINGS apply_patch_parts = 1
+) WHERE s LIKE 'Granules: %';
+
+SELECT * FROM test
+WHERE document.name = 'aaa' OR document.name = 'boo'
+ORDER BY id
+SETTINGS apply_patch_parts = 0;
+
+SELECT trim(explain) AS s FROM (
+ EXPLAIN indexes = 1
+ SELECT * FROM test
+ WHERE document.name = 'aaa' OR document.name = 'boo'
+ ORDER BY id
+ SETTINGS apply_patch_parts = 0
+) WHERE s LIKE 'Granules: %';
+
+SELECT count() FROM test
+WHERE document.country::String = 'USA'
+SETTINGS apply_patch_parts = 1;
+
+SELECT trim(explain) AS s FROM (
+ EXPLAIN indexes = 1
+ SELECT count() FROM test
+ WHERE document.country::String = 'USA'
+ SETTINGS apply_patch_parts = 1
+) WHERE s LIKE 'Granules: %';
+
+SELECT count() FROM test
+WHERE document.country::String = 'USA'
+SETTINGS apply_patch_parts = 0;
+
+SELECT trim(explain) AS s FROM (
+ EXPLAIN indexes = 1
+ SELECT count() FROM test
+ WHERE document.country::String = 'USA'
+ SETTINGS apply_patch_parts = 0
+) WHERE s LIKE 'Granules: %';
+
+DROP TABLE IF EXISTS test;
diff --git a/parser/testdata/03100_lwu_37_update_all_columns/ast.json b/parser/testdata/03100_lwu_37_update_all_columns/ast.json
new file mode 100644
index 000000000..30679d7a2
--- /dev/null
+++ b/parser/testdata/03100_lwu_37_update_all_columns/ast.json
@@ -0,0 +1,28 @@
+{
+ "meta":
+ [
+ {
+ "name": "explain",
+ "type": "String"
+ }
+ ],
+
+ "data":
+ [
+ {
+ "explain": "DropQuery t0 (children 1)"
+ },
+ {
+ "explain": " Identifier t0"
+ }
+ ],
+
+ "rows": 2,
+
+ "statistics":
+ {
+ "elapsed": 0.001387995,
+ "rows_read": 2,
+ "bytes_read": 56
+ }
+}
diff --git a/parser/testdata/03100_lwu_37_update_all_columns/metadata.json b/parser/testdata/03100_lwu_37_update_all_columns/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03100_lwu_37_update_all_columns/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03100_lwu_37_update_all_columns/query.sql b/parser/testdata/03100_lwu_37_update_all_columns/query.sql
new file mode 100644
index 000000000..fb0bed97f
--- /dev/null
+++ b/parser/testdata/03100_lwu_37_update_all_columns/query.sql
@@ -0,0 +1,10 @@
+DROP TABLE IF EXISTS t0;
+CREATE
TABLE t0 (c0 Int) ENGINE = MergeTree() ORDER BY tuple() SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +INSERT INTO TABLE t0 (c0) VALUES (0); + +SET enable_lightweight_update = 1; +UPDATE t0 SET c0 = 1 WHERE 1; +SELECT c0 FROM t0 ORDER BY c0; + +DROP TABLE IF EXISTS t0; diff --git a/parser/testdata/03100_lwu_38_replacing/ast.json b/parser/testdata/03100_lwu_38_replacing/ast.json new file mode 100644 index 000000000..263892984 --- /dev/null +++ b/parser/testdata/03100_lwu_38_replacing/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lwu_replacing (children 1)" + }, + { + "explain": " Identifier t_lwu_replacing" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001626346, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/03100_lwu_38_replacing/metadata.json b/parser/testdata/03100_lwu_38_replacing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_38_replacing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_38_replacing/query.sql b/parser/testdata/03100_lwu_38_replacing/query.sql new file mode 100644 index 000000000..777fc8c1f --- /dev/null +++ b/parser/testdata/03100_lwu_38_replacing/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS t_lwu_replacing; + +CREATE TABLE t_lwu_replacing (id UInt64, value String, timestamp DateTime) +ENGINE = ReplacingMergeTree(timestamp) +ORDER BY id SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +SYSTEM STOP MERGES t_lwu_replacing; + +INSERT INTO t_lwu_replacing SELECT number, 'v' || toString(number), now() FROM numbers(10); +INSERT INTO t_lwu_replacing SELECT number, 'v' || toString(number), now() FROM numbers(5, 10); + +SET apply_patch_parts = 1; +SET enable_lightweight_update = 1; +SET lightweight_delete_mode = 'lightweight_update_force'; + +DELETE FROM t_lwu_replacing WHERE id % 2 = 0; +SELECT id, value FROM t_lwu_replacing FINAL ORDER BY id; diff --git a/parser/testdata/03100_lwu_39_after_replace_partition/ast.json b/parser/testdata/03100_lwu_39_after_replace_partition/ast.json new file mode 100644 index 000000000..0ac561b15 --- /dev/null +++ b/parser/testdata/03100_lwu_39_after_replace_partition/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lwu_replace (children 1)" + }, + { + "explain": " Identifier t_lwu_replace" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00136841, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/03100_lwu_39_after_replace_partition/metadata.json b/parser/testdata/03100_lwu_39_after_replace_partition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_39_after_replace_partition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_39_after_replace_partition/query.sql b/parser/testdata/03100_lwu_39_after_replace_partition/query.sql new file mode 100644 index 000000000..b474f039b --- /dev/null +++ b/parser/testdata/03100_lwu_39_after_replace_partition/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS t_lwu_replace; + +SET enable_lightweight_update = 1; + +CREATE TABLE t_lwu_replace (c0 Int) +ENGINE = MergeTree ORDER BY tuple() +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +INSERT INTO TABLE t_lwu_replace (c0) VALUES (1); 
+ALTER TABLE t_lwu_replace REPLACE PARTITION ID '0' FROM t_lwu_replace; +UPDATE t_lwu_replace SET c0 = 2 WHERE TRUE; + +SELECT * FROM t_lwu_replace ORDER BY c0; +DROP TABLE IF EXISTS t_lwu_replace; + +CREATE TABLE t_lwu_replace (c0 Int) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/t_lwu_replace', '1') ORDER BY tuple() +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +INSERT INTO TABLE t_lwu_replace (c0) VALUES (1); +ALTER TABLE t_lwu_replace REPLACE PARTITION ID '0' FROM t_lwu_replace; +UPDATE t_lwu_replace SET c0 = 2 WHERE TRUE; + +SELECT * FROM t_lwu_replace ORDER BY c0; +DROP TABLE IF EXISTS t_lwu_replace; diff --git a/parser/testdata/03100_lwu_41_bytes_limits/ast.json b/parser/testdata/03100_lwu_41_bytes_limits/ast.json new file mode 100644 index 000000000..6a99eeaa7 --- /dev/null +++ b/parser/testdata/03100_lwu_41_bytes_limits/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lwu_bytes_limits (children 1)" + }, + { + "explain": " Identifier t_lwu_bytes_limits" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001567075, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/03100_lwu_41_bytes_limits/metadata.json b/parser/testdata/03100_lwu_41_bytes_limits/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_41_bytes_limits/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_41_bytes_limits/query.sql b/parser/testdata/03100_lwu_41_bytes_limits/query.sql new file mode 100644 index 000000000..809ff3fb9 --- /dev/null +++ b/parser/testdata/03100_lwu_41_bytes_limits/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS t_lwu_bytes_limits; + +CREATE TABLE t_lwu_bytes_limits (id UInt64, s String) +ENGINE = MergeTree ORDER BY id +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1, max_uncompressed_bytes_in_patches = '100Ki'; + +SET enable_lightweight_update = 1; + +INSERT INTO t_lwu_bytes_limits SELECT number, randomPrintableASCII(10) FROM numbers(1000000); + +UPDATE t_lwu_bytes_limits SET s = 'foo' WHERE id = 1000; +UPDATE t_lwu_bytes_limits SET s = 'foo' WHERE id = 101000; +UPDATE t_lwu_bytes_limits SET s = randomPrintableASCII(100) WHERE 1; -- { serverError TOO_LARGE_LIGHTWEIGHT_UPDATES } + +SELECT id FROM t_lwu_bytes_limits WHERE s = 'foo' ORDER BY id; + +DROP TABLE t_lwu_bytes_limits; + +CREATE TABLE t_lwu_bytes_limits (id UInt64, s String) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/03100_lwu_41_bytes_limits', '1') ORDER BY id +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1, max_uncompressed_bytes_in_patches = '100Ki'; + +SET enable_lightweight_update = 1; + +INSERT INTO t_lwu_bytes_limits SELECT number, randomPrintableASCII(10) FROM numbers(1000000); + +UPDATE t_lwu_bytes_limits SET s = 'foo' WHERE id = 1000; +UPDATE t_lwu_bytes_limits SET s = 'foo' WHERE id = 101000; +UPDATE t_lwu_bytes_limits SET s = randomPrintableASCII(100) WHERE 1; -- { serverError TOO_LARGE_LIGHTWEIGHT_UPDATES } + +SELECT id FROM t_lwu_bytes_limits WHERE s = 'foo' ORDER BY id; + +DROP TABLE t_lwu_bytes_limits; diff --git a/parser/testdata/03100_lwu_43_subquery_from_rmt/ast.json b/parser/testdata/03100_lwu_43_subquery_from_rmt/ast.json new file mode 100644 index 000000000..acd8930f5 --- /dev/null +++ b/parser/testdata/03100_lwu_43_subquery_from_rmt/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": 
"explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery lightweight_test (children 1)" + }, + { + "explain": " Identifier lightweight_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001405736, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/03100_lwu_43_subquery_from_rmt/metadata.json b/parser/testdata/03100_lwu_43_subquery_from_rmt/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_43_subquery_from_rmt/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_43_subquery_from_rmt/query.sql b/parser/testdata/03100_lwu_43_subquery_from_rmt/query.sql new file mode 100644 index 000000000..04502e8a0 --- /dev/null +++ b/parser/testdata/03100_lwu_43_subquery_from_rmt/query.sql @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS lightweight_test SYNC; +DROP TABLE IF EXISTS keys SYNC; + +CREATE TABLE lightweight_test +( + ts DateTime, + value String, + key String +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/lightweight_test', '1') +PARTITION BY toYYYYMMDD(ts) +ORDER BY (key) +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +CREATE TABLE keys +( + key String +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/keys', '1') +ORDER BY (key); + +INSERT INTO lightweight_test VALUES (now(), 'val', 'key'); + +INSERT INTO keys VALUES ('key'); + +SELECT key, value FROM lightweight_test ORDER BY key; + +UPDATE lightweight_test +SET value = 'UPDATED-1' +WHERE key IN (SELECT key FROM keys); + +SELECT key, value FROM lightweight_test ORDER BY key; + +DROP TABLE IF EXISTS lightweight_test SYNC; +DROP TABLE IF EXISTS keys SYNC; diff --git a/parser/testdata/03100_lwu_44_missing_default/ast.json b/parser/testdata/03100_lwu_44_missing_default/ast.json new file mode 100644 index 000000000..9c5fbb735 --- /dev/null +++ b/parser/testdata/03100_lwu_44_missing_default/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lwu_defaults (children 1)" + }, + { + "explain": " Identifier t_lwu_defaults" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001614022, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/03100_lwu_44_missing_default/metadata.json b/parser/testdata/03100_lwu_44_missing_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_44_missing_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_44_missing_default/query.sql b/parser/testdata/03100_lwu_44_missing_default/query.sql new file mode 100644 index 000000000..abdc59d7c --- /dev/null +++ b/parser/testdata/03100_lwu_44_missing_default/query.sql @@ -0,0 +1,61 @@ +DROP TABLE IF EXISTS t_lwu_defaults; +DROP TABLE IF EXISTS t_mutation_defaults; + +CREATE TABLE t_lwu_defaults +( + x UInt32, + y UInt32 +) +ENGINE = MergeTree ORDER BY x +SETTINGS enable_block_offset_column = 1, enable_block_number_column = 1; + +CREATE TABLE t_mutation_defaults +( + x UInt32, + y UInt32 +) +ENGINE = MergeTree ORDER BY x; + +INSERT INTO t_lwu_defaults (x, y) SELECT (number + 1) AS x, (x % 1000) AS y FROM numbers(9999); +INSERT INTO t_mutation_defaults (x, y) SELECT (number + 1) AS x, (x % 1000) AS y FROM numbers(9999); + +ALTER TABLE t_lwu_defaults ADD COLUMN z UInt32 DEFAULT 0 AFTER y; +ALTER TABLE t_mutation_defaults ADD COLUMN z UInt32 DEFAULT 0 AFTER y; + +UPDATE 
t_lwu_defaults SET z = y WHERE x > 0; +ALTER TABLE t_mutation_defaults UPDATE z = y WHERE x > 0 SETTINGS mutations_sync = 2; + +SELECT intDiv(z, 100) AS a, COUNT() AS b FROM t_lwu_defaults GROUP BY a ORDER BY a LIMIT 10; +SELECT intDiv(z, 100) AS a, COUNT() AS b FROM t_mutation_defaults GROUP BY a ORDER BY a LIMIT 10; + +DROP TABLE IF EXISTS t_lwu_defaults; +DROP TABLE IF EXISTS t_mutation_defaults; + +CREATE TABLE t_lwu_defaults +( + x UInt32, + y UInt32 +) +ENGINE = MergeTree ORDER BY x +SETTINGS enable_block_offset_column = 1, enable_block_number_column = 1; + +CREATE TABLE t_mutation_defaults +( + x UInt32, + y UInt32 +) +ENGINE = MergeTree ORDER BY x; + +INSERT INTO t_lwu_defaults (x, y) SELECT (number + 1) AS x, (x % 1000) AS y FROM numbers(9999); +INSERT INTO t_mutation_defaults (x, y) SELECT (number + 1) AS x, (x % 1000) AS y FROM numbers(9999); + +ALTER TABLE t_lwu_defaults ADD COLUMN z UInt32 DEFAULT y + 1000 AFTER y; +ALTER TABLE t_mutation_defaults ADD COLUMN z UInt32 DEFAULT y + 1000 AFTER y; + +UPDATE t_lwu_defaults SET y = y + 10000 WHERE x > 0; +ALTER TABLE t_mutation_defaults UPDATE y = y + 10000 WHERE x > 0 SETTINGS mutations_sync = 2; + +SELECT intDiv(z, 100) AS a, COUNT() AS b FROM t_lwu_defaults GROUP BY a ORDER BY a LIMIT 10; +SELECT intDiv(z, 100) AS a, COUNT() AS b FROM t_mutation_defaults GROUP BY a ORDER BY a LIMIT 10; + +DROP TABLE IF EXISTS t_lwu_defaults; diff --git a/parser/testdata/03100_lwu_45_query_condition_cache/ast.json b/parser/testdata/03100_lwu_45_query_condition_cache/ast.json new file mode 100644 index 000000000..b246aa9cb --- /dev/null +++ b/parser/testdata/03100_lwu_45_query_condition_cache/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lwu_condition_cache (children 1)" + }, + { + "explain": " Identifier t_lwu_condition_cache" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00123086, + "rows_read": 2, + "bytes_read": 94 + } +} diff --git a/parser/testdata/03100_lwu_45_query_condition_cache/metadata.json b/parser/testdata/03100_lwu_45_query_condition_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_45_query_condition_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_45_query_condition_cache/query.sql b/parser/testdata/03100_lwu_45_query_condition_cache/query.sql new file mode 100644 index 000000000..f1db5ce9a --- /dev/null +++ b/parser/testdata/03100_lwu_45_query_condition_cache/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS t_lwu_condition_cache; + +SET use_query_condition_cache = 1; +SET enable_lightweight_update = 1; +SET apply_patch_parts = 1; + +CREATE TABLE t_lwu_condition_cache +( + id UInt64 DEFAULT generateSnowflakeID(), + exists UInt8 +) +ENGINE = MergeTree ORDER BY id +SETTINGS index_granularity = 8192, enable_block_number_column = 1, enable_block_offset_column = 1; + +INSERT INTO t_lwu_condition_cache (exists) SELECT 0 FROM numbers(100000); + +SELECT count() FROM t_lwu_condition_cache WHERE exists; + +UPDATE t_lwu_condition_cache SET exists = 1 WHERE 1; + +SELECT count() FROM t_lwu_condition_cache WHERE exists; + +DROP TABLE IF EXISTS t_lwu_condition_cache; diff --git a/parser/testdata/03100_lwu_46_deletes_skip_indexes/ast.json b/parser/testdata/03100_lwu_46_deletes_skip_indexes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03100_lwu_46_deletes_skip_indexes/ast.json @@ -0,0 +1 @@ 
+{"error": true} diff --git a/parser/testdata/03100_lwu_46_deletes_skip_indexes/metadata.json b/parser/testdata/03100_lwu_46_deletes_skip_indexes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_46_deletes_skip_indexes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_46_deletes_skip_indexes/query.sql b/parser/testdata/03100_lwu_46_deletes_skip_indexes/query.sql new file mode 100644 index 000000000..e541a55e6 --- /dev/null +++ b/parser/testdata/03100_lwu_46_deletes_skip_indexes/query.sql @@ -0,0 +1,55 @@ +-- Tags: no-parallel-replicas + +DROP TABLE IF EXISTS t_lwd_indexes; + +SET enable_lightweight_update = 1; +SET use_skip_indexes_on_data_read = 0; + +CREATE TABLE t_lwd_indexes +( + key UInt64, + value String, + INDEX idx_key (key) TYPE minmax GRANULARITY 1, + INDEX idx_value (value) TYPE bloom_filter(0.001) GRANULARITY 1 +) +ENGINE = MergeTree ORDER BY tuple() +SETTINGS index_granularity = 128, index_granularity_bytes = '10M', min_bytes_for_wide_part = 0, enable_block_number_column = 1, enable_block_offset_column = 1; + +INSERT INTO t_lwd_indexes SELECT number, 'v' || toString(number) FROM numbers(10000); + +DELETE FROM t_lwd_indexes WHERE key < 500 SETTINGS lightweight_delete_mode = 'alter_update'; +DELETE FROM t_lwd_indexes WHERE key < 5000 SETTINGS lightweight_delete_mode = 'lightweight_update_force'; + +SELECT count() FROM t_lwd_indexes WHERE key = 1000 SETTINGS force_data_skipping_indices = 'idx_key'; + +SELECT trim(explain) FROM +( + EXPLAIN indexes = 1 SELECT count() FROM t_lwd_indexes WHERE key = 1000 SETTINGS force_data_skipping_indices = 'idx_key' +) +WHERE explain LIKE '%Granules%'; + +SELECT count() FROM t_lwd_indexes WHERE key = 9000 SETTINGS force_data_skipping_indices = 'idx_key'; + +SELECT trim(explain) FROM +( + EXPLAIN indexes = 1 SELECT count() FROM t_lwd_indexes WHERE key = 9000 SETTINGS force_data_skipping_indices = 'idx_key' +) +WHERE explain LIKE '%Granules%'; + +SELECT count() FROM t_lwd_indexes WHERE value = 'v1000' SETTINGS force_data_skipping_indices = 'idx_value'; + +SELECT trim(explain) FROM +( + EXPLAIN indexes = 1 SELECT count() FROM t_lwd_indexes WHERE value = 'v1000' SETTINGS force_data_skipping_indices = 'idx_value' +) +WHERE explain LIKE '%Granules%'; + +SELECT count() FROM t_lwd_indexes WHERE value = 'v9000' SETTINGS force_data_skipping_indices = 'idx_value'; + +SELECT trim(explain) FROM +( + EXPLAIN indexes = 1 SELECT count() FROM t_lwd_indexes WHERE value = 'v9000' SETTINGS force_data_skipping_indices = 'idx_value' +) +WHERE explain LIKE '%Granules%'; + +DROP TABLE t_lwd_indexes; diff --git a/parser/testdata/03100_lwu_deletes_1/ast.json b/parser/testdata/03100_lwu_deletes_1/ast.json new file mode 100644 index 000000000..83c46da2c --- /dev/null +++ b/parser/testdata/03100_lwu_deletes_1/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lwu_delete (children 1)" + }, + { + "explain": " Identifier t_lwu_delete" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001535492, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/03100_lwu_deletes_1/metadata.json b/parser/testdata/03100_lwu_deletes_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_deletes_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_deletes_1/query.sql 
b/parser/testdata/03100_lwu_deletes_1/query.sql new file mode 100644 index 000000000..0c9524cb0 --- /dev/null +++ b/parser/testdata/03100_lwu_deletes_1/query.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS t_lwu_delete; + +CREATE TABLE t_lwu_delete (id UInt64, v UInt64) ENGINE = MergeTree ORDER BY id SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; +SYSTEM STOP MERGES t_lwu_delete; + +INSERT INTO t_lwu_delete SELECT number, number FROM numbers(10000); + +SET enable_lightweight_update = 1; +SET lightweight_delete_mode = 'lightweight_update_force'; + +SELECT sum(v) FROM t_lwu_delete; +SELECT count() FROM t_lwu_delete; + +DELETE FROM t_lwu_delete WHERE id % 4 = 1; + +SELECT sum(v) FROM t_lwu_delete; +SELECT count() FROM t_lwu_delete; + +UPDATE t_lwu_delete SET v = v + 1000 WHERE id % 10 = 0; + +SELECT sum(v) FROM t_lwu_delete; +SELECT count() FROM t_lwu_delete; + +DELETE FROM t_lwu_delete WHERE id % 2 = 0; + +SELECT sum(v) FROM t_lwu_delete; +SELECT count() FROM t_lwu_delete; + +DROP TABLE IF EXISTS t_lwu_delete; diff --git a/parser/testdata/03100_lwu_deletes_2/ast.json b/parser/testdata/03100_lwu_deletes_2/ast.json new file mode 100644 index 000000000..60e9c15be --- /dev/null +++ b/parser/testdata/03100_lwu_deletes_2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery lwd_test (children 1)" + }, + { + "explain": " Identifier lwd_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001180323, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/03100_lwu_deletes_2/metadata.json b/parser/testdata/03100_lwu_deletes_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_deletes_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_deletes_2/query.sql b/parser/testdata/03100_lwu_deletes_2/query.sql new file mode 100644 index 000000000..ff525195e --- /dev/null +++ b/parser/testdata/03100_lwu_deletes_2/query.sql @@ -0,0 +1,67 @@ +DROP TABLE IF EXISTS lwd_test; + +SET enable_lightweight_update = 1; +SET lightweight_delete_mode = 'lightweight_update_force'; + +CREATE TABLE lwd_test (id UInt64, value String) ENGINE = MergeTree() ORDER BY id +SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi', enable_block_number_column = 1, enable_block_offset_column = 1; + +INSERT INTO lwd_test SELECT number, randomString(10) FROM system.numbers LIMIT 1000000; + +SET mutations_sync = 2; + +SELECT 'Count', count() FROM lwd_test; +SELECT 'First row', id, length(value) FROM lwd_test ORDER BY id LIMIT 1; + +SELECT 'Delete 100K rows using lightweight DELETE'; +DELETE FROM lwd_test WHERE id < 100000; + +SELECT 'Count', count() FROM lwd_test; +SELECT 'First row', id, length(value) FROM lwd_test ORDER BY id LIMIT 1; + +SELECT 'Force merge to cleanup deleted rows'; +OPTIMIZE TABLE lwd_test FINAL; + +SELECT 'Count', count() FROM lwd_test; +SELECT 'First row', id, length(value) FROM lwd_test ORDER BY id LIMIT 1; + +SELECT 'Delete 100K more rows using lightweight DELETE'; +DELETE FROM lwd_test WHERE id < 200000; + +SELECT 'Count', count() FROM lwd_test; +SELECT 'First row', id, length(value) FROM lwd_test ORDER BY id LIMIT 1; + +SELECT 'Do UPDATE mutation'; +ALTER TABLE lwd_test UPDATE value = 'v' WHERE id % 2 == 0; + +SELECT 'Count', count() FROM lwd_test; +SELECT 'First row', id, length(value) FROM lwd_test ORDER BY id LIMIT 1; + +SELECT 'Force merge to cleanup deleted rows'; +OPTIMIZE
TABLE lwd_test FINAL; + +SELECT 'Count', count() FROM lwd_test; +SELECT 'First row', id, length(value) FROM lwd_test ORDER BY id LIMIT 1; + +SELECT 'Delete 100K more rows using lightweight DELETE'; +DELETE FROM lwd_test WHERE id < 300000; + +SELECT 'Count', count() FROM lwd_test; +SELECT 'First row', id, length(value) FROM lwd_test ORDER BY id LIMIT 1; + +SELECT 'Do ALTER DELETE mutation that does a "heavyweight" delete'; +ALTER TABLE lwd_test DELETE WHERE id % 3 == 0; + +SELECT 'Count', count() FROM lwd_test; +SELECT 'First row', id, length(value) FROM lwd_test ORDER BY id LIMIT 1; + +SELECT 'Delete 100K more rows using lightweight DELETE'; +DELETE FROM lwd_test WHERE id >= 300000 AND id < 400000; + +SELECT 'Force merge to cleanup deleted rows'; +OPTIMIZE TABLE lwd_test FINAL; + +SELECT 'Count', count() FROM lwd_test; +SELECT 'First row', id, length(value) FROM lwd_test ORDER BY id LIMIT 1; + +DROP TABLE lwd_test; diff --git a/parser/testdata/03100_lwu_deletes_3/ast.json b/parser/testdata/03100_lwu_deletes_3/ast.json new file mode 100644 index 000000000..16768af7a --- /dev/null +++ b/parser/testdata/03100_lwu_deletes_3/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lwu_deletes_3 (children 1)" + }, + { + "explain": " Identifier t_lwu_deletes_3" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001487657, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/03100_lwu_deletes_3/metadata.json b/parser/testdata/03100_lwu_deletes_3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_deletes_3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_deletes_3/query.sql b/parser/testdata/03100_lwu_deletes_3/query.sql new file mode 100644 index 000000000..e19421c0c --- /dev/null +++ b/parser/testdata/03100_lwu_deletes_3/query.sql @@ -0,0 +1,55 @@ +DROP TABLE IF EXISTS t_lwu_deletes_3 SYNC; + +CREATE TABLE t_lwu_deletes_3 (id UInt64, dt Date, v1 UInt64, v2 String) +ENGINE = ReplicatedMergeTree('/zookeeper/{database}/t_lwu_deletes_3/', '1') +ORDER BY (id, dt) +SETTINGS + enable_block_number_column = 1, + enable_block_offset_column = 1; + +SET apply_patch_parts = 1; +SET enable_lightweight_update = 1; +SET lightweight_delete_mode = 'lightweight_update_force'; + +SYSTEM STOP MERGES t_lwu_deletes_3; + +INSERT INTO t_lwu_deletes_3 SELECT number % 10000, toDate('2024-10-10'), 0, '' FROM numbers(100000); +INSERT INTO t_lwu_deletes_3 SELECT number % 10000, toDate('2024-11-11'), 0, '' FROM numbers(100000); +INSERT INTO t_lwu_deletes_3 SELECT number % 10000, toDate('2024-12-12'), 0, '' FROM numbers(100000); + +UPDATE t_lwu_deletes_3 SET v1 = 42 WHERE id = 100; +UPDATE t_lwu_deletes_3 SET v1 = 42 WHERE id = 4000; +UPDATE t_lwu_deletes_3 SET v2 = 'foo' WHERE id >= 9500; + +DELETE FROM t_lwu_deletes_3 WHERE id = 200; +DELETE FROM t_lwu_deletes_3 WHERE dt = toDate('2024-11-11') AND id >= 4000 AND id < 5000; +DELETE FROM t_lwu_deletes_3 WHERE dt = toDate('2024-11-11') AND id >= 3500 AND id < 4500; +DELETE FROM t_lwu_deletes_3 WHERE notEmpty(v2); + +SELECT 'reference'; +SELECT 300000 - 10 * 3 - 1500 * 10 - 500 * 10 * 3, 42 * 10 * 5, 0; + +SELECT 'before merge'; +SELECT count(), sum(v1), sum(notEmpty(v2)) FROM t_lwu_deletes_3; + +SELECT count(), uniqExact(partition_id), sum(rows) +FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_lwu_deletes_3' AND column = '_row_exists' AND active
AND startsWith(name, 'patch'); + +SYSTEM START MERGES t_lwu_deletes_3; +OPTIMIZE TABLE t_lwu_deletes_3 PARTITION ID 'patch-f18f7271629a324b0d26b6ad0b83a6c2-all' FINAL SETTINGS optimize_throw_if_noop = 1; + +SELECT 'after merge patch'; +SELECT count(), sum(v1), sum(notEmpty(v2)) FROM t_lwu_deletes_3; + +SELECT count(), uniqExact(partition_id), sum(rows) +FROM system.parts_columns +WHERE database = currentDatabase() AND table = 't_lwu_deletes_3' AND column = '_row_exists' AND active AND startsWith(name, 'patch'); + +OPTIMIZE TABLE t_lwu_deletes_3 PARTITION ID 'all' FINAL SETTINGS optimize_throw_if_noop = 1; + +SELECT 'after merge main'; +SELECT count(), sum(v1), sum(notEmpty(v2)) FROM t_lwu_deletes_3 SETTINGS apply_patch_parts = 0; +SELECT sum(rows) FROM system.parts WHERE database = currentDatabase() AND table = 't_lwu_deletes_3' AND NOT startsWith(name, 'patch') AND active; + +DROP TABLE t_lwu_deletes_3 SYNC; diff --git a/parser/testdata/03100_lwu_deletes_4_index/ast.json b/parser/testdata/03100_lwu_deletes_4_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03100_lwu_deletes_4_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03100_lwu_deletes_4_index/metadata.json b/parser/testdata/03100_lwu_deletes_4_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_deletes_4_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_deletes_4_index/query.sql b/parser/testdata/03100_lwu_deletes_4_index/query.sql new file mode 100644 index 000000000..16bd8d428 --- /dev/null +++ b/parser/testdata/03100_lwu_deletes_4_index/query.sql @@ -0,0 +1,25 @@ +-- Tags: no-replicated-database +-- no-replicated-database: read_rows in query_log differs because of replicated database. 
+ +DROP TABLE IF EXISTS t_lwd_index SYNC; + +CREATE TABLE t_lwd_index (id UInt64) +ENGINE = ReplicatedMergeTree('/zookeeper/{database}/t_lwd_index/', '1') +ORDER BY id +SETTINGS index_granularity = 1, enable_block_number_column = 1, enable_block_offset_column = 1; + +INSERT INTO t_lwd_index SELECT * FROM numbers(1000); + +SET enable_lightweight_update = 1; +SET lightweight_delete_mode = 'lightweight_update_force'; + +DELETE FROM t_lwd_index WHERE id = 200; +DELETE FROM t_lwd_index WHERE id IN (100, 110, 120, 130); + +SYSTEM FLUSH LOGS query_log; + +SELECT read_rows FROM system.query_log +WHERE type = 'QueryFinish' AND query like 'DELETE FROM t_lwd_index%' AND current_database = currentDatabase() +ORDER BY event_time_microseconds; + +DROP TABLE t_lwd_index; diff --git a/parser/testdata/03100_lwu_deletes_5_vertical_merge/ast.json b/parser/testdata/03100_lwu_deletes_5_vertical_merge/ast.json new file mode 100644 index 000000000..16e5785de --- /dev/null +++ b/parser/testdata/03100_lwu_deletes_5_vertical_merge/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lwu_deletes_vertical (children 1)" + }, + { + "explain": " Identifier t_lwu_deletes_vertical" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001034047, + "rows_read": 2, + "bytes_read": 96 + } +} diff --git a/parser/testdata/03100_lwu_deletes_5_vertical_merge/metadata.json b/parser/testdata/03100_lwu_deletes_5_vertical_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03100_lwu_deletes_5_vertical_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03100_lwu_deletes_5_vertical_merge/query.sql b/parser/testdata/03100_lwu_deletes_5_vertical_merge/query.sql new file mode 100644 index 000000000..e4cfeac8a --- /dev/null +++ b/parser/testdata/03100_lwu_deletes_5_vertical_merge/query.sql @@ -0,0 +1,51 @@ +DROP TABLE IF EXISTS t_lwu_deletes_vertical; + +CREATE TABLE t_lwu_deletes_vertical +( + id UInt64, + c1 UInt64, + c2 UInt64, + c3 String, + c4 String +) +ENGINE = MergeTree +ORDER BY id +SETTINGS + min_bytes_for_wide_part = 0, + enable_block_number_column = 1, + enable_block_offset_column = 1, + vertical_merge_algorithm_min_rows_to_activate = 1, + vertical_merge_algorithm_min_columns_to_activate = 1, + vertical_merge_optimize_lightweight_delete = 1; + +INSERT INTO t_lwu_deletes_vertical SELECT number, rand(), rand(), randomPrintableASCII(10), randomPrintableASCII(10) FROM numbers(100000); + +SET enable_lightweight_update = 1; +SET lightweight_delete_mode = 'lightweight_update_force'; + +DELETE FROM t_lwu_deletes_vertical WHERE id % 4 = 0; +SELECT count() FROM t_lwu_deletes_vertical; + +OPTIMIZE TABLE t_lwu_deletes_vertical FINAL; +SELECT count() FROM t_lwu_deletes_vertical; +SELECT count() FROM system.parts_columns WHERE database = currentDatabase() AND table = 't_lwu_deletes_vertical' AND active AND partition_id = 'all' AND column = '_row_exists'; + +DELETE FROM t_lwu_deletes_vertical WHERE 1; +SELECT count() FROM t_lwu_deletes_vertical; + +OPTIMIZE TABLE t_lwu_deletes_vertical FINAL; +SELECT count() FROM t_lwu_deletes_vertical; +SELECT count() FROM system.parts_columns WHERE database = currentDatabase() AND table = 't_lwu_deletes_vertical' AND active AND partition_id = 'all' AND column = '_row_exists'; + +SYSTEM FLUSH LOGS part_log; + +SELECT + merge_algorithm, + read_rows, + rows, + ProfileEvents['ReadTasksWithAppliedPatches'], + 
ProfileEvents['PatchesReadRows'] +FROM system.part_log WHERE database = currentDatabase() AND table = 't_lwu_deletes_vertical' AND event_type = 'MergeParts' +ORDER BY event_time_microseconds; + +DROP TABLE IF EXISTS t_lwu_deletes_vertical; diff --git a/parser/testdata/03101_analyzer_identifiers_1/ast.json b/parser/testdata/03101_analyzer_identifiers_1/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03101_analyzer_identifiers_1/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03101_analyzer_identifiers_1/metadata.json b/parser/testdata/03101_analyzer_identifiers_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03101_analyzer_identifiers_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03101_analyzer_identifiers_1/query.sql b/parser/testdata/03101_analyzer_identifiers_1/query.sql new file mode 100644 index 000000000..499f712e5 --- /dev/null +++ b/parser/testdata/03101_analyzer_identifiers_1/query.sql @@ -0,0 +1,53 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/23194 +-- This test add query-templates for fuzzer +SET enable_analyzer = 1; + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; +CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier}; +USE {CLICKHOUSE_DATABASE:Identifier}; + +CREATE TABLE table ( + column UInt64, + nest Nested + ( + key Nested ( + subkey UInt16 + ) + ) +) ENGINE = Memory(); + + +SELECT t.column FROM table AS t; + +USE default; +SELECT column FROM {CLICKHOUSE_DATABASE:Identifier}.table; +USE {CLICKHOUSE_DATABASE:Identifier}; + + +SELECT {CLICKHOUSE_DATABASE:Identifier}.table.column FROM table; + +-- + +SELECT t1.x, t2.x, y FROM + (SELECT x, y FROM VALUES ('x UInt16, y UInt16', (0,1))) AS t1, + (SELECT x, z FROM VALUES ('x UInt16, z UInt16', (2,3))) AS t2; + +SELECT '---'; + +SELECT 1; +SELECT dummy; +SELECT one.dummy; +SELECT system.one.dummy; + +SELECT *; + +-- + +SELECT nest.key.subkey FROM table; +SELECT table.nest FROM table ARRAY JOIN nest; + +SELECT '---'; + +SELECT * FROM (SELECT [1, 2, 3] AS arr) ARRAY JOIN arr; + +SELECT * FROM table ARRAY JOIN [1, 2, 3] AS arr; diff --git a/parser/testdata/03101_analyzer_identifiers_2/ast.json b/parser/testdata/03101_analyzer_identifiers_2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03101_analyzer_identifiers_2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03101_analyzer_identifiers_2/metadata.json b/parser/testdata/03101_analyzer_identifiers_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03101_analyzer_identifiers_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03101_analyzer_identifiers_2/query.sql b/parser/testdata/03101_analyzer_identifiers_2/query.sql new file mode 100644 index 000000000..92c3e9826 --- /dev/null +++ b/parser/testdata/03101_analyzer_identifiers_2/query.sql @@ -0,0 +1,85 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/23194 +SET enable_analyzer = 1; + +CREATE TEMPORARY TABLE test1 (a String, nest Nested(x String, y String)); + +SELECT a, nest.* FROM test1 ARRAY JOIN nest; +SELECT a, n.* FROM test1 ARRAY JOIN nest AS n; + +CREATE TEMPORARY TABLE test2 (a String, nest Array(Tuple(x String, y String))); + +SELECT a, nest.* FROM test2 ARRAY JOIN nest; +SELECT a, n.* FROM test2 ARRAY JOIN nest AS n; + + +SELECT 1 AS x, x, x + 1; +SELECT x, x + 1, 1 AS x; +SELECT x, 1 + (2 + (3 AS x)); + +SELECT '---'; + 
+SELECT 123 AS x FROM (SELECT a, x FROM (SELECT 1 AS a, 2 AS b)); + +SELECT '---'; + +SELECT 123 AS x, (SELECT x) AS y; +SELECT 123 AS x, 123 IN (SELECT x); + +SELECT '---'; + +WITH 123 AS x SELECT 555 FROM (SELECT a, x FROM (SELECT 1 AS a, 2 AS b)); + +SELECT '---'; + +-- here we refer to table `t` (defined as subquery) three times, one of them inside another scalar subquery. +WITH t AS (SELECT 1) SELECT t, (SELECT * FROM t) FROM t; -- { serverError UNKNOWN_IDENTIFIER } + +-- throws, because x is not visible outside. +SELECT x FROM (SELECT y FROM VALUES ('y UInt16', (2)) WHERE (1 AS x) = y) AS t; -- { serverError UNKNOWN_IDENTIFIER } + +-- throws, because the table name `t` is not visible outside +SELECT t.x FROM (SELECT * FROM (SELECT 1 AS x) AS t); -- { serverError UNKNOWN_IDENTIFIER } +SELECT x FROM (SELECT * FROM (SELECT 99 AS x) AS t); + +SELECT '---'; + +SELECT t.x FROM (SELECT 1 AS x) AS t; +SELECT t.a FROM (SELECT a FROM test1) AS t; +SELECT a FROM (SELECT a FROM test1) AS t; + +SELECT '---'; + +-- this is wrong, the `test1` name is not exported +SELECT test1.a FROM (SELECT a FROM test1) AS t; -- { serverError UNKNOWN_IDENTIFIER } +-- this is also wrong, the `t2` alias is not exported +SELECT test1.a FROM (SELECT a FROM test1 AS t2) AS t; -- { serverError UNKNOWN_IDENTIFIER } + + +-- does not work, `x` is not visible; +SELECT x, (SELECT 1 AS x); -- { serverError UNKNOWN_IDENTIFIER } +-- does not work either; +SELECT x IN (SELECT 1 AS x); -- { serverError UNKNOWN_IDENTIFIER } +-- this will work, but keep in mind that there are two different `x`. +SELECT x IN (SELECT 1 AS x) FROM (SELECT 1 AS x); + +SELECT '---'; + +SELECT x + 1 AS x, x FROM (SELECT 1 AS x); +SELECT x, x + 1 AS x FROM (SELECT 1 AS x); +SELECT 1 AS x, 2 AS x; -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } + +SELECT '---'; + + +SELECT arrayMap(x -> x + 1, [1, 2]); + +SELECT x, arrayMap((x, y) -> x[1] + y + arrayFirst(x -> x != y, x), arr) FROM (SELECT 1 AS x, [([1, 2], 3), ([4, 5], 6)] AS arr); + +SELECT x1, arrayMap((x2, y2) -> x2[1] + y2 + arrayFirst(x3 -> x3 != y2, x2), arr) FROM (SELECT 1 AS x1, [([1, 2], 3), ([4, 5], 6)] AS arr); + +SELECT arrayMap(x -> [y * 2, (x + 1) AS y, 1 AS z], [1, 2]), y; -- { serverError UNKNOWN_IDENTIFIER } + +-- TODO: this must work +--SELECT arrayMap(x -> [y * 2, (x + 1) AS y, 1 AS z], [1, 2]), z; + +SELECT arrayMap(x -> (x + 1) AS y, [3, 5]), arrayMap(x -> (x || 'hello') AS y, ['qq', 'ww']); -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } diff --git a/parser/testdata/03101_analyzer_identifiers_3/ast.json b/parser/testdata/03101_analyzer_identifiers_3/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03101_analyzer_identifiers_3/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03101_analyzer_identifiers_3/metadata.json b/parser/testdata/03101_analyzer_identifiers_3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03101_analyzer_identifiers_3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03101_analyzer_identifiers_3/query.sql b/parser/testdata/03101_analyzer_identifiers_3/query.sql new file mode 100644 index 000000000..f113b8bc9 --- /dev/null +++ b/parser/testdata/03101_analyzer_identifiers_3/query.sql @@ -0,0 +1,92 @@ +-- Tags: no-parallel +-- Looks like you cannot use the query parameter as a column name.
+-- https://github.com/ClickHouse/ClickHouse/issues/23194 +SET enable_analyzer = 1; + +DROP DATABASE IF EXISTS db1_03101; +DROP DATABASE IF EXISTS db2_03101; +CREATE DATABASE db1_03101; +CREATE DATABASE db2_03101; +USE db1_03101; + +CREATE TABLE db1_03101.tbl +( + col String, + db1_03101 Nested + ( + tbl Nested + ( + col String + ) + ) +) +ENGINE = Memory; + +SELECT db1_03101.tbl.col FROM db1_03101.tbl; + + +SELECT db1_03101.* FROM tbl; +SELECT db1_03101 FROM tbl; + + +SELECT * FROM tbl; +SELECT count(*) FROM tbl; +SELECT * + * FROM VALUES('a UInt16', 1, 10); + +SELECT '---'; + +SELECT * GROUP BY *; +-- not ok, as every component of ORDER BY may contain ASC/DESC and COLLATE; though it could be supported in some sense +-- but it works +SELECT * ORDER BY *; +SELECT * WHERE *; -- { serverError UNEXPECTED_EXPRESSION } + +SELECT '---'; + +SELECT * FROM (SELECT 1 AS a) AS t, (SELECT 2 AS b) AS u; +-- equivalent to: +SELECT a, b FROM (SELECT 1 AS a) AS t, (SELECT 2 AS b) AS u; + +SELECT '---'; + +SELECT * FROM (SELECT 1 AS a) AS t, (SELECT 1 AS a) AS u; +-- equivalent to: +SELECT t.a, u.a FROM (SELECT 1 AS a) AS t, (SELECT 1 AS a) AS u; + +SELECT '---'; + +---- TODO: think about it +--CREATE TABLE db1_03101.t +--( +-- a UInt16 +--) +--ENGINE = Memory; +-- +--CREATE TABLE db2_03101.t +--( +-- a UInt16 +--) +--ENGINE = Memory; +-- +--SELECT * FROM (SELECT 1 AS a) AS db2_03101.t, (SELECT 1 AS a) AS db1_03101.t; +---- equivalent to: +--SELECT db2_03101.t.a, db1_03101.t.a FROM (SELECT 1 AS a) AS db2_03101.t, (SELECT 1 AS a) AS db1_03101.t; + + +CREATE TABLE t +( + x String, + nest Nested + ( + a String, + b String + ) +) ENGINE = Memory; + +SELECT * FROM t; + +-- equivalent to: +SELECT x, nest.* FROM t; + +-- equivalent to: +SELECT x, nest.a, nest.b FROM t; diff --git a/parser/testdata/03101_analyzer_identifiers_4/ast.json b/parser/testdata/03101_analyzer_identifiers_4/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03101_analyzer_identifiers_4/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03101_analyzer_identifiers_4/metadata.json b/parser/testdata/03101_analyzer_identifiers_4/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03101_analyzer_identifiers_4/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03101_analyzer_identifiers_4/query.sql b/parser/testdata/03101_analyzer_identifiers_4/query.sql new file mode 100644 index 000000000..869310aa1 --- /dev/null +++ b/parser/testdata/03101_analyzer_identifiers_4/query.sql @@ -0,0 +1,103 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/23194 +SET enable_analyzer = 1; + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; +CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier}; +USE {CLICKHOUSE_DATABASE:Identifier}; + +-- simple tuple access operator +SELECT tuple(1, 'a').1; +-- named tuple or complex column access operator - can be applied to Nested type as well as Array of named Tuple +SELECT CAST(('hello', 1) AS Tuple(hello String, count UInt32)) AS t, t.hello; +-- TODO: this doesn't work +-- https://github.com/ClickHouse/ClickHouse/issues/57361 +-- SELECT CAST(('hello', 1) AS Tuple(hello String, count UInt32)).hello; + +-- expansion of a tuple or complex column with asterisk +SELECT tuple(1, 'a').*; + +SELECT '---'; + +SELECT CAST(('hello', 1) AS Tuple(name String, count UInt32)).*; + +SELECT untuple(CAST(('hello', 1) AS Tuple(name String, count UInt32))); -- will give two columns `name` and `count`.
+ +SELECT '---'; + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.t +( + col String, + hello String, + world String +) +ENGINE = Memory; + +CREATE TABLE {CLICKHOUSE_DATABASE:Identifier}.u +( + cc String +) +ENGINE = Memory; + +SELECT * EXCEPT('hello|world'); +-- TODO: Qualified matcher t.* EXCEPT 'hello|world' does not find table. +-- SELECT t.* EXCEPT(hello, world); +-- SELECT {CLICKHOUSE_DATABASE:Identifier}.t.* REPLACE(x + 1 AS x); + + +SELECT * EXCEPT(hello) REPLACE(x + 1 AS x); + +SELECT COLUMNS('^c') FROM t; +SELECT t.COLUMNS('^c') FROM t, u; +SELECT t.COLUMNS('^c') EXCEPT (test_hello, test_world) FROM t, u; + +SELECT '---'; + +SELECT * FROM (SELECT x, x FROM (SELECT 1 AS x)); +SELECT x FROM (SELECT x, x FROM (SELECT 1 AS x)); +SELECT 1 FROM (SELECT x, x FROM (SELECT 1 AS x)); + +SELECT '---'; + +SELECT `plus(1, 2)` FROM (SELECT 1 + 2); + +-- Lambda expressions can be aliased. (proposal) +--SELECT arrayMap(plus, [1, 2], [10, 20]); +--SELECT x -> x + 1 AS fun; + +SELECT '---'; + +SELECT x FROM numbers(5 AS x); + + +SELECT '---'; + +CREATE TEMPORARY TABLE aliased +( + x UInt8 DEFAULT 0, + y ALIAS x + 1 +); + +INSERT INTO aliased VALUES (10); + +SELECT y FROM aliased; + +CREATE TEMPORARY TABLE aliased2 +( + x UInt8, + y ALIAS ((x + 1) AS z) + 1 +); + +SELECT x, y, z FROM aliased2; -- { serverError UNKNOWN_IDENTIFIER } + + +SELECT '---'; + +CREATE TEMPORARY TABLE aliased3 +( + x UInt8, + y ALIAS z + 1, + z ALIAS x + 1 +); +INSERT INTO aliased3 VALUES (10); + +SELECT x, y, z FROM aliased3; diff --git a/parser/testdata/03101_analyzer_invalid_join_on/ast.json b/parser/testdata/03101_analyzer_invalid_join_on/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03101_analyzer_invalid_join_on/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03101_analyzer_invalid_join_on/metadata.json b/parser/testdata/03101_analyzer_invalid_join_on/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03101_analyzer_invalid_join_on/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03101_analyzer_invalid_join_on/query.sql b/parser/testdata/03101_analyzer_invalid_join_on/query.sql new file mode 100644 index 000000000..d7e26862d --- /dev/null +++ b/parser/testdata/03101_analyzer_invalid_join_on/query.sql @@ -0,0 +1,25 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/50271 + +drop table if exists t1; +drop table if exists t2; + +set enable_analyzer=1; + +create table t1 (c3 String, primary key(c3)) engine = MergeTree; +create table t2 (c11 String, primary key(c11)) engine = MergeTree; +insert into t1 values ('succeed'); +insert into t2 values ('succeed'); + +select + ref_0.c11 as c_2_c30_0 + from + t2 as ref_0 + cross join (select + ref_1.c3 as c_6_c28_15 + from + t1 as ref_1 + ) as subq_0 + where subq_0.c_6_c28_15 = (select c11 from t2 order by c11 limit 1); + +drop table if exists t1; +drop table if exists t2; diff --git a/parser/testdata/03102_prefer_column_name_to_alias/ast.json b/parser/testdata/03102_prefer_column_name_to_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03102_prefer_column_name_to_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03102_prefer_column_name_to_alias/metadata.json b/parser/testdata/03102_prefer_column_name_to_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03102_prefer_column_name_to_alias/metadata.json @@ -0,0 +1 @@ 
+{"todo": true} diff --git a/parser/testdata/03102_prefer_column_name_to_alias/query.sql b/parser/testdata/03102_prefer_column_name_to_alias/query.sql new file mode 100644 index 000000000..fafdb660e --- /dev/null +++ b/parser/testdata/03102_prefer_column_name_to_alias/query.sql @@ -0,0 +1,30 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/54954 + +DROP TABLE IF EXISTS loans; + +CREATE TABLE loans (loan_number int, security_id text) ENGINE=Memory; + +SET enable_analyzer=1; + +INSERT INTO loans VALUES (1, 'AAA'); +INSERT INTO loans VALUES (1, 'AAA'); +INSERT INTO loans VALUES (1, 'AAA'); +INSERT INTO loans VALUES (1, 'AAA'); +INSERT INTO loans VALUES (1, 'AAA'); +INSERT INTO loans VALUES (1, 'BBB'); +INSERT INTO loans VALUES (1, 'BBB'); +INSERT INTO loans VALUES (1, 'BBB'); +INSERT INTO loans VALUES (1, 'BBB'); +INSERT INTO loans VALUES (1, 'BBB'); +INSERT INTO loans VALUES (1, 'BBB'); + + +with block_0 as ( + select * from loans +), +block_1 as ( + select sum(loan_number) as loan_number from block_0 group by security_id +) +select loan_number from block_1 where loan_number > 3 order by loan_number settings prefer_column_name_to_alias = 1; + +DROP TABLE IF EXISTS loans; diff --git a/parser/testdata/03103_positional_arguments/ast.json b/parser/testdata/03103_positional_arguments/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03103_positional_arguments/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03103_positional_arguments/metadata.json b/parser/testdata/03103_positional_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03103_positional_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03103_positional_arguments/query.sql b/parser/testdata/03103_positional_arguments/query.sql new file mode 100644 index 000000000..eecaa3f4e --- /dev/null +++ b/parser/testdata/03103_positional_arguments/query.sql @@ -0,0 +1,45 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/56466 + +SET enable_analyzer=1; + +DROP TABLE IF EXISTS users; + +CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=Memory; + +INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users VALUES (6666, 'Ksenia', 48); +INSERT INTO users VALUES (8888, 'Alice', 50); + +-- The query works when using a single SELECT * +SELECT * +FROM +( + SELECT + name, + age + FROM users +) +GROUP BY + 1, + 2 +ORDER BY ALL; + +-- It doesn't when the GROUP BY is nested deeper +SELECT * +FROM +( + SELECT * + FROM + ( + SELECT + name, + age + FROM users + ) + GROUP BY + 1, + 2 +) +ORDER BY ALL; + +DROP TABLE IF EXISTS users; diff --git a/parser/testdata/03104_create_view_join/ast.json b/parser/testdata/03104_create_view_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03104_create_view_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03104_create_view_join/metadata.json b/parser/testdata/03104_create_view_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03104_create_view_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03104_create_view_join/query.sql b/parser/testdata/03104_create_view_join/query.sql new file mode 100644 index 000000000..a39be92b1 --- /dev/null +++ b/parser/testdata/03104_create_view_join/query.sql @@ -0,0 +1,26 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/11000 + +DROP TABLE IF EXISTS 
test_table_01; +DROP TABLE IF EXISTS test_table_02; +DROP TABLE IF EXISTS test_view_01; + +SET enable_analyzer = 1; + +CREATE TABLE test_table_01 ( + column Int32 +) ENGINE = Memory(); + +CREATE TABLE test_table_02 ( + column Int32 +) ENGINE = Memory(); + +CREATE VIEW test_view_01 AS +SELECT + t1.column, + t2.column +FROM test_table_01 AS t1 + INNER JOIN test_table_02 AS t2 ON t1.column = t2.column; + +DROP TABLE IF EXISTS test_table_01; +DROP TABLE IF EXISTS test_table_02; +DROP TABLE IF EXISTS test_view_01; diff --git a/parser/testdata/03105_table_aliases_in_mv/ast.json b/parser/testdata/03105_table_aliases_in_mv/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03105_table_aliases_in_mv/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03105_table_aliases_in_mv/metadata.json b/parser/testdata/03105_table_aliases_in_mv/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03105_table_aliases_in_mv/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03105_table_aliases_in_mv/query.sql b/parser/testdata/03105_table_aliases_in_mv/query.sql new file mode 100644 index 000000000..c6bb6db33 --- /dev/null +++ b/parser/testdata/03105_table_aliases_in_mv/query.sql @@ -0,0 +1,45 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/10894 + +DROP TABLE IF EXISTS event; +DROP TABLE IF EXISTS user; +DROP TABLE IF EXISTS mv; + +CREATE TABLE event ( + `event_time` DateTime, + `event_name` String, + `user_id` String +) +ENGINE = MergeTree() +ORDER BY (event_time, event_name); + +CREATE TABLE user ( + `user_id` String, + `user_type` String +) +ENGINE = MergeTree() +ORDER BY (user_id); + +INSERT INTO event VALUES ('2020-05-01 00:00:01', 'install', '1'), ('2020-05-01 00:00:02', 'install', '2'), ('2020-05-01 00:00:03', 'install', '3'); + +INSERT INTO user VALUES ('1', 'type_1'), ('2', 'type_2'), ('3', 'type_3'); + +CREATE MATERIALIZED VIEW mv +( + `event_time` DateTime, + `event_name` String, + `user_id` String, + `user_type` String +) +ENGINE = MergeTree() +ORDER BY (event_time, event_name) POPULATE AS +SELECT + e.event_time, + e.event_name, + e.user_id, + u.user_type +FROM event e +INNER JOIN user u ON u.user_id = e.user_id; + +DROP TABLE IF EXISTS event; +DROP TABLE IF EXISTS user; +DROP TABLE IF EXISTS mv; \ No newline at end of file diff --git a/parser/testdata/03107_ill_formed_select_in_materialized_view/ast.json b/parser/testdata/03107_ill_formed_select_in_materialized_view/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03107_ill_formed_select_in_materialized_view/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03107_ill_formed_select_in_materialized_view/metadata.json b/parser/testdata/03107_ill_formed_select_in_materialized_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03107_ill_formed_select_in_materialized_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03107_ill_formed_select_in_materialized_view/query.sql b/parser/testdata/03107_ill_formed_select_in_materialized_view/query.sql new file mode 100644 index 000000000..5f6ec74bd --- /dev/null +++ b/parser/testdata/03107_ill_formed_select_in_materialized_view/query.sql @@ -0,0 +1,15 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/448 + +DROP TABLE IF EXISTS a; +DROP TABLE IF EXISTS b; + +CREATE TABLE a ( a UInt64, b UInt64) ENGINE = Memory; +CREATE TABLE b ( b UInt64)
ENGINE = Memory; + +SET enable_analyzer = 1; +SET joined_subquery_requires_alias = 0; + +CREATE MATERIALIZED VIEW view_4 ( bb UInt64, cnt UInt64) Engine=MergeTree ORDER BY bb POPULATE AS SELECT bb, count() AS cnt FROM (SELECT a, b AS j, b AS bb FROM a INNER JOIN (SELECT b AS j, b AS bb FROM b ) USING (j)) GROUP BY bb; -- { serverError UNKNOWN_IDENTIFIER } + +DROP TABLE IF EXISTS a; +DROP TABLE IF EXISTS b; diff --git a/parser/testdata/03108_describe_union_all/ast.json b/parser/testdata/03108_describe_union_all/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03108_describe_union_all/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03108_describe_union_all/metadata.json b/parser/testdata/03108_describe_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03108_describe_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03108_describe_union_all/query.sql b/parser/testdata/03108_describe_union_all/query.sql new file mode 100644 index 000000000..7e207ae28 --- /dev/null +++ b/parser/testdata/03108_describe_union_all/query.sql @@ -0,0 +1,5 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/8030 + +SET enable_analyzer=1; + +DESCRIBE (SELECT 1, 1 UNION ALL SELECT 1, 2); diff --git a/parser/testdata/03109_ast_too_big/ast.json b/parser/testdata/03109_ast_too_big/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03109_ast_too_big/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03109_ast_too_big/metadata.json b/parser/testdata/03109_ast_too_big/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03109_ast_too_big/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03109_ast_too_big/query.sql b/parser/testdata/03109_ast_too_big/query.sql new file mode 100644 index 000000000..1464f90fe --- /dev/null +++ b/parser/testdata/03109_ast_too_big/query.sql @@ -0,0 +1,53 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/32139 + +SET enable_analyzer=1; + +WITH + data AS ( + SELECT + rand64() AS val1, + rand64() AS val2, + rand64() AS val3, + rand64() AS val4, + rand64() AS val5, + rand64() AS val6, + rand64() AS val7, + rand64() AS val8, + rand64() AS val9, + rand64() AS val10, + rand64() AS val11, + rand64() AS val12, + rand64() AS val13, + rand64() AS val14 + FROM numbers(10) + ), + (SELECT avg(val1) FROM data) AS value1, + (SELECT avg(val2) FROM data) AS value2, + (SELECT avg(val3) FROM data) AS value3, + (SELECT avg(val4) FROM data) AS value4, + (SELECT avg(val5) FROM data) AS value5, + (SELECT avg(val6) FROM data) AS value6, + (SELECT avg(val7) FROM data) AS value7, + (SELECT avg(val8) FROM data) AS value8, + (SELECT avg(val9) FROM data) AS value9, + (SELECT avg(val10) FROM data) AS value10, + (SELECT avg(val11) FROM data) AS value11, + (SELECT avg(val12) FROM data) AS value12, + (SELECT avg(val13) FROM data) AS value13, + (SELECT avg(val14) FROM data) AS value14 +SELECT + value1 AS v1, + value2 AS v2, + value3 AS v3, + value4 AS v4, + value5 AS v5, + value6 AS v6, + value7 AS v7, + value8 AS v8, + value9 AS v9, + value10 AS v10, + value11 AS v11, + value12 AS v12, + value13 AS v13, + value14 AS v14 +FORMAT Null; diff --git a/parser/testdata/03110_unicode_alias/ast.json b/parser/testdata/03110_unicode_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++
b/parser/testdata/03110_unicode_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03110_unicode_alias/metadata.json b/parser/testdata/03110_unicode_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03110_unicode_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03110_unicode_alias/query.sql b/parser/testdata/03110_unicode_alias/query.sql new file mode 100644 index 000000000..aa33195ea --- /dev/null +++ b/parser/testdata/03110_unicode_alias/query.sql @@ -0,0 +1,33 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/47288 + +SET enable_analyzer=1; + +select 1 as `c0` +from ( + select C.`字段` AS `字段` + from ( + select 2 as bb + ) A + LEFT JOIN ( + select '1' as `字段` + ) C ON 1 = 1 + LEFT JOIN ( + select 1 as a + ) D ON 1 = 1 + ) as `T0` +where `T0`.`字段` = '1'; + +select 1 as `c0` +from ( + select C.`$` AS `$` + from ( + select 2 as bb + ) A + LEFT JOIN ( + select '1' as `$` + ) C ON 1 = 1 + LEFT JOIN ( + select 1 as a + ) D ON 1 = 1 + ) as `T0` +where `T0`.`$` = '1'; diff --git a/parser/testdata/03111_inner_join_group_by/ast.json b/parser/testdata/03111_inner_join_group_by/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03111_inner_join_group_by/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03111_inner_join_group_by/metadata.json b/parser/testdata/03111_inner_join_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03111_inner_join_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03111_inner_join_group_by/query.sql b/parser/testdata/03111_inner_join_group_by/query.sql new file mode 100644 index 000000000..6ebaacfc3 --- /dev/null +++ b/parser/testdata/03111_inner_join_group_by/query.sql @@ -0,0 +1,18 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/50705 + +set enable_analyzer=1; + +SELECT + count(s0.number), + s1.half +FROM system.numbers AS s0 +INNER JOIN +( + SELECT + number, + number / 2 AS half + FROM system.numbers + LIMIT 10 +) AS s1 ON s0.number = s1.number +GROUP BY s0.number > 5 +LIMIT 10 -- {serverError NOT_AN_AGGREGATE} diff --git a/parser/testdata/03112_analyzer_not_found_column_in_block/ast.json b/parser/testdata/03112_analyzer_not_found_column_in_block/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03112_analyzer_not_found_column_in_block/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03112_analyzer_not_found_column_in_block/metadata.json b/parser/testdata/03112_analyzer_not_found_column_in_block/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03112_analyzer_not_found_column_in_block/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03112_analyzer_not_found_column_in_block/query.sql b/parser/testdata/03112_analyzer_not_found_column_in_block/query.sql new file mode 100644 index 000000000..cc734b2c4 --- /dev/null +++ b/parser/testdata/03112_analyzer_not_found_column_in_block/query.sql @@ -0,0 +1,27 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/54511 + +DROP TABLE IF EXISTS my_first_table; + +CREATE TABLE my_first_table +( + user_id UInt32, + message String, + timestamp DateTime, + metric Float32 +) +ENGINE = MergeTree +PRIMARY KEY (user_id, timestamp); + +INSERT INTO my_first_table (user_id, message, timestamp, metric) VALUES + (101, 'Hello, ClickHouse!', now(), -1.0 
), (102, 'Insert a lot of rows per batch', yesterday(), 1.41421 ), (102, 'Sort your data based on your commonly-used queries', today(), 2.718 ), (101, 'Granules are the smallest chunks of data read', now() + 5, 3.14159 ); + +SET enable_analyzer=1; + +SELECT + user_id + , (count(user_id) OVER (PARTITION BY user_id)) AS count +FROM my_first_table +WHERE timestamp > 0 and user_id IN (101) +LIMIT 2 BY user_id; + +DROP TABLE IF EXISTS my_first_table; diff --git a/parser/testdata/03113_analyzer_not_found_column_in_block_2/ast.json b/parser/testdata/03113_analyzer_not_found_column_in_block_2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03113_analyzer_not_found_column_in_block_2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03113_analyzer_not_found_column_in_block_2/metadata.json b/parser/testdata/03113_analyzer_not_found_column_in_block_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03113_analyzer_not_found_column_in_block_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03113_analyzer_not_found_column_in_block_2/query.sql b/parser/testdata/03113_analyzer_not_found_column_in_block_2/query.sql new file mode 100644 index 000000000..4389bdf83 --- /dev/null +++ b/parser/testdata/03113_analyzer_not_found_column_in_block_2/query.sql @@ -0,0 +1,21 @@ +-- https://github.com/ClickHouse/ClickHouse/pull/62457 + +drop table if exists t; + +create table t (ID String) Engine= Memory() ; +insert into t values('a'),('b'),('c'); + +-- This optimization is disabled by default and even its description says that it could lead to +-- inconsistencies for distributed queries. +set optimize_if_transform_strings_to_enum=0; +set enable_analyzer=1; + +SELECT multiIf( ((multiIf(ID='' AND (ID = 'a' OR ID = 'c' OR ID = 'b'),'a','x') as y) = 'c') OR + (multiIf(ID='' AND (ID = 'a' OR ID = 'c' OR ID = 'b'),'a','x') = 'b') OR + (multiIf(ID='' AND (ID = 'a' OR ID = 'c' OR ID = 'b'),'a','x') = 'd') OR + (multiIf(ID='' AND (ID = 'a' OR ID = 'c' OR ID = 'b'),'a','x') = 'e'),'test', 'x' + ) AS alias +FROM remote('127.0.0.{1,2}', currentDatabase(), t) +GROUP BY alias; + +drop table if exists t; diff --git a/parser/testdata/03114_analyzer_cte_with_join/ast.json b/parser/testdata/03114_analyzer_cte_with_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03114_analyzer_cte_with_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03114_analyzer_cte_with_join/metadata.json b/parser/testdata/03114_analyzer_cte_with_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03114_analyzer_cte_with_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03114_analyzer_cte_with_join/query.sql b/parser/testdata/03114_analyzer_cte_with_join/query.sql new file mode 100644 index 000000000..65dd3cb66 --- /dev/null +++ b/parser/testdata/03114_analyzer_cte_with_join/query.sql @@ -0,0 +1,15 @@ +-- Tags: no-replicated-database +-- https://github.com/ClickHouse/ClickHouse/issues/58500 + +SET enable_analyzer=1; + +drop table if exists t; + +create table t (ID UInt8) Engine= Memory() ; +insert into t values(1),(2),(3); + +with a as (select 1 as column_a) , b as (select 2 as column_b) + select * FROM remote('127.0.0.{1,2}', currentDatabase(), t) as c + inner join a on ID=column_a inner join b on ID=column_b; + +drop table if exists t; diff --git 
a/parser/testdata/03115_alias_exists_column/ast.json b/parser/testdata/03115_alias_exists_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03115_alias_exists_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03115_alias_exists_column/metadata.json b/parser/testdata/03115_alias_exists_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03115_alias_exists_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03115_alias_exists_column/query.sql b/parser/testdata/03115_alias_exists_column/query.sql new file mode 100644 index 000000000..65fc0d8f9 --- /dev/null +++ b/parser/testdata/03115_alias_exists_column/query.sql @@ -0,0 +1,5 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/44412 + +SET enable_analyzer=1; + +SELECT EXISTS(SELECT 1) AS mycheck FORMAT TSVWithNames; diff --git a/parser/testdata/03116_analyzer_explicit_alias_as_column_name/ast.json b/parser/testdata/03116_analyzer_explicit_alias_as_column_name/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03116_analyzer_explicit_alias_as_column_name/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03116_analyzer_explicit_alias_as_column_name/metadata.json b/parser/testdata/03116_analyzer_explicit_alias_as_column_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03116_analyzer_explicit_alias_as_column_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03116_analyzer_explicit_alias_as_column_name/query.sql b/parser/testdata/03116_analyzer_explicit_alias_as_column_name/query.sql new file mode 100644 index 000000000..d3e3a2907 --- /dev/null +++ b/parser/testdata/03116_analyzer_explicit_alias_as_column_name/query.sql @@ -0,0 +1,19 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/39923 +SET enable_analyzer=1; + +SELECT + errors.name AS labels, + value, + 'ch_errors_total' AS name +FROM system.errors +LIMIT 1 +FORMAT Null; + + +SELECT + map('name', errors.name) AS labels, + value, + 'ch_errors_total' AS name +FROM system.errors +LIMIT 1 +FORMAT Null; diff --git a/parser/testdata/03117_analyzer_same_column_name_as_func/ast.json b/parser/testdata/03117_analyzer_same_column_name_as_func/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03117_analyzer_same_column_name_as_func/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03117_analyzer_same_column_name_as_func/metadata.json b/parser/testdata/03117_analyzer_same_column_name_as_func/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03117_analyzer_same_column_name_as_func/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03117_analyzer_same_column_name_as_func/query.sql b/parser/testdata/03117_analyzer_same_column_name_as_func/query.sql new file mode 100644 index 000000000..a3f4da895 --- /dev/null +++ b/parser/testdata/03117_analyzer_same_column_name_as_func/query.sql @@ -0,0 +1,14 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/39855 +SET enable_analyzer=1; + +create table x( + a UInt64, + `sipHash64(a)` UInt64 +) engine = MergeTree order by a; +insert into x select number, number from VALUES('number UInt64', 1000, 10000, 100000); + +select a, sipHash64(a) from x order by sipHash64(a); + +select ''; + +select a, sipHash64(a) from x order by `sipHash64(a)`; 
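Editor's note on 03117 (a hedged sketch, not part of the test suite): the pair of ORDER BY clauses above is exactly what the parser has to disambiguate. `sipHash64(a)` without backticks is a function call; the backticked form is an ordinary column reference whose name merely looks like one. Following the node-name conventions of the ast.json fixtures in this suite, the two forms would parse roughly as:

EXPLAIN AST SELECT a FROM x ORDER BY sipHash64(a);
-- ... Function sipHash64 (children 1)
--       ExpressionList (children 1)
--         Identifier a

EXPLAIN AST SELECT a FROM x ORDER BY `sipHash64(a)`;
-- ... Identifier sipHash64(a)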
diff --git a/parser/testdata/03118_analyzer_multi_join_prewhere/ast.json b/parser/testdata/03118_analyzer_multi_join_prewhere/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03118_analyzer_multi_join_prewhere/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03118_analyzer_multi_join_prewhere/metadata.json b/parser/testdata/03118_analyzer_multi_join_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03118_analyzer_multi_join_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03118_analyzer_multi_join_prewhere/query.sql b/parser/testdata/03118_analyzer_multi_join_prewhere/query.sql new file mode 100644 index 000000000..84f89c2c6 --- /dev/null +++ b/parser/testdata/03118_analyzer_multi_join_prewhere/query.sql @@ -0,0 +1,17 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/4596 +SET enable_analyzer=1; + +CREATE TABLE a1 ( ANIMAL Nullable(String) ) engine = MergeTree order by tuple(); +insert into a1 values('CROCO'); + +select count() + from a1 a + join a1 b on (a.ANIMAL = b.ANIMAL) + join a1 c on (c.ANIMAL = b.ANIMAL) +where a.ANIMAL = 'CROCO'; + +select count() + from a1 a + join a1 b on (a.ANIMAL = b.ANIMAL) + join a1 c on (c.ANIMAL = b.ANIMAL) +prewhere a.ANIMAL = 'CROCO'; diff --git a/parser/testdata/03119_analyzer_window_function_in_CTE_alias/ast.json b/parser/testdata/03119_analyzer_window_function_in_CTE_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03119_analyzer_window_function_in_CTE_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03119_analyzer_window_function_in_CTE_alias/metadata.json b/parser/testdata/03119_analyzer_window_function_in_CTE_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03119_analyzer_window_function_in_CTE_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03119_analyzer_window_function_in_CTE_alias/query.sql b/parser/testdata/03119_analyzer_window_function_in_CTE_alias/query.sql new file mode 100644 index 000000000..edbb324bd --- /dev/null +++ b/parser/testdata/03119_analyzer_window_function_in_CTE_alias/query.sql @@ -0,0 +1,21 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/47422 +SET enable_analyzer=1; + +DROP TEMPORARY TABLE IF EXISTS test; +CREATE TEMPORARY TABLE test (a Float32, id UInt64); +INSERT INTO test VALUES (10,10),(20,20); + +SELECT 'query1'; +-- alias clash (a is redefined in CTE) +-- 21.8: no error, bad result +-- 21.9 and newer: error "Block structure mismatch in (columns with identical name must have identical structure) stream" + +WITH avg(a) OVER () AS a SELECT a, id FROM test SETTINGS allow_experimental_window_functions = 1; + +SELECT 'query2'; +-- no aliases clash, good result +WITH avg(a) OVER () AS a2 SELECT a2, id FROM test SETTINGS allow_experimental_window_functions = 1; + +SELECT 'query3'; +-- aliases clash without CTE +SELECT avg(a) OVER () AS a, id FROM test SETTINGS allow_experimental_window_functions = 1; diff --git a/parser/testdata/03120_analyzer_dist_join/ast.json b/parser/testdata/03120_analyzer_dist_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03120_analyzer_dist_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03120_analyzer_dist_join/metadata.json b/parser/testdata/03120_analyzer_dist_join/metadata.json new file mode 100644 
index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03120_analyzer_dist_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03120_analyzer_dist_join/query.sql b/parser/testdata/03120_analyzer_dist_join/query.sql new file mode 100644 index 000000000..e40df56c5 --- /dev/null +++ b/parser/testdata/03120_analyzer_dist_join/query.sql @@ -0,0 +1,76 @@ +-- Tags: no-replicated-database +-- https://github.com/ClickHouse/ClickHouse/issues/8547 +SET enable_analyzer=1; +SET distributed_foreground_insert=1; + +CREATE TABLE a1_replicated ON CLUSTER test_shard_localhost ( + day Date, + id UInt32 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/a1_replicated', '1_replica') +ORDER BY tuple(); + +CREATE TABLE a1 ( + day Date, + id UInt32 +) +ENGINE = Distributed('test_shard_localhost', currentDatabase(), a1_replicated, id); + +CREATE TABLE b1_replicated ON CLUSTER test_shard_localhost ( + day Date, + id UInt32 +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/b1_replicated', '1_replica') +ORDER BY tuple(); + +CREATE TABLE b1 ( + day Date, + id UInt32 +) +ENGINE = Distributed('test_shard_localhost', currentDatabase(), b1_replicated, id); + + +INSERT INTO a1 (day, id) VALUES ('2019-01-01', 9), ('2019-01-01', 10), ('2019-01-02', 10), ('2019-01-01', 11); +INSERT INTO b1 (day, id) VALUES ('2019-01-01', 9), ('2019-01-01', 10), ('2019-01-02', 11), ('2019-01-01', 11); + + +SET distributed_product_mode='local'; + +SELECT id, count() +FROM a1 AS a1 +LEFT JOIN b1 AS b1 ON a1.id = b1.id +GROUP BY id +ORDER BY id; + +SELECT id, count() +FROM a1 a1 +LEFT JOIN (SELECT id FROM b1 b1) b1 ON a1.id = b1.id +GROUP BY id +ORDER BY id; + +SELECT id, count() +FROM (SELECT id FROM a1) a1 +LEFT JOIN (SELECT id FROM b1) b1 ON a1.id = b1.id +GROUP BY id +ORDER BY id; + + +SET distributed_product_mode='global'; + +SELECT id, count() +FROM a1 AS a1 +LEFT JOIN b1 AS b1 ON a1.id = b1.id +GROUP BY id +ORDER BY id; + +SELECT id, count() +FROM a1 a1 +LEFT JOIN (SELECT id FROM b1 b1) b1 ON a1.id = b1.id +GROUP BY id +ORDER BY id; + +SELECT id, count() +FROM (SELECT id FROM a1) a1 +LEFT JOIN (SELECT id FROM b1) b1 ON a1.id = b1.id +GROUP BY id +ORDER BY id; diff --git a/parser/testdata/03120_analyzer_param_in_CTE_alias/ast.json b/parser/testdata/03120_analyzer_param_in_CTE_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03120_analyzer_param_in_CTE_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03120_analyzer_param_in_CTE_alias/metadata.json b/parser/testdata/03120_analyzer_param_in_CTE_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03120_analyzer_param_in_CTE_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03120_analyzer_param_in_CTE_alias/query.sql b/parser/testdata/03120_analyzer_param_in_CTE_alias/query.sql new file mode 100644 index 000000000..d1e3d5a2f --- /dev/null +++ b/parser/testdata/03120_analyzer_param_in_CTE_alias/query.sql @@ -0,0 +1,10 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/33000 +SET enable_analyzer=1; + +SET param_test_a=30; + +WITH {test_a:UInt32} as column SELECT column as number FROM numbers(2) FORMAT TSVWithNames; + +WITH {test_a:UInt32} as column SELECT {test_a:UInt32} as number FROM numbers(2) FORMAT TSVWithNames; + +WITH {test_a:UInt32} as column SELECT column FROM numbers(2) FORMAT TSVWithNames; diff --git 
a/parser/testdata/03121_analyzer_filed_redefenition_in_subquery/ast.json b/parser/testdata/03121_analyzer_filed_redefenition_in_subquery/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03121_analyzer_filed_redefenition_in_subquery/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03121_analyzer_filed_redefenition_in_subquery/metadata.json b/parser/testdata/03121_analyzer_filed_redefenition_in_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03121_analyzer_filed_redefenition_in_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03121_analyzer_filed_redefenition_in_subquery/query.sql b/parser/testdata/03121_analyzer_filed_redefenition_in_subquery/query.sql new file mode 100644 index 000000000..bbbdde7f4 --- /dev/null +++ b/parser/testdata/03121_analyzer_filed_redefenition_in_subquery/query.sql @@ -0,0 +1,38 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/14739 +SET enable_analyzer=1; + +drop table if exists test_subquery; + +CREATE TABLE test_subquery +ENGINE = Memory AS +SELECT 'base' AS my_field; + +-- query 1 +SELECT my_field +FROM +( + SELECT + *, + 'redefined' AS my_field + from test_subquery +); + +-- query 2 +SELECT my_field +FROM +( + SELECT + 'redefined' AS my_field, + * + from test_subquery +); + +-- query 3 +SELECT my_field +FROM +( + SELECT + *, + 'redefined' AS my_field + from (select * from test_subquery) +); diff --git a/parser/testdata/03122_analyzer_collate_in_window_function/ast.json b/parser/testdata/03122_analyzer_collate_in_window_function/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03122_analyzer_collate_in_window_function/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03122_analyzer_collate_in_window_function/metadata.json b/parser/testdata/03122_analyzer_collate_in_window_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03122_analyzer_collate_in_window_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03122_analyzer_collate_in_window_function/query.sql b/parser/testdata/03122_analyzer_collate_in_window_function/query.sql new file mode 100644 index 000000000..efd2e2fc8 --- /dev/null +++ b/parser/testdata/03122_analyzer_collate_in_window_function/query.sql @@ -0,0 +1,9 @@ +-- Tags: no-fasttest +-- https://github.com/ClickHouse/ClickHouse/issues/44039 +SET enable_analyzer=1; + +create table test_window_collate(c1 String, c2 String) engine=MergeTree order by c1; +insert into test_window_collate values('1', '上海'); +insert into test_window_collate values('1', '北京'); +insert into test_window_collate values('1', '西安'); +select c2, groupArray(c2) over (partition by c1 order by c2 asc collate 'zh_Hans_CN') as res from test_window_collate order by c2 asc collate 'zh_Hans_CN'; diff --git a/parser/testdata/03123_analyzer_dist_join_CTE/ast.json b/parser/testdata/03123_analyzer_dist_join_CTE/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03123_analyzer_dist_join_CTE/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03123_analyzer_dist_join_CTE/metadata.json b/parser/testdata/03123_analyzer_dist_join_CTE/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03123_analyzer_dist_join_CTE/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03123_analyzer_dist_join_CTE/query.sql b/parser/testdata/03123_analyzer_dist_join_CTE/query.sql new file mode 100644 index 000000000..4d9f5e297 --- /dev/null +++ b/parser/testdata/03123_analyzer_dist_join_CTE/query.sql @@ -0,0 +1,28 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/22923 +SET enable_analyzer=1; +SET prefer_localhost_replica=0; + +create table "t0" (a Int64, b Int64) engine = MergeTree() partition by a order by a; + +create table "dist_t0" (a Int64, b Int64) engine = Distributed(test_shard_localhost, currentDatabase(), t0); + +insert into t0 values (1, 10), (2, 12); + +SELECT * FROM ( +WITH + b AS + ( + SELECT toInt64(number) AS a + FROM numbers(10) + ), + c AS + ( + SELECT toInt64(number) AS a + FROM numbers(10) + ) +SELECT * +FROM dist_t0 AS a +LEFT JOIN b AS b ON a.a = b.a +LEFT JOIN c AS c ON a.a = c.a +) +ORDER BY ALL; diff --git a/parser/testdata/03124_analyzer_nested_CTE_dist_in/ast.json b/parser/testdata/03124_analyzer_nested_CTE_dist_in/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03124_analyzer_nested_CTE_dist_in/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03124_analyzer_nested_CTE_dist_in/metadata.json b/parser/testdata/03124_analyzer_nested_CTE_dist_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03124_analyzer_nested_CTE_dist_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03124_analyzer_nested_CTE_dist_in/query.sql b/parser/testdata/03124_analyzer_nested_CTE_dist_in/query.sql new file mode 100644 index 000000000..be5346efa --- /dev/null +++ b/parser/testdata/03124_analyzer_nested_CTE_dist_in/query.sql @@ -0,0 +1,20 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/23865 +SET enable_analyzer=1; + +create table table_local engine = Memory AS select * from numbers(10); + +create table table_dist engine = Distributed('test_cluster_two_shards', currentDatabase(),table_local) AS table_local; + +with + x as ( + select number + from numbers(10) + where number % 3=0), + y as ( + select number, count() + from table_dist + where number in (select * from x) + group by number +) +select * from y +ORDER BY ALL; diff --git a/parser/testdata/03125_analyzer_CTE_two_joins/ast.json b/parser/testdata/03125_analyzer_CTE_two_joins/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03125_analyzer_CTE_two_joins/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03125_analyzer_CTE_two_joins/metadata.json b/parser/testdata/03125_analyzer_CTE_two_joins/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03125_analyzer_CTE_two_joins/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03125_analyzer_CTE_two_joins/query.sql b/parser/testdata/03125_analyzer_CTE_two_joins/query.sql new file mode 100644 index 000000000..934e2bc36 --- /dev/null +++ b/parser/testdata/03125_analyzer_CTE_two_joins/query.sql @@ -0,0 +1,17 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/29748 +SET enable_analyzer=1; + +create table events ( distinct_id String ) engine = Memory; + +INSERT INTO events VALUES ('1234'), ('1'); + +WITH cte1 as ( + SELECT '1234' as x + ), cte2 as ( + SELECT '1234' as x + ) +SELECT * +FROM events AS events +JOIN cte2 ON cte2.x = events.distinct_id +JOIN cte1 ON cte1.x = cte2.x +limit 1; diff --git a/parser/testdata/03126_column_not_under_group_by/ast.json 
b/parser/testdata/03126_column_not_under_group_by/ast.json new file mode 100644 index 000000000..1514a343a --- /dev/null +++ b/parser/testdata/03126_column_not_under_group_by/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001109832, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03126_column_not_under_group_by/metadata.json b/parser/testdata/03126_column_not_under_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03126_column_not_under_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03126_column_not_under_group_by/query.sql b/parser/testdata/03126_column_not_under_group_by/query.sql new file mode 100644 index 000000000..516126c89 --- /dev/null +++ b/parser/testdata/03126_column_not_under_group_by/query.sql @@ -0,0 +1,6 @@ +SET enable_analyzer=1; + +SELECT v.x, r.a, sum(c) +FROM (select 1 x, 2 c) AS v +ANY LEFT JOIN (SELECT 1 x, 2 a) AS r ON v.x = r.x +GROUP BY v.x; -- { serverError NOT_AN_AGGREGATE} diff --git a/parser/testdata/03127_argMin_combinator_state/ast.json b/parser/testdata/03127_argMin_combinator_state/ast.json new file mode 100644 index 000000000..4ff0ae009 --- /dev/null +++ b/parser/testdata/03127_argMin_combinator_state/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumArgMinState (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.00187538, + "rows_read": 16, + "bytes_read": 643 + } +} diff --git a/parser/testdata/03127_argMin_combinator_state/metadata.json b/parser/testdata/03127_argMin_combinator_state/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03127_argMin_combinator_state/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03127_argMin_combinator_state/query.sql b/parser/testdata/03127_argMin_combinator_state/query.sql new file mode 100644 index 000000000..2eb209ed5 --- /dev/null +++ b/parser/testdata/03127_argMin_combinator_state/query.sql @@ -0,0 +1,22 @@ +SELECT toTypeName(sumArgMinState(number, number)) FROM numbers(1); +SELECT sumArgMinState(number, number) AS a FROM numbers(3) FORMAT Null; + +DROP TABLE IF EXISTS argmax_comb; +CREATE TABLE argmax_comb( + id UInt64, + state AggregateFunction(avgArgMax, Float64, UInt64) + ) + ENGINE=MergeTree() ORDER BY tuple(); +INSERT INTO argmax_comb + SELECT + CAST(number % 10, 'UInt64') AS id, + avgArgMaxState(CAST(number, 'Float64'), id) + FROM numbers(100) + GROUP BY id; +SELECT avgArgMaxMerge(state) 
FROM argmax_comb; +SELECT + id, + avgArgMaxMerge(state) +FROM argmax_comb +GROUP BY id +ORDER BY id ASC; \ No newline at end of file diff --git a/parser/testdata/03127_system_unload_primary_key_table/ast.json b/parser/testdata/03127_system_unload_primary_key_table/ast.json new file mode 100644 index 000000000..e6120a848 --- /dev/null +++ b/parser/testdata/03127_system_unload_primary_key_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001268276, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03127_system_unload_primary_key_table/metadata.json b/parser/testdata/03127_system_unload_primary_key_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03127_system_unload_primary_key_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03127_system_unload_primary_key_table/query.sql b/parser/testdata/03127_system_unload_primary_key_table/query.sql new file mode 100644 index 000000000..088a56910 --- /dev/null +++ b/parser/testdata/03127_system_unload_primary_key_table/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS test; +DROP TABLE IF EXISTS test2; + +CREATE TABLE test (s String) ENGINE = MergeTree ORDER BY s SETTINGS index_granularity = 1, use_primary_key_cache = 0; +CREATE TABLE test2 (s String) ENGINE = MergeTree ORDER BY s SETTINGS index_granularity = 1, use_primary_key_cache = 0; + +INSERT INTO test SELECT randomString(1000) FROM numbers(100000); +SELECT round(primary_key_bytes_in_memory, -7), round(primary_key_bytes_in_memory_allocated, -7) FROM system.parts WHERE database = currentDatabase() AND table IN ('test', 'test2'); + +INSERT INTO test2 SELECT randomString(1000) FROM numbers(100000); +SELECT round(primary_key_bytes_in_memory, -7), round(primary_key_bytes_in_memory_allocated, -7) FROM system.parts WHERE database = currentDatabase() AND table IN ('test', 'test2'); + +SYSTEM UNLOAD PRIMARY KEY {CLICKHOUSE_DATABASE:Identifier}.test; +SELECT round(primary_key_bytes_in_memory, -7), round(primary_key_bytes_in_memory_allocated, -7) FROM system.parts WHERE database = currentDatabase() AND table IN ('test', 'test2'); + +SYSTEM UNLOAD PRIMARY KEY {CLICKHOUSE_DATABASE:Identifier}.test2; +SELECT round(primary_key_bytes_in_memory, -7), round(primary_key_bytes_in_memory_allocated, -7) FROM system.parts WHERE database = currentDatabase() AND table IN ('test', 'test2'); + +SELECT 'Query that does not use index for table `test`'; +SELECT s != '' FROM test LIMIT 1; +SELECT round(primary_key_bytes_in_memory, -7), round(primary_key_bytes_in_memory_allocated, -7) FROM system.parts WHERE database = currentDatabase() AND table IN ('test', 'test2'); + +SELECT 'Query that uses index for table `test`'; +SELECT s != '' FROM test WHERE s < '99999999' LIMIT 1; +SELECT round(primary_key_bytes_in_memory, -7), round(primary_key_bytes_in_memory_allocated, -7) FROM system.parts WHERE database = currentDatabase() AND table IN ('test', 'test2'); + +SELECT 'Query that does not use index for table `test2`'; +SELECT s != '' FROM test2 LIMIT 1; +SELECT round(primary_key_bytes_in_memory, -7), round(primary_key_bytes_in_memory_allocated, -7) FROM system.parts WHERE database = currentDatabase() AND table IN ('test', 'test2'); + +SELECT 'Query that uses index for table `test2`'; +SELECT s != '' FROM test2
WHERE s < '99999999' LIMIT 1; +SELECT round(primary_key_bytes_in_memory, -7), round(primary_key_bytes_in_memory_allocated, -7) FROM system.parts WHERE database = currentDatabase() AND table IN ('test', 'test2'); diff --git a/parser/testdata/03127_window_functions_uint16/ast.json b/parser/testdata/03127_window_functions_uint16/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03127_window_functions_uint16/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03127_window_functions_uint16/metadata.json b/parser/testdata/03127_window_functions_uint16/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03127_window_functions_uint16/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03127_window_functions_uint16/query.sql b/parser/testdata/03127_window_functions_uint16/query.sql new file mode 100644 index 000000000..d0a0ce08e --- /dev/null +++ b/parser/testdata/03127_window_functions_uint16/query.sql @@ -0,0 +1,19 @@ +-- { echo } + +SELECT tumbleStart(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' WEEK, 'US/Samoa'); +SELECT toDateTime(tumbleStart(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' WEEK, 'US/Samoa'), 'US/Samoa'); +SELECT toDateTime(tumbleStart(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' WEEK, 'US/Samoa'), 'US/Samoa'); +SELECT tumbleStart(tumble(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' WEEK, 'US/Samoa')); +SELECT tumbleEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' WEEK, 'US/Samoa'); +SELECT toDateTime(tumbleEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' WEEK, 'US/Samoa'), 'US/Samoa'); +SELECT toDateTime(tumbleEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' WEEK, 'US/Samoa'), 'US/Samoa'); +SELECT tumbleEnd(tumble(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' WEEK, 'US/Samoa')); + +SELECT hopStart(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' WEEK, INTERVAL '3' WEEK, 'US/Samoa'); +SELECT toDateTime(hopStart(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' WEEK, INTERVAL '3' WEEK, 'US/Samoa'), 'US/Samoa'); +SELECT toDateTime(hopStart(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' WEEK, INTERVAL '3' WEEK, 'US/Samoa'), 'US/Samoa'); +SELECT hopStart(hop(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' WEEK, INTERVAL '3' WEEK, 'US/Samoa')); +SELECT hopEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' WEEK, INTERVAL '3' WEEK, 'US/Samoa'); +SELECT toDateTime(hopEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' WEEK, INTERVAL '3' WEEK, 'US/Samoa'), 'US/Samoa'); +SELECT toDateTime(hopEnd(toDateTime('2020-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' WEEK, INTERVAL '3' WEEK, 'US/Samoa'), 'US/Samoa'); +SELECT hopEnd(hop(toDateTime('2019-01-09 12:00:01', 'US/Samoa'), INTERVAL '1' WEEK, INTERVAL '3' WEEK, 'US/Samoa')); diff --git a/parser/testdata/03128_argMin_combinator_projection/ast.json b/parser/testdata/03128_argMin_combinator_projection/ast.json new file mode 100644 index 000000000..228a58459 --- /dev/null +++ b/parser/testdata/03128_argMin_combinator_projection/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery combinator_argMin_table_r1 (children 1)" + }, + { + "explain": " Identifier combinator_argMin_table_r1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001113249, + 
"rows_read": 2, + "bytes_read": 104 + } +} diff --git a/parser/testdata/03128_argMin_combinator_projection/metadata.json b/parser/testdata/03128_argMin_combinator_projection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03128_argMin_combinator_projection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03128_argMin_combinator_projection/query.sql b/parser/testdata/03128_argMin_combinator_projection/query.sql new file mode 100644 index 000000000..9d2d7393d --- /dev/null +++ b/parser/testdata/03128_argMin_combinator_projection/query.sql @@ -0,0 +1,75 @@ +DROP TABLE IF EXISTS combinator_argMin_table_r1 SYNC; +DROP TABLE IF EXISTS combinator_argMin_table_r2 SYNC; + +CREATE TABLE combinator_argMin_table_r1 +( + `id` Int32, + `value` Int32, + `agg_time` DateTime, + PROJECTION first_items + ( + SELECT + id, + minArgMin(agg_time, value), + maxArgMax(agg_time, value) + GROUP BY id + ) +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_03128/combinator_argMin_table', 'r1') +ORDER BY (id); + +INSERT INTO combinator_argMin_table_r1 + SELECT + number % 10 as id, + number as value, + '2024-01-01 00:00:00' + INTERVAL number SECOND + FROM + numbers(100); + +INSERT INTO combinator_argMin_table_r1 + SELECT + number % 10 as id, + number * 10 as value, + '2024-01-01 00:00:00' + INTERVAL number SECOND + FROM + numbers(100); + +-- We check replication by creating another replica +CREATE TABLE combinator_argMin_table_r2 +( + `id` Int32, + `value` Int32, + `agg_time` DateTime, + PROJECTION first_items + ( + SELECT + id, + minArgMin(agg_time, value), + maxArgMax(agg_time, value) + GROUP BY id + ) +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_03128/combinator_argMin_table', 'r2') +ORDER BY (id); + +SYSTEM SYNC REPLICA combinator_argMin_table_r2; + +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +SELECT + id, + minArgMin(agg_time, value), + maxArgMax(agg_time, value) +FROM combinator_argMin_table_r1 +GROUP BY id +ORDER BY id +SETTINGS force_optimize_projection=1; + +SELECT + id, + minArgMin(agg_time, value), + maxArgMax(agg_time, value) +FROM combinator_argMin_table_r2 +GROUP BY id +ORDER BY id +SETTINGS force_optimize_projection=1; diff --git a/parser/testdata/03128_merge_tree_index_lazy_load/ast.json b/parser/testdata/03128_merge_tree_index_lazy_load/ast.json new file mode 100644 index 000000000..bd19add3d --- /dev/null +++ b/parser/testdata/03128_merge_tree_index_lazy_load/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_index_lazy_load (children 1)" + }, + { + "explain": " Identifier t_index_lazy_load" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001481838, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/03128_merge_tree_index_lazy_load/metadata.json b/parser/testdata/03128_merge_tree_index_lazy_load/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03128_merge_tree_index_lazy_load/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03128_merge_tree_index_lazy_load/query.sql b/parser/testdata/03128_merge_tree_index_lazy_load/query.sql new file mode 100644 index 000000000..19f00e7dc --- /dev/null +++ b/parser/testdata/03128_merge_tree_index_lazy_load/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS t_index_lazy_load; + +CREATE 
TABLE t_index_lazy_load (a UInt64, b UInt64) +ENGINE = MergeTree ORDER BY (a, b) +SETTINGS index_granularity = 4, primary_key_ratio_of_unique_prefix_values_to_skip_suffix_columns = 0.5; + +INSERT INTO t_index_lazy_load SELECT number, number FROM numbers(10); + +SELECT mark_number, a, b FROM mergeTreeIndex(currentDatabase(), t_index_lazy_load) ORDER BY mark_number; + +DETACH TABLE t_index_lazy_load; +ATTACH TABLE t_index_lazy_load; + +SELECT mark_number, a, b FROM mergeTreeIndex(currentDatabase(), t_index_lazy_load) ORDER BY mark_number; + +DROP TABLE t_index_lazy_load; diff --git a/parser/testdata/03128_system_unload_primary_key/ast.json b/parser/testdata/03128_system_unload_primary_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03128_system_unload_primary_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03128_system_unload_primary_key/metadata.json b/parser/testdata/03128_system_unload_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03128_system_unload_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03128_system_unload_primary_key/query.sql b/parser/testdata/03128_system_unload_primary_key/query.sql new file mode 100644 index 000000000..521ccfb13 --- /dev/null +++ b/parser/testdata/03128_system_unload_primary_key/query.sql @@ -0,0 +1,15 @@ +-- Tags: no-parallel +DROP TABLE IF EXISTS test; +DROP TABLE IF EXISTS test2; + +CREATE TABLE test (s String) ENGINE = MergeTree ORDER BY s SETTINGS index_granularity = 1, use_primary_key_cache = 0; +CREATE TABLE test2 (s String) ENGINE = MergeTree ORDER BY s SETTINGS index_granularity = 1, use_primary_key_cache = 0; + +INSERT INTO test SELECT randomString(1000) FROM numbers(100000); +INSERT INTO test2 SELECT randomString(1000) FROM numbers(100000); + +SELECT round(primary_key_bytes_in_memory, -7), round(primary_key_bytes_in_memory_allocated, -7) FROM system.parts WHERE database = currentDatabase() AND table IN ('test', 'test2'); + +SYSTEM UNLOAD PRIMARY KEY; + +SELECT primary_key_bytes_in_memory, primary_key_bytes_in_memory_allocated FROM system.parts WHERE database = currentDatabase() AND table IN ('test', 'test2') \ No newline at end of file diff --git a/parser/testdata/03129_cte_with_final/ast.json b/parser/testdata/03129_cte_with_final/ast.json new file mode 100644 index 000000000..0e5bbfeb8 --- /dev/null +++ b/parser/testdata/03129_cte_with_final/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001217965, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03129_cte_with_final/metadata.json b/parser/testdata/03129_cte_with_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03129_cte_with_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03129_cte_with_final/query.sql b/parser/testdata/03129_cte_with_final/query.sql new file mode 100644 index 000000000..2a0714ec5 --- /dev/null +++ b/parser/testdata/03129_cte_with_final/query.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS t; + +CREATE TABLE t +( + `key` Int64, + `someCol` String, + `eventTime` DateTime +) +ENGINE = ReplacingMergeTree(eventTime) +ORDER BY key; + +INSERT INTO t Values (1, 'first', '2024-04-19 01:01:01'); +INSERT 
INTO t Values (1, 'first', '2024-04-19 01:01:01'); + +SET enable_analyzer = 1; + +EXPLAIN QUERY TREE passes=1 +WITH merged_test AS( + SELECT * FROM t Final +) +SELECT * FROM merged_test; + +WITH merged_test AS( + SELECT * FROM t Final +) +SELECT * FROM merged_test; + +DROP TABLE t; diff --git a/parser/testdata/03129_low_cardinality_nullable_non_first_primary_key/ast.json b/parser/testdata/03129_low_cardinality_nullable_non_first_primary_key/ast.json new file mode 100644 index 000000000..263beee1e --- /dev/null +++ b/parser/testdata/03129_low_cardinality_nullable_non_first_primary_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery small (children 1)" + }, + { + "explain": " Identifier small" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001095753, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/03129_low_cardinality_nullable_non_first_primary_key/metadata.json b/parser/testdata/03129_low_cardinality_nullable_non_first_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03129_low_cardinality_nullable_non_first_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03129_low_cardinality_nullable_non_first_primary_key/query.sql b/parser/testdata/03129_low_cardinality_nullable_non_first_primary_key/query.sql new file mode 100644 index 000000000..457c288da --- /dev/null +++ b/parser/testdata/03129_low_cardinality_nullable_non_first_primary_key/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS small; + +CREATE TABLE small (`dt` DateTime, `user_email` LowCardinality(Nullable(String))) +ENGINE = MergeTree order by (dt, user_email) settings allow_nullable_key = 1, min_bytes_for_wide_part=0, min_rows_for_wide_part=0; + +INSERT INTO small (dt, user_email) SELECT number, if(number % 3 = 2, NULL, number) FROM numbers(1e2); + +SELECT SUM(dt::int) FROM small WHERE user_email IS NULL; + +DROP TABLE small; diff --git a/parser/testdata/03129_serial_test_zookeeper/ast.json b/parser/testdata/03129_serial_test_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03129_serial_test_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03129_serial_test_zookeeper/metadata.json b/parser/testdata/03129_serial_test_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03129_serial_test_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03129_serial_test_zookeeper/query.sql b/parser/testdata/03129_serial_test_zookeeper/query.sql new file mode 100644 index 000000000..0dcef31e9 --- /dev/null +++ b/parser/testdata/03129_serial_test_zookeeper/query.sql @@ -0,0 +1,40 @@ +-- Tags: zookeeper + +SELECT generateSerialID(currentDatabase() || 'x'); +SELECT generateSerialID(currentDatabase() || 'x'); +SELECT generateSerialID(currentDatabase() || 'y'); +SELECT generateSerialID(currentDatabase() || 'x') FROM numbers(5); + +-- Test basic functionality with start_value parameter +SELECT generateSerialID(currentDatabase() || 'start100', 100); +SELECT generateSerialID(currentDatabase() || 'start100', 100); +SELECT generateSerialID(currentDatabase() || 'start100', 100); + +-- Test with different start values for different series +SELECT generateSerialID(currentDatabase() || 'start200', 200); +SELECT generateSerialID(currentDatabase() || 'start200', 200); + +-- 
Test with start value 0 (should behave same as no parameter) +SELECT generateSerialID(currentDatabase() || 'start0', 0); +SELECT generateSerialID(currentDatabase() || 'start0', 0); + +-- Test with multiple rows and start_value +SELECT generateSerialID(currentDatabase() || 'start500', 500) FROM numbers(5); + +-- Test that start_value only affects the first call (when series is created) +-- Subsequent calls should ignore the start_value parameter +SELECT generateSerialID(currentDatabase() || 'start1000', 1000); +SELECT generateSerialID(currentDatabase() || 'start1000', 9999); -- This should return 1001, not 9999 + +-- Test error cases +SELECT generateSerialID(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT generateSerialID('x', 'y'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT generateSerialID('x', 'y', 'z'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT generateSerialID(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT generateSerialID('abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij'); -- { serverError BAD_ARGUMENTS } + +-- Here the functions are identical and fall into common-subexpression-elimination: +SELECT generateSerialID(currentDatabase() || 'z'), generateSerialID(currentDatabase() || 'z') FROM numbers(5); + +SET max_autoincrement_series = 3; +SELECT generateSerialID('a'); -- { serverError LIMIT_EXCEEDED } diff --git a/parser/testdata/03129_update_nested_materialized_column_check/ast.json b/parser/testdata/03129_update_nested_materialized_column_check/ast.json new file mode 100644 index 000000000..a0aa5fc58 --- /dev/null +++ b/parser/testdata/03129_update_nested_materialized_column_check/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001134107, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03129_update_nested_materialized_column_check/metadata.json b/parser/testdata/03129_update_nested_materialized_column_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03129_update_nested_materialized_column_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03129_update_nested_materialized_column_check/query.sql b/parser/testdata/03129_update_nested_materialized_column_check/query.sql new file mode 100644 index 000000000..85a0b9903 --- /dev/null +++ b/parser/testdata/03129_update_nested_materialized_column_check/query.sql @@ -0,0 +1,33 @@ +SET asterisk_include_materialized_columns = 1 ; + +CREATE TABLE elements +( + `id` UInt32, + `nested.key` Array(String), + `nested.value` Array(String), + `nested.key_hashed` Array(UInt64) MATERIALIZED arrayMap(x -> sipHash64(x), nested.key), + `nested.val_hashed` Array(UInt64) MATERIALIZED arrayMap(x -> sipHash64(x), nested.value), +) + ENGINE = Memory ; + + +INSERT INTO elements (id,`nested.key`,`nested.value`) VALUES (5555, ['moto', 'hello'],['chocolatine', 'croissant']); + +SELECT * FROM elements ; + +ALTER TABLE elements +UPDATE + `nested.key` = arrayFilter((x, v) -> NOT (match(v, 'chocolatine')), `nested.key`, `nested.value` ), + `nested.value` = arrayFilter((x, v) -> NOT (match(v, 'chocolatine')), `nested.value`, `nested.value`) +WHERE id = 5555 +SETTINGS mutations_sync = 1 ; + +SELECT * FROM elements ; + +ALTER TABLE elements +UPDATE + `nested.value` = arrayMap(x -> concat(x, ' au chocolat'), 
`nested.value`) +WHERE id = 5555 +SETTINGS mutations_sync = 1 ; + +SELECT * FROM elements ; diff --git a/parser/testdata/03130_abs_in_key_condition_bug/ast.json b/parser/testdata/03130_abs_in_key_condition_bug/ast.json new file mode 100644 index 000000000..5d23ed21f --- /dev/null +++ b/parser/testdata/03130_abs_in_key_condition_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001152261, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03130_abs_in_key_condition_bug/metadata.json b/parser/testdata/03130_abs_in_key_condition_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03130_abs_in_key_condition_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03130_abs_in_key_condition_bug/query.sql b/parser/testdata/03130_abs_in_key_condition_bug/query.sql new file mode 100644 index 000000000..eec0d034d --- /dev/null +++ b/parser/testdata/03130_abs_in_key_condition_bug/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS t; + +CREATE TABLE t (id UInt64, ts DateTime) ENGINE = MergeTree() ORDER BY (id, ts) SETTINGS index_granularity = 2; + +INSERT INTO t VALUES + (1, toDateTime('2023-05-04 21:17:23', 'UTC')), (1, toDateTime('2023-05-04 22:17:23', 'UTC')), (2, toDateTime('2023-05-04 22:17:23', 'UTC')), (2, toDateTime('2023-05-04 23:17:23', 'UTC')); + +SELECT count(abs(toUnixTimestamp(ts, 'UTC') - toUnixTimestamp('2023-05-04 22:17:23', 'UTC')) AS error) FROM t WHERE error < 3600; + +DROP TABLE IF EXISTS t; diff --git a/parser/testdata/03130_analyzer_array_join_prefer_column/ast.json b/parser/testdata/03130_analyzer_array_join_prefer_column/ast.json new file mode 100644 index 000000000..8a4ffd924 --- /dev/null +++ b/parser/testdata/03130_analyzer_array_join_prefer_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001122732, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03130_analyzer_array_join_prefer_column/metadata.json b/parser/testdata/03130_analyzer_array_join_prefer_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03130_analyzer_array_join_prefer_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03130_analyzer_array_join_prefer_column/query.sql b/parser/testdata/03130_analyzer_array_join_prefer_column/query.sql new file mode 100644 index 000000000..1f7bc9f9d --- /dev/null +++ b/parser/testdata/03130_analyzer_array_join_prefer_column/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table (`id` UInt64, `value` String, `value_array` Array(UInt64)) ENGINE = MergeTree() ORDER BY id; +INSERT INTO test_table VALUES (0, 'aaa', [0]), (1, 'bbb', [1]), (2, 'ccc', [2]); + + +SELECT materialize(id), toTypeName(id) +FROM ( SELECT 'aaa' ) AS subquery +ARRAY JOIN [0] AS id +INNER JOIN test_table +USING (id) +; + +SELECT materialize(id), toTypeName(id) +FROM ( SELECT 'aaa' ) AS subquery +ARRAY JOIN [0] AS id +INNER JOIN test_table +USING (id) +SETTINGS prefer_column_name_to_alias = 1 +; diff --git 
a/parser/testdata/03130_analyzer_self_join_group_by/ast.json b/parser/testdata/03130_analyzer_self_join_group_by/ast.json new file mode 100644 index 000000000..d2e35b633 --- /dev/null +++ b/parser/testdata/03130_analyzer_self_join_group_by/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00112241, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03130_analyzer_self_join_group_by/metadata.json b/parser/testdata/03130_analyzer_self_join_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03130_analyzer_self_join_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03130_analyzer_self_join_group_by/query.sql b/parser/testdata/03130_analyzer_self_join_group_by/query.sql new file mode 100644 index 000000000..81af10c4a --- /dev/null +++ b/parser/testdata/03130_analyzer_self_join_group_by/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (x Int32) ENGINE = MergeTree ORDER BY x; +INSERT INTO t1 VALUES (1), (2), (3); + +SET enable_analyzer = 1; + +SELECT t2.x FROM t1 JOIN t1 as t2 ON t1.x = t2.x GROUP BY t1.x; -- { serverError NOT_AN_AGGREGATE } +SELECT t2.number FROM numbers(10) as t1 JOIN numbers(10) as t2 ON t1.number = t2.number GROUP BY t1.number; -- { serverError NOT_AN_AGGREGATE } +SELECT t2.a FROM (SELECT x as a FROM t1) as t1 JOIN (SELECT x as a FROM t1) as t2 ON t1.a = t2.a GROUP BY t1.a; -- { serverError NOT_AN_AGGREGATE } +SELECT t2.a FROM (SELECT x as a FROM t1 UNION ALL SELECT x as a FROM t1) as t1 JOIN (SELECT x as a FROM t1 UNION ALL SELECT x as a FROM t1) as t2 ON t1.a = t2.a GROUP BY t1.a; -- { serverError NOT_AN_AGGREGATE } +SELECT t2.number FROM numbers(10) JOIN numbers(10) as t2 ON number = t2.number GROUP BY number SETTINGS joined_subquery_requires_alias = 0; -- { serverError NOT_AN_AGGREGATE } + +SELECT t2.x FROM t1 as t0 JOIN t1 as t2 ON t1.x = t2.x GROUP BY t1.x; -- { serverError NOT_AN_AGGREGATE } +SELECT t2.x FROM t1 as t0 JOIN t1 as t2 ON t0.x = t2.x GROUP BY t0.x; -- { serverError NOT_AN_AGGREGATE } +SELECT t2.x FROM t1 JOIN t1 as t2 ON t1.x = t2.x GROUP BY x; -- { serverError NOT_AN_AGGREGATE } +SELECT t1.x FROM t1 JOIN t1 as t2 ON t1.x = t2.x GROUP BY t2.x; -- { serverError NOT_AN_AGGREGATE } +SELECT x FROM t1 JOIN t1 as t2 ON t1.x = t2.x GROUP BY t2.x; -- { serverError NOT_AN_AGGREGATE } +SELECT x FROM t1 JOIN t1 as t2 USING (x) GROUP BY t2.x; -- { serverError NOT_AN_AGGREGATE } + +SELECT t1.x FROM t1 JOIN t1 as t2 ON t1.x = t2.x GROUP BY x ORDER BY ALL; +SELECT x, sum(t2.x) FROM t1 JOIN t1 as t2 ON t1.x = t2.x GROUP BY t1.x ORDER BY ALL; diff --git a/parser/testdata/03130_convert_outer_join_to_inner_join/ast.json b/parser/testdata/03130_convert_outer_join_to_inner_join/ast.json new file mode 100644 index 000000000..929ee6678 --- /dev/null +++ b/parser/testdata/03130_convert_outer_join_to_inner_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00101873, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03130_convert_outer_join_to_inner_join/metadata.json b/parser/testdata/03130_convert_outer_join_to_inner_join/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03130_convert_outer_join_to_inner_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03130_convert_outer_join_to_inner_join/query.sql b/parser/testdata/03130_convert_outer_join_to_inner_join/query.sql new file mode 100644 index 000000000..0a53bf03a --- /dev/null +++ b/parser/testdata/03130_convert_outer_join_to_inner_join/query.sql @@ -0,0 +1,55 @@ +SET enable_analyzer = 1; +SET join_algorithm = 'hash'; + +DROP TABLE IF EXISTS test_table_1; +CREATE TABLE test_table_1 +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id +SETTINGS index_granularity = 16 # The number of granules appears in the `EXPLAIN` output in the reference file +; + +DROP TABLE IF EXISTS test_table_2; +CREATE TABLE test_table_2 +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id +SETTINGS index_granularity = 16 +; + +INSERT INTO test_table_1 VALUES (1, 'Value_1'), (2, 'Value_2'); +INSERT INTO test_table_2 VALUES (2, 'Value_2'), (3, 'Value_3'); + + +EXPLAIN header = 1, actions = 1 SELECT * FROM test_table_1 AS lhs LEFT JOIN test_table_2 AS rhs ON lhs.id = rhs.id WHERE rhs.id != 0 +SETTINGS query_plan_join_swap_table = 'false' +; + +SELECT '--'; + +SELECT * FROM test_table_1 AS lhs LEFT JOIN test_table_2 AS rhs ON lhs.id = rhs.id WHERE rhs.id != 0; + +SELECT '--'; + +EXPLAIN header = 1, actions = 1 SELECT * FROM test_table_1 AS lhs RIGHT JOIN test_table_2 AS rhs ON lhs.id = rhs.id WHERE lhs.id != 0 +SETTINGS query_plan_join_swap_table = 'false' +; + +SELECT '--'; + +SELECT * FROM test_table_1 AS lhs RIGHT JOIN test_table_2 AS rhs ON lhs.id = rhs.id WHERE lhs.id != 0; + +SELECT '--'; + +EXPLAIN header = 1, actions = 1 SELECT * FROM test_table_1 AS lhs FULL JOIN test_table_2 AS rhs ON lhs.id = rhs.id WHERE lhs.id != 0 AND rhs.id != 0 +SETTINGS query_plan_join_swap_table = 'false' +; + +SELECT '--'; + +SELECT * FROM test_table_1 AS lhs FULL JOIN test_table_2 AS rhs ON lhs.id = rhs.id WHERE lhs.id != 0 AND rhs.id != 0; + +DROP TABLE test_table_1; +DROP TABLE test_table_2; diff --git a/parser/testdata/03130_generateSnowflakeId/ast.json b/parser/testdata/03130_generateSnowflakeId/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03130_generateSnowflakeId/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03130_generateSnowflakeId/metadata.json b/parser/testdata/03130_generateSnowflakeId/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03130_generateSnowflakeId/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03130_generateSnowflakeId/query.sql b/parser/testdata/03130_generateSnowflakeId/query.sql new file mode 100644 index 000000000..6c0a6cc0f --- /dev/null +++ b/parser/testdata/03130_generateSnowflakeId/query.sql @@ -0,0 +1,25 @@ +-- Test SQL function 'generateSnowflakeID' + +SELECT 'Negative tests'; +SELECT generateSnowflakeID(1, 2, 3); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT generateSnowflakeID(1, 'not_an_int'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT generateSnowflakeID(1, materialize(2)); -- { serverError ILLEGAL_COLUMN } + +SELECT 'The first bit must be zero'; +SELECT bitAnd(bitShiftRight(generateSnowflakeID(), 63), 1) = 0; + +SELECT 'Test disabling of common subexpression elimination via first parameter'; +SELECT generateSnowflakeID(1) = generateSnowflakeID(2); -- disabled common subexpression elimination --> lhs != rhs +SELECT generateSnowflakeID() = 
generateSnowflakeID(1); -- same as ^^ +SELECT generateSnowflakeID(1) = generateSnowflakeID(1); -- with common subexpression elimination + +SELECT 'Test user-provided machine ID'; +SELECT bitAnd(bitShiftRight(generateSnowflakeID(1, 123), 12), 1024 - 1) = 123; -- the machine id is actually set in the generated snowflake ID (1024 = 2^10) + +SELECT 'Generated Snowflake IDs are unique'; +SELECT count(*) +FROM +( + SELECT DISTINCT generateSnowflakeID() + FROM numbers(100) +); diff --git a/parser/testdata/03131_deprecated_functions/ast.json b/parser/testdata/03131_deprecated_functions/ast.json new file mode 100644 index 000000000..646b0410a --- /dev/null +++ b/parser/testdata/03131_deprecated_functions/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function neighbor (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001199587, + "rows_read": 14, + "bytes_read": 528 + } +} diff --git a/parser/testdata/03131_deprecated_functions/metadata.json b/parser/testdata/03131_deprecated_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03131_deprecated_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03131_deprecated_functions/query.sql b/parser/testdata/03131_deprecated_functions/query.sql new file mode 100644 index 000000000..acdf36a50 --- /dev/null +++ b/parser/testdata/03131_deprecated_functions/query.sql @@ -0,0 +1,13 @@ +SELECT number, neighbor(number, 2) FROM system.numbers LIMIT 10; -- { serverError DEPRECATED_FUNCTION } + +SELECT runningDifference(number) FROM system.numbers LIMIT 10; -- { serverError DEPRECATED_FUNCTION } + +SELECT k, runningAccumulate(sum_k) AS res FROM (SELECT number as k, sumState(k) AS sum_k FROM numbers(10) GROUP BY k ORDER BY k); -- { serverError DEPRECATED_FUNCTION } + +SET allow_deprecated_error_prone_window_functions=1; + +SELECT number, neighbor(number, 2) FROM system.numbers LIMIT 10 FORMAT Null; + +SELECT runningDifference(number) FROM system.numbers LIMIT 10 FORMAT Null; + +SELECT k, runningAccumulate(sum_k) AS res FROM (SELECT number as k, sumState(k) AS sum_k FROM numbers(10) GROUP BY k ORDER BY k) FORMAT Null; diff --git a/parser/testdata/03131_hilbert_coding/ast.json b/parser/testdata/03131_hilbert_coding/ast.json new file mode 100644 index 000000000..166604d91 --- /dev/null +++ b/parser/testdata/03131_hilbert_coding/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '----- START -----'" + } + ], + + "rows": 5, + 
+ "statistics": + { + "elapsed": 0.000962451, + "rows_read": 5, + "bytes_read": 188 + } +} diff --git a/parser/testdata/03131_hilbert_coding/metadata.json b/parser/testdata/03131_hilbert_coding/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03131_hilbert_coding/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03131_hilbert_coding/query.sql b/parser/testdata/03131_hilbert_coding/query.sql new file mode 100644 index 000000000..b16a0efad --- /dev/null +++ b/parser/testdata/03131_hilbert_coding/query.sql @@ -0,0 +1,56 @@ +SELECT '----- START -----'; +drop table if exists hilbert_numbers_03131; +create table hilbert_numbers_03131( + n1 UInt32, + n2 UInt32 +) + Engine=MergeTree() + ORDER BY n1 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +SELECT '----- CONST -----'; +select hilbertEncode(133); +select hilbertEncode(3, 4); +select hilbertDecode(2, 31); + +SELECT '----- 4294967296, 2 -----'; +insert into hilbert_numbers_03131 +select n1.number, n2.number +from numbers(pow(2, 32)-8,8) n1 + cross join numbers(pow(2, 32)-8, 8) n2 +; + +drop table if exists hilbert_numbers_1_03131; +create table hilbert_numbers_1_03131( + n1 UInt64, + n2 UInt64 +) + Engine=MergeTree() + ORDER BY n1 SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +insert into hilbert_numbers_1_03131 +select untuple(hilbertDecode(2, hilbertEncode(n1, n2))) +from hilbert_numbers_03131; + +( + select n1, n2 from hilbert_numbers_03131 + union distinct + select n1, n2 from hilbert_numbers_1_03131 +) +except +( + select n1, n2 from hilbert_numbers_03131 + intersect + select n1, n2 from hilbert_numbers_1_03131 +); +drop table if exists hilbert_numbers_1_03131; + +select '----- ERRORS -----'; +select hilbertEncode(); -- { serverError TOO_FEW_ARGUMENTS_FOR_FUNCTION } +select hilbertEncode(1, 2, 3); -- { serverError TOO_MANY_ARGUMENTS_FOR_FUNCTION } +select hilbertDecode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select hilbertEncode('text'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select hilbertDecode('text', 'text'); -- { serverError ILLEGAL_COLUMN } +select hilbertEncode((1, 2), 3); -- { serverError ARGUMENT_OUT_OF_BOUND } + +SELECT '----- END -----'; +drop table if exists hilbert_numbers_03131; diff --git a/parser/testdata/03131_rewrite_sum_if_nullable/ast.json b/parser/testdata/03131_rewrite_sum_if_nullable/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03131_rewrite_sum_if_nullable/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03131_rewrite_sum_if_nullable/metadata.json b/parser/testdata/03131_rewrite_sum_if_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03131_rewrite_sum_if_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03131_rewrite_sum_if_nullable/query.sql b/parser/testdata/03131_rewrite_sum_if_nullable/query.sql new file mode 100644 index 000000000..ecc4db6fc --- /dev/null +++ b/parser/testdata/03131_rewrite_sum_if_nullable/query.sql @@ -0,0 +1,20 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/62890 +-- { echoOn } +SELECT sum(if(materialize(0), toNullable(1), 0)); +SELECT sum(if(materialize(0), toNullable(1), materialize(0))); +SELECT sum(if(materialize(0), materialize(toNullable(1)), materialize(0))); +SELECT sum(if(materialize(0), materialize(1), materialize(0))); +SELECT sum(if(dummy, 0, toNullable(0))); +SELECT 
sum(if(dummy, materialize(0), toNullable(0))); +SELECT sum(if(dummy, materialize(0), materialize(toNullable(0)))); +SELECT sum(if(s == '', v, 0)) b from VALUES ('v Nullable(Int64), s String',(1, 'x')); + +SELECT sumOrNull(if(materialize(0), toNullable(1), 0)); +SELECT sumOrNull(if(materialize(0), 1, 0)); +SELECT sum(if(materialize(0), toNullable(1), 0)) settings aggregate_functions_null_for_empty=1; +SELECT sum(if(materialize(0), 1, 0)) settings aggregate_functions_null_for_empty=1; + +SELECT sumOrNull(if(materialize(1), toNullable(1), 10)); +SELECT sumOrNull(if(materialize(1), 1, 10)); +SELECT sum(if(materialize(1), toNullable(1), 10)) settings aggregate_functions_null_for_empty=1; +SELECT sum(if(materialize( 1), 1, 10)) settings aggregate_functions_null_for_empty=1; diff --git a/parser/testdata/03132_jit_sort_description_crash_fix/ast.json b/parser/testdata/03132_jit_sort_description_crash_fix/ast.json new file mode 100644 index 000000000..7647e4c3f --- /dev/null +++ b/parser/testdata/03132_jit_sort_description_crash_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000973333, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03132_jit_sort_description_crash_fix/metadata.json b/parser/testdata/03132_jit_sort_description_crash_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03132_jit_sort_description_crash_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03132_jit_sort_description_crash_fix/query.sql b/parser/testdata/03132_jit_sort_description_crash_fix/query.sql new file mode 100644 index 000000000..f3b48afb4 --- /dev/null +++ b/parser/testdata/03132_jit_sort_description_crash_fix/query.sql @@ -0,0 +1,47 @@ +SET allow_deprecated_syntax_for_merge_tree = 1; +SET compile_sort_description = 1; +SET min_count_to_compile_sort_description = 0; + +DROP TABLE IF EXISTS test1_00395; +CREATE TABLE test1_00395 +( + col1 UInt64, + col2 Nullable(UInt64), + col3 String, + col4 Nullable(String), + col5 Array(UInt64), + col6 Array(Nullable(UInt64)), + col7 Array(String), + col8 Array(Nullable(String)), + d Date +) Engine = MergeTree(d, (col1, d), 8192); + +INSERT INTO test1_00395 VALUES (1, 1, 'a', 'a', [1], [1], ['a'], ['a'], '2000-01-01'); +INSERT INTO test1_00395 VALUES (1, NULL, 'a', 'a', [1], [1], ['a'], ['a'], '2000-01-01'); +INSERT INTO test1_00395 VALUES (1, 1, 'a', NULL, [1], [1], ['a'], ['a'], '2000-01-01'); +INSERT INTO test1_00395 VALUES (1, 1, 'a', 'a', [1], [NULL], ['a'], ['a'], '2000-01-01'); +INSERT INTO test1_00395 VALUES (1, 1, 'a', 'a', [1], [1], ['a'], [NULL], '2000-01-01'); + +SELECT count(greatest(multiIf(1, 2, toNullable(NULL), 3, 4))) FROM test1_00395 WHERE toNullable(27) GROUP BY col1 ORDER BY multiIf(27, 1, multiIf(materialize(1), toLowCardinality(2), 3, 1, 4), NULL, 4) ASC NULLS LAST, col1 DESC; + +SELECT '--'; + +SELECT count(greatest(multiIf(1, 2, toNullable(NULL), 3, 4))) FROM test1_00395 WHERE toNullable(27) GROUP BY col1 ORDER BY multiIf(27, 1, multiIf(materialize(1), toLowCardinality(2), 3, 1, 4), NULL, 4) ASC NULLS LAST, col1 DESC; + +SELECT '--'; + +SELECT multiIf(1, 2, NULL, 3, 4), count(greatest(multiIf(1, 2, NULL, toUInt256(3), 4), multiIf(1, 2, NULL, 3, 4))) FROM test1_00395 GROUP BY col1 WITH CUBE WITH TOTALS ORDER BY multiIf(27, 1, multiIf(materialize(1), toLowCardinality(2), 3, 1, 4), NULL, 4) ASC NULLS LAST; + 
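+-- Note: the duplicated queries in this file look intentional. With +-- min_count_to_compile_sort_description = 0 the first execution presumably JIT-compiles +-- the sort description, and the repeat run reuses the cached compiled comparator, which +-- appears to be the code path this crash-fix test targets.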
+SELECT '--'; + +SELECT multiIf(1, 2, NULL, 3, 4), count(greatest(multiIf(1, 2, NULL, toUInt256(3), 4), multiIf(1, 2, NULL, 3, 4))) FROM test1_00395 GROUP BY col1 WITH CUBE WITH TOTALS ORDER BY multiIf(27, 1, multiIf(materialize(1), toLowCardinality(2), 3, 1, 4), NULL, 4) ASC NULLS LAST; + +SELECT '--'; + +SELECT col1 FROM test1_00395 ORDER BY multiIf(27, 1, multiIf(materialize(1), toLowCardinality(2), 3, 1, 4), NULL, 4) ASC; + +SELECT '--'; + +SELECT col1 FROM test1_00395 ORDER BY multiIf(27, 1, multiIf(materialize(1), toLowCardinality(2), 3, 1, 4), NULL, 4) ASC; + +DROP TABLE test1_00395; diff --git a/parser/testdata/03132_rewrite_aggregate_function_with_if_implicit_cast/ast.json b/parser/testdata/03132_rewrite_aggregate_function_with_if_implicit_cast/ast.json new file mode 100644 index 000000000..6581854fd --- /dev/null +++ b/parser/testdata/03132_rewrite_aggregate_function_with_if_implicit_cast/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00161601, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03132_rewrite_aggregate_function_with_if_implicit_cast/metadata.json b/parser/testdata/03132_rewrite_aggregate_function_with_if_implicit_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03132_rewrite_aggregate_function_with_if_implicit_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03132_rewrite_aggregate_function_with_if_implicit_cast/query.sql b/parser/testdata/03132_rewrite_aggregate_function_with_if_implicit_cast/query.sql new file mode 100644 index 000000000..7b7237ea7 --- /dev/null +++ b/parser/testdata/03132_rewrite_aggregate_function_with_if_implicit_cast/query.sql @@ -0,0 +1,8 @@ +SET enable_analyzer = 1; +-- { echoOn } +SELECT concat(1, sum(if(0, toUInt128(concat('%', toLowCardinality(toNullable(1)), toUInt256(1))), materialize(0)))); +SELECT any(if((number % 10) = 5, number, CAST(NULL, 'Nullable(Int128)'))) AS a, toTypeName(a) FROM numbers(100) AS a; +EXPLAIN QUERY TREE SELECT any(if((number % 10) = 5, number, CAST(NULL, 'Nullable(Int128)'))) AS a, toTypeName(a) FROM numbers(100); + +SELECT any(if((number % 10) = 5, CAST(NULL, 'Nullable(Int128)'), number)) AS a, toTypeName(a) FROM numbers(100) AS a; +EXPLAIN QUERY TREE SELECT any(if((number % 10) = 5, CAST(NULL, 'Nullable(Int128)'), number)) AS a, toTypeName(a) FROM numbers(100); diff --git a/parser/testdata/03132_sqlancer_union_all/ast.json b/parser/testdata/03132_sqlancer_union_all/ast.json new file mode 100644 index 000000000..f046f782a --- /dev/null +++ b/parser/testdata/03132_sqlancer_union_all/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000977475, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03132_sqlancer_union_all/metadata.json b/parser/testdata/03132_sqlancer_union_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03132_sqlancer_union_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03132_sqlancer_union_all/query.sql b/parser/testdata/03132_sqlancer_union_all/query.sql new file mode 100644 index 000000000..2502ce31e --- /dev/null +++ b/parser/testdata/03132_sqlancer_union_all/query.sql @@ -0,0 +1,30 @@ +SET 
enable_analyzer = 1; + +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; + +CREATE TABLE t0 (c0 String) ENGINE = Memory() ; +CREATE TABLE IF NOT EXISTS t1 (c0 Int32, c1 Int32, c2 ALIAS c1) ENGINE = Log() ; +CREATE TABLE t2 (c0 Int32) ENGINE = MergeTree() ORDER BY tuple() ; +CREATE TABLE t3 (c0 String) ENGINE = Memory() ; +CREATE TABLE t4 (c0 Int32) ENGINE = Memory() ; +INSERT INTO t4(c0) VALUES (-405831124); +INSERT INTO t1(c1, c0) VALUES (278926179, 891140511); +INSERT INTO t4(c0) VALUES (1586457527); +INSERT INTO t3(c0) VALUES ('?/|D!6 '), ('1586457527'); +INSERT INTO t2(c0) VALUES (1475250982); + +SELECT t1.c1 +FROM t3, t1 +WHERE true AND t1.c2 +UNION ALL +SELECT t1.c1 +FROM t3, t1 +WHERE NOT t1.c2 +UNION ALL +SELECT t1.c1 +FROM t3, t1 +WHERE t1.c2 IS NULL; diff --git a/parser/testdata/03141_fetches_errors_stress/ast.json b/parser/testdata/03141_fetches_errors_stress/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03141_fetches_errors_stress/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03141_fetches_errors_stress/metadata.json b/parser/testdata/03141_fetches_errors_stress/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03141_fetches_errors_stress/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03141_fetches_errors_stress/query.sql b/parser/testdata/03141_fetches_errors_stress/query.sql new file mode 100644 index 000000000..c8749ff67 --- /dev/null +++ b/parser/testdata/03141_fetches_errors_stress/query.sql @@ -0,0 +1,27 @@ +-- Tags: no-fasttest, no-parallel +-- Tag no-parallel -- due to failpoints + +create table data_r1 (key Int, value String) engine=ReplicatedMergeTree('/tables/{database}/data', '{table}') order by tuple(); +create table data_r2 (key Int, value String) engine=ReplicatedMergeTree('/tables/{database}/data', '{table}') order by tuple(); + +system enable failpoint replicated_sends_failpoint; +insert into data_r1 select number, randomPrintableASCII(100) from numbers(100_000) settings max_block_size=1000, min_insert_block_size_rows=1000; +system disable failpoint replicated_sends_failpoint; + +system sync replica data_r2; + +system flush logs text_log, part_log; +SET max_rows_to_read = 0; -- system.text_log can be really big +select event_time_microseconds, logger_name, message from system.text_log where level = 'Error' and message like '%Malformed chunked encoding%' order by 1 format LineAsString; + +-- { echoOn } +select table, errorCodeToName(error), count() from system.part_log where + database = currentDatabase() + and error > 0 + and errorCodeToName(error) not in ('FAULT_INJECTED', 'NO_REPLICA_HAS_PART', 'ATTEMPT_TO_READ_AFTER_EOF') + and (errorCodeToName(error) != 'POCO_EXCEPTION' or exception not like '%Malformed message: Unexpected EOF%') + group by 1, 2 + order by 1, 2; + +select count() from data_r1; +select count() from data_r2; diff --git a/parser/testdata/03141_wildcard_grants/ast.json b/parser/testdata/03141_wildcard_grants/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03141_wildcard_grants/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03141_wildcard_grants/metadata.json b/parser/testdata/03141_wildcard_grants/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03141_wildcard_grants/metadata.json @@ -0,0 +1 @@ +{"todo": true} 
diff --git a/parser/testdata/03141_wildcard_grants/query.sql b/parser/testdata/03141_wildcard_grants/query.sql new file mode 100644 index 000000000..42f1b9476 --- /dev/null +++ b/parser/testdata/03141_wildcard_grants/query.sql @@ -0,0 +1,26 @@ +-- Tags: no-parallel + +DROP USER IF EXISTS user_03141; +CREATE USER user_03141; + +GRANT SELECT ON test*.* TO user_03141; +GRANT SELECT ON team*.* TO user_03141; +GRANT INSERT ON team*.* TO user_03141; +SHOW GRANTS FOR user_03141; +SELECT '---'; + +GRANT INSERT ON foo* TO user_03141; +GRANT INSERT ON foobar* TO user_03141 WITH GRANT OPTION; +SHOW GRANTS FOR user_03141; +SELECT '---'; + +REVOKE SELECT ON test.* FROM user_03141; +REVOKE SELECT ON team*.* FROM user_03141; +SHOW GRANTS FOR user_03141; +SELECT '---'; + +GRANT SELECT(bar) ON foo.test* TO user_03141; -- { clientError SYNTAX_ERROR } +GRANT SELECT(bar) ON foo.* TO user_03141; -- { clientError SYNTAX_ERROR } +GRANT SELECT(bar) ON *.* TO user_03141; -- { clientError SYNTAX_ERROR } + +DROP USER user_03141; diff --git a/parser/testdata/03142_alter_comment_parameterized_view/ast.json b/parser/testdata/03142_alter_comment_parameterized_view/ast.json new file mode 100644 index 000000000..449a26810 --- /dev/null +++ b/parser/testdata/03142_alter_comment_parameterized_view/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table_comment (children 1)" + }, + { + "explain": " Identifier test_table_comment" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001705057, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/03142_alter_comment_parameterized_view/metadata.json b/parser/testdata/03142_alter_comment_parameterized_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03142_alter_comment_parameterized_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03142_alter_comment_parameterized_view/query.sql b/parser/testdata/03142_alter_comment_parameterized_view/query.sql new file mode 100644 index 000000000..98318e99e --- /dev/null +++ b/parser/testdata/03142_alter_comment_parameterized_view/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS test_table_comment; +CREATE VIEW test_table_comment AS SELECT toString({date_from:String}); +ALTER TABLE test_table_comment MODIFY COMMENT 'test comment'; +SELECT create_table_query FROM system.tables WHERE name = 'test_table_comment' AND database = currentDatabase(); +DROP TABLE test_table_comment; diff --git a/parser/testdata/03142_skip_ANSI_in_UTF8_compute_width/ast.json b/parser/testdata/03142_skip_ANSI_in_UTF8_compute_width/ast.json new file mode 100644 index 000000000..eabbfd031 --- /dev/null +++ b/parser/testdata/03142_skip_ANSI_in_UTF8_compute_width/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function format (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal '\u001B[38;2;{0};{1};{2}m█\u001B[0m'" + }, + { + "explain": " Literal UInt64_255" + }, + { + "explain": " Literal UInt64_128" + }, + { + "explain": " Literal UInt64_128" + }, + { + "explain": " Identifier Pretty" + } + ], + + "rows": 11, + + "statistics": 
+ { + "elapsed": 0.001115303, + "rows_read": 11, + "bytes_read": 411 + } +} diff --git a/parser/testdata/03142_skip_ANSI_in_UTF8_compute_width/metadata.json b/parser/testdata/03142_skip_ANSI_in_UTF8_compute_width/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03142_skip_ANSI_in_UTF8_compute_width/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03142_skip_ANSI_in_UTF8_compute_width/query.sql b/parser/testdata/03142_skip_ANSI_in_UTF8_compute_width/query.sql new file mode 100644 index 000000000..49f689a4c --- /dev/null +++ b/parser/testdata/03142_skip_ANSI_in_UTF8_compute_width/query.sql @@ -0,0 +1,2 @@ +SELECT format('\x1b[38;2;{0};{1};{2}m█\x1b[0m', 255, 128, 128) AS x FORMAT Pretty; +SELECT 'Hello', format('\x1b[38;2;{0};{1};{2}m█\x1b[0m test \x1b[38;2;{0};{1};{2}m█\x1b[0m', 255, 128, 128) AS x FORMAT Pretty; diff --git a/parser/testdata/03142_untuple_crash/ast.json b/parser/testdata/03142_untuple_crash/ast.json new file mode 100644 index 000000000..5ac7b0936 --- /dev/null +++ b/parser/testdata/03142_untuple_crash/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001352511, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03142_untuple_crash/metadata.json b/parser/testdata/03142_untuple_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03142_untuple_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03142_untuple_crash/query.sql b/parser/testdata/03142_untuple_crash/query.sql new file mode 100644 index 000000000..451069732 --- /dev/null +++ b/parser/testdata/03142_untuple_crash/query.sql @@ -0,0 +1,2 @@ +SET enable_analyzer=1; +SELECT untuple(x -> 0) -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/03142_window_function_limit_by/ast.json b/parser/testdata/03142_window_function_limit_by/ast.json new file mode 100644 index 000000000..52d7e3cfc --- /dev/null +++ b/parser/testdata/03142_window_function_limit_by/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001496754, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03142_window_function_limit_by/metadata.json b/parser/testdata/03142_window_function_limit_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03142_window_function_limit_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03142_window_function_limit_by/query.sql b/parser/testdata/03142_window_function_limit_by/query.sql new file mode 100644 index 000000000..1565170a7 --- /dev/null +++ b/parser/testdata/03142_window_function_limit_by/query.sql @@ -0,0 +1,42 @@ +SET enable_analyzer = 1; + + +-- https://github.com/ClickHouse/ClickHouse/issues/55965 + +CREATE TABLE error_win_func +( + `k` String, + `in` UInt64, + `out` UInt64 +) +ENGINE = MergeTree +ORDER BY k AS +SELECT * from VALUES (('a', 2, 4), ('a', 4, 2), ('a', 6, 3), ('a', 8, 4)); + +SELECT + k, + in / out AS ratio, + count(*) OVER w AS count_rows_w +FROM error_win_func +WINDOW + w AS (ROWS BETWEEN CURRENT ROW AND 3 FOLLOWING) +ORDER BY ALL +LIMIT 1 BY + k; + +DROP TABLE error_win_func; + +-- 
https://github.com/ClickHouse/ClickHouse/issues/47217 + +CREATE TABLE t(n String, st String) ENGINE = Memory as +select * from values(('a', 'x'), ('b', 'y'), ('c', 'z')); + +SELECT + n as m, + count() OVER (PARTITION BY m) cnt +FROM t +WHERE st IN ('x', 'y') +ORDER BY ALL +LIMIT 1 BY m; + +DROP TABLE t; diff --git a/parser/testdata/03143_asof_join_ddb_long/ast.json b/parser/testdata/03143_asof_join_ddb_long/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03143_asof_join_ddb_long/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03143_asof_join_ddb_long/metadata.json b/parser/testdata/03143_asof_join_ddb_long/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03143_asof_join_ddb_long/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03143_asof_join_ddb_long/query.sql b/parser/testdata/03143_asof_join_ddb_long/query.sql new file mode 100644 index 000000000..c93e6618b --- /dev/null +++ b/parser/testdata/03143_asof_join_ddb_long/query.sql @@ -0,0 +1,52 @@ +-- Tags: long, no-random-merge-tree-settings, no-distributed-cache +-- no-random-merge-tree-settings - times out in private + +DROP TABLE IF EXISTS build; +DROP TABLE IF EXISTS skewed_probe; + +SET session_timezone = 'UTC'; + +CREATE TABLE build ENGINE = MergeTree ORDER BY (key, begin) +AS + SELECT + toDateTime('1990-03-21 13:00:00') + INTERVAL number MINUTE AS begin, + number % 4 AS key, + number AS value + FROM numbers(0, 4000000); + +CREATE TABLE skewed_probe ENGINE = MergeTree ORDER BY (key, begin) +AS + SELECT + toDateTime('1990-04-21 13:00:01') + INTERVAL number MINUTE AS begin, + 0 AS key + FROM numbers(0, 5) + UNION ALL + SELECT + toDateTime('1990-05-21 13:00:01') + INTERVAL number MINUTE AS begin, + 1 AS key + FROM numbers(0, 10) + UNION ALL + SELECT + toDateTime('1990-06-21 13:00:01') + INTERVAL number MINUTE AS begin, + 2 AS key + FROM numbers(0, 20) + UNION ALL + SELECT + toDateTime('1990-03-21 13:00:01') + INTERVAL number MINUTE AS begin, + 3 AS key + FROM numbers(0, 4000000); + +SET max_rows_to_read = 0; + +SELECT SUM(value), COUNT(*) +FROM skewed_probe +ASOF JOIN build +USING (key, begin) +; + +SELECT SUM(value), COUNT(*) +FROM skewed_probe +ASOF JOIN build +USING (key, begin) +SETTINGS join_algorithm = 'full_sorting_merge' +; diff --git a/parser/testdata/03143_cte_scope/ast.json b/parser/testdata/03143_cte_scope/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03143_cte_scope/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03143_cte_scope/metadata.json b/parser/testdata/03143_cte_scope/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03143_cte_scope/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03143_cte_scope/query.sql b/parser/testdata/03143_cte_scope/query.sql new file mode 100644 index 000000000..8b49d9019 --- /dev/null +++ b/parser/testdata/03143_cte_scope/query.sql @@ -0,0 +1,43 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/56287 +SET enable_analyzer = 1; +DROP TABLE IF EXISTS tmp_a; +DROP TABLE IF EXISTS tmp_b; + +CREATE TEMPORARY TABLE IF NOT EXISTS tmp_a +( + k1 Int32, + k2 Int32, + d1 Int32, + d2 Int32 +) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO tmp_a VALUES (1,2,3,4); +INSERT INTO tmp_a VALUES (5,6,7,8); + +CREATE TEMPORARY TABLE IF NOT EXISTS tmp_b ( + k1 Int32, + k2 Int32, + d0 Float64 +) ENGINE = MergeTree ORDER BY tuple(); 
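+-- Both halves of the join below deliberately reuse the CTE names tmp0, tmp_s and tmp1; +-- the test presumably checks that the analyzer resolves each WITH clause within its own +-- subquery scope instead of leaking definitions across the outer join (issue 56287 above).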
+INSERT INTO tmp_b VALUES (1,2,0.3); +INSERT INTO tmp_b VALUES (5,6,0.4); + +SELECT tb1.*,tb2.* +FROM + ( + with tmp0 as (select k1,k2,d1 from tmp_a), + tmp_s as (select k1,k2,d0 from tmp_b), + tmp1 as (select tmp0.*,tmp_s.d0 from tmp0 left join tmp_s on tmp0.k1=tmp_s.k1 and tmp0.k2=tmp_s.k2) + select * from tmp1 + ) as tb1 + LEFT JOIN + ( + with tmp0 as (select k1,k2,d2 from tmp_a), + tmp_s as (select k1,k2,d0 from tmp_b), + tmp1 as (select tmp0.*,tmp_s.d0 from tmp0 left join tmp_s on tmp0.k1=tmp_s.k1 and tmp0.k2=tmp_s.k2) + select * from tmp1 + ) as tb2 + ON tb1.k1=tb2.k1 AND tb1.k2=tb2.k2 +ORDER BY k1; + +DROP TABLE IF EXISTS tmp_a; +DROP TABLE IF EXISTS tmp_b; diff --git a/parser/testdata/03143_group_by_constant_secondary/ast.json b/parser/testdata/03143_group_by_constant_secondary/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03143_group_by_constant_secondary/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03143_group_by_constant_secondary/metadata.json b/parser/testdata/03143_group_by_constant_secondary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03143_group_by_constant_secondary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03143_group_by_constant_secondary/query.sql b/parser/testdata/03143_group_by_constant_secondary/query.sql new file mode 100644 index 000000000..099160dd5 --- /dev/null +++ b/parser/testdata/03143_group_by_constant_secondary/query.sql @@ -0,0 +1,8 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/63264 +SELECT count() +FROM remote(test_cluster_two_shards, system, one) +GROUP BY 'hi' +SETTINGS + enable_analyzer = 1, + group_by_two_level_threshold = 1, + group_by_two_level_threshold_bytes = 33950592; diff --git a/parser/testdata/03143_join_filter_push_down_filled_join_fix/ast.json b/parser/testdata/03143_join_filter_push_down_filled_join_fix/ast.json new file mode 100644 index 000000000..1e3588a35 --- /dev/null +++ b/parser/testdata/03143_join_filter_push_down_filled_join_fix/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00136227, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03143_join_filter_push_down_filled_join_fix/metadata.json b/parser/testdata/03143_join_filter_push_down_filled_join_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03143_join_filter_push_down_filled_join_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03143_join_filter_push_down_filled_join_fix/query.sql b/parser/testdata/03143_join_filter_push_down_filled_join_fix/query.sql new file mode 100644 index 000000000..fc816623b --- /dev/null +++ b/parser/testdata/03143_join_filter_push_down_filled_join_fix/query.sql @@ -0,0 +1,45 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 +( + id UInt64, + external_id UInt64 +) +ENGINE = MergeTree +ORDER BY id; + +DROP TABLE IF EXISTS t2; +CREATE TABLE t2 +( + id UInt64, + name String +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO t1 VALUES (1, 1); + +INSERT INTO t2 VALUES (1, 'test'); + +DROP DICTIONARY IF EXISTS d2; +CREATE DICTIONARY d2 +( + id UInt64, + name String, +) +PRIMARY KEY id +SOURCE(CLICKHOUSE( + table t2)) +LIFETIME(MIN 600 MAX 900) +LAYOUT(HASHED()); + +SELECT + * +FROM + t1 + 
LEFT JOIN d2 ON d2.id = t1.external_id + WHERE t1.id = 1 +LIMIT 1; + +DROP DICTIONARY d2; +DROP TABLE t2; +DROP TABLE t1; diff --git a/parser/testdata/03143_parallel_replicas_mat_view_bug/ast.json b/parser/testdata/03143_parallel_replicas_mat_view_bug/ast.json new file mode 100644 index 000000000..bccf3b42c --- /dev/null +++ b/parser/testdata/03143_parallel_replicas_mat_view_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mv_table (children 1)" + }, + { + "explain": " Identifier mv_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001183882, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/03143_parallel_replicas_mat_view_bug/metadata.json b/parser/testdata/03143_parallel_replicas_mat_view_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03143_parallel_replicas_mat_view_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03143_parallel_replicas_mat_view_bug/query.sql b/parser/testdata/03143_parallel_replicas_mat_view_bug/query.sql new file mode 100644 index 000000000..ffc582429 --- /dev/null +++ b/parser/testdata/03143_parallel_replicas_mat_view_bug/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS mv_table SYNC; +DROP TABLE IF EXISTS null_table; + +SET cluster_for_parallel_replicas='parallel_replicas', max_parallel_replicas=4, enable_parallel_replicas=1; +SET enable_analyzer=1; + +CREATE TABLE null_table (str String) ENGINE = Null; +CREATE MATERIALIZED VIEW mv_table (str String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/03143_parallel_replicas_mat_view_bug', '{replica}') ORDER BY str AS SELECT str AS str FROM null_table; +INSERT INTO null_table VALUES ('test'); + +SELECT * FROM mv_table; diff --git a/parser/testdata/03143_ttl_in_system_parts_columns_table/ast.json b/parser/testdata/03143_ttl_in_system_parts_columns_table/ast.json new file mode 100644 index 000000000..0facaf070 --- /dev/null +++ b/parser/testdata/03143_ttl_in_system_parts_columns_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_03143 (children 1)" + }, + { + "explain": " Identifier test_03143" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001297037, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03143_ttl_in_system_parts_columns_table/metadata.json b/parser/testdata/03143_ttl_in_system_parts_columns_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03143_ttl_in_system_parts_columns_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03143_ttl_in_system_parts_columns_table/query.sql b/parser/testdata/03143_ttl_in_system_parts_columns_table/query.sql new file mode 100644 index 000000000..50adab2e9 --- /dev/null +++ b/parser/testdata/03143_ttl_in_system_parts_columns_table/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS test_03143; + +CREATE TABLE test_03143 ( + timestamp DateTime, + x UInt32 TTL timestamp + INTERVAL 1 MONTH, + y String TTL timestamp + INTERVAL 1 DAY, + z String +) +ENGINE = MergeTree +ORDER BY tuple(); + + +INSERT INTO test_03143 VALUES ('2100-01-01', 123, 'Hello, world!', 'xxx yyy'); + +SELECT + name, + column, + type, + column_ttl_min, + column_ttl_max +FROM system.parts_columns +WHERE table = 'test_03143' and database = currentDatabase() +ORDER BY name, 
column; + +DROP TABLE IF EXISTS test_03143; diff --git a/parser/testdata/03143_window_functions_qualify_validation/ast.json b/parser/testdata/03143_window_functions_qualify_validation/ast.json new file mode 100644 index 000000000..ad29d02b1 --- /dev/null +++ b/parser/testdata/03143_window_functions_qualify_validation/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery uk_price_paid (children 1)" + }, + { + "explain": " Identifier uk_price_paid" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001071233, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/03143_window_functions_qualify_validation/metadata.json b/parser/testdata/03143_window_functions_qualify_validation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03143_window_functions_qualify_validation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03143_window_functions_qualify_validation/query.sql b/parser/testdata/03143_window_functions_qualify_validation/query.sql new file mode 100644 index 000000000..5adbe7ff2 --- /dev/null +++ b/parser/testdata/03143_window_functions_qualify_validation/query.sql @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS uk_price_paid; +CREATE TABLE uk_price_paid +( + `price` UInt32, + `date` Date, + `postcode1` LowCardinality(String), + `postcode2` LowCardinality(String), + `type` Enum8('terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4, 'other' = 0), + `is_new` UInt8, + `duration` Enum8('freehold' = 1, 'leasehold' = 2, 'unknown' = 0), + `addr1` String, + `addr2` String, + `street` LowCardinality(String), + `locality` LowCardinality(String), + `town` LowCardinality(String), + `district` LowCardinality(String), + `county` LowCardinality(String) +) +ENGINE = MergeTree +ORDER BY (postcode1, postcode2, addr1, addr2); + +SELECT count(), (quantile(0.9)(price) OVER ()) AS price_quantile FROM uk_price_paid WHERE toYear(date) = 2023 QUALIFY price > price_quantile; -- { serverError NOT_AN_AGGREGATE } + +SELECT count() FROM uk_price_paid WHERE toYear(date) = 2023 QUALIFY price > (quantile(0.9)(price) OVER ()); -- { serverError NOT_AN_AGGREGATE } + +DROP TABLE uk_price_paid; diff --git a/parser/testdata/03144_aggregate_states_with_different_types/ast.json b/parser/testdata/03144_aggregate_states_with_different_types/ast.json new file mode 100644 index 000000000..d1c93b189 --- /dev/null +++ b/parser/testdata/03144_aggregate_states_with_different_types/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00105486, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03144_aggregate_states_with_different_types/metadata.json b/parser/testdata/03144_aggregate_states_with_different_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03144_aggregate_states_with_different_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03144_aggregate_states_with_different_types/query.sql b/parser/testdata/03144_aggregate_states_with_different_types/query.sql new file mode 100644 index 000000000..4445d5b84 --- /dev/null +++ b/parser/testdata/03144_aggregate_states_with_different_types/query.sql @@ -0,0 +1,28 @@ +SET enable_analyzer = 1; + +select * APPLY hex +from ( + select ( + select 
stochasticLogisticRegressionState(0.1, 0., 5, 'SGD')(number, number) + from numbers(10) + ) as col1, + ( + select stochasticLinearRegressionState(0.1, 0., 5, 'SGD')(number, number) + from numbers(10) + ) as col2 +from numbers(1) +); + +SELECT * +FROM +( + SELECT + bitmapHasAny(bitmapBuild([toUInt8(1)]), + ( + SELECT groupBitmapState(toUInt8(1)) + )) has1, + bitmapHasAny(bitmapBuild([toUInt64(1)]), + ( + SELECT groupBitmapState(toUInt64(2)) + )) has2 +); diff --git a/parser/testdata/03144_alter_column_and_read/ast.json b/parser/testdata/03144_alter_column_and_read/ast.json new file mode 100644 index 000000000..ce3a222c0 --- /dev/null +++ b/parser/testdata/03144_alter_column_and_read/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001401571, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/03144_alter_column_and_read/metadata.json b/parser/testdata/03144_alter_column_and_read/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03144_alter_column_and_read/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03144_alter_column_and_read/query.sql b/parser/testdata/03144_alter_column_and_read/query.sql new file mode 100644 index 000000000..d198c3447 --- /dev/null +++ b/parser/testdata/03144_alter_column_and_read/query.sql @@ -0,0 +1,11 @@ +drop table if exists tab; +create table tab (x UInt32) engine = MergeTree order by tuple(); + +insert into tab select number from numbers(10); + +set alter_sync = 0; +alter table tab update x = x + sleepEachRow(0.1) where 1; +alter table tab modify column x String; +alter table tab add column y String default x || '_42'; + +select x, y from tab order by x; diff --git a/parser/testdata/03144_asof_join_ddb_doubles/ast.json b/parser/testdata/03144_asof_join_ddb_doubles/ast.json new file mode 100644 index 000000000..229cee357 --- /dev/null +++ b/parser/testdata/03144_asof_join_ddb_doubles/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001331715, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03144_asof_join_ddb_doubles/metadata.json b/parser/testdata/03144_asof_join_ddb_doubles/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03144_asof_join_ddb_doubles/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03144_asof_join_ddb_doubles/query.sql b/parser/testdata/03144_asof_join_ddb_doubles/query.sql new file mode 100644 index 000000000..969614138 --- /dev/null +++ b/parser/testdata/03144_asof_join_ddb_doubles/query.sql @@ -0,0 +1,64 @@ +SET join_algorithm = 'full_sorting_merge'; +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS events0; + +CREATE TABLE events0 ( + begin Float64, + value Int32 +) ENGINE = MergeTree ORDER BY begin; + +INSERT INTO events0 VALUES (1.0, 0), (3.0, 1), (6.0, 2), (8.0, 3); + +SELECT p.ts, e.value +FROM + (SELECT number :: Float64 AS ts FROM numbers(10)) p +ASOF JOIN events0 e +ON p.ts >= e.begin +ORDER BY p.ts ASC; + +SELECT p.ts, e.value +FROM + (SELECT number :: Float64 AS ts FROM numbers(10)) p +ASOF LEFT JOIN events0 e +ON p.ts >= e.begin +ORDER BY p.ts ASC +-- SETTINGS join_use_nulls = 1 +; + +DROP 
TABLE IF EXISTS events0; + +DROP TABLE IF EXISTS events; +DROP TABLE IF EXISTS probes; + +CREATE TABLE events ( + key Int32, + begin Float64, + value Int32 +) ENGINE = MergeTree ORDER BY (key, begin); + +INSERT INTO events VALUES (1, 1.0, 0), (1, 3.0, 1), (1, 6.0, 2), (1, 8.0, 3), (2, 0.0, 10), (2, 7.0, 20), (2, 11.0, 30); + +CREATE TABLE probes ( + key Int32, + ts Float64 +) ENGINE = MergeTree ORDER BY (key, ts) AS +SELECT + key.number, + ts.number +FROM + numbers(1, 2) as key, + numbers(10) as ts +SETTINGS join_algorithm = 'hash'; + +SELECT p.key, p.ts, e.value +FROM probes p +ASOF JOIN events e +ON p.key = e.key AND p.ts >= e.begin +ORDER BY p.key, p.ts ASC; + +SELECT p.key, p.ts, e.value +FROM probes p +ASOF LEFT JOIN events e +ON p.key = e.key AND p.ts >= e.begin +ORDER BY p.key, p.ts ASC NULLS FIRST; diff --git a/parser/testdata/03144_fuzz_quoted_type_name/ast.json b/parser/testdata/03144_fuzz_quoted_type_name/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03144_fuzz_quoted_type_name/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03144_fuzz_quoted_type_name/metadata.json b/parser/testdata/03144_fuzz_quoted_type_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03144_fuzz_quoted_type_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03144_fuzz_quoted_type_name/query.sql b/parser/testdata/03144_fuzz_quoted_type_name/query.sql new file mode 100644 index 000000000..4f6cc6c86 --- /dev/null +++ b/parser/testdata/03144_fuzz_quoted_type_name/query.sql @@ -0,0 +1,7 @@ +create table t (x 123) engine Memory; -- { clientError SYNTAX_ERROR } +create table t (x `a.b`) engine Memory; -- { clientError SYNTAX_ERROR } +create table t (x Array(`a.b`)) engine Memory; -- { clientError SYNTAX_ERROR } + +create table t (x Array(`ab`)) engine Memory; -- { serverError UNKNOWN_TYPE } +create table t (x `ab`) engine Memory; -- { serverError UNKNOWN_TYPE } +create table t (x `Int64`) engine Memory; \ No newline at end of file diff --git a/parser/testdata/03144_invalid_filter/ast.json b/parser/testdata/03144_invalid_filter/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03144_invalid_filter/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03144_invalid_filter/metadata.json b/parser/testdata/03144_invalid_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03144_invalid_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03144_invalid_filter/query.sql b/parser/testdata/03144_invalid_filter/query.sql new file mode 100644 index 000000000..5b434972c --- /dev/null +++ b/parser/testdata/03144_invalid_filter/query.sql @@ -0,0 +1,14 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/48049 +SET enable_analyzer = 1; + +CREATE TABLE test_table (`id` UInt64, `value` String) ENGINE = TinyLog() AS Select number, number::String from numbers(10); + +WITH CAST(tuple(1), 'Tuple (value UInt64)') AS compound_value +SELECT id, test_table.* APPLY x -> compound_value.* +FROM test_table +WHERE arrayMap(x -> toString(x) AS lambda, [NULL, 256, 257, NULL, NULL]) +SETTINGS convert_query_to_cnf = true, optimize_using_constraints = true, optimize_substitute_columns = true; -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER } + +DESCRIBE TABLE (SELECT test_table.COLUMNS(id) FROM test_table WHERE '2147483647'); -- { serverError 
ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER } + +DROP TABLE test_table; diff --git a/parser/testdata/03145_asof_join_ddb_inequalities/ast.json b/parser/testdata/03145_asof_join_ddb_inequalities/ast.json new file mode 100644 index 000000000..51c2d578b --- /dev/null +++ b/parser/testdata/03145_asof_join_ddb_inequalities/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery events0 (children 1)" + }, + { + "explain": " Identifier events0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000983261, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/03145_asof_join_ddb_inequalities/metadata.json b/parser/testdata/03145_asof_join_ddb_inequalities/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03145_asof_join_ddb_inequalities/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03145_asof_join_ddb_inequalities/query.sql b/parser/testdata/03145_asof_join_ddb_inequalities/query.sql new file mode 100644 index 000000000..c4f1283a4 --- /dev/null +++ b/parser/testdata/03145_asof_join_ddb_inequalities/query.sql @@ -0,0 +1,67 @@ +DROP TABLE IF EXISTS events0; +DROP TABLE IF EXISTS probe0; + +SET enable_analyzer = 1; +SET join_algorithm = 'full_sorting_merge'; +SET date_time_input_format='basic'; + +CREATE TABLE events0 ( + begin Nullable(DateTime('UTC')), + value Int32 +) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO events0 SELECT toDateTime('2023-03-21 13:00:00', 'UTC') + INTERVAL number HOUR, number FROM numbers(4); +INSERT INTO events0 VALUES (NULL, -10),('0000-01-01 00:00:00', -1), ('9999-12-31 23:59:59', 9); + +CREATE TABLE probe0 ( + begin Nullable(DateTime('UTC')) +) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO probe0 SELECT toDateTime('2023-03-21 12:00:00', 'UTC') + INTERVAL number HOUR FROM numbers(10); +INSERT INTO probe0 VALUES (NULL),('9999-12-31 23:59:59'); + +SET join_use_nulls = 1; + +SELECT '-'; +SELECT p.begin, e.begin, e.value +FROM probe0 p +ASOF JOIN events0 e +ON p.begin > e.begin +ORDER BY p.begin ASC; + +SELECT '-'; +SELECT p.begin, e.begin, e.value +FROM probe0 p +ASOF LEFT JOIN events0 e +ON p.begin > e.begin +ORDER BY p.begin ASC; + +SELECT p.begin, e.begin, e.value +FROM probe0 p +ASOF JOIN events0 e +ON p.begin <= e.begin +ORDER BY p.begin ASC; + +SELECT '-'; +SELECT p.begin, e.begin, e.value +FROM probe0 p +ASOF LEFT JOIN events0 e +ON p.begin <= e.begin +ORDER BY p.begin ASC; + +SELECT '-'; +SELECT p.begin, e.begin, e.value +FROM probe0 p +ASOF JOIN events0 e +ON p.begin < e.begin +ORDER BY p.begin ASC; + +SELECT '-'; +SELECT p.begin, e.begin, e.value +FROM probe0 p +ASOF LEFT JOIN events0 e +ON p.begin < e.begin +ORDER BY p.begin ASC; + +DROP TABLE IF EXISTS events0; +DROP TABLE IF EXISTS probe0; diff --git a/parser/testdata/03145_unicode_quotes/ast.json b/parser/testdata/03145_unicode_quotes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03145_unicode_quotes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03145_unicode_quotes/metadata.json b/parser/testdata/03145_unicode_quotes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03145_unicode_quotes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03145_unicode_quotes/query.sql b/parser/testdata/03145_unicode_quotes/query.sql new file mode 100644 index 000000000..34a465cd8 --- /dev/null +++ 
b/parser/testdata/03145_unicode_quotes/query.sql @@ -0,0 +1,9 @@ +-- They work: +SELECT ‘This is an example of using English-style Unicode single quotes.’ AS “curly”; + +-- It is unspecified which escaping rules apply inside the literal in Unicode quotes, and currently none apply (similarly to heredocs) +-- This could be changed. + +SELECT ‘This is \an \\example ‘of using English-style Unicode single quotes.\’ AS “\c\\u\\\r\\\\l\\\\\y\\\\\\” FORMAT Vertical; + +SELECT ‘’ = '' AS “1” FORMAT JSONLines; diff --git a/parser/testdata/03146_bug47862/ast.json b/parser/testdata/03146_bug47862/ast.json new file mode 100644 index 000000000..cb4c12381 --- /dev/null +++ b/parser/testdata/03146_bug47862/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toInt64 (alias cast_res) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier lookup_res" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.000993419, + "rows_read": 7, + "bytes_read": 282 + } +} diff --git a/parser/testdata/03146_bug47862/metadata.json b/parser/testdata/03146_bug47862/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03146_bug47862/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03146_bug47862/query.sql b/parser/testdata/03146_bug47862/query.sql new file mode 100644 index 000000000..0f4114006 --- /dev/null +++ b/parser/testdata/03146_bug47862/query.sql @@ -0,0 +1,12 @@ +SELECT toInt64(lookup_res) AS cast_res +FROM ( + SELECT + indexOf(field_id, 10) AS val_idx, + ['110'][val_idx] AS lookup_res + FROM ( + SELECT arrayJoin([[10], [15]]) AS field_id + ) + WHERE val_idx != 0 +) +WHERE cast_res > 0 +SETTINGS enable_analyzer = 1; diff --git a/parser/testdata/03146_create_index_compatibility/ast.json b/parser/testdata/03146_create_index_compatibility/ast.json new file mode 100644 index 000000000..b6bff481b --- /dev/null +++ b/parser/testdata/03146_create_index_compatibility/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_index_3146 (children 1)" + }, + { + "explain": " Identifier t_index_3146" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001301033, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/03146_create_index_compatibility/metadata.json b/parser/testdata/03146_create_index_compatibility/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03146_create_index_compatibility/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03146_create_index_compatibility/query.sql b/parser/testdata/03146_create_index_compatibility/query.sql new file mode 100644 index 000000000..ede5bc056 --- /dev/null +++ b/parser/testdata/03146_create_index_compatibility/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS t_index_3146; + +CREATE TABLE t_index_3146 (a UInt64, b UInt64) ENGINE = MergeTree ORDER BY tuple(); + +SET allow_create_index_without_type = 1; + +CREATE INDEX i1 ON t_index_3146 (a) TYPE minmax; +CREATE INDEX i2 ON t_index_3146 (a, b) TYPE minmax; +CREATE INDEX i3 ON t_index_3146 (a DESC, b ASC) TYPE minmax; +CREATE INDEX i4 ON 
t_index_3146 a TYPE minmax; +CREATE INDEX i5 ON t_index_3146 (a); -- ignored +CREATE INDEX i6 ON t_index_3146 (a DESC, b ASC); -- ignored +CREATE INDEX i7 ON t_index_3146; -- { clientError SYNTAX_ERROR } +CREATE INDEX i8 ON t_index_3146 a, b TYPE minmax; -- { clientError SYNTAX_ERROR } + +SHOW CREATE TABLE t_index_3146; +DROP TABLE t_index_3146; diff --git a/parser/testdata/03146_parameterized_view_with_date/ast.json b/parser/testdata/03146_parameterized_view_with_date/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03146_parameterized_view_with_date/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03146_parameterized_view_with_date/metadata.json b/parser/testdata/03146_parameterized_view_with_date/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03146_parameterized_view_with_date/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03146_parameterized_view_with_date/query.sql b/parser/testdata/03146_parameterized_view_with_date/query.sql new file mode 100644 index 000000000..2cfadb70b --- /dev/null +++ b/parser/testdata/03146_parameterized_view_with_date/query.sql @@ -0,0 +1,14 @@ + +drop table if exists table_pv; +create table table_pv (id Int32, timestamp_field DateTime) engine = Memory(); + +insert into table_pv values(1, '2024-03-01 00:00:00'); +insert into table_pv values (2, '2024-04-01 01:00:00'); + +create view pv as select * from table_pv where timestamp_field > {timestamp_param:DateTime}; + +select * from pv (timestamp_param=toDateTime('2024-04-01 00:00:01')); + +select * from pv (timestamp_param=toDateTime('2024-040')); -- { serverError CANNOT_PARSE_DATETIME } + +drop table table_pv; diff --git a/parser/testdata/03146_tpc_ds_grouping/ast.json b/parser/testdata/03146_tpc_ds_grouping/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03146_tpc_ds_grouping/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03146_tpc_ds_grouping/metadata.json b/parser/testdata/03146_tpc_ds_grouping/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03146_tpc_ds_grouping/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03146_tpc_ds_grouping/query.sql b/parser/testdata/03146_tpc_ds_grouping/query.sql new file mode 100644 index 000000000..cb290086b --- /dev/null +++ b/parser/testdata/03146_tpc_ds_grouping/query.sql @@ -0,0 +1,71 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/46335 +SET enable_analyzer = 1; +SELECT + key_a + key_b AS d, + rank() OVER () AS f +FROM + ( + SELECT + rand() % 10 AS key_a, + rand(1) % 5 AS key_b, + number + FROM numbers(100) + ) +GROUP BY + key_a, + key_b +WITH ROLLUP +ORDER BY multiIf(d = 0, key_a, NULL) ASC +FORMAT Null; + +SELECT + key_a + key_b AS d, + rank() OVER (PARTITION BY key_a + key_b) AS f +FROM + ( + SELECT + rand() % 10 AS key_a, + rand(1) % 5 AS key_b, + number + FROM numbers(100) + ) +GROUP BY + key_a, + key_b +WITH ROLLUP +ORDER BY multiIf(d = 0, key_a, NULL) ASC +FORMAT Null; + + +SELECT + grouping(key_a) + grouping(key_b) AS d, + rank() OVER (PARTITION BY grouping(key_a) + grouping(key_b), multiIf(grouping(key_b) = 0, key_a, NULL)) AS f +FROM + ( + SELECT + rand() % 10 AS key_a, + rand(1) % 5 AS key_b, + number + FROM numbers(100) + ) +GROUP BY + key_a, + key_b +WITH ROLLUP +ORDER BY multiIf(d = 0, key_a, NULL) ASC +FORMAT Null; + +SELECT grouping(key_a) + grouping(key_b) AS d +FROM + ( + 
SELECT + rand() % 10 AS key_a, + rand(toLowCardinality(1)) % 5 AS key_b, + number + FROM numbers(100) + ) +GROUP BY + key_a, + key_b +WITH ROLLUP +FORMAT Null; diff --git a/parser/testdata/03147_asof_join_ddb_missing/ast.json b/parser/testdata/03147_asof_join_ddb_missing/ast.json new file mode 100644 index 000000000..4876818ad --- /dev/null +++ b/parser/testdata/03147_asof_join_ddb_missing/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001116572, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03147_asof_join_ddb_missing/metadata.json b/parser/testdata/03147_asof_join_ddb_missing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03147_asof_join_ddb_missing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03147_asof_join_ddb_missing/query.sql b/parser/testdata/03147_asof_join_ddb_missing/query.sql new file mode 100644 index 000000000..f6795a71a --- /dev/null +++ b/parser/testdata/03147_asof_join_ddb_missing/query.sql @@ -0,0 +1,186 @@ +SET enable_analyzer=1; + +SET session_timezone = 'UTC'; +SET joined_subquery_requires_alias = 0; +SET enable_analyzer = 1; +SET join_algorithm = 'full_sorting_merge'; + +-- # 10 dates, 5 keys +WITH build AS ( + SELECT + k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(10), (SELECT number AS k FROM numbers(5)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + k * 2 AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v) +FROM probe ASOF JOIN build USING (k, t); + +-- # Coverage: Missing right side bin +WITH build AS ( + SELECT + k * 2 AS k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(10), (SELECT number AS k FROM numbers(5)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + intDiv(k, 2) AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v), COUNT(*) +FROM probe ASOF JOIN build USING (k, t); + +-- # 20 dates, 5 keys +WITH build AS ( + SELECT + k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(20), (SELECT number AS k FROM numbers(5)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + k * 2 AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v) +FROM probe ASOF JOIN build USING (k, t); + +-- # 30 dates, 5 keys +WITH build AS ( + SELECT + k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(30), (SELECT number AS k FROM numbers(5)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + k * 2 AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v) +FROM probe ASOF JOIN build USING (k, t); + +-- # 50 dates, 5 keys +WITH build AS ( + SELECT + k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(50), (SELECT number AS k FROM numbers(5)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + k * 2 AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v) +FROM probe ASOF JOIN build USING (k, t); + +-- # 100 dates, 5 keys +WITH build AS ( + SELECT + k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(100), (SELECT number AS k FROM numbers(5)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + k * 2 AS k, + t - 
INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v) +FROM probe ASOF JOIN build USING (k, t); + +-- # 100 dates, 50 keys +WITH build AS ( + SELECT + k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(100), (SELECT number AS k FROM numbers(50)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + k * 2 AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v) +FROM probe ASOF JOIN build USING (k, t); + +-- # 1000 dates, 5 keys +WITH build AS ( + SELECT + k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(1000), (SELECT number AS k FROM numbers(5)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + k * 2 AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v) +FROM probe ASOF JOIN build USING (k, t); + +-- # 1000 dates, 50 keys +WITH build AS ( + SELECT + k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(1000), (SELECT number AS k FROM numbers(50)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + k * 2 AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v) +FROM probe ASOF JOIN build USING (k, t); + +-- # 10000 dates, 50 keys +WITH build AS ( + SELECT + k, + toDateTime('2001-01-01 00:00:00') + INTERVAL number MINUTE AS t, + number AS v + FROM numbers(10000), (SELECT number AS k FROM numbers(50)) + SETTINGS join_algorithm = 'default' +), +probe AS ( + SELECT + k * 2 AS k, + t - INTERVAL 30 SECOND AS t + FROM build +) +SELECT SUM(v) +FROM probe ASOF JOIN build USING (k, t); diff --git a/parser/testdata/03147_datetime64_constant_index_analysis/ast.json b/parser/testdata/03147_datetime64_constant_index_analysis/ast.json new file mode 100644 index 000000000..7f9d2888a --- /dev/null +++ b/parser/testdata/03147_datetime64_constant_index_analysis/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001555903, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03147_datetime64_constant_index_analysis/metadata.json b/parser/testdata/03147_datetime64_constant_index_analysis/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03147_datetime64_constant_index_analysis/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03147_datetime64_constant_index_analysis/query.sql b/parser/testdata/03147_datetime64_constant_index_analysis/query.sql new file mode 100644 index 000000000..6b1ba92c1 --- /dev/null +++ b/parser/testdata/03147_datetime64_constant_index_analysis/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (d DateTime, PRIMARY KEY (d)); +INSERT INTO test SELECT toDateTime('2024-01-01') + number FROM numbers(1e6); +SET max_rows_to_read = 10000; + +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. 
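+-- (Assumption: max_rows_to_read = 10000 above also acts as the assertion here. If comparing +-- DateTime64 constants against the DateTime primary key defeated index analysis, the scan +-- would read all 1e6 rows and exceed the limit.)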
+SET parallel_replicas_index_analysis_only_on_coordinator = 0; + +SELECT count() FROM test WHERE d <= '2024-01-01 02:03:04'; +SELECT count() FROM test WHERE d <= toDateTime('2024-01-01 02:03:04'); +SELECT count() FROM test WHERE d <= toDateTime64('2024-01-01 02:03:04', 0); +SELECT count() FROM test WHERE d <= toDateTime64('2024-01-01 02:03:04', 3); +SET max_rows_to_read = 100_000; +SELECT count() FROM test WHERE d <= '2024-01-02'; +SELECT count() FROM test WHERE d <= toDate('2024-01-02'); +DROP TABLE test; diff --git a/parser/testdata/03147_parquet_memory_tracking/ast.json b/parser/testdata/03147_parquet_memory_tracking/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03147_parquet_memory_tracking/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03147_parquet_memory_tracking/metadata.json b/parser/testdata/03147_parquet_memory_tracking/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03147_parquet_memory_tracking/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03147_parquet_memory_tracking/query.sql b/parser/testdata/03147_parquet_memory_tracking/query.sql new file mode 100644 index 000000000..b9356ea2b --- /dev/null +++ b/parser/testdata/03147_parquet_memory_tracking/query.sql @@ -0,0 +1,16 @@ +-- Tags: no-fasttest, no-parallel + +-- Create an ~80 MB parquet file with one row group and one column. +insert into function file('03147_parquet_memory_tracking.parquet') select number from numbers(10000000) settings output_format_parquet_compression_method='none', output_format_parquet_row_group_size=1000000000000, engine_file_truncate_on_insert=1; + +set input_format_parquet_max_block_size = 10000000; +set input_format_parquet_prefer_block_bytes = 1000000000000; + +-- Try to read it with 60 MB memory limit. Should fail because we read the 80 MB column all at once. +select sum(ignore(*)) from file('03147_parquet_memory_tracking.parquet') settings max_memory_usage=60000000; -- { serverError MEMORY_LIMIT_EXCEEDED } + +-- Try to read it with 500 MB memory limit, just in case. +select sum(ignore(*)) from file('03147_parquet_memory_tracking.parquet') settings max_memory_usage=500000000; + +-- Truncate the file to avoid leaving too much garbage behind. 
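+-- (Note: engine_file_truncate_on_insert=1 should make this single-row insert overwrite the file rather than append, shrinking it back to a few bytes.)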
+insert into function file('03147_parquet_memory_tracking.parquet') select number from numbers(1) settings engine_file_truncate_on_insert=1; diff --git a/parser/testdata/03147_rows_before_limit_fix/ast.json b/parser/testdata/03147_rows_before_limit_fix/ast.json new file mode 100644 index 000000000..246a2f88a --- /dev/null +++ b/parser/testdata/03147_rows_before_limit_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00112764, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03147_rows_before_limit_fix/metadata.json b/parser/testdata/03147_rows_before_limit_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03147_rows_before_limit_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03147_rows_before_limit_fix/query.sql b/parser/testdata/03147_rows_before_limit_fix/query.sql new file mode 100644 index 000000000..c1fde3784 --- /dev/null +++ b/parser/testdata/03147_rows_before_limit_fix/query.sql @@ -0,0 +1,23 @@ +SET exact_rows_before_limit = 1; + +DROP TABLE IF EXISTS users; + +CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=MergeTree ORDER BY uid; + +INSERT INTO users VALUES (1231, 'John', 33),(6666, 'John', 48), (8888, 'John', 50); + +SELECT age FROM remote('127.0.0.{2,3}', currentDatabase(), users) GROUP BY age LIMIT 20 FORMAT JSON SETTINGS output_format_write_statistics=0; + +DROP TABLE users; + +DROP TABLE IF EXISTS test_rows_count_bug_local; + +CREATE TABLE test_rows_count_bug_local (id UUID DEFAULT generateUUIDv4(), service_name String, path String) ENGINE=MergeTree ORDER BY tuple(); + +INSERT INTO test_rows_count_bug_local (service_name, path) VALUES ('service1', '/foo/1'), ('service1', '/foo/2'), ('service2', '/foo/3'), ('service2', '/foo/4'), ('service3', '/foo/5'); + +SELECT service_name FROM test_rows_count_bug_local +WHERE id global in (select id from remote('127.0.0.{2,3}', currentDatabase(), test_rows_count_bug_local)) +GROUP BY service_name ORDER BY service_name limit 20 FORMAT JSON SETTINGS output_format_write_statistics=0; + +DROP TABLE test_rows_count_bug_local; diff --git a/parser/testdata/03147_table_function_loop/ast.json b/parser/testdata/03147_table_function_loop/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03147_table_function_loop/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03147_table_function_loop/metadata.json b/parser/testdata/03147_table_function_loop/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03147_table_function_loop/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03147_table_function_loop/query.sql b/parser/testdata/03147_table_function_loop/query.sql new file mode 100644 index 000000000..797fb44ca --- /dev/null +++ b/parser/testdata/03147_table_function_loop/query.sql @@ -0,0 +1,16 @@ +-- Tags: no-parallel + +SELECT * FROM loop(numbers(3)) LIMIT 10; +SELECT * FROM loop (numbers(3)) LIMIT 10 settings max_block_size = 1; + +CREATE TABLE t (n Int8) ENGINE=MergeTree ORDER BY n; + +SELECT * FROM loop(t) LIMIT 15; -- { serverError TOO_MANY_RETRIES_TO_FETCH_PARTS } + +INSERT INTO t SELECT * FROM numbers(10); + +SELECT * FROM loop({CLICKHOUSE_DATABASE:Identifier}.t) LIMIT 15; +SELECT * FROM loop(t) LIMIT 15; +SELECT * FROM 
loop({CLICKHOUSE_DATABASE:Identifier}, t) LIMIT 15; + +SELECT * FROM loop('', '') -- { serverError UNKNOWN_TABLE } diff --git a/parser/testdata/03147_table_function_loop_remote_storage/ast.json b/parser/testdata/03147_table_function_loop_remote_storage/ast.json new file mode 100644 index 000000000..70d9cebe0 --- /dev/null +++ b/parser/testdata/03147_table_function_loop_remote_storage/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001138105, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03147_table_function_loop_remote_storage/metadata.json b/parser/testdata/03147_table_function_loop_remote_storage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03147_table_function_loop_remote_storage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03147_table_function_loop_remote_storage/query.sql b/parser/testdata/03147_table_function_loop_remote_storage/query.sql new file mode 100644 index 000000000..6ffa5040f --- /dev/null +++ b/parser/testdata/03147_table_function_loop_remote_storage/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Int) ENGINE = Memory; +SELECT * FROM loop(remote('localhost:9000', currentDatabase(), 't0')) tx; -- { serverError TOO_MANY_RETRIES_TO_FETCH_PARTS } + +INSERT INTO TABLE t0 SELECT * FROM numbers(7); + +SELECT '---'; +SELECT * FROM loop(remote('localhost:9000', currentDatabase(), 't0')) tx LIMIT 3; + +SELECT '---'; +SELECT * FROM loop(remote('localhost:9000', currentDatabase(), 't0')) tx LIMIT 7; + +SELECT '---'; +SELECT * FROM loop(remote('localhost:9000', currentDatabase(), 't0')) tx LIMIT 11; + +DROP TABLE t0 SYNC; diff --git a/parser/testdata/03148_asof_join_ddb_subquery/ast.json b/parser/testdata/03148_asof_join_ddb_subquery/ast.json new file mode 100644 index 000000000..af1ac02ed --- /dev/null +++ b/parser/testdata/03148_asof_join_ddb_subquery/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery events (children 1)" + }, + { + "explain": " Identifier events" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001697904, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/03148_asof_join_ddb_subquery/metadata.json b/parser/testdata/03148_asof_join_ddb_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03148_asof_join_ddb_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03148_asof_join_ddb_subquery/query.sql b/parser/testdata/03148_asof_join_ddb_subquery/query.sql new file mode 100644 index 000000000..d8f628701 --- /dev/null +++ b/parser/testdata/03148_asof_join_ddb_subquery/query.sql @@ -0,0 +1,29 @@ +DROP TABLE IF EXISTS events; +CREATE TABLE events (begin Float64, value Int32) ENGINE = MergeTree() ORDER BY begin; + +INSERT INTO events VALUES (1, 0), (3, 1), (6, 2), (8, 3); + +SET enable_analyzer = 1; +SET join_algorithm = 'full_sorting_merge'; +SET joined_subquery_requires_alias = 0; + +SELECT + begin, + value IN ( + SELECT e1.value + FROM ( + SELECT * + FROM events e1 + WHERE e1.value = events.value + ) AS e1 + ASOF JOIN ( + SELECT number :: Float64 AS begin + FROM numbers(10) + WHERE number >= 1 AND number < 10 + ) + USING (begin) + 
) +FROM events +ORDER BY begin ASC; + +DROP TABLE IF EXISTS events; diff --git a/parser/testdata/03148_mutations_virtual_columns/ast.json b/parser/testdata/03148_mutations_virtual_columns/ast.json new file mode 100644 index 000000000..9530b529a --- /dev/null +++ b/parser/testdata/03148_mutations_virtual_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_mut_virtuals (children 1)" + }, + { + "explain": " Identifier t_mut_virtuals" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001378577, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/03148_mutations_virtual_columns/metadata.json b/parser/testdata/03148_mutations_virtual_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03148_mutations_virtual_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03148_mutations_virtual_columns/query.sql b/parser/testdata/03148_mutations_virtual_columns/query.sql new file mode 100644 index 000000000..045869b22 --- /dev/null +++ b/parser/testdata/03148_mutations_virtual_columns/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS t_mut_virtuals; + +CREATE TABLE t_mut_virtuals (id UInt64, s String) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_mut_virtuals VALUES (1, 'a'); +INSERT INTO t_mut_virtuals VALUES (2, 'b'); + +SET insert_keeper_fault_injection_probability = 0; +SET mutations_sync = 2; + +ALTER TABLE t_mut_virtuals UPDATE s = _part WHERE 1; +ALTER TABLE t_mut_virtuals DELETE WHERE _part LIKE 'all_1_1_0%'; + +SELECT * FROM t_mut_virtuals ORDER BY id; + +DROP TABLE t_mut_virtuals; diff --git a/parser/testdata/03148_query_log_used_dictionaries/ast.json b/parser/testdata/03148_query_log_used_dictionaries/ast.json new file mode 100644 index 000000000..60cf83f36 --- /dev/null +++ b/parser/testdata/03148_query_log_used_dictionaries/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 03148_dictionary (children 1)" + }, + { + "explain": " Identifier 03148_dictionary" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001425463, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/03148_query_log_used_dictionaries/metadata.json b/parser/testdata/03148_query_log_used_dictionaries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03148_query_log_used_dictionaries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03148_query_log_used_dictionaries/query.sql b/parser/testdata/03148_query_log_used_dictionaries/query.sql new file mode 100644 index 000000000..21cc48ad1 --- /dev/null +++ b/parser/testdata/03148_query_log_used_dictionaries/query.sql @@ -0,0 +1,88 @@ +DROP DICTIONARY IF EXISTS 03148_dictionary; + +CREATE DICTIONARY 03148_dictionary ( + id UInt64, + name String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE( + QUERY 'select 0 as id, ''name0'' as name' +)) +LIFETIME(MIN 1 MAX 10) +LAYOUT(HASHED); + +SELECT + dictGet('03148_dictionary', 'name', number) as dict_value +FROM numbers(1) +SETTINGS + enable_analyzer = 1, + log_comment = 'simple_with_analyzer' +FORMAT Null; + +SYSTEM FLUSH LOGS query_log; + +SELECT log_comment, used_dictionaries +FROM system.query_log +WHERE current_database = currentDatabase() + AND type = 'QueryFinish' + AND log_comment = 'simple_with_analyzer' + AND is_internal = 0; + +SELECT * +FROM ( 
+ SELECT + dictGet('03148_dictionary', 'name', number) as dict_value + FROM numbers(1) +) t +SETTINGS + enable_analyzer = 1, + log_comment = 'nested_with_analyzer' +FORMAT Null; + +SYSTEM FLUSH LOGS query_log; + +SELECT log_comment, used_dictionaries +FROM system.query_log +WHERE current_database = currentDatabase() + AND type = 'QueryFinish' + AND log_comment = 'nested_with_analyzer' + AND is_internal = 0; + +SELECT + dictGet('03148_dictionary', 'name', number) as dict_value +FROM numbers(1) +SETTINGS + enable_analyzer = 0, + log_comment = 'simple_without_analyzer' +FORMAT Null; + +SYSTEM FLUSH LOGS query_log; + +SELECT log_comment, used_dictionaries +FROM system.query_log +WHERE current_database = currentDatabase() + AND type = 'QueryFinish' + AND log_comment = 'simple_without_analyzer' + AND is_internal = 0; + +SELECT * +FROM ( + SELECT + dictGet('03148_dictionary', 'name', number) as dict_value + FROM numbers(1) +) t +SETTINGS + enable_analyzer = 0, + log_comment = 'nested_without_analyzer' +FORMAT Null; + +SYSTEM FLUSH LOGS query_log; + +SELECT log_comment, used_dictionaries +FROM system.query_log +WHERE current_database = currentDatabase() + AND type = 'QueryFinish' + AND log_comment = 'nested_without_analyzer' + AND is_internal = 0; + +DROP DICTIONARY IF EXISTS 03148_dictionary; diff --git a/parser/testdata/03148_setting_max_streams_to_max_threads_ratio_overflow/ast.json b/parser/testdata/03148_setting_max_streams_to_max_threads_ratio_overflow/ast.json new file mode 100644 index 000000000..4f43a0ef5 --- /dev/null +++ b/parser/testdata/03148_setting_max_streams_to_max_threads_ratio_overflow/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001411065, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03148_setting_max_streams_to_max_threads_ratio_overflow/metadata.json b/parser/testdata/03148_setting_max_streams_to_max_threads_ratio_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03148_setting_max_streams_to_max_threads_ratio_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03148_setting_max_streams_to_max_threads_ratio_overflow/query.sql b/parser/testdata/03148_setting_max_streams_to_max_threads_ratio_overflow/query.sql new file mode 100644 index 000000000..38f25f60e --- /dev/null +++ b/parser/testdata/03148_setting_max_streams_to_max_threads_ratio_overflow/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE = MergeTree ORDER BY id; + +INSERT INTO test_table VALUES (0, 'Value_0'); + +SELECT * FROM test_table SETTINGS max_threads = 1025, max_streams_to_max_threads_ratio = -9223372036854775808, enable_analyzer = 1; -- { serverError PARAMETER_OUT_OF_BOUND } + +SELECT * FROM test_table SETTINGS max_threads = 1025, max_streams_to_max_threads_ratio = -9223372036854775808, enable_analyzer = 0; -- { serverError PARAMETER_OUT_OF_BOUND } + +DROP TABLE test_table; diff --git a/parser/testdata/03149_analyzer_join_projection_name/ast.json b/parser/testdata/03149_analyzer_join_projection_name/ast.json new file mode 100644 index 000000000..2bb0ee105 --- /dev/null +++ b/parser/testdata/03149_analyzer_join_projection_name/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + 
"type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery groups_dict (children 1)" + }, + { + "explain": " Identifier groups_dict" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001292514, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/03149_analyzer_join_projection_name/metadata.json b/parser/testdata/03149_analyzer_join_projection_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03149_analyzer_join_projection_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03149_analyzer_join_projection_name/query.sql b/parser/testdata/03149_analyzer_join_projection_name/query.sql new file mode 100644 index 000000000..140c7b3f9 --- /dev/null +++ b/parser/testdata/03149_analyzer_join_projection_name/query.sql @@ -0,0 +1,50 @@ +DROP DICTIONARY IF EXISTS groups_dict; +DROP TABLE IF EXISTS users; +DROP TABLE IF EXISTS groups; + +CREATE TABLE users (uid Int16, name String, gid LowCardinality(String), gname LowCardinality(String)) + ENGINE=MergeTree order by tuple(); +CREATE TABLE groups (gid LowCardinality(String), gname LowCardinality(String)) + ENGINE=MergeTree order by tuple(); + +CREATE DICTIONARY groups_dict ( + gid String, gname String +) +PRIMARY KEY gid, gname +LAYOUT(COMPLEX_KEY_HASHED()) +SOURCE(CLICKHOUSE(TABLE 'groups' DATABASE currentDatabase())) +LIFETIME(MIN 0 MAX 0); + + +INSERT INTO groups VALUES ('1', 'Group1'); + +INSERT INTO users VALUES (1231, 'John', '1', 'Group1'); + +select 'analyzer=1, join with dictionary'; + +SELECT u.uid, u.name, u.gid, u.gname +FROM users u left join groups_dict g using gid, gname +format PrettyCompactMonoBlock; + +select ''; +select 'analyzer=1, join with table'; + +SELECT u.uid, u.name, u.gid, u.gname +FROM users u left join groups g using gid, gname +format PrettyCompactMonoBlock; + + +set allow_experimental_analyzer=0; + +select ''; +select 'analyzer=0, join with dictionary'; + +SELECT u.uid, u.name, u.gid, u.gname +FROM users u left join groups_dict g using gid, gname +format PrettyCompactMonoBlock; + +select ''; +select 'analyzer=0, join with table'; +SELECT u.uid, u.name, u.gid, u.gname +FROM users u left join groups g using gid, gname +format PrettyCompactMonoBlock; diff --git a/parser/testdata/03149_analyzer_join_projection_name_2/ast.json b/parser/testdata/03149_analyzer_join_projection_name_2/ast.json new file mode 100644 index 000000000..96c3b3122 --- /dev/null +++ b/parser/testdata/03149_analyzer_join_projection_name_2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery groups_dict (children 1)" + }, + { + "explain": " Identifier groups_dict" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000977197, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/03149_analyzer_join_projection_name_2/metadata.json b/parser/testdata/03149_analyzer_join_projection_name_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03149_analyzer_join_projection_name_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03149_analyzer_join_projection_name_2/query.sql b/parser/testdata/03149_analyzer_join_projection_name_2/query.sql new file mode 100644 index 000000000..83f43caaf --- /dev/null +++ b/parser/testdata/03149_analyzer_join_projection_name_2/query.sql @@ -0,0 +1,34 @@ +DROP DICTIONARY IF EXISTS groups_dict; +DROP TABLE IF EXISTS mv; +DROP TABLE 
IF EXISTS users; +DROP TABLE IF EXISTS groups; +DROP TABLE IF EXISTS target; + +CREATE TABLE users (uid Int16, name String, gid LowCardinality(String), gname LowCardinality(String)) + ENGINE=MergeTree order by tuple(); +CREATE TABLE groups (gid LowCardinality(String), gname LowCardinality(String)) + ENGINE=MergeTree order by tuple(); + +CREATE TABLE target (uid Int16, name String, gid LowCardinality(String), gname LowCardinality(String)) + ENGINE=MergeTree order by tuple(); + +CREATE DICTIONARY groups_dict ( + gid String, gname String +) +PRIMARY KEY gid, gname +LAYOUT(COMPLEX_KEY_HASHED()) +SOURCE(CLICKHOUSE(TABLE 'groups' DATABASE currentDatabase())) +LIFETIME(MIN 0 MAX 0); + +CREATE MATERIALIZED VIEW mv to target AS +SELECT u.uid, u.name, u.gid, u.gname +FROM users u left join groups_dict g using gid, gname; + +INSERT INTO groups VALUES ('1', 'Group1'); + +INSERT INTO users VALUES (1231, 'John', '1', 'Group1'); +INSERT INTO users VALUES (6666, 'Ksenia', '1', 'Group1'); +INSERT INTO users VALUES (8888, 'Alice', '1', 'Group1'); +INSERT INTO users VALUES (1234, 'Test', '2', 'Group1'); + +SELECT * FROM target ORDER BY uid format PrettyCompactMonoBlock; diff --git a/parser/testdata/03149_analyzer_window_redefinition/ast.json b/parser/testdata/03149_analyzer_window_redefinition/ast.json new file mode 100644 index 000000000..beae3a7a7 --- /dev/null +++ b/parser/testdata/03149_analyzer_window_redefinition/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery users (children 3)" + }, + { + "explain": " Identifier users" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration uid (children 1)" + }, + { + "explain": " DataType Int16" + }, + { + "explain": " ColumnDeclaration name (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration age (children 1)" + }, + { + "explain": " DataType Int16" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001336715, + "rows_read": 14, + "bytes_read": 485 + } +} diff --git a/parser/testdata/03149_analyzer_window_redefinition/metadata.json b/parser/testdata/03149_analyzer_window_redefinition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03149_analyzer_window_redefinition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03149_analyzer_window_redefinition/query.sql b/parser/testdata/03149_analyzer_window_redefinition/query.sql new file mode 100644 index 000000000..7bc5ec757 --- /dev/null +++ b/parser/testdata/03149_analyzer_window_redefinition/query.sql @@ -0,0 +1,8 @@ +CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=MergeTree ORDER BY tuple(); + +INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users VALUES (6666, 'Ksenia', 48); +INSERT INTO users VALUES (8888, 'Alice', 50); + +SELECT count(*) OVER w +FROM users WINDOW w AS (ORDER BY uid), w AS(ORDER BY name); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03149_asof_join_ddb_timestamps/ast.json b/parser/testdata/03149_asof_join_ddb_timestamps/ast.json new file mode 100644 index 000000000..a588d11b7 --- /dev/null +++ b/parser/testdata/03149_asof_join_ddb_timestamps/ast.json @@ 
-0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery events0 (children 1)" + }, + { + "explain": " Identifier events0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00113196, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/03149_asof_join_ddb_timestamps/metadata.json b/parser/testdata/03149_asof_join_ddb_timestamps/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03149_asof_join_ddb_timestamps/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03149_asof_join_ddb_timestamps/query.sql b/parser/testdata/03149_asof_join_ddb_timestamps/query.sql new file mode 100644 index 000000000..2eb9fe787 --- /dev/null +++ b/parser/testdata/03149_asof_join_ddb_timestamps/query.sql @@ -0,0 +1,95 @@ +DROP TABLE IF EXISTS events0; +DROP TABLE IF EXISTS probe0; + +SET session_timezone = 'UTC'; +SET enable_analyzer = 1; +SET join_algorithm = 'full_sorting_merge'; +SET join_use_nulls = 1; + +CREATE TABLE events0 +ENGINE = MergeTree() +ORDER BY COALESCE(begin, toDateTime('9999-12-31 23:59:59')) +AS +SELECT + toNullable(toDateTime('2023-03-21 13:00:00') + INTERVAL number HOUR) AS begin, + number AS value +FROM numbers(4); + +INSERT INTO events0 VALUES (NULL, -1), (toDateTime('9999-12-31 23:59:59'), 9); + +CREATE TABLE probe0 +ENGINE = MergeTree() +ORDER BY COALESCE(begin, toDateTime('9999-12-31 23:59:59')) +AS +SELECT + toNullable(toDateTime('2023-03-21 12:00:00') + INTERVAL number HOUR) AS begin +FROM numbers(10); + +INSERT INTO probe0 VALUES (NULL), (toDateTime('9999-12-31 23:59:59')); + +SELECT + p.begin, + e.value +FROM + probe0 p + ASOF JOIN events0 e ON p.begin >= e.begin +ORDER BY p.begin ASC; + +SELECT + p.begin, + e.value +FROM + probe0 p + ASOF JOIN events0 e USING (begin) +ORDER BY p.begin ASC +SETTINGS join_use_nulls = 0 +; + +SELECT + p.begin, + e.value +FROM + probe0 p + ASOF LEFT JOIN events0 e ON p.begin >= e.begin +ORDER BY p.begin ASC; + +SELECT + p.begin, + e.value +FROM + probe0 p + ASOF LEFT JOIN events0 e USING (begin) +ORDER BY p.begin ASC +SETTINGS join_use_nulls = 0 +; + +SELECT + p.begin, + e.value +FROM + probe0 p + ASOF RIGHT JOIN events0 e ON p.begin >= e.begin +ORDER BY e.begin ASC; -- { serverError NOT_IMPLEMENTED} + +SELECT + p.begin, + e.value +FROM + probe0 p + ASOF RIGHT JOIN events0 e USING (begin) +ORDER BY e.begin ASC; -- { serverError NOT_IMPLEMENTED} + + +SELECT + p.begin, + e.value +FROM + probe0 p + ASOF LEFT JOIN ( + SELECT * FROM events0 WHERE log(value + 5) > 10 + ) e ON p.begin + INTERVAL 2 HOUR >= e.begin + INTERVAL 1 HOUR +ORDER BY p.begin ASC; + + +DROP TABLE IF EXISTS events0; +DROP TABLE IF EXISTS probe0; diff --git a/parser/testdata/03149_variant_pop_back_typo/ast.json b/parser/testdata/03149_variant_pop_back_typo/ast.json new file mode 100644 index 000000000..cf6aeec3a --- /dev/null +++ b/parser/testdata/03149_variant_pop_back_typo/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function map (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " 
Literal UInt64_1" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function map (children 1)" + }, + { + "explain": " ExpressionList (children 6)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Function toUInt128 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Set" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001546327, + "rows_read": 24, + "bytes_read": 841 + } +} diff --git a/parser/testdata/03149_variant_pop_back_typo/metadata.json b/parser/testdata/03149_variant_pop_back_typo/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03149_variant_pop_back_typo/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03149_variant_pop_back_typo/query.sql b/parser/testdata/03149_variant_pop_back_typo/query.sql new file mode 100644 index 000000000..c35a7b708 --- /dev/null +++ b/parser/testdata/03149_variant_pop_back_typo/query.sql @@ -0,0 +1 @@ +select [map(1, [], '', 1), map('', 1, 1, '', toUInt128(1), 1)] settings allow_experimental_variant_type=1, use_variant_as_common_type=1 \ No newline at end of file diff --git a/parser/testdata/03150_dynamic_type_mv_insert/ast.json b/parser/testdata/03150_dynamic_type_mv_insert/ast.json new file mode 100644 index 000000000..0faf649f0 --- /dev/null +++ b/parser/testdata/03150_dynamic_type_mv_insert/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001162882, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03150_dynamic_type_mv_insert/metadata.json b/parser/testdata/03150_dynamic_type_mv_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03150_dynamic_type_mv_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03150_dynamic_type_mv_insert/query.sql b/parser/testdata/03150_dynamic_type_mv_insert/query.sql new file mode 100644 index 000000000..71f42b612 --- /dev/null +++ b/parser/testdata/03150_dynamic_type_mv_insert/query.sql @@ -0,0 +1,42 @@ +SET allow_experimental_dynamic_type=1; +SET allow_suspicious_types_in_order_by=1; + +DROP TABLE IF EXISTS null_table; +CREATE TABLE null_table +( + n1 UInt8, + n2 Dynamic(max_types=3) +) +ENGINE = Null; + +DROP TABLE IF EXISTS to_table; +CREATE TABLE to_table +( + n1 UInt8, + n2 Dynamic(max_types=4) +) +ENGINE = MergeTree ORDER BY n1; + +DROP VIEW IF EXISTS dummy_rmv; +CREATE MATERIALIZED VIEW dummy_rmv TO to_table +AS SELECT * FROM null_table; + +INSERT INTO null_table ( n1, n2 ) VALUES (1, '2024-01-01'), (2, toDateTime64('2024-01-01', 3, 'Asia/Istanbul')), (3, toFloat32(1)), (4, toFloat64(2)); +SELECT *, dynamicType(n2) FROM to_table ORDER BY ALL; + +select ''; +INSERT INTO null_table ( n1, n2 ) VALUES (1, '2024-01-01'), (2, toDateTime64('2024-01-01', 3, 'Asia/Istanbul')), (3, toFloat32(1)), (4, toFloat64(2)); +SELECT *, dynamicType(n2) FROM to_table ORDER BY ALL; + +select ''; +ALTER TABLE to_table MODIFY COLUMN n2 Dynamic(max_types=1); +SELECT *, dynamicType(n2) FROM to_table ORDER BY ALL; 
+ +select ''; +ALTER TABLE to_table MODIFY COLUMN n2 Dynamic(max_types=10); +INSERT INTO null_table ( n1, n2 ) VALUES (1, '2024-01-01'), (2, toDateTime64('2024-01-01', 3, 'Asia/Istanbul')), (3, toFloat32(1)), (4, toFloat64(2)); +SELECT *, dynamicType(n2) FROM to_table ORDER BY ALL; + +DROP TABLE null_table; +DROP VIEW dummy_rmv; +DROP TABLE to_table; diff --git a/parser/testdata/03150_grouping_sets_use_nulls_pushdown/ast.json b/parser/testdata/03150_grouping_sets_use_nulls_pushdown/ast.json new file mode 100644 index 000000000..e40ca7a6b --- /dev/null +++ b/parser/testdata/03150_grouping_sets_use_nulls_pushdown/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_grouping_sets_predicate (children 1)" + }, + { + "explain": " Identifier test_grouping_sets_predicate" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001678801, + "rows_read": 2, + "bytes_read": 108 + } +} diff --git a/parser/testdata/03150_grouping_sets_use_nulls_pushdown/metadata.json b/parser/testdata/03150_grouping_sets_use_nulls_pushdown/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03150_grouping_sets_use_nulls_pushdown/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03150_grouping_sets_use_nulls_pushdown/query.sql b/parser/testdata/03150_grouping_sets_use_nulls_pushdown/query.sql new file mode 100644 index 000000000..c39143216 --- /dev/null +++ b/parser/testdata/03150_grouping_sets_use_nulls_pushdown/query.sql @@ -0,0 +1,49 @@ +DROP TABLE IF EXISTS test_grouping_sets_predicate; + +CREATE TABLE test_grouping_sets_predicate ( day_ Date, type_1 String ) ENGINE=MergeTree ORDER BY day_; + +INSERT INTO test_grouping_sets_predicate SELECT toDate('2023-01-05') AS day_, 'hello, world' FROM numbers (10); + +SET group_by_use_nulls = true; + +SELECT * +FROM ( SELECT day_, type_1 FROM test_grouping_sets_predicate GROUP BY GROUPING SETS ( (day_, type_1), (day_) ) ) +WHERE day_ = '2023-01-05' +ORDER BY ALL; + + +SELECT * +FROM ( SELECT * FROM test_grouping_sets_predicate GROUP BY GROUPING SETS ( (day_, type_1), (day_) ) ) +WHERE day_ = '2023-01-05' +ORDER BY ALL; + +SELECT * +FROM ( SELECT day_ FROM test_grouping_sets_predicate GROUP BY GROUPING SETS ( (day_, type_1), (day_) ) ) +WHERE day_ = '2023-01-05' +ORDER BY * +SETTINGS enable_analyzer=1; + +SELECT * +FROM ( SELECT * FROM test_grouping_sets_predicate GROUP BY GROUPING SETS ( (day_, type_1), (day_) ) ) +WHERE day_ = '2023-01-05' +GROUP BY * +ORDER BY ALL +SETTINGS enable_analyzer=1; + +SELECT * +FROM ( SELECT * FROM test_grouping_sets_predicate GROUP BY GROUPING SETS ( (*), (day_) ) ) +WHERE day_ = '2023-01-05' +GROUP BY GROUPING SETS (*) +ORDER BY type_1 +SETTINGS enable_analyzer=1; + +SELECT * +FROM ( SELECT day_, COUNT(*) FROM test_grouping_sets_predicate GROUP BY GROUPING SETS ( (day_, type_1), (day_) ) ) +WHERE day_ = '2023-01-05' +ORDER BY ALL; + + +SELECT t2.* +FROM ( SELECT t1.* FROM test_grouping_sets_predicate t1 GROUP BY GROUPING SETS ( (day_, type_1), (day_) ) ) t2 +WHERE day_ = '2023-01-05' +ORDER BY ALL; diff --git a/parser/testdata/03150_infer_type_variant/ast.json b/parser/testdata/03150_infer_type_variant/ast.json new file mode 100644 index 000000000..f77b9348d --- /dev/null +++ b/parser/testdata/03150_infer_type_variant/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + 
"statistics": + { + "elapsed": 0.00144346, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03150_infer_type_variant/metadata.json b/parser/testdata/03150_infer_type_variant/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03150_infer_type_variant/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03150_infer_type_variant/query.sql b/parser/testdata/03150_infer_type_variant/query.sql new file mode 100644 index 000000000..d97851f69 --- /dev/null +++ b/parser/testdata/03150_infer_type_variant/query.sql @@ -0,0 +1,7 @@ +SET input_format_try_infer_variants=1; +SET output_format_pretty_fallback_to_vertical = 0; +SET input_format_json_infer_array_of_dynamic_from_array_of_different_types=0; +SELECT arr, toTypeName(arr) FROM format('JSONEachRow', '{"arr" : [1, "Hello", {"a" : 32}]}') FORMAT Pretty; +SELECT x, toTypeName(x) FROM format('JSONEachRow', '{"x" : 42}, {"x" : "Hello"}') FORMAT Pretty; +SELECT x, toTypeName(x) FROM format('JSONEachRow', '{"x" : [1, 2, 3]}, {"x" : {"a" : 42}}') FORMAT Pretty; +SELECT c1, toTypeName(c1), c2, toTypeName(c2) FROM format('CSV', '1,Hello World!\n2,"[1,2,3]"\n3,"2020-01-01"\n') FORMAT Pretty; diff --git a/parser/testdata/03150_trace_log_add_build_id/ast.json b/parser/testdata/03150_trace_log_add_build_id/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03150_trace_log_add_build_id/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03150_trace_log_add_build_id/metadata.json b/parser/testdata/03150_trace_log_add_build_id/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03150_trace_log_add_build_id/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03150_trace_log_add_build_id/query.sql b/parser/testdata/03150_trace_log_add_build_id/query.sql new file mode 100644 index 000000000..fc8585f7c --- /dev/null +++ b/parser/testdata/03150_trace_log_add_build_id/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage + +SET log_queries = 1; +SET log_query_threads = 1; +SET query_profiler_real_time_period_ns = 100000000; +SELECT sleep(1); +SYSTEM FLUSH LOGS trace_log; + +SELECT COUNT(*) > 1 FROM system.trace_log WHERE build_id IS NOT NULL; + diff --git a/parser/testdata/03150_url_hash_non_constant_level/ast.json b/parser/testdata/03150_url_hash_non_constant_level/ast.json new file mode 100644 index 000000000..ba2454235 --- /dev/null +++ b/parser/testdata/03150_url_hash_non_constant_level/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'https:\/\/www3.botinok.co.edu.il\/~kozlevich\/CGI-BIN\/WEBSIT~0.DLL?longptr=0xFFFFFFFF&ONERR=CONTINUE#!PGNUM=99' (alias url)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function URLHash (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier url" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } 
+ ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001484905, + "rows_read": 14, + "bytes_read": 645 + } +} diff --git a/parser/testdata/03150_url_hash_non_constant_level/metadata.json b/parser/testdata/03150_url_hash_non_constant_level/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03150_url_hash_non_constant_level/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03150_url_hash_non_constant_level/query.sql b/parser/testdata/03150_url_hash_non_constant_level/query.sql new file mode 100644 index 000000000..8afda0f9c --- /dev/null +++ b/parser/testdata/03150_url_hash_non_constant_level/query.sql @@ -0,0 +1,7 @@ +WITH 'https://www3.botinok.co.edu.il/~kozlevich/CGI-BIN/WEBSIT~0.DLL?longptr=0xFFFFFFFF&ONERR=CONTINUE#!PGNUM=99' AS url SELECT URLHash(url, arrayJoin(range(10))); +SELECT '---'; +WITH 'https://www3.botinok.co.edu.il/~kozlevich/CGI-BIN/WEBSIT~0.DLL?longptr=0xFFFFFFFF&ONERR=CONTINUE#!PGNUM=99' AS url SELECT URLHash(materialize(url), arrayJoin(range(10))); +SELECT '---'; +WITH 'https://www3.botinok.co.edu.il/~kozlevich/CGI-BIN/WEBSIT~0.DLL?longptr=0xFFFFFFFF&ONERR=CONTINUE#!PGNUM=99' AS url SELECT cityHash64(substring(x, -1, 1) IN ('/', '?', '#') ? substring(x, 1, -1) : x), arrayJoin(URLHierarchy(url)) AS x; +SELECT '---'; +WITH 'https://www3.botinok.co.edu.il/~kozlevich/CGI-BIN/WEBSIT~0.DLL?longptr=0xFFFFFFFF&ONERR=CONTINUE#!PGNUM=99' AS url SELECT cityHash64(substring(x, -1, 1) IN ('/', '?', '#') ? substring(x, 1, -1) : x), arrayJoin(URLHierarchy(materialize(url))) AS x; diff --git a/parser/testdata/03151_analyzer_view_read_only_necessary_columns/ast.json b/parser/testdata/03151_analyzer_view_read_only_necessary_columns/ast.json new file mode 100644 index 000000000..02ece7084 --- /dev/null +++ b/parser/testdata/03151_analyzer_view_read_only_necessary_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001459632, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03151_analyzer_view_read_only_necessary_columns/metadata.json b/parser/testdata/03151_analyzer_view_read_only_necessary_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03151_analyzer_view_read_only_necessary_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03151_analyzer_view_read_only_necessary_columns/query.sql b/parser/testdata/03151_analyzer_view_read_only_necessary_columns/query.sql new file mode 100644 index 000000000..ac86a8705 --- /dev/null +++ b/parser/testdata/03151_analyzer_view_read_only_necessary_columns/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=MergeTree ORDER BY id; + +DROP VIEW IF EXISTS test_view; +CREATE VIEW test_view AS SELECT id, value FROM test_table; + +EXPLAIN header = 1 SELECT sum(id) FROM test_view settings enable_analyzer=1; + +DROP VIEW test_view; +DROP TABLE test_table; diff --git a/parser/testdata/03151_dynamic_type_scale_max_types/ast.json b/parser/testdata/03151_dynamic_type_scale_max_types/ast.json new file mode 100644 index 000000000..e36c2c84a --- /dev/null +++ b/parser/testdata/03151_dynamic_type_scale_max_types/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": 
"String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001077978, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03151_dynamic_type_scale_max_types/metadata.json b/parser/testdata/03151_dynamic_type_scale_max_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03151_dynamic_type_scale_max_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03151_dynamic_type_scale_max_types/query.sql b/parser/testdata/03151_dynamic_type_scale_max_types/query.sql new file mode 100644 index 000000000..f00c1492e --- /dev/null +++ b/parser/testdata/03151_dynamic_type_scale_max_types/query.sql @@ -0,0 +1,27 @@ +SET allow_experimental_dynamic_type = 1; +SET allow_suspicious_types_in_order_by = 1; +SET optimize_read_in_order = 1; + +drop table if exists to_table; + +CREATE TABLE to_table +( + n1 UInt8, + n2 Dynamic(max_types=2) +) +ENGINE = MergeTree ORDER BY n1; + +INSERT INTO to_table ( n1, n2 ) VALUES (1, '2024-01-01'), (2, toDateTime64('2024-01-01', 3, 'Asia/Istanbul')), (3, toFloat32(1)), (4, toFloat64(2)); +SELECT *, dynamicType(n2), isDynamicElementInSharedData(n2) FROM to_table ORDER BY ALL; + +select ''; +ALTER TABLE to_table MODIFY COLUMN n2 Dynamic(max_types=5); +INSERT INTO to_table ( n1, n2 ) VALUES (1, '2024-01-01'), (2, toDateTime64('2024-01-01', 3, 'Asia/Istanbul')), (3, toFloat32(1)), (4, toFloat64(2)); +SELECT *, dynamicType(n2), isDynamicElementInSharedData(n2) FROM to_table ORDER BY ALL; + +select ''; +ALTER TABLE to_table MODIFY COLUMN n2 Dynamic(max_types=0); +INSERT INTO to_table ( n1, n2 ) VALUES (1, '2024-01-01'), (2, toDateTime64('2024-01-01', 3, 'Asia/Istanbul')), (3, toFloat32(1)), (4, toFloat64(2)); +SELECT *, dynamicType(n2), isDynamicElementInSharedData(n2) FROM to_table ORDER BY ALL; + +ALTER TABLE to_table MODIFY COLUMN n2 Dynamic(max_types=500); -- { serverError UNEXPECTED_AST_STRUCTURE } diff --git a/parser/testdata/03151_external_cross_join/ast.json b/parser/testdata/03151_external_cross_join/ast.json new file mode 100644 index 000000000..bd666649b --- /dev/null +++ b/parser/testdata/03151_external_cross_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001104899, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03151_external_cross_join/metadata.json b/parser/testdata/03151_external_cross_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03151_external_cross_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03151_external_cross_join/query.sql b/parser/testdata/03151_external_cross_join/query.sql new file mode 100644 index 000000000..e0e05a10e --- /dev/null +++ b/parser/testdata/03151_external_cross_join/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (x Int32) ENGINE = Memory; + +-- insert several blocks with 1 or 2 rows: +INSERT INTO t1 VALUES (1); +INSERT INTO t1 VALUES (10),(100); +INSERT INTO t1 VALUES (1000); +INSERT INTO t1 VALUES (10000),(100000); + +SET max_rows_in_join = 111; + +SELECT x, sum(number), count(), FROM ( + SELECT t1.x, t2.number + FROM t1 + CROSS JOIN numbers_mt(10_000_000) t2 + WHERE number <= x +) +GROUP BY ALL +ORDER BY x +; \ No newline at end of file 
diff --git a/parser/testdata/03151_pmj_join_non_procssed_clash/ast.json b/parser/testdata/03151_pmj_join_non_procssed_clash/ast.json new file mode 100644 index 000000000..b0e563f8f --- /dev/null +++ b/parser/testdata/03151_pmj_join_non_procssed_clash/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001402818, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03151_pmj_join_non_procssed_clash/metadata.json b/parser/testdata/03151_pmj_join_non_procssed_clash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03151_pmj_join_non_procssed_clash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03151_pmj_join_non_procssed_clash/query.sql b/parser/testdata/03151_pmj_join_non_procssed_clash/query.sql new file mode 100644 index 000000000..a54de8897 --- /dev/null +++ b/parser/testdata/03151_pmj_join_non_procssed_clash/query.sql @@ -0,0 +1,8 @@ +SET join_algorithm = 'partial_merge'; +SET max_joined_block_size_rows = 100; + + +SELECT count(ignore(*)), sum(t1.a), sum(t1.b), sum(t2.a) +FROM ( SELECT number AS a, number AS b FROM numbers(10000) ) t1 +JOIN ( SELECT number + 100 AS a FROM numbers(10000) ) t2 +ON t1.a = t2.a AND t1.b > 0; diff --git a/parser/testdata/03151_redundant_distinct_with_window/ast.json b/parser/testdata/03151_redundant_distinct_with_window/ast.json new file mode 100644 index 000000000..dbf4f26a8 --- /dev/null +++ b/parser/testdata/03151_redundant_distinct_with_window/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.0011204, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/03151_redundant_distinct_with_window/metadata.json b/parser/testdata/03151_redundant_distinct_with_window/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03151_redundant_distinct_with_window/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03151_redundant_distinct_with_window/query.sql b/parser/testdata/03151_redundant_distinct_with_window/query.sql new file mode 100644 index 000000000..79e0074e9 --- /dev/null +++ b/parser/testdata/03151_redundant_distinct_with_window/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS tab; +DROP TABLE IF EXISTS tab_v; + +CREATE TABLE tab (id Int32, val Nullable(Float64), dt Nullable(DateTime64(6)), type Nullable(Int32)) ENGINE = MergeTree ORDER BY id; + +insert into tab values (1,10,'2023-01-14 00:00:00',1),(2,20,'2023-01-14 00:00:00',1),(3,20,'2023-01-14 00:00:00',2),(4,40,'2023-01-14 00:00:00',3),(5,50,'2023-01-14 00:00:00',3); + +CREATE VIEW tab_v AS SELECT + t1.type AS type, + sum(t1.val) AS sval, + toStartOfDay(t1.dt) AS sday, + anyLast(sval) OVER w AS lval +FROM tab AS t1 +GROUP BY + type, + sday +WINDOW w AS (PARTITION BY type); + +select distinct type from tab_v order by type; +select '--------'; +select distinct type, sday from tab_v order by type, sday; diff --git a/parser/testdata/03152_analyzer_columns_list/ast.json b/parser/testdata/03152_analyzer_columns_list/ast.json new file mode 100644 index 000000000..3e29ed057 --- /dev/null +++ b/parser/testdata/03152_analyzer_columns_list/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + 
"name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00162552, + "rows_read": 2, + "bytes_read": 61 + } +} diff --git a/parser/testdata/03152_analyzer_columns_list/metadata.json b/parser/testdata/03152_analyzer_columns_list/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03152_analyzer_columns_list/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03152_analyzer_columns_list/query.sql b/parser/testdata/03152_analyzer_columns_list/query.sql new file mode 100644 index 000000000..baed3a4ff --- /dev/null +++ b/parser/testdata/03152_analyzer_columns_list/query.sql @@ -0,0 +1,13 @@ +CREATE TABLE test +( + foo String, + bar String, +) +ENGINE = MergeTree() +ORDER BY (foo, bar); + +INSERT INTO test VALUES ('foo', 'bar1'); + +SELECT COLUMNS(bar, foo) APPLY (length) FROM test; + +SELECT COLUMNS(bar, foo, xyz) APPLY (length) FROM test; -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/03152_dynamic_type_simple/ast.json b/parser/testdata/03152_dynamic_type_simple/ast.json new file mode 100644 index 000000000..efc71ae89 --- /dev/null +++ b/parser/testdata/03152_dynamic_type_simple/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001353716, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03152_dynamic_type_simple/metadata.json b/parser/testdata/03152_dynamic_type_simple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03152_dynamic_type_simple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03152_dynamic_type_simple/query.sql b/parser/testdata/03152_dynamic_type_simple/query.sql new file mode 100644 index 000000000..ed24b213b --- /dev/null +++ b/parser/testdata/03152_dynamic_type_simple/query.sql @@ -0,0 +1,37 @@ +SET allow_experimental_dynamic_type=1; + +DROP TABLE IF EXISTS test_max_types; +CREATE TABLE test_max_types (d Dynamic(max_types=5)) ENGINE = Memory; +INSERT INTO test_max_types VALUES ('string1'), (42), (3.14), ([1, 2]), (toDate('2021-01-01')), ('string2'); +SELECT d, dynamicType(d) FROM test_max_types; + +SELECT ''; +DROP TABLE IF EXISTS test_nested_dynamic; +CREATE TABLE test_nested_dynamic (d1 Dynamic, d2 Dynamic(max_types=2)) ENGINE = Memory; +INSERT INTO test_nested_dynamic VALUES (NULL, 42), (42, 'string'), ('string', [1, 2]), ([1, 2], NULL); +SELECT d1, dynamicType(d1), d2, dynamicType(d2) FROM test_nested_dynamic; + +DROP TABLE IF EXISTS test_rapid_schema; +CREATE TABLE test_rapid_schema (d Dynamic) ENGINE = Memory; +INSERT INTO test_rapid_schema VALUES (42), ('string1'), (toDate('2021-01-01')), ([1, 2, 3]), (3.14), ('string2'), (toDateTime('2021-01-01 12:00:00')), (['array', 'of', 'strings']), (NULL), (toFloat64(42.42)); + +SELECT d, dynamicType(d), d.Int64, d.String, d.Date, d.Float64, d.DateTime, d.`Array(Int64)`, d.`Array(String)` +FROM test_rapid_schema FORMAT PrettyCompactMonoBlock; + + +SELECT ''; +SELECT finalizeAggregation(CAST(dynamic_state, 'AggregateFunction(sum, UInt64)')) +FROM +( + SELECT CAST(state, 'Dynamic') AS dynamic_state + FROM + ( + SELECT sumState(number) AS state + FROM numbers(10000) + ) +); + +DROP TABLE test_max_types; +DROP TABLE test_nested_dynamic; +DROP TABLE 
test_rapid_schema; + diff --git a/parser/testdata/03152_join_filter_push_down_equivalent_columns/ast.json b/parser/testdata/03152_join_filter_push_down_equivalent_columns/ast.json new file mode 100644 index 000000000..122ee0940 --- /dev/null +++ b/parser/testdata/03152_join_filter_push_down_equivalent_columns/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001227609, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03152_join_filter_push_down_equivalent_columns/metadata.json b/parser/testdata/03152_join_filter_push_down_equivalent_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03152_join_filter_push_down_equivalent_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03152_join_filter_push_down_equivalent_columns/query.sql b/parser/testdata/03152_join_filter_push_down_equivalent_columns/query.sql new file mode 100644 index 000000000..7bd1c09fd --- /dev/null +++ b/parser/testdata/03152_join_filter_push_down_equivalent_columns/query.sql @@ -0,0 +1,36 @@ +SET enable_analyzer = 1; +SET query_plan_join_swap_table = false; + +DROP TABLE IF EXISTS users; +CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=MergeTree order by (uid, name); + +INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users VALUES (6666, 'Ksenia', 48); +INSERT INTO users VALUES (8888, 'Alice', 50); + +DROP TABLE IF EXISTS users2; +CREATE TABLE users2 (uid Int16, name String, age2 Int16) ENGINE=MergeTree order by (uid, name); + +INSERT INTO users2 VALUES (1231, 'John', 33); +INSERT INTO users2 VALUES (6666, 'Ksenia', 48); +INSERT INTO users2 VALUES (8888, 'Alice', 50); + +-- { echoOn } + +EXPLAIN header = 1, indexes = 1 +SELECT name FROM users INNER JOIN users2 USING name WHERE users.name ='Alice'; + +SELECT '--'; + +EXPLAIN header = 1, indexes = 1 +SELECT name FROM users LEFT JOIN users2 USING name WHERE users.name ='Alice'; + +SELECT '--'; + +EXPLAIN header = 1, indexes = 1 +SELECT name FROM users RIGHT JOIN users2 USING name WHERE users2.name ='Alice'; + +-- { echoOff } + +DROP TABLE users; +DROP TABLE users2; diff --git a/parser/testdata/03152_trailing_comma_in_columns_list_in_insert/ast.json b/parser/testdata/03152_trailing_comma_in_columns_list_in_insert/ast.json new file mode 100644 index 000000000..a4b9a5677 --- /dev/null +++ b/parser/testdata/03152_trailing_comma_in_columns_list_in_insert/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 2)" + }, + { + "explain": " Identifier test" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration a (children 1)" + }, + { + "explain": " DataType UInt8" + }, + { + "explain": " ColumnDeclaration b (children 1)" + }, + { + "explain": " DataType UInt8" + }, + { + "explain": " ColumnDeclaration c (children 1)" + }, + { + "explain": " DataType UInt8" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001259401, + "rows_read": 10, + "bytes_read": 345 + } +} diff --git a/parser/testdata/03152_trailing_comma_in_columns_list_in_insert/metadata.json b/parser/testdata/03152_trailing_comma_in_columns_list_in_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03152_trailing_comma_in_columns_list_in_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03152_trailing_comma_in_columns_list_in_insert/query.sql b/parser/testdata/03152_trailing_comma_in_columns_list_in_insert/query.sql new file mode 100644 index 000000000..4031f9a77 --- /dev/null +++ b/parser/testdata/03152_trailing_comma_in_columns_list_in_insert/query.sql @@ -0,0 +1,4 @@ +CREATE TEMPORARY TABLE test (a UInt8, b UInt8, c UInt8); +INSERT INTO test (a, b, c, ) VALUES (1, 2, 3); +INSERT INTO test (a, b, c) VALUES (4, 5, 6); +SELECT * FROM test ORDER BY a; diff --git a/parser/testdata/03153_dynamic_type_empty/ast.json b/parser/testdata/03153_dynamic_type_empty/ast.json new file mode 100644 index 000000000..fcbfdef5c --- /dev/null +++ b/parser/testdata/03153_dynamic_type_empty/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.0013997, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03153_dynamic_type_empty/metadata.json b/parser/testdata/03153_dynamic_type_empty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03153_dynamic_type_empty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03153_dynamic_type_empty/query.sql b/parser/testdata/03153_dynamic_type_empty/query.sql new file mode 100644 index 000000000..3a0c98e63 --- /dev/null +++ b/parser/testdata/03153_dynamic_type_empty/query.sql @@ -0,0 +1,7 @@ +SET allow_experimental_dynamic_type=1; + +DROP TABLE IF EXISTS test_null_empty; +CREATE TABLE test_null_empty (d Dynamic) ENGINE = Memory; +INSERT INTO test_null_empty VALUES ([]), ([1]), ([]), (['1']), ([]), (()),((1)), (()), (('1')), (()), ({}), ({1:2}), ({}), ({'1':'2'}), ({}); +SELECT d, dynamicType(d) FROM test_null_empty; +DROP TABLE test_null_empty; diff --git a/parser/testdata/03153_trailing_comma_in_values_list_in_insert/ast.json b/parser/testdata/03153_trailing_comma_in_values_list_in_insert/ast.json new file mode 100644 index 000000000..8868ee0f1 --- /dev/null +++ b/parser/testdata/03153_trailing_comma_in_values_list_in_insert/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 2)" + }, + { + "explain": " Identifier test" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration a (children 1)" + }, + { + "explain": " DataType UInt8" + }, + { + "explain": " ColumnDeclaration b (children 1)" + }, + { + "explain": " DataType UInt8" + }, + { + "explain": " ColumnDeclaration c (children 1)" + }, + { + "explain": " DataType UInt8" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001304165, + "rows_read": 10, + "bytes_read": 345 + } +} diff --git a/parser/testdata/03153_trailing_comma_in_values_list_in_insert/metadata.json b/parser/testdata/03153_trailing_comma_in_values_list_in_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03153_trailing_comma_in_values_list_in_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03153_trailing_comma_in_values_list_in_insert/query.sql b/parser/testdata/03153_trailing_comma_in_values_list_in_insert/query.sql new file mode 100644 index 000000000..65301c977 --- /dev/null +++ 
b/parser/testdata/03153_trailing_comma_in_values_list_in_insert/query.sql @@ -0,0 +1,5 @@ +CREATE TEMPORARY TABLE test (a UInt8, b UInt8, c UInt8); +INSERT INTO test (a, b, c) VALUES (1, 2, 3, ); +INSERT INTO test (a, b, c) VALUES (4, 5, 6,); +INSERT INTO test (a, b, c) VALUES (7, 8, 9); +SELECT * FROM test ORDER BY a; diff --git a/parser/testdata/03154_recursive_cte_distributed/ast.json b/parser/testdata/03154_recursive_cte_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03154_recursive_cte_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03154_recursive_cte_distributed/metadata.json b/parser/testdata/03154_recursive_cte_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03154_recursive_cte_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03154_recursive_cte_distributed/query.sql b/parser/testdata/03154_recursive_cte_distributed/query.sql new file mode 100644 index 000000000..47e0b9aad --- /dev/null +++ b/parser/testdata/03154_recursive_cte_distributed/query.sql @@ -0,0 +1,48 @@ +-- Tags: shard + +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id String, + parent_id String +) +ENGINE = MergeTree ORDER BY id; + +INSERT INTO test_table VALUES ('a', ''); +INSERT INTO test_table VALUES ('b', 'a'); +INSERT INTO test_table VALUES ('c', 'a'); + +WITH RECURSIVE search_tree AS ( + SELECT id, parent_id, [parent_id] AS path, toUInt64(0) AS depth + FROM test_table + UNION ALL + SELECT t.id, t.parent_id, arrayConcat(path, [t.id]) as path, depth + 1 + FROM test_table t, search_tree st + WHERE t.parent_id = st.id) +SELECT * FROM search_tree ORDER BY depth, id, parent_id; + +SELECT '--'; + +WITH RECURSIVE search_tree AS ( + SELECT id, parent_id, [parent_id] AS path, toUInt64(0) AS depth + FROM remote('127.0.0.1', currentDatabase(), test_table) + UNION ALL + SELECT t.id, t.parent_id, arrayConcat(path, [t.id]) as path, depth + 1 + FROM remote('127.0.0.1', currentDatabase(), test_table) t, search_tree st + WHERE t.parent_id = st.id) +SELECT * FROM search_tree ORDER BY depth, id, parent_id; + +SELECT '--'; + +WITH RECURSIVE search_tree AS ( + SELECT id, parent_id, [parent_id] AS path, toUInt64(0) AS depth + FROM remote('127.0.0.{1,2}', currentDatabase(), test_table) + UNION ALL + SELECT t.id, t.parent_id, arrayConcat(path, [t.id]) as path, depth + 1 + FROM remote('127.0.0.{1,2}', currentDatabase(), test_table) t, search_tree st + WHERE t.parent_id = st.id) +SELECT * FROM search_tree ORDER BY depth, id, parent_id;; + +DROP TABLE test_table; diff --git a/parser/testdata/03155_analyzer_interpolate/ast.json b/parser/testdata/03155_analyzer_interpolate/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03155_analyzer_interpolate/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03155_analyzer_interpolate/metadata.json b/parser/testdata/03155_analyzer_interpolate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03155_analyzer_interpolate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03155_analyzer_interpolate/query.sql b/parser/testdata/03155_analyzer_interpolate/query.sql new file mode 100644 index 000000000..42c5f5ef6 --- /dev/null +++ b/parser/testdata/03155_analyzer_interpolate/query.sql @@ -0,0 +1,15 @@ +-- 
https://github.com/ClickHouse/ClickHouse/issues/62464 +SET enable_analyzer = 1; + +SELECT n, [number] AS inter FROM ( + SELECT toFloat32(number % 10) AS n, number + FROM numbers(10) WHERE number % 3 = 1 +) GROUP BY n, inter ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5 INTERPOLATE (inter AS [5]); + +SELECT n, number+5 AS inter FROM ( -- { serverError NOT_AN_AGGREGATE } + SELECT toFloat32(number % 10) AS n, number, number*2 AS mn + FROM numbers(10) WHERE number % 3 = 1 +) GROUP BY n, inter ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5 INTERPOLATE (inter AS mn * 2); + +-- https://github.com/ClickHouse/ClickHouse/issues/64636 +select sum(number) as s from remote('127.0.0.{1,2}', numbers(10)) where (intDiv(number, 2) as key) != 1 group by key order by key with fill interpolate (s as 100500); diff --git a/parser/testdata/03155_datasketches_ubsan/ast.json b/parser/testdata/03155_datasketches_ubsan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03155_datasketches_ubsan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03155_datasketches_ubsan/metadata.json b/parser/testdata/03155_datasketches_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03155_datasketches_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03155_datasketches_ubsan/query.sql b/parser/testdata/03155_datasketches_ubsan/query.sql new file mode 100644 index 000000000..521301d03 --- /dev/null +++ b/parser/testdata/03155_datasketches_ubsan/query.sql @@ -0,0 +1,2 @@ +-- Tags: no-fasttest +SELECT uniqTheta(toFixedString('uniqTheta distinct', 18)) FROM (SELECT number % 2 AS x FROM numbers(10) WHERE materialize(16)); diff --git a/parser/testdata/03155_explain_current_transaction/ast.json b/parser/testdata/03155_explain_current_transaction/ast.json new file mode 100644 index 000000000..03803e48e --- /dev/null +++ b/parser/testdata/03155_explain_current_transaction/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Explain EXPLAIN CURRENT TRANSACTION" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.0014493, + "rows_read": 1, + "bytes_read": 43 + } +} diff --git a/parser/testdata/03155_explain_current_transaction/metadata.json b/parser/testdata/03155_explain_current_transaction/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03155_explain_current_transaction/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03155_explain_current_transaction/query.sql b/parser/testdata/03155_explain_current_transaction/query.sql new file mode 100644 index 000000000..fa0fd06e7 --- /dev/null +++ b/parser/testdata/03155_explain_current_transaction/query.sql @@ -0,0 +1 @@ +EXPLAIN CURRENT TRANSACTION; diff --git a/parser/testdata/03155_in_nested_subselects/ast.json b/parser/testdata/03155_in_nested_subselects/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03155_in_nested_subselects/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03155_in_nested_subselects/metadata.json b/parser/testdata/03155_in_nested_subselects/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03155_in_nested_subselects/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03155_in_nested_subselects/query.sql 
b/parser/testdata/03155_in_nested_subselects/query.sql new file mode 100644 index 000000000..5cb9d404a --- /dev/null +++ b/parser/testdata/03155_in_nested_subselects/query.sql @@ -0,0 +1,20 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/63833 +SET enable_analyzer = 1; +SET parallel_replicas_local_plan=1; + +create table Example (id Int32) engine = MergeTree ORDER BY id; +INSERT INTO Example SELECT number AS id FROM numbers(2); + +create table Null engine=Null as Example ; +--create table Null engine=MergeTree order by id as Example ; + +create materialized view Transform to Example as +select * from Null +join ( select * FROM Example + WHERE id IN (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM Null))))) + ) as old +using id; + +INSERT INTO Null SELECT number AS id FROM numbers(2); + +select * from Example order by all; -- should return 4 rows diff --git a/parser/testdata/03156_analyzer_array_join_distributed/ast.json b/parser/testdata/03156_analyzer_array_join_distributed/ast.json new file mode 100644 index 000000000..33a96a41d --- /dev/null +++ b/parser/testdata/03156_analyzer_array_join_distributed/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery arrays_test (children 3)" + }, + { + "explain": " Identifier arrays_test" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration s (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration arr (children 1)" + }, + { + "explain": " DataType Array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType UInt8" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier s" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001597886, + "rows_read": 14, + "bytes_read": 502 + } +} diff --git a/parser/testdata/03156_analyzer_array_join_distributed/metadata.json b/parser/testdata/03156_analyzer_array_join_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03156_analyzer_array_join_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03156_analyzer_array_join_distributed/query.sql b/parser/testdata/03156_analyzer_array_join_distributed/query.sql new file mode 100644 index 000000000..55f9877b2 --- /dev/null +++ b/parser/testdata/03156_analyzer_array_join_distributed/query.sql @@ -0,0 +1,28 @@ +CREATE TABLE arrays_test (s String, arr Array(UInt8)) ENGINE = MergeTree() ORDER BY (s); + +INSERT INTO arrays_test VALUES ('Hello', [1,2]), ('World', [3,4,5]), ('Goodbye', []); + +SELECT s, arr, a FROM remote('127.0.0.2', currentDatabase(), arrays_test) ARRAY JOIN arr AS a WHERE a < 3 ORDER BY a; +SELECT s, arr, a FROM remote('127.0.0.{1,2}', currentDatabase(), arrays_test) ARRAY JOIN arr AS a WHERE a < 3 ORDER BY a; + + +SELECT s, arr FROM remote('127.0.0.2', currentDatabase(), arrays_test) ARRAY JOIN arr WHERE arr < 3 ORDER BY arr; +SELECT s, arr FROM remote('127.0.0.{1,2}', currentDatabase(), arrays_test) ARRAY JOIN arr WHERE arr < 3 ORDER BY arr; + +create table hourly( + hour datetime, + `metric.names` Array(String), + `metric.values` Array(Int64) +) Engine=Memory +as select '2020-01-01', ['a', 'b'], [1,2]; + +SELECT + 
toDate(hour) AS day, + `metric.names`, + sum(`metric.values`) +FROM remote('127.0.0.{1,2}', currentDatabase(), hourly) +ARRAY JOIN metric +GROUP BY + day, + metric.names +ORDER BY metric.names; diff --git a/parser/testdata/03156_group_concat/ast.json b/parser/testdata/03156_group_concat/ast.json new file mode 100644 index 000000000..698680677 --- /dev/null +++ b/parser/testdata/03156_group_concat/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_groupConcat (children 1)" + }, + { + "explain": " Identifier test_groupConcat" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000913781, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/03156_group_concat/metadata.json b/parser/testdata/03156_group_concat/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03156_group_concat/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03156_group_concat/query.sql b/parser/testdata/03156_group_concat/query.sql new file mode 100644 index 000000000..6ea78a8cd --- /dev/null +++ b/parser/testdata/03156_group_concat/query.sql @@ -0,0 +1,72 @@ +DROP TABLE IF EXISTS test_groupConcat; +CREATE TABLE test_groupConcat +( + id UInt64, + p_int Int32 NULL, + p_string String, + p_array Array(Int32) +) ENGINE = MergeTree ORDER BY id; + +SET max_insert_threads = 1, max_threads = 1, min_insert_block_size_rows = 0, min_insert_block_size_bytes = 0; +INSERT INTO test_groupConcat VALUES (0, 95, 'abc', [1, 2, 3]), (1, NULL, 'a', [993, 986, 979, 972]), (2, 123, 'makson95', []); + +SELECT * FROM test_groupConcat; + +SELECT groupConcat(p_int) FROM test_groupConcat; +SELECT groupConcat(p_string) FROM test_groupConcat; +SELECT groupConcat(p_array) FROM test_groupConcat; + +SELECT groupConcat('', 1)(p_array) FROM test_groupConcat; +SELECT groupConcat('', 3)(p_string) FROM test_groupConcat; +SELECT groupConcat('', 2)(p_int) FROM test_groupConcat; +SELECT groupConcat('\n', 3)(p_int) FROM test_groupConcat; + +SELECT groupConcat(',')(p_int) FROM test_groupConcat; +SELECT groupConcat(',')(p_string) FROM test_groupConcat; +SELECT groupConcat(',', 2)(p_array) FROM test_groupConcat; + +SELECT groupConcat(p_int) FROM test_groupConcat WHERE id = 1; + +INSERT INTO test_groupConcat VALUES (0, 95, 'abc', [1, 2, 3]), (1, NULL, 'a', [993, 986, 979, 972]), (2, 123, 'makson95', []); +INSERT INTO test_groupConcat VALUES (0, 95, 'abc', [1, 2, 3]), (1, NULL, 'a', [993, 986, 979, 972]), (2, 123, 'makson95', []); + +SELECT groupConcat(p_int) FROM test_groupConcat; +SELECT groupConcat(',')(p_string) FROM test_groupConcat; +SELECT groupConcat(p_array) FROM test_groupConcat; + +SELECT groupConcat(123)(number) FROM numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT groupConcat(',', '3')(number) FROM numbers(10); -- { serverError BAD_ARGUMENTS } +SELECT groupConcat(',', 0)(number) FROM numbers(10); -- { serverError BAD_ARGUMENTS } +SELECT groupConcat(',', -1)(number) FROM numbers(10); -- { serverError BAD_ARGUMENTS } +SELECT groupConcat(',', 3, 3)(number) FROM numbers(10); -- { serverError TOO_MANY_ARGUMENTS_FOR_FUNCTION } + +SELECT length(groupConcat(number)) FROM numbers(100000); + +SELECT 'TESTING GroupConcat second argument overload'; + +TRUNCATE TABLE test_groupConcat; + +INSERT INTO test_groupConcat VALUES (0, 95, 'abc', [1, 2, 3]), (1, NULL, 'a', [993, 986, 979, 972]), (2, 123, 'makson95', []); + +SELECT groupConcat(p_int, ',') FROM 
test_groupConcat SETTINGS enable_analyzer=1; +SELECT groupConcat('.')(p_string) FROM test_groupConcat SETTINGS enable_analyzer=1; +SELECT groupConcat(p_array, '/') FROM test_groupConcat SETTINGS enable_analyzer=1; + +SELECT group_concat(p_array, '/') FROM test_groupConcat SETTINGS enable_analyzer=1; +SELECT grouP_CONcat(p_array, '/') FROM test_groupConcat SETTINGS enable_analyzer=1; +SELECT grouP_CONcat(',')(p_array, '/') FROM test_groupConcat SETTINGS enable_analyzer=1; -- overrides current parameter +SELECT grouP_CONcat(',', 2)(p_array, '/') FROM test_groupConcat SETTINGS enable_analyzer=1; -- works fine with both arguments + +DROP TABLE IF EXISTS test_groupConcat; + +CREATE TABLE test_groupConcat +( + id UInt64, + p_int Int32, +) ENGINE = MergeTree ORDER BY id; + +INSERT INTO test_groupConcat SELECT number, number FROM numbers(100000) SETTINGS min_insert_block_size_rows = 2000; + +SELECT length(groupConcat(p_int)) FROM test_groupConcat; + +DROP TABLE IF EXISTS test_groupConcat; diff --git a/parser/testdata/03156_nullable_number_tips/ast.json b/parser/testdata/03156_nullable_number_tips/ast.json new file mode 100644 index 000000000..89ff471fb --- /dev/null +++ b/parser/testdata/03156_nullable_number_tips/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001703276, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03156_nullable_number_tips/metadata.json b/parser/testdata/03156_nullable_number_tips/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03156_nullable_number_tips/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03156_nullable_number_tips/query.sql b/parser/testdata/03156_nullable_number_tips/query.sql new file mode 100644 index 000000000..c8d10b329 --- /dev/null +++ b/parser/testdata/03156_nullable_number_tips/query.sql @@ -0,0 +1,25 @@ +SET output_format_pretty_display_footer_column_names=0; +SELECT 123456789 AS x FORMAT PrettyCompact; +SELECT toNullable(123456789) AS x FORMAT PrettyCompact; +SELECT toLowCardinality(toNullable(123456789)) AS x FORMAT PrettyCompact; +SELECT toNullable(toLowCardinality(123456789)) AS x FORMAT PrettyCompact; +SELECT toLowCardinality(123456789) AS x FORMAT PrettyCompact; + +CREATE TEMPORARY TABLE test (x Nullable(UInt64), PRIMARY KEY ()) ENGINE = MergeTree SETTINGS ratio_of_defaults_for_sparse_serialization = 0, serialization_info_version = 'with_types', nullable_serialization_version = 'allow_sparse'; +INSERT INTO test SELECT number % 2 ? number * 123456789 : NULL FROM numbers(10); + +SELECT DISTINCT dumpColumnStructure(*) FROM test; + +SELECT * FROM test ORDER BY ALL DESC NULLS LAST LIMIT 1 FORMAT PRETTY; +SELECT * FROM test ORDER BY ALL ASC NULLS LAST LIMIT 1 FORMAT PRETTY; +SELECT * FROM test ORDER BY ALL ASC NULLS FIRST LIMIT 1 FORMAT PrettySpace; + +DROP TEMPORARY TABLE test; +CREATE TEMPORARY TABLE test (x UInt64, PRIMARY KEY ()) ENGINE = MergeTree SETTINGS ratio_of_defaults_for_sparse_serialization = 0, serialization_info_version = 'with_types', nullable_serialization_version = 'allow_sparse'; +INSERT INTO test SELECT number % 2 ? 
number * 123456789 : NULL FROM numbers(10); + +SELECT DISTINCT dumpColumnStructure(*) FROM test; + +SELECT * FROM test ORDER BY ALL DESC NULLS LAST LIMIT 1 FORMAT PRETTY; +SELECT * FROM test ORDER BY ALL ASC NULLS LAST LIMIT 1 FORMAT PRETTY; +SELECT * FROM test ORDER BY ALL ASC NULLS FIRST LIMIT 1 FORMAT PrettySpace; diff --git a/parser/testdata/03156_tuple_map_low_cardinality/ast.json b/parser/testdata/03156_tuple_map_low_cardinality/ast.json new file mode 100644 index 000000000..5e754018b --- /dev/null +++ b/parser/testdata/03156_tuple_map_low_cardinality/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_map_lc (children 1)" + }, + { + "explain": " Identifier t_map_lc" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001107072, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/03156_tuple_map_low_cardinality/metadata.json b/parser/testdata/03156_tuple_map_low_cardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03156_tuple_map_low_cardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03156_tuple_map_low_cardinality/query.sql b/parser/testdata/03156_tuple_map_low_cardinality/query.sql new file mode 100644 index 000000000..836b426a9 --- /dev/null +++ b/parser/testdata/03156_tuple_map_low_cardinality/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS t_map_lc; + +CREATE TABLE t_map_lc +( + id UInt64, + t Tuple(m Map(LowCardinality(String), LowCardinality(String))) +) +ENGINE = MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO t_map_lc SELECT * FROM generateRandom('id UInt64, t Tuple(m Map(LowCardinality(String), LowCardinality(String)))') LIMIT 100000; + +SELECT count(), FROM t_map_lc WHERE NOT ignore(*, mapKeys(t.m)); +SELECT count(), FROM t_map_lc WHERE NOT ignore(*, t.m.keys); +SELECT count(), FROM t_map_lc WHERE NOT ignore(*, t.m.values); +SELECT * FROM t_map_lc WHERE mapContains(t.m, 'not_existing_key_1337'); + +DROP TABLE t_map_lc; + +CREATE TABLE t_map_lc +( + id UInt64, + t Tuple(m Map(LowCardinality(String), LowCardinality(String))) +) +ENGINE = MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part = '10G'; + +INSERT INTO t_map_lc SELECT * FROM generateRandom('id UInt64, t Tuple(m Map(LowCardinality(String), LowCardinality(String)))') LIMIT 100000; + +SELECT count(), FROM t_map_lc WHERE NOT ignore(*, mapKeys(t.m)); +SELECT count(), FROM t_map_lc WHERE NOT ignore(*, t.m.keys); +SELECT count(), FROM t_map_lc WHERE NOT ignore(*, t.m.values); +SELECT * FROM t_map_lc WHERE mapContains(t.m, 'not_existing_key_1337'); + +DROP TABLE t_map_lc; diff --git a/parser/testdata/03157_dynamic_type_json/ast.json b/parser/testdata/03157_dynamic_type_json/ast.json new file mode 100644 index 000000000..4fa356e72 --- /dev/null +++ b/parser/testdata/03157_dynamic_type_json/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001342495, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03157_dynamic_type_json/metadata.json b/parser/testdata/03157_dynamic_type_json/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03157_dynamic_type_json/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03157_dynamic_type_json/query.sql 
b/parser/testdata/03157_dynamic_type_json/query.sql new file mode 100644 index 000000000..82409c930 --- /dev/null +++ b/parser/testdata/03157_dynamic_type_json/query.sql @@ -0,0 +1,15 @@ +SET allow_experimental_dynamic_type=1; +SET enable_json_type=1; +SET allow_experimental_variant_type=1; + +DROP TABLE IF EXISTS test_deep_nested_json; +CREATE TABLE test_deep_nested_json (i UInt16, d JSON) ENGINE = Memory; + +INSERT INTO test_deep_nested_json VALUES (1, '{"level1": {"level2": {"level3": {"level4": {"level5": {"level6": {"level7": {"level8": {"level9": {"level10": "deep_value"}}}}}}}}}}'); +INSERT INTO test_deep_nested_json VALUES (2, '{"level1": {"level2": {"level3": {"level4": {"level5": {"level6": {"level7": {"level8": {"level9": {"level10": "deep_array_value"}}}}}}}}}}'); + +SELECT * FROM test_deep_nested_json ORDER BY i; + +SELECT ''; +SELECT d::Dynamic d1, dynamicType(d1) FROM test_deep_nested_json ORDER BY i; +DROP TABLE test_deep_nested_json; diff --git a/parser/testdata/03157_negative_positional_arguments_ubsan/ast.json b/parser/testdata/03157_negative_positional_arguments_ubsan/ast.json new file mode 100644 index 000000000..b9e370089 --- /dev/null +++ b/parser/testdata/03157_negative_positional_arguments_ubsan/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-9223372036854775808" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001041239, + "rows_read": 7, + "bytes_read": 261 + } +} diff --git a/parser/testdata/03157_negative_positional_arguments_ubsan/metadata.json b/parser/testdata/03157_negative_positional_arguments_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03157_negative_positional_arguments_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03157_negative_positional_arguments_ubsan/query.sql b/parser/testdata/03157_negative_positional_arguments_ubsan/query.sql new file mode 100644 index 000000000..ddf5185c9 --- /dev/null +++ b/parser/testdata/03157_negative_positional_arguments_ubsan/query.sql @@ -0,0 +1 @@ +SELECT 1 GROUP BY -9223372036854775808; -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03158_dynamic_type_from_variant/ast.json b/parser/testdata/03158_dynamic_type_from_variant/ast.json new file mode 100644 index 000000000..983763895 --- /dev/null +++ b/parser/testdata/03158_dynamic_type_from_variant/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001199617, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03158_dynamic_type_from_variant/metadata.json b/parser/testdata/03158_dynamic_type_from_variant/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03158_dynamic_type_from_variant/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03158_dynamic_type_from_variant/query.sql b/parser/testdata/03158_dynamic_type_from_variant/query.sql new file mode 100644 index 000000000..429ac21b5 --- /dev/null +++ 
b/parser/testdata/03158_dynamic_type_from_variant/query.sql @@ -0,0 +1,15 @@ +SET allow_experimental_dynamic_type=1; +SET allow_experimental_variant_type=1; +SET allow_suspicious_types_in_order_by=1; + +CREATE TABLE test_variable (v Variant(String, UInt32, IPv6, Bool, DateTime64)) ENGINE = Memory; +CREATE TABLE test_dynamic (d Dynamic) ENGINE = Memory; + +INSERT INTO test_variable VALUES (1), ('s'), (0), ('0'), ('true'), ('false'), ('2001-01-01 01:01:01.111'), (NULL); + +SELECT v, toTypeName(v) FROM test_variable ORDER BY v; + +INSERT INTO test_dynamic SELECT * FROM test_variable; + +SELECT ''; +SELECT d, dynamicType(d) FROM test_dynamic ORDER BY d; diff --git a/parser/testdata/03159_dynamic_type_all_types/ast.json b/parser/testdata/03159_dynamic_type_all_types/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03159_dynamic_type_all_types/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03159_dynamic_type_all_types/metadata.json b/parser/testdata/03159_dynamic_type_all_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03159_dynamic_type_all_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03159_dynamic_type_all_types/query.sql b/parser/testdata/03159_dynamic_type_all_types/query.sql new file mode 100644 index 000000000..cf8ba687d --- /dev/null +++ b/parser/testdata/03159_dynamic_type_all_types/query.sql @@ -0,0 +1,94 @@ +-- Tags: no-random-settings + +SET allow_experimental_dynamic_type=1; +SET allow_experimental_variant_type=1; +SET allow_suspicious_low_cardinality_types=1; +SET allow_suspicious_types_in_order_by=1; + +CREATE TABLE t (d Dynamic(max_types=254)) ENGINE = Memory; +-- Integer types: signed and unsigned integers (UInt8, UInt16, UInt32, UInt64, UInt128, UInt256, Int8, Int16, Int32, Int64, Int128, Int256) +INSERT INTO t VALUES (-128::Int8), (-127::Int8), (-1::Int8), (0::Int8), (1::Int8), (126::Int8), (127::Int8); +INSERT INTO t VALUES (-128::Int8), (-127::Int8), (-1::Int8), (0::Int8), (1::Int8), (126::Int8), (127::Int8); +INSERT INTO t VALUES (-32768::Int16), (-32767::Int16), (-1::Int16), (0::Int16), (1::Int16), (32766::Int16), (32767::Int16); +INSERT INTO t VALUES (-2147483648::Int32), (-2147483647::Int32), (-1::Int32), (0::Int32), (1::Int32), (2147483646::Int32), (2147483647::Int32); +INSERT INTO t VALUES (-9223372036854775808::Int64), (-9223372036854775807::Int64), (-1::Int64), (0::Int64), (1::Int64), (9223372036854775806::Int64), (9223372036854775807::Int64); +INSERT INTO t VALUES (-170141183460469231731687303715884105728::Int128), (-170141183460469231731687303715884105727::Int128), (-1::Int128), (0::Int128), (1::Int128), (170141183460469231731687303715884105726::Int128), (170141183460469231731687303715884105727::Int128); +INSERT INTO t VALUES (-57896044618658097711785492504343953926634992332820282019728792003956564819968::Int256), (-57896044618658097711785492504343953926634992332820282019728792003956564819967::Int256), (-1::Int256), (0::Int256), (1::Int256), (57896044618658097711785492504343953926634992332820282019728792003956564819966::Int256), (57896044618658097711785492504343953926634992332820282019728792003956564819967::Int256); + +INSERT INTO t VALUES (0::UInt8), (1::UInt8), (254::UInt8), (255::UInt8); +INSERT INTO t VALUES (0::UInt16), (1::UInt16), (65534::UInt16), (65535::UInt16); +INSERT INTO t VALUES (0::UInt32), (1::UInt32), (4294967294::UInt32), (4294967295::UInt32); +INSERT INTO t VALUES (0::UInt64), (1::UInt64), 
(18446744073709551614::UInt64), (18446744073709551615::UInt64); +INSERT INTO t VALUES (0::UInt128), (1::UInt128), (340282366920938463463374607431768211454::UInt128), (340282366920938463463374607431768211455::UInt128); +INSERT INTO t VALUES (0::UInt256), (1::UInt256), (115792089237316195423570985008687907853269984665640564039457584007913129639934::UInt256), (115792089237316195423570985008687907853269984665640564039457584007913129639935::UInt256); + +-- Floating-point numbers: floats(Float32 and Float64) and Decimal values +INSERT INTO t VALUES (1.17549435e-38::Float32), (3.40282347e+38::Float32), (-3.40282347e+38::Float32), (-1.17549435e-38::Float32), (1.4e-45::Float32), (-1.4e-45::Float32); +INSERT INTO t VALUES (inf::Float32), (-inf::Float32), (nan::Float32); +INSERT INTO t VALUES (inf::FLOAT(12)), (-inf::FLOAT(12)), (nan::FLOAT(12)); +INSERT INTO t VALUES (inf::FLOAT(15,22)), (-inf::FLOAT(15,22)), (nan::FLOAT(15,22)); + +INSERT INTO t VALUES (1.17549435e-38::Float64), (3.40282347e+38::Float64), (-3.40282347e+38::Float64), (-1.17549435e-38::Float64), (1.4e-45::Float64), (-1.4e-45::Float64); +INSERT INTO t VALUES (2.2250738585072014e-308::Float64), (1.7976931348623157e+308::Float64), (-1.7976931348623157e+308::Float64), (-2.2250738585072014e-308::Float64); +INSERT INTO t VALUES (inf::Float64), (-inf::Float64), (nan::Float64); +INSERT INTO t VALUES (inf::DOUBLE(12)), (-inf::DOUBLE(12)), (nan::DOUBLE(12)); +INSERT INTO t VALUES (inf::DOUBLE(15,22)), (-inf::DOUBLE(15,22)), (nan::DOUBLE(15,22)); + +INSERT INTO t VALUES (-99999999.9::Decimal32(1)); +INSERT INTO t VALUES (-999999999.99::Decimal64(2)); +INSERT INTO t VALUES (-999999999.999::Decimal128(3)); +INSERT INTO t VALUES (-999999999.9999::Decimal256(4)); + +-- Strings: String and FixedString +INSERT INTO t VALUES ('string'::String), ('1'::FixedString(1)), ('1'::FixedString(2)), ('1'::FixedString(10)); --(''::String), + +-- Boolean +INSERT INTO t VALUES ('1'::Bool), (0::Bool); + +-- Dates: use Date and Date32 for days, and DateTime and DateTime64 for instances in time +INSERT INTO t VALUES ('2022-01-01'::Date), ('2022-01-01'::Date32), ('2022-01-01 01:01:01'::DateTime), ('2022-01-01 01:01:01.011'::DateTime64); + +-- UUID +INSERT INTO t VALUES ('dededdb6-7835-4ce4-8d11-b5de6f2820e9'::UUID); +INSERT INTO t VALUES ('00000000-0000-0000-0000-000000000000'::UUID); + +-- LowCardinality +INSERT INTO t VALUES ('1'::LowCardinality(String)), ('1'::LowCardinality(String)), (0::LowCardinality(UInt16)); + +-- Arrays +INSERT INTO t VALUES ([]::Array(Dynamic)), ([[]]::Array(Array(Dynamic))), ([[[]]]::Array(Array(Array(Dynamic)))); + +-- Tuple +INSERT INTO t VALUES (()::Tuple(Dynamic)), ((())::Tuple(Tuple(Dynamic))), (((()))::Tuple(Tuple(Tuple(Dynamic)))); + +-- Map. 
+INSERT INTO t VALUES (map(11::Dynamic, 'v1'::Dynamic, '22'::Dynamic, 1::Dynamic)); + +-- SimpleAggregateFunction +INSERT INTO t VALUES ([1,2]::SimpleAggregateFunction(anyLast, Array(Int16))); + +-- IPs +INSERT INTO t VALUES (toIPv4('192.168.0.1')), (toIPv6('::1')); + +-- Geo +INSERT INTO t VALUES ((1.23, 4.56)::Point), (([(1.23, 4.56)::Point, (2.34, 5.67)::Point])::Ring); +INSERT INTO t VALUES ([[[(0, 0), (10, 0), (10, 10), (0, 10)]], [[(20, 20), (50, 20), (50, 50), (20, 50)],[(30, 30), (50, 50), (50, 30)]]]::MultiPolygon); + +-- Interval +INSERT INTO t VALUES (interval '1' day), (interval '2' month), (interval '3' year); + +-- Nested +INSERT INTO t VALUES ([(1, 'aa'), (2, 'bb')]::Nested(x UInt32, y String)); +INSERT INTO t VALUES ([(1, (2, ['aa', 'bb']), [(3, 'cc'), (4, 'dd')]), (5, (6, ['ee', 'ff']), [(7, 'gg'), (8, 'hh')])]::Nested(x UInt32, y Tuple(y1 UInt32, y2 Array(String)), z Nested(z1 UInt32, z2 String))); + +SELECT dynamicType(d), d FROM t ORDER BY substring(dynamicType(d),1,1), length(dynamicType(d)), d; + +CREATE TABLE t2 (d Dynamic(max_types=254)) ENGINE = Memory; +INSERT INTO t2 SELECT * FROM t; + +SELECT ''; +SELECT dynamicType(d), d FROM t2 ORDER BY substring(dynamicType(d),1,1), length(dynamicType(d)), d; + +SELECT ''; +SELECT uniqExact(dynamicType(d)) t_ FROM t; +SELECT uniqExact(dynamicType(d)) t_ FROM t2; diff --git a/parser/testdata/03160_dynamic_type_agg/ast.json b/parser/testdata/03160_dynamic_type_agg/ast.json new file mode 100644 index 000000000..195cae4d0 --- /dev/null +++ b/parser/testdata/03160_dynamic_type_agg/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001843702, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03160_dynamic_type_agg/metadata.json b/parser/testdata/03160_dynamic_type_agg/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03160_dynamic_type_agg/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03160_dynamic_type_agg/query.sql b/parser/testdata/03160_dynamic_type_agg/query.sql new file mode 100644 index 000000000..f99232031 --- /dev/null +++ b/parser/testdata/03160_dynamic_type_agg/query.sql @@ -0,0 +1,10 @@ +SET allow_experimental_dynamic_type=1; + +CREATE TABLE t (d Dynamic) ENGINE = Memory; + +INSERT INTO t SELECT sumState(number) AS d FROM numbers(100); + +SELECT finalizeAggregation(d.`AggregateFunction(sum, UInt64)`), + sumMerge(d.`AggregateFunction(sum, UInt64)`) +FROM t GROUP BY d.`AggregateFunction(sum, UInt64)`; + diff --git a/parser/testdata/03161_cnf_reduction/ast.json b/parser/testdata/03161_cnf_reduction/ast.json new file mode 100644 index 000000000..7939f1ed8 --- /dev/null +++ b/parser/testdata/03161_cnf_reduction/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 03161_table (children 1)" + }, + { + "explain": " Identifier 03161_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001063426, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/03161_cnf_reduction/metadata.json b/parser/testdata/03161_cnf_reduction/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03161_cnf_reduction/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03161_cnf_reduction/query.sql 
b/parser/testdata/03161_cnf_reduction/query.sql new file mode 100644 index 000000000..c232823e9 --- /dev/null +++ b/parser/testdata/03161_cnf_reduction/query.sql @@ -0,0 +1,72 @@ +DROP TABLE IF EXISTS 03161_table; + +CREATE TABLE 03161_table (id UInt32, f UInt8) ENGINE = Memory; + +INSERT INTO 03161_table VALUES (0, 0), (1, 1), (2, 0); + +SELECT '-- Expected plan with analyzer:'; + +EXPLAIN SYNTAX +SELECT id +FROM 03161_table +WHERE f AND (NOT(f) OR f) +SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, enable_analyzer = 1; + +SELECT ''; + +SELECT '-- Expected result with analyzer:'; + +SELECT id +FROM 03161_table +WHERE f AND (NOT(f) OR f) +SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, enable_analyzer = 1; + +SELECT ''; + +SELECT '-- Expected plan w/o analyzer:'; + +EXPLAIN SYNTAX +SELECT id +FROM 03161_table +WHERE f AND (NOT(f) OR f) +SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, enable_analyzer = 0; + +SELECT ''; + +SELECT '-- Expected result w/o analyzer:'; + +SELECT id +FROM 03161_table +WHERE f AND (NOT(f) OR f) +SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, enable_analyzer = 0; + +DROP TABLE IF EXISTS 03161_table; + +-- Checking reproducer from GitHub issue +-- https://github.com/ClickHouse/ClickHouse/issues/57400 + +DROP TABLE IF EXISTS 03161_reproducer; + +CREATE TABLE 03161_reproducer (c0 UInt8, c1 UInt8, c2 UInt8, c3 UInt8, c4 UInt8, c5 UInt8, c6 UInt8, c7 UInt8, c8 UInt8, c9 UInt8) ENGINE = Memory; + +INSERT INTO 03161_reproducer VALUES (0, 0, 0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 1), (0, 0, 0, 0, 0, 0, 0, 0, 1, 0), (0, 0, 0, 0, 0, 0, 0, 0, 1, 1), (0, 0, 0, 0, 0, 0, 0, 1, 0, 0), (0, 0, 0, 0, 0, 0, 0, 1, 0, 1), (0, 0, 0, 0, 0, 0, 0, 1, 1, 0), (0, 0, 0, 0, 0, 0, 0, 1, 1, 1); + +SELECT ''; + +SELECT '-- Reproducer from the issue with analyzer'; + +SELECT count() +FROM 03161_reproducer +WHERE ((NOT c2) AND c2 AND (NOT c1)) OR ((NOT c2) AND c3 AND (NOT c5)) OR ((NOT c7) AND (NOT c8)) OR (c9 AND c6 AND c8 AND (NOT c8) AND (NOT c7)) +SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, enable_analyzer = 1; + +SELECT ''; + +SELECT '-- Reproducer from the issue w/o analyzer'; + +SELECT count() +FROM 03161_reproducer +WHERE ((NOT c2) AND c2 AND (NOT c1)) OR ((NOT c2) AND c3 AND (NOT c5)) OR ((NOT c7) AND (NOT c8)) OR (c9 AND c6 AND c8 AND (NOT c8) AND (NOT c7)) +SETTINGS convert_query_to_cnf = 1, optimize_using_constraints = 1, enable_analyzer = 0; + +DROP TABLE IF EXISTS 03161_reproducer; diff --git a/parser/testdata/03161_create_table_as_mv/ast.json b/parser/testdata/03161_create_table_as_mv/ast.json new file mode 100644 index 000000000..b799566e7 --- /dev/null +++ b/parser/testdata/03161_create_table_as_mv/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery base_table (children 1)" + }, + { + "explain": " Identifier base_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001999897, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03161_create_table_as_mv/metadata.json b/parser/testdata/03161_create_table_as_mv/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03161_create_table_as_mv/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03161_create_table_as_mv/query.sql b/parser/testdata/03161_create_table_as_mv/query.sql new file mode 100644 index 000000000..ae9526bfd --- /dev/null 
+++ b/parser/testdata/03161_create_table_as_mv/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS base_table; +DROP TABLE IF EXISTS target_table; +DROP TABLE IF EXISTS mv_from_base_to_target; +DROP TABLE IF EXISTS mv_with_storage; +DROP TABLE IF EXISTS other_table_1; +DROP TABLE IF EXISTS other_table_2; + +CREATE TABLE base_table (date DateTime, id String, cost Float64) ENGINE = MergeTree() ORDER BY date; +CREATE TABLE target_table (id String, total AggregateFunction(sum, Float64)) ENGINE = MergeTree() ORDER BY id; +CREATE MATERIALIZED VIEW mv_from_base_to_target TO target_table AS Select id, sumState(cost) AS total FROM base_table GROUP BY id; +CREATE MATERIALIZED VIEW mv_with_storage ENGINE=MergeTree() ORDER BY id AS Select id, sumState(cost) FROM base_table GROUP BY id; + +CREATE TABLE other_table_1 AS mv_with_storage; +CREATE TABLE other_table_2 AS mv_from_base_to_target; -- { serverError INCORRECT_QUERY } diff --git a/parser/testdata/03161_decimal_binary_math/ast.json b/parser/testdata/03161_decimal_binary_math/ast.json new file mode 100644 index 000000000..8c23d7a3e --- /dev/null +++ b/parser/testdata/03161_decimal_binary_math/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function toDecimal32 (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '42.4242'" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Function toDecimal32 (alias y) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2.42'" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function pow (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Literal UInt64_6" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001401637, + "rows_read": 19, + "bytes_read": 704 + } +} diff --git a/parser/testdata/03161_decimal_binary_math/metadata.json b/parser/testdata/03161_decimal_binary_math/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03161_decimal_binary_math/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03161_decimal_binary_math/query.sql b/parser/testdata/03161_decimal_binary_math/query.sql new file mode 100644 index 000000000..5484cc6a9 --- /dev/null +++ b/parser/testdata/03161_decimal_binary_math/query.sql @@ -0,0 +1,79 @@ +SELECT toDecimal32('42.4242', 4) AS x, toDecimal32('2.42', 2) AS y, round(pow(x, y), 6); +SELECT toDecimal64('42.4242', 4) AS x, toDecimal32('2.42', 2) AS y, round(pow(x, y), 6); +SELECT toDecimal32('42.4242', 4) AS x, toDecimal64('2.42', 2) AS y, round(pow(x, y), 6); +SELECT toDecimal64('42.4242', 4) AS x, toDecimal32('2.42', 2) AS y, round(pow(x, y), 6); +SELECT toDecimal32('42.4242', 4) AS x, materialize(toDecimal32('2.42', 2)) AS y, round(pow(x, y), 6); +SELECT materialize(toDecimal32('42.4242', 4)) AS x, toDecimal32('2.42', 2) AS y, round(pow(x, y), 6); +SELECT materialize(toDecimal32('42.4242', 4)) AS x, materialize(toDecimal32('2.42', 2)) AS y, round(pow(x, y), 6); +SELECT 
42.4242 AS x, toDecimal32('2.42', 2) AS y, round(pow(x, y), 6); +SELECT toDecimal32('42.4242', 4) AS x, 2.42 AS y, round(pow(x, y), 6); +SELECT materialize(42.4242) AS x, toDecimal32('2.42', 2) AS y, round(pow(x, y), 6); +SELECT 42.4242 AS x, materialize(toDecimal32('2.42', 2)) AS y, round(pow(x, y), 6); +SELECT materialize(42.4242) AS x, materialize(toDecimal32('2.42', 2)) AS y, round(pow(x, y), 6); +SELECT materialize(toDecimal32('42.4242', 4)) AS x, 2.42 AS y, round(pow(x, y), 6); +SELECT toDecimal32('42.4242', 4) AS x, materialize(2.42) AS y, round(pow(x, y), 6); +SELECT materialize(toDecimal32('42.4242', 4)) AS x, materialize(2.42) AS y, round(pow(x, y), 6); + +SELECT toDecimal32('0.4242', 4) AS x, toDecimal32('0.24', 2) AS y, round(atan2(y, x), 6); +SELECT toDecimal64('0.4242', 4) AS x, toDecimal32('0.24', 2) AS y, round(atan2(y, x), 6); +SELECT toDecimal32('0.4242', 4) AS x, toDecimal64('0.24', 2) AS y, round(atan2(y, x), 6); +SELECT toDecimal64('0.4242', 4) AS x, toDecimal64('0.24', 2) AS y, round(atan2(y, x), 6); +SELECT toDecimal32('0.4242', 4) AS x, materialize(toDecimal32('0.24', 2)) AS y, round(atan2(y, x), 6); +SELECT materialize(toDecimal32('0.4242', 4)) AS x, toDecimal32('0.24', 2) AS y, round(atan2(y, x), 6); +SELECT materialize(toDecimal32('0.4242', 4)) AS x, materialize(toDecimal32('0.24', 2)) AS y, round(atan2(y, x), 6); +SELECT 0.4242 AS x, toDecimal32('0.24', 2) AS y, round(atan2(y, x), 6); +SELECT toDecimal32('0.4242', 4) AS x, 0.24 AS y, round(atan2(y, x), 6); +SELECT materialize(0.4242) AS x, toDecimal32('0.24', 2) AS y, round(atan2(y, x), 6); +SELECT 0.4242 AS x, materialize(toDecimal32('0.24', 2)) AS y, round(atan2(y, x), 6); +SELECT materialize(0.4242) AS x, materialize(toDecimal32('0.24', 2)) AS y, round(atan2(y, x), 6); +SELECT materialize(toDecimal32('0.4242', 4)) AS x, 0.24 AS y, round(atan2(y, x), 6); +SELECT toDecimal32('0.4242', 4) AS x, materialize(0.24) AS y, round(atan2(y, x), 6); +SELECT materialize(toDecimal32('0.4242', 4)) AS x, materialize(0.24) AS y, round(atan2(y, x), 6); + +SELECT toDecimal32('42.4242', 4) AS x, toDecimal32('2.42', 2) AS y, round(min2(x, y), 6); +SELECT toDecimal64('42.4242', 4) AS x, toDecimal32('2.42', 2) AS y, round(min2(x, y), 6); +SELECT toDecimal32('42.4242', 4) AS x, toDecimal64('2.42', 2) AS y, round(min2(x, y), 6); +SELECT toDecimal64('42.4242', 4) AS x, toDecimal64('2.42', 2) AS y, round(min2(x, y), 6); +SELECT toDecimal32('42.4242', 4) AS x, materialize(toDecimal32('2.42', 2)) AS y, round(min2(x, y), 6); +SELECT materialize(toDecimal32('42.4242', 4)) AS x, toDecimal32('2.42', 2) AS y, round(min2(x, y), 6); +SELECT materialize(toDecimal32('42.4242', 4)) AS x, materialize(toDecimal32('2.42', 2)) AS y, round(min2(x, y), 6); +SELECT 42.4242 AS x, toDecimal32('2.42', 2) AS y, round(min2(x, y), 6); +SELECT toDecimal32('42.4242', 4) AS x, 2.42 AS y, round(min2(x, y), 6); +SELECT materialize(42.4242) AS x, toDecimal32('2.42', 2) AS y, round(min2(x, y), 6); +SELECT 42.4242 AS x, materialize(toDecimal32('2.42', 2)) AS y, round(min2(x, y), 6); +SELECT materialize(42.4242) AS x, materialize(toDecimal32('2.42', 2)) AS y, round(min2(x, y), 6); +SELECT materialize(toDecimal32('42.4242', 4)) AS x, 2.42 AS y, round(min2(x, y), 6); +SELECT toDecimal32('42.4242', 4) AS x, materialize(2.42) AS y, round(min2(x, y), 6); +SELECT materialize(toDecimal32('42.4242', 4)) AS x, materialize(2.42) AS y, round(min2(x, y), 6); + +SELECT toDecimal32('42.4242', 4) AS x, toDecimal32('2.42', 2) AS y, round(max2(x, y), 6); +SELECT toDecimal64('42.4242', 
4) AS x, toDecimal32('2.42', 2) AS y, round(max2(x, y), 6); +SELECT toDecimal32('42.4242', 4) AS x, toDecimal64('2.42', 2) AS y, round(max2(x, y), 6); +SELECT toDecimal64('42.4242', 4) AS x, toDecimal64('2.42', 2) AS y, round(max2(x, y), 6); +SELECT toDecimal32('42.4242', 4) AS x, materialize(toDecimal32('2.42', 2)) AS y, round(max2(x, y), 6); +SELECT materialize(toDecimal32('42.4242', 4)) AS x, toDecimal32('2.42', 2) AS y, round(max2(x, y), 6); +SELECT materialize(toDecimal32('42.4242', 4)) AS x, materialize(toDecimal32('2.42', 2)) AS y, round(max2(x, y), 6); +SELECT 42.4242 AS x, toDecimal32('2.42', 2) AS y, round(max2(x, y), 6); +SELECT toDecimal32('42.4242', 4) AS x, 2.42 AS y, round(max2(x, y), 6); +SELECT materialize(42.4242) AS x, toDecimal32('2.42', 2) AS y, round(max2(x, y), 6); +SELECT 42.4242 AS x, materialize(toDecimal32('2.42', 2)) AS y, round(max2(x, y), 6); +SELECT materialize(42.4242) AS x, materialize(toDecimal32('2.42', 2)) AS y, round(max2(x, y), 6); +SELECT materialize(toDecimal32('42.4242', 4)) AS x, 2.42 AS y, round(max2(x, y), 6); +SELECT toDecimal32('42.4242', 4) AS x, materialize(2.42) AS y, round(max2(x, y), 6); +SELECT materialize(toDecimal32('42.4242', 4)) AS x, materialize(2.42) AS y, round(max2(x, y), 6); + +SELECT toDecimal32('0.4242', 4) AS x, toDecimal32('0.4242', 4) AS y, round(hypot(x, y), 6); +SELECT toDecimal64('0.4242', 4) AS x, toDecimal32('0.4242', 4) AS y, round(hypot(x, y), 6); +SELECT toDecimal32('0.4242', 4) AS x, toDecimal64('0.4242', 4) AS y, round(hypot(x, y), 6); +SELECT toDecimal64('0.4242', 4) AS x, toDecimal64('0.4242', 4) AS y, round(hypot(x, y), 6); +SELECT toDecimal32('0.4242', 4) AS x, materialize(toDecimal32('0.4242', 4)) AS y, round(hypot(x, y), 6); +SELECT materialize(toDecimal32('0.4242', 4)) AS x, toDecimal32('0.4242', 4) AS y, round(hypot(x, y), 6); +SELECT materialize(toDecimal32('0.4242', 4)) AS x, materialize(toDecimal32('0.4242', 4)) AS y, round(hypot(x, y), 6); +SELECT 0.4242 AS x, toDecimal32('0.4242', 4) AS y, round(hypot(x, y), 6); +SELECT toDecimal32('0.4242', 4) AS x, 0.4242 AS y, round(hypot(x, y), 6); +SELECT materialize(0.4242) AS x, toDecimal32('0.4242', 4) AS y, round(hypot(x, y), 6); +SELECT 0.4242 AS x, materialize(toDecimal32('0.4242', 4)) AS y, round(hypot(x, y), 6); +SELECT materialize(0.4242) AS x, materialize(toDecimal32('0.4242', 4)) AS y, round(hypot(x, y), 6); +SELECT materialize(toDecimal32('0.4242', 4)) AS x, 0.4242 AS y, round(hypot(x, y), 6); +SELECT toDecimal32('0.4242', 4) AS x, materialize(0.4242) AS y, round(hypot(x, y), 6); +SELECT materialize(toDecimal32('0.4242', 4)) AS x, materialize(0.4242) AS y, round(hypot(x, y), 6); diff --git a/parser/testdata/03161_ipv4_ipv6_equality/ast.json b/parser/testdata/03161_ipv4_ipv6_equality/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03161_ipv4_ipv6_equality/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03161_ipv4_ipv6_equality/metadata.json b/parser/testdata/03161_ipv4_ipv6_equality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03161_ipv4_ipv6_equality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03161_ipv4_ipv6_equality/query.sql b/parser/testdata/03161_ipv4_ipv6_equality/query.sql new file mode 100644 index 000000000..da2a66097 --- /dev/null +++ b/parser/testdata/03161_ipv4_ipv6_equality/query.sql @@ -0,0 +1,11 @@ +-- Equal +SELECT toIPv4('127.0.0.1') = toIPv6('::ffff:127.0.0.1'); +SELECT 
toIPv6('::ffff:127.0.0.1') = toIPv4('127.0.0.1'); + +-- Not equal +SELECT toIPv4('127.0.0.1') = toIPv6('::ffff:127.0.0.2'); +SELECT toIPv4('127.0.0.2') = toIPv6('::ffff:127.0.0.1'); +SELECT toIPv6('::ffff:127.0.0.1') = toIPv4('127.0.0.2'); +SELECT toIPv6('::ffff:127.0.0.2') = toIPv4('127.0.0.1'); +SELECT toIPv4('127.0.0.1') = toIPv6('::ffef:127.0.0.1'); +SELECT toIPv6('::ffef:127.0.0.1') = toIPv4('127.0.0.1'); \ No newline at end of file diff --git a/parser/testdata/03161_lightweight_delete_projection/ast.json b/parser/testdata/03161_lightweight_delete_projection/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03161_lightweight_delete_projection/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03161_lightweight_delete_projection/metadata.json b/parser/testdata/03161_lightweight_delete_projection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03161_lightweight_delete_projection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03161_lightweight_delete_projection/query.sql b/parser/testdata/03161_lightweight_delete_projection/query.sql new file mode 100644 index 000000000..7db8a21e6 --- /dev/null +++ b/parser/testdata/03161_lightweight_delete_projection/query.sql @@ -0,0 +1,137 @@ +-- For cloud version, should also consider min_bytes_for_full_part_storage since packed storage exists, +-- but for less redundancy, just let CI test the parameter. + +SET lightweight_deletes_sync = 2, alter_sync = 2; + +DROP TABLE IF EXISTS users_compact; + + +SELECT 'compact part'; + +CREATE TABLE users_compact ( + uid Int16, + name String, + age Int16, + projection p1 (select count(), age group by age), + projection p2 (select age, name group by age, name) +) ENGINE = MergeTree order by uid +SETTINGS min_bytes_for_wide_part = 10485760; + +INSERT INTO users_compact VALUES (1231, 'John', 33); + +SELECT 'testing throw default mode'; + +-- { echoOn } + +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; + +DELETE FROM users_compact WHERE uid = 1231; -- { serverError SUPPORT_IS_DISABLED } + +SELECT 'testing drop mode'; +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; + +DELETE FROM users_compact WHERE uid = 1231; + +SELECT * FROM users_compact ORDER BY uid; + +-- all_1_1_0_2 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); + +-- expecting no projection +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); + +SELECT 'testing rebuild mode'; +INSERT INTO users_compact VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); + +ALTER TABLE users_compact MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; + +DELETE FROM users_compact WHERE uid = 6666; + +SELECT * FROM users_compact ORDER BY uid; + +-- all_1_1_0_4, all_3_3_0_4 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1); + +-- expecting projection p1, p2 +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_compact') AND (active = 1) AND parent_name like 'all_3_3%'; + +-- { echoOff } + +DROP TABLE users_compact; + + +SELECT 'wide part'; +CREATE TABLE users_wide ( + uid Int16, + name String, + age Int16, + projection p1 (select count(), age group by 
age), + projection p2 (select age, name group by age, name) +) ENGINE = MergeTree order by uid +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO users_wide VALUES (1231, 'John', 33); + +SELECT 'testing throw default mode'; + +-- { echoOn } + +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'throw'; + +DELETE FROM users_wide WHERE uid = 1231; -- { serverError SUPPORT_IS_DISABLED } + +SELECT 'testing drop mode'; +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'drop'; + +DELETE FROM users_wide WHERE uid = 1231; + +SELECT * FROM users_wide ORDER BY uid; + +-- all_1_1_0_2 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); + +-- expecting no projection +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); + +SELECT 'testing rebuild mode'; +INSERT INTO users_wide VALUES (6666, 'Ksenia', 48), (8888, 'Alice', 50); + +ALTER TABLE users_wide MODIFY SETTING lightweight_mutation_projection_mode = 'rebuild'; + +DELETE FROM users_wide WHERE uid = 6666; + +SELECT * FROM users_wide ORDER BY uid; + +-- all_1_1_0_4, all_3_3_0_4 +SELECT + name +FROM system.parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1); + +-- expecting projection p1, p2 +SELECT + name, parent_name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'users_wide') AND (active = 1) AND parent_name like 'all_3_3%'; + +-- { echoOff } + +DROP TABLE users_wide; diff --git a/parser/testdata/03162_dynamic_type_nested/ast.json b/parser/testdata/03162_dynamic_type_nested/ast.json new file mode 100644 index 000000000..939483609 --- /dev/null +++ b/parser/testdata/03162_dynamic_type_nested/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001724231, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03162_dynamic_type_nested/metadata.json b/parser/testdata/03162_dynamic_type_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03162_dynamic_type_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03162_dynamic_type_nested/query.sql b/parser/testdata/03162_dynamic_type_nested/query.sql new file mode 100644 index 000000000..59c224919 --- /dev/null +++ b/parser/testdata/03162_dynamic_type_nested/query.sql @@ -0,0 +1,17 @@ +SET allow_experimental_dynamic_type=1; +SET allow_suspicious_types_in_order_by=1; + +CREATE TABLE t (d Dynamic) ENGINE = Memory; + +INSERT INTO t VALUES ([(1, 'aa'), (2, 'bb')]::Nested(x UInt32, y Dynamic)) ; +INSERT INTO t VALUES ([(1, (2, ['aa', 'bb'])), (5, (6, ['ee', 'ff']))]::Nested(x UInt32, y Dynamic)); + +SELECT dynamicType(d), + d, + d.`Nested(x UInt32, y Dynamic)`.x, + d.`Nested(x UInt32, y Dynamic)`.y, + dynamicType(d.`Nested(x UInt32, y Dynamic)`.y[1]), + d.`Nested(x UInt32, y Dynamic)`.y.`String`, + d.`Nested(x UInt32, y Dynamic)`.y.`Tuple(Int64, Array(String))` +FROM t ORDER BY d +FORMAT PrettyCompactMonoBlock; diff --git a/parser/testdata/03163_dynamic_as_supertype/ast.json b/parser/testdata/03163_dynamic_as_supertype/ast.json new file mode 100644 index 000000000..1e217b483 --- /dev/null +++ b/parser/testdata/03163_dynamic_as_supertype/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": 
"explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001252456, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03163_dynamic_as_supertype/metadata.json b/parser/testdata/03163_dynamic_as_supertype/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03163_dynamic_as_supertype/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03163_dynamic_as_supertype/query.sql b/parser/testdata/03163_dynamic_as_supertype/query.sql new file mode 100644 index 000000000..e859fbd18 --- /dev/null +++ b/parser/testdata/03163_dynamic_as_supertype/query.sql @@ -0,0 +1,9 @@ +SET allow_experimental_dynamic_type=1; +SET allow_suspicious_types_in_order_by=1; +SELECT if(number % 2, number::Dynamic(max_types=3), ('str_' || toString(number))::Dynamic(max_types=2)) AS d, toTypeName(d), dynamicType(d) FROM numbers(4); +CREATE TABLE dynamic_test_1 (d Dynamic(max_types=3)) ENGINE = Memory; +INSERT INTO dynamic_test_1 VALUES ('str_1'), (42::UInt64); +CREATE TABLE dynamic_test_2 (d Dynamic(max_types=5)) ENGINE = Memory; +INSERT INTO dynamic_test_2 VALUES ('str_2'), (43::UInt64), ('2020-01-01'::Date), ([1, 2, 3]); +SELECT * FROM (SELECT d, dynamicType(d) FROM dynamic_test_1 UNION ALL SELECT d, dynamicType(d) FROM dynamic_test_2) order by d; + diff --git a/parser/testdata/03164_adapting_parquet_reader_output_size/ast.json b/parser/testdata/03164_adapting_parquet_reader_output_size/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03164_adapting_parquet_reader_output_size/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03164_adapting_parquet_reader_output_size/metadata.json b/parser/testdata/03164_adapting_parquet_reader_output_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03164_adapting_parquet_reader_output_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03164_adapting_parquet_reader_output_size/query.sql b/parser/testdata/03164_adapting_parquet_reader_output_size/query.sql new file mode 100644 index 000000000..402deba4f --- /dev/null +++ b/parser/testdata/03164_adapting_parquet_reader_output_size/query.sql @@ -0,0 +1,25 @@ +-- Tags: no-fasttest, no-random-settings, no-parallel + +set max_insert_threads=1; +set schema_inference_make_columns_nullable=0; +set engine_file_truncate_on_insert=1; + +-- Average string lengths, approximately: 2, 200, 200, 200 +INSERT INTO FUNCTION file('03164_adapting_parquet_reader_output_size.parquet', Parquet, 'short String, long1 String, long2 String, long_low_cardinality String') SELECT number%100, range(cityHash64(number), cityHash64(number)+10), repeat(cityHash64(number)::String, 6+number%10), repeat((number%10)::String, 200+number%10) FROM numbers(25000); + +-- Default limits are high, everything goes in one block. +SELECT max(blockSize())+sum(ignore(short, long2)) FROM file('03164_adapting_parquet_reader_output_size.parquet'); +-- Small column doesn't take a lot of bytes, everything goes in one block. +SELECT max(blockSize())+sum(ignore(short)) FROM file('03164_adapting_parquet_reader_output_size.parquet') settings input_format_parquet_prefer_block_bytes=100000; +-- Specific number of rows requested. 
+SELECT max(blockSize())+sum(ignore(short, long2)) FROM file('03164_adapting_parquet_reader_output_size.parquet') settings input_format_parquet_max_block_size=64; +-- Tiny byte limit; the reader bumps the block size to 128 rows instead of 1 row. +SELECT max(blockSize())+sum(ignore(short, long2)) FROM file('03164_adapting_parquet_reader_output_size.parquet') settings input_format_parquet_prefer_block_bytes=30; + +-- Intermediate byte limit. The two parquet reader implementations estimate row byte sizes slightly +-- differently and don't match exactly, so we round the result. +SELECT roundToExp2(max(blockSize())+sum(ignore(short, long2))) FROM file('03164_adapting_parquet_reader_output_size.parquet') settings input_format_parquet_prefer_block_bytes=700000; +SELECT roundToExp2(max(blockSize())+sum(ignore(short, long1, long2))) FROM file('03164_adapting_parquet_reader_output_size.parquet') settings input_format_parquet_prefer_block_bytes=700000; + +-- Only the new parquet reader uses a correct length estimate for dictionary-encoded strings. +SELECT roundToExp2(max(blockSize())+sum(ignore(short, long_low_cardinality))) FROM file('03164_adapting_parquet_reader_output_size.parquet') settings input_format_parquet_prefer_block_bytes=700000, input_format_parquet_use_native_reader_v3=1; diff --git a/parser/testdata/03164_analyzer_global_in_alias/ast.json b/parser/testdata/03164_analyzer_global_in_alias/ast.json new file mode 100644 index 000000000..ad66d34b1 --- /dev/null +++ b/parser/testdata/03164_analyzer_global_in_alias/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001468662, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03164_analyzer_global_in_alias/metadata.json b/parser/testdata/03164_analyzer_global_in_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03164_analyzer_global_in_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03164_analyzer_global_in_alias/query.sql b/parser/testdata/03164_analyzer_global_in_alias/query.sql new file mode 100644 index 000000000..ccfacd12d --- /dev/null +++ b/parser/testdata/03164_analyzer_global_in_alias/query.sql @@ -0,0 +1,6 @@ +SET enable_analyzer=1; +SELECT 1 GLOBAL IN (SELECT 1) AS s, s FROM remote('127.0.0.{2,3}', system.one) GROUP BY 1; +SELECT 1 GLOBAL IN (SELECT 1) AS s FROM remote('127.0.0.{2,3}', system.one) GROUP BY 1; + +SELECT 1 GLOBAL IN (SELECT 1) AS s, s FROM remote('127.0.0.{1,3}', system.one) GROUP BY 1; +SELECT 1 GLOBAL IN (SELECT 1) AS s FROM remote('127.0.0.{1,3}', system.one) GROUP BY 1; diff --git a/parser/testdata/03164_analyzer_rewrite_aggregate_function_with_if/ast.json b/parser/testdata/03164_analyzer_rewrite_aggregate_function_with_if/ast.json new file mode 100644 index 000000000..648d3e200 --- /dev/null +++ b/parser/testdata/03164_analyzer_rewrite_aggregate_function_with_if/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function countIf (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function multiIf (children 1)" + }, + { + 
"explain": " ExpressionList (children 3)" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_5" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.001185071, + "rows_read": 27, + "bytes_read": 1067 + } +} diff --git a/parser/testdata/03164_analyzer_rewrite_aggregate_function_with_if/metadata.json b/parser/testdata/03164_analyzer_rewrite_aggregate_function_with_if/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03164_analyzer_rewrite_aggregate_function_with_if/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03164_analyzer_rewrite_aggregate_function_with_if/query.sql b/parser/testdata/03164_analyzer_rewrite_aggregate_function_with_if/query.sql new file mode 100644 index 000000000..52f767d8a --- /dev/null +++ b/parser/testdata/03164_analyzer_rewrite_aggregate_function_with_if/query.sql @@ -0,0 +1 @@ +SELECT countIf(multiIf(number < 2, NULL, if(number = 4, 1, 0))) FROM numbers(5); diff --git a/parser/testdata/03164_analyzer_validate_tree_size/ast.json b/parser/testdata/03164_analyzer_validate_tree_size/ast.json new file mode 100644 index 000000000..689d5e80a --- /dev/null +++ b/parser/testdata/03164_analyzer_validate_tree_size/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001367037, + "rows_read": 2, + "bytes_read": 55 + } +} diff --git a/parser/testdata/03164_analyzer_validate_tree_size/metadata.json b/parser/testdata/03164_analyzer_validate_tree_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03164_analyzer_validate_tree_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03164_analyzer_validate_tree_size/query.sql b/parser/testdata/03164_analyzer_validate_tree_size/query.sql new file mode 100644 index 000000000..0e581592a --- /dev/null +++ b/parser/testdata/03164_analyzer_validate_tree_size/query.sql @@ -0,0 +1,1007 @@ +CREATE TABLE t +( +c1 Int64 , +c2 Int64 , +c3 Int64 , +c4 Int64 , +c5 Int64 , +c6 Int64 , +c7 Int64 , +c8 Int64 , +c9 Int64 , +c10 Int64 , +c11 Int64 , +c12 Int64 , +c13 Int64 , +c14 Int64 , +c15 Int64 , +c16 Int64 , +c17 Int64 , +c18 Int64 , +c19 Int64 , +c20 Int64 , +c21 Int64 , +c22 Int64 , +c23 Int64 , +c24 Int64 , +c25 Int64 , +c26 Int64 , +c27 Int64 , +c28 Int64 , +c29 Int64 , +c30 Int64 , +c31 Int64 , +c32 Int64 , +c33 Int64 , +c34 Int64 , +c35 Int64 , +c36 Int64 , +c37 Int64 , +c38 Int64 , +c39 Int64 , +c40 Int64 , +c41 Int64 , +c42 
Int64 , +c43 Int64 , +c44 Int64 , +c45 Int64 , +c46 Int64 , +c47 Int64 , +c48 Int64 , +c49 Int64 , +c50 Int64 , +c51 Int64 , +c52 Int64 , +c53 Int64 , +c54 Int64 , +c55 Int64 , +c56 Int64 , +c57 Int64 , +c58 Int64 , +c59 Int64 , +c60 Int64 , +c61 Int64 , +c62 Int64 , +c63 Int64 , +c64 Int64 , +c65 Int64 , +c66 Int64 , +c67 Int64 , +c68 Int64 , +c69 Int64 , +c70 Int64 , +c71 Int64 , +c72 Int64 , +c73 Int64 , +c74 Int64 , +c75 Int64 , +c76 Int64 , +c77 Int64 , +c78 Int64 , +c79 Int64 , +c80 Int64 , +c81 Int64 , +c82 Int64 , +c83 Int64 , +c84 Int64 , +c85 Int64 , +c86 Int64 , +c87 Int64 , +c88 Int64 , +c89 Int64 , +c90 Int64 , +c91 Int64 , +c92 Int64 , +c93 Int64 , +c94 Int64 , +c95 Int64 , +c96 Int64 , +c97 Int64 , +c98 Int64 , +c99 Int64 , +c100 Int64 , +c101 Int64 , +c102 Int64 , +c103 Int64 , +c104 Int64 , +c105 Int64 , +c106 Int64 , +c107 Int64 , +c108 Int64 , +c109 Int64 , +c110 Int64 , +c111 Int64 , +c112 Int64 , +c113 Int64 , +c114 Int64 , +c115 Int64 , +c116 Int64 , +c117 Int64 , +c118 Int64 , +c119 Int64 , +c120 Int64 , +c121 Int64 , +c122 Int64 , +c123 Int64 , +c124 Int64 , +c125 Int64 , +c126 Int64 , +c127 Int64 , +c128 Int64 , +c129 Int64 , +c130 Int64 , +c131 Int64 , +c132 Int64 , +c133 Int64 , +c134 Int64 , +c135 Int64 , +c136 Int64 , +c137 Int64 , +c138 Int64 , +c139 Int64 , +c140 Int64 , +c141 Int64 , +c142 Int64 , +c143 Int64 , +c144 Int64 , +c145 Int64 , +c146 Int64 , +c147 Int64 , +c148 Int64 , +c149 Int64 , +c150 Int64 , +c151 Int64 , +c152 Int64 , +c153 Int64 , +c154 Int64 , +c155 Int64 , +c156 Int64 , +c157 Int64 , +c158 Int64 , +c159 Int64 , +c160 Int64 , +c161 Int64 , +c162 Int64 , +c163 Int64 , +c164 Int64 , +c165 Int64 , +c166 Int64 , +c167 Int64 , +c168 Int64 , +c169 Int64 , +c170 Int64 , +c171 Int64 , +c172 Int64 , +c173 Int64 , +c174 Int64 , +c175 Int64 , +c176 Int64 , +c177 Int64 , +c178 Int64 , +c179 Int64 , +c180 Int64 , +c181 Int64 , +c182 Int64 , +c183 Int64 , +c184 Int64 , +c185 Int64 , +c186 Int64 , +c187 Int64 , +c188 Int64 , +c189 Int64 , +c190 Int64 , +c191 Int64 , +c192 Int64 , +c193 Int64 , +c194 Int64 , +c195 Int64 , +c196 Int64 , +c197 Int64 , +c198 Int64 , +c199 Int64 , +c200 Int64 , +c201 Int64 , +c202 Int64 , +c203 Int64 , +c204 Int64 , +c205 Int64 , +c206 Int64 , +c207 Int64 , +c208 Int64 , +c209 Int64 , +c210 Int64 , +c211 Int64 , +c212 Int64 , +c213 Int64 , +c214 Int64 , +c215 Int64 , +c216 Int64 , +c217 Int64 , +c218 Int64 , +c219 Int64 , +c220 Int64 , +c221 Int64 , +c222 Int64 , +c223 Int64 , +c224 Int64 , +c225 Int64 , +c226 Int64 , +c227 Int64 , +c228 Int64 , +c229 Int64 , +c230 Int64 , +c231 Int64 , +c232 Int64 , +c233 Int64 , +c234 Int64 , +c235 Int64 , +c236 Int64 , +c237 Int64 , +c238 Int64 , +c239 Int64 , +c240 Int64 , +c241 Int64 , +c242 Int64 , +c243 Int64 , +c244 Int64 , +c245 Int64 , +c246 Int64 , +c247 Int64 , +c248 Int64 , +c249 Int64 , +c250 Int64 , +c251 Int64 , +c252 Int64 , +c253 Int64 , +c254 Int64 , +c255 Int64 , +c256 Int64 , +c257 Int64 , +c258 Int64 , +c259 Int64 , +c260 Int64 , +c261 Int64 , +c262 Int64 , +c263 Int64 , +c264 Int64 , +c265 Int64 , +c266 Int64 , +c267 Int64 , +c268 Int64 , +c269 Int64 , +c270 Int64 , +c271 Int64 , +c272 Int64 , +c273 Int64 , +c274 Int64 , +c275 Int64 , +c276 Int64 , +c277 Int64 , +c278 Int64 , +c279 Int64 , +c280 Int64 , +c281 Int64 , +c282 Int64 , +c283 Int64 , +c284 Int64 , +c285 Int64 , +c286 Int64 , +c287 Int64 , +c288 Int64 , +c289 Int64 , +c290 Int64 , +c291 Int64 , +c292 Int64 , +c293 Int64 , +c294 Int64 , +c295 Int64 , +c296 Int64 , +c297 Int64 , +c298 Int64 , +c299 Int64 , +c300 
Int64 , +c301 Int64 , +c302 Int64 , +c303 Int64 , +c304 Int64 , +c305 Int64 , +c306 Int64 , +c307 Int64 , +c308 Int64 , +c309 Int64 , +c310 Int64 , +c311 Int64 , +c312 Int64 , +c313 Int64 , +c314 Int64 , +c315 Int64 , +c316 Int64 , +c317 Int64 , +c318 Int64 , +c319 Int64 , +c320 Int64 , +c321 Int64 , +c322 Int64 , +c323 Int64 , +c324 Int64 , +c325 Int64 , +c326 Int64 , +c327 Int64 , +c328 Int64 , +c329 Int64 , +c330 Int64 , +c331 Int64 , +c332 Int64 , +c333 Int64 , +c334 Int64 , +c335 Int64 , +c336 Int64 , +c337 Int64 , +c338 Int64 , +c339 Int64 , +c340 Int64 , +c341 Int64 , +c342 Int64 , +c343 Int64 , +c344 Int64 , +c345 Int64 , +c346 Int64 , +c347 Int64 , +c348 Int64 , +c349 Int64 , +c350 Int64 , +c351 Int64 , +c352 Int64 , +c353 Int64 , +c354 Int64 , +c355 Int64 , +c356 Int64 , +c357 Int64 , +c358 Int64 , +c359 Int64 , +c360 Int64 , +c361 Int64 , +c362 Int64 , +c363 Int64 , +c364 Int64 , +c365 Int64 , +c366 Int64 , +c367 Int64 , +c368 Int64 , +c369 Int64 , +c370 Int64 , +c371 Int64 , +c372 Int64 , +c373 Int64 , +c374 Int64 , +c375 Int64 , +c376 Int64 , +c377 Int64 , +c378 Int64 , +c379 Int64 , +c380 Int64 , +c381 Int64 , +c382 Int64 , +c383 Int64 , +c384 Int64 , +c385 Int64 , +c386 Int64 , +c387 Int64 , +c388 Int64 , +c389 Int64 , +c390 Int64 , +c391 Int64 , +c392 Int64 , +c393 Int64 , +c394 Int64 , +c395 Int64 , +c396 Int64 , +c397 Int64 , +c398 Int64 , +c399 Int64 , +c400 Int64 , +c401 Int64 , +c402 Int64 , +c403 Int64 , +c404 Int64 , +c405 Int64 , +c406 Int64 , +c407 Int64 , +c408 Int64 , +c409 Int64 , +c410 Int64 , +c411 Int64 , +c412 Int64 , +c413 Int64 , +c414 Int64 , +c415 Int64 , +c416 Int64 , +c417 Int64 , +c418 Int64 , +c419 Int64 , +c420 Int64 , +c421 Int64 , +c422 Int64 , +c423 Int64 , +c424 Int64 , +c425 Int64 , +c426 Int64 , +c427 Int64 , +c428 Int64 , +c429 Int64 , +c430 Int64 , +c431 Int64 , +c432 Int64 , +c433 Int64 , +c434 Int64 , +c435 Int64 , +c436 Int64 , +c437 Int64 , +c438 Int64 , +c439 Int64 , +c440 Int64 , +c441 Int64 , +c442 Int64 , +c443 Int64 , +c444 Int64 , +c445 Int64 , +c446 Int64 , +c447 Int64 , +c448 Int64 , +c449 Int64 , +c450 Int64 , +c451 Int64 , +c452 Int64 , +c453 Int64 , +c454 Int64 , +c455 Int64 , +c456 Int64 , +c457 Int64 , +c458 Int64 , +c459 Int64 , +c460 Int64 , +c461 Int64 , +c462 Int64 , +c463 Int64 , +c464 Int64 , +c465 Int64 , +c466 Int64 , +c467 Int64 , +c468 Int64 , +c469 Int64 , +c470 Int64 , +c471 Int64 , +c472 Int64 , +c473 Int64 , +c474 Int64 , +c475 Int64 , +c476 Int64 , +c477 Int64 , +c478 Int64 , +c479 Int64 , +c480 Int64 , +c481 Int64 , +c482 Int64 , +c483 Int64 , +c484 Int64 , +c485 Int64 , +c486 Int64 , +c487 Int64 , +c488 Int64 , +c489 Int64 , +c490 Int64 , +c491 Int64 , +c492 Int64 , +c493 Int64 , +c494 Int64 , +c495 Int64 , +c496 Int64 , +c497 Int64 , +c498 Int64 , +c499 Int64 , +c500 Int64 , +b1 Int64 , +b2 Int64 , +b3 Int64 , +b4 Int64 , +b5 Int64 , +b6 Int64 , +b7 Int64 , +b8 Int64 , +b9 Int64 , +b10 Int64 , +b11 Int64 , +b12 Int64 , +b13 Int64 , +b14 Int64 , +b15 Int64 , +b16 Int64 , +b17 Int64 , +b18 Int64 , +b19 Int64 , +b20 Int64 , +b21 Int64 , +b22 Int64 , +b23 Int64 , +b24 Int64 , +b25 Int64 , +b26 Int64 , +b27 Int64 , +b28 Int64 , +b29 Int64 , +b30 Int64 , +b31 Int64 , +b32 Int64 , +b33 Int64 , +b34 Int64 , +b35 Int64 , +b36 Int64 , +b37 Int64 , +b38 Int64 , +b39 Int64 , +b40 Int64 , +b41 Int64 , +b42 Int64 , +b43 Int64 , +b44 Int64 , +b45 Int64 , +b46 Int64 , +b47 Int64 , +b48 Int64 , +b49 Int64 , +b50 Int64 , +b51 Int64 , +b52 Int64 , +b53 Int64 , +b54 Int64 , +b55 Int64 , +b56 Int64 , +b57 Int64 , +b58 Int64 , 
+b59 Int64 , +b60 Int64 , +b61 Int64 , +b62 Int64 , +b63 Int64 , +b64 Int64 , +b65 Int64 , +b66 Int64 , +b67 Int64 , +b68 Int64 , +b69 Int64 , +b70 Int64 , +b71 Int64 , +b72 Int64 , +b73 Int64 , +b74 Int64 , +b75 Int64 , +b76 Int64 , +b77 Int64 , +b78 Int64 , +b79 Int64 , +b80 Int64 , +b81 Int64 , +b82 Int64 , +b83 Int64 , +b84 Int64 , +b85 Int64 , +b86 Int64 , +b87 Int64 , +b88 Int64 , +b89 Int64 , +b90 Int64 , +b91 Int64 , +b92 Int64 , +b93 Int64 , +b94 Int64 , +b95 Int64 , +b96 Int64 , +b97 Int64 , +b98 Int64 , +b99 Int64 , +b100 Int64 , +b101 Int64 , +b102 Int64 , +b103 Int64 , +b104 Int64 , +b105 Int64 , +b106 Int64 , +b107 Int64 , +b108 Int64 , +b109 Int64 , +b110 Int64 , +b111 Int64 , +b112 Int64 , +b113 Int64 , +b114 Int64 , +b115 Int64 , +b116 Int64 , +b117 Int64 , +b118 Int64 , +b119 Int64 , +b120 Int64 , +b121 Int64 , +b122 Int64 , +b123 Int64 , +b124 Int64 , +b125 Int64 , +b126 Int64 , +b127 Int64 , +b128 Int64 , +b129 Int64 , +b130 Int64 , +b131 Int64 , +b132 Int64 , +b133 Int64 , +b134 Int64 , +b135 Int64 , +b136 Int64 , +b137 Int64 , +b138 Int64 , +b139 Int64 , +b140 Int64 , +b141 Int64 , +b142 Int64 , +b143 Int64 , +b144 Int64 , +b145 Int64 , +b146 Int64 , +b147 Int64 , +b148 Int64 , +b149 Int64 , +b150 Int64 , +b151 Int64 , +b152 Int64 , +b153 Int64 , +b154 Int64 , +b155 Int64 , +b156 Int64 , +b157 Int64 , +b158 Int64 , +b159 Int64 , +b160 Int64 , +b161 Int64 , +b162 Int64 , +b163 Int64 , +b164 Int64 , +b165 Int64 , +b166 Int64 , +b167 Int64 , +b168 Int64 , +b169 Int64 , +b170 Int64 , +b171 Int64 , +b172 Int64 , +b173 Int64 , +b174 Int64 , +b175 Int64 , +b176 Int64 , +b177 Int64 , +b178 Int64 , +b179 Int64 , +b180 Int64 , +b181 Int64 , +b182 Int64 , +b183 Int64 , +b184 Int64 , +b185 Int64 , +b186 Int64 , +b187 Int64 , +b188 Int64 , +b189 Int64 , +b190 Int64 , +b191 Int64 , +b192 Int64 , +b193 Int64 , +b194 Int64 , +b195 Int64 , +b196 Int64 , +b197 Int64 , +b198 Int64 , +b199 Int64 , +b200 Int64 , +b201 Int64 , +b202 Int64 , +b203 Int64 , +b204 Int64 , +b205 Int64 , +b206 Int64 , +b207 Int64 , +b208 Int64 , +b209 Int64 , +b210 Int64 , +b211 Int64 , +b212 Int64 , +b213 Int64 , +b214 Int64 , +b215 Int64 , +b216 Int64 , +b217 Int64 , +b218 Int64 , +b219 Int64 , +b220 Int64 , +b221 Int64 , +b222 Int64 , +b223 Int64 , +b224 Int64 , +b225 Int64 , +b226 Int64 , +b227 Int64 , +b228 Int64 , +b229 Int64 , +b230 Int64 , +b231 Int64 , +b232 Int64 , +b233 Int64 , +b234 Int64 , +b235 Int64 , +b236 Int64 , +b237 Int64 , +b238 Int64 , +b239 Int64 , +b240 Int64 , +b241 Int64 , +b242 Int64 , +b243 Int64 , +b244 Int64 , +b245 Int64 , +b246 Int64 , +b247 Int64 , +b248 Int64 , +b249 Int64 , +b250 Int64 , +b251 Int64 , +b252 Int64 , +b253 Int64 , +b254 Int64 , +b255 Int64 , +b256 Int64 , +b257 Int64 , +b258 Int64 , +b259 Int64 , +b260 Int64 , +b261 Int64 , +b262 Int64 , +b263 Int64 , +b264 Int64 , +b265 Int64 , +b266 Int64 , +b267 Int64 , +b268 Int64 , +b269 Int64 , +b270 Int64 , +b271 Int64 , +b272 Int64 , +b273 Int64 , +b274 Int64 , +b275 Int64 , +b276 Int64 , +b277 Int64 , +b278 Int64 , +b279 Int64 , +b280 Int64 , +b281 Int64 , +b282 Int64 , +b283 Int64 , +b284 Int64 , +b285 Int64 , +b286 Int64 , +b287 Int64 , +b288 Int64 , +b289 Int64 , +b290 Int64 , +b291 Int64 , +b292 Int64 , +b293 Int64 , +b294 Int64 , +b295 Int64 , +b296 Int64 , +b297 Int64 , +b298 Int64 , +b299 Int64 , +b300 Int64 , +b301 Int64 , +b302 Int64 , +b303 Int64 , +b304 Int64 , +b305 Int64 , +b306 Int64 , +b307 Int64 , +b308 Int64 , +b309 Int64 , +b310 Int64 , +b311 Int64 , +b312 Int64 , +b313 Int64 , +b314 Int64 , +b315 Int64 
, +b316 Int64 , +b317 Int64 , +b318 Int64 , +b319 Int64 , +b320 Int64 , +b321 Int64 , +b322 Int64 , +b323 Int64 , +b324 Int64 , +b325 Int64 , +b326 Int64 , +b327 Int64 , +b328 Int64 , +b329 Int64 , +b330 Int64 , +b331 Int64 , +b332 Int64 , +b333 Int64 , +b334 Int64 , +b335 Int64 , +b336 Int64 , +b337 Int64 , +b338 Int64 , +b339 Int64 , +b340 Int64 , +b341 Int64 , +b342 Int64 , +b343 Int64 , +b344 Int64 , +b345 Int64 , +b346 Int64 , +b347 Int64 , +b348 Int64 , +b349 Int64 , +b350 Int64 , +b351 Int64 , +b352 Int64 , +b353 Int64 , +b354 Int64 , +b355 Int64 , +b356 Int64 , +b357 Int64 , +b358 Int64 , +b359 Int64 , +b360 Int64 , +b361 Int64 , +b362 Int64 , +b363 Int64 , +b364 Int64 , +b365 Int64 , +b366 Int64 , +b367 Int64 , +b368 Int64 , +b369 Int64 , +b370 Int64 , +b371 Int64 , +b372 Int64 , +b373 Int64 , +b374 Int64 , +b375 Int64 , +b376 Int64 , +b377 Int64 , +b378 Int64 , +b379 Int64 , +b380 Int64 , +b381 Int64 , +b382 Int64 , +b383 Int64 , +b384 Int64 , +b385 Int64 , +b386 Int64 , +b387 Int64 , +b388 Int64 , +b389 Int64 , +b390 Int64 , +b391 Int64 , +b392 Int64 , +b393 Int64 , +b394 Int64 , +b395 Int64 , +b396 Int64 , +b397 Int64 , +b398 Int64 , +b399 Int64 , +b400 Int64 , +b401 Int64 , +b402 Int64 , +b403 Int64 , +b404 Int64 , +b405 Int64 , +b406 Int64 , +b407 Int64 , +b408 Int64 , +b409 Int64 , +b410 Int64 , +b411 Int64 , +b412 Int64 , +b413 Int64 , +b414 Int64 , +b415 Int64 , +b416 Int64 , +b417 Int64 , +b418 Int64 , +b419 Int64 , +b420 Int64 , +b421 Int64 , +b422 Int64 , +b423 Int64 , +b424 Int64 , +b425 Int64 , +b426 Int64 , +b427 Int64 , +b428 Int64 , +b429 Int64 , +b430 Int64 , +b431 Int64 , +b432 Int64 , +b433 Int64 , +b434 Int64 , +b435 Int64 , +b436 Int64 , +b437 Int64 , +b438 Int64 , +b439 Int64 , +b440 Int64 , +b441 Int64 , +b442 Int64 , +b443 Int64 , +b444 Int64 , +b445 Int64 , +b446 Int64 , +b447 Int64 , +b448 Int64 , +b449 Int64 , +b450 Int64 , +b451 Int64 , +b452 Int64 , +b453 Int64 , +b454 Int64 , +b455 Int64 , +b456 Int64 , +b457 Int64 , +b458 Int64 , +b459 Int64 , +b460 Int64 , +b461 Int64 , +b462 Int64 , +b463 Int64 , +b464 Int64 , +b465 Int64 , +b466 Int64 , +b467 Int64 , +b468 Int64 , +b469 Int64 , +b470 Int64 , +b471 Int64 , +b472 Int64 , +b473 Int64 , +b474 Int64 , +b475 Int64 , +b476 Int64 , +b477 Int64 , +b478 Int64 , +b479 Int64 , +b480 Int64 , +b481 Int64 , +b482 Int64 , +b483 Int64 , +b484 Int64 , +b485 Int64 , +b486 Int64 , +b487 Int64 , +b488 Int64 , +b489 Int64 , +b490 Int64 , +b491 Int64 , +b492 Int64 , +b493 Int64 , +b494 Int64 , +b495 Int64 , +b496 Int64 , +b497 Int64 , +b498 Int64 , +b499 Int64 , +b500 Int64 +) ENGINE = Memory; + +insert into t(c1) values(1); + +SELECT count() FROM (SELECT tuple(*) FROM t); diff --git a/parser/testdata/03164_create_as_default/ast.json b/parser/testdata/03164_create_as_default/ast.json new file mode 100644 index 000000000..1d7665033 --- /dev/null +++ b/parser/testdata/03164_create_as_default/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery src_table (children 1)" + }, + { + "explain": " Identifier src_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001493704, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/03164_create_as_default/metadata.json b/parser/testdata/03164_create_as_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03164_create_as_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03164_create_as_default/query.sql b/parser/testdata/03164_create_as_default/query.sql new file mode 100644 index 000000000..e9fd7c1e3 --- /dev/null +++ b/parser/testdata/03164_create_as_default/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS src_table; +DROP TABLE IF EXISTS copied_table; + +CREATE TABLE src_table +( + time DateTime('UTC') DEFAULT fromUnixTimestamp(sipTimestamp), + sipTimestamp UInt64 +) +ENGINE = MergeTree +ORDER BY time; + +INSERT INTO src_table(sipTimestamp) VALUES (toUnixTimestamp(toDateTime('2024-05-20 09:00:00', 'UTC'))); + +CREATE TABLE copied_table AS src_table; + +ALTER TABLE copied_table RENAME COLUMN `sipTimestamp` TO `timestamp`; + +SHOW CREATE TABLE src_table; + +SELECT name, default_expression FROM system.columns WHERE database = currentDatabase() AND table = 'src_table' ORDER BY name; +INSERT INTO src_table(sipTimestamp) VALUES (toUnixTimestamp(toDateTime('2024-05-20 09:00:00', 'UTC'))); + +SELECT * FROM src_table ORDER BY time FORMAT JSONEachRow; +SELECT * FROM copied_table ORDER BY time FORMAT JSONEachRow; + +DROP TABLE src_table; +DROP TABLE copied_table; diff --git a/parser/testdata/03164_early_constant_folding_analyzer/ast.json b/parser/testdata/03164_early_constant_folding_analyzer/ast.json new file mode 100644 index 000000000..231e0f815 --- /dev/null +++ b/parser/testdata/03164_early_constant_folding_analyzer/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery checks (children 1)" + }, + { + "explain": " Identifier checks" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001579299, + "rows_read": 2, + "bytes_read": 65 + } +} diff --git a/parser/testdata/03164_early_constant_folding_analyzer/metadata.json b/parser/testdata/03164_early_constant_folding_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03164_early_constant_folding_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03164_early_constant_folding_analyzer/query.sql b/parser/testdata/03164_early_constant_folding_analyzer/query.sql new file mode 100644 index 000000000..7fca1c556 --- /dev/null +++ b/parser/testdata/03164_early_constant_folding_analyzer/query.sql @@ -0,0 +1,29 @@ +CREATE TABLE checks +( + `pull_request_number` UInt32, + `commit_sha` LowCardinality(String), + `check_name` LowCardinality(String), + `check_status` LowCardinality(String), + `check_duration_ms` UInt64, + `check_start_time` DateTime, + `test_name` LowCardinality(String), + `test_status` LowCardinality(String), + `test_duration_ms` UInt64, + `report_url` String, + `pull_request_url` String, + `commit_url` String, + `task_url` String, + `base_ref` String, + `base_repo` String, + `head_ref` String, + `head_repo` String, + `test_context_raw` String, + `instance_type` LowCardinality(String), + `instance_id` String, + `date` Date MATERIALIZED toDate(check_start_time) +) +ENGINE = MergeTree ORDER BY (date, pull_request_number, commit_sha, check_name, test_name, check_start_time); + +insert into checks select * from generateRandom() limit 1; + +select trimLeft(explain) from (explain SELECT count(1) FROM checks WHERE test_name IS NOT NULL) where explain like '%ReadFromPreparedSource%' SETTINGS enable_analyzer = 1, enable_parallel_replicas = 0; diff --git a/parser/testdata/03164_linestring_geometry/ast.json b/parser/testdata/03164_linestring_geometry/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/03164_linestring_geometry/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03164_linestring_geometry/metadata.json b/parser/testdata/03164_linestring_geometry/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03164_linestring_geometry/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03164_linestring_geometry/query.sql b/parser/testdata/03164_linestring_geometry/query.sql new file mode 100644 index 000000000..e4f1d1295 --- /dev/null +++ b/parser/testdata/03164_linestring_geometry/query.sql @@ -0,0 +1,8 @@ +-- { echoOn } +SELECT readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)'); +SELECT toTypeName(readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)')); +SELECT wkt(readWKTLineString('LINESTRING (1 1, 2 2, 3 3, 1 1)')); + +-- A native Array(Tuple(Float64, Float64)) is treated as a Ring, not as a LineString. +WITH wkt(CAST([(1, 1), (2, 2), (3, 3)], 'Array(Tuple(Float64, Float64))')) as x +SELECT x, toTypeName(x), readWKTRing(x) as y, toTypeName(y); diff --git a/parser/testdata/03164_materialize_skip_index/ast.json b/parser/testdata/03164_materialize_skip_index/ast.json new file mode 100644 index 000000000..5f321b570 --- /dev/null +++ b/parser/testdata/03164_materialize_skip_index/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_skip_index_insert (children 1)" + }, + { + "explain": " Identifier t_skip_index_insert" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001327074, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/03164_materialize_skip_index/metadata.json b/parser/testdata/03164_materialize_skip_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03164_materialize_skip_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03164_materialize_skip_index/query.sql b/parser/testdata/03164_materialize_skip_index/query.sql new file mode 100644 index 000000000..4dd7d4f3b --- /dev/null +++ b/parser/testdata/03164_materialize_skip_index/query.sql @@ -0,0 +1,50 @@ +DROP TABLE IF EXISTS t_skip_index_insert; + +CREATE TABLE t_skip_index_insert +( + a UInt64, + b UInt64, + INDEX idx_a a TYPE minmax, + INDEX idx_b b TYPE set(3) +) +ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 4; + +SET enable_analyzer = 1; +SET materialize_skip_indexes_on_insert = 0; + +SYSTEM STOP MERGES t_skip_index_insert; + +INSERT INTO t_skip_index_insert SELECT number, number / 50 FROM numbers(100); +INSERT INTO t_skip_index_insert SELECT number, number / 50 FROM numbers(100, 100); + +SELECT count() FROM t_skip_index_insert WHERE a >= 110 AND a < 130 AND b = 2; +EXPLAIN indexes = 1 SELECT count() FROM t_skip_index_insert WHERE a >= 110 AND a < 130 AND b = 2; + +SYSTEM START MERGES t_skip_index_insert; +OPTIMIZE TABLE t_skip_index_insert FINAL; + +SELECT count() FROM t_skip_index_insert WHERE a >= 110 AND a < 130 AND b = 2; +EXPLAIN indexes = 1 SELECT count() FROM t_skip_index_insert WHERE a >= 110 AND a < 130 AND b = 2; + +TRUNCATE TABLE t_skip_index_insert; + +INSERT INTO t_skip_index_insert SELECT number, number / 50 FROM numbers(100); +INSERT INTO t_skip_index_insert SELECT number, number / 50 FROM numbers(100, 100); + +SET mutations_sync = 2; + +ALTER TABLE t_skip_index_insert MATERIALIZE INDEX idx_a; +ALTER TABLE t_skip_index_insert MATERIALIZE INDEX idx_b; + +SELECT count() FROM 
t_skip_index_insert WHERE a >= 110 AND a < 130 AND b = 2; +EXPLAIN indexes = 1 SELECT count() FROM t_skip_index_insert WHERE a >= 110 AND a < 130 AND b = 2; + +DROP TABLE IF EXISTS t_skip_index_insert; + +SYSTEM FLUSH LOGS query_log; + +SELECT count(), sum(ProfileEvents['MergeTreeDataWriterSkipIndicesCalculationMicroseconds']) +FROM system.query_log +WHERE current_database = currentDatabase() + AND query LIKE 'INSERT INTO t_skip_index_insert SELECT%' + AND type = 'QueryFinish'; diff --git a/parser/testdata/03164_materialize_skip_index_on_merge/ast.json b/parser/testdata/03164_materialize_skip_index_on_merge/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03164_materialize_skip_index_on_merge/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03164_materialize_skip_index_on_merge/metadata.json b/parser/testdata/03164_materialize_skip_index_on_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03164_materialize_skip_index_on_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03164_materialize_skip_index_on_merge/query.sql b/parser/testdata/03164_materialize_skip_index_on_merge/query.sql new file mode 100644 index 000000000..0e63e2147 --- /dev/null +++ b/parser/testdata/03164_materialize_skip_index_on_merge/query.sql @@ -0,0 +1,87 @@ +-- Tests the MergeTree setting materialize_skip_indexes_on_merge + +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + a UInt64, + b UInt64, + INDEX idx_a a TYPE minmax, + INDEX idx_b b TYPE set(3) +) +ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity = 4; + +SELECT 'Regular merge'; + +INSERT INTO tab SELECT number, number / 50 FROM numbers(100); +INSERT INTO tab SELECT number, number / 50 FROM numbers(100, 100); + +OPTIMIZE TABLE tab FINAL; + +SELECT count() FROM tab WHERE a >= 110 AND a < 130 AND b = 2; + +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 SELECT count() FROM tab WHERE a >= 110 AND a < 130 AND b = 2 +) +WHERE explain LIKE '%Skip%' OR explain LIKE '%Name:%' OR explain LIKE '%Granules:%'; + +SELECT database, table, name, data_compressed_bytes FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab'; + +SELECT 'Merge with materialize_skip_indexes_on_merge = 0'; + +ALTER TABLE tab MODIFY SETTING materialize_skip_indexes_on_merge = 0; + +TRUNCATE tab; +INSERT INTO tab SELECT number, number / 50 FROM numbers(100); +INSERT INTO tab SELECT number, number / 50 FROM numbers(100, 100); + +OPTIMIZE TABLE tab FINAL; + +SELECT count() FROM tab WHERE a >= 110 AND a < 130 AND b = 2; + +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 SELECT count() FROM tab WHERE a >= 110 AND a < 130 AND b = 2 +) +WHERE explain LIKE '%Skip%' OR explain LIKE '%Name:%' OR explain LIKE '%Granules:%'; + +SELECT database, table, name, data_compressed_bytes FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab'; + +SYSTEM FLUSH LOGS query_log; +SELECT count(), sum(ProfileEvents['MergeTreeDataWriterSkipIndicesCalculationMicroseconds']) +FROM system.query_log +WHERE current_database = currentDatabase() + AND query LIKE 'OPTIMIZE TABLE tab FINAL' + AND type = 'QueryFinish'; + +SELECT 'Materialize indexes explicitly'; + +SET mutations_sync = 2; +ALTER TABLE tab MATERIALIZE INDEX idx_a; +ALTER TABLE tab MATERIALIZE INDEX idx_b; + +SELECT count() FROM tab WHERE a >= 110 AND a < 130 AND b = 2; +SELECT 
trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 SELECT count() FROM tab WHERE a >= 110 AND a < 130 AND b = 2 +) +WHERE explain LIKE '%Skip%' OR explain LIKE '%Name:%' OR explain LIKE '%Granules:%'; +SELECT database, table, name, data_compressed_bytes FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab'; + +SELECT 'Merge after resetting materialize_skip_indexes_on_merge to default'; + +ALTER TABLE tab RESET SETTING materialize_skip_indexes_on_merge; + +INSERT INTO tab SELECT number, number / 50 FROM numbers(100); +INSERT INTO tab SELECT number, number / 50 FROM numbers(100, 100); + +OPTIMIZE TABLE tab FINAL; + +SELECT count() FROM tab WHERE a >= 110 AND a < 130 AND b = 2; +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 SELECT count() FROM tab WHERE a >= 110 AND a < 130 AND b = 2 +) +WHERE explain LIKE '%Skip%' OR explain LIKE '%Name:%' OR explain LIKE '%Granules:%'; +SELECT database, table, name, data_compressed_bytes FROM system.data_skipping_indices WHERE database = currentDatabase() AND table = 'tab'; + +DROP TABLE tab; diff --git a/parser/testdata/03164_optimize_read_in_order_nullable/ast.json b/parser/testdata/03164_optimize_read_in_order_nullable/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03164_optimize_read_in_order_nullable/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03164_optimize_read_in_order_nullable/metadata.json b/parser/testdata/03164_optimize_read_in_order_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03164_optimize_read_in_order_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03164_optimize_read_in_order_nullable/query.sql b/parser/testdata/03164_optimize_read_in_order_nullable/query.sql new file mode 100644 index 000000000..7af6e55bf --- /dev/null +++ b/parser/testdata/03164_optimize_read_in_order_nullable/query.sql @@ -0,0 +1,55 @@ +-- Reproducer from https://github.com/ClickHouse/ClickHouse/issues/63460 +DROP TABLE IF EXISTS 03164_users; +CREATE TABLE 03164_users (uid Nullable(Int16), name String, age Int16) ENGINE=MergeTree ORDER BY (uid) SETTINGS allow_nullable_key=1; + +INSERT INTO 03164_users VALUES (1, 'John', 33); +INSERT INTO 03164_users VALUES (2, 'Ksenia', 48); +INSERT INTO 03164_users VALUES (NULL, 'Mark', 50); +OPTIMIZE TABLE 03164_users FINAL; + +SELECT '-- Reproducer result:'; + +SELECT * FROM 03164_users ORDER BY uid ASC NULLS FIRST LIMIT 10 SETTINGS optimize_read_in_order = 1; + +DROP TABLE IF EXISTS 03164_users; + +DROP TABLE IF EXISTS 03164_multi_key; +CREATE TABLE 03164_multi_key (c1 Nullable(UInt32), c2 Nullable(UInt32)) ENGINE = MergeTree ORDER BY (c1, c2) SETTINGS allow_nullable_key=1; + +INSERT INTO 03164_multi_key VALUES (0, 0), (1, NULL), (NULL, 2), (NULL, NULL), (4, 4); +-- Just in case +OPTIMIZE TABLE 03164_multi_key FINAL; + +SELECT ''; +SELECT '-- Read in order, no sort required:'; + +SELECT c1, c2 +FROM 03164_multi_key +ORDER BY c1 ASC NULLS LAST, c2 ASC NULLS LAST +SETTINGS optimize_read_in_order = 1; + +SELECT ''; +SELECT '-- Read in order, partial sort for second key:'; + +SELECT c1, c2 +FROM 03164_multi_key +ORDER BY c1 ASC NULLS LAST, c2 ASC NULLS FIRST +SETTINGS optimize_read_in_order = 1; + +SELECT ''; +SELECT '-- No reading in order, sort for first key:'; + +SELECT c1, c2 +FROM 03164_multi_key +ORDER BY c1 ASC NULLS FIRST, c2 ASC NULLS LAST +SETTINGS optimize_read_in_order = 1; + +SELECT ''; +SELECT '-- 
Reverse order, partial sort for the second key:'; + +SELECT c1, c2 +FROM 03164_multi_key +ORDER BY c1 DESC NULLS FIRST, c2 DESC NULLS LAST +SETTINGS optimize_read_in_order = 1; + +DROP TABLE IF EXISTS 03164_multi_key; diff --git a/parser/testdata/03164_orc_signedness/ast.json b/parser/testdata/03164_orc_signedness/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03164_orc_signedness/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03164_orc_signedness/metadata.json b/parser/testdata/03164_orc_signedness/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03164_orc_signedness/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03164_orc_signedness/query.sql b/parser/testdata/03164_orc_signedness/query.sql new file mode 100644 index 000000000..ae2d0428c --- /dev/null +++ b/parser/testdata/03164_orc_signedness/query.sql @@ -0,0 +1,42 @@ +-- Tags: no-fasttest, no-parallel + +set input_format_orc_filter_push_down = 1; +set engine_file_truncate_on_insert = 1; + +insert into function file('i8.orc') select materialize(-128)::Int8 as x; +insert into function file('u8.orc') select materialize(128)::UInt8 as x; +insert into function file('i16.orc') select materialize(-32768)::Int16 as x; +insert into function file('u16.orc') select materialize(32768)::UInt16 as x; +insert into function file('i32.orc') select materialize(-2147483648)::Int32 as x; +insert into function file('u32.orc') select materialize(2147483648)::UInt32 as x; +insert into function file('i64.orc') select materialize(-9223372036854775808)::Int64 as x; +insert into function file('u64.orc') select materialize(9223372036854775808)::UInt64 as x; + +-- { echoOn } +select x from file('i8.orc') where indexHint(x = -128); +select x from file('i8.orc') where indexHint(x = 128); +select x from file('u8.orc') where indexHint(x = -128); +select x from file('u8.orc') where indexHint(x = 128); + +select x from file('i16.orc') where indexHint(x = -32768); +select x from file('i16.orc') where indexHint(x = 32768); +select x from file('u16.orc') where indexHint(x = -32768); +select x from file('u16.orc') where indexHint(x = 32768); + +select x from file('i32.orc') where indexHint(x = -2147483648); +select x from file('i32.orc') where indexHint(x = 2147483648); +select x from file('u32.orc') where indexHint(x = -2147483648); +select x from file('u32.orc') where indexHint(x = 2147483648); + +select x from file('i64.orc') where indexHint(x = -9223372036854775808); +select x from file('i64.orc') where indexHint(x = 9223372036854775808); +select x from file('u64.orc') where indexHint(x = -9223372036854775808); +select x from file('u64.orc') where indexHint(x = 9223372036854775808); + +select x from file('u8.orc', ORC, 'x UInt8') where indexHint(x > 10); +select x from file('u8.orc', ORC, 'x UInt64') where indexHint(x > 10); +select x from file('u16.orc', ORC, 'x UInt16') where indexHint(x > 10); +select x from file('u16.orc', ORC, 'x UInt64') where indexHint(x > 10); +select x from file('u32.orc', ORC, 'x UInt32') where indexHint(x > 10); +select x from file('u32.orc', ORC, 'x UInt64') where indexHint(x > 10); +select x from file('u64.orc', ORC, 'x UInt64') where indexHint(x > 10); diff --git a/parser/testdata/03164_parallel_replicas_range_filter_min_max/ast.json b/parser/testdata/03164_parallel_replicas_range_filter_min_max/ast.json new file mode 100644 index 000000000..69edb1814 --- /dev/null +++ 
b/parser/testdata/03164_parallel_replicas_range_filter_min_max/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery range_filter_custom_range_test (children 1)" + }, + { + "explain": " Identifier range_filter_custom_range_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001248543, + "rows_read": 2, + "bytes_read": 112 + } +} diff --git a/parser/testdata/03164_parallel_replicas_range_filter_min_max/metadata.json b/parser/testdata/03164_parallel_replicas_range_filter_min_max/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03164_parallel_replicas_range_filter_min_max/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03164_parallel_replicas_range_filter_min_max/query.sql b/parser/testdata/03164_parallel_replicas_range_filter_min_max/query.sql new file mode 100644 index 000000000..0b9b7d4c5 --- /dev/null +++ b/parser/testdata/03164_parallel_replicas_range_filter_min_max/query.sql @@ -0,0 +1,177 @@ +DROP TABLE IF EXISTS range_filter_custom_range_test; + +CREATE TABLE range_filter_custom_range_test (k UInt64) ENGINE=MergeTree ORDER BY k; + +INSERT INTO range_filter_custom_range_test SELECT number + 5 from numbers(10); + +SELECT count() +FROM +( + SELECT * + FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), range_filter_custom_range_test) + SETTINGS prefer_localhost_replica = 0, + max_parallel_replicas = 3, + distributed_group_by_no_merge = 0, + enable_parallel_replicas = 1, + parallel_replicas_custom_key = 'k', + parallel_replicas_mode = 'custom_key_range', + parallel_replicas_custom_key_range_lower = 5, + parallel_replicas_custom_key_range_upper = 15 +); + +SELECT count() +FROM +( + SELECT * + FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), range_filter_custom_range_test) + SETTINGS prefer_localhost_replica = 0, + max_parallel_replicas = 3, + distributed_group_by_no_merge = 0, + enable_parallel_replicas = 1, + parallel_replicas_custom_key = 'k', + parallel_replicas_mode = 'custom_key_range', + parallel_replicas_custom_key_range_lower = 4, + parallel_replicas_custom_key_range_upper = 14 +); + +SELECT count() +FROM +( + SELECT * + FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), range_filter_custom_range_test) + SETTINGS prefer_localhost_replica = 0, + max_parallel_replicas = 3, + distributed_group_by_no_merge = 0, + enable_parallel_replicas = 1, + parallel_replicas_custom_key = 'k', + parallel_replicas_mode = 'custom_key_range', + parallel_replicas_custom_key_range_lower = 6, + parallel_replicas_custom_key_range_upper = 17 +); + + +SELECT count() +FROM +( + SELECT * + FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), range_filter_custom_range_test) + SETTINGS prefer_localhost_replica = 0, + max_parallel_replicas = 3, + distributed_group_by_no_merge = 0, + enable_parallel_replicas = 1, + parallel_replicas_custom_key = 'k', + parallel_replicas_mode = 'custom_key_range', + parallel_replicas_custom_key_range_lower = 0, + parallel_replicas_custom_key_range_upper = 15 +); + +SELECT count() +FROM +( + SELECT * + FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), range_filter_custom_range_test) + SETTINGS prefer_localhost_replica = 0, + max_parallel_replicas = 3, + distributed_group_by_no_merge = 0, + enable_parallel_replicas = 1, + parallel_replicas_custom_key = 'k', + 
parallel_replicas_mode = 'custom_key_range', + parallel_replicas_custom_key_range_lower = 15, + parallel_replicas_custom_key_range_upper = 25 +); + +SELECT count() +FROM +( + SELECT * + FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), range_filter_custom_range_test) + SETTINGS prefer_localhost_replica = 0, + max_parallel_replicas = 3, + distributed_group_by_no_merge = 0, + enable_parallel_replicas = 1, + parallel_replicas_custom_key = 'k', + parallel_replicas_mode = 'custom_key_range', + parallel_replicas_custom_key_range_lower = 0, + parallel_replicas_custom_key_range_upper = 5 +); + +SELECT count() +FROM +( + SELECT * + FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), range_filter_custom_range_test) + SETTINGS prefer_localhost_replica = 0, + max_parallel_replicas = 3, + distributed_group_by_no_merge = 0, + enable_parallel_replicas = 1, + parallel_replicas_custom_key = 'k', + parallel_replicas_mode = 'custom_key_range', + parallel_replicas_custom_key_range_lower = 500, + parallel_replicas_custom_key_range_upper = 10000 +); + + +SELECT count() +FROM +( + SELECT * + FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), range_filter_custom_range_test) + SETTINGS prefer_localhost_replica = 0, + max_parallel_replicas = 3, + distributed_group_by_no_merge = 0, + enable_parallel_replicas = 1, + parallel_replicas_custom_key = 'k', + parallel_replicas_mode = 'custom_key_range', + parallel_replicas_custom_key_range_lower = 10, + parallel_replicas_custom_key_range_upper = 13 +); + +DROP TABLE range_filter_custom_range_test; + +DROP TABLE IF EXISTS range_filter_custom_range_test_2; + +CREATE TABLE range_filter_custom_range_test_2 (k UInt64) ENGINE=MergeTree ORDER BY k; + +INSERT INTO range_filter_custom_range_test_2 SELECT number from numbers(13); + +SET send_logs_level = 'error'; -- failed connection tries are ok, `parallel_replicas` cluster contains 1 unavailable node + +SELECT count() +FROM +( + SELECT * + FROM cluster(parallel_replicas, currentDatabase(), range_filter_custom_range_test_2) + SETTINGS prefer_localhost_replica = 0, + max_parallel_replicas = 12, + distributed_group_by_no_merge = 0, + enable_parallel_replicas = 1, + parallel_replicas_custom_key = 'k', + parallel_replicas_mode = 'custom_key_range', + parallel_replicas_custom_key_range_lower = 0, + parallel_replicas_custom_key_range_upper = 13 +); + +DROP TABLE range_filter_custom_range_test_2; + +DROP TABLE IF EXISTS range_filter_custom_range_test_3; + +CREATE TABLE range_filter_custom_range_test_3 (k UInt64) ENGINE=MergeTree ORDER BY k; + +INSERT INTO range_filter_custom_range_test_3 SELECT number from numbers(4); + +SELECT count() +FROM +( + SELECT * + FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), range_filter_custom_range_test_3) + SETTINGS prefer_localhost_replica = 0, + max_parallel_replicas = 12, + distributed_group_by_no_merge = 0, + enable_parallel_replicas = 1, + parallel_replicas_custom_key = 'k', + parallel_replicas_mode = 'custom_key_range', + parallel_replicas_custom_key_range_lower = 0, + parallel_replicas_custom_key_range_upper = 4 +); + +DROP TABLE range_filter_custom_range_test_3; diff --git a/parser/testdata/03164_s3_settings_for_queries_and_merges/ast.json b/parser/testdata/03164_s3_settings_for_queries_and_merges/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03164_s3_settings_for_queries_and_merges/ast.json @@ -0,0 +1 @@ +{"error": true} diff 
--git a/parser/testdata/03164_s3_settings_for_queries_and_merges/metadata.json b/parser/testdata/03164_s3_settings_for_queries_and_merges/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03164_s3_settings_for_queries_and_merges/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03164_s3_settings_for_queries_and_merges/query.sql b/parser/testdata/03164_s3_settings_for_queries_and_merges/query.sql new file mode 100644 index 000000000..3a1c79961 --- /dev/null +++ b/parser/testdata/03164_s3_settings_for_queries_and_merges/query.sql @@ -0,0 +1,41 @@ +-- Tags: no-random-settings, no-fasttest, no-parallel + +SET allow_prefetched_read_pool_for_remote_filesystem=0; +SET allow_prefetched_read_pool_for_local_filesystem=0; +SET max_threads = 1; +SET remote_read_min_bytes_for_seek = 100000; +-- Will affect INSERT, but not merges +SET s3_check_objects_after_upload=1; + +DROP TABLE IF EXISTS t_compact_bytes_s3; +CREATE TABLE t_compact_bytes_s3(c1 UInt32, c2 UInt32, c3 UInt32, c4 UInt32, c5 UInt32) +ENGINE = MergeTree ORDER BY c1 +SETTINGS index_granularity = 512, min_bytes_for_wide_part = '10G', storage_policy = 's3_no_cache', write_marks_for_substreams_in_compact_parts=1; + +INSERT INTO t_compact_bytes_s3 SELECT number, number, number, number, number FROM numbers(512 * 32 * 40); + +SYSTEM DROP MARK CACHE; +OPTIMIZE TABLE t_compact_bytes_s3 FINAL; + +SYSTEM DROP MARK CACHE; +SELECT count() FROM t_compact_bytes_s3 WHERE NOT ignore(c2, c4); +SYSTEM FLUSH LOGS query_log; + +-- Errors in S3 requests will be automatically retried; however, ProfileEvents can be wrong. That is why we subtract errors. +SELECT + ProfileEvents['S3ReadRequestsCount'] - ProfileEvents['S3ReadRequestsErrors'], + ProfileEvents['ReadBufferFromS3Bytes'] < ProfileEvents['ReadCompressedBytes'] * 1.1 +FROM system.query_log +WHERE event_date >= yesterday() AND type = 'QueryFinish' + AND current_database = currentDatabase() + AND query ilike '%INSERT INTO t_compact_bytes_s3 SELECT number, number, number%'; + +SELECT + ProfileEvents['S3ReadRequestsCount'] - ProfileEvents['S3ReadRequestsErrors'], + ProfileEvents['ReadBufferFromS3Bytes'] < ProfileEvents['ReadCompressedBytes'] * 1.1 +FROM system.query_log +WHERE event_date >= yesterday() AND type = 'QueryFinish' + AND current_database = currentDatabase() + AND query ilike '%OPTIMIZE TABLE t_compact_bytes_s3 FINAL%'; + +DROP TABLE IF EXISTS t_compact_bytes_s3; diff --git a/parser/testdata/03165_distinct_with_window_func_crash/ast.json b/parser/testdata/03165_distinct_with_window_func_crash/ast.json new file mode 100644 index 000000000..bcbb49cf8 --- /dev/null +++ b/parser/testdata/03165_distinct_with_window_func_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery atable (children 1)" + }, + { + "explain": " Identifier atable" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001244452, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/03165_distinct_with_window_func_crash/metadata.json b/parser/testdata/03165_distinct_with_window_func_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03165_distinct_with_window_func_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03165_distinct_with_window_func_crash/query.sql b/parser/testdata/03165_distinct_with_window_func_crash/query.sql new file mode 100644 index 000000000..e2e87fde3 
--- /dev/null +++ b/parser/testdata/03165_distinct_with_window_func_crash/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS atable; + +CREATE TABLE atable +( + cdu_date Int16, + loanx_id String, + rating_sp String +) +ENGINE = MergeTree +ORDER BY tuple(); + +-- Disable parallelization after the window function, otherwise the +-- generated pipeline contains an enormous number of transformers (should be fixed separately) +SET query_plan_enable_multithreading_after_window_functions=0; +-- max_threads is randomized and can significantly increase the number of parallel transformers after the window function, so set it to a small value explicitly +SET max_threads=3; + +SELECT DISTINCT + loanx_id, + rating_sp, + cdu_date, + row_number() OVER (PARTITION BY cdu_date) AS row_number, + last_value(cdu_date) OVER (PARTITION BY loanx_id ORDER BY cdu_date ASC) AS last_cdu_date +FROM atable +GROUP BY + cdu_date, + loanx_id, + rating_sp +SETTINGS query_plan_remove_redundant_distinct = 1; + +DROP TABLE atable; diff --git a/parser/testdata/03165_order_by_duplicate/ast.json b/parser/testdata/03165_order_by_duplicate/ast.json new file mode 100644 index 000000000..68bb07624 --- /dev/null +++ b/parser/testdata/03165_order_by_duplicate/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00128585, + "rows_read": 2, + "bytes_read": 61 + } +} diff --git a/parser/testdata/03165_order_by_duplicate/metadata.json b/parser/testdata/03165_order_by_duplicate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03165_order_by_duplicate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03165_order_by_duplicate/query.sql b/parser/testdata/03165_order_by_duplicate/query.sql new file mode 100644 index 000000000..b8bcc10e1 --- /dev/null +++ b/parser/testdata/03165_order_by_duplicate/query.sql @@ -0,0 +1,16 @@ +CREATE TABLE test +ENGINE = ReplacingMergeTree +PRIMARY KEY id +AS SELECT number AS id FROM numbers(100); + +EXPLAIN QUERY TREE SELECT id +FROM test FINAL +WHERE id IN ( + SELECT DISTINCT id + FROM test FINAL + ORDER BY id ASC + LIMIT 4 +) +ORDER BY id ASC +LIMIT 1 BY id +SETTINGS enable_analyzer = 1; diff --git a/parser/testdata/03165_parseReadableSize/ast.json b/parser/testdata/03165_parseReadableSize/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03165_parseReadableSize/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03165_parseReadableSize/metadata.json b/parser/testdata/03165_parseReadableSize/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03165_parseReadableSize/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03165_parseReadableSize/query.sql b/parser/testdata/03165_parseReadableSize/query.sql new file mode 100644 index 000000000..33386268a --- /dev/null +++ b/parser/testdata/03165_parseReadableSize/query.sql @@ -0,0 +1,121 @@ +-- Should be the inverse of formatReadableSize +SELECT formatReadableSize(parseReadableSize('1 B')); +SELECT formatReadableSize(parseReadableSize('1 KiB')); +SELECT formatReadableSize(parseReadableSize('1 MiB')); +SELECT formatReadableSize(parseReadableSize('1 GiB')); +SELECT formatReadableSize(parseReadableSize('1 TiB')); +SELECT formatReadableSize(parseReadableSize('1 PiB')); +SELECT 
formatReadableSize(parseReadableSize('1 EiB')); + +-- Should be the inverse of formatReadableDecimalSize +SELECT formatReadableDecimalSize(parseReadableSize('1 B')); +SELECT formatReadableDecimalSize(parseReadableSize('1 KB')); +SELECT formatReadableDecimalSize(parseReadableSize('1 MB')); +SELECT formatReadableDecimalSize(parseReadableSize('1 GB')); +SELECT formatReadableDecimalSize(parseReadableSize('1 TB')); +SELECT formatReadableDecimalSize(parseReadableSize('1 PB')); +SELECT formatReadableDecimalSize(parseReadableSize('1 EB')); + +-- Is case-insensitive +SELECT formatReadableSize(parseReadableSize('1 mIb')); + +-- Should be able to parse decimals +SELECT parseReadableSize('1.00 KiB'); -- 1024 +SELECT parseReadableSize('3.00 KiB'); -- 3072 + +-- Infix whitespace is ignored +SELECT parseReadableSize('1 KiB'); +SELECT parseReadableSize('1KiB'); + +-- Can parse LowCardinality +SELECT parseReadableSize(toLowCardinality('1 KiB')); + +-- Can parse nullable fields +SELECT parseReadableSize(toNullable('1 KiB')); + +-- Can parse non-const columns +SELECT parseReadableSize(materialize('1 KiB')); + +-- Output is NULL if NULL arg is passed +SELECT parseReadableSize(NULL); + +-- Can parse more decimal places than Float64's precision +SELECT parseReadableSize('3.14159265358979323846264338327950288419716939937510 KiB'); + +-- Can parse sizes prefixed with a plus sign +SELECT parseReadableSize('+3.1415 KiB'); + +-- Can parse amounts in scientific notation +SELECT parseReadableSize('10e2 B'); + +-- Can parse floats with no decimal points +SELECT parseReadableSize('5. B'); + +-- Can parse numbers with leading zeroes +SELECT parseReadableSize('002 KiB'); + +-- Can parse octal-like numbers +SELECT parseReadableSize('08 KiB'); + +-- Can parse various flavours of zero +SELECT parseReadableSize('0 KiB'), parseReadableSize('+0 KiB'), parseReadableSize('-0 KiB'); + +-- ERRORS +-- No arguments +SELECT parseReadableSize(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +-- Too many arguments +SELECT parseReadableSize('1 B', '2 B'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +-- Wrong Type +SELECT parseReadableSize(12); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- Invalid input - overall garbage +SELECT parseReadableSize('oh no'); -- { serverError CANNOT_PARSE_NUMBER } +-- Invalid input - unknown unit +SELECT parseReadableSize('12.3 rb'); -- { serverError CANNOT_PARSE_TEXT } +-- Invalid input - Leading whitespace +SELECT parseReadableSize(' 1 B'); -- { serverError CANNOT_PARSE_INPUT_ASSERTION_FAILED } +-- Invalid input - Trailing characters +SELECT parseReadableSize('1 B leftovers'); -- { serverError UNEXPECTED_DATA_AFTER_PARSED_VALUE } +-- Invalid input - Negative sizes are not allowed +SELECT parseReadableSize('-1 KiB'); -- { serverError BAD_ARGUMENTS } +-- Invalid input - Input too large to fit in UInt64 +SELECT parseReadableSize('1000 EiB'); -- { serverError BAD_ARGUMENTS } +-- Invalid input - Hexadecimal is not supported +SELECT parseReadableSize('0xa123 KiB'); -- { serverError CANNOT_PARSE_TEXT } +-- Invalid input - NaN is not supported, with or without sign and with different capitalizations +SELECT parseReadableSize('nan KiB'); -- { serverError BAD_ARGUMENTS } +SELECT parseReadableSize('+nan KiB'); -- { serverError BAD_ARGUMENTS } +SELECT parseReadableSize('-nan KiB'); -- { serverError BAD_ARGUMENTS } +SELECT parseReadableSize('NaN KiB'); -- { serverError BAD_ARGUMENTS } +-- Invalid input - Infinite is not supported, with or without sign, in all its forms +SELECT parseReadableSize('inf 
KiB'); -- { serverError BAD_ARGUMENTS } +SELECT parseReadableSize('+inf KiB'); -- { serverError BAD_ARGUMENTS } +SELECT parseReadableSize('-inf KiB'); -- { serverError BAD_ARGUMENTS } +SELECT parseReadableSize('infinite KiB'); -- { serverError BAD_ARGUMENTS } +SELECT parseReadableSize('+infinite KiB'); -- { serverError BAD_ARGUMENTS } +SELECT parseReadableSize('-infinite KiB'); -- { serverError BAD_ARGUMENTS } +SELECT parseReadableSize('Inf KiB'); -- { serverError BAD_ARGUMENTS } +SELECT parseReadableSize('Infinite KiB'); -- { serverError BAD_ARGUMENTS } + + +-- OR NULL +-- Works as the regular version when inputs are correct +SELECT + arrayJoin(['1 B', '1 KiB', '1 MiB', '1 GiB', '1 TiB', '1 PiB', '1 EiB']) AS readable_sizes, + parseReadableSizeOrNull(readable_sizes) AS filesize; + +-- Returns NULL on invalid values +SELECT + arrayJoin(['invalid', '1 Joe', '1KB', ' 1 GiB', '1 TiB with fries', 'NaN KiB', 'Inf KiB', '0xa123 KiB']) AS readable_sizes, + parseReadableSizeOrNull(readable_sizes) AS filesize; + + +-- OR ZERO +-- Works as the regular version when inputs are correct +SELECT + arrayJoin(['1 B', '1 KiB', '1 MiB', '1 GiB', '1 TiB', '1 PiB', '1 EiB']) AS readable_sizes, + parseReadableSizeOrZero(readable_sizes) AS filesize; + +-- Returns zero on invalid values +SELECT + arrayJoin(['invalid', '1 Joe', '1KB', ' 1 GiB', '1 TiB with fries', 'NaN KiB', 'Inf KiB', '0xa123 KiB']) AS readable_sizes, + parseReadableSizeOrZero(readable_sizes) AS filesize; \ No newline at end of file diff --git a/parser/testdata/03165_round_scale_as_column/ast.json b/parser/testdata/03165_round_scale_as_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03165_round_scale_as_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03165_round_scale_as_column/metadata.json b/parser/testdata/03165_round_scale_as_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03165_round_scale_as_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03165_round_scale_as_column/query.sql b/parser/testdata/03165_round_scale_as_column/query.sql new file mode 100644 index 000000000..adae36564 --- /dev/null +++ b/parser/testdata/03165_round_scale_as_column/query.sql @@ -0,0 +1,124 @@ +-- Tests functions round(), roundBankers(), floor(), ceil() and trunc() with the default 'scale' argument +SELECT toUInt8(number) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toUInt16(number) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toUInt32(number) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toUInt64(number) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toInt8(number - 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toInt16(number - 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toInt32(number - 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toInt64(number - 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; + +SELECT toFloat32(number - 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toFloat64(number - 10) AS x, round(x), 
roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toFloat32((number - 10) / 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; +SELECT toFloat64((number - 10) / 10) AS x, round(x), roundBankers(x), floor(x), ceil(x), trunc(x) FROM system.numbers LIMIT 20; + +-- Functions round(), roundBankers(), floor(), ceil() and trunc() accept non-const 'scale' arguments +SELECT toFloat32((number - 10) / 10) AS x, round(x, materialize(1)), roundBankers(x, materialize(1)), floor(x, materialize(1)), ceil(x, materialize(1)), trunc(x, materialize(1)) FROM system.numbers LIMIT 20; +SELECT toFloat64((number - 10) / 10) AS x, round(x, materialize(1)), roundBankers(x, materialize(1)), floor(x, materialize(1)), ceil(x, materialize(1)), trunc(x, materialize(1)) FROM system.numbers LIMIT 20; +SELECT toUInt8(number) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20; +SELECT toUInt16(number) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20; +SELECT toUInt32(number) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20; +SELECT toUInt64(number) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20; + +SELECT toInt8(number - 10) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20; +SELECT toInt16(number - 10) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20; +SELECT toInt32(number - 10) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20; +SELECT toInt64(number - 10) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20; +SELECT toFloat32(number - 10) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20; +SELECT toFloat64(number - 10) AS x, round(x, materialize(-1)), roundBankers(x, materialize(-1)), floor(x, materialize(-1)), ceil(x, materialize(-1)), trunc(x, materialize(-1)) FROM system.numbers LIMIT 20; + +SELECT toUInt8(number) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20; +SELECT toUInt16(number) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20; +SELECT toUInt32(number) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20; +SELECT toUInt64(number) AS x, round(x, materialize(-2)), 
roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20; +SELECT toInt8(number - 10) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20; +SELECT toInt16(number - 10) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20; +SELECT toInt32(number - 10) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20; +SELECT toInt64(number - 10) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20; +SELECT toFloat32(number - 10) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20; +SELECT toFloat64(number - 10) AS x, round(x, materialize(-2)), roundBankers(x, materialize(-2)), floor(x, materialize(-2)), ceil(x, materialize(-2)), trunc(x, materialize(-2)) FROM system.numbers LIMIT 20; + +SELECT toString('CHECKPOINT1'); + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab ( + id Int32, + scale Int16, + u8 UInt8, u16 UInt16, u32 UInt32, u64 UInt64, + i8 Int8, i16 Int16, i32 Int32, i64 Int64, + f32 Float32, f64 Float64 +) ENGINE = Memory; + +INSERT INTO tab SELECT number , 0, number, number, number, number, number, number, number, number, number, number, FROM system.numbers LIMIT 20; +INSERT INTO tab SELECT number+20 , 0, number+10, number+10, number+10, number+10, number-10, number-10, number-10, number-10, (toFloat32(number)-10)/10, (toFloat64(number)-10)/10, FROM system.numbers LIMIT 20; +INSERT INTO tab SELECT number+40 , -1, number, number, number, number, number, number, number, number, number, number, FROM system.numbers LIMIT 20; +INSERT INTO tab SELECT number+60 , -1, number+10, number+10, number+10, number+10, number-10, number-10, number-10, number-10, (toFloat32(number)-10)/10, (toFloat64(number)-10)/10, FROM system.numbers LIMIT 20; +INSERT INTO tab SELECT number+80 , -2, number, number, number, number, number, number, number, number, number, number, FROM system.numbers LIMIT 20; +INSERT INTO tab SELECT number+100, -2, number+10, number+10, number+10, number+10, number-10, number-10, number-10, number-10, (toFloat32(number)-10)/10, (toFloat64(number)-10)/10, FROM system.numbers LIMIT 20; + +INSERT INTO tab SELECT number+200, -number, 0, 0, 0, 0, 0, 0, 0, 0, 12345.6789, 12345.6789, FROM system.numbers LIMIT 10; +INSERT INTO tab SELECT number+210, -number, 0, 0, 0, 0, 0, 0, 0, 0, 12345.6789, 12345.6789, FROM system.numbers LIMIT 10; + +INSERT INTO tab VALUES (300, 4, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0); +INSERT INTO tab VALUES (301, 4, 20, 20, 20, 20, 20, 20, 20, 20, 20.0, 20.0); +INSERT INTO tab VALUES (302, 4, 200, 200, 200, 200, 200, 200, 200, 200, 200.0, 200.0); +INSERT INTO tab VALUES (303, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0); +INSERT INTO tab VALUES (304, 4, 50, 50, 50, 50, 50, 50, 50, 50, 50.0, 50.0); +INSERT INTO tab VALUES (305, 4, 500, 500, 500, 500, 500, 500, 500, 500, 500.0, 500.0); + +SELECT toString('id u8 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)'); 
+SELECT id, u8 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id; +SELECT toString('id u16 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)'); +SELECT id, u16 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id; +SELECT toString('id u32 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)'); +SELECT id, u32 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id; +SELECT toString('id u64 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)'); +SELECT id, u64 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id; +SELECT toString('id i8 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)'); +SELECT id, i8 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id; +SELECT toString('id i16 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)'); +SELECT id, i16 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id; +SELECT toString('id i32 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)'); +SELECT id, i32 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id; +SELECT toString('id i64 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)'); +SELECT id, i64 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id; +SELECT toString('id f32 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)'); +SELECT id, f32 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id; +SELECT toString('id f64 scale round(u8, scale) roundBankers(x, scale) floor(x, scale) ceil(x, scale) trunc(x, scale)'); +SELECT id, f64 AS x, scale, round(x, scale), roundBankers(x, scale), floor(x, scale), ceil(x, scale), trunc(x, scale) FROM tab ORDER BY id; + +DROP TABLE tab; +-- +SELECT toString('CHECKPOINT2'); + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab ( + id Int32, + scale Int16, + d32 Decimal32(4), d64 Decimal64(4), d128 Decimal128(4), d256 Decimal256(4) +) ENGINE = Memory; + +INSERT INTO tab VALUES (1, 6, toDecimal32('42.42', 4), toDecimal64('42.42', 4), toDecimal128('42.42', 4), toDecimal256('42.42', 4)); +INSERT INTO tab SELECT 2 , 6, cos(d32), cos(d64), cos(d128), cos(d256) FROM tab WHERE id = 1; +INSERT INTO tab SELECT 3 , 6, sqrt(d32), sqrt(d64), sqrt(d128), sqrt(d256) FROM tab WHERE id = 1; +INSERT INTO tab SELECT 4 , 6, lgamma(d32), lgamma(d64), lgamma(d128), lgamma(d256) FROM tab WHERE id = 1; +INSERT INTO tab SELECT 5 , 6, tgamma(d32)/1e50, tgamma(d64)/1e50, tgamma(d128)/1e50, tgamma(d256)/1e50 FROM tab WHERE id = 1; +INSERT INTO tab SELECT 6 , 8, sin(d32), sin(d64), sin(d128), sin(d256) FROM tab WHERE id = 1; +INSERT INTO tab SELECT 7 , 8, cos(d32), cos(d64), cos(d128), cos(d256) FROM tab WHERE id = 1; +INSERT INTO tab SELECT 8 , 8, log(d32), log(d64), log(d128), log(d256) 
FROM tab WHERE id = 1; +INSERT INTO tab SELECT 9 , 8, log2(d32), log2(d64), log2(d128), log2(d256) FROM tab WHERE id = 1; +INSERT INTO tab SELECT 10, 8, log10(d32), log10(d64), log10(d128), log10(d256) FROM tab WHERE id = 1; + +SELECT id, round(d32, scale), round(d64, scale), round(d128, scale), round(d256, scale) FROM tab ORDER BY id; + +DROP TABLE tab; + +SELECT round(1, 1); +SELECT round(materialize(1), materialize(1)); +SELECT round(pi(), number) FROM numbers(10); +SELECT round(toDecimal32(42.42, 2), number) from numbers(3); +SELECT round(materialize(1), 1); +SELECT materialize(10.1) AS x, ceil(x, toUInt256(123)); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} diff --git a/parser/testdata/03165_storage_merge_view_prewhere/ast.json b/parser/testdata/03165_storage_merge_view_prewhere/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03165_storage_merge_view_prewhere/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03165_storage_merge_view_prewhere/metadata.json b/parser/testdata/03165_storage_merge_view_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03165_storage_merge_view_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03165_storage_merge_view_prewhere/query.sql b/parser/testdata/03165_storage_merge_view_prewhere/query.sql new file mode 100644 index 000000000..97651d1b0 --- /dev/null +++ b/parser/testdata/03165_storage_merge_view_prewhere/query.sql @@ -0,0 +1,41 @@ +-- Tags: distributed + +DROP TABLE IF EXISTS ids; +DROP TABLE IF EXISTS data; +DROP TABLE IF EXISTS data2; + +CREATE TABLE ids (id UUID, whatever String) Engine=MergeTree ORDER BY tuple(); +INSERT INTO ids VALUES ('a1451105-722e-4fe7-bfaa-65ad2ae249c2', 'whatever'); + +CREATE TABLE data (id UUID, event_time DateTime, status String) Engine=MergeTree ORDER BY tuple(); +INSERT INTO data VALUES ('a1451105-722e-4fe7-bfaa-65ad2ae249c2', '2000-01-01', 'CREATED'); + +CREATE TABLE data2 (id UUID, event_time DateTime, status String) Engine=MergeTree ORDER BY tuple(); +INSERT INTO data2 VALUES ('a1451105-722e-4fe7-bfaa-65ad2ae249c2', '2000-01-02', 'CREATED'); + +SELECT + id, + whatever +FROM ids AS l +INNER JOIN merge(currentDatabase(), 'data*') AS s ON l.id = s.id +WHERE (status IN ['CREATED', 'CREATING']) +ORDER BY event_time DESC +; + +SELECT + id, + whatever +FROM ids AS l +INNER JOIN clusterAllReplicas(test_cluster_two_shards, merge(currentDatabase(), 'data*')) AS s ON l.id = s.id +WHERE (status IN ['CREATED', 'CREATING']) +ORDER BY event_time DESC +; + +SELECT + id, + whatever +FROM ids AS l +INNER JOIN view(SELECT * FROM merge(currentDatabase(), 'data*')) AS s ON l.id = s.id +WHERE (status IN ['CREATED', 'CREATING']) +ORDER BY event_time DESC +; diff --git a/parser/testdata/03165_string_functions_with_token_text_indexes/ast.json b/parser/testdata/03165_string_functions_with_token_text_indexes/ast.json new file mode 100644 index 000000000..34a36b9d7 --- /dev/null +++ b/parser/testdata/03165_string_functions_with_token_text_indexes/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '-------- Bloom filter --------'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001020281, 
+ "rows_read": 5, + "bytes_read": 201 + } +} diff --git a/parser/testdata/03165_string_functions_with_token_text_indexes/metadata.json b/parser/testdata/03165_string_functions_with_token_text_indexes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03165_string_functions_with_token_text_indexes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03165_string_functions_with_token_text_indexes/query.sql b/parser/testdata/03165_string_functions_with_token_text_indexes/query.sql new file mode 100644 index 000000000..560e5ecf0 --- /dev/null +++ b/parser/testdata/03165_string_functions_with_token_text_indexes/query.sql @@ -0,0 +1,343 @@ +SELECT '-------- Bloom filter --------'; +SELECT ''; +DROP TABLE IF EXISTS 03165_token_bf; + +SET allow_experimental_full_text_index=1; + +CREATE TABLE 03165_token_bf +( + id Int64, + message String, + INDEX idx_message message TYPE tokenbf_v1(32768, 3, 2) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO 03165_token_bf VALUES(1, 'Service is not ready'); + +SELECT '-- No skip for prefix'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE startsWith(message, 'Serv') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_bf WHERE startsWith(message, 'Serv'); + +SELECT ''; +SELECT '-- Skip for prefix with complete token'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE startsWith(message, 'Serv i') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_bf WHERE startsWith(message, 'Serv i'); + +SELECT ''; +SELECT '-- No skip for suffix'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE endsWith(message, 'eady') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_bf WHERE endsWith(message, 'eady'); + +SELECT ''; +SELECT '-- Skip for suffix with complete token'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE endsWith(message, ' eady') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_bf WHERE endsWith(message, ' eady'); + +SELECT ''; +SELECT '-- No skip for substring'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE match(message, 'no') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_bf WHERE match(message, 'no'); + +SELECT ''; +SELECT '-- Skip for substring with complete token'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE match(message, ' xyz ') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_bf WHERE match(message, ' xyz '); + +SELECT ''; +SELECT '-- No skip for multiple substrings'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE multiSearchAny(message, ['ce', 'no']) +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_bf WHERE multiSearchAny(message, ['ce', 'no']); + +SELECT ''; +SELECT '-- Skip for multiple substrings with complete tokens'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE multiSearchAny(message, [' wx ', ' yz ']) +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_bf WHERE multiSearchAny(message, [' wx ', ' yz ']); + +SELECT ''; +SELECT '-- No skip for multiple non-existsing substrings, only one with complete token'; +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_bf WHERE multiSearchAny(message, [' wx ', 'yz']) 
+) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_bf WHERE multiSearchAny(message, [' wx ', 'yz']); + +DROP TABLE IF EXISTS 03165_token_bf; + +SELECT ''; +SELECT '-------- Text index filter --------'; +SELECT ''; + +SET allow_experimental_full_text_index = 1; +DROP TABLE IF EXISTS 03165_token_ft; +CREATE TABLE 03165_token_ft +( + id Int64, + message String, + INDEX idx_message message TYPE text(tokenizer = 'splitByNonAlpha') GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO 03165_token_ft VALUES(1, 'Service is not ready'); + +-- text search cannot operate on substrings, so no filtering based on text index should be performed here +SELECT '-- No skip for prefix'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE startsWith(message, 'Serv') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE startsWith(message, 'Serv'); + +SELECT ''; +-- here we get one full token, so we can utilize text index to skip (in this case) all granules +SELECT '-- Skip for prefix with complete token'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE startsWith(message, 'Serv i') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE startsWith(message, 'Serv i'); + +SELECT ''; +SELECT '-- No skip for suffix'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE endsWith(message, 'eady') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE endsWith(message, 'eady'); + +SELECT ''; +SELECT '-- Skip for suffix with complete token'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE endsWith(message, ' eady') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE endsWith(message, ' eady'); + +SELECT ''; +SELECT '-- No skip for substring'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE match(message, 'no') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE match(message, 'no'); + +SELECT ''; +SELECT '-- Skip for substring with complete token'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE match(message, ' xyz ') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE match(message, ' xyz '); + +SELECT ''; +SELECT '-- Skip for like with non matching tokens'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE like(message, '%rvice is definitely rea%') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE like(message, '%rvice is definitely rea%'); + +SELECT ''; +SELECT '-- No skip for like with matching substring'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE like(message, '%rvi%') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE like(message, '%rvi%'); + +SELECT ''; +SELECT '-- No skip for like with non-matching string'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE like(message, '%foo%') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE like(message, '%foo%'); + +SELECT ''; +SELECT '-- No skip for notLike with non-matching token'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE notLike(message, '%rvice is rea%') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE notLike(message, '%rvice is 
rea%'); + +SELECT ''; +-- could be an optimization in the future +SELECT '-- No skip for notLike with matching tokens'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE notLike(message, '%rvice is not rea%') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE notLike(message, '%rvice is not rea%'); + +SELECT ''; +SELECT '-- No skip for notLike with matching substring'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE notLike(message, '%ready%') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE notLike(message, '%ready%'); + +SELECT ''; +SELECT '-- No skip for equals with matching string'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE equals(message, 'Service is not ready') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE equals(message, 'Service is not ready'); + +SELECT ''; +SELECT '-- Skip for equals with non-matching string'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE equals(message, 'Service is not rea') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE equals(message, 'Service is not rea'); + +SELECT ''; +SELECT '-- No skip for notEquals with non-matching string'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE notEquals(message, 'Service is not rea') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE notEquals(message, 'Service is not rea'); + +SELECT ''; +SELECT '-- No skip for notEquals with matching string'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE notEquals(message, 'Service is not ready') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE notEquals(message, 'Service is not ready'); + +SELECT ''; +SELECT '-- No skip for hasTokenOrNull with matching token'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE hasTokenOrNull(message, 'ready') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE hasTokenOrNull(message, 'ready'); + +SELECT ''; +SELECT '-- Skip for hasTokenOrNull with non-matching token'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE hasTokenOrNull(message, 'foo') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE hasTokenOrNull(message, 'foo'); + +SELECT ''; +SELECT '-- Skip for hasTokenOrNull with ill-formed token'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT * FROM 03165_token_ft WHERE hasTokenOrNull(message, 'rea dy') +) +WHERE explain LIKE '%Parts:%'; + +SELECT * FROM 03165_token_ft WHERE hasTokenOrNull(message, 'rea dy'); diff --git a/parser/testdata/03166_mv_prewhere_duplicating_name_bug/ast.json b/parser/testdata/03166_mv_prewhere_duplicating_name_bug/ast.json new file mode 100644 index 000000000..cc0d01a2f --- /dev/null +++ b/parser/testdata/03166_mv_prewhere_duplicating_name_bug/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery src (children 3)" + }, + { + "explain": " Identifier src" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType Int64" + }, + { + "explain": " Storage definition (children 1)" + 
}, + { + "explain": " Function Log" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001122299, + "rows_read": 8, + "bytes_read": 267 + } +} diff --git a/parser/testdata/03166_mv_prewhere_duplicating_name_bug/metadata.json b/parser/testdata/03166_mv_prewhere_duplicating_name_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03166_mv_prewhere_duplicating_name_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03166_mv_prewhere_duplicating_name_bug/query.sql b/parser/testdata/03166_mv_prewhere_duplicating_name_bug/query.sql new file mode 100644 index 000000000..e27e86454 --- /dev/null +++ b/parser/testdata/03166_mv_prewhere_duplicating_name_bug/query.sql @@ -0,0 +1,7 @@ +create table src (x Int64) engine = Log; +create table dst (s String, lc LowCardinality(String)) engine MergeTree order by s; +create materialized view mv to dst (s String, lc String) as select 'a' as s, toLowCardinality('b') as lc from src; +insert into src values (1); + +select s, lc from mv where not ignore(lc) settings enable_analyzer=0; +select s, lc from mv where not ignore(lc) settings enable_analyzer=1; diff --git a/parser/testdata/03166_optimize_row_order_during_insert/ast.json b/parser/testdata/03166_optimize_row_order_during_insert/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03166_optimize_row_order_during_insert/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03166_optimize_row_order_during_insert/metadata.json b/parser/testdata/03166_optimize_row_order_during_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03166_optimize_row_order_during_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03166_optimize_row_order_during_insert/query.sql b/parser/testdata/03166_optimize_row_order_during_insert/query.sql new file mode 100644 index 000000000..5fc71598e --- /dev/null +++ b/parser/testdata/03166_optimize_row_order_during_insert/query.sql @@ -0,0 +1,98 @@ +-- Checks that no bad things happen when the table optimizes the row order to improve compressibility during insert. + + +-- The SELECTs below intentionally ORDER BY only the table primary key and rely on the read-in-order optimization +SET optimize_read_in_order = 1; + +-- A simple check that the optimization works correctly for a table with 2 columns and 2 equivalence classes. +SELECT 'Simple test'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab ( + name String, + event Int8 +) ENGINE = MergeTree +ORDER BY name +SETTINGS optimize_row_order = true; +INSERT INTO tab VALUES ('Igor', 3), ('Egor', 1), ('Egor', 2), ('Igor', 2), ('Igor', 1); + +SELECT * FROM tab ORDER BY name SETTINGS max_threads=1; + +DROP TABLE tab; + +-- Checks that RowOptimizer correctly selects the order for columns according to cardinality, with an empty ORDER BY. +-- There are 4 columns with cardinalities {name: 3, timestamp: 3, money: 17, flag: 2}, so the column order must be {flag, name, timestamp, money}.
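+-- (Illustrative aside, not part of the upstream test: once the table below is populated, those
+-- cardinalities could be double-checked by hand with
+--     SELECT uniqExact(name), uniqExact(timestamp), uniqExact(money), uniqExact(flag) FROM tab;
+-- which should return 3, 3, 17, 2, matching the expected column order {flag, name, timestamp, money}.)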
+SELECT 'Cardinalities test'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab ( + name String, + timestamp Int64, + money UInt8, + flag String +) ENGINE = MergeTree +ORDER BY () +SETTINGS optimize_row_order = True; +INSERT INTO tab VALUES ('Bob', 4, 100, '1'), ('Nikita', 2, 54, '1'), ('Nikita', 1, 228, '1'), ('Alex', 4, 83, '1'), ('Alex', 4, 134, '1'), ('Alex', 1, 65, '0'), ('Alex', 4, 134, '1'), ('Bob', 2, 53, '0'), ('Alex', 4, 83, '0'), ('Alex', 1, 63, '1'), ('Bob', 2, 53, '1'), ('Alex', 4, 192, '1'), ('Alex', 2, 128, '1'), ('Nikita', 2, 148, '0'), ('Bob', 4, 177, '0'), ('Nikita', 1, 173, '0'), ('Alex', 1, 239, '0'), ('Alex', 1, 63, '0'), ('Alex', 2, 224, '1'), ('Bob', 4, 177, '0'), ('Alex', 2, 128, '1'), ('Alex', 4, 134, '0'), ('Alex', 4, 83, '1'), ('Bob', 4, 100, '0'), ('Nikita', 2, 54, '1'), ('Alex', 1, 239, '1'), ('Bob', 2, 187, '1'), ('Alex', 1, 65, '1'), ('Bob', 2, 53, '1'), ('Alex', 2, 224, '0'), ('Alex', 4, 192, '0'), ('Nikita', 1, 173, '1'), ('Nikita', 2, 148, '1'), ('Bob', 2, 187, '1'), ('Nikita', 2, 208, '1'), ('Nikita', 2, 208, '0'), ('Nikita', 1, 228, '0'), ('Nikita', 2, 148, '0'); + +SELECT * FROM tab SETTINGS max_threads=1; + +DROP TABLE tab; + +-- Checks that RowOptimizer correctly selects the order for columns according to cardinality in each equivalence class obtained using SortDescription. +-- There are two columns in the SortDescription: {flag, money} in this order. +-- So there are 5 equivalence classes: {9.81, 0}, {2.7, 1}, {42, 1}, {3.14, Null}, {42, Null}. +-- For the first three of them the cardinalities of the other 2 columns are equal, so they are sorted in order {0, 1} in these classes. +-- In the fourth class the cardinalities are {name: 2, timestamp: 3}, so they are sorted in order {name, timestamp} in this class. +-- In the fifth class the cardinalities are {name: 3, timestamp: 2}, so they are sorted in order {timestamp, name} in this class. +SELECT 'Equivalence classes test'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab ( + name FixedString(2), + timestamp Float32, + money Float64, + flag Nullable(Int32) +) ENGINE = MergeTree +ORDER BY (flag, money) +SETTINGS optimize_row_order = True, allow_nullable_key = True; +INSERT INTO tab VALUES ('AB', 0, 42, Null), ('AB', 0, 42, Null), ('A', 1, 42, Null), ('AB', 1, 9.81, 0), ('B', 0, 42, Null), ('B', -1, 3.14, Null), ('B', 1, 2.7, 1), ('B', 0, 42, 1), ('A', 1, 42, 1), ('B', 1, 42, Null), ('B', 0, 2.7, 1), ('A', 0, 2.7, 1), ('B', 2, 3.14, Null), ('A', 0, 3.14, Null), ('A', 1, 2.7, 1), ('A', 1, 42, Null); + +SELECT * FROM tab ORDER BY (flag, money) SETTINGS max_threads=1; + +DROP TABLE tab; + +-- Checks that no bad things happen when the table optimizes the row order to improve compressibility during insert for many different column types. +-- For some of these types estimateCardinalityInPermutedRange returns just the size of the current equal range. +-- There are 5 equivalence classes, each of size 3. +-- In the first of them the cardinality of the vector_array column equals 2, the other cardinalities equal 3. +-- In the second of them the cardinality of the nullable_int column equals 2, the other cardinalities equal 3. +-- ... +-- In the fifth of them the cardinality of the tuple_column column equals 2, the other cardinalities equal 3. +-- So, in each of these classes, the column with cardinality 2 whose type implements the estimateCardinalityInPermutedRange method +-- must come first in the column order, and all other columns must keep the stable order. +-- For all other classes the columns must keep the stable order.
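+-- (Concrete instance of the above, added for illustration only: the three rows inserted below with
+-- fixed_str = 'A' and event_date = '2020-01-01' form the first equivalence class; vector_array takes
+-- only the two values [0.0, 1.1] and [2.2, 1.1] there while the remaining columns take three, so
+-- vector_array is expected to come first in the within-class sort order.)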
+SELECT 'Many types test'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab ( + fixed_str FixedString(6), + event_date Date, + vector_array Array(Float32), + nullable_int Nullable(Int128), + low_card_string LowCardinality(String), + map_column Map(String, String), + tuple_column Tuple(UInt256) +) ENGINE = MergeTree() +ORDER BY (fixed_str, event_date) +SETTINGS optimize_row_order = True; + +INSERT INTO tab VALUES ('A', '2020-01-01', [0.0, 1.1], 10, 'some string', {'key':'value'}, (123)), ('A', '2020-01-01', [0.0, 1.1], NULL, 'example', {}, (26)), ('A', '2020-01-01', [2.2, 1.1], 1, 'some other string', {'key2':'value2'}, (5)), ('A', '2020-01-02', [0.0, 1.1], 10, 'some string', {'key':'value'}, (123)), ('A', '2020-01-02', [0.0, 2.2], 10, 'example', {}, (26)), ('A', '2020-01-02', [2.2, 1.1], 1, 'some other string', {'key2':'value2'}, (5)), ('B', '2020-01-04', [0.0, 1.1], 10, 'some string', {'key':'value'}, (123)), ('B', '2020-01-04', [0.0, 2.2], Null, 'example', {}, (26)), ('B', '2020-01-04', [2.2, 1.1], 1, 'some string', {'key2':'value2'}, (5)), ('B', '2020-01-05', [0.0, 1.1], 10, 'some string', {'key':'value'}, (123)), ('B', '2020-01-05', [0.0, 2.2], Null, 'example', {}, (26)), ('B', '2020-01-05', [2.2, 1.1], 1, 'some other string', {'key':'value'}, (5)), ('C', '2020-01-04', [0.0, 1.1], 10, 'some string', {'key':'value'}, (5)), ('C', '2020-01-04', [0.0, 2.2], Null, 'example', {}, (26)), ('C', '2020-01-04', [2.2, 1.1], 1, 'some other string', {'key2':'value2'}, (5)); + +SELECT * FROM tab ORDER BY (fixed_str, event_date) SETTINGS max_threads=1; + +DROP TABLE tab; diff --git a/parser/testdata/03166_skip_indexes_vertical_merge_1/ast.json b/parser/testdata/03166_skip_indexes_vertical_merge_1/ast.json new file mode 100644 index 000000000..0d7f0995b --- /dev/null +++ b/parser/testdata/03166_skip_indexes_vertical_merge_1/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_ind_merge_1 (children 1)" + }, + { + "explain": " Identifier t_ind_merge_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001560754, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/03166_skip_indexes_vertical_merge_1/metadata.json b/parser/testdata/03166_skip_indexes_vertical_merge_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03166_skip_indexes_vertical_merge_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03166_skip_indexes_vertical_merge_1/query.sql b/parser/testdata/03166_skip_indexes_vertical_merge_1/query.sql new file mode 100644 index 000000000..0c087b093 --- /dev/null +++ b/parser/testdata/03166_skip_indexes_vertical_merge_1/query.sql @@ -0,0 +1,41 @@ +DROP TABLE IF EXISTS t_ind_merge_1; + +SET enable_analyzer = 1; + +CREATE TABLE t_ind_merge_1 (a UInt64, b UInt64, c UInt64, d UInt64, INDEX idx_b b TYPE minmax) +ENGINE = MergeTree +ORDER BY a SETTINGS + index_granularity = 64, + merge_max_block_size = 8192, + vertical_merge_algorithm_min_rows_to_activate = 1, + vertical_merge_algorithm_min_columns_to_activate = 1, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0, + enable_block_number_column = 0, + enable_block_offset_column = 0; + +INSERT INTO t_ind_merge_1 SELECT number, number, rand(), rand() FROM numbers(1000); +INSERT INTO t_ind_merge_1 SELECT number, number, rand(), rand() FROM numbers(1000); + +SELECT count() FROM t_ind_merge_1 WHERE b < 100 SETTINGS force_data_skipping_indices = 'idx_b'; 
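+-- (Note, assuming the documented behavior of force_data_skipping_indices: the setting makes the
+-- query throw if the listed index is not used, so the count() above also asserts that idx_b is
+-- actually consulted, both before and after the OPTIMIZE below.)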
+EXPLAIN indexes = 1 SELECT count() FROM t_ind_merge_1 WHERE b < 100; + +OPTIMIZE TABLE t_ind_merge_1 FINAL; + +SELECT count() FROM t_ind_merge_1 WHERE b < 100 SETTINGS force_data_skipping_indices = 'idx_b'; +EXPLAIN indexes = 1 SELECT count() FROM t_ind_merge_1 WHERE b < 100; + +SYSTEM FLUSH LOGS text_log; +SET max_rows_to_read = 0; -- system.text_log can be really big +WITH + (SELECT uuid FROM system.tables WHERE database = currentDatabase() AND table = 't_ind_merge_1') AS uuid, + extractAllGroupsVertical(message, 'containing (\\d+) columns \((\\d+) merged, (\\d+) gathered\)')[1] AS groups +SELECT + groups[1] AS total, + groups[2] AS merged, + groups[3] AS gathered +FROM system.text_log +WHERE ((query_id = uuid || '::all_1_2_1') OR (query_id = currentDatabase() || '.t_ind_merge_1::all_1_2_1')) AND notEmpty(groups) +ORDER BY event_time_microseconds; + +DROP TABLE t_ind_merge_1; diff --git a/parser/testdata/03166_skip_indexes_vertical_merge_2/ast.json b/parser/testdata/03166_skip_indexes_vertical_merge_2/ast.json new file mode 100644 index 000000000..2ee862a82 --- /dev/null +++ b/parser/testdata/03166_skip_indexes_vertical_merge_2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_ind_merge_2 (children 1)" + }, + { + "explain": " Identifier t_ind_merge_2" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001476997, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/03166_skip_indexes_vertical_merge_2/metadata.json b/parser/testdata/03166_skip_indexes_vertical_merge_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03166_skip_indexes_vertical_merge_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03166_skip_indexes_vertical_merge_2/query.sql b/parser/testdata/03166_skip_indexes_vertical_merge_2/query.sql new file mode 100644 index 000000000..843689849 --- /dev/null +++ b/parser/testdata/03166_skip_indexes_vertical_merge_2/query.sql @@ -0,0 +1,45 @@ +DROP TABLE IF EXISTS t_ind_merge_2; + +CREATE TABLE t_ind_merge_2 ( + a UInt64, + b UInt64, + c UInt64, + d UInt64, + e UInt64, + f UInt64, + INDEX idx_a a TYPE minmax, + INDEX idx_b b TYPE minmax, + INDEX idx_cd c * d TYPE minmax, + INDEX idx_d1 d TYPE minmax, + INDEX idx_d2 d + 7 TYPE set(3), + INDEX idx_e e * 3 TYPE set(3)) +ENGINE = MergeTree +ORDER BY a SETTINGS + index_granularity = 64, + vertical_merge_algorithm_min_rows_to_activate = 1, + vertical_merge_algorithm_min_columns_to_activate = 1, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0, + enable_block_number_column = 0, + enable_block_offset_column = 0; + +INSERT INTO t_ind_merge_2 SELECT number, number, rand(), rand(), rand(), rand() FROM numbers(1000); +INSERT INTO t_ind_merge_2 SELECT number, number, rand(), rand(), rand(), rand() FROM numbers(1000); + +OPTIMIZE TABLE t_ind_merge_2 FINAL; +SYSTEM FLUSH LOGS text_log; +SET max_rows_to_read = 0; -- system.text_log can be really big + +--- merged: a, c, d; gathered: b, e, f +WITH + (SELECT uuid FROM system.tables WHERE database = currentDatabase() AND table = 't_ind_merge_2') AS uuid, + extractAllGroupsVertical(message, 'containing (\\d+) columns \((\\d+) merged, (\\d+) gathered\)')[1] AS groups +SELECT + groups[1] AS total, + groups[2] AS merged, + groups[3] AS gathered +FROM system.text_log +WHERE ((query_id = uuid || '::all_1_2_1') OR (query_id = currentDatabase() || '.t_ind_merge_2::all_1_2_1')) AND 
notEmpty(groups) +ORDER BY event_time_microseconds; + +DROP TABLE t_ind_merge_2; diff --git a/parser/testdata/03167_base64_url_functions/ast.json b/parser/testdata/03167_base64_url_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03167_base64_url_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03167_base64_url_functions/metadata.json b/parser/testdata/03167_base64_url_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03167_base64_url_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03167_base64_url_functions/query.sql b/parser/testdata/03167_base64_url_functions/query.sql new file mode 100644 index 000000000..6c394ba6c --- /dev/null +++ b/parser/testdata/03167_base64_url_functions/query.sql @@ -0,0 +1,36 @@ +-- Tags: no-fasttest +-- no-fasttest because aklomp-base64 library is required + +-- incorrect number of arguments +SELECT base64URLEncode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT base64URLDecode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT tryBase64URLDecode(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT base64URLEncode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT base64URLDecode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT tryBase64URLDecode('foo', 'excess argument'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +-- test with valid inputs + +SELECT 'https://clickhouse.com' AS original, base64URLEncode(original) AS encoded, base64URLDecode(encoded), tryBase64URLDecode(encoded); +SELECT '12?' AS original, base64URLEncode(original) AS encoded, base64URLDecode(encoded), tryBase64URLDecode(encoded); +SELECT 'https://www.google.com/search?q=clickhouse+base64+decode&sca_esv=739f8bb380e4c7ed&ei=TfRiZqCDIrmnwPAP2KLRkA8&ved=0ahUKEwjg3ZHitsmGAxW5ExAIHVhRFPIQ4dUDCBA&uact=5&oq=clickhouse+base64+decode' AS original, base64URLEncode(original) AS encoded, base64URLDecode(encoded), tryBase64URLDecode(encoded); + +-- encoded value has no padding +SELECT 'aHR0cHM6Ly9jbGlj' AS encoded, base64URLDecode(encoded), tryBase64URLDecode(encoded); +-- encoded value has one-byte padding +SELECT 'aHR0cHM6Ly9jbGlja2g' AS encoded, base64URLDecode(encoded), tryBase64URLDecode(encoded); +-- encoded value has two-byte padding +SELECT 'aHR0cHM6Ly9jbGljaw' AS encoded, base64URLDecode(encoded), tryBase64URLDecode(encoded); + +-- test with invalid inputs + +SELECT base64URLDecode('https://clickhouse.com'); -- { serverError INCORRECT_DATA } +SELECT tryBase64URLDecode('https://clickhouse.com'); +SELECT base64URLDecode('12?'); -- { serverError INCORRECT_DATA } +SELECT tryBase64URLDecode('12?'); +SELECT base64URLDecode('aHR0cHM6Ly9jbGlja'); -- { serverError INCORRECT_DATA } +SELECT tryBase64URLDecode('aHR0cHM6Ly9jbGlja'); + +-- test FixedString argument + +SELECT toFixedString('https://clickhouse.com', 22) AS original, base64URLEncode(original) AS encoded, base64URLDecode(encoded), tryBase64URLDecode(encoded); diff --git a/parser/testdata/03167_empty_tuple_concat/ast.json b/parser/testdata/03167_empty_tuple_concat/ast.json new file mode 100644 index 000000000..bbb782211 --- /dev/null +++ b/parser/testdata/03167_empty_tuple_concat/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { +
"explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001245972, + "rows_read": 10, + "bytes_read": 369 + } +} diff --git a/parser/testdata/03167_empty_tuple_concat/metadata.json b/parser/testdata/03167_empty_tuple_concat/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03167_empty_tuple_concat/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03167_empty_tuple_concat/query.sql b/parser/testdata/03167_empty_tuple_concat/query.sql new file mode 100644 index 000000000..f6fce86f3 --- /dev/null +++ b/parser/testdata/03167_empty_tuple_concat/query.sql @@ -0,0 +1 @@ +SELECT ()||(); diff --git a/parser/testdata/03167_fancy_quotes_off_by_one/ast.json b/parser/testdata/03167_fancy_quotes_off_by_one/ast.json new file mode 100644 index 000000000..8e4e85498 --- /dev/null +++ b/parser/testdata/03167_fancy_quotes_off_by_one/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'test' (alias column)" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.002097779, + "rows_read": 5, + "bytes_read": 190 + } +} diff --git a/parser/testdata/03167_fancy_quotes_off_by_one/metadata.json b/parser/testdata/03167_fancy_quotes_off_by_one/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03167_fancy_quotes_off_by_one/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03167_fancy_quotes_off_by_one/query.sql b/parser/testdata/03167_fancy_quotes_off_by_one/query.sql new file mode 100644 index 000000000..6f563d8f2 --- /dev/null +++ b/parser/testdata/03167_fancy_quotes_off_by_one/query.sql @@ -0,0 +1 @@ +SELECT ‘test’ AS “column” \ No newline at end of file diff --git a/parser/testdata/03167_parametrized_view_with_cte/ast.json b/parser/testdata/03167_parametrized_view_with_cte/ast.json new file mode 100644 index 000000000..d1c85f2c0 --- /dev/null +++ b/parser/testdata/03167_parametrized_view_with_cte/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001012091, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03167_parametrized_view_with_cte/metadata.json b/parser/testdata/03167_parametrized_view_with_cte/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03167_parametrized_view_with_cte/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03167_parametrized_view_with_cte/query.sql b/parser/testdata/03167_parametrized_view_with_cte/query.sql new file mode 100644 index 000000000..ae6ab5864 --- /dev/null +++ b/parser/testdata/03167_parametrized_view_with_cte/query.sql @@ -0,0 +1,7 @@ +SET enable_analyzer=1; +CREATE 
OR REPLACE VIEW param_test AS SELECT {test_str:String} as s_result; +WITH 'OK' AS s SELECT * FROM param_test(test_str=s); +WITH (SELECT 123) AS s SELECT * FROM param_test(test_str=s); +WITH (SELECT 100 + 20 + 3) AS s SELECT * FROM param_test(test_str=s); +WITH (SELECT number FROM numbers(123, 1)) AS s SELECT * FROM param_test(test_str=s); +WITH CAST(123, 'String') AS s SELECT * FROM param_test(test_str=s); diff --git a/parser/testdata/03167_transactions_are_really_disabled/ast.json b/parser/testdata/03167_transactions_are_really_disabled/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03167_transactions_are_really_disabled/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03167_transactions_are_really_disabled/metadata.json b/parser/testdata/03167_transactions_are_really_disabled/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03167_transactions_are_really_disabled/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03167_transactions_are_really_disabled/query.sql b/parser/testdata/03167_transactions_are_really_disabled/query.sql new file mode 100644 index 000000000..793bc1885 --- /dev/null +++ b/parser/testdata/03167_transactions_are_really_disabled/query.sql @@ -0,0 +1,15 @@ +-- Tags: no-encrypted-storage + +DROP TABLE IF EXISTS mv_table; +DROP TABLE IF EXISTS null_table; + +CREATE TABLE null_table (str String) ENGINE = Null; +CREATE MATERIALIZED VIEW mv_table (str String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/transactions_disabled_rmt', '{replica}') ORDER BY str AS SELECT str AS str FROM null_table; + +SET implicit_transaction=1; +set throw_on_unsupported_query_inside_transaction=0; + +INSERT INTO null_table VALUES ('test'); --{serverError NOT_IMPLEMENTED} + +DROP TABLE IF EXISTS mv_table; +DROP TABLE IF EXISTS null_table; diff --git a/parser/testdata/03168_attach_as_replicated_materialized_view/ast.json b/parser/testdata/03168_attach_as_replicated_materialized_view/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03168_attach_as_replicated_materialized_view/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03168_attach_as_replicated_materialized_view/metadata.json b/parser/testdata/03168_attach_as_replicated_materialized_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03168_attach_as_replicated_materialized_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03168_attach_as_replicated_materialized_view/query.sql b/parser/testdata/03168_attach_as_replicated_materialized_view/query.sql new file mode 100644 index 000000000..7aeb3dedf --- /dev/null +++ b/parser/testdata/03168_attach_as_replicated_materialized_view/query.sql @@ -0,0 +1,42 @@ +-- Tags: zookeeper, no-replicated-database, no-ordinary-database +CREATE TABLE hourly_data + (domain_name String, event_time DateTime, count_views UInt64) + ENGINE = MergeTree ORDER BY (domain_name, event_time); + +CREATE TABLE monthly_aggregated_data + (domain_name String, month Date, sumCountViews AggregateFunction(sum, UInt64)) + ENGINE = AggregatingMergeTree ORDER BY (domain_name, month); + + +CREATE MATERIALIZED VIEW monthly_aggregated_data_mv + TO monthly_aggregated_data + AS + SELECT + toDate(toStartOfMonth(event_time)) AS month, + domain_name, + sumState(count_views) AS sumCountViews + FROM hourly_data + GROUP BY + domain_name, + month; + +INSERT 
INTO hourly_data (domain_name, event_time, count_views) + VALUES ('clickhouse.com', '2019-01-01 10:00:00', 1), ('clickhouse.com', '2019-02-02 00:00:00', 2), ('clickhouse.com', '2019-02-01 00:00:00', 3); + +SELECT sumMerge(sumCountViews) as sumCountViews FROM monthly_aggregated_data_mv; +SELECT count() FROM hourly_data; + +DETACH TABLE hourly_data; +DETACH TABLE monthly_aggregated_data; +ATTACH TABLE hourly_data AS REPLICATED; +ATTACH TABLE monthly_aggregated_data AS REPLICATED; +SYSTEM RESTORE REPLICA hourly_data; +SYSTEM RESTORE REPLICA monthly_aggregated_data; + +SELECT name, engine FROM system.tables WHERE database=currentDatabase(); + +INSERT INTO hourly_data (domain_name, event_time, count_views) + VALUES ('clickhouse.com', '2019-01-01 10:00:00', 1), ('clickhouse.com', '2019-02-02 00:00:00', 2), ('clickhouse.com', '2019-02-01 00:00:00', 3); + +SELECT sumMerge(sumCountViews) as sumCountViews FROM monthly_aggregated_data_mv; +SELECT count() FROM hourly_data; diff --git a/parser/testdata/03168_cld2_tsan/ast.json b/parser/testdata/03168_cld2_tsan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03168_cld2_tsan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03168_cld2_tsan/metadata.json b/parser/testdata/03168_cld2_tsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03168_cld2_tsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03168_cld2_tsan/query.sql b/parser/testdata/03168_cld2_tsan/query.sql new file mode 100644 index 000000000..701a781c4 --- /dev/null +++ b/parser/testdata/03168_cld2_tsan/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: depends on cld2 + +-- https://github.com/ClickHouse/ClickHouse/issues/64931 +SELECT detectLanguageMixed(materialize('二兎を追う者は一兎をも得ず二兎を追う者は一兎をも得ず A vaincre sans peril, on triomphe sans gloire.')) +GROUP BY + GROUPING SETS ( + ('a', toUInt256(1)), + (stringToH3(toFixedString(toFixedString('85283473ffffff', 14), 14)))) +SETTINGS allow_experimental_nlp_functions = 1; diff --git a/parser/testdata/03168_fuzz_multiIf_short_circuit/ast.json b/parser/testdata/03168_fuzz_multiIf_short_circuit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03168_fuzz_multiIf_short_circuit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03168_fuzz_multiIf_short_circuit/metadata.json b/parser/testdata/03168_fuzz_multiIf_short_circuit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03168_fuzz_multiIf_short_circuit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03168_fuzz_multiIf_short_circuit/query.sql b/parser/testdata/03168_fuzz_multiIf_short_circuit/query.sql new file mode 100644 index 000000000..4e4cc291e --- /dev/null +++ b/parser/testdata/03168_fuzz_multiIf_short_circuit/query.sql @@ -0,0 +1,6 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/64946 +SELECT + multiIf((number % toLowCardinality(toNullable(toUInt128(2)))) = (number % toNullable(2)), toInt8(1), (number % materialize(toLowCardinality(3))) = toUInt128(toNullable(0)), toInt8(materialize(materialize(2))), toInt64(toUInt128(3))) +FROM system.numbers +LIMIT 44857 +FORMAT Null; diff --git a/parser/testdata/03168_inconsistent_ast_formatting/ast.json b/parser/testdata/03168_inconsistent_ast_formatting/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/03168_inconsistent_ast_formatting/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03168_inconsistent_ast_formatting/metadata.json b/parser/testdata/03168_inconsistent_ast_formatting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03168_inconsistent_ast_formatting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03168_inconsistent_ast_formatting/query.sql b/parser/testdata/03168_inconsistent_ast_formatting/query.sql new file mode 100644 index 000000000..ace538a90 --- /dev/null +++ b/parser/testdata/03168_inconsistent_ast_formatting/query.sql @@ -0,0 +1,61 @@ +create table a (x `Null`); -- { clientError SYNTAX_ERROR } +create table a (x f(`Null`)); -- { clientError SYNTAX_ERROR } +create table a (x Enum8(f(`Null`, 'World', 2))); -- { clientError SYNTAX_ERROR } +create table a (`value2` Enum8('Hello' = 1, equals(`Null`, 'World', 2), '!' = 3)); -- { clientError SYNTAX_ERROR } + +create table a (x Int8) engine Memory; +create table b empty as a; + +SELECT '--'; +SELECT NOT (1); +SELECT formatQuery('SELECT NOT 1'); +SELECT formatQuery('SELECT NOT (1)'); + +SELECT '--'; +SELECT NOT (1, 1, 1); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT formatQuery('SELECT NOT (1, 1, 1)'); +SELECT formatQuery('SELECT not(1, 1, 1)'); + +SELECT '--'; +SELECT NOT ((1,)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT NOT tuple(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatQuery('SELECT NOT ((1,))'); +SELECT formatQuery('SELECT NOT (tuple(1))'); +SELECT formatQuery('SELECT NOT tuple(1)'); + +SELECT '--'; +SELECT NOT ((1, 1, 1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatQuery('SELECT NOT ((1, 1, 1))'); +SELECT formatQuery('SELECT not((1, 1, 1))'); +SELECT formatQuery('SELECT not tuple(1, 1, 1)'); +SELECT formatQuery('SELECT not (tuple(1, 1, 1))'); + +SELECT '--'; +SELECT NOT [1]; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT NOT [(1)]; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatQuery('SELECT NOT [1]'); +SELECT formatQuery('SELECT NOT array(1)'); +SELECT formatQuery('SELECT NOT (array(1))'); +SELECT formatQuery('SELECT NOT [(1)]'); +SELECT formatQuery('SELECT NOT ([1])'); + +SELECT '--'; +SELECT -[1]; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT -[(1)]; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatQuery('SELECT -[1]'); +SELECT formatQuery('SELECT -array(1)'); +SELECT formatQuery('SELECT -(array(1))'); +SELECT formatQuery('SELECT -[(1)]'); +SELECT formatQuery('SELECT -([1])'); + +SELECT '--'; +SELECT -(1, 1, 1); +SELECT formatQuery('SELECT -(1, 1, 1)'); +SELECT formatQuery('SELECT negate ((1, 1, 1))'); +SELECT formatQuery('SELECT -tuple(1, 1, 1)'); +SELECT formatQuery('SELECT -(tuple(1, 1, 1))'); + +SELECT '--'; +SELECT -tuple((1, 1, 1)); +SELECT formatQuery('SELECT -((1, 1, 1))'); +SELECT formatQuery('SELECT -tuple((1, 1, 1))'); diff --git a/parser/testdata/03168_loop_engine_with_parallel_replicas/ast.json b/parser/testdata/03168_loop_engine_with_parallel_replicas/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03168_loop_engine_with_parallel_replicas/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03168_loop_engine_with_parallel_replicas/metadata.json b/parser/testdata/03168_loop_engine_with_parallel_replicas/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03168_loop_engine_with_parallel_replicas/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03168_loop_engine_with_parallel_replicas/query.sql b/parser/testdata/03168_loop_engine_with_parallel_replicas/query.sql new file mode 100644 index 000000000..d86334a23 --- /dev/null +++ b/parser/testdata/03168_loop_engine_with_parallel_replicas/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS 03147_db; +CREATE DATABASE IF NOT EXISTS 03147_db; +CREATE TABLE 03147_db.t (n Int8) ENGINE=MergeTree ORDER BY n; +INSERT INTO 03147_db.t SELECT * FROM numbers(10); +USE 03147_db; + +-- We use the old setting here just to make sure we preserve it as an alias. +SET allow_experimental_parallel_reading_from_replicas = 2, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'parallel_replicas', max_parallel_replicas = 100; + +SELECT * FROM loop(03147_db.t) LIMIT 15 FORMAT Null; diff --git a/parser/testdata/03168_read_in_order_buffering_1/ast.json b/parser/testdata/03168_read_in_order_buffering_1/ast.json new file mode 100644 index 000000000..8faabc3f4 --- /dev/null +++ b/parser/testdata/03168_read_in_order_buffering_1/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_read_in_order_1 (children 1)" + }, + { + "explain": " Identifier t_read_in_order_1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001262837, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/03168_read_in_order_buffering_1/metadata.json b/parser/testdata/03168_read_in_order_buffering_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03168_read_in_order_buffering_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03168_read_in_order_buffering_1/query.sql b/parser/testdata/03168_read_in_order_buffering_1/query.sql new file mode 100644 index 000000000..75025dcad --- /dev/null +++ b/parser/testdata/03168_read_in_order_buffering_1/query.sql @@ -0,0 +1,45 @@ +DROP TABLE IF EXISTS t_read_in_order_1; + +CREATE TABLE t_read_in_order_1 (id UInt64, v UInt64) +ENGINE = MergeTree ORDER BY id +SETTINGS index_granularity = 1024, index_granularity_bytes = '10M'; + +INSERT INTO t_read_in_order_1 SELECT number, number FROM numbers(1000000); + +SET max_threads = 8; +SET optimize_read_in_order = 1; +SET read_in_order_use_buffering = 1; + +SELECT count() FROM +( + EXPLAIN PIPELINE SELECT * FROM t_read_in_order_1 ORDER BY id +) WHERE explain LIKE '%BufferChunks%'; + +SELECT count() FROM +( + EXPLAIN PIPELINE SELECT * FROM t_read_in_order_1 ORDER BY id LIMIT 10 +) WHERE explain LIKE '%BufferChunks%'; + +SELECT count() FROM +( + EXPLAIN PIPELINE SELECT * FROM t_read_in_order_1 WHERE v % 10 = 0 ORDER BY id LIMIT 10 +) WHERE explain LIKE '%BufferChunks%'; + +SET read_in_order_use_buffering = 0; + +SELECT count() FROM +( + EXPLAIN PIPELINE SELECT * FROM t_read_in_order_1 ORDER BY id +) WHERE explain LIKE '%BufferChunks%'; + +SELECT count() FROM +( + EXPLAIN PIPELINE SELECT * FROM t_read_in_order_1 ORDER BY id LIMIT 10 +) WHERE explain LIKE '%BufferChunks%'; + +SELECT count() FROM +( + EXPLAIN PIPELINE SELECT * FROM t_read_in_order_1 WHERE v % 10 = 0 ORDER BY id LIMIT 10 +) WHERE explain LIKE '%BufferChunks%'; + +DROP TABLE t_read_in_order_1; diff --git a/parser/testdata/03168_read_in_order_buffering_2/ast.json b/parser/testdata/03168_read_in_order_buffering_2/ast.json new file mode 
100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03168_read_in_order_buffering_2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03168_read_in_order_buffering_2/metadata.json b/parser/testdata/03168_read_in_order_buffering_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03168_read_in_order_buffering_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03168_read_in_order_buffering_2/query.sql b/parser/testdata/03168_read_in_order_buffering_2/query.sql new file mode 100644 index 000000000..4df623955 --- /dev/null +++ b/parser/testdata/03168_read_in_order_buffering_2/query.sql @@ -0,0 +1,17 @@ +-- Tags: long, no-random-settings, no-tsan, no-asan, no-msan, no-object-storage + +DROP TABLE IF EXISTS t_read_in_order_2; + +CREATE TABLE t_read_in_order_2 (id UInt64, v UInt64) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_read_in_order_2 SELECT number, number FROM numbers(10000000); +OPTIMIZE TABLE t_read_in_order_2 FINAL; + +SET optimize_read_in_order = 1; +SET max_threads = 4; +SET read_in_order_use_buffering = 1; +SET max_memory_usage = '100M'; + +SELECT * FROM t_read_in_order_2 ORDER BY id FORMAT Null; + +DROP TABLE t_read_in_order_2; diff --git a/parser/testdata/03169_cache_complex_dict_short_circuit_bug/ast.json b/parser/testdata/03169_cache_complex_dict_short_circuit_bug/ast.json new file mode 100644 index 000000000..88b9c8a27 --- /dev/null +++ b/parser/testdata/03169_cache_complex_dict_short_circuit_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery complex_key_simple_attributes_source_short_circuit_table (children 1)" + }, + { + "explain": " Identifier complex_key_simple_attributes_source_short_circuit_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001220529, + "rows_read": 2, + "bytes_read": 164 + } +} diff --git a/parser/testdata/03169_cache_complex_dict_short_circuit_bug/metadata.json b/parser/testdata/03169_cache_complex_dict_short_circuit_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03169_cache_complex_dict_short_circuit_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03169_cache_complex_dict_short_circuit_bug/query.sql b/parser/testdata/03169_cache_complex_dict_short_circuit_bug/query.sql new file mode 100644 index 000000000..f91aaf390 --- /dev/null +++ b/parser/testdata/03169_cache_complex_dict_short_circuit_bug/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS complex_key_simple_attributes_source_short_circuit_table; +DROP DICTIONARY IF EXISTS cache_dictionary_complex_key_simple_attributes_short_circuit; + +CREATE TABLE complex_key_simple_attributes_source_short_circuit_table +( + id UInt64, + id_key String, + value_first String, + value_second String +) + ENGINE = TinyLog; + +INSERT INTO complex_key_simple_attributes_source_short_circuit_table VALUES(0, 'id_key_0', 'value_0', 'value_second_0'); + +CREATE DICTIONARY cache_dictionary_complex_key_simple_attributes_short_circuit +( + `id` UInt64, + `id_key` String, + `value_first` String DEFAULT 'value_first_default', + `value_second` String DEFAULT 'value_second_default' +) +PRIMARY KEY id, id_key +SOURCE(CLICKHOUSE(TABLE 'complex_key_simple_attributes_source_short_circuit_table')) +LIFETIME(MIN 1 MAX 1000) +LAYOUT(COMPLEX_KEY_CACHE(SIZE_IN_CELLS 10)); + +SELECT 
dictGetOrDefault('cache_dictionary_complex_key_simple_attributes_short_circuit', 'value_first', (number, concat(toString(number))), toString(materialize('default'))) AS value_first FROM system.numbers LIMIT 20 FORMAT Null; +SELECT dictGetOrDefault('cache_dictionary_complex_key_simple_attributes_short_circuit', 'value_first', (number, concat(toString(number))), toString(materialize('default'))) AS value_first FROM system.numbers LIMIT 20 FORMAT Null; + +DROP DICTIONARY IF EXISTS cache_dictionary_complex_key_simple_attributes_short_circuit; +DROP TABLE IF EXISTS complex_key_simple_attributes_source_short_circuit_table; diff --git a/parser/testdata/03169_display_column_names_in_footer/ast.json b/parser/testdata/03169_display_column_names_in_footer/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03169_display_column_names_in_footer/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03169_display_column_names_in_footer/metadata.json b/parser/testdata/03169_display_column_names_in_footer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03169_display_column_names_in_footer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03169_display_column_names_in_footer/query.sql b/parser/testdata/03169_display_column_names_in_footer/query.sql new file mode 100644 index 000000000..9e4ec09c2 --- /dev/null +++ b/parser/testdata/03169_display_column_names_in_footer/query.sql @@ -0,0 +1,19 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/65035 +SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 49) FORMAT Pretty; +SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 10) FORMAT Pretty SETTINGS output_format_pretty_display_footer_column_names_min_rows=9; +SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT Pretty SETTINGS output_format_pretty_display_footer_column_names=0; +SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT Pretty; +SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettyNoEscapes; +SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettyMonoBlock; +SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettyNoEscapesMonoBlock; +SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettyNoEscapesMonoBlock; +SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettyCompact SETTINGS output_format_pretty_display_footer_column_names=0; +SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettyCompact; +SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettyCompactNoEscapes; +SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettyCompactMonoBlock; +SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettyCompactNoEscapesMonoBlock; +SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettySpace SETTINGS output_format_pretty_display_footer_column_names=0; +SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettySpace; +SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettySpaceNoEscapes; +SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT PrettySpaceMonoBlock; +SELECT *, toTypeName(*) FROM (SELECT * FROM system.numbers LIMIT 100) FORMAT 
PrettySpaceNoEscapesMonoBlock; diff --git a/parser/testdata/03169_modify_column_data_loss/ast.json b/parser/testdata/03169_modify_column_data_loss/ast.json new file mode 100644 index 000000000..0de85cbdb --- /dev/null +++ b/parser/testdata/03169_modify_column_data_loss/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery column_modify_test (children 1)" + }, + { + "explain": " Identifier column_modify_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001046434, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/03169_modify_column_data_loss/metadata.json b/parser/testdata/03169_modify_column_data_loss/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03169_modify_column_data_loss/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03169_modify_column_data_loss/query.sql b/parser/testdata/03169_modify_column_data_loss/query.sql new file mode 100644 index 000000000..def0a25a1 --- /dev/null +++ b/parser/testdata/03169_modify_column_data_loss/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS column_modify_test; + +CREATE TABLE column_modify_test (id UInt64, val String, other_col UInt64) engine=MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part=0; +INSERT INTO column_modify_test VALUES (1,'one',0); +INSERT INTO column_modify_test VALUES (2,'two',0); + +-- On 21.9 this was done via the mutations mechanism. +ALTER TABLE column_modify_test MODIFY COLUMN val Nullable(String); + +INSERT INTO column_modify_test VALUES (3,Null,0); + +-- Till now everything looks ok. +SELECT * FROM column_modify_test order by id, val, other_col; + +-- Now we do a mutation. It will affect one of the parts and will update columns.txt to the latest / correct state w/o updating the column file! +alter table column_modify_test update other_col=1 where id = 1 SETTINGS mutations_sync=1; + +-- Row 1 is damaged now: the column file & columns.txt are out of sync!
+SELECT *, throwIf(val <> 'one') as issue FROM column_modify_test WHERE id = 1; diff --git a/parser/testdata/03169_optimize_injective_functions_inside_uniq_crash/ast.json b/parser/testdata/03169_optimize_injective_functions_inside_uniq_crash/ast.json new file mode 100644 index 000000000..e2600e3c7 --- /dev/null +++ b/parser/testdata/03169_optimize_injective_functions_inside_uniq_crash/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sum (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier u" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001135363, + "rows_read": 7, + "bytes_read": 252 + } +} diff --git a/parser/testdata/03169_optimize_injective_functions_inside_uniq_crash/metadata.json b/parser/testdata/03169_optimize_injective_functions_inside_uniq_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03169_optimize_injective_functions_inside_uniq_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03169_optimize_injective_functions_inside_uniq_crash/query.sql b/parser/testdata/03169_optimize_injective_functions_inside_uniq_crash/query.sql new file mode 100644 index 000000000..5ab32415f --- /dev/null +++ b/parser/testdata/03169_optimize_injective_functions_inside_uniq_crash/query.sql @@ -0,0 +1,21 @@ +SELECT sum(u) +FROM +( + SELECT + intDiv(number, 4096) AS k, + uniqCombined(tuple(materialize(toLowCardinality(toNullable(16))))) AS u + FROM numbers(4096 * 100) + GROUP BY k +) +SETTINGS enable_analyzer = 1, optimize_injective_functions_inside_uniq=0; + +SELECT sum(u) +FROM +( + SELECT + intDiv(number, 4096) AS k, + uniqCombined(tuple(materialize(toLowCardinality(toNullable(16))))) AS u + FROM numbers(4096 * 100) + GROUP BY k +) +SETTINGS enable_analyzer = 1, optimize_injective_functions_inside_uniq=1; diff --git a/parser/testdata/03170_part_offset_as_table_column/ast.json b/parser/testdata/03170_part_offset_as_table_column/ast.json new file mode 100644 index 000000000..6148a7ace --- /dev/null +++ b/parser/testdata/03170_part_offset_as_table_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001116639, + "rows_read": 2, + "bytes_read": 73 + } +} diff --git a/parser/testdata/03170_part_offset_as_table_column/metadata.json b/parser/testdata/03170_part_offset_as_table_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03170_part_offset_as_table_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03170_part_offset_as_table_column/query.sql b/parser/testdata/03170_part_offset_as_table_column/query.sql new file mode 100644 index 000000000..7711457f2 --- /dev/null +++ b/parser/testdata/03170_part_offset_as_table_column/query.sql @@ -0,0 +1,25 @@ +CREATE TABLE test_table +( + `key` UInt32, + `_part_offset` DEFAULT 0 +) +ENGINE = MergeTree +ORDER BY key; + +INSERT INTO test_table (key) SELECT number +FROM numbers(10); + +set enable_analyzer=0; + +SELECT * 
+FROM test_table; + +set enable_analyzer=1; + +SELECT * +FROM test_table; + +SELECT + key, + _part_offset +FROM test_table; diff --git a/parser/testdata/03171_condition_pushdown/ast.json b/parser/testdata/03171_condition_pushdown/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03171_condition_pushdown/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03171_condition_pushdown/metadata.json b/parser/testdata/03171_condition_pushdown/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03171_condition_pushdown/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03171_condition_pushdown/query.sql b/parser/testdata/03171_condition_pushdown/query.sql new file mode 100644 index 000000000..fcf5db886 --- /dev/null +++ b/parser/testdata/03171_condition_pushdown/query.sql @@ -0,0 +1,6 @@ +-- This query succeeds only if it is correctly optimized. +SET enable_analyzer = 1; +SELECT * FROM (SELECT * FROM numbers(1e19)) AS t1, (SELECT * FROM numbers(1e19)) AS t2 WHERE t1.number IN (123, 456) AND t2.number = t1.number ORDER BY ALL; + +-- Still TODO: +-- SELECT * FROM (SELECT * FROM numbers(1e19)) AS t1, (SELECT * FROM numbers(1e19)) AS t2 WHERE t1.number IN (SELECT 123 UNION ALL SELECT 456) AND t2.number = t1.number ORDER BY ALL; diff --git a/parser/testdata/03171_direct_dict_short_circuit_bug/ast.json b/parser/testdata/03171_direct_dict_short_circuit_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03171_direct_dict_short_circuit_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03171_direct_dict_short_circuit_bug/metadata.json b/parser/testdata/03171_direct_dict_short_circuit_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03171_direct_dict_short_circuit_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03171_direct_dict_short_circuit_bug/query.sql b/parser/testdata/03171_direct_dict_short_circuit_bug/query.sql new file mode 100644 index 000000000..5d0cf80ae --- /dev/null +++ b/parser/testdata/03171_direct_dict_short_circuit_bug/query.sql @@ -0,0 +1,37 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/65201 +SET short_circuit_function_evaluation='enable'; + +DROP DICTIONARY IF EXISTS direct_dictionary_simple_key_simple_attributes; +DROP TABLE IF EXISTS simple_key_simple_attributes_source_table; + +CREATE TABLE simple_key_simple_attributes_source_table +( + id UInt64, + value_first String, + value_second String +) + ENGINE = TinyLog; + +INSERT INTO simple_key_simple_attributes_source_table VALUES(0, 'value_0', 'value_second_0'); +INSERT INTO simple_key_simple_attributes_source_table VALUES(1, 'value_1', 'value_second_1'); +INSERT INTO simple_key_simple_attributes_source_table VALUES(2, 'value_2', 'value_second_2'); + + +CREATE DICTIONARY direct_dictionary_simple_key_simple_attributes +( + `id` UInt64, + `value_first` String DEFAULT 'value_first_default', + `value_second` String DEFAULT 'value_second_default' +) + PRIMARY KEY id + SOURCE(CLICKHOUSE(TABLE 'simple_key_simple_attributes_source_table')) + LAYOUT(DIRECT()); + +SELECT + toUInt128(1), + dictGetOrDefault('direct_dictionary_simple_key_simple_attributes', 'value_second', number, toString(toFixedString(toFixedString(toFixedString(materialize('default'), 7), 7), toUInt128(7)))) AS value_second +FROM system.numbers LIMIT 255 +FORMAT Null; + +DROP DICTIONARY IF EXISTS 
direct_dictionary_simple_key_simple_attributes; +DROP TABLE IF EXISTS simple_key_simple_attributes_source_table; diff --git a/parser/testdata/03171_function_to_subcolumns_fuzzer/ast.json b/parser/testdata/03171_function_to_subcolumns_fuzzer/ast.json new file mode 100644 index 000000000..b6c3c6e38 --- /dev/null +++ b/parser/testdata/03171_function_to_subcolumns_fuzzer/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001466093, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03171_function_to_subcolumns_fuzzer/metadata.json b/parser/testdata/03171_function_to_subcolumns_fuzzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03171_function_to_subcolumns_fuzzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03171_function_to_subcolumns_fuzzer/query.sql b/parser/testdata/03171_function_to_subcolumns_fuzzer/query.sql new file mode 100644 index 000000000..0f5d00c29 --- /dev/null +++ b/parser/testdata/03171_function_to_subcolumns_fuzzer/query.sql @@ -0,0 +1,50 @@ +SET optimize_functions_to_subcolumns = 1; +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS t_func_to_subcolumns_map_2; + +CREATE TABLE t_func_to_subcolumns_map_2 (id UInt64, m Map(String, UInt64)) ENGINE = MergeTree ORDER BY id; + +INSERT INTO t_func_to_subcolumns_map_2 VALUES (1, map('aaa', 1, 'bbb', 2)) (2, map('ccc', 3)); + +SELECT sum(mapContains(m, toNullable('aaa'))) FROM t_func_to_subcolumns_map_2; + +DROP TABLE t_func_to_subcolumns_map_2; + +DROP TABLE IF EXISTS t_func_to_subcolumns_join; + +CREATE TABLE t_func_to_subcolumns_join (id UInt64, arr Array(UInt64), n Nullable(String), m Map(String, UInt64)) +ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_func_to_subcolumns_join VALUES (1, [1, 2, 3], 'abc', map('foo', 1, 'bar', 2)) (2, [], NULL, map()); + +SET join_use_nulls = 1; + +SELECT + id, + right.n IS NULL +FROM t_func_to_subcolumns_join AS left +FULL OUTER JOIN +( + SELECT + 1 AS id, + 'qqq' AS n + UNION ALL + SELECT + 3 AS id, + 'www' +) AS right USING (id) +WHERE empty(arr) +ORDER BY id; + +DROP TABLE t_func_to_subcolumns_join; + +DROP TABLE IF EXISTS t_func_to_subcolumns_use_nulls; + +CREATE TABLE t_func_to_subcolumns_use_nulls (arr Array(UInt64), v UInt64) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_func_to_subcolumns_use_nulls SELECT range(number % 10), number FROM numbers(100); + +SELECT length(arr) AS n, sum(v) FROM t_func_to_subcolumns_use_nulls GROUP BY n WITH ROLLUP HAVING n <= 4 OR isNull(n) ORDER BY n SETTINGS group_by_use_nulls = 1; + +DROP TABLE t_func_to_subcolumns_use_nulls; diff --git a/parser/testdata/03171_hashed_dictionary_short_circuit_bug_fix/ast.json b/parser/testdata/03171_hashed_dictionary_short_circuit_bug_fix/ast.json new file mode 100644 index 000000000..a759f232b --- /dev/null +++ b/parser/testdata/03171_hashed_dictionary_short_circuit_bug_fix/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery x (children 3)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration hash_id (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " ColumnDeclaration user_result (children 1)" + }, + { + "explain": " 
DataType Decimal (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001050417, + "rows_read": 14, + "bytes_read": 505 + } +} diff --git a/parser/testdata/03171_hashed_dictionary_short_circuit_bug_fix/metadata.json b/parser/testdata/03171_hashed_dictionary_short_circuit_bug_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03171_hashed_dictionary_short_circuit_bug_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03171_hashed_dictionary_short_circuit_bug_fix/query.sql b/parser/testdata/03171_hashed_dictionary_short_circuit_bug_fix/query.sql new file mode 100644 index 000000000..6d3a63dba --- /dev/null +++ b/parser/testdata/03171_hashed_dictionary_short_circuit_bug_fix/query.sql @@ -0,0 +1,28 @@ +CREATE TABLE x ( hash_id UInt64, user_result Decimal(3, 2) ) ENGINE = Memory(); + +CREATE TABLE y ( hash_id UInt64, user_result DECIMAL(18, 6) ) ENGINE = Memory(); + +INSERT INTO x values (100, 1), (200, 2); +INSERT INTO y values (100, 1), (300, 3), (200, 2); + +CREATE DICTIONARY d1 (hash_id UInt64, user_result Decimal(3, 2) ) +PRIMARY KEY hash_id +SOURCE(CLICKHOUSE(TABLE 'x')) +LIFETIME(0) +LAYOUT(HASHED()); + +SELECT hash_id, + dictGetOrDefault(d1, 'user_result', toUInt64(hash_id), toFloat64(user_result)), + dictGet(d1, 'user_result', toUInt64(hash_id)) +FROM y; + +CREATE DICTIONARY d2 (hash_id UInt64, user_result Decimal(3, 2) ) +PRIMARY KEY hash_id +SOURCE(CLICKHOUSE(TABLE 'x')) +LIFETIME(0) +LAYOUT(HASHED_ARRAY()); + +SELECT hash_id, + dictGetOrDefault(d2, 'user_result', toUInt64(hash_id), toFloat64(user_result)), + dictGet(d2, 'user_result', toUInt64(hash_id)) +FROM y; diff --git a/parser/testdata/03171_indexing_by_hilbert_curve/ast.json b/parser/testdata/03171_indexing_by_hilbert_curve/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03171_indexing_by_hilbert_curve/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03171_indexing_by_hilbert_curve/metadata.json b/parser/testdata/03171_indexing_by_hilbert_curve/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03171_indexing_by_hilbert_curve/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03171_indexing_by_hilbert_curve/query.sql b/parser/testdata/03171_indexing_by_hilbert_curve/query.sql new file mode 100644 index 000000000..62464b6e8 --- /dev/null +++ b/parser/testdata/03171_indexing_by_hilbert_curve/query.sql @@ -0,0 +1,40 @@ +-- Prevent remote replicas from skipping index analysis in Parallel Replicas. Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. 
+SET parallel_replicas_index_analysis_only_on_coordinator = 0; + +DROP TABLE IF EXISTS test_hilbert_encode; + +CREATE TABLE test_hilbert_encode (x UInt32, y UInt32) ENGINE = MergeTree ORDER BY hilbertEncode(x, y) SETTINGS index_granularity = 8192, index_granularity_bytes = '1Mi'; +INSERT INTO test_hilbert_encode SELECT number DIV 1024, number % 1024 FROM numbers(1048576); + +set max_streams_for_merge_tree_reading = 1; + +SET max_rows_to_read = 8192, force_primary_key = 1, analyze_index_with_space_filling_curves = 1; +SELECT count() FROM test_hilbert_encode WHERE x >= 10 AND x <= 20 AND y >= 20 AND y <= 30; + +SET max_rows_to_read = 8192, force_primary_key = 1, analyze_index_with_space_filling_curves = 0; +SELECT count() FROM test_hilbert_encode WHERE x >= 10 AND x <= 20 AND y >= 20 AND y <= 30; -- { serverError 277 } + +DROP TABLE test_hilbert_encode; + +-- The same, but with a more precise index + +CREATE TABLE test_hilbert_encode (x UInt32, y UInt32) ENGINE = MergeTree ORDER BY hilbertEncode(x, y) SETTINGS index_granularity = 1; +SET max_rows_to_read = 0; +INSERT INTO test_hilbert_encode SELECT number DIV 32, number % 32 FROM numbers(1024); + +SET max_rows_to_read = 200, force_primary_key = 1, analyze_index_with_space_filling_curves = 1; +SELECT count() FROM test_hilbert_encode WHERE x >= 10 AND x <= 20 AND y >= 20 AND y <= 30; + +-- Various other conditions + +SELECT count() FROM test_hilbert_encode WHERE x = 10 SETTINGS max_rows_to_read = 49; +SELECT count() FROM test_hilbert_encode WHERE x = 10 AND y > 10 SETTINGS max_rows_to_read = 33; +SELECT count() FROM test_hilbert_encode WHERE x = 10 AND y < 10 SETTINGS max_rows_to_read = 15; + +SELECT count() FROM test_hilbert_encode WHERE y = 10 SETTINGS max_rows_to_read = 50; +SELECT count() FROM test_hilbert_encode WHERE x >= 10 AND y = 10 SETTINGS max_rows_to_read = 35; +SELECT count() FROM test_hilbert_encode WHERE y = 10 AND x <= 10 SETTINGS max_rows_to_read = 17; + +SELECT count() FROM test_hilbert_encode PREWHERE x >= 10 WHERE x < 11 AND y = 10 SETTINGS max_rows_to_read = 2; + +DROP TABLE test_hilbert_encode; diff --git a/parser/testdata/03172_bcrypt_validation/ast.json b/parser/testdata/03172_bcrypt_validation/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03172_bcrypt_validation/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03172_bcrypt_validation/metadata.json b/parser/testdata/03172_bcrypt_validation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03172_bcrypt_validation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03172_bcrypt_validation/query.sql b/parser/testdata/03172_bcrypt_validation/query.sql new file mode 100644 index 000000000..37dd0c9bb --- /dev/null +++ b/parser/testdata/03172_bcrypt_validation/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest +DROP USER IF EXISTS 03172_user_invalid_bcrypt_hash; +CREATE USER 03172_user_invalid_bcrypt_hash IDENTIFIED WITH bcrypt_hash BY '012345678901234567890123456789012345678901234567890123456789'; -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03172_format_settings_clauses/ast.json b/parser/testdata/03172_format_settings_clauses/ast.json new file mode 100644 index 000000000..6c5feae94 --- /dev/null +++ b/parser/testdata/03172_format_settings_clauses/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + 
"statistics": + { + "elapsed": 0.001499919, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03172_format_settings_clauses/metadata.json b/parser/testdata/03172_format_settings_clauses/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03172_format_settings_clauses/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03172_format_settings_clauses/query.sql b/parser/testdata/03172_format_settings_clauses/query.sql new file mode 100644 index 000000000..450afc5fe --- /dev/null +++ b/parser/testdata/03172_format_settings_clauses/query.sql @@ -0,0 +1,30 @@ +SET max_block_size = 10, max_threads = 1; +-- { echoOn } +-- Take the following example: +SELECT 1 UNION ALL SELECT 2 FORMAT TSV; + +-- Each subquery can be put in parentheses and have its own settings: +(SELECT getSetting('max_block_size') SETTINGS max_block_size = 1) UNION ALL (SELECT getSetting('max_block_size') SETTINGS max_block_size = 2) FORMAT TSV; + +-- And the whole query can have settings: +(SELECT getSetting('max_block_size') SETTINGS max_block_size = 1) UNION ALL (SELECT getSetting('max_block_size') SETTINGS max_block_size = 2) FORMAT TSV SETTINGS max_block_size = 3; + +-- A single query with output is parsed in the same way as the UNION ALL chain: +SELECT getSetting('max_block_size') SETTINGS max_block_size = 1 FORMAT TSV SETTINGS max_block_size = 3; + +-- So while these forms have a slightly different meaning, they both exist: +SELECT getSetting('max_block_size') SETTINGS max_block_size = 1 FORMAT TSV; +SELECT getSetting('max_block_size') FORMAT TSV SETTINGS max_block_size = 3; + +-- And due to this effect, the users expect that the FORMAT and SETTINGS may go in an arbitrary order. +-- But while this work: +(SELECT getSetting('max_block_size')) UNION ALL (SELECT getSetting('max_block_size')) FORMAT TSV SETTINGS max_block_size = 3; + +-- This does not work automatically, unless we explicitly allow different orders: +(SELECT getSetting('max_block_size')) UNION ALL (SELECT getSetting('max_block_size')) SETTINGS max_block_size = 3 FORMAT TSV; + +-- Inevitably, we allow this: +SELECT getSetting('max_block_size') SETTINGS max_block_size = 1 SETTINGS max_block_size = 3 FORMAT TSV; +/*^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^*/ +-- Because this part is consumed into ASTSelectWithUnionQuery +-- and the rest into ASTQueryWithOutput. 
diff --git a/parser/testdata/03172_system_detached_tables_no_loop/ast.json b/parser/testdata/03172_system_detached_tables_no_loop/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03172_system_detached_tables_no_loop/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03172_system_detached_tables_no_loop/metadata.json b/parser/testdata/03172_system_detached_tables_no_loop/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03172_system_detached_tables_no_loop/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03172_system_detached_tables_no_loop/query.sql b/parser/testdata/03172_system_detached_tables_no_loop/query.sql new file mode 100644 index 000000000..7bc39543d --- /dev/null +++ b/parser/testdata/03172_system_detached_tables_no_loop/query.sql @@ -0,0 +1,47 @@ +-- Tags: no-parallel + +SELECT '-----------------------'; +SELECT 'detached table no loop'; + +DROP DATABASE IF EXISTS test_no_loop; +CREATE DATABASE IF NOT EXISTS test_no_loop; + +SET max_block_size = 8; +CREATE TABLE test_no_loop.t0 (c0 Int) ENGINE = MergeTree ORDER BY c0; +CREATE TABLE test_no_loop.t1 (c0 Int) ENGINE = MergeTree ORDER BY c0; +CREATE TABLE test_no_loop.t2 (c0 Int) ENGINE = MergeTree ORDER BY c0; +CREATE TABLE test_no_loop.t3 (c0 Int) ENGINE = MergeTree ORDER BY c0; +CREATE TABLE test_no_loop.t4 (c0 Int) ENGINE = MergeTree ORDER BY c0; +CREATE TABLE test_no_loop.t5 (c0 Int) ENGINE = MergeTree ORDER BY c0; +CREATE TABLE test_no_loop.t6 (c0 Int) ENGINE = MergeTree ORDER BY c0; +CREATE TABLE test_no_loop.t7 (c0 Int) ENGINE = MergeTree ORDER BY c0; +CREATE TABLE test_no_loop.t8 (c0 Int) ENGINE = MergeTree ORDER BY c0; +DETACH TABLE test_no_loop.t0; +DETACH TABLE test_no_loop.t1; +DETACH TABLE test_no_loop.t2; +DETACH TABLE test_no_loop.t3; +DETACH TABLE test_no_loop.t4; +DETACH TABLE test_no_loop.t5; +DETACH TABLE test_no_loop.t6; +DETACH TABLE test_no_loop.t7; +DETACH TABLE test_no_loop.t8; +SELECT count(*) FROM system.detached_tables WHERE database='test_no_loop'; + +DROP DATABASE test_no_loop; + +SELECT '-----------------------'; +SELECT 'max_block_size is equal to the amount of tables'; + +DROP DATABASE IF EXISTS test_no_loop_2; +CREATE DATABASE test_no_loop_2; + +SET max_block_size = 3; +CREATE TABLE test_no_loop_2.t0 (c0 Int) ENGINE = MergeTree ORDER BY c0; +CREATE TABLE test_no_loop_2.t1 (c0 Int) ENGINE = MergeTree ORDER BY c0; +CREATE TABLE test_no_loop_2.t2 (c0 Int) ENGINE = MergeTree ORDER BY c0; +DETACH TABLE test_no_loop_2.t0; +DETACH TABLE test_no_loop_2.t1; +DETACH TABLE test_no_loop_2.t2; +SELECT count(*) FROM system.detached_tables WHERE database='test_no_loop_2'; + +DROP DATABASE test_no_loop_2; diff --git a/parser/testdata/03173_check_cyclic_dependencies_on_create_and_rename/ast.json b/parser/testdata/03173_check_cyclic_dependencies_on_create_and_rename/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03173_check_cyclic_dependencies_on_create_and_rename/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03173_check_cyclic_dependencies_on_create_and_rename/metadata.json b/parser/testdata/03173_check_cyclic_dependencies_on_create_and_rename/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03173_check_cyclic_dependencies_on_create_and_rename/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03173_check_cyclic_dependencies_on_create_and_rename/query.sql b/parser/testdata/03173_check_cyclic_dependencies_on_create_and_rename/query.sql new file mode 100644 index 000000000..0cadd4f5c --- /dev/null +++ b/parser/testdata/03173_check_cyclic_dependencies_on_create_and_rename/query.sql @@ -0,0 +1,77 @@ +-- Tags: atomic-database + +DROP TABLE IF EXISTS test; +CREATE TABLE test (id UInt64, value String) ENGINE=MergeTree ORDER BY id; +INSERT INTO test SELECT number, 'str_' || toString(number) FROM numbers(10); +DROP DICTIONARY IF EXISTS test_dict; +CREATE DICTIONARY test_dict +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE test)) +LAYOUT(FLAT()) +LIFETIME(MIN 0 MAX 1000); +DROP TABLE IF EXISTS view_source; +CREATE TABLE view_source (id UInt64) ENGINE=MergeTree ORDER BY id; +INSERT INTO view_source SELECT * FROM numbers(5); +DROP VIEW IF EXISTS view; +CREATE VIEW view AS SELECT id, dictGet('test_dict', 'value', id) as value FROM view_source; + +CREATE OR REPLACE DICTIONARY test_dict +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE view)) +LAYOUT(FLAT()) +LIFETIME(MIN 0 MAX 1000); -- {serverError INFINITE_LOOP} + +REPLACE DICTIONARY test_dict +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE view)) +LAYOUT(FLAT()) +LIFETIME(MIN 0 MAX 1000); -- {serverError INFINITE_LOOP} + + +DROP DICTIONARY IF EXISTS test_dict_2; +CREATE DICTIONARY test_dict_2 +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE view)) +LAYOUT(FLAT()) +LIFETIME(MIN 0 MAX 1000); + +EXCHANGE DICTIONARIES test_dict AND test_dict_2; -- {serverError INFINITE_LOOP} + +DROP DICTIONARY test_dict_2; + +CREATE OR REPLACE DICTIONARY test_dict_2 +( + id UInt64, + value String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE view)) +LAYOUT(FLAT()) +LIFETIME(MIN 0 MAX 1000); + +EXCHANGE DICTIONARIES test_dict AND test_dict_2; -- {serverError INFINITE_LOOP} + +DROP DICTIONARY test_dict; +RENAME DICTIONARY test_dict_2 to test_dict; -- {serverError INFINITE_LOOP} + +DROP DICTIONARY test_dict_2; +DROP VIEW view; +DROP TABLE test; +DROP TABLE view_source; + diff --git a/parser/testdata/03173_distinct_combinator_alignment/ast.json b/parser/testdata/03173_distinct_combinator_alignment/ast.json new file mode 100644 index 000000000..e83dd54f4 --- /dev/null +++ b/parser/testdata/03173_distinct_combinator_alignment/ast.json @@ -0,0 +1,169 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function topKDistinctState (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + 
}, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function map (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Literal Tuple_(UInt64_3, UInt64_4)" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Function map (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal Tuple_(UInt64_1, UInt64_2)" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Literal Tuple_(UInt64_3, UInt64_4)" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 49, + + "statistics": + { + "elapsed": 0.001548517, + "rows_read": 49, + "bytes_read": 2032 + } +} diff --git a/parser/testdata/03173_distinct_combinator_alignment/metadata.json b/parser/testdata/03173_distinct_combinator_alignment/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03173_distinct_combinator_alignment/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03173_distinct_combinator_alignment/query.sql b/parser/testdata/03173_distinct_combinator_alignment/query.sql new file mode 100644 index 000000000..4a066be50 --- /dev/null +++ b/parser/testdata/03173_distinct_combinator_alignment/query.sql @@ -0,0 +1 @@ +SELECT toTypeName(topKDistinctState(toNullable(10))(toString(number)) IGNORE NULLS) FROM numbers(100) GROUP BY tuple((map((materialize(toNullable(1)), 2), 4, (3, 4), 5), 3)), map((1, 2), 4, (3, 4), toNullable(5)) WITH CUBE WITH TOTALS FORMAT Null diff --git a/parser/testdata/03173_forbid_qualify/ast.json b/parser/testdata/03173_forbid_qualify/ast.json new file mode 100644 index 000000000..b440bfe1e --- /dev/null +++ b/parser/testdata/03173_forbid_qualify/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_qualify (children 1)" + }, + { + "explain": " Identifier test_qualify" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001179738, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/03173_forbid_qualify/metadata.json b/parser/testdata/03173_forbid_qualify/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03173_forbid_qualify/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03173_forbid_qualify/query.sql b/parser/testdata/03173_forbid_qualify/query.sql new file mode 100644 index 000000000..f7b05a1eb --- /dev/null +++ b/parser/testdata/03173_forbid_qualify/query.sql @@ -0,0 +1,11 @@ +drop table if 
exists test_qualify; +create table test_qualify (number Int64) ENGINE = MergeTree ORDER BY (number); + +insert into test_qualify SELECT * FROM numbers(100); + +select count() from test_qualify; -- 100 +select * from test_qualify qualify row_number() over (order by number) = 50 SETTINGS enable_analyzer = 1; -- 49 +select * from test_qualify qualify row_number() over (order by number) = 50 SETTINGS enable_analyzer = 0; -- { serverError NOT_IMPLEMENTED } + +delete from test_qualify where number in (select number from test_qualify qualify row_number() over (order by number) = 50) SETTINGS validate_mutation_query = 0; -- { serverError UNFINISHED } +select count() from test_qualify; -- 100 diff --git a/parser/testdata/03173_set_transformed_partition_pruning/ast.json b/parser/testdata/03173_set_transformed_partition_pruning/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03173_set_transformed_partition_pruning/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03173_set_transformed_partition_pruning/metadata.json b/parser/testdata/03173_set_transformed_partition_pruning/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03173_set_transformed_partition_pruning/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03173_set_transformed_partition_pruning/query.sql b/parser/testdata/03173_set_transformed_partition_pruning/query.sql new file mode 100644 index 000000000..4d7b12bf5 --- /dev/null +++ b/parser/testdata/03173_set_transformed_partition_pruning/query.sql @@ -0,0 +1,261 @@ +-- Tags: no-msan, long +-- msan: too slow + +SELECT '-- Single partition by function'; + +DROP TABLE IF EXISTS 03173_single_function; +CREATE TABLE 03173_single_function ( + dt Date, +) +ENGINE = MergeTree +ORDER BY tuple() +PARTITION BY toMonth(dt); + +INSERT INTO 03173_single_function +SELECT toDate('2000-01-01') + 10 * number FROM numbers(50) +UNION ALL +SELECT toDate('2100-01-01') + 10 * number FROM numbers(50); +OPTIMIZE TABLE 03173_single_function FINAL; + +SELECT count() FROM 03173_single_function WHERE dt IN ('2024-01-20', '2024-05-25') SETTINGS log_comment='03173_single_function'; +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['SelectedParts'] FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = '03173_single_function'; + +DROP TABLE IF EXISTS 03173_single_function; + +SELECT '-- Nested partition by function'; + +DROP TABLE IF EXISTS 03173_nested_function; +CREATE TABLE 03173_nested_function( + id Int32, +) +ENGINE = MergeTree +ORDER BY tuple() +PARTITION BY xxHash32(id) % 3; + +INSERT INTO 03173_nested_function SELECT number FROM numbers(100); +OPTIMIZE TABLE 03173_nested_function FINAL; + +SELECT count() FROM 03173_nested_function WHERE id IN (10) SETTINGS log_comment='03173_nested_function'; +SELECT count() FROM 03173_nested_function WHERE xxHash32(id) IN (2158931063, 1449383981) SETTINGS log_comment='03173_nested_function_subexpr'; +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['SelectedParts'] FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = '03173_nested_function'; +SELECT ProfileEvents['SelectedParts'] FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = '03173_nested_function_subexpr'; + +DROP TABLE IF EXISTS 03173_nested_function; + +SELECT '-- Nested partition by function, LowCardinality'; + +SET 
allow_suspicious_low_cardinality_types = 1; + +DROP TABLE IF EXISTS 03173_nested_function_lc; +CREATE TABLE 03173_nested_function_lc( + id LowCardinality(Int32), +) +ENGINE = MergeTree +ORDER BY tuple() +PARTITION BY xxHash32(id) % 3; + +INSERT INTO 03173_nested_function_lc SELECT number FROM numbers(100); +OPTIMIZE TABLE 03173_nested_function_lc FINAL; + +SELECT count() FROM 03173_nested_function_lc WHERE id IN (10) SETTINGS log_comment='03173_nested_function_lc'; +SELECT count() FROM 03173_nested_function_lc WHERE xxHash32(id) IN (2158931063, 1449383981) SETTINGS log_comment='03173_nested_function_subexpr_lc'; +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['SelectedParts'] FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = '03173_nested_function_lc'; +SELECT ProfileEvents['SelectedParts'] FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = '03173_nested_function_subexpr_lc'; + +DROP TABLE IF EXISTS 03173_nested_function_lc; + +SELECT '-- Nested partition by function, Nullable'; + +DROP TABLE IF EXISTS 03173_nested_function_null; +CREATE TABLE 03173_nested_function_null( + id Nullable(Int32), +) +ENGINE = MergeTree +ORDER BY tuple() +PARTITION BY xxHash32(id) % 3 +SETTINGS allow_nullable_key=1; + +INSERT INTO 03173_nested_function_null SELECT number FROM numbers(100); +OPTIMIZE TABLE 03173_nested_function_null FINAL; + +SELECT count() FROM 03173_nested_function_null WHERE id IN (10) SETTINGS log_comment='03173_nested_function_null'; +SELECT count() FROM 03173_nested_function_null WHERE xxHash32(id) IN (2158931063, 1449383981) SETTINGS log_comment='03173_nested_function_subexpr_null'; +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['SelectedParts'] FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = '03173_nested_function_null'; +SELECT ProfileEvents['SelectedParts'] FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = '03173_nested_function_subexpr_null'; + +DROP TABLE IF EXISTS 03173_nested_function_null; + +SELECT '-- Nested partition by function, LowCardinality + Nullable'; + +DROP TABLE IF EXISTS 03173_nested_function_lc_null; + +SET allow_suspicious_low_cardinality_types = 1; +CREATE TABLE 03173_nested_function_lc_null( + id LowCardinality(Nullable(Int32)), +) +ENGINE = MergeTree +ORDER BY tuple() +PARTITION BY xxHash32(id) % 3 +SETTINGS allow_nullable_key=1; + +INSERT INTO 03173_nested_function_lc_null SELECT number FROM numbers(100); +OPTIMIZE TABLE 03173_nested_function_lc_null FINAL; + +SELECT count() FROM 03173_nested_function_lc_null WHERE id IN (10) SETTINGS log_comment='03173_nested_function_lc_null'; +SELECT count() FROM 03173_nested_function_lc_null WHERE xxHash32(id) IN (2158931063, 1449383981) SETTINGS log_comment='03173_nested_function_subexpr_lc_null'; +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['SelectedParts'] FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = '03173_nested_function_lc_null'; +SELECT ProfileEvents['SelectedParts'] FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = '03173_nested_function_subexpr_lc_null'; + +DROP TABLE IF EXISTS 03173_nested_function_lc_null; + +SELECT '-- Non-safe cast'; + +DROP TABLE IF EXISTS 03173_nonsafe_cast; +CREATE TABLE 03173_nonsafe_cast( + id Int64, +) +ENGINE = MergeTree 
+ORDER BY tuple() +PARTITION BY xxHash32(id) % 3; + +INSERT INTO 03173_nonsafe_cast SELECT number FROM numbers(100); +OPTIMIZE TABLE 03173_nonsafe_cast FINAL; + +SELECT count() FROM 03173_nonsafe_cast WHERE id IN (SELECT '50' UNION ALL SELECT '99') SETTINGS log_comment='03173_nonsafe_cast'; +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['SelectedParts'] FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = '03173_nonsafe_cast'; + +DROP TABLE IF EXISTS 03173_nonsafe_cast; + +SELECT '-- Multiple partition columns'; + +DROP TABLE IF EXISTS 03173_multiple_partition_cols; +CREATE TABLE 03173_multiple_partition_cols ( + key1 Int32, + key2 Int32 +) +ENGINE = MergeTree +ORDER BY tuple() +PARTITION BY (intDiv(key1, 50), xxHash32(key2) % 3); + +INSERT INTO 03173_multiple_partition_cols SELECT number, number FROM numbers(100); +OPTIMIZE TABLE 03173_multiple_partition_cols FINAL; + +SELECT count() FROM 03173_multiple_partition_cols WHERE key2 IN (4) SETTINGS log_comment='03173_multiple_columns'; +SELECT count() FROM 03173_multiple_partition_cols WHERE xxHash32(key2) IN (4251411170) SETTINGS log_comment='03173_multiple_columns_subexpr'; +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['SelectedParts'] FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = '03173_multiple_columns'; +-- Due to xxHash32() in the WHERE condition, MinMax is unable to eliminate any parts, +-- so partition pruning leaves two parts (for key1 // 50 = 0 and key1 // 50 = 1) +SELECT ProfileEvents['SelectedParts'] FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = '03173_multiple_columns_subexpr'; + +-- Preparing base table for filtering by LowCardinality/Nullable sets +DROP TABLE IF EXISTS 03173_base_data_source; +CREATE TABLE 03173_base_data_source( + id Int32, +) +ENGINE = MergeTree +ORDER BY tuple() +PARTITION BY xxHash32(id) % 3; + +INSERT INTO 03173_base_data_source SELECT number FROM numbers(100); +OPTIMIZE TABLE 03173_base_data_source FINAL; + +SELECT '-- LowCardinality set'; + +SET allow_suspicious_low_cardinality_types = 1; +DROP TABLE IF EXISTS 03173_low_cardinality_set; +CREATE TABLE 03173_low_cardinality_set (id LowCardinality(Int32)) ENGINE=Memory AS SELECT 10; + +SELECT count() FROM 03173_base_data_source WHERE id IN (SELECT id FROM 03173_low_cardinality_set) SETTINGS log_comment='03173_low_cardinality_set'; +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['SelectedParts'] FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = '03173_low_cardinality_set'; + +DROP TABLE IF EXISTS 03173_low_cardinality_set; + +SELECT '-- Nullable set'; + +DROP TABLE IF EXISTS 03173_nullable_set; +CREATE TABLE 03173_nullable_set (id Nullable(Int32)) ENGINE=Memory AS SELECT 10; + +SELECT count() FROM 03173_base_data_source WHERE id IN (SELECT id FROM 03173_nullable_set) SETTINGS log_comment='03173_nullable_set'; +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['SelectedParts'] FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = '03173_nullable_set'; + +DROP TABLE IF EXISTS 03173_nullable_set; + +SELECT '-- LowCardinality + Nullable set'; + +DROP TABLE IF EXISTS 03173_lc_nullable_set; +CREATE TABLE 03173_lc_nullable_set (id LowCardinality(Nullable(Int32))) ENGINE=Memory AS SELECT 10 UNION ALL SELECT NULL; + +SELECT count() FROM 03173_base_data_source WHERE id IN 
(SELECT id FROM 03173_lc_nullable_set) SETTINGS log_comment='03173_lc_nullable_set'; +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['SelectedParts'] FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = '03173_lc_nullable_set'; + +DROP TABLE IF EXISTS 03173_lc_nullable_set; + +SELECT '-- Not failing with date parsing functions'; + +DROP TABLE IF EXISTS 03173_date_parsing; +CREATE TABLE 03173_date_parsing ( + id String +) +ENGINE=MergeTree +ORDER BY tuple() +PARTITION BY toDate(id); + +INSERT INTO 03173_date_parsing +SELECT toString(toDate('2023-04-01') + number) +FROM numbers(20); + +SELECT count() FROM 03173_date_parsing WHERE id IN ('2023-04-02', '2023-05-02'); +SELECT count() FROM 03173_date_parsing WHERE id IN ('not a date'); + +DROP TABLE IF EXISTS 03173_date_parsing; + +SELECT '-- Pruning + not failing with nested date parsing functions'; + +DROP TABLE IF EXISTS 03173_nested_date_parsing; +CREATE TABLE 03173_nested_date_parsing ( + id String +) +ENGINE=MergeTree +ORDER BY tuple() +PARTITION BY toMonth(toDate(id)); + +INSERT INTO 03173_nested_date_parsing +SELECT toString(toDate('2000-01-01') + 10 * number) FROM numbers(50) +UNION ALL +SELECT toString(toDate('2100-01-01') + 10 * number) FROM numbers(50); + +SELECT count() FROM 03173_nested_date_parsing WHERE id IN ('2000-01-21', '2023-05-02') SETTINGS log_comment='03173_nested_date_parsing', session_timezone = ''; +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['SelectedParts'] FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = '03173_nested_date_parsing'; +SELECT count() FROM 03173_nested_date_parsing WHERE id IN ('not a date'); + +DROP TABLE IF EXISTS 03173_nested_date_parsing; + +SELECT '-- Empty transform functions'; + +DROP TABLE IF EXISTS 03173_empty_transform; +CREATE TABLE 03173_empty_transform( + id Int32, +) +ENGINE = MergeTree +ORDER BY tuple() +PARTITION BY xxHash32(id) % 3; + +INSERT INTO 03173_empty_transform SELECT number FROM numbers(6); +OPTIMIZE TABLE 03173_empty_transform FINAL; + +SELECT id FROM 03173_empty_transform WHERE xxHash32(id) % 3 IN (xxHash32(2::Int32) % 3) SETTINGS log_comment='03173_empty_transform'; +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['SelectedParts'] FROM system.query_log WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = '03173_empty_transform'; + +DROP TABLE IF EXISTS 03173_empty_transform; diff --git a/parser/testdata/03174_exact_rows_before_aggregation/ast.json b/parser/testdata/03174_exact_rows_before_aggregation/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03174_exact_rows_before_aggregation/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03174_exact_rows_before_aggregation/metadata.json b/parser/testdata/03174_exact_rows_before_aggregation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03174_exact_rows_before_aggregation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03174_exact_rows_before_aggregation/query.sql b/parser/testdata/03174_exact_rows_before_aggregation/query.sql new file mode 100644 index 000000000..f9fd4ef5a --- /dev/null +++ b/parser/testdata/03174_exact_rows_before_aggregation/query.sql @@ -0,0 +1,47 @@ +-- Tags: no-parallel, no-random-merge-tree-settings + +set rows_before_aggregation = 1, exact_rows_before_limit = 1, output_format_write_statistics = 0, 
max_block_size = 100; + +drop table if exists test; + +create table test (i int) engine MergeTree order by tuple(); +insert into test select arrayJoin(range(10000)); + +select * from test where i < 10 group by i order by i FORMAT JSONCompact; +select * from test where i < 10 group by i order by i FORMAT XML; +select * from test group by i having i in (10, 11, 12) order by i FORMAT JSONCompact; +select * from test where i < 20 group by i order by i FORMAT JSONCompact; +select max(i) from test where i < 20 limit 1 FORMAT JSONCompact; + +set prefer_localhost_replica = 0; +select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 group by i order by i FORMAT JSONCompact; +select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 20 group by i order by i FORMAT JSONCompact; + +set prefer_localhost_replica = 1; +select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 group by i order by i FORMAT JSONCompact; +select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 20 group by i order by i FORMAT JSONCompact; + +select max(i) from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 20 FORMAT JSONCompact; + +select * from (select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 10) group by i order by i limit 10 FORMAT JSONCompact; +set prefer_localhost_replica = 0; +select * from (select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 10) group by i order by i limit 10 FORMAT JSONCompact; + +drop table if exists test; + +create table test (i int) engine MergeTree order by i; + +insert into test select arrayJoin(range(10000)); + +set optimize_aggregation_in_order=1; + +select * from test where i < 10 group by i order by i FORMAT JSONCompact; +select max(i) from test where i < 20 limit 1 FORMAT JSONCompact; + +set prefer_localhost_replica = 0; +select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 group by i order by i FORMAT JSONCompact; + +set prefer_localhost_replica = 1; +select * from cluster(test_cluster_two_shards, currentDatabase(), test) where i < 30 group by i order by i FORMAT JSONCompact; + +drop table if exists test; diff --git a/parser/testdata/03174_least_greatest_ignore_null_input_values/ast.json b/parser/testdata/03174_least_greatest_ignore_null_input_values/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03174_least_greatest_ignore_null_input_values/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03174_least_greatest_ignore_null_input_values/metadata.json b/parser/testdata/03174_least_greatest_ignore_null_input_values/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03174_least_greatest_ignore_null_input_values/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03174_least_greatest_ignore_null_input_values/query.sql b/parser/testdata/03174_least_greatest_ignore_null_input_values/query.sql new file mode 100644 index 000000000..8b936cce6 --- /dev/null +++ b/parser/testdata/03174_least_greatest_ignore_null_input_values/query.sql @@ -0,0 +1,61 @@ +-- Tests functions "greatest" and "least" with NULL arguments + +SELECT 'Test with default NULL behavior'; +SET least_greatest_legacy_null_behavior = default; + +SELECT 'Test with one const argument'; +SELECT greatest(NULL), least(NULL); + +SELECT 'Test with two const arguments'; +SELECT greatest(1, 
NULL), least(1, NULL); +SELECT greatest(NULL, 1), least(NULL, 1); +SELECT greatest(NULL, 1.1), least(NULL, 1.1); +SELECT greatest(1.1, NULL), least(1.1, NULL); +SELECT greatest(NULL, 'a'), least(NULL, 'a'); +SELECT greatest('a', NULL), least('a', NULL); + +SELECT 'Test with one non-const argument'; +SELECT greatest(materialize(NULL)), least(materialize(NULL)); + +SELECT 'Test with two non-const arguments'; +SELECT greatest(materialize(1), NULL), least(materialize(1), NULL); +SELECT greatest(materialize(NULL), 1), least(materialize(NULL), 1); +SELECT greatest(materialize(NULL), 1.1), least(materialize(NULL), 1.1); +SELECT greatest(materialize(1.1), NULL), least(materialize(1.1), NULL); +SELECT greatest(materialize(NULL), 'a'), least(materialize(NULL), 'a'); +SELECT greatest(materialize('a'), NULL), least(materialize('a'), NULL); + +SELECT 'Special cases'; +SELECT greatest(toNullable(1), 2), least(toNullable(1), 2); +SELECT greatest(toLowCardinality(1), NULL), least(toLowCardinality(1), NULL); + +-- ---------------------------------------------------------------------------- + +SELECT 'Repeat above tests with legacy NULL behavior'; +SET least_greatest_legacy_null_behavior = true; + +SELECT 'Test with one const argument'; +SELECT greatest(NULL), least(NULL); + +SELECT 'Test with two const arguments'; +SELECT greatest(1, NULL), least(1, NULL); +SELECT greatest(NULL, 1), least(NULL, 1); +SELECT greatest(NULL, 1.1), least(NULL, 1.1); +SELECT greatest(1.1, NULL), least(1.1, NULL); +SELECT greatest(NULL, 'a'), least(NULL, 'a'); +SELECT greatest('a', NULL), least('a', NULL); + +SELECT 'Test with one non-const argument'; +SELECT greatest(materialize(NULL)), least(materialize(NULL)); + +SELECT 'Test with two non-const arguments'; +SELECT greatest(materialize(1), NULL), least(materialize(1), NULL); +SELECT greatest(materialize(NULL), 1), least(materialize(NULL), 1); +SELECT greatest(materialize(NULL), 1.1), least(materialize(NULL), 1.1); +SELECT greatest(materialize(1.1), NULL), least(materialize(1.1), NULL); +SELECT greatest(materialize(NULL), 'a'), least(materialize(NULL), 'a'); +SELECT greatest(materialize('a'), NULL), least(materialize('a'), NULL); + +SELECT 'Special cases'; +SELECT greatest(toNullable(1), 2), least(toNullable(1), 2); +SELECT greatest(toLowCardinality(1), NULL), least(toLowCardinality(1), NULL); diff --git a/parser/testdata/03174_merge_join_bug/ast.json b/parser/testdata/03174_merge_join_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03174_merge_join_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03174_merge_join_bug/metadata.json b/parser/testdata/03174_merge_join_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03174_merge_join_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03174_merge_join_bug/query.sql b/parser/testdata/03174_merge_join_bug/query.sql new file mode 100644 index 000000000..ab3c38476 --- /dev/null +++ b/parser/testdata/03174_merge_join_bug/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-random-settings + +SET enable_analyzer=1, join_algorithm = 'full_sorting_merge'; +CREATE TABLE xxxx_yyy (key UInt32, key_b ALIAS key) ENGINE=MergeTree() ORDER BY key SETTINGS ratio_of_defaults_for_sparse_serialization=0.0; +INSERT INTO xxxx_yyy SELECT number FROM numbers(10); + +SELECT * +FROM xxxx_yyy AS a +INNER JOIN xxxx_yyy AS b ON a.key = b.key_b +ORDER BY a.key; diff --git 
a/parser/testdata/03174_multiple_authentication_methods_show_create/ast.json b/parser/testdata/03174_multiple_authentication_methods_show_create/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03174_multiple_authentication_methods_show_create/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03174_multiple_authentication_methods_show_create/metadata.json b/parser/testdata/03174_multiple_authentication_methods_show_create/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03174_multiple_authentication_methods_show_create/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03174_multiple_authentication_methods_show_create/query.sql b/parser/testdata/03174_multiple_authentication_methods_show_create/query.sql new file mode 100644 index 000000000..f9e9e72e5 --- /dev/null +++ b/parser/testdata/03174_multiple_authentication_methods_show_create/query.sql @@ -0,0 +1,13 @@ +-- Tags: no-fasttest, no-parallel + +-- Create user with a mix of implicit and explicit auth types, starting with WITH +CREATE USER u_03174_multiple_auth_show_create IDENTIFIED WITH plaintext_password by '1', by '2', bcrypt_password by '3', by '4'; +SHOW CREATE USER u_03174_multiple_auth_show_create; + +DROP USER IF EXISTS u_03174_multiple_auth_show_create; + +-- Create user with a mix of implicit and explicit auth types, starting with BY +CREATE USER u_03174_multiple_auth_show_create IDENTIFIED by '1', plaintext_password by '2', bcrypt_password by '3', by '4'; +SHOW CREATE USER u_03174_multiple_auth_show_create; + +DROP USER IF EXISTS u_03174_multiple_auth_show_create; diff --git a/parser/testdata/03174_projection_deduplicate/ast.json b/parser/testdata/03174_projection_deduplicate/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03174_projection_deduplicate/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03174_projection_deduplicate/metadata.json b/parser/testdata/03174_projection_deduplicate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03174_projection_deduplicate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03174_projection_deduplicate/query.sql b/parser/testdata/03174_projection_deduplicate/query.sql new file mode 100644 index 000000000..f43f0a1f2 --- /dev/null +++ b/parser/testdata/03174_projection_deduplicate/query.sql @@ -0,0 +1,30 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/65548 +DROP TABLE IF EXISTS test_projection_deduplicate; + +CREATE TABLE test_projection_deduplicate +( + `id` Int32, + `string` String, + PROJECTION test_projection + ( + SELECT id + GROUP BY id + ) +) +ENGINE = MergeTree +PRIMARY KEY id; + +INSERT INTO test_projection_deduplicate VALUES (1, 'one'); +INSERT INTO test_projection_deduplicate VALUES (1, 'one'); + +OPTIMIZE TABLE test_projection_deduplicate DEDUPLICATE; -- { serverError SUPPORT_IS_DISABLED } + +SELECT * FROM test_projection_deduplicate; + +ALTER TABLE test_projection_deduplicate DROP PROJECTION test_projection; + +OPTIMIZE TABLE test_projection_deduplicate DEDUPLICATE; + +SELECT * FROM test_projection_deduplicate; + +DROP TABLE test_projection_deduplicate; diff --git a/parser/testdata/03174_split_parts_ranges_into_intersecting_and_non_intersecting_final_and_read-in-order_bug/ast.json b/parser/testdata/03174_split_parts_ranges_into_intersecting_and_non_intersecting_final_and_read-in-order_bug/ast.json new
file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03174_split_parts_ranges_into_intersecting_and_non_intersecting_final_and_read-in-order_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03174_split_parts_ranges_into_intersecting_and_non_intersecting_final_and_read-in-order_bug/metadata.json b/parser/testdata/03174_split_parts_ranges_into_intersecting_and_non_intersecting_final_and_read-in-order_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03174_split_parts_ranges_into_intersecting_and_non_intersecting_final_and_read-in-order_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03174_split_parts_ranges_into_intersecting_and_non_intersecting_final_and_read-in-order_bug/query.sql b/parser/testdata/03174_split_parts_ranges_into_intersecting_and_non_intersecting_final_and_read-in-order_bug/query.sql new file mode 100644 index 000000000..c8da71b7f --- /dev/null +++ b/parser/testdata/03174_split_parts_ranges_into_intersecting_and_non_intersecting_final_and_read-in-order_bug/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-tsan, no-asan, no-msan, no-fasttest +-- Test is slow +create table tab (x DateTime('UTC'), y UInt32, v Int32) engine = ReplacingMergeTree(v) order by x; +insert into tab select toDateTime('2000-01-01', 'UTC') + number, number, 1 from numbers(1e7); +optimize table tab final; + +WITH (60 * 60) * 24 AS d +select toStartOfDay(x) as k, sum(y) as v, + (z + d) * (z + d - 1) / 2 - (toUInt64(k - toDateTime('2000-01-01', 'UTC')) as z) * (z - 1) / 2 as est, + est - v as delta +from tab final group by k order by k +settings max_threads=8, optimize_aggregation_in_order=1, split_parts_ranges_into_intersecting_and_non_intersecting_final=1; diff --git a/parser/testdata/03175_sparse_and_skip_index/ast.json b/parser/testdata/03175_sparse_and_skip_index/ast.json new file mode 100644 index 000000000..1e6387aff --- /dev/null +++ b/parser/testdata/03175_sparse_and_skip_index/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_bloom_filter (children 1)" + }, + { + "explain": " Identifier t_bloom_filter" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001814452, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/03175_sparse_and_skip_index/metadata.json b/parser/testdata/03175_sparse_and_skip_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03175_sparse_and_skip_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03175_sparse_and_skip_index/query.sql b/parser/testdata/03175_sparse_and_skip_index/query.sql new file mode 100644 index 000000000..e61770d83 --- /dev/null +++ b/parser/testdata/03175_sparse_and_skip_index/query.sql @@ -0,0 +1,46 @@ +DROP TABLE IF EXISTS t_bloom_filter; +CREATE TABLE t_bloom_filter( + key UInt64, + value UInt64, + + INDEX key_bf key TYPE bloom_filter(0.01) GRANULARITY 2147483648, -- bloom filter on sorting key column + INDEX value_bf value TYPE bloom_filter(0.01) GRANULARITY 2147483648 -- bloom filter on non-sorting column +) ENGINE=MergeTree ORDER BY key +SETTINGS + -- settings to trigger sparse serialization and vertical merge + ratio_of_defaults_for_sparse_serialization = 0.0, + vertical_merge_algorithm_min_rows_to_activate = 1, + vertical_merge_algorithm_min_columns_to_activate = 1, + allow_vertical_merges_from_compact_to_wide_parts = 1, +
min_bytes_for_wide_part = 0, + enable_block_number_column = 0, + enable_block_offset_column = 0; + +SYSTEM STOP MERGES t_bloom_filter; + +-- Create at least one part +INSERT INTO t_bloom_filter +SELECT + number % 100 as key, -- 100 unique keys + rand() % 100 as value -- 100 unique values +FROM numbers(15_000); + +-- And another part +INSERT INTO t_bloom_filter +SELECT + number % 100 as key, -- 100 unique keys + rand() % 100 as value -- 100 unique values +FROM numbers(15_000, 15_000); + +SYSTEM START MERGES t_bloom_filter; + +-- Merge everything into a single part +OPTIMIZE TABLE t_bloom_filter FINAL; + +-- Check sparse serialization +SELECT column, serialization_kind FROM system.parts_columns WHERE database = currentDatabase() AND table = 't_bloom_filter' AND active ORDER BY column; + +SELECT COUNT() FROM t_bloom_filter WHERE key = 1; + +-- Check bloom filter non-zero size +SELECT COUNT() FROM system.parts WHERE database = currentDatabase() AND table = 't_bloom_filter' AND secondary_indices_uncompressed_bytes > 200 AND active; diff --git a/parser/testdata/03176_check_timeout_in_index_analysis/ast.json b/parser/testdata/03176_check_timeout_in_index_analysis/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03176_check_timeout_in_index_analysis/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03176_check_timeout_in_index_analysis/metadata.json b/parser/testdata/03176_check_timeout_in_index_analysis/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03176_check_timeout_in_index_analysis/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03176_check_timeout_in_index_analysis/query.sql b/parser/testdata/03176_check_timeout_in_index_analysis/query.sql new file mode 100644 index 000000000..110d448b4 --- /dev/null +++ b/parser/testdata/03176_check_timeout_in_index_analysis/query.sql @@ -0,0 +1,25 @@ +-- Tags: no-tsan, no-asan, no-ubsan, no-msan, no-debug, no-fasttest + +CREATE TABLE t_03176(k UInt64, v UInt64) ENGINE=MergeTree() ORDER BY k PARTITION BY k; + +INSERT INTO t_03176 SELECT number, number FROM numbers(5); + +-- Table is partitioned by k, so it will have 5 partitions +SELECT count() FROM system.parts WHERE database = currentDatabase() AND table = 't_03176' AND active; + +-- This query is fast without failpoint: should take < 1 sec +EXPLAIN indexes = 1 SELECT * FROM t_03176 ORDER BY k LIMIT 5 SETTINGS log_comment = '03176_q1' FORMAT Null; + +-- Now the query should be cancelled +EXPLAIN indexes = 1 SELECT * FROM t_03176 ORDER BY k LIMIT 5 SETTINGS log_comment = '03176_q3', max_execution_time = 0.00001 FORMAT Null; -- { serverError TIMEOUT_EXCEEDED } + +SYSTEM FLUSH LOGS query_log; + +-- Check that q1 finished fast and q3 hit the timeout +SELECT log_comment, type = 'QueryFinish', intDiv(query_duration_ms, 2000), exception_code != 0, (position('selectPartsToRead' IN stack_trace) > 0 OR position('filterPartsByPartition' IN stack_trace) > 0) AS has_selectPartsToRead +FROM system.query_log +WHERE current_database = currentDatabase() AND log_comment LIKE '03176_q_' AND type IN ('QueryFinish', 'ExceptionBeforeStart') +ORDER BY log_comment; + +DROP TABLE t_03176; + diff --git a/parser/testdata/03195_group_concat_deserialization_fix/ast.json b/parser/testdata/03195_group_concat_deserialization_fix/ast.json new file mode 100644 index 000000000..62cf80fe7 --- /dev/null +++ b/parser/testdata/03195_group_concat_deserialization_fix/ast.json @@ -0,0 +1,28 @@ +{ +
"meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_serialization (children 1)" + }, + { + "explain": " Identifier test_serialization" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001290521, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/03195_group_concat_deserialization_fix/metadata.json b/parser/testdata/03195_group_concat_deserialization_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03195_group_concat_deserialization_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03195_group_concat_deserialization_fix/query.sql b/parser/testdata/03195_group_concat_deserialization_fix/query.sql new file mode 100644 index 000000000..337f1f3db --- /dev/null +++ b/parser/testdata/03195_group_concat_deserialization_fix/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS test_serialization; + +CREATE TABLE test_serialization +( + id UInt64, + text AggregateFunction(groupConcat, String) +) ENGINE = AggregatingMergeTree() ORDER BY id; + +INSERT INTO test_serialization SELECT + 1, + groupConcatState('First'); + +SELECT groupConcatMerge(text) AS concatenated_text FROM test_serialization GROUP BY id; + +INSERT INTO test_serialization SELECT + 2, + groupConcatState('Second'); + +SELECT groupConcatMerge(text) AS concatenated_text FROM test_serialization GROUP BY id ORDER BY id; + +DROP TABLE IF EXISTS test_serialization; + + diff --git a/parser/testdata/03196_max_intersections_arena_crash/ast.json b/parser/testdata/03196_max_intersections_arena_crash/ast.json new file mode 100644 index 000000000..49a40f30a --- /dev/null +++ b/parser/testdata/03196_max_intersections_arena_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery my_events (children 1)" + }, + { + "explain": " Identifier my_events" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001191695, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/03196_max_intersections_arena_crash/metadata.json b/parser/testdata/03196_max_intersections_arena_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03196_max_intersections_arena_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03196_max_intersections_arena_crash/query.sql b/parser/testdata/03196_max_intersections_arena_crash/query.sql new file mode 100644 index 000000000..b7269d7c4 --- /dev/null +++ b/parser/testdata/03196_max_intersections_arena_crash/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS my_events; +CREATE TABLE my_events (start UInt32, end UInt32) Engine = MergeTree ORDER BY tuple() + AS Select * FROM VALUES ('start UInt32, end UInt32', (1, 3), (1, 6), (2, 5), (3, 7)); +SELECT start, end, maxIntersections(start, end) OVER () FROM my_events; +SELECT start, end, maxIntersectionsPosition(start, end) OVER () FROM my_events; diff --git a/parser/testdata/03197_fix_parse_mysql_iso_date/ast.json b/parser/testdata/03197_fix_parse_mysql_iso_date/ast.json new file mode 100644 index 000000000..8603c61aa --- /dev/null +++ b/parser/testdata/03197_fix_parse_mysql_iso_date/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 
1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function parseDateTime (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '2024-06-20'" + }, + { + "explain": " Literal '%F'" + }, + { + "explain": " Literal 'UTC'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001340952, + "rows_read": 9, + "bytes_read": 333 + } +} diff --git a/parser/testdata/03197_fix_parse_mysql_iso_date/metadata.json b/parser/testdata/03197_fix_parse_mysql_iso_date/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03197_fix_parse_mysql_iso_date/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03197_fix_parse_mysql_iso_date/query.sql b/parser/testdata/03197_fix_parse_mysql_iso_date/query.sql new file mode 100644 index 000000000..e83738f72 --- /dev/null +++ b/parser/testdata/03197_fix_parse_mysql_iso_date/query.sql @@ -0,0 +1,2 @@ +SELECT parseDateTime('2024-06-20', '%F', 'UTC') AS x; +SELECT parseDateTime('06/20/24', '%D', 'UTC') AS x; diff --git a/parser/testdata/03197_storage_join_strictness_type_restriction/ast.json b/parser/testdata/03197_storage_join_strictness_type_restriction/ast.json new file mode 100644 index 000000000..b3fcae8c0 --- /dev/null +++ b/parser/testdata/03197_storage_join_strictness_type_restriction/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001291889, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03197_storage_join_strictness_type_restriction/metadata.json b/parser/testdata/03197_storage_join_strictness_type_restriction/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03197_storage_join_strictness_type_restriction/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03197_storage_join_strictness_type_restriction/query.sql b/parser/testdata/03197_storage_join_strictness_type_restriction/query.sql new file mode 100644 index 000000000..5aa3e4c2e --- /dev/null +++ b/parser/testdata/03197_storage_join_strictness_type_restriction/query.sql @@ -0,0 +1,42 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 +( + a Int64, + b Int64 +) Engine = Join(SEMI, ALL, a); -- { serverError BAD_ARGUMENTS } + +CREATE TABLE t1 +( + a Int64, + b Int64 +) Engine = Join(SEMI, INNER, a); -- { serverError BAD_ARGUMENTS } + +CREATE TABLE t1 +( + a Int64, + b Int64 +) Engine = Join(SEMI, FULL, a); -- { serverError BAD_ARGUMENTS } + +CREATE TABLE t1 +( + a Int64, + b Int64 +) Engine = Join(ANTI, ALL, a); -- { serverError BAD_ARGUMENTS } + +CREATE TABLE t1 +( + a Int64, + b Int64 +) Engine = Join(ANTI, INNER, a); -- { serverError BAD_ARGUMENTS } + +CREATE TABLE t1 +( + a Int64, + b Int64 +) Engine = Join(ANTI, FULL, a); -- { serverError BAD_ARGUMENTS } + +CREATE TABLE t1 +( + a Int64, + b Int64 +) Engine = Join(ANY, FULL, a); -- { serverError NOT_IMPLEMENTED } diff --git a/parser/testdata/03198_bit_shift_throws_error_for_out_of_bounds/ast.json b/parser/testdata/03198_bit_shift_throws_error_for_out_of_bounds/ast.json new file mode 100644 index 000000000..9166250dd --- /dev/null +++ b/parser/testdata/03198_bit_shift_throws_error_for_out_of_bounds/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { 
+ "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '-- bitShiftRight'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001435061, + "rows_read": 5, + "bytes_read": 187 + } +} diff --git a/parser/testdata/03198_bit_shift_throws_error_for_out_of_bounds/metadata.json b/parser/testdata/03198_bit_shift_throws_error_for_out_of_bounds/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03198_bit_shift_throws_error_for_out_of_bounds/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03198_bit_shift_throws_error_for_out_of_bounds/query.sql b/parser/testdata/03198_bit_shift_throws_error_for_out_of_bounds/query.sql new file mode 100644 index 000000000..58a2eb3ba --- /dev/null +++ b/parser/testdata/03198_bit_shift_throws_error_for_out_of_bounds/query.sql @@ -0,0 +1,29 @@ +SELECT '-- bitShiftRight'; +SELECT bitShiftRight(1, -1); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT bitShiftRight(toUInt32(1), 0); +SELECT bitShiftRight(toUInt32(1), 32); +SELECT bitShiftRight(toUInt32(1), 32 + 1); +SELECT bitShiftRight('hola', -1); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT bitShiftRight('hola', 0); +SELECT bitShiftRight('hola', 4 * 8); +SELECT bitShiftRight('hola', 4 * 8 + 1); +SELECT bitShiftRight(toFixedString('hola', 8), -1); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT bitShiftRight(toFixedString('hola', 8), 0); +SELECT bitShiftRight(toFixedString('hola', 8), 8 * 8); +SELECT bitShiftRight(toFixedString('hola', 8), 8 * 8 + 1); + +SELECT '-- bitShiftLeft'; +SELECT bitShiftLeft(1, -1); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT bitShiftLeft(toUInt32(1), 0); +SELECT bitShiftLeft(toUInt32(1), 32); +SELECT bitShiftLeft(toUInt32(1), 32 + 1); +SELECT bitShiftLeft('hola', -1); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT bitShiftLeft('hola', 0); +SELECT bitShiftLeft('hola', 4 * 8); +SELECT bitShiftLeft('hola', 4 * 8 + 1); +SELECT bitShiftLeft(toFixedString('hola', 8), -1); -- { serverError ARGUMENT_OUT_OF_BOUND } +SELECT bitShiftLeft(toFixedString('hola', 8), 0); +SELECT bitShiftLeft(toFixedString('hola', 8), 8 * 8); +SELECT bitShiftLeft(toFixedString('hola', 8), 8 * 8 + 1); + +SELECT 'OK'; diff --git a/parser/testdata/03198_dictionary_validate_primary_key_type/ast.json b/parser/testdata/03198_dictionary_validate_primary_key_type/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03198_dictionary_validate_primary_key_type/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03198_dictionary_validate_primary_key_type/metadata.json b/parser/testdata/03198_dictionary_validate_primary_key_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03198_dictionary_validate_primary_key_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03198_dictionary_validate_primary_key_type/query.sql b/parser/testdata/03198_dictionary_validate_primary_key_type/query.sql new file mode 100644 index 000000000..ff92ad0c7 --- /dev/null +++ b/parser/testdata/03198_dictionary_validate_primary_key_type/query.sql @@ -0,0 +1,41 @@ +CREATE DICTIONARY `test_dictionary0` ( + `n1` String, + `n2` UInt32 +) +PRIMARY KEY n1 +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 DB 'test_db' TABLE 'table_01' USER 'default')) +LIFETIME(MIN 1 MAX 10) 
+LAYOUT(FLAT()); + +SET dictionary_validate_primary_key_type=1; + +CREATE DICTIONARY `test_dictionary1` ( + `n1` String, + `n2` UInt32 +) +PRIMARY KEY n1 +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 DB 'test_db' TABLE 'table_01' USER 'default')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(FLAT()); -- { serverError BAD_ARGUMENTS } + +CREATE DICTIONARY `test_dictionary2` ( + `n1` UInt32, + `n2` UInt32 +) +PRIMARY KEY n1 +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 DB 'test_db' TABLE 'table_01' USER 'default')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(FLAT()); -- { serverError BAD_ARGUMENTS } + +CREATE DICTIONARY `test_dictionary3` ( + `n1` UInt64, + `n2` UInt32 +) +PRIMARY KEY n1 +SOURCE(CLICKHOUSE(HOST 'localhost' PORT 9000 DB 'test_db' TABLE 'table_01' USER 'default')) +LIFETIME(MIN 1 MAX 10) +LAYOUT(FLAT()); + +DESCRIBE `test_dictionary0`; +DESCRIBE `test_dictionary3`; + diff --git a/parser/testdata/03198_dynamic_read_subcolumns/ast.json b/parser/testdata/03198_dynamic_read_subcolumns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03198_dynamic_read_subcolumns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03198_dynamic_read_subcolumns/metadata.json b/parser/testdata/03198_dynamic_read_subcolumns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03198_dynamic_read_subcolumns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03198_dynamic_read_subcolumns/query.sql b/parser/testdata/03198_dynamic_read_subcolumns/query.sql new file mode 100644 index 000000000..edbb3a7c5 --- /dev/null +++ b/parser/testdata/03198_dynamic_read_subcolumns/query.sql @@ -0,0 +1,22 @@ +-- Tags: no-random-settings, no-object-storage, no-parallel +-- no-parallel: Running `DROP MARK CACHE` can have a big impact on other concurrent tests +-- Tag no-object-storage: this test relies on the number of opened files in MergeTree that can differ in object storages + +SET allow_experimental_dynamic_type = 1; +DROP TABLE IF EXISTS test_dynamic; +CREATE TABLE test_dynamic (id UInt64, d Dynamic) ENGINE = MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part = 0; +INSERT INTO test_dynamic VALUES (1, 'foo'), (2, 1111), (3, [1, 2, 3]); +EXPLAIN QUERY TREE SELECT d.String FROM test_dynamic SETTINGS enable_analyzer = 1; +SYSTEM DROP MARK CACHE; +SELECT d.String FROM test_dynamic SETTINGS enable_analyzer = 1; +SYSTEM DROP MARK CACHE; +SELECT d.String FROM test_dynamic SETTINGS enable_analyzer = 0; +SYSTEM FLUSH LOGS query_log; +SELECT + ProfileEvents['FileOpen'] +FROM system.query_log +WHERE (type = 2) AND (query LIKE 'SELECT d.String %test_dynamic%') AND (current_database = currentDatabase()) +ORDER BY event_time_microseconds DESC +LIMIT 2; + +DROP TABLE test_dynamic; diff --git a/parser/testdata/03198_group_array_intersect/ast.json b/parser/testdata/03198_group_array_intersect/ast.json new file mode 100644 index 000000000..922078d76 --- /dev/null +++ b/parser/testdata/03198_group_array_intersect/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_numbers__fuzz_29 (children 1)" + }, + { + "explain": " Identifier test_numbers__fuzz_29" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001058527, + "rows_read": 2, + "bytes_read": 94 + } +} diff --git a/parser/testdata/03198_group_array_intersect/metadata.json b/parser/testdata/03198_group_array_intersect/metadata.json new file mode 100644 index 000000000..ef120d978 
--- /dev/null +++ b/parser/testdata/03198_group_array_intersect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03198_group_array_intersect/query.sql b/parser/testdata/03198_group_array_intersect/query.sql new file mode 100644 index 000000000..ab7ba0b0d --- /dev/null +++ b/parser/testdata/03198_group_array_intersect/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS test_numbers__fuzz_29; +SET max_threads=1, max_insert_threads=1; +CREATE TABLE test_numbers__fuzz_29 (`a` Array(Nullable(FixedString(19)))) ENGINE = MergeTree ORDER BY a SETTINGS allow_nullable_key=1; + +INSERT INTO test_numbers__fuzz_29 VALUES ([1,2,3,4,5,6]); +INSERT INTO test_numbers__fuzz_29 VALUES ([1,2,4,5]); +INSERT INTO test_numbers__fuzz_29 VALUES ([1,4,3,0,5,5,5]); + +SELECT arraySort(groupArrayIntersect(*)) FROM test_numbers__fuzz_29 GROUP BY a WITH ROLLUP ORDER BY ALL; + +DROP TABLE test_numbers__fuzz_29; diff --git a/parser/testdata/03198_h3_polygon_to_cells/ast.json b/parser/testdata/03198_h3_polygon_to_cells/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03198_h3_polygon_to_cells/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03198_h3_polygon_to_cells/metadata.json b/parser/testdata/03198_h3_polygon_to_cells/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03198_h3_polygon_to_cells/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03198_h3_polygon_to_cells/query.sql b/parser/testdata/03198_h3_polygon_to_cells/query.sql new file mode 100644 index 000000000..47a2406a5 --- /dev/null +++ b/parser/testdata/03198_h3_polygon_to_cells/query.sql @@ -0,0 +1,32 @@ +-- Tags: no-fasttest + +-- example from h3 docs +SELECT arraySort(arrayMap(x -> h3ToString(x), h3PolygonToCells([(-122.4089866999972145,37.813318999983238),(-122.3544736999993603,37.7198061999978478),(-122.4798767000009008,37.8151571999998453)], 7))) + = ['872830820ffffff','872830828ffffff','87283082affffff','87283082bffffff','87283082effffff','872830870ffffff','872830876ffffff']; + +-- test both rings, polygons and multipolygons +DROP TABLE IF EXISTS rings; +DROP TABLE IF EXISTS polygons; +DROP TABLE IF EXISTS multipolygons; + +CREATE TABLE rings (ring Ring) ENGINE=Memory(); +CREATE TABLE polygons (polygon Polygon) ENGINE=Memory(); +CREATE TABLE multipolygons (multipolygon MultiPolygon) ENGINE=Memory(); + +INSERT INTO rings VALUES ([(55.66824,12.595493),(55.667901,12.593991),(55.667474,12.595117),(55.66824,12.595493)]); +-- expected: '8b63a9a9914cfff','8b63a9a99168fff','8b63a9a9916afff' +INSERT INTO polygons VALUES ([[(55.66824,12.595493),(55.667901,12.593991),(55.667474,12.595117),(55.66824,12.595493)],[(55.667695,12.595026),(55.667953,12.595085),(55.66788,12.594683),(55.667695,12.595026)]]); +-- expected: '8b63a9a9914cfff','8b63a9a99168fff' +INSERT INTO multipolygons VALUES ([[[(55.66824,12.595493),(55.667901,12.593991),(55.667474,12.595117),(55.66824,12.595493)],[(55.667695,12.595026),(55.667953,12.595085),(55.66788,12.594683),(55.667695,12.595026)]], [[(55.668461,12.597461),(55.668446,12.596694),(55.668797,12.596962),(55.668461,12.597461)]] ]); +-- expected: '8b63a9a9914cfff','8b63a9a99168fff', '8b63a9a99bb3fff' + +SELECT arraySort(arrayMap(x -> h3ToString(x), h3PolygonToCells(ring, 11))) + = ['8b63a9a9914cfff','8b63a9a99168fff','8b63a9a9916afff'] FROM rings; +SELECT arraySort(arrayMap(x -> h3ToString(x), h3PolygonToCells(polygon, 11))) + = ['8b63a9a9914cfff','8b63a9a99168fff'] FROM polygons; 
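+ +-- A hedged aside (inferred from the expected counts above, not asserted by the upstream fixture): in the Polygon type the first ring is the outer boundary and any later ring is a hole, which is why the polygon yields one cell fewer than the bare ring. Assuming the tables above are populated, the cell removed by the hole could be inspected with a query like the following (kept commented out so the fixture's behavior is unchanged): +-- SELECT arrayMap(x -> h3ToString(x), arrayFilter(c -> not has(h3PolygonToCells(polygon, 11), c), h3PolygonToCells(polygon[1], 11))) FROM polygons; -- expected, per the counts above: ['8b63a9a9916afff']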
+SELECT arraySort(arrayMap(x -> h3ToString(x), h3PolygonToCells(multipolygon, 11))) + = ['8b63a9a9914cfff','8b63a9a99168fff','8b63a9a99bb3fff'] FROM multipolygons; + +DROP TABLE rings; +DROP TABLE polygons; +DROP TABLE multipolygons; diff --git a/parser/testdata/03198_json_extract_more_types/ast.json b/parser/testdata/03198_json_extract_more_types/ast.json new file mode 100644 index 000000000..b996ff746 --- /dev/null +++ b/parser/testdata/03198_json_extract_more_types/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00123188, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03198_json_extract_more_types/metadata.json b/parser/testdata/03198_json_extract_more_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03198_json_extract_more_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03198_json_extract_more_types/query.sql b/parser/testdata/03198_json_extract_more_types/query.sql new file mode 100644 index 000000000..28d24bbb2 --- /dev/null +++ b/parser/testdata/03198_json_extract_more_types/query.sql @@ -0,0 +1,29 @@ +set allow_suspicious_low_cardinality_types=1; + +select JSONExtract('{"a" : "2020-01-01"}', 'a', 'Date'); +select JSONExtract('{"a" : "2020-01-01"}', 'a', 'Date32'); +select JSONExtract('{"a" : "2020-01-01 00:00:00"}', 'a', 'DateTime'); +select JSONExtract('{"a" : "2020-01-01 00:00:00.000000"}', 'a', 'DateTime64(6)'); +select JSONExtract('{"a" : "127.0.0.1"}', 'a', 'IPv4'); +select JSONExtract('{"a" : "2001:0db8:85a3:0000:0000:8a2e:0370:7334"}', 'a', 'IPv6'); + + +select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(UInt8)'); +select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(Int8)'); +select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(UInt16)'); +select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(Int16)'); +select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(UInt32)'); +select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(Int32)'); +select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(UInt64)'); +select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(Int64)'); + +select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(Float32)'); +select JSONExtract('{"a" : 42}', 'a', 'LowCardinality(Float32)'); + +select JSONExtract('{"a" : "Hello"}', 'a', 'LowCardinality(String)'); +select JSONExtract('{"a" : "Hello"}', 'a', 'LowCardinality(FixedString(5))'); +select JSONExtract('{"a" : "Hello"}', 'a', 'LowCardinality(FixedString(3))'); +select JSONExtract('{"a" : "Hello"}', 'a', 'LowCardinality(FixedString(10))'); + +select JSONExtract('{"a" : "5801c962-1182-458a-89f8-d077da5074f9"}', 'a', 'LowCardinality(UUID)'); + diff --git a/parser/testdata/03198_table_function_directory_path/ast.json b/parser/testdata/03198_table_function_directory_path/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03198_table_function_directory_path/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03198_table_function_directory_path/metadata.json b/parser/testdata/03198_table_function_directory_path/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03198_table_function_directory_path/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03198_table_function_directory_path/query.sql 
b/parser/testdata/03198_table_function_directory_path/query.sql new file mode 100644 index 000000000..34993ec4a --- /dev/null +++ b/parser/testdata/03198_table_function_directory_path/query.sql @@ -0,0 +1,15 @@ +-- Tags: no-parallel + +INSERT INTO FUNCTION file('data_03198_table_function_directory_path.csv', 'csv') SELECT '1.csv' SETTINGS engine_file_truncate_on_insert=1; +INSERT INTO FUNCTION file('data_03198_table_function_directory_path/1.csv', 'csv') SELECT '1.csv' SETTINGS engine_file_truncate_on_insert=1; +INSERT INTO FUNCTION file('data_03198_table_function_directory_path/2.csv', 'csv') SELECT '2.csv' SETTINGS engine_file_truncate_on_insert=1; +INSERT INTO FUNCTION file('data_03198_table_function_directory_path/dir/3.csv', 'csv') SELECT '3.csv' SETTINGS engine_file_truncate_on_insert=1; +INSERT INTO FUNCTION file('data_03198_table_function_directory_path/dir1/dir/4.csv', 'csv') SELECT '4.csv' SETTINGS engine_file_truncate_on_insert=1; +INSERT INTO FUNCTION file('data_03198_table_function_directory_path/dir2/dir/5.csv', 'csv') SELECT '5.csv' SETTINGS engine_file_truncate_on_insert=1; + +SELECT COUNT(*) FROM file('data_03198_table_function_directory_path'); +SELECT COUNT(*) FROM file('data_03198_table_function_directory_path/'); +SELECT COUNT(*) FROM file('data_03198_table_function_directory_path/dir'); +SELECT COUNT(*) FROM file('data_03198_table_function_directory_path/*/dir', 'csv'); -- { serverError CANNOT_READ_FROM_FILE_DESCRIPTOR, CANNOT_EXTRACT_TABLE_STRUCTURE } +SELECT COUNT(*) FROM file('data_03198_table_function_directory_pat'); -- { serverError CANNOT_STAT } +SELECT COUNT(*) FROM file('data_03198_table_function_directory_path.csv'); diff --git a/parser/testdata/03199_fix_auc_tie_handling/ast.json b/parser/testdata/03199_fix_auc_tie_handling/ast.json new file mode 100644 index 000000000..b3c96d6a1 --- /dev/null +++ b/parser/testdata/03199_fix_auc_tie_handling/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery labels_unordered (children 1)" + }, + { + "explain": " Identifier labels_unordered" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001597394, + "rows_read": 2, + "bytes_read": 85 + } +} diff --git a/parser/testdata/03199_fix_auc_tie_handling/metadata.json b/parser/testdata/03199_fix_auc_tie_handling/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03199_fix_auc_tie_handling/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03199_fix_auc_tie_handling/query.sql b/parser/testdata/03199_fix_auc_tie_handling/query.sql new file mode 100644 index 000000000..5de0844f4 --- /dev/null +++ b/parser/testdata/03199_fix_auc_tie_handling/query.sql @@ -0,0 +1,32 @@ +CREATE TABLE labels_unordered +( + idx Int64, + score Float64, + label Int64 +) +ENGINE = MergeTree +PRIMARY KEY idx +ORDER BY idx; + +SELECT floor(arrayAUC(array_concat_agg([score]), array_concat_agg([label])), 5) +FROM labels_unordered; + +INSERT INTO labels_unordered (idx,score,label) VALUES (1,0.1,0), (2,0.35,1), (3,0.4,0), (4,0.8,1), (5,0.8,0); + +SELECT floor(arrayAUC(array_concat_agg([score]), array_concat_agg([label])), 5) +FROM labels_unordered; + +CREATE TABLE labels_ordered +( + idx Int64, + score Float64, + label Int64 +) +ENGINE = MergeTree +PRIMARY KEY idx +ORDER BY idx; + +INSERT INTO labels_ordered (idx,score,label) VALUES (1,0.1,0), (2,0.35,1), (3,0.4,0), (4,0.8,0), (5,0.8,1); + +SELECT floor(arrayAUC(array_concat_agg([score]), 
array_concat_agg([label])), 5) +FROM labels_ordered; \ No newline at end of file diff --git a/parser/testdata/03199_has_lc_fixed_string/ast.json b/parser/testdata/03199_has_lc_fixed_string/ast.json new file mode 100644 index 000000000..b807fb7cf --- /dev/null +++ b/parser/testdata/03199_has_lc_fixed_string/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 03199_fixedstring_array (children 1)" + }, + { + "explain": " Identifier 03199_fixedstring_array" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001018949, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/03199_has_lc_fixed_string/metadata.json b/parser/testdata/03199_has_lc_fixed_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03199_has_lc_fixed_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03199_has_lc_fixed_string/query.sql b/parser/testdata/03199_has_lc_fixed_string/query.sql new file mode 100644 index 000000000..3cb551804 --- /dev/null +++ b/parser/testdata/03199_has_lc_fixed_string/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS 03199_fixedstring_array; +CREATE TABLE 03199_fixedstring_array (arr Array(LowCardinality(FixedString(8)))) ENGINE = Memory; +INSERT INTO 03199_fixedstring_array VALUES (['a', 'b']), (['c', 'd']); + +SELECT has(arr, toFixedString(materialize('a'), 1)) FROM 03199_fixedstring_array; + +DROP TABLE 03199_fixedstring_array; diff --git a/parser/testdata/03199_join_with_materialized_column/ast.json b/parser/testdata/03199_join_with_materialized_column/ast.json new file mode 100644 index 000000000..686844e58 --- /dev/null +++ b/parser/testdata/03199_join_with_materialized_column/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000973596, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03199_join_with_materialized_column/metadata.json b/parser/testdata/03199_join_with_materialized_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03199_join_with_materialized_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03199_join_with_materialized_column/query.sql b/parser/testdata/03199_join_with_materialized_column/query.sql new file mode 100644 index 000000000..3b7c5e52b --- /dev/null +++ b/parser/testdata/03199_join_with_materialized_column/query.sql @@ -0,0 +1,6 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS table_with_materialized; +CREATE TABLE table_with_materialized (col String MATERIALIZED 'A', ins Int Ephemeral) ENGINE = Memory; +SELECT number FROM numbers(1) AS n, table_with_materialized; +DROP TABLE table_with_materialized; diff --git a/parser/testdata/03199_json_extract_dynamic/ast.json b/parser/testdata/03199_json_extract_dynamic/ast.json new file mode 100644 index 000000000..5300503d6 --- /dev/null +++ b/parser/testdata/03199_json_extract_dynamic/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001120026, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03199_json_extract_dynamic/metadata.json b/parser/testdata/03199_json_extract_dynamic/metadata.json new file mode 100644 
index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03199_json_extract_dynamic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03199_json_extract_dynamic/query.sql b/parser/testdata/03199_json_extract_dynamic/query.sql new file mode 100644 index 000000000..2a01e7ed6 --- /dev/null +++ b/parser/testdata/03199_json_extract_dynamic/query.sql @@ -0,0 +1,38 @@ +set input_format_json_try_infer_numbers_from_strings=1; +set input_format_json_infer_array_of_dynamic_from_array_of_different_types=0; + +select JSONExtract(materialize('{"d" : true}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : 42}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : -42}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : 18446744073709551615}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : 42.42}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : "42"}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : "-42"}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : "18446744073709551615"}'), 'd', 'Dynamic') as d, dynamicType(d); + +select JSONExtract(materialize('{"d" : "Hello"}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : "2020-01-01"}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : "2020-01-01 00:00:00.000"}'), 'd', 'Dynamic') as d, dynamicType(d); + +select JSONExtract(materialize('{"d" : [1, 2, 3]}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : ["str1", "str2", "str3"]}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : [[[1], [2, 3, 4]], [[5, 6], [7]]]}'), 'd', 'Dynamic') as d, dynamicType(d); + +select JSONExtract(materialize('{"d" : ["2020-01-01", "2020-01-01 00:00:00"]}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : ["2020-01-01", "2020-01-01 date"]}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : ["2020-01-01", "2020-01-01 00:00:00", "str"]}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : ["2020-01-01", "2020-01-01 00:00:00", "42"]}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : ["str", "42"]}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : [42, 42.42]}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : [42, 18446744073709551615, 42.42]}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : [42, 42.42]}'), 'd', 'Dynamic') as d, dynamicType(d); + +select JSONExtract(materialize('{"d" : [null, null]}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : [null, 42]}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : [[null], [], [42]]}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"a" : [[], [null, null], ["1", null, "3"], [null, "2", null]]}'), 'a', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"a" : [[], [null, null], ["1", null, "3"], [null, "2", null], ["2020-01-01"]]}'), 'a', 'Dynamic') as d, dynamicType(d); + +select JSONExtract(materialize('{"d" : ["str", 42, [42]]}'), 'd', 'Dynamic') as d, dynamicType(d); +select JSONExtract(materialize('{"d" : [42, 18446744073709551615]}'), 'd', 'Dynamic') as d, 
dynamicType(d); +select JSONExtract(materialize('{"d" : [-42, 18446744073709551615]}'), 'd', 'Dynamic') as d, dynamicType(d); diff --git a/parser/testdata/03199_merge_filters_bug/ast.json b/parser/testdata/03199_merge_filters_bug/ast.json new file mode 100644 index 000000000..d9a972fee --- /dev/null +++ b/parser/testdata/03199_merge_filters_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001240109, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03199_merge_filters_bug/metadata.json b/parser/testdata/03199_merge_filters_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03199_merge_filters_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03199_merge_filters_bug/query.sql b/parser/testdata/03199_merge_filters_bug/query.sql new file mode 100644 index 000000000..696856c91 --- /dev/null +++ b/parser/testdata/03199_merge_filters_bug/query.sql @@ -0,0 +1,104 @@ +set allow_reorder_prewhere_conditions=0; + +drop table if exists t1; +drop table if exists t2; + +CREATE TABLE t1 +( + `s1` String, + `s2` String, + `s3` String +) +ENGINE = MergeTree +ORDER BY tuple(); + + +CREATE TABLE t2 +( + `fs1` FixedString(10), + `fs2` FixedString(10) +) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO t1 SELECT + repeat('t', 15) s1, + 'test' s2, + 'test' s3; + +INSERT INTO t1 SELECT + substring(s1, 1, 10), + s2, + s3 +FROM generateRandom('s1 String, s2 String, s3 String') +LIMIT 10000; + +INSERT INTO t2 SELECT * +FROM generateRandom() +LIMIT 10000; + +WITH +tmp1 AS +( + SELECT + CAST(s1, 'FixedString(10)') AS fs1, + s2 AS sector, + s3 + FROM t1 + WHERE (s3 != 'test') +) + SELECT + fs1 + FROM t2 + LEFT JOIN tmp1 USING (fs1) + WHERE (fs1 IN ('test')) SETTINGS enable_multiple_prewhere_read_steps = 0, query_plan_merge_filters=0; + +WITH +tmp1 AS +( + SELECT + CAST(s1, 'FixedString(10)') AS fs1, + s2 AS sector, + s3 + FROM t1 + WHERE (s3 != 'test') +) + SELECT + fs1 + FROM t2 + LEFT JOIN tmp1 USING (fs1) + WHERE (fs1 IN ('test')) SETTINGS enable_multiple_prewhere_read_steps = 1, query_plan_merge_filters=1; + +optimize table t1 final; + +WITH +tmp1 AS +( + SELECT + CAST(s1, 'FixedString(10)') AS fs1, + s2 AS sector, + s3 + FROM t1 + WHERE (s3 != 'test') +) + SELECT + fs1 + FROM t2 + LEFT JOIN tmp1 USING (fs1) + WHERE (fs1 IN ('test')) SETTINGS enable_multiple_prewhere_read_steps = 0, query_plan_merge_filters=0; + +WITH +tmp1 AS +( + SELECT + CAST(s1, 'FixedString(10)') AS fs1, + s2 AS sector, + s3 + FROM t1 + WHERE (s3 != 'test') +) + SELECT + fs1 + FROM t2 + LEFT JOIN tmp1 USING (fs1) + WHERE (fs1 IN ('test')) SETTINGS enable_multiple_prewhere_read_steps = 1, query_plan_merge_filters=1; diff --git a/parser/testdata/03199_queries_with_new_analyzer/ast.json b/parser/testdata/03199_queries_with_new_analyzer/ast.json new file mode 100644 index 000000000..1c621f60f --- /dev/null +++ b/parser/testdata/03199_queries_with_new_analyzer/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00121591, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03199_queries_with_new_analyzer/metadata.json b/parser/testdata/03199_queries_with_new_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- 
/dev/null +++ b/parser/testdata/03199_queries_with_new_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03199_queries_with_new_analyzer/query.sql b/parser/testdata/03199_queries_with_new_analyzer/query.sql new file mode 100644 index 000000000..d400a025f --- /dev/null +++ b/parser/testdata/03199_queries_with_new_analyzer/query.sql @@ -0,0 +1,40 @@ +SET enable_analyzer=1; + +SELECT *, ngramMinHash(*) AS minhash, mortonEncode(untuple(ngramMinHash(*))) AS z +FROM (SELECT toString(number) FROM numbers(10)) +ORDER BY z LIMIT 100; + +CREATE TABLE test ( + idx UInt64, + coverage Array(UInt64), + test_name String +) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO test VALUES (10, [0,1,2,3], 'xx'), (20, [3,4,5,6], 'xxx'), (90, [3,4,5,6,9], 'xxxx'); + +WITH + 4096 AS w, 4096 AS h, w * h AS pixels, + arrayJoin(coverage) AS num, + num DIV (32768 * 32768 DIV pixels) AS idx, + mortonDecode(2, idx) AS coord, + 255 AS b, + least(255, uniq(test_name)) AS r, + 255 * uniq(test_name) / (max(uniq(test_name)) OVER ()) AS g +SELECT r::UInt8, g::UInt8, b::UInt8 +FROM test +GROUP BY coord +ORDER BY coord.2 * w + coord.1 +WITH FILL FROM 0 TO 10; + + +CREATE TABLE seq ( + number UInt64 +) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO seq VALUES (0), (6), (7); + +WITH (Select min(number), max(number) from seq) as range Select * from numbers(range.1, range.2); diff --git a/parser/testdata/03200_memory_engine_alter_dynamic/ast.json b/parser/testdata/03200_memory_engine_alter_dynamic/ast.json new file mode 100644 index 000000000..cf665c68a --- /dev/null +++ b/parser/testdata/03200_memory_engine_alter_dynamic/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001130832, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03200_memory_engine_alter_dynamic/metadata.json b/parser/testdata/03200_memory_engine_alter_dynamic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03200_memory_engine_alter_dynamic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03200_memory_engine_alter_dynamic/query.sql b/parser/testdata/03200_memory_engine_alter_dynamic/query.sql new file mode 100644 index 000000000..939b49e15 --- /dev/null +++ b/parser/testdata/03200_memory_engine_alter_dynamic/query.sql @@ -0,0 +1,8 @@ +set allow_experimental_dynamic_type=1; +drop table if exists test; +create table test (d Dynamic) engine=Memory; +insert into table test select * from numbers(5); +alter table test modify column d Dynamic(max_types=0); +select d.UInt64 from test settings enable_analyzer=1; +select d.UInt64 from test settings enable_analyzer=0; +drop table test; diff --git a/parser/testdata/03200_subcolumns_join_use_nulls/ast.json b/parser/testdata/03200_subcolumns_join_use_nulls/ast.json new file mode 100644 index 000000000..cfa52d179 --- /dev/null +++ b/parser/testdata/03200_subcolumns_join_use_nulls/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_subcolumns_join (children 1)" + }, + { + "explain": " Identifier t_subcolumns_join" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001195066, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/03200_subcolumns_join_use_nulls/metadata.json 
b/parser/testdata/03200_subcolumns_join_use_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03200_subcolumns_join_use_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03200_subcolumns_join_use_nulls/query.sql b/parser/testdata/03200_subcolumns_join_use_nulls/query.sql new file mode 100644 index 000000000..6777a7534 --- /dev/null +++ b/parser/testdata/03200_subcolumns_join_use_nulls/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS t_subcolumns_join; + +CREATE TABLE t_subcolumns_join (id UInt64) ENGINE=MergeTree ORDER BY tuple(); + +INSERT INTO t_subcolumns_join SELECT number as number FROM numbers(10000); + +SELECT + count() +FROM (SELECT number FROM numbers(10)) as tbl LEFT JOIN t_subcolumns_join ON number = id +WHERE id is null +SETTINGS enable_analyzer = 1, optimize_functions_to_subcolumns = 1, join_use_nulls = 1; + +DROP TABLE t_subcolumns_join; diff --git a/parser/testdata/03201_analyzer_resolve_in_parent_scope/ast.json b/parser/testdata/03201_analyzer_resolve_in_parent_scope/ast.json new file mode 100644 index 000000000..1c8fea88f --- /dev/null +++ b/parser/testdata/03201_analyzer_resolve_in_parent_scope/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery fake (children 2)" + }, + { + "explain": " Identifier fake" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier table" + }, + { + "explain": " Identifier database" + }, + { + "explain": " Identifier name" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.tables" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier database" + }, + { + "explain": " Function currentDatabase (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001067585, + "rows_read": 18, + "bytes_read": 678 + } +} diff --git a/parser/testdata/03201_analyzer_resolve_in_parent_scope/metadata.json b/parser/testdata/03201_analyzer_resolve_in_parent_scope/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03201_analyzer_resolve_in_parent_scope/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03201_analyzer_resolve_in_parent_scope/query.sql b/parser/testdata/03201_analyzer_resolve_in_parent_scope/query.sql new file mode 100644 index 000000000..f2f41130e --- /dev/null +++ b/parser/testdata/03201_analyzer_resolve_in_parent_scope/query.sql @@ -0,0 +1,87 @@ +CREATE VIEW fake AS SELECT table, database, name FROM system.tables WHERE database = currentDatabase(); + +WITH +(`database` NOT LIKE 'system' and `name` = 'fake') AS `$condition`, +`$main` AS (SELECT DISTINCT table FROM fake WHERE `$condition`) +SELECT * FROM `$main`; + +with properties_value[indexOf(properties_key, 'objectId')] as objectId, +data as ( + select + ['objectId'] as properties_key, + ['objectValue'] as properties_value +), +nested_query as ( + select + objectId + from + data +) +select + * +from + nested_query; + +WITH leftPad('abc', 4, '0') as paddedval 
+SELECT * FROM (SELECT paddedval); + +with ('408','420') as some_tuple +select '408' in some_tuple as flag; + +CREATE VIEW another_fake AS SELECT bytes, table FROM system.parts; + +WITH + sum(bytes) as s, + data as ( + SELECT + formatReadableSize(s), + table + FROM another_fake + GROUP BY table + ORDER BY s + ) +select * from data +FORMAT Null; + +CREATE TABLE test + ( + a UInt64, + b UInt64, + Block_Height UInt64, + Block_Date Date + ) ENGINE = Log; + +WITH Block_Height BETWEEN 1 AND 2 AS block_filter +SELECT * +FROM test +WHERE block_filter +AND ( + Block_Date IN ( + SELECT Block_Date FROM test WHERE block_filter + ) +); + +CREATE TABLE test_cte +( + a UInt64, + b UInt64, +) +ENGINE = MergeTree +ORDER BY tuple(); + +WITH + (a > b) as cte, + query AS + ( + SELECT count() + FROM test_cte + WHERE cte + ) +SELECT * +FROM query; + +WITH arrayMap(x -> (x + 1), [0]) AS a +SELECT 1 +WHERE 1 IN ( + SELECT arrayJoin(a) +); diff --git a/parser/testdata/03201_sumIf_to_countIf_return_type/ast.json b/parser/testdata/03201_sumIf_to_countIf_return_type/ast.json new file mode 100644 index 000000000..13b84e49d --- /dev/null +++ b/parser/testdata/03201_sumIf_to_countIf_return_type/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001417427, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03201_sumIf_to_countIf_return_type/metadata.json b/parser/testdata/03201_sumIf_to_countIf_return_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03201_sumIf_to_countIf_return_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03201_sumIf_to_countIf_return_type/query.sql b/parser/testdata/03201_sumIf_to_countIf_return_type/query.sql new file mode 100644 index 000000000..b791f328d --- /dev/null +++ b/parser/testdata/03201_sumIf_to_countIf_return_type/query.sql @@ -0,0 +1,2 @@ +SET enable_analyzer = 1; +EXPLAIN QUERY TREE SELECT tuple(sumIf(toInt64(1), 1)) FROM numbers(100) settings optimize_rewrite_sum_if_to_count_if=1; diff --git a/parser/testdata/03202_enum_json_cast/ast.json b/parser/testdata/03202_enum_json_cast/ast.json new file mode 100644 index 000000000..7ae6d3e27 --- /dev/null +++ b/parser/testdata/03202_enum_json_cast/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000913373, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03202_enum_json_cast/metadata.json b/parser/testdata/03202_enum_json_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03202_enum_json_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03202_enum_json_cast/query.sql b/parser/testdata/03202_enum_json_cast/query.sql new file mode 100644 index 000000000..e8e2ab84b --- /dev/null +++ b/parser/testdata/03202_enum_json_cast/query.sql @@ -0,0 +1,25 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + `answer` Enum8('Question' = 1, 'Answer' = 2, 'Wiki' = 3, 'TagWikiExcerpt' = 4, 'TagWiki' = 5, 'ModeratorNomination' = 6, 'WikiPlaceholder' = 7, 'PrivilegeWiki' = 8) +) +ENGINE = Memory; + +INSERT INTO test FORMAT JSONEachRow {"answer": 1}; + +INSERT INTO test FORMAT JSONEachRow {"answer": "2"}; 
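+ +-- A hedged aside (general Enum behavior, not something this fixture asserts): a bare JSON number is presumably matched against enum values, while a JSON string is matched against enum names; the second table below swaps names and values to probe exactly that distinction. Commented-out illustration with a hypothetical enum: +-- SELECT CAST(1, 'Enum8(''x'' = 1, ''y'' = 2)'); -- 'x' (matched by value) +-- SELECT CAST('y', 'Enum8(''x'' = 1, ''y'' = 2)'); -- 'y' (matched by name)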
+
+SELECT * FROM test ORDER BY ALL;
+
+DROP TABLE test;
+CREATE TABLE test
+(
+    `answer` Enum8('1' = 2, '2' = 1, 'Wiki' = 3)
+)
+ENGINE = Memory;
+
+INSERT INTO test FORMAT JSONEachRow {"answer": 1};
+
+INSERT INTO test FORMAT JSONEachRow {"answer": "2"};
+
+SELECT * FROM test ORDER BY ALL;
diff --git a/parser/testdata/03202_system_load_primary_key/ast.json b/parser/testdata/03202_system_load_primary_key/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03202_system_load_primary_key/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03202_system_load_primary_key/metadata.json b/parser/testdata/03202_system_load_primary_key/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03202_system_load_primary_key/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03202_system_load_primary_key/query.sql b/parser/testdata/03202_system_load_primary_key/query.sql
new file mode 100644
index 000000000..965e4f98b
--- /dev/null
+++ b/parser/testdata/03202_system_load_primary_key/query.sql
@@ -0,0 +1,87 @@
+-- Tags: no-parallel
+-- no-parallel: the test loads/unloads PKs of all tables, which affects expected results if the test runs in parallel
+
+DROP TABLE IF EXISTS test;
+DROP TABLE IF EXISTS test2;
+
+CREATE TABLE test (s String) ENGINE = MergeTree ORDER BY s SETTINGS index_granularity = 1, use_primary_key_cache = 0;
+CREATE TABLE test2 (s String) ENGINE = MergeTree ORDER BY s SETTINGS index_granularity = 1, use_primary_key_cache = 0;
+
+SELECT '-- Insert data into columns';
+INSERT INTO test SELECT randomPrintableASCII(100) FROM numbers(10000);
+INSERT INTO test2 SELECT randomPrintableASCII(100) FROM numbers(10000);
+SELECT (SELECT count() FROM test), (SELECT count() FROM test2);
+
+SELECT '-- Check primary key memory after inserting into both tables';
+SELECT
+    table,
+    round(primary_key_bytes_in_memory, -6),
+    round(primary_key_bytes_in_memory_allocated, -6)
+FROM system.parts
+WHERE
+    database = currentDatabase()
+    AND table IN ('test', 'test2')
+ORDER BY table;
+
+SELECT '-- Unload primary keys for all tables in the database';
+SYSTEM UNLOAD PRIMARY KEY;
+SELECT 'OK';
+
+SELECT '-- Check the primary key memory after unloading all tables';
+SELECT
+    table,
+    round(primary_key_bytes_in_memory, -6),
+    round(primary_key_bytes_in_memory_allocated, -6)
+FROM system.parts
+WHERE
+    database = currentDatabase()
+    AND table IN ('test', 'test2')
+ORDER BY table;
+
+SELECT '-- Load primary key for all tables';
+SYSTEM LOAD PRIMARY KEY;
+SELECT 'OK';
+
+SELECT '-- Check the primary key memory after loading all tables';
+SELECT
+    table,
+    round(primary_key_bytes_in_memory, -6),
+    round(primary_key_bytes_in_memory_allocated, -6)
+FROM system.parts
+WHERE
+    database = currentDatabase()
+    AND table IN ('test', 'test2')
+ORDER BY table;
+
+SELECT '-- Unload primary keys for all tables in the database';
+SYSTEM UNLOAD PRIMARY KEY;
+SELECT 'OK';
+
+SELECT '-- Check the primary key memory after unloading all tables';
+SELECT
+    table,
+    round(primary_key_bytes_in_memory, -6),
+    round(primary_key_bytes_in_memory_allocated, -6)
+FROM system.parts
+WHERE
+    database = currentDatabase()
+    AND table IN ('test', 'test2')
+ORDER BY table;
+
+SELECT '-- Load primary key for only one table';
+SYSTEM LOAD PRIMARY KEY test;
+SELECT 'OK';
+
+SELECT '-- Check the primary key memory after loading only one table';
+SELECT
+    table,
+    round(primary_key_bytes_in_memory, -6),
+    round(primary_key_bytes_in_memory_allocated, -6)
+FROM system.parts
+WHERE
+    database = currentDatabase()
+    AND table IN ('test', 'test2')
+ORDER BY table;
+
+DROP TABLE test;
+DROP TABLE test2;
diff --git a/parser/testdata/03203_count_with_non_deterministic_function/ast.json b/parser/testdata/03203_count_with_non_deterministic_function/ast.json
new file mode 100644
index 000000000..efdd6aee1
--- /dev/null
+++ b/parser/testdata/03203_count_with_non_deterministic_function/ast.json
@@ -0,0 +1,58 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "CreateQuery t (children 3)"
+		},
+		{
+			"explain": " Identifier t"
+		},
+		{
+			"explain": " Columns definition (children 1)"
+		},
+		{
+			"explain": "  ExpressionList (children 2)"
+		},
+		{
+			"explain": "   ColumnDeclaration p (children 1)"
+		},
+		{
+			"explain": "    DataType UInt8"
+		},
+		{
+			"explain": "   ColumnDeclaration x (children 1)"
+		},
+		{
+			"explain": "    DataType UInt64"
+		},
+		{
+			"explain": " Storage definition (children 3)"
+		},
+		{
+			"explain": "  Function MergeTree"
+		},
+		{
+			"explain": "  Identifier p"
+		},
+		{
+			"explain": "  Identifier x"
+		}
+	],
+
+	"rows": 12,
+
+	"statistics":
+	{
+		"elapsed": 0.001190492,
+		"rows_read": 12,
+		"bytes_read": 383
+	}
+}
diff --git a/parser/testdata/03203_count_with_non_deterministic_function/metadata.json b/parser/testdata/03203_count_with_non_deterministic_function/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03203_count_with_non_deterministic_function/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03203_count_with_non_deterministic_function/query.sql b/parser/testdata/03203_count_with_non_deterministic_function/query.sql
new file mode 100644
index 000000000..b45def0c5
--- /dev/null
+++ b/parser/testdata/03203_count_with_non_deterministic_function/query.sql
@@ -0,0 +1,4 @@
+CREATE TABLE t (p UInt8, x UInt64) Engine = MergeTree PARTITION BY p ORDER BY x;
+INSERT INTO t SELECT 0, number FROM numbers(10) SETTINGS max_block_size = 100;
+SELECT count() FROM t WHERE p = 0 AND rowNumberInAllBlocks() = 1 SETTINGS enable_analyzer = 0;
+SELECT count() FROM t WHERE p = 0 AND rowNumberInAllBlocks() = 1 SETTINGS enable_analyzer = 1;
diff --git a/parser/testdata/03203_drop_detached_partition_all/ast.json b/parser/testdata/03203_drop_detached_partition_all/ast.json
new file mode 100644
index 000000000..2b7f37fb3
--- /dev/null
+++ b/parser/testdata/03203_drop_detached_partition_all/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery t_03203 (children 1)"
+		},
+		{
+			"explain": " Identifier t_03203"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001443488,
+		"rows_read": 2,
+		"bytes_read": 66
+	}
+}
diff --git a/parser/testdata/03203_drop_detached_partition_all/metadata.json b/parser/testdata/03203_drop_detached_partition_all/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03203_drop_detached_partition_all/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03203_drop_detached_partition_all/query.sql b/parser/testdata/03203_drop_detached_partition_all/query.sql
new file mode 100644
index 000000000..e29eb4ae3
--- /dev/null
+++ b/parser/testdata/03203_drop_detached_partition_all/query.sql
@@ -0,0 +1,8 @@
+DROP TABLE IF EXISTS t_03203;
+CREATE TABLE t_03203 (p UInt64, v UInt64) ENGINE = MergeTree PARTITION BY p ORDER BY v;
+INSERT INTO t_03203 VALUES (1, 1), (2, 2), (3, 3);
+SELECT * FROM t_03203 ORDER BY p, v;
+ALTER TABLE t_03203 DETACH PARTITION ALL;
+SELECT count() FROM system.detached_parts WHERE database = currentDatabase() AND table = 't_03203';
+ALTER TABLE t_03203 DROP DETACHED PARTITION ALL SETTINGS allow_drop_detached = 1;
+SELECT count() FROM system.detached_parts WHERE database = currentDatabase() AND table = 't_03203';
diff --git a/parser/testdata/03203_fill_missed_subcolumns/ast.json b/parser/testdata/03203_fill_missed_subcolumns/ast.json
new file mode 100644
index 000000000..9f14de05e
--- /dev/null
+++ b/parser/testdata/03203_fill_missed_subcolumns/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery t_missed_subcolumns (children 1)"
+		},
+		{
+			"explain": " Identifier t_missed_subcolumns"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.00128821,
+		"rows_read": 2,
+		"bytes_read": 90
+	}
+}
diff --git a/parser/testdata/03203_fill_missed_subcolumns/metadata.json b/parser/testdata/03203_fill_missed_subcolumns/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03203_fill_missed_subcolumns/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03203_fill_missed_subcolumns/query.sql b/parser/testdata/03203_fill_missed_subcolumns/query.sql
new file mode 100644
index 000000000..9fd69c110
--- /dev/null
+++ b/parser/testdata/03203_fill_missed_subcolumns/query.sql
@@ -0,0 +1,48 @@
+DROP TABLE IF EXISTS t_missed_subcolumns;
+
+-- .null subcolumn
+
+CREATE TABLE t_missed_subcolumns (x UInt32) ENGINE = MergeTree ORDER BY tuple();
+INSERT INTO t_missed_subcolumns SELECT * FROM numbers(10);
+
+ALTER TABLE t_missed_subcolumns ADD COLUMN `y` Nullable(UInt32);
+
+INSERT INTO t_missed_subcolumns SELECT number, if(number % 2, NULL, number) FROM numbers(10);
+
+SELECT x FROM t_missed_subcolumns WHERE y IS NOT NULL SETTINGS optimize_functions_to_subcolumns = 1;
+SELECT x FROM t_missed_subcolumns WHERE y IS NOT NULL SETTINGS optimize_functions_to_subcolumns = 0;
+
+DROP TABLE IF EXISTS t_missed_subcolumns;
+
+-- .null and .size0 subcolumn in array
+
+CREATE TABLE t_missed_subcolumns (id UInt64, `n.a` Array(Nullable(String))) ENGINE = MergeTree ORDER BY id;
+
+INSERT INTO t_missed_subcolumns VALUES (1, ['aaa', NULL]) (2, ['ccc']) (3, [NULL]);
+ALTER TABLE t_missed_subcolumns ADD COLUMN `n.b` Array(Nullable(String));
+INSERT INTO t_missed_subcolumns VALUES (4, [NULL, 'bbb'], ['ddd', NULL]), (5, [NULL], [NULL]);
+
+SELECT id, n.a, n.b FROM t_missed_subcolumns ORDER BY id;
+SELECT id, n.a.size0, n.b.size0 FROM t_missed_subcolumns ORDER BY id;
+SELECT id, n.a.null, n.b.null FROM t_missed_subcolumns ORDER BY id;
+SELECT n.b.size0 FROM t_missed_subcolumns ORDER BY id;
+
+DROP TABLE IF EXISTS t_missed_subcolumns;
+
+-- subcolumns and custom defaults
+
+CREATE TABLE t_missed_subcolumns (id UInt64) ENGINE = MergeTree ORDER BY id;
+SYSTEM STOP MERGES t_missed_subcolumns;
+
+INSERT INTO t_missed_subcolumns VALUES (1);
+
+ALTER TABLE t_missed_subcolumns ADD COLUMN t Tuple(a String, b String) DEFAULT ('foo', 'bar');
+INSERT INTO t_missed_subcolumns VALUES (2, ('aaa', 'bbb'));
+
+ALTER TABLE t_missed_subcolumns ADD COLUMN arr Array(Nullable(UInt64)) DEFAULT [1, NULL, 3];
+INSERT INTO t_missed_subcolumns VALUES (3, ('ccc', 'ddd'), [4, 5, 6]);
+
+SELECT id, t, arr FROM t_missed_subcolumns ORDER BY id;
+SELECT id, t.a, t.b, arr.size0, arr.null FROM t_missed_subcolumns ORDER BY id;
+
+DROP TABLE t_missed_subcolumns;
diff --git a/parser/testdata/03203_function_printf/ast.json b/parser/testdata/03203_function_printf/ast.json
new file mode 100644
index 000000000..007fb34ce
--- /dev/null
+++ b/parser/testdata/03203_function_printf/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001278039,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/03203_function_printf/metadata.json b/parser/testdata/03203_function_printf/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03203_function_printf/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03203_function_printf/query.sql b/parser/testdata/03203_function_printf/query.sql
new file mode 100644
index 000000000..88ef966ac
--- /dev/null
+++ b/parser/testdata/03203_function_printf/query.sql
@@ -0,0 +1,49 @@
+SET output_format_pretty_fallback_to_vertical = 0;
+
+-- Testing integer formats
+select printf('%%d: %d', 123);
+select printf('%%d: %d', -123);
+select printf('%%d: %d', 0);
+select printf('%%d: %d', 9223372036854775807);
+select printf('%%i: %i', 123);
+select printf('%%u: %u', 123);
+select printf('%%o: %o', 123);
+select printf('%%x: %x', 123);
+select printf('%%X: %X', 123);
+
+-- Testing floating point formats
+select printf('%%f: %f', 0.0);
+select printf('%%f: %f', 123.456);
+select printf('%%f: %f', -123.456);
+select printf('%%F: %F', 123.456);
+select printf('%%e: %e', 123.456);
+select printf('%%E: %E', 123.456);
+select printf('%%g: %g', 123.456);
+select printf('%%G: %G', 123.456);
+select printf('%%a: %a', 123.456);
+select printf('%%A: %A', 123.456);
+
+-- Testing character formats
+select printf('%%s: %s', 'abc');
+SELECT printf('%%s: %s', '\n\t') FORMAT PrettyCompact;
+select printf('%%s: %s', '');
+
+-- Testing the %% specifier
+select printf('%%%%: %%');
+
+-- Testing integer formats with precision
+select printf('%%.5d: %.5d', 123);
+
+-- Testing floating point formats with precision
+select printf('%%.2f: %.2f', 123.456);
+select printf('%%.2e: %.2e', 123.456);
+select printf('%%.2g: %.2g', 123.456);
+
+-- Testing character formats with precision
+select printf('%%.2s: %.2s', 'abc');
+
+select printf('%%X: %X', 123.123); -- { serverError BAD_ARGUMENTS }
+select printf('%%A: %A', 'abc'); -- { serverError BAD_ARGUMENTS }
+select printf('%%s: %s', 100); -- { serverError BAD_ARGUMENTS }
+select printf('%%n: %n', 100); -- { serverError BAD_ARGUMENTS }
+select printf('%%f: %f', 0); -- { serverError BAD_ARGUMENTS }
diff --git a/parser/testdata/03203_multiif_and_where_2_conditions_old_analyzer_bug/ast.json b/parser/testdata/03203_multiif_and_where_2_conditions_old_analyzer_bug/ast.json
new file mode 100644
index 000000000..9fa8a65c0
--- /dev/null
+++ b/parser/testdata/03203_multiif_and_where_2_conditions_old_analyzer_bug/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery bugcheck1 (children 1)"
+		},
+		{
+			"explain": " Identifier bugcheck1"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001164361,
+		"rows_read": 2,
+		"bytes_read": 70
+	}
+}
diff --git a/parser/testdata/03203_multiif_and_where_2_conditions_old_analyzer_bug/metadata.json b/parser/testdata/03203_multiif_and_where_2_conditions_old_analyzer_bug/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03203_multiif_and_where_2_conditions_old_analyzer_bug/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03203_multiif_and_where_2_conditions_old_analyzer_bug/query.sql b/parser/testdata/03203_multiif_and_where_2_conditions_old_analyzer_bug/query.sql
new file mode 100644
index 000000000..4e8694bc5
--- /dev/null
+++ b/parser/testdata/03203_multiif_and_where_2_conditions_old_analyzer_bug/query.sql
@@ -0,0 +1,35 @@
+DROP TABLE IF EXISTS bugcheck1;
+
+CREATE TABLE bugcheck1
+ENGINE = MergeTree
+ORDER BY tuple()
+AS SELECT
+    'c1' as column_a,
+    'c2' as column_b;
+
+select 'this query used to be broken in old analyser:';
+SELECT *,
+    multiIf(column_b IN (SELECT 'c2' as someproduct), 'yes', 'no') AS condition_1,
+    multiIf(column_b = 'c2', 'true', 'false') AS condition_2
+FROM (SELECT column_a, column_b FROM bugcheck1)
+WHERE (condition_1 IN ('yes')) AND (condition_2 in ('true'))
+SETTINGS enable_analyzer=0;
+
+select 'this query worked:';
+
+SELECT
+    multiIf(column_b IN (SELECT 'c2' as someproduct), 'yes', 'no') AS condition_1,
+    multiIf(column_b = 'c2', 'true', 'false') AS condition_2
+FROM (SELECT column_a, column_b FROM bugcheck1)
+WHERE (condition_1 IN ('yes')) AND (condition_2 in ('true'))
+SETTINGS enable_analyzer=0;
+
+select 'experimental analyzer:';
+SELECT *,
+    multiIf(column_b IN (SELECT 'c2' as someproduct), 'yes', 'no') AS condition_1,
+    multiIf(column_b = 'c2', 'true', 'false') AS condition_2
+FROM (SELECT column_a, column_b FROM bugcheck1)
+WHERE (condition_1 IN ('yes')) AND (condition_2 in ('true'))
+SETTINGS enable_analyzer=1;
+
+DROP TABLE bugcheck1;
diff --git a/parser/testdata/03203_optimize_disjunctions_chain_to_in/ast.json b/parser/testdata/03203_optimize_disjunctions_chain_to_in/ast.json
new file mode 100644
index 000000000..1eb246838
--- /dev/null
+++ b/parser/testdata/03203_optimize_disjunctions_chain_to_in/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001020558,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/03203_optimize_disjunctions_chain_to_in/metadata.json b/parser/testdata/03203_optimize_disjunctions_chain_to_in/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03203_optimize_disjunctions_chain_to_in/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03203_optimize_disjunctions_chain_to_in/query.sql b/parser/testdata/03203_optimize_disjunctions_chain_to_in/query.sql
new file mode 100644
index 000000000..1644b6613
--- /dev/null
+++ b/parser/testdata/03203_optimize_disjunctions_chain_to_in/query.sql
@@ -0,0 +1,12 @@
+SET enable_analyzer=1;
+CREATE TABLE foo (i Date) ENGINE MergeTree ORDER BY i;
+INSERT INTO foo VALUES ('2020-01-01');
+INSERT INTO foo VALUES ('2020-01-02');
+
+SET optimize_min_equality_disjunction_chain_length = 3;
+SELECT *
+FROM foo
+WHERE (foo.i = parseDateTimeBestEffort('2020-01-01'))
+    OR (foo.i = parseDateTimeBestEffort('2020-01-02'))
+    OR (foo.i = parseDateTimeBestEffort('2020-01-03'))
+ORDER BY foo.i ASC
diff --git a/parser/testdata/03203_system_numbers_limit_and_offset_complex/ast.json b/parser/testdata/03203_system_numbers_limit_and_offset_complex/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03203_system_numbers_limit_and_offset_complex/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03203_system_numbers_limit_and_offset_complex/metadata.json b/parser/testdata/03203_system_numbers_limit_and_offset_complex/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03203_system_numbers_limit_and_offset_complex/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03203_system_numbers_limit_and_offset_complex/query.sql b/parser/testdata/03203_system_numbers_limit_and_offset_complex/query.sql
new file mode 100644
index 000000000..756e08da2
--- /dev/null
+++ b/parser/testdata/03203_system_numbers_limit_and_offset_complex/query.sql
@@ -0,0 +1,38 @@
+--- The following query was buggy before, so let's use it as a test case
+WITH
+    (num > 1) AND (arraySum(arrayMap(y -> ((y > 1) AND (y < num) AND ((num % y) = 0)), range(toUInt64(sqrt(num)) + 1))) = 0) AS is_prime_slow
+SELECT
+    num,
+    ds,
+FROM
+(
+    WITH
+        arraySum(arrayMap(y -> toUInt8(y), splitByString('', toString(num)))) AS digits_sum
+    SELECT
+        1 + (number * 2) AS num,
+        digits_sum AS ds
+    FROM numbers_mt(10000)
+    WHERE ds IN (
+        WITH
+            (number > 1) AND (arraySum(arrayMap(y -> ((y > 1) AND (y < number) AND ((number % y) = 0)), range(toUInt64(sqrt(number)) + 1))) = 0) AS is_prime_slow
+        SELECT number
+        FROM numbers(180 + 1)
+        WHERE is_prime_slow
+    )
+)
+WHERE is_prime_slow
+ORDER BY num ASC
+LIMIT 998, 1
+SETTINGS max_block_size = 64, max_threads=16;
+
+SELECT number
+FROM numbers_mt(120)
+WHERE (number % 10) = 0
+ORDER BY number ASC
+SETTINGS max_block_size = 31, max_threads = 11;
+
+SELECT number
+FROM numbers_mt(4242, 9)
+WHERE (number % 10) = 0
+ORDER BY number ASC
+SETTINGS max_block_size = 31, max_threads = 11;
diff --git a/parser/testdata/03203_system_numbers_limit_and_offset_simple/ast.json b/parser/testdata/03203_system_numbers_limit_and_offset_simple/ast.json
new file mode 100644
index 000000000..c7add8120
--- /dev/null
+++ b/parser/testdata/03203_system_numbers_limit_and_offset_simple/ast.json
@@ -0,0 +1,37 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Literal 'case 1'"
+		}
+	],
+
+	"rows": 5,
+
+	"statistics":
+	{
+		"elapsed": 0.001311987,
+		"rows_read": 5,
+		"bytes_read": 177
+	}
+}
diff --git a/parser/testdata/03203_system_numbers_limit_and_offset_simple/metadata.json b/parser/testdata/03203_system_numbers_limit_and_offset_simple/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03203_system_numbers_limit_and_offset_simple/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03203_system_numbers_limit_and_offset_simple/query.sql b/parser/testdata/03203_system_numbers_limit_and_offset_simple/query.sql
new file mode 100644
index 000000000..df01ddf43
--- /dev/null
+++ b/parser/testdata/03203_system_numbers_limit_and_offset_simple/query.sql
@@ -0,0 +1,11 @@
+SELECT 'case 1';
+SELECT number FROM numbers_mt(10000)
+WHERE (number % 10) = 0
+ORDER BY number ASC
+LIMIT 990, 3;
+
+SELECT 'case 2';
+SELECT number FROM numbers_mt(10000)
+WHERE (number % 10) = 0
+ORDER BY number ASC
+LIMIT 999, 20 SETTINGS max_block_size = 31;
diff --git a/parser/testdata/03203_variant_convert_field_to_type_bug/ast.json b/parser/testdata/03203_variant_convert_field_to_type_bug/ast.json
new file mode 100644
index 000000000..b95b4c194
--- /dev/null
+++ b/parser/testdata/03203_variant_convert_field_to_type_bug/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001428339,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/03203_variant_convert_field_to_type_bug/metadata.json b/parser/testdata/03203_variant_convert_field_to_type_bug/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03203_variant_convert_field_to_type_bug/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03203_variant_convert_field_to_type_bug/query.sql b/parser/testdata/03203_variant_convert_field_to_type_bug/query.sql
new file mode 100644
index 000000000..b73bb8ffa
--- /dev/null
+++ b/parser/testdata/03203_variant_convert_field_to_type_bug/query.sql
@@ -0,0 +1,5 @@
+set allow_experimental_variant_type=1;
+set use_variant_as_common_type=1;
+
+SELECT * FROM numbers([tuple(1, 2), NULL], 2); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT}
+
diff --git a/parser/testdata/03204_distributed_with_scalar_subquery/ast.json b/parser/testdata/03204_distributed_with_scalar_subquery/ast.json
new file mode 100644
index 000000000..fee98de41
--- /dev/null
+++ b/parser/testdata/03204_distributed_with_scalar_subquery/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery t_c3oollc8r (children 1)"
+		},
+		{
+			"explain": " Identifier t_c3oollc8r"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001294689,
+		"rows_read": 2,
+		"bytes_read": 74
+	}
+}
diff --git a/parser/testdata/03204_distributed_with_scalar_subquery/metadata.json b/parser/testdata/03204_distributed_with_scalar_subquery/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03204_distributed_with_scalar_subquery/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03204_distributed_with_scalar_subquery/query.sql b/parser/testdata/03204_distributed_with_scalar_subquery/query.sql
new file mode 100644
index 000000000..0a07ce482
--- /dev/null
+++ b/parser/testdata/03204_distributed_with_scalar_subquery/query.sql
@@ -0,0 +1,10 @@
+DROP TABLE IF EXISTS t_c3oollc8r;
+CREATE TABLE t_c3oollc8r (c_k37 Int32, c_y String, c_bou Int32, c_g1 Int32, c_lfntfzg Int32, c_kntw50q Int32) ENGINE = MergeTree ORDER BY ();
+
+SELECT (
+        SELECT c_k37
+        FROM t_c3oollc8r
+    ) > c_lfntfzg
+FROM remote('127.0.0.{1,2}', currentDatabase(), t_c3oollc8r);
+
+DROP TABLE t_c3oollc8r;
diff --git a/parser/testdata/03204_index_hint_fuzzer/ast.json b/parser/testdata/03204_index_hint_fuzzer/ast.json
new file mode 100644
index 000000000..866766ecd
--- /dev/null
+++ b/parser/testdata/03204_index_hint_fuzzer/ast.json
@@ -0,0 +1,58 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function tuple (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 2)"
+		},
+		{
+			"explain": "      Function indexHint (children 1)"
+		},
+		{
+			"explain": "       ExpressionList (children 1)"
+		},
+		{
+			"explain": "        Function toLowCardinality (children 1)"
+		},
+		{
+			"explain": "         ExpressionList (children 1)"
+		},
+		{
+			"explain": "          Literal 'aaa'"
+		},
+		{
+			"explain": "      Literal UInt64_1"
+		}
+	],
+
+	"rows": 12,
+
+	"statistics":
+	{
+		"elapsed": 0.001334455,
+		"rows_read": 12,
+		"bytes_read": 474
+	}
+}
diff --git a/parser/testdata/03204_index_hint_fuzzer/metadata.json b/parser/testdata/03204_index_hint_fuzzer/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03204_index_hint_fuzzer/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03204_index_hint_fuzzer/query.sql b/parser/testdata/03204_index_hint_fuzzer/query.sql
new file mode 100644
index 000000000..5794f3eee
--- /dev/null
+++ b/parser/testdata/03204_index_hint_fuzzer/query.sql
@@ -0,0 +1 @@
+SELECT tuple(indexHint(toLowCardinality('aaa')), 1);
diff --git a/parser/testdata/03204_storage_join_optimize/ast.json b/parser/testdata/03204_storage_join_optimize/ast.json
new file mode 100644
index 000000000..a8376b58e
--- /dev/null
+++ b/parser/testdata/03204_storage_join_optimize/ast.json
@@ -0,0 +1,64 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "CreateQuery dict_03204 (children 3)"
+		},
+		{
+			"explain": " Identifier dict_03204"
+		},
+		{
+			"explain": " Columns definition (children 1)"
+		},
+		{
+			"explain": "  ExpressionList (children 2)"
+		},
+		{
+			"explain": "   ColumnDeclaration k (children 1)"
+		},
+		{
+			"explain": "    DataType UInt64"
+		},
+		{
+			"explain": "   ColumnDeclaration v (children 1)"
+		},
+		{
+			"explain": "    DataType UInt64"
+		},
+		{
+			"explain": " Storage definition (children 1)"
+		},
+		{
+			"explain": "  Function Join (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 3)"
+		},
+		{
+			"explain": "    Identifier ANY"
+		},
+		{
+			"explain": "    Identifier LEFT"
+		},
+		{
+			"explain": "    Identifier k"
+		}
+	],
+
+	"rows": 14,
+
+	"statistics":
+	{
+		"elapsed": 0.001410393,
+		"rows_read": 14,
+		"bytes_read": 481
+	}
+}
diff --git a/parser/testdata/03204_storage_join_optimize/metadata.json b/parser/testdata/03204_storage_join_optimize/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03204_storage_join_optimize/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03204_storage_join_optimize/query.sql b/parser/testdata/03204_storage_join_optimize/query.sql
new file mode 100644
index 000000000..03a4658ba
--- /dev/null
+++ b/parser/testdata/03204_storage_join_optimize/query.sql
@@ -0,0 +1,5 @@
+CREATE TABLE dict_03204 (k UInt64, v UInt64) ENGINE = Join(ANY, LEFT, k);
+INSERT INTO dict_03204 SELECT number, number FROM numbers(10);
+OPTIMIZE TABLE dict_03204;
+SELECT * FROM dict_03204 ORDER BY k;
+DROP TABLE dict_03204;
diff --git a/parser/testdata/03205_column_type_check/ast.json b/parser/testdata/03205_column_type_check/ast.json
new file mode 100644
index 000000000..c1cd6a826
--- /dev/null
+++ b/parser/testdata/03205_column_type_check/ast.json
@@ -0,0 +1,142 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 2)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Asterisk"
+		},
+		{
+			"explain": "   TablesInSelectQuery (children 2)"
+		},
+		{
+			"explain": "    TablesInSelectQueryElement (children 1)"
+		},
+		{
+			"explain": "     TableExpression (children 1)"
+		},
+		{
+			"explain": "      Subquery (alias t) (children 1)"
+		},
+		{
+			"explain": "       SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": "        ExpressionList (children 1)"
+		},
+		{
+			"explain": "         SelectQuery (children 1)"
+		},
+		{
+			"explain": "          ExpressionList (children 1)"
+		},
+		{
+			"explain": "           Function toUInt256 (children 1)"
+		},
+		{
+			"explain": "            ExpressionList (children 1)"
+		},
+		{
+			"explain": "             Literal UInt64_1"
+		},
+		{
+			"explain": "    TablesInSelectQueryElement (children 2)"
+		},
+		{
+			"explain": "     TableExpression (children 1)"
+		},
+		{
+			"explain": "      Subquery (alias u) (children 1)"
+		},
+		{
+			"explain": "       SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": "        ExpressionList (children 1)"
+		},
+		{
+			"explain": "         SelectQuery (children 1)"
+		},
+		{
+			"explain": "          ExpressionList (children 3)"
+		},
+		{
+			"explain": "           Function greatCircleAngle (children 1)"
+		},
+		{
+			"explain": "            ExpressionList (children 4)"
+		},
+		{
+			"explain": "             Function toLowCardinality (children 1)"
+		},
+		{
+			"explain": "              ExpressionList (children 1)"
+		},
+		{
+			"explain": "               Function toNullable (children 1)"
+		},
+		{
+			"explain": "                ExpressionList (children 1)"
+		},
+		{
+			"explain": "                 Function toUInt256 (children 1)"
+		},
+		{
+			"explain": "                  ExpressionList (children 1)"
+		},
+		{
+			"explain": "                   Literal UInt64_1048575"
+		},
+		{
+			"explain": "             Literal UInt64_257"
+		},
+		{
+			"explain": "             Literal Int64_-9223372036854775808"
+		},
+		{
+			"explain": "             Literal UInt64_1048576"
+		},
+		{
+			"explain": "           Literal UInt64_1048575"
+		},
+		{
+			"explain": "           Function materialize (children 1)"
+		},
+		{
+			"explain": "            ExpressionList (children 1)"
+		},
+		{
+			"explain": "             Literal UInt64_2"
+		},
+		{
+			"explain": "     TableJoin"
+		}
+	],
+
+	"rows": 40,
+
+	"statistics":
+	{
+		"elapsed": 0.001244137,
+		"rows_read": 40,
+		"bytes_read": 1786
+	}
+}
diff --git a/parser/testdata/03205_column_type_check/metadata.json b/parser/testdata/03205_column_type_check/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03205_column_type_check/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03205_column_type_check/query.sql b/parser/testdata/03205_column_type_check/query.sql
new file mode 100644
index 000000000..332061d6b
--- /dev/null
+++ b/parser/testdata/03205_column_type_check/query.sql
@@ -0,0 +1,9 @@
+SELECT * FROM (SELECT toUInt256(1)) AS t, (SELECT greatCircleAngle(toLowCardinality(toNullable(toUInt256(1048575))), 257, -9223372036854775808, 1048576), 1048575, materialize(2)) AS u;
+
+
+SET join_algorithm='hash';
+SET allow_experimental_join_condition=1;
+SELECT * FROM ( SELECT 1 AS a, toLowCardinality(1), 1) AS t1 CROSS JOIN (SELECT toLowCardinality(1 AS a), 1 AS b) AS t2;
+
+
+SELECT * FROM (SELECT tuple(), 1 GROUP BY greatCircleAngle(toNullable(1048575), 257, toInt128(-9223372036854775808), materialize(1048576)) WITH TOTALS) AS t, (SELECT greatCircleAngle(toUInt256(1048575), 257, toNullable(-9223372036854775808), 1048576), 1048575, 2) AS u
diff --git a/parser/testdata/03205_hashing_empty_tuples/ast.json b/parser/testdata/03205_hashing_empty_tuples/ast.json
new file mode 100644
index 000000000..f9085260d
--- /dev/null
+++ b/parser/testdata/03205_hashing_empty_tuples/ast.json
@@ -0,0 +1,46 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Function sipHash64 (children 1)"
+		},
+		{
+			"explain": "     ExpressionList (children 1)"
+		},
+		{
+			"explain": "      Function tuple (children 1)"
+		},
+		{
+			"explain": "       ExpressionList"
+		}
+	],
+
+	"rows": 8,
+
+	"statistics":
+	{
+		"elapsed": 0.001324265,
+		"rows_read": 8,
+		"bytes_read": 302
+	}
+}
diff --git a/parser/testdata/03205_hashing_empty_tuples/metadata.json b/parser/testdata/03205_hashing_empty_tuples/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03205_hashing_empty_tuples/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03205_hashing_empty_tuples/query.sql b/parser/testdata/03205_hashing_empty_tuples/query.sql
new file mode 100644
index 000000000..4a97f30ce
--- /dev/null
+++ b/parser/testdata/03205_hashing_empty_tuples/query.sql
@@ -0,0 +1,23 @@
+SELECT sipHash64(());
+SELECT sipHash64((), ());
+SELECT sipHash64((), 1);
+SELECT sipHash64(1, ());
+SELECT sipHash64(1, (), 1);
+SELECT sipHash64((), 1, ());
+SELECT sipHash64((), (1, 2));
+SELECT sipHash64((), (1, 2));
+SELECT sipHash64((1, 2), ());
+SELECT sipHash64((), (1, 2), ());
+SELECT sipHash64((1, 2), (), (3, 4));
+
+SELECT sipHash64(materialize(()));
+SELECT sipHash64(materialize(()), materialize(()));
+SELECT sipHash64(materialize(()), 1);
+SELECT sipHash64(1, materialize(()));
+SELECT sipHash64(1, materialize(()), 1);
+SELECT sipHash64((), 1, materialize(()));
+SELECT sipHash64(materialize(()), (1, 2));
+SELECT sipHash64(materialize(()), (1, 2));
+SELECT sipHash64((1, 2), materialize(()));
+SELECT sipHash64(materialize(()), (1, 2), ());
+SELECT sipHash64((1, 2), materialize(()), (3, 4));
diff --git a/parser/testdata/03205_json_cast_from_string/ast.json b/parser/testdata/03205_json_cast_from_string/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03205_json_cast_from_string/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03205_json_cast_from_string/metadata.json b/parser/testdata/03205_json_cast_from_string/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03205_json_cast_from_string/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03205_json_cast_from_string/query.sql b/parser/testdata/03205_json_cast_from_string/query.sql
new file mode 100644
index 000000000..32ac894d3
--- /dev/null
+++ b/parser/testdata/03205_json_cast_from_string/query.sql
@@ -0,0 +1,22 @@
+-- Tags: no-fasttest
+set enable_json_type=1;
+
+select materialize('{}')::JSON;
+select materialize('{"a" : 42, "b" : "Hello"}')::JSON;
+select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON;
+select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(a.b.c.d Bool);
+select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP a.b.c.d);
+select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP a.b.c);
+select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP a.b);
+select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP a);
+select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP REGEXP '.*a.*b');
+select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP REGEXP '.*a.*');
+select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(SKIP REGEXP '.*');
+
+select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json);
+select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 2) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json);
+select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 1) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json);
+select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 0) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json);
+select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 2, max_dynamic_types=0) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json);
+select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 1, max_dynamic_types=0) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json);
+select materialize('{"a" : {"b" : {"c" : {"d" : 42}, "e" : 43}, "f" : 44}, "g" : 44}')::JSON(max_dynamic_paths = 0, max_dynamic_types=0) as json, JSONAllPathsWithTypes(json), JSONDynamicPathsWithTypes(json), JSONSharedDataPathsWithTypes(json);
diff --git a/parser/testdata/03205_json_syntax/ast.json b/parser/testdata/03205_json_syntax/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03205_json_syntax/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03205_json_syntax/metadata.json b/parser/testdata/03205_json_syntax/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03205_json_syntax/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03205_json_syntax/query.sql b/parser/testdata/03205_json_syntax/query.sql
new file mode 100644
index 000000000..a5e82e25a
--- /dev/null
+++ b/parser/testdata/03205_json_syntax/query.sql
@@ -0,0 +1,40 @@
+-- Tags: no-fasttest
+
+set enable_json_type=1;
+drop table if exists test;
+create table test (json JSON) engine=Memory;
+drop table test;
+create table test (json JSON(max_dynamic_paths=10)) engine=Memory;
+drop table test;
+create table test (json JSON(max_dynamic_types=10)) engine=Memory;
+drop table test;
+create table test (json JSON(a UInt32)) engine=Memory;
+drop table test;
+create table test (json JSON(aaaaa UInt32)) engine=Memory;
+drop table test;
+create table test (json JSON(`a b c d` UInt32)) engine=Memory;
+drop table test;
+create table test (json JSON(a.b.c UInt32)) engine=Memory;
+drop table test;
+create table test (json JSON(aaaa.b.cccc UInt32)) engine=Memory;
+drop table test;
+create table test (json JSON(`some path`.`path some` UInt32)) engine=Memory;
+drop table test;
+create table test (json JSON(a.b.c Tuple(d UInt32, e UInt32))) engine=Memory;
+drop table test;
+create table test (json JSON(SKIP a)) engine=Memory;
+drop table test;
+create table test (json JSON(SKIP aaaa)) engine=Memory;
+drop table test;
+create table test (json JSON(SKIP `a b c d`)) engine=Memory;
+drop table test;
+create table test (json JSON(SKIP a.b.c)) engine=Memory;
+drop table test;
+create table test (json JSON(SKIP aaaa.b.cccc)) engine=Memory;
+drop table test;
+create table test (json JSON(SKIP `some path`.`path some`)) engine=Memory;
+drop table test;
+create table test (json JSON(SKIP REGEXP '.*a.*')) engine=Memory;
+drop table test;
+create table test (json JSON(max_dynamic_paths=10, max_dynamic_types=10, a.b.c UInt32, b.c.d String, SKIP g.d.a, SKIP o.g.a, SKIP REGEXP '.*u.*', SKIP REGEXP 'abc')) engine=Memory;
+drop table test;
diff --git a/parser/testdata/03205_overlay/ast.json b/parser/testdata/03205_overlay/ast.json
new file mode 100644
index 000000000..b28c5f0b5
--- /dev/null
+++ b/parser/testdata/03205_overlay/ast.json
@@ -0,0 +1,37 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Literal 'Negative test of overlay'"
+		}
+	],
+
+	"rows": 5,
+
+	"statistics":
+	{
+		"elapsed": 0.001318924,
+		"rows_read": 5,
+		"bytes_read": 195
+	}
+}
diff --git a/parser/testdata/03205_overlay/metadata.json b/parser/testdata/03205_overlay/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03205_overlay/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03205_overlay/query.sql b/parser/testdata/03205_overlay/query.sql
new file mode 100644
index 000000000..b692cc0c5
--- /dev/null
+++ b/parser/testdata/03205_overlay/query.sql
@@ -0,0 +1,47 @@
+SELECT 'Negative test of overlay';
+SELECT overlay('hello', 'world'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
+SELECT overlay('hello', 'world', 2, 3, 'extra'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH }
+SELECT overlay(123, 'world', 2, 3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT overlay('hello', 456, 2, 3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT overlay('hello', 'world', 'two', 3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+SELECT overlay('hello', 'world', 2, 'three'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+
+SELECT 'Test with 3 arguments and various combinations of const/non-const columns';
+SELECT overlay('Spark SQL', '_', 6), overlayUTF8('Spark SQL和CH', '_', 6);
+SELECT overlay(materialize('Spark SQL'), '_', 6), overlayUTF8(materialize('Spark SQL和CH'), '_', 6);
+SELECT overlay('Spark SQL', materialize('_'), 6), overlayUTF8('Spark SQL和CH', materialize('_'), 6);
+SELECT overlay('Spark SQL', '_', materialize(6)), overlayUTF8('Spark SQL和CH', '_', materialize(6));
+SELECT overlay(materialize('Spark SQL'), materialize('_'), 6), overlayUTF8(materialize('Spark SQL和CH'), materialize('_'), 6);
+SELECT overlay(materialize('Spark SQL'), '_', materialize(6)), overlayUTF8(materialize('Spark SQL和CH'), '_', materialize(6));
+SELECT overlay('Spark SQL', materialize('_'), materialize(6)), overlayUTF8('Spark SQL和CH', materialize('_'), materialize(6));
+SELECT overlay(materialize('Spark SQL'), materialize('_'), materialize(6)), overlayUTF8(materialize('Spark SQL和CH'), materialize('_'), materialize(6));
+
+SELECT 'Test with 4 arguments and various combinations of const/non-const columns';
+SELECT overlay('Spark SQL', 'ANSI ', 7, 0), overlayUTF8('Spark SQL和CH', 'ANSI ', 7, 0);
+SELECT overlay(materialize('Spark SQL'), 'ANSI ', 7, 0), overlayUTF8(materialize('Spark SQL和CH'), 'ANSI ', 7, 0);
+SELECT overlay('Spark SQL', materialize('ANSI '), 7, 0), overlayUTF8('Spark SQL和CH', materialize('ANSI '), 7, 0);
+SELECT overlay('Spark SQL', 'ANSI ', materialize(7), 0), overlayUTF8('Spark SQL和CH', 'ANSI ', materialize(7), 0);
+SELECT overlay('Spark SQL', 'ANSI ', 7, materialize(0)), overlayUTF8('Spark SQL和CH', 'ANSI ', 7, materialize(0));
+SELECT overlay(materialize('Spark SQL'), materialize('ANSI '), 7, 0), overlayUTF8(materialize('Spark SQL和CH'), materialize('ANSI '), 7, 0);
+SELECT overlay(materialize('Spark SQL'), 'ANSI ', materialize(7), 0), overlayUTF8(materialize('Spark SQL和CH'), 'ANSI ', materialize(7), 0);
+SELECT overlay(materialize('Spark SQL'), 'ANSI ', 7, materialize(0)), overlayUTF8(materialize('Spark SQL和CH'), 'ANSI ', 7, materialize(0));
+SELECT overlay('Spark SQL', materialize('ANSI '), materialize(7), 0), overlayUTF8('Spark SQL和CH', materialize('ANSI '), materialize(7), 0);
+SELECT overlay('Spark SQL', materialize('ANSI '), 7, materialize(0)), overlayUTF8('Spark SQL和CH', materialize('ANSI '), 7, materialize(0));
+SELECT overlay('Spark SQL', 'ANSI ', materialize(7), materialize(0)), overlayUTF8('Spark SQL和CH', 'ANSI ', materialize(7), materialize(0));
+SELECT overlay(materialize('Spark SQL'), materialize('ANSI '), materialize(7), 0), overlayUTF8(materialize('Spark SQL和CH'), materialize('ANSI '), materialize(7), 0);
+SELECT overlay(materialize('Spark SQL'), materialize('ANSI '), 7, materialize(0)), overlayUTF8(materialize('Spark SQL和CH'), materialize('ANSI '), 7, materialize(0));
+SELECT overlay(materialize('Spark SQL'), 'ANSI ', materialize(7), materialize(0)), overlayUTF8(materialize('Spark SQL和CH'), 'ANSI ', materialize(7), materialize(0));
+SELECT overlay('Spark SQL', materialize('ANSI '), materialize(7), materialize(0)), overlayUTF8('Spark SQL和CH', materialize('ANSI '), materialize(7), materialize(0));
+SELECT overlay(materialize('Spark SQL'), materialize('ANSI '), materialize(7), materialize(0)), overlayUTF8(materialize('Spark SQL和CH'), materialize('ANSI '), materialize(7), materialize(0));
+
+SELECT 'Test with special offset values';
+WITH number - 12 AS offset SELECT offset, overlay('Spark SQL', '__', offset), overlayUTF8('Spark SQL和CH', '之', offset) FROM numbers(26) ORDER BY number;
+
+SELECT 'Test with special length values';
+WITH number - 1 AS length SELECT length, overlay('Spark SQL', 'ANSI ', 7, length), overlayUTF8('Spark SQL和CH', 'ANSI ', 7, length) FROM numbers(8) ORDER BY number;
+
+SELECT 'Test with special input and replace values';
+SELECT overlay('', '_', 6), overlayUTF8('', '_', 6);
+SELECT overlay('Spark SQL', '', 6), overlayUTF8('Spark SQL和CH', '', 6);
+SELECT overlay('', 'ANSI ', 7, 0), overlayUTF8('', 'ANSI ', 7, 0);
+SELECT overlay('Spark SQL', '', 7, 0), overlayUTF8('Spark SQL和CH', '', 7, 0);
diff --git a/parser/testdata/03205_parallel_replicas_alter_select_ubsan/ast.json b/parser/testdata/03205_parallel_replicas_alter_select_ubsan/ast.json
new file mode 100644
index 000000000..2061aec54
--- /dev/null
+++ b/parser/testdata/03205_parallel_replicas_alter_select_ubsan/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001316472,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/03205_parallel_replicas_alter_select_ubsan/metadata.json b/parser/testdata/03205_parallel_replicas_alter_select_ubsan/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03205_parallel_replicas_alter_select_ubsan/metadata.json
@@ -0,0 +1 @@
+{"todo": true} diff --git a/parser/testdata/03205_parallel_replicas_alter_select_ubsan/query.sql b/parser/testdata/03205_parallel_replicas_alter_select_ubsan/query.sql new file mode 100644 index 000000000..2ec936832 --- /dev/null +++ b/parser/testdata/03205_parallel_replicas_alter_select_ubsan/query.sql @@ -0,0 +1,35 @@ +SET alter_sync = 2; +SET max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_for_non_replicated_merge_tree = true; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t1__fuzz_26; + +CREATE TABLE t1__fuzz_26 (`a` Nullable(Float64), `b` Nullable(Float32), `pk` Int64) ENGINE = MergeTree ORDER BY pk; +CREATE TABLE t1 ( a Float64, b Int64, pk String) Engine = MergeTree() ORDER BY pk; + +ALTER TABLE t1 + (MODIFY COLUMN `a` Float64 TTL toDateTime(b) + toIntervalMonth(viewExplain('EXPLAIN', 'actions = 1', ( + SELECT + toIntervalMonth(1), + 2 + FROM t1__fuzz_26 + GROUP BY + toFixedString('%Prewhere%', 10), + toNullable(12) + WITH ROLLUP + )), 1)) settings allow_experimental_parallel_reading_from_replicas = 1; -- { serverError INCORRECT_RESULT_OF_SCALAR_SUBQUERY } + +ALTER TABLE t1 + (MODIFY COLUMN `a` Float64 TTL toDateTime(b) + toIntervalMonth(viewExplain('EXPLAIN', 'actions = 1', ( + SELECT + toIntervalMonth(1), + 2 + FROM t1__fuzz_26 + GROUP BY + toFixedString('%Prewhere%', 10), + toNullable(12) + WITH ROLLUP + )), 1)) settings allow_experimental_parallel_reading_from_replicas = 0; -- { serverError INCORRECT_RESULT_OF_SCALAR_SUBQUERY } + +DROP TABLE t1; +DROP TABLE t1__fuzz_26; diff --git a/parser/testdata/03205_parallel_window_finctions_and_column_sparse_bug/ast.json b/parser/testdata/03205_parallel_window_finctions_and_column_sparse_bug/ast.json new file mode 100644 index 000000000..092e10fef --- /dev/null +++ b/parser/testdata/03205_parallel_window_finctions_and_column_sparse_bug/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 3)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration c (children 1)" + }, + { + "explain": " DataType Int32" + }, + { + "explain": " ColumnDeclaration d (children 1)" + }, + { + "explain": " DataType Bool" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier c" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001191814, + "rows_read": 11, + "bytes_read": 359 + } +} diff --git a/parser/testdata/03205_parallel_window_finctions_and_column_sparse_bug/metadata.json b/parser/testdata/03205_parallel_window_finctions_and_column_sparse_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03205_parallel_window_finctions_and_column_sparse_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03205_parallel_window_finctions_and_column_sparse_bug/query.sql b/parser/testdata/03205_parallel_window_finctions_and_column_sparse_bug/query.sql new file mode 100644 index 000000000..5be858442 --- /dev/null +++ b/parser/testdata/03205_parallel_window_finctions_and_column_sparse_bug/query.sql @@ -0,0 +1,33 @@ +create table t(c Int32, d Bool) Engine=MergeTree order by c; +system stop merges t; + +insert into t values (1, 0); +insert into t values (1, 0); +insert into t values (1, 1); +insert into 
t values (1, 0)(1, 1); + +SELECT d, c, row_number() over (partition by d order by c) as c8 FROM t qualify c8=1 order by d settings max_threads=2, enable_analyzer = 1; +SELECT '---'; +SELECT d, c, row_number() over (partition by d order by c) as c8 FROM t order by d, c8 settings max_threads=2; +SELECT '---'; + +drop table t; + +create table t ( + c Int32 primary key , + s Bool , + w Float64 + ); + +system stop merges t; + +insert into t values(439499072,true,0),(1393290072,true,0); +insert into t values(-1317193174,false,0),(1929066636,false,0); +insert into t values(-2,false,0),(1962246186,true,0),(2054878592,false,0); +insert into t values(-1893563136,true,41.55); +insert into t values(-1338380855,true,-0.7),(-991301833,true,0),(-755809149,false,43.18),(-41,true,0),(3,false,0),(255,false,0),(255,false,0),(189195893,false,0),(195550885,false,9223372036854776000); + +SELECT * FROM ( +SELECT c, min(w) OVER (PARTITION BY s ORDER BY c ASC, s ASC, w ASC) +FROM t limit toUInt64(-1)) +WHERE c = -755809149; diff --git a/parser/testdata/03205_system_sync_replica_format/ast.json b/parser/testdata/03205_system_sync_replica_format/ast.json new file mode 100644 index 000000000..6d1d0c4d0 --- /dev/null +++ b/parser/testdata/03205_system_sync_replica_format/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function formatQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'SYSTEM SYNC REPLICA db.table LIGHTWEIGHT'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001271965, + "rows_read": 7, + "bytes_read": 298 + } +} diff --git a/parser/testdata/03205_system_sync_replica_format/metadata.json b/parser/testdata/03205_system_sync_replica_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03205_system_sync_replica_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03205_system_sync_replica_format/query.sql b/parser/testdata/03205_system_sync_replica_format/query.sql new file mode 100644 index 000000000..329bce80a --- /dev/null +++ b/parser/testdata/03205_system_sync_replica_format/query.sql @@ -0,0 +1 @@ +SELECT formatQuery('SYSTEM SYNC REPLICA db.table LIGHTWEIGHT'); diff --git a/parser/testdata/03206_is_null_constant_result_old_analyzer_bug/ast.json b/parser/testdata/03206_is_null_constant_result_old_analyzer_bug/ast.json new file mode 100644 index 000000000..767f4efee --- /dev/null +++ b/parser/testdata/03206_is_null_constant_result_old_analyzer_bug/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery left (children 3)" + }, + { + "explain": " Identifier left" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType UUID" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001032549, + "rows_read": 9, + "bytes_read": 308 + } +} diff --git 
a/parser/testdata/03206_is_null_constant_result_old_analyzer_bug/metadata.json b/parser/testdata/03206_is_null_constant_result_old_analyzer_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03206_is_null_constant_result_old_analyzer_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03206_is_null_constant_result_old_analyzer_bug/query.sql b/parser/testdata/03206_is_null_constant_result_old_analyzer_bug/query.sql new file mode 100644 index 000000000..19b5485aa --- /dev/null +++ b/parser/testdata/03206_is_null_constant_result_old_analyzer_bug/query.sql @@ -0,0 +1,23 @@ +CREATE TABLE left (x UUID) ORDER BY tuple(); + +CREATE TABLE right (x UUID) ORDER BY tuple(); + +SET enable_analyzer=0; + +SELECT left.x, (right.x IS NULL)::Boolean FROM left LEFT OUTER JOIN right ON left.x = right.x GROUP BY ALL; + +SELECT isNullable(number)::Boolean, now() FROM numbers(2) GROUP BY isNullable(number)::Boolean, now() FORMAT Null; + +SELECT isNull(number)::Boolean, now() FROM numbers(2) GROUP BY isNull(number)::Boolean, now() FORMAT Null; + +SELECT (number IS NULL)::Boolean, now() FROM numbers(2) GROUP BY (number IS NULL)::Boolean, now() FORMAT Null; + +SET enable_analyzer=1; + +SELECT left.x, (right.x IS NULL)::Boolean FROM left LEFT OUTER JOIN right ON left.x = right.x GROUP BY ALL; + +SELECT isNullable(number)::Boolean, now() FROM numbers(2) GROUP BY isNullable(number)::Boolean, now() FORMAT Null; + +SELECT isNull(number)::Boolean, now() FROM numbers(2) GROUP BY isNull(number)::Boolean, now() FORMAT Null; + +SELECT (number IS NULL)::Boolean, now() FROM numbers(2) GROUP BY (number IS NULL)::Boolean, now() FORMAT Null; diff --git a/parser/testdata/03206_projection_merge_special_mergetree/ast.json b/parser/testdata/03206_projection_merge_special_mergetree/ast.json new file mode 100644 index 000000000..c55fda0c8 --- /dev/null +++ b/parser/testdata/03206_projection_merge_special_mergetree/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tp (children 1)" + }, + { + "explain": " Identifier tp" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001126303, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03206_projection_merge_special_mergetree/metadata.json b/parser/testdata/03206_projection_merge_special_mergetree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03206_projection_merge_special_mergetree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03206_projection_merge_special_mergetree/query.sql b/parser/testdata/03206_projection_merge_special_mergetree/query.sql new file mode 100644 index 000000000..3fafb28cb --- /dev/null +++ b/parser/testdata/03206_projection_merge_special_mergetree/query.sql @@ -0,0 +1,104 @@ +DROP TABLE IF EXISTS tp; + +-- test regular merge tree +CREATE TABLE tp ( + type Int32, + eventcnt UInt64, + PROJECTION p (select sum(eventcnt), type group by type) +) engine = MergeTree order by type; + +INSERT INTO tp SELECT number%3, 1 FROM numbers(3); + +OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError SUPPORT_IS_DISABLED } + +DROP TABLE tp; + +CREATE TABLE tp ( + type Int32, + eventcnt UInt64, + PROJECTION p (select sum(eventcnt), type group by type) +) engine = MergeTree order by type +SETTINGS deduplicate_merge_projection_mode = 'drop'; + +INSERT INTO tp SELECT number%3, 1 FROM numbers(3); + +OPTIMIZE TABLE tp DEDUPLICATE; 
+ +ALTER TABLE tp MODIFY SETTING deduplicate_merge_projection_mode = 'throw'; + +OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError SUPPORT_IS_DISABLED } + +DROP TABLE tp; + + +-- test irregular merge tree +CREATE TABLE tp ( + type Int32, + eventcnt UInt64, + PROJECTION p (select sum(eventcnt), type group by type) +) engine = ReplacingMergeTree order by type; -- { serverError SUPPORT_IS_DISABLED } + +CREATE TABLE tp ( + type Int32, + eventcnt UInt64, + PROJECTION p (select sum(eventcnt), type group by type) +) engine = ReplacingMergeTree order by type +SETTINGS deduplicate_merge_projection_mode = 'throw'; -- { serverError SUPPORT_IS_DISABLED } + +CREATE TABLE tp ( + type Int32, + eventcnt UInt64, + PROJECTION p (select sum(eventcnt), type group by type) +) engine = ReplacingMergeTree order by type +SETTINGS deduplicate_merge_projection_mode = 'drop'; + +INSERT INTO tp SELECT number%3, 1 FROM numbers(3); + +OPTIMIZE TABLE tp FINAL; + +-- expecting no projection +SELECT + name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'tp') AND (active = 1); + +ALTER TABLE tp MODIFY SETTING deduplicate_merge_projection_mode = 'throw'; + +OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError SUPPORT_IS_DISABLED } + +DROP TABLE tp; + +CREATE TABLE tp ( + type Int32, + eventcnt UInt64, + PROJECTION p (select sum(eventcnt), type group by type) +) engine = ReplacingMergeTree order by type +SETTINGS deduplicate_merge_projection_mode = 'rebuild'; + +ALTER TABLE tp MODIFY SETTING deduplicate_merge_projection_mode = 'throw'; + +OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError SUPPORT_IS_DISABLED } + +DROP TABLE tp; + +-- test alter add projection case +CREATE TABLE tp ( + type Int32, + eventcnt UInt64 +) engine = ReplacingMergeTree order by type; + +ALTER TABLE tp ADD PROJECTION p (SELECT sum(eventcnt), type GROUP BY type); -- { serverError SUPPORT_IS_DISABLED } + +ALTER TABLE tp MODIFY SETTING deduplicate_merge_projection_mode = 'drop'; + +ALTER TABLE tp ADD PROJECTION p (SELECT sum(eventcnt), type GROUP BY type); + +INSERT INTO tp SELECT number%3, 1 FROM numbers(3); + +-- expecting projection p +SELECT + name +FROM system.projection_parts +WHERE (database = currentDatabase()) AND (`table` = 'tp') AND (active = 1); + +DROP TABLE tp; \ No newline at end of file diff --git a/parser/testdata/03206_projection_merge_special_mergetree_ignore/ast.json b/parser/testdata/03206_projection_merge_special_mergetree_ignore/ast.json new file mode 100644 index 000000000..73583e4d2 --- /dev/null +++ b/parser/testdata/03206_projection_merge_special_mergetree_ignore/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tp (children 1)" + }, + { + "explain": " Identifier tp" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001164987, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03206_projection_merge_special_mergetree_ignore/metadata.json b/parser/testdata/03206_projection_merge_special_mergetree_ignore/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03206_projection_merge_special_mergetree_ignore/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03206_projection_merge_special_mergetree_ignore/query.sql b/parser/testdata/03206_projection_merge_special_mergetree_ignore/query.sql new file mode 100644 index 000000000..c98dced7c --- /dev/null +++ 
b/parser/testdata/03206_projection_merge_special_mergetree_ignore/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS tp; + +CREATE TABLE tp ( + type Int32, + eventcnt UInt64, + PROJECTION p (select sum(eventcnt), type group by type) +) engine = ReplacingMergeTree order by type +SETTINGS deduplicate_merge_projection_mode = 'ignore'; + +INSERT INTO tp SELECT number%3, 1 FROM numbers(3); +INSERT INTO tp SELECT number%3, 2 FROM numbers(3); + +OPTIMIZE TABLE tp DEDUPLICATE; -- { serverError SUPPORT_IS_DISABLED } + +OPTIMIZE TABLE tp FINAL; + +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +SET optimize_use_projections = false, force_optimize_projection = false; + +SELECT sum(eventcnt) eventcnt, type +FROM tp +GROUP BY type +ORDER BY eventcnt, type; + +SET optimize_use_projections = true, force_optimize_projection = true; + +SELECT sum(eventcnt) eventcnt, type +FROM tp +GROUP BY type +ORDER BY eventcnt, type; + +DROP TABLE tp; diff --git a/parser/testdata/03206_replication_lag_metric/ast.json b/parser/testdata/03206_replication_lag_metric/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03206_replication_lag_metric/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03206_replication_lag_metric/metadata.json b/parser/testdata/03206_replication_lag_metric/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03206_replication_lag_metric/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03206_replication_lag_metric/query.sql b/parser/testdata/03206_replication_lag_metric/query.sql new file mode 100644 index 000000000..998c332a1 --- /dev/null +++ b/parser/testdata/03206_replication_lag_metric/query.sql @@ -0,0 +1,11 @@ +-- Tags: no-parallel + +CREATE DATABASE rdb1 ENGINE = Replicated('/test/test_replication_lag_metric', 'shard1', 'replica1'); +CREATE DATABASE rdb2 ENGINE = Replicated('/test/test_replication_lag_metric', 'shard1', 'replica2'); + +SET distributed_ddl_task_timeout = 0; +CREATE TABLE rdb1.t (id UInt32) ENGINE = ReplicatedMergeTree ORDER BY id; +SELECT replication_lag FROM system.clusters WHERE cluster IN ('rdb1', 'rdb2') ORDER BY cluster ASC, replica_num ASC; + +DROP DATABASE rdb1; +DROP DATABASE rdb2; diff --git a/parser/testdata/03207_composite_expressions_lambda_consistent_formatting/ast.json b/parser/testdata/03207_composite_expressions_lambda_consistent_formatting/ast.json new file mode 100644 index 000000000..61ac07407 --- /dev/null +++ b/parser/testdata/03207_composite_expressions_lambda_consistent_formatting/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001270555, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03207_composite_expressions_lambda_consistent_formatting/metadata.json b/parser/testdata/03207_composite_expressions_lambda_consistent_formatting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03207_composite_expressions_lambda_consistent_formatting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03207_composite_expressions_lambda_consistent_formatting/query.sql b/parser/testdata/03207_composite_expressions_lambda_consistent_formatting/query.sql new file mode 100644 index 000000000..1b0131bf7 --- /dev/null +++
b/parser/testdata/03207_composite_expressions_lambda_consistent_formatting/query.sql @@ -0,0 +1,7 @@ +SET enable_analyzer = 1; +SELECT [1, (x -> 1)]; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT (1, (x -> 1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT map(1, (x -> 1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT [1, lambda(x, 1)]; -- { serverError UNKNOWN_IDENTIFIER } +SELECT (1, lambda(x, 1)); -- { serverError UNKNOWN_IDENTIFIER } +SELECT map(1, lambda(x, 1)); -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/03207_json_read_subcolumns_1_memory/ast.json b/parser/testdata/03207_json_read_subcolumns_1_memory/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03207_json_read_subcolumns_1_memory/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03207_json_read_subcolumns_1_memory/metadata.json b/parser/testdata/03207_json_read_subcolumns_1_memory/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03207_json_read_subcolumns_1_memory/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03207_json_read_subcolumns_1_memory/query.sql b/parser/testdata/03207_json_read_subcolumns_1_memory/query.sql new file mode 100644 index 000000000..b8be6de4d --- /dev/null +++ b/parser/testdata/03207_json_read_subcolumns_1_memory/query.sql @@ -0,0 +1,87 @@ +-- Tags: no-fasttest, long +SET enable_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type=1; +set session_timezone = 'UTC'; + +drop table if exists test; +create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=Memory; + +truncate table test; +insert into test select number, '{}' from numbers(5); +insert into test select number, toJSONString(map('a.b.c', number)) from numbers(5, 5); +insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(10, 5); +insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(15, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(20, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number), number::UInt32)) from numbers(25, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(30, 5); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(35, 5); + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; + +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_25`, json.b.b.`_25`.:Int64, json.b.b.`_25`.:UUID, json.b.b.`_26`, json.b.b.`_26`.:Int64, json.b.b.`_26`.:UUID, json.b.b.`_27`, json.b.b.`_27`.:Int64, json.b.b.`_27`.:UUID, json.b.b.`_28`, json.b.b.`_28`.:Int64, json.b.b.`_28`.:UUID, json.b.b.`_29`, json.b.b.`_29`.:Int64, json.b.b.`_29`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, 
json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_25`, json.b.b.`_25`.:Int64, json.b.b.`_25`.:UUID, json.b.b.`_26`, json.b.b.`_26`.:Int64, json.b.b.`_26`.:UUID, json.b.b.`_27`, json.b.b.`_27`.:Int64, json.b.b.`_27`.:UUID, json.b.b.`_28`, json.b.b.`_28`.:Int64, json.b.b.`_28`.:UUID, json.b.b.`_29`, json.b.b.`_29`.:Int64, json.b.b.`_29`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format JSONColumns; + +select json.non.existing.path from test order by id format JSONColumns; +select json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json, json.non.existing.path from test order by id format JSONColumns; +select json, json.non.existing.path.:Int64 from test order by id format JSONColumns; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format JSONColumns; + +select json.a.b.c from test order by id format JSONColumns; +select json, json.a.b.c from test order by id format JSONColumns; + +select json.b.b.e from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json, json.b.b.e from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.a.b.d from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.a.b.d from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.d.a from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.d.a from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format 
JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format JSONColumns; + +select json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.d.a, json.d.b from test order by id format JSONColumns; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; + +select json.d.a, json.d.b from test order by id format JSONColumns; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.b from test order by id format JSONColumns; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format JSONColumns; + +select json.d.a, json.b.b.`_26` from test order by id format JSONColumns; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json, json.d.a, json.b.b.`_26` from test order by id format JSONColumns; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b.`_26`.:Date from test order by id format JSONColumns; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_26`.:Int64, json.b.b, json.b.b.`_26`.:Date from test order by id format JSONColumns; + +select json.^a, json.a.b.c from test order by id format JSONColumns; +select json, json.^a, json.a.b.c from test order by id format JSONColumns; + +select json.^a, json.a.b.d from test order by id format JSONColumns; +select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.^a, json.a.b.d from test order by id format JSONColumns; +select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format JSONColumns; + +drop table test; diff --git a/parser/testdata/03207_json_read_subcolumns_2_memory/ast.json b/parser/testdata/03207_json_read_subcolumns_2_memory/ast.json new file mode 100644 index 
000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03207_json_read_subcolumns_2_memory/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03207_json_read_subcolumns_2_memory/metadata.json b/parser/testdata/03207_json_read_subcolumns_2_memory/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03207_json_read_subcolumns_2_memory/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03207_json_read_subcolumns_2_memory/query.sql b/parser/testdata/03207_json_read_subcolumns_2_memory/query.sql new file mode 100644 index 000000000..c7d701723 --- /dev/null +++ b/parser/testdata/03207_json_read_subcolumns_2_memory/query.sql @@ -0,0 +1,123 @@ +-- Tags: no-fasttest, long, no-debug, no-tsan, no-asan, no-msan, no-ubsan + +SET enable_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set session_timezone = 'UTC'; + +drop table if exists test; +create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=Memory; + +truncate table test; +insert into test select number, '{}' from numbers(100000); +insert into test select number, toJSONString(map('a.b.c', number)) from numbers(100000, 100000); +insert into test select number, toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(200000, 100000); +insert into test select number, toJSONString(map('b.b.d', number::UInt32, 'b.b.e', 'str_' || toString(number))) from numbers(300000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(400000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number % 5), number::UInt32)) from numbers(500000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', range(number % + 1)::Array(UInt32), 'a.b.e', 'str_' || toString(number), 'd.a', number::UInt32, 'd.c', toDate(number))) from numbers(600000, 100000); +insert into test select number, toJSONString(map('a.b.c', number, 'a.b.d', toDateTime(number), 'a.b.e', 'str_' || toString(number), 'd.a', range(number % 5 + 1)::Array(UInt32), 'd.b', number::UInt32)) from numbers(700000, 100000); + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; + +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null; +select json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, 
json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test format Null; +select json, json.non.existing.path, json.a.b.c, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:UUID, json.a.b.e, json.a.b.e.:String, json.a.b.e.:UUID, json.b.b.`_0`, json.b.b.`_0`.:Int64, json.b.b.`_0`.:UUID, json.b.b.`_1`, json.b.b.`_1`.:Int64, json.b.b.`_1`.:UUID, json.b.b.`_2`, json.b.b.`_2`.:Int64, json.b.b.`_2`.:UUID, json.b.b.`_3`, json.b.b.`_3`.:Int64, json.b.b.`_3`.:UUID, json.b.b.`_4`, json.b.b.`_4`.:Int64, json.b.b.`_4`.:UUID, json.b.b.d, json.b.b.d.:Int64, json.b.b.d.:UUID, json.b.b.e, json.b.b.e.:String, json.b.b.e.:UUID, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:UUID, json.d.b, json.d.b.:Int64, json.d.b.:UUID, json.d.c, json.d.c.:Date, json.d.c.:UUID, json.^n, json.^a, json.^a.b, json.^b, json.^d from test order by id format Null; + +select count() from test where json.non.existing.path is Null; +select count() from test where json.non.existing.path.:String is Null; +select json.non.existing.path from test order by id format Null; +select json.non.existing.path.:Int64 from test order by id format Null; +select json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null; +select json, json.non.existing.path from test order by id format Null; +select json, json.non.existing.path.:Int64 from test order by id format Null; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test format Null; +select json, json.non.existing.path, json.non.existing.path.:Int64 from test order by id format Null; + +select count() from test where json.a.b.c == 0; +select json.a.b.c from test format Null; +select json.a.b.c from test order by id format Null; +select json, json.a.b.c from test format Null; +select json, json.a.b.c from test order by id format Null; + +select count() from test where json.b.b.e is Null; +select count() from test where json.b.b.e.:String is Null; +select json.b.b.e from test format Null; +select json.b.b.e from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json, json.b.b.e from test format Null; +select json, json.b.b.e from test 
order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.a.b.d is Null ; +select count() from test where json.b.b.e.:String is Null and json.a.b.d.:Int64 is Null; +select json.b.b.e, json.a.b.d from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.b.b.e, json.a.b.d from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.d.a is Null; +select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:`Array(Nullable(Int64))`); +select json.b.b.e, json.d.a from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json, json.b.b.e, json.d.a from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date from test order by id format Null; + +select count() from test where json.b.b.e is Null and json.d.a is Null and json.d.b is Null; +select count() from test where json.b.b.e.:String is Null and empty(json.d.a.:`Array(Nullable(Int64))`) and json.d.b.:Int64 is Null; +select json.b.b.e, json.d.a, json.d.b from test order by id format Null; +select json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.b.b.e, json.d.a, json.d.b from test order by id format Null; +select json, json.b.b.e.:String, json.b.b.e.:Date, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.b.b.e, json.b.b.e.:String, json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null; +select json, json.b.b.e, json.b.b.e.:String, 
json.b.b.e.:Date, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; + +select count() from test where json.d.a is Null and json.d.b is Null; +select count() from test where empty(json.d.a.:`Array(Nullable(Int64))`) and json.d.b.:Int64 is Null; +select json.d.a, json.d.b from test order by id format Null; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.d.a, json.d.b from test order by id format Null; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.d.b, json.d.b.:Int64, json.d.b.:Date from test order by id format Null; + +select count() from test where json.d.a is Null and json.b.b.`_1` is Null; +select count() from test where empty(json.d.a.:`Array(Nullable(Int64))`) and json.b.b.`_1`.:Int64 is Null; +select json.d.a, json.b.b.`_1` from test order by id format Null; +select json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b.`_1`.:Date from test order by id format Null; +select json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test order by id format Null; +select json, json.d.a, json.b.b.`_1` from test order by id format Null; +select json, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b.`_1`.:Date from test order by id format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test format Null; +select json, json.d.a, json.d.a.:`Array(Nullable(Int64))`, json.d.a.:Date, json.b.b.`_1`.:Int64, json.b.b, json.b.b.`_1`.:Date from test order by id format Null; + +select count() from test where empty(json.^a) and json.a.b.c == 0; +select json.^a, json.a.b.c from test order by id format Null; +select json, json.^a, json.a.b.c from test format Null; +select json, json.^a, json.a.b.c from test order by id format Null; + +select count() from test where empty(json.^a) and json.a.b.d is Null; +select json.^a, json.a.b.d from test order by id format Null; +select json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.^a, json.a.b.d from test order by id format Null; +select json, json.^a, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test format Null; +select json, json.^a, json.a.b.d, json.a.b.d.:Int64, json.a.b.d.:Date from test order by id format Null; + +drop table test; diff --git a/parser/testdata/03208_array_of_json_read_subcolumns_2_memory/ast.json b/parser/testdata/03208_array_of_json_read_subcolumns_2_memory/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03208_array_of_json_read_subcolumns_2_memory/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/03208_array_of_json_read_subcolumns_2_memory/metadata.json b/parser/testdata/03208_array_of_json_read_subcolumns_2_memory/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03208_array_of_json_read_subcolumns_2_memory/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03208_array_of_json_read_subcolumns_2_memory/query.sql b/parser/testdata/03208_array_of_json_read_subcolumns_2_memory/query.sql new file mode 100644 index 000000000..d6b21dfa6 --- /dev/null +++ b/parser/testdata/03208_array_of_json_read_subcolumns_2_memory/query.sql @@ -0,0 +1,51 @@ + +SET enable_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; + +drop table if exists test; +create table test (id UInt64, json JSON(max_dynamic_paths=8, a.b Array(JSON))) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; + +insert into test select number, '{}' from numbers(10000); +insert into test select number, toJSONString(map('a.b', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(10000, 10000); +insert into test select number, toJSONString(map('a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(20000, 10000); +insert into test select number, toJSONString(map('a.a1', number, 'a.a2', number, 'a.a3', number, 'a.a4', number, 'a.a5', number, 'a.a6', number, 'a.a7', number, 'a.a8', number, 'a.r', arrayMap(x -> map('b.c.d_' || toString(x), number::UInt32, 'c.d.e', range((number + x) % 5 + 1)), range(number % 5 + 1)))) from numbers(30000, 10000); + +select distinct arrayJoin(JSONAllPathsWithTypes(json)) as paths_with_types from test order by paths_with_types; +select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.b))) as paths_with_types from test order by paths_with_types; +select distinct arrayJoin(JSONAllPathsWithTypes(arrayJoin(json.a.r[]))) as paths_with_types from test order by paths_with_types; + +select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null; +select json, json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null; +select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test format Null; +select json.a.b, json.a.b.c, json.a.b.c.d.e, json.a.b.b.c.d_0, json.a.b.b.c.d_1, json.a.b.b.c.d_2, json.a.b.b.c.d_3, json.a.b.b.c.d_4, json.a.r, json.a.r[], json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1, json.a.r[].b.c.d_2, json.a.r[].b.c.d_3, json.a.r[].b.c.d_4, json.^a, json.a.b.^b.c, json.a.r[].^b.c from test order by id format Null; + 
+select count() from test where empty(json.a.r[].c.d.e) and empty(json.a.r[].b.c.d_0) and empty(json.a.r[].b.c.d_1); +select count() from test where empty(json.a.r[].c.d.e.:`Array(Nullable(Int64))`) and empty(json.a.r[].b.c.d_0.:Int64) and empty(json.a.r[].b.c.d_1.:Int64); +select count() from test where arrayJoin(json.a.r[].c.d.e) is null and arrayJoin(json.a.r[].b.c.d_0) is null and arrayJoin(json.a.r[].b.c.d_1) is null; +select count() from test where arrayJoin(json.a.r[].c.d.e.:`Array(Nullable(Int64))`) is null and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null and arrayJoin(json.a.r[].b.c.d_1.:Int64) is null; + +select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null; +select json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null; +select json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null; +select json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null; +select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test format Null; +select json.a.r, json.a.r[].c.d.e, json.a.r[].b.c.d_0, json.a.r[].b.c.d_1 from test order by id format Null; +select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test format Null; +select json.a.r, json.a.r[].c.d.e.:`Array(Nullable(Int64))`, json.a.r[].b.c.d_0.:Int64, json.a.r[].b.c.d_1.:Int64 from test order by id format Null; + +select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0); +select count() from test where empty(json.a.r[].^b) and empty(json.a.r[].^b.c) and empty(json.a.r[].b.c.d_0.:Int64); +select count() from test where empty(arrayJoin(json.a.r[].^b)) and empty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0) is null; +select count() from test where empty(arrayJoin(json.a.r[].^b)) and empty(arrayJoin(json.a.r[].^b.c)) and arrayJoin(json.a.r[].b.c.d_0.:Int64) is null; + +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null; +select json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0 from test order by id format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test format Null; +select json.a.r, json.a.r[].^b, json.a.r[].^b.c, json.a.r[].b.c.d_0.:Int64 from test order by id format Null; + +drop table test; diff --git a/parser/testdata/03208_buffer_over_distributed_type_mismatch/ast.json b/parser/testdata/03208_buffer_over_distributed_type_mismatch/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03208_buffer_over_distributed_type_mismatch/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03208_buffer_over_distributed_type_mismatch/metadata.json b/parser/testdata/03208_buffer_over_distributed_type_mismatch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03208_buffer_over_distributed_type_mismatch/metadata.json @@ -0,0 +1 @@ +{"todo": true} 
diff --git a/parser/testdata/03208_buffer_over_distributed_type_mismatch/query.sql b/parser/testdata/03208_buffer_over_distributed_type_mismatch/query.sql new file mode 100644 index 000000000..3da04dcb8 --- /dev/null +++ b/parser/testdata/03208_buffer_over_distributed_type_mismatch/query.sql @@ -0,0 +1,76 @@ + +DROP TABLE IF EXISTS realtimedrep; +CREATE TABLE realtimedrep (`amount` Int32) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO realtimedrep FORMAT Values (100); + +DROP TABLE IF EXISTS realtimedistributed; +CREATE TABLE realtimedistributed (`amount` Int32) ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), realtimedrep, rand()); + +DROP TABLE IF EXISTS realtimebuff__fuzz_19; +CREATE TABLE realtimebuff__fuzz_19 (`amount` UInt32) ENGINE = Buffer(currentDatabase(), 'realtimedistributed', 16, 3600, 36000, 10000, 1000000, 10000000, 100000000); +INSERT INTO realtimebuff__fuzz_19 FORMAT Values (101); + +DROP TABLE IF EXISTS realtimebuff__fuzz_20; +CREATE TABLE realtimebuff__fuzz_20 (`amount` Nullable(Int32)) ENGINE = Buffer(currentDatabase(), 'realtimedistributed', 16, 3600, 36000, 10000, 1000000, 10000000, 100000000); +INSERT INTO realtimebuff__fuzz_20 FORMAT Values (101); + +SELECT amount FROM realtimebuff__fuzz_19 t1 ORDER BY ALL; +SELECT amount + 1 FROM realtimebuff__fuzz_19 t1 ORDER BY ALL; +SELECT amount + 1 FROM realtimebuff__fuzz_20 t1 ORDER BY ALL; +SELECT sum(amount) = 100 FROM realtimebuff__fuzz_19 ORDER BY ALL; -- { serverError CANNOT_CONVERT_TYPE } +SELECT sum(amount) = 100 FROM realtimebuff__fuzz_20 ORDER BY ALL; -- { serverError CANNOT_CONVERT_TYPE } + +SELECT amount FROM realtimebuff__fuzz_19 t1 +JOIN (SELECT number :: UInt32 AS amount FROM numbers(3) ) t2 ON t1.amount = t2.amount +ORDER BY ALL +SETTINGS enable_analyzer = 0; -- { serverError UNKNOWN_IDENTIFIER } + +SELECT amount FROM realtimebuff__fuzz_19 t1 +JOIN (SELECT number :: UInt32 AS amount FROM numbers(3) ) t2 ON t1.amount = t2.amount +ORDER BY ALL +SETTINGS enable_analyzer = 1; + +SELECT amount FROM realtimebuff__fuzz_19 t1 +JOIN (SELECT number :: UInt32 AS amount FROM numbers(300) ) t2 ON t1.amount = t2.amount +ORDER BY ALL +SETTINGS enable_analyzer = 0; -- { serverError UNKNOWN_IDENTIFIER } + +SELECT amount FROM realtimebuff__fuzz_19 t1 +JOIN (SELECT number :: UInt32 AS amount FROM numbers(300) ) t2 ON t1.amount = t2.amount +ORDER BY ALL +SETTINGS enable_analyzer = 1; + +SELECT t2.amount + 1 FROM (SELECT number :: UInt32 AS amount FROM numbers(300) ) t1 +JOIN realtimebuff__fuzz_19 t2 USING (amount) +ORDER BY ALL +; + +SELECT t2.amount + 1 FROM (SELECT number :: UInt32 AS amount FROM numbers(300) ) t1 +JOIN realtimebuff__fuzz_19 t2 ON t1.amount = t2.amount +ORDER BY ALL +; + +SELECT amount FROM realtimebuff__fuzz_19 t1 +JOIN realtimebuff__fuzz_19 t2 ON t1.amount = t2.amount +; -- { serverError NOT_IMPLEMENTED,UNKNOWN_IDENTIFIER } + +SELECT amount FROM realtimebuff__fuzz_19 t1 +JOIN realtimebuff__fuzz_19 t2 ON t1.amount = t2.amount +JOIN realtimebuff__fuzz_19 t3 ON t1.amount = t3.amount +; -- { serverError NOT_IMPLEMENTED,AMBIGUOUS_COLUMN_NAME } + + +-- fuzzers: + +SELECT + toLowCardinality(1) + materialize(toLowCardinality(2)) +FROM realtimebuff__fuzz_19 +GROUP BY toLowCardinality(1) +FORMAT Null +; + +SELECT intDivOrZero(intDivOrZero(toLowCardinality(-128), toLowCardinality(-1)) = 0, materialize(toLowCardinality(4))) +FROM realtimebuff__fuzz_19 GROUP BY materialize(toLowCardinality(-127)), intDivOrZero(0, 0) = toLowCardinality(toLowCardinality(0)) +WITH TOTALS ORDER BY ALL DESC NULLS FIRST 
+FORMAT Null +; diff --git a/parser/testdata/03208_datetime_cast_losing_precision/ast.json b/parser/testdata/03208_datetime_cast_losing_precision/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03208_datetime_cast_losing_precision/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03208_datetime_cast_losing_precision/metadata.json b/parser/testdata/03208_datetime_cast_losing_precision/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03208_datetime_cast_losing_precision/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03208_datetime_cast_losing_precision/query.sql b/parser/testdata/03208_datetime_cast_losing_precision/query.sql new file mode 100644 index 000000000..2e2c7009c --- /dev/null +++ b/parser/testdata/03208_datetime_cast_losing_precision/query.sql @@ -0,0 +1,43 @@ +WITH toDateTime('2024-10-16 18:00:30') as t +SELECT toDateTime64(t, 3) + interval 100 milliseconds IN (SELECT t) settings transform_null_in=0; + +WITH toDateTime('2024-10-16 18:00:30') as t +SELECT toDateTime64(t, 3) + interval 100 milliseconds IN (SELECT t) settings transform_null_in=1; + +WITH toDateTime('1970-01-01 00:00:01') as t +SELECT toDateTime64(t, 3) + interval 100 milliseconds IN (now(), Null) settings transform_null_in=1; + +WITH toDateTime('1970-01-01 00:00:01') as t +SELECT toDateTime64(t, 3) + interval 100 milliseconds IN (now(), Null) settings transform_null_in=0; + +WITH toDateTime('1970-01-01 00:00:01') as t, + arrayJoin([Null, toDateTime64(t, 3) + interval 100 milliseconds]) as x +SELECT x IN (now(), Null) settings transform_null_in=0; + +WITH toDateTime('1970-01-01 00:00:01') as t, + arrayJoin([Null, toDateTime64(t, 3) + interval 100 milliseconds]) as x +SELECT x IN (now(), Null) settings transform_null_in=1; + +WITH toDateTime('2024-10-16 18:00:30') as t +SELECT ( + SELECT + toDateTime64(t, 3) + interval 100 milliseconds, + toDateTime64(t, 3) + interval 101 milliseconds +) +IN ( + SELECT + t, + t +) SETTINGS transform_null_in=0; + +WITH toDateTime('2024-10-16 18:00:30') as t +SELECT ( + SELECT + toDateTime64(t, 3) + interval 100 milliseconds, + toDateTime64(t, 3) + interval 101 milliseconds +) +IN ( + SELECT + t, + t +) SETTINGS transform_null_in=1; diff --git a/parser/testdata/03208_groupArrayIntersect_serialization/ast.json b/parser/testdata/03208_groupArrayIntersect_serialization/ast.json new file mode 100644 index 000000000..eec68d86f --- /dev/null +++ b/parser/testdata/03208_groupArrayIntersect_serialization/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function hex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function groupArrayIntersectState (alias a) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_[UInt64_1]" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001504561, + "rows_read": 12, + "bytes_read": 488 + } +} diff --git a/parser/testdata/03208_groupArrayIntersect_serialization/metadata.json 
b/parser/testdata/03208_groupArrayIntersect_serialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03208_groupArrayIntersect_serialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03208_groupArrayIntersect_serialization/query.sql b/parser/testdata/03208_groupArrayIntersect_serialization/query.sql new file mode 100644 index 000000000..1b3d48ce0 --- /dev/null +++ b/parser/testdata/03208_groupArrayIntersect_serialization/query.sql @@ -0,0 +1,43 @@ +SELECT hex(groupArrayIntersectState([1]) AS a), toTypeName(a); +SELECT finalizeAggregation(CAST(unhex('010101'), 'AggregateFunction(groupArrayIntersect, Array(UInt8))')); + +DROP TABLE IF EXISTS grouparray; +CREATE TABLE grouparray +( + `v` AggregateFunction(groupArrayIntersect, Array(UInt8)) +) +ENGINE = Log; + +INSERT INTO grouparray Select groupArrayIntersectState([2, 4, 6, 8, 10]::Array(UInt8)); +SELECT '1', arraySort(groupArrayIntersectMerge(v)) FROM grouparray; +INSERT INTO grouparray Select groupArrayIntersectState([2, 4, 6, 8, 10]::Array(UInt8)); +SELECT '2', arraySort(groupArrayIntersectMerge(v)) FROM grouparray; +INSERT INTO grouparray Select groupArrayIntersectState([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]::Array(UInt8)); +SELECT '3', arraySort(groupArrayIntersectMerge(v)) FROM grouparray; +INSERT INTO grouparray Select groupArrayIntersectState([2, 6, 10]::Array(UInt8)); +SELECT '5', arraySort(groupArrayIntersectMerge(v)) FROM grouparray; +INSERT INTO grouparray Select groupArrayIntersectState([10]::Array(UInt8)); +SELECT '6', arraySort(groupArrayIntersectMerge(v)) FROM grouparray; +INSERT INTO grouparray Select groupArrayIntersectState([]::Array(UInt8)); +SELECT '7', arraySort(groupArrayIntersectMerge(v)) FROM grouparray; + +DROP TABLE IF EXISTS grouparray; + + +DROP TABLE IF EXISTS grouparray_string; +CREATE TABLE grouparray_string +( + `v` AggregateFunction(groupArrayIntersect, Array(Tuple(Array(String)))) +) +ENGINE = Log; + +INSERT INTO grouparray_string Select groupArrayIntersectState([tuple(['2', '4', '6', '8', '10'])]); +SELECT 'a', arraySort(groupArrayIntersectMerge(v)) FROM grouparray_string; +INSERT INTO grouparray_string Select groupArrayIntersectState([tuple(['2', '4', '6', '8', '10']), tuple(['2', '4', '6', '8', '10'])]); +SELECT 'b', arraySort(groupArrayIntersectMerge(v)) FROM grouparray_string; +INSERT INTO grouparray_string Select groupArrayIntersectState([tuple(['2', '4', '6', '8', '10']), tuple(['2', '4', '6', '8', '10', '14'])]); +SELECT 'c', arraySort(groupArrayIntersectMerge(v)) FROM grouparray_string; +INSERT INTO grouparray_string Select groupArrayIntersectState([tuple(['2', '4', '6', '8', '10', '20']), tuple(['2', '4', '6', '8', '10', '14'])]); +SELECT 'd', arraySort(groupArrayIntersectMerge(v)) FROM grouparray_string; +INSERT INTO grouparray_string Select groupArrayIntersectState([]::Array(Tuple(Array(String)))); +SELECT 'e', arraySort(groupArrayIntersectMerge(v)) FROM grouparray_string; diff --git a/parser/testdata/03208_multiple_joins_with_storage_join/ast.json b/parser/testdata/03208_multiple_joins_with_storage_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03208_multiple_joins_with_storage_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03208_multiple_joins_with_storage_join/metadata.json b/parser/testdata/03208_multiple_joins_with_storage_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03208_multiple_joins_with_storage_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03208_multiple_joins_with_storage_join/query.sql b/parser/testdata/03208_multiple_joins_with_storage_join/query.sql new file mode 100644 index 000000000..26fedf826 --- /dev/null +++ b/parser/testdata/03208_multiple_joins_with_storage_join/query.sql @@ -0,0 +1,84 @@ +-- Tags: no-parallel-replicas + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab ( `k` Nullable(UInt32), `k1` Nullable(UInt32), `k2` Nullable(UInt32), `v` String ) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO tab VALUES (1, 1, 1, 'a'), (2, 2, 2, 'b'); + +DROP TABLE IF EXISTS mem; +CREATE TABLE mem ( `k` UInt64, `v` String ) ENGINE = Join(ANY, LEFT, k); +INSERT INTO mem VALUES (1, 'A'), (2, 'B'), (3, 'B'); + +DROP TABLE IF EXISTS mem2; +CREATE TABLE mem2 ( `k` UInt64, `v` String ) ENGINE = Join(ANY, RIGHT, k); +INSERT INTO mem2 VALUES (1, 'A'), (2, 'B'), (3, 'B'); + +DROP TABLE IF EXISTS mem3; +CREATE TABLE mem3 ( `k` UInt64, `v` String ) ENGINE = Join(ALL, FULL, k) SETTINGS join_use_nulls = 1; +INSERT INTO mem3 VALUES (1, 'A'), (2, 'B'), (3, 'B'); + +DROP TABLE IF EXISTS mem4; +CREATE TABLE mem4 ( `k1` UInt64, `k2` UInt64, `v` String ) ENGINE = Join(ALL, FULL, k1, k2); +INSERT INTO mem4 VALUES (1, 1, 'A'), (2, 2, 'B'), (3, 3, 'B'); + +SET enable_analyzer = 1; + +SELECT '-----'; + +SELECT * +FROM tab +ANY LEFT JOIN mem ON k1 = mem.k +ANY LEFT JOIN mem AS t ON k2 = t.k +ORDER BY tab.v +; + +SELECT '-----'; + +SELECT * +FROM tab +ANY LEFT JOIN mem ON k1 = mem.k +ANY RIGHT JOIN mem2 ON k2 = mem2.k +ORDER BY tab.v +; + +SELECT '-----'; + +SELECT * +FROM tab +FULL JOIN mem3 AS t1 ON k1 = t1.k +FULL JOIN mem3 AS t2 ON k2 = t2.k +ORDER BY tab.v +SETTINGS join_use_nulls = 1 +; +SELECT '-----'; + +SELECT * +FROM tab +FULL JOIN mem4 AS t1 ON tab.k1 = t1.k1 AND tab.k2 = t1.k2 +FULL JOIN mem4 AS t2 ON tab.k1 = t2.k1 AND tab.k2 = t2.k2 +ORDER BY tab.v +; + +SELECT '-----'; + +SELECT * +FROM tab +FULL JOIN mem4 AS t1 USING (k1, k2) +FULL JOIN mem4 AS t2 USING (k1, k2) +ORDER BY tab.v +; + +SELECT '-----'; + +SELECT count() FROM ( + EXPLAIN PLAN + SELECT * FROM tab + ANY LEFT JOIN mem AS t1 ON tab.k = t1.k + ANY LEFT JOIN mem AS t2 ON tab.k = t2.k + ANY LEFT JOIN mem AS t3 ON tab.k = t3.k + ANY LEFT JOIN mem AS t4 ON tab.k = t4.k + ANY RIGHT JOIN mem2 AS t5 ON tab.k = t5.k + ANY LEFT JOIN mem AS t6 ON tab.k = t6.k + ANY LEFT JOIN mem AS t7 ON tab.k = t7.k +) +WHERE explain like '%FilledJoin%' +; diff --git a/parser/testdata/03208_numbers_total_rows_approx/ast.json b/parser/testdata/03208_numbers_total_rows_approx/ast.json new file mode 100644 index 000000000..728322fc1 --- /dev/null +++ b/parser/testdata/03208_numbers_total_rows_approx/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function equals (children 1)" + }, + 
{ + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Set" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001241748, + "rows_read": 20, + "bytes_read": 728 + } +} diff --git a/parser/testdata/03208_numbers_total_rows_approx/metadata.json b/parser/testdata/03208_numbers_total_rows_approx/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03208_numbers_total_rows_approx/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03208_numbers_total_rows_approx/query.sql b/parser/testdata/03208_numbers_total_rows_approx/query.sql new file mode 100644 index 000000000..7855dfb62 --- /dev/null +++ b/parser/testdata/03208_numbers_total_rows_approx/query.sql @@ -0,0 +1 @@ +SELECT number FROM numbers(2, 1) WHERE number % 2 = 0 SETTINGS max_rows_to_read = 10; diff --git a/parser/testdata/03208_uniq_with_empty_tuple/ast.json b/parser/testdata/03208_uniq_with_empty_tuple/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03208_uniq_with_empty_tuple/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03208_uniq_with_empty_tuple/metadata.json b/parser/testdata/03208_uniq_with_empty_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03208_uniq_with_empty_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03208_uniq_with_empty_tuple/query.sql b/parser/testdata/03208_uniq_with_empty_tuple/query.sql new file mode 100644 index 000000000..09eeaf6f3 --- /dev/null +++ b/parser/testdata/03208_uniq_with_empty_tuple/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest +-- https://github.com/ClickHouse/ClickHouse/issues/67303 +SELECT uniqTheta(tuple()); +SELECT uniq(tuple()); diff --git a/parser/testdata/03209_functions_json_msan_fuzzer_issue/ast.json b/parser/testdata/03209_functions_json_msan_fuzzer_issue/ast.json new file mode 100644 index 000000000..f114649b4 --- /dev/null +++ b/parser/testdata/03209_functions_json_msan_fuzzer_issue/ast.json @@ -0,0 +1,199 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '{ \"v\":1.1}' (alias raw)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function JSONExtract (alias float32_1) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier raw" + }, + { + "explain": " Literal 'float'" + }, + { + "explain": " Function JSONExtract (alias float32_2) (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 6)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 11)" + }, + { + "explain": " Literal '1970-01-05'" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + 
"explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Function toUInt256 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toUInt256 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal ', '" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Identifier raw" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'v'" + }, + { + "explain": " Literal 'Float32'" + }, + { + "explain": " Function JSONExtractFloat (alias float64_1) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier raw" + }, + { + "explain": " Function JSONExtract (alias float64_2) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier raw" + }, + { + "explain": " Literal 'v'" + }, + { + "explain": " Literal 'double'" + } + ], + + "rows": 59, + + "statistics": + { + "elapsed": 0.001563779, + "rows_read": 59, + "bytes_read": 2402 + } +} diff --git a/parser/testdata/03209_functions_json_msan_fuzzer_issue/metadata.json b/parser/testdata/03209_functions_json_msan_fuzzer_issue/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03209_functions_json_msan_fuzzer_issue/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03209_functions_json_msan_fuzzer_issue/query.sql b/parser/testdata/03209_functions_json_msan_fuzzer_issue/query.sql new file mode 100644 index 000000000..a05b07d59 --- /dev/null +++ b/parser/testdata/03209_functions_json_msan_fuzzer_issue/query.sql @@ -0,0 +1,2 @@ +WITH '{ "v":1.1}' AS raw SELECT JSONExtract(raw, 'float') AS float32_1, JSONExtract(concat(tuple('1970-01-05', 10, materialize(10), 10, 10, 10, toUInt256(10), 10, toNullable(10), 10, 10), materialize(toUInt256(0)), ', ', 2, 2, toLowCardinality(toLowCardinality(2))), raw, toLowCardinality('v'), 'Float32') AS float32_2, JSONExtractFloat(raw) AS float64_1, JSONExtract(raw, 'v', 'double') AS float64_2; + diff --git a/parser/testdata/03209_parallel_replicas_lost_decimal_conversion/ast.json b/parser/testdata/03209_parallel_replicas_lost_decimal_conversion/ast.json new file mode 100644 index 000000000..859d42817 --- /dev/null +++ b/parser/testdata/03209_parallel_replicas_lost_decimal_conversion/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_03209 (children 1)" + }, + { + "explain": " Identifier t_03209" + } + ], + + "rows": 2, + + 
"statistics": + { + "elapsed": 0.000971831, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/03209_parallel_replicas_lost_decimal_conversion/metadata.json b/parser/testdata/03209_parallel_replicas_lost_decimal_conversion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03209_parallel_replicas_lost_decimal_conversion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03209_parallel_replicas_lost_decimal_conversion/query.sql b/parser/testdata/03209_parallel_replicas_lost_decimal_conversion/query.sql new file mode 100644 index 000000000..bcc9dec30 --- /dev/null +++ b/parser/testdata/03209_parallel_replicas_lost_decimal_conversion/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS t_03209 SYNC; + +CREATE TABLE t_03209 ( `a` Decimal(18, 0), `b` Decimal(18, 1), `c` Decimal(36, 0) ) ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_03209', 'r1') ORDER BY tuple(); +INSERT INTO t_03209 VALUES ('33', '44.4', '35'); + +SET max_parallel_replicas = 2, cluster_for_parallel_replicas='parallel_replicas'; + +SELECT * FROM t_03209 WHERE a IN toDecimal32('33.3000', 4) SETTINGS allow_experimental_parallel_reading_from_replicas=0; +SELECT * FROM t_03209 WHERE a IN toDecimal32('33.3000', 4) SETTINGS allow_experimental_parallel_reading_from_replicas=1; + +DROP TABLE t_03209 SYNC; diff --git a/parser/testdata/03209_parallel_replicas_order_by_all/ast.json b/parser/testdata/03209_parallel_replicas_order_by_all/ast.json new file mode 100644 index 000000000..dc859ce55 --- /dev/null +++ b/parser/testdata/03209_parallel_replicas_order_by_all/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery order_by_all (children 1)" + }, + { + "explain": " Identifier order_by_all" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001202436, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/03209_parallel_replicas_order_by_all/metadata.json b/parser/testdata/03209_parallel_replicas_order_by_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03209_parallel_replicas_order_by_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03209_parallel_replicas_order_by_all/query.sql b/parser/testdata/03209_parallel_replicas_order_by_all/query.sql new file mode 100644 index 000000000..904c01f88 --- /dev/null +++ b/parser/testdata/03209_parallel_replicas_order_by_all/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS order_by_all SYNC; +CREATE TABLE order_by_all +( + a String, + b Nullable(Int32), + all UInt64 +) +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/test_03210', 'r1') ORDER BY tuple(); + +INSERT INTO order_by_all VALUES ('B', 3, 10), ('C', NULL, 40), ('D', 1, 20), ('A', 2, 30); + +SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='parallel_replicas'; +SET enable_analyzer=1; -- fix has been done only for the analyzer +SET enable_order_by_all = 0; + +-- { echoOn } +SELECT a, b, all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = 0, allow_experimental_parallel_reading_from_replicas=0; +SELECT a, b, all FROM order_by_all ORDER BY all SETTINGS enable_order_by_all = 0, allow_experimental_parallel_reading_from_replicas=1; + +DROP TABLE order_by_all SYNC; diff --git a/parser/testdata/03209_parameterized_view_with_non_literal_params/ast.json 
b/parser/testdata/03209_parameterized_view_with_non_literal_params/ast.json new file mode 100644 index 000000000..ae0627466 --- /dev/null +++ b/parser/testdata/03209_parameterized_view_with_non_literal_params/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00127829, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03209_parameterized_view_with_non_literal_params/metadata.json b/parser/testdata/03209_parameterized_view_with_non_literal_params/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03209_parameterized_view_with_non_literal_params/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03209_parameterized_view_with_non_literal_params/query.sql b/parser/testdata/03209_parameterized_view_with_non_literal_params/query.sql new file mode 100644 index 000000000..92ed6f8cf --- /dev/null +++ b/parser/testdata/03209_parameterized_view_with_non_literal_params/query.sql @@ -0,0 +1,99 @@ +SET enable_analyzer = 1; +select 'Test with Date parameter'; + +drop table if exists date_table_pv; +create table date_table_pv (id Int32, dt Date) engine = Memory(); + +insert into date_table_pv values(1, today()); +insert into date_table_pv values(2, yesterday()); +insert into date_table_pv values(3, toDate('1974-04-07')); + +drop view if exists date_pv; +create view date_pv as select * from date_table_pv where dt = {dtparam:Date}; + +select id from date_pv(dtparam=today()); +select id from date_pv(dtparam=yesterday()); +select id from date_pv(dtparam=yesterday()+1); +select id from date_pv(dtparam='1974-04-07'); +select id from date_pv(dtparam=toDate('1974-04-07')); +select id from date_pv(dtparam=toString(toDate('1974-04-07'))); +select id from date_pv(dtparam=toDate('1975-04-07')); +select id from date_pv(dtparam=(select dt from date_table_pv where id = 2)); + +select 'Test with Date32 parameter'; + +drop table if exists date32_table_pv; +create table date32_table_pv (id Int32, dt Date32) engine = Memory(); + +insert into date32_table_pv values(1, today()); +insert into date32_table_pv values(2, yesterday()); +insert into date32_table_pv values(3, toDate32('2199-12-31')); +insert into date32_table_pv values(4, toDate32('1950-12-25')); +insert into date32_table_pv values(5, toDate32('1900-01-01')); + +drop view if exists date32_pv; +create view date32_pv as select * from date32_table_pv where dt = {dtparam:Date32}; + +select id from date32_pv(dtparam=today()); +select id from date32_pv(dtparam=yesterday()); +select id from date32_pv(dtparam=yesterday()+1); +select id from date32_pv(dtparam='2199-12-31'); +select id from date32_pv(dtparam=toDate32('1900-01-01')); +select id from date32_pv(dtparam=(select dt from date32_table_pv where id = 3)); +select id from date32_pv(dtparam=(select dt from date32_table_pv where id = 4)); + + +select 'Test with UUID parameter'; +drop table if exists uuid_table_pv; +create table uuid_table_pv (id Int32, uu UUID) engine = Memory(); + +insert into uuid_table_pv values(1, generateUUIDv4()); +insert into uuid_table_pv values(2, generateUUIDv7()); +insert into uuid_table_pv values(3, toUUID('11111111-2222-3333-4444-555555555555')); +insert into uuid_table_pv select 4, serverUUID(); + + +drop view if exists uuid_pv; +create view uuid_pv as select * from uuid_table_pv where uu = {uuidparam:UUID}; +select id from uuid_pv(uuidparam=serverUUID()); 
+select id from uuid_pv(uuidparam=toUUID('11111111-2222-3333-4444-555555555555')); +select id from uuid_pv(uuidparam='11111111-2222-3333-4444-555555555555'); +select id from uuid_pv(uuidparam=(select uu from uuid_table_pv where id = 1)); +select id from uuid_pv(uuidparam=(select uu from uuid_table_pv where id = 2)); +-- generateUUIDv4() is not constant foldable, hence cannot be used as parameter value +select id from uuid_pv(uuidparam=generateUUIDv4()); -- { serverError UNKNOWN_QUERY_PARAMETER } +-- But nested "select generateUUIDv4()" works! +select id from uuid_pv(uuidparam=(select generateUUIDv4())); + +select 'Test with 2 parameters'; + +drop view if exists date_pv2; +create view date_pv2 as select * from date_table_pv where dt = {dtparam:Date} and id = {intparam:Int32}; +select id from date_pv2(dtparam=today(),intparam=1); +select id from date_pv2(dtparam=today(),intparam=length('A')); +select id from date_pv2(dtparam='1974-04-07',intparam=length('AAA')); +select id from date_pv2(dtparam=toDate('1974-04-07'),intparam=length('BBB')); + +select 'Test with IPv4'; + +drop table if exists ipv4_table_pv; +create table ipv4_table_pv (id Int32, ipaddr IPv4) ENGINE = Memory(); +insert into ipv4_table_pv values (1, '116.106.34.242'); +insert into ipv4_table_pv values (2, '116.106.34.243'); +insert into ipv4_table_pv values (3, '116.106.34.244'); + +drop view if exists ipv4_pv; +create view ipv4_pv as select * from ipv4_table_pv where ipaddr = {ipv4param:IPv4}; +select id from ipv4_pv(ipv4param='116.106.34.242'); +select id from ipv4_pv(ipv4param=toIPv4('116.106.34.243')); +select id from ipv4_pv(ipv4param=(select ipaddr from ipv4_table_pv where id=3)); + +drop view date_pv; +drop view date_pv2; +drop view date32_pv; +drop view uuid_pv; +drop view ipv4_pv; +drop table date_table_pv; +drop table date32_table_pv; +drop table uuid_table_pv; +drop table ipv4_table_pv; diff --git a/parser/testdata/03210_convert_outer_join_to_inner_join_any_join/ast.json b/parser/testdata/03210_convert_outer_join_to_inner_join_any_join/ast.json new file mode 100644 index 000000000..08c4df146 --- /dev/null +++ b/parser/testdata/03210_convert_outer_join_to_inner_join_any_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery user_country (children 1)" + }, + { + "explain": " Identifier user_country" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001201457, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/03210_convert_outer_join_to_inner_join_any_join/metadata.json b/parser/testdata/03210_convert_outer_join_to_inner_join_any_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03210_convert_outer_join_to_inner_join_any_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03210_convert_outer_join_to_inner_join_any_join/query.sql b/parser/testdata/03210_convert_outer_join_to_inner_join_any_join/query.sql new file mode 100644 index 000000000..599875e90 --- /dev/null +++ b/parser/testdata/03210_convert_outer_join_to_inner_join_any_join/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS user_country; +DROP TABLE IF EXISTS user_transactions; + +CREATE TABLE user_country ( + user_id UInt64, + country String +) +ENGINE = ReplacingMergeTree +ORDER BY user_id; + +CREATE TABLE user_transactions ( + user_id UInt64, + transaction_id String +) +ENGINE = MergeTree +ORDER BY user_id; + +INSERT INTO user_country (user_id, country) VALUES 
(1, 'US'); +INSERT INTO user_transactions (user_id, transaction_id) VALUES (1, 'tx1'), (1, 'tx2'), (1, 'tx3'), (2, 'tx1'); + +-- Expected 3 rows, got only 1. Removing 'ANY' and adding 'FINAL' fixes +-- the issue (but it is not always possible). Moving filter by 'country' to +-- an outer query doesn't help. Query without filter by 'country' works +-- as expected (returns 3 rows). +SELECT * FROM user_transactions +ANY LEFT JOIN user_country USING (user_id) +WHERE + user_id = 1 + AND country = 'US' +ORDER BY ALL; + +DROP TABLE user_country; +DROP TABLE user_transactions; diff --git a/parser/testdata/03210_dynamic_squashing/ast.json b/parser/testdata/03210_dynamic_squashing/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03210_dynamic_squashing/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03210_dynamic_squashing/metadata.json b/parser/testdata/03210_dynamic_squashing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03210_dynamic_squashing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03210_dynamic_squashing/query.sql b/parser/testdata/03210_dynamic_squashing/query.sql new file mode 100644 index 000000000..28555088b --- /dev/null +++ b/parser/testdata/03210_dynamic_squashing/query.sql @@ -0,0 +1,26 @@ +-- Tags: long +-- Random settings limits: index_granularity=(100, None) + +set allow_experimental_dynamic_type = 1; +set max_block_size = 1000; + +drop table if exists test; + +create table test (d Dynamic) engine=MergeTree order by tuple(); +insert into test select multiIf(number < 1000, NULL::Dynamic(max_types=1), number < 3000, range(number % 5)::Dynamic(max_types=1), number::Dynamic(max_types=1)) from numbers(100000); +select '1'; +select distinct dynamicType(d) as type, isDynamicElementInSharedData(d) as flag from test order by type; + +drop table test; +create table test (d Dynamic(max_types=1)) engine=MergeTree order by tuple(); +insert into test select multiIf(number < 1000, NULL::Dynamic(max_types=1), number < 3000, range(number % 5)::Dynamic(max_types=1), number::Dynamic(max_types=1)) from numbers(100000); +select '2'; +select distinct dynamicType(d) as type, isDynamicElementInSharedData(d) as flag from test order by type; + +truncate table test; +insert into test select multiIf(number < 1000, 'Str'::Dynamic(max_types=1), number < 3000, range(number % 5)::Dynamic(max_types=1), number::Dynamic(max_types=1)) from numbers(100000); +select '3'; +select distinct dynamicType(d) as type, isDynamicElementInSharedData(d) as flag from test order by type; + +drop table test; + diff --git a/parser/testdata/03210_empty_tuple_lhs_of_in/ast.json b/parser/testdata/03210_empty_tuple_lhs_of_in/ast.json new file mode 100644 index 000000000..8916a3482 --- /dev/null +++ b/parser/testdata/03210_empty_tuple_lhs_of_in/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 
UInt64_1" + }, + { + "explain": " Set" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001495895, + "rows_read": 12, + "bytes_read": 424 + } +} diff --git a/parser/testdata/03210_empty_tuple_lhs_of_in/metadata.json b/parser/testdata/03210_empty_tuple_lhs_of_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03210_empty_tuple_lhs_of_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03210_empty_tuple_lhs_of_in/query.sql b/parser/testdata/03210_empty_tuple_lhs_of_in/query.sql new file mode 100644 index 000000000..bfaf69c14 --- /dev/null +++ b/parser/testdata/03210_empty_tuple_lhs_of_in/query.sql @@ -0,0 +1 @@ +SELECT tuple() IN tuple(1) SETTINGS allow_experimental_map_type = 1; -- { serverError TYPE_MISMATCH } diff --git a/parser/testdata/03210_fix_single_value_data_assertion/ast.json b/parser/testdata/03210_fix_single_value_data_assertion/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03210_fix_single_value_data_assertion/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03210_fix_single_value_data_assertion/metadata.json b/parser/testdata/03210_fix_single_value_data_assertion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03210_fix_single_value_data_assertion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03210_fix_single_value_data_assertion/query.sql b/parser/testdata/03210_fix_single_value_data_assertion/query.sql new file mode 100644 index 000000000..a1243ef0b --- /dev/null +++ b/parser/testdata/03210_fix_single_value_data_assertion/query.sql @@ -0,0 +1,19 @@ +SELECT + intDiv(number, 2) AS k, + sumArgMax(number, number % 20), + sumArgMax(number, leftPad(toString(number % 20), 5, '0')), -- Pad with 0 to preserve number ordering + sumArgMax(number, [number % 20, number % 20]), + sumArgMin(number, number % 20), + sumArgMin(number, leftPad(toString(number % 20), 5, '0')), + sumArgMin(number, [number % 20, number % 20]), +FROM +( + SELECT number + FROM system.numbers + LIMIT 65537 +) +GROUP BY k + WITH TOTALS +ORDER BY k ASC + LIMIT 10 +SETTINGS group_by_overflow_mode = 'any', totals_mode = 'before_having', max_rows_to_group_by = 100000; diff --git a/parser/testdata/03210_lag_lead_inframe_types/ast.json b/parser/testdata/03210_lag_lead_inframe_types/ast.json new file mode 100644 index 000000000..d02f9599c --- /dev/null +++ b/parser/testdata/03210_lag_lead_inframe_types/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function lagInFrame (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2'" + }, + { + "explain": " Literal 'UInt128'" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " 
Literal UInt64_10" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " WindowListElement" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001197974, + "rows_read": 20, + "bytes_read": 756 + } +} diff --git a/parser/testdata/03210_lag_lead_inframe_types/metadata.json b/parser/testdata/03210_lag_lead_inframe_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03210_lag_lead_inframe_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03210_lag_lead_inframe_types/query.sql b/parser/testdata/03210_lag_lead_inframe_types/query.sql new file mode 100644 index 000000000..cc6746e42 --- /dev/null +++ b/parser/testdata/03210_lag_lead_inframe_types/query.sql @@ -0,0 +1,24 @@ +SELECT lagInFrame(2::UInt128, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); +SELECT leadInFrame(2::UInt128, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); +SELECT lagInFrame(2::UInt64, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); +SELECT leadInFrame(2::UInt64, 2, number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number); + +SELECT + number, + YYYYMMDDToDate(1, toLowCardinality(11), max(YYYYMMDDToDate(YYYYMMDDToDate(toLowCardinality(1), 11, materialize(NULL), 19700101.1, 1, 27, 7, materialize(toUInt256(37)), 9, 19, 9), 1, toUInt128(11), NULL, 19700101.1, 1, 27, 7, 37, 9, 19, 9), toUInt256(30)) IGNORE NULLS OVER w, NULL, 19700101.1, toNullable(1), 27, materialize(7), 37, 9, 19, 9), + p, + pp, + lagInFrame(number, number - pp) OVER w AS lag2, + lagInFrame(number, number - pp, number * 11) OVER w AS lag, + leadInFrame(number, number - pp, number * 11) OVER w AS lead +FROM +( + SELECT + number, + intDiv(number, 5) AS p, + p * 5 AS pp + FROM numbers(16) +) +WHERE toLowCardinality(1) +WINDOW w AS (PARTITION BY p ORDER BY number ASC NULLS FIRST ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) +ORDER BY number DESC NULLS LAST; diff --git a/parser/testdata/03210_nested_short_circuit_functions_bug/ast.json b/parser/testdata/03210_nested_short_circuit_functions_bug/ast.json new file mode 100644 index 000000000..bdb3cbd67 --- /dev/null +++ b/parser/testdata/03210_nested_short_circuit_functions_bug/ast.json @@ -0,0 +1,100 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'abc'" + }, + { + "explain": " Literal 'aws.lambda.duration'" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function less (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toFloat64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'x86_74'" + }, + { + "explain": " Literal Float64_50" + }, + { + "explain": " 
Literal UInt64_0" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Set" + } + ], + + "rows": 26, + + "statistics": + { + "elapsed": 0.001718087, + "rows_read": 26, + "bytes_read": 1022 + } +} diff --git a/parser/testdata/03210_nested_short_circuit_functions_bug/metadata.json b/parser/testdata/03210_nested_short_circuit_functions_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03210_nested_short_circuit_functions_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03210_nested_short_circuit_functions_bug/query.sql b/parser/testdata/03210_nested_short_circuit_functions_bug/query.sql new file mode 100644 index 000000000..923f1e3be --- /dev/null +++ b/parser/testdata/03210_nested_short_circuit_functions_bug/query.sql @@ -0,0 +1,3 @@ +select if(equals(materialize('abc'), 'aws.lambda.duration'), if(toFloat64(materialize('x86_74')) < 50.0000, 0, 1), 0) settings short_circuit_function_evaluation='enable'; +select if(equals(materialize('abc'), 'aws.lambda.duration'), if(toFloat64(materialize('x86_74')) < 50.0000, 0, 1), 0) settings short_circuit_function_evaluation='force_enable'; + diff --git a/parser/testdata/03210_optimize_rewrite_aggregate_function_with_if_return_type_bug/ast.json b/parser/testdata/03210_optimize_rewrite_aggregate_function_with_if_return_type_bug/ast.json new file mode 100644 index 000000000..cf5ea4050 --- /dev/null +++ b/parser/testdata/03210_optimize_rewrite_aggregate_function_with_if_return_type_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001103726, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03210_optimize_rewrite_aggregate_function_with_if_return_type_bug/metadata.json b/parser/testdata/03210_optimize_rewrite_aggregate_function_with_if_return_type_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03210_optimize_rewrite_aggregate_function_with_if_return_type_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03210_optimize_rewrite_aggregate_function_with_if_return_type_bug/query.sql b/parser/testdata/03210_optimize_rewrite_aggregate_function_with_if_return_type_bug/query.sql new file mode 100644 index 000000000..4d9e7d672 --- /dev/null +++ b/parser/testdata/03210_optimize_rewrite_aggregate_function_with_if_return_type_bug/query.sql @@ -0,0 +1,40 @@ +SET enable_analyzer = 1; +SET allow_suspicious_primary_key = 1; + +-- For function count, rewrite countState to countStateIf changes the type from AggregateFunction(count, Nullable(UInt64)) to AggregateFunction(count, UInt64) +-- We can cast AggregateFunction(count, UInt64) back to AggregateFunction(count, Nullable(UInt64)) with additional _CAST +select hex(countState(if(toNullable(number % 2 = 0), number, null))) from numbers(5) settings optimize_rewrite_aggregate_function_with_if=1; +select toTypeName(countState(if(toNullable(number % 2 = 0), number, null))) from numbers(5) settings optimize_rewrite_aggregate_function_with_if=1; +select arrayStringConcat(arraySlice(splitByString(', ', trimLeft(explain)), 2), ', ') from (explain query tree select hex(countState(if(toNullable(number % 2 = 0), number, null))) from numbers(5) settings optimize_rewrite_aggregate_function_with_if=1) where explain like '%AggregateFunction%'; + +-- For 
function uniq, rewrite uniqState to uniqStateIf changes the type from AggregateFunction(uniq, Nullable(UInt64)) to AggregateFunction(uniq, UInt64) +-- We can't cast AggregateFunction(uniq, UInt64) back to AggregateFunction(uniq, Nullable(UInt64)) so rewrite is not happening. +select toTypeName(uniqState(if(toNullable(number % 2 = 0), number, null))) from numbers(5) settings optimize_rewrite_aggregate_function_with_if=1; +select hex(uniqState(if(toNullable(number % 2 = 0), number, null))) from numbers(5) settings optimize_rewrite_aggregate_function_with_if=1; +select arrayStringConcat(arraySlice(splitByString(', ', trimLeft(explain)), 2), ', ') from (explain query tree select hex(uniqState(if(toNullable(number % 2 = 0), number, null))) from numbers(5) settings optimize_rewrite_aggregate_function_with_if=1) where explain like '%AggregateFunction%'; + +select '----'; + +CREATE TABLE a +( + `a_id` String +) +ENGINE = MergeTree +PARTITION BY tuple() +ORDER BY tuple(); + + +CREATE TABLE b +( + `b_id` AggregateFunction(uniq, Nullable(String)) +) +ENGINE = AggregatingMergeTree +PARTITION BY tuple() +ORDER BY tuple(); + +CREATE MATERIALIZED VIEW mv TO b +( + `b_id` AggregateFunction(uniq, Nullable(String)) +) +AS SELECT uniqState(if(a_id != '', a_id, NULL)) AS b_id +FROM a; diff --git a/parser/testdata/03210_variant_with_aggregate_function_type/ast.json b/parser/testdata/03210_variant_with_aggregate_function_type/ast.json new file mode 100644 index 000000000..e5cb5f82b --- /dev/null +++ b/parser/testdata/03210_variant_with_aggregate_function_type/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001176378, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03210_variant_with_aggregate_function_type/metadata.json b/parser/testdata/03210_variant_with_aggregate_function_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03210_variant_with_aggregate_function_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03210_variant_with_aggregate_function_type/query.sql b/parser/testdata/03210_variant_with_aggregate_function_type/query.sql new file mode 100644 index 000000000..4a3ae2789 --- /dev/null +++ b/parser/testdata/03210_variant_with_aggregate_function_type/query.sql @@ -0,0 +1,63 @@ +SET allow_experimental_variant_type = 1; + +DROP TABLE IF EXISTS source; +CREATE TABLE source +( + Name String, + Value Int64 + +) ENGINE = MergeTree ORDER BY (); + +INSERT INTO source SELECT ['fail', 'success'][((number + 1) % 2) + 1] as Name, number AS Value FROM numbers(1000); + +DROP TABLE IF EXISTS test_agg_variant; +CREATE TABLE test_agg_variant +( + Name String, + Value Variant(AggregateFunction(uniqExact, Int64), AggregateFunction(avg, Int64)) +) +ENGINE = MergeTree +ORDER BY (Name); + +INSERT INTO test_agg_variant +SELECT + Name, + t AS Value +FROM +( + SELECT + Name, + arrayJoin([ + uniqExactState(Value)::Variant(AggregateFunction(uniqExact, Int64), AggregateFunction(avg, Int64)), + avgState(Value)::Variant(AggregateFunction(uniqExact, Int64), AggregateFunction(avg, Int64)) + ]) AS t + FROM source + GROUP BY Name +); + +SELECT + Name, + uniqExactMerge(Value.`AggregateFunction(uniqExact, Int64)`) AS Value +FROM test_agg_variant +GROUP BY Name +ORDER BY Name; + +SELECT + Name, + avgMerge(Value.`AggregateFunction(avg, Int64)`) AS Value +FROM test_agg_variant +GROUP BY Name 
+ORDER BY Name; + +SELECT + Name, + uniqExactMerge(Value.`AggregateFunction(uniqExact, Int64)`) AS ValueUniq, + avgMerge(Value.`AggregateFunction(avg, Int64)`) AS ValueAvg +FROM test_agg_variant +GROUP BY Name +ORDER BY Name; + + +DROP TABLE test_agg_variant; +DROP TABLE source; + diff --git a/parser/testdata/03211_convert_outer_join_to_inner_join_anti_join/ast.json b/parser/testdata/03211_convert_outer_join_to_inner_join_anti_join/ast.json new file mode 100644 index 000000000..97055787b --- /dev/null +++ b/parser/testdata/03211_convert_outer_join_to_inner_join_anti_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001695205, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03211_convert_outer_join_to_inner_join_anti_join/metadata.json b/parser/testdata/03211_convert_outer_join_to_inner_join_anti_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03211_convert_outer_join_to_inner_join_anti_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03211_convert_outer_join_to_inner_join_anti_join/query.sql b/parser/testdata/03211_convert_outer_join_to_inner_join_anti_join/query.sql new file mode 100644 index 000000000..77b1d52dd --- /dev/null +++ b/parser/testdata/03211_convert_outer_join_to_inner_join_anti_join/query.sql @@ -0,0 +1,45 @@ +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Int32, c1 Int32, c2 String) ENGINE = Log() ; +INSERT INTO t0(c0, c1, c2) VALUES (826636805,0, ''), (0, 150808457, ''); + +SELECT 'DATA'; +SELECT * FROM t0 FORMAT PrettyMonoBlock; + +SELECT 'NUMBER OF ROWS IN FIRST SHOULD BE EQUAL TO SECOND'; + + +SELECT 'FISRT'; +SELECT left.c2 FROM t0 AS left +LEFT ANTI JOIN t0 AS right_0 ON ((left.c0)=(right_0.c1)) +WHERE (abs ((- ((sign (right_0.c1)))))); + +SELECT 'SECOND'; +SELECT SUM(check <> 0) +FROM +( + SELECT (abs ((- ((sign (right_0.c1)))))) AS `check` + FROM t0 AS left + LEFT ANTI JOIN t0 AS right_0 ON ((left.c0)=(right_0.c1)) +); + + +SELECT 'TO DEBUG I TOOK JUST A SUBQUERY AND IT HAS 1 ROW'; + +SELECT 'THIRD'; + +SELECT (abs ((- ((sign (right_0.c1)))))) AS `check` +FROM t0 AS left +LEFT ANTI JOIN t0 AS right_0 ON ((left.c0)=(right_0.c1)); + + +SELECT 'AND I ADDED SINGLE CONDITION THAT CONDITION <>0 THAT IS 1 IN THIRD QUERY AND IT HAS NO RESULT!!!'; + + +SELECT 'FOURTH'; +SELECT (abs ((- ((sign (right_0.c1)))))) AS `check` +FROM t0 AS left +LEFT ANTI JOIN t0 AS right_0 ON ((left.c0)=(right_0.c1)) +WHERE check <> 0; + +DROP TABLE t0; diff --git a/parser/testdata/03212_optimize_with_constraints_logical_error/ast.json b/parser/testdata/03212_optimize_with_constraints_logical_error/ast.json new file mode 100644 index 000000000..19711b0ee --- /dev/null +++ b/parser/testdata/03212_optimize_with_constraints_logical_error/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001127048, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03212_optimize_with_constraints_logical_error/metadata.json b/parser/testdata/03212_optimize_with_constraints_logical_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null 
+++ b/parser/testdata/03212_optimize_with_constraints_logical_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03212_optimize_with_constraints_logical_error/query.sql b/parser/testdata/03212_optimize_with_constraints_logical_error/query.sql new file mode 100644 index 000000000..4e682e914 --- /dev/null +++ b/parser/testdata/03212_optimize_with_constraints_logical_error/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=TinyLog; + +EXPLAIN SYNTAX run_query_tree_passes=1 +WITH 1 AS compound_value SELECT * APPLY (x -> compound_value.*) +FROM test_table WHERE x > 0 +SETTINGS convert_query_to_cnf = true, optimize_using_constraints = true, optimize_substitute_columns = true; -- { serverError UNKNOWN_IDENTIFIER,UNSUPPORTED_METHOD } + +DROP TABLE test_table; diff --git a/parser/testdata/03212_variant_dynamic_cast_or_default/ast.json b/parser/testdata/03212_variant_dynamic_cast_or_default/ast.json new file mode 100644 index 000000000..899cb9892 --- /dev/null +++ b/parser/testdata/03212_variant_dynamic_cast_or_default/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001066014, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03212_variant_dynamic_cast_or_default/metadata.json b/parser/testdata/03212_variant_dynamic_cast_or_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03212_variant_dynamic_cast_or_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03212_variant_dynamic_cast_or_default/query.sql b/parser/testdata/03212_variant_dynamic_cast_or_default/query.sql new file mode 100644 index 000000000..a64bab6ef --- /dev/null +++ b/parser/testdata/03212_variant_dynamic_cast_or_default/query.sql @@ -0,0 +1,169 @@ +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set allow_experimental_dynamic_type = 1; +set allow_suspicious_low_cardinality_types = 1; +set session_timezone = 'UTC'; + +select accurateCastOrDefault(variant, 'UInt32'), multiIf(number % 4 == 0, NULL, number % 4 == 1, number, number % 4 == 2, 'str_' || toString(number), range(number)) as variant from numbers(8); +select accurateCastOrNull(variant, 'UInt32'), multiIf(number % 4 == 0, NULL, number % 4 == 1, number, number % 4 == 2, 'str_' || toString(number), range(number)) as variant from numbers(8); + +select accurateCastOrDefault(dynamic, 'UInt32'), multiIf(number % 4 == 0, NULL, number % 4 == 1, number, number % 4 == 2, 'str_' || toString(number), range(number))::Dynamic as dynamic from numbers(8); +select accurateCastOrNull(dynamic, 'UInt32'), multiIf(number % 4 == 0, NULL, number % 4 == 1, number, number % 4 == 2, 'str_' || toString(number), range(number))::Dynamic as dynamic from numbers(8); + +drop table if exists t; +create table t (id UInt64 DEFAULT generateSerialID('03212_variant_seq'), d Dynamic) engine=MergeTree order by id; + +-- Integer types: signed and unsigned integers (UInt8, UInt16, UInt32, UInt64, UInt128, UInt256, Int8, Int16, Int32, Int64, Int128, Int256) +INSERT INTO t (d) VALUES (-128::Int8), (-127::Int8), (-1::Int8), (0::Int8), (1::Int8), (126::Int8), (127::Int8); +INSERT INTO t (d) VALUES (-128::Int8), (-127::Int8), (-1::Int8), (0::Int8), (1::Int8), (126::Int8), (127::Int8); +INSERT INTO t (d) VALUES (-128::Int8), (-127::Int8), 
(-1::Int8), (0::Int8), (1::Int8), (126::Int8), (127::Int8); +INSERT INTO t (d) VALUES (-32768::Int16), (-32767::Int16), (-1::Int16), (0::Int16), (1::Int16), (32766::Int16), (32767::Int16); +INSERT INTO t (d) VALUES (-2147483648::Int32), (-2147483647::Int32), (-1::Int32), (0::Int32), (1::Int32), (2147483646::Int32), (2147483647::Int32); +INSERT INTO t (d) VALUES (-9223372036854775808::Int64), (-9223372036854775807::Int64), (-1::Int64), (0::Int64), (1::Int64), (9223372036854775806::Int64), (9223372036854775807::Int64); +INSERT INTO t (d) VALUES (-170141183460469231731687303715884105728::Int128), (-170141183460469231731687303715884105727::Int128), (-1::Int128), (0::Int128), (1::Int128), (170141183460469231731687303715884105726::Int128), (170141183460469231731687303715884105727::Int128); +INSERT INTO t (d) VALUES (-57896044618658097711785492504343953926634992332820282019728792003956564819968::Int256), (-57896044618658097711785492504343953926634992332820282019728792003956564819967::Int256), (-1::Int256), (0::Int256), (1::Int256), (57896044618658097711785492504343953926634992332820282019728792003956564819966::Int256), (57896044618658097711785492504343953926634992332820282019728792003956564819967::Int256); + +INSERT INTO t (d) VALUES (0::UInt8), (1::UInt8), (254::UInt8), (255::UInt8); +INSERT INTO t (d) VALUES (0::UInt16), (1::UInt16), (65534::UInt16), (65535::UInt16); +INSERT INTO t (d) VALUES (0::UInt32), (1::UInt32), (4294967294::UInt32), (4294967295::UInt32); +INSERT INTO t (d) VALUES (0::UInt64), (1::UInt64), (18446744073709551614::UInt64), (18446744073709551615::UInt64); +INSERT INTO t (d) VALUES (0::UInt128), (1::UInt128), (340282366920938463463374607431768211454::UInt128), (340282366920938463463374607431768211455::UInt128); +INSERT INTO t (d) VALUES (0::UInt256), (1::UInt256), (115792089237316195423570985008687907853269984665640564039457584007913129639934::UInt256), (115792089237316195423570985008687907853269984665640564039457584007913129639935::UInt256); + +-- Floating-point numbers: floats(Float32 and Float64) values +INSERT INTO t (d) VALUES (1.17549435e-38::Float32), (3.40282347e+38::Float32), (-3.40282347e+38::Float32), (-1.17549435e-38::Float32), (1.4e-45::Float32), (-1.4e-45::Float32); +INSERT INTO t (d) VALUES (inf::Float32), (-inf::Float32), (nan::Float32); +INSERT INTO t (d) VALUES (inf::FLOAT(12)), (-inf::FLOAT(12)), (nan::FLOAT(12)); +INSERT INTO t (d) VALUES (inf::FLOAT(15,22)), (-inf::FLOAT(15,22)), (nan::FLOAT(15,22)); + +INSERT INTO t (d) VALUES (1.17549435e-38::Float64), (3.40282347e+38::Float64), (-3.40282347e+38::Float64), (-1.17549435e-38::Float64), (1.4e-45::Float64), (-1.4e-45::Float64); +INSERT INTO t (d) VALUES (2.2250738585072014e-308::Float64), (1.7976931348623157e+308::Float64), (-1.7976931348623157e+308::Float64), (-2.2250738585072014e-308::Float64); +INSERT INTO t (d) VALUES (inf::Float64), (-inf::Float64), (nan::Float64); +INSERT INTO t (d) VALUES (inf::DOUBLE(12)), (-inf::DOUBLE(12)), (nan::DOUBLE(12)); +INSERT INTO t (d) VALUES (inf::DOUBLE(15,22)), (-inf::DOUBLE(15,22)), (nan::DOUBLE(15,22)); + +-- Strings: String and FixedString +INSERT INTO t (d) VALUES ('string'::String), ('1'::FixedString(1)), ('1'::FixedString(2)), ('1'::FixedString(10)); --(''::String), + +-- Boolean +INSERT INTO t (d) VALUES ('1'::Bool), (0::Bool); + +-- UUID +INSERT INTO t (d) VALUES ('dededdb6-7835-4ce4-8d11-b5de6f2820e9'::UUID); +INSERT INTO t (d) VALUES ('00000000-0000-0000-0000-000000000000'::UUID); + +-- LowCardinality +INSERT INTO t (d) VALUES ('1'::LowCardinality(String)), 
('1'::LowCardinality(String)), (0::LowCardinality(UInt16)); + +-- Arrays +INSERT INTO t (d) VALUES ([]::Array(Dynamic)), ([[]]::Array(Array(Dynamic))), ([[[]]]::Array(Array(Array(Dynamic)))); + +-- Tuple +INSERT INTO t (d) VALUES (()::Tuple(Dynamic)), ((())::Tuple(Tuple(Dynamic))), (((()))::Tuple(Tuple(Tuple(Dynamic)))); + +-- Map. +INSERT INTO t (d) VALUES (map(11::Dynamic, 'v1'::Dynamic, '22'::Dynamic, 1::Dynamic)); + +-- SimpleAggregateFunction +INSERT INTO t (d) VALUES ([1,2]::SimpleAggregateFunction(anyLast, Array(Int16))); + +-- IPs +INSERT INTO t (d) VALUES (toIPv4('192.168.0.1')), (toIPv6('::1')); + +-- Geo +INSERT INTO t (d) VALUES ((1.23, 4.56)::Point), (([(1.23, 4.56)::Point, (2.34, 5.67)::Point])::Ring); +INSERT INTO t (d) VALUES ([[[(0, 0), (10, 0), (10, 10), (0, 10)]], [[(20, 20), (50, 20), (50, 50), (20, 50)],[(30, 30), (50, 50), (50, 30)]]]::MultiPolygon); + +-- Interval +INSERT INTO t (d) VALUES (interval '1' day), (interval '2' month), (interval '3' year); + +-- Nested +INSERT INTO t (d) VALUES ([(1, 'aa'), (2, 'bb')]::Nested(x UInt32, y String)); +INSERT INTO t (d) VALUES ([(1, (2, ['aa', 'bb']), [(3, 'cc'), (4, 'dd')]), (5, (6, ['ee', 'ff']), [(7, 'gg'), (8, 'hh')])]::Nested(x UInt32, y Tuple(y1 UInt32, y2 Array(String)), z Nested(z1 UInt32, z2 String))); + +optimize table t final; + +WITH + (SELECT count() + FROM t + WHERE accurateCastOrNull(d,'IPv4') IS NOT NULL + AND toIPv4(accurateCastOrNull(d,'IPv4')) NOT IN (toIPv4('0.0.0.0'), toIPv4('192.168.0.1')) + ) AS bad_v4, + (SELECT count() + FROM t + WHERE accurateCastOrNull(d,'IPv6') IS NOT NULL + AND toIPv6(accurateCastOrNull(d,'IPv6')) NOT IN (toIPv6('::'), toIPv6('::1'), toIPv6('::ffff:192.168.0.1')) + ) AS bad_v6, + bad_v4 + bad_v6 AS bad_cnt +SELECT + 'ch_dbg_summary' AS tag, + (SELECT count() FROM t) AS total, + (SELECT count() FROM t WHERE accurateCastOrNull(d,'IPv4') IS NOT NULL) AS typed_v4, + (SELECT count() FROM t WHERE accurateCastOrNull(d,'IPv6') IS NOT NULL) AS typed_v6, + bad_v4, bad_v6, + version() AS ver, + getSetting('session_timezone') AS tz +WHERE bad_cnt > 0; + +WITH + (SELECT count() + FROM t + WHERE accurateCastOrNull(d,'IPv4') IS NOT NULL + AND toIPv4(accurateCastOrNull(d,'IPv4')) NOT IN (toIPv4('0.0.0.0'), toIPv4('192.168.0.1')) + ) + + (SELECT count() + FROM t + WHERE accurateCastOrNull(d,'IPv6') IS NOT NULL + AND toIPv6(accurateCastOrNull(d,'IPv6')) NOT IN (toIPv6('::'), toIPv6('::1'), toIPv6('::ffff:192.168.0.1')) + ) AS bad_cnt +SELECT + 'ch_dbg_offenders' AS tag, + id, + toTypeName(d) AS src_type, + toIPv4(accurateCastOrNull(d,'IPv4')) AS v4, + toIPv6(accurateCastOrNull(d,'IPv6')) AS v6, + toString(d) AS raw_d +FROM t +WHERE bad_cnt > 0 + AND ( + (accurateCastOrNull(d,'IPv4') IS NOT NULL + AND toIPv4(accurateCastOrNull(d,'IPv4')) NOT IN (toIPv4('0.0.0.0'), toIPv4('192.168.0.1'))) + OR (accurateCastOrNull(d,'IPv6') IS NOT NULL + AND toIPv6(accurateCastOrNull(d,'IPv6')) NOT IN (toIPv6('::'), toIPv6('::1'), toIPv6('::ffff:192.168.0.1'))) + ) +ORDER BY id +LIMIT 20; + +select distinct toInt8OrDefault(d) as res from t order by res; +select distinct toUInt8OrDefault(d) as res from t order by res; +select distinct toInt16OrDefault(d) as res from t order by res; +select distinct toUInt16OrDefault(d) as res from t order by res; +select distinct toInt32OrDefault(d) as res from t order by res; +select distinct toUInt32OrDefault(d) as res from t order by res; +select distinct toInt64OrDefault(d) as res from t order by res; +select distinct toUInt64OrDefault(d) as res from t order by res; +select 
distinct toInt128OrDefault(d) as res from t order by res; +select distinct toUInt128OrDefault(d) as res from t order by res; +select distinct toInt256OrDefault(d) as res from t order by res; +select distinct toUInt256OrDefault(d) as res from t order by res; + +select distinct toFloat32OrDefault(d) as res from t order by res; +select distinct toFloat64OrDefault(d) as res from t order by res; + +select distinct toDecimal32OrDefault(d, 3) as res from t order by res; +select distinct toDecimal64OrDefault(d, 3) as res from t order by res; +select distinct toDecimal128OrDefault(d, 3) as res from t order by res; +select distinct toDecimal256OrDefault(d, 3) as res from t order by res; + +select distinct toDateOrDefault(d) as res from t order by res; +select distinct toDate32OrDefault(d) as res from t order by res; +select distinct toDateTimeOrDefault(d) as res from t order by res; + +select distinct toIPv4OrDefault(d) as res from t order by res; +select distinct toIPv6OrDefault(d) as res from t order by res; + +select distinct toUUIDOrDefault(d) as res from t order by res; + +SELECT d FROM t ORDER BY id; + +drop table t; diff --git a/parser/testdata/03213_array_element_msan/ast.json b/parser/testdata/03213_array_element_msan/ast.json new file mode 100644 index 000000000..546a01655 --- /dev/null +++ b/parser/testdata/03213_array_element_msan/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_10, UInt64_2, UInt64_13, UInt64_15]" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.00126981, + "rows_read": 21, + "bytes_read": 948 + } +} diff --git a/parser/testdata/03213_array_element_msan/metadata.json b/parser/testdata/03213_array_element_msan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03213_array_element_msan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03213_array_element_msan/query.sql b/parser/testdata/03213_array_element_msan/query.sql new file mode 100644 index 000000000..689558567 --- /dev/null +++ b/parser/testdata/03213_array_element_msan/query.sql @@ -0,0 +1,2 @@ +SELECT [[10, 2, 13, 15][toNullable(toLowCardinality(1))]][materialize(toLowCardinality(1))]; +SELECT '-- system.settings_profiles' GROUP BY [[[[[[[[[[10, toNullable(10)][1], [materialize(toLowCardinality(10)), 
2][materialize(toLowCardinality(1))]][1]][materialize(materialize(1))], [10, 2, 1][1]][1]][1], 1][toLowCardinality(1)]][1], 1][1], 10][1], [[10, toLowCardinality(2)][toNullable(toLowCardinality(1))]][materialize(toLowCardinality(1))]][1], [[[[10, 2][1]][1]][1], [10, 2][materialize(1)], [[[2][1]][materialize(1)], 2, 1][1], [2, 10, toNullable(1)][1]] WITH CUBE; diff --git a/parser/testdata/03213_deep_json/ast.json b/parser/testdata/03213_deep_json/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03213_deep_json/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03213_deep_json/metadata.json b/parser/testdata/03213_deep_json/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03213_deep_json/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03213_deep_json/query.sql b/parser/testdata/03213_deep_json/query.sql new file mode 100644 index 000000000..2a9476381 --- /dev/null +++ b/parser/testdata/03213_deep_json/query.sql @@ -0,0 +1,5 @@ +-- The default limit works. +SELECT * FROM format("JSONCompactEachRow", 'x UInt32, y UInt32', REPEAT('[1,1,', 100000)) SETTINGS input_format_json_compact_allow_variable_number_of_columns = 1; -- { serverError TOO_DEEP_RECURSION, INCORRECT_DATA } +-- Even if we relax the limit, it is also safe. +SET input_format_json_max_depth = 100000; +SELECT * FROM format("JSONCompactEachRow", 'x UInt32, y UInt32', REPEAT('[1,1,', 100000)) SETTINGS input_format_json_compact_allow_variable_number_of_columns = 1; -- { serverError TOO_DEEP_RECURSION, INCORRECT_DATA } diff --git a/parser/testdata/03213_denseRank_percentRank_alias/ast.json b/parser/testdata/03213_denseRank_percentRank_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03213_denseRank_percentRank_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03213_denseRank_percentRank_alias/metadata.json b/parser/testdata/03213_denseRank_percentRank_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03213_denseRank_percentRank_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03213_denseRank_percentRank_alias/query.sql b/parser/testdata/03213_denseRank_percentRank_alias/query.sql new file mode 100644 index 000000000..ff841294e --- /dev/null +++ b/parser/testdata/03213_denseRank_percentRank_alias/query.sql @@ -0,0 +1,59 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/67042 +-- Reference generated using percent_rank() and dense_rank() + +-- From ClickHouse/tests/queries/0_stateless/01591_window_functions.sql (for deterministic query) +SELECT '---- denseRank() ----'; +select number, p, o, + count(*) over w, + rank() over w, + denseRank() over w, + row_number() over w +from (select number, intDiv(number, 5) p, mod(number, 3) o + from numbers(31) order by o, number) t +window w as (partition by p order by o, number) +order by p, o, number +settings max_block_size = 2; + +-- Modified from ClickHouse/tests/queries/0_stateless/01592_window_functions.sql (for deterministic query) +SELECT '---- percentRank() ----'; + +drop table if exists product_groups; +drop table if exists products; + +CREATE TABLE product_groups ( + group_id Int64, + group_name String +) Engine = Memory; + +CREATE TABLE products ( + product_id Int64, + product_name String, + price DECIMAL(11, 2), + group_id Int64 +) Engine = Memory; + +INSERT INTO product_groups VALUES (1,
'Smartphone'),(2, 'Laptop'),(3, 'Tablet'); +INSERT INTO products (product_id,product_name, group_id,price) VALUES (1, 'Microsoft Lumia', 1, 200), (2, 'HTC One', 1, 400), (3, 'Nexus', 1, 500), (4, 'iPhone', 1, 900),(5, 'HP Elite', 2, 1200),(6, 'Lenovo Thinkpad', 2, 700),(7, 'Sony VAIO', 2, 700),(8, 'Dell Vostro', 2, 800),(9, 'iPad', 3, 700),(10, 'Kindle Fire', 3, 150),(11, 'Samsung Galaxy Tab', 3, 200); +INSERT INTO product_groups VALUES (4, 'Unknow'); +INSERT INTO products (product_id,product_name, group_id,price) VALUES (12, 'Others', 4, 200); + + +SELECT * +FROM +( + SELECT + product_name, + group_name, + price, + rank() OVER (PARTITION BY group_name ORDER BY price ASC) AS rank, + percentRank() OVER (PARTITION BY group_name ORDER BY price ASC) AS percent + FROM products + INNER JOIN product_groups USING (group_id) +) AS t +ORDER BY + group_name ASC, + price ASC, + product_name ASC; + +drop table product_groups; +drop table products; diff --git a/parser/testdata/03213_distributed_analyzer/ast.json b/parser/testdata/03213_distributed_analyzer/ast.json new file mode 100644 index 000000000..4387b7608 --- /dev/null +++ b/parser/testdata/03213_distributed_analyzer/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001422025, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03213_distributed_analyzer/metadata.json b/parser/testdata/03213_distributed_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03213_distributed_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03213_distributed_analyzer/query.sql b/parser/testdata/03213_distributed_analyzer/query.sql new file mode 100644 index 000000000..33d25010b --- /dev/null +++ b/parser/testdata/03213_distributed_analyzer/query.sql @@ -0,0 +1,5 @@ +SET max_threads = 8; + +-- This triggered a nullptr dereference due to the confusion between old and new analyzers: +SELECT sum(*) FROM remote('127.0.0.4', currentDatabase(), viewExplain('EXPLAIN PIPELINE', 'graph = 1', (SELECT * FROM remote('127.0.0.4', system, one)))); -- { serverError UNKNOWN_FUNCTION } +SELECT groupArray(*) FROM cluster(test_cluster_two_shards, viewExplain('EXPLAIN PIPELINE', 'graph = 1', (SELECT * FROM remote('127.0.0.4', system, one)))); diff --git a/parser/testdata/03213_rand_dos/ast.json b/parser/testdata/03213_rand_dos/ast.json new file mode 100644 index 000000000..4d5281b35 --- /dev/null +++ b/parser/testdata/03213_rand_dos/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function randChiSquared (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_-1e-7" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001074767, + "rows_read": 7, + "bytes_read": 272 + } +} diff --git a/parser/testdata/03213_rand_dos/metadata.json b/parser/testdata/03213_rand_dos/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03213_rand_dos/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03213_rand_dos/query.sql 
b/parser/testdata/03213_rand_dos/query.sql new file mode 100644 index 000000000..1250995bf --- /dev/null +++ b/parser/testdata/03213_rand_dos/query.sql @@ -0,0 +1,5 @@ +SELECT randChiSquared(-0.0000001); -- { serverError BAD_ARGUMENTS } +SELECT randChiSquared(-0.0); -- { serverError BAD_ARGUMENTS } +SELECT randStudentT(-0.); -- { serverError BAD_ARGUMENTS } +SELECT randFisherF(-0., 1); -- { serverError BAD_ARGUMENTS } +SELECT randFisherF(1, -0.); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03214_bitslice_argument_evaluation/ast.json b/parser/testdata/03214_bitslice_argument_evaluation/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03214_bitslice_argument_evaluation/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03214_bitslice_argument_evaluation/metadata.json b/parser/testdata/03214_bitslice_argument_evaluation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03214_bitslice_argument_evaluation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03214_bitslice_argument_evaluation/query.sql b/parser/testdata/03214_bitslice_argument_evaluation/query.sql new file mode 100644 index 000000000..1731dfa0d --- /dev/null +++ b/parser/testdata/03214_bitslice_argument_evaluation/query.sql @@ -0,0 +1,11 @@ +-- { echo } +-- No arguments passed +SELECT bitSlice(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +-- Invalid 1st argument passed +SELECT bitSlice(1, 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- Valid 1st argument, invalid 2nd argument passed +SELECT bitSlice('Hello', 'World'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- Valid 1st argument & 2nd argument, invalid 3rd argument passed +SELECT bitSlice('Hello', 1, 'World'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- More arguments than expected +SELECT bitSlice('Hello', 1, 1, 'World'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/parser/testdata/03214_count_distinct_null_key_memory_leak/ast.json b/parser/testdata/03214_count_distinct_null_key_memory_leak/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03214_count_distinct_null_key_memory_leak/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03214_count_distinct_null_key_memory_leak/metadata.json b/parser/testdata/03214_count_distinct_null_key_memory_leak/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03214_count_distinct_null_key_memory_leak/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03214_count_distinct_null_key_memory_leak/query.sql b/parser/testdata/03214_count_distinct_null_key_memory_leak/query.sql new file mode 100644 index 000000000..84804e4e0 --- /dev/null +++ b/parser/testdata/03214_count_distinct_null_key_memory_leak/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS testnull; +CREATE TABLE testnull +( + `a` Nullable(String), + `b` Nullable(String), + `c` Nullable(String) +) +ENGINE = MergeTree +PARTITION BY tuple() +ORDER BY c +SETTINGS index_granularity = 8192, allow_nullable_key=1; + +INSERT INTO testnull(b,c) SELECT toString(rand64()) AS b, toString(rand64()) AS c FROM numbers(1000000); +SELECT count(distinct b) FROM testnull GROUP BY a SETTINGS max_memory_usage = 10000000; -- {serverError MEMORY_LIMIT_EXCEEDED} + +DROP TABLE testnull; \ No newline at end of file diff --git
a/parser/testdata/03214_join_on_tuple_comparison_elimination_bug/ast.json b/parser/testdata/03214_join_on_tuple_comparison_elimination_bug/ast.json new file mode 100644 index 000000000..ee4b2e45f --- /dev/null +++ b/parser/testdata/03214_join_on_tuple_comparison_elimination_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery a (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001161942, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03214_join_on_tuple_comparison_elimination_bug/metadata.json b/parser/testdata/03214_join_on_tuple_comparison_elimination_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03214_join_on_tuple_comparison_elimination_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03214_join_on_tuple_comparison_elimination_bug/query.sql b/parser/testdata/03214_join_on_tuple_comparison_elimination_bug/query.sql new file mode 100644 index 000000000..7ef98f88c --- /dev/null +++ b/parser/testdata/03214_join_on_tuple_comparison_elimination_bug/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS a; +DROP TABLE IF EXISTS b; + +CREATE TABLE a (key Nullable(String)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO a VALUES (NULL), ('1'); + +CREATE TABLE b (key Nullable(String)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO b VALUES (NULL), ('1'); + +SELECT a.key FROM a LEFT SEMI JOIN b ON tuple(a.key) = tuple(b.key) ORDER BY a.key; +SELECT a.key FROM a LEFT SEMI JOIN b ON a.key IS NOT DISTINCT FROM b.key ORDER BY a.key; +SELECT a.key FROM a LEFT SEMI JOIN b ON tuple(a.key) = tuple(b.key) ORDER BY a.key; +SELECT a.key FROM a LEFT ANY JOIN b ON tuple(a.key) = tuple(b.key) ORDER BY a.key; + +DROP TABLE IF EXISTS a; +DROP TABLE IF EXISTS b; diff --git a/parser/testdata/03214_json_typed_dynamic_path/ast.json b/parser/testdata/03214_json_typed_dynamic_path/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03214_json_typed_dynamic_path/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03214_json_typed_dynamic_path/metadata.json b/parser/testdata/03214_json_typed_dynamic_path/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03214_json_typed_dynamic_path/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03214_json_typed_dynamic_path/query.sql b/parser/testdata/03214_json_typed_dynamic_path/query.sql new file mode 100644 index 000000000..de1e9e9de --- /dev/null +++ b/parser/testdata/03214_json_typed_dynamic_path/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-fasttest + +SET enable_json_type = 1; +set allow_experimental_dynamic_type = 1; +drop table if exists test; +create table test (json JSON(a Dynamic)) engine=MergeTree order by tuple() settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; +insert into test select '{"a" : 42}'; +insert into test select '{"a" : [1, 2, 3]}'; +optimize table test; +select * from test order by toString(json); +drop table test; + +create table test (json JSON(a Dynamic)) engine=MergeTree order by tuple() settings min_rows_for_wide_part=10000000, min_bytes_for_wide_part=10000000; +insert into test select '{"a" : 42}'; +insert into test select '{"a" : [1, 2, 3]}'; +optimize table test; +select * from test order by toString(json); +drop table test; diff --git 
a/parser/testdata/03215_analyzer_materialized_constants_bug/ast.json b/parser/testdata/03215_analyzer_materialized_constants_bug/ast.json new file mode 100644 index 000000000..0908b3881 --- /dev/null +++ b/parser/testdata/03215_analyzer_materialized_constants_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001625024, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03215_analyzer_materialized_constants_bug/metadata.json b/parser/testdata/03215_analyzer_materialized_constants_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03215_analyzer_materialized_constants_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03215_analyzer_materialized_constants_bug/query.sql b/parser/testdata/03215_analyzer_materialized_constants_bug/query.sql new file mode 100644 index 000000000..30e8bd1e4 --- /dev/null +++ b/parser/testdata/03215_analyzer_materialized_constants_bug/query.sql @@ -0,0 +1,26 @@ +SET enable_analyzer = 1; + +SELECT concat(materialize(toLowCardinality('b')), 'a') FROM remote('127.0.0.{1,2}', system, one) GROUP BY 'a'; + +SELECT concat(NULLIF(1, materialize(toLowCardinality(1))), concat(NULLIF(1, 1))) FROM remote('127.0.0.{1,2}', system, one) GROUP BY concat(NULLIF(1, 1)); + +DROP TABLE IF EXISTS test__fuzz_21; +CREATE TABLE test__fuzz_21 +( + `x` Decimal(18, 10) +) +ENGINE = MergeTree +ORDER BY x; + +INSERT INTO test__fuzz_21 VALUES (1), (2), (3); + +WITH ( + SELECT CAST(toFixedString(toFixedString(materialize(toFixedString('111111111111111111111111111111111111111', 39)), 39), 39), 'UInt128') + ) AS v +SELECT + coalesce(materialize(toLowCardinality(toNullable(1))), 10, NULL), + max(v) +FROM remote('127.0.0.{1,2}', currentDatabase(), test__fuzz_21) +GROUP BY + coalesce(NULL), + coalesce(1, 10, 10, materialize(NULL)); diff --git a/parser/testdata/03215_analyzer_replace_with_dummy_tables/ast.json b/parser/testdata/03215_analyzer_replace_with_dummy_tables/ast.json new file mode 100644 index 000000000..e7c7ece3f --- /dev/null +++ b/parser/testdata/03215_analyzer_replace_with_dummy_tables/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 3)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration number (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier number" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001626953, + "rows_read": 9, + "bytes_read": 302 + } +} diff --git a/parser/testdata/03215_analyzer_replace_with_dummy_tables/metadata.json b/parser/testdata/03215_analyzer_replace_with_dummy_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03215_analyzer_replace_with_dummy_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03215_analyzer_replace_with_dummy_tables/query.sql b/parser/testdata/03215_analyzer_replace_with_dummy_tables/query.sql new file mode 100644 index 000000000..6d8bb7b5c --- /dev/null +++ b/parser/testdata/03215_analyzer_replace_with_dummy_tables/query.sql 
@@ -0,0 +1,15 @@ +create table t (number UInt64) engine MergeTree order by number; + +SELECT 1 +FROM +( + SELECT number IN ( + SELECT number + FROM view( + SELECT number + FROM numbers(1) + ) + ) + FROM t +) +SETTINGS allow_experimental_parallel_reading_from_replicas = 1, cluster_for_parallel_replicas='not_exists', max_parallel_replicas = 2, enable_analyzer = 1, parallel_replicas_for_non_replicated_merge_tree = 1; -- { serverError CLUSTER_DOESNT_EXIST } diff --git a/parser/testdata/03215_fix_get_index_in_tuple/ast.json b/parser/testdata/03215_fix_get_index_in_tuple/ast.json new file mode 100644 index 000000000..9364f685f --- /dev/null +++ b/parser/testdata/03215_fix_get_index_in_tuple/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery dummy_table_03215 (children 1)" + }, + { + "explain": " Identifier dummy_table_03215" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000939438, + "rows_read": 2, + "bytes_read": 87 + } +} diff --git a/parser/testdata/03215_fix_get_index_in_tuple/metadata.json b/parser/testdata/03215_fix_get_index_in_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03215_fix_get_index_in_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03215_fix_get_index_in_tuple/query.sql b/parser/testdata/03215_fix_get_index_in_tuple/query.sql new file mode 100644 index 000000000..ae35eac12 --- /dev/null +++ b/parser/testdata/03215_fix_get_index_in_tuple/query.sql @@ -0,0 +1,16 @@ +CREATE TABLE IF NOT EXISTS dummy_table_03215 +( + id_col Nullable(String), + date_col Date +) + ENGINE = MergeTree() + ORDER BY date_col; + +INSERT INTO dummy_table_03215 (id_col, date_col) + VALUES ('some_string', '2024-05-21'); + +SELECT 0 as _row_exists +FROM dummy_table_03215 +WHERE (date_col, id_col) IN (SELECT (date_col, id_col) FROM dummy_table_03215); + +DROP TABLE IF EXISTS dummy_table_03215; diff --git a/parser/testdata/03215_key_condition_bug/ast.json b/parser/testdata/03215_key_condition_bug/ast.json new file mode 100644 index 000000000..35b0b3545 --- /dev/null +++ b/parser/testdata/03215_key_condition_bug/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 3)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType Int8" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001679332, + "rows_read": 9, + "bytes_read": 290 + } +} diff --git a/parser/testdata/03215_key_condition_bug/metadata.json b/parser/testdata/03215_key_condition_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03215_key_condition_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03215_key_condition_bug/query.sql b/parser/testdata/03215_key_condition_bug/query.sql new file mode 100644 index 000000000..ef2113e81 --- /dev/null +++ b/parser/testdata/03215_key_condition_bug/query.sql @@ -0,0 +1,3 @@ +CREATE TABLE t (x Int8) ENGINE MergeTree ORDER BY x; +INSERT INTO t VALUES (1); +SELECT arrayJoin([tuple((toNullable(10) * toLowCardinality(20)) < 
materialize(30))]) AS row FROM t WHERE row.1 = 0; \ No newline at end of file diff --git a/parser/testdata/03215_multilinestring_geometry/ast.json b/parser/testdata/03215_multilinestring_geometry/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03215_multilinestring_geometry/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03215_multilinestring_geometry/metadata.json b/parser/testdata/03215_multilinestring_geometry/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03215_multilinestring_geometry/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03215_multilinestring_geometry/query.sql b/parser/testdata/03215_multilinestring_geometry/query.sql new file mode 100644 index 000000000..cf4ef15f6 --- /dev/null +++ b/parser/testdata/03215_multilinestring_geometry/query.sql @@ -0,0 +1,26 @@ +-- { echoOn } +SELECT readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))'); +SELECT toTypeName(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))')); +SELECT wkt(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))')); + +SELECT readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))'); +SELECT toTypeName(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))')); +SELECT wkt(readWKTMultiLineString('MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))')); + +-- Native Array(Array(Tuple(Float64, Float64))) is treated as Polygon, not as MultiLineString. +WITH wkt(CAST([[(1, 1), (2, 2), (3, 3), (1, 1)]], 'Array(Array(Tuple(Float64, Float64)))')) as x +SELECT x, toTypeName(x), readWKTPolygon(x) as y, toTypeName(y); + +-- Non-constant tests + +DROP TABLE IF EXISTS t; +CREATE TABLE IF NOT EXISTS t (shape Array(Array(Tuple(Float64, Float64))), wkt_string String, ord Float64) Engine = Memory; +INSERT INTO t (ord, shape, wkt_string) VALUES (1, [[(1, 1), (2, 2), (3, 3), (1, 1)]], 'MULTILINESTRING ((1 1, 2 2, 3 3, 1 1))'); +INSERT INTO t (ord, shape, wkt_string) VALUES (2, [[(1, 1), (2, 2), (3, 3), (1, 1)], [(1, 0), (2, 0), (3, 0)]], 'MULTILINESTRING ((1 1, 2 2, 3 3, 1 1), (1 0, 2 0, 3 0))'); +INSERT INTO t (ord, shape, wkt_string) VALUES (3, [[(1, 0), (2, 1), (3, 0), (4, 1), (5, 0), (6, 1), (7, 0), (8, 1), (9, 0), (10, 1)]], 'MULTILINESTRING ((1 0, 2 1, 3 0, 4 1, 5 0, 6 1, 7 0, 8 1, 9 0, 10 1))'); + +-- Native Array(Array(Tuple(Float64, Float64))) is treated as Polygon, not as MultiLineString, +-- but reading MultiLineString should still return an Array(Array(Tuple(Float64, Float64))) +select wkt(shape), readWKTMultiLineString(wkt_string), readWKTMultiLineString(wkt_string) = shape from t order by ord; + + diff --git a/parser/testdata/03215_parquet_index/ast.json b/parser/testdata/03215_parquet_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03215_parquet_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03215_parquet_index/metadata.json b/parser/testdata/03215_parquet_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03215_parquet_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03215_parquet_index/query.sql b/parser/testdata/03215_parquet_index/query.sql new file mode 100644 index 000000000..5766f40f0 --- /dev/null +++ b/parser/testdata/03215_parquet_index/query.sql @@ -0,0 +1,21 @@ +-- Tags: no-fasttest + +-- default settings. 
+DROP TABLE IF EXISTS test_parquet; +CREATE TABLE test_parquet (col1 int, col2 String) ENGINE=File(Parquet); +INSERT INTO test_parquet SELECT number, toString(number) FROM numbers(100); +SELECT col1, col2 FROM test_parquet; + + +-- Parquet will have indexes in columns. We are not checking that indexes exist here, there is an integration test test_parquet_page_index for that. We just check that a setting doesn't break the SELECT +DROP TABLE IF EXISTS test_parquet; +CREATE TABLE test_parquet (col1 int, col2 String) ENGINE=File(Parquet) SETTINGS output_format_parquet_use_custom_encoder=false, output_format_parquet_write_page_index=true; +INSERT INTO test_parquet SELECT number, toString(number) FROM numbers(100); +SELECT col1, col2 FROM test_parquet; + + +-- Parquet will not have indexes in columns. +DROP TABLE IF EXISTS test_parquet; +CREATE TABLE test_parquet (col1 int, col2 String) ENGINE=File(Parquet) SETTINGS output_format_parquet_use_custom_encoder=false, output_format_parquet_write_page_index=false; +INSERT INTO test_parquet SELECT number, toString(number) FROM numbers(100); +SELECT col1, col2 FROM test_parquet; diff --git a/parser/testdata/03215_parsing_archive_name_s3/ast.json b/parser/testdata/03215_parsing_archive_name_s3/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03215_parsing_archive_name_s3/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03215_parsing_archive_name_s3/metadata.json b/parser/testdata/03215_parsing_archive_name_s3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03215_parsing_archive_name_s3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03215_parsing_archive_name_s3/query.sql b/parser/testdata/03215_parsing_archive_name_s3/query.sql new file mode 100644 index 000000000..e0d63d313 --- /dev/null +++ b/parser/testdata/03215_parsing_archive_name_s3/query.sql @@ -0,0 +1,15 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +SET s3_truncate_on_insert=1; + +INSERT INTO FUNCTION s3(s3_conn, filename='::03215_archive.csv') SELECT 1; +SELECT _file, _path FROM s3(s3_conn, filename='::03215_archive.csv') ORDER BY (_file, _path); + +SELECT _file, _path FROM s3(s3_conn, filename='test :: 03215_archive.csv') ORDER BY (_file, _path); -- { serverError S3_ERROR } + +INSERT INTO FUNCTION s3(s3_conn, filename='test::03215_archive.csv') SELECT 1; +SELECT _file, _path FROM s3(s3_conn, filename='test::03215_archive.csv') ORDER BY (_file, _path); + +INSERT INTO FUNCTION s3(s3_conn, filename='test.zip::03215_archive.csv') SETTINGS allow_archive_path_syntax=0 SELECT 1; +SELECT _file, _path FROM s3(s3_conn, filename='test.zip::03215_archive.csv') ORDER BY (_file, _path) SETTINGS allow_archive_path_syntax=0; diff --git a/parser/testdata/03215_partition_in_tuple/ast.json b/parser/testdata/03215_partition_in_tuple/ast.json new file mode 100644 index 000000000..7828f5a86 --- /dev/null +++ b/parser/testdata/03215_partition_in_tuple/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration a (children 1)" + }, + { + "explain": " DataType DateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + 
}, + { + "explain": " ColumnDeclaration b (children 1)" + }, + { + "explain": " DataType FixedString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_6" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001244575, + "rows_read": 12, + "bytes_read": 447 + } +} diff --git a/parser/testdata/03215_partition_in_tuple/metadata.json b/parser/testdata/03215_partition_in_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03215_partition_in_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03215_partition_in_tuple/query.sql b/parser/testdata/03215_partition_in_tuple/query.sql new file mode 100644 index 000000000..56aeb5d26 --- /dev/null +++ b/parser/testdata/03215_partition_in_tuple/query.sql @@ -0,0 +1,45 @@ +CREATE TABLE t(a DateTime64(3), b FixedString(6)) +ENGINE = MergeTree +PARTITION BY toStartOfDay(a) +ORDER BY (a, b) +AS SELECT * from values( + ('2023-01-01 00:00:00.000', 'fd4c03'), + ('2023-01-01 00:00:00.000', '123456')); + +CREATE TABLE t1(a DateTime64(3), b FixedString(6)) +ENGINE = MergeTree +PARTITION BY toStartOfDay(a) +ORDER BY (a, b) +AS SELECT '2023-01-01 00:00:00.000', 'fd4c03'; + +SELECT * FROM t WHERE tuple(a, b) IN (SELECT tuple(a, b) FROM t1); + +SELECT * FROM t WHERE tuple(a, b) NOT IN (SELECT tuple(a, b) FROM t1); + +SELECT * FROM t WHERE (a, b) IN (SELECT a, b FROM t1); + +SELECT * FROM t WHERE (a, b) NOT IN (SELECT a, b FROM t1); + +DROP TABLE t; DROP TABLE t1; + +CREATE TABLE t(a DateTime, b FixedString(6)) +ENGINE = MergeTree +PARTITION BY toStartOfDay(a) +ORDER BY (a, b) +AS SELECT * from values( + ('2023-01-01 00:00:00', 'fd4c03'), + ('2023-01-01 00:00:00', '123456')); + +CREATE TABLE t1(a DateTime64, b FixedString(6)) +ENGINE = MergeTree +PARTITION BY toStartOfDay(a) +ORDER BY (a, b) +AS SELECT '2023-01-01 00:00:00', 'fd4c03'; + +SELECT * FROM t WHERE tuple(a, b) IN (SELECT tuple(a, b) FROM t1); + +SELECT * FROM t WHERE tuple(a, b) NOT IN (SELECT tuple(a, b) FROM t1); + +SELECT * FROM t WHERE (a, b) IN (SELECT a, b FROM t1); + +SELECT * FROM t WHERE (a, b) NOT IN (SELECT a, b FROM t1); diff --git a/parser/testdata/03215_toStartOfWeek_with_dateTime64_fix/ast.json b/parser/testdata/03215_toStartOfWeek_with_dateTime64_fix/ast.json new file mode 100644 index 000000000..4d1d20600 --- /dev/null +++ b/parser/testdata/03215_toStartOfWeek_with_dateTime64_fix/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toStartOfWeek (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1970-01-01'" + }, + { + "explain": " Literal UInt64_6" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001228251, + "rows_read": 10, + "bytes_read": 394 + } +} diff --git a/parser/testdata/03215_toStartOfWeek_with_dateTime64_fix/metadata.json b/parser/testdata/03215_toStartOfWeek_with_dateTime64_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03215_toStartOfWeek_with_dateTime64_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03215_toStartOfWeek_with_dateTime64_fix/query.sql b/parser/testdata/03215_toStartOfWeek_with_dateTime64_fix/query.sql new file mode 100644 index 000000000..1769d96aa --- /dev/null +++ b/parser/testdata/03215_toStartOfWeek_with_dateTime64_fix/query.sql @@ -0,0 +1,2 @@ +SELECT toStartOfWeek(toDateTime64('1970-01-01', 6)); +SELECT toStartOfWeek(toDateTime('1970-01-01')); diff --git a/parser/testdata/03215_udf_with_union/ast.json b/parser/testdata/03215_udf_with_union/ast.json new file mode 100644 index 000000000..9136ba920 --- /dev/null +++ b/parser/testdata/03215_udf_with_union/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropFunctionQuery" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001136053, + "rows_read": 1, + "bytes_read": 25 + } +} diff --git a/parser/testdata/03215_udf_with_union/metadata.json b/parser/testdata/03215_udf_with_union/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03215_udf_with_union/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03215_udf_with_union/query.sql b/parser/testdata/03215_udf_with_union/query.sql new file mode 100644 index 000000000..00390c5d9 --- /dev/null +++ b/parser/testdata/03215_udf_with_union/query.sql @@ -0,0 +1,14 @@ +DROP FUNCTION IF EXISTS 03215_udf_with_union; +CREATE FUNCTION 03215_udf_with_union AS () -> ( + SELECT sum(s) + FROM + ( + SELECT 1 AS s + UNION ALL + SELECT 1 AS s + ) +); + +SELECT 03215_udf_with_union(); + +DROP FUNCTION 03215_udf_with_union; diff --git a/parser/testdata/03215_validate_type_in_alter_add_modify_column/ast.json b/parser/testdata/03215_validate_type_in_alter_add_modify_column/ast.json new file mode 100644 index 000000000..5ad48fabe --- /dev/null +++ b/parser/testdata/03215_validate_type_in_alter_add_modify_column/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001102731, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03215_validate_type_in_alter_add_modify_column/metadata.json b/parser/testdata/03215_validate_type_in_alter_add_modify_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03215_validate_type_in_alter_add_modify_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03215_validate_type_in_alter_add_modify_column/query.sql b/parser/testdata/03215_validate_type_in_alter_add_modify_column/query.sql new file mode 100644 index 000000000..a8c7e326c --- /dev/null +++ b/parser/testdata/03215_validate_type_in_alter_add_modify_column/query.sql @@ -0,0 +1,13 @@ +set allow_suspicious_low_cardinality_types = 0; +set allow_suspicious_fixed_string_types = 0; + +drop table if exists test; +create table test (id UInt64) engine=MergeTree order by id; +alter table test add column bad LowCardinality(UInt8); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +alter table test add column bad FixedString(10000); -- {serverError ILLEGAL_COLUMN} + +alter table test modify column id LowCardinality(UInt8); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +alter table test modify column id FixedString(10000); -- {serverError ILLEGAL_COLUMN} + +drop table test; + diff --git a/parser/testdata/03215_varian_as_common_type_integers/ast.json 
b/parser/testdata/03215_varian_as_common_type_integers/ast.json new file mode 100644 index 000000000..f175c9143 --- /dev/null +++ b/parser/testdata/03215_varian_as_common_type_integers/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001055274, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03215_varian_as_common_type_integers/metadata.json b/parser/testdata/03215_varian_as_common_type_integers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03215_varian_as_common_type_integers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03215_varian_as_common_type_integers/query.sql b/parser/testdata/03215_varian_as_common_type_integers/query.sql new file mode 100644 index 000000000..dcc697355 --- /dev/null +++ b/parser/testdata/03215_varian_as_common_type_integers/query.sql @@ -0,0 +1,8 @@ +set use_variant_as_common_type = 1; +set allow_experimental_variant_type = 1; + +SELECT if(number % 2, number::Int64, number::UInt64) as res, toTypeName(res) FROM numbers(2); +SELECT if(number % 2, number::Int32, number::UInt64) as res, toTypeName(res) FROM numbers(2); +SELECT if(number % 2, number::Int16, number::UInt64) as res, toTypeName(res) FROM numbers(2); +SELECT if(number % 2, number::Int8, number::UInt64) as res, toTypeName(res) FROM numbers(2); + diff --git a/parser/testdata/03215_varian_as_common_type_tuple_map/ast.json b/parser/testdata/03215_varian_as_common_type_tuple_map/ast.json new file mode 100644 index 000000000..cfdb77796 --- /dev/null +++ b/parser/testdata/03215_varian_as_common_type_tuple_map/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001650589, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03215_varian_as_common_type_tuple_map/metadata.json b/parser/testdata/03215_varian_as_common_type_tuple_map/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03215_varian_as_common_type_tuple_map/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03215_varian_as_common_type_tuple_map/query.sql b/parser/testdata/03215_varian_as_common_type_tuple_map/query.sql new file mode 100644 index 000000000..3981baf28 --- /dev/null +++ b/parser/testdata/03215_varian_as_common_type_tuple_map/query.sql @@ -0,0 +1,8 @@ +set use_variant_as_common_type = 1; +set allow_experimental_variant_type = 1; +SET enable_named_columns_in_function_tuple=1; + +SELECT if(number % 2, tuple(number), tuple(toString(number))) as res, toTypeName(res) FROM numbers(5); +SELECT if(number % 2, map(number, number), map(toString(number), toString(number))) as res, toTypeName(res) FROM numbers(5); + + diff --git a/parser/testdata/03215_view_with_recursive/ast.json b/parser/testdata/03215_view_with_recursive/ast.json new file mode 100644 index 000000000..fef03c375 --- /dev/null +++ b/parser/testdata/03215_view_with_recursive/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001678934, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03215_view_with_recursive/metadata.json 
b/parser/testdata/03215_view_with_recursive/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03215_view_with_recursive/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03215_view_with_recursive/query.sql b/parser/testdata/03215_view_with_recursive/query.sql new file mode 100644 index 000000000..8d212ac08 --- /dev/null +++ b/parser/testdata/03215_view_with_recursive/query.sql @@ -0,0 +1,43 @@ +SET enable_analyzer = 1; + +CREATE VIEW 03215_test_v +AS WITH RECURSIVE test_table AS + ( + SELECT 1 AS number + UNION ALL + SELECT number + 1 + FROM test_table + WHERE number < 100 + ) +SELECT sum(number) +FROM test_table; + +SELECT * FROM 03215_test_v; + +CREATE VIEW 03215_multi_v +AS WITH RECURSIVE + task AS + ( + SELECT + number AS task_id, + number - 1 AS parent_id + FROM numbers(10) + ), + rtq AS + ( + SELECT + task_id, + parent_id + FROM task AS t + WHERE t.parent_id = 1 + UNION ALL + SELECT + t.task_id, + t.parent_id + FROM task AS t, rtq AS r + WHERE t.parent_id = r.task_id + ) +SELECT count() +FROM rtq; + +SELECT * FROM 03215_multi_v; diff --git a/parser/testdata/03216_arrayWithConstant_limits/ast.json b/parser/testdata/03216_arrayWithConstant_limits/ast.json new file mode 100644 index 000000000..c374440bc --- /dev/null +++ b/parser/testdata/03216_arrayWithConstant_limits/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayWithConstant (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_96142475" + }, + { + "explain": " Literal Array_['qMUF']" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.00114229, + "rows_read": 8, + "bytes_read": 313 + } +} diff --git a/parser/testdata/03216_arrayWithConstant_limits/metadata.json b/parser/testdata/03216_arrayWithConstant_limits/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03216_arrayWithConstant_limits/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03216_arrayWithConstant_limits/query.sql b/parser/testdata/03216_arrayWithConstant_limits/query.sql new file mode 100644 index 000000000..56cdf4a69 --- /dev/null +++ b/parser/testdata/03216_arrayWithConstant_limits/query.sql @@ -0,0 +1,6 @@ +SELECT arrayWithConstant(96142475, ['qMUF']); -- { serverError TOO_LARGE_ARRAY_SIZE } +SELECT arrayWithConstant(100000000, materialize([[[[[[[[[['Hello, world!']]]]]]]]]])); -- { serverError TOO_LARGE_ARRAY_SIZE } +SELECT length(arrayWithConstant(10000000, materialize([[[[[[[[[['Hello world']]]]]]]]]]))); + +CREATE TABLE args (value Array(Int)) ENGINE=Memory AS SELECT [1, 1, 1, 1] as value FROM numbers(1, 100); +SELECT length(arrayWithConstant(1000000, value)) FROM args FORMAT NULL; diff --git a/parser/testdata/03217_datetime64_constant_to_ast/ast.json b/parser/testdata/03217_datetime64_constant_to_ast/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03217_datetime64_constant_to_ast/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03217_datetime64_constant_to_ast/metadata.json b/parser/testdata/03217_datetime64_constant_to_ast/metadata.json new file mode 100644 index 000000000..ef120d978 --- 
/dev/null +++ b/parser/testdata/03217_datetime64_constant_to_ast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03217_datetime64_constant_to_ast/query.sql b/parser/testdata/03217_datetime64_constant_to_ast/query.sql new file mode 100644 index 000000000..d01bb8d72 --- /dev/null +++ b/parser/testdata/03217_datetime64_constant_to_ast/query.sql @@ -0,0 +1,25 @@ + +SET session_timezone = 'UTC'; + +SELECT toDateTime64('1970-01-01 00:00:01', 3) +FROM remote('127.0.0.{1,2}', system, one) +; + +SELECT toDateTime64('1970-01-01 00:00:01', 3), dummy +FROM remote('127.0.0.{1,2}', system, one) +GROUP BY dummy +ORDER BY dummy +; + +SELECT materialize(toDateTime64('1970-01-01 00:00:01', 3)), dummy +FROM remote('127.0.0.{1,2}', system, one) +GROUP BY dummy +ORDER BY dummy +; + + +SELECT toDateTime64('1970-01-01 00:00:01', 3), sum(dummy), hostname() +FROM remote('127.0.0.{1,2}', system, one) +GROUP BY hostname() +ORDER BY ALL +; diff --git a/parser/testdata/03217_filtering_in_storage_merge/ast.json b/parser/testdata/03217_filtering_in_storage_merge/ast.json new file mode 100644 index 000000000..7f7d0a8dc --- /dev/null +++ b/parser/testdata/03217_filtering_in_storage_merge/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test_03217_merge_replica_1 (children 2)" + }, + { + "explain": " Identifier test_03217_merge_replica_1" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType UInt32" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001968294, + "rows_read": 6, + "bytes_read": 252 + } +} diff --git a/parser/testdata/03217_filtering_in_storage_merge/metadata.json b/parser/testdata/03217_filtering_in_storage_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03217_filtering_in_storage_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03217_filtering_in_storage_merge/query.sql b/parser/testdata/03217_filtering_in_storage_merge/query.sql new file mode 100644 index 000000000..c138a6cf8 --- /dev/null +++ b/parser/testdata/03217_filtering_in_storage_merge/query.sql @@ -0,0 +1,16 @@ +CREATE TABLE test_03217_merge_replica_1(x UInt32) + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03217_merge_replica', 'r1') + ORDER BY x; +CREATE TABLE test_03217_merge_replica_2(x UInt32) + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03217_merge_replica', 'r2') + ORDER BY x; + + +CREATE TABLE test_03217_all_replicas (x UInt32) + ENGINE = Merge(currentDatabase(), 'test_03217_merge_replica_*'); + +INSERT INTO test_03217_merge_replica_1 SELECT number AS x FROM numbers(10); +SYSTEM SYNC REPLICA test_03217_merge_replica_2; + +-- If the filter on _table is not applied, then the plan will show both replicas +EXPLAIN SELECT _table, count() FROM test_03217_all_replicas WHERE _table = 'test_03217_merge_replica_1' AND x >= 0 GROUP BY _table SETTINGS enable_analyzer=1; diff --git a/parser/testdata/03217_filtering_in_system_tables/ast.json b/parser/testdata/03217_filtering_in_system_tables/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03217_filtering_in_system_tables/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03217_filtering_in_system_tables/metadata.json 
b/parser/testdata/03217_filtering_in_system_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03217_filtering_in_system_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03217_filtering_in_system_tables/query.sql b/parser/testdata/03217_filtering_in_system_tables/query.sql new file mode 100644 index 000000000..1fc761f66 --- /dev/null +++ b/parser/testdata/03217_filtering_in_system_tables/query.sql @@ -0,0 +1,30 @@ +-- If filtering is not done correctly on databases, then this query reports reading 3 rows, which are: `system.tables`, `information_schema.tables` and `INFORMATION_SCHEMA.tables` +SELECT database, table FROM system.tables WHERE database = 'information_schema' AND table = 'tables'; + +CREATE TABLE test_03217_system_tables_replica_1(x UInt32) + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03217_system_tables_replica', 'r1') + ORDER BY x; +CREATE TABLE test_03217_system_tables_replica_2(x UInt32) + ENGINE ReplicatedMergeTree('/clickhouse/tables/{database}/test_03217_system_tables_replica', 'r2') + ORDER BY x; + +-- Make sure we can read both replicas +-- The replica name might be altered because of `_functional_tests_helper_database_replicated_replace_args_macros`, +-- thus we need to use `left` +SELECT 'both', database, table, left(replica_name, 2) FROM system.replicas WHERE database = currentDatabase(); +-- If filtering is not done correctly on the database-table columns, then this query reports reading 2 rows, which are the two tables above +SELECT database, table, left(replica_name, 2) FROM system.replicas WHERE database = currentDatabase() AND table = 'test_03217_system_tables_replica_1' AND replica_name LIKE 'r1%'; +SYSTEM FLUSH LOGS query_log; +-- argMax is necessary to make the test repeatable + +-- StorageSystemTables +SELECT argMax(read_rows, event_time_microseconds) FROM system.query_log WHERE 1 + AND current_database = currentDatabase() + AND query LIKE '%SELECT database, table FROM system.tables WHERE database = \'information_schema\' AND table = \'tables\';' + AND type = 'QueryFinish'; + +-- StorageSystemReplicas +SELECT argMax(read_rows, event_time_microseconds) FROM system.query_log WHERE 1 + AND current_database = currentDatabase() + AND query LIKE '%SELECT database, table, left(replica_name, 2) FROM system.replicas WHERE database = currentDatabase() AND table = \'test_03217_system_tables_replica_1\' AND replica_name LIKE \'r1\%\';' + AND type = 'QueryFinish'; diff --git a/parser/testdata/03217_fliter_pushdown_no_keys/ast.json b/parser/testdata/03217_fliter_pushdown_no_keys/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03217_fliter_pushdown_no_keys/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03217_fliter_pushdown_no_keys/metadata.json b/parser/testdata/03217_fliter_pushdown_no_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03217_fliter_pushdown_no_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03217_fliter_pushdown_no_keys/query.sql b/parser/testdata/03217_fliter_pushdown_no_keys/query.sql new file mode 100644 index 000000000..cb8bf59e7 --- /dev/null +++ b/parser/testdata/03217_fliter_pushdown_no_keys/query.sql @@ -0,0 +1,26 @@ + + + +select * from ( select sum(last_seen) as dates_seen, materialize(1) as last_seen ) where last_seen > 2; +select * from ( select sum(last_seen) as dates_seen, materialize(2) as last_seen
) where last_seen < 2; +select * from ( select sum(last_seen) as dates_seen, materialize(2) as last_seen GROUP BY 'a' ) where last_seen < 2; + +select '---'; +select * from ( select sum(last_seen) as dates_seen, 1 as last_seen UNION ALL select sum(last_seen) as dates_seen, 3 as last_seen ) where last_seen < 2; + +select '---'; +select * from ( select sum(last_seen) as dates_seen, 1 as last_seen UNION ALL select sum(last_seen) as dates_seen, 3 as last_seen ) where last_seen > 2; + +select '---'; +with activity as ( + select + groupUniqArrayState(toDate('2025-01-01 01:00:00')) as dates_seen, + toDateTime('2025-01-01 01:00:00') as last_seen + union all + select + groupUniqArrayState(toDate('2023-11-11 11:11:11')) as dates_seen, + toDateTime('2023-11-11 11:11:11') as last_seen +) +select last_seen from activity +where last_seen < toDateTime('2020-01-01 00:00:00'); +select '---'; diff --git a/parser/testdata/03217_json_merge_patch_stack_overflow/ast.json b/parser/testdata/03217_json_merge_patch_stack_overflow/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03217_json_merge_patch_stack_overflow/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03217_json_merge_patch_stack_overflow/metadata.json b/parser/testdata/03217_json_merge_patch_stack_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03217_json_merge_patch_stack_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03217_json_merge_patch_stack_overflow/query.sql b/parser/testdata/03217_json_merge_patch_stack_overflow/query.sql new file mode 100644 index 000000000..4b366b08c --- /dev/null +++ b/parser/testdata/03217_json_merge_patch_stack_overflow/query.sql @@ -0,0 +1,9 @@ +-- Tags: no-fasttest +-- Needs rapidjson library +SELECT JSONMergePatch(REPEAT('{"c":', 1000000)); -- { serverError BAD_ARGUMENTS } +SELECT JSONMergePatch(REPEAT('{"c":', 100000)); -- { serverError BAD_ARGUMENTS } +SELECT JSONMergePatch(REPEAT('{"c":', 10000)); -- { serverError BAD_ARGUMENTS } +SELECT JSONMergePatch(REPEAT('{"c":', 1000)); -- { serverError BAD_ARGUMENTS } +SELECT JSONMergePatch(REPEAT('{"c":', 100)); -- { serverError BAD_ARGUMENTS } +SELECT JSONMergePatch(REPEAT('{"c":', 10)); -- { serverError BAD_ARGUMENTS } +SELECT JSONMergePatch(REPEAT('{"c":', 1)); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03217_primary_index_memory_leak/ast.json b/parser/testdata/03217_primary_index_memory_leak/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03217_primary_index_memory_leak/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03217_primary_index_memory_leak/metadata.json b/parser/testdata/03217_primary_index_memory_leak/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03217_primary_index_memory_leak/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03217_primary_index_memory_leak/query.sql b/parser/testdata/03217_primary_index_memory_leak/query.sql new file mode 100644 index 000000000..d5a553c7d --- /dev/null +++ b/parser/testdata/03217_primary_index_memory_leak/query.sql @@ -0,0 +1,15 @@ +-- Tags: no-debug, no-tsan, no-msan, no-asan, no-random-settings, no-random-merge-tree-settings + +DROP TABLE IF EXISTS t_primary_index_memory; +CREATE TABLE t_primary_index_memory (s String) ENGINE = MergeTree +ORDER BY s SETTINGS index_granularity = 1; + +INSERT INTO 
t_primary_index_memory SELECT repeat('a', 10000) FROM numbers(150000) +SETTINGS + max_block_size = 32, + max_memory_usage = '100M', + max_insert_block_size = 1024, + min_insert_block_size_rows = 1024; + +SELECT count() FROM t_primary_index_memory; +DROP TABLE t_primary_index_memory; diff --git a/parser/testdata/03218_materialize_msan/ast.json b/parser/testdata/03218_materialize_msan/ast.json new file mode 100644 index 000000000..f1bf4cafb --- /dev/null +++ b/parser/testdata/03218_materialize_msan/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001221309, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03218_materialize_msan/metadata.json b/parser/testdata/03218_materialize_msan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03218_materialize_msan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03218_materialize_msan/query.sql b/parser/testdata/03218_materialize_msan/query.sql new file mode 100644 index 000000000..7e7043e68 --- /dev/null +++ b/parser/testdata/03218_materialize_msan/query.sql @@ -0,0 +1,23 @@ +SET enable_analyzer = 1; + +SELECT + materialize([(NULL, '11\01111111\011111', '1111')]) AS t, + (t[1048576]).2, + materialize(-2147483648), + (t[-2147483648]).1 +GROUP BY + materialize([(NULL, '1')]), + '', + (materialize((t[1023]).2), (materialize(''), (t[2147483647]).1, materialize(9223372036854775807)), (materialize(''), materialize(NULL, 2147483647, t[65535], 256)), materialize(NULL)) +; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +SELECT + materialize([(NULL, '11\01111111\011111', '1111')]) AS t, + (t[1048576]).2, + materialize(-2147483648), + (t[-2147483648]).1 +GROUP BY + materialize([(NULL, '1')]), + '', + (materialize((t[1023]).2), (materialize(''), (t[2147483647]).1, materialize(9223372036854775807)), (materialize(''), materialize(NULL), materialize(2147483647), materialize(t[65535]), materialize(256)), materialize(NULL)) +; diff --git a/parser/testdata/03221_create_if_not_exists_setting/ast.json b/parser/testdata/03221_create_if_not_exists_setting/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03221_create_if_not_exists_setting/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03221_create_if_not_exists_setting/metadata.json b/parser/testdata/03221_create_if_not_exists_setting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03221_create_if_not_exists_setting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03221_create_if_not_exists_setting/query.sql b/parser/testdata/03221_create_if_not_exists_setting/query.sql new file mode 100644 index 000000000..18b3ed7bc --- /dev/null +++ b/parser/testdata/03221_create_if_not_exists_setting/query.sql @@ -0,0 +1,24 @@ +-- Tags: no-parallel + +SET create_if_not_exists=0; -- Default + +DROP TABLE IF EXISTS example_table; +CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; +CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; -- { serverError TABLE_ALREADY_EXISTS } + +DROP DATABASE IF EXISTS example_database; +CREATE DATABASE example_database; +CREATE DATABASE example_database; -- { serverError DATABASE_ALREADY_EXISTS } + +SET create_if_not_exists=1; + +DROP TABLE IF EXISTS example_table; +CREATE TABLE 
example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; +CREATE TABLE example_table (id UInt32) ENGINE=MergeTree() ORDER BY id; + +DROP DATABASE IF EXISTS example_database; +CREATE DATABASE example_database; +CREATE DATABASE example_database; + +DROP DATABASE IF EXISTS example_database; +DROP TABLE IF EXISTS example_table; \ No newline at end of file diff --git a/parser/testdata/03221_incomplete_utf8_sequence/ast.json b/parser/testdata/03221_incomplete_utf8_sequence/ast.json new file mode 100644 index 000000000..253cde979 --- /dev/null +++ b/parser/testdata/03221_incomplete_utf8_sequence/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001899767, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03221_incomplete_utf8_sequence/metadata.json b/parser/testdata/03221_incomplete_utf8_sequence/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03221_incomplete_utf8_sequence/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03221_incomplete_utf8_sequence/query.sql b/parser/testdata/03221_incomplete_utf8_sequence/query.sql new file mode 100644 index 000000000..ee4f25f3b --- /dev/null +++ b/parser/testdata/03221_incomplete_utf8_sequence/query.sql @@ -0,0 +1,2 @@ +SET output_format_write_statistics = 0; +SELECT unhex('f0') FORMAT JSONCompact; diff --git a/parser/testdata/03221_key_condition_bug/ast.json b/parser/testdata/03221_key_condition_bug/ast.json new file mode 100644 index 000000000..145dc7f04 --- /dev/null +++ b/parser/testdata/03221_key_condition_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery report_metrics_v2 (children 1)" + }, + { + "explain": " Identifier report_metrics_v2" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001553709, + "rows_read": 2, + "bytes_read": 87 + } +} diff --git a/parser/testdata/03221_key_condition_bug/metadata.json b/parser/testdata/03221_key_condition_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03221_key_condition_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03221_key_condition_bug/query.sql b/parser/testdata/03221_key_condition_bug/query.sql new file mode 100644 index 000000000..bac3e631a --- /dev/null +++ b/parser/testdata/03221_key_condition_bug/query.sql @@ -0,0 +1,11 @@ +CREATE TABLE IF NOT EXISTS report_metrics_v2 +( + `a` UInt64 +) Engine = MergeTree() +ORDER BY a; + +insert into report_metrics_v2 SELECT * FROM system.numbers LIMIT 50000; + +SELECT count(*) from report_metrics_v2 WHERE (intDiv(a, 50) = 200) AND (intDiv(a, 50000) = 0); + +DROP TABLE report_metrics_v2; diff --git a/parser/testdata/03221_merge_profile_events/ast.json b/parser/testdata/03221_merge_profile_events/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03221_merge_profile_events/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03221_merge_profile_events/metadata.json b/parser/testdata/03221_merge_profile_events/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03221_merge_profile_events/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03221_merge_profile_events/query.sql 
b/parser/testdata/03221_merge_profile_events/query.sql new file mode 100644 index 000000000..8ebaf7121 --- /dev/null +++ b/parser/testdata/03221_merge_profile_events/query.sql @@ -0,0 +1,96 @@ +-- Tags: no-random-settings, no-random-merge-tree-settings + +DROP TABLE IF EXISTS t_merge_profile_events_1; + +CREATE TABLE t_merge_profile_events_1 (id UInt64, v1 UInt64, v2 UInt64) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO t_merge_profile_events_1 SELECT number, number, number FROM numbers(10000); +INSERT INTO t_merge_profile_events_1 SELECT number, number, number FROM numbers(10000); + +OPTIMIZE TABLE t_merge_profile_events_1 FINAL; +SYSTEM FLUSH LOGS part_log; + +SELECT + merge_algorithm, + ProfileEvents['Merge'], + ProfileEvents['MergedRows'], + ProfileEvents['MergedColumns'], + ProfileEvents['GatheredColumns'], + ProfileEvents['MergedUncompressedBytes'], + ProfileEvents['MergeTotalMilliseconds'] > 0, + ProfileEvents['MergeExecuteMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageExecuteMilliseconds'] > 0, + ProfileEvents['UserTimeMicroseconds'] > 0, + ProfileEvents['OSCPUVirtualTimeMicroseconds'] > 0, +FROM system.part_log WHERE database = currentDatabase() AND table = 't_merge_profile_events_1' AND event_type = 'MergeParts' AND part_name = 'all_1_2_1'; + +DROP TABLE IF EXISTS t_merge_profile_events_1; + +DROP TABLE IF EXISTS t_merge_profile_events_2; + +CREATE TABLE t_merge_profile_events_2 (id UInt64, v1 UInt64, v2 UInt64) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 1; + +INSERT INTO t_merge_profile_events_2 SELECT number, number, number FROM numbers(10000); +INSERT INTO t_merge_profile_events_2 SELECT number, number, number FROM numbers(10000); + +OPTIMIZE TABLE t_merge_profile_events_2 FINAL; +SYSTEM FLUSH LOGS part_log; + +SELECT + merge_algorithm, + ProfileEvents['Merge'], + ProfileEvents['MergedRows'], + ProfileEvents['MergedColumns'], + ProfileEvents['GatheredColumns'], + ProfileEvents['MergedUncompressedBytes'], + ProfileEvents['MergeTotalMilliseconds'] > 0, + ProfileEvents['MergeExecuteMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageExecuteMilliseconds'] > 0, + ProfileEvents['MergeVerticalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeVerticalStageExecuteMilliseconds'] > 0, + ProfileEvents['UserTimeMicroseconds'] > 0, + ProfileEvents['OSCPUVirtualTimeMicroseconds'] > 0, +FROM system.part_log WHERE database = currentDatabase() AND table = 't_merge_profile_events_2' AND event_type = 'MergeParts' AND part_name = 'all_1_2_1'; + +DROP TABLE IF EXISTS t_merge_profile_events_2; + +DROP TABLE IF EXISTS t_merge_profile_events_3; + +CREATE TABLE t_merge_profile_events_3 (id UInt64, v1 UInt64, v2 UInt64, PROJECTION p (SELECT v2, v2 * v2, v2 * 2, v2 * 10, v1 ORDER BY v1)) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 1; + +INSERT INTO t_merge_profile_events_3 SELECT number, number, number FROM numbers(100000); +INSERT INTO t_merge_profile_events_3 SELECT number, number, number FROM numbers(100000); + +OPTIMIZE TABLE t_merge_profile_events_3 FINAL; +SYSTEM FLUSH LOGS part_log; + +SELECT + merge_algorithm, + ProfileEvents['Merge'], + 
ProfileEvents['MergedRows'], + ProfileEvents['MergedColumns'], + ProfileEvents['GatheredColumns'], + ProfileEvents['MergedUncompressedBytes'], + ProfileEvents['MergeTotalMilliseconds'] > 0, + ProfileEvents['MergeExecuteMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeHorizontalStageExecuteMilliseconds'] > 0, + ProfileEvents['MergeVerticalStageTotalMilliseconds'] > 0, + ProfileEvents['MergeVerticalStageExecuteMilliseconds'] > 0, + ProfileEvents['MergeProjectionStageTotalMilliseconds'] > 0, + ProfileEvents['MergeProjectionStageExecuteMilliseconds'] > 0, + ProfileEvents['MergeExecuteMilliseconds'] <= duration_ms, + ProfileEvents['MergeTotalMilliseconds'] <= duration_ms, + ProfileEvents['UserTimeMicroseconds'] > 0, + ProfileEvents['OSCPUVirtualTimeMicroseconds'] > 0, +FROM system.part_log WHERE database = currentDatabase() AND table = 't_merge_profile_events_3' AND event_type = 'MergeParts' AND part_name = 'all_1_2_1'; + +DROP TABLE IF EXISTS t_merge_profile_events_3; diff --git a/parser/testdata/03221_refreshable_matview_progress/ast.json b/parser/testdata/03221_refreshable_matview_progress/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03221_refreshable_matview_progress/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03221_refreshable_matview_progress/metadata.json b/parser/testdata/03221_refreshable_matview_progress/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03221_refreshable_matview_progress/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03221_refreshable_matview_progress/query.sql b/parser/testdata/03221_refreshable_matview_progress/query.sql new file mode 100644 index 000000000..48f67121b --- /dev/null +++ b/parser/testdata/03221_refreshable_matview_progress/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-replicated-database, no-ordinary-database + +CREATE MATERIALIZED VIEW 03221_rmv +REFRESH AFTER 10 SECOND +( +x UInt64 +) +ENGINE = Memory +AS SELECT number AS x +FROM numbers(3) +UNION ALL +SELECT rand64() AS x; + +SELECT sleep(2); + +SELECT read_rows, total_rows, progress FROM system.view_refreshes WHERE database = currentDatabase() and view = '03221_rmv'; + +DROP TABLE 03221_rmv; diff --git a/parser/testdata/03221_variant_logical_error/ast.json b/parser/testdata/03221_variant_logical_error/ast.json new file mode 100644 index 000000000..28bd8c652 --- /dev/null +++ b/parser/testdata/03221_variant_logical_error/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001828348, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03221_variant_logical_error/metadata.json b/parser/testdata/03221_variant_logical_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03221_variant_logical_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03221_variant_logical_error/query.sql b/parser/testdata/03221_variant_logical_error/query.sql new file mode 100644 index 000000000..bff0d89e6 --- /dev/null +++ b/parser/testdata/03221_variant_logical_error/query.sql @@ -0,0 +1,27 @@ +set allow_experimental_variant_type = 1; +set allow_suspicious_types_in_order_by = 1; + +DROP TABLE IF EXISTS test; + +CREATE TABLE test( + key String, + val Map(String, Variant(String, Int32, 
DateTime64(3, 'UTC'))) +) engine = ReplicatedMergeTree('/clickhouse/tables/{database}/table', '1') +order by key; + +insert into test VALUES ('a', {'a':'a', 'b':1, 'c': '2020-01-01 10:10:10.11'}); +insert into test VALUES ('', {'':'xx', '':4}); +insert into test VALUES ('', {'x':'xx'}); +insert into test VALUES ('', {}); +insert into test VALUES ('a', {'a':'a', 'b':1, 'c': '2020-01-01 10:10:10'}); +insert into test VALUES ('a', {'a':'b', 'b':1, 'c': '2020-01-01'}); +insert into test VALUES ('z', {'a':'a'}); +insert into test VALUES ('a', {'a': Null}); +insert into test VALUES ('a', {'a': Null, 'a': Null}); +insert into test VALUES ('a', {'a': Null, 'c': Null}); + +SELECT variantElement(arrayJoin(mapValues(val)), 'String') FROM test ORDER BY ALL; +select '---'; +SELECT key, arrayJoin(mapValues(val)) FROM test ORDER BY ALL; + +DROP TABLE test; diff --git a/parser/testdata/03222_create_timeseries_table/ast.json b/parser/testdata/03222_create_timeseries_table/ast.json new file mode 100644 index 000000000..96723e20f --- /dev/null +++ b/parser/testdata/03222_create_timeseries_table/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00105827, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03222_create_timeseries_table/metadata.json b/parser/testdata/03222_create_timeseries_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03222_create_timeseries_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03222_create_timeseries_table/query.sql b/parser/testdata/03222_create_timeseries_table/query.sql new file mode 100644 index 000000000..87ebe3d48 --- /dev/null +++ b/parser/testdata/03222_create_timeseries_table/query.sql @@ -0,0 +1,8 @@ +SET allow_experimental_time_series_table = 1; + +CREATE TABLE 03222_timeseries_table1 ENGINE = TimeSeries FORMAT Null; +CREATE TABLE 03222_timeseries_table2 ENGINE = TimeSeries SETTINGS store_min_time_and_max_time = 1, aggregate_min_time_and_max_time = 1 FORMAT Null; + +--- This doesn't work because allow_nullable_key cannot be set in query for the internal MergeTree tables +--- CREATE TABLE 03222_timeseries_table3 ENGINE = TimeSeries SETTINGS store_min_time_and_max_time = 1, aggregate_min_time_and_max_time = 0; +CREATE TABLE 03222_timeseries_table4 ENGINE = TimeSeries SETTINGS store_min_time_and_max_time = 0 FORMAT Null; diff --git a/parser/testdata/03222_date_time_inference/ast.json b/parser/testdata/03222_date_time_inference/ast.json new file mode 100644 index 000000000..3406bf661 --- /dev/null +++ b/parser/testdata/03222_date_time_inference/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001826494, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03222_date_time_inference/metadata.json b/parser/testdata/03222_date_time_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03222_date_time_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03222_date_time_inference/query.sql b/parser/testdata/03222_date_time_inference/query.sql new file mode 100644 index 000000000..b16f72c72 --- /dev/null +++ b/parser/testdata/03222_date_time_inference/query.sql @@ -0,0 +1,288 @@ +set 
input_format_try_infer_datetimes = 1; +set input_format_try_infer_dates = 1; +set schema_inference_make_columns_nullable = 0; +set input_format_json_try_infer_numbers_from_strings = 0; +set session_timezone = 'UTC'; + +select 'Date'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:01:01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:1:01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:01:1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:1:1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-1-01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-1-1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020/01/01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020/1/01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020/01/1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020/1/1"}'); + +select 'String'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020_01_01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020_1_01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020_01_1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020_1_1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020a01a01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020a1a01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020a01a1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020a1a1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20200101"}'); + +select 'DateTime'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:01:01 42:42:42"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020/01/01 42:42:42"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42:42:42"}'); + +select 'String'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020_01_01 42:42:42"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020a01a01 42:42:42"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42.42.42"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42 42 42"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42a42a42"}'); + +select 'DateTime64'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020:01:01 42:42:42.4242"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020/01/01 42:42:42.4242"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42:42:42.4242"}'); + +select 'String'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020_01_01 42:42:42.4242"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020a01a01 42:42:42.4242"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42.42.42.4242"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42 42 42.4242"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2020-01-01 42a42a42.4242"}'); + +set date_time_input_format='best_effort'; +select 'DateTime/DateTime64 best effort'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 00:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01:00:00"}'); +select x, 
toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203.000 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203 MSK+0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203.000 MSK+0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203.000 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/17 010203.000Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/1970 010203Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/1970 010203.000Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/70 010203Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "02/01/70 010203.000Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "11 Feb 2018 06:40:50 +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "11 Feb 2018 06:40:50.000 +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "17 Apr 2000 2 1:2:3"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "17 Apr 2000 2 1:2:3.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "19700102 01:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "19700102 01:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "19700102010203Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "19700102010203Z.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "1970/01/02 010203Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "1970/01/02 010203.000Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2016-01-01MSD"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2016-01-01 MSD"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2016-01-01UTC"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2016-01-01Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "201701 02 010203 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "201701 02 010203.000 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+0"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+0"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+0000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+0000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05 -0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000 -0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+030"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+030"}'); +select x, 
toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05+900"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000+900"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05GMT"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000GMT"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05 MSD"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000 MSD"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05 MSD Feb"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000 MSD Feb"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05 MSD Jun"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000 MSD Jun"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02 03:04:05.000 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05+00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05.000+00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05 -0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05.000 -0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05-0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05.000-0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05+0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05.000+0100"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017-01-02T03:04:05.000Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 01 11:22:33"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 01 11:22:33.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 010203 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 010203.000 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 01:2:3 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 01:2:3.000 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:02:3"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:02:3.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 11:22:33"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" 
: "2017 Apr 02 11:22:33.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:03"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:03.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:22:33"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:22:33.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:33"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:33.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3.000 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3 UTC+0000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3.000 UTC+0000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3.000 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3 UTC+0400"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 02 1:2:3.000 UTC+0400"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 2 1:2:3"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Apr 2 1:2:3.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Jan 02 010203 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2017 Jan 02 010203.000 UTC+0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Apr 2017 01:02:03"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Apr 2017 01:02:03.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Apr 2017 1:2:3"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Apr 2017 1:2:3.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 MSK"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z+03:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z+03:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z +03:00 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z +03:00 
PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z +0300 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z +0300 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z+03:00 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z+03:00 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z +03:30 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z +03:30 PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3Z Mon"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000Z Mon"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3Z PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000Z PM"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3 Z PM +03:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "25 Jan 2017 1:2:3.000 Z PM +03:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 11 Feb 2018 06:40:50 +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 11 Feb 2018 06:40:50.000 +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun, 11 Feb 2018 06:40:50 +0300"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun, 11 Feb 2018 06:40:50.000 +0300"}'); + +select 'String'; +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200001"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000010"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200001010"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000101010"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200001010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000010101010"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000101010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200001.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000010.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000101.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "200001010.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000010101.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000101010.1"}'); +select x, 
toTypeName(x) from format(JSONEachRow, '{"x" : "200001010101.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000010101010.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "20000101010101.1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar2020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 2020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar01012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 01012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar0101202001"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 0101202001"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar010120200101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 010120200101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar01012020010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 01012020010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar01012020010101.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 0101202001010101.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 1"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun2020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 2020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun01012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 01012020"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun0101202001"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 0101202001"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun010120200101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 010120200101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun01012020010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 01012020010101"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun01012020010101.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Sun 0101202001010101.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 01 01 01:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 01 01 01:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000a01a01 01:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000a01a01 01:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01 00 
00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01 00 00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01-00-00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01-00-00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01a00a00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01-01 01a00a00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01 01:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01 01:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000-01"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 2000 00:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 2000 00:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 00:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "2000 00:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 2000-01-01 00:00:00"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "Mar 2000-01-01 00:00:00.000"}'); +select x, toTypeName(x) from format(JSONEachRow, '{"x" : "1.7.10"}'); + diff --git a/parser/testdata/03222_datetime64_small_value_const/ast.json b/parser/testdata/03222_datetime64_small_value_const/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03222_datetime64_small_value_const/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03222_datetime64_small_value_const/metadata.json b/parser/testdata/03222_datetime64_small_value_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03222_datetime64_small_value_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03222_datetime64_small_value_const/query.sql b/parser/testdata/03222_datetime64_small_value_const/query.sql new file mode 100644 index 000000000..2855ce488 --- /dev/null +++ b/parser/testdata/03222_datetime64_small_value_const/query.sql @@ -0,0 +1,44 @@ +-- Tags: shard +set session_timezone = 'UTC'; -- don't randomize the session timezone +SET enable_analyzer = 1; + +select *, (select toDateTime64(0, 3)) from remote('127.0.0.1', system.one) settings prefer_localhost_replica=0; +select *, (select toDateTime64(5, 3)) from remote('127.0.0.1', system.one) settings prefer_localhost_replica=0; +select *, (select toDateTime64('1970-01-01 00:45:25.456789', 6)) from remote('127.0.0.1', system.one) settings prefer_localhost_replica=0; +select *, (select toDateTime64('1970-01-01 00:53:25.456789123', 9)) from remote('127.0.0.1', system.one) settings prefer_localhost_replica=0; +select *, (select toDateTime64(null,3)) from remote('127.0.0.1', system.one) settings prefer_localhost_replica=0; + +create database if not exists shard_0; +create database if not exists shard_1; + +drop table if exists shard_0.dt64_03222; +drop table if exists shard_1.dt64_03222; +drop table if exists distr_03222_dt64; + +create table shard_0.dt64_03222(id UInt64, dt DateTime64(3)) engine = MergeTree order by id; +create table shard_1.dt64_03222(id UInt64, dt DateTime64(3)) engine = MergeTree order by id; +create table distr_03222_dt64 (id UInt64, dt DateTime64(3)) engine = Distributed(test_cluster_two_shards_different_databases, '', dt64_03222); + +insert into shard_0.dt64_03222 values(1, 
toDateTime64('1970-01-01 00:00:00.000',3)); +insert into shard_0.dt64_03222 values(2, toDateTime64('1970-01-01 00:00:02.456',3)); +insert into shard_1.dt64_03222 values(3, toDateTime64('1970-01-01 00:00:04.811',3)); +insert into shard_1.dt64_03222 values(4, toDateTime64('1970-01-01 00:10:05',3)); +insert into shard_1.dt64_03222 values(5, toDateTime64(0,3)); + +-- Expected results, one group per query below: 1,5 | 2,3,4 | 4 | 1,2,3,5 | 0 | 0 | 5 +select id, dt from distr_03222_dt64 where dt = (select toDateTime64(0,3)) order by id; +select id, dt from distr_03222_dt64 where dt > (select toDateTime64(0,3)) order by id; +select id, dt from distr_03222_dt64 where dt > (select toDateTime64('1970-01-01 00:10:00.000',3)) order by id; +select id, dt from distr_03222_dt64 where dt < (select toDateTime64(5,3)) order by id; + +select count(*) from distr_03222_dt64 where dt > (select toDateTime64('2024-07-20 00:00:00',3)); +select count(*) from distr_03222_dt64 where dt > (select now()); +select count(*) from distr_03222_dt64 where dt < (select toDateTime64('2004-07-20 00:00:00',3)); + + +drop table if exists shard_0.dt64_03222; +drop table if exists shard_1.dt64_03222; +drop table if exists distr_03222_dt64; + +drop database shard_0; +drop database shard_1; diff --git a/parser/testdata/03222_ignore_nulls_query_tree_elimination/ast.json b/parser/testdata/03222_ignore_nulls_query_tree_elimination/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03222_ignore_nulls_query_tree_elimination/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03222_ignore_nulls_query_tree_elimination/metadata.json b/parser/testdata/03222_ignore_nulls_query_tree_elimination/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03222_ignore_nulls_query_tree_elimination/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03222_ignore_nulls_query_tree_elimination/query.sql b/parser/testdata/03222_ignore_nulls_query_tree_elimination/query.sql new file mode 100644 index 000000000..72f9781ed --- /dev/null +++ b/parser/testdata/03222_ignore_nulls_query_tree_elimination/query.sql @@ -0,0 +1,51 @@ +#!/usr/bin/env -S ${HOME}/clickhouse-client --queries-file + +DROP TABLE IF EXISTS with_fill_date__fuzz_0; + +CREATE TABLE with_fill_date__fuzz_0 +( + `d` Date, + `d32` Nullable(Int32), + `d33` Int32 +) +ENGINE = Memory; + + +INSERT INTO with_fill_date__fuzz_0 VALUES (toDate('2020-03-03'), 1, 3), (toDate('2020-03-03'), NULL, 3), (toDate('2020-02-05'), 1, 1); + + +SELECT count() +FROM with_fill_date__fuzz_0 +ORDER BY + count(), + count() IGNORE NULLS, + max(d) +WITH FILL STEP toIntervalDay(10) +; + + +SELECT count() +FROM with_fill_date__fuzz_0 +ORDER BY + any(d32) RESPECT NULLS, + any_respect_nulls(d32), + max(d) +WITH FILL STEP toIntervalDay(10) +; + + +SELECT count() +FROM with_fill_date__fuzz_0 +ORDER BY + any(d32), + any(d32) IGNORE NULLS, + any(d32) RESPECT NULLS, + any_respect_nulls(d32) IGNORE NULLS, + any_respect_nulls(d32), + sum(d33), + sum(d33) IGNORE NULLS, + max(d) +WITH FILL STEP toIntervalDay(10) +; + + diff --git a/parser/testdata/03222_json_empty_as_default/ast.json b/parser/testdata/03222_json_empty_as_default/ast.json new file mode 100644 index 000000000..e94e769e2 --- /dev/null +++ b/parser/testdata/03222_json_empty_as_default/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00123485, + "rows_read":
1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03222_json_empty_as_default/metadata.json b/parser/testdata/03222_json_empty_as_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03222_json_empty_as_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03222_json_empty_as_default/query.sql b/parser/testdata/03222_json_empty_as_default/query.sql new file mode 100644 index 000000000..1243d450c --- /dev/null +++ b/parser/testdata/03222_json_empty_as_default/query.sql @@ -0,0 +1,60 @@ +SET input_format_json_empty_as_default = 1, allow_experimental_variant_type = 1; + +-- Simple types +-- { echoOn } +SELECT x FROM format(JSONEachRow, 'x Date', '{"x":""}'); +SELECT x FROM format(JSONEachRow, 'x Date32', '{"x":""}'); +SELECT toTimeZone(x, 'UTC') FROM format(JSONEachRow, 'x DateTime', '{"x":""}'); +SELECT toTimeZone(x, 'UTC') FROM format(JSONEachRow, 'x DateTime64', '{"x":""}'); +SELECT x FROM format(JSONEachRow, 'x IPv4', '{"x":""}'); +SELECT x FROM format(JSONEachRow, 'x IPv6', '{"x":""}'); +SELECT x FROM format(JSONEachRow, 'x UUID', '{"x":""}'); +-- { echoOff } + +-- Simple type AggregateFunction +DROP TABLE IF EXISTS table1; +CREATE TABLE table1(col AggregateFunction(uniq, UInt64)) ENGINE=Memory(); +DROP TABLE IF EXISTS table2; +CREATE TABLE table2(UserID UInt64) ENGINE=Memory(); + +INSERT INTO table1 SELECT uniqState(UserID) FROM table2; +INSERT INTO table1 SELECT x FROM format(JSONEachRow, 'x AggregateFunction(uniq, UInt64)' AS T, '{"x":""}'); + +-- { echoOn } +SELECT COUNT(DISTINCT col) FROM table1; +-- { echoOff } + +DROP TABLE table1; +DROP TABLE table2; + +-- The setting input_format_defaults_for_omitted_fields determines which default is used: when enabled, the empty value takes the column's DEFAULT expression; when disabled, it takes the type's zero value. +CREATE TABLE table1(address IPv6 DEFAULT toIPv6('2001:db8:3333:4444:5555:6666:7777:8888')) ENGINE=Memory(); + +SET input_format_defaults_for_omitted_fields = 0; +INSERT INTO table1 FORMAT JSONEachRow {"address":""}; + +SET input_format_defaults_for_omitted_fields = 1; +INSERT INTO table1 FORMAT JSONEachRow {"address":""}; + +-- { echoOn } +SELECT * FROM table1 ORDER BY address ASC; +-- { echoOff } + +DROP TABLE table1; + +-- Nullable
-- { echoOn } +SELECT x FROM format(JSONEachRow, 'x Nullable(IPv6)', '{"x":""}'); + +-- Compound types +SELECT x FROM format(JSONEachRow, 'x Array(UUID)', '{"x":["00000000-0000-0000-0000-000000000000","b15f852c-c41a-4fd6-9247-1929c841715e",""]}'); +SELECT x FROM format(JSONEachRow, 'x Array(Nullable(IPv6))', '{"x":["",""]}'); +SELECT x FROM format(JSONEachRow, 'x Tuple(Date, IPv4, String)', '{"x":["", "", "abc"]}'); +SELECT x FROM format(JSONEachRow, 'x Map(String, IPv6)', '{"x":{"abc": ""}}'); +SELECT x FROM format(JSONEachRow, 'x Variant(Date, UUID)', '{"x":""}'); + +-- Deep composition +SELECT x FROM format(JSONEachRow, 'x Array(Array(IPv6))', '{"x":[["2001:db8:3333:4444:CCCC:DDDD:EEEE:FFFF", ""], ["", "2001:db8:3333:4444:5555:6666:7777:8888"]]}'); +SELECT x FROM format(JSONEachRow, 'x Variant(Date, Array(UUID))', '{"x":["", "b15f852c-c41a-4fd6-9247-1929c841715e"]}'); +SELECT x FROM format(JSONEachRow, 'x Tuple(Array(UUID), Tuple(UUID, Map(String, IPv6)))', '{"x":[[""], ["",{"abc":""}]]}'); +SELECT x FROM format(JSONEachRow, 'x Map(Tuple(Date,IPv4), Variant(UUID,IPv6))', '{"x":{["",""]:""}}'); diff --git a/parser/testdata/03222_json_squashing/ast.json b/parser/testdata/03222_json_squashing/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03222_json_squashing/ast.json @@
-0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03222_json_squashing/metadata.json b/parser/testdata/03222_json_squashing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03222_json_squashing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03222_json_squashing/query.sql b/parser/testdata/03222_json_squashing/query.sql new file mode 100644 index 000000000..ef818bad5 --- /dev/null +++ b/parser/testdata/03222_json_squashing/query.sql @@ -0,0 +1,82 @@ +-- Tags: long + +SET enable_json_type = 1; +set max_block_size = 1000; + +drop table if exists test; + +create table test (json JSON) engine=MergeTree order by tuple(); +insert into test select multiIf(number < 1000, '{}'::JSON, number < 3000, '{"a" : 42, "b" : "Hello"}'::JSON, '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON) from numbers(20000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by path; + +truncate table test; +insert into test select multiIf(number < 1000, '{"a" : 42, "b" : "Hello"}'::JSON, number < 3000, '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON, '{"e" : 43, "f" : ["s1", "s2", "s3"]}'::JSON) from numbers(20000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by path; + +drop table test; +create table test (json JSON(max_dynamic_paths=2)) engine=MergeTree order by tuple(); +insert into test select multiIf(number < 1000, '{}'::JSON(max_dynamic_paths=2), number < 3000, '{"a" : 42, "b" : "Hello"}'::JSON(max_dynamic_paths=2), '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON(max_dynamic_paths=2)) from numbers(20000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by path; + +truncate table test; +insert into test select multiIf(number < 1000, '{"a" : 42, "b" : "Hello"}'::JSON(max_dynamic_paths=2), number < 3000, '{"c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON(max_dynamic_paths=2), '{"e" : 43, "f" : ["s1", "s2", "s3"]}'::JSON(max_dynamic_paths=2)) from numbers(20000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by path; + +truncate table test; +insert into test select multiIf(number < 1000, '{"a" : 42}'::JSON(max_dynamic_paths=2), number < 3000, '{"b" : "Hello", "c" : [1, 2, 3], "d" : "2020-01-01"}'::JSON(max_dynamic_paths=2), '{"e" : 43}'::JSON(max_dynamic_paths=2)) from numbers(20000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(json)) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) as path from test order by path; 
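+-- Note: with max_dynamic_paths=2, paths beyond the limit are kept in the JSON column's shared data rather than as separate dynamic subcolumns; the shared-data check below verifies this spill-over.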
+select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) as path from test order by path; + +drop table test; +create table test (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple(); +insert into test select multiIf(number < 1000, '{}'::JSON(max_dynamic_paths=8), number < 3000, materialize('{"a" : [{"b" : 42, "c" : [1, 2, 3]}]}')::JSON(max_dynamic_paths=8), materialize('{"a" : [{"d" : "2020-01-01", "e" : "Hello"}]}')::JSON(max_dynamic_paths=8)) from numbers(20000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(arrayJoin(json.a[]))) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(arrayJoin(json.a[]))) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(arrayJoin(json.a[]))) as path from test order by path; + +truncate table test; +insert into test select multiIf(number < 1000, materialize('{"a" : [{"b" : 42, "c" : [1, 2, 3]}]}')::JSON(max_dynamic_paths=8), number < 3000, materialize('{"a" : [{"d" : "2020-01-01", "e" : "Hello"}]}')::JSON(max_dynamic_paths=8), materialize('{"a" : [{"f" : "2020-01-01 00:00:00", "g" : "Hello2"}]}')::JSON(max_dynamic_paths=8)) from numbers(20000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(arrayJoin(json.a[]))) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(arrayJoin(json.a[]))) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(arrayJoin(json.a[]))) as path from test order by path; + +truncate table test; +insert into test select multiIf(number < 1000, materialize('{"a" : [{"b" : 42}]}')::JSON(max_dynamic_paths=8), number < 3000, materialize('{"a" : [{"d" : "2020-01-01", "e" : "Hello"}]}')::JSON(max_dynamic_paths=8), materialize('{"a" : [{"f" : "2020-01-01 00:00:00"}]}')::JSON(max_dynamic_paths=8)) from numbers(20000); +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(arrayJoin(json.a[]))) as path from test order by path; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(arrayJoin(json.a[]))) as path from test order by path; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(arrayJoin(json.a[]))) as path from test order by path; + +drop table test; diff --git a/parser/testdata/03222_parallel_replicas_final_in_subquery/ast.json b/parser/testdata/03222_parallel_replicas_final_in_subquery/ast.json new file mode 100644 index 000000000..1f3241fa1 --- /dev/null +++ b/parser/testdata/03222_parallel_replicas_final_in_subquery/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_00808 (children 1)" + }, + { + "explain": " Identifier test_00808" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001274962, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03222_parallel_replicas_final_in_subquery/metadata.json b/parser/testdata/03222_parallel_replicas_final_in_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03222_parallel_replicas_final_in_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03222_parallel_replicas_final_in_subquery/query.sql b/parser/testdata/03222_parallel_replicas_final_in_subquery/query.sql new file mode 100644 index 000000000..bcf84aaa9 --- /dev/null +++ 
b/parser/testdata/03222_parallel_replicas_final_in_subquery/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS test_00808; + +CREATE TABLE test_00808 +( + `date` Date, + `id` Int8, + `name` String, + `value` Int64, + `sign` Int8 +) +ENGINE = CollapsingMergeTree(sign) +ORDER BY (id, date); + +INSERT INTO test_00808 VALUES('2000-01-01', 1, 'test string 1', 1, 1); +INSERT INTO test_00808 VALUES('2000-01-01', 2, 'test string 2', 2, 1); + +SET allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree=1, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; +SET parallel_replicas_only_with_analyzer = 0; -- necessary for CI run with disabled analyzer + +SELECT * FROM (SELECT * FROM test_00808 FINAL) WHERE id = 1; -- { serverError SUPPORT_IS_DISABLED } + +DROP TABLE test_00808; diff --git a/parser/testdata/03222_parallel_replicas_min_marks_to_read_overflow/ast.json b/parser/testdata/03222_parallel_replicas_min_marks_to_read_overflow/ast.json new file mode 100644 index 000000000..1482b1983 --- /dev/null +++ b/parser/testdata/03222_parallel_replicas_min_marks_to_read_overflow/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test__fuzz_22 (children 1)" + }, + { + "explain": " Identifier test__fuzz_22" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001545106, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/03222_parallel_replicas_min_marks_to_read_overflow/metadata.json b/parser/testdata/03222_parallel_replicas_min_marks_to_read_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03222_parallel_replicas_min_marks_to_read_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03222_parallel_replicas_min_marks_to_read_overflow/query.sql b/parser/testdata/03222_parallel_replicas_min_marks_to_read_overflow/query.sql new file mode 100644 index 000000000..6f486f8f0 --- /dev/null +++ b/parser/testdata/03222_parallel_replicas_min_marks_to_read_overflow/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS test__fuzz_22 SYNC; + +CREATE TABLE test__fuzz_22 (k Float32, v String) ENGINE = ReplicatedMergeTree('/clickhouse/03222/{database}/test__fuzz_22', 'r1') ORDER BY k SETTINGS index_granularity = 1; + +INSERT INTO test__fuzz_22 SELECT number, toString(number) FROM numbers(10_000); + +SET allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; + +SELECT v +FROM test__fuzz_22 +ORDER BY v +LIMIT 10, 10 +SETTINGS merge_tree_min_rows_for_concurrent_read = 9223372036854775806; + +SELECT '---'; + +SELECT k, v +FROM test__fuzz_22 +ORDER BY k +LIMIT 100, 10 +SETTINGS optimize_read_in_order=1, merge_tree_min_rows_for_concurrent_read = 9223372036854775806; + +DROP TABLE test__fuzz_22 SYNC; diff --git a/parser/testdata/03222_pr_asan_index_granularity/ast.json b/parser/testdata/03222_pr_asan_index_granularity/ast.json new file mode 100644 index 000000000..388c0be64 --- /dev/null +++ b/parser/testdata/03222_pr_asan_index_granularity/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001392392, + "rows_read": 2, + 
"bytes_read": 60 + } +} diff --git a/parser/testdata/03222_pr_asan_index_granularity/metadata.json b/parser/testdata/03222_pr_asan_index_granularity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03222_pr_asan_index_granularity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03222_pr_asan_index_granularity/query.sql b/parser/testdata/03222_pr_asan_index_granularity/query.sql new file mode 100644 index 000000000..b7f37dd28 --- /dev/null +++ b/parser/testdata/03222_pr_asan_index_granularity/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test (k UInt64, v String) +ENGINE = MergeTree +ORDER BY k +SETTINGS index_granularity=1; + +INSERT INTO test SELECT number, toString(number) FROM numbers(10_000); + +SET allow_experimental_parallel_reading_from_replicas = 2, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree=1, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; + +SELECT 0, materialize(18), k FROM test PREWHERE toNullable(toNullable(11)) WHERE toNullable(11) ORDER BY k DESC NULLS LAST LIMIT 100, 100 SETTINGS optimize_read_in_order = 1, merge_tree_min_rows_for_concurrent_read = 9223372036854775806, max_threads = 1; + +-- DROP TABLE test; diff --git a/parser/testdata/03223_analyzer_with_cube_fuzz/ast.json b/parser/testdata/03223_analyzer_with_cube_fuzz/ast.json new file mode 100644 index 000000000..6ff7a810b --- /dev/null +++ b/parser/testdata/03223_analyzer_with_cube_fuzz/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001415911, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03223_analyzer_with_cube_fuzz/metadata.json b/parser/testdata/03223_analyzer_with_cube_fuzz/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03223_analyzer_with_cube_fuzz/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03223_analyzer_with_cube_fuzz/query.sql b/parser/testdata/03223_analyzer_with_cube_fuzz/query.sql new file mode 100644 index 000000000..e323ada79 --- /dev/null +++ b/parser/testdata/03223_analyzer_with_cube_fuzz/query.sql @@ -0,0 +1,29 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (`a` Int64, `b` Int64) ENGINE = MergeTree ORDER BY a; +CREATE TABLE t2 (`key` Int32, `val` Int64) ENGINE = MergeTree ORDER BY key; +insert into t1 Select number, number from numbers(100000); +insert into t2 Select number, number from numbers(100000); + + +SELECT + 1 * 1000.0001, + (count(1.) = -2147483647) AND (count(a) = 1.1920928955078125e-7) AND (count(val) = 1048577) AND (sum(val) = ((NULL * 1048576) / -9223372036854775807)) AND (sum(a) = ((9223372036854775806 * 10000000000.) 
/ 1048575)) +FROM +( + SELECT + a, + val + FROM t1 + FULL OUTER JOIN t2 ON (t1.a = t2.key) OR (1 * inf) OR (t1.b = t2.key) +) +GROUP BY '65537' + WITH CUBE +FORMAT Null +SETTINGS max_block_size = 100, join_use_nulls = 1, max_execution_time = 1., max_result_rows = 0, max_result_bytes = 0; -- { serverError TIMEOUT_EXCEEDED, QUERY_WAS_CANCELLED } + +DROP TABLE t1; +DROP TABLE t2; diff --git a/parser/testdata/03223_interval_data_type_comparison/ast.json b/parser/testdata/03223_interval_data_type_comparison/ast.json new file mode 100644 index 000000000..b328150e1 --- /dev/null +++ b/parser/testdata/03223_interval_data_type_comparison/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Comparing nanoseconds'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.00138534, + "rows_read": 5, + "bytes_read": 192 + } +} diff --git a/parser/testdata/03223_interval_data_type_comparison/metadata.json b/parser/testdata/03223_interval_data_type_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03223_interval_data_type_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03223_interval_data_type_comparison/query.sql b/parser/testdata/03223_interval_data_type_comparison/query.sql new file mode 100644 index 000000000..77b6e2fa3 --- /dev/null +++ b/parser/testdata/03223_interval_data_type_comparison/query.sql @@ -0,0 +1,142 @@ +SELECT('Comparing nanoseconds'); +SELECT INTERVAL 500 NANOSECOND > INTERVAL 300 NANOSECOND; +SELECT INTERVAL 1000 NANOSECOND < INTERVAL 1500 NANOSECOND; +SELECT INTERVAL 2000 NANOSECOND = INTERVAL 2000 NANOSECOND; +SELECT INTERVAL 1000 NANOSECOND >= INTERVAL 1 MICROSECOND; +SELECT INTERVAL 1000001 NANOSECOND > INTERVAL 1 MILLISECOND; +SELECT INTERVAL 2000000001 NANOSECOND > INTERVAL 2 SECOND; +SELECT INTERVAL 60000000000 NANOSECOND = INTERVAL 1 MINUTE; +SELECT INTERVAL 7199999999999 NANOSECOND < INTERVAL 2 HOUR; +SELECT INTERVAL 1 NANOSECOND < INTERVAL 2 DAY; +SELECT INTERVAL 5 NANOSECOND < INTERVAL 1 WEEK; + +SELECT INTERVAL 500 NANOSECOND < INTERVAL 300 NANOSECOND; +SELECT INTERVAL 1000 NANOSECOND > INTERVAL 1500 NANOSECOND; +SELECT INTERVAL 2000 NANOSECOND != INTERVAL 2000 NANOSECOND; +SELECT INTERVAL 1000 NANOSECOND < INTERVAL 1 MICROSECOND; +SELECT INTERVAL 1000001 NANOSECOND < INTERVAL 1 MILLISECOND; +SELECT INTERVAL 2000000001 NANOSECOND < INTERVAL 2 SECOND; +SELECT INTERVAL 60000000000 NANOSECOND != INTERVAL 1 MINUTE; +SELECT INTERVAL 7199999999999 NANOSECOND > INTERVAL 2 HOUR; +SELECT INTERVAL 1 NANOSECOND > INTERVAL 2 DAY; +SELECT INTERVAL 5 NANOSECOND > INTERVAL 1 WEEK; + +SELECT INTERVAL 1 NANOSECOND < INTERVAL 2 MONTH; -- { serverError NO_COMMON_TYPE } + +SELECT('Comparing microseconds'); +SELECT INTERVAL 1 MICROSECOND < INTERVAL 999 MICROSECOND; +SELECT INTERVAL 1001 MICROSECOND > INTERVAL 1 MILLISECOND; +SELECT INTERVAL 2000000 MICROSECOND = INTERVAL 2 SECOND; +SELECT INTERVAL 179999999 MICROSECOND < INTERVAL 3 MINUTE; +SELECT INTERVAL 3600000000 MICROSECOND = INTERVAL 1 HOUR; +SELECT INTERVAL 36000000000000 MICROSECOND > INTERVAL 2 DAY; +SELECT INTERVAL 1209600000000 MICROSECOND = INTERVAL 2 WEEK; + +SELECT INTERVAL 1 MICROSECOND > INTERVAL 999 MICROSECOND; +SELECT INTERVAL 
1001 MICROSECOND < INTERVAL 1 MILLISECOND; +SELECT INTERVAL 2000000 MICROSECOND != INTERVAL 2 SECOND; +SELECT INTERVAL 179999999 MICROSECOND > INTERVAL 3 MINUTE; +SELECT INTERVAL 3600000000 MICROSECOND != INTERVAL 1 HOUR; +SELECT INTERVAL 36000000000000 MICROSECOND < INTERVAL 2 DAY; +SELECT INTERVAL 1209600000000 MICROSECOND != INTERVAL 2 WEEK; + +SELECT INTERVAL 36000000000000 MICROSECOND < INTERVAL 1 QUARTER; -- { serverError NO_COMMON_TYPE } + +SELECT('Comparing milliseconds'); +SELECT INTERVAL 2000 MILLISECOND > INTERVAL 2 MILLISECOND; +SELECT INTERVAL 2000 MILLISECOND = INTERVAL 2 SECOND; +SELECT INTERVAL 170000 MILLISECOND < INTERVAL 3 MINUTE; +SELECT INTERVAL 144000001 MILLISECOND > INTERVAL 40 HOUR; +SELECT INTERVAL 1728000000 MILLISECOND = INTERVAL 20 DAY; +SELECT INTERVAL 1198599999 MILLISECOND < INTERVAL 2 WEEK; + +SELECT INTERVAL 2000 MILLISECOND < INTERVAL 2 MILLISECOND; +SELECT INTERVAL 2000 MILLISECOND != INTERVAL 2 SECOND; +SELECT INTERVAL 170000 MILLISECOND > INTERVAL 3 MINUTE; +SELECT INTERVAL 144000001 MILLISECOND < INTERVAL 40 HOUR; +SELECT INTERVAL 1728000000 MILLISECOND != INTERVAL 20 DAY; +SELECT INTERVAL 1198599999 MILLISECOND > INTERVAL 2 WEEK; + +SELECT INTERVAL 36000000000000 MILLISECOND < INTERVAL 1 YEAR; -- { serverError NO_COMMON_TYPE } + +SELECT('Comparing seconds'); +SELECT INTERVAL 120 SECOND > INTERVAL 2 SECOND; +SELECT INTERVAL 120 SECOND = INTERVAL 2 MINUTE; +SELECT INTERVAL 1 SECOND < INTERVAL 2 HOUR; +SELECT INTERVAL 86401 SECOND >= INTERVAL 1 DAY; +SELECT INTERVAL 1209600 SECOND = INTERVAL 2 WEEK; + +SELECT INTERVAL 120 SECOND < INTERVAL 2 SECOND; +SELECT INTERVAL 120 SECOND != INTERVAL 2 MINUTE; +SELECT INTERVAL 1 SECOND > INTERVAL 2 HOUR; +SELECT INTERVAL 86401 SECOND < INTERVAL 1 DAY; +SELECT INTERVAL 1209600 SECOND != INTERVAL 2 WEEK; + +SELECT INTERVAL 36000000000000 SECOND < INTERVAL 1 MONTH; -- { serverError NO_COMMON_TYPE } + +SELECT('Comparing minutes'); +SELECT INTERVAL 1 MINUTE < INTERVAL 59 MINUTE; +SELECT INTERVAL 1 MINUTE < INTERVAL 59 HOUR; +SELECT INTERVAL 1440 MINUTE = INTERVAL 1 DAY; +SELECT INTERVAL 30241 MINUTE > INTERVAL 3 WEEK; + +SELECT INTERVAL 1 MINUTE > INTERVAL 59 MINUTE; +SELECT INTERVAL 1 MINUTE > INTERVAL 59 HOUR; +SELECT INTERVAL 1440 MINUTE != INTERVAL 1 DAY; +SELECT INTERVAL 30241 MINUTE < INTERVAL 3 WEEK; + +SELECT INTERVAL 2 MINUTE = INTERVAL 120 QUARTER; -- { serverError NO_COMMON_TYPE } + +SELECT('Comparing hours'); +SELECT INTERVAL 48 HOUR > INTERVAL 2 HOUR; +SELECT INTERVAL 48 HOUR >= INTERVAL 2 DAY; +SELECT INTERVAL 672 HOUR = INTERVAL 4 WEEK; + +SELECT INTERVAL 48 HOUR < INTERVAL 2 HOUR; +SELECT INTERVAL 48 HOUR < INTERVAL 2 DAY; +SELECT INTERVAL 672 HOUR != INTERVAL 4 WEEK; + +SELECT INTERVAL 2 HOUR < INTERVAL 1 YEAR; -- { serverError NO_COMMON_TYPE } + +SELECT('Comparing days'); +SELECT INTERVAL 1 DAY < INTERVAL 23 DAY; +SELECT INTERVAL 25 DAY > INTERVAL 3 WEEK; + +SELECT INTERVAL 1 DAY > INTERVAL 23 DAY; +SELECT INTERVAL 25 DAY < INTERVAL 3 WEEK; + +SELECT INTERVAL 2 DAY = INTERVAL 48 MONTH; -- { serverError NO_COMMON_TYPE } + +SELECT('Comparing weeks'); +SELECT INTERVAL 1 WEEK < INTERVAL 6 WEEK; + +SELECT INTERVAL 1 WEEK > INTERVAL 6 WEEK; + +SELECT INTERVAL 124 WEEK > INTERVAL 8 QUARTER; -- { serverError NO_COMMON_TYPE } + +SELECT('Comparing months'); +SELECT INTERVAL 1 MONTH < INTERVAL 3 MONTH; +SELECT INTERVAL 124 MONTH > INTERVAL 5 QUARTER; +SELECT INTERVAL 36 MONTH = INTERVAL 3 YEAR; + +SELECT INTERVAL 1 MONTH > INTERVAL 3 MONTH; +SELECT INTERVAL 124 MONTH < INTERVAL 5 QUARTER; +SELECT INTERVAL 36 
MONTH != INTERVAL 3 YEAR; + +SELECT INTERVAL 6 MONTH = INTERVAL 26 MICROSECOND; -- { serverError NO_COMMON_TYPE } + +SELECT('Comparing quarters'); +SELECT INTERVAL 5 QUARTER > INTERVAL 4 QUARTER; +SELECT INTERVAL 20 QUARTER = INTERVAL 5 YEAR; + +SELECT INTERVAL 5 QUARTER < INTERVAL 4 QUARTER; +SELECT INTERVAL 20 QUARTER != INTERVAL 5 YEAR; + +SELECT INTERVAL 2 QUARTER = INTERVAL 6 NANOSECOND; -- { serverError NO_COMMON_TYPE } + +SELECT('Comparing years'); +SELECT INTERVAL 1 YEAR < INTERVAL 3 YEAR; + +SELECT INTERVAL 1 YEAR > INTERVAL 3 YEAR; + +SELECT INTERVAL 2 YEAR = INTERVAL 8 SECOND; -- { serverError NO_COMMON_TYPE } \ No newline at end of file diff --git a/parser/testdata/03223_nested_json_in_shared_data_merges/ast.json b/parser/testdata/03223_nested_json_in_shared_data_merges/ast.json new file mode 100644 index 000000000..1ec334020 --- /dev/null +++ b/parser/testdata/03223_nested_json_in_shared_data_merges/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.002154418, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03223_nested_json_in_shared_data_merges/metadata.json b/parser/testdata/03223_nested_json_in_shared_data_merges/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03223_nested_json_in_shared_data_merges/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03223_nested_json_in_shared_data_merges/query.sql b/parser/testdata/03223_nested_json_in_shared_data_merges/query.sql new file mode 100644 index 000000000..900a49514 --- /dev/null +++ b/parser/testdata/03223_nested_json_in_shared_data_merges/query.sql @@ -0,0 +1,26 @@ +SET enable_json_type = 1; + +drop table if exists test; +create table test (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings min_bytes_for_wide_part=1, min_rows_for_wide_part=1; +insert into test select materialize('{"a" : [{"b" : 42}]}')::JSON(max_dynamic_paths=8) from numbers(5); +insert into test select materialize('{"aa1" : 42, "aa2" : 42, "aa3" : 42, "aa4" : 42, "aa5" : 42, "aa6" : 42, "aa7" : 42, "aa8" : 42, "a" : [{"c" : 42}]}') from numbers(5); +optimize table test final; + +select 'All paths'; +select JSONAllPaths(arrayJoin(json.a[])) from test; +select 'Dynamic paths'; +select JSONDynamicPaths(arrayJoin(json.a[])) from test; +select 'Shared data paths'; +select JSONSharedDataPaths(arrayJoin(json.a[])) from test; + +insert into test select materialize('{"a" : [{"b" : 42}]}')::JSON(max_dynamic_paths=8) from numbers(5); +optimize table test final; + +select 'All paths'; +select JSONAllPaths(arrayJoin(json.a[])) from test; +select 'Dynamic paths'; +select JSONDynamicPaths(arrayJoin(json.a[])) from test; +select 'Shared data paths'; +select JSONSharedDataPaths(arrayJoin(json.a[])) from test; + +drop table test; diff --git a/parser/testdata/03223_system_tables_set_not_ready/ast.json b/parser/testdata/03223_system_tables_set_not_ready/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03223_system_tables_set_not_ready/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03223_system_tables_set_not_ready/metadata.json b/parser/testdata/03223_system_tables_set_not_ready/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03223_system_tables_set_not_ready/metadata.json @@ -0,0 +1 @@ +{"todo": true} 
diff --git a/parser/testdata/03223_system_tables_set_not_ready/query.sql b/parser/testdata/03223_system_tables_set_not_ready/query.sql new file mode 100644 index 000000000..f90286e70 --- /dev/null +++ b/parser/testdata/03223_system_tables_set_not_ready/query.sql @@ -0,0 +1,31 @@ +-- Tags: no-fasttest, no-shared-merge-tree, use-rocksdb +-- Tag no-fasttest -- due to EmbeddedRocksDB +-- Tag no-shared-merge-tree -- due to system.replication_queue + +drop table if exists null; +drop table if exists dist; +create table null as system.one engine=Null; +create table dist as null engine=Distributed(test_cluster_two_shards, currentDatabase(), 'null', rand()); +insert into dist settings prefer_localhost_replica=0 values (1); +select 'system.distribution_queue', count() from system.distribution_queue where exists(select 1) and database = currentDatabase(); + +drop table if exists rocksdb; +create table rocksdb (key Int) engine=EmbeddedRocksDB() primary key key; +insert into rocksdb values (1); +select 'system.rocksdb', count()>0 from system.rocksdb where exists(select 1) and database = currentDatabase(); + +select 'system.databases', count() from system.databases where exists(select 1) and database = currentDatabase(); + +drop table if exists mt; +create table mt (key Int) engine=MergeTree() order by key; +alter table mt delete where 1; +select 'system.mutations', count() from system.mutations where exists(select 1) and database = currentDatabase(); + +drop table if exists rep1; +drop table if exists rep2; +create table rep1 (key Int) engine=ReplicatedMergeTree('/{database}/rep', '{table}') order by key; +create table rep2 (key Int) engine=ReplicatedMergeTree('/{database}/rep', '{table}') order by key; +system stop fetches rep2; +insert into rep1 values (1); +system sync replica rep2 pull; +select 'system.replication_queue', count()>0 from system.replication_queue where exists(select 1) and database = currentDatabase(); diff --git a/parser/testdata/03224_arrayUnion/ast.json b/parser/testdata/03224_arrayUnion/ast.json new file mode 100644 index 000000000..7f49bdf85 --- /dev/null +++ b/parser/testdata/03224_arrayUnion/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery array_union (children 1)" + }, + { + "explain": " Identifier array_union" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001154848, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/03224_arrayUnion/metadata.json b/parser/testdata/03224_arrayUnion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03224_arrayUnion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03224_arrayUnion/query.sql b/parser/testdata/03224_arrayUnion/query.sql new file mode 100644 index 000000000..5fd09b146 --- /dev/null +++ b/parser/testdata/03224_arrayUnion/query.sql @@ -0,0 +1,87 @@ +drop table if exists array_union; + +create table array_union (date Date, arr Array(UInt8)) engine=MergeTree partition by date order by date; + +insert into array_union values ('2019-01-01', [1,2,3]); +insert into array_union values ('2019-01-01', [1,2]); +insert into array_union values ('2019-01-01', [1]); +insert into array_union values ('2019-01-01', []); + + +select arraySort(arrayUnion(arr, [1,2])) from array_union order by arr; +select '-------'; +select arraySort(arrayUnion(arr, [])) from array_union order by arr; +select '-------'; +select arraySort(arrayUnion([], arr)) from 
array_union order by arr; +select '-------'; +select arraySort(arrayUnion([1,2], arr)) from array_union order by arr; +select '-------'; +select arraySort(arrayUnion([1,2], [1,2,3,4])) from array_union order by arr; +select '-------'; +select arraySort(arrayUnion([], [])) from array_union order by arr; + +drop table if exists array_union; + +select '-------'; +select arraySort(arrayUnion([-100], [156])); +select '-------'; +select arraySort(arrayUnion([1], [-257, -100])); +select '-------'; +select arraySort(arrayUnion(['hi'], ['hello', 'hi'], [])); +select '-------'; +SELECT arraySort(arrayUnion([1, 2, NULL], [1, 3, NULL], [2, 3, NULL])); +select '-------'; +SELECT arraySort(arrayUnion([NULL, NULL, NULL, 1], [1, NULL, NULL], [1, 2, 3, NULL])); +select '-------'; +SELECT arraySort(arrayUnion([1, 1, 1, 2, 3], [2, 2, 4], [5, 10, 20])); +select '-------'; +SELECT arraySort(arrayUnion([1, 2], [1, 3], [])); +select '-------'; +-- example from docs +SELECT + arrayUnion([-2, 1], [10, 1], [-2], []) as num_example, + arrayUnion(['hi'], [], ['hello', 'hi']) as str_example, + arrayUnion([1, 3, NULL], [2, 3, NULL]) as null_example; +select '-------'; +--mix of types +SELECT arrayUnion([1], [-2], [1.1, 'hi'], [NULL, 'hello', []]); -- {serverError NO_COMMON_TYPE} +select '-------'; +SELECT arrayUnion([1]); +SELECT arrayUnion(); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +select '-------'; +--bigger arrays +SELECT arraySort(arrayUnion(range(1, 256), range(2, 257))); +SELECT length(arrayUnion(range(1, 100000), range(9999, 200000))); +select '-------'; +--bigger number of arguments +SELECT arraySort(arrayUnion([1, 2], [1, 3], [1, 4], [1, 5], [1, 6], [1, 7], [1, 8], [1, 9], [1, 10], [1, 11], [1, 12], [1, 13], [1, 14], [1, 15], [1, 16], [1, 17], [1, 18], [1, 19])); + +-- Table with batch inserts +DROP TABLE IF EXISTS test_array_union; +CREATE TABLE test_array_union +( + `id` Int8, + `properties` Array(String), +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity = 8192; + +INSERT INTO test_array_union +VALUES +(1, ['1']), +(2, ['2']), +(3, ['3']), +(4, ['4']), +(5, ['5']), +(6, ['6']), +(7, ['7']), +(8, ['8']), +(9, ['9']), +(10, ['10']); + +SELECT + ta.id AS id, + ta.properties AS properties, + arrayUnion(ta.properties) AS propertiesUnion +FROM test_array_union ta; diff --git a/parser/testdata/03224_invalid_alter/ast.json b/parser/testdata/03224_invalid_alter/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03224_invalid_alter/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03224_invalid_alter/metadata.json b/parser/testdata/03224_invalid_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03224_invalid_alter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03224_invalid_alter/query.sql b/parser/testdata/03224_invalid_alter/query.sql new file mode 100644 index 000000000..8f9c1d30b --- /dev/null +++ b/parser/testdata/03224_invalid_alter/query.sql @@ -0,0 +1,84 @@ +-- Tags: no-replicated-database +-- no-replicated-database: It messes up the output and this test explicitly checks the replicated database + +CREATE TABLE test +( + str String, + column_with_codec String CODEC(ZSTD), + column_with_alias String MATERIALIZED concat(str, 'a' AS a), +) +ENGINE = MergeTree() +ORDER BY tuple(); + +-- Cannot have a different expression with the same alias +ALTER TABLE test ADD COLUMN invalid_column String MATERIALIZED concat(str, 'b' AS a); -- { serverError 
MULTIPLE_EXPRESSIONS_FOR_ALIAS } +ALTER TABLE test ADD COLUMN invalid_column String DEFAULT concat(str, 'b' AS a); -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } +-- Cannot specify codec for column type ALIAS +ALTER TABLE test MODIFY COLUMN column_with_codec String ALIAS str; -- { serverError BAD_ARGUMENTS } +-- alias is defined exactly the same +ALTER TABLE test ADD COLUMN valid_column_1 String DEFAULT concat(str, 'a' AS a); +-- different alias +ALTER TABLE test ADD COLUMN valid_column_2 String MATERIALIZED concat(str, 'c' AS c); +-- do one insert to make sure we can insert into the table +INSERT INTO test(str, column_with_codec) VALUES ('test', 'test2'); +SELECT str, column_with_alias, valid_column_1, valid_column_2 FROM test; +DROP TABLE test; + +CREATE TABLE test2 +( + str String, + column_with_codec String CODEC(ZSTD), + column_with_alias String MATERIALIZED concat(str, 'a' AS a), +) +ENGINE = ReplicatedMergeTree('/clickhouse/03224_invalid_alter/{database}/{table}', 'r1') +ORDER BY tuple(); + +ALTER TABLE test2 ADD COLUMN invalid_column String MATERIALIZED concat(str, 'b' AS a); -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } +ALTER TABLE test2 ADD COLUMN invalid_column String DEFAULT concat(str, 'b' AS a); -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } +ALTER TABLE test2 MODIFY COLUMN column_with_codec String ALIAS str; -- { serverError BAD_ARGUMENTS } +ALTER TABLE test2 ADD COLUMN valid_column_1 String DEFAULT concat(str, 'a' AS a); +ALTER TABLE test2 ADD COLUMN valid_column_2 String MATERIALIZED concat(str, 'c' AS c); +INSERT INTO test2(str, column_with_codec) VALUES ('test2', 'test22'); +SELECT str, column_with_alias, valid_column_1, valid_column_2 FROM test2; + +DROP DATABASE {CLICKHOUSE_DATABASE:Identifier}; + +CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier} ON CLUSTER test_shard_localhost ENGINE = Atomic; + +CREATE TABLE test3 ON CLUSTER test_shard_localhost +( + str String, + column_with_codec String CODEC(ZSTD), + column_with_alias String MATERIALIZED concat(str, 'a' AS a), +) +ENGINE = ReplicatedMergeTree('/clickhouse/03224_invalid_alter/{database}_atomic/{table}', 'r1') +ORDER BY tuple(); + +ALTER TABLE test3 ON CLUSTER test_shard_localhost ADD COLUMN invalid_column String MATERIALIZED concat(str, 'b' AS a) FORMAT Null SETTINGS distributed_ddl_output_mode='throw'; -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } +ALTER TABLE test3 ON CLUSTER test_shard_localhost ADD COLUMN invalid_column String DEFAULT concat(str, 'b' AS a) FORMAT Null SETTINGS distributed_ddl_output_mode='throw'; -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } +ALTER TABLE test3 ON CLUSTER test_shard_localhost MODIFY COLUMN column_with_codec String ALIAS str FORMAT Null SETTINGS distributed_ddl_output_mode='throw'; -- { serverError BAD_ARGUMENTS } +ALTER TABLE test3 ON CLUSTER test_shard_localhost ADD COLUMN valid_column_1 String DEFAULT concat(str, 'a' AS a); +ALTER TABLE test3 ON CLUSTER test_shard_localhost ADD COLUMN valid_column_2 String MATERIALIZED concat(str, 'c' AS c); +INSERT INTO test3(str, column_with_codec) VALUES ('test3', 'test32'); +SELECT str, column_with_alias, valid_column_1, valid_column_2 FROM test3; + +DROP DATABASE {CLICKHOUSE_DATABASE:Identifier}; +CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier} ENGINE = Replicated('/clickhouse/03224_invalid_alter/{database}_replicated', 'shard1', 'replica1') FORMAT Null; + +CREATE TABLE test4 +( + str String, + column_with_codec String CODEC(ZSTD), + column_with_alias String MATERIALIZED concat(str, 'a' AS a), +) +ENGINE = 
ReplicatedMergeTree() +ORDER BY tuple() +FORMAT Null; + +ALTER TABLE test4 ADD COLUMN invalid_column String MATERIALIZED concat(str, 'b' AS a) FORMAT Null SETTINGS distributed_ddl_output_mode='throw'; -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } +ALTER TABLE test4 ADD COLUMN invalid_column String DEFAULT concat(str, 'b' AS a) FORMAT Null SETTINGS distributed_ddl_output_mode='throw'; -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } +ALTER TABLE test4 MODIFY COLUMN column_with_codec String ALIAS str FORMAT Null SETTINGS distributed_ddl_output_mode='throw'; -- { serverError BAD_ARGUMENTS } +ALTER TABLE test4 ADD COLUMN valid_column_1 String DEFAULT concat(str, 'a' AS a) FORMAT Null SETTINGS distributed_ddl_output_mode='throw'; +ALTER TABLE test4 ADD COLUMN valid_column_2 String MATERIALIZED concat(str, 'c' AS c) FORMAT Null SETTINGS distributed_ddl_output_mode='throw'; +INSERT INTO test4(str, column_with_codec) VALUES ('test4', 'test42'); +SELECT str, column_with_alias, valid_column_1, valid_column_2 FROM test4; diff --git a/parser/testdata/03224_json_merges_new_type_in_shared_data/ast.json b/parser/testdata/03224_json_merges_new_type_in_shared_data/ast.json new file mode 100644 index 000000000..39c3033cc --- /dev/null +++ b/parser/testdata/03224_json_merges_new_type_in_shared_data/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001000943, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03224_json_merges_new_type_in_shared_data/metadata.json b/parser/testdata/03224_json_merges_new_type_in_shared_data/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03224_json_merges_new_type_in_shared_data/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03224_json_merges_new_type_in_shared_data/query.sql b/parser/testdata/03224_json_merges_new_type_in_shared_data/query.sql new file mode 100644 index 000000000..945e7c163 --- /dev/null +++ b/parser/testdata/03224_json_merges_new_type_in_shared_data/query.sql @@ -0,0 +1,12 @@ +SET enable_json_type = 1; + +drop table if exists test; +create table test (json JSON(max_dynamic_paths=1)) engine=MergeTree order by tuple() settings min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1; +insert into test select '{"b" : 42}' from numbers(5); +insert into test select '{"a" : 42, "b" : [1, 2, 3]}' from numbers(5); +optimize table test final; +select distinct dynamicType(json.b) as type, isDynamicElementInSharedData(json.b) from test order by type; +insert into test select '{"b" : 42}' from numbers(5); +optimize table test final; +select distinct dynamicType(json.b) as type, isDynamicElementInSharedData(json.b) from test order by type; +drop table test; diff --git a/parser/testdata/03224_nested_json_merges_new_type_in_shared_data/ast.json b/parser/testdata/03224_nested_json_merges_new_type_in_shared_data/ast.json new file mode 100644 index 000000000..498bced2a --- /dev/null +++ b/parser/testdata/03224_nested_json_merges_new_type_in_shared_data/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001231804, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03224_nested_json_merges_new_type_in_shared_data/metadata.json 
b/parser/testdata/03224_nested_json_merges_new_type_in_shared_data/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03224_nested_json_merges_new_type_in_shared_data/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03224_nested_json_merges_new_type_in_shared_data/query.sql b/parser/testdata/03224_nested_json_merges_new_type_in_shared_data/query.sql new file mode 100644 index 000000000..144efab27 --- /dev/null +++ b/parser/testdata/03224_nested_json_merges_new_type_in_shared_data/query.sql @@ -0,0 +1,25 @@ +SET enable_json_type = 1; + +drop table if exists test; +create table test (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1; +insert into test select materialize('{"a" : 42}')::JSON(max_dynamic_paths=8) from numbers(5); +insert into test select materialize('{"a1" : 42, "a2" : 42, "a3" : 42, "a4" : 42, "a5" : 42, "a6" : 42, "a7" : 42, "a8" : 42, "a" : [{"c" : 42}]}')::JSON(max_dynamic_paths=8) from numbers(5); +optimize table test final; +select distinct dynamicType(json.a) as type, isDynamicElementInSharedData(json.a) from test order by type; +insert into test select materialize('{"a1" : 42, "a2" : 42, "a3" : 42, "a4" : 42, "a5" : 42, "a6" : 42, "a7" : 42, "a8" : 42, "a" : [{"d" : 42}]}')::JSON(max_dynamic_paths=8) from numbers(5); +optimize table test final; +select distinct dynamicType(json.a) as type, isDynamicElementInSharedData(json.a) from test order by type; +select distinct JSONSharedDataPaths(arrayJoin(json.a[])) as path from test order by path; +insert into test select materialize('{"a" : 42}')::JSON(max_dynamic_paths=8) from numbers(5); +optimize table test final; +select distinct dynamicType(json.a) as type, isDynamicElementInSharedData(json.a) from test order by type; +select distinct JSONDynamicPaths(arrayJoin(json.a[])) as path from test order by path; +select distinct dynamicType(arrayJoin(json.a[].c)) as type, isDynamicElementInSharedData(arrayJoin(json.a[].c)) from test order by type; +select distinct dynamicType(arrayJoin(json.a[].d)) as type, isDynamicElementInSharedData(arrayJoin(json.a[].d)) from test order by type; +insert into test select materialize('{"a" : 42}')::JSON(max_dynamic_paths=8) from numbers(5); +optimize table test final; +select distinct dynamicType(json.a) as type, isDynamicElementInSharedData(json.a) from test order by type; +select distinct JSONDynamicPaths(arrayJoin(json.a[])) as path from test order by path; +select distinct dynamicType(arrayJoin(json.a[].c)) as type, isDynamicElementInSharedData(arrayJoin(json.a[].c)) from test order by type; +select distinct dynamicType(arrayJoin(json.a[].d)) as type, isDynamicElementInSharedData(arrayJoin(json.a[].d)) from test order by type; +drop table test; diff --git a/parser/testdata/03224_trim_empty_string/ast.json b/parser/testdata/03224_trim_empty_string/ast.json new file mode 100644 index 000000000..17808d7ce --- /dev/null +++ b/parser/testdata/03224_trim_empty_string/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'foo'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001323757, + "rows_read": 5, + "bytes_read": 174 + } +} diff --git 
a/parser/testdata/03224_trim_empty_string/metadata.json b/parser/testdata/03224_trim_empty_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03224_trim_empty_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03224_trim_empty_string/query.sql b/parser/testdata/03224_trim_empty_string/query.sql new file mode 100644 index 000000000..444bd1d7c --- /dev/null +++ b/parser/testdata/03224_trim_empty_string/query.sql @@ -0,0 +1,7 @@ +SELECT trim(LEADING '' FROM 'foo'); +SELECT trim(TRAILING '' FROM 'foo'); +SELECT trim(BOTH '' FROM 'foo'); + +SELECT trim(LEADING '' FROM ' foo ') FORMAT CSV; +SELECT trim(TRAILING '' FROM ' foo ') FORMAT CSV; +SELECT trim(BOTH '' FROM ' foo ') FORMAT CSV; diff --git a/parser/testdata/03224_tuple_element_identifier/ast.json b/parser/testdata/03224_tuple_element_identifier/ast.json new file mode 100644 index 000000000..bd3b78fa2 --- /dev/null +++ b/parser/testdata/03224_tuple_element_identifier/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001204079, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03224_tuple_element_identifier/metadata.json b/parser/testdata/03224_tuple_element_identifier/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03224_tuple_element_identifier/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03224_tuple_element_identifier/query.sql b/parser/testdata/03224_tuple_element_identifier/query.sql new file mode 100644 index 000000000..6c47e69a7 --- /dev/null +++ b/parser/testdata/03224_tuple_element_identifier/query.sql @@ -0,0 +1,14 @@ +SET enable_analyzer = 1; +SET enable_named_columns_in_function_tuple=1; + +SELECT JSONExtract('{"hello":[{"world":"wtf"}]}', 'Tuple(hello Array(Tuple(world String)))') AS x, + x.hello, x.hello[1].world; + +SELECT JSONExtract('{"hello":[{" wow ":"wtf"}]}', 'Tuple(hello Array(Tuple(` wow ` String)))') AS x, + x.hello, x.hello[1].` wow `; + +SELECT JSONExtract('{"hello":[{" wow ":"wtf"}]}', 'Tuple(hello Array(Tuple(` wow ` String)))') AS x, + x.hello, x.hello[1].`wow`; -- { serverError NOT_FOUND_COLUMN_IN_BLOCK } + +SELECT ('Hello' AS world,).world; +SELECT ('Hello' AS world,) AS t, t.world, (t).world, identity(t).world; diff --git a/parser/testdata/03225_const_prewhere_non_ataptive/ast.json b/parser/testdata/03225_const_prewhere_non_ataptive/ast.json new file mode 100644 index 000000000..f32f6b54d --- /dev/null +++ b/parser/testdata/03225_const_prewhere_non_ataptive/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_const_prewhere (children 1)" + }, + { + "explain": " Identifier t_const_prewhere" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001181275, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/03225_const_prewhere_non_ataptive/metadata.json b/parser/testdata/03225_const_prewhere_non_ataptive/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03225_const_prewhere_non_ataptive/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03225_const_prewhere_non_ataptive/query.sql b/parser/testdata/03225_const_prewhere_non_ataptive/query.sql new file mode 100644 index 000000000..66546177c --- /dev/null 
+++ b/parser/testdata/03225_const_prewhere_non_ataptive/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS t_const_prewhere; + +CREATE TABLE t_const_prewhere (id Int16, row_ver UInt64) +ENGINE = MergeTree ORDER BY id +SETTINGS index_granularity_bytes = 0, index_granularity = 42, min_bytes_for_wide_part = 0, min_bytes_for_full_part_storage = 0; + +INSERT INTO t_const_prewhere FORMAT VALUES (1, 1); + +SELECT * FROM t_const_prewhere PREWHERE 1; +SELECT * FROM t_const_prewhere PREWHERE materialize(1); + +DROP TABLE IF EXISTS t_const_prewhere; + +CREATE TABLE t_const_prewhere (id Int16, row_ver UInt64) +ENGINE = MergeTree() ORDER BY id +SETTINGS index_granularity_bytes = '10M', index_granularity = 42; + +INSERT INTO t_const_prewhere FORMAT VALUES (1, 1); + +SELECT * FROM t_const_prewhere PREWHERE 1; +SELECT * FROM t_const_prewhere PREWHERE materialize(1); + +DROP TABLE IF EXISTS t_const_prewhere; diff --git a/parser/testdata/03227_distinct_dynamic_types_json_paths/ast.json b/parser/testdata/03227_distinct_dynamic_types_json_paths/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03227_distinct_dynamic_types_json_paths/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03227_distinct_dynamic_types_json_paths/metadata.json b/parser/testdata/03227_distinct_dynamic_types_json_paths/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03227_distinct_dynamic_types_json_paths/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03227_distinct_dynamic_types_json_paths/query.sql b/parser/testdata/03227_distinct_dynamic_types_json_paths/query.sql new file mode 100644 index 000000000..40d85824a --- /dev/null +++ b/parser/testdata/03227_distinct_dynamic_types_json_paths/query.sql @@ -0,0 +1,63 @@ +-- Tags: long + +set allow_experimental_dynamic_type = 1; +SET enable_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set max_block_size = 10000; + +drop table if exists test_json_dynamic_aggregate_functions; +create table test_json_dynamic_aggregate_functions (json JSON(a1 String, max_dynamic_paths=2, max_dynamic_types=2)) engine=Memory; +insert into test_json_dynamic_aggregate_functions select toJSONString(map('a' || number % 13, multiIf(number % 5 == 0, NULL, number % 5 == 1, number::UInt32, number % 5 == 2, 'str_' || number, number % 5 == 3, range(number % 5), toBool(number % 2)))) from numbers(100000); +select arrayJoin(distinctJSONPaths(json)) from test_json_dynamic_aggregate_functions; +select arrayJoin(distinctJSONPathsAndTypes(json)) from test_json_dynamic_aggregate_functions; +select arrayJoin(distinctDynamicTypes(json.a2)) from test_json_dynamic_aggregate_functions; +select arrayJoin(distinctDynamicTypes(json.a3)) from test_json_dynamic_aggregate_functions; +select arrayJoin(distinctDynamicTypes(json.a42)) from test_json_dynamic_aggregate_functions; + +select 'Filter'; +select arrayJoin(distinctJSONPaths(json)) from test_json_dynamic_aggregate_functions where dynamicType(json.a2) == 'String'; +select arrayJoin(distinctJSONPathsAndTypes(json)) from test_json_dynamic_aggregate_functions where dynamicType(json.a2) == 'String'; +select arrayJoin(distinctDynamicTypes(json.a2)) from test_json_dynamic_aggregate_functions where dynamicType(json.a2) == 'String'; + +select 'If'; +select arrayJoin(distinctJSONPathsIf(json, dynamicType(json.a2) == 'String')) from test_json_dynamic_aggregate_functions; +select 
arrayJoin(distinctJSONPathsAndTypesIf(json, dynamicType(json.a2) == 'String')) from test_json_dynamic_aggregate_functions; +select arrayJoin(distinctDynamicTypesIf(json.a2, dynamicType(json.a2) == 'String')) from test_json_dynamic_aggregate_functions; + +select 'Group by'; +select dynamicType(json.a2), distinctJSONPaths(json) from test_json_dynamic_aggregate_functions group by dynamicType(json.a2) order by dynamicType(json.a2); +select dynamicType(json.a2), distinctJSONPathsAndTypes(json) from test_json_dynamic_aggregate_functions group by dynamicType(json.a2) order by dynamicType(json.a2); +select dynamicType(json.a2), distinctDynamicTypes(json.a2) from test_json_dynamic_aggregate_functions group by dynamicType(json.a2) order by dynamicType(json.a2); + +select 'Remote'; +select arrayJoin(distinctJSONPaths(json)) from remote('127.0.0.{1,2,3}', currentDatabase(), test_json_dynamic_aggregate_functions); +select arrayJoin(distinctJSONPathsAndTypes(json)) from remote('127.0.0.{1,2,3}', currentDatabase(), test_json_dynamic_aggregate_functions); +select arrayJoin(distinctDynamicTypes(json.a2)) from remote('127.0.0.{1,2,3}', currentDatabase(), test_json_dynamic_aggregate_functions); + +select 'Remote filter'; +select arrayJoin(distinctJSONPaths(json)) from remote('127.0.0.{1,2,3}', currentDatabase(), test_json_dynamic_aggregate_functions) where dynamicType(json.a2) == 'String'; +select arrayJoin(distinctJSONPathsAndTypes(json)) from remote('127.0.0.{1,2,3}', currentDatabase(), test_json_dynamic_aggregate_functions) where dynamicType(json.a2) == 'String'; +select arrayJoin(distinctDynamicTypes(json.a2)) from remote('127.0.0.{1,2,3}', currentDatabase(), test_json_dynamic_aggregate_functions) where dynamicType(json.a2) == 'String'; + +select 'Remote if'; +select arrayJoin(distinctJSONPathsIf(json, dynamicType(json.a2) == 'String')) from remote('127.0.0.{1,2,3}', currentDatabase(), test_json_dynamic_aggregate_functions); +select arrayJoin(distinctJSONPathsAndTypesIf(json, dynamicType(json.a2) == 'String')) from remote('127.0.0.{1,2,3}', currentDatabase(), test_json_dynamic_aggregate_functions); +select arrayJoin(distinctDynamicTypesIf(json.a2, dynamicType(json.a2) == 'String')) from remote('127.0.0.{1,2,3}', currentDatabase(), test_json_dynamic_aggregate_functions); + +select 'Remote group by'; +select dynamicType(json.a2), distinctJSONPaths(json) from remote('127.0.0.{1,2,3}', currentDatabase(), test_json_dynamic_aggregate_functions) group by dynamicType(json.a2) order by dynamicType(json.a2); +select dynamicType(json.a2), distinctJSONPathsAndTypes(json) from remote('127.0.0.{1,2,3}', currentDatabase(), test_json_dynamic_aggregate_functions) group by dynamicType(json.a2) order by dynamicType(json.a2); +select dynamicType(json.a2), distinctDynamicTypes(json.a2) from remote('127.0.0.{1,2,3}', currentDatabase(), test_json_dynamic_aggregate_functions) group by dynamicType(json.a2) order by dynamicType(json.a2); + +select distinctJSONPaths() from test_json_dynamic_aggregate_functions; -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +select distinctJSONPaths(json, 42) from test_json_dynamic_aggregate_functions; -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +select distinctJSONPaths(42) from test_json_dynamic_aggregate_functions; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select distinctJSONPathsAndTypes() from test_json_dynamic_aggregate_functions; -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +select distinctJSONPathsAndTypes(json, 42) from test_json_dynamic_aggregate_functions; -- {serverError 
NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +select distinctJSONPathsAndTypes(42) from test_json_dynamic_aggregate_functions; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select distinctDynamicTypes() from test_json_dynamic_aggregate_functions; -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +select distinctDynamicTypes(json.a2, 42) from test_json_dynamic_aggregate_functions; -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +select distinctDynamicTypes(42) from test_json_dynamic_aggregate_functions; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +drop table test_json_dynamic_aggregate_functions; diff --git a/parser/testdata/03227_dynamic_subcolumns_enumerate_streams/ast.json b/parser/testdata/03227_dynamic_subcolumns_enumerate_streams/ast.json new file mode 100644 index 000000000..ceedd2b59 --- /dev/null +++ b/parser/testdata/03227_dynamic_subcolumns_enumerate_streams/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000932204, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03227_dynamic_subcolumns_enumerate_streams/metadata.json b/parser/testdata/03227_dynamic_subcolumns_enumerate_streams/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03227_dynamic_subcolumns_enumerate_streams/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03227_dynamic_subcolumns_enumerate_streams/query.sql b/parser/testdata/03227_dynamic_subcolumns_enumerate_streams/query.sql new file mode 100644 index 000000000..a3b5e05b0 --- /dev/null +++ b/parser/testdata/03227_dynamic_subcolumns_enumerate_streams/query.sql @@ -0,0 +1,8 @@ +set enable_json_type=1; +drop table if exists test; +create table test (json JSON) engine=Memory; +insert into test select toJSONString(map('a', 'str_' || number)) from numbers(5); +select json.a.String from test; +select json.a.:String from test; +select json.a.UInt64 from test; +drop table test; diff --git a/parser/testdata/03227_implicit_select/ast.json b/parser/testdata/03227_implicit_select/ast.json new file mode 100644 index 000000000..1db69b6a0 --- /dev/null +++ b/parser/testdata/03227_implicit_select/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001222832, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03227_implicit_select/metadata.json b/parser/testdata/03227_implicit_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03227_implicit_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03227_implicit_select/query.sql b/parser/testdata/03227_implicit_select/query.sql new file mode 100644 index 000000000..9a79817f0 --- /dev/null +++ b/parser/testdata/03227_implicit_select/query.sql @@ -0,0 +1,4 @@ +SET implicit_select = 1; + +1 + 2; +upper('Hello'); diff --git a/parser/testdata/03227_json_invalid_regexp/ast.json b/parser/testdata/03227_json_invalid_regexp/ast.json new file mode 100644 index 000000000..9fde655e2 --- /dev/null +++ b/parser/testdata/03227_json_invalid_regexp/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000984191, + "rows_read": 1, + 
"bytes_read": 11 + } +} diff --git a/parser/testdata/03227_json_invalid_regexp/metadata.json b/parser/testdata/03227_json_invalid_regexp/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03227_json_invalid_regexp/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03227_json_invalid_regexp/query.sql b/parser/testdata/03227_json_invalid_regexp/query.sql new file mode 100644 index 000000000..f304aafac --- /dev/null +++ b/parser/testdata/03227_json_invalid_regexp/query.sql @@ -0,0 +1,3 @@ +SET enable_json_type = 1; +create table test (json JSON(SKIP REGEXP '[]')) engine=Memory(); -- {serverError CANNOT_COMPILE_REGEXP} +create table test (json JSON(SKIP REGEXP '+')) engine=Memory(); -- {serverError CANNOT_COMPILE_REGEXP}; diff --git a/parser/testdata/03227_proper_parsing_of_cast_operator/ast.json b/parser/testdata/03227_proper_parsing_of_cast_operator/ast.json new file mode 100644 index 000000000..f8962932f --- /dev/null +++ b/parser/testdata/03227_proper_parsing_of_cast_operator/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '414243'" + }, + { + "explain": " Literal 'String'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001135519, + "rows_read": 8, + "bytes_read": 287 + } +} diff --git a/parser/testdata/03227_proper_parsing_of_cast_operator/metadata.json b/parser/testdata/03227_proper_parsing_of_cast_operator/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03227_proper_parsing_of_cast_operator/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03227_proper_parsing_of_cast_operator/query.sql b/parser/testdata/03227_proper_parsing_of_cast_operator/query.sql new file mode 100644 index 000000000..0c2e7dc58 --- /dev/null +++ b/parser/testdata/03227_proper_parsing_of_cast_operator/query.sql @@ -0,0 +1,6 @@ +SELECT '414243'::String; +SELECT x'414243'::String; +SELECT b'01000001'::String; +SELECT '{"a": \'\x41\'}'::String; +SELECT '{"a": \'\x4\'}'::String; -- { clientError SYNTAX_ERROR } +SELECT '{"a": \'a\x4\'}'::String; -- { clientError SYNTAX_ERROR } diff --git a/parser/testdata/03227_test_sample_n/ast.json b/parser/testdata/03227_test_sample_n/ast.json new file mode 100644 index 000000000..ed6cf7a17 --- /dev/null +++ b/parser/testdata/03227_test_sample_n/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery table_name (children 1)" + }, + { + "explain": " Identifier table_name" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001417674, + "rows_read": 2, + "bytes_read": 73 + } +} diff --git a/parser/testdata/03227_test_sample_n/metadata.json b/parser/testdata/03227_test_sample_n/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03227_test_sample_n/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03227_test_sample_n/query.sql b/parser/testdata/03227_test_sample_n/query.sql new file mode 100644 index 000000000..d38bdd0a3 --- /dev/null +++ 
b/parser/testdata/03227_test_sample_n/query.sql @@ -0,0 +1,20 @@ +CREATE TABLE IF NOT EXISTS table_name +( +id UInt64 +) +ENGINE = MergeTree() +ORDER BY cityHash64(id) +SAMPLE BY cityHash64(id); + +INSERT INTO table_name SELECT rand() from system.numbers limit 10000; +INSERT INTO table_name SELECT rand() from system.numbers limit 10000; +INSERT INTO table_name SELECT rand() from system.numbers limit 10000; +INSERT INTO table_name SELECT rand() from system.numbers limit 10000; +INSERT INTO table_name SELECT rand() from system.numbers limit 10000; + +select count() from table_name; +SELECT count() < 50 * 5 FROM ( + SELECT * FROM table_name SAMPLE 50 +); + +DROP TABLE table_name; diff --git a/parser/testdata/03228_async_insert_query_params_bad_type/ast.json b/parser/testdata/03228_async_insert_query_params_bad_type/ast.json new file mode 100644 index 000000000..a66bc78a4 --- /dev/null +++ b/parser/testdata/03228_async_insert_query_params_bad_type/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_async_insert_params (children 1)" + }, + { + "explain": " Identifier t_async_insert_params" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001231251, + "rows_read": 2, + "bytes_read": 94 + } +} diff --git a/parser/testdata/03228_async_insert_query_params_bad_type/metadata.json b/parser/testdata/03228_async_insert_query_params_bad_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03228_async_insert_query_params_bad_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03228_async_insert_query_params_bad_type/query.sql b/parser/testdata/03228_async_insert_query_params_bad_type/query.sql new file mode 100644 index 000000000..359174245 --- /dev/null +++ b/parser/testdata/03228_async_insert_query_params_bad_type/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS t_async_insert_params; + +CREATE TABLE t_async_insert_params (id UInt64) ENGINE = MergeTree ORDER BY tuple(); + +SET param_p1 = 'Hello'; + +SET async_insert = 1; +SET wait_for_async_insert = 1; + +INSERT INTO t_async_insert_params VALUES ({p1:UInt64}); -- { serverError BAD_QUERY_PARAMETER } +INSERT INTO t_async_insert_params VALUES ({p1:String}); -- { serverError TYPE_MISMATCH } + +ALTER TABLE t_async_insert_params MODIFY COLUMN id String; + +INSERT INTO t_async_insert_params VALUES ({p1:UInt64}); -- { serverError BAD_QUERY_PARAMETER } +INSERT INTO t_async_insert_params VALUES ({p1:String}); + +SELECT * FROM t_async_insert_params ORDER BY id; + +DROP TABLE t_async_insert_params; diff --git a/parser/testdata/03228_dynamic_serializations_uninitialized_value/ast.json b/parser/testdata/03228_dynamic_serializations_uninitialized_value/ast.json new file mode 100644 index 000000000..537d78bbf --- /dev/null +++ b/parser/testdata/03228_dynamic_serializations_uninitialized_value/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001222408, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03228_dynamic_serializations_uninitialized_value/metadata.json b/parser/testdata/03228_dynamic_serializations_uninitialized_value/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03228_dynamic_serializations_uninitialized_value/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03228_dynamic_serializations_uninitialized_value/query.sql b/parser/testdata/03228_dynamic_serializations_uninitialized_value/query.sql new file mode 100644 index 000000000..60e2439d4 --- /dev/null +++ b/parser/testdata/03228_dynamic_serializations_uninitialized_value/query.sql @@ -0,0 +1,5 @@ +set allow_experimental_dynamic_type=1; +set allow_suspicious_types_in_group_by=1; +set cast_keep_nullable=1; +SELECT toFixedString('str', 3), 3, CAST(if(1 = 0, toInt8(3), NULL), 'Int32') AS x from numbers(10) GROUP BY GROUPING SETS ((CAST(toInt32(1), 'Int32')), ('str', 3), (CAST(toFixedString('str', 3), 'Dynamic')), (CAST(toFixedString(toFixedString('str', 3), 3), 'Dynamic'))); + diff --git a/parser/testdata/03228_dynamic_subcolumns_from_subquery/ast.json b/parser/testdata/03228_dynamic_subcolumns_from_subquery/ast.json new file mode 100644 index 000000000..928857f05 --- /dev/null +++ b/parser/testdata/03228_dynamic_subcolumns_from_subquery/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001382716, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03228_dynamic_subcolumns_from_subquery/metadata.json b/parser/testdata/03228_dynamic_subcolumns_from_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03228_dynamic_subcolumns_from_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03228_dynamic_subcolumns_from_subquery/query.sql b/parser/testdata/03228_dynamic_subcolumns_from_subquery/query.sql new file mode 100644 index 000000000..594e657e3 --- /dev/null +++ b/parser/testdata/03228_dynamic_subcolumns_from_subquery/query.sql @@ -0,0 +1,8 @@ +set allow_experimental_dynamic_type=1; +set enable_json_type=1; +SET enable_analyzer=1; + +select d.String from (select 'str'::Dynamic as d); +select json.a from (select '{"a" : 42}'::JSON as json); +select json.a from (select '{"a" : 42}'::JSON(a UInt32) as json); +select json.a.:Int64 from (select materialize('{"a" : 42}')::JSON as json); diff --git a/parser/testdata/03228_join_to_rerange_right_table/ast.json b/parser/testdata/03228_join_to_rerange_right_table/ast.json new file mode 100644 index 000000000..db683a059 --- /dev/null +++ b/parser/testdata/03228_join_to_rerange_right_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_left (children 1)" + }, + { + "explain": " Identifier test_left" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001369933, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/03228_join_to_rerange_right_table/metadata.json b/parser/testdata/03228_join_to_rerange_right_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03228_join_to_rerange_right_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03228_join_to_rerange_right_table/query.sql b/parser/testdata/03228_join_to_rerange_right_table/query.sql new file mode 100644 index 000000000..52a979a21 --- /dev/null +++ b/parser/testdata/03228_join_to_rerange_right_table/query.sql @@ -0,0 +1,16 @@ +drop table if exists test_left; +drop table if exists test_right; + +CREATE TABLE test_left (a Int64, b String, c LowCardinality(String)) ENGINE = MergeTree() ORDER BY a; +CREATE TABLE test_right (a Int64, b String, c 
LowCardinality(String)) ENGINE = MergeTree() ORDER BY a; + +INSERT INTO test_left SELECT number % 10000, number % 10000, number % 10000 FROM numbers(100000); +INSERT INTO test_right SELECT number % 10 , number % 10, number % 10 FROM numbers(10000); + +SET allow_experimental_join_right_table_sorting = true; + +SELECT MAX(test_right.a), count() FROM test_left INNER JOIN test_right on test_left.b = test_right.b; +SELECT MAX(test_right.a), count() FROM test_left LEFT JOIN test_right on test_left.b = test_right.b; + +drop table test_left; +drop table test_right; diff --git a/parser/testdata/03228_pr_subquery_view_order_by/ast.json b/parser/testdata/03228_pr_subquery_view_order_by/ast.json new file mode 100644 index 000000000..87b5fdfdb --- /dev/null +++ b/parser/testdata/03228_pr_subquery_view_order_by/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery view1 (children 1)" + }, + { + "explain": " Identifier view1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001495382, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/03228_pr_subquery_view_order_by/metadata.json b/parser/testdata/03228_pr_subquery_view_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03228_pr_subquery_view_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03228_pr_subquery_view_order_by/query.sql b/parser/testdata/03228_pr_subquery_view_order_by/query.sql new file mode 100644 index 000000000..804a97f73 --- /dev/null +++ b/parser/testdata/03228_pr_subquery_view_order_by/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS view1; +DROP TABLE IF EXISTS table1; +CREATE TABLE table1 (number UInt64) ENGINE=MergeTree ORDER BY number SETTINGS index_granularity=1; +INSERT INTO table1 SELECT number FROM numbers(1, 300); +CREATE VIEW view1 AS SELECT number FROM table1; + +SELECT * +FROM +( + SELECT * + FROM view1 +) +ORDER BY number DESC +LIMIT 20 +SETTINGS cluster_for_parallel_replicas = 'parallel_replicas', allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree = 1, parallel_replicas_local_plan = 1; + +DROP TABLE view1; +DROP TABLE table1; diff --git a/parser/testdata/03228_url_engine_response_headers/ast.json b/parser/testdata/03228_url_engine_response_headers/ast.json new file mode 100644 index 000000000..187eb5f2d --- /dev/null +++ b/parser/testdata/03228_url_engine_response_headers/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier _headers" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001532509, + "rows_read": 7, + "bytes_read": 266 + } +} diff --git a/parser/testdata/03228_url_engine_response_headers/metadata.json b/parser/testdata/03228_url_engine_response_headers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03228_url_engine_response_headers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03228_url_engine_response_headers/query.sql 
b/parser/testdata/03228_url_engine_response_headers/query.sql new file mode 100644 index 000000000..a7234979b --- /dev/null +++ b/parser/testdata/03228_url_engine_response_headers/query.sql @@ -0,0 +1,7 @@ +SELECT toTypeName(_headers) +FROM url('http://127.0.0.1:8123/?query=select+1&user=default', LineAsString, 's String'); + +SELECT + *, + mapFromString(_headers['X-ClickHouse-Summary'], ':', '{,')['read_rows'] +FROM url('http://127.0.0.1:8123/?query=select+1&user=default', LineAsString, 's String'); diff --git a/parser/testdata/03228_variant_permutation_issue/ast.json b/parser/testdata/03228_variant_permutation_issue/ast.json new file mode 100644 index 000000000..2ca5f6670 --- /dev/null +++ b/parser/testdata/03228_variant_permutation_issue/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00138022, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03228_variant_permutation_issue/metadata.json b/parser/testdata/03228_variant_permutation_issue/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03228_variant_permutation_issue/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03228_variant_permutation_issue/query.sql b/parser/testdata/03228_variant_permutation_issue/query.sql new file mode 100644 index 000000000..01757a65d --- /dev/null +++ b/parser/testdata/03228_variant_permutation_issue/query.sql @@ -0,0 +1,32 @@ +SET enable_json_type = 1; + +DROP TABLE IF EXISTS test_new_json_type; +CREATE TABLE test_new_json_type(id UInt32, data JSON, version UInt64) ENGINE=ReplacingMergeTree(version) ORDER BY id; +INSERT INTO test_new_json_type format JSONEachRow +{"id":1,"data":{"foo1":"bar"},"version":1} +{"id":2,"data":{"foo2":"bar"},"version":1} +{"id":3,"data":{"foo2":"bar"},"version":1} +; + +SELECT * FROM test_new_json_type FINAL WHERE data.foo2 is not null ORDER BY id; + +INSERT INTO test_new_json_type SELECT id, '{"foo2":"baz"}' AS _data, version+1 AS _version FROM test_new_json_type where id=2; + +SELECT * FROM test_new_json_type FINAL WHERE data.foo2 is not null ORDER BY id; + +DROP TABLE test_new_json_type; + +CREATE TABLE test_new_json_type(id Nullable(UInt32), data JSON, version UInt64) ENGINE=ReplacingMergeTree(version) ORDER BY id settings allow_nullable_key=1; +INSERT INTO test_new_json_type format JSONEachRow +{"id":1,"data":{"foo1":"bar"},"version":1} +{"id":2,"data":{"foo2":"bar"},"version":1} +{"id":3,"data":{"foo2":"bar"},"version":1} +; + +SELECT * FROM test_new_json_type FINAL WHERE data.foo2 is not null ORDER BY id; + +INSERT INTO test_new_json_type SELECT id, '{"foo2":"baz"}' AS _data, version+1 AS _version FROM test_new_json_type where id=2; + +SELECT * FROM test_new_json_type FINAL PREWHERE data.foo2 IS NOT NULL WHERE data.foo2 IS NOT NULL ORDER BY id ASC NULLS FIRST; + +DROP TABLE test_new_json_type; diff --git a/parser/testdata/03228_virtual_column_merge_dist/ast.json b/parser/testdata/03228_virtual_column_merge_dist/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03228_virtual_column_merge_dist/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03228_virtual_column_merge_dist/metadata.json b/parser/testdata/03228_virtual_column_merge_dist/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03228_virtual_column_merge_dist/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03228_virtual_column_merge_dist/query.sql b/parser/testdata/03228_virtual_column_merge_dist/query.sql new file mode 100644 index 000000000..e58c7f38d --- /dev/null +++ b/parser/testdata/03228_virtual_column_merge_dist/query.sql @@ -0,0 +1,27 @@ +-- There is a bug in old analyzer with currentDatabase() and distributed query. +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS t_local_1; +DROP TABLE IF EXISTS t_local_2; +DROP TABLE IF EXISTS t_merge; +DROP TABLE IF EXISTS t_distr; + +CREATE TABLE t_local_1 (a UInt32) ENGINE = MergeTree ORDER BY a; +CREATE TABLE t_local_2 (a UInt32) ENGINE = MergeTree ORDER BY a; + +INSERT INTO t_local_1 VALUES (1); +INSERT INTO t_local_2 VALUES (2); + +CREATE TABLE t_merge AS t_local_1 ENGINE = Merge(currentDatabase(), '^(t_local_1|t_local_2)$'); +CREATE TABLE t_distr AS t_local_1 ENGINE = Distributed('test_shard_localhost', currentDatabase(), t_merge, rand()); + +SELECT a, _table FROM t_merge ORDER BY a; +SELECT a, _table FROM t_distr ORDER BY a; + +SELECT a, _database = currentDatabase() FROM t_merge ORDER BY a; +SELECT a, _database = currentDatabase() FROM t_distr ORDER BY a; + +DROP TABLE IF EXISTS t_local_1; +DROP TABLE IF EXISTS t_local_2; +DROP TABLE IF EXISTS t_merge; +DROP TABLE IF EXISTS t_distr; diff --git a/parser/testdata/03229_async_insert_alter/ast.json b/parser/testdata/03229_async_insert_alter/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03229_async_insert_alter/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03229_async_insert_alter/metadata.json b/parser/testdata/03229_async_insert_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03229_async_insert_alter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03229_async_insert_alter/query.sql b/parser/testdata/03229_async_insert_alter/query.sql new file mode 100644 index 000000000..bf46bf3c8 --- /dev/null +++ b/parser/testdata/03229_async_insert_alter/query.sql @@ -0,0 +1,45 @@ +-- Tags: no-parallel, no-async-insert +-- no-parallel because the test uses FLUSH ASYNC INSERT QUEUE + +SET wait_for_async_insert = 0; +SET async_insert_busy_timeout_max_ms = 300000; +SET async_insert_busy_timeout_min_ms = 300000; +SET async_insert_use_adaptive_busy_timeout = 0; + +DROP TABLE IF EXISTS t_async_insert_alter; + +CREATE TABLE t_async_insert_alter (id Int64, v1 Int64) ENGINE = MergeTree ORDER BY id SETTINGS async_insert = 1; + +-- ADD COLUMN + +INSERT INTO t_async_insert_alter VALUES (42, 24); + +ALTER TABLE t_async_insert_alter ADD COLUMN value2 Int64; + +SYSTEM FLUSH ASYNC INSERT QUEUE; + +SELECT * FROM t_async_insert_alter ORDER BY id; + +-- MODIFY COLUMN + +INSERT INTO t_async_insert_alter VALUES (43, 34, 55); + +ALTER TABLE t_async_insert_alter MODIFY COLUMN value2 String; + +SYSTEM FLUSH ASYNC INSERT QUEUE; + +SELECT * FROM t_async_insert_alter ORDER BY id; + +-- DROP COLUMN + +INSERT INTO t_async_insert_alter VALUES ('100', '200', '300'); + +ALTER TABLE t_async_insert_alter DROP COLUMN value2; + +SYSTEM FLUSH ASYNC INSERT QUEUE; +SYSTEM FLUSH LOGS asynchronous_insert_log; + +SELECT * FROM t_async_insert_alter ORDER BY id; +SELECT query, data_kind, status FROM system.asynchronous_insert_log WHERE database = currentDatabase() AND table = 't_async_insert_alter' ORDER BY event_time_microseconds; + +DROP TABLE t_async_insert_alter; diff --git 
a/parser/testdata/03229_empty_tuple_in_array/ast.json b/parser/testdata/03229_empty_tuple_in_array/ast.json new file mode 100644 index 000000000..527a8c893 --- /dev/null +++ b/parser/testdata/03229_empty_tuple_in_array/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001216198, + "rows_read": 11, + "bytes_read": 422 + } +} diff --git a/parser/testdata/03229_empty_tuple_in_array/metadata.json b/parser/testdata/03229_empty_tuple_in_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03229_empty_tuple_in_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03229_empty_tuple_in_array/query.sql b/parser/testdata/03229_empty_tuple_in_array/query.sql new file mode 100644 index 000000000..09ba3595a --- /dev/null +++ b/parser/testdata/03229_empty_tuple_in_array/query.sql @@ -0,0 +1 @@ +select [()][0]; diff --git a/parser/testdata/03229_json_null_as_default_for_tuple/ast.json b/parser/testdata/03229_json_null_as_default_for_tuple/ast.json new file mode 100644 index 000000000..2525773e0 --- /dev/null +++ b/parser/testdata/03229_json_null_as_default_for_tuple/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00111269, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03229_json_null_as_default_for_tuple/metadata.json b/parser/testdata/03229_json_null_as_default_for_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03229_json_null_as_default_for_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03229_json_null_as_default_for_tuple/query.sql b/parser/testdata/03229_json_null_as_default_for_tuple/query.sql new file mode 100644 index 000000000..c34df1f7d --- /dev/null +++ b/parser/testdata/03229_json_null_as_default_for_tuple/query.sql @@ -0,0 +1,4 @@ +set enable_json_type=1; +set input_format_json_infer_array_of_dynamic_from_array_of_different_types=0; + +select materialize('{"a" : [[1, {}], null]}')::JSON as json, getSubcolumn(json, 'a'), dynamicType(getSubcolumn(json, 'a')); diff --git a/parser/testdata/03229_json_structure_comparison/ast.json b/parser/testdata/03229_json_structure_comparison/ast.json new file mode 100644 index 000000000..f104c67e6 --- /dev/null +++ b/parser/testdata/03229_json_structure_comparison/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001169959, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03229_json_structure_comparison/metadata.json b/parser/testdata/03229_json_structure_comparison/metadata.json new 
file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03229_json_structure_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03229_json_structure_comparison/query.sql b/parser/testdata/03229_json_structure_comparison/query.sql new file mode 100644 index 000000000..5b1d81c14 --- /dev/null +++ b/parser/testdata/03229_json_structure_comparison/query.sql @@ -0,0 +1,20 @@ +SET enable_json_type=1; + +DROP TABLE IF EXISTS test_new_json_type; + +CREATE TABLE test_new_json_type(id UInt32, data JSON, version UInt64) ENGINE=ReplacingMergeTree(version) ORDER BY id; + +INSERT INTO test_new_json_type format JSONEachRow +{"id":1,"data":{"foo1":"bar"},"version":1} +{"id":2,"data":{"foo2":"bar"},"version":1} +{"id":3,"data":{"foo2":"bar"},"version":1} +; + +SELECT + a.data, + b.data +FROM test_new_json_type AS a +INNER JOIN test_new_json_type AS b ON a.id = b.id +ORDER BY id; + +DROP TABLE test_new_json_type; diff --git a/parser/testdata/03229_query_condition_cache_drop_cache/ast.json b/parser/testdata/03229_query_condition_cache_drop_cache/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_drop_cache/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03229_query_condition_cache_drop_cache/metadata.json b/parser/testdata/03229_query_condition_cache_drop_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_drop_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03229_query_condition_cache_drop_cache/query.sql b/parser/testdata/03229_query_condition_cache_drop_cache/query.sql new file mode 100644 index 000000000..ce4240244 --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_drop_cache/query.sql @@ -0,0 +1,25 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +-- Tests that SYSTEM DROP QUERY CONDITION CACHE works + +SET allow_experimental_analyzer = 1; + +-- (it's silly to use what will be tested below but we have to assume other tests cluttered the query cache) +SYSTEM DROP QUERY CONDITION CACHE; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (a Int64, b Int64) ENGINE = MergeTree ORDER BY a; +INSERT INTO tab SELECT number, number FROM numbers(1_000_000); -- 1 mio rows sounds like a lot but the QCC doesn't cache anything if there is less data + +SELECT count(*) FROM tab WHERE b = 10_000 SETTINGS use_query_condition_cache = true FORMAT Null; + +SELECT 'Expect a single entry in the cache'; +SELECT count(*) FROM system.query_condition_cache; + +SYSTEM DROP QUERY CONDITION CACHE; + +SELECT 'Expect empty cache after DROP CACHE'; +SELECT count(*) FROM system.query_condition_cache; + +DROP TABLE tab; diff --git a/parser/testdata/03229_query_condition_cache_final/ast.json b/parser/testdata/03229_query_condition_cache_final/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_final/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03229_query_condition_cache_final/metadata.json b/parser/testdata/03229_query_condition_cache_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03229_query_condition_cache_final/query.sql b/parser/testdata/03229_query_condition_cache_final/query.sql new file mode 
100644 index 000000000..b3a3592f5 --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_final/query.sql @@ -0,0 +1,31 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +-- Tests that the query condition cache rejects queries with FINAL keyword + +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab (a Int64, b Int64) ENGINE = ReplacingMergeTree ORDER BY a; +INSERT INTO tab SELECT number, number FROM numbers(1_000_000); -- 1 mio rows sounds like a lot but the QCC doesn't cache anything for less data + +SELECT '--- with move to PREWHERE'; +SET optimize_move_to_prewhere = true; + +SYSTEM DROP QUERY CONDITION CACHE; + +SELECT 'Query conditions with FINAL keyword must not be cached.'; +SELECT count(*) FROM tab FINAL WHERE b = 10_000 SETTINGS use_query_condition_cache = true FORMAT Null; +SELECT count(*) FROM system.query_condition_cache; + +SELECT '--- without move to PREWHERE'; +SET optimize_move_to_prewhere = false; + +SYSTEM DROP QUERY CONDITION CACHE; + +SELECT 'Query conditions with FINAL keyword must not be cached.'; +SELECT count(*) FROM tab FINAL WHERE b = 10_000 SETTINGS use_query_condition_cache = true FORMAT Null; +SELECT count(*) FROM system.query_condition_cache; + +DROP TABLE tab; diff --git a/parser/testdata/03229_query_condition_cache_in_operator/ast.json b/parser/testdata/03229_query_condition_cache_in_operator/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_in_operator/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03229_query_condition_cache_in_operator/metadata.json b/parser/testdata/03229_query_condition_cache_in_operator/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_in_operator/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03229_query_condition_cache_in_operator/query.sql b/parser/testdata/03229_query_condition_cache_in_operator/query.sql new file mode 100644 index 000000000..67f624d43 --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_in_operator/query.sql @@ -0,0 +1,47 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +-- Test for issue #84508 (incorrect results caused by query condition cache when used with IN functions on non-const sets) + +SET allow_experimental_analyzer = 1; +SET use_query_condition_cache = 1; + +DROP TABLE IF EXISTS tab1; +DROP TABLE IF EXISTS tab2; + +CREATE TABLE tab1 ( + id UInt32 DEFAULT 0, +) +ENGINE = MergeTree() +ORDER BY tuple(); + +CREATE TABLE tab2 ( + filter_id UInt32 DEFAULT 0 +) +ENGINE = MergeTree() +ORDER BY tuple(); + +INSERT INTO tab1 SELECT number AS id FROM numbers(1000000); + +INSERT INTO tab2 VALUES(1); + +-- Should return 1 +SELECT count() +FROM tab1 +WHERE id IN ( + SELECT filter_id + FROM tab2 +); + +INSERT INTO tab2 VALUES(100001); + +-- Should return 2 +SELECT count() +FROM tab1 +WHERE id IN ( + SELECT filter_id + FROM tab2 +); + +DROP TABLE tab1; +DROP TABLE tab2; diff --git a/parser/testdata/03229_query_condition_cache_nondeterministic_functions/ast.json b/parser/testdata/03229_query_condition_cache_nondeterministic_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_nondeterministic_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03229_query_condition_cache_nondeterministic_functions/metadata.json 
b/parser/testdata/03229_query_condition_cache_nondeterministic_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_nondeterministic_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03229_query_condition_cache_nondeterministic_functions/query.sql b/parser/testdata/03229_query_condition_cache_nondeterministic_functions/query.sql new file mode 100644 index 000000000..1a87f24ea --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_nondeterministic_functions/query.sql @@ -0,0 +1,31 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +-- Tests that the query condition cache rejects conditions with non-deterministic functions + +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab (a Int64, b Int64) ENGINE = MergeTree ORDER BY a; +INSERT INTO tab SELECT number, number FROM numbers(1_000_000); -- 1 mio rows sounds like a lot but the QCC doesn't cache anything for less data + +SELECT '--- with move to PREWHERE'; +SET optimize_move_to_prewhere = true; + +SYSTEM DROP QUERY CONDITION CACHE; + +SELECT 'Query conditions with non-deterministic functions must not be cached.'; +SELECT count(*) FROM tab WHERE b = rand64() SETTINGS use_query_condition_cache = true FORMAT Null; +SELECT count(*) FROM system.query_condition_cache; + +SELECT '--- without move to PREWHERE'; +SET optimize_move_to_prewhere = false; + +SYSTEM DROP QUERY CONDITION CACHE; + +SELECT 'Query conditions with non-deterministic functions must not be cached.'; +SELECT count(*) FROM tab WHERE b = rand64() SETTINGS use_query_condition_cache = true FORMAT Null; +SELECT count(*) FROM system.query_condition_cache; + +DROP TABLE tab; diff --git a/parser/testdata/03229_query_condition_cache_plaintext_condition/ast.json b/parser/testdata/03229_query_condition_cache_plaintext_condition/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_plaintext_condition/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03229_query_condition_cache_plaintext_condition/metadata.json b/parser/testdata/03229_query_condition_cache_plaintext_condition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_plaintext_condition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03229_query_condition_cache_plaintext_condition/query.sql b/parser/testdata/03229_query_condition_cache_plaintext_condition/query.sql new file mode 100644 index 000000000..78d141191 --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_plaintext_condition/query.sql @@ -0,0 +1,35 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +SET allow_experimental_analyzer = 1; + +-- Tests the effect of setting 'query_condition_cache_store_conditions_as_plaintext' + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab (a Int64, b Int64) ENGINE = MergeTree ORDER BY a; +INSERT INTO tab SELECT number, number FROM numbers(1_000_000); -- 1 mio rows sounds like a lot but the QCC doesn't cache anything if there is less data + +SELECT '--- with move to PREWHERE'; +SET optimize_move_to_prewhere = true; + +SYSTEM DROP QUERY CONDITION CACHE; + +SELECT 'Run two queries, with and without query_condition_cache_store_conditions_as_plaintext enabled'; +SELECT count(*) FROM tab WHERE b = 10_000 SETTINGS use_query_condition_cache = true, 
query_condition_cache_store_conditions_as_plaintext = false FORMAT Null; +SELECT count(*) FROM tab WHERE b = 90_000 SETTINGS use_query_condition_cache = true, query_condition_cache_store_conditions_as_plaintext = true FORMAT Null; +SELECT part_name, condition FROM system.query_condition_cache ORDER BY condition; + +SELECT '--- without move to PREWHERE'; +SET optimize_move_to_prewhere = false; + +SYSTEM DROP QUERY CONDITION CACHE; + +SELECT 'Run two queries, with and without query_condition_cache_store_conditions_as_plaintext enabled'; +SELECT count(*) FROM tab WHERE b = 10_000 SETTINGS use_query_condition_cache = true, query_condition_cache_store_conditions_as_plaintext = false FORMAT Null; +SELECT count(*) FROM tab WHERE b = 90_000 SETTINGS use_query_condition_cache = true, query_condition_cache_store_conditions_as_plaintext = true FORMAT Null; +SELECT part_name, condition FROM system.query_condition_cache ORDER BY condition; + +SYSTEM DROP QUERY CONDITION CACHE; + +DROP TABLE tab; diff --git a/parser/testdata/03229_query_condition_cache_profile_events/ast.json b/parser/testdata/03229_query_condition_cache_profile_events/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_profile_events/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03229_query_condition_cache_profile_events/metadata.json b/parser/testdata/03229_query_condition_cache_profile_events/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_profile_events/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03229_query_condition_cache_profile_events/query.sql b/parser/testdata/03229_query_condition_cache_profile_events/query.sql new file mode 100644 index 000000000..f1072bbea --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_profile_events/query.sql @@ -0,0 +1,84 @@ +-- Tags: no-parallel, no-parallel-replicas +-- Tag no-parallel: Messes with internal cache + + -- w/o local plan for parallel replicas the test will fail in ParallelReplicas CI run since filter steps will be executed as part of remote queries +set parallel_replicas_local_plan=1; + +SET allow_experimental_analyzer = 1; + +-- Tests that queries with enabled query condition cache correctly populate profile events + +SELECT '--- with move to PREWHERE'; + +SYSTEM DROP QUERY CONDITION CACHE; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab (a Int64, b Int64) ENGINE = MergeTree ORDER BY a; +INSERT INTO tab SELECT number, number FROM numbers(1_000_000); -- 1 mio rows sounds like a lot but the QCC doesn't cache anything for less data + +SELECT count(*) FROM tab WHERE b = 10_000 FORMAT Null SETTINGS use_query_condition_cache = true; + +SYSTEM FLUSH LOGS query_log; +SELECT + ProfileEvents['QueryConditionCacheHits'], + ProfileEvents['QueryConditionCacheMisses'], + toInt32(ProfileEvents['SelectedMarks']) < toInt32(ProfileEvents['SelectedMarksTotal']) +FROM system.query_log +WHERE + type = 'QueryFinish' + AND current_database = currentDatabase() + AND query = 'SELECT count(*) FROM tab WHERE b = 10_000 FORMAT Null SETTINGS use_query_condition_cache = true;' +ORDER BY + event_time_microseconds; + +SELECT * FROM tab WHERE b = 10_000 FORMAT Null SETTINGS use_query_condition_cache = true; + +SYSTEM FLUSH LOGS query_log; +SELECT + ProfileEvents['QueryConditionCacheHits'], + ProfileEvents['QueryConditionCacheMisses'], + toInt32(ProfileEvents['SelectedMarks']) < 
toInt32(ProfileEvents['SelectedMarksTotal']) +FROM system.query_log +WHERE + type = 'QueryFinish' + AND current_database = currentDatabase() + AND query = 'SELECT * FROM tab WHERE b = 10_000 FORMAT Null SETTINGS use_query_condition_cache = true;' +ORDER BY + event_time_microseconds; + +SELECT '--- without move to PREWHERE'; + +SYSTEM DROP QUERY CONDITION CACHE; + +SELECT count(*) FROM tab WHERE b = 10_000 FORMAT Null SETTINGS use_query_condition_cache = true, optimize_move_to_prewhere = false; + +SYSTEM FLUSH LOGS query_log; +SELECT + ProfileEvents['QueryConditionCacheHits'], + ProfileEvents['QueryConditionCacheMisses'], + toInt32(ProfileEvents['SelectedMarks']) < toInt32(ProfileEvents['SelectedMarksTotal']) +FROM system.query_log +WHERE + type = 'QueryFinish' + AND current_database = currentDatabase() + AND query = 'SELECT count(*) FROM tab WHERE b = 10_000 FORMAT Null SETTINGS use_query_condition_cache = true, optimize_move_to_prewhere = false;' +ORDER BY + event_time_microseconds; + +SELECT * FROM tab WHERE b = 10_000 FORMAT Null SETTINGS use_query_condition_cache = true, optimize_move_to_prewhere = false; + +SYSTEM FLUSH LOGS query_log; +SELECT + ProfileEvents['QueryConditionCacheHits'], + ProfileEvents['QueryConditionCacheMisses'], + toInt32(ProfileEvents['SelectedMarks']) < toInt32(ProfileEvents['SelectedMarksTotal']) +FROM system.query_log +WHERE + type = 'QueryFinish' + AND current_database = currentDatabase() + AND query = 'SELECT * FROM tab WHERE b = 10_000 FORMAT Null SETTINGS use_query_condition_cache = true, optimize_move_to_prewhere = false;' +ORDER BY + event_time_microseconds; + +DROP TABLE tab; diff --git a/parser/testdata/03229_query_condition_cache_recursive_cte/ast.json b/parser/testdata/03229_query_condition_cache_recursive_cte/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_recursive_cte/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03229_query_condition_cache_recursive_cte/metadata.json b/parser/testdata/03229_query_condition_cache_recursive_cte/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_recursive_cte/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03229_query_condition_cache_recursive_cte/query.sql b/parser/testdata/03229_query_condition_cache_recursive_cte/query.sql new file mode 100644 index 000000000..b9d70dc44 --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_recursive_cte/query.sql @@ -0,0 +1,63 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +-- Test for issue #81506 (recursive CTEs return wrong results if the query condition cache is on) + +SET allow_experimental_analyzer = 1; -- needed by recursive CTEs + +-- Start from a clean query condition cache +SYSTEM DROP QUERY CONDITION CACHE; + +SELECT '-- Prepare data'; + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab +( + id String, + parent String, +) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO tab (id, parent) VALUES + ('uuid1', 'uuid2'), + ('uuid3', 'uuid4'), + ('uuid4', 'uuid2'), + ('uuid2', 'empty'), + ('uuid5', 'uuid2'), + ('uuid6', 'uuid4'); + +SELECT '-- First run'; + +WITH RECURSIVE + recursive AS ( + SELECT id FROM tab WHERE id = 'uuid3' + UNION ALL + SELECT parent AS id + FROM tab + WHERE tab.id IN recursive AND parent != 'empty' + GROUP BY parent + ) +SELECT * +FROM recursive +GROUP BY id +ORDER BY id; + +SELECT '-- Second run'; + +-- same query as 
before, expecting the same result +WITH RECURSIVE + recursive AS ( + SELECT id FROM tab WHERE id = 'uuid3' + UNION ALL + SELECT parent AS id + FROM tab + WHERE tab.id IN recursive AND parent != 'empty' + GROUP BY parent + ) +SELECT * +FROM recursive +GROUP BY id +ORDER BY id; + +DROP TABLE tab; diff --git a/parser/testdata/03229_query_condition_cache_system_table/ast.json b/parser/testdata/03229_query_condition_cache_system_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_system_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03229_query_condition_cache_system_table/metadata.json b/parser/testdata/03229_query_condition_cache_system_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_system_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03229_query_condition_cache_system_table/query.sql b/parser/testdata/03229_query_condition_cache_system_table/query.sql new file mode 100644 index 000000000..b3aa8c027 --- /dev/null +++ b/parser/testdata/03229_query_condition_cache_system_table/query.sql @@ -0,0 +1,39 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +-- Tests system table 'system.query_condition_cache' + +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab (a Int64, b Int64) ENGINE = MergeTree ORDER BY a; +INSERT INTO tab SELECT number, number FROM numbers(1_000_000); -- 1 million rows sounds like a lot, but the QCC doesn't cache anything if there is less data + +SELECT '--- with move to PREWHERE'; +SET optimize_move_to_prewhere = true; + +SYSTEM DROP QUERY CONDITION CACHE; + +SELECT 'Expect one entry in the cache after the first query.'; +SELECT count(*) FROM tab WHERE b = 10_000 SETTINGS use_query_condition_cache = true FORMAT Null; +SELECT count(*) FROM system.query_condition_cache; + +SELECT 'If the same query runs again, the cache still contains just a single entry.'; +SELECT count(*) FROM tab WHERE b = 10_000 SETTINGS use_query_condition_cache = true FORMAT Null; +SELECT count(*) FROM system.query_condition_cache; + +SELECT '--- without move to PREWHERE'; +SET optimize_move_to_prewhere = false; + +SYSTEM DROP QUERY CONDITION CACHE; + +SELECT 'Expect one entry in the cache after the first query.'; +SELECT count(*) FROM tab WHERE b = 10_000 SETTINGS use_query_condition_cache = true FORMAT Null; +SELECT count(*) FROM system.query_condition_cache; + +SELECT 'If the same query runs again, the cache still contains just a single entry.'; +SELECT count(*) FROM tab WHERE b = 10_000 SETTINGS use_query_condition_cache = true FORMAT Null; +SELECT count(*) FROM system.query_condition_cache; + +DROP TABLE tab; diff --git a/parser/testdata/03230_alter_with_mixed_mutations_and_remove_materialized/ast.json b/parser/testdata/03230_alter_with_mixed_mutations_and_remove_materialized/ast.json new file mode 100644 index 000000000..7ab0549ef --- /dev/null +++ b/parser/testdata/03230_alter_with_mixed_mutations_and_remove_materialized/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery a (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001414996, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03230_alter_with_mixed_mutations_and_remove_materialized/metadata.json 
b/parser/testdata/03230_alter_with_mixed_mutations_and_remove_materialized/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03230_alter_with_mixed_mutations_and_remove_materialized/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03230_alter_with_mixed_mutations_and_remove_materialized/query.sql b/parser/testdata/03230_alter_with_mixed_mutations_and_remove_materialized/query.sql new file mode 100644 index 000000000..d8ac32807 --- /dev/null +++ b/parser/testdata/03230_alter_with_mixed_mutations_and_remove_materialized/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS a SYNC; +CREATE TABLE a (x String, y String MATERIALIZED 'str') ENGINE = ReplicatedMergeTree('/clickhouse/{database}/a', 'r1') ORDER BY x; + +INSERT INTO a SELECT toString(number) FROM numbers(100); +SELECT 'BEFORE', table, name, type, default_kind, default_expression FROM system.columns WHERE database = currentDatabase() AND table = 'a' ORDER BY table, name; + +-- DROP INDEX is important to make the mutation not a pure metadata mutation +ALTER TABLE a + DROP INDEX IF EXISTS some_index, + MODIFY COLUMN y REMOVE MATERIALIZED +SETTINGS alter_sync = 2, mutations_sync = 2; + +SELECT 'AFTER', table, name, type, default_kind, default_expression FROM system.columns WHERE database = currentDatabase() AND table = 'a' ORDER BY table, name; diff --git a/parser/testdata/03230_anyHeavy_merge/ast.json b/parser/testdata/03230_anyHeavy_merge/ast.json new file mode 100644 index 000000000..eea122ea9 --- /dev/null +++ b/parser/testdata/03230_anyHeavy_merge/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001497362, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03230_anyHeavy_merge/metadata.json b/parser/testdata/03230_anyHeavy_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03230_anyHeavy_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03230_anyHeavy_merge/query.sql b/parser/testdata/03230_anyHeavy_merge/query.sql new file mode 100644 index 000000000..5d4c0e55d --- /dev/null +++ b/parser/testdata/03230_anyHeavy_merge/query.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS t; +CREATE TABLE t (letter String) ENGINE=MergeTree order by () partition by letter; +INSERT INTO t VALUES ('a'), ('a'), ('a'), ('a'), ('b'), ('a'), ('a'), ('a'), ('a'), ('a'), ('a'), ('a'), ('a'), ('a'), ('a'), ('a'), ('c'); +SELECT anyHeavy(if(letter != 'b', letter, NULL)) FROM t; diff --git a/parser/testdata/03230_array_zip_unaligned/ast.json b/parser/testdata/03230_array_zip_unaligned/ast.json new file mode 100644 index 000000000..895b203e1 --- /dev/null +++ b/parser/testdata/03230_array_zip_unaligned/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayZipUnaligned (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_['a', 'b', 'c']" + }, + { + "explain": " Literal Array_['d', 'e', 'f']" + }, + { + "explain": " Function toTypeName 
(children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001191748, + "rows_read": 11, + "bytes_read": 446 + } +} diff --git a/parser/testdata/03230_array_zip_unaligned/metadata.json b/parser/testdata/03230_array_zip_unaligned/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03230_array_zip_unaligned/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03230_array_zip_unaligned/query.sql b/parser/testdata/03230_array_zip_unaligned/query.sql new file mode 100644 index 000000000..08d77737e --- /dev/null +++ b/parser/testdata/03230_array_zip_unaligned/query.sql @@ -0,0 +1,15 @@ +SELECT arrayZipUnaligned(['a', 'b', 'c'], ['d', 'e', 'f']) as x, toTypeName(x); + +SELECT arrayZipUnaligned(['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i']); + +SELECT arrayZipUnaligned(); + +SELECT arrayZipUnaligned('a', 'b', 'c'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT arrayZipUnaligned(['a', 'b', 'c'], ['d', 'e', 'f', 'g']); + +SELECT arrayZipUnaligned(['a'], [1, 2, 3]); + +SELECT arrayZipUnaligned(['a', 'b', 'c'], [1, 2], [1.1, 2.2, 3.3, 4.4]); + +SELECT arrayZipUnaligned(materialize(['g', 'h', 'i'])) from numbers(3); diff --git a/parser/testdata/03230_date_trunc_and_to_start_of_interval_on_date32/ast.json b/parser/testdata/03230_date_trunc_and_to_start_of_interval_on_date32/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03230_date_trunc_and_to_start_of_interval_on_date32/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03230_date_trunc_and_to_start_of_interval_on_date32/metadata.json b/parser/testdata/03230_date_trunc_and_to_start_of_interval_on_date32/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03230_date_trunc_and_to_start_of_interval_on_date32/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03230_date_trunc_and_to_start_of_interval_on_date32/query.sql b/parser/testdata/03230_date_trunc_and_to_start_of_interval_on_date32/query.sql new file mode 100644 index 000000000..b2b6385f0 --- /dev/null +++ b/parser/testdata/03230_date_trunc_and_to_start_of_interval_on_date32/query.sql @@ -0,0 +1,26 @@ +-- { echoOn } +select toStartOfInterval(toDate32('2022-09-16'), INTERVAL 1 YEAR); +select toStartOfInterval(toDate32('2022-09-16'), INTERVAL 1 QUARTER); +select toStartOfInterval(toDate32('2022-09-16'), INTERVAL 1 MONTH); +select toStartOfInterval(toDate32('2022-09-16'), INTERVAL 1 WEEK); +select toStartOfInterval(toDate32('2022-09-16'), INTERVAL 1 DAY); +select toStartOfInterval(toDate32('2022-09-16'), INTERVAL 1 HOUR); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select toStartOfInterval(toDate32('2022-09-16'), INTERVAL 1 MINUTE); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select toStartOfInterval(toDate32('2022-09-16'), INTERVAL 1 SECOND); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select toStartOfInterval(toDate32('2022-09-16'), INTERVAL 1 MILLISECOND); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select toStartOfInterval(toDate32('2022-09-16'), INTERVAL 1 MICROSECOND); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select toStartOfInterval(toDate32('2022-09-16'), INTERVAL 1 NANOSECOND); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +select date_trunc('YEAR', toDate32('2022-09-16')); +select date_trunc('QUARTER', toDate32('2022-09-16')); +select date_trunc('MONTH', 
toDate32('2022-09-16')); +select date_trunc('WEEK', toDate32('2022-09-16')); +select date_trunc('DAY', toDate32('2022-09-16')); +select date_trunc('HOUR', toDate32('2022-09-16')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select date_trunc('MINUTE', toDate32('2022-09-16')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select date_trunc('SECOND', toDate32('2022-09-16')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select date_trunc('MILLISECOND', toDate32('2022-09-16')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select date_trunc('MICROSECOND', toDate32('2022-09-16')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select date_trunc('NANOSECOND', toDate32('2022-09-16')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + + diff --git a/parser/testdata/03230_show_create_query_identifier_quoting_style/ast.json b/parser/testdata/03230_show_create_query_identifier_quoting_style/ast.json new file mode 100644 index 000000000..069159c42 --- /dev/null +++ b/parser/testdata/03230_show_create_query_identifier_quoting_style/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery uk_mortgage_rates_dict (children 1)" + }, + { + "explain": " Identifier uk_mortgage_rates_dict" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001225531, + "rows_read": 2, + "bytes_read": 96 + } +} diff --git a/parser/testdata/03230_show_create_query_identifier_quoting_style/metadata.json b/parser/testdata/03230_show_create_query_identifier_quoting_style/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03230_show_create_query_identifier_quoting_style/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03230_show_create_query_identifier_quoting_style/query.sql b/parser/testdata/03230_show_create_query_identifier_quoting_style/query.sql new file mode 100644 index 000000000..e7e0bbe7f --- /dev/null +++ b/parser/testdata/03230_show_create_query_identifier_quoting_style/query.sql @@ -0,0 +1,388 @@ +DROP DICTIONARY IF EXISTS uk_mortgage_rates_dict; +DROP TABLE IF EXISTS uk_mortgage_rates; +DROP VIEW IF EXISTS uk_prices_aggs_view; +DROP TABLE IF EXISTS uk_prices_aggs_dest; +DROP VIEW IF EXISTS prices_by_year_view; +DROP TABLE IF EXISTS prices_by_year_dest; +DROP TABLE IF EXISTS uk_price_paid; + +-- Create tables, views, dictionaries + +CREATE TABLE uk_price_paid +( + Table String, + Engine String, + price UInt32, + date Date, + postcode1 LowCardinality(String), + postcode2 LowCardinality(String), + type Enum('terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4, 'other' = 0), + is_new UInt8, + duration Enum('freehold' = 1, 'leasehold' = 2, 'unknown' = 0), + addr1 String, + addr2 String, + street LowCardinality(String), + locality LowCardinality(String), + town LowCardinality(String), + district LowCardinality(String), + county LowCardinality(String), + INDEX county_index county TYPE set(10) GRANULARITY 1, + PROJECTION town_date_projection + ( + SELECT + town, + date, + price + ORDER BY + town, + date + ), + PROJECTION handy_aggs_projection + ( + SELECT + avg(price), + max(price), + sum(price) + GROUP BY town + ) +) +ENGINE = MergeTree +ORDER BY (postcode1, postcode2, date); + +CREATE TABLE prices_by_year_dest ( + Table String, + Engine String, + price UInt32, + date Date, + addr1 String, + addr2 String, + street LowCardinality(String), + town LowCardinality(String), + district LowCardinality(String), + county LowCardinality(String) +) +ENGINE = MergeTree 
+PRIMARY KEY (town, date) +PARTITION BY toYear(date); + +CREATE MATERIALIZED VIEW prices_by_year_view +TO prices_by_year_dest +AS + SELECT + price, + date, + addr1, + addr2, + street, + town, + district, + county + FROM uk_price_paid; + +CREATE TABLE uk_prices_aggs_dest ( + month Date, + min_price SimpleAggregateFunction(min, UInt32), + max_price SimpleAggregateFunction(max, UInt32), + volume AggregateFunction(count, UInt32), + avg_price AggregateFunction(avg, UInt32) +) +ENGINE = AggregatingMergeTree +PRIMARY KEY month; + +CREATE MATERIALIZED VIEW uk_prices_aggs_view +TO uk_prices_aggs_dest +AS + WITH + toStartOfMonth(date) AS month + SELECT + month, + minSimpleState(price) AS min_price, + maxSimpleState(price) AS max_price, + countState(price) AS volume, + avgState(price) AS avg_price + FROM uk_price_paid + GROUP BY month; + +CREATE TABLE uk_mortgage_rates ( + date DateTime64, + variable Decimal32(2), + fixed Decimal32(2), + bank Decimal32(2) +) +ENGINE Memory(); + +INSERT INTO uk_mortgage_rates VALUES ('2004-02-29', 5.02, 4.9, 4); +INSERT INTO uk_mortgage_rates VALUES ('2004-03-31', 5.11, 4.91, 4); + +CREATE DICTIONARY uk_mortgage_rates_dict ( + date DateTime64, + variable Decimal32(2), + fixed Decimal32(2), + bank Decimal32(2) +) +PRIMARY KEY date +SOURCE( + CLICKHOUSE(TABLE 'uk_mortgage_rates') +) +LAYOUT(COMPLEX_KEY_HASHED()) +LIFETIME(2628000000); + + +-- Show tables, views, dictionaries with default settings +SELECT('Settings: default'); +SHOW CREATE TABLE uk_price_paid; +SHOW CREATE VIEW prices_by_year_view; +SHOW CREATE uk_prices_aggs_dest; +SHOW CREATE VIEW uk_prices_aggs_view; +SHOW CREATE DICTIONARY uk_mortgage_rates_dict; + +-- Show tables, views, dictionaries with show_create_query_identifier_quoting_rule='always', show_create_query_identifier_quoting_style='Backticks' +SELECT('Settings: always & Backticks'); +SHOW CREATE TABLE uk_price_paid +SETTINGS + show_create_query_identifier_quoting_rule='always', + show_create_query_identifier_quoting_style='Backticks'; + +SHOW CREATE VIEW prices_by_year_view +SETTINGS + show_create_query_identifier_quoting_rule='always', + show_create_query_identifier_quoting_style='Backticks'; + +SHOW CREATE uk_prices_aggs_dest +SETTINGS + show_create_query_identifier_quoting_rule='always', + show_create_query_identifier_quoting_style='Backticks'; + +SHOW CREATE VIEW uk_prices_aggs_view +SETTINGS + show_create_query_identifier_quoting_rule='always', + show_create_query_identifier_quoting_style='Backticks'; + +SHOW CREATE DICTIONARY uk_mortgage_rates_dict +SETTINGS + show_create_query_identifier_quoting_rule='always', + show_create_query_identifier_quoting_style='Backticks'; + +-- Show tables, views, dictionaries with show_create_query_identifier_quoting_rule='user_display', show_create_query_identifier_quoting_style='Backticks' +SELECT('Settings: user_display & Backticks'); +SHOW CREATE TABLE uk_price_paid +SETTINGS + show_create_query_identifier_quoting_rule='user_display', + show_create_query_identifier_quoting_style='Backticks'; + +SHOW CREATE VIEW prices_by_year_view +SETTINGS + show_create_query_identifier_quoting_rule='user_display', + show_create_query_identifier_quoting_style='Backticks'; + +SHOW CREATE uk_prices_aggs_dest +SETTINGS + show_create_query_identifier_quoting_rule='user_display', + show_create_query_identifier_quoting_style='Backticks'; + +SHOW CREATE VIEW uk_prices_aggs_view +SETTINGS + show_create_query_identifier_quoting_rule='user_display', + show_create_query_identifier_quoting_style='Backticks'; + +SHOW CREATE DICTIONARY 
uk_mortgage_rates_dict +SETTINGS + show_create_query_identifier_quoting_rule='user_display', + show_create_query_identifier_quoting_style='Backticks'; + +-- Show tables, views, dictionaries with show_create_query_identifier_quoting_rule='when_necessary', show_create_query_identifier_quoting_style='Backticks' +SELECT('Settings: when_necessary & Backticks'); +SHOW CREATE TABLE uk_price_paid +SETTINGS + show_create_query_identifier_quoting_rule='when_necessary', + show_create_query_identifier_quoting_style='Backticks'; + +SHOW CREATE VIEW prices_by_year_view +SETTINGS + show_create_query_identifier_quoting_rule='when_necessary', + show_create_query_identifier_quoting_style='Backticks'; + +SHOW CREATE uk_prices_aggs_dest +SETTINGS + show_create_query_identifier_quoting_rule='when_necessary', + show_create_query_identifier_quoting_style='Backticks'; + +SHOW CREATE VIEW uk_prices_aggs_view +SETTINGS + show_create_query_identifier_quoting_rule='when_necessary', + show_create_query_identifier_quoting_style='Backticks'; + +SHOW CREATE DICTIONARY uk_mortgage_rates_dict +SETTINGS + show_create_query_identifier_quoting_rule='when_necessary', + show_create_query_identifier_quoting_style='Backticks'; + +-- Show tables, views, dictionaries with show_create_query_identifier_quoting_rule='always', show_create_query_identifier_quoting_style='DoubleQuotes' +SELECT('Settings: always & DoubleQuotes'); +SHOW CREATE TABLE uk_price_paid +SETTINGS + show_create_query_identifier_quoting_rule='always', + show_create_query_identifier_quoting_style='DoubleQuotes'; + +SHOW CREATE VIEW prices_by_year_view +SETTINGS + show_create_query_identifier_quoting_rule='always', + show_create_query_identifier_quoting_style='DoubleQuotes'; + +SHOW CREATE uk_prices_aggs_dest +SETTINGS + show_create_query_identifier_quoting_rule='always', + show_create_query_identifier_quoting_style='DoubleQuotes'; + +SHOW CREATE VIEW uk_prices_aggs_view +SETTINGS + show_create_query_identifier_quoting_rule='always', + show_create_query_identifier_quoting_style='DoubleQuotes'; + +SHOW CREATE DICTIONARY uk_mortgage_rates_dict +SETTINGS + show_create_query_identifier_quoting_rule='always', + show_create_query_identifier_quoting_style='DoubleQuotes'; + +-- Show tables, views, dictionaries with show_create_query_identifier_quoting_rule='user_display', show_create_query_identifier_quoting_style='DoubleQuotes' +SELECT('Settings: user_display & DoubleQuotes'); +SHOW CREATE TABLE uk_price_paid +SETTINGS + show_create_query_identifier_quoting_rule='user_display', + show_create_query_identifier_quoting_style='DoubleQuotes'; + +SHOW CREATE VIEW prices_by_year_view +SETTINGS + show_create_query_identifier_quoting_rule='user_display', + show_create_query_identifier_quoting_style='DoubleQuotes'; + +SHOW CREATE uk_prices_aggs_dest +SETTINGS + show_create_query_identifier_quoting_rule='user_display', + show_create_query_identifier_quoting_style='DoubleQuotes'; + +SHOW CREATE VIEW uk_prices_aggs_view +SETTINGS + show_create_query_identifier_quoting_rule='user_display', + show_create_query_identifier_quoting_style='DoubleQuotes'; + +SHOW CREATE DICTIONARY uk_mortgage_rates_dict +SETTINGS + show_create_query_identifier_quoting_rule='user_display', + show_create_query_identifier_quoting_style='DoubleQuotes'; + +-- Show tables, views, dictionaries with show_create_query_identifier_quoting_rule='when_necessary', show_create_query_identifier_quoting_style='DoubleQuotes' +SELECT('Settings: when_necessary & DoubleQuotes'); +SHOW CREATE TABLE uk_price_paid +SETTINGS + 
show_create_query_identifier_quoting_rule='when_necessary', + show_create_query_identifier_quoting_style='DoubleQuotes'; + +SHOW CREATE VIEW prices_by_year_view +SETTINGS + show_create_query_identifier_quoting_rule='when_necessary', + show_create_query_identifier_quoting_style='DoubleQuotes'; + +SHOW CREATE uk_prices_aggs_dest +SETTINGS + show_create_query_identifier_quoting_rule='when_necessary', + show_create_query_identifier_quoting_style='DoubleQuotes'; + +SHOW CREATE VIEW uk_prices_aggs_view +SETTINGS + show_create_query_identifier_quoting_rule='when_necessary', + show_create_query_identifier_quoting_style='DoubleQuotes'; + +SHOW CREATE DICTIONARY uk_mortgage_rates_dict +SETTINGS + show_create_query_identifier_quoting_rule='when_necessary', + show_create_query_identifier_quoting_style='DoubleQuotes'; + +-- Show tables, views, dictionaries with show_create_query_identifier_quoting_rule='always', show_create_query_identifier_quoting_style='BackticksMySQL' +SELECT('Settings: always & BackticksMySQL'); +SHOW CREATE TABLE uk_price_paid +SETTINGS + show_create_query_identifier_quoting_rule='always', + show_create_query_identifier_quoting_style='BackticksMySQL'; + +SHOW CREATE VIEW prices_by_year_view +SETTINGS + show_create_query_identifier_quoting_rule='always', + show_create_query_identifier_quoting_style='BackticksMySQL'; + +SHOW CREATE uk_prices_aggs_dest +SETTINGS + show_create_query_identifier_quoting_rule='always', + show_create_query_identifier_quoting_style='BackticksMySQL'; + +SHOW CREATE VIEW uk_prices_aggs_view +SETTINGS + show_create_query_identifier_quoting_rule='always', + show_create_query_identifier_quoting_style='BackticksMySQL'; + +SHOW CREATE DICTIONARY uk_mortgage_rates_dict +SETTINGS + show_create_query_identifier_quoting_rule='always', + show_create_query_identifier_quoting_style='BackticksMySQL'; + +-- Show tables, views, dictionaries with show_create_query_identifier_quoting_rule='user_display', show_create_query_identifier_quoting_style='BackticksMySQL' +SELECT('Settings: user_display & BackticksMySQL'); +SHOW CREATE TABLE uk_price_paid +SETTINGS + show_create_query_identifier_quoting_rule='user_display', + show_create_query_identifier_quoting_style='BackticksMySQL'; + +SHOW CREATE VIEW prices_by_year_view +SETTINGS + show_create_query_identifier_quoting_rule='user_display', + show_create_query_identifier_quoting_style='BackticksMySQL'; + +SHOW CREATE uk_prices_aggs_dest +SETTINGS + show_create_query_identifier_quoting_rule='user_display', + show_create_query_identifier_quoting_style='BackticksMySQL'; + +SHOW CREATE VIEW uk_prices_aggs_view +SETTINGS + show_create_query_identifier_quoting_rule='user_display', + show_create_query_identifier_quoting_style='BackticksMySQL'; + +SHOW CREATE DICTIONARY uk_mortgage_rates_dict +SETTINGS + show_create_query_identifier_quoting_rule='user_display', + show_create_query_identifier_quoting_style='BackticksMySQL'; + +-- Show tables, views, dictionaries with show_create_query_identifier_quoting_rule='when_necessary', show_create_query_identifier_quoting_style='BackticksMySQL' +SELECT('Settings: when_necessary & BackticksMySQL'); +SHOW CREATE TABLE uk_price_paid +SETTINGS + show_create_query_identifier_quoting_rule='when_necessary', + show_create_query_identifier_quoting_style='BackticksMySQL'; + +SHOW CREATE VIEW prices_by_year_view +SETTINGS + show_create_query_identifier_quoting_rule='when_necessary', + show_create_query_identifier_quoting_style='BackticksMySQL'; + +SHOW CREATE uk_prices_aggs_dest +SETTINGS + 
show_create_query_identifier_quoting_rule='when_necessary', + show_create_query_identifier_quoting_style='BackticksMySQL'; + +SHOW CREATE VIEW uk_prices_aggs_view +SETTINGS + show_create_query_identifier_quoting_rule='when_necessary', + show_create_query_identifier_quoting_style='BackticksMySQL'; + +SHOW CREATE DICTIONARY uk_mortgage_rates_dict +SETTINGS + show_create_query_identifier_quoting_rule='when_necessary', + show_create_query_identifier_quoting_style='BackticksMySQL'; + +DROP DICTIONARY uk_mortgage_rates_dict; +DROP TABLE uk_mortgage_rates; +DROP VIEW uk_prices_aggs_view; +DROP TABLE uk_prices_aggs_dest; +DROP VIEW prices_by_year_view; +DROP TABLE prices_by_year_dest; +DROP TABLE uk_price_paid; diff --git a/parser/testdata/03230_subcolumns_mv/ast.json b/parser/testdata/03230_subcolumns_mv/ast.json new file mode 100644 index 000000000..2673bd633 --- /dev/null +++ b/parser/testdata/03230_subcolumns_mv/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery rawtable (children 1)" + }, + { + "explain": " Identifier rawtable" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00129098, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/03230_subcolumns_mv/metadata.json b/parser/testdata/03230_subcolumns_mv/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03230_subcolumns_mv/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03230_subcolumns_mv/query.sql b/parser/testdata/03230_subcolumns_mv/query.sql new file mode 100644 index 000000000..ffede134b --- /dev/null +++ b/parser/testdata/03230_subcolumns_mv/query.sql @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS rawtable; +DROP TABLE IF EXISTS raw_to_attributes_mv; +DROP TABLE IF EXISTS attributes; + +SET optimize_functions_to_subcolumns = 1; +SET allow_suspicious_primary_key = 1; + +CREATE TABLE rawtable +( + `Attributes` Map(String, String), +) +ENGINE = MergeTree +ORDER BY tuple(); + +CREATE TABLE attributes +( + `AttributeKeys` Array(String), + `AttributeValues` Array(String) +) +ENGINE = ReplacingMergeTree +ORDER BY tuple(); + +CREATE MATERIALIZED VIEW raw_to_attributes_mv TO attributes +( + `AttributeKeys` Array(String), + `AttributeValues` Array(String) +) +AS SELECT + mapKeys(Attributes) AS AttributeKeys, + mapValues(Attributes) AS AttributeValues +FROM rawtable; + +INSERT INTO rawtable VALUES ({'key1': 'value1', 'key2': 'value2'}); +SELECT * FROM raw_to_attributes_mv ORDER BY AttributeKeys; + +DROP TABLE IF EXISTS rawtable; +DROP TABLE IF EXISTS raw_to_attributes_mv; +DROP TABLE IF EXISTS attributes; diff --git a/parser/testdata/03230_system_projections/ast.json b/parser/testdata/03230_system_projections/ast.json new file mode 100644 index 000000000..b65ee35a2 --- /dev/null +++ b/parser/testdata/03230_system_projections/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery projections (children 1)" + }, + { + "explain": " Identifier projections" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001383591, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/03230_system_projections/metadata.json b/parser/testdata/03230_system_projections/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03230_system_projections/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03230_system_projections/query.sql b/parser/testdata/03230_system_projections/query.sql new file mode 100644 index 000000000..37c1e5df8 --- /dev/null +++ b/parser/testdata/03230_system_projections/query.sql @@ -0,0 +1,39 @@ +DROP TABLE IF EXISTS projections; +DROP TABLE IF EXISTS projections_2; + +CREATE TABLE projections +( + key String, + d1 Int, + PROJECTION improved_sorting_key ( + SELECT * + ORDER BY d1, key + ) +) +Engine=MergeTree() +ORDER BY key; + +CREATE TABLE projections_2 +( + name String, + frequency UInt64, + PROJECTION agg ( + SELECT name, max(frequency) max_frequency + GROUP BY name + ), + PROJECTION agg_no_key ( + SELECT max(frequency) max_frequency + ) +) +Engine=MergeTree() +ORDER BY name; + +SELECT * FROM system.projections WHERE database = currentDatabase(); + +SELECT count(*) FROM system.projections WHERE table = 'projections' AND database = currentDatabase(); +SELECT count(*) FROM system.projections WHERE table = 'projections_2' AND database = currentDatabase(); + +SELECT name FROM system.projections WHERE type = 'Normal' AND database = currentDatabase(); + +DROP TABLE projections; +DROP TABLE projections_2; \ No newline at end of file diff --git a/parser/testdata/03231_alter_no_properties_before_remove_modify_reset/ast.json b/parser/testdata/03231_alter_no_properties_before_remove_modify_reset/ast.json new file mode 100644 index 000000000..7db59315d --- /dev/null +++ b/parser/testdata/03231_alter_no_properties_before_remove_modify_reset/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery a (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001776209, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03231_alter_no_properties_before_remove_modify_reset/metadata.json b/parser/testdata/03231_alter_no_properties_before_remove_modify_reset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03231_alter_no_properties_before_remove_modify_reset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03231_alter_no_properties_before_remove_modify_reset/query.sql b/parser/testdata/03231_alter_no_properties_before_remove_modify_reset/query.sql new file mode 100644 index 000000000..692526cae --- /dev/null +++ b/parser/testdata/03231_alter_no_properties_before_remove_modify_reset/query.sql @@ -0,0 +1,151 @@ +DROP TABLE IF EXISTS a SYNC; +CREATE TABLE a (x Int64, y Int64 MATERIALIZED 1 SETTINGS (max_compress_block_size = 30000)) ENGINE = MergeTree ORDER BY x; + +-- When the type is not present in the column declaration, the parser interprets TTL/COLLATE/SETTINGS as a data type, +-- so such queries don't throw a syntax error on the client side, they just fail to parse. For server-side validation these +-- queries still result in a syntax error exception. Even though the exception is thrown for a different reason, these +-- checks are good safeguards for the future, where the parsing of such properties might change. 
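+-- As an illustrative restatement of the note above (a hedged sketch, not a claim about exact parser internals): in a query such as +-- ALTER TABLE a MODIFY COLUMN y SETTINGS (max_compress_block_size = 20000) REMOVE MATERIALIZED +-- the declaration of 'y' carries no explicit type, so the parser presumably tries to consume 'SETTINGS' as the data type +-- and fails before ever reaching REMOVE, which is why each statement below expects SYNTAX_ERROR. 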
+SELECT 'REMOVE'; +ALTER TABLE a MODIFY COLUMN y Int64 REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y DEFAULT 2 REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y MATERIALIZED 3 REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y EPHEMERAL 4 REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y COMMENT '5' REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y CODEC(ZSTD) REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y STATISTICS(tdigest) REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y PRIMARY KEY REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR } + +SELECT 'The same, but with type'; +ALTER TABLE a MODIFY COLUMN y Int64 DEFAULT 2 REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 MATERIALIZED 3 REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 EPHEMERAL 4 REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 COMMENT '5' REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 CODEC(ZSTD) REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 STATISTICS(tdigest) REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 TTL toDate('2025-01-01') + toIntervalDay(x) REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 COLLATE binary REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 SETTINGS (max_compress_block_size = 20000) REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 PRIMARY KEY REMOVE MATERIALIZED; -- { clientError SYNTAX_ERROR } + +SELECT 'MODIFY SETTING'; +ALTER TABLE a MODIFY COLUMN y Int64 MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y DEFAULT 2 MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y MATERIALIZED 3 MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y EPHEMERAL 4 MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y COMMENT '5' MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y CODEC(ZSTD) MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y STATISTICS(tdigest) MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y PRIMARY KEY MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR } + +SELECT 'The same, but with type'; +ALTER TABLE a MODIFY COLUMN y Int64 DEFAULT 2 MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 MATERIALIZED 3 MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 EPHEMERAL 4 MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 COMMENT '5' MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 CODEC(ZSTD) MODIFY SETTING 
max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 STATISTICS(tdigest) MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 TTL toDate('2025-01-01') + toIntervalDay(x) MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 COLLATE binary MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 SETTINGS (some_setting = 2) MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 PRIMARY KEY MODIFY SETTING max_compress_block_size = 20000; -- { clientError SYNTAX_ERROR } + +SELECT 'RESET SETTING'; +ALTER TABLE a MODIFY COLUMN y Int64 RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y DEFAULT 2 RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y MATERIALIZED 3 RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y EPHEMERAL 4 RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y COMMENT '5' RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y CODEC(ZSTD) RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y STATISTICS(tdigest) RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y PRIMARY KEY RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR } + +SELECT 'The same, but with type'; +ALTER TABLE a MODIFY COLUMN y Int64 DEFAULT 2 RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 MATERIALIZED 3 RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 EPHEMERAL 4 RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 COMMENT '5' RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 CODEC(ZSTD) RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 STATISTICS(tdigest) RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 TTL toDate('2025-01-01') + toIntervalDay(x) RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 COLLATE binary RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 SETTINGS (some_setting = 2) RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR } +ALTER TABLE a MODIFY COLUMN y Int64 PRIMARY KEY RESET SETTING max_compress_block_size; -- { clientError SYNTAX_ERROR } + + + +SELECT 'All the above, but on server side'; + +SELECT 'REMOVE'; +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y DEFAULT 2 REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y MATERIALIZED 3 REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y EPHEMERAL 4 REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT 
formatQuery('ALTER TABLE a MODIFY COLUMN y COMMENT \'5\' REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y CODEC(ZSTD) REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y STATISTICS(tdigest) REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y TTL toDate(\'2025-01-01\') + toIntervalDay(x) REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y COLLATE binary REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y SETTINGS (max_compress_block_size = 20000) REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y PRIMARY KEY REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } + +SELECT 'The same, but with type'; +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 DEFAULT 2 REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 MATERIALIZED 3 REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 EPHEMERAL 4 REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 COMMENT \'5\' REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 CODEC(ZSTD) REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 STATISTICS(tdigest) REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 TTL toDate(\'2025-01-01\') + toIntervalDay(x) REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 COLLATE binary REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 SETTINGS (max_compress_block_size = 20000) REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 PRIMARY KEY REMOVE MATERIALIZED'); -- { serverError SYNTAX_ERROR } + +SELECT 'MODIFY SETTING'; +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y DEFAULT 2 MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y MATERIALIZED 3 MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y EPHEMERAL 4 MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y COMMENT \'5\' MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y CODEC(ZSTD) MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y STATISTICS(tdigest) MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y TTL toDate(\'2025-01-01\') + toIntervalDay(x) MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y COLLATE binary MODIFY 
SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y SETTINGS (some_setting = 2) MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y PRIMARY KEY MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } + +SELECT 'The same, but with type'; +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 DEFAULT 2 MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 MATERIALIZED 3 MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 EPHEMERAL 4 MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 COMMENT \'5\' MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 CODEC(ZSTD) MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 STATISTICS(tdigest) MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 TTL toDate(\'2025-01-01\') + toIntervalDay(x) MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 COLLATE binary MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 SETTINGS (some_setting = 2) MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 PRIMARY KEY MODIFY SETTING max_compress_block_size = 20000'); -- { serverError SYNTAX_ERROR } + +SELECT 'RESET SETTING'; +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y DEFAULT 2 RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y MATERIALIZED 3 RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y EPHEMERAL 4 RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y COMMENT \'5\' RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y CODEC(ZSTD) RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y STATISTICS(tdigest) RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y TTL toDate(\'2025-01-01\') + toIntervalDay(x) RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y COLLATE binary RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y SETTINGS (some_setting = 2) RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y PRIMARY KEY RESET SETTING max_compress_block_size'); -- { 
serverError SYNTAX_ERROR } + +SELECT 'The same, but with type'; +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 DEFAULT 2 RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 MATERIALIZED 3 RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 EPHEMERAL 4 RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 COMMENT \'5\' RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 CODEC(ZSTD) RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 STATISTICS(tdigest) RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 TTL toDate(\'2025-01-01\') + toIntervalDay(x) RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 COLLATE binary RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 SETTINGS (some_setting = 2) RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } +SELECT formatQuery('ALTER TABLE a MODIFY COLUMN y Int64 PRIMARY KEY RESET SETTING max_compress_block_size'); -- { serverError SYNTAX_ERROR } diff --git a/parser/testdata/03231_create_with_clone_as/ast.json b/parser/testdata/03231_create_with_clone_as/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03231_create_with_clone_as/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03231_create_with_clone_as/metadata.json b/parser/testdata/03231_create_with_clone_as/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03231_create_with_clone_as/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03231_create_with_clone_as/query.sql b/parser/testdata/03231_create_with_clone_as/query.sql new file mode 100644 index 000000000..7150a96ec --- /dev/null +++ b/parser/testdata/03231_create_with_clone_as/query.sql @@ -0,0 +1,121 @@ +-- Tags: no-replicated-database, memory-engine +-- Tag no-replicated-database: Unsupported type of CREATE TABLE ... CLONE AS ... 
query + +DROP TABLE IF EXISTS foo_memory; +DROP TABLE IF EXISTS clone_as_foo_memory; +DROP TABLE IF EXISTS foo_file; +DROP TABLE IF EXISTS clone_as_foo_file; +DROP TABLE IF EXISTS foo_merge_tree; +DROP TABLE IF EXISTS clone_as_foo_merge_tree; +DROP TABLE IF EXISTS clone_as_foo_merge_tree_p_x; +DROP TABLE IF EXISTS clone_as_foo_merge_tree_p_y; +DROP TABLE IF EXISTS foo_replacing_merge_tree; +DROP TABLE IF EXISTS clone_as_foo_replacing_merge_tree; +DROP TABLE IF EXISTS clone_as_foo_replacing_merge_tree_p_x; +DROP TABLE IF EXISTS clone_as_foo_replacing_merge_tree_p_y; +DROP TABLE IF EXISTS foo_replicated_merge_tree; +DROP TABLE IF EXISTS clone_as_foo_replicated_merge_tree; +DROP TABLE IF EXISTS clone_as_foo_replicated_merge_tree_p_x; +DROP TABLE IF EXISTS clone_as_foo_replicated_merge_tree_p_y; + +-- CLONE AS with a table of Memory engine +CREATE TABLE foo_memory (x Int8, y String) ENGINE=Memory; +SHOW CREATE TABLE foo_memory; +INSERT INTO foo_memory VALUES (1, 'a'), (2, 'b'); + +CREATE TABLE clone_as_foo_memory CLONE AS foo_memory; -- { serverError SUPPORT_IS_DISABLED } + +-- CLONE AS with a table of File engine +CREATE TABLE foo_file (x Int8, y String) ENGINE=File(TabSeparated); +SHOW CREATE TABLE foo_file; +INSERT INTO foo_file VALUES (1, 'a'), (2, 'b'); + +CREATE TABLE clone_as_foo_file CLONE AS foo_file; -- { serverError SUPPORT_IS_DISABLED } + +-- CLONE AS with a table of MergeTree engine +CREATE TABLE foo_merge_tree (x Int8, y String) ENGINE=MergeTree PRIMARY KEY x; +SHOW CREATE TABLE foo_merge_tree; +INSERT INTO foo_merge_tree VALUES (1, 'a'), (2, 'b'); +SELECT * FROM foo_merge_tree; + +CREATE TABLE clone_as_foo_merge_tree CLONE AS foo_merge_tree; +SHOW CREATE TABLE clone_as_foo_merge_tree; +SELECT 'from foo_merge_tree'; +SELECT * FROM foo_merge_tree; +SELECT 'from clone_as_foo_merge_tree'; +SELECT * FROM clone_as_foo_merge_tree; + +-- Specify ENGINE +CREATE TABLE clone_as_foo_merge_tree_p_x CLONE AS foo_merge_tree ENGINE=MergeTree PRIMARY KEY x; +SELECT 'from clone_as_foo_merge_tree_p_x'; +SELECT * FROM clone_as_foo_merge_tree_p_x; +CREATE TABLE clone_as_foo_merge_tree_p_y CLONE AS foo_merge_tree ENGINE=MergeTree PRIMARY KEY y; -- { serverError BAD_ARGUMENTS } + +-- CLONE AS with a table of ReplacingMergeTree engine +CREATE TABLE foo_replacing_merge_tree (x Int8, y String) ENGINE=ReplacingMergeTree PRIMARY KEY x; +SHOW CREATE TABLE foo_replacing_merge_tree; +INSERT INTO foo_replacing_merge_tree VALUES (1, 'a'), (2, 'b'); +SELECT * FROM foo_replacing_merge_tree; + +CREATE TABLE clone_as_foo_replacing_merge_tree CLONE AS foo_replacing_merge_tree; +SHOW CREATE TABLE clone_as_foo_replacing_merge_tree; +SELECT 'from foo_replacing_merge_tree'; +SELECT * FROM foo_replacing_merge_tree; +SELECT 'from clone_as_foo_replacing_merge_tree'; +SELECT * FROM clone_as_foo_replacing_merge_tree; + +-- Specify ENGINE +CREATE TABLE clone_as_foo_replacing_merge_tree_p_x CLONE AS foo_replacing_merge_tree ENGINE=ReplacingMergeTree PRIMARY KEY x; +SELECT 'from clone_as_foo_replacing_merge_tree_p_x'; +SELECT * FROM clone_as_foo_replacing_merge_tree_p_x; +CREATE TABLE clone_as_foo_replacing_merge_tree_p_y CLONE AS foo_replacing_merge_tree ENGINE=ReplacingMergeTree PRIMARY KEY y; -- { serverError BAD_ARGUMENTS } + +-- CLONE AS with a table of ReplicatedMergeTree engine +CREATE TABLE foo_replicated_merge_tree (x Int8, y String) ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_foo_replicated_merge_tree', 'r1') PRIMARY KEY x; +SHOW CREATE TABLE foo_replicated_merge_tree; +INSERT INTO 
foo_replicated_merge_tree VALUES (1, 'a'), (2, 'b'); +SELECT 'from foo_replicated_merge_tree'; +SELECT * FROM foo_replicated_merge_tree; + +CREATE TABLE clone_as_foo_replicated_merge_tree CLONE AS foo_replicated_merge_tree; -- { serverError REPLICA_ALREADY_EXISTS } + +-- Specify ENGINE +CREATE TABLE clone_as_foo_replicated_merge_tree_p_x CLONE AS foo_replicated_merge_tree ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/clone_as_foo_replicated_merge_tree_p_x', 'r1') PRIMARY KEY x; +SHOW CREATE TABLE clone_as_foo_replicated_merge_tree_p_x; +SELECT 'from clone_as_foo_replicated_merge_tree_p_x'; +SELECT * FROM foo_replicated_merge_tree; +CREATE TABLE clone_as_foo_replicated_merge_tree_p_y CLONE AS foo_replicated_merge_tree ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/clone_as_foo_replicated_merge_tree_p_y', 'r1') PRIMARY KEY y; -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS foo_memory; +DROP TABLE IF EXISTS clone_as_foo_memory; +DROP TABLE IF EXISTS foo_file; +DROP TABLE IF EXISTS clone_as_foo_file; +DROP TABLE IF EXISTS foo_merge_tree; +DROP TABLE IF EXISTS clone_as_foo_merge_tree; +DROP TABLE IF EXISTS clone_as_foo_merge_tree_p_x; +DROP TABLE IF EXISTS clone_as_foo_merge_tree_p_y; +DROP TABLE IF EXISTS foo_replacing_merge_tree; +DROP TABLE IF EXISTS clone_as_foo_replacing_merge_tree; +DROP TABLE IF EXISTS clone_as_foo_replacing_merge_tree_p_x; +DROP TABLE IF EXISTS clone_as_foo_replacing_merge_tree_p_y; +DROP TABLE IF EXISTS foo_replicated_merge_tree; +DROP TABLE IF EXISTS clone_as_foo_replicated_merge_tree; +DROP TABLE IF EXISTS clone_as_foo_replicated_merge_tree_p_x; +DROP TABLE IF EXISTS clone_as_foo_replicated_merge_tree_p_y; + +-- CLONE AS with a Replicated database +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE_1:Identifier}; + +CREATE DATABASE {CLICKHOUSE_DATABASE_1:Identifier} ENGINE = Replicated('/test/databases/{database}/test_03231', 's1', 'r1'); +USE {CLICKHOUSE_DATABASE_1:Identifier}; + +CREATE TABLE foo_merge_tree (x Int8, y String) ENGINE=MergeTree PRIMARY KEY x; +SHOW CREATE TABLE foo_merge_tree; +INSERT INTO foo_merge_tree VALUES (1, 'a'), (2, 'b'); +SELECT 'from foo_merge_tree'; +SELECT * FROM foo_merge_tree; +CREATE TABLE clone_as_foo_merge_tree CLONE AS foo_merge_tree; -- { serverError SUPPORT_IS_DISABLED } + +DROP TABLE IF EXISTS clone_as_foo_merge_tree; +DROP TABLE IF EXISTS foo_merge_tree; +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE_1:Identifier}; diff --git a/parser/testdata/03231_csv_dont_infer_bool_from_string/ast.json b/parser/testdata/03231_csv_dont_infer_bool_from_string/ast.json new file mode 100644 index 000000000..c53106b09 --- /dev/null +++ b/parser/testdata/03231_csv_dont_infer_bool_from_string/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001466001, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03231_csv_dont_infer_bool_from_string/metadata.json b/parser/testdata/03231_csv_dont_infer_bool_from_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03231_csv_dont_infer_bool_from_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03231_csv_dont_infer_bool_from_string/query.sql b/parser/testdata/03231_csv_dont_infer_bool_from_string/query.sql new file mode 100644 index 000000000..e3cf77249 --- /dev/null +++ b/parser/testdata/03231_csv_dont_infer_bool_from_string/query.sql @@ 
-0,0 +1,4 @@ +set input_format_csv_try_infer_numbers_from_strings = 1; +desc format(CSV, '"42","42.42","True"'); +select * from format(CSV, '"42","42.42","True"'); + diff --git a/parser/testdata/03231_dynamic_incomplete_type_insert_bug/ast.json b/parser/testdata/03231_dynamic_incomplete_type_insert_bug/ast.json new file mode 100644 index 000000000..2790fab27 --- /dev/null +++ b/parser/testdata/03231_dynamic_incomplete_type_insert_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001757397, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03231_dynamic_incomplete_type_insert_bug/metadata.json b/parser/testdata/03231_dynamic_incomplete_type_insert_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03231_dynamic_incomplete_type_insert_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03231_dynamic_incomplete_type_insert_bug/query.sql b/parser/testdata/03231_dynamic_incomplete_type_insert_bug/query.sql new file mode 100644 index 000000000..4e845a665 --- /dev/null +++ b/parser/testdata/03231_dynamic_incomplete_type_insert_bug/query.sql @@ -0,0 +1,12 @@ +SET allow_experimental_dynamic_type = 1; +SET allow_suspicious_types_in_order_by = 1; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (c0 Array(Dynamic)) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO t1 (c0) VALUES ([]); +INSERT INTO t1 (c0) VALUES ([[]]), (['had', 1]); +INSERT INTO t1 (c0) VALUES ([['saw']]); +INSERT INTO t1 (c0) VALUES ([]); +OPTIMIZE TABLE t1 final; +SELECT * FROM t1 ORDER BY ALL; +DROP TABLE t1; + diff --git a/parser/testdata/03231_dynamic_uniq_group_by/ast.json b/parser/testdata/03231_dynamic_uniq_group_by/ast.json new file mode 100644 index 000000000..20b17ec4a --- /dev/null +++ b/parser/testdata/03231_dynamic_uniq_group_by/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001805843, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03231_dynamic_uniq_group_by/metadata.json b/parser/testdata/03231_dynamic_uniq_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03231_dynamic_uniq_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03231_dynamic_uniq_group_by/query.sql b/parser/testdata/03231_dynamic_uniq_group_by/query.sql new file mode 100644 index 000000000..d8869e714 --- /dev/null +++ b/parser/testdata/03231_dynamic_uniq_group_by/query.sql @@ -0,0 +1,17 @@ +set allow_experimental_dynamic_type = 1; +set allow_suspicious_types_in_group_by = 1; +set allow_suspicious_types_in_order_by = 1; +drop table if exists test; +create table test (d Dynamic(max_types=2)) engine=Memory; +insert into test values (42), ('Hello'), ([1,2,3]), ('2020-01-01'); +insert into test values ('Hello'), ([1,2,3]), ('2020-01-01'), (42); +insert into test values ([1,2,3]), ('2020-01-01'), (42), ('Hello'); +insert into test values ('2020-01-01'), (42), ('Hello'), ([1,2,3]); +insert into test values (42); +insert into test values ('Hello'); +insert into test values ([1,2,3]); +insert into test values ('2020-01-01'); + +select uniqExact(d) from test; +select count(), d from test group by d order by d; +drop table test; diff --git 
a/parser/testdata/03231_dynamic_variant_in_order_by_group_by/ast.json b/parser/testdata/03231_dynamic_variant_in_order_by_group_by/ast.json new file mode 100644 index 000000000..55363c530 --- /dev/null +++ b/parser/testdata/03231_dynamic_variant_in_order_by_group_by/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001250268, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03231_dynamic_variant_in_order_by_group_by/metadata.json b/parser/testdata/03231_dynamic_variant_in_order_by_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03231_dynamic_variant_in_order_by_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03231_dynamic_variant_in_order_by_group_by/query.sql b/parser/testdata/03231_dynamic_variant_in_order_by_group_by/query.sql new file mode 100644 index 000000000..dc9171ab6 --- /dev/null +++ b/parser/testdata/03231_dynamic_variant_in_order_by_group_by/query.sql @@ -0,0 +1,166 @@ +set allow_experimental_variant_type=1; +set allow_experimental_dynamic_type=1; + +drop table if exists test; + +create table test (d Dynamic) engine=MergeTree order by d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by array(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by map('str', d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() primary key d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() partition by d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() partition by tuple(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() partition by array(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Dynamic) engine=MergeTree order by tuple() partition by map('str', d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} + +create table test (d Variant(UInt64)) engine=MergeTree order by d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by array(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by map('str', d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() primary key d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() partition by d; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() partition by tuple(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() partition by array(d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} +create table test (d Variant(UInt64)) engine=MergeTree order by tuple() partition by 
map('str', d); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY} + +create table test (d Dynamic) engine=Memory; +insert into test select * from numbers(5); + +SET enable_analyzer=1; + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=0; + +select * from test order by d; -- {serverError ILLEGAL_COLUMN} +select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=1; + +select * from test group by d; -- {serverError ILLEGAL_COLUMN} +select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} +select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} +select map('str', d) from test group by map('str', d); -- {serverError ILLEGAL_COLUMN} +select * from test group by grouping sets ((d), ('str')); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=1; + +select * from test order by d; +select * from test order by tuple(d); +select * from test order by array(d); +select * from test order by map('str', d); + +select * from test group by d order by all; +select * from test group by tuple(d) order by all; +select array(d) from test group by array(d) order by all; +select map('str', d) from test group by map('str', d) order by all; +select * from test group by grouping sets ((d), ('str')) order by all; + +SET enable_analyzer=0; + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=0; + +select * from test order by d; -- {serverError ILLEGAL_COLUMN} +select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=1; + +select * from test group by d; -- {serverError ILLEGAL_COLUMN} +select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} +select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} +select map('str', d) from test group by map('str', d); -- {serverError ILLEGAL_COLUMN} +select * from test group by grouping sets ((d), ('str')); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=1; + +select * from test order by d; +select * from test order by tuple(d); +select * from test order by array(d); +select * from test order by map('str', d); + +select * from test group by d order by all; +select * from test group by tuple(d) order by all; +select array(d) from test group by array(d) order by all; +select map('str', d) from test group by map('str', d) order by all; +select * from test group by grouping sets ((d), ('str')) order by all; + +drop table test; + +create table test (d Variant(UInt64)) engine=Memory; +insert into test select * from numbers(5); + +SET enable_analyzer=1; + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=0; + +select * from test order by d; -- {serverError ILLEGAL_COLUMN} +select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} + +set 
allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=1; + +select * from test group by d; -- {serverError ILLEGAL_COLUMN} +select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} +select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} +select map('str', d) from test group by map('str', d); -- {serverError ILLEGAL_COLUMN} +select * from test group by grouping sets ((d), ('str')); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=1; + +select * from test order by d; +select * from test order by tuple(d); +select * from test order by array(d); +select * from test order by map('str', d); + +select * from test group by d order by all; +select * from test group by tuple(d) order by all; +select array(d) from test group by array(d) order by all; +select map('str', d) from test group by map('str', d) order by all; +select * from test group by grouping sets ((d), ('str')) order by all; + +SET enable_analyzer=0; + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=0; + +select * from test order by d; -- {serverError ILLEGAL_COLUMN} +select * from test order by tuple(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by array(d); -- {serverError ILLEGAL_COLUMN} +select * from test order by map('str', d); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=0; +set allow_suspicious_types_in_order_by=1; + +select * from test group by d; -- {serverError ILLEGAL_COLUMN} +select * from test group by tuple(d); -- {serverError ILLEGAL_COLUMN} +select array(d) from test group by array(d); -- {serverError ILLEGAL_COLUMN} +select map('str', d) from test group by map('str', d); -- {serverError ILLEGAL_COLUMN} +select * from test group by grouping sets ((d), ('str')); -- {serverError ILLEGAL_COLUMN} + +set allow_suspicious_types_in_group_by=1; +set allow_suspicious_types_in_order_by=1; + +select * from test order by d; +select * from test order by tuple(d); +select * from test order by array(d); +select * from test order by map('str', d); + +select * from test group by d order by all; +select * from test group by tuple(d) order by all; +select array(d) from test group by array(d) order by all; +select map('str', d) from test group by map('str', d) order by all; +select * from test group by grouping sets ((d), ('str')) order by all; + +drop table test; diff --git a/parser/testdata/03231_pr_duplicate_announcement/ast.json b/parser/testdata/03231_pr_duplicate_announcement/ast.json new file mode 100644 index 000000000..607144eee --- /dev/null +++ b/parser/testdata/03231_pr_duplicate_announcement/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001123823, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03231_pr_duplicate_announcement/metadata.json b/parser/testdata/03231_pr_duplicate_announcement/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03231_pr_duplicate_announcement/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03231_pr_duplicate_announcement/query.sql b/parser/testdata/03231_pr_duplicate_announcement/query.sql new file mode 100644 index 000000000..cd1fc277f --- /dev/null +++ 
b/parser/testdata/03231_pr_duplicate_announcement/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS test_table SYNC; +CREATE TABLE test_table +( + id UInt64, + value String +) ENGINE=ReplicatedMergeTree('/clickhouse/test/{database}/test_table', 'r1') ORDER BY tuple(); + +INSERT INTO test_table VALUES (0, 'Value_0'), (1, 'Value_1'), (2, 'Value_2'); + +DROP TABLE IF EXISTS test_table_for_in SYNC; +CREATE TABLE test_table_for_in +( + id UInt64 +) ENGINE=ReplicatedMergeTree('/clickhouse/test/{database}/test_table_for_in', 'r1') ORDER BY tuple(); + +INSERT INTO test_table_for_in VALUES (0), (1); + +SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; + +SELECT id, value FROM test_table WHERE id IN (SELECT id FROM test_table_for_in UNION DISTINCT SELECT id FROM test_table_for_in); + +DROP TABLE test_table SYNC; +DROP TABLE test_table_for_in SYNC; diff --git a/parser/testdata/03231_pr_duplicate_announcement_2/ast.json b/parser/testdata/03231_pr_duplicate_announcement_2/ast.json new file mode 100644 index 000000000..ce261dbdc --- /dev/null +++ b/parser/testdata/03231_pr_duplicate_announcement_2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ANIMAL (children 1)" + }, + { + "explain": " Identifier ANIMAL" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00114418, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/03231_pr_duplicate_announcement_2/metadata.json b/parser/testdata/03231_pr_duplicate_announcement_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03231_pr_duplicate_announcement_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03231_pr_duplicate_announcement_2/query.sql b/parser/testdata/03231_pr_duplicate_announcement_2/query.sql new file mode 100644 index 000000000..eec1443ad --- /dev/null +++ b/parser/testdata/03231_pr_duplicate_announcement_2/query.sql @@ -0,0 +1,45 @@ +DROP TABLE IF EXISTS ANIMAL SYNC; + +CREATE TABLE ANIMAL ( ANIMAL Nullable(String) ) ENGINE = ReplicatedMergeTree('/clickhouse/test/{database}/animal', 'r1') ORDER BY tuple(); + +INSERT INTO ANIMAL (ANIMAL) VALUES ('CAT'), ('FISH'), ('DOG'), ('HORSE'), ('BIRD'); + +SET joined_subquery_requires_alias = 0; +SET allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; + +SELECT * +FROM +( + SELECT + x.b AS x, + countDistinct(x.c) AS ANIMAL + FROM + ( + SELECT + a.ANIMAL AS a, + 'CAT' AS b, + c.ANIMAL AS c, + d.ANIMAL AS d + FROM ANIMAL AS a + INNER JOIN ANIMAL AS b ON a.ANIMAL = b.ANIMAL + LEFT JOIN ANIMAL AS c ON b.ANIMAL = c.ANIMAL + RIGHT JOIN + ( + SELECT * + FROM ANIMAL + UNION ALL + SELECT * + FROM ANIMAL + UNION ALL + SELECT * + FROM ANIMAL + ) AS d ON a.ANIMAL = d.ANIMAL + WHERE (d.ANIMAL != 'CAT') AND (c.ANIMAL != 'DOG') AND (b.ANIMAL != 'FISH') + ) AS x + WHERE x.b >= 'CAT' + GROUP BY x.b + HAVING ANIMAL >= 0 +) AS ANIMAL +WHERE ANIMAL.ANIMAL >= 0; + +DROP TABLE ANIMAL SYNC; diff --git a/parser/testdata/03231_pr_reverse_in_order/ast.json b/parser/testdata/03231_pr_reverse_in_order/ast.json new file mode 100644 index 000000000..47bac8ef9 --- /dev/null +++ b/parser/testdata/03231_pr_reverse_in_order/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + 
[ + { + "explain": "DropQuery checks (children 1)" + }, + { + "explain": " Identifier checks" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000945211, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/03231_pr_reverse_in_order/metadata.json b/parser/testdata/03231_pr_reverse_in_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03231_pr_reverse_in_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03231_pr_reverse_in_order/query.sql b/parser/testdata/03231_pr_reverse_in_order/query.sql new file mode 100644 index 000000000..f5a4db448 --- /dev/null +++ b/parser/testdata/03231_pr_reverse_in_order/query.sql @@ -0,0 +1,49 @@ +DROP TABLE IF EXISTS checks SYNC; + +CREATE TABLE checks +( + `check_name` LowCardinality(String), + `check_status` LowCardinality(String), + `check_start_time` DateTime, + `test_name` LowCardinality(String), + `test_status` LowCardinality(String), +) +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/checks', '{replica}') +ORDER BY check_start_time; + +SYSTEM STOP MERGES checks; + +INSERT INTO checks SELECT 'asan', if(number % 2, 'success', 'fail'), toDateTime('2024-06-07 00:00:01') + INTERVAL number HOUR, '02221_parallel_replicas_bug', 'FAIL' from numbers(100); +INSERT INTO checks SELECT 'asan', if(number % 2, 'success', 'fail'), toDateTime('2024-06-06 00:00:02') + INTERVAL number HOUR, '02221_parallel_replicas_bug', 'FAIL' from numbers(100); +INSERT INTO checks SELECT 'asan', if(number % 2, 'success', 'fail'), toDateTime('2024-06-05 00:00:03') + INTERVAL number HOUR, '02221_parallel_replicas_bug', 'FAIL' from numbers(100); + +SELECT trimBoth(explain) +FROM +( + EXPLAIN actions=1 SELECT + check_start_time, + check_name, + test_name, + test_status, + check_status + FROM checks + WHERE 1 AND (test_status != 'SKIPPED') AND (test_status != 'OK') AND (check_status != 'success') AND (test_name ILIKE '%parallel_replicas%') + ORDER BY + check_start_time DESC, + check_name ASC, + test_name ASC + SETTINGS query_plan_read_in_order = 1, optimize_read_in_order = 1, max_parallel_replicas = 1 +) +WHERE explain LIKE '%InReverseOrder%'; + +SELECT check_start_time, check_name, test_name, test_status, check_status +FROM checks +WHERE 1 + AND test_status != 'SKIPPED' + AND test_status != 'OK' + AND check_status != 'success' + AND test_name ilike '%parallel_replicas%' +ORDER BY check_start_time desc, check_name, test_name +SETTINGS query_plan_read_in_order = 1, optimize_read_in_order = 1, allow_experimental_parallel_reading_from_replicas = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3; + +DROP TABLE checks SYNC; diff --git a/parser/testdata/03231_prewhere_conditions_order/ast.json b/parser/testdata/03231_prewhere_conditions_order/ast.json new file mode 100644 index 000000000..8679a7d1d --- /dev/null +++ b/parser/testdata/03231_prewhere_conditions_order/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001392963, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03231_prewhere_conditions_order/metadata.json b/parser/testdata/03231_prewhere_conditions_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03231_prewhere_conditions_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03231_prewhere_conditions_order/query.sql b/parser/testdata/03231_prewhere_conditions_order/query.sql new file mode 100644 index 000000000..6df5b1392 --- /dev/null +++ b/parser/testdata/03231_prewhere_conditions_order/query.sql @@ -0,0 +1,6 @@ +drop table if exists test; +create table test (x UInt32, arr1 Array(UInt32), arr2 Array(UInt32)) engine=MergeTree order by x; +insert into test values (1, [0, 1], [0, 1]), (2, [0], [0, 1]); +select * from test where x == 1 and arrayExists((x1, x2) -> (x1 == x2), arr1, arr2) settings allow_reorder_prewhere_conditions = 0; +drop table test; + diff --git a/parser/testdata/03231_values_respect_format_settings_in_fields_conversion/ast.json b/parser/testdata/03231_values_respect_format_settings_in_fields_conversion/ast.json new file mode 100644 index 000000000..6b4dc6438 --- /dev/null +++ b/parser/testdata/03231_values_respect_format_settings_in_fields_conversion/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001030135, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03231_values_respect_format_settings_in_fields_conversion/metadata.json b/parser/testdata/03231_values_respect_format_settings_in_fields_conversion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03231_values_respect_format_settings_in_fields_conversion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03231_values_respect_format_settings_in_fields_conversion/query.sql b/parser/testdata/03231_values_respect_format_settings_in_fields_conversion/query.sql new file mode 100644 index 000000000..484a16bb2 --- /dev/null +++ b/parser/testdata/03231_values_respect_format_settings_in_fields_conversion/query.sql @@ -0,0 +1,7 @@ +drop table if exists test; +create table test (map Map(String, DateTime)) engine=Memory; +set date_time_input_format='best_effort'; +insert into test values (map('Hello', '01/01/2020')); +select * from test; +drop table test; + diff --git a/parser/testdata/03232_json_uniq_group_by/ast.json b/parser/testdata/03232_json_uniq_group_by/ast.json new file mode 100644 index 000000000..5b247bd16 --- /dev/null +++ b/parser/testdata/03232_json_uniq_group_by/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001577219, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03232_json_uniq_group_by/metadata.json b/parser/testdata/03232_json_uniq_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03232_json_uniq_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03232_json_uniq_group_by/query.sql b/parser/testdata/03232_json_uniq_group_by/query.sql new file mode 100644 index 000000000..329b2db12 --- /dev/null +++ b/parser/testdata/03232_json_uniq_group_by/query.sql @@ -0,0 +1,39 @@ +SET enable_json_type = 1; +drop table if exists test; +create table test (json JSON(a UInt32, max_dynamic_paths=2)) engine=Memory; +insert into test values ('{"a" : 42, "b" : "Hello", "c" : [1, 2, 3], "d" : "2020-01-01", "e" : [{"f" : 
42}]}'); +insert into test values ('{"b" : "Hello", "c" : [1, 2, 3], "d" : "2020-01-01", "e" : [{"f" : 42}], "a" : 42}'); +insert into test values ('{"c" : [1, 2, 3], "d" : "2020-01-01", "e" : [{"f" : 42}], "a" : 42, "b" : "Hello"}'); +insert into test values ('{"d" : "2020-01-01", "e" : [{"f" : 42}], "a" : 42, "b" : "Hello", "c" : [1, 2, 3]}'); +insert into test values ('{"e" : [{"f" : 42}], "a" : 42, "b" : "Hello", "c" : [1, 2, 3], "d" : "2020-01-01"}'); +insert into test values ('{"a" : 42}'), ('{"b" : "Hello"}'), ('{"c" : [1, 2, 3]}'), ('{"d" : "2020-01-01"}'), ('{"e" : [{"f" : 42}]}'); +insert into test values ('{"b" : "Hello"}'), ('{"c" : [1, 2, 3]}'), ('{"d" : "2020-01-01"}'), ('{"e" : [{"f" : 42}]}'), ('{"a" : 42}'); +insert into test values ('{"c" : [1, 2, 3]}'), ('{"d" : "2020-01-01"}'), ('{"e" : [{"f" : 42}]}'), ('{"a" : 42}'), ('{"b" : "Hello"}'); +insert into test values ('{"d" : "2020-01-01"}'), ('{"e" : [{"f" : 42}]}'), ('{"a" : 42}'), ('{"b" : "Hello"}'), ('{"c" : [1, 2, 3]}'); +insert into test values ('{"e" : [{"f" : 42}]}'), ('{"a" : 42}'), ('{"b" : "Hello"}'), ('{"c" : [1, 2, 3]}'), ('{"d" : "2020-01-01"}'); +insert into test values ('{"a" : 42}'); +insert into test values ('{"b" : "Hello"}'); +insert into test values ('{"c" : [1, 2, 3]}'); +insert into test values ('{"d" : "2020-01-01"}'); +insert into test values ('{"e" : [{"f" : 42}]}'); + +insert into test values ('{"a" : 42, "c" : "Hello", "d" : [1, 2, 3], "e" : "2020-01-01", "b" : [{"f" : 42}]}'); +insert into test values ('{"c" : "Hello", "d" : [1, 2, 3], "e" : "2020-01-01", "b" : [{"f" : 42}], "a" : 42}'); +insert into test values ('{"d" : [1, 2, 3], "e" : "2020-01-01", "b" : [{"f" : 42}], "a" : 42, "c" : "Hello"}'); +insert into test values ('{"e" : "2020-01-01", "b" : [{"f" : 42}], "a" : 42, "c" : "Hello", "d" : [1, 2, 3]}'); +insert into test values ('{"b" : [{"f" : 42}], "a" : 42, "c" : "Hello", "d" : [1, 2, 3], "e" : "2020-01-01"}'); +insert into test values ('{"a" : 42}'), ('{"c" : "Hello"}'), ('{"d" : [1, 2, 3]}'), ('{"e" : "2020-01-01"}'), ('{"b" : [{"f" : 42}]}'); +insert into test values ('{"c" : "Hello"}'), ('{"d" : [1, 2, 3]}'), ('{"e" : "2020-01-01"}'), ('{"b" : [{"f" : 42}]}'), ('{"a" : 42}'); +insert into test values ('{"d" : [1, 2, 3]}'), ('{"e" : "2020-01-01"}'), ('{"b" : [{"f" : 42}]}'), ('{"a" : 42}'), ('{"c" : "Hello"}'); +insert into test values ('{"e" : "2020-01-01"}'), ('{"b" : [{"f" : 42}]}'), ('{"a" : 42}'), ('{"c" : "Hello"}'), ('{"d" : [1, 2, 3]}'); +insert into test values ('{"b" : [{"f" : 42}]}'), ('{"a" : 42}'), ('{"c" : "Hello"}'), ('{"d" : [1, 2, 3]}'), ('{"e" : "2020-01-01"}'); +insert into test values ('{"a" : 42}'); +insert into test values ('{"c" : "Hello"}'); +insert into test values ('{"d" : [1, 2, 3]}'); +insert into test values ('{"e" : "2020-01-01"}'); +insert into test values ('{"b" : [{"f" : 42}]}'); + +select uniqExact(json) from test; +select count(), json from test group by json order by toString(json); + +drop table test; diff --git a/parser/testdata/03232_pr_not_ready_set/ast.json b/parser/testdata/03232_pr_not_ready_set/ast.json new file mode 100644 index 000000000..4839145fa --- /dev/null +++ b/parser/testdata/03232_pr_not_ready_set/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SYSTEM query" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001114828, + "rows_read": 1, + "bytes_read": 20 + } +} diff --git a/parser/testdata/03232_pr_not_ready_set/metadata.json 
b/parser/testdata/03232_pr_not_ready_set/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03232_pr_not_ready_set/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03232_pr_not_ready_set/query.sql b/parser/testdata/03232_pr_not_ready_set/query.sql new file mode 100644 index 000000000..371eb2c5d --- /dev/null +++ b/parser/testdata/03232_pr_not_ready_set/query.sql @@ -0,0 +1,19 @@ +SYSTEM FLUSH LOGS query_log; +SELECT + is_initial_query, + count() AS c, + replaceRegexpAll(query, '_data_(\\d+)_(\\d+)', '_data_') AS query +FROM system.query_log +WHERE (event_date >= yesterday()) AND (type = 'QueryFinish') AND (ignore(54, 0, ignore('QueryFinish', 11, toLowCardinality(toLowCardinality(11)), 11, 11, 11), 'QueryFinish', materialize(11), toUInt128(11)) IN ( + SELECT query_id + FROM system.query_log + WHERE (current_database = currentDatabase()) AND (event_date >= yesterday()) AND (type = 'QueryFinish') AND (query LIKE '-- Parallel inner query alone%') +)) +GROUP BY + is_initial_query, + query +ORDER BY + is_initial_query ASC, + c ASC, + query ASC +SETTINGS allow_experimental_parallel_reading_from_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost', parallel_replicas_for_non_replicated_merge_tree=1, parallel_replicas_min_number_of_rows_per_replica=10; diff --git a/parser/testdata/03232_workload_create_and_drop/ast.json b/parser/testdata/03232_workload_create_and_drop/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03232_workload_create_and_drop/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03232_workload_create_and_drop/metadata.json b/parser/testdata/03232_workload_create_and_drop/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03232_workload_create_and_drop/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03232_workload_create_and_drop/query.sql b/parser/testdata/03232_workload_create_and_drop/query.sql new file mode 100644 index 000000000..1d8f97baf --- /dev/null +++ b/parser/testdata/03232_workload_create_and_drop/query.sql @@ -0,0 +1,11 @@ +-- Tags: no-parallel +-- Do not run this test in parallel because the `all` workload might affect the execution of other queries +CREATE OR REPLACE WORKLOAD all; +SELECT name, parent, create_query FROM system.workloads ORDER BY name; +CREATE WORKLOAD IF NOT EXISTS production IN all; +CREATE WORKLOAD development IN all; +SELECT name, parent, create_query FROM system.workloads ORDER BY name; +DROP WORKLOAD IF EXISTS production; +DROP WORKLOAD development; +SELECT name, parent, create_query FROM system.workloads ORDER BY name; +DROP WORKLOAD all; diff --git a/parser/testdata/03232_workloads_and_resources/ast.json b/parser/testdata/03232_workloads_and_resources/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03232_workloads_and_resources/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03232_workloads_and_resources/metadata.json b/parser/testdata/03232_workloads_and_resources/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03232_workloads_and_resources/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03232_workloads_and_resources/query.sql b/parser/testdata/03232_workloads_and_resources/query.sql new file mode 100644 index 000000000..cdff5acd5 --- 
/dev/null +++ b/parser/testdata/03232_workloads_and_resources/query.sql @@ -0,0 +1,70 @@ +-- Tags: no-parallel +-- Do not run this test in parallel because the `all` workload might affect the execution of other queries + +-- Test simple resource and workload hierarchy creation +create resource 03232_write (write disk 03232_fake_disk); +create resource 03232_read (read disk 03232_fake_disk); +create resource 03232_invalid_mix (read disk 03232_another_fake_disk, master thread); -- {clientError BAD_ARGUMENTS} +create workload all settings max_io_requests = 100 for 03232_write, max_io_requests = 200 for 03232_read; +create workload admin in all settings priority = 0; +create workload production in all settings priority = 1, weight = 9; +create workload development in all settings priority = 1, weight = 1; + +-- Test that illegal actions are not allowed +create workload another_root; -- {serverError BAD_ARGUMENTS} +create workload self_ref in self_ref; -- {serverError BAD_ARGUMENTS} +drop workload all; -- {serverError BAD_ARGUMENTS} +create workload invalid in 03232_write; -- {serverError BAD_ARGUMENTS} +create workload invalid in all settings priority = 0 for all; -- {serverError BAD_ARGUMENTS} +create workload invalid in all settings priority = 'invalid_value'; -- {serverError BAD_GET} +create workload invalid in all settings weight = 0; -- {serverError INVALID_SCHEDULER_NODE} +create workload invalid in all settings weight = -1; -- {serverError BAD_ARGUMENTS} +create workload invalid in all settings max_speed = -1; -- {serverError BAD_ARGUMENTS} +create workload invalid in all settings max_bytes_inflight = -1; -- {serverError BAD_ARGUMENTS} +create workload invalid in all settings unknown_setting = 42; -- {serverError BAD_ARGUMENTS} +create workload invalid in all settings max_io_requests = -1; -- {serverError BAD_ARGUMENTS} +create workload invalid in all settings max_io_requests = 1.5; -- {serverError BAD_GET} +create or replace workload all in production; -- {serverError BAD_ARGUMENTS} + +-- Test CREATE OR REPLACE WORKLOAD +create or replace workload all settings max_io_requests = 200 for 03232_write, max_io_requests = 100 for 03232_read, max_concurrent_threads = 16, max_concurrent_threads_ratio_to_cores = 2.5; +create or replace workload admin in all settings priority = 1; +create or replace workload admin in all settings priority = 2; +create or replace workload admin in all settings priority = 0; +create or replace workload production in all settings priority = 1, weight = 90; +create or replace workload production in all settings priority = 0, weight = 9; +create or replace workload production in all settings priority = 2, weight = 9; +create or replace workload development in all settings priority = 1; +create or replace workload development in all settings priority = 0; +create or replace workload development in all settings priority = 2; + +-- Test CREATE OR REPLACE RESOURCE +create or replace resource 03232_write (write disk 03232_fake_disk_2); +create or replace resource 03232_read (read disk 03232_fake_disk_2); + +-- Test update settings with CREATE OR REPLACE WORKLOAD +create or replace workload production in all settings priority = 1, weight = 9, max_io_requests = 100; +create or replace workload development in all settings priority = 1, weight = 1, max_io_requests = 10; +create or replace workload production in all settings priority = 1, weight = 9, max_bytes_inflight = 100000; +create or replace workload development in all settings priority = 1, weight = 1, max_bytes_inflight = 
10000; +create or replace workload production in all settings priority = 1, weight = 9, max_speed = 1000000; +create or replace workload development in all settings priority = 1, weight = 1, max_speed = 100000; +create or replace workload production in all settings priority = 1, weight = 9, max_speed = 1000000, max_burst = 10000000; +create or replace workload development in all settings priority = 1, weight = 1, max_speed = 100000, max_burst = 1000000; +create or replace workload all settings max_bytes_inflight = 1000000, max_speed = 100000 for 03232_write, max_speed = 200000 for 03232_read; +create or replace workload all settings max_io_requests = 100 for 03232_write, max_io_requests = 200 for 03232_read; +create or replace workload production in all settings priority = 1, weight = 9; +create or replace workload development in all settings priority = 1, weight = 1; + +-- Test change parent with CREATE OR REPLACE WORKLOAD +create or replace workload development in production settings priority = 1, weight = 1; +create or replace workload development in admin settings priority = 1, weight = 1; +create or replace workload development in all settings priority = 1, weight = 1; + +-- Clean up +drop workload if exists production; +drop workload if exists development; +drop workload if exists admin; +drop workload if exists all; +drop resource if exists 03232_write; +drop resource if exists 03232_read; diff --git a/parser/testdata/03233_dynamic_in_functions/ast.json b/parser/testdata/03233_dynamic_in_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03233_dynamic_in_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03233_dynamic_in_functions/metadata.json b/parser/testdata/03233_dynamic_in_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03233_dynamic_in_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03233_dynamic_in_functions/query.sql b/parser/testdata/03233_dynamic_in_functions/query.sql new file mode 100644 index 000000000..6e1f00d5b --- /dev/null +++ b/parser/testdata/03233_dynamic_in_functions/query.sql @@ -0,0 +1,358 @@ +-- Tags: no-fasttest + +set allow_experimental_dynamic_type = 1; + +drop table if exists test; +create table test (x UInt64, d Dynamic) engine=Memory; +insert into test select number, number from numbers(4); + +select d + 1 as res, toTypeName(res) from test; +select 1 + d as res, toTypeName(res) from test; +select d + x as res, toTypeName(res) from test; +select x + d as res, toTypeName(res) from test; +select d + d as res, toTypeName(res) from test; +select d + 1 + d as res, toTypeName(res) from test; +select d + x + d as res, toTypeName(res) from test; +select d + NULL as res, toTypeName(res) from test; + +select d < 2 as res, toTypeName(res) from test; +select d < d as res, toTypeName(res) from test; +select d < x as res, toTypeName(res) from test; +select d > 2 as res, toTypeName(res) from test; +select d > d as res, toTypeName(res) from test; +select d > x as res, toTypeName(res) from test; +select d = 2 as res, toTypeName(res) from test; +select d = d as res, toTypeName(res) from test; +select d = x as res, toTypeName(res) from test; +select d = NULL as res, toTypeName(res) from test; + +select * from test where d < 2; +select * from test where d > 2; +select * from test where d = 2; +select * from test where d < d; +select * from test where d > d; +select * from test where d = d; +select * 
from test where d < x; +select * from test where d > x; +select * from test where d = x; +select * from test where d = NULL; + +select exp2(d) as res, toTypeName(res) from test; +select sin(d) as res, toTypeName(res) from test; +select cos(d) as res, toTypeName(res) from test; +select tan(d) as res, toTypeName(res) from test; +select mortonEncode(d) as res, toTypeName(res) from test; +select hilbertEncode(d) as res, toTypeName(res) from test; +select bitmaskToList(d) as res, toTypeName(res) from test; +select bitPositionsToArray(d) as res, toTypeName(res) from test; +select isFinite(d) as res, toTypeName(res) from test; +select sipHash64(d) as res, toTypeName(res) from test; +select sipHash128(d) as res, toTypeName(res) from test; +select intHash32(d) as res, toTypeName(res) from test; +select intHash64(d) as res, toTypeName(res) from test; +select h3CellAreaM2(d) as res, toTypeName(res) from test; +select h3CellAreaRads2(d) as res, toTypeName(res) from test; +select h3Distance(d, d) as res, toTypeName(res) from test; +select sqid(d) as res, toTypeName(res) from test; + +select sipHash64(d, x) as res, toTypeName(res) from test; + +select 'str_' || d as res, toTypeName(res) from test; +select 'str_' || d || x as res, toTypeName(res) from test; +select 'str_' || d || d as res, toTypeName(res) from test; +select 'str_' || d || x || d as res, toTypeName(res) from test; +select d || NULL as res, toTypeName(res) from test; +select 'str_' || d || NULL as res, toTypeName(res) from test; + + +drop table test; +create table test (x Nullable(UInt64), d Dynamic) engine=Memory; +insert into test select number % 2 ? NULL : number, number from numbers(4); + +select d + x as res, toTypeName(res) from test; +select x + d as res, toTypeName(res) from test; +select d + x + d as res, toTypeName(res) from test; + +select d < x as res, toTypeName(res) from test; +select d > x as res, toTypeName(res) from test; +select d = x as res, toTypeName(res) from test; + +select * from test where d < x; +select * from test where d > x; +select * from test where d = x; + +select sipHash64(d, x) as res, toTypeName(res) from test; + +select 'str_' || d || x as res, toTypeName(res) from test; +select 'str_' || d || x || d as res, toTypeName(res) from test; + +drop table test; +create table test (x String, d Dynamic) engine=Memory; +insert into test select 'str_' || number, 'str_' || number from numbers(4); + +select d < 'str_2' as res, toTypeName(res) from test; +select d < d as res, toTypeName(res) from test; +select d < x as res, toTypeName(res) from test; +select d > 'str_2' as res, toTypeName(res) from test; +select d > d as res, toTypeName(res) from test; +select d > x as res, toTypeName(res) from test; +select d = 'str_2' as res, toTypeName(res) from test; +select d = d as res, toTypeName(res) from test; +select d = x as res, toTypeName(res) from test; +select d = NULL as res, toTypeName(res) from test; + +select * from test where d < 'str_2'; +select * from test where d > 'str_2'; +select * from test where d = 'str_2'; +select * from test where d < d; +select * from test where d > d; +select * from test where d = d; +select * from test where d < x; +select * from test where d > x; +select * from test where d = x; +select * from test where d = NULL; + +select upper(d) as res, toTypeName(res) from test; +select appendTrailingCharIfAbsent(d, 'd') as res, toTypeName(res) from test; +select match(d, 'str') as res, toTypeName(res) from test; +select concatWithSeparator('|', d, d) as res, toTypeName(res) from test; +select 
extract(d, '([0-3])') as res, toTypeName(res) from test; +select startsWith(d, 'str') as res, toTypeName(res) from test; +select length(d) as res, toTypeName(res) from test; +select replaceAll(d, 'str', 'a') as res, toTypeName(res) from test; +select repeat(d, 2) as res, toTypeName(res) from test; +select substring(d, 1, 3) as res, toTypeName(res) from test; + +truncate table test; +insert into test select 'str_' || number, toFixedString('str_' || number, 5) from numbers(4); + +select upper(d) as res, toTypeName(res) from test; +select match(d, 'str') as res, toTypeName(res) from test; +select concatWithSeparator('|', d, d) as res, toTypeName(res) from test; +select startsWith(d, 'str') as res, toTypeName(res) from test; +select length(d) as res, toTypeName(res) from test; +select replaceAll(d, 'str', 'a') as res, toTypeName(res) from test; + +drop table test; +create table test (x Nullable(String), d Dynamic) engine=Memory; +insert into test select number % 2 ? NULL : 'str_' || number, 'str_' || number from numbers(4); + +select d < x as res, toTypeName(res) from test; +select d > x as res, toTypeName(res) from test; +select d = x as res, toTypeName(res) from test; + +select * from test where d < x; +select * from test where d > x; +select * from test where d = x; + +select sipHash64(d, x) as res, toTypeName(res) from test; + +truncate table test; +insert into test select number % 2 ? NULL : 'str_' || number, toFixedString('str_' || number, 5) from numbers(4); +select upper(d) as res, toTypeName(res) from test; + +drop table test; + +create table test (x UInt64, d Dynamic) engine=Memory; +insert into test select number, number % 2 ? NULL : number from numbers(4); + +select d + 1 as res, toTypeName(res) from test; +select 1 + d as res, toTypeName(res) from test; +select d + x as res, toTypeName(res) from test; +select x + d as res, toTypeName(res) from test; +select d + d as res, toTypeName(res) from test; +select d + 1 + d as res, toTypeName(res) from test; +select d + x + d as res, toTypeName(res) from test; +select d + NULL as res, toTypeName(res) from test; + +select d < 2 as res, toTypeName(res) from test; +select d < d as res, toTypeName(res) from test; +select d < x as res, toTypeName(res) from test; +select d > 2 as res, toTypeName(res) from test; +select d > d as res, toTypeName(res) from test; +select d > x as res, toTypeName(res) from test; +select d = 2 as res, toTypeName(res) from test; +select d = d as res, toTypeName(res) from test; +select d = x as res, toTypeName(res) from test; +select d = NULL as res, toTypeName(res) from test; + +select * from test where d < 2; +select * from test where d > 2; +select * from test where d = 2; +select * from test where d < d; +select * from test where d > d; +select * from test where d = d; +select * from test where d < x; +select * from test where d > x; +select * from test where d = x; +select * from test where d = NULL; + +select sipHash64(d) as res, toTypeName(res) from test; +select sipHash64(d, d) as res, toTypeName(res) from test; +select sipHash64(d, x) as res, toTypeName(res) from test; + +drop table test; +create table test (x Nullable(UInt64), d Dynamic) engine=Memory; +insert into test select number % 2 ? NULL : number, number % 2 ? 
NULL : number from numbers(4); + +select d + x as res, toTypeName(res) from test; +select x + d as res, toTypeName(res) from test; +select d + x + d as res, toTypeName(res) from test; + +select d < x as res, toTypeName(res) from test; +select d > x as res, toTypeName(res) from test; +select d = x as res, toTypeName(res) from test; + +select * from test where d < x; +select * from test where d > x; +select * from test where d = x; + +select sipHash64(d, x) as res, toTypeName(res) from test; + +truncate table test; +insert into test select number % 2 ? NULL : number, NULL from numbers(4); + +select d + x as res, toTypeName(res) from test; +select x + d as res, toTypeName(res) from test; +select d + x + d as res, toTypeName(res) from test; + +select d < x as res, toTypeName(res) from test; +select d > x as res, toTypeName(res) from test; +select d = x as res, toTypeName(res) from test; + +select * from test where d < x; +select * from test where d > x; +select * from test where d = x; + +select sipHash64(d, x) as res, toTypeName(res) from test; + +drop table test; +create table test (x String, d Dynamic) engine=Memory; +insert into test select 'str_' || number, number % 2 ? NULL : 'str_' || number from numbers(4); + +select d < 'str_2' as res, toTypeName(res) from test; +select d < d as res, toTypeName(res) from test; +select d < x as res, toTypeName(res) from test; +select d > 'str_2' as res, toTypeName(res) from test; +select d > d as res, toTypeName(res) from test; +select d > x as res, toTypeName(res) from test; +select d = 'str_2' as res, toTypeName(res) from test; +select d = d as res, toTypeName(res) from test; +select d = x as res, toTypeName(res) from test; +select d = NULL as res, toTypeName(res) from test; + +select * from test where d < 'str_2'; +select * from test where d > 'str_2'; +select * from test where d = 'str_2'; +select * from test where d < d; +select * from test where d > d; +select * from test where d = d; +select * from test where d < x; +select * from test where d > x; +select * from test where d = x; +select * from test where d = NULL; + +select upper(d) as res, toTypeName(res) from test; + +truncate table test; +insert into test select 'str_' || number, number % 2 ? NULL : toFixedString('str_' || number, 5) from numbers(4); +select upper(d) as res, toTypeName(res) from test; + +drop table test; + +create table test (x Nullable(String), d Dynamic) engine=Memory; +insert into test select number % 2 ? NULL : 'str_' || number, 'str_' || number from numbers(4); + +select d < x as res, toTypeName(res) from test; +select d > x as res, toTypeName(res) from test; +select d = x as res, toTypeName(res) from test; + +select * from test where d < x; +select * from test where d > x; +select * from test where d = x; + +select sipHash64(d, x) as res, toTypeName(res) from test; + +truncate table test; + +insert into test select number % 2 ? NULL : 'str_' || number, number % 2 ? 
NULL : toFixedString('str_' || number, 5) from numbers(4); +select upper(d) as res, toTypeName(res) from test; + +drop table test; + +create table test (x UInt64, d Dynamic(max_types=5)) engine=Memory; +insert into test values (0, NULL), (1, 1::Int8), (2, 2::UInt8), (3, 3::Int16), (4, 4::UInt16), (5, 5::Int32), (6, 6::UInt32), (7, 7::Int64), (8, 8::UInt64), (9, 9::Float32), (10, 10::Float64); + +select d + 1 as res, toTypeName(res), dynamicType(res) from test; +select 1 + d as res, toTypeName(res), dynamicType(res) from test; +select d + x as res, toTypeName(res), dynamicType(res) from test; +select x + d as res, toTypeName(res), dynamicType(res) from test; +select d + d as res, toTypeName(res), dynamicType(res) from test; +select d + 1 + d as res, toTypeName(res), dynamicType(res) from test; +select d + x + d as res, toTypeName(res), dynamicType(res) from test; +select d + NULL as res, toTypeName(res), dynamicType(res) from test; + +select d < 5 as res, toTypeName(res) from test; +select d < d as res, toTypeName(res) from test; +select d < x as res, toTypeName(res) from test; +select d > 5 as res, toTypeName(res) from test; +select d > d as res, toTypeName(res) from test; +select d > x as res, toTypeName(res) from test; +select d = 5 as res, toTypeName(res) from test; +select d = d as res, toTypeName(res) from test; +select d = x as res, toTypeName(res) from test; +select d = NULL as res, toTypeName(res) from test; + +select * from test where d < 5; +select * from test where d > 5; +select * from test where d = 5; +select * from test where d < d; +select * from test where d > d; +select * from test where d = d; +select * from test where d < x; +select * from test where d > x; +select * from test where d = x; +select * from test where d = NULL; + +select sipHash64(d) as res, toTypeName(res) from test; +select sipHash64(d, d) as res, toTypeName(res) from test; +select sipHash64(d, x) as res, toTypeName(res) from test; + +drop table test; +create table test (x Nullable(UInt64), d Dynamic) engine=Memory; +insert into test values (0, NULL), (NULL, 1::Int8), (2, 2::UInt8), (NULL, 3::Int16), (4, 4::UInt16), (NULL, 5::Int32), (6, 6::UInt32), (NULL, 7::Int64), (8, 8::UInt64), (NULL, 9::Float32), (10, 10::Float64); + +select d + x as res, toTypeName(res) from test; +select x + d as res, toTypeName(res) from test; +select d + x + d as res, toTypeName(res) from test; + +select d < x as res, toTypeName(res) from test; +select d > x as res, toTypeName(res) from test; +select d = x as res, toTypeName(res) from test; + +select * from test where d < x; +select * from test where d > x; +select * from test where d = x; + +select sipHash64(d, x) as res, toTypeName(res) from test; + +drop table test; + +create table test (d Dynamic) engine=Memory; +insert into test values ('str_0'), ('str_1'::FixedString(5)); + +select upper(d) as res, toTypeName(res) from test; + +truncate table test; +insert into test select range(number + 1) from numbers(4); +select d[1] as res, toTypeName(res) from test; + +truncate table test; +insert into test values ('str'), ([1, 2, 3]), (42::Int32), ('2020-01-01'::Date); + +select d + 1 from test; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select length(d) from test; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select d[1] from test; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select sipHash64(d) from test; +select sipHash64(d, 42) from test; +select sipHash64(d, d) from test; diff --git a/parser/testdata/03234_enable_secure_identifiers/ast.json 
b/parser/testdata/03234_enable_secure_identifiers/ast.json new file mode 100644 index 000000000..5001209b6 --- /dev/null +++ b/parser/testdata/03234_enable_secure_identifiers/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_foo_# (children 1)" + }, + { + "explain": " Identifier test_foo_#" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001418635, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03234_enable_secure_identifiers/metadata.json b/parser/testdata/03234_enable_secure_identifiers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03234_enable_secure_identifiers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03234_enable_secure_identifiers/query.sql b/parser/testdata/03234_enable_secure_identifiers/query.sql new file mode 100644 index 000000000..7381ffb78 --- /dev/null +++ b/parser/testdata/03234_enable_secure_identifiers/query.sql @@ -0,0 +1,111 @@ +DROP TABLE IF EXISTS `test_foo_#`; +CREATE TABLE `test_foo_#` ( + `date` Date, + `town` LowCardinality(String), +) +ENGINE = MergeTree +PRIMARY KEY (town, date) +PARTITION BY toYear(date) +COMMENT 'test' -- ends the ENGINE definition, so SETTINGS is parsed at the query level +SETTINGS + enforce_strict_identifier_format=true; -- { serverError BAD_ARGUMENTS } +DROP TABLE IF EXISTS `test_foo_#`; + + +DROP TABLE IF EXISTS test_foo; +CREATE TABLE test_foo ( + `insecure_#` Int8, + `date` Date, + `town` LowCardinality(String), +) +ENGINE = MergeTree +PRIMARY KEY (town, date) +PARTITION BY toYear(date) +COMMENT 'test' -- ends the ENGINE definition, so SETTINGS is parsed at the query level +SETTINGS + enforce_strict_identifier_format=true; -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS test_foo; +CREATE TABLE test_foo ( + `insecure_'` Int8, + `date` Date, + `town` LowCardinality(String), +) +ENGINE = MergeTree +PRIMARY KEY (town, date) +PARTITION BY toYear(date) +COMMENT 'test' -- ends the ENGINE definition, so SETTINGS is parsed at the query level +SETTINGS + enforce_strict_identifier_format=true; -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS test_foo; +CREATE TABLE test_foo ( + `insecure_"` Int8, + `date` Date, + `town` LowCardinality(String), +) +ENGINE = MergeTree +PRIMARY KEY (town, date) +PARTITION BY toYear(date) +COMMENT 'test' -- ends the ENGINE definition, so SETTINGS is parsed at the query level +SETTINGS + enforce_strict_identifier_format=true; -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS test_foo; +CREATE TABLE test_foo ( + `secure_123` Int8, + `date` Date, + `town` LowCardinality(String), +) +ENGINE = MergeTree +PRIMARY KEY (town, date) +PARTITION BY toYear(date) +COMMENT 'test' -- ends the ENGINE definition, so SETTINGS is parsed at the query level +SETTINGS + enforce_strict_identifier_format=true; + +SHOW CREATE TABLE test_foo +SETTINGS + enforce_strict_identifier_format=true; + +DROP TABLE IF EXISTS test_foo; +CREATE TABLE test_foo ( + `123_secure` Int8, + `date` Date, + `town` LowCardinality(String), +) +ENGINE = MergeTree +PRIMARY KEY (town, date) +PARTITION BY toYear(date) +COMMENT 'test' -- ends the ENGINE definition, so SETTINGS is parsed at the query level +SETTINGS + enforce_strict_identifier_format=true; + +SHOW CREATE TABLE test_foo +SETTINGS + enforce_strict_identifier_format=true; + +-- CREATE TABLE without `enforce_strict_identifier_format` +DROP TABLE IF EXISTS test_foo; +CREATE TABLE `test_foo` ( + 
`insecure_$` Int8, + `date` Date, + `town` LowCardinality(String), +) +ENGINE = MergeTree +PRIMARY KEY (town, date) +PARTITION BY toYear(date); +-- Then SHOW CREATE .. with `enforce_strict_identifier_format` +-- While the result contains insecure identifiers (`insecure_$`), the `SHOW CREATE TABLE ...` query does not have any. So the query is expected to succeed. +SHOW CREATE TABLE test_foo +SETTINGS + enforce_strict_identifier_format=true; + +DROP TABLE IF EXISTS test_foo; + +-- SHOW CREATE .. query contains an insecure identifier (`test_foo$`) with `enforce_strict_identifier_format` +SHOW CREATE TABLE `test_foo$` +SETTINGS + enforce_strict_identifier_format=true; -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS test_foo; diff --git a/parser/testdata/03234_evaluate_constant_analyzer/ast.json b/parser/testdata/03234_evaluate_constant_analyzer/ast.json new file mode 100644 index 000000000..d25d3ca8d --- /dev/null +++ b/parser/testdata/03234_evaluate_constant_analyzer/ast.json @@ -0,0 +1,244 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 3)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (alias n1) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function cityHash64 (children 1)" + }, + { + "explain": " ExpressionList (children 7)" + }, + { + "explain": " Function globalIn (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toNullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal Tuple_(NULL, Int64_-2147483648, Int64_-9223372036854775808)" + }, + { + "explain": " Literal Float64_nan" + }, + { + "explain": " Literal UInt64_1024" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_1.000100016593933" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (alias n2) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " TableJoin" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (alias n3) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_6" + }, + { + "explain": " TableJoin" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + 
}, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Function cityHash64 (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal Float64_inf" + }, + { + "explain": " Literal Int64_-2147483648" + }, + { + "explain": " Function toLowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_16" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal Float64_10.000100135803223" + }, + { + "explain": " Function cityHash64 (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal Float64_1.1754943508222875e-38" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal UInt64_2147483647" + }, + { + "explain": " Function cityHash64 (children 1)" + }, + { + "explain": " ExpressionList (children 7)" + }, + { + "explain": " Literal Float64_0" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal Float64_10000000000" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Set" + } + ], + + "rows": 74, + + "statistics": + { + "elapsed": 0.001906967, + "rows_read": 74, + "bytes_read": 3001 + } +} diff --git a/parser/testdata/03234_evaluate_constant_analyzer/metadata.json b/parser/testdata/03234_evaluate_constant_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03234_evaluate_constant_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03234_evaluate_constant_analyzer/query.sql b/parser/testdata/03234_evaluate_constant_analyzer/query.sql new file mode 100644 index 000000000..8ddd4baaf --- /dev/null +++ b/parser/testdata/03234_evaluate_constant_analyzer/query.sql @@ -0,0 +1 @@ +SELECT count() FROM numbers(cityHash64(materialize(toLowCardinality(toNullable(0))) GLOBAL IN (NULL, -2147483648, -9223372036854775808), nan, 1024, NULL, materialize(1.000100016593933), 0, NULL), 4) AS n1, numbers(3) AS n2, numbers(6) AS n3 GROUP BY (NULL, cityHash64(inf, -2147483648, toLowCardinality(16), NULL, 10.000100135803223), cityHash64(1.1754943508222875e-38, NULL, NULL, NULL), 2147483647), cityHash64(0., 3, NULL, NULL, 10000000000., NULL, NULL) SETTINGS enable_analyzer = 1; -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03234_get_setting_or_default/ast.json b/parser/testdata/03234_get_setting_or_default/ast.json new file mode 100644 index 000000000..927d776ed --- /dev/null +++ b/parser/testdata/03234_get_setting_or_default/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001457961, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03234_get_setting_or_default/metadata.json b/parser/testdata/03234_get_setting_or_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03234_get_setting_or_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03234_get_setting_or_default/query.sql b/parser/testdata/03234_get_setting_or_default/query.sql new file mode 100644 index 000000000..3954e9fe8 --- /dev/null +++ 
b/parser/testdata/03234_get_setting_or_default/query.sql @@ -0,0 +1,24 @@ +SET custom_a = 'value_a'; +SET custom_b = 'value_b'; +SET custom_c = null; +SET custom_d = 5; + +SELECT getSettingOrDefault('custom_a', 'default_a'); +SELECT getSettingOrDefault('custom_b', 'default_b'); +SELECT getSettingOrDefault('custom_c', 'default_c'); +SELECT getSettingOrDefault('custom_d', 'default_d'); + +SELECT getSetting('custom_e'); -- { serverError UNKNOWN_SETTING } + +SELECT getSettingOrDefault('custom_e', 'default_e'); +SELECT getSettingOrDefault('custom_e', 500); +SELECT getSettingOrDefault('custom_e', null); +SELECT isNull(getSettingOrDefault('custom_e', null)); + +SELECT getSettingOrDefault('custom_e'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT getSettingOrDefault(115, 'name should be string'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT count(*) FROM numbers(10) WHERE number = getSettingOrDefault('custom_e', 5); + +SET custom_e_backup = 'backup'; +SELECT getSettingOrDefault('custom_e', getSetting('custom_e_backup')); diff --git a/parser/testdata/03235_groupArray_string_consistency/ast.json b/parser/testdata/03235_groupArray_string_consistency/ast.json new file mode 100644 index 000000000..09a1aa19c --- /dev/null +++ b/parser/testdata/03235_groupArray_string_consistency/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 3)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration st (children 1)" + }, + { + "explain": " DataType FixedString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_54" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001324834, + "rows_read": 12, + "bytes_read": 422 + } +} diff --git a/parser/testdata/03235_groupArray_string_consistency/metadata.json b/parser/testdata/03235_groupArray_string_consistency/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03235_groupArray_string_consistency/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03235_groupArray_string_consistency/query.sql b/parser/testdata/03235_groupArray_string_consistency/query.sql new file mode 100644 index 000000000..618ec6f83 --- /dev/null +++ b/parser/testdata/03235_groupArray_string_consistency/query.sql @@ -0,0 +1,10 @@ +CREATE TABLE t (st FixedString(54)) ENGINE=MergeTree ORDER BY (); + +INSERT INTO t VALUES +('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRTUVWXYZ'), +('\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'), +('IIIIIIIIII\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0'); + +WITH (SELECT groupConcat(',')(st) FROM t) AS a, + (SELECT groupConcat(',')(st :: String) FROM t) AS b +SELECT equals(a, b); diff --git a/parser/testdata/03236_create_query_ttl_where/ast.json b/parser/testdata/03236_create_query_ttl_where/ast.json new file mode 100644 index 000000000..39b566bf1 --- /dev/null +++ b/parser/testdata/03236_create_query_ttl_where/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + 
"data": + [ + { + "explain": "DropQuery ttl (children 1)" + }, + { + "explain": " Identifier ttl" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001345218, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/03236_create_query_ttl_where/metadata.json b/parser/testdata/03236_create_query_ttl_where/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03236_create_query_ttl_where/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03236_create_query_ttl_where/query.sql b/parser/testdata/03236_create_query_ttl_where/query.sql new file mode 100644 index 000000000..4256b53cb --- /dev/null +++ b/parser/testdata/03236_create_query_ttl_where/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS ttl; + +CREATE TABLE ttl +( + `a` UInt32, + `timestamp` DateTime +) +ENGINE = MergeTree +ORDER BY a +TTL timestamp + toIntervalSecond(2) WHERE a IN ( + SELECT number + FROM system.numbers + LIMIT 100000 +); + +SHOW CREATE ttl; +DROP TABLE ttl; diff --git a/parser/testdata/03236_keeper_map_engine_parameters/ast.json b/parser/testdata/03236_keeper_map_engine_parameters/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03236_keeper_map_engine_parameters/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03236_keeper_map_engine_parameters/metadata.json b/parser/testdata/03236_keeper_map_engine_parameters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03236_keeper_map_engine_parameters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03236_keeper_map_engine_parameters/query.sql b/parser/testdata/03236_keeper_map_engine_parameters/query.sql new file mode 100644 index 000000000..bb60accd7 --- /dev/null +++ b/parser/testdata/03236_keeper_map_engine_parameters/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-ordinary-database, no-fasttest + +DROP TABLE IF EXISTS 03236_keeper_map_engine_parameters; + +CREATE TABLE 03236_keeper_map_engine_parameters (key UInt64, value UInt64) Engine=KeeperMap('/' || currentDatabase() || '/test2417') PRIMARY KEY(key); +SHOW CREATE 03236_keeper_map_engine_parameters; + +DROP TABLE 03236_keeper_map_engine_parameters; diff --git a/parser/testdata/03236_squashing_high_memory/ast.json b/parser/testdata/03236_squashing_high_memory/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03236_squashing_high_memory/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03236_squashing_high_memory/metadata.json b/parser/testdata/03236_squashing_high_memory/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03236_squashing_high_memory/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03236_squashing_high_memory/query.sql b/parser/testdata/03236_squashing_high_memory/query.sql new file mode 100644 index 000000000..57a1e80f4 --- /dev/null +++ b/parser/testdata/03236_squashing_high_memory/query.sql @@ -0,0 +1,29 @@ +-- Tags: no-fasttest, no-asan, no-tsan, no-msan, no-ubsan, no-random-settings, no-random-merge-tree-settings +-- reason: test requires too many rows to read + +SET max_rows_to_read = '501G'; +SET enable_lazy_columns_replication = 0; + +DROP TABLE IF EXISTS id_values; + +DROP TABLE IF EXISTS test_table; + +CREATE TABLE id_values ENGINE MergeTree ORDER BY id1 AS + SELECT arrayJoin(range(500000)) AS id1, arrayJoin(range(1000)) AS id2; + +SET 
max_memory_usage = '1G'; +SET query_plan_join_swap_table = 'false'; + +CREATE TABLE test_table ENGINE MergeTree ORDER BY id AS +SELECT id_values.id1 AS id, + string_values.string_val1 AS string_val1, + string_values.string_val2 AS string_val2 +FROM id_values + JOIN (SELECT arrayJoin(range(10)) AS id1, + 'qwe' AS string_val1, + 'asd' AS string_val2) AS string_values + ON id_values.id1 = string_values.id1 + SETTINGS join_algorithm = 'hash'; + +DROP TABLE IF EXISTS id_values; +DROP TABLE IF EXISTS test_table; diff --git a/parser/testdata/03236_test_zero_field_decimal/ast.json b/parser/testdata/03236_test_zero_field_decimal/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03236_test_zero_field_decimal/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03236_test_zero_field_decimal/metadata.json b/parser/testdata/03236_test_zero_field_decimal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03236_test_zero_field_decimal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03236_test_zero_field_decimal/query.sql b/parser/testdata/03236_test_zero_field_decimal/query.sql new file mode 100644 index 000000000..047b255b7 --- /dev/null +++ b/parser/testdata/03236_test_zero_field_decimal/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS users_03236_zero; +CREATE TABLE users_03236_zero (uid Int16, name String, num Int16) ENGINE=Memory; + +INSERT INTO users_03236_zero VALUES (1231, 'John', 33); +INSERT INTO users_03236_zero VALUES (6666, 'John', 48); +INSERT INTO users_03236_zero VALUES (8888, 'Alice', 50); + +select sum(num/toDecimal256(1000, 18)) from users_03236_zero; diff --git a/parser/testdata/03237_create_table_select_as_with_recursive/ast.json b/parser/testdata/03237_create_table_select_as_with_recursive/ast.json new file mode 100644 index 000000000..d861acbc2 --- /dev/null +++ b/parser/testdata/03237_create_table_select_as_with_recursive/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001136459, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03237_create_table_select_as_with_recursive/metadata.json b/parser/testdata/03237_create_table_select_as_with_recursive/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03237_create_table_select_as_with_recursive/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03237_create_table_select_as_with_recursive/query.sql b/parser/testdata/03237_create_table_select_as_with_recursive/query.sql new file mode 100644 index 000000000..bd8c19cc4 --- /dev/null +++ b/parser/testdata/03237_create_table_select_as_with_recursive/query.sql @@ -0,0 +1,10 @@ +drop table if exists t; +SET enable_analyzer = 1; +create table t1 (a Int64, s DateTime('Asia/Istanbul')) Engine = MergeTree() ORDER BY a; +create view t AS ( + WITH RECURSIVE 42 as ttt, + toDate(s) as start_date, + _table as (select 1 as number union all select number + 1 from _table where number < 10) + SELECT a, ttt from t1 join _table on t1.a = _table.number and start_date = '2024-09-23' +); +drop table t; diff --git a/parser/testdata/03237_get_subcolumn_low_cardinality_column/ast.json b/parser/testdata/03237_get_subcolumn_low_cardinality_column/ast.json new file mode 
100644 index 000000000..f8b36d499 --- /dev/null +++ b/parser/testdata/03237_get_subcolumn_low_cardinality_column/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function getSubcolumn (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'str'" + }, + { + "explain": " Literal 'Tuple(a LowCardinality(String))'" + }, + { + "explain": " Literal 'a'" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001106071, + "rows_read": 15, + "bytes_read": 619 + } +} diff --git a/parser/testdata/03237_get_subcolumn_low_cardinality_column/metadata.json b/parser/testdata/03237_get_subcolumn_low_cardinality_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03237_get_subcolumn_low_cardinality_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03237_get_subcolumn_low_cardinality_column/query.sql b/parser/testdata/03237_get_subcolumn_low_cardinality_column/query.sql new file mode 100644 index 000000000..4d4755541 --- /dev/null +++ b/parser/testdata/03237_get_subcolumn_low_cardinality_column/query.sql @@ -0,0 +1 @@ +SELECT toTypeName(getSubcolumn(tuple('str')::Tuple(a LowCardinality(String)), 'a')) diff --git a/parser/testdata/03237_max_map_state_decimal_serialization/ast.json b/parser/testdata/03237_max_map_state_decimal_serialization/ast.json new file mode 100644 index 000000000..d962c97c6 --- /dev/null +++ b/parser/testdata/03237_max_map_state_decimal_serialization/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function maxMapState (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_0]" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Identifier JSONEachRow" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001027427, + "rows_read": 14, + "bytes_read": 558 + } +} diff --git a/parser/testdata/03237_max_map_state_decimal_serialization/metadata.json b/parser/testdata/03237_max_map_state_decimal_serialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03237_max_map_state_decimal_serialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03237_max_map_state_decimal_serialization/query.sql 
b/parser/testdata/03237_max_map_state_decimal_serialization/query.sql new file mode 100644 index 000000000..8742f470c --- /dev/null +++ b/parser/testdata/03237_max_map_state_decimal_serialization/query.sql @@ -0,0 +1 @@ +select maxMapState([0], [toDateTime64(0, 0)]) as x format JSONEachRow; diff --git a/parser/testdata/03238_analyzer_unknown_function/ast.json b/parser/testdata/03238_analyzer_unknown_function/ast.json new file mode 100644 index 000000000..3e21cd20a --- /dev/null +++ b/parser/testdata/03238_analyzer_unknown_function/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier source.count (alias count)" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.00103782, + "rows_read": 5, + "bytes_read": 198 + } +} diff --git a/parser/testdata/03238_analyzer_unknown_function/metadata.json b/parser/testdata/03238_analyzer_unknown_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03238_analyzer_unknown_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03238_analyzer_unknown_function/query.sql b/parser/testdata/03238_analyzer_unknown_function/query.sql new file mode 100644 index 000000000..d7beb548b --- /dev/null +++ b/parser/testdata/03238_analyzer_unknown_function/query.sql @@ -0,0 +1,10 @@ +SELECT source.count AS count +FROM +( + SELECT + count(*) AS count, + key + FROM numbers(10) + GROUP BY number % 2 AS key +) AS source +RIGHT JOIN system.one AS r ON source.key = r.dummy; diff --git a/parser/testdata/03239_if_constant_folding/ast.json b/parser/testdata/03239_if_constant_folding/ast.json new file mode 100644 index 000000000..6f3a75838 --- /dev/null +++ b/parser/testdata/03239_if_constant_folding/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001230771, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03239_if_constant_folding/metadata.json b/parser/testdata/03239_if_constant_folding/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03239_if_constant_folding/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03239_if_constant_folding/query.sql b/parser/testdata/03239_if_constant_folding/query.sql new file mode 100644 index 000000000..a0fab48af --- /dev/null +++ b/parser/testdata/03239_if_constant_folding/query.sql @@ -0,0 +1,21 @@ +SET enable_analyzer = 1; + +select false ? c : '' as c, count() from (select '' c) group by c; +select if( 0 , c, '') _c, count() from (select '' c) group by _c; +select if(1 = 0, c, '') _c, count() from (select '' c) group by _c; +select materialize(false) ? c : 'x' as c, count() from (select 'o' c) group by c; +select if(1 = 0, c, '') _c, count() from (select '' c) group by _c; +select if(1 = 1, c, '') _c, count() from (select '' c) group by _c; + +DROP TABLE IF EXISTS f; +CREATE TABLE f(c String) ENGINE = Null; + +DROP TABLE IF EXISTS v; +create materialized view v engine = Null as +select + false ? 
c : '' as c, + countState() t +from f group by c; + +DROP TABLE v; +DROP TABLE f; diff --git a/parser/testdata/03239_nan_with_fill/ast.json b/parser/testdata/03239_nan_with_fill/ast.json new file mode 100644 index 000000000..6ec72d8a7 --- /dev/null +++ b/parser/testdata/03239_nan_with_fill/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_nan" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.000930354, + "rows_read": 8, + "bytes_read": 286 + } +} diff --git a/parser/testdata/03239_nan_with_fill/metadata.json b/parser/testdata/03239_nan_with_fill/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03239_nan_with_fill/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03239_nan_with_fill/query.sql b/parser/testdata/03239_nan_with_fill/query.sql new file mode 100644 index 000000000..6cf351be0 --- /dev/null +++ b/parser/testdata/03239_nan_with_fill/query.sql @@ -0,0 +1,8 @@ +SELECT nan ORDER BY 1 WITH FILL; +SELECT -nan ORDER BY 1 WITH FILL; +SELECT 0./0. ORDER BY 1 WITH FILL; +SELECT 1 ORDER BY nan WITH FILL FROM 1; + +CREATE TABLE t0 (c0 Float32) ENGINE = Memory(); +INSERT INTO TABLE t0 (c0) VALUES (1),(inf),(2); +SELECT 1 FROM t0 ORDER BY t0.c0 WITH FILL; diff --git a/parser/testdata/03240_array_element_or_null/ast.json b/parser/testdata/03240_array_element_or_null/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03240_array_element_or_null/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03240_array_element_or_null/metadata.json b/parser/testdata/03240_array_element_or_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03240_array_element_or_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03240_array_element_or_null/query.sql b/parser/testdata/03240_array_element_or_null/query.sql new file mode 100644 index 000000000..d19ecdcd6 --- /dev/null +++ b/parser/testdata/03240_array_element_or_null/query.sql @@ -0,0 +1,67 @@ +-- { echoOn } +DROP TABLE IF EXISTS array_element_or_null_test; +CREATE TABLE array_element_or_null_test (arr Array(Int32), id Int32) ENGINE = Memory; +insert into array_element_or_null_test VALUES ([11,12,13], 2), ([11,12], 3), ([11,12,13], -1), ([11,12], -2), ([11,12], -3), ([11], 0); +select arrayElementOrNull(arr, id) from array_element_or_null_test; + +DROP TABLE IF EXISTS array_element_or_null_test; +CREATE TABLE array_element_or_null_test (arr Array(Int32), id UInt32) ENGINE = Memory; +insert into array_element_or_null_test VALUES ([11,12,13], 2), ([11,12], 3), ([11,12,13], 1), ([11,12], 4), ([11], 0); +select arrayElementOrNull(arr, id) from array_element_or_null_test; + +DROP TABLE IF EXISTS array_element_or_null_test; +CREATE TABLE array_element_or_null_test (arr Array(String), id Int32) ENGINE = Memory; +insert into array_element_or_null_test VALUES (['Abc','Df','Q'], 2), (['Abc','DEFQ'], 3), (['ABC','Q','ERT'], -1), (['Ab','ber'], -2), (['AB','asd'], -3), (['A'], 0); +select 
arrayElementOrNull(arr, id) from array_element_or_null_test; + +DROP TABLE IF EXISTS array_element_or_null_test; +CREATE TABLE array_element_or_null_test (arr Array(String), id UInt32) ENGINE = Memory; +insert into array_element_or_null_test VALUES (['Abc','Df','Q'], 2), (['Abc','DEFQ'], 3), (['ABC','Q','ERT'], 1), (['Ab','ber'], 4), (['A'], 0); +select arrayElementOrNull(arr, id) from array_element_or_null_test; + +DROP TABLE IF EXISTS array_element_or_null_test; +CREATE TABLE array_element_or_null_test (id UInt32) ENGINE = Memory; +insert into array_element_or_null_test VALUES (2), (1), (4), (3), (0); +select [1, 2, 3] as arr, arrayElementOrNull(arr, id) from array_element_or_null_test; + +DROP TABLE IF EXISTS array_element_or_null_test; +CREATE TABLE array_element_or_null_test (id Int32) ENGINE = Memory; +insert into array_element_or_null_test VALUES (-2), (1), (-4), (3), (2), (-1), (4), (-3), (0); +select [1, 2, 3] as arr, arrayElementOrNull(arr, id) from array_element_or_null_test; + +DROP TABLE array_element_or_null_test; + +SELECT arrayElementOrNull(range(0), -1); +SELECT arrayElementOrNull(range(0), 1); +SELECT arrayElementOrNull(range(number), 2) FROM system.numbers LIMIT 3; +SELECT arrayElementOrNull(range(number), -1) FROM system.numbers LIMIT 3; +SELECT arrayElementOrNull(range(number), number) FROM system.numbers LIMIT 3; +SELECT arrayElementOrNull(range(number), 2 - number) FROM system.numbers LIMIT 3; + +SELECT arrayElementOrNull(arrayMap(x -> toString(x), range(number)), 2) FROM system.numbers LIMIT 3; +SELECT arrayElementOrNull(arrayMap(x -> toString(x), range(number)), -1) FROM system.numbers LIMIT 3; +SELECT arrayElementOrNull(arrayMap(x -> toString(x), range(number)), number) FROM system.numbers LIMIT 3; +SELECT arrayElementOrNull(arrayMap(x -> toString(x), range(number)), 2 - number) FROM system.numbers LIMIT 3; + +SELECT arrayElementOrNull(arrayMap(x -> range(x), range(number)), 2) FROM system.numbers LIMIT 3; +SELECT arrayElementOrNull(arrayMap(x -> range(x), range(number)), -1) FROM system.numbers LIMIT 3; +SELECT arrayElementOrNull(arrayMap(x -> range(x), range(number)), number) FROM system.numbers LIMIT 3; +SELECT arrayElementOrNull(arrayMap(x -> range(x), range(number)), 2 - number) FROM system.numbers LIMIT 3; + +SELECT arrayElementOrNull([[1]], 1), arrayElementOrNull(materialize([[1]]), 1), arrayElementOrNull([[1]], materialize(1)), arrayElementOrNull(materialize([[1]]), materialize(1)); +SELECT arrayElementOrNull([['Hello']], 1), arrayElementOrNull(materialize([['World']]), 1), arrayElementOrNull([['Hello']], materialize(1)), arrayElementOrNull(materialize([['World']]), materialize(1)); + +SELECT arrayElementOrNull(([[['a'], ['b', 'c']], [['d', 'e', 'f'], ['g', 'h', 'i', 'j'], ['k', 'l', 'm', 'n', 'o']], [['p', 'q', 'r', 's', 't', 'u'], ['v', 'w', 'x', 'y', 'z', 'aa', 'bb'], ['cc', 'dd', 'ee', 'ff', 'gg', 'hh', 'ii', 'jj'], ['kk', 'll', 'mm', 'nn', 'oo', 'pp', 'qq', 'rr', 'ss']]] AS arr), number), arrayElementOrNull(arr[number], number), arrayElementOrNull(arr[number][number], number) FROM system.numbers LIMIT 10; + +SELECT arrayElementOrNull([1, 2], 3), arrayElementOrNull([1, NULL, 2], 4), arrayElementOrNull([('1', 1), ('2', 2)], -3); + +select groupArray(a) as b, arrayElementOrNull(b, 1), arrayElementOrNull(b, 0) from (select (1, 2) as a); + +SELECT [toNullable(1)] AS x, arrayElementOrNull(x, toNullable(1)) AS y; +SELECT materialize([toNullable(1)]) AS x, arrayElementOrNull(x, toNullable(1)) AS y; +SELECT [toNullable(1)] AS x, arrayElementOrNull(x, 
materialize(toNullable(1))) AS y; +SELECT materialize([toNullable(1)]) AS x, arrayElementOrNull(x, materialize(toNullable(1))) AS y; + +select arrayElementOrNull(m, 0), materialize(map('key', 42)) as m; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +-- { echoOff } diff --git a/parser/testdata/03240_array_element_or_null_for_map/ast.json b/parser/testdata/03240_array_element_or_null_for_map/ast.json new file mode 100644 index 000000000..c26aba108 --- /dev/null +++ b/parser/testdata/03240_array_element_or_null_for_map/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '...const maps...'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.000992998, + "rows_read": 5, + "bytes_read": 187 + } +} diff --git a/parser/testdata/03240_array_element_or_null_for_map/metadata.json b/parser/testdata/03240_array_element_or_null_for_map/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03240_array_element_or_null_for_map/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03240_array_element_or_null_for_map/query.sql b/parser/testdata/03240_array_element_or_null_for_map/query.sql new file mode 100644 index 000000000..e7bb6907e --- /dev/null +++ b/parser/testdata/03240_array_element_or_null_for_map/query.sql @@ -0,0 +1,38 @@ +SELECT '...const maps...'; +WITH map(1, 2, 3, 4) AS m SELECT arrayElementOrNull(m, number) FROM numbers(5); +WITH map('1', 2, '3', 4) AS m SELECT arrayElementOrNull(m, toString(number)) FROM numbers(5); + +WITH map(1, 2, 3, 4) AS m SELECT arrayElementOrNull(m, 3); +WITH map('1', 2, '3', 4) AS m SELECT arrayElementOrNull(m, '3'); + +DROP TABLE IF EXISTS t_map_03240; + +CREATE TABLE t_map_03240(i1 UInt64, i2 Int32, m1 Map(UInt32, String), m2 Map(Int8, String), m3 Map(Int128, String)) ENGINE = Memory; +INSERT INTO t_map_03240 VALUES (1, -1, map(1, 'foo', 2, 'bar'), map(-1, 'foo', 1, 'bar'), map(-1, 'foo', 1, 'bar')); + +SELECT '...int keys...'; + +SELECT arrayElementOrNull(m1, i1), arrayElementOrNull(m2, i1), arrayElementOrNull(m3, i1) FROM t_map_03240; +SELECT arrayElementOrNull(m1, i2), arrayElementOrNull(m2, i2), arrayElementOrNull(m3, i2) FROM t_map_03240; + +DROP TABLE IF EXISTS t_map_03240; + +CREATE TABLE t_map_03240(s String, fs FixedString(3), m1 Map(String, String), m2 Map(FixedString(3), String)) ENGINE = Memory; +INSERT INTO t_map_03240 VALUES ('aaa', 'bbb', map('aaa', 'foo', 'bbb', 'bar'), map('aaa', 'foo', 'bbb', 'bar')); + +SELECT '...string keys...'; + +SELECT arrayElementOrNull(m1, 'aaa'), arrayElementOrNull(m2, 'aaa') FROM t_map_03240; +SELECT arrayElementOrNull(m1, 'aaa'::FixedString(3)), arrayElementOrNull(m2, 'aaa'::FixedString(3)) FROM t_map_03240; +SELECT arrayElementOrNull(m1, s), arrayElementOrNull(m2, s) FROM t_map_03240; +SELECT arrayElementOrNull(m1, fs), arrayElementOrNull(m2, fs) FROM t_map_03240; +SELECT length(arrayElementOrNull(m2, 'aaa'::FixedString(4))) FROM t_map_03240; + +DROP TABLE IF EXISTS t_map_03240; + +SELECT '...tuple values...'; +with map('a', (1, 'foo')) as m select arrayElementOrNull(m, 'a'), arrayElementOrNull(m, 'c'); + +SELECT '...map values...'; +with map('a', map(1, 'foo')) as m select arrayElementOrNull(m, 'a'), arrayElementOrNull(m, 'c'); + diff --git 
a/parser/testdata/03240_cte_in_subquery/ast.json b/parser/testdata/03240_cte_in_subquery/ast.json new file mode 100644 index 000000000..543c14478 --- /dev/null +++ b/parser/testdata/03240_cte_in_subquery/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001617482, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03240_cte_in_subquery/metadata.json b/parser/testdata/03240_cte_in_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03240_cte_in_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03240_cte_in_subquery/query.sql b/parser/testdata/03240_cte_in_subquery/query.sql new file mode 100644 index 000000000..d4b3978b2 --- /dev/null +++ b/parser/testdata/03240_cte_in_subquery/query.sql @@ -0,0 +1,35 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS subquery_cte_in; + +CREATE TABLE subquery_cte_in +( + `date` DateTime64(3), + `label` UInt32, + `id` UInt32 +) +ENGINE = MergeTree +ORDER BY (label, id, date); + +INSERT INTO subquery_cte_in VALUES (toDateTime('2023-10-24 16:13:38'), 2, 6), (toDateTime('2023-10-24 16:00:00'), 2, 10), (toDateTime('2023-10-24 00:00:00'), 2, 6); + +SELECT max(date_out) +FROM +( + WITH + ( + SELECT max(date) + FROM subquery_cte_in + WHERE (id = 6) AND (label = 2) + ) AS cte_1, + ( + SELECT max(date) + FROM subquery_cte_in + WHERE (id = 10) AND (label = 2) + ) AS cte_2 + SELECT date AS date_out + FROM subquery_cte_in + WHERE date IN (cte_1, cte_2) +); + +DROP TABLE subquery_cte_in; diff --git a/parser/testdata/03240_insert_select_named_tuple/ast.json b/parser/testdata/03240_insert_select_named_tuple/ast.json new file mode 100644 index 000000000..2309d415b --- /dev/null +++ b/parser/testdata/03240_insert_select_named_tuple/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001662972, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03240_insert_select_named_tuple/metadata.json b/parser/testdata/03240_insert_select_named_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03240_insert_select_named_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03240_insert_select_named_tuple/query.sql b/parser/testdata/03240_insert_select_named_tuple/query.sql new file mode 100644 index 000000000..c3e12db4e --- /dev/null +++ b/parser/testdata/03240_insert_select_named_tuple/query.sql @@ -0,0 +1,22 @@ +SET enable_analyzer = 1; +SET enable_named_columns_in_function_tuple = 1; + +DROP TABLE IF EXISTS src; +DROP TABLE IF EXISTS dst; + +CREATE TABLE src (id UInt32, type String, data String) ENGINE=MergeTree ORDER BY tuple(); +CREATE TABLE dst (id UInt32, a Tuple (col_a Nullable(String), type String), b Tuple (col_b Nullable(String), type String)) ENGINE = MergeTree ORDER BY id; + +INSERT INTO src VALUES (1, 'ok', 'data'); +INSERT INTO dst (id, a, b) SELECT id, tuple(replaceAll(data, 'a', 'e') AS col_a, type) AS a, tuple(replaceAll(data, 'a', 'e') AS col_b, type) AS b FROM src; +SELECT * FROM dst; + +DROP TABLE src; +DROP TABLE dst; + +DROP TABLE IF EXISTS src; +CREATE TABLE src (id UInt32, type String, data String) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO src VALUES (1, 'ok', 
'data'); +SELECT id, tuple(replaceAll(data, 'a', 'e') AS col_a, type) AS a, tuple(replaceAll(data, 'a', 'e') AS col_b, type) AS b FROM cluster(test_cluster_two_shards, currentDatabase(), src) SETTINGS prefer_localhost_replica=0 FORMAT JSONEachRow; + +DROP TABLE src; diff --git a/parser/testdata/03240_quantile_exact_weighted_interpolated/ast.json b/parser/testdata/03240_quantile_exact_weighted_interpolated/ast.json new file mode 100644 index 000000000..88dcf6383 --- /dev/null +++ b/parser/testdata/03240_quantile_exact_weighted_interpolated/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery decimal (children 1)" + }, + { + "explain": " Identifier decimal" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001251554, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/03240_quantile_exact_weighted_interpolated/metadata.json b/parser/testdata/03240_quantile_exact_weighted_interpolated/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03240_quantile_exact_weighted_interpolated/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03240_quantile_exact_weighted_interpolated/query.sql b/parser/testdata/03240_quantile_exact_weighted_interpolated/query.sql new file mode 100644 index 000000000..a64b46e75 --- /dev/null +++ b/parser/testdata/03240_quantile_exact_weighted_interpolated/query.sql @@ -0,0 +1,76 @@ +DROP TABLE IF EXISTS decimal; + +CREATE TABLE decimal +( + a Decimal32(4), + b Decimal64(8), + c Decimal128(8), + f Float64, + d Date, + w UInt64 +) ENGINE = Memory; + +INSERT INTO decimal (a, b, c, f, d, w) +SELECT toDecimal32(number - 50, 4), toDecimal64(number - 50, 8) / 3, toDecimal128(number - 50, 8) / 5, number/2, addDays(toDate('2024-01-01'), number), number +FROM system.numbers LIMIT 101; + +SELECT 'quantileExactWeightedInterpolated'; +SELECT medianExactWeightedInterpolated(a, 1), + medianExactWeightedInterpolated(b, 2), + medianExactWeightedInterpolated(c, 3) as x, + medianExactWeightedInterpolated(f, 4), + medianExactWeightedInterpolated(d, 5), + toTypeName(x) FROM decimal; +SELECT quantileExactWeightedInterpolated(a, 1), + quantileExactWeightedInterpolated(b, 2), + quantileExactWeightedInterpolated(c, 3) as x, + quantileExactWeightedInterpolated(f, 4), + quantileExactWeightedInterpolated(d, 5), + toTypeName(x) FROM decimal WHERE a < 0; +SELECT quantileExactWeightedInterpolated(0.0)(a, 1), quantileExactWeightedInterpolated(0.0)(b, 2), quantileExactWeightedInterpolated(0.0)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantileExactWeightedInterpolated(0.2)(a, 1), quantileExactWeightedInterpolated(0.2)(b, 2), quantileExactWeightedInterpolated(0.2)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantileExactWeightedInterpolated(0.4)(a, 1), quantileExactWeightedInterpolated(0.4)(b, 2), quantileExactWeightedInterpolated(0.4)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantileExactWeightedInterpolated(0.6)(a, 1), quantileExactWeightedInterpolated(0.6)(b, 2), quantileExactWeightedInterpolated(0.6)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantileExactWeightedInterpolated(0.8)(a, 1), quantileExactWeightedInterpolated(0.8)(b, 2), quantileExactWeightedInterpolated(0.8)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantileExactWeightedInterpolated(1.0)(a, 1), quantileExactWeightedInterpolated(1.0)(b, 2), quantileExactWeightedInterpolated(1.0)(c, 3) FROM decimal WHERE a >= 0; +SELECT quantilesExactWeightedInterpolated(0.0, 0.1, 
0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(a, 1) FROM decimal; +SELECT quantilesExactWeightedInterpolated(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(b, 2) FROM decimal; +SELECT quantilesExactWeightedInterpolated(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(c, 3) FROM decimal; +SELECT quantilesExactWeightedInterpolated(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(f, 4) FROM decimal; +SELECT quantilesExactWeightedInterpolated(0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)(d, 5) FROM decimal; + +SELECT 'quantileExactWeightedInterpolatedState'; +SELECT quantilesExactWeightedInterpolatedMerge(0.2, 0.4, 0.6, 0.8)(x) +FROM +( + SELECT quantilesExactWeightedInterpolatedState(0.2, 0.4, 0.6, 0.8)(number + 1, 1) AS x + FROM numbers(49999) +); + +SELECT 'Test with filter that returns no rows'; +SELECT medianExactWeightedInterpolated(a, 1), + medianExactWeightedInterpolated(b, 2), + medianExactWeightedInterpolated(c, 3), + medianExactWeightedInterpolated(f, 4), + medianExactWeightedInterpolated(d, 5) FROM decimal WHERE a > 1000; +SELECT quantileExactWeightedInterpolated(a, 1), + quantileExactWeightedInterpolated(b, 2), + quantileExactWeightedInterpolated(c, 3), + quantileExactWeightedInterpolated(f, 4), + quantileExactWeightedInterpolated(d, 5) FROM decimal WHERE d < toDate('2024-01-01'); + +SELECT 'Test with dynamic weights'; +SELECT medianExactWeightedInterpolated(a, w), + medianExactWeightedInterpolated(b, w), + medianExactWeightedInterpolated(c, w), + medianExactWeightedInterpolated(f, w), + medianExactWeightedInterpolated(d, w) FROM decimal; + +SELECT 'Test with all weights set to 0'; +SELECT medianExactWeightedInterpolated(a, 0), + medianExactWeightedInterpolated(b, 0), + medianExactWeightedInterpolated(c, 0), + medianExactWeightedInterpolated(f, 0), + medianExactWeightedInterpolated(d, 0) FROM decimal; + +DROP TABLE IF EXISTS decimal; diff --git a/parser/testdata/03241_orc_dictionary_encode/ast.json b/parser/testdata/03241_orc_dictionary_encode/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03241_orc_dictionary_encode/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03241_orc_dictionary_encode/metadata.json b/parser/testdata/03241_orc_dictionary_encode/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03241_orc_dictionary_encode/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03241_orc_dictionary_encode/query.sql b/parser/testdata/03241_orc_dictionary_encode/query.sql new file mode 100644 index 000000000..d7837ac19 --- /dev/null +++ b/parser/testdata/03241_orc_dictionary_encode/query.sql @@ -0,0 +1,38 @@ +-- Tags: no-fasttest +set input_format_orc_use_fast_decoder = 1; + +set input_format_orc_dictionary_as_low_cardinality = 1; +insert into function file(concat(currentDatabase(), '_03241_data1_without_dict.orc')) +select toLowCardinality(cast(if (number % 10 = 0, null, number % 10) as Nullable(String))) as c from numbers(100000) +settings output_format_orc_dictionary_key_size_threshold = 0, engine_file_truncate_on_insert = 1; + +insert into function file(concat(currentDatabase(), '_03241_data1_with_dict.orc')) +select toLowCardinality(cast(if (number % 10 = 0, null, number % 10) as Nullable(String))) as c from numbers(100000) +settings output_format_orc_dictionary_key_size_threshold = 0.1, engine_file_truncate_on_insert = 1; + +desc file(concat(currentDatabase(), '_03241_data1_without_dict.orc')); +desc 
file(concat(currentDatabase(), '_03241_data1_with_dict.orc')); + +select c, count(1) from file(concat(currentDatabase(), '_03241_data1_without_dict.orc')) group by c order by c; +select c, count(1) from file(concat(currentDatabase(), '_03241_data1_with_dict.orc')) group by c order by c; + +select c, count(1) from file(concat(currentDatabase(), '_03241_data1_without_dict.orc'), ORC, 'c String') group by c order by c; +select c, count(1) from file(concat(currentDatabase(), '_03241_data1_with_dict.orc'), ORC, 'c LowCardinality(String)') group by c order by c; + +set input_format_orc_dictionary_as_low_cardinality = 0; +insert into function file(concat(currentDatabase(), '_03241_data2_without_dict.orc')) +select toLowCardinality(cast(if (number % 10 = 0, null, number % 10) as Nullable(String))) as c from numbers(100000) +settings output_format_orc_dictionary_key_size_threshold = 0, engine_file_truncate_on_insert = 1; + +insert into function file(concat(currentDatabase(), '_03241_data2_with_dict.orc')) +select toLowCardinality(cast(if (number % 10 = 0, null, number % 10) as Nullable(String))) as c from numbers(100000) +settings output_format_orc_dictionary_key_size_threshold = 0.1, engine_file_truncate_on_insert = 1; + +desc file(concat(currentDatabase(), '_03241_data2_without_dict.orc')); +desc file(concat(currentDatabase(), '_03241_data2_with_dict.orc')); + +select c, count(1) from file(concat(currentDatabase(), '_03241_data2_without_dict.orc')) group by c order by c; +select c, count(1) from file(concat(currentDatabase(), '_03241_data2_with_dict.orc')) group by c order by c; + +select c, count(1) from file(concat(currentDatabase(), '_03241_data2_without_dict.orc'), ORC, 'c String') group by c order by c; +select c, count(1) from file(concat(currentDatabase(), '_03241_data2_with_dict.orc'), ORC, 'c LowCardinality(String)') group by c order by c; diff --git a/parser/testdata/03241_view_block_structure/ast.json b/parser/testdata/03241_view_block_structure/ast.json new file mode 100644 index 000000000..7e1d04f70 --- /dev/null +++ b/parser/testdata/03241_view_block_structure/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery foo (children 1)" + }, + { + "explain": " Identifier foo" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001126812, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/03241_view_block_structure/metadata.json b/parser/testdata/03241_view_block_structure/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03241_view_block_structure/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03241_view_block_structure/query.sql b/parser/testdata/03241_view_block_structure/query.sql new file mode 100644 index 000000000..b93bc4a84 --- /dev/null +++ b/parser/testdata/03241_view_block_structure/query.sql @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS foo; +DROP TABLE IF EXISTS vfoo; + +CREATE TABLE foo(to_dttm DateTime) ENGINE = MergeTree() ORDER BY (); + +CREATE VIEW vfoo AS +SELECT CAST(foo.to_dttm AS DateTime64(6)) AS feature, + toDateTime64('2024-03-26 11:35:03.620846', 6) AS to_dttm +FROM foo; + +SELECT * FROM vfoo WHERE vfoo.to_dttm=toDateTime64('2024-03-26 11:35:03.620846', 6); + +DROP TABLE vfoo; +DROP TABLE foo; + +CREATE TABLE foo(to_dttm_blaaaaaaaaaaaaaa DateTime) ENGINE = MergeTree() ORDER BY (); + +CREATE VIEW vfoo AS +SELECT CAST(foo.to_dttm_blaaaaaaaaaaaaaa AS DateTime64(6)) AS feature, + 
toDateTime64('2024-03-26 11:35:03.620846', 6) AS to_dttm +FROM foo; + +SELECT * FROM vfoo WHERE vfoo.to_dttm=toDateTime64('2024-03-26 11:35:03.620846', 6); + +DROP TABLE vfoo; +DROP TABLE foo; diff --git a/parser/testdata/03242_view_block_structure/ast.json b/parser/testdata/03242_view_block_structure/ast.json new file mode 100644 index 000000000..c30cc1891 --- /dev/null +++ b/parser/testdata/03242_view_block_structure/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery a (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001067268, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03242_view_block_structure/metadata.json b/parser/testdata/03242_view_block_structure/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03242_view_block_structure/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03242_view_block_structure/query.sql b/parser/testdata/03242_view_block_structure/query.sql new file mode 100644 index 000000000..46df7af56 --- /dev/null +++ b/parser/testdata/03242_view_block_structure/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS a; +DROP TABLE IF EXISTS b; + +CREATE VIEW a AS +SELECT 'a' AS id; + +CREATE VIEW b AS +SELECT + 'a' AS id, + 'b' as other +; + +SELECT * +FROM b +WHERE id IN (SELECT id from a); + +DROP TABLE a; +DROP TABLE b; diff --git a/parser/testdata/03243_array_join_lambda/ast.json b/parser/testdata/03243_array_join_lambda/ast.json new file mode 100644 index 000000000..4d9ac0c66 --- /dev/null +++ b/parser/testdata/03243_array_join_lambda/ast.json @@ -0,0 +1,121 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function length (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal Array_[UInt64_3]" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2]" + } + ], + + "rows": 
33, + + "statistics": + { + "elapsed": 0.001387584, + "rows_read": 33, + "bytes_read": 1501 + } +} diff --git a/parser/testdata/03243_array_join_lambda/metadata.json b/parser/testdata/03243_array_join_lambda/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03243_array_join_lambda/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03243_array_join_lambda/query.sql b/parser/testdata/03243_array_join_lambda/query.sql new file mode 100644 index 000000000..0cd93a8d7 --- /dev/null +++ b/parser/testdata/03243_array_join_lambda/query.sql @@ -0,0 +1 @@ +SELECT arrayMap(x -> (x + length(arrayJoin([arrayMap(y -> (y + 1), [3])]))), [1, 2]); diff --git a/parser/testdata/03243_check_for_nullable_nothing_in_alter/ast.json b/parser/testdata/03243_check_for_nullable_nothing_in_alter/ast.json new file mode 100644 index 000000000..6da3efc79 --- /dev/null +++ b/parser/testdata/03243_check_for_nullable_nothing_in_alter/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery src (children 1)" + }, + { + "explain": " Identifier src" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001273765, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/03243_check_for_nullable_nothing_in_alter/metadata.json b/parser/testdata/03243_check_for_nullable_nothing_in_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03243_check_for_nullable_nothing_in_alter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03243_check_for_nullable_nothing_in_alter/query.sql b/parser/testdata/03243_check_for_nullable_nothing_in_alter/query.sql new file mode 100644 index 000000000..c77989ef8 --- /dev/null +++ b/parser/testdata/03243_check_for_nullable_nothing_in_alter/query.sql @@ -0,0 +1,12 @@ +drop table if exists src; +drop table if exists dst; +drop view if exists v; +create table src (x Nullable(Int32)) engine=Memory; +alter table src modify column x Nullable(Nothing); -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_TABLES} +create table dst (x Nullable(Int32)) engine=Memory; +create materialized view v to dst as select x from src; +alter table v modify query select NULL as x from src; -- {serverError DATA_TYPE_CANNOT_BE_USED_IN_TABLES} +drop view v; +drop table dst; +drop table src; + diff --git a/parser/testdata/03243_cluster_not_found_column/ast.json b/parser/testdata/03243_cluster_not_found_column/ast.json new file mode 100644 index 000000000..8b8d87b20 --- /dev/null +++ b/parser/testdata/03243_cluster_not_found_column/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001143792, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03243_cluster_not_found_column/metadata.json b/parser/testdata/03243_cluster_not_found_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03243_cluster_not_found_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03243_cluster_not_found_column/query.sql b/parser/testdata/03243_cluster_not_found_column/query.sql new file mode 100644 index 000000000..c1c9d9dfe --- /dev/null +++ b/parser/testdata/03243_cluster_not_found_column/query.sql @@ -0,0 +1,14 @@ +SET enable_analyzer = 1; + +SELECT + c, + count(c) +FROM +( + 
SELECT + dummy, + count() AS c + FROM clusterAllReplicas(test_cluster_two_shards, system.one) + GROUP BY ALL +) +GROUP BY ALL; diff --git a/parser/testdata/03243_compatibility_setting_with_alias/ast.json b/parser/testdata/03243_compatibility_setting_with_alias/ast.json new file mode 100644 index 000000000..463c7433d --- /dev/null +++ b/parser/testdata/03243_compatibility_setting_with_alias/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001155227, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03243_compatibility_setting_with_alias/metadata.json b/parser/testdata/03243_compatibility_setting_with_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03243_compatibility_setting_with_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03243_compatibility_setting_with_alias/query.sql b/parser/testdata/03243_compatibility_setting_with_alias/query.sql new file mode 100644 index 000000000..ee1a2b830 --- /dev/null +++ b/parser/testdata/03243_compatibility_setting_with_alias/query.sql @@ -0,0 +1,8 @@ +SET enable_analyzer = DEFAULT; +SELECT name, value, changed from system.settings where name IN ('allow_experimental_analyzer', 'enable_analyzer') ORDER BY name; +SET compatibility = '24.8'; +SELECT name, value, changed from system.settings where name IN ('allow_experimental_analyzer', 'enable_analyzer') ORDER BY name; +SET compatibility = '24.3'; +SELECT name, value, changed from system.settings where name IN ('allow_experimental_analyzer', 'enable_analyzer') ORDER BY name; +SET compatibility = '24.1'; +SELECT name, value, changed from system.settings where name IN ('allow_experimental_analyzer', 'enable_analyzer') ORDER BY name; diff --git a/parser/testdata/03243_create_or_replace_view_dependency_check/ast.json b/parser/testdata/03243_create_or_replace_view_dependency_check/ast.json new file mode 100644 index 000000000..b0dcf9c80 --- /dev/null +++ b/parser/testdata/03243_create_or_replace_view_dependency_check/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001288567, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03243_create_or_replace_view_dependency_check/metadata.json b/parser/testdata/03243_create_or_replace_view_dependency_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03243_create_or_replace_view_dependency_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03243_create_or_replace_view_dependency_check/query.sql b/parser/testdata/03243_create_or_replace_view_dependency_check/query.sql new file mode 100644 index 000000000..9432ef9b5 --- /dev/null +++ b/parser/testdata/03243_create_or_replace_view_dependency_check/query.sql @@ -0,0 +1,21 @@ +drop table if exists test; +drop view if exists v; +drop dictionary if exists dict; +create table test (x UInt32, v String) engine=Memory; +create view v (x UInt32, v String) as select x, v from test; +CREATE DICTIONARY dict +( + x UInt64, + v String +) +PRIMARY KEY x +SOURCE(CLICKHOUSE(TABLE 'v')) +LAYOUT(FLAT()) +LIFETIME(MIN 0 MAX 1000); + +drop view v; -- {serverError 
HAVE_DEPENDENT_OBJECTS} +create or replace view v (x UInt32, v String, y UInt32) as select x, v, 42 as y from test; +drop dictionary dict; +drop view v; +drop table test; + diff --git a/parser/testdata/03243_lower_utf8_msan/ast.json b/parser/testdata/03243_lower_utf8_msan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03243_lower_utf8_msan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03243_lower_utf8_msan/metadata.json b/parser/testdata/03243_lower_utf8_msan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03243_lower_utf8_msan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03243_lower_utf8_msan/query.sql b/parser/testdata/03243_lower_utf8_msan/query.sql new file mode 100644 index 000000000..d147ccc34 --- /dev/null +++ b/parser/testdata/03243_lower_utf8_msan/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest +-- no-fasttest: upper/lowerUTF8 use ICU + +SELECT ignore(lengthUTF8(lowerUTF8(randomStringUTF8(99)))); -- bug #49672: msan assert diff --git a/parser/testdata/03243_to_start_of_interval_aliases/ast.json b/parser/testdata/03243_to_start_of_interval_aliases/ast.json new file mode 100644 index 000000000..3e0f6a325 --- /dev/null +++ b/parser/testdata/03243_to_start_of_interval_aliases/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function date_bin (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2023-10-09'" + }, + { + "explain": " Function toIntervalYear (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2022-02-01'" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001545384, + "rows_read": 15, + "bytes_read": 595 + } +} diff --git a/parser/testdata/03243_to_start_of_interval_aliases/metadata.json b/parser/testdata/03243_to_start_of_interval_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03243_to_start_of_interval_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03243_to_start_of_interval_aliases/query.sql b/parser/testdata/03243_to_start_of_interval_aliases/query.sql new file mode 100644 index 000000000..31f20278a --- /dev/null +++ b/parser/testdata/03243_to_start_of_interval_aliases/query.sql @@ -0,0 +1,8 @@ +SELECT date_bin(toDate('2023-10-09'), toIntervalYear(1), toDate('2022-02-01')); +SELECT date_bin(toDate('2023-10-09'), toIntervalYear(1)); +SELECT date_BIN(toDateTime('2023-10-09 10:11:12'), toIntervalYear(1), toDateTime('2022-02-01 09:08:07')); +SELECT date_BIN(toDateTime('2023-10-09 10:11:12'), toIntervalYear(1)); +SELECT time_bucket(toDateTime('2023-10-09 10:11:12'), toIntervalYear(1), toDateTime('2022-02-01 09:08:07')); +SELECT time_bucket(toDateTime('2023-10-09 10:11:12'), toIntervalYear(1)); +SELECT TIME_bucket(toDateTime('2023-10-09 10:11:12'), toIntervalYear(1), toDateTime('2022-02-01 
09:08:07')); +SELECT TIME_bucket(toDateTime('2023-10-09 10:11:12'), toIntervalYear(1)); diff --git a/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan/ast.json b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan/metadata.json b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan/query.sql b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan/query.sql new file mode 100644 index 000000000..5d0c27b5e --- /dev/null +++ b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan/query.sql @@ -0,0 +1,45 @@ +-- Tags: long, no-tsan, no-asan, no-msan, no-s3-storage +SET use_skip_indexes = 1; +SET use_skip_indexes_if_final = 1; + +DROP TABLE IF EXISTS t_final_query_tbl; + +CREATE TABLE t_final_query_tbl(id UInt64, v UInt64, INDEX secondaryidx v TYPE minmax) ENGINE = ReplacingMergeTree ORDER BY id; + +SYSTEM STOP MERGES t_final_query_tbl; + +INSERT INTO t_final_query_tbl SELECT number, if(number=100444, 98889991, number) FROM numbers(1000000); +INSERT INTO t_final_query_tbl SELECT number, if(number=100444, 88889992, number+1) FROM numbers(1000000); +INSERT INTO t_final_query_tbl SELECT number, if(number=100444, 78889993, number+1) FROM numbers(1000000); +INSERT INTO t_final_query_tbl SELECT number, if(number=100444, 68889994, number+1) FROM numbers(1000000); +INSERT INTO t_final_query_tbl SELECT number, if(number=100444, 58889995, number+1) FROM numbers(1000000); + +SELECT 'Next 4 queries should return 0 rows and the 5th query should return 1 row'; +SELECT count(id) FROM t_final_query_tbl FINAL where v = 98889991 SETTINGS use_skip_indexes_if_final_exact_mode=1; +SELECT count(id) FROM t_final_query_tbl FINAL where v = 88889992 SETTINGS use_skip_indexes_if_final_exact_mode=1; +SELECT count(id) FROM t_final_query_tbl FINAL where v = 78889993 SETTINGS use_skip_indexes_if_final_exact_mode=1; +SELECT count(id) FROM t_final_query_tbl FINAL where v = 68889994 SETTINGS use_skip_indexes_if_final_exact_mode=1; +SELECT count(id) FROM t_final_query_tbl FINAL where v = 58889995 SETTINGS use_skip_indexes_if_final_exact_mode=1; + +DROP TABLE t_final_query_tbl; + +DROP TABLE IF EXISTS t_final_query_tbl2; + +CREATE TABLE t_final_query_tbl2(id1 String, id2 UInt64, id3 DateTime, v UInt64, INDEX secondaryidx v TYPE minmax) ENGINE = ReplacingMergeTree ORDER BY (id1,id2,id3); + +SYSTEM STOP MERGES t_final_query_tbl2; + +INSERT INTO t_final_query_tbl2 SELECT substr(lower(hex(MD5(toString(trunc(number/1000))))), 1, 10), trunc(number%100), toDateTime(number), if(number=100444, 98889991, number) from numbers(1000000); +INSERT INTO t_final_query_tbl2 SELECT substr(lower(hex(MD5(toString(trunc(number/1000))))), 1, 10), trunc(number%100), toDateTime(number), if(number=100444, 88889992, number) from numbers(1000000); +INSERT INTO t_final_query_tbl2 SELECT substr(lower(hex(MD5(toString(trunc(number/1000))))), 1, 10), trunc(number%100), toDateTime(number), if(number=100444, 78889993, number) from numbers(1000000); +INSERT INTO t_final_query_tbl2 SELECT 
substr(lower(hex(MD5(toString(trunc(number/1000))))), 1, 10), trunc(number%100), toDateTime(number), if(number=100444, 68889994, number) from numbers(1000000); +INSERT INTO t_final_query_tbl2 SELECT substr(lower(hex(MD5(toString(trunc(number/1000))))), 1, 10), trunc(number%100), toDateTime(number), if(number=100444, 58889995, number) from numbers(1000000); + +SELECT 'Next 4 queries should return 0 rows and the 5th query should return 1 row'; +SELECT count(id1) FROM t_final_query_tbl2 FINAL where v = 98889991 SETTINGS use_skip_indexes_if_final_exact_mode=1; +SELECT count(id1) FROM t_final_query_tbl2 FINAL where v = 88889992 SETTINGS use_skip_indexes_if_final_exact_mode=1; +SELECT count(id1) FROM t_final_query_tbl2 FINAL where v = 78889993 SETTINGS use_skip_indexes_if_final_exact_mode=1; +SELECT count(id1) FROM t_final_query_tbl2 FINAL where v = 68889994 SETTINGS use_skip_indexes_if_final_exact_mode=1; +SELECT count(id1) FROM t_final_query_tbl2 FINAL where v = 58889995 SETTINGS use_skip_indexes_if_final_exact_mode=1; + +DROP TABLE t_final_query_tbl2; diff --git a/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_basic/ast.json b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_basic/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_basic/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_basic/metadata.json b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_basic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_basic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_basic/query.sql b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_basic/query.sql new file mode 100644 index 000000000..6cd37c2b2 --- /dev/null +++ b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_basic/query.sql @@ -0,0 +1,29 @@ +-- Tags: no-random-settings +-- Testcase from https://github.com/ClickHouse/ClickHouse/pull/34243 + +DROP TABLE IF EXISTS data_02201; + +CREATE TABLE data_02201 ( + key Int, + value_max SimpleAggregateFunction(max, Int), + INDEX idx value_max TYPE minmax GRANULARITY 1 +) +Engine=AggregatingMergeTree() +ORDER BY key +PARTITION BY key; + +SYSTEM STOP MERGES data_02201; + +INSERT INTO data_02201 SELECT number, number FROM numbers(10); +INSERT INTO data_02201 SELECT number, number+1 FROM numbers(10); + +SELECT 'Correct result - 1 row by next query'; +SELECT * FROM data_02201 FINAL WHERE value_max = 1 ORDER BY key, value_max SETTINGS use_skip_indexes=0, use_skip_indexes_if_final=0; + +SELECT 'Wrong result - 2 rows by next query'; +SELECT * FROM data_02201 FINAL WHERE value_max = 1 ORDER BY key, value_max SETTINGS use_skip_indexes=1, use_skip_indexes_if_final=1,use_skip_indexes_if_final_exact_mode=0; + +SELECT 'Correct result - 1 row by next query'; +SELECT * FROM data_02201 FINAL WHERE value_max = 1 ORDER BY key, value_max SETTINGS use_skip_indexes=1, use_skip_indexes_if_final=1,use_skip_indexes_if_final_exact_mode=1; + +DROP TABLE data_02201; diff --git a/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_extremes/ast.json b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_extremes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_extremes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_extremes/metadata.json b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_extremes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_extremes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_extremes/query.sql b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_extremes/query.sql new file mode 100644 index 000000000..fdb5d9718 --- /dev/null +++ b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_extremes/query.sql @@ -0,0 +1,103 @@ +-- More tests for the use_skip_indexes_if_final_exact_mode optimization + +SET use_skip_indexes = 1; +SET use_skip_indexes_if_final = 1; +SET use_skip_indexes_if_final_exact_mode = 1; + +DROP TABLE IF EXISTS tab1; + +CREATE TABLE tab1 (id Int32, v Int32, INDEX secondaryidx v TYPE minmax) ENGINE=ReplacingMergeTree ORDER BY id SETTINGS index_granularity=2; + +SYSTEM STOP MERGES tab1; + +INSERT INTO tab1 SELECT number, number FROM numbers(10); +INSERT INTO tab1 SELECT number, 100 + number FROM numbers(10); -- 'v' column has changed + +-- Should correctly read 1st granule in 2nd part and return no rows +SELECT id FROM tab1 FINAL WHERE v = 0; + +-- Should correctly read last granule in 2nd part and return no rows +SELECT id FROM tab1 FINAL WHERE v = 9; + +-- All these queries will return 0 rows by correctly reading extra granule from 2nd part +SELECT id FROM tab1 FINAL WHERE v = 1; +SELECT id FROM tab1 FINAL WHERE v = 2; +SELECT id FROM tab1 FINAL WHERE v = 3; +SELECT id FROM tab1 FINAL WHERE v = 4; +SELECT id FROM tab1 FINAL WHERE v = 5; +SELECT id FROM tab1 FINAL WHERE v = 6; +SELECT id FROM tab1 FINAL WHERE v = 7; +SELECT id FROM tab1 FINAL WHERE v = 8; + +INSERT INTO tab1 VALUES (0, 8888), (9, 9999); + +-- Should correctly read 1st granule in 2nd part & 1st granule in 3rd part +SELECT id FROM tab1 FINAL WHERE v = 0; + +-- Should correctly read last granule in 2nd part & 1st granule in 3rd part +SELECT id FROM tab1 FINAL WHERE v = 9; + +-- Rows with id = 0 and id = 9 should be printed +SELECT id FROM tab1 FINAL WHERE v = 8888; +SELECT id FROM tab1 FINAL WHERE v = 9999; + + +-- Test for repeated PK range. Rows will have PK like this - +-- (1,1,1,<v>), (1,1,2,<v>), (1,1,3,<v>), ...
+-- Test for PR https://github.com/ClickHouse/ClickHouse/pull/82667 + +DROP TABLE IF EXISTS tab2; +CREATE TABLE tab2 (id1 Int32, id2 Int32, id3 Int32, v Int32, INDEX secondaryidx v TYPE minmax) ENGINE=ReplacingMergeTree ORDER BY (id1, id2, id3) SETTINGS index_granularity=64; + +SYSTEM STOP MERGES tab2; + +INSERT INTO tab2 SELECT (number % 2500), (number % 500), number, number from numbers(10000); +INSERT INTO tab2 SELECT (number % 2500), (number % 500), number, 100000 + number from numbers(10000); -- 'v' column has changed + +-- No rows should be selected by below queries as 'v' does not have value < 10000 due to updates in 2nd part +SELECT id1, id2, id3 FROM tab2 FINAL WHERE v = rand() % 10000; +SELECT id1, id2, id3 FROM tab2 FINAL WHERE v = rand() % 10000; +SELECT id1, id2, id3 FROM tab2 FINAL WHERE v = rand() % 10000; +SELECT id1, id2, id3 FROM tab2 FINAL WHERE v = rand() % 10000; +SELECT id1, id2, id3 FROM tab2 FINAL WHERE v = rand() % 10000; +SELECT id1, id2, id3 FROM tab2 FINAL WHERE v = rand() % 10000; +SELECT id1, id2, id3 FROM tab2 FINAL WHERE v = rand() % 10000; +SELECT id1, id2, id3 FROM tab2 FINAL WHERE v = rand() % 10000; +SELECT id1, id2, id3 FROM tab2 FINAL WHERE v = rand() % 10000; +SELECT id1, id2, id3 FROM tab2 FINAL WHERE v = rand() % 10000; + +-- Tests with single range parts (https://github.com/ClickHouse/ClickHouse/issues/82792) +DROP TABLE IF EXISTS tab3; + +CREATE TABLE tab3( + key Int, + value Int, + INDEX idx value TYPE minmax GRANULARITY 1 +) +Engine=ReplacingMergeTree() +ORDER BY key +PARTITION BY key; + +SYSTEM STOP MERGES tab3; + +INSERT INTO tab3 SELECT number, number FROM numbers(10); -- 10 parts + +SELECT key, value FROM tab3 FINAL WHERE value = 1 SETTINGS max_rows_to_read = 1; -- 1,1 + +INSERT INTO tab3 VALUES (0, 100), (1, 101), (2, 102), (3, 103), (4, 104), (5, 105), (6, 106), (7, 107), (8, 108), (9, 109); -- 10 more parts + +-- Next statements return 0 rows. 
Read 1 range each from 2 parts +SELECT key, value FROM tab3 FINAL WHERE value = 0 SETTINGS max_rows_to_read = 2; +SELECT key, value FROM tab3 FINAL WHERE value = 1 SETTINGS max_rows_to_read = 2; +SELECT key, value FROM tab3 FINAL WHERE value = 2 SETTINGS max_rows_to_read = 2; +SELECT key, value FROM tab3 FINAL WHERE value = 3 SETTINGS max_rows_to_read = 2; +SELECT key, value FROM tab3 FINAL WHERE value = 4 SETTINGS max_rows_to_read = 2; +SELECT key, value FROM tab3 FINAL WHERE value = 5 SETTINGS max_rows_to_read = 2; +SELECT key, value FROM tab3 FINAL WHERE value = 6 SETTINGS max_rows_to_read = 2; +SELECT key, value FROM tab3 FINAL WHERE value = 7 SETTINGS max_rows_to_read = 2; +SELECT key, value FROM tab3 FINAL WHERE value = 8 SETTINGS max_rows_to_read = 2; +SELECT key, value FROM tab3 FINAL WHERE value = 9 SETTINGS max_rows_to_read = 2; + +DROP TABLE tab1; +DROP TABLE tab2; +DROP TABLE tab3; diff --git a/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_no_final_mark/ast.json b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_no_final_mark/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_no_final_mark/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_no_final_mark/metadata.json b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_no_final_mark/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_no_final_mark/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_no_final_mark/query.sql b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_no_final_mark/query.sql new file mode 100644 index 000000000..318563760 --- /dev/null +++ b/parser/testdata/03244_skip_index_in_final_query_with_pk_rescan_no_final_mark/query.sql @@ -0,0 +1,38 @@ +-- More tests for the use_skip_indexes_if_final_exact_mode optimization +-- Test that when a part does not have a final mark, the optimization just expands to all ranges + +SET use_skip_indexes = 1; +SET use_skip_indexes_if_final = 1; +SET use_skip_indexes_if_final_exact_mode = 1; + +DROP TABLE IF EXISTS tab1; + +-- The CREATE TABLE raises a warning due to index_granularity_bytes = 0 +SET send_logs_level = 'fatal'; + +CREATE TABLE tab1 +( + `valueDate` Date, + `bb_ticker` String, + `ric` String, + `update_timestamp` DateTime, + INDEX tab1_bb_ticker_idx bb_ticker TYPE bloom_filter GRANULARITY 4, + INDEX tab1_ric_idx ric TYPE bloom_filter GRANULARITY 4 +) +ENGINE = ReplacingMergeTree(update_timestamp) +PRIMARY KEY (valueDate, bb_ticker, ric) +ORDER BY (valueDate, bb_ticker, ric) +SETTINGS index_granularity = 111, index_granularity_bytes = 0, compress_primary_key = 0; + +SET send_logs_level = 'warning'; + +SYSTEM STOP MERGES tab1; + +INSERT INTO tab1(valueDate, bb_ticker, ric) + SELECT today(), number%1111, number%111111 + FROM numbers(1e4); + +-- No exception/assert & no rows.
+SELECT * FROM tab1 FINAL WHERE ric = 'BOWNU.O'; + +DROP TABLE tab1; diff --git a/parser/testdata/03245_ripemd160/ast.json b/parser/testdata/03245_ripemd160/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03245_ripemd160/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03245_ripemd160/metadata.json b/parser/testdata/03245_ripemd160/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03245_ripemd160/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03245_ripemd160/query.sql b/parser/testdata/03245_ripemd160/query.sql new file mode 100644 index 000000000..f70f759c4 --- /dev/null +++ b/parser/testdata/03245_ripemd160/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-fasttest, no-openssl-fips + +SELECT HEX(RIPEMD160('The quick brown fox jumps over the lazy dog')); + +SELECT HEX(RIPEMD160('The quick brown fox jumps over the lazy cog')); + +SELECT HEX(RIPEMD160('')); + +SELECT HEX(RIPEMD160('A-very-long-string-that-should-be-hashed-using-ripemd160')); + +SELECT HEX(RIPEMD160(toString(avg(number))) ) +FROM (SELECT arrayJoin([1, 2, 3, 4, 5]) AS number); diff --git a/parser/testdata/03245_views_and_filter_push_down_bug/ast.json b/parser/testdata/03245_views_and_filter_push_down_bug/ast.json new file mode 100644 index 000000000..504209499 --- /dev/null +++ b/parser/testdata/03245_views_and_filter_push_down_bug/ast.json @@ -0,0 +1,121 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function view (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier id" + }, + { + "explain": " Function toDateTime (alias date) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier date" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function view (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias id)" + }, + { + "explain": " Literal '2024-05-02' (alias date)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier date" + }, + { + "explain": " Literal '2024-05-02'" + } + ], + + "rows": 33, + + "statistics": + { + "elapsed": 0.001741903, + "rows_read": 33, + "bytes_read": 1475 + } +} diff --git a/parser/testdata/03245_views_and_filter_push_down_bug/metadata.json 
b/parser/testdata/03245_views_and_filter_push_down_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03245_views_and_filter_push_down_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03245_views_and_filter_push_down_bug/query.sql b/parser/testdata/03245_views_and_filter_push_down_bug/query.sql new file mode 100644 index 000000000..be5f27b55 --- /dev/null +++ b/parser/testdata/03245_views_and_filter_push_down_bug/query.sql @@ -0,0 +1 @@ +select * from view(select id, toDateTime(date) as date from view(select 1 as id, '2024-05-02' as date)) where date='2024-05-02'; diff --git a/parser/testdata/03246_alter_update_dynamic_hung/ast.json b/parser/testdata/03246_alter_update_dynamic_hung/ast.json new file mode 100644 index 000000000..06ab77058 --- /dev/null +++ b/parser/testdata/03246_alter_update_dynamic_hung/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001419883, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03246_alter_update_dynamic_hung/metadata.json b/parser/testdata/03246_alter_update_dynamic_hung/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03246_alter_update_dynamic_hung/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03246_alter_update_dynamic_hung/query.sql b/parser/testdata/03246_alter_update_dynamic_hung/query.sql new file mode 100644 index 000000000..e3bf7bb5a --- /dev/null +++ b/parser/testdata/03246_alter_update_dynamic_hung/query.sql @@ -0,0 +1,7 @@ +SET allow_experimental_dynamic_type = 1; +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Int) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO t0 (c0) VALUES (1); +ALTER TABLE t0 UPDATE c0 = EXISTS (SELECT 1 FROM t1 CROSS JOIN t0) WHERE 1; +ALTER TABLE t0 MODIFY COLUMN c0 Dynamic; --{serverError UNFINISHED} +DROP TABLE t0; diff --git a/parser/testdata/03246_join_on_asterisk/ast.json b/parser/testdata/03246_join_on_asterisk/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03246_join_on_asterisk/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03246_join_on_asterisk/metadata.json b/parser/testdata/03246_join_on_asterisk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03246_join_on_asterisk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03246_join_on_asterisk/query.sql b/parser/testdata/03246_join_on_asterisk/query.sql new file mode 100644 index 000000000..1af3319bb --- /dev/null +++ b/parser/testdata/03246_join_on_asterisk/query.sql @@ -0,0 +1,7 @@ + +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Int) ENGINE = Memory(); + +SELECT 1 FROM t0 JOIN t0 ON *; -- { serverError BAD_ARGUMENTS,INVALID_JOIN_ON_EXPRESSION } +SELECT 1 FROM t0 JOIN t0 ON (*,); -- { serverError AMBIGUOUS_COLUMN_NAME,ILLEGAL_TYPE_OF_ARGUMENT } +SELECT 1 FROM t0 JOIN t0 USING *; -- { serverError BAD_ARGUMENTS,UNSUPPORTED_JOIN_KEYS } diff --git a/parser/testdata/03246_json_subcolumn_correct_type/ast.json b/parser/testdata/03246_json_subcolumn_correct_type/ast.json new file mode 100644 index 000000000..30c401ad3 --- /dev/null +++ b/parser/testdata/03246_json_subcolumn_correct_type/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + 
"explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001309149, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03246_json_subcolumn_correct_type/metadata.json b/parser/testdata/03246_json_subcolumn_correct_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03246_json_subcolumn_correct_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03246_json_subcolumn_correct_type/query.sql b/parser/testdata/03246_json_subcolumn_correct_type/query.sql new file mode 100644 index 000000000..27b6a1968 --- /dev/null +++ b/parser/testdata/03246_json_subcolumn_correct_type/query.sql @@ -0,0 +1,9 @@ +set enable_json_type=1; +set enable_analyzer=1; +set allow_dynamic_type_in_join_keys=1; +drop table if exists test; +create table test (json JSON(max_dynamic_types=1)) engine=Memory; +insert into test values ('{"c0" : 1}'), ('{"c0" : 2}'); +select toTypeName(json.c0) from test; +SELECT 1 FROM (SELECT 1 AS c0) tx FULL OUTER JOIN test ON test.json.Float32 = tx.c0; +drop table test; diff --git a/parser/testdata/03246_json_tuple_decompress_race/ast.json b/parser/testdata/03246_json_tuple_decompress_race/ast.json new file mode 100644 index 000000000..86996f31c --- /dev/null +++ b/parser/testdata/03246_json_tuple_decompress_race/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001061474, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03246_json_tuple_decompress_race/metadata.json b/parser/testdata/03246_json_tuple_decompress_race/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03246_json_tuple_decompress_race/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03246_json_tuple_decompress_race/query.sql b/parser/testdata/03246_json_tuple_decompress_race/query.sql new file mode 100644 index 000000000..70d96d906 --- /dev/null +++ b/parser/testdata/03246_json_tuple_decompress_race/query.sql @@ -0,0 +1,32 @@ +SET enable_json_type = 1, type_json_skip_duplicated_paths = 1, allow_simdjson = 1; +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; +CREATE TABLE t0 (c0 JSON, c1 Array(Nullable(Decimal))) ENGINE = Memory(); +CREATE TABLE t1 AS t0; +SET min_compress_block_size = 2654980, cross_join_min_rows_to_compress = 1, insert_null_as_default = 1, check_query_single_value_result = 1, partial_merge_join_optimizations = 1, use_uncompressed_cache = 0; +INSERT INTO t0 (c0, c1) VALUES ('{"c1":[{"c1":"Uw"}]}', [-5205545.0]), ('{"c1":{"c0":{"c2":["j^)7`{"]}}}', [-183841.0, 1232295168.0]), ('{"c2":["Dh X,r<"]}', [-518583.0, -563.0, -2442132402.0, -4991142492.0, -34823.0, -5.0, -6.0]), ('{"c2":"n 8k]wa_V"}', NULL), ('{"c2":{"c0":{"c1":-98}}}', NULL), ('{"c2":36}', [-89513.0, -9306968710.0, 6332325249.0, -48245723.0]), ('{"c2":{}}', [-10755380.0, 60433.0, 4047617.0, -98905.0, -993.0, NULL, 93055562.0, 917699.0, NULL]), ('{"c0":true,"c1":"g=n4"}', [752.0, 1.0, 96.0]), ('{"c3":false}', [NULL, 4039255.0, -88372248.0, 1256111069.0, 2383.0]); +INSERT INTO t1 (c0, c1) VALUES ('{"c2":["7AKy`Zh",[[-94]]],"c0":"&A:2ho","c3":[[{}],{"c2":257}]}', [-7451549.0, 633.0, 7009760932.0, NULL, NULL]), ('{"c2":65}', [-12226902.0]), ('{"c1":[{"c2":{}},"Tk26#E)"],"c3":"","c3":-608}', [-342709852.0, -6359192.0, -13.0, 403.0, 88126338.0, -31192275.0, -5.0, -4840.0, -82845285.0]); +INSERT INTO t0 
(c0, c1) VALUES ('{"c0":[":+k,"]}', [-265468152.0, 96787546.0, -8980013521.0, 9164448735.0, -1447.0]), ('{"c0":[[{}]],"c0":{"c0":973,"c0":-345}}', [50.0, NULL, 805297790.0, 3038967.0, 7847438.0, 5716507241.0, 26414475.0]), ('{"c2":["7/jinz|"]}', NULL), ('{"c2":972}', [10507.0, -9616192.0, 595.0, -6102996.0, NULL, -17.0]); +INSERT INTO t1 (c0, c1) VALUES ('{"c3":{},"c2":[true,"US","a.;lxbvH1w"],"c3":{}}', [NULL, -5633149870.0, -1557199.0, -686626.0, NULL, 22726.0, 9771277778.0, -684868.0]), ('{"c3":{}}', [-502.0, 16260784.0, -584.0, 2265729.0, 1.0, 3768742.0]), ('{"c1":-570}', []), ('{"c1":[[312]],"c1":-27}', [60.0, 4.0, -3054.0, -216.0, 85786.0, -16407500.0, 1.0, -64565119.0]), ('{"c2":{"c1":"CM0tk"}}', [-3571176.0, -24519.0, 757451.0]), ('{"c1":767,"c3":421}', [-6116211.0, NULL, -77835774.0]), ('{"c0":805}', [5345543778.0, -6493852.0, 461884.0, -3158812.0, -3.0, 1.0]); +INSERT INTO t0 (c0, c1) VALUES ('{"c1":669,"c1":[{"c3":-101},{"c2":[443]}]}', [NULL, -50848.0, 9270050424.0, 439285082.0, 4991131460.0, 5324167069.0]), ('{"c1":{"c0":654}}', [18184400.0, 1165.0, -9067743190.0, 55008.0, 84573.0, 312777.0, -38.0, -180.0]), (NULL, [-51431.0, -205.0, 6391704.0, -3531393554.0, 4.0, -445378.0, 4499335205.0]), ('{"c2":-973}', [-13697135.0, -3232582571.0, 5063774471.0, -671011.0, 1882007.0, -94.0, -42828350.0, -9.0]), ('{"c0":[{}]}', []), ('{"c0":[961,"FE"],"c2":-74}', [-149626420.0]), ('{"c1":[-936,false]}', [-5436.0, -4267685.0, -9337344399.0, 90404.0, -24037337.0]), ('{"c1":[null,{}]}', [-50821332.0]), ('{"c0":"sC06!j0Y,W"}', [4834282.0, -863431.0, -535818460.0, 9592396.0]); +INSERT INTO t1 (c0, c1) VALUES ('{"c2":null,"c1":"bo^v6"}', [-1719.0, -16074.0, -3.0]), ('{"c0":{"c0":{}}}', [-3826.0, 2.0, 160017.0, 19500513.0, -8.0, -739458.0, NULL, 4420975388.0, -5230674.0]), ('{"c1":{"c2":326,"c3":{"c2":[-66]}}}', [-29.0, 742516.0, -6328.0, NULL, -1.0, 3.0, 877215.0]), ('{"c1":[{"c1":{}}]}', [168872177.0, 48258375.0, -6983476.0, -7633.0, 1.0]), ('{"c3":{"c0":306}}', [-64221197.0, NULL, NULL, -3753326.0, -10665.0]), ('{"c1":[";~R&R2Eb9o","|abDlI``-j"]}', [NULL, 8.0, 697608174.0, 323490017.0]), ('{"c2":[[["y(4:erKU/(",423],[null]],[false,[-137]],-282],"c0":937}', []), ('{"c0":{"c3":true}}', []); +INSERT INTO t1 (c0, c1) VALUES ('{"c2":{}}', [-702.0]), ('{"c2":true}', [8756332921.0, 1128192142.0]), ('{"c3":"iNGbzf","c1":{}}', [-442.0, 1439.0, -58.0, -6321.0, 9803746.0, -98.0]), ('{"c2":{"c0":[true,25],"c3":-887},"c3":true}', []), ('{"c3":[{"c3":[-568,true,""],"c1":{}},[{},{}],{"c2":-755}],"c0":{"c3":{"c3":null,"c3":{}},"c0":{"c3":638}}}', [2.0, 96001085.0]), ('{"c0":{}}', [524.0, NULL, -1252951.0, 1017260.0, -81620.0]), ('{"c3":{}}', [6.0]), ('{"c2":{"c0":{},"c0":{}}}', [-578.0, -6053615.0, -927647.0, 55.0, 29276.0]); +INSERT INTO t1 (c0, c1) VALUES ('{"c1":{"c2":{},"c0":{}},"c1":{"c0":{}}}', NULL), ('{"c2":{"c2":{"c2":[true,null],"c2":{}}}}', NULL), ('{"c0":[null],"c1":{}}', [58.0, 630440989.0, -64846.0, -7344.0, -220570.0, -2.0, -1.0]), ('{"c3":{}}', [-28.0, 113441645.0]), ('{"c0":null}', [-6.0, -80605.0]), ('{"c1":null}', [NULL, 68176530.0]); +INSERT INTO t1 (c0, c1) VALUES ('{"c2":"O(w1RrE","c3":598,"c1":{"c2":{"c2":-476,"c1":{}}}}', []), ('{"c2":{"c1":[{},{},-517],"c1":{"c3":{},"c0":-392,"c3":"A7_a"},"c0":"Lcuchjta"},"c0":-939}', [-592.0, -21901.0, -19.0, -268264638.0, 43.0, -4676673989.0, -9055.0, -44.0, -769.0]), ('{"c2":-500}', []), ('{"c3":["Yf-{*M,Z[b"],"c3":[[[false,true],-23],{"c1":{}}]}', [-4.0, -76.0, -1834.0, 116114.0]), ('{"c1":{"c3":{"c0":{}},"c2":{"c2":{},"c1":[-766,":o;o]B@b 
5"],"c2":["[ZL@tVniT😂"]}},"c2":"G","c3":true}', [-424454555.0, 464554127.0, -271.0, -6767008.0]), ('{"c0":719}', [621.0, -640.0]), ('{"c3":"T4Wz"}', [-511.0, -1.0, -83925131.0, 264491.0, -1.0]), ('{"c1":[["i]6yMcs|cB",true],{}]}', [-6.0, 83219.0, -6735.0, 192018793.0, 1956.0, -9573927.0, 84699.0, 54263916.0, 631.0]), ('{"c3":{"c0":74}}', [1206189.0, -7592480392.0, -93161125.0, 817865670.0]); +INSERT INTO t1 (c0, c1) VALUES ('{"c3":[-546]}', [2.0, NULL, -1326235.0]), ('{"c3":672}', []), ('{"c0":{}}', [7.0, 59133.0, -56738201.0, -49026173.0, -81121.0]), ('{"c0":442}', [-8408.0, 691.0, -7.0, -253.0]), ('{"c3":{},"c1":{}}', [4931930.0, -7377.0, 158.0, 36156.0, 803877.0, NULL, NULL, 62.0, -9846.0]), (NULL, [-1758064.0, 4290.0, 4775.0, NULL, 22.0, -439.0]), ('{"c2":["",136],"c0":"ib"}', [2645931.0, -674758808.0, 5014674.0, 76.0, -1355423029.0, -7520419854.0, -6.0, 78413978.0, -4011.0]), ('{"c1":{}}', [NULL, -544875204.0]), ('{"c3":[[170]]}', [NULL, NULL, 73890.0]), ('{"c1":{}}', [183547.0, 93516376.0, 5.0, -720.0, -749201.0, 123727322.0, -65.0]); +INSERT INTO t0 (c0, c1) VALUES ('{"c1":"","c0":274}', NULL), ('{"c0":[":0pN9k*W"]}', [60.0, 25.0, 6.0, 9520.0, 90466666.0, -3.0]); +INSERT INTO t0 (c0, c1) VALUES ('{"c0":{"c3":{"c1":[null]}},"c2":{"c0":92}}', NULL), ('{"c2":{"c2":[true]}}', [NULL, -95331.0, NULL, 1308199.0, NULL]), ('{"c1":[[{}]]}', [276463640.0, 718712799.0, -50123.0, -12043.0, NULL]); +INSERT INTO t0 (c0, c1) VALUES ('{"c3":{"c3":[false]}}', [-260.0, NULL, -1.0, -40.0]), (NULL, []), ('{"c3":{"c3":[{}]}}', [-3117135934.0, 173.0]), ('{"c1":{"c3":[[-112]],"c1":"%nI"}}', [-2510.0]), ('{"c3":{}}', [-638201656.0, NULL, 18.0, 56925070.0, -6815.0, -869.0, -36617736.0]), ('{"c2":["X"]}', NULL), ('{"c0":{},"c3":[null,"*3QZc8",true]}', [-1.0, 84.0, -819479844.0]), ('{"c2":{"c2":true}}', [-107.0, NULL]), ('{"c3":true}', [-278665.0, 116.0, 18.0, 31965.0, 5711148.0, -8234.0, NULL, -19369679.0]); +INSERT INTO t1 (c0, c1) VALUES ('{"c2":[-225]}', [1.0, 1510841132.0, -12.0, 1307.0, -4483.0, 55.0, 9549.0]), ('{"c1":[{"c0":-728}],"c2":{"c2":[-958,{}],"c3":[true]}}', [-4053.0, -876356.0, NULL]), ('{"c3":[[[24]]],"c0":{"c1":401,"c2":{"c3":[483,null],"c1":-83},"c1":{"c1":-203}},"c3":-680}', [NULL]), ('{"c0":{}}', []), ('{"c2":{}}', [-1707859140.0, -5.0]); +INSERT INTO t0 (c0, c1) VALUES ('{"c1":false,"c1":-360,"c3":-739}', [-3868.0, 548174539.0, 78824.0, NULL, 964751.0]), ('{"c3":{"c1":-751}}', [3225.0, -333274171.0]), ('{"c3":[{"c3":false},[{}]],"c1":{"c1":{"c0":{},"c0":{},"c3":true},"c2":{"c1":{}},"c0":16},"c3":{"c1":[{},577,{}],"c0":{"c1":false,"c2":{},"c3":{}},"c1":{"c0":{},"c1":{},"c0":"Q9}f*"}}}', [-146.0, NULL, -1984141.0, -5535507413.0]), ('{"c1":[[["CYa"]]],"c1":[[892]],"c1":{"c0":null}}', []), ('{"c1":["__C`X ;Oy4"]}', [68158746.0, -173.0, 12.0, -5.0, -8881621.0, 1822742.0, 752262442.0, -97340.0]), ('{"c3":935,"c2":[-999]}', [-212414562.0]), ('{"c1":false}', []); +INSERT INTO t0 (c0, c1) VALUES ('{"c3":"w=v%C"}', [-133239.0, 41893484.0]), ('{"c0":"?D.B#["}', [-660565014.0, -3.0, 1778026873.0, -12892.0, 37295.0, -8.0, -4049.0]); +INSERT INTO t1 (c0, c1) VALUES ('{"c0":96}', [-9486418055.0, 1.0, -19153.0, -3330.0]), ('{"c1":{"c1":""}}', [-120.0]), ('{"c3":{"c2":[false,"sT"],"c2":{}},"c2":{"c0":[null],"c2":["t0-}.Dm",119]}}', [4008130576.0, -6381371.0, 660095684.0, -892497.0, -76.0, -811584704.0, NULL, 16359874.0, -315983.0]), ('{"c3":{"c1":{"c0":{}}}}', [-1086.0]), ('{"c2":{"c3":"f"}}', [3064910.0]), (NULL, [-51357.0, 8319955.0]), ('{"c1":false}', [NULL, 12020.0, 44851173.0, 89.0]), 
('{"c3":[false]}', [4.0, -361122.0]), ('{"c2":[[975]],"c1":[505]}', [-833.0]), ('{"c0":{"c3":["e~"]}}', []); +INSERT INTO t0 (c0, c1) VALUES ('{"c0":{}}', [-2664277.0, NULL]), ('{"c3":{}}', [490623582.0, 2.0, -77004.0, -1101.0, -1573.0, 5.0]), ('{"c2":"!Xs.wZ{>^B"}', [8571380046.0, -27.0, 1.0, -29.0, -45787234.0]), ('{"c3":{}}', NULL), ('{"c3":[[{}],"n<>M9w9"],"c3":[[[-565]],true]}', [-4037092.0, -27.0, NULL, 8364633.0, 120211.0, -800861.0, -3.0, 656171602.0, 1480.0]); +INSERT INTO t1 (c0, c1) VALUES ('{"c2":"O<D#Igat","c2":[{},{}]}', [1559.0, NULL, -19.0, -39806978.0, NULL, -1.0]), ('{"c1":{"c1":"2DOO+@/x","c3":-715},"c2":[414,{}]}', [8889219.0, 164896.0, 9.0]), ('{"c0":[-164,true]}', [56343624.0, -22.0, -1587835977.0, -886080615.0, -491768.0]), ('{"c2":[441,-104],"c2":[-326]}', [NULL, 58478.0, 10412256.0, -87126654.0, 4.0]), ('{"c1":[{"c2":[true]},[{}]],"c1":[{"c0":[true]}]}', [8737658.0]), ('{"c0":["&}9)zR",699]}', [-177497.0, -9588529.0, -5494693.0, -605197275.0]); +INSERT INTO t1 (c0, c1) VALUES ('{"c0":[[[false]]]}', [1093795.0, -9.0]), ('{"c0":[["Tbk{JjZbH"],[-448]],"c1":[[-173,"Ub","1smY"],[true,false],-928]}', [79264.0, -5942.0, 6489268644.0]); +INSERT INTO t1 (c0, c1) VALUES ('{"c3":[null,486]}', [6.0, -94703.0, 5934259709.0, 2787468781.0]), ('{"c3":"V@zzO0","c0":[true,[[-13]]]}', NULL), ('{"c3":948}', [NULL, 290646705.0, -9509830440.0, -8.0, 398009.0, -778839.0, NULL, NULL]); +INSERT INTO t1 (c0, c1) VALUES ('{"c3":[[[true]]]}', [13.0, -137995.0, 178221101.0, 7831579.0, -71.0, 5717989341.0]), ('{"c1":[[["Rg?sQ`"]]]}', [6385881116.0]), ('{"c3":{"c3":-855,"c1":{"c3":[-636,"dTB7cTRW"],"c0":{},"c0":[349,""]},"c1":[[-474],{},"sD_N:$,f"]},"c1":{"c3":null,"c0":{"c0":null,"c2":{},"c2":";jq;&0"}}}', [91556.0, 3.0, 98551853.0, -70963.0, -9.0, -972.0]), ('{"c2":[{"c1":{},"c3":"BESfLf"},{"c2":"E[]O"}]}', []), ('{"c2":[-214]}', [886886482.0, -147518.0]), ('{"c0":[315]}', [-16044.0]), ('{"c0":112}', [2040.0]), ('{"c3":{"c2":"n`y)6f"}}', [-8.0, -9436780.0]); +INSERT INTO t1 (c0, c1) VALUES ('{"c0":[572]}', [-88.0, 418510.0, 634661.0, -21968036.0, 4679032593.0, 24249254.0, -65293468.0, 5844420.0]), ('{"c2":[{}]}', [-45461.0, -10839002.0, NULL, 79.0, -598357.0, -962.0]), ('{"c2":{"c1":919,"c0":{}}}', [96561.0, 4925250.0, 972.0, -900104.0, -753.0, NULL, 5880.0]), ('{"c0":[-978],"c2":{}}', [-49801790.0, -4729553.0]), (NULL, [NULL, 21.0, 235044.0, -193.0, -4871144646.0, 52.0, 69832.0, -38731.0, NULL]), ('{"c0":{}}', [-37.0]), ('{"c2":"e(b^QdQA","c0":{"c2":[[false]]}}', [817.0, -915.0, -70628.0, 81.0, -339.0, 631594.0, NULL, -984.0, 818940950.0]), ('{"c0":true,"c3":{"c1":null},"c2":[767,{}]}', [8708779778.0, -130783.0]), ('{"c2":-131}', [-87997.0, -27054.0, 187.0, NULL, 807390381.0, NULL, 18.0, -163140.0]), ('{"c1":[[42]]}', [-266584.0, -893661.0, 1869508617.0, 7.0, 531402010.0, -9164798497.0, -78.0, 9727950289.0]); +SELECT 1 FROM t1 CROSS JOIN t0 FORMAT Null; +DROP TABLE t1; +DROP TABLE t0; diff --git a/parser/testdata/03246_range_literal_replacement_works/ast.json b/parser/testdata/03246_range_literal_replacement_works/ast.json new file mode 100644 index 000000000..2c4d62f5b --- /dev/null +++ b/parser/testdata/03246_range_literal_replacement_works/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001202616, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03246_range_literal_replacement_works/metadata.json 
b/parser/testdata/03246_range_literal_replacement_works/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03246_range_literal_replacement_works/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03246_range_literal_replacement_works/query.sql b/parser/testdata/03246_range_literal_replacement_works/query.sql new file mode 100644 index 000000000..57bd369e6 --- /dev/null +++ b/parser/testdata/03246_range_literal_replacement_works/query.sql @@ -0,0 +1,10 @@ +SET input_format_values_interpret_expressions = 0; +SET input_format_values_accurate_types_of_literals = 0; + +CREATE TABLE IF NOT EXISTS 03246_range_literal_replacement_works (id UInt8) Engine=Memory; + +INSERT INTO 03246_range_literal_replacement_works VALUES (1 BETWEEN 0 AND 2); + +SELECT * FROM 03246_range_literal_replacement_works; + +DROP TABLE IF EXISTS 03246_range_literal_replacement_works; diff --git a/parser/testdata/03246_toStartOfInterval_date_timezone_bug/ast.json b/parser/testdata/03246_toStartOfInterval_date_timezone_bug/ast.json new file mode 100644 index 000000000..d1ebb9ae0 --- /dev/null +++ b/parser/testdata/03246_toStartOfInterval_date_timezone_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001191103, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03246_toStartOfInterval_date_timezone_bug/metadata.json b/parser/testdata/03246_toStartOfInterval_date_timezone_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03246_toStartOfInterval_date_timezone_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03246_toStartOfInterval_date_timezone_bug/query.sql b/parser/testdata/03246_toStartOfInterval_date_timezone_bug/query.sql new file mode 100644 index 000000000..0056877ab --- /dev/null +++ b/parser/testdata/03246_toStartOfInterval_date_timezone_bug/query.sql @@ -0,0 +1,3 @@ +SET session_timezone = 'Europe/Amsterdam'; + +SELECT toStartOfInterval(CAST('2024-10-26', 'Date'), toIntervalMonth(1), CAST('2023-01-15', 'Date')); diff --git a/parser/testdata/03247_generic_arrayMin_arrayMax_fixes/ast.json b/parser/testdata/03247_generic_arrayMin_arrayMax_fixes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03247_generic_arrayMin_arrayMax_fixes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03247_generic_arrayMin_arrayMax_fixes/metadata.json b/parser/testdata/03247_generic_arrayMin_arrayMax_fixes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03247_generic_arrayMin_arrayMax_fixes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03247_generic_arrayMin_arrayMax_fixes/query.sql b/parser/testdata/03247_generic_arrayMin_arrayMax_fixes/query.sql new file mode 100644 index 000000000..2bdc9b2ae --- /dev/null +++ b/parser/testdata/03247_generic_arrayMin_arrayMax_fixes/query.sql @@ -0,0 +1,26 @@ +-- { echoOn } +-- https://github.com/ClickHouse/ClickHouse/issues/68895 +SELECT arrayMax(x -> toFixedString('.', 1), []); + +-- https://github.com/ClickHouse/ClickHouse/issues/69600 +SELECT arrayMax(x -> (-x), [1, 2, 4]) AS res; +SELECT arrayMax(x -> toUInt16(-x), [1, 2, 4]) AS res; + +-- https://github.com/ClickHouse/ClickHouse/pull/69640 +SELECT arrayMin(x1 -> (x1 * toNullable(-1)), materialize([1, 
2, 3])); +SELECT arrayMin(x1 -> x1 * -1, [1,2,3]); + +DROP TABLE IF EXISTS test_aggregation_array; +CREATE TABLE test_aggregation_array (x Array(Int)) ENGINE=MergeTree() ORDER by tuple(); +INSERT INTO test_aggregation_array VALUES ([1,2,3,4,5,6]), ([]), ([1,2,3]); + +SELECT [arrayMin(x1 -> (x1 * materialize(-1)), [toNullable(toUInt256(0)), materialize(4)])], arrayMin([arrayMin([0])]) FROM test_aggregation_array GROUP BY arrayAvg([1]), [0, toUInt256(8)] WITH CUBE SETTINGS enable_analyzer = 1; +SELECT [arrayMin([3, arrayMin([toUInt128(8)]), 4, 5]), arrayMax([materialize(1)]), arrayMin([arrayMax([1]), 2]), 2], arrayMin([0, toLowCardinality(8)]), 2, arrayMax(x1 -> (x1 * -1), x) FROM test_aggregation_array; + +select arrayMax(x -> x.1, [(1, 'a'), (0, 'b')]); +select arrayMin(x -> x.2, [(1, 'a'), (0, 'b')]); + +-- Extra validation of generic arrayMin/arrayMax +WITH [(1,2),(1,3)] AS t SELECT arrayMin(t), arrayMax(t); +WITH [map('a', 1, 'b', 2), map('a',1,'b',3)] AS t SELECT arrayMin(t), arrayMax(t); +WITH [map('a', 1, 'b', 2, 'c', 10), map('a',1,'b',3, 'c', 0)] AS t SELECT arrayMin(x -> x['c'], t), arrayMax(x -> x['c'], t); diff --git a/parser/testdata/03247_json_extract_lc_nullable/ast.json b/parser/testdata/03247_json_extract_lc_nullable/ast.json new file mode 100644 index 000000000..ad93d2d12 --- /dev/null +++ b/parser/testdata/03247_json_extract_lc_nullable/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function JSONExtract (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '{\"a\" : 128}'" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal 'LowCardinality(Nullable(Int128))'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001265259, + "rows_read": 9, + "bytes_read": 350 + } +} diff --git a/parser/testdata/03247_json_extract_lc_nullable/metadata.json b/parser/testdata/03247_json_extract_lc_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03247_json_extract_lc_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03247_json_extract_lc_nullable/query.sql b/parser/testdata/03247_json_extract_lc_nullable/query.sql new file mode 100644 index 000000000..bac1e34c1 --- /dev/null +++ b/parser/testdata/03247_json_extract_lc_nullable/query.sql @@ -0,0 +1,2 @@ +select JSONExtract('{"a" : 128}', 'a', 'LowCardinality(Nullable(Int128))'); + diff --git a/parser/testdata/03247_materialized_view_select_intersect/ast.json b/parser/testdata/03247_materialized_view_select_intersect/ast.json new file mode 100644 index 000000000..62e5e7fc3 --- /dev/null +++ b/parser/testdata/03247_materialized_view_select_intersect/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery v0 (children 2)" + }, + { + "explain": " Identifier v0" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectIntersectExceptQuery (children 2)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + 
"explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.00099682, + "rows_read": 15, + "bytes_read": 580 + } +} diff --git a/parser/testdata/03247_materialized_view_select_intersect/metadata.json b/parser/testdata/03247_materialized_view_select_intersect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03247_materialized_view_select_intersect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03247_materialized_view_select_intersect/query.sql b/parser/testdata/03247_materialized_view_select_intersect/query.sql new file mode 100644 index 000000000..72efac0ce --- /dev/null +++ b/parser/testdata/03247_materialized_view_select_intersect/query.sql @@ -0,0 +1 @@ +CREATE MATERIALIZED VIEW v0 AS (SELECT 1) INTERSECT (SELECT 1); --{serverError QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW} diff --git a/parser/testdata/03247_object_column_copy/ast.json b/parser/testdata/03247_object_column_copy/ast.json new file mode 100644 index 000000000..56f641d94 --- /dev/null +++ b/parser/testdata/03247_object_column_copy/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001229942, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03247_object_column_copy/metadata.json b/parser/testdata/03247_object_column_copy/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03247_object_column_copy/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03247_object_column_copy/query.sql b/parser/testdata/03247_object_column_copy/query.sql new file mode 100644 index 000000000..d254b5c55 --- /dev/null +++ b/parser/testdata/03247_object_column_copy/query.sql @@ -0,0 +1,8 @@ +SET enable_json_type = 1; +SET allow_experimental_variant_type = 1; +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Int) ENGINE = Memory(); +INSERT INTO t0 (c0) VALUES (1); +ALTER TABLE t0 (ADD COLUMN c1 JSON(c1 Variant(Int,JSON(c1 Int)))); +INSERT INTO t0 (c0, c1) VALUES (2, '{"c1":1}'::JSON(c1 Int)); +DROP TABLE t0; diff --git a/parser/testdata/03247_pr_local_plan_non_constant_in_source/ast.json b/parser/testdata/03247_pr_local_plan_non_constant_in_source/ast.json new file mode 100644 index 000000000..5c49b72e8 --- /dev/null +++ b/parser/testdata/03247_pr_local_plan_non_constant_in_source/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table_3 (children 1)" + }, + { + "explain": " Identifier table_3" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001304357, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/03247_pr_local_plan_non_constant_in_source/metadata.json b/parser/testdata/03247_pr_local_plan_non_constant_in_source/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03247_pr_local_plan_non_constant_in_source/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03247_pr_local_plan_non_constant_in_source/query.sql 
b/parser/testdata/03247_pr_local_plan_non_constant_in_source/query.sql new file mode 100644 index 000000000..bf0f18caf --- /dev/null +++ b/parser/testdata/03247_pr_local_plan_non_constant_in_source/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS table_3 SYNC; + +CREATE TABLE table_3 (uid UUID, date DateTime('Asia/Kamchatka')) ENGINE = ReplicatedMergeTree('/pr_local_plan/{database}/table_3', 'r1') ORDER BY date; + +INSERT INTO table_3 VALUES ('4c36abda-8bd8-11eb-8204-005056aa8bf6', '2021-03-24 01:04:27'), ('4c408902-8bd8-11eb-8204-005056aa8bf6', '2021-03-24 01:04:27'), ('4c5bf20a-8bd8-11eb-8204-005056aa8bf6', '2021-03-24 01:04:27'), ('4c61623a-8bd8-11eb-8204-005056aa8bf6', '2021-03-24 01:04:27'), ('4c6efab2-8bd8-11eb-a952-005056aa8bf6', '2021-03-24 01:04:27'); + +SELECT + uid, + date, + toDate(date) = toDate('2021-03-24') AS res +FROM table_3 +WHERE res = 1 +ORDER BY + uid ASC, + date ASC +SETTINGS enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_local_plan = 1; + +DROP TABLE table_3 SYNC; diff --git a/parser/testdata/03248_invalid_where/ast.json b/parser/testdata/03248_invalid_where/ast.json new file mode 100644 index 000000000..c95f713dd --- /dev/null +++ b/parser/testdata/03248_invalid_where/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function lambda (alias lambda_1) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier lambda_1 (alias lambda_2)" + }, + { + "explain": " Literal Array_[UInt64_1, UInt64_2, UInt64_3]" + }, + { + "explain": " Function arrayMap (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier lambda_2" + }, + { + "explain": " Literal Array_['1', '2', '3']" + }, + { + "explain": " Identifier lambda_2" + }, + { + "explain": " Set" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001413503, + "rows_read": 23, + "bytes_read": 901 + } +} diff --git a/parser/testdata/03248_invalid_where/metadata.json b/parser/testdata/03248_invalid_where/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03248_invalid_where/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03248_invalid_where/query.sql b/parser/testdata/03248_invalid_where/query.sql new file mode 100644 index 000000000..65fcd2fe4 --- /dev/null +++ b/parser/testdata/03248_invalid_where/query.sql @@ -0,0 +1,2 @@ +WITH x -> toString(x) AS lambda_1 SELECT arrayMap(lambda_1 AS lambda_2, [1, 2, 3]), arrayMap(lambda_2, ['1', '2', '3']) WHERE lambda_2 SETTINGS enable_analyzer = 0; -- { serverError UNEXPECTED_EXPRESSION } +WITH x -> toString(x) AS lambda_1 SELECT arrayMap(lambda_1 AS lambda_2, [1, 2, 3]), 
arrayMap(lambda_2, ['1', '2', '3']) WHERE lambda_2 SETTINGS enable_analyzer = 1; -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/03248_max_parts_to_move/ast.json b/parser/testdata/03248_max_parts_to_move/ast.json new file mode 100644 index 000000000..1914b66c7 --- /dev/null +++ b/parser/testdata/03248_max_parts_to_move/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001092718, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03248_max_parts_to_move/metadata.json b/parser/testdata/03248_max_parts_to_move/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03248_max_parts_to_move/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03248_max_parts_to_move/query.sql b/parser/testdata/03248_max_parts_to_move/query.sql new file mode 100644 index 000000000..aed41021e --- /dev/null +++ b/parser/testdata/03248_max_parts_to_move/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t (x Int32) ENGINE = MergeTree ORDER BY x; +CREATE TABLE t2 (x Int32) ENGINE = MergeTree ORDER BY x; + +SYSTEM STOP MERGES t; + +SET max_insert_block_size = 1; +SET min_insert_block_size_rows = 1; +SET max_block_size = 1; + +SET max_parts_to_move = 5; +INSERT INTO t SELECT number from numbers(10); + +ALTER TABLE t MOVE PARTITION tuple() TO TABLE t2; -- { serverError TOO_MANY_PARTS } + +SET max_parts_to_move = 15; + +ALTER TABLE t MOVE PARTITION tuple() TO TABLE t2; + +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS t2; diff --git a/parser/testdata/03248_with_fill_string_crash/ast.json b/parser/testdata/03248_with_fill_string_crash/ast.json new file mode 100644 index 000000000..f8b14b2a7 --- /dev/null +++ b/parser/testdata/03248_with_fill_string_crash/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery users (children 3)" + }, + { + "explain": " Identifier users" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration date (children 1)" + }, + { + "explain": " DataType DateTime" + }, + { + "explain": " ColumnDeclaration name (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration age (children 1)" + }, + { + "explain": " DataType Int16" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier date" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001203822, + "rows_read": 14, + "bytes_read": 490 + } +} diff --git a/parser/testdata/03248_with_fill_string_crash/metadata.json b/parser/testdata/03248_with_fill_string_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03248_with_fill_string_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03248_with_fill_string_crash/query.sql b/parser/testdata/03248_with_fill_string_crash/query.sql new file mode 100644 index 000000000..ba00640f8 --- /dev/null +++ b/parser/testdata/03248_with_fill_string_crash/query.sql @@ -0,0 +1,7 @@ +CREATE TABLE users (date DateTime, name String, age Int16) 
ENGINE=MergeTree() ORDER BY date; + +INSERT INTO users VALUES ('2024-01-01', 'John', 33), + ('2024-02-01', 'Ksenia', 48), + ('2024-02-15', 'Alice', 50); + +SELECT * FROM users ORDER BY date WITH FILL TO '2024-02-17' STEP toIntervalHour(1); -- { serverError INVALID_WITH_FILL_EXPRESSION } diff --git a/parser/testdata/03248_with_insert/ast.json b/parser/testdata/03248_with_insert/ast.json new file mode 100644 index 000000000..186bc0d45 --- /dev/null +++ b/parser/testdata/03248_with_insert/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery x (children 1)" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001306538, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03248_with_insert/metadata.json b/parser/testdata/03248_with_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03248_with_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03248_with_insert/query.sql b/parser/testdata/03248_with_insert/query.sql new file mode 100644 index 000000000..4f2a26e0e --- /dev/null +++ b/parser/testdata/03248_with_insert/query.sql @@ -0,0 +1,49 @@ +DROP TABLE IF EXISTS x; + +CREATE TABLE x ENGINE = Log AS SELECT * FROM numbers(0); + +SYSTEM STOP MERGES x; + +WITH y AS + ( + SELECT * + FROM numbers(10) + ) +INSERT INTO x +SELECT * +FROM y +INTERSECT +SELECT * +FROM numbers(5); + +WITH y AS + ( + SELECT * + FROM numbers(10) + ) +INSERT INTO x +SELECT * +FROM numbers(5) +INTERSECT +SELECT * +FROM y; + +SELECT * FROM x; + +DROP TABLE x; + +CREATE TABLE x (d date) ENGINE = Log; + +WITH y AS + ( + SELECT + number, + date_add(YEAR, number, toDate('2025-01-01')) AS new_date + FROM numbers(10) + ) +INSERT INTO x +SELECT y.new_date FROM y; + +SELECT * FROM x; + +DROP TABLE x; \ No newline at end of file diff --git a/parser/testdata/03249_dynamic_alter_consistency/ast.json b/parser/testdata/03249_dynamic_alter_consistency/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03249_dynamic_alter_consistency/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03249_dynamic_alter_consistency/metadata.json b/parser/testdata/03249_dynamic_alter_consistency/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03249_dynamic_alter_consistency/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03249_dynamic_alter_consistency/query.sql b/parser/testdata/03249_dynamic_alter_consistency/query.sql new file mode 100644 index 000000000..6cc314a54 --- /dev/null +++ b/parser/testdata/03249_dynamic_alter_consistency/query.sql @@ -0,0 +1,11 @@ +-- Random settings limits: index_granularity=(100, None) + +set allow_experimental_dynamic_type=1; + +drop table if exists test; +create table test (d Dynamic) engine=MergeTree order by tuple() settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; +insert into test select number < 600000 ? 
number::Dynamic : ('str_' || number)::Dynamic from numbers(1000000); +alter table test modify column d Dynamic(max_types=1); +select count(), dynamicType(d), isDynamicElementInSharedData(d) from test group by dynamicType(d), isDynamicElementInSharedData(d) order by count(); +drop table test; + diff --git a/parser/testdata/03250_avoid_prefetch_empty_parts/ast.json b/parser/testdata/03250_avoid_prefetch_empty_parts/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03250_avoid_prefetch_empty_parts/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03250_avoid_prefetch_empty_parts/metadata.json b/parser/testdata/03250_avoid_prefetch_empty_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03250_avoid_prefetch_empty_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03250_avoid_prefetch_empty_parts/query.sql b/parser/testdata/03250_avoid_prefetch_empty_parts/query.sql new file mode 100644 index 000000000..f2e0f6303 --- /dev/null +++ b/parser/testdata/03250_avoid_prefetch_empty_parts/query.sql @@ -0,0 +1,15 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS 03250_avoid_prefetch; +CREATE table 03250_avoid_prefetch(id UInt64, string LowCardinality(String)) +ENGINE = MergeTree +ORDER BY id +SETTINGS enable_vertical_merge_algorithm = 1, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_remote_filesystem_prefetch = 1, +vertical_merge_algorithm_min_bytes_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 1, +min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1, remove_empty_parts = 0, storage_policy = 's3_no_cache'; + +INSERT INTO 03250_avoid_prefetch VALUES (1, 'test'); +ALTER TABLE 03250_avoid_prefetch DELETE WHERE id = 1; +INSERT INTO 03250_avoid_prefetch VALUES (2, 'test'); +OPTIMIZE TABLE 03250_avoid_prefetch FINAL; + diff --git a/parser/testdata/03250_ephemeral_comment/ast.json b/parser/testdata/03250_ephemeral_comment/ast.json new file mode 100644 index 000000000..21912d5af --- /dev/null +++ b/parser/testdata/03250_ephemeral_comment/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000998089, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03250_ephemeral_comment/metadata.json b/parser/testdata/03250_ephemeral_comment/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03250_ephemeral_comment/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03250_ephemeral_comment/query.sql b/parser/testdata/03250_ephemeral_comment/query.sql new file mode 100644 index 000000000..906417b9c --- /dev/null +++ b/parser/testdata/03250_ephemeral_comment/query.sql @@ -0,0 +1,11 @@ +drop table if exists test; +CREATE TABLE test ( + `start_s` UInt32 EPHEMERAL COMMENT 'start UNIX time' , + `start_us` UInt16 EPHEMERAL COMMENT 'start microseconds', + `finish_s` UInt32 EPHEMERAL COMMENT 'finish UNIX time', + `finish_us` UInt16 EPHEMERAL COMMENT 'finish microseconds', + `captured` DateTime MATERIALIZED fromUnixTimestamp(start_s), + `duration` Decimal32(6) MATERIALIZED finish_s - start_s + (finish_us - start_us)/1000000 +) +ENGINE Null; +drop table if exists test; diff --git a/parser/testdata/03250_json_group_by_sub_object_subcolumn/ast.json 
b/parser/testdata/03250_json_group_by_sub_object_subcolumn/ast.json new file mode 100644 index 000000000..6edf357a2 --- /dev/null +++ b/parser/testdata/03250_json_group_by_sub_object_subcolumn/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001357498, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03250_json_group_by_sub_object_subcolumn/metadata.json b/parser/testdata/03250_json_group_by_sub_object_subcolumn/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03250_json_group_by_sub_object_subcolumn/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03250_json_group_by_sub_object_subcolumn/query.sql b/parser/testdata/03250_json_group_by_sub_object_subcolumn/query.sql new file mode 100644 index 000000000..bf4bf5b7e --- /dev/null +++ b/parser/testdata/03250_json_group_by_sub_object_subcolumn/query.sql @@ -0,0 +1,9 @@ +SET enable_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; + +drop table if exists test; +create table test (json JSON(max_dynamic_paths = 20, `a.b.c` UInt32)) engine = Memory; +insert into test select toJSONString(map('a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) from numbers(2); +select json.^a from test group by json.^a order by toString(json.^a); +drop table test; diff --git a/parser/testdata/03251_unaligned_window_function_state/ast.json b/parser/testdata/03251_unaligned_window_function_state/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03251_unaligned_window_function_state/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03251_unaligned_window_function_state/metadata.json b/parser/testdata/03251_unaligned_window_function_state/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03251_unaligned_window_function_state/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03251_unaligned_window_function_state/query.sql b/parser/testdata/03251_unaligned_window_function_state/query.sql new file mode 100644 index 000000000..2ff75f61f --- /dev/null +++ b/parser/testdata/03251_unaligned_window_function_state/query.sql @@ -0,0 +1,3 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/70569 +-- Reproduces UBSAN alert about misaligned address +SELECT anyLast(id), anyLast(time), exponentialTimeDecayedAvg(10)(id, time) FROM values('id Int8, time DateTime', (1,1),(1,2),(2,3),(3,3),(3,5)); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03252_check_number_of_arguments_for_dynamic/ast.json b/parser/testdata/03252_check_number_of_arguments_for_dynamic/ast.json new file mode 100644 index 000000000..4330155c8 --- /dev/null +++ b/parser/testdata/03252_check_number_of_arguments_for_dynamic/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001235675, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03252_check_number_of_arguments_for_dynamic/metadata.json b/parser/testdata/03252_check_number_of_arguments_for_dynamic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03252_check_number_of_arguments_for_dynamic/metadata.json @@ -0,0 +1 
@@ +{"todo": true} diff --git a/parser/testdata/03252_check_number_of_arguments_for_dynamic/query.sql b/parser/testdata/03252_check_number_of_arguments_for_dynamic/query.sql new file mode 100644 index 000000000..aefbb3dc5 --- /dev/null +++ b/parser/testdata/03252_check_number_of_arguments_for_dynamic/query.sql @@ -0,0 +1,18 @@ +set enable_analyzer=1; +set enable_json_type=1; + +CREATE TABLE t +( + `a` JSON +) +ENGINE = MergeTree() +ORDER BY tuple(); + +insert into t values ('{"a":1}'), ('{"a":2.0}'); + +SELECT 1 +FROM +( + SELECT 1 AS c0 +) AS tx +FULL OUTER JOIN t AS t2 ON equals(t2.a.Float32); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/parser/testdata/03252_fill_missed_arrays/ast.json b/parser/testdata/03252_fill_missed_arrays/ast.json new file mode 100644 index 000000000..05d4be103 --- /dev/null +++ b/parser/testdata/03252_fill_missed_arrays/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_fill_arrays (children 1)" + }, + { + "explain": " Identifier t_fill_arrays" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001504751, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/03252_fill_missed_arrays/metadata.json b/parser/testdata/03252_fill_missed_arrays/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03252_fill_missed_arrays/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03252_fill_missed_arrays/query.sql b/parser/testdata/03252_fill_missed_arrays/query.sql new file mode 100644 index 000000000..585cd3706 --- /dev/null +++ b/parser/testdata/03252_fill_missed_arrays/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS t_fill_arrays; + +CREATE TABLE t_fill_arrays +( + `id` String, + `mapCol` Map(String, Array(String)), +) +ENGINE = MergeTree +ORDER BY id +SETTINGS vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 1, min_bytes_for_full_part_storage = 0; + +INSERT INTO t_fill_arrays (id) SELECT hex(number) FROM numbers(10000); + +ALTER TABLE t_fill_arrays ADD COLUMN arrCol Array(String) DEFAULT []; + +INSERT INTO t_fill_arrays (id) SELECT hex(number) FROM numbers(10000); + +SELECT count() FROM t_fill_arrays WHERE NOT ignore(arrCol, mapCol.values); + +OPTIMIZE TABLE t_fill_arrays FINAL; + +DROP TABLE t_fill_arrays; diff --git a/parser/testdata/03252_merge_tree_min_bytes_to_seek/ast.json b/parser/testdata/03252_merge_tree_min_bytes_to_seek/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03252_merge_tree_min_bytes_to_seek/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03252_merge_tree_min_bytes_to_seek/metadata.json b/parser/testdata/03252_merge_tree_min_bytes_to_seek/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03252_merge_tree_min_bytes_to_seek/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03252_merge_tree_min_bytes_to_seek/query.sql b/parser/testdata/03252_merge_tree_min_bytes_to_seek/query.sql new file mode 100644 index 000000000..2dcf0f54b --- /dev/null +++ b/parser/testdata/03252_merge_tree_min_bytes_to_seek/query.sql @@ -0,0 +1,26 @@ + +-- Disable query condition cache because it affects the `SelectedRanges` metric. 
+SET use_query_condition_cache = 0; + +DROP TABLE IF EXISTS t_min_bytes_to_seek; + +CREATE TABLE t_min_bytes_to_seek (id UInt64) +ENGINE = MergeTree +ORDER BY id SETTINGS index_granularity = 128, index_granularity_bytes = '1M'; + +INSERT INTO t_min_bytes_to_seek SELECT number FROM numbers(10000); + +SELECT count() FROM t_min_bytes_to_seek WHERE id IN (10, 1000, 5000, 9000) SETTINGS merge_tree_min_bytes_for_seek = 0; +SELECT count() FROM t_min_bytes_to_seek WHERE id IN (10, 1000, 5000, 9000) SETTINGS merge_tree_min_bytes_for_seek = 1000000000; + +SELECT count() FROM t_min_bytes_to_seek WHERE id IN (10, 1000, 5000, 9000) SETTINGS merge_tree_min_rows_for_seek = 0; +SELECT count() FROM t_min_bytes_to_seek WHERE id IN (10, 1000, 5000, 9000) SETTINGS merge_tree_min_rows_for_seek = 1000000000; + +SYSTEM FLUSH LOGS query_log; + +SELECT ProfileEvents['SelectedRanges'] +FROM system.query_log +WHERE current_database = currentDatabase() AND query LIKE 'SELECT count() FROM t_min_bytes_to_seek%' AND type = 'QueryFinish' +ORDER BY event_time_microseconds; + +DROP TABLE t_min_bytes_to_seek; diff --git a/parser/testdata/03253_getMaxTableNameLength/ast.json b/parser/testdata/03253_getMaxTableNameLength/ast.json new file mode 100644 index 000000000..e5e8c975c --- /dev/null +++ b/parser/testdata/03253_getMaxTableNameLength/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function getMaxTableNameLengthForDatabase (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'default'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001292284, + "rows_read": 7, + "bytes_read": 286 + } +} diff --git a/parser/testdata/03253_getMaxTableNameLength/metadata.json b/parser/testdata/03253_getMaxTableNameLength/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03253_getMaxTableNameLength/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03253_getMaxTableNameLength/query.sql b/parser/testdata/03253_getMaxTableNameLength/query.sql new file mode 100644 index 000000000..6d7abc299 --- /dev/null +++ b/parser/testdata/03253_getMaxTableNameLength/query.sql @@ -0,0 +1,3 @@ +SELECT getMaxTableNameLengthForDatabase('default'); +SELECT getMaxTableNameLengthForDatabase('default21'); +SELECT getMaxTableNameLengthForDatabase(''); -- { serverError INCORRECT_DATA } diff --git a/parser/testdata/03253_group_by_cube_too_many_keys/ast.json b/parser/testdata/03253_group_by_cube_too_many_keys/ast.json new file mode 100644 index 000000000..cadca1e5e --- /dev/null +++ b/parser/testdata/03253_group_by_cube_too_many_keys/ast.json @@ -0,0 +1,256 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery 
(children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 65)" + }, + { + "explain": " Literal '' (alias c0)" + }, + { + "explain": " Literal '' (alias c1)" + }, + { + "explain": " Literal '' (alias c2)" + }, + { + "explain": " Literal '' (alias c3)" + }, + { + "explain": " Literal '' (alias c4)" + }, + { + "explain": " Literal '' (alias c5)" + }, + { + "explain": " Literal '' (alias c6)" + }, + { + "explain": " Literal '' (alias c7)" + }, + { + "explain": " Literal '' (alias c8)" + }, + { + "explain": " Literal '' (alias c9)" + }, + { + "explain": " Literal '' (alias c10)" + }, + { + "explain": " Literal '' (alias c11)" + }, + { + "explain": " Literal '' (alias c12)" + }, + { + "explain": " Literal '' (alias c13)" + }, + { + "explain": " Literal '' (alias c14)" + }, + { + "explain": " Literal '' (alias c15)" + }, + { + "explain": " Literal '' (alias c16)" + }, + { + "explain": " Literal '' (alias c17)" + }, + { + "explain": " Literal '' (alias c18)" + }, + { + "explain": " Literal '' (alias c19)" + }, + { + "explain": " Literal '' (alias c20)" + }, + { + "explain": " Literal '' (alias c21)" + }, + { + "explain": " Literal '' (alias c22)" + }, + { + "explain": " Literal '' (alias c23)" + }, + { + "explain": " Literal '' (alias c24)" + }, + { + "explain": " Literal '' (alias c25)" + }, + { + "explain": " Literal '' (alias c26)" + }, + { + "explain": " Literal '' (alias c27)" + }, + { + "explain": " Literal '' (alias c28)" + }, + { + "explain": " Literal '' (alias c29)" + }, + { + "explain": " Literal '' (alias c30)" + }, + { + "explain": " Literal '' (alias c31)" + }, + { + "explain": " Literal '' (alias c32)" + }, + { + "explain": " Literal '' (alias c33)" + }, + { + "explain": " Literal '' (alias c34)" + }, + { + "explain": " Literal '' (alias c35)" + }, + { + "explain": " Literal '' (alias c36)" + }, + { + "explain": " Literal '' (alias c37)" + }, + { + "explain": " Literal '' (alias c38)" + }, + { + "explain": " Literal '' (alias c39)" + }, + { + "explain": " Literal '' (alias c40)" + }, + { + "explain": " Literal '' (alias c41)" + }, + { + "explain": " Literal '' (alias c42)" + }, + { + "explain": " Literal '' (alias c43)" + }, + { + "explain": " Literal '' (alias c44)" + }, + { + "explain": " Literal '' (alias c45)" + }, + { + "explain": " Literal '' (alias c46)" + }, + { + "explain": " Literal '' (alias c47)" + }, + { + "explain": " Literal '' (alias c48)" + }, + { + "explain": " Literal '' (alias c49)" + }, + { + "explain": " Literal '' (alias c50)" + }, + { + "explain": " Literal '' (alias c51)" + }, + { + "explain": " Literal '' (alias c52)" + }, + { + "explain": " Literal '' (alias c53)" + }, + { + "explain": " Literal '' (alias c54)" + }, + { + "explain": " Literal '' (alias c55)" + }, + { + "explain": " Literal '' (alias c56)" + }, + { + "explain": " Literal '' (alias c57)" + }, + { + "explain": " Literal '' (alias c58)" + }, + { + "explain": " Literal '' (alias c59)" + }, + { + "explain": " Literal '' (alias c60)" + }, + { + "explain": " Literal '' (alias c61)" + }, + { + "explain": " Literal '' (alias c62)" + }, + { + "explain": " Literal '' (alias c63)" + }, + { + "explain": " Literal '' (alias c64)" + } + ], + + "rows": 78, + + "statistics": + { + "elapsed": 0.00173134, + "rows_read": 78, + "bytes_read": 3172 + } +} diff --git a/parser/testdata/03253_group_by_cube_too_many_keys/metadata.json b/parser/testdata/03253_group_by_cube_too_many_keys/metadata.json new file 
mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03253_group_by_cube_too_many_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03253_group_by_cube_too_many_keys/query.sql b/parser/testdata/03253_group_by_cube_too_many_keys/query.sql new file mode 100644 index 000000000..616387d39 --- /dev/null +++ b/parser/testdata/03253_group_by_cube_too_many_keys/query.sql @@ -0,0 +1,2 @@ +SELECT * FROM (SELECT '' AS c0, '' AS c1, '' AS c2, '' AS c3, '' AS c4, '' AS c5, '' AS c6, '' AS c7, '' AS c8, '' AS c9, '' AS c10, '' AS c11, '' AS c12, '' AS c13, '' AS c14, '' AS c15, '' AS c16, '' AS c17, '' AS c18, '' AS c19, '' AS c20, '' AS c21, '' AS c22, '' AS c23, '' AS c24, '' AS c25, '' AS c26, '' AS c27, '' AS c28, '' AS c29, '' AS c30, '' AS c31, '' AS c32, '' AS c33, '' AS c34, '' AS c35, '' AS c36, '' AS c37, '' AS c38, '' AS c39, '' AS c40, '' AS c41, '' AS c42, '' AS c43, '' AS c44, '' AS c45, '' AS c46, '' AS c47, '' AS c48, '' AS c49, '' AS c50, '' AS c51, '' AS c52, '' AS c53, '' AS c54, '' AS c55, '' AS c56, '' AS c57, '' AS c58, '' AS c59, '' AS c60, '' AS c61, '' AS c62, '' AS c63, '' AS c64) +GROUP BY ALL WITH CUBE; -- { serverError TOO_MANY_COLUMNS } diff --git a/parser/testdata/03254_attach_part_order/ast.json b/parser/testdata/03254_attach_part_order/ast.json new file mode 100644 index 000000000..286311d24 --- /dev/null +++ b/parser/testdata/03254_attach_part_order/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001238402, + "rows_read": 2, + "bytes_read": 73 + } +} diff --git a/parser/testdata/03254_attach_part_order/metadata.json b/parser/testdata/03254_attach_part_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_attach_part_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_attach_part_order/query.sql b/parser/testdata/03254_attach_part_order/query.sql new file mode 100644 index 000000000..81439dca0 --- /dev/null +++ b/parser/testdata/03254_attach_part_order/query.sql @@ -0,0 +1,34 @@ +CREATE TABLE test_table +( + dt DateTime, + id UInt32, + url String, + visits UInt32 +) +ENGINE ReplacingMergeTree +ORDER BY (dt, id) +PARTITION BY toYYYYMM(dt); + +SYSTEM STOP merges test_table; + +INSERT INTO test_table VALUES (toDate('2024-10-24'), 1, '/index', 100); +INSERT INTO test_table VALUES (toDate('2024-10-24'), 1, '/index', 101); +INSERT INTO test_table VALUES (toDate('2024-10-24'), 1, '/index', 102); +INSERT INTO test_table VALUES (toDate('2024-10-24'), 1, '/index', 103); +INSERT INTO test_table VALUES (toDate('2024-10-24'), 1, '/index', 104); +INSERT INTO test_table VALUES (toDate('2024-10-24'), 1, '/index', 105); +INSERT INTO test_table VALUES (toDate('2024-10-24'), 1, '/index', 106); +INSERT INTO test_table VALUES (toDate('2024-10-24'), 1, '/index', 107); +INSERT INTO test_table VALUES (toDate('2024-10-24'), 1, '/index', 108); +INSERT INTO test_table VALUES (toDate('2024-10-24'), 1, '/index', 109); +INSERT INTO test_table VALUES (toDate('2024-10-24'), 1, '/index', 110); +INSERT INTO test_table VALUES (toDate('2024-10-24'), 1, '/index', 111); +INSERT INTO test_table VALUES (toDate('2024-10-24'), 1, '/index', 112); +INSERT INTO test_table VALUES (toDate('2024-10-24'), 1, '/index', 113); +INSERT INTO test_table VALUES 
(toDate('2024-10-24'), 1, '/index', 114); +INSERT INTO test_table VALUES (toDate('2024-10-24'), 1, '/index', 115); + +ALTER TABLE test_table DETACH PARTITION 202410; +ALTER TABLE test_table ATTACH PARTITION 202410; + +SELECT id, visits FROM test_table FINAL ORDER BY id FORMAT Vertical; \ No newline at end of file diff --git a/parser/testdata/03254_last_2_samples_aggregate_function/ast.json b/parser/testdata/03254_last_2_samples_aggregate_function/ast.json new file mode 100644 index 000000000..6bb3a50e6 --- /dev/null +++ b/parser/testdata/03254_last_2_samples_aggregate_function/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001098892, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03254_last_2_samples_aggregate_function/metadata.json b/parser/testdata/03254_last_2_samples_aggregate_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_last_2_samples_aggregate_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_last_2_samples_aggregate_function/query.sql b/parser/testdata/03254_last_2_samples_aggregate_function/query.sql new file mode 100644 index 000000000..8dfedfad7 --- /dev/null +++ b/parser/testdata/03254_last_2_samples_aggregate_function/query.sql @@ -0,0 +1,140 @@ +SET allow_experimental_ts_to_grid_aggregate_function=1; + +-- Table for raw data +CREATE TABLE t_raw_timeseries +( + metric_id UInt64, + timestamp DateTime64(3, 'UTC') CODEC(DoubleDelta, ZSTD), + value Float64 CODEC(DoubleDelta) +) +ENGINE = MergeTree() +ORDER BY (metric_id, timestamp); + +-- Table with data resampled to bigger time steps +CREATE TABLE t_resampled_timeseries +( + step UInt32, -- Resampling step in seconds + metric_id UInt64, + grid_timestamp DateTime('UTC') CODEC(DoubleDelta, ZSTD), + samples AggregateFunction(timeSeriesLastTwoSamples, DateTime64(3, 'UTC'), Float64) +) +ENGINE = AggregatingMergeTree() +ORDER BY (step, metric_id, grid_timestamp); + +-- MV for populating resampled table +CREATE MATERIALIZED VIEW mv_resampled_timeseries TO t_resampled_timeseries +( + step UInt32, + metric_id UInt64, + grid_timestamp DateTime('UTC') CODEC(DoubleDelta, ZSTD), + samples AggregateFunction(timeSeriesLastTwoSamples, DateTime64(3, 'UTC'), Float64) +) +AS SELECT * +FROM +( + SELECT + 10 AS step, + metric_id, + ceil(toUnixTimestamp(timestamp + interval 999 millisecond) / step, 0) * step AS grid_timestamp, -- Round timestamp up to the next grid point + initializeAggregation('timeSeriesLastTwoSamplesState', timestamp, value) AS samples + FROM t_raw_timeseries + ORDER BY metric_id, grid_timestamp + UNION ALL + SELECT + 30 AS step, + metric_id, + ceil(toUnixTimestamp(timestamp + interval 999 millisecond) / step, 0) * step AS grid_timestamp, -- Round timestamp up to the next grid point + initializeAggregation('timeSeriesLastTwoSamplesState', timestamp, value) AS samples + FROM t_raw_timeseries + ORDER BY metric_id, grid_timestamp +) +ORDER BY step, metric_id, grid_timestamp +SETTINGS query_plan_remove_redundant_sorting = 0; + + +-- Table with data resampled to bigger time steps +-- The difference from t_resampled_timeseries is that we store the diff between timestamp and grid_timestamp to improve compression +CREATE TABLE t_resampled_timeseries_delta +( + step UInt32, -- Resampling step in seconds + metric_id UInt64, + grid_timestamp DateTime('UTC')
CODEC(DoubleDelta, ZSTD), + samples AggregateFunction(timeSeriesLastTwoSamples, Int16, Float64) +) +ENGINE = AggregatingMergeTree() +ORDER BY (step, metric_id, grid_timestamp); + +-- MV for populating resampled table +CREATE MATERIALIZED VIEW mv_resampled_timeseries_delta TO t_resampled_timeseries_delta +( + step UInt32, + metric_id UInt64, + grid_timestamp DateTime('UTC') CODEC(DoubleDelta, ZSTD), + samples AggregateFunction(timeSeriesLastTwoSamples, Int16, Float64) +) +AS SELECT * +FROM +( + SELECT + 10 AS step, + metric_id, + ceil(toUnixTimestamp(timestamp + interval 999 millisecond) / step, 0) * step AS grid_timestamp, -- Round timestamp up to the next grid point + initializeAggregation('timeSeriesLastTwoSamplesState', dateDiff('ms', grid_timestamp::DateTime64(3, 'UTC'), timestamp)::Int16, value) AS samples + FROM t_raw_timeseries + ORDER BY metric_id, grid_timestamp + UNION ALL + SELECT + 30 AS step, + metric_id, + ceil(toUnixTimestamp(timestamp + interval 999 millisecond) / step, 0) * step AS grid_timestamp, -- Round timestamp up to the next grid point + initializeAggregation('timeSeriesLastTwoSamplesState', dateDiff('ms', grid_timestamp::DateTime64(3, 'UTC'), timestamp)::Int16, value) AS samples + FROM t_raw_timeseries + ORDER BY metric_id, grid_timestamp +) +ORDER BY step, metric_id, grid_timestamp +SETTINGS query_plan_remove_redundant_sorting = 0; + + +-- Insert some data +INSERT INTO t_raw_timeseries(metric_id, timestamp, value) SELECT number%10 AS metric_id, '2024-12-12 12:00:00'::DateTime64(3, 'UTC') + interval ((number/10)%100)*900 millisecond as timestamp, number AS value FROM numbers(1000); + +SELECT * +FROM t_raw_timeseries +WHERE metric_id IN (3,7) AND timestamp BETWEEN '2024-12-12 12:00:07' AND '2024-12-12 12:00:13' +ORDER BY metric_id, timestamp; + +-- Reload table to test serialization/deserialization of state +DETACH TABLE t_resampled_timeseries; +ATTACH TABLE t_resampled_timeseries; + +-- Check resampled data +SELECT metric_id, grid_timestamp, (finalizeAggregation(samples).1 as timestamp, finalizeAggregation(samples).2 as value) +FROM t_resampled_timeseries +WHERE step = 10 AND metric_id IN (3,7) AND grid_timestamp BETWEEN '2024-12-12 12:00:00' AND '2024-12-12 12:02:00' +ORDER BY metric_id, grid_timestamp; + +SELECT metric_id, grid_timestamp, (finalizeAggregation(samples).1 as timestamp, finalizeAggregation(samples).2 as value), arrayMap(x -> grid_timestamp + interval x millisecond, timestamp) as ts +FROM clusterAllReplicas('test_shard_localhost', currentDatabase(),t_resampled_timeseries_delta) +WHERE step = 10 AND metric_id IN (3,7) AND grid_timestamp BETWEEN '2024-12-12 12:00:00' AND '2024-12-12 12:02:00' +ORDER BY metric_id, grid_timestamp +SETTINGS enable_parallel_replicas=1, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1; + +-- Reload table to test serialization/deserialization of state +DETACH TABLE t_resampled_timeseries_delta; +ATTACH TABLE t_resampled_timeseries_delta; + +SELECT step, count(distinct(metric_id)), count(), min(grid_timestamp), max(grid_timestamp) FROM t_resampled_timeseries FINAL GROUP BY step ORDER BY step; + +-- Compare aggregated table with timestamps and aggregated table with timestamp deltas +SELECT step, count(), sum(ts1 == ts2) +FROM ( + SELECT + t.step as step, + finalizeAggregation(t.samples).1 as ts1, + arrayMap(x -> t_delta.grid_timestamp + interval x millisecond, finalizeAggregation(t_delta.samples).1) as ts2 + FROM t_resampled_timeseries as t FINAL + JOIN t_resampled_timeseries_delta as t_delta FINAL + 
ON t.step == t_delta.step AND t.metric_id == t_delta.metric_id AND t.grid_timestamp == t_delta.grid_timestamp +) +GROUP BY step +ORDER BY step; diff --git a/parser/testdata/03254_last_2_samples_aggregate_function_simple/ast.json b/parser/testdata/03254_last_2_samples_aggregate_function_simple/ast.json new file mode 100644 index 000000000..ab22ad373 --- /dev/null +++ b/parser/testdata/03254_last_2_samples_aggregate_function_simple/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000951451, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03254_last_2_samples_aggregate_function_simple/metadata.json b/parser/testdata/03254_last_2_samples_aggregate_function_simple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_last_2_samples_aggregate_function_simple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_last_2_samples_aggregate_function_simple/query.sql b/parser/testdata/03254_last_2_samples_aggregate_function_simple/query.sql new file mode 100644 index 000000000..2a2287ca5 --- /dev/null +++ b/parser/testdata/03254_last_2_samples_aggregate_function_simple/query.sql @@ -0,0 +1,90 @@ +SET allow_experimental_ts_to_grid_aggregate_function=1; + +-- Table for raw data +CREATE TABLE t_raw_timeseries +( + metric_id UInt64, + timestamp DateTime64(3, 'UTC') CODEC(DoubleDelta, ZSTD), + value Float64 CODEC(DoubleDelta) +) +ENGINE = MergeTree() +ORDER BY (metric_id, timestamp); + +-- Table with data re-sampled to bigger (15 sec) time steps +CREATE TABLE t_resampled_timeseries_15_sec +( + metric_id UInt64, + grid_timestamp DateTime('UTC') CODEC(DoubleDelta, ZSTD), -- Timestamp aligned to 15 sec + samples AggregateFunction(timeSeriesLastTwoSamples, DateTime64(3, 'UTC'), Float64) +) +ENGINE = AggregatingMergeTree() +ORDER BY (metric_id, grid_timestamp); + +-- MV for populating re-sampled table +CREATE MATERIALIZED VIEW mv_resampled_timeseries TO t_resampled_timeseries_15_sec +( + metric_id UInt64, + grid_timestamp DateTime('UTC') CODEC(DoubleDelta, ZSTD), + samples AggregateFunction(timeSeriesLastTwoSamples, DateTime64(3, 'UTC'), Float64) +) +AS SELECT + metric_id, + ceil(toUnixTimestamp(timestamp + interval 999 millisecond) / 15, 0) * 15 AS grid_timestamp, -- Round timestamp up to the next grid point + initializeAggregation('timeSeriesLastTwoSamplesState', timestamp, value) AS samples +FROM t_raw_timeseries +ORDER BY metric_id, grid_timestamp; + +-- Insert some data +INSERT INTO t_raw_timeseries(metric_id, timestamp, value) +SELECT + number%10 AS metric_id, + '2024-12-12 12:00:00'::DateTime64(3, 'UTC') + interval ((number/10)%100)*900 millisecond as timestamp, + number%3+number%29 AS value +FROM numbers(1000); + +-- Check raw data +SELECT * +FROM t_raw_timeseries +WHERE metric_id = 3 AND timestamp BETWEEN '2024-12-12 12:00:12' AND '2024-12-12 12:00:31' +ORDER BY metric_id, timestamp; + +-- Check re-sampled data +SELECT metric_id, grid_timestamp, (finalizeAggregation(samples).1 as timestamp, finalizeAggregation(samples).2 as value) +FROM t_resampled_timeseries_15_sec +WHERE metric_id = 3 AND grid_timestamp BETWEEN '2024-12-12 12:00:15' AND '2024-12-12 12:00:30' +ORDER BY metric_id, grid_timestamp; + +-- Calculate idelta and irate from the raw data +WITH + '2024-12-12 12:00:15'::DateTime64(3,'UTC') AS start_ts, -- start of timestamp grid + start_ts + interval 60 
second AS end_ts, -- end of timestamp grid + 15 AS step_seconds, -- step of timestamp grid + 45 AS window_seconds -- "staleness" window +SELECT + metric_id, + timeSeriesInstantDeltaToGrid(start_ts, end_ts, step_seconds, window_seconds)(timestamp, value), + timeSeriesInstantRateToGrid(start_ts, end_ts, step_seconds, window_seconds)(timestamp, value) +FROM t_raw_timeseries +WHERE metric_id = 3 AND timestamp BETWEEN start_ts - interval window_seconds seconds AND end_ts +GROUP BY metric_id; + +-- Calculate idelta and irate from the re-sampled data +WITH + '2024-12-12 12:00:15'::DateTime64(3,'UTC') AS start_ts, -- start of timestamp grid + start_ts + interval 60 second AS end_ts, -- end of timestamp grid + 15 AS step_seconds, -- step of timestamp grid + 45 AS window_seconds -- "staleness" window +SELECT + metric_id, + timeSeriesInstantDeltaToGrid(start_ts, end_ts, step_seconds, window_seconds)(timestamps, values), + timeSeriesInstantRateToGrid(start_ts, end_ts, step_seconds, window_seconds)(timestamps, values) +FROM ( + SELECT + metric_id, + finalizeAggregation(samples).1 as timestamps, + finalizeAggregation(samples).2 as values + FROM t_resampled_timeseries_15_sec + WHERE metric_id = 3 AND grid_timestamp BETWEEN start_ts - interval window_seconds seconds AND end_ts +) +GROUP BY metric_id; + diff --git a/parser/testdata/03254_merge_source_parts/ast.json b/parser/testdata/03254_merge_source_parts/ast.json new file mode 100644 index 000000000..3ab0176be --- /dev/null +++ b/parser/testdata/03254_merge_source_parts/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001036425, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03254_merge_source_parts/metadata.json b/parser/testdata/03254_merge_source_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_merge_source_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_merge_source_parts/query.sql b/parser/testdata/03254_merge_source_parts/query.sql new file mode 100644 index 000000000..eeaa01135 --- /dev/null +++ b/parser/testdata/03254_merge_source_parts/query.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (x UInt8) ORDER BY x; +INSERT INTO test VALUES (1); +INSERT INTO test VALUES (2); +OPTIMIZE TABLE test FINAL; +SYSTEM FLUSH LOGS part_log; +SELECT ProfileEvents['Merge'], ProfileEvents['MergeSourceParts'], ProfileEvents['MergedRows'], ProfileEvents['MergedColumns'] FROM system.part_log WHERE database = currentDatabase() AND table = 'test' AND event_type = 'MergeParts'; +DROP TABLE test; diff --git a/parser/testdata/03254_normalize_aggregate_states_with_named_tuple_args/ast.json b/parser/testdata/03254_normalize_aggregate_states_with_named_tuple_args/ast.json new file mode 100644 index 000000000..bcf7e9c10 --- /dev/null +++ b/parser/testdata/03254_normalize_aggregate_states_with_named_tuple_args/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001031923, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03254_normalize_aggregate_states_with_named_tuple_args/metadata.json 
b/parser/testdata/03254_normalize_aggregate_states_with_named_tuple_args/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_normalize_aggregate_states_with_named_tuple_args/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_normalize_aggregate_states_with_named_tuple_args/query.sql b/parser/testdata/03254_normalize_aggregate_states_with_named_tuple_args/query.sql new file mode 100644 index 000000000..29eb6549f --- /dev/null +++ b/parser/testdata/03254_normalize_aggregate_states_with_named_tuple_args/query.sql @@ -0,0 +1,33 @@ +SET enable_analyzer = 1; +SET enable_named_columns_in_function_tuple = 1; + +SELECT + * APPLY finalizeAggregation +FROM +( + WITH + (1, 2)::Tuple(a int, b int) AS nt + SELECT + uniqState(nt)::AggregateFunction(uniq, Tuple(int, int)) x, + uniqState([nt])::AggregateFunction(uniq, Array(Tuple(int, int))) y, + uniqState(map(nt, nt))::AggregateFunction(uniq, Map(Tuple(int, int), Tuple(int, int))) z +) +FORMAT JSONEachRow; + +DROP TABLE IF EXISTS users; +DROP TABLE IF EXISTS users2; +DROP TABLE IF EXISTS test_mv; + +CREATE TABLE users (id UInt8, city String, name String) ENGINE=Memory; +CREATE TABLE users2 (id UInt8, city_name_uniq AggregateFunction(uniq, Tuple(String,String))) ENGINE=AggregatingMergeTree() ORDER BY (id); +CREATE MATERIALIZED VIEW test_mv TO users2 AS SELECT id, uniqState((city, name)) AS city_name_uniq FROM users GROUP BY id; + +INSERT INTO users VALUES (1, 'London', 'John'); +INSERT INTO users VALUES (1, 'Berlin', 'Ksenia'); +INSERT INTO users VALUES (2, 'Paris', 'Alice'); + +SELECT id, uniqMerge(city_name_uniq) FROM users2 GROUP BY id ORDER BY id; + +DROP TABLE IF EXISTS users; +DROP TABLE IF EXISTS users2; +DROP TABLE IF EXISTS test_mv; diff --git a/parser/testdata/03254_part_log_partition_column_is_set/ast.json b/parser/testdata/03254_part_log_partition_column_is_set/ast.json new file mode 100644 index 000000000..db5385304 --- /dev/null +++ b/parser/testdata/03254_part_log_partition_column_is_set/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001026092, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03254_part_log_partition_column_is_set/metadata.json b/parser/testdata/03254_part_log_partition_column_is_set/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_part_log_partition_column_is_set/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_part_log_partition_column_is_set/query.sql b/parser/testdata/03254_part_log_partition_column_is_set/query.sql new file mode 100644 index 000000000..72dd01a4b --- /dev/null +++ b/parser/testdata/03254_part_log_partition_column_is_set/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (x UInt8, y UInt8, z String DEFAULT toString(x)) PARTITION BY x ORDER BY x; +INSERT INTO test (x, y) VALUES (1, 1); +INSERT INTO test (x, y) VALUES (1, 2); +OPTIMIZE TABLE test FINAL; +INSERT INTO test (x, y) VALUES (2, 1); +ALTER TABLE test DROP PARTITION 2; +SET mutations_sync = 1; +ALTER TABLE test UPDATE z = x || y WHERE 1; +SELECT * FROM test ORDER BY ALL; +TRUNCATE TABLE test; +DROP TABLE test SYNC; +SYSTEM FLUSH LOGS part_log; + +-- SELECT * FROM system.part_log WHERE database = currentDatabase() FORMAT Vertical; 
+SELECT DISTINCT throwIf(empty(partition)) FROM system.part_log WHERE database = currentDatabase(); diff --git a/parser/testdata/03254_pr_join_on_dups/ast.json b/parser/testdata/03254_pr_join_on_dups/ast.json new file mode 100644 index 000000000..648016a4d --- /dev/null +++ b/parser/testdata/03254_pr_join_on_dups/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery X (children 1)" + }, + { + "explain": " Identifier X" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001171869, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03254_pr_join_on_dups/metadata.json b/parser/testdata/03254_pr_join_on_dups/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_pr_join_on_dups/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_pr_join_on_dups/query.sql b/parser/testdata/03254_pr_join_on_dups/query.sql new file mode 100644 index 000000000..aca4fc6b6 --- /dev/null +++ b/parser/testdata/03254_pr_join_on_dups/query.sql @@ -0,0 +1,73 @@ +drop table if exists X sync; +drop table if exists Y sync; + +set min_bytes_to_use_direct_io = 0; -- min_bytes_to_use_direct_io > 0 is broken and leads to unexpected results, https://github.com/ClickHouse/ClickHouse/issues/65690 + +create table X (id Int32, x_a String, x_b Nullable(Int32)) engine ReplicatedMergeTree('/clickhouse/{database}/X', '1') order by tuple(); +create table Y (id Int32, y_a String, y_b Nullable(String)) engine ReplicatedMergeTree('/clickhouse/{database}/Y', '1') order by tuple(); + +insert into X (id, x_a, x_b) values (1, 'l1', 1), (2, 'l2', 2), (2, 'l3', 3), (3, 'l4', 4); +insert into X (id, x_a) values (4, 'l5'), (4, 'l6'), (5, 'l7'), (8, 'l8'), (9, 'l9'); +insert into Y (id, y_a) values (1, 'r1'), (1, 'r2'), (2, 'r3'), (3, 'r4'), (3, 'r5'); +insert into Y (id, y_a, y_b) values (4, 'r6', 'nr6'), (6, 'r7', 'nr7'), (7, 'r8', 'nr8'), (9, 'r9', 'nr9'); + +set enable_analyzer = 1, enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; + +select 'inner'; +select X.*, Y.* from X inner join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'inner subs'; +select s.*, j.* from (select * from X) as s inner join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +select 'inner expr'; +select X.*, Y.* from X inner join Y on (X.id + 1) = (Y.id + 1) order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; + +select 'left'; +select X.*, Y.* from X left join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'left subs'; +select s.*, j.* from (select * from X) as s left join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; +select 'left expr'; +select X.*, Y.* from X left join Y on (X.id + 1) = (Y.id + 1) order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; + +select 'right'; +select X.*, Y.* from X right join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'right subs'; +select s.*, j.* from (select * from X) as s right join (select * from Y) as j on s.id = j.id order by s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; + +select 'full'; +select X.*, Y.* from X full join Y on X.id = Y.id order by X.id, X.x_a, X.x_b, Y.id, Y.y_a, Y.y_b; +select 'full subs'; +select s.*, j.* from (select * from X) as s full join (select * from Y) as j on s.id = j.id order by 
s.id, s.x_a, s.x_b, j.id, j.y_a, j.y_b; + +select 'self inner'; +select X.*, s.* from X inner join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self inner nullable'; +select X.*, s.* from X inner join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self inner nullable vs not nullable'; +select X.*, s.* from X inner join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self inner nullable vs not nullable 2'; +select Y.*, s.* from Y inner join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by Y.id, Y.y_a, Y.y_b, s.id, s.y_a, s.y_b; + +select 'self left'; +select X.*, s.* from X left join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self left nullable'; +select X.*, s.* from X left join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self left nullable vs not nullable'; +select X.*, s.* from X left join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self left nullable vs not nullable 2'; +select Y.*, s.* from Y left join (select * from Y) as s on concat('n', Y.y_a) = s.y_b order by Y.id, Y.y_a, Y.y_b, s.id, s.y_a, s.y_b; + +select 'self right'; +select X.*, s.* from X right join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self right nullable'; +select X.*, s.* from X right join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self right nullable vs not nullable'; +select X.*, s.* from X right join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; + +select 'self full'; +select X.*, s.* from X full join (select * from X) as s on X.id = s.id order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full nullable'; +select X.*, s.* from X full join (select * from X) as s on X.x_b = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; +select 'self full nullable vs not nullable'; +select X.*, s.* from X full join (select * from X) as s on X.id = s.x_b order by X.id, X.x_a, X.x_b, s.id, s.x_a, s.x_b; + +drop table X sync; +drop table Y sync; diff --git a/parser/testdata/03254_prewarm_mark_cache_columns/ast.json b/parser/testdata/03254_prewarm_mark_cache_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03254_prewarm_mark_cache_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03254_prewarm_mark_cache_columns/metadata.json b/parser/testdata/03254_prewarm_mark_cache_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_prewarm_mark_cache_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_prewarm_mark_cache_columns/query.sql b/parser/testdata/03254_prewarm_mark_cache_columns/query.sql new file mode 100644 index 000000000..459a5f25e --- /dev/null +++ b/parser/testdata/03254_prewarm_mark_cache_columns/query.sql @@ -0,0 +1,30 @@ +-- Tags: no-parallel, no-random-settings, no-random-merge-tree-settings + +DROP TABLE IF EXISTS t_prewarm_columns; + +CREATE TABLE t_prewarm_columns (a UInt64, b UInt64, c UInt64, d UInt64) +ENGINE = MergeTree ORDER BY a +SETTINGS min_bytes_for_wide_part = 0, prewarm_mark_cache = 1, columns_to_prewarm_mark_cache = 'a,c'; + +INSERT INTO t_prewarm_columns VALUES 
(1, 1, 1, 1); + +SELECT count() FROM t_prewarm_columns WHERE NOT ignore(*); + +SYSTEM DROP MARK CACHE; +DETACH TABLE t_prewarm_columns; +ATTACH TABLE t_prewarm_columns; + +SELECT count() FROM t_prewarm_columns WHERE NOT ignore(*); + +SYSTEM DROP MARK CACHE; +SYSTEM PREWARM MARK CACHE t_prewarm_columns; + +SELECT count() FROM t_prewarm_columns WHERE NOT ignore(*); + +SYSTEM FLUSH LOGS query_log; + +SELECT ProfileEvents['LoadedMarksCount'] FROM system.query_log +WHERE current_database = currentDatabase() AND type = 'QueryFinish' AND query LIKE 'SELECT count() FROM t_prewarm_columns%' +ORDER BY event_time_microseconds; + +DROP TABLE t_prewarm_columns; diff --git a/parser/testdata/03254_prewarm_mark_cache_rmt/ast.json b/parser/testdata/03254_prewarm_mark_cache_rmt/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03254_prewarm_mark_cache_rmt/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03254_prewarm_mark_cache_rmt/metadata.json b/parser/testdata/03254_prewarm_mark_cache_rmt/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_prewarm_mark_cache_rmt/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_prewarm_mark_cache_rmt/query.sql b/parser/testdata/03254_prewarm_mark_cache_rmt/query.sql new file mode 100644 index 000000000..eca743019 --- /dev/null +++ b/parser/testdata/03254_prewarm_mark_cache_rmt/query.sql @@ -0,0 +1,65 @@ +-- Tags: no-parallel, no-shared-merge-tree + +DROP TABLE IF EXISTS t_prewarm_cache_rmt_1; +DROP TABLE IF EXISTS t_prewarm_cache_rmt_2; + +CREATE TABLE t_prewarm_cache_rmt_1 (a UInt64, b UInt64, c UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/03254_prewarm_mark_cache_smt/t_prewarm_cache', '1') +ORDER BY a SETTINGS prewarm_mark_cache = 1; + +CREATE TABLE t_prewarm_cache_rmt_2 (a UInt64, b UInt64, c UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/03254_prewarm_mark_cache_smt/t_prewarm_cache', '2') +ORDER BY a SETTINGS prewarm_mark_cache = 1; + +SYSTEM DROP MARK CACHE; + +SYSTEM STOP FETCHES t_prewarm_cache_rmt_2; + +-- Check that prewarm works on insert. +INSERT INTO t_prewarm_cache_rmt_1 SELECT number, rand(), rand() FROM numbers(20000); +SELECT count() FROM t_prewarm_cache_rmt_1 WHERE NOT ignore(*); + +-- Check that prewarm works on fetch. +SYSTEM DROP MARK CACHE; +SYSTEM START FETCHES t_prewarm_cache_rmt_2; +SYSTEM SYNC REPLICA t_prewarm_cache_rmt_2; +SELECT count() FROM t_prewarm_cache_rmt_2 WHERE NOT ignore(*); + +-- Check that prewarm works on merge. +INSERT INTO t_prewarm_cache_rmt_1 SELECT number, rand(), rand() FROM numbers(20000); +OPTIMIZE TABLE t_prewarm_cache_rmt_1 FINAL; + +SYSTEM SYNC REPLICA t_prewarm_cache_rmt_2; + +SELECT count() FROM t_prewarm_cache_rmt_1 WHERE NOT ignore(*); +SELECT count() FROM t_prewarm_cache_rmt_2 WHERE NOT ignore(*); + +-- Check that prewarm works on restart. +SYSTEM DROP MARK CACHE; + +DETACH TABLE t_prewarm_cache_rmt_1; +DETACH TABLE t_prewarm_cache_rmt_2; + +ATTACH TABLE t_prewarm_cache_rmt_1; +ATTACH TABLE t_prewarm_cache_rmt_2; + +SELECT count() FROM t_prewarm_cache_rmt_1 WHERE NOT ignore(*); +SELECT count() FROM t_prewarm_cache_rmt_2 WHERE NOT ignore(*); + +SYSTEM DROP MARK CACHE; + +SELECT count() FROM t_prewarm_cache_rmt_1 WHERE NOT ignore(*); + +--- Check that system query works. 
+SYSTEM PREWARM MARK CACHE t_prewarm_cache_rmt_1; + +SELECT count() FROM t_prewarm_cache_rmt_1 WHERE NOT ignore(*); + +SYSTEM FLUSH LOGS query_log; + +SELECT ProfileEvents['LoadedMarksCount'] > 0 FROM system.query_log +WHERE current_database = currentDatabase() AND type = 'QueryFinish' AND query LIKE 'SELECT count() FROM t_prewarm_cache%' +ORDER BY event_time_microseconds; + +DROP TABLE IF EXISTS t_prewarm_cache_rmt_1; +DROP TABLE IF EXISTS t_prewarm_cache_rmt_2; diff --git a/parser/testdata/03254_project_lwd_respects_row_exists/ast.json b/parser/testdata/03254_project_lwd_respects_row_exists/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03254_project_lwd_respects_row_exists/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03254_project_lwd_respects_row_exists/metadata.json b/parser/testdata/03254_project_lwd_respects_row_exists/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_project_lwd_respects_row_exists/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_project_lwd_respects_row_exists/query.sql b/parser/testdata/03254_project_lwd_respects_row_exists/query.sql new file mode 100644 index 000000000..073f57837 --- /dev/null +++ b/parser/testdata/03254_project_lwd_respects_row_exists/query.sql @@ -0,0 +1,46 @@ + +-- compact test +DROP TABLE IF EXISTS users_compact; + +CREATE TABLE users_compact ( + uid Int16, + name String, + age Int16, + projection p1 (select age, count() group by age), +) ENGINE = MergeTree order by uid +SETTINGS lightweight_mutation_projection_mode = 'rebuild', +min_bytes_for_wide_part = 10485760; + +INSERT INTO users_compact VALUES (1231, 'John', 33), (1232, 'Mary', 34); + +DELETE FROM users_compact WHERE uid = 1231; + +SELECT + age, + count() +FROM users_compact +GROUP BY age +SETTINGS optimize_use_projections = 1, force_optimize_projection = 1, parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +-- wide test +DROP TABLE IF EXISTS users_wide; + +CREATE TABLE users_wide ( + uid Int16, + name String, + age Int16, + projection p1 (select age, count() group by age), +) ENGINE = MergeTree order by uid +SETTINGS lightweight_mutation_projection_mode = 'rebuild', +min_bytes_for_wide_part = 0; + +INSERT INTO users_wide VALUES (1231, 'John', 33), (1232, 'Mary', 34); + +DELETE FROM users_wide WHERE uid = 1231; + +SELECT + age, + count() +FROM users_wide +GROUP BY age +SETTINGS optimize_use_projections = 1, force_optimize_projection = 1, parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; diff --git a/parser/testdata/03254_system_prewarm_mark_cache/ast.json b/parser/testdata/03254_system_prewarm_mark_cache/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03254_system_prewarm_mark_cache/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03254_system_prewarm_mark_cache/metadata.json b/parser/testdata/03254_system_prewarm_mark_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_system_prewarm_mark_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_system_prewarm_mark_cache/query.sql b/parser/testdata/03254_system_prewarm_mark_cache/query.sql new file mode 100644 index 000000000..d26c3cb5b --- /dev/null +++ b/parser/testdata/03254_system_prewarm_mark_cache/query.sql 
@@ -0,0 +1,27 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS t_prewarm_cache; + +CREATE TABLE t_prewarm_cache (a UInt64, b UInt64, c UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/03254_prewarm_mark_cache_smt/t_prewarm_cache', '1') +ORDER BY a SETTINGS prewarm_mark_cache = 0; + +SYSTEM DROP MARK CACHE; + +INSERT INTO t_prewarm_cache SELECT number, rand(), rand() FROM numbers(20000); + +SELECT count() FROM t_prewarm_cache WHERE NOT ignore(*); + +SYSTEM DROP MARK CACHE; + +SYSTEM PREWARM MARK CACHE t_prewarm_cache; + +SELECT count() FROM t_prewarm_cache WHERE NOT ignore(*); + +SYSTEM FLUSH LOGS query_log; + +SELECT ProfileEvents['LoadedMarksCount'] > 0 FROM system.query_log +WHERE current_database = currentDatabase() AND type = 'QueryFinish' AND query LIKE 'SELECT count() FROM t_prewarm_cache%' +ORDER BY event_time_microseconds; + +DROP TABLE IF EXISTS t_prewarm_cache; diff --git a/parser/testdata/03254_test_alter_user_no_changes/ast.json b/parser/testdata/03254_test_alter_user_no_changes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03254_test_alter_user_no_changes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03254_test_alter_user_no_changes/metadata.json b/parser/testdata/03254_test_alter_user_no_changes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_test_alter_user_no_changes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_test_alter_user_no_changes/query.sql b/parser/testdata/03254_test_alter_user_no_changes/query.sql new file mode 100644 index 000000000..b71930eea --- /dev/null +++ b/parser/testdata/03254_test_alter_user_no_changes/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-parallel + +create user u_03254_alter_user; +alter user u_03254_alter_user; -- { clientError SYNTAX_ERROR } +drop user u_03254_alter_user; diff --git a/parser/testdata/03254_timeseries_functions/ast.json b/parser/testdata/03254_timeseries_functions/ast.json new file mode 100644 index 000000000..a1cc8239b --- /dev/null +++ b/parser/testdata/03254_timeseries_functions/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery ts_raw_data (children 3)" + }, + { + "explain": " Identifier ts_raw_data" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration timestamp (children 1)" + }, + { + "explain": " DataType DateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " ColumnDeclaration value (children 1)" + }, + { + "explain": " DataType Float64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier timestamp" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001193077, + "rows_read": 15, + "bytes_read": 555 + } +} diff --git a/parser/testdata/03254_timeseries_functions/metadata.json b/parser/testdata/03254_timeseries_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_timeseries_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_timeseries_functions/query.sql 
b/parser/testdata/03254_timeseries_functions/query.sql new file mode 100644 index 000000000..734e11c66 --- /dev/null +++ b/parser/testdata/03254_timeseries_functions/query.sql @@ -0,0 +1,53 @@ +CREATE TABLE ts_raw_data(timestamp DateTime64(3,'UTC'), value Float64) ENGINE = MergeTree() ORDER BY timestamp; + +INSERT INTO ts_raw_data SELECT arrayJoin(*).1::DateTime64(3, 'UTC') AS timestamp, arrayJoin(*).2 AS value +FROM ( +select [ +(1734955421.374, 0), +(1734955436.374, 0), +(1734955451.374, 0), +(1734955466.374, 0), +(1734955481.374, 0), +(1734955496.374, 0), +(1734955511.374, 1), +(1734955526.374, 3), +(1734955541.374, 5), +(1734955556.374, 5), +(1734955571.374, 5), +(1734955586.374, 5), +(1734955601.374, 8), +(1734955616.374, 8), +(1734955631.374, 8), +(1734955646.374, 8), +(1734955661.374, 8), +(1734955676.374, 8) +]); + +SELECT groupArraySorted(20)((timestamp::Decimal(20,3), value)) FROM ts_raw_data; + +SET allow_experimental_ts_to_grid_aggregate_function = 1; + +WITH + 1734955380 AS start, 1734955680 AS end, 15 AS step, 0 AS staleness, + timeSeriesRange(start, end, step) as grid +SELECT arrayZip(grid, timeSeriesResampleToGridWithStaleness(start, end, step, staleness)(timestamp, value)) FROM ts_raw_data; + +WITH + 1734955380 AS start, 1734955680 AS end, 15 AS step, 300 AS window, + timeSeriesRange(start, end, step) as grid +SELECT + arrayZip(grid, timeSeriesLastToGrid(start, end, step, window)(timestamp, value)) as last_5m, + arrayZip(grid, timeSeriesRateToGrid(start, end, step, window)(timestamp, value)) as rate_5m, + arrayZip(grid, timeSeriesDeltaToGrid(start, end, step, window)(timestamp, value)) as delta_5m +FROM ts_raw_data FORMAT Vertical; + +WITH + 1734955380 AS start, 1734955680 AS end, 15 AS step, + timeSeriesRange(start, end, step) as grid +SELECT + arrayZip(grid, timeSeriesInstantRateToGrid(start, end, step, 30)(timestamp, value)) as irate_30s, -- previous timestamp is within the window + arrayZip(grid, timeSeriesInstantRateToGrid(start, end, step, 19)(timestamp, value)) as irate_19s, -- previous timestamp is still within the window + arrayZip(grid, timeSeriesInstantRateToGrid(start, end, step, 18)(timestamp, value)) as irate_18s -- previous timestamp is outside the window +FROM ts_raw_data FORMAT Vertical; + +DROP TABLE ts_raw_data; diff --git a/parser/testdata/03254_timeseries_functions_various_arguments/ast.json b/parser/testdata/03254_timeseries_functions_various_arguments/ast.json new file mode 100644 index 000000000..d9ef45294 --- /dev/null +++ b/parser/testdata/03254_timeseries_functions_various_arguments/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery ts_data (children 3)" + }, + { + "explain": " Identifier ts_data" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration id (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " ColumnDeclaration timestamps (children 1)" + }, + { + "explain": " DataType Array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType DateTime" + }, + { + "explain": " ColumnDeclaration values (children 1)" + }, + { + "explain": " DataType Array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType Float64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " 
ExpressionList" + }, + { + "explain": " Identifier id" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001035643, + "rows_read": 18, + "bytes_read": 663 + } +} diff --git a/parser/testdata/03254_timeseries_functions_various_arguments/metadata.json b/parser/testdata/03254_timeseries_functions_various_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_timeseries_functions_various_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_timeseries_functions_various_arguments/query.sql b/parser/testdata/03254_timeseries_functions_various_arguments/query.sql new file mode 100644 index 000000000..6429cf80b --- /dev/null +++ b/parser/testdata/03254_timeseries_functions_various_arguments/query.sql @@ -0,0 +1,113 @@ +CREATE TABLE ts_data(id UInt64, timestamps Array(DateTime), values Array(Float64)) ENGINE = MergeTree() ORDER BY id; +CREATE TABLE ts_data_nullable(id UInt64, timestamp UInt32, value Nullable(Float64)) ENGINE = MergeTree() ORDER BY id; + +INSERT INTO ts_data VALUES (1, [10,20], [1,2]), (2, [30,40,50], [3,4]), (3, [60], [6]), (4, [], []), (5, [80], [8,9]), (6, [100], [10]); +INSERT INTO ts_data_nullable SELECT id, timestamp, value FROM ts_data ARRAY JOIN timestamps as timestamp, arrayResize(values, length(timestamps), NULL) AS value; + +SET allow_experimental_time_series_aggregate_functions = 1; + +-- Fail because of rows with non-matching lengths of timestamps and values +SELECT timeSeriesResampleToGridWithStaleness(10, 120, 10, 10)(timestamps, values) FROM ts_data; -- {serverError BAD_ARGUMENTS} +SELECT timeSeriesRateToGrid(10, 120, 10, 10)(timestamps, values) FROM ts_data; -- {serverError BAD_ARGUMENTS} +SELECT timeSeriesDeltaToGrid(10, 120, 10, 10)(timestamps, values) FROM ts_data; -- {serverError BAD_ARGUMENTS} +SELECT timeSeriesInstantRateToGrid(10, 120, 10, 10)(timestamps, values) FROM ts_data; -- {serverError BAD_ARGUMENTS} +SELECT timeSeriesInstantDeltaToGrid(10, 120, 10, 10)(timestamps, values) FROM ts_data; -- {serverError BAD_ARGUMENTS} + +-- Filter out invalid rows where timestamp and values arrays lengths do not match +SELECT 'staleness = 10:'; +SELECT timeSeriesResampleToGridWithStaleness(10, 120, 10, 10)(timestamps, values) FROM (SELECT * FROM ts_data WHERE length(timestamps) = length(values)); +SELECT timeSeriesResampleToGridWithStaleness(10, 120, 10, 10)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); +SELECT timeSeriesResampleToGridWithStalenessIf(10, 120, 10, 10)(timestamps, values, length(timestamps) = length(values)) FROM ts_data; +SELECT timeSeriesResampleToGridWithStalenessIf(10, 120, 10, 10)(timestamps, values, toNullable(length(timestamps) = length(values))) FROM ts_data; + +SELECT 'staleness = 11:'; +SELECT timeSeriesResampleToGridWithStaleness(10, 120, 10, 11)(timestamps, values) FROM (SELECT * FROM ts_data WHERE length(timestamps) = length(values)); +SELECT timeSeriesResampleToGridWithStaleness(10, 120, 10, 11)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); +SELECT timeSeriesResampleToGridWithStalenessIf(10, 120, 10, 11)(timestamps, values, length(timestamps) = length(values)) FROM ts_data; +SELECT timeSeriesResampleToGridWithStalenessIf(10, 120, 10, 11)(timestamps, values, toNullable(length(timestamps) = length(values))) FROM ts_data; + +SELECT 'staleness = 60:'; +SELECT timeSeriesRateToGrid(10, 120, 10, 60)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); +SELECT 
+SELECT timeSeriesRateToGridIf(10, 120, 10, 60)(timestamps, values, toNullable(true)) FROM ts_data WHERE length(timestamps) = length(values); +SELECT timeSeriesRateToGridIf(10, 120, 10, 60)(timestamps, values, length(timestamps) = length(values)) FROM ts_data; + +SELECT timeSeriesDeltaToGrid(10, 120, 10, 60)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); +SELECT timeSeriesDeltaToGridIf(10, 120, 10, 60)(timestamps, values, length(timestamps) = length(values)) FROM ts_data; +SELECT timeSeriesDeltaToGridIf(10, 120, 10, 60)(timestamps, values, toNullable(length(timestamps) = length(values))) FROM ts_data; + +SELECT timeSeriesInstantRateToGrid(10, 120, 10, 60)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); +SELECT timeSeriesInstantRateToGridIf(10, 120, 10, 60)(timestamps, values, length(timestamps) = length(values)) FROM ts_data; +SELECT timeSeriesInstantRateToGridIf(10, 120, 10, 60)(timestamps, values, if(length(timestamps) = length(values), true, NULL)) FROM ts_data; + +SELECT timeSeriesInstantDeltaToGrid(10, 120, 10, 60)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); +SELECT timeSeriesInstantDeltaToGridIf(10, 120, 10, 60)(timestamps, values, length(timestamps) = length(values)) FROM ts_data; +SELECT timeSeriesInstantDeltaToGridIf(10, 120, 10, 60)(timestamps, values, toNullable(length(timestamps) = length(values))) FROM ts_data; + +SELECT 'staleness = 61:'; +SELECT timeSeriesRateToGrid(10, 120, 10, 61)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); +SELECT timeSeriesDeltaToGrid(10, 120, 10, 61)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); +SELECT timeSeriesInstantRateToGrid(10, 120, 10, 61)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); +SELECT timeSeriesInstantDeltaToGrid(10, 120, 10, 61)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); + + +SELECT * FROM ts_data_nullable WHERE value IS NULL AND id < 5; + +SELECT timeSeriesResampleToGridWithStalenessIf(15, 125, 10, 10)(timestamps, values, length(timestamps) = length(values)) FROM ts_data; + +-- Test with Nullable arguments +SELECT timeSeriesResampleToGridWithStaleness(15, 125, 10, 10)(arrayResize(timestamps, arrayMin([length(timestamps), length(values)]) as min_len), arrayResize(values, min_len)) FROM ts_data; +SELECT timeSeriesResampleToGridWithStaleness(15, 125, 10, 10)(timestamp, value) FROM ts_data_nullable; +SELECT timeSeriesResampleToGridWithStalenessIf(15, 125, 10, 10)(timestamp, value, id < 5) FROM ts_data_nullable; + +SELECT timeSeriesResampleToGridWithStaleness(15, 125, 10, 10)([10, 20, 30]::Array(UInt32), [1.0, 2.0, NULL]); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT timeSeriesResampleToGridWithStaleness(15, 125, 10, 10)([10, NULL, 30]::Array(Nullable(UInt32)), [1.0, 2.0, 3.0]); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +-- End timestamp not aligned with the step +SELECT timeSeriesResampleToGridWithStaleness(100, 110, 15, 10)([89, 101, 109]::Array(UInt32), [89, 101, 109]::Array(Float32)); +SELECT timeSeriesResampleToGridWithStaleness(100, 120, 15, 10)([89, 101, 109]::Array(UInt32), [89, 101, 109]::Array(Float32)); +SELECT timeSeriesRateToGrid(100, 140, 15, 40)([89, 101, 109]::Array(UInt32), [89, 101, 109]::Array(Float32)); +SELECT timeSeriesInstantRateToGrid(100, 140, 15, 40)([89, 101, 109]::Array(UInt32), [89, 101, 109]::Array(Float32)); +SELECT timeSeriesInstantDeltaToGrid(100, 150, 15, 20)([89, 101, 
109]::Array(Float32)); + +-- Start timestamp equals the end timestamp +SELECT timeSeriesRateToGrid(120, 120, 0, 40)([89, 101, 109]::Array(UInt32), [89, 101, 109]::Array(Float32)); +SELECT timeSeriesInstantRateToGrid(120, 120, 0, 40)([89, 101, 109]::Array(UInt32), [89, 101, 109]::Array(Float32)); +SELECT timeSeriesInstantDeltaToGrid(120, 120, 0, 20)([89, 101, 109]::Array(UInt32), [89, 101, 109]::Array(Float32)); + +-- First bucket doesn't have a value. +SELECT timeSeriesResampleToGridWithStaleness(105, 210, 15, 30)([110, 120, 130, 140, 190, 200, 210, 220, 230]::Array(UInt32), [1, 1, 3, 4, 5, 5, 8, 12, 13]::Array(Float32)); +SELECT timeSeriesResampleToGridWithStaleness(105, 210, 15, 300)([110, 120, 130, 140, 190, 200, 210, 220, 230]::Array(UInt32), [1, 1, 3, 4, 5, 5, 8, 12, 13]::Array(Float32)); +SELECT timeSeriesResampleToGridWithStaleness(90, 210, 15, 300)([110, 120, 130, 140, 190, 200, 210, 220, 230]::Array(UInt32), [1, 1, 3, 4, 5, 5, 8, 12, 13]::Array(Float32)); + +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 10, 30)(toDateTime(105), [1., 2., 3.]); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT timeSeriesRateToGrid(100, 150, 10, 30)(toDateTime(105), [1., 2., 3.]); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT timeSeriesInstantDeltaToGrid(100, 150, 10, 30)(toDateTime(105), [1., 2., 3.]); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 10, 30)(toDateTime(105), arrayJoin([1., 2., 3.])); +SELECT timeSeriesInstantRateToGrid(100, 150, 10, 30)([1, 2, 3]::Array(UInt32), 1.); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT timeSeriesDeltaToGrid(100, 150, 10, 30)([1, 2, 3]::Array(UInt32), 1.); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} + +-- Try to use an aggregate function state in combinators with start, end, step and window parameters that differ from the original parameters +-- An error should be returned +SELECT timeSeriesResampleToGridWithStalenessMerge(toNullable(60), 100, 200, 20)( + initializeAggregation('timeSeriesResampleToGridWithStalenessState(100, 200, 20, 60)', (100 + number*10)::DateTime32, number::Float64) +) FROM numbers(5); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT timeSeriesInstantDeltaToGridMerge(toNullable(60), 100, 200, 20)( + initializeAggregation('timeSeriesInstantDeltaToGridState(100, 200, 20, 60)', (100 + number*10)::DateTime32, number::Float64) +) FROM numbers(5); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT timeSeriesResampleToGridWithStalenessMerge(60, 100, 200, 20)( + initializeAggregation('timeSeriesResampleToGridWithStalenessState(100, 200, 20, 60)', (100 + number*10)::DateTime32, number::Float64) +) FROM numbers(5); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +-- With matching parameters everything should work +SELECT timeSeriesResampleToGridWithStalenessMerge(100, 200, 20, 60)( + initializeAggregation('timeSeriesResampleToGridWithStalenessState(100, 200, 20, 60)', (100 + number*10)::DateTime32, number::Float64) +) FROM numbers(5); +SELECT timeSeriesInstantDeltaToGridMerge(100, 200, 20, 60)( + initializeAggregation('timeSeriesInstantDeltaToGridState(100, 200, 20, 60)', (100 + number*10)::DateTime32, number::Float64) +) FROM numbers(5); +SELECT timeSeriesResampleToGridWithStalenessMerge(100, 200, 20, 60)( + initializeAggregation('timeSeriesResampleToGridWithStalenessState(100, 200, 20, 60)', (100 + number*10)::DateTime32, number::Float64) +) FROM numbers(5); + +DROP TABLE ts_data; +DROP TABLE ts_data_nullable; diff --git a/parser/testdata/03254_timeseries_group_array/ast.json 
b/parser/testdata/03254_timeseries_group_array/ast.json new file mode 100644 index 000000000..46f81c883 --- /dev/null +++ b/parser/testdata/03254_timeseries_group_array/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery ts_raw_data (children 3)" + }, + { + "explain": " Identifier ts_raw_data" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration timestamp (children 1)" + }, + { + "explain": " DataType DateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " ColumnDeclaration value (children 1)" + }, + { + "explain": " DataType Float64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier timestamp" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001190223, + "rows_read": 15, + "bytes_read": 555 + } +} diff --git a/parser/testdata/03254_timeseries_group_array/metadata.json b/parser/testdata/03254_timeseries_group_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_timeseries_group_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_timeseries_group_array/query.sql b/parser/testdata/03254_timeseries_group_array/query.sql new file mode 100644 index 000000000..c905927b1 --- /dev/null +++ b/parser/testdata/03254_timeseries_group_array/query.sql @@ -0,0 +1,21 @@ +CREATE TABLE ts_raw_data(timestamp DateTime64(3,'UTC'), value Float64) ENGINE = MergeTree() ORDER BY timestamp; + +INSERT INTO ts_raw_data SELECT arrayJoin(*).1::DateTime64(3, 'UTC') AS timestamp, arrayJoin(*).2 AS value +FROM ( +select [ +(1734955421.020, 0), +(1734955436.020, 5), +(1734955451.020, 3), +(1734955451.020, 2), +(1734955435.020, 4), +(1734955436.020, 3), +(1734955511.020, 5) +]); + +SELECT * FROM ts_raw_data; + +SET allow_experimental_time_series_aggregate_functions = 1; + +SELECT 'groupArray: ', timeSeriesGroupArray(timestamp, value) FROM ts_raw_data; + +DROP TABLE ts_raw_data; diff --git a/parser/testdata/03254_timeseries_instant_value_aggregate_functions/ast.json b/parser/testdata/03254_timeseries_instant_value_aggregate_functions/ast.json new file mode 100644 index 000000000..1f12db051 --- /dev/null +++ b/parser/testdata/03254_timeseries_instant_value_aggregate_functions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00101191, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03254_timeseries_instant_value_aggregate_functions/metadata.json b/parser/testdata/03254_timeseries_instant_value_aggregate_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_timeseries_instant_value_aggregate_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_timeseries_instant_value_aggregate_functions/query.sql b/parser/testdata/03254_timeseries_instant_value_aggregate_functions/query.sql new file mode 100644 index 000000000..0549f6d24 --- /dev/null +++ b/parser/testdata/03254_timeseries_instant_value_aggregate_functions/query.sql 
@@ -0,0 +1,132 @@ +SET allow_experimental_ts_to_grid_aggregate_function=1; + +SET cluster_for_parallel_replicas = 'test_shard_localhost'; + + +CREATE TABLE t_resampled_timeseries +( + step UInt32, -- Resampling step in seconds + metric_id UInt64, + grid_timestamp DateTime('UTC') CODEC(DoubleDelta, ZSTD), + samples Tuple(Array(DateTime('UTC')), Array(Float64)) -- Timeseries data resampled to the grid +) +ENGINE = AggregatingMergeTree() +ORDER BY (step, metric_id, grid_timestamp); + +INSERT INTO t_resampled_timeseries(step, metric_id, grid_timestamp, samples) VALUES +(10, 42, '2024-12-12 12:00:10', (['2024-12-12 12:00:09', '2024-12-12 12:00:07'], [100, 90])), +(10, 42, '2024-12-12 12:00:20', (['2024-12-12 12:00:19'], [110])), +(10, 42, '2024-12-12 12:00:30', (['2024-12-12 12:00:29', '2024-12-12 12:00:23'], [100, 100])), +(10, 42, '2024-12-12 12:00:40', (['2024-12-12 12:00:39', '2024-12-12 12:00:38'], [90, 100])); + +WITH + toDateTime('2024-12-12 12:00:10', 'UTC') AS start_ts, + toDateTime('2024-12-12 12:01:00', 'UTC') AS end_ts, + 10 AS step_sec, + 50 as window_sec, + range(toUnixTimestamp(start_ts), toUnixTimestamp(end_ts) + 1, step_sec) as grid +SELECT metric_id, arrayJoin(arrayZip(grid, irate_values, irate_values_scale_3, idelta_values, rate_values, rate_values_scale_5, delta_values)) +FROM ( + SELECT + metric_id, + timeSeriesInstantRateToGrid(start_ts, end_ts, step_sec, window_sec)(samples.1, samples.2) as irate_values, + timeSeriesInstantRateToGrid(start_ts, end_ts, step_sec, window_sec)(samples.1::Array(DateTime64(3, 'UTC')), samples.2) as irate_values_scale_3, + timeSeriesInstantDeltaToGrid(start_ts, end_ts, step_sec, window_sec)(samples.1, samples.2) as idelta_values, + timeSeriesRateToGrid(start_ts, end_ts, step_sec, window_sec)(samples.1, samples.2) as rate_values, + timeSeriesRateToGrid(start_ts, end_ts, step_sec, window_sec)(samples.1::Array(DateTime64(5, 'UTC')), samples.2) as rate_values_scale_5, + timeSeriesDeltaToGrid(start_ts, end_ts, step_sec, window_sec)(samples.1, samples.2) as delta_values + FROM clusterAllReplicas('test_shard_localhost', currentDatabase(), t_resampled_timeseries) + GROUP BY metric_id +) +ORDER BY metric_id +SETTINGS enable_parallel_replicas=1, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1, enable_analyzer=1; + +-- Test with DateTime64 + +CREATE TABLE t_resampled_timeseries_64 +( + step UInt32, -- Resampling step in seconds + metric_id UInt64, + grid_timestamp DateTime('UTC') CODEC(DoubleDelta, ZSTD), + samples Tuple(Array(DateTime64(3, 'UTC')), Array(Float64)) -- Timeseries data resampled to the grid +) +ENGINE = AggregatingMergeTree() +ORDER BY (step, metric_id, grid_timestamp); + +INSERT INTO t_resampled_timeseries_64(step, metric_id, grid_timestamp, samples) VALUES +(10, 142, '2024-12-12 12:00:10', (['2024-12-12 12:00:09.100', '2024-12-12 12:00:08.600'], [100, 90])), +(10, 142, '2024-12-12 12:00:20', (['2024-12-12 12:00:19.100'], [110])), +(10, 142, '2024-12-12 12:00:30', (['2024-12-12 12:00:29.300', '2024-12-12 12:00:23.400'], [100, 100])), +(10, 142, '2024-12-12 12:00:40', (['2024-12-12 12:00:39.400', '2024-12-12 12:00:38.500'], [90, 100])); + +WITH + toDateTime64('2024-12-12 12:00:10', 3, 'UTC') AS start_ts, + toDateTime64('2024-12-12 12:01:00', 3, 'UTC') AS end_ts, + 10 AS step_sec, + 50 as window_sec, + range(toUnixTimestamp(start_ts), toUnixTimestamp(end_ts) + 1, step_sec) as grid +SELECT metric_id, arrayJoin(arrayZip(grid, irate_values, idelta_values, rate_values, delta_values)) +FROM ( + SELECT + metric_id, + 
timeSeriesInstantRateToGrid(start_ts, end_ts, step_sec, window_sec)(samples.1, samples.2) as irate_values, + timeSeriesInstantDeltaToGrid(start_ts, end_ts, step_sec, window_sec)(samples.1, samples.2) as idelta_values, + timeSeriesRateToGrid(start_ts, end_ts, step_sec, window_sec)(samples.1, samples.2) as rate_values, + timeSeriesDeltaToGrid(start_ts, end_ts, step_sec, window_sec)(samples.1, samples.2) as delta_values + FROM clusterAllReplicas('test_shard_localhost', currentDatabase(), t_resampled_timeseries_64) + GROUP BY metric_id +) +ORDER BY metric_id +SETTINGS enable_parallel_replicas=1, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1, enable_analyzer=1; + +-- Another test with a counter reset +WITH [ +(1600000000, 10), +(1600000010, 20), +(1600000020, 30), +(1600000030, 40), +(1600000040, 10), +(1600000050, 70), +(1600000060, 90), +(1600000270, 20), +(1600000330, 10) +]::Array(Tuple(UInt32, Float64)) AS data +SELECT * FROM ( + SELECT 'delta' as name, timeSeriesDeltaToGrid(1600000010, 1600000320, 10, 300)(data.1, data.2) + UNION ALL + SELECT 'idelta' as name, timeSeriesInstantDeltaToGrid(1600000010, 1600000320, 10, 300)(data.1, data.2) +) ORDER BY name; + + +-- Tests to validate block header compatibility in queries with parallel replicas +SET serialize_query_plan=1, prefer_localhost_replica = false; + +SELECT + metric_id, + toTypeName(timeSeriesInstantRateToGridState('2024-12-12 12:00:10'::DateTime64(3,'UTC'), '2024-12-12 12:01:00'::DateTime64(3,'UTC'), 10, 60)(samples.1, samples.2)) as c1, + toTypeName(timeSeriesInstantRateToGridState('2024-12-12 12:00:10'::DateTime64(4,'UTC'), '2024-12-12 12:01:00'::DateTime64(3,'UTC'), 10, 60)(samples.1, samples.2)) as c2, + toTypeName(timeSeriesInstantRateToGridState('2024-12-12 12:00:10'::DateTime64(3,'UTC'), '2024-12-12 12:01:00'::DateTime64(2,'UTC'), 10, 60)(samples.1, samples.2)) as 
c3, + timeSeriesInstantRateToGrid('2024-12-12 12:00:10.123'::DateTime64(4,'UTC'), '2024-12-12 12:01:01'::DateTime64(3,'UTC'), 10, 60)(samples.1, samples.2) as c4, + timeSeriesInstantRateToGrid('2024-12-12 12:00:10.123456'::DateTime64(4,'UTC'), '2024-12-12 12:01:00.123'::DateTime64(3,'UTC'), 10, 60)(samples.1, samples.2) as c5, + timeSeriesInstantRateToGrid('2024-12-12 12:00:10.12'::DateTime64(1,'UTC'), '2024-12-12 12:01:00.123'::DateTime64(3,'UTC'), 10, 60)(samples.1, samples.2) as c6, + timeSeriesInstantRateToGrid('2024-12-12 12:00:10.123'::DateTime64(0,'UTC'), '2024-12-12 12:01:00'::DateTime64(3,'UTC'), 10, 60)(samples.1, samples.2) as c7, + timeSeriesInstantRateToGrid('2024-12-12 12:00:10'::DateTime('UTC'), '2024-12-12 12:01:00'::DateTime64(3,'UTC'), 10, 60)(samples.1, samples.2) as c8, + timeSeriesInstantRateToGrid('2024-12-12 12:00:10.123'::DateTime64(2,'UTC'), '2024-12-12 12:01:01'::DateTime('UTC'), 10, 60)(samples.1, samples.2) as c9 +FROM clusterAllReplicas('test_shard_localhost', currentDatabase(), t_resampled_timeseries_64) +GROUP BY metric_id +FORMAT Vertical; diff --git a/parser/testdata/03254_timeseries_range/ast.json b/parser/testdata/03254_timeseries_range/ast.json new file mode 100644 index 000000000..9991fc356 --- /dev/null +++ b/parser/testdata/03254_timeseries_range/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function timeSeriesRange (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2025-06-01 00:00:00'" + }, + { + "explain": " Literal 'DateTime64(3)'" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2025-06-01 00:01:30.000'" + }, + { + "explain": " Literal 'DateTime64(3)'" + }, + { + "explain": " Literal UInt64_30" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001409754, + "rows_read": 15, + "bytes_read": 605 + } +} diff --git a/parser/testdata/03254_timeseries_range/metadata.json b/parser/testdata/03254_timeseries_range/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_timeseries_range/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_timeseries_range/query.sql b/parser/testdata/03254_timeseries_range/query.sql new file mode 100644 index 000000000..ef893645e --- /dev/null +++ b/parser/testdata/03254_timeseries_range/query.sql @@ -0,0 +1,27 @@ +SELECT timeSeriesRange('2025-06-01 00:00:00'::DateTime64(3), '2025-06-01 00:01:30.000'::DateTime64(3), 30); +SELECT timeSeriesRange('2025-06-01 00:00:00'::DateTime64(3), '2025-06-01 00:00:01.000'::DateTime64(3), '0.10'::Decimal64(3)); +SELECT timeSeriesRange('2025-06-01 00:00:00'::DateTime64(3), '2025-06-01 00:00:30.000'::DateTime64(3), 30); +SELECT timeSeriesRange('2025-06-01 00:00:00'::DateTime64(3), '2025-06-01 00:00:00.000'::DateTime64(3), 30); +SELECT timeSeriesRange('2025-06-01 00:00:00'::DateTime64(3), '2025-06-01 00:00:00.000'::DateTime64(3), 0); + +-- Different scales +SELECT timeSeriesRange('2025-06-01 00:00:00.0'::DateTime64(1), '2025-06-01 00:00:01.00'::DateTime64(2), 
'0.123'::Decimal64(3)); + +-- Wrong range: end_timestamp < start_timestamp +SELECT timeSeriesRange('2025-06-01 00:01:00'::DateTime64(3), '2025-06-01 00:00:00.000'::DateTime64(3), 30); -- {serverError BAD_ARGUMENTS} +SELECT timeSeriesRange('2025-06-01 00:01:00'::DateTime64(3), '2025-06-01 00:00:00.000'::DateTime64(3), -30); -- {serverError BAD_ARGUMENTS} + +-- Wrong step +SELECT timeSeriesRange('2025-06-01 00:00:00'::DateTime64(3), '2025-06-01 00:00:50.000'::DateTime64(3), 0); -- {serverError BAD_ARGUMENTS} +SELECT timeSeriesRange('2025-06-01 00:00:00'::DateTime64(3), '2025-06-01 00:00:50.000'::DateTime64(3), -10); -- {serverError BAD_ARGUMENTS} + +-- timeSeriesFromGrid without NULLs +SELECT timeSeriesFromGrid('2025-06-01 00:00:00'::DateTime64(3), '2025-06-01 00:01:30.000'::DateTime64(3), 30, [100, 200, 300, 400]); + +-- timeSeriesFromGrid with NULLs +SELECT timeSeriesFromGrid('2025-06-01 00:00:00'::DateTime64(3), '2025-06-01 00:01:30.000'::DateTime64(3), 30, [100, 200, NULL, 400]); +SELECT timeSeriesFromGrid('2025-06-01 00:00:00'::DateTime64(3), '2025-06-01 00:03:00.000'::DateTime64(3), 30, [100, NULL, 300, NULL, NULL, 600, NULL]); + +-- Wrong number of values +SELECT timeSeriesFromGrid('2025-06-01 00:00:00'::DateTime64(3), '2025-06-01 00:01:30.000'::DateTime64(3), 30, [10, 20, 30]); -- {serverError BAD_ARGUMENTS} +SELECT timeSeriesFromGrid('2025-06-01 00:00:00'::DateTime64(3), '2025-06-01 00:01:30.000'::DateTime64(3), 30, [10, 20, 30, 40, 50]); -- {serverError BAD_ARGUMENTS} diff --git a/parser/testdata/03254_timeseries_to_grid_aggregate_function/ast.json b/parser/testdata/03254_timeseries_to_grid_aggregate_function/ast.json new file mode 100644 index 000000000..25a8d349b --- /dev/null +++ b/parser/testdata/03254_timeseries_to_grid_aggregate_function/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery ts_data (children 3)" + }, + { + "explain": " Identifier ts_data" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration timestamp (children 1)" + }, + { + "explain": " DataType DateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " ColumnDeclaration value (children 1)" + }, + { + "explain": " DataType Float64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001144153, + "rows_read": 15, + "bytes_read": 547 + } +} diff --git a/parser/testdata/03254_timeseries_to_grid_aggregate_function/metadata.json b/parser/testdata/03254_timeseries_to_grid_aggregate_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_timeseries_to_grid_aggregate_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_timeseries_to_grid_aggregate_function/query.sql b/parser/testdata/03254_timeseries_to_grid_aggregate_function/query.sql new file mode 100644 index 000000000..0dd3ce1bb --- /dev/null +++ b/parser/testdata/03254_timeseries_to_grid_aggregate_function/query.sql @@ -0,0 +1,135 @@ +CREATE TABLE ts_data (timestamp DateTime('UTC'), value Float64) ENGINE=MergeTree() ORDER BY tuple(); + +WITH [11, 57, 71, 
88, 89, 101, 127, 135, 151] as timestamps +INSERT INTO ts_data SELECT ts::DateTime64 as timestamp, ts + 10000 as value FROM (SELECT arrayJoin(timestamps) as ts); + +WITH [102, 104, 112, 113, 120] as timestamps +INSERT INTO ts_data SELECT ts::DateTime64 as timestamp, ts + 10000 as value FROM (SELECT arrayJoin(timestamps) as ts); + +SET allow_experimental_ts_to_grid_aggregate_function = 1; + +SELECT 'Original data (ts, val):'; +SELECT groupArraySorted(30)((toUnixTimestamp(timestamp), value)) FROM ts_data; + +SELECT 'timeSeriesResampleToGridWithStaleness(100, 200, 10, 15):'; + +WITH + 100 as begin, + 200 as end, + 10 as step_sec, + 15 as staleness_sec, + CAST(begin as DateTime('UTC')) as begin_ts, + CAST(end as DateTime('UTC')) as end_ts, + range(begin, end+step_sec, step_sec) as grid +SELECT + arrayZip( + grid, + timeSeriesResampleToGridWithStaleness(begin, end, step_sec, staleness_sec)(timestamp, value) + ) as a +FROM ts_data; + +WITH + 100 as begin, + 200 as end, + 10 as step_sec, + 15 as staleness_sec, + CAST(begin as DateTime('UTC')) as begin_ts, + CAST(end as DateTime('UTC')) as end_ts, + range(begin, end+step_sec, step_sec) as grid +SELECT + arrayZip( + grid, + timeSeriesResampleToGridWithStaleness(begin_ts, end_ts, step_sec, staleness_sec)(timestamp, value) + ) as b +FROM ts_data; + +WITH + 100 as begin, + 200 as end, + 10 as step_sec, + 15 as staleness_sec, + CAST(begin as DateTime('UTC')) as begin_ts, + CAST(end as DateTime('UTC')) as end_ts, + range(begin, end+step_sec, step_sec) as grid +SELECT + arrayZip( + grid, + timeSeriesResampleToGridWithStaleness(begin_ts, end_ts, step_sec, staleness_sec)(timestamp::DateTime64(3, 'UTC'), value::Float32) + ) as c +FROM ts_data; + +WITH + 100 as begin, + 200 as end, + 10 as step_sec, + 15 as staleness_sec, + CAST(begin as DateTime('UTC')) as begin_ts, + CAST(end as DateTime('UTC')) as end_ts, + range(begin, end+step_sec, step_sec) as grid +SELECT + arrayZip( + grid, + timeSeriesResampleToGridWithStaleness(begin_ts::DateTime64(2, 'UTC'), end_ts::DateTime64(1, 'UTC'), step_sec::Decimal(6,2), staleness_sec::Decimal(18,3))(timestamp::DateTime64(3, 'UTC'), value::Float32) + ) as d +FROM ts_data; + +WITH + 100 as begin, + 200 as end, + 10 as step_sec, + 15 as staleness_sec, + CAST(begin as DateTime('UTC')) as begin_ts, + CAST(end as DateTime('UTC')) as end_ts, + range(begin, end+step_sec, step_sec) as grid +SELECT + arrayZip( + grid, + timeSeriesResampleToGridWithStaleness(begin_ts, end_ts::DateTime64(3, 'UTC'), step_sec::Decimal(6,2), staleness_sec)(timestamp::DateTime64(6, 'UTC'), value) + ) as e +FROM ts_data; + +-- AggregatingMergeTree table to test (de)serialization of timeSeriesResampleToGridWithStaleness state +CREATE TABLE ts_data_agg(k UInt64, agg AggregateFunction(timeSeriesResampleToGridWithStaleness(100, 200, 10, 15), DateTime('UTC'), Float64)) ENGINE AggregatingMergeTree() ORDER BY k; + +-- Insert the data splitting it into several pieces +INSERT INTO ts_data_agg SELECT toUnixTimestamp(timestamp)%3, initializeAggregation('timeSeriesResampleToGridWithStalenessState(100, 200, 10, 15)', timestamp, value) FROM ts_data; + +SELECT k, finalizeAggregation(agg) FROM ts_data_agg FINAL ORDER BY k; + +-- Check that -Merge returns the same result as the result from the original table +SELECT timeSeriesResampleToGridWithStaleness(100, 200, 10, 15)(timestamp, value) FROM ts_data; +SELECT timeSeriesResampleToGridWithStalenessMerge(100, 200, 10, 15)(agg) FROM ts_data_agg;
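+ +-- Note: initializeAggregation('timeSeriesResampleToGridWithStalenessState(...)', args...) builds an aggregate function state per row; +-- AggregatingMergeTree merges the states that share a key, and finalizeAggregation / the -Merge combinator turn a state back into plain values.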
+ +-- Check various data types for parameters and arguments +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15, 50)(timestamp, value) AS res FROM ts_data; +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15, 50)(timestamp::DateTime64(2,'UTC'), value) AS res FROM ts_data; +SELECT timeSeriesResampleToGridWithStaleness(100::Int32, 150::UInt16, 15::Decimal(10,2), 50)(timestamp::DateTime64(3, 'UTC'), value::Float32) AS res FROM ts_data; +SELECT timeSeriesResampleToGridWithStaleness(100, 100, 15, 50)(timestamp::DateTime64(3, 'UTC'), value::Float32) AS res FROM ts_data; +SELECT timeSeriesResampleToGridWithStalenessIf(100, 150, 15, 50)(timestamp, value, value%2==0) AS res FROM ts_data; + +-- Subsecond step and window parameters +select timeSeriesResampleToGridWithStaleness( + '2025-06-01 12:00:00.300'::DateTime64(3, 'UTC'), + '2025-06-01 12:00:00.900'::DateTime64(3, 'UTC'), + '0.300'::Decimal64(3), + '0.500'::Decimal64(3)) + (['2025-06-01 12:00:00.011', '2025-06-01 12:00:00.768']::Array(DateTime64(3, 'UTC')), + [10, 20]::Array(Float64)); + +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15, 50)(timestamp, value::Decimal(10,3)) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15, 50)(timestamp, value::Int64) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15, 50)(timestamp, value::String) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15, 50)(timestamp, value::DateTime) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT timeSeriesResampleToGridWithStaleness(100::Float64, 150, 15, 50)(timestamp, value) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSeriesResampleToGridWithStaleness(100, 150::Float32, 15, 50)(timestamp, value) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15::Float32, 50)(timestamp, value) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15, 50::Float64)(timestamp, value) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT timeSeriesResampleToGridWithStaleness(-100, 150, 15, 50)(timestamp, value) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSeriesResampleToGridWithStaleness(100, -150, 15, 50)(timestamp, value) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSeriesResampleToGridWithStaleness(100, 150, -15, 50)(timestamp, value) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15, -50)(timestamp, value) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT timeSeriesResampleToGridWithStaleness(200, 100, 15, 50)(timestamp, value) AS res FROM ts_data; -- { serverError BAD_ARGUMENTS } +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 0, 50)(timestamp, value) AS res FROM ts_data; -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03254_timeseries_to_grid_aggregate_function_sparse/ast.json b/parser/testdata/03254_timeseries_to_grid_aggregate_function_sparse/ast.json new file mode 100644 index 000000000..bc8e3c5d8 --- /dev/null +++ b/parser/testdata/03254_timeseries_to_grid_aggregate_function_sparse/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": 
"CreateQuery ts_data (children 3)" + }, + { + "explain": " Identifier ts_data" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration timestamp (children 1)" + }, + { + "explain": " DataType DateTime (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " ColumnDeclaration value (children 1)" + }, + { + "explain": " DataType Float64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001069337, + "rows_read": 15, + "bytes_read": 547 + } +} diff --git a/parser/testdata/03254_timeseries_to_grid_aggregate_function_sparse/metadata.json b/parser/testdata/03254_timeseries_to_grid_aggregate_function_sparse/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_timeseries_to_grid_aggregate_function_sparse/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_timeseries_to_grid_aggregate_function_sparse/query.sql b/parser/testdata/03254_timeseries_to_grid_aggregate_function_sparse/query.sql new file mode 100644 index 000000000..8120e26c7 --- /dev/null +++ b/parser/testdata/03254_timeseries_to_grid_aggregate_function_sparse/query.sql @@ -0,0 +1,159 @@ +CREATE TABLE ts_data (timestamp DateTime('UTC'), value Float64) ENGINE=MergeTree() ORDER BY tuple(); + +WITH [11, 57, 71, 88, 89, 101, 127, 135, 151] as timestamps +INSERT INTO ts_data SELECT ts::DateTime64 as timestamp, ts + 10000 as value FROM (SELECT arrayJoin(timestamps) as ts); + +WITH [102, 104, 112, 113, 120] as timestamps +INSERT INTO ts_data SELECT ts::DateTime64 as timestamp, ts + 10000 as value FROM (SELECT arrayJoin(timestamps) as ts); + +SET allow_experimental_ts_to_grid_aggregate_function = 1; + +SELECT 'Original data (ts, val):'; +SELECT groupArraySorted(30)((toUnixTimestamp(timestamp), value)) FROM ts_data; + +SELECT 'timeSeriesResampleToGridWithStaleness(100, 200, 10, 15):'; + +WITH + 100 as begin, + 200 as end, + 10 as step_sec, + 15 as staleness_sec, + CAST(begin as DateTime('UTC')) as begin_ts, + CAST(end as DateTime('UTC')) as end_ts, + range(begin, end+step_sec, step_sec) as grid +SELECT + arrayZip( + grid, + timeSeriesResampleToGridWithStaleness(begin, end, step_sec, staleness_sec)(timestamp, value) + ) as a +FROM ts_data; + +WITH + 100 as begin, + 200 as end, + 10 as step_sec, + 15 as staleness_sec, + CAST(begin as DateTime('UTC')) as begin_ts, + CAST(end as DateTime('UTC')) as end_ts, + range(begin, end+step_sec, step_sec) as grid +SELECT + arrayZip( + grid, + timeSeriesResampleToGridWithStaleness(begin_ts, end_ts, step_sec, staleness_sec)(timestamp, value) + ) as b +FROM ts_data; + +WITH + 100 as begin, + 200 as end, + 10 as step_sec, + 15 as staleness_sec, + CAST(begin as DateTime('UTC')) as begin_ts, + CAST(end as DateTime('UTC')) as end_ts, + range(begin, end+step_sec, step_sec) as grid +SELECT + arrayZip( + grid, + timeSeriesResampleToGridWithStaleness(begin_ts, end_ts, step_sec, staleness_sec)(timestamp::DateTime64(3, 'UTC'), value::Float32) + ) as c +FROM ts_data; + +WITH + 100 as begin, + 200 as end, + 10 as step_sec, + 15 as staleness_sec, + CAST(begin as DateTime('UTC')) as begin_ts, + CAST(end as DateTime('UTC')) as 
end_ts, + range(begin, end+step_sec, step_sec) as grid +SELECT + arrayZip( + grid, + timeSeriesResampleToGridWithStaleness(begin_ts::DateTime64(2, 'UTC'), end_ts::DateTime64(1, 'UTC'), step_sec::Decimal(6,2), staleness_sec::Decimal(18,3))(timestamp::DateTime64(3, 'UTC'), value::Float32) + ) as d +FROM ts_data; + +WITH + 100 as begin, + 200 as end, + 10 as step_sec, + 15 as staleness_sec, + CAST(begin as DateTime('UTC')) as begin_ts, + CAST(end as DateTime('UTC')) as end_ts, + range(begin, end+step_sec, step_sec) as grid +SELECT + arrayZip( + grid, + timeSeriesResampleToGridWithStaleness(begin_ts, end_ts::DateTime64(3, 'UTC'), step_sec::Decimal(6,2), staleness_sec)(timestamp::DateTime64(6, 'UTC'), value) + ) as e +FROM ts_data; + +WITH + 100 as begin, + 200 as end, + 10 as step_sec, + 15 as staleness_sec, + CAST(begin as DateTime('UTC')) as begin_ts, + CAST(end as DateTime('UTC')) as end_ts, + range(begin, end+step_sec, step_sec) as grid +SELECT + arrayZip( + grid, + timeSeriesResampleToGridWithStaleness(begin_ts, end_ts, step_sec, staleness_sec)(timestamp::DateTime64(6, 'UTC'), value) + ) as e +FROM clusterAllReplicas('test_shard_localhost', currentDatabase(), ts_data) SETTINGS enable_parallel_replicas=1, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1, prefer_localhost_replica = 0; + +-- Test for returning multiple rows in batch +SELECT intDiv(toUnixTimestamp(timestamp), 130)*130 as fake_key, timeSeriesResampleToGridWithStaleness(100, 200, 10, 15)(timestamp, value) FROM ts_data GROUP BY fake_key ORDER BY fake_key; + +-- Compute average in each bucket +SELECT avgForEach(values), countForEach(values) AS avg_values +FROM ( + SELECT intDiv(toUnixTimestamp(timestamp), 130)*130 as fake_key, timeSeriesResampleToGridWithStaleness(100, 200, 10, 15)(timestamp, value) AS values FROM ts_data GROUP BY fake_key +); + +-- AggregatingMergeTree table to test (de)serialization of timeSeriesResampleToGridWithStaleness state +CREATE TABLE ts_data_agg(k UInt64, agg AggregateFunction(timeSeriesResampleToGridWithStaleness(100, 200, 10, 15), DateTime('UTC'), Float64)) ENGINE AggregatingMergeTree() ORDER BY k; + +-- Insert the data splitting it into several pieces +INSERT INTO ts_data_agg SELECT toUnixTimestamp(timestamp)%3, initializeAggregation('timeSeriesResampleToGridWithStalenessState(100, 200, 10, 15)', timestamp, value) FROM ts_data; + +SELECT k, finalizeAggregation(agg) FROM ts_data_agg FINAL ORDER BY k;
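+ +-- Note: FINAL merges the data parts at read time, so each key k is reduced to a single combined state before finalizeAggregation converts it to values.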
+ +-- Reload table and check that the data is the same (i.e. serialize-deserialize worked correctly) +DETACH TABLE ts_data_agg; +ATTACH TABLE ts_data_agg; +SELECT k, finalizeAggregation(agg) FROM ts_data_agg FINAL ORDER BY k; + +-- Check that -Merge returns the same result as the result from the original table +SELECT timeSeriesResampleToGridWithStaleness(100, 200, 10, 15)(timestamp, value) FROM ts_data; +SELECT timeSeriesResampleToGridWithStalenessMerge(100, 200, 10, 15)(agg) FROM ts_data_agg; + +-- Check various data types for parameters and arguments +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15, 50)(timestamp, value) AS res FROM ts_data; +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15, 50)(timestamp::DateTime64(2,'UTC'), value) AS res FROM ts_data; +SELECT timeSeriesResampleToGridWithStaleness(100::Int32, 150::UInt16, 15::Decimal(10,2), 50)(timestamp::DateTime64(3, 'UTC'), value::Float32) AS res FROM ts_data; +SELECT timeSeriesResampleToGridWithStaleness(100, 100, 15, 50)(timestamp::DateTime64(3, 'UTC'), value::Float32) AS res FROM ts_data; +SELECT timeSeriesResampleToGridWithStalenessIf(100, 150, 15, 50)(timestamp, value, value%2==0) AS res FROM ts_data; + +-- Test with Nullable timestamps and values +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15, 50)(if (value < 10120, Null, timestamp), value::Float32) AS res FROM ts_data; +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15, 50)(timestamp, if (value < 10120, Null, value)) AS res FROM ts_data; + +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15, 50)(timestamp, value::Decimal(10,3)) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15, 50)(timestamp, value::Int64) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15, 50)(timestamp, value::String) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15, 50)(timestamp, value::DateTime) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT timeSeriesResampleToGridWithStaleness(100::Float64, 150, 15, 50)(timestamp, value) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSeriesResampleToGridWithStaleness(100, 150::Float32, 15, 50)(timestamp, value) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15::Float32, 50)(timestamp, value) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15, 50::Float64)(timestamp, value) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT timeSeriesResampleToGridWithStaleness(-100, 150, 15, 50)(timestamp, value) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSeriesResampleToGridWithStaleness(100, -150, 15, 50)(timestamp, value) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSeriesResampleToGridWithStaleness(100, 150, -15, 50)(timestamp, value) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 15, -50)(timestamp, value) AS res FROM ts_data; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT timeSeriesResampleToGridWithStaleness(200, 100, 15, 50)(timestamp, value) AS res FROM ts_data; -- { serverError BAD_ARGUMENTS } +SELECT timeSeriesResampleToGridWithStaleness(100, 150, 0, 50)(timestamp, value) AS res FROM ts_data; -- { 
serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03254_uniq_exact_two_level_negative_zero/ast.json b/parser/testdata/03254_uniq_exact_two_level_negative_zero/ast.json new file mode 100644 index 000000000..004e321da --- /dev/null +++ b/parser/testdata/03254_uniq_exact_two_level_negative_zero/ast.json @@ -0,0 +1,145 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_1000" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function rand (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal Float64_0" + }, + { + "explain": " Literal Float64_-0" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal 'Float64'" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function length (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function uniqExactState (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal 'String'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1000000" + } + ], + + "rows": 41, + + "statistics": + { + "elapsed": 0.001116664, + "rows_read": 41, + "bytes_read": 1621 + } +} diff --git a/parser/testdata/03254_uniq_exact_two_level_negative_zero/metadata.json b/parser/testdata/03254_uniq_exact_two_level_negative_zero/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03254_uniq_exact_two_level_negative_zero/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03254_uniq_exact_two_level_negative_zero/query.sql b/parser/testdata/03254_uniq_exact_two_level_negative_zero/query.sql new file mode 100644 index 000000000..3237818d2 --- /dev/null +++ b/parser/testdata/03254_uniq_exact_two_level_negative_zero/query.sql @@ -0,0 +1 @@ +WITH number % 1000 = 0 ? (rand() % 2 ? 
0.0 : -0.0) : number::Float64 AS x SELECT length(uniqExactState(x)::String) FROM numbers(1000000); diff --git a/parser/testdata/03255_fix_sbstrings_logical_error/ast.json b/parser/testdata/03255_fix_sbstrings_logical_error/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03255_fix_sbstrings_logical_error/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03255_fix_sbstrings_logical_error/metadata.json b/parser/testdata/03255_fix_sbstrings_logical_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03255_fix_sbstrings_logical_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03255_fix_sbstrings_logical_error/query.sql b/parser/testdata/03255_fix_sbstrings_logical_error/query.sql new file mode 100644 index 000000000..fd4775ddd --- /dev/null +++ b/parser/testdata/03255_fix_sbstrings_logical_error/query.sql @@ -0,0 +1,9 @@ +SELECT + concat(concat(11), + 5, + countSubstringsCaseInsensitive( + concat(countSubstringsCaseInsensitive( + concat(11, toString(number), materialize('aaa111'), 6, materialize(6)), char(number)), + 'aaa111'), + char(countSubstringsCaseInsensitive(concat(' test'), char(toLowCardinality(6))))), + 'aaa111', 6) FROM numbers(1); diff --git a/parser/testdata/03255_merge_mutation_start_entry_in_the_part_log/ast.json b/parser/testdata/03255_merge_mutation_start_entry_in_the_part_log/ast.json new file mode 100644 index 000000000..74bfb42c9 --- /dev/null +++ b/parser/testdata/03255_merge_mutation_start_entry_in_the_part_log/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001316533, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03255_merge_mutation_start_entry_in_the_part_log/metadata.json b/parser/testdata/03255_merge_mutation_start_entry_in_the_part_log/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03255_merge_mutation_start_entry_in_the_part_log/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03255_merge_mutation_start_entry_in_the_part_log/query.sql b/parser/testdata/03255_merge_mutation_start_entry_in_the_part_log/query.sql new file mode 100644 index 000000000..e8c4756bb --- /dev/null +++ b/parser/testdata/03255_merge_mutation_start_entry_in_the_part_log/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test (x UInt8, y UInt8, z String DEFAULT toString(x)) PARTITION BY x ORDER BY x; +INSERT INTO test (x, y) VALUES (1, 1); +INSERT INTO test (x, y) VALUES (1, 2); +OPTIMIZE TABLE test FINAL; +INSERT INTO test (x, y) VALUES (2, 1); +ALTER TABLE test DROP PARTITION 2; +SET mutations_sync = 1; +ALTER TABLE test UPDATE z = x || y WHERE 1; +SELECT * FROM test ORDER BY ALL; +TRUNCATE TABLE test; +DROP TABLE test SYNC; +SYSTEM FLUSH LOGS part_log; + +SELECT event_type, merge_reason, table, part_name, partition_id, partition, rows, merged_from +FROM system.part_log WHERE database = currentDatabase() AND event_type IN ('MergePartsStart', 'MergeParts', 'MutatePartStart', 'MutatePart') +ORDER BY event_time_microseconds FORMAT Vertical; diff --git a/parser/testdata/03256_invalid_mutation_query/ast.json b/parser/testdata/03256_invalid_mutation_query/ast.json new file mode 100644 index 000000000..e1ada0326 --- /dev/null +++ 
b/parser/testdata/03256_invalid_mutation_query/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001083154, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03256_invalid_mutation_query/metadata.json b/parser/testdata/03256_invalid_mutation_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03256_invalid_mutation_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03256_invalid_mutation_query/query.sql b/parser/testdata/03256_invalid_mutation_query/query.sql new file mode 100644 index 000000000..9b4e8f9a7 --- /dev/null +++ b/parser/testdata/03256_invalid_mutation_query/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t (x int) ENGINE = MergeTree() ORDER BY (); + +DELETE FROM t WHERE y in (SELECT x FROM t); -- { serverError UNKNOWN_IDENTIFIER } +DELETE FROM t WHERE x in (SELECT y FROM t); -- { serverError UNKNOWN_IDENTIFIER } +DELETE FROM t WHERE x IN (SELECT * FROM t2); -- { serverError UNKNOWN_TABLE } +ALTER TABLE t DELETE WHERE x in (SELECT y FROM t); -- { serverError UNKNOWN_IDENTIFIER } +ALTER TABLE t UPDATE x = 1 WHERE x IN (SELECT y FROM t); -- { serverError UNKNOWN_IDENTIFIER } + +DELETE FROM t WHERE x IN (SELECT foo FROM bar) SETTINGS validate_mutation_query = 0; + +ALTER TABLE t ADD COLUMN y int; +DELETE FROM t WHERE y in (SELECT y FROM t); + +CREATE TABLE t2 (x int) ENGINE = MergeTree() ORDER BY (); +DELETE FROM t WHERE x IN (SELECT * FROM t2); + +DROP TABLE t; +DROP TABLE t2; diff --git a/parser/testdata/03257_json_escape_file_names/ast.json b/parser/testdata/03257_json_escape_file_names/ast.json new file mode 100644 index 000000000..6cc5b9ff9 --- /dev/null +++ b/parser/testdata/03257_json_escape_file_names/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00108011, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03257_json_escape_file_names/metadata.json b/parser/testdata/03257_json_escape_file_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03257_json_escape_file_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03257_json_escape_file_names/query.sql b/parser/testdata/03257_json_escape_file_names/query.sql new file mode 100644 index 000000000..158d36e25 --- /dev/null +++ b/parser/testdata/03257_json_escape_file_names/query.sql @@ -0,0 +1,9 @@ +SET enable_json_type = 1; +drop table if exists test; +create table test (json JSON) engine=MergeTree order by tuple() settings min_rows_for_wide_part=0, min_bytes_for_wide_part=0; +insert into test format JSONAsObject {"a/b/c" : 42, "a-b-c" : 43, "a-b/c-d/e" : 44}; + +select * from test; +select json.`a/b/c`, json.`a-b-c`, json.`a-b/c-d/e` from test; +select json.`a/b/c`.:Int64, json.`a-b-c`.:Int64, json.`a-b/c-d/e`.:Int64 from test; +drop table test; diff --git a/parser/testdata/03257_reverse_sorting_key/ast.json b/parser/testdata/03257_reverse_sorting_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03257_reverse_sorting_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/03257_reverse_sorting_key/metadata.json b/parser/testdata/03257_reverse_sorting_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03257_reverse_sorting_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03257_reverse_sorting_key/query.sql b/parser/testdata/03257_reverse_sorting_key/query.sql new file mode 100644 index 000000000..9dbb46873 --- /dev/null +++ b/parser/testdata/03257_reverse_sorting_key/query.sql @@ -0,0 +1,52 @@ +-- Tags: no-random-merge-tree-settings + +set optimize_read_in_order = 1; +set read_in_order_two_level_merge_threshold=100; + +drop table if exists x1; + +drop table if exists x2; + +create table x1 (i Nullable(int)) engine MergeTree order by i desc settings allow_nullable_key = 1, index_granularity = 2, allow_experimental_reverse_key = 1; + +insert into x1 select * from numbers(100); + +optimize table x1 final; + +select * from x1 where i = 3; + +select count() from x1 where i between 3 and 10; + +select trimLeft(explain) from (explain actions=1 select * from x1 order by i desc nulls first limit 5) where explain ilike '%sort%' settings max_threads=1, enable_analyzer=1; +explain pipeline select * from x1 order by i desc nulls first limit 5 settings max_threads=1; + +select * from x1 order by i desc limit 5; + +select trimLeft(explain) from (explain actions=1 select * from x1 order by i limit 5) where explain ilike '%sort%' settings max_threads=1, enable_analyzer=1; +explain pipeline select * from x1 order by i limit 5 settings max_threads=1; + +select * from x1 order by i limit 5; + +create table x2 (i Nullable(int), j Nullable(int)) engine MergeTree order by (i, j desc) settings allow_nullable_key = 1, index_granularity = 2, allow_experimental_reverse_key = 1; + +insert into x2 select number % 10, number + 1000 from numbers(100); + +optimize table x2 final; + +select * from x2 where j = 1003; + +select count() from x2 where i between 3 and 10 and j between 1003 and 1008; + +select trimLeft(explain) from (explain actions=1 select * from x2 order by i, j desc nulls first limit 5) where explain ilike '%sort%' settings max_threads=1, enable_analyzer=1; +explain pipeline select * from x2 order by i, j desc nulls first limit 5 settings max_threads=1; + +select * from x2 order by i, j desc limit 5; + +select trimLeft(explain) from (explain actions=1 select * from x2 order by i, j limit 5) where explain ilike '%sort%' settings max_threads=1, enable_analyzer=1; +explain pipeline select * from x2 order by i, j limit 5 settings max_threads=1; + +select * from x2 order by i, j limit 5; + +drop table x1; + +drop table x2; diff --git a/parser/testdata/03257_reverse_sorting_key_simple/ast.json b/parser/testdata/03257_reverse_sorting_key_simple/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03257_reverse_sorting_key_simple/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03257_reverse_sorting_key_simple/metadata.json b/parser/testdata/03257_reverse_sorting_key_simple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03257_reverse_sorting_key_simple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03257_reverse_sorting_key_simple/query.sql b/parser/testdata/03257_reverse_sorting_key_simple/query.sql new file mode 100644 index 000000000..62d7fd9ef --- /dev/null +++ b/parser/testdata/03257_reverse_sorting_key_simple/query.sql @@ -0,0 +1,40 @@ +-- Tags: 
no-random-merge-tree-settings + +set optimize_read_in_order = 1; +set read_in_order_two_level_merge_threshold=100; + +drop table if exists x1; + +drop table if exists x2; + +create table x1 (i Nullable(int)) engine MergeTree order by i desc settings allow_nullable_key = 1, index_granularity = 2, allow_experimental_reverse_key = 1; + +insert into x1 select * from numbers(100); + +optimize table x1 final; + +select * from x1 where i = 3; + +select count() from x1 where i between 3 and 10; + +select * from x1 order by i desc limit 5; + +select * from x1 order by i limit 5; + +create table x2 (i Nullable(int), j Nullable(int)) engine MergeTree order by (i, j desc) settings allow_nullable_key = 1, index_granularity = 2, allow_experimental_reverse_key = 1; + +insert into x2 select number % 10, number + 1000 from numbers(100); + +optimize table x2 final; + +select * from x2 where j = 1003; + +select count() from x2 where i between 3 and 10 and j between 1003 and 1008; + +select * from x2 order by i, j desc limit 5; + +select * from x2 order by i, j limit 5; + +drop table x1; + +drop table x2; diff --git a/parser/testdata/03257_reverse_sorting_key_zookeeper/ast.json b/parser/testdata/03257_reverse_sorting_key_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03257_reverse_sorting_key_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03257_reverse_sorting_key_zookeeper/metadata.json b/parser/testdata/03257_reverse_sorting_key_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03257_reverse_sorting_key_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03257_reverse_sorting_key_zookeeper/query.sql b/parser/testdata/03257_reverse_sorting_key_zookeeper/query.sql new file mode 100644 index 000000000..108d348fb --- /dev/null +++ b/parser/testdata/03257_reverse_sorting_key_zookeeper/query.sql @@ -0,0 +1,16 @@ +-- Tags: zookeeper, no-random-merge-tree-settings, no-replicated-database + +drop table if exists x1; +drop table if exists x2; + +create table x1 (i Nullable(int)) engine ReplicatedMergeTree('/clickhouse/tables/{database}/x1', 'r1') order by i desc settings allow_nullable_key = 1, index_granularity = 2, index_granularity_bytes = 10000, allow_experimental_reverse_key = 1; + +create table x2 (i Nullable(int), j Nullable(int)) engine ReplicatedMergeTree('/clickhouse/tables/{database}/x2', 'r1') order by (i, j desc) settings allow_nullable_key = 1, index_granularity = 2, index_granularity_bytes = 10000, allow_experimental_reverse_key = 1; + +set allow_unrestricted_reads_from_keeper = 'true'; + +select value from system.zookeeper where path = '/clickhouse/tables/' || currentDatabase() || '/x1' and name = 'metadata'; +select value from system.zookeeper where path = '/clickhouse/tables/' || currentDatabase() || '/x2' and name = 'metadata'; + +drop table x1; +drop table x2; diff --git a/parser/testdata/03257_scalar_in_format_table_expression/ast.json b/parser/testdata/03257_scalar_in_format_table_expression/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03257_scalar_in_format_table_expression/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03257_scalar_in_format_table_expression/metadata.json b/parser/testdata/03257_scalar_in_format_table_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03257_scalar_in_format_table_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03257_scalar_in_format_table_expression/query.sql b/parser/testdata/03257_scalar_in_format_table_expression/query.sql new file mode 100644 index 000000000..ec89c9874 --- /dev/null +++ b/parser/testdata/03257_scalar_in_format_table_expression/query.sql @@ -0,0 +1,84 @@ +SELECT * FROM format( + JSONEachRow, +$$ +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +$$ + ); + +-- Should be equivalent to the previous one +SELECT * FROM format( + JSONEachRow, + ( + SELECT $$ +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +$$ + ) + ); + +-- The scalar subquery is incorrect so it should throw the proper error +SELECT * FROM format( + JSONEachRow, + ( + SELECT $$ +{"a": "Hello", "b": 111} +{"a": "World", "b": 123} +$$ + WHERE column_does_not_exists = 4 + ) + ); -- { serverError UNKNOWN_IDENTIFIER } + +-- https://github.com/ClickHouse/ClickHouse/issues/70177 + +-- Resolution of the scalar subquery should work ok (already did, adding a test just for safety) +-- Disabled for the old analyzer since it incorrectly passes 's' to format, instead of resolving s and passing that +WITH (SELECT sum(number)::String as s FROM numbers(4)) as s +SELECT *, s +FROM format(TSVRaw, s) +SETTINGS enable_analyzer=1; + +SELECT count() +FROM format(TSVRaw, ( + SELECT where_qualified__fuzz_19 + FROM numbers(10000) +)); -- { serverError UNKNOWN_IDENTIFIER } + +SELECT count() +FROM format(TSVRaw, ( + SELECT where_qualified__fuzz_19 + FROM numbers(10000) + UNION ALL + SELECT where_qualified__fuzz_35 + FROM numbers(10000) +)); -- { serverError UNKNOWN_IDENTIFIER } + +WITH ( + SELECT where_qualified__fuzz_19 + FROM numbers(10000) +) as s SELECT count() +FROM format(TSVRaw, s); -- { serverError UNKNOWN_IDENTIFIER } + +-- https://github.com/ClickHouse/ClickHouse/issues/70675 +SELECT count() +FROM format(TSVRaw, ( + SELECT CAST(arrayStringConcat(groupArray(format(TSVRaw, ( + SELECT CAST(arrayStringConcat(1 GLOBAL IN ( + SELECT 1 + WHERE 1 GLOBAL IN ( + SELECT toUInt128(1) + GROUP BY + GROUPING SETS ((1)) + WITH ROLLUP + ) + GROUP BY 1 + WITH CUBE + ), groupArray('some long string')), 'LowCardinality(String)') + FROM numbers(10000) + )), toLowCardinality('some long string')) RESPECT NULLS, '\n'), 'LowCardinality(String)') + FROM numbers(10000) +)) +FORMAT TSVRaw; -- { serverError UNKNOWN_IDENTIFIER, ILLEGAL_TYPE_OF_ARGUMENT } + +-- Same but for table function numbers +SELECT 1 FROM numbers((SELECT DEFAULT)); -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/03257_setting_tiers/ast.json b/parser/testdata/03257_setting_tiers/ast.json new file mode 100644 index 000000000..79665177c --- /dev/null +++ b/parser/testdata/03257_setting_tiers/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": 
" TableIdentifier system.settings" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier tier" + }, + { + "explain": " Literal 'Production'" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001103089, + "rows_read": 17, + "bytes_read": 649 + } +} diff --git a/parser/testdata/03257_setting_tiers/metadata.json b/parser/testdata/03257_setting_tiers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03257_setting_tiers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03257_setting_tiers/query.sql b/parser/testdata/03257_setting_tiers/query.sql new file mode 100644 index 000000000..4e16ec0da --- /dev/null +++ b/parser/testdata/03257_setting_tiers/query.sql @@ -0,0 +1,12 @@ +SELECT count() > 0 FROM system.settings WHERE tier = 'Production'; +SELECT count() > 0 FROM system.settings WHERE tier = 'Beta'; +SELECT count() > 0 FROM system.settings WHERE tier = 'Experimental'; +SELECT count() > 0 FROM system.settings WHERE tier = 'Obsolete'; +SELECT count() == countIf(tier IN ['Production', 'Beta', 'Experimental', 'Obsolete']) FROM system.settings; + +SELECT count() > 0 FROM system.merge_tree_settings WHERE tier = 'Production'; +-- Currently there aren't any merge tree settings with tier 'Beta'. +SELECT count() > 0 FROM system.merge_tree_settings WHERE tier = 'Beta'; +SELECT count() > 0 FROM system.merge_tree_settings WHERE tier = 'Experimental'; +SELECT count() > 0 FROM system.merge_tree_settings WHERE tier = 'Obsolete'; +SELECT count() == countIf(tier IN ['Production', 'Beta', 'Experimental', 'Obsolete']) FROM system.merge_tree_settings; diff --git a/parser/testdata/03258_dynamic_in_functions_weak_ptr_exception/ast.json b/parser/testdata/03258_dynamic_in_functions_weak_ptr_exception/ast.json new file mode 100644 index 000000000..747275cbd --- /dev/null +++ b/parser/testdata/03258_dynamic_in_functions_weak_ptr_exception/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000957995, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03258_dynamic_in_functions_weak_ptr_exception/metadata.json b/parser/testdata/03258_dynamic_in_functions_weak_ptr_exception/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03258_dynamic_in_functions_weak_ptr_exception/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03258_dynamic_in_functions_weak_ptr_exception/query.sql b/parser/testdata/03258_dynamic_in_functions_weak_ptr_exception/query.sql new file mode 100644 index 000000000..92851cfdc --- /dev/null +++ b/parser/testdata/03258_dynamic_in_functions_weak_ptr_exception/query.sql @@ -0,0 +1,7 @@ +SET allow_experimental_dynamic_type = 1; +SET allow_dynamic_type_in_join_keys = 1; +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Tuple(c1 Int,c2 Dynamic)) ENGINE = Memory(); +SELECT 1 FROM t0 tx JOIN t0 ty ON tx.c0 = ty.c0; +DROP TABLE t0; + diff --git a/parser/testdata/03258_multiple_array_joins/ast.json b/parser/testdata/03258_multiple_array_joins/ast.json new file mode 100644 index 000000000..efec12525 --- /dev/null +++ b/parser/testdata/03258_multiple_array_joins/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + 
+ "statistics": + { + "elapsed": 0.001843339, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03258_multiple_array_joins/metadata.json b/parser/testdata/03258_multiple_array_joins/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03258_multiple_array_joins/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03258_multiple_array_joins/query.sql b/parser/testdata/03258_multiple_array_joins/query.sql new file mode 100644 index 000000000..ddfac1da0 --- /dev/null +++ b/parser/testdata/03258_multiple_array_joins/query.sql @@ -0,0 +1,25 @@ +SET enable_analyzer = 1; +DROP TABLE IF EXISTS test_multiple_array_join; + +CREATE TABLE test_multiple_array_join ( + id UInt64, + person Nested ( + name String, + surname String + ), + properties Nested ( + key String, + value String + ) +) Engine=MergeTree ORDER BY id; + +INSERT INTO test_multiple_array_join VALUES (1, ['Thomas', 'Michel'], ['Aquinas', 'Foucault'], ['profession', 'alive'], ['philosopher', 'no']); +INSERT INTO test_multiple_array_join VALUES (2, ['Thomas', 'Nicola'], ['Edison', 'Tesla'], ['profession', 'alive'], ['inventor', 'no']); + +SELECT * +FROM test_multiple_array_join +ARRAY JOIN person +ARRAY JOIN properties +ORDER BY ALL; + +DROP TABLE test_multiple_array_join; diff --git a/parser/testdata/03258_old_analyzer_const_expr_bug/ast.json b/parser/testdata/03258_old_analyzer_const_expr_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03258_old_analyzer_const_expr_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03258_old_analyzer_const_expr_bug/metadata.json b/parser/testdata/03258_old_analyzer_const_expr_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03258_old_analyzer_const_expr_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03258_old_analyzer_const_expr_bug/query.sql b/parser/testdata/03258_old_analyzer_const_expr_bug/query.sql new file mode 100644 index 000000000..913de3b84 --- /dev/null +++ b/parser/testdata/03258_old_analyzer_const_expr_bug/query.sql @@ -0,0 +1,23 @@ +WITH + multiIf('-1' = '-1', 10080, '-1' = '7', 60, '-1' = '1', 5, 1440) AS interval_start, -- noqa + multiIf('-1' = '-1', CEIL((today() - toDate('2017-06-22')) / 7)::UInt16, '-1' = '7', 168, '-1' = '1', 288, 90) AS days_run, -- noqa:L045 + block_time as (SELECT arrayJoin( + arrayMap( + i -> toDateTime(toStartOfInterval(now(), INTERVAL interval_start MINUTE) - interval_start * 60 * i, 'UTC'), + range(days_run) + ) + )), + +sales AS ( + SELECT + toDateTime(toStartOfInterval(now(), INTERVAL interval_start MINUTE), 'UTC') AS block_time + FROM + numbers(1) + GROUP BY + block_time + ORDER BY + block_time) + +SELECT + block_time +FROM sales where block_time >= (SELECT MIN(block_time) FROM sales) format Null; diff --git a/parser/testdata/03258_quantile_exact_weighted_issue/ast.json b/parser/testdata/03258_quantile_exact_weighted_issue/ast.json new file mode 100644 index 000000000..cbbafdc73 --- /dev/null +++ b/parser/testdata/03258_quantile_exact_weighted_issue/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toTypeName (children 1)" 
+ }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantilesExactWeightedState (alias x) (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal Float64_0.2" + }, + { + "explain": " Literal Float64_0.4" + }, + { + "explain": " Literal Float64_0.6" + }, + { + "explain": " Literal Float64_0.8" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_49999" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.00165412, + "rows_read": 24, + "bytes_read": 973 + } +} diff --git a/parser/testdata/03258_quantile_exact_weighted_issue/metadata.json b/parser/testdata/03258_quantile_exact_weighted_issue/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03258_quantile_exact_weighted_issue/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03258_quantile_exact_weighted_issue/query.sql b/parser/testdata/03258_quantile_exact_weighted_issue/query.sql new file mode 100644 index 000000000..3069389f4 --- /dev/null +++ b/parser/testdata/03258_quantile_exact_weighted_issue/query.sql @@ -0,0 +1,2 @@ +SELECT toTypeName(quantilesExactWeightedState(0.2, 0.4, 0.6, 0.8)(number + 1, 1) AS x) FROM numbers(49999); +SELECT toTypeName(quantilesExactWeightedInterpolatedState(0.2, 0.4, 0.6, 0.8)(number + 1, 1) AS x) FROM numbers(49999); diff --git a/parser/testdata/03259_grouping_sets_aliases/ast.json b/parser/testdata/03259_grouping_sets_aliases/ast.json new file mode 100644 index 000000000..755285984 --- /dev/null +++ b/parser/testdata/03259_grouping_sets_aliases/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery users (children 1)" + }, + { + "explain": " Identifier users" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001285031, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/03259_grouping_sets_aliases/metadata.json b/parser/testdata/03259_grouping_sets_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03259_grouping_sets_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03259_grouping_sets_aliases/query.sql b/parser/testdata/03259_grouping_sets_aliases/query.sql new file mode 100644 index 000000000..80ab80086 --- /dev/null +++ b/parser/testdata/03259_grouping_sets_aliases/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS users; +CREATE TABLE users (name String, score UInt8, user_level String ALIAS multiIf(score <= 3, 'LOW', score <= 6, 'MEDIUM', 'HIGH') ) ENGINE=MergeTree ORDER BY name; + +INSERT INTO users VALUES ('a',1),('b',2),('c', 50); + +SELECT user_level as level_alias, uniq(name) as name_alias, grouping(level_alias) as _totals +FROM remote('127.0.0.{1,2}', currentDatabase(), users) +GROUP BY GROUPING SETS ((level_alias)) +ORDER BY name_alias DESC; diff --git 
a/parser/testdata/03259_join_condition_executed_block_bug/ast.json b/parser/testdata/03259_join_condition_executed_block_bug/ast.json new file mode 100644 index 000000000..b6a3d98e5 --- /dev/null +++ b/parser/testdata/03259_join_condition_executed_block_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001595775, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03259_join_condition_executed_block_bug/metadata.json b/parser/testdata/03259_join_condition_executed_block_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03259_join_condition_executed_block_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03259_join_condition_executed_block_bug/query.sql b/parser/testdata/03259_join_condition_executed_block_bug/query.sql new file mode 100644 index 000000000..e94d81256 --- /dev/null +++ b/parser/testdata/03259_join_condition_executed_block_bug/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (key String, attr String, a UInt64, b UInt64, c Nullable(UInt64)) ENGINE = MergeTree ORDER BY key; +CREATE TABLE t2 (key String, attr String, a UInt64, b UInt64, c Nullable(UInt64)) ENGINE = MergeTree ORDER BY key; + +INSERT INTO t1 VALUES ('key1', 'a', 1, 1, 2), ('key1', 'b', 2, 3, 2), ('key1', 'c', 3, 2, 1), ('key1', 'd', 4, 7, 2), ('key1', 'e', 5, 5, 5), ('key2', 'a2', 1, 1, 1), ('key4', 'f', 2, 3, 4); +INSERT INTO t2 VALUES ('key1', 'A', 1, 2, 1), ('key1', 'B', 2, 1, 2), ('key1', 'C', 3, 4, 5), ('key1', 'D', 4, 1, 6), ('key3', 'a3', 1, 1, 1), ('key4', 'F', 1,1,1); + +SET allow_experimental_join_condition = true; +SET enable_analyzer = true; + +SELECT t1.* FROM t1 FULL OUTER JOIN t2 ON t1.key = t2.key AND (t1.a = 2 OR indexHint(t2.a = 2)) FORMAT Null +; diff --git a/parser/testdata/03259_negate_key_overflow/ast.json b/parser/testdata/03259_negate_key_overflow/ast.json new file mode 100644 index 000000000..e18dd41fb --- /dev/null +++ b/parser/testdata/03259_negate_key_overflow/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery a (children 3)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001193013, + "rows_read": 9, + "bytes_read": 292 + } +} diff --git a/parser/testdata/03259_negate_key_overflow/metadata.json b/parser/testdata/03259_negate_key_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03259_negate_key_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03259_negate_key_overflow/query.sql b/parser/testdata/03259_negate_key_overflow/query.sql new file mode 100644 index 000000000..eb825615e --- /dev/null +++ b/parser/testdata/03259_negate_key_overflow/query.sql @@ -0,0 +1,14 @@ +create table a (x UInt64) engine MergeTree order by x; +insert into a values 
(12345678901234567890), (42); +select * from a where -x = -42; +drop table a; + +create table a (x UInt128) engine MergeTree order by x; +insert into a values (170141183460469231731687303715884105828), (42); +select * from a where -x = -42; +drop table a; + +create table a (x UInt256) engine MergeTree order by x; +insert into a values (57896044618658097711785492504343953926634992332820282019728792003956564820068), (42); +select * from a where -x = -42; +drop table a; diff --git a/parser/testdata/03259_orc_date_out_of_range/ast.json b/parser/testdata/03259_orc_date_out_of_range/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03259_orc_date_out_of_range/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03259_orc_date_out_of_range/metadata.json b/parser/testdata/03259_orc_date_out_of_range/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03259_orc_date_out_of_range/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03259_orc_date_out_of_range/query.sql b/parser/testdata/03259_orc_date_out_of_range/query.sql new file mode 100644 index 000000000..e73d2faa5 --- /dev/null +++ b/parser/testdata/03259_orc_date_out_of_range/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest, no-parallel + +SET session_timezone = 'UTC'; +SET engine_file_truncate_on_insert = 1; + +insert into function file('03259.orc', 'ORC') +select + number, + if (number % 2 = 0, null, toDate32(number)) as date_field +from numbers(10); + +desc file('03259.orc', 'ORC'); + +select date_field from file('03259.orc', 'ORC') order by number; diff --git a/parser/testdata/03260_dynamic_low_cardinality_dict_bug/ast.json b/parser/testdata/03260_dynamic_low_cardinality_dict_bug/ast.json new file mode 100644 index 000000000..2d67550ff --- /dev/null +++ b/parser/testdata/03260_dynamic_low_cardinality_dict_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001603278, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03260_dynamic_low_cardinality_dict_bug/metadata.json b/parser/testdata/03260_dynamic_low_cardinality_dict_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03260_dynamic_low_cardinality_dict_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03260_dynamic_low_cardinality_dict_bug/query.sql b/parser/testdata/03260_dynamic_low_cardinality_dict_bug/query.sql new file mode 100644 index 000000000..c5b981d59 --- /dev/null +++ b/parser/testdata/03260_dynamic_low_cardinality_dict_bug/query.sql @@ -0,0 +1,12 @@ +set allow_experimental_dynamic_type = 1; +set min_bytes_to_use_direct_io = 0; + +drop table if exists test; +create table test (id UInt64, d Dynamic) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, index_granularity=1, use_adaptive_write_buffer_for_dynamic_subcolumns=0, max_compress_block_size=8, min_compress_block_size=8, use_compact_variant_discriminators_serialization=0; + +insert into test select number, '12345678'::LowCardinality(String) from numbers(20); + +select d.`LowCardinality(String)` from test settings max_threads=1; + +drop table test; + diff --git a/parser/testdata/03261_any_respect_camelCase_aliases/ast.json b/parser/testdata/03261_any_respect_camelCase_aliases/ast.json new file mode 100644 index 
000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03261_any_respect_camelCase_aliases/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03261_any_respect_camelCase_aliases/metadata.json b/parser/testdata/03261_any_respect_camelCase_aliases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03261_any_respect_camelCase_aliases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03261_any_respect_camelCase_aliases/query.sql b/parser/testdata/03261_any_respect_camelCase_aliases/query.sql new file mode 100644 index 000000000..c56f09624 --- /dev/null +++ b/parser/testdata/03261_any_respect_camelCase_aliases/query.sql @@ -0,0 +1,40 @@ +-- Tests aliases of any and anyLast functions + +-- aliases of any + +SELECT 'anyRespectNulls'; +SELECT anyRespectNulls(number) FROM numbers(5); +SELECT arrayReduce('anyRespectNulls', [NULL, 10]::Array(Nullable(UInt8))); +SELECT anyRespectNullsMerge(t) FROM (SELECT anyRespectNullsState(NULL::Nullable(UInt8)) as t FROM numbers(5)); +SELECT finalizeAggregation(CAST(unhex('01'), 'AggregateFunction(anyRespectNulls, UInt64)')); +SELECT anyRespectNullsIf (number, NOT isNull(number) AND (assumeNotNull(number) > 5)) FROM numbers(10); + +SELECT 'firstValueRespectNulls'; +SELECT firstValueRespectNulls(number) FROM numbers(5); +SELECT arrayReduce('firstValueRespectNulls', [NULL, 10]::Array(Nullable(UInt8))); +SELECT firstValueRespectNullsMerge(t) FROM (SELECT firstValueRespectNullsState(NULL::Nullable(UInt8)) as t FROM numbers(5)); +SELECT finalizeAggregation(CAST(unhex('01'), 'AggregateFunction(firstValueRespectNulls, UInt64)')); +SELECT firstValueRespectNullsIf (number, NOT isNull(number) AND (assumeNotNull(number) > 5)) FROM numbers(10); + +SELECT 'anyValueRespectNulls'; +SELECT anyValueRespectNulls(number) FROM numbers(5); +SELECT arrayReduce('anyValueRespectNulls', [NULL, 10]::Array(Nullable(UInt8))); +SELECT anyValueRespectNullsMerge(t) FROM (SELECT anyValueRespectNullsState(NULL::Nullable(UInt8)) as t FROM numbers(5)); +SELECT finalizeAggregation(CAST(unhex('01'), 'AggregateFunction(anyValueRespectNulls, UInt64)')); +SELECT anyValueRespectNullsIf (number, NOT isNull(number) AND (assumeNotNull(number) > 5)) FROM numbers(10); + +-- aliases of anyLast + +SELECT 'lastValueRespectNulls'; +SELECT lastValueRespectNulls(number) FROM numbers(5); +SELECT arrayReduce('lastValueRespectNulls', [10, NULL]::Array(Nullable(UInt8))); +SELECT lastValueRespectNullsMerge(t) FROM (SELECT lastValueRespectNullsState(NULL::Nullable(UInt8)) as t FROM numbers(5)); +SELECT finalizeAggregation(CAST(unhex('01'), 'AggregateFunction(lastValueRespectNulls, UInt64)')); +SELECT lastValueRespectNullsIf (number, NOT isNull(number) AND (assumeNotNull(number) > 5)) FROM numbers(10); + +SELECT 'anyLastRespectNulls'; +SELECT anyLastRespectNulls(number) FROM numbers(5); +SELECT arrayReduce('anyLastRespectNulls', [10, NULL]::Array(Nullable(UInt8))); +SELECT anyLastRespectNullsMerge(t) FROM (SELECT anyLastRespectNullsState(NULL::Nullable(UInt8)) as t FROM numbers(5)); +SELECT finalizeAggregation(CAST(unhex('01'), 'AggregateFunction(anyLastRespectNulls, UInt64)')); +SELECT anyLastRespectNullsIf (number, NOT isNull(number) AND (assumeNotNull(number) > 5)) FROM numbers(10); diff --git a/parser/testdata/03261_delayed_streams_memory/ast.json b/parser/testdata/03261_delayed_streams_memory/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03261_delayed_streams_memory/ast.json @@ -0,0 
+1 @@ +{"error": true} diff --git a/parser/testdata/03261_delayed_streams_memory/metadata.json b/parser/testdata/03261_delayed_streams_memory/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03261_delayed_streams_memory/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03261_delayed_streams_memory/query.sql b/parser/testdata/03261_delayed_streams_memory/query.sql new file mode 100644 index 000000000..c663a8ddb --- /dev/null +++ b/parser/testdata/03261_delayed_streams_memory/query.sql @@ -0,0 +1,20 @@ +-- Tags: long, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-random-settings, no-random-merge-tree-settings + +DROP TABLE IF EXISTS t_100_columns; + +CREATE TABLE t_100_columns (id UInt64, c0 String, c1 String, c2 String, c3 String, c4 String, c5 String, c6 String, c7 String, c8 String, c9 String, c10 String, c11 String, c12 String, c13 String, c14 String, c15 String, c16 String, c17 String, c18 String, c19 String, c20 String, c21 String, c22 String, c23 String, c24 String, c25 String, c26 String, c27 String, c28 String, c29 String, c30 String, c31 String, c32 String, c33 String, c34 String, c35 String, c36 String, c37 String, c38 String, c39 String, c40 String, c41 String, c42 String, c43 String, c44 String, c45 String, c46 String, c47 String, c48 String, c49 String, c50 String) +ENGINE = MergeTree +ORDER BY id PARTITION BY id % 50 +SETTINGS min_bytes_for_wide_part = 0, ratio_of_defaults_for_sparse_serialization = 1.0, serialization_info_version = 'basic', max_compress_block_size = '1M', storage_policy = 's3_cache', auto_statistics_types = ''; + +SET max_insert_delayed_streams_for_parallel_write = 55; + +INSERT INTO t_100_columns (id) SELECT number FROM numbers(100); + +SYSTEM FLUSH LOGS query_log; + +SELECT if (memory_usage < 300000000, 'Ok', format('Fail: memory usage {}', formatReadableSize(memory_usage))) +FROM system.query_log +WHERE current_database = currentDatabase() AND query LIKE 'INSERT INTO t_100_columns%' AND type = 'QueryFinish'; + +DROP TABLE t_100_columns; diff --git a/parser/testdata/03261_json_hints_types_check/ast.json b/parser/testdata/03261_json_hints_types_check/ast.json new file mode 100644 index 000000000..161ab0640 --- /dev/null +++ b/parser/testdata/03261_json_hints_types_check/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '{}'" + }, + { + "explain": " Literal 'JSON(a LowCardinality(Int128))'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001217875, + "rows_read": 8, + "bytes_read": 307 + } +} diff --git a/parser/testdata/03261_json_hints_types_check/metadata.json b/parser/testdata/03261_json_hints_types_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03261_json_hints_types_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03261_json_hints_types_check/query.sql b/parser/testdata/03261_json_hints_types_check/query.sql new file mode 100644 index 000000000..2dd01813e --- /dev/null +++ b/parser/testdata/03261_json_hints_types_check/query.sql @@ -0,0 +1,2 @@ +select '{}'::JSON(a 
LowCardinality(Int128)); -- {serverError SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY} +select '{}'::JSON(a FixedString(100000)); -- {serverError ILLEGAL_COLUMN} diff --git a/parser/testdata/03261_low_cardinality_nullable_to_dynamic_cast/ast.json b/parser/testdata/03261_low_cardinality_nullable_to_dynamic_cast/ast.json new file mode 100644 index 000000000..f4320cab5 --- /dev/null +++ b/parser/testdata/03261_low_cardinality_nullable_to_dynamic_cast/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001344049, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03261_low_cardinality_nullable_to_dynamic_cast/metadata.json b/parser/testdata/03261_low_cardinality_nullable_to_dynamic_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03261_low_cardinality_nullable_to_dynamic_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03261_low_cardinality_nullable_to_dynamic_cast/query.sql b/parser/testdata/03261_low_cardinality_nullable_to_dynamic_cast/query.sql new file mode 100644 index 000000000..fdb497a62 --- /dev/null +++ b/parser/testdata/03261_low_cardinality_nullable_to_dynamic_cast/query.sql @@ -0,0 +1,7 @@ +SET allow_suspicious_low_cardinality_types = 1, allow_experimental_dynamic_type = 1; +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 LowCardinality(Nullable(Int))) ENGINE = Memory(); +INSERT INTO TABLE t0 (c0) VALUES (NULL); +SELECT c0::Dynamic FROM t0; +SELECT c0 FROM t0; +DROP TABLE t0; diff --git a/parser/testdata/03261_minmax_indices_by_default/ast.json b/parser/testdata/03261_minmax_indices_by_default/ast.json new file mode 100644 index 000000000..fbccef7e0 --- /dev/null +++ b/parser/testdata/03261_minmax_indices_by_default/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001576085, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03261_minmax_indices_by_default/metadata.json b/parser/testdata/03261_minmax_indices_by_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03261_minmax_indices_by_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03261_minmax_indices_by_default/query.sql b/parser/testdata/03261_minmax_indices_by_default/query.sql new file mode 100644 index 000000000..3030e15b9 --- /dev/null +++ b/parser/testdata/03261_minmax_indices_by_default/query.sql @@ -0,0 +1,132 @@ +SET mutations_sync = 2; + +DROP TABLE IF EXISTS tbl1; +DROP TABLE IF EXISTS tbl2; +DROP TABLE IF EXISTS tbl3; +DROP TABLE IF EXISTS tbl4; +DROP TABLE IF EXISTS tbl5; + +CREATE TABLE tbl1 +( + key Int, + x Int, + y Int, + INDEX x_idx x TYPE minmax GRANULARITY 1 +) +ENGINE=MergeTree() +ORDER BY key +SETTINGS add_minmax_index_for_numeric_columns = true, + add_minmax_index_for_string_columns = true, + index_granularity = 8192, + index_granularity_bytes = 10485760; + +INSERT INTO tbl1 VALUES (1,1,1), (2,2,2), (3,3,3); + +SELECT 'Check skipping indices after table creation'; +-- Expect x_idx and two implicit minmax indices +SELECT name, type, expr, data_compressed_bytes FROM system.data_skipping_indices WHERE table = 'tbl1' AND database = currentDatabase(); + +-- Settings 'add_minmax_index_for_numeric_columns' and 
'add_minmax_index_for_string_columns' cannot be changed after table creation +ALTER TABLE tbl1 MODIFY SETTING add_minmax_index_for_numeric_columns = false; -- { serverError READONLY_SETTING } +ALTER TABLE tbl1 MODIFY SETTING add_minmax_index_for_string_columns = false; -- { serverError READONLY_SETTING } +ALTER TABLE tbl1 RESET SETTING add_minmax_index_for_numeric_columns; -- { serverError READONLY_SETTING } +ALTER TABLE tbl1 RESET SETTING add_minmax_index_for_string_columns; -- { serverError READONLY_SETTING } + +SELECT 'Add numeric column'; +ALTER TABLE tbl1 ADD COLUMN n Int; +-- the implicit minmax index is only created but not materialized +SELECT name, type, expr, data_compressed_bytes FROM system.data_skipping_indices WHERE table = 'tbl1' AND database = currentDatabase(); + +SELECT 'After materialize'; +ALTER TABLE tbl1 MATERIALIZE INDEX auto_minmax_index_n; +SELECT name, type, expr, data_compressed_bytes FROM system.data_skipping_indices WHERE table = 'tbl1' AND database = currentDatabase(); + +SELECT 'Drop numeric column'; +ALTER TABLE tbl1 DROP COLUMN n; +SELECT name, type, expr, data_compressed_bytes FROM system.data_skipping_indices WHERE table = 'tbl1' AND database = currentDatabase(); + +SELECT 'Add string column'; +ALTER TABLE tbl1 ADD COLUMN s String; +SELECT name, type, expr, data_compressed_bytes FROM system.data_skipping_indices WHERE table = 'tbl1' AND database = currentDatabase(); + +SELECT 'Add another string column'; +ALTER TABLE tbl1 ADD COLUMN t String; +SELECT name, type, expr, data_compressed_bytes FROM system.data_skipping_indices WHERE table = 'tbl1' AND database = currentDatabase(); + +SELECT 'Drop string column t'; +ALTER TABLE tbl1 DROP COLUMN t; +SELECT name, type, expr, data_compressed_bytes FROM system.data_skipping_indices WHERE table = 'tbl1' AND database = currentDatabase(); + +SELECT 'Rename column s to t'; +ALTER TABLE tbl1 RENAME COLUMN s to t; +SELECT name, type, expr, data_compressed_bytes FROM system.data_skipping_indices WHERE table = 'tbl1' AND database = currentDatabase(); + +-- Check that users cannot create explicit minmax indices with the names of internal minmax indices + +CREATE TABLE tbl2 +( + key Int, + x Int, + y Int, + INDEX auto_minmax_index_x x TYPE minmax +) +ENGINE=MergeTree() +ORDER BY key +SETTINGS add_minmax_index_for_numeric_columns = true; -- { serverError BAD_ARGUMENTS } + +CREATE TABLE tbl2 +( + key Int, + x Int, + y Int, + INDEX auto_minmax_index_x x TYPE minmax -- fine, add_minmax_index_for_numeric_columns isn't set +) +ENGINE=MergeTree() +ORDER BY key; + +CREATE TABLE tbl3 +( + key Int, + x Int, + y Int +) +ENGINE=MergeTree() +ORDER BY key; + +ALTER TABLE tbl3 ADD INDEX auto_minmax_index_y y TYPE minmax; + +CREATE TABLE tbl4 +( + key Int, + x Int, + y Int +) +ENGINE=MergeTree() +ORDER BY key +SETTINGS add_minmax_index_for_string_columns = true; + +ALTER TABLE tbl4 ADD INDEX auto_minmax_index_y y TYPE minmax; -- { serverError BAD_ARGUMENTS } + +CREATE TABLE tbl5 +( + key Int, + x Int, + y Int, + s String, + INDEX x_idx x TYPE minmax +) +ENGINE=MergeTree() +ORDER BY key; + +SELECT 'tbl5 with add_minmax_index_for_numeric_columns and add_minmax_index_for_string_columns disabled'; +SELECT name,type,expr,data_compressed_bytes FROM system.data_skipping_indices WHERE table = 'tbl5' AND database = currentDatabase(); + +-- check that ATTACH of such tables will not throw "uses a reserved index name" error +DETACH TABLE tbl1; +ATTACH TABLE tbl1; + +DROP TABLE tbl1; +DROP TABLE tbl2; +DROP TABLE tbl3; +DROP TABLE tbl4; +DROP TABLE
tbl5; diff --git a/parser/testdata/03261_minmax_indices_by_default_table_copy/ast.json b/parser/testdata/03261_minmax_indices_by_default_table_copy/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03261_minmax_indices_by_default_table_copy/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03261_minmax_indices_by_default_table_copy/metadata.json b/parser/testdata/03261_minmax_indices_by_default_table_copy/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03261_minmax_indices_by_default_table_copy/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03261_minmax_indices_by_default_table_copy/query.sql b/parser/testdata/03261_minmax_indices_by_default_table_copy/query.sql new file mode 100644 index 000000000..41bd81d8f --- /dev/null +++ b/parser/testdata/03261_minmax_indices_by_default_table_copy/query.sql @@ -0,0 +1,18 @@ +-- Test for issue #75677 + +DROP TABLE IF EXISTS tab1; + +CREATE TABLE tab1 ( + a Int32, + b String, + c Float64) +ENGINE MergeTree +ORDER BY a +SETTINGS + add_minmax_index_for_numeric_columns = 1, + add_minmax_index_for_string_columns = 1; + +CREATE TABLE tab2 AS tab1; + +DROP TABLE tab1; +DROP TABLE tab2; diff --git a/parser/testdata/03261_mongodb_argumetns_crash/ast.json b/parser/testdata/03261_mongodb_argumetns_crash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03261_mongodb_argumetns_crash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03261_mongodb_argumetns_crash/metadata.json b/parser/testdata/03261_mongodb_argumetns_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03261_mongodb_argumetns_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03261_mongodb_argumetns_crash/query.sql b/parser/testdata/03261_mongodb_argumetns_crash/query.sql new file mode 100644 index 000000000..ca558ac6b --- /dev/null +++ b/parser/testdata/03261_mongodb_argumetns_crash/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest + +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', NULL, 'my_collection', 'test_user', 'password', 'x Int32'); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', NULL, 'test_user', 'password', 'x Int32'); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', NULL, 'password', 'x Int32'); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', 'test_user', NULL, 'x Int32'); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', 'test_user', 'password', NULL); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', 'test_user', 'password', materialize(1) + 1); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', 'test_user', 'password', 'x Int32', NULL); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 'test', 'my_collection', 'test_user', 'password', NULL, 'x Int32'); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb('mongodb://some-cluster:27017/?retryWrites=false', 
'test', 'my_collection', 'test_user', 'password', NULL, 'x Int32'); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mongodb(NULL, 'test', 'my_collection', 'test_user', 'password', 'x Int32'); -- { serverError BAD_ARGUMENTS } + +CREATE TABLE IF NOT EXISTS store_version ( `_id` String ) ENGINE = MongoDB(`localhost:27017`, mongodb, storeinfo, adminUser, adminUser); -- { serverError NAMED_COLLECTION_DOESNT_EXIST } diff --git a/parser/testdata/03261_optimize_rewrite_array_exists_to_has_crash/ast.json b/parser/testdata/03261_optimize_rewrite_array_exists_to_has_crash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03261_optimize_rewrite_array_exists_to_has_crash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03261_optimize_rewrite_array_exists_to_has_crash/metadata.json b/parser/testdata/03261_optimize_rewrite_array_exists_to_has_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03261_optimize_rewrite_array_exists_to_has_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03261_optimize_rewrite_array_exists_to_has_crash/query.sql b/parser/testdata/03261_optimize_rewrite_array_exists_to_has_crash/query.sql new file mode 100644 index 000000000..e0018632b --- /dev/null +++ b/parser/testdata/03261_optimize_rewrite_array_exists_to_has_crash/query.sql @@ -0,0 +1,10 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/71382 +DROP TABLE IF EXISTS rewrite; +CREATE TABLE rewrite (c0 Int) ENGINE = Memory(); +SELECT 1 +FROM rewrite +INNER JOIN rewrite AS y ON ( + SELECT 1 +) +INNER JOIN rewrite AS z ON 1 +SETTINGS optimize_rewrite_array_exists_to_has=1; diff --git a/parser/testdata/03261_pr_semi_anti_join/ast.json b/parser/testdata/03261_pr_semi_anti_join/ast.json new file mode 100644 index 000000000..8073eb3f3 --- /dev/null +++ b/parser/testdata/03261_pr_semi_anti_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001376964, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03261_pr_semi_anti_join/metadata.json b/parser/testdata/03261_pr_semi_anti_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03261_pr_semi_anti_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03261_pr_semi_anti_join/query.sql b/parser/testdata/03261_pr_semi_anti_join/query.sql new file mode 100644 index 000000000..2d671756d --- /dev/null +++ b/parser/testdata/03261_pr_semi_anti_join/query.sql @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS t1 SYNC; +DROP TABLE IF EXISTS t2 SYNC; + +CREATE TABLE t1 (x UInt32, s String) engine ReplicatedMergeTree('/clickhouse/{database}/t1', '1') order by tuple(); +CREATE TABLE t2 (x UInt32, s String) engine ReplicatedMergeTree('/clickhouse/{database}/t2', '1') order by tuple(); + +INSERT INTO t1 (x, s) VALUES (0, 'a1'), (1, 'a2'), (2, 'a3'), (3, 'a4'), (4, 'a5'), (2, 'a6'); +INSERT INTO t2 (x, s) VALUES (2, 'b1'), (2, 'b2'), (4, 'b3'), (4, 'b4'), (4, 'b5'), (5, 'b6'); + +SET join_use_nulls = 0; +set enable_analyzer = 1, enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; + +SELECT 'semi left'; +SELECT t1.*, t2.* FROM t1 SEMI LEFT JOIN t2 USING(x) ORDER BY t1.x, t2.x, 
t1.s, t2.s; + +SELECT 'semi right'; +SELECT t1.*, t2.* FROM t1 SEMI RIGHT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +SELECT 'anti left'; +SELECT t1.*, t2.* FROM t1 ANTI LEFT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +SELECT 'anti right'; +SELECT t1.*, t2.* FROM t1 ANTI RIGHT JOIN t2 USING(x) ORDER BY t1.x, t2.x, t1.s, t2.s; + +DROP TABLE t1 SYNC; +DROP TABLE t2 SYNC; diff --git a/parser/testdata/03261_sort_cursor_crash/ast.json b/parser/testdata/03261_sort_cursor_crash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03261_sort_cursor_crash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03261_sort_cursor_crash/metadata.json b/parser/testdata/03261_sort_cursor_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03261_sort_cursor_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03261_sort_cursor_crash/query.sql b/parser/testdata/03261_sort_cursor_crash/query.sql new file mode 100644 index 000000000..d95c7a9cc --- /dev/null +++ b/parser/testdata/03261_sort_cursor_crash/query.sql @@ -0,0 +1,26 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/70779 +-- Crash in SortCursorImpl with the old analyzer, which produces a block with 0 columns and 1 row +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; + +SET allow_suspicious_primary_key = 1; + +CREATE TABLE t0 (c0 Int) ENGINE = AggregatingMergeTree() ORDER BY tuple(); +INSERT INTO TABLE t0 (c0) VALUES (1); +SELECT 42 FROM t0 FINAL PREWHERE t0.c0 = 1; +DROP TABLE t0; + +CREATE TABLE t0 (c0 Int) ENGINE = SummingMergeTree() ORDER BY tuple(); +INSERT INTO TABLE t0 (c0) VALUES (1); +SELECT 43 FROM t0 FINAL PREWHERE t0.c0 = 1; +DROP TABLE t0; + +CREATE TABLE t0 (c0 Int) ENGINE = ReplacingMergeTree() ORDER BY tuple(); +INSERT INTO TABLE t0 (c0) VALUES (1); +SELECT 44 FROM t0 FINAL PREWHERE t0.c0 = 1; +DROP TABLE t0; + +CREATE TABLE t1 (a0 UInt8, c0 Int32, c1 UInt8) ENGINE = AggregatingMergeTree() ORDER BY tuple(); +INSERT INTO TABLE t1 (a0, c0, c1) VALUES (1, 1, 1); +SELECT 45 FROM t1 FINAL PREWHERE t1.c0 = t1.c1; +DROP TABLE t1; diff --git a/parser/testdata/03261_tuple_map_to_json_cast/ast.json b/parser/testdata/03261_tuple_map_to_json_cast/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03261_tuple_map_to_json_cast/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03261_tuple_map_to_json_cast/metadata.json b/parser/testdata/03261_tuple_map_to_json_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03261_tuple_map_to_json_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03261_tuple_map_to_json_cast/query.sql b/parser/testdata/03261_tuple_map_to_json_cast/query.sql new file mode 100644 index 000000000..709794ef9 --- /dev/null +++ b/parser/testdata/03261_tuple_map_to_json_cast/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest + +SET enable_json_type = 1; +set allow_experimental_variant_type = 1; +set use_variant_as_common_type = 1; +set enable_named_columns_in_function_tuple = 1; +set enable_analyzer = 1; + +select 'Map to JSON'; +select map('a', number::UInt32, 'b', toDate(number), 'c', range(number), 'd', [map('e', number::UInt32)])::JSON as json, JSONAllPathsWithTypes(json) from numbers(5); +select map('a' || number % 3, number::UInt32, 'b' || number % 3, toDate(number), 'c' || number % 3, range(number), 'd' || number % 3, [map('e' || number 
% 3, number::UInt32)])::JSON as json, JSONAllPathsWithTypes(json) from numbers(5); + +select 'Tuple to JSON'; +select tuple(number::UInt32 as a, toDate(number) as b, range(number) as c, [tuple(number::UInt32 as e)] as d)::JSON as json, JSONAllPathsWithTypes(json) from numbers(5); diff --git a/parser/testdata/03261_variant_permutation_bug/ast.json b/parser/testdata/03261_variant_permutation_bug/ast.json new file mode 100644 index 000000000..d3d091e0f --- /dev/null +++ b/parser/testdata/03261_variant_permutation_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001108364, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03261_variant_permutation_bug/metadata.json b/parser/testdata/03261_variant_permutation_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03261_variant_permutation_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03261_variant_permutation_bug/query.sql b/parser/testdata/03261_variant_permutation_bug/query.sql new file mode 100644 index 000000000..373dd9e19 --- /dev/null +++ b/parser/testdata/03261_variant_permutation_bug/query.sql @@ -0,0 +1,6 @@ +set allow_experimental_variant_type=1; +create table test (x UInt64, d Variant(UInt64)) engine=Memory; +insert into test select number, null from numbers(200000); +select d from test order by d::String limit 32213 format Null; +drop table test; + diff --git a/parser/testdata/03262_analyzer_materialized_view_in_with_cte/ast.json b/parser/testdata/03262_analyzer_materialized_view_in_with_cte/ast.json new file mode 100644 index 000000000..918819948 --- /dev/null +++ b/parser/testdata/03262_analyzer_materialized_view_in_with_cte/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001371349, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03262_analyzer_materialized_view_in_with_cte/metadata.json b/parser/testdata/03262_analyzer_materialized_view_in_with_cte/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03262_analyzer_materialized_view_in_with_cte/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03262_analyzer_materialized_view_in_with_cte/query.sql b/parser/testdata/03262_analyzer_materialized_view_in_with_cte/query.sql new file mode 100644 index 000000000..a2a86f7ed --- /dev/null +++ b/parser/testdata/03262_analyzer_materialized_view_in_with_cte/query.sql @@ -0,0 +1,63 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS mv_test; +DROP TABLE IF EXISTS mv_test_target; +DROP VIEW IF EXISTS mv_test_mv; + +CREATE TABLE mv_test +( + `id` UInt64, + `ref_id` UInt64, + `final_id` Nullable(UInt64), + `display` String +) +ENGINE = Log; + +CREATE TABLE mv_test_target +( + `id` UInt64, + `ref_id` UInt64, + `final_id` Nullable(UInt64), + `display` String +) +ENGINE = Log; + +CREATE MATERIALIZED VIEW mv_test_mv TO mv_test_target +( + `id` UInt64, + `ref_id` UInt64, + `final_id` Nullable(UInt64), + `display` String +) +AS WITH + tester AS + ( + SELECT + id, + ref_id, + final_id, + display + FROM mv_test + ), + id_set AS + ( + SELECT + display, + max(id) AS max_id + FROM mv_test + GROUP BY display + ) +SELECT * +FROM tester +WHERE id IN ( + SELECT max_id + FROM 
id_set +); + +INSERT INTO mv_test ( id, ref_id, display) values ( 1, 2, 'test'); + +SELECT * FROM mv_test_target; + +DROP VIEW mv_test_mv; +DROP TABLE mv_test_target; +DROP TABLE mv_test; diff --git a/parser/testdata/03262_column_sizes_with_dynamic_structure/ast.json b/parser/testdata/03262_column_sizes_with_dynamic_structure/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03262_column_sizes_with_dynamic_structure/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03262_column_sizes_with_dynamic_structure/metadata.json b/parser/testdata/03262_column_sizes_with_dynamic_structure/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03262_column_sizes_with_dynamic_structure/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03262_column_sizes_with_dynamic_structure/query.sql b/parser/testdata/03262_column_sizes_with_dynamic_structure/query.sql new file mode 100644 index 000000000..9da13ccfd --- /dev/null +++ b/parser/testdata/03262_column_sizes_with_dynamic_structure/query.sql @@ -0,0 +1,22 @@ +-- Tags: no-random-settings, no-fasttest + +set allow_experimental_dynamic_type = 1; +SET enable_json_type = 1; + + +drop table if exists test; +create table test (d Dynamic, json JSON) engine=MergeTree order by tuple() settings min_rows_for_wide_part=0, min_bytes_for_wide_part=1; +insert into test select number, '{"a" : 42, "b" : "Hello, World"}' from numbers(10000000); + +SELECT + `table`, + sum(rows) AS rows, + floor(sum(data_uncompressed_bytes) / (1024 * 1024)) AS data_size_uncompressed, + floor(sum(data_compressed_bytes) / (1024 * 1024)) AS data_size_compressed, + floor(sum(bytes_on_disk) / (1024 * 1024)) AS total_size_on_disk +FROM system.parts +WHERE active AND (database = currentDatabase()) AND (`table` = 'test') +GROUP BY `table` +ORDER BY `table` ASC; + +drop table test; diff --git a/parser/testdata/03262_common_expression_optimization/ast.json b/parser/testdata/03262_common_expression_optimization/ast.json new file mode 100644 index 000000000..bdd633053 --- /dev/null +++ b/parser/testdata/03262_common_expression_optimization/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001357349, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03262_common_expression_optimization/metadata.json b/parser/testdata/03262_common_expression_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03262_common_expression_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03262_common_expression_optimization/query.sql b/parser/testdata/03262_common_expression_optimization/query.sql new file mode 100644 index 000000000..1648cff62 --- /dev/null +++ b/parser/testdata/03262_common_expression_optimization/query.sql @@ -0,0 +1,158 @@ +SET enable_analyzer = 1; +SET optimize_extract_common_expressions = 1; + +DROP TABLE IF EXISTS x; +CREATE TABLE x (x Int64, A UInt8, B UInt8, C UInt8, D UInt8, E UInt8, F UInt8) ENGINE = MergeTree ORDER BY x; +INSERT INTO x + SELECT + cityHash64(number) AS x, + cityHash64(number + 1) % 2 AS A, + cityHash64(number + 2) % 2 AS B, + cityHash64(number + 3) % 2 AS C, + cityHash64(number + 4) % 2 AS D, + cityHash64(number + 5) % 2 AS E, + cityHash64(number + 6) % 2 AS F + FROM numbers(2000); + +-- Verify that 
the optimization setting works as expected +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE (A AND B) OR (A AND C) SETTINGS optimize_extract_common_expressions = 0; +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE (A AND B) OR (A AND C) SETTINGS optimize_extract_common_expressions = 1; + +-- Test multiple cases +SELECT * FROM x WHERE A AND ((B AND C) OR (B AND C AND F)) ORDER BY x LIMIT 10 SETTINGS optimize_extract_common_expressions = 0; +SELECT * FROM x WHERE A AND ((B AND C) OR (B AND C AND F)) ORDER BY x LIMIT 10; +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE A AND ((B AND C) OR (B AND C AND F)); + +SELECT * FROM x WHERE A AND ((B AND C AND E) OR (B AND C AND F)) ORDER BY x LIMIT 10 SETTINGS optimize_extract_common_expressions = 0; +SELECT * FROM x WHERE A AND ((B AND C AND E) OR (B AND C AND F)) ORDER BY x LIMIT 10; +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE A AND ((B AND C AND E) OR (B AND C AND F)); + +SELECT * FROM x WHERE A AND ((B AND (C AND E)) OR (B AND C AND F)) ORDER BY x LIMIT 10 SETTINGS optimize_extract_common_expressions = 0; +SELECT * FROM x WHERE A AND ((B AND (C AND E)) OR (B AND C AND F)) ORDER BY x LIMIT 10; +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE A AND ((B AND (C AND E)) OR (B AND C AND F)); + +SELECT * FROM x WHERE A AND ((B AND C) OR (B AND D) OR (B AND E)) ORDER BY x LIMIT 10 SETTINGS optimize_extract_common_expressions = 0; +SELECT * FROM x WHERE A AND ((B AND C) OR (B AND D) OR (B AND E)) ORDER BY x LIMIT 10; +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE A AND ((B AND C) OR (B AND D) OR (B AND E)); + +SELECT * FROM x WHERE A AND ((B AND C) OR ((B AND D) OR (B AND E))) ORDER BY x LIMIT 10 SETTINGS optimize_extract_common_expressions = 0; +SELECT * FROM x WHERE A AND ((B AND C) OR ((B AND D) OR (B AND E))) ORDER BY x LIMIT 10; +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE A AND ((B AND C) OR ((B AND D) OR (B AND E))); + +-- Without AND as a root +SELECT * FROM x WHERE ((B AND C) OR (B AND C AND F)) ORDER BY x LIMIT 10 SETTINGS optimize_extract_common_expressions = 0; +SELECT * FROM x WHERE ((B AND C) OR (B AND C AND F)) ORDER BY x LIMIT 10; +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE ((B AND C) OR (B AND C AND F)); + +SELECT * FROM x WHERE ((B AND C AND E) OR (B AND C AND F)) ORDER BY x LIMIT 10 SETTINGS optimize_extract_common_expressions = 0; +SELECT * FROM x WHERE ((B AND C AND E) OR (B AND C AND F)) ORDER BY x LIMIT 10; +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE ((B AND C AND E) OR (B AND C AND F)); + +SELECT * FROM x WHERE ((B AND (C AND E)) OR (B AND C AND F)) ORDER BY x LIMIT 10 SETTINGS optimize_extract_common_expressions = 0; +SELECT * FROM x WHERE ((B AND (C AND E)) OR (B AND C AND F)) ORDER BY x LIMIT 10; +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE ((B AND (C AND E)) OR (B AND C AND F)); + +SELECT * FROM x WHERE ((B AND C) OR (B AND D) OR (B AND E)) ORDER BY x LIMIT 10 SETTINGS optimize_extract_common_expressions = 0; +SELECT * FROM x WHERE ((B AND C) OR (B AND D) OR (B AND E)) ORDER BY x LIMIT 10; +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE ((B AND C) OR (B AND D) OR (B AND E)); + +SELECT * FROM x WHERE ((B AND C) OR ((B AND D) OR (B AND E))) ORDER BY x LIMIT 10 SETTINGS optimize_extract_common_expressions = 0; +SELECT * FROM x WHERE ((B AND C) OR ((B AND D) OR (B AND E))) ORDER BY x LIMIT 10; +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE ((B AND C) OR ((B AND D) OR (B AND E))); +
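+-- Note (editorial comment, not in the upstream test): each pair above runs the same predicate with the
+-- optimization off (baseline) and on; the conjunct shared by every OR branch (B here) should be hoisted,
+-- e.g. (B AND C) OR (B AND D) becomes B AND (C OR D), and both variants must return identical rows.
+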
+-- Complex expression +SELECT * FROM x WHERE (A AND (sipHash64(C) = sipHash64(D))) OR (B AND (sipHash64(C) = sipHash64(D))) ORDER BY x LIMIT 10 SETTINGS optimize_extract_common_expressions = 0; +SELECT * FROM x WHERE (A AND (sipHash64(C) = sipHash64(D))) OR (B AND (sipHash64(C) = sipHash64(D))) ORDER BY x LIMIT 10; +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE (A AND (sipHash64(C) = sipHash64(D))) OR (B AND (sipHash64(C) = sipHash64(D))); + +-- Flattening only happens if something can be extracted +SELECT * FROM x WHERE ((A AND B) OR ((C AND D) OR (E AND F))) ORDER BY x LIMIT 10 SETTINGS optimize_extract_common_expressions = 0; +SELECT * FROM x WHERE ((A AND B) OR ((C AND D) OR (E AND F))) ORDER BY x LIMIT 10; +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE ((A AND B) OR ((C AND D) OR (E AND F))); + +SELECT * FROM x WHERE ((A AND B) OR ((B AND D) OR (E AND F))) ORDER BY x LIMIT 10 SETTINGS optimize_extract_common_expressions = 0; +SELECT * FROM x WHERE ((A AND B) OR ((B AND D) OR (E AND F))) ORDER BY x LIMIT 10; +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE ((A AND B) OR ((B AND D) OR (E AND F))); + +-- Duplicates +SELECT * FROM x WHERE (A AND B AND C) OR ((A AND A AND A AND B AND B AND E AND E) OR (A AND B AND B AND F AND F)) ORDER BY x LIMIT 10 SETTINGS optimize_extract_common_expressions = 0; +SELECT * FROM x WHERE (A AND B AND C) OR ((A AND A AND A AND B AND B AND E AND E) OR (A AND B AND B AND F AND F)) ORDER BY x LIMIT 10; +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE (A AND B AND C) OR ((A AND A AND A AND B AND B AND E AND E) OR (A AND B AND B AND F AND F)); + +SELECT * FROM x WHERE ((A AND B AND C) OR (A AND B AND D)) AND ((B AND A AND E) OR (B AND A AND F)) ORDER BY x LIMIT 10 SETTINGS optimize_extract_common_expressions = 0; +SELECT * FROM x WHERE ((A AND B AND C) OR (A AND B AND D)) AND ((B AND A AND E) OR (B AND A AND F)) ORDER BY x LIMIT 10; +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE ((A AND B AND C) OR (A AND B AND D)) AND ((B AND A AND E) OR (B AND A AND F)); + + +-- _CAST function has to be used to maintain the same result type +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE ((B AND C) OR (B AND C AND toNullable(F))); +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE (x AND x) OR (x AND x); +-- Here the result type stays nullable because of `toNullable(C)`, so no cast is needed +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE ((B AND toNullable(C)) OR (B AND toNullable(C) AND toNullable(F))); + +-- Check that the optimization only happens at the top level, (C AND D) OR (C AND E) shouldn't be optimized +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE A OR (B AND ((C AND D) OR (C AND E))); + + +DROP TABLE IF EXISTS y; +CREATE TABLE y (x Int64, A UInt8, B UInt8, C UInt8, D UInt8, E UInt8, F UInt8) ENGINE = MergeTree ORDER BY x; +INSERT INTO y + SELECT + murmurHash3_64(number) AS x, + murmurHash3_64(number + 1) % 2 AS A, + murmurHash3_64(number + 2) % 2 AS B, + murmurHash3_64(number + 3) % 2 AS C, + murmurHash3_64(number + 4) % 2 AS D, + murmurHash3_64(number + 5) % 2 AS E, + murmurHash3_64(number + 6) % 2 AS F + FROM numbers(2000); + +-- JOIN expressions +-- As the optimization code is shared between ON and WHERE, it is enough to test that the optimization is also applied in ON +SELECT * FROM x INNER JOIN y ON ((x.A = y.A ) AND x.B = 1) OR ((x.A = y.A) AND y.C = 1) ORDER BY ALL LIMIT 10 SETTINGS optimize_extract_common_expressions = 0;
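+-- Note (editorial comment, not in the upstream test): the next query is the same JOIN with the
+-- optimization enabled; the shared x.A = y.A conjunct should be extracted from the ON disjunction,
+-- and the result must match the baseline above.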
+SELECT * FROM x INNER JOIN y ON ((x.A = y.A ) AND x.B = 1) OR ((x.A = y.A) AND y.C = 1) ORDER BY ALL LIMIT 10; +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x INNER JOIN y ON ((x.A = y.A ) AND x.B = 1) OR ((x.A = y.A) AND y.C = 1); + +-- Check that the optimization only happens at the top level; (x.C = y.C AND x.D = y.D) OR (x.C = y.C AND x.E = y.E) shouldn't be optimized +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x INNER JOIN y ON (x.A = y.A) OR ((x.B = y.B) AND ((x.C = y.C AND x.D = y.D) OR (x.C = y.C AND x.E = y.E))); + +-- Duplicated subexpressions, found by fuzzer +SELECT * FROM x WHERE (D AND 5) OR ((C AND E) AND (C AND E)) ORDER BY ALL LIMIT 3 SETTINGS optimize_extract_common_expressions = 0; +SELECT * FROM x WHERE (D AND 5) OR ((C AND E) AND (C AND E)) ORDER BY ALL LIMIT 3; +EXPLAIN QUERY TREE dump_ast = 1 SELECT * FROM x WHERE (C AND E) OR ((C AND E) AND (C AND E)); + +-- HAVING +SELECT x, max(A) AS mA, max(B) AS mB, max(C) AS mC FROM x GROUP BY x HAVING (mA AND mB) OR (mA AND mC) ORDER BY x LIMIT 10 SETTINGS optimize_extract_common_expressions = 0; +SELECT x, max(A) AS mA, max(B) AS mB, max(C) AS mC FROM x GROUP BY x HAVING (mA AND mB) OR (mA AND mC) ORDER BY x LIMIT 10; +EXPLAIN QUERY TREE dump_ast = 1 SELECT x, max(A) AS mA, max(B) AS mB, max(C) AS mC FROM x GROUP BY x HAVING (mA AND mB) OR (mA AND mC); + +-- QUALIFY +SELECT + x, + max(A) OVER (PARTITION BY x % 1000) AS mA, + max(B) OVER (PARTITION BY x % 1000) AS mB, + max(C) OVER (PARTITION BY x % 1000) AS mC +FROM x +QUALIFY (mA AND mB) OR (mA AND mC) +ORDER BY x +LIMIT 10 +SETTINGS optimize_extract_common_expressions = 0; + +SELECT + x, + max(A) OVER (PARTITION BY x % 1000) AS mA, + max(B) OVER (PARTITION BY x % 1000) AS mB, + max(C) OVER (PARTITION BY x % 1000) AS mC +FROM x +QUALIFY (mA AND mB) OR (mA AND mC) +ORDER BY x +LIMIT 10; + +EXPLAIN QUERY TREE dump_ast = 1 +SELECT + x, + max(A) OVER (PARTITION BY x % 1000) AS mA, + max(B) OVER (PARTITION BY x % 1000) AS mB, + max(C) OVER (PARTITION BY x % 1000) AS mC +FROM x +QUALIFY (mA AND mB) OR (mA AND mC); diff --git a/parser/testdata/03262_const_adaptive_index_granularity/ast.json b/parser/testdata/03262_const_adaptive_index_granularity/ast.json new file mode 100644 index 000000000..a71fcc583 --- /dev/null +++ b/parser/testdata/03262_const_adaptive_index_granularity/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_index_granularity (children 1)" + }, + { + "explain": " Identifier t_index_granularity" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001183372, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/03262_const_adaptive_index_granularity/metadata.json b/parser/testdata/03262_const_adaptive_index_granularity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03262_const_adaptive_index_granularity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03262_const_adaptive_index_granularity/query.sql b/parser/testdata/03262_const_adaptive_index_granularity/query.sql new file mode 100644 index 000000000..7445f66dc --- /dev/null +++ b/parser/testdata/03262_const_adaptive_index_granularity/query.sql @@ -0,0 +1,53 @@ +DROP TABLE IF EXISTS t_index_granularity; + +CREATE TABLE t_index_granularity (id UInt64, s String) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0, + index_granularity = 10, + index_granularity_bytes = 4096, + merge_max_block_size = 
10, + merge_max_block_size_bytes = 4096, + enable_index_granularity_compression = 1, + use_const_adaptive_granularity = 0, + enable_vertical_merge_algorithm = 0; + +INSERT INTO t_index_granularity SELECT number, 'a' FROM numbers(15); +INSERT INTO t_index_granularity SELECT number, repeat('a', 2048) FROM numbers(15, 15); + +SELECT 'adaptive non-const, before merge'; +SELECT * FROM mergeTreeIndex(currentDatabase(), t_index_granularity) ORDER BY ALL; +SELECT name, index_granularity_bytes_in_memory FROM system.parts WHERE database = currentDatabase() AND table = 't_index_granularity' AND active; + +OPTIMIZE TABLE t_index_granularity FINAL; + +SELECT 'adaptive non-const, after merge'; +SELECT * FROM mergeTreeIndex(currentDatabase(), t_index_granularity) ORDER BY ALL; +SELECT name, index_granularity_bytes_in_memory FROM system.parts WHERE database = currentDatabase() AND table = 't_index_granularity' AND active; + +DROP TABLE t_index_granularity; + +CREATE TABLE t_index_granularity (id UInt64, s String) +ENGINE = MergeTree ORDER BY id +SETTINGS min_bytes_for_wide_part = 0, + index_granularity = 10, + index_granularity_bytes = 4096, + merge_max_block_size = 10, + merge_max_block_size_bytes = 4096, + enable_index_granularity_compression = 1, + use_const_adaptive_granularity = 1, + enable_vertical_merge_algorithm = 0; + +INSERT INTO t_index_granularity SELECT number, 'a' FROM numbers(15); +INSERT INTO t_index_granularity SELECT number, repeat('a', 2048) FROM numbers(15, 15); + +SELECT 'adaptive const, before merge'; +SELECT * FROM mergeTreeIndex(currentDatabase(), t_index_granularity) ORDER BY ALL; +SELECT name, index_granularity_bytes_in_memory FROM system.parts WHERE database = currentDatabase() AND table = 't_index_granularity' AND active; + +OPTIMIZE TABLE t_index_granularity FINAL; + +SELECT 'adaptive const, after merge'; +SELECT * FROM mergeTreeIndex(currentDatabase(), t_index_granularity) ORDER BY ALL; +SELECT name, index_granularity_bytes_in_memory FROM system.parts WHERE database = currentDatabase() AND table = 't_index_granularity' AND active; + +DROP TABLE t_index_granularity; diff --git a/parser/testdata/03262_filter_push_down_view/ast.json b/parser/testdata/03262_filter_push_down_view/ast.json new file mode 100644 index 000000000..c1efc2d89 --- /dev/null +++ b/parser/testdata/03262_filter_push_down_view/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alpha (children 1)" + }, + { + "explain": " Identifier alpha" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001491446, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/03262_filter_push_down_view/metadata.json b/parser/testdata/03262_filter_push_down_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03262_filter_push_down_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03262_filter_push_down_view/query.sql b/parser/testdata/03262_filter_push_down_view/query.sql new file mode 100644 index 000000000..cae35fe9c --- /dev/null +++ b/parser/testdata/03262_filter_push_down_view/query.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS alpha; +DROP TABLE IF EXISTS alpha__day; + +SET session_timezone = 'Etc/UTC'; + +CREATE TABLE alpha +( + `ts` DateTime64(6), + `auid` Int64, +) +ENGINE = MergeTree +ORDER BY (auid, ts) +SETTINGS index_granularity = 1; + +CREATE VIEW alpha__day +( + `ts_date` Date, + `auid` Int64, +) +AS SELECT + ts_date, + auid, 
+FROM +( + SELECT + toDate(ts) AS ts_date, + auid + FROM alpha +) +WHERE ts_date <= toDateTime('2024-01-01 00:00:00') - INTERVAL 1 DAY; + +INSERT INTO alpha VALUES (toDateTime64('2024-01-01 00:00:00.000', 3) - INTERVAL 3 DAY, 1); +INSERT INTO alpha VALUES (toDateTime64('2024-01-01 00:00:00.000', 3) - INTERVAL 3 DAY, 2); +INSERT INTO alpha VALUES (toDateTime64('2024-01-01 00:00:00.000', 3) - INTERVAL 3 DAY, 3); + +select trimLeft(explain) from (EXPLAIN indexes = 1 SELECT auid FROM alpha__day WHERE auid = 1) where explain like '%Condition:%' or explain like '%Granules:%' SETTINGS enable_analyzer = 1; diff --git a/parser/testdata/03262_system_functions_should_not_fill_query_log_functions/ast.json b/parser/testdata/03262_system_functions_should_not_fill_query_log_functions/ast.json new file mode 100644 index 000000000..af08f342f --- /dev/null +++ b/parser/testdata/03262_system_functions_should_not_fill_query_log_functions/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.functions" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier name" + }, + { + "explain": " Literal 'bitShiftLeft'" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.00217476, + "rows_read": 14, + "bytes_read": 515 + } +} diff --git a/parser/testdata/03262_system_functions_should_not_fill_query_log_functions/metadata.json b/parser/testdata/03262_system_functions_should_not_fill_query_log_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03262_system_functions_should_not_fill_query_log_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03262_system_functions_should_not_fill_query_log_functions/query.sql b/parser/testdata/03262_system_functions_should_not_fill_query_log_functions/query.sql new file mode 100644 index 000000000..1cf3f21fe --- /dev/null +++ b/parser/testdata/03262_system_functions_should_not_fill_query_log_functions/query.sql @@ -0,0 +1,9 @@ +SELECT * FROM system.functions WHERE name = 'bitShiftLeft' format Null; +SYSTEM FLUSH LOGS query_log; +SELECT used_aggregate_functions, used_functions, used_table_functions +FROM system.query_log +WHERE + event_date >= yesterday() + AND type = 'QueryFinish' + AND current_database = currentDatabase() + AND query LIKE '%bitShiftLeft%'; diff --git a/parser/testdata/03263_analyzer_materialized_view_cte_nested/ast.json b/parser/testdata/03263_analyzer_materialized_view_cte_nested/ast.json new file mode 100644 index 000000000..b62b5920e --- /dev/null +++ b/parser/testdata/03263_analyzer_materialized_view_cte_nested/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001428207, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git 
a/parser/testdata/03263_analyzer_materialized_view_cte_nested/metadata.json b/parser/testdata/03263_analyzer_materialized_view_cte_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03263_analyzer_materialized_view_cte_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03263_analyzer_materialized_view_cte_nested/query.sql b/parser/testdata/03263_analyzer_materialized_view_cte_nested/query.sql new file mode 100644 index 000000000..8331ae488 --- /dev/null +++ b/parser/testdata/03263_analyzer_materialized_view_cte_nested/query.sql @@ -0,0 +1,19 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_table; +DROP VIEW IF EXISTS test_mv; + +CREATE TABLE test_table ENGINE = MergeTree ORDER BY tuple() AS SELECT 1 as col1; + +CREATE MATERIALIZED VIEW test_mv ENGINE = MergeTree ORDER BY tuple() AS +WITH + subquery_on_source AS (SELECT col1 AS aliased FROM test_table), + output AS (SELECT * FROM test_table WHERE col1 IN (SELECT aliased FROM subquery_on_source)) +SELECT * FROM output; + +INSERT INTO test_table VALUES (2); + +SELECT * FROM test_mv; + +DROP VIEW test_mv; +DROP TABLE test_table; diff --git a/parser/testdata/03263_forbid_materialize_sort_key/ast.json b/parser/testdata/03263_forbid_materialize_sort_key/ast.json new file mode 100644 index 000000000..82c1f245e --- /dev/null +++ b/parser/testdata/03263_forbid_materialize_sort_key/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 3)" + }, + { + "explain": " Identifier test" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration a (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001554115, + "rows_read": 10, + "bytes_read": 336 + } +} diff --git a/parser/testdata/03263_forbid_materialize_sort_key/metadata.json b/parser/testdata/03263_forbid_materialize_sort_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03263_forbid_materialize_sort_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03263_forbid_materialize_sort_key/query.sql b/parser/testdata/03263_forbid_materialize_sort_key/query.sql new file mode 100644 index 000000000..a27be98e0 --- /dev/null +++ b/parser/testdata/03263_forbid_materialize_sort_key/query.sql @@ -0,0 +1,24 @@ +CREATE TABLE IF NOT EXISTS test (a UInt64) ENGINE=MergeTree() ORDER BY a; + +INSERT INTO test (a) SELECT 1 FROM numbers(1000); + +ALTER TABLE test ADD COLUMN b Float64 AFTER a, MODIFY ORDER BY (a, b); +ALTER TABLE test MODIFY COLUMN b DEFAULT rand64() % 100000; +ALTER TABLE test MATERIALIZE COLUMN b; -- { serverError CANNOT_UPDATE_COLUMN } +DROP TABLE IF EXISTS test; + + +CREATE TABLE IF NOT EXISTS tab (x UInt32, y UInt32) engine = MergeTree ORDER BY tuple(); +CREATE DICTIONARY IF NOT EXISTS dict (x UInt32, y UInt32) primary key x source(clickhouse(table 'tab')) LAYOUT(FLAT()) LIFETIME(MIN 0 MAX 1000); +INSERT INTO tab VALUES (1, 2), (3, 4); +SYSTEM RELOAD DICTIONARY dict; +CREATE TABLE IF NOT EXISTS tab2 (x UInt32, y UInt32 materialized dictGet(dict, 'y', x)) engine = MergeTree ORDER BY (y); +INSERT 
INTO tab2 (x) VALUES (1), (3); +TRUNCATE TABLE tab; +INSERT INTO tab VALUES (1, 4), (3, 2); +SYSTEM RELOAD DICTIONARY dict; +SET mutations_sync=2; +ALTER TABLE tab2 materialize column y; -- { serverError CANNOT_UPDATE_COLUMN } +DROP TABLE IF EXISTS tab2; +DROP DICTIONARY IF EXISTS dict; +DROP TABLE IF EXISTS tab; diff --git a/parser/testdata/03263_parquet_write_bloom_filter/ast.json b/parser/testdata/03263_parquet_write_bloom_filter/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03263_parquet_write_bloom_filter/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03263_parquet_write_bloom_filter/metadata.json b/parser/testdata/03263_parquet_write_bloom_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03263_parquet_write_bloom_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03263_parquet_write_bloom_filter/query.sql b/parser/testdata/03263_parquet_write_bloom_filter/query.sql new file mode 100644 index 000000000..16807b8bf --- /dev/null +++ b/parser/testdata/03263_parquet_write_bloom_filter/query.sql @@ -0,0 +1,39 @@ +-- Tags: no-fasttest, no-parallel + +set engine_file_truncate_on_insert=1; +set output_format_parquet_use_custom_encoder = 1; +set output_format_parquet_row_group_size = 100; +set input_format_parquet_filter_push_down = 0; +set input_format_parquet_page_filter_push_down = 0; +set input_format_parquet_bloom_filter_push_down = 1; +set schema_inference_make_columns_nullable = 'auto'; +SET enable_analyzer = 1; -- required for multiple array joins +set max_block_size = 1000000; -- have only one block to make sure rows are split into row groups deterministically +set preferred_block_size_bytes = 1000000000000; + +create table data (n UInt64, s String, l UInt256, d Decimal128(4), lc LowCardinality(String)) engine Memory; +insert into data select number*100, toString(number*10), number*10000000000000000000000000000000000000000::UInt256, number*123.456::Decimal128(4), 'lc'||intDiv(number, 123)*2 from numbers(1000); + +-- Baseline test without bloom filter. +insert into function file(bf_03263.parquet) select * from data settings output_format_parquet_write_bloom_filter=0; +select 'no bf: metadata', rg.num_rows, col.name, col.bloom_filter_bytes from file(bf_03263.parquet, ParquetMetadata) array join row_groups as rg array join rg.columns as col order by rg.file_offset, col.name; +select 'no bf: UInt64 hit', count(), sum(n = 12300 as cond) from file(bf_03263.parquet) where indexHint(cond); +select 'no bf: UInt64 miss', count(), sum(n = 12345 as cond) from file(bf_03263.parquet) where indexHint(cond); + +-- With bloom filter. 
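+-- Note (assumption, not asserted by the reference output): each 'hit'/'miss' pair below probes one value that exists and one that does not; with input_format_parquet_bloom_filter_push_down = 1 the 'miss' probes are expected to skip whole row groups, which indexHint(cond) makes visible in the returned counts while the data itself stays identical to the baseline.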
+insert into function file(bf_03263.parquet) select * from data settings output_format_parquet_write_bloom_filter=1; +select 'bf: metadata', rg.num_rows, col.name, col.bloom_filter_bytes from file(bf_03263.parquet, ParquetMetadata) array join row_groups as rg array join rg.columns as col order by rg.file_offset, col.name; +select 'bf: UInt64 hit', count(), sum(n = 12300 as cond) from file(bf_03263.parquet) where indexHint(cond); +select 'bf: UInt64 miss', count(), sum(n = 12345 as cond) from file(bf_03263.parquet) where indexHint(cond); +select 'bf: String hit', count(), sum(s = '1230' as cond) from file(bf_03263.parquet) where indexHint(cond); +select 'bf: String miss', count(), sum(s = '1234' as cond) from file(bf_03263.parquet) where indexHint(cond); +select 'bf: UInt256 hit', count(), sum(l = 7890000000000000000000000000000000000000000::UInt256 as cond) from file(bf_03263.parquet, Parquet, 'l UInt256') where indexHint(cond); +select 'bf: UInt256 miss', count(), sum(l = 7890000000000000000000000000000000000000001::UInt256 as cond) from file(bf_03263.parquet, Parquet, 'l UInt256') where indexHint(cond); +select 'bf: Decimal128(4) hit', count(), sum(d = 108147.456::Decimal128(4) as cond) from file(bf_03263.parquet) where indexHint(cond); +select 'bf: Decimal128(4) miss', count(), sum(d = 108147.4567::Decimal128(4) as cond) from file(bf_03263.parquet) where indexHint(cond); +select 'bf: LowCardinality(String) hit', count(), sum(lc = 'lc4' as cond) from file(bf_03263.parquet) where indexHint(cond); +select 'bf: LowCardinality(String) miss', count(), sum(lc = 'lc1' as cond) from file(bf_03263.parquet) where indexHint(cond); + +-- Try different output_format_parquet_bloom_filter_bits_per_value, check that the size changes. +insert into function file(bf_03263.parquet) select * from data settings output_format_parquet_write_bloom_filter=1, output_format_parquet_bloom_filter_bits_per_value=30; +select 'more bits per value: metadata', rg.num_rows, col.name, col.bloom_filter_bytes from file(bf_03263.parquet, ParquetMetadata) array join row_groups as rg array join rg.columns as col order by rg.file_offset, col.name; diff --git a/parser/testdata/03266_lowcardinality_string_monotonicity/ast.json b/parser/testdata/03266_lowcardinality_string_monotonicity/ast.json new file mode 100644 index 000000000..163e9f1dc --- /dev/null +++ b/parser/testdata/03266_lowcardinality_string_monotonicity/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_lc_pk (children 1)" + }, + { + "explain": " Identifier test_lc_pk" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001247197, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03266_lowcardinality_string_monotonicity/metadata.json b/parser/testdata/03266_lowcardinality_string_monotonicity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03266_lowcardinality_string_monotonicity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03266_lowcardinality_string_monotonicity/query.sql b/parser/testdata/03266_lowcardinality_string_monotonicity/query.sql new file mode 100644 index 000000000..4ec03f1c6 --- /dev/null +++ b/parser/testdata/03266_lowcardinality_string_monotonicity/query.sql @@ -0,0 +1,30 @@ +DROP TABLE IF EXISTS test_lc_pk; +CREATE TABLE test_lc_pk (s String) engine = MergeTree ORDER BY s; + +INSERT INTO test_lc_pk SELECT toString(number) FROM numbers(1e6); +
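+-- Sketch of the property under test (assumed): CAST(s, 'LowCardinality(String)') changes only the column representation, not its ordering, so a comparison through the cast should still appear as a primary-key Condition under EXPLAIN indexes = 1; the CAST(s, 'String') variant below applies the same check to the identity cast.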
+SELECT trimLeft(explain) +FROM +( + SELECT * + FROM viewExplain('EXPLAIN', 'indexes = 1', ( + SELECT count() + FROM test_lc_pk + WHERE CAST(s, 'LowCardinality(String)') = '42' + )) +) +WHERE explain LIKE '%Condition%'; -- Verify that our column is used as the key in EXPLAIN indexes (i.e. we don't read all the data) + +SELECT trimLeft(explain) +FROM +( + SELECT * + FROM viewExplain('EXPLAIN', 'indexes = 1', ( + SELECT count() + FROM test_lc_pk + WHERE CAST(s, 'String') = '42' + )) +) +WHERE explain LIKE '%Condition%'; + +DROP TABLE test_lc_pk; diff --git a/parser/testdata/03266_with_fill_staleness/ast.json b/parser/testdata/03266_with_fill_staleness/ast.json new file mode 100644 index 000000000..3f41c2856 --- /dev/null +++ b/parser/testdata/03266_with_fill_staleness/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00122458, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03266_with_fill_staleness/metadata.json b/parser/testdata/03266_with_fill_staleness/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03266_with_fill_staleness/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03266_with_fill_staleness/query.sql b/parser/testdata/03266_with_fill_staleness/query.sql new file mode 100644 index 000000000..de47d8287 --- /dev/null +++ b/parser/testdata/03266_with_fill_staleness/query.sql @@ -0,0 +1,34 @@ +SET session_timezone='Europe/Amsterdam'; +SET enable_analyzer=1; + +DROP TABLE IF EXISTS with_fill_staleness; +CREATE TABLE with_fill_staleness (a DateTime, b DateTime, c UInt64) ENGINE = MergeTree ORDER BY a; + +SELECT 'add samples'; + +INSERT INTO with_fill_staleness +SELECT + toDateTime('2016-06-15 23:00:00') + number AS a, a as b, number as c +FROM numbers(30) +WHERE (number % 5) == 0; + +SELECT 'regular with fill'; +SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL INTERPOLATE (c); + +SELECT 'staleness 1 seconds'; +SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL STALENESS INTERVAL 1 SECOND INTERPOLATE (c); + +SELECT 'staleness 3 seconds'; +SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL STALENESS INTERVAL 3 SECOND INTERPOLATE (c); + +SELECT 'descending order'; +SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a DESC WITH FILL STALENESS INTERVAL -2 SECOND INTERPOLATE (c); + +SELECT 'staleness with to and step'; +SELECT a, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL TO toDateTime('2016-06-15 23:00:40') STEP 3 STALENESS INTERVAL 7 SECOND INTERPOLATE (c); + +SELECT 'staleness with another regular with fill'; +SELECT a, b, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL STALENESS INTERVAL 2 SECOND, b ASC WITH FILL FROM 0 TO 3 INTERPOLATE (c); + +SELECT 'double staleness'; +SELECT a, b, c, 'original' as original FROM with_fill_staleness ORDER BY a ASC WITH FILL STALENESS INTERVAL 2 SECOND, b ASC WITH FILL TO toDateTime('2016-06-15 23:01:00') STEP 2 STALENESS 5 INTERPOLATE (c); diff --git a/parser/testdata/03266_with_fill_staleness_cases/ast.json b/parser/testdata/03266_with_fill_staleness_cases/ast.json new file mode 100644 index 000000000..c8a3722b6 --- /dev/null +++ b/parser/testdata/03266_with_fill_staleness_cases/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { 
+ "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001108665, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03266_with_fill_staleness_cases/metadata.json b/parser/testdata/03266_with_fill_staleness_cases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03266_with_fill_staleness_cases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03266_with_fill_staleness_cases/query.sql b/parser/testdata/03266_with_fill_staleness_cases/query.sql new file mode 100644 index 000000000..9e28041c9 --- /dev/null +++ b/parser/testdata/03266_with_fill_staleness_cases/query.sql @@ -0,0 +1,25 @@ +SET enable_analyzer=1; + +DROP TABLE IF EXISTS test; +CREATE TABLE test (a Int64, b Int64, c Int64) Engine=MergeTree ORDER BY a; +INSERT INTO test(a, b, c) VALUES (0, 5, 10), (7, 8, 15), (14, 10, 20); + +SELECT 'test-1'; +SELECT *, 'original' AS orig FROM test ORDER BY a, b WITH FILL TO 20 STEP 2 STALENESS 3, c WITH FILL TO 25 step 3; + +DROP TABLE IF EXISTS test2; +CREATE TABLE test2 (a Int64, b Int64) Engine=MergeTree ORDER BY a; +INSERT INTO test2(a, b) values (1, 0), (1, 4), (1, 8), (1, 12); + +SELECT 'test-2-1'; +SELECT *, 'original' AS orig FROM test2 ORDER BY a, b WITH FILL; + +SELECT 'test-2-2'; +SELECT *, 'original' AS orig FROM test2 ORDER BY a WITH FILL to 20 STALENESS 4, b WITH FILL TO 15 STALENESS 7; + +DROP TABLE IF EXISTS test2; +CREATE TABLE test3 (a Int64, b Int64) Engine=MergeTree ORDER BY a; +INSERT INTO test3(a, b) VALUES (25, 17), (30, 18); + +SELECT 'test-3-1'; +SELECT a, b, 'original' AS orig FROM test3 ORDER BY a WITH FILL TO 33 STEP 3, b WITH FILL FROM -10 STEP 2; diff --git a/parser/testdata/03266_with_fill_staleness_errors/ast.json b/parser/testdata/03266_with_fill_staleness_errors/ast.json new file mode 100644 index 000000000..343c87eb3 --- /dev/null +++ b/parser/testdata/03266_with_fill_staleness_errors/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001251791, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03266_with_fill_staleness_errors/metadata.json b/parser/testdata/03266_with_fill_staleness_errors/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03266_with_fill_staleness_errors/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03266_with_fill_staleness_errors/query.sql b/parser/testdata/03266_with_fill_staleness_errors/query.sql new file mode 100644 index 000000000..fbfaf3743 --- /dev/null +++ b/parser/testdata/03266_with_fill_staleness_errors/query.sql @@ -0,0 +1,5 @@ +SET enable_analyzer=1; + +SELECT 1 AS a, 2 AS b ORDER BY a, b WITH FILL FROM 0 TO 10 STALENESS 3; -- { serverError INVALID_WITH_FILL_EXPRESSION } +SELECT 1 AS a, 2 AS b ORDER BY a, b DESC WITH FILL TO 10 STALENESS 3; -- { serverError INVALID_WITH_FILL_EXPRESSION } +SELECT 1 AS a, 2 AS b ORDER BY a, b ASC WITH FILL TO 10 STALENESS -3; -- { serverError INVALID_WITH_FILL_EXPRESSION } diff --git a/parser/testdata/03267_join_swap_bug/ast.json b/parser/testdata/03267_join_swap_bug/ast.json new file mode 100644 index 000000000..93e72a98a --- /dev/null +++ b/parser/testdata/03267_join_swap_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + 
"data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001318381, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03267_join_swap_bug/metadata.json b/parser/testdata/03267_join_swap_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03267_join_swap_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03267_join_swap_bug/query.sql b/parser/testdata/03267_join_swap_bug/query.sql new file mode 100644 index 000000000..f86d681e5 --- /dev/null +++ b/parser/testdata/03267_join_swap_bug/query.sql @@ -0,0 +1,36 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Int) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO TABLE t0 (c0) VALUES (1); + +SELECT 1 FROM t0 PASTE JOIN (SELECT 1 c0) tx PASTE JOIN t0 t1 GROUP BY tx.c0; +SELECT count() FROM t0 PASTE JOIN (SELECT 1 c0) tx PASTE JOIN t0 t1 GROUP BY tx.c0; + +SELECT 1 FROM t0 FULL JOIN (SELECT 0 AS c0) tx ON t0.c0 = tx.c0 PASTE JOIN (SELECT 0 AS c0, 1 AS c1) ty ORDER BY ty.c0, ty.c1 +SETTINGS query_plan_join_swap_table = 'true'; + +SET enable_analyzer = 1; + +SELECT * +FROM +( + SELECT * + FROM system.one +) AS a +INNER JOIN +( + SELECT * + FROM system.one +) AS b USING (dummy) +INNER JOIN +( + SELECT * + FROM system.one +) AS c USING (dummy) +SETTINGS join_algorithm = 'full_sorting_merge'; + + +SELECT count(1) +FROM ( SELECT 1 AS x, x ) AS t1 +RIGHT JOIN (SELECT materialize(2) AS x) AS t2 +ON t1.x = t2.x +; diff --git a/parser/testdata/03267_materialized_view_keeps_security_context/ast.json b/parser/testdata/03267_materialized_view_keeps_security_context/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03267_materialized_view_keeps_security_context/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03267_materialized_view_keeps_security_context/metadata.json b/parser/testdata/03267_materialized_view_keeps_security_context/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03267_materialized_view_keeps_security_context/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03267_materialized_view_keeps_security_context/query.sql b/parser/testdata/03267_materialized_view_keeps_security_context/query.sql new file mode 100644 index 000000000..bb44e4920 --- /dev/null +++ b/parser/testdata/03267_materialized_view_keeps_security_context/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.rview; +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.wview; + +-- Read from view +CREATE MATERIALIZED VIEW rview ENGINE = File(CSV) POPULATE AS SELECT 1 AS c0; +SELECT 1 FROM rview; + +-- Write through view populate +CREATE MATERIALIZED VIEW wview ENGINE = Join(ALL, INNER, c0) POPULATE AS SELECT 1 AS c0; diff --git a/parser/testdata/03268_empty_tuple_update/ast.json b/parser/testdata/03268_empty_tuple_update/ast.json new file mode 100644 index 000000000..ead5bd37b --- /dev/null +++ b/parser/testdata/03268_empty_tuple_update/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001638541, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03268_empty_tuple_update/metadata.json 
b/parser/testdata/03268_empty_tuple_update/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03268_empty_tuple_update/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03268_empty_tuple_update/query.sql b/parser/testdata/03268_empty_tuple_update/query.sql new file mode 100644 index 000000000..038c61343 --- /dev/null +++ b/parser/testdata/03268_empty_tuple_update/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Tuple(), c1 int) ENGINE = Memory(); + +INSERT INTO t0 VALUES ((), 1); + +ALTER TABLE t0 UPDATE c0 = (), c1 = 2 WHERE EXISTS (SELECT 1) SETTINGS mutations_sync=2; + +SELECT * FROM t0; + +DROP TABLE t0; diff --git a/parser/testdata/03268_nested_analyzer/ast.json b/parser/testdata/03268_nested_analyzer/ast.json new file mode 100644 index 000000000..f909c8041 --- /dev/null +++ b/parser/testdata/03268_nested_analyzer/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001537015, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03268_nested_analyzer/metadata.json b/parser/testdata/03268_nested_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03268_nested_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03268_nested_analyzer/query.sql b/parser/testdata/03268_nested_analyzer/query.sql new file mode 100644 index 000000000..5aceccc60 --- /dev/null +++ b/parser/testdata/03268_nested_analyzer/query.sql @@ -0,0 +1,32 @@ +set enable_analyzer=1; +-- {echoOn } + +SELECT nested(['a', 'b'], [1, 2], [3, 4]); +SELECT nested(['a', 'b'], [1, 2], materialize([3, 4])); +SELECT nested(['a', 'b'], materialize([1, 2]), materialize([3, 4])); + +SELECT nested([['a', 'b']], [[1, 2], [3]], [[4, 5], [6]]); +SELECT nested([['a'], ['b']], [[1, 2], [3]], [[4, 5], [6]]); -- {serverError BAD_ARGUMENTS} + +select x, y, z, nested(['a', 'b', 'c'], x, y, z), nested([['a', 'b', 'c']], x, y, z) from system.one array join [[[1, 2], [3]], [[4], [5, 6]]] as x, [[[7, 8], [9]], [[10], [11, 12]]] as y, [[[13, 14], [15]], [[16], [17, 18]]] as z format Pretty; +select nested([[['a', 'b', 'c']]], [[[1, 2], [3]], [[4], [5, 6]]] as x, [[[7, 8], [9]], [[10], [11, 12]]] as y, [[[13, 14], [15]], [[16], [17, 18]]] as z) format Pretty; + +SELECT nested(['a', 'b'], [[1, 2], [3, 4]], [[5], [6]]); +SELECT nested([['a', 'b']], [[1, 2], [3, 4]], [[5], [6]]); -- {serverError SIZES_OF_ARRAYS_DONT_MATCH} + +-- {echoOff} + +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + x UInt8, + "struct.x" DEFAULT [0], + "struct.y" ALIAS [1], +) +ENGINE = Memory; + +insert into test (x) values (0); +select * from test array join struct; +select x, struct.x, struct.y from test array join struct; + +DROP TABLE test; diff --git a/parser/testdata/03268_system_parts_index_granularity/ast.json b/parser/testdata/03268_system_parts_index_granularity/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03268_system_parts_index_granularity/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03268_system_parts_index_granularity/metadata.json b/parser/testdata/03268_system_parts_index_granularity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03268_system_parts_index_granularity/metadata.json @@ -0,0 +1 @@ +{"todo": 
true} diff --git a/parser/testdata/03268_system_parts_index_granularity/query.sql b/parser/testdata/03268_system_parts_index_granularity/query.sql new file mode 100644 index 000000000..3df9f6be0 --- /dev/null +++ b/parser/testdata/03268_system_parts_index_granularity/query.sql @@ -0,0 +1,21 @@ +-- Tags: no-random-settings, no-random-merge-tree-settings +DROP TABLE IF EXISTS t; + +CREATE TABLE t ( + key UInt64, + value String +) +ENGINE MergeTree() +ORDER by key SETTINGS index_granularity = 10, index_granularity_bytes = '1024K'; + +ALTER TABLE t MODIFY SETTING enable_index_granularity_compression = 0; + +INSERT INTO t SELECT number, toString(number) FROM numbers(100); + +ALTER TABLE t MODIFY SETTING enable_index_granularity_compression = 1; + +INSERT INTO t SELECT number, toString(number) FROM numbers(100); + +SELECT index_granularity_bytes_in_memory, index_granularity_bytes_in_memory_allocated FROM system.parts where table = 't' and database = currentDatabase() ORDER BY name; + +DROP TABLE IF EXISTS t; diff --git a/parser/testdata/03268_vertical_pretty_numbers/ast.json b/parser/testdata/03268_vertical_pretty_numbers/ast.json new file mode 100644 index 000000000..ddd95ad5c --- /dev/null +++ b/parser/testdata/03268_vertical_pretty_numbers/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001448304, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03268_vertical_pretty_numbers/metadata.json b/parser/testdata/03268_vertical_pretty_numbers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03268_vertical_pretty_numbers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03268_vertical_pretty_numbers/query.sql b/parser/testdata/03268_vertical_pretty_numbers/query.sql new file mode 100644 index 000000000..0462134ed --- /dev/null +++ b/parser/testdata/03268_vertical_pretty_numbers/query.sql @@ -0,0 +1,11 @@ +SET output_format_pretty_color = 1, output_format_pretty_highlight_digit_groups = 1, output_format_pretty_single_large_number_tip_threshold = 1; +SELECT exp2(number), exp10(number), 'test'||number FROM numbers(64) FORMAT Vertical; + +SET output_format_pretty_color = 0, output_format_pretty_highlight_digit_groups = 1, output_format_pretty_single_large_number_tip_threshold = 1; +SELECT exp2(number), exp10(number), 'test'||number FROM numbers(64) FORMAT Vertical; + +SET output_format_pretty_color = 1, output_format_pretty_highlight_digit_groups = 0, output_format_pretty_single_large_number_tip_threshold = 1; +SELECT exp2(number), exp10(number), 'test'||number FROM numbers(64) FORMAT Vertical; + +SET output_format_pretty_color = 0, output_format_pretty_highlight_digit_groups = 0, output_format_pretty_single_large_number_tip_threshold = 0; +SELECT exp2(number), exp10(number), 'test'||number FROM numbers(64) FORMAT Vertical; diff --git a/parser/testdata/03269_bf16/ast.json b/parser/testdata/03269_bf16/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03269_bf16/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03269_bf16/metadata.json b/parser/testdata/03269_bf16/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03269_bf16/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03269_bf16/query.sql b/parser/testdata/03269_bf16/query.sql new 
file mode 100644 index 000000000..a364c00e6 --- /dev/null +++ b/parser/testdata/03269_bf16/query.sql @@ -0,0 +1,98 @@ +-- This is a smoke test, non-exhaustive. + +-- Conversions + +SELECT + 1::BFloat16, + -1::BFloat16, + 1.1::BFloat16, + -1.1::BFloat16, + CAST(1 AS BFloat16), + CAST(-1 AS BFloat16), + CAST(1.1 AS BFloat16), + CAST(-1.1 AS BFloat16), + CAST(0xFFFFFFFFFFFFFFFF AS BFloat16), + CAST(-0.0 AS BFloat16), + CAST(inf AS BFloat16), + CAST(-inf AS BFloat16), + CAST(nan AS BFloat16); + +-- Conversions back + +SELECT + CAST(1.1::BFloat16 AS BFloat16), + CAST(1.1::BFloat16 AS Float32), + CAST(1.1::BFloat16 AS Float64), + CAST(1.1::BFloat16 AS Int8); + +-- Comparisons + +SELECT + 1.1::BFloat16 = 1.1::BFloat16, + 1.1::BFloat16 < 1.1, + 1.1::BFloat16 > 1.1, + 1.1::BFloat16 > 1, + 1.1::BFloat16 = 1.09375; + +-- Arithmetic + +SELECT + 1.1::BFloat16 - 1.1::BFloat16 AS a, + 1.1::BFloat16 + 1.1::BFloat16 AS b, + 1.1::BFloat16 * 1.1::BFloat16 AS c, + 1.1::BFloat16 / 1.1::BFloat16 AS d, + toTypeName(a), toTypeName(b), toTypeName(c), toTypeName(d); + +SELECT + 1.1::BFloat16 - 1.1 AS a, + 1.1 + 1.1::BFloat16 AS b, + 1.1::BFloat16 * 1.1 AS c, + 1.1 / 1.1::BFloat16 AS d, + toTypeName(a), toTypeName(b), toTypeName(c), toTypeName(d); + +-- Tables + +DROP TABLE IF EXISTS t; +CREATE TEMPORARY TABLE t (n UInt64, x BFloat16); +INSERT INTO t SELECT number, number FROM numbers(10000); +SELECT *, n = x, n - x FROM t WHERE n % 1000 = 0 ORDER BY n; + +-- Aggregate functions + +SELECT sum(n), sum(x), avg(n), avg(x), min(n), min(x), max(n), max(x), uniq(n), uniq(x), uniqExact(n), uniqExact(x) FROM t; + +-- MergeTree + +DROP TABLE t; +CREATE TABLE t (n UInt64, x BFloat16) ENGINE = MergeTree ORDER BY n; +INSERT INTO t SELECT number, number FROM numbers(10000); +SELECT *, n = x, n - x FROM t WHERE n % 1000 = 0 ORDER BY n; +SELECT sum(n), sum(x), avg(n), avg(x), min(n), min(x), max(n), max(x), uniq(n), uniq(x), uniqExact(n), uniqExact(x) FROM t; + +-- Distances + +WITH + arrayMap(x -> toFloat32(x) / 2, range(384)) AS a32, + arrayMap(x -> toBFloat16(x) / 2, range(384)) AS a16, + arrayMap(x -> x + 1, a32) AS a32_1, + arrayMap(x -> x + 1, a16) AS a16_1 +SELECT a32, a16, a32_1, a16_1, + dotProduct(a32, a32_1), dotProduct(a16, a16_1), + cosineDistance(a32, a32_1), cosineDistance(a16, a16_1), + L2Distance(a32, a32_1), L2Distance(a16, a16_1), + L1Distance(a32, a32_1), L1Distance(a16, a16_1), + LinfDistance(a32, a32_1), LinfDistance(a16, a16_1), + LpDistance(a32, a32_1, 5), LpDistance(a16, a16_1, 5) +FORMAT Vertical; + +-- Introspection + +SELECT 1.1::BFloat16 AS x, + hex(x), bin(x), + byteSize(x), + reinterpretAsUInt16(x), hex(reinterpretAsString(x)); + +-- Rounding (this might not be towards the nearest) + +SELECT 1.1::BFloat16 AS x, + round(x), round(x, 1), round(x, 2), round(x, -1);
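+-- Worked example for the '1.1::BFloat16 = 1.09375' probe above (facts about the format, not about the implementation): +-- BFloat16 keeps 7 explicit mantissa bits, and 1.1 = 0b1.0001100110011... is not exactly representable; +-- truncation yields 0b1.0001100 = 1.09375 (bits 0x3F8C), while rounding to nearest would yield 0b1.0001101 = 1.1015625 (bits 0x3F8D), +-- so the equality reveals which conversion the cast performs.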
diff --git a/parser/testdata/03269_partition_key_not_in_set/ast.json b/parser/testdata/03269_partition_key_not_in_set/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03269_partition_key_not_in_set/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03269_partition_key_not_in_set/metadata.json b/parser/testdata/03269_partition_key_not_in_set/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03269_partition_key_not_in_set/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03269_partition_key_not_in_set/query.sql b/parser/testdata/03269_partition_key_not_in_set/query.sql new file mode 100644 index 000000000..562521fb7 --- /dev/null +++ b/parser/testdata/03269_partition_key_not_in_set/query.sql @@ -0,0 +1,81 @@ +-- Related to https://github.com/ClickHouse/ClickHouse/issues/69829 +-- +-- The main goal of the test is to assert that the constant transformation +-- of a set constant during partition pruning is not performed +-- when it is not allowed (the NOT IN operator case) + +DROP TABLE IF EXISTS 03269_filters; +CREATE TABLE 03269_filters ( + id Int32, + dt Date +) +engine = MergeTree +order by id; + +INSERT INTO 03269_filters +SELECT 6, '2020-01-01' +UNION ALL +SELECT 38, '2021-01-01'; + +SELECT '-- Monotonic function in partition key'; + +DROP TABLE IF EXISTS 03269_single_monotonic; +CREATE TABLE 03269_single_monotonic( + id Int32 +) +ENGINE = MergeTree +PARTITION BY intDiv(id, 10) +ORDER BY id; + +INSERT INTO 03269_single_monotonic SELECT number FROM numbers(50); + +SELECT count() FROM 03269_single_monotonic WHERE id NOT IN (6, 38); +SELECT count() FROM 03269_single_monotonic WHERE id NOT IN ( + SELECT id FROM 03269_filters +); + +DROP TABLE 03269_single_monotonic; + +SELECT '-- Non-monotonic function in partition key'; + +DROP TABLE IF EXISTS 03269_single_non_monotonic; +CREATE TABLE 03269_single_non_monotonic ( + id Int32 +) +ENGINE = MergeTree +PARTITION BY id % 10 +ORDER BY id; + +INSERT INTO 03269_single_non_monotonic SELECT number FROM numbers(50); + +SELECT count() FROM 03269_single_non_monotonic WHERE id NOT IN (6, 38); +SELECT count() FROM 03269_single_non_monotonic WHERE id NOT IN (SELECT id FROM 03269_filters); + +DROP TABLE 03269_single_non_monotonic; + +SELECT '-- Multiple partition columns'; + +DROP TABLE IF EXISTS 03269_multiple_part_cols; +CREATE TABLE 03269_multiple_part_cols ( + id Int32, + dt Date, +) +ENGINE = MergeTree +PARTITION BY (dt, intDiv(id, 10)) +ORDER BY id; + +INSERT INTO 03269_multiple_part_cols +SELECT number, '2020-01-01' FROM numbers(50) +UNION ALL +SELECT number, '2021-01-01' FROM numbers(50); + +SELECT count() FROM 03269_multiple_part_cols WHERE dt NOT IN ('2020-01-01'); +SELECT count() FROM 03269_multiple_part_cols WHERE dt NOT IN (SELECT dt FROM 03269_filters WHERE dt < '2021-01-01'); + +SELECT count() FROM 03269_multiple_part_cols WHERE id NOT IN (6, 38); +SELECT count() FROM 03269_multiple_part_cols WHERE id NOT IN (SELECT id FROM 03269_filters); + +SELECT count() FROM 03269_multiple_part_cols WHERE (id, dt) NOT IN ((6, '2020-01-01'), (38, '2021-01-01')); +SELECT count() FROM 03269_multiple_part_cols WHERE (id, dt) NOT IN (SELECT id, dt FROM 03269_filters); + +DROP TABLE 03269_multiple_part_cols; diff --git a/parser/testdata/03270_empty_tuple_in_array_intersect/ast.json b/parser/testdata/03270_empty_tuple_in_array_intersect/ast.json new file mode 100644 index 000000000..eec95354b --- /dev/null +++ b/parser/testdata/03270_empty_tuple_in_array_intersect/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayIntersect (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001490951, 
"rows_read": 10, + "bytes_read": 394 + } +} diff --git a/parser/testdata/03270_empty_tuple_in_array_intersect/metadata.json b/parser/testdata/03270_empty_tuple_in_array_intersect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03270_empty_tuple_in_array_intersect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03270_empty_tuple_in_array_intersect/query.sql b/parser/testdata/03270_empty_tuple_in_array_intersect/query.sql new file mode 100644 index 000000000..7f876d7e6 --- /dev/null +++ b/parser/testdata/03270_empty_tuple_in_array_intersect/query.sql @@ -0,0 +1 @@ +SELECT arrayIntersect([tuple()]); diff --git a/parser/testdata/03270_fix_column_modifier_write_order/ast.json b/parser/testdata/03270_fix_column_modifier_write_order/ast.json new file mode 100644 index 000000000..ac8f5efac --- /dev/null +++ b/parser/testdata/03270_fix_column_modifier_write_order/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001594828, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03270_fix_column_modifier_write_order/metadata.json b/parser/testdata/03270_fix_column_modifier_write_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03270_fix_column_modifier_write_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03270_fix_column_modifier_write_order/query.sql b/parser/testdata/03270_fix_column_modifier_write_order/query.sql new file mode 100644 index 000000000..cf3683df8 --- /dev/null +++ b/parser/testdata/03270_fix_column_modifier_write_order/query.sql @@ -0,0 +1,5 @@ +drop table if exists t1; +SET allow_experimental_statistics = 1; +create table t1 (d Datetime, c0 Int TTL d + INTERVAL 1 MONTH SETTINGS (max_compress_block_size = 1), c2 Int STATISTICS(Uniq) SETTINGS (max_compress_block_size = 1)) Engine = MergeTree() ORDER BY (); +insert into t1 values ('2024-11-15 18:30:00', 25, 20); + diff --git a/parser/testdata/03270_max_bytes_ratio_before_external_group_by/ast.json b/parser/testdata/03270_max_bytes_ratio_before_external_group_by/ast.json new file mode 100644 index 000000000..b8146f74c --- /dev/null +++ b/parser/testdata/03270_max_bytes_ratio_before_external_group_by/ast.json @@ -0,0 +1,100 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function uniqExact (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal 'String'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_10000000" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + 
"explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_100" + }, + { + "explain": " Literal 'String'" + }, + { + "explain": " Identifier Null" + }, + { + "explain": " Set" + } + ], + + "rows": 26, + + "statistics": + { + "elapsed": 0.001431394, + "rows_read": 26, + "bytes_read": 972 + } +} diff --git a/parser/testdata/03270_max_bytes_ratio_before_external_group_by/metadata.json b/parser/testdata/03270_max_bytes_ratio_before_external_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03270_max_bytes_ratio_before_external_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03270_max_bytes_ratio_before_external_group_by/query.sql b/parser/testdata/03270_max_bytes_ratio_before_external_group_by/query.sql new file mode 100644 index 000000000..48865e447 --- /dev/null +++ b/parser/testdata/03270_max_bytes_ratio_before_external_group_by/query.sql @@ -0,0 +1,2 @@ +SELECT uniqExact(number::String) FROM numbers(10e6) GROUP BY (number%100)::String FORMAT Null SETTINGS max_bytes_ratio_before_external_group_by=-0.1; -- { serverError BAD_ARGUMENTS } +SELECT uniqExact(number::String) FROM numbers(10e6) GROUP BY (number%100)::String FORMAT Null SETTINGS max_bytes_ratio_before_external_group_by=1; -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03271_date_to_datetime_saturation/ast.json b/parser/testdata/03271_date_to_datetime_saturation/ast.json new file mode 100644 index 000000000..909dd7fb3 --- /dev/null +++ b/parser/testdata/03271_date_to_datetime_saturation/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001364406, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03271_date_to_datetime_saturation/metadata.json b/parser/testdata/03271_date_to_datetime_saturation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03271_date_to_datetime_saturation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03271_date_to_datetime_saturation/query.sql b/parser/testdata/03271_date_to_datetime_saturation/query.sql new file mode 100644 index 000000000..812033490 --- /dev/null +++ b/parser/testdata/03271_date_to_datetime_saturation/query.sql @@ -0,0 +1,106 @@ +drop table if exists test; + +create table test (stamp Date) engine MergeTree order by stamp; + +insert into test select '2024-10-30' from numbers(100); +insert into test select '2024-11-19' from numbers(100); +insert into test select '2149-06-06' from numbers(100); + +optimize table test final; + +-- { echoOn } +-- implicit toDateTime (always saturate) +select count() from test where stamp >= parseDateTimeBestEffort('2024-11-01'); + +select count() from test where toDateTime(stamp) >= parseDateTimeBestEffort('2024-11-01') settings date_time_overflow_behavior = 'saturate'; +select count() from test where toDateTime(stamp) >= parseDateTimeBestEffort('2024-11-01') settings date_time_overflow_behavior = 'ignore'; +select count() from test where toDateTime(stamp) >= parseDateTimeBestEffort('2024-11-01') settings date_time_overflow_behavior = 'throw'; -- { serverError 
VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE } + +drop table test; + +create table test (stamp Date) engine MergeTree order by stamp settings index_granularity = 20; + +insert into test select number from numbers(65536); + +set session_timezone = 'UTC'; -- The following tests are timezone sensitive +set optimize_use_implicit_projections = 0; + +-- Boundary at UNIX epoch +SELECT count() FROM test WHERE stamp >= toDateTime(0) SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE identity(stamp) >= toDateTime(0); + +-- Arbitrary DateTime +SELECT count() FROM test WHERE stamp >= toDateTime('2024-10-24 21:30:00') SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE identity(stamp) >= toDateTime('2024-10-24 21:30:00'); + +-- Extreme value beyond supported range +SELECT count() FROM test WHERE stamp >= toDateTime(4294967295) SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE identity(stamp) >= toDateTime(4294967295); + +-- Negative timestamp +SELECT count() FROM test WHERE stamp >= toDateTime(-1) SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE identity(stamp) >= toDateTime(-1); + +-- Pre-Gregorian date +SELECT count() FROM test WHERE stamp >= toDateTime('1000-01-01 00:00:00') SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE identity(stamp) >= toDateTime('1000-01-01 00:00:00'); + +-- UNIX epoch +SELECT count() FROM test WHERE stamp >= toDateTime('1970-01-01 00:00:00') SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE identity(stamp) >= toDateTime('1970-01-01 00:00:00'); + +-- Modern date within supported range +SELECT count() FROM test WHERE stamp >= toDateTime('2023-01-01 00:00:00') SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE identity(stamp) >= toDateTime('2023-01-01 00:00:00'); + +-- Far future but still valid +SELECT count() FROM test WHERE stamp >= toDateTime('2100-12-31 23:59:59') SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE identity(stamp) >= toDateTime('2100-12-31 23:59:59'); + +-- Maximum 32-bit timestamp +SELECT count() FROM test WHERE stamp >= toDateTime(2147483647) SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE identity(stamp) >= toDateTime(2147483647); + +-- Maximum 32-bit unsigned overflow +SELECT count() FROM test WHERE stamp >= toDateTime(4294967295) SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE identity(stamp) >= toDateTime(4294967295); + +-- Minimum Date boundary +SELECT count() FROM test WHERE stamp >= toDate('0000-01-01') SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE identity(stamp) >= toDate('0000-01-01'); + +-- Maximum Date boundary +SELECT count() FROM test WHERE stamp >= toDate('9999-12-31') SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE identity(stamp) >= toDate('9999-12-31'); + +-- Convert stamp to Date +SELECT count() FROM test WHERE toDate(stamp) >= toDateTime(0) SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE toDate(identity(stamp)) >= toDateTime(0); + +-- Convert stamp to DateTime (This will overflow and should not use primary key) +SELECT count() FROM test WHERE toDateTime(stamp) >= toDateTime(0) SETTINGS force_primary_key = 1; -- { serverError INDEX_NOT_USED } +SELECT count() FROM test WHERE toDateTime(identity(stamp)) >= toDateTime(0); + +-- Exact Date match +SELECT count() FROM test WHERE stamp = toDate('2023-01-01') SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE identity(stamp) = toDate('2023-01-01'); + +-- Exact DateTime match +SELECT count() FROM test 
WHERE stamp = toDateTime('2023-01-01 00:00:00') SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE identity(stamp) = toDateTime('2023-01-01 00:00:00'); + +-- Invalid DateTime (negative) +SELECT count() FROM test WHERE stamp < toDateTime(-1) SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE identity(stamp) < toDateTime(-1); + +-- Extremely large DateTime +SELECT count() FROM test WHERE stamp > toDateTime(9999999999) SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE identity(stamp) > toDateTime(9999999999); + +-- NULL DateTime +SELECT count() FROM test WHERE stamp >= toDateTime(NULL) SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE identity(stamp) >= toDateTime(NULL); + +-- NULL Date +SELECT count() FROM test WHERE stamp <= toDate(NULL) SETTINGS force_primary_key = 1; +SELECT count() FROM test WHERE identity(stamp) <= toDate(NULL); diff --git a/parser/testdata/03271_decimal_monotonic_day_of_week/ast.json b/parser/testdata/03271_decimal_monotonic_day_of_week/ast.json new file mode 100644 index 000000000..0525dbcd0 --- /dev/null +++ b/parser/testdata/03271_decimal_monotonic_day_of_week/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery decimal_dt (children 1)" + }, + { + "explain": " Identifier decimal_dt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001238685, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03271_decimal_monotonic_day_of_week/metadata.json b/parser/testdata/03271_decimal_monotonic_day_of_week/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03271_decimal_monotonic_day_of_week/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03271_decimal_monotonic_day_of_week/query.sql b/parser/testdata/03271_decimal_monotonic_day_of_week/query.sql new file mode 100644 index 000000000..66e085c50 --- /dev/null +++ b/parser/testdata/03271_decimal_monotonic_day_of_week/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS decimal_dt; + +CREATE TABLE decimal_dt (timestamp DateTime64(9)) ENGINE=MergeTree() ORDER BY timestamp; +INSERT INTO decimal_dt VALUES (toDate('2024-11-11')),(toDate('2024-11-12')),(toDate('2024-11-13')),(toDate('2024-11-14')),(toDate('2024-11-15')),(toDate('2024-11-16')),(toDate('2024-11-17')); +SELECT count() FROM decimal_dt WHERE toDayOfWeek(timestamp) > 3; + +DROP TABLE IF EXISTS decimal_dt; diff --git a/parser/testdata/03271_dynamic_variant_in_min_max/ast.json b/parser/testdata/03271_dynamic_variant_in_min_max/ast.json new file mode 100644 index 000000000..67edb6980 --- /dev/null +++ b/parser/testdata/03271_dynamic_variant_in_min_max/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001434334, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03271_dynamic_variant_in_min_max/metadata.json b/parser/testdata/03271_dynamic_variant_in_min_max/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03271_dynamic_variant_in_min_max/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03271_dynamic_variant_in_min_max/query.sql b/parser/testdata/03271_dynamic_variant_in_min_max/query.sql new file mode 100644 index 000000000..a7fea9415 --- /dev/null +++ 
b/parser/testdata/03271_dynamic_variant_in_min_max/query.sql @@ -0,0 +1,18 @@ +set allow_experimental_dynamic_type=1; +select max(number::Dynamic) from numbers(10); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select min(number::Dynamic) from numbers(10); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select argMax(number, number::Dynamic) from numbers(10); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select argMin(number, number::Dynamic) from numbers(10); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select anyArgMax(number, number::Dynamic) from numbers(10); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select anyArgMin(number, number::Dynamic) from numbers(10); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +create table test (d Dynamic, index idx d type minmax); -- {serverError BAD_ARGUMENTS} + +set allow_experimental_variant_type=1; +select max(number::Variant(UInt64)) from numbers(10); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select min(number::Variant(UInt64)) from numbers(10); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select argMax(number, number::Variant(UInt64)) from numbers(10); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select argMin(number, number::Variant(UInt64)) from numbers(10); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select anyArgMax(number, number::Variant(UInt64)) from numbers(10); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select anyArgMin(number, number::Variant(UInt64)) from numbers(10); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +create table test (d Variant(UInt64), index idx d type minmax); -- {serverError BAD_ARGUMENTS} + diff --git a/parser/testdata/03271_max_bytes_ratio_before_external_order_by/ast.json b/parser/testdata/03271_max_bytes_ratio_before_external_order_by/ast.json new file mode 100644 index 000000000..5c239b43e --- /dev/null +++ b/parser/testdata/03271_max_bytes_ratio_before_external_order_by/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_100000000" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Identifier Null" + }, + { + "explain": " Set" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001062294, + "rows_read": 16, + "bytes_read": 582 + } +} diff --git a/parser/testdata/03271_max_bytes_ratio_before_external_order_by/metadata.json b/parser/testdata/03271_max_bytes_ratio_before_external_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03271_max_bytes_ratio_before_external_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03271_max_bytes_ratio_before_external_order_by/query.sql b/parser/testdata/03271_max_bytes_ratio_before_external_order_by/query.sql new file mode 100644 index 000000000..9f734a5a2 --- /dev/null +++ b/parser/testdata/03271_max_bytes_ratio_before_external_order_by/query.sql @@ -0,0 +1,2 @@ 
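+-- max_bytes_ratio_before_external_sort is a fraction of the memory limit and
+-- presumably must lie in [0, 1); both a negative ratio and exactly 1 are
+-- rejected below with BAD_ARGUMENTS.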
+select number from numbers(100e6) order by number format Null settings max_bytes_ratio_before_external_sort=-0.1; -- { serverError BAD_ARGUMENTS } +select number from numbers(100e6) order by number format Null settings max_bytes_ratio_before_external_sort=1; -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03271_s3_table_function_asterisk_glob/ast.json b/parser/testdata/03271_s3_table_function_asterisk_glob/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03271_s3_table_function_asterisk_glob/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03271_s3_table_function_asterisk_glob/metadata.json b/parser/testdata/03271_s3_table_function_asterisk_glob/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03271_s3_table_function_asterisk_glob/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03271_s3_table_function_asterisk_glob/query.sql b/parser/testdata/03271_s3_table_function_asterisk_glob/query.sql new file mode 100644 index 000000000..d3dba883f --- /dev/null +++ b/parser/testdata/03271_s3_table_function_asterisk_glob/query.sql @@ -0,0 +1,28 @@ +-- Tags: no-parallel, no-fasttest +-- Tag no-fasttest: Depends on AWS + +SET s3_truncate_on_insert = 1; +SET s3_skip_empty_files = 0; + +INSERT INTO FUNCTION s3(s3_conn, filename='dir1/03271_s3_table_function_asterisk_glob/', format=Parquet) SELECT 0 as num; +INSERT INTO FUNCTION s3(s3_conn, filename='dir1/03271_s3_table_function_asterisk_glob/file1', format=Parquet) SELECT 1 as num; +INSERT INTO FUNCTION s3(s3_conn, filename='dir1/03271_s3_table_function_asterisk_glob/file2', format=Parquet) SELECT 2 as num; +INSERT INTO FUNCTION s3(s3_conn, filename='dir1/03271_s3_table_function_asterisk_glob/file3', format=Parquet) SELECT 3 as num; + +SELECT * FROM s3(s3_conn, filename='dir1/03271_s3_table_function_asterisk_glob/*') ORDER BY ALL SETTINGS max_threads = 1; +SELECT * FROM s3(s3_conn, filename='dir1/03271_s3_table_function_asterisk_glob/*') ORDER BY ALL SETTINGS max_threads = 4; + +SELECT * FROM s3Cluster('test_cluster_two_shards_localhost', s3_conn, filename='dir1/03271_s3_table_function_asterisk_glob/*') ORDER BY ALL SETTINGS max_threads = 1; +SELECT * FROM s3Cluster('test_cluster_two_shards_localhost', s3_conn, filename='dir1/03271_s3_table_function_asterisk_glob/*') ORDER BY ALL SETTINGS max_threads = 4; + +-- Empty "directory" files created implicitly by S3 console: +-- https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-folders.html +SELECT * +FROM s3('https://clickhouse-public-datasets.s3.amazonaws.com/wikistat/original/*', NOSIGN) +LIMIT 1 +FORMAT Null; + +SELECT * +FROM s3Cluster('test_cluster_two_shards_localhost', 'https://clickhouse-public-datasets.s3.amazonaws.com/wikistat/original/*', NOSIGN) +LIMIT 1 +FORMAT Null; diff --git a/parser/testdata/03271_sqllancer_having_issue/ast.json b/parser/testdata/03271_sqllancer_having_issue/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03271_sqllancer_having_issue/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03271_sqllancer_having_issue/metadata.json b/parser/testdata/03271_sqllancer_having_issue/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03271_sqllancer_having_issue/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03271_sqllancer_having_issue/query.sql 
b/parser/testdata/03271_sqllancer_having_issue/query.sql new file mode 100644 index 000000000..aa9659693 --- /dev/null +++ b/parser/testdata/03271_sqllancer_having_issue/query.sql @@ -0,0 +1,10 @@ +-- https://s3.amazonaws.com/clickhouse-test-reports/0/a02b20a9813c6ba0880c67f079363ef1c5440109/sqlancer__debug_.html +-- Caused by enablement of query_plan_merge_filters. Will fail if the next line is uncommented +-- set query_plan_merge_filters=1; + +CREATE TABLE IF NOT EXISTS t3 (c0 Int32) ENGINE = Memory(); +INSERT INTO t3(c0) VALUES (1110866669); + +-- These two queries are expected to return the same result +SELECT (tan (t3.c0)), SUM(-1017248723), ((t3.c0)%(t3.c0)) FROM t3 GROUP BY t3.c0 SETTINGS aggregate_functions_null_for_empty=1, enable_optimize_predicate_expression=0; +SELECT (tan (t3.c0)), SUM(-1017248723), ((t3.c0)%(t3.c0)) FROM t3 GROUP BY t3.c0 HAVING ((tan ((- (SUM(-1017248723)))))) and ((sqrt (SUM(-1017248723)))) UNION ALL SELECT (tan (t3.c0)), SUM(-1017248723), ((t3.c0)%(t3.c0)) FROM t3 GROUP BY t3.c0 HAVING (NOT (((tan ((- (SUM(-1017248723)))))) and ((sqrt (SUM(-1017248723)))))) UNION ALL SELECT (tan (t3.c0)), SUM(-1017248723), ((t3.c0)%(t3.c0)) FROM t3 GROUP BY t3.c0 HAVING ((((tan ((- (SUM(-1017248723)))))) and ((sqrt (SUM(-1017248723))))) IS NULL) SETTINGS aggregate_functions_null_for_empty=1, enable_optimize_predicate_expression=0; diff --git a/parser/testdata/03272_arrayAUCPR/ast.json b/parser/testdata/03272_arrayAUCPR/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03272_arrayAUCPR/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03272_arrayAUCPR/metadata.json b/parser/testdata/03272_arrayAUCPR/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03272_arrayAUCPR/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03272_arrayAUCPR/query.sql b/parser/testdata/03272_arrayAUCPR/query.sql new file mode 100644 index 000000000..4d355c208 --- /dev/null +++ b/parser/testdata/03272_arrayAUCPR/query.sql @@ -0,0 +1,76 @@ +-- type correctness tests +select floor(arrayAUCPR([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]), 10); +select floor(arrayAUCPR([0.1, 0.4, 0.35, 0.8], cast([0, 0, 1, 1] as Array(Int8))), 10); +select floor(arrayAUCPR([0.1, 0.4, 0.35, 0.8], cast([-1, -1, 1, 1] as Array(Int8))), 10); +select floor(arrayAUCPR([0.1, 0.4, 0.35, 0.8], cast(['false', 'false', 'true', 'true'] as Array(Enum8('false' = 0, 'true' = 1)))), 10); +select floor(arrayAUCPR([0.1, 0.4, 0.35, 0.8], cast(['false', 'false', 'true', 'true'] as Array(Enum8('false' = -1, 'true' = 1)))), 10); +select floor(arrayAUCPR(cast([10, 40, 35, 80] as Array(UInt8)), [0, 0, 1, 1]), 10); +select floor(arrayAUCPR(cast([10, 40, 35, 80] as Array(UInt16)), [0, 0, 1, 1]), 10); +select floor(arrayAUCPR(cast([10, 40, 35, 80] as Array(UInt32)), [0, 0, 1, 1]), 10); +select floor(arrayAUCPR(cast([10, 40, 35, 80] as Array(UInt64)), [0, 0, 1, 1]), 10); +select floor(arrayAUCPR(cast([-10, -40, -35, -80] as Array(Int8)), [0, 0, 1, 1]), 10); +select floor(arrayAUCPR(cast([-10, -40, -35, -80] as Array(Int16)), [0, 0, 1, 1]), 10); +select floor(arrayAUCPR(cast([-10, -40, -35, -80] as Array(Int32)), [0, 0, 1, 1]), 10); +select floor(arrayAUCPR(cast([-10, -40, -35, -80] as Array(Int64)), [0, 0, 1, 1]), 10); +select floor(arrayAUCPR(cast([-0.1, -0.4, -0.35, -0.8] as Array(Float32)), [0, 0, 1, 1]), 10); + +-- output value correctness tests +select floor(arrayAUCPR([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]), 10); +select 
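+-- (floor(x, 10) truncates the returned area to 10 decimal places, presumably
+-- to keep the reference output stable against floating-point round-off; e.g.
+-- floor(0.8333333333333333, 10) = 0.8333333333)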
floor(arrayAUCPR([0.1, 0.4, 0.4, 0.35, 0.8], [0, 0, 1, 1, 1]), 10); +select floor(arrayAUCPR([0.1, 0.35, 0.4, 0.8], [1, 0, 1, 0]), 10); +select floor(arrayAUCPR([0.1, 0.35, 0.4, 0.4, 0.8], [1, 0, 1, 0, 0]), 10); +select floor(arrayAUCPR([0, 3, 5, 6, 7.5, 8], [1, 0, 1, 0, 0, 0]), 10); +select floor(arrayAUCPR([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 0, 1, 0, 0, 0, 1, 0, 0, 1]), 10); +select floor(arrayAUCPR([0, 1, 1, 2, 2, 2, 3, 3, 3, 3], [1, 0, 1, 0, 0, 0, 1, 0, 0, 1]), 10); + +-- output shouldn't change when passing [0, 0, 0] to the offsets arg +select floor(arrayAUCPR([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], [0, 0, 0]), 10); +select floor(arrayAUCPR([0.1, 0.4, 0.4, 0.35, 0.8], [0, 0, 1, 1, 1], [0, 0, 0]), 10); +select floor(arrayAUCPR([0.1, 0.35, 0.4, 0.8], [1, 0, 1, 0], [0, 0, 0]), 10); +select floor(arrayAUCPR([0.1, 0.35, 0.4, 0.4, 0.8], [1, 0, 1, 0, 0], [0, 0, 0]), 10); +select floor(arrayAUCPR([0, 3, 5, 6, 7.5, 8], [1, 0, 1, 0, 0, 0], [0, 0, 0]), 10); +select floor(arrayAUCPR([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 0, 1, 0, 0, 0, 1, 0, 0, 1], [0, 0, 0]), 10); +select floor(arrayAUCPR([0, 1, 1, 2, 2, 2, 3, 3, 3, 3], [1, 0, 1, 0, 0, 0, 1, 0, 0, 1], [0, 0, 0]), 10); + +-- edge cases +SELECT floor(arrayAUCPR([1], [1]), 10); +SELECT floor(arrayAUCPR([1], [0]), 10); +SELECT floor(arrayAUCPR([0], [0]), 10); +SELECT floor(arrayAUCPR([0], [1]), 10); +SELECT floor(arrayAUCPR([1, 1], [1, 1]), 10); +SELECT floor(arrayAUCPR([1, 1], [0, 0]), 10); +SELECT floor(arrayAUCPR([1, 1], [0, 1]), 10); +SELECT floor(arrayAUCPR([0, 1], [0, 1]), 10); +SELECT floor(arrayAUCPR([1, 0], [0, 1]), 10); +SELECT floor(arrayAUCPR([0, 0, 1], [0, 1, 1]), 10); +SELECT floor(arrayAUCPR([0, 1, 1], [0, 1, 1]), 10); +SELECT floor(arrayAUCPR([0, 1, 1], [0, 0, 1]), 10); + +-- alias +SELECT floor(arrayPRAUC([1], [1]), 10); + +-- general negative tests +select arrayAUCPR([], []); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select arrayAUCPR([0, 0, 1, 1]); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select arrayAUCPR([0.1, 0.35], [0, 0, 1, 1]); -- { serverError BAD_ARGUMENTS } +select arrayAUCPR([0.1, 0.4, 0.35, 0.8], []); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select arrayAUCPR([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], [0, 0, 0], [1, 1, 0, 1]); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select arrayAUCPR(['a', 'b', 'c', 'd'], [1, 0, 1, 1]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select arrayAUCPR([0.1, 0.4, NULL, 0.8], [0, 0, 1, 1]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select arrayAUCPR([0.1, 0.4, 0.35, 0.8], [0, NULL, 1, 1]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- negative tests for optional argument +select arrayAUCPR([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], [0, 0, NULL]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select arrayAUCPR([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], ['a', 'b', 'c']); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select arrayAUCPR([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], [0, 1, 0, 0]); -- { serverError BAD_ARGUMENTS } +select arrayAUCPR([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1], [0, -1, 0]); -- { serverError BAD_ARGUMENTS } +select arrayAUCPR(x, y, z) from ( + select [1] as x, [0] as y, [0, 0, 0, 0, 0, 0] as z + UNION ALL + select [1] as x, [0] as y, [] as z +); -- { serverError BAD_ARGUMENTS } +select arrayAUCPR(x, y, z) from ( + select [1] as x, [0] as y, [0, 0] as z + UNION ALL + select [1] as x, [1] as y, [0, 0, 0, 0] as z +); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03272_bad_aggregate_function/ast.json b/parser/testdata/03272_bad_aggregate_function/ast.json new 
file mode 100644 index 000000000..1c1d01bf1 --- /dev/null +++ b/parser/testdata/03272_bad_aggregate_function/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function deltaSumTimestamp (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.0012158, + "rows_read": 8, + "bytes_read": 300 + } +} diff --git a/parser/testdata/03272_bad_aggregate_function/metadata.json b/parser/testdata/03272_bad_aggregate_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03272_bad_aggregate_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03272_bad_aggregate_function/query.sql b/parser/testdata/03272_bad_aggregate_function/query.sql new file mode 100644 index 000000000..fe66f6171 --- /dev/null +++ b/parser/testdata/03272_bad_aggregate_function/query.sql @@ -0,0 +1 @@ +SELECT deltaSumTimestamp(1, 2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/03272_bitmapTransform_error_counter/ast.json b/parser/testdata/03272_bitmapTransform_error_counter/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03272_bitmapTransform_error_counter/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03272_bitmapTransform_error_counter/metadata.json b/parser/testdata/03272_bitmapTransform_error_counter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03272_bitmapTransform_error_counter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03272_bitmapTransform_error_counter/query.sql b/parser/testdata/03272_bitmapTransform_error_counter/query.sql new file mode 100644 index 000000000..843c837b4 --- /dev/null +++ b/parser/testdata/03272_bitmapTransform_error_counter/query.sql @@ -0,0 +1,11 @@ +-- Tags: no-parallel + +CREATE TABLE counters (value UInt64) ENGINE = MergeTree() ORDER BY value; + +INSERT INTO counters SELECT sum(value) FROM system.errors WHERE name = 'ILLEGAL_TYPE_OF_ARGUMENT'; + +SELECT bitmapToArray(bitmapTransform(bitmapBuild([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), cast([5,999,2] as Array(UInt32)), cast([2,888,20] as Array(UInt32)))) AS res FORMAT Null; + +INSERT INTO counters SELECT sum(value) FROM system.errors WHERE name = 'ILLEGAL_TYPE_OF_ARGUMENT'; + +SELECT (max(value) - min(value)) == 0 FROM counters; diff --git a/parser/testdata/03272_json_to_json_cast_1/ast.json b/parser/testdata/03272_json_to_json_cast_1/ast.json new file mode 100644 index 000000000..040f0df9f --- /dev/null +++ b/parser/testdata/03272_json_to_json_cast_1/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001162837, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03272_json_to_json_cast_1/metadata.json b/parser/testdata/03272_json_to_json_cast_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03272_json_to_json_cast_1/metadata.json @@ -0,0 +1 @@ 
+{"todo": true} diff --git a/parser/testdata/03272_json_to_json_cast_1/query.sql b/parser/testdata/03272_json_to_json_cast_1/query.sql new file mode 100644 index 000000000..5455ffb40 --- /dev/null +++ b/parser/testdata/03272_json_to_json_cast_1/query.sql @@ -0,0 +1,99 @@ +SET enable_json_type = 1; +set enable_analyzer = 1; +set output_format_native_write_json_as_string=0; + +drop table if exists test; +create table test (json JSON(max_dynamic_paths=2, max_dynamic_types=2, a UInt32, b String, SKIP c)) engine=Memory; +insert into test format JSONAsObject +{"a" : 1, "b" : "str1", "k1" : 1, "k2" : 2, "k3" : 3, "k4" : 4}, +{"a" : 2, "b" : "str2", "c" : 42, "k1" : "kstr1", "k2" : "kstr2", "k3" : "kstr3", "k4" : "kstr4"}, +{"a" : 3, "b" : "str3", "k1" : [1], "k2" : [2], "k3" : [3], "k4" : [4]}, +{"a" : 4, "b" : "str4", "c" : 42, "k1" : 5, "k2" : 6, "k3" : 7, "k4" : 8}; + +select 'Keep max_dynamic_paths, decrease max_dynamic_types'; +select + json::JSON(max_dynamic_paths=2, max_dynamic_types=1, a UInt32, b String, SKIP c) as json2, + JSONDynamicPaths(json2), + JSONSharedDataPaths(json2), + dynamicType(json2.k2), + isDynamicElementInSharedData(json2.k2), + dynamicType(json2.k4), + isDynamicElementInSharedData(json2.k4) +from test; + +select 'Keep max_dynamic_paths, increase max_dynamic_types'; +select + json::JSON(max_dynamic_paths=2, max_dynamic_types=4, a UInt32, b String, SKIP c) as json2, + JSONDynamicPaths(json2), + JSONSharedDataPaths(json2), + dynamicType(json2.k2), + isDynamicElementInSharedData(json2.k2), + dynamicType(json2.k4), + isDynamicElementInSharedData(json2.k4) +from test; + +select 'Increase max_dynamic_paths, keep max_dynamic_types'; +select + json::JSON(max_dynamic_paths=4, max_dynamic_types=2, a UInt32, b String, SKIP c) as json2, + JSONDynamicPaths(json2), + JSONSharedDataPaths(json2), + dynamicType(json2.k2), + isDynamicElementInSharedData(json2.k2), + dynamicType(json2.k4), + isDynamicElementInSharedData(json2.k4) +from test; + +select 'Increase max_dynamic_paths, decrease max_dynamic_types'; +select + json::JSON(max_dynamic_paths=4, max_dynamic_types=1, a UInt32, b String, SKIP c) as json2, + JSONDynamicPaths(json2), + JSONSharedDataPaths(json2), + dynamicType(json2.k2), + isDynamicElementInSharedData(json2.k2), + dynamicType(json2.k4), + isDynamicElementInSharedData(json2.k4) +from test; + +select 'Increase max_dynamic_paths, increase max_dynamic_types'; +select + json::JSON(max_dynamic_paths=4, max_dynamic_types=4, a UInt32, b String, SKIP c) as json2, + JSONDynamicPaths(json2), + JSONSharedDataPaths(json2), + dynamicType(json2.k2), + isDynamicElementInSharedData(json2.k2), + dynamicType(json2.k4), + isDynamicElementInSharedData(json2.k4) +from test; + +select 'Decrease max_dynamic_paths, keep max_dynamic_types'; +select + json::JSON(max_dynamic_paths=1, max_dynamic_types=2, a UInt32, b String, SKIP c) as json2, + JSONDynamicPaths(json2), + JSONSharedDataPaths(json2), + dynamicType(json2.k2), + isDynamicElementInSharedData(json2.k2), + dynamicType(json2.k4), + isDynamicElementInSharedData(json2.k4) +from test; + +select 'Decrease max_dynamic_paths, decrease max_dynamic_types'; +select + json::JSON(max_dynamic_paths=1, max_dynamic_types=1, a UInt32, b String, SKIP c) as json2, + JSONDynamicPaths(json2), + JSONSharedDataPaths(json2), + dynamicType(json2.k2), + isDynamicElementInSharedData(json2.k2), + dynamicType(json2.k4), + isDynamicElementInSharedData(json2.k4) +from test; + +select 'Decrease max_dynamic_paths, increase max_dynamic_types'; +select + 
json::JSON(max_dynamic_paths=1, max_dynamic_types=4, a UInt32, b String, SKIP c) as json2, + JSONDynamicPaths(json2), + JSONSharedDataPaths(json2), + dynamicType(json2.k2), + isDynamicElementInSharedData(json2.k2), + dynamicType(json2.k4), + isDynamicElementInSharedData(json2.k4) +from test; diff --git a/parser/testdata/03272_json_to_json_cast_2/ast.json b/parser/testdata/03272_json_to_json_cast_2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03272_json_to_json_cast_2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03272_json_to_json_cast_2/metadata.json b/parser/testdata/03272_json_to_json_cast_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03272_json_to_json_cast_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03272_json_to_json_cast_2/query.sql b/parser/testdata/03272_json_to_json_cast_2/query.sql new file mode 100644 index 000000000..f723b0da0 --- /dev/null +++ b/parser/testdata/03272_json_to_json_cast_2/query.sql @@ -0,0 +1,44 @@ +-- Tags: long + +SET enable_json_type = 1; +set enable_analyzer = 1; +set output_format_native_write_json_as_string = 0; + +drop table if exists test; +create table test (json JSON(max_dynamic_paths=4)) engine=Memory; +insert into test format JSONAsObject +{"k1" : 1} +{"k1" : 2, "k4" : 22} +{"k1" : 3, "k4" : 23, "k3" : 33} +{"k1" : 4, "k4" : 24, "k3" : 34, "k2" : 44}; + +select 'max_dynamic_paths=3'; +select json::JSON(max_dynamic_paths=3) as json2, JSONAllPaths(json2), JSONSharedDataPaths(json2), json2.k1, json2.k2, json2.k3, json2.k4 from test; +select 'max_dynamic_paths=2'; +select json::JSON(max_dynamic_paths=2) as json2, JSONAllPaths(json2), JSONSharedDataPaths(json2), json2.k1, json2.k2, json2.k3, json2.k4 from test; +select 'max_dynamic_paths=1'; +select json::JSON(max_dynamic_paths=1) as json2, JSONAllPaths(json2), JSONSharedDataPaths(json2), json2.k1, json2.k2, json2.k3, json2.k4 from test; +select 'max_dynamic_paths=0'; +select json::JSON(max_dynamic_paths=0) as json2, JSONAllPaths(json2), JSONSharedDataPaths(json2), json2.k1, json2.k2, json2.k3, json2.k4 from test; + +drop table test; + +set max_block_size=1000; +set max_threads=1; +create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; +insert into test select number, multiIf(number < 1000, '{"k2" : 42}', number < 3000, '{"k3" : 42}', number < 6000, '{"k4" : 42}', number < 10000, '{"k1" : 42}', '{"k1" : 42, "k2" : 42, "k3" : 42, "k4" : 42}') from numbers(15000); + +select 'max_dynamic_paths=3'; +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(json2)) from (select json::JSON(max_dynamic_paths=3) as json2 from test) order by all; +select 'max_dynamic_paths=2'; +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(json2)) from (select json::JSON(max_dynamic_paths=2) as json2 from test) order by all; +select 'max_dynamic_paths=1'; +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(json2)) from (select json::JSON(max_dynamic_paths=1) as json2 from test) order by all; +select 'max_dynamic_paths=0'; +select 'All paths'; +select distinct arrayJoin(JSONAllPaths(json2)) from (select json::JSON(max_dynamic_paths=0) as json2 from test) order by all; + +drop table test; diff --git a/parser/testdata/03272_json_to_json_cast_3/ast.json b/parser/testdata/03272_json_to_json_cast_3/ast.json new file mode 100644 index 000000000..045051010 --- 
/dev/null +++ b/parser/testdata/03272_json_to_json_cast_3/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001464314, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03272_json_to_json_cast_3/metadata.json b/parser/testdata/03272_json_to_json_cast_3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03272_json_to_json_cast_3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03272_json_to_json_cast_3/query.sql b/parser/testdata/03272_json_to_json_cast_3/query.sql new file mode 100644 index 000000000..11cb40546 --- /dev/null +++ b/parser/testdata/03272_json_to_json_cast_3/query.sql @@ -0,0 +1,81 @@ +SET enable_json_type = 1; +set enable_analyzer = 1; +set output_format_native_write_json_as_string = 0; + +drop table if exists test; +create table test (json JSON(max_dynamic_paths=2)) engine=Memory; +insert into test format JSONAsObject +{"a" : {"b" : 1}, "b" : 1, "c" : {"d" : 1}, "d" : 1} +{"a" : {"b" : 2}, "b" : 2, "c" : {"d" : 2}, "d" : 2} +{"a" : {"b" : 3}, "b" : 3, "c" : {"d" : 3}, "d" : 3}; + +select 'skip a'; +select json::JSON(SKIP a, max_dynamic_paths=2) from test; +select 'skip a.b'; +select json::JSON(SKIP a.b, max_dynamic_paths=2) from test; +select 'skip b'; +select json::JSON(SKIP b, max_dynamic_paths=2) from test; +select 'skip c'; +select json::JSON(SKIP c, max_dynamic_paths=2) from test; +select 'skip c.d'; +select json::JSON(SKIP c.d, max_dynamic_paths=2) from test; +select 'skip d'; +select json::JSON(SKIP d, max_dynamic_paths=2) from test; +select 'skip a, skip b'; +select json::JSON(SKIP a, SKIP b, max_dynamic_paths=2) from test; +select 'skip a, skip c'; +select json::JSON(SKIP a, SKIP c, max_dynamic_paths=2) from test; +select 'skip a, skip c.d'; +select json::JSON(SKIP a, SKIP c.d, max_dynamic_paths=2) from test; +select 'skip a, skip d'; +select json::JSON(SKIP a, SKIP d, max_dynamic_paths=2) from test; +select 'skip a.b, skip b'; +select json::JSON(SKIP a.b, SKIP b, max_dynamic_paths=2) from test; +select 'skip a.b, skip c'; +select json::JSON(SKIP a.b, SKIP c, max_dynamic_paths=2) from test; +select 'skip a.b, skip c.d'; +select json::JSON(SKIP a.b, SKIP c.d, max_dynamic_paths=2) from test; +select 'skip a.b, skip d'; +select json::JSON(SKIP a.b, SKIP d, max_dynamic_paths=2) from test; +select 'skip b, skip c'; +select json::JSON(SKIP b, SKIP c, max_dynamic_paths=2) from test; +select 'skip b, skip c.d'; +select json::JSON(SKIP b, SKIP c.d, max_dynamic_paths=2) from test; +select 'skip b, skip d'; +select json::JSON(SKIP b, SKIP d, max_dynamic_paths=2) from test; +select 'skip c, skip d'; +select json::JSON(SKIP c, SKIP d, max_dynamic_paths=2) from test; +select 'skip a, skip b, skip c'; +select json::JSON(SKIP a, SKIP b, SKIP c, max_dynamic_paths=2) from test; +select 'skip a, skip b, skip c.d'; +select json::JSON(SKIP a, SKIP b, SKIP c.d, max_dynamic_paths=2) from test; +select 'skip a, skip b, skip d'; +select json::JSON(SKIP a, SKIP b, SKIP d, max_dynamic_paths=2) from test; +select 'skip a, skip c, skip d'; +select json::JSON(SKIP a, SKIP c, SKIP d, max_dynamic_paths=2) from test; +select 'skip a, skip c.d, skip d'; +select json::JSON(SKIP a, SKIP c.d, SKIP d, max_dynamic_paths=2) from test; +select 'skip a.b, skip b, skip c'; +select json::JSON(SKIP a.b, SKIP b, SKIP c, max_dynamic_paths=2) from test; +select 'skip a.b, skip 
b, skip c.d'; +select json::JSON(SKIP a.b, SKIP b, SKIP c.d, max_dynamic_paths=2) from test; +select 'skip a.b, skip b, skip d'; +select json::JSON(SKIP a.b, SKIP b, SKIP d, max_dynamic_paths=2) from test; +select 'skip a.b, skip c, skip d'; +select json::JSON(SKIP a.b, SKIP c, SKIP d, max_dynamic_paths=2) from test; +select 'skip a.b, skip c.d, skip d'; +select json::JSON(SKIP a.b, SKIP c.d, SKIP d, max_dynamic_paths=2) from test; +select 'skip b, skip c, skip d'; +select json::JSON(SKIP b, SKIP c, SKIP d, max_dynamic_paths=2) from test; +select 'skip b, skip c.d, skip d'; +select json::JSON(SKIP b, SKIP c.d, SKIP d, max_dynamic_paths=2) from test; +select 'skip regexp .*a.*'; +select json::JSON(SKIP REGEXP '.*a.*', max_dynamic_paths=2) from test; +select 'skip regexp .*b.*'; +select json::JSON(SKIP REGEXP '.*b.*', max_dynamic_paths=2) from test; +select 'skip regexp .*d.*'; +select json::JSON(SKIP REGEXP '.*d.*', max_dynamic_paths=2) from test; +select 'skip regexp .*c.*'; +select json::JSON(SKIP REGEXP '.*c.*', max_dynamic_paths=2) from test; + +drop table test; diff --git a/parser/testdata/03272_json_to_json_cast_4/ast.json b/parser/testdata/03272_json_to_json_cast_4/ast.json new file mode 100644 index 000000000..71c494366 --- /dev/null +++ b/parser/testdata/03272_json_to_json_cast_4/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001439811, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03272_json_to_json_cast_4/metadata.json b/parser/testdata/03272_json_to_json_cast_4/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03272_json_to_json_cast_4/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03272_json_to_json_cast_4/query.sql b/parser/testdata/03272_json_to_json_cast_4/query.sql new file mode 100644 index 000000000..641699842 --- /dev/null +++ b/parser/testdata/03272_json_to_json_cast_4/query.sql @@ -0,0 +1,24 @@ +SET enable_json_type = 1; +set enable_analyzer = 1; +set output_format_native_write_json_as_string = 0; + +drop table if exists test; +create table test (json JSON(max_dynamic_paths=2, k1 UInt32, k2 String)) engine=Memory; +insert into test format JSONAsObject +{"k1" : 1, "k2" : "2020-01-01", "k3" : 31, "k4" : [1, 2, 3], "k5" : "str1" } +{"k1" : 2, "k2" : "2020-01-02", "k4" : [5, 6, 7], "k6" : false} +{"k1" : 3, "k2" : "2020-01-03", "k3" : 32, "k5" : "str1" } +{"k1" : 4, "k2" : "2020-01-04", "k4" : [8, 9, 0], "k5" : "str1", "k6" : true } + +select 'Add new typed path'; +select json::JSON(max_dynamic_paths=2, k1 UInt32, k2 String, k3 UInt32, k6 Bool, k7 UInt32) as json2, JSONDynamicPaths(json2), JSONSharedDataPaths(json2), json2.k1, json2.k2, json2.k3, json2.k4, json2.k5, json2.k6, json2.k7 from test; +select 'Remove typed paths'; +select json::JSON(max_dynamic_paths=2) as json2, JSONDynamicPaths(json2), JSONSharedDataPaths(json2), json2.k1, json2.k2, json2.k3, json2.k4, json2.k5, json2.k6, json2.k7 from test; +select 'Change type for typed path'; +select json::JSON(max_dynamic_paths=2, k1 UInt32, k2 DateTime) as json2, JSONDynamicPaths(json2), JSONSharedDataPaths(json2), json2.k1, json2.k2, json2.k3, json2.k4, json2.k5, json2.k6, json2.k7 from test; +select 'Remove and skip typed path'; +select json::JSON(max_dynamic_paths=2, k1 UInt32, SKIP k2) as json2, JSONDynamicPaths(json2), JSONSharedDataPaths(json2), json2.k1, json2.k2, 
json2.k3, json2.k4, json2.k5, json2.k6, json2.k7 from test; +select 'Remove, change and add new typed paths'; +select json::JSON(max_dynamic_paths=2, k2 DateTime, k3 UInt32, k6 Bool, k7 UInt32) as json2, JSONDynamicPaths(json2), JSONSharedDataPaths(json2), json2.k1, json2.k2, json2.k3, json2.k4, json2.k5, json2.k6, json2.k7 from test; + +drop table test; diff --git a/parser/testdata/03272_json_to_json_cast_5/ast.json b/parser/testdata/03272_json_to_json_cast_5/ast.json new file mode 100644 index 000000000..3051536a5 --- /dev/null +++ b/parser/testdata/03272_json_to_json_cast_5/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001345288, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03272_json_to_json_cast_5/metadata.json b/parser/testdata/03272_json_to_json_cast_5/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03272_json_to_json_cast_5/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03272_json_to_json_cast_5/query.sql b/parser/testdata/03272_json_to_json_cast_5/query.sql new file mode 100644 index 000000000..52af8ce8a --- /dev/null +++ b/parser/testdata/03272_json_to_json_cast_5/query.sql @@ -0,0 +1,33 @@ +SET enable_json_type = 1; +set enable_analyzer = 1; +set output_format_native_write_json_as_string = 0; + +drop table if exists test; +create table test (json JSON(max_dynamic_paths=4, k1 UInt32, k2 String)) engine=Memory; +insert into test format JSONAsObject +{"k1" : 1, "k2" : "2020-01-01", "k3" : 31, "k4" : [1, 2, 3], "k5" : "str1", "k7" : 71, "k9" : 91} +{"k1" : 2, "k2" : "2020-01-02", "k4" : [5, 6, 7], "k6" : false, "k9" : 92} +{"k1" : 3, "k2" : "2020-01-03", "k3" : 32, "k5" : "str1", "k7" : 73 } +{"k1" : 4, "k2" : "2020-01-04", "k4" : [8, 9, 0], "k5" : "str1", "k6" : true, "k8" : 84.84, "k9" : 94} + +select 'max_dynamic_paths=5'; +select json::JSON(max_dynamic_paths=5, k2 DateTime, k3 UInt32, k6 Bool, k0 UInt32, SKIP k5, SKIP k8) as json2, JSONDynamicPaths(json2), JSONSharedDataPaths(json2), json2.k0, json2.k1, json2.k2, json2.k3, json2.k4, json2.k5, json2.k6, json2.k7, json2.k8, json2.k9 from test; +select 'max_dynamic_paths=3'; +select json::JSON(max_dynamic_paths=3, k2 DateTime, k3 UInt32, k6 Bool, k0 UInt32, SKIP k5, SKIP k8) as json2, JSONDynamicPaths(json2), JSONSharedDataPaths(json2), json2.k0, json2.k1, json2.k2, json2.k3, json2.k4, json2.k5, json2.k6, json2.k7, json2.k8, json2.k9 from test; +select 'max_dynamic_paths=2'; +select json::JSON(max_dynamic_paths=2, k2 DateTime, k3 UInt32, k6 Bool, k0 UInt32, SKIP k5, SKIP k8) as json2, JSONDynamicPaths(json2), JSONSharedDataPaths(json2), json2.k0, json2.k1, json2.k2, json2.k3, json2.k4, json2.k5, json2.k6, json2.k7, json2.k8, json2.k9 from test; +select 'max_dynamic_paths=1'; +select json::JSON(max_dynamic_paths=1, k2 DateTime, k3 UInt32, k6 Bool, k0 UInt32, SKIP k5, SKIP k8) as json2, JSONDynamicPaths(json2), JSONSharedDataPaths(json2), json2.k0, json2.k1, json2.k2, json2.k3, json2.k4, json2.k5, json2.k6, json2.k7, json2.k8, json2.k9 from test; +select 'max_dynamic_paths=0'; +select json::JSON(max_dynamic_paths=0, k2 DateTime, k3 UInt32, k6 Bool, k0 UInt32, SKIP k5, SKIP k8) as json2, JSONDynamicPaths(json2), JSONSharedDataPaths(json2), json2.k0, json2.k1, json2.k2, json2.k3, json2.k4, json2.k5, json2.k6, json2.k7, json2.k8, json2.k9 from test; +select 'max_dynamic_paths=3, 
max_dynamic_types=0'; +select json::JSON(max_dynamic_paths=3, max_dynamic_types=0, k2 DateTime, k3 UInt32, k6 Bool, k0 UInt32, SKIP k5, SKIP k8) as json2, JSONDynamicPaths(json2), JSONSharedDataPaths(json2), isDynamicElementInSharedData(json2.k1), isDynamicElementInSharedData(json2.k9), json2.k0, json2.k1, json2.k2, json2.k3, json2.k4, json2.k5, json2.k6, json2.k7, json2.k8, json2.k9 from test; +select 'max_dynamic_paths=2, max_dynamic_types=0'; +select json::JSON(max_dynamic_paths=2, max_dynamic_types=0, k2 DateTime, k3 UInt32, k6 Bool, k0 UInt32, SKIP k5, SKIP k8) as json2, JSONDynamicPaths(json2), JSONSharedDataPaths(json2), isDynamicElementInSharedData(json2.k1), isDynamicElementInSharedData(json2.k9), json2.k0, json2.k1, json2.k2, json2.k3, json2.k4, json2.k5, json2.k6, json2.k7, json2.k8, json2.k9 from test; +select 'max_dynamic_paths=1, max_dynamic_types=0'; +select json::JSON(max_dynamic_paths=1, max_dynamic_types=0, k2 DateTime, k3 UInt32, k6 Bool, k0 UInt32, SKIP k5, SKIP k8) as json2, JSONDynamicPaths(json2), JSONSharedDataPaths(json2), isDynamicElementInSharedData(json2.k1), isDynamicElementInSharedData(json2.k9), json2.k0, json2.k1, json2.k2, json2.k3, json2.k4, json2.k5, json2.k6, json2.k7, json2.k8, json2.k9 from test; +select 'max_dynamic_paths=0, max_dynamic_types=0'; +select json::JSON(max_dynamic_paths=0, max_dynamic_types=0, k2 DateTime, k3 UInt32, k6 Bool, k0 UInt32, SKIP k5, SKIP k8) as json2, JSONDynamicPaths(json2), JSONSharedDataPaths(json2), isDynamicElementInSharedData(json2.k1), isDynamicElementInSharedData(json2.k9), json2.k0, json2.k1, json2.k2, json2.k3, json2.k4, json2.k5, json2.k6, json2.k7, json2.k8, json2.k9 from test; + + +drop table test; diff --git a/parser/testdata/03272_json_to_json_cast_6/ast.json b/parser/testdata/03272_json_to_json_cast_6/ast.json new file mode 100644 index 000000000..c224bb43d --- /dev/null +++ b/parser/testdata/03272_json_to_json_cast_6/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001360956, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03272_json_to_json_cast_6/metadata.json b/parser/testdata/03272_json_to_json_cast_6/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03272_json_to_json_cast_6/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03272_json_to_json_cast_6/query.sql b/parser/testdata/03272_json_to_json_cast_6/query.sql new file mode 100644 index 000000000..67e3fbd30 --- /dev/null +++ b/parser/testdata/03272_json_to_json_cast_6/query.sql @@ -0,0 +1,6 @@ +set enable_analyzer=1; + +select '{"a" : [{"b" : 42}]}'::JSON(a Array(JSON)) as json, json.a.b, json::JSON as json2, dynamicType(json2.a), json2.a[].b; + +select '{"transaction": {"date": "2025-03-13 22:20:29.751999"}}'::JSON::JSON(transaction Nullable(JSON(date DateTime64(6, 'UTC')))); + diff --git a/parser/testdata/03272_parallel_replicas_read_in_order/ast.json b/parser/testdata/03272_parallel_replicas_read_in_order/ast.json new file mode 100644 index 000000000..d9a0fb70e --- /dev/null +++ b/parser/testdata/03272_parallel_replicas_read_in_order/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00168593, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git 
a/parser/testdata/03272_parallel_replicas_read_in_order/metadata.json b/parser/testdata/03272_parallel_replicas_read_in_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03272_parallel_replicas_read_in_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03272_parallel_replicas_read_in_order/query.sql b/parser/testdata/03272_parallel_replicas_read_in_order/query.sql new file mode 100644 index 000000000..da1cd4660 --- /dev/null +++ b/parser/testdata/03272_parallel_replicas_read_in_order/query.sql @@ -0,0 +1,26 @@ +SET log_queries = 1; +SET optimize_read_in_order=1; +DROP TABLE IF EXISTS read_in_order_with_parallel_replicas; +CREATE TABLE read_in_order_with_parallel_replicas(id UInt64) ENGINE=MergeTree ORDER BY id SETTINGS index_granularity=1; + +SET max_execution_time = 300; +INSERT INTO read_in_order_with_parallel_replicas SELECT number from system.numbers limit 100000; + +SELECT * from read_in_order_with_parallel_replicas ORDER BY id desc limit 1; +SELECT * from read_in_order_with_parallel_replicas ORDER BY id limit 1; + +SET enable_analyzer=1, enable_parallel_replicas=2, max_parallel_replicas=2, cluster_for_parallel_replicas='parallel_replicas', parallel_replicas_for_non_replicated_merge_tree=1; + +SELECT * from read_in_order_with_parallel_replicas ORDER BY id desc limit 1 +SETTINGS max_threads=1, log_comment='test read in order desc with parallel replicas'; + +SELECT * from read_in_order_with_parallel_replicas ORDER BY id limit 1 +SETTINGS max_threads=1, log_comment='test read in order asc with parallel replicas'; + +-- Check that we don't read more marks with parallel replicas +SYSTEM FLUSH LOGS query_log; +SET parallel_replicas_for_non_replicated_merge_tree=0; +select count(1) from system.query_log where current_database = currentDatabase() AND log_comment = 'test read in order desc with parallel replicas' and read_rows>2; +select count(1) from system.query_log where current_database = currentDatabase() AND log_comment = 'test read in order asc with parallel replicas' and read_rows>2; + +DROP TABLE read_in_order_with_parallel_replicas; diff --git a/parser/testdata/03272_partition_pruning_monotonic_func_bug/ast.json b/parser/testdata/03272_partition_pruning_monotonic_func_bug/ast.json new file mode 100644 index 000000000..dfb1158e7 --- /dev/null +++ b/parser/testdata/03272_partition_pruning_monotonic_func_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001890009, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03272_partition_pruning_monotonic_func_bug/metadata.json b/parser/testdata/03272_partition_pruning_monotonic_func_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03272_partition_pruning_monotonic_func_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03272_partition_pruning_monotonic_func_bug/query.sql b/parser/testdata/03272_partition_pruning_monotonic_func_bug/query.sql new file mode 100644 index 000000000..b69b7b8a7 --- /dev/null +++ b/parser/testdata/03272_partition_pruning_monotonic_func_bug/query.sql @@ -0,0 +1,19 @@ +SET session_timezone = 'Etc/UTC'; + +DROP TABLE IF EXISTS tt; +CREATE TABLE tt +( + `id` Int64, + `ts` DateTime +) +ENGINE = MergeTree() +ORDER BY dateTrunc('hour', ts) +SETTINGS index_granularity = 8192; + +INSERT INTO tt VALUES (1, 
'2024-11-14 00:00:00'), (2, '2024-11-14 00:00:00'); + +SELECT id FROM tt PREWHERE ts BETWEEN toDateTime(1731506400) AND toDateTime(1731594420); + +explain indexes=1, description=0 SELECT id FROM tt PREWHERE ts BETWEEN toDateTime(1731506400) AND toDateTime(1731594420); + +DROP TABLE tt; diff --git a/parser/testdata/03272_prewarm_mark_cache_add_column/ast.json b/parser/testdata/03272_prewarm_mark_cache_add_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03272_prewarm_mark_cache_add_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03272_prewarm_mark_cache_add_column/metadata.json b/parser/testdata/03272_prewarm_mark_cache_add_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03272_prewarm_mark_cache_add_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03272_prewarm_mark_cache_add_column/query.sql b/parser/testdata/03272_prewarm_mark_cache_add_column/query.sql new file mode 100644 index 000000000..a293d7c4f --- /dev/null +++ b/parser/testdata/03272_prewarm_mark_cache_add_column/query.sql @@ -0,0 +1,30 @@ +-- Tags: no-parallel +-- no-parallel: SYSTEM DROP MARK CACHE is used. + +DROP TABLE IF EXISTS t_prewarm_add_column; + +CREATE TABLE t_prewarm_add_column (a UInt64) +ENGINE = MergeTree ORDER BY a +SETTINGS prewarm_mark_cache = 1, min_bytes_for_wide_part = 0; + +-- Drop mark cache because it may be full and we will fail to add new entries to it. +SYSTEM DROP MARK CACHE; +SYSTEM STOP MERGES t_prewarm_add_column; + +INSERT INTO t_prewarm_add_column VALUES (1); + +ALTER TABLE t_prewarm_add_column ADD COLUMN b UInt64; + +INSERT INTO t_prewarm_add_column VALUES (2, 2); + +DETACH TABLE t_prewarm_add_column; +ATTACH TABLE t_prewarm_add_column; + +SELECT * FROM t_prewarm_add_column ORDER BY a; +SYSTEM FLUSH LOGS query_log; + +SELECT ProfileEvents['LoadedMarksCount'] FROM system.query_log +WHERE current_database = currentDatabase() AND type = 'QueryFinish' AND query LIKE 'SELECT * FROM t_prewarm_add_column%' +ORDER BY event_time_microseconds; + +DROP TABLE t_prewarm_add_column; diff --git a/parser/testdata/03273_better_json_subcolumns_parsing/ast.json b/parser/testdata/03273_better_json_subcolumns_parsing/ast.json new file mode 100644 index 000000000..434cc3c50 --- /dev/null +++ b/parser/testdata/03273_better_json_subcolumns_parsing/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001458818, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03273_better_json_subcolumns_parsing/metadata.json b/parser/testdata/03273_better_json_subcolumns_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03273_better_json_subcolumns_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03273_better_json_subcolumns_parsing/query.sql b/parser/testdata/03273_better_json_subcolumns_parsing/query.sql new file mode 100644 index 000000000..a6e286584 --- /dev/null +++ b/parser/testdata/03273_better_json_subcolumns_parsing/query.sql @@ -0,0 +1,7 @@ +SET enable_json_type = 1; +drop table if exists test; +create table test (json JSON) engine=Memory; +insert into test format JSONAsObject {"a" : 42}, {"a" : 42.42}, {"a" : 43}; + +select dynamicType(json.a), json.a from test; +drop table test; diff --git 
a/parser/testdata/03273_dynamic_pretty_json_serialization/ast.json b/parser/testdata/03273_dynamic_pretty_json_serialization/ast.json new file mode 100644 index 000000000..7189ef0d7 --- /dev/null +++ b/parser/testdata/03273_dynamic_pretty_json_serialization/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001414533, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03273_dynamic_pretty_json_serialization/metadata.json b/parser/testdata/03273_dynamic_pretty_json_serialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03273_dynamic_pretty_json_serialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03273_dynamic_pretty_json_serialization/query.sql b/parser/testdata/03273_dynamic_pretty_json_serialization/query.sql new file mode 100644 index 000000000..0c5ff73d8 --- /dev/null +++ b/parser/testdata/03273_dynamic_pretty_json_serialization/query.sql @@ -0,0 +1,6 @@ +set allow_experimental_dynamic_type = 1; + +select 'Hello'::Dynamic(max_types=1) as d format PrettyJSONEachRow; +select 'Hello'::Dynamic(max_types=0) as d format PrettyJSONEachRow; + + diff --git a/parser/testdata/03273_format_inference_create_query_s3_url/ast.json b/parser/testdata/03273_format_inference_create_query_s3_url/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03273_format_inference_create_query_s3_url/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03273_format_inference_create_query_s3_url/metadata.json b/parser/testdata/03273_format_inference_create_query_s3_url/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03273_format_inference_create_query_s3_url/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03273_format_inference_create_query_s3_url/query.sql b/parser/testdata/03273_format_inference_create_query_s3_url/query.sql new file mode 100644 index 000000000..5d653843e --- /dev/null +++ b/parser/testdata/03273_format_inference_create_query_s3_url/query.sql @@ -0,0 +1,59 @@ +-- Tags: no-fasttest + +drop table if exists test; + +create table test engine=S3('http://localhost:11111/test/json_data'); +show create table test; +drop table test; + +create table test engine=S3('http://localhost:11111/test/json_data', NOSIGN); +show create table test; +drop table test; + +create table test engine=S3('http://localhost:11111/test/json_data', auto); +show create table test; +drop table test; + +create table test engine=S3('http://localhost:11111/test/json_data', auto, 'none'); +show create table test; +drop table test; + +create table test engine=S3('http://localhost:11111/test/json_data', NOSIGN, auto); +show create table test; +drop table test; + +create table test engine=S3('http://localhost:11111/test/json_data', 'test', 'testtest'); +show create table test; +drop table test; + +create table test engine=S3('http://localhost:11111/test/json_data', NOSIGN, auto, 'none'); +show create table test; +drop table test; + +create table test engine=S3('http://localhost:11111/test/json_data', 'test', 'testtest', ''); +show create table test; +drop table test; + +create table test engine=S3('http://localhost:11111/test/json_data', 'test', 'testtest', '', auto); +show create table test; +drop table test; + +create table test 
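+-- (no column list and format 'auto': both structure and format are presumably
+-- inferred from the data behind the URL, and the SHOW CREATE TABLE that
+-- follows prints the inferred schema)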
engine=S3('http://localhost:11111/test/json_data', 'test', 'testtest', auto, 'none'); +show create table test; +drop table test; + +create table test engine=S3('http://localhost:11111/test/json_data', 'test', 'testtest', '', auto, 'none'); +show create table test; +drop table test; + +create table test engine=URL('http://localhost:11111/test/json_data'); +show create table test; +drop table test; + +create table test engine=URL('http://localhost:11111/test/json_data', auto); +show create table test; +drop table test; + +create table test engine=URL('http://localhost:11111/test/json_data', auto, 'none'); +show create table test; +drop table test; diff --git a/parser/testdata/03273_group_by_in_order_still_used_when_group_by_key_doesnt_match_order_by_key/ast.json b/parser/testdata/03273_group_by_in_order_still_used_when_group_by_key_doesnt_match_order_by_key/ast.json new file mode 100644 index 000000000..10fad7e4c --- /dev/null +++ b/parser/testdata/03273_group_by_in_order_still_used_when_group_by_key_doesnt_match_order_by_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001370329, + "rows_read": 2, + "bytes_read": 61 + } +} diff --git a/parser/testdata/03273_group_by_in_order_still_used_when_group_by_key_doesnt_match_order_by_key/metadata.json b/parser/testdata/03273_group_by_in_order_still_used_when_group_by_key_doesnt_match_order_by_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03273_group_by_in_order_still_used_when_group_by_key_doesnt_match_order_by_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03273_group_by_in_order_still_used_when_group_by_key_doesnt_match_order_by_key/query.sql b/parser/testdata/03273_group_by_in_order_still_used_when_group_by_key_doesnt_match_order_by_key/query.sql new file mode 100644 index 000000000..d4585ea54 --- /dev/null +++ b/parser/testdata/03273_group_by_in_order_still_used_when_group_by_key_doesnt_match_order_by_key/query.sql @@ -0,0 +1,24 @@ +CREATE TABLE test +( + a UInt64, + b UInt64 +) +ENGINE = MergeTree +ORDER BY (a, b); + +INSERT INTO test SELECT number, number FROM numbers_mt(1e6); + +set enable_analyzer = 1; + +SELECT trimBoth(replaceRegexpAll(explain, '__table1.', '')) +FROM +( + EXPLAIN actions = 1 + SELECT count(*) + FROM test + GROUP BY + b, + a + SETTINGS optimize_aggregation_in_order = 1 +) +WHERE explain LIKE '%Order%'; diff --git a/parser/testdata/03273_primary_index_cache/ast.json b/parser/testdata/03273_primary_index_cache/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03273_primary_index_cache/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03273_primary_index_cache/metadata.json b/parser/testdata/03273_primary_index_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03273_primary_index_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03273_primary_index_cache/query.sql b/parser/testdata/03273_primary_index_cache/query.sql new file mode 100644 index 000000000..800612fcb --- /dev/null +++ b/parser/testdata/03273_primary_index_cache/query.sql @@ -0,0 +1,41 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS t_primary_index_cache; + +CREATE TABLE t_primary_index_cache (a UInt64, b UInt64) +ENGINE 
= MergeTree ORDER BY a PARTITION BY a % 2 +SETTINGS use_primary_key_cache = 1, prewarm_primary_key_cache = 0, index_granularity = 64, index_granularity_bytes = '10M', min_bytes_for_wide_part = 0; + +SYSTEM DROP PRIMARY INDEX CACHE; + +INSERT INTO t_primary_index_cache SELECT number, number FROM numbers(10000); + +SELECT sum(primary_key_bytes_in_memory) FROM system.parts WHERE table = 't_primary_index_cache' AND active; +SELECT metric, value FROM system.metrics WHERE metric IN ('PrimaryIndexCacheFiles', 'PrimaryIndexCacheBytes') ORDER BY metric; + +SELECT count() FROM t_primary_index_cache WHERE a > 100 AND a < 200; + +SELECT sum(primary_key_bytes_in_memory) FROM system.parts WHERE table = 't_primary_index_cache' AND active; +SELECT metric, value FROM system.metrics WHERE metric IN ('PrimaryIndexCacheFiles', 'PrimaryIndexCacheBytes') ORDER BY metric; + +SYSTEM DROP PRIMARY INDEX CACHE; + +SELECT sum(primary_key_bytes_in_memory) FROM system.parts WHERE table = 't_primary_index_cache' AND active; +SELECT metric, value FROM system.metrics WHERE metric IN ('PrimaryIndexCacheFiles', 'PrimaryIndexCacheBytes') ORDER BY metric; + +SELECT count() FROM t_primary_index_cache WHERE a > 100 AND a < 200 AND a % 2 = 0; + +SELECT sum(primary_key_bytes_in_memory) FROM system.parts WHERE table = 't_primary_index_cache' AND active; +SELECT metric, value FROM system.metrics WHERE metric IN ('PrimaryIndexCacheFiles', 'PrimaryIndexCacheBytes') ORDER BY metric; + +SYSTEM FLUSH LOGS query_log; + +SELECT + ProfileEvents['LoadedPrimaryIndexFiles'], + ProfileEvents['LoadedPrimaryIndexRows'], + ProfileEvents['LoadedPrimaryIndexBytes'] +FROM system.query_log +WHERE query LIKE 'SELECT count() FROM t_primary_index_cache%' AND current_database = currentDatabase() AND type = 'QueryFinish' +ORDER BY event_time_microseconds; + +DROP TABLE t_primary_index_cache; diff --git a/parser/testdata/03273_primary_index_cache_low_cardinality/ast.json b/parser/testdata/03273_primary_index_cache_low_cardinality/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03273_primary_index_cache_low_cardinality/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03273_primary_index_cache_low_cardinality/metadata.json b/parser/testdata/03273_primary_index_cache_low_cardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03273_primary_index_cache_low_cardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03273_primary_index_cache_low_cardinality/query.sql b/parser/testdata/03273_primary_index_cache_low_cardinality/query.sql new file mode 100644 index 000000000..33dcd184d --- /dev/null +++ b/parser/testdata/03273_primary_index_cache_low_cardinality/query.sql @@ -0,0 +1,38 @@ +-- Tags: no-parallel + +DROP TABLE IF EXISTS t_primary_index_cache; + +SYSTEM DROP PRIMARY INDEX CACHE; + +CREATE TABLE t_primary_index_cache (a LowCardinality(String), b LowCardinality(String)) +ENGINE = MergeTree ORDER BY (a, b) +SETTINGS use_primary_key_cache = 1, prewarm_primary_key_cache = 1, index_granularity = 8192, index_granularity_bytes = '10M', min_bytes_for_wide_part = 0; + +-- Insert will prewarm primary index cache +INSERT INTO t_primary_index_cache SELECT number%10, number%11 FROM numbers(10000); + +-- Check cache size +SELECT metric, value FROM system.metrics WHERE metric IN ('PrimaryIndexCacheFiles', 'PrimaryIndexCacheBytes') ORDER BY metric; + +SYSTEM DROP PRIMARY INDEX CACHE; + +-- Check that cache is empty +SELECT 
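+-- (after SYSTEM DROP PRIMARY INDEX CACHE both metrics should presumably read
+-- zero; the same probe is repeated after the reload query below to show the
+-- cache being repopulated)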
metric, value FROM system.metrics WHERE metric IN ('PrimaryIndexCacheFiles', 'PrimaryIndexCacheBytes') ORDER BY metric; + +-- Trigger index reload +SELECT max(length(a || b)) FROM t_primary_index_cache WHERE a > '1' AND b < '99' SETTINGS log_comment = '03273_reload_query'; + +-- Check that cache size is the same as after prewarm +SELECT metric, value FROM system.metrics WHERE metric IN ('PrimaryIndexCacheFiles', 'PrimaryIndexCacheBytes') ORDER BY metric; + +SYSTEM FLUSH LOGS query_log; + +SELECT + ProfileEvents['LoadedPrimaryIndexFiles'], + ProfileEvents['LoadedPrimaryIndexRows'], + ProfileEvents['LoadedPrimaryIndexBytes'] +FROM system.query_log +WHERE log_comment = '03273_reload_query' AND current_database = currentDatabase() AND type = 'QueryFinish' +ORDER BY event_time_microseconds; + +DROP TABLE t_primary_index_cache; diff --git a/parser/testdata/03273_select_from_explain_ast_non_select/ast.json b/parser/testdata/03273_select_from_explain_ast_non_select/ast.json new file mode 100644 index 000000000..098f39c4f --- /dev/null +++ b/parser/testdata/03273_select_from_explain_ast_non_select/ast.json @@ -0,0 +1,121 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function viewExplain (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'EXPLAIN AST'" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 33, + + "statistics": + { + "elapsed": 0.001740871, + "rows_read": 33, + "bytes_read": 1521 + } +} diff --git a/parser/testdata/03273_select_from_explain_ast_non_select/metadata.json b/parser/testdata/03273_select_from_explain_ast_non_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03273_select_from_explain_ast_non_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03273_select_from_explain_ast_non_select/query.sql b/parser/testdata/03273_select_from_explain_ast_non_select/query.sql new file mode 100644 index 
000000000..729818641 --- /dev/null +++ b/parser/testdata/03273_select_from_explain_ast_non_select/query.sql @@ -0,0 +1,5 @@ +SELECT * FROM ( EXPLAIN AST SELECT * FROM numbers(10) ); +SELECT * FROM ( EXPLAIN AST CREATE TABLE test ENGINE=Memory ); -- {clientError BAD_ARGUMENTS} +SELECT * FROM ( EXPLAIN AST CREATE MATERIALIZED VIEW mv (data String) AS SELECT data FROM table ); -- {clientError BAD_ARGUMENTS} +SELECT * FROM ( EXPLAIN AST INSERT INTO TABLE test VALUES); -- {clientError BAD_ARGUMENTS} +SELECT * FROM ( EXPLAIN AST ALTER TABLE test MODIFY COLUMN x UInt32 ); -- {clientError BAD_ARGUMENTS} diff --git a/parser/testdata/03274_aliases_in_udf/ast.json b/parser/testdata/03274_aliases_in_udf/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03274_aliases_in_udf/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03274_aliases_in_udf/metadata.json b/parser/testdata/03274_aliases_in_udf/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03274_aliases_in_udf/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03274_aliases_in_udf/query.sql b/parser/testdata/03274_aliases_in_udf/query.sql new file mode 100644 index 000000000..db1efcf68 --- /dev/null +++ b/parser/testdata/03274_aliases_in_udf/query.sql @@ -0,0 +1,51 @@ +-- Tags: no-parallel + +SET skip_redundant_aliases_in_udf = 0; + +SELECT 'FIX ISSUE #69143'; + +DROP TABLE IF EXISTS test_table; + +CREATE FUNCTION IF NOT EXISTS 03274_test_function AS ( input_column_name ) -> (( + '1' AS a, + input_column_name AS input_column_name + ).2); + +CREATE TABLE IF NOT EXISTS test_table +( + `metadata_a` String, + `metadata_b` String +) +ENGINE = MergeTree() +ORDER BY tuple(); + +ALTER TABLE test_table ADD COLUMN mat_a String MATERIALIZED 03274_test_function(metadata_a); +ALTER TABLE test_table MATERIALIZE COLUMN `mat_a`; + +ALTER TABLE test_table ADD COLUMN mat_b String MATERIALIZED 03274_test_function(metadata_b); -- { serverError MULTIPLE_EXPRESSIONS_FOR_ALIAS } + +SET skip_redundant_aliases_in_udf = 1; + +ALTER TABLE test_table ADD COLUMN mat_b String MATERIALIZED 03274_test_function(metadata_b); +ALTER TABLE test_table MATERIALIZE COLUMN `mat_b`; + +INSERT INTO test_table SELECT 'a', 'b'; + +SELECT mat_a FROM test_table; +SELECT mat_b FROM test_table; + +SELECT 'EXPLAIN SYNTAX OF UDF'; + +CREATE FUNCTION IF NOT EXISTS test_03274 AS ( x ) -> ((x + 1 as y, y + 2)); + +SET skip_redundant_aliases_in_udf = 0; + +EXPLAIN SYNTAX SELECT test_03274(4 + 2); + +SET skip_redundant_aliases_in_udf = 1; + +EXPLAIN SYNTAX SELECT test_03274(4 + 2); + +DROP FUNCTION 03274_test_function; +DROP FUNCTION test_03274; +DROP TABLE IF EXISTS test_table; diff --git a/parser/testdata/03274_dynamic_column_data_race_with_concurrent_hj/ast.json b/parser/testdata/03274_dynamic_column_data_race_with_concurrent_hj/ast.json new file mode 100644 index 000000000..ba4a7d5ee --- /dev/null +++ b/parser/testdata/03274_dynamic_column_data_race_with_concurrent_hj/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001189358, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03274_dynamic_column_data_race_with_concurrent_hj/metadata.json b/parser/testdata/03274_dynamic_column_data_race_with_concurrent_hj/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03274_dynamic_column_data_race_with_concurrent_hj/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03274_dynamic_column_data_race_with_concurrent_hj/query.sql b/parser/testdata/03274_dynamic_column_data_race_with_concurrent_hj/query.sql new file mode 100644 index 000000000..6e3766336 --- /dev/null +++ b/parser/testdata/03274_dynamic_column_data_race_with_concurrent_hj/query.sql @@ -0,0 +1,8 @@ +SET join_algorithm = 'parallel_hash'; +SET allow_experimental_dynamic_type = 1; +SET allow_dynamic_type_in_join_keys = 1; +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Tuple(c1 Int,c2 Dynamic)) ENGINE = Memory(); +SELECT 1 FROM t0 tx JOIN t0 ty ON tx.c0 = ty.c0; +DROP TABLE t0; + diff --git a/parser/testdata/03274_dynamic_column_sizes_vertical_merge/ast.json b/parser/testdata/03274_dynamic_column_sizes_vertical_merge/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03274_dynamic_column_sizes_vertical_merge/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03274_dynamic_column_sizes_vertical_merge/metadata.json b/parser/testdata/03274_dynamic_column_sizes_vertical_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03274_dynamic_column_sizes_vertical_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03274_dynamic_column_sizes_vertical_merge/query.sql b/parser/testdata/03274_dynamic_column_sizes_vertical_merge/query.sql new file mode 100644 index 000000000..974a1d3cc --- /dev/null +++ b/parser/testdata/03274_dynamic_column_sizes_vertical_merge/query.sql @@ -0,0 +1,24 @@ +-- Tags: no-random-settings, no-fasttest + +set allow_experimental_dynamic_type = 1; +SET enable_json_type = 1; + + +drop table if exists test; +create table test (d Dynamic, json JSON) engine=MergeTree order by tuple() settings min_rows_for_wide_part=0, min_bytes_for_wide_part=0, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=0, object_serialization_version='v2', dynamic_serialization_version='v2'; +insert into test select number, '{"a" : 42, "b" : "Hello, World"}' from numbers(1000000); +insert into test select number, '{"a" : 42, "b" : "Hello, World"}' from numbers(1000000); +optimize table test final; + +SELECT + `table`, + sum(rows) AS rows, + floor(sum(data_uncompressed_bytes) / (1024 * 1024)) AS data_size_uncompressed, + floor(sum(data_compressed_bytes) / (1024 * 1024)) AS data_size_compressed, + floor(sum(bytes_on_disk) / (1024 * 1024)) AS total_size_on_disk +FROM system.parts +WHERE active AND (database = currentDatabase()) AND (`table` = 'test') +GROUP BY `table` +ORDER BY `table` ASC; + +drop table test; diff --git a/parser/testdata/03274_grace_hash_max_joined_block_size_rows_bug/ast.json b/parser/testdata/03274_grace_hash_max_joined_block_size_rows_bug/ast.json new file mode 100644 index 000000000..9d6df5294 --- /dev/null +++ b/parser/testdata/03274_grace_hash_max_joined_block_size_rows_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001416124, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03274_grace_hash_max_joined_block_size_rows_bug/metadata.json b/parser/testdata/03274_grace_hash_max_joined_block_size_rows_bug/metadata.json new file mode 100644 
index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03274_grace_hash_max_joined_block_size_rows_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03274_grace_hash_max_joined_block_size_rows_bug/query.sql b/parser/testdata/03274_grace_hash_max_joined_block_size_rows_bug/query.sql new file mode 100644 index 000000000..c91e226fd --- /dev/null +++ b/parser/testdata/03274_grace_hash_max_joined_block_size_rows_bug/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; + +CREATE TABLE t0 (x UInt64) ENGINE = MergeTree ORDER BY x; +INSERT INTO t0 SELECT number from numbers(20); + +CREATE TABLE t1 (x UInt64) ENGINE = MergeTree ORDER BY x; +INSERT INTO t1 SELECT number from numbers(5, 20); + +SET max_joined_block_size_rows = 1; +SET grace_hash_join_initial_buckets = 2; +SET join_algorithm = 'grace_hash'; + +SELECT sum(x), count() FROM t0 JOIN t1 USING x; + diff --git a/parser/testdata/03274_join_algorithm_default/ast.json b/parser/testdata/03274_join_algorithm_default/ast.json new file mode 100644 index 000000000..74117f7c0 --- /dev/null +++ b/parser/testdata/03274_join_algorithm_default/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001088526, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03274_join_algorithm_default/metadata.json b/parser/testdata/03274_join_algorithm_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03274_join_algorithm_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03274_join_algorithm_default/query.sql b/parser/testdata/03274_join_algorithm_default/query.sql new file mode 100644 index 000000000..d4f307464 --- /dev/null +++ b/parser/testdata/03274_join_algorithm_default/query.sql @@ -0,0 +1,62 @@ +SET query_plan_join_swap_table = false; +SET allow_experimental_analyzer = 1; +SET enable_parallel_replicas=0; +SET query_plan_optimize_join_order_limit = 0; + +-- Test that with default join_algorithm setting, we are doing a parallel hash join + +SELECT value == 'direct,parallel_hash,hash' FROM system.settings WHERE name = 'join_algorithm'; + +EXPLAIN PIPELINE +SELECT + * +FROM + ( + SELECT * FROM system.numbers LIMIT 100000 + ) t1 + JOIN + ( + SELECT * FROM system.numbers LIMIT 100000 + ) t2 +USING number +SETTINGS max_threads=16; + +-- Test that join_algorithm = default does a hash join + +SET join_algorithm='default'; + +SELECT value == 'default' FROM system.settings WHERE name = 'join_algorithm'; + +EXPLAIN PIPELINE +SELECT + * +FROM + ( + SELECT * FROM system.numbers LIMIT 100000 + ) t1 + JOIN + ( + SELECT * FROM system.numbers LIMIT 100000 + ) t2 +USING number +SETTINGS max_threads=16; + +SET join_algorithm=DEFAULT; -- reset + +-- Check that compat setting also achieves a hash join + +SET compatibility='24.11'; + +EXPLAIN PIPELINE +SELECT + * +FROM + ( + SELECT * FROM system.numbers LIMIT 100000 + ) t1 + JOIN + ( + SELECT * FROM system.numbers LIMIT 100000 + ) t2 +USING number +SETTINGS max_threads=16; diff --git a/parser/testdata/03274_json_pretty_output/ast.json b/parser/testdata/03274_json_pretty_output/ast.json new file mode 100644 index 000000000..85ffadc56 --- /dev/null +++ b/parser/testdata/03274_json_pretty_output/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } 
+ ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001536852, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03274_json_pretty_output/metadata.json b/parser/testdata/03274_json_pretty_output/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03274_json_pretty_output/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03274_json_pretty_output/query.sql b/parser/testdata/03274_json_pretty_output/query.sql new file mode 100644 index 000000000..a54af6ce5 --- /dev/null +++ b/parser/testdata/03274_json_pretty_output/query.sql @@ -0,0 +1,7 @@ +set enable_named_columns_in_function_tuple=1; +set output_format_write_statistics=0; +set enable_analyzer=1; + +select tuple(1 as b, 2 as c) as a, map('e', 3, 'f', 4) as d, [5, 6] as g, tuple(map('i', [tuple(7 as j, 8 as k)]) as l, 42 as m) as h format JSON; +select tuple(1 as b, 2 as c) as a, map('e', 3, 'f', 4) as d, [5, 6] as g, tuple(map('i', [tuple(7 as j, 8 as k)]) as l, 42 as m) as h format JSON settings output_format_json_pretty_print=0; + diff --git a/parser/testdata/03274_json_to_json_alter_nested_json/ast.json b/parser/testdata/03274_json_to_json_alter_nested_json/ast.json new file mode 100644 index 000000000..ca270f18d --- /dev/null +++ b/parser/testdata/03274_json_to_json_alter_nested_json/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.0015231, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03274_json_to_json_alter_nested_json/metadata.json b/parser/testdata/03274_json_to_json_alter_nested_json/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03274_json_to_json_alter_nested_json/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03274_json_to_json_alter_nested_json/query.sql b/parser/testdata/03274_json_to_json_alter_nested_json/query.sql new file mode 100644 index 000000000..63e7e53c0 --- /dev/null +++ b/parser/testdata/03274_json_to_json_alter_nested_json/query.sql @@ -0,0 +1,68 @@ +SET enable_json_type = 1; +set output_format_native_write_json_as_string = 0; + +drop table if exists test; +create table test (json JSON(max_dynamic_paths=8, max_dynamic_types=4)) engine=Memory; +insert into test format JSONAsObject +{"k1" : [{"k1_1" : [{"k1_1_1" : 42}], "k1_2" : [42], "k1_3" : [{"k1_3_1" : 42}]}], "k2" : [42], "k3" : 42, "k4" : 42, "k5" : 42, "k6" : 42, "k7" : 42, "k8" : 42, "k9" : [{"k9_1" : [{"k9_1_1" : 42}], "k9_2" : [42], "k9_3" : [{"k9_3_1" : 42}]}]} +{"k1" : [{"k1_1" : [{"k1_1_1" : 42}], "k1_2" : [[42]], "k1_3" : [{"k1_3_1" : 42}]}], "k2" : [[42]], "k3" : 42, "k4" : 42, "k5" : 42, "k6" : 42, "k7" : 42, "k8" : 42, "k9" : [{"k9_1" : [{"k9_1_1" : 42}], "k9_2" : [[42]], "k9_3" : [{"k9_3_1" : 42}]}]} +{"k1" : [{"k1_1" : [{"k1_1_1" : 42}], "k1_2" : [[[42]]], "k1_3" : [{"k1_3_1" : 42}]}], "k2" : [[[42]]], "k3" : 42, "k4" : 42, "k5" : 42, "k6" : 42, "k7" : 42, "k8" : 42, "k9" : [{"k9_1" : [{"k9_1_1" : 42}], "k9_2" : [[[42]]], "k9_3" : [{"k9_3_1" : 42}]}]} +{"k1" : [{"k1_1" : [{"k1_1_1" : 42}], "k1_2" : [[[[42]]]], "k1_3" : [{"k1_3_1" : 42}]}], "k2" : [[[[42]]]], "k3" : 42, "k4" : 42, "k5" : 42, "k6" : 42, "k7" : 42, "k8" : 42, "k9" : [{"k9_1" : [{"k9_1_1" : 42}], "k9_2" : [[[[42]]]], "k9_3" : [{"k9_3_1" : 42}]}]} +{"k1" : [{"k1_1" : [{"k1_1_1" : 42}], "k1_2" : [{"k1_2_1" : 42}], "k1_3" : [{"k1_3_1" : 42}]}], 
"k2" : [{"k2_1" : [{"k2_1_1" : 42}], "k2_2" : [{"k2_2_1" : 42}], "k2_3" : [{"k2_3_1" : 42}]}], "k3" : 42, "k4" : 42, "k5" : 42, "k6" : 42, "k7" : 42, "k8" : 42, "k9" : [{"k9_1" : [{"k9_1_1" : 42}], "k9_2" : [{"k9_2_1" : 42}], "k9_3" : [{"k9_3_1" : 42}]}]} + + +select 'json'; +select arrayJoin(distinctJSONPathsAndTypes(json)) from test; +select 'k1'; +select arrayJoin(distinctJSONPathsAndTypes(arrayJoin(json.k1[]))) from test; +select 'k2'; +select arrayJoin(distinctJSONPathsAndTypes(arrayJoin(json.k2[]))) from test; +select 'k9'; +select arrayJoin(distinctJSONPathsAndTypes(arrayJoin(json.k9[]))) from test; +select 'subcolumns'; +select json.k1[], json.k1[].k1_1[], json.k1[].k1_1[].k1_1_1, json.k1[].k1_2[], json.k1[].k1_2[].k1_2_1, json.k2[], json.k2[].k2_1[], json.k2[].k2_1[].k2_1_1, json.k2[].k2_2[], json.k2[].k2_2[].k2_2_1, json.k9[], json.k9[].k9_1[], json.k9[].k9_1[].k9_1_1, json.k9[].k9_2[], json.k9[].k9_2[].k9_2_1 from test format JSONColumns; + +drop table if exists test2; +create table test2 (json JSON(max_dynamic_paths=16, max_dynamic_types=8)) engine=Memory; +insert into test2 select json from test; +select 'json'; +select arrayJoin(distinctJSONPathsAndTypes(json)) from test2; +select 'k1'; +select arrayJoin(distinctJSONPathsAndTypes(arrayJoin(json.k1[]))) from test2; +select 'k2'; +select arrayJoin(distinctJSONPathsAndTypes(arrayJoin(json.k2[]))) from test2; +select 'k9'; +select arrayJoin(distinctJSONPathsAndTypes(arrayJoin(json.k9[]))) from test2; +select 'subcolumns'; +select json.k1[], json.k1[].k1_1[], json.k1[].k1_1[].k1_1_1, json.k1[].k1_2[], json.k1[].k1_2[].k1_2_1, json.k2[], json.k2[].k2_1[], json.k2[].k2_1[].k2_1_1, json.k2[].k2_2[], json.k2[].k2_2[].k2_2_1, json.k9[], json.k9[].k9_1[], json.k9[].k9_1[].k9_1_1, json.k9[].k9_2[], json.k9[].k9_2[].k9_2_1 from test format JSONColumns; + +create table test3 (json JSON(max_dynamic_paths=4, max_dynamic_types=2)) engine=Memory; +insert into test3 select json from test; +select 'json'; +select arrayJoin(distinctJSONPathsAndTypes(json)) from test3; +select 'k1'; +select arrayJoin(distinctJSONPathsAndTypes(arrayJoin(json.k1[]))) from test3; +select 'k2'; +select arrayJoin(distinctJSONPathsAndTypes(arrayJoin(json.k2[]))) from test3; +select 'k9'; +select arrayJoin(distinctJSONPathsAndTypes(arrayJoin(json.k9[]))) from test3; +select 'subcolumns'; +select json.k1[], json.k1[].k1_1[], json.k1[].k1_1[].k1_1_1, json.k1[].k1_2[], json.k1[].k1_2[].k1_2_1, json.k2[], json.k2[].k2_1[], json.k2[].k2_1[].k2_1_1, json.k2[].k2_2[], json.k2[].k2_2[].k2_2_1, json.k9[], json.k9[].k9_1[], json.k9[].k9_1[].k9_1_1, json.k9[].k9_2[], json.k9[].k9_2[].k9_2_1 from test format JSONColumns; + +create table test4 (json JSON(max_dynamic_paths=8, max_dynamic_types=4)) engine=Memory; +insert into test4 select json from test2; +select 'json'; +select arrayJoin(distinctJSONPathsAndTypes(json)) from test4; +select 'k1'; +select arrayJoin(distinctJSONPathsAndTypes(arrayJoin(json.k1[]))) from test4; +select 'k2'; +select arrayJoin(distinctJSONPathsAndTypes(arrayJoin(json.k2[]))) from test4; +select 'k9'; +select arrayJoin(distinctJSONPathsAndTypes(arrayJoin(json.k9[]))) from test4; +select 'subcolumns'; +select json.k1[], json.k1[].k1_1[], json.k1[].k1_1[].k1_1_1, json.k1[].k1_2[], json.k1[].k1_2[].k1_2_1, json.k2[], json.k2[].k2_1[], json.k2[].k2_1[].k2_1_1, json.k2[].k2_2[], json.k2[].k2_2[].k2_2_1, json.k9[], json.k9[].k9_1[], json.k9[].k9_1[].k9_1_1, json.k9[].k9_2[], json.k9[].k9_2[].k9_2_1 from test format JSONColumns; + +drop table test; +drop table 
test2; +drop table test3; +drop table test4; diff --git a/parser/testdata/03274_philipzucker/ast.json b/parser/testdata/03274_philipzucker/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03274_philipzucker/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03274_philipzucker/metadata.json b/parser/testdata/03274_philipzucker/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03274_philipzucker/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03274_philipzucker/query.sql b/parser/testdata/03274_philipzucker/query.sql new file mode 100644 index 000000000..5acaff2ed --- /dev/null +++ b/parser/testdata/03274_philipzucker/query.sql @@ -0,0 +1,29 @@ +-- https://www.philipzucker.com/sql_graph_csp/ + +set enable_analyzer = 1; + +WITH RECURSIVE digits AS ( + SELECT 0 AS digit + UNION ALL + SELECT digit + 1 + FROM digits + WHERE digit < 9 +) +SELECT s.digit AS S, e.digit AS E, n.digit AS N, d.digit AS D, + m.digit AS M, o.digit AS O, r.digit AS R, y.digit AS Y +FROM digits s, digits e, digits n, digits d, digits m, digits o, digits r, digits y +WHERE s.digit <> e.digit AND s.digit <> n.digit AND s.digit <> d.digit AND s.digit <> m.digit AND + s.digit <> o.digit AND s.digit <> r.digit AND s.digit <> y.digit AND + e.digit <> n.digit AND e.digit <> d.digit AND e.digit <> m.digit AND + e.digit <> o.digit AND e.digit <> r.digit AND e.digit <> y.digit AND + n.digit <> d.digit AND n.digit <> m.digit AND n.digit <> o.digit AND + n.digit <> r.digit AND n.digit <> y.digit AND + d.digit <> m.digit AND d.digit <> o.digit AND d.digit <> r.digit AND + d.digit <> y.digit AND + m.digit <> o.digit AND m.digit <> r.digit AND m.digit <> y.digit AND + o.digit <> r.digit AND o.digit <> y.digit AND + r.digit <> y.digit AND + s.digit <> 0 AND m.digit <> 0 AND + (1000 * s.digit + 100 * e.digit + 10 * n.digit + d.digit) + + (1000 * m.digit + 100 * o.digit + 10 * r.digit + e.digit) = + (10000 * m.digit + 1000 * o.digit + 100 * n.digit + 10 * e.digit + y.digit); diff --git a/parser/testdata/03274_prewarm_primary_index_cache/ast.json b/parser/testdata/03274_prewarm_primary_index_cache/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03274_prewarm_primary_index_cache/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03274_prewarm_primary_index_cache/metadata.json b/parser/testdata/03274_prewarm_primary_index_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03274_prewarm_primary_index_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03274_prewarm_primary_index_cache/query.sql b/parser/testdata/03274_prewarm_primary_index_cache/query.sql new file mode 100644 index 000000000..96a07d614 --- /dev/null +++ b/parser/testdata/03274_prewarm_primary_index_cache/query.sql @@ -0,0 +1,74 @@ +-- Tags: no-parallel, no-shared-merge-tree + +DROP TABLE IF EXISTS t_prewarm_cache_rmt_1; +DROP TABLE IF EXISTS t_prewarm_cache_rmt_2; + +CREATE TABLE t_prewarm_cache_rmt_1 (a UInt64, b UInt64, c UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/03274_prewarm_mark_cache_smt/t_prewarm_cache', '1') +ORDER BY a PARTITION BY a % 2 +SETTINGS prewarm_primary_key_cache = 1, use_primary_key_cache = 1; + +CREATE TABLE t_prewarm_cache_rmt_2 (a UInt64, b UInt64, c UInt64) +ENGINE = 
ReplicatedMergeTree('/clickhouse/tables/{database}/03274_prewarm_mark_cache_smt/t_prewarm_cache', '2') +ORDER BY a PARTITION BY a % 2 +SETTINGS prewarm_primary_key_cache = 1, use_primary_key_cache = 1; + +SYSTEM DROP PRIMARY INDEX CACHE; +SYSTEM STOP FETCHES t_prewarm_cache_rmt_2; + +-- Check that prewarm works on insert. +INSERT INTO t_prewarm_cache_rmt_1 SELECT number, rand(), rand() FROM numbers(20000); + +SELECT count() FROM t_prewarm_cache_rmt_1 WHERE a % 2 = 0 AND a > 100 AND a < 1000; +SELECT sum(primary_key_bytes_in_memory) FROM system.parts WHERE database = currentDatabase() AND table IN ('t_prewarm_cache_rmt_1', 't_prewarm_cache_rmt_2'); + +-- Check that prewarm works on fetch. +SYSTEM DROP PRIMARY INDEX CACHE; +SYSTEM START FETCHES t_prewarm_cache_rmt_2; +SYSTEM SYNC REPLICA t_prewarm_cache_rmt_2; + +SELECT count() FROM t_prewarm_cache_rmt_2 WHERE a % 2 = 0 AND a > 100 AND a < 1000; +SELECT sum(primary_key_bytes_in_memory) FROM system.parts WHERE database = currentDatabase() AND table IN ('t_prewarm_cache_rmt_1', 't_prewarm_cache_rmt_2'); + +-- Check that prewarm works on merge. +INSERT INTO t_prewarm_cache_rmt_1 SELECT number, rand(), rand() FROM numbers(20000); +OPTIMIZE TABLE t_prewarm_cache_rmt_1 FINAL; + +SYSTEM SYNC REPLICA t_prewarm_cache_rmt_2; + +SELECT count() FROM t_prewarm_cache_rmt_1 WHERE a % 2 = 0 AND a > 100 AND a < 1000; +SELECT count() FROM t_prewarm_cache_rmt_2 WHERE a % 2 = 0 AND a > 100 AND a < 1000; +SELECT sum(primary_key_bytes_in_memory) FROM system.parts WHERE database = currentDatabase() AND table IN ('t_prewarm_cache_rmt_1', 't_prewarm_cache_rmt_2'); + +-- Check that prewarm works on restart. +SYSTEM DROP PRIMARY INDEX CACHE; + +DETACH TABLE t_prewarm_cache_rmt_1; +DETACH TABLE t_prewarm_cache_rmt_2; + +ATTACH TABLE t_prewarm_cache_rmt_1; +ATTACH TABLE t_prewarm_cache_rmt_2; + +SELECT count() FROM t_prewarm_cache_rmt_1 WHERE a % 2 = 0 AND a > 100 AND a < 1000; +SELECT count() FROM t_prewarm_cache_rmt_2 WHERE a % 2 = 0 AND a > 100 AND a < 1000; +SELECT sum(primary_key_bytes_in_memory) FROM system.parts WHERE database = currentDatabase() AND table IN ('t_prewarm_cache_rmt_1', 't_prewarm_cache_rmt_2'); + +SYSTEM DROP PRIMARY INDEX CACHE; + +SELECT count() FROM t_prewarm_cache_rmt_1 WHERE a % 2 = 0 AND a > 100 AND a < 1000; +SELECT sum(primary_key_bytes_in_memory) FROM system.parts WHERE database = currentDatabase() AND table IN ('t_prewarm_cache_rmt_1', 't_prewarm_cache_rmt_2'); + +--- Check that system query works. 
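+-- SYSTEM PREWARM PRIMARY INDEX CACHE should load the index eagerly, without any query touching the table first.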
+SYSTEM PREWARM PRIMARY INDEX CACHE t_prewarm_cache_rmt_1; + +SELECT count() FROM t_prewarm_cache_rmt_1 WHERE a % 2 = 0 AND a > 100 AND a < 1000; +SELECT sum(primary_key_bytes_in_memory) FROM system.parts WHERE database = currentDatabase() AND table IN ('t_prewarm_cache_rmt_1', 't_prewarm_cache_rmt_2'); + +SYSTEM FLUSH LOGS query_log; + +SELECT ProfileEvents['LoadedPrimaryIndexFiles'] FROM system.query_log +WHERE current_database = currentDatabase() AND type = 'QueryFinish' AND query LIKE 'SELECT count() FROM t_prewarm_cache%' +ORDER BY event_time_microseconds; + +DROP TABLE IF EXISTS t_prewarm_cache_rmt_1; +DROP TABLE IF EXISTS t_prewarm_cache_rmt_2; diff --git a/parser/testdata/03274_squashing_transform_sparse_bug/ast.json b/parser/testdata/03274_squashing_transform_sparse_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03274_squashing_transform_sparse_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03274_squashing_transform_sparse_bug/metadata.json b/parser/testdata/03274_squashing_transform_sparse_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03274_squashing_transform_sparse_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03274_squashing_transform_sparse_bug/query.sql b/parser/testdata/03274_squashing_transform_sparse_bug/query.sql new file mode 100644 index 000000000..d690ef38c --- /dev/null +++ b/parser/testdata/03274_squashing_transform_sparse_bug/query.sql @@ -0,0 +1,19 @@ + +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; + +SET max_insert_block_size = 1; +SET min_insert_block_size_rows = 1; +SET min_insert_block_size_bytes = 1; +SET max_execution_time = 300; + +CREATE TABLE t0 (x UInt64, y Tuple(UInt64, UInt64) ) ENGINE = MergeTree ORDER BY x SETTINGS ratio_of_defaults_for_sparse_serialization = 0.5; +SYSTEM STOP MERGES t0; +INSERT INTO t0 SELECT if(number % 2 = 0, 0, number) as x, (x, 0) from numbers(200) SETTINGS max_block_size = 1; + +CREATE TABLE t1 (x UInt64, y Tuple(UInt64, UInt64) ) ENGINE = MergeTree ORDER BY x; + +SET min_joined_block_size_bytes = 100; + +SET join_algorithm = 'parallel_hash'; +SELECT sum(ignore(*)) FROM t0 a FULL JOIN t1 b ON a.x = b.x FORMAT Null; diff --git a/parser/testdata/03274_with_fill_dup_sort_bug/ast.json b/parser/testdata/03274_with_fill_dup_sort_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03274_with_fill_dup_sort_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03274_with_fill_dup_sort_bug/metadata.json b/parser/testdata/03274_with_fill_dup_sort_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03274_with_fill_dup_sort_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03274_with_fill_dup_sort_bug/query.sql b/parser/testdata/03274_with_fill_dup_sort_bug/query.sql new file mode 100644 index 000000000..41d7bb845 --- /dev/null +++ b/parser/testdata/03274_with_fill_dup_sort_bug/query.sql @@ -0,0 +1,7 @@ +SELECT + 1 AS a, + 2 AS b +ORDER BY + a ASC, + 1 ASC, + b ASC WITH FILL TO 10; diff --git a/parser/testdata/03275_auto_cluster_functions_with_parallel_replicas/ast.json b/parser/testdata/03275_auto_cluster_functions_with_parallel_replicas/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03275_auto_cluster_functions_with_parallel_replicas/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/03275_auto_cluster_functions_with_parallel_replicas/metadata.json b/parser/testdata/03275_auto_cluster_functions_with_parallel_replicas/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03275_auto_cluster_functions_with_parallel_replicas/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03275_auto_cluster_functions_with_parallel_replicas/query.sql b/parser/testdata/03275_auto_cluster_functions_with_parallel_replicas/query.sql new file mode 100644 index 000000000..92e0e1126 --- /dev/null +++ b/parser/testdata/03275_auto_cluster_functions_with_parallel_replicas/query.sql @@ -0,0 +1,48 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on Minio + +SET enable_analyzer=1; +SET enable_parallel_replicas=1; +SET max_parallel_replicas=4; +SET cluster_for_parallel_replicas='test_cluster_two_shards'; +SET query_plan_join_swap_table=0; + + +SET parallel_replicas_for_cluster_engines=true; + +EXPLAIN SELECT * FROM url('http://localhost:8123'); +EXPLAIN SELECT * FROM s3('http://localhost:11111/test/a.tsv', 'TSV'); +EXPLAIN SELECT * FROM s3('http://localhost:11111/test/a.tsv', 'TSV') where c1 in (SELECT c1 FROM s3('http://localhost:11111/test/a.tsv', 'TSV')); +EXPLAIN SELECT sum(c1) FROM (SELECT * FROM s3('http://localhost:11111/test/a.tsv', 'TSV')); +EXPLAIN SELECT number FROM system.numbers n JOIN s3('http://localhost:11111/test/a.tsv', 'TSV') s ON (toInt64(n.number) = toInt64(s.c1)); +EXPLAIN SELECT number FROM system.numbers n JOIN (SELECT * FROM s3('http://localhost:11111/test/a.tsv', 'TSV')) s ON (toInt64(n.number) = toInt64(s.c1)); + +SELECT count() FROM s3('http://localhost:11111/test/a.tsv', 'TSV'); + +DROP TABLE IF EXISTS dupe_test_with_auto_functions; +CREATE TABLE dupe_test_with_auto_functions (n1 String, n2 String, n3 String) ENGINE = MergeTree ORDER BY n1; +INSERT INTO dupe_test_with_auto_functions SELECT * FROM s3('http://localhost:11111/test/a.tsv', 'TSV'); +SELECT count() FROM dupe_test_with_auto_functions; + +DROP TABLE IF EXISTS insert_with_url_function; +CREATE TABLE insert_with_url_function (n1 String, n2 String, n3 String) ENGINE = MergeTree ORDER BY n1; +INSERT INTO insert_with_url_function SELECT * FROM url('http://localhost:11111/test/a.tsv', 'TSV'); +SELECT count() FROM insert_with_url_function; + + +SET parallel_replicas_for_cluster_engines=false; + +EXPLAIN SELECT * FROM url('http://localhost:8123'); +EXPLAIN SELECT * FROM s3('http://localhost:11111/test/a.tsv', 'TSV'); + +SELECT count() FROM s3('http://localhost:11111/test/a.tsv', 'TSV'); + +DROP TABLE IF EXISTS dupe_test_without_cluster_functions; +CREATE TABLE dupe_test_without_cluster_functions (n1 String, n2 String, n3 String) ENGINE = MergeTree ORDER BY n1; +INSERT INTO dupe_test_without_cluster_functions SELECT * FROM s3('http://localhost:11111/test/a.tsv', 'TSV'); +SELECT count() FROM dupe_test_without_cluster_functions; + +DROP TABLE IF EXISTS dupe_test_with_cluster_function; +CREATE TABLE dupe_test_with_cluster_function (n1 String, n2 String, n3 String) ENGINE = MergeTree ORDER BY n1; +INSERT INTO dupe_test_with_cluster_function SELECT * FROM s3Cluster('test_cluster_two_shards', 'http://localhost:11111/test/a.tsv', 'TSV'); +SELECT count() FROM dupe_test_with_cluster_function; diff --git a/parser/testdata/03275_block_number_mutation/ast.json b/parser/testdata/03275_block_number_mutation/ast.json new file mode 100644 index 000000000..f903efacf --- /dev/null +++ b/parser/testdata/03275_block_number_mutation/ast.json @@ -0,0 +1,28 
@@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_block_number_delete (children 1)" + }, + { + "explain": " Identifier t_block_number_delete" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001717889, + "rows_read": 2, + "bytes_read": 94 + } +} diff --git a/parser/testdata/03275_block_number_mutation/metadata.json b/parser/testdata/03275_block_number_mutation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03275_block_number_mutation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03275_block_number_mutation/query.sql b/parser/testdata/03275_block_number_mutation/query.sql new file mode 100644 index 000000000..032800a5f --- /dev/null +++ b/parser/testdata/03275_block_number_mutation/query.sql @@ -0,0 +1,41 @@ +DROP TABLE IF EXISTS t_block_number_delete sync; + +SET mutations_sync = 2; + +CREATE TABLE t_block_number_delete (x UInt32, ts DateTime) ENGINE = MergeTree ORDER BY x SETTINGS enable_block_number_column = 1, enable_block_offset_column = 0, min_bytes_for_wide_part = 1; + +INSERT INTO t_block_number_delete SELECT number, now() - INTERVAL number minute from numbers(10); +OPTIMIZE TABLE t_block_number_delete final; +ALTER TABLE t_block_number_delete DELETE WHERE x < 2; + +SELECT count(), sum(x) FROM t_block_number_delete; +SELECT command, is_done, latest_fail_reason FROM system.mutations WHERE database = currentDatabase() AND table = 't_block_number_delete'; +SELECT column, count() FROM system.parts_columns WHERE database = currentDatabase() AND table = 't_block_number_delete' AND active GROUP BY column ORDER BY column; + +DETACH TABLE t_block_number_delete; +ATTACH TABLE t_block_number_delete; + +SELECT count(), sum(x) FROM t_block_number_delete; +SELECT command, is_done, latest_fail_reason FROM system.mutations WHERE database = currentDatabase() AND table = 't_block_number_delete'; +SELECT column, count() FROM system.parts_columns WHERE database = currentDatabase() AND table = 't_block_number_delete' AND active GROUP BY column ORDER BY column; + +DROP TABLE t_block_number_delete; + +CREATE TABLE t_block_number_delete (x UInt32, ts DateTime) ENGINE = MergeTree ORDER BY x SETTINGS enable_block_number_column = 1, enable_block_offset_column = 0, min_bytes_for_wide_part = '10G'; + +INSERT INTO t_block_number_delete SELECT number, now() - INTERVAL number minute from numbers(10); +OPTIMIZE TABLE t_block_number_delete final; +ALTER TABLE t_block_number_delete DELETE WHERE x < 2; + +SELECT count(), sum(x) FROM t_block_number_delete; +SELECT command, is_done, latest_fail_reason FROM system.mutations WHERE database = currentDatabase() AND table = 't_block_number_delete'; +SELECT column, count() FROM system.parts_columns WHERE database = currentDatabase() AND table = 't_block_number_delete' AND active GROUP BY column ORDER BY column; + +DETACH TABLE t_block_number_delete; +ATTACH TABLE t_block_number_delete; + +SELECT count(), sum(x) FROM t_block_number_delete; +SELECT command, is_done, latest_fail_reason FROM system.mutations WHERE database = currentDatabase() AND table = 't_block_number_delete'; +SELECT column, count() FROM system.parts_columns WHERE database = currentDatabase() AND table = 't_block_number_delete' AND active GROUP BY column ORDER BY column; + +DROP TABLE t_block_number_delete; diff --git a/parser/testdata/03275_block_number_update/ast.json b/parser/testdata/03275_block_number_update/ast.json new file mode 100644 index 
000000000..7608253c5 --- /dev/null +++ b/parser/testdata/03275_block_number_update/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_block_number_mut (children 1)" + }, + { + "explain": " Identifier t_block_number_mut" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001720772, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/03275_block_number_update/metadata.json b/parser/testdata/03275_block_number_update/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03275_block_number_update/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03275_block_number_update/query.sql b/parser/testdata/03275_block_number_update/query.sql new file mode 100644 index 000000000..ca8160b48 --- /dev/null +++ b/parser/testdata/03275_block_number_update/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS t_block_number_mut; + +SET mutations_sync = 2; + +CREATE TABLE t_block_number_mut (n int) ENGINE = MergeTree ORDER BY tuple() SETTINGS enable_block_number_column = 1, min_bytes_for_wide_part = 0; + +INSERT INTO t_block_number_mut VALUES (1) (2); + +OPTIMIZE TABLE t_block_number_mut FINAL; + +ALTER TABLE t_block_number_mut UPDATE n = n + 1 WHERE 1; + +SELECT * FROM t_block_number_mut; + +DROP TABLE IF EXISTS t_block_number_mut; + +CREATE TABLE t_block_number_mut (n int) ENGINE = MergeTree ORDER BY tuple() SETTINGS enable_block_number_column = 1, min_bytes_for_wide_part = '1G'; + +INSERT INTO t_block_number_mut VALUES (1) (2); + +OPTIMIZE TABLE t_block_number_mut FINAL; + +ALTER TABLE t_block_number_mut UPDATE n = n + 1 WHERE 1; + +SELECT * FROM t_block_number_mut; + +DROP TABLE IF EXISTS t_block_number_mut; diff --git a/parser/testdata/03275_count_digits_argument_evaluation/ast.json b/parser/testdata/03275_count_digits_argument_evaluation/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03275_count_digits_argument_evaluation/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03275_count_digits_argument_evaluation/metadata.json b/parser/testdata/03275_count_digits_argument_evaluation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03275_count_digits_argument_evaluation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03275_count_digits_argument_evaluation/query.sql b/parser/testdata/03275_count_digits_argument_evaluation/query.sql new file mode 100644 index 000000000..cc83e747d --- /dev/null +++ b/parser/testdata/03275_count_digits_argument_evaluation/query.sql @@ -0,0 +1,6 @@ +-- No arguments passed +SELECT countDigits(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +-- Invalid 1st argument passed +SELECT countDigits(toFloat32(1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- More arguments than expected +SELECT countDigits(1, 1); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/parser/testdata/03275_ignore_nonexistent_files_fix/ast.json b/parser/testdata/03275_ignore_nonexistent_files_fix/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03275_ignore_nonexistent_files_fix/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03275_ignore_nonexistent_files_fix/metadata.json b/parser/testdata/03275_ignore_nonexistent_files_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++
b/parser/testdata/03275_ignore_nonexistent_files_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03275_ignore_nonexistent_files_fix/query.sql b/parser/testdata/03275_ignore_nonexistent_files_fix/query.sql new file mode 100644 index 000000000..99c7bd123 --- /dev/null +++ b/parser/testdata/03275_ignore_nonexistent_files_fix/query.sql @@ -0,0 +1,9 @@ +-- Tags: no-fasttest + +SELECT * FROM s3( + 'http://localhost:11111/test/03036_json_archive.zip :: example11.jsonl', + JSONEachRow, + 'id UInt32, data String' + ) +ORDER BY tuple(*) +SETTINGS s3_ignore_file_doesnt_exist=1, use_cache_for_count_from_files=0; diff --git a/parser/testdata/03275_matview_with_union/ast.json b/parser/testdata/03275_matview_with_union/ast.json new file mode 100644 index 000000000..490882b9f --- /dev/null +++ b/parser/testdata/03275_matview_with_union/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery src (children 1)" + }, + { + "explain": " Identifier src" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001115189, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/03275_matview_with_union/metadata.json b/parser/testdata/03275_matview_with_union/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03275_matview_with_union/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03275_matview_with_union/query.sql b/parser/testdata/03275_matview_with_union/query.sql new file mode 100644 index 000000000..0fc64ae54 --- /dev/null +++ b/parser/testdata/03275_matview_with_union/query.sql @@ -0,0 +1,84 @@ +DROP TABLE IF EXISTS src; +DROP TABLE IF EXISTS dst; +DROP TABLE IF EXISTS matview; + +SET use_async_executor_for_materialized_views=1; + +CREATE TABLE src ( + event_time DateTime, + key UInt64, + value Int64 +) +ENGINE = MergeTree() +ORDER BY (event_time, key); + +CREATE TABLE dst ( + step UInt16, + rounded_event_time DateTime, + key UInt64, + value AggregateFunction(max, Int64) +) +ENGINE = AggregatingMergeTree() +ORDER BY (step, rounded_event_time, key); + +CREATE MATERIALIZED VIEW matview TO dst +( + step UInt16, + rounded_event_time DateTime, + key UInt64, + value AggregateFunction(max, Int64) +) AS +SELECT * FROM ( + SELECT + 1 AS step, + key, + intDiv(toUnixTimestamp(event_time), step) * step AS rounded_event_time, + initializeAggregation('maxState', value) AS value + FROM src + ORDER BY + rounded_event_time, + key + UNION ALL + SELECT + 5 AS step, + key, + intDiv(toUnixTimestamp(event_time), step) * step AS rounded_event_time, + initializeAggregation('maxState', value) AS value + FROM src + ORDER BY + rounded_event_time, + key + UNION ALL + SELECT + 15 AS step, + key, + intDiv(toUnixTimestamp(event_time), step) * step AS rounded_event_time, + initializeAggregation('maxState', value) AS value + FROM src + ORDER BY + rounded_event_time, + key + UNION ALL + SELECT + 30 AS step, + key, + intDiv(toUnixTimestamp(event_time), step) * step AS rounded_event_time, + initializeAggregation('maxState', value) AS value + FROM src + ORDER BY + rounded_event_time, + key +) +ORDER BY step, rounded_event_time, key SETTINGS query_plan_remove_redundant_sorting = 0; + +set optimize_on_insert = 1; + +INSERT INTO src SELECT toDateTime('2020-10-01 00:00:00') + number, number % 100, number from numbers(1000); + +SELECT count() FROM dst; + +SELECT count(), key FROM dst WHERE step = 30 group by key ORDER BY key LIMIT 5; + +DROP TABLE 
IF EXISTS src; +DROP TABLE IF EXISTS dst; +DROP TABLE IF EXISTS matview; diff --git a/parser/testdata/03275_pr_any_join/ast.json b/parser/testdata/03275_pr_any_join/ast.json new file mode 100644 index 000000000..e190c983f --- /dev/null +++ b/parser/testdata/03275_pr_any_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001376236, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03275_pr_any_join/metadata.json b/parser/testdata/03275_pr_any_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03275_pr_any_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03275_pr_any_join/query.sql b/parser/testdata/03275_pr_any_join/query.sql new file mode 100644 index 000000000..6a3daff95 --- /dev/null +++ b/parser/testdata/03275_pr_any_join/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS t1 SYNC; +DROP TABLE IF EXISTS t2 SYNC; + +CREATE TABLE t1 (x UInt32, s String) engine = ReplicatedMergeTree('/clickhouse/{database}/t1', 'r1') ORDER BY tuple(); +CREATE TABLE t2 (x UInt32, s String) engine = ReplicatedMergeTree('/clickhouse/{database}/t2', 'r1') ORDER BY tuple(); + +INSERT INTO t1 (x, s) VALUES (0, 'a1'), (1, 'a2'), (2, 'a3'), (3, 'a4'), (4, 'a5'); +INSERT INTO t2 (x, s) VALUES (2, 'b1'), (4, 'b2'), (5, 'b4'); + +set enable_analyzer = 1, enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; + +SELECT 'any left'; +SELECT t1.*, t2.* FROM t1 ANY LEFT JOIN t2 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'any left (rev)'; +SELECT t1.*, t2.* FROM t2 ANY LEFT JOIN t1 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'any inner'; +SELECT t1.*, t2.* FROM t1 ANY INNER JOIN t2 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'any inner (rev)'; +SELECT t1.*, t2.* FROM t2 ANY INNER JOIN t1 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'any right'; +SELECT t1.*, t2.* FROM t1 ANY RIGHT JOIN t2 USING(x) ORDER BY t1.x, t2.x; + +SELECT 'any right (rev)'; +SELECT t1.*, t2.* FROM t2 ANY RIGHT JOIN t1 USING(x) ORDER BY t1.x, t2.x; + +DROP TABLE t1 SYNC; +DROP TABLE t2 SYNC; diff --git a/parser/testdata/03275_subcolumns_in_primary_key_bug/ast.json b/parser/testdata/03275_subcolumns_in_primary_key_bug/ast.json new file mode 100644 index 000000000..fd2055ace --- /dev/null +++ b/parser/testdata/03275_subcolumns_in_primary_key_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test1 (children 1)" + }, + { + "explain": " Identifier test1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001281462, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/03275_subcolumns_in_primary_key_bug/metadata.json b/parser/testdata/03275_subcolumns_in_primary_key_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03275_subcolumns_in_primary_key_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03275_subcolumns_in_primary_key_bug/query.sql b/parser/testdata/03275_subcolumns_in_primary_key_bug/query.sql new file mode 100644 index 000000000..f08feecb6 --- /dev/null +++ b/parser/testdata/03275_subcolumns_in_primary_key_bug/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS test1; + 
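+-- Regression test: the sorting key is a tuple subcolumn (t.a), and the DELETE mutation below filters on that subcolumn.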
+CREATE TABLE test1 +( + `t` Tuple(a Boolean) +) +ENGINE = MergeTree +ORDER BY t.a; + +INSERT INTO test1 FORMAT Values (tuple(true)), (tuple(false)); + +ALTER TABLE test1 (DELETE WHERE t.a) SETTINGS alter_sync = 2, mutations_sync = 2; + +SELECT * FROM test1; diff --git a/parser/testdata/03276_database_backup_merge_tree_table_file_engine/ast.json b/parser/testdata/03276_database_backup_merge_tree_table_file_engine/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03276_database_backup_merge_tree_table_file_engine/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03276_database_backup_merge_tree_table_file_engine/metadata.json b/parser/testdata/03276_database_backup_merge_tree_table_file_engine/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03276_database_backup_merge_tree_table_file_engine/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03276_database_backup_merge_tree_table_file_engine/query.sql b/parser/testdata/03276_database_backup_merge_tree_table_file_engine/query.sql new file mode 100644 index 000000000..5596c6999 --- /dev/null +++ b/parser/testdata/03276_database_backup_merge_tree_table_file_engine/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-parallel, no-fasttest, no-flaky-check, no-encrypted-storage +-- Because we are creating a backup with fixed path. + +DROP DATABASE IF EXISTS 03276_test_database; +CREATE DATABASE 03276_test_database; + +CREATE TABLE 03276_test_database.test_table (id UInt64, value String) ENGINE = MergeTree ORDER BY id; +INSERT INTO 03276_test_database.test_table SELECT number, number FROM numbers(15000); + +SELECT (id % 10) AS key, count() FROM 03276_test_database.test_table GROUP BY key ORDER BY key; + +BACKUP TABLE 03276_test_database.test_table TO File('03276_test_database.test_table') FORMAT Null; + +SELECT '--'; + +DROP DATABASE IF EXISTS 03276_test_table_backup_database; +CREATE DATABASE 03276_test_table_backup_database ENGINE = Backup('03276_test_database', File('03276_test_database.test_table')); + +SELECT (id % 10) AS key, count() FROM 03276_test_table_backup_database.test_table GROUP BY key ORDER BY key; + +DROP DATABASE 03276_test_table_backup_database; + +DROP DATABASE 03276_test_database; diff --git a/parser/testdata/03276_empty_variant_type/ast.json b/parser/testdata/03276_empty_variant_type/ast.json new file mode 100644 index 000000000..81f8e439d --- /dev/null +++ b/parser/testdata/03276_empty_variant_type/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001159092, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03276_empty_variant_type/metadata.json b/parser/testdata/03276_empty_variant_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03276_empty_variant_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03276_empty_variant_type/query.sql b/parser/testdata/03276_empty_variant_type/query.sql new file mode 100644 index 000000000..b87ebbee7 --- /dev/null +++ b/parser/testdata/03276_empty_variant_type/query.sql @@ -0,0 +1,3 @@ +set allow_experimental_variant_type=1; +create table test (v Variant()) engine=Variant(); -- {serverError BAD_ARGUMENTS} + diff --git a/parser/testdata/03276_functions_to_subcolumns_lc/ast.json 
b/parser/testdata/03276_functions_to_subcolumns_lc/ast.json new file mode 100644 index 000000000..c020515fa --- /dev/null +++ b/parser/testdata/03276_functions_to_subcolumns_lc/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_map_lc (children 1)" + }, + { + "explain": " Identifier t_map_lc" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001065589, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/03276_functions_to_subcolumns_lc/metadata.json b/parser/testdata/03276_functions_to_subcolumns_lc/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03276_functions_to_subcolumns_lc/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03276_functions_to_subcolumns_lc/query.sql b/parser/testdata/03276_functions_to_subcolumns_lc/query.sql new file mode 100644 index 000000000..b3b8c1a79 --- /dev/null +++ b/parser/testdata/03276_functions_to_subcolumns_lc/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS t_map_lc; + +CREATE TABLE t_map_lc +( + kv Map(LowCardinality(String), LowCardinality(String)), + k Array(LowCardinality(String)) ALIAS mapKeys(kv), + v Array(LowCardinality(String)) ALIAS mapValues(kv) +) ENGINE = Memory; + +INSERT INTO t_map_lc VALUES (map('foo', 'bar')); + +SELECT k, v FROM t_map_lc SETTINGS optimize_functions_to_subcolumns=1; + +DROP TABLE t_map_lc; diff --git a/parser/testdata/03276_index_empty_part/ast.json b/parser/testdata/03276_index_empty_part/ast.json new file mode 100644 index 000000000..9d8ff5660 --- /dev/null +++ b/parser/testdata/03276_index_empty_part/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_index_empty_part (children 1)" + }, + { + "explain": " Identifier t_index_empty_part" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001205788, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/03276_index_empty_part/metadata.json b/parser/testdata/03276_index_empty_part/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03276_index_empty_part/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03276_index_empty_part/query.sql b/parser/testdata/03276_index_empty_part/query.sql new file mode 100644 index 000000000..19d160c5c --- /dev/null +++ b/parser/testdata/03276_index_empty_part/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS t_index_empty_part; + +CREATE TABLE t_index_empty_part (c0 Int, c1 Int) +ENGINE = MergeTree() PRIMARY KEY (c0, c1) +SETTINGS primary_key_lazy_load = 0, remove_empty_parts = 0; + +INSERT INTO TABLE t_index_empty_part (c0, c1) VALUES (1, 1); + +TRUNCATE t_index_empty_part; +DETACH TABLE t_index_empty_part; +ATTACH TABLE t_index_empty_part; + +SELECT rows, primary_key_bytes_in_memory FROM system.parts WHERE database = currentDatabase() AND table = 't_index_empty_part'; + +DROP TABLE t_index_empty_part; diff --git a/parser/testdata/03276_index_of_assume_sorted/ast.json b/parser/testdata/03276_index_of_assume_sorted/ast.json new file mode 100644 index 000000000..d1764f364 --- /dev/null +++ b/parser/testdata/03276_index_of_assume_sorted/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + 
"statistics": + { + "elapsed": 0.001114188, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03276_index_of_assume_sorted/metadata.json b/parser/testdata/03276_index_of_assume_sorted/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03276_index_of_assume_sorted/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03276_index_of_assume_sorted/query.sql b/parser/testdata/03276_index_of_assume_sorted/query.sql new file mode 100644 index 000000000..b07846646 --- /dev/null +++ b/parser/testdata/03276_index_of_assume_sorted/query.sql @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test( + id UInt64, + numbers Array(Int64) +) +ENGINE = MergeTree() +ORDER BY id; + +INSERT INTO test VALUES(1, [1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 6, 7]); +INSERT INTO test VALUES (2, [1, 2, 3, 4, 5, 6, 7, 8]); +INSERT INTO test VALUES(3, [1, 3, 7, 10]); +INSERT INTO test VALUES(4, [0, 0, 0]); +INSERT INTO test VALUES(5, [10, 10, 10]); + +SELECT indexOfAssumeSorted(numbers, 4) FROM test WHERE id = 1; +SELECT indexOfAssumeSorted(numbers, 5) FROM test WHERE id = 2; +SELECT indexOfAssumeSorted(numbers, 5) FROM test WHERE id = 3; +SELECT indexOfAssumeSorted(numbers, 1) FROM test WHERE id = 4; +SELECT indexOfAssumeSorted(numbers, 1) FROM test WHERE id = 5; + +SELECT indexOfAssumeSorted([1, 2, 2, 2, 3, 3, 3, 4, 4], 4); +SELECT indexOfAssumeSorted([10, 10, 10], 1); +SELECT indexOfAssumeSorted([1, 1, 1], 10); + +DROP TABLE IF EXISTS test; diff --git a/parser/testdata/03276_merge_tree_index_lazy_load/ast.json b/parser/testdata/03276_merge_tree_index_lazy_load/ast.json new file mode 100644 index 000000000..42f7855f0 --- /dev/null +++ b/parser/testdata/03276_merge_tree_index_lazy_load/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_index_lazy_load (children 1)" + }, + { + "explain": " Identifier t_index_lazy_load" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001475285, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/03276_merge_tree_index_lazy_load/metadata.json b/parser/testdata/03276_merge_tree_index_lazy_load/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03276_merge_tree_index_lazy_load/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03276_merge_tree_index_lazy_load/query.sql b/parser/testdata/03276_merge_tree_index_lazy_load/query.sql new file mode 100644 index 000000000..70b4ee282 --- /dev/null +++ b/parser/testdata/03276_merge_tree_index_lazy_load/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS t_index_lazy_load; + +CREATE TABLE t_index_lazy_load (a UInt64) +ENGINE = MergeTree ORDER BY a +SETTINGS index_granularity = 4, index_granularity_bytes = '10M', primary_key_lazy_load = 1, use_primary_key_cache = 0; + +INSERT INTO t_index_lazy_load SELECT number FROM numbers(15); + +DETACH TABLE t_index_lazy_load; +ATTACH TABLE t_index_lazy_load; + +SELECT name, primary_key_bytes_in_memory FROM system.parts WHERE database = currentDatabase() AND table = 't_index_lazy_load'; + +-- Check that if index is not requested it is not loaded. 
+SELECT part_name, mark_number, rows_in_granule FROM mergeTreeIndex(currentDatabase(), t_index_lazy_load) ORDER BY part_name, mark_number; +SELECT name, primary_key_bytes_in_memory FROM system.parts WHERE database = currentDatabase() AND table = 't_index_lazy_load'; + +-- If index is requested we have to load it and keep in memory. +SELECT part_name, mark_number, rows_in_granule, a FROM mergeTreeIndex(currentDatabase(), t_index_lazy_load) ORDER BY part_name, mark_number; +SELECT name, primary_key_bytes_in_memory FROM system.parts WHERE database = currentDatabase() AND table = 't_index_lazy_load'; + +DROP TABLE t_index_lazy_load; diff --git a/parser/testdata/03276_parquet_output_compression_level/ast.json b/parser/testdata/03276_parquet_output_compression_level/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03276_parquet_output_compression_level/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03276_parquet_output_compression_level/metadata.json b/parser/testdata/03276_parquet_output_compression_level/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03276_parquet_output_compression_level/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03276_parquet_output_compression_level/query.sql b/parser/testdata/03276_parquet_output_compression_level/query.sql new file mode 100644 index 000000000..27e096a63 --- /dev/null +++ b/parser/testdata/03276_parquet_output_compression_level/query.sql @@ -0,0 +1,19 @@ +-- Tags: no-parallel, no-fasttest + +insert into function file(03276_parquet_custom_encoder_compression_level_1.parquet) SETTINGS output_format_parquet_compression_method = 'zstd', output_format_compression_level=1, output_format_parquet_use_custom_encoder=1, engine_file_truncate_on_insert=1 SELECT number AS id, toString(number) AS name, now() + number AS timestamp FROM numbers(100000); +insert into function file(03276_parquet_custom_encoder_compression_level_22.parquet) SETTINGS output_format_parquet_compression_method = 'zstd', output_format_compression_level=22, output_format_parquet_use_custom_encoder=1, engine_file_truncate_on_insert=1 SELECT number AS id, toString(number) AS name, now() + number AS timestamp FROM numbers(100000); + +WITH + (SELECT total_compressed_size FROM file(03276_parquet_custom_encoder_compression_level_1.parquet, ParquetMetadata)) AS size_level_1, + (SELECT total_compressed_size FROM file(03276_parquet_custom_encoder_compression_level_22.parquet, ParquetMetadata)) AS size_level_22 +SELECT + size_level_22 < size_level_1 AS compression_higher_level_produces_smaller_file; + +insert into function file(03276_parquet_arrow_encoder_compression_level_1.parquet) SETTINGS output_format_parquet_compression_method = 'zstd', output_format_compression_level=1, output_format_parquet_use_custom_encoder=0, engine_file_truncate_on_insert=1 SELECT number AS id, toString(number) AS name, now() + number AS timestamp FROM numbers(100000); +insert into function file(03276_parquet_arrow_encoder_compression_level_22.parquet) SETTINGS output_format_parquet_compression_method = 'zstd', output_format_compression_level=22, output_format_parquet_use_custom_encoder=0, engine_file_truncate_on_insert=1 SELECT number AS id, toString(number) AS name, now() + number AS timestamp FROM numbers(100000); + +WITH + (SELECT total_compressed_size FROM file(03276_parquet_arrow_encoder_compression_level_1.parquet, ParquetMetadata)) AS size_level_1, + (SELECT 
total_compressed_size FROM file(03276_parquet_arrow_encoder_compression_level_22.parquet, ParquetMetadata)) AS size_level_22 +SELECT + size_level_22 < size_level_1 AS compression_higher_level_produces_smaller_file; diff --git a/parser/testdata/03277_analyzer_array_join_fix/ast.json b/parser/testdata/03277_analyzer_array_join_fix/ast.json new file mode 100644 index 000000000..7e0aefab5 --- /dev/null +++ b/parser/testdata/03277_analyzer_array_join_fix/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery repro (children 1)" + }, + { + "explain": " Identifier repro" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001543831, + "rows_read": 2, + "bytes_read": 63 + } +} diff --git a/parser/testdata/03277_analyzer_array_join_fix/metadata.json b/parser/testdata/03277_analyzer_array_join_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03277_analyzer_array_join_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03277_analyzer_array_join_fix/query.sql b/parser/testdata/03277_analyzer_array_join_fix/query.sql new file mode 100644 index 000000000..9d6f05dd8 --- /dev/null +++ b/parser/testdata/03277_analyzer_array_join_fix/query.sql @@ -0,0 +1,21 @@ +CREATE TABLE IF NOT EXISTS repro +( + `a` LowCardinality(String), + `foos` Nested(x LowCardinality(String)) +) +ENGINE = MergeTree +ORDER BY a; + +CREATE TABLE IF NOT EXISTS repro_dist +( + "a" LowCardinality(String), + "foos" Nested( + "x" LowCardinality(String), + ) +) ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), 'repro'); + +SELECT + a, + foo.x +FROM repro_dist +ARRAY JOIN foos AS foo; diff --git a/parser/testdata/03277_dead_letter_queue_unsupported/ast.json b/parser/testdata/03277_dead_letter_queue_unsupported/ast.json new file mode 100644 index 000000000..ee6da2fb8 --- /dev/null +++ b/parser/testdata/03277_dead_letter_queue_unsupported/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'qfilelog'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001491901, + "rows_read": 5, + "bytes_read": 179 + } +} diff --git a/parser/testdata/03277_dead_letter_queue_unsupported/metadata.json b/parser/testdata/03277_dead_letter_queue_unsupported/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03277_dead_letter_queue_unsupported/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03277_dead_letter_queue_unsupported/query.sql b/parser/testdata/03277_dead_letter_queue_unsupported/query.sql new file mode 100644 index 000000000..6c3d0b934 --- /dev/null +++ b/parser/testdata/03277_dead_letter_queue_unsupported/query.sql @@ -0,0 +1,3 @@ +SELECT 'qfilelog'; +CREATE TABLE qfilelog (key UInt64, value UInt64) ENGINE = FileLog('/tmp/app.log', 'JSONEachRow') +SETTINGS handle_error_mode = 'dead_letter_queue'; -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03277_join_adaptive_spill/ast.json b/parser/testdata/03277_join_adaptive_spill/ast.json new file mode 100644 index 000000000..be51826f3 --- /dev/null +++ b/parser/testdata/03277_join_adaptive_spill/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + 
[ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery adaptive_spill_03277_1 (children 3)" + }, + { + "explain": " Identifier adaptive_spill_03277_1" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration k (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.00115468, + "rows_read": 10, + "bytes_read": 379 + } +} diff --git a/parser/testdata/03277_join_adaptive_spill/metadata.json b/parser/testdata/03277_join_adaptive_spill/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03277_join_adaptive_spill/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03277_join_adaptive_spill/query.sql b/parser/testdata/03277_join_adaptive_spill/query.sql new file mode 100644 index 000000000..4530995b5 --- /dev/null +++ b/parser/testdata/03277_join_adaptive_spill/query.sql @@ -0,0 +1,19 @@ +create table adaptive_spill_03277_1 (`k` String, `x` String ) Engine=Memory; +create table adaptive_spill_03277_2 (`k` String, `x` String ) Engine=Memory; +create table adaptive_spill_03277_3 (`k` String, `x` String ) Engine=Memory; + +insert into adaptive_spill_03277_1 select cast(number as String) as k, cast(number as String) as x from numbers(1000000); +insert into adaptive_spill_03277_2 select cast(number as String) as k, cast(number as String) as x from numbers(1000000); +insert into adaptive_spill_03277_3 select cast(number as String) as k, cast(number as String) as x from numbers(1000000); + +set max_threads=4; +set join_algorithm='grace_hash'; +set max_bytes_in_join=0; +--set max_memory_usage=629145600 + +set enable_adaptive_memory_spill_scheduler=true; +select * from (select t1.k as k, t1.x as x1, t2.x as x2, t3.x as x3 from adaptive_spill_03277_1 as t1 left join adaptive_spill_03277_2 as t2 on t1.k = t2.k left join adaptive_spill_03277_3 as t3 on t1.k = t3.k) order by k, x1, x2, x3 limit 100; + +drop table if exists adaptive_spill_03277_1; +drop table if exists adaptive_spill_03277_2; +drop table if exists adaptive_spill_03277_3; diff --git a/parser/testdata/03277_join_adaptive_spill_oom/ast.json b/parser/testdata/03277_join_adaptive_spill_oom/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03277_join_adaptive_spill_oom/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03277_join_adaptive_spill_oom/metadata.json b/parser/testdata/03277_join_adaptive_spill_oom/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03277_join_adaptive_spill_oom/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03277_join_adaptive_spill_oom/query.sql b/parser/testdata/03277_join_adaptive_spill_oom/query.sql new file mode 100644 index 000000000..81767b802 --- /dev/null +++ b/parser/testdata/03277_join_adaptive_spill_oom/query.sql @@ -0,0 +1,25 @@ +-- Tags: no-asan, no-tsan +create table adaptive_spill_03277_1 (`k` String, `x` String ) Engine=Memory; +create table adaptive_spill_03277_2 (`k` String, `x` String ) Engine=Memory; +create table adaptive_spill_03277_3 (`k` String, `x` String ) Engine=Memory; + +insert into 
adaptive_spill_03277_1 select cast(rand() as String) as k, cast(rand() as String) as x from numbers(1000000); +insert into adaptive_spill_03277_2 select cast(rand() as String) as k, cast(rand() as String) as x from numbers(1000000); +insert into adaptive_spill_03277_3 select cast(rand() as String) as k, cast(rand() as String) as x from numbers(1000000); + +set max_threads=1; +set join_algorithm='grace_hash'; +set max_memory_usage=314572800; +set collect_hash_table_stats_during_joins=0; +-- don't limit the memory usage for join +set max_bytes_in_join=0; + +set enable_adaptive_memory_spill_scheduler=false; +select t1.k, t2.x, t3.x from adaptive_spill_03277_1 as t1 left join adaptive_spill_03277_2 as t2 on t1.k = t2.k left join adaptive_spill_03277_3 as t3 on t1.k = t3.k Format Null; --{serverError MEMORY_LIMIT_EXCEEDED} + +set enable_adaptive_memory_spill_scheduler=true; +select t1.k, t2.x, t3.x from adaptive_spill_03277_1 as t1 left join adaptive_spill_03277_2 as t2 on t1.k = t2.k left join adaptive_spill_03277_3 as t3 on t1.k = t3.k Format Null; + +drop table if exists adaptive_spill_03277_1; +drop table if exists adaptive_spill_03277_2; +drop table if exists adaptive_spill_03277_3; diff --git a/parser/testdata/03277_logging_elapsed_ns/ast.json b/parser/testdata/03277_logging_elapsed_ns/ast.json new file mode 100644 index 000000000..aabb752f8 --- /dev/null +++ b/parser/testdata/03277_logging_elapsed_ns/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " Set" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001292335, + "rows_read": 6, + "bytes_read": 192 + } +} diff --git a/parser/testdata/03277_logging_elapsed_ns/metadata.json b/parser/testdata/03277_logging_elapsed_ns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03277_logging_elapsed_ns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03277_logging_elapsed_ns/query.sql b/parser/testdata/03277_logging_elapsed_ns/query.sql new file mode 100644 index 000000000..4f09c67e5 --- /dev/null +++ b/parser/testdata/03277_logging_elapsed_ns/query.sql @@ -0,0 +1,9 @@ +SELECT 42 SETTINGS log_comment='03277_logging_elapsed_ns'; + +SYSTEM FLUSH LOGS query_log; + +SELECT + ProfileEvents['LogDebug'] + ProfileEvents['LogTrace'] > 0, + ProfileEvents['LoggerElapsedNanoseconds'] > 0 +FROM system.query_log +WHERE current_database = currentDatabase() AND log_comment = '03277_logging_elapsed_ns' AND type = 'QueryFinish'; diff --git a/parser/testdata/03278_database_backup_merge_tree_table_disk_engine/ast.json b/parser/testdata/03278_database_backup_merge_tree_table_disk_engine/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03278_database_backup_merge_tree_table_disk_engine/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03278_database_backup_merge_tree_table_disk_engine/metadata.json b/parser/testdata/03278_database_backup_merge_tree_table_disk_engine/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03278_database_backup_merge_tree_table_disk_engine/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03278_database_backup_merge_tree_table_disk_engine/query.sql b/parser/testdata/03278_database_backup_merge_tree_table_disk_engine/query.sql new file mode 100644 index 000000000..4a4fa04f4 --- /dev/null +++ b/parser/testdata/03278_database_backup_merge_tree_table_disk_engine/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-parallel, no-fasttest, no-flaky-check, no-encrypted-storage +-- Because we are creating a backup with fixed path. + +DROP DATABASE IF EXISTS 03278_test_database; +CREATE DATABASE 03278_test_database; + +CREATE TABLE 03278_test_database.test_table (id UInt64, value String) ENGINE = MergeTree ORDER BY id; +INSERT INTO 03278_test_database.test_table SELECT number, number FROM numbers(15000); + +SELECT (id % 10) AS key, count() FROM 03278_test_database.test_table GROUP BY key ORDER BY key; + +BACKUP TABLE 03278_test_database.test_table TO Disk('backups', '03278_test_database.test_table') FORMAT Null; + +SELECT '--'; + +DROP DATABASE IF EXISTS 03278_test_table_backup_database; +CREATE DATABASE 03278_test_table_backup_database ENGINE = Backup('03278_test_database', Disk('backups', '03278_test_database.test_table')); + +SELECT (id % 10) AS key, count() FROM 03278_test_table_backup_database.test_table GROUP BY key ORDER BY key; + +DROP DATABASE 03278_test_table_backup_database; + +DROP DATABASE 03278_test_database; diff --git a/parser/testdata/03278_dateTime64_in_dateTime64_bug/ast.json b/parser/testdata/03278_dateTime64_in_dateTime64_bug/ast.json new file mode 100644 index 000000000..bff4cd924 --- /dev/null +++ b/parser/testdata/03278_dateTime64_in_dateTime64_bug/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery datetime64_issue (children 3)" + }, + { + "explain": " Identifier datetime64_issue" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration id (children 1)" + }, + { + "explain": " DataType int" + }, + { + "explain": " ColumnDeclaration dt (children 1)" + }, + { + "explain": " DataType DateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " ColumnDeclaration dtn (children 1)" + }, + { + "explain": " DataType Nullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType DateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Storage definition (children 3)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier id" + }, + { + "explain": " Identifier id" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001292455, + "rows_read": 21, + "bytes_read": 787 + } +} diff --git a/parser/testdata/03278_dateTime64_in_dateTime64_bug/metadata.json b/parser/testdata/03278_dateTime64_in_dateTime64_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03278_dateTime64_in_dateTime64_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03278_dateTime64_in_dateTime64_bug/query.sql b/parser/testdata/03278_dateTime64_in_dateTime64_bug/query.sql new file mode 100644 index 000000000..5d6bd825f --- /dev/null +++ b/parser/testdata/03278_dateTime64_in_dateTime64_bug/query.sql @@ -0,0 +1,9 @@ +CREATE TABLE datetime64_issue (id 
int, dt DateTime64(3), dtn Nullable(DateTime64(3))) ENGINE = MergeTree() ORDER BY id PRIMARY KEY id; + +INSERT INTO datetime64_issue(id, dt, dtn) VALUES (1, toDateTime64('2001-01-11 01:11:21.100', 3), toDateTime64('2001-01-11 01:11:21.100', 3)); + +SELECT * FROM datetime64_issue WHERE dt in (toDateTime64('2001-01-11 01:11:21.100', 3)); + +SELECT * FROM datetime64_issue WHERE dtn in (toDateTime64('2001-01-11 01:11:21.100', 3)); + +DROP TABLE datetime64_issue; diff --git a/parser/testdata/03278_enum_in_unknown_value/ast.json b/parser/testdata/03278_enum_in_unknown_value/ast.json new file mode 100644 index 000000000..253e494e2 --- /dev/null +++ b/parser/testdata/03278_enum_in_unknown_value/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_enum_in_unknown_value (children 1)" + }, + { + "explain": " Identifier t_enum_in_unknown_value" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001394588, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/03278_enum_in_unknown_value/metadata.json b/parser/testdata/03278_enum_in_unknown_value/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03278_enum_in_unknown_value/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03278_enum_in_unknown_value/query.sql b/parser/testdata/03278_enum_in_unknown_value/query.sql new file mode 100644 index 000000000..7763ac298 --- /dev/null +++ b/parser/testdata/03278_enum_in_unknown_value/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS t_enum_in_unknown_value; + +CREATE TABLE t_enum_in_unknown_value (e Enum('a'=1, 'b'=2)) ENGINE=Memory; + +INSERT INTO t_enum_in_unknown_value VALUES ('a'); + +SELECT * FROM t_enum_in_unknown_value; + +SELECT * FROM t_enum_in_unknown_value WHERE e IN ('a'); +SELECT * FROM t_enum_in_unknown_value WHERE e NOT IN ('a'); + +SELECT * FROM t_enum_in_unknown_value WHERE e IN ('a', 'b'); +SELECT * FROM t_enum_in_unknown_value WHERE e NOT IN ('a', 'b'); + +SELECT * FROM t_enum_in_unknown_value WHERE e IN (1, 'b'); +SELECT * FROM t_enum_in_unknown_value WHERE e NOT IN (1, 'b'); + +SELECT * FROM t_enum_in_unknown_value WHERE e IN ('a', 'c'); +SELECT * FROM t_enum_in_unknown_value WHERE e NOT IN ('a', 'c'); + +SELECT * FROM t_enum_in_unknown_value WHERE e IN ('a', 'b', 'c'); +SELECT * FROM t_enum_in_unknown_value WHERE e NOT IN ('a', 'b', 'c'); + +SELECT * FROM t_enum_in_unknown_value WHERE e IN ('c'); +SELECT * FROM t_enum_in_unknown_value WHERE e NOT IN ('c'); + +SET validate_enum_literals_in_operators = 1; + +SELECT * FROM t_enum_in_unknown_value WHERE e IN ('a', 'b', 'c'); -- { serverError UNKNOWN_ELEMENT_OF_ENUM } +SELECT * FROM t_enum_in_unknown_value WHERE e NOT IN ('a', 'b', 'c'); -- { serverError UNKNOWN_ELEMENT_OF_ENUM } +SELECT * FROM t_enum_in_unknown_value WHERE e IN ('a', 'b', 3); -- { serverError UNKNOWN_ELEMENT_OF_ENUM } diff --git a/parser/testdata/03278_enum_string_functions/ast.json b/parser/testdata/03278_enum_string_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03278_enum_string_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03278_enum_string_functions/metadata.json b/parser/testdata/03278_enum_string_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03278_enum_string_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03278_enum_string_functions/query.sql b/parser/testdata/03278_enum_string_functions/query.sql new file mode 100644 index 000000000..8cebc2b46 --- /dev/null +++ b/parser/testdata/03278_enum_string_functions/query.sql @@ -0,0 +1,47 @@ +-- { echoOn } + +DROP TABLE IF EXISTS test_enum_string_functions; +CREATE TABLE test_enum_string_functions(e Enum('a'=1, 'b'=2)) ENGINE=TinyLog; +INSERT INTO test_enum_string_functions VALUES ('a'); +SELECT * from test_enum_string_functions WHERE e LIKE '%abc%'; +SELECT * from test_enum_string_functions WHERE e NOT LIKE '%abc%'; +SELECT * from test_enum_string_functions WHERE e iLike '%a%'; +SELECT position(e, 'a') FROM test_enum_string_functions; +SELECT match(e, 'a') FROM test_enum_string_functions; +SELECT locate('a', e) FROM test_enum_string_functions; +SELECT countSubstrings(e, 'a') FROM test_enum_string_functions; +SELECT countSubstringsCaseInsensitive(e, 'a') FROM test_enum_string_functions; +SELECT countSubstringsCaseInsensitiveUTF8(e, 'a') FROM test_enum_string_functions; +SELECT hasToken(e, 'a') FROM test_enum_string_functions; +SELECT hasTokenOrNull(e, 'a') FROM test_enum_string_functions; + +DROP TABLE IF EXISTS jsons; +CREATE TABLE jsons +( + `json` Enum('a', '{"a":1}') +) +ENGINE = Memory; +INSERT INTO jsons VALUES ('{"a":1}'); +INSERT INTO jsons VALUES ('a'); +SELECT simpleJSONHas(json, 'foo') as res FROM jsons order by res; +SELECT simpleJSONHas(json, 'a') as res FROM jsons order by res; +SELECT simpleJSONExtractUInt(json, 'a') as res FROM jsons order by res; +SELECT simpleJSONExtractUInt(json, 'not exsits') as res FROM jsons order by res; +SELECT simpleJSONExtractInt(json, 'a') as res FROM jsons order by res; +SELECT simpleJSONExtractInt(json, 'not exsits') as res FROM jsons order by res; +SELECT simpleJSONExtractFloat(json, 'a') as res FROM jsons order by res; +SELECT simpleJSONExtractFloat(json, 'not exsits') as res FROM jsons order by res; +SELECT simpleJSONExtractBool(json, 'a') as res FROM jsons order by res; +SELECT simpleJSONExtractBool(json, 'not exsits') as res FROM jsons order by res; +SELECT positionUTF8(json, 'a') as res FROM jsons order by res; +SELECT positionCaseInsensitiveUTF8(json, 'A') as res FROM jsons order by res; +SELECT positionCaseInsensitive(json, 'A') as res FROM jsons order by res; + +SELECT materialize(CAST('a', 'Enum(\'a\' = 1)')) LIKE randomString(0) from numbers(10); +SELECT CAST('a', 'Enum(\'a\' = 1)') LIKE randomString(0); -- {serverError ILLEGAL_COLUMN} + +SELECT materialize(CAST('a', 'Enum16(\'a\' = 1)')) LIKE randomString(0) from numbers(10); +SELECT CAST('a', 'Enum16(\'a\' = 1)') LIKE randomString(0); -- {serverError ILLEGAL_COLUMN} + +SELECT CAST('a', 'Enum(\'a\' = 1)') LIKE 'a'; +SELECT materialize(CAST('a', 'Enum(\'a\' = 1)')) LIKE 'a' from numbers(10); diff --git a/parser/testdata/03279_array_normalized_gini/ast.json b/parser/testdata/03279_array_normalized_gini/ast.json new file mode 100644 index 000000000..cfbb9e5bf --- /dev/null +++ b/parser/testdata/03279_array_normalized_gini/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayNormalizedGini (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[Float64_0.9, Float64_0.3, 
Float64_0.8, Float64_0.7]" + }, + { + "explain": " Literal Array_[UInt64_6, UInt64_1, UInt64_0, UInt64_2]" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001318892, + "rows_read": 8, + "bytes_read": 390 + } +} diff --git a/parser/testdata/03279_array_normalized_gini/metadata.json b/parser/testdata/03279_array_normalized_gini/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03279_array_normalized_gini/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03279_array_normalized_gini/query.sql b/parser/testdata/03279_array_normalized_gini/query.sql new file mode 100644 index 000000000..e8e655418 --- /dev/null +++ b/parser/testdata/03279_array_normalized_gini/query.sql @@ -0,0 +1,24 @@ +SELECT arrayNormalizedGini([0.9, 0.3, 0.8, 0.7], [6, 1, 0, 2]); +SELECT arrayNormalizedGini([0.9, 0.3, 0.8, 0.7], [6, 1, 0, 2, 1]); -- { serverError ILLEGAL_COLUMN } + +SELECT arrayNormalizedGini([0.9, 0.3, 0.8, 0.75, 0.65, 0.6, 0.78, 0.7, 0.05, 0.4, 0.4, 0.05, 0.5, 0.1, 0.1], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]); + +SELECT arrayNormalizedGini(arrayResize([1], 2000000), arrayResize([1], 2000000)); -- { serverError TOO_LARGE_ARRAY_SIZE } + +DROP TABLE IF EXISTS t; +CREATE TABLE t +( + `a1` Array(Float32), + `a2` Array(UInt32) +) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO t VALUES ([0.9, 0.3, 0.8, 0.7], [6, 1, 0, 2]), ([0.9, 0.3, 0.8, 0.7], [6, 1, 0, 2]), ([0.9, 0.3, 0.8, 0.7], [6, 1, 0, 2]), ([0.9, 0.3, 0.8, 0.7], [6, 1, 0, 2]); + +SELECT arrayNormalizedGini(a1, a2) FROM t; + +SELECT arrayNormalizedGini(a1, [6, 1, 0, 2]) FROM t; +SELECT arrayNormalizedGini([0.9, 0.3, 0.8, 0.7], a2) FROM t; + +DROP TABLE t; diff --git a/parser/testdata/03279_database_backup_database_disk_engine/ast.json b/parser/testdata/03279_database_backup_database_disk_engine/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03279_database_backup_database_disk_engine/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03279_database_backup_database_disk_engine/metadata.json b/parser/testdata/03279_database_backup_database_disk_engine/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03279_database_backup_database_disk_engine/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03279_database_backup_database_disk_engine/query.sql b/parser/testdata/03279_database_backup_database_disk_engine/query.sql new file mode 100644 index 000000000..399471d23 --- /dev/null +++ b/parser/testdata/03279_database_backup_database_disk_engine/query.sql @@ -0,0 +1,38 @@ +-- Tags: no-parallel, no-fasttest, no-flaky-check, no-encrypted-storage +-- Because we are creating a backup with fixed path. 
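As a cross-check on the arrayNormalizedGini expectations earlier in this hunk, the conventional normalized-Gini computation (cumulative sum of labels ranked by prediction descending, scaled by the same statistic under a perfect ranking) can be sketched in Go; that ClickHouse uses exactly this scaling convention is an assumption:

```go
package main

import (
	"fmt"
	"sort"
)

// gini computes the unnormalized Gini statistic of actual values
// when rows are ranked by pred, descending.
func gini(actual, pred []float64) float64 {
	n := len(actual)
	idx := make([]int, n)
	for i := range idx {
		idx[i] = i
	}
	// Rank by prediction, descending; ties keep original order.
	sort.SliceStable(idx, func(a, b int) bool { return pred[idx[a]] > pred[idx[b]] })

	var total, cum, giniSum float64
	for _, i := range idx {
		total += actual[i]
	}
	for _, i := range idx {
		cum += actual[i]
		giniSum += cum // running area under the cumulative-gains curve
	}
	giniSum = giniSum/total - float64(n+1)/2.0
	return giniSum / float64(n)
}

// normalizedGini scales by the statistic of a perfect ranking,
// so 1.0 means predictions order the labels perfectly.
func normalizedGini(actual, pred []float64) float64 {
	return gini(actual, pred) / gini(actual, actual)
}

func main() {
	// Same inputs as the first arrayNormalizedGini call above.
	pred := []float64{0.9, 0.3, 0.8, 0.7}
	labels := []float64{6, 1, 0, 2}
	fmt.Println(normalizedGini(labels, pred))
}
```

Under this convention the example prints roughly 0.684 — a ranking that is good but not perfect.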
+ +DROP DATABASE IF EXISTS 03279_test_database; +CREATE DATABASE 03279_test_database; + +CREATE TABLE 03279_test_database.test_table_1 (id UInt64, value String) ENGINE = MergeTree ORDER BY id; +INSERT INTO 03279_test_database.test_table_1 SELECT number, number FROM numbers(15000); + +CREATE TABLE 03279_test_database.test_table_2 (id UInt64, value String) ENGINE = MergeTree ORDER BY id; +INSERT INTO 03279_test_database.test_table_2 SELECT number, number FROM numbers(15000); + +SELECT (id % 10) AS key, count() FROM 03279_test_database.test_table_1 GROUP BY key ORDER BY key; + +SELECT '--'; + +SELECT (id % 10) AS key, count() FROM 03279_test_database.test_table_2 GROUP BY key ORDER BY key; + +BACKUP DATABASE 03279_test_database TO Disk('backups', '03279_test_database') FORMAT Null; + +SELECT '--'; + +DROP DATABASE IF EXISTS 03279_test_database_backup_database; +CREATE DATABASE 03279_test_database_backup_database ENGINE = Backup('03279_test_database', Disk('backups', '03279_test_database')); + +SELECT name, total_rows FROM system.tables WHERE database = '03279_test_database_backup_database' ORDER BY name; + +SELECT '--'; + +SELECT (id % 10) AS key, count() FROM 03279_test_database_backup_database.test_table_1 GROUP BY key ORDER BY key; + +SELECT '--'; + +SELECT (id % 10) AS key, count() FROM 03279_test_database_backup_database.test_table_2 GROUP BY key ORDER BY key; + +DROP DATABASE 03279_test_database_backup_database; + +DROP DATABASE 03279_test_database; diff --git a/parser/testdata/03279_join_choose_build_table/ast.json b/parser/testdata/03279_join_choose_build_table/ast.json new file mode 100644 index 000000000..ee0bdc48f --- /dev/null +++ b/parser/testdata/03279_join_choose_build_table/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001122756, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03279_join_choose_build_table/metadata.json b/parser/testdata/03279_join_choose_build_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03279_join_choose_build_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03279_join_choose_build_table/query.sql b/parser/testdata/03279_join_choose_build_table/query.sql new file mode 100644 index 000000000..9c2970129 --- /dev/null +++ b/parser/testdata/03279_join_choose_build_table/query.sql @@ -0,0 +1,100 @@ +SET allow_statistics_optimize = 0; +SET use_skip_indexes_on_data_read = 0; -- for correct row count estimation in join order planning +DROP TABLE IF EXISTS products; +DROP TABLE IF EXISTS sales; + +SET enable_analyzer = 1; + +CREATE TABLE sales ( + id Int32, + date Date, + amount Decimal(10, 2), + product_id Int32 +) ENGINE = MergeTree ORDER BY id +SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO sales SELECT number, '2024-05-05' + INTERVAL intDiv(number, 1000) DAY , (number + 1) % 100, number % 100_000 FROM numbers(1_000_000); + +CREATE TABLE products (id Int32, name String) ENGINE = MergeTree ORDER BY id; +INSERT INTO products SELECT number, 'product ' || toString(number) FROM numbers(100_000); + +SET query_plan_join_swap_table = 'auto'; +SET query_plan_optimize_join_order_limit = 2; + +SELECT * FROM products, sales +WHERE sales.product_id = products.id AND date = '2024-05-07' +SETTINGS log_comment = '03279_join_choose_build_table_no_idx' FORMAT Null; + +SELECT * FROM 
sales, products +WHERE sales.product_id = products.id AND date = '2024-05-07' +SETTINGS log_comment = '03279_join_choose_build_table_no_idx' FORMAT Null; + +SET mutations_sync = 2; +ALTER TABLE sales ADD INDEX date_idx date TYPE minmax GRANULARITY 1; +ALTER TABLE sales MATERIALIZE INDEX date_idx; + +SELECT * FROM products, sales +WHERE sales.product_id = products.id AND date = '2024-05-07' +SETTINGS log_comment = '03279_join_choose_build_table_idx' FORMAT Null; + +SELECT * FROM sales, products +WHERE sales.product_id = products.id AND date = '2024-05-07' +SETTINGS log_comment = '03279_join_choose_build_table_idx' FORMAT Null; + +SYSTEM FLUSH LOGS query_log; + +-- conditions are pushed down, but no filter by index applied +-- build table is as it's written in query + +SELECT + if(ProfileEvents['JoinBuildTableRowCount'] BETWEEN 100 AND 2000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinBuildTableRowCount'])), + if(ProfileEvents['JoinProbeTableRowCount'] BETWEEN 90_000 AND 110_000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinProbeTableRowCount'])), + if(ProfileEvents['JoinResultRowCount'] == 1000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinResultRowCount'])), + Settings['query_plan_join_swap_table'], +FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() AND query_kind = 'Select' AND current_database = currentDatabase() +AND query like '%products, sales%' +AND log_comment = '03279_join_choose_build_table_no_idx' +ORDER BY event_time DESC +LIMIT 1; + +SELECT + if(ProfileEvents['JoinBuildTableRowCount'] BETWEEN 90_000 AND 110_000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinBuildTableRowCount'])), + if(ProfileEvents['JoinProbeTableRowCount'] BETWEEN 100 AND 2000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinProbeTableRowCount'])), + if(ProfileEvents['JoinResultRowCount'] == 1000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinResultRowCount'])), + Settings['query_plan_join_swap_table'], +FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() AND query_kind = 'Select' AND current_database = currentDatabase() +AND query like '%sales, products%' +AND log_comment = '03279_join_choose_build_table_no_idx' +ORDER BY event_time DESC +LIMIT 1; + +-- after adding index, optimizer can choose best table order + +SELECT + if(ProfileEvents['JoinBuildTableRowCount'] BETWEEN 100 AND 2000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinBuildTableRowCount'])), + if(ProfileEvents['JoinProbeTableRowCount'] BETWEEN 90_000 AND 110_000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinProbeTableRowCount'])), + if(ProfileEvents['JoinResultRowCount'] == 1000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinResultRowCount'])), + Settings['query_plan_join_swap_table'], +FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() AND query_kind = 'Select' AND current_database = currentDatabase() +AND query like '%products, sales%' +AND log_comment = '03279_join_choose_build_table_idx' +ORDER BY event_time DESC +LIMIT 1; + +SELECT + if(ProfileEvents['JoinBuildTableRowCount'] BETWEEN 100 AND 2000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinBuildTableRowCount'])), + if(ProfileEvents['JoinProbeTableRowCount'] BETWEEN 90_000 AND 110_000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinProbeTableRowCount'])), + if(ProfileEvents['JoinResultRowCount'] == 1000, 'ok', format('fail({}): {}', query_id, 
ProfileEvents['JoinResultRowCount'])), + Settings['query_plan_join_swap_table'], +FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() AND query_kind = 'Select' AND current_database = currentDatabase() +AND query like '%sales, products%' +AND log_comment = '03279_join_choose_build_table_idx' +ORDER BY event_time DESC +LIMIT 1; + +DROP TABLE IF EXISTS products; +DROP TABLE IF EXISTS sales; diff --git a/parser/testdata/03279_join_choose_build_table_auto_statistics/ast.json b/parser/testdata/03279_join_choose_build_table_auto_statistics/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03279_join_choose_build_table_auto_statistics/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03279_join_choose_build_table_auto_statistics/metadata.json b/parser/testdata/03279_join_choose_build_table_auto_statistics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03279_join_choose_build_table_auto_statistics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03279_join_choose_build_table_auto_statistics/query.sql b/parser/testdata/03279_join_choose_build_table_auto_statistics/query.sql new file mode 100644 index 000000000..dab5656bf --- /dev/null +++ b/parser/testdata/03279_join_choose_build_table_auto_statistics/query.sql @@ -0,0 +1,64 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS products; +DROP TABLE IF EXISTS sales; + +SET enable_analyzer = 1; + +CREATE TABLE sales ( + id Int32, + date Date, + amount Decimal(10, 2), + product_id Int32 +) ENGINE = MergeTree ORDER BY id +SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi', auto_statistics_types = 'countmin'; + +INSERT INTO sales SELECT number, '2024-05-05' + INTERVAL intDiv(number, 1000) DAY , (number + 1) % 100, number % 100_000 FROM numbers(1_000_000); + +CREATE TABLE products (id Int32, name String) ENGINE = MergeTree ORDER BY id; +INSERT INTO products SELECT number, 'product ' || toString(number) FROM numbers(100_000); + +SET query_plan_join_swap_table = 'auto'; +SET query_plan_optimize_join_order_limit = 2; +SET allow_statistics_optimize=1; +SET allow_experimental_statistics=1; + +SELECT * FROM products, sales +WHERE sales.product_id = products.id AND date = '2024-05-07' +SETTINGS log_comment = '03279_join_choose_build_table_stats' FORMAT Null; + +SELECT * FROM sales, products +WHERE sales.product_id = products.id AND date = '2024-05-07' +SETTINGS log_comment = '03279_join_choose_build_table_stats' FORMAT Null; + +SYSTEM FLUSH LOGS query_log; + +-- conditions are pushed down, but no filter by index applied +-- build table is as it's written in query + +SELECT + if(ProfileEvents['JoinBuildTableRowCount'] BETWEEN 100 AND 2000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinBuildTableRowCount'])), + if(ProfileEvents['JoinProbeTableRowCount'] BETWEEN 90_000 AND 110_000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinProbeTableRowCount'])), + if(ProfileEvents['JoinResultRowCount'] == 1000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinResultRowCount'])), + Settings['query_plan_join_swap_table'], +FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() AND query_kind = 'Select' AND current_database = currentDatabase() +AND query like '%products, sales%' +AND log_comment = '03279_join_choose_build_table_stats' +ORDER BY event_time DESC +LIMIT 1; + +SELECT + if(ProfileEvents['JoinBuildTableRowCount'] BETWEEN 100 AND 2000, 
'ok', format('fail({}): {}', query_id, ProfileEvents['JoinBuildTableRowCount'])), + if(ProfileEvents['JoinProbeTableRowCount'] BETWEEN 90_000 AND 110_000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinProbeTableRowCount'])), + if(ProfileEvents['JoinResultRowCount'] == 1000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinResultRowCount'])), + Settings['query_plan_join_swap_table'], +FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() AND query_kind = 'Select' AND current_database = currentDatabase() +AND query like '%sales, products%' +AND log_comment = '03279_join_choose_build_table_stats' +ORDER BY event_time DESC +LIMIT 1; + +DROP TABLE IF EXISTS products; +DROP TABLE IF EXISTS sales; diff --git a/parser/testdata/03279_join_choose_build_table_statistics/ast.json b/parser/testdata/03279_join_choose_build_table_statistics/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03279_join_choose_build_table_statistics/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03279_join_choose_build_table_statistics/metadata.json b/parser/testdata/03279_join_choose_build_table_statistics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03279_join_choose_build_table_statistics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03279_join_choose_build_table_statistics/query.sql b/parser/testdata/03279_join_choose_build_table_statistics/query.sql new file mode 100644 index 000000000..627a9bc7c --- /dev/null +++ b/parser/testdata/03279_join_choose_build_table_statistics/query.sql @@ -0,0 +1,102 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS products; +DROP TABLE IF EXISTS sales; + +SET enable_analyzer = 1; + +CREATE TABLE sales ( + id Int32, + date Date, + amount Decimal(10, 2), + product_id Int32 +) ENGINE = MergeTree ORDER BY id +SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi', auto_statistics_types = ''; + +INSERT INTO sales SELECT number, '2024-05-05' + INTERVAL intDiv(number, 1000) DAY , (number + 1) % 100, number % 100_000 FROM numbers(1_000_000); + +CREATE TABLE products (id Int32, name String) ENGINE = MergeTree ORDER BY id; +INSERT INTO products SELECT number, 'product ' || toString(number) FROM numbers(100_000); + +SET query_plan_join_swap_table = 'auto'; +SET query_plan_optimize_join_order_limit = 2; +SET allow_statistics_optimize=1; +SET allow_experimental_statistics=1; + +SELECT * FROM products, sales +WHERE sales.product_id = products.id AND date = '2024-05-07' +SETTINGS log_comment = '03279_join_choose_build_table_no_stats' FORMAT Null; + +SELECT * FROM sales, products +WHERE sales.product_id = products.id AND date = '2024-05-07' +SETTINGS log_comment = '03279_join_choose_build_table_no_stats' FORMAT Null; + +SET mutations_sync = 2; +ALTER TABLE sales ADD STATISTICS date TYPE CountMin; +ALTER TABLE sales MATERIALIZE STATISTICS date; + +SELECT * FROM products, sales +WHERE sales.product_id = products.id AND date = '2024-05-07' +SETTINGS log_comment = '03279_join_choose_build_table_stats' FORMAT Null; + +SELECT * FROM sales, products +WHERE sales.product_id = products.id AND date = '2024-05-07' +SETTINGS log_comment = '03279_join_choose_build_table_stats' FORMAT Null; + +SYSTEM FLUSH LOGS query_log; + +-- conditions are pushed down, but no filter by index applied +-- build table is as it's written in query + +SELECT + if(ProfileEvents['JoinBuildTableRowCount'] BETWEEN 100 AND 2000, 'ok', 
format('fail({}): {}', query_id, ProfileEvents['JoinBuildTableRowCount'])), + if(ProfileEvents['JoinProbeTableRowCount'] BETWEEN 90_000 AND 110_000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinProbeTableRowCount'])), + if(ProfileEvents['JoinResultRowCount'] == 1000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinResultRowCount'])), + Settings['query_plan_join_swap_table'], +FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() AND query_kind = 'Select' AND current_database = currentDatabase() +AND query like '%products, sales%' +AND log_comment = '03279_join_choose_build_table_no_stats' +ORDER BY event_time DESC +LIMIT 1; + +SELECT + if(ProfileEvents['JoinBuildTableRowCount'] BETWEEN 90_000 AND 110_000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinBuildTableRowCount'])), + if(ProfileEvents['JoinProbeTableRowCount'] BETWEEN 100 AND 2000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinProbeTableRowCount'])), + if(ProfileEvents['JoinResultRowCount'] == 1000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinResultRowCount'])), + Settings['query_plan_join_swap_table'], +FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() AND query_kind = 'Select' AND current_database = currentDatabase() +AND query like '%sales, products%' +AND log_comment = '03279_join_choose_build_table_no_stats' +ORDER BY event_time DESC +LIMIT 1; + +-- after adding index, optimizer can choose best table order + +SELECT + if(ProfileEvents['JoinBuildTableRowCount'] BETWEEN 100 AND 2000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinBuildTableRowCount'])), + if(ProfileEvents['JoinProbeTableRowCount'] BETWEEN 90_000 AND 110_000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinProbeTableRowCount'])), + if(ProfileEvents['JoinResultRowCount'] == 1000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinResultRowCount'])), + Settings['query_plan_join_swap_table'], +FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() AND query_kind = 'Select' AND current_database = currentDatabase() +AND query like '%products, sales%' +AND log_comment = '03279_join_choose_build_table_stats' +ORDER BY event_time DESC +LIMIT 1; + +SELECT + if(ProfileEvents['JoinBuildTableRowCount'] BETWEEN 100 AND 2000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinBuildTableRowCount'])), + if(ProfileEvents['JoinProbeTableRowCount'] BETWEEN 90_000 AND 110_000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinProbeTableRowCount'])), + if(ProfileEvents['JoinResultRowCount'] == 1000, 'ok', format('fail({}): {}', query_id, ProfileEvents['JoinResultRowCount'])), + Settings['query_plan_join_swap_table'], +FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() AND query_kind = 'Select' AND current_database = currentDatabase() +AND query like '%sales, products%' +AND log_comment = '03279_join_choose_build_table_stats' +ORDER BY event_time DESC +LIMIT 1; + +DROP TABLE IF EXISTS products; +DROP TABLE IF EXISTS sales; diff --git a/parser/testdata/03279_not_empty_json/ast.json b/parser/testdata/03279_not_empty_json/ast.json new file mode 100644 index 000000000..5e8e1914b --- /dev/null +++ b/parser/testdata/03279_not_empty_json/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001204205, + "rows_read": 1, + "bytes_read": 11 + } 
+} diff --git a/parser/testdata/03279_not_empty_json/metadata.json b/parser/testdata/03279_not_empty_json/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03279_not_empty_json/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03279_not_empty_json/query.sql b/parser/testdata/03279_not_empty_json/query.sql new file mode 100644 index 000000000..d4080b725 --- /dev/null +++ b/parser/testdata/03279_not_empty_json/query.sql @@ -0,0 +1,23 @@ +set enable_json_type=1; + +create table test (json JSON) engine=Memory; +insert into test values ('{}'), ('{"a" : 42}'), ('{"b" : {"c" : 42}}'); +select json, notEmpty(json) from test; +drop table test; + +create table test (json JSON(a UInt32)) engine=Memory; +insert into test values ('{}'), ('{"a" : 42}'), ('{"b" : {"c" : 42}}'); +select json, notEmpty(json) from test; +drop table test; + +create table test (json JSON(max_dynamic_paths=1)) engine=Memory; +insert into test values ('{}'), ('{"a" : 42}'), ('{"b" : {"c" : 42}}'); +select json, notEmpty(json) from test; +drop table test; + +create table test (json JSON(max_dynamic_paths=0)) engine=Memory; +insert into test values ('{}'), ('{"a" : 42}'), ('{"b" : {"c" : 42}}'); +select json, notEmpty(json) from test; +drop table test; + + diff --git a/parser/testdata/03280_aliases_for_selects_and_views/ast.json b/parser/testdata/03280_aliases_for_selects_and_views/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03280_aliases_for_selects_and_views/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03280_aliases_for_selects_and_views/metadata.json b/parser/testdata/03280_aliases_for_selects_and_views/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03280_aliases_for_selects_and_views/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03280_aliases_for_selects_and_views/query.sql b/parser/testdata/03280_aliases_for_selects_and_views/query.sql new file mode 100644 index 000000000..bccc89e78 --- /dev/null +++ b/parser/testdata/03280_aliases_for_selects_and_views/query.sql @@ -0,0 +1,75 @@ +EXPLAIN AST CREATE VIEW test_view_1_03280 (a, b] AS SELECT 1, 2; -- { clientError SYNTAX_ERROR } + +EXPLAIN AST CREATE VIEW test_view_1_03280 ((a, b)) AS SELECT 1, 2; -- { clientError SYNTAX_ERROR } + +SET enable_analyzer = 1; + +SELECT b FROM +( + SELECT number, number*2 + FROM numbers(2) +) AS x (a, b); + +SELECT a FROM +( + SELECT number, number*2 + FROM numbers(2) +) AS x (a, b); + +SELECT a FROM +( + SELECT number, number*2 + FROM numbers(2) +) AS x (a); -- { serverError BAD_ARGUMENTS } + +SELECT c FROM +( + SELECT number, number*2 + FROM numbers(2) +) as x (a, b); -- { serverError UNKNOWN_IDENTIFIER } + +DROP VIEW IF EXISTS test_view_03280; + +CREATE VIEW test_view_03280 (a,b) AS SELECT 1, 2; + +SELECT a FROM test_view_03280; + +SELECT b FROM test_view_03280; + +SELECT c FROM test_view_03280; -- { serverError UNKNOWN_IDENTIFIER } + +DROP VIEW IF EXISTS test_view_03280; + +CREATE VIEW test_view_1_03280 (a) AS SELECT 1, 2; -- { serverError BAD_ARGUMENTS } + +WITH t (a, b) AS ( + SELECT 1, 2 +) +SELECT a +FROM t; + +WITH t (a, b) AS ( + SELECT 1, 2 +) +SELECT b +FROM t; + +WITH t (a) AS ( + SELECT * FROM numbers(1) +) +SELECT a +FROM t; + +explain query tree dump_ast = 1 WITH t (a, b) AS (SELECT 1, 2) SELECT b FROM t; + +WITH t (a) AS ( + SELECT 1, 2 +) +SELECT b +FROM t; -- { serverError BAD_ARGUMENTS } + +WITH t (a, b) AS ( + 
SELECT 1, 2 +) +SELECT c +FROM t; -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/03280_dynamic_if_null/ast.json b/parser/testdata/03280_dynamic_if_null/ast.json new file mode 100644 index 000000000..50c876a06 --- /dev/null +++ b/parser/testdata/03280_dynamic_if_null/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001044473, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03280_dynamic_if_null/metadata.json b/parser/testdata/03280_dynamic_if_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03280_dynamic_if_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03280_dynamic_if_null/query.sql b/parser/testdata/03280_dynamic_if_null/query.sql new file mode 100644 index 000000000..1fb956467 --- /dev/null +++ b/parser/testdata/03280_dynamic_if_null/query.sql @@ -0,0 +1,4 @@ +set enable_dynamic_type = 1; + +select ifNull(number % 2 ? NULL : number::Dynamic, 42) from numbers(5); + diff --git a/parser/testdata/03281_dynamic_coalesce/ast.json b/parser/testdata/03281_dynamic_coalesce/ast.json new file mode 100644 index 000000000..f2685130e --- /dev/null +++ b/parser/testdata/03281_dynamic_coalesce/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001237472, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03281_dynamic_coalesce/metadata.json b/parser/testdata/03281_dynamic_coalesce/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03281_dynamic_coalesce/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03281_dynamic_coalesce/query.sql b/parser/testdata/03281_dynamic_coalesce/query.sql new file mode 100644 index 000000000..1eb25d3d8 --- /dev/null +++ b/parser/testdata/03281_dynamic_coalesce/query.sql @@ -0,0 +1,15 @@ +set enable_dynamic_type=1; + +select coalesce(number % 2 ? NULL : number::Dynamic, 42) as res from numbers(5); +select coalesce(number % 2 ? NULL : number::Dynamic, number % 3 ? NULL : 42) as res from numbers(5); +select coalesce(number % 2 ? NULL : number, number % 3 ? NULL : 42::Dynamic) as res from numbers(5); +select coalesce(number % 2 ? NULL : number::Dynamic, number % 3 ? NULL : 42::Dynamic) as res from numbers(5); +select coalesce(number % 2 ? NULL : number::Dynamic, number % 3 ? NULL : 42, number % 4 == 1 ? NULL : 43) as res from numbers(10); +select coalesce(number % 2 ? NULL : number, number % 3 ? NULL : 42::Dynamic, number % 4 == 1 ? NULL : 43) as res from numbers(10); +select coalesce(number % 2 ? NULL : number, number % 3 ? NULL : 42, number % 4 == 1 ? NULL : 43::Dynamic) as res from numbers(10); +select coalesce(number % 2 ? NULL : number, number % 3 ? NULL : 42, number % 4 == 1 ? NULL : 43::Dynamic) as res from numbers(10); +select coalesce(number % 2 ? NULL : number::Dynamic, number % 3 ? NULL : 42::Dynamic, number % 4 == 1 ? NULL : 43) as res from numbers(10); +select coalesce(number % 2 ? NULL : number, number % 3 ? NULL : 42::Dynamic, number % 4 == 1 ? NULL : 43::Dynamic) as res from numbers(10); +select coalesce(number % 2 ? NULL : number::Dynamic, number % 3 ? NULL : 42, number % 4 == 1 ? NULL : 43::Dynamic) as res from numbers(10); +select coalesce(number % 2 ? 
NULL : number::Dynamic, number % 3 ? NULL : 42::Dynamic, number % 4 == 1 ? NULL : 43::Dynamic) as res from numbers(10); + diff --git a/parser/testdata/03282_block_number_otehr_mutations/ast.json b/parser/testdata/03282_block_number_otehr_mutations/ast.json new file mode 100644 index 000000000..0147c32ba --- /dev/null +++ b/parser/testdata/03282_block_number_otehr_mutations/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001436565, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03282_block_number_otehr_mutations/metadata.json b/parser/testdata/03282_block_number_otehr_mutations/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03282_block_number_otehr_mutations/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03282_block_number_otehr_mutations/query.sql b/parser/testdata/03282_block_number_otehr_mutations/query.sql new file mode 100644 index 000000000..e9ff928fc --- /dev/null +++ b/parser/testdata/03282_block_number_otehr_mutations/query.sql @@ -0,0 +1,32 @@ +SET mutations_sync = 2; + +DROP TABLE IF EXISTS t_block_number_proj; + +CREATE TABLE t_block_number_proj (a UInt64, b UInt64) ENGINE = MergeTree ORDER BY a +SETTINGS enable_block_number_column = 1, min_bytes_for_wide_part = 0, index_granularity = 128; + +INSERT INTO t_block_number_proj SELECT number, number FROM numbers(1000); + +OPTIMIZE TABLE t_block_number_proj FINAL; + +ALTER TABLE t_block_number_proj ADD PROJECTION p (SELECT a, b ORDER BY b); +ALTER TABLE t_block_number_proj MATERIALIZE PROJECTION p; + +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +SELECT a, b FROM t_block_number_proj WHERE b = 5 SETTINGS force_optimize_projection = 1; + +DROP TABLE t_block_number_proj; + +DROP TABLE IF EXISTS t_block_number_ttl; + +CREATE TABLE t_block_number_ttl (d Date, a UInt64, b UInt64) ENGINE = MergeTree ORDER BY a +SETTINGS enable_block_number_column = 1, min_bytes_for_wide_part = 0, index_granularity = 128; + +INSERT INTO t_block_number_ttl VALUES ('2000-10-10', 1, 1) ('2100-10-10', 1, 1); +OPTIMIZE TABLE t_block_number_ttl FINAL; + +ALTER TABLE t_block_number_ttl MODIFY TTL d + INTERVAL 1 MONTH; +SELECT * FROM t_block_number_ttl ORDER BY a; + +DROP TABLE t_block_number_ttl; diff --git a/parser/testdata/03282_dynamic_in_functions_convert/ast.json b/parser/testdata/03282_dynamic_in_functions_convert/ast.json new file mode 100644 index 000000000..897b3204d --- /dev/null +++ b/parser/testdata/03282_dynamic_in_functions_convert/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001036097, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03282_dynamic_in_functions_convert/metadata.json b/parser/testdata/03282_dynamic_in_functions_convert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03282_dynamic_in_functions_convert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03282_dynamic_in_functions_convert/query.sql b/parser/testdata/03282_dynamic_in_functions_convert/query.sql new file mode 100644 index 000000000..9ad378c9e --- /dev/null +++ 
b/parser/testdata/03282_dynamic_in_functions_convert/query.sql @@ -0,0 +1,6 @@ +set enable_dynamic_type = 1; +create table test (d Dynamic(max_types=3)) engine=Memory; +insert into test values (1::UInt8), (2::UInt16), (3::UInt32), (4::UInt64), ('5'::String), ('2020-01-01'::Date); +select toFloat64(d) from test; +select toUInt32(d) from test; +drop table test; diff --git a/parser/testdata/03282_highlight_trailing_whitespace_pretty/ast.json b/parser/testdata/03282_highlight_trailing_whitespace_pretty/ast.json new file mode 100644 index 000000000..00f921733 --- /dev/null +++ b/parser/testdata/03282_highlight_trailing_whitespace_pretty/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001178978, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03282_highlight_trailing_whitespace_pretty/metadata.json b/parser/testdata/03282_highlight_trailing_whitespace_pretty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03282_highlight_trailing_whitespace_pretty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03282_highlight_trailing_whitespace_pretty/query.sql b/parser/testdata/03282_highlight_trailing_whitespace_pretty/query.sql new file mode 100644 index 000000000..c8f3daaa2 --- /dev/null +++ b/parser/testdata/03282_highlight_trailing_whitespace_pretty/query.sql @@ -0,0 +1,22 @@ +SET output_format_pretty_display_footer_column_names = 0; +SET output_format_pretty_color = 1; +SET output_format_pretty_highlight_trailing_spaces = 1; +SET output_format_pretty_fallback_to_vertical = 0; + +DROP TABLE IF EXISTS strings_whitespace; +CREATE TABLE strings_whitespace (str String) ENGINE = Memory; + +INSERT INTO strings_whitespace VALUES ('string0'), ('string1 '), ('string2 '), ('string3 '), ('string4\n'), ('string5\r'), +('string6\t'), ('string7 \t'), ('string8\t\t'), ('string9 \n'), ('string10\t\n\r'), +('string11 '), ('string 12'), ('string 13 '), +(''), (' '), ('\n'), ('\r'), ('\t'), ('\t\t\t\n\r'); + +SELECT * FROM strings_whitespace FORMAT Pretty; +SELECT * FROM strings_whitespace FORMAT PrettyCompact; +SELECT * FROM strings_whitespace FORMAT PrettyMonoBlock; +SELECT * FROM strings_whitespace FORMAT PrettySpace; +SELECT * FROM strings_whitespace FORMAT PrettyCompactMonoBlock; +SELECT * FROM strings_whitespace FORMAT PrettySpaceMonoBlock; +SELECT * FROM strings_whitespace FORMAT Vertical; + +DROP TABLE strings_whitespace; diff --git a/parser/testdata/03282_join_distributed_no_columns/ast.json b/parser/testdata/03282_join_distributed_no_columns/ast.json new file mode 100644 index 000000000..ac741f917 --- /dev/null +++ b/parser/testdata/03282_join_distributed_no_columns/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001205372, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03282_join_distributed_no_columns/metadata.json b/parser/testdata/03282_join_distributed_no_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03282_join_distributed_no_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03282_join_distributed_no_columns/query.sql b/parser/testdata/03282_join_distributed_no_columns/query.sql new file mode 100644 index 
000000000..97c0ea939 --- /dev/null +++ b/parser/testdata/03282_join_distributed_no_columns/query.sql @@ -0,0 +1,23 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS source_table1; +DROP TABLE IF EXISTS source_table2; +DROP TABLE IF EXISTS distributed_table1; +DROP TABLE IF EXISTS distributed_table2; + +CREATE TABLE source_table1 (a Int64, b String) ENGINE = MergeTree ORDER BY a; +CREATE TABLE source_table2 (c Int64, d String) ENGINE = MergeTree ORDER BY c; + +INSERT INTO source_table1 VALUES (42, 'qwe'); +INSERT INTO source_table2 VALUES (42, 'asd'); + +CREATE TABLE distributed_table1 AS source_table1 +ENGINE = Distributed('test_shard_localhost', currentDatabase(), source_table1); + +CREATE TABLE distributed_table2 AS source_table2 +ENGINE = Distributed('test_shard_localhost', currentDatabase(), source_table2); + +SELECT 1 FROM distributed_table1 AS t1 GLOBAL JOIN distributed_table2 AS t2 ON materialize(42) = t1.a; +SELECT count() FROM distributed_table1 AS t1 GLOBAL JOIN distributed_table2 AS t2 ON materialize(42) = t1.a; +SELECT t1.* FROM distributed_table1 AS t1 GLOBAL JOIN distributed_table2 AS t2 ON materialize(42) = t1.a; +SELECT t2.* FROM distributed_table1 AS t1 GLOBAL JOIN distributed_table2 AS t2 ON materialize(42) = t1.a; diff --git a/parser/testdata/03282_json_equal_comparison/ast.json b/parser/testdata/03282_json_equal_comparison/ast.json new file mode 100644 index 000000000..42c60972b --- /dev/null +++ b/parser/testdata/03282_json_equal_comparison/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001137759, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03282_json_equal_comparison/metadata.json b/parser/testdata/03282_json_equal_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03282_json_equal_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03282_json_equal_comparison/query.sql b/parser/testdata/03282_json_equal_comparison/query.sql new file mode 100644 index 000000000..bc6565612 --- /dev/null +++ b/parser/testdata/03282_json_equal_comparison/query.sql @@ -0,0 +1,23 @@ +set enable_json_type=1; +set output_format_native_write_json_as_string=0; +create table test (json1 JSON(max_dynamic_paths=2, a UInt32), json2 JSON(max_dynamic_paths=2, a UInt32)) engine=Memory; +insert into test format JSONEachRow +{"json1" : {}, "json2" : {}} +{"json1" : {"a" : 42}, "json2" : {"a" : 42}} +{"json1" : {"a" : 42}, "json2" : {"a" : 43}} +{"json1" : {"a" : 42, "b" : 42}, "json2" : {"a" : 42, "b" : 42}} +{"json1" : {"a" : 42, "b" : 42}, "json2" : {"a" : 42, "b" : 43}} +{"json1" : {"a" : 42, "b" : 42, "c" : 42}, "json2" : {"a" : 42, "b" : 42, "d" : 42}} +{"json1" : {"a" : 42, "b" : 42, "c" : 42, "d" : 42}, "json2" : {"a" : 42, "b" : 42, "d" : 42, "c" : 42}} +{"json1" : {"a" : 42, "b" : 42, "c" : 42, "d" : 42}, "json2" : {"a" : 42, "b" : 42, "d" : 43, "c" : 42}} +{"json1" : {"a" : 42, "b" : 42, "c" : 42, "d" : 42}, "json2" : {"a" : 42, "b" : 42, "d" : 42, "c" : 43}} +{"json1" : {"a" : 42, "b" : 42, "c" : null, "d" : 42}, "json2" : {"a" : 42, "b" : 42, "d" : 42, "c" : null}} +{"json1" : {"a" : 42, "b" : 42, "c" : null, "d" : 42}, "json2" : {"a" : 42, "b" : 42, "d" : 42, "c" : 42}} +{"json1" : {"a" : 42, "b" : 42, "c" : 42, "d" : null}, "json2" : {"a" : 42, "b" : 42, "d" : null, "c" : 42}} +{"json1" : {"a" : 42, "b" : 42, "c" : 42, "d" 
: null}, "json2" : {"a" : 42, "b" : 42, "d" : 42, "c" : 42}} +{"json1" : {"a" : 42, "b" : 42, "c" : 42, "d" : 42, "e" : 42}, "json2" : {"a" : 42, "b" : 42, "d" : 42, "c" : 42, "e" : 42}} +{"json1" : {"a" : 42, "b" : 42, "c" : 42, "d" : 42, "e" : 42}, "json2" : {"a" : 42, "b" : 42, "d" : 42, "c" : 42, "e" : "42"}} +{"json1" : {"a" : 42, "b" : 42, "c" : 42, "d" : 42, "e" : 42}, "json2" : {"a" : 42, "b" : 42, "d" : 42, "c" : 42, "e" : 42.0}}; + +select json1, json2, json1 == json2, json1 != json2 from test; +drop table test; diff --git a/parser/testdata/03282_materialized_views_ignore_errors/ast.json b/parser/testdata/03282_materialized_views_ignore_errors/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03282_materialized_views_ignore_errors/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03282_materialized_views_ignore_errors/metadata.json b/parser/testdata/03282_materialized_views_ignore_errors/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03282_materialized_views_ignore_errors/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03282_materialized_views_ignore_errors/query.sql b/parser/testdata/03282_materialized_views_ignore_errors/query.sql new file mode 100644 index 000000000..c5f4e94ee --- /dev/null +++ b/parser/testdata/03282_materialized_views_ignore_errors/query.sql @@ -0,0 +1,28 @@ +-- more blocks to process +set max_block_size = 10; +set min_insert_block_size_rows = 10; + +drop table if exists testX; +drop table if exists testXA; + +create table testX (A Int64) engine=MergeTree partition by (intDiv(A, 10), throwIf(A=2)) order by tuple(); +create materialized view testXA engine=MergeTree order by tuple() as select sleep(0.1) from testX; + +-- { echoOn } + +insert into testX select number from numbers(20) + settings materialized_views_ignore_errors = 0; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } + +select count() from testX; +select count() from testXA; + +insert into testX select number from numbers(20) + settings materialized_views_ignore_errors = 1; -- { serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO } + +select count() from testX; +select count() from testXA; + +-- { echoOff } + +drop table testX; +drop view testXA; diff --git a/parser/testdata/03282_memory_transaction_crash/ast.json b/parser/testdata/03282_memory_transaction_crash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03282_memory_transaction_crash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03282_memory_transaction_crash/metadata.json b/parser/testdata/03282_memory_transaction_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03282_memory_transaction_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03282_memory_transaction_crash/query.sql b/parser/testdata/03282_memory_transaction_crash/query.sql new file mode 100644 index 000000000..d8bd54412 --- /dev/null +++ b/parser/testdata/03282_memory_transaction_crash/query.sql @@ -0,0 +1,9 @@ +-- Tags: zookeeper, no-encrypted-storage +-- https://github.com/ClickHouse/ClickHouse/issues/72887 +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Int) ENGINE = Memory(); +BEGIN TRANSACTION; +EXPLAIN PLAN SELECT 1 FROM (SELECT 1) tx JOIN t0 ON TRUE; -- { serverError NOT_IMPLEMENTED } +ROLLBACK; +DROP TABLE IF EXISTS t0; diff --git 
a/parser/testdata/03282_parallel_join_with_additional_filter/ast.json b/parser/testdata/03282_parallel_join_with_additional_filter/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03282_parallel_join_with_additional_filter/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03282_parallel_join_with_additional_filter/metadata.json b/parser/testdata/03282_parallel_join_with_additional_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03282_parallel_join_with_additional_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03282_parallel_join_with_additional_filter/query.sql b/parser/testdata/03282_parallel_join_with_additional_filter/query.sql new file mode 100644 index 000000000..4dadc777a --- /dev/null +++ b/parser/testdata/03282_parallel_join_with_additional_filter/query.sql @@ -0,0 +1,24 @@ +CREATE TABLE t1 ( + key UInt32, + a UInt32, + attr String +) ENGINE = MergeTree ORDER BY key; + +CREATE TABLE t2 ( + key UInt32, + a UInt32, + attr String +) ENGINE = MergeTree ORDER BY key; + +INSERT INTO t1 (key, a, attr) VALUES (1, 10, 'alpha'), (2, 15, 'beta'), (3, 20, 'gamma'); +INSERT INTO t2 (key, a, attr) VALUES (1, 5, 'ALPHA'), (2, 10, 'beta'), (4, 25, 'delta'); + +SET allow_experimental_join_condition = 1; +SET enable_analyzer = 1; +SET max_threads = 16; + +SELECT '---- HASH'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.key = t2.key AND (t1.key < t2.a OR t1.a % 2 = 0) ORDER BY ALL SETTINGS join_algorithm = 'hash'; + +SELECT '---- PARALLEL HASH'; +SELECT t1.*, t2.* FROM t1 LEFT JOIN t2 ON t1.key = t2.key AND (t1.key < t2.a OR t1.a % 2 = 0) ORDER BY ALL SETTINGS join_algorithm = 'parallel_hash'; diff --git a/parser/testdata/03283_json_binary_serialization_use_default_setttings/ast.json b/parser/testdata/03283_json_binary_serialization_use_default_setttings/ast.json new file mode 100644 index 000000000..ed238dd19 --- /dev/null +++ b/parser/testdata/03283_json_binary_serialization_use_default_setttings/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001214628, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03283_json_binary_serialization_use_default_setttings/metadata.json b/parser/testdata/03283_json_binary_serialization_use_default_setttings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03283_json_binary_serialization_use_default_setttings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03283_json_binary_serialization_use_default_setttings/query.sql b/parser/testdata/03283_json_binary_serialization_use_default_setttings/query.sql new file mode 100644 index 000000000..d9fa1e204 --- /dev/null +++ b/parser/testdata/03283_json_binary_serialization_use_default_setttings/query.sql @@ -0,0 +1,8 @@ +set enable_json_type=1; +set output_format_binary_write_json_as_string=1; + +drop table if exists test; +create table test (json JSON(max_dynamic_paths=0)) engine=Memory; +insert into test format JSONAsObject {"a" : [{"b" : 42}]}; + +select * from test; diff --git a/parser/testdata/03283_optimize_on_insert_level/ast.json b/parser/testdata/03283_optimize_on_insert_level/ast.json new file mode 100644 index 000000000..0ca7b3eca --- /dev/null +++ b/parser/testdata/03283_optimize_on_insert_level/ast.json @@ -0,0 +1,25 @@ 
+{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001197252, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03283_optimize_on_insert_level/metadata.json b/parser/testdata/03283_optimize_on_insert_level/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03283_optimize_on_insert_level/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03283_optimize_on_insert_level/query.sql b/parser/testdata/03283_optimize_on_insert_level/query.sql new file mode 100644 index 000000000..70b758df0 --- /dev/null +++ b/parser/testdata/03283_optimize_on_insert_level/query.sql @@ -0,0 +1,42 @@ +SET insert_keeper_fault_injection_probability = 0; +SET max_threads = 4; + +DROP TABLE IF EXISTS t_optimize_level; + +CREATE TABLE t_optimize_level (a UInt64, b UInt64) +ENGINE = ReplacingMergeTree ORDER BY a +SETTINGS index_granularity = 1; + +SYSTEM STOP MERGES t_optimize_level; + +INSERT INTO t_optimize_level VALUES (1, 1) (1, 2) (2, 3); +INSERT INTO t_optimize_level VALUES (4, 3) (5, 4); + +SELECT _part, a, b FROM t_optimize_level ORDER BY a; +SELECT count() FROM (EXPLAIN PIPELINE SELECT a, b FROM t_optimize_level FINAL) WHERE explain LIKE '%Replacing%'; + +ALTER TABLE t_optimize_level DETACH PARTITION tuple(); +ALTER TABLE t_optimize_level ATTACH PARTITION tuple(); + +SELECT name FROM system.parts WHERE database = currentDatabase() AND table = 't_optimize_level' AND active; + +DROP TABLE t_optimize_level; + +CREATE TABLE t_optimize_level (a UInt64, b UInt64) +ENGINE = ReplicatedReplacingMergeTree('/clickhouse/tables/{database}/03283_optimize_on_insert_level', '1') ORDER BY a +SETTINGS index_granularity = 1; + +SYSTEM STOP MERGES t_optimize_level; + +INSERT INTO t_optimize_level VALUES (1, 1) (1, 2) (2, 3); +INSERT INTO t_optimize_level VALUES (4, 3) (5, 4); + +SELECT _part, a, b FROM t_optimize_level ORDER BY a; +SELECT count() FROM (EXPLAIN PIPELINE SELECT a, b FROM t_optimize_level FINAL) WHERE explain LIKE '%Replacing%'; + +ALTER TABLE t_optimize_level DETACH PARTITION tuple(); +ALTER TABLE t_optimize_level ATTACH PARTITION tuple(); + +SELECT name FROM system.parts WHERE database = currentDatabase() AND table = 't_optimize_level' AND active; + +DROP TABLE t_optimize_level; diff --git a/parser/testdata/03284_json_object_as_tuple_duplicate_keys/ast.json b/parser/testdata/03284_json_object_as_tuple_duplicate_keys/ast.json new file mode 100644 index 000000000..e09915c46 --- /dev/null +++ b/parser/testdata/03284_json_object_as_tuple_duplicate_keys/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier JSONEachRow" + }, + { + "explain": " Literal 'a Tuple(b UInt32)'" + }, + { + "explain": " Literal '{\"a\" : {\"b\" : 1, \"b\" : 2}}'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 
0.001188703, + "rows_read": 13, + "bytes_read": 521 + } +} diff --git a/parser/testdata/03284_json_object_as_tuple_duplicate_keys/metadata.json b/parser/testdata/03284_json_object_as_tuple_duplicate_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03284_json_object_as_tuple_duplicate_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03284_json_object_as_tuple_duplicate_keys/query.sql b/parser/testdata/03284_json_object_as_tuple_duplicate_keys/query.sql new file mode 100644 index 000000000..a60b8c948 --- /dev/null +++ b/parser/testdata/03284_json_object_as_tuple_duplicate_keys/query.sql @@ -0,0 +1,9 @@ +select * from format(JSONEachRow, 'a Tuple(b UInt32)', '{"a" : {"b" : 1, "b" : 2}}'); -- {serverError INCORRECT_DATA} +select * from format(JSONEachRow, '{"a" : {"b" : 1, "b" : 2}}'); -- {serverError INCORRECT_DATA} +select * from format(JSONEachRow, '{"a" : {"b" : 1, "b" : 2, "b" : 3}, "c" : 42}'); -- {serverError INCORRECT_DATA} +set input_format_json_use_string_type_for_ambiguous_paths_in_named_tuples_inference_from_objects=1; +desc format(JSONEachRow, '{"a" : {"b" : 1, "b" : "Hello"}}'); +select * from format(JSONEachRow, '{"a" : {"b" : 1, "b" : "Hello"}}'); -- {serverError INCORRECT_DATA} +desc format(JSONEachRow, '{"a" : {"b" : 1, "b" : {"c" : "Hello"}}}'); +select * from format(JSONEachRow, '{"a" : {"b" : 1, "b" : {"c" : "Hello"}}}'); -- {serverError INCORRECT_DATA} + diff --git a/parser/testdata/03285_analyzer_array_join_nested/ast.json b/parser/testdata/03285_analyzer_array_join_nested/ast.json new file mode 100644 index 000000000..d08484805 --- /dev/null +++ b/parser/testdata/03285_analyzer_array_join_nested/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001371815, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03285_analyzer_array_join_nested/metadata.json b/parser/testdata/03285_analyzer_array_join_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03285_analyzer_array_join_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03285_analyzer_array_join_nested/query.sql b/parser/testdata/03285_analyzer_array_join_nested/query.sql new file mode 100644 index 000000000..e779db908 --- /dev/null +++ b/parser/testdata/03285_analyzer_array_join_nested/query.sql @@ -0,0 +1,40 @@ +set enable_analyzer = 1; + +create table hourly( + hour datetime, + `metric.names` Array(String), + `metric.values` Array(Int64) +) Engine=Memory +as select '2020-01-01', ['a', 'b'], [1,2]; + +-- { echoOn } + +explain query tree dump_ast = 1 +SELECT + `metric.names` +from hourly ARRAY JOIN metric; + +explain query tree dump_ast = 1 +SELECT + metric.names +from hourly ARRAY JOIN metric; + +-- { echoOff } + +create table tab (`x.a` Array(String), `x.b.first` Array(Array(UInt32)), `x.b.second` Array(Array(String))) engine = MergeTree order by tuple(); + +insert into tab select ['a1', 'a2'], [[1, 2, 3], [4, 5, 6]], [['b1', 'b2', 'b3'], ['b4', 'b5', 'b6']]; + +-- { echoOn } + +SELECT nested(['click', 'house'], x.b.first, x.b.second) AS n, toTypeName(n) FROM tab; +SELECT nested([['click', 'house']], x.b.first, x.b.second) AS n, toTypeName(n) FROM tab; +SELECT nested([['click'], ['house']], x.b.first, x.b.second) AS n, toTypeName(n) FROM tab; -- {serverError BAD_ARGUMENTS} + +set 
analyzer_compatibility_allow_compound_identifiers_in_unflatten_nested = 0; +select x from tab; +select y, y.b.first, y.b.second from tab array join x as y; -- { serverError UNKNOWN_IDENTIFIER } + +set analyzer_compatibility_allow_compound_identifiers_in_unflatten_nested = 1; +select x from tab; +select y, y.b.first, y.b.second from tab array join x as y; diff --git a/parser/testdata/03285_analyzer_extract_common_expr_bug/ast.json b/parser/testdata/03285_analyzer_extract_common_expr_bug/ast.json new file mode 100644 index 000000000..420dd6564 --- /dev/null +++ b/parser/testdata/03285_analyzer_extract_common_expr_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001312064, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03285_analyzer_extract_common_expr_bug/metadata.json b/parser/testdata/03285_analyzer_extract_common_expr_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03285_analyzer_extract_common_expr_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03285_analyzer_extract_common_expr_bug/query.sql b/parser/testdata/03285_analyzer_extract_common_expr_bug/query.sql new file mode 100644 index 000000000..6f4b38496 --- /dev/null +++ b/parser/testdata/03285_analyzer_extract_common_expr_bug/query.sql @@ -0,0 +1,27 @@ +set enable_analyzer = 1; + +EXPLAIN QUERY TREE dump_ast = 1 +SELECT a +FROM numbers(10) +GROUP BY (number > 10) OR ((number > 2) AND (number > 10)) AS a +HAVING a +SETTINGS optimize_extract_common_expressions = 0; + +SELECT a +FROM numbers(10) +GROUP BY (number > 10) OR ((number > 2) AND (number > 10)) AS a +HAVING a +SETTINGS optimize_extract_common_expressions = 0; + +EXPLAIN QUERY TREE dump_ast = 1 +SELECT a +FROM numbers(10) +GROUP BY (number > 10) OR ((number > 2) AND (number > 10)) AS a +HAVING a +SETTINGS optimize_extract_common_expressions = 1; + +SELECT a +FROM numbers(10) +GROUP BY (number > 10) OR ((number > 2) AND (number > 10)) AS a +HAVING a +SETTINGS optimize_extract_common_expressions = 1; diff --git a/parser/testdata/03285_analyzer_optimize_disjunctions/ast.json b/parser/testdata/03285_analyzer_optimize_disjunctions/ast.json new file mode 100644 index 000000000..f2749d5b3 --- /dev/null +++ b/parser/testdata/03285_analyzer_optimize_disjunctions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001445273, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03285_analyzer_optimize_disjunctions/metadata.json b/parser/testdata/03285_analyzer_optimize_disjunctions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03285_analyzer_optimize_disjunctions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03285_analyzer_optimize_disjunctions/query.sql b/parser/testdata/03285_analyzer_optimize_disjunctions/query.sql new file mode 100644 index 000000000..9459576e4 --- /dev/null +++ b/parser/testdata/03285_analyzer_optimize_disjunctions/query.sql @@ -0,0 +1,39 @@ +SET enable_analyzer = 1; +SET optimize_extract_common_expressions = 1; + +DROP TABLE IF EXISTS x; +CREATE TABLE x (x Int64, A UInt8, B UInt8, C UInt8, D UInt8, E UInt8, F UInt8) ENGINE = MergeTree ORDER BY x; +INSERT INTO x + SELECT + 
cityHash64(number) AS x, + cityHash64(number + 1) % 2 AS A, + cityHash64(number + 2) % 2 AS B, + cityHash64(number + 3) % 2 AS C, + cityHash64(number + 4) % 2 AS D, + cityHash64(number + 5) % 2 AS E, + cityHash64(number + 6) % 2 AS F + FROM numbers(2000); + +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE A or B or (B and C) SETTINGS optimize_extract_common_expressions = 0; +SELECT count() FROM x WHERE A or B or (B and C) SETTINGS optimize_extract_common_expressions = 0; + +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE A or B or (B and C) SETTINGS optimize_extract_common_expressions = 1; +SELECT count() FROM x WHERE A or B or (B and C) SETTINGS optimize_extract_common_expressions = 1; + +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE (A and B) or (B and C) or (B and D and A) SETTINGS optimize_extract_common_expressions = 1; +SELECT count() FROM x WHERE (A and B) or (B and C) or (B and D and A) SETTINGS optimize_extract_common_expressions = 1; + +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE (A and B) or (B and C) or (B and D and A) SETTINGS optimize_extract_common_expressions = 0; +SELECT count() FROM x WHERE (A and B) or (B and C) or (B and D and A) SETTINGS optimize_extract_common_expressions = 0; + +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE (A and B) or (A and B and C) or (D and E) SETTINGS optimize_extract_common_expressions = 1; +SELECT count() FROM x WHERE (A and B) or (A and B and C) or (D and E) SETTINGS optimize_extract_common_expressions = 1; + +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE (A and B) or (A and B and C) or (D and E) SETTINGS optimize_extract_common_expressions = 0; +SELECT count() FROM x WHERE (A and B) or (A and B and C) or (D and E) SETTINGS optimize_extract_common_expressions = 0; + +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE (A and B) or (A and B and C) or (B and C) SETTINGS optimize_extract_common_expressions = 1; +SELECT count() FROM x WHERE (A and B) or (A and B and C) or (B and C) SETTINGS optimize_extract_common_expressions = 1; + +EXPLAIN QUERY TREE dump_ast = 1 SELECT count() FROM x WHERE (A and B) or (A and B and C) or (B and C) SETTINGS optimize_extract_common_expressions = 0; +SELECT count() FROM x WHERE (A and B) or (A and B and C) or (B and C) SETTINGS optimize_extract_common_expressions = 0; diff --git a/parser/testdata/03285_default_engine_with_settings/ast.json b/parser/testdata/03285_default_engine_with_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03285_default_engine_with_settings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03285_default_engine_with_settings/metadata.json b/parser/testdata/03285_default_engine_with_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03285_default_engine_with_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03285_default_engine_with_settings/query.sql b/parser/testdata/03285_default_engine_with_settings/query.sql new file mode 100644 index 000000000..fd9db4f77 --- /dev/null +++ b/parser/testdata/03285_default_engine_with_settings/query.sql @@ -0,0 +1,22 @@ +-- Tags: memory-engine +-- https://github.com/ClickHouse/ClickHouse/issues/73099 + +DROP TABLE IF EXISTS example_table; +DROP TABLE IF EXISTS example_table2; + +set default_table_engine = 'MergeTree'; +CREATE TABLE example_table (id UInt32, data String) ORDER BY id SETTINGS 
max_part_loading_threads=8; +SHOW CREATE TABLE example_table; + +SET default_table_engine = 'Memory'; +-- Memory with ORDER BY is wrong +CREATE TABLE example_table2 (id UInt32, data String) ORDER BY id SETTINGS max_part_loading_threads=8; -- { serverError BAD_ARGUMENTS } + +-- Memory with max_part_loading_threads is wrong +CREATE TABLE example_table2 (id UInt32, data String) SETTINGS max_part_loading_threads=8; -- { serverError UNKNOWN_SETTING } + +CREATE TABLE example_table2 (id UInt32, data String) SETTINGS max_rows_to_keep=42; +SHOW CREATE TABLE example_table2; + +DROP TABLE IF EXISTS example_table; +DROP TABLE IF EXISTS example_table2; diff --git a/parser/testdata/03285_materialize_ttl_only_drop_parts/ast.json b/parser/testdata/03285_materialize_ttl_only_drop_parts/ast.json new file mode 100644 index 000000000..2292a3db9 --- /dev/null +++ b/parser/testdata/03285_materialize_ttl_only_drop_parts/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_03285_mat_ttl (children 1)" + }, + { + "explain": " Identifier test_03285_mat_ttl" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001355489, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/03285_materialize_ttl_only_drop_parts/metadata.json b/parser/testdata/03285_materialize_ttl_only_drop_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03285_materialize_ttl_only_drop_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03285_materialize_ttl_only_drop_parts/query.sql b/parser/testdata/03285_materialize_ttl_only_drop_parts/query.sql new file mode 100644 index 000000000..df563791c --- /dev/null +++ b/parser/testdata/03285_materialize_ttl_only_drop_parts/query.sql @@ -0,0 +1,30 @@ +DROP TABLE IF EXISTS test_03285_mat_ttl; + +CREATE TABLE test_03285_mat_ttl (id UInt64, value String, event_time DateTime) + ENGINE MergeTree() + ORDER BY id + SETTINGS min_bytes_for_wide_part = 1000000000, index_granularity = 8192, index_granularity_bytes = '10Mi', ttl_only_drop_parts=1; + +INSERT INTO test_03285_mat_ttl SELECT number, randomString(10), now() + INTERVAL 2 MONTH FROM numbers(50000); +INSERT INTO test_03285_mat_ttl SELECT number, randomString(10), now() FROM numbers(50000); + +OPTIMIZE TABLE test_03285_mat_ttl FINAL SETTINGS mutations_sync = 1; + +SET mutations_sync=1; + +SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'test_03285_mat_ttl' AND active; +SELECT 'Count', count() FROM test_03285_mat_ttl; + +ALTER TABLE test_03285_mat_ttl MODIFY TTL event_time + INTERVAL 1 MONTH SETTINGS mutations_sync = 1; +OPTIMIZE TABLE test_03285_mat_ttl FINAL SETTINGS mutations_sync = 1; + +SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'test_03285_mat_ttl' AND active; +SELECT 'Count', count() FROM test_03285_mat_ttl; + +ALTER TABLE test_03285_mat_ttl MODIFY TTL event_time - INTERVAL 3 MONTH SETTINGS mutations_sync = 1; +OPTIMIZE TABLE test_03285_mat_ttl FINAL SETTINGS mutations_sync = 1; + +SELECT 'Rows in parts', SUM(rows) FROM system.parts WHERE database = currentDatabase() AND table = 'test_03285_mat_ttl' AND active; +SELECT 'Count', count() FROM test_03285_mat_ttl; + +DROP TABLE test_03285_mat_ttl; diff --git a/parser/testdata/03285_parallel_replicas_one_replica/ast.json b/parser/testdata/03285_parallel_replicas_one_replica/ast.json new file mode 100644 
index 000000000..f5d326baa --- /dev/null +++ b/parser/testdata/03285_parallel_replicas_one_replica/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery src (children 4)" + }, + { + "explain": " Identifier src" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration y (children 1)" + }, + { + "explain": " DataType Int8" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier y" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001103516, + "rows_read": 14, + "bytes_read": 476 + } +} diff --git a/parser/testdata/03285_parallel_replicas_one_replica/metadata.json b/parser/testdata/03285_parallel_replicas_one_replica/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03285_parallel_replicas_one_replica/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03285_parallel_replicas_one_replica/query.sql b/parser/testdata/03285_parallel_replicas_one_replica/query.sql new file mode 100644 index 000000000..735cfdc52 --- /dev/null +++ b/parser/testdata/03285_parallel_replicas_one_replica/query.sql @@ -0,0 +1,2 @@ +create table src (y Int8) engine MergeTree order by y as select 1; +select 1 from (select 2 from src) settings enable_parallel_replicas=1, max_parallel_replicas=2, cluster_for_parallel_replicas='test_shard_localhost', parallel_replicas_for_non_replicated_merge_tree=1; diff --git a/parser/testdata/03286_backup_to_memory/ast.json b/parser/testdata/03286_backup_to_memory/ast.json new file mode 100644 index 000000000..3ef344b51 --- /dev/null +++ b/parser/testdata/03286_backup_to_memory/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001109498, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03286_backup_to_memory/metadata.json b/parser/testdata/03286_backup_to_memory/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03286_backup_to_memory/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03286_backup_to_memory/query.sql b/parser/testdata/03286_backup_to_memory/query.sql new file mode 100644 index 000000000..13c2b6080 --- /dev/null +++ b/parser/testdata/03286_backup_to_memory/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS t1; + +CREATE TABLE t1(x Int32) ENGINE=MergeTree() ORDER BY tuple(); +INSERT INTO t1 VALUES (1), (2), (3); + +BACKUP TABLE t1 TO Memory('b1') FORMAT Null; +DROP TABLE t1 SYNC; +RESTORE TABLE t1 FROM Memory('b1') FORMAT Null; + +SELECT * FROM t1 ORDER BY x; + +DROP TABLE t1; diff --git a/parser/testdata/03286_backup_to_null/ast.json b/parser/testdata/03286_backup_to_null/ast.json new file mode 100644 index 000000000..e154f4b53 --- /dev/null +++ b/parser/testdata/03286_backup_to_null/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + 
[ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001201685, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03286_backup_to_null/metadata.json b/parser/testdata/03286_backup_to_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03286_backup_to_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03286_backup_to_null/query.sql b/parser/testdata/03286_backup_to_null/query.sql new file mode 100644 index 000000000..896a8bb92 --- /dev/null +++ b/parser/testdata/03286_backup_to_null/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS t1; + +CREATE TABLE t1(x Int32) ENGINE=MergeTree() ORDER BY tuple(); +INSERT INTO t1 VALUES (1), (2), (3); + +BACKUP TABLE t1 TO Null FORMAT Null; + +DROP TABLE t1 SYNC; + +RESTORE TABLE t1 FROM Null FORMAT Null; -- { serverError BACKUP_NOT_FOUND } diff --git a/parser/testdata/03286_collation_locale_with_modifier/ast.json b/parser/testdata/03286_collation_locale_with_modifier/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03286_collation_locale_with_modifier/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03286_collation_locale_with_modifier/metadata.json b/parser/testdata/03286_collation_locale_with_modifier/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03286_collation_locale_with_modifier/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03286_collation_locale_with_modifier/query.sql b/parser/testdata/03286_collation_locale_with_modifier/query.sql new file mode 100644 index 000000000..7dfe1d91d --- /dev/null +++ b/parser/testdata/03286_collation_locale_with_modifier/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-fasttest +-- no-fasttest: needs ICU + +SELECT 'Sort by standard Turkish locale'; +SELECT arrayJoin(['kk 50', 'KK 01', ' KK 2', ' KK 3', 'kk 1', 'x9y99', 'x9y100']) item +ORDER BY item ASC COLLATE 'tr'; + +SELECT 'Sort by Turkish locale with modifiers'; +SELECT arrayJoin(['kk 50', 'KK 01', ' KK 2', ' KK 3', 'kk 1', 'x9y99', 'x9y100']) item +ORDER BY item ASC COLLATE 'tr-u-kn-true-ka-shifted'; diff --git a/parser/testdata/03286_format_datetime_timezones/ast.json b/parser/testdata/03286_format_datetime_timezones/ast.json new file mode 100644 index 000000000..929a17a63 --- /dev/null +++ b/parser/testdata/03286_format_datetime_timezones/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function formatDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '2024-12-11 12:34:56.000'" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal 'US\/Pacific'" + }, + { + "explain": " Literal '%W%z'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001284624, + "rows_read": 12, + "bytes_read": 472 + } +} diff --git a/parser/testdata/03286_format_datetime_timezones/metadata.json b/parser/testdata/03286_format_datetime_timezones/metadata.json new file mode 100644 
index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03286_format_datetime_timezones/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03286_format_datetime_timezones/query.sql b/parser/testdata/03286_format_datetime_timezones/query.sql new file mode 100644 index 000000000..66bb8ab50 --- /dev/null +++ b/parser/testdata/03286_format_datetime_timezones/query.sql @@ -0,0 +1,5 @@ +select formatDateTime(toDateTime64('2024-12-11 12:34:56.000', 3, 'US/Pacific'), '%W%z'); +select formatDateTime(toDateTime64('2024-12-11 12:34:56.000', 3, 'US/Eastern'), '%W%z'); +select formatDateTime(toDateTime64('2024-12-11 12:34:56.000', 3, 'UTC'), '%W%z'); +select formatDateTime(toDateTime64('2024-12-11 12:34:56.000', 3, 'Europe/Berlin'), '%W%z'); +select formatDateTime(toDateTime64('2024-12-11 12:34:56.000', 3, 'Europe/Athens'), '%W%z'); diff --git a/parser/testdata/03286_parallel_replicas_cross_join_bug/ast.json b/parser/testdata/03286_parallel_replicas_cross_join_bug/ast.json new file mode 100644 index 000000000..43a38b4a0 --- /dev/null +++ b/parser/testdata/03286_parallel_replicas_cross_join_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tab (children 1)" + }, + { + "explain": " Identifier tab" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001150361, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/03286_parallel_replicas_cross_join_bug/metadata.json b/parser/testdata/03286_parallel_replicas_cross_join_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03286_parallel_replicas_cross_join_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03286_parallel_replicas_cross_join_bug/query.sql b/parser/testdata/03286_parallel_replicas_cross_join_bug/query.sql new file mode 100644 index 000000000..319e1ff1d --- /dev/null +++ b/parser/testdata/03286_parallel_replicas_cross_join_bug/query.sql @@ -0,0 +1,10 @@ +drop table if exists tab; +create table tab (x UInt64) engine = MergeTree order by tuple(); +insert into tab select number from numbers(1e7); + +set enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'parallel_replicas', parallel_replicas_for_non_replicated_merge_tree = true; + +select * from tab l, tab r where l.x < r.x and r.x < 2; +select sum(x), sum(r.x) from (select * from tab l, tab r where r.x < 2 and l.x < 3); + +drop table if exists tab; diff --git a/parser/testdata/03286_reverse_sorting_key_final/ast.json b/parser/testdata/03286_reverse_sorting_key_final/ast.json new file mode 100644 index 000000000..56161a92c --- /dev/null +++ b/parser/testdata/03286_reverse_sorting_key_final/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001573425, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03286_reverse_sorting_key_final/metadata.json b/parser/testdata/03286_reverse_sorting_key_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03286_reverse_sorting_key_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03286_reverse_sorting_key_final/query.sql b/parser/testdata/03286_reverse_sorting_key_final/query.sql new file mode 100644 
index 000000000..aab8abd22 --- /dev/null +++ b/parser/testdata/03286_reverse_sorting_key_final/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Nested(c1 Int)) ENGINE = SummingMergeTree ORDER BY (c0.c1 DESC) SETTINGS allow_experimental_reverse_key = 1; +INSERT INTO t0 (c0.c1) VALUES ([1]), ([2]); +SELECT 1 FROM t0 FINAL; +DROP TABLE t0; + +-- For consistency of the EXPLAIN output: +SET allow_prefetched_read_pool_for_remote_filesystem = 0; + +-- PartsSplitter should work for reverse keys. +CREATE TABLE t0(a Int, b Int) Engine=ReplacingMergeTree order by (a desc, b desc) SETTINGS allow_experimental_reverse_key = 1, allow_nullable_key = 1, index_granularity = 8192, index_granularity_bytes = '10Mi'; +INSERT INTO t0 select number, number from numbers(5); +INSERT INTO t0 select number, number from numbers(5, 2); +set max_threads = 2; +explain pipeline select * from t0 final SETTINGS enable_vertical_final = 0; +DROP TABLE t0; + +-- PartsSplitter is disabled when some keys are in ascending order while others are in descending order. +CREATE TABLE t0(a Int, b Int) Engine=ReplacingMergeTree order by (a desc, b) SETTINGS allow_experimental_reverse_key = 1, index_granularity = 8192, index_granularity_bytes = '10Mi'; +INSERT INTO t0 select number, number from numbers(5); +INSERT INTO t0 select number, number from numbers(5,2); +set max_threads = 2; +explain pipeline select * from t0 final SETTINGS enable_vertical_final = 0; +DROP TABLE t0; diff --git a/parser/testdata/03286_reverse_sorting_key_final2/ast.json b/parser/testdata/03286_reverse_sorting_key_final2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03286_reverse_sorting_key_final2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03286_reverse_sorting_key_final2/metadata.json b/parser/testdata/03286_reverse_sorting_key_final2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03286_reverse_sorting_key_final2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03286_reverse_sorting_key_final2/query.sql b/parser/testdata/03286_reverse_sorting_key_final2/query.sql new file mode 100644 index 000000000..e23d0c1c0 --- /dev/null +++ b/parser/testdata/03286_reverse_sorting_key_final2/query.sql @@ -0,0 +1,20 @@ +-- { echo ON } +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Int) ENGINE = SummingMergeTree() ORDER BY (c0 DESC) PRIMARY KEY (c0) SETTINGS allow_experimental_reverse_key = 1; +INSERT INTO TABLE t0 (c0) VALUES (1), (2); +SELECT sum(c0) FROM t0 FINAL; + +DROP TABLE t0; + +CREATE TABLE t0 (c0 Int, c1 Int) ENGINE = SummingMergeTree() ORDER BY (c0 DESC, c1) PRIMARY KEY (c0) SETTINGS allow_experimental_reverse_key = 1; +INSERT INTO TABLE t0 (c0) VALUES (1), (2); +SELECT sum(c0) FROM t0 FINAL; + +DROP TABLE t0; + +CREATE TABLE t0 (c0 Int, c1 Int) ENGINE = SummingMergeTree() ORDER BY (c0 DESC, c1) SETTINGS allow_experimental_reverse_key = 1; +INSERT INTO TABLE t0 (c0) VALUES (1), (2); +SELECT sum(c0) FROM t0 FINAL; + +DROP TABLE t0; diff --git a/parser/testdata/03286_serialization_hint_system_columns/ast.json b/parser/testdata/03286_serialization_hint_system_columns/ast.json new file mode 100644 index 000000000..e6e7d4fb8 --- /dev/null +++ b/parser/testdata/03286_serialization_hint_system_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_serialization_hints (children 1)" + }, + { + "explain": " 
Identifier t_serialization_hints" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001116928, + "rows_read": 2, + "bytes_read": 94 + } +} diff --git a/parser/testdata/03286_serialization_hint_system_columns/metadata.json b/parser/testdata/03286_serialization_hint_system_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03286_serialization_hint_system_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03286_serialization_hint_system_columns/query.sql b/parser/testdata/03286_serialization_hint_system_columns/query.sql new file mode 100644 index 000000000..3239cc81a --- /dev/null +++ b/parser/testdata/03286_serialization_hint_system_columns/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS t_serialization_hints; + +CREATE TABLE t_serialization_hints (a UInt64, b UInt64, c Array(String)) +ENGINE = MergeTree ORDER BY a +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9; + +INSERT INTO t_serialization_hints SELECT number, 0, [] FROM numbers(1000); + +SELECT name, serialization_hint FROM system.columns +WHERE database = currentDatabase() AND table = 't_serialization_hints' +ORDER BY name; + +DROP TABLE t_serialization_hints; diff --git a/parser/testdata/03287_dynamic_and_json_squashing_fix/ast.json b/parser/testdata/03287_dynamic_and_json_squashing_fix/ast.json new file mode 100644 index 000000000..2497ca412 --- /dev/null +++ b/parser/testdata/03287_dynamic_and_json_squashing_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001390232, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03287_dynamic_and_json_squashing_fix/metadata.json b/parser/testdata/03287_dynamic_and_json_squashing_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03287_dynamic_and_json_squashing_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03287_dynamic_and_json_squashing_fix/query.sql b/parser/testdata/03287_dynamic_and_json_squashing_fix/query.sql new file mode 100644 index 000000000..a770113db --- /dev/null +++ b/parser/testdata/03287_dynamic_and_json_squashing_fix/query.sql @@ -0,0 +1,24 @@ +set enable_json_type=1; +set enable_dynamic_type=1; + +drop table if exists src; +drop table if exists dst; + +create table src (d Dynamic) engine=Memory; +create table dst (d Dynamic) engine=MergeTree order by tuple(); +insert into src select materialize(42)::Int64; +insert into src select 'Hello'; +insert into dst select * from remote('127.0.0.2', currentDatabase(), src); +select isDynamicElementInSharedData(d) from dst; +drop table src; +drop table dst; + +create table src (json JSON) engine=Memory; +create table dst (json JSON) engine=MergeTree order by tuple(); +insert into src select '{"a" : 42}'; +insert into src select '{"b" : 42}'; +insert into dst select * from remote('127.0.0.2', currentDatabase(), src); +select JSONSharedDataPaths(json) from dst; +drop table src; +drop table dst; + diff --git a/parser/testdata/03287_format_datetime_mysqlfraction/ast.json b/parser/testdata/03287_format_datetime_mysqlfraction/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03287_format_datetime_mysqlfraction/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03287_format_datetime_mysqlfraction/metadata.json 
b/parser/testdata/03287_format_datetime_mysqlfraction/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03287_format_datetime_mysqlfraction/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03287_format_datetime_mysqlfraction/query.sql b/parser/testdata/03287_format_datetime_mysqlfraction/query.sql new file mode 100644 index 000000000..9ed5a1151 --- /dev/null +++ b/parser/testdata/03287_format_datetime_mysqlfraction/query.sql @@ -0,0 +1,17 @@ +-- Test for issue #72879 + +SELECT 'Default settings'; +SELECT formatDateTime(toDateTime64('1970-01-01 00:00:00.12345678', 8, 'UTC'), '%f') SETTINGS formatdatetime_f_prints_scale_number_of_digits = 0; +SELECT formatDateTime(toDateTime64('1970-01-01 00:00:00.123456', 6, 'UTC'), '%f') SETTINGS formatdatetime_f_prints_scale_number_of_digits = 0; +SELECT formatDateTime(toDateTime64('1970-01-01 00:00:00.1234', 4, 'UTC'), '%f') SETTINGS formatdatetime_f_prints_scale_number_of_digits = 0; +SELECT formatDateTime(toDateTime64('1970-01-01 00:00:00.12', 2, 'UTC'), '%f') SETTINGS formatdatetime_f_prints_scale_number_of_digits = 0; +SELECT formatDateTime(toDateTime64('1970-01-01 00:00:00.1', 1, 'UTC'), '%f') SETTINGS formatdatetime_f_prints_scale_number_of_digits = 0; +SELECT formatDateTime(toDateTime64('1970-01-01 00:00:00', 0, 'UTC'), '%f') SETTINGS formatdatetime_f_prints_scale_number_of_digits = 0; + +SELECT 'Compatibility settings'; +SELECT formatDateTime(toDateTime64('1970-01-01 00:00:00.12345678', 8, 'UTC'), '%f') SETTINGS formatdatetime_f_prints_scale_number_of_digits = 1; +SELECT formatDateTime(toDateTime64('1970-01-01 00:00:00.123456', 6, 'UTC'), '%f') SETTINGS formatdatetime_f_prints_scale_number_of_digits = 1; +SELECT formatDateTime(toDateTime64('1970-01-01 00:00:00.1234', 4, 'UTC'), '%f') SETTINGS formatdatetime_f_prints_scale_number_of_digits = 1; +SELECT formatDateTime(toDateTime64('1970-01-01 00:00:00.12', 2, 'UTC'), '%f') SETTINGS formatdatetime_f_prints_scale_number_of_digits = 1; +SELECT formatDateTime(toDateTime64('1970-01-01 00:00:00.1', 1, 'UTC'), '%f') SETTINGS formatdatetime_f_prints_scale_number_of_digits = 1; +SELECT formatDateTime(toDateTime64('1970-01-01 00:00:00', 0, 'UTC'), '%f') SETTINGS formatdatetime_f_prints_scale_number_of_digits = 1; diff --git a/parser/testdata/03287_not_comparable_types_in_order_by_and_comparison_functions/ast.json b/parser/testdata/03287_not_comparable_types_in_order_by_and_comparison_functions/ast.json new file mode 100644 index 000000000..69d01f47e --- /dev/null +++ b/parser/testdata/03287_not_comparable_types_in_order_by_and_comparison_functions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001761772, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03287_not_comparable_types_in_order_by_and_comparison_functions/metadata.json b/parser/testdata/03287_not_comparable_types_in_order_by_and_comparison_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03287_not_comparable_types_in_order_by_and_comparison_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03287_not_comparable_types_in_order_by_and_comparison_functions/query.sql b/parser/testdata/03287_not_comparable_types_in_order_by_and_comparison_functions/query.sql new file mode 100644 index 000000000..44d40e706 --- /dev/null +++ 
b/parser/testdata/03287_not_comparable_types_in_order_by_and_comparison_functions/query.sql @@ -0,0 +1,14 @@ +set enable_json_type=1; + +drop table if exists test; +create table test (agg1 AggregateFunction(sum, UInt64), agg2 AggregateFunction(sum, UInt64)) engine=Memory; +insert into test select sumState(number), sumState(number + 1) from numbers(10); +select * from test order by agg1; -- {serverError ILLEGAL_COLUMN} +select agg1 < agg2 from test; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select agg1 <= agg2 from test; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select agg1 > agg2 from test; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select agg1 >= agg2 from test; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select agg1 = agg2 from test; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select agg1 != agg2 from test; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +drop table test; diff --git a/parser/testdata/03289_explain_syntax_statistics/ast.json b/parser/testdata/03289_explain_syntax_statistics/ast.json new file mode 100644 index 000000000..2107988a0 --- /dev/null +++ b/parser/testdata/03289_explain_syntax_statistics/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery data_01247 (children 2)" + }, + { + "explain": " Identifier data_01247" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001766126, + "rows_read": 5, + "bytes_read": 176 + } +} diff --git a/parser/testdata/03289_explain_syntax_statistics/metadata.json b/parser/testdata/03289_explain_syntax_statistics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03289_explain_syntax_statistics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03289_explain_syntax_statistics/query.sql b/parser/testdata/03289_explain_syntax_statistics/query.sql new file mode 100644 index 000000000..a916040c4 --- /dev/null +++ b/parser/testdata/03289_explain_syntax_statistics/query.sql @@ -0,0 +1,7 @@ +create table data_01247 as system.numbers engine=Memory(); +insert into data_01247 select * from system.numbers limit 2; +create table dist_01247 as data_01247 engine=Distributed(test_cluster_two_shards, currentDatabase(), data_01247, number); + +set allow_statistics_optimize = 1; + +EXPLAIN SYNTAX SELECT 'Get hierarchy', toNullable(13), count() IGNORE NULLS FROM dist_01247 GROUP BY number WITH CUBE SETTINGS distributed_group_by_no_merge = 3 FORMAT Null; diff --git a/parser/testdata/03289_tuple_element_to_subcolumn/ast.json b/parser/testdata/03289_tuple_element_to_subcolumn/ast.json new file mode 100644 index 000000000..ee3d1a39c --- /dev/null +++ b/parser/testdata/03289_tuple_element_to_subcolumn/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_tuple_elem (children 1)" + }, + { + "explain": " Identifier t_tuple_elem" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001172024, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/03289_tuple_element_to_subcolumn/metadata.json b/parser/testdata/03289_tuple_element_to_subcolumn/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03289_tuple_element_to_subcolumn/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03289_tuple_element_to_subcolumn/query.sql b/parser/testdata/03289_tuple_element_to_subcolumn/query.sql new file mode 100644 index 000000000..776b6ce8b --- /dev/null +++ b/parser/testdata/03289_tuple_element_to_subcolumn/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS t_tuple_elem; + +SET enable_variant_type = 1; + +CREATE TABLE t_tuple_elem ( + t1 Tuple( + a Array(UInt64), + b Array(LowCardinality(String))), + v Variant( + Array(UInt64), + Array(LowCardinality(String))) +) +ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_tuple_elem VALUES (([1, 2], ['a', 'b', 'c']), [3, 4]); +INSERT INTO t_tuple_elem VALUES (([3, 4], ['d', 'e']), ['d', 'e']); + +SET optimize_functions_to_subcolumns = 1; + +SELECT (tupleElement(t1, 1), tupleElement(t1, 2)) FROM t_tuple_elem ORDER BY ALL; +SELECT (tupleElement(t1, 'a'), tupleElement(t1, 'b')) FROM t_tuple_elem ORDER BY ALL; +SELECT (variantElement(v, 'Array(UInt64)'), variantElement(v, 'Array(LowCardinality(String))')) FROM t_tuple_elem ORDER BY ALL; + +DROP TABLE t_tuple_elem; diff --git a/parser/testdata/03290_dictionary_assert_on_function/ast.json b/parser/testdata/03290_dictionary_assert_on_function/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03290_dictionary_assert_on_function/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03290_dictionary_assert_on_function/metadata.json b/parser/testdata/03290_dictionary_assert_on_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03290_dictionary_assert_on_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03290_dictionary_assert_on_function/query.sql b/parser/testdata/03290_dictionary_assert_on_function/query.sql new file mode 100644 index 000000000..83028e915 --- /dev/null +++ b/parser/testdata/03290_dictionary_assert_on_function/query.sql @@ -0,0 +1,15 @@ +CREATE DICTIONARY default.currency_conversion_dict +( + `a` String, + `b` Decimal(18, 8) +) +PRIMARY KEY a +SOURCE(CLICKHOUSE( + TABLE '' + STRUCTURE ( + a String + b Decimal(18, 8) + ) +)) +LIFETIME (MIN 0 MAX 3600) +LAYOUT (FLAT()); -- {serverError INCORRECT_DICTIONARY_DEFINITION} diff --git a/parser/testdata/03290_final_collapsing/ast.json b/parser/testdata/03290_final_collapsing/ast.json new file mode 100644 index 000000000..c3c54f6be --- /dev/null +++ b/parser/testdata/03290_final_collapsing/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_final_collapsing (children 1)" + }, + { + "explain": " Identifier t_final_collapsing" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001476953, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/03290_final_collapsing/metadata.json b/parser/testdata/03290_final_collapsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03290_final_collapsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03290_final_collapsing/query.sql b/parser/testdata/03290_final_collapsing/query.sql new file mode 100644 index 000000000..b73c8441c --- /dev/null +++ b/parser/testdata/03290_final_collapsing/query.sql @@ -0,0 +1,52 @@ +DROP TABLE IF EXISTS t_final_collapsing; + +CREATE TABLE t_final_collapsing +( + key Int8, + sign Int8 +) +ENGINE = CollapsingMergeTree(sign) ORDER BY key; + +INSERT INTO t_final_collapsing VALUES (5, -1); + +OPTIMIZE TABLE t_final_collapsing 
FINAL; -- move the part to level 1 to enable optimizations + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 0, split_intersecting_parts_ranges_into_layers_final = 0; +SELECT count() FROM t_final_collapsing FINAL; + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 0, split_intersecting_parts_ranges_into_layers_final = 1; +SELECT count() FROM t_final_collapsing FINAL; + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 1, split_intersecting_parts_ranges_into_layers_final = 0; +SELECT count() FROM t_final_collapsing FINAL; + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 1, split_intersecting_parts_ranges_into_layers_final = 1; +SELECT count() FROM t_final_collapsing FINAL; + +DROP TABLE t_final_collapsing; + +CREATE TABLE t_final_collapsing +( + key Int8, + sign Int8, + version UInt64 +) +ENGINE = VersionedCollapsingMergeTree(sign, version) ORDER BY key; + +INSERT INTO t_final_collapsing VALUES (5, -1, 1); + +OPTIMIZE TABLE t_final_collapsing FINAL; -- move the part to level 1 to enable optimizations + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 0, split_intersecting_parts_ranges_into_layers_final = 0; +SELECT count() FROM t_final_collapsing FINAL; + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 0, split_intersecting_parts_ranges_into_layers_final = 1; +SELECT count() FROM t_final_collapsing FINAL; + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 1, split_intersecting_parts_ranges_into_layers_final = 0; +SELECT count() FROM t_final_collapsing FINAL; + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 1, split_intersecting_parts_ranges_into_layers_final = 1; +SELECT count() FROM t_final_collapsing FINAL; + +DROP TABLE t_final_collapsing; diff --git a/parser/testdata/03290_final_replacing/ast.json b/parser/testdata/03290_final_replacing/ast.json new file mode 100644 index 000000000..5cca0bb41 --- /dev/null +++ b/parser/testdata/03290_final_replacing/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_final_replacing (children 1)" + }, + { + "explain": " Identifier t_final_replacing" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001486395, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/03290_final_replacing/metadata.json b/parser/testdata/03290_final_replacing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03290_final_replacing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03290_final_replacing/query.sql b/parser/testdata/03290_final_replacing/query.sql new file mode 100644 index 000000000..dd18a0290 --- /dev/null +++ b/parser/testdata/03290_final_replacing/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS t_final_replacing; + +CREATE TABLE t_final_replacing (a UInt64, b UInt64) ENGINE = ReplacingMergeTree ORDER BY a SETTINGS index_granularity = 1; + +INSERT INTO t_final_replacing VALUES (1, 1) (1, 2) (2, 3); +INSERT INTO t_final_replacing VALUES (2, 3) (5, 4); + +OPTIMIZE TABLE t_final_replacing FINAL; + +SET optimize_read_in_order = 0; + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 0, split_intersecting_parts_ranges_into_layers_final = 0; +SELECT a, b FROM t_final_replacing FINAL ORDER BY a, b; + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 0, 
split_intersecting_parts_ranges_into_layers_final = 1; +SELECT a, b FROM t_final_replacing FINAL ORDER BY a, b; + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 1, split_intersecting_parts_ranges_into_layers_final = 0; +SELECT a, b FROM t_final_replacing FINAL ORDER BY a, b; + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 1, split_intersecting_parts_ranges_into_layers_final = 1; +SELECT a, b FROM t_final_replacing FINAL ORDER BY a, b; + +DROP TABLE t_final_replacing; diff --git a/parser/testdata/03290_final_sample/ast.json b/parser/testdata/03290_final_sample/ast.json new file mode 100644 index 000000000..402f50eb3 --- /dev/null +++ b/parser/testdata/03290_final_sample/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_sample_final (children 1)" + }, + { + "explain": " Identifier t_sample_final" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001148208, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/03290_final_sample/metadata.json b/parser/testdata/03290_final_sample/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03290_final_sample/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03290_final_sample/query.sql b/parser/testdata/03290_final_sample/query.sql new file mode 100644 index 000000000..9fde622ec --- /dev/null +++ b/parser/testdata/03290_final_sample/query.sql @@ -0,0 +1,44 @@ +DROP TABLE IF EXISTS t_sample_final; + +CREATE TABLE t_sample_final +( + CounterID UInt32, + UserID UInt64, + Version UInt64 +) +ENGINE = ReplacingMergeTree(Version) +ORDER BY (CounterID, intHash32(UserID)) +SAMPLE BY intHash32(UserID) +SETTINGS index_granularity = 8192; + +INSERT INTO t_sample_final SELECT * FROM generateRandom('CounterID UInt32, UserID UInt64, Version UInt64', 1, 10, 2) LIMIT 10; + +OPTIMIZE TABLE t_sample_final FINAL; + +set send_logs_level = 'error'; + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 0, split_intersecting_parts_ranges_into_layers_final = 0; +SELECT count() FROM t_sample_final FINAL SAMPLE 1 / 2 WHERE NOT ignore(CounterID); + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 0, split_intersecting_parts_ranges_into_layers_final = 1; +SELECT count() FROM t_sample_final FINAL SAMPLE 1 / 2 WHERE NOT ignore(CounterID); + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 1, split_intersecting_parts_ranges_into_layers_final = 0; +SELECT count() FROM t_sample_final FINAL SAMPLE 1 / 2 WHERE NOT ignore(CounterID); + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 1, split_intersecting_parts_ranges_into_layers_final = 1; +SELECT count() FROM t_sample_final FINAL SAMPLE 1 / 2 WHERE NOT ignore(CounterID); + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 0, split_intersecting_parts_ranges_into_layers_final = 0; +SELECT count() FROM t_sample_final FINAL SAMPLE 1 / 2 WHERE NOT ignore(*); + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 0, split_intersecting_parts_ranges_into_layers_final = 1; +SELECT count() FROM t_sample_final FINAL SAMPLE 1 / 2 WHERE NOT ignore(*); + +SET split_parts_ranges_into_intersecting_and_non_intersecting_final = 1, split_intersecting_parts_ranges_into_layers_final = 0; +SELECT count() FROM t_sample_final FINAL SAMPLE 1 / 2 WHERE NOT ignore(*); + +SET 
split_parts_ranges_into_intersecting_and_non_intersecting_final = 1, split_intersecting_parts_ranges_into_layers_final = 1; +SELECT count() FROM t_sample_final FINAL SAMPLE 1 / 2 WHERE NOT ignore(*); + +DROP TABLE t_sample_final; diff --git a/parser/testdata/03290_force_normal_projection/ast.json b/parser/testdata/03290_force_normal_projection/ast.json new file mode 100644 index 000000000..c8a9f7dbb --- /dev/null +++ b/parser/testdata/03290_force_normal_projection/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery users (children 1)" + }, + { + "explain": " Identifier users" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001414019, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/03290_force_normal_projection/metadata.json b/parser/testdata/03290_force_normal_projection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03290_force_normal_projection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03290_force_normal_projection/query.sql b/parser/testdata/03290_force_normal_projection/query.sql new file mode 100644 index 000000000..d1b84c3dc --- /dev/null +++ b/parser/testdata/03290_force_normal_projection/query.sql @@ -0,0 +1,30 @@ +DROP TABLE IF EXISTS users; + +CREATE TABLE users +( + `uid` Int16, + `name` String, + `age` Int16, + PROJECTION p1 + ( + SELECT + name, + uid + ORDER BY age + ) +) +ENGINE = MergeTree +ORDER BY uid; + +INSERT INTO users VALUES (1231, 'John', 33), (1232, 'Mary', 34); + +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +SELECT + name, + uid +FROM users +ORDER BY age ASC +SETTINGS optimize_use_projections = 1, force_optimize_projection = 1; + +DROP TABLE users; diff --git a/parser/testdata/03290_formatReadable_other_numeric_types/ast.json b/parser/testdata/03290_formatReadable_other_numeric_types/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03290_formatReadable_other_numeric_types/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03290_formatReadable_other_numeric_types/metadata.json b/parser/testdata/03290_formatReadable_other_numeric_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03290_formatReadable_other_numeric_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03290_formatReadable_other_numeric_types/query.sql b/parser/testdata/03290_formatReadable_other_numeric_types/query.sql new file mode 100644 index 000000000..b7ca1dd68 --- /dev/null +++ b/parser/testdata/03290_formatReadable_other_numeric_types/query.sql @@ -0,0 +1,59 @@ +SELECT + 'Int128', + formatReadableDecimalSize(number), + formatReadableSize(number), + formatReadableQuantity(number), + formatReadableTimeDelta(number) +FROM (SELECT number::Int128 AS number FROM numbers(2)); + +SELECT + 'UInt256', + formatReadableDecimalSize(number), + formatReadableSize(number), + formatReadableQuantity(number), + formatReadableTimeDelta(number) +FROM (SELECT number::UInt256 AS number FROM numbers(2)); + +SELECT + 'Decimal32', + formatReadableDecimalSize(number), + formatReadableSize(number), + formatReadableQuantity(number), + formatReadableTimeDelta(number) +FROM (SELECT number::Decimal32(2) AS number FROM numbers(2)); + +SELECT + 'Decimal256', + formatReadableDecimalSize(number), +
formatReadableSize(number), + formatReadableQuantity(number), + formatReadableTimeDelta(number) +FROM (SELECT number::Decimal256(2) AS number FROM numbers(2)); + +SELECT + 'BFloat16', + formatReadableDecimalSize(number), + formatReadableSize(number), + formatReadableQuantity(number), + formatReadableTimeDelta(number) +FROM (SELECT number::BFloat16 AS number FROM numbers(2)); + +SELECT formatReadableDecimalSize(number::Date) FROM numbers(2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatReadableSize(number::Date) FROM numbers(2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatReadableQuantity(number::Date) FROM numbers(2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatReadableTimeDelta(number::Date) FROM numbers(2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT formatReadableDecimalSize(number::Date32) FROM numbers(2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatReadableSize(number::Date32) FROM numbers(2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatReadableQuantity(number::Date32) FROM numbers(2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatReadableTimeDelta(number::Date32) FROM numbers(2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT formatReadableDecimalSize(number::DateTime) FROM numbers(2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatReadableSize(number::DateTime) FROM numbers(2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatReadableQuantity(number::DateTime) FROM numbers(2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatReadableTimeDelta(number::DateTime) FROM numbers(2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT formatReadableDecimalSize(number::DateTime64(3)) FROM numbers(2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatReadableSize(number::DateTime64(3)) FROM numbers(2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatReadableQuantity(number::DateTime64(3)) FROM numbers(2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT formatReadableTimeDelta(number::DateTime64(3)) FROM numbers(2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/03290_limit_by_segv/ast.json b/parser/testdata/03290_limit_by_segv/ast.json new file mode 100644 index 000000000..d73a37871 --- /dev/null +++ b/parser/testdata/03290_limit_by_segv/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias t0) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias c0)" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnsRegexpMatcher" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " ArrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier c0" +
} + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001208341, + "rows_read": 21, + "bytes_read": 854 + } +} diff --git a/parser/testdata/03290_limit_by_segv/metadata.json b/parser/testdata/03290_limit_by_segv/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03290_limit_by_segv/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03290_limit_by_segv/query.sql b/parser/testdata/03290_limit_by_segv/query.sql new file mode 100644 index 000000000..23d969ec0 --- /dev/null +++ b/parser/testdata/03290_limit_by_segv/query.sql @@ -0,0 +1 @@ +SELECT 1 FROM (SELECT 1 AS c0 LIMIT 0 BY COLUMNS('1')) t0 ARRAY JOIN c0; -- { serverError TYPE_MISMATCH } \ No newline at end of file diff --git a/parser/testdata/03290_mix_engine_and_query_settings/ast.json b/parser/testdata/03290_mix_engine_and_query_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03290_mix_engine_and_query_settings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03290_mix_engine_and_query_settings/metadata.json b/parser/testdata/03290_mix_engine_and_query_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03290_mix_engine_and_query_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03290_mix_engine_and_query_settings/query.sql b/parser/testdata/03290_mix_engine_and_query_settings/query.sql new file mode 100644 index 000000000..f9ffb3691 --- /dev/null +++ b/parser/testdata/03290_mix_engine_and_query_settings/query.sql @@ -0,0 +1,48 @@ +-- Tags: memory-engine +SET enable_json_type = 0; + +DROP TABLE IF EXISTS example_mt; +CREATE TABLE example_mt +( + `id` UInt32, + `data` LowCardinality(UInt8) +) +ENGINE = MergeTree() +ORDER BY id +SETTINGS async_insert = 1, allow_suspicious_low_cardinality_types = 1; +SHOW CREATE TABLE example_mt; +DROP TABLE IF EXISTS example_mt; + +DROP TABLE IF EXISTS example_memory; +CREATE TABLE example_memory +( + `id` UInt64, + `data` LowCardinality(UInt8) +) +ENGINE = Memory +SETTINGS max_rows_to_keep = 100, allow_suspicious_low_cardinality_types = 1; +SHOW CREATE TABLE example_memory; +DROP TABLE IF EXISTS example_memory; + + +DROP TABLE IF EXISTS example_set; +CREATE TABLE example_set +( + `id` UInt64, + `data` LowCardinality(UInt8) +) +ENGINE = Set +SETTINGS persistent = 1, allow_suspicious_low_cardinality_types = 1; +SHOW CREATE TABLE example_set; +DROP TABLE IF EXISTS example_set; + +DROP TABLE IF EXISTS example_join; +CREATE TABLE example_join +( + `id` UInt64, + `data` LowCardinality(UInt8) +) +ENGINE = Join(ANY, LEFT, id) +SETTINGS persistent = 1, allow_suspicious_low_cardinality_types = 1; +SHOW CREATE TABLE example_join; +DROP TABLE IF EXISTS example_join; diff --git a/parser/testdata/03290_partial_arrayROCAUC_and_arrayAUCPR/ast.json b/parser/testdata/03290_partial_arrayROCAUC_and_arrayAUCPR/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03290_partial_arrayROCAUC_and_arrayAUCPR/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03290_partial_arrayROCAUC_and_arrayAUCPR/metadata.json b/parser/testdata/03290_partial_arrayROCAUC_and_arrayAUCPR/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03290_partial_arrayROCAUC_and_arrayAUCPR/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03290_partial_arrayROCAUC_and_arrayAUCPR/query.sql 
b/parser/testdata/03290_partial_arrayROCAUC_and_arrayAUCPR/query.sql new file mode 100644 index 000000000..b9335f593 --- /dev/null +++ b/parser/testdata/03290_partial_arrayROCAUC_and_arrayAUCPR/query.sql @@ -0,0 +1,104 @@ + +-- CASE 1 +-- scores = [0.1, 0.4, 0.35, 0.8] +-- labels = [0, 0, 1, 1] + +select + floor(arrayAUCPR([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]), 10), + floor(arrayROCAUC([0.1, 0.4, 0.35, 0.8], [0, 0, 1, 1]), 10); + +with partial_aucs as ( + select + arrayAUCPR(scores, labels, pr_offsets) as partial_pr_auc, + arrayROCAUC(scores, labels, true, roc_offsets) as partial_roc_auc + from ( + select [0.8] as scores, [1] as labels, [0, 0, 2] as pr_offsets, [0, 0, 2, 2] as roc_offsets + UNION ALL + select [0.4] as scores, [0] as labels, [1, 0, 2] as pr_offsets, [1, 0, 2, 2] as roc_offsets + UNION ALL + select [0.35] as scores, [1] as labels, [1, 1, 2] as pr_offsets, [1, 1, 2, 2] as roc_offsets + UNION ALL + select [0.1] as scores, [0] as labels, [2, 1, 2] as pr_offsets, [2, 1, 2, 2] as roc_offsets + ) +) +select + floor(sum(partial_pr_auc), 10), + floor(sum(partial_roc_auc), 10) +from partial_aucs; + +with partial_aucs as ( + select + arrayAUCPR(scores, labels, pr_offsets) as partial_pr_auc, + arrayROCAUC(scores, labels, true, roc_offsets) as partial_roc_auc + from ( + select [0.8, 0.4] as scores, [1, 0] as labels, [0, 0, 2] as pr_offsets, [0, 0, 2, 2] as roc_offsets + UNION ALL + select [0.35, 0.1] as scores, [1, 0] as labels, [1, 1, 2] as pr_offsets, [1, 1, 2, 2] as roc_offsets + ) +) +select + floor(sum(partial_pr_auc), 10), + floor(sum(partial_roc_auc), 10) +from partial_aucs; + +-- CASE 2 +-- scores = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] +-- labels = [1, 0, 1, 0, 0, 0, 1, 0, 0, 1] + +select + floor(arrayAUCPR([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 0, 1, 0, 0, 0, 1, 0, 0, 1]), 10), + floor(arrayROCAUC([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 0, 1, 0, 0, 0, 1, 0, 0, 1]), 10); + +-- Example of a more robust query that can be used to calculate AUC for a large dataset +WITH score_with_group AS ( + SELECT + scores[idx] AS score, + labels[idx] AS label, + FLOOR(score / 3) as group + FROM + (SELECT + range(1, 11) AS idx, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] AS scores, + [1, 0, 1, 0, 0, 0, 1, 0, 0, 1] AS labels) + ARRAY JOIN idx +), +grouped_scores AS ( + SELECT + group, + groupArrayArray(array(score)) as scores, + groupArrayArray(array(label)) as labels, + countIf(label > 0) as group_tp, + countIf(label = 0) as group_fp, + COALESCE (SUM(group_tp) OVER (ORDER BY group DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING), 0) AS prev_group_tp, + COALESCE (SUM(group_fp) OVER (ORDER BY group DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING), 0) AS prev_group_fp + FROM score_with_group + GROUP BY group +), +partial_aucs AS ( + SELECT + arrayAUCPR( + scores, + labels, + [ + COALESCE(prev_group_tp, 0), + COALESCE(prev_group_fp, 0), + COALESCE(SUM(group_tp) OVER(), 0) + ] + ) as partial_pr_auc, + arrayROCAUC( + scores, + labels, + true, + [ + COALESCE(prev_group_tp, 0), + COALESCE(prev_group_fp, 0), + COALESCE(SUM(group_tp) OVER (), 0), + COALESCE(SUM(group_fp) OVER (), 0) + ] + ) as partial_roc_auc + FROM grouped_scores +) +SELECT + floor(sum(partial_pr_auc), 10) as pr_auc, + floor(sum(partial_roc_auc), 10) as roc_auc +FROM partial_aucs diff --git a/parser/testdata/03290_pr_non_replicated_in_subquery/ast.json b/parser/testdata/03290_pr_non_replicated_in_subquery/ast.json new file mode 100644 index 000000000..6eee8ca6a --- /dev/null +++ b/parser/testdata/03290_pr_non_replicated_in_subquery/ast.json @@ -0,0 
+1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table1 (children 1)" + }, + { + "explain": " Identifier table1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001238103, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/03290_pr_non_replicated_in_subquery/metadata.json b/parser/testdata/03290_pr_non_replicated_in_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03290_pr_non_replicated_in_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03290_pr_non_replicated_in_subquery/query.sql b/parser/testdata/03290_pr_non_replicated_in_subquery/query.sql new file mode 100644 index 000000000..105f5c41f --- /dev/null +++ b/parser/testdata/03290_pr_non_replicated_in_subquery/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS table1; +CREATE TABLE table1 (number UInt64) ENGINE=MergeTree ORDER BY number; +INSERT INTO table1 SELECT number FROM numbers(300); + +SELECT count() +FROM +( + SELECT * + FROM table1 +); + +-- check that parallel_replicas_for_non_replicated_merge_tree (off by default) is respected in a subquery +SELECT count() +FROM +( + SELECT * + FROM table1 +) +SETTINGS cluster_for_parallel_replicas = 'parallel_replicas', enable_parallel_replicas = 1, max_parallel_replicas = 2; + +DROP TABLE table1; diff --git a/parser/testdata/03291_collapsing_invalid_sign/ast.json b/parser/testdata/03291_collapsing_invalid_sign/ast.json new file mode 100644 index 000000000..6a685933b --- /dev/null +++ b/parser/testdata/03291_collapsing_invalid_sign/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000957612, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03291_collapsing_invalid_sign/metadata.json b/parser/testdata/03291_collapsing_invalid_sign/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03291_collapsing_invalid_sign/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03291_collapsing_invalid_sign/query.sql b/parser/testdata/03291_collapsing_invalid_sign/query.sql new file mode 100644 index 000000000..314315c89 --- /dev/null +++ b/parser/testdata/03291_collapsing_invalid_sign/query.sql @@ -0,0 +1,71 @@ +SET send_logs_level = 'fatal'; +SET allow_suspicious_primary_key = 1; + +DROP TABLE IF EXISTS t_03291_collapsing_invalid_sign; + +CREATE TABLE t_03291_collapsing_invalid_sign +( + x int, + sign Int8 +) +ENGINE = CollapsingMergeTree(sign) +ORDER BY x; + +INSERT INTO t_03291_collapsing_invalid_sign VALUES (1, 1), (1, -1), (1, 2), (1, 1); -- { serverError INCORRECT_DATA } + +SET optimize_on_insert = 0; + +INSERT INTO t_03291_collapsing_invalid_sign VALUES (1, 1), (1, -1), (1, 2), (1, 1); + +-- Invalid sign does not show up in the result of SELECT ...
FINAL +SELECT * FROM t_03291_collapsing_invalid_sign FINAL; + +-- But it is still there +SELECT * FROM t_03291_collapsing_invalid_sign WHERE sign = 2; + +DROP TABLE t_03291_collapsing_invalid_sign; + +DROP TABLE IF EXISTS t0; + +-- From https://github.com/ClickHouse/ClickHouse/issues/70964 +CREATE TABLE t0 (c0 Int64, c1 Int8) ENGINE = CollapsingMergeTree(c1) ORDER BY tuple(); +INSERT INTO TABLE t0 (c0, c1) VALUES (3113015407243198120, 1), (-5920792414778679906, 1), (-5884299248894554057, -1), (-5350317157177243253, 1); +INSERT INTO TABLE t0 (c0, c1) VALUES (6441560602946720835, 1), (214113373379542270, 1), (3450274783568693535, 1), (1229915363377101410, 1), (NULL, 1), (-3790756411322064052, 1), (-6476097574609257986, -1), (8340200647406930909, 1), (901374356156240451, 1), (-7937301880510480569, -1), (2718049489997799810, 1), (-7046070647647655389, 1), (NULL, 1), (NULL, -1), (539551042235977697, 1), (-6680126182402620465, -1), (-545071884260589694, 1), (-1609843843077275108, -1), (-2307499311918146032, 1), (-5800479665755413671, -1), (3122130530598217471, -1), (-4898541304738768940, -1), (1800869867489008348, -1), (6828243795825354769, 1), (4633856863838964429, 1), (5387774862927778951, -1), (71439926420223535, DEFAULT), (-291862315794351910, -1), (5204497349871065609, 1), (2815893477348202253, 1), (-2025994286668060087, -1), (1292848446755680198, 1), (2319903341156302022, -1), (5957514701048533074, -1), (1640080969736049288, 1), (-3213144610890525779, 1), (3054901382518697286, 1), (-6476734360473598468, 1), (-4863659638853829529, -1), (-8396840173526540909, -1), (9064411074374728617, 1); +INSERT INTO TABLE t0 (c0, c1) VALUES (-7330260992317873624, -1), (-3940238436997132320, -1), (2067369422305733562, 1), (-2941185840448845891, -1), (-6558865023141323556, -1), (-5421824708847807728, 1), (NULL, 1), (312212090923478057, 1), (-6725679606605613683, -1), (1446537700060712436, -1), (8173868172223781772, 1), (-1681206259557367358, -1), (-4906346581586950660, 1), (4365639853271319101, -1), (NULL, 1), (-4789376951758674727, -1); +INSERT INTO TABLE t0 (c1, c0) VALUES (1, 8937762244024852571), (-1, 8950824945935547949), (1, -1959904094995983529), (-1, -409312721222347063), (-1, 7342321349808831741), (1, -8193843876476887514), (-1, 8137088644401983521), (1, -1958965057666766522), (-1, 3954120584133380877), (1, 365814659508092868), (-1, -3483686915076827837), (1, -2852016893620489384), (-1, -2559434526558052722), (1, -1393500976668128115), (-1, 3969045642355663450), (-1, -2350556058112780554), (1, -8338109442121810335); +INSERT INTO TABLE t0 (c1, c0) VALUES (1, -4892066199024209089), (-1, -5491182770048413687), (-1, -4018923873457626147), (1, 675764533488331690), (-1, 8420772111929760862), (1, -5937640180539735892), (-1, -4626474415721923630), (-1, 6769798025186732866), (-1, 5345456016261312976), (1, 1710841889478637274), (-1, -1266913271723499160), (-1, 1103743554745717673), (-1, NULL), (1, -2597305165640059095), (1, -6912729702612168562), (-1, -7186477069505009142), (1, 821404772064670938), (-1, -5463163374280298685), (1, 1394078067264958090), (-1, 1036875756161173151); +INSERT INTO TABLE t0 (c1, c0) VALUES (-1, 8482902059126745084), (-1, -7937110385117446797), (1, NULL), (1, -8431229851988407072), (1, 759760791731484731), (1, NULL), (1, -6184256087929051233), (1, -7330483878500565160), (-1, -7087214691055696530), (-1, 1889179400346464951), (-1, 6710360786486282081), (-1, NULL), (1, NULL), (-1, 13275412783444588), (-1, 479269188324861296), (1, -1081506768672197531), (-1, 7649822949489003927), (1, 
8565342891848886719), (-1, -2415400797217663574), (1, NULL), (1, 6434701107584272949), (1, -5975572166119718758), (-1, 8053866671492250673), (-1, -1085746824172150211), (1, -1416598885201198302), (-1, 402267973706390353), (1, 6653089490664761997), (-1, 4832610248880324173), (-1, -3273236545927562170), (1, 6947739671971480893), (1, 5807108610187298768), (1, 2036765517915890303), (-1, 1913625072931392084), (1, 5149177015152753379), (-1, NULL), (-1, -217515546249929827), (-1, 1405490038821945609), (1, -4437589030189839929), (-1, 493190889810948579), (1, NULL), (1, -7914077003684076929); + +-- Should only show the last inserted row with valid sign +SELECT * FROM t0 FINAL; + +DELETE FROM t0 WHERE TRUE; + +SELECT * FROM t0; + +DROP TABLE t0; + +-- CI found a LOGICAL_ERROR during vertical merge, testing for it here +DROP TABLE IF EXISTS t_03291_collapsing_invalid_sign_vertical_merge; + +CREATE TABLE t_03291_collapsing_invalid_sign_vertical_merge (c0 Int64, c1 Int8) ENGINE = CollapsingMergeTree(c1) ORDER BY tuple() +SETTINGS enable_vertical_merge_algorithm = 1, vertical_merge_algorithm_min_rows_to_activate = 0, vertical_merge_algorithm_min_bytes_to_activate = 0, vertical_merge_algorithm_min_columns_to_activate = 0, min_bytes_for_wide_part = 0; + +INSERT INTO TABLE t_03291_collapsing_invalid_sign_vertical_merge VALUES (1, 0); + +INSERT INTO TABLE t_03291_collapsing_invalid_sign_vertical_merge VALUES (1, 0); + +OPTIMIZE TABLE t_03291_collapsing_invalid_sign_vertical_merge; + +DROP TABLE t_03291_collapsing_invalid_sign_vertical_merge; + +-- CI found a SEGV, testing for it here: https://github.com/ClickHouse/ClickHouse/issues/74219 +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Int8, c1 Int) ENGINE = CollapsingMergeTree(c0) ORDER BY (c1); + +INSERT INTO TABLE t0 (c1, c0) VALUES (69938204, 1), (-968049277, 1), (-1302413209, -1), (1059244139, DEFAULT); + +SELECT 1 FROM t0 FINAL; + +DROP TABLE t0; diff --git a/parser/testdata/03291_json_big_structure_deserialization/ast.json b/parser/testdata/03291_json_big_structure_deserialization/ast.json new file mode 100644 index 000000000..df0dfffd2 --- /dev/null +++ b/parser/testdata/03291_json_big_structure_deserialization/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001182907, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03291_json_big_structure_deserialization/metadata.json b/parser/testdata/03291_json_big_structure_deserialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03291_json_big_structure_deserialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03291_json_big_structure_deserialization/query.sql b/parser/testdata/03291_json_big_structure_deserialization/query.sql new file mode 100644 index 000000000..8a83388a6 --- /dev/null +++ b/parser/testdata/03291_json_big_structure_deserialization/query.sql @@ -0,0 +1,13 @@ +set enable_json_type=1; + +create table test (json JSON(max_dynamic_paths=0)) engine=MergeTree order by tuple() settings max_compress_block_size = 128, marks_compress_block_size=128, min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1, index_granularity = 8192, replace_long_file_name_to_hash=1; +insert into test select toJSONString(map(repeat('a' || number, 5000), 42)) from numbers(10000); + +set max_threads=1; +set enable_filesystem_cache=0; +set max_parallel_replicas=1; +set 
remote_filesystem_read_method='read'; +set remote_filesystem_read_prefetch=0; + +select json.a from test format Null; +drop table test; diff --git a/parser/testdata/03291_low_cardinality_uuid/ast.json b/parser/testdata/03291_low_cardinality_uuid/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03291_low_cardinality_uuid/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03291_low_cardinality_uuid/metadata.json b/parser/testdata/03291_low_cardinality_uuid/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03291_low_cardinality_uuid/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03291_low_cardinality_uuid/query.sql b/parser/testdata/03291_low_cardinality_uuid/query.sql new file mode 100644 index 000000000..bb7f22a91 --- /dev/null +++ b/parser/testdata/03291_low_cardinality_uuid/query.sql @@ -0,0 +1,6 @@ +-- creating this data type is allowed by default: +CREATE TEMPORARY TABLE test (x LowCardinality(UUID)); + +-- you can use it: +INSERT INTO test VALUES ('e1b005c0-b947-4893-be97-c9390d0aa583'); +SELECT * FROM test; diff --git a/parser/testdata/03292_format_tty_friendly/ast.json b/parser/testdata/03292_format_tty_friendly/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03292_format_tty_friendly/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03292_format_tty_friendly/metadata.json b/parser/testdata/03292_format_tty_friendly/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03292_format_tty_friendly/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03292_format_tty_friendly/query.sql b/parser/testdata/03292_format_tty_friendly/query.sql new file mode 100644 index 000000000..76e0aa70e --- /dev/null +++ b/parser/testdata/03292_format_tty_friendly/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest + +SELECT name, is_output, is_tty_friendly FROM system.formats WHERE name IN ('Pretty', 'TSV', 'JSON', 'JSONEachRow', +'ODBCDriver2', 'Parquet', 'Arrow', 'BSONEachRow', 'Protobuf', 'ProtobufList', 'ProtobufSingle', 'CapnProto', 'Npy', 'ArrowStream', 'ORC', 'MsgPack', 'Avro', 'RowBinary', 'RowBinaryWithNames', 'RowBinaryWithNamesAndTypes', 'Native', 'Buffers', 'MySQLWire', 'PostgreSQLWire') +ORDER BY name; diff --git a/parser/testdata/03292_nullable_json_schema_inference/ast.json b/parser/testdata/03292_nullable_json_schema_inference/ast.json new file mode 100644 index 000000000..e49f0f576 --- /dev/null +++ b/parser/testdata/03292_nullable_json_schema_inference/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00109424, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03292_nullable_json_schema_inference/metadata.json b/parser/testdata/03292_nullable_json_schema_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03292_nullable_json_schema_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03292_nullable_json_schema_inference/query.sql b/parser/testdata/03292_nullable_json_schema_inference/query.sql new file mode 100644 index 000000000..63ab51964 --- /dev/null +++ b/parser/testdata/03292_nullable_json_schema_inference/query.sql @@ -0,0 +1,4 @@ +set enable_json_type = 1; +set 
schema_inference_make_json_columns_nullable = 1; +select JSONAllPathsWithTypes(materialize('{"a" : [{"b" : 42}]}')::JSON); +select getSubcolumn(materialize('{"a" : [{"b" : 42}]}')::JSON, 'a.:`Array(Nullable(JSON(max_dynamic_types=16, max_dynamic_paths=256)))`'); diff --git a/parser/testdata/03293_forbid_cluster_table_engine/ast.json b/parser/testdata/03293_forbid_cluster_table_engine/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03293_forbid_cluster_table_engine/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03293_forbid_cluster_table_engine/metadata.json b/parser/testdata/03293_forbid_cluster_table_engine/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03293_forbid_cluster_table_engine/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03293_forbid_cluster_table_engine/query.sql b/parser/testdata/03293_forbid_cluster_table_engine/query.sql new file mode 100644 index 000000000..717ade978 --- /dev/null +++ b/parser/testdata/03293_forbid_cluster_table_engine/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest +-- s3Cluster is not used in fast tests + +CREATE TABLE test AS s3Cluster('test_shard_localhost', 'http://localhost:11111/test/a.tsv', 'TSV', 'a Int32'); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03296_bfloat16_ubsan/ast.json b/parser/testdata/03296_bfloat16_ubsan/ast.json new file mode 100644 index 000000000..50d1de23a --- /dev/null +++ b/parser/testdata/03296_bfloat16_ubsan/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001370571, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03296_bfloat16_ubsan/metadata.json b/parser/testdata/03296_bfloat16_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03296_bfloat16_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03296_bfloat16_ubsan/query.sql b/parser/testdata/03296_bfloat16_ubsan/query.sql new file mode 100644 index 000000000..d1cac3d8e --- /dev/null +++ b/parser/testdata/03296_bfloat16_ubsan/query.sql @@ -0,0 +1,2 @@ +SET allow_experimental_bfloat16_type = 1; +SELECT (65535::BFloat16)::Int16; -- The result is implementation defined on overflow. 
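A note on that last test: BFloat16 keeps only 8 mantissa bits, so 65535 is not representable and rounds up to 65536 under round-to-nearest-even; converting that to Int16 then overflows, and C++ leaves out-of-range float-to-integer casts implementation-defined, which is why the test comment cannot pin down a result. A minimal Go sketch illustrates where the 65536 comes from (toBFloat16 is a hypothetical helper, not part of this repo; it rounds a float32 by keeping the top 16 bits of its bit pattern, with NaN handling omitted):

package main

import (
	"fmt"
	"math"
)

// toBFloat16 rounds a float32 to the nearest BFloat16 value
// (round-to-nearest-even) by keeping only the top 16 bits of
// the float32 bit pattern. NaN handling is omitted for brevity.
func toBFloat16(f float32) float32 {
	bits := math.Float32bits(f)
	bits += 0x7FFF + ((bits >> 16) & 1) // rounding bias for the discarded low 16 bits
	return math.Float32frombits(bits &^ 0xFFFF)
}

func main() {
	v := toBFloat16(65535)
	fmt.Println(v) // 65536: with an 8-bit mantissa, 65535 is not representable
	// 65536 does not fit in Int16. Per the Go spec, an out-of-range
	// float-to-integer conversion yields an implementation-dependent value,
	// mirroring the implementation-defined behavior the SQL comment mentions.
	fmt.Println(int16(v))
}

Go gives the same latitude as C++ for the final conversion, so the sketch, like the test, asserts nothing about the cast's value.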
diff --git a/parser/testdata/03297_cut_column_name/ast.json b/parser/testdata/03297_cut_column_name/ast.json new file mode 100644 index 000000000..bcc759271 --- /dev/null +++ b/parser/testdata/03297_cut_column_name/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001094018, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03297_cut_column_name/metadata.json b/parser/testdata/03297_cut_column_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03297_cut_column_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03297_cut_column_name/query.sql b/parser/testdata/03297_cut_column_name/query.sql new file mode 100644 index 000000000..c5e5c9f5b --- /dev/null +++ b/parser/testdata/03297_cut_column_name/query.sql @@ -0,0 +1,68 @@ +SET output_format_pretty_color = 1, output_format_pretty_max_column_name_width_cut_to = 16; + +SELECT CAST((1, 'Hello') AS Tuple(a UInt64, b String)) AS `абвгдежзийклмнопрстуф`, 'Hello' AS x, 'World' AS "абвгдежзийклмнопрстуфхцчшщъыьэюя" FORMAT Pretty; +SELECT CAST((1, 'Hello') AS Tuple(a UInt64, b String)) AS `абвгдежзийклмнопрстуф`, 'Hello' AS x, 'World' AS "абвгдежзийклмнопрстуфхцчшщъыьэюя" FORMAT PrettyCompact; +SELECT CAST((1, 'Hello') AS Tuple(a UInt64, b String)) AS `абвгдежзийклмнопрстуф`, 'Hello' AS x, 'World' AS "абвгдежзийклмнопрстуфхцчшщъыьэюя" FORMAT PrettySpace; +SELECT CAST((1, 'Hello') AS Tuple(a UInt64, b String)) AS `абвгдежзийклмнопрстуф`, 'Hello' AS x, 'World' AS "абвгдежзийклмнопрстуфхцчшщъыьэюя" FORMAT Vertical; + +SELECT 1 AS "абвгдежзийклмнопрстуфхцчшщъыьэюя" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклмнопрстуфхцчшщъыьэю" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклмнопрстуфхцчшщъыьэ" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклмнопрстуфхцчшщъыь" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклмнопрстуфхцчшщъы" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклмнопрстуфхцчшщъ" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклмнопрстуфхцчшщ" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклмнопрстуфхцчш" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклмнопрстуфхцч" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклмнопрстуфхц" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклмнопрстуфх" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклмнопрстуф" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклмнопрсту" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклмнопрст" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклмнопрс" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклмнопр" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклмноп" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклмно" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклмн" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийклм" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийкл" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзийк" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзий" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежзи" FORMAT PrettyCompact; +SELECT 1 AS "абвгдежз" FORMAT PrettyCompact; +SELECT 1 AS "абвгдеж" FORMAT PrettyCompact; +SELECT 1 AS "абвгде" FORMAT PrettyCompact; +SELECT 1 AS "абвгд" FORMAT PrettyCompact; +SELECT 1 AS "абвг" FORMAT PrettyCompact; +SELECT 1 AS "абв" FORMAT PrettyCompact; +SELECT 1 AS "аб" FORMAT PrettyCompact; +SELECT 1 AS "а" FORMAT PrettyCompact; + +SELECT 1 AS "@abcdefghijklmnopqrstuvxyz" FORMAT PrettyCompact; +SELECT 1 AS 
"@abcdefghijklmnopqrstuvxy" FORMAT PrettyCompact; +SELECT 1 AS "@abcdefghijklmnopqrstuvx" FORMAT PrettyCompact; +SELECT 1 AS "@abcdefghijklmnopqrstuv" FORMAT PrettyCompact; +SELECT 1 AS "@abcdefghijklmnopqrstu" FORMAT PrettyCompact; +SELECT 1 AS "@abcdefghijklmnopqrst" FORMAT PrettyCompact; +SELECT 1 AS "@abcdefghijklmnopqrs" FORMAT PrettyCompact; +SELECT 1 AS "@abcdefghijklmnopqr" FORMAT PrettyCompact; +SELECT 1 AS "@abcdefghijklmnopq" FORMAT PrettyCompact; +SELECT 1 AS "@abcdefghijklmnop" FORMAT PrettyCompact; +SELECT 1 AS "@abcdefghijklmno" FORMAT PrettyCompact; +SELECT 1 AS "@abcdefghijklmn" FORMAT PrettyCompact; +SELECT 1 AS "@abcdefghijklm" FORMAT PrettyCompact; +SELECT 1 AS "@abcdefghijkl" FORMAT PrettyCompact; +SELECT 1 AS "@abcdefghijk" FORMAT PrettyCompact; +SELECT 1 AS "@abcdefghij" FORMAT PrettyCompact; +SELECT 1 AS "@abcdefghi" FORMAT PrettyCompact; +SELECT 1 AS "@abcdefgh" FORMAT PrettyCompact; +SELECT 1 AS "@abcdefg" FORMAT PrettyCompact; +SELECT 1 AS "@abcdef" FORMAT PrettyCompact; +SELECT 1 AS "@abcde" FORMAT PrettyCompact; +SELECT 1 AS "@abcd" FORMAT PrettyCompact; +SELECT 1 AS "@abc" FORMAT PrettyCompact; +SELECT 1 AS "@ab" FORMAT PrettyCompact; +SELECT 1 AS "@a" FORMAT PrettyCompact; + +SET output_format_pretty_max_column_name_width_cut_to = 0; +SELECT 1 AS "абвгдежзийклмнопрстуфхцчшщъыьэюя" FORMAT PrettyCompact; diff --git a/parser/testdata/03298_analyzer_group_by_all_fix/ast.json b/parser/testdata/03298_analyzer_group_by_all_fix/ast.json new file mode 100644 index 000000000..a8f24245e --- /dev/null +++ b/parser/testdata/03298_analyzer_group_by_all_fix/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery users (children 3)" + }, + { + "explain": " Identifier users" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration uid (children 1)" + }, + { + "explain": " DataType Int16" + }, + { + "explain": " ColumnDeclaration name (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration age (children 1)" + }, + { + "explain": " DataType Int16" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001228925, + "rows_read": 12, + "bytes_read": 420 + } +} diff --git a/parser/testdata/03298_analyzer_group_by_all_fix/metadata.json b/parser/testdata/03298_analyzer_group_by_all_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03298_analyzer_group_by_all_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03298_analyzer_group_by_all_fix/query.sql b/parser/testdata/03298_analyzer_group_by_all_fix/query.sql new file mode 100644 index 000000000..f226c4905 --- /dev/null +++ b/parser/testdata/03298_analyzer_group_by_all_fix/query.sql @@ -0,0 +1,7 @@ +CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=Memory; + +INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users VALUES (6666, 'Ksenia', 48); +INSERT INTO users VALUES (8888, 'Alice', 50); + +SELECT uid, count(*) over () FROM users group by ALL FORMAT Null; diff --git a/parser/testdata/03298_server_client_native_settings/ast.json b/parser/testdata/03298_server_client_native_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03298_server_client_native_settings/ast.json 
@@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03298_server_client_native_settings/metadata.json b/parser/testdata/03298_server_client_native_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03298_server_client_native_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03298_server_client_native_settings/query.sql b/parser/testdata/03298_server_client_native_settings/query.sql new file mode 100644 index 000000000..340462029 --- /dev/null +++ b/parser/testdata/03298_server_client_native_settings/query.sql @@ -0,0 +1,10 @@ +-- Tags: memory-engine +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Int) ENGINE = Memory; +INSERT INTO TABLE t0 (c0) SETTINGS output_format_native_encode_types_in_binary_format = 1, input_format_native_decode_types_in_binary_format = 1 VALUES (1); +SET output_format_native_encode_types_in_binary_format = 1; +SET input_format_native_decode_types_in_binary_format = 1; +INSERT INTO TABLE t0 (c0) VALUES (1); +SELECT * FROM t0; +DROP TABLE t0; + diff --git a/parser/testdata/03298_triger_local_error_format/ast.json b/parser/testdata/03298_triger_local_error_format/ast.json new file mode 100644 index 000000000..100ce3736 --- /dev/null +++ b/parser/testdata/03298_triger_local_error_format/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal '\/dev\/null'" + }, + { + "explain": " Identifier Npy" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001338461, + "rows_read": 8, + "bytes_read": 256 + } +} diff --git a/parser/testdata/03298_triger_local_error_format/metadata.json b/parser/testdata/03298_triger_local_error_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03298_triger_local_error_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03298_triger_local_error_format/query.sql b/parser/testdata/03298_triger_local_error_format/query.sql new file mode 100644 index 000000000..802173e29 --- /dev/null +++ b/parser/testdata/03298_triger_local_error_format/query.sql @@ -0,0 +1,3 @@ +SELECT 1, 2 INTO OUTFILE '/dev/null' TRUNCATE FORMAT Npy; -- { clientError TOO_MANY_COLUMNS } +SELECT 1 INTO OUTFILE '/dev/null' TRUNCATE FORMAT Template; -- { clientError INVALID_TEMPLATE_FORMAT } +SELECT 'a' INTO OUTFILE '/dev/null' TRUNCATE FORMAT Avro; -- { clientError STD_EXCEPTION, UNKNOWN_FORMAT } diff --git a/parser/testdata/03298_vertical_columns/ast.json b/parser/testdata/03298_vertical_columns/ast.json new file mode 100644 index 000000000..47d97b85e --- /dev/null +++ b/parser/testdata/03298_vertical_columns/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001186741, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03298_vertical_columns/metadata.json b/parser/testdata/03298_vertical_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03298_vertical_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff 
--git a/parser/testdata/03298_vertical_columns/query.sql b/parser/testdata/03298_vertical_columns/query.sql new file mode 100644 index 000000000..fa085f530 --- /dev/null +++ b/parser/testdata/03298_vertical_columns/query.sql @@ -0,0 +1,2 @@ +SET output_format_pretty_color = 1; +SELECT 1 AS x FORMAT Vertical; diff --git a/parser/testdata/03299_deep_nested_map_creation/ast.json b/parser/testdata/03299_deep_nested_map_creation/ast.json new file mode 100644 index 000000000..4d9cc0a35 --- /dev/null +++ b/parser/testdata/03299_deep_nested_map_creation/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001111066, + "rows_read": 2, + "bytes_read": 61 + } +} diff --git a/parser/testdata/03299_deep_nested_map_creation/metadata.json b/parser/testdata/03299_deep_nested_map_creation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03299_deep_nested_map_creation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03299_deep_nested_map_creation/query.sql b/parser/testdata/03299_deep_nested_map_creation/query.sql new file mode 100644 index 000000000..5efb37416 --- /dev/null +++ b/parser/testdata/03299_deep_nested_map_creation/query.sql @@ -0,0 +1,7 @@ +CREATE TABLE test +( + `x` Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Map(Int32, Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32), Int32) +) +ENGINE = MergeTree +ORDER BY tuple(); + diff --git a/parser/testdata/03299_map_named_tuple/ast.json b/parser/testdata/03299_map_named_tuple/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03299_map_named_tuple/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03299_map_named_tuple/metadata.json b/parser/testdata/03299_map_named_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03299_map_named_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03299_map_named_tuple/query.sql b/parser/testdata/03299_map_named_tuple/query.sql new file mode 100644 index 000000000..3e8e597c6 --- /dev/null +++ b/parser/testdata/03299_map_named_tuple/query.sql @@ -0,0 +1,8 @@ + +-- This is a very subtle issue when map(1 ,2) data type (which is 
represented internally as Array(Tuple(key, value))) is first created with +-- an explicitly named tuple ('keys' key, 'values' value) as an argument of `mapConcat`, and is then created again, +-- without explicit naming, for the map constant inside the lambda function of mapApply. When these two types +-- are compared, the tuple element names participate in the comparison, so the comparison fails - which leads to +-- an 'incompatible types' error showing two seemingly identical Map types - issue https://github.com/ClickHouse/ClickHouse/issues/64805 + +SELECT mapConcat(map(1, 2)), mapApply((x, y) -> (map(1, 2), x + 1), map(1, 0)) diff --git a/parser/testdata/03299_pretty_squash/ast.json b/parser/testdata/03299_pretty_squash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03299_pretty_squash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03299_pretty_squash/metadata.json b/parser/testdata/03299_pretty_squash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03299_pretty_squash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03299_pretty_squash/query.sql b/parser/testdata/03299_pretty_squash/query.sql new file mode 100644 index 000000000..e3bddba79 --- /dev/null +++ b/parser/testdata/03299_pretty_squash/query.sql @@ -0,0 +1,3 @@ +--- Despite max_block_size = 1, this will squash the blocks and output everything as a single block: +SELECT number FROM numbers(10) FORMAT PrettyCompact SETTINGS max_block_size = 1, output_format_pretty_squash_consecutive_ms = 60000, output_format_pretty_squash_max_wait_ms = 60000; +SELECT number FROM numbers(10) FORMAT PrettyCompact SETTINGS max_block_size = 1, output_format_pretty_squash_consecutive_ms = 0; diff --git a/parser/testdata/03300_generate_random_const_expr_params/ast.json b/parser/testdata/03300_generate_random_const_expr_params/ast.json new file mode 100644 index 000000000..9a60fb22a --- /dev/null +++ b/parser/testdata/03300_generate_random_const_expr_params/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t0 (children 3)" + }, + { + "explain": " Identifier t0" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration c0 (children 1)" + }, + { + "explain": " DataType Int32" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function GenerateRandom (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function rand (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001277846, + "rows_read": 11, + "bytes_read": 393 + } +} diff --git a/parser/testdata/03300_generate_random_const_expr_params/metadata.json b/parser/testdata/03300_generate_random_const_expr_params/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03300_generate_random_const_expr_params/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03300_generate_random_const_expr_params/query.sql b/parser/testdata/03300_generate_random_const_expr_params/query.sql new file mode 100644 index 000000000..ff910ee40 --- /dev/null +++ b/parser/testdata/03300_generate_random_const_expr_params/query.sql @@ -0,0 +1,4 @@ +CREATE TABLE t0 (c0 Int32) ENGINE = GenerateRandom(rand()); -- { serverError
BAD_ARGUMENTS } +CREATE TABLE t1 (c0 Int32) ENGINE = GenerateRandom(now() % 1073741824); +CREATE TABLE t2 (c0 Int32) ENGINE = GenerateRandom(1 + 1); +CREATE TABLE t4 (c0 Int32) ENGINE = GenerateRandom(now() % 1073741824, 1+1, '123'::UInt64); diff --git a/parser/testdata/03300_nested_json_empty_keys/ast.json b/parser/testdata/03300_nested_json_empty_keys/ast.json new file mode 100644 index 000000000..8ab757a75 --- /dev/null +++ b/parser/testdata/03300_nested_json_empty_keys/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '{\"\" : {\"\" : {\"\" : 42}}}'" + }, + { + "explain": " Literal 'JSON'" + }, + { + "explain": " Set" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001258255, + "rows_read": 11, + "bytes_read": 407 + } +} diff --git a/parser/testdata/03300_nested_json_empty_keys/metadata.json b/parser/testdata/03300_nested_json_empty_keys/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03300_nested_json_empty_keys/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03300_nested_json_empty_keys/query.sql b/parser/testdata/03300_nested_json_empty_keys/query.sql new file mode 100644 index 000000000..995c83a4b --- /dev/null +++ b/parser/testdata/03300_nested_json_empty_keys/query.sql @@ -0,0 +1 @@ +select materialize('{"" : {"" : {"" : 42}}}')::JSON settings enable_json_type=1; diff --git a/parser/testdata/03300_pretty_vertical_cut/ast.json b/parser/testdata/03300_pretty_vertical_cut/ast.json new file mode 100644 index 000000000..37eabeb96 --- /dev/null +++ b/parser/testdata/03300_pretty_vertical_cut/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001361473, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03300_pretty_vertical_cut/metadata.json b/parser/testdata/03300_pretty_vertical_cut/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03300_pretty_vertical_cut/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03300_pretty_vertical_cut/query.sql b/parser/testdata/03300_pretty_vertical_cut/query.sql new file mode 100644 index 000000000..8fc4f72a6 --- /dev/null +++ b/parser/testdata/03300_pretty_vertical_cut/query.sql @@ -0,0 +1,11 @@ +SET output_format_pretty_max_rows = 10; + +SELECT number, 'Hello'||number FROM numbers(25) FORMAT Pretty; +SELECT number, 'Hello'||number FROM numbers(25) FORMAT PrettyCompact; +SELECT number, 'Hello'||number FROM numbers(25) FORMAT PrettySpace; + +SET output_format_pretty_max_rows = 11; + +SELECT number, 'Hello'||number FROM numbers(25) FORMAT Pretty; +SELECT number, 'Hello'||number FROM numbers(25) FORMAT PrettyCompact; +SELECT number, 'Hello'||number FROM numbers(25) FORMAT PrettySpace; diff --git a/parser/testdata/03301_is_ipv4_string/ast.json b/parser/testdata/03301_is_ipv4_string/ast.json new file mode 100644 index 
000000000..b60e7a29a --- /dev/null +++ b/parser/testdata/03301_is_ipv4_string/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function isIPv4String (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '1.1.1.1\\01'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001160288, + "rows_read": 7, + "bytes_read": 269 + } +} diff --git a/parser/testdata/03301_is_ipv4_string/metadata.json b/parser/testdata/03301_is_ipv4_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03301_is_ipv4_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03301_is_ipv4_string/query.sql b/parser/testdata/03301_is_ipv4_string/query.sql new file mode 100644 index 000000000..d856b12e2 --- /dev/null +++ b/parser/testdata/03301_is_ipv4_string/query.sql @@ -0,0 +1 @@ +SELECT isIPv4String('1.1.1.1\01'); diff --git a/parser/testdata/03301_subcolumns_in_mv/ast.json b/parser/testdata/03301_subcolumns_in_mv/ast.json new file mode 100644 index 000000000..0580b2148 --- /dev/null +++ b/parser/testdata/03301_subcolumns_in_mv/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001088929, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03301_subcolumns_in_mv/metadata.json b/parser/testdata/03301_subcolumns_in_mv/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03301_subcolumns_in_mv/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03301_subcolumns_in_mv/query.sql b/parser/testdata/03301_subcolumns_in_mv/query.sql new file mode 100644 index 000000000..1c04bb0a8 --- /dev/null +++ b/parser/testdata/03301_subcolumns_in_mv/query.sql @@ -0,0 +1,16 @@ +set enable_json_type = 1; +drop table if exists src; +drop table if exists dst; +drop view if exists view; + +create table src (a Tuple(b Tuple(c UInt32)), json JSON(x.y UInt32)) engine=Memory; +create table dst (a UInt32, x UInt32, z UInt32) engine=Memory; +create materialized view view to dst as select a.b.c as a, json.x.y as x, json.z::UInt32 as z from src; + +insert into src select tuple(tuple(1)), '{"x" : {"y" : 2}, "z" : 3}'; +select * from dst; + +drop view view; +drop table dst; +drop table src; + diff --git a/parser/testdata/03302_analyzer_distributed_filter_push_down/ast.json b/parser/testdata/03302_analyzer_distributed_filter_push_down/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03302_analyzer_distributed_filter_push_down/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03302_analyzer_distributed_filter_push_down/metadata.json b/parser/testdata/03302_analyzer_distributed_filter_push_down/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03302_analyzer_distributed_filter_push_down/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03302_analyzer_distributed_filter_push_down/query.sql b/parser/testdata/03302_analyzer_distributed_filter_push_down/query.sql new file mode 100644 index 
000000000..cc869da09 --- /dev/null +++ b/parser/testdata/03302_analyzer_distributed_filter_push_down/query.sql @@ -0,0 +1,101 @@ +-- Tags: no-random-merge-tree-settings + +set enable_analyzer=1; +set serialize_query_plan = 0; +set enable_parallel_replicas = 0; +set prefer_localhost_replica=1; +set optimize_aggregation_in_order=0, optimize_read_in_order=0; + +select '============ #66878'; + +CREATE TABLE tab0 (x UInt32, y UInt32) engine = MergeTree order by x; +insert into tab0 select number, number from numbers(8192 * 123); + + +select * from (explain indexes=1, actions=1, distributed=1 + select * from (select * from remote('127.0.0.{1,2}', currentDatabase(), tab0)) where x = 42 +); + +select '============ lambdas'; + +--- lambdas are not supported +select * from (select * from remote('127.0.0.{1,2}', currentDatabase(), tab0)) where arraySum(arrayMap(y -> y + 1, [x])) = 42; +select * from (select * from remote('127.0.0.{1,2}', currentDatabase(), tab0)) where arraySum(arrayMap(y -> x + y + 2, [x])) = 42; + +select '============ #69472'; + +select * from (explain indexes=1, actions=1, distributed=1 + select sum(y) from (select * from remote('127.0.0.{1,2}', currentDatabase(), tab0)) where x = 42 +); + +select * from (explain indexes=1, actions=1, distributed=1 + select * from (select x, sum(y) from remote('127.0.0.{1,2}', currentDatabase(), tab0) group by x) where x = 42 +); + +select '============ in / global in'; + +--- IN is supported +select * from (explain indexes=1, distributed=1 + select sum(y) from (select * from remote('127.0.0.{1,2}', currentDatabase(), tab0)) where x in (select number + 42 from numbers(1)) +); + +--- GLOBAL IN is replaced with a temporary table + +select sum(y) from (select * from remote('127.0.0.2', currentDatabase(), tab0)) where x global in (select number + 42 from numbers(1)); +select * from (explain indexes=1, distributed=1 + select sum(y) from (select * from remote('127.0.0.2', currentDatabase(), tab0)) where x global in (select number + 42 from numbers(1)) +); + +select sum(y) from (select * from remote('127.0.0.1', currentDatabase(), tab0)) where x global in (select number + 42 from numbers(1)); +select * from (explain indexes=1, distributed=1 + select sum(y) from (select * from remote('127.0.0.1', currentDatabase(), tab0)) where x global in (select number + 42 from numbers(1)) +); + +select sum(y) from (select * from remote('127.0.0.{2,3}', currentDatabase(), tab0)) where x global in (select number + 42 from numbers(1)); +select * from (explain indexes=1, distributed=1 + select sum(y) from (select * from remote('127.0.0.{2,3}', currentDatabase(), tab0)) where x global in (select number + 42 from numbers(1)) +); + +select sum(y) from (select * from remote('127.0.0.{1,2}', currentDatabase(), tab0)) where x global in (select number + 42 from numbers(1)); +select * from (explain indexes=1, distributed=1 + select sum(y) from (select * from remote('127.0.0.{1,2}', currentDatabase(), tab0)) where x global in (select number + 42 from numbers(1)) +); + +select sum(y) from (select * from remote('127.0.0.{1,2,3}', currentDatabase(), tab0)) where x global in (select number + 42 from numbers(1)); +select * from (explain indexes=1, distributed=1 + select sum(y) from (select * from remote('127.0.0.{1,2,3}', currentDatabase(), tab0)) where x global in (select number + 42 from numbers(1)) +); + +select sum(x) from (select * from remote('127.0.0.{1,2,3}', currentDatabase(), tab0)) where y global in (select number + 42 from numbers(1)); +select * from (explain indexes=1,
distributed=1 + select sum(x) from (select * from remote('127.0.0.{1,2,3}', currentDatabase(), tab0)) where y global in (select number + 42 from numbers(1)) +); + +select '============ #65638'; + +CREATE TABLE tab1 +( + `tenant` String, + `recordTimestamp` Int64, + `responseBody` String, + `colAlias` String ALIAS responseBody || 'something else', + INDEX ngrams colAlias TYPE ngrambf_v1(3, 2097152, 3, 0) GRANULARITY 10, +) +ENGINE = MergeTree ORDER BY recordTimestamp; + +INSERT INTO tab1 SELECT * FROM generateRandom('tenant String, recordTimestamp Int64, responseBody String') LIMIT 10; + + +select * from (explain indexes=1, distributed=1 + select * from (select * from remote('127.0.0.{1,2}', currentDatabase(), tab1)) where (tenant,recordTimestamp) IN ( + select tenant,recordTimestamp from remote('127.0.0.{1,2}', currentDatabase(), tab1) where colAlias like '%abcd%' +)); + +select '============ #68030'; + +CREATE TABLE tab2 ENGINE=ReplacingMergeTree ORDER BY n AS SELECT intDiv(number,2) as n from numbers(8192 * 123); +CREATE VIEW test_view AS SELECT * FROM remote('127.0.0.{1,2}', currentDatabase(), tab2); + +select * from (explain indexes=1, actions=1, distributed=1 + SELECT * from test_view WHERE n=100 +); diff --git a/parser/testdata/03302_analyzer_join_filter_push_down_bug/ast.json b/parser/testdata/03302_analyzer_join_filter_push_down_bug/ast.json new file mode 100644 index 000000000..5c944f93c --- /dev/null +++ b/parser/testdata/03302_analyzer_join_filter_push_down_bug/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t1 (children 3)" + }, + { + "explain": " Identifier t1" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration key (children 1)" + }, + { + "explain": " DataType Int32" + }, + { + "explain": " ColumnDeclaration value (children 1)" + }, + { + "explain": " DataType DateTime" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Log" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001124861, + "rows_read": 10, + "bytes_read": 343 + } +} diff --git a/parser/testdata/03302_analyzer_join_filter_push_down_bug/metadata.json b/parser/testdata/03302_analyzer_join_filter_push_down_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03302_analyzer_join_filter_push_down_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03302_analyzer_join_filter_push_down_bug/query.sql b/parser/testdata/03302_analyzer_join_filter_push_down_bug/query.sql new file mode 100644 index 000000000..f0f1ba31b --- /dev/null +++ b/parser/testdata/03302_analyzer_join_filter_push_down_bug/query.sql @@ -0,0 +1,9 @@ +CREATE TABLE t1 (key Int32, value DateTime) ENGINE = Log; +INSERT INTO t1 select number, number from numbers(10000); +create table t2 ENGINE = Log as select key as key1, value from t1; + +explain actions=1 select count() from +(SELECT key from t1 CROSS JOIN t2 + where t1.value >= toDateTime(toString(t2.value)) +) where key = 162601 +settings enable_analyzer=1; diff --git a/parser/testdata/03302_any_enum_aggregation/ast.json b/parser/testdata/03302_any_enum_aggregation/ast.json new file mode 100644 index 000000000..c854bca94 --- /dev/null +++ b/parser/testdata/03302_any_enum_aggregation/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + 
[ + { + "explain": "DropQuery test_33602_t0a (children 1)" + }, + { + "explain": " Identifier test_33602_t0a" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001120838, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/03302_any_enum_aggregation/metadata.json b/parser/testdata/03302_any_enum_aggregation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03302_any_enum_aggregation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03302_any_enum_aggregation/query.sql b/parser/testdata/03302_any_enum_aggregation/query.sql new file mode 100644 index 000000000..9c6102a98 --- /dev/null +++ b/parser/testdata/03302_any_enum_aggregation/query.sql @@ -0,0 +1,50 @@ +DROP TABLE IF EXISTS test_33602_t0a; +DROP TABLE IF EXISTS test_33602_t0b; +DROP TABLE IF EXISTS test_33602; + +SELECT 'Issue 68605'; + +CREATE TABLE test_33602 (name String, score UInt8, user_level Enum8('LOW' = 1, 'MEDIUM' = 2, 'HIGH' = 3)) ENGINE=Memory; +SELECT any(user_level) FROM test_33602; +SELECT any(user_level), any(name), any(score) FROM test_33602; +SELECT anyLast(user_level), anyLast(name), anyLast(score) FROM test_33602; +SELECT anyHeavy(user_level) FROM test_33602; +SELECT min(user_level), max(user_level) FROM test_33602; +SELECT argMin(user_level, user_level), argMax(user_level, user_level) FROM test_33602; +DROP TABLE test_33602; + +SELECT 'Empty Enum8 table:'; +DROP TABLE IF EXISTS test_33602_t0a; +CREATE TABLE test_33602_t0a (e Enum8('LOW' = 123, 'MEDIUM' = 12, 'HIGH' = 33)) ENGINE=Memory; +SELECT any(e) FROM test_33602_t0a; +SELECT anyLast(e) FROM test_33602_t0a; +SELECT anyHeavy(e) FROM test_33602_t0a; +Select min(e), max(e) FROM test_33602_t0a; +Select argMin(e, e), argMax(e, e) FROM test_33602_t0a; + +SELECT 'Enum8 table with HIGH value:'; +INSERT INTO test_33602_t0a VALUES('HIGH'); +SELECT any(e) FROM test_33602_t0a; +SELECT anyLast(e) FROM test_33602_t0a; +SELECT anyHeavy(e) FROM test_33602_t0a; +SELECT min(e), max(e) FROM test_33602_t0a; +Select argMin(e, e), argMax(e, e) FROM test_33602_t0a; +DROP TABLE test_33602_t0a; + +SELECT 'Empty Enum16 table:'; +DROP TABLE IF EXISTS test_33602_t0b; +CREATE TABLE test_33602_t0b (e Enum16('LOW' = 123, 'MEDIUM' = 12, 'HIGH' = 33)) ENGINE=Memory; +SELECT any(e) FROM test_33602_t0b; +SELECT anyLast(e) FROM test_33602_t0b; +SELECT anyHeavy(e) FROM test_33602_t0b; +SELECT min(e), max(e) FROM test_33602_t0b; +Select argMin(e, e), argMax(e, e) FROM test_33602_t0b; + +SELECT 'Enum16 table with HIGH value:'; +INSERT INTO test_33602_t0b VALUES('HIGH'); +SELECT any(e) FROM test_33602_t0b; +SELECT anyLast(e) FROM test_33602_t0b; +SELECT anyHeavy(e) FROM test_33602_t0b; +SELECT min(e), max(e) FROM test_33602_t0b; +Select argMin(e, e), argMax(e, e) FROM test_33602_t0b; +DROP TABLE test_33602_t0b; diff --git a/parser/testdata/03302_merge_table_structure_unification/ast.json b/parser/testdata/03302_merge_table_structure_unification/ast.json new file mode 100644 index 000000000..ba73037ea --- /dev/null +++ b/parser/testdata/03302_merge_table_structure_unification/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001199089, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03302_merge_table_structure_unification/metadata.json b/parser/testdata/03302_merge_table_structure_unification/metadata.json new file mode 100644 
index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03302_merge_table_structure_unification/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03302_merge_table_structure_unification/query.sql b/parser/testdata/03302_merge_table_structure_unification/query.sql new file mode 100644 index 000000000..41bfa8dc2 --- /dev/null +++ b/parser/testdata/03302_merge_table_structure_unification/query.sql @@ -0,0 +1,52 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS test_a; +DROP TABLE IF EXISTS test_b; +DROP TABLE IF EXISTS test_merge; + +CREATE TABLE test_a +( + a UInt8, + b String, + c Array(String) +) ENGINE = Memory; + +CREATE TABLE test_b +( + a Int32, + c Array(Nullable(String)), + d DateTime('UTC') DEFAULT now(), +) ENGINE = Memory; + +INSERT INTO test_a VALUES (1, 'Hello', ['World']); +INSERT INTO test_b VALUES (-1, ['Goodbye'], '2025-01-01 02:03:04'); + +CREATE TABLE test_merge ENGINE = Merge(currentDatabase(), '^test_'); + +-- TODO: defaults are not calculated +SELECT * FROM test_merge ORDER BY a; + +SELECT '--- table function'; +DESCRIBE merge('^test_'); + +-- Note that this will also pick up the test_merge table, duplicating the results +SELECT * FROM merge('^test_') ORDER BY a; + +DROP TABLE test_merge; + +SET merge_table_max_tables_to_look_for_schema_inference = 1; + +CREATE TABLE test_merge ENGINE = Merge(currentDatabase(), '^test_'); + +SELECT '--- merge_table_max_tables_to_look_for_schema_inference = 1'; +SELECT * FROM test_merge ORDER BY a; + +SELECT '--- table function'; +DESCRIBE merge('^test_'); + +SELECT * FROM merge('^test_') ORDER BY a; + +DROP TABLE test_merge; + +DROP TABLE test_a; +DROP TABLE test_b; diff --git a/parser/testdata/03303_alias_inverse_order/ast.json b/parser/testdata/03303_alias_inverse_order/ast.json new file mode 100644 index 000000000..a17d76ff5 --- /dev/null +++ b/parser/testdata/03303_alias_inverse_order/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_alias_inverse_order (children 1)" + }, + { + "explain": " Identifier test_alias_inverse_order" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001266957, + "rows_read": 2, + "bytes_read": 100 + } +} diff --git a/parser/testdata/03303_alias_inverse_order/metadata.json b/parser/testdata/03303_alias_inverse_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03303_alias_inverse_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03303_alias_inverse_order/query.sql b/parser/testdata/03303_alias_inverse_order/query.sql new file mode 100644 index 000000000..4598f55ee --- /dev/null +++ b/parser/testdata/03303_alias_inverse_order/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS test_alias_inverse_order; + +CREATE TABLE test_alias_inverse_order +( + x int, + y int ALIAS x + 1, + z int ALIAS y + 1 +) +ENGINE = MergeTree +ORDER BY (); + +SELECT x, y, z FROM test_alias_inverse_order SETTINGS enable_analyzer = 1; +SELECT x, z, y FROM test_alias_inverse_order SETTINGS enable_analyzer = 1; + +DROP TABLE IF EXISTS test_alias_inverse_order; diff --git a/parser/testdata/03303_distributed_explain/ast.json b/parser/testdata/03303_distributed_explain/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03303_distributed_explain/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03303_distributed_explain/metadata.json 
b/parser/testdata/03303_distributed_explain/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03303_distributed_explain/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03303_distributed_explain/query.sql b/parser/testdata/03303_distributed_explain/query.sql new file mode 100644 index 000000000..0b02725e2 --- /dev/null +++ b/parser/testdata/03303_distributed_explain/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-random-settings + +set enable_analyzer=1; + +set serialize_query_plan=0; +explain actions = 1, distributed=1 select sum(number) from remote('127.0.0.{1,2,3}', numbers(5)) group by bitAnd(number, 3); +explain distributed=1 select * from (select * from remote('127.0.0.{1,2}', numbers(2)) where number=1); + +select '----------'; + +set serialize_query_plan=1; +explain actions = 1, distributed=1 select sum(number) from remote('127.0.0.{1,2,3}', numbers(5)) group by bitAnd(number, 3); +explain distributed=1 select * from (select * from remote('127.0.0.{1,2}', numbers(2)) where number=1); + +select '----------'; + +DROP TABLE IF EXISTS test_parallel_replicas; +CREATE TABLE test_parallel_replicas (number UInt64) ENGINE=MergeTree() ORDER BY tuple(); +INSERT INTO test_parallel_replicas SELECT * FROM numbers(10); + +SET enable_parallel_replicas=2, max_parallel_replicas=2, cluster_for_parallel_replicas='test_cluster_one_shard_two_replicas', parallel_replicas_for_non_replicated_merge_tree=1, parallel_replicas_local_plan=1; + +explain actions = 1, distributed=1 SELECT sum(number) from test_parallel_replicas group by bitAnd(number, 3); diff --git a/parser/testdata/03303_dynamic_in_not_xor/ast.json b/parser/testdata/03303_dynamic_in_not_xor/ast.json new file mode 100644 index 000000000..956a682b5 --- /dev/null +++ b/parser/testdata/03303_dynamic_in_not_xor/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001416819, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03303_dynamic_in_not_xor/metadata.json b/parser/testdata/03303_dynamic_in_not_xor/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03303_dynamic_in_not_xor/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03303_dynamic_in_not_xor/query.sql b/parser/testdata/03303_dynamic_in_not_xor/query.sql new file mode 100644 index 000000000..5597079c8 --- /dev/null +++ b/parser/testdata/03303_dynamic_in_not_xor/query.sql @@ -0,0 +1,6 @@ +set enable_dynamic_type=1; +select not materialize(1)::Dynamic as res, toTypeName(res); +select xor(materialize(1)::Dynamic, materialize(0)::Dynamic) as res, toTypeName(res); + +SELECT sum((NOT CAST(materialize(1) AS Dynamic)) = TRUE); + diff --git a/parser/testdata/03303_pretty_multiline/ast.json b/parser/testdata/03303_pretty_multiline/ast.json new file mode 100644 index 000000000..1a2831a85 --- /dev/null +++ b/parser/testdata/03303_pretty_multiline/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001264939, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03303_pretty_multiline/metadata.json b/parser/testdata/03303_pretty_multiline/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03303_pretty_multiline/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03303_pretty_multiline/query.sql b/parser/testdata/03303_pretty_multiline/query.sql new file mode 100644 index 000000000..65889a957 --- /dev/null +++ b/parser/testdata/03303_pretty_multiline/query.sql @@ -0,0 +1,5 @@ +SET output_format_pretty_color = 1; +SET output_format_pretty_fallback_to_vertical = 0; +SELECT * FROM VALUES(('Hello', 'World'), ('Hel\nlo', 'World'), ('Hello\n', 'World'), ('\nHello\n\n', 'Wor \n ld')) FORMAT Pretty; +SELECT * FROM VALUES(('Hello', 'World'), ('Hel\nlo', 'World'), ('Hello\n', 'World'), ('\nHello\n\n', 'Wor \n ld')) FORMAT PrettyCompact; +SELECT * FROM VALUES(('Hello', 'World'), ('Hel\nlo', 'World'), ('Hello\n', 'World'), ('\nHello\n\n', 'Wor \n ld')) FORMAT PrettySpace; diff --git a/parser/testdata/03304_compare_substrings/ast.json b/parser/testdata/03304_compare_substrings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03304_compare_substrings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03304_compare_substrings/metadata.json b/parser/testdata/03304_compare_substrings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03304_compare_substrings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03304_compare_substrings/query.sql b/parser/testdata/03304_compare_substrings/query.sql new file mode 100644 index 000000000..1d1034e09 --- /dev/null +++ b/parser/testdata/03304_compare_substrings/query.sql @@ -0,0 +1,74 @@ +-- Negative tests + +-- Five arguments are expected +select compareSubstrings(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select compareSubstrings('abc'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select compareSubstrings('abc', 'abc'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select compareSubstrings('abc', 'abc', 0); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select compareSubstrings('abc', 'abc', 0, 0); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +select compareSubstrings('abc', 'abc', 0, 0, 0, 0); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +-- 1st/2nd argument must be string, 3rd/4th/5th argument must be integer +select compareSubstrings(0, 'abc', 0, 0, 0); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select compareSubstrings('abc', 0, 0, 0, 0); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select compareSubstrings('abc', 'abc', 'abc', 0, 0); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select compareSubstrings('abc', 'abc', 0, 'abc', 0); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select compareSubstrings('abc', 'abc', 0, 0, 'abc'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- 3rd, 4th, 5th argument must be non-negative +select compareSubstrings('abc', 'abc', -1, 0, 0); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select compareSubstrings('abc', 'abc', 0, -1, 0); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +select compareSubstrings('abc', 'abc', 0, 0, -1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- {echoOn } + +select compareSubstrings('abc', 'abc', 0, 0, 3); +select compareSubstrings('abd', 'abc', 0, 0, 3); +select compareSubstrings('abb', 'abc', 0, 0, 3); +select compareSubstrings('abc', 'abd', 1, 1, 3); +select compareSubstrings('abc', 'abc', 4, 0, 3); +select compareSubstrings('abc', 'abc', 3, 4, 3); +select compareSubstrings('ab1', 'abc', 0, 0, 0); + +select s, compareSubstrings(s, 'ab3', 0, 0, 3) from (select 
cast(s as FixedString(6)) as s from (select concat('ab', number % 6, 'cde') as s from numbers(8))); +select s, compareSubstrings(s, 'ab3', 6, 0, 3) from (select cast(s as FixedString(6)) as s from (select concat('ab', number % 6, 'cde') as s from numbers(8))); +select s, compareSubstrings(s, 'ab3', 0, 3, 3) from (select cast(s as FixedString(6)) as s from (select concat('ab', number % 6, 'cde') as s from numbers(8))); +select s, compareSubstrings(s, 'ab3', 6, 3, 3) from (select cast(s as FixedString(6)) as s from (select concat('ab', number % 6, 'cde') as s from numbers(8))); +select s, compareSubstrings('ab3', s, 0, 0, 3) from (select cast(s as FixedString(6)) as s from (select concat('ab', number % 6, 'cde') as s from numbers(8))); +select s, compareSubstrings('ab3', s, 3, 0, 3) from (select cast(s as FixedString(6)) as s from (select concat('ab', number % 6, 'cde') as s from numbers(8))); +select s, compareSubstrings('ab3', s, 0, 6, 3) from (select cast(s as FixedString(6)) as s from (select concat('ab', number % 6, 'cde') as s from numbers(8))); +select s, compareSubstrings('ab3', s, 3, 6, 3) from (select cast(s as FixedString(6)) as s from (select concat('ab', number % 6, 'cde') as s from numbers(8))); + + +select s, compareSubstrings(s, 'ab3', 0, 0, 3) from (select concat('ab', number % 6, 'cde') as s from numbers(8)); +select s, compareSubstrings(s, 'ab3', 6, 0, 3) from (select concat('ab', number % 6, 'cde') as s from numbers(8)); +select s, compareSubstrings(s, 'ab3', 0, 3, 3) from (select concat('ab', number % 6, 'cde') as s from numbers(8)); +select s, compareSubstrings(s, 'ab3', 6, 3, 3) from (select concat('ab', number % 6, 'cde') as s from numbers(8)); +select s, compareSubstrings('ab3', s, 0, 0, 3) from (select concat('ab', number % 6, 'cde') as s from numbers(8)); +select s, compareSubstrings('ab3', s, 3, 0, 3) from (select concat('ab', number % 6, 'cde') as s from numbers(8)); +select s, compareSubstrings('ab3', s, 0, 6, 3) from (select concat('ab', number % 6, 'cde') as s from numbers(8)); +select s, compareSubstrings('ab3', s, 3, 6, 3) from (select concat('ab', number % 6, 'cde') as s from numbers(8)); + +select s1, s2, compareSubstrings(s1, s2, 0, 0, 4) from (select concat('ab', number % 3) as s1, concat('ab', number % 4) as s2 from numbers(6)); +select s1, s2, compareSubstrings(s1, s2, 3, 0, 4) from (select concat('ab', number % 3) as s1, concat('ab', number % 4) as s2 from numbers(6)); +select s1, s2, compareSubstrings(s1, s2, 0, 3, 4) from (select concat('ab', number % 3) as s1, concat('ab', number % 4) as s2 from numbers(6)); +select s1, s2, compareSubstrings(s1, s2, 4, 4, 4) from (select concat('ab', number % 3) as s1, concat('ab', number % 4) as s2 from numbers(6)); + + +select s1, s2, compareSubstrings(s1, s2, 0, 0, 4) from (select cast(s1 as FixedString(3)) as s1, s2 from (select concat('ab', number % 3) as s1, concat('ab', number % 4) as s2 from numbers(6))); +select s1, s2, compareSubstrings(s1, s2, 3, 0, 4) from (select cast(s1 as FixedString(3)) as s1, s2 from (select concat('ab', number % 3) as s1, concat('ab', number % 4) as s2 from numbers(6))); +select s1, s2, compareSubstrings(s1, s2, 0, 3, 4) from (select cast(s1 as FixedString(3)) as s1, s2 from (select concat('ab', number % 3) as s1, concat('ab', number % 4) as s2 from numbers(6))); +select s1, s2, compareSubstrings(s1, s2, 4, 4, 4) from (select cast(s1 as FixedString(3)) as s1, s2 from (select concat('ab', number % 3) as s1, concat('ab', number % 4) as s2 from numbers(6))); + + +select s1, s2, 
compareSubstrings(s1, s2, 0, 0, 4) from (select cast(s2 as FixedString(3)) as s2, s1 from (select concat('ab', number % 3) as s1, concat('ab', number % 4) as s2 from numbers(6))); +select s1, s2, compareSubstrings(s1, s2, 3, 0, 4) from (select cast(s2 as FixedString(3)) as s2, s1 from (select concat('ab', number % 3) as s1, concat('ab', number % 4) as s2 from numbers(6))); +select s1, s2, compareSubstrings(s1, s2, 0, 3, 4) from (select cast(s2 as FixedString(3)) as s2, s1 from (select concat('ab', number % 3) as s1, concat('ab', number % 4) as s2 from numbers(6))); +select s1, s2, compareSubstrings(s1, s2, 4, 4, 4) from (select cast(s2 as FixedString(3)) as s2, s1 from (select concat('ab', number % 3) as s1, concat('ab', number % 4) as s2 from numbers(6))); + +select s1, s2, compareSubstrings(s1, s2, 0, 0, 4) from (select cast(s2 as FixedString(3)) as s2, cast(s1 as FixedString(3)) as s1 from (select concat('ab', number % 3) as s1, concat('ab', number % 4) as s2 from numbers(6))); +select s1, s2, compareSubstrings(s1, s2, 3, 0, 4) from (select cast(s2 as FixedString(3)) as s2, cast(s1 as FixedString(3)) as s1 from (select concat('ab', number % 3) as s1, concat('ab', number % 4) as s2 from numbers(6))); +select s1, s2, compareSubstrings(s1, s2, 0, 3, 4) from (select cast(s2 as FixedString(3)) as s2, cast(s1 as FixedString(3)) as s1 from (select concat('ab', number % 3) as s1, concat('ab', number % 4) as s2 from numbers(6))); +select s1, s2, compareSubstrings(s1, s2, 4, 4, 4) from (select cast(s2 as FixedString(3)) as s2, cast(s1 as FixedString(3)) as s1 from (select concat('ab', number % 3) as s1, concat('ab', number % 4) as s2 from numbers(6))); + +-- {echoOff } diff --git a/parser/testdata/03304_fill_virtual_columns/ast.json b/parser/testdata/03304_fill_virtual_columns/ast.json new file mode 100644 index 000000000..c8c3fafbb --- /dev/null +++ b/parser/testdata/03304_fill_virtual_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_virtual_columns (children 1)" + }, + { + "explain": " Identifier test_virtual_columns" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001160768, + "rows_read": 2, + "bytes_read": 92 + } +} diff --git a/parser/testdata/03304_fill_virtual_columns/metadata.json b/parser/testdata/03304_fill_virtual_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03304_fill_virtual_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03304_fill_virtual_columns/query.sql b/parser/testdata/03304_fill_virtual_columns/query.sql new file mode 100644 index 000000000..8d6c11e73 --- /dev/null +++ b/parser/testdata/03304_fill_virtual_columns/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS test_virtual_columns; + +-- { echoOn } +CREATE TABLE test_virtual_columns(a Int32) ENGINE = MergeTree() ORDER BY a; + +INSERT INTO test_virtual_columns VALUES (1) (2); + +SELECT _part_offset FROM test_virtual_columns; + +DELETE FROM test_virtual_columns WHERE a = 1; + +SELECT _part_offset FROM test_virtual_columns; + +-- { echoOff } +DROP TABLE test_virtual_columns; diff --git a/parser/testdata/03304_pretty_fallback_to_vertical/ast.json b/parser/testdata/03304_pretty_fallback_to_vertical/ast.json new file mode 100644 index 000000000..be0fb1c67 --- /dev/null +++ b/parser/testdata/03304_pretty_fallback_to_vertical/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + 
"data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'Hello, world' (alias x)" + }, + { + "explain": " Literal 'Goodbye' (alias y)" + }, + { + "explain": " Identifier Pretty" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001021845, + "rows_read": 7, + "bytes_read": 258 + } +} diff --git a/parser/testdata/03304_pretty_fallback_to_vertical/metadata.json b/parser/testdata/03304_pretty_fallback_to_vertical/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03304_pretty_fallback_to_vertical/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03304_pretty_fallback_to_vertical/query.sql b/parser/testdata/03304_pretty_fallback_to_vertical/query.sql new file mode 100644 index 000000000..a5429cf4e --- /dev/null +++ b/parser/testdata/03304_pretty_fallback_to_vertical/query.sql @@ -0,0 +1,3 @@ +SELECT 'Hello, world' AS x, 'Goodbye' AS y FORMAT Pretty; +SELECT 'Hello,\nworld' AS x FORMAT Pretty; +SELECT 'Hello, world' AS x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x FORMAT Pretty; diff --git a/parser/testdata/03305_compressed_memory_eng_crash_reading_subcolumn/ast.json b/parser/testdata/03305_compressed_memory_eng_crash_reading_subcolumn/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03305_compressed_memory_eng_crash_reading_subcolumn/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03305_compressed_memory_eng_crash_reading_subcolumn/metadata.json b/parser/testdata/03305_compressed_memory_eng_crash_reading_subcolumn/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03305_compressed_memory_eng_crash_reading_subcolumn/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03305_compressed_memory_eng_crash_reading_subcolumn/query.sql b/parser/testdata/03305_compressed_memory_eng_crash_reading_subcolumn/query.sql new file mode 100644 index 000000000..eaa32ac88 --- /dev/null +++ b/parser/testdata/03305_compressed_memory_eng_crash_reading_subcolumn/query.sql @@ -0,0 +1,9 @@ +-- Tags: memory-engine +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Nullable(Int)) ENGINE = Memory() SETTINGS compress = 1; +INSERT INTO TABLE t0 (c0) VALUES (1); + +SELECT t0.c0.null FROM t0 FORMAT Null SETTINGS enable_analyzer = 1; + +DROP TABLE t0; diff --git a/parser/testdata/03305_fix_kafka_table_with_kw_arguments/ast.json b/parser/testdata/03305_fix_kafka_table_with_kw_arguments/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03305_fix_kafka_table_with_kw_arguments/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03305_fix_kafka_table_with_kw_arguments/metadata.json b/parser/testdata/03305_fix_kafka_table_with_kw_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03305_fix_kafka_table_with_kw_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03305_fix_kafka_table_with_kw_arguments/query.sql b/parser/testdata/03305_fix_kafka_table_with_kw_arguments/query.sql new file mode 100644 index 000000000..bcdf9982d --- /dev/null +++ b/parser/testdata/03305_fix_kafka_table_with_kw_arguments/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-fasttest + +CREATE TABLE 
default.test +( + `id` UInt32, + `message` String +) +ENGINE = Kafka(a = '1', 'clickhouse'); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03305_log_unsupported_types/ast.json b/parser/testdata/03305_log_unsupported_types/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03305_log_unsupported_types/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03305_log_unsupported_types/metadata.json b/parser/testdata/03305_log_unsupported_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03305_log_unsupported_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03305_log_unsupported_types/query.sql b/parser/testdata/03305_log_unsupported_types/query.sql new file mode 100644 index 000000000..2e2266702 --- /dev/null +++ b/parser/testdata/03305_log_unsupported_types/query.sql @@ -0,0 +1,19 @@ +-- Tags: no-parallel, log-engine + +set enable_json_type=1; +set enable_dynamic_type=1; + +drop table if exists test; +create table test (d Dynamic) engine=Log(); -- {serverError ILLEGAL_COLUMN} +create table test (d Dynamic) engine=TinyLog(); -- {serverError ILLEGAL_COLUMN} +create table test (d Array(Map(String, Dynamic))) engine=Log(); -- {serverError ILLEGAL_COLUMN} +create table test (d Array(Map(String, Dynamic))) engine=TinyLog(); -- {serverError ILLEGAL_COLUMN} +create table test (d JSON) engine=Log(); -- {serverError ILLEGAL_COLUMN} +create table test (d JSON) engine=TinyLog(); -- {serverError ILLEGAL_COLUMN} +create table test (d Array(Map(String, JSON))) engine=Log(); -- {serverError ILLEGAL_COLUMN} +create table test (d Array(Map(String, JSON))) engine=TinyLog(); -- {serverError ILLEGAL_COLUMN} +create table test (d Variant(Int32)) engine=Log(); -- {serverError ILLEGAL_COLUMN} +create table test (d Variant(Int32)) engine=TinyLog(); -- {serverError ILLEGAL_COLUMN} +create table test (d Array(Map(String, Variant(Int32)))) engine=Log(); -- {serverError ILLEGAL_COLUMN} +create table test (d Array(Map(String, Variant(Int32)))) engine=TinyLog(); -- {serverError ILLEGAL_COLUMN} + diff --git a/parser/testdata/03305_mergine_aggregated_filter_push_down/ast.json b/parser/testdata/03305_mergine_aggregated_filter_push_down/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03305_mergine_aggregated_filter_push_down/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03305_mergine_aggregated_filter_push_down/metadata.json b/parser/testdata/03305_mergine_aggregated_filter_push_down/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03305_mergine_aggregated_filter_push_down/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03305_mergine_aggregated_filter_push_down/query.sql b/parser/testdata/03305_mergine_aggregated_filter_push_down/query.sql new file mode 100644 index 000000000..4a4d511c2 --- /dev/null +++ b/parser/testdata/03305_mergine_aggregated_filter_push_down/query.sql @@ -0,0 +1,27 @@ +-- Tags: no-random-merge-tree-settings + +CREATE TABLE tab (x UInt32, y UInt32, z UInt32) engine = MergeTree order by x settings min_rows_for_wide_part=0, min_bytes_for_wide_part=0; +insert into tab select number, number, number from numbers(8129 * 123); + +set enable_analyzer=1; +set prefer_localhost_replica=1; +set optimize_aggregation_in_order=0, optimize_read_in_order=0; + +-- { echoOn } + +select * from (select x, sum(y) from 
remote('127.0.0.{1,2}', currentDatabase(), tab) group by x) where x = 42; +explain indexes=1 select * from (select x, sum(y) from remote('127.0.0.{1,2}', currentDatabase(), tab) group by x) where x = 42; + +select * from (select x, sum(y) from remote('127.0.0.{1,2}', currentDatabase(), tab) group by grouping sets ((x, z + 1), (x, z + 2))) where x = 42; +explain indexes=1 select * from (select x, sum(y) from remote('127.0.0.{1,2}', currentDatabase(), tab) group by grouping sets ((x, z + 1), (x, z + 2))) where x = 42; + +select * from (select x, sum(y), z + 1 as q from remote('127.0.0.{1,2}', currentDatabase(), tab) group by grouping sets ((x, z + 1), (x, z + 2))) where q = 42; +explain indexes=1 select * from (select x, sum(y), z + 1 as q from remote('127.0.0.{1,2}', currentDatabase(), tab) group by grouping sets ((x, z + 1), (x, z + 2))) where q = 42; + +set group_by_use_nulls=1; + +select * from (select x, sum(y) from remote('127.0.0.{1,2}', currentDatabase(), tab) group by x) where x = 42; +explain indexes=1 select * from (select x, sum(y) from remote('127.0.0.{1,2}', currentDatabase(), tab) group by x) where x = 42; + +select * from (select x, sum(y) from remote('127.0.0.{1,2}', currentDatabase(), tab) group by grouping sets ((x, z + 1), (x, z + 2))) where x = 42; +explain indexes=1 select * from (select x, sum(y) from remote('127.0.0.{1,2}', currentDatabase(), tab) group by grouping sets ((x, z + 1), (x, z + 2))) where x = 42; diff --git a/parser/testdata/03305_parallel_with/ast.json b/parser/testdata/03305_parallel_with/ast.json new file mode 100644 index 000000000..b86c15c0e --- /dev/null +++ b/parser/testdata/03305_parallel_with/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery table1 (children 1)" + }, + { + "explain": " Identifier table1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001133175, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/03305_parallel_with/metadata.json b/parser/testdata/03305_parallel_with/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03305_parallel_with/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03305_parallel_with/query.sql b/parser/testdata/03305_parallel_with/query.sql new file mode 100644 index 000000000..8a60ca65f --- /dev/null +++ b/parser/testdata/03305_parallel_with/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS table1 +PARALLEL WITH +DROP TABLE IF EXISTS table2; + +CREATE TABLE table1(x Int32) ENGINE=MergeTree order by x +PARALLEL WITH +CREATE TABLE table2(y Int32) ENGINE=MergeTree order by y; + +SHOW CREATE TABLE table1; +SHOW CREATE TABLE table2; + +CREATE TABLE table1(x Int32) ENGINE=MergeTree order by x +PARALLEL WITH +CREATE TABLE table2(y Int32) ENGINE=MergeTree order by y; -- { serverError TABLE_ALREADY_EXISTS } + +INSERT INTO table1 SELECT number FROM numbers(3) +PARALLEL WITH +INSERT INTO table1 SELECT number FROM numbers(10, 2) +PARALLEL WITH +INSERT INTO table2 SELECT number FROM numbers(20, 1); + +SELECT 'table1:'; +SELECT * FROM table1 ORDER BY x; +SELECT 'table2:'; +SELECT * FROM table2 ORDER BY y; + +DROP TABLE table1 +PARALLEL WITH +DROP TABLE table2; + +SELECT 'tables exist:'; +EXISTS TABLE table1; +EXISTS TABLE table2; diff --git a/parser/testdata/03306_materialized_vew_prewhere_supported_columns/ast.json b/parser/testdata/03306_materialized_vew_prewhere_supported_columns/ast.json new file mode 100644 index 
000000000..944a42ae7 --- /dev/null +++ b/parser/testdata/03306_materialized_vew_prewhere_supported_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery dst (children 1)" + }, + { + "explain": " Identifier dst" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001105696, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/03306_materialized_vew_prewhere_supported_columns/metadata.json b/parser/testdata/03306_materialized_vew_prewhere_supported_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03306_materialized_vew_prewhere_supported_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03306_materialized_vew_prewhere_supported_columns/query.sql b/parser/testdata/03306_materialized_vew_prewhere_supported_columns/query.sql new file mode 100644 index 000000000..f64433659 --- /dev/null +++ b/parser/testdata/03306_materialized_vew_prewhere_supported_columns/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS dst; +DROP TABLE IF EXISTS src; +DROP VIEW IF EXISTS v; +CREATE TABLE dst (c0 Int, c1 Int) ENGINE = MergeTree() ORDER BY tuple(); +CREATE TABLE src (c0 Nullable(Int), c1 Int) ENGINE = MergeTree() ORDER BY tuple(); +CREATE MATERIALIZED VIEW v TO dst (c0 Nullable(Int), c1 Int) AS (SELECT c0, c1 from src); +INSERT INTO TABLE src (c0, c1) VALUES (1, 1); +SELECT * FROM v PREWHERE c0 = 1; -- {serverError ILLEGAL_PREWHERE} +SELECT * FROM v PREWHERE c1 = 1; +SELECT * FROM v PREWHERE c0 = c1; -- {serverError ILLEGAL_PREWHERE} +DROP VIEW v; +DROP TABLE src; +DROP TABLE dst; + diff --git a/parser/testdata/03306_optimize_table_force_keyword/ast.json b/parser/testdata/03306_optimize_table_force_keyword/ast.json new file mode 100644 index 000000000..8bdfb15ce --- /dev/null +++ b/parser/testdata/03306_optimize_table_force_keyword/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery tab (children 3)" + }, + { + "explain": " Identifier tab" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType UInt32" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001101895, + "rows_read": 9, + "bytes_read": 296 + } +} diff --git a/parser/testdata/03306_optimize_table_force_keyword/metadata.json b/parser/testdata/03306_optimize_table_force_keyword/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03306_optimize_table_force_keyword/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03306_optimize_table_force_keyword/query.sql b/parser/testdata/03306_optimize_table_force_keyword/query.sql new file mode 100644 index 000000000..f27fa44fc --- /dev/null +++ b/parser/testdata/03306_optimize_table_force_keyword/query.sql @@ -0,0 +1,14 @@ +CREATE TABLE tab (x UInt32) ENGINE = MergeTree ORDER BY x; + +INSERT INTO tab values (1); +INSERT INTO tab values (2); + +-- To force a merge, the keyword 'FINAL' is traditionally used. +-- Test that FORCE works as well. 
+OPTIMIZE TABLE tab FORCE; + +SELECT count(*) +FROM system.parts +WHERE database = currentDatabase() + AND table = 'tab' + AND active = 1 diff --git a/parser/testdata/03307_forbid_loop_table_function_as_engine/ast.json b/parser/testdata/03307_forbid_loop_table_function_as_engine/ast.json new file mode 100644 index 000000000..3fa3faaea --- /dev/null +++ b/parser/testdata/03307_forbid_loop_table_function_as_engine/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery tab (children 3)" + }, + { + "explain": " Identifier tab" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration col (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Loop" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001066663, + "rows_read": 8, + "bytes_read": 271 + } +} diff --git a/parser/testdata/03307_forbid_loop_table_function_as_engine/metadata.json b/parser/testdata/03307_forbid_loop_table_function_as_engine/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03307_forbid_loop_table_function_as_engine/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03307_forbid_loop_table_function_as_engine/query.sql b/parser/testdata/03307_forbid_loop_table_function_as_engine/query.sql new file mode 100644 index 000000000..526767f23 --- /dev/null +++ b/parser/testdata/03307_forbid_loop_table_function_as_engine/query.sql @@ -0,0 +1 @@ +CREATE TABLE tab (col String) ENGINE=Loop; -- { serverError INCORRECT_QUERY } diff --git a/parser/testdata/03307_parallel_hash_max_joined_rows/ast.json b/parser/testdata/03307_parallel_hash_max_joined_rows/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03307_parallel_hash_max_joined_rows/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03307_parallel_hash_max_joined_rows/metadata.json b/parser/testdata/03307_parallel_hash_max_joined_rows/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03307_parallel_hash_max_joined_rows/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03307_parallel_hash_max_joined_rows/query.sql b/parser/testdata/03307_parallel_hash_max_joined_rows/query.sql new file mode 100644 index 000000000..a282f5519 --- /dev/null +++ b/parser/testdata/03307_parallel_hash_max_joined_rows/query.sql @@ -0,0 +1,13 @@ +-- Tags: no-tsan, no-asan, no-msan, no-ubsan, no-sanitize-coverage, no-parallel-replicas +-- no sanitizers -- memory consumption is unpredictable with sanitizers + +drop table if exists t; + +create table t(s String, n UInt8) Engine = MergeTree order by tuple(); +insert into t select repeat('x', 100) as s, number from numbers_mt(3e5); + +set max_result_rows = 0, max_result_bytes = 0, max_block_size = 65409, max_threads = 32, join_algorithm = 'parallel_hash'; +set max_memory_usage = '5Gi'; -- on my machine with max_joined_block_size_rows=65K I see consumption of ~1G, + -- without this limit (i.e. 
max_joined_block_size_rows=0) consumption is ~8-10G + +select * from t t1 join t t2 on t1.n = t2.n format Null settings max_joined_block_size_rows = 65409; diff --git a/parser/testdata/03310_aggregate_projection_count_nullable/ast.json b/parser/testdata/03310_aggregate_projection_count_nullable/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03310_aggregate_projection_count_nullable/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03310_aggregate_projection_count_nullable/metadata.json b/parser/testdata/03310_aggregate_projection_count_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03310_aggregate_projection_count_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03310_aggregate_projection_count_nullable/query.sql b/parser/testdata/03310_aggregate_projection_count_nullable/query.sql new file mode 100644 index 000000000..b647a2be0 --- /dev/null +++ b/parser/testdata/03310_aggregate_projection_count_nullable/query.sql @@ -0,0 +1,77 @@ + +DROP TABLE IF EXISTS log; + +CREATE TABLE log( + collectorReceiptTime DateTime, + eventId String, + ruleId Nullable(String), + PROJECTION ailog_rule_count ( + SELECT + collectorReceiptTime, + ruleId, + count(ruleId) + GROUP BY + collectorReceiptTime, + ruleId + ) +) +ENGINE = MergeTree +PARTITION BY toYYYYMMDD(collectorReceiptTime) +ORDER BY (collectorReceiptTime, eventId); + +INSERT INTO log VALUES ('2025-01-01 00:02:03', 'eventId_001', Null); +INSERT INTO log VALUES ('2025-01-01 01:04:05', 'eventId_002', Null); +INSERT INTO log VALUES ('2025-01-01 02:06:07', 'eventId_003', Null); +INSERT INTO log VALUES ('2025-01-01 03:08:09', 'eventId_004', Null); +INSERT INTO log VALUES ('2025-01-01 04:10:11', 'eventId_005', Null); +INSERT INTO log VALUES ('2025-01-01 05:12:13', 'eventId_006', Null); + +SET parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +SELECT + formatDateTime(toStartOfInterval(collectorReceiptTime, toIntervalHour(1)), '%Y-%m-%d %H') AS time, + COUNT() AS count +FROM log +WHERE (collectorReceiptTime >= '2025-01-01 00:00:00') AND (collectorReceiptTime <= '2025-01-01 23:59:59') +GROUP BY time +ORDER BY time DESC; + +-- Another similar case to verify that COUNT(NOT NULL) should be able to use aggregate projection. 
+ +DROP TABLE log; + +CREATE TABLE log( + collectorReceiptTime DateTime, + eventId String, + ruleId String, + PROJECTION ailog_rule_count ( + SELECT + collectorReceiptTime, + ruleId, + count(ruleId) + GROUP BY + collectorReceiptTime, + ruleId + ) +) +ENGINE = MergeTree +PARTITION BY toYYYYMMDD(collectorReceiptTime) +ORDER BY (collectorReceiptTime, eventId); + +INSERT INTO log VALUES ('2025-01-01 00:02:03', 'eventId_001', ''); +INSERT INTO log VALUES ('2025-01-01 01:04:05', 'eventId_002', ''); +INSERT INTO log VALUES ('2025-01-01 02:06:07', 'eventId_003', ''); +INSERT INTO log VALUES ('2025-01-01 03:08:09', 'eventId_004', ''); +INSERT INTO log VALUES ('2025-01-01 04:10:11', 'eventId_005', ''); +INSERT INTO log VALUES ('2025-01-01 05:12:13', 'eventId_006', ''); + +SELECT + formatDateTime(toStartOfInterval(collectorReceiptTime, toIntervalHour(1)), '%Y-%m-%d %H') AS time, + COUNT() AS count +FROM log +WHERE (collectorReceiptTime >= '2025-01-01 00:00:00') AND (collectorReceiptTime <= '2025-01-01 23:59:59') +GROUP BY time +ORDER BY time DESC +SETTINGS force_optimize_projection = 1; + +DROP TABLE log; diff --git a/parser/testdata/03310_create_database_with_settings/ast.json b/parser/testdata/03310_create_database_with_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03310_create_database_with_settings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03310_create_database_with_settings/metadata.json b/parser/testdata/03310_create_database_with_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03310_create_database_with_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03310_create_database_with_settings/query.sql b/parser/testdata/03310_create_database_with_settings/query.sql new file mode 100644 index 000000000..576c872a3 --- /dev/null +++ b/parser/testdata/03310_create_database_with_settings/query.sql @@ -0,0 +1,9 @@ +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE_1:Identifier}; +CREATE DATABASE {CLICKHOUSE_DATABASE_1:Identifier} SETTINGS distributed_ddl_task_timeout=42; +SYSTEM FLUSH LOGS query_log; +SELECT Settings['distributed_ddl_task_timeout'] FROM system.query_log where + current_database = currentDatabase() and + type != 'QueryStart' and + query like 'CREATE DATABASE % SETTINGS %'; + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE_1:Identifier}; \ No newline at end of file diff --git a/parser/testdata/03310_index_hints_read_columns/ast.json b/parser/testdata/03310_index_hints_read_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03310_index_hints_read_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03310_index_hints_read_columns/metadata.json b/parser/testdata/03310_index_hints_read_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03310_index_hints_read_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03310_index_hints_read_columns/query.sql b/parser/testdata/03310_index_hints_read_columns/query.sql new file mode 100644 index 000000000..7f94f7015 --- /dev/null +++ b/parser/testdata/03310_index_hints_read_columns/query.sql @@ -0,0 +1,71 @@ +-- Tags: no-parallel, no-random-settings, no-object-storage + +SET enable_analyzer = 1; +DROP TABLE IF EXISTS t_index_hint; + +CREATE TABLE t_index_hint (a UInt64, b UInt64) +ENGINE = MergeTree ORDER BY a +SETTINGS 
index_granularity = 1, min_bytes_for_wide_part = 0, serialization_info_version = 'basic'; + +INSERT INTO t_index_hint SELECT number, number FROM numbers(1000); + +SYSTEM DROP MARK CACHE; +SELECT sum(b) FROM t_index_hint WHERE b >= 100 AND b < 200 SETTINGS max_threads = 1; + +SYSTEM DROP MARK CACHE; +SELECT sum(b) FROM t_index_hint WHERE a >= 100 AND a < 200 AND b >= 100 AND b < 200 SETTINGS max_threads = 1, force_primary_key = 1; + +SYSTEM DROP MARK CACHE; +SELECT sum(b) FROM t_index_hint WHERE indexHint(a >= 100 AND a < 200) AND b >= 100 AND b < 200 SETTINGS max_threads = 1, force_primary_key = 1; + +SYSTEM FLUSH LOGS query_log; + +SELECT + ProfileEvents['FileOpen'], + read_rows, + arraySort(arrayMap(x -> splitByChar('.', x)[-1], columns)) +FROM system.query_log +WHERE type = 'QueryFinish' + AND current_database = currentDatabase() + AND query LIKE '%SELECT sum(b) FROM t_index_hint%' +ORDER BY event_time_microseconds; + +DROP TABLE IF EXISTS t_index_hint; + +CREATE TABLE t_index_hint +( + a UInt64, + s String, + s_tokens Array(String) MATERIALIZED arrayDistinct(splitByWhitespace(s)), + INDEX idx_tokens s_tokens TYPE bloom_filter(0.01) GRANULARITY 1, +) +ENGINE = MergeTree ORDER BY a +SETTINGS index_granularity = 1, min_bytes_for_wide_part = 0, serialization_info_version = 'basic'; + +INSERT INTO t_index_hint (a, s) VALUES (1, 'Text with my_token') (2, 'Another text'); + +SYSTEM DROP MARK CACHE; +SYSTEM DROP INDEX MARK CACHE; +SELECT count() FROM t_index_hint WHERE s LIKE '%my_token%' SETTINGS max_threads = 1; + +SYSTEM DROP MARK CACHE; +SYSTEM DROP INDEX MARK CACHE; +SELECT count() FROM t_index_hint WHERE has(s_tokens, 'my_token') AND s LIKE '%my_token%' SETTINGS max_threads = 1, force_data_skipping_indices = 'idx_tokens'; + +SYSTEM DROP MARK CACHE; +SYSTEM DROP INDEX MARK CACHE; +SELECT count() FROM t_index_hint WHERE indexHint(has(s_tokens, 'my_token')) AND s LIKE '%my_token%' SETTINGS max_threads = 1, force_data_skipping_indices = 'idx_tokens'; + +SYSTEM FLUSH LOGS query_log; + +SELECT + ProfileEvents['FileOpen'], + read_rows, + arraySort(arrayMap(x -> splitByChar('.', x)[-1], columns)) +FROM system.query_log +WHERE type = 'QueryFinish' + AND current_database = currentDatabase() + AND query LIKE '%SELECT count() FROM t_index_hint%' +ORDER BY event_time_microseconds; + +DROP TABLE t_index_hint; diff --git a/parser/testdata/03310_materialized_view_with_bad_select/ast.json b/parser/testdata/03310_materialized_view_with_bad_select/ast.json new file mode 100644 index 000000000..fd6dab90f --- /dev/null +++ b/parser/testdata/03310_materialized_view_with_bad_select/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001654987, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03310_materialized_view_with_bad_select/metadata.json b/parser/testdata/03310_materialized_view_with_bad_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03310_materialized_view_with_bad_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03310_materialized_view_with_bad_select/query.sql b/parser/testdata/03310_materialized_view_with_bad_select/query.sql new file mode 100644 index 000000000..0e54bcb8e --- /dev/null +++ b/parser/testdata/03310_materialized_view_with_bad_select/query.sql @@ -0,0 +1,44 @@ +SET allow_materialized_view_with_bad_select = 1; + +DROP TABLE IF EXISTS 
src; +DROP TABLE IF EXISTS dst; +DROP TABLE IF EXISTS mv; + +CREATE TABLE src (x int, y int) ENGINE = MergeTree ORDER BY (); + +CREATE TABLE dst (x int, z int) ENGINE = MergeTree ORDER BY (); + +CREATE MATERIALIZED VIEW mv TO dst AS SELECT x, y FROM src; + +INSERT INTO src VALUES (1, 1); + +SELECT * FROM dst; + +SET allow_materialized_view_with_bad_select = 0; + +-- Insert into existing bad MV is still possible +INSERT INTO src VALUES (2, 2); + +SELECT * FROM dst ORDER BY ALL; + +-- Re-creating it is not +DROP TABLE mv; + +CREATE MATERIALIZED VIEW mv TO dst AS SELECT x, y FROM src; -- { serverError THERE_IS_NO_COLUMN } + +-- Creating an MV with a nonexistent target table is no longer possible +SET allow_materialized_view_with_bad_select = 1; + +CREATE MATERIALIZED VIEW mv TO nonexistent AS SELECT x, y FROM src; + +INSERT INTO src VALUES (3, 3); -- { serverError UNKNOWN_TABLE } + +DROP TABLE mv; + +SET allow_materialized_view_with_bad_select = 0; + +CREATE MATERIALIZED VIEW mv TO nonexistent AS SELECT x, y FROM src; -- { serverError UNKNOWN_TABLE } + +DROP TABLE src; +DROP TABLE dst; +DROP TABLE IF EXISTS mv; diff --git a/parser/testdata/03311_constantnode/ast.json b/parser/testdata/03311_constantnode/ast.json new file mode 100644 index 000000000..5f0af33ad --- /dev/null +++ b/parser/testdata/03311_constantnode/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function uniqUpTo (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function unhex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '00'" + }, + { + "explain": " Literal 'AggregateFunction(uniqUpTo(5), Nullable(Nothing))'" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_5" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001030498, + "rows_read": 14, + "bytes_read": 575 + } +} diff --git a/parser/testdata/03311_constantnode/metadata.json b/parser/testdata/03311_constantnode/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03311_constantnode/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03311_constantnode/query.sql b/parser/testdata/03311_constantnode/query.sql new file mode 100644 index 000000000..2e76ee6cd --- /dev/null +++ b/parser/testdata/03311_constantnode/query.sql @@ -0,0 +1,5 @@ +SELECT uniqUpTo(5)(CAST(unhex('00'), 'AggregateFunction(uniqUpTo(5), Nullable(Nothing))')); +SELECT largestTriangleThreeBucketsMerge(4)(CAST(unhex('0101000000000000F03F000000000000F03F'), 'AggregateFunction(largestTriangleThreeBuckets(4), UInt8 ,UInt8)')); +SELECT map(('{"a":1,"b":1,"b":1}',),1, ('{"a":1}',),2, ('{"a":1,"c":1}',),2666514966)::Map(Tuple(JSON(max_dynamic_paths=2)),Variant(UInt32)) SETTINGS enable_variant_type = 1, enable_json_type = 1, type_json_skip_duplicated_paths = 1; +SELECT materialize(100000000) AND 0, arrayFold((acc, x) -> x, [0, 1], toUInt8(0)); +DESCRIBE TABLE format(((toLowCardinality(0) OR NULL) OR inf OR 2) OR greatCircleAngle(materialize(0), 45, 1, 45) OR greatCircleAngle(toNullable(toUInt256(0)), 45, 1, 45) OR 
0, JSONEachRow, toFixedString('\n{"a": "Hello", "b": 111}\n{"a": "World", "b": 123}\n{"a": "Hello", "b": 111}\n{"a": "World", "b": 123}\n', materialize(101))); -- { serverError ILLEGAL_COLUMN } diff --git a/parser/testdata/03311_issue_72265/ast.json b/parser/testdata/03311_issue_72265/ast.json new file mode 100644 index 000000000..02f965753 --- /dev/null +++ b/parser/testdata/03311_issue_72265/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001152664, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03311_issue_72265/metadata.json b/parser/testdata/03311_issue_72265/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03311_issue_72265/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03311_issue_72265/query.sql b/parser/testdata/03311_issue_72265/query.sql new file mode 100644 index 000000000..f8927e620 --- /dev/null +++ b/parser/testdata/03311_issue_72265/query.sql @@ -0,0 +1,33 @@ +SET allow_suspicious_low_cardinality_types = 1; + +SELECT 'Test fixture for issue: https://github.com/ClickHouse/ClickHouse/issues/72265'; + +DROP TABLE IF EXISTS test_table_72265_1; +CREATE TABLE test_table_72265_1 +( + `a` LowCardinality(Nullable(Int64)), + `b` UInt64 +) +ENGINE = MergeTree +PARTITION BY a % 2 +ORDER BY a +SETTINGS + allow_nullable_key = 1, + index_granularity = 64, + index_granularity_bytes = '10M', + min_bytes_for_wide_part = 0; + +INSERT INTO test_table_72265_1 SELECT number, number FROM numbers(10000); +SELECT count() FROM test_table_72265_1 WHERE (a > 100) AND ((a % 2) = toUInt128(0)); + +DROP TABLE IF EXISTS test_table_72265_2; +CREATE TABLE test_table_72265_2 +( + `part` LowCardinality(Nullable(Int64)) +) +ENGINE = MergeTree +PARTITION BY part +ORDER BY part +SETTINGS allow_nullable_key = 1; +INSERT INTO test_table_72265_2 (part) FORMAT Values (1); +SELECT * FROM test_table_72265_2 PREWHERE part = toUInt128(1); diff --git a/parser/testdata/03311_min_max_count_projection_with_constant_partition_expr/ast.json b/parser/testdata/03311_min_max_count_projection_with_constant_partition_expr/ast.json new file mode 100644 index 000000000..1c8041bb9 --- /dev/null +++ b/parser/testdata/03311_min_max_count_projection_with_constant_partition_expr/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00115931, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03311_min_max_count_projection_with_constant_partition_expr/metadata.json b/parser/testdata/03311_min_max_count_projection_with_constant_partition_expr/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03311_min_max_count_projection_with_constant_partition_expr/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03311_min_max_count_projection_with_constant_partition_expr/query.sql b/parser/testdata/03311_min_max_count_projection_with_constant_partition_expr/query.sql new file mode 100644 index 000000000..9257a7dbf --- /dev/null +++ b/parser/testdata/03311_min_max_count_projection_with_constant_partition_expr/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Int) ENGINE = MergeTree() PARTITION 
BY (EXISTS (SELECT 1)) ORDER BY tuple(); + +DROP TABLE t0; diff --git a/parser/testdata/03311_recursive_cte_old_analyzer_error/ast.json b/parser/testdata/03311_recursive_cte_old_analyzer_error/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03311_recursive_cte_old_analyzer_error/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03311_recursive_cte_old_analyzer_error/metadata.json b/parser/testdata/03311_recursive_cte_old_analyzer_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03311_recursive_cte_old_analyzer_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03311_recursive_cte_old_analyzer_error/query.sql b/parser/testdata/03311_recursive_cte_old_analyzer_error/query.sql new file mode 100644 index 000000000..8b5e2bba8 --- /dev/null +++ b/parser/testdata/03311_recursive_cte_old_analyzer_error/query.sql @@ -0,0 +1,12 @@ +SET enable_analyzer=0; + +WITH RECURSIVE test_table AS + ( + SELECT 1 AS number + UNION ALL + SELECT number + 1 + FROM test_table + WHERE number < 100 + ) +SELECT sum(number) +FROM test_table; -- { serverError UNSUPPORTED_METHOD } diff --git a/parser/testdata/03311_subcolumns_in_default_and_materialized_expressions/ast.json b/parser/testdata/03311_subcolumns_in_default_and_materialized_expressions/ast.json new file mode 100644 index 000000000..9ba48581c --- /dev/null +++ b/parser/testdata/03311_subcolumns_in_default_and_materialized_expressions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001448374, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03311_subcolumns_in_default_and_materialized_expressions/metadata.json b/parser/testdata/03311_subcolumns_in_default_and_materialized_expressions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03311_subcolumns_in_default_and_materialized_expressions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03311_subcolumns_in_default_and_materialized_expressions/query.sql b/parser/testdata/03311_subcolumns_in_default_and_materialized_expressions/query.sql new file mode 100644 index 000000000..fdd42848c --- /dev/null +++ b/parser/testdata/03311_subcolumns_in_default_and_materialized_expressions/query.sql @@ -0,0 +1,15 @@ +set enable_json_type=1; + +drop table if exists test; +create table test (t Tuple(a UInt32), json JSON(b UInt32), a UInt32 default t.a, b UInt32 default json.b, c UInt32 default json.c) engine=Memory; +insert into test (t, json) select tuple(42), '{"b" : 42, "c" : 42}'; +select * from test; +drop table test; + +create table test (t Tuple(a UInt32), json JSON(b UInt32), a UInt32 materialized t.a, b UInt32 materialized json.b, c UInt32 materialized json.c) engine=Memory; +insert into test (t, json) select tuple(42), '{"b" : 42, "c" : 42}'; +select *, a, b, c from test; +drop table test; + +select * from format(JSONEachRow, 't Tuple(a UInt32), json JSON(b UInt32), a UInt32 default t.a, b UInt32 default json.b, c UInt32 default json.c', '{"t" : {"a" : 42}, "json" : {"b" : 42, "c" : 42}}'); + diff --git a/parser/testdata/03312_analyzer_unused_projection_fix/ast.json b/parser/testdata/03312_analyzer_unused_projection_fix/ast.json new file mode 100644 index 000000000..6f55df4ee --- /dev/null +++ 
b/parser/testdata/03312_analyzer_unused_projection_fix/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function count (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001307968, + "rows_read": 6, + "bytes_read": 215 + } +} diff --git a/parser/testdata/03312_analyzer_unused_projection_fix/metadata.json b/parser/testdata/03312_analyzer_unused_projection_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03312_analyzer_unused_projection_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03312_analyzer_unused_projection_fix/query.sql b/parser/testdata/03312_analyzer_unused_projection_fix/query.sql new file mode 100644 index 000000000..9cd88b4cb --- /dev/null +++ b/parser/testdata/03312_analyzer_unused_projection_fix/query.sql @@ -0,0 +1,39 @@ +SELECT count() +FROM +( + SELECT + 1 AS a, + 2 AS b + INTERSECT ALL + SELECT + 1, + 1 +); + +SELECT count() +FROM +( + SELECT + 1 AS a, + 2 AS b + EXCEPT ALL + SELECT + 1, + 1 +); + +-- THE RESULT CTE RETURNS 1 ROW WITH ALL COLUMN VALUES NULL, WHICH IS THE EXPECTED BEHAVIOUR +WITH T1 AS (SELECT 1 A, 1 B UNION ALL SELECT 1 A, 1 B), +T2 AS (SELECT 1 A, 1 B), +T1_WITH_ROWNUM AS (SELECT A, B, ROW_NUMBER() OVER (PARTITION BY A, B ORDER BY A, B) RNK FROM T1), +T2_WITH_ROWNUM AS (SELECT A, B, ROW_NUMBER() OVER (PARTITION BY A, B ORDER BY A, B) RNK FROM T2), +RESULT AS (SELECT * FROM T1_WITH_ROWNUM EXCEPT SELECT * FROM T2_WITH_ROWNUM) +SELECT * FROM RESULT; + +-- HOWEVER, UPON EXPLICITLY USING COLUMN NAMES RATHER THAN *, THERE ARE NO RESULTS IN THE OUTPUT +WITH T1 AS (SELECT 1 A, 1 B UNION ALL SELECT 1 A, 1 B), +T2 AS (SELECT 1 A, 1 B), +T1_WITH_ROWNUM AS (SELECT A, B, ROW_NUMBER() OVER (PARTITION BY A, B ORDER BY A, B) RNK FROM T1), +T2_WITH_ROWNUM AS (SELECT A, B, ROW_NUMBER() OVER (PARTITION BY A, B ORDER BY A, B) RNK FROM T2), +RESULT AS (SELECT * FROM T1_WITH_ROWNUM EXCEPT SELECT * FROM T2_WITH_ROWNUM) +SELECT A,B FROM RESULT; diff --git a/parser/testdata/03312_explain_syntax_analyzer/ast.json b/parser/testdata/03312_explain_syntax_analyzer/ast.json new file mode 100644 index 000000000..1f72faf23 --- /dev/null +++ b/parser/testdata/03312_explain_syntax_analyzer/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001295777, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03312_explain_syntax_analyzer/metadata.json b/parser/testdata/03312_explain_syntax_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03312_explain_syntax_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03312_explain_syntax_analyzer/query.sql b/parser/testdata/03312_explain_syntax_analyzer/query.sql new file mode 100644 index 000000000..14f3f7551 --- /dev/null +++ b/parser/testdata/03312_explain_syntax_analyzer/query.sql @@ -0,0 +1,71 @@ +SET optimize_min_equality_disjunction_chain_length = 3; + + +EXPLAIN SYNTAX +select * +from +system.numbers AS a, +system.numbers AS b, +system.numbers AS c +WHERE
a.number == 1 OR a.number == 2 OR a.number == 3 OR a.number = 4 OR a.number = 5 +; + + +EXPLAIN SYNTAX oneline = 1 +select * +from +system.numbers AS a, +system.numbers AS b, +system.numbers AS c +WHERE a.number == 1 OR a.number == 2 OR a.number == 3 OR a.number = 4 OR a.number = 5 +; + +EXPLAIN SYNTAX run_query_tree_passes = 0 +select * +from +system.numbers AS a, +system.numbers AS b, +system.numbers AS c +WHERE a.number == 1 OR a.number == 2 OR a.number == 3 OR a.number = 4 OR a.number = 5 +; + +EXPLAIN SYNTAX run_query_tree_passes = 1 +select * +from +system.numbers AS a, +system.numbers AS b, +system.numbers AS c +WHERE a.number == 1 OR a.number == 2 OR a.number == 3 OR a.number = 4 OR a.number = 5 +; + +EXPLAIN SYNTAX run_query_tree_passes = 1, query_tree_passes = 1 +select * +from +system.numbers AS a, +system.numbers AS b, +system.numbers AS c +WHERE a.number == 1 OR a.number == 2 OR a.number == 3 OR a.number = 4 OR a.number = 5 +; + + +EXPLAIN SYNTAX run_query_tree_passes = 1, query_tree_passes = 0 +select * +from +system.numbers AS a, +system.numbers AS b, +system.numbers AS c +WHERE a.number == 1 OR a.number == 2 OR a.number == 3 OR a.number = 4 OR a.number = 5 +; + + + +-- { echoOn } +EXPLAIN SYNTAX run_query_tree_passes = 1, oneline = 1 +select * +from +system.numbers AS a, +system.numbers AS b, +system.numbers AS c +WHERE a.number == 1 OR a.number == 2 OR a.number == 3 OR a.number = 4 OR a.number = 5 +; + diff --git a/parser/testdata/03312_issue_74299/ast.json b/parser/testdata/03312_issue_74299/ast.json new file mode 100644 index 000000000..ba0510154 --- /dev/null +++ b/parser/testdata/03312_issue_74299/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001260719, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03312_issue_74299/metadata.json b/parser/testdata/03312_issue_74299/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03312_issue_74299/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03312_issue_74299/query.sql b/parser/testdata/03312_issue_74299/query.sql new file mode 100644 index 000000000..f69c9fdd4 --- /dev/null +++ b/parser/testdata/03312_issue_74299/query.sql @@ -0,0 +1,7 @@ +DROP TABLE if exists t0; + +CREATE TABLE t0 (c0 Int) ENGINE = Memory; + +INSERT INTO TABLE t0 (c0) VALUES (currentProfiles()); -- { error TYPE_MISMATCH } + +DROP TABLE t0; diff --git a/parser/testdata/03312_json_literal_remote/ast.json b/parser/testdata/03312_json_literal_remote/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03312_json_literal_remote/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03312_json_literal_remote/metadata.json b/parser/testdata/03312_json_literal_remote/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03312_json_literal_remote/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03312_json_literal_remote/query.sql b/parser/testdata/03312_json_literal_remote/query.sql new file mode 100644 index 000000000..85c2e403b --- /dev/null +++ b/parser/testdata/03312_json_literal_remote/query.sql @@ -0,0 +1,55 @@ +-- Tags: no-fasttest + +set enable_json_type=1; +set enable_analyzer=1; +set output_format_native_write_json_as_string=0; +set 
input_format_json_infer_array_of_dynamic_from_array_of_different_types=0; + +select '{"a" : false}'::JSON as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : null}'::JSON as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : 42}'::JSON as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : 42.42}'::JSON as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : [1, 2, 3]}'::JSON as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : {"b" : 42}}'::JSON as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : [{"b" : 42}]}'::JSON as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : [1, "str", [1]]}'::JSON as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); + +select '{"a" : false}'::JSON(a Bool) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : null}'::JSON(a Nullable(UInt32)) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : 42}'::JSON(a Int64) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : 42}'::JSON(a UInt64) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : 42}'::JSON(a Int128) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : 42}'::JSON(a UInt128) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : 42}'::JSON(a Int256) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : 42}'::JSON(a UInt256) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : 42.42}'::JSON(a Float64) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : 42.42}'::JSON(a Decimal32(2)) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : 42.42}'::JSON(a Decimal64(2)) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : 42.42}'::JSON(a Decimal128(2)) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : 42.42}'::JSON(a Decimal256(2)) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : "41b43b2a-46c9-4ff8-a354-621299fd5d52"}'::JSON(a UUID) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : "121.0.0.1"}'::JSON(a IPv4) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : "2001:0db8:85a3:0000:0000:8a2e:0370:7334"}'::JSON(a IPv6) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : [1, 2, 3]}'::JSON(a Array(Int64)) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : [1, "str", [1]]}'::JSON(a Tuple(Int64, String, Array(Int64))) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : {"b" : 1, "c" : "str", "d" : [1]}}'::JSON(a Tuple(b Int64, c String, d Array(Int64))) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : {"b" : 1, "c" : 2, "d" : 3}}'::JSON(a Map(String, Int64)) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : [{"b" : 
42}]}'::JSON(a Array(JSON(b Int64))) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); + +select '{"a" : false}'::JSON(max_dynamic_paths=0) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : null}'::JSON(max_dynamic_paths=0) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : 42}'::JSON(max_dynamic_paths=0) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : 42.42}'::JSON(max_dynamic_paths=0) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : [1, 2, 3]}'::JSON(max_dynamic_paths=0) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : {"b" : 42}}'::JSON(max_dynamic_paths=0) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : [{"b" : 42}]}'::JSON(max_dynamic_paths=0) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : [1, "str", [1]]}'::JSON(max_dynamic_paths=0) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); + +select '{"a" : false}'::JSON(max_dynamic_types=0) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : null}'::JSON(max_dynamic_types=0) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : 42}'::JSON(max_dynamic_types=0) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : 42.42}'::JSON(max_dynamic_types=0) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : [1, 2, 3]}'::JSON(max_dynamic_types=0) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : {"b" : 42}}'::JSON(max_dynamic_types=0) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : [{"b" : 42}]}'::JSON(max_dynamic_types=0) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); +select '{"a" : [1, "str", [1]]}'::JSON(max_dynamic_types=0) as json, JSONAllPathsWithTypes(json) from remote('127.0.0.2', 'system.one'); diff --git a/parser/testdata/03312_line_numbers/ast.json b/parser/testdata/03312_line_numbers/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03312_line_numbers/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03312_line_numbers/metadata.json b/parser/testdata/03312_line_numbers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03312_line_numbers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03312_line_numbers/query.sql b/parser/testdata/03312_line_numbers/query.sql new file mode 100644 index 000000000..a28687db7 --- /dev/null +++ b/parser/testdata/03312_line_numbers/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest +-- ^ due to the usage of system logs + +SELECT 'This is the first query, and it is located on line 4', +1, -- Just random stuff to ensure proper counting of lines. 
+2, 3; + +SELECT 'This is the second query, and it is located on line 8'; + +SYSTEM FLUSH LOGS query_log, text_log; +SELECT type, script_query_number, script_line_number, query FROM system.query_log WHERE current_database = currentDatabase() AND event_date >= yesterday() ORDER BY event_time_microseconds, type; + +SELECT 'Ok' FROM system.text_log WHERE event_date >= yesterday() AND message LIKE '%(query 1, line 4)%' AND message LIKE '%This is the first query%' LIMIT 1; +SELECT 'Ok' FROM system.text_log WHERE event_date >= yesterday() AND message LIKE '%(query 2, line 8)%' AND message LIKE '%This is the second query%' LIMIT 1; diff --git a/parser/testdata/03312_sparse_column_tuple/ast.json b/parser/testdata/03312_sparse_column_tuple/ast.json new file mode 100644 index 000000000..8ad1d2b5b --- /dev/null +++ b/parser/testdata/03312_sparse_column_tuple/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery dst_sparse (children 1)" + }, + { + "explain": " Identifier dst_sparse" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000950734, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03312_sparse_column_tuple/metadata.json b/parser/testdata/03312_sparse_column_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03312_sparse_column_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03312_sparse_column_tuple/query.sql b/parser/testdata/03312_sparse_column_tuple/query.sql new file mode 100644 index 000000000..5521bf638 --- /dev/null +++ b/parser/testdata/03312_sparse_column_tuple/query.sql @@ -0,0 +1,54 @@ +DROP TABLE IF EXISTS dst_sparse; +DROP TABLE IF EXISTS mytable_sparse; + +CREATE TABLE dst_sparse ( + `id` Int64, + `budget` Tuple(currencyCode String) +) +ENGINE = MergeTree ORDER BY id +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9, serialization_info_version = 'basic' +AS SELECT number, arrayJoin([tuple('')]) FROM numbers(999); + +INSERT INTO dst_sparse VALUES (999, tuple('x')); + +OPTIMIZE TABLE dst_sparse FINAL; + +CREATE TABLE mytable_sparse ENGINE = MergeTree ORDER BY id +SETTINGS ratio_of_defaults_for_sparse_serialization = 1.0, serialization_info_version = 'basic' +AS SELECT id, budget FROM dst_sparse; + +SELECT count() from mytable_sparse; + +SELECT DISTINCT table, column, serialization_kind, subcolumns.names, subcolumns.serializations +FROM system.parts_columns +WHERE database = currentDatabase() AND table IN ('dst_sparse', 'mytable_sparse') AND active AND column = 'budget' +ORDER BY table; + +DROP TABLE IF EXISTS dst_sparse; +DROP TABLE IF EXISTS mytable_sparse; + +CREATE TABLE dst_sparse ( + `id` Int64, + `budget` Tuple(currencyCode String) +) +ENGINE = MergeTree ORDER BY id +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9, serialization_info_version = 'basic' +AS SELECT number, arrayJoin([tuple('')]) FROM numbers(999); + +INSERT INTO dst_sparse VALUES (999, tuple('x')); + +OPTIMIZE TABLE dst_sparse FINAL; + +CREATE TABLE mytable_sparse ENGINE = MergeTree ORDER BY id +SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9, serialization_info_version = 'basic' +AS SELECT id, budget FROM dst_sparse; + +SELECT count() from mytable_sparse; + +SELECT DISTINCT table, column, serialization_kind, subcolumns.names, subcolumns.serializations +FROM system.parts_columns +WHERE database = currentDatabase() AND table IN ('dst_sparse', 'mytable_sparse') AND active AND column 
= 'budget' +ORDER BY table; + +DROP TABLE IF EXISTS dst_sparse; +DROP TABLE IF EXISTS mytable_sparse; diff --git a/parser/testdata/03312_squashing_with_low_card_mem_usage/ast.json b/parser/testdata/03312_squashing_with_low_card_mem_usage/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03312_squashing_with_low_card_mem_usage/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03312_squashing_with_low_card_mem_usage/metadata.json b/parser/testdata/03312_squashing_with_low_card_mem_usage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03312_squashing_with_low_card_mem_usage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03312_squashing_with_low_card_mem_usage/query.sql b/parser/testdata/03312_squashing_with_low_card_mem_usage/query.sql new file mode 100644 index 000000000..c73afaa82 --- /dev/null +++ b/parser/testdata/03312_squashing_with_low_card_mem_usage/query.sql @@ -0,0 +1,25 @@ +-- Tags: no-tsan, no-asan, no-msan, no-ubsan, no-random-settings, no-random-merge-tree-settings +-- no sanitizers -- memory consumption is unpredictable with sanitizers +-- no random settings -- it was quite hard to reproduce and I'm afraid that settings randomisation will make the test weaker + +drop table if exists t; +create table t(s LowCardinality(String)) Engine = MergeTree order by tuple() settings min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0; + +-- The problem was that we didn't account for dictionary size in `ColumnLowCardinality::byteSize()`. +-- Because of that we tend to accumulate too many blocks in `SimpleSquashingChunksTransform`. +-- To reproduce we need a column with heavy dictionaries and ideally nothing else consuming a significant amount of memory. +insert into t select repeat('x', 1000) || toString(number) as s from numbers_mt(5e6) settings max_insert_threads = 16, max_memory_usage = '15Gi'; + +WITH t2 AS + ( + SELECT + 'x' AS s, + number + FROM numbers_mt(10000.) + ) +SELECT t1.s +FROM t AS t1 +INNER JOIN t2 ON substr(t1.s, 1, 1) = t2.s +LIMIT 1e5 +SETTINGS max_threads = 32, max_memory_usage = '2Gi', join_algorithm = 'parallel_hash', min_joined_block_size_bytes = '1Mi' +FORMAT Null; diff --git a/parser/testdata/03312_system_errors_last_error/ast.json b/parser/testdata/03312_system_errors_last_error/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03312_system_errors_last_error/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03312_system_errors_last_error/metadata.json b/parser/testdata/03312_system_errors_last_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03312_system_errors_last_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03312_system_errors_last_error/query.sql b/parser/testdata/03312_system_errors_last_error/query.sql new file mode 100644 index 000000000..744ec9fb9 --- /dev/null +++ b/parser/testdata/03312_system_errors_last_error/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-parallel +-- Tag no-parallel: The test checks system.errors values which are global + +-- For the old analyzer last_error_message is slightly different.
+SET enable_analyzer = 1; + +SELECT throwIf(1); -- {serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO} + +-- We expect an extended error message here like "Value passed to 'throwIf' function is non-zero: while executing throwIf(1)", +-- and not just "Value passed to 'throwIf' function is non-zero". +SELECT last_error_message, last_error_format_string FROM system.errors +WHERE name = 'FUNCTION_THROW_IF_VALUE_IS_NON_ZERO' AND last_error_time > now() - 10 AND not remote; diff --git a/parser/testdata/03313_case_insensitive_json_type_declaration/ast.json b/parser/testdata/03313_case_insensitive_json_type_declaration/ast.json new file mode 100644 index 000000000..144719bf9 --- /dev/null +++ b/parser/testdata/03313_case_insensitive_json_type_declaration/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001602268, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03313_case_insensitive_json_type_declaration/metadata.json b/parser/testdata/03313_case_insensitive_json_type_declaration/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03313_case_insensitive_json_type_declaration/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03313_case_insensitive_json_type_declaration/query.sql b/parser/testdata/03313_case_insensitive_json_type_declaration/query.sql new file mode 100644 index 000000000..6349af48c --- /dev/null +++ b/parser/testdata/03313_case_insensitive_json_type_declaration/query.sql @@ -0,0 +1,3 @@ +set enable_json_type=1; + +select '{}'::json(500); -- {clientError SYNTAX_ERROR} diff --git a/parser/testdata/03313_h3togeo_result_order/ast.json b/parser/testdata/03313_h3togeo_result_order/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03313_h3togeo_result_order/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03313_h3togeo_result_order/metadata.json b/parser/testdata/03313_h3togeo_result_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03313_h3togeo_result_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03313_h3togeo_result_order/query.sql b/parser/testdata/03313_h3togeo_result_order/query.sql new file mode 100644 index 000000000..fefd5a4f6 --- /dev/null +++ b/parser/testdata/03313_h3togeo_result_order/query.sql @@ -0,0 +1,7 @@ +-- Tags: no-fasttest +-- no-fasttest: h3ToGeo needs a binary with the Uber H3 library + +-- Test for setting 'h3togeo_lon_lat_result_order' + +SELECT h3ToGeo(644325524701193974) SETTINGS h3togeo_lon_lat_result_order = true; +SELECT h3ToGeo(644325524701193974) SETTINGS h3togeo_lon_lat_result_order = false; diff --git a/parser/testdata/03314_analyzer_resolve_in_parent_scope/ast.json b/parser/testdata/03314_analyzer_resolve_in_parent_scope/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03314_analyzer_resolve_in_parent_scope/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03314_analyzer_resolve_in_parent_scope/metadata.json b/parser/testdata/03314_analyzer_resolve_in_parent_scope/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03314_analyzer_resolve_in_parent_scope/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03314_analyzer_resolve_in_parent_scope/query.sql
b/parser/testdata/03314_analyzer_resolve_in_parent_scope/query.sql new file mode 100644 index 000000000..4240d831d --- /dev/null +++ b/parser/testdata/03314_analyzer_resolve_in_parent_scope/query.sql @@ -0,0 +1,27 @@ +WITH ws_wh AS + ( + SELECT + ws1.ws_order_number + FROM + ( + SELECT + 1 AS ws_order_number, + 1 AS ws_warehouse_sk + ) AS ws1, + ( + SELECT + 1 AS ws_order_number, + 2 AS ws_warehouse_sk + ) AS ws2 + WHERE (ws1.ws_order_number = ws2.ws_order_number) AND (ws1.ws_warehouse_sk != ws2.ws_warehouse_sk) + ) +SELECT COUNT() +FROM +( + SELECT 1 AS ws_order_number +) AS ws1 +WHERE (ws1.ws_order_number IN ( + SELECT ws_order_number + FROM ws_wh +)) +SETTINGS join_use_nulls=1; diff --git a/parser/testdata/03314_analyzer_resolve_in_parent_scope_2/ast.json b/parser/testdata/03314_analyzer_resolve_in_parent_scope_2/ast.json new file mode 100644 index 000000000..02d07a46b --- /dev/null +++ b/parser/testdata/03314_analyzer_resolve_in_parent_scope_2/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001257103, + "rows_read": 2, + "bytes_read": 61 + } +} diff --git a/parser/testdata/03314_analyzer_resolve_in_parent_scope_2/metadata.json b/parser/testdata/03314_analyzer_resolve_in_parent_scope_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03314_analyzer_resolve_in_parent_scope_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03314_analyzer_resolve_in_parent_scope_2/query.sql b/parser/testdata/03314_analyzer_resolve_in_parent_scope_2/query.sql new file mode 100644 index 000000000..7e958d07f --- /dev/null +++ b/parser/testdata/03314_analyzer_resolve_in_parent_scope_2/query.sql @@ -0,0 +1,27 @@ +create table test +( + raw_id String, + columns_n Nested + ( + col_1 Nullable(String), + col_2 Nullable(String) + ) +) +Engine = MergeTree +order by (raw_id); + +insert into test +VALUES('1', ['type_1','type_2','type_1'],['0','0','1']), + ('2', ['type_3','type_2','type_1'],['0','1','1']), + ('3', ['type_1','type_2','type_3'],['1','0','1']) +; + +with t like '%_1%' as issue +select + raw_id, + arrayFilter((t, t2) -> (not issue), + columns_n.col_1, + columns_n.col_2 + ) +from test +order by raw_id; diff --git a/parser/testdata/03314_analyzer_resolve_in_parent_scope_3/ast.json b/parser/testdata/03314_analyzer_resolve_in_parent_scope_3/ast.json new file mode 100644 index 000000000..225a398e5 --- /dev/null +++ b/parser/testdata/03314_analyzer_resolve_in_parent_scope_3/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001093358, + "rows_read": 2, + "bytes_read": 61 + } +} diff --git a/parser/testdata/03314_analyzer_resolve_in_parent_scope_3/metadata.json b/parser/testdata/03314_analyzer_resolve_in_parent_scope_3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03314_analyzer_resolve_in_parent_scope_3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03314_analyzer_resolve_in_parent_scope_3/query.sql b/parser/testdata/03314_analyzer_resolve_in_parent_scope_3/query.sql new file mode 100644 index 000000000..84d74d568 --- /dev/null +++ 
b/parser/testdata/03314_analyzer_resolve_in_parent_scope_3/query.sql @@ -0,0 +1,18 @@ +CREATE TABLE test +( + a UInt64, + b UInt64, +) +ENGINE = MergeTree +ORDER BY tuple(); + +WITH + (a > b) as cte, + query AS + ( + SELECT count() + FROM test + WHERE cte + ) +SELECT * +FROM query; diff --git a/parser/testdata/03314_analyzer_resolve_in_parent_scope_4/ast.json b/parser/testdata/03314_analyzer_resolve_in_parent_scope_4/ast.json new file mode 100644 index 000000000..dbb67fb7b --- /dev/null +++ b/parser/testdata/03314_analyzer_resolve_in_parent_scope_4/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001277633, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03314_analyzer_resolve_in_parent_scope_4/metadata.json b/parser/testdata/03314_analyzer_resolve_in_parent_scope_4/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03314_analyzer_resolve_in_parent_scope_4/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03314_analyzer_resolve_in_parent_scope_4/query.sql b/parser/testdata/03314_analyzer_resolve_in_parent_scope_4/query.sql new file mode 100644 index 000000000..ecacf06be --- /dev/null +++ b/parser/testdata/03314_analyzer_resolve_in_parent_scope_4/query.sql @@ -0,0 +1,22 @@ +SET enable_analyzer=1; +SET allow_experimental_correlated_subqueries = 0; + +with + arrayMap(x -> x + 1, [0]) as a +select + 1 +where + 1 in (select arrayJoin(a)); + +CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=Memory; + +INSERT INTO users VALUES (1231, 'John', 10); +INSERT INTO users VALUES (6666, 'Ksenia', 20); +INSERT INTO users VALUES (8888, 'Alice', 30); + +CREATE OR REPLACE FUNCTION oldest_before AS (age_max) -> ( + SELECT uid FROM users WHERE users.age < age_max ORDER BY users.age DESC LIMIT 1 +); + +SELECT *, oldest_before(users.age) +FROM users; -- { serverError UNSUPPORTED_METHOD } diff --git a/parser/testdata/03314_analyzer_resolve_in_parent_scope_5/ast.json b/parser/testdata/03314_analyzer_resolve_in_parent_scope_5/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03314_analyzer_resolve_in_parent_scope_5/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03314_analyzer_resolve_in_parent_scope_5/metadata.json b/parser/testdata/03314_analyzer_resolve_in_parent_scope_5/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03314_analyzer_resolve_in_parent_scope_5/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03314_analyzer_resolve_in_parent_scope_5/query.sql b/parser/testdata/03314_analyzer_resolve_in_parent_scope_5/query.sql new file mode 100644 index 000000000..b7fd8e111 --- /dev/null +++ b/parser/testdata/03314_analyzer_resolve_in_parent_scope_5/query.sql @@ -0,0 +1,11 @@ +with query_1 as ( + with shared_data as ( + select 1 as value + ), shared_data_2 as ( + select * from shared_data + ) + select * from shared_data_2 +), shared_data as ( + select * from query_1 +) +select * from shared_data s; diff --git a/parser/testdata/03314_divide_decimal_short_circuit/ast.json b/parser/testdata/03314_divide_decimal_short_circuit/ast.json new file mode 100644 index 000000000..bd403a4df --- /dev/null +++ b/parser/testdata/03314_divide_decimal_short_circuit/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": 
"String" + } + ], + + "data": + [ + { + "explain": "DropQuery 03314_divide_decimal_short_circuit (children 1)" + }, + { + "explain": " Identifier 03314_divide_decimal_short_circuit" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001366899, + "rows_read": 2, + "bytes_read": 120 + } +} diff --git a/parser/testdata/03314_divide_decimal_short_circuit/metadata.json b/parser/testdata/03314_divide_decimal_short_circuit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03314_divide_decimal_short_circuit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03314_divide_decimal_short_circuit/query.sql b/parser/testdata/03314_divide_decimal_short_circuit/query.sql new file mode 100644 index 000000000..54acabf68 --- /dev/null +++ b/parser/testdata/03314_divide_decimal_short_circuit/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS 03314_divide_decimal_short_circuit; +CREATE TABLE 03314_divide_decimal_short_circuit( + `n1` Decimal(38,2), + `n2` Decimal(38,2) +) ENGINE=Memory; + +INSERT INTO 03314_divide_decimal_short_circuit VALUES + (7, 0), + (0.07, 0), + (0.07, 0.01), + (0.07, -70), + (0.07, 1506) +; + +SELECT n1, n2, multiIf(n2 != 0, n1 / n2, 0), multiIf(n2 != 0, divideDecimal(n1, n2, 6), 0) +FROM 03314_divide_decimal_short_circuit; diff --git a/parser/testdata/03314_empty_tuple_in_protobuf_format/ast.json b/parser/testdata/03314_empty_tuple_in_protobuf_format/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03314_empty_tuple_in_protobuf_format/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03314_empty_tuple_in_protobuf_format/metadata.json b/parser/testdata/03314_empty_tuple_in_protobuf_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03314_empty_tuple_in_protobuf_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03314_empty_tuple_in_protobuf_format/query.sql b/parser/testdata/03314_empty_tuple_in_protobuf_format/query.sql new file mode 100644 index 000000000..d0c2ee604 --- /dev/null +++ b/parser/testdata/03314_empty_tuple_in_protobuf_format/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-fasttest +-- ^ depends on the Protobuf library. 
+ +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Tuple()) ENGINE = Memory(); +INSERT INTO TABLE t0 (c0) VALUES (()), (()); +INSERT INTO TABLE FUNCTION file(currentDatabase() || '.protobuf', 'Protobuf', 'c0 Tuple()') SELECT c0 FROM t0; -- { serverError NO_COLUMNS_SERIALIZED_TO_PROTOBUF_FIELDS } + +DROP TABLE t0; diff --git a/parser/testdata/03314_grace_hash_join_buckets/ast.json b/parser/testdata/03314_grace_hash_join_buckets/ast.json new file mode 100644 index 000000000..e02add048 --- /dev/null +++ b/parser/testdata/03314_grace_hash_join_buckets/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001234056, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03314_grace_hash_join_buckets/metadata.json b/parser/testdata/03314_grace_hash_join_buckets/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03314_grace_hash_join_buckets/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03314_grace_hash_join_buckets/query.sql b/parser/testdata/03314_grace_hash_join_buckets/query.sql new file mode 100644 index 000000000..04583674e --- /dev/null +++ b/parser/testdata/03314_grace_hash_join_buckets/query.sql @@ -0,0 +1,3 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Int) ENGINE = Memory(); +SELECT 1 FROM t0 JOIN t0 tx ON tx.c0 = t0.c0 SETTINGS join_algorithm = 'grace_hash', grace_hash_join_max_buckets = 0; -- { clientError BAD_ARGUMENTS } diff --git a/parser/testdata/03314_has_column_in_table_alias_column/ast.json b/parser/testdata/03314_has_column_in_table_alias_column/ast.json new file mode 100644 index 000000000..2c6c45ffe --- /dev/null +++ b/parser/testdata/03314_has_column_in_table_alias_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001147684, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03314_has_column_in_table_alias_column/metadata.json b/parser/testdata/03314_has_column_in_table_alias_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03314_has_column_in_table_alias_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03314_has_column_in_table_alias_column/query.sql b/parser/testdata/03314_has_column_in_table_alias_column/query.sql new file mode 100644 index 000000000..f85f0038c --- /dev/null +++ b/parser/testdata/03314_has_column_in_table_alias_column/query.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test(`a` String, `alias_col_a` String ALIAS a) ENGINE = MergeTree ORDER BY a; +SELECT hasColumnInTable(currentDatabase(), 'test', 'alias_col_a'); +DROP TABLE test; diff --git a/parser/testdata/03314_nullable_key_no_optimize_functions_to_subcolumns/ast.json b/parser/testdata/03314_nullable_key_no_optimize_functions_to_subcolumns/ast.json new file mode 100644 index 000000000..ec6161008 --- /dev/null +++ b/parser/testdata/03314_nullable_key_no_optimize_functions_to_subcolumns/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + 
"elapsed": 0.000996283, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03314_nullable_key_no_optimize_functions_to_subcolumns/metadata.json b/parser/testdata/03314_nullable_key_no_optimize_functions_to_subcolumns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03314_nullable_key_no_optimize_functions_to_subcolumns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03314_nullable_key_no_optimize_functions_to_subcolumns/query.sql b/parser/testdata/03314_nullable_key_no_optimize_functions_to_subcolumns/query.sql new file mode 100644 index 000000000..174fb7d15 --- /dev/null +++ b/parser/testdata/03314_nullable_key_no_optimize_functions_to_subcolumns/query.sql @@ -0,0 +1,5 @@ +SET enable_analyzer = 1; +CREATE TABLE t0 (c0 Nullable(Int)) ENGINE = MergeTree() PARTITION BY (c0) ORDER BY tuple() SETTINGS allow_nullable_key = 1; +SET optimize_functions_to_subcolumns = 0; +INSERT INTO TABLE t0 (c0) VALUES (NULL); +SELECT count() FROM t0 WHERE (t0.c0 IS NULL) = TRUE; diff --git a/parser/testdata/03314_summing_merge_tree_final_not_found_column_in_block/ast.json b/parser/testdata/03314_summing_merge_tree_final_not_found_column_in_block/ast.json new file mode 100644 index 000000000..93a4b680e --- /dev/null +++ b/parser/testdata/03314_summing_merge_tree_final_not_found_column_in_block/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001242522, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03314_summing_merge_tree_final_not_found_column_in_block/metadata.json b/parser/testdata/03314_summing_merge_tree_final_not_found_column_in_block/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03314_summing_merge_tree_final_not_found_column_in_block/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03314_summing_merge_tree_final_not_found_column_in_block/query.sql b/parser/testdata/03314_summing_merge_tree_final_not_found_column_in_block/query.sql new file mode 100644 index 000000000..c78afdfbb --- /dev/null +++ b/parser/testdata/03314_summing_merge_tree_final_not_found_column_in_block/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Int, c1 Nullable(Int) MATERIALIZED 1) ENGINE = SummingMergeTree() PRIMARY KEY (abs(c1)) SETTINGS allow_nullable_key = 1; +INSERT INTO TABLE t0 (c0) VALUES (1); +SELECT c0 FROM t0 FINAL; diff --git a/parser/testdata/03314_variant_rowbinary_file/ast.json b/parser/testdata/03314_variant_rowbinary_file/ast.json new file mode 100644 index 000000000..8d36b7107 --- /dev/null +++ b/parser/testdata/03314_variant_rowbinary_file/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001353768, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03314_variant_rowbinary_file/metadata.json b/parser/testdata/03314_variant_rowbinary_file/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03314_variant_rowbinary_file/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03314_variant_rowbinary_file/query.sql 
b/parser/testdata/03314_variant_rowbinary_file/query.sql new file mode 100644 index 000000000..fcff24974 --- /dev/null +++ b/parser/testdata/03314_variant_rowbinary_file/query.sql @@ -0,0 +1,5 @@ +SET enable_variant_type = 1; +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Variant(Int,Int)) ENGINE = Memory(); +INSERT INTO TABLE FUNCTION file('data_03314.file', 'RowBinary', 'c0 Variant(Int,Int)') SELECT c0 FROM t0; +INSERT INTO TABLE t0 (c0) SELECT * FROM file('data_03314.file', 'RowBinary', 'c0 Variant(Int,Int)'); diff --git a/parser/testdata/03315_analyzer_correlated_subqueries/ast.json b/parser/testdata/03315_analyzer_correlated_subqueries/ast.json new file mode 100644 index 000000000..c800c742b --- /dev/null +++ b/parser/testdata/03315_analyzer_correlated_subqueries/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001206412, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03315_analyzer_correlated_subqueries/metadata.json b/parser/testdata/03315_analyzer_correlated_subqueries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03315_analyzer_correlated_subqueries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03315_analyzer_correlated_subqueries/query.sql b/parser/testdata/03315_analyzer_correlated_subqueries/query.sql new file mode 100644 index 000000000..30ee0a529 --- /dev/null +++ b/parser/testdata/03315_analyzer_correlated_subqueries/query.sql @@ -0,0 +1,21 @@ +set enable_analyzer = 1; +set allow_experimental_correlated_subqueries = 1; + +EXPLAIN QUERY TREE +SELECT * +FROM numbers(2) +WHERE exists(( + SELECT count() + WHERE number = 2 +)); + +EXPLAIN QUERY TREE +SELECT * +FROM numbers(2) +WHERE exists(( + SELECT + 1, + dummy, + 1 + WHERE number = 2 +)); diff --git a/parser/testdata/03315_array_join_scalar/ast.json b/parser/testdata/03315_array_join_scalar/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03315_array_join_scalar/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03315_array_join_scalar/metadata.json b/parser/testdata/03315_array_join_scalar/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03315_array_join_scalar/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03315_array_join_scalar/query.sql b/parser/testdata/03315_array_join_scalar/query.sql new file mode 100644 index 000000000..75e706509 --- /dev/null +++ b/parser/testdata/03315_array_join_scalar/query.sql @@ -0,0 +1,8 @@ +WITH + table_x AS ( + SELECT col_a, col_b + FROM (SELECT 'a' AS col_a, ['b', 'c'] AS col_b) + ARRAY JOIN col_b + ), + (SELECT groupArray((col_a, col_b)) FROM table_x) AS group_a +SELECT group_a, groupArray((col_a, col_b)) FROM table_x; diff --git a/parser/testdata/03315_join_on_optimize_pass_alias/ast.json b/parser/testdata/03315_join_on_optimize_pass_alias/ast.json new file mode 100644 index 000000000..3486fbdcb --- /dev/null +++ b/parser/testdata/03315_join_on_optimize_pass_alias/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001484732, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03315_join_on_optimize_pass_alias/metadata.json 
b/parser/testdata/03315_join_on_optimize_pass_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03315_join_on_optimize_pass_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03315_join_on_optimize_pass_alias/query.sql b/parser/testdata/03315_join_on_optimize_pass_alias/query.sql new file mode 100644 index 000000000..2db4a7243 --- /dev/null +++ b/parser/testdata/03315_join_on_optimize_pass_alias/query.sql @@ -0,0 +1,26 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (x Nullable(Int32), y Nullable(Int32)) ENGINE = Memory; +INSERT INTO t1 VALUES (1, 1), (2, 2), (NULL, NULL); + +CREATE TABLE t2 (x Nullable(Int32), y Nullable(Int32)) ENGINE = Memory; +INSERT INTO t2 VALUES (2, 2), (3, 3), (NULL, NULL); + +SELECT e2 FROM t1 FULL OUTER JOIN t2 +ON ( + (((t1.y = t2.y) OR ((t1.y IS NULL) AND (t2.y IS NULL))) AND (COALESCE(t1.x, 0) != 2)) + OR (((t1.x = t2.x)) AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL))) + AS e2 +) +ORDER BY ALL; + +SELECT *, e2 FROM t1 FULL OUTER JOIN t2 +ON ( + (((t1.y = t2.y) OR ((t1.y IS NULL) AND (t2.y IS NULL))) AND (COALESCE(t1.x, 0) != 2)) + OR (((t1.x = t2.x)) AND ((t2.x IS NOT NULL) AND (t1.x IS NOT NULL))) + AS e2 +) AND (t1.x = 1) AND (t2.x = 1) +ORDER BY ALL; diff --git a/parser/testdata/03315_join_temporary_table_names/ast.json b/parser/testdata/03315_join_temporary_table_names/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03315_join_temporary_table_names/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03315_join_temporary_table_names/metadata.json b/parser/testdata/03315_join_temporary_table_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03315_join_temporary_table_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03315_join_temporary_table_names/query.sql b/parser/testdata/03315_join_temporary_table_names/query.sql new file mode 100644 index 000000000..86f5109ee --- /dev/null +++ b/parser/testdata/03315_join_temporary_table_names/query.sql @@ -0,0 +1,36 @@ + +DROP TABLE IF EXISTS T1; +DROP TABLE IF EXISTS T2; + +CREATE TABLE T1 ( A Int32 , B Int32 ) ENGINE = Memory(); +INSERT INTO T1 VALUES (11, 22); + +CREATE TABLE T2 ( A Int32 , B Int32 , C Int32 ) ENGINE = Memory(); +INSERT INTO T2 VALUES (11, 22, 33); + +CREATE TEMPORARY TABLE T1 +( + A Int32 , B Int32 +) +ENGINE = MergeTree ORDER BY A +AS SELECT 1 AS A, 2 AS B; + +CREATE TEMPORARY TABLE T2 +( + A Int32 , B Int32 , C Int32 +) +ENGINE = MergeTree ORDER BY A +AS SELECT 1 AS A, 2 AS B, 3 AS C; + + +SELECT * FROM T1 FULL JOIN T2 ON T1.A = T2.A ORDER BY ALL +FORMAT PrettyCompactMonoBlock; + +SELECT * FROM T1 FULL JOIN T2 USING(A) ORDER BY ALL +FORMAT PrettyCompactMonoBlock; + +SELECT * FROM T1 FULL JOIN {CLICKHOUSE_DATABASE:Identifier}.T2 ON T1.A = T2.A ORDER BY ALL +FORMAT PrettyCompactMonoBlock; + +SELECT * FROM {CLICKHOUSE_DATABASE:Identifier}.T1 FULL JOIN T2 ON T1.A = T2.A ORDER BY ALL +FORMAT PrettyCompactMonoBlock; diff --git a/parser/testdata/03315_quantile_bfloat16_ubsan/ast.json b/parser/testdata/03315_quantile_bfloat16_ubsan/ast.json new file mode 100644 index 000000000..2061db0c1 --- /dev/null +++ b/parser/testdata/03315_quantile_bfloat16_ubsan/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + 
"explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantileBFloat16 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2106-02-07 06:28:14'" + }, + { + "explain": " Literal 'DateTime(\\'Europe\/Amsterdam\\')'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001522911, + "rows_read": 10, + "bytes_read": 422 + } +} diff --git a/parser/testdata/03315_quantile_bfloat16_ubsan/metadata.json b/parser/testdata/03315_quantile_bfloat16_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03315_quantile_bfloat16_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03315_quantile_bfloat16_ubsan/query.sql b/parser/testdata/03315_quantile_bfloat16_ubsan/query.sql new file mode 100644 index 000000000..6442623cb --- /dev/null +++ b/parser/testdata/03315_quantile_bfloat16_ubsan/query.sql @@ -0,0 +1,2 @@ +SELECT quantileBFloat16('2106-02-07 06:28:14'::DateTime('Europe/Amsterdam')); +SELECT quantileBFloat16('2106-02-07 06:28:15'::DateTime('Europe/Amsterdam')); diff --git a/parser/testdata/03315_trim_two_args/ast.json b/parser/testdata/03315_trim_two_args/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03315_trim_two_args/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03315_trim_two_args/metadata.json b/parser/testdata/03315_trim_two_args/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03315_trim_two_args/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03315_trim_two_args/query.sql b/parser/testdata/03315_trim_two_args/query.sql new file mode 100644 index 000000000..e41057326 --- /dev/null +++ b/parser/testdata/03315_trim_two_args/query.sql @@ -0,0 +1,111 @@ +-- Tests the second argument (custom trim characters) for functions trim, trimLeft and trimRight. 
+ +SELECT 'Basic custom character trimming'; +SELECT + trimLeft('#@hello#@', '#@') = 'hello#@' AS left_custom_ok, + trimRight('#@hello#@', '#@') = '#@hello' AS right_custom_ok, + trimBoth('#@hello#@', '#@') = 'hello' AS both_custom_ok, + trimLeft(toFixedString('#@hello#@', 9), '#@') = 'hello#@' AS left_fixed_custom_ok, + trimRight(toFixedString('#@hello#@', 9), '#@') = '#@hello' AS right_fixed_custom_ok, + trimBoth(toFixedString('#@hello#@', 9), '#@') = 'hello' AS both_fixed_custom_ok; + +SELECT 'Same AS before but with non-const input strings'; +SELECT + trimLeft(materialize('#@hello#@'), '#@') = 'hello#@' AS left_custom_ok, + trimRight(materialize('#@hello#@'), '#@') = '#@hello' AS right_custom_ok, + trimBoth(materialize('#@hello#@'), '#@') = 'hello' AS both_custom_ok, + trimLeft(materialize(toFixedString('#@hello#@', 9)), '#@') = 'hello#@' AS left_fixed_custom_ok, + trimRight(materialize(toFixedString('#@hello#@', 9)), '#@') = '#@hello' AS right_fixed_custom_ok, + trimBoth(materialize(toFixedString('#@hello#@', 9)), '#@') = 'hello' AS both_fixed_custom_ok; + +SELECT 'Multiple different characters to trim'; +SELECT + trimLeft('##@@hello##@@', '#@') = 'hello##@@' AS left_multi_ok, + trimRight('##@@hello##@@', '#@') = '##@@hello' AS right_multi_ok, + trimBoth('##@@hello##@@', '#@') = 'hello' AS both_multi_ok, + trimLeft(toFixedString('##@@hello##@@', 13), '#@') = 'hello##@@' AS left_fixed_multi_ok, + trimRight(toFixedString('##@@hello##@@', 13), '#@') = '##@@hello' AS right_fixed_multi_ok, + trimBoth(toFixedString('##@@hello##@@', 13), '#@') = 'hello' AS both_fixed_multi_ok; + +SELECT 'Empty trim character string'; +SELECT + trimLeft(' hello ', '') = ' hello ' AS left_empty_chars_ok, + trimRight(' hello ', '') = ' hello ' AS right_empty_chars_ok, + trimBoth(' hello ', '') = ' hello ' AS both_empty_chars_ok, + trimLeft(toFixedString(' hello ', 9), '') = ' hello ' AS left_fixed_empty_chars_ok, + trimRight(toFixedString(' hello ', 9), '') = ' hello ' AS right_fixed_empty_chars_ok, + trimBoth(toFixedString(' hello ', 9), '') = ' hello ' AS both_fixed_empty_chars_ok; + +SELECT 'Empty string to trim'; +SELECT + trimLeft('', '#@') = '' AS left_empty_str_ok, + trimRight('', '#@') = '' AS right_empty_str_ok, + trimBoth('', '#@') = '' AS both_empty_str_ok; + -- FixedString(0) is illegal --> no tests + + +SELECT 'String containing only trim characters'; +SELECT + trimLeft('####', '#') = '' AS left_only_trim_chars_ok, + trimRight('####', '#') = '' AS right_only_trim_chars_ok, + trimBoth('####', '#') = '' AS both_only_trim_chars_ok, + trimLeft(toFixedString('####', 4), '#') = '' AS left_fixed_only_trim_chars_ok, + trimRight(toFixedString('####', 4), '#') = '' AS right_fixed_only_trim_chars_ok, + trimBoth(toFixedString('####', 4), '#') = '' AS both_fixed_only_trim_chars_ok; + +SELECT 'Characters that have special meaning in regex'; +SELECT + trimLeft('...hello...', '.') = 'hello...' AS left_special_ok, + trimRight('...hello...', '.') = '...hello' AS right_special_ok, + trimBoth('...hello...', '.') = 'hello' AS both_special_ok, + trimLeft(toFixedString('...hello...', 11), '.') = 'hello...' 
AS left_fixed_special_ok, + trimRight(toFixedString('...hello...', 11), '.') = '...hello' AS right_fixed_special_ok, + trimBoth(toFixedString('...hello...', 11), '.') = 'hello' AS both_fixed_special_ok; + +SELECT 'Very long input strings'; +WITH + repeat('x', 1000) AS long_str, + toFixedString(long_str, 1000) AS long_fixed_str, + repeat('#@', 50) AS trim_chars +SELECT + length(trimLeft(concat(trim_chars, long_str, trim_chars), '#@')) = 1100 AS left_long_ok, + length(trimRight(concat(trim_chars, long_str, trim_chars), '#@')) = 1100 AS right_long_ok, + length(trimBoth(concat(trim_chars, long_str, trim_chars), '#@')) = 1000 AS both_long_ok, + length(trimLeft(concat(trim_chars, long_fixed_str, trim_chars), '#@')) = 1100 AS left_fixed_long_ok, + length(trimRight(concat(trim_chars, long_fixed_str, trim_chars), '#@')) = 1100 AS right_fixed_long_ok, + length(trimBoth(concat(trim_chars, long_fixed_str, trim_chars), '#@')) = 1000 AS both_fixed_long_ok; + +SELECT 'Overlapping trim characters'; +SELECT + trimLeft('aabbccHELLOccbbaa', 'abc') = 'HELLOccbbaa' AS left_overlap_ok, + trimRight('aabbccHELLOccbbaa', 'abc') = 'aabbccHELLO' AS right_overlap_ok, + trimBoth('aabbccHELLOccbbaa', 'abc') = 'HELLO' AS both_overlap_ok, + trimLeft(toFixedString('aabbccHELLOccbbaa', 17), 'abc') = 'HELLOccbbaa' AS left_fixed_overlap_ok, + trimRight(toFixedString('aabbccHELLOccbbaa', 17), 'abc') = 'aabbccHELLO' AS right_fixed_overlap_ok, + trimBoth(toFixedString('aabbccHELLOccbbaa', 17), 'abc') = 'HELLO' AS both_fixed_overlap_ok; + +SELECT 'Same trim characters provided more than once'; +SELECT + trimLeft('#@hello#@', '#@#@') = 'hello#@' AS left_custom_ok, + trimRight('#@hello#@', '#@#@') = '#@hello' AS right_custom_ok, + trimBoth('#@hello#@', '#@#@') = 'hello' AS both_custom_ok, + trimLeft(toFixedString('#@hello#@', 9), '#@#@') = 'hello#@' AS left_fixed_custom_ok, + trimRight(toFixedString('#@hello#@', 9), '#@#@') = '#@hello' AS right_fixed_custom_ok, + trimBoth(toFixedString('#@hello#@', 9), '#@#@') = 'hello' AS both_fixed_custom_ok; + +SELECT 'Negative tests'; +SELECT trimLeft('hello', 'a', 'b'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT trimRight(123, 'a'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT trimBoth('hello', 123); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT trimBoth('hello', materialize('a')); -- { serverError ILLEGAL_COLUMN } + +SELECT 'Special tests'; + +CREATE TABLE tab (col FixedString(3)) ENGINE = Memory; +INSERT INTO tab VALUES ('abc'); +SELECT trim(trailing char(0) from col) FROM tab; +SELECT trim(both 'ac' from col) FROM tab; +DROP TABLE tab; + +-- Bug 78796 +SELECT isConstant(trimBoth('')); diff --git a/parser/testdata/03316_analyzer_unique_table_aliases_dist/ast.json b/parser/testdata/03316_analyzer_unique_table_aliases_dist/ast.json new file mode 100644 index 000000000..a6da9fa37 --- /dev/null +++ b/parser/testdata/03316_analyzer_unique_table_aliases_dist/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00184377, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03316_analyzer_unique_table_aliases_dist/metadata.json b/parser/testdata/03316_analyzer_unique_table_aliases_dist/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03316_analyzer_unique_table_aliases_dist/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03316_analyzer_unique_table_aliases_dist/query.sql b/parser/testdata/03316_analyzer_unique_table_aliases_dist/query.sql new file mode 100644 index 000000000..6e2e99c58 --- /dev/null +++ b/parser/testdata/03316_analyzer_unique_table_aliases_dist/query.sql @@ -0,0 +1,13 @@ +SET max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', enable_parallel_replicas = 1, parallel_replicas_for_non_replicated_merge_tree=1; + +SELECT + sum(number GLOBAL IN ( + SELECT number AS n + FROM numbers(5) + WHERE number GLOBAL IN ( + SELECT * + FROM numbers(3) + ) + ) AS res), + sum(number * res) +FROM remote('127.0.0.2', numbers(10)); diff --git a/parser/testdata/03317_index_hint_prewhere/ast.json b/parser/testdata/03317_index_hint_prewhere/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03317_index_hint_prewhere/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03317_index_hint_prewhere/metadata.json b/parser/testdata/03317_index_hint_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03317_index_hint_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03317_index_hint_prewhere/query.sql b/parser/testdata/03317_index_hint_prewhere/query.sql new file mode 100644 index 000000000..b7b559bc3 --- /dev/null +++ b/parser/testdata/03317_index_hint_prewhere/query.sql @@ -0,0 +1,31 @@ +-- Tags: no-parallel-replicas + +DROP TABLE IF EXISTS test_indexHint_prewhere; + +CREATE TABLE test_indexHint_prewhere +( + id UInt32, + colA String, + colB String, + INDEX colA_tokens_idx tokens(colA) TYPE bloom_filter GRANULARITY 1, + INDEX colB_tokens_idx tokens(colB) TYPE bloom_filter GRANULARITY 1 +) +ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 1; + +INSERT INTO test_indexHint_prewhere SELECT number, randomPrintableASCII(30), randomPrintableASCII(40) FROM numbers(100); + +SELECT count() FROM +( + EXPLAIN actions = 1 SELECT * FROM test_indexHint_prewhere + WHERE (id IN (62, 88, 89, 67)) AND ((colA LIKE '%ymo82%') OR (colB LIKE '%dKappNQY6I%')) +) +WHERE explain LIKE '%Prewhere filter column%colA%colB%'; + +SELECT count() FROM +( + EXPLAIN actions = 1 SELECT * FROM test_indexHint_prewhere + WHERE (id IN (62, 88, 89, 67)) AND ((indexHint(has(tokens(colA), 'ymo82')) AND (colA LIKE '%ymo82%')) OR (indexHint(has(tokens(colB), 'dKappNQY6I')) AND (colB LIKE '%dKappNQY6I%'))) +) +WHERE explain LIKE '%Prewhere filter column%colA%colB%'; + +DROP TABLE test_indexHint_prewhere; diff --git a/parser/testdata/03317_pretty_fallback_to_vertical_consistent/ast.json b/parser/testdata/03317_pretty_fallback_to_vertical_consistent/ast.json new file mode 100644 index 000000000..fdeed71f4 --- /dev/null +++ b/parser/testdata/03317_pretty_fallback_to_vertical_consistent/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001519611, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03317_pretty_fallback_to_vertical_consistent/metadata.json b/parser/testdata/03317_pretty_fallback_to_vertical_consistent/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03317_pretty_fallback_to_vertical_consistent/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03317_pretty_fallback_to_vertical_consistent/query.sql b/parser/testdata/03317_pretty_fallback_to_vertical_consistent/query.sql new file mode 100644 index 000000000..ee7fb094d --- /dev/null +++ b/parser/testdata/03317_pretty_fallback_to_vertical_consistent/query.sql @@ -0,0 +1,2 @@ +SET output_format_pretty_fallback_to_vertical_min_columns = 2; +SELECT repeat('x', 100 - number) AS x, repeat('x', 100 - number) AS y FROM numbers(100) SETTINGS max_block_size = 10, output_format_pretty_fallback_to_vertical_min_table_width = 100, output_format_pretty_squash_consecutive_ms = 0 FORMAT PrettyCompact; diff --git a/parser/testdata/03318_ubsan_resample_arguments_count/ast.json b/parser/testdata/03318_ubsan_resample_arguments_count/ast.json new file mode 100644 index 000000000..1fffd5e85 --- /dev/null +++ b/parser/testdata/03318_ubsan_resample_arguments_count/ast.json @@ -0,0 +1,154 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantileResampleMerge (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal Float64_0.5" + }, + { + "explain": " Literal UInt64_257" + }, + { + "explain": " Literal UInt64_65536" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantileResampleState (children 2)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function murmurHash3_128 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_88" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal Float64_0.1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_42" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100" + } + ], + + "rows": 44, + + "statistics": + { + "elapsed": 0.001319217, + "rows_read": 44, + "bytes_read": 1857 + } +} diff --git a/parser/testdata/03318_ubsan_resample_arguments_count/metadata.json b/parser/testdata/03318_ubsan_resample_arguments_count/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03318_ubsan_resample_arguments_count/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03318_ubsan_resample_arguments_count/query.sql b/parser/testdata/03318_ubsan_resample_arguments_count/query.sql new file mode 100644 index 000000000..14383cde1 --- /dev/null +++ b/parser/testdata/03318_ubsan_resample_arguments_count/query.sql @@ -0,0 +1 @@ +SELECT quantileResampleMerge(0.5, 257, 65536, 1)(tuple(*).1) IGNORE NULLS FROM (SELECT quantileResampleState(0.1, 1, 2, 42)(murmurHash3_128(88, NULL), number, number) FROM numbers(100)); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/parser/testdata/03319_concurrent_hash_join_double_preallocation_bug/ast.json b/parser/testdata/03319_concurrent_hash_join_double_preallocation_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03319_concurrent_hash_join_double_preallocation_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03319_concurrent_hash_join_double_preallocation_bug/metadata.json b/parser/testdata/03319_concurrent_hash_join_double_preallocation_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03319_concurrent_hash_join_double_preallocation_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03319_concurrent_hash_join_double_preallocation_bug/query.sql b/parser/testdata/03319_concurrent_hash_join_double_preallocation_bug/query.sql new file mode 100644 index 000000000..355944087 --- /dev/null +++ b/parser/testdata/03319_concurrent_hash_join_double_preallocation_bug/query.sql @@ -0,0 +1,25 @@ +-- Tags: no-tsan, no-asan, no-msan, no-ubsan, no-parallel-replicas + +drop table if exists lhs; +drop table if exists rhs; + +create table lhs(a UInt64, b UInt64) Engine = MergeTree order by tuple(); +create table rhs(a UInt64, b UInt64) Engine = MergeTree order by tuple(); + +insert into lhs select number, number from numbers_mt(2e5); +-- rhs should be bigger to trigger tables swap (see `query_plan_join_swap_table`) +insert into rhs select number, number from numbers_mt(1e6); + +set max_threads = 8, query_plan_join_swap_table = 1, join_algorithm = 'parallel_hash', enable_analyzer = 1; + +-- First populate the cache of hash table sizes +select * from lhs as t1 join rhs as t2 on t1.a = t2.a format Null; + +-- For the next run we will preallocate the space +select * from lhs as t1 join rhs as t2 on t1.a = t2.a format Null settings log_comment = '03319_second_query'; + +system flush logs query_log; + +select ProfileEvents['HashJoinPreallocatedElementsInHashTables'] +from system.query_log +where event_date >= yesterday() and current_database = currentDatabase() and type = 'QueryFinish' and log_comment = '03319_second_query'; diff --git a/parser/testdata/03321_create_table_as_replicated_engine_args/ast.json b/parser/testdata/03321_create_table_as_replicated_engine_args/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03321_create_table_as_replicated_engine_args/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03321_create_table_as_replicated_engine_args/metadata.json b/parser/testdata/03321_create_table_as_replicated_engine_args/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03321_create_table_as_replicated_engine_args/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03321_create_table_as_replicated_engine_args/query.sql b/parser/testdata/03321_create_table_as_replicated_engine_args/query.sql new file mode 100644 index 000000000..4578d2e1c --- /dev/null +++ b/parser/testdata/03321_create_table_as_replicated_engine_args/query.sql @@ -0,0 +1,20 @@ +-- Tags: zookeeper, no-replicated-database, no-ordinary-database +-- no-replicated-database: we explicitly run this test by creating a replicated database test_03321 + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; + +CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier} ENGINE=Replicated('/clickhouse/databases/{database}', 'shard1', 'replica1'); + +USE {CLICKHOUSE_DATABASE:Identifier}; + +CREATE TABLE t1 (x UInt8, y String) ENGINE=ReplicatedMergeTree ORDER BY x; + +-- Test that `database_replicated_allow_replicated_engine_arguments=0` for *MergeTree tables in Replicated databases works as expected for `CREATE TABLE ... AS ...` queries. +-- While creating t2 with the table structure of t1, the AST for the create query would contain the engine args from t1. For this kind of query, skip this validation if +-- the values of the arguments match the default values and also skip throwing an exception. +SET database_replicated_allow_replicated_engine_arguments=0; +CREATE TABLE t2 AS t1; + +SHOW CREATE TABLE t2; + +DROP DATABASE {CLICKHOUSE_DATABASE:Identifier}; diff --git a/parser/testdata/03321_functions_to_subcolumns_skip_index/ast.json b/parser/testdata/03321_functions_to_subcolumns_skip_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03321_functions_to_subcolumns_skip_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03321_functions_to_subcolumns_skip_index/metadata.json b/parser/testdata/03321_functions_to_subcolumns_skip_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03321_functions_to_subcolumns_skip_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03321_functions_to_subcolumns_skip_index/query.sql b/parser/testdata/03321_functions_to_subcolumns_skip_index/query.sql new file mode 100644 index 000000000..b1ac8ce1a --- /dev/null +++ b/parser/testdata/03321_functions_to_subcolumns_skip_index/query.sql @@ -0,0 +1,48 @@ +-- Tags: no-parallel-replicas + +DROP TABLE IF EXISTS bloom_filter_test; + +CREATE TABLE bloom_filter_test +( + id UInt64, + m Map(String, String), + INDEX idx_mk mapKeys(m) TYPE bloom_filter GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY id +SETTINGS index_granularity = 1; + +INSERT INTO bloom_filter_test VALUES (1, {'1': '1'}), (2, {'2': '2'}), (3, {'3': '3'}); + +SET enable_analyzer = 1; +SET optimize_functions_to_subcolumns = 1; + +SELECT trim(explain) FROM +( + EXPLAIN indexes = 1 + SELECT id -- 'm' not in projection columns + FROM bloom_filter_test + WHERE mapContains(m, '1') + ORDER BY id +) WHERE explain LIKE '%Granules:%'; + +SELECT id -- 'm' not in projection columns +FROM bloom_filter_test +WHERE mapContains(m, '1') +ORDER BY id; + +SELECT trim(explain) FROM +( + EXPLAIN indexes = 1 + SELECT * -- 'm' in projection columns + FROM bloom_filter_test + WHERE mapContains(m, '1') + ORDER BY id +) WHERE explain LIKE '%Granules:%'; + +SELECT * -- 'm' in projection columns +FROM bloom_filter_test +WHERE mapContains(m, '1') +ORDER BY id; + +DROP TABLE bloom_filter_test; diff --git a/parser/testdata/03321_inner_materialized_view_nested/ast.json b/parser/testdata/03321_inner_materialized_view_nested/ast.json new
file mode 100644 index 000000000..6147cb426 --- /dev/null +++ b/parser/testdata/03321_inner_materialized_view_nested/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001312936, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03321_inner_materialized_view_nested/metadata.json b/parser/testdata/03321_inner_materialized_view_nested/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03321_inner_materialized_view_nested/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03321_inner_materialized_view_nested/query.sql b/parser/testdata/03321_inner_materialized_view_nested/query.sql new file mode 100644 index 000000000..96e265186 --- /dev/null +++ b/parser/testdata/03321_inner_materialized_view_nested/query.sql @@ -0,0 +1,26 @@ +SET flatten_nested = 1; + +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS mv; + +CREATE TABLE t (x int, y int, z int) ORDER BY x; + +CREATE MATERIALIZED VIEW mv ORDER BY () AS SELECT x, ([(y, z)])::Nested(y int, z int) FROM t; + +INSERT INTO t VALUES (1, 2, 3); + +SELECT * FROM mv; + +DROP TABLE t; +DROP TABLE mv; + +CREATE TABLE t (x int, y Array(int), z Array(int)) ORDER BY x; + +CREATE MATERIALIZED VIEW mv ORDER BY () AS SELECT x, arrayZip(y, z)::Nested(a int, b int) n FROM t; + +INSERT INTO t VALUES (1, [1, 2], [3, 4]); + +SELECT n.a[1], n.a[2], n.b[1], n.b[2] FROM mv; + +DROP TABLE t; +DROP TABLE mv; diff --git a/parser/testdata/03321_join_on_is_null_lowcardinality/ast.json b/parser/testdata/03321_join_on_is_null_lowcardinality/ast.json new file mode 100644 index 000000000..766b58df4 --- /dev/null +++ b/parser/testdata/03321_join_on_is_null_lowcardinality/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001344969, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03321_join_on_is_null_lowcardinality/metadata.json b/parser/testdata/03321_join_on_is_null_lowcardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03321_join_on_is_null_lowcardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03321_join_on_is_null_lowcardinality/query.sql b/parser/testdata/03321_join_on_is_null_lowcardinality/query.sql new file mode 100644 index 000000000..bda16815c --- /dev/null +++ b/parser/testdata/03321_join_on_is_null_lowcardinality/query.sql @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS test; +DROP TABLE IF EXISTS test2; + +SET enable_analyzer = 1; + +CREATE TABLE test ( `id` UInt32, `value` LowCardinality(Nullable(String)) ) +ENGINE = MergeTree ORDER BY id; + +CREATE TABLE test2 ( `id` UInt32, `value` LowCardinality(Nullable(String)) ) +ENGINE = MergeTree ORDER BY id; + +INSERT INTO test VALUES (123, NULL); +INSERT INTO test2 VALUES (123, NULL); + +SELECT * FROM test +FULL JOIN test2 +ON test.value IS NULL AND test2.value IS NULL OR test.value == test2.value +SETTINGS join_use_nulls = 1 +; + +SELECT * FROM test +FULL JOIN test2 +ON test.value == test2.value +ORDER BY ALL +SETTINGS join_use_nulls = 1 +; diff --git a/parser/testdata/03321_system_tables_parametrized_view_params/ast.json 
b/parser/testdata/03321_system_tables_parametrized_view_params/ast.json new file mode 100644 index 000000000..e0837c590 --- /dev/null +++ b/parser/testdata/03321_system_tables_parametrized_view_params/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery raw_data (children 1)" + }, + { + "explain": " Identifier raw_data" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001471091, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/03321_system_tables_parametrized_view_params/metadata.json b/parser/testdata/03321_system_tables_parametrized_view_params/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03321_system_tables_parametrized_view_params/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03321_system_tables_parametrized_view_params/query.sql b/parser/testdata/03321_system_tables_parametrized_view_params/query.sql new file mode 100644 index 000000000..57dce6fa8 --- /dev/null +++ b/parser/testdata/03321_system_tables_parametrized_view_params/query.sql @@ -0,0 +1,65 @@ +DROP TABLE IF EXISTS raw_data; + +DROP TABLE IF EXISTS raw_temporary_data; + +DROP TABLE IF EXISTS parameterized_view_one_param; + +DROP TABLE IF EXISTS parameterized_view_multiple_params; + +DROP TABLE IF EXISTS parameterized_view_one_param_temporary; + +DROP TABLE IF EXISTS parameterized_view_multiple_params_temporary; + +SELECT '-----------------------------------------'; + +SELECT 'TEST TABLE'; + +CREATE TABLE raw_data (id UInt32, data String) ENGINE = MergeTree ORDER BY id; + +CREATE VIEW parameterized_view_one_param AS SELECT * FROM raw_data WHERE id = {id:UInt32}; + +SELECT name, engine, parameterized_view_parameters + FROM system.tables + WHERE database = currentDatabase() and name = 'parameterized_view_one_param'; + +CREATE VIEW parameterized_view_multiple_params AS SELECT * FROM raw_data WHERE id BETWEEN {id_from:UInt32} AND {id_to:UInt32}; + +SELECT name, engine, parameterized_view_parameters + FROM system.tables + WHERE database = currentDatabase() and name = 'parameterized_view_multiple_params'; + +SELECT '-----------------------------------------'; + +SELECT 'TEST TEMPORARY TABLE'; + +CREATE TEMPORARY TABLE raw_temporary_data (id UInt32, data String); + +CREATE TEMPORARY TABLE alter_test (CounterID UInt32, StartDate Date, UserID UInt32, VisitID UInt32); + +CREATE VIEW parameterized_view_one_param_temporary AS SELECT * FROM raw_data WHERE id = {id:UInt32}; + +SELECT name, engine, parameterized_view_parameters + FROM system.tables + WHERE database = currentDatabase() and name = 'parameterized_view_one_param_temporary'; + +CREATE VIEW parameterized_view_multiple_params_temporary + AS SELECT * FROM raw_data + WHERE CounterID BETWEEN {counter_id_from:UInt32} AND {counter_id_to:UInt32} + AND StartDate BETWEEN {date_from:Date} AND {date_to:UInt32} + and UserId = {UserId:UInt32}; + +SELECT name, engine, parameterized_view_parameters + FROM system.tables + WHERE database = currentDatabase() and name = 'parameterized_view_multiple_params_temporary'; + +DROP TABLE parameterized_view_one_param; + +DROP TABLE parameterized_view_multiple_params; + +DROP TABLE parameterized_view_one_param_temporary; + +DROP TABLE parameterized_view_multiple_params_temporary; + +DROP TABLE raw_temporary_data; + +DROP TABLE raw_data; diff --git a/parser/testdata/03322_bugfix_of_with_insert/ast.json b/parser/testdata/03322_bugfix_of_with_insert/ast.json 
new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03322_bugfix_of_with_insert/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03322_bugfix_of_with_insert/metadata.json b/parser/testdata/03322_bugfix_of_with_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03322_bugfix_of_with_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03322_bugfix_of_with_insert/query.sql b/parser/testdata/03322_bugfix_of_with_insert/query.sql new file mode 100644 index 000000000..506421f0f --- /dev/null +++ b/parser/testdata/03322_bugfix_of_with_insert/query.sql @@ -0,0 +1,7 @@ +-- Regression test for a crash involving WITH + INSERT + +WITH x AS (SELECT 1) INSERT INTO TABLE t0 (c0) WITH y AS (SELECT 1) (SELECT 1); -- { clientError SYNTAX_ERROR } + +WITH z AS (SELECT 1) INSERT INTO TABLE x SELECT 1 FROM ((SELECT 1) UNION (WITH y AS (SELECT 1) (SELECT 1) UNION (SELECT 1))); -- { clientError SYNTAX_ERROR } + +WITH x AS (SELECT 1 as c0) INSERT INTO TABLE t0 (c0) SELECT [1,; -- { clientError SYNTAX_ERROR } \ No newline at end of file diff --git a/parser/testdata/03322_check_count_for_parquet_in_s3/ast.json b/parser/testdata/03322_check_count_for_parquet_in_s3/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03322_check_count_for_parquet_in_s3/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03322_check_count_for_parquet_in_s3/metadata.json b/parser/testdata/03322_check_count_for_parquet_in_s3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03322_check_count_for_parquet_in_s3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03322_check_count_for_parquet_in_s3/query.sql b/parser/testdata/03322_check_count_for_parquet_in_s3/query.sql new file mode 100644 index 000000000..8e60bbc62 --- /dev/null +++ b/parser/testdata/03322_check_count_for_parquet_in_s3/query.sql @@ -0,0 +1,7 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +SELECT count() FROM s3(s3_conn, filename='03322_*.parquet', format='Parquet', structure='a Int, b Int, c Int'); +SELECT count() FROM s3(s3_conn, filename='03322_*.parquet', format='Parquet', structure='a Int, b Int, c Int') WHERE a = 1; +SELECT count() FROM s3(s3_conn, filename='03322_*.parquet', format='Parquet', structure='a Int, b Int, c Int'); + diff --git a/parser/testdata/03322_initial_query_start_time_check/ast.json b/parser/testdata/03322_initial_query_start_time_check/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03322_initial_query_start_time_check/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03322_initial_query_start_time_check/metadata.json b/parser/testdata/03322_initial_query_start_time_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03322_initial_query_start_time_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03322_initial_query_start_time_check/query.sql b/parser/testdata/03322_initial_query_start_time_check/query.sql new file mode 100644 index 000000000..e445a3487 --- /dev/null +++ b/parser/testdata/03322_initial_query_start_time_check/query.sql @@ -0,0 +1,9 @@ +-- Tags: shard + +DROP TABLE IF EXISTS tmp; + +CREATE OR REPLACE VIEW tmp AS SELECT initialQueryStartTime() as it, now() AS t FROM system.one WHERE NOT ignore(sleep(0.5)); +SELECT now()==max(t),
initialQueryStartTime()==max(it), initialQueryStartTime()==min(it), initialQueryStartTime()>=now()-1 FROM (SELECT it, t FROM remote('127.0.0.{1..10}', currentDatabase(), tmp)) SETTINGS max_distributed_connections=1, async_socket_for_remote=0; + +DROP TABLE tmp; + diff --git a/parser/testdata/03322_materialized_view_ignore_errors_url/ast.json b/parser/testdata/03322_materialized_view_ignore_errors_url/ast.json new file mode 100644 index 000000000..8bf838bf4 --- /dev/null +++ b/parser/testdata/03322_materialized_view_ignore_errors_url/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery src (children 1)" + }, + { + "explain": " Identifier src" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001426131, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/03322_materialized_view_ignore_errors_url/metadata.json b/parser/testdata/03322_materialized_view_ignore_errors_url/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03322_materialized_view_ignore_errors_url/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03322_materialized_view_ignore_errors_url/query.sql b/parser/testdata/03322_materialized_view_ignore_errors_url/query.sql new file mode 100644 index 000000000..528347b4c --- /dev/null +++ b/parser/testdata/03322_materialized_view_ignore_errors_url/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS src; +DROP TABLE IF EXISTS dst; +DROP TABLE IF EXISTS mv; + +CREATE TABLE src (x int) ORDER BY (); + +CREATE TABLE dst (x int) AS url('http://127.0.0.1/', JSONEachRow, 'x int'); + +CREATE MATERIALIZED VIEW mv TO dst AS SELECT * FROM src; + +INSERT INTO src SETTINGS materialized_views_ignore_errors = 0 VALUES (1); -- { serverError POCO_EXCEPTION } + +INSERT INTO src SETTINGS materialized_views_ignore_errors = 1 VALUES (2); + +--- value 2 should be in src, value 1 could be in src +SELECT * FROM src WHERE x = 2; + +DROP TABLE src; +DROP TABLE dst; +DROP TABLE mv; + +-- Ensure this still fails +insert into function url('http://127.0.0.1/foo.tsv', 'TabSeparated', 'key Int') settings http_max_tries=1, materialized_views_ignore_errors=1 values (2); -- { serverError POCO_EXCEPTION } diff --git a/parser/testdata/03322_unused_interpolate_expressions/ast.json b/parser/testdata/03322_unused_interpolate_expressions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03322_unused_interpolate_expressions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03322_unused_interpolate_expressions/metadata.json b/parser/testdata/03322_unused_interpolate_expressions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03322_unused_interpolate_expressions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03322_unused_interpolate_expressions/query.sql b/parser/testdata/03322_unused_interpolate_expressions/query.sql new file mode 100644 index 000000000..6a77f2ba9 --- /dev/null +++ b/parser/testdata/03322_unused_interpolate_expressions/query.sql @@ -0,0 +1,60 @@ +CREATE TABLE foo ( + open_time Int64, + open_price Int8, + close_price Int8 +) +ENGINE = MergeTree +ORDER BY open_time; + +INSERT INTO foo SELECT number, cityHash64(number) % 256, cityHash64(number * number) % 256 FROM numbers(30); + +-- Both interpolate expressions are removed +SELECT + group_id +FROM ( + SELECT + intDiv(open_time, 10) AS group_id, +
toFloat64(argMin(open_price, open_time)) as open, + toFloat64(argMax(close_price, open_time)) as close + FROM + foo + GROUP BY + group_id + ORDER BY group_id ASC WITH FILL STEP 1 INTERPOLATE ( + open, close + ) +); + +-- `close` interpolate expression is removed +SELECT + group_id, open +FROM ( + SELECT + intDiv(open_time, 10) AS group_id, + toFloat64(argMin(open_price, open_time)) as open, + toFloat64(argMax(close_price, open_time)) as close + FROM + foo + GROUP BY + group_id + ORDER BY group_id ASC WITH FILL STEP 1 INTERPOLATE ( + open, close + ) +); + +-- Both interpolate expressions are kept +SELECT + group_id, open, close +FROM ( + SELECT + intDiv(open_time, 10) AS group_id, + toFloat64(argMin(open_price, open_time)) as open, + toFloat64(argMax(close_price, open_time)) as close + FROM + foo + GROUP BY + group_id + ORDER BY group_id ASC WITH FILL STEP 1 INTERPOLATE ( + open, close + ) +); diff --git a/parser/testdata/03322_view_over_parameterized_view/ast.json b/parser/testdata/03322_view_over_parameterized_view/ast.json new file mode 100644 index 000000000..cb4caf483 --- /dev/null +++ b/parser/testdata/03322_view_over_parameterized_view/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery v (children 2)" + }, + { + "explain": " Identifier v" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " QueryParameter parity:Int8" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.001908158, + "rows_read": 20, + "bytes_read": 765 + } +} diff --git a/parser/testdata/03322_view_over_parameterized_view/metadata.json b/parser/testdata/03322_view_over_parameterized_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03322_view_over_parameterized_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03322_view_over_parameterized_view/query.sql b/parser/testdata/03322_view_over_parameterized_view/query.sql new file mode 100644 index 000000000..cc85437ff --- /dev/null +++ b/parser/testdata/03322_view_over_parameterized_view/query.sql @@ -0,0 +1,3 @@ +create view v as select number from numbers(5) where number%2={parity:Int8}; +create table vv (number Int8) engine Merge(currentDatabase(),'v'); +select * from vv; -- { serverError STORAGE_REQUIRES_PARAMETER } diff --git a/parser/testdata/03323_bfloat16_least_supertype/ast.json b/parser/testdata/03323_bfloat16_least_supertype/ast.json new file mode 100644 index 000000000..a530f5db2 --- /dev/null +++ b/parser/testdata/03323_bfloat16_least_supertype/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + 
"data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier d" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " Identifier d" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001439687, + "rows_read": 12, + "bytes_read": 425 + } +} diff --git a/parser/testdata/03323_bfloat16_least_supertype/metadata.json b/parser/testdata/03323_bfloat16_least_supertype/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03323_bfloat16_least_supertype/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03323_bfloat16_least_supertype/query.sql b/parser/testdata/03323_bfloat16_least_supertype/query.sql new file mode 100644 index 000000000..b00058114 --- /dev/null +++ b/parser/testdata/03323_bfloat16_least_supertype/query.sql @@ -0,0 +1,5 @@ +SELECT if(d = 4, d, 1) +FROM +( + SELECT materialize(1::BFloat16) as d +); diff --git a/parser/testdata/03323_union_all_constants_bug/ast.json b/parser/testdata/03323_union_all_constants_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03323_union_all_constants_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03323_union_all_constants_bug/metadata.json b/parser/testdata/03323_union_all_constants_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03323_union_all_constants_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03323_union_all_constants_bug/query.sql b/parser/testdata/03323_union_all_constants_bug/query.sql new file mode 100644 index 000000000..00795ac71 --- /dev/null +++ b/parser/testdata/03323_union_all_constants_bug/query.sql @@ -0,0 +1,31 @@ +WITH transactions_data AS + ( + SELECT + 42 AS grade_name_id, + 42 AS today_flow_transaction_count, + CAST('good', 'Nullable(String)') AS status + FROM + ( + SELECT 42 AS dispenser_id + ) AS trans_his + INNER JOIN + ( + SELECT 42 AS dispenser_id + ) AS gde ON trans_his.dispenser_id = gde.dispenser_id + ) +SELECT flag +FROM +( + SELECT + transactions_data.grade_name_id AS grade_name_id, + multiIf(transactions_data.status = 'low', 'YELLOW', NULL) AS flag + FROM transactions_data + UNION ALL + SELECT + grade_name_id, + multiIf(status = 'good', 'GREEN', NULL) AS flag + FROM transactions_data + WHERE status = 'good' +) +ORDER BY grade_name_id ASC; + diff --git a/parser/testdata/03324_aggregating_merge_tree_final_extremes/ast.json b/parser/testdata/03324_aggregating_merge_tree_final_extremes/ast.json new file mode 100644 index 000000000..3774d1797 --- /dev/null +++ b/parser/testdata/03324_aggregating_merge_tree_final_extremes/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t03324 (children 1)" + }, + { + "explain": " Identifier t03324" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001449165, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/03324_aggregating_merge_tree_final_extremes/metadata.json 
b/parser/testdata/03324_aggregating_merge_tree_final_extremes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03324_aggregating_merge_tree_final_extremes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03324_aggregating_merge_tree_final_extremes/query.sql b/parser/testdata/03324_aggregating_merge_tree_final_extremes/query.sql new file mode 100644 index 000000000..b178f2bd1 --- /dev/null +++ b/parser/testdata/03324_aggregating_merge_tree_final_extremes/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS t03324; + +-- { echoOn } + +CREATE TABLE t03324 (c0 Nullable(Int)) ENGINE = AggregatingMergeTree() PARTITION BY (c0) ORDER BY (c0) SETTINGS allow_nullable_key = 1; +INSERT INTO TABLE t03324 (c0) VALUES (1), (NULL); + +OPTIMIZE TABLE t03324 FINAL SETTINGS optimize_throw_if_noop = 1; +SELECT c0 FROM t03324 FINAL ORDER BY c0 DESC SETTINGS extremes = 1; +TRUNCATE t03324; +INSERT INTO TABLE t03324 (c0) VALUES (1), (NULL); +SELECT c0 FROM t03324 FINAL ORDER BY c0 DESC SETTINGS extremes = 1; + +DROP TABLE t03324; diff --git a/parser/testdata/03325_alter_ast_format/ast.json b/parser/testdata/03325_alter_ast_format/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03325_alter_ast_format/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03325_alter_ast_format/metadata.json b/parser/testdata/03325_alter_ast_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03325_alter_ast_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03325_alter_ast_format/query.sql b/parser/testdata/03325_alter_ast_format/query.sql new file mode 100644 index 000000000..e07ab1a49 --- /dev/null +++ b/parser/testdata/03325_alter_ast_format/query.sql @@ -0,0 +1,2 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/74369 +ALTER TABLE t22 (DELETE WHERE ('叫' = c1) OR ((792.3673220441809 = c0) AND (c0 = c1))), (MODIFY SETTING persistent = 1), (UPDATE c1 = 'would' WHERE NOT f2()), (MODIFY SETTING persistent = 0); -- { serverError UNKNOWN_TABLE } \ No newline at end of file diff --git a/parser/testdata/03325_alter_modify_projection_primary_key_column/ast.json b/parser/testdata/03325_alter_modify_projection_primary_key_column/ast.json new file mode 100644 index 000000000..2a3dd0054 --- /dev/null +++ b/parser/testdata/03325_alter_modify_projection_primary_key_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001555855, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03325_alter_modify_projection_primary_key_column/metadata.json b/parser/testdata/03325_alter_modify_projection_primary_key_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03325_alter_modify_projection_primary_key_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03325_alter_modify_projection_primary_key_column/query.sql b/parser/testdata/03325_alter_modify_projection_primary_key_column/query.sql new file mode 100644 index 000000000..2c4ec6376 --- /dev/null +++ b/parser/testdata/03325_alter_modify_projection_primary_key_column/query.sql @@ -0,0 +1,13 @@ +drop table if exists test; +create table test (x UInt16, y UInt16, projection proj 
(select * order by x)) engine=MergeTree order by tuple() settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1; +insert into test select number, number from numbers(100000); +alter table test modify column x UInt64; +select * from test where x = 10; +drop table test; + +drop table if exists test; +create table test (x UInt16, y UInt16, projection proj (select * order by x)) engine=MergeTree order by tuple() settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000; +insert into test select number, number from numbers(100000); +alter table test modify column x UInt64; +select * from test where x = 10; +drop table test; diff --git a/parser/testdata/03325_count_summing_merge_tree_order_by_tuple/ast.json b/parser/testdata/03325_count_summing_merge_tree_order_by_tuple/ast.json new file mode 100644 index 000000000..2ba2f4ea8 --- /dev/null +++ b/parser/testdata/03325_count_summing_merge_tree_order_by_tuple/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001206572, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03325_count_summing_merge_tree_order_by_tuple/metadata.json b/parser/testdata/03325_count_summing_merge_tree_order_by_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03325_count_summing_merge_tree_order_by_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03325_count_summing_merge_tree_order_by_tuple/query.sql b/parser/testdata/03325_count_summing_merge_tree_order_by_tuple/query.sql new file mode 100644 index 000000000..62fa3bd56 --- /dev/null +++ b/parser/testdata/03325_count_summing_merge_tree_order_by_tuple/query.sql @@ -0,0 +1,7 @@ +SET enable_analyzer = 1; +SET allow_suspicious_primary_key = 1; + +CREATE TABLE t0 (c0 Nullable(Int)) ENGINE = SummingMergeTree() ORDER BY tuple() PARTITION BY (c0) SETTINGS allow_nullable_key = 1; +INSERT INTO TABLE t0 (c0) VALUES (NULL); +SELECT * FROM t0 FINAL; +SELECT count() FROM t0 FINAL WHERE ((t0.c0 IS NULL) = TRUE); diff --git a/parser/testdata/03325_distributed_join_json_array_subcolumns/ast.json b/parser/testdata/03325_distributed_join_json_array_subcolumns/ast.json new file mode 100644 index 000000000..f3911436f --- /dev/null +++ b/parser/testdata/03325_distributed_join_json_array_subcolumns/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001351653, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03325_distributed_join_json_array_subcolumns/metadata.json b/parser/testdata/03325_distributed_join_json_array_subcolumns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03325_distributed_join_json_array_subcolumns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03325_distributed_join_json_array_subcolumns/query.sql b/parser/testdata/03325_distributed_join_json_array_subcolumns/query.sql new file mode 100644 index 000000000..e0695d45b --- /dev/null +++ b/parser/testdata/03325_distributed_join_json_array_subcolumns/query.sql @@ -0,0 +1,35 @@ +SET enable_json_type=1; +SET allow_experimental_analyzer=1; + +DROP TABLE IF EXISTS test_distr; +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + id Int64, + data JSON(arr1 
Array(String), arr2 Array(Int32)) +) +ENGINE = MergeTree ORDER BY id; + + +CREATE TABLE test_distr +( + id Int64, + data JSON(arr1 Array(String), arr2 Array(Int32)) +) +ENGINE = Distributed(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), 'test', murmurHash2_32(id)); + +INSERT INTO test FORMAT Values (1, '{"arr1" : ["s1", "s2", "s3"], "arr2" : []}'), (2, '{"arr1" : ["s4", "s5"], "arr2" : [42]}'); + +SELECT count() +FROM test_distr as left +GLOBAL INNER JOIN test_distr as right on left.id = right.id +WHERE has(right.data.arr1, 's3') AND has(right.data.arr2, 42) settings serialize_query_plan = 0; + +SELECT count() +FROM test_distr as left +GLOBAL INNER JOIN test_distr as right on left.id = right.id +WHERE has(right.data.arr1, 's3') AND has(right.data.arr2, 42) settings enable_parallel_replicas = 0; + +DROP TABLE test_distr; +DROP TABLE test; diff --git a/parser/testdata/03326_parallel_replicas_out_of_range/ast.json b/parser/testdata/03326_parallel_replicas_out_of_range/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03326_parallel_replicas_out_of_range/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03326_parallel_replicas_out_of_range/metadata.json b/parser/testdata/03326_parallel_replicas_out_of_range/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03326_parallel_replicas_out_of_range/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03326_parallel_replicas_out_of_range/query.sql b/parser/testdata/03326_parallel_replicas_out_of_range/query.sql new file mode 100644 index 000000000..2cb592115 --- /dev/null +++ b/parser/testdata/03326_parallel_replicas_out_of_range/query.sql @@ -0,0 +1,13 @@ +-- There are no settings to enable parallel replicas explicitly, because +-- we have a separate test run with them and they will be enabled automatically. 
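+-- For reference (an illustrative sketch, not executed by this test): an explicit
+-- opt-in would look roughly like the settings used by
+-- 03316_analyzer_unique_table_aliases_dist earlier in this patch:
+--   SET max_parallel_replicas = 3,
+--       cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost',
+--       enable_parallel_replicas = 1;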
+ +SET enable_analyzer=1; + +SYSTEM FLUSH LOGS query_log; + +SELECT + count(materialize(toLowCardinality(1))) IGNORE NULLS AS num, + hostName() AS hostName +FROM system.query_log AS a +INNER JOIN system.processes AS b ON (type = toFixedString(toNullable('QueryStart'), 10)) AND (dateDiff('second', event_time, now()) > 5) AND (current_database = currentDatabase()) +FORMAT `Null`; diff --git a/parser/testdata/03326_toStartOfNanosecond_ubsan/ast.json b/parser/testdata/03326_toStartOfNanosecond_ubsan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03326_toStartOfNanosecond_ubsan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03326_toStartOfNanosecond_ubsan/metadata.json b/parser/testdata/03326_toStartOfNanosecond_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03326_toStartOfNanosecond_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03326_toStartOfNanosecond_ubsan/query.sql b/parser/testdata/03326_toStartOfNanosecond_ubsan/query.sql new file mode 100644 index 000000000..ce61ec02d --- /dev/null +++ b/parser/testdata/03326_toStartOfNanosecond_ubsan/query.sql @@ -0,0 +1,2 @@ +-- Bug #71775 +SELECT toStartOfNanosecond('2263-01-01 00:00:00'::DateTime64); diff --git a/parser/testdata/03327_alias_column_constant/ast.json b/parser/testdata/03327_alias_column_constant/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03327_alias_column_constant/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03327_alias_column_constant/metadata.json b/parser/testdata/03327_alias_column_constant/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03327_alias_column_constant/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03327_alias_column_constant/query.sql b/parser/testdata/03327_alias_column_constant/query.sql new file mode 100644 index 000000000..a37a414ed --- /dev/null +++ b/parser/testdata/03327_alias_column_constant/query.sql @@ -0,0 +1,18 @@ + +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a Int32, b Int32 ALIAS 1) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t1 VALUES (1), (2), (3); + +DROP TABLE IF EXISTS t2; +CREATE TABLE t2 (a Int32, b Int32 ALIAS 1) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t2 VALUES (2), (3), (4); + +-- { echoOn } + +SELECT b FROM t1; +SELECT b FROM t1 JOIN t2 USING b; +SELECT 1 AS b FROM t1 JOIN t2 USING b; +SELECT 1 AS b FROM t1 JOIN t2 USING b SETTINGS analyzer_compatibility_join_using_top_level_identifier = 1; +SELECT 2 AS a FROM t1 JOIN t2 USING a SETTINGS analyzer_compatibility_join_using_top_level_identifier = 1; + + diff --git a/parser/testdata/03327_hypothesis_index_sanity/ast.json b/parser/testdata/03327_hypothesis_index_sanity/ast.json new file mode 100644 index 000000000..9f6fa0cfd --- /dev/null +++ b/parser/testdata/03327_hypothesis_index_sanity/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t3 (children 3)" + }, + { + "explain": " Identifier t3" + }, + { + "explain": " Columns definition (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType UInt8" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Index (children 2)" + }, + { + "explain": " Identifier x" + }, + { 
+ "explain": " Function hypothesis (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Storage definition (children 3)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Set" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001828346, + "rows_read": 17, + "bytes_read": 545 + } +} diff --git a/parser/testdata/03327_hypothesis_index_sanity/metadata.json b/parser/testdata/03327_hypothesis_index_sanity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03327_hypothesis_index_sanity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03327_hypothesis_index_sanity/query.sql b/parser/testdata/03327_hypothesis_index_sanity/query.sql new file mode 100644 index 000000000..ebe348e34 --- /dev/null +++ b/parser/testdata/03327_hypothesis_index_sanity/query.sql @@ -0,0 +1,7 @@ +CREATE TABLE t3 (x UInt8, INDEX i x TYPE hypothesis GRANULARITY 100) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 1; +INSERT INTO TABLE t3 VALUES (1), (2); +SELECT 1 FROM t3 WHERE x=1; + +CREATE TABLE t0 (c0 Int, INDEX i0 c0 TYPE hypothesis GRANULARITY 9) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity = 4; +INSERT INTO TABLE t0 (c0) VALUES (1), (2), (3), (2), (4), (5), (6), (7); +SELECT 1 FROM t0 tx JOIN t0 ON tx.c0 = t0.c0; diff --git a/parser/testdata/03328_formatting_assignment_expression/ast.json b/parser/testdata/03328_formatting_assignment_expression/ast.json new file mode 100644 index 000000000..fa8d8e26d --- /dev/null +++ b/parser/testdata/03328_formatting_assignment_expression/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function formatQuerySingleLine (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'ALTER TABLE t (UPDATE c = (1 AS a) WHERE true)'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001354238, + "rows_read": 7, + "bytes_read": 314 + } +} diff --git a/parser/testdata/03328_formatting_assignment_expression/metadata.json b/parser/testdata/03328_formatting_assignment_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03328_formatting_assignment_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03328_formatting_assignment_expression/query.sql b/parser/testdata/03328_formatting_assignment_expression/query.sql new file mode 100644 index 000000000..fbb1c3369 --- /dev/null +++ b/parser/testdata/03328_formatting_assignment_expression/query.sql @@ -0,0 +1,3 @@ +SELECT formatQuerySingleLine('ALTER TABLE t (UPDATE c = (1 AS a) WHERE true)'); +SELECT formatQuerySingleLine('ALTER TABLE t (UPDATE c = a > 1 WHERE true)'); +SELECT formatQuerySingleLine('ALTER TABLE t (UPDATE c = a IS NULL WHERE true)'); diff --git a/parser/testdata/03328_normalized_query_hash/ast.json b/parser/testdata/03328_normalized_query_hash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03328_normalized_query_hash/ast.json @@ -0,0 +1 @@ +{"error": true} diff 
--git a/parser/testdata/03328_normalized_query_hash/metadata.json b/parser/testdata/03328_normalized_query_hash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03328_normalized_query_hash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03328_normalized_query_hash/query.sql b/parser/testdata/03328_normalized_query_hash/query.sql new file mode 100644 index 000000000..86237436e --- /dev/null +++ b/parser/testdata/03328_normalized_query_hash/query.sql @@ -0,0 +1,3 @@ +SELECT normalized_query_hash AS a, normalizedQueryHash(query) AS b, a = b FROM system.processes WHERE query LIKE +'SELECT normalized_query_hash AS a, normalizedQueryHash(query) AS b, a = b FROM system.processes WHERE query LIKE%' +LIMIT 1; diff --git a/parser/testdata/03333_merge_table_total_rows_no_database_system_tables_exception/ast.json b/parser/testdata/03333_merge_table_total_rows_no_database_system_tables_exception/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03333_merge_table_total_rows_no_database_system_tables_exception/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03333_merge_table_total_rows_no_database_system_tables_exception/metadata.json b/parser/testdata/03333_merge_table_total_rows_no_database_system_tables_exception/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03333_merge_table_total_rows_no_database_system_tables_exception/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03333_merge_table_total_rows_no_database_system_tables_exception/query.sql b/parser/testdata/03333_merge_table_total_rows_no_database_system_tables_exception/query.sql new file mode 100644 index 000000000..658b89aa7 --- /dev/null +++ b/parser/testdata/03333_merge_table_total_rows_no_database_system_tables_exception/query.sql @@ -0,0 +1,15 @@ +-- Tags: no-parallel, no-replicated-database +-- ^ creates a database. 
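+-- A minimal sketch of the engine under test (assuming standard Merge semantics):
+-- Merge(db, 'regexp') proxies reads to every table in `db` whose name matches the
+-- regular expression, so the hypothetical
+--   CREATE TABLE merge_all ENGINE = Merge(test_03333, '^t$');
+-- would read only from test_03333.t, just like the `merge` table created below.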
+ +DROP DATABASE IF EXISTS test_03333; +CREATE DATABASE test_03333; +CREATE TABLE test_03333.t (x UInt8) ENGINE = Memory; +DROP TABLE IF EXISTS merge; +CREATE TABLE merge ENGINE = Merge(test_03333, 't'); +SELECT * FROM merge; +SELECT table, total_rows, total_bytes FROM system.tables WHERE database = currentDatabase() AND table = 'merge'; +DROP DATABASE test_03333; +SELECT * FROM merge; -- { serverError UNKNOWN_DATABASE } +-- Even when the database behind the merge table does not exist anymore, querying the 'total_rows' field from system.tables does not throw an exception: +SELECT table, total_rows, total_bytes FROM system.tables WHERE database = currentDatabase() AND table = 'merge'; +DROP TABLE merge; diff --git a/parser/testdata/03334_aliases_function_infinite_loop/ast.json b/parser/testdata/03334_aliases_function_infinite_loop/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03334_aliases_function_infinite_loop/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03334_aliases_function_infinite_loop/metadata.json b/parser/testdata/03334_aliases_function_infinite_loop/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03334_aliases_function_infinite_loop/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03334_aliases_function_infinite_loop/query.sql b/parser/testdata/03334_aliases_function_infinite_loop/query.sql new file mode 100644 index 000000000..ac629188d --- /dev/null +++ b/parser/testdata/03334_aliases_function_infinite_loop/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-parallel +-- ^ creates a function +SET enable_analyzer = 1; +DROP FUNCTION IF EXISTS f0; +DROP VIEW IF EXISTS v0; +CREATE FUNCTION f0 AS (x) -> toInt32((x AS c0) % 2 AS c1); +CREATE VIEW v0 AS (SELECT 0 AS c0, c0 AS c1, f0(c1) AS c2); -- { serverError UNKNOWN_IDENTIFIER } +DROP FUNCTION f0; diff --git a/parser/testdata/03339_native_reader_exact_rows/ast.json b/parser/testdata/03339_native_reader_exact_rows/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03339_native_reader_exact_rows/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03339_native_reader_exact_rows/metadata.json b/parser/testdata/03339_native_reader_exact_rows/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03339_native_reader_exact_rows/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03339_native_reader_exact_rows/query.sql b/parser/testdata/03339_native_reader_exact_rows/query.sql new file mode 100644 index 000000000..d74e44e41 --- /dev/null +++ b/parser/testdata/03339_native_reader_exact_rows/query.sql @@ -0,0 +1,16 @@ +-- Tags: long + +-- We use temporary files that use NativeReader, with a block slightly bigger +-- than a power of two; previously the allocation was rounded up to a power of 2, +-- which led to excessive memory usage.
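+-- Illustrative arithmetic (not part of the original test): per the comment below,
+-- 65536 rows need a 512KB buffer, so max_block_size = 65540 needs slightly more
+-- than 512KB; rounding that allocation up to the next power of two would allocate
+-- 1MB, nearly doubling the memory per buffer, which is the regression this query
+-- guards against.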
+ +-- This will create a temporary buffer of two columns for a total of 5 million rows +SELECT number FROM numbers(5e6) ORDER BY number * 1234567890123456789 LIMIT 4999980, 20 +SETTINGS + max_threads=1, + max_memory_usage='200Mi', + /* 65536 rows take a 512KB buffer, so use a slightly bigger value to increase overhead */ + max_block_size=65540, + max_bytes_before_external_sort='2Mi', + max_bytes_ratio_before_external_sort=0 +FORMAT Null diff --git a/parser/testdata/03340_projections_formatting/ast.json b/parser/testdata/03340_projections_formatting/ast.json new file mode 100644 index 000000000..80286d496 --- /dev/null +++ b/parser/testdata/03340_projections_formatting/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001499545, + "rows_read": 2, + "bytes_read": 61 + } +} diff --git a/parser/testdata/03340_projections_formatting/metadata.json b/parser/testdata/03340_projections_formatting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03340_projections_formatting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03340_projections_formatting/query.sql b/parser/testdata/03340_projections_formatting/query.sql new file mode 100644 index 000000000..3213b294d --- /dev/null +++ b/parser/testdata/03340_projections_formatting/query.sql @@ -0,0 +1,94 @@ +CREATE TEMPORARY TABLE test +( + `user_id` UInt64, + `item_id` UInt64, + PROJECTION order_by_item_id + ( + WITH toString(user_id) as user + SELECT user + ORDER BY user_id + ) +) +ENGINE = MergeTree ORDER BY (); +SHOW CREATE TEMPORARY test FORMAT LineAsString; +DROP TABLE test; + +CREATE TEMPORARY TABLE test +( + `user_id` UInt64, + `item_id` UInt64, + PROJECTION order_by_item_id + ( + WITH toString(user_id) as user, toString(item_id) as item + SELECT user, item + ORDER BY user_id, item_id + ) +) +ENGINE = MergeTree ORDER BY (); +SHOW CREATE TEMPORARY test FORMAT LineAsString; +DROP TABLE test; + +CREATE TEMPORARY TABLE test +( + `user_id` UInt64, + `item_id` UInt64, + PROJECTION order_by_item_id + ( + SELECT _part_offset ORDER BY item_id + ) +) +ENGINE = MergeTree ORDER BY (); +SHOW CREATE TEMPORARY test FORMAT LineAsString; +DROP TABLE test; + +CREATE TEMPORARY TABLE test +( + `user_id` UInt64, + `item_id` UInt64, + PROJECTION order_by_item_id + ( + SELECT _part_offset, user_id ORDER BY item_id + ) +) +ENGINE = MergeTree ORDER BY (); +SHOW CREATE TEMPORARY test FORMAT LineAsString; +DROP TABLE test; + +CREATE TEMPORARY TABLE test +( + `user_id` UInt64, + `item_id` UInt64, + PROJECTION order_by_item_id + ( + SELECT _part_offset ORDER BY user_id, item_id + ) +) +ENGINE = MergeTree ORDER BY (); +SHOW CREATE TEMPORARY test FORMAT LineAsString; +DROP TABLE test; + +CREATE TEMPORARY TABLE test +( + `user_id` UInt64, + `item_id` UInt64, + PROJECTION order_by_item_id + ( + SELECT user_id GROUP BY user_id + ) +) +ENGINE = MergeTree ORDER BY (); +SHOW CREATE TEMPORARY test FORMAT LineAsString; +DROP TABLE test; + +CREATE TEMPORARY TABLE test +( + `user_id` UInt64, + `item_id` UInt64, + PROJECTION order_by_item_id + ( + SELECT user_id, item_id GROUP BY user_id, item_id + ) +) +ENGINE = MergeTree ORDER BY (); +SHOW CREATE TEMPORARY test FORMAT LineAsString; +DROP TABLE test; diff --git a/parser/testdata/03340_transform_logical_error_fix/ast.json
b/parser/testdata/03340_transform_logical_error_fix/ast.json new file mode 100644 index 000000000..26837b314 --- /dev/null +++ b/parser/testdata/03340_transform_logical_error_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001242118, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03340_transform_logical_error_fix/metadata.json b/parser/testdata/03340_transform_logical_error_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03340_transform_logical_error_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03340_transform_logical_error_fix/query.sql b/parser/testdata/03340_transform_logical_error_fix/query.sql new file mode 100644 index 000000000..bc1f1b0a4 --- /dev/null +++ b/parser/testdata/03340_transform_logical_error_fix/query.sql @@ -0,0 +1,7 @@ +SET enable_analyzer = 1; + +SELECT transform(NULL, ['', ''], ['', ''], *) +FROM +( + SELECT NULL +); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03350_alter_table_fetch_partition_thread_pool/ast.json b/parser/testdata/03350_alter_table_fetch_partition_thread_pool/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03350_alter_table_fetch_partition_thread_pool/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03350_alter_table_fetch_partition_thread_pool/metadata.json b/parser/testdata/03350_alter_table_fetch_partition_thread_pool/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03350_alter_table_fetch_partition_thread_pool/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03350_alter_table_fetch_partition_thread_pool/query.sql b/parser/testdata/03350_alter_table_fetch_partition_thread_pool/query.sql new file mode 100644 index 000000000..b5e268c1f --- /dev/null +++ b/parser/testdata/03350_alter_table_fetch_partition_thread_pool/query.sql @@ -0,0 +1,19 @@ +-- Tags: no-parallel, no-replicated-database, no-shared-merge-tree +-- Tag: no-parallel - to avoid polluting FETCH PARTITION thread pool with other fetches +-- Tag: no-replicated-database - replica_path is different + +drop table if exists data1; +drop table if exists data2; + +create table data1 (key Int) engine=ReplicatedMergeTree('/tables/{database}/{table}', 'r1') order by (); +create table data2 (key Int) engine=ReplicatedMergeTree('/tables/{database}/{table}', 'r1') order by (); + +system stop merges data1; +insert into data1 select * from numbers(100) settings max_block_size=1, min_insert_block_size_rows=1; +select 'parts in data1', count() from system.parts where database = currentDatabase() and table = 'data1'; + +alter table data2 fetch partition () from '/tables/{database}/data1'; +select 'detached parts in data2', count() from system.detached_parts where database = currentDatabase() and table = 'data2'; + +system flush logs query_log; +select 'FETCH PARTITION uses multiple threads', peak_threads_usage>10 from system.query_log where event_date >= yesterday() and type != 'QueryStart' and query_kind = 'Alter' and current_database = currentDatabase(); diff --git a/parser/testdata/03350_json_parsing_quickly/ast.json b/parser/testdata/03350_json_parsing_quickly/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03350_json_parsing_quickly/ast.json @@ 
-0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03350_json_parsing_quickly/metadata.json b/parser/testdata/03350_json_parsing_quickly/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03350_json_parsing_quickly/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03350_json_parsing_quickly/query.sql b/parser/testdata/03350_json_parsing_quickly/query.sql new file mode 100644 index 000000000..6e8b9ab50 --- /dev/null +++ b/parser/testdata/03350_json_parsing_quickly/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest +-- ^ because of base64, which is only present in full builds +SELECT * FROM format(RowBinary, 'x JSON', substring(base64Decode( +'alNPTgr/DUMnJycnJycnJycnJycnJycnJycnJycnJycnJycnJycwJ////////wNuJycnJycnBQAnJycnJycnJycnJycnJycnJycnJycnJycnJycnJycnJycnJycnJycnJycnAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACcnJycnJycnJyck/0gFAA==' +), 6)) SETTINGS enable_json_type = 1, type_json_skip_duplicated_paths = 1; -- { serverError CANNOT_READ_ALL_DATA } diff --git a/parser/testdata/03351_client_insert_bad_connection_state/ast.json b/parser/testdata/03351_client_insert_bad_connection_state/ast.json new file mode 100644 index 000000000..e89575efb --- /dev/null +++ b/parser/testdata/03351_client_insert_bad_connection_state/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "InsertQuery (children 2)" + }, + { + "explain": " Literal '\/dev\/null'" + }, + { + "explain": " Function null (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'x String'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.000998192, + "rows_read": 5, + "bytes_read": 163 + } +} diff --git a/parser/testdata/03351_client_insert_bad_connection_state/metadata.json b/parser/testdata/03351_client_insert_bad_connection_state/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03351_client_insert_bad_connection_state/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03351_client_insert_bad_connection_state/query.sql b/parser/testdata/03351_client_insert_bad_connection_state/query.sql new file mode 100644 index 000000000..ebcaa57ef --- /dev/null +++ b/parser/testdata/03351_client_insert_bad_connection_state/query.sql @@ -0,0 +1,5 @@ +INSERT INTO function null('x String') FROM INFILE '/dev/null'; -- { clientError BAD_ARGUMENTS } +-- previously next query throws "Unexpected packet Query received from client." 
+SELECT 'Ok';
+-- previously, the next query hung
+SELECT 'Ok Ok';
diff --git a/parser/testdata/03352_allow_suspicious_ttl/ast.json b/parser/testdata/03352_allow_suspicious_ttl/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03352_allow_suspicious_ttl/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03352_allow_suspicious_ttl/metadata.json b/parser/testdata/03352_allow_suspicious_ttl/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03352_allow_suspicious_ttl/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03352_allow_suspicious_ttl/query.sql b/parser/testdata/03352_allow_suspicious_ttl/query.sql
new file mode 100644
index 000000000..5fd2bb3bf
--- /dev/null
+++ b/parser/testdata/03352_allow_suspicious_ttl/query.sql
@@ -0,0 +1,58 @@
+-- Tags: long, zookeeper
+
+-- Replicated
+
+SET allow_suspicious_ttl_expressions = 0;
+DROP TABLE IF EXISTS replicated_ttl_00933 SYNC;
+
+-- Create
+CREATE TABLE replicated_ttl_00933 (a Int32, d DateTime)
+    ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_ttl_00933', 'r1')
+    ORDER BY a PARTITION BY tuple() TTL now() + INTERVAL 1 second; -- { serverError BAD_ARGUMENTS }
+
+SET allow_suspicious_ttl_expressions = 1;
+
+CREATE TABLE replicated_ttl_00933 (a Int32, d DateTime)
+    ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_ttl_00933', 'r1')
+    ORDER BY a PARTITION BY tuple() TTL now() + INTERVAL 1 second;
+
+-- Alter
+SET allow_suspicious_ttl_expressions = 0;
+ALTER TABLE replicated_ttl_00933 MODIFY TTL now() + interval 1 day; -- { serverError BAD_ARGUMENTS }
+
+SET allow_suspicious_ttl_expressions = 1;
+ALTER TABLE replicated_ttl_00933 MODIFY TTL now() + interval 1 day;
+
+DROP TABLE IF EXISTS replicated_ttl_00933 SYNC;
+
+-- Column TTL
+SET allow_suspicious_ttl_expressions = 0;
+
+CREATE TABLE replicated_ttl_00933
+(
+    a Int32 TTL now() + INTERVAL 1 second,
+    d DateTime
+)
+ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_ttl_00933', 'r1')
+ORDER BY d; -- { serverError BAD_ARGUMENTS }
+
+SET allow_suspicious_ttl_expressions = 1;
+
+CREATE TABLE replicated_ttl_00933
+(
+    a Int32 TTL now() + INTERVAL 1 second,
+    d DateTime
+)
+ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/test_ttl_00933', 'r1')
+ORDER BY d;
+
+-- Alter column TTL
+SET allow_suspicious_ttl_expressions = 0;
+ALTER TABLE replicated_ttl_00933 MODIFY COLUMN a Int32 TTL now() + INTERVAL 1 day; -- { serverError BAD_ARGUMENTS }
+
+SET allow_suspicious_ttl_expressions = 1;
+ALTER TABLE replicated_ttl_00933 MODIFY COLUMN a Int32 TTL now() + INTERVAL 1 day;
+
+
+SHOW CREATE TABLE replicated_ttl_00933;
+DROP TABLE IF EXISTS replicated_ttl_00933 SYNC;
diff --git a/parser/testdata/03352_distinct_sorted_bug/ast.json b/parser/testdata/03352_distinct_sorted_bug/ast.json
new file mode 100644
index 000000000..a4a23203e
--- /dev/null
+++ b/parser/testdata/03352_distinct_sorted_bug/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery t0 (children 1)"
+        },
+        {
+            "explain": " Identifier t0"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001326783,
+        "rows_read": 2,
+        "bytes_read": 56
+    }
+}
diff --git a/parser/testdata/03352_distinct_sorted_bug/metadata.json b/parser/testdata/03352_distinct_sorted_bug/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++
b/parser/testdata/03352_distinct_sorted_bug/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03352_distinct_sorted_bug/query.sql b/parser/testdata/03352_distinct_sorted_bug/query.sql
new file mode 100644
index 000000000..0406b0bc8
--- /dev/null
+++ b/parser/testdata/03352_distinct_sorted_bug/query.sql
@@ -0,0 +1,20 @@
+DROP TABLE IF EXISTS t0;
+CREATE TABLE t0 (c0 Int) Engine = MergeTree() ORDER BY (c0);
+INSERT INTO t0 VALUES (1);
+
+-- ../src/Interpreters/AggregationCommon.h:90:35: runtime error: downcast of address 0x743320010d90 which does not point to an object of type 'const ColumnFixedSizeHelper'
+-- 0x743320010d90: note: object is of type 'DB::ColumnConst'
+-- 00 00 00 00 b8 42 24 fb 47 5d 00 00 01 00 00 00 00 00 00 00 40 28 01 20 33 74 00 00 01 00 00 00
+-- ^~~~~~~~~~~~~~~~~~~~~~~
+-- vptr for 'DB::ColumnConst'
+-- SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior ../src/Interpreters/AggregationCommon.h:90:35
+SELECT DISTINCT multiIf(1, 2, 1, materialize(toInt128(3)), 4), c0 FROM t0;
+
+DROP TABLE IF EXISTS t0__fuzz_41;
+CREATE TABLE t0__fuzz_41 (c0 DateTime) ENGINE = MergeTree ORDER BY c0;
+INSERT INTO t0__fuzz_41 FORMAT Values (1) (2) (3) (4) (5) (6) (7) (8) (9) (10);
+
+SELECT multiIf(1, 2, 1, materialize(3), 4), c0 FROM t0__fuzz_41 FORMAT Null;
+SELECT multiIf(0, 2, 1, materialize(3), 4), c0 FROM t0__fuzz_41 FORMAT Null;
+
+SELECT DISTINCT subquery_1.id, subquery_2.id FROM (SELECT 1 AS id, 2 AS value) AS subquery_1, (SELECT 3 AS id, 4) AS subquery_2;
diff --git a/parser/testdata/03352_lazy_column_filter_by_uint8/ast.json b/parser/testdata/03352_lazy_column_filter_by_uint8/ast.json
new file mode 100644
index 000000000..08269117e
--- /dev/null
+++ b/parser/testdata/03352_lazy_column_filter_by_uint8/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery t_filter (children 1)"
+        },
+        {
+            "explain": " Identifier t_filter"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001474483,
+        "rows_read": 2,
+        "bytes_read": 68
+    }
+}
diff --git a/parser/testdata/03352_lazy_column_filter_by_uint8/metadata.json b/parser/testdata/03352_lazy_column_filter_by_uint8/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03352_lazy_column_filter_by_uint8/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03352_lazy_column_filter_by_uint8/query.sql b/parser/testdata/03352_lazy_column_filter_by_uint8/query.sql
new file mode 100644
index 000000000..e90e1eb9b
--- /dev/null
+++ b/parser/testdata/03352_lazy_column_filter_by_uint8/query.sql
@@ -0,0 +1,12 @@
+DROP TABLE IF EXISTS t_filter;
+CREATE TABLE t_filter(s String, a Array(FixedString(3)), u UInt64, f UInt8)
+ENGINE = MergeTree ORDER BY u;
+
+INSERT INTO t_filter SELECT toString(number), ['foo', 'bar'], number, toUInt8(number) FROM numbers(1000);
+
+SET optimize_read_in_order = 0; -- this triggers the error
+
+SELECT * FROM t_filter WHERE f ORDER BY u LIMIT 5;
+SELECT * FROM t_filter WHERE f != 0 ORDER BY u LIMIT 5;
+
+DROP TABLE IF EXISTS t_filter;
diff --git a/parser/testdata/03353_codec_zstd_doubledelta_data_corruption/ast.json b/parser/testdata/03353_codec_zstd_doubledelta_data_corruption/ast.json
new file mode 100644
index 000000000..2dff558cf
--- /dev/null
+++ b/parser/testdata/03353_codec_zstd_doubledelta_data_corruption/ast.json
@@ -0,0 +1,25 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "Set"
+        }
], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001134706, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03353_codec_zstd_doubledelta_data_corruption/metadata.json b/parser/testdata/03353_codec_zstd_doubledelta_data_corruption/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03353_codec_zstd_doubledelta_data_corruption/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03353_codec_zstd_doubledelta_data_corruption/query.sql b/parser/testdata/03353_codec_zstd_doubledelta_data_corruption/query.sql new file mode 100644 index 000000000..4a1164e44 --- /dev/null +++ b/parser/testdata/03353_codec_zstd_doubledelta_data_corruption/query.sql @@ -0,0 +1,4 @@ +SET allow_suspicious_codecs = 1; +CREATE TABLE t0 (c0 Float64 CODEC(ZSTD, DoubleDelta)) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO TABLE t0 (c0) VALUES (NULL), (1); +SELECT c0 FROM t0; diff --git a/parser/testdata/03354_translate_crap/ast.json b/parser/testdata/03354_translate_crap/ast.json new file mode 100644 index 000000000..585070040 --- /dev/null +++ b/parser/testdata/03354_translate_crap/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function translate (alias a) (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'aAbBcC'" + }, + { + "explain": " Literal 'abc'" + }, + { + "explain": " Function toFixedString (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '12'" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Function toTypeName (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001301173, + "rows_read": 15, + "bytes_read": 560 + } +} diff --git a/parser/testdata/03354_translate_crap/metadata.json b/parser/testdata/03354_translate_crap/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03354_translate_crap/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03354_translate_crap/query.sql b/parser/testdata/03354_translate_crap/query.sql new file mode 100644 index 000000000..60037a24c --- /dev/null +++ b/parser/testdata/03354_translate_crap/query.sql @@ -0,0 +1 @@ +SELECT translate('aAbBcC', 'abc', toFixedString('12', 2)) AS a, toTypeName(a); diff --git a/parser/testdata/03355_array_join_subcolumns/ast.json b/parser/testdata/03355_array_join_subcolumns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03355_array_join_subcolumns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03355_array_join_subcolumns/metadata.json b/parser/testdata/03355_array_join_subcolumns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03355_array_join_subcolumns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03355_array_join_subcolumns/query.sql b/parser/testdata/03355_array_join_subcolumns/query.sql new file mode 100644 index 000000000..0b92966c1 --- /dev/null +++ b/parser/testdata/03355_array_join_subcolumns/query.sql @@ -0,0 +1,16 
@@ +-- Tags: no-parallel-replicas + +set enable_analyzer=1; + +drop table if exists test; +create table test (arr Array(UInt64), t Tuple(a Array(UInt32), b Array(UInt32))) engine=Memory; +insert into test select [1, 2, 3], tuple([1, 2], [1, 2, 3, 4]); + +select t.a from test array join arr; +explain query tree select t.a from test array join arr; + +select t.a from test array join t.b; +explain query tree select t.a from test array join t.b; + +drop table test; + diff --git a/parser/testdata/03355_issue_31183/ast.json b/parser/testdata/03355_issue_31183/ast.json new file mode 100644 index 000000000..326790f9e --- /dev/null +++ b/parser/testdata/03355_issue_31183/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test1 (children 3)" + }, + { + "explain": " Identifier test1" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration col (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " ColumnDeclaration col_sq (children 2)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier col" + }, + { + "explain": " Identifier col" + }, + { + "explain": " Storage definition (children 3)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001274275, + "rows_read": 18, + "bytes_read": 617 + } +} diff --git a/parser/testdata/03355_issue_31183/metadata.json b/parser/testdata/03355_issue_31183/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03355_issue_31183/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03355_issue_31183/query.sql b/parser/testdata/03355_issue_31183/query.sql new file mode 100644 index 000000000..3e2e3d766 --- /dev/null +++ b/parser/testdata/03355_issue_31183/query.sql @@ -0,0 +1,16 @@ +create table test1(col UInt64, col_sq UInt64 MATERIALIZED col*col) Engine=MergeTree partition by tuple() order by tuple(); +insert into test1 values (1),(2); + +create table test2(col UInt64) Engine=MergeTree partition by tuple() order by tuple(); +insert into test2 values (1),(2); + +SELECT t1.col, t1.col_sq +FROM test2 t2 +LEFT JOIN test1 t1 ON t1.col = t2.col +SETTINGS enable_analyzer=1; + +SELECT t1.col, t1.col_sq +FROM test2 t2 +LEFT JOIN test1 t1 ON t1.col = t2.col +SETTINGS enable_analyzer=0; -- {serverError UNKNOWN_IDENTIFIER} + diff --git a/parser/testdata/03355_issue_32743/ast.json b/parser/testdata/03355_issue_32743/ast.json new file mode 100644 index 000000000..8293e1518 --- /dev/null +++ b/parser/testdata/03355_issue_32743/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery distributor (children 3)" + }, + { + "explain": " Identifier distributor" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration id (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration name (children 1)" + }, + { + "explain": " DataType String" + }, 
+ { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier id" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.00132256, + "rows_read": 12, + "bytes_read": 425 + } +} diff --git a/parser/testdata/03355_issue_32743/metadata.json b/parser/testdata/03355_issue_32743/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03355_issue_32743/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03355_issue_32743/query.sql b/parser/testdata/03355_issue_32743/query.sql new file mode 100644 index 000000000..60b083e31 --- /dev/null +++ b/parser/testdata/03355_issue_32743/query.sql @@ -0,0 +1,14 @@ +create table distributor (id String, name String) Engine = MergeTree() order by id; +create table product (id String, name String) Engine = MergeTree() order by id; +create table sales ( + id String, + distributor String, + product String, + amount Float32 +) Engine = MergeTree() order by id; +SELECT * FROM + view( + SELECT * FROM sales + LEFT JOIN distributor ON distributor.id = sales.distributor + ) AS newSales +LEFT JOIN product ON product.id = newSales.product SETTINGS enable_analyzer=1; diff --git a/parser/testdata/03355_join_to_in_optimization/ast.json b/parser/testdata/03355_join_to_in_optimization/ast.json new file mode 100644 index 000000000..37e319d22 --- /dev/null +++ b/parser/testdata/03355_join_to_in_optimization/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001231635, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03355_join_to_in_optimization/metadata.json b/parser/testdata/03355_join_to_in_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03355_join_to_in_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03355_join_to_in_optimization/query.sql b/parser/testdata/03355_join_to_in_optimization/query.sql new file mode 100644 index 000000000..8917e616f --- /dev/null +++ b/parser/testdata/03355_join_to_in_optimization/query.sql @@ -0,0 +1,95 @@ +SET enable_analyzer = 1; +SET join_algorithm = 'hash'; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +CREATE TABLE t1 (`id` Int32, key String, key2 String) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity=8192; +CREATE TABLE t2 (`id` Int32, key String, key2 String) ENGINE = MergeTree ORDER BY id SETTINGS index_granularity=8192; +INSERT INTO t1 VALUES (1, '111', '111'),(2, '222', '2'),(2, '222', '222'),(3, '333', '333'); +INSERT INTO t2 VALUES (2, 'AAA', 'AAA'),(2, 'AAA', 'a'),(3, 'BBB', 'BBB'),(4, 'CCC', 'CCC'); + +EXPLAIN actions = 1, optimize = 1, header = 1 +SELECT t1.id +FROM t1, t2 +WHERE t1.id = t2.id +SETTINGS query_plan_use_new_logical_join_step = true, query_plan_convert_join_to_in = true; + +SELECT + t1.key, + t1.key2 +FROM t1 +ALL INNER JOIN t2 ON (t1.id = t2.id) AND (t2.key = t2.key2) +ORDER BY + t1.key ASC, + t1.key2 ASC +SETTINGS query_plan_use_new_logical_join_step = true, query_plan_convert_join_to_in = true; + +SYSTEM FLUSH LOGS system.query_log; +EXPLAIN +SELECT hostName() AS hostName +FROM system.query_log AS a +INNER JOIN system.processes AS b ON (a.query_id = b.query_id) AND (type = 'QueryStart') +WHERE current_database = currentDatabase() +SETTINGS 
query_plan_use_new_logical_join_step = true, query_plan_convert_join_to_in = true; + +SELECT dummy +FROM +( + SELECT dummy + FROM system.one +) AS a +INNER JOIN +( + SELECT dummy + FROM system.one +) AS b USING (dummy) +INNER JOIN +( + SELECT dummy + FROM system.one +) AS c USING (dummy) +SETTINGS query_plan_use_new_logical_join_step = true, query_plan_convert_join_to_in = true; + +-- check type, modified from 02988_join_using_prewhere_pushdown +SET allow_suspicious_low_cardinality_types = 1; + +DROP TABLE IF EXISTS t; +CREATE TABLE t (`id` UInt16, `u` LowCardinality(Int32), `s` LowCardinality(String)) +ENGINE = MergeTree ORDER BY id; + +INSERT INTO t VALUES (1,1,'a'),(2,2,'b'); + +SELECT + u, + s +FROM t +INNER JOIN +( + SELECT CAST(number, 'Int32') AS u + FROM numbers(10) +) AS t1 USING (u) +FORMAT Null +SETTINGS query_plan_use_new_logical_join_step = true, query_plan_convert_join_to_in = true; + +-- check filter column remove, modified from 01852_multiple_joins_with_union_join +DROP TABLE IF EXISTS v1; +DROP TABLE IF EXISTS v2; + +CREATE TABLE v1 ( id Int32 ) ENGINE = MergeTree() ORDER BY id; +CREATE TABLE v2 ( value Int32 ) ENGINE = MergeTree() ORDER BY value; + +INSERT INTO v1 ( id ) VALUES (1); +INSERT INTO v2 ( value ) VALUES (1); + +SELECT * FROM v1 AS t1 +JOIN v1 AS t2 USING (id) +CROSS JOIN v2 AS n1; + +-- from fuzzer +SELECT 10 +FROM system.query_log AS a +INNER JOIN system.processes AS b +ON (a.query_id = b.query_id) AND (a.query_id = b.query_id) +WHERE current_database = currentDatabase() +FORMAT Null +SETTINGS query_plan_use_new_logical_join_step = true, query_plan_convert_join_to_in = true; diff --git a/parser/testdata/03355_mergetree_table_disk/ast.json b/parser/testdata/03355_mergetree_table_disk/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03355_mergetree_table_disk/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03355_mergetree_table_disk/metadata.json b/parser/testdata/03355_mergetree_table_disk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03355_mergetree_table_disk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03355_mergetree_table_disk/query.sql b/parser/testdata/03355_mergetree_table_disk/query.sql new file mode 100644 index 000000000..6867cd529 --- /dev/null +++ b/parser/testdata/03355_mergetree_table_disk/query.sql @@ -0,0 +1,40 @@ +-- Tags: no-parallel, no-fasttest +-- Tag no-parallel - uses external data source +-- Tag no-fasttest - requires SSL for https + +DROP TABLE IF EXISTS uk_price_paid; + +-- table_disk is supported only by s3_plain/s3_plain_rewritable/web +CREATE TABLE test_table_disk_requires_disk (key Int) ENGINE=MergeTree ORDER BY () SETTINGS table_disk=1; -- { serverError BAD_ARGUMENTS } +CREATE TABLE test_table_disk_requires_proper_disk (key Int) ENGINE=MergeTree ORDER BY () SETTINGS disk='default', table_disk=1; -- { serverError BAD_ARGUMENTS } + +CREATE TABLE uk_price_paid +( + price UInt32, + date Date, + postcode1 LowCardinality(String), + postcode2 LowCardinality(String), + type Enum8('other' = 0, 'terraced' = 1, 'semi-detached' = 2, 'detached' = 3, 'flat' = 4), + is_new UInt8, + duration Enum8('unknown' = 0, 'freehold' = 1, 'leasehold' = 2), + addr1 String, + addr2 String, + street LowCardinality(String), + locality LowCardinality(String), + town LowCardinality(String), + district LowCardinality(String), + county LowCardinality(String) +) +ENGINE = MergeTree +ORDER BY (postcode1, postcode2, addr1, 
addr2)
+SETTINGS disk = disk(type = web, endpoint = 'https://raw.githubusercontent.com/ClickHouse/web-tables-demo/main/web/store/cf7/cf712b4f-2ca8-435c-ac23-c4393efe52f7/'), table_disk=1;
+SELECT count() FROM uk_price_paid;
+
+ALTER TABLE uk_price_paid MODIFY SETTING table_disk = 0; -- { serverError TABLE_IS_READ_ONLY }
+
+-- drop does not hang
+DROP TABLE uk_price_paid;
+
+-- now let's ensure that the table_disk is immutable
+CREATE TABLE test_table_disk_is_immutable (key Int) ENGINE=MergeTree ORDER BY tuple();
+ALTER TABLE test_table_disk_is_immutable MODIFY SETTING table_disk = 1; -- { serverError READONLY_SETTING }
diff --git a/parser/testdata/03356_analyzer_qualified_matcher_error/ast.json b/parser/testdata/03356_analyzer_qualified_matcher_error/ast.json
new file mode 100644
index 000000000..5790e0533
--- /dev/null
+++ b/parser/testdata/03356_analyzer_qualified_matcher_error/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "CreateQuery test_table (children 1)"
+        },
+        {
+            "explain": " Identifier test_table"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.000992681,
+        "rows_read": 2,
+        "bytes_read": 73
+    }
+}
diff --git a/parser/testdata/03356_analyzer_qualified_matcher_error/metadata.json b/parser/testdata/03356_analyzer_qualified_matcher_error/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03356_analyzer_qualified_matcher_error/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03356_analyzer_qualified_matcher_error/query.sql b/parser/testdata/03356_analyzer_qualified_matcher_error/query.sql
new file mode 100644
index 000000000..c659857b7
--- /dev/null
+++ b/parser/testdata/03356_analyzer_qualified_matcher_error/query.sql
@@ -0,0 +1,32 @@
+CREATE TABLE test_table
+(
+    `smt` String
+)
+ENGINE = MergeTree
+ORDER BY smt;
+
+WITH
+    statement1 AS
+    (
+        SELECT '' AS name
+        FROM test_table
+    ),
+    statement2 AS
+    (
+        SELECT '' AS name
+        FROM test_table
+    )
+SELECT
+    statement1.*,
+    statement2.*; -- { serverError UNKNOWN_IDENTIFIER }
+
+WITH
+    t as (SELECT sum(number) as x FROM numbers(10))
+SELECT t.*; -- { serverError UNKNOWN_IDENTIFIER }
+
+WITH a AS (SELECT 1) SELECT a.* FROM (SELECT 1 FROM a); -- { serverError UNKNOWN_IDENTIFIER }
+
+WITH
+    t as (SELECT sum(number) as x FROM numbers(10)),
+    t1 as (SELECT t.* FROM numbers(1))
+SELECT * FROM t1; -- { serverError UNKNOWN_IDENTIFIER }
diff --git a/parser/testdata/03356_analyzer_unused_scalar_subquery/ast.json b/parser/testdata/03356_analyzer_unused_scalar_subquery/ast.json
new file mode 100644
index 000000000..c20c76c75
--- /dev/null
+++ b/parser/testdata/03356_analyzer_unused_scalar_subquery/ast.json
@@ -0,0 +1,25 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "Set"
+        }
+    ],
+
+    "rows": 1,
+
+    "statistics":
+    {
+        "elapsed": 0.00093481,
+        "rows_read": 1,
+        "bytes_read": 11
+    }
+}
diff --git a/parser/testdata/03356_analyzer_unused_scalar_subquery/metadata.json b/parser/testdata/03356_analyzer_unused_scalar_subquery/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++
b/parser/testdata/03356_analyzer_unused_scalar_subquery/query.sql @@ -0,0 +1,16 @@ +set enable_analyzer = 1; + +WITH ( + SELECT sleepEachRow(3) + ) AS res +SELECT * +FROM system.one +FORMAT Null +SETTINGS max_execution_time = 2; + +WITH sleepEachRow(3) AS res +SELECT * +FROM system.one +FORMAT Null +SETTINGS max_execution_time = 2; + diff --git a/parser/testdata/03356_array_join_subcolumns_indexes/ast.json b/parser/testdata/03356_array_join_subcolumns_indexes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03356_array_join_subcolumns_indexes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03356_array_join_subcolumns_indexes/metadata.json b/parser/testdata/03356_array_join_subcolumns_indexes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03356_array_join_subcolumns_indexes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03356_array_join_subcolumns_indexes/query.sql b/parser/testdata/03356_array_join_subcolumns_indexes/query.sql new file mode 100644 index 000000000..85c93776c --- /dev/null +++ b/parser/testdata/03356_array_join_subcolumns_indexes/query.sql @@ -0,0 +1,15 @@ +-- Tags: no-parallel-replicas + +set enable_json_type=1; +set allow_experimental_variant_type=1; +set use_variant_as_common_type=1; +set enable_analyzer=1; + +drop table if exists test; +create table test (json JSON(a Array(UInt32), b Array(UInt32), c UInt32), index idx1 json.a type set(0), index idx2 json.c type minmax) engine=MergeTree order by tuple() settings index_granularity=1; +insert into test select toJSONString(map('a', range(number % 3 + 1), 'b', range(number % 2 + 1), 'c', number)) from numbers(10); + +select json.a from test array join json.b where has(json.a, 2); +explain indexes=1 select json.a from test array join json.b where has(json.a, 2) and json.c < 5; + +drop table test; diff --git a/parser/testdata/03356_postgresql_mysql_endpoint_parsing/ast.json b/parser/testdata/03356_postgresql_mysql_endpoint_parsing/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03356_postgresql_mysql_endpoint_parsing/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03356_postgresql_mysql_endpoint_parsing/metadata.json b/parser/testdata/03356_postgresql_mysql_endpoint_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03356_postgresql_mysql_endpoint_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03356_postgresql_mysql_endpoint_parsing/query.sql b/parser/testdata/03356_postgresql_mysql_endpoint_parsing/query.sql new file mode 100644 index 000000000..8336a82d8 --- /dev/null +++ b/parser/testdata/03356_postgresql_mysql_endpoint_parsing/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest +-- We test only that parsing of the endpoint works in this test. 
+DROP TABLE IF EXISTS tablefunc01; +DROP TABLE IF EXISTS tablefunc02; +CREATE TABLE tablefunc01 (x int) AS postgresql('localhost:9005/postgresql', 'postgres_db', 'postgres_table', 'postgres_user', '124444'); +CREATE TABLE tablefunc02 (x int) AS mysql('127.0.0.1:9004/mysql', 'mysql_db', 'mysql_table', 'mysql_user','123123'); diff --git a/parser/testdata/03356_pull_entry_before_detach_part/ast.json b/parser/testdata/03356_pull_entry_before_detach_part/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03356_pull_entry_before_detach_part/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03356_pull_entry_before_detach_part/metadata.json b/parser/testdata/03356_pull_entry_before_detach_part/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03356_pull_entry_before_detach_part/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03356_pull_entry_before_detach_part/query.sql b/parser/testdata/03356_pull_entry_before_detach_part/query.sql new file mode 100644 index 000000000..fc1f18b26 --- /dev/null +++ b/parser/testdata/03356_pull_entry_before_detach_part/query.sql @@ -0,0 +1,16 @@ +-- Tags: no-fasttest, no-parallel + +-- Forbid fault injection to avoid part name randomization, since we rely on it +SET insert_keeper_fault_injection_probability=0; + +DROP TABLE IF EXISTS t1 SYNC; + +CREATE TABLE t1 (x UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_03356/t1', '1') ORDER BY tuple(); + +SYSTEM STOP PULLING REPLICATION LOG t1; + +INSERT INTO t1 VALUES (1); + +SYSTEM START PULLING REPLICATION LOG t1; + +ALTER TABLE t1 DETACH PART 'all_0_0_0'; diff --git a/parser/testdata/03356_tables_with_binary_identifiers_invalid_utf8/ast.json b/parser/testdata/03356_tables_with_binary_identifiers_invalid_utf8/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03356_tables_with_binary_identifiers_invalid_utf8/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03356_tables_with_binary_identifiers_invalid_utf8/metadata.json b/parser/testdata/03356_tables_with_binary_identifiers_invalid_utf8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03356_tables_with_binary_identifiers_invalid_utf8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03356_tables_with_binary_identifiers_invalid_utf8/query.sql b/parser/testdata/03356_tables_with_binary_identifiers_invalid_utf8/query.sql new file mode 100644 index 000000000..a4257b3ab --- /dev/null +++ b/parser/testdata/03356_tables_with_binary_identifiers_invalid_utf8/query.sql @@ -0,0 +1,11 @@ +-- Tags: no-random-merge-tree-settings +DROP TABLE IF EXISTS test; +CREATE TABLE test (`\xFF\0привет` UInt8) ENGINE = MergeTree ORDER BY `\xFF\0привет` COMMENT '\0'; + +INSERT INTO test VALUES (123); +SELECT * FROM test; +DETACH TABLE test; +ATTACH TABLE test; + +SELECT * FROM test; +DROP TABLE test; diff --git a/parser/testdata/03356_threshold_for_parallel_hash/ast.json b/parser/testdata/03356_threshold_for_parallel_hash/ast.json new file mode 100644 index 000000000..9a1ae4f40 --- /dev/null +++ b/parser/testdata/03356_threshold_for_parallel_hash/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery lhs (children 3)" + }, + { + "explain": " Identifier lhs" + }, + { + "explain": " Columns definition (children 1)" + }, + { + 
"explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration a (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001418103, + "rows_read": 10, + "bytes_read": 336 + } +} diff --git a/parser/testdata/03356_threshold_for_parallel_hash/metadata.json b/parser/testdata/03356_threshold_for_parallel_hash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03356_threshold_for_parallel_hash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03356_threshold_for_parallel_hash/query.sql b/parser/testdata/03356_threshold_for_parallel_hash/query.sql new file mode 100644 index 000000000..26e433bb4 --- /dev/null +++ b/parser/testdata/03356_threshold_for_parallel_hash/query.sql @@ -0,0 +1,77 @@ +create table lhs(a UInt64) Engine=MergeTree order by (); +create table rhs(a UInt64) Engine=MergeTree order by (); + +insert into lhs select * from numbers_mt(1e5); +insert into rhs select * from numbers_mt(1e6); + +set enable_parallel_replicas = 0; -- join optimization (and table size estimation) disabled with parallel replicas +set enable_analyzer = 1, use_query_condition_cache = 0; + +set join_algorithm = 'direct,parallel_hash,hash'; -- default +set parallel_hash_join_threshold = 100001; + +-- Tables should be swapped; the new right table is below the threshold - use HashJoin +select trimBoth(explain) +from ( + explain actions=1 select * from lhs t0 join rhs t1 on t0.a = t1.a settings query_plan_join_swap_table = 'auto' +) +where explain ilike '%Algorithm%'; + +-- Tables were not swapped; the right table is above the threshold - use ConcurrentHashJoin +select trimBoth(explain) +from ( + explain actions=1 select * from lhs t0 join rhs t1 on t0.a = t1.a settings query_plan_join_swap_table = false +) +where explain ilike '%Algorithm%'; + +-- Check estimations obtained from the cache +-- Tables should be swapped; the new right table is below the threshold - use HashJoin +select trimBoth(explain) +from ( + explain actions=1 select * from lhs t0 join rhs t1 on t0.a = t1.a settings query_plan_join_swap_table = true +) +where explain ilike '%Algorithm%'; + +-- Same queries but we cannot do fallback to `hash` +set join_algorithm = 'parallel_hash'; + +select trimBoth(explain) +from ( + explain actions=1 select * from lhs t0 join rhs t1 on t0.a = t1.a settings query_plan_join_swap_table = 'auto' +) +where explain ilike '%Algorithm%'; + +select trimBoth(explain) +from ( + explain actions=1 select * from lhs t0 join rhs t1 on t0.a = t1.a settings query_plan_join_swap_table = false +) +where explain ilike '%Algorithm%'; + +select trimBoth(explain) +from ( + explain actions=1 select * from lhs t0 join rhs t1 on t0.a = t1.a settings query_plan_join_swap_table = true +) +where explain ilike '%Algorithm%'; + +set join_algorithm = 'direct,parallel_hash,hash'; -- default + +-- Check estimations obtained from the cache + +-- Right table is big, regardless of cardinality of join key, we should use ConcurrentHashJoin +select * from lhs t0 join (select a % 10000 as a from rhs) t1 on t0.a = t1.a settings query_plan_join_swap_table = false format Null; + +select trimBoth(explain) +from ( + explain actions=1 select * from lhs t0 join (select a % 10000 as a from rhs) t1 on t0.a = t1.a settings 
query_plan_join_swap_table = false +) +where explain ilike '%Algorithm%'; + +-- Right table is big, but only a small fraction of rows reaches the join - use HashJoin +select * from lhs t0 join rhs t1 on t0.a = t1.a where t1.a < 10000 settings query_plan_join_swap_table = false format Null; + +select trimBoth(explain) +from ( + explain actions=1 select * from lhs t0 join rhs t1 on t0.a = t1.a where t1.a < 10000 settings query_plan_join_swap_table = false +) +where explain ilike '%Algorithm%'; + diff --git a/parser/testdata/03357_analyzer_insert_view/ast.json b/parser/testdata/03357_analyzer_insert_view/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03357_analyzer_insert_view/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03357_analyzer_insert_view/metadata.json b/parser/testdata/03357_analyzer_insert_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03357_analyzer_insert_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03357_analyzer_insert_view/query.sql b/parser/testdata/03357_analyzer_insert_view/query.sql new file mode 100644 index 000000000..7f18a48ff --- /dev/null +++ b/parser/testdata/03357_analyzer_insert_view/query.sql @@ -0,0 +1,48 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/65981 +SET allow_experimental_analyzer = 1; +DROP TABLE IF EXISTS input; +DROP TABLE IF EXISTS deduplicate; +DROP TABLE IF EXISTS deduplicate_mv; +DROP TABLE IF EXISTS event; + +CREATE TABLE input (json_message String) ENGINE = MergeTree ORDER BY json_message; + +CREATE TABLE deduplicate +( + `id` UInt64 +) +ENGINE = MergeTree +ORDER BY (id); + +CREATE TABLE event +( + `id` UInt64 +) +ENGINE = MergeTree +ORDER BY (id); + +CREATE MATERIALIZED VIEW deduplicate_mv TO deduplicate +AS +WITH event AS + ( + SELECT + JSONExtract(json_message, 'id', 'Nullable(UInt64)') AS id + FROM input + WHERE (id IS NOT NULL) + ) +SELECT DISTINCT * +FROM event +WHERE id NOT IN +( + SELECT id + FROM deduplicate + WHERE id IN + ( + SELECT id + FROM event + ) +); + +INSERT INTO input VALUES ('{"id":5}'); + +SELECT * FROM deduplicate_mv FORMAT Null; diff --git a/parser/testdata/03357_arraySymmetricDifference/ast.json b/parser/testdata/03357_arraySymmetricDifference/ast.json new file mode 100644 index 000000000..5b97a07bc --- /dev/null +++ b/parser/testdata/03357_arraySymmetricDifference/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Negative tests'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001192176, + "rows_read": 5, + "bytes_read": 185 + } +} diff --git a/parser/testdata/03357_arraySymmetricDifference/metadata.json b/parser/testdata/03357_arraySymmetricDifference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03357_arraySymmetricDifference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03357_arraySymmetricDifference/query.sql b/parser/testdata/03357_arraySymmetricDifference/query.sql new file mode 100644 index 000000000..f71fd78e6 --- /dev/null +++ b/parser/testdata/03357_arraySymmetricDifference/query.sql @@ -0,0 +1,59 @@ +SELECT 'Negative tests'; +SELECT 
arraySymmetricDifference(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT arraySymmetricDifference(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arraySymmetricDifference(1, 2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arraySymmetricDifference(1, [1, 2]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arraySymmetricDifference([1, 2], 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 'Const arguments'; +SELECT arraySort(arraySymmetricDifference([])); +SELECT arraySort(arraySymmetricDifference([1, 2])); +SELECT arraySort(arraySymmetricDifference([1, 2], [1, 3])); +SELECT arraySort(arraySymmetricDifference(['a', 'b'], ['a', 'c'])); +SELECT arraySort(arraySymmetricDifference([1, NULL], [1, 3])); +SELECT arraySort(arraySymmetricDifference([1, NULL], [NULL, 3])); +SELECT arraySort(arraySymmetricDifference([1, 1], [1, 1])); +SELECT arraySort(arraySymmetricDifference([1, 2], [1, 2])); +SELECT arraySort(arraySymmetricDifference([1, 2], [1, 2], [1, 2])); +SELECT arraySort(arraySymmetricDifference([1, 2], [1, 2], [1, 3])); + +SELECT toTypeName(arraySymmetricDifference([(1, ['a', 'b']), (Null, ['c'])], [(2, ['c', Null]), (1, ['a', 'b'])])); + +SELECT 'Non-const arguments'; +WITH + materialize([(1, ['a', 'b']), (NULL, ['c'])]) AS f, + materialize([(2, ['c', NULL]), (1, ['a', 'b'])]) AS s +SELECT arraySort(arraySymmetricDifference(f, s)); +WITH + materialize([(1, ['a', 'b']::Array(LowCardinality(String))), (NULL, ['c']::Array(LowCardinality(String)))]) AS f, + materialize([(2, ['c', NULL]::Array(LowCardinality(Nullable(String)))), (1, ['a', 'b']::Array(LowCardinality(String)))]) AS s +SELECT arraySort(arraySymmetricDifference(f, s)); + +-- Table with batch inserts +DROP TABLE IF EXISTS test_arraySymmetricDifference; +CREATE TABLE test_arraySymmetricDifference +( + `id` Int8, + `arr1` Array(String), + `arr2` Array(String) +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO test_arraySymmetricDifference +VALUES +(1, ['1'], ['2']), +(2, ['2'], ['2']), +(3, ['3'], ['3', '2']), +(4, ['4'], ['1']), +(5, ['5'], []), +(6, ['6', '4'], ['5', '6']), +(7, ['7', '0'], []), +(8, ['8', '9', '10'], []), +(9, ['9'], ['-1']), +(10, ['10'], ['5']); + +SELECT + ta.id AS id, + arraySort(arraySymmetricDifference(ta.arr1, ta.arr2)) AS symmetricDifference +FROM test_arraySymmetricDifference ta; diff --git a/parser/testdata/03357_block_structure_union_step/ast.json b/parser/testdata/03357_block_structure_union_step/ast.json new file mode 100644 index 000000000..a26da2900 --- /dev/null +++ b/parser/testdata/03357_block_structure_union_step/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery pk_block_union (children 3)" + }, + { + "explain": " Identifier pk_block_union" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " ColumnDeclaration d (children 2)" + }, + { + "explain": " DataType Date" + }, + { + "explain": " Literal '2000-01-01'" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType DateTime" + }, + { + "explain": " ColumnDeclaration y (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " ColumnDeclaration z (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 3)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " 
Identifier d" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function toStartOfMinute (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Identifier z" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001255819, + "rows_read": 24, + "bytes_read": 836 + } +} diff --git a/parser/testdata/03357_block_structure_union_step/metadata.json b/parser/testdata/03357_block_structure_union_step/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03357_block_structure_union_step/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03357_block_structure_union_step/query.sql b/parser/testdata/03357_block_structure_union_step/query.sql new file mode 100644 index 000000000..8ea906da3 --- /dev/null +++ b/parser/testdata/03357_block_structure_union_step/query.sql @@ -0,0 +1,5 @@ +CREATE TABLE pk_block_union (d Date DEFAULT '2000-01-01', x DateTime, y UInt64, z UInt64) ENGINE = MergeTree() PARTITION BY d ORDER BY (toStartOfMinute(x), y, z); + +INSERT INTO pk_block_union (x, y, z) VALUES (1, 11, 1235), (2, 11, 4395), (3, 22, 3545), (4, 22, 6984), (5, 33, 4596), (61, 11, 4563), (62, 11, 4578), (63, 11, 3572), (64, 22, 5786), (65, 22, 5786), (66, 22, 2791), (67, 22, 2791), (121, 33, 2791), (122, 33, 2791), (123, 33, 1235), (124, 44, 4935), (125, 44, 4578), (126, 55, 5786), (127, 55, 2791), (128, 55, 1235); + +SELECT cityHash64(1, (x = 3) AND (y = 44), '0', 1, 1, 1, 1, 1, 1, 1, toLowCardinality(1), 1, 1, 1, 1, 1, toNullable(toUInt256(1)), 1, 1, 1, toUInt128(1), 1, toLowCardinality(toUInt128(1)), 1, 1), cityHash64('0', 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, toUInt128(1), 1, 1, toLowCardinality(1), 1, 1, toLowCardinality(1), toLowCardinality(1), 1, 1, 1, 1, 1), * FROM pk_block_union WHERE (x = 3) AND (y = 44) ORDER BY ALL DESC; diff --git a/parser/testdata/03357_check_contraints_null_syntax/ast.json b/parser/testdata/03357_check_contraints_null_syntax/ast.json new file mode 100644 index 000000000..e4f9848fc --- /dev/null +++ b/parser/testdata/03357_check_contraints_null_syntax/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001492692, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03357_check_contraints_null_syntax/metadata.json b/parser/testdata/03357_check_contraints_null_syntax/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03357_check_contraints_null_syntax/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03357_check_contraints_null_syntax/query.sql b/parser/testdata/03357_check_contraints_null_syntax/query.sql new file mode 100644 index 000000000..a44be08bc --- /dev/null +++ b/parser/testdata/03357_check_contraints_null_syntax/query.sql @@ -0,0 +1,12 @@ +SET enable_analyzer = 1; + +CREATE TABLE mister_table +( + c0 Nullable(Int), + CONSTRAINT c1 check c0.null +) +ENGINE = Memory(); + +CREATE TABLE mister_table_2 (c0 Nullable(Int)) ENGINE = MergeTree() ORDER BY tuple(); +CREATE MATERIALIZED VIEW mister_view ENGINE = MergeTree() ORDER BY tuple() AS (SELECT mister_table_2.c0.null AS c0 FROM mister_table_2); +INSERT INTO mister_table_2 VALUES (1); diff --git 
a/parser/testdata/03357_jit_strikes_again/ast.json b/parser/testdata/03357_jit_strikes_again/ast.json new file mode 100644 index 000000000..c6389c655 --- /dev/null +++ b/parser/testdata/03357_jit_strikes_again/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001444433, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03357_jit_strikes_again/metadata.json b/parser/testdata/03357_jit_strikes_again/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03357_jit_strikes_again/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03357_jit_strikes_again/query.sql b/parser/testdata/03357_jit_strikes_again/query.sql new file mode 100644 index 000000000..811e56a00 --- /dev/null +++ b/parser/testdata/03357_jit_strikes_again/query.sql @@ -0,0 +1,14 @@ +SET compile_expressions = true, min_count_to_compile_expression = 1; + +CREATE TABLE data2013 (name String, value UInt32) ENGINE = Memory; +CREATE TABLE data2014 (name String, value UInt32) ENGINE = Memory; + +INSERT INTO data2013(name,value) VALUES('Alice', 1000); +INSERT INTO data2013(name,value) VALUES('Bob', 2000); +INSERT INTO data2013(name,value) VALUES('Carol', 5000); + +INSERT INTO data2014(name,value) VALUES('Alice', 2000); +INSERT INTO data2014(name,value) VALUES('Bob', 2000); +INSERT INTO data2014(name,value) VALUES('Dennis', 35000); + +SELECT arraySplit(x -> ((x % toNullable(2)) = 1), [2]), nn FROM (SELECT name AS nn, value AS vv FROM data2013 UNION ALL SELECT name AS nn, value AS vv FROM data2014) ORDER BY tuple('Nullable(String)', 16, toNullable(16), materialize(16)) DESC, tuple(toLowCardinality('9279104477'), toNullable(10), 10, 10, 10, 10, 10, toUInt128(10), 10, 10, 10, 10, 10, 10, 10, 10, 10, 10) DESC, nn ASC NULLS FIRST, vv ASC NULLS FIRST; diff --git a/parser/testdata/03357_join_pk_sharding/ast.json b/parser/testdata/03357_join_pk_sharding/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03357_join_pk_sharding/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03357_join_pk_sharding/metadata.json b/parser/testdata/03357_join_pk_sharding/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03357_join_pk_sharding/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03357_join_pk_sharding/query.sql b/parser/testdata/03357_join_pk_sharding/query.sql new file mode 100644 index 000000000..a40eb546a --- /dev/null +++ b/parser/testdata/03357_join_pk_sharding/query.sql @@ -0,0 +1,68 @@ +-- Tags: long + +SET allow_statistics_optimize = 0; +drop table if exists tab_l; +drop table if exists tab_m; +drop table if exists tab_r; + +create table tab_l (a UInt32, b UInt32, c UInt32, d UInt32) engine = MergeTree order by (a * 2, b + c); +create table tab_m (a UInt32, b UInt32, c UInt32, d UInt32) engine = MergeTree order by (c + d, b * 2); +create table tab_r (a UInt32, b UInt32, c UInt32, d UInt32) engine = MergeTree order by (a * 2, c * 2); + +insert into tab_l select number, number, number, number from numbers(1e6); +insert into tab_m select number, number, number, number from numbers(1e6); +insert into tab_r select number, number, number, number from numbers(1e6); + +--select explain e from (explain actions = 1 ) +--where e like '%ReadFromMergeTree%' or e like '%Expression%' or e like 
'%Join%' or e like '%Clauses%' or e like '%Sharding%';
+
+set enable_analyzer=1;
+set query_plan_join_swap_table=0;
+set query_plan_join_shard_by_pk_ranges=1;
+set allow_experimental_parallel_reading_from_replicas=0;
+
+-- { echoOn }
+
+-- two tables
+select * from tab_l l inner join tab_m m on l.a * 2 = m.c + m.d and l.d = m.a and l.b + l.c = m.b * 2 order by l.a limit 10 offset 999990;
+
+select explain e from (explain actions = 1 select * from tab_l l inner join tab_m m on l.a * 2 = m.c + m.d and l.b + l.c = m.b * 2)
+where e like '%ReadFromMergeTree%' or e like '%Expression%' or e like '%Join%' or e like '%Clauses%' or e like '%Sharding%';
+
+-- three tables
+select * from tab_l l inner join tab_m m on l.a * 2 = m.c + m.d and l.d = m.a and l.b + l.c = m.b * 2 inner join tab_r r on l.a * 2 = r.a * 2 and l.b + l.c = r.c * 2 and l.d = r.d order by l.a limit 10 offset 999990;
+
+select explain e from (explain actions = 1 select * from tab_l l inner join tab_m m on l.a * 2 = m.c + m.d and l.d = m.a and l.b + l.c = m.b * 2 inner join tab_r r on l.a * 2 = r.a * 2 and l.b + l.c = r.c * 2 and l.d = r.d)
+where e like '%ReadFromMergeTree%' or e like '%Expression%' or e like '%Join%' or e like '%Clauses%' or e like '%Sharding%';
+
+--- three tables, where m table matches one key, so that r table can match only one key as well
+select * from tab_l l inner join tab_m m on l.a * 2 = m.c + m.d and l.d = m.a inner join tab_r r on l.a * 2 = r.a * 2 and l.b + l.c = r.c * 2 and l.d = r.d order by l.a limit 10 offset 999990;
+
+select explain e from (explain actions = 1 select * from tab_l l inner join tab_m m on l.a * 2 = m.c + m.d and l.d = m.a inner join tab_r r on l.a * 2 = r.a * 2 and l.b + l.c = r.c * 2 and l.d = r.d)
+where e like '%ReadFromMergeTree%' or e like '%Expression%' or e like '%Join%' or e like '%Clauses%' or e like '%Sharding%';
+
+--- three tables, right table matches one key
+select * from tab_l l inner join tab_m m on l.a * 2 = m.c + m.d and l.d = m.a and l.b + l.c = m.b * 2 inner join tab_r r on l.a * 2 = r.a * 2 and l.d = r.d order by l.a limit 10 offset 999990;
+
+select explain e from (explain actions = 1 select * from tab_l l inner join tab_m m on l.a * 2 = m.c + m.d and l.d = m.a and l.b + l.c = m.b * 2 inner join tab_r r on l.a * 2 = r.a * 2 and l.d = r.d)
+where e like '%ReadFromMergeTree%' or e like '%Expression%' or e like '%Join%' or e like '%Clauses%' or e like '%Sharding%';
+
+--- three tables, tab_m table matches nothing, so right table can match both keys
+select * from tab_l l inner join tab_m m on l.d = m.a inner join tab_r r on l.a * 2 = r.a * 2 and l.b + l.c = r.c * 2 and l.d = r.d order by l.a limit 10 offset 999990;
+
+select explain e from (explain actions = 1 select * from tab_l l inner join tab_m m on l.d = m.a inner join tab_r r on l.a * 2 = r.a * 2 and l.b + l.c = r.c * 2 and l.d = r.d)
+where e like '%ReadFromMergeTree%' or e like '%Expression%' or e like '%Join%' or e like '%Clauses%' or e like '%Sharding%';
+
+set join_use_nulls=1;
+
+-- two tables
+select * from tab_l l right join tab_m m on l.a * 2 = m.c + m.d and l.d = m.a and l.b + l.c = m.b * 2 order by l.a limit 10 offset 999990;
+
+select explain e from (explain actions = 1 select * from tab_l l right join tab_m m on l.a * 2 = m.c + m.d and l.b + l.c = m.b * 2)
+where e like '%ReadFromMergeTree%' or e like '%Expression%' or e like '%Join%' or e like '%Clauses%' or e like '%Sharding%';
+
+-- three tables
+select * from tab_l l left join tab_m m on l.a * 2 = m.c + m.d and l.d = m.a and l.b + l.c =
m.b * 2 left join tab_r r on l.a * 2 = r.a * 2 and l.b + l.c = r.c * 2 and l.d = r.d order by l.a limit 10 offset 999990; + +select explain e from (explain actions = 1 select * from tab_l l left join tab_m m on l.a * 2 = m.c + m.d and l.d = m.a and l.b + l.c = m.b * 2 left join tab_r r on l.a * 2 = r.a * 2 and l.b + l.c = r.c * 2 and l.d = r.d) +where e like '%ReadFromMergeTree%' or e like '%Expression%' or e like '%Join%' or e like '%Clauses%' or e like '%Sharding%'; diff --git a/parser/testdata/03357_recursive_cte_no_logical_error/ast.json b/parser/testdata/03357_recursive_cte_no_logical_error/ast.json new file mode 100644 index 000000000..a50745462 --- /dev/null +++ b/parser/testdata/03357_recursive_cte_no_logical_error/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001458894, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03357_recursive_cte_no_logical_error/metadata.json b/parser/testdata/03357_recursive_cte_no_logical_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03357_recursive_cte_no_logical_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03357_recursive_cte_no_logical_error/query.sql b/parser/testdata/03357_recursive_cte_no_logical_error/query.sql new file mode 100644 index 000000000..a56177223 --- /dev/null +++ b/parser/testdata/03357_recursive_cte_no_logical_error/query.sql @@ -0,0 +1,40 @@ +SET enable_analyzer=1; + +CREATE TABLE department__fuzz_0 +( + `id` UInt64, + `parent_department` Decimal(76, 43), + `name` String +) +ENGINE = TinyLog; + +INSERT INTO department__fuzz_0 FORMAT Values (0, NULL, 'ROOT'); + +-- Actually anything except LOGICAL_ERROR is Ok. 
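+-- For reference, a minimal recursive CTE of the same shape, assuming only the
+-- department__fuzz_0 table created above:
+--   WITH RECURSIVE r AS (SELECT id FROM department__fuzz_0 UNION ALL SELECT id + 1 FROM r WHERE id < 3)
+--   SELECT * FROM r;
+-- The fuzzed query below nests two recursive CTEs and mixes incompatible types,
+-- so NO_COMMON_TYPE is the expected (and acceptable) outcome.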
+WITH RECURSIVE q AS +( + SELECT * + FROM department__fuzz_0 + UNION ALL + ( + WITH RECURSIVE x AS + ( + SELECT * + FROM department__fuzz_0 + UNION ALL + ( + SELECT * + FROM q + WHERE least(toFixedString('world', 5), 5, 5, inf, 58, nan, NULL) + UNION ALL + SELECT * + FROM x + WHERE sipHash128(toLowCardinality('world'), toLowCardinality(materialize(5)), toUInt128(greatest(1, nan, NULL), toUInt128(5)), toUInt128(5), 5, toUInt128(5), materialize(5)) + ) + ) + SELECT * + FROM x + ) +) +SELECT 1 +FROM q; -- { serverError NO_COMMON_TYPE } diff --git a/parser/testdata/03357_storage_join_mv_context/ast.json b/parser/testdata/03357_storage_join_mv_context/ast.json new file mode 100644 index 000000000..9dc7cacb0 --- /dev/null +++ b/parser/testdata/03357_storage_join_mv_context/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery destination_join (children 3)" + }, + { + "explain": " Identifier destination_join" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " ColumnDeclaration key (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration id (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration color (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration section (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration description (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Join (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier ANY" + }, + { + "explain": " Identifier LEFT" + }, + { + "explain": " Identifier key" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.00153661, + "rows_read": 20, + "bytes_read": 728 + } +} diff --git a/parser/testdata/03357_storage_join_mv_context/metadata.json b/parser/testdata/03357_storage_join_mv_context/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03357_storage_join_mv_context/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03357_storage_join_mv_context/query.sql b/parser/testdata/03357_storage_join_mv_context/query.sql new file mode 100644 index 000000000..1bf17d4bf --- /dev/null +++ b/parser/testdata/03357_storage_join_mv_context/query.sql @@ -0,0 +1,4 @@ +CREATE TABLE destination_join ( `key` String, `id` String, `color` String, `section` String, `description` String) ENGINE = Join(ANY, LEFT, key); +CREATE TABLE destination_set (`key` String) ENGINE = Set; +CREATE MATERIALIZED VIEW mv_to_set TO `destination_set` AS SELECT key FROM destination_join; +INSERT INTO mv_to_set values ('kek'); diff --git a/parser/testdata/03357_with_cube_with_totals_assertion/ast.json b/parser/testdata/03357_with_cube_with_totals_assertion/ast.json new file mode 100644 index 000000000..111fce039 --- /dev/null +++ b/parser/testdata/03357_with_cube_with_totals_assertion/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001332168, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03357_with_cube_with_totals_assertion/metadata.json 
b/parser/testdata/03357_with_cube_with_totals_assertion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03357_with_cube_with_totals_assertion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03357_with_cube_with_totals_assertion/query.sql b/parser/testdata/03357_with_cube_with_totals_assertion/query.sql new file mode 100644 index 000000000..ec6d879f0 --- /dev/null +++ b/parser/testdata/03357_with_cube_with_totals_assertion/query.sql @@ -0,0 +1,27 @@ +SET enable_analyzer=1; +SET allow_suspicious_low_cardinality_types = 1; +SET max_rows_to_group_by = 65535; +SET max_threads = 1; +SET max_block_size = 65536; +SET group_by_overflow_mode = 'any'; +SET totals_mode = 'after_having_auto'; +SET totals_auto_threshold = 0.5; + +CREATE TABLE combinator_argMin_table_r1__fuzz_791 +( + `id` LowCardinality(Int32), + `value` String, + `agg_time` UInt64 +) +ENGINE = MergeTree ORDER BY id; + +INSERT INTO combinator_argMin_table_r1__fuzz_791 +SELECT number % 10 AS id, + number AS value, + '01-01-2024 00:00:00' + toIntervalDay(number) +FROM numbers(100); + +-- No idea what's the right answer here, but definitely not a logical error! +SELECT maxArgMax(agg_time, value) +FROM combinator_argMin_table_r1__fuzz_791 +GROUP BY id WITH CUBE WITH TOTALS FORMAT Null; diff --git a/parser/testdata/03358_block_structure_match/ast.json b/parser/testdata/03358_block_structure_match/ast.json new file mode 100644 index 000000000..e7016f038 --- /dev/null +++ b/parser/testdata/03358_block_structure_match/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001528249, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03358_block_structure_match/metadata.json b/parser/testdata/03358_block_structure_match/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03358_block_structure_match/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03358_block_structure_match/query.sql b/parser/testdata/03358_block_structure_match/query.sql new file mode 100644 index 000000000..d8886454d --- /dev/null +++ b/parser/testdata/03358_block_structure_match/query.sql @@ -0,0 +1,31 @@ +SET enable_analyzer=1; + +SELECT * FROM (SELECT 1 AS a, 2 AS b FROM system.one INNER JOIN system.one USING (dummy) UNION ALL SELECT 3 AS a, 4 AS b FROM system.one) WHERE a != 10 ORDER BY a ASC, a != 10 ASC, b ASC; + +SELECT 4 +FROM +( + SELECT + 1 AS X, + 2 AS Y + UNION ALL + SELECT + 3, + 4 + GROUP BY 2 +) +WHERE materialize(4) +ORDER BY materialize(4) ASC NULLS LAST; + +SELECT * +FROM +( + SELECT 1 AS a + GROUP BY + GROUPING SETS ((tuple(toUInt128(67)))) + UNION ALL + SELECT materialize(2) +) +WHERE a +ORDER BY (75, ((tuple(((67, (67, (tuple((tuple(toLowCardinality(toLowCardinality(1))), 1)), toNullable(1))), (tuple(toUInt256(1)), 1)), 1)), 1), 1), toNullable(1)) ASC +FORMAT Pretty; diff --git a/parser/testdata/03358_lambda_resolution_segfault_analyzer/ast.json b/parser/testdata/03358_lambda_resolution_segfault_analyzer/ast.json new file mode 100644 index 000000000..3dba3418c --- /dev/null +++ b/parser/testdata/03358_lambda_resolution_segfault_analyzer/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001317941, + "rows_read": 
1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03358_lambda_resolution_segfault_analyzer/metadata.json b/parser/testdata/03358_lambda_resolution_segfault_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03358_lambda_resolution_segfault_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03358_lambda_resolution_segfault_analyzer/query.sql b/parser/testdata/03358_lambda_resolution_segfault_analyzer/query.sql new file mode 100644 index 000000000..f59f57471 --- /dev/null +++ b/parser/testdata/03358_lambda_resolution_segfault_analyzer/query.sql @@ -0,0 +1,8 @@ +SET enable_analyzer=1; +WITH x -> toString(x) AS lambda_1 +SELECT + 3, + arrayMap(lambda_1 AS lambda_2, [1, 2, 3]), + arrayMap(lambda_2, materialize(['1', '2', '3'])) +WHERE toNullable(9) +GROUP BY lambda_2; -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/03359_analyzer_rewrite_view_query/ast.json b/parser/testdata/03359_analyzer_rewrite_view_query/ast.json new file mode 100644 index 000000000..20afbc465 --- /dev/null +++ b/parser/testdata/03359_analyzer_rewrite_view_query/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery mydestination (children 1)" + }, + { + "explain": " Identifier mydestination" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00132032, + "rows_read": 2, + "bytes_read": 79 + } +} diff --git a/parser/testdata/03359_analyzer_rewrite_view_query/metadata.json b/parser/testdata/03359_analyzer_rewrite_view_query/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03359_analyzer_rewrite_view_query/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03359_analyzer_rewrite_view_query/query.sql b/parser/testdata/03359_analyzer_rewrite_view_query/query.sql new file mode 100644 index 000000000..3f8bc96df --- /dev/null +++ b/parser/testdata/03359_analyzer_rewrite_view_query/query.sql @@ -0,0 +1,13 @@ +CREATE TABLE mydestination +( + `object` String +) +ENGINE = MergeTree +ORDER BY object; + +CREATE MATERIALIZED VIEW myview TO mydestination +AS WITH ('foo', 'bar') AS objects +SELECT 'foo' AS object +WHERE object IN (objects); + +SELECT * FROM myview; diff --git a/parser/testdata/03359_point_in_polygon_index/ast.json b/parser/testdata/03359_point_in_polygon_index/ast.json new file mode 100644 index 000000000..ce7e7a547 --- /dev/null +++ b/parser/testdata/03359_point_in_polygon_index/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_point_in_polygon (children 1)" + }, + { + "explain": " Identifier t_point_in_polygon" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001388988, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/03359_point_in_polygon_index/metadata.json b/parser/testdata/03359_point_in_polygon_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03359_point_in_polygon_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03359_point_in_polygon_index/query.sql b/parser/testdata/03359_point_in_polygon_index/query.sql new file mode 100644 index 000000000..a48b284ab --- /dev/null +++ b/parser/testdata/03359_point_in_polygon_index/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS t_point_in_polygon; + +CREATE TABLE t_point_in_polygon (a UInt64, 
p Point) ENGINE = MergeTree ORDER BY a; + +INSERT INTO t_point_in_polygon (a) VALUES (1); + +SELECT * FROM t_point_in_polygon WHERE pointInPolygon(p, [(0, 0), (10, 0), (10, 10), (0, 10)]); + +DROP TABLE t_point_in_polygon; diff --git a/parser/testdata/03359_ub_merging_aggregated_transform/ast.json b/parser/testdata/03359_ub_merging_aggregated_transform/ast.json new file mode 100644 index 000000000..66bc77569 --- /dev/null +++ b/parser/testdata/03359_ub_merging_aggregated_transform/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t_having (children 1)" + }, + { + "explain": " Identifier t_having" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001188806, + "rows_read": 2, + "bytes_read": 69 + } +} diff --git a/parser/testdata/03359_ub_merging_aggregated_transform/metadata.json b/parser/testdata/03359_ub_merging_aggregated_transform/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03359_ub_merging_aggregated_transform/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03359_ub_merging_aggregated_transform/query.sql b/parser/testdata/03359_ub_merging_aggregated_transform/query.sql new file mode 100644 index 000000000..ab8a6b158 --- /dev/null +++ b/parser/testdata/03359_ub_merging_aggregated_transform/query.sql @@ -0,0 +1,23 @@ +CREATE TABLE t_having +( + c0 Int32, + c1 UInt64 +) +ENGINE = MergeTree +ORDER BY c0 +SETTINGS index_granularity = 18398, min_bytes_for_wide_part = 985217954, ratio_of_defaults_for_sparse_serialization = 0.5019329786300659, replace_long_file_name_to_hash = true, max_file_name_length = 0, min_bytes_for_full_part_storage = 536870912, compact_parts_max_bytes_to_buffer = 3894513, compact_parts_max_granules_to_buffer = 216, compact_parts_merge_max_bytes_to_prefetch_part = 27835806, merge_max_block_size = 17883, old_parts_lifetime = 196., prefer_fetch_merged_part_size_threshold = 10737418240, vertical_merge_algorithm_min_rows_to_activate = 1, vertical_merge_algorithm_min_columns_to_activate = 100, min_merge_bytes_to_use_direct_io = 6973717094, index_granularity_bytes = 26319752, use_const_adaptive_granularity = true, enable_index_granularity_compression = true, concurrent_part_removal_threshold = 31, allow_vertical_merges_from_compact_to_wide_parts = true, enable_block_number_column = false, enable_block_offset_column = false, cache_populated_by_fetch = false, marks_compress_block_size = 64286, primary_key_compress_block_size = 40716, use_primary_key_cache = false, prewarm_primary_key_cache = true, prewarm_mark_cache = false; + +INSERT INTO t_having SELECT number, number FROM numbers(1e3); + +SET enable_optimize_predicate_expression = 0, min_compress_block_size = 1013937, max_compress_block_size = 2211670, max_block_size = 97680, min_external_table_block_size_bytes = 1, max_joined_block_size_rows = 98012, max_insert_threads = 2, max_threads = 32, max_parsing_threads = 10, max_read_buffer_size = 740736, connect_timeout_with_failover_ms = 2000, connect_timeout_with_failover_secure_ms = 3000, idle_connection_timeout = 36000, s3_max_get_rps = 1000000, s3_max_get_burst = 2000000, s3_max_put_rps = 1000000, s3_max_put_burst = 2000000, s3_check_objects_after_upload = true, use_uncompressed_cache = true, max_remote_read_network_bandwidth = 1000000000000, max_remote_write_network_bandwidth = 1000000000000, max_local_read_bandwidth = 1000000000000, max_local_write_bandwidth = 1000000000000, 
stream_like_engine_allow_direct_select = true, enable_multiple_prewhere_read_steps = false, replication_wait_for_inactive_replica_timeout = 30, min_count_to_compile_expression = 0, group_by_two_level_threshold = 1000000, group_by_two_level_threshold_bytes = 32424819, distributed_aggregation_memory_efficient = false, enable_positional_arguments = false, allow_nonconst_timezone_arguments = true, group_by_use_nulls = true, min_chunk_bytes_for_parallel_parsing = 5815363, merge_tree_coarse_index_granularity = 6, min_bytes_to_use_direct_io = 10737418240, min_bytes_to_use_mmap_io = 1, log_queries = true, insert_quorum_timeout = 60000, table_function_remote_max_addresses = 200, merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0.6000000238418579, http_wait_end_of_query = true, http_response_buffer_size = 7985738, fsync_metadata = true, join_use_nulls = true, query_plan_join_swap_table = false, http_send_timeout = 60., http_receive_timeout = 60., use_index_for_in_with_subqueries_max_values = 1000000000, opentelemetry_start_trace_probability = 0.10000000149011612, enable_vertical_final = false, max_rows_to_read = 20000000, max_bytes_to_read = 1000000000000, max_bytes_to_read_leaf = 1000000000000, max_rows_to_group_by = 10000000000, max_bytes_ratio_before_external_group_by = 0.2, max_rows_to_sort = 10000000000, max_bytes_to_sort = 10000000000, prefer_external_sort_block_bytes = 100000000, max_bytes_ratio_before_external_sort = 0.19, max_bytes_before_remerge_sort = 2087479364, max_result_rows = 1000000000, max_result_bytes = 1000000000, max_execution_time = 60., max_execution_time_leaf = 600., max_execution_speed = 100000000000, max_execution_speed_bytes = 10000000000000, timeout_before_checking_execution_speed = 300., max_estimated_execution_time = 600., max_columns_to_read = 20000, max_temporary_columns = 20000, max_temporary_non_const_columns = 20000, max_rows_in_set = 10000000000, max_bytes_in_set = 10000000000, max_rows_in_join = 10000000000, max_bytes_in_join = 10000000000, join_algorithm = 'grace_hash', cross_join_min_rows_to_compress = 100000000, cross_join_min_bytes_to_compress = 0, max_rows_to_transfer = 1000000000, max_bytes_to_transfer = 1000000000, max_rows_in_distinct = 10000000000, max_bytes_in_distinct = 10000000000, max_memory_usage = 10000000000, max_memory_usage_for_user = 19896741888, max_untracked_memory = 1048576, memory_profiler_step = 1048576, max_network_bandwidth = 100000000000, max_network_bytes = 1000000000000, max_network_bandwidth_for_user = 100000000000, max_network_bandwidth_for_all_users = 100000000000, max_temporary_data_on_disk_size_for_user = 100000000000, max_temporary_data_on_disk_size_for_query = 100000000000, max_backup_bandwidth = 100000000000, log_comment = '01798_having_push_down.sql', send_logs_level = 'warning', enable_optimize_predicate_expression = false, optimize_aggregation_in_order = true, aggregation_in_order_max_block_bytes = 49246335, read_in_order_two_level_merge_threshold = 48, max_hyperscan_regexp_length = 1000000, max_hyperscan_regexp_total_length = 10000000, allow_introspection_functions = true, database_atomic_wait_for_drop_and_detach_synchronously = true, optimize_or_like_chain = true, optimize_if_chain_to_multiif = true, optimize_functions_to_subcolumns = false, optimize_append_index = true, use_query_cache = true, query_cache_nondeterministic_function_handling = 'ignore', query_cache_system_table_handling = 'ignore', query_cache_max_size_in_bytes = 10000000, query_cache_max_entries = 100000, 
database_replicated_allow_replicated_engine_arguments = 1, distributed_ddl_entry_format_version = 6, external_storage_max_read_rows = 10000000000, external_storage_max_read_bytes = 10000000000, local_filesystem_read_method = 'pread', remote_filesystem_read_method = 'read', local_filesystem_read_prefetch = true, remote_filesystem_read_prefetch = false, merge_tree_min_bytes_per_task_for_remote_reading = 16777216, merge_tree_compact_parts_min_granules_to_multibuffer_read = 55, async_insert_busy_timeout_max_ms = 5000, enable_filesystem_cache = true, enable_filesystem_cache_on_write_operations = true, filesystem_cache_segments_batch_size = 10, use_page_cache_for_disks_without_file_cache = true, load_marks_asynchronously = true, allow_prefetched_read_pool_for_remote_filesystem = true, allow_prefetched_read_pool_for_local_filesystem = false, filesystem_prefetch_step_bytes = 104857600, filesystem_prefetch_max_memory_usage = 134217728, filesystem_prefetches_limit = 10, allow_deprecated_database_ordinary = true, max_streams_for_merge_tree_reading = 1000, insert_keeper_max_retries = 100, insert_keeper_retry_initial_backoff_ms = 1, insert_keeper_retry_max_backoff_ms = 10, insert_keeper_fault_injection_probability = 0.009999999776482582, ignore_drop_queries_probability = 0.20000000298023224, optimize_distinct_in_order = false, allow_experimental_parallel_reading_from_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'parallel_replicas', parallel_replicas_for_non_replicated_merge_tree = true, session_timezone = 'Africa/Juba'; + +SELECT + c0 + -1, + sum(intDivOrZero(intDivOrZero(NULL, NULL), '2'), intDivOrZero(10000000000., intDivOrZero(intDivOrZero(intDivOrZero(NULL, NULL), 10), NULL))) +FROM t_having +GROUP BY + c0 = 2, + c0 = 10, + intDivOrZero(intDivOrZero(intDivOrZero(NULL, NULL), NULL), NULL), + c0 +HAVING c0 = 2; diff --git a/parser/testdata/03360_any_join_parallel_hash_bug/ast.json b/parser/testdata/03360_any_join_parallel_hash_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03360_any_join_parallel_hash_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03360_any_join_parallel_hash_bug/metadata.json b/parser/testdata/03360_any_join_parallel_hash_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03360_any_join_parallel_hash_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03360_any_join_parallel_hash_bug/query.sql b/parser/testdata/03360_any_join_parallel_hash_bug/query.sql new file mode 100644 index 000000000..f8453b5e8 --- /dev/null +++ b/parser/testdata/03360_any_join_parallel_hash_bug/query.sql @@ -0,0 +1,12 @@ +-- Previously, due to a bug in `ConcurrentHashJoin::onBuildPhaseFinish()` we reserved much less space in `used_flags` than needed. +-- This test just checks that we won't crash. 
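+-- A minimal sketch of the pattern under test, assuming only the numbers table
+-- function: an ANY INNER JOIN evaluated with the parallel_hash algorithm, which
+-- is what exercises the used_flags bookkeeping mentioned above.
+--   SELECT count() FROM numbers(1000) AS l
+--   ANY INNER JOIN numbers(1000) AS r ON l.number = r.number
+--   SETTINGS join_algorithm = 'parallel_hash';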
+SET enable_analyzer=1; +SELECT + number, + number +FROM system.numbers +ANY INNER JOIN system.numbers AS alias277 ON number = alias277.number +LIMIT 102400 +FORMAT `Null` +SETTINGS join_algorithm = 'parallel_hash'; + diff --git a/parser/testdata/03360_bool_remote/ast.json b/parser/testdata/03360_bool_remote/ast.json new file mode 100644 index 000000000..ac5eefd3a --- /dev/null +++ b/parser/testdata/03360_bool_remote/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Bool_1 (alias x)" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '127.0.0.{1,2}'" + }, + { + "explain": " Identifier system.one" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001487981, + "rows_read": 13, + "bytes_read": 507 + } +} diff --git a/parser/testdata/03360_bool_remote/metadata.json b/parser/testdata/03360_bool_remote/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03360_bool_remote/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03360_bool_remote/query.sql b/parser/testdata/03360_bool_remote/query.sql new file mode 100644 index 000000000..69c9b6da2 --- /dev/null +++ b/parser/testdata/03360_bool_remote/query.sql @@ -0,0 +1,3 @@ +SELECT true AS x FROM remote('127.0.0.{1,2}', system.one) LIMIT 1; +SELECT materialize(true) AS x FROM remote('127.0.0.{1,2}', system.one) LIMIT 1; +SELECT true AS x FROM remote('127.0.0.{1,2}', system.one) GROUP BY x; diff --git a/parser/testdata/03362_create_table_after_truncate_replicated_database/ast.json b/parser/testdata/03362_create_table_after_truncate_replicated_database/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03362_create_table_after_truncate_replicated_database/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03362_create_table_after_truncate_replicated_database/metadata.json b/parser/testdata/03362_create_table_after_truncate_replicated_database/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03362_create_table_after_truncate_replicated_database/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03362_create_table_after_truncate_replicated_database/query.sql b/parser/testdata/03362_create_table_after_truncate_replicated_database/query.sql new file mode 100644 index 000000000..e66202e8b --- /dev/null +++ b/parser/testdata/03362_create_table_after_truncate_replicated_database/query.sql @@ -0,0 +1,14 @@ +-- Tags: zookeeper, no-replicated-database, no-ordinary-database +-- no-replicated-database: we explicitly run this test by creating a replicated database + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; + +CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier} ENGINE=Replicated('/clickhouse/databases/{database}', 'shard1', 'replica1') FORMAT NULL; + +USE {CLICKHOUSE_DATABASE:Identifier}; + +CREATE TABLE t1 (x UInt8, y String) 
ENGINE=ReplicatedMergeTree ORDER BY x FORMAT NULL; + +TRUNCATE DATABASE {CLICKHOUSE_DATABASE:Identifier}; -- { serverError 48 } + +DROP DATABASE {CLICKHOUSE_DATABASE:Identifier}; diff --git a/parser/testdata/03362_default_profiles_context/ast.json b/parser/testdata/03362_default_profiles_context/ast.json new file mode 100644 index 000000000..fe48cdaa4 --- /dev/null +++ b/parser/testdata/03362_default_profiles_context/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t0 (children 3)" + }, + { + "explain": " Identifier t0" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration c0 (children 2)" + }, + { + "explain": " DataType Int" + }, + { + "explain": " Function defaultProfiles (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.000992748, + "rows_read": 12, + "bytes_read": 408 + } +} diff --git a/parser/testdata/03362_default_profiles_context/metadata.json b/parser/testdata/03362_default_profiles_context/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03362_default_profiles_context/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03362_default_profiles_context/query.sql b/parser/testdata/03362_default_profiles_context/query.sql new file mode 100644 index 000000000..92954c8ce --- /dev/null +++ b/parser/testdata/03362_default_profiles_context/query.sql @@ -0,0 +1,2 @@ +CREATE TEMPORARY TABLE t0 (c0 Int TTL defaultProfiles()) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TEMPORARY TABLE t0 (c0 Int TTL defaultRoles()) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03362_filter_transform_profile_events/ast.json b/parser/testdata/03362_filter_transform_profile_events/ast.json new file mode 100644 index 000000000..71a270783 --- /dev/null +++ b/parser/testdata/03362_filter_transform_profile_events/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_100" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001613074, + "rows_read": 17, + "bytes_read": 629 + } +} diff --git a/parser/testdata/03362_filter_transform_profile_events/metadata.json 
b/parser/testdata/03362_filter_transform_profile_events/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03362_filter_transform_profile_events/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03362_filter_transform_profile_events/query.sql b/parser/testdata/03362_filter_transform_profile_events/query.sql new file mode 100644 index 000000000..604239868 --- /dev/null +++ b/parser/testdata/03362_filter_transform_profile_events/query.sql @@ -0,0 +1,6 @@ +SELECT * FROM system.numbers WHERE number % 2 = 0 LIMIT 100; +SELECT count() = 2 AS assert_exists FROM system.events WHERE name IN ('FilterTransformPassedRows', 'FilterTransformPassedBytes') HAVING assert_exists; + +SELECT * FROM system.numbers WHERE number % 2 = 0 LIMIT 100 FORMAT Null; +SYSTEM FLUSH LOGS query_log; +SELECT ProfileEvents['FilterTransformPassedRows'] > 0, ProfileEvents['FilterTransformPassedBytes'] > 0 FROM system.query_log WHERE (type = 'QueryFinish') AND (current_database = currentDatabase()) AND (query = 'SELECT * FROM system.numbers WHERE number % 2 = 0 LIMIT 100 FORMAT Null;'); diff --git a/parser/testdata/03362_iceberg_table_with_confusing_name/ast.json b/parser/testdata/03362_iceberg_table_with_confusing_name/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03362_iceberg_table_with_confusing_name/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03362_iceberg_table_with_confusing_name/metadata.json b/parser/testdata/03362_iceberg_table_with_confusing_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03362_iceberg_table_with_confusing_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03362_iceberg_table_with_confusing_name/query.sql b/parser/testdata/03362_iceberg_table_with_confusing_name/query.sql new file mode 100644 index 000000000..f42dca735 --- /dev/null +++ b/parser/testdata/03362_iceberg_table_with_confusing_name/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +select * from icebergS3(s3_conn, filename='est') limit 10; \ No newline at end of file diff --git a/parser/testdata/03362_join_on_filterpushdown/ast.json b/parser/testdata/03362_join_on_filterpushdown/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03362_join_on_filterpushdown/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03362_join_on_filterpushdown/metadata.json b/parser/testdata/03362_join_on_filterpushdown/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03362_join_on_filterpushdown/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03362_join_on_filterpushdown/query.sql b/parser/testdata/03362_join_on_filterpushdown/query.sql new file mode 100644 index 000000000..730fe9236 --- /dev/null +++ b/parser/testdata/03362_join_on_filterpushdown/query.sql @@ -0,0 +1,133 @@ +-- Tags: no-parallel-replicas + +SET enable_parallel_replicas = 0; +SET query_plan_join_swap_table = false; +SET enable_analyzer = 1; +SET query_plan_filter_push_down = 1; + +SELECT * +FROM (SELECT number AS key, number AS value FROM numbers(100)) t1 +LEFT JOIN (SELECT number AS key, number AS value FROM numbers(100)) t2 +ON t1.key = t2.key + AND t1.value < 50 + AND t2.value < 50 +FORMAT Null +SETTINGS log_comment = '03362_join_on_filterpushdown_left' +; + + +SELECT * +FROM (SELECT number AS key, number 
AS value FROM numbers(100)) t1 +LEFT JOIN (SELECT number AS key, number AS value FROM numbers(100)) t2 +ON t1.key = t2.key +WHERE t1.value < 50 + AND t2.value < 50 +FORMAT Null +SETTINGS log_comment = '03362_join_on_filterpushdown_left_where' +; + +SELECT * +FROM (SELECT number AS key, number AS value FROM numbers(100)) t1 +LEFT JOIN (SELECT number AS key, number AS value FROM numbers(100)) t2 +ON t1.key = t2.key +WHERE t1.value >= 50 + AND t2.value >= 50 +FORMAT Null +SETTINGS log_comment = '03362_join_on_filterpushdown_left_where_filter_zeros' +; + +SELECT * +FROM (SELECT number AS key, number AS value FROM numbers(100)) t1 +RIGHT JOIN (SELECT number AS key, number AS value FROM numbers(100)) t2 +ON t1.key = t2.key + AND t1.value < 50 + AND t2.value < 50 +FORMAT Null +SETTINGS log_comment = '03362_join_on_filterpushdown_right' +; + +SELECT * +FROM (SELECT number AS key, number AS value FROM numbers(100)) t1 +JOIN (SELECT number AS key, number AS value FROM numbers(100)) t2 +ON t1.key = t2.key + AND t1.value < 50 + AND t2.value < 50 +FORMAT Null +SETTINGS log_comment = '03362_join_on_filterpushdown_inner' +; + + +SELECT * +FROM (SELECT number AS key, number AS value FROM numbers(100)) t1 +FULL JOIN (SELECT number AS key, number AS value FROM numbers(100)) t2 +ON t1.key = t2.key + AND t1.value < 50 + AND t2.value < 50 +FORMAT Null +SETTINGS log_comment = '03362_join_on_filterpushdown_full' +; + +SYSTEM FLUSH LOGS query_log; + +SELECT + if(ProfileEvents['JoinProbeTableRowCount'] == 100, 'ok', 'fail: ' || toString(ProfileEvents['JoinProbeTableRowCount'])), + if(ProfileEvents['JoinBuildTableRowCount'] == 50, 'ok', 'fail: ' || toString(ProfileEvents['JoinBuildTableRowCount'])), + if(ProfileEvents['JoinResultRowCount'] == 100, 'ok', 'fail: ' || toString(ProfileEvents['JoinResultRowCount'])), +FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() AND query_kind = 'Select' AND current_database = currentDatabase() +AND log_comment = '03362_join_on_filterpushdown_left' +ORDER BY event_time DESC +LIMIT 1; + + +SELECT + if(ProfileEvents['JoinProbeTableRowCount'] == 50, 'ok', 'fail: ' || toString(ProfileEvents['JoinProbeTableRowCount'])), + if(ProfileEvents['JoinBuildTableRowCount'] == 100, 'ok', 'fail: ' || toString(ProfileEvents['JoinBuildTableRowCount'])), + if(ProfileEvents['JoinResultRowCount'] == 50, 'ok', 'fail: ' || toString(ProfileEvents['JoinResultRowCount'])), +FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() AND query_kind = 'Select' AND current_database = currentDatabase() +AND log_comment = '03362_join_on_filterpushdown_left_where' +ORDER BY event_time DESC +LIMIT 1; + +SELECT + if(ProfileEvents['JoinProbeTableRowCount'] == 50, 'ok', 'fail: ' || toString(ProfileEvents['JoinProbeTableRowCount'])), + if(ProfileEvents['JoinBuildTableRowCount'] == 50, 'ok', 'fail: ' || toString(ProfileEvents['JoinBuildTableRowCount'])), + if(ProfileEvents['JoinResultRowCount'] == 50, 'ok', 'fail: ' || toString(ProfileEvents['JoinResultRowCount'])), +FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() AND query_kind = 'Select' AND current_database = currentDatabase() +AND log_comment = '03362_join_on_filterpushdown_left_where_filter_zeros' +ORDER BY event_time DESC +LIMIT 1; + +SELECT + if(ProfileEvents['JoinProbeTableRowCount'] == 50, 'ok', 'fail: ' || toString(ProfileEvents['JoinProbeTableRowCount'])), + if(ProfileEvents['JoinBuildTableRowCount'] == 100, 'ok', 'fail: ' || toString(ProfileEvents['JoinBuildTableRowCount'])), 
+ if(ProfileEvents['JoinResultRowCount'] == 100, 'ok', 'fail: ' || toString(ProfileEvents['JoinResultRowCount'])), +FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() AND query_kind = 'Select' AND current_database = currentDatabase() +AND log_comment = '03362_join_on_filterpushdown_right' +ORDER BY event_time DESC +LIMIT 1; + + +SELECT + if(ProfileEvents['JoinProbeTableRowCount'] == 50, 'ok', 'fail: ' || toString(ProfileEvents['JoinProbeTableRowCount'])), + if(ProfileEvents['JoinBuildTableRowCount'] == 50, 'ok', 'fail: ' || toString(ProfileEvents['JoinBuildTableRowCount'])), + if(ProfileEvents['JoinResultRowCount'] == 50, 'ok', 'fail: ' || toString(ProfileEvents['JoinResultRowCount'])), +FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() AND query_kind = 'Select' AND current_database = currentDatabase() +AND log_comment = '03362_join_on_filterpushdown_inner' +ORDER BY event_time DESC +LIMIT 1; + +SELECT + if(ProfileEvents['JoinProbeTableRowCount'] == 100, 'ok', 'fail: ' || toString(ProfileEvents['JoinProbeTableRowCount'])), + if(ProfileEvents['JoinBuildTableRowCount'] == 100, 'ok', 'fail: ' || toString(ProfileEvents['JoinBuildTableRowCount'])), + if(ProfileEvents['JoinResultRowCount'] == 150, 'ok', 'fail: ' || toString(ProfileEvents['JoinResultRowCount'])), +FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() AND query_kind = 'Select' AND current_database = currentDatabase() +AND log_comment = '03362_join_on_filterpushdown_full' +ORDER BY event_time DESC +LIMIT 1; + diff --git a/parser/testdata/03362_join_where_false_76670/ast.json b/parser/testdata/03362_join_where_false_76670/ast.json new file mode 100644 index 000000000..99b4165a4 --- /dev/null +++ b/parser/testdata/03362_join_where_false_76670/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001149413, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03362_join_where_false_76670/metadata.json b/parser/testdata/03362_join_where_false_76670/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03362_join_where_false_76670/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03362_join_where_false_76670/query.sql b/parser/testdata/03362_join_where_false_76670/query.sql new file mode 100644 index 000000000..c8c67b391 --- /dev/null +++ b/parser/testdata/03362_join_where_false_76670/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; + +CREATE TABLE t0(x Int) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE t1(x Int) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t0 SELECT number FROM numbers(10); +INSERT INTO t1 SELECT number + 2 FROM numbers(10); + +SET enable_analyzer = 1; + +SELECT * FROM t1 +RIGHT JOIN t0 AS t2 +ON NOT t0.x = t2.x +WHERE false +; + +SELECT * FROM t1 +RIGHT JOIN t0 AS t2 +ON NOT t0.x = t2.x +WHERE identity(false) +; -- { serverError INVALID_JOIN_ON_EXPRESSION } diff --git a/parser/testdata/03362_optimize_using_constraints_type_mismatch/ast.json b/parser/testdata/03362_optimize_using_constraints_type_mismatch/ast.json new file mode 100644 index 000000000..72fe195ca --- /dev/null +++ b/parser/testdata/03362_optimize_using_constraints_type_mismatch/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", 
+ "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001311066, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03362_optimize_using_constraints_type_mismatch/metadata.json b/parser/testdata/03362_optimize_using_constraints_type_mismatch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03362_optimize_using_constraints_type_mismatch/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03362_optimize_using_constraints_type_mismatch/query.sql b/parser/testdata/03362_optimize_using_constraints_type_mismatch/query.sql new file mode 100644 index 000000000..05694d15b --- /dev/null +++ b/parser/testdata/03362_optimize_using_constraints_type_mismatch/query.sql @@ -0,0 +1,3 @@ +SET optimize_using_constraints = 1, convert_query_to_cnf = 1, enable_analyzer=1; +CREATE TABLE t0 (c0 String, c1 String, CONSTRAINT c ASSUME (c0 = '2000-01-01 00:00:00'::DateTime64 AND c1 = '')) ENGINE = Memory; +SELECT 1 FROM t0 WHERE t0.c0 = t0.c0; -- { serverError TYPE_MISMATCH } diff --git a/parser/testdata/03362_reverse_sorting_key_explicit_primary_key/ast.json b/parser/testdata/03362_reverse_sorting_key_explicit_primary_key/ast.json new file mode 100644 index 000000000..017cbb75e --- /dev/null +++ b/parser/testdata/03362_reverse_sorting_key_explicit_primary_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery x1 (children 1)" + }, + { + "explain": " Identifier x1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001163187, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03362_reverse_sorting_key_explicit_primary_key/metadata.json b/parser/testdata/03362_reverse_sorting_key_explicit_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03362_reverse_sorting_key_explicit_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03362_reverse_sorting_key_explicit_primary_key/query.sql b/parser/testdata/03362_reverse_sorting_key_explicit_primary_key/query.sql new file mode 100644 index 000000000..20b6cce28 --- /dev/null +++ b/parser/testdata/03362_reverse_sorting_key_explicit_primary_key/query.sql @@ -0,0 +1,13 @@ +drop table if exists x1; + +create table x1 (i Nullable(int)) engine MergeTree order by i desc primary key i settings allow_nullable_key = 1, index_granularity = 2, allow_experimental_reverse_key = 1; + +insert into x1 select * from numbers(100); + +optimize table x1 final; + +select * from x1 where i = 3; + +select count() from x1 where i between 3 and 10; + +drop table x1; diff --git a/parser/testdata/03363_constant_nullable_key/ast.json b/parser/testdata/03363_constant_nullable_key/ast.json new file mode 100644 index 000000000..429860942 --- /dev/null +++ b/parser/testdata/03363_constant_nullable_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001310524, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03363_constant_nullable_key/metadata.json b/parser/testdata/03363_constant_nullable_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03363_constant_nullable_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03363_constant_nullable_key/query.sql b/parser/testdata/03363_constant_nullable_key/query.sql new file mode 100644 index 000000000..e84814be8 --- /dev/null +++ b/parser/testdata/03363_constant_nullable_key/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Int) ENGINE = MergeTree() ORDER BY (c0 * NULL) SETTINGS allow_nullable_key = 1; +INSERT INTO TABLE t0 (c0) VALUES (1); +SELECT * FROM t0; + +DROP TABLE t0; diff --git a/parser/testdata/03363_estimate_compression_ratio_validation/ast.json b/parser/testdata/03363_estimate_compression_ratio_validation/ast.json new file mode 100644 index 000000000..373a777db --- /dev/null +++ b/parser/testdata/03363_estimate_compression_ratio_validation/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table_for_estimate_compression_ratio (children 1)" + }, + { + "explain": " Identifier test_table_for_estimate_compression_ratio" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001097327, + "rows_read": 2, + "bytes_read": 134 + } +} diff --git a/parser/testdata/03363_estimate_compression_ratio_validation/metadata.json b/parser/testdata/03363_estimate_compression_ratio_validation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03363_estimate_compression_ratio_validation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03363_estimate_compression_ratio_validation/query.sql b/parser/testdata/03363_estimate_compression_ratio_validation/query.sql new file mode 100644 index 000000000..cc9f7b71e --- /dev/null +++ b/parser/testdata/03363_estimate_compression_ratio_validation/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS test_table_for_estimate_compression_ratio; +CREATE TABLE test_table_for_estimate_compression_ratio (some_column Int64, other_column Int64) ENGINE = MergeTree ORDER BY some_column; + +SELECT estimateCompressionRatio('lz4', 8192)(some_column, other_column) from test_table_for_estimate_compression_ratio; -- { serverError 42 } +SELECT estimateCompressionRatio('lz4', 8192, 2025)(some_column) from test_table_for_estimate_compression_ratio; -- { serverError 456 } +SELECT estimateCompressionRatio('zstd', 'lz4')(some_column) from test_table_for_estimate_compression_ratio; -- { serverError 457 } + +SELECT estimateCompressionRatio('zstd', 8192)(some_column) from test_table_for_estimate_compression_ratio; -- positive case, should always be 0 +-- order is not important +SELECT estimateCompressionRatio(8192, 'zstd')(some_column) from test_table_for_estimate_compression_ratio; + +-- block_size_bytes > 0 +SELECT estimateCompressionRatio(0)(some_column) from test_table_for_estimate_compression_ratio; -- { serverError BAD_QUERY_PARAMETER } +-- and now with some data +SELECT estimateCompressionRatio(0)(0); -- { serverError BAD_QUERY_PARAMETER } diff --git a/parser/testdata/03363_function_keccak256/ast.json b/parser/testdata/03363_function_keccak256/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03363_function_keccak256/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03363_function_keccak256/metadata.json b/parser/testdata/03363_function_keccak256/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03363_function_keccak256/metadata.json @@ 
-0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03363_function_keccak256/query.sql b/parser/testdata/03363_function_keccak256/query.sql new file mode 100644 index 000000000..773752cfa --- /dev/null +++ b/parser/testdata/03363_function_keccak256/query.sql @@ -0,0 +1,19 @@ +-- Tags: no-fasttest + +SELECT hex(keccak256('')); +SELECT hex(keccak256(unhex(''))); +SELECT hex(keccak256('hello')); +SELECT hex(keccak256(toFixedString('hello', 5))); +SELECT hex(keccak256(toFixedString('hello', 6))); +SELECT hex(keccak256('Hello')); +SELECT hex(keccak256('Ethereum')); +SELECT hex(keccak256(repeat('a', 1000))); +SELECT hex(keccak256('Hello, 世界!')); +SELECT keccak256(NULL) AS null_hash; +SELECT hex(keccak256(unhex('deadbeef'))) AS binary_hash; +SELECT hex(keccak256('\n\t\r')) AS escaped_chars_hash, hex(keccak256('\\n\\t\\r')) AS literal_backslash_hash; +SELECT hex(keccak256(keccak256('test'))) AS double_hash; + +WITH iterations AS ( SELECT number AS n FROM system.numbers LIMIT 5 ) +SELECT hex(keccak256('consistent')), hex(keccak256(toString(n))) FROM iterations +ORDER BY n; diff --git a/parser/testdata/03363_hive_style_partition/ast.json b/parser/testdata/03363_hive_style_partition/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03363_hive_style_partition/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03363_hive_style_partition/metadata.json b/parser/testdata/03363_hive_style_partition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03363_hive_style_partition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03363_hive_style_partition/query.sql b/parser/testdata/03363_hive_style_partition/query.sql new file mode 100644 index 000000000..4cf4b92d5 --- /dev/null +++ b/parser/testdata/03363_hive_style_partition/query.sql @@ -0,0 +1,151 @@ +-- Tags: no-parallel, no-fasttest, no-random-settings + +SET allow_suspicious_low_cardinality_types=1; + +DROP TABLE IF EXISTS t_03363_parquet, t_03363_csv, s3_table_half_schema_with_format; + +CREATE TABLE t_03363_parquet (year UInt16, country String, counter UInt8) +ENGINE = S3(s3_conn, filename = 't_03363_parquet', format = Parquet, partition_strategy='hive') +PARTITION BY (year, country); + +INSERT INTO t_03363_parquet VALUES + (2022, 'USA', 1), + (2022, 'Canada', 2), + (2023, 'USA', 3), + (2023, 'Mexico', 4), + (2024, 'France', 5), + (2024, 'Germany', 6), + (2024, 'Germany', 7), + (1999, 'Brazil', 8), + (2100, 'Japan', 9), + (2024, 'CN', 10), + (2025, '', 11); + +-- distinct because minio isn't cleaned up +select distinct on (counter) replaceRegexpAll(_path, '/[0-9]+\\.parquet', '/<snowflakeid>.parquet') AS _path, counter from t_03363_parquet order by counter; + +-- CSV test +CREATE TABLE t_03363_csv (year UInt16, country String, counter UInt8) +ENGINE = S3(s3_conn, filename = 't_03363_csv', format = CSV, partition_strategy='hive') +PARTITION BY (year, country); + +INSERT INTO t_03363_csv VALUES + (2022, 'USA', 1), + (2022, 'Canada', 2), + (2023, 'USA', 3), + (2023, 'Mexico', 4), + (2024, 'France', 5), + (2024, 'Germany', 6), + (2024, 'Germany', 7), + (1999, 'Brazil', 8), + (2100, 'Japan', 9), + (2024, 'CN', 10), + (2025, '', 11); + +select distinct on (counter) replaceRegexpAll(_path, '/[0-9]+\\.csv', '/<snowflakeid>.csv') AS _path, counter from t_03363_csv order by counter; + +-- s3 table function +INSERT INTO FUNCTION s3(s3_conn, filename='t_03363_function', format=Parquet, partition_strategy='hive') PARTITION BY 
(year, country) SELECT country, year, counter FROM t_03363_parquet; +select distinct on (counter) replaceRegexpAll(_path, '/[0-9]+\\.parquet', '/<snowflakeid>.parquet') AS _path, counter from s3(s3_conn, filename='t_03363_function/**.parquet') order by counter; + +-- create a "bucket" with mixed partitioning schemes so we can simulate malformed storage +INSERT INTO FUNCTION s3(s3_conn, filename='t_03363_mixed_partitioning', format=Parquet, partition_strategy='hive') PARTITION BY (year) select 1 as id, 2025 as year; +INSERT INTO FUNCTION s3(s3_conn, filename='t_03363_mixed_partitioning', format=Parquet, partition_strategy='hive') PARTITION BY (country) select 1 as id, 'Brazil' as country; + +-- Depends on the above two inserts, should throw an exception because it could not find the hive partition columns it was looking for +-- The format is null because one of the files contains the requested columns and might return the data before we throw the exception +select * from s3(s3_conn, filename='t_03363_mixed_partitioning/**.parquet') Format null; -- {serverError INCORRECT_DATA} + +-- Depends on the above two inserts, should throw an exception because it could not find the hive partition columns it was looking for +-- The format is null because one of the files contains the requested columns and might return the data before we throw the exception +CREATE TABLE t_03363_mixed_partitioning (id Int32, year UInt16) ENGINE=S3(s3_conn, filename='t_03363_mixed_partitioning', format=Parquet, partition_strategy='hive') PARTITION BY (year); +SELECT * FROM t_03363_mixed_partitioning Format null; -- {serverError INCORRECT_DATA} + +-- should output 1 because partition columns are not written down to the file by default when hive style is being used +select num_columns from s3(s3_conn, filename='t_03363_function/**.parquet', format=ParquetMetadata) limit 1; + +INSERT INTO FUNCTION s3(s3_conn, filename='t_03363_function_write_down_partition_columns', format=Parquet, partition_strategy='hive', partition_columns_in_data_file=1) PARTITION BY (year, country) SELECT country, year, counter FROM t_03363_parquet; +select num_columns from s3(s3_conn, filename='t_03363_function_write_down_partition_columns/**.parquet', format=ParquetMetadata) limit 1; + +-- hive partitioning = 0 so we know it is not reading columns from the path +select * from s3(s3_conn, filename='t_03363_function_write_down_partition_columns/**.parquet', format=Parquet) order by counter limit 1 SETTINGS use_hive_partitioning=0; + +-- only partition columns +INSERT INTO FUNCTION s3(s3_conn, filename='t_03363_parquet', format=Parquet, partition_strategy='hive') PARTITION BY (year, country) SELECT 2020 as year, 'Brazil' as country; -- {serverError INCORRECT_DATA}; + +-- Schema specified, but the hive partition column is missing in the schema (present in the data, though) +INSERT INTO FUNCTION s3(s3_conn, filename='half_baked', format=Parquet, partition_strategy='hive') PARTITION BY year SELECT 1 AS key, 2020 AS year; + +-- Should fail because the schema contains only partition columns and `use_hive_partitioning=1` +CREATE TABLE s3_table_half_schema_with_format (year UInt64) engine=S3(s3_conn, filename='half_baked/**.parquet', format=Parquet) SETTINGS use_hive_partitioning=1; -- {serverError INCORRECT_DATA} + +-- Should succeed because hive is off +CREATE TABLE s3_table_half_schema_with_format (year UInt64) engine=S3(s3_conn, filename='half_baked/**.parquet', format=Parquet) SETTINGS use_hive_partitioning=0; + +-- Should succeed and not return hive columns (as
nothing else is in the schema, so no columns at all). Distinct because maybe MinIO isn't cleaned up +SELECT DISTINCT * FROM s3_table_half_schema_with_format; + +CREATE TABLE s3_table_half_schema_with_format_2 (key UInt64) engine=S3(s3_conn, filename='half_baked/**.parquet', format=Parquet) SETTINGS use_hive_partitioning=0; + +SELECT DISTINCT * FROM s3_table_half_schema_with_format_2; + +-- Should fail because the column year does not exist +SELECT key, * FROM s3_table_half_schema_with_format; -- {serverError UNKNOWN_IDENTIFIER} + +-- hive with partition id placeholder +CREATE TABLE t_03363_s3_sink (year UInt16, country String, counter UInt8) +ENGINE = S3(s3_conn, filename = 't_03363_parquet/{_partition_id}', format = Parquet, partition_strategy='hive') +PARTITION BY (year, country); -- {serverError BAD_ARGUMENTS}; + +-- unknown partitioning style +CREATE TABLE t_03363_s3_sink (year UInt16, country String, counter UInt8) +ENGINE = S3(s3_conn, filename = 't_03363_parquet', format = Parquet, partition_strategy='abc') +PARTITION BY (year, country); -- {serverError BAD_ARGUMENTS}; + +-- hive partition strategy can't be used without partition by clause +CREATE TABLE t_03363_s3_err (year UInt16, country String, counter UInt8) +ENGINE = S3(s3_conn, filename = 't_03363_parquet', partition_strategy='hive', format=Parquet); -- {serverError BAD_ARGUMENTS} + +-- hive partition strategy can't be used without partition by clause +INSERT INTO FUNCTION s3(s3_conn, filename = 't_03363_parquet', partition_strategy='hive', format=Parquet) VALUES 1; -- {serverError BAD_ARGUMENTS} + +-- hive partition strategy can't be used with globbed path +CREATE TABLE t_03363_s3_err (year UInt16, country String, counter UInt8) +ENGINE = S3(s3_conn, filename = 't_03363_parquet/**', partition_strategy='hive', format=Parquet); -- {serverError BAD_ARGUMENTS} + +-- hive partition strategy can't be used with globbed path +INSERT INTO FUNCTION s3(s3_conn, filename = 't_03363_parquet/**', partition_strategy='hive', format=Parquet) VALUES 1; -- {serverError BAD_ARGUMENTS} + +-- partition_columns_in_data_file can't be zero for a non-hive strategy +CREATE TABLE t_03363_s3_err (year UInt16, country String, counter UInt8) +ENGINE = S3(s3_conn, filename = 't_03363_parquet{_partition_id}', partition_strategy='wildcard', format=Parquet, partition_columns_in_data_file=0) +PARTITION BY (year, country); -- {serverError BAD_ARGUMENTS} + +-- partition_columns_in_data_file can't be zero for a non-hive strategy +CREATE TABLE t_03363_s3_err (year UInt16, country String, counter UInt8) +ENGINE = S3(s3_conn, filename = 't_03363_parquet', format=Parquet, partition_columns_in_data_file=0) PARTITION BY (year, country); -- {serverError BAD_ARGUMENTS} + +-- hive partition strategy can't be set in a select statement
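+-- (on the read side, hive partition columns are instead inferred from the file path
+-- when the use_hive_partitioning setting is enabled; a sketch, reusing the
+-- t_03363_function prefix written above:
+--   select counter from s3(s3_conn, filename='t_03363_function/**.parquet') settings use_hive_partitioning=1;
+-- so passing partition_strategy to a read is rejected)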
+select * from s3(s3_conn, filename='t_03363_function_write_down_partition_columns/**.parquet', format=Parquet, partition_strategy='hive'); -- {serverError BAD_ARGUMENTS} + +-- expressions are not supported in hive partitioning +CREATE TABLE t_invalid_expression (year UInt16, country String, counter UInt8) + ENGINE = S3(s3_conn, filename = 'invalid', format = Parquet, partition_strategy='hive') + PARTITION BY toString(year); -- {serverError BAD_ARGUMENTS} + +-- floating-point types are not supported +CREATE TABLE t_invalid_expression (year UInt16, country String, counter Float64) + ENGINE = S3(s3_conn, filename = 'invalid', format = Parquet, partition_strategy='hive') + PARTITION BY counter; -- {serverError BAD_ARGUMENTS} + +-- Data-lake-like engines do not support the `partition_strategy` argument +CREATE TABLE t_03363_iceberg ENGINE=IcebergS3(s3_conn, filename = 'iceberg_data/default/t_iceberg/', format='parquet', url = 'http://minio1:9001/bucket/', partition_strategy='WILDCARD'); -- {serverError BAD_ARGUMENTS} +CREATE TABLE t_03363_iceberg ENGINE=IcebergS3(s3_conn, filename = 'iceberg_data/default/t_iceberg/', format='parquet', url = 'http://minio1:9001/bucket/', partition_strategy='HIVE'); -- {serverError BAD_ARGUMENTS} + +-- Should throw because the format is missing (it is mandatory for the hive strategy), and the error should not be a LOGICAL_ERROR +CREATE TABLE t_03363_hive_requires_format (c0 Int) ENGINE = S3(s3_conn, partition_strategy = 'hive') PARTITION BY (c0); -- {serverError BAD_ARGUMENTS} + +-- Should throw because the hive strategy does not allow partition columns to be the only columns +CREATE TABLE t_03363_hive_only_partition_columns (country String, year UInt16) ENGINE = S3(s3_conn, partition_strategy='hive') PARTITION BY (year, country); -- {serverError BAD_ARGUMENTS}; + +DROP TABLE IF EXISTS t_03363_parquet, t_03363_csv, s3_table_half_schema_with_format; diff --git a/parser/testdata/03363_qbit_create_insert_select/ast.json b/parser/testdata/03363_qbit_create_insert_select/ast.json new file mode 100644 index 000000000..ddf343fbf --- /dev/null +++ b/parser/testdata/03363_qbit_create_insert_select/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001261827, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03363_qbit_create_insert_select/metadata.json b/parser/testdata/03363_qbit_create_insert_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03363_qbit_create_insert_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03363_qbit_create_insert_select/query.sql b/parser/testdata/03363_qbit_create_insert_select/query.sql new file mode 100644 index 000000000..919729b39 --- /dev/null +++ b/parser/testdata/03363_qbit_create_insert_select/query.sql @@ -0,0 +1,134 @@ +SET allow_experimental_qbit_type = 1; + +-- Tests when number of elements in QBit % 8 == 0 +-- Also tests the default value of QBit when no default is specified (when inserting data without `vec`) +SELECT 'Tests when number of elements in QBit % 8 == 0'; +
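+-- Illustrative aside (an assumption about the representation, not something this file asserts): QBit(T, N) appears to store the N values of T bit-sliced, one bit-plane subcolumn per bit of T, which is why a later test in this change addresses [1,2,3]::QBit(BFloat16, 3).1 through .16. A minimal probe of the first bit-plane: +-- SELECT bin(CAST([0.1, 0.2], 'QBit(Float64, 2)').1); +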
+-- 16-bit QBit +SELECT 'TEST 16-bit QBit'; +DROP TABLE IF EXISTS qbits_16; +CREATE TABLE qbits_16 (id UInt32, vec QBit(BFloat16, 16)) ENGINE = Memory; + +INSERT INTO qbits_16 VALUES (1, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); +INSERT INTO qbits_16 VALUES (2, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -32]); +INSERT INTO qbits_16 (id) VALUES (3); +INSERT INTO qbits_16 VALUES (4, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); -- { error TYPE_MISMATCH } + + +SELECT * FROM qbits_16 ORDER BY id; +DROP TABLE qbits_16; + +-- 32-bit QBit +SELECT 'TEST 32-bit QBit'; +DROP TABLE IF EXISTS qbits_32; +CREATE TABLE qbits_32 (id UInt32, vec QBit(Float32, 16)) ENGINE = Memory; + +INSERT INTO qbits_32 VALUES (1, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); +INSERT INTO qbits_32 VALUES (2, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -32]); +INSERT INTO qbits_32 (id) VALUES (3); +INSERT INTO qbits_32 VALUES (4, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); -- { error TYPE_MISMATCH } + +SELECT * FROM qbits_32 ORDER BY id; +DROP TABLE qbits_32; + +-- 64-bit QBit +SELECT 'TEST 64-bit QBit'; +DROP TABLE IF EXISTS qbits_64; +CREATE TABLE qbits_64 (id UInt32, vec QBit(Float64, 16)) ENGINE = Memory; + +INSERT INTO qbits_64 VALUES (1, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]); +INSERT INTO qbits_64 VALUES (2, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -32]); +INSERT INTO qbits_64 (id) VALUES (3); +INSERT INTO qbits_64 VALUES (4, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]); -- { error TYPE_MISMATCH } + +SELECT * FROM qbits_64 ORDER BY id; +DROP TABLE qbits_64; + + + +-- Tests when number of elements in QBit % 8 != 0 (here, == 1) +SELECT 'Tests when number of elements in QBit % 8 == 1'; + +-- 16-bit QBit +SELECT 'TEST 16-bit QBit'; +CREATE TABLE qbits_16 (id UInt32, vec QBit(BFloat16, 9)) ENGINE = Memory; + +INSERT INTO qbits_16 VALUES (1, [1, 2, 3, 4, 5, 6, 7, 8, 9]); +INSERT INTO qbits_16 VALUES (2, [0, 0, 0, 0, 0, 0, 0, 0, -32]); +INSERT INTO qbits_16 (id) VALUES (3); +INSERT INTO qbits_16 VALUES (4, [0]); -- { error TYPE_MISMATCH } + +SELECT * FROM qbits_16 ORDER BY id; +DROP TABLE qbits_16; + +-- 32-bit QBit +SELECT 'TEST 32-bit QBit'; +CREATE TABLE qbits_32 (id UInt32, vec QBit(Float32, 9)) ENGINE = Memory; + +INSERT INTO qbits_32 VALUES (1, [1, 2, 3, 4, 5, 6, 7, 8, 9]); +INSERT INTO qbits_32 VALUES (2, [0, 0, 0, 0, 0, 0, 0, 0, -32]); +INSERT INTO qbits_32 (id) VALUES (3); +INSERT INTO qbits_32 VALUES (4, [0]); -- { error TYPE_MISMATCH } + +SELECT * FROM qbits_32 ORDER BY id; +DROP TABLE qbits_32; + +-- 64-bit QBit +SELECT 'TEST 64-bit QBit'; +CREATE TABLE qbits_64 (id UInt32, vec QBit(Float64, 9)) ENGINE = Memory; + +INSERT INTO qbits_64 VALUES (1, [1, 2, 3, 4, 5, 6, 7, 8, 9]); +INSERT INTO qbits_64 VALUES (2, [0, 0, 0, 0, 0, 0, 0, 0, -32]); +INSERT INTO qbits_64 (id) VALUES (3); +INSERT INTO qbits_64 VALUES (4, [0]); -- { error TYPE_MISMATCH } + +SELECT * FROM qbits_64 ORDER BY id; +DROP TABLE qbits_64; + +SELECT 'TEST INSERTS THROUGH convertFieldToType'; +CREATE TABLE qbits (id UInt32, vec QBit(BFloat16, 1)) ENGINE = Memory; +INSERT INTO qbits VALUES (1, [toFloat64(1)]); +SELECT * FROM qbits; +DROP TABLE qbits; + +CREATE TABLE qbits (id UInt32, vec QBit(Float64, 1)) ENGINE = Memory; +INSERT INTO qbits VALUES (1, [toBFloat16(1)]); +SELECT * FROM qbits; +DROP TABLE qbits; + +-- Difficult test +SELECT 'Difficult test'; + +DROP TABLE IF EXISTS array_64; +DROP TABLE IF EXISTS array_32; +DROP TABLE IF EXISTS array_16; +DROP TABLE IF EXISTS qbit_64; +DROP TABLE IF EXISTS qbit_32; +DROP TABLE IF EXISTS qbit_16; + +CREATE TABLE array_64 (id UInt32, vec Array(Float64)) ENGINE = Memory; +CREATE TABLE array_32 (id UInt32, vec Array(Float32)) ENGINE = Memory; +CREATE TABLE array_16 (id UInt32, vec Array(BFloat16)) ENGINE = Memory; + +INSERT INTO array_64 VALUES (1, [-9.91396531e-02, 4.71480675e-02, 2.32308246e-02, 8.30315799e-02, 6.67378604e-02,
2.18743533e-02, -8.63768607e-02, 2.43411604e-02, 2.19195038e-02, 1.24536417e-02, -1.97167601e-02, 8.65434185e-02, 1.87990386e-02, 2.78113149e-02, 4.55952510e-02, -1.80673841e-02, 1.49496756e-02, 4.65492159e-02, 2.61444114e-02, 6.46661744e-02, -4.14983965e-02, 8.17299914e-03, 6.44170940e-02, 3.95379104e-02, 5.79034053e-02, -7.23726954e-03, -1.11746430e-01, -3.06927301e-02]); +INSERT INTO array_32 SELECT id, vec FROM array_64 WHERE id = 1; +INSERT INTO array_16 SELECT id, vec FROM array_64 WHERE id = 1; + +CREATE TABLE qbit_64 (id UInt32, vec QBit(Float64, 28)) ENGINE = Memory; +CREATE TABLE qbit_32 (id UInt32, vec QBit(Float32, 28)) ENGINE = Memory; +CREATE TABLE qbit_16 (id UInt32, vec QBit(BFloat16, 28)) ENGINE = Memory; + +INSERT INTO qbit_64 SELECT id, CAST(vec AS QBit(Float64, 28)) FROM array_64; +INSERT INTO qbit_32 SELECT id, CAST(vec AS QBit(Float32, 28)) FROM array_32; +INSERT INTO qbit_16 SELECT id, CAST(vec AS QBit(BFloat16, 28)) FROM array_16; + +SELECT * FROM qbit_64 ORDER BY id; +SELECT * FROM qbit_32 ORDER BY id; +SELECT * FROM qbit_16 ORDER BY id; + +DROP TABLE array_64; +DROP TABLE array_32; +DROP TABLE array_16; +DROP TABLE qbit_64; +DROP TABLE qbit_32; +DROP TABLE qbit_16; diff --git a/parser/testdata/03363_read_json_and_subcolumns_from_view/ast.json b/parser/testdata/03363_read_json_and_subcolumns_from_view/ast.json new file mode 100644 index 000000000..0ac7cb75d --- /dev/null +++ b/parser/testdata/03363_read_json_and_subcolumns_from_view/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00135234, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03363_read_json_and_subcolumns_from_view/metadata.json b/parser/testdata/03363_read_json_and_subcolumns_from_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03363_read_json_and_subcolumns_from_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03363_read_json_and_subcolumns_from_view/query.sql b/parser/testdata/03363_read_json_and_subcolumns_from_view/query.sql new file mode 100644 index 000000000..7ca237984 --- /dev/null +++ b/parser/testdata/03363_read_json_and_subcolumns_from_view/query.sql @@ -0,0 +1,14 @@ +set enable_json_type=1; +set enable_analyzer=1; + +drop table if exists test; +create table test (data JSON) engine=Memory; +insert into test select '{"a" : 42}'; +create view test_view as select data from test; +select * from test_view; +select data from test_view; +select data.a from test_view; +select data.b from test_view; +select data.a.:Int64 from test_view; +drop table test; + diff --git a/parser/testdata/03364_gorilla_codec_parameters/ast.json b/parser/testdata/03364_gorilla_codec_parameters/ast.json new file mode 100644 index 000000000..ffb5c7d4b --- /dev/null +++ b/parser/testdata/03364_gorilla_codec_parameters/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001414007, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03364_gorilla_codec_parameters/metadata.json b/parser/testdata/03364_gorilla_codec_parameters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03364_gorilla_codec_parameters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03364_gorilla_codec_parameters/query.sql b/parser/testdata/03364_gorilla_codec_parameters/query.sql new file mode 100644 index 000000000..2e75748e7 --- /dev/null +++ b/parser/testdata/03364_gorilla_codec_parameters/query.sql @@ -0,0 +1,9 @@ +SET allow_suspicious_codecs = 1; +CREATE TABLE 03364_gorilla (c0 String CODEC(Gorilla(1))) ENGINE = MergeTree() ORDER BY tuple(); +CREATE TABLE 03364_delta (c0 String CODEC(Delta(1))) ENGINE = MergeTree() ORDER BY tuple(); +DETACH TABLE 03364_gorilla; +DETACH TABLE 03364_delta; +ATTACH TABLE 03364_gorilla; +ATTACH TABLE 03364_delta; +DROP TABLE 03364_gorilla; +DROP TABLE 03364_delta; diff --git a/parser/testdata/03364_pretty_json_bool/ast.json b/parser/testdata/03364_pretty_json_bool/ast.json new file mode 100644 index 000000000..af1887c46 --- /dev/null +++ b/parser/testdata/03364_pretty_json_bool/ast.json @@ -0,0 +1,39 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Bool_1 (alias a)" + }, + { + "explain": " Literal Bool_0 (alias b)" + }, + { + "explain": " Identifier JSON" + }, + { + "explain": " Set" + } + ], + + "rows": 8 +} diff --git a/parser/testdata/03364_pretty_json_bool/metadata.json b/parser/testdata/03364_pretty_json_bool/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03364_pretty_json_bool/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03364_pretty_json_bool/query.sql b/parser/testdata/03364_pretty_json_bool/query.sql new file mode 100644 index 000000000..5b2c17780 --- /dev/null +++ b/parser/testdata/03364_pretty_json_bool/query.sql @@ -0,0 +1,2 @@ +select true as a, false as b format JSON settings output_format_write_statistics=0; + diff --git a/parser/testdata/03364_qbit_negative/ast.json b/parser/testdata/03364_qbit_negative/ast.json new file mode 100644 index 000000000..2e2138860 --- /dev/null +++ b/parser/testdata/03364_qbit_negative/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001610165, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03364_qbit_negative/metadata.json b/parser/testdata/03364_qbit_negative/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03364_qbit_negative/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03364_qbit_negative/query.sql b/parser/testdata/03364_qbit_negative/query.sql new file mode 100644 index 000000000..42410492a --- /dev/null +++ b/parser/testdata/03364_qbit_negative/query.sql @@ -0,0 +1,14 @@ +SET allow_experimental_qbit_type = 1; + +DROP TABLE IF EXISTS qbits; + +CREATE TABLE qbits (id UInt32, vec QBit(BFloat16, 0)) ENGINE = Memory; -- { serverError UNEXPECTED_AST_STRUCTURE } +CREATE TABLE qbits (id UInt32, vec QBit(UInt32, 2)) ENGINE = Memory; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +CREATE TABLE qbits (id UInt32, vec QBit(Float64, 1)) ENGINE = Memory; +INSERT INTO qbits VALUES (1, array(1.0)::QBit(Float64, 1)); +INSERT INTO qbits VALUES (1, array(1)); -- { error TYPE_MISMATCH } +SELECT vec::QBit(Float32, 1) FROM qbits; -- { serverError NOT_IMPLEMENTED } +SELECT 
vec::QBit(Float64, 2) FROM qbits; -- { serverError TYPE_MISMATCH } +SELECT vec::QBit(Float64, 1) FROM qbits; +DROP TABLE qbits; diff --git a/parser/testdata/03364_s3_globbed_path_in_bucket_portion/ast.json b/parser/testdata/03364_s3_globbed_path_in_bucket_portion/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03364_s3_globbed_path_in_bucket_portion/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03364_s3_globbed_path_in_bucket_portion/metadata.json b/parser/testdata/03364_s3_globbed_path_in_bucket_portion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03364_s3_globbed_path_in_bucket_portion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03364_s3_globbed_path_in_bucket_portion/query.sql b/parser/testdata/03364_s3_globbed_path_in_bucket_portion/query.sql new file mode 100644 index 000000000..4b2ee57c7 --- /dev/null +++ b/parser/testdata/03364_s3_globbed_path_in_bucket_portion/query.sql @@ -0,0 +1,30 @@ +-- Tags: no-fasttest +-- virtual hosted style +create table s3_03364 (id UInt32) engine=S3('http://{_partition_id}.s3.region.amazonaws.com/key'); -- {serverError BAD_ARGUMENTS} +create table s3_03364 (id UInt32) engine=S3('http://{_partition_id}something.s3.region.amazonaws.com/key'); -- {serverError BAD_ARGUMENTS} + +select * from s3('http://{_partition_id}.s3.region.amazonaws.com/key', 'Parquet'); -- {serverError BAD_ARGUMENTS} +select * from s3('http://{_partition_id}something.s3.region.amazonaws.com/key', 'Parquet'); -- {serverError BAD_ARGUMENTS} + +insert into table function s3('http://{_partition_id}.s3.region.amazonaws.com/key', 'NOSIGN', 'Parquet') select * from numbers(5); -- {serverError BAD_ARGUMENTS} +insert into table function s3('http://{_partition_id}something.s3.region.amazonaws.com/key', 'NOSIGN', 'Parquet') select * from numbers(5); -- {serverError BAD_ARGUMENTS} + +-- path style +create table s3_03364 (id UInt32) engine=S3('http://s3.region.amazonaws.com/{_partition_id}'); -- {serverError BAD_ARGUMENTS} +create table s3_03364 (id UInt32) engine=S3('http://s3.region.amazonaws.com/{_partition_id}/key'); -- {serverError BAD_ARGUMENTS} + +select * from s3('http://s3.region.amazonaws.com/{_partition_id}', 'Parquet'); -- {serverError BAD_ARGUMENTS} +select * from s3('http://s3.region.amazonaws.com/{_partition_id}/key', 'Parquet'); -- {serverError BAD_ARGUMENTS} + +insert into table function s3('http://s3.region.amazonaws.com/{_partition_id}', 'NOSIGN', 'Parquet') select * from numbers(5); -- {serverError BAD_ARGUMENTS} +insert into table function s3('http://s3.region.amazonaws.com/{_partition_id}/key', 'NOSIGN', 'Parquet') select * from numbers(5); -- {serverError BAD_ARGUMENTS} + +-- aws private link style +create table s3_03364 (id UInt32) engine=S3('http://bucket.vpce-07a1cd78f1bd55c5f-j3a3vg6w.s3.us-east-1.vpce.amazonaws.com/{_partition_id}'); -- {serverError BAD_ARGUMENTS} +create table s3_03364 (id UInt32) engine=S3('http://bucket.vpce-07a1cd78f1bd55c5f-j3a3vg6w.s3.us-east-1.vpce.amazonaws.com/{_partition_id}/key'); -- {serverError BAD_ARGUMENTS} + +select * from s3('http://bucket.vpce-07a1cd78f1bd55c5f-j3a3vg6w.s3.us-east-1.vpce.amazonaws.com/{_partition_id}', 'Parquet'); -- {serverError BAD_ARGUMENTS} +select * from s3('http://bucket.vpce-07a1cd78f1bd55c5f-j3a3vg6w.s3.us-east-1.vpce.amazonaws.com/{_partition_id}/key', 'Parquet'); -- {serverError BAD_ARGUMENTS} + +insert into table function 
s3('http://bucket.vpce-07a1cd78f1bd55c5f-j3a3vg6w.s3.us-east-1.vpce.amazonaws.com/{_partition_id}', 'NOSIGN', 'Parquet') select * from numbers(5); -- {serverError BAD_ARGUMENTS} +insert into table function s3('http://bucket.vpce-07a1cd78f1bd55c5f-j3a3vg6w.s3.us-east-1.vpce.amazonaws.com/{_partition_id}/key', 'NOSIGN', 'Parquet') select * from numbers(5); -- {serverError BAD_ARGUMENTS} diff --git a/parser/testdata/03364_ttl_should_recalculate_minmax_index/ast.json b/parser/testdata/03364_ttl_should_recalculate_minmax_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03364_ttl_should_recalculate_minmax_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03364_ttl_should_recalculate_minmax_index/metadata.json b/parser/testdata/03364_ttl_should_recalculate_minmax_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03364_ttl_should_recalculate_minmax_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03364_ttl_should_recalculate_minmax_index/query.sql b/parser/testdata/03364_ttl_should_recalculate_minmax_index/query.sql new file mode 100644 index 000000000..3d62de3e0 --- /dev/null +++ b/parser/testdata/03364_ttl_should_recalculate_minmax_index/query.sql @@ -0,0 +1,17 @@ +-- { echoOn } + +drop table if exists x; + +create table x (dt DateTime, i Int32) engine MergeTree partition by indexHint(dt) order by dt TTL dt + toIntervalDay(15) settings index_granularity = 8192; + +insert into x values (now(), 1), (now() - toIntervalDay(30), 2); + +optimize table x final; + +select i from x; + +select minmax_dt.1 == minmax_dt.2 from mergeTreeIndex(currentDatabase(), x, with_minmax = 1); + +select (select min(dt) from x) == (select minDistinct(dt) from x); + +drop table x; diff --git a/parser/testdata/03364_with_fill_select_from_cluster_view/ast.json b/parser/testdata/03364_with_fill_select_from_cluster_view/ast.json new file mode 100644 index 000000000..3747e57b4 --- /dev/null +++ b/parser/testdata/03364_with_fill_select_from_cluster_view/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001649646, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03364_with_fill_select_from_cluster_view/metadata.json b/parser/testdata/03364_with_fill_select_from_cluster_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03364_with_fill_select_from_cluster_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03364_with_fill_select_from_cluster_view/query.sql b/parser/testdata/03364_with_fill_select_from_cluster_view/query.sql new file mode 100644 index 000000000..15f40a1a9 --- /dev/null +++ b/parser/testdata/03364_with_fill_select_from_cluster_view/query.sql @@ -0,0 +1,11 @@ +SET param_CurrentStart='2025-02-09', param_CurrentEnd='2025-02-11'; +SET prefer_localhost_replica = 0; + +SELECT * +FROM cluster(test_shard_localhost, view( + SELECT toDate({CurrentStart:String}), dummy::Date x from system.one + order by x + ASC WITH FILL FROM toDate({CurrentStart:String}) + TO toDate({CurrentEnd:String}) + toIntervalDay(1) STEP toIntervalDay(1) + +)); diff --git a/parser/testdata/03365_bind_host/ast.json b/parser/testdata/03365_bind_host/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/03365_bind_host/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03365_bind_host/metadata.json b/parser/testdata/03365_bind_host/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_bind_host/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_bind_host/query.sql b/parser/testdata/03365_bind_host/query.sql new file mode 100644 index 000000000..52e632a05 --- /dev/null +++ b/parser/testdata/03365_bind_host/query.sql @@ -0,0 +1,20 @@ +-- Tags: shard, no-fasttest + +DROP TABLE IF EXISTS mem1; +DROP TABLE IF EXISTS dist_1; +DROP TABLE IF EXISTS dist_2; +DROP TABLE IF EXISTS dist_fail; + +CREATE TABLE mem1 (key Int) Engine=Memory(); + +CREATE TABLE dist_1 Engine=Distributed(test_shard_bind_host, currentDatabase(), mem1); + +CREATE TABLE dist_2 Engine=Distributed(test_shard_bind_host_secure, currentDatabase(), mem1); + +CREATE TABLE dist_fail Engine=Distributed(test_shard_bind_host_fail, currentDatabase(), mem1); -- { serverError NO_REMOTE_SHARD_AVAILABLE } + +DROP TABLE IF EXISTS mem1; +DROP TABLE IF EXISTS dist_1; +DROP TABLE IF EXISTS dist_2; +DROP TABLE IF EXISTS dist_fail; + diff --git a/parser/testdata/03365_csv_time_deserialization_bug/ast.json b/parser/testdata/03365_csv_time_deserialization_bug/ast.json new file mode 100644 index 000000000..945789163 --- /dev/null +++ b/parser/testdata/03365_csv_time_deserialization_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001148395, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_csv_time_deserialization_bug/metadata.json b/parser/testdata/03365_csv_time_deserialization_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_csv_time_deserialization_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_csv_time_deserialization_bug/query.sql b/parser/testdata/03365_csv_time_deserialization_bug/query.sql new file mode 100644 index 000000000..10f2f593b --- /dev/null +++ b/parser/testdata/03365_csv_time_deserialization_bug/query.sql @@ -0,0 +1,31 @@ +SET enable_time_time64_type = 1; +SET input_format_csv_use_default_on_bad_values = 1; +SET input_format_parallel_parsing = 1; + +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 +( + c0 Int, + c1 Time, + c2 Int128, + c3 Int +) ENGINE = Memory; + +INSERT INTO t0 (c2, c1, c3, c0) VALUES + ( 1, '0:00:00', 1, 1), + (-10011714060220656711, '0:00:00', 2, 1), + ( 2, '0:00:00', 3, 2); + +-- Write to server-side user_files (relative path resolves there) +INSERT INTO FUNCTION file(currentDatabase() || '_table_time_bug.csv', 'CSV', + 'c3 Int, c2 Int128, c1 Time, c0 Int') +SELECT c3, c2, c1, c0 FROM t0; + +TRUNCATE TABLE t0; + +-- Read it back from server-side using the file() table function +-- (NOT FROM INFILE, which is client-side) +INSERT INTO t0 (c0, c1, c2, c3) +SELECT c0, c1, c2, c3 +FROM file(currentDatabase() || '_table_time_bug.csv', 'CSV', + 'c0 Int, c1 Time, c2 Int128, c3 Int'); \ No newline at end of file diff --git a/parser/testdata/03365_dynamic_column_datetime/ast.json b/parser/testdata/03365_dynamic_column_datetime/ast.json new file mode 100644 index 000000000..9b7aef925 --- /dev/null +++ b/parser/testdata/03365_dynamic_column_datetime/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + 
"data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001331053, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_dynamic_column_datetime/metadata.json b/parser/testdata/03365_dynamic_column_datetime/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_dynamic_column_datetime/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_dynamic_column_datetime/query.sql b/parser/testdata/03365_dynamic_column_datetime/query.sql new file mode 100644 index 000000000..e292b4eb4 --- /dev/null +++ b/parser/testdata/03365_dynamic_column_datetime/query.sql @@ -0,0 +1,7 @@ +set enable_dynamic_type = 1; + +drop table if exists test; +create table test (d Dynamic) engine=MergeTree order by tuple() settings min_bytes_for_wide_part=1, min_rows_for_wide_part=1; +insert into test select toDateTime64(materialize('2024-01-01'), 3, 'Asia/Istanbul'); + +drop table test; \ No newline at end of file diff --git a/parser/testdata/03365_finish_sorting_crash/ast.json b/parser/testdata/03365_finish_sorting_crash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03365_finish_sorting_crash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03365_finish_sorting_crash/metadata.json b/parser/testdata/03365_finish_sorting_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_finish_sorting_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_finish_sorting_crash/query.sql b/parser/testdata/03365_finish_sorting_crash/query.sql new file mode 100644 index 000000000..d441c6bdc --- /dev/null +++ b/parser/testdata/03365_finish_sorting_crash/query.sql @@ -0,0 +1,10 @@ +CREATE TABLE test( + key String, + val Array(String) +) engine = MergeTree +order by key; + +insert into test VALUES ('', ['xx']); +insert into test VALUES ('', []); + +SELECT key, arrayJoin(val) as res FROM test ORDER BY ALL settings max_threads = 1, read_in_order_two_level_merge_threshold = 0 format Null; diff --git a/parser/testdata/03365_if_time_time64/ast.json b/parser/testdata/03365_if_time_time64/ast.json new file mode 100644 index 000000000..b10d587fc --- /dev/null +++ b/parser/testdata/03365_if_time_time64/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001501975, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_if_time_time64/metadata.json b/parser/testdata/03365_if_time_time64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_if_time_time64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_if_time_time64/query.sql b/parser/testdata/03365_if_time_time64/query.sql new file mode 100644 index 000000000..7e1a2843c --- /dev/null +++ b/parser/testdata/03365_if_time_time64/query.sql @@ -0,0 +1,11 @@ +SET use_legacy_to_time = 0; + +SELECT number % 2 ? toTime('00:00:00') : toTime('04:05:06') FROM numbers(2); +SELECT number % 2 ? toTime('00:00:00') : materialize(toTime('04:05:06')) FROM numbers(2); +SELECT number % 2 ? materialize(toTime('00:00:00')) : toTime('04:05:06') FROM numbers(2); +SELECT number % 2 ? materialize(toTime('00:00:00')) : materialize(toTime('04:05:06')) FROM numbers(2); + +SELECT number % 2 ? 
toTime64('00:00:00', 2) : toTime64('04:05:06', 2) FROM numbers(2); +SELECT number % 2 ? toTime64('00:00:00', 2) : materialize(toTime64('04:05:06', 2)) FROM numbers(2); +SELECT number % 2 ? materialize(toTime64('00:00:00', 2)) : toTime64('04:05:06', 2) FROM numbers(2); +SELECT number % 2 ? materialize(toTime64('00:00:00', 2)) : materialize(toTime64('04:05:06', 2)) FROM numbers(2); diff --git a/parser/testdata/03365_json_with_variant_subcolumn/ast.json b/parser/testdata/03365_json_with_variant_subcolumn/ast.json new file mode 100644 index 000000000..8b1b734c6 --- /dev/null +++ b/parser/testdata/03365_json_with_variant_subcolumn/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001796656, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_json_with_variant_subcolumn/metadata.json b/parser/testdata/03365_json_with_variant_subcolumn/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_json_with_variant_subcolumn/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_json_with_variant_subcolumn/query.sql b/parser/testdata/03365_json_with_variant_subcolumn/query.sql new file mode 100644 index 000000000..b9c218a60 --- /dev/null +++ b/parser/testdata/03365_json_with_variant_subcolumn/query.sql @@ -0,0 +1,21 @@ +SET allow_experimental_variant_type = 1; +SET enable_json_type = 1; + +DROP TABLE IF EXISTS json_test; +CREATE TABLE json_test +( + `id` String, + `json` JSON( + foo Variant(String, Array(String)) + ) +) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/json_test', '1') +PARTITION BY tuple() +ORDER BY id +SETTINGS index_granularity = 8192, min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0; + +INSERT INTO json_test VALUES('1', '{"foo":"bar"}'); + +SELECT count(*) FROM json_test; + +DROP TABLE json_test; diff --git a/parser/testdata/03365_parsing_time_time64_short/ast.json b/parser/testdata/03365_parsing_time_time64_short/ast.json new file mode 100644 index 000000000..88356b10a --- /dev/null +++ b/parser/testdata/03365_parsing_time_time64_short/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001376652, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_parsing_time_time64_short/metadata.json b/parser/testdata/03365_parsing_time_time64_short/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_parsing_time_time64_short/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_parsing_time_time64_short/query.sql b/parser/testdata/03365_parsing_time_time64_short/query.sql new file mode 100644 index 000000000..6f66c5727 --- /dev/null +++ b/parser/testdata/03365_parsing_time_time64_short/query.sql @@ -0,0 +1,152 @@ +SET use_legacy_to_time = 0; + +-- Regular Time formats (MM:SS) +SELECT toTime('45:30'); +SELECT toTime('00:01'); +SELECT toTime('59:59'); + +-- Single digit minutes (M:SS) +SELECT toTime('5:30'); +SELECT toTime('9:59'); +SELECT toTime('0:01'); + +-- Seconds only (SS) +SELECT toTime('45'); +SELECT toTime('01'); +SELECT toTime('59'); + +-- Single digit seconds (S) +SELECT toTime('5'); +SELECT toTime('9'); +SELECT toTime('0'); + +-- Negative values for Time (MM:SS) +SELECT 
toTime('-45:30'); +SELECT toTime('-00:01'); +SELECT toTime('-59:59'); + +-- Negative single digit minutes (M:SS) +SELECT toTime('-5:30'); +SELECT toTime('-9:59'); +SELECT toTime('-0:01'); + +-- Negative seconds only (SS) +SELECT toTime('-45'); +SELECT toTime('-01'); +SELECT toTime('-59'); + +-- Negative single digit seconds (S) +SELECT toTime('-5'); +SELECT toTime('-9'); +SELECT toTime('-0'); + +-- Edge cases +SELECT toTime('99:99'); -- Beyond normal time limits but should parse + +-- Time64 formats with precision 0 (MM:SS) +SELECT toTime64('45:30', 0); +SELECT toTime64('00:01', 0); +SELECT toTime64('59:59', 0); + +-- Time64 formats with precision 0 (M:SS) +SELECT toTime64('5:30', 0); +SELECT toTime64('9:59', 0); +SELECT toTime64('0:01', 0); + +-- Time64 formats with precision 0 (SS) +SELECT toTime64('45', 0); +SELECT toTime64('01', 0); +SELECT toTime64('59', 0); + +-- Time64 formats with precision 0 (S) +SELECT toTime64('5', 0); +SELECT toTime64('9', 0); +SELECT toTime64('0', 0); + +-- Time64 with fractional part, precision 3 (MM:SS.fff) +SELECT toTime64('45:30.123', 3); +SELECT toTime64('00:01.456', 3); +SELECT toTime64('59:59.999', 3); + +-- Time64 with fractional part, precision 3 (M:SS.fff) +SELECT toTime64('5:30.123', 3); +SELECT toTime64('9:59.456', 3); +SELECT toTime64('0:01.789', 3); + +-- Time64 with fractional part, precision 3 (SS.fff) +SELECT toTime64('45.123', 3); +SELECT toTime64('01.456', 3); +SELECT toTime64('59.999', 3); + +-- Time64 with fractional part, precision 3 (S.fff) +SELECT toTime64('5.123', 3); +SELECT toTime64('9.456', 3); +SELECT toTime64('0.789', 3); + +-- Negative Time64 with fractional part, precision 3 (MM:SS.fff) +SELECT toTime64('-45:30.123', 3); +SELECT toTime64('-00:01.456', 3); +SELECT toTime64('-59:59.999', 3); + +-- Negative Time64 with fractional part, precision 3 (M:SS.fff) +SELECT toTime64('-5:30.123', 3); +SELECT toTime64('-9:59.456', 3); +SELECT toTime64('-0:01.789', 3); + +-- Negative Time64 with fractional part, precision 3 (SS.fff) +SELECT toTime64('-45.123', 3); +SELECT toTime64('-01.456', 3); +SELECT toTime64('-59.999', 3); + +-- Negative Time64 with fractional part, precision 3 (S.fff) +SELECT toTime64('-5.123', 3); +SELECT toTime64('-9.456', 3); +SELECT toTime64('-0.789', 3); + +-- Time64 with fractional part, precision 6 (MM:SS.ffffff) +SELECT toTime64('45:30.123456', 6); +SELECT toTime64('00:01.456789', 6); +SELECT toTime64('59:59.987654', 6); + +-- Time64 with fractional part, precision 6 (M:SS.ffffff) +SELECT toTime64('5:30.123456', 6); +SELECT toTime64('9:59.456789', 6); +SELECT toTime64('0:01.987654', 6); + +-- Time64 with fractional part, precision 6 (SS.ffffff) +SELECT toTime64('45.123456', 6); +SELECT toTime64('01.456789', 6); +SELECT toTime64('59.987654', 6); + +-- Time64 with fractional part, precision 6 (S.ffffff) +SELECT toTime64('5.123456', 6); +SELECT toTime64('9.456789', 6); +SELECT toTime64('0.987654', 6); + +-- Time64 with fractional part, precision 9 (MM:SS.fffffffff) +SELECT toTime64('45:30.123456789', 9); +SELECT toTime64('00:01.123456789', 9); +SELECT toTime64('59:59.987654321', 9); + +-- Time64 with fractional part, precision 9 (M:SS.fffffffff) +SELECT toTime64('5:30.123456789', 9); +SELECT toTime64('9:59.123456789', 9); +SELECT toTime64('0:01.987654321', 6); + +-- Time64 with fractional part, precision 9 (SS.fffffffff) +SELECT toTime64('45.123456789', 9); +SELECT toTime64('01.123456789', 9); +SELECT toTime64('59.987654321', 9); + +-- Time64 with fractional part, precision 9 (S.fffffffff) +SELECT 
toTime64('5.123456789', 9); +SELECT toTime64('9.123456789', 9); +SELECT toTime64('0.987654321', 9); + +-- Testing error cases +SELECT toTime('A:30'); -- { serverError CANNOT_PARSE_DATETIME } +SELECT toTime('45:A'); -- { serverError CANNOT_PARSE_TEXT } +SELECT toTime('45:30:15'); -- Not an error, but will be parsed as HH:MM:SS +SELECT toTime64('A:30', 3); -- { serverError CANNOT_PARSE_DATETIME } +SELECT toTime64('45:A.123', 3); -- { serverError CANNOT_PARSE_TEXT } +SELECT toTime64('45:30:15.123', 3); -- Not an error, but will be parsed as HH:MM:SS.fff diff --git a/parser/testdata/03365_qbit_casts_as_from_array/ast.json b/parser/testdata/03365_qbit_casts_as_from_array/ast.json new file mode 100644 index 000000000..48da230a8 --- /dev/null +++ b/parser/testdata/03365_qbit_casts_as_from_array/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001544909, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_qbit_casts_as_from_array/metadata.json b/parser/testdata/03365_qbit_casts_as_from_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_qbit_casts_as_from_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_qbit_casts_as_from_array/query.sql b/parser/testdata/03365_qbit_casts_as_from_array/query.sql new file mode 100644 index 000000000..f8f56aca2 --- /dev/null +++ b/parser/testdata/03365_qbit_casts_as_from_array/query.sql @@ -0,0 +1,70 @@ +SET allow_experimental_qbit_type = 1; + +SELECT 'Test Array → QBit CAST AS: Float64'; + +SELECT CAST((SELECT groupArray(number + 0.1) FROM numbers(2)) AS QBit(Float64, 2)); +SELECT CAST((SELECT groupArray(number + 0.1) FROM numbers(2)) AS QBit(Float64, 2)); +SELECT CAST((SELECT groupArray(number + 0.1) FROM numbers(2)) AS QBit(Float64, 2)); +SELECT * FROM format('Values', 'qbit QBit(Float64, 2)', '(array(0.1,1.1))'); + + +SELECT 'Test Array → QBit CAST AS: Float32'; +SELECT CAST((SELECT groupArray(toFloat32(number + 0.1)) FROM numbers(2)) AS QBit(Float32, 2)); +SELECT CAST((SELECT groupArray(toFloat32(number + 0.1)) FROM numbers(2)) AS QBit(Float32, 2)); +SELECT CAST((SELECT groupArray(toFloat32(number + 0.1)) FROM numbers(2)) AS QBit(Float32, 2)); +SELECT * FROM format('Values', 'qbit QBit(Float32, 2)', '(array(0.1,1.1))'); + + +SELECT 'Test Array → QBit CAST AS: BFloat16'; +SELECT CAST((SELECT groupArray(toBFloat16(number + 0.1)) FROM numbers(2)) AS QBit(BFloat16, 2)); +SELECT CAST((SELECT groupArray(toBFloat16(number + 0.1)) FROM numbers(2)) AS QBit(BFloat16, 2)); +SELECT CAST((SELECT groupArray(toBFloat16(number + 0.1)) FROM numbers(2)) AS QBit(BFloat16, 2)); +SELECT * FROM format('Values', 'qbit QBit(BFloat16, 2)', '(array(0.1,1.1))'); + +SELECT * FROM format('Values', 'qbit QBit(BFloat16, 3)', '(tuple([1,2,3]::QBit(BFloat16, 3).1, + [1,2,3]::QBit(BFloat16, 3).2, + [1,2,3]::QBit(BFloat16, 3).3, + [1,2,3]::QBit(BFloat16, 3).4, + [1,2,3]::QBit(BFloat16, 3).5, + [1,2,3]::QBit(BFloat16, 3).6, + [1,2,3]::QBit(BFloat16, 3).7, + [1,2,3]::QBit(BFloat16, 3).8, + [1,2,3]::QBit(BFloat16, 3).9, + [1,2,3]::QBit(BFloat16, 3).10, + [1,2,3]::QBit(BFloat16, 3).11, + [1,2,3]::QBit(BFloat16, 3).12, + [1,2,3]::QBit(BFloat16, 3).13, + [1,2,3]::QBit(BFloat16, 3).14, + [1,2,3]::QBit(BFloat16, 3).15, + [1,2,3]::QBit(BFloat16, 3).16))'); + +SELECT * FROM format('Values', 'qbit QBit(BFloat16, 3)', 
'(tuple([1,2,3,4,5,6,7,8,9]::QBit(BFloat16, 9).1, + [1,2,3,4,5,6,7,8,9]::QBit(BFloat16, 9).2, + [1,2,3,4,5,6,7,8,9]::QBit(BFloat16, 9).3, + [1,2,3,4,5,6,7,8,9]::QBit(BFloat16, 9).4, + [1,2,3,4,5,6,7,8,9]::QBit(BFloat16, 9).5, + [1,2,3,4,5,6,7,8,9]::QBit(BFloat16, 9).6, + [1,2,3,4,5,6,7,8,9]::QBit(BFloat16, 9).7, + [1,2,3,4,5,6,7,8,9]::QBit(BFloat16, 9).8, + [1,2,3,4,5,6,7,8,9]::QBit(BFloat16, 9).9, + [1,2,3,4,5,6,7,8,9]::QBit(BFloat16, 9).10, + [1,2,3,4,5,6,7,8,9]::QBit(BFloat16, 9).11, + [1,2,3,4,5,6,7,8,9]::QBit(BFloat16, 9).12, + [1,2,3,4,5,6,7,8,9]::QBit(BFloat16, 9).13, + [1,2,3,4,5,6,7,8,9]::QBit(BFloat16, 9).14, + [1,2,3,4,5,6,7,8,9]::QBit(BFloat16, 9).15, + [1,2,3,4,5,6,7,8,9]::QBit(BFloat16, 9).16))'); -- { serverError TYPE_MISMATCH } + +SELECT * FROM format('Values', 'qbit QBit(BFloat16, 9)', '(tuple([1,2,3]::QBit(BFloat16, 3).1))'); -- { serverError TYPE_MISMATCH } + + +SELECT 'Test with and without analyzer / constant QBit'; +SELECT L2DistanceTransposed([1,2,3]::QBit(Float64, 3), [1,2,3]::Array(Float64), 3) settings enable_analyzer=0; +SELECT L2DistanceTransposed([1,2,3]::QBit(Float64, 3), [1,2,3]::Array(Float64), 3) settings enable_analyzer=1; +SELECT L2DistanceTransposed(materialize([1,2,3]::QBit(Float64, 3)), [1,2,3]::Array(Float64), 3) settings enable_analyzer=0; +SELECT L2DistanceTransposed(materialize([1,2,3]::QBit(Float64, 3)), [1,2,3]::Array(Float64), 3) settings enable_analyzer=1; + +SELECT 'Difficult tests'; +SELECT bin(CAST([0.1, 0.2], 'QBit(Float64, 2)').1) AS tuple_result; +SELECT bin(CAST([0.1, 0.2], 'QBit(Float64, 2)').2) AS tuple_result; +SELECT bin(CAST([0.1, 0.2], 'QBit(Float64, 2)').3) AS tuple_result; diff --git a/parser/testdata/03365_read_negative_time_implicitly/ast.json b/parser/testdata/03365_read_negative_time_implicitly/ast.json new file mode 100644 index 000000000..5741b0fad --- /dev/null +++ b/parser/testdata/03365_read_negative_time_implicitly/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001366732, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_read_negative_time_implicitly/metadata.json b/parser/testdata/03365_read_negative_time_implicitly/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_read_negative_time_implicitly/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_read_negative_time_implicitly/query.sql b/parser/testdata/03365_read_negative_time_implicitly/query.sql new file mode 100644 index 000000000..cd4d8ac4b --- /dev/null +++ b/parser/testdata/03365_read_negative_time_implicitly/query.sql @@ -0,0 +1,7 @@ +SET enable_time_time64_type = 1; + +CREATE OR REPLACE TABLE tx (c0 Time) ENGINE = Memory; +INSERT INTO TABLE tx (c0) VALUES ('-1:00:00'); +SELECT c0 FROM tx; + +DROP TABLE tx; diff --git a/parser/testdata/03365_time64_casts/ast.json b/parser/testdata/03365_time64_casts/ast.json new file mode 100644 index 000000000..2ee857a5e --- /dev/null +++ b/parser/testdata/03365_time64_casts/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001410015, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_time64_casts/metadata.json b/parser/testdata/03365_time64_casts/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time64_casts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time64_casts/query.sql b/parser/testdata/03365_time64_casts/query.sql new file mode 100644 index 000000000..a42c7351a --- /dev/null +++ b/parser/testdata/03365_time64_casts/query.sql @@ -0,0 +1,14 @@ +SET enable_time_time64_type=1; +-- Downscale (6 -> 3), exact millisecond boundary +SELECT toString(CAST(toTime64('01:02:03.123000', 6) AS Time64(3))); + +-- Upscale (3 -> 6) +SELECT toString(CAST(toTime64('01:02:03.123', 3) AS Time64(6))); + +-- Type names of casts +SELECT toTypeName(CAST(toTime64('01:02:03.123000', 6) AS Time64(3))); +SELECT toTypeName(CAST(toTime64('01:02:03.123', 3) AS Time64(6))); + +-- Value equality on exact boundaries +SELECT CAST(toTime64('01:02:03.123000', 6) AS Time64(3)) = toTime64('01:02:03.123', 3); +SELECT CAST(toTime64('01:02:03.123', 3) AS Time64(6)) = toTime64('01:02:03.123000', 6); diff --git a/parser/testdata/03365_time64_from_datetime_timezone_respect/ast.json b/parser/testdata/03365_time64_from_datetime_timezone_respect/ast.json new file mode 100644 index 000000000..53574755c --- /dev/null +++ b/parser/testdata/03365_time64_from_datetime_timezone_respect/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001270582, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_time64_from_datetime_timezone_respect/metadata.json b/parser/testdata/03365_time64_from_datetime_timezone_respect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time64_from_datetime_timezone_respect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time64_from_datetime_timezone_respect/query.sql b/parser/testdata/03365_time64_from_datetime_timezone_respect/query.sql new file mode 100644 index 000000000..330d1c4ef --- /dev/null +++ b/parser/testdata/03365_time64_from_datetime_timezone_respect/query.sql @@ -0,0 +1,39 @@ +SET allow_experimental_time_time64_type = 1; +SET use_legacy_to_time = 0; + +SET session_timezone = 'Antarctica/DumontDUrville'; +SELECT toTime64(toDateTime(1200000), 3); +SELECT toDateTime(1200000); + +SET session_timezone = 'Cuba'; +SELECT toTime64(reinterpret(toUInt64(12345), 'DateTime64(0)'), 3); +SELECT toTime(reinterpret(toUInt64(12345), 'DateTime64(0)')); +SELECT toDateTime64(12345, 0); + +-- When DateTime has explicit timezone, toTime and toTime64 should use it, not session_timezone + +SET session_timezone = 'UTC'; +SELECT toTime64(toDateTime('2020-01-01 12:34:56', 'Europe/Moscow'), 3); +SELECT toTime(toDateTime('2020-01-01 12:34:56', 'Europe/Moscow')); + +SET session_timezone = 'America/New_York'; +SELECT toTime64(toDateTime('2020-01-01 12:34:56', 'Europe/Moscow'), 3); +SELECT toTime(toDateTime('2020-01-01 12:34:56', 'Europe/Moscow')); + +-- Same for DateTime64 with explicit timezone +SET session_timezone = 'UTC'; +SELECT toTime64(toDateTime64('2020-01-01 12:34:56.789', 3, 'Europe/Moscow'), 3); +SELECT toTime(toDateTime64('2020-01-01 12:34:56.789', 3, 'Europe/Moscow')); + +SET session_timezone = 'America/New_York'; +SELECT toTime64(toDateTime64('2020-01-01 12:34:56.789', 3, 'Europe/Moscow'), 3); +SELECT toTime(toDateTime64('2020-01-01 12:34:56.789', 3, 'Europe/Moscow')); + +-- No timezone for DateTime, but session_timezone is used +SET session_timezone = 
'America/New_York'; +SELECT toTime64(toDateTime('2020-01-01 12:34:56'), 3); +SELECT toTime(toDateTime('2020-01-01 12:34:56')); + +SET session_timezone = 'America/New_York'; +SELECT toTime64(toDateTime64('2020-01-01 12:34:56.789', 3), 3); +SELECT toTime(toDateTime64('2020-01-01 12:34:56.789', 3)); diff --git a/parser/testdata/03365_time_implicit_conversion/ast.json b/parser/testdata/03365_time_implicit_conversion/ast.json new file mode 100644 index 000000000..419fce299 --- /dev/null +++ b/parser/testdata/03365_time_implicit_conversion/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001446702, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_time_implicit_conversion/metadata.json b/parser/testdata/03365_time_implicit_conversion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time_implicit_conversion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time_implicit_conversion/query.sql b/parser/testdata/03365_time_implicit_conversion/query.sql new file mode 100644 index 000000000..52e4cb0b4 --- /dev/null +++ b/parser/testdata/03365_time_implicit_conversion/query.sql @@ -0,0 +1,14 @@ +SET allow_experimental_time_time64_type = 1; + +DROP TABLE IF EXISTS dt; + +CREATE TABLE dt +( + `time` Time, + `event_id` UInt8 +) +ENGINE = TinyLog; + +INSERT INTO dt VALUES ('100:00:00', 1), (12453, 3); + +SELECT * FROM dt WHERE time = '100:00:00'; diff --git a/parser/testdata/03365_time_in_json/ast.json b/parser/testdata/03365_time_in_json/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03365_time_in_json/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03365_time_in_json/metadata.json b/parser/testdata/03365_time_in_json/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time_in_json/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time_in_json/query.sql b/parser/testdata/03365_time_in_json/query.sql new file mode 100644 index 000000000..a0dec8454 --- /dev/null +++ b/parser/testdata/03365_time_in_json/query.sql @@ -0,0 +1,46 @@ +-- Test Time and Time64 types in JSON + +SET enable_time_time64_type = 1; + +-- Clean up +DROP TABLE IF EXISTS json_time_test; +DROP TABLE IF EXISTS json_time64_test; +DROP TABLE IF EXISTS json_splits; + +-- Time type in JSON +CREATE TABLE json_time_test +( + `json` JSON(`time_value` Time, `id` String) +) +ENGINE = Memory; + +INSERT INTO json_time_test VALUES ('{"time_value": "12:30:45", "id": "1"}'); +INSERT INTO json_time_test VALUES ('{"time_value": "01:05:10", "id": "2"}'); +INSERT INTO json_time_test VALUES ('{"time_value": "23:59:59", "id": "3"}'); + +SELECT json.time_value, json.id FROM json_time_test ORDER BY json.id; + +-- Time64 type in JSON +CREATE TABLE json_time64_test +( + `json` JSON(`time_value` Time64(3), `id` String) +) +ENGINE = Memory; + +INSERT INTO json_time64_test VALUES ('{"time_value": "12:30:45.123", "id": "1"}'); +INSERT INTO json_time64_test VALUES ('{"time_value": "01:05:10.456", "id": "2"}'); +INSERT INTO json_time64_test VALUES ('{"time_value": "23:59:59.999", "id": "3"}'); + +SELECT json.time_value, json.id FROM json_time64_test ORDER BY json.id; + +-- #82267 +CREATE TABLE json_splits +( + `json` JSON(`metric.moving_time` Time, `id` String) +) +ORDER 
BY json.id; + +INSERT INTO json_splits VALUES ('{"metric":{"moving_time":"01:23:45"}, "id": "1"}'); +INSERT INTO json_splits VALUES ('{"metric":{"moving_time":"02:10:30"}, "id": "2"}'); + +SELECT json.`metric.moving_time`, json.id FROM json_splits ORDER BY json.id; diff --git a/parser/testdata/03365_time_parsing_msan_issue/ast.json b/parser/testdata/03365_time_parsing_msan_issue/ast.json new file mode 100644 index 000000000..f7fe971a5 --- /dev/null +++ b/parser/testdata/03365_time_parsing_msan_issue/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00120228, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_time_parsing_msan_issue/metadata.json b/parser/testdata/03365_time_parsing_msan_issue/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time_parsing_msan_issue/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time_parsing_msan_issue/query.sql b/parser/testdata/03365_time_parsing_msan_issue/query.sql new file mode 100644 index 000000000..967bab1d6 --- /dev/null +++ b/parser/testdata/03365_time_parsing_msan_issue/query.sql @@ -0,0 +1,7 @@ +SET use_legacy_to_time = false; + +SELECT toTime('default', 1); -- { serverError CANNOT_PARSE_DATETIME } +SELECT toTime('-default', 1); -- { serverError CANNOT_PARSE_DATETIME } +SELECT toTime('-1:11:1', 1); -- { serverError CANNOT_PARSE_TEXT } +SELECT toTime('-1:1:11', 1); -- { serverError CANNOT_PARSE_TEXT } +SELECT toTime('111:11:1', 1); -- { serverError CANNOT_PARSE_TEXT } diff --git a/parser/testdata/03365_time_prewhere_supertype_bug/ast.json b/parser/testdata/03365_time_prewhere_supertype_bug/ast.json new file mode 100644 index 000000000..462cfdfdc --- /dev/null +++ b/parser/testdata/03365_time_prewhere_supertype_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001016918, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_time_prewhere_supertype_bug/metadata.json b/parser/testdata/03365_time_prewhere_supertype_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time_prewhere_supertype_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time_prewhere_supertype_bug/query.sql b/parser/testdata/03365_time_prewhere_supertype_bug/query.sql new file mode 100644 index 000000000..bef15c623 --- /dev/null +++ b/parser/testdata/03365_time_prewhere_supertype_bug/query.sql @@ -0,0 +1,8 @@ +SET enable_analyzer=1; + +SELECT * +PREWHERE * OR ((8 OR * OR 1) OR 1 OR (toTime64(isZeroOrNull(13), 2) <= *) OR materialize(2)) +GROUP BY + toLowCardinality(materialize(2)), + 1, + toTime64(isNullable(materialize(materialize(13))), * IS NULL) <= *; -- { serverError ILLEGAL_PREWHERE } diff --git a/parser/testdata/03365_time_time64_aggregate_functions/ast.json b/parser/testdata/03365_time_time64_aggregate_functions/ast.json new file mode 100644 index 000000000..9ccb56e49 --- /dev/null +++ b/parser/testdata/03365_time_time64_aggregate_functions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001264611, + "rows_read": 
1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_time_time64_aggregate_functions/metadata.json b/parser/testdata/03365_time_time64_aggregate_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time_time64_aggregate_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time_time64_aggregate_functions/query.sql b/parser/testdata/03365_time_time64_aggregate_functions/query.sql new file mode 100644 index 000000000..2fc914020 --- /dev/null +++ b/parser/testdata/03365_time_time64_aggregate_functions/query.sql @@ -0,0 +1,19 @@ +SET enable_time_time64_type = 1; + +CREATE TABLE dt +( + `time` Time, + `event_id` UInt8 +) +ENGINE = TinyLog; + +INSERT INTO dt VALUES ('100:00:00', 1), (12453, 3); + +SELECT max(time) +FROM dt; + +SELECT min(time) +FROM dt; + +SELECT avg(time) +FROM dt; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/03365_time_time64_as_primary_key/ast.json b/parser/testdata/03365_time_time64_as_primary_key/ast.json new file mode 100644 index 000000000..b45a8bc90 --- /dev/null +++ b/parser/testdata/03365_time_time64_as_primary_key/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001258587, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_time_time64_as_primary_key/metadata.json b/parser/testdata/03365_time_time64_as_primary_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time_time64_as_primary_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time_time64_as_primary_key/query.sql b/parser/testdata/03365_time_time64_as_primary_key/query.sql new file mode 100644 index 000000000..25c6dc045 --- /dev/null +++ b/parser/testdata/03365_time_time64_as_primary_key/query.sql @@ -0,0 +1,21 @@ +SET allow_experimental_time_time64_type = 1; +SET use_legacy_to_time = 0; + +DROP TABLE IF EXISTS test_time; +CREATE TABLE test_time (a Time, b String) engine=MergeTree order by a; +INSERT INTO test_time SELECT toTime(12 + number), toTime(12 + number)::String FROM numbers(1000000); + +SELECT a, b FROM test_time WHERE a > 12435 AND a < 12437; + +SELECT + floor(CAST(a, 'Int32') / 60) AS minute_bucket, + count(*) AS total_records, + min(a) AS min_time, + max(a) AS max_time, + groupArray(b) AS all_b_values +FROM test_time +WHERE (a > 12435) AND (a < 12437) +GROUP BY minute_bucket +ORDER BY minute_bucket ASC; + +DROP TABLE test_time; diff --git a/parser/testdata/03365_time_time64_best_effort_parsing/ast.json b/parser/testdata/03365_time_time64_best_effort_parsing/ast.json new file mode 100644 index 000000000..6421f1638 --- /dev/null +++ b/parser/testdata/03365_time_time64_best_effort_parsing/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001368797, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_time_time64_best_effort_parsing/metadata.json b/parser/testdata/03365_time_time64_best_effort_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time_time64_best_effort_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03365_time_time64_best_effort_parsing/query.sql b/parser/testdata/03365_time_time64_best_effort_parsing/query.sql new file mode 100644 index 000000000..ec42f62ff --- /dev/null +++ b/parser/testdata/03365_time_time64_best_effort_parsing/query.sql @@ -0,0 +1,5 @@ +SET enable_time_time64_type = 1, date_time_input_format = 'best_effort'; +SELECT ['2010-10-10 23:10:33']::Array(DateTime); +SELECT ['123:10:33']::Array(Time); +SELECT toString(['10:33'])::Array(Time); +SELECT ['123:10:33.123']::Array(Time64); diff --git a/parser/testdata/03365_time_time64_cap_max_time/ast.json b/parser/testdata/03365_time_time64_cap_max_time/ast.json new file mode 100644 index 000000000..b1074cd41 --- /dev/null +++ b/parser/testdata/03365_time_time64_cap_max_time/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001256415, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_time_time64_cap_max_time/metadata.json b/parser/testdata/03365_time_time64_cap_max_time/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time_time64_cap_max_time/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time_time64_cap_max_time/query.sql b/parser/testdata/03365_time_time64_cap_max_time/query.sql new file mode 100644 index 000000000..387192de4 --- /dev/null +++ b/parser/testdata/03365_time_time64_cap_max_time/query.sql @@ -0,0 +1,7 @@ +SET use_legacy_to_time = 0; + +SELECT toTime(1264724816471); +SELECT toTime(-1264724816471); + +SELECT toTime64(1264724816471122, 2); +SELECT toTime64(-1264724816471122, 2); diff --git a/parser/testdata/03365_time_time64_comparison/ast.json b/parser/testdata/03365_time_time64_comparison/ast.json new file mode 100644 index 000000000..41fa7e400 --- /dev/null +++ b/parser/testdata/03365_time_time64_comparison/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001179761, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_time_time64_comparison/metadata.json b/parser/testdata/03365_time_time64_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time_time64_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time_time64_comparison/query.sql b/parser/testdata/03365_time_time64_comparison/query.sql new file mode 100644 index 000000000..c4926e5af --- /dev/null +++ b/parser/testdata/03365_time_time64_comparison/query.sql @@ -0,0 +1,174 @@ +SET use_legacy_to_time = 0; + +-- TIME AND TIME +-- Both positive +SELECT toTime(12) > toTime(13); +SELECT toTime(13) > toTime(12); +SELECT toTime(12) > toTime(12); + +SELECT toTime(12) < toTime(13); +SELECT toTime(13) < toTime(12); +SELECT toTime(12) < toTime(12); + +SELECT toTime(12) == toTime(13); +SELECT toTime(13) == toTime(12); +SELECT toTime(12) == toTime(12); + +-- Both negative +SELECT toTime(-12) > toTime(-13); +SELECT toTime(-13) > toTime(-12); +SELECT toTime(-12) > toTime(-12); + +SELECT toTime(-12) < toTime(-13); +SELECT toTime(-13) < toTime(-12); +SELECT toTime(-12) < toTime(-12); + +SELECT toTime(-12) == toTime(-13); +SELECT toTime(-13) == toTime(-12); +SELECT toTime(-12) == toTime(-12); + +-- Left negative +SELECT toTime(-12) 
> toTime(13); +SELECT toTime(-13) > toTime(12); +SELECT toTime(-12) > toTime(12); + +SELECT toTime(-12) < toTime(13); +SELECT toTime(-13) < toTime(12); +SELECT toTime(-12) < toTime(12); + +SELECT toTime(-12) == toTime(13); +SELECT toTime(-13) == toTime(12); +SELECT toTime(-12) == toTime(12); + +-- Right negative +SELECT toTime(12) > toTime(-13); +SELECT toTime(13) > toTime(-12); +SELECT toTime(12) > toTime(-12); + +SELECT toTime(12) < toTime(-13); +SELECT toTime(13) < toTime(-12); +SELECT toTime(12) < toTime(-12); + +SELECT toTime(12) == toTime(-13); +SELECT toTime(13) == toTime(-12); +SELECT toTime(12) == toTime(-12); + +-- TIME64 AND TIME64 +-- Both positive +SELECT toTime64(12, 2) > toTime64(13, 2); +SELECT toTime64(13, 2) > toTime64(12, 2); +SELECT toTime64(12, 2) > toTime64(12, 2); + +SELECT toTime64(12, 2) < toTime64(13, 2); +SELECT toTime64(13, 2) < toTime64(12, 2); +SELECT toTime64(12, 2) < toTime64(12, 2); + +SELECT toTime64(12, 2) == toTime64(13, 2); +SELECT toTime64(13, 2) == toTime64(12, 2); +SELECT toTime64(12, 2) == toTime64(12, 2); + +-- Both negative +SELECT toTime64(-12, 2) > toTime64(-13, 2); +SELECT toTime64(-13, 2) > toTime64(-12, 2); +SELECT toTime64(-12, 2) > toTime64(-12, 2); + +SELECT toTime64(-12, 2) < toTime64(-13, 2); +SELECT toTime64(-13, 2) < toTime64(-12, 2); +SELECT toTime64(-12, 2) < toTime64(-12, 2); + +SELECT toTime64(-12, 2) == toTime64(-13, 2); +SELECT toTime64(-13, 2) == toTime64(-12, 2); +SELECT toTime64(-12, 2) == toTime64(-12, 2); + +-- Left negative +SELECT toTime64(-12, 2) > toTime64(13, 2); +SELECT toTime64(-13, 2) > toTime64(12, 2); +SELECT toTime64(-12, 2) > toTime64(12, 2); + +SELECT toTime64(-12, 2) < toTime64(13, 2); +SELECT toTime64(-13, 2) < toTime64(12, 2); +SELECT toTime64(-12, 2) < toTime64(12, 2); + +SELECT toTime64(-12, 2) == toTime64(13, 2); +SELECT toTime64(-13, 2) == toTime64(12, 2); +SELECT toTime64(-12, 2) == toTime64(12, 2); + +-- Right negative +SELECT toTime64(12, 2) > toTime64(-13, 2); +SELECT toTime64(13, 2) > toTime64(-12, 2); +SELECT toTime64(12, 2) > toTime64(-12, 2); + +SELECT toTime64(12, 2) < toTime64(-13, 2); +SELECT toTime64(13, 2) < toTime64(-12, 2); +SELECT toTime64(12, 2) < toTime64(-12, 2); + +SELECT toTime64(12, 2) == toTime64(-13, 2); +SELECT toTime64(13, 2) == toTime64(-12, 2); +SELECT toTime64(12, 2) == toTime64(-12, 2); + + +-- Different fractional size +SELECT toTime64(12, 2) > toTime64(13, 3); +SELECT toTime64(13, 2) > toTime64(12, 3); +SELECT toTime64(12, 2) > toTime64(12, 3); + +SELECT toTime64(12, 2) < toTime64(13, 3); +SELECT toTime64(13, 2) < toTime64(12, 3); +SELECT toTime64(12, 2) < toTime64(12, 3); + +SELECT toTime64(12, 2) == toTime64(13, 3); +SELECT toTime64(13, 2) == toTime64(12, 3); +SELECT toTime64(12, 2) == toTime64(12, 3); + +-- TIME AND TIME64 +-- Both positive +SELECT toTime(12) > toTime64(13, 2); +SELECT toTime(13) > toTime64(12, 2); +SELECT toTime(12) > toTime64(12, 2); + +SELECT toTime(12) < toTime64(13, 2); +SELECT toTime(13) < toTime64(12, 2); +SELECT toTime(12) < toTime64(12, 2); + +SELECT toTime(12) == toTime64(13, 2); +SELECT toTime(13) == toTime64(12, 2); +SELECT toTime(12) == toTime64(12, 2); + +-- Both negative +SELECT toTime(-12) > toTime64(-13, 2); +SELECT toTime(-13) > toTime64(-12, 2); +SELECT toTime(-12) > toTime64(-12, 2); + +SELECT toTime(-12) < toTime64(-13, 2); +SELECT toTime(-13) < toTime64(-12, 2); +SELECT toTime(-12) < toTime64(-12, 2); + +SELECT toTime(-12) == toTime64(-13, 2); +SELECT toTime(-13) == toTime64(-12, 2); +SELECT toTime(-12) == toTime64(-12, 2); + +-- 
Left negative +SELECT toTime(-12) > toTime64(13, 2); +SELECT toTime(-13) > toTime64(12, 2); +SELECT toTime(-12) > toTime64(12, 2); + +SELECT toTime(-12) < toTime64(13, 2); +SELECT toTime(-13) < toTime64(12, 2); +SELECT toTime(-12) < toTime64(12, 2); + +SELECT toTime(-12) == toTime64(13, 2); +SELECT toTime(-13) == toTime64(12, 2); +SELECT toTime(-12) == toTime64(12, 2); + +-- Right negative +SELECT toTime(12) > toTime64(-13, 2); +SELECT toTime(13) > toTime64(-12, 2); +SELECT toTime(12) > toTime64(-12, 2); + +SELECT toTime(12) < toTime64(-13, 2); +SELECT toTime(13) < toTime64(-12, 2); +SELECT toTime(12) < toTime64(-12, 2); + +SELECT toTime(12) == toTime64(-13, 2); +SELECT toTime(13) == toTime64(-12, 2); +SELECT toTime(12) == toTime64(-12, 2); diff --git a/parser/testdata/03365_time_time64_conversions/ast.json b/parser/testdata/03365_time_time64_conversions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03365_time_time64_conversions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03365_time_time64_conversions/metadata.json b/parser/testdata/03365_time_time64_conversions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time_time64_conversions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time_time64_conversions/query.sql b/parser/testdata/03365_time_time64_conversions/query.sql new file mode 100644 index 000000000..6ccac0995 --- /dev/null +++ b/parser/testdata/03365_time_time64_conversions/query.sql @@ -0,0 +1,302 @@ +-- echoOn + +SET session_timezone = 'UTC'; +SET allow_experimental_time_time64_type = 1; +SET use_legacy_to_time = 0; + +-- Conversion from Time to String +SELECT toTime(0)::String; +SELECT toTime(12)::String; +SELECT toTime(3600)::String; + +-- Conversion from numeric to Time +-- Int, using toTime +SELECT toTime(0); +SELECT toTime(12); +SELECT toTime(3600); +SELECT toTime(360000); +SELECT toTime(-12); +SELECT toTime(-3600); +SELECT toTime(-360000); +SELECT toTime(999999999); + +-- Float, using toTime +SELECT toTime(0.1); +SELECT toTime(12.1); +SELECT toTime(3600.1); +SELECT toTime(360000.1); +SELECT toTime(-12.1); +SELECT toTime(-3600.1); +SELECT toTime(-360000.1); +SELECT toTime(999999999.1); + +-- UInt16 +SELECT 0::UInt16::Time; +SELECT 12::UInt16::Time; +SELECT 3600::UInt16::Time; + +-- UInt32 +SELECT 0::UInt32::Time; +SELECT 12::UInt32::Time; +SELECT 3600::UInt32::Time; +SELECT 360000::UInt32::Time; + +-- UInt64 +SELECT 0::UInt64::Time; +SELECT 12::UInt64::Time; +SELECT 3600::UInt64::Time; +SELECT 360000::UInt64::Time; +SELECT 999999999::UInt64::Time; + +-- UInt128 +SELECT 0::UInt128::Time; +SELECT 12::UInt128::Time; +SELECT 3600::UInt128::Time; +SELECT 360000::UInt128::Time; +SELECT 999999999::UInt128::Time; + +-- UInt256 +SELECT 0::UInt256::Time; +SELECT 12::UInt256::Time; +SELECT 3600::UInt256::Time; +SELECT 360000::UInt256::Time; +SELECT 999999999::UInt256::Time; + +-- Int16 +SELECT 0::Int16::Time; +SELECT 12::Int16::Time; +SELECT 3600::Int16::Time; +SELECT -0::Int16::Time; +SELECT -12::Int16::Time; +SELECT -3600::Int16::Time; + +-- Int32 +SELECT 0::Int32::Time; +SELECT 12::Int32::Time; +SELECT 3600::Int32::Time; +SELECT 360000::Int32::Time; +SELECT -0::Int32::Time; +SELECT -12::Int32::Time; +SELECT -3600::Int32::Time; +SELECT -360000::Int32::Time; + +-- Int64 +SELECT 0::Int64::Time; +SELECT 12::Int64::Time; +SELECT 3600::Int64::Time; +SELECT 360000::Int64::Time; +SELECT 999999999::Int64::Time; +SELECT -0::Int64::Time; 
+SELECT -12::Int64::Time; +SELECT -3600::Int64::Time; +SELECT -360000::Int64::Time; + +-- Int128 +SELECT 0::Int128::Time; +SELECT 12::Int128::Time; +SELECT 3600::Int128::Time; +SELECT 360000::Int128::Time; +SELECT 999999999::Int128::Time; +SELECT -0::Int128::Time; +SELECT -12::Int128::Time; +SELECT -3600::Int128::Time; +SELECT -360000::Int128::Time; + +-- Int256 +SELECT 0::Int256::Time; +SELECT 12::Int256::Time; +SELECT 3600::Int256::Time; +SELECT 360000::Int256::Time; +SELECT 999999999::Int256::Time; +SELECT -0::Int256::Time; +SELECT -12::Int256::Time; +SELECT -3600::Int256::Time; +SELECT -360000::Int256::Time; + +-- Float32 +SELECT 0::Float32::Time; +SELECT 12::Float32::Time; +SELECT 3600::Float32::Time; +SELECT 360000::Float32::Time; +SELECT -0::Float32::Time; +SELECT -12::Float32::Time; +SELECT -3600::Float32::Time; +SELECT -360000::Float32::Time; + +-- Float64 +SELECT 0::Float64::Time; +SELECT 12::Float64::Time; +SELECT 3600::Float64::Time; +SELECT 360000::Float64::Time; +SELECT -0::Float64::Time; +SELECT -12::Float64::Time; +SELECT -3600::Float64::Time; +SELECT -360000::Float64::Time; + +-- Conversion from numeric to Time64 +-- Int, using toTime64 +SELECT toTime64(0, 0); +SELECT toTime64(12, 0); +SELECT toTime64(3600, 0); +SELECT toTime64(360000, 0); +SELECT toTime64(-12, 0); +SELECT toTime64(-3600, 0); +SELECT toTime64(-360000, 0); + +-- Float, using toTime64 +SELECT toTime64(0.1, 2); +SELECT toTime64(12.1, 2); +SELECT toTime64(3600.1, 2); +SELECT toTime64(360000.1, 2); +SELECT toTime64(-12.1, 2); +SELECT toTime64(-3600.1, 2); +SELECT toTime64(-360000.1, 2); + +-- UInt16 +SELECT 0::UInt16::Time64; +SELECT 12::UInt16::Time64; +SELECT 3600::UInt16::Time64; + +-- UInt32 +SELECT 0::UInt32::Time64; +SELECT 12::UInt32::Time64; +SELECT 3600::UInt32::Time64; +SELECT 360000::UInt32::Time64; + +-- UInt64 +SELECT 0::UInt64::Time64; +SELECT 12::UInt64::Time64; +SELECT 3600::UInt64::Time64; +SELECT 360000::UInt64::Time64; + +-- Int16 +SELECT 0::Int16::Time64; +SELECT 12::Int16::Time64; +SELECT 3600::Int16::Time64; +SELECT -0::Int16::Time64; +SELECT -12::Int16::Time64; +SELECT -3600::Int16::Time64; + +-- Int32 +SELECT 0::Int32::Time64; +SELECT 12::Int32::Time64; +SELECT 3600::Int32::Time64; +SELECT 360000::Int32::Time64; +SELECT -0::Int32::Time64; +SELECT -12::Int32::Time64; +SELECT -3600::Int32::Time64; +SELECT -360000::Int32::Time64; + +-- Int64 +SELECT 0::Int64::Time64; +SELECT 12::Int64::Time64; +SELECT 3600::Int64::Time64; +SELECT 360000::Int64::Time64; +SELECT 999999999::Int64::Time64; +SELECT -0::Int64::Time64; +SELECT -12::Int64::Time64; +SELECT -3600::Int64::Time64; +SELECT -360000::Int64::Time64; + +-- Float32 +SELECT 0::Float32::Time64; +SELECT 12::Float32::Time64; +SELECT 3600::Float32::Time64; +SELECT 360000::Float32::Time64; +SELECT -0::Float32::Time64; +SELECT -12::Float32::Time64; +SELECT -3600::Float32::Time64; +SELECT -360000::Float32::Time64; + +-- Float64 +SELECT 0::Float64::Time64; +SELECT 12::Float64::Time64; +SELECT 3600::Float64::Time64; +SELECT 360000::Float64::Time64; +SELECT -0::Float64::Time64; +SELECT -12::Float64::Time64; +SELECT -3600::Float64::Time64; +SELECT -360000::Float64::Time64; + +-- Conversion from DateTime to Time +SELECT toTime(toDateTime('2022-01-01 12:12:12')); +SELECT toTime(toDateTime('1970-01-01 12:12:12')); +SELECT toTime(toDateTime('2022-01-01 23:99:12')); +SELECT toTime(toDateTime('1970-01-01 23:99:12')); +SELECT toTime(toDateTime('2022-01-01 00:99:12')); +SELECT toTime(toDateTime('1970-01-01 00:99:12')); + +-- Conversion from Time to DateTime 
+SELECT toDateTime(toTime('12:12:12')); +SELECT toDateTime(toTime('23:99:12')); +SELECT toDateTime(toTime('00:99:12')); +SELECT toDateTime(toTime('100:99:12')); +SELECT toDateTime(toTime('999:59:12')); + +-- Conversion from DateTime64 to Time +SELECT toTime(toDateTime64('2022-01-01 12:12:12.123', 2)); +SELECT toTime(toDateTime64('1970-01-01 12:12:12.123', 2)); +SELECT toTime(toDateTime64('2022-01-01 23:99:12.123', 2)); +SELECT toTime(toDateTime64('1970-01-01 23:99:12.123', 2)); +SELECT toTime(toDateTime64('2022-01-01 00:99:12.123', 2)); +SELECT toTime(toDateTime64('1970-01-01 00:99:12.123', 2)); + +-- Conversion from DateTime to Time64 +SELECT toTime64(toDateTime('2022-01-01 12:12:12'), 2); +SELECT toTime64(toDateTime('1970-01-01 12:12:12'), 2); +SELECT toTime64(toDateTime('2022-01-01 23:99:12'), 2); +SELECT toTime64(toDateTime('1970-01-01 23:99:12'), 2); +SELECT toTime64(toDateTime('2022-01-01 00:99:12'), 2); +SELECT toTime64(toDateTime('1970-01-01 00:99:12'), 2); + +-- Conversion from Time64 to DateTime64 +SELECT toDateTime64(toTime64('12:12:12', 2), 2); +SELECT toDateTime64(toTime64('23:99:12', 2), 2); +SELECT toDateTime64(toTime64('00:99:12', 2), 2); +SELECT toDateTime64(toTime64('100:99:12', 2), 2); +SELECT toDateTime64(toTime64('999:59:12', 2), 2); + +-- Conversion from DateTime64 to Time64 +SELECT toTime64(toDateTime64('2022-01-01 12:12:12', 2), 2); +SELECT toTime64(toDateTime64('1970-01-01 12:12:12', 2), 2); +SELECT toTime64(toDateTime64('2022-01-01 23:99:12', 2), 2); +SELECT toTime64(toDateTime64('1970-01-01 23:99:12', 2), 2); +SELECT toTime64(toDateTime64('2022-01-01 00:99:12', 2), 2); +SELECT toTime64(toDateTime64('1970-01-01 00:99:12', 2), 2); + +-- Conversion from Date to Time +SELECT toTime(toDate('2022-01-01')); +SELECT toTime(toDate('1970-01-01')); +SELECT toTime(toDate('2022-01-01')); +SELECT toTime(toDate('1970-01-01')); +SELECT toTime(toDate('2022-01-01')); +SELECT toTime(toDate('1970-01-01')); + +-- Conversion from Date to Time64 +SELECT toTime64(toDate('2022-01-01'), 2); +SELECT toTime64(toDate('1970-01-01'), 2); +SELECT toTime64(toDate('2022-01-01'), 2); +SELECT toTime64(toDate('1970-01-01'), 2); +SELECT toTime64(toDate('2022-01-01'), 2); +SELECT toTime64(toDate('1970-01-01'), 2); + +-- Conversion from Time to Date +SELECT toDate(toTime('00:99:12')); +SELECT toDate(toTime('23:99:12')); +SELECT toDate(toTime('999:99:99')); + +-- Conversion from Time to Date32 +SELECT toDate32(toTime('00:99:12')); +SELECT toDate32(toTime('23:99:12')); +SELECT toDate32(toTime('999:99:99')); + +-- Conversion from Time64 to Date32 (result is always zero) +SELECT toDate32(toTime64('00:99:12', 2)); +SELECT toDate32(toTime64('23:99:12', 2)); +SELECT toDate32(toTime64('999:99:99', 2)); + + +-- Conversion from Time64 to Date (result is always zero) +SELECT toDate(toTime64('00:99:12', 2)); +SELECT toDate(toTime64('23:99:12', 2)); +SELECT toDate(toTime64('999:99:99', 2)); diff --git a/parser/testdata/03365_time_time64_extreme_values/ast.json b/parser/testdata/03365_time_time64_extreme_values/ast.json new file mode 100644 index 000000000..50f40cadc --- /dev/null +++ b/parser/testdata/03365_time_time64_extreme_values/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001602873, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_time_time64_extreme_values/metadata.json 
b/parser/testdata/03365_time_time64_extreme_values/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time_time64_extreme_values/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time_time64_extreme_values/query.sql b/parser/testdata/03365_time_time64_extreme_values/query.sql new file mode 100644 index 000000000..237db9a2c --- /dev/null +++ b/parser/testdata/03365_time_time64_extreme_values/query.sql @@ -0,0 +1,18 @@ +SET use_legacy_to_time = 0; + +-- Within the acceptable range +SELECT toTime('999:59:59'); +SELECT toTime64('999:59:59.999999999', 9); +SELECT toTime64('999:59:59.9999999999999', 9); +SELECT toTime('-999:59:59'); +SELECT toTime64('-999:59:59.999999999', 9); +SELECT toTime64('-999:59:59.9999999999999', 9); +SELECT toTime('0:00:00'); +SELECT toTime64('0:00:00.0', 9); +SELECT toTime64('0:00:00.0', 9); +SELECT toTime('-0:00:00'); +SELECT toTime64('-0:00:00.0', 9); +SELECT toTime64('-0:00:00.0', 9); + +-- Exceeds the acceptable range +SELECT toTime('999:99:99'); diff --git a/parser/testdata/03365_time_time64_insertion_bug/ast.json b/parser/testdata/03365_time_time64_insertion_bug/ast.json new file mode 100644 index 000000000..671ad1a00 --- /dev/null +++ b/parser/testdata/03365_time_time64_insertion_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001363787, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_time_time64_insertion_bug/metadata.json b/parser/testdata/03365_time_time64_insertion_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time_time64_insertion_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time_time64_insertion_bug/query.sql b/parser/testdata/03365_time_time64_insertion_bug/query.sql new file mode 100644 index 000000000..8ab81fc0e --- /dev/null +++ b/parser/testdata/03365_time_time64_insertion_bug/query.sql @@ -0,0 +1,6 @@ +SET enable_time_time64_type=1; + +CREATE TABLE IF NOT EXISTS t0 (c0 Time64) ENGINE = Memory; +INSERT INTO TABLE t0 (c0) FORMAT VALUES ('😂'); -- { error CANNOT_PARSE_DATETIME } + +DROP TABLE t0; diff --git a/parser/testdata/03365_time_time64_operations/ast.json b/parser/testdata/03365_time_time64_operations/ast.json new file mode 100644 index 000000000..1f88b33be --- /dev/null +++ b/parser/testdata/03365_time_time64_operations/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001604505, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_time_time64_operations/metadata.json b/parser/testdata/03365_time_time64_operations/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time_time64_operations/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time_time64_operations/query.sql b/parser/testdata/03365_time_time64_operations/query.sql new file mode 100644 index 000000000..7abb6447b --- /dev/null +++ b/parser/testdata/03365_time_time64_operations/query.sql @@ -0,0 +1,85 @@ +SET session_timezone = 'UTC'; +SET use_legacy_to_time = 0; +-- Operations <Time> + <number> +SELECT toTime(12) + 1; +SELECT toTime(12) + 25; +SELECT toTime(12) + 1.1; +SELECT 
toTime(12) + 25.2; +-- Operations <Time> - <number> +SELECT toTime(12) - 1; +SELECT toTime(12) - 25; +SELECT toTime(12) - 1.1; +SELECT toTime(12) - 25.2; +-- Operations <Time> + <INTERVAL> +SELECT toTime(12) + INTERVAL 1 NANOSECOND; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toTime(12) + INTERVAL 1 MICROSECOND; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toTime(12) + INTERVAL 1 MILLISECOND; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toTime(12) + INTERVAL 1 SECOND; +SELECT toTime(12) + INTERVAL 1 MINUTE; +SELECT toTime(12) + INTERVAL 1 HOUR; +SELECT toTime(12) + INTERVAL 1 DAY; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toTime(12) + INTERVAL 1 WEEK; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toTime(12) + INTERVAL 1 MONTH; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toTime(12) + INTERVAL 1 QUARTER; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toTime(12) + INTERVAL 1 YEAR; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- Operations <Time> - <INTERVAL> +SELECT toTime(12) - INTERVAL 1 NANOSECOND; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toTime(12) - INTERVAL 1 MICROSECOND; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toTime(12) - INTERVAL 1 MILLISECOND; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toTime(12) - INTERVAL 1 SECOND; +SELECT toTime(72) - INTERVAL 1 MINUTE; +SELECT toTime(3610) - INTERVAL 1 HOUR; +SELECT toTime(12) - INTERVAL 1 MINUTE; +SELECT toTime(12) - INTERVAL 1 HOUR; +SELECT toTime(12) - INTERVAL 1 DAY; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toTime(12) - INTERVAL 1 WEEK; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toTime(12) - INTERVAL 1 MONTH; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toTime(12) - INTERVAL 1 QUARTER; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toTime(12) - INTERVAL 1 YEAR; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- Operations <Time> % <number> +SELECT toTime(12) % 1; +SELECT toTime(12) % 7; +SELECT toTime(12) % 25; +SELECT toTime(12) % 7.1; +SELECT toTime(12) % 25.2; +-- Operations <Time> - <Time> +SELECT toTime(12) - toTime(11); +SELECT toTime(12) - toTime(2); +SELECT toTime(12) - toTime(20); + +-- Operations <Time64> + <number> +SELECT toTime64(12, 2) + 1; +SELECT toTime64(12, 2) + 25; +SELECT toTime64(12, 2) + 1.1; +SELECT toTime64(12, 2) + 25.2; +-- Operations <Time64> - <number> +SELECT toTime64(12, 2) - 1; +SELECT toTime64(12, 2) - 25; +SELECT toTime64(12, 2) - 1.1; +SELECT toTime64(12, 2) - 25.2; +-- Operations <Time64> + <INTERVAL> +SELECT toTime64(12, 2) + INTERVAL 1 NANOSECOND; +SELECT toTime64(12, 2) + INTERVAL 1 MICROSECOND; +SELECT toTime64(12, 2) + INTERVAL 1 MILLISECOND; +SELECT toTime64(12, 2) + INTERVAL 1 SECOND; +SELECT toTime64(12, 2) + INTERVAL 1 MINUTE; +SELECT toTime64(12, 2) + INTERVAL 1 HOUR; +SELECT toTime64(12, 2) + INTERVAL 1 DAY; +SELECT toTime64(12, 2) + INTERVAL 1 WEEK; +SELECT toTime64(12, 2) + INTERVAL 1 MONTH; +SELECT toTime64(12, 2) + INTERVAL 1 QUARTER; +-- Operations <Time64> - <INTERVAL> +SELECT toTime64(12, 2) - INTERVAL 1 NANOSECOND; +SELECT toTime64(12, 2) - INTERVAL 1 MICROSECOND; +SELECT toTime64(12, 2) - INTERVAL 1 MILLISECOND; +SELECT toTime64(12, 2) - INTERVAL 1 SECOND; +SELECT toTime64(72, 2) - INTERVAL 1 MINUTE; +SELECT toTime64(3610, 2) - INTERVAL 1 HOUR; +SELECT toTime64(86412, 2) - INTERVAL 1 DAY; +SELECT toTime64(604812, 2) - INTERVAL 1 WEEK; +SELECT toTime64(12, 2) - INTERVAL 1 MONTH + INTERVAL 1 MONTH; +SELECT toTime64(12, 2) - INTERVAL 1 QUARTER + INTERVAL 1 QUARTER; +-- Operations 
<Time64> - <Time64> +SELECT toTime64(12, 2) - toTime64(12, 2); +SELECT toTime64(12, 2) - toTime64(10.13, 2); +SELECT toTime64(12, 2) - toTime64(20.13, 2); diff --git a/parser/testdata/03365_time_time64_parsing/ast.json b/parser/testdata/03365_time_time64_parsing/ast.json new file mode 100644 index 000000000..18e4693bc --- /dev/null +++ b/parser/testdata/03365_time_time64_parsing/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001505039, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_time_time64_parsing/metadata.json b/parser/testdata/03365_time_time64_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time_time64_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time_time64_parsing/query.sql b/parser/testdata/03365_time_time64_parsing/query.sql new file mode 100644 index 000000000..c113b470b --- /dev/null +++ b/parser/testdata/03365_time_time64_parsing/query.sql @@ -0,0 +1,242 @@ +SET use_legacy_to_time = 0; + +-- Time with three-digit hours +SELECT toTime('000:00:01'); +SELECT toTime('001:01:01'); +SELECT toTime('100:01:01'); +SELECT toTime('999:01:01'); +SELECT toTime('999:59:59'); +-- Time with two-digit hours +SELECT toTime('00:00:01'); +SELECT toTime('01:01:01'); +SELECT toTime('10:01:01'); +SELECT toTime('99:01:01'); +SELECT toTime('99:59:59'); +-- Time with one-digit hours +SELECT toTime('0:00:01'); +SELECT toTime('1:01:01'); +SELECT toTime('0:01:01'); +SELECT toTime('9:01:01'); +SELECT toTime('9:99:99'); +-- Negative Time with three-digit hours +SELECT toTime('-000:00:01'); +SELECT toTime('-001:01:01'); +SELECT toTime('-100:01:01'); +SELECT toTime('-999:01:01'); +SELECT toTime('-999:59:59'); +-- Negative Time with two-digit hours +SELECT toTime('-00:00:01'); +SELECT toTime('-01:01:01'); +SELECT toTime('-10:01:01'); +SELECT toTime('-99:01:01'); +SELECT toTime('-99:99:99'); +-- Negative Time with one-digit hours +SELECT toTime('-0:00:01'); +SELECT toTime('-1:01:01'); +SELECT toTime('-0:01:01'); +SELECT toTime('-9:01:01'); +SELECT toTime('-9:59:59'); + +-- Testing Time with minute/second part bigger than 59. 
+SELECT toTime('-9:99:99'); +SELECT toTime('9:99:99'); + +-- NO FRACTIONAL PART (with trailing last digit) +-- Time64 with three-digit hours +SELECT toTime64('000:00:01', 0); +SELECT toTime64('001:01:01.1', 0); +SELECT toTime64('100:01:01', 0); +SELECT toTime64('999:01:01.1', 0); +SELECT toTime64('999:59:59', 0); +-- Time64 with two-digit hours +SELECT toTime64('00:00:01.1', 0); +SELECT toTime64('01:01:01', 0); +SELECT toTime64('10:01:01.1', 0); +SELECT toTime64('99:01:01', 0); +SELECT toTime64('99:59:59.1', 0); +-- Time64 with one-digit hours +SELECT toTime64('0:00:01', 0); +SELECT toTime64('1:01:01.1', 0); +SELECT toTime64('0:01:01', 0); +SELECT toTime64('9:01:01.1', 0); +SELECT toTime64('9:59:59', 0); +-- Negative Time64 with three-digit hours +SELECT toTime64('-000:00:01.1', 0); +SELECT toTime64('-001:01:01', 0); +SELECT toTime64('-100:01:01.1', 0); +SELECT toTime64('-999:01:01', 0); +SELECT toTime64('-999:59:59.1', 0); +-- Negative Time64 with two-digit hours +SELECT toTime64('-00:00:01', 0); +SELECT toTime64('-01:01:01.1', 0); +SELECT toTime64('-10:01:01', 0); +SELECT toTime64('-99:01:01.1', 0); +SELECT toTime64('-99:59:59', 0); +-- Negative Time64 with one-digit hours +SELECT toTime64('-0:00:01.1', 0); +SELECT toTime64('-1:01:01', 0); +SELECT toTime64('-0:01:01.1', 0); +SELECT toTime64('-9:01:01', 0); +SELECT toTime64('-9:59:59.1', 0); + +-- FRACTIONAL PART WITH SIZE 3 +-- Time64 with three-digit hours +SELECT toTime64('000:00:01.123', 3); +SELECT toTime64('001:01:01.1234', 3); +SELECT toTime64('100:01:01.123', 3); +SELECT toTime64('999:01:01.1234', 3); +SELECT toTime64('999:59:59.123', 3); +-- Time64 with two-digit hours +SELECT toTime64('00:00:01.123', 3); +SELECT toTime64('01:01:01.1234', 3); +SELECT toTime64('10:01:01.123', 3); +SELECT toTime64('99:01:01.1234', 3); +SELECT toTime64('99:59:59.123', 3); +-- Time64 with one-digit hours +SELECT toTime64('0:00:01.123', 3); +SELECT toTime64('1:01:01.1234', 3); +SELECT toTime64('0:01:01.123', 3); +SELECT toTime64('9:01:01.1234', 3); +SELECT toTime64('9:59:59.123', 3); +-- Negative Time64 with three-digit hours +SELECT toTime64('-000:00:01.123', 3); +SELECT toTime64('-001:01:01.1234', 3); +SELECT toTime64('-100:01:01.123', 3); +SELECT toTime64('-999:01:01.1234', 3); +SELECT toTime64('-999:59:59.123', 3); +-- Negative Time64 with two-digit hours +SELECT toTime64('-00:00:01.123', 3); +SELECT toTime64('-01:01:01.1234', 3); +SELECT toTime64('-10:01:01.123', 3); +SELECT toTime64('-99:01:01.1234', 3); +SELECT toTime64('-99:59:59.123', 3); +-- Negative Time64 with one-digit hours +SELECT toTime64('-0:00:01.123', 3); +SELECT toTime64('-1:01:01.1234', 3); +SELECT toTime64('-0:01:01.123', 3); +SELECT toTime64('-9:01:01.1234', 3); +SELECT toTime64('-9:59:59.123', 3); + +-- FRACTIONAL PART WITH SIZE 6 +-- Time64 with three-digit hours +SELECT toTime64('000:00:01.123456', 6); +SELECT toTime64('001:01:01.1234567', 6); +SELECT toTime64('100:01:01.123456', 6); +SELECT toTime64('999:01:01.1234567', 6); +SELECT toTime64('999:59:59.123456', 6); +-- Time64 with two-digit hours +SELECT toTime64('00:00:01.1234567', 6); +SELECT toTime64('01:01:01.123456', 6); +SELECT toTime64('10:01:01.1234567', 6); +SELECT toTime64('99:01:01.123456', 6); +SELECT toTime64('99:59:59.1234567', 6); +-- Time64 with one-digit hours +SELECT toTime64('0:00:01.123456', 6); +SELECT toTime64('1:01:01.1234567', 6); +SELECT toTime64('0:01:01.123456', 6); +SELECT toTime64('9:01:01.1234567', 6); +SELECT toTime64('9:59:59.123456', 6); +-- Negative Time64 with three-digit hours +SELECT 
toTime64('-000:00:01.1234567', 6); +SELECT toTime64('-001:01:01.123456', 6); +SELECT toTime64('-100:01:01.1234567', 6); +SELECT toTime64('-999:01:01.123456', 6); +SELECT toTime64('-999:59:59.1234567', 6); +-- Negative Time64 with two-digit hours +SELECT toTime64('-00:00:01.123456', 6); +SELECT toTime64('-01:01:01.1234567', 6); +SELECT toTime64('-10:01:01.123456', 6); +SELECT toTime64('-99:01:01.1234567', 6); +SELECT toTime64('-99:59:59.123456', 6); +-- Negative Time64 with one-digit hours +SELECT toTime64('-0:00:01.1234567', 6); +SELECT toTime64('-1:01:01.123456', 6); +SELECT toTime64('-0:01:01.1234567', 6); +SELECT toTime64('-9:01:01.123456', 6); +SELECT toTime64('-9:59:59.1234567', 6); + +-- FRACTIONAL PART WITH SIZE 7 +-- Time64 with three-digit hours +SELECT toTime64('000:00:01.1234567', 7); +SELECT toTime64('001:01:01.12345678', 7); +SELECT toTime64('100:01:01.1234567', 7); +SELECT toTime64('999:01:01.12345678', 7); +SELECT toTime64('999:59:59.1234567', 7); +-- Time64 with two-digit hours +SELECT toTime64('00:00:01.12345678', 7); +SELECT toTime64('01:01:01.1234567', 7); +SELECT toTime64('10:01:01.12345678', 7); +SELECT toTime64('99:01:01.1234567', 7); +SELECT toTime64('99:59:59.12345678', 7); +-- Time64 with one-digit hours +SELECT toTime64('0:00:01.1234567', 7); +SELECT toTime64('1:01:01.12345678', 7); +SELECT toTime64('0:01:01.1234567', 7); +SELECT toTime64('9:01:01.12345678', 7); +SELECT toTime64('9:59:59.1234567', 7); +-- Negative Time64 with three-digit hours +SELECT toTime64('-000:00:01.12345678', 7); +SELECT toTime64('-001:01:01.1234567', 7); +SELECT toTime64('-100:01:01.12345678', 7); +SELECT toTime64('-999:01:01.1234567', 7); +SELECT toTime64('-999:59:59.12345678', 7); +-- Negative Time64 with two-digit hours +SELECT toTime64('-00:00:01.1234567', 7); +SELECT toTime64('-01:01:01.12345678', 7); +SELECT toTime64('-10:01:01.1234567', 7); +SELECT toTime64('-99:01:01.12345678', 7); +SELECT toTime64('-99:59:59.1234567', 7); +-- Negative Time64 with one-digit hours +SELECT toTime64('-0:00:01.12345678', 7); +SELECT toTime64('-1:01:01.1234567', 7); +SELECT toTime64('-0:01:01.12345678', 7); +SELECT toTime64('-9:01:01.1234567', 7); +SELECT toTime64('-9:59:59.12345678', 7); + +-- FRACTIONAL PART WITH SIZE 9 +-- Time64 with three-digit hours +SELECT toTime64('000:00:01.1234567891', 9); +SELECT toTime64('001:01:01.123456789', 9); +SELECT toTime64('100:01:01.1234567891', 9); +SELECT toTime64('999:01:01.123456789', 9); +SELECT toTime64('999:59:59.1234567891', 9); +-- Time64 with two-digit hours +SELECT toTime64('00:00:01.123456789', 9); +SELECT toTime64('01:01:01.1234567891', 9); +SELECT toTime64('10:01:01.123456789', 9); +SELECT toTime64('99:01:01.1234567891', 9); +SELECT toTime64('99:59:59.123456789', 9); +-- Time64 with one-digit hours +SELECT toTime64('0:00:01.1234567891', 9); +SELECT toTime64('1:01:01.123456789', 9); +SELECT toTime64('0:01:01.1234567891', 9); +SELECT toTime64('9:01:01.123456789', 9); +SELECT toTime64('9:59:59.1234567891', 9); +-- Negative Time64 with three-digit hours +SELECT toTime64('-000:00:01.123456789', 9); +SELECT toTime64('-001:01:01.1234567891', 9); +SELECT toTime64('-100:01:01.123456789', 9); +SELECT toTime64('-999:01:01.1234567891', 9); +SELECT toTime64('-999:59:59.123456789', 9); +-- Negative Time64 with two-digit hours +SELECT toTime64('-00:00:01.1234567891', 9); +SELECT toTime64('-01:01:01.123456789', 9); +SELECT toTime64('-10:01:01.1234567891', 9); +SELECT toTime64('-99:01:01.123456789', 9); +SELECT toTime64('-99:59:59.1234567891', 9); +-- Negative Time64 
with one-digit hours +SELECT toTime64('-0:00:01.123456789', 9); +SELECT toTime64('-1:01:01.1234567891', 9); +SELECT toTime64('-0:01:01.123456789', 9); +SELECT toTime64('-9:01:01.1234567891', 9); +SELECT toTime64('-9:59:59.123456789', 9); + +-- Testing Time64 with minute/second part bigger than 59. +SELECT toTime64('-9:99:99', 0); +SELECT toTime64('9:99:99', 0); +SELECT toTime64('-9:99:99.1234', 3); +SELECT toTime64('9:99:99.1234', 3); + +-- Testing cases where an error is expected +SELECT toTime('a'); -- { serverError CANNOT_PARSE_DATETIME } +SELECT toTime64('a', 0); -- { serverError CANNOT_PARSE_DATETIME } diff --git a/parser/testdata/03365_time_time64_supertype/ast.json b/parser/testdata/03365_time_time64_supertype/ast.json new file mode 100644 index 000000000..699d1ef62 --- /dev/null +++ b/parser/testdata/03365_time_time64_supertype/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001106848, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_time_time64_supertype/metadata.json b/parser/testdata/03365_time_time64_supertype/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time_time64_supertype/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time_time64_supertype/query.sql b/parser/testdata/03365_time_time64_supertype/query.sql new file mode 100644 index 000000000..3c2627ea1 --- /dev/null +++ b/parser/testdata/03365_time_time64_supertype/query.sql @@ -0,0 +1,54 @@ +SET use_legacy_to_time=0; +SET enable_time_time64_type=1; +SET enable_analyzer=1; + +-- Time + Time -> Time +SELECT toTypeName([toTime('00:00:00'), toTime('00:00:01')]); + +-- Time + Time64(0) -> Time64(0) +SELECT toTypeName([toTime('00:00:00'), toTime64('00:00:01', 0)]); + +-- Time + Time64(3) -> Time64(3) +SELECT toTypeName([toTime('00:00:00'), toTime64('00:00:01', 3)]); + +-- Time64(1) + Time64(3) -> Time64(3) +SELECT toTypeName([toTime64('00:00:00.1', 1), toTime64('00:00:00.123', 3)]); + +-- Time64(6) + Time64(3) -> Time64(6) +SELECT toTypeName([toTime64('00:00:00.010000', 6), toTime64('00:00:00.123', 3)]); + +-- Nullable with Time -> Nullable(Time) +SELECT toTypeName([NULL, toTime('00:00:00')]); + +-- Nullable with Time64(6) -> Nullable(Time64(6)) +SELECT toTypeName([NULL, toTime64('00:00:00', 6)]); + +-- Mixing Nullable(Time) and Time64(6) -> Nullable(Time64(6)) +SELECT toTypeName([toTime('00:00:00'), NULL, toTime64('00:00:00', 6)]); + +-- IF(Time, Time64(6)) -> Time64(6) +SELECT toTypeName(if(1, toTime('00:00:00'), toTime64('00:00:00', 6))); + +-- IF(Time64(3), Time64(6)) -> Time64(6) +SELECT toTypeName(if(0, toTime64('00:00:00', 3), toTime64('00:00:00', 6))); + +-- multiIf with mixture -> Time64(6) +SELECT toTypeName(multiIf(1, toTime64('00:00:00', 3), 0, toTime64('00:00:00', 6), toTime('00:00:00'))); + +-- UNION ALL unifies to max scale (Time + Time64(6) -> Time64(6)); use LIMIT 1 to avoid duplicate lines +SELECT toTypeName(x) FROM +( + SELECT toTime('00:00:00') AS x + UNION ALL + SELECT toTime64('00:00:00', 6) +) +LIMIT 1; + +-- Time with Int -> NO_COMMON_TYPE +SELECT toTypeName([toTime('00:00:00'), 1]); -- { serverError NO_COMMON_TYPE } + +-- Time64 with Date -> NO_COMMON_TYPE +SELECT toTypeName([toTime64('00:00:00', 3), toDate('2020-01-01')]); -- { serverError NO_COMMON_TYPE } + +-- Time64 with DateTime -> NO_COMMON_TYPE +SELECT toTypeName([toTime64('00:00:00', 3),
toDateTime('2020-01-01 00:00:00')]); -- { serverError NO_COMMON_TYPE } diff --git a/parser/testdata/03365_time_time64_without_timezone/ast.json b/parser/testdata/03365_time_time64_without_timezone/ast.json new file mode 100644 index 000000000..a876e78a9 --- /dev/null +++ b/parser/testdata/03365_time_time64_without_timezone/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001431798, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_time_time64_without_timezone/metadata.json b/parser/testdata/03365_time_time64_without_timezone/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time_time64_without_timezone/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time_time64_without_timezone/query.sql b/parser/testdata/03365_time_time64_without_timezone/query.sql new file mode 100644 index 000000000..6e8963d35 --- /dev/null +++ b/parser/testdata/03365_time_time64_without_timezone/query.sql @@ -0,0 +1,42 @@ +SET use_legacy_to_time=0; +-- 1. Test toTime with various types of inputs and timezone parameter +SELECT toTime('12:34:56', 'UTC'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT toTime(toDateTime('2023-01-01 12:34:56'), 'UTC'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT toTime(toDateTime64('2023-01-01 12:34:56.789', 3), 'UTC'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT toTime(45296, 'UTC'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT toTime(now(), 'UTC'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +-- 2. Test toTime64 with various types of inputs and timezone parameter +SELECT toTime64('12:34:56.789', 3, 'UTC'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT toTime64(toDateTime('2023-01-01 12:34:56'), 3, 'UTC'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT toTime64(toDateTime64('2023-01-01 12:34:56.789', 3), 3, 'UTC'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT toTime64(45296789, 3, 'UTC'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT toTime64(now(), 3, 'UTC'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +-- 3. Test OrNull variants with timezone parameter +SELECT toTimeOrNull('12:34:56', 'UTC'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT toTime64OrNull('12:34:56.789', 3, 'UTC'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +-- 4. Test OrZero variants with timezone parameter +SELECT toTimeOrZero('12:34:56', 'UTC'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT toTime64OrZero('12:34:56.789', 3, 'UTC'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +-- 5. Test array inputs with timezone parameter +SELECT toTime(['12:34:56', '01:23:45'], 'UTC'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT toTime64(['12:34:56.789', '01:23:45.678'], 3, 'UTC'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +-- 6. 
Test with different timezone formats to ensure all are rejected +SELECT toTime('12:34:56', 'Europe/Moscow'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT toTime64('12:34:56.789', 3, 'Europe/Moscow'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT toTime('12:34:56', '+03:00'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT toTime64('12:34:56.789', 3, '+03:00'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +-- 7. Test for a table creation where columns are Time[64] with timezone +DROP TABLE IF EXISTS test_time; +DROP TABLE IF EXISTS test_time64; +CREATE TABLE test_time (t Time('UTC')) engine=MergeTree; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +CREATE TABLE test_time64 (t Time64(3, 'UTC')) engine=MergeTree; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- Clean up +DROP TABLE IF EXISTS test_time; +DROP TABLE IF EXISTS test_time64; diff --git a/parser/testdata/03365_time_timezone_issue/ast.json b/parser/testdata/03365_time_timezone_issue/ast.json new file mode 100644 index 000000000..70480d723 --- /dev/null +++ b/parser/testdata/03365_time_timezone_issue/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001508372, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_time_timezone_issue/metadata.json b/parser/testdata/03365_time_timezone_issue/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time_timezone_issue/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time_timezone_issue/query.sql b/parser/testdata/03365_time_timezone_issue/query.sql new file mode 100644 index 000000000..d4d55334c --- /dev/null +++ b/parser/testdata/03365_time_timezone_issue/query.sql @@ -0,0 +1,15 @@ +SET enable_time_time64_type=1; + +CREATE TEMPORARY TABLE test ( + t1 Time, + t2 Time64(3), + t3 DateTime64(3) +); + +SET session_timezone='Europe/Amsterdam'; + +INSERT INTO test VALUES (36610, 36610.111::Decimal32(3), 33010.111::Decimal32(3)); + +SELECT t1, t2, t3 FROM test; + +DROP TABLE test; diff --git a/parser/testdata/03365_time_to_time64_conv_bug/ast.json b/parser/testdata/03365_time_to_time64_conv_bug/ast.json new file mode 100644 index 000000000..bce8ef2e8 --- /dev/null +++ b/parser/testdata/03365_time_to_time64_conv_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001344147, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_time_to_time64_conv_bug/metadata.json b/parser/testdata/03365_time_to_time64_conv_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_time_to_time64_conv_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_time_to_time64_conv_bug/query.sql b/parser/testdata/03365_time_to_time64_conv_bug/query.sql new file mode 100644 index 000000000..8406f1fda --- /dev/null +++ b/parser/testdata/03365_time_to_time64_conv_bug/query.sql @@ -0,0 +1,4 @@ +SET enable_time_time64_type = 1; +SELECT toDateTime64('2025-11-18 20:25:52', 3)::Time; +SELECT toDateTime64('2025-11-18 20:25:52', 3)::Time64; +SELECT (toDateTime64('2025-11-18 20:25:52', 3)::Time)::Time64; diff --git a/parser/testdata/03365_use_legacy_to_time/ast.json 
b/parser/testdata/03365_use_legacy_to_time/ast.json new file mode 100644 index 000000000..28b45eb7d --- /dev/null +++ b/parser/testdata/03365_use_legacy_to_time/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001258972, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_use_legacy_to_time/metadata.json b/parser/testdata/03365_use_legacy_to_time/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_use_legacy_to_time/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_use_legacy_to_time/query.sql b/parser/testdata/03365_use_legacy_to_time/query.sql new file mode 100644 index 000000000..2b7ab8373 --- /dev/null +++ b/parser/testdata/03365_use_legacy_to_time/query.sql @@ -0,0 +1,10 @@ +SET session_timezone = 'UTC'; +SET use_legacy_to_time = 0; +SELECT 'Time Type Result'; +WITH toTime(toDateTime(12)) AS a +SELECT toTypeName(a), a; + +SET use_legacy_to_time = 1; +SELECT 'DateTime Type Result'; +WITH toTime(toDateTime(12)) AS a +SELECT toTypeName(a), a; diff --git a/parser/testdata/03365_variant_bool_parsing/ast.json b/parser/testdata/03365_variant_bool_parsing/ast.json new file mode 100644 index 000000000..8da0c9678 --- /dev/null +++ b/parser/testdata/03365_variant_bool_parsing/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001180794, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03365_variant_bool_parsing/metadata.json b/parser/testdata/03365_variant_bool_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03365_variant_bool_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03365_variant_bool_parsing/query.sql b/parser/testdata/03365_variant_bool_parsing/query.sql new file mode 100644 index 000000000..b501327e5 --- /dev/null +++ b/parser/testdata/03365_variant_bool_parsing/query.sql @@ -0,0 +1,35 @@ +set enable_variant_type=1; + +select 't'::Variant(String, Bool) as v, variantType(v); +select 'on'::Variant(String, Bool) as v, variantType(v); +select 'f'::Variant(String, Bool) as v, variantType(v); +select 'off'::Variant(String, Bool) as v, variantType(v); +select 'true'::Variant(String, Bool) as v, variantType(v); +select 'false'::Variant(String, Bool) as v, variantType(v); + +set allow_special_bool_values_inside_variant=1; +select 't'::Variant(String, Bool) as v, variantType(v); +select 'on'::Variant(String, Bool) as v, variantType(v); +select 'f'::Variant(String, Bool) as v, variantType(v); +select 'off'::Variant(String, Bool) as v, variantType(v); +select 'true'::Variant(String, Bool) as v, variantType(v); +select 'false'::Variant(String, Bool) as v, variantType(v); + +set allow_special_bool_values_inside_variant=0; +set cast_string_to_variant_use_inference=0; +select 't'::Variant(String, Bool) as v, variantType(v); +select 'on'::Variant(String, Bool) as v, variantType(v); +select 'f'::Variant(String, Bool) as v, variantType(v); +select 'off'::Variant(String, Bool) as v, variantType(v); +select 'true'::Variant(String, Bool) as v, variantType(v); +select 'false'::Variant(String, Bool) as v, variantType(v); + +set allow_special_bool_values_inside_variant=1; +select 't'::Variant(String, 
Bool) as v, variantType(v); +select 'on'::Variant(String, Bool) as v, variantType(v); +select 'f'::Variant(String, Bool) as v, variantType(v); +select 'off'::Variant(String, Bool) as v, variantType(v); +select 'true'::Variant(String, Bool) as v, variantType(v); +select 'false'::Variant(String, Bool) as v, variantType(v); + + diff --git a/parser/testdata/03366_bfloat16_sorting/ast.json b/parser/testdata/03366_bfloat16_sorting/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03366_bfloat16_sorting/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03366_bfloat16_sorting/metadata.json b/parser/testdata/03366_bfloat16_sorting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03366_bfloat16_sorting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03366_bfloat16_sorting/query.sql b/parser/testdata/03366_bfloat16_sorting/query.sql new file mode 100644 index 000000000..8e8a6082b --- /dev/null +++ b/parser/testdata/03366_bfloat16_sorting/query.sql @@ -0,0 +1,4 @@ +SELECT * FROM format(Values, 'x BFloat16', ' +(915309.3), (-172.4), (-1555612173), (-794649755), (9.0), (1853837358), (-752.038), (1396433067), (-807596209), (6980930564848204980), (inf), (-334802707), (-9208770324484017588), (1542974135), (8.65110), (3653113333946823132), (-0.7048), (-521.0), (8435282414075074417), (-6292128116417116397), (-1.1), (402684563), (7100378348544520322), (-1.758), (5866537631201593121), (1448199529), (-1012621), (-69567.11), (-976.8334), (-28.35), (-8547.0), (+0.0), (5106142703787698805), (8753460139368692361), (-962398.0), (-1191866042), (-3.53), (5736130821808181266), (1.5), (-2102861541), (-4318589268046737694), (+0.0), (8925059011873647780), (-156.896869), (-2882154502015236978), (-1149877585), (0.48367), (1135103111), (1842836061), (-0.02826), (-3726604190761547609), (-453901554.40), (2888527236479160511), (-7.454), (854116431), (6258347412277750223), (-1632245551464937874), (-3887.35), (8767.14), (-2026982035343500009), (10084.047), (-12.6150), (-4490844512257432684), (80.0), (4003882477879050470), (-8032628616449150130), (nan), (283.11), (-2040930083), (-1660845000), (-46.4), (2431480081340864910), (-8348560450377606243), (-8502763961981072530), (-15.67900), (230036155), (1942939659), (4794.81023), (-1639852492), (-1352376506), (-2.7), (-1.088), (-1286266918863494194), (-5474135460424898823), (-701987997), (-3677164403360004369), (7.9723935), (-6243785367315650126), (1131.4), (13.0), (88967630.99), (-4857582370574805786), (-4288683376502787204), (1299393795189677177), (827.0), (-779228479), (5278875253796893698), (-1.241), (-9.4), (8652067825981452928), (5.13222918), (-2739239481541406400), (-38.29), (-1557979152), (-457695388), (-1959634948), (-463871221), (-7491614107579678858), (7901991093387175754), (-749021875), (-563485266992292414), (7903784173186423016), (6415568457236688716), (0.0), (1041044414), (-inf), (-7.36), (1.834), (56.0), (-0.0), (nan), (1407263544961752054), (-2825675448399084472), (-704.2), (7438299440420973263), (2922890613588943306), (452195848), (-673820791), (9172.0), (-7847859370969306041), (1956615306), (144567.23), (-33.1076), (-53.91200), (-6.75), (-4448835675253045899), (2168295801156564119), (1655170692), (4316746107587705818), (-8269286236795419058), (1727302703), (-214033129), (-7.7), (nan), (-0.7), (+nan), (-4547328351920373605), (nan), (-1487104083), (1658546204), (1151636392), (416290247.08), (-4699301667687142318), (-1.7), 
(-6323293279463878865), (5855641835618078507), (6000839524613739767), (-1029895710511952591), (486712), (-3730616670168004577), (3010252056832300847), (-1929.84917), (-2253628015236776383), (-1355690956), (1.8), (1255622011), (-7633225657025354393), (-1809222556741732999), (8451912649540627017), (134.521), (-6312210627363870622), (-230600560), (9.45), (-0.8798), (-856.606), (nan), (1626812583630422427), (116036431), (-2896591025188201292), (2101820842), (2079731433), (-5311080917199036345), (-577360.40844459), (-58257.86), (-6364996476077464518), (9112295453040021646), (8734695561715260733), (-355588047), (1895493280), (349020560979472682), (914602758), (5192612204687053771), (-5241047726046761998), (1099543785538793655), (-311288720), (1633155009), (1613280409437033273), (+nan), (-10.4), (-68033.01), (-8728247972026697820), (7563764808015298425), (-0.860), (-2.0), (355270636), (181943082), (2077491986200197340), (-5196084623766115387), (-0.1280), (18.24323), (1439645894), (-3.0), (+nan), (-4920802313749507761), (9.581223), (-4977.639), (21.6613), (-1281671910), (-15.4), (-5066115181153139762), (1831511780), (3616962447275671394), (-5035837218861299864), (917383330), (+inf), (223894.264), (-96.275444), (nan), (-547606170), (-261714371), (1734489497), (1760637531924834025), (-2130171631), (99.40196), (3106130344652597122), (9084751387559563274), (-9.2), (772.20502704), (5987185449054785806), (+nan), (-461241595), (869648491758383603), (9.4), (7760123765394357048), (-979023095), (-682791909), (0.0), (-2.84), (-1887928434), (-8.602), (43.8), (-740133382), (-1628407200), (nan), (4048950104344667845), (7.923), (-2290446205925660340), (1817370121), (700.0), (1683434407), (8994999237835107052), (8158939128335705548), (-6779280.5), (520417636), (4856231070881691400), (+inf), (84333.5), (1814647746), (-1783864249), (2035802471), (70752.0), (1124938650), (5737532713519861979), (1196647506), (-4146469507106105938), (7869998886095964339), (312457241), (nan), (3663885939535420359), (-29385.769192130), (4217936517369282398), (67.9), (-1981743187), (489740628), (6528054616798929895), (-5516525484994056924), (3.6), (-1291051489), (791837371), (2559503485226905057) +') +ORDER BY x; diff --git a/parser/testdata/03366_qbit_array_map_populate/ast.json b/parser/testdata/03366_qbit_array_map_populate/ast.json new file mode 100644 index 000000000..1bc20b04d --- /dev/null +++ b/parser/testdata/03366_qbit_array_map_populate/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001457121, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03366_qbit_array_map_populate/metadata.json b/parser/testdata/03366_qbit_array_map_populate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03366_qbit_array_map_populate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03366_qbit_array_map_populate/query.sql b/parser/testdata/03366_qbit_array_map_populate/query.sql new file mode 100644 index 000000000..0d1dafc30 --- /dev/null +++ b/parser/testdata/03366_qbit_array_map_populate/query.sql @@ -0,0 +1,25 @@ +SET allow_experimental_qbit_type = 1; + +SELECT 'Test QBit population with arrayMap: Float64'; + +DROP TABLE IF EXISTS qbits; +CREATE TABLE qbits (id UInt32, vec QBit(Float64, 9)) ENGINE = Memory; +INSERT INTO qbits SELECT number + 1 AS id, arrayMap(i -> toFloat64(i + number), range(9)) AS vec 
FROM numbers(9); +SELECT * FROM qbits ORDER BY id; +DROP TABLE qbits; + + +SELECT 'Test QBit population with arrayMap: Float32'; + +CREATE TABLE qbits (id UInt32, vec QBit(Float32, 9)) ENGINE = Memory; +INSERT INTO qbits SELECT number + 1 AS id, arrayMap(i -> toFloat32(i + number), range(9)) AS vec FROM numbers(9); +SELECT * FROM qbits ORDER BY id; +DROP TABLE qbits; + + +SELECT 'Test QBit population with arrayMap: BFloat16'; + +CREATE TABLE qbits (id UInt32, vec QBit(BFloat16, 9)) ENGINE = Memory; +INSERT INTO qbits SELECT number + 1 AS id, arrayMap(i -> toBFloat16(i + number), range(9)) AS vec FROM numbers(9); +SELECT * FROM qbits ORDER BY id; +DROP TABLE qbits; diff --git a/parser/testdata/03366_with_fill_dag/ast.json b/parser/testdata/03366_with_fill_dag/ast.json new file mode 100644 index 000000000..54800e6f6 --- /dev/null +++ b/parser/testdata/03366_with_fill_dag/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001442264, + "rows_read": 5, + "bytes_read": 178 + } +} diff --git a/parser/testdata/03366_with_fill_dag/metadata.json b/parser/testdata/03366_with_fill_dag/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03366_with_fill_dag/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03366_with_fill_dag/query.sql b/parser/testdata/03366_with_fill_dag/query.sql new file mode 100644 index 000000000..32f7e92f5 --- /dev/null +++ b/parser/testdata/03366_with_fill_dag/query.sql @@ -0,0 +1,16 @@ +SELECT number +FROM numbers(10) +ORDER BY + number ASC WITH FILL STEP 1, + 'aaa' ASC +LIMIT 1 BY number; + +WITH 1 AS one +SELECT + number AS num, + one +FROM numbers(4) +ORDER BY + num ASC WITH FILL STEP 1, + one ASC +INTERPOLATE ( one AS 42 ); diff --git a/parser/testdata/03367_bfloat16_tuple_final/ast.json b/parser/testdata/03367_bfloat16_tuple_final/ast.json new file mode 100644 index 000000000..0570a38b0 --- /dev/null +++ b/parser/testdata/03367_bfloat16_tuple_final/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001233374, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03367_bfloat16_tuple_final/metadata.json b/parser/testdata/03367_bfloat16_tuple_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03367_bfloat16_tuple_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03367_bfloat16_tuple_final/query.sql b/parser/testdata/03367_bfloat16_tuple_final/query.sql new file mode 100644 index 000000000..9101e03f8 --- /dev/null +++ b/parser/testdata/03367_bfloat16_tuple_final/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Tuple(BFloat16)) ENGINE = SummingMergeTree() ORDER BY (c0); +INSERT INTO TABLE t0 (c0) VALUES ((-3023198492688736344, )), ((915309.3, )), ((-172.4, )), ((-1555612173, )), ((-794649755, )), ((9.0, )), ((1853837358, )), ((-752.038, )), ((1396433067, )), ((-807596209, )), ((6980930564848204980, )), ((inf, )), 
((-334802707, )), ((-9208770324484017588, )), ((1542974135, )), ((8.65110, )), ((3653113333946823132, )), ((-0.7048, )), ((-521.0, )), ((8435282414075074417, )), ((-6292128116417116397, )), ((-1.1, )), ((402684563, )), ((7100378348544520322, )), ((-1.758, )), ((5866537631201593121, )), ((1448199529, )), ((-1012621, )), ((-69567.11, )), ((-976.8334, )), ((-28.35, )), ((-8547.0, )), ((+0.0, )), ((5106142703787698805, )), ((8753460139368692361, )), ((-962398.0, )), ((-1191866042, )), ((-3.53, )), ((5736130821808181266, )), ((1.5, )), ((-2102861541, )), ((-4318589268046737694, )), ((+0.0, )), ((8925059011873647780, )), ((-156.896869, )), ((-2882154502015236978, )), ((-1149877585, )), ((0.48367, )), ((1135103111, )), ((1842836061, )), ((-0.02826, )), ((-3726604190761547609, )), ((-453901554.40, )), ((2888527236479160511, )), ((-7.454, )), ((854116431, )), ((6258347412277750223, )), ((-1632245551464937874, )), ((-3887.35, )), ((8767.14, )), ((-2026982035343500009, )), ((10084.047, )), ((-12.6150, )), ((-4490844512257432684, )), ((80.0, )), ((4003882477879050470, )), ((-8032628616449150130, )), ((nan, )), ((283.11, )), ((-2040930083, )), ((-1660845000, )), ((-46.4, )), ((2431480081340864910, )), ((-8348560450377606243, )), ((-8502763961981072530, )), ((-15.67900, )), ((230036155, )), ((1942939659, )), ((4794.81023, )), ((-1639852492, )), ((-1352376506, )), ((-2.7, )), ((-1.088, )), ((-1286266918863494194, )), ((-5474135460424898823, )), ((-701987997, )), ((-3677164403360004369, )), ((7.9723935, )), ((-6243785367315650126, )), ((1131.4, )), ((13.0, )), ((88967630.99, )), ((-4857582370574805786, )), ((-4288683376502787204, )), ((1299393795189677177, )), ((827.0, )), ((-779228479, )), ((5278875253796893698, )), ((-1.241, )), ((-9.4, )), ((8652067825981452928, )), ((5.13222918, )), ((-2739239481541406400, )), ((-38.29, )), ((-1557979152, )), ((-457695388, )), ((-1959634948, )), ((-463871221, )), ((-7491614107579678858, )), ((7901991093387175754, )), ((-749021875, )), ((-563485266992292414, )), ((7903784173186423016, )), ((6415568457236688716, )), ((0.0, )), ((1041044414, )), ((-inf, )), ((-7.36, )), ((1.834, )), ((56.0, )), ((-0.0, )), ((nan, )), ((1407263544961752054, )), ((-2825675448399084472, )), ((-704.2, )), ((7438299440420973263, )), ((2922890613588943306, )), ((452195848, )), ((-673820791, )), ((9172.0, )), ((-7847859370969306041, )), ((1956615306, )), ((144567.23, )), ((-33.1076, )), ((-53.91200, )), ((-6.75, )), ((-4448835675253045899, )), ((2168295801156564119, )), ((1655170692, )), ((4316746107587705818, )), ((-8269286236795419058, )), ((1727302703, )), ((-214033129, )), ((-7.7, )), ((nan, )), ((-0.7, )), ((+nan, )), ((-4547328351920373605, )), ((nan, )), ((-1487104083, )), ((1658546204, )), ((1151636392, )), ((416290247.08, )), ((-4699301667687142318, )), ((-1.7, )), ((-6323293279463878865, )), ((5855641835618078507, )), ((6000839524613739767, )), ((-1029895710511952591, )), ((486712, )), ((-3730616670168004577, )), ((3010252056832300847, )), ((-1929.84917, )), ((-2253628015236776383, )), ((-1355690956, )), ((1.8, )), ((1255622011, )), ((-7633225657025354393, )), ((-1809222556741732999, )), ((8451912649540627017, )), ((134.521, )), ((-6312210627363870622, )), ((-230600560, )), ((9.45, )), ((-0.8798, )), ((-856.606, )), ((-nan, )), ((1626812583630422427, )), ((116036431, )), ((-2896591025188201292, )), ((2101820842, )), ((2079731433, )), ((-5311080917199036345, )), ((-577360.40844459, )), ((-58257.86, )), ((-6364996476077464518, )), ((9112295453040021646, )), ((8734695561715260733, )), 
((-355588047, )), ((1895493280, )), ((349020560979472682, )), ((914602758, )), ((5192612204687053771, )), ((-5241047726046761998, )), ((1099543785538793655, )), ((-311288720, )), ((1633155009, )), ((1613280409437033273, )), ((+nan, )), ((-10.4, )), ((-68033.01, )), ((-8728247972026697820, )), ((7563764808015298425, )), ((-0.860, )), ((-2.0, )), ((355270636, )), ((181943082, )), ((2077491986200197340, )), ((-5196084623766115387, )), ((-0.1280, )), ((18.24323, )), ((1439645894, )), ((-3.0, )), ((+nan, )), ((-4920802313749507761, )), ((9.581223, )), ((-4977.639, )), ((21.6613, )), ((-1281671910, )), ((-15.4, )), ((-5066115181153139762, )), ((1831511780, )), ((3616962447275671394, )), ((-5035837218861299864, )), ((917383330, )), ((+inf, )), ((223894.264, )), ((-96.275444, )), ((-nan, )), ((-547606170, )), ((-261714371, )), ((1734489497, )), ((1760637531924834025, )), ((-2130171631, )), ((99.40196, )), ((3106130344652597122, )), ((9084751387559563274, )), ((-9.2, )), ((772.20502704, )), ((5987185449054785806, )), ((+nan, )), ((-461241595, )), ((869648491758383603, )), ((9.4, )), ((7760123765394357048, )), ((-979023095, )), ((-682791909, )), ((0.0, )), ((-2.84, )), ((-1887928434, )), ((-8.602, )), ((43.8, )), ((-740133382, )), ((-1628407200, )), ((nan, )), ((4048950104344667845, )), ((7.923, )), ((-2290446205925660340, )), ((1817370121, )), ((700.0, )), ((1683434407, )), ((8994999237835107052, )), ((8158939128335705548, )), ((-6779280.5, )), ((520417636, )), ((4856231070881691400, )), ((+inf, )), ((84333.5, )), ((1814647746, )), ((-1783864249, )), ((2035802471, )), ((70752.0, )), ((1124938650, )), ((5737532713519861979, )), ((1196647506, )), ((-4146469507106105938, )), ((7869998886095964339, )), ((312457241, )), ((-nan, )), ((3663885939535420359, )), ((-29385.769192130, )), ((4217936517369282398, )), ((67.9, )), ((-1981743187, )), ((489740628, )), ((6528054616798929895, )), ((-5516525484994056924, )), ((3.6, )), ((-1291051489, )), ((791837371, )), ((2559503485226905057, )), ((423224585269353485, )); +SELECT c0 FROM t0 FINAL; +DROP TABLE t0; diff --git a/parser/testdata/03368_bfloat16_merge_join/ast.json b/parser/testdata/03368_bfloat16_merge_join/ast.json new file mode 100644 index 000000000..977fd2a9b --- /dev/null +++ b/parser/testdata/03368_bfloat16_merge_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001385878, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03368_bfloat16_merge_join/metadata.json b/parser/testdata/03368_bfloat16_merge_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03368_bfloat16_merge_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03368_bfloat16_merge_join/query.sql b/parser/testdata/03368_bfloat16_merge_join/query.sql new file mode 100644 index 000000000..c1e64c64d --- /dev/null +++ b/parser/testdata/03368_bfloat16_merge_join/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Float64, c1 BFloat16) ENGINE = MergeTree() PARTITION BY (murmurHash3_64(c0)) PRIMARY KEY (c0); +SET join_algorithm = 'full_sorting_merge'; +INSERT INTO TABLE t0 (c0, c1) VALUES (4, -1), (-1745033997, 7), (-1940737579, nan); +SELECT count() FROM t0 t0d0 JOIN t0 t1d0 ON t1d0.c0 = t0d0.c0 JOIN t0 t2d0 ON t1d0.c1 = t2d0.c1 WHERE t1d0.c0 != t2d0.c0; +SELECT 
sum(t1d0.c0 != t2d0.c0) FROM t0 t0d0 JOIN t0 t1d0 ON t1d0.c0 = t0d0.c0 JOIN t0 t2d0 ON t1d0.c1 = t2d0.c1; +DROP TABLE t0; diff --git a/parser/testdata/03368_qbit_subcolumns/ast.json b/parser/testdata/03368_qbit_subcolumns/ast.json new file mode 100644 index 000000000..b3b071e80 --- /dev/null +++ b/parser/testdata/03368_qbit_subcolumns/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00105202, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03368_qbit_subcolumns/metadata.json b/parser/testdata/03368_qbit_subcolumns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03368_qbit_subcolumns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03368_qbit_subcolumns/query.sql b/parser/testdata/03368_qbit_subcolumns/query.sql new file mode 100644 index 000000000..f354f1133 --- /dev/null +++ b/parser/testdata/03368_qbit_subcolumns/query.sql @@ -0,0 +1,72 @@ +SET allow_experimental_qbit_type = 1; + +SELECT 'Test QBit subcolumns: corner cases'; +DROP TABLE IF EXISTS qbit; +CREATE TABLE qbit (id UInt32, vec QBit(BFloat16, 3)) ENGINE = Memory; + +INSERT INTO qbit VALUES (1, [-1.70141183460469231731687e+38, -1.70141183460469231731687e+38, -1.70141183460469231731687e+38]); +INSERT INTO qbit VALUES (2, [-1.70141183460469231731687e+38, -1.70141183460469231731687e+38, -1.70141183460469231731687e+38]); +INSERT INTO qbit VALUES (3, [-1.70141183460469231731687e+38, -1.70141183460469231731687e+38, -1.70141183460469231731687e+38]); + +select vec.0 from qbit; -- { serverError NOT_FOUND_COLUMN_IN_BLOCK } +select vec.17 from qbit; -- { serverError NOT_FOUND_COLUMN_IN_BLOCK } +select bin(vec.1) from qbit; +select bin(vec.4) from qbit; +select bin(vec.15) from qbit; +select bin(vec.16) from qbit; + + + +SELECT 'Test QBit subcolumns: BFloat16'; +DROP TABLE IF EXISTS qbit; +CREATE TABLE qbit (id UInt32, vec QBit(BFloat16, 8)) ENGINE = Memory; + +INSERT INTO qbit VALUES (1, [14660154687488, -3.03164882520939871213272e-13, 14660154687488, -3.03164882520939871213272e-13, 14660154687488, -3.03164882520939871213272e-13, 14660154687488, -3.04941266465394433815561e-13]); +INSERT INTO qbit VALUES (2, [14660154687488, -3.03164882520939871213272e-13, 14660154687488, -3.03164882520939871213272e-13, 14660154687488, -3.03164882520939871213272e-13, 14660154687488, -3.03164882520939871213272e-13]); +INSERT INTO qbit VALUES (3, [2, 0, 0, 0, 0, 0, 0, 0]); +INSERT INTO qbit VALUES (4, [-0, 0, 0, 0, 0, 0, 0, 0]); + +SELECT bin(vec.1) FROM qbit ORDER BY id; +SELECT bin(vec.2) FROM qbit ORDER BY id; +SELECT bin(vec.7) FROM qbit ORDER BY id; +SELECT bin(vec.15) FROM qbit ORDER BY id; + + + +SELECT 'Test QBit subcolumns: Float32'; +DROP TABLE IF EXISTS qbit; +CREATE TABLE qbit (id UInt32, vec QBit(Float32, 8)) ENGINE = Memory; + +INSERT INTO qbit VALUES (1, [14660154687488, -3.03164882520939871213272e-13, 14660154687488, -3.03164882520939871213272e-13, 14660154687488, -3.03164882520939871213272e-13, 14660154687488, -3.04941266465394433815561e-13]); +INSERT INTO qbit VALUES (2, [14660154687488, -3.03164882520939871213272e-13, 14660154687488, -3.03164882520939871213272e-13, 14660154687488, -3.03164882520939871213272e-13, 14660154687488, -3.03164882520939871213272e-13]); +INSERT INTO qbit VALUES (3, [2, 0, 0, 0, 0, 0, 0, 0]); +INSERT INTO qbit VALUES (4, [-0, 0, 0, 0, 0, 0, 0, 0]); + +SELECT bin(vec.1) FROM 
qbit ORDER BY id; +SELECT bin(vec.2) FROM qbit ORDER BY id; +SELECT bin(vec.7) FROM qbit ORDER BY id; +SELECT bin(vec.15) FROM qbit ORDER BY id; +SELECT bin(vec.23) FROM qbit ORDER BY id; +SELECT bin(vec.31) FROM qbit ORDER BY id; + + + +SELECT 'Test QBit subcolumns: Float64'; +DROP TABLE IF EXISTS qbit; +CREATE TABLE qbit (id UInt32, vec QBit(Float64, 8)) ENGINE = Memory; + +INSERT INTO qbit VALUES (1, [1.19453052916149551265420436166e103, -3.7206620809969885439391603375e-103, 1.19453052916149551265420436166e103, -3.7206620809969885439391603375e-103, 1.19453052916149551265420436166e103, -3.7206620809969885439391603375e-103, 1.19453052916149551265420436166e103, -3.7206620809969885439391603375e-103]); +INSERT INTO qbit VALUES (2, [1.19453052916149551265420436166e103, -3.7206620809969885439391603375e-103, 1.19453052916149551265420436166e103, -3.7206620809969885439391603375e-103, 1.19453052916149551265420436166e103, -3.7206620809969885439391603375e-103, 1.19453052916149551265420436166e103, -3.7206620809969885439391603375e-103]); +INSERT INTO qbit VALUES (3, [2, 0, 0, 0, 0, 0, 0, 0]); +INSERT INTO qbit VALUES (4, [-0, 0, 0, 0, 0, 0, 0, 0]); + +SELECT bin(vec.1) FROM qbit ORDER BY id; +SELECT bin(vec.2) FROM qbit ORDER BY id; +SELECT bin(vec.7) FROM qbit ORDER BY id; +SELECT bin(vec.15) FROM qbit ORDER BY id; +SELECT bin(vec.23) FROM qbit ORDER BY id; +SELECT bin(vec.31) FROM qbit ORDER BY id; +SELECT bin(vec.39) FROM qbit ORDER BY id; +SELECT bin(vec.47) FROM qbit ORDER BY id; +SELECT bin(vec.55) FROM qbit ORDER BY id; +SELECT bin(vec.63) FROM qbit ORDER BY id; diff --git a/parser/testdata/03369_bfloat16_map/ast.json b/parser/testdata/03369_bfloat16_map/ast.json new file mode 100644 index 000000000..f0c0f0f63 --- /dev/null +++ b/parser/testdata/03369_bfloat16_map/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001181655, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03369_bfloat16_map/metadata.json b/parser/testdata/03369_bfloat16_map/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03369_bfloat16_map/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03369_bfloat16_map/query.sql b/parser/testdata/03369_bfloat16_map/query.sql new file mode 100644 index 000000000..1f70bde38 --- /dev/null +++ b/parser/testdata/03369_bfloat16_map/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Map(Int,Int), c1 Nullable(BFloat16)) ENGINE = MergeTree() ORDER BY (c1, c0) SETTINGS allow_nullable_key = 1; +INSERT INTO TABLE t0 (c0, c1) VALUES (map(), 2), (map(), 1), (map(1, 1, 2, 2), nan); +SELECT c1 FROM t0 ORDER BY c1 ASC; +DROP TABLE t0; diff --git a/parser/testdata/03369_function_arrayLevenshtein/ast.json b/parser/testdata/03369_function_arrayLevenshtein/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03369_function_arrayLevenshtein/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03369_function_arrayLevenshtein/metadata.json b/parser/testdata/03369_function_arrayLevenshtein/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03369_function_arrayLevenshtein/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03369_function_arrayLevenshtein/query.sql 
b/parser/testdata/03369_function_arrayLevenshtein/query.sql new file mode 100644 index 000000000..36920fdb4 --- /dev/null +++ b/parser/testdata/03369_function_arrayLevenshtein/query.sql @@ -0,0 +1,106 @@ +-- arrayLevenshteinDistance +CREATE TABLE simple_levenshtein (lhs Array(UInt8), rhs Array(UInt8)) ENGINE MergeTree ORDER BY tuple(); +INSERT INTO simple_levenshtein VALUES + ([1, 2, 3, 4], [1, 2, 3, 4]), + ([1, 2, 3, 4], [1, 3, 3, 4]), + ([1, 2, 3, 4], [1, 3, 2, 4]), + ([1, 4], [1, 2, 3, 4]), + ([1, 2, 3, 4], []), + ([], [1, 3, 2, 4]), + ([], []); +SELECT arrayLevenshteinDistance(lhs, rhs) FROM simple_levenshtein; +SELECT ''; + +-- arrayLevenshteinDistance for different types +SELECT arrayLevenshteinDistance(['1', '2'], ['1']), + arrayLevenshteinDistance([toFixedString('1', 16), toFixedString('2', 16)], [toFixedString('1', 16)]), + arrayLevenshteinDistance([toUInt16(1)], [toUInt16(2), 1]), + arrayLevenshteinDistance([toFloat32(1.1), 2], [toFloat32(1.1)]), + arrayLevenshteinDistance([toFloat64(1.1), 2], [toFloat64(1.1)]), + arrayLevenshteinDistance([toDate('2025-01-01'), toDate('2025-01-02')], [toDate('2025-01-01')]); +SELECT ''; + +-- arrayLevenshteinDistanceWeighted +CREATE TABLE weighted_levenshtein (lhs Array(String), rhs Array(String), lhs_weights Array(Float64), rhs_weights Array(Float64)) ENGINE MergeTree ORDER BY tuple(); +INSERT INTO weighted_levenshtein VALUES + (['A', 'B', 'C'], ['A', 'C'], [1, 2, 3], [1, 3]), + (['A', 'C'], ['A', 'B', 'C'], [1, 3], [1, 2, 3]), + (['A', 'B'], ['A', 'C'], [1, 2], [3, 4]), + (['A', 'B', 'C'], ['A', 'K', 'L'], [1, 2, 3], [3, 4, 5]), + ([], [], [], []), + (['A', 'B'], [], [1, 2], []), + (['A', 'B'], ['A', 'B'], [1, 2], [2, 1]), + (['A', 'B'], ['C', 'D'], [1, 2], [3, 4]), + (['A', 'B', 'C'], ['C', 'B', 'A'], [1, 2, 3], [4, 5, 6]), + (['A', 'B'], ['C', 'A', 'B'], [1, 2], [4, 5, 6]), + (['A', 'B', 'C', 'D', 'E', 'F', 'G'], ['A', 'B', 'X', 'D', 'E', 'Y', 'G'], [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]); +SELECT arrayLevenshteinDistanceWeighted(lhs, rhs, lhs_weights, rhs_weights) FROM weighted_levenshtein; +SELECT ''; + +-- arrayLevenshteinDistanceWeighted for different arrays' types +SELECT arrayLevenshteinDistanceWeighted(['1', '2'], ['1'], [1., 2], [1.]), + arrayLevenshteinDistanceWeighted([toFixedString('1', 16), toFixedString('2', 16)], [toFixedString('1', 16)], [1., 2], [1.]), + arrayLevenshteinDistanceWeighted([toUInt16(1)], [toUInt16(2), 1], [1.], [2., 1]), + arrayLevenshteinDistanceWeighted([toFloat32(1.1), 2], [toFloat32(1.1)], [1., 2], [1.]), + arrayLevenshteinDistanceWeighted([toFloat64(1.1), 2], [toFloat64(1.1)], [1., 2], [1.]), + arrayLevenshteinDistanceWeighted([toDate('2025-01-01'), toDate('2025-01-02')], [toDate('2025-01-01')], [1., 2], [1.]); +SELECT ''; + +-- arrayLevenshteinDistanceWeighted for different weight types +SELECT arrayLevenshteinDistanceWeighted(['1', '2'], ['1'], [toUInt8(1), 2], [toUInt8(1)]), + arrayLevenshteinDistanceWeighted(['1', '2'], ['1'], [toUInt16(1), 2], [toUInt16(1)]), + arrayLevenshteinDistanceWeighted(['1', '2'], ['1'], [toUInt32(1), 2], [toUInt32(1)]), + arrayLevenshteinDistanceWeighted(['1', '2'], ['1'], [toUInt64(1), 2], [toUInt64(1)]), + arrayLevenshteinDistanceWeighted(['1', '2'], ['1'], [toUInt128(1), 2], [toUInt128(1)]), + arrayLevenshteinDistanceWeighted(['1', '2'], ['1'], [toUInt256(1), 2], [toUInt256(1)]), + arrayLevenshteinDistanceWeighted(['1', '2'], ['1'], [toInt8(1), toInt8(2)], [toInt8(1)]), -- both elements cast explicitly, otherwise automatic type inference would give Int16 + arrayLevenshteinDistanceWeighted(['1', '2'], ['1'], [toInt16(1),
2], [toInt16(1)]), + arrayLevenshteinDistanceWeighted(['1', '2'], ['1'], [toInt32(1), 2], [toInt32(1)]), + arrayLevenshteinDistanceWeighted(['1', '2'], ['1'], [toInt64(1), 2], [toInt64(1)]), + arrayLevenshteinDistanceWeighted(['1', '2'], ['1'], [toInt128(1), 2], [toInt128(1)]), + arrayLevenshteinDistanceWeighted(['1', '2'], ['1'], [toInt256(1), 2], [toInt256(1)]), + arrayLevenshteinDistanceWeighted(['1', '2'], ['1'], [toFloat32(1), 2], [toFloat32(1)]), + arrayLevenshteinDistanceWeighted(['1', '2'], ['1'], [toFloat64(1), 2], [toFloat64(1)]); +SELECT ''; + +-- arraySimilarity +SELECT round(arraySimilarity(lhs, rhs, lhs_weights, rhs_weights), 5) FROM weighted_levenshtein; +SELECT ''; + +-- arraySimilarity for different arrays' types +SELECT arraySimilarity(['1', '2'], ['1'], [1., 2], [1.]), + arraySimilarity([toFixedString('1', 16), toFixedString('2', 16)], [toFixedString('1', 16)], [1., 2], [1.]), + arraySimilarity([toUInt16(1)], [toUInt16(2), 1], [1.], [2., 1]), + arraySimilarity([toFloat32(1.1), 2], [toFloat32(1.1)], [1., 2], [1.]), + arraySimilarity([toFloat64(1.1), 2], [toFloat64(1.1)], [1., 2], [1.]), + arraySimilarity([toDate('2025-01-01'), toDate('2025-01-02')], [toDate('2025-01-01')], [1., 2], [1.]); +SELECT ''; + +-- arraySimilarity for different weight types +SELECT arraySimilarity(['1', '2'], ['1'], [toUInt8(1), 2], [toUInt8(1)]), + arraySimilarity(['1', '2'], ['1'], [toUInt16(1), 2], [toUInt16(1)]), + arraySimilarity(['1', '2'], ['1'], [toUInt32(1), 2], [toUInt32(1)]), + arraySimilarity(['1', '2'], ['1'], [toUInt64(1), 2], [toUInt64(1)]), + arraySimilarity(['1', '2'], ['1'], [toUInt128(1), 2], [toUInt128(1)]), + arraySimilarity(['1', '2'], ['1'], [toUInt256(1), 2], [toUInt256(1)]), + arraySimilarity(['1', '2'], ['1'], [toInt8(1), toInt8(2)], [toInt8(1)]), -- both elements cast explicitly, otherwise automatic type inference would give Int16 + arraySimilarity(['1', '2'], ['1'], [toInt16(1), 2], [toInt16(1)]), + arraySimilarity(['1', '2'], ['1'], [toInt32(1), 2], [toInt32(1)]), + arraySimilarity(['1', '2'], ['1'], [toInt64(1), 2], [toInt64(1)]), + arraySimilarity(['1', '2'], ['1'], [toInt128(1), 2], [toInt128(1)]), + arraySimilarity(['1', '2'], ['1'], [toInt256(1), 2], [toInt256(1)]), + arraySimilarity(['1', '2'], ['1'], [toFloat32(1), 2], [toFloat32(1)]), + arraySimilarity(['1', '2'], ['1'], [toFloat64(1), 2], [toFloat64(1)]); +SELECT ''; + +-- errors NUMBER_OF_ARGUMENTS_DOESNT_MATCH +SELECT arrayLevenshteinDistance(lhs, rhs, lhs_weights, rhs_weights) FROM weighted_levenshtein; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT arrayLevenshteinDistanceWeighted(lhs, rhs) FROM simple_levenshtein; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT round(arraySimilarity(lhs, rhs), 5) FROM simple_levenshtein; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +-- errors ILLEGAL_TYPE_OF_ARGUMENT +SELECT arrayLevenshteinDistance(1, [1]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayLevenshteinDistanceWeighted([1], 1, [1, 2], [1]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arraySimilarity([1, 2], 1, [1, 2], [1]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- errors SIZES_OF_ARRAYS_DONT_MATCH +SELECT arrayLevenshteinDistanceWeighted([1, 2], [1], [1., 2], [1., 2]); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } +SELECT arraySimilarity([1, 2], [1], [1., 2], [1., 2]); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } diff --git a/parser/testdata/03369_l2_distance_transposed_variadic/ast.json b/parser/testdata/03369_l2_distance_transposed_variadic/ast.json new file mode 100644 index 000000000..edbeacb6b --- /dev/null +++
b/parser/testdata/03369_l2_distance_transposed_variadic/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001288065, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03369_l2_distance_transposed_variadic/metadata.json b/parser/testdata/03369_l2_distance_transposed_variadic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03369_l2_distance_transposed_variadic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03369_l2_distance_transposed_variadic/query.sql b/parser/testdata/03369_l2_distance_transposed_variadic/query.sql new file mode 100644 index 000000000..1f4330992 --- /dev/null +++ b/parser/testdata/03369_l2_distance_transposed_variadic/query.sql @@ -0,0 +1,59 @@ +SET allow_experimental_qbit_type = 1; + +SELECT 'Test L2DistanceTransposed: BFloat16'; +DROP TABLE IF EXISTS qbit; +CREATE TABLE qbit (id UInt32, vec QBit(BFloat16, 3)) ENGINE = Memory; + +INSERT INTO qbit VALUES (1, [0,1,2]); +INSERT INTO qbit VALUES (2, [1,2,3]); +INSERT INTO qbit VALUES (3, [2,3,4]); + + +-- Use rounding to avoid minor precision differences between different architectures +WITH [toBFloat16(0), 1, 2] AS reference_vec SELECT id, round(L2DistanceTransposed(vec.1, vec.2, vec.3, vec.4, vec.5, vec.6, vec.7, vec.8, vec.9, vec.10, vec.11, vec.12, vec.13, vec.14, vec.15, vec.16, 3, reference_vec), 5) AS dist FROM qbit ORDER BY id; +WITH [toBFloat16(0), 1, 2] AS reference_vec SELECT id, round(L2DistanceTransposed(vec.1, vec.2, vec.3, vec.4, vec.5, vec.6, vec.7, vec.8, 3, reference_vec), 5) AS dist FROM qbit ORDER BY id; +WITH [toBFloat16(0), 1, 2] AS reference_vec SELECT id, round(L2DistanceTransposed(vec.1, vec.2, vec.3, vec.4, 3, reference_vec), 5) AS dist FROM qbit ORDER BY id; +DROP TABLE qbit; + + +SELECT 'Test L2DistanceTransposed: Float32'; +CREATE TABLE qbit (id UInt32, vec QBit(Float32, 3)) ENGINE = Memory; + +INSERT INTO qbit VALUES (1, [0,1,2]); +INSERT INTO qbit VALUES (2, [1,2,3]); +INSERT INTO qbit VALUES (3, [2,3,4]); + +-- Use rounding to avoid minor precision differences when using non-vectorized implementations of the distance function +WITH [toFloat32(0), 1, 2] AS reference_vec SELECT id, round(L2DistanceTransposed(vec.1, vec.2, vec.3, vec.4, vec.5, vec.6, vec.7, vec.8, vec.9, vec.10, vec.11, vec.12, vec.13, vec.14, vec.15, vec.16, vec.17, vec.18, vec.19, vec.20, vec.21, vec.22, vec.23, vec.24, vec.25, vec.26, vec.27, vec.28, vec.29, vec.30, vec.31, vec.32, 3, reference_vec), 5) AS dist FROM qbit ORDER BY id; +WITH [toFloat32(0), 1, 2] AS reference_vec SELECT id, round(L2DistanceTransposed(vec.1, vec.2, vec.3, vec.4, vec.5, vec.6, vec.7, vec.8, vec.9, vec.10, vec.11, vec.12, vec.13, vec.14, vec.15, vec.16, 3, reference_vec), 5) AS dist FROM qbit ORDER BY id; +WITH [toFloat32(0), 1, 2] AS reference_vec SELECT id, round(L2DistanceTransposed(vec.1, vec.2, vec.3, vec.4, vec.5, vec.6, vec.7, vec.8, 3, reference_vec), 5) AS dist FROM qbit ORDER BY id; +DROP TABLE qbit; + + +SELECT 'Test L2DistanceTransposed: Float64'; +CREATE TABLE qbit (id UInt32, vec QBit(Float64, 3)) ENGINE = Memory; + +INSERT INTO qbit VALUES (1, [0,1,2]); +INSERT INTO qbit VALUES (2, [1,2,3]); +INSERT INTO qbit VALUES (3, [2,3,4]); + +WITH [toFloat64(0), 1, 2] AS reference_vec SELECT id, round(L2DistanceTransposed(vec.1, vec.2, vec.3, vec.4, vec.5, vec.6, vec.7, vec.8,
vec.9, vec.10, vec.11, vec.12, vec.13, vec.14, vec.15, vec.16, vec.17, vec.18, vec.19, vec.20, vec.21, vec.22, vec.23, vec.24, vec.25, vec.26, vec.27, vec.28, vec.29, vec.30, vec.31, vec.32, vec.33, vec.34, vec.35, vec.36, vec.37, vec.38, vec.39, vec.40, vec.41, vec.42, vec.43, vec.44, vec.45, vec.46, vec.47, vec.48, vec.49, vec.50, vec.51, vec.52, vec.53, vec.54, vec.55, vec.56, vec.57, vec.58, vec.59, vec.60, vec.61, vec.62, vec.63, vec.64, 3, reference_vec), 5) AS dist FROM qbit ORDER BY id; +WITH [toFloat64(0), 1, 2] AS reference_vec SELECT id, round(L2DistanceTransposed(vec.1, vec.2, vec.3, vec.4, vec.5, vec.6, vec.7, vec.8, vec.9, vec.10, vec.11, vec.12, vec.13, vec.14, vec.15, vec.16, vec.17, vec.18, vec.19, vec.20, vec.21, vec.22, vec.23, vec.24, vec.25, vec.26, vec.27, vec.28, vec.29, vec.30, vec.31, vec.32, 3, reference_vec), 5) AS dist FROM qbit ORDER BY id; +WITH [toFloat64(0), 1, 2] AS reference_vec SELECT id, round(L2DistanceTransposed(vec.1, vec.2, vec.3, vec.4, vec.5, vec.6, vec.7, vec.8, vec.9, vec.10, vec.11, vec.12, vec.13, vec.14, vec.15, vec.16, 3, reference_vec), 5) AS dist FROM qbit ORDER BY id; +DROP TABLE qbit; + + +SELECT 'Difficult test'; + +CREATE TABLE qbit (id UInt32, vec QBit(Float32, 3)) ENGINE = Memory; + +INSERT INTO qbit SELECT number + 1 AS id, arrayMap(i -> toFloat32(i + number), range(3)) AS vec FROM numbers(3); + +WITH [toFloat32(0), 1, 2] AS reference_vec SELECT id, round(L2DistanceTransposed(vec.1, vec.2, vec.3, vec.4, 3, reference_vec), 5) AS dist FROM qbit ORDER BY id; +WITH [toFloat32(0), 1, 2] AS reference_vec SELECT id, round(L2DistanceTransposed(vec.1, vec.2, vec.3, vec.4, 3.1, reference_vec), 5) AS dist FROM qbit ORDER BY id; -- { serverError TOO_MANY_ARGUMENTS_FOR_FUNCTION } +WITH [toFloat32(0), 1, 2] AS reference_vec SELECT id, round(L2DistanceTransposed(vec.1, vec.2, vec.3, vec.4, 0, reference_vec), 5) AS dist FROM qbit ORDER BY id; -- { serverError TOO_MANY_ARGUMENTS_FOR_FUNCTION } +DROP TABLE qbit; + +-- Check that constant FixedString columns are supported +SELECT round(L2DistanceTransposed(''::FixedString(1), 3, [1,2,3]::Array(Float32)), 5); -- useDefaultImplementationForConstants is triggered +SELECT round(L2DistanceTransposed(''::FixedString(1), 3, materialize([1,2,3])::Array(Float32)), 5); diff --git a/parser/testdata/03369_predicate_pushdown_enforce_literal_type/ast.json b/parser/testdata/03369_predicate_pushdown_enforce_literal_type/ast.json new file mode 100644 index 000000000..c54f9b2ce --- /dev/null +++ b/parser/testdata/03369_predicate_pushdown_enforce_literal_type/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_03369 (children 1)" + }, + { + "explain": " Identifier t_03369" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001187209, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/03369_predicate_pushdown_enforce_literal_type/metadata.json b/parser/testdata/03369_predicate_pushdown_enforce_literal_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03369_predicate_pushdown_enforce_literal_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03369_predicate_pushdown_enforce_literal_type/query.sql b/parser/testdata/03369_predicate_pushdown_enforce_literal_type/query.sql new file mode 100644 index 000000000..c141fc9ad --- /dev/null +++ b/parser/testdata/03369_predicate_pushdown_enforce_literal_type/query.sql @@ -0,0 +1,9 @@ 
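+-- This checks allow_push_predicate_ast_for_distributed_subqueries: with the setting enabled below, the HAVING predicate should be pushed down as an AST into the per-shard subqueries generated for remote(), with toDate(1738281600) keeping its Date type. +-- A shard-side query could then look roughly like this (hypothetical sketch, not asserted by the test): +-- SELECT d, count() FROM t_03369 WHERE event != '' GROUP BY d HAVING d >= toDate(1738281600) AND count() >= 1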
+DROP TABLE IF EXISTS t_03369; + +CREATE TABLE t_03369 (d Date, event String, c UInt32) ENGINE = Memory; + +INSERT INTO t_03369 VALUES (toDate('2025-03-03'), 'foo', 1); + +SET prefer_localhost_replica = 0, allow_push_predicate_ast_for_distributed_subqueries = 1; + +SELECT d, count() FROM remote('127.0.0.{1..10}', currentDatabase(), t_03369) WHERE event != '' GROUP BY d HAVING d >= toDate(1738281600) AND count() >= 1; diff --git a/parser/testdata/03369_values_template_types_mismatch/ast.json b/parser/testdata/03369_values_template_types_mismatch/ast.json new file mode 100644 index 000000000..d69a3bbe4 --- /dev/null +++ b/parser/testdata/03369_values_template_types_mismatch/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t0 (children 3)" + }, + { + "explain": " Identifier t0" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration c0 (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001344489, + "rows_read": 8, + "bytes_read": 270 + } +} diff --git a/parser/testdata/03369_values_template_types_mismatch/metadata.json b/parser/testdata/03369_values_template_types_mismatch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03369_values_template_types_mismatch/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03369_values_template_types_mismatch/query.sql b/parser/testdata/03369_values_template_types_mismatch/query.sql new file mode 100644 index 000000000..cbe82ab6d --- /dev/null +++ b/parser/testdata/03369_values_template_types_mismatch/query.sql @@ -0,0 +1,3 @@ +CREATE TABLE t0 (c0 String) ENGINE = Memory; +INSERT INTO TABLE t0 (c0) VALUES ([1, NULL]::Array(Nullable(Int32))), ((1,1)); + diff --git a/parser/testdata/03369_variant_escape_filename_merge_tree/ast.json b/parser/testdata/03369_variant_escape_filename_merge_tree/ast.json new file mode 100644 index 000000000..e0e013c23 --- /dev/null +++ b/parser/testdata/03369_variant_escape_filename_merge_tree/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001008149, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03369_variant_escape_filename_merge_tree/metadata.json b/parser/testdata/03369_variant_escape_filename_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03369_variant_escape_filename_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03369_variant_escape_filename_merge_tree/query.sql b/parser/testdata/03369_variant_escape_filename_merge_tree/query.sql new file mode 100644 index 000000000..dcbd7f34b --- /dev/null +++ b/parser/testdata/03369_variant_escape_filename_merge_tree/query.sql @@ -0,0 +1,13 @@ +set enable_variant_type=1; + +drop table if exists test; +create table test (v Variant(Tuple(a UInt32, b UInt32))) engine=MergeTree order by tuple() settings min_rows_for_wide_part=0, min_bytes_for_wide_part=0, escape_variant_subcolumn_filenames=1, replace_long_file_name_to_hash=0; +insert into test select tuple(1, 2)::Tuple(a UInt32, b UInt32); +select filenames 
from system.parts_columns where table = 'test' and database = currentDatabase(); +drop table test; + +create table test (v Variant(Tuple(a UInt32, b UInt32))) engine=MergeTree order by tuple() settings min_rows_for_wide_part=0, min_bytes_for_wide_part=0, escape_variant_subcolumn_filenames=0, replace_long_file_name_to_hash=0; +insert into test select tuple(1, 2)::Tuple(a UInt32, b UInt32); +select filenames from system.parts_columns where table = 'test' and database = currentDatabase(); +drop table test; + diff --git a/parser/testdata/03370_join_identifiers/ast.json b/parser/testdata/03370_join_identifiers/ast.json new file mode 100644 index 000000000..775fc6985 --- /dev/null +++ b/parser/testdata/03370_join_identifiers/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001108134, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03370_join_identifiers/metadata.json b/parser/testdata/03370_join_identifiers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03370_join_identifiers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03370_join_identifiers/query.sql b/parser/testdata/03370_join_identifiers/query.sql new file mode 100644 index 000000000..d83594dc8 --- /dev/null +++ b/parser/testdata/03370_join_identifiers/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; + +CREATE TABLE t0 +( + `id` INT UNSIGNED NOT NULL, + `rev` INT UNSIGNED NOT NULL, + `content` varchar(200) NOT NULL +) +ENGINE = MergeTree +PRIMARY KEY (id, rev); + +CREATE TABLE t1 +( + `id` INT UNSIGNED NOT NULL, + `rev` INT UNSIGNED NOT NULL, + `content` varchar(200) NOT NULL +) +ENGINE = MergeTree +PRIMARY KEY (id, rev); + +INSERT INTO TABLE t0 VALUES (1,1,'1'); +INSERT INTO TABLE t1 VALUES (1,1,'1'); + +SELECT SUM(t1.rev) AS aggr +FROM t1 +INNER JOIN t0 AS right_0 ON t1.id = right_0.id +INNER JOIN t1 AS right_1 ON t1.id = right_1.id; + +DROP TABLE t0; +DROP TABLE t1; diff --git a/parser/testdata/03370_rocks_db_engine_subcolumn_in_pk/ast.json b/parser/testdata/03370_rocks_db_engine_subcolumn_in_pk/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03370_rocks_db_engine_subcolumn_in_pk/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03370_rocks_db_engine_subcolumn_in_pk/metadata.json b/parser/testdata/03370_rocks_db_engine_subcolumn_in_pk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03370_rocks_db_engine_subcolumn_in_pk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03370_rocks_db_engine_subcolumn_in_pk/query.sql b/parser/testdata/03370_rocks_db_engine_subcolumn_in_pk/query.sql new file mode 100644 index 000000000..9ede2d4ca --- /dev/null +++ b/parser/testdata/03370_rocks_db_engine_subcolumn_in_pk/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest, use-rocksdb + +CREATE TABLE test (t Tuple(a Int32)) ENGINE = EmbeddedRocksDB() PRIMARY KEY (t.a); -- {serverError BAD_ARGUMENTS} + diff --git a/parser/testdata/03371_analyzer_filter_pushdown_distributed/ast.json b/parser/testdata/03371_analyzer_filter_pushdown_distributed/ast.json new file mode 100644 index 000000000..2b6a9766d --- /dev/null +++ b/parser/testdata/03371_analyzer_filter_pushdown_distributed/ast.json @@ -0,0 
+1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001262407, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03371_analyzer_filter_pushdown_distributed/metadata.json b/parser/testdata/03371_analyzer_filter_pushdown_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03371_analyzer_filter_pushdown_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03371_analyzer_filter_pushdown_distributed/query.sql b/parser/testdata/03371_analyzer_filter_pushdown_distributed/query.sql new file mode 100644 index 000000000..6dd0a1657 --- /dev/null +++ b/parser/testdata/03371_analyzer_filter_pushdown_distributed/query.sql @@ -0,0 +1,42 @@ +set enable_analyzer=1; + +CREATE TABLE bug_table +( + `date_column` Date, + `c1` String, + `c2` String +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(date_column) +ORDER BY (c1, c2); + +INSERT INTO bug_table values + (toDate(now()),hex(rand()),hex(now())), + (toDate(now()),hex(rand()),hex(now())), + (toDate(now()),hex(rand()),hex(now())), + (toDate(now()),hex(rand()),hex(now())), + (toDate(now()),hex(rand()),hex(now())); + +CREATE TABLE distributed_bug_table +( +date_column Date, +c1 String, +c2 String +) +ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), 'bug_table', cityHash64(c1)); + +set distributed_product_mode = 'allow'; + +set prefer_localhost_replica=1; + +WITH alias_1 AS + (SELECT c1,c2 FROM distributed_bug_table) +SELECT c1 from alias_1 where c2 IN (SELECT DISTINCT c2 from alias_1) +FORMAT Null; + +set prefer_localhost_replica=0; + +WITH alias_1 AS + (SELECT c1,c2 FROM distributed_bug_table) +SELECT c1 from alias_1 where c2 IN (SELECT DISTINCT c2 from alias_1) +FORMAT Null; diff --git a/parser/testdata/03371_bfloat16_special_values/ast.json b/parser/testdata/03371_bfloat16_special_values/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03371_bfloat16_special_values/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03371_bfloat16_special_values/metadata.json b/parser/testdata/03371_bfloat16_special_values/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03371_bfloat16_special_values/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03371_bfloat16_special_values/query.sql b/parser/testdata/03371_bfloat16_special_values/query.sql new file mode 100644 index 000000000..7218c5674 --- /dev/null +++ b/parser/testdata/03371_bfloat16_special_values/query.sql @@ -0,0 +1,44 @@ +-- Check that Float32 and BFloat16 return the same values for calculations with special values +SELECT toFloat32(0.0) == toFloat32(-0.0), toBFloat16(0.0) == toBFloat16(-0.0); +SELECT toFloat32(0.0) != toFloat32(-0.0), toBFloat16(0.0) != toBFloat16(-0.0); +SELECT toFloat32(0.0) > toFloat32(-0.0), toBFloat16(0.0) > toBFloat16(-0.0); +SELECT toFloat32(0.0) < toFloat32(-0.0), toBFloat16(0.0) < toBFloat16(-0.0); +SELECT toFloat32(0.0) + toFloat32(-0.0), toBFloat16(0.0) + toBFloat16(-0.0); +SELECT toFloat32(-0.0) + toFloat32(-0.0), toBFloat16(-0.0) + toBFloat16(-0.0); +SELECT toFloat32(NaN) == toFloat32(NaN), toBFloat16(NaN) == toBFloat16(NaN); +SELECT toFloat32(NaN) + toFloat32(NaN), toBFloat16(NaN) + toBFloat16(NaN); +SELECT toFloat32(NaN) - toFloat32(NaN), toBFloat16(NaN) - toBFloat16(NaN); +SELECT toFloat32(NaN) * 
toFloat32(NaN), toBFloat16(NaN) * toBFloat16(NaN); +SELECT toFloat32(NaN) / toFloat32(NaN), toBFloat16(NaN) / toBFloat16(NaN); +SELECT toFloat32(NaN) % toFloat32(NaN), toBFloat16(NaN) % toBFloat16(NaN); +SELECT toFloat32(5.5) + toFloat32(NaN), toBFloat16(5.5) + toBFloat16(NaN); +SELECT toFloat32(5.5) - toFloat32(NaN), toBFloat16(5.5) - toBFloat16(NaN); +SELECT toFloat32(5.5) * toFloat32(NaN), toBFloat16(5.5) * toBFloat16(NaN); +SELECT toFloat32(5.5) / toFloat32(NaN), toBFloat16(5.5) / toBFloat16(NaN); +SELECT toFloat32(5.5) % toFloat32(NaN), toBFloat16(5.5) % toBFloat16(NaN); +SELECT toFloat32(Inf) == toFloat32(Inf), toBFloat16(Inf) == toBFloat16(Inf); +SELECT toFloat32(Inf) + toFloat32(Inf), toBFloat16(Inf) + toBFloat16(Inf); +SELECT toFloat32(Inf) - toFloat32(Inf), toBFloat16(Inf) - toBFloat16(Inf); +SELECT toFloat32(Inf) * toFloat32(Inf), toBFloat16(Inf) * toBFloat16(Inf); +SELECT toFloat32(Inf) / toFloat32(Inf), toBFloat16(Inf) / toBFloat16(Inf); +SELECT toFloat32(Inf) % toFloat32(Inf), toBFloat16(Inf) % toBFloat16(Inf); +SELECT toFloat32(-Inf) == toFloat32(-Inf), toBFloat16(-Inf) == toBFloat16(-Inf); +SELECT toFloat32(5.5) + toFloat32(Inf), toBFloat16(5.5) + toBFloat16(Inf); +SELECT toFloat32(5.5) - toFloat32(Inf), toBFloat16(5.5) - toBFloat16(Inf); +SELECT toFloat32(5.5) * toFloat32(Inf), toBFloat16(5.5) * toBFloat16(Inf); +SELECT toFloat32(5.5) / toFloat32(Inf), toBFloat16(5.5) / toBFloat16(Inf); +SELECT toFloat32(5.5) % toFloat32(Inf), toBFloat16(5.5) % toBFloat16(Inf); + +-- Test for Bug 77087 +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (c0 Tuple(BFloat16)) ENGINE = SummingMergeTree() ORDER BY (c0) SETTINGS ratio_of_defaults_for_sparse_serialization = 1.0; -- Disable sparse serialization, otherwise the test becomes flaky +INSERT INTO TABLE tab (c0) VALUES ((-0.0, )), ((nan, )), ((0.0, )); +SELECT c0 FROM tab FINAL; +DROP TABLE tab; + +-- Test for Bug 77224 +CREATE TABLE tab (c0 BFloat16 PRIMARY KEY) ENGINE = SummingMergeTree() SETTINGS ratio_of_defaults_for_sparse_serialization = 1.0; -- Disable sparse serialization, otherwise the test becomes flaky +INSERT INTO TABLE tab (c0) VALUES (nan), (-0.0); +INSERT INTO TABLE tab (c0) VALUES (0.0), (nan); +SELECT c0 FROM tab FINAL; +DROP TABLE tab; diff --git a/parser/testdata/03371_constant_alias_columns/ast.json b/parser/testdata/03371_constant_alias_columns/ast.json new file mode 100644 index 000000000..470982ee4 --- /dev/null +++ b/parser/testdata/03371_constant_alias_columns/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001168601, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03371_constant_alias_columns/metadata.json b/parser/testdata/03371_constant_alias_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03371_constant_alias_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03371_constant_alias_columns/query.sql b/parser/testdata/03371_constant_alias_columns/query.sql new file mode 100644 index 000000000..7c68996e7 --- /dev/null +++ b/parser/testdata/03371_constant_alias_columns/query.sql @@ -0,0 +1,25 @@ +SET parallel_replicas_for_non_replicated_merge_tree = 1; +SET allow_experimental_parallel_reading_from_replicas = 1; +SET cluster_for_parallel_replicas = 'parallel_replicas'; +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table (a UInt64, b UInt64, c UInt64, d 
UInt64, x Array(String)) +ENGINE MergeTree() PARTITION BY b ORDER BY a; +INSERT INTO test_table SELECT number, number % 2, number, number % 3, ['a', 'b', 'c'] FROM numbers(1); +ALTER TABLE test_table ADD COLUMN y Array(String) ALIAS ['qwqw'] AFTER x; + +SELECT y FROM test_table ORDER BY c; + +SET allow_experimental_parallel_reading_from_replicas = 0; +SELECT '----'; + +SELECT y FROM remote('127.0.0.{1,2}', currentDatabase(), test_table) ORDER BY c settings extremes=1; + +SELECT '----'; +DROP TABLE IF EXISTS test_table; +SET allow_experimental_parallel_reading_from_replicas = 1; + +CREATE TABLE test_table (a UInt64, b UInt64, c UInt64, d UInt64, n Nested(x String)) +ENGINE MergeTree() PARTITION BY b ORDER BY a; +INSERT INTO test_table SELECT number, number % 2, number, number % 3, ['a'] FROM numbers(1); +ALTER TABLE test_table ADD COLUMN n.y Array(String) ALIAS ['qwqw'] AFTER n.x; +SELECT a, b, c, d, n.x, n.y FROM test_table ORDER BY c; diff --git a/parser/testdata/03371_nullable_tuple_string_comparison/ast.json b/parser/testdata/03371_nullable_tuple_string_comparison/ast.json new file mode 100644 index 000000000..a0cdf2595 --- /dev/null +++ b/parser/testdata/03371_nullable_tuple_string_comparison/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001076741, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03371_nullable_tuple_string_comparison/metadata.json b/parser/testdata/03371_nullable_tuple_string_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03371_nullable_tuple_string_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03371_nullable_tuple_string_comparison/query.sql b/parser/testdata/03371_nullable_tuple_string_comparison/query.sql new file mode 100644 index 000000000..8c724fefc --- /dev/null +++ b/parser/testdata/03371_nullable_tuple_string_comparison/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (id UInt64, s1 Nullable(String), s2 Nullable(String)) ORDER BY id; +CREATE TABLE t2 (id UInt64, s1 String, s2 String) ORDER BY id; + +INSERT INTO t1 VALUES (1, 'a', 'b'), (2, 'c', 'd'), (3, 'e', null); +INSERT INTO t2 VALUES (4, 'z', 'y'), (5, 'x', 'w'); + +SELECT id FROM t1 WHERE (s1, s2) = ('a', 'b'); +SELECT id FROM t1 WHERE (s1, s2) = '(\'a\',\'b\')'; +SELECT id FROM t1 WHERE (s1, s2) = CAST((SELECT s1, s2 FROM t1 WHERE s1 = 'a') AS text); +SELECT id FROM t1 WHERE (s1, null) = ('a', null); +SELECT id FROM t1 WHERE (s1, null) = '(\'a\',null)'; +SELECT id FROM t1 WHERE (s1, null) = CAST((SELECT s1, s2 FROM t1 WHERE s1 = 'e' and s2 is null) AS text); + +SELECT id FROM t2 WHERE (s1, null) = ('z', null); +SELECT id FROM t2 WHERE (s1, null) = '(\'z\',null)'; + +DROP TABLE t1; +DROP TABLE t2; diff --git a/parser/testdata/03372_get_subcolumn_null/ast.json b/parser/testdata/03372_get_subcolumn_null/ast.json new file mode 100644 index 000000000..a02149237 --- /dev/null +++ b/parser/testdata/03372_get_subcolumn_null/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001239452, + "rows_read": 2, + "bytes_read": 60 + } +} 
diff --git a/parser/testdata/03372_get_subcolumn_null/metadata.json b/parser/testdata/03372_get_subcolumn_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03372_get_subcolumn_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03372_get_subcolumn_null/query.sql b/parser/testdata/03372_get_subcolumn_null/query.sql new file mode 100644 index 000000000..51cf2f6b0 --- /dev/null +++ b/parser/testdata/03372_get_subcolumn_null/query.sql @@ -0,0 +1,6 @@ +drop table if exists test; +create table test (x Nullable(UInt32)) engine=Memory; +insert into test select number % 2 ? null : number from numbers(10); +select getSubcolumn(x, 'null') from test; +drop table test; + diff --git a/parser/testdata/03372_qbit_mergetree_1/ast.json b/parser/testdata/03372_qbit_mergetree_1/ast.json new file mode 100644 index 000000000..8afc06880 --- /dev/null +++ b/parser/testdata/03372_qbit_mergetree_1/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001574434, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03372_qbit_mergetree_1/metadata.json b/parser/testdata/03372_qbit_mergetree_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03372_qbit_mergetree_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03372_qbit_mergetree_1/query.sql b/parser/testdata/03372_qbit_mergetree_1/query.sql new file mode 100644 index 000000000..d471f0062 --- /dev/null +++ b/parser/testdata/03372_qbit_mergetree_1/query.sql @@ -0,0 +1,46 @@ +SET allow_experimental_qbit_type = 1; + +DROP TABLE IF EXISTS qbits; + +SELECT 'Test QBit with MergeTree engine'; +CREATE TABLE qbits (id UInt32, vec QBit(BFloat16, 16)) ENGINE = MergeTree ORDER BY id; + +INSERT INTO qbits VALUES (1, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -16]); +INSERT INTO qbits VALUES (2, [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, -32]); +INSERT INTO qbits VALUES (2, [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, -32]); + +-- Check everything is correct after parts are merged +SELECT * FROM qbits ORDER BY id; +OPTIMIZE TABLE qbits FINAL; +SELECT * FROM qbits ORDER BY id; + +DROP TABLE qbits; + + +SELECT 'Test QBit with ReplacingMergeTree engine'; +CREATE TABLE qbits (id UInt32, vec QBit(BFloat16, 16)) ENGINE = ReplacingMergeTree ORDER BY id; + +-- Add duplicates to test replacing +INSERT INTO qbits VALUES (1, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -16]); +INSERT INTO qbits VALUES (2, [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, -32]); +INSERT INTO qbits VALUES (2, [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, -32]); + +SELECT * FROM qbits ORDER BY id; +OPTIMIZE TABLE qbits FINAL; +SELECT * FROM qbits ORDER BY id; + +DROP TABLE qbits; + + +SELECT 'Test QBit with CoalescingMergeTree engine'; +CREATE TABLE qbits (id UInt32, vec QBit(BFloat16, 16)) ENGINE = CoalescingMergeTree ORDER BY id; + +INSERT INTO qbits VALUES (1, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -16]); +INSERT INTO qbits VALUES (2, [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, -32]); +INSERT INTO qbits VALUES (2, [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, -32]); + +SELECT * FROM qbits ORDER BY id; +OPTIMIZE TABLE qbits FINAL; +SELECT * FROM qbits ORDER BY id; + +DROP TABLE 
qbits; diff --git a/parser/testdata/03372_qbit_mergetree_2/ast.json b/parser/testdata/03372_qbit_mergetree_2/ast.json new file mode 100644 index 000000000..6979690ef --- /dev/null +++ b/parser/testdata/03372_qbit_mergetree_2/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001507252, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03372_qbit_mergetree_2/metadata.json b/parser/testdata/03372_qbit_mergetree_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03372_qbit_mergetree_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03372_qbit_mergetree_2/query.sql b/parser/testdata/03372_qbit_mergetree_2/query.sql new file mode 100644 index 000000000..ec0b1322c --- /dev/null +++ b/parser/testdata/03372_qbit_mergetree_2/query.sql @@ -0,0 +1,59 @@ +SET allow_experimental_qbit_type = 1; + +DROP TABLE IF EXISTS qbits; + +SELECT 'Test QBit with SummingMergeTree engine'; +CREATE TABLE qbits (id UInt32, vec QBit(BFloat16, 16)) ENGINE = SummingMergeTree ORDER BY id; + +-- The elements of qbits will not be summed and this is expected behavior +INSERT INTO qbits VALUES (1, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -16]); +INSERT INTO qbits VALUES (2, [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, -32]); +INSERT INTO qbits VALUES (2, [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, -32]); + +SELECT * FROM qbits ORDER BY id; +OPTIMIZE TABLE qbits FINAL; +SELECT * FROM qbits ORDER BY id; + +DROP TABLE qbits; + + +SELECT 'Test QBit with AggregatingMergeTree engine'; +CREATE TABLE qbits (id UInt32, vec QBit(BFloat16, 16)) ENGINE = AggregatingMergeTree ORDER BY id; + +INSERT INTO qbits VALUES (1, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -16]); +INSERT INTO qbits VALUES (2, [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, -32]); +INSERT INTO qbits VALUES (2, [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, -32]); + +SELECT * FROM qbits ORDER BY id; +OPTIMIZE TABLE qbits FINAL; +SELECT * FROM qbits ORDER BY id; + +DROP TABLE qbits; + + +SELECT 'Test QBit with CollapsingMergeTree engine'; +CREATE TABLE qbits (id UInt8, sign Int8, vec QBit(BFloat16, 16), order UInt8) ENGINE = CollapsingMergeTree(sign) ORDER BY id; + +INSERT INTO qbits VALUES (1, 1, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -16], 1); +INSERT INTO qbits VALUES (1, -1, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -16], 2); +INSERT INTO qbits VALUES (1, 1, [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, -32], 3); + +SELECT * FROM qbits ORDER BY order; +OPTIMIZE TABLE qbits FINAL; +SELECT * FROM qbits ORDER BY order; + +DROP TABLE qbits; + + +SELECT 'Test QBit with VersionedCollapsingMergeTree engine'; +CREATE TABLE qbits (id Int8, sign Int8, ver Int8, vec QBit(BFloat16, 16)) ENGINE = VersionedCollapsingMergeTree(sign, ver) ORDER BY ver; + +INSERT INTO qbits VALUES (0, 1, 0, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -16]); +INSERT INTO qbits VALUES (1, 1, 1, [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, -32]); +INSERT INTO qbits VALUES (2, -1, 1, [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, -32]); + +SELECT * FROM qbits ORDER BY id; +OPTIMIZE TABLE qbits FINAL; +SELECT * FROM qbits ORDER BY id; + +DROP TABLE qbits; diff --git a/parser/testdata/03373_qbit_dynamic/ast.json 
b/parser/testdata/03373_qbit_dynamic/ast.json new file mode 100644 index 000000000..e59dd1ad4 --- /dev/null +++ b/parser/testdata/03373_qbit_dynamic/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00164504, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03373_qbit_dynamic/metadata.json b/parser/testdata/03373_qbit_dynamic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03373_qbit_dynamic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03373_qbit_dynamic/query.sql b/parser/testdata/03373_qbit_dynamic/query.sql new file mode 100644 index 000000000..406382da1 --- /dev/null +++ b/parser/testdata/03373_qbit_dynamic/query.sql @@ -0,0 +1,24 @@ +SET allow_experimental_qbit_type = 1; +SET allow_experimental_dynamic_type = 1; + +DROP TABLE IF EXISTS qbit_dynamic_test; + + +SELECT 'Test QBit within Dynamic columns'; +CREATE TABLE qbit_dynamic_test (id UInt32, data Dynamic) ENGINE = Memory; +INSERT INTO qbit_dynamic_test VALUES + (1, [1.0, 2.0, 3.0, 4.0]::QBit(Float32, 4)), + (2, [5.0, 6.0, 7.0, 8.0]::QBit(Float64, 4)), + (3, [1.5, 2.5, 3.5, 4.5]::QBit(BFloat16, 4)); +SELECT id, data, dynamicType(data) as type FROM qbit_dynamic_test ORDER BY id; + + +SELECT 'Mixed Dynamic column with QBit and other types'; +INSERT INTO qbit_dynamic_test VALUES + (4, 'string_value'), + (5, 42), + (6, [9.0, 10.0, 11.0, 12.0]::QBit(Float32, 4)); +SELECT id, data, dynamicType(data) as type FROM qbit_dynamic_test ORDER BY id; + + +DROP TABLE qbit_dynamic_test; diff --git a/parser/testdata/03374_date_trunc_with_negatives/ast.json b/parser/testdata/03374_date_trunc_with_negatives/ast.json new file mode 100644 index 000000000..1dbca9551 --- /dev/null +++ b/parser/testdata/03374_date_trunc_with_negatives/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001303417, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03374_date_trunc_with_negatives/metadata.json b/parser/testdata/03374_date_trunc_with_negatives/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03374_date_trunc_with_negatives/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03374_date_trunc_with_negatives/query.sql b/parser/testdata/03374_date_trunc_with_negatives/query.sql new file mode 100644 index 000000000..5733dc91d --- /dev/null +++ b/parser/testdata/03374_date_trunc_with_negatives/query.sql @@ -0,0 +1,9 @@ +SET session_timezone = 'UTC'; + +SELECT dateTrunc('Second', toDateTime64('1955-03-01 12:55:55', 2)); +SELECT dateTrunc('Minute', toDateTime64('1955-03-01 12:55:55', 2)); +SELECT dateTrunc('Hour', toDateTime64('1960-03-01 12:55:55', 2)); +SELECT dateTrunc('Day', toDateTime64('1950-03-01 12:55:55', 2)); +SELECT dateTrunc('Week', toDateTime64('1960-03-01 12:55:55', 2)); +SELECT dateTrunc('Month', toDateTime64('1950-03-01 12:55:55', 2)); +SELECT dateTrunc('Year', toDateTime64('1955-03-01 12:55:55', 2)); diff --git a/parser/testdata/03374_indexes_with_literals/ast.json b/parser/testdata/03374_indexes_with_literals/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03374_indexes_with_literals/ast.json @@ -0,0 +1 @@ +{"error": true} diff 
--git a/parser/testdata/03374_indexes_with_literals/metadata.json b/parser/testdata/03374_indexes_with_literals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03374_indexes_with_literals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03374_indexes_with_literals/query.sql b/parser/testdata/03374_indexes_with_literals/query.sql new file mode 100644 index 000000000..cc6ccce52 --- /dev/null +++ b/parser/testdata/03374_indexes_with_literals/query.sql @@ -0,0 +1,65 @@ +-- Tags: no-random-settings, no-parallel-replicas + +set enable_analyzer=1; + +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + `x` Int64, + INDEX idx1 CAST(x, 'String') TYPE bloom_filter GRANULARITY 1, +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity=1; +INSERT INTO test SELECT number FROM numbers(1000); +EXPLAIN indexes = 1 SELECT * FROM test WHERE CAST(x, 'String') = '100'; +DROP TABLE test; + +CREATE TABLE test +( + `x` Int64, + INDEX idx1 CAST(x, 'String') TYPE set(0) GRANULARITY 1, +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity=1; +INSERT INTO test SELECT number FROM numbers(1000); +EXPLAIN indexes = 1 SELECT * FROM test WHERE CAST(x, 'String') = '100'; +DROP TABLE test; + +CREATE TABLE test +( + `x` Int64, + INDEX idx1 CAST(x, 'String') TYPE tokenbf_v1(16000, 2, 0) GRANULARITY 1, +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity=1; +INSERT INTO test SELECT number FROM numbers(1000); +EXPLAIN indexes = 1 SELECT * FROM test WHERE CAST(x, 'String') = '100'; +DROP TABLE test; + +CREATE TABLE test +( + `x` Int64, + INDEX idx1 CAST(x, 'String') TYPE ngrambf_v1(4, 16000, 2, 0) GRANULARITY 1, +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity=1; +INSERT INTO test SELECT number FROM numbers(1000); +EXPLAIN indexes = 1 SELECT * FROM test WHERE CAST(x, 'String') = '100'; +DROP TABLE test; + +CREATE TABLE test +( + `x` Int64, + INDEX idx1 CAST(x, 'String') TYPE minmax GRANULARITY 1, +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity=1; +INSERT INTO test SELECT number FROM numbers(1000); +EXPLAIN indexes = 1 SELECT * FROM test WHERE CAST(x, 'String') = '100'; +DROP TABLE test; + diff --git a/parser/testdata/03374_indexes_with_trivial_cast/ast.json b/parser/testdata/03374_indexes_with_trivial_cast/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03374_indexes_with_trivial_cast/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03374_indexes_with_trivial_cast/metadata.json b/parser/testdata/03374_indexes_with_trivial_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03374_indexes_with_trivial_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03374_indexes_with_trivial_cast/query.sql b/parser/testdata/03374_indexes_with_trivial_cast/query.sql new file mode 100644 index 000000000..10a65f4f1 --- /dev/null +++ b/parser/testdata/03374_indexes_with_trivial_cast/query.sql @@ -0,0 +1,64 @@ +-- Tags: no-random-settings, no-parallel-replicas +set enable_analyzer=1; + +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + `x` String, + INDEX idx1 x TYPE bloom_filter GRANULARITY 1, +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity=1; +INSERT INTO test SELECT number FROM numbers(1000); +EXPLAIN indexes = 1 SELECT * FROM test WHERE CAST(x, 'String') = '100'; +DROP TABLE test; + +CREATE TABLE test +( + `x` String, + INDEX 
idx1 x TYPE set(0) GRANULARITY 1, +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity=1; +INSERT INTO test SELECT number FROM numbers(1000); +EXPLAIN indexes = 1 SELECT * FROM test WHERE CAST(x, 'String') = '100'; +DROP TABLE test; + +CREATE TABLE test +( + `x` String, + INDEX idx1 x TYPE tokenbf_v1(16000, 2, 0) GRANULARITY 1, +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity=1; +INSERT INTO test SELECT number FROM numbers(1000); +EXPLAIN indexes = 1 SELECT * FROM test WHERE CAST(x, 'String') = '100'; +DROP TABLE test; + +CREATE TABLE test +( + `x` String, + INDEX idx1 x TYPE ngrambf_v1(4, 16000, 2, 0) GRANULARITY 1, +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity=1; +INSERT INTO test SELECT number FROM numbers(1000); +EXPLAIN indexes = 1 SELECT * FROM test WHERE CAST(x, 'String') = '100'; +DROP TABLE test; + +CREATE TABLE test +( + `x` String, + INDEX idx1 x TYPE minmax GRANULARITY 1, +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity=1; +INSERT INTO test SELECT number FROM numbers(1000); +EXPLAIN indexes = 1 SELECT * FROM test WHERE CAST(x, 'String') = '100'; +DROP TABLE test; + diff --git a/parser/testdata/03374_qbit_nullable/ast.json b/parser/testdata/03374_qbit_nullable/ast.json new file mode 100644 index 000000000..8c8916cef --- /dev/null +++ b/parser/testdata/03374_qbit_nullable/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001446224, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03374_qbit_nullable/metadata.json b/parser/testdata/03374_qbit_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03374_qbit_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03374_qbit_nullable/query.sql b/parser/testdata/03374_qbit_nullable/query.sql new file mode 100644 index 000000000..52872662c --- /dev/null +++ b/parser/testdata/03374_qbit_nullable/query.sql @@ -0,0 +1,14 @@ +SET allow_experimental_qbit_type = 1; + +DROP TABLE IF EXISTS qbit_nullable_test; + +SELECT 'Test QBit within Nullable columns'; +CREATE TABLE qbit_nullable_test (id UInt32, data Nullable(QBit(Float64, 4))) ENGINE = Memory; +INSERT INTO qbit_nullable_test VALUES (1, NULL), + (2, [5.0, 6.0, 7.0, 8.0]::QBit(Float64, 4)), + (3, [1.5, 2.5, 3.5, 4.5]::QBit(Float64, 4)), + (4, NULL); + +SELECT id, data FROM qbit_nullable_test ORDER BY id; + +DROP TABLE qbit_nullable_test; diff --git a/parser/testdata/03375_bloom_filter_array_equals/ast.json b/parser/testdata/03375_bloom_filter_array_equals/ast.json new file mode 100644 index 000000000..33d59c602 --- /dev/null +++ b/parser/testdata/03375_bloom_filter_array_equals/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 3)" + }, + { + "explain": " Identifier test" + }, + { + "explain": " Columns definition (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType Array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Index (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function bloom_filter 
(children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_0.025" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001278676, + "rows_read": 18, + "bytes_read": 630 + } +} diff --git a/parser/testdata/03375_bloom_filter_array_equals/metadata.json b/parser/testdata/03375_bloom_filter_array_equals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03375_bloom_filter_array_equals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03375_bloom_filter_array_equals/query.sql b/parser/testdata/03375_bloom_filter_array_equals/query.sql new file mode 100644 index 000000000..80222cdb4 --- /dev/null +++ b/parser/testdata/03375_bloom_filter_array_equals/query.sql @@ -0,0 +1,4 @@ +create table test (x Array(String), index idx1 x type bloom_filter(0.025)) engine=MergeTree order by tuple(); +insert into test values (['s1']); +select * from test where x = ['s1']; + diff --git a/parser/testdata/03375_bloom_filter_has_hasAny_const_array/ast.json b/parser/testdata/03375_bloom_filter_has_hasAny_const_array/ast.json new file mode 100644 index 000000000..4c929d5fe --- /dev/null +++ b/parser/testdata/03375_bloom_filter_has_hasAny_const_array/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001369146, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03375_bloom_filter_has_hasAny_const_array/metadata.json b/parser/testdata/03375_bloom_filter_has_hasAny_const_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03375_bloom_filter_has_hasAny_const_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03375_bloom_filter_has_hasAny_const_array/query.sql b/parser/testdata/03375_bloom_filter_has_hasAny_const_array/query.sql new file mode 100644 index 000000000..016482eda --- /dev/null +++ b/parser/testdata/03375_bloom_filter_has_hasAny_const_array/query.sql @@ -0,0 +1,40 @@ +SET parallel_replicas_local_plan=1; + +DROP TABLE IF EXISTS bloom_filter_has_const_array; + +CREATE TABLE bloom_filter_has_const_array +( + `bf` String, + `abf` Array(String), + INDEX idx_bf bf TYPE bloom_filter(0.01) GRANULARITY 1, + INDEX idx_abf abf TYPE bloom_filter(0.01) GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY () +SETTINGS index_granularity=1; + +INSERT INTO bloom_filter_has_const_array +VALUES ('a', ['a','a']), ('b', ['b','b']), ('c', ['c','c']), ('d',['d','e']); + +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + SELECT bf + FROM bloom_filter_has_const_array + WHERE hasAny(['a','c','d'], abf) +) +WHERE explain LIKE 'Description%' or explain LIKE 'Granules%'; + +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + SELECT bf + FROM bloom_filter_has_const_array + WHERE has(['a','d'], bf) +) +WHERE explain LIKE 'Description%' or explain LIKE 'Granules%'; + +SELECT bf +FROM bloom_filter_has_const_array +WHERE hasAny(['a','c','d'], abf) and has(['a','d'], bf) and hasAll(['d','e'], abf); + +DROP TABLE IF EXISTS bloom_filter_has_const_array; + diff --git a/parser/testdata/03375_bloom_filter_ngram_has_hasAny_const_array/ast.json 
b/parser/testdata/03375_bloom_filter_ngram_has_hasAny_const_array/ast.json new file mode 100644 index 000000000..40bdee83e --- /dev/null +++ b/parser/testdata/03375_bloom_filter_ngram_has_hasAny_const_array/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001114992, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03375_bloom_filter_ngram_has_hasAny_const_array/metadata.json b/parser/testdata/03375_bloom_filter_ngram_has_hasAny_const_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03375_bloom_filter_ngram_has_hasAny_const_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03375_bloom_filter_ngram_has_hasAny_const_array/query.sql b/parser/testdata/03375_bloom_filter_ngram_has_hasAny_const_array/query.sql new file mode 100644 index 000000000..af1eb6667 --- /dev/null +++ b/parser/testdata/03375_bloom_filter_ngram_has_hasAny_const_array/query.sql @@ -0,0 +1,40 @@ +SET parallel_replicas_local_plan=1; + +DROP TABLE IF EXISTS bloom_filter_has_const_array; + +CREATE TABLE bloom_filter_has_const_array +( + `bf` String, + `abf` Array(String), + INDEX idx_bf bf TYPE ngrambf_v1(1, 512, 3, 0) GRANULARITY 1, + INDEX idx_abf abf TYPE ngrambf_v1(1, 512, 3, 0) GRANULARITY 1, +) +ENGINE = MergeTree +ORDER BY () +SETTINGS index_granularity=1; + +INSERT INTO bloom_filter_has_const_array +VALUES ('a', ['a','a']), ('b', ['b','b']), ('c', ['c','c']), ('d',['d','e']); + +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + SELECT bf + FROM bloom_filter_has_const_array + WHERE hasAny(['a','c','d'], abf) +) +WHERE explain LIKE 'Description%' or explain LIKE 'Granules%'; + +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + SELECT bf + FROM bloom_filter_has_const_array + WHERE has(['a','d'], bf) +) +WHERE explain LIKE 'Description%' or explain LIKE 'Granules%'; + +SELECT bf +FROM bloom_filter_has_const_array +WHERE hasAny(['a','c','d'], abf) and has(['a','d'], bf) and hasAll(['d','e'], abf); + +DROP TABLE IF EXISTS bloom_filter_has_const_array; + diff --git a/parser/testdata/03375_bloom_filter_token_has_hasAny_const_array/ast.json b/parser/testdata/03375_bloom_filter_token_has_hasAny_const_array/ast.json new file mode 100644 index 000000000..a2723cc13 --- /dev/null +++ b/parser/testdata/03375_bloom_filter_token_has_hasAny_const_array/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00142858, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03375_bloom_filter_token_has_hasAny_const_array/metadata.json b/parser/testdata/03375_bloom_filter_token_has_hasAny_const_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03375_bloom_filter_token_has_hasAny_const_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03375_bloom_filter_token_has_hasAny_const_array/query.sql b/parser/testdata/03375_bloom_filter_token_has_hasAny_const_array/query.sql new file mode 100644 index 000000000..70228d5a7 --- /dev/null +++ b/parser/testdata/03375_bloom_filter_token_has_hasAny_const_array/query.sql @@ -0,0 +1,40 @@ +SET parallel_replicas_local_plan=1; + +DROP TABLE IF EXISTS bloom_filter_has_const_array; + +CREATE 
TABLE bloom_filter_has_const_array +( + `bf` String, + `abf` Array(String), + INDEX idx_bf bf TYPE tokenbf_v1(512,3,0) GRANULARITY 1, + INDEX idx_abf abf TYPE tokenbf_v1(512,3,0) GRANULARITY 1, +) +ENGINE = MergeTree +ORDER BY () +SETTINGS index_granularity=1; + +INSERT INTO bloom_filter_has_const_array +VALUES ('a', ['a','a']), ('b', ['b','b']), ('c', ['c','c']), ('d',['d','e']); + +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + SELECT bf + FROM bloom_filter_has_const_array + WHERE hasAny(['a','c','d'], abf) +) +WHERE explain LIKE 'Description%' or explain LIKE 'Granules%'; + +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + SELECT bf + FROM bloom_filter_has_const_array + WHERE has(['a','d'], bf) +) +WHERE explain LIKE 'Description%' or explain LIKE 'Granules%'; + +SELECT bf +FROM bloom_filter_has_const_array +WHERE hasAny(['a','c','d'], abf) and has(['a','d'], bf) and hasAll(['d','e'], abf); + +DROP TABLE IF EXISTS bloom_filter_has_const_array; + diff --git a/parser/testdata/03375_bool_partition/ast.json b/parser/testdata/03375_bool_partition/ast.json new file mode 100644 index 000000000..a0a51b213 --- /dev/null +++ b/parser/testdata/03375_bool_partition/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001299622, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03375_bool_partition/metadata.json b/parser/testdata/03375_bool_partition/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03375_bool_partition/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03375_bool_partition/query.sql b/parser/testdata/03375_bool_partition/query.sql new file mode 100644 index 000000000..e0dfd1a18 --- /dev/null +++ b/parser/testdata/03375_bool_partition/query.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Bool) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY (c0); +INSERT INTO TABLE t0 (c0) VALUES (FALSE), (TRUE); +SELECT part_name FROM system.parts where table='t0' and database=currentDatabase(); +DELETE FROM t0 WHERE c0; +SELECT c0 FROM t0; +DROP TABLE t0; + diff --git a/parser/testdata/03375_l2_distance_transposed_partial_reads_pass/ast.json b/parser/testdata/03375_l2_distance_transposed_partial_reads_pass/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03375_l2_distance_transposed_partial_reads_pass/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03375_l2_distance_transposed_partial_reads_pass/metadata.json b/parser/testdata/03375_l2_distance_transposed_partial_reads_pass/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03375_l2_distance_transposed_partial_reads_pass/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03375_l2_distance_transposed_partial_reads_pass/query.sql b/parser/testdata/03375_l2_distance_transposed_partial_reads_pass/query.sql new file mode 100644 index 000000000..ad6ad41d1 --- /dev/null +++ b/parser/testdata/03375_l2_distance_transposed_partial_reads_pass/query.sql @@ -0,0 +1,10 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/88362 + +SET allow_experimental_qbit_type = 1; + +CREATE TABLE qbit (id UInt32, vec QBit(BFloat16, 1)) ENGINE = Memory; +INSERT INTO qbit VALUES (1, 
[toBFloat16(1)]); + +WITH [toBFloat16(2)] AS reference_vec SELECT id, round(L2DistanceTransposed(vec, reference_vec, toNullable(1)), 5) AS dist FROM qbit; +WITH [toBFloat16(2)] AS reference_vec SELECT id, round(L2DistanceTransposed(vec, reference_vec, toLowCardinality(toNullable(1))), 5) AS dist FROM qbit; +DROP TABLE qbit; diff --git a/parser/testdata/03376_forbid_nan_inf_for_float_settings/ast.json b/parser/testdata/03376_forbid_nan_inf_for_float_settings/ast.json new file mode 100644 index 000000000..941b2adbf --- /dev/null +++ b/parser/testdata/03376_forbid_nan_inf_for_float_settings/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001067155, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03376_forbid_nan_inf_for_float_settings/metadata.json b/parser/testdata/03376_forbid_nan_inf_for_float_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03376_forbid_nan_inf_for_float_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03376_forbid_nan_inf_for_float_settings/query.sql b/parser/testdata/03376_forbid_nan_inf_for_float_settings/query.sql new file mode 100644 index 000000000..4b0a9b305 --- /dev/null +++ b/parser/testdata/03376_forbid_nan_inf_for_float_settings/query.sql @@ -0,0 +1,5 @@ +SET log_queries_probability = inf; -- { serverError CANNOT_PARSE_NUMBER } +SET log_queries_probability = nan; -- { serverError CANNOT_PARSE_NUMBER } +SET log_queries_probability = -inf; -- { serverError CANNOT_PARSE_NUMBER } + +SELECT 1 SETTINGS log_queries_probability = -inf; -- { clientError CANNOT_PARSE_NUMBER } diff --git a/parser/testdata/03376_iceberg_truncate/ast.json b/parser/testdata/03376_iceberg_truncate/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03376_iceberg_truncate/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03376_iceberg_truncate/metadata.json b/parser/testdata/03376_iceberg_truncate/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03376_iceberg_truncate/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03376_iceberg_truncate/query.sql b/parser/testdata/03376_iceberg_truncate/query.sql new file mode 100644 index 000000000..894ddb5ff --- /dev/null +++ b/parser/testdata/03376_iceberg_truncate/query.sql @@ -0,0 +1,56 @@ +-- Test taken from Iceberg repo https://github.com/apache/iceberg/blob/6e8718113c08aebf76d8e79a9e2534c89c73407a/api/src/test/java/org/apache/iceberg/transforms/TestTruncate.java + +SELECT icebergTruncate(10, 0); +SELECT icebergTruncate(10, 1); +SELECT icebergTruncate(10, 5); +SELECT icebergTruncate(10, 9); +SELECT icebergTruncate(10, 10); +SELECT icebergTruncate(10, 11); +SELECT icebergTruncate(10, -1); +SELECT icebergTruncate(10, -5); +SELECT icebergTruncate(10, -10); +SELECT icebergTruncate(10, -11); + +SELECT icebergTruncate(10, 0::Int64); +SELECT icebergTruncate(10, 1::Int64); +SELECT icebergTruncate(10, 5::Int64); +SELECT icebergTruncate(10, 9::Int64); +SELECT icebergTruncate(10, 10::Int64); +SELECT icebergTruncate(10, 11::Int64); +SELECT icebergTruncate(10, -1::Int64); +SELECT icebergTruncate(10, -5::Int64); +SELECT icebergTruncate(10, -10::Int64); +SELECT icebergTruncate(10, -11::Int64); + +SELECT icebergTruncate(10, toDecimal64('12.34', 2)); +SELECT icebergTruncate(10, 
toDecimal64('12.30', 2)); +SELECT icebergTruncate(10, toDecimal64('12.29', 2)); +SELECT icebergTruncate(10, toDecimal64('0.05', 2)); +SELECT icebergTruncate(10, toDecimal64('-0.05', 2)); + +SELECT icebergTruncate(10, toDecimal32('12.34', 2)); +SELECT icebergTruncate(10, toDecimal32('12.30', 2)); +SELECT icebergTruncate(10, toDecimal32('12.29', 2)); +SELECT icebergTruncate(10, toDecimal32('0.05', 2)); +SELECT icebergTruncate(10, toDecimal32('-0.05', 2)); + +SELECT icebergTruncate(5, 'abcdefg'); +SELECT icebergTruncate(5, 'abc'); +SELECT icebergTruncate(5, 'abcde'); + +SELECT icebergTruncate(5, toFixedString('abcdefg', 30)); +SELECT icebergTruncate(5, toFixedString('abc', 3)); +SELECT icebergTruncate(5, toFixedString('abcde', 5)); + +SELECT icebergTruncate(0, 55); --{serverError BAD_ARGUMENTS} +SELECT icebergTruncate(-1, 55); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT icebergTruncate(3, 0.0); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} + +-- Test taken from examples: https://iceberg.apache.org/spec/#truncate-transform-details + +SELECT icebergTruncate(10, 1); +SELECT icebergTruncate(10, -1); +SELECT icebergTruncate(10, 1::Int64); +SELECT icebergTruncate(10, -1::Int64); +SELECT icebergTruncate(50, toDecimal64('10.65', 2)); +SELECT icebergTruncate(3, 'iceberg'); diff --git a/parser/testdata/03376_json_comparison/ast.json b/parser/testdata/03376_json_comparison/ast.json new file mode 100644 index 000000000..82015a94b --- /dev/null +++ b/parser/testdata/03376_json_comparison/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001335254, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03376_json_comparison/metadata.json b/parser/testdata/03376_json_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03376_json_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03376_json_comparison/query.sql b/parser/testdata/03376_json_comparison/query.sql new file mode 100644 index 000000000..365224fcc --- /dev/null +++ b/parser/testdata/03376_json_comparison/query.sql @@ -0,0 +1,103 @@ +set enable_json_type=1; + +select '{}'::JSON as j1, '{}'::JSON as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1}'::JSON as j1, '{}'::JSON as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1}'::JSON as j1, '{"a" : 1}'::JSON as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1}'::JSON as j1, '{"a" : 2}'::JSON as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1}'::JSON as j1, '{"a" : 0}'::JSON as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1}'::JSON as j1, '{"b" : 1}'::JSON as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1}'::JSON as j1, '{"a" : 1}'::JSON as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1}'::JSON as j1, '{"a" : 1, "b" : 1}'::JSON as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1}'::JSON as j1, '{"a" : 1, "b" : 2}'::JSON as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1}'::JSON as j1, '{"a" : 1, "b" : 0}'::JSON as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1}'::JSON as j1, '{"a" : 0, "b" : 1}'::JSON as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1}'::JSON as j1, '{"a" : 2, "b" : 1}'::JSON as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1}'::JSON as j1, '{"b" : 1, "c" : 1}'::JSON as j2, j1 < j2, j1 = j2, j1 > j2; + +select '{}'::JSON(a UInt32) as j1, '{}'::JSON(a UInt32) as 
j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1}'::JSON(a UInt32) as j1, '{}'::JSON(a UInt32) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1}'::JSON(a UInt32) as j1, '{"a" : 1}'::JSON(a UInt32) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1}'::JSON(a UInt32) as j1, '{"a" : 0}'::JSON(a UInt32) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1}'::JSON(a UInt32) as j1, '{"a" : 2}'::JSON(a UInt32) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1}'::JSON(a UInt32, b UInt32) as j1, '{"a" : 1}'::JSON(a UInt32, b UInt32) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1}'::JSON(a UInt32, b UInt32) as j1, '{"a" : 1, "b" : 1}'::JSON(a UInt32, b UInt32) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1}'::JSON(a UInt32, b UInt32) as j1, '{"a" : 1, "b" : 0}'::JSON(a UInt32, b UInt32) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1}'::JSON(a UInt32, b UInt32) as j1, '{"a" : 1, "b" : 2}'::JSON(a UInt32, b UInt32) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1}'::JSON(a UInt32, b UInt32) as j1, '{"a" : 0, "b" : 1}'::JSON(a UInt32, b UInt32) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1}'::JSON(a UInt32, b UInt32) as j1, '{"a" : 2, "b" : 1}'::JSON(a UInt32, b UInt32) as j2, j1 < j2, j1 = j2, j1 > j2; + +select '{}'::JSON(max_dynamic_paths=2) as j1, '{}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"a" : 0, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"a" : 2, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 0, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 2, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 0, "d" : 1}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 2, "d" : 1}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 1, "d" : 0}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"a" : 1}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"a" : 0}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"a" : 2}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"b" : 
1}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"b" : 0}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"b" : 2}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 0}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 2}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 1}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 0}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 2}'::JSON(max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; + +select '{}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 0, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 2, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 0, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 2, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 0, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 2, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 1, "d" : 0, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) 
as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 1, "d" : 2, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 0, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 2, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 0}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 2}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 0}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 2}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 0}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 2}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 0}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 2}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" 
: 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 1, "d" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 1, "d" : 0}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 1, "d" : 2}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 0}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; +select '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 1, "f" : 1}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j1, '{"a" : 1, "b" : 1, "c" : 1, "d" : 1, "e" : 2}'::JSON(a UInt32, b UInt32, max_dynamic_paths=2) as j2, j1 < j2, j1 = j2, j1 > j2; + +create table test (json1 JSON(max_dynamic_paths=2, a UInt32), json2 JSON(max_dynamic_paths=2, a UInt32)) engine=Memory; +insert into test format JSONEachRow +{"json1" : {}, "json2" : {}} +{"json1" : {"a" : 42}, "json2" : {"a" : 42}} +{"json1" : {"a" : 42}, "json2" : {"a" : 43}} +{"json1" : {"a" : 42, "b" : 42}, "json2" : {"a" : 42, "b" : 42}} +{"json1" : {"a" : 42, "b" : 42}, "json2" : {"a" : 42, "b" : 43}} +{"json1" : {"a" : 42, "b" : 42, "c" : 42}, "json2" : {"a" : 42, "b" : 42, "d" : 42}} +{"json1" : {"a" : 42, "b" : 42, "c" : 42, "d" : 42}, "json2" : {"a" : 42, "b" : 42, "d" : 42, "c" : 42}} +{"json1" : {"a" : 42, "b" : 42, "c" : 42, "d" : 42}, "json2" : {"a" : 42, "b" : 42, "d" : 43, "c" : 42}} +{"json1" : {"a" : 42, "b" : 42, "c" : 42, "d" : 42}, "json2" : {"a" : 42, "b" : 42, "d" : 42, "c" : 43}} +{"json1" : {"a" : 42, "b" : 42, "c" : null, "d" : 42}, "json2" : {"a" : 42, "b" : 42, "d" : 42, "c" : null}} +{"json1" : {"a" : 42, "b" : 42, "c" : null, "d" : 42}, "json2" : {"a" : 42, "b" : 42, "d" : 42, "c" : 42}} +{"json1" : {"a" : 42, "b" : 42, "c" : 42, "d" : null}, "json2" : {"a" : 42, "b" : 42, "d" : null, "c" : 42}} +{"json1" : {"a" : 42, "b" : 42, "c" : 42, "d" : null}, "json2" : {"a" : 42, "b" : 42, "d" : 42, "c" : 42}} +{"json1" : {"a" : 42, "b" : 42, "c" : 42, "d" : 42, "e" : 42}, "json2" : {"a" : 42, "b" : 42, "d" : 42, "c" : 42, "e" : 42}} +{"json1" : {"a" : 42, "b" : 42, "c" : 42, "d" : 42, "e" : 42}, "json2" : {"a" : 42, "b" : 42, "d" : 42, "c" : 42, "e" : "42"}} +{"json1" : {"a" : 42, "b" : 42, "c" : 42, "d" : 42, "e" : 42}, "json2" : {"a" : 42, "b" : 42, "d" : 42, "c" : 42, "e" : 42.0}}; + +select json1, json2, json1 == json2, json1 != json2, json1 < json2, json1 > json2 from test; +drop table test; diff --git a/parser/testdata/03376_l2_distance_transposed_type_mismatch/ast.json b/parser/testdata/03376_l2_distance_transposed_type_mismatch/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03376_l2_distance_transposed_type_mismatch/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/03376_l2_distance_transposed_type_mismatch/metadata.json b/parser/testdata/03376_l2_distance_transposed_type_mismatch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03376_l2_distance_transposed_type_mismatch/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03376_l2_distance_transposed_type_mismatch/query.sql b/parser/testdata/03376_l2_distance_transposed_type_mismatch/query.sql new file mode 100644 index 000000000..c0821438e --- /dev/null +++ b/parser/testdata/03376_l2_distance_transposed_type_mismatch/query.sql @@ -0,0 +1,64 @@ +-- Tags: no-parallel-replicas +-- No parallel replicas because: https://github.com/ClickHouse/ClickHouse/issues/74367 + +-- https://github.com/ClickHouse/ClickHouse/issues/89976 +-- Check whether the distance is calculated correctly when query_vec has a different type than qbit + +SET allow_experimental_qbit_type = 1; + +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + id UInt32, + qbit QBit(Float32, 16) +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO test VALUES (1, [-0.042587746,0.029204812,-0.018542241,-0.0006326993,-0.046840265,0.017869968,-0.036177695,0.008778641,-0.0062302556,0.030549359,-0.009787052,-0.01996496,-0.0034493103,-0.01415683,-0.04583967,-0.047684517]); + +SELECT 'Test L2DistanceTransposed(qbit, ref_vec, precision)'; +------------------------------------------------------------- +WITH [-0.042587746, 0.029204812, -0.018542241, -0.0006326993, -0.046840265, 0.017869968, -0.036177695, 0.008778641, -0.0062302556, 0.030549359, -0.009787052, -0.01996496, -0.0034493103, -0.01415683, -0.04583967, -0.047684517] AS query_vec +SELECT id, L2DistanceTransposed(qbit, query_vec, 32) AS distance +FROM test SETTINGS enable_analyzer=1; + +WITH [-0.042587746, 0.029204812, -0.018542241, -0.0006326993, -0.046840265, 0.017869968, -0.036177695, 0.008778641, -0.0062302556, 0.030549359, -0.009787052, -0.01996496, -0.0034493103, -0.01415683, -0.04583967, -0.047684517] AS query_vec +SELECT id, L2DistanceTransposed(qbit, query_vec, 32) AS distance +FROM test SETTINGS enable_analyzer=0; + + +SELECT 'Test L2DistanceTransposed(qbit.1, ..., qbit.precision, qbit_dim, ref_vec)'; +----------------------------------------------------------------------------------- +WITH [-0.042587746, 0.029204812, -0.018542241, -0.0006326993, -0.046840265, 0.017869968, -0.036177695, 0.008778641, -0.0062302556, 0.030549359, -0.009787052, -0.01996496, -0.0034493103, -0.01415683, -0.04583967, -0.047684517] AS query_vec +SELECT + id, + L2DistanceTransposed(qbit.1, qbit.2, qbit.3, qbit.4, qbit.5, qbit.6, qbit.7, qbit.8, qbit.9, qbit.10, qbit.11, qbit.12, qbit.13, qbit.14, qbit.15, qbit.16, qbit.17, qbit.18, qbit.19, qbit.20, qbit.21, qbit.22, qbit.23, qbit.24, qbit.25, qbit.26, qbit.27, qbit.28, qbit.29, qbit.30, qbit.31, qbit.32, + 16, + CAST(query_vec, 'Array(Float32)') + ) AS distance +FROM test; + +-- This is expected to give a wrong distance because the type of query_vec doesn't match the type of qbit +WITH [-0.042587746, 0.029204812, -0.018542241, -0.0006326993, -0.046840265, 0.017869968, -0.036177695, 0.008778641, -0.0062302556, 0.030549359, -0.009787052, -0.01996496, -0.0034493103, -0.01415683, -0.04583967, -0.047684517] AS query_vec +SELECT + id, + L2DistanceTransposed(qbit.1, qbit.2, qbit.3, qbit.4, qbit.5, qbit.6, qbit.7, qbit.8, qbit.9, qbit.10, qbit.11, qbit.12, qbit.13, qbit.14, qbit.15, qbit.16, qbit.17, qbit.18, qbit.19, qbit.20, qbit.21, qbit.22, qbit.23, qbit.24, qbit.25, qbit.26, 
qbit.27, qbit.28, qbit.29, qbit.30, qbit.31, qbit.32, + 16, + CAST(query_vec, 'Array(Float64)') + ) AS distance +FROM test; + +-- Test with more columns than the function expects +WITH [-0.042587746, 0.029204812, -0.018542241, -0.0006326993, -0.046840265, 0.017869968, -0.036177695, 0.008778641, -0.0062302556, 0.030549359, -0.009787052, -0.01996496, -0.0034493103, -0.01415683, -0.04583967, -0.047684517] AS query_vec +SELECT + id, + L2DistanceTransposed(qbit.1, qbit.2, qbit.3, qbit.4, qbit.5, qbit.6, qbit.7, qbit.8, qbit.9, qbit.10, qbit.11, qbit.12, qbit.13, qbit.14, qbit.15, qbit.16, qbit.17, qbit.18, qbit.19, qbit.20, qbit.21, qbit.22, qbit.23, qbit.24, qbit.25, qbit.26, qbit.27, qbit.28, qbit.29, qbit.30, qbit.31, qbit.32, qbit.32, + 16, + CAST(query_vec, 'Array(Float32)') + ) AS distance +FROM test; -- { serverError TOO_MANY_ARGUMENTS_FOR_FUNCTION } + + +DROP TABLE test; diff --git a/parser/testdata/03377_json_order_by_bug/ast.json b/parser/testdata/03377_json_order_by_bug/ast.json new file mode 100644 index 000000000..38109fbe3 --- /dev/null +++ b/parser/testdata/03377_json_order_by_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001518098, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03377_json_order_by_bug/metadata.json b/parser/testdata/03377_json_order_by_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03377_json_order_by_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03377_json_order_by_bug/query.sql b/parser/testdata/03377_json_order_by_bug/query.sql new file mode 100644 index 000000000..2b7f4dc2d --- /dev/null +++ b/parser/testdata/03377_json_order_by_bug/query.sql @@ -0,0 +1,7 @@ +SET enable_variant_type = 1, allow_suspicious_variant_types = 1, enable_json_type = 1, allow_suspicious_types_in_order_by = 1, type_json_skip_duplicated_paths=1; +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Variant(Float32,UInt8,JSON)) ENGINE = Memory; +INSERT INTO TABLE t0 (c0) VALUES ('{"😆":"1913-10-01"}'), (61), (-470815662), (1707852935), (128), (1609237855), ('{"c0":[]}'), (-679.297), (253), (-1.2671), (-1072401825), (98560553.3484), (1), (-299610930), ('{"😉😉":{"😉😉":{}}}'), (143), (49), (150), (253), ('{"😆":[],"c0.c1":[]}'), (121), ('{"😉😉":["2019-01-14"]}'), (44), ('{"c1":[]}'), (5149976132735004555), ('{"c1":-355}'), ('{"c0.c1":{},"c0":false}'), (83), ('{"c1":false,"c0":[]}'), (64), (91), (8137177240502908368), (173), (114), (180), (245), (5815441155154325732), ('{"😉😉":"叫","c1":["got"],"c1":{}}'), ('{}'), (100), ('{"c1":{},"😉😉":{}}'), (249), (-867482.56), ('{"😉😉":{}}'), (183), (-40.1), (4.59), ('{"c1":[[-1322908855950944162]]}'), ('{"c0.c1":{"c1":"run","c0.c1":{}}}'), (212), (216), ('{"😉😉":811,"c1":{"c0.c1":-16.1227463}}'), (154), (27655478), (54), (0.655); +SELECT 1 FROM t0 ORDER BY t0.c0 DESC FORMAT Null; +DROP TABLE t0; + diff --git a/parser/testdata/03377_qbit_parameters/ast.json b/parser/testdata/03377_qbit_parameters/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03377_qbit_parameters/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03377_qbit_parameters/metadata.json b/parser/testdata/03377_qbit_parameters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03377_qbit_parameters/metadata.json @@ -0,0 +1 @@ +{"todo": 
true} diff --git a/parser/testdata/03377_qbit_parameters/query.sql b/parser/testdata/03377_qbit_parameters/query.sql new file mode 100644 index 000000000..ad79dd975 --- /dev/null +++ b/parser/testdata/03377_qbit_parameters/query.sql @@ -0,0 +1,35 @@ +-- Tests for QBit query parameters +-- https://github.com/ClickHouse/ClickHouse/issues/91103 + +SET allow_experimental_qbit_type = 1; + +SELECT 'Test QBit query parameters'; +SET param_q1 = [1, 2, 3, 4]; +SELECT {q1:QBit(Float32, 4)}; + +SET param_q2 = [1.5, 2.5, 3.5, 4.5]; +SELECT {q2:QBit(Float64, 4)}; + +SET param_q3 = [1, 2, 3, 4, 5, 6, 7, 8]; +SELECT {q3:QBit(BFloat16, 8)}; + +SELECT 'Test QBit query parameters with different dimensions'; +SET param_q4 = [1, 2]; +SELECT {q4:QBit(Float32, 2)}; + +SET param_q5 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; +SELECT {q5:QBit(Float32, 16)}; + +SELECT 'Test QBit query parameters in expressions'; +SET param_q6 = [1, 2, 3, 4]; +SELECT L2DistanceTransposed({q6:QBit(Float32, 4)}, [1, 2, 3, 4], 32); + +SELECT 'Test QBit query parameters with table'; +DROP TABLE IF EXISTS qbit_param_test; +CREATE TABLE qbit_param_test (id UInt32, vec QBit(Float32, 4)) ENGINE = Memory; +INSERT INTO qbit_param_test VALUES (1, [1, 0, 0, 0]), (2, [0, 1, 0, 0]), (3, [1, 1, 1, 1]); + +SET param_q7 = [1, 1, 1, 1]; +SELECT id, L2DistanceTransposed(vec, {q7:Array(Float32)}, 4) AS dist FROM qbit_param_test ORDER BY id; + +DROP TABLE qbit_param_test; diff --git a/parser/testdata/03380_input_async_insert/ast.json b/parser/testdata/03380_input_async_insert/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03380_input_async_insert/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03380_input_async_insert/metadata.json b/parser/testdata/03380_input_async_insert/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03380_input_async_insert/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03380_input_async_insert/query.sql b/parser/testdata/03380_input_async_insert/query.sql new file mode 100644 index 000000000..18652ea7e --- /dev/null +++ b/parser/testdata/03380_input_async_insert/query.sql @@ -0,0 +1,23 @@ +--- +--- Analyzer +--- + +insert into function null() select * from input('x Int, y String') settings async_insert=1, allow_experimental_analyzer=1 format JSONEachRow {"x" : 1, "y" : "string1"}, {"y" : "string2", "x" : 2}; + +insert into function null('auto') select * from input('x Int, y String') settings async_insert=1, allow_experimental_analyzer=1 format JSONEachRow {"x" : 1, "y" : "string1"}, {"y" : "string2", "x" : 2}; + +insert into function null('x Int, y String') select * from input('x Int, y String') settings async_insert=1, allow_experimental_analyzer=1 format JSONEachRow {"x" : 1, "y" : "string1"}, {"y" : "string2", "x" : 2}; + +--- +--- Non-analyzer - does not support INSERT INTO FUNCTION null('auto') SELECT FROM input() +--- +insert into function null() select * from input('x Int, y String') settings async_insert=1, allow_experimental_analyzer=0 format JSONEachRow {"x" : 1, "y" : "string1"}, {"y" : "string2", "x" : 2}; -- { serverError QUERY_IS_PROHIBITED } + +insert into function null('x Int, y String') select * from input('x Int, y String') settings async_insert=1, allow_experimental_analyzer=0 format JSONEachRow {"x" : 1, "y" : "string1"}, {"y" : "string2", "x" : 2}; + +drop table if exists x; + +create table x (x Int, y String) engine=Memory; +insert into x select * from 
input('x Int, y String') settings async_insert=1 format JSONEachRow {"x" : 1, "y" : "string1"}, {"y" : "string2", "x" : 2}; + +select * from x; diff --git a/parser/testdata/03381_lazy_materialization_limit_offset/ast.json b/parser/testdata/03381_lazy_materialization_limit_offset/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03381_lazy_materialization_limit_offset/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03381_lazy_materialization_limit_offset/metadata.json b/parser/testdata/03381_lazy_materialization_limit_offset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03381_lazy_materialization_limit_offset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03381_lazy_materialization_limit_offset/query.sql b/parser/testdata/03381_lazy_materialization_limit_offset/query.sql new file mode 100644 index 000000000..0223e5908 --- /dev/null +++ b/parser/testdata/03381_lazy_materialization_limit_offset/query.sql @@ -0,0 +1,20 @@ +-- Random settings limits: index_granularity=(None, 60000) +-- Tags: long + +-- test is derived from 03246_alter_from_string_to_json.sql + +set max_block_size = 20000; + +drop table if exists test; + +create table test (x UInt64, json String) engine=MergeTree order by x; + +insert into test select number, toJSONString(map('key' || multiIf(number < 60000, number % 2, number < 120000, number % 2 + 2, number % 2 + 4), 'value' || number)) from numbers(200000); + +alter table test modify column json JSON settings mutations_sync=1; + +set optimize_read_in_order=0; -- disabling read in order optimization leads to error +select json from test order by x limit 10 offset 120000; +select json.key0, json.key1, json.key2, json.key3, json.key4, json.key5 from test order by x limit 10 offset 120000; + +drop table test; diff --git a/parser/testdata/03381_remote_constants/ast.json b/parser/testdata/03381_remote_constants/ast.json new file mode 100644 index 000000000..12467cb56 --- /dev/null +++ b/parser/testdata/03381_remote_constants/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001573417, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03381_remote_constants/metadata.json b/parser/testdata/03381_remote_constants/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03381_remote_constants/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03381_remote_constants/query.sql b/parser/testdata/03381_remote_constants/query.sql new file mode 100644 index 000000000..3ca95f0c2 --- /dev/null +++ b/parser/testdata/03381_remote_constants/query.sql @@ -0,0 +1,27 @@ +set enable_analyzer=1; +set session_timezone='UTC'; + +select '1970-01-01 00:00:01.000'::DateTime64(3) from remote('127.0.0.{1,2}', 'system.one'); +select ['1970-01-01 00:00:01.000']::Array(DateTime64(3)) from remote('127.0.0.{1,2}', 'system.one'); +select map('a', '1970-01-01 00:00:01.000')::Map(String, DateTime64(3)) from remote('127.0.0.{1,2}', 'system.one'); +select tuple('1970-01-01 00:00:01.000')::Tuple(d DateTime64(3)) from remote('127.0.0.{1,2}', 'system.one'); +select '1970-01-01 00:00:01.000'::Variant(DateTime64(3)) from remote('127.0.0.{1,2}', 'system.one'); +select '1970-01-01 00:00:01.000'::DateTime64(3)::Dynamic from remote('127.0.0.{1,2}', 
'system.one'); +select '{"a" : "1970-01-01 00:00:01.000"}'::JSON(a DateTime64(3)) from remote('127.0.0.{1,2}', 'system.one'); +select map('a', [tuple('1970-01-01 00:00:01.000')])::Map(String, Array(Tuple(d Variant(DateTime64(3))))) from remote('127.0.0.{1,2}', 'system.one'); + +select '1970-01-01'::Date32::Dynamic from remote('127.0.0.{1,2}', 'system.one'); +select '1970-01-01'::Date::Dynamic from remote('127.0.0.{1,2}', 'system.one'); +select '1970-01-01 00:00:01'::DateTime::Dynamic from remote('127.0.0.{1,2}', 'system.one'); +select [tuple('1970-01-01')]::Array(Tuple(Date32))::Dynamic as d, dynamicType(d) from remote('127.0.0.{1,2}', 'system.one'); + +select [tuple('1970-01-01')]::Array(Tuple(Date))::Dynamic as d, dynamicType(d) from remote('127.0.0.{1,2}', 'system.one'); +select [tuple('1970-01-01 00:00:01')]::Array(Tuple(DateTime))::Dynamic as d, dynamicType(d) from remote('127.0.0.{1,2}', 'system.one'); +select [tuple('1970-01-01 00:00:01.00')]::Array(Tuple(DateTime64(3)))::Dynamic as d, dynamicType(d) from remote('127.0.0.{1,2}', 'system.one'); + +select '{"a" : 42, "b" : "1970-01-01", "c" : "1970-01-01 00:00:01", "d" : "1970-01-01 00:00:01.00"}'::JSON as json, JSONAllPathsWithTypes(json) from remote('127.0.0.{1,2}', 'system.one'); +select map('a', ['{"a" : 42, "b" : "1970-01-01", "c" : "1970-01-01 00:00:01", "d" : "1970-01-01 00:00:01.00"}'])::Map(String, Array(Variant(JSON))) as json, JSONAllPathsWithTypes(assumeNotNull(variantElement(json['a'][1], 'JSON'))) from remote('127.0.0.{1,2}', 'system.one'); +select '{"a" : [{"aa" : [42]}]}'::JSON as json, JSONAllPathsWithTypes(arrayJoin(json.a[])) from remote('127.0.0.{1,2}', 'system.one'); +select '{"a" : [{"aa" : ["1970-01-01"]}]}'::JSON as json, JSONAllPathsWithTypes(arrayJoin(json.a[])) from remote('127.0.0.{1,2}', 'system.one'); +select '{"a" : [{"aa" : ["1970-01-01 00:00:01"]}]}'::JSON as json, JSONAllPathsWithTypes(arrayJoin(json.a[])) from remote('127.0.0.{1,2}', 'system.one'); +select '{"a" : [{"aa" : ["1970-01-01 00:00:01.000"]}]}'::JSON as json, JSONAllPathsWithTypes(arrayJoin(json.a[])) from remote('127.0.0.{1,2}', 'system.one'); diff --git a/parser/testdata/03382_dynamic_serialization_default_settings/ast.json b/parser/testdata/03382_dynamic_serialization_default_settings/ast.json new file mode 100644 index 000000000..03da1f417 --- /dev/null +++ b/parser/testdata/03382_dynamic_serialization_default_settings/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001419976, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03382_dynamic_serialization_default_settings/metadata.json b/parser/testdata/03382_dynamic_serialization_default_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03382_dynamic_serialization_default_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03382_dynamic_serialization_default_settings/query.sql b/parser/testdata/03382_dynamic_serialization_default_settings/query.sql new file mode 100644 index 000000000..b1ae4a111 --- /dev/null +++ b/parser/testdata/03382_dynamic_serialization_default_settings/query.sql @@ -0,0 +1,7 @@ +SET enable_json_type = 1; +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 JSON(max_dynamic_types=0)) ENGINE = Memory; +INSERT INTO TABLE t0 (c0) SETTINGS input_format_binary_read_json_as_string = 1, 
output_format_native_write_json_as_string = 1 VALUES ('{"a":[{},1]}'); +SELECT * FROM t0; +DROP TABLE t0; + diff --git a/parser/testdata/03390_non_constant_case/ast.json b/parser/testdata/03390_non_constant_case/ast.json new file mode 100644 index 000000000..d7ff1d000 --- /dev/null +++ b/parser/testdata/03390_non_constant_case/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Numeric CASE'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.00166039, + "rows_read": 5, + "bytes_read": 183 + } +} diff --git a/parser/testdata/03390_non_constant_case/metadata.json b/parser/testdata/03390_non_constant_case/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03390_non_constant_case/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03390_non_constant_case/query.sql b/parser/testdata/03390_non_constant_case/query.sql new file mode 100644 index 000000000..c1248c44d --- /dev/null +++ b/parser/testdata/03390_non_constant_case/query.sql @@ -0,0 +1,62 @@ +SELECT 'Numeric CASE'; +SELECT number, + CASE number % 2 + WHEN number % 3 THEN 'Match Mod 3' + WHEN number % 4 THEN 'Match Mod 4' + ELSE 'No Match' + END AS result +FROM numbers(10); + +SELECT 'String CASE'; +SELECT name, + CASE name + WHEN substring(name, 1, 1) THEN 'First letter' + WHEN reverse(name) THEN 'Reversed match' + ELSE 'No Match' + END AS result +FROM (SELECT arrayJoin(['Alice', 'Bob', 'Charlie', 'David', 'abba', 'A']) AS name); + +SELECT 'Date CASE'; +SELECT event_date, + CASE event_date + WHEN toDate('2024-03-10') THEN 'Special day' + WHEN toDate('2024-03-12') THEN 'Today' + ELSE 'Normal Day' + END AS event_type +FROM (SELECT arrayJoin([toDate('2024-03-10'), toDate('2024-03-11'), toDate('2024-03-12')]) AS event_date); + +SELECT '1M Rows'; +SELECT count() FROM numbers(1000000) WHERE CASE number % 2 + WHEN number % 3 THEN 1 + WHEN number % 5 THEN 1 + ELSE 0 +END = 1; + +SELECT DISTINCT caseWithExpression(1.1, toNullable(0.1), 'a', 1.1, 'b', materialize(2.1), toFixedString('c', 1), 'default' ) AS f; + +SELECT + caseWithExpression(NULL, materialize(NULL), NULL, NULL) AS f1, + if(NULL, toDateTimeOrZero(NULL), NULL) AS f2 +FROM numbers(1); + +SELECT CASE number WHEN 1 THEN number + 2 ELSE number * 2 END FROM numbers(3); + +SELECT caseWithExpression( + materialize( + materialize(NULL) + ), + materialize(NULL), + NULL, + NULL +); + +SELECT caseWithExpression( + materialize( + assumeNotNull( + materialize(NULL) + ) + ), + materialize(NULL), + NULL, + NULL +); -- { serverError ILLEGAL_COLUMN } diff --git a/parser/testdata/03391_logical_error_sort_column/ast.json b/parser/testdata/03391_logical_error_sort_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03391_logical_error_sort_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03391_logical_error_sort_column/metadata.json b/parser/testdata/03391_logical_error_sort_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03391_logical_error_sort_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03391_logical_error_sort_column/query.sql 
b/parser/testdata/03391_logical_error_sort_column/query.sql new file mode 100644 index 000000000..cda1d059a --- /dev/null +++ b/parser/testdata/03391_logical_error_sort_column/query.sql @@ -0,0 +1,5 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/77558 +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Int, c1 Int ALIAS 1) ENGINE = Memory; +SELECT t0.c1 FROM t0 ORDER BY 1; +DROP TABLE IF EXISTS t0; diff --git a/parser/testdata/03392_crash_group_by_use_nulls/ast.json b/parser/testdata/03392_crash_group_by_use_nulls/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03392_crash_group_by_use_nulls/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03392_crash_group_by_use_nulls/metadata.json b/parser/testdata/03392_crash_group_by_use_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03392_crash_group_by_use_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03392_crash_group_by_use_nulls/query.sql b/parser/testdata/03392_crash_group_by_use_nulls/query.sql new file mode 100644 index 000000000..6f24fee98 --- /dev/null +++ b/parser/testdata/03392_crash_group_by_use_nulls/query.sql @@ -0,0 +1,27 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/77485 +SELECT min(c0 >= ANY(SELECT '1' GROUP BY GROUPING SETS (1))) FROM (SELECT 1 c0) t0 SETTINGS group_by_use_nulls = 1; + +SELECT max(number >= ( + SELECT min(x) + FROM + ( + SELECT '1' AS x + GROUP BY + GROUPING SETS ((1)) + ) + )) +FROM numbers(2) +SETTINGS group_by_use_nulls = 1, enable_analyzer = 1; + +EXPLAIN QUERY TREE +SELECT max(number >= ( + SELECT min(x) + FROM + ( + SELECT '1' AS x + GROUP BY + GROUPING SETS ((1)) + ) + )) +FROM numbers(2) +SETTINGS group_by_use_nulls = 1, enable_analyzer = 1; diff --git a/parser/testdata/03392_inconsistent_formatting_of_lambda/ast.json b/parser/testdata/03392_inconsistent_formatting_of_lambda/ast.json new file mode 100644 index 000000000..18a60eb37 --- /dev/null +++ b/parser/testdata/03392_inconsistent_formatting_of_lambda/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001045038, + "rows_read": 10, + "bytes_read": 374 + } +} diff --git a/parser/testdata/03392_inconsistent_formatting_of_lambda/metadata.json b/parser/testdata/03392_inconsistent_formatting_of_lambda/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03392_inconsistent_formatting_of_lambda/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03392_inconsistent_formatting_of_lambda/query.sql b/parser/testdata/03392_inconsistent_formatting_of_lambda/query.sql new file mode 100644 index 000000000..b9f8e5867 --- /dev/null +++ b/parser/testdata/03392_inconsistent_formatting_of_lambda/query.sql @@ -0,0 +1,2 @@ +SELECT lambda(tuple(1), 1); -- {serverError BAD_ARGUMENTS, TYPE_MISMATCH} argument is not identifier +SELECT 
lambda(tuple(1, 2), materialize(1) + x); -- {serverError BAD_ARGUMENTS, TYPE_MISMATCH} argument is not identifier diff --git a/parser/testdata/03393_ASTTableIdentifier_fuzzer/ast.json b/parser/testdata/03393_ASTTableIdentifier_fuzzer/ast.json new file mode 100644 index 000000000..747d34348 --- /dev/null +++ b/parser/testdata/03393_ASTTableIdentifier_fuzzer/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery null (children 3)" + }, + { + "explain": " Identifier null" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration key (children 1)" + }, + { + "explain": " DataType Int" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Null" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001313801, + "rows_read": 8, + "bytes_read": 270 + } +} diff --git a/parser/testdata/03393_ASTTableIdentifier_fuzzer/metadata.json b/parser/testdata/03393_ASTTableIdentifier_fuzzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03393_ASTTableIdentifier_fuzzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03393_ASTTableIdentifier_fuzzer/query.sql b/parser/testdata/03393_ASTTableIdentifier_fuzzer/query.sql new file mode 100644 index 000000000..62dc20e55 --- /dev/null +++ b/parser/testdata/03393_ASTTableIdentifier_fuzzer/query.sql @@ -0,0 +1,2 @@ +create table null (key Int) engine=Null; +SELECT * FROM {CLICKHOUSE_DATABASE:Identifier}.null; diff --git a/parser/testdata/03393_join_bug_77848/ast.json b/parser/testdata/03393_join_bug_77848/ast.json new file mode 100644 index 000000000..db7151c38 --- /dev/null +++ b/parser/testdata/03393_join_bug_77848/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001543706, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03393_join_bug_77848/metadata.json b/parser/testdata/03393_join_bug_77848/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03393_join_bug_77848/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03393_join_bug_77848/query.sql b/parser/testdata/03393_join_bug_77848/query.sql new file mode 100644 index 000000000..d085655e9 --- /dev/null +++ b/parser/testdata/03393_join_bug_77848/query.sql @@ -0,0 +1,18 @@ +SET parallel_replicas_local_plan=1; + + +DROP TABLE IF EXISTS BadTable; +DROP TABLE IF EXISTS BadJoin; + +CREATE TABLE IF NOT EXISTS BadTable (id_uint UInt128) ENGINE = MergeTree() ORDER BY id_uint; +INSERT INTO BadTable SELECT toUInt128(12); + +CREATE TABLE IF NOT EXISTS BadJoin (id UUID, name LowCardinality(String)) ENGINE = MergeTree() ORDER BY (name); +INSERT INTO BadJoin SELECT '12a34567-8901-2345-6789-012345678901', '12'; +SET enable_analyzer = 1; +-- TODO(@vdimir): NOT_FOUND_COLUMN_IN_BLOCK is a bug, should be fixed +-- This test ensures that the query at least does not crash + +SELECT 1 FROM BadTable i LEFT JOIN BadJoin c ON i.id_uint = toUInt128(c.id) WHERE equals(i.id_uint, 12); + +SELECT equals(i.id_uint, 12) FROM BadTable i LEFT JOIN BadJoin c ON i.id_uint = toUInt128(c.id) WHERE equals(i.id_uint, 12); diff --git a/parser/testdata/03393_max_merge_delayed_streams_for_parallel_write/ast.json 
b/parser/testdata/03393_max_merge_delayed_streams_for_parallel_write/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03393_max_merge_delayed_streams_for_parallel_write/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03393_max_merge_delayed_streams_for_parallel_write/metadata.json b/parser/testdata/03393_max_merge_delayed_streams_for_parallel_write/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03393_max_merge_delayed_streams_for_parallel_write/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03393_max_merge_delayed_streams_for_parallel_write/query.sql b/parser/testdata/03393_max_merge_delayed_streams_for_parallel_write/query.sql new file mode 100644 index 000000000..44634de4d --- /dev/null +++ b/parser/testdata/03393_max_merge_delayed_streams_for_parallel_write/query.sql @@ -0,0 +1,37 @@ +-- Tags: no-fasttest, long, no-parallel, no-flaky-check, no-msan +-- - no-fasttest -- S3 is required +-- - no-flaky-check -- not compatible with ThreadFuzzer + +-- The real example: metric_log, with 1200+ columns! +system flush logs system.metric_log; + +create table metric_log as system.metric_log +engine = MergeTree +partition by () +order by () +settings + -- the cache has its own problems (see filesystem_cache_prefer_bigger_buffer_size) + storage_policy = 's3_no_cache', + -- horizontal merges open all streams at once, so they will still use a huge amount of memory + min_rows_for_wide_part = 0, + min_bytes_for_wide_part = 0, + vertical_merge_algorithm_min_rows_to_activate = 0, + vertical_merge_algorithm_min_columns_to_activate = 1, + min_bytes_for_full_part_storage = 0, + -- avoid excessive memory usage (due to the default 1MiB buffer that is created for each column) + max_merge_delayed_streams_for_parallel_write = 100, + -- avoid superfluous merges + merge_selector_base = 1000, + auto_statistics_types = ''; + +insert into metric_log select * from generateRandom() limit 10; + +optimize table metric_log final; +system flush logs part_log; +select 'max_merge_delayed_streams_for_parallel_write=100' as test, * from system.part_log where table = 'metric_log' and database = currentDatabase() and event_date >= yesterday() and event_type = 'MergeParts' and peak_memory_usage > 1_000_000_000 format Vertical; + +alter table metric_log modify setting max_merge_delayed_streams_for_parallel_write = 10000; + +optimize table metric_log final; +system flush logs part_log; +select 'max_merge_delayed_streams_for_parallel_write=10000' as test, count() as count from system.part_log where table = 'metric_log' and database = currentDatabase() and event_date >= yesterday() and event_type = 'MergeParts' and peak_memory_usage > 1_000_000_000; diff --git a/parser/testdata/03393_max_read_buffer_size_non_zero/ast.json b/parser/testdata/03393_max_read_buffer_size_non_zero/ast.json new file mode 100644 index 000000000..d0bc6051e --- /dev/null +++ b/parser/testdata/03393_max_read_buffer_size_non_zero/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000881987, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03393_max_read_buffer_size_non_zero/metadata.json b/parser/testdata/03393_max_read_buffer_size_non_zero/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++
b/parser/testdata/03393_max_read_buffer_size_non_zero/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03393_max_read_buffer_size_non_zero/query.sql b/parser/testdata/03393_max_read_buffer_size_non_zero/query.sql new file mode 100644 index 000000000..32cec6207 --- /dev/null +++ b/parser/testdata/03393_max_read_buffer_size_non_zero/query.sql @@ -0,0 +1 @@ +SET max_read_buffer_size = 0; -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03393_non_constant_second_argument_for_in/ast.json b/parser/testdata/03393_non_constant_second_argument_for_in/ast.json new file mode 100644 index 000000000..7a4337970 --- /dev/null +++ b/parser/testdata/03393_non_constant_second_argument_for_in/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00123754, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03393_non_constant_second_argument_for_in/metadata.json b/parser/testdata/03393_non_constant_second_argument_for_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03393_non_constant_second_argument_for_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03393_non_constant_second_argument_for_in/query.sql b/parser/testdata/03393_non_constant_second_argument_for_in/query.sql new file mode 100644 index 000000000..eeff394e9 --- /dev/null +++ b/parser/testdata/03393_non_constant_second_argument_for_in/query.sql @@ -0,0 +1,158 @@ +SET allow_experimental_analyzer = 1; + +SELECT number FROM numbers(10) WHERE has([number % 3, number % 5], number % 2) ORDER BY number; +SELECT '-- IN --'; +SELECT number FROM numbers(10) WHERE number % 2 IN [number % 3, number % 5] ORDER BY number SETTINGS allow_experimental_analyzer = 1; +SELECT number FROM numbers(10) WHERE number % 2 IN [number % 3, number % 5] ORDER BY number SETTINGS allow_experimental_analyzer = 0; -- { serverError UNKNOWN_IDENTIFIER } + +SELECT '-- MORE CASES --'; + +-- { echoOn } + +SELECT (1, 2) in [number % 3, number % 5] FROM numbers(2); -- { serverError NO_COMMON_TYPE } +SELECT (1, 2) in (SELECT [0, 0] UNION ALL SELECT [1, 1]); -- { serverError TYPE_MISMATCH } + +SELECT (1, 2) in [(number % 3, number % 5)] FROM numbers(2); +SELECT (1, 2) in (SELECT (0, 0)), (1, 2) in (SELECT (1, 1)); + +SELECT (1, 1) in [(number % 3, number % 5)] FROM numbers(2); +SELECT (1, 1) in (SELECT (0, 0)), (1, 1) in (SELECT (1, 1)); + +SELECT (1, null) in [(number % 3, number % 5)] FROM numbers(2); +SELECT (1, null) in (SELECT (0, 0::Nullable(Int))), (1, null) in (SELECT (1, 1::Nullable(Int))); + +SELECT (1, null) in [(number % 3, number % 5), (1, null)] FROM numbers(2); +SELECT (1, null) in (SELECT (0, 0::Nullable(Int)) UNION ALL SELECT (1, null)), (1, null) in (SELECT (1, 1::Nullable(Int)) UNION ALL SELECT (1, null)); + +SELECT 'ANOTHER SETTING'; + +set transform_null_in = 1; + +SELECT (1, 2) in [number % 3, number % 5] FROM numbers(2); -- { serverError NO_COMMON_TYPE } +SELECT (1, 2) in (SELECT [0, 0] UNION ALL SELECT [1, 1]); -- { serverError TYPE_MISMATCH } + +SELECT (1, 2) in [(number % 3, number % 5)] FROM numbers(2); +SELECT (1, 2) in (SELECT (0, 0)), (1, 2) in (SELECT (1, 1)); + +SELECT (1, 1) in [(number % 3, number % 5)] FROM numbers(2); +SELECT (1, 1) in (SELECT (0, 0)), (1, 1) in (SELECT (1, 1)); + +SELECT (1, null) in [(number % 3, number % 5)] FROM numbers(2); +SELECT (1, null) in (SELECT (0, 
0::Nullable(Int))), (1, null) in (SELECT (1, 1::Nullable(Int))); + +SELECT (1, null) in [(number % 3, number % 5), (1, null)] FROM numbers(2); +SELECT (1, null) in (SELECT (0, 0::Nullable(Int)) UNION ALL SELECT (1, null)), (1, null) in (SELECT (1, 1::Nullable(Int)) UNION ALL SELECT (1, null)); + +--- with tuple rewritten into array +SELECT * +FROM numbers(1000) +WHERE number IN (123, 10 - number, 456); + +-- Consistency of transform_null_in to non-const arguments +SELECT + NULL IN (1, number), + NULL IN (1, number, NULL), + NULL IN (1, 2), + NULL IN (1, NULL) +FROM numbers(1) +SETTINGS transform_null_in = 1; + +SELECT + NULL IN (1, number), + NULL IN (1, number, NULL), + NULL IN (1, 2), + NULL IN (1, NULL) +FROM numbers(1) +SETTINGS transform_null_in = 0; + +-- Consistency for arrays/tuples +SELECT toNullable(1) IN [1, number] +FROM numbers(2); + +SELECT toNullable(1) IN (1, number) +FROM numbers(2); + +-- Common type consistency +SELECT 'a' IN (5, number, 'a') +FROM numbers(2); + +SELECT + NULL IN ( + 258, + CAST('string' AS Nullable(String)), + CAST(number AS Nullable(UInt64)) + ) +FROM numbers(1) +SETTINGS transform_null_in = 1; + +SELECT + NULL IN ( + 258, + CAST('string' AS Nullable(String)), + CAST(number AS Nullable(UInt64)) + ) +FROM numbers(1) +SETTINGS transform_null_in = 0; + +SELECT + NULL IN ( + 258, + CAST('string' AS Nullable(String)), + CAST(number AS Nullable(UInt64)), + NULL + ) +FROM numbers(1) +SETTINGS transform_null_in = 1; + +SELECT + NULL IN ( + 258, + CAST('string' AS Nullable(String)), + CAST(number AS Nullable(UInt64)), + NULL + ) +FROM numbers(1) +SETTINGS transform_null_in = 0; + +SELECT NULL IN [1, number] +FROM numbers(1) +SETTINGS transform_null_in = 1; + +SELECT NULL IN [1, number] +FROM numbers(1) +SETTINGS transform_null_in = 0; + +SELECT 1 IN [1, toNullable(number)] +FROM numbers(2) +SETTINGS transform_null_in = 1; + +SELECT 1 IN [1, toNullable(number)] +FROM numbers(2) +SETTINGS transform_null_in = 0; + +SELECT + 0 AS x, + [if(number > 1, NULL, number)] AS arr, + tuple(arr[1]) AS t, + x IN (t), + x IN (arr) +FROM numbers(3) +SETTINGS transform_null_in = 0; + +SELECT + arrayJoin([0, 1, NULL]) AS x, + [if(number > 1, NULL, number)] AS arr, + tuple(arr[1]) AS t, + x IN (t), + x IN (arr) +FROM numbers(3) +SETTINGS transform_null_in = 0; + +SELECT + NULL AS x, + [if(number > 1, NULL, number)] AS arr, + tuple(arr[1]) AS t, + x IN (t), + x IN (arr) +FROM numbers(3) +SETTINGS transform_null_in = 0; diff --git a/parser/testdata/03393_smallest_index_floating_point/ast.json b/parser/testdata/03393_smallest_index_floating_point/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03393_smallest_index_floating_point/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03393_smallest_index_floating_point/metadata.json b/parser/testdata/03393_smallest_index_floating_point/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03393_smallest_index_floating_point/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03393_smallest_index_floating_point/query.sql b/parser/testdata/03393_smallest_index_floating_point/query.sql new file mode 100644 index 000000000..e4dc2eac9 --- /dev/null +++ b/parser/testdata/03393_smallest_index_floating_point/query.sql @@ -0,0 +1,4 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/77699 +SELECT argMin(1 + toNullable(1), number / number) FROM numbers(1); +SELECT argMin(1 + toNullable(1), CAST('NaN', 'Float64')) FROM 
numbers(10000); +SELECT argMin(1 + toNullable(number), CAST('NaN', 'Float32')) FROM numbers(10000); diff --git a/parser/testdata/03393_validate_storage_buffer_args/ast.json b/parser/testdata/03393_validate_storage_buffer_args/ast.json new file mode 100644 index 000000000..adc8911b9 --- /dev/null +++ b/parser/testdata/03393_validate_storage_buffer_args/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery invalid (children 3)" + }, + { + "explain": " Identifier invalid" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration c0 (children 1)" + }, + { + "explain": " DataType Int" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Buffer (children 1)" + }, + { + "explain": " ExpressionList (children 9)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001472886, + "rows_read": 18, + "bytes_read": 572 + } +} diff --git a/parser/testdata/03393_validate_storage_buffer_args/metadata.json b/parser/testdata/03393_validate_storage_buffer_args/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03393_validate_storage_buffer_args/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03393_validate_storage_buffer_args/query.sql b/parser/testdata/03393_validate_storage_buffer_args/query.sql new file mode 100644 index 000000000..5aa14eaeb --- /dev/null +++ b/parser/testdata/03393_validate_storage_buffer_args/query.sql @@ -0,0 +1,4 @@ +CREATE TABLE invalid (c0 Int) ENGINE = Buffer(x, x, 0, 1, 1, 1, 1, 1, 1); -- {serverError BAD_ARGUMENTS} must be a positive integer +CREATE TABLE invalid (c0 Int) ENGINE = Buffer(x, x, -1, 1, 1e6, 1, 1, 1, 1); -- {serverError BAD_ARGUMENTS} must be non-negative value +CREATE TABLE invalid (c0 Int) ENGINE = Buffer(x, x, 1, 1, -1, 1, 1, 1, 1); -- {serverError BAD_ARGUMENTS} must be non-negative value +CREATE TABLE invalid (c0 Int) ENGINE = Buffer(x, x, 1, 1, 1, 1, 1, -1e6, 1); -- {serverError BAD_ARGUMENTS} must be non-negative value diff --git a/parser/testdata/03394_fix_to_start_of_interval_for_zero_origin_argument/ast.json b/parser/testdata/03394_fix_to_start_of_interval_for_zero_origin_argument/ast.json new file mode 100644 index 000000000..db49f67f7 --- /dev/null +++ b/parser/testdata/03394_fix_to_start_of_interval_for_zero_origin_argument/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001394253, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03394_fix_to_start_of_interval_for_zero_origin_argument/metadata.json b/parser/testdata/03394_fix_to_start_of_interval_for_zero_origin_argument/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03394_fix_to_start_of_interval_for_zero_origin_argument/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03394_fix_to_start_of_interval_for_zero_origin_argument/query.sql b/parser/testdata/03394_fix_to_start_of_interval_for_zero_origin_argument/query.sql new file mode 100644 index 000000000..289e75e27 --- /dev/null +++ b/parser/testdata/03394_fix_to_start_of_interval_for_zero_origin_argument/query.sql @@ -0,0 +1,8 @@ +SET session_timezone = 'UTC'; + +SELECT toStartOfInterval( + toDateTime64('2024-03-13 11:29:01.000000', 6, 'Europe/Rome'), + INTERVAL 1 QUARTER, + toDateTime64('1970-01-01 00:00:00.000', 6), + 'Europe/Rome' +); diff --git a/parser/testdata/03394_naive_bayes_classifier_negative/ast.json b/parser/testdata/03394_naive_bayes_classifier_negative/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03394_naive_bayes_classifier_negative/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03394_naive_bayes_classifier_negative/metadata.json b/parser/testdata/03394_naive_bayes_classifier_negative/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03394_naive_bayes_classifier_negative/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03394_naive_bayes_classifier_negative/query.sql b/parser/testdata/03394_naive_bayes_classifier_negative/query.sql new file mode 100644 index 000000000..747815ea6 --- /dev/null +++ b/parser/testdata/03394_naive_bayes_classifier_negative/query.sql @@ -0,0 +1,11 @@ +-- Tags: no-fasttest +-- no-fasttest: depends on model binary and model details via config files + +SELECT naiveBayesClassifier('sentiment', 3); -- { serverError BAD_ARGUMENTS } + +SELECT naiveBayesClassifier(0, 'hello'); -- { serverError BAD_ARGUMENTS } + +SELECT naiveBayesClassifier('zzz_nonexistent_model_4ae239f8', 'hello'); -- { serverError BAD_ARGUMENTS } + +-- Empty input not allowed +SELECT naiveBayesClassifier('lang_byte_2', ''); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03394_pr_insert_select/ast.json b/parser/testdata/03394_pr_insert_select/ast.json new file mode 100644 index 000000000..d148aa737 --- /dev/null +++ b/parser/testdata/03394_pr_insert_select/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000938463, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03394_pr_insert_select/metadata.json b/parser/testdata/03394_pr_insert_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03394_pr_insert_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03394_pr_insert_select/query.sql b/parser/testdata/03394_pr_insert_select/query.sql new file mode 100644 index 000000000..f3a9dade3 --- /dev/null +++ b/parser/testdata/03394_pr_insert_select/query.sql @@ -0,0 +1,27 @@ +SET enable_analyzer=1; -- parallel distributed insert select for replicated tables works only with analyzer +SET parallel_distributed_insert_select=2; + +DROP TABLE IF EXISTS t_mt_source; +DROP TABLE IF EXISTS t_rmt_target SYNC; + +CREATE TABLE t_mt_source (k UInt64, v String) ENGINE = MergeTree() ORDER BY k; +CREATE TABLE t_rmt_target (k UInt64, v String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/t_rmt_target', 'r1') ORDER BY (); + +INSERT INTO t_mt_source SELECT number as k, toString(number) as v FROM system.numbers LIMIT 1e6; +select 'mt source table count()', count() from t_mt_source; + +SET 
enable_parallel_replicas = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3; + +select '-- check result without local pipeline'; +INSERT INTO t_rmt_target SELECT * FROM t_mt_source SETTINGS log_comment='cb01f13a-410c-4985-b233-35289776b58f', parallel_replicas_for_non_replicated_merge_tree = 1, parallel_replicas_local_plan=0; + +SYSTEM FLUSH LOGS query_log; +select count() from system.query_log where (current_database = currentDatabase() or has(databases, currentDatabase())) and type = 'QueryFinish' and query_kind = 'Insert' and log_comment='cb01f13a-410c-4985-b233-35289776b58f' and event_date >= yesterday(); + +select count() from t_rmt_target; +select * from t_rmt_target order by k +except +select * from t_mt_source order by k; + +DROP TABLE t_mt_source; +DROP TABLE t_rmt_target SYNC; diff --git a/parser/testdata/03394_pr_insert_select_local_pipeline/ast.json b/parser/testdata/03394_pr_insert_select_local_pipeline/ast.json new file mode 100644 index 000000000..b4c343128 --- /dev/null +++ b/parser/testdata/03394_pr_insert_select_local_pipeline/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000982277, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03394_pr_insert_select_local_pipeline/metadata.json b/parser/testdata/03394_pr_insert_select_local_pipeline/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03394_pr_insert_select_local_pipeline/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03394_pr_insert_select_local_pipeline/query.sql b/parser/testdata/03394_pr_insert_select_local_pipeline/query.sql new file mode 100644 index 000000000..ee0d52c6f --- /dev/null +++ b/parser/testdata/03394_pr_insert_select_local_pipeline/query.sql @@ -0,0 +1,28 @@ +SET enable_analyzer=1; -- parallel distributed insert select for replicated tables works only with analyzer +SET parallel_distributed_insert_select=2; + +DROP TABLE IF EXISTS t_mt_source; +DROP TABLE IF EXISTS t_rmt_target SYNC; + +CREATE TABLE t_mt_source (k UInt64, v String) ENGINE = MergeTree() ORDER BY k; +CREATE TABLE t_rmt_target (k UInt64, v String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/t_rmt_target', 'r1') ORDER BY (); + +INSERT INTO t_mt_source SELECT number as k, toString(number) as v FROM system.numbers_mt LIMIT 1e5; +select 'mt source table count()', count() from t_mt_source; + +SET enable_parallel_replicas = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas = 3; + +select '-- check result with local pipeline'; +TRUNCATE TABLE t_rmt_target; +INSERT INTO t_rmt_target SELECT * FROM t_mt_source SETTINGS log_comment='c1fcb43d-1703-4ddb-b353-c8079b405c16', parallel_replicas_for_non_replicated_merge_tree = 1, parallel_replicas_local_plan=1, parallel_replicas_insert_select_local_pipeline=1; + +SYSTEM FLUSH LOGS query_log; +select count() from system.query_log where (current_database = currentDatabase() or has(databases, currentDatabase())) and type = 'QueryFinish' and query_kind = 'Insert' and log_comment='c1fcb43d-1703-4ddb-b353-c8079b405c16' and event_date >= yesterday(); + +select count() from t_rmt_target; +select * from t_rmt_target order by k +except +select * from t_mt_source order by k; + +DROP TABLE t_mt_source; +DROP TABLE t_rmt_target SYNC; diff 
--git a/parser/testdata/03394_pr_insert_select_threads/ast.json b/parser/testdata/03394_pr_insert_select_threads/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03394_pr_insert_select_threads/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03394_pr_insert_select_threads/metadata.json b/parser/testdata/03394_pr_insert_select_threads/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03394_pr_insert_select_threads/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03394_pr_insert_select_threads/query.sql b/parser/testdata/03394_pr_insert_select_threads/query.sql new file mode 100644 index 000000000..c239e2f5a --- /dev/null +++ b/parser/testdata/03394_pr_insert_select_threads/query.sql @@ -0,0 +1,38 @@ +-- Tags: long, no-parallel, no-object-storage, no-msan, no-tsan + +SET enable_analyzer=1; -- parallel distributed insert select for replicated tables works only with analyzer +SET parallel_distributed_insert_select=2; + +DROP TABLE IF EXISTS t_mt_source; +DROP TABLE IF EXISTS t_rmt_target SYNC; + +CREATE TABLE t_mt_source (k UInt64, v String) ENGINE = MergeTree() ORDER BY k SETTINGS index_granularity=10; +CREATE TABLE t_rmt_target (k UInt64, v String) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/t_rmt_target', 'r1') ORDER BY (); + +SYSTEM STOP MERGES t_mt_source; +INSERT INTO t_mt_source SELECT number as k, toString(number) as v FROM system.numbers_mt LIMIT 1e6 SETTINGS max_block_size=1000, min_insert_block_size_rows=1000; +SELECT count() FROM system.parts WHERE database = currentDatabase() and table = 't_mt_source'; + +SET cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost', max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1, parallel_replicas_mark_segment_size=128; + +-- we do not want concurrency control to limit the number of threads +SET use_concurrency_control=0; + +-- reduce block size to ensure that all threads will be used +INSERT INTO t_rmt_target SELECT * FROM t_mt_source SETTINGS allow_experimental_parallel_reading_from_replicas=1, max_threads=8, max_insert_threads=4, max_block_size=1000, min_insert_block_size_rows=1000, parallel_replicas_local_plan=0; +INSERT INTO t_rmt_target SELECT * FROM t_mt_source SETTINGS allow_experimental_parallel_reading_from_replicas=1, max_threads=8, max_insert_threads=4, max_block_size=1000, min_insert_block_size_rows=1000, parallel_replicas_local_plan=1, parallel_replicas_insert_select_local_pipeline=1; + +SYSTEM FLUSH LOGS query_log; +SELECT + if(is_initial_query, 'initial', 'secondary'), + if(toUInt64OrZero(Settings['parallel_replicas_insert_select_local_pipeline']) == 0 and is_initial_query, 1, 8) threads_limit, + least(peak_threads_usage, threads_limit), + format('local_pipeline={}', Settings['parallel_replicas_insert_select_local_pipeline']) +FROM system.query_log +WHERE (current_database = currentDatabase() OR has(databases, currentDatabase())) AND type = 'QueryFinish' AND Settings['allow_experimental_parallel_reading_from_replicas']='1' AND query_kind = 'Insert' AND has(tables, currentDatabase() || '.t_rmt_target') +ORDER BY event_time_microseconds +SETTINGS allow_experimental_parallel_reading_from_replicas=0 +; + +DROP TABLE t_mt_source; +DROP TABLE t_rmt_target SYNC; diff --git a/parser/testdata/03395_global_join_supported_kind/ast.json b/parser/testdata/03395_global_join_supported_kind/ast.json new file mode 100644 index 000000000..58dde5cd4
--- /dev/null +++ b/parser/testdata/03395_global_join_supported_kind/ast.json @@ -0,0 +1,91 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " QualifiedAsterisk (children 1)" + }, + { + "explain": " Identifier t1" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (alias t1) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '127.1'" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (alias t2) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '127.1'" + }, + { + "explain": " TableJoin (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t1.dummy" + }, + { + "explain": " Identifier t2.dummy" + }, + { + "explain": " Set" + } + ], + + "rows": 23, + + "statistics": + { + "elapsed": 0.001240358, + "rows_read": 23, + "bytes_read": 899 + } +} diff --git a/parser/testdata/03395_global_join_supported_kind/metadata.json b/parser/testdata/03395_global_join_supported_kind/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03395_global_join_supported_kind/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03395_global_join_supported_kind/query.sql b/parser/testdata/03395_global_join_supported_kind/query.sql new file mode 100644 index 000000000..b6a883141 --- /dev/null +++ b/parser/testdata/03395_global_join_supported_kind/query.sql @@ -0,0 +1 @@ +SELECT t1.* FROM remote('127.1') AS t1 global FULL OUTER JOIN remote('127.1') AS t2 ON t1.dummy = t2.dummy SETTINGS allow_experimental_analyzer=1; diff --git a/parser/testdata/03397_information_schema_tables_index_length/ast.json b/parser/testdata/03397_information_schema_tables_index_length/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03397_information_schema_tables_index_length/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03397_information_schema_tables_index_length/metadata.json b/parser/testdata/03397_information_schema_tables_index_length/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03397_information_schema_tables_index_length/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03397_information_schema_tables_index_length/query.sql b/parser/testdata/03397_information_schema_tables_index_length/query.sql new file mode 100644 index 000000000..4ec3d4a18 --- /dev/null +++ b/parser/testdata/03397_information_schema_tables_index_length/query.sql @@ -0,0 +1,29 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/57590 + +DROP TABLE IF EXISTS tab_with_primary_key_index; +CREATE TABLE tab_with_primary_key_index (id UInt32, a UInt32) ENGINE = MergeTree ORDER BY id; +INSERT INTO tab_with_primary_key_index SELECT number, number % 2 ? 
1 : number FROM numbers(10); + +DROP TABLE IF EXISTS tab_with_primary_key_index_and_skipping_index; +CREATE TABLE tab_with_primary_key_index_and_skipping_index (id UInt32, a UInt32, INDEX idx a TYPE set(0)) ENGINE = MergeTree ORDER BY id; +INSERT INTO tab_with_primary_key_index_and_skipping_index SELECT number, number % 2 ? 1 : number FROM numbers(10); + +-- Check that information_schema.tables.index_length is larger than 0 for both tables +SELECT if(index_length > 0, 'OK', 'FAIL') +FROM information_schema.tables +WHERE table_name LIKE 'tab_with_primary_key_index%' + AND table_schema = currentDatabase(); + +-- A very crude check that information_schema.tables.index_length is different for both tables +SELECT count(*) +FROM information_schema.tables +WHERE table_name LIKE 'tab_with_primary_key_index%' + AND table_schema = currentDatabase(); + +DROP TABLE tab_with_primary_key_index; + +-- Check that information_schema.tables.index_length is 0 for non-MergeTree tables +SELECT if(index_length = 0, 'OK', 'FAIL') +FROM information_schema.tables +WHERE table_name = 'tables' + AND table_schema = 'system'; -- table engine is 'SystemTables' diff --git a/parser/testdata/03398_group_array_zero_max_elements/ast.json b/parser/testdata/03398_group_array_zero_max_elements/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03398_group_array_zero_max_elements/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03398_group_array_zero_max_elements/metadata.json b/parser/testdata/03398_group_array_zero_max_elements/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03398_group_array_zero_max_elements/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03398_group_array_zero_max_elements/query.sql b/parser/testdata/03398_group_array_zero_max_elements/query.sql new file mode 100644 index 000000000..6e40d5dbd --- /dev/null +++ b/parser/testdata/03398_group_array_zero_max_elements/query.sql @@ -0,0 +1,53 @@ +-- Related to https://github.com/ClickHouse/ClickHouse/issues/78088 + +-- Asserting that groupArray* function calls with zero `max_size` argument of +-- different types (Int/UInt) will produce BAD_ARGUMENTS error + +SELECT groupArray(0::UInt64)(1); -- { serverError BAD_ARGUMENTS } +SELECT groupArray(0::Int64)(1); -- { serverError BAD_ARGUMENTS } +SELECT groupArray(0::UInt64)('x'); -- { serverError BAD_ARGUMENTS } +SELECT groupArray(0::Int64)('x'); -- { serverError BAD_ARGUMENTS } +SELECT groupArray(0::UInt64)(number) FROM numbers(5); -- { serverError BAD_ARGUMENTS } +SELECT groupArray(0::Int64)(number) FROM numbers(5); -- { serverError BAD_ARGUMENTS } + +SELECT groupArraySorted(0::UInt64)(1); -- { serverError BAD_ARGUMENTS } +SELECT groupArraySorted(0::Int64)(1); -- { serverError BAD_ARGUMENTS } +SELECT groupArraySorted(0::UInt64)('x'); -- { serverError BAD_ARGUMENTS } +SELECT groupArraySorted(0::Int64)('x'); -- { serverError BAD_ARGUMENTS } +SELECT groupArraySorted(0::UInt64)(number) FROM numbers(5); -- { serverError BAD_ARGUMENTS } +SELECT groupArraySorted(0::Int64)(number) FROM numbers(5); -- { serverError BAD_ARGUMENTS } + +SELECT groupArraySample(0::UInt64, 123)(1); -- { serverError BAD_ARGUMENTS } +SELECT groupArraySample(0::Int64, 123)(1); -- { serverError BAD_ARGUMENTS } +SELECT groupArraySample(0::UInt64, 123)('x'); -- { serverError BAD_ARGUMENTS } +SELECT groupArraySample(0::Int64, 123)('x'); -- { serverError BAD_ARGUMENTS } +SELECT groupArraySample(0::UInt64, 123)(number) 
FROM numbers(5); -- { serverError BAD_ARGUMENTS } +SELECT groupArraySample(0::Int64, 123)(number) FROM numbers(5); -- { serverError BAD_ARGUMENTS } + +SELECT groupArrayLast(0::UInt64)(1); -- { serverError BAD_ARGUMENTS } +SELECT groupArrayLast(0::Int64)(1); -- { serverError BAD_ARGUMENTS } +SELECT groupArrayLast(0::UInt64)('x'); -- { serverError BAD_ARGUMENTS } +SELECT groupArrayLast(0::Int64)('x'); -- { serverError BAD_ARGUMENTS } +SELECT groupArrayLast(0::UInt64)(number) FROM numbers(5); -- { serverError BAD_ARGUMENTS } +SELECT groupArrayLast(0::Int64)(number) FROM numbers(5); -- { serverError BAD_ARGUMENTS } + +SELECT groupArrayMovingSum(0::UInt64)(1); -- { serverError BAD_ARGUMENTS } +SELECT groupArrayMovingSum(0::Int64)(1); -- { serverError BAD_ARGUMENTS } +SELECT groupArrayMovingSum(0::UInt64)('x'); -- { serverError BAD_ARGUMENTS } +SELECT groupArrayMovingSum(0::Int64)('x'); -- { serverError BAD_ARGUMENTS } +SELECT groupArrayMovingSum(0::UInt64)(number) FROM numbers(5); -- { serverError BAD_ARGUMENTS } +SELECT groupArrayMovingSum(0::Int64)(number) FROM numbers(5); -- { serverError BAD_ARGUMENTS } + +SELECT groupArrayMovingAvg(0::UInt64)(1); -- { serverError BAD_ARGUMENTS } +SELECT groupArrayMovingAvg(0::Int64)(1); -- { serverError BAD_ARGUMENTS } +SELECT groupArrayMovingAvg(0::UInt64)('x'); -- { serverError BAD_ARGUMENTS } +SELECT groupArrayMovingAvg(0::Int64)('x'); -- { serverError BAD_ARGUMENTS } +SELECT groupArrayMovingAvg(0::UInt64)(number) FROM numbers(5); -- { serverError BAD_ARGUMENTS } +SELECT groupArrayMovingAvg(0::Int64)(number) FROM numbers(5); -- { serverError BAD_ARGUMENTS } + +SELECT groupUniqArray(0::UInt64)(1); -- { serverError BAD_ARGUMENTS } +SELECT groupUniqArray(0::Int64)(1); -- { serverError BAD_ARGUMENTS } +SELECT groupUniqArray(0::UInt64)('x'); -- { serverError BAD_ARGUMENTS } +SELECT groupUniqArray(0::Int64)('x'); -- { serverError BAD_ARGUMENTS } +SELECT groupUniqArray(0::UInt64)(number) FROM numbers(5); -- { serverError BAD_ARGUMENTS } +SELECT groupUniqArray(0::Int64)(number) FROM numbers(5); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03399_advanced_expr_contains_sharding_key/ast.json b/parser/testdata/03399_advanced_expr_contains_sharding_key/ast.json new file mode 100644 index 000000000..797ec72e8 --- /dev/null +++ b/parser/testdata/03399_advanced_expr_contains_sharding_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery local_table (children 1)" + }, + { + "explain": " Identifier local_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000945537, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/03399_advanced_expr_contains_sharding_key/metadata.json b/parser/testdata/03399_advanced_expr_contains_sharding_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03399_advanced_expr_contains_sharding_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03399_advanced_expr_contains_sharding_key/query.sql b/parser/testdata/03399_advanced_expr_contains_sharding_key/query.sql new file mode 100644 index 000000000..52e5f9667 --- /dev/null +++ b/parser/testdata/03399_advanced_expr_contains_sharding_key/query.sql @@ -0,0 +1,38 @@ +drop table if exists local_table; +drop table if exists distributed_table; +drop table if exists distributed_table2; + +set optimize_skip_unused_shards = true; +set prefer_localhost_replica=0; +SET 
allow_experimental_analyzer = 1; + +create table local_table(id UInt64) engine MergeTree order by id; +create table distributed_table as local_table engine Distributed(test_cluster_two_shard_three_replicas_localhost, currentDatabase(), local_table, id); +create table distributed_table2 as local_table engine Distributed(test_cluster_two_shard_three_replicas_localhost, currentDatabase(), local_table); + +insert into local_table select number from numbers(100); + +select 'query plan of GROUP BY sharding key'; +explain select count() from distributed_table group by id; +explain select count() from distributed_table2 group by id; + +explain select count() from distributed_table group by toString(id); +explain select count() from distributed_table2 group by toString(id); + +select 'query plan of DISTINCT sharding key'; +explain select distinct id from distributed_table; +explain select distinct id from distributed_table2; + +explain select distinct toString(id) from distributed_table; +explain select distinct toString(id) from distributed_table2; + +select 'query plan of LIMIT BY sharding key'; +explain select * from distributed_table limit 1 by id; +explain select * from distributed_table2 limit 1 by id; + +explain select * from distributed_table limit 1 by toString(id); +explain select * from distributed_table2 limit 1 by toString(id); + +drop table if exists local_table; +drop table if exists distributed_table; +drop table if exists distributed_table2; \ No newline at end of file diff --git a/parser/testdata/03399_analyzer_correlated_subquery/ast.json b/parser/testdata/03399_analyzer_correlated_subquery/ast.json new file mode 100644 index 000000000..1804f69db --- /dev/null +++ b/parser/testdata/03399_analyzer_correlated_subquery/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001157826, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03399_analyzer_correlated_subquery/metadata.json b/parser/testdata/03399_analyzer_correlated_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03399_analyzer_correlated_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03399_analyzer_correlated_subquery/query.sql b/parser/testdata/03399_analyzer_correlated_subquery/query.sql new file mode 100644 index 000000000..d2855b12e --- /dev/null +++ b/parser/testdata/03399_analyzer_correlated_subquery/query.sql @@ -0,0 +1,48 @@ +set enable_analyzer = 1; + +DROP TABLE IF EXISTS users; +CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=Memory; + +INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users VALUES (6666, 'Ksenia', 48); +INSERT INTO users VALUES (8888, 'Alice', 50); + +DROP TABLE IF EXISTS users2; +CREATE TABLE users2 (uid Int16, name String, age Int16) ENGINE=Memory; + +INSERT INTO users2 VALUES (1231, 'John', 33); + +-- { echoOn } + +SET allow_experimental_correlated_subqueries = 1; + +SELECT * FROM users u1 +WHERE EXISTS ( + SELECT * FROM users2 u2 + WHERE u1.age = u2.age +); + +SELECT * +FROM users AS u1 +WHERE (age = 50) OR exists(( + SELECT * + FROM users2 AS u2 + WHERE u1.age = u2.age +)) +ORDER BY ALL +SETTINGS allow_experimental_correlated_subqueries = 1; + +SELECT * +FROM users AS u1 +WHERE (age = 50) OR exists(( + SELECT * + FROM users2 AS u2 + WHERE u1.age = u2.age + UNION ALL + SELECT * + FROM users2 AS u2 + WHERE u1.age != 
u2.age +)) +ORDER BY ALL +FORMAT Null +SETTINGS allow_experimental_correlated_subqueries = 1; diff --git a/parser/testdata/03399_divide_zero_or_null/ast.json b/parser/testdata/03399_divide_zero_or_null/ast.json new file mode 100644 index 000000000..2793502d0 --- /dev/null +++ b/parser/testdata/03399_divide_zero_or_null/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Test with two const arguments'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001405643, + "rows_read": 5, + "bytes_read": 200 + } +} diff --git a/parser/testdata/03399_divide_zero_or_null/metadata.json b/parser/testdata/03399_divide_zero_or_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03399_divide_zero_or_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03399_divide_zero_or_null/query.sql b/parser/testdata/03399_divide_zero_or_null/query.sql new file mode 100644 index 000000000..3df83d338 --- /dev/null +++ b/parser/testdata/03399_divide_zero_or_null/query.sql @@ -0,0 +1,69 @@ +SELECT 'Test with two const arguments'; +SELECT moduloOrNull(1, 0); +SELECT moduloOrNull(0, 0); +SELECT moduloOrNull(NULL, 0); +SELECT moduloOrNull(1, NULL); +SELECT moduloOrNull(NULL, 1); +SELECT moduloOrNull(NULL, NULL); +SELECT positiveModuloOrNull(1, 0); +SELECT positiveModuloOrNull(0, 0); +SELECT positiveModuloOrNull(NULL, 0); +SELECT positiveModuloOrNull(1, NULL); +SELECT positiveModuloOrNull(NULL, 1); +SELECT positiveModuloOrNull(NULL, NULL); +SELECT intDivOrNull(1, 0); +SELECT intDivOrNull(0, 0); +SELECT intDivOrNull(NULL, 0); +SELECT intDivOrNull(1, NULL); +SELECT intDivOrNull(null, 1); +SELECT intDivOrNull(NULL, NULL); +SELECT divideOrNull(1, 0); +SELECT divideOrNull(0, 0); +SELECT divideOrNull(NULL, 0); +SELECT divideOrNull(1, NULL); +SELECT divideOrNull(NULL, 1); +SELECT divideOrNull(NULL, NULL); + +SELECT 'Test with one const argument'; +SELECT moduloOrNull(materialize(1), 0); +SELECT moduloOrNull(materialize(1), NULL); +SELECT moduloOrNull(1, materialize(0)); +SELECT moduloOrNull(1, materialize(NULL)); +SELECT moduloOrNull(materialize(NULL), 1); +SELECT moduloOrNull(materialize(NULL), 0); +SELECT positiveModuloOrNull(materialize(1), 0); +SELECT positiveModuloOrNull(materialize(1), NULL); +SELECT positiveModuloOrNull(1, materialize(0)); +SELECT positiveModuloOrNull(1, materialize(NULL)); +SELECT positiveModuloOrNull(materialize(NULL), 1); +SELECT positiveModuloOrNull(materialize(NULL), 0); +SELECT intDivOrNull(materialize(1), 0); +SELECT intDivOrNull(materialize(1), NULL); +SELECT intDivOrNull(1, materialize(0)); +SELECT intDivOrNull(1, materialize(NULL)); +SELECT intDivOrNull(materialize(NULL), 1); +SELECT intDivOrNull(materialize(NULL), 0); +SELECT divideOrNull(materialize(1), 0); +SELECT divideOrNull(materialize(1), NULL); +SELECT divideOrNull(1, materialize(0)); +SELECT divideOrNull(1, materialize(NULL)); +SELECT divideOrNull(materialize(NULL), 1); +SELECT divideOrNull(materialize(NULL), 0); + +SELECT 'Test with non-const arguments'; +SELECT moduloOrNull(materialize(1), materialize(0)); +SELECT moduloOrNull(materialize(1), materialize(NULL)); +SELECT moduloOrNull(materialize(NULL), materialize(0)); +SELECT
moduloOrNull(materialize(NULL), materialize(NULL)); +SELECT positiveModuloOrNull(materialize(1), materialize(0)); +SELECT positiveModuloOrNull(materialize(1), materialize(NULL)); +SELECT positiveModuloOrNull(materialize(NULL), materialize(0)); +SELECT positiveModuloOrNull(materialize(NULL), materialize(NULL)); +SELECT intDivOrNull(materialize(1), materialize(0)); +SELECT intDivOrNull(materialize(1), materialize(NULL)); +SELECT intDivOrNull(materialize(NULL), materialize(0)); +SELECT intDivOrNull(materialize(NULL), materialize(NULL)); +SELECT divideOrNull(materialize(1), materialize(0)); +SELECT divideOrNull(materialize(1), materialize(NULL)); +SELECT divideOrNull(materialize(NULL), materialize(0)); +SELECT divideOrNull(materialize(NULL), materialize(NULL)); \ No newline at end of file diff --git a/parser/testdata/03399_lc_nullable_mapfromarrays/ast.json b/parser/testdata/03399_lc_nullable_mapfromarrays/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03399_lc_nullable_mapfromarrays/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03399_lc_nullable_mapfromarrays/metadata.json b/parser/testdata/03399_lc_nullable_mapfromarrays/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03399_lc_nullable_mapfromarrays/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03399_lc_nullable_mapfromarrays/query.sql b/parser/testdata/03399_lc_nullable_mapfromarrays/query.sql new file mode 100644 index 000000000..0370b206c --- /dev/null +++ b/parser/testdata/03399_lc_nullable_mapfromarrays/query.sql @@ -0,0 +1,87 @@ +-- Original reproducer from the issue +-- https://github.com/ClickHouse/ClickHouse/issues/77803 +SELECT mapFromArrays( + [toNullable(toLowCardinality('c')), toFixedString(toFixedString('d', toUInt256(1)), toLowCardinality(1))], + map('b', 1, toFixedString('a', 1), 2) +) +GROUP BY 1; + +SELECT ''; +SELECT '-- Literal tests'; + +-- Simpler test variations +SELECT mapFromArrays([toLowCardinality(toNullable('a')), toLowCardinality(toNullable('b'))], [1, 2]) GROUP BY 1; +SELECT mapFromArrays([toLowCardinality(toNullable(1)), toLowCardinality(toNullable(2))], [3, 4]) GROUP BY 1; + +SELECT mapFromArrays( + [toLowCardinality(toNullable(1)), toLowCardinality(cast(NULL as Nullable(Int32)))], + [3, 4] +) GROUP BY 1; -- { serverError BAD_ARGUMENTS } + +SELECT mapFromArrays( + [toLowCardinality(toNullable('x')), toLowCardinality(cast(NULL as Nullable(String)))], + [3, 4] +) GROUP BY 1; -- { serverError BAD_ARGUMENTS } + +SELECT ''; +SELECT '-- Table tests'; + +-- Run tests on tables +SET allow_suspicious_low_cardinality_types=1; + +DROP TABLE IF EXISTS 03399_lc_nullable_int_simple; +CREATE TABLE 03399_lc_nullable_int_simple( + k Array(LowCardinality(Nullable(Int32))), + v Array(Int32) +) engine = Memory +AS +SELECT [1, 2], [3, 4]; + +SELECT mapFromArrays(k, v) FROM 03399_lc_nullable_int_simple; +SELECT mapFromArrays(k, v) FROM 03399_lc_nullable_int_simple GROUP BY 1; + +DROP TABLE IF EXISTS 03399_lc_nullable_int_simple; + +DROP TABLE IF EXISTS 03399_lc_nullable_int_mixed; +CREATE TABLE 03399_lc_nullable_int_mixed( + k Array(LowCardinality(Nullable(Int32))), + v Array(Int32) +) engine = Memory +AS +SELECT [1, 2], [3, 4] +UNION ALL +SELECT [5, null], [7, 8]; + +SELECT mapFromArrays(k, v) FROM 03399_lc_nullable_int_mixed; -- { serverError BAD_ARGUMENTS } +SELECT mapFromArrays(k, v) FROM 03399_lc_nullable_int_mixed GROUP BY 1; -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS 
03399_lc_nullable_int_mixed; + + +DROP TABLE IF EXISTS 03399_lc_nullable_string_simple; +CREATE TABLE 03399_lc_nullable_string_simple( + k Array(LowCardinality(Nullable(String))), + v Array(Int32) +) engine = Memory +AS +SELECT ['a', 'b'], [1, 2]; + +SELECT mapFromArrays(k, v) FROM 03399_lc_nullable_string_simple; +SELECT mapFromArrays(k, v) FROM 03399_lc_nullable_string_simple GROUP BY 1; + +DROP TABLE IF EXISTS 03399_lc_nullable_string_simple; + +DROP TABLE IF EXISTS 03399_lc_nullable_string_mixed; +CREATE TABLE 03399_lc_nullable_string_mixed( + k Array(LowCardinality(Nullable(String))), + v Array(Int32) +) engine = Memory +AS +SELECT ['a', 'b'], [1, 2] +UNION ALL +SELECT [NULL, 'c'], [3, 4]; + +SELECT mapFromArrays(k, v) FROM 03399_lc_nullable_string_mixed; -- { serverError BAD_ARGUMENTS } +SELECT mapFromArrays(k, v) FROM 03399_lc_nullable_string_mixed GROUP BY 1; -- { serverError BAD_ARGUMENTS } + +DROP TABLE IF EXISTS 03399_lc_nullable_string_mixed; diff --git a/parser/testdata/03399_mapContains_functions/ast.json b/parser/testdata/03399_mapContains_functions/ast.json new file mode 100644 index 000000000..2ccc98519 --- /dev/null +++ b/parser/testdata/03399_mapContains_functions/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery map_containsValueLike_test (children 1)" + }, + { + "explain": " Identifier map_containsValueLike_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001333638, + "rows_read": 2, + "bytes_read": 104 + } +} diff --git a/parser/testdata/03399_mapContains_functions/metadata.json b/parser/testdata/03399_mapContains_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03399_mapContains_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03399_mapContains_functions/query.sql b/parser/testdata/03399_mapContains_functions/query.sql new file mode 100644 index 000000000..b2276a51e --- /dev/null +++ b/parser/testdata/03399_mapContains_functions/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS map_containsValueLike_test; + +CREATE TABLE map_containsValueLike_test (id UInt32, map Map(String, String)) Engine=MergeTree() ORDER BY id settings index_granularity=2; + +INSERT INTO map_containsValueLike_test VALUES (1, {'1-K1':'1-V1','1-K2':'1-V2'}),(2,{'2-K1':'2-V1','2-K2':'2-V2'}); +INSERT INTO map_containsValueLike_test VALUES (3, {'3-K1':'3-V1','3-K2':'3-V2'}),(4, {'4-K1':'4-V1','4-K2':'4-V2'}); +INSERT INTO map_containsValueLike_test VALUES (5, {'5-K1':'5-V1','5-K2':'5-V2'}),(6, {'6-K1':'6-V1','6-K2':'6-V2'}); + +SELECT id, map FROM map_containsValueLike_test WHERE mapContainsValueLike(map, '1-%') = 1; +SELECT id, map FROM map_containsValueLike_test WHERE mapContainsValueLike(map, '3-%') = 0 order by id; + +DROP TABLE map_containsValueLike_test; + +SELECT mapContainsValueLike(map('aa', '1', 'bb', '2'), '1%'); +SELECT mapContainsValueLike(map('aa', toLowCardinality('1'), 'b', toLowCardinality('2')), '1%'); +SELECT mapContainsValueLike(map('aa', '1', 'bb', '2'), materialize('1%')); +SELECT mapContainsValueLike(materialize(map('aa', '1', 'bb', '2')), '1%'); +SELECT mapContainsValueLike(materialize(map('aa', '1', 'bb', '2')), materialize('1%')); + +SELECT mapContainsValueLike(map('aa', 'cc', 'bb', 'dd'), 'd%'); +SELECT mapContainsValueLike(map('aa', 'cc', 'bb', 'dd'), 'q%'); + +SELECT mapExtractValueLike(map('aa', 'cc', 'bb', 'dd'), 'd%'); +SELECT mapExtractValueLike(map('aa', 'cc', 'bb', 
'dd'), 'q%'); diff --git a/parser/testdata/03399_sparse_grams/ast.json b/parser/testdata/03399_sparse_grams/ast.json new file mode 100644 index 000000000..457a6e9f6 --- /dev/null +++ b/parser/testdata/03399_sparse_grams/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '--- Regular calls'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001289241, + "rows_read": 5, + "bytes_read": 188 + } +} diff --git a/parser/testdata/03399_sparse_grams/metadata.json b/parser/testdata/03399_sparse_grams/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03399_sparse_grams/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03399_sparse_grams/query.sql b/parser/testdata/03399_sparse_grams/query.sql new file mode 100644 index 000000000..cf10c9ae4 --- /dev/null +++ b/parser/testdata/03399_sparse_grams/query.sql @@ -0,0 +1,46 @@ +SELECT '--- Regular calls'; +SELECT sparseGrams(''); +SELECT sparseGrams('ab'); +SELECT sparseGrams('bce'); +SELECT sparseGrams('abcdef'); +SELECT sparseGrams('hello world'); +SELECT sparseGrams('hello world hello world'); +SELECT sparseGrams(concat('hello ', number, ' world')) FROM numbers(3); + +SELECT '--- Minimal ngram length'; +SELECT sparseGrams('', 5); +SELECT sparseGrams('hello world', 5); +SELECT sparseGrams('hello world hello world', 10); + +SELECT '--- With UTF-8 chars'; +SELECT sparseGramsUTF8(''); +SELECT sparseGramsUTF8('a😊Ω𐍈界𝄞bЦ⛄'); +SELECT sparseGramsUTF8('AΩЖ中😊🚀𝄞✨🎵🦄💡❄️', 4); +SELECT sparseGramsUTF8(concat('a😊Ω𐍈', number, '🦄𝄞bЦ⛄'), 4) FROM numbers(3); +SELECT sparseGramsUTF8('Ω', 5); + +SELECT '--- Regular hashes'; +SELECT sparseGramsHashes(''); +SELECT sparseGramsHashes('ab'); +SELECT sparseGramsHashes('bce'); +SELECT sparseGramsHashes('abcdef'); +SELECT sparseGramsHashes('hello world'); +SELECT sparseGramsHashes('hello world hello world'); +SELECT sparseGramsHashes(concat('hello ', number, ' world')) FROM numbers(3); + +SELECT '--- Hashes with minimal ngram length'; +SELECT sparseGramsHashes('', 5); +SELECT sparseGramsHashes('hello world', 5); +SELECT sparseGramsHashes('hello whole hello whole', 5); + +SELECT '--- Hashes with UTF-8 strings'; +SELECT sparseGramsHashesUTF8(''); +SELECT sparseGramsHashesUTF8('a😊Ω𐍈界𝄞bЦ⛄'); +SELECT sparseGramsHashesUTF8('AΩЖ中😊𝄞✨🌍🎵🦄💡❄️', 4); +SELECT sparseGramsHashesUTF8(concat('a😊Ω𐍈', number, '🦄𝄞bЦ⛄'), 4) FROM numbers(3); + +SELECT '--- Maximal ngram length'; +SELECT sparseGrams('hello world hello world', 3, 4); +SELECT sparseGramsHashes('hello world hello world', 3, 4); +SELECT sparseGramsUTF8('a😊Ω𐍈界𝄞bЦ⛄', 3, 4); +SELECT sparseGramsHashesUTF8('a😊Ω𐍈界𝄞bЦ⛄', 3, 4); diff --git a/parser/testdata/03400_analyzer_correlated_subquery_unused_column/ast.json b/parser/testdata/03400_analyzer_correlated_subquery_unused_column/ast.json new file mode 100644 index 000000000..d0dbd715e --- /dev/null +++ b/parser/testdata/03400_analyzer_correlated_subquery_unused_column/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001071961, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git
a/parser/testdata/03400_analyzer_correlated_subquery_unused_column/metadata.json b/parser/testdata/03400_analyzer_correlated_subquery_unused_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03400_analyzer_correlated_subquery_unused_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03400_analyzer_correlated_subquery_unused_column/query.sql b/parser/testdata/03400_analyzer_correlated_subquery_unused_column/query.sql new file mode 100644 index 000000000..f6e76608e --- /dev/null +++ b/parser/testdata/03400_analyzer_correlated_subquery_unused_column/query.sql @@ -0,0 +1,33 @@ +set enable_analyzer = 1; + +DROP TABLE IF EXISTS users; +CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=MergeTree() ORDER BY uid; + +INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users VALUES (6666, 'Ksenia', 48); +INSERT INTO users VALUES (8888, 'Alice', 50); + +DROP TABLE IF EXISTS users2; +CREATE TABLE users2 (uid Int16, name String, age Int16) ENGINE=MergeTree() ORDER BY uid; + +INSERT INTO users2 VALUES (1231, 'John', 33); + +-- { echoOn } + +SET allow_experimental_correlated_subqueries = 1; + +SELECT name FROM users u1 +WHERE EXISTS ( + SELECT * FROM users2 u2 + WHERE u1.age = u2.age +); + +SELECT name +FROM users AS u1 +WHERE (age = 50) OR exists(( + SELECT * + FROM users2 AS u2 + WHERE u1.age = u2.age +)) +ORDER BY ALL +SETTINGS allow_experimental_correlated_subqueries = 1 diff --git a/parser/testdata/03400_distributed_final/ast.json b/parser/testdata/03400_distributed_final/ast.json new file mode 100644 index 000000000..af3476f96 --- /dev/null +++ b/parser/testdata/03400_distributed_final/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 03400_users (children 1)" + }, + { + "explain": " Identifier 03400_users" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001370033, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/03400_distributed_final/metadata.json b/parser/testdata/03400_distributed_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03400_distributed_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03400_distributed_final/query.sql b/parser/testdata/03400_distributed_final/query.sql new file mode 100644 index 000000000..df6c24068 --- /dev/null +++ b/parser/testdata/03400_distributed_final/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS 03400_users; +DROP TABLE IF EXISTS 03400_dist_users; + +CREATE TABLE 03400_users +( + `uid` Int16, + `name` String, + `age` Int16, + `version` UInt8 +) +ENGINE = ReplacingMergeTree(version) +ORDER BY (uid, name); + +INSERT INTO 03400_users VALUES (111, 'John', 33, 1); +INSERT INTO 03400_users VALUES (111, 'John', 33, 2); +INSERT INTO 03400_users VALUES (8888, 'Alice', 50, 1); + +CREATE TABLE 03400_dist_users AS 03400_users +ENGINE = Distributed(test_cluster_two_shards, currentDatabase(), 03400_users); + +SET max_threads=1; + +SELECT * +FROM 03400_dist_users AS l +FINAL +LEFT JOIN +( + SELECT * + FROM 03400_dist_users AS d + FINAL +) AS r ON l.uid = r.uid +ORDER BY l.version +SETTINGS distributed_product_mode = 'local'; diff --git a/parser/testdata/03400_explain_distributed_bug/ast.json b/parser/testdata/03400_explain_distributed_bug/ast.json new file mode 100644 index 000000000..fba0ac610 --- /dev/null +++ 
b/parser/testdata/03400_explain_distributed_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001015316, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03400_explain_distributed_bug/metadata.json b/parser/testdata/03400_explain_distributed_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03400_explain_distributed_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03400_explain_distributed_bug/query.sql b/parser/testdata/03400_explain_distributed_bug/query.sql new file mode 100644 index 000000000..15f0152bd --- /dev/null +++ b/parser/testdata/03400_explain_distributed_bug/query.sql @@ -0,0 +1,7 @@ +set enable_analyzer=1, prefer_localhost_replica=1; + +set serialize_query_plan=0; +explain distributed=1 SELECT * FROM remote('127.0.0.{1,2}', numbers_mt(1e6)) GROUP BY number ORDER BY number DESC LIMIT 10; + +set serialize_query_plan=1; +explain distributed=1 SELECT * FROM remote('127.0.0.{1,2}', numbers_mt(1e6)) GROUP BY number ORDER BY number DESC LIMIT 10; diff --git a/parser/testdata/03400_get_server_setting/ast.json b/parser/testdata/03400_get_server_setting/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03400_get_server_setting/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03400_get_server_setting/metadata.json b/parser/testdata/03400_get_server_setting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03400_get_server_setting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03400_get_server_setting/query.sql b/parser/testdata/03400_get_server_setting/query.sql new file mode 100644 index 000000000..baa69f729 --- /dev/null +++ b/parser/testdata/03400_get_server_setting/query.sql @@ -0,0 +1,24 @@ +SELECT + toBool(t1.val = t2.val) AS should_be_equal +FROM + (SELECT toBool(value) AS val FROM system.server_settings WHERE name = 'allow_use_jemalloc_memory') AS t1, + (SELECT getServerSetting('allow_use_jemalloc_memory') AS val) AS t2; + +SELECT + toBool(t1.val = t2.val) AS should_be_equal +FROM + (SELECT value AS val FROM system.server_settings WHERE name = 'mark_cache_policy') AS t1, + (SELECT getServerSetting('mark_cache_policy') AS val) AS t2; + +SELECT ('TEST INVALID ARGUMENTS'); + +SELECT getServerSetting(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT getServerSetting(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT getServerSetting('allow_use_jemalloc_memory')(10); -- { serverError FUNCTION_CANNOT_HAVE_PARAMETERS } + +SELECT getServerSetting('marks_compression_codec'); -- { serverError UNKNOWN_SETTING } + +SELECT getServerSetting('allow_use_jemalloc_memory', + 'background_merges_mutations_scheduling_policy'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/parser/testdata/03401_get_merge_tree_setting/ast.json b/parser/testdata/03401_get_merge_tree_setting/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03401_get_merge_tree_setting/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03401_get_merge_tree_setting/metadata.json b/parser/testdata/03401_get_merge_tree_setting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03401_get_merge_tree_setting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03401_get_merge_tree_setting/query.sql b/parser/testdata/03401_get_merge_tree_setting/query.sql new file mode 100644 index 000000000..b2c100fd9 --- /dev/null +++ b/parser/testdata/03401_get_merge_tree_setting/query.sql @@ -0,0 +1,23 @@ +SELECT + toBool(t1.val = t2.val) AS should_be_equal +FROM + (SELECT toString(value) AS val FROM system.merge_tree_settings WHERE name = 'index_granularity') AS t1, + (SELECT toString(getMergeTreeSetting('index_granularity')) AS val) AS t2; + +SELECT + toBool(t1.val = t2.val) AS should_be_equal +FROM + (SELECT toString(value) AS val FROM system.merge_tree_settings WHERE name = 'max_merge_selecting_sleep_ms') AS t1, + (SELECT toString(getMergeTreeSetting('max_merge_selecting_sleep_ms')) AS val) AS t2; + +SELECT ('TEST INVALID ARGUMENTS'); + +SELECT getMergeTreeSetting(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT getMergeTreeSetting(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT getMergeTreeSetting('index_granularity')(4096); -- { serverError FUNCTION_CANNOT_HAVE_PARAMETERS } + +SELECT getMergeTreeSetting('keeper_multiread_batch_size'); -- { serverError UNKNOWN_SETTING } + +SELECT getMergeTreeSetting('index_granularity', 'marks_compression_codec'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/parser/testdata/03401_normal_projection_with_part_offset/ast.json b/parser/testdata/03401_normal_projection_with_part_offset/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03401_normal_projection_with_part_offset/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03401_normal_projection_with_part_offset/metadata.json b/parser/testdata/03401_normal_projection_with_part_offset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03401_normal_projection_with_part_offset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03401_normal_projection_with_part_offset/query.sql b/parser/testdata/03401_normal_projection_with_part_offset/query.sql new file mode 100644 index 000000000..9444d98e5 --- /dev/null +++ b/parser/testdata/03401_normal_projection_with_part_offset/query.sql @@ -0,0 +1,87 @@ +-- { echo ON } + +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + `a` Int32, + `b` Int32, + PROJECTION p + ( + SELECT + a, + b, + _part_offset + ORDER BY b + ) +) +ENGINE = MergeTree +ORDER BY a +SETTINGS index_granularity_bytes = 10485760, index_granularity = 8192; + +-- Insert enough rows so that future projection materialization test will trigger level 1 merge +INSERT INTO test SELECT number * 3, rand() FROM numbers(360000); +INSERT INTO test SELECT number * 3 + 1, rand() FROM numbers(360000); +INSERT INTO test SELECT number * 3 + 2, rand() FROM numbers(360000); +SELECT sum(l._part_offset = r._parent_part_offset) FROM test l JOIN mergeTreeProjection(currentDatabase(), test, p) r USING (a) SETTINGS enable_analyzer = 1; + +OPTIMIZE TABLE test FINAL; +SELECT sum(l._part_offset = r._parent_part_offset) FROM test l JOIN mergeTreeProjection(currentDatabase(), test, p) r USING (a) SETTINGS enable_analyzer = 1; + +ALTER TABLE test ADD PROJECTION p2 (SELECT a, b, _part_offset ORDER BY b); +ALTER TABLE test MATERIALIZE PROJECTION p2 SETTINGS mutations_sync = 2; +SELECT sum(l._part_offset = r._parent_part_offset) FROM test l JOIN mergeTreeProjection(currentDatabase(), test, p2) r USING (a) 
SETTINGS enable_analyzer = 1; + +-- Cannot add physical _part_offset, _part_index and _parent_part_offset when there exists a projection that refers to its parent's `_part_offset`. +ALTER TABLE test ADD COLUMN _part_offset int; -- { serverError BAD_ARGUMENTS } +ALTER TABLE test ADD COLUMN _part_index int; -- { serverError BAD_ARGUMENTS } +ALTER TABLE test ADD COLUMN _parent_part_offset int; -- { serverError BAD_ARGUMENTS } + +DROP TABLE test; + +CREATE TABLE test +( + `a` Int32, + `b` Int32, + `_part_offset` Int32, + PROJECTION p + ( + SELECT + a, + b, + _part_offset + ORDER BY b + ) +) +ENGINE = MergeTree +ORDER BY a; + +-- This works because now the projection will refer to the parent's physical `_part_offset` +ALTER TABLE test ADD COLUMN _part_index int; +ALTER TABLE test ADD COLUMN _parent_part_offset int; + +DROP TABLE test; + +CREATE TABLE test +( + `a` Int32, + `b` Int32, + PROJECTION p + ( + SELECT + a, + b, + _part_offset + ORDER BY b + ) +) +ENGINE = MergeTree +ORDER BY a +SETTINGS index_granularity = 1; + +INSERT INTO test SELECT number, 10 - number FROM numbers(5); + +-- Projection analysis should work +SELECT _part_offset FROM test WHERE b = 8; + +DROP TABLE test; diff --git a/parser/testdata/03401_normal_projection_with_part_offset_no_sorting/ast.json b/parser/testdata/03401_normal_projection_with_part_offset_no_sorting/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03401_normal_projection_with_part_offset_no_sorting/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03401_normal_projection_with_part_offset_no_sorting/metadata.json b/parser/testdata/03401_normal_projection_with_part_offset_no_sorting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03401_normal_projection_with_part_offset_no_sorting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03401_normal_projection_with_part_offset_no_sorting/query.sql b/parser/testdata/03401_normal_projection_with_part_offset_no_sorting/query.sql new file mode 100644 index 000000000..d0f7cf087 --- /dev/null +++ b/parser/testdata/03401_normal_projection_with_part_offset_no_sorting/query.sql @@ -0,0 +1,35 @@ +-- { echo ON } + +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + `a` Int32, + `b` Int32, + PROJECTION p + ( + SELECT + a, + b, + _part_offset + ORDER BY b + ) +) +ENGINE = MergeTree +ORDER BY () +SETTINGS index_granularity_bytes = 10485760, index_granularity = 8192; + +-- Insert enough rows so that future projection materialization test will trigger level 1 merge +INSERT INTO test SELECT number * 3, rand() FROM numbers(360000); +INSERT INTO test SELECT number * 3 + 1, rand() FROM numbers(360000); +INSERT INTO test SELECT number * 3 + 2, rand() FROM numbers(360000); +SELECT sum(l._part_offset = r._parent_part_offset) FROM test l JOIN mergeTreeProjection(currentDatabase(), test, p) r USING (a) SETTINGS enable_analyzer = 1; + +OPTIMIZE TABLE test FINAL; +SELECT sum(l._part_offset = r._parent_part_offset) FROM test l JOIN mergeTreeProjection(currentDatabase(), test, p) r USING (a) SETTINGS enable_analyzer = 1; + +ALTER TABLE test ADD PROJECTION p2 (SELECT a, b, _part_offset ORDER BY b); +ALTER TABLE test MATERIALIZE PROJECTION p2 SETTINGS mutations_sync = 2; +SELECT sum(l._part_offset = r._parent_part_offset) FROM test l JOIN mergeTreeProjection(currentDatabase(), test, p2) r USING (a) SETTINGS enable_analyzer = 1; + +DROP TABLE test; diff --git a/parser/testdata/03401_remote_bool/ast.json
b/parser/testdata/03401_remote_bool/ast.json new file mode 100644 index 000000000..9b539f1b0 --- /dev/null +++ b/parser/testdata/03401_remote_bool/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001232643, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03401_remote_bool/metadata.json b/parser/testdata/03401_remote_bool/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03401_remote_bool/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03401_remote_bool/query.sql b/parser/testdata/03401_remote_bool/query.sql new file mode 100644 index 000000000..4874ab12f --- /dev/null +++ b/parser/testdata/03401_remote_bool/query.sql @@ -0,0 +1,3 @@ +set enable_analyzer=1; + +SELECT ((number % 2) = 0) = true AS isEven FROM remote('localhos{t,t,t}', numbers(10)) GROUP BY all ORDER BY all; diff --git a/parser/testdata/03401_several_iceberg_tables_in_one_dir/ast.json b/parser/testdata/03401_several_iceberg_tables_in_one_dir/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03401_several_iceberg_tables_in_one_dir/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03401_several_iceberg_tables_in_one_dir/metadata.json b/parser/testdata/03401_several_iceberg_tables_in_one_dir/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03401_several_iceberg_tables_in_one_dir/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03401_several_iceberg_tables_in_one_dir/query.sql b/parser/testdata/03401_several_iceberg_tables_in_one_dir/query.sql new file mode 100644 index 000000000..bb537a9f5 --- /dev/null +++ b/parser/testdata/03401_several_iceberg_tables_in_one_dir/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +SELECT * FROM icebergS3(s3_conn, filename='merged_several_tables_test', SETTINGS iceberg_metadata_table_uuid = 'ea8d1178-7756-4b89-b21f-00e9f31fe03e') ORDER BY id; +SELECT * FROM icebergS3(s3_conn, filename='merged_several_tables_test', SETTINGS iceberg_metadata_table_uuid = 'A90EED4CF74B4E5BB630096FB9D09021') ORDER BY id; +SELECT * FROM icebergS3(s3_conn, filename='merged_several_tables_test', SETTINGS iceberg_metadata_table_uuid = '6f6f6407_c6A5465f_A808ea8900_e35a38') ORDER BY id; +SELECT * FROM icebergS3(s3_conn, filename='merged_several_tables_test', SETTINGS iceberg_metadata_table_uuid = '88005553-5352-8222-8993-abacaba01010') ORDER BY id; -- { serverError FILE_DOESNT_EXIST } + +SELECT count() FROM icebergS3(s3_conn, filename='merged_several_tables_test', SETTINGS iceberg_metadata_file_path = 'metadata/00001-aec4e034-3f73-48f7-87ad-51b7b42a8db7.metadata.json'); +SELECT count() FROM icebergS3(s3_conn, filename='merged_several_tables_test', SETTINGS iceberg_metadata_file_path = 'metadata/00001-2aad93a8-a893-4943-8504-f6021f83ecab.metadata.json'); +SELECT count() FROM icebergS3(s3_conn, filename='merged_several_tables_test', SETTINGS iceberg_metadata_file_path = 'metadata/00001-aec4e034-3f73-48f7-87ad-51b7b42a8db7.metadata.json'); + + +SELECT * FROM icebergS3(s3_conn, filename='merged_several_tables_test', SETTINGS iceberg_recent_metadata_file_by_last_updated_ms_field = true) ORDER BY id; \ No newline at end of file diff --git a/parser/testdata/03402_adding_projection_to_temporary_table/ast.json 
b/parser/testdata/03402_adding_projection_to_temporary_table/ast.json new file mode 100644 index 000000000..8c30a47be --- /dev/null +++ b/parser/testdata/03402_adding_projection_to_temporary_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00116837, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03402_adding_projection_to_temporary_table/metadata.json b/parser/testdata/03402_adding_projection_to_temporary_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03402_adding_projection_to_temporary_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03402_adding_projection_to_temporary_table/query.sql b/parser/testdata/03402_adding_projection_to_temporary_table/query.sql new file mode 100644 index 000000000..792956b5a --- /dev/null +++ b/parser/testdata/03402_adding_projection_to_temporary_table/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS t0; + +CREATE TEMPORARY TABLE t0 (c0 Int, c1 Int, PROJECTION p0 (SELECT c0 GROUP BY c0)) ENGINE = MergeTree() ORDER BY tuple(); + +ALTER TABLE t0 CLEAR COLUMN c0; + +DROP TABLE IF EXISTS t0; diff --git a/parser/testdata/03402_cluster_table_functions_settings_parsing/ast.json b/parser/testdata/03402_cluster_table_functions_settings_parsing/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03402_cluster_table_functions_settings_parsing/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03402_cluster_table_functions_settings_parsing/metadata.json b/parser/testdata/03402_cluster_table_functions_settings_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03402_cluster_table_functions_settings_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03402_cluster_table_functions_settings_parsing/query.sql b/parser/testdata/03402_cluster_table_functions_settings_parsing/query.sql new file mode 100644 index 000000000..0fe0091c6 --- /dev/null +++ b/parser/testdata/03402_cluster_table_functions_settings_parsing/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +SELECT * FROM icebergS3Cluster('test_cluster_two_shards_localhost', 'http://localhost:11111/test/est', 'clickhouse', 'clickhouse', SETTINGS iceberg_metadata_file_path = 'metadata/v2.metadata.json'); +SELECT * FROM icebergS3Cluster('test_cluster_two_shards_localhost', s3_conn, filename='est', SETTINGS iceberg_metadata_file_path = 'metadata/v2.metadata.json'); \ No newline at end of file diff --git a/parser/testdata/03402_concurrent_right_full_join/ast.json b/parser/testdata/03402_concurrent_right_full_join/ast.json new file mode 100644 index 000000000..833de2215 --- /dev/null +++ b/parser/testdata/03402_concurrent_right_full_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001135035, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03402_concurrent_right_full_join/metadata.json b/parser/testdata/03402_concurrent_right_full_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03402_concurrent_right_full_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03402_concurrent_right_full_join/query.sql b/parser/testdata/03402_concurrent_right_full_join/query.sql new file mode 100644 index 000000000..9fbb4498a --- /dev/null +++ b/parser/testdata/03402_concurrent_right_full_join/query.sql @@ -0,0 +1,161 @@ +SET join_use_nulls = 1; +SET enable_analyzer = 1; +SET join_algorithm = 'parallel_hash'; +SET query_plan_join_swap_table = 0; + +-- 1) Small dataset: RIGHT OUTER ALL +DROP TABLE IF EXISTS t_l_small; +DROP TABLE IF EXISTS t_r_small; +CREATE TABLE t_l_small (id UInt32, value String) ENGINE = Memory; +CREATE TABLE t_r_small (id UInt32, description String) ENGINE = Memory; +INSERT INTO t_l_small VALUES (1, 'A'), (2, 'B'), (3, 'C'); +INSERT INTO t_r_small VALUES (2, 'Second'), (3, 'Third'), (4, 'Fourth'), (5, 'Fifth'); + +SELECT 'right_all_small'; +SELECT l.id, l.value, r.description +FROM t_l_small AS l +RIGHT JOIN t_r_small AS r ON l.id = r.id +ORDER BY r.id, l.id; + +-- Explain plan for a small RIGHT OUTER join (should use ConcurrentHashJoin / parallel_hash) +SELECT 'explain_right_all_small'; +EXPLAIN actions=1, keep_logical_steps=0 +SELECT l.id, l.value, r.description +FROM t_l_small AS l +RIGHT JOIN t_r_small AS r ON l.id = r.id +ORDER BY r.id, l.id; + +-- 2) Small dataset: FULL OUTER ALL +SELECT 'full_all_small'; +SELECT l.id, l.value, r.description +FROM t_l_small AS l +FULL OUTER JOIN t_r_small AS r ON l.id = r.id +ORDER BY coalesce(l.id, r.id), r.id; + +-- 3) RIGHT ANY with duplicates on left (identical values to avoid nondeterminism), aggregated checks +DROP TABLE IF EXISTS t_l_any; +DROP TABLE IF EXISTS t_r_any; +CREATE TABLE t_l_any (id UInt32, value String) ENGINE = Memory; +CREATE TABLE t_r_any (id UInt32, description String) ENGINE = Memory; +INSERT INTO t_l_any VALUES (2, 'B'), (2, 'B'), (3, 'C'), (10, 'X'); +INSERT INTO t_r_any VALUES (2, 'Second'), (3, 'Third'), (4, 'Fourth'); + +SELECT 'right_any_small'; +SELECT + count(), + countIf(isNull(l.value)) +FROM t_l_any AS l +RIGHT ANY JOIN t_r_any AS r ON l.id = r.id; + +-- 4) RIGHT OUTER with additional ON filter +DROP TABLE IF EXISTS t_l_filter; +DROP TABLE IF EXISTS t_r_filter; +CREATE TABLE t_l_filter (id UInt32, value String) ENGINE = Memory; +CREATE TABLE t_r_filter (id UInt32, description String) ENGINE = Memory; +INSERT INTO t_l_filter VALUES (2, 'B'), (3, 'C'), (4, 'D'); +INSERT INTO t_r_filter VALUES (2, 'Second'), (3, 'Third'), (4, 'Fourth'); + +SELECT 'right_all_with_on_filter'; +SELECT l.id, l.value, r.description +FROM t_l_filter AS l +RIGHT JOIN t_r_filter AS r ON l.id = r.id AND r.description LIKE 'F%' +ORDER BY r.id; + +-- 5) RIGHT OUTER with null keys on right +DROP TABLE IF EXISTS t_l_null; +DROP TABLE IF EXISTS t_r_null; +CREATE TABLE t_l_null (id UInt32, v String) ENGINE = Memory; +CREATE TABLE t_r_null (id Nullable(UInt32), d String) ENGINE = Memory; +INSERT INTO t_l_null VALUES (1, 'A'), (2, 'B'); +INSERT INTO t_r_null VALUES (1, 'one'), (NULL, 'null1'), (3, 'three'); + +SELECT 'right_all_null_keys'; +SELECT l.id, l.v, r.d +FROM t_l_null AS l +RIGHT JOIN t_r_null AS r ON l.id = r.id +ORDER BY r.d; + +-- 6) Composite key RIGHT OUTER ALL +DROP TABLE IF EXISTS t_l_cmp; +DROP TABLE IF EXISTS t_r_cmp; +CREATE TABLE t_l_cmp (id UInt32, grp UInt8, val String) ENGINE = Memory; +CREATE TABLE t_r_cmp (id UInt32, grp UInt8, descr String) ENGINE = Memory; +INSERT INTO t_l_cmp VALUES (1, 1, 'a'), (1, 2, 'b'), (2, 1, 'c'); +INSERT INTO t_r_cmp VALUES (1, 
1, 'r11'), (2, 1, 'r21'), (3, 1, 'r31'); + +SELECT 'right_all_composite_keys'; +SELECT l.id, l.grp, l.val, r.descr +FROM t_l_cmp AS l +RIGHT JOIN t_r_cmp AS r ON (l.id = r.id) AND (l.grp = r.grp) +ORDER BY r.id, r.grp; + +-- 7) Large volume RIGHT OUTER ALL (aggregated) +SELECT 'right_all_large'; +SELECT + count(), + countIf(isNull(l.id)), + sum(coalesce(l.id, 0)), + sum(r.id) +FROM + (SELECT number AS id FROM numbers(15000)) AS l +RIGHT JOIN + (SELECT number AS id FROM numbers(20000)) AS r +ON l.id = r.id; + +-- 8) Large volume FULL OUTER ALL (aggregated) +SELECT 'full_all_large'; +SELECT + count(), + countIf(isNull(l.id)), -- right-only + countIf(isNull(r.id)) -- left-only +FROM + (SELECT number AS id FROM numbers(15000)) AS l +FULL OUTER JOIN + (SELECT number AS id FROM numbers(15500)) AS r +ON l.id = r.id; + +-- Cleanup +DROP TABLE IF EXISTS t_l_small; +DROP TABLE IF EXISTS t_r_small; +DROP TABLE IF EXISTS t_l_any; +DROP TABLE IF EXISTS t_r_any; +DROP TABLE IF EXISTS t_l_filter; +DROP TABLE IF EXISTS t_r_filter; +DROP TABLE IF EXISTS t_l_null; +DROP TABLE IF EXISTS t_r_null; +DROP TABLE IF EXISTS t_l_cmp; +DROP TABLE IF EXISTS t_r_cmp; + +SELECT 'Consistency of results with HashJoin for multiple conditions'; + +SET allow_experimental_analyzer = 1; +SET join_use_nulls = 1; + +DROP TABLE IF EXISTS l; +DROP TABLE IF EXISTS r; +CREATE TABLE l (k UInt8, v UInt8) ENGINE = Memory; +CREATE TABLE r (k UInt8, v UInt8) ENGINE = Memory; + +INSERT INTO l SELECT toUInt8(number), toUInt8(number) FROM numbers(200); +INSERT INTO r SELECT toUInt8(number), toUInt8(number) FROM numbers(200); + +SET max_threads = 8; SET join_algorithm = 'hash'; +SELECT 'hash' AS alg, count() AS cnt +FROM l RIGHT JOIN r ON l.k = r.k AND l.v > r.v; + +SET join_algorithm = 'parallel_hash'; +SELECT 'parallel_hash' AS alg, count() AS cnt +FROM l RIGHT JOIN r ON l.k = r.k AND l.v > r.v; + +SET join_algorithm = 'hash'; +SELECT 'hash right-only', countIf(l.k IS NULL) FROM l RIGHT JOIN r ON l.k = r.k AND l.v > r.v; +SET join_algorithm = 'parallel_hash'; +SELECT 'parallel right-only', countIf(l.k IS NULL) FROM l RIGHT JOIN r ON l.k = r.k AND l.v > r.v; + +SET join_algorithm = 'hash'; +SELECT 'hash', count() FROM l FULL OUTER JOIN r ON l.k = r.k AND l.v > r.v; +SET join_algorithm = 'parallel_hash'; +SELECT 'parallel_hash', count() FROM l FULL OUTER JOIN r ON l.k = r.k AND l.v > r.v; + +DROP TABLE IF EXISTS l; +DROP TABLE IF EXISTS r; diff --git a/parser/testdata/03402_cyclic_alter_dependencies/ast.json b/parser/testdata/03402_cyclic_alter_dependencies/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03402_cyclic_alter_dependencies/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03402_cyclic_alter_dependencies/metadata.json b/parser/testdata/03402_cyclic_alter_dependencies/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03402_cyclic_alter_dependencies/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03402_cyclic_alter_dependencies/query.sql b/parser/testdata/03402_cyclic_alter_dependencies/query.sql new file mode 100644 index 000000000..371d3ff61 --- /dev/null +++ b/parser/testdata/03402_cyclic_alter_dependencies/query.sql @@ -0,0 +1,29 @@ +-- Tags: no-parallel +DROP DICTIONARY IF EXISTS d0; +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 ( + key Int32, + value Int32 +) +ENGINE=MergeTree() +PRIMARY KEY key +PARTITION BY key % 2; + +INSERT INTO t0 VALUES (0, 0); + +CREATE DICTIONARY d0 ( + key Int32, + 
value Int32 +) +PRIMARY KEY key +SOURCE(CLICKHOUSE(DATABASE default TABLE t0)) +LIFETIME(MIN 0 MAX 0) +LAYOUT(HASHED()); + +SELECT * FROM d0; + +ALTER TABLE t0 ADD COLUMN key2 Int32 DEFAULT dictGetOrDefault('d0', 'value', 0, 1); -- {serverError INFINITE_LOOP} + +DROP DICTIONARY d0; +DROP TABLE t0; diff --git a/parser/testdata/03402_join_using_alias/ast.json b/parser/testdata/03402_join_using_alias/ast.json new file mode 100644 index 000000000..8e02ebd68 --- /dev/null +++ b/parser/testdata/03402_join_using_alias/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001109125, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03402_join_using_alias/metadata.json b/parser/testdata/03402_join_using_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03402_join_using_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03402_join_using_alias/query.sql b/parser/testdata/03402_join_using_alias/query.sql new file mode 100644 index 000000000..1d12227e9 --- /dev/null +++ b/parser/testdata/03402_join_using_alias/query.sql @@ -0,0 +1,17 @@ +set enable_analyzer=1; + +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Int, c1 Int ALIAS 1) ENGINE = Memory; +INSERT INTO t0 VALUES (42), (43); + +SELECT c0 FROM remote('localhost', currentDatabase(), 't0') tx JOIN t0 USING (c1) ORDER BY c0; + +CREATE TABLE t1_dist ( c0 Int, c1 Int, c2 Int ALIAS 2 ) ENGINE = Distributed('test_shard_localhost', currentDatabase(), 't0', rand()); + +SELECT c0 FROM t1_dist tx JOIN t0 USING (c1) ORDER BY c0; + +-- Cannot join using alias column defined in Distributed table +SELECT c0 FROM t1_dist tx JOIN t0 USING (c2); -- { serverError UNKNOWN_IDENTIFIER } + +DROP TABLE IF EXISTS t0; diff --git a/parser/testdata/03402_materialized_tuple_element/ast.json b/parser/testdata/03402_materialized_tuple_element/ast.json new file mode 100644 index 000000000..598b5fde7 --- /dev/null +++ b/parser/testdata/03402_materialized_tuple_element/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001678244, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03402_materialized_tuple_element/metadata.json b/parser/testdata/03402_materialized_tuple_element/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03402_materialized_tuple_element/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03402_materialized_tuple_element/query.sql b/parser/testdata/03402_materialized_tuple_element/query.sql new file mode 100644 index 000000000..edf233c61 --- /dev/null +++ b/parser/testdata/03402_materialized_tuple_element/query.sql @@ -0,0 +1,11 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS t; +CREATE TABLE t (id Int32, d Tuple(year Int32, month Int32, day Int32)) +ENGINE = MergeTree ORDER BY (); + +ALTER TABLE t ADD COLUMN dt Date MATERIALIZED makeDate(d.year, d.month, d.day); +INSERT INTO t (id, d) SELECT number, (2000 + number, 1 + number % 10, 1 + number % 30) FROM numbers(10); + +SELECT *, dt FROM t; +DROP TABLE t; diff --git a/parser/testdata/03402_secondary_indexes_analyzer_bugs/ast.json b/parser/testdata/03402_secondary_indexes_analyzer_bugs/ast.json new file mode 100644 index 
000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03402_secondary_indexes_analyzer_bugs/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03402_secondary_indexes_analyzer_bugs/metadata.json b/parser/testdata/03402_secondary_indexes_analyzer_bugs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03402_secondary_indexes_analyzer_bugs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03402_secondary_indexes_analyzer_bugs/query.sql b/parser/testdata/03402_secondary_indexes_analyzer_bugs/query.sql new file mode 100644 index 000000000..b579f6ceb --- /dev/null +++ b/parser/testdata/03402_secondary_indexes_analyzer_bugs/query.sql @@ -0,0 +1,52 @@ +-- Tags: no-random-merge-tree-settings, no-random-settings, no-parallel-replicas + +--- #65607 +select 'index is applied while using column alias'; + +drop table if exists t; + +CREATE TABLE t +( + `tenant` String, + `recordTimestamp` Int64, + `responseBody` String, + `colAlias` String ALIAS responseBody || 'something else', + INDEX ngrams colAlias TYPE ngrambf_v1(3, 2097152, 3, 0) GRANULARITY 1, +) +ENGINE = MergeTree +ORDER BY recordTimestamp +SETTINGS index_granularity = 8192, index_granularity_bytes = '10Mi'; + +INSERT INTO t SELECT toString(number), number, toString(number) from numbers(65536); + +explain indexes=1 select tenant,recordTimestamp from t where colAlias like '%abcd%' settings enable_analyzer=0; +explain indexes=1 select tenant,recordTimestamp from t where colAlias like '%abcd%' settings enable_analyzer=1; + + +--- #69373 +select 'index is applied to view'; + +drop table if exists tab_v1; +CREATE TABLE tab_v1 +( + content String, + INDEX idx_content_bloom content TYPE bloom_filter(0.01) +) +ENGINE = MergeTree +ORDER BY content; + +drop table if exists tab_v3; +CREATE VIEW tab_v3 +AS SELECT * FROM tab_v1; + +INSERT INTO tab_v1 (content) VALUES ('aaa bbb'), ('ccc ddd'); + +SELECT count() +FROM tab_v3 +WHERE content = 'iii' +SETTINGS force_data_skipping_indices='idx_content_bloom', enable_analyzer=0; + +SELECT count() +FROM tab_v3 +WHERE content = 'iii' +SETTINGS force_data_skipping_indices='idx_content_bloom', enable_analyzer=1; diff --git a/parser/testdata/03402_zero_streams_after_max_streams_to_max_threads_ratio/ast.json b/parser/testdata/03402_zero_streams_after_max_streams_to_max_threads_ratio/ast.json new file mode 100644 index 000000000..896338295 --- /dev/null +++ b/parser/testdata/03402_zero_streams_after_max_streams_to_max_threads_ratio/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 03402_data (children 1)" + }, + { + "explain": " Identifier 03402_data" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001332471, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03402_zero_streams_after_max_streams_to_max_threads_ratio/metadata.json b/parser/testdata/03402_zero_streams_after_max_streams_to_max_threads_ratio/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03402_zero_streams_after_max_streams_to_max_threads_ratio/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03402_zero_streams_after_max_streams_to_max_threads_ratio/query.sql b/parser/testdata/03402_zero_streams_after_max_streams_to_max_threads_ratio/query.sql new file mode 100644 index 000000000..c0a55d83d --- /dev/null +++ 
b/parser/testdata/03402_zero_streams_after_max_streams_to_max_threads_ratio/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS 03402_data; + +CREATE TABLE 03402_data (id UInt32) ENGINE = MergeTree ORDER BY id; +INSERT INTO 03402_data SELECT * FROM numbers(100); + +SELECT avg(id) FROM 03402_data SETTINGS max_threads = 4, max_streams_to_max_threads_ratio = 0; +SELECT avg(id) FROM 03402_data SETTINGS max_threads = 0, max_streams_to_max_threads_ratio = 0; +SELECT avg(id) FROM 03402_data SETTINGS max_threads = 2, max_streams_to_max_threads_ratio = 0.2; + +SELECT ''; + +SELECT id FROM 03402_data ORDER BY id LIMIT 1 SETTINGS max_threads = 4, max_streams_to_max_threads_ratio = 0; +SELECT id FROM 03402_data ORDER BY id LIMIT 1 SETTINGS max_threads = 0, max_streams_to_max_threads_ratio = 0; +SELECT id FROM 03402_data ORDER BY id LIMIT 1 SETTINGS max_threads = 2, max_streams_to_max_threads_ratio = 0.2; + +DROP TABLE 03402_data; diff --git a/parser/testdata/03403_distributed_merge_two_level_aggregation/ast.json b/parser/testdata/03403_distributed_merge_two_level_aggregation/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03403_distributed_merge_two_level_aggregation/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03403_distributed_merge_two_level_aggregation/metadata.json b/parser/testdata/03403_distributed_merge_two_level_aggregation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03403_distributed_merge_two_level_aggregation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03403_distributed_merge_two_level_aggregation/query.sql b/parser/testdata/03403_distributed_merge_two_level_aggregation/query.sql new file mode 100644 index 000000000..011b387c3 --- /dev/null +++ b/parser/testdata/03403_distributed_merge_two_level_aggregation/query.sql @@ -0,0 +1,42 @@ +-- Tags: no-fasttest, long + +DROP TABLE IF EXISTS test_table_1; +CREATE TABLE test_table_1(number UInt64) ENGINE = MergeTree ORDER BY number; +SYSTEM STOP MERGES test_table_1; + +DROP TABLE IF EXISTS dist_test_table_1; +CREATE TABLE dist_test_table_1(number UInt64) ENGINE = Distributed('test_cluster_thirty_shards_localhost', currentDatabase(), test_table_1, rand()); +INSERT INTO dist_test_table_1 SELECT number from numbers_mt(10000) SETTINGS distributed_foreground_insert = 1; + +DROP TABLE IF EXISTS test_table_2; +CREATE TABLE test_table_2(number UInt64) ENGINE = MergeTree ORDER BY number; +SYSTEM STOP MERGES test_table_2; + +DROP TABLE IF EXISTS dist_test_table_2; +CREATE TABLE dist_test_table_2(number UInt64) ENGINE = Distributed('test_cluster_thirty_shards_localhost', currentDatabase(), test_table_2, rand()); +INSERT INTO dist_test_table_2 SELECT number from numbers_mt(10000) SETTINGS distributed_foreground_insert = 1; + +DROP TABLE IF EXISTS merge_test_table; +CREATE TABLE merge_test_table ENGINE = Merge(currentDatabase(), '^dist_test_table_(1|2)$'); + +EXPLAIN PIPELINE +SELECT + cityHash64(number), + sum(1) +FROM remote('127.0.0.{1,1}', currentDatabase(), merge_test_table) +GROUP BY 1 +SETTINGS distributed_aggregation_memory_efficient = 1, max_threads = 4, optimize_aggregation_in_order = 0, prefer_localhost_replica = 1, async_socket_for_remote = 1, enable_analyzer = 0, enable_producing_buckets_out_of_order_in_aggregation = 0, enable_memory_bound_merging_of_aggregation_results = 0; + +SELECT + cityHash64(number), + sum(1) +FROM remote('127.0.0.{1,1}', currentDatabase(), merge_test_table) +GROUP BY 1 +FORMAT 
Null +SETTINGS distributed_aggregation_memory_efficient = 1, max_threads = 4, optimize_aggregation_in_order = 0, prefer_localhost_replica = 1, async_socket_for_remote = 1, enable_analyzer = 0, enable_producing_buckets_out_of_order_in_aggregation = 0, enable_memory_bound_merging_of_aggregation_results = 0, max_memory_usage='500Mi', group_by_two_level_threshold=1e6, group_by_two_level_threshold_bytes='500Mi'; + +DROP TABLE merge_test_table; +DROP TABLE dist_test_table_1; +DROP TABLE dist_test_table_2; +DROP TABLE test_table_1; +DROP TABLE test_table_2; diff --git a/parser/testdata/03403_function_tokens/ast.json b/parser/testdata/03403_function_tokens/ast.json new file mode 100644 index 000000000..8644edd34 --- /dev/null +++ b/parser/testdata/03403_function_tokens/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Constants: tokens should be constant'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001212789, + "rows_read": 5, + "bytes_read": 207 + } +} diff --git a/parser/testdata/03403_function_tokens/metadata.json b/parser/testdata/03403_function_tokens/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03403_function_tokens/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03403_function_tokens/query.sql b/parser/testdata/03403_function_tokens/query.sql new file mode 100644 index 000000000..ce9be6136 --- /dev/null +++ b/parser/testdata/03403_function_tokens/query.sql @@ -0,0 +1,123 @@ +SELECT 'Constants: tokens should be constant'; +SELECT 'Negative tests'; +-- Must accept one to three arguments +SELECT tokens(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT tokens('a', 'b', 'c', 'd'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +-- 1st arg must be String or FixedString +SELECT tokens(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +-- 2nd arg (if given) must be const String +SELECT tokens('a', 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT tokens('a', toFixedString('b', 1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT tokens('a', materialize('b')); -- { serverError ILLEGAL_COLUMN } +-- 2nd arg (if given) must be a supported tokenizer +SELECT tokens('a', 'unsupported_tokenizer'); -- { serverError BAD_ARGUMENTS } +-- 3rd arg (if given) must be +-- const UInt8 (for "ngram") +SELECT tokens('a', 'ngrams', 'c'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT tokens('a', 'ngrams', toInt8(-1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT tokens('a', 'ngrams', toFixedString('c', 1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT tokens('a', 'ngrams', materialize(1)); -- { serverError ILLEGAL_COLUMN } +-- If 2nd arg is "ngram", then the 3rd arg must be between 1 and 8 +SELECT tokens('a', 'ngrams', 0); -- { serverError BAD_ARGUMENTS} +SELECT tokens('a', 'ngrams', 9); -- { serverError BAD_ARGUMENTS} +-- const Array (for "split") +SELECT tokens('a', 'splitByString', 'c'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT tokens('a', 'splitByString', toInt8(-1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT tokens('a', 'splitByString', toFixedString('c', 1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT tokens('a', 'splitByString', 
materialize(['c'])); -- { serverError ILLEGAL_COLUMN } +SELECT tokens('a', 'splitByString', [1, 2]); -- { serverError INCORRECT_QUERY } +SELECT tokens(' a bc d', 'splitByString', []); -- { serverError INCORRECT_QUERY } + + +SELECT 'Default tokenizer'; + +SELECT tokens('') AS tokenized, toTypeName(tokenized), isConstant(tokenized); +SELECT tokens('abc+ def- foo! bar? baz= code; hello: world/ xäöüx') AS tokenized, toTypeName(tokenized), isConstant(tokenized); +SELECT tokens('abc+ def- foo! bar? baz= code; hello: world/ xäöüx', 'splitByNonAlpha') AS tokenized, toTypeName(tokenized), isConstant(tokenized); + +SELECT 'Ngram tokenizer'; + +SELECT tokens('', 'ngrams') AS tokenized, toTypeName(tokenized), isConstant(tokenized); +SELECT tokens('abc def', 'ngrams') AS tokenized, toTypeName(tokenized), isConstant(tokenized); +SELECT tokens('abc def', 'ngrams', 3) AS tokenized, toTypeName(tokenized), isConstant(tokenized); +SELECT tokens('abc def', 'ngrams', 8) AS tokenized, toTypeName(tokenized), isConstant(tokenized); + +SELECT 'Split tokenizer'; + +SELECT tokens('', 'splitByString') AS tokenized, toTypeName(tokenized), isConstant(tokenized); +SELECT tokens(' a bc d', 'splitByString', [' ']) AS tokenized, toTypeName(tokenized), isConstant(tokenized); +SELECT tokens('()()a()bc()d', 'splitByString', ['()']) AS tokenized, toTypeName(tokenized), isConstant(tokenized); +SELECT tokens(',()a(),bc,(),d,', 'splitByString', ['()', ',']) AS tokenized, toTypeName(tokenized), isConstant(tokenized); +SELECT tokens('\\a\n\\bc\\d\n', 'splitByString', ['\n', '\\']) AS tokenized, toTypeName(tokenized), isConstant(tokenized); + +SELECT 'No-op tokenizer'; + +SELECT tokens('', 'array') AS tokenized, toTypeName(tokenized), isConstant(tokenized); +SELECT tokens('abc def', 'array') AS tokenized, toTypeName(tokenized), isConstant(tokenized); + +SELECT 'Special cases (not systematically tested)'; +SELECT '-- FixedString inputs'; +SELECT tokens(toFixedString('abc+ def- foo! bar? baz= code; hello: world/', 44)) AS tokenized, toTypeName(tokenized), isConstant(tokenized); +SELECT '-- non-const inputs'; +SELECT tokens(materialize('abc+ def- foo! bar? 
baz= code; hello: world/')) AS tokenized, toTypeName(tokenized), isConstant(tokenized); + +SELECT 'Column values: tokens should be non-constant'; +SELECT 'Default tokenizer'; + +CREATE TABLE tab ( + id Int64, + str String +) ENGINE = MergeTree() ORDER BY id; + +INSERT INTO tab (id, str) VALUES (1, 'abc+ def-'), (2, 'hello: world/'), (3, 'xäöüx code;'); + +SELECT tokens(str, 'splitByNonAlpha') AS tokenized, toTypeName(tokenized), isConstant(tokenized) FROM tab; + +DROP TABLE tab; + +SELECT 'Ngram tokenizer'; + +CREATE TABLE tab ( + id Int64, + str String +) ENGINE = MergeTree() ORDER BY id; + +INSERT INTO tab (id, str) VALUES (1, 'abc def'), (2, 'ClickHouse'); + +SELECT tokens(str, 'ngrams', 3) AS tokenized, toTypeName(tokenized), isConstant(tokenized) FROM tab; + +DROP TABLE tab; + +SELECT 'Split tokenizer'; + +CREATE TABLE tab ( + id Int64, + str String +) ENGINE = MergeTree() ORDER BY id; + +INSERT INTO tab (id, str) VALUES (1, '()()a()bc()d'), (2, ',()a(),bc,(),d,'); + +SELECT tokens(str, 'splitByString', ['()', ',']) AS tokenized, toTypeName(tokenized), isConstant(tokenized) FROM tab; + +DROP TABLE tab; + +SELECT 'No-op tokenizer'; + +CREATE TABLE tab ( + id Int64, + str String +) ENGINE = MergeTree() ORDER BY id; + +INSERT INTO tab (id, str) VALUES (1, ''), (2, 'abc def'); + +SELECT tokens(str, 'array') AS tokenized, toTypeName(tokenized), isConstant(tokenized) FROM tab; + +DROP TABLE tab; +SELECT tokens(materialize('abc+ def- foo! bar? baz= code; hello: world/')); + +SELECT 'Sparse tokenizer'; + +SELECT tokens('', 'sparseGrams') AS tokenized; +SELECT tokens('abc def cba', 'sparseGrams') AS tokenized; +SELECT tokens('abc def cba', 'sparseGrams', 4, 10) AS tokenized; +SELECT tokens('abc def cba', 'sparseGrams', 4, 10, 6) AS tokenized; diff --git a/parser/testdata/03403_parallel_blocks_marshalling_for_distributed/ast.json b/parser/testdata/03403_parallel_blocks_marshalling_for_distributed/ast.json new file mode 100644 index 000000000..d9b4f94b4 --- /dev/null +++ b/parser/testdata/03403_parallel_blocks_marshalling_for_distributed/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001784493, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03403_parallel_blocks_marshalling_for_distributed/metadata.json b/parser/testdata/03403_parallel_blocks_marshalling_for_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03403_parallel_blocks_marshalling_for_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03403_parallel_blocks_marshalling_for_distributed/query.sql b/parser/testdata/03403_parallel_blocks_marshalling_for_distributed/query.sql new file mode 100644 index 000000000..c5ce818ac --- /dev/null +++ b/parser/testdata/03403_parallel_blocks_marshalling_for_distributed/query.sql @@ -0,0 +1,44 @@ +set serialize_query_plan = 0; + +CREATE TABLE t(a UInt64, b UInt64) ENGINE = MergeTree ORDER BY a; + +INSERT INTO t SELECT + number, + number +FROM numbers_mt(1000000); + +SET enable_parallel_replicas = 1, parallel_replicas_local_plan = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_for_non_replicated_merge_tree = 1; +SET prefer_localhost_replica = 1, enable_analyzer = 1; + +SELECT replaceRegexpAll(explain, 'ReadFromRemoteParallelReplicas.*', 
'ReadFromRemoteParallelReplicas') +FROM ( + EXPLAIN distributed = 1 + SELECT a + FROM t + GROUP BY a +); + +SELECT replaceRegexpAll(explain, 'ReadFromRemoteParallelReplicas.*', 'ReadFromRemoteParallelReplicas') +FROM ( + EXPLAIN distributed = 1 + SELECT a + FROM t +); + +-- This test case triggers a logical error in fuzzing tests. The issue existed on master prior to this PR. See https://github.com/ClickHouse/ClickHouse/pull/81610 +--SELECT replaceRegexpAll(explain, 'ReadFromRemoteParallelReplicas.*', 'ReadFromRemoteParallelReplicas') +--FROM ( +-- EXPLAIN distributed = 1 +-- SELECT a +-- FROM remote('127.0.0.{1,2}', currentDatabase(), t) +-- GROUP BY a +--); + +-- Not yet supported; see the comment in `Planner::buildPlanForQueryNode()` +--SELECT replaceRegexpAll(explain, 'ReadFromRemoteParallelReplicas.*', 'ReadFromRemoteParallelReplicas') +--FROM ( +-- EXPLAIN distributed = 1 +-- SELECT a +-- FROM remote('127.0.0.{1,2}', default.t) +--); + diff --git a/parser/testdata/03403_read_in_order_streams_memory_usage/ast.json b/parser/testdata/03403_read_in_order_streams_memory_usage/ast.json new file mode 100644 index 000000000..08c52b4fe --- /dev/null +++ b/parser/testdata/03403_read_in_order_streams_memory_usage/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001474841, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03403_read_in_order_streams_memory_usage/metadata.json b/parser/testdata/03403_read_in_order_streams_memory_usage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03403_read_in_order_streams_memory_usage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03403_read_in_order_streams_memory_usage/query.sql b/parser/testdata/03403_read_in_order_streams_memory_usage/query.sql new file mode 100644 index 000000000..be29f7535 --- /dev/null +++ b/parser/testdata/03403_read_in_order_streams_memory_usage/query.sql @@ -0,0 +1,23 @@ +SET enable_parallel_blocks_marshalling = 0; + +DROP TABLE IF EXISTS 03403_data; +CREATE TABLE 03403_data(id UInt32, val String) ENGINE = MergeTree ORDER BY id AS SELECT 1, 'test'; + +SELECT * +FROM 03403_data +ORDER BY id +FORMAT Null +SETTINGS max_threads = 1024, + max_streams_to_max_threads_ratio = 10000000; + +SYSTEM FLUSH LOGS query_log; + +SELECT * +FROM system.query_log +WHERE Settings['max_streams_to_max_threads_ratio'] = '10000000' + AND query like '%FROM 03403_data%' + AND type = 'QueryFinish' + AND memory_usage > 20_000_000 + AND current_database = currentDatabase(); + +DROP TABLE 03403_data; diff --git a/parser/testdata/03403_toInterval/ast.json b/parser/testdata/03403_toInterval/ast.json new file mode 100644 index 000000000..8cedd30f8 --- /dev/null +++ b/parser/testdata/03403_toInterval/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toInterval (alias interval) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_5" + }, + { + "explain": " Literal 'nanosecond'" + }, + { + "explain": " Function plus (alias res) (children 1)" + }, + { + "explain": " 
ExpressionList (children 2)" + }, + { + "explain": " Function toDateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2025-01-01 00:00:00'" + }, + { + "explain": " Literal UInt64_9" + }, + { + "explain": " Identifier interval" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001414429, + "rows_read": 15, + "bytes_read": 604 + } +} diff --git a/parser/testdata/03403_toInterval/metadata.json b/parser/testdata/03403_toInterval/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03403_toInterval/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03403_toInterval/query.sql b/parser/testdata/03403_toInterval/query.sql new file mode 100644 index 000000000..eaa544208 --- /dev/null +++ b/parser/testdata/03403_toInterval/query.sql @@ -0,0 +1,30 @@ +SELECT toInterval(5, 'nanosecond') as interval, toDateTime64('2025-01-01 00:00:00', 9) + interval AS res; +SELECT toInterval(5, 'microsecond') as interval, toDateTime64('2025-01-01 00:00:00', 9) + interval AS res; +SELECT toInterval(5, 'millisecond') as interval, toDateTime64('2025-01-01 00:00:00', 9) + interval AS res; +SELECT toInterval(5, 'second') as interval, toDateTime('2025-01-01 00:00:00') + interval AS res; +SELECT toInterval(5, 'Second') as interval, toDateTime('2025-01-01 00:00:00') + interval AS res; +SELECT toInterval(5, 'SECOND') as interval, toDateTime('2025-01-01 00:00:00') + interval AS res; +SELECT toInterval(5, 'Minute') as interval, toDateTime('2025-01-01 00:00:00') + interval AS res; +SELECT toInterval(5, 'Hour') as interval, toDateTime('2025-01-01 00:00:00') + interval AS res; +SELECT toInterval(5, 'Day') as interval, toDateTime('2025-01-01 00:00:00') + interval AS res; +SELECT toInterval(5, 'Week') as interval, toDateTime('2025-01-01 00:00:00') + interval AS res; +SELECT toInterval(5, 'Month') as interval, toDateTime('2025-01-01 00:00:00') + interval AS res; +SELECT toInterval(5, 'Quarter') as interval, toDateTime('2025-01-01 00:00:00') + interval AS res; +SELECT toInterval(5, 'Year') as interval, toDateTime('2025-01-01 00:00:00') + interval AS res; +SELECT toDateTime('2025-01-01 00:00:00') + toInterval(5, 'Year') AS res; +SELECT toDateTime('2025-01-01 00:00:00') + toInterval(number, 'second') FROM numbers(5); +SELECT toDateTime('2025-01-01 00:00:00') + toInterval(null, 'second'); +SELECT toDateTime('2025-01-01 00:00:01') + toInterval(-1, 'second'); +SELECT toDateTime('2025-01-01 00:00:00') + toInterval(0, 'second'); +SELECT toDateTime('2025-01-01 00:00:00') + toInterval(1.5, 'second'); +SELECT toDateTime('2025-01-01 00:00:00') + toInterval('5', 'second'); + +SELECT toInterval(); -- { serverError 42} +SELECT toInterval(''); -- { serverError 42} +SELECT toInterval('second'); -- { serverError 42 } +SELECT toInterval(5, 'second', 10); -- { serverError 42 } + +SELECT toInterval(10, 5); -- { serverError 43 } + +SELECT toInterval(5, ''); -- { serverError 36 } +SELECT toInterval(5, 'invalid kind'); -- { serverError 36 } diff --git a/parser/testdata/03403_truncate_all_tables_like/ast.json b/parser/testdata/03403_truncate_all_tables_like/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03403_truncate_all_tables_like/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03403_truncate_all_tables_like/metadata.json b/parser/testdata/03403_truncate_all_tables_like/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03403_truncate_all_tables_like/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03403_truncate_all_tables_like/query.sql b/parser/testdata/03403_truncate_all_tables_like/query.sql new file mode 100644 index 000000000..8a5430c6f --- /dev/null +++ b/parser/testdata/03403_truncate_all_tables_like/query.sql @@ -0,0 +1,76 @@ +-- Tags: no-replicated-database + +CREATE TABLE IF NOT EXISTS truncate_test_set(id UInt64) ENGINE = Set; +CREATE TABLE IF NOT EXISTS truncate_test_log(id UInt64) ENGINE = Log; +CREATE TABLE IF NOT EXISTS truncate_test_memory(id UInt64) ENGINE = Memory; +CREATE TABLE IF NOT EXISTS truncate_test_tiny_log(id UInt64) ENGINE = TinyLog; +CREATE TABLE IF NOT EXISTS truncate_test_stripe_log(id UInt64) ENGINE = StripeLog; +CREATE TABLE IF NOT EXISTS truncate_test_merge_tree(p Date, k UInt64) ENGINE = MergeTree ORDER BY p; + +SELECT '======Before Truncate======'; +INSERT INTO truncate_test_set VALUES(0); +INSERT INTO truncate_test_log VALUES(1); +INSERT INTO truncate_test_memory VALUES(1); +INSERT INTO truncate_test_tiny_log VALUES(1); +INSERT INTO truncate_test_stripe_log VALUES(1); +INSERT INTO truncate_test_merge_tree VALUES('2000-01-01', 1); +SELECT * FROM system.numbers WHERE number NOT IN truncate_test_set LIMIT 1; +SELECT * FROM truncate_test_log; +SELECT * FROM truncate_test_memory; +SELECT * FROM truncate_test_tiny_log; +SELECT * FROM truncate_test_stripe_log; +SELECT * FROM truncate_test_merge_tree; + +SELECT '======Testing Truncate Without ALL Keyword======'; +TRUNCATE TABLES FROM IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; +SELECT * FROM system.numbers WHERE number NOT IN truncate_test_set LIMIT 1; +SELECT * FROM truncate_test_log; +SELECT * FROM truncate_test_memory; +SELECT * FROM truncate_test_tiny_log; +SELECT * FROM truncate_test_stripe_log; +SELECT * FROM truncate_test_merge_tree; + +SELECT '======Insert Values Again======'; +INSERT INTO truncate_test_set VALUES(0); +INSERT INTO truncate_test_log VALUES(1); +INSERT INTO truncate_test_memory VALUES(1); +INSERT INTO truncate_test_tiny_log VALUES(1); +INSERT INTO truncate_test_stripe_log VALUES(1); +INSERT INTO truncate_test_merge_tree VALUES('2000-01-01', 1); + +SELECT '======Truncate With LIKE Keyword======'; +TRUNCATE TABLES FROM IF EXISTS {CLICKHOUSE_DATABASE:Identifier} LIKE '%merge_tree'; +SELECT * FROM truncate_test_stripe_log; +SELECT * FROM truncate_test_merge_tree; + +SELECT '======Insert Values Again======'; +INSERT INTO truncate_test_merge_tree VALUES('2000-01-01', 1); + +SELECT '======Truncate With NOT LIKE Keyword======'; +TRUNCATE TABLES FROM IF EXISTS {CLICKHOUSE_DATABASE:Identifier} NOT LIKE '%merge_tree'; +SELECT * FROM truncate_test_stripe_log; +SELECT * FROM truncate_test_merge_tree; + +TRUNCATE TABLES FROM IF EXISTS {CLICKHOUSE_DATABASE:Identifier} NOT LIKE '%stripe%'; +SELECT * FROM truncate_test_stripe_log; +SELECT * FROM truncate_test_merge_tree; + +SELECT '======After Truncate And Insert Data======'; +INSERT INTO truncate_test_set VALUES(0); +INSERT INTO truncate_test_log VALUES(1); +INSERT INTO truncate_test_memory VALUES(1); +INSERT INTO truncate_test_tiny_log VALUES(1); +INSERT INTO truncate_test_stripe_log VALUES(1); +INSERT INTO truncate_test_merge_tree VALUES('2000-01-01', 1); +SELECT * FROM system.numbers WHERE number NOT IN truncate_test_set LIMIT 1; +SELECT * FROM truncate_test_log; +SELECT * FROM truncate_test_memory; +SELECT * FROM truncate_test_tiny_log; +SELECT * FROM truncate_test_stripe_log; +SELECT * FROM truncate_test_merge_tree; + 
+DROP TABLE truncate_test_log; +DROP TABLE truncate_test_memory; +DROP TABLE truncate_test_tiny_log; +DROP TABLE truncate_test_stripe_log; +DROP TABLE truncate_test_merge_tree; diff --git a/parser/testdata/03404_bfloat16_insert_values/ast.json b/parser/testdata/03404_bfloat16_insert_values/ast.json new file mode 100644 index 000000000..46577d6dc --- /dev/null +++ b/parser/testdata/03404_bfloat16_insert_values/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Basic tests'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001566594, + "rows_read": 5, + "bytes_read": 182 + } +} diff --git a/parser/testdata/03404_bfloat16_insert_values/metadata.json b/parser/testdata/03404_bfloat16_insert_values/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03404_bfloat16_insert_values/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03404_bfloat16_insert_values/query.sql b/parser/testdata/03404_bfloat16_insert_values/query.sql new file mode 100644 index 000000000..75cb3fe45 --- /dev/null +++ b/parser/testdata/03404_bfloat16_insert_values/query.sql @@ -0,0 +1,20 @@ +SELECT 'Basic tests'; +SELECT toBFloat16(-1) IN [0, 1, 2] AS result; +SELECT toBFloat16(-1) IN [-2, -1, 0, 1, 2] AS result; +SELECT toBFloat16(-1) IN [toFloat32(-2), toFloat32(-1), toFloat32(0), toFloat32(1), toFloat32(2)] AS result; +SELECT toBFloat16(-1) IN [toFloat64(-2), toFloat64(-1), toFloat64(0), toFloat64(1), toFloat64(2)] AS result; +SELECT toFloat64(-1) IN [toBFloat16(-2), toBFloat16(-1), toBFloat16(0), toBFloat16(1), toBFloat16(2)] AS result; + +SELECT 'Edge cases'; +SELECT toBFloat16(0) IN [-0] AS result; +SELECT toBFloat16(0/0) IN [0/0] AS result; +SELECT toBFloat16(0/0) IN [-0/0] AS result; +SELECT toBFloat16(1/0) IN [1/0] AS result; +SELECT toBFloat16(1/0) IN [-1/0] AS result; + +SELECT 'Compare to Float32'; +SELECT toFloat32(0) IN [-0] AS result; +SELECT toFloat32(0/0) IN [0/0] AS result; +SELECT toFloat32(0/0) IN [-0/0] AS result; +SELECT toFloat32(1/0) IN [1/0] AS result; +SELECT toFloat32(1/0) IN [-1/0] AS result; diff --git a/parser/testdata/03404_dynamic_in_interval_bug/ast.json b/parser/testdata/03404_dynamic_in_interval_bug/ast.json new file mode 100644 index 000000000..ab0af6315 --- /dev/null +++ b/parser/testdata/03404_dynamic_in_interval_bug/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toIntervalMinute (alias c0) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function toIntervalDay (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier c0" + }, + { + "explain": " Literal 'Dynamic'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001521404, + "rows_read": 13, + "bytes_read": 511 + } +} 
diff --git a/parser/testdata/03404_dynamic_in_interval_bug/metadata.json b/parser/testdata/03404_dynamic_in_interval_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03404_dynamic_in_interval_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03404_dynamic_in_interval_bug/query.sql b/parser/testdata/03404_dynamic_in_interval_bug/query.sql new file mode 100644 index 000000000..e6a6f0bf9 --- /dev/null +++ b/parser/testdata/03404_dynamic_in_interval_bug/query.sql @@ -0,0 +1,2 @@ +SELECT INTERVAL 1 MINUTE AS c0, INTERVAL c0::Dynamic DAY; + diff --git a/parser/testdata/03404_geotoh3_input_order/ast.json b/parser/testdata/03404_geotoh3_input_order/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03404_geotoh3_input_order/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03404_geotoh3_input_order/metadata.json b/parser/testdata/03404_geotoh3_input_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03404_geotoh3_input_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03404_geotoh3_input_order/query.sql b/parser/testdata/03404_geotoh3_input_order/query.sql new file mode 100644 index 000000000..e5abca3bf --- /dev/null +++ b/parser/testdata/03404_geotoh3_input_order/query.sql @@ -0,0 +1,7 @@ +-- Tags: no-fasttest +-- no-fasttest: h3ToGeo needs binary with Uber H3 library + +-- Test for setting 'geotoh3_argument_order' + +SELECT geoToH3(37.79506683, 55.71290588, 15) AS h3Index SETTINGS geotoh3_argument_order = 'lon_lat'; +SELECT geoToH3(55.71290588, 37.79506683, 15) AS h3Index SETTINGS geotoh3_argument_order = 'lat_lon'; diff --git a/parser/testdata/03404_json_tables/ast.json b/parser/testdata/03404_json_tables/ast.json new file mode 100644 index 000000000..cb3bbd8d3 --- /dev/null +++ b/parser/testdata/03404_json_tables/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001759674, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03404_json_tables/metadata.json b/parser/testdata/03404_json_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03404_json_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03404_json_tables/query.sql b/parser/testdata/03404_json_tables/query.sql new file mode 100644 index 000000000..5c531bbf3 --- /dev/null +++ b/parser/testdata/03404_json_tables/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 JSON) ENGINE = MergeTree() ORDER BY (c0); -- { serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY } +INSERT INTO TABLE t0 (c0) VALUES ('{"c1":1}'), ('{"c0":1}'); -- { serverError UNKNOWN_TABLE } +DELETE FROM t0 WHERE true; -- { serverError UNKNOWN_TABLE } +DROP TABLE t0; -- { serverError UNKNOWN_TABLE } + +SELECT '---'; + +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (c0 JSON) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY (c0); -- { serverError DATA_TYPE_CANNOT_BE_USED_IN_KEY } +INSERT INTO TABLE t1 (c0) VALUES ('{"c0":[null,true]}'); -- { serverError UNKNOWN_TABLE } +SELECT c0 FROM t1 ORDER BY c0; -- { serverError UNKNOWN_TABLE } +ALTER TABLE t1 APPLY DELETED MASK; -- { serverError UNKNOWN_TABLE } +SELECT c0 FROM t1 ORDER BY c0; -- {
serverError UNKNOWN_TABLE } +DROP TABLE t1; -- { serverError UNKNOWN_TABLE } diff --git a/parser/testdata/03404_lazy_materialization_distributed/ast.json b/parser/testdata/03404_lazy_materialization_distributed/ast.json new file mode 100644 index 000000000..f5d351b0e --- /dev/null +++ b/parser/testdata/03404_lazy_materialization_distributed/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tt (children 1)" + }, + { + "explain": " Identifier tt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001377154, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03404_lazy_materialization_distributed/metadata.json b/parser/testdata/03404_lazy_materialization_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03404_lazy_materialization_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03404_lazy_materialization_distributed/query.sql b/parser/testdata/03404_lazy_materialization_distributed/query.sql new file mode 100644 index 000000000..67c5a9f44 --- /dev/null +++ b/parser/testdata/03404_lazy_materialization_distributed/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS tt; +CREATE TABLE tt (k UInt64, v String, blob String) ENGINE=MergeTree() ORDER BY tuple(); +INSERT INTO tt SELECT number, toString(number), repeat('blob_bob', number) FROM numbers(1, 10); + +-- make sure the optimization is enabled +set query_plan_optimize_lazy_materialization=true, query_plan_max_limit_for_lazy_materialization=10; + +SELECT + v, + blob +FROM clusterAllReplicas(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), tt) +ORDER BY k +LIMIT 3; + +DROP TABLE tt; diff --git a/parser/testdata/03404_ubsan_distinct_join_const_column/ast.json b/parser/testdata/03404_ubsan_distinct_join_const_column/ast.json new file mode 100644 index 000000000..9cff143bb --- /dev/null +++ b/parser/testdata/03404_ubsan_distinct_join_const_column/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001021106, + "rows_read": 5, + "bytes_read": 173 + } +} diff --git a/parser/testdata/03404_ubsan_distinct_join_const_column/metadata.json b/parser/testdata/03404_ubsan_distinct_join_const_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03404_ubsan_distinct_join_const_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03404_ubsan_distinct_join_const_column/query.sql b/parser/testdata/03404_ubsan_distinct_join_const_column/query.sql new file mode 100644 index 000000000..be2f52761 --- /dev/null +++ b/parser/testdata/03404_ubsan_distinct_join_const_column/query.sql @@ -0,0 +1,18 @@ +SELECT DISTINCT t +FROM +( + SELECT CAST('[(1, \'a\')]', 'String') AS t + GROUP BY + GROUPING SETS ( + (1), + (printf(printf(NULL, 7, printf(isNullable(7), 7, '%%d: %d', 7, 7, 7, NULL), 7, materialize(toUInt256(7)), '%%d: %d', toNullable(7) IS NULL, 7), materialize(toNullable(NULL)))), + (isZeroOrNull(isNullable(7)) IS NULL)) +) AS na, +( + SELECT CAST(toNullable('[(1, \'a\')]'), 'String') AS t + GROUP BY + 
1, + toNullable(toUInt256(123)), + printf(printf(NULL, *, printf(NULL, 7, 7, 7, '%%d: %d', 7, 7, materialize(7), 7, 7), 7, 7, '%%d: %d', 7, 7, 7)) +) +SETTINGS joined_subquery_requires_alias = 0; diff --git a/parser/testdata/03405_bool_array_to_fixed_strings/ast.json b/parser/testdata/03405_bool_array_to_fixed_strings/ast.json new file mode 100644 index 000000000..8cfd50f66 --- /dev/null +++ b/parser/testdata/03405_bool_array_to_fixed_strings/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[Bool_1, Bool_0]" + }, + { + "explain": " Literal 'Array(FixedString(5))'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001312236, + "rows_read": 8, + "bytes_read": 316 + } +} diff --git a/parser/testdata/03405_bool_array_to_fixed_strings/metadata.json b/parser/testdata/03405_bool_array_to_fixed_strings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03405_bool_array_to_fixed_strings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03405_bool_array_to_fixed_strings/query.sql b/parser/testdata/03405_bool_array_to_fixed_strings/query.sql new file mode 100644 index 000000000..347415978 --- /dev/null +++ b/parser/testdata/03405_bool_array_to_fixed_strings/query.sql @@ -0,0 +1,5 @@ +select [true, false]::Array(FixedString(5)); + +select arrayMap(x -> x::FixedString(5), [true, false]); + +select [false, true]::Array(FixedString(5)); diff --git a/parser/testdata/03405_join_using_alias_constant/ast.json b/parser/testdata/03405_join_using_alias_constant/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03405_join_using_alias_constant/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03405_join_using_alias_constant/metadata.json b/parser/testdata/03405_join_using_alias_constant/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03405_join_using_alias_constant/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03405_join_using_alias_constant/query.sql b/parser/testdata/03405_join_using_alias_constant/query.sql new file mode 100644 index 000000000..e6947200d --- /dev/null +++ b/parser/testdata/03405_join_using_alias_constant/query.sql @@ -0,0 +1,36 @@ +-- Tags: no-parallel-replicas + +-- FIXME: with parallel replicas: +-- Cannot convert column `__table1.b` because it is non constant in source stream but must be constant in result. 
(ILLEGAL_COLUMN) + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t1lc; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t2lc; + +SET allow_suspicious_low_cardinality_types = 1; +SET enable_analyzer = 1; + +CREATE TABLE t1 (`a` UInt64, `b` Int32 ALIAS 1) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE t1lc (`a` UInt64, `b` LowCardinality(Int32) ALIAS 1) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t1 VALUES (1), (2), (3); +INSERT INTO t1lc VALUES (1), (2), (3); + +CREATE TABLE t2 (`a` UInt64, `b` Nullable(Int64) ) ENGINE = MergeTree ORDER BY tuple(); +CREATE TABLE t2lc (`a` UInt64, `b` LowCardinality(Nullable(Int64)) ) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t2 VALUES (1, 1), (2, 1), (3, 3); +INSERT INTO t2lc VALUES (1, 1), (2, 1), (3, 3); + +SELECT b FROM t1 JOIN t2 USING (b) ORDER BY ALL; +SELECT b FROM t1lc JOIN t2lc USING (b) ORDER BY ALL; +SELECT b FROM t1lc JOIN t2 USING (b) ORDER BY ALL; + +SELECT * FROM t1 JOIN t2 USING (b) ORDER BY ALL; +SELECT * FROM t1lc JOIN t2lc USING (b) ORDER BY ALL; +SELECT * FROM t1lc JOIN t2 USING (b) ORDER BY ALL; + +SELECT t1.* FROM t1 JOIN t2 USING (b) ORDER BY ALL; +SELECT t1.b FROM t1 JOIN t2 USING (b) ORDER BY ALL; +SELECT t1.*, t2.* FROM t1 JOIN t2 USING (b) ORDER BY ALL; +SELECT t1lc.*, t2lc.* FROM t1lc JOIN t2lc USING (b) ORDER BY ALL; +SELECT t1lc.*, t2.* FROM t1lc JOIN t2 USING (b) ORDER BY ALL; diff --git a/parser/testdata/03405_json_parsing_error_bug/ast.json b/parser/testdata/03405_json_parsing_error_bug/ast.json new file mode 100644 index 000000000..43a884182 --- /dev/null +++ b/parser/testdata/03405_json_parsing_error_bug/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '[\\'{}\\', \\'{\"c\" : [1, {\"b\" : []}]}\\']'" + }, + { + "explain": " Literal 'Array(JSON)'" + }, + { + "explain": " Set" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.00157932, + "rows_read": 9, + "bytes_read": 337 + } +} diff --git a/parser/testdata/03405_json_parsing_error_bug/metadata.json b/parser/testdata/03405_json_parsing_error_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03405_json_parsing_error_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03405_json_parsing_error_bug/query.sql b/parser/testdata/03405_json_parsing_error_bug/query.sql new file mode 100644 index 000000000..70c76edb8 --- /dev/null +++ b/parser/testdata/03405_json_parsing_error_bug/query.sql @@ -0,0 +1,2 @@ +select ['{}', '{"c" : [1, {"b" : []}]}']::Array(JSON) settings input_format_json_infer_incomplete_types_as_strings=0; -- {serverError INCORRECT_DATA} + diff --git a/parser/testdata/03405_merge_filter_into_join/ast.json b/parser/testdata/03405_merge_filter_into_join/ast.json new file mode 100644 index 000000000..ebd129ebf --- /dev/null +++ b/parser/testdata/03405_merge_filter_into_join/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery users (children 3)" + }, + { + "explain": " Identifier users" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " 
ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration uid (children 1)" + }, + { + "explain": " DataType Int16" + }, + { + "explain": " ColumnDeclaration name (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration age (children 1)" + }, + { + "explain": " DataType Int16" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001533665, + "rows_read": 14, + "bytes_read": 485 + } +} diff --git a/parser/testdata/03405_merge_filter_into_join/metadata.json b/parser/testdata/03405_merge_filter_into_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03405_merge_filter_into_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03405_merge_filter_into_join/query.sql b/parser/testdata/03405_merge_filter_into_join/query.sql new file mode 100644 index 000000000..60256573b --- /dev/null +++ b/parser/testdata/03405_merge_filter_into_join/query.sql @@ -0,0 +1,29 @@ +CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=MergeTree ORDER BY tuple(); + +INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users VALUES (6666, 'Ksenia', 48); +INSERT INTO users VALUES (8888, 'Alice', 50); + +-- For some reason planner sometimes decides to swap tables. +-- It breaks test because it prints query plan with actions. +set query_plan_join_swap_table = 0; +set enable_analyzer = 1; -- Optimization requires LogicalJoinStep +set enable_parallel_replicas = 0; -- Optimization requires LogicalJoinStep +set parallel_hash_join_threshold = 0; + +-- { echoOn } + +EXPLAIN PLAN actions = 1 +SELECT * FROM (SELECT * FROM users u1 INNER JOIN users u2 ON 1) WHERE age = u2.age ORDER BY ALL; +SELECT * FROM (SELECT * FROM users u1 INNER JOIN users u2 ON 1) WHERE age = u2.age ORDER BY ALL; + +EXPLAIN PLAN actions = 1 +SELECT * FROM (SELECT * FROM users u1 CROSS JOIN users u2) WHERE age = u2.age ORDER BY ALL; +SELECT * FROM (SELECT * FROM users u1 CROSS JOIN users u2) WHERE age = u2.age ORDER BY ALL; + +EXPLAIN PLAN actions = 1 +SELECT * FROM (SELECT * FROM users u1 SEMI JOIN users u2 ON 1) WHERE age = u2.age ORDER BY ALL; +EXPLAIN PLAN actions = 1 +SELECT * FROM (SELECT * FROM users u1 FULL JOIN users u2 ON 1) WHERE age = u2.age ORDER BY ALL; +EXPLAIN PLAN actions = 1 +SELECT * FROM (SELECT * FROM users u1 ANTI JOIN users u2 ON 1) WHERE age = u2.age ORDER BY ALL; diff --git a/parser/testdata/03405_naive_bayes_classifier_token/ast.json b/parser/testdata/03405_naive_bayes_classifier_token/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03405_naive_bayes_classifier_token/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03405_naive_bayes_classifier_token/metadata.json b/parser/testdata/03405_naive_bayes_classifier_token/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03405_naive_bayes_classifier_token/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03405_naive_bayes_classifier_token/query.sql b/parser/testdata/03405_naive_bayes_classifier_token/query.sql new file mode 100644 index 000000000..0014c256f --- /dev/null +++ b/parser/testdata/03405_naive_bayes_classifier_token/query.sql @@ -0,0 +1,9 @@ +-- Tags: no-fasttest +-- no-fasttest: depends on model binary and 
model details via config files + +SELECT naiveBayesClassifier('sentiment_token_1', 'The interface is beautiful and intuitive'); +SELECT naiveBayesClassifier('sentiment_token_1', 'This product is amazing in every way'); +SELECT naiveBayesClassifier('sentiment_token_1', 'I am impressed by the excellent quality'); +SELECT naiveBayesClassifier('sentiment_token_1', 'The app is awful and barely works'); +SELECT naiveBayesClassifier('sentiment_token_1', 'Customer support was horrible today'); +SELECT naiveBayesClassifier('sentiment_token_1', 'This experience was a total disaster'); diff --git a/parser/testdata/03405_non_zero_batch_mode/ast.json b/parser/testdata/03405_non_zero_batch_mode/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03405_non_zero_batch_mode/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03405_non_zero_batch_mode/metadata.json b/parser/testdata/03405_non_zero_batch_mode/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03405_non_zero_batch_mode/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03405_non_zero_batch_mode/query.sql b/parser/testdata/03405_non_zero_batch_mode/query.sql new file mode 100644 index 000000000..9ffa5763e --- /dev/null +++ b/parser/testdata/03405_non_zero_batch_mode/query.sql @@ -0,0 +1,2 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/78392 +SET output_format_parquet_batch_size = 0; -- { serverError BAD_ARGUMENTS } \ No newline at end of file diff --git a/parser/testdata/03405_ssd_cache_incorrect_min_max_lifetimes_and_block_size/ast.json b/parser/testdata/03405_ssd_cache_incorrect_min_max_lifetimes_and_block_size/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03405_ssd_cache_incorrect_min_max_lifetimes_and_block_size/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03405_ssd_cache_incorrect_min_max_lifetimes_and_block_size/metadata.json b/parser/testdata/03405_ssd_cache_incorrect_min_max_lifetimes_and_block_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03405_ssd_cache_incorrect_min_max_lifetimes_and_block_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03405_ssd_cache_incorrect_min_max_lifetimes_and_block_size/query.sql b/parser/testdata/03405_ssd_cache_incorrect_min_max_lifetimes_and_block_size/query.sql new file mode 100644 index 000000000..84010bf4b --- /dev/null +++ b/parser/testdata/03405_ssd_cache_incorrect_min_max_lifetimes_and_block_size/query.sql @@ -0,0 +1,19 @@ +-- Tags: no-ordinary-database + +-- Tests that various conditions are checked during creation of 'ssd_cache' and 'complex_key_ssd_cache' dictionaries + +-- Github issue #78314 + +DROP DICTIONARY IF EXISTS dict; + +SELECT 'BLOCK_SIZE is a negative value.'; +CREATE DICTIONARY dict (col Int64 default null) PRIMARY KEY (col) SOURCE(NULL()) LAYOUT(SSD_CACHE(BLOCK_SIZE -1)) LIFETIME(1); -- { serverError BAD_ARGUMENTS } +CREATE DICTIONARY dict (col Int64 default null) PRIMARY KEY (col) SOURCE(NULL()) LAYOUT(COMPLEX_KEY_SSD_CACHE(BLOCK_SIZE -1)) LIFETIME(1); -- { serverError BAD_ARGUMENTS } + +SELECT 'BLOCK_SIZE is zero.'; +CREATE DICTIONARY dict (col Int64 default null) PRIMARY KEY (col) SOURCE(NULL()) LAYOUT(SSD_CACHE(BLOCK_SIZE 0)) LIFETIME(1); -- { serverError BAD_ARGUMENTS } +CREATE DICTIONARY dict (col Int64 default null) PRIMARY KEY (col) SOURCE(NULL()) LAYOUT(COMPLEX_KEY_SSD_CACHE(BLOCK_SIZE 
0)) LIFETIME(1); -- { serverError BAD_ARGUMENTS } + +SELECT 'WRITE_BUFFER_SIZE is zero.'; +CREATE DICTIONARY dict (col Int64 default null) PRIMARY KEY (col) SOURCE(NULL()) LAYOUT(SSD_CACHE(WRITE_BUFFER_SIZE 0)) LIFETIME(1); -- { serverError BAD_ARGUMENTS } +CREATE DICTIONARY dict (col Int64 default null) PRIMARY KEY (col) SOURCE(NULL()) LAYOUT(COMPLEX_KEY_SSD_CACHE(WRITE_BUFFER_SIZE 0)) LIFETIME(1); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03406_dictionary_incorrect_min_max_lifetimes/ast.json b/parser/testdata/03406_dictionary_incorrect_min_max_lifetimes/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03406_dictionary_incorrect_min_max_lifetimes/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03406_dictionary_incorrect_min_max_lifetimes/metadata.json b/parser/testdata/03406_dictionary_incorrect_min_max_lifetimes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03406_dictionary_incorrect_min_max_lifetimes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03406_dictionary_incorrect_min_max_lifetimes/query.sql b/parser/testdata/03406_dictionary_incorrect_min_max_lifetimes/query.sql new file mode 100644 index 000000000..a0aec5e78 --- /dev/null +++ b/parser/testdata/03406_dictionary_incorrect_min_max_lifetimes/query.sql @@ -0,0 +1,21 @@ +-- Tags: no-ordinary-database + +-- Tests that various lifetime conditions are checked during creation of a dictionary + +-- Github issue #78314 + +DROP DICTIONARY IF EXISTS dict; +DROP TABLE IF EXISTS tbl; + +CREATE TABLE tbl (col Int) ENGINE = Memory; + +SELECT 'MIN is a negative value.'; +CREATE DICTIONARY dict (col Int DEFAULT 1) PRIMARY KEY (col) SOURCE(CLICKHOUSE(TABLE 'tbl')) LAYOUT(HASHED_ARRAY()) LIFETIME(MIN -1 MAX 0); -- { clientError SYNTAX_ERROR } + +SELECT 'MAX is a negative value.'; +CREATE DICTIONARY dict (col Int DEFAULT 1) PRIMARY KEY (col) SOURCE(CLICKHOUSE(TABLE 'tbl')) LAYOUT(HASHED_ARRAY()) LIFETIME(MIN 0 MAX -1); -- { clientError SYNTAX_ERROR } + +SELECT 'MIN is greater than MAX.'; +CREATE DICTIONARY dict (col Int DEFAULT 1) PRIMARY KEY (col) SOURCE(CLICKHOUSE(TABLE 'tbl')) LAYOUT(HASHED_ARRAY()) LIFETIME(MIN 1 MAX 0); -- { serverError BAD_ARGUMENTS } + +DROP TABLE tbl; diff --git a/parser/testdata/03406_naive_bayes_classifier_codepoint/ast.json b/parser/testdata/03406_naive_bayes_classifier_codepoint/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03406_naive_bayes_classifier_codepoint/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03406_naive_bayes_classifier_codepoint/metadata.json b/parser/testdata/03406_naive_bayes_classifier_codepoint/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03406_naive_bayes_classifier_codepoint/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03406_naive_bayes_classifier_codepoint/query.sql b/parser/testdata/03406_naive_bayes_classifier_codepoint/query.sql new file mode 100644 index 000000000..762349783 --- /dev/null +++ b/parser/testdata/03406_naive_bayes_classifier_codepoint/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-fasttest +-- no-fasttest: depends on model binary and model details via config files + +/* +Output language code mapping: + Bengali 0 + Mandarin Chinese 1 + German 2 + Greek 3 + English 4 + French 5 + Russian 6 + Spanish 7 +*/ + +SELECT naiveBayesClassifier('lang_codepoint_1', 'আজ আকাশটা খুব 
পরিষ্কার।'); +SELECT naiveBayesClassifier('lang_codepoint_1', '她每天早上喝一杯绿茶'); +SELECT naiveBayesClassifier('lang_codepoint_1', 'Der Hund schläft unter dem Tisch.'); +SELECT naiveBayesClassifier('lang_codepoint_1', 'Το ποδήλατο είναι δίπλα στο δέντρο.'); +SELECT naiveBayesClassifier('lang_codepoint_1', 'He forgot his umbrella at the cafe.'); +SELECT naiveBayesClassifier('lang_codepoint_1', 'Le chat regarde par la fenêtre'); +SELECT naiveBayesClassifier('lang_codepoint_1', 'Мы гуляли в парке до заката'); +SELECT naiveBayesClassifier('lang_codepoint_1', 'Ella escribe una carta a su abuela'); diff --git a/parser/testdata/03406_reservoir_sample_self_merging/ast.json b/parser/testdata/03406_reservoir_sample_self_merging/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03406_reservoir_sample_self_merging/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03406_reservoir_sample_self_merging/metadata.json b/parser/testdata/03406_reservoir_sample_self_merging/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03406_reservoir_sample_self_merging/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03406_reservoir_sample_self_merging/query.sql b/parser/testdata/03406_reservoir_sample_self_merging/query.sql new file mode 100644 index 000000000..679522fef --- /dev/null +++ b/parser/testdata/03406_reservoir_sample_self_merging/query.sql @@ -0,0 +1,14 @@ +SELECT finalizeAggregation(10 * ( + SELECT medianState(number) + FROM numbers(10) +)); + +SELECT finalizeAggregation(10 * ( + SELECT medianState(number) + FROM numbers(1000) +)); + +SELECT finalizeAggregation(10 * ( + SELECT medianState(number) + FROM numbers(100000) +)); diff --git a/parser/testdata/03407_csv_bad_date_time_parsing/ast.json b/parser/testdata/03407_csv_bad_date_time_parsing/ast.json new file mode 100644 index 000000000..e0c909f00 --- /dev/null +++ b/parser/testdata/03407_csv_bad_date_time_parsing/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier CSV" + }, + { + "explain": " Literal 'd DateTime64(3)'" + }, + { + "explain": " Literal '1744042005 797'" + }, + { + "explain": " Set" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001220817, + "rows_read": 14, + "bytes_read": 513 + } +} diff --git a/parser/testdata/03407_csv_bad_date_time_parsing/metadata.json b/parser/testdata/03407_csv_bad_date_time_parsing/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03407_csv_bad_date_time_parsing/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03407_csv_bad_date_time_parsing/query.sql b/parser/testdata/03407_csv_bad_date_time_parsing/query.sql new file mode 100644 index 000000000..8dde2194f --- /dev/null +++ b/parser/testdata/03407_csv_bad_date_time_parsing/query.sql @@ -0,0 +1,3 @@ +select * from format(CSV, 'd DateTime64(3)', 
'1744042005 797') settings date_time_input_format='best_effort'; -- {serverError UNEXPECTED_DATA_AFTER_PARSED_VALUE} +select * from format(CSV, 'd DateTime', '1744042005 797') settings date_time_input_format='best_effort'; -- {serverError UNEXPECTED_DATA_AFTER_PARSED_VALUE} + diff --git a/parser/testdata/03407_naive_bayes_classifier_byte/ast.json b/parser/testdata/03407_naive_bayes_classifier_byte/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03407_naive_bayes_classifier_byte/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03407_naive_bayes_classifier_byte/metadata.json b/parser/testdata/03407_naive_bayes_classifier_byte/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03407_naive_bayes_classifier_byte/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03407_naive_bayes_classifier_byte/query.sql b/parser/testdata/03407_naive_bayes_classifier_byte/query.sql new file mode 100644 index 000000000..65a28d8c9 --- /dev/null +++ b/parser/testdata/03407_naive_bayes_classifier_byte/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-fasttest +-- no-fasttest: depends on model binary and model details via config files + +/* +Output language code mapping: + Bengali 0 + Mandarin Chinese 1 + German 2 + Greek 3 + English 4 + French 5 + Russian 6 + Spanish 7 +*/ + +SELECT naiveBayesClassifier('lang_byte_2', 'বইটি টেবিলের উপর রাখা আছে।'); +SELECT naiveBayesClassifier('lang_byte_2', '他们正在公园里散步'); +SELECT naiveBayesClassifier('lang_byte_2', 'Er kocht Suppe für seine Familie'); +SELECT naiveBayesClassifier('lang_byte_2', 'Η βροχή σταμάτησε πριν από λίγο'); +SELECT naiveBayesClassifier('lang_byte_2', 'She painted the wall a bright yellow'); +SELECT naiveBayesClassifier('lang_byte_2', 'Nous attendons le bus depuis dix minutes.'); +SELECT naiveBayesClassifier('lang_byte_2', 'На кухне пахнет свежим хлебом.'); +SELECT naiveBayesClassifier('lang_byte_2', 'Los niños juegan en la arena.'); diff --git a/parser/testdata/03407_parse_date_time_best_effort_unix_timestamp_with_fraction/ast.json b/parser/testdata/03407_parse_date_time_best_effort_unix_timestamp_with_fraction/ast.json new file mode 100644 index 000000000..8b61f3f5f --- /dev/null +++ b/parser/testdata/03407_parse_date_time_best_effort_unix_timestamp_with_fraction/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001349845, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03407_parse_date_time_best_effort_unix_timestamp_with_fraction/metadata.json b/parser/testdata/03407_parse_date_time_best_effort_unix_timestamp_with_fraction/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03407_parse_date_time_best_effort_unix_timestamp_with_fraction/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03407_parse_date_time_best_effort_unix_timestamp_with_fraction/query.sql b/parser/testdata/03407_parse_date_time_best_effort_unix_timestamp_with_fraction/query.sql new file mode 100644 index 000000000..26b715c8a --- /dev/null +++ b/parser/testdata/03407_parse_date_time_best_effort_unix_timestamp_with_fraction/query.sql @@ -0,0 +1,22 @@ +set session_timezone='UTC'; + +select parseDateTime64BestEffort('1744042005.1', 1); +select parseDateTime64BestEffort('1744042005.12', 2); +select 
parseDateTime64BestEffort('1744042005.123', 3); +select parseDateTime64BestEffort('1744042005.1234', 4); +select parseDateTime64BestEffort('1744042005.12345', 5); +select parseDateTime64BestEffort('1744042005.123456', 6); +select parseDateTime64BestEffort('1744042005.1234567', 7); +select parseDateTime64BestEffort('1744042005.12345678', 8); +select parseDateTime64BestEffort('1744042005.123456789', 9); + +select parseDateTime64BestEffort('174404200.1', 1); +select parseDateTime64BestEffort('174404200.12', 2); +select parseDateTime64BestEffort('174404200.123', 3); +select parseDateTime64BestEffort('174404200.1234', 4); +select parseDateTime64BestEffort('174404200.12345', 5); +select parseDateTime64BestEffort('174404200.123456', 6); +select parseDateTime64BestEffort('174404200.1234567', 7); +select parseDateTime64BestEffort('174404200.12345678', 8); +select parseDateTime64BestEffort('174404200.123456789', 9); + diff --git a/parser/testdata/03408_analyzer_correlated_subquery_simple/ast.json b/parser/testdata/03408_analyzer_correlated_subquery_simple/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03408_analyzer_correlated_subquery_simple/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03408_analyzer_correlated_subquery_simple/metadata.json b/parser/testdata/03408_analyzer_correlated_subquery_simple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03408_analyzer_correlated_subquery_simple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03408_analyzer_correlated_subquery_simple/query.sql b/parser/testdata/03408_analyzer_correlated_subquery_simple/query.sql new file mode 100644 index 000000000..af0393bd9 --- /dev/null +++ b/parser/testdata/03408_analyzer_correlated_subquery_simple/query.sql @@ -0,0 +1,32 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/72459 + +CREATE TABLE t1 (c1 UInt64, c2 UInt64) ORDER BY c1; +CREATE TABLE t2 (c1 UInt64) ORDER BY c1; + +INSERT INTO t1 SELECT number, number % 100 FROM numbers(100); +INSERT INTO t2 SELECT number*number FROM numbers(100); + +set enable_analyzer = 1; +set allow_experimental_correlated_subqueries = 1; + +-- { echoOn } + +SELECT count(t1.c1) FROM t1 +WHERE +t1.c2 = 10 +AND EXISTS (SELECT * FROM t2 WHERE t1.c1 = t2.c1); + +SELECT count(t1.c1) FROM t1 +WHERE +t1.c2 = 10 +AND t1.c1 IN (SELECT c1 FROM t2); + +SELECT count(t1.c1) FROM t1 +WHERE +t1.c2 = 10 +AND NOT EXISTS (SELECT * FROM t2 WHERE t1.c1 = t2.c1); + +SELECT count(t1.c1) FROM t1 +WHERE +t1.c2 = 10 +AND t1.c1 NOT IN (SELECT c1 FROM t2); diff --git a/parser/testdata/03408_cte_self_reference/ast.json b/parser/testdata/03408_cte_self_reference/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03408_cte_self_reference/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03408_cte_self_reference/metadata.json b/parser/testdata/03408_cte_self_reference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03408_cte_self_reference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03408_cte_self_reference/query.sql b/parser/testdata/03408_cte_self_reference/query.sql new file mode 100644 index 000000000..a5f559d8a --- /dev/null +++ b/parser/testdata/03408_cte_self_reference/query.sql @@ -0,0 +1,24 @@ +-- Tags: no-tsan +SET enable_analyzer = 1; + +WITH `cte1` AS ( + SELECT * + FROM ( + WITH `cte2` AS ( + SELECT + id, + key, + values, 
+ FROM cte1 + LEFT ARRAY JOIN + mapKeys(key_values) AS key, + mapValues(key_values) AS values + ) + SELECT + id, + key, + value + FROM cte2 LEFT ARRAY JOIN values AS value + ) +) +SELECT * FROM `cte1` -- { serverError UNKNOWN_TABLE } diff --git a/parser/testdata/03408_current_database_on_cluster_constraint/ast.json b/parser/testdata/03408_current_database_on_cluster_constraint/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03408_current_database_on_cluster_constraint/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03408_current_database_on_cluster_constraint/metadata.json b/parser/testdata/03408_current_database_on_cluster_constraint/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03408_current_database_on_cluster_constraint/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03408_current_database_on_cluster_constraint/query.sql b/parser/testdata/03408_current_database_on_cluster_constraint/query.sql new file mode 100644 index 000000000..d5d70302a --- /dev/null +++ b/parser/testdata/03408_current_database_on_cluster_constraint/query.sql @@ -0,0 +1,12 @@ +-- Tags: replica, no-parallel + +DROP DATABASE IF EXISTS shard_0; + +CREATE DATABASE shard_0; + +SET distributed_ddl_entry_format_version = 2; +SET distributed_ddl_output_mode='throw'; + +CREATE TABLE shard_0.t0 ON CLUSTER 'test_cluster_two_shards_different_databases' (c0 Int, CONSTRAINT cc CHECK currentDatabase()) ENGINE = MergeTree() ORDER BY tuple(); + +DROP DATABASE shard_0; diff --git a/parser/testdata/03408_hash_functions_on_null/ast.json b/parser/testdata/03408_hash_functions_on_null/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03408_hash_functions_on_null/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03408_hash_functions_on_null/metadata.json b/parser/testdata/03408_hash_functions_on_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03408_hash_functions_on_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03408_hash_functions_on_null/query.sql b/parser/testdata/03408_hash_functions_on_null/query.sql new file mode 100644 index 000000000..eb63ad7e1 --- /dev/null +++ b/parser/testdata/03408_hash_functions_on_null/query.sql @@ -0,0 +1,61 @@ +-- { echoOn } + +select xxHash32(null); +select xxHash64(null); +select xxHash64([]); +select xxHash64([null]); +select xxHash64([null, null]); +select xxHash64([null::Nullable(Int64)]); +select xxHash64([null::Nullable(String)]); +select xxHash64(tuple()); +select xxHash64(tuple(null)); +select xxHash64(tuple(null, null)); +select xxHash64(tuple(null::Nullable(Int64))); +select xxHash64(tuple(null::Nullable(String))); + +select xxHash32(materialize(null)); +select xxHash64(materialize(null)); +select xxHash64(materialize([])); +select xxHash64(materialize([null])); +select xxHash64(materialize([null, null])); +select xxHash64(materialize([null::Nullable(Int64)])); +select xxHash64(materialize([null::Nullable(String)])); +select xxHash64(materialize(tuple())); +select xxHash64(materialize(tuple(null))); +select xxHash64(materialize(tuple(null, null))); +select xxHash64(materialize(tuple(null::Nullable(Int64)))); +select xxHash64(materialize(tuple(null::Nullable(String)))); + +create table test_hash_on_null (a Array(Nullable(Int64))) engine Memory; +insert into test_hash_on_null values (null) ([null, null]); 
+select xxHash32(a) from test_hash_on_null; + +select cityHash64([1]); +select cityHash64([toNullable(1)]); +select cityHash64('hi'); +select cityHash64(tuple('hi')); +select cityHash64(tuple(toNullable('hi'))); +select cityHash64(tuple(toLowCardinality(toNullable('hi')))); +select cityHash64(materialize(tuple(toLowCardinality(toNullable('hi'))))); + +create table test_mix_null (a Nullable(Int64)) engine Memory; +insert into test_mix_null values (null) (toNullable(4)) (null) (toNullable(4454559)); +select a, xxHash32(a), xxHash32(tuple(a)) from test_mix_null; + +create table t (a Array(Tuple(x Nullable(Int64), y Map(Int64, Nullable(String)), z LowCardinality(Nullable(FixedString(16)))))) engine Memory; +insert into t values ([(null, map(10, null, 20, 'meow', 30, '', 40, null), 'fs'), (42, map(), null)]), ([]), ([(null, map(), null)]), ([(null, map(1, null), null), (1, map(2, 'hi'), 3)]); +select reinterpret(sipHash128(tuple(*)), 'UInt128') from t; +select cityHash64(tuple(*)) from t; +select cityHash64(*) from t; +select cityHash64(a.x) from t; +select cityHash64(a.y) from t; +select cityHash64(a.z) from t; + +--- Keyed. +select sipHash64Keyed(materialize((1::UInt64, 2::UInt64)), null) from numbers(2); +select sipHash64Keyed((1::UInt64, 2::UInt64), tuple(null)) from numbers(2); +select sipHash64Keyed(materialize((1::UInt64, 2::UInt64)), tuple(null)) from numbers(2); +select sipHash64Keyed((1::UInt64, number), tuple(null)) from numbers(3); + +-- Make sure all types are allowed. +select sum(ignore(cityHash64(tuple(*)))) from (select * from generateRandom() limit 100); diff --git a/parser/testdata/03408_implicit_table/ast.json b/parser/testdata/03408_implicit_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03408_implicit_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03408_implicit_table/metadata.json b/parser/testdata/03408_implicit_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03408_implicit_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03408_implicit_table/query.sql b/parser/testdata/03408_implicit_table/query.sql new file mode 100644 index 000000000..58e292ed3 --- /dev/null +++ b/parser/testdata/03408_implicit_table/query.sql @@ -0,0 +1,15 @@ +-- { echo } +SET implicit_select = 1, implicit_table_at_top_level = 'test', enable_analyzer = 1; +DROP TABLE IF EXISTS test; +CREATE TABLE test (s String) ENGINE = Memory; +INSERT INTO test VALUES ('Hello'), ('World'); + +s; +count(); +1; +*; +SELECT *, (SELECT 1); +SELECT * FROM (SELECT *); +SELECT * UNION ALL SELECT *; + +DROP TABLE test; diff --git a/parser/testdata/03408_limit_by_rows_before_limit/ast.json b/parser/testdata/03408_limit_by_rows_before_limit/ast.json new file mode 100644 index 000000000..f3cd347f3 --- /dev/null +++ b/parser/testdata/03408_limit_by_rows_before_limit/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001231117, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03408_limit_by_rows_before_limit/metadata.json b/parser/testdata/03408_limit_by_rows_before_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03408_limit_by_rows_before_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03408_limit_by_rows_before_limit/query.sql b/parser/testdata/03408_limit_by_rows_before_limit/query.sql new file mode 100644 index 000000000..9a897b5ed --- /dev/null +++ b/parser/testdata/03408_limit_by_rows_before_limit/query.sql @@ -0,0 +1,60 @@ +SET output_format_write_statistics = 0; + +DROP TABLE IF EXISTS 03408_unsorted; + +CREATE TABLE 03408_unsorted (id Int32, val String) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part=1 +AS +SELECT number % 10, leftPad(toString(number), 2, '0') FROM numbers(50); + +SELECT '-- Assert total number of groups and records in unsorted'; +SELECT uniqExact(id), count() FROM 03408_unsorted; + +SELECT ''; +SELECT '-- Assert rows_before_limit for unsorted ORDER BY + LIMIT BY + LIMIT'; + +SELECT id, val FROM 03408_unsorted ORDER BY id, val LIMIT 1 BY id LIMIT 3 +FORMAT JsonCompact SETTINGS max_block_size=1, exact_rows_before_limit=0; + +SELECT ''; +SELECT '-- Assert rows_before_limit for unsorted ORDER BY + LIMIT BY + LIMIT, exact'; + +SELECT id, val FROM 03408_unsorted ORDER BY id, val LIMIT 1 BY id LIMIT 3 +FORMAT JsonCompact SETTINGS max_block_size=1, exact_rows_before_limit=1; + +SELECT ''; +SELECT '-- Assert rows_before_limit for unsorted HAVING + ORDER BY + LIMIT BY + LIMIT, exact'; + +SELECT id, val FROM 03408_unsorted GROUP BY id, val HAVING id < 7 ORDER BY id, val DESC LIMIT 1 BY id LIMIT 3 +FORMAT JsonCompact SETTINGS max_block_size=1, exact_rows_before_limit=1; + +DROP TABLE 03408_unsorted; + +DROP TABLE IF EXISTS 03408_sorted; + +CREATE TABLE 03408_sorted (id Int32, val String) ENGINE = MergeTree ORDER BY (id, val) SETTINGS min_bytes_for_wide_part=1 +AS +SELECT number % 10, leftPad(toString(number), 2, '0') FROM numbers(50); + +SELECT ''; +SELECT '-- Assert total number of groups and records in sorted'; +SELECT uniqExact(id), count() FROM 03408_sorted; + +SELECT ''; +SELECT '-- Assert rows_before_limit for sorted ORDER BY + LIMIT BY + LIMIT'; + +SELECT id, val FROM 03408_sorted ORDER BY id, val LIMIT 1 BY id LIMIT 3 +FORMAT JsonCompact SETTINGS max_block_size=1, exact_rows_before_limit=0; + +SELECT ''; +SELECT '-- Assert rows_before_limit for sorted ORDER BY + LIMIT BY + LIMIT, exact'; + +SELECT id, val FROM 03408_sorted ORDER BY id, val LIMIT 1 BY id LIMIT 3 +FORMAT JsonCompact SETTINGS max_block_size=1, exact_rows_before_limit=1; + +SELECT ''; +SELECT '-- Assert rows_before_limit for sorted HAVING + ORDER BY + LIMIT BY + LIMIT, exact'; + +SELECT id, val FROM 03408_sorted GROUP BY id, val HAVING id < 7 ORDER BY id, val DESC LIMIT 1 BY id LIMIT 3 +FORMAT JsonCompact SETTINGS max_block_size=1, exact_rows_before_limit=1; + +DROP TABLE 03408_sorted; diff --git a/parser/testdata/03408_limit_by_rows_before_limit_dist/ast.json b/parser/testdata/03408_limit_by_rows_before_limit_dist/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03408_limit_by_rows_before_limit_dist/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03408_limit_by_rows_before_limit_dist/metadata.json b/parser/testdata/03408_limit_by_rows_before_limit_dist/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03408_limit_by_rows_before_limit_dist/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03408_limit_by_rows_before_limit_dist/query.sql b/parser/testdata/03408_limit_by_rows_before_limit_dist/query.sql new file mode 100644 index 000000000..48cb4a224 --- /dev/null +++ 
b/parser/testdata/03408_limit_by_rows_before_limit_dist/query.sql @@ -0,0 +1,42 @@ +-- Tags: shard + +SET output_format_write_statistics = 0; + +DROP TABLE IF EXISTS 03408_local; +DROP TABLE IF EXISTS 03408_dist; + +CREATE TABLE 03408_local (id Int32, val String) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part=1 +AS +SELECT number % 10, leftPad(toString(number), 2, '0') FROM numbers(50); + +CREATE TABLE 03408_dist(id Int32, val String) engine = Distributed(test_cluster_two_shards, currentDatabase(), 03408_local, id); + +SELECT '-- Assert total number of groups and records in distributed'; +SELECT uniqExact(id), count() FROM 03408_dist; + +SELECT ''; +SELECT '-- Assert rows_before_limit for distributed ORDER BY + LIMIT BY + LIMIT'; + +SELECT id, val FROM 03408_dist ORDER BY id, val LIMIT 1 BY id LIMIT 3 +FORMAT JsonCompact SETTINGS max_block_size=1, exact_rows_before_limit=0; + +SELECT ''; +SELECT '-- Assert rows_before_limit for distributed ORDER BY + LIMIT BY + LIMIT, exact'; + +SELECT id, val FROM 03408_dist ORDER BY id, val LIMIT 1 BY id LIMIT 3 +FORMAT JsonCompact SETTINGS max_block_size=1, exact_rows_before_limit=1; + +SELECT ''; +SELECT '-- Assert rows_before_limit for distributed HAVING + ORDER BY + LIMIT BY + LIMIT, exact'; + +SELECT id, val FROM 03408_dist GROUP BY id, val HAVING id < 7 ORDER BY id, val DESC LIMIT 1 BY id LIMIT 3 +FORMAT JsonCompact SETTINGS max_block_size=1, exact_rows_before_limit=1; + +SELECT ''; +SELECT '-- Assert rows_before_limit for distributed without LIMIT BY on initiator, exact'; + +SELECT id, max(val) FROM 03408_dist GROUP BY id ORDER BY id LIMIT 1 BY id LIMIT 4 +FORMAT JSONCompact SETTINGS max_block_size=1, exact_rows_before_limit = 1, distributed_group_by_no_merge=2; + +DROP TABLE 03408_local; +DROP TABLE 03408_dist; diff --git a/parser/testdata/03408_limit_by_rows_before_limit_mem/ast.json b/parser/testdata/03408_limit_by_rows_before_limit_mem/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03408_limit_by_rows_before_limit_mem/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03408_limit_by_rows_before_limit_mem/metadata.json b/parser/testdata/03408_limit_by_rows_before_limit_mem/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03408_limit_by_rows_before_limit_mem/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03408_limit_by_rows_before_limit_mem/query.sql b/parser/testdata/03408_limit_by_rows_before_limit_mem/query.sql new file mode 100644 index 000000000..8c80ff804 --- /dev/null +++ b/parser/testdata/03408_limit_by_rows_before_limit_mem/query.sql @@ -0,0 +1,24 @@ +-- Tags: no-parallel-replicas + +SET output_format_write_statistics = 0; + +DROP TABLE IF EXISTS 03408_memory; + +CREATE TABLE 03408_memory (id Int32, val String) ENGINE = Memory +AS +SELECT number % 10, leftPad(toString(number), 2, '0') FROM numbers(50); + +SELECT '-- Assert total number of groups and records in memory'; +SELECT uniqExact(id), count() FROM 03408_memory; + +SELECT ''; +SELECT '-- Assert rows_before_limit for memory ORDER BY + LIMIT BY + LIMIT, exact'; +SELECT id, val FROM 03408_memory ORDER BY id, val LIMIT 1 BY id LIMIT 3 +FORMAT JsonCompact SETTINGS exact_rows_before_limit=1; + +SELECT ''; +SELECT '-- Assert rows_before_limit for memory HAVING + ORDER BY + LIMIT BY + LIMIT, exact'; +SELECT id, val FROM 03408_memory GROUP BY id, val HAVING id < 7 ORDER BY id, val DESC LIMIT 1 BY id LIMIT 3 +FORMAT JsonCompact 
SETTINGS exact_rows_before_limit=1; + +DROP TABLE 03408_memory; diff --git a/parser/testdata/03409_coalescing_merge_tree/ast.json b/parser/testdata/03409_coalescing_merge_tree/ast.json new file mode 100644 index 000000000..dd1d42201 --- /dev/null +++ b/parser/testdata/03409_coalescing_merge_tree/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Test without parameters'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001212615, + "rows_read": 5, + "bytes_read": 194 + } +} diff --git a/parser/testdata/03409_coalescing_merge_tree/metadata.json b/parser/testdata/03409_coalescing_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03409_coalescing_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03409_coalescing_merge_tree/query.sql b/parser/testdata/03409_coalescing_merge_tree/query.sql new file mode 100644 index 000000000..d6b0b52a6 --- /dev/null +++ b/parser/testdata/03409_coalescing_merge_tree/query.sql @@ -0,0 +1,49 @@ +SELECT 'Test without parameters'; + +DROP TABLE IF EXISTS 03409_users; + +CREATE TABLE 03409_users +( + `uid` Int16, + `name` String, + `age` Nullable(Int16), + `age2` Nullable(Int16), + `version` Nullable(UInt8) +) +ENGINE = CoalescingMergeTree() +ORDER BY (uid, name); + +INSERT INTO 03409_users VALUES (111, 'John', 23, 12, 1); +INSERT INTO 03409_users VALUES (111, 'John', null, 34, null); +INSERT INTO 03409_users VALUES (111, 'John', null, null, 3); +INSERT INTO 03409_users VALUES (111, 'John', 52, null, 4); +INSERT INTO 03409_users VALUES (8888, 'Alice', 50, 50, 1); + +SELECT * FROM 03409_users FINAL ORDER BY ALL; +OPTIMIZE TABLE 03409_users FINAL; +SELECT * FROM 03409_users ORDER BY ALL; + +SELECT 'Test with parameters'; + +DROP TABLE IF EXISTS 03409_users; + +CREATE TABLE 03409_users +( + `uid` Int16, + `name` String, + `age` Nullable(Int16), + `age2` Nullable(Int16), + `version` Nullable(UInt8) +) +ENGINE = CoalescingMergeTree(version) +ORDER BY (uid, name); + +INSERT INTO 03409_users VALUES (111, 'John', 23, 12, 1); +INSERT INTO 03409_users VALUES (111, 'John', null, 34, 2); +INSERT INTO 03409_users VALUES (111, 'John', null, null, null); +INSERT INTO 03409_users VALUES (111, 'John', 52, null, 4); +INSERT INTO 03409_users VALUES (8888, 'Alice', 50, 50, 1); + +SELECT * FROM 03409_users FINAL ORDER BY ALL; +OPTIMIZE TABLE 03409_users FINAL; +SELECT * FROM 03409_users ORDER BY ALL; diff --git a/parser/testdata/03409_coalescing_replicated_merge_tree/ast.json b/parser/testdata/03409_coalescing_replicated_merge_tree/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03409_coalescing_replicated_merge_tree/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03409_coalescing_replicated_merge_tree/metadata.json b/parser/testdata/03409_coalescing_replicated_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03409_coalescing_replicated_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03409_coalescing_replicated_merge_tree/query.sql b/parser/testdata/03409_coalescing_replicated_merge_tree/query.sql new file mode 100644 index 
000000000..5e4ea392a --- /dev/null +++ b/parser/testdata/03409_coalescing_replicated_merge_tree/query.sql @@ -0,0 +1,55 @@ +-- Tags: replica, no-shared-merge-tree, no-distributed-cache +-- Tag no-shared-merge-tree: Requires update in private (@jkartseva) +-- Tag no-distributed-cache: Requires update in private (@jkartseva) + +SELECT 'Test without parameters'; + +DROP TABLE IF EXISTS 03409_users SYNC; + +CREATE TABLE 03409_users +( + `uid` Int16, + `name` String, + `age` Nullable(Int16), + `age2` Nullable(Int16), + `version` Nullable(UInt8) +) +ENGINE = ReplicatedCoalescingMergeTree('/clickhouse/tables/{database}/test_00754/summing', 'r1') +ORDER BY (uid, name); + +INSERT INTO 03409_users VALUES (111, 'John', 23, 12, 1); +INSERT INTO 03409_users VALUES (111, 'John', null, 34, null); +INSERT INTO 03409_users VALUES (111, 'John', null, null, 3); +INSERT INTO 03409_users VALUES (111, 'John', 52, null, 4); +INSERT INTO 03409_users VALUES (8888, 'Alice', 50, 50, 1); + +OPTIMIZE TABLE 03409_users FINAL; +SYSTEM SYNC REPLICA 03409_users; + +SELECT * FROM 03409_users ORDER BY ALL; + +SELECT 'Test with parameters'; + +DROP TABLE IF EXISTS 03409_users SYNC; + +CREATE TABLE 03409_users +( + `uid` Int16, + `name` String, + `age` Nullable(Int16), + `age2` Nullable(Int16), + `version` Nullable(UInt8) +) +ENGINE = ReplicatedCoalescingMergeTree('/clickhouse/tables/{database}/test_00754/summing', 'r1', version) +ORDER BY (uid, name); + +INSERT INTO 03409_users VALUES (111, 'John', 23, 12, 1); +INSERT INTO 03409_users VALUES (111, 'John', null, 34, 2); +INSERT INTO 03409_users VALUES (111, 'John', null, null, null); +INSERT INTO 03409_users VALUES (111, 'John', 52, null, 4); +INSERT INTO 03409_users VALUES (8888, 'Alice', 50, 50, 1); + +OPTIMIZE TABLE 03409_users FINAL; +SYSTEM SYNC REPLICA 03409_users; + +SELECT * FROM 03409_users ORDER BY ALL; diff --git a/parser/testdata/03409_variant_type_values_format_field_conversion/ast.json b/parser/testdata/03409_variant_type_values_format_field_conversion/ast.json new file mode 100644 index 000000000..8ba2355a9 --- /dev/null +++ b/parser/testdata/03409_variant_type_values_format_field_conversion/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001574021, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03409_variant_type_values_format_field_conversion/metadata.json b/parser/testdata/03409_variant_type_values_format_field_conversion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03409_variant_type_values_format_field_conversion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03409_variant_type_values_format_field_conversion/query.sql b/parser/testdata/03409_variant_type_values_format_field_conversion/query.sql new file mode 100644 index 000000000..164c3165d --- /dev/null +++ b/parser/testdata/03409_variant_type_values_format_field_conversion/query.sql @@ -0,0 +1,8 @@ +DROP TABLE IF EXISTS t; +CREATE TABLE t (v Variant(Map(String, Int32), Tuple(String, Int32))) ENGINE = Memory; +INSERT INTO t VALUES +(map('a', 1)), +(('b', 1)); +SELECT * FROM t; +DROP TABLE t; + diff --git a/parser/testdata/03410_polygons_intersects/ast.json b/parser/testdata/03410_polygons_intersects/ast.json new file mode 100644 index 000000000..bcc2c2de9 --- /dev/null +++ 
b/parser/testdata/03410_polygons_intersects/ast.json @@ -0,0 +1,115 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function polygonsIntersectCartesian (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 8)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_0)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_3)" + }, + { + "explain": " Literal Tuple_(UInt64_1, Float64_2.9)" + }, + { + "explain": " Literal Tuple_(UInt64_2, Float64_2.6)" + }, + { + "explain": " Literal Tuple_(Float64_2.6, UInt64_2)" + }, + { + "explain": " Literal Tuple_(Float64_2.9, UInt64_1)" + }, + { + "explain": " Literal Tuple_(UInt64_3, UInt64_0)" + }, + { + "explain": " Literal Tuple_(UInt64_0, UInt64_0)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_1)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_4)" + }, + { + "explain": " Literal Tuple_(Float64_4, Float64_4)" + }, + { + "explain": " Literal Tuple_(Float64_4, Float64_1)" + }, + { + "explain": " Literal Tuple_(Float64_1, Float64_1)" + } + ], + + "rows": 31, + + "statistics": + { + "elapsed": 0.002119585, + "rows_read": 31, + "bytes_read": 1495 + } +} diff --git a/parser/testdata/03410_polygons_intersects/metadata.json b/parser/testdata/03410_polygons_intersects/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03410_polygons_intersects/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03410_polygons_intersects/query.sql b/parser/testdata/03410_polygons_intersects/query.sql new file mode 100644 index 000000000..6e216e748 --- /dev/null +++ b/parser/testdata/03410_polygons_intersects/query.sql @@ -0,0 +1,19 @@ +select polygonsIntersectCartesian([[[(0, 0),(0, 3),(1, 2.9),(2, 2.6),(2.6, 2),(2.9, 1),(3, 0),(0, 0)]]], [[[(1., 1.),(1., 4.),(4., 4.),(4., 1.),(1., 1.)]]]); +select polygonsIntersectCartesian([[[(2., 2.), (2., 3.), (3., 3.), (3., 2.)]]], [[[(1., 1.),(1., 4.),(4., 4.),(4., 1.),(1., 1.)]]]); +select polygonsIntersectCartesian([[[(5., 5.), (5., 6.), (6., 6.), (6., 5.)]]], [[[(1., 1.),(1., 4.),(4., 4.),(4., 1.),(1., 1.)]]]); + +select polygonsIntersectSpherical([[[(4.3613577, 50.8651821), (4.349556, 50.8535879), (4.3602419, 50.8435626), (4.3830299, 50.8428851), (4.3904543, 50.8564867), (4.3613148, 50.8651279)]]], [[[(4.346693, 50.858306), (4.367945, 50.852455), (4.366227, 50.840809), (4.344961, 50.833264), (4.338074, 50.848677), (4.346693, 50.858306)]]]); +select polygonsIntersectSpherical([[[(4.3501568, 50.8518269), (4.3444920, 50.8439961), (4.3565941, 50.8443213), (4.3501568, 50.8518269)]]], [[[(4.3679450, 50.8524550),(4.3466930, 
50.8583060),(4.3380740, 50.8486770),(4.3449610, 50.8332640),(4.3662270, 50.8408090),(4.3679450, 50.8524550)]]]); +select polygonsIntersectSpherical([[[(5.0, 51.0), (5.1, 51.0), (5.1, 51.1), (5.0, 51.1)]]], [[[(4.3679450, 50.8524550),(4.3466930, 50.8583060),(4.3380740, 50.8486770),(4.3449610, 50.8332640),(4.3662270, 50.8408090),(4.3679450, 50.8524550)]]]); + +select '-------- MultiPolygon with Polygon'; +select polygonsIntersectCartesian([[(29.453587685533865,59.779570356240356),(29.393139070478895,52.276266797422124),(40.636581470703206,59.38168915000267),(41.21084331372543,59.103467777099866),(29.786055068336193,52.146627480315004),(31.23682182965546,52.16517054781818),(41.69443223416517,58.85424941916091),(42.51048853740727,58.47703162291134),(32.59691566839227,52.22075341251539),(34.289476889931414,52.22075341251539),(43.02430176537451,58.07974369546071),(43.02430176537451,57.25537683364851),(35.468224883503325,52.2022335126388),(37.16078610504247,52.23926559241349),(43.02430176537451,56.26136189644947),(43.02430176537451,55.326904361850836),(38.33953409861437,52.16517054781818),(40.09254393520848,52.16517054781818),(44.4146199116388,55.3097062225408),(44.47506852669377,59.80998197603594),(39.72985224487867,59.931351417569715),(30.23941968124846,53.67744677450975),(30.20919537372098,54.63314259659509),(38.73245009647167,59.94649146557819),(37.2816833351524,59.97675082987618),(30.23941968124846,55.2752875586599),(30.33009260383092,56.19415599955667),(36.28428118674541,59.96162460231375),(34.863738732953635,59.97675082987618),(30.178971066193498,56.97640788219866),(30.178971066193498,57.91957806959033),(33.65476643185424,59.94649146557819),(32.32489690064491,59.94649146557819),(30.481214141468342,58.85424941916091),(30.571887064050795,59.99187015036608),(29.453587685533865,59.779570356240356)]], [[[(33.473420586689336,58.85424941916091),(32.23422397806246,58.492830557036),(32.173775363007486,58.03176922751564),(31.508840597402823,57.499784781503735),(31.750635057622702,56.86092686957355),(31.508840597402823,55.941082594334574),(32.20399967053497,55.515591939372456),(31.84130798020516,54.998862226280465),(31.418167674820367,54.422670886434275),(32.47601843828233,53.83826377018255),(32.08310244042503,53.408048308050866),(33.171177511414484,52.82758702113742),(34.77306581037117,52.91880107773494),(34.77306581037117,53.784726518357985),(34.108131044766516,54.17574726780569),(35.07530888564602,54.59813930694554),(34.25925258240394,54.96417435716029),(35.01486027059106,55.361278263643584),(33.50364489421682,55.37845402950552),(32.7480372060297,55.90721384574556),(35.67979503619571,55.68634475630185),(32.83871012861215,56.311688992608396),(34.591719965206266,56.29492065473883),(35.7100193437232,56.311688992608396),(33.83611227701915,56.695333481003644),(32.95960735872209,56.9434497616887),(36.072711034053015,57.091531913901434),(33.171177511414484,57.33702717078384),(36.193608264162954,57.499784781503735),(33.23162612646945,57.77481561306047),(36.43540272438284,58.04776787540811),(33.62454212432676,58.27099811968307),(36.344729801800376,58.54018474404165),(33.83611227701915,58.68186423448108),(34.74284150284369,59.565911441555244),(33.473420586689336,58.85424941916091)]], 
[[(34.65216858026123,58.91672306881671),(37.19101041256995,58.68186423448108),(36.01226241899805,58.28688958537609),(37.16078610504247,58.04776787540811),(35.74024365125068,57.79092907387934),(37.009664567405046,57.499784781503735),(35.77046795877817,57.25537683364851),(36.979440259877556,57.07510745541089),(34.22902827487645,56.794777197297435),(36.7074214921302,56.210968525786996),(34.712617195316206,56.10998276812964),(36.55629995449277,55.63519693782703),(35.13575750070099,55.53270067649592),(36.43540272438284,55.34409504165558),(34.83351442542614,55.01619492319591),(35.61934642114075,54.49294870011772),(34.89396304048112,54.12264226523038),(35.37755196092087,53.046178687628185),(37.43280487278982,52.95523300597458),(35.92158949641559,53.80257986695776),(36.91899164482259,53.856094327816805),(36.01226241899805,54.75541714463799),(37.765272255592166,55.189110239786885),(36.828318722240134,55.44708256557195),(38.03729102333953,55.652253637168315),(36.64697287707522,55.941082594334574),(38.21863686850443,56.05939028508024),(36.37495410932787,56.64551287174558),(38.30930979108689,56.992876013526654),(37.16078610504247,57.25537683364851),(38.127963945921984,57.516020773674256),(37.43280487278982,57.710289827306724),(38.33953409861437,57.935626886818994),(37.40258056526235,58.31865112960426),(38.58132855883426,58.744648733419496),(37.31190764267989,59.02578062465136),(34.65216858026123,58.91672306881671)]], [[(38.52087994377928,59.11898412389468),(39.54850639971376,58.713270635642914),(38.369758406141855,58.28688958537609),(38.85334732658162,58.06375936407028),(38.33953409861437,57.710289827306724),(38.73245009647167,57.48354156434209),(38.21863686850443,57.271721400459285),(38.97424455669155,56.87744603722649),(37.463029180317314,56.5623320541159),(38.94402024916407,56.05939028508024),(38.18841256097694,55.856355210835915),(38.490655636251795,55.53270067649592),(37.795496563119656,55.39562234093384),(38.30930979108689,55.154587013355666),(36.7074214921302,54.65063295250911),(37.31190764267989,53.92734063371401),(36.979440259877556,53.58783775557231),(37.855945178174615,52.91880107773497),(39.57873070724124,52.69956490610895),(38.33953409861437,53.281741738901104),(40.00187101262603,53.35396273604752),(39.54850639971376,53.58783775557231),(40.24366547284591,53.58783775557231),(39.97164670509855,53.98069568468355),(40.60635716317572,54.03398248547225),(40.39478701048334,54.44025165268903),(39.54850639971376,54.56310590284329),(39.54850639971376,54.87732350170489),(40.39478701048334,54.87732350170489),(40.39478701048334,55.24083903654295),(39.82052516746112,55.2752875586599),(39.760076552406154,55.75443792473942),(40.57613285564824,55.78844000174894),(40.425011318010824,56.19415599955667),(39.82052516746112,56.07626182891758),(39.79030085993364,56.41214455508424),(40.48545993306579,56.495655446714636),(40.33433839542836,56.95993246553937),(39.79030085993364,56.992876013526654),(39.72985224487867,57.46729112028032),(40.33433839542836,57.46729112028032),(40.24366547284591,58.04776787540811),(39.63917932229622,58.04776787540811),(39.63917932229622,58.382088724871295),(40.33433839542836,58.382088724871295),(40.45523562553831,58.9011152358548),(38.52087994377928,59.11898412389468)]]]); + +select '-------- MultiPolygon with Polygon'; +select 
polygonsIntersectSpherical([[(29.453587685533865,59.779570356240356),(29.393139070478895,52.276266797422124),(40.636581470703206,59.38168915000267),(41.21084331372543,59.103467777099866),(29.786055068336193,52.146627480315004),(31.23682182965546,52.16517054781818),(41.69443223416517,58.85424941916091),(42.51048853740727,58.47703162291134),(32.59691566839227,52.22075341251539),(34.289476889931414,52.22075341251539),(43.02430176537451,58.07974369546071),(43.02430176537451,57.25537683364851),(35.468224883503325,52.2022335126388),(37.16078610504247,52.23926559241349),(43.02430176537451,56.26136189644947),(43.02430176537451,55.326904361850836),(38.33953409861437,52.16517054781818),(40.09254393520848,52.16517054781818),(44.4146199116388,55.3097062225408),(44.47506852669377,59.80998197603594),(39.72985224487867,59.931351417569715),(30.23941968124846,53.67744677450975),(30.20919537372098,54.63314259659509),(38.73245009647167,59.94649146557819),(37.2816833351524,59.97675082987618),(30.23941968124846,55.2752875586599),(30.33009260383092,56.19415599955667),(36.28428118674541,59.96162460231375),(34.863738732953635,59.97675082987618),(30.178971066193498,56.97640788219866),(30.178971066193498,57.91957806959033),(33.65476643185424,59.94649146557819),(32.32489690064491,59.94649146557819),(30.481214141468342,58.85424941916091),(30.571887064050795,59.99187015036608),(29.453587685533865,59.779570356240356)]], [[[(33.473420586689336,58.85424941916091),(32.23422397806246,58.492830557036),(32.173775363007486,58.03176922751564),(31.508840597402823,57.499784781503735),(31.750635057622702,56.86092686957355),(31.508840597402823,55.941082594334574),(32.20399967053497,55.515591939372456),(31.84130798020516,54.998862226280465),(31.418167674820367,54.422670886434275),(32.47601843828233,53.83826377018255),(32.08310244042503,53.408048308050866),(33.171177511414484,52.82758702113742),(34.77306581037117,52.91880107773494),(34.77306581037117,53.784726518357985),(34.108131044766516,54.17574726780569),(35.07530888564602,54.59813930694554),(34.25925258240394,54.96417435716029),(35.01486027059106,55.361278263643584),(33.50364489421682,55.37845402950552),(32.7480372060297,55.90721384574556),(35.67979503619571,55.68634475630185),(32.83871012861215,56.311688992608396),(34.591719965206266,56.29492065473883),(35.7100193437232,56.311688992608396),(33.83611227701915,56.695333481003644),(32.95960735872209,56.9434497616887),(36.072711034053015,57.091531913901434),(33.171177511414484,57.33702717078384),(36.193608264162954,57.499784781503735),(33.23162612646945,57.77481561306047),(36.43540272438284,58.04776787540811),(33.62454212432676,58.27099811968307),(36.344729801800376,58.54018474404165),(33.83611227701915,58.68186423448108),(34.74284150284369,59.565911441555244),(33.473420586689336,58.85424941916091)]], 
[[(34.65216858026123,58.91672306881671),(37.19101041256995,58.68186423448108),(36.01226241899805,58.28688958537609),(37.16078610504247,58.04776787540811),(35.74024365125068,57.79092907387934),(37.009664567405046,57.499784781503735),(35.77046795877817,57.25537683364851),(36.979440259877556,57.07510745541089),(34.22902827487645,56.794777197297435),(36.7074214921302,56.210968525786996),(34.712617195316206,56.10998276812964),(36.55629995449277,55.63519693782703),(35.13575750070099,55.53270067649592),(36.43540272438284,55.34409504165558),(34.83351442542614,55.01619492319591),(35.61934642114075,54.49294870011772),(34.89396304048112,54.12264226523038),(35.37755196092087,53.046178687628185),(37.43280487278982,52.95523300597458),(35.92158949641559,53.80257986695776),(36.91899164482259,53.856094327816805),(36.01226241899805,54.75541714463799),(37.765272255592166,55.189110239786885),(36.828318722240134,55.44708256557195),(38.03729102333953,55.652253637168315),(36.64697287707522,55.941082594334574),(38.21863686850443,56.05939028508024),(36.37495410932787,56.64551287174558),(38.30930979108689,56.992876013526654),(37.16078610504247,57.25537683364851),(38.127963945921984,57.516020773674256),(37.43280487278982,57.710289827306724),(38.33953409861437,57.935626886818994),(37.40258056526235,58.31865112960426),(38.58132855883426,58.744648733419496),(37.31190764267989,59.02578062465136),(34.65216858026123,58.91672306881671)]], [[(38.52087994377928,59.11898412389468),(39.54850639971376,58.713270635642914),(38.369758406141855,58.28688958537609),(38.85334732658162,58.06375936407028),(38.33953409861437,57.710289827306724),(38.73245009647167,57.48354156434209),(38.21863686850443,57.271721400459285),(38.97424455669155,56.87744603722649),(37.463029180317314,56.5623320541159),(38.94402024916407,56.05939028508024),(38.18841256097694,55.856355210835915),(38.490655636251795,55.53270067649592),(37.795496563119656,55.39562234093384),(38.30930979108689,55.154587013355666),(36.7074214921302,54.65063295250911),(37.31190764267989,53.92734063371401),(36.979440259877556,53.58783775557231),(37.855945178174615,52.91880107773497),(39.57873070724124,52.69956490610895),(38.33953409861437,53.281741738901104),(40.00187101262603,53.35396273604752),(39.54850639971376,53.58783775557231),(40.24366547284591,53.58783775557231),(39.97164670509855,53.98069568468355),(40.60635716317572,54.03398248547225),(40.39478701048334,54.44025165268903),(39.54850639971376,54.56310590284329),(39.54850639971376,54.87732350170489),(40.39478701048334,54.87732350170489),(40.39478701048334,55.24083903654295),(39.82052516746112,55.2752875586599),(39.760076552406154,55.75443792473942),(40.57613285564824,55.78844000174894),(40.425011318010824,56.19415599955667),(39.82052516746112,56.07626182891758),(39.79030085993364,56.41214455508424),(40.48545993306579,56.495655446714636),(40.33433839542836,56.95993246553937),(39.79030085993364,56.992876013526654),(39.72985224487867,57.46729112028032),(40.33433839542836,57.46729112028032),(40.24366547284591,58.04776787540811),(39.63917932229622,58.04776787540811),(39.63917932229622,58.382088724871295),(40.33433839542836,58.382088724871295),(40.45523562553831,58.9011152358548),(38.52087994377928,59.11898412389468)]]]); + +select '-------- MultiPolygon with Polygon with Holes'; +select 
polygonsIntersectCartesian([[(33.473420586689336,58.85424941916091),(32.23422397806246,58.492830557036),(32.173775363007486,58.03176922751564),(31.508840597402823,57.499784781503735),(31.750635057622702,56.86092686957355),(31.508840597402823,55.941082594334574),(32.20399967053497,55.515591939372456),(31.84130798020516,54.998862226280465),(31.418167674820367,54.422670886434275),(32.47601843828233,53.83826377018255),(32.08310244042503,53.408048308050866),(33.171177511414484,52.82758702113742),(34.77306581037117,52.91880107773494),(34.77306581037117,53.784726518357985),(34.108131044766516,54.17574726780569),(35.07530888564602,54.59813930694554),(34.25925258240394,54.96417435716029),(35.01486027059106,55.361278263643584),(33.50364489421682,55.37845402950552),(32.7480372060297,55.90721384574556),(35.67979503619571,55.68634475630185),(32.83871012861215,56.311688992608396),(34.591719965206266,56.29492065473883),(35.7100193437232,56.311688992608396),(33.83611227701915,56.695333481003644),(32.95960735872209,56.9434497616887),(36.072711034053015,57.091531913901434),(33.171177511414484,57.33702717078384),(36.193608264162954,57.499784781503735),(33.23162612646945,57.77481561306047),(36.43540272438284,58.04776787540811),(33.62454212432676,58.27099811968307),(36.344729801800376,58.54018474404165),(33.83611227701915,58.68186423448108),(34.74284150284369,59.565911441555244),(33.473420586689336,58.85424941916091)]], [[(24.367675781249993,61.45977057029751),(19.577636718749993,58.67693767258692),(19.577636718749993,57.492213666700735),(19.445800781249996,55.87531083569678),(19.445800781249996,54.085173420886775),(17.468261718749996,53.014783245859235),(20.017089843749993,51.563412328675895),(21.203613281249993,50.205033264943324),(26.125488281249993,50.40151532278236),(27.22412109374999,48.980216985374994),(32.80517578124999,49.525208341974405),(35.26611328124999,48.74894534343292),(36.93603515624999,49.66762782262194),(42.56103515625,48.77791275550183),(43.92333984374999,49.8096315635631),(47.17529296875,49.152969656170455),(49.28466796875,50.54136296522162),(48.05419921875,51.17934297928929),(51.39404296875,52.48278022207825),(50.64697265625,53.014783245859235),(52.88818359375,53.93021986394004),(51.65771484374999,54.29088164657006),(52.66845703125,55.825973254619015),(50.25146484375,56.145549500679095),(51.92138671875,57.914847767009206),(49.15283203125,58.17070248348605),(49.59228515625,60.086762746260064),(47.043457031249986,59.88893689676584),(43.57177734375,61.37567331572748),(42.64892578125,60.630101766266705),(36.89208984374999,62.000904713685856),(36.01318359374999,61.143235250840576),(31.398925781249993,62.02152819100766),(30.563964843749996,61.05828537037917),(26.872558593749993,61.71070595883174),(26.652832031249993,61.10078883158897),(24.367675781249993,61.45977057029751)], [(24.455566406249993,59.42272750081452),(21.203613281249993,58.49369382056807),(21.335449218749993,56.89700392127261),(21.599121093749993,55.92458580482949),(25.202636718749993,55.998380955359636),(28.850097656249993,57.06463027327854),(27.09228515625,57.844750992890994),(28.806152343749996,59.17592824927138),(26.257324218749993,59.17592824927138),(24.455566406249993,59.42272750081452)], 
[(35.13427734375,59.84481485969107),(31.970214843749993,58.97266715450152),(33.20068359374999,56.776808316568406),(36.67236328125,56.41390137600675),(39.08935546874999,57.25528054528888),(42.69287109374999,58.03137242177638),(40.89111328124999,59.26588062825809),(37.28759765625,58.722598828043374),(37.11181640624999,59.66774058164964),(35.13427734375,59.84481485969107)], [(29.157714843749993,55.75184939173528),(22.565917968749993,55.128649068488784),(22.565917968749993,53.54030739150019),(22.038574218749996,51.48138289610097),(26.257324218749993,51.42661449707484),(30.124511718749993,50.54136296522162),(32.18994140624999,51.17934297928929),(30.124511718749993,53.173119202640635),(35.09033203124999,53.173119202640635),(33.11279296875,54.085173420886775),(29.597167968749993,55.50374985927513),(29.157714843749993,55.75184939173528)], [(42.82470703125,56.58369172128337),(36.584472656249986,55.329144408405085),(37.99072265625,53.592504809039355),(34.95849609374999,51.48138289610097),(36.54052734374999,50.40151532278236),(39.66064453124999,50.289339253291786),(39.79248046875,52.13348804077148),(41.77001953125,50.68079714532166),(44.49462890624999,51.97134580885171),(47.30712890624999,52.509534770327264),(44.05517578125,53.54030739150019),(46.60400390625,53.696706475303245),(47.61474609375,55.40406982700608),(45.37353515625,55.40406982700608),(42.82470703125,56.58369172128337)]]); + +select '-------- MultiPolygon with Polygon with Holes'; +select polygonsIntersectSpherical([[(29.453587685533865,59.779570356240356),(29.393139070478895,52.276266797422124),(40.636581470703206,59.38168915000267),(41.21084331372543,59.103467777099866),(29.786055068336193,52.146627480315004),(31.23682182965546,52.16517054781818),(41.69443223416517,58.85424941916091),(42.51048853740727,58.47703162291134),(32.59691566839227,52.22075341251539),(34.289476889931414,52.22075341251539),(43.02430176537451,58.07974369546071),(43.02430176537451,57.25537683364851),(35.468224883503325,52.2022335126388),(37.16078610504247,52.23926559241349),(43.02430176537451,56.26136189644947),(43.02430176537451,55.326904361850836),(38.33953409861437,52.16517054781818),(40.09254393520848,52.16517054781818),(44.4146199116388,55.3097062225408),(44.47506852669377,59.80998197603594),(39.72985224487867,59.931351417569715),(30.23941968124846,53.67744677450975),(30.20919537372098,54.63314259659509),(38.73245009647167,59.94649146557819),(37.2816833351524,59.97675082987618),(30.23941968124846,55.2752875586599),(30.33009260383092,56.19415599955667),(36.28428118674541,59.96162460231375),(34.863738732953635,59.97675082987618),(30.178971066193498,56.97640788219866),(30.178971066193498,57.91957806959033),(33.65476643185424,59.94649146557819),(32.32489690064491,59.94649146557819),(30.481214141468342,58.85424941916091),(30.571887064050795,59.99187015036608),(29.453587685533865,59.779570356240356)]], 
[[(24.367675781249993,61.45977057029751),(19.577636718749993,58.67693767258692),(19.577636718749993,57.492213666700735),(19.445800781249996,55.87531083569678),(19.445800781249996,54.085173420886775),(17.468261718749996,53.014783245859235),(20.017089843749993,51.563412328675895),(21.203613281249993,50.205033264943324),(26.125488281249993,50.40151532278236),(27.22412109374999,48.980216985374994),(32.80517578124999,49.525208341974405),(35.26611328124999,48.74894534343292),(36.93603515624999,49.66762782262194),(42.56103515625,48.77791275550183),(43.92333984374999,49.8096315635631),(47.17529296875,49.152969656170455),(49.28466796875,50.54136296522162),(48.05419921875,51.17934297928929),(51.39404296875,52.48278022207825),(50.64697265625,53.014783245859235),(52.88818359375,53.93021986394004),(51.65771484374999,54.29088164657006),(52.66845703125,55.825973254619015),(50.25146484375,56.145549500679095),(51.92138671875,57.914847767009206),(49.15283203125,58.17070248348605),(49.59228515625,60.086762746260064),(47.043457031249986,59.88893689676584),(43.57177734375,61.37567331572748),(42.64892578125,60.630101766266705),(36.89208984374999,62.000904713685856),(36.01318359374999,61.143235250840576),(31.398925781249993,62.02152819100766),(30.563964843749996,61.05828537037917),(26.872558593749993,61.71070595883174),(26.652832031249993,61.10078883158897),(24.367675781249993,61.45977057029751)], [(24.455566406249993,59.42272750081452),(21.203613281249993,58.49369382056807),(21.335449218749993,56.89700392127261),(21.599121093749993,55.92458580482949),(25.202636718749993,55.998380955359636),(28.850097656249993,57.06463027327854),(27.09228515625,57.844750992890994),(28.806152343749996,59.17592824927138),(26.257324218749993,59.17592824927138),(24.455566406249993,59.42272750081452)], [(35.13427734375,59.84481485969107),(31.970214843749993,58.97266715450152),(33.20068359374999,56.776808316568406),(36.67236328125,56.41390137600675),(39.08935546874999,57.25528054528888),(42.69287109374999,58.03137242177638),(40.89111328124999,59.26588062825809),(37.28759765625,58.722598828043374),(37.11181640624999,59.66774058164964),(35.13427734375,59.84481485969107)], [(29.157714843749993,55.75184939173528),(22.565917968749993,55.128649068488784),(22.565917968749993,53.54030739150019),(22.038574218749996,51.48138289610097),(26.257324218749993,51.42661449707484),(30.124511718749993,50.54136296522162),(32.18994140624999,51.17934297928929),(30.124511718749993,53.173119202640635),(35.09033203124999,53.173119202640635),(33.11279296875,54.085173420886775),(29.597167968749993,55.50374985927513),(29.157714843749993,55.75184939173528)], [(42.82470703125,56.58369172128337),(36.584472656249986,55.329144408405085),(37.99072265625,53.592504809039355),(34.95849609374999,51.48138289610097),(36.54052734374999,50.40151532278236),(39.66064453124999,50.289339253291786),(39.79248046875,52.13348804077148),(41.77001953125,50.68079714532166),(44.49462890624999,51.97134580885171),(47.30712890624999,52.509534770327264),(44.05517578125,53.54030739150019),(46.60400390625,53.696706475303245),(47.61474609375,55.40406982700608),(45.37353515625,55.40406982700608),(42.82470703125,56.58369172128337)]]); diff --git a/parser/testdata/03411_analyzer_scalar_correlated_subquery/ast.json b/parser/testdata/03411_analyzer_scalar_correlated_subquery/ast.json new file mode 100644 index 000000000..739896975 --- /dev/null +++ b/parser/testdata/03411_analyzer_scalar_correlated_subquery/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + 
], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001430543, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03411_analyzer_scalar_correlated_subquery/metadata.json b/parser/testdata/03411_analyzer_scalar_correlated_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03411_analyzer_scalar_correlated_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03411_analyzer_scalar_correlated_subquery/query.sql b/parser/testdata/03411_analyzer_scalar_correlated_subquery/query.sql new file mode 100644 index 000000000..aa6e1dd15 --- /dev/null +++ b/parser/testdata/03411_analyzer_scalar_correlated_subquery/query.sql @@ -0,0 +1,12 @@ +set enable_analyzer = 1; +set allow_experimental_correlated_subqueries = 1; + +EXPLAIN QUERY TREE +SELECT * +FROM numbers(2) +WHERE (SELECT count() FROM system.one WHERE number = 2) is NULL; + +SELECT * +FROM numbers(2) +WHERE (SELECT count() FROM system.one WHERE number = 2) is NULL +ORDER BY all; diff --git a/parser/testdata/03411_iceberg_bucket/ast.json b/parser/testdata/03411_iceberg_bucket/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03411_iceberg_bucket/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03411_iceberg_bucket/metadata.json b/parser/testdata/03411_iceberg_bucket/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03411_iceberg_bucket/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03411_iceberg_bucket/query.sql b/parser/testdata/03411_iceberg_bucket/query.sql new file mode 100644 index 000000000..f3bf86af2 --- /dev/null +++ b/parser/testdata/03411_iceberg_bucket/query.sql @@ -0,0 +1,100 @@ +-- Tags: no-random-settings +-- The test is taken from both the Iceberg spec (https://iceberg.apache.org/spec/#appendix-b-32-bit-hash-requirements) and the reference Iceberg repo (https://github.com/apache/iceberg/blob/6e8718113c08aebf76d8e79a9e2534c89c73407a/api/src/test/java/org/apache/iceberg/transforms/TestBucketing.java) +SELECT 'icebergHash'; +SELECT icebergHash(true); +SELECT icebergHash(1); +SELECT icebergHash(0.0 :: Float32); +SELECT icebergHash(-0.0 :: Float32); +SELECT icebergHash(1.0 :: Float32); +SELECT icebergHash(0.0 :: Float64); +SELECT icebergHash(-0.0 :: Float64); +SELECT icebergHash(1.0 :: Float64); +SELECT icebergHash(34 :: Int32); +SELECT icebergHash(34 :: UInt32); +SELECT icebergHash(34 :: Int64); +SELECT icebergHash(34 :: UInt64); +SELECT icebergHash(14.20 :: Decimal32(2)); +SELECT icebergHash(14.20 :: Decimal64(2)); +SELECT icebergHash(14.20 :: Decimal128(2)); +SELECT icebergHash('2017-11-16' :: Date); +WITH + toDateTime64('1970-01-01 22:31:08', 6, 'UTC') AS ts, + toUnixTimestamp64Micro(ts) AS microseconds_since_day_start +SELECT icebergHash(microseconds_since_day_start); +SELECT icebergHash(toDateTime64('2017-11-16T22:31:08', 6, 'UTC')); +SELECT icebergHash(toDateTime64('2017-11-16T22:31:08.000001', 6, 'UTC')); +SELECT icebergHash(toDateTime64('2017-11-16T22:31:08', 9, 'UTC')); +SELECT icebergHash(toDateTime64('2017-11-16T22:31:08.000001', 9, 'UTC')); +SELECT icebergHash(toDateTime64('2017-11-16T22:31:08.000001001', 9, 'UTC')); +SELECT icebergHash('iceberg'); +SELECT icebergHash('iceberg' :: String); +SELECT icebergHash('iceberg' :: FixedString(7)); +SELECT icebergHash('\x00\x01\x02\x03' :: FixedString(4)); +SELECT icebergHash('\x00\x01\x02\x03' :: String);
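For context on what the icebergHash/icebergBucket calls in this test exercise: per the Iceberg spec linked above, a value is first reduced to a canonical byte encoding (integers are widened to 64 bits and serialized little-endian), hashed with seed-0 32-bit Murmur3, and bucketed as (hash & MaxInt32) % N. The following Go sketch of that arithmetic is an illustration under those spec-level assumptions, not code from this patch; it relies on the third-party github.com/spaolacci/murmur3 package.

package main

import (
	"encoding/binary"
	"fmt"
	"math"

	"github.com/spaolacci/murmur3"
)

// icebergHashLong hashes an integer the way the Iceberg spec describes:
// widen to 64 bits, serialize little-endian, hash with Murmur3 (x86, 32-bit, seed 0).
func icebergHashLong(v int64) int32 {
	var buf [8]byte
	binary.LittleEndian.PutUint64(buf[:], uint64(v))
	return int32(murmur3.Sum32(buf[:]))
}

// icebergBucket clears the sign bit, then reduces the hash modulo n buckets.
func icebergBucket(n int32, v int64) int32 {
	return (icebergHashLong(v) & math.MaxInt32) % n
}

func main() {
	fmt.Println(icebergHashLong(34)) // 2017239379, the spec's published vector for the integer 34
	fmt.Println(icebergBucket(5, 34), icebergBucket(13, 34))
}

If the encoding assumption above is right, SELECT icebergHash(34 :: Int64) in this test should agree with icebergHashLong(34).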
+SELECT icebergHash('f79c3e09-677c-4bbd-a479-3f349cb785e7' :: UUID); + +SELECT 'icebergBucket, modulo 5'; +SELECT icebergBucket(5, true); +SELECT icebergBucket(5, 1); +SELECT icebergBucket(5, 0.0 :: Float32); +SELECT icebergBucket(5, -0.0 :: Float32); +SELECT icebergBucket(5, 1.0 :: Float32); +SELECT icebergBucket(5, 0.0 :: Float64); +SELECT icebergBucket(5, -0.0 :: Float64); +SELECT icebergBucket(5, 1.0 :: Float64); +SELECT icebergBucket(5, 34 :: Int32); +SELECT icebergBucket(5, 34 :: UInt32); +SELECT icebergBucket(5, 34 :: Int64); +SELECT icebergBucket(5, 34 :: UInt64); +SELECT icebergBucket(5, 14.20 :: Decimal32(2)); +SELECT icebergBucket(5, 14.20 :: Decimal64(2)); +SELECT icebergBucket(5, 14.20 :: Decimal128(2)); +SELECT icebergBucket(5, '2017-11-16' :: Date); +WITH + toDateTime64('1970-01-01 22:31:08', 6, 'UTC') AS ts, + toUnixTimestamp64Micro(ts) AS microseconds_since_day_start +SELECT icebergBucket(5, microseconds_since_day_start); +SELECT icebergBucket(5, toDateTime64('2017-11-16T22:31:08', 6, 'UTC')); +SELECT icebergBucket(5, toDateTime64('2017-11-16T22:31:08.000001', 6, 'UTC')); +SELECT icebergBucket(5, toDateTime64('2017-11-16T22:31:08', 9, 'UTC')); +SELECT icebergBucket(5, toDateTime64('2017-11-16T22:31:08.000001', 9, 'UTC')); +SELECT icebergBucket(5, toDateTime64('2017-11-16T22:31:08.000001001', 9, 'UTC')); +SELECT icebergBucket(5, 'iceberg'); +SELECT icebergBucket(5, 'iceberg' :: String); +SELECT icebergBucket(5, 'iceberg' :: FixedString(7)); +SELECT icebergBucket(5, '\x00\x01\x02\x03' :: FixedString(4)); +SELECT icebergBucket(5, '\x00\x01\x02\x03' :: String); +SELECT icebergBucket(5, 'f79c3e09-677c-4bbd-a479-3f349cb785e7' :: UUID); + +SELECT 'icebergBucket, modulo 13'; +SELECT icebergBucket(13, true); +SELECT icebergBucket(13, 1); +SELECT icebergBucket(13, 0.0 :: Float32); +SELECT icebergBucket(13, -0.0 :: Float32); +SELECT icebergBucket(13, 1.0 :: Float32); +SELECT icebergBucket(13, 0.0 :: Float64); +SELECT icebergBucket(13, -0.0 :: Float64); +SELECT icebergBucket(13, 1.0 :: Float64); +SELECT icebergBucket(13, 34 :: Int32); +SELECT icebergBucket(13, 34 :: UInt32); +SELECT icebergBucket(13, 34 :: Int64); +SELECT icebergBucket(13, 34 :: UInt64); +SELECT icebergBucket(13, 14.20 :: Decimal32(2)); +SELECT icebergBucket(13, 14.20 :: Decimal64(2)); +SELECT icebergBucket(13, 14.20 :: Decimal128(2)); +SELECT icebergBucket(13, '2017-11-16' :: Date); +WITH + toDateTime64('1970-01-01 22:31:08', 6, 'UTC') AS ts, + toUnixTimestamp64Micro(ts) AS microseconds_since_day_start +SELECT icebergBucket(13, microseconds_since_day_start); +SELECT icebergBucket(13, toDateTime64('2017-11-16T22:31:08', 6, 'UTC')); +SELECT icebergBucket(13, toDateTime64('2017-11-16T22:31:08.000001', 6, 'UTC')); +SELECT icebergBucket(13, toDateTime64('2017-11-16T22:31:08', 9, 'UTC')); +SELECT icebergBucket(13, toDateTime64('2017-11-16T22:31:08.000001', 9, 'UTC')); +SELECT icebergBucket(13, toDateTime64('2017-11-16T22:31:08.000001001', 9, 'UTC')); +SELECT icebergBucket(13, 'iceberg'); +SELECT icebergBucket(13, 'iceberg' :: String); +SELECT icebergBucket(13, 'iceberg' :: FixedString(7)); +SELECT icebergBucket(13, '\x00\x01\x02\x03' :: FixedString(4)); +SELECT icebergBucket(13, '\x00\x01\x02\x03' :: String); +SELECT icebergBucket(13, 'f79c3e09-677c-4bbd-a479-3f349cb785e7' :: UUID); \ No newline at end of file diff --git a/parser/testdata/03411_summing_merge_tree_dynamic_values/ast.json b/parser/testdata/03411_summing_merge_tree_dynamic_values/ast.json new file mode 100644 index 000000000..3970e3841 --- /dev/null +++ 
b/parser/testdata/03411_summing_merge_tree_dynamic_values/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001286352, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03411_summing_merge_tree_dynamic_values/metadata.json b/parser/testdata/03411_summing_merge_tree_dynamic_values/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03411_summing_merge_tree_dynamic_values/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03411_summing_merge_tree_dynamic_values/query.sql b/parser/testdata/03411_summing_merge_tree_dynamic_values/query.sql new file mode 100644 index 000000000..a279b612d --- /dev/null +++ b/parser/testdata/03411_summing_merge_tree_dynamic_values/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS t0; +SET allow_suspicious_primary_key = 1; +CREATE TABLE t0 (c0 Dynamic) ENGINE = SummingMergeTree() ORDER BY tuple(); +INSERT INTO TABLE t0 (c0) VALUES ('a'::Enum('a' = 1)), (2); +SELECT c0 FROM t0 FINAL; +DROP TABLE t0; + diff --git a/parser/testdata/03411_variant_basic_discriminators_deserialization_bug/ast.json b/parser/testdata/03411_variant_basic_discriminators_deserialization_bug/ast.json new file mode 100644 index 000000000..62c8c779c --- /dev/null +++ b/parser/testdata/03411_variant_basic_discriminators_deserialization_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00151078, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03411_variant_basic_discriminators_deserialization_bug/metadata.json b/parser/testdata/03411_variant_basic_discriminators_deserialization_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03411_variant_basic_discriminators_deserialization_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03411_variant_basic_discriminators_deserialization_bug/query.sql b/parser/testdata/03411_variant_basic_discriminators_deserialization_bug/query.sql new file mode 100644 index 000000000..f115229cd --- /dev/null +++ b/parser/testdata/03411_variant_basic_discriminators_deserialization_bug/query.sql @@ -0,0 +1,8 @@ +set max_threads=1; +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Variant(String, Int)) ENGINE = MergeTree() PRIMARY KEY tuple() SETTINGS use_compact_variant_discriminators_serialization = 0, index_granularity=1; +INSERT INTO TABLE t0 (c0) VALUES (42), ('a'); +optimize table t0 final; +SELECT c0 FROM t0; +DROP TABLE t0; + diff --git a/parser/testdata/03412_analyzer_correlated_subquery_bug/ast.json b/parser/testdata/03412_analyzer_correlated_subquery_bug/ast.json new file mode 100644 index 000000000..577bcfb07 --- /dev/null +++ b/parser/testdata/03412_analyzer_correlated_subquery_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001190756, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03412_analyzer_correlated_subquery_bug/metadata.json b/parser/testdata/03412_analyzer_correlated_subquery_bug/metadata.json new file mode 100644 index 000000000..ef120d978 
--- /dev/null +++ b/parser/testdata/03412_analyzer_correlated_subquery_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03412_analyzer_correlated_subquery_bug/query.sql b/parser/testdata/03412_analyzer_correlated_subquery_bug/query.sql new file mode 100644 index 000000000..3dae41c02 --- /dev/null +++ b/parser/testdata/03412_analyzer_correlated_subquery_bug/query.sql @@ -0,0 +1,32 @@ +set enable_analyzer = 1; +set allow_experimental_correlated_subqueries = 1; + +create table mem2 engine = Memory as select number from numbers(2); + +SELECT number +FROM mem2 AS tbl +WHERE exists(( + SELECT number + FROM numbers(1) + WHERE number >= tbl.number +)); + +SELECT '--'; + +SELECT number +FROM mem2 AS tbl +WHERE exists(( + SELECT number + FROM numbers(2) + WHERE number >= tbl.number +)); + +SELECT number +FROM mem2 AS tbl +WHERE length(arrayFilter(x -> (x OR exists(( + SELECT number + FROM numbers(1) + WHERE number >= tbl.number +))), range(number))) > 0; + +SELECT number FROM mem2 AS tbl INNER JOIN (SELECT number FROM numbers(1) WHERE tbl.number >= number) AS alias4 ON alias4.number = number; -- { serverError NOT_IMPLEMENTED} \ No newline at end of file diff --git a/parser/testdata/03412_dynamic_in_arg_min_max/ast.json b/parser/testdata/03412_dynamic_in_arg_min_max/ast.json new file mode 100644 index 000000000..e43cc91b6 --- /dev/null +++ b/parser/testdata/03412_dynamic_in_arg_min_max/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001217986, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03412_dynamic_in_arg_min_max/metadata.json b/parser/testdata/03412_dynamic_in_arg_min_max/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03412_dynamic_in_arg_min_max/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03412_dynamic_in_arg_min_max/query.sql b/parser/testdata/03412_dynamic_in_arg_min_max/query.sql new file mode 100644 index 000000000..39d15db05 --- /dev/null +++ b/parser/testdata/03412_dynamic_in_arg_min_max/query.sql @@ -0,0 +1,7 @@ +drop table if exists test; +create table test (a UInt32, d Dynamic, ad Array(Dynamic), td Tuple(Dynamic), md Map(String, Dynamic), j JSON, x UInt32, y UInt32, z UInt32) engine=Memory; +insert into test select 1, 94, [94], tuple(94), map('a', 94), '{"a" : 94}', 1, 0, 3; +insert into test select 2, 40000, [40000], tuple(40000), map('a', 40000), '{"a" : 40000}', 1, 10, 3; +select x, y, z, argMax(d, a), argMax(ad, a), argMax(td, a), argMax(md, a), argMax(j, a), max(a), argMin(d, a), argMin(ad, a), argMin(td, a), argMin(md, a), argMin(j, a), min(a) from test group by x, y, z order by x, y, z; +drop table test; + diff --git a/parser/testdata/03412_materialized_view_to_distributed_different_headers/ast.json b/parser/testdata/03412_materialized_view_to_distributed_different_headers/ast.json new file mode 100644 index 000000000..4df1db988 --- /dev/null +++ b/parser/testdata/03412_materialized_view_to_distributed_different_headers/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001155207, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git 
a/parser/testdata/03412_materialized_view_to_distributed_different_headers/metadata.json b/parser/testdata/03412_materialized_view_to_distributed_different_headers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03412_materialized_view_to_distributed_different_headers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03412_materialized_view_to_distributed_different_headers/query.sql b/parser/testdata/03412_materialized_view_to_distributed_different_headers/query.sql new file mode 100644 index 000000000..842335d41 --- /dev/null +++ b/parser/testdata/03412_materialized_view_to_distributed_different_headers/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t0; +DROP VIEW IF EXISTS v0; +CREATE TABLE t1 (c0 Int,c1 Int) ENGINE = MergeTree() ORDER BY tuple(); +CREATE TABLE t0 (c0 Int,c1 Int) ENGINE = Distributed('test_shard_localhost', currentDatabase(), t1, c0); +CREATE MATERIALIZED VIEW v0 TO t0 (c0 LowCardinality(Int),c1 LowCardinality(Int)) AS (SELECT 1 AS c0, 1 AS c1); +SELECT c0::Int FROM v0; +DROP VIEW v0; +DROP TABLE t0; +DROP TABLE t1; + diff --git a/parser/testdata/03412_merge_final_prewhere/ast.json b/parser/testdata/03412_merge_final_prewhere/ast.json new file mode 100644 index 000000000..cb5eb724c --- /dev/null +++ b/parser/testdata/03412_merge_final_prewhere/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t (children 1)" + }, + { + "explain": " Identifier t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001290327, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03412_merge_final_prewhere/metadata.json b/parser/testdata/03412_merge_final_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03412_merge_final_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03412_merge_final_prewhere/query.sql b/parser/testdata/03412_merge_final_prewhere/query.sql new file mode 100644 index 000000000..f883d4215 --- /dev/null +++ b/parser/testdata/03412_merge_final_prewhere/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS tmerge; + +CREATE TABLE t +( + k Int64, + dt DateTime, + s String +) +ENGINE = ReplacingMergeTree +ORDER BY (k, dt); + +CREATE TABLE tmerge AS t ENGINE = Merge(currentDatabase(), '^t$'); + +INSERT INTO t +SELECT number, '2020-01-01 00:00:00', '' FROM numbers(10); + +SELECT count() FROM tmerge FINAL PREWHERE dt >= '2020-01-01 00:00:00'; + +DROP TABLE tmerge; +DROP TABLE t; diff --git a/parser/testdata/03413_analyzer_correlated_subqueries_bug_2/ast.json b/parser/testdata/03413_analyzer_correlated_subqueries_bug_2/ast.json new file mode 100644 index 000000000..2db403174 --- /dev/null +++ b/parser/testdata/03413_analyzer_correlated_subqueries_bug_2/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001048476, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03413_analyzer_correlated_subqueries_bug_2/metadata.json b/parser/testdata/03413_analyzer_correlated_subqueries_bug_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03413_analyzer_correlated_subqueries_bug_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03413_analyzer_correlated_subqueries_bug_2/query.sql b/parser/testdata/03413_analyzer_correlated_subqueries_bug_2/query.sql new file mode 100644 index 000000000..ae67b4822 --- /dev/null +++ b/parser/testdata/03413_analyzer_correlated_subqueries_bug_2/query.sql @@ -0,0 +1,55 @@ +set enable_analyzer = 1; +set allow_experimental_correlated_subqueries = 1; + +CREATE TABLE orders ( + o_orderkey Int32, + o_custkey Int32, + o_orderstatus String, + o_totalprice Decimal(15,2), + o_orderdate Date, + o_orderpriority String, + o_clerk String, + o_shippriority Int32, + o_comment String) +ORDER BY (o_orderkey); + +INSERT INTO orders SELECT * FROM generateRandom() LIMIT 10; + +CREATE TABLE lineitem ( + l_orderkey Int32, + l_partkey Int32, + l_suppkey Int32, + l_linenumber Int32, + l_quantity Decimal(15,2), + l_extendedprice Decimal(15,2), + l_discount Decimal(15,2), + l_tax Decimal(15,2), + l_returnflag String, + l_linestatus String, + l_shipdate Date, + l_commitdate Date, + l_receiptdate Date, + l_shipinstruct String, + l_shipmode String, + l_comment String) +ORDER BY (l_orderkey, l_linenumber); + +SELECT + o_orderpriority, + count(*) AS order_count +FROM +( + SELECT + o_orderpriority, + o_orderkey + FROM orders + WHERE (o_orderdate >= toDate('1993-07-01')) AND (o_orderdate < (toDate('1993-07-01') + toIntervalMonth('3'))) +) +WHERE exists(( + SELECT l_orderkey + FROM lineitem + WHERE (l_orderkey = o_orderkey) AND (l_commitdate < l_receiptdate) +)) +GROUP BY o_orderpriority +ORDER BY o_orderpriority ASC +FORMAT Null; diff --git a/parser/testdata/03413_dynamic_in_in/ast.json b/parser/testdata/03413_dynamic_in_in/ast.json new file mode 100644 index 000000000..6d0c0591f --- /dev/null +++ b/parser/testdata/03413_dynamic_in_in/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '42'" + }, + { + "explain": " Literal 'Dynamic'" + }, + { + "explain": " Literal UInt64_42" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.000992746, + "rows_read": 11, + "bytes_read": 399 + } +} diff --git a/parser/testdata/03413_dynamic_in_in/metadata.json b/parser/testdata/03413_dynamic_in_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03413_dynamic_in_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03413_dynamic_in_in/query.sql b/parser/testdata/03413_dynamic_in_in/query.sql new file mode 100644 index 000000000..77345950b --- /dev/null +++ b/parser/testdata/03413_dynamic_in_in/query.sql @@ -0,0 +1,15 @@ +select 42::Dynamic in 42; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select materialize(42)::Dynamic in 42; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select [42::Dynamic] in [42]; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select [materialize(42)::Dynamic] in [42]; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select tuple(map(42, 42::Dynamic)) in tuple(map(42, 42)); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select tuple(map(42, materialize(42)::Dynamic)) in tuple(map(42, 42)); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + 
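A note on the ast.json captured above for this test: ClickHouse's EXPLAIN AST output is stored as a flat list of "explain" strings whose leading spaces encode tree depth, one space per level (SelectWithUnionQuery at depth 0, its ExpressionList at depth 1, and so on). A small Go decoder can rebuild the hierarchy from that flat form; this is an illustrative sketch with hypothetical names, not code from this repository.

package main

import (
	"fmt"
	"strings"
)

// Node is one line of an EXPLAIN AST dump; nesting follows the
// one-leading-space-per-level convention visible in the ast.json files.
type Node struct {
	Label    string
	Children []*Node
}

// buildTree rebuilds the hierarchy from the flat "explain" lines.
// It assumes well-formed input, i.e. the depth never jumps by more than one.
func buildTree(lines []string) *Node {
	root := &Node{Label: "<root>"}
	stack := []*Node{root} // stack[d] holds the current parent at depth d
	for _, line := range lines {
		depth := len(line) - len(strings.TrimLeft(line, " "))
		n := &Node{Label: strings.TrimSpace(line)}
		stack[depth].Children = append(stack[depth].Children, n)
		stack = append(stack[:depth+1], n)
	}
	return root
}

func main() {
	tree := buildTree([]string{
		"SelectWithUnionQuery (children 1)",
		" ExpressionList (children 1)",
		"  SelectQuery (children 1)",
	})
	fmt.Println(tree.Children[0].Label) // SelectWithUnionQuery (children 1)
}

Fed the eleven explain lines above, the same decoder reproduces the Function in node whose two-child ExpressionList holds the CAST('42', 'Dynamic') and the Literal UInt64_42 from select 42::Dynamic in 42.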
+select '{}'::JSON in '{}'; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select materialize('{}'::JSON)::Dynamic in '{}'; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select ['{}'::JSON] in ['{}']; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select [materialize('{}')::JSON] in ['{}']; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select tuple(map(42, '{}'::JSON)) in tuple(map(42, '{}')); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +select tuple(map(42, materialize('{}')::JSON)) in tuple(map(42, '{}')); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + + diff --git a/parser/testdata/03413_experimental_settings_cannot_be_enabled_by_default/ast.json b/parser/testdata/03413_experimental_settings_cannot_be_enabled_by_default/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03413_experimental_settings_cannot_be_enabled_by_default/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03413_experimental_settings_cannot_be_enabled_by_default/metadata.json b/parser/testdata/03413_experimental_settings_cannot_be_enabled_by_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03413_experimental_settings_cannot_be_enabled_by_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03413_experimental_settings_cannot_be_enabled_by_default/query.sql b/parser/testdata/03413_experimental_settings_cannot_be_enabled_by_default/query.sql new file mode 100644 index 000000000..718eb63ad --- /dev/null +++ b/parser/testdata/03413_experimental_settings_cannot_be_enabled_by_default/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-random-settings + +-- It is not allowed to have experimental settings enabled by default. + +-- However, some settings in the experimental tier are meant to control another experimental feature, and then they can be enabled as long as the feature itself is disabled. +-- These are in the exceptions list inside NOT IN. 
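The two SELECTs just below implement that policy check and must each come back empty. As a standalone guard in Go — a sketch only; the clickhouse-go driver, the DSN, and the reporting are assumptions, not part of this patch — it might look like:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/ClickHouse/clickhouse-go/v2" // registers the "clickhouse" database/sql driver
)

func main() {
	db, err := sql.Open("clickhouse", "clickhouse://localhost:9000") // assumed local server
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Mirrors the first SELECT below: experimental Bool settings must default
	// to 0 unless they appear on the documented exception list.
	rows, err := db.Query(`SELECT name, value FROM system.settings
		WHERE tier = 'Experimental' AND type = 'Bool' AND value != '0'
		AND name NOT IN ('throw_on_unsupported_query_inside_transaction')`)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var name, value string
		if err := rows.Scan(&name, &value); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("experimental setting enabled by default: %s = %s\n", name, value)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}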
+SELECT name, value FROM system.settings WHERE tier = 'Experimental' AND type = 'Bool' AND value != '0' AND name NOT IN ('throw_on_unsupported_query_inside_transaction'); +SELECT name, value FROM system.merge_tree_settings WHERE tier = 'Experimental' AND type = 'Bool' AND value != '0' AND name NOT IN ('remove_rolled_back_parts_immediately'); diff --git a/parser/testdata/03413_group_by_all_in_subquery/ast.json b/parser/testdata/03413_group_by_all_in_subquery/ast.json new file mode 100644 index 000000000..83a58c3b4 --- /dev/null +++ b/parser/testdata/03413_group_by_all_in_subquery/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001036318, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03413_group_by_all_in_subquery/metadata.json b/parser/testdata/03413_group_by_all_in_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03413_group_by_all_in_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03413_group_by_all_in_subquery/query.sql b/parser/testdata/03413_group_by_all_in_subquery/query.sql new file mode 100644 index 000000000..93c94eec4 --- /dev/null +++ b/parser/testdata/03413_group_by_all_in_subquery/query.sql @@ -0,0 +1,32 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS t; +DROP TABLE IF EXISTS t_dist; + +CREATE TABLE t +( + `id` int, + `a` int, + `b` int +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO t VALUES (1, 2, 3); + +CREATE TABLE t_dist AS t +ENGINE = Distributed(test_cluster_two_shards_localhost, currentDatabase(), t, id); + +SELECT a +FROM +( + SELECT + a, + b, + count(*) AS v + FROM t_dist + GROUP BY ALL +) AS Z; + +DROP TABLE t_dist; +DROP TABLE t; diff --git a/parser/testdata/03414_analyzer_correlated_subqueries_in_function/ast.json b/parser/testdata/03414_analyzer_correlated_subqueries_in_function/ast.json new file mode 100644 index 000000000..14476de33 --- /dev/null +++ b/parser/testdata/03414_analyzer_correlated_subqueries_in_function/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.0013813, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03414_analyzer_correlated_subqueries_in_function/metadata.json b/parser/testdata/03414_analyzer_correlated_subqueries_in_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03414_analyzer_correlated_subqueries_in_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03414_analyzer_correlated_subqueries_in_function/query.sql b/parser/testdata/03414_analyzer_correlated_subqueries_in_function/query.sql new file mode 100644 index 000000000..31e728eea --- /dev/null +++ b/parser/testdata/03414_analyzer_correlated_subqueries_in_function/query.sql @@ -0,0 +1,14 @@ +set enable_analyzer = 1; +set allow_experimental_correlated_subqueries = 1; + +SELECT count() +FROM numbers(3) AS t +WHERE 1 IN ( + SELECT 1 + FROM numbers(3) + WHERE number = t.number +); -- { serverError NOT_IMPLEMENTED } + +SELECT count() +FROM numbers(3) AS t +WHERE (SELECT count() FROM numbers(3) WHERE number = t.number) IN (1); -- { serverError NOT_IMPLEMENTED } diff --git a/parser/testdata/03414_formatDateTime_compound_formatter_after_varsize_formatter/ast.json 
b/parser/testdata/03414_formatDateTime_compound_formatter_after_varsize_formatter/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03414_formatDateTime_compound_formatter_after_varsize_formatter/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03414_formatDateTime_compound_formatter_after_varsize_formatter/metadata.json b/parser/testdata/03414_formatDateTime_compound_formatter_after_varsize_formatter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03414_formatDateTime_compound_formatter_after_varsize_formatter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03414_formatDateTime_compound_formatter_after_varsize_formatter/query.sql b/parser/testdata/03414_formatDateTime_compound_formatter_after_varsize_formatter/query.sql new file mode 100644 index 000000000..a7054be04 --- /dev/null +++ b/parser/testdata/03414_formatDateTime_compound_formatter_after_varsize_formatter/query.sql @@ -0,0 +1,8 @@ +-- Tests that formatDateTime correctly handles the case of a variable-size formatter (e.g. %W aka. weekday 'Monday', 'Tuesday', etc.), +-- followed by a compound formatter (= a formatter that prints multiple components at once, e.g. %D aka. the American date '05/04/25') + +SELECT formatDateTime(toDateTime('2025-05-04'), '%W %D'); +SELECT formatDateTime(toDateTime('2025-05-04'), '%W %F'); +SELECT formatDateTime(toDateTime('2025-05-04'), '%W %r'); +SELECT formatDateTime(toDateTime('2025-05-04'), '%W %R'); +SELECT formatDateTime(toDateTime('2025-05-04'), '%W %T'); diff --git a/parser/testdata/03415_dont_highlight_probable_hashes/ast.json b/parser/testdata/03415_dont_highlight_probable_hashes/ast.json new file mode 100644 index 000000000..3bebf3e14 --- /dev/null +++ b/parser/testdata/03415_dont_highlight_probable_hashes/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": "  SelectQuery (children 1)" + }, + { + "explain": "   ExpressionList (children 1)" + }, + { + "explain": "    Function cityHash64 (children 1)" + }, + { + "explain": "     ExpressionList (children 1)" + }, + { + "explain": "      Literal ''" + }, + { + "explain": " Identifier Pretty" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001034481, + "rows_read": 8, + "bytes_read": 283 + } +} diff --git a/parser/testdata/03415_dont_highlight_probable_hashes/metadata.json b/parser/testdata/03415_dont_highlight_probable_hashes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03415_dont_highlight_probable_hashes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03415_dont_highlight_probable_hashes/query.sql b/parser/testdata/03415_dont_highlight_probable_hashes/query.sql new file mode 100644 index 000000000..fd5b5283f --- /dev/null +++ b/parser/testdata/03415_dont_highlight_probable_hashes/query.sql @@ -0,0 +1,2 @@ +SELECT cityHash64('') FORMAT Pretty; +SELECT 123456789 FORMAT Pretty; diff --git a/parser/testdata/03416_glue_chunks/ast.json b/parser/testdata/03416_glue_chunks/ast.json new file mode 100644 index 000000000..a939f4df9 --- /dev/null +++ b/parser/testdata/03416_glue_chunks/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { +
"elapsed": 0.001308362, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03416_glue_chunks/metadata.json b/parser/testdata/03416_glue_chunks/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03416_glue_chunks/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03416_glue_chunks/query.sql b/parser/testdata/03416_glue_chunks/query.sql new file mode 100644 index 000000000..b4e76d106 --- /dev/null +++ b/parser/testdata/03416_glue_chunks/query.sql @@ -0,0 +1,20 @@ +SET output_format_pretty_row_numbers = 1; +SET output_format_pretty_glue_chunks = 1; +SET output_format_pretty_squash_consecutive_ms = 0; +SET max_block_size = 2; + +SELECT sleep(0.01), number FROM numbers(11) FORMAT PrettyCompact; +SELECT sleep(0.01), number FROM numbers(11) FORMAT PrettySpace; +SELECT sleep(0.01), number FROM numbers(11) FORMAT Pretty; + +SET output_format_pretty_row_numbers = 0; + +SELECT sleep(0.01), number FROM numbers(11) FORMAT PrettyCompact; +SELECT sleep(0.01), number FROM numbers(11) FORMAT PrettySpace; +SELECT sleep(0.01), number FROM numbers(11) FORMAT Pretty; + +SET output_format_pretty_display_footer_column_names_min_rows = 1; + +SELECT sleep(0.01), number FROM numbers(11) FORMAT PrettyCompact; +SELECT sleep(0.01), number FROM numbers(11) FORMAT PrettySpace; +SELECT sleep(0.01), number FROM numbers(11) FORMAT Pretty; diff --git a/parser/testdata/03432_input_format_parquet_max_block_size_validation/ast.json b/parser/testdata/03432_input_format_parquet_max_block_size_validation/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03432_input_format_parquet_max_block_size_validation/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03432_input_format_parquet_max_block_size_validation/metadata.json b/parser/testdata/03432_input_format_parquet_max_block_size_validation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03432_input_format_parquet_max_block_size_validation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03432_input_format_parquet_max_block_size_validation/query.sql b/parser/testdata/03432_input_format_parquet_max_block_size_validation/query.sql new file mode 100644 index 000000000..65ebe10cd --- /dev/null +++ b/parser/testdata/03432_input_format_parquet_max_block_size_validation/query.sql @@ -0,0 +1,11 @@ +-- Tags: no-fasttest + +-- Test that setting input_format_parquet_max_block_size to 0 is not allowed +SET input_format_parquet_max_block_size = 0; -- { serverError BAD_ARGUMENTS } + +-- Test that negative values are not allowed +SET input_format_parquet_max_block_size = -1; -- { serverError CANNOT_CONVERT_TYPE } + +-- Test that valid positive values are allowed +SET input_format_parquet_max_block_size = 1024; +SELECT 'a' INTO OUTFILE '/dev/null' TRUNCATE FORMAT Parquet SETTINGS input_format_parquet_max_block_size = 1024; diff --git a/parser/testdata/03440_no_glue_totals/ast.json b/parser/testdata/03440_no_glue_totals/ast.json new file mode 100644 index 000000000..ea9c9f851 --- /dev/null +++ b/parser/testdata/03440_no_glue_totals/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001151822, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03440_no_glue_totals/metadata.json 
b/parser/testdata/03440_no_glue_totals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03440_no_glue_totals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03440_no_glue_totals/query.sql b/parser/testdata/03440_no_glue_totals/query.sql new file mode 100644 index 000000000..0574968d2 --- /dev/null +++ b/parser/testdata/03440_no_glue_totals/query.sql @@ -0,0 +1,7 @@ +SET output_format_pretty_row_numbers = 1; +SET output_format_pretty_glue_chunks = 1; +SET output_format_pretty_squash_consecutive_ms = 0; +SET max_threads = 1; +SET max_block_size = 1; + +SELECT number, count() FROM numbers(5) GROUP BY number WITH TOTALS ORDER BY number FORMAT PrettyCompact; diff --git a/parser/testdata/03441_deltalake_clickhouse_public_datasets/ast.json b/parser/testdata/03441_deltalake_clickhouse_public_datasets/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03441_deltalake_clickhouse_public_datasets/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03441_deltalake_clickhouse_public_datasets/metadata.json b/parser/testdata/03441_deltalake_clickhouse_public_datasets/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03441_deltalake_clickhouse_public_datasets/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03441_deltalake_clickhouse_public_datasets/query.sql b/parser/testdata/03441_deltalake_clickhouse_public_datasets/query.sql new file mode 100644 index 000000000..c8c4ae73c --- /dev/null +++ b/parser/testdata/03441_deltalake_clickhouse_public_datasets/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest, no-msan +-- Tag no-fasttest: Depends on AWS +-- Tag no-msan: delta-kernel is not built with msan + +SELECT count() +FROM deltaLake('https://clickhouse-public-datasets.s3.amazonaws.com/delta_lake/hits/', NOSIGN, SETTINGS allow_experimental_delta_kernel_rs = 1); diff --git a/parser/testdata/03441_deltalake_clickhouse_virtual_columns/ast.json b/parser/testdata/03441_deltalake_clickhouse_virtual_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03441_deltalake_clickhouse_virtual_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03441_deltalake_clickhouse_virtual_columns/metadata.json b/parser/testdata/03441_deltalake_clickhouse_virtual_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03441_deltalake_clickhouse_virtual_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03441_deltalake_clickhouse_virtual_columns/query.sql b/parser/testdata/03441_deltalake_clickhouse_virtual_columns/query.sql new file mode 100644 index 000000000..2d1d5a906 --- /dev/null +++ b/parser/testdata/03441_deltalake_clickhouse_virtual_columns/query.sql @@ -0,0 +1,9 @@ +-- Tags: no-fasttest, no-msan +-- Tag no-fasttest: Depends on AWS +-- Tag no-msan: delta-kernel is not built with msan + +SET parallel_replicas_for_cluster_engines = 0; + +SELECT _data_lake_snapshot_version +FROM deltaLake('https://clickhouse-public-datasets.s3.amazonaws.com/delta_lake/hits/', NOSIGN) +LIMIT 1; \ No newline at end of file diff --git a/parser/testdata/03442_alter_delete_empty_part/ast.json b/parser/testdata/03442_alter_delete_empty_part/ast.json new file mode 100644 index 000000000..66d8137bd --- /dev/null +++ b/parser/testdata/03442_alter_delete_empty_part/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + 
[ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_delete_empty_part (children 1)" + }, + { + "explain": " Identifier t_delete_empty_part" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001146275, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/03442_alter_delete_empty_part/metadata.json b/parser/testdata/03442_alter_delete_empty_part/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03442_alter_delete_empty_part/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03442_alter_delete_empty_part/query.sql b/parser/testdata/03442_alter_delete_empty_part/query.sql new file mode 100644 index 000000000..49ed5e575 --- /dev/null +++ b/parser/testdata/03442_alter_delete_empty_part/query.sql @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS t_delete_empty_part; + +CREATE TABLE t_delete_empty_part (a UInt64, b UInt64) +ENGINE = MergeTree ORDER BY b PARTITION BY a; + +INSERT INTO t_delete_empty_part SELECT 1, number FROM numbers(1000); +INSERT INTO t_delete_empty_part SELECT 2, number FROM numbers(1000); +INSERT INTO t_delete_empty_part SELECT 3, number FROM numbers(2000, 1000); + +SET mutations_sync = 2; +ALTER TABLE t_delete_empty_part DELETE WHERE a = 2 OR b < 500; + +SELECT count() FROM t_delete_empty_part; + +SYSTEM FLUSH LOGS part_log; + +SELECT + part_name, + ProfileEvents['MutationTotalParts'], + ProfileEvents['MutationUntouchedParts'], + ProfileEvents['MutationCreatedEmptyParts'] +FROM system.part_log +WHERE database = currentDatabase() AND table = 't_delete_empty_part' AND event_type = 'MutatePart' +ORDER BY part_name; + +DROP TABLE t_delete_empty_part; diff --git a/parser/testdata/03442_alter_delete_empty_part_rmt/ast.json b/parser/testdata/03442_alter_delete_empty_part_rmt/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03442_alter_delete_empty_part_rmt/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03442_alter_delete_empty_part_rmt/metadata.json b/parser/testdata/03442_alter_delete_empty_part_rmt/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03442_alter_delete_empty_part_rmt/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03442_alter_delete_empty_part_rmt/query.sql b/parser/testdata/03442_alter_delete_empty_part_rmt/query.sql new file mode 100644 index 000000000..20875a577 --- /dev/null +++ b/parser/testdata/03442_alter_delete_empty_part_rmt/query.sql @@ -0,0 +1,32 @@ +-- Tags: no-replicated-database +-- no-replicated-database: test relies on system.part_log but mutation can be executed on the second replica + +DROP TABLE IF EXISTS t_delete_empty_part_rmt; + +CREATE TABLE t_delete_empty_part_rmt (a UInt64, b UInt64) +ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/t_delete_empty_part_rmt', '1') +ORDER BY b PARTITION BY a; + +SET insert_keeper_fault_injection_probability = 0.0; + +INSERT INTO t_delete_empty_part_rmt SELECT 1, number FROM numbers(1000); +INSERT INTO t_delete_empty_part_rmt SELECT 2, number FROM numbers(1000); +INSERT INTO t_delete_empty_part_rmt SELECT 3, number FROM numbers(2000, 1000); + +SET mutations_sync = 2; +ALTER TABLE t_delete_empty_part_rmt DELETE WHERE a = 2 OR b < 500; + +SELECT count() FROM t_delete_empty_part_rmt; + +SYSTEM FLUSH LOGS part_log; + +SELECT + part_name, + ProfileEvents['MutationTotalParts'], + ProfileEvents['MutationUntouchedParts'], + 
ProfileEvents['MutationCreatedEmptyParts'] +FROM system.part_log +WHERE database = currentDatabase() AND table = 't_delete_empty_part_rmt' AND event_type = 'MutatePart' +ORDER BY part_name; + +DROP TABLE t_delete_empty_part_rmt; diff --git a/parser/testdata/03442_detach_view/ast.json b/parser/testdata/03442_detach_view/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03442_detach_view/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03442_detach_view/metadata.json b/parser/testdata/03442_detach_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03442_detach_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03442_detach_view/query.sql b/parser/testdata/03442_detach_view/query.sql new file mode 100644 index 000000000..ed17a9d48 --- /dev/null +++ b/parser/testdata/03442_detach_view/query.sql @@ -0,0 +1,8 @@ +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; +CREATE DATABASE IF NOT EXISTS {CLICKHOUSE_DATABASE:Identifier} ENGINE = Memory; +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.v0; +CREATE MATERIALIZED VIEW {CLICKHOUSE_DATABASE:Identifier}.v0 REFRESH AFTER 1 SECOND APPEND ENGINE = MergeTree() ORDER BY tuple() AS (SELECT 1 c0); +DETACH TABLE {CLICKHOUSE_DATABASE:Identifier}.v0 PERMANENTLY; -- { serverError NOT_IMPLEMENTED } +ALTER TABLE {CLICKHOUSE_DATABASE:Identifier}.v0 MODIFY REFRESH AFTER 1 SECOND APPEND; +DROP TABLE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}.v0; +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; diff --git a/parser/testdata/03442_json_duplicate_path/ast.json b/parser/testdata/03442_json_duplicate_path/ast.json new file mode 100644 index 000000000..65d80d40e --- /dev/null +++ b/parser/testdata/03442_json_duplicate_path/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '{\"a\" : 42, \"a\" : {\"b\" : 42}}'" + }, + { + "explain": " Literal 'JSON'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001163994, + "rows_read": 8, + "bytes_read": 307 + } +} diff --git a/parser/testdata/03442_json_duplicate_path/metadata.json b/parser/testdata/03442_json_duplicate_path/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03442_json_duplicate_path/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03442_json_duplicate_path/query.sql b/parser/testdata/03442_json_duplicate_path/query.sql new file mode 100644 index 000000000..400dbce0c --- /dev/null +++ b/parser/testdata/03442_json_duplicate_path/query.sql @@ -0,0 +1,5 @@ +select '{"a" : 42, "a" : {"b" : 42}}'::JSON; -- {serverError INCORRECT_DATA} +select '{"a" : {"b" : 42}, "a" : 42}'::JSON; -- {serverError INCORRECT_DATA} +select '{"a" : 42, "a" : {"b" : 42}}'::JSON settings type_json_skip_duplicated_paths=1; +select '{"a" : {"b" : 42}, "a" : 42}'::JSON settings type_json_skip_duplicated_paths=1; + diff --git a/parser/testdata/03442_lightweight_deletes_on_fly/ast.json b/parser/testdata/03442_lightweight_deletes_on_fly/ast.json new file mode 100644 index 000000000..000b52e7e --- /dev/null 
+++ b/parser/testdata/03442_lightweight_deletes_on_fly/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_deletes (children 1)" + }, + { + "explain": " Identifier test_deletes" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001048831, + "rows_read": 2, + "bytes_read": 76 + } +} diff --git a/parser/testdata/03442_lightweight_deletes_on_fly/metadata.json b/parser/testdata/03442_lightweight_deletes_on_fly/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03442_lightweight_deletes_on_fly/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03442_lightweight_deletes_on_fly/query.sql b/parser/testdata/03442_lightweight_deletes_on_fly/query.sql new file mode 100644 index 000000000..5d34b6fcf --- /dev/null +++ b/parser/testdata/03442_lightweight_deletes_on_fly/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS test_deletes; + +CREATE TABLE test_deletes (a UInt64) ENGINE = MergeTree ORDER BY a; + +INSERT INTO test_deletes VALUES (1) (2) (3); + +SYSTEM STOP MERGES test_deletes; +SET mutations_sync = 0; +SET lightweight_deletes_sync = 0; + +ALTER TABLE test_deletes DELETE WHERE a = 1 SETTINGS mutations_sync = 0; +DELETE FROM test_deletes WHERE a = 2 SETTINGS lightweight_deletes_sync = 0; + +SELECT a FROM test_deletes SETTINGS apply_mutations_on_fly = 1; + +DROP TABLE test_deletes; + +CREATE TABLE test_deletes (a UInt64, b UInt64) ENGINE = MergeTree ORDER BY a; + +INSERT INTO test_deletes SELECT number, 0 FROM numbers(10000); + +DELETE FROM test_deletes WHERE a >= 100 AND a < 200 SETTINGS lightweight_deletes_sync = 1; + +SYSTEM STOP MERGES test_deletes; + +ALTER TABLE test_deletes UPDATE b = 1 WHERE a >= 150 AND a < 250 SETTINGS mutations_sync = 0; +DELETE FROM test_deletes WHERE b = 1 SETTINGS lightweight_deletes_sync = 0; + +SELECT count() FROM test_deletes SETTINGS apply_mutations_on_fly = 1; + +DROP TABLE test_deletes; diff --git a/parser/testdata/03442_string_bytes_functions/ast.json b/parser/testdata/03442_string_bytes_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03442_string_bytes_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03442_string_bytes_functions/metadata.json b/parser/testdata/03442_string_bytes_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03442_string_bytes_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03442_string_bytes_functions/query.sql b/parser/testdata/03442_string_bytes_functions/query.sql new file mode 100644 index 000000000..a6ceafe16 --- /dev/null +++ b/parser/testdata/03442_string_bytes_functions/query.sql @@ -0,0 +1,61 @@ +-- { echo } + +SELECT stringBytesUniq('Hello'); +SELECT stringBytesUniq(''); +SELECT stringBytesUniq('aaaa'); +SELECT stringBytesUniq('abcABC123'); +SELECT stringBytesUniq(toNullable('Hello')); +SELECT stringBytesUniq(toLowCardinality('Hello')); + +SELECT stringBytesEntropy('Hello'); +SELECT stringBytesEntropy(''); +SELECT stringBytesEntropy('aaaa'); +SELECT stringBytesEntropy('abcABC123'); +SELECT stringBytesEntropy(toNullable('Hello')); +SELECT stringBytesEntropy(toLowCardinality('Hello')); + +SELECT stringBytesUniq(NULL); +SELECT stringBytesEntropy(NULL); +SELECT stringBytesUniq(toNullable(NULL)); +SELECT stringBytesEntropy(toNullable(NULL)); + +SELECT stringBytesUniq(unhex(concat( + 
'000102030405060708090A0B0C0D0E0F', + '101112131415161718191A1B1C1D1E1F', + '202122232425262728292A2B2C2D2E2F', + '303132333435363738393A3B3C3D3E3F', + '404142434445464748494A4B4C4D4E4F', + '505152535455565758595A5B5C5D5E5F', + '606162636465666768696A6B6C6D6E6F', + '707172737475767778797A7B7C7D7E7F', + '808182838485868788898A8B8C8D8E8F', + '909192939495969798999A9B9C9D9E9F', + 'A0A1A2A3A4A5A6A7A8A9AAABACADAEAF', + 'B0B1B2B3B4B5B6B7B8B9BABBBCBDBEBF', + 'C0C1C2C3C4C5C6C7C8C9CACBCCCDCECF', + 'D0D1D2D3D4D5D6D7D8D9DADBDCDDDEDF', + 'E0E1E2E3E4E5E6E7E8E9EAEBECEDEEEF', + 'F0F1F2F3F4F5F6F7F8F9FAFBFCFDFEFF' +))); +SELECT stringBytesEntropy(unhex(concat( + '000102030405060708090A0B0C0D0E0F', + '101112131415161718191A1B1C1D1E1F', + '202122232425262728292A2B2C2D2E2F', + '303132333435363738393A3B3C3D3E3F', + '404142434445464748494A4B4C4D4E4F', + '505152535455565758595A5B5C5D5E5F', + '606162636465666768696A6B6C6D6E6F', + '707172737475767778797A7B7C7D7E7F', + '808182838485868788898A8B8C8D8E8F', + '909192939495969798999A9B9C9D9E9F', + 'A0A1A2A3A4A5A6A7A8A9AAABACADAEAF', + 'B0B1B2B3B4B5B6B7B8B9BABBBCBDBEBF', + 'C0C1C2C3C4C5C6C7C8C9CACBCCCDCECF', + 'D0D1D2D3D4D5D6D7D8D9DADBDCDDDEDF', + 'E0E1E2E3E4E5E6E7E8E9EAEBECEDEEEF', + 'F0F1F2F3F4F5F6F7F8F9FAFBFCFDFEFF' +))); + +SELECT stringBytesUniq(s) FROM (SELECT arrayJoin(['Hello', 'World', 'ClickHouse']) AS s); +SELECT stringBytesEntropy(s) FROM (SELECT arrayJoin(['Hello', 'World', 'ClickHouse']) AS s); +SELECT arrayJoin(['Hello', 'World', 'Foo', 'Bar', 'ClickHouse']) AS str, stringBytesUniq(str), stringBytesEntropy(str); \ No newline at end of file diff --git a/parser/testdata/03443_alias_with_asterisk/ast.json b/parser/testdata/03443_alias_with_asterisk/ast.json new file mode 100644 index 000000000..20f882fa9 --- /dev/null +++ b/parser/testdata/03443_alias_with_asterisk/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t0 (children 3)" + }, + { + "explain": " Identifier t0" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration c0 (children 2)" + }, + { + "explain": " DataType Int" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Asterisk" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.000958859, + "rows_read": 13, + "bytes_read": 421 + } +} diff --git a/parser/testdata/03443_alias_with_asterisk/metadata.json b/parser/testdata/03443_alias_with_asterisk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03443_alias_with_asterisk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03443_alias_with_asterisk/query.sql b/parser/testdata/03443_alias_with_asterisk/query.sql new file mode 100644 index 000000000..936beaf1c --- /dev/null +++ b/parser/testdata/03443_alias_with_asterisk/query.sql @@ -0,0 +1 @@ +CREATE TABLE t0 (c0 Int ALIAS if(NULL, 1, *)) ENGINE = Memory; -- { serverError UNKNOWN_IDENTIFIER } \ No newline at end of file diff --git a/parser/testdata/03443_index_match_alternatives/ast.json b/parser/testdata/03443_index_match_alternatives/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/03443_index_match_alternatives/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03443_index_match_alternatives/metadata.json b/parser/testdata/03443_index_match_alternatives/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03443_index_match_alternatives/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03443_index_match_alternatives/query.sql b/parser/testdata/03443_index_match_alternatives/query.sql new file mode 100644 index 000000000..6e8d65648 --- /dev/null +++ b/parser/testdata/03443_index_match_alternatives/query.sql @@ -0,0 +1,40 @@ + +DROP TABLE IF EXISTS 03443_data; + +CREATE TABLE 03443_data +( + id Int32, + name String, + INDEX idx_name name TYPE ngrambf_v1(1, 1024, 3, 0) GRANULARITY 1 +) +ENGINE = MergeTree ORDER BY id SETTINGS index_granularity = 1 +AS +SELECT 1, 'John' UNION ALL +SELECT 2, 'Ksenia' UNION ALL +SELECT 3, 'Alice'; + +SELECT '-- Without index'; +SELECT name FROM 03443_data WHERE match(name, 'J|XYZ') SETTINGS use_skip_indexes = 0; +SELECT name FROM 03443_data WHERE match(name, 'XYZ|J') SETTINGS use_skip_indexes = 0; +SELECT name FROM 03443_data WHERE match(name, '[J]|XYZ') SETTINGS use_skip_indexes = 0; +SELECT name FROM 03443_data WHERE match(name, 'XYZ|[J]') SETTINGS use_skip_indexes = 0; + +SELECT '-- With index'; +SELECT name FROM 03443_data WHERE match(name, 'J|XYZ') SETTINGS use_skip_indexes = 1; +SELECT name FROM 03443_data WHERE match(name, 'XYZ|J') SETTINGS use_skip_indexes = 1; +SELECT name FROM 03443_data WHERE match(name, '[J]|XYZ') SETTINGS use_skip_indexes = 1; +SELECT name FROM 03443_data WHERE match(name, 'XYZ|[J]') SETTINGS use_skip_indexes = 1; + +SELECT '-- Assert selected granules'; + +SET parallel_replicas_local_plan = 1; + +SELECT trim(leading ' ' from explain) FROM (EXPLAIN indexes=1 SELECT name FROM 03443_data WHERE match(name, 'J|XYZ')) WHERE explain LIKE '%Granules: %' SETTINGS use_skip_indexes = 1; +SELECT ''; +SELECT trim(leading ' ' from explain) FROM (EXPLAIN indexes=1 SELECT name FROM 03443_data WHERE match(name, 'XYZ|J')) WHERE explain LIKE '%Granules: %' SETTINGS use_skip_indexes = 1; +SELECT ''; +SELECT trim(leading ' ' from explain) FROM (EXPLAIN indexes=1 SELECT name FROM 03443_data WHERE match(name, '[J]|XYZ')) WHERE explain LIKE '%Granules: %' SETTINGS use_skip_indexes = 1; +SELECT ''; +SELECT trim(leading ' ' from explain) FROM (EXPLAIN indexes=1 SELECT name FROM 03443_data WHERE match(name, 'XYZ|[J]')) WHERE explain LIKE '%Granules: %' SETTINGS use_skip_indexes = 1; + +DROP TABLE 03443_data; diff --git a/parser/testdata/03443_part_starting_offset/ast.json b/parser/testdata/03443_part_starting_offset/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03443_part_starting_offset/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03443_part_starting_offset/metadata.json b/parser/testdata/03443_part_starting_offset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03443_part_starting_offset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03443_part_starting_offset/query.sql b/parser/testdata/03443_part_starting_offset/query.sql new file mode 100644 index 000000000..ed3d8a9f2 --- /dev/null +++ b/parser/testdata/03443_part_starting_offset/query.sql @@ -0,0 +1,27 @@ +-- { echo ON } + +drop table if exists test; + +-- disable merge +create table test (i int, j int, projection p (select *, 
_part_offset order by j)) engine MergeTree order by i settings index_granularity = 1, max_bytes_to_merge_at_max_space_in_pool = 1; + +-- make 5 parts +insert into test select number, 10 - number from numbers(5); +insert into test select number, 10 - number from numbers(5); +insert into test select number, 10 - number from numbers(5); +insert into test select number, 10 - number from numbers(5); +insert into test select number, 10 - number from numbers(5); + +-- verify _part_starting_offset and _part_offset in parent part and projection +select _part, _part_starting_offset, _part_offset from test order by all; +select _part, _part_starting_offset, _part_offset from test where j = 8 order by all; + +-- make sure key analysis works correctly +select *, _part_starting_offset + _part_offset from test where _part_starting_offset + _part_offset = 8 settings parallel_replicas_local_plan = 0, max_rows_to_read = 1; +select *, _part_offset + _part_starting_offset from test where _part_offset + _part_starting_offset = 8 settings parallel_replicas_local_plan = 0, max_rows_to_read = 1; + +-- from fuzzer +select * from test prewhere 8 = (_part_offset + _part_starting_offset) where 8 = (_part_offset + _part_starting_offset) settings parallel_replicas_local_plan = 0, max_rows_to_read = 1; +select * from test prewhere (8 = (_part_starting_offset * _part_offset)) AND 3 WHERE 8 = (_part_starting_offset + _part_offset); + +drop table test; diff --git a/parser/testdata/03443_pr_lazy_materialization/ast.json b/parser/testdata/03443_pr_lazy_materialization/ast.json new file mode 100644 index 000000000..aefbec75b --- /dev/null +++ b/parser/testdata/03443_pr_lazy_materialization/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery pr_tt (children 1)" + }, + { + "explain": " Identifier pr_tt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001488098, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/03443_pr_lazy_materialization/metadata.json b/parser/testdata/03443_pr_lazy_materialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03443_pr_lazy_materialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03443_pr_lazy_materialization/query.sql b/parser/testdata/03443_pr_lazy_materialization/query.sql new file mode 100644 index 000000000..7ae28239c --- /dev/null +++ b/parser/testdata/03443_pr_lazy_materialization/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS pr_tt; +CREATE TABLE pr_tt (k UInt64, v String, blob String) ENGINE=MergeTree() ORDER BY tuple() settings index_granularity=100; +INSERT INTO pr_tt SELECT number, toString(number), repeat('blob_', number % 10) FROM numbers(1_000_000); + +-- make sure the optimization is enabled +set enable_analyzer=1, query_plan_optimize_lazy_materialization=true, query_plan_max_limit_for_lazy_materialization=10; +SET enable_parallel_replicas = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_for_non_replicated_merge_tree = 1; + +select trimLeft(explain) as s from (EXPLAIN +SELECT + v, + blob +FROM pr_tt +ORDER BY k ASC +LIMIT 10 settings parallel_replicas_local_plan=1) where s ilike 'LazilyRead%'; + +SELECT + v, + blob +FROM pr_tt +ORDER BY k +LIMIT 10; + +DROP TABLE pr_tt; diff --git a/parser/testdata/03443_projection_sparse/ast.json b/parser/testdata/03443_projection_sparse/ast.json new file mode 100644 index 
000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03443_projection_sparse/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03443_projection_sparse/metadata.json b/parser/testdata/03443_projection_sparse/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03443_projection_sparse/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03443_projection_sparse/query.sql b/parser/testdata/03443_projection_sparse/query.sql new file mode 100644 index 000000000..50f257a22 --- /dev/null +++ b/parser/testdata/03443_projection_sparse/query.sql @@ -0,0 +1,28 @@ + +DROP TABLE IF EXISTS t_projection_sparse; +CREATE TABLE t_projection_sparse +( + `id` String, + `val` AggregateFunction(sum, UInt64), + PROJECTION projection_traces_by_id + ( + SELECT + id, + finalizeAggregation(val) + ORDER BY finalizeAggregation(val) + ) +) +ENGINE = AggregatingMergeTree +ORDER BY id +SETTINGS deduplicate_merge_projection_mode = 'rebuild', index_granularity = 1; + +INSERT INTO t_projection_sparse VALUES ('aa', initializeAggregation('sumState', 0::UInt64)); +INSERT INTO t_projection_sparse VALUES ('aa', initializeAggregation('sumState', 0::UInt64)); +INSERT INTO t_projection_sparse VALUES ('bb', initializeAggregation('sumState', 0::UInt64)); + +OPTIMIZE TABLE t_projection_sparse FINAL; +OPTIMIZE TABLE t_projection_sparse FINAL; + +SELECT count() FROM t_projection_sparse WHERE finalizeAggregation(val) = 0; + +DROP TABLE t_projection_sparse; diff --git a/parser/testdata/03444_analyzer_resolve_alias_columns/ast.json b/parser/testdata/03444_analyzer_resolve_alias_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03444_analyzer_resolve_alias_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03444_analyzer_resolve_alias_columns/metadata.json b/parser/testdata/03444_analyzer_resolve_alias_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03444_analyzer_resolve_alias_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03444_analyzer_resolve_alias_columns/query.sql b/parser/testdata/03444_analyzer_resolve_alias_columns/query.sql new file mode 100644 index 000000000..ee8136b47 --- /dev/null +++ b/parser/testdata/03444_analyzer_resolve_alias_columns/query.sql @@ -0,0 +1,25 @@ +CREATE TABLE users ( + uid Int16, + name String, + age Int16, + v Array(Int16) ALIAS arrayMap(x -> age, array(name)) +) ENGINE=Memory; + +INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users VALUES (6666, 'Ksenia', 48); +INSERT INTO users VALUES (8888, 'Alice', 50); + +SELECT * FROM users FORMAT Null; + +CREATE TABLE out1 + ( + id UInt64, + j JSON, + name Array(UInt32) ALIAS arrayMap(x -> toUInt32(x), JSONAllPaths(j)), + value Array(Array(UInt32)) ALIAS arrayMap(x -> JSONExtract(CAST(j, 'String'), indexOf(name, x), 'Array(UInt32)'), name) +) +ORDER BY id; + +INSERT INTO out1 SELECT 42, '{"a" : 42}'; + +SELECT * FROM out1 FORMAT Null; diff --git a/parser/testdata/03444_case_with_expression_exception/ast.json b/parser/testdata/03444_case_with_expression_exception/ast.json new file mode 100644 index 000000000..cf7f2f7a5 --- /dev/null +++ b/parser/testdata/03444_case_with_expression_exception/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" 
+ }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function caseWithExpression (children 1)" + }, + { + "explain": " ExpressionList (children 5)" + }, + { + "explain": " Literal 'C'" + }, + { + "explain": " Literal 'A'" + }, + { + "explain": " Literal Bool_1" + }, + { + "explain": " Literal 'B'" + }, + { + "explain": " Literal Bool_0" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001073705, + "rows_read": 11, + "bytes_read": 372 + } +} diff --git a/parser/testdata/03444_case_with_expression_exception/metadata.json b/parser/testdata/03444_case_with_expression_exception/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03444_case_with_expression_exception/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03444_case_with_expression_exception/query.sql b/parser/testdata/03444_case_with_expression_exception/query.sql new file mode 100644 index 000000000..6a1fb051d --- /dev/null +++ b/parser/testdata/03444_case_with_expression_exception/query.sql @@ -0,0 +1 @@ +SELECT caseWithExpression('C', 'A', true, 'B', false); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03444_explain_asterisk/ast.json b/parser/testdata/03444_explain_asterisk/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03444_explain_asterisk/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03444_explain_asterisk/metadata.json b/parser/testdata/03444_explain_asterisk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03444_explain_asterisk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03444_explain_asterisk/query.sql b/parser/testdata/03444_explain_asterisk/query.sql new file mode 100644 index 000000000..3ea88f899 --- /dev/null +++ b/parser/testdata/03444_explain_asterisk/query.sql @@ -0,0 +1,11 @@ +-- Fuzzing `Can't set alias of * of Asterisk on alias` +DROP TABLE IF EXISTS t1, t2, t3__fuzz_0; +CREATE TABLE t1 (`a` UInt64, `b` UInt64) ENGINE = Log; +CREATE TABLE t2 (`a` UInt64, `b` UInt64) ENGINE = Log; +CREATE TABLE t3__fuzz_0 (`a` LowCardinality(UInt64), `b` UInt64) ENGINE = Log SETTINGS allow_suspicious_low_cardinality_types=1; + +SET enable_analyzer = 0; +EXPLAIN SYNTAX SELECT * FROM t1, t2, (SELECT toNullable(10), *, isZeroOrNull(10), 10, *, 10, *, *, 10, *, *, *, assumeNotNull(materialize(10)), 10 IS NOT NULL, a AS x FROM t3__fuzz_0 WHERE (toNullable(toUInt256(1)) + a) = b) AS t3 WHERE if(t2.b > 0, t2.a, 0) = t1.a ORDER BY t3.x ASC NULLS FIRST, t2.a DESC NULLS LAST, t1.a DESC NULLS FIRST; + +SET enable_analyzer = 1; +EXPLAIN SYNTAX SELECT * FROM t1, t2, (SELECT toNullable(10), *, isZeroOrNull(10), 10, *, 10, *, *, 10, *, *, *, assumeNotNull(materialize(10)), 10 IS NOT NULL, a AS x FROM t3__fuzz_0 WHERE (toNullable(toUInt256(1)) + a) = b) AS t3 WHERE if(t2.b > 0, t2.a, 0) = t1.a ORDER BY t3.x ASC NULLS FIRST, t2.a DESC NULLS LAST, t1.a DESC NULLS FIRST; diff --git a/parser/testdata/03444_flip_coordinates/ast.json b/parser/testdata/03444_flip_coordinates/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03444_flip_coordinates/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03444_flip_coordinates/metadata.json b/parser/testdata/03444_flip_coordinates/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03444_flip_coordinates/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03444_flip_coordinates/query.sql b/parser/testdata/03444_flip_coordinates/query.sql new file mode 100644 index 000000000..4180101df --- /dev/null +++ b/parser/testdata/03444_flip_coordinates/query.sql @@ -0,0 +1,23 @@ +-- {echo} +SELECT flipCoordinates(CAST((10.0, 20.0) AS Point)); + +SELECT flipCoordinates(CAST([(10, 20), (30, 40), (50, 60)] AS LineString)); + +WITH CAST([[(0, 0), (10, 0), (10, 10), (0, 10), (0, 0)]] AS Polygon) AS poly +SELECT flipCoordinates(poly); + +WITH CAST([[[(0, 0), (10, 0), (10, 10), (0, 10), (0, 0)]]] AS MultiPolygon) AS mpoly +SELECT flipCoordinates(mpoly); + +WITH CAST([ + [(0, 0), (100, 0), (100, 100), (0, 100), (0, 0)], + [(25, 25), (75, 25), (75, 75), (25, 75), (25, 25)] +] AS Polygon) AS poly_with_hole +SELECT flipCoordinates(poly_with_hole); + +WITH CAST([[(10, 20), (30, 40)], [(50, 60), (70, 80)]] AS MultiLineString) AS multiline +SELECT flipCoordinates(multiline); + +SELECT flipCoordinates(([[[(0, 0), (10, 0), (10, 10), (0, 10)]], [[(20, 20), (50, 20), (50, 50), (20, 50)],[(30, 30), (50, 50), (50, 30)]]]::MultiPolygon)); + +SELECT flipCoordinates((1.23, 4.56)::Point), (([(1.23, 4.56)::Point, (2.34, 5.67)::Point])::Ring); diff --git a/parser/testdata/03444_lm_block_mismatch/ast.json b/parser/testdata/03444_lm_block_mismatch/ast.json new file mode 100644 index 000000000..23f1225b4 --- /dev/null +++ b/parser/testdata/03444_lm_block_mismatch/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_03444_lazy (children 1)" + }, + { + "explain": " Identifier test_03444_lazy" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001031433, + "rows_read": 2, + "bytes_read": 82 + } +} diff --git a/parser/testdata/03444_lm_block_mismatch/metadata.json b/parser/testdata/03444_lm_block_mismatch/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03444_lm_block_mismatch/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03444_lm_block_mismatch/query.sql b/parser/testdata/03444_lm_block_mismatch/query.sql new file mode 100644 index 000000000..6510608ce --- /dev/null +++ b/parser/testdata/03444_lm_block_mismatch/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS test_03444_lazy; +CREATE TABLE test_03444_lazy (n UInt32) ENGINE = MergeTree ORDER BY n; +INSERT INTO test_03444_lazy SELECT * FROM generateRandom() LIMIT 50; + +-- make sure the optimization is enabled +set query_plan_optimize_lazy_materialization=true, query_plan_max_limit_for_lazy_materialization=10; +SELECT count() FROM (SELECT * FROM test_03444_lazy ORDER BY rand() LIMIT 5); + +select trimLeft(explain) as s from (EXPLAIN SELECT * FROM test_03444_lazy ORDER BY rand() LIMIT 5) where s ilike 'LazilyRead%'; + +DROP TABLE test_03444_lazy; diff --git a/parser/testdata/03445_subcolumns_prewhere_pushdown/ast.json b/parser/testdata/03445_subcolumns_prewhere_pushdown/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03445_subcolumns_prewhere_pushdown/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03445_subcolumns_prewhere_pushdown/metadata.json b/parser/testdata/03445_subcolumns_prewhere_pushdown/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03445_subcolumns_prewhere_pushdown/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03445_subcolumns_prewhere_pushdown/query.sql b/parser/testdata/03445_subcolumns_prewhere_pushdown/query.sql new file mode 100644 index 000000000..c5c212de5 --- /dev/null +++ b/parser/testdata/03445_subcolumns_prewhere_pushdown/query.sql @@ -0,0 +1,24 @@ +-- Tags: no-parallel-replicas + +set enable_analyzer=1; + +drop table if exists test; +create table test (x UInt64, n Nullable(UInt32), t Tuple(a UInt32, b UInt32), json JSON) engine=MergeTree order by tuple() settings index_granularity=4; +insert into test select number, number < 4 ? NULL : number, tuple(number, number), toJSONString(map('a', number, 'b', number)) from numbers(12); +explain actions=1 select * from test where n.null settings optimize_move_to_prewhere=1; +select * from test where n.null settings optimize_move_to_prewhere=1; +explain actions=1 select * from test where n.null settings optimize_move_to_prewhere=0; +select * from test where n.null settings optimize_move_to_prewhere=0; + +explain actions=1 select * from test where t.a < 4 settings optimize_move_to_prewhere=1; +select * from test where t.a < 4 settings optimize_move_to_prewhere=1; +explain actions=1 select * from test where t.a < 4 settings optimize_move_to_prewhere=0; +select * from test where t.a < 4 settings optimize_move_to_prewhere=0; + +explain actions=1 select * from test where json.a::Int64 < 4 settings optimize_move_to_prewhere=1; +select * from test where json.a::Int64 < 4 settings optimize_move_to_prewhere=1; +explain actions=1 select * from test where json.a::Int64 < 4 settings optimize_move_to_prewhere=0; +select * from test where json.a::Int64 < 4 settings optimize_move_to_prewhere=0; + +drop table test; + diff --git a/parser/testdata/03447_analyzer_correlated_subqueries_tpc_h/ast.json b/parser/testdata/03447_analyzer_correlated_subqueries_tpc_h/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03447_analyzer_correlated_subqueries_tpc_h/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03447_analyzer_correlated_subqueries_tpc_h/metadata.json b/parser/testdata/03447_analyzer_correlated_subqueries_tpc_h/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03447_analyzer_correlated_subqueries_tpc_h/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03447_analyzer_correlated_subqueries_tpc_h/query.sql b/parser/testdata/03447_analyzer_correlated_subqueries_tpc_h/query.sql new file mode 100644 index 000000000..36c100d58 --- /dev/null +++ b/parser/testdata/03447_analyzer_correlated_subqueries_tpc_h/query.sql @@ -0,0 +1,314 @@ +CREATE TABLE nation ( + n_nationkey Int32, + n_name String, + n_regionkey Int32, + n_comment String) +ORDER BY (n_nationkey); + +CREATE TABLE region ( + r_regionkey Int32, + r_name String, + r_comment String) +ORDER BY (r_regionkey); + +CREATE TABLE part ( + p_partkey Int32, + p_name String, + p_mfgr String, + p_brand String, + p_type String, + p_size Int32, + p_container String, + p_retailprice Decimal(15,2), + p_comment String) +ORDER BY (p_partkey); + +CREATE TABLE supplier ( + s_suppkey Int32, + s_name String, + s_address String, + s_nationkey Int32, + s_phone String, + s_acctbal Decimal(15,2), + s_comment String) +ORDER BY (s_suppkey); + +CREATE TABLE partsupp ( + ps_partkey Int32, + ps_suppkey Int32, + ps_availqty Int32, + ps_supplycost Decimal(15,2), + ps_comment String) +ORDER BY (ps_partkey, ps_suppkey); + +CREATE TABLE customer ( + c_custkey Int32, + c_name String, + c_address 
String, + c_nationkey Int32, + c_phone String, + c_acctbal Decimal(15,2), + c_mktsegment String, + c_comment String) +ORDER BY (c_custkey); + +CREATE TABLE orders ( + o_orderkey Int32, + o_custkey Int32, + o_orderstatus String, + o_totalprice Decimal(15,2), + o_orderdate Date, + o_orderpriority String, + o_clerk String, + o_shippriority Int32, + o_comment String) +ORDER BY (o_orderkey); +-- The following is an alternative order key which is not compliant with the official TPC-H rules but recommended by sec. 4.5 in +-- "Quantifying TPC-H Choke Points and Their Optimizations": +-- ORDER BY (o_orderdate, o_orderkey); + +CREATE TABLE lineitem ( + l_orderkey Int32, + l_partkey Int32, + l_suppkey Int32, + l_linenumber Int32, + l_quantity Decimal(15,2), + l_extendedprice Decimal(15,2), + l_discount Decimal(15,2), + l_tax Decimal(15,2), + l_returnflag String, + l_linestatus String, + l_shipdate Date, + l_commitdate Date, + l_receiptdate Date, + l_shipinstruct String, + l_shipmode String, + l_comment String) +ORDER BY (l_orderkey, l_linenumber); +-- The following is an alternative order key which is not compliant with the official TPC-H rules but recommended by sec. 4.5 in +-- "Quantifying TPC-H Choke Points and Their Optimizations": +-- ORDER BY (l_shipdate, l_orderkey, l_linenumber); + +INSERT INTO nation SELECT * FROM generateRandom() LIMIT 1; +INSERT INTO region SELECT * FROM generateRandom() LIMIT 1; +INSERT INTO part SELECT * FROM generateRandom() LIMIT 1; +INSERT INTO supplier SELECT * FROM generateRandom() LIMIT 1; +INSERT INTO partsupp SELECT * FROM generateRandom() LIMIT 1; +INSERT INTO customer SELECT * FROM generateRandom() LIMIT 1; +INSERT INTO orders SELECT * FROM generateRandom() LIMIT 1; +INSERT INTO lineitem SELECT * FROM generateRandom() LIMIT 1; + +set enable_analyzer = 1; +set allow_experimental_correlated_subqueries = 1; +SET enable_parallel_replicas = 0; + +-- Q2 +SELECT + s_acctbal, + s_name, + n_name, + p_partkey, + p_mfgr, + s_address, + s_phone, + s_comment +FROM + part, + supplier, + partsupp, + nation, + region +WHERE + p_partkey = ps_partkey + AND s_suppkey = ps_suppkey + AND p_size = 15 + AND p_type LIKE '%BRASS' + AND s_nationkey = n_nationkey + AND n_regionkey = r_regionkey + AND r_name = 'EUROPE' + AND ps_supplycost = ( + SELECT + min(ps_supplycost) + FROM + partsupp, + supplier, + nation, + region + WHERE + p_partkey = ps_partkey + AND s_suppkey = ps_suppkey + AND s_nationkey = n_nationkey + AND n_regionkey = r_regionkey + AND r_name = 'EUROPE' + ) +ORDER BY + s_acctbal DESC, + n_name, + s_name, + p_partkey +FORMAT Null; + +-- Q4 +SELECT + o_orderpriority, + count(*) AS order_count +FROM + orders +WHERE + o_orderdate >= DATE '1993-07-01' + AND o_orderdate < DATE '1993-07-01' + INTERVAL '3' MONTH + AND EXISTS ( + SELECT + * + FROM + lineitem + WHERE + l_orderkey = o_orderkey + AND l_commitdate < l_receiptdate + ) +GROUP BY + o_orderpriority +ORDER BY + o_orderpriority +FORMAT Null; + +-- Q17 +SELECT + sum(l_extendedprice) / 7.0 AS avg_yearly +FROM + lineitem, + part +WHERE + p_partkey = l_partkey + AND p_brand = 'Brand#23' + AND p_container = 'MED BOX' + AND l_quantity < ( + SELECT + 0.2 * avg(l_quantity) + FROM + lineitem + WHERE + l_partkey = p_partkey + ) +FORMAT Null; + +-- Q20 +SELECT + s_name, + s_address +FROM + supplier, + nation +WHERE + s_suppkey in ( + SELECT + ps_suppkey + FROM + partsupp + WHERE + ps_partkey in ( + SELECT + p_partkey + FROM + part + WHERE + p_name LIKE 'forest%' + ) + AND ps_availqty > ( + SELECT + 0.5 * sum(l_quantity) + FROM + 
lineitem + WHERE + l_partkey = ps_partkey + AND l_suppkey = ps_suppkey + AND l_shipdate >= DATE '1994-01-01' + AND l_shipdate < DATE '1994-01-01' + INTERVAL '1' year + ) + ) + AND s_nationkey = n_nationkey + AND n_name = 'CANADA' +ORDER BY + s_name +FORMAT Null; + +-- Q21 +SELECT + s_name, + count(*) AS numwait +FROM + supplier, + lineitem l1, + orders, + nation +WHERE + s_suppkey = l1.l_suppkey + AND o_orderkey = l1.l_orderkey + AND o_orderstatus = 'F' + AND l1.l_receiptdate > l1.l_commitdate + AND EXISTS ( + SELECT + * + FROM + lineitem l2 + WHERE + l2.l_orderkey = l1.l_orderkey + AND l2.l_suppkey <> l1.l_suppkey + ) + AND NOT EXISTS ( + SELECT + * + FROM + lineitem l3 + WHERE + l3.l_orderkey = l1.l_orderkey + AND l3.l_suppkey <> l1.l_suppkey + AND l3.l_receiptdate > l3.l_commitdate + ) + AND s_nationkey = n_nationkey + AND n_name = 'SAUDI ARABIA' +GROUP BY + s_name +ORDER BY + numwait DESC, + s_name +FORMAT Null; + +-- Q22 +SELECT + cntrycode, + count(*) AS numcust, + sum(c_acctbal) AS totacctbal +FROM ( + SELECT + substring(c_phone FROM 1 for 2) AS cntrycode, + c_acctbal + FROM + customer + WHERE + substring(c_phone FROM 1 for 2) in + ('13', '31', '23', '29', '30', '18', '17') + AND c_acctbal > ( + SELECT + avg(c_acctbal) + FROM + customer + WHERE + c_acctbal > 0.00 + AND substring(c_phone FROM 1 for 2) in + ('13', '31', '23', '29', '30', '18', '17') + ) + AND NOT EXISTS ( + SELECT + * + FROM + orders + WHERE + o_custkey = c_custkey + ) + ) AS custsale +GROUP BY + cntrycode +ORDER BY + cntrycode +FORMAT Null; diff --git a/parser/testdata/03447_base32_encode_decode/ast.json b/parser/testdata/03447_base32_encode_decode/ast.json new file mode 100644 index 000000000..902bcfc2d --- /dev/null +++ b/parser/testdata/03447_base32_encode_decode/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function base32Encode (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'This is a test string'" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001269447, + "rows_read": 7, + "bytes_read": 280 + } +} diff --git a/parser/testdata/03447_base32_encode_decode/metadata.json b/parser/testdata/03447_base32_encode_decode/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03447_base32_encode_decode/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03447_base32_encode_decode/query.sql b/parser/testdata/03447_base32_encode_decode/query.sql new file mode 100644 index 000000000..610e90b8e --- /dev/null +++ b/parser/testdata/03447_base32_encode_decode/query.sql @@ -0,0 +1,214 @@ +SELECT base32Encode('This is a test string'); + +SELECT base32Encode('This is a test string', 'Second arg'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +DROP TABLE IF EXISTS t3447; +CREATE TABLE t3447 (id Int32, str String, b32 String) ENGINE = Memory; +INSERT INTO t3447 VALUES + (100, '', ''), + (101, 'f', 'MY======'), + (102, 'fo', 'MZXQ===='), + (103, 'foo', 'MZXW6==='), + (104, 'foob', 'MZXW6YQ='), + (105, 'fooba', 'MZXW6YTB'), + (106, 'foobar', 'MZXW6YTBOI======'), + + (200, '\x00', 'AA======'), + (201, '\x00\x00', 'AAAA===='), + (202, '\x00\x00\x00', 'AAAAA==='), + (203, '\x00\x00\x00\x00', 'AAAAAAA='), + 
(204, '\x00\x00\x00\x00\x00', 'AAAAAAAA'), + + (300, '\xFF', '74======'), + (301, '\xFF\xFF', '777Q===='), + (302, '\xFF\xFF\xFF', '77776==='), + (303, '\xFF\xFF\xFF\xFF', '777777Y='), + (304, '\xFF\xFF\xFF\xFF\xFF', '77777777'), + + (400, '\x01\x23\x45\x67\x89', 'AERUKZ4J'), + (401, '\xAB\xCD\xEF\x01\x23', 'VPG66AJD'), + + (402, '1234567890', 'GEZDGNBVGY3TQOJQ'), + (403, 'The quick brown fox jumps over the lazy dog', 'KRUGKIDROVUWG2ZAMJZG653OEBTG66BANJ2W24DTEBXXMZLSEB2GQZJANRQXU6JAMRXWO==='), + + (500, 'a', 'ME======'), + (501, 'ab', 'MFRA===='), + (502, 'abc', 'MFRGG==='), + (503, 'abcd', 'MFRGGZA='), + (504, 'abcde', 'MFRGGZDF'), + (505, 'abcdef', 'MFRGGZDFMY======'), + (506, 'foo', 'MZXW6==='), + (507, 'foobar', 'MZXW6YTBOI======'), + (508, 'Hello world!', 'JBSWY3DPEB3W64TMMQQQ===='), + (509, 'Hold my beer', 'JBXWYZBANV4SAYTFMVZA===='), + (510, 'Hold another beer', 'JBXWYZBAMFXG65DIMVZCAYTFMVZA===='), + (511, 'And a wine', 'IFXGIIDBEB3WS3TF'), + (512, 'And another wine', 'IFXGIIDBNZXXI2DFOIQHO2LOMU======'), + (513, 'And a lemonade', 'IFXGIIDBEBWGK3LPNZQWIZI='), + (514, 't1Zv2yaZ', 'OQYVU5RSPFQVU==='), + (515, 'And another wine', 'IFXGIIDBNZXXI2DFOIQHO2LOMU======'); + + +SELECT 'Part 1 - Encoding'; +SELECT id, str AS input, hex(str) AS input_hex, base32Encode(str) AS result, b32, result == b32 FROM t3447; + +SELECT 'Part 2a - Decoding'; +SELECT id, b32 as input, base32Decode(input) AS result, hex(result) as result_hex, hex(str) as expected_hex, result == str FROM t3447; + +SELECT 'Part 2b - Decoding lowercase'; +SELECT id, lower(b32) as input, base32Decode(input) AS result, hex(result) as result_hex, hex(str) as expected_hex, result == str FROM t3447; + +SELECT 'Part 3 - Roundtrip'; +SELECT id, str AS input, hex(str) AS input_hex, base32Decode(base32Encode(str)) AS result, result == str FROM t3447; + +SELECT 'Part 4 - Invalid input'; +SELECT base32Decode('========'); -- { serverError INCORRECT_DATA } +SELECT base32Decode('MZXW6YT!'); -- { serverError INCORRECT_DATA } +SELECT base32Decode('MZXW6Y=B'); -- { serverError INCORRECT_DATA } +SELECT base32Decode('MZXW6Y=!'); -- { serverError INCORRECT_DATA } +SELECT base32Decode('MZXW6Y==='); -- { serverError INCORRECT_DATA } +SELECT base32Decode('MZXW6YQ=Q'); -- { serverError INCORRECT_DATA } +SELECT base32Decode('MZXW6YQ======'); -- { serverError INCORRECT_DATA } +SELECT base32Decode('12345678'); -- { serverError INCORRECT_DATA } +SELECT base32Decode('MZXW6YQ'); -- { serverError INCORRECT_DATA } +SELECT base32Decode('MZXW6YQ=='); -- { serverError INCORRECT_DATA } +SELECT base32Decode('MZXW6YQ==='); -- { serverError INCORRECT_DATA } +SELECT base32Decode('MZXW6YQ===='); -- { serverError INCORRECT_DATA } +SELECT base32Decode('MZXW6YQ====='); -- { serverError INCORRECT_DATA } +SELECT base32Decode('MZXW6YQ======'); -- { serverError INCORRECT_DATA } +SELECT base32Decode('MZXW6YQ======='); -- { serverError INCORRECT_DATA } +SELECT base32Decode('MZXW6YQ====!=='); -- { serverError INCORRECT_DATA } +SELECT base32Decode('MZXW6YQ====A=='); -- { serverError INCORRECT_DATA } +SELECT base32Decode('MZXW6YQ======'); -- { serverError INCORRECT_DATA } + +SELECT 'Part 5 - tryBase32Decode'; +SELECT tryBase32Decode('========'); +SELECT tryBase32Decode('MZXW6YT!'); +SELECT tryBase32Decode('MZXW6Y=B'); +SELECT tryBase32Decode('MZXW6Y=!'); +SELECT tryBase32Decode('MZXW6Y==='); +SELECT tryBase32Decode('MZXW6YQ=Q'); +SELECT tryBase32Decode('MZXW6YQ======'); +SELECT tryBase32Decode('12345678'); +SELECT tryBase32Decode('MZXW6YQ'); +SELECT tryBase32Decode('MZXW6YQ=='); 
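+-- Editorial note, not part of the original test: RFC 4648 Base32 maps every 5 input bytes to 8 output characters and pads the final block with '='. +-- For n input bytes the encoded length is ceil(n / 5) * 8, so a well-formed value carries exactly 0, 1, 3, 4 or 6 pad characters. +-- E.g. 'foob' (4 bytes) encodes to 'MZXW6YQ=' (7 data characters plus 1 pad), which is why 'MZXW6YQ' with any other pad count is rejected: base32Decode throws INCORRECT_DATA, while these tryBase32Decode calls swallow the error.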
+SELECT tryBase32Decode('MZXW6YQ==='); +SELECT tryBase32Decode('MZXW6YQ===='); +SELECT tryBase32Decode('MZXW6YQ====='); +SELECT tryBase32Decode('MZXW6YQ======'); +SELECT tryBase32Decode('MZXW6YQ======='); +SELECT tryBase32Decode('MZXW6YQ====!=='); +SELECT tryBase32Decode('MZXW6YQ====A=='); +SELECT tryBase32Decode('MZXW6YQ======'); + +SELECT 'Part 6 - FixedString encoding + decoding'; +SELECT val, hex(val), base32Encode(val) as enc_res, hex(base32Decode(enc_res)) as dec_res, dec_res == hex(val) FROM (SELECT arrayJoin([ + toFixedString('', 1), + toFixedString('f', 1), + toFixedString('fo', 2), + toFixedString('foo', 3), + toFixedString('foob', 4), + toFixedString('fooba', 5), + toFixedString('foobar', 6), + toFixedString('\x00', 1), + toFixedString('\x00\x00', 2), + toFixedString('\x00\x00\x00', 3), + toFixedString('\x00\x00\x00\x00', 4), + toFixedString('\x00\x00\x00\x00\x00', 5), + toFixedString('\xFF', 1), + toFixedString('\xFF\xFF', 2), + toFixedString('\xFF\xFF\xFF', 3), + toFixedString('\xFF\xFF\xFF\xFF', 4), + toFixedString('\xFF\xFF\xFF\xFF\xFF', 5), + toFixedString('\x01\x23\x45\x67\x89', 5), + toFixedString('\xAB\xCD\xEF\x01\x23', 5), + toFixedString('1234567890', 10), + toFixedString('The quick brown fox jumps over the lazy dog', 43), + toFixedString('a', 1), + toFixedString('ab', 2), + toFixedString('abc', 3), + toFixedString('abcd', 4), + toFixedString('abcde', 5), + toFixedString('abcdef', 6), + toFixedString('foo', 3), + toFixedString('foobar', 6), + toFixedString('Hello world!', 12), + toFixedString('Hold my beer', 12), + toFixedString('Hold another beer', 18), + toFixedString('And a wine', 10), + toFixedString('And another wine', 17), + toFixedString('And a lemonade', 14), + toFixedString('t1Zv2yaZ', 8), + toFixedString('And another wine', 17) + ]) val); + +SELECT 'Part 6 - FixedString decoding + encoding'; +SELECT val, base32Decode(val) as dec_res, hex(dec_res), base32Encode(dec_res) as enc_res, enc_res == val FROM (SELECT arrayJoin([ + toFixedString('AAAAA===', 8), + toFixedString('MY======', 8), + toFixedString('MZXQ====', 8), + toFixedString('MZXW6===', 8), + toFixedString('MZXW6YQ=', 8), + toFixedString('MZXW6YTB', 8), + toFixedString('MZXW6YTBOI======', 16), + toFixedString('AA======', 8), + toFixedString('AAAA====', 8), + toFixedString('AAAAA===', 8), + toFixedString('AAAAAAA=', 8), + toFixedString('AAAAAAAA', 8), + toFixedString('74======', 8), + toFixedString('777Q====', 8), + toFixedString('77776===', 8), + toFixedString('777777Y=', 8), + toFixedString('77777777', 8), + toFixedString('AERUKZ4J', 8), + toFixedString('VPG66AJD', 8), + toFixedString('GEZDGNBVGY3TQOJQ', 16), + toFixedString('KRUGKIDROVUWG2ZAMJZG653OEBTG66BANJ2W24DTEBXXMZLSEB2GQZJANRQXU6JAMRXWO===', 96), + toFixedString('ME======', 8), + toFixedString('MFRA====', 8), + toFixedString('MFRGG===', 8), + toFixedString('MFRGGZA=', 8), + toFixedString('MFRGGZDF', 8), + toFixedString('MFRGGZDFMY======', 16), + toFixedString('MZXW6===', 8), + toFixedString('MZXW6YTBOI======', 16), + toFixedString('JBSWY3DPEB3W64TMMQQQ====', 24), + toFixedString('JBXWYZBANV4SAYTFMVZA====', 24), + toFixedString('JBXWYZBAMFXG65DIMVZCAYTFMVZA====', 32), + toFixedString('IFXGIIDBEB3WS3TF', 16), + toFixedString('IFXGIIDBNZXXI2DFOIQHO2LOMU======', 32), + toFixedString('IFXGIIDBEBWGK3LPNZQWIZI=', 24), + toFixedString('OQYVU5RSPFQVU===', 16), + toFixedString('IFXGIIDBNZXXI2DFOIQHO2LOMU======', 32) + ]) val); + +SELECT 'Part 7 - Similar to 02337_base58.sql'; + +SELECT base32Decode(encoded) FROM (SELECT base32Encode(val) as encoded FROM 
(SELECT arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar', 'Hello world!']) val)); +SELECT tryBase32Decode(encoded) FROM (SELECT base32Encode(val) as encoded FROM (SELECT arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar', 'Hello world!']) val)); +SELECT tryBase32Decode(val) FROM (SELECT arrayJoin(['Hold my beer', 'Hold another beer', '3csAg9', 'And a wine', 'And another wine', 'And a lemonade', 't1Zv2yaZ', 'And another wine']) val); + +SELECT base32Encode(val) FROM (SELECT arrayJoin(['', 'f', 'fo', 'foo', 'foob', 'fooba', 'foobar']) val); +SELECT base32Decode(val) FROM (SELECT arrayJoin(['', 'MY======', 'MZXQ====', 'MZXW6===', 'MZXW6YQ=', 'MZXW6YTB', 'MZXW6YTBOI======']) val); + +SELECT base32Encode(base32Decode('KRUGKIDROVUWG2ZAMJZG653OEBTG66BANJ2W24DTEBXXMZLSEB2GQZJANRQXU6JAMRXWO===')) == 'KRUGKIDROVUWG2ZAMJZG653OEBTG66BANJ2W24DTEBXXMZLSEB2GQZJANRQXU6JAMRXWO==='; +SELECT base32Encode('\xAB\xCD\xEF\x01\x23') == 'VPG66AJD'; + +SELECT base32Encode(toFixedString('Hold my beer...', 15)); +SELECT base32Decode(toFixedString('t1Zv2yaZ', 8)); -- { serverError INCORRECT_DATA } +SELECT tryBase32Decode(toFixedString('t1Zv2yaZ', 8)); + +SELECT base32Encode(val) FROM (SELECT arrayJoin([toFixedString('', 3), toFixedString('f', 3), toFixedString('fo', 3), toFixedString('foo', 3)]) val); +SELECT base32Decode(val) FROM (SELECT arrayJoin([toFixedString('AAAAA===', 8), toFixedString('MYAAA===', 8), toFixedString('MZXQA===', 8), toFixedString('MZXW6===', 8)]) val); + +SELECT base32Encode(reinterpretAsFixedString(byteSwap(toUInt256('256')))) == 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAA===='; +SELECT base32Encode(reinterpretAsString(byteSwap(toUInt256('256')))) == 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAE======'; -- { reinterpretAsString drops the last null byte, hence the encoded value differs from the FixedString version above } + +SELECT base32Encode('Testing') == 'KRSXG5DJNZTQ===='; +SELECT base32Decode('KRSXG5DJNZTQ====') == 'Testing'; + +SELECT base32Encode(val) FROM (SELECT arrayJoin(['test1', 'test2', 'test3', 'test123', 'test456']) val); +SELECT base32Decode(val) FROM (SELECT arrayJoin(['KRSXG5A=', 'ORSXG5BA', 'ORSXG5BB']) val); + +DROP TABLE IF EXISTS t3447; diff --git a/parser/testdata/03447_float_nan_order/ast.json b/parser/testdata/03447_float_nan_order/ast.json new file mode 100644 index 000000000..05e458eab --- /dev/null +++ b/parser/testdata/03447_float_nan_order/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '--- short array ASC NULLS FIRST'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.000936662, + "rows_read": 5, + "bytes_read": 202 + } +} diff --git a/parser/testdata/03447_float_nan_order/metadata.json b/parser/testdata/03447_float_nan_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03447_float_nan_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03447_float_nan_order/query.sql b/parser/testdata/03447_float_nan_order/query.sql new file mode 100644 index 000000000..c82e2bdbd --- /dev/null +++ b/parser/testdata/03447_float_nan_order/query.sql @@ -0,0 +1,39 @@ +SELECT '--- short array ASC NULLS FIRST'; +SELECT number + number / number AS a FROM
numbers(3) ORDER BY a ASC NULLS FIRST; +SELECT '--- short array ASC NULLS LAST'; +SELECT number + number / number AS a FROM numbers(3) ORDER BY a ASC NULLS LAST; +SELECT '--- short array DESC NULLS FIRST'; +SELECT number + number / number AS a FROM numbers(3) ORDER BY a DESC NULLS FIRST; +SELECT '--- short array DESC NULLS LAST'; +SELECT number + number / number AS a FROM numbers(3) ORDER BY a DESC NULLS LAST; + +-- After 256 elements radix sort is used +SELECT '--- long array ASC NULLS FIRST'; +SELECT number + number / number AS a FROM numbers(256) ORDER BY a ASC NULLS FIRST; +SELECT '--- long array ASC NULLS LAST'; +SELECT number + number / number AS a FROM numbers(256) ORDER BY a ASC NULLS LAST; +SELECT '--- long array DESC NULLS FIRST'; +SELECT number + number / number AS a FROM numbers(256) ORDER BY a DESC NULLS FIRST; +SELECT '--- long array DESC NULLS LAST'; +SELECT number + number / number AS a FROM numbers(256) ORDER BY a DESC NULLS LAST; + +-- Same for sort of ranges + +SELECT '--- short array partial ASC NULLS FIRST'; +SELECT number + number / number AS a FROM numbers(3) ORDER BY a ASC NULLS FIRST, 1 ASC; +SELECT '--- short array partial ASC NULLS LAST'; +SELECT number + number / number AS a FROM numbers(3) ORDER BY a ASC NULLS LAST, 1 ASC; +SELECT '--- short array partial DESC NULLS FIRST'; +SELECT number + number / number AS a FROM numbers(3) ORDER BY a DESC NULLS FIRST, 1 ASC; +SELECT '--- short array partial DESC NULLS LAST'; +SELECT number + number / number AS a FROM numbers(3) ORDER BY a DESC NULLS LAST, 1 ASC; + +-- After 256 elements radix sort is used +SELECT '--- long array partial ASC NULLS FIRST'; +SELECT number + number / number AS a FROM numbers(256) ORDER BY a ASC NULLS FIRST, 1 ASC; +SELECT '--- long array partial ASC NULLS LAST'; +SELECT number + number / number AS a FROM numbers(256) ORDER BY a ASC NULLS LAST, 1 ASC; +SELECT '--- long array partial DESC NULLS FIRST'; +SELECT number + number / number AS a FROM numbers(256) ORDER BY a DESC NULLS FIRST, 1 ASC; +SELECT '--- long array partial DESC NULLS LAST'; +SELECT number + number / number AS a FROM numbers(256) ORDER BY a DESC NULLS LAST, 1 ASC; diff --git a/parser/testdata/03447_function_reverse_for_tuple/ast.json b/parser/testdata/03447_function_reverse_for_tuple/ast.json new file mode 100644 index 000000000..67e0b5528 --- /dev/null +++ b/parser/testdata/03447_function_reverse_for_tuple/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function reverse (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal 'Hello'" + }, + { + "explain": " Literal Array_[UInt64_2, UInt64_3]" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001112358, + "rows_read": 11, + "bytes_read": 426 + } +} diff --git a/parser/testdata/03447_function_reverse_for_tuple/metadata.json b/parser/testdata/03447_function_reverse_for_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03447_function_reverse_for_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03447_function_reverse_for_tuple/query.sql b/parser/testdata/03447_function_reverse_for_tuple/query.sql
new file mode 100644
index 000000000..a3783c2f7
--- /dev/null
+++ b/parser/testdata/03447_function_reverse_for_tuple/query.sql
@@ -0,0 +1,12 @@
+SELECT reverse((1, 'Hello', [2, 3]));
+
+DROP TABLE IF EXISTS t_tuple;
+
+CREATE TABLE t_tuple(tuple Tuple(a Int32, b String)) engine = MergeTree order by tuple();
+
+INSERT INTO t_tuple VALUES((1, 'hello')), ((2, 'world')), ((3, 'clickhouse'));
+
+SELECT reverse(tuple) FROM t_tuple;
+SELECT reverse(tuple).a, reverse(tuple).b FROM t_tuple;
+
+DROP TABLE t_tuple;
diff --git a/parser/testdata/03447_grouping_sets_analyzer_const_columns/ast.json b/parser/testdata/03447_grouping_sets_analyzer_const_columns/ast.json
new file mode 100644
index 000000000..2c18211a0
--- /dev/null
+++ b/parser/testdata/03447_grouping_sets_analyzer_const_columns/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001345508,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/03447_grouping_sets_analyzer_const_columns/metadata.json b/parser/testdata/03447_grouping_sets_analyzer_const_columns/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03447_grouping_sets_analyzer_const_columns/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03447_grouping_sets_analyzer_const_columns/query.sql b/parser/testdata/03447_grouping_sets_analyzer_const_columns/query.sql
new file mode 100644
index 000000000..f12e5636b
--- /dev/null
+++ b/parser/testdata/03447_grouping_sets_analyzer_const_columns/query.sql
@@ -0,0 +1,26 @@
+SET enable_analyzer=1;
+
+SELECT 'Const column in grouping set, analyzer on:';
+
+SELECT grouping(key_a), grouping(key_b), key_a, key_b, count() FROM (
+    SELECT 'value' as key_a, number as key_b FROM numbers(4)
+)
+GROUP BY GROUPING SETS((key_b), (key_a, key_b))
+ORDER BY (grouping(key_a), grouping(key_b), key_a, key_b);
+
+SELECT 'Non-const column in grouping set, analyzer on:';
+
+SELECT grouping(key_a), grouping(key_b), key_a, key_b, count() FROM (
+    SELECT materialize('value') as key_a, number as key_b FROM numbers(4)
+)
+GROUP BY GROUPING SETS((key_b), (key_a, key_b))
+ORDER BY (grouping(key_a), grouping(key_b), key_a, key_b);
+
+SELECT 'Const column in grouping set, analyzer off:';
+
+SELECT grouping(key_a), grouping(key_b), key_a, key_b, count() FROM (
+    SELECT 'value' as key_a, number as key_b FROM numbers(4)
+)
+GROUP BY GROUPING SETS((key_b), (key_a, key_b))
+ORDER BY (grouping(key_a), grouping(key_b), key_a, key_b)
+SETTINGS allow_experimental_analyzer=0;
diff --git a/parser/testdata/03447_order_by_json_and_other_column/ast.json b/parser/testdata/03447_order_by_json_and_other_column/ast.json
new file mode 100644
index 000000000..f56eeab84
--- /dev/null
+++ b/parser/testdata/03447_order_by_json_and_other_column/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery test (children 1)"
+		},
+		{
+			"explain": " Identifier test"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001219736,
+		"rows_read": 2,
+		"bytes_read": 60
+	}
+}
diff --git a/parser/testdata/03447_order_by_json_and_other_column/metadata.json b/parser/testdata/03447_order_by_json_and_other_column/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03447_order_by_json_and_other_column/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03447_order_by_json_and_other_column/query.sql b/parser/testdata/03447_order_by_json_and_other_column/query.sql
new file mode 100644
index 000000000..76c64424d
--- /dev/null
+++ b/parser/testdata/03447_order_by_json_and_other_column/query.sql
@@ -0,0 +1,6 @@
+drop table if exists test;
+create table test (json JSON) engine=Memory;
+insert into test values ('{"a" : 1}'), ('{"a" : 2}'), ('{"a" : 2}'), ('{"a" : 4}');
+select json, materialize('') from test order by all asc;
+drop table test;
+
diff --git a/parser/testdata/03447_storage_join_unsupported_keys/ast.json b/parser/testdata/03447_storage_join_unsupported_keys/ast.json
new file mode 100644
index 000000000..d837653ec
--- /dev/null
+++ b/parser/testdata/03447_storage_join_unsupported_keys/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001412661,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/03447_storage_join_unsupported_keys/metadata.json b/parser/testdata/03447_storage_join_unsupported_keys/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03447_storage_join_unsupported_keys/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03447_storage_join_unsupported_keys/query.sql b/parser/testdata/03447_storage_join_unsupported_keys/query.sql
new file mode 100644
index 000000000..9c8b75da2
--- /dev/null
+++ b/parser/testdata/03447_storage_join_unsupported_keys/query.sql
@@ -0,0 +1,66 @@
+SET enable_analyzer = 1;
+
+DROP TABLE IF EXISTS segmented_ctr_cache;
+DROP TABLE IF EXISTS bookmarks_join;
+DROP TABLE IF EXISTS cart_join;
+
+create table if not exists segmented_ctr_cache
+(
+    product_id Int32,
+    segment_id Int32,
+    count_in_viewport UInt64,
+    count_in_viewed UInt64
+)
+    engine = Memory;
+
+INSERT INTO segmented_ctr_cache VALUES (1182604, 44, 15, 3);
+INSERT INTO segmented_ctr_cache VALUES (311577, 52, 4, 2);
+INSERT INTO segmented_ctr_cache VALUES (284246, 45, 2, 2);
+INSERT INTO segmented_ctr_cache VALUES (1115559, 52, 9, 2);
+INSERT INTO segmented_ctr_cache VALUES (1551941, 0, 163, 4);
+INSERT INTO segmented_ctr_cache VALUES (7165089, 45, 17, 1);
+
+
+
+CREATE TABLE bookmarks_join
+(
+    product_id Int32,
+    segment_id Int32,
+    count_in_bookmark Int32
+) ENGINE = Join(ALL, LEFT, product_id, segment_id);
+INSERT INTO bookmarks_join VALUES (1182604, 44, 1);
+INSERT INTO bookmarks_join VALUES (311577, 52, 1);
+INSERT INTO bookmarks_join VALUES (7165089, 0, 1);
+INSERT INTO bookmarks_join VALUES (7165089, 50, 1);
+
+
+CREATE TABLE cart_join
+(
+product_id Int32,
+segment_id Int32,
+count_in_cart Int32
+) ENGINE = Join(ALL, LEFT, product_id, segment_id);
+
+INSERT INTO cart_join VALUES (311577, 44, 1);
+INSERT INTO cart_join VALUES (311577, 52, 1);
+INSERT INTO cart_join VALUES (7165089, 0, 1);
+INSERT INTO cart_join VALUES (7165089, 45, 3);
+
+
+SELECT
+    segmented_ctr_cache.product_id,
+    segmented_ctr_cache.segment_id,
+    count_in_bookmark,
+    count_in_cart
+FROM segmented_ctr_cache
+LEFT JOIN cart_join ON
+    cart_join.product_id = segmented_ctr_cache.product_id
+    AND cart_join.segment_id = segmented_ctr_cache.segment_id
+LEFT JOIN bookmarks_join ON
+    bookmarks_join.product_id = segmented_ctr_cache.product_id
+    AND bookmarks_join.segment_id = segmented_ctr_cache.segment_id
+ORDER BY ALL;
+
+DROP TABLE segmented_ctr_cache;
+DROP TABLE bookmarks_join;
+DROP TABLE cart_join;
diff --git a/parser/testdata/03447_window_functions_distinct/ast.json b/parser/testdata/03447_window_functions_distinct/ast.json
new file mode 100644
index 000000000..21134416f
--- /dev/null
+++ b/parser/testdata/03447_window_functions_distinct/ast.json
@@ -0,0 +1,37 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Literal 'Single Numeric Data:'"
+		}
+	],
+
+	"rows": 5,
+
+	"statistics":
+	{
+		"elapsed": 0.001108309,
+		"rows_read": 5,
+		"bytes_read": 191
+	}
+}
diff --git a/parser/testdata/03447_window_functions_distinct/metadata.json b/parser/testdata/03447_window_functions_distinct/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03447_window_functions_distinct/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03447_window_functions_distinct/query.sql b/parser/testdata/03447_window_functions_distinct/query.sql
new file mode 100644
index 000000000..d54eeee54
--- /dev/null
+++ b/parser/testdata/03447_window_functions_distinct/query.sql
@@ -0,0 +1,61 @@
+SELECT 'Single Numeric Data:';
+SELECT number, sum(DISTINCT number) OVER () FROM numbers(0, 5) ORDER BY number;
+SELECT number, sum(DISTINCT number) OVER (ORDER BY number) FROM numbers(0, 5) ORDER BY number;
+
+WITH intHash64(number) % 10 AS x
+SELECT
+    x,
+    sumDistinct(x) OVER (ORDER BY number ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS a,
+    sumDistinct(x) OVER (ORDER BY number ASC ROWS BETWEEN CURRENT ROW AND UNBOUNDED FOLLOWING) AS b
+FROM numbers(10);
+
+SELECT 'Single Generic Plain Column:';
+SELECT number, s, min(DISTINCT s) OVER(PARTITION BY number % 2) AS min_varlen_string
+FROM
+(
+    SELECT number, toString(number % 5) AS s
+    FROM numbers(11)
+) ORDER BY number;
+
+SELECT 'Single Generic Non-Plain Column:';
+SELECT arr, min(DISTINCT arr) OVER (ORDER BY arr)
+FROM
+(
+    SELECT [toString(number), toString(number+1)] AS arr
+    FROM numbers(10)
+) ORDER BY arr;
+
+SELECT 'Multiple Generic Columns:';
+SELECT s, ts, argMax(DISTINCT ts, s) OVER(PARTITION BY ts % 2) AS value_with_max_ts
+FROM
+(
+    SELECT number AS ts, toString(number % 5) AS s
+    FROM numbers(11)
+) ORDER BY ts;
+
+SELECT 'Aggregate State Checks:';
+WITH
+    arrayReduce('sumDistinctState', [1 + number, 2, 3]) AS a,
+    arrayReduce('sumDistinctState', [2 + number, 3, 4]) AS b
+SELECT
+    finalizeAggregation(a),
+    finalizeAggregation(b),
+    finalizeAggregation(a + b),
+    finalizeAggregation(a * 2),
+    finalizeAggregation(b * 2)
+FROM numbers(10);
+
+WITH
+    arrayReduce('sumDistinctState', [1, 2, 3]) AS a,
+    arrayReduce('sumDistinctState', [2, 3, 4]) AS b,
+    arrayReduce('sumDistinctState', [3, 4, 5]) AS c
+SELECT
+    finalizeAggregation(a),
+    finalizeAggregation(b),
+    finalizeAggregation(c),
+    finalizeAggregation(a + b),
+    finalizeAggregation(a + c),
+    finalizeAggregation(b + c),
+    finalizeAggregation((a + b) + c),
+    finalizeAggregation((b + c) + a),
+    finalizeAggregation((c + a) + b);
diff --git a/parser/testdata/03448_analyzer_array_join_alias_in_join_using_bug/ast.json b/parser/testdata/03448_analyzer_array_join_alias_in_join_using_bug/ast.json
new file mode 100644
index 000000000..8cb6a6c37
--- /dev/null
+++ b/parser/testdata/03448_analyzer_array_join_alias_in_join_using_bug/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "CreateQuery local_table (children 1)"
+		},
+		{
+			"explain": " Identifier local_table"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001045169,
+		"rows_read": 2,
+		"bytes_read": 75
+	}
+}
diff --git a/parser/testdata/03448_analyzer_array_join_alias_in_join_using_bug/metadata.json b/parser/testdata/03448_analyzer_array_join_alias_in_join_using_bug/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03448_analyzer_array_join_alias_in_join_using_bug/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03448_analyzer_array_join_alias_in_join_using_bug/query.sql b/parser/testdata/03448_analyzer_array_join_alias_in_join_using_bug/query.sql
new file mode 100644
index 000000000..06829d394
--- /dev/null
+++ b/parser/testdata/03448_analyzer_array_join_alias_in_join_using_bug/query.sql
@@ -0,0 +1,45 @@
+CREATE TABLE local_table
+(
+    id Int8,
+    `arr` Array(UInt8)
+)
+ENGINE = MergeTree
+ORDER BY id;
+
+insert into local_table select 42, [0, 1, 2];
+
+-- { echoOn }
+
+SELECT arr
+FROM remote('127.0.0.2', currentDatabase(), local_table) r
+ARRAY JOIN arr AS dummy
+INNER JOIN system.one AS foo USING (dummy);
+
+SELECT arr
+FROM remote('127.0.0.{1,2}', currentDatabase(), local_table) r
+ARRAY JOIN arr AS dummy
+INNER JOIN system.one AS foo USING (dummy);
+
+SELECT arr
+FROM remote('127.0.0.2', currentDatabase(), local_table) r
+ARRAY JOIN arr AS arr_item
+INNER JOIN (SELECT 1 as arr_item) AS foo USING (arr_item);
+
+SELECT arr
+FROM remote('127.0.0.{1,2}', currentDatabase(), local_table) r
+ARRAY JOIN arr AS arr_item
+INNER JOIN (SELECT 1 as arr_item) AS foo USING (arr_item);
+
+SELECT arr, arr_item
+FROM remote('127.0.0.2', currentDatabase(), local_table) r
+ARRAY JOIN arr AS arr_item
+INNER JOIN (SELECT 1 + number as arr_item from numbers(2)) AS foo USING (arr_item);
+
+SELECT arr, arr_item
+FROM remote('127.0.0.{1,2}', currentDatabase(), local_table) r
+ARRAY JOIN arr AS arr_item
+INNER JOIN (SELECT 1 + number as arr_item from numbers(2)) AS foo USING (arr_item);
+
+-- Fuzzed
+
+SELECT arr FROM remote('127.0.0.2', currentDatabase(), local_table) AS r ARRAY JOIN arr AS arr_item GLOBAL RIGHT JOIN (SELECT 1 AS arr_item) AS foo USING (arr_item);
diff --git a/parser/testdata/03448_analyzer_correlated_subquery_in_projection/ast.json b/parser/testdata/03448_analyzer_correlated_subquery_in_projection/ast.json
new file mode 100644
index 000000000..6f71b1ea5
--- /dev/null
+++ b/parser/testdata/03448_analyzer_correlated_subquery_in_projection/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.00134335,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/03448_analyzer_correlated_subquery_in_projection/metadata.json b/parser/testdata/03448_analyzer_correlated_subquery_in_projection/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03448_analyzer_correlated_subquery_in_projection/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03448_analyzer_correlated_subquery_in_projection/query.sql b/parser/testdata/03448_analyzer_correlated_subquery_in_projection/query.sql
new file mode 100644
index 000000000..57cc976c9
--- /dev/null
+++ b/parser/testdata/03448_analyzer_correlated_subquery_in_projection/query.sql
@@ -0,0 +1,42 @@
+set enable_analyzer = 1;
+set allow_experimental_correlated_subqueries = 1;
+
+SELECT (SELECT count() FROM system.one WHERE number = 2) FROM numbers(2);
+
+SELECT (SELECT count() FROM system.one WHERE number = 2) FROM numbers(2) GROUP BY number % 2; -- { serverError NOT_IMPLEMENTED }
+
+CREATE TABLE A
+(
+    id UInt32
+)
+ENGINE = Memory();
+
+INSERT INTO A SELECT number + 1 as id FROM numbers(19);
+
+CREATE TABLE B
+(
+    id UInt32,
+    A_ids Array(UInt32)
+)
+ENGINE = Memory();
+
+INSERT INTO B (id, A_ids) VALUES
+(101, [1, 3, 5]),
+(102, [2, 4]),
+(103, [1, 7, 9, 11]),
+(104, [6, 8, 10]),
+(105, [3, 12, 15]),
+(106, [16, 18, 20]);
+
+
+-- The Query using Subqueries
+SELECT
+    A.id AS a_id,
+    (
+        SELECT groupArraySorted(5)(B.id)
+        FROM B
+        WHERE has(B.A_ids, A.id)
+    ) AS b_ids_containing_a_id
+FROM A
+ORDER BY a_id
+LIMIT 20;
diff --git a/parser/testdata/03448_analyzer_skip_index_and_lambdas/ast.json b/parser/testdata/03448_analyzer_skip_index_and_lambdas/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03448_analyzer_skip_index_and_lambdas/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03448_analyzer_skip_index_and_lambdas/metadata.json b/parser/testdata/03448_analyzer_skip_index_and_lambdas/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03448_analyzer_skip_index_and_lambdas/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03448_analyzer_skip_index_and_lambdas/query.sql b/parser/testdata/03448_analyzer_skip_index_and_lambdas/query.sql
new file mode 100644
index 000000000..bccbb7fa3
--- /dev/null
+++ b/parser/testdata/03448_analyzer_skip_index_and_lambdas/query.sql
@@ -0,0 +1,52 @@
+-- Tags: no-random-merge-tree-settings, no-random-settings, no-parallel-replicas
+
+DROP TABLE IF EXISTS index_test;
+CREATE TABLE index_test
+(
+    id UInt32,
+    arr Array(String),
+    INDEX array_index arrayMap(x -> lower(x), arr) TYPE bloom_filter(0.01) GRANULARITY 1,
+    INDEX array_index_2 arrayMap((x, y) -> concat(lower(x), y), arr, arr) TYPE bloom_filter(0.01) GRANULARITY 1,
+    INDEX array_index_3 arrayMap((x, y) -> concat(lower(x), y, '_', toString(id)), arr, arr) TYPE bloom_filter(0.01) GRANULARITY 1
+)
+ENGINE = MergeTree
+ORDER BY tuple()
+SETTINGS allow_suspicious_indices = 1, index_granularity = 4;
+
+insert into index_test select number, arrayMap(x -> 'A_' || toString(x) , range(number)) from numbers(16);
+
+-- { echo On }
+
+EXPLAIN indexes = 1, description=0
+SELECT arr
+FROM index_test
+WHERE has(arrayMap(x -> lower(x), arr), lower('a_12'))
+SETTINGS enable_analyzer = 1;
+
+SELECT arr
+FROM index_test
+WHERE has(arrayMap(x -> lower(x), arr), lower('a_12'))
+SETTINGS enable_analyzer = 1;
+
+
+EXPLAIN indexes = 1, description=0
+SELECT arr
+FROM index_test
+WHERE has(arrayMap((x, y) -> concat(lower(x), y), arr, arr), 'a_12A_12')
+SETTINGS enable_analyzer = 1;
+
+SELECT arr
+FROM index_test
+WHERE has(arrayMap((x, y) -> concat(lower(x), y), arr, arr), 'a_12A_12')
+SETTINGS enable_analyzer = 1;
+
+EXPLAIN indexes = 1, description=0
+SELECT arr
+FROM index_test
+WHERE has(arrayMap((x, y) -> concat(lower(x), y, '_', toString(id)), arr, arr), 'a_12A_12_13')
+SETTINGS enable_analyzer = 1;
+
+SELECT arr
+FROM index_test
+WHERE has(arrayMap((x, y) -> concat(lower(x), y, '_', toString(id)), arr, arr), 'a_12A_12_13')
+SETTINGS enable_analyzer = 1;
diff --git a/parser/testdata/03448_in_select_tuple/ast.json b/parser/testdata/03448_in_select_tuple/ast.json
new file mode 100644
index 000000000..7615a235d
--- /dev/null
+++ b/parser/testdata/03448_in_select_tuple/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery table1 (children 1)"
+		},
+		{
+			"explain": " Identifier table1"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001250758,
+		"rows_read": 2,
+		"bytes_read": 64
+	}
+}
diff --git a/parser/testdata/03448_in_select_tuple/metadata.json b/parser/testdata/03448_in_select_tuple/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03448_in_select_tuple/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03448_in_select_tuple/query.sql b/parser/testdata/03448_in_select_tuple/query.sql
new file mode 100644
index 000000000..54dad8a3b
--- /dev/null
+++ b/parser/testdata/03448_in_select_tuple/query.sql
@@ -0,0 +1,17 @@
+DROP TABLE IF EXISTS table1;
+DROP TABLE IF EXISTS table2;
+
+CREATE TABLE table1
+(
+    `id1` UInt64,
+    `id2` UInt8,
+)
+ENGINE = MergeTree ORDER BY (id1)
+AS SELECT 1, 1;
+
+CREATE TABLE table2 (id1 UInt64, id2 UInt8) ENGINE = Memory as select 1, 1;
+
+select * from table1 where (id1, id2) in (select tuple(id1, id2) from table2);
+
+DROP TABLE table1;
+DROP TABLE table2;
diff --git a/parser/testdata/03448_topk_merging/ast.json b/parser/testdata/03448_topk_merging/ast.json
new file mode 100644
index 000000000..9d0ff8d36
--- /dev/null
+++ b/parser/testdata/03448_topk_merging/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery topk_test (children 1)"
+		},
+		{
+			"explain": " Identifier topk_test"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001277237,
+		"rows_read": 2,
+		"bytes_read": 70
+	}
+}
diff --git a/parser/testdata/03448_topk_merging/metadata.json b/parser/testdata/03448_topk_merging/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03448_topk_merging/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03448_topk_merging/query.sql b/parser/testdata/03448_topk_merging/query.sql
new file mode 100644
index 000000000..416f2bcd2
--- /dev/null
+++ b/parser/testdata/03448_topk_merging/query.sql
@@ -0,0 +1,66 @@
+DROP TABLE IF EXISTS topk_test;
+
+CREATE TABLE topk_test (
+    foo UInt64,
+    top_items AggregateFunction(topKWeighted(100, 3, 'counts'), String, UInt64)
+)
+ENGINE = AggregatingMergeTree()
+ORDER BY (foo);
+
+INSERT INTO topk_test
+SELECT
+    6 AS foo,
+    topKWeightedState(100, 3, 'counts')(item, toUInt64(10))
+FROM (
+    SELECT arrayJoin(['a','a','a','b','b','b','c','c','c','d','d','e','e']) AS item
+);
+
+INSERT INTO topk_test
+SELECT
+    8 AS foo,
+    topKWeightedState(100, 3, 'counts')(item, toUInt64(10))
+FROM (
+    SELECT arrayJoin(['i','i','i','j','j','j','k','k','k','d','d','e','e']) AS item
+);
+
+INSERT INTO topk_test
+SELECT
+    9 AS foo,
+    topKWeightedState(100, 3, 'counts')(item, toUInt64(10))
+FROM (
+    SELECT arrayJoin(['l','l','l','m','m','m','n','n','n','d','d','e','e']) AS item
+);
+
+INSERT INTO topk_test
+SELECT
+    10 AS foo,
+    topKWeightedState(100, 3, 'counts')(item, toUInt64(10))
+FROM (
+    SELECT arrayJoin(['z','z','z','w','w','w','y','y','y','d','d','e','e']) AS item
+);
+
+INSERT INTO topk_test
+SELECT
+    11 AS foo,
+    topKWeightedState(100, 3, 'counts')(item, toUInt64(10))
+FROM (
+    SELECT arrayJoin(['i','i','i','j','j','j','k','k','k','d','d','e','e']) AS item
+);
+
+SELECT * FROM (
+    SELECT
+        foo,
+        untuple(arrayJoin(topKWeightedMerge(100, 3, 'counts')(top_items))) AS top
+    FROM topk_test
+    GROUP BY foo
+)
+ORDER BY foo, top.count DESC, top.item;
+
+SELECT * FROM (
+    SELECT
+        untuple(arrayJoin(topKWeightedMerge(100, 3, 'counts')(top_items))) AS top
+    FROM topk_test FINAL
+)
+ORDER BY top.count DESC, top.item;
+
+DROP TABLE topk_test;
diff --git a/parser/testdata/03448_trivial_count_single_threaded_merge/ast.json b/parser/testdata/03448_trivial_count_single_threaded_merge/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03448_trivial_count_single_threaded_merge/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03448_trivial_count_single_threaded_merge/metadata.json b/parser/testdata/03448_trivial_count_single_threaded_merge/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03448_trivial_count_single_threaded_merge/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03448_trivial_count_single_threaded_merge/query.sql b/parser/testdata/03448_trivial_count_single_threaded_merge/query.sql
new file mode 100644
index 000000000..0e3e159cc
--- /dev/null
+++ b/parser/testdata/03448_trivial_count_single_threaded_merge/query.sql
@@ -0,0 +1,22 @@
+-- Tags: no-object-storage
+-- no-object-storage since the output of the pipeline depends on the read method
+
+SET enable_analyzer = 1;
+SET max_threads=4;
+
+DROP TABLE IF EXISTS trivial_count;
+CREATE TABLE trivial_count ENGINE = MergeTree() ORDER BY number AS Select * from numbers(10) ;
+
+-- { echo On }
+-- We should use just a single thread to merge the state of trivial count
+EXPLAIN PIPELINE SELECT count() FROM trivial_count;
+
+-- But not if we are filtering or doing other operations (no trivial count)
+EXPLAIN PIPELINE SELECT count() FROM trivial_count WHERE number % 3 = 2;
+EXPLAIN PIPELINE SELECT count() FROM trivial_count GROUP BY number % 10;
+
+-- Other aggregations should still use as many threads as necessary
+EXPLAIN PIPELINE SELECT sum(number) FROM trivial_count;
+EXPLAIN PIPELINE SELECT count(), sum(number) FROM trivial_count;
+
+DROP TABLE IF EXISTS trivial_count;
\ No newline at end of file
diff --git a/parser/testdata/03448_window_functions_distinct_distributed/ast.json b/parser/testdata/03448_window_functions_distinct_distributed/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03448_window_functions_distinct_distributed/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03448_window_functions_distinct_distributed/metadata.json b/parser/testdata/03448_window_functions_distinct_distributed/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03448_window_functions_distinct_distributed/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03448_window_functions_distinct_distributed/query.sql b/parser/testdata/03448_window_functions_distinct_distributed/query.sql
new file mode 100644
index 000000000..cab3ca071
--- /dev/null
+++ b/parser/testdata/03448_window_functions_distinct_distributed/query.sql
@@ -0,0 +1,73 @@
+-- Tags: distributed
+
+SET distributed_aggregation_memory_efficient = 1;
+
+SELECT any(total) AS total_distinct_avg
+FROM (
+    SELECT number,
+           avgDistinct(number) OVER () AS total
+    FROM remote('127.0.0.{1,2,3}', numbers_mt(100_000))
+);
+
+SELECT max(running_avg) AS final_running_avg
+FROM (
+    SELECT number,
+           avgDistinct(number) OVER (ORDER BY number) AS running_avg
+    FROM remote('127.0.0.{1,2,3}', numbers_mt(100_000))
+);
+
+SELECT
+    arraySort(groupUniqArray((partition_mod, max_val))) AS max_strings_per_partition
+FROM (
+    SELECT
+        number % 10 AS partition_mod,
+        maxDistinct(toString(number % 5)) OVER (PARTITION BY number % 7) AS max_val
+    FROM remote('127.0.0.{1,2,3}', numbers_mt(100_000))
+);
+
+SELECT
+    arraySort(groupUniqArray((partition_mod, argMin_val))) AS argmin_per_partition
+FROM (
+    SELECT
+        number AS ts,
+        ts % 10 AS partition_mod,
+        toString(ts % 5) AS s,
+        argMinDistinct(ts, s) OVER (PARTITION BY ts % 3 ORDER BY ts) AS argMin_val
+    FROM remote('127.0.0.{1,2,3}', numbers_mt(100_000)) ORDER BY ts
+);
+
+SET distributed_aggregation_memory_efficient = 0;
+
+SELECT any(total) AS total_distinct_avg
+FROM (
+    SELECT number,
+           avgDistinct(number) OVER () AS total
+    FROM remote('127.0.0.{1,2,3}', numbers_mt(100_000))
+);
+
+SELECT max(running_avg) AS final_running_avg
+FROM (
+    SELECT number,
+           avgDistinct(number) OVER (ORDER BY number) AS running_avg
+    FROM remote('127.0.0.{1,2,3}', numbers_mt(100_000))
+);
+
+SELECT
+    arraySort(groupUniqArray((partition_mod, max_val))) AS max_strings_per_partition
+FROM (
+    SELECT
+        number % 10 AS partition_mod,
+        maxDistinct(toString(number % 5)) OVER (PARTITION BY number % 7) AS max_val
+    FROM remote('127.0.0.{1,2,3}', numbers_mt(100_000))
+);
+
+SELECT
+    arraySort(groupUniqArray((partition_mod, argMin_val))) AS argmin_per_partition
+FROM (
+    SELECT
+        number AS ts,
+        ts % 10 AS partition_mod,
+        toString(ts % 5) AS s,
+        argMinDistinct(ts, s) OVER (PARTITION BY ts % 3 ORDER BY ts) AS argMin_val
+    FROM remote('127.0.0.{1,2,3}', numbers_mt(100_000)) ORDER BY ts
+);
diff --git a/parser/testdata/03449_join_using_allow_alias/ast.json b/parser/testdata/03449_join_using_allow_alias/ast.json
new file mode 100644
index 000000000..7e4ac561e
--- /dev/null
+++ b/parser/testdata/03449_join_using_allow_alias/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001380307,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/03449_join_using_allow_alias/metadata.json b/parser/testdata/03449_join_using_allow_alias/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03449_join_using_allow_alias/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03449_join_using_allow_alias/query.sql b/parser/testdata/03449_join_using_allow_alias/query.sql
new file mode 100644
index 000000000..5e4a3fe5d
--- /dev/null
+++ b/parser/testdata/03449_join_using_allow_alias/query.sql
@@ -0,0 +1,9 @@
+set enable_analyzer=1;
+
+-- { echo On }
+
+select * from numbers(1) l inner join system.one r using (number as dummy);
+select * from system.one l inner join numbers(1) r using (dummy as number);
+
+select * from numbers(2) l inner join (select number + 1 as dummy from numbers(1)) r using (number as dummy);
+select * from (select number + 1 as dummy from numbers(1)) l inner join numbers(2) r using (dummy as number);
diff --git a/parser/testdata/03449_window_cannot_find_column/ast.json b/parser/testdata/03449_window_cannot_find_column/ast.json
new file mode 100644
index 000000000..909cd64f3
--- /dev/null
+++ b/parser/testdata/03449_window_cannot_find_column/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery t0 (children 1)"
+		},
+		{
+			"explain": " Identifier t0"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001372042,
+		"rows_read": 2,
+		"bytes_read": 56
+	}
+}
diff --git a/parser/testdata/03449_window_cannot_find_column/metadata.json b/parser/testdata/03449_window_cannot_find_column/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03449_window_cannot_find_column/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03449_window_cannot_find_column/query.sql b/parser/testdata/03449_window_cannot_find_column/query.sql
new file mode 100644
index 000000000..141f9f6b6
--- /dev/null
+++ b/parser/testdata/03449_window_cannot_find_column/query.sql
@@ -0,0 +1,24 @@
+DROP TABLE IF EXISTS t0;
+DROP TABLE IF EXISTS t3;
+
+create table t0 (vkey UInt32, primary key(vkey)) engine = MergeTree;
+create view t3 as
+select distinct
+  ref_0.vkey as c_2_c16_0
+ from
+  t0 as ref_0;
+insert into t0 values (4);
+
+select 'It was a bug-triggering query:';
+with cte_4 as (select
+  rank() over w0 as c_2_c2398_0
+ from
+  t3 as ref_15
+ window w0 as (partition by ref_15.c_2_c16_0 order by ref_15.c_2_c16_0 desc))
+select distinct
+  ref_39.c_2_c2398_0 as c_9_c2479_0
+ from
+  cte_4 as ref_39;
+
+DROP TABLE t3;
+DROP TABLE t0;
diff --git a/parser/testdata/03450_parameterized_view_forward/ast.json b/parser/testdata/03450_parameterized_view_forward/ast.json
new file mode 100644
index 000000000..603601c90
--- /dev/null
+++ b/parser/testdata/03450_parameterized_view_forward/ast.json
@@ -0,0 +1,37 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 3)"
+		},
+		{
+			"explain": "  TableIdentifier inner_view"
+		},
+		{
+			"explain": "  TableIdentifier outer_view_hardcoded_ok"
+		},
+		{
+			"explain": "  TableIdentifier outer_view_parameterized_ko"
+		}
+	],
+
+	"rows": 5,
+
+	"statistics":
+	{
+		"elapsed": 0.001495239,
+		"rows_read": 5,
+		"bytes_read": 206
+	}
+}
diff --git a/parser/testdata/03450_parameterized_view_forward/metadata.json b/parser/testdata/03450_parameterized_view_forward/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03450_parameterized_view_forward/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03450_parameterized_view_forward/query.sql b/parser/testdata/03450_parameterized_view_forward/query.sql
new file mode 100644
index 000000000..bdf060180
--- /dev/null
+++ b/parser/testdata/03450_parameterized_view_forward/query.sql
@@ -0,0 +1,18 @@
+DROP TABLE IF EXISTS inner_view, outer_view_hardcoded_ok, outer_view_parameterized_ko;
+
+CREATE VIEW inner_view AS
+    SELECT {inner_a:Int32} + {inner_b:Int32} `n`;
+
+CREATE VIEW outer_view_hardcoded_ok AS
+    SELECT n * 2 `c`
+    FROM inner_view(inner_a=1, inner_b=2);
+
+SELECT * FROM outer_view_hardcoded_ok;
+
+CREATE VIEW outer_view_parameterized_ko AS
+SELECT n * 2 `c`
+FROM inner_view(inner_a={a:Int32}, inner_b={b:Int32});
+
+SELECT * FROM outer_view_parameterized_ko(a=1, b=2);
+
+DROP TABLE inner_view, outer_view_hardcoded_ok, outer_view_parameterized_ko;
diff --git a/parser/testdata/03451_parameterized_views_without_alias/ast.json b/parser/testdata/03451_parameterized_views_without_alias/ast.json
new file mode 100644
index 000000000..858f65407
--- /dev/null
+++ b/parser/testdata/03451_parameterized_views_without_alias/ast.json
@@ -0,0 +1,34 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 2)"
+		},
+		{
+			"explain": "  TableIdentifier parameterized_view_without_renaming"
+		},
+		{
+			"explain": "  TableIdentifier parameterized_view_with_renaming"
+		}
+	],
+
+	"rows": 4,
+
+	"statistics":
+	{
+		"elapsed": 0.001470612,
+		"rows_read": 4,
+		"bytes_read": 187
+	}
+}
diff --git a/parser/testdata/03451_parameterized_views_without_alias/metadata.json b/parser/testdata/03451_parameterized_views_without_alias/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03451_parameterized_views_without_alias/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03451_parameterized_views_without_alias/query.sql b/parser/testdata/03451_parameterized_views_without_alias/query.sql
new file mode 100644
index 000000000..07ccdb020
--- /dev/null
+++ b/parser/testdata/03451_parameterized_views_without_alias/query.sql
@@ -0,0 +1,11 @@
+DROP TABLE IF EXISTS parameterized_view_without_renaming, parameterized_view_with_renaming;
+
+CREATE VIEW parameterized_view_without_renaming AS
+SELECT {test:Int32} * 2;
+SELECT * FROM parameterized_view_without_renaming(test=42);
+
+CREATE VIEW parameterized_view_with_renaming AS
+SELECT {test:Int32} * 2 `result`;
+SELECT * FROM parameterized_view_with_renaming(test=42);
+
+DROP TABLE parameterized_view_without_renaming, parameterized_view_with_renaming;
diff --git a/parser/testdata/03453_group_by_all_grouping/ast.json b/parser/testdata/03453_group_by_all_grouping/ast.json
new file mode 100644
index 000000000..567996fea
--- /dev/null
+++ b/parser/testdata/03453_group_by_all_grouping/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001238396,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/03453_group_by_all_grouping/metadata.json b/parser/testdata/03453_group_by_all_grouping/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03453_group_by_all_grouping/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03453_group_by_all_grouping/query.sql b/parser/testdata/03453_group_by_all_grouping/query.sql
new file mode 100644
index 000000000..8294ef52a
--- /dev/null
+++ b/parser/testdata/03453_group_by_all_grouping/query.sql
@@ -0,0 +1,34 @@
+SET allow_experimental_analyzer = 1;
+
+-- Basic functionality with GROUPING and GROUP BY ALL WITH ROLLUP
+SELECT l.number, sum(r.number), grouping(l.number)
+FROM numbers(1) l JOIN numbers(2) r ON l.number < r.number
+GROUP BY ALL WITH ROLLUP;
+
+-- Multiple GROUPING functions
+SELECT l.number, r.number % 3 AS mod3, sum(r.number),
+       grouping(l.number), grouping(mod3), grouping(l.number, mod3)
+FROM numbers(1) l JOIN numbers(2) r ON l.number < r.number
+GROUP BY ALL WITH ROLLUP;
+
+-- GROUPING with CUBE
+SELECT l.number, r.number % 3 AS mod3, sum(r.number),
+       grouping(l.number), grouping(mod3)
+FROM numbers(1) l JOIN numbers(2) r ON l.number < r.number
+GROUP BY ALL WITH CUBE;
+
+-- Mix of regular columns and expressions with GROUPING
+SELECT
+    l.number,
+    l.number % 2 AS parity,
+    sum(r.number),
+    max(r.number),
+    grouping(l.number),
+    grouping(parity)
+FROM numbers(1) l JOIN numbers(2) r ON l.number < r.number
+GROUP BY ALL WITH ROLLUP;
+
+-- Verify error is still thrown when GROUPING is explicitly in GROUP BY
+SELECT l.number, sum(r.number), grouping(l.number) as g
+FROM numbers(1) l JOIN numbers(2) r ON l.number < r.number
+GROUP BY l.number, g WITH ROLLUP; -- { serverError ILLEGAL_AGGREGATION }
diff --git a/parser/testdata/03453_parameterized_view_array_of_points/ast.json b/parser/testdata/03453_parameterized_view_array_of_points/ast.json
new file mode 100644
index 000000000..fa0c46277
--- /dev/null
+++ b/parser/testdata/03453_parameterized_view_array_of_points/ast.json
@@ -0,0 +1,37 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 3)"
+		},
+		{
+			"explain": "  TableIdentifier point_test"
+		},
+		{
+			"explain": "  TableIdentifier point_test_parameterized"
+		},
+		{
+			"explain": "  TableIdentifier point_test_parameterized2"
+		}
+	],
+
+	"rows": 5,
+
+	"statistics":
+	{
+		"elapsed": 0.001599201,
+		"rows_read": 5,
+		"bytes_read": 205
+	}
+}
diff --git a/parser/testdata/03453_parameterized_view_array_of_points/metadata.json b/parser/testdata/03453_parameterized_view_array_of_points/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03453_parameterized_view_array_of_points/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03453_parameterized_view_array_of_points/query.sql b/parser/testdata/03453_parameterized_view_array_of_points/query.sql
new file mode 100644
index 000000000..87a112141
--- /dev/null
+++ b/parser/testdata/03453_parameterized_view_array_of_points/query.sql
@@ -0,0 +1,28 @@
+DROP TABLE IF EXISTS point_test, point_test_parameterized, point_test_parameterized2;
+
+CREATE TABLE point_test
+(
+    `name` String,
+    `coord` Point
+)
+ENGINE = Memory;
+
+INSERT INTO point_test FORMAT Values ('one', (0.12,46.45)), ('two', (0,0)), ('three',(1,0)), ('four', (0,1));
+
+CREATE VIEW point_test_parameterized AS
+SELECT *
+FROM point_test
+WHERE coord = {point:Point};
+
+SELECT *
+FROM point_test_parameterized(point = (0, 0));
+
+CREATE VIEW point_test_parameterized2 AS
+SELECT *
+FROM point_test
+WHERE coord IN {point:Array(Point)};
+
+SELECT *
+FROM point_test_parameterized2(point = [(0, 0)]);
+
+DROP TABLE point_test, point_test_parameterized, point_test_parameterized2;
diff --git a/parser/testdata/03454_global_join_index_subqueries/ast.json b/parser/testdata/03454_global_join_index_subqueries/ast.json
new file mode 100644
index 000000000..a87d3da57
--- /dev/null
+++ b/parser/testdata/03454_global_join_index_subqueries/ast.json
@@ -0,0 +1,37 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Literal 'query1'"
+		}
+	],
+
+	"rows": 5,
+
+	"statistics":
+	{
+		"elapsed": 0.001106949,
+		"rows_read": 5,
+		"bytes_read": 177
+	}
+}
diff --git a/parser/testdata/03454_global_join_index_subqueries/metadata.json b/parser/testdata/03454_global_join_index_subqueries/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03454_global_join_index_subqueries/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03454_global_join_index_subqueries/query.sql b/parser/testdata/03454_global_join_index_subqueries/query.sql
new file mode 100644
index 000000000..63c52276f
--- /dev/null
+++ b/parser/testdata/03454_global_join_index_subqueries/query.sql
@@ -0,0 +1,13 @@
+select 'query1';
+
+SELECT *
+FROM cluster(test_cluster_two_shards, system.one) AS A
+GLOBAL INNER JOIN (select * from cluster(test_cluster_two_shards, system.one)) AS B ON A.dummy = B.dummy;
+
+select 'query2';
+
+set use_index_for_in_with_subqueries = 0;
+
+SELECT *
+FROM cluster(test_cluster_two_shards, system.one) AS A
+GLOBAL INNER JOIN (select * from cluster(test_cluster_two_shards, system.one)) AS B ON A.dummy = B.dummy;
diff --git a/parser/testdata/03454_parameterized_view_constant_identifier/ast.json b/parser/testdata/03454_parameterized_view_constant_identifier/ast.json
new file mode 100644
index 000000000..42945d508
--- /dev/null
+++ b/parser/testdata/03454_parameterized_view_constant_identifier/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery c (children 1)"
+		},
+		{
+			"explain": " Identifier c"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001117992,
+		"rows_read": 2,
+		"bytes_read": 54
+	}
+}
diff --git a/parser/testdata/03454_parameterized_view_constant_identifier/metadata.json b/parser/testdata/03454_parameterized_view_constant_identifier/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03454_parameterized_view_constant_identifier/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03454_parameterized_view_constant_identifier/query.sql b/parser/testdata/03454_parameterized_view_constant_identifier/query.sql
new file mode 100644
index 000000000..2cc9bc284
--- /dev/null
+++ b/parser/testdata/03454_parameterized_view_constant_identifier/query.sql
@@ -0,0 +1,8 @@
+DROP TABLE IF EXISTS c;
+create view c as select 3 as result where {a:Int16} = 0;
+
+SET enable_analyzer = 1;
+select 1, result from c(a=0);
+select 2, result from c(a=3);
+
+DROP TABLE c;
diff --git a/parser/testdata/03454_parameterized_views_null/ast.json b/parser/testdata/03454_parameterized_views_null/ast.json
new file mode 100644
index 000000000..4073a756a
--- /dev/null
+++ b/parser/testdata/03454_parameterized_views_null/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001201492,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/03454_parameterized_views_null/metadata.json b/parser/testdata/03454_parameterized_views_null/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03454_parameterized_views_null/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03454_parameterized_views_null/query.sql b/parser/testdata/03454_parameterized_views_null/query.sql
new file mode 100644
index 000000000..ff0be5342
--- /dev/null
+++ b/parser/testdata/03454_parameterized_views_null/query.sql
@@ -0,0 +1,6 @@
+SET enable_analyzer = 1;
+
+DROP TABLE IF EXISTS test_table;
+create view test_table as select * from system.one where dummy={param:Nullable(Int64)} IS NULL;
+select * from test_table(param=NULL);
+DROP TABLE test_table;
diff --git a/parser/testdata/03455_direct_io_read_array_values/ast.json b/parser/testdata/03455_direct_io_read_array_values/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03455_direct_io_read_array_values/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03455_direct_io_read_array_values/metadata.json b/parser/testdata/03455_direct_io_read_array_values/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03455_direct_io_read_array_values/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03455_direct_io_read_array_values/query.sql b/parser/testdata/03455_direct_io_read_array_values/query.sql
new file mode 100644
index 000000000..a10cec27b
--- /dev/null
+++ b/parser/testdata/03455_direct_io_read_array_values/query.sql
@@ -0,0 +1,19 @@
+-- Tags: long
+
+set max_threads = 3;
+set min_bytes_to_use_direct_io = 1;
+
+drop table if exists test;
+create table test (id UInt64, json JSON(max_dynamic_paths=4)) engine=MergeTree order by id settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1, index_granularity_bytes=28437532, merge_max_block_size=3520, index_granularity=26762;
+
+system stop merges test;
+insert into test select number, toJSONString(map('a', number)) from numbers(100000);
+insert into test select number, toJSONString(map('b', arrayMap(x -> map('c', x), range(number % 5 + 1)))) from numbers(100000);
+insert into test select number, toJSONString(map('b', arrayMap(x -> map('d', x), range(number % 5 + 1)))) from numbers(50000);
+
+system start merges test;
+optimize table test final;
+
+select * from (select distinct json.b[] from test) order by all;
+
+drop table test;
diff --git a/parser/testdata/03456_match_index_prefix_extraction/ast.json b/parser/testdata/03456_match_index_prefix_extraction/ast.json
new file mode 100644
index 000000000..ec32646ea
--- /dev/null
+++ b/parser/testdata/03456_match_index_prefix_extraction/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001249799,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/03456_match_index_prefix_extraction/metadata.json b/parser/testdata/03456_match_index_prefix_extraction/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03456_match_index_prefix_extraction/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03456_match_index_prefix_extraction/query.sql b/parser/testdata/03456_match_index_prefix_extraction/query.sql
new file mode 100644
index 000000000..56eeb8bdb
--- /dev/null
+++ b/parser/testdata/03456_match_index_prefix_extraction/query.sql
@@ -0,0 +1,111 @@
+SET parallel_replicas_local_plan=1;
+
+drop table if exists foo;
+
+CREATE TABLE foo (id UInt8, path String) engine = MergeTree ORDER BY (path) SETTINGS index_granularity=1;
+
+INSERT INTO foo VALUES (1, 'xxx|yyy'),
+(2, 'xxx(zzz'),
+(3, 'xxx)zzz'),
+(4, 'xxx^zzz'),
+(5, 'xxx$zzz'),
+(6, 'xxx.zzz'),
+(7, 'xxx[zzz'),
+(8, 'xxx]zzz'),
+(9, 'xxx?zzz'),
+(10, 'xxx*zzz'),
+(11, 'xxx+zzz'),
+(12, 'xxx\\zzz'),
+(13, 'xxx{zzz'),
+(14, 'xxx}zzz'),
+(15, 'xxx-zzz');
+
+
+-- check if also escaped sequence are properly extracted
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx\\(zzz')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx\\(zzz') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx\\)zzz')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx\\)zzz') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx\\^zzz')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx\\^zzz') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx\\$zzz')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx\\$zzz') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx\\.zzz')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx\\.zzz') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx\\[zzz')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx\\[zzz') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx\\]zzz')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx\\]zzz') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx\\?zzz')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx\\?zzz') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx\\*zzz')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx\\*zzz') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx\\+zzz')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx\\+zzz') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx\\\\zzz')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx\\\\zzz') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx\\{zzz')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx\\{zzz') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx\\}zzz')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx\\}zzz') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx\\-zzz')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx\\-zzz') SETTINGS force_primary_key = 1;
+
+
+-- those regex chars prevent the index use (only 3 first chars used during index scan)
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx\0bla')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx\0bla') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx(bla)')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx(bla)') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx[bla]')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx[bla]') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx^bla')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx^bla') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx.bla')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx.bla') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx+bla')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx+bla') SETTINGS force_primary_key = 1;
+
+
+-- here the forth char is not used during index, because it has 0+ quantifier
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxxx{0,1}')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxxx{0,1}') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxxx?')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxxx?') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxxx*')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxxx*') SETTINGS force_primary_key = 1;
+
+-- some unsupported regex chars - only 3 first chars used during index scan
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx\d+')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx\d+') SETTINGS force_primary_key = 1;
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx\w+')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx\w+') SETTINGS force_primary_key = 1;
+
+
+-- fully disabled for pipes - see https://github.com/ClickHouse/ClickHouse/pull/54696
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxx\\|zzz')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxx\\|zzz') SETTINGS force_primary_key = 1; -- { serverError INDEX_NOT_USED }
+
+SELECT trimLeft(explain) FROM (EXPLAIN PLAN indexes=1 SELECT id FROM foo WHERE match(path, '^xxxx|foo')) WHERE explain like '%Condition%';
+SELECT count() FROM foo WHERE match(path, '^xxxx|foo') SETTINGS force_primary_key = 1; -- { serverError INDEX_NOT_USED }
diff --git a/parser/testdata/03457_bitmapContains_nullable/ast.json b/parser/testdata/03457_bitmapContains_nullable/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03457_bitmapContains_nullable/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03457_bitmapContains_nullable/metadata.json b/parser/testdata/03457_bitmapContains_nullable/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03457_bitmapContains_nullable/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03457_bitmapContains_nullable/query.sql b/parser/testdata/03457_bitmapContains_nullable/query.sql
new file mode 100644
index 000000000..fe5aae2d8
--- /dev/null
+++ b/parser/testdata/03457_bitmapContains_nullable/query.sql
@@ -0,0 +1,36 @@
+-- https://github.com/ClickHouse/ClickHouse/issues/19311
+WITH (SELECT groupBitmapState(number::Nullable(UInt8)) as n from numbers(1)) as n SELECT number as x, bitmapContains(n, x) FROM numbers(10); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
+
+WITH
+    men AS
+    (
+        SELECT
+            toUInt32(number) AS id,
+            mod(id, 100) AS age,
+            mod(id, 1000) * 60 AS sal,
+            mod(id, 60) AS nat
+        FROM system.numbers
+        LIMIT 10
+    ),
+    t AS
+    (
+        SELECT
+            number AS n,
+            multiIf(n = 0, 0, n = 1, 6, n = 2, 30, NULL) AS lo,
+            multiIf(n = 0, 5, n = 1, 29, n = 2, 99, NULL) AS hi
+        FROM system.numbers
+        LIMIT 3
+    ),
+    t2 AS
+    (
+        SELECT
+            toString(n) AS name,
+            groupBitmapState(multiIf((age >= lo) AND (age <= hi), id, NULL)) AS bit_state
+        FROM men, t
+        GROUP BY n
+    )
+SELECT
+    name,
+    sumIf(sal, bitmapContains(bit_state, id))
+FROM men, t2
+GROUP BY name; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT }
diff --git a/parser/testdata/03457_bug79403_marks_compress_block_is_zero/ast.json b/parser/testdata/03457_bug79403_marks_compress_block_is_zero/ast.json
new file mode 100644
index 000000000..141c2b6b3
--- /dev/null
+++ b/parser/testdata/03457_bug79403_marks_compress_block_is_zero/ast.json
@@ -0,0 +1,58 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "CreateQuery t0 (children 3)"
+		},
+		{
+			"explain": " Identifier t0"
+		},
+		{
+			"explain": " Columns definition (children 1)"
+		},
+		{
+			"explain": "  ExpressionList (children 1)"
+		},
+		{
+			"explain": "   ColumnDeclaration c0 (children 1)"
+		},
+		{
+			"explain": "    DataType Int"
+		},
+		{
+			"explain": " Storage definition (children 3)"
+		},
+		{
+			"explain": "  Function MergeTree (children 1)"
+		},
+		{
+			"explain": "   ExpressionList"
+		},
+		{
+			"explain": "  Function tuple (children 1)"
+		},
+		{
+			"explain": "   ExpressionList"
+		},
+		{
+			"explain": "  Set"
+		}
+	],
+
+	"rows": 12,
+
+	"statistics":
+	{
+		"elapsed": 0.001214046,
+		"rows_read": 12,
+		"bytes_read": 383
+	}
+}
diff --git a/parser/testdata/03457_bug79403_marks_compress_block_is_zero/metadata.json b/parser/testdata/03457_bug79403_marks_compress_block_is_zero/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03457_bug79403_marks_compress_block_is_zero/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03457_bug79403_marks_compress_block_is_zero/query.sql b/parser/testdata/03457_bug79403_marks_compress_block_is_zero/query.sql
new file mode 100644
index 000000000..78bcc26e4
--- /dev/null
+++ b/parser/testdata/03457_bug79403_marks_compress_block_is_zero/query.sql
@@ -0,0 +1,2 @@
+CREATE TABLE t0 (c0 Int) ENGINE = MergeTree() ORDER BY tuple() SETTINGS marks_compress_block_size = 0; -- { serverError BAD_ARGUMENTS }
+CREATE TABLE t0 (c0 Int) ENGINE = MergeTree() ORDER BY tuple() SETTINGS primary_key_compress_block_size = 0; -- { serverError BAD_ARGUMENTS }
diff --git a/parser/testdata/03457_inconsistent_formatting_except/ast.json b/parser/testdata/03457_inconsistent_formatting_except/ast.json
new file mode 100644
index 000000000..bda012992
--- /dev/null
+++ b/parser/testdata/03457_inconsistent_formatting_except/ast.json
@@ -0,0 +1,25 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "Set"
+		}
+	],
+
+	"rows": 1,
+
+	"statistics":
+	{
+		"elapsed": 0.001068877,
+		"rows_read": 1,
+		"bytes_read": 11
+	}
+}
diff --git a/parser/testdata/03457_inconsistent_formatting_except/metadata.json b/parser/testdata/03457_inconsistent_formatting_except/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03457_inconsistent_formatting_except/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03457_inconsistent_formatting_except/query.sql b/parser/testdata/03457_inconsistent_formatting_except/query.sql
new file mode 100644
index 000000000..9bc0d8aef
--- /dev/null
+++ b/parser/testdata/03457_inconsistent_formatting_except/query.sql
@@ -0,0 +1,8 @@
+SET enable_analyzer = 1;
+-- { echo }
+
+SELECT (*) EXCEPT SELECT 1;
+(SELECT *) EXCEPT SELECT 1;
+SELECT * + 1 EXCEPT SELECT 1;
+(SELECT * EXCEPT a) EXCEPT SELECT 1;
+SELECT * FROM (SELECT 1 UNION ALL SELECT 2 EXCEPT SELECT 3 UNION ALL SELECT (*) EXCEPT SELECT 4) ORDER BY *;
diff --git a/parser/testdata/03457_merge_engine_subcolumns/ast.json b/parser/testdata/03457_merge_engine_subcolumns/ast.json
new file mode 100644
index 000000000..9601c87c1
--- /dev/null
+++ b/parser/testdata/03457_merge_engine_subcolumns/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery test1 (children 1)"
+		},
+		{
+			"explain": " Identifier test1"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001359179,
+		"rows_read": 2,
+		"bytes_read": 62
+	}
+}
diff --git a/parser/testdata/03457_merge_engine_subcolumns/metadata.json b/parser/testdata/03457_merge_engine_subcolumns/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03457_merge_engine_subcolumns/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03457_merge_engine_subcolumns/query.sql b/parser/testdata/03457_merge_engine_subcolumns/query.sql
new file mode 100644
index 000000000..2a8e59972
--- /dev/null
+++ b/parser/testdata/03457_merge_engine_subcolumns/query.sql
@@ -0,0 +1,36 @@
+drop table if exists test1;
+drop table if exists test2;
+drop table if exists test_merge;
+
+create table test1 (x UInt64, t Tuple(a UInt32, b UInt32), y String) engine=Memory;
+create table test2 (x UInt64, t Tuple(a UInt32, b UInt32), y String) engine=Memory;
+create table test_merge (x UInt64, t Tuple(a UInt32, b UInt32), y String) engine=Merge(currentDatabase(), 'test');
+
+insert into test1 select 1, tuple(2, 3), 's1';
+insert into test2 select 4, tuple(5, 6), 's2';
+
+set allow_suspicious_types_in_order_by=1;
+
+select * from test_merge order by all;
+select t.a from test_merge order by all;
+select t.b from test_merge order by all;
+select x, t.a from test_merge order by all;
+select y, t.a from test_merge order by all;
+select t.a, t.b from test_merge order by all;
+select x, t.a, t.b from test_merge order by all;
+select y, t.a, t.b from test_merge order by all;
+select x, t.a, t.b, y from test_merge order by all;
+
+drop table test_merge;
+drop table test1;
+drop table test2;
+
+drop table if exists test;
+create table test (json JSON) engine=Memory;
+create table test_merge (json JSON) engine=Merge(currentDatabase(), 'test');
+insert into test values ('{"a" : {"b" : 42, "g" : 42.42}, "c" : [1, 2, 3], "d" : "2020-01-01"}'), ('{"f" : "Hello, World!", "d" : "2020-01-02"}'), ('{"a" : {"b" : 43, "e" : 10, "g" : 43.43}, "c" : [4, 5, 6]}');
+
+select json.a.b, json.a.g, json.c, json.d from test_merge;
+drop table test_merge;
+drop table test;
+
diff --git a/parser/testdata/03457_move_global_in_to_prewhere/ast.json b/parser/testdata/03457_move_global_in_to_prewhere/ast.json
new file mode 100644
index 000000000..11c2ad4d2
--- /dev/null
+++ b/parser/testdata/03457_move_global_in_to_prewhere/ast.json
@@ -0,0 +1,28 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "DropQuery 03457_data (children 1)"
+		},
+		{
+			"explain": " Identifier 03457_data"
+		}
+	],
+
+	"rows": 2,
+
+	"statistics":
+	{
+		"elapsed": 0.001119391,
+		"rows_read": 2,
+		"bytes_read": 72
+	}
+}
diff --git a/parser/testdata/03457_move_global_in_to_prewhere/metadata.json b/parser/testdata/03457_move_global_in_to_prewhere/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03457_move_global_in_to_prewhere/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03457_move_global_in_to_prewhere/query.sql b/parser/testdata/03457_move_global_in_to_prewhere/query.sql
new file mode 100644
index 000000000..83f195122
--- /dev/null
+++ b/parser/testdata/03457_move_global_in_to_prewhere/query.sql
@@ -0,0 +1,65 @@
+DROP TABLE IF EXISTS 03457_data;
+DROP TABLE IF EXISTS 03457_filter;
+
+SET parallel_replicas_local_plan = 1;
+
+CREATE TABLE 03457_filter (key UInt64) ENGINE = Memory
+AS
+SELECT 3 UNION ALL SELECT 23;
+
+CREATE TABLE 03457_data (key UInt64, val String) ENGINE = MergeTree ORDER BY key
+AS
+SELECT number, randomString(2048) FROM numbers(100);
+
+SELECT key, length(val) FROM (
+    SELECT * FROM 03457_data WHERE key GLOBAL IN (03457_filter)
+)
+ORDER BY key;
+
+SELECT replaceRegexpAll(trim(explain), '__table\d\.|__set_\d+_\d+|_subquery\d+|03457_filter', '')
+FROM (
+    EXPLAIN actions = 1
+    SELECT * FROM 03457_data WHERE key GLOBAL IN (03457_filter)
+)
+WHERE explain LIKE '%Prewhere filter column: globalIn%';
+
+SELECT key, length(val) FROM (
+    SELECT * FROM 03457_data WHERE key GLOBAL IN (SELECT * FROM 03457_filter WHERE key = 3)
+)
+ORDER BY key;
+
+SELECT replaceRegexpAll(trim(explain), '__table\d\.|__set_\d+_\d+|_subquery\d+|03457_filter', '')
+FROM (
+    EXPLAIN actions = 1
+    SELECT * FROM 03457_data WHERE key GLOBAL IN (SELECT * FROM 03457_filter WHERE key = 3)
+)
+WHERE explain LIKE '%Prewhere filter column: globalIn%';
+
+SELECT key, length(val) FROM (
+    SELECT * FROM 03457_data WHERE key GLOBAL NOT IN (03457_filter)
+)
+ORDER BY key
+LIMIT 5;
+
+SELECT replaceRegexpAll(trim(explain), '__table\d\.|__set_\d+_\d+|_subquery\d+|03457_filter', '')
+FROM (
+    EXPLAIN actions = 1
+    SELECT * FROM 03457_data WHERE key GLOBAL NOT IN (03457_filter)
+)
+WHERE explain LIKE '%Prewhere filter column: globalNotIn%';
+
+SELECT key, length(val) FROM (
+    SELECT * FROM 03457_data WHERE key GLOBAL NOT IN (SELECT * FROM 03457_filter WHERE key = 3)
+)
+ORDER BY key
+LIMIT 5;
+
+SELECT replaceRegexpAll(trim(explain), '__table\d\.|__set_\d+_\d+|_subquery\d+|03457_filter', '')
+FROM (
+    EXPLAIN actions = 1
+    SELECT * FROM 03457_data WHERE key GLOBAL NOT IN (SELECT * FROM 03457_filter WHERE key = 3)
+)
+WHERE explain LIKE '%Prewhere filter column: globalNotIn%';
+
+DROP TABLE 03457_data;
+DROP TABLE 03457_filter;
diff --git a/parser/testdata/03457_numeric_indexed_vector_build/ast.json b/parser/testdata/03457_numeric_indexed_vector_build/ast.json
new file mode 100644
index 000000000..71aba4baa
--- /dev/null
+++ b/parser/testdata/03457_numeric_indexed_vector_build/ast.json
@@ -0,0 +1,37 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Literal 'TEST groupNumericIndexedVector'"
+		}
+	],
+
+	"rows": 5,
+
+	"statistics":
+	{
+		"elapsed": 0.000981492,
+		"rows_read": 5,
+		"bytes_read": 201
+	}
+}
diff --git a/parser/testdata/03457_numeric_indexed_vector_build/metadata.json b/parser/testdata/03457_numeric_indexed_vector_build/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03457_numeric_indexed_vector_build/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03457_numeric_indexed_vector_build/query.sql b/parser/testdata/03457_numeric_indexed_vector_build/query.sql
new file mode 100644
index 000000000..87dcad9b5
--- /dev/null
+++ b/parser/testdata/03457_numeric_indexed_vector_build/query.sql
@@ -0,0 +1,42 @@
+select 'TEST groupNumericIndexedVector';
+
+DROP TABLE IF EXISTS uin_value_details;
+CREATE TABLE uin_value_details
+(
+    ds Date,
+    uin UInt32,
+    value UInt64
+)
+ENGINE = MergeTree()
+ORDER BY ds;
+
+INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-26', 105, 5), ('2023-12-26', 104, 4), ('2023-12-26', 103, 3);
+INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-27', 10000001, 7), ('2023-12-27', 10000002, 3);
+
+select numericIndexedVectorShortDebugString(groupNumericIndexedVectorState(uin, value)) from uin_value_details;
+select groupNumericIndexedVector(uin, value) from uin_value_details;
+select numericIndexedVectorAllValueSum(groupNumericIndexedVectorState(uin, value)) from uin_value_details;
+select numericIndexedVectorCardinality(groupNumericIndexedVectorState(uin, value)) from uin_value_details;
+
+select numericIndexedVectorShortDebugString(groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-26')) from uin_value_details;
+select numericIndexedVectorShortDebugString(groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-27')) from uin_value_details;
+
+select numericIndexedVectorCardinality(groupNumericIndexedVectorState('BSI', 64, 0)(uin, value)) from uin_value_details;
+select numericIndexedVectorCardinality(groupNumericIndexedVectorState('BSI', 32, 0)(uin, value)) from uin_value_details;
+select numericIndexedVectorCardinality(groupNumericIndexedVectorState('BSI', 16, 0)(uin, value)) from uin_value_details;
+select numericIndexedVectorCardinality(groupNumericIndexedVectorState('BSI', 32, 14)(uin, value)) from uin_value_details; -- { serverError BAD_ARGUMENTS }
+select numericIndexedVectorCardinality(groupNumericIndexedVectorState('RawSum', 32, 14)(uin, value)) from uin_value_details; -- { serverError BAD_ARGUMENTS }
+select numericIndexedVectorCardinality(groupNumericIndexedVectorState('BSI', 64, 14)(uin, value)) from uin_value_details; -- { serverError BAD_ARGUMENTS }
+
+with
+    numericIndexedVectorBuild(mapFromArrays([1, 2, 3], [10, 20, 30])) AS res1,
+    numericIndexedVectorBuild(mapFromArrays(arrayMap(x -> toUInt32(x), [1, 2, 3]), [10.5, 20.3, 30.892])) AS res2
+select tuple(
+    numericIndexedVectorAllValueSum(res1),
+    numericIndexedVectorCardinality(res1),
+    toTypeName(res1),
+    numericIndexedVectorAllValueSum(res2),
+    numericIndexedVectorCardinality(res2),
+    toTypeName(res2)
+);
+
diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_bit_promote/ast.json b/parser/testdata/03458_numeric_indexed_vector_operations_bit_promote/ast.json
new file mode 100644
index 000000000..0dbf156ad
--- /dev/null
+++ b/parser/testdata/03458_numeric_indexed_vector_operations_bit_promote/ast.json
@@ -0,0 +1,37 @@
+{
+	"meta":
+	[
+		{
+			"name": "explain",
+			"type": "String"
+		}
+	],
+
+	"data":
+	[
+		{
+			"explain": "SelectWithUnionQuery (children 1)"
+		},
+		{
+			"explain": " ExpressionList (children 1)"
+		},
+		{
+			"explain": "  SelectQuery (children 1)"
+		},
+		{
+			"explain": "   ExpressionList (children 1)"
+		},
+		{
+			"explain": "    Literal 'TEST numericIndexedVectorPointwise operations in bit promotion with zero values and Float64 value type'"
+		}
+	],
+
+	"rows": 5,
+
+	"statistics":
+	{
+		"elapsed": 0.001123749,
+		"rows_read": 5,
+		"bytes_read": 273
+	}
+}
diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_bit_promote/metadata.json b/parser/testdata/03458_numeric_indexed_vector_operations_bit_promote/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03458_numeric_indexed_vector_operations_bit_promote/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_bit_promote/query.sql b/parser/testdata/03458_numeric_indexed_vector_operations_bit_promote/query.sql
new file mode 100644
index 000000000..59ff1444d
--- /dev/null
+++ b/parser/testdata/03458_numeric_indexed_vector_operations_bit_promote/query.sql
@@ -0,0 +1,215 @@
+select 'TEST numericIndexedVectorPointwise operations in bit promotion with zero values and Float64 value type';
+DROP TABLE IF EXISTS uin_value_details;
+CREATE TABLE uin_value_details
+(
+    ds Date,
+    uin UInt32,
+    value Float64
+)
+ENGINE = MergeTree()
+ORDER BY ds;
+INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-26', 10000001, 7.3), ('2023-12-26', 10000002, 8.3), ('2023-12-26', 10000003, 0), ('2023-12-26', 10000004, 0), ('2023-12-26', 20000005, 0), ('2023-12-26', 30000005, 100.6543782), ('2023-12-26', 50000005, 0);
+INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-27', 10000001, 7.3), ('2023-12-27', 10000002, -8.3), ('2023-12-27', 10000003, 30.5), ('2023-12-27', 10000004, -3.384), ('2023-12-27', 20000005, 0), ('2023-12-27', 40000005, 100.66666666), ('2023-12-27', 60000005, 0);
+
+with
+(
+    select groupNumericIndexedVectorStateIf('BSI', 32, 24)(uin, value, ds = '2023-12-26')
+    from uin_value_details
+) as vec_1,
+(
+    select groupNumericIndexedVectorStateIf('BSI', 24, 24)(uin, value, ds = '2023-12-27')
+    from uin_value_details
+) as vec_2
+select arrayJoin([
+    numericIndexedVectorToMap(vec_1)
+    , numericIndexedVectorToMap(vec_2)
+    , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, vec_2))
+    , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 2))
+    , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 0))
+    , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_2, 0))
+]);
+
+with
+(
+    select groupNumericIndexedVectorStateIf('BSI', 32, 15)(uin, value, ds = '2023-12-26')
+    from uin_value_details
+) as vec_1,
+(
+    select groupNumericIndexedVectorStateIf('BSI', 24, 23)(uin, value, ds = '2023-12-27')
+    from uin_value_details
+) as vec_2
+select arrayJoin([
+    numericIndexedVectorToMap(vec_1)
+    , numericIndexedVectorToMap(vec_2)
+    , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, vec_2))
+    , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, vec_2))
+]);
+
+with
+(
+    select groupNumericIndexedVectorStateIf('BSI', 32, 15)(uin, value, ds = '2023-12-26')
+    from uin_value_details
+) as vec_1,
+(
+    select groupNumericIndexedVectorStateIf('BSI', 24, 23)(uin, value, ds = '2023-12-27')
+    from uin_value_details
+) as vec_2
+select arrayJoin([
+    numericIndexedVectorShortDebugString(vec_1)
+
, numericIndexedVectorShortDebugString(vec_2) + , numericIndexedVectorShortDebugString(numericIndexedVectorPointwiseAdd(vec_1, vec_2)) + , numericIndexedVectorShortDebugString(numericIndexedVectorPointwiseSubtract(vec_1, vec_2)) +]); + +with +( + select groupNumericIndexedVectorStateIf('BSI', 32, 0)(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf('BSI', 24, 0)(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_2, 0)) +]); +with +( + select groupNumericIndexedVectorStateIf('BSI', 32, 0)(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf('BSI', 24, 0)(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorShortDebugString(vec_1) + , numericIndexedVectorShortDebugString(vec_2) + , numericIndexedVectorShortDebugString(numericIndexedVectorPointwiseEqual(vec_1, vec_2)) + , numericIndexedVectorShortDebugString(numericIndexedVectorPointwiseEqual(vec_1, 2)) + , numericIndexedVectorShortDebugString(numericIndexedVectorPointwiseEqual(vec_1, 0)) + , numericIndexedVectorShortDebugString(numericIndexedVectorPointwiseNotEqual(vec_1, vec_2)) + , numericIndexedVectorShortDebugString(numericIndexedVectorPointwiseNotEqual(vec_1, 2)) + , numericIndexedVectorShortDebugString(numericIndexedVectorPointwiseNotEqual(vec_2, 0)) +]); + +with +( + select groupNumericIndexedVectorStateIf('BSI', 32, 0)(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf('BSI', 24, 0)(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_2, 0)) +]); + +with +( + select groupNumericIndexedVectorStateIf('BSI', 32, 0)(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf('BSI', 24, 0)(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, vec_2)) + , 
numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_2, 0)) +]); + +with +( + select groupNumericIndexedVectorStateIf('BSI', 32, 15)(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf('BSI', 24, 0)(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, -2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_2, 8)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_2, 0)) +]); + +with +( + select groupNumericIndexedVectorStateIf('BSI', 32, 0)(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf('BSI', 24, 0)(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_2, 0)) +]); + +with +( + select groupNumericIndexedVectorStateIf('BSI', 32, 0)(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf('BSI', 24, 0)(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorShortDebugString(vec_1) + , numericIndexedVectorShortDebugString(vec_2) + , numericIndexedVectorShortDebugString(numericIndexedVectorPointwiseDivide(vec_1, vec_2)) + , numericIndexedVectorShortDebugString(numericIndexedVectorPointwiseDivide(vec_1, 2)) + , numericIndexedVectorShortDebugString(numericIndexedVectorPointwiseDivide(vec_2, 0)) +]); + +with +( + select groupNumericIndexedVectorStateIf('BSI', 0, 0)(uin, value, ds = '2023-12-28') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf('BSI', 24, 24)(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorShortDebugString(vec_1) + , numericIndexedVectorShortDebugString(vec_2) + , numericIndexedVectorShortDebugString(numericIndexedVectorPointwiseLess(vec_1, vec_2)) + , numericIndexedVectorShortDebugString(numericIndexedVectorPointwiseAdd(vec_1, 2)) + , numericIndexedVectorShortDebugString(numericIndexedVectorPointwiseAdd(vec_1, 0)) + , numericIndexedVectorShortDebugString(numericIndexedVectorPointwiseAdd(vec_2, 0)) +]); + + +DROP TABLE IF EXISTS uin_value_details;
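The bit-promotion fixture above deliberately pairs vectors built with different BSI parameters, for example ('BSI', 32, 15) against ('BSI', 24, 23), plus a ('BSI', 0, 0) state aggregated over a date with no rows, so the pointwise operations have to promote both operands to a common representation before combining them. A reduced sketch of the same pattern, with the parameter semantics assumed as above:

```sql
-- Sketch: two vectors built with different assumed (integer bits, fraction
-- bits) parameters; PointwiseAdd is expected to reconcile the widths.
WITH
    (SELECT groupNumericIndexedVectorState('BSI', 32, 15)(toUInt32(number), number / 2) FROM numbers(5)) AS a,
    (SELECT groupNumericIndexedVectorState('BSI', 24, 23)(toUInt32(number), number / 3) FROM numbers(5)) AS b
SELECT numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(a, b));
```

diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_i8f64/ast.json b/parser/testdata/03458_numeric_indexed_vector_operations_i8f64/ast.json new file mode 100644 index 000000000..5447b7a1a --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_i8f64/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" +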
}, + { + "explain": " Literal 'TEST numericIndexedVectorPointwise operations with zero values and UInt8 index type'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001385035, + "rows_read": 5, + "bytes_read": 254 + } +}
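Each fixture directory in this diff follows the same layout: ast.json stores ClickHouse's JSON-format output of EXPLAIN AST for the first statement of query.sql (hence the meta/data/rows/statistics envelope around a single String column named explain), while metadata.json's {"todo": true} appears to mark tests the parser does not fully handle yet. A fixture of this shape can be regenerated with something like the following; the exact workflow is an assumption, not stated in the diff:

```sql
-- Sketch: EXPLAIN AST prints one row per AST node; FORMAT JSON wraps those
-- rows in the meta/data/rows/statistics envelope seen in ast.json.
EXPLAIN AST
SELECT 'TEST numericIndexedVectorPointwise operations with zero values and UInt8 index type'
FORMAT JSON;
```

diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_i8f64/metadata.json b/parser/testdata/03458_numeric_indexed_vector_operations_i8f64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_i8f64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_i8f64/query.sql b/parser/testdata/03458_numeric_indexed_vector_operations_i8f64/query.sql new file mode 100644 index 000000000..60415286c --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_i8f64/query.sql @@ -0,0 +1,61 @@ +select 'TEST numericIndexedVectorPointwise operations with zero values and UInt8 index type'; +DROP TABLE IF EXISTS uin_value_details; +CREATE TABLE uin_value_details +( + ds Date, + uin UInt8, + value Float64 +) +ENGINE = MergeTree() +ORDER BY ds; +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-26', 1001, 7.3), ('2023-12-26', 1002, 8.3), ('2023-12-26', 1003, 0), ('2023-12-26', 1004, 0), ('2023-12-26', 2005, 0), ('2023-12-26', 3005, 100.6543782), ('2023-12-26', 5005, 0); +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-27', 1001, 7.3), ('2023-12-27', 1002, -8.3), ('2023-12-27', 1003, 30.5), ('2023-12-27', 1004, -3.384), ('2023-12-27', 2005, 0), ('2023-12-27', 4005, 100.66666666), ('2023-12-27', 6005, 0); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, -2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_2, 8)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 2)) + ,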
numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_2, 0)) +]); + +DROP TABLE IF EXISTS uin_value_details; diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u16f64/ast.json b/parser/testdata/03458_numeric_indexed_vector_operations_u16f64/ast.json new file mode 100644 index 000000000..5348bb6e9 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u16f64/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'TEST numericIndexedVectorPointwise operations with zero values and UInt16 index type'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.00120758, + "rows_read": 5, + "bytes_read": 255 + } +} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u16f64/metadata.json b/parser/testdata/03458_numeric_indexed_vector_operations_u16f64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u16f64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u16f64/query.sql b/parser/testdata/03458_numeric_indexed_vector_operations_u16f64/query.sql new file mode 100644 index 000000000..931e54968 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u16f64/query.sql @@ -0,0 +1,61 @@ +select 'TEST numericIndexedVectorPointwise operations with zero values and UInt16 index type'; +DROP TABLE IF EXISTS uin_value_details; +CREATE TABLE uin_value_details +( + ds Date, + uin UInt16, + value Float64 +) +ENGINE = MergeTree() +ORDER BY ds; +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-26', 1001, 7.3), ('2023-12-26', 1002, 8.3), ('2023-12-26', 1003, 0), ('2023-12-26', 1004, 0), ('2023-12-26', 2005, 0), ('2023-12-26', 3005, 100.6543782), ('2023-12-26', 5005, 0); +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-27', 1001, 7.3), ('2023-12-27', 1002, -8.3), ('2023-12-27', 1003, 30.5), ('2023-12-27', 1004, -3.384), ('2023-12-27', 2005, 0), ('2023-12-27', 4005, 100.66666666), ('2023-12-27', 6005, 0); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-27') + from 
uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, -2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_2, 8)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_2, 0)) +]); + +DROP TABLE IF EXISTS uin_value_details; diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32f32/ast.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32f32/ast.json new file mode 100644 index 000000000..389a9e2c1 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32f32/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'TEST numericIndexedVectorPointwise operations with zero values and Float32 value type'" + } + ], + + "rows": 5, + + 
"statistics": + { + "elapsed": 0.001583085, + "rows_read": 5, + "bytes_read": 256 + } +} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32f32/metadata.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32f32/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32f32/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32f32/query.sql b/parser/testdata/03458_numeric_indexed_vector_operations_u32f32/query.sql new file mode 100644 index 000000000..00f3a8a4c --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32f32/query.sql @@ -0,0 +1,61 @@ +select 'TEST numericIndexedVectorPointwise operations with zero values and Float32 value type'; +DROP TABLE IF EXISTS uin_value_details; +CREATE TABLE uin_value_details +( + ds Date, + uin UInt32, + value Float32 +) +ENGINE = MergeTree() +ORDER BY ds; +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-26', 10000001, 7.3), ('2023-12-26', 10000002, 8.3), ('2023-12-26', 10000003, 0), ('2023-12-26', 10000004, 0), ('2023-12-26', 20000005, 0), ('2023-12-26', 30000005, 100.6543782), ('2023-12-26', 50000005, 0); +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-27', 10000001, 7.3), ('2023-12-27', 10000002, -8.3), ('2023-12-27', 10000003, 30.5), ('2023-12-27', 10000004, -3.384), ('2023-12-27', 20000005, 0), ('2023-12-27', 40000005, 100.66666666), ('2023-12-27', 60000005, 0); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, -2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_2, 8)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_2, 0)) + , 
numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_2, 0)) +]); + +DROP TABLE IF EXISTS uin_value_details; diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32f64/ast.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32f64/ast.json new file mode 100644 index 000000000..bb6a56b24 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32f64/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'TEST numericIndexedVectorPointwise operations with zero values and Float64 value type'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001317671, + "rows_read": 5, + "bytes_read": 256 + } +} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32f64/metadata.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32f64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32f64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32f64/query.sql b/parser/testdata/03458_numeric_indexed_vector_operations_u32f64/query.sql new file mode 100644 index 000000000..7688ce5f0 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32f64/query.sql @@ -0,0 +1,61 @@ +select 'TEST numericIndexedVectorPointwise operations with zero values and Float64 value type'; +DROP TABLE IF EXISTS uin_value_details; +CREATE TABLE uin_value_details +( + ds Date, + uin UInt32, + value Float64 +) +ENGINE = MergeTree() +ORDER BY ds; +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-26', 10000001, 7.3), ('2023-12-26', 10000002, 8.3), ('2023-12-26', 10000003, 0), ('2023-12-26', 10000004, 0), ('2023-12-26', 20000005, 0), ('2023-12-26', 30000005, 100.6543782), ('2023-12-26', 50000005, 0); +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-27', 10000001, 7.3), ('2023-12-27', 10000002, -8.3), ('2023-12-27', 10000003, 30.5), ('2023-12-27', 10000004, -3.384), ('2023-12-27', 20000005, 0), ('2023-12-27', 40000005, 100.66666666), ('2023-12-27', 60000005, 0); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-27') + from uin_value_details +) 
as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, -2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_2, 8)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_2, 0)) +]); + +DROP TABLE IF EXISTS uin_value_details; diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32i16/ast.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32i16/ast.json new file mode 100644 index 000000000..bc3c198b1 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32i16/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'TEST numericIndexedVectorPointwise operations with zero values and Int16 value type'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 
0.001264816, + "rows_read": 5, + "bytes_read": 254 + } +} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32i16/metadata.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32i16/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32i16/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32i16/query.sql b/parser/testdata/03458_numeric_indexed_vector_operations_u32i16/query.sql new file mode 100644 index 000000000..0c1af832b --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32i16/query.sql @@ -0,0 +1,56 @@ +select 'TEST numericIndexedVectorPointwise operations with zero values and Int16 value type'; +DROP TABLE IF EXISTS uin_value_details; +CREATE TABLE uin_value_details +( + ds Date, + uin UInt32, + value Int16 +) +ENGINE = MergeTree() +ORDER BY ds; +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-26', 10000001, 7), ('2023-12-26', 10000002, 8), ('2023-12-26', 10000003, 0), ('2023-12-26', 10000004, 0), ('2023-12-26', 20000005, 0), ('2023-12-26', 30000005, 100), ('2023-12-26', 50000005, 0); +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-27', 10000001, 7), ('2023-12-27', 10000002, -8), ('2023-12-27', 10000003, 30), ('2023-12-27', 10000004, -3), ('2023-12-27', 20000005, 0), ('2023-12-27', 40000005, 100), ('2023-12-27', 60000005, 0); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, -7)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, -8)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, -5)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, 2)) + , 
numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, 2)) +]); + +DROP TABLE IF EXISTS uin_value_details; diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32i32/ast.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32i32/ast.json new file mode 100644 index 000000000..b54ba9fde --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32i32/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'TEST numericIndexedVectorPointwise operations with zero values and Int32 value type'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001129976, + "rows_read": 5, + "bytes_read": 254 + } +} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32i32/metadata.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32i32/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32i32/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32i32/query.sql b/parser/testdata/03458_numeric_indexed_vector_operations_u32i32/query.sql new file mode 100644 index 000000000..287e3732b --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32i32/query.sql @@ -0,0 +1,56 @@ +select 'TEST numericIndexedVectorPointwise operations with zero values and Int32 value type'; +DROP TABLE IF EXISTS uin_value_details; +CREATE TABLE uin_value_details +( + ds Date, + uin UInt32, + value Int32 +) +ENGINE = MergeTree() +ORDER BY ds; +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-26', 10000001, 7), ('2023-12-26', 10000002, 8), ('2023-12-26', 10000003, 0), ('2023-12-26', 10000004, 0), ('2023-12-26', 20000005, 0), ('2023-12-26', 30000005, 100), ('2023-12-26', 50000005, 0); +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-27', 10000001, 7), ('2023-12-27', 10000002, -8), ('2023-12-27', 10000003, 30), ('2023-12-27', 10000004, -3), ('2023-12-27', 20000005, 0), ('2023-12-27', 40000005, 100), ('2023-12-27', 60000005, 0); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, -7)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, 2)) + , 
numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, -8)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, -5)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, 2)) +]); + +DROP TABLE IF EXISTS uin_value_details; diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32i64/ast.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32i64/ast.json new file mode 100644 index 000000000..4703e26f4 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32i64/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'TEST numericIndexedVectorPointwise operations with zero values and Int64 value type'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001216859, + "rows_read": 5, + "bytes_read": 254 + } +} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32i64/metadata.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32i64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32i64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32i64/query.sql b/parser/testdata/03458_numeric_indexed_vector_operations_u32i64/query.sql new file mode 100644 index 000000000..c35607b97 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32i64/query.sql @@ -0,0 +1,56 @@ +select 'TEST numericIndexedVectorPointwise operations with zero values and Int64 value type'; +DROP TABLE IF EXISTS uin_value_details; +CREATE TABLE uin_value_details +( + ds Date, 
+ uin UInt32, + value Int64 +) +ENGINE = MergeTree() +ORDER BY ds; +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-26', 10000001, 7), ('2023-12-26', 10000002, 8), ('2023-12-26', 10000003, 0), ('2023-12-26', 10000004, 0), ('2023-12-26', 20000005, 0), ('2023-12-26', 30000005, 100), ('2023-12-26', 50000005, 0); +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-27', 10000001, 7), ('2023-12-27', 10000002, -8), ('2023-12-27', 10000003, 30), ('2023-12-27', 10000004, -3), ('2023-12-27', 20000005, 0), ('2023-12-27', 40000005, 100), ('2023-12-27', 60000005, 0); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, -7)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, -8)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, -5)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, 2)) +]); + +DROP TABLE IF EXISTS uin_value_details; diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32i8/ast.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32i8/ast.json new file mode 100644 index 000000000..f4d693387 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32i8/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + 
], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'TEST numericIndexedVectorPointwise operations with zero values and Int8 value type'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001290992, + "rows_read": 5, + "bytes_read": 253 + } +} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32i8/metadata.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32i8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32i8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32i8/query.sql b/parser/testdata/03458_numeric_indexed_vector_operations_u32i8/query.sql new file mode 100644 index 000000000..8f39c26cb --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32i8/query.sql @@ -0,0 +1,56 @@ +select 'TEST numericIndexedVectorPointwise operations with zero values and Int8 value type'; +DROP TABLE IF EXISTS uin_value_details; +CREATE TABLE uin_value_details +( + ds Date, + uin UInt32, + value Int8 +) +ENGINE = MergeTree() +ORDER BY ds; +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-26', 10000001, 7), ('2023-12-26', 10000002, 8), ('2023-12-26', 10000003, 0), ('2023-12-26', 10000004, 0), ('2023-12-26', 20000005, 0), ('2023-12-26', 30000005, 100), ('2023-12-26', 50000005, 0); +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-27', 10000001, 7), ('2023-12-27', 10000002, -8), ('2023-12-27', 10000003, 30), ('2023-12-27', 10000004, -3), ('2023-12-27', 20000005, 0), ('2023-12-27', 40000005, 100), ('2023-12-27', 60000005, 0); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, -7)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, -8)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 2)) 
+ , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, -5)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, 2)) +]); + +DROP TABLE IF EXISTS uin_value_details; diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32u16/ast.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32u16/ast.json new file mode 100644 index 000000000..7759c2556 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32u16/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'TEST numericIndexedVectorPointwise operations with zero values and UInt16 value type'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001322015, + "rows_read": 5, + "bytes_read": 255 + } +} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32u16/metadata.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32u16/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32u16/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32u16/query.sql b/parser/testdata/03458_numeric_indexed_vector_operations_u32u16/query.sql new file mode 100644 index 000000000..efdb459c4 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32u16/query.sql @@ -0,0 +1,56 @@ +select 'TEST numericIndexedVectorPointwise operations with zero values and UInt16 value type'; +DROP TABLE IF EXISTS uin_value_details; +CREATE TABLE uin_value_details +( + ds Date, + uin UInt32, + value UInt16 +) +ENGINE = MergeTree() +ORDER BY ds; +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-26', 10000001, 7), ('2023-12-26', 10000002, 8), ('2023-12-26', 10000003, 0), ('2023-12-26', 10000004, 0), ('2023-12-26', 20000005, 0), ('2023-12-26', 30000005, 100), ('2023-12-26', 50000005, 0); +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-27', 10000001, 7), ('2023-12-27', 10000002, -8), ('2023-12-27', 10000003, 30), ('2023-12-27', 10000004, -3), ('2023-12-27', 20000005, 0), ('2023-12-27', 40000005, 100), ('2023-12-27', 60000005, 0); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) + , 
numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, -7)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, -8)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, -5)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, 2)) +]); + +DROP TABLE IF EXISTS uin_value_details; diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32u32/ast.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32u32/ast.json new file mode 100644 index 000000000..a99eb8f13 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32u32/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'TEST numericIndexedVectorPointwise operations with zero values and UInt32 value type'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001541046, + "rows_read": 5, + "bytes_read": 255 + } +} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32u32/metadata.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32u32/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32u32/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32u32/query.sql 
b/parser/testdata/03458_numeric_indexed_vector_operations_u32u32/query.sql new file mode 100644 index 000000000..5395db66d --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32u32/query.sql @@ -0,0 +1,56 @@ +select 'TEST numericIndexedVectorPointwise operations with zero values and UInt32 value type'; +DROP TABLE IF EXISTS uin_value_details; +CREATE TABLE uin_value_details +( + ds Date, + uin UInt32, + value UInt32 +) +ENGINE = MergeTree() +ORDER BY ds; +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-26', 10000001, 7), ('2023-12-26', 10000002, 8), ('2023-12-26', 10000003, 0), ('2023-12-26', 10000004, 0), ('2023-12-26', 20000005, 0), ('2023-12-26', 30000005, 100), ('2023-12-26', 50000005, 0); +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-27', 10000001, 7), ('2023-12-27', 10000002, -8), ('2023-12-27', 10000003, 30), ('2023-12-27', 10000004, -3), ('2023-12-27', 20000005, 0), ('2023-12-27', 40000005, 100), ('2023-12-27', 60000005, 0); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, -7)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, -8)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, -5)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, 2)) +]); + +DROP 
TABLE IF EXISTS uin_value_details; diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32u64/ast.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32u64/ast.json new file mode 100644 index 000000000..d37576006 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32u64/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'TEST numericIndexedVectorPointwise operations with zero values and UInt64 value type'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001475901, + "rows_read": 5, + "bytes_read": 255 + } +} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32u64/metadata.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32u64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32u64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32u64/query.sql b/parser/testdata/03458_numeric_indexed_vector_operations_u32u64/query.sql new file mode 100644 index 000000000..b9e74a4ee --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32u64/query.sql @@ -0,0 +1,56 @@ +select 'TEST numericIndexedVectorPointwise operations with zero values and UInt64 value type'; +DROP TABLE IF EXISTS uin_value_details; +CREATE TABLE uin_value_details +( + ds Date, + uin UInt32, + value UInt64 +) +ENGINE = MergeTree() +ORDER BY ds; +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-26', 10000001, 7), ('2023-12-26', 10000002, 8), ('2023-12-26', 10000003, 0), ('2023-12-26', 10000004, 0), ('2023-12-26', 20000005, 0), ('2023-12-26', 30000005, 100), ('2023-12-26', 50000005, 0); +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-27', 10000001, 7), ('2023-12-27', 10000002, 8), ('2023-12-27', 10000003, 30), ('2023-12-27', 10000004, 3), ('2023-12-27', 20000005, 0), ('2023-12-27', 40000005, 100), ('2023-12-27', 60000005, 0); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 7)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, 8)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, vec_2)) + , 
numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 5)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, 2)) +]); + +DROP TABLE IF EXISTS uin_value_details; diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32u8/ast.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32u8/ast.json new file mode 100644 index 000000000..16135f1d2 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32u8/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'TEST numericIndexedVectorPointwise operations with zero values and UInt8 value type'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001075593, + "rows_read": 5, + "bytes_read": 254 + } +} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32u8/metadata.json b/parser/testdata/03458_numeric_indexed_vector_operations_u32u8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32u8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03458_numeric_indexed_vector_operations_u32u8/query.sql b/parser/testdata/03458_numeric_indexed_vector_operations_u32u8/query.sql new file mode 100644 index 000000000..57767bcbc --- /dev/null +++ b/parser/testdata/03458_numeric_indexed_vector_operations_u32u8/query.sql @@ -0,0 +1,56 @@ +select 'TEST numericIndexedVectorPointwise operations with zero values and UInt8 value type'; +DROP TABLE IF EXISTS uin_value_details; +CREATE TABLE uin_value_details +( + ds Date, + uin UInt32, + value UInt8 +) +ENGINE = MergeTree() +ORDER BY ds; +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-26', 10000001, 7), ('2023-12-26', 10000002, 8), ('2023-12-26', 10000003, 0), ('2023-12-26', 10000004, 0), ('2023-12-26', 20000005, 0), ('2023-12-26', 30000005, 100), ('2023-12-26', 50000005, 0); +INSERT INTO uin_value_details (ds, uin, value) values ('2023-12-27', 10000001, 7), ('2023-12-27', 10000002, -8), ('2023-12-27', 10000003, 30), ('2023-12-27', 
10000004, -3), ('2023-12-27', 20000005, 0), ('2023-12-27', 40000005, 100), ('2023-12-27', 60000005, 0); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, -7)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseSubtract(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, -8)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, -5)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_2, 0)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, 2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, vec_2)) + , numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, 2)) +]); + +DROP TABLE IF EXISTS uin_value_details; diff --git a/parser/testdata/03458_wkb_function/ast.json b/parser/testdata/03458_wkb_function/ast.json new file mode 100644 index 000000000..4b9263a9a --- /dev/null +++ b/parser/testdata/03458_wkb_function/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function readWKBPoint (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function unhex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '010100000000000000000000000000000000000000'" + } + ], + + "rows": 9, + + 
"statistics": + { + "elapsed": 0.001200911, + "rows_read": 9, + "bytes_read": 386 + } +} diff --git a/parser/testdata/03458_wkb_function/metadata.json b/parser/testdata/03458_wkb_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03458_wkb_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03458_wkb_function/query.sql b/parser/testdata/03458_wkb_function/query.sql new file mode 100644 index 000000000..b72ef609e --- /dev/null +++ b/parser/testdata/03458_wkb_function/query.sql @@ -0,0 +1,41 @@ +SELECT readWKBPoint(unhex('010100000000000000000000000000000000000000')); +SELECT readWKBPolygon(unhex('01030000000100000005000000000000000000f03f0000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440000000000000f03f0000000000000000')); +SELECT readWKBPolygon(unhex('010300000002000000050000000000000000000000000000000000000000000000000024400000000000000000000000000000244000000000000024400000000000000000000000000000244000000000000000000000000000000000050000000000000000001040000000000000104000000000000014400000000000001040000000000000144000000000000014400000000000001040000000000000144000000000000010400000000000001040')); +SELECT readWKBMultiPolygon(unhex('0106000000020000000103000000020000000500000000000000000000400000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440000000000000004000000000000000000500000000000000000010400000000000001040000000000000144000000000000010400000000000001440000000000000144000000000000010400000000000001440000000000000104000000000000010400103000000010000000400000000000000000024c000000000000024c000000000000024c000000000000022c000000000000022c0000000000000244000000000000024c000000000000024c0')); +SELECT readWKBMultiPolygon(unhex('0106000000010000000103000000010000000B000000FCC6D79E59F755C0B874CC79C65C4540029EB47059F755C0C3BAF1EEC85C4540BA83D89942F755C0200A664CC15C4540027E8D2441F755C06DA7AD11C15C4540F3E2C4573BF755C06DA7AD11C15C45406A2DCC423BF755C093DFA293A55C45403EB0E3BF40F755C0F2E7DB82A55C4540BF266BD443F755C046425BCEA55C45400551F70148F755C0814067D2A65C45405BCF108E59F755C0D1CABDC0AC5C4540FCC6D79E59F755C0B874CC79C65C4540')); +SELECT readWKBMultiPolygon(unhex('987654321987654321987654321987654321')); -- {serverError BAD_ARGUMENTS} +SELECT readWKBMultiPolygon(unhex('010154321987654321987654321987654321')); -- {serverError BAD_ARGUMENTS} +SELECT readWKBPolygon(unhex('01030000000200000005000000000000000000000000000000000000000000000000002440000000000000000000000000000024400000000000002440')); -- {serverError CANNOT_READ_ALL_DATA} +SELECT readWKBPolygon(unhex('010300000002000000050000000000000000000000000000000000000')); -- {serverError BAD_ARGUMENTS} +SELECT readWKBMultiPolygon(unhex('0106000000020000000103000000020000000500000000000000000000400000000000000000000000000000244')); -- {serverError BAD_ARGUMENTS} +SELECT ST_PointFromWKB(unhex('010100000000000000000000000000000000000000')); +SELECT ST_LineFromWKB(unhex('01020000000300000000000000000000000000000000000000000000000000f03f000000000000f03f00000000000000400000000000000040')); +SELECT ST_PolyFromWKB(unhex('01030000000100000005000000000000000000f03f0000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440000000000000f03f0000000000000000')); +SELECT 
ST_MLineFromWKB(unhex('01050000000200000001020000000200000000000000000000000000000000000000000000000000f03f000000000000f03f0102000000020000000000000000000040000000000000004000000000000008400000000000000840')); +SELECT ST_MPolyFromWKB(unhex('0106000000020000000103000000020000000500000000000000000000400000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440000000000000004000000000000000000500000000000000000010400000000000001040000000000000144000000000000010400000000000001440000000000000144000000000000010400000000000001440000000000000104000000000000010400103000000010000000400000000000000000024c000000000000024c000000000000024c000000000000022c000000000000022c0000000000000244000000000000024c000000000000024c0')); + +DROP TABLE IF EXISTS geo; +CREATE TABLE geo (s String, id Int) engine=Memory(); +INSERT INTO geo VALUES (unhex('010100000000000000000000000000000000000000'), 1); +INSERT INTO geo VALUES (unhex('0101000000000000000000f03f0000000000000000'), 2); +INSERT INTO geo VALUES (unhex('010100000000000000000000400000000000000000'), 3); +SELECT readWKBPoint(s) FROM geo ORDER BY id; + +DROP TABLE IF EXISTS geo; +CREATE TABLE geo (s String, id Int) engine=Memory(); +INSERT INTO geo VALUES (unhex('01030000000100000005000000000000000000f03f0000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440000000000000f03f0000000000000000'), 1); +INSERT INTO geo VALUES (unhex('010300000001000000050000000000000000000000000000000000000000000000000024400000000000000000000000000000244000000000000024400000000000000000000000000000244000000000000000000000000000000000'), 2); +INSERT INTO geo VALUES (unhex('010300000001000000050000000000000000000040000000000000000000000000000024400000000000000000000000000000244000000000000024400000000000000000000000000000244000000000000000400000000000000000'), 3); +INSERT INTO geo VALUES (unhex('010300000002000000050000000000000000000000000000000000000000000000000024400000000000000000000000000000244000000000000024400000000000000000000000000000244000000000000000000000000000000000050000000000000000001040000000000000104000000000000014400000000000001040000000000000144000000000000014400000000000001040000000000000144000000000000010400000000000001040'), 4); +INSERT INTO geo VALUES (unhex('010300000002000000050000000000000000000040000000000000000000000000000024400000000000000000000000000000244000000000000024400000000000000000000000000000244000000000000000400000000000000000050000000000000000001040000000000000104000000000000014400000000000001040000000000000144000000000000014400000000000001040000000000000144000000000000010400000000000001040'), 5); +INSERT INTO geo VALUES (unhex('01030000000200000005000000000000000000f03f0000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440000000000000f03f0000000000000000050000000000000000001040000000000000104000000000000014400000000000001040000000000000144000000000000014400000000000001040000000000000144000000000000010400000000000001040'), 6); +SELECT readWKBPolygon(s) FROM geo ORDER BY id; + +DROP TABLE IF EXISTS geo; +CREATE TABLE geo (s String, id Int) engine=Memory(); +INSERT INTO geo VALUES 
(unhex('01060000000200000001030000000200000005000000000000000000f03f0000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440000000000000f03f00000000000000000500000000000000000010400000000000001040000000000000144000000000000010400000000000001440000000000000144000000000000010400000000000001440000000000000104000000000000010400103000000010000000400000000000000000024c000000000000024c000000000000024c000000000000022c000000000000022c0000000000000244000000000000024c000000000000024c0'), 1); +INSERT INTO geo VALUES (unhex('0106000000020000000103000000020000000500000000000000000000000000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440000000000000000000000000000000000500000000000000000010400000000000001040000000000000144000000000000010400000000000001440000000000000144000000000000010400000000000001440000000000000104000000000000010400103000000010000000400000000000000000024c000000000000024c000000000000024c000000000000022c000000000000022c0000000000000244000000000000024c000000000000024c0'), 2); +INSERT INTO geo VALUES (unhex('0106000000020000000103000000020000000500000000000000000000400000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440000000000000004000000000000000000500000000000000000010400000000000001040000000000000144000000000000010400000000000001440000000000000144000000000000010400000000000001440000000000000104000000000000010400103000000010000000400000000000000000024c000000000000024c000000000000024c000000000000022c000000000000022c0000000000000244000000000000024c000000000000024c0'), 3); +SELECT readWKBMultiPolygon(s) FROM geo ORDER BY id; + +DROP TABLE geo; diff --git a/parser/testdata/03459-reverse-sorting-key-stable-result/ast.json b/parser/testdata/03459-reverse-sorting-key-stable-result/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03459-reverse-sorting-key-stable-result/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03459-reverse-sorting-key-stable-result/metadata.json b/parser/testdata/03459-reverse-sorting-key-stable-result/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03459-reverse-sorting-key-stable-result/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03459-reverse-sorting-key-stable-result/query.sql b/parser/testdata/03459-reverse-sorting-key-stable-result/query.sql new file mode 100644 index 000000000..21df7f78d --- /dev/null +++ b/parser/testdata/03459-reverse-sorting-key-stable-result/query.sql @@ -0,0 +1,14 @@ +-- { echo ON } + +drop table if exists t; + +create table t(A Int64) partition by (A % 64) order by A desc settings allow_experimental_reverse_key=1 +as select intDiv(number,11111) from numbers(7e5) union all select number from numbers(7e5); + +set max_threads=1; + +select cityHash64(groupArray(A)) from (select A from t order by A desc limit 10); + +select cityHash64(groupArray(A)) from (select A from t order by identity(A) desc limit 10); + +drop table t; diff --git a/parser/testdata/03459_join_cannot_add_column/ast.json b/parser/testdata/03459_join_cannot_add_column/ast.json new file mode 100644 index 000000000..2ef46f28c --- /dev/null +++ b/parser/testdata/03459_join_cannot_add_column/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { 
+ "elapsed": 0.001322312, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03459_join_cannot_add_column/metadata.json b/parser/testdata/03459_join_cannot_add_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03459_join_cannot_add_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03459_join_cannot_add_column/query.sql b/parser/testdata/03459_join_cannot_add_column/query.sql new file mode 100644 index 000000000..9aa006014 --- /dev/null +++ b/parser/testdata/03459_join_cannot_add_column/query.sql @@ -0,0 +1,22 @@ +SET enable_analyzer = 1, max_threads = 1; + +DROP TABLE IF EXISTS t1, t2, t3; + +create table t1 (id UInt32, name String) engine=MergeTree order by id; +create table t2 (id UInt32, name String) engine=MergeTree order by id; + +insert into t1 (id, name) values (1, 'a'), (2, 'b'), (3, 'c'); +insert into t2 (id, name) values (3, 'c'), (2, 'cdasd'); + +select * from t1 ORDER BY ALL; +select * from t2 ORDER BY ALL; + +SELECT * FROM (select rowNumberInAllBlocks() + 1 as id, t1.id, t2.id from t1 left join t2 on t1.id=t2.id) ORDER BY ALL; +SELECT * FROM (select rowNumberInAllBlocks() + 1 as id, t1.id, t2.id, t1.name, t2.name from t1 left join t2 on t1.id=t2.id) ORDER BY ALL; + +create table t3 engine=MergeTree order by id as +select rowNumberInAllBlocks() + 1 as id, t1.id, t2.id, t1.name, t2.name from t1 left join t2 on t1.id=t2.id; + +SELECT * FROM t3 ORDER BY ALL; + +DROP TABLE t1, t2, t3; diff --git a/parser/testdata/03459_numeric_indexed_vector_decode/ast.json b/parser/testdata/03459_numeric_indexed_vector_decode/ast.json new file mode 100644 index 000000000..c6c16d35e --- /dev/null +++ b/parser/testdata/03459_numeric_indexed_vector_decode/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'TEST toVectorCompactArray'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001332742, + "rows_read": 5, + "bytes_read": 196 + } +} diff --git a/parser/testdata/03459_numeric_indexed_vector_decode/metadata.json b/parser/testdata/03459_numeric_indexed_vector_decode/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03459_numeric_indexed_vector_decode/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03459_numeric_indexed_vector_decode/query.sql b/parser/testdata/03459_numeric_indexed_vector_decode/query.sql new file mode 100644 index 000000000..18a1d36b4 --- /dev/null +++ b/parser/testdata/03459_numeric_indexed_vector_decode/query.sql @@ -0,0 +1,149 @@ +select 'TEST toVectorCompactArray'; + +DROP TABLE IF EXISTS uin_value_details; +CREATE TABLE uin_value_details +( + ds Date, + uin UInt32, + value Float64 +) +ENGINE = MergeTree() +ORDER BY ds; + +insert into uin_value_details (ds, uin, value) select '2023-12-20', number, number * number from numbers(1000); +insert into uin_value_details (ds, uin, value) select '2023-12-21', number, number from numbers(1000); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-20') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-21') + from uin_value_details +) as vec_2 +, 
numericIndexedVectorPointwiseDivide(vec_1, vec_2) as vec_3 +select arrayJoin([ + numericIndexedVectorShortDebugString(vec_1) + , toString(numericIndexedVectorAllValueSum(vec_1)) + , numericIndexedVectorShortDebugString(vec_2) + , toString(numericIndexedVectorAllValueSum(vec_2)) + , numericIndexedVectorShortDebugString(vec_3) + , toString(numericIndexedVectorAllValueSum(vec_3)) +]); + +select 'TEST toVectorCompactBitset'; + +DROP TABLE IF EXISTS uin_value_details; +CREATE TABLE uin_value_details +( + ds Date, + uin UInt32, + value Float64 +) +ENGINE = MergeTree() +ORDER BY ds; + +insert into uin_value_details (ds, uin, value) select '2023-12-22', number, number * number from numbers(5000); +insert into uin_value_details (ds, uin, value) select '2023-12-23', number, number from numbers(5000); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-22') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-23') + from uin_value_details +) as vec_2 +, numericIndexedVectorPointwiseDivide(vec_1, vec_2) as vec_3 +select arrayJoin([ + numericIndexedVectorShortDebugString(vec_1) + , toString(numericIndexedVectorAllValueSum(vec_1)) + , numericIndexedVectorShortDebugString(vec_2) + , toString(numericIndexedVectorAllValueSum(vec_2)) + , numericIndexedVectorShortDebugString(vec_3) + , toString(numericIndexedVectorAllValueSum(vec_3)) +]); + +select 'TEST toVectorCompactBitsetDense'; + +DROP TABLE IF EXISTS uin_value_details; +CREATE TABLE uin_value_details +( + ds Date, + uin UInt32, + value Float64 +) +ENGINE = MergeTree() +ORDER BY ds; + +insert into uin_value_details (ds, uin, value) select '2023-12-26', number, number * number from numbers(30000); +insert into uin_value_details (ds, uin, value) select '2023-12-27', number, number from numbers(30000); +insert into uin_value_details (ds, uin, value) select '2023-12-28', number * 3, number * 3 * number * 3 from numbers(30000); +insert into uin_value_details (ds, uin, value) select '2023-12-29', number * 3, number * 3 from numbers(30000); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-26') + from uin_value_details +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-27') + from uin_value_details +) as vec_2 +, ( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-28') + from uin_value_details +) as vec_3, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-29') + from uin_value_details +) as vec_4 +, numericIndexedVectorPointwiseDivide(vec_1, vec_2) as vec_5 +, numericIndexedVectorPointwiseDivide(vec_1, vec_3) as vec_6 +, numericIndexedVectorPointwiseDivide(vec_1, vec_4) as vec_7 +, numericIndexedVectorPointwiseDivide(vec_2, vec_3) as vec_8 +, numericIndexedVectorPointwiseDivide(vec_2, vec_4) as vec_9 +, numericIndexedVectorPointwiseDivide(vec_3, vec_4) as vec_10 +select arrayJoin([ + numericIndexedVectorShortDebugString(vec_1) + , toString(numericIndexedVectorAllValueSum(vec_1)) + , numericIndexedVectorShortDebugString(vec_2) + , toString(numericIndexedVectorAllValueSum(vec_2)) + , numericIndexedVectorShortDebugString(vec_3) + , toString(numericIndexedVectorAllValueSum(vec_3)) + , numericIndexedVectorShortDebugString(vec_4) + , toString(numericIndexedVectorAllValueSum(vec_4)) + , numericIndexedVectorShortDebugString(vec_5) + , toString(numericIndexedVectorAllValueSum(vec_5)) + , numericIndexedVectorShortDebugString(vec_6) + , toString(numericIndexedVectorAllValueSum(vec_6)) + , 
numericIndexedVectorShortDebugString(vec_7) + , toString(numericIndexedVectorAllValueSum(vec_7)) + , numericIndexedVectorShortDebugString(vec_8) + , toString(numericIndexedVectorAllValueSum(vec_8)) + , numericIndexedVectorShortDebugString(vec_9) + , toString(numericIndexedVectorAllValueSum(vec_9)) + , numericIndexedVectorShortDebugString(vec_10) + , toString(numericIndexedVectorAllValueSum(vec_10)) +]); + + +select 'test insert empty aggregate function'; + +DROP TABLE IF EXISTS numeric_indexed_vector; +CREATE TABLE numeric_indexed_vector +( + ds Date, + vector AggregateFunction(groupNumericIndexedVector, UInt32, Float64) +) +ENGINE = MergeTree() +ORDER BY ds; + +insert into numeric_indexed_vector (ds) values ('2023-12-26'); + +DROP TABLE IF EXISTS uin_value_details; +DROP TABLE IF EXISTS uin_value_details; +DROP TABLE IF EXISTS uin_value_details; +DROP TABLE IF EXISTS numeric_indexed_vector; diff --git a/parser/testdata/03459_socket_asynchronous_metrics/ast.json b/parser/testdata/03459_socket_asynchronous_metrics/ast.json new file mode 100644 index 000000000..acbe499a7 --- /dev/null +++ b/parser/testdata/03459_socket_asynchronous_metrics/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier value" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.asynchronous_metrics" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier name" + }, + { + "explain": " Literal 'NetworkTCPSockets'" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001213787, + "rows_read": 16, + "bytes_read": 628 + } +} diff --git a/parser/testdata/03459_socket_asynchronous_metrics/metadata.json b/parser/testdata/03459_socket_asynchronous_metrics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03459_socket_asynchronous_metrics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03459_socket_asynchronous_metrics/query.sql b/parser/testdata/03459_socket_asynchronous_metrics/query.sql new file mode 100644 index 000000000..b0bf859d2 --- /dev/null +++ b/parser/testdata/03459_socket_asynchronous_metrics/query.sql @@ -0,0 +1,3 @@ +SELECT value > 0 FROM system.asynchronous_metrics WHERE name = 'NetworkTCPSockets'; +SELECT value > 0 FROM system.asynchronous_metrics WHERE name = 'NetworkTCPSockets_LISTEN'; +SELECT value > 0 FROM system.asynchronous_metrics WHERE name = 'NetworkTCPSocketRemoteAddresses'; diff --git a/parser/testdata/03460_alter_materialized_view_on_cluster/ast.json b/parser/testdata/03460_alter_materialized_view_on_cluster/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03460_alter_materialized_view_on_cluster/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03460_alter_materialized_view_on_cluster/metadata.json 
b/parser/testdata/03460_alter_materialized_view_on_cluster/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03460_alter_materialized_view_on_cluster/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03460_alter_materialized_view_on_cluster/query.sql b/parser/testdata/03460_alter_materialized_view_on_cluster/query.sql new file mode 100644 index 000000000..ef14f0acd --- /dev/null +++ b/parser/testdata/03460_alter_materialized_view_on_cluster/query.sql @@ -0,0 +1,42 @@ +-- Tags: no-replicated-database +-- ^ due to the usage of ON CLUSTER queries + +SET distributed_ddl_output_mode = 'none', enable_analyzer = true; + +drop table if exists source, mview; + +CREATE TABLE source +( + timestamp DateTime, + card_id UInt64, + _id String +) +ENGINE = MergeTree Partition by toYYYYMM(timestamp) +ORDER BY _id TTL toDateTime(timestamp + toIntervalDay(7)); + +CREATE MATERIALIZED VIEW mview on cluster test_shard_localhost +ENGINE = SummingMergeTree ORDER BY (day, card_id) +as SELECT + toDate(timestamp) AS day, + card_id, + count(*) AS card_view +FROM source GROUP BY (day, card_id); + +DROP TABLE mview; + +CREATE MATERIALIZED VIEW mview on cluster test_shard_localhost +( + day Date, + card_id UInt64, + card_view Int64 +) +ENGINE = SummingMergeTree ORDER BY (day, card_id) +as SELECT + toDate(timestamp) AS day, + card_id, + count(*) AS card_view +FROM source GROUP BY (day, card_id); + +alter table source on cluster test_shard_localhost MODIFY SETTING ttl_only_drop_parts = 1; + +drop table if exists mview, source; diff --git a/parser/testdata/03460_normal_projection_index/ast.json b/parser/testdata/03460_normal_projection_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03460_normal_projection_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03460_normal_projection_index/metadata.json b/parser/testdata/03460_normal_projection_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03460_normal_projection_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03460_normal_projection_index/query.sql b/parser/testdata/03460_normal_projection_index/query.sql new file mode 100644 index 000000000..2b5fc310d --- /dev/null +++ b/parser/testdata/03460_normal_projection_index/query.sql @@ -0,0 +1,151 @@ +-- { echo ON } + +SET enable_analyzer = 1; +-- enable projection for parallel replicas +SET parallel_replicas_local_plan = 1; +SET optimize_aggregation_in_order = 0; +DROP TABLE IF EXISTS test_simple_projection; + +CREATE TABLE test_simple_projection +( + id UInt64, + event_date Date, + user_id UInt32, + url String, + region String, + PROJECTION region_proj + ( + SELECT _part_offset ORDER BY region + ), + PROJECTION user_id_proj + ( + SELECT _part_offset ORDER BY user_id + ) +) +ENGINE = MergeTree +ORDER BY (event_date, id) +SETTINGS + index_granularity = 1, min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0, enable_vertical_merge_algorithm = 0; + +INSERT INTO test_simple_projection VALUES (1, '2023-01-01', 101, 'https://example.com/page1', 'europe'); +INSERT INTO test_simple_projection VALUES (2, '2023-01-01', 102, 'https://example.com/page2', 'us_west'); +INSERT INTO test_simple_projection VALUES (3, '2023-01-02', 106, 'https://example.com/page3', 'us_west'); +INSERT INTO test_simple_projection VALUES (4, '2023-01-02', 107, 'https://example.com/page4', 'us_west'); +INSERT INTO 
test_simple_projection VALUES (5, '2023-01-03', 104, 'https://example.com/page5', 'asia'); + +OPTIMIZE TABLE test_simple_projection FINAL; + +-- aggressively use projection index +SET min_table_rows_to_use_projection_index = 0; + +SELECT trimLeft(explain) +FROM (EXPLAIN projections = 1 SELECT * FROM test_simple_projection WHERE region = 'europe' AND user_id = 101) +WHERE explain LIKE '%ReadFromMergeTree%' OR match(explain, '^\s+[A-Z][a-z]+(\s+[A-Z][a-z]+)*:'); +SELECT * FROM test_simple_projection WHERE region = 'europe' AND user_id = 101 ORDER BY ALL; + +-- region_proj is enough to filter the part +SELECT trimLeft(explain) +FROM (EXPLAIN projections = 1 SELECT * FROM test_simple_projection WHERE region = 'zzz' AND user_id = 101) +WHERE explain LIKE '%ReadFromMergeTree%' OR match(explain, '^\s+[A-Z][a-z]+(\s+[A-Z][a-z]+)*:'); +SELECT * FROM test_simple_projection WHERE region = 'zzz' AND user_id = 101 ORDER BY ALL; + +-- narrowing filter via user_id_proj +SELECT trimLeft(explain) +FROM (EXPLAIN projections = 1 SELECT * FROM test_simple_projection WHERE region = 'us_west' AND user_id = 106) +WHERE explain LIKE '%ReadFromMergeTree%' OR match(explain, '^\s+[A-Z][a-z]+(\s+[A-Z][a-z]+)*:'); +SELECT * FROM test_simple_projection WHERE region = 'us_west' AND user_id = 106 ORDER BY ALL; + +-- it's not possible to use different projection indexes with an OR filter +SELECT trimLeft(explain) +FROM (EXPLAIN projections = 1 SELECT * FROM test_simple_projection WHERE region = 'asia' OR user_id = 101) +WHERE explain LIKE '%ReadFromMergeTree%' OR match(explain, '^\s+[A-Z][a-z]+(\s+[A-Z][a-z]+)*:'); +SELECT * FROM test_simple_projection WHERE region = 'asia' OR user_id = 101 ORDER BY ALL; + +-- Fuzzer +SELECT *, _part_offset = (isNullable(1) = toUInt128(6)), * FROM test_simple_projection PREWHERE (101 = user_id) = ignore(255, isZeroOrNull(assumeNotNull(0))) WHERE (106 = user_id) AND (region = 'us_west'); + +DROP TABLE test_simple_projection; + +-- verify projection index can filter individual matching rows at top, middle, and bottom of a single granule.
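Aside: a minimal, self-contained sketch of the `_part_offset` projection-index pattern these fixtures exercise. The table name `proj_sketch` is hypothetical and not part of any upstream test; the PROJECTION syntax, MergeTree engine, and settings are the ones used in the fixtures above.

-- Hypothetical table for illustration only (not from the upstream tests).
CREATE TABLE proj_sketch
(
    id UInt64,
    region String,
    PROJECTION region_proj
    (
        SELECT _part_offset ORDER BY region
    )
)
ENGINE = MergeTree
ORDER BY id;

INSERT INTO proj_sketch VALUES (1, 'eu'), (2, 'us');

-- With optimize_use_projection_filtering = 1 and a low
-- min_table_rows_to_use_projection_index, the planner may consult region_proj
-- to narrow the granules scanned for this filter.
SELECT * FROM proj_sketch WHERE region = 'eu';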
+ +DROP TABLE IF EXISTS test_projection_granule_edge_cases; + +CREATE TABLE test_projection_granule_edge_cases +( + id UInt64, + region String, + user_id UInt32, + PROJECTION region_proj + ( + SELECT _part_offset ORDER BY region + ) +) +ENGINE = MergeTree +ORDER BY id +SETTINGS + index_granularity = 16, min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0, enable_vertical_merge_algorithm = 0; + +INSERT INTO test_projection_granule_edge_cases VALUES (0, 'top_region', 100); +INSERT INTO test_projection_granule_edge_cases SELECT number + 1, 'other_region', 101 FROM numbers(6); +INSERT INTO test_projection_granule_edge_cases VALUES (7, 'mid_region', 102); +INSERT INTO test_projection_granule_edge_cases SELECT number + 8, 'other_region', 103 FROM numbers(6); +INSERT INTO test_projection_granule_edge_cases VALUES (15, 'bol_region', 104); + +-- add more data to ensure projection index is triggered during query planning +INSERT INTO test_projection_granule_edge_cases SELECT number + 100, 'unknown_region', 999 FROM numbers(1000); + +OPTIMIZE TABLE test_projection_granule_edge_cases FINAL; + +SELECT trimLeft(explain) +FROM (EXPLAIN projections = 1 SELECT * FROM test_projection_granule_edge_cases WHERE region = 'top_region') +WHERE explain LIKE '%ReadFromMergeTree%' OR match(explain, '^\s+[A-Z][a-z]+(\s+[A-Z][a-z]+)*:'); +SELECT * FROM test_projection_granule_edge_cases WHERE region = 'top_region' ORDER BY ALL; + +SELECT trimLeft(explain) +FROM (EXPLAIN projections = 1 SELECT * FROM test_projection_granule_edge_cases WHERE region = 'mid_region') +WHERE explain LIKE '%ReadFromMergeTree%' OR match(explain, '^\s+[A-Z][a-z]+(\s+[A-Z][a-z]+)*:'); +SELECT * FROM test_projection_granule_edge_cases WHERE region = 'mid_region' ORDER BY ALL; + +SELECT trimLeft(explain) +FROM (EXPLAIN projections = 1 SELECT * FROM test_projection_granule_edge_cases WHERE region = 'bol_region') +WHERE explain LIKE '%ReadFromMergeTree%' OR match(explain, '^\s+[A-Z][a-z]+(\s+[A-Z][a-z]+)*:'); +SELECT * FROM test_projection_granule_edge_cases WHERE region = 'bol_region' ORDER BY ALL; + +DROP TABLE test_projection_granule_edge_cases; + +-- check partially materialized projection index, it should only affect related parts + +DROP TABLE IF EXISTS test_partial_projection; + +CREATE TABLE test_partial_projection +( + id UInt64, + region String +) +ENGINE = MergeTree +ORDER BY id +SETTINGS + index_granularity = 1, min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0, enable_vertical_merge_algorithm = 0; + +INSERT INTO test_partial_projection VALUES (1, 'us'), (2, 'eu'), (3, 'cn'); + +ALTER TABLE test_partial_projection ADD PROJECTION region_proj (SELECT _part_offset ORDER BY region); + +INSERT INTO test_partial_projection VALUES (4, 'cn'), (5, 'ru'), (6, 'br'); + +SET min_table_rows_to_use_projection_index = 0; + +SELECT trimLeft(explain) +FROM (EXPLAIN projections = 1 SELECT * FROM test_partial_projection WHERE region = 'ru') +WHERE explain LIKE '%ReadFromMergeTree%' OR match(explain, '^\s+[A-Z][a-z]+(\s+[A-Z][a-z]+)*:'); +SELECT * FROM test_partial_projection WHERE region = 'ru' ORDER BY ALL; + +SELECT trimLeft(explain) +FROM (EXPLAIN projections = 1 SELECT * FROM test_partial_projection WHERE region = 'cn') +WHERE explain LIKE '%ReadFromMergeTree%' OR match(explain, '^\s+[A-Z][a-z]+(\s+[A-Z][a-z]+)*:'); +SELECT * FROM test_partial_projection WHERE region = 'cn' ORDER BY ALL; + +DROP TABLE test_partial_projection; diff --git a/parser/testdata/03460_normal_projection_index_bug_race_conditions/ast.json 
b/parser/testdata/03460_normal_projection_index_bug_race_conditions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03460_normal_projection_index_bug_race_conditions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03460_normal_projection_index_bug_race_conditions/metadata.json b/parser/testdata/03460_normal_projection_index_bug_race_conditions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03460_normal_projection_index_bug_race_conditions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03460_normal_projection_index_bug_race_conditions/query.sql b/parser/testdata/03460_normal_projection_index_bug_race_conditions/query.sql new file mode 100644 index 000000000..5e2f2b671 --- /dev/null +++ b/parser/testdata/03460_normal_projection_index_bug_race_conditions/query.sql @@ -0,0 +1,27 @@ +-- { echo ON } + +SET max_threads=8; +SET max_projection_rows_to_use_projection_index = 102400000; +SET min_table_rows_to_use_projection_index = 0; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + `i` Int64, + `a` Int64, + `text` String, + PROJECTION by_text + ( + SELECT _part_offset + ORDER BY text + ) +) +ENGINE = MergeTree +PARTITION BY i % 4 +ORDER BY a +SETTINGS index_granularity = 8192; + +INSERT INTO tab SELECT number, number, number FROM numbers(8192 * 10); + +SELECT * FROM tab WHERE text = '1000' SETTINGS use_query_condition_cache = 0, optimize_use_projections = 1, optimize_use_projection_filtering = 1; diff --git a/parser/testdata/03460_numeric_indexed_vector_to_value_map/ast.json b/parser/testdata/03460_numeric_indexed_vector_to_value_map/ast.json new file mode 100644 index 000000000..3bde3a2f9 --- /dev/null +++ b/parser/testdata/03460_numeric_indexed_vector_to_value_map/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'TEST numericIndexedVectorToMap'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001502607, + "rows_read": 5, + "bytes_read": 201 + } +} diff --git a/parser/testdata/03460_numeric_indexed_vector_to_value_map/metadata.json b/parser/testdata/03460_numeric_indexed_vector_to_value_map/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03460_numeric_indexed_vector_to_value_map/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03460_numeric_indexed_vector_to_value_map/query.sql b/parser/testdata/03460_numeric_indexed_vector_to_value_map/query.sql new file mode 100644 index 000000000..10c97668a --- /dev/null +++ b/parser/testdata/03460_numeric_indexed_vector_to_value_map/query.sql @@ -0,0 +1,79 @@ +select 'TEST numericIndexedVectorToMap'; + +DROP TABLE IF EXISTS uin_value_details_int32_int8; +CREATE TABLE uin_value_details_int32_int8 +( + ds Date, + uin UInt32, + value Int8 +) +ENGINE = MergeTree() +ORDER BY ds; +INSERT INTO uin_value_details_int32_int8 (ds, uin, value) values ('2023-12-26', 105, -5), ('2023-12-26', 104, -128), ('2023-12-26', 103, 3), ('2023-12-26', 102, -2), ('2023-12-26', 10000001, -127), ('2023-12-26', 10000002, 127), ('2023-12-26', 10000003, 25), ('2023-12-26', 10000004, 38); +INSERT INTO uin_value_details_int32_int8 (ds, uin, value) values 
('2023-12-27', 103, -4), ('2023-12-27', 104, -5), ('2023-12-27', 105, -5), ('2023-12-27', 106, 6), ('2023-12-27', 10000001, 7), ('2023-12-27', 10000002, 3); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-26') + from uin_value_details_int32_int8 +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-27') + from uin_value_details_int32_int8 +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) +]); + +DROP TABLE IF EXISTS uin_value_details_int32_int64; +CREATE TABLE uin_value_details_int32_int64 +( + ds Date, + uin UInt32, + value Int64 +) +ENGINE = MergeTree() +ORDER BY ds; +INSERT INTO uin_value_details_int32_int64 (ds, uin, value) values ('2023-12-26', 105, -9223372036854775808), ('2023-12-26', 104, 9223372036854775807), ('2023-12-26', 103, 3), ('2023-12-26', 102, -2), ('2023-12-26', 10000001, -127), ('2023-12-26', 10000002, 127), ('2023-12-26', 10000003, 25), ('2023-12-26', 10000004, 38); +INSERT INTO uin_value_details_int32_int64 (ds, uin, value) values ('2023-12-27', 103, -9223372036854775807), ('2023-12-27', 104, -9223372036854775806), ('2023-12-27', 105, -5), ('2023-12-27', 106, 6), ('2023-12-27', 10000001, 7), ('2023-12-27', 10000002, 3); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-26') + from uin_value_details_int32_int64 +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-27') + from uin_value_details_int32_int64 +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) +]); + +DROP TABLE IF EXISTS uin_value_details_int32_float64; +CREATE TABLE uin_value_details_int32_float64 +( + ds Date, + uin UInt32, + value Float64 +) +ENGINE = MergeTree() +ORDER BY ds; +INSERT INTO uin_value_details_int32_float64 (ds, uin, value) values ('2023-12-26', 105, -549755813887.3421), ('2023-12-26', 104, 549755813877.3421), ('2023-12-26', 103, 3), ('2023-12-26', 102, -2), ('2023-12-26', 10000001, -127), ('2023-12-26', 10000002, 127), ('2023-12-26', 10000003, 25), ('2023-12-26', 10000004, 38); +INSERT INTO uin_value_details_int32_float64 (ds, uin, value) values ('2023-12-27', 103, -549755813887.34125083415), ('2023-12-27', 104, -9223372.32169785621), ('2023-12-27', 105, -5), ('2023-12-27', 106, 6), ('2023-12-27', 10000001, 7), ('2023-12-27', 10000002, 3); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-26') + from uin_value_details_int32_float64 +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-27') + from uin_value_details_int32_float64 +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) +]); diff --git a/parser/testdata/03460_projection_part_filtering_and_introspection/ast.json b/parser/testdata/03460_projection_part_filtering_and_introspection/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03460_projection_part_filtering_and_introspection/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03460_projection_part_filtering_and_introspection/metadata.json b/parser/testdata/03460_projection_part_filtering_and_introspection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03460_projection_part_filtering_and_introspection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03460_projection_part_filtering_and_introspection/query.sql 
b/parser/testdata/03460_projection_part_filtering_and_introspection/query.sql new file mode 100644 index 000000000..c39b36097 --- /dev/null +++ b/parser/testdata/03460_projection_part_filtering_and_introspection/query.sql @@ -0,0 +1,57 @@ +-- { echo ON } + +DROP TABLE IF EXISTS test_simple_projection; + +CREATE TABLE test_simple_projection +( + id UInt64, + event_date Date, + user_id UInt32, + url String, + region String, + PROJECTION region_proj + ( + SELECT _part_offset ORDER BY region + ), + PROJECTION user_id_proj + ( + SELECT _part_offset ORDER BY user_id + ) +) +ENGINE = MergeTree +ORDER BY (event_date, id) +SETTINGS index_granularity = 1, max_bytes_to_merge_at_max_space_in_pool = 1; -- disable merge + +INSERT INTO test_simple_projection VALUES (1, '2023-01-01', 101, 'https://example.com/page1', 'europe'); +INSERT INTO test_simple_projection VALUES (2, '2023-01-01', 102, 'https://example.com/page2', 'us_west'); +INSERT INTO test_simple_projection VALUES (3, '2023-01-02', 106, 'https://example.com/page3', 'us_west'); +INSERT INTO test_simple_projection VALUES (4, '2023-01-02', 107, 'https://example.com/page4', 'us_west'); +INSERT INTO test_simple_projection VALUES (5, '2023-01-03', 104, 'https://example.com/page5', 'asia'); + +SET enable_analyzer = 1; +SET optimize_use_projection_filtering = 1; +-- enable projection for parallel replicas +SET parallel_replicas_local_plan = 1; +SET optimize_aggregation_in_order = 0; + +-- region projection is effective enough for filtering +SELECT trimLeft(explain) +FROM (EXPLAIN projections = 1 SELECT * FROM test_simple_projection WHERE region = 'europe' AND user_id = 101) +WHERE explain LIKE '%ReadFromMergeTree%' OR match(explain, '^\s+[A-Z][a-z]+(\s+[A-Z][a-z]+)*:'); + +-- Only user_id projection is effective for filtering +SELECT trimLeft(explain) +FROM (EXPLAIN projections = 1 SELECT * FROM test_simple_projection WHERE region != 'unknown' AND user_id = 106) +WHERE explain LIKE '%ReadFromMergeTree%' OR match(explain, '^\s+[A-Z][a-z]+(\s+[A-Z][a-z]+)*:'); + +-- Both region and user_id projections are effective for filtering +SELECT trimLeft(explain) +FROM (EXPLAIN projections = 1 SELECT * FROM test_simple_projection WHERE region = 'us_west' AND user_id = 107) +WHERE explain LIKE '%ReadFromMergeTree%' OR match(explain, '^\s+[A-Z][a-z]+(\s+[A-Z][a-z]+)*:'); + +-- Neither projection is effective for filtering +SELECT trimLeft(explain) +FROM (EXPLAIN projections = 1 SELECT * FROM test_simple_projection WHERE region != 'unknown' AND user_id != 999) +WHERE explain LIKE '%ReadFromMergeTree%' OR match(explain, '^\s+[A-Z][a-z]+(\s+[A-Z][a-z]+)*:'); + +DROP TABLE test_simple_projection; diff --git a/parser/testdata/03460_query_condition_cache_with_projections/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03460_query_condition_cache_with_projections/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03460_query_condition_cache_with_projections/metadata.json b/parser/testdata/03460_query_condition_cache_with_projections/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03460_query_condition_cache_with_projections/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03460_query_condition_cache_with_projections/query.sql b/parser/testdata/03460_query_condition_cache_with_projections/query.sql new file mode 100644 index 000000000..6ba44d7d5 --- /dev/null +++ 
b/parser/testdata/03460_query_condition_cache_with_projections/query.sql @@ -0,0 +1,29 @@ +-- Tags: no-parallel +-- Tag no-parallel: Messes with internal cache + +-- { echo ON } + +set enable_analyzer = 1; +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; +set optimize_use_projection_filtering = 1; + +drop table if exists t; + +create table t (i int, j int, projection p (select * order by j)) engine MergeTree order by tuple() +settings index_granularity = 1, max_bytes_to_merge_at_max_space_in_pool = 1; -- disable merge + +-- The following data is constructed in a way that verifies that the query condition +-- cache no longer has key collisions for projection parts +insert into t select 20, number from numbers(10); + +insert into t select 1, number + 1 from numbers(10); + +system drop query condition cache; + +select j from t where j > 3 and i = 20 order by j settings max_threads = 1, use_query_condition_cache = 1, query_condition_cache_store_conditions_as_plaintext = 1; + +select part_name from system.query_condition_cache order by part_name; + +select j from t where j > 3 and i = 20 order by j settings max_threads = 1, use_query_condition_cache = 1, query_condition_cache_store_conditions_as_plaintext = 1; + +drop table t; diff --git a/parser/testdata/03461_numeric_indexed_vector_chain/ast.json b/parser/testdata/03461_numeric_indexed_vector_chain/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03461_numeric_indexed_vector_chain/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03461_numeric_indexed_vector_chain/metadata.json b/parser/testdata/03461_numeric_indexed_vector_chain/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03461_numeric_indexed_vector_chain/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03461_numeric_indexed_vector_chain/query.sql b/parser/testdata/03461_numeric_indexed_vector_chain/query.sql new file mode 100644 index 000000000..5fa12a234 --- /dev/null +++ b/parser/testdata/03461_numeric_indexed_vector_chain/query.sql @@ -0,0 +1,10 @@ +WITH + numericIndexedVectorBuild(mapFromArrays([1, 2, 3], arrayMap(x -> toFloat64(x), [10, 20, 30]))) AS vec1 +SELECT + numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(numericIndexedVectorPointwiseSubtract(vec1, 10), 3)); + +WITH + numericIndexedVectorBuild(mapFromArrays([1, 2, 3], arrayMap(x -> toFloat64(x), [0, 1, 2]))) AS vec1, + numericIndexedVectorBuild(mapFromArrays([1, 2, 3], arrayMap(x -> toFloat64(x), [1, 2, 3]))) AS vec2 +SELECT + numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(numericIndexedVectorPointwiseMultiply(vec1, vec2), 2)) AS res1; diff --git a/parser/testdata/03461_pk_prefix_trivial_count/ast.json b/parser/testdata/03461_pk_prefix_trivial_count/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03461_pk_prefix_trivial_count/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03461_pk_prefix_trivial_count/metadata.json b/parser/testdata/03461_pk_prefix_trivial_count/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03461_pk_prefix_trivial_count/query.sql @@ -0,0 +1,11 @@ +-- { echo ON } + +DROP TABLE IF EXISTS t; + +CREATE TABLE t(k String) ORDER BY k as select 'dst_'||number from numbers(10); + +SELECT count(*) FROM t WHERE k LIKE 'dst_kkkk_1111%'; + +SELECT count(*) FROM t WHERE k LIKE 'dst%kkkk'; + +DROP TABLE t; diff --git a/parser/testdata/03461_string_to_date_time_cast_modes/ast.json b/parser/testdata/03461_string_to_date_time_cast_modes/ast.json new file mode 100644 index 000000000..6ec96201b --- /dev/null +++ b/parser/testdata/03461_string_to_date_time_cast_modes/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001023167, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03461_string_to_date_time_cast_modes/metadata.json b/parser/testdata/03461_string_to_date_time_cast_modes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03461_string_to_date_time_cast_modes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03461_string_to_date_time_cast_modes/query.sql b/parser/testdata/03461_string_to_date_time_cast_modes/query.sql new file mode 100644 index 000000000..8b9774c21 --- /dev/null +++ b/parser/testdata/03461_string_to_date_time_cast_modes/query.sql @@ -0,0 +1,17 @@ +set session_timezone='UTC'; + +set cast_string_to_date_time_mode='basic'; +select '2020-02-01 20:00:00'::DateTime; +select '2020-02-01 20:00:00Z'::DateTime; -- {serverError CANNOT_PARSE_TEXT} +select '01-02-2020 20:00:00Z'::DateTime; -- {serverError CANNOT_PARSE_TEXT} + +set cast_string_to_date_time_mode='best_effort'; +select '2020-02-01 20:00:00'::DateTime; +select '2020-02-01 20:00:00Z'::DateTime; +select '01-02-2020 20:00:00Z'::DateTime; + +set cast_string_to_date_time_mode='best_effort_us'; +select '2020-02-01 20:00:00'::DateTime; +select '2020-02-01 20:00:00Z'::DateTime; +select '01-02-2020 20:00:00Z'::DateTime; + diff --git a/parser/testdata/03462_numeric_indexed_vector_serialization/ast.json b/parser/testdata/03462_numeric_indexed_vector_serialization/ast.json new file mode 100644 index 000000000..40b9aa256 --- /dev/null +++ b/parser/testdata/03462_numeric_indexed_vector_serialization/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery uin_value_details_int32_float64 (children 1)" + }, + { + "explain": " Identifier uin_value_details_int32_float64" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001574993, + "rows_read": 2, + "bytes_read": 114 + } +} diff --git a/parser/testdata/03462_numeric_indexed_vector_serialization/metadata.json b/parser/testdata/03462_numeric_indexed_vector_serialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03462_numeric_indexed_vector_serialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03462_numeric_indexed_vector_serialization/query.sql b/parser/testdata/03462_numeric_indexed_vector_serialization/query.sql new file mode 100644 index 000000000..5b4b2c7d8 --- /dev/null +++ b/parser/testdata/03462_numeric_indexed_vector_serialization/query.sql @@ -0,0 +1,49 @@ +DROP TABLE IF EXISTS uin_value_details_int32_float64; +CREATE TABLE uin_value_details_int32_float64 +( + ds Date, + uin UInt32, + value Float64 +) +ENGINE = MergeTree() +ORDER BY ds; +INSERT INTO 
uin_value_details_int32_float64 (ds, uin, value) values ('2023-12-26', 105, -549755813887.3421), ('2023-12-26', 104, 549755813877.3421), ('2023-12-26', 103, 3), ('2023-12-26', 102, -2), ('2023-12-26', 10000001, -127), ('2023-12-26', 10000002, 127), ('2023-12-26', 10000003, 25), ('2023-12-26', 10000004, 38); +INSERT INTO uin_value_details_int32_float64 (ds, uin, value) values ('2023-12-27', 103, -549755813887.34125083415), ('2023-12-27', 104, -9223372.32169785621), ('2023-12-27', 105, -5), ('2023-12-27', 106, 6), ('2023-12-27', 10000001, 7), ('2023-12-27', 10000002, 3); + +with +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-26') + from uin_value_details_int32_float64 +) as vec_1, +( + select groupNumericIndexedVectorStateIf(uin, value, ds = '2023-12-27') + from uin_value_details_int32_float64 +) as vec_2 +select arrayJoin([ + numericIndexedVectorToMap(vec_1) + , numericIndexedVectorToMap(vec_2) + , numericIndexedVectorToMap(CAST(CAST(vec_1, 'String'), 'AggregateFunction(groupNumericIndexedVector, UInt32, Float64)')) + , numericIndexedVectorToMap(CAST(CAST(vec_2, 'String'), 'AggregateFunction(groupNumericIndexedVector, UInt32, Float64)')) +]); + +DROP TABLE IF EXISTS vector_int32_float64; +CREATE TABLE vector_int32_float64 +( + ds Date + , vec AggregateFunction(groupNumericIndexedVector, UInt32, Float64) + , vec_str String +) +ENGINE = MergeTree() +ORDER BY ds; + +INSERT INTO vector_int32_float64 (ds, vec_str, vec) +SELECT ds, groupNumericIndexedVectorState(uin, value) as vec, + CAST(vec, 'String') as vec_str +FROM uin_value_details_int32_float64 group by ds; + +select ds, numericIndexedVectorToMap(vec) + , numericIndexedVectorToMap(CAST(vec_str, 'AggregateFunction(groupNumericIndexedVector, UInt32, Float64)')) +from vector_int32_float64; + +DROP TABLE IF EXISTS uin_value_details_int32_float64; +DROP TABLE IF EXISTS vector_int32_float64; diff --git a/parser/testdata/03463_numeric_indexed_vector_overflow/ast.json b/parser/testdata/03463_numeric_indexed_vector_overflow/ast.json new file mode 100644 index 000000000..0e6e2989e --- /dev/null +++ b/parser/testdata/03463_numeric_indexed_vector_overflow/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery uin_value_details (children 1)" + }, + { + "explain": " Identifier uin_value_details" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001717171, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/03463_numeric_indexed_vector_overflow/metadata.json b/parser/testdata/03463_numeric_indexed_vector_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03463_numeric_indexed_vector_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03463_numeric_indexed_vector_overflow/query.sql b/parser/testdata/03463_numeric_indexed_vector_overflow/query.sql new file mode 100644 index 000000000..4201b63a3 --- /dev/null +++ b/parser/testdata/03463_numeric_indexed_vector_overflow/query.sql @@ -0,0 +1,70 @@ +DROP TABLE IF EXISTS uin_value_details; + +CREATE TABLE uin_value_details (uin UInt8, value Float64) ENGINE = MergeTree() ORDER BY uin; + +INSERT INTO uin_value_details (uin, value) values (1, 7.3), (2, 8.3), (3, 0), (4, 0), (5, 0), (6, 100.6543782), (7, 0); + +WITH (SELECT groupNumericIndexedVectorState(uin, value) FROM uin_value_details) AS vec_1 +SELECT numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, 9999999999)); + 
+WITH (SELECT groupNumericIndexedVectorState(uin, value) FROM uin_value_details) AS vec_1 +SELECT numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, -9999999999)); + +WITH (SELECT groupNumericIndexedVectorState(uin, value) FROM uin_value_details) AS vec_1 +SELECT numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, nan)); -- { serverError INCORRECT_DATA } + +WITH (SELECT groupNumericIndexedVectorState(uin, value) FROM uin_value_details) AS vec_1 +SELECT numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, Null)); + +WITH (SELECT groupNumericIndexedVectorState(uin, value) FROM uin_value_details) AS vec_1 +SELECT numericIndexedVectorToMap(numericIndexedVectorPointwiseMultiply(vec_1, inf)); -- { serverError INCORRECT_DATA } + +WITH (SELECT groupNumericIndexedVectorState(uin, value) FROM uin_value_details) AS vec_1 +SELECT numericIndexedVectorToMap(numericIndexedVectorPointwiseDivide(vec_1, inf)); -- { serverError INCORRECT_DATA } + +WITH (SELECT groupNumericIndexedVectorState(uin, value) FROM uin_value_details) AS vec_1 +SELECT numericIndexedVectorToMap(numericIndexedVectorPointwiseAdd(vec_1, inf)); -- { serverError INCORRECT_DATA } + +WITH (SELECT groupNumericIndexedVectorState(uin, value) FROM uin_value_details) AS vec_1 +SELECT numericIndexedVectorToMap(numericIndexedVectorPointwiseEqual(vec_1, inf)); -- { serverError INCORRECT_DATA } + +WITH (SELECT groupNumericIndexedVectorState(uin, value) FROM uin_value_details) AS vec_1 +SELECT numericIndexedVectorToMap(numericIndexedVectorPointwiseNotEqual(vec_1, inf)); -- { serverError INCORRECT_DATA } + +WITH (SELECT groupNumericIndexedVectorState(uin, value) FROM uin_value_details) AS vec_1 +SELECT numericIndexedVectorToMap(numericIndexedVectorPointwiseLess(vec_1, inf)); -- { serverError INCORRECT_DATA } + +WITH (SELECT groupNumericIndexedVectorState(uin, value) FROM uin_value_details) AS vec_1 +SELECT numericIndexedVectorToMap(numericIndexedVectorPointwiseLessEqual(vec_1, inf)); -- { serverError INCORRECT_DATA } + +WITH (SELECT groupNumericIndexedVectorState(uin, value) FROM uin_value_details) AS vec_1 +SELECT numericIndexedVectorToMap(numericIndexedVectorPointwiseGreater(vec_1, inf)); -- { serverError INCORRECT_DATA } + +WITH (SELECT groupNumericIndexedVectorState(uin, value) FROM uin_value_details) AS vec_1 +SELECT numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, inf)); -- { serverError INCORRECT_DATA } + +INSERT INTO uin_value_details (uin, value) values (1, 7.3), (2, 8.3), (3, 0), (4, 0), (5, 0), (6, 100.6543782), (7, inf); +WITH (SELECT groupNumericIndexedVectorState(uin, value) FROM uin_value_details) AS vec_1 +SELECT numericIndexedVectorToMap(numericIndexedVectorPointwiseGreaterEqual(vec_1, 0)); -- { serverError INCORRECT_DATA } + +DROP TABLE uin_value_details; + +SET allow_suspicious_primary_key = 1; + +-- https://github.com/ClickHouse/ClickHouse/issues/82239 +SELECT 'Test with NaN, INFs and Nulls' AS test; + +SELECT groupNumericIndexedVector(x, y) FROM values('x Nullable(Int32), y Nullable(Float64)', (1, 0), (3, nan), (3, 2), (0, 0), (5, 1)); -- { serverError INCORRECT_DATA } +SELECT groupNumericIndexedVector(x, y) FROM values('x Nullable(Int32), y Nullable(Float64)', (1, 0), (3, Null), (3, 2), (0, 0), (5, 1)); +SELECT groupNumericIndexedVector(x, y) FROM values('x Nullable(Int32), y Nullable(Float64)', (1, 0), (3, inf), (3, 2), (0, 0), (5, 1)); -- { serverError INCORRECT_DATA } +SELECT groupNumericIndexedVector(x, y) FROM values('x Nullable(Int32), y 
Nullable(Float64)', (1, 0), (3, -inf), (3, 2), (0, 0), (5, 1)); -- { serverError INCORRECT_DATA } + +-- https://github.com/ClickHouse/ClickHouse/issues/83591 +SELECT 'Test for overflows' AS test; +CREATE TABLE test (t AggregateFunction(groupNumericIndexedVectorState, UInt32, Float64)) ENGINE = AggregatingMergeTree ORDER BY tuple(); +CREATE TABLE test2 (t AggregateFunction(groupNumericIndexedVectorState, UInt32, UInt64)) ENGINE = AggregatingMergeTree ORDER BY tuple(); +INSERT INTO test SELECT groupNumericIndexedVectorState(toUInt32(1), 1.54743e+26); -- { serverError INCORRECT_DATA } +INSERT INTO test SELECT groupNumericIndexedVectorState(toUInt32(2), -1.54743e+26); -- { serverError INCORRECT_DATA } +INSERT INTO test2 SELECT groupNumericIndexedVectorState(toUInt32(1), 18446744073709551615); -- { serverError INCORRECT_DATA } +DROP TABLE test; +DROP TABLE test2; diff --git a/parser/testdata/03464_projections_with_subcolumns/ast.json b/parser/testdata/03464_projections_with_subcolumns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03464_projections_with_subcolumns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03464_projections_with_subcolumns/metadata.json b/parser/testdata/03464_projections_with_subcolumns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03464_projections_with_subcolumns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03464_projections_with_subcolumns/query.sql b/parser/testdata/03464_projections_with_subcolumns/query.sql new file mode 100644 index 000000000..4d1aec583 --- /dev/null +++ b/parser/testdata/03464_projections_with_subcolumns/query.sql @@ -0,0 +1,92 @@ +-- Tags: long + +set enable_analyzer=1; +set mutations_sync=1; +set parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; + +drop table if exists test; + +create table test ( + a UInt32, + json JSON(a UInt32), + t Tuple(a UInt32, b UInt32), + projection p1 (select json order by json.a), + projection p2 (select t order by t.a), + projection p3 (select json order by json.c[].d.:Int64), +) engine=MergeTree order by tuple() settings index_granularity=1; + +insert into test select number, toJSONString(map('a', number, 'b', 'str', 'c', [toJSONString(map('d', number::UInt32))::JSON])), tuple(number, number) from numbers(100) settings use_variant_as_common_type=1, output_format_json_quote_64bit_integers=0; + +explain indexes=1 select json from test where json.a = 1 settings enable_parallel_replicas=0; +select trimLeft(*) from (explain indexes=1 select json from test where json.a = 1) where explain like '%ReadFromMergeTree%'; +select json from test where json.a = 1; + +explain indexes=1 select t from test where t.a = 1 settings enable_parallel_replicas=0; +select trimLeft(*) from (explain indexes=1 select t from test where t.a = 1) where explain like '%ReadFromMergeTree%'; +select t from test where t.a = 1; + +explain indexes=1 select json from test where json.c[].d.:Int64 = [1] settings enable_parallel_replicas=0; +select trimLeft(*) from (explain indexes=1 select json from test where json.c[].d.:Int64 = [1]) where explain like '%ReadFromMergeTree%'; +select json from test where json.c[].d.:Int64 = [1]; + +insert into test select number, toJSONString(map('a', number, 'b', 'str', 'c', [toJSONString(map('d', number::UInt32))::JSON])), tuple(number, number) from numbers(100) settings use_variant_as_common_type=1, 
output_format_json_quote_64bit_integers=0; + +optimize table test final; + +explain indexes=1 select json from test where json.a = 1 settings enable_parallel_replicas=0; +select trimLeft(*) from (explain indexes=1 select json from test where json.a = 1) where explain like '%ReadFromMergeTree%'; +select json from test where json.a = 1; + +explain indexes=1 select t from test where t.a = 1 settings enable_parallel_replicas=0; +select trimLeft(*) from (explain indexes=1 select t from test where t.a = 1) where explain like '%ReadFromMergeTree%'; +select t from test where t.a = 1; + +explain indexes=1 select json from test where json.c[].d.:Int64 = [1] settings enable_parallel_replicas=0; +select trimLeft(*) from (explain indexes=1 select json from test where json.c[].d.:Int64 = [1]) where explain like '%ReadFromMergeTree%'; +select json from test where json.c[].d.:Int64 = [1]; + +drop table test; + +select '------------------------------------------------------------------'; + +create table test ( + a UInt32, + json JSON(a UInt32), + t Tuple(a UInt32, b UInt32), +) engine=MergeTree order by tuple() settings index_granularity=1; + +insert into test select number, toJSONString(map('a', number, 'b', 'str', 'c', [toJSONString(map('d', number::UInt32))::JSON])), tuple(number, number) from numbers(100) settings use_variant_as_common_type=1, output_format_json_quote_64bit_integers=0; + +alter table test add projection p1 (select json order by json.a); +alter table test materialize projection p1; + +alter table test add projection p2 (select t order by t.a); +alter table test materialize projection p2; + +alter table test add projection p3 (select json order by json.c[].d.:Int64); +alter table test materialize projection p3; + +alter table test add projection p (select json.b order by json.a); -- {serverError NOT_IMPLEMENTED} +alter table test add projection p (select t.a order by json.a); -- {serverError NOT_IMPLEMENTED} +alter table test add projection p (select a order by json.a); -- {serverError NOT_IMPLEMENTED} +alter table test add projection p (select t.b order by t.a); -- {serverError NOT_IMPLEMENTED} +alter table test add projection p (select json.a order by t.a); -- {serverError NOT_IMPLEMENTED} +alter table test add projection p (select a order by t.a); -- {serverError NOT_IMPLEMENTED} +alter table test add projection p (select json.a order by json.c[].d.:Int64); -- {serverError NOT_IMPLEMENTED} +alter table test add projection p (select t.a order by json.c[].d.:Int64); -- {serverError NOT_IMPLEMENTED} +alter table test add projection p (select a order by json.c[].d.:Int64); -- {serverError NOT_IMPLEMENTED} + +explain indexes=1 select json from test where json.a = 1 settings enable_parallel_replicas=0; +select trimLeft(*) from (explain indexes=1 select json from test where json.a = 1) where explain like '%ReadFromMergeTree%'; +select json from test where json.a = 1; + +explain indexes=1 select t from test where t.a = 1 settings enable_parallel_replicas=0; +select trimLeft(*) from (explain indexes=1 select t from test where t.a = 1) where explain like '%ReadFromMergeTree%'; +select t from test where t.a = 1; + +explain indexes=1 select json from test where json.c[].d.:Int64 = [1] settings enable_parallel_replicas=0; +select trimLeft(*) from (explain indexes=1 select json from test where json.c[].d.:Int64 = [1]) where explain like '%ReadFromMergeTree%'; +select json from test where json.c[].d.:Int64 = [1]; + + +drop table test; diff --git
a/parser/testdata/03509_stripe_log_compatible_types/ast.json b/parser/testdata/03509_stripe_log_compatible_types/ast.json new file mode 100644 index 000000000..49b3e12bc --- /dev/null +++ b/parser/testdata/03509_stripe_log_compatible_types/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_tz (children 1)" + }, + { + "explain": " Identifier test_tz" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001065328, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/03509_stripe_log_compatible_types/metadata.json b/parser/testdata/03509_stripe_log_compatible_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03509_stripe_log_compatible_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03509_stripe_log_compatible_types/query.sql b/parser/testdata/03509_stripe_log_compatible_types/query.sql new file mode 100644 index 000000000..d27801167 --- /dev/null +++ b/parser/testdata/03509_stripe_log_compatible_types/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS test_tz; + +CREATE TABLE test_tz +( + `dt` DateTime('UTC') +) +ENGINE = StripeLog; + +INSERT INTO test_tz VALUES ('2022-09-21 03:03:24'); + +SELECT * +FROM test_tz; + +DROP TABLE test_tz; diff --git a/parser/testdata/03511_formatDateTime_e_space_padding/ast.json b/parser/testdata/03511_formatDateTime_e_space_padding/ast.json new file mode 100644 index 000000000..e7e9ba13d --- /dev/null +++ b/parser/testdata/03511_formatDateTime_e_space_padding/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function formatDateTime (alias _date) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2024-05-07'" + }, + { + "explain": " Literal '%e\/%m\/%Y'" + }, + { + "explain": " Function length (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier _date" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001191495, + "rows_read": 13, + "bytes_read": 513 + } +} diff --git a/parser/testdata/03511_formatDateTime_e_space_padding/metadata.json b/parser/testdata/03511_formatDateTime_e_space_padding/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03511_formatDateTime_e_space_padding/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03511_formatDateTime_e_space_padding/query.sql b/parser/testdata/03511_formatDateTime_e_space_padding/query.sql new file mode 100644 index 000000000..17fbf520d --- /dev/null +++ b/parser/testdata/03511_formatDateTime_e_space_padding/query.sql @@ -0,0 +1,3 @@ +SELECT formatDateTime(toDate('2024-05-07'), '%e/%m/%Y') as _date, length(_date); -- default behavior +SELECT formatDateTime(toDate('2024-05-07'), '%e/%m/%Y') as _date, length(_date) settings formatdatetime_e_with_space_padding = 1; +SELECT formatDateTime(toDate('2024-05-07'), '%e/%m/%Y') as _date, length(_date) settings formatdatetime_e_with_space_padding = 0; diff --git 
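-- [editor's note on 03511_formatDateTime_e_space_padding; not part of the patch]
-- A hedged reading of the three queries above: '%e' is the day of month, and with
-- formatdatetime_e_with_space_padding = 1 day 7 presumably renders as ' 7', giving
-- ' 7/05/2024' (length 10), versus '7/05/2024' (length 9) without padding. Sketch,
-- with the expected lengths assumed rather than taken from the reference output:
SELECT length(formatDateTime(toDate('2024-05-07'), '%e/%m/%Y')) SETTINGS formatdatetime_e_with_space_padding = 1;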
a/parser/testdata/03512_bech32_functions/ast.json b/parser/testdata/03512_bech32_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03512_bech32_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03512_bech32_functions/metadata.json b/parser/testdata/03512_bech32_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03512_bech32_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03512_bech32_functions/query.sql b/parser/testdata/03512_bech32_functions/query.sql new file mode 100644 index 000000000..9b40bdba4 --- /dev/null +++ b/parser/testdata/03512_bech32_functions/query.sql @@ -0,0 +1,173 @@ +-- Tags: no-fasttest + +-- baseline test, encoding of the value should match the expected value +SELECT bech32Encode('bc', unhex('751e76e8199196d454941c45d1b3a323f1433bd6')); +-- different hrp value should yield a different result +SELECT bech32Encode('tb', unhex('751e76e8199196d454941c45d1b3a323f1433bd6')); +-- exactly the max number of characters (50) should work +SELECT bech32Encode('bc', unhex('751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d45494')); +-- strange, but valid +SELECT bech32Encode('bcrt', unhex('')); + +-- test other hrps +SELECT bech32Encode('bcrt', unhex('751e76e8199196d454941c45d1b3a323f1433bd6')); +SELECT bech32Encode('tltc', unhex('751e76e8199196d454941c45d1b3a323f1433bd6')); +SELECT bech32Encode('tltssdfsdvjnasdfnjkbhksdfasnbdfkljhaksdjfnakjsdhasdfnasdkfasdfasdfasdf', unhex('751e')); + +-- negative tests +-- too many chars +SELECT bech32Encode('tltssdfsdvjnasdfnjkbhksdfasnbdfkljhaksdjfnakjsdhasdfnasdkfasdfasdfasdfdljsdfasdfahc', unhex('751e76e8199196d454941c45d1b3a323f1433bd6')); +-- empty hrp +SELECT bech32Encode('', unhex('751e76e8199196d454941c45d1b3a323f1433bd6')); +-- 51 chars should return nothing +SELECT bech32Encode('', unhex('751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d45494a')); + +-- test with explicit witver = 1, should be the same as default +SELECT bech32Encode('bc', unhex('751e76e8199196d454941c45d1b3a323f1433bd6'), 1) == + bech32Encode('bc', unhex('751e76e8199196d454941c45d1b3a323f1433bd6')); + +-- testing old bech32 algo +SELECT bech32Encode('bc', unhex('751e76e8199196d454941c45d1b3a323f1433bd6'), 0); + +-- different witvers will not match perfectly, but the encoded data should match, so we strip off the first 4 chars (hrp and prepended witver) +-- as well as the last 6 chars (checksum) +SELECT substring(s1, 5, -6) == substring(s2, 5, -6) +FROM +( + SELECT + bech32Encode('bc', unhex('751e76e8199196d454941c45d1b3a323f1433bd6'), 1) AS s1, + bech32Encode('bc', unhex('751e76e8199196d454941c45d1b3a323f1433bd6'), 10) AS s2 +); + +-- roundtrip +SELECT tup.1 AS hrp, hex(tup.2) AS data FROM (SELECT bech32Decode(bech32Encode('bc', unhex('751e76e8199196d454941c45d1b3a323f1433bd6'))) AS tup); + +DROP TABLE IF EXISTS hex_data; +CREATE TABLE hex_data +( + hrp String, + data String, + witver UInt8 +) +ENGINE = Memory; + +INSERT INTO hex_data VALUES + ('bc', '6687112a6eadb4d88d29c7a45da56eff0c23b0e14e757d408e', 0), + ('tb', '8f8cdd4364bb7dca11c49743da2c4b54062fa0388bbf924078', 1), + ('bc', '50b80d45cc275f36eb5fb2c22a93f6a4e83ba9380e55c67f6a', 15), + ('tb', 'b103a1937c6e2fb9de707a4be02d5d39e217b4bca7ce3c9c12', 0), + ('bcrt', '95eb334ff82ef8ad76151c29094abdae6c9e8bb8244523e347', 2); + +-- test const hrp with column data +SELECT
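-- [editor's notes on 03512_bech32_functions; not part of the patch]
-- Layout assumed by the substring(s, 5, -6) comparison above: hrp 'bc' (2 chars) +
-- separator '1' + one witver character + encoded data + 6-char checksum, so
-- characters 5..-7 carry only the data. A minimal sketch:
WITH bech32Encode('bc', unhex('751e76e8199196d454941c45d1b3a323f1433bd6')) AS s
SELECT substring(s, 1, 4) AS hrp_sep_witver, substring(s, 5, -6) AS payload, substring(s, length(s) - 5) AS checksum;
-- The FixedString padding caveat below can be seen directly: casting to FixedString
-- pads with zero bytes, which would silently corrupt binary input (the expected
-- result '6162000000' is an assumption, not a reference output):
SELECT hex(toFixedString('ab', 5));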
bech32Encode('bc', unhex(data)) FROM hex_data limit 1; + +-- test const data with column hrp +SELECT bech32Encode(hrp, unhex('6687112a6eadb4d88d29c7a45da56eff0c23b0e14e757d408e')) FROM hex_data limit 1; + +-- test column hrp and data with const witver +SELECT bech32Encode(hrp, unhex(data), 1) FROM hex_data limit 1; + +-- for encoding, if using a FixedString column for the data, it is crucial that there is no padding; +-- since the input is binary, there is no way to check for it +DROP TABLE IF EXISTS bech32_test; +CREATE TABLE bech32_test +( + hrp String, + data String, + hrp_fixed FixedString(4), + data_fixed FixedString(50), + witver UInt8 +) +ENGINE = Memory; + +INSERT INTO bech32_test +SELECT hrp, data, CAST(hrp, 'FixedString(4)'), CAST(data, 'FixedString(50)'), witver +FROM hex_data; + +SELECT + bech32Encode(hrp, unhex(data)) AS enc, + bech32Encode(hrp, unhex(data), witver) AS enc_witver, + bech32Encode(hrp, unhex(data), witver) = bech32Encode(hrp_fixed, unhex(data_fixed), witver) AS match1, + bech32Encode(hrp, unhex(data), witver) = bech32Encode(hrp, unhex(data_fixed), witver) AS match2, + bech32Encode(hrp, unhex(data), witver) = bech32Encode(hrp_fixed, unhex(data), witver) AS match3 +FROM bech32_test; + +-- sanity check, should return hrp and data used to create it +SELECT tup.1, hex(tup.2) FROM (SELECT bech32Decode('bc1qar0srrr7xfkvy5l643lydnw9re59gtzzwf5mdq') AS tup); + +SELECT + hrp, + data, + hrp = tup.1 AS match_hrp, + data = lower(hex(tup.2)) AS match_data +FROM +( + SELECT + hrp, + data, + bech32Decode(bech32Encode(hrp, unhex(data), witver)) AS tup + FROM bech32_test +) AS round_trip; + +DROP TABLE hex_data; +DROP TABLE bech32_test; + +-- negative tests +SELECT bech32Decode(''); +SELECT bech32Decode('foo'); + +-- decode valid string, witver 0, hrp=bc +SELECT tup.1 AS hrp, hex(tup.2) AS data FROM (SELECT bech32Decode('bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kj9wkru') AS tup); +-- decode valid string, witver 1, hrp=tb +SELECT tup.1 AS hrp, hex(tup.2) AS data FROM (SELECT bech32Decode('tb1pw508d6qejxtdg4y5r3zarvary0c5xw7kcr49c0') AS tup); +-- decoding address created with same data but different witvers should be the same +SELECT t1.1 != '', t1.1 == t2.1, t1.2 == t2.2 FROM ( + SELECT bech32Decode('bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4') AS t1, + bech32Decode('bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kj9wkru') AS t2); +-- decoding address created with same data but different witvers should be the same +SELECT t1.1 != '', t1.1 == t2.1, t1.2 == t2.2 FROM ( + SELECT bech32Decode('tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx') AS t1, + bech32Decode('tb1pw508d6qejxtdg4y5r3zarvary0c5xw7kcr49c0') AS t2); + +-- testing max length, this should work +SELECT tup.1 AS hrp, hex(tup.2) AS data FROM (SELECT bech32Decode('b1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y565gdg8') AS tup); +-- testing max length, this should return nothing +SELECT tup.1 AS hrp, hex(tup.2) AS data FROM (SELECT bech32Decode('b1w508dfqejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5xgqsaanm') AS tup); + +-- test decode from table +DROP TABLE IF EXISTS addresses; +CREATE TABLE addresses +( + address String +) +ENGINE = Memory; + +INSERT INTO addresses VALUES + ('bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4'), + ('tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx'), + ('bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kj9wkru'), + ('tb1pw508d6qejxtdg4y5r3zarvary0c5xw7kcr49c0'); + +-- test that fixed strings give the same result as a regular string column +DROP TABLE IF EXISTS bech32_test; +CREATE TABLE
bech32_test +( + address String, + address_fixed FixedString(45) +) +ENGINE = Memory; + +INSERT INTO bech32_test +SELECT address, CAST(address, 'FixedString(45)') +FROM addresses; + +SELECT + address, + bech32Decode(address).1 AS hrp, + hex(bech32Decode(address).2) AS decoded, + hex(bech32Decode(address_fixed).2) AS decoded_fixed, + hex(bech32Decode(address).2) = hex(bech32Decode(address_fixed).2) AS match +FROM bech32_test; + +DROP TABLE addresses; +DROP TABLE bech32_test; diff --git a/parser/testdata/03512_cast_logical_error/ast.json b/parser/testdata/03512_cast_logical_error/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03512_cast_logical_error/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03512_cast_logical_error/metadata.json b/parser/testdata/03512_cast_logical_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03512_cast_logical_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03512_cast_logical_error/query.sql b/parser/testdata/03512_cast_logical_error/query.sql new file mode 100644 index 000000000..097cbb21c --- /dev/null +++ b/parser/testdata/03512_cast_logical_error/query.sql @@ -0,0 +1,24 @@ +-- Tags: distributed + +-- https://github.com/ClickHouse/ClickHouse/issues/77468 + +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS v0; + +CREATE TABLE t0 (c0 Array(String)) ENGINE = Memory; +CREATE TABLE t1 (c0 Array(String)) ENGINE = Distributed('test_shard_localhost', currentDatabase(), t0); +CREATE MATERIALIZED VIEW v0 TO t1 (c0 String) AS (SELECT 1::Array(Int) AS c0); -- { serverError CANNOT_READ_ARRAY_FROM_TEXT } + +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS v0; + +CREATE TABLE t0 (c1 Int) ENGINE = MergeTree() ORDER BY tuple(); +CREATE TABLE t1 (c1 Date) ENGINE = Distributed('test_shard_localhost', currentDatabase(), t0); +CREATE MATERIALIZED VIEW v0 TO t1 (c1 String) AS (SELECT '2010-10-10' AS c1); +SELECT CAST(c1 AS Enum('1' = 1)) FROM v0; + +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS v0; diff --git a/parser/testdata/03512_join_using_parent_scope_matcher/ast.json b/parser/testdata/03512_join_using_parent_scope_matcher/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03512_join_using_parent_scope_matcher/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03512_join_using_parent_scope_matcher/metadata.json b/parser/testdata/03512_join_using_parent_scope_matcher/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03512_join_using_parent_scope_matcher/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03512_join_using_parent_scope_matcher/query.sql b/parser/testdata/03512_join_using_parent_scope_matcher/query.sql new file mode 100644 index 000000000..27d1e0068 --- /dev/null +++ b/parser/testdata/03512_join_using_parent_scope_matcher/query.sql @@ -0,0 +1,22 @@ +#!/usr/bin/env -S ${HOME}/clickhouse-client --queries-file + + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (`b` Float64) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t1 VALUES (1.0), (2.0), (3.0), (4.0), (5.0); + +CREATE TABLE t2 (`a` UInt32) ENGINE = MergeTree ORDER BY a; +INSERT INTO t2 VALUES (1), (2), (3), (4), (5); + +SET enable_analyzer = 1; + +SET analyzer_compatibility_join_using_top_level_identifier = 1; + +SELECT * APPLY ((x) ->
x+1), b + 1 AS a FROM t1 INNER JOIN t2 USING (a) ORDER BY ALL; +SELECT t1.* APPLY ((x) -> x+1), b + 1 AS a FROM t1 INNER JOIN t2 USING (a) ORDER BY ALL; +SELECT t2.* APPLY ((x) -> x+1), b + 1 AS a FROM t1 INNER JOIN t2 USING (a) ORDER BY ALL; + +SELECT (*, 1), b + 1 AS a, b + 1 AS a FROM t1 INNER JOIN t2 USING (a) ORDER BY ALL; + diff --git a/parser/testdata/03512_naive_bayes_classifier_general/ast.json b/parser/testdata/03512_naive_bayes_classifier_general/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03512_naive_bayes_classifier_general/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03512_naive_bayes_classifier_general/metadata.json b/parser/testdata/03512_naive_bayes_classifier_general/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03512_naive_bayes_classifier_general/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03512_naive_bayes_classifier_general/query.sql b/parser/testdata/03512_naive_bayes_classifier_general/query.sql new file mode 100644 index 000000000..c841c1aaf --- /dev/null +++ b/parser/testdata/03512_naive_bayes_classifier_general/query.sql @@ -0,0 +1,57 @@ +-- Tags: no-fasttest +-- no-fasttest: depends on model binary and model details via config files + +/* +Output language code mapping: + Bengali 0 + Mandarin Chinese 1 + German 2 + Greek 3 + English 4 + French 5 + Russian 6 + Spanish 7 +*/ + +SELECT number, naiveBayesClassifier('lang_byte_2', 'She painted the wall a bright yellow') +FROM numbers(10) ORDER BY number; + +DROP TABLE IF EXISTS model_names; +CREATE TABLE model_names ( + model_name String, +) ENGINE = MergeTree() +ORDER BY model_name; + +INSERT INTO model_names VALUES +('lang_byte_2'), +('lang_codepoint_1'); + +DROP TABLE IF EXISTS input_texts; +CREATE TABLE input_texts ( + input_text String, +) ENGINE = MergeTree() +ORDER BY input_text; + +INSERT INTO input_texts VALUES +('He fixed the broken chair yesterday'), +('The sun came out after the storm'), +('Sie liest jeden Abend ein spannendes Buch.'), +('Ο σκύλος κοιμάται δίπλα στο τζάκι.'), +('El gato observa a los pájaros desde la ventana.'), +('В саду распустились красные тюльпаны.'), +('Nous préparons le dîner pour nos invités.'), +('They have finished their homework already'), +('孩子们在花园里追逐蝴蝶。'), +('সে প্রতিদিন ভোরে দৌড়াতে যায়।'); + +SELECT + model_name, + input_text, + naiveBayesClassifier(model_name, input_text) AS classification +FROM + model_names +CROSS JOIN + input_texts +ORDER BY + model_name, + input_text; diff --git a/parser/testdata/03512_settings_max_block_size/ast.json b/parser/testdata/03512_settings_max_block_size/ast.json new file mode 100644 index 000000000..7cadf7d45 --- /dev/null +++ b/parser/testdata/03512_settings_max_block_size/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery tab (children 3)" + }, + { + "explain": " Identifier tab" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration column (children 1)" + }, + { + "explain": " DataType Int" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001323636, + "rows_read": 8, + "bytes_read": 273 + } +} diff --git a/parser/testdata/03512_settings_max_block_size/metadata.json 
b/parser/testdata/03512_settings_max_block_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03512_settings_max_block_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03512_settings_max_block_size/query.sql b/parser/testdata/03512_settings_max_block_size/query.sql new file mode 100644 index 000000000..989c3bac5 --- /dev/null +++ b/parser/testdata/03512_settings_max_block_size/query.sql @@ -0,0 +1,7 @@ +CREATE TABLE tab (column Int) ENGINE = Memory; + +SELECT 'Set to zero.'; +INSERT INTO TABLE tab (column) FROM INFILE '/dev/null' SETTINGS max_block_size = 0 FORMAT Values; -- { clientError BAD_ARGUMENTS } +SELECT count() FROM numbers(10) AS a, numbers(11) AS b, numbers(12) AS c SETTINGS max_block_size = 0; -- { clientError BAD_ARGUMENTS } + +DROP TABLE tab; diff --git a/parser/testdata/03513_filter_push_down_rand_bug/ast.json b/parser/testdata/03513_filter_push_down_rand_bug/ast.json new file mode 100644 index 000000000..06a5491cf --- /dev/null +++ b/parser/testdata/03513_filter_push_down_rand_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery users_items (children 1)" + }, + { + "explain": " Identifier users_items" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001016736, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/03513_filter_push_down_rand_bug/metadata.json b/parser/testdata/03513_filter_push_down_rand_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03513_filter_push_down_rand_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03513_filter_push_down_rand_bug/query.sql b/parser/testdata/03513_filter_push_down_rand_bug/query.sql new file mode 100644 index 000000000..972160b5c --- /dev/null +++ b/parser/testdata/03513_filter_push_down_rand_bug/query.sql @@ -0,0 +1,21 @@ +drop table if exists users_items; +CREATE TABLE users_items (user_id UInt64) ENGINE = Log; +INSERT INTO users_items SELECT bitAnd(number, 15) from numbers(64); + +SELECT sum(in_sample) +FROM +( + WITH RandomUsers AS + ( + SELECT + user_id, + rand() % 2 AS in_sample + FROM users_items + GROUP BY user_id + ) + SELECT + user_id, + in_sample + FROM RandomUsers + WHERE in_sample = 0 +); diff --git a/parser/testdata/03513_fix_shard_num_column_to_function_pass_with_nulls/ast.json b/parser/testdata/03513_fix_shard_num_column_to_function_pass_with_nulls/ast.json new file mode 100644 index 000000000..f373ac9fb --- /dev/null +++ b/parser/testdata/03513_fix_shard_num_column_to_function_pass_with_nulls/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001511679, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03513_fix_shard_num_column_to_function_pass_with_nulls/metadata.json b/parser/testdata/03513_fix_shard_num_column_to_function_pass_with_nulls/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03513_fix_shard_num_column_to_function_pass_with_nulls/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03513_fix_shard_num_column_to_function_pass_with_nulls/query.sql b/parser/testdata/03513_fix_shard_num_column_to_function_pass_with_nulls/query.sql new file mode 100644 index 000000000..86fbf492b --- /dev/null 
+++ b/parser/testdata/03513_fix_shard_num_column_to_function_pass_with_nulls/query.sql @@ -0,0 +1,12 @@ +SET join_use_nulls = 1; +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; + +CREATE TABLE t0 (c0 Int) ENGINE = Memory(); +INSERT INTO t0 SELECT number FROM numbers(3); +CREATE TABLE t1 (c0 Int) ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), t0); +SELECT t1._shard_num % t1._shard_num FROM t1 FULL JOIN (SELECT 1 AS c0) tx ON TRUE; +SELECT t1._shard_num % shardNum() FROM t1 FULL JOIN (SELECT 1 AS c0) tx ON TRUE; + +-- This doesn't work, but it is a separate bug https://github.com/ClickHouse/ClickHouse/issues/80691 +-- SELECT t1._shard_num % t1._shard_num FROM t1 FULL JOIN (SELECT 1 AS c0) tx ON TRUE ORDER BY t1._shard_num; diff --git a/parser/testdata/03513_lazy_materialization_projections_fix/ast.json b/parser/testdata/03513_lazy_materialization_projections_fix/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03513_lazy_materialization_projections_fix/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03513_lazy_materialization_projections_fix/metadata.json b/parser/testdata/03513_lazy_materialization_projections_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03513_lazy_materialization_projections_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03513_lazy_materialization_projections_fix/query.sql b/parser/testdata/03513_lazy_materialization_projections_fix/query.sql new file mode 100644 index 000000000..a319e3e5d --- /dev/null +++ b/parser/testdata/03513_lazy_materialization_projections_fix/query.sql @@ -0,0 +1,40 @@ +-- Tags: no-random-merge-tree-settings +SET query_plan_optimize_lazy_materialization = 1; +SET query_plan_max_limit_for_lazy_materialization = 10; +SET parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, optimize_aggregation_in_order = 0; +SET enable_analyzer=1; + +DROP TABLE IF EXISTS tt0; +CREATE TABLE tt0 (k UInt64, v String, blob String, PROJECTION proj_v (select * order by v)) ENGINE=MergeTree() ORDER BY tuple(); +INSERT INTO tt0 SELECT number, toString(number), repeat('blob_', number % 10) FROM numbers(1_000_000); + +select '-- no projection'; +select trimLeft(explain) as s from (EXPLAIN SELECT * FROM tt0 ORDER BY k ASC LIMIT 10) where s ilike 'LazilyRead%'; + +SELECT * FROM tt0 ORDER BY k ASC LIMIT 10; + +select '-- projection'; +select trimLeft(explain) as s from (EXPLAIN SELECT * FROM tt0 WHERE v = '3' ORDER BY v ASC LIMIT 10) where s ilike 'ReadFromMergeTree (proj_v)'; +select trimLeft(explain) as s from (EXPLAIN SELECT * FROM tt0 WHERE v = '3' ORDER BY v ASC LIMIT 10) where s ilike 'LazilyRead%'; + +SELECT * FROM tt0 WHERE v = '3' ORDER BY v ASC LIMIT 10; + +-- create table which has parts with and without projection +select '-- mixed reading'; +DROP TABLE IF EXISTS tt1; +CREATE TABLE tt1 (k UInt64, v String, blob String) ENGINE=MergeTree() ORDER BY tuple() settings index_granularity=10; +SYSTEM STOP MERGES tt1; +INSERT INTO tt1 SELECT number, toString(number), repeat('blob_', number % 10) FROM numbers(1_000); + +ALTER TABLE tt1 ADD PROJECTION proj_v (select * order by v); +INSERT INTO tt1 SELECT number, toString(number), repeat('blob_', number % 10) FROM numbers(1_000, 1_000); + +-- check that table has 2 parts without and with projection +select name, projections from system.parts where database = currentDatabase() and table = 'tt1' order by name; +-- reading using 
projection from the table should have 2 reading steps: one for the part w/o proj and one for the part with proj +select 'Reading steps: '|| count() from (EXPLAIN SELECT * FROM tt1 WHERE v = '1001' ORDER BY v ASC LIMIT 10) where trimLeft(explain) ilike 'ReadFromMergeTree%'; +-- currently lazy materialization doesn't support such mixed reading +select trimLeft(explain) as s from (EXPLAIN SELECT * FROM tt1 WHERE v = '1001' ORDER BY v ASC LIMIT 10) where s ilike 'LazilyRead%'; + +DROP TABLE tt1; +DROP TABLE tt0; diff --git a/parser/testdata/03513_nullsafe_join_storage/ast.json b/parser/testdata/03513_nullsafe_join_storage/ast.json new file mode 100644 index 000000000..e484018fb --- /dev/null +++ b/parser/testdata/03513_nullsafe_join_storage/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001179087, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03513_nullsafe_join_storage/metadata.json b/parser/testdata/03513_nullsafe_join_storage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03513_nullsafe_join_storage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03513_nullsafe_join_storage/query.sql b/parser/testdata/03513_nullsafe_join_storage/query.sql new file mode 100644 index 000000000..5ab11fce2 --- /dev/null +++ b/parser/testdata/03513_nullsafe_join_storage/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Nullable(Int)) ENGINE = Join(ALL, INNER, c0); +INSERT INTO t0 VALUES (1), (2), (3), (4), (5); + +SELECT 1 FROM t0 JOIN t0 tx ON t0.c0 <=> tx.c0; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } + +EXPLAIN AST optimize=1 SELECT 1 FROM t0 JOIN t0 tx ON t0.c0 <=> tx.c0 PASTE JOIN t0 ty; -- { serverError INCOMPATIBLE_TYPE_OF_JOIN } diff --git a/parser/testdata/03513_read_in_order_nullable/ast.json b/parser/testdata/03513_read_in_order_nullable/ast.json new file mode 100644 index 000000000..7930656a2 --- /dev/null +++ b/parser/testdata/03513_read_in_order_nullable/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001215669, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03513_read_in_order_nullable/metadata.json b/parser/testdata/03513_read_in_order_nullable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03513_read_in_order_nullable/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03513_read_in_order_nullable/query.sql b/parser/testdata/03513_read_in_order_nullable/query.sql new file mode 100644 index 000000000..b3bc740a2 --- /dev/null +++ b/parser/testdata/03513_read_in_order_nullable/query.sql @@ -0,0 +1,67 @@ +SET optimize_read_in_order = 1; +SET max_threads = 1; + +CREATE TABLE t0 (c0 Nullable(Int64)) ENGINE = MergeTree() ORDER BY c0 SETTINGS allow_nullable_key=1; +INSERT INTO TABLE t0 VALUES (0); +INSERT INTO TABLE t0 VALUES (NULL), (1); + +SELECT '--- table asc, query desc, last'; +SELECT * FROM t0 ORDER BY c0 DESC NULLS LAST; +SELECT '--- table asc, query desc, first'; +SELECT * FROM t0 ORDER BY c0 DESC NULLS FIRST; +SELECT '--- table asc, query asc, last'; +SELECT * FROM t0 ORDER BY c0 ASC NULLS LAST; +SELECT '--- table asc, query
asc, first'; +SELECT * FROM t0 ORDER BY c0 ASC NULLS FIRST; + +CREATE TABLE t1 (c0 Nullable(Int64)) ENGINE = MergeTree() ORDER BY c0 DESC SETTINGS allow_nullable_key=1, allow_experimental_reverse_key=1; +INSERT INTO TABLE t1 VALUES (0); +INSERT INTO TABLE t1 VALUES (NULL), (1); + +SELECT '--- table desc, query desc, last'; +SELECT * FROM t1 ORDER BY c0 DESC NULLS LAST; +SELECT '--- table desc, query desc, first'; +SELECT * FROM t1 ORDER BY c0 DESC NULLS FIRST; +SELECT '--- table desc, query asc, last'; +SELECT * FROM t1 ORDER BY c0 ASC NULLS LAST; +SELECT '--- table desc, query asc, first'; +SELECT * FROM t1 ORDER BY c0 ASC NULLS FIRST; + +CREATE TABLE f0 (c0 Float64) ENGINE = MergeTree() ORDER BY c0; +INSERT INTO TABLE f0 VALUES (0); +INSERT INTO TABLE f0 VALUES (0/0), (1); + +SELECT '--- table asc, query desc, last'; +SELECT * FROM f0 ORDER BY c0 DESC NULLS LAST; +SELECT '--- table asc, query desc, first'; +SELECT * FROM f0 ORDER BY c0 DESC NULLS FIRST; +SELECT '--- table asc, query asc, last'; +SELECT * FROM f0 ORDER BY c0 ASC NULLS LAST; +SELECT '--- table asc, query asc, first'; +SELECT * FROM f0 ORDER BY c0 ASC NULLS FIRST; + +CREATE TABLE f1 (c0 Float64) ENGINE = MergeTree() ORDER BY c0 DESC SETTINGS allow_experimental_reverse_key=1; +INSERT INTO TABLE f1 VALUES (0); +INSERT INTO TABLE f1 VALUES (0/0), (1); + +SELECT '--- table desc, query desc, last'; +SELECT * FROM f1 ORDER BY c0 DESC NULLS LAST; +SELECT '--- table desc, query desc, first'; +SELECT * FROM f1 ORDER BY c0 DESC NULLS FIRST; +SELECT '--- table desc, query asc, last'; +SELECT * FROM f1 ORDER BY c0 ASC NULLS LAST; +SELECT '--- table desc, query asc, first'; +SELECT * FROM f1 ORDER BY c0 ASC NULLS FIRST; + +SET allow_suspicious_low_cardinality_types = 1; +CREATE TABLE lct0 (c0 LowCardinality(Nullable(Int64))) ENGINE = MergeTree() ORDER BY c0 SETTINGS allow_nullable_key=1; +INSERT INTO TABLE lct0 VALUES (0); +INSERT INTO TABLE lct0 VALUES (NULL), (1); +SELECT '--- table asc, query desc, last'; +SELECT * FROM lct0 ORDER BY c0 DESC NULLS LAST; +SELECT '--- table asc, query desc, first'; +SELECT * FROM lct0 ORDER BY c0 DESC NULLS FIRST; +SELECT '--- table asc, query asc, last'; +SELECT * FROM lct0 ORDER BY c0 ASC NULLS LAST; +SELECT '--- table asc, query asc, first'; +SELECT * FROM lct0 ORDER BY c0 ASC NULLS FIRST; diff --git a/parser/testdata/03513_resize_pipeline_after_totals/ast.json b/parser/testdata/03513_resize_pipeline_after_totals/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03513_resize_pipeline_after_totals/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03513_resize_pipeline_after_totals/metadata.json b/parser/testdata/03513_resize_pipeline_after_totals/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03513_resize_pipeline_after_totals/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03513_resize_pipeline_after_totals/query.sql b/parser/testdata/03513_resize_pipeline_after_totals/query.sql new file mode 100644 index 000000000..0132675a8 --- /dev/null +++ b/parser/testdata/03513_resize_pipeline_after_totals/query.sql @@ -0,0 +1,6 @@ +EXPLAIN PIPELINE +SELECT cityHash64(number) +FROM numbers_mt(100) +GROUP BY number + WITH TOTALS +SETTINGS max_threads = 4; diff --git a/parser/testdata/03513_simple_aggregate_function_any_respect_nulls_in_aggregating_merge_tree/ast.json b/parser/testdata/03513_simple_aggregate_function_any_respect_nulls_in_aggregating_merge_tree/ast.json new 
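-- [editor's note on 03513_read_in_order_nullable; not part of the patch]
-- The f0/f1 cases rely on 0/0 evaluating to nan for Float64 division, and on ClickHouse
-- placing nan next to NULL when sorting (after the values with NULLS LAST, before them
-- with NULLS FIRST), so the read-in-order optimization must not change where nan lands.
-- A minimal sketch:
SELECT 0/0 AS x, isNaN(x);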
file mode 100644 index 000000000..a55d99f6d --- /dev/null +++ b/parser/testdata/03513_simple_aggregate_function_any_respect_nulls_in_aggregating_merge_tree/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery simple_agf_any_aggregating_mt (children 1)" + }, + { + "explain": " Identifier simple_agf_any_aggregating_mt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001333397, + "rows_read": 2, + "bytes_read": 110 + } +} diff --git a/parser/testdata/03513_simple_aggregate_function_any_respect_nulls_in_aggregating_merge_tree/metadata.json b/parser/testdata/03513_simple_aggregate_function_any_respect_nulls_in_aggregating_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03513_simple_aggregate_function_any_respect_nulls_in_aggregating_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03513_simple_aggregate_function_any_respect_nulls_in_aggregating_merge_tree/query.sql b/parser/testdata/03513_simple_aggregate_function_any_respect_nulls_in_aggregating_merge_tree/query.sql new file mode 100644 index 000000000..f5fc96937 --- /dev/null +++ b/parser/testdata/03513_simple_aggregate_function_any_respect_nulls_in_aggregating_merge_tree/query.sql @@ -0,0 +1,67 @@ +DROP TABLE IF EXISTS simple_agf_any_aggregating_mt; + +CREATE TABLE simple_agf_any_aggregating_mt +( + a Int64, + any_simple SimpleAggregateFunction(any_respect_nulls, Nullable(UInt64)), + any_agg AggregateFunction(any_respect_nulls, Nullable(UInt64)), + anyLast_simple SimpleAggregateFunction(anyLast_respect_nulls, Nullable(UInt64)), + anyLast_agg AggregateFunction(anyLast_respect_nulls, Nullable(UInt64)) +) +ENGINE = AggregatingMergeTree +ORDER BY a; + +INSERT INTO simple_agf_any_aggregating_mt SELECT + a, + any_respect_nulls(any_simple), + any_respect_nullsState(any_agg), + anyLast_respect_nulls(anyLast_simple), + anyLast_respect_nullsState(anyLast_agg) +FROM +( + SELECT + 42 AS a, + NULL::Nullable(UInt64) AS any_simple, + NULL::Nullable(UInt64) AS any_agg, + NULL::Nullable(UInt64) AS anyLast_simple, + NULL::Nullable(UInt64) AS anyLast_agg +) +GROUP BY a; + +INSERT INTO simple_agf_any_aggregating_mt SELECT + number % 51 as a, + any_respect_nulls(toNullable(number)), + any_respect_nullsState(toNullable(number)), + anyLast_respect_nulls(toNullable(number)), + anyLast_respect_nullsState(toNullable(number)), +FROM numbers(10000) +GROUP BY a; + +INSERT INTO simple_agf_any_aggregating_mt SELECT + a, + any_respect_nulls(any_simple), + any_respect_nullsState(any_agg), + anyLast_respect_nulls(anyLast_simple), + anyLast_respect_nullsState(anyLast_agg) +FROM +( + SELECT + 50 AS a, + NULL::Nullable(UInt64) AS any_simple, + NULL::Nullable(UInt64) AS any_agg, + NULL::Nullable(UInt64) AS anyLast_simple, + NULL::Nullable(UInt64) AS anyLast_agg +) +GROUP BY a; + +OPTIMIZE TABLE simple_agf_any_aggregating_mt FINAL; + +SELECT + a, + any_respect_nulls(any_simple), + any_respect_nullsMerge(any_agg), + anyLast_respect_nulls(anyLast_simple), + anyLast_respect_nullsMerge(anyLast_agg) +FROM simple_agf_any_aggregating_mt +GROUP BY a +ORDER BY a; diff --git a/parser/testdata/03513_simple_aggregate_function_any_respect_nulls_in_summing_merge_tree/ast.json b/parser/testdata/03513_simple_aggregate_function_any_respect_nulls_in_summing_merge_tree/ast.json new file mode 100644 index 000000000..77479f4b4 --- /dev/null +++ 
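-- [editor's note on the *_respect_nulls tests; not part of the patch]
-- Unlike any()/anyLast(), the _respect_nulls variants may return NULL itself, which is
-- what the NULL-only groups 42 and 50 exercise across AggregatingMergeTree (and, below,
-- SummingMergeTree) merges. A minimal sketch of the difference (results assumed, since
-- any() is order-dependent and skips NULLs):
SELECT any(x) AS skips_null, any_respect_nulls(x) AS keeps_null
FROM values('x Nullable(UInt64)', (NULL), (1));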
b/parser/testdata/03513_simple_aggregate_function_any_respect_nulls_in_summing_merge_tree/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery simple_agf_any_summing_mt (children 1)" + }, + { + "explain": " Identifier simple_agf_any_summing_mt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001283045, + "rows_read": 2, + "bytes_read": 102 + } +} diff --git a/parser/testdata/03513_simple_aggregate_function_any_respect_nulls_in_summing_merge_tree/metadata.json b/parser/testdata/03513_simple_aggregate_function_any_respect_nulls_in_summing_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03513_simple_aggregate_function_any_respect_nulls_in_summing_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03513_simple_aggregate_function_any_respect_nulls_in_summing_merge_tree/query.sql b/parser/testdata/03513_simple_aggregate_function_any_respect_nulls_in_summing_merge_tree/query.sql new file mode 100644 index 000000000..01be74b14 --- /dev/null +++ b/parser/testdata/03513_simple_aggregate_function_any_respect_nulls_in_summing_merge_tree/query.sql @@ -0,0 +1,67 @@ +DROP TABLE IF EXISTS simple_agf_any_summing_mt; + +CREATE TABLE simple_agf_any_summing_mt +( + a Int64, + any_simple SimpleAggregateFunction(any_respect_nulls, Nullable(UInt64)), + any_agg AggregateFunction(any_respect_nulls, Nullable(UInt64)), + anyLast_simple SimpleAggregateFunction(anyLast_respect_nulls, Nullable(UInt64)), + anyLast_agg AggregateFunction(anyLast_respect_nulls, Nullable(UInt64)) +) +ENGINE = SummingMergeTree +ORDER BY a; + +INSERT INTO simple_agf_any_summing_mt SELECT + a, + any_respect_nulls(any_simple), + any_respect_nullsState(any_agg), + anyLast_respect_nulls(anyLast_simple), + anyLast_respect_nullsState(anyLast_agg) +FROM +( + SELECT + 42 AS a, + NULL::Nullable(UInt64) AS any_simple, + NULL::Nullable(UInt64) AS any_agg, + NULL::Nullable(UInt64) AS anyLast_simple, + NULL::Nullable(UInt64) AS anyLast_agg +) +GROUP BY a; + +INSERT INTO simple_agf_any_summing_mt SELECT + number % 51 as a, + any_respect_nulls(toNullable(number)), + any_respect_nullsState(toNullable(number)), + anyLast_respect_nulls(toNullable(number)), + anyLast_respect_nullsState(toNullable(number)), +FROM numbers(10000) +GROUP BY a; + +INSERT INTO simple_agf_any_summing_mt SELECT + a, + any_respect_nulls(any_simple), + any_respect_nullsState(any_agg), + anyLast_respect_nulls(anyLast_simple), + anyLast_respect_nullsState(anyLast_agg) +FROM +( + SELECT + 50 AS a, + NULL::Nullable(UInt64) AS any_simple, + NULL::Nullable(UInt64) AS any_agg, + NULL::Nullable(UInt64) AS anyLast_simple, + NULL::Nullable(UInt64) AS anyLast_agg +) +GROUP BY a; + +OPTIMIZE TABLE simple_agf_any_summing_mt FINAL; + +SELECT + a, + any_respect_nulls(any_simple), + any_respect_nullsMerge(any_agg), + anyLast_respect_nulls(anyLast_simple), + anyLast_respect_nullsMerge(anyLast_agg) +FROM simple_agf_any_summing_mt +GROUP BY a +ORDER BY a; diff --git a/parser/testdata/03514_grace_hash_join_logical_error/ast.json b/parser/testdata/03514_grace_hash_join_logical_error/ast.json new file mode 100644 index 000000000..f4391924f --- /dev/null +++ b/parser/testdata/03514_grace_hash_join_logical_error/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery A (children 1)" + }, + { + "explain": " Identifier A" + } + ], + + 
"rows": 2, + + "statistics": + { + "elapsed": 0.001502716, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03514_grace_hash_join_logical_error/metadata.json b/parser/testdata/03514_grace_hash_join_logical_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03514_grace_hash_join_logical_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03514_grace_hash_join_logical_error/query.sql b/parser/testdata/03514_grace_hash_join_logical_error/query.sql new file mode 100644 index 000000000..27a89ea5c --- /dev/null +++ b/parser/testdata/03514_grace_hash_join_logical_error/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS A; + +create table A (A Int64, B Int64, S String) Engine=MergeTree order by A +as select number,number, toString(arrayMap(i->cityHash64(i*number), range(10))) from numbers(1e6); + +SET join_algorithm = 'grace_hash', grace_hash_join_initial_buckets=128, grace_hash_join_max_buckets=256; + +select * from A a join A as b on a.A = b.A limit 1 FORMAT Null; + +SET join_algorithm = 'grace_hash', grace_hash_join_initial_buckets=128, grace_hash_join_max_buckets=128; + +select * from A a join A as b on a.A = b.A limit 1 FORMAT Null; + +DROP TABLE A; diff --git a/parser/testdata/03515_array_join_different_sizes/ast.json b/parser/testdata/03515_array_join_different_sizes/ast.json new file mode 100644 index 000000000..70d892c45 --- /dev/null +++ b/parser/testdata/03515_array_join_different_sizes/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00117918, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03515_array_join_different_sizes/metadata.json b/parser/testdata/03515_array_join_different_sizes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03515_array_join_different_sizes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03515_array_join_different_sizes/query.sql b/parser/testdata/03515_array_join_different_sizes/query.sql new file mode 100644 index 000000000..8b056a593 --- /dev/null +++ b/parser/testdata/03515_array_join_different_sizes/query.sql @@ -0,0 +1,27 @@ +SET enable_analyzer = 1; + +SELECT count(*) +FROM +( + SELECT + ['a', 'b'] AS a1, + [1] AS a2 +) AS bb +ARRAY JOIN + a2, + a1 +SETTINGS enable_unaligned_array_join = 1 +; + +SELECT count(*) +FROM +( + SELECT + ['a', 'b'] AS a1, + [1] AS a2 +) AS bb +ARRAY JOIN + a1, + a2 +SETTINGS enable_unaligned_array_join = 1 +; diff --git a/parser/testdata/03516_comparison_pk_bug/ast.json b/parser/testdata/03516_comparison_pk_bug/ast.json new file mode 100644 index 000000000..bfa3ba025 --- /dev/null +++ b/parser/testdata/03516_comparison_pk_bug/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " 
Literal UInt64_1" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function toIntervalHour (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_0" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001193706, + "rows_read": 17, + "bytes_read": 658 + } +} diff --git a/parser/testdata/03516_comparison_pk_bug/metadata.json b/parser/testdata/03516_comparison_pk_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03516_comparison_pk_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03516_comparison_pk_bug/query.sql b/parser/testdata/03516_comparison_pk_bug/query.sql new file mode 100644 index 000000000..affc6fc20 --- /dev/null +++ b/parser/testdata/03516_comparison_pk_bug/query.sql @@ -0,0 +1,7 @@ +SELECT 1 FROM numbers(1) WHERE toIntervalHour(number) = 0; + +CREATE TABLE t1 (c0 Decimal(18,0)) ENGINE = MergeTree() ORDER BY (c0); +INSERT INTO TABLE t1(c0) VALUES (1); + +SELECT c0 = 6812671276462221925::Int64 FROM t1; +SELECT 1 FROM t1 WHERE c0 = 6812671276462221925::Int64; diff --git a/parser/testdata/03516_int_exp2_join/ast.json b/parser/testdata/03516_int_exp2_join/ast.json new file mode 100644 index 000000000..489d3d47e --- /dev/null +++ b/parser/testdata/03516_int_exp2_join/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " TableIdentifier t0" + }, + { + "explain": " TableIdentifier t1" + }, + { + "explain": " TableIdentifier t4" + }, + { + "explain": " TableIdentifier t5" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001581178, + "rows_read": 6, + "bytes_read": 180 + } +} diff --git a/parser/testdata/03516_int_exp2_join/metadata.json b/parser/testdata/03516_int_exp2_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03516_int_exp2_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03516_int_exp2_join/query.sql b/parser/testdata/03516_int_exp2_join/query.sql new file mode 100644 index 000000000..1f6557c04 --- /dev/null +++ b/parser/testdata/03516_int_exp2_join/query.sql @@ -0,0 +1,61 @@ +DROP TABLE IF EXISTS t0, t1, t4, t5; + +set join_use_nulls = 1; + +create table t0 (c2 String, primary key(c2)) engine = MergeTree; +create table t1 (vkey UInt32, c8 String, primary key(vkey))engine = MergeTree; +create view t4 as +select + ref_1.vkey as c_2_c48_2 + from + t0 as ref_0 + left outer join t1 as ref_1 + on (ref_0.c2 = ref_1.c8) ; +create table t5 (pkey UInt32, c52 UInt32, c56 String, primary key(pkey))engine = MergeTree; + +insert into t0 values (null); +insert into t0 values (''); +insert into t1 values (59, ''); +insert into t5 values (12000, null, ''); +insert into t5 values (22000, null, null); +insert into t5 values (24000, 14, 'YLq?'); +insert into t5 values (30000, 0, '-'); +insert into t5 values (33000, null, 'Wm@c'); +insert into t5 values (37000, 0, 'IB'); +insert into t5 values (38000, 59, ''); +insert into t5 values (56000, 0, null); +insert into t5 values (64000, 74, ''); +insert into t5 values (72000, 36, 'q:/'); +insert into t5 values (79000, null, '[P'); +insert into t5 values (82000, 0, 'V-Qr'); +insert into t5 values (88000, 44, '1Z '); +insert 
into t5 values (94000, 15, 'G]A5'); +insert into t5 values (96000, -0, 'C8'); +insert into t5 values (97000, 56, null); + +select + count(*) + from + t5 as ref_2 + left outer join (select + ref_3.c_2_c48_2 as c_6_c185_6 + from + t4 as ref_3 + ) as subq_1 + on (ref_2.c52 = subq_1.c_6_c185_6 ) + where intExp2(ref_2.pkey) <= + (case when ((subq_1.c_6_c185_6 = 1) and (not (subq_1.c_6_c185_6 = 1))) then 0 else hiveHash(ref_2.c56) end); + +select + count(*) + from + t5 as ref_2 + left outer join (select + ref_3.c_2_c48_2 as c_6_c185_6 + from + t4 as ref_3 + ) as subq_1 + on (ref_2.c52 = subq_1.c_6_c185_6 ) + where intExp2(ref_2.pkey) <= hiveHash(ref_2.c56); + +DROP TABLE t0, t1, t4, t5; diff --git a/parser/testdata/03517_logical_join_predicate_push_down_with_pre_expression_bug/ast.json b/parser/testdata/03517_logical_join_predicate_push_down_with_pre_expression_bug/ast.json new file mode 100644 index 000000000..4c3165799 --- /dev/null +++ b/parser/testdata/03517_logical_join_predicate_push_down_with_pre_expression_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery td (children 1)" + }, + { + "explain": " Identifier td" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001354071, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03517_logical_join_predicate_push_down_with_pre_expression_bug/metadata.json b/parser/testdata/03517_logical_join_predicate_push_down_with_pre_expression_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03517_logical_join_predicate_push_down_with_pre_expression_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03517_logical_join_predicate_push_down_with_pre_expression_bug/query.sql b/parser/testdata/03517_logical_join_predicate_push_down_with_pre_expression_bug/query.sql new file mode 100644 index 000000000..a098ede07 --- /dev/null +++ b/parser/testdata/03517_logical_join_predicate_push_down_with_pre_expression_bug/query.sql @@ -0,0 +1,24 @@ +drop table if exists td; +drop table if exists tdt; + +CREATE TABLE td (id Int16, d Date) ENGINE=MergeTree() order by id; +CREATE TABLE tdt (id Int16, dt DateTime) ENGINE=MergeTree() order by id; + +insert into td values (1,'2025-03-01'),(2,'2025-04-01'); +insert into tdt values (1,'2025-03-01 01:01:01'),(2,'2025-03-01 02:01:01'),(3,'2025-04-01 03:01:01'),(4,'2025-04-01 04:01:01'),(5,'2025-04-01 05:01:01'); + +SELECT td_d FROM (SELECT t.id td_id, t.d td_d, uniqExact(tdt.id) as cnt FROM td as t LEFT JOIN tdt ON toDate(tdt.dt) = t.d GROUP BY td_id, td_d) WHERE td_d = '2025-04-01'; +SELECT td_d FROM (SELECT t.id td_id, t.d td_d, uniqExact(tdt.id) as cnt FROM tdt RIGHT JOIN td as t ON toDate(tdt.dt) = t.d GROUP BY td_id, td_d) WHERE td_d = '2025-04-01'; + +SELECT td_d FROM (SELECT t.id td_id, t.d td_d, uniqExact(tdt.id) as cnt FROM td as t INNER JOIN tdt ON toDate(tdt.dt) = t.d GROUP BY td_id, td_d) WHERE td_d = '2025-04-01'; +SELECT td_d FROM (SELECT t.id td_id, t.d td_d, uniqExact(tdt.id) as cnt FROM tdt INNER JOIN td as t ON toDate(tdt.dt) = t.d GROUP BY td_id, td_d) WHERE td_d = '2025-04-01'; + +CREATE VIEW v AS +SELECT + t.id td_id, t.d td_d, uniqExact(tdt.id) as cnt +FROM + td as t + LEFT JOIN tdt ON toDate(tdt.dt) = t.d +GROUP BY td_id, td_d; + +SELECT td_d FROM v WHERE td_d = '2025-04-01'; diff --git a/parser/testdata/03517_s3_plain_rewritable_encrypted_empty_path/ast.json 
b/parser/testdata/03517_s3_plain_rewritable_encrypted_empty_path/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03517_s3_plain_rewritable_encrypted_empty_path/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03517_s3_plain_rewritable_encrypted_empty_path/metadata.json b/parser/testdata/03517_s3_plain_rewritable_encrypted_empty_path/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03517_s3_plain_rewritable_encrypted_empty_path/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03517_s3_plain_rewritable_encrypted_empty_path/query.sql b/parser/testdata/03517_s3_plain_rewritable_encrypted_empty_path/query.sql new file mode 100644 index 000000000..f7c61eef7 --- /dev/null +++ b/parser/testdata/03517_s3_plain_rewritable_encrypted_empty_path/query.sql @@ -0,0 +1,14 @@ +-- Tags: no-fasttest, no-shared-merge-tree +-- Tag no-fasttest: requires S3 +-- Tag no-shared-merge-tree: does not support replication + +DROP TABLE IF EXISTS t0 SYNC; + +CREATE TABLE t0 (c0 Int32) ENGINE = MergeTree() ORDER BY c0 +SETTINGS disk='disk_encrypted_03517'; + +INSERT INTO t0 VALUES (1), (2), (3); + +SELECT * FROM t0; + +DROP TABLE t0; diff --git a/parser/testdata/03518_bad_sql_udf/ast.json b/parser/testdata/03518_bad_sql_udf/ast.json new file mode 100644 index 000000000..a20f6e845 --- /dev/null +++ b/parser/testdata/03518_bad_sql_udf/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateFunctionQuery 03518_bad_sql_udf (children 2)" + }, + { + "explain": " Identifier 03518_bad_sql_udf" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function identity (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier x" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001621158, + "rows_read": 8, + "bytes_read": 297 + } +} diff --git a/parser/testdata/03518_bad_sql_udf/metadata.json b/parser/testdata/03518_bad_sql_udf/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03518_bad_sql_udf/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03518_bad_sql_udf/query.sql b/parser/testdata/03518_bad_sql_udf/query.sql new file mode 100644 index 000000000..2c60941d0 --- /dev/null +++ b/parser/testdata/03518_bad_sql_udf/query.sql @@ -0,0 +1 @@ +CREATE OR REPLACE FUNCTION 03518_bad_sql_udf AS lambda(identity(x), x); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03518_left_to_cross_incorrect/ast.json b/parser/testdata/03518_left_to_cross_incorrect/ast.json new file mode 100644 index 000000000..b31d5cbfe --- /dev/null +++ b/parser/testdata/03518_left_to_cross_incorrect/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001451236, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03518_left_to_cross_incorrect/metadata.json b/parser/testdata/03518_left_to_cross_incorrect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03518_left_to_cross_incorrect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03518_left_to_cross_incorrect/query.sql 
b/parser/testdata/03518_left_to_cross_incorrect/query.sql new file mode 100644 index 000000000..10c32466d --- /dev/null +++ b/parser/testdata/03518_left_to_cross_incorrect/query.sql @@ -0,0 +1,27 @@ +SET enable_analyzer = 1; + +WITH table AS + ( + SELECT 1 AS key + ) +SELECT * +FROM table AS T1 +LEFT JOIN +( + SELECT * + FROM table + WHERE false +) AS T2 ON 1; + +WITH table AS + ( + SELECT 1 AS key + ) +SELECT * +FROM table AS T1 +LEFT JOIN +( + SELECT * + FROM table + WHERE false +) AS T2 ON 0; diff --git a/parser/testdata/03518_table_function_remote_no_replicas/ast.json b/parser/testdata/03518_table_function_remote_no_replicas/ast.json new file mode 100644 index 000000000..1de4d4157 --- /dev/null +++ b/parser/testdata/03518_table_function_remote_no_replicas/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '|'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.00151908, + "rows_read": 11, + "bytes_read": 415 + } +} diff --git a/parser/testdata/03518_table_function_remote_no_replicas/metadata.json b/parser/testdata/03518_table_function_remote_no_replicas/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03518_table_function_remote_no_replicas/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03518_table_function_remote_no_replicas/query.sql b/parser/testdata/03518_table_function_remote_no_replicas/query.sql new file mode 100644 index 000000000..51e7b9031 --- /dev/null +++ b/parser/testdata/03518_table_function_remote_no_replicas/query.sql @@ -0,0 +1 @@ +SELECT * FROM remote('|'); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03519_analyzer_tuple_cast/ast.json b/parser/testdata/03519_analyzer_tuple_cast/ast.json new file mode 100644 index 000000000..d3e40d15c --- /dev/null +++ b/parser/testdata/03519_analyzer_tuple_cast/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001512111, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03519_analyzer_tuple_cast/metadata.json b/parser/testdata/03519_analyzer_tuple_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03519_analyzer_tuple_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03519_analyzer_tuple_cast/query.sql b/parser/testdata/03519_analyzer_tuple_cast/query.sql new file mode 100644 index 000000000..33dd02184 --- /dev/null +++ b/parser/testdata/03519_analyzer_tuple_cast/query.sql @@ -0,0 +1,17 @@ +set enable_analyzer=1; + +DROP TABLE IF EXISTS test, src; + +SELECT count(), plus((-9, 0), (number, number)) AS k FROM remote('127.0.0.{3,2}', numbers(2)) GROUP BY k ORDER BY k; +SELECT count(), mapAdd(map(1::UInt128, 1), map(1::UInt128 ,number)) AS k FROM remote('127.0.0.{3,2}', numbers(2)) 
GROUP BY k ORDER BY k; + +CREATE TABLE test (s String) ORDER BY (); +INSERT INTO test VALUES ('a'), ('b'); +SELECT transform(s, ['a', 'b'], [(1, 2), (3, 4)], (0, 0)) AS k FROM test ORDER BY k; +SELECT s != '' ? (1,2) : (0,0) AS k FROM test ORDER BY k; + +CREATE TABLE src (id UInt32, type String, data String) ENGINE=MergeTree ORDER BY tuple(); +INSERT INTO src VALUES (1, 'ok', 'data'); +SELECT id, tuple(replaceAll(data, 'a', 'e') AS col_a, type) AS a, tuple(replaceAll(data, 'a', 'e') AS col_b, type) AS b FROM src; + +DROP TABLE IF EXISTS test, src; diff --git a/parser/testdata/03519_cte_allow_push_predicate_ast_for_distributed_subqueries_bug/ast.json b/parser/testdata/03519_cte_allow_push_predicate_ast_for_distributed_subqueries_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03519_cte_allow_push_predicate_ast_for_distributed_subqueries_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03519_cte_allow_push_predicate_ast_for_distributed_subqueries_bug/metadata.json b/parser/testdata/03519_cte_allow_push_predicate_ast_for_distributed_subqueries_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03519_cte_allow_push_predicate_ast_for_distributed_subqueries_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03519_cte_allow_push_predicate_ast_for_distributed_subqueries_bug/query.sql b/parser/testdata/03519_cte_allow_push_predicate_ast_for_distributed_subqueries_bug/query.sql new file mode 100644 index 000000000..79f74823a --- /dev/null +++ b/parser/testdata/03519_cte_allow_push_predicate_ast_for_distributed_subqueries_bug/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-parallel-replicas +with sub as (select number from numbers(1)) select x from (select number as x from remote('127.0.0.{1,2}', numbers(2))) where x in sub settings allow_push_predicate_ast_for_distributed_subqueries = 1, enable_analyzer=1; +select '-'; +with sub as (select number from numbers(1)) select x from (select number as x from remote('127.0.0.{1,2}', numbers(2))) where x global in sub settings allow_push_predicate_ast_for_distributed_subqueries = 1, enable_analyzer=1; diff --git a/parser/testdata/03519_fulter_push_down_duplicate_column_name_bug/ast.json b/parser/testdata/03519_fulter_push_down_duplicate_column_name_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03519_fulter_push_down_duplicate_column_name_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03519_fulter_push_down_duplicate_column_name_bug/metadata.json b/parser/testdata/03519_fulter_push_down_duplicate_column_name_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03519_fulter_push_down_duplicate_column_name_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03519_fulter_push_down_duplicate_column_name_bug/query.sql b/parser/testdata/03519_fulter_push_down_duplicate_column_name_bug/query.sql new file mode 100644 index 000000000..8965bf105 --- /dev/null +++ b/parser/testdata/03519_fulter_push_down_duplicate_column_name_bug/query.sql @@ -0,0 +1,8 @@ +SELECT + number, + countIf(1, number > 0) +FROM numbers(10) +GROUP BY number +HAVING (count() <= 10) AND 1 +ORDER BY number ASC +SETTINGS enable_analyzer = 1; diff --git a/parser/testdata/03519_left_to_cross_incorrect/ast.json b/parser/testdata/03519_left_to_cross_incorrect/ast.json new file mode 100644 index 000000000..afb27ad53 
--- /dev/null +++ b/parser/testdata/03519_left_to_cross_incorrect/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00156725, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03519_left_to_cross_incorrect/metadata.json b/parser/testdata/03519_left_to_cross_incorrect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03519_left_to_cross_incorrect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03519_left_to_cross_incorrect/query.sql b/parser/testdata/03519_left_to_cross_incorrect/query.sql new file mode 100644 index 000000000..48795322f --- /dev/null +++ b/parser/testdata/03519_left_to_cross_incorrect/query.sql @@ -0,0 +1,22 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; + +CREATE TABLE t0 ( + c0 Int32 DEFAULT 0, + c1 Int32 +) ENGINE = MergeTree() ORDER BY c0; + +CREATE TABLE t1 ( + c0 Int32 +) ENGINE = MergeTree() ORDER BY c0; + +INSERT INTO t1 (c0) VALUES (0); + +SELECT * +FROM t1 +LEFT JOIN t0 ON 1=1; + +DROP TABLE t0; +DROP TABLE t1; diff --git a/parser/testdata/03519_merge_engine_read_column_existing_in_subset_of_tables/ast.json b/parser/testdata/03519_merge_engine_read_column_existing_in_subset_of_tables/ast.json new file mode 100644 index 000000000..9879f95e2 --- /dev/null +++ b/parser/testdata/03519_merge_engine_read_column_existing_in_subset_of_tables/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001170093, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03519_merge_engine_read_column_existing_in_subset_of_tables/metadata.json b/parser/testdata/03519_merge_engine_read_column_existing_in_subset_of_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03519_merge_engine_read_column_existing_in_subset_of_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03519_merge_engine_read_column_existing_in_subset_of_tables/query.sql b/parser/testdata/03519_merge_engine_read_column_existing_in_subset_of_tables/query.sql new file mode 100644 index 000000000..0e82425cb --- /dev/null +++ b/parser/testdata/03519_merge_engine_read_column_existing_in_subset_of_tables/query.sql @@ -0,0 +1,86 @@ +set enable_analyzer=1; + +drop table if exists test1; +drop table if exists test2; +drop table if exists test3; +drop table if exists test_merge; + +create table test1(a UInt64, b UInt64) engine=Memory; +create table test2(a UInt64, c UInt64) engine=Memory; +create table test3(a UInt64, d UInt64) engine=Memory; +create table test_merge (a UInt64, b UInt64, c UInt64, d UInt64, e UInt64) engine=Merge(database(), 'test'); + +insert into test1 select 1, 2; +insert into test2 select 3, 4; +insert into test3 select 5, 6; + +select 'a'; +select a from test_merge order by all; +select 'b'; +select b from test_merge order by all; +select 'c'; +select c from test_merge order by all; +select 'd'; +select d from test_merge order by all; +select 'e'; +select e from test_merge order by all; +select 'a, b'; +select a, b from test_merge order by all; +select 'a, c'; +select a, c from test_merge order by all; +select 'a, d'; +select a, d from test_merge order by all; +select 'a, e'; +select a, e from 
test_merge order by all; +select 'b, c'; +select b, c from test_merge order by all; +select 'b, d'; +select b, d from test_merge order by all; +select 'b, e'; +select b, e from test_merge order by all; +select 'c, d'; +select c, d from test_merge order by all; +select 'c, e'; +select c, e from test_merge order by all; +select 'a, b, c'; +select a, b, c from test_merge order by all; +select 'a, b, d'; +select a, b, d from test_merge order by all; +select 'a, b, e'; +select a, b, e from test_merge order by all; +select 'a, c, d'; +select a, c, d from test_merge order by all; +select 'a, c, e'; +select a, c, e from test_merge order by all; +select 'b, c, d'; +select b, c, d from test_merge order by all; +select 'b, c, e'; +select b, c, e from test_merge order by all; +select 'c, d, e'; +select c, d, e from test_merge order by all; +select 'b, _table'; +select b, _table from test_merge order by all; +select 'c, _table'; +select c, _table from test_merge order by all; +select 'd, _table'; +select d, _table from test_merge order by all; +select 'e, _table'; +select e, _table from test_merge order by all; +select 'b, c, _table'; +select b, c, _table from test_merge order by all; +select 'b, d, _table'; +select b, d, _table from test_merge order by all; +select 'b, e, _table'; +select b, e, _table from test_merge order by all; +select 'c, d, _table'; +select c, d, _table from test_merge order by all; +select 'c, e, _table'; +select c, e, _table from test_merge order by all; +select 'd, e, _table'; +select d, e, _table from test_merge order by all; + +drop table test1; +drop table test2; +drop table test3; +drop table test_merge; + diff --git a/parser/testdata/03519_merge_tree_part_info_coverage/ast.json b/parser/testdata/03519_merge_tree_part_info_coverage/ast.json new file mode 100644 index 000000000..95c9f60ad --- /dev/null +++ b/parser/testdata/03519_merge_tree_part_info_coverage/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001354153, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03519_merge_tree_part_info_coverage/metadata.json b/parser/testdata/03519_merge_tree_part_info_coverage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03519_merge_tree_part_info_coverage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03519_merge_tree_part_info_coverage/query.sql b/parser/testdata/03519_merge_tree_part_info_coverage/query.sql new file mode 100644 index 000000000..04f35d84e --- /dev/null +++ b/parser/testdata/03519_merge_tree_part_info_coverage/query.sql @@ -0,0 +1,22 @@ +SET enable_analyzer = 1; + +create table mt (names Array(String)) engine = MergeTree order by names; +insert into mt values (['0_1_1_0', '0_1_9_1', '0_3_3_0', '0_5_5_0', '0_7_7_0', '0_9_9_0', '0_11_11_0', '0_11_19_1', '0_13_13_0', '0_15_15_0', '0_17_17_0', '0_19_19_0', '0_21_21_0', '0_21_29_2', '0_23_23_0', '0_23_29_1', '0_24_24_0', '0_25_25_0', '0_28_28_0', '0_29_29_0', '0_33_33_0', '0_33_41_1', '0_34_34_0', '0_36_36_0', '0_38_38_0', '0_41_41_0', '0_43_43_0', '0_43_51_1', '0_44_44_0', '0_47_47_0', '0_49_49_0', '0_51_51_0', '0_53_53_0', '0_53_62_2', '0_54_54_0', '0_54_62_1', '0_56_56_0', '0_59_59_0', '0_61_61_0', '0_62_62_0', '0_65_65_0', '0_65_73_1', '0_66_66_0', '0_68_68_0', '0_70_70_0', '0_73_73_0', '0_74_74_0', '0_74_83_1', '0_77_77_0', '0_79_79_0', '0_81_81_0', '0_83_83_0', 
'0_85_85_0', '0_85_93_1', '0_87_87_0', '0_89_89_0', '0_91_91_0', '0_93_93_0', '0_95_95_0', '0_95_104_2', '0_97_97_0', '0_97_104_1', '0_99_99_0', '0_101_101_0', '0_102_102_0', '0_104_104_0', '0_106_106_0', '0_106_119_2', '0_109_109_0', '0_109_116_1', '0_111_111_0', '0_113_113_0', '0_115_115_0', '0_116_116_0', '0_119_119_0', '0_121_121_0', '0_121_129_1', '0_123_123_0', '0_125_125_0', '0_127_127_0', '0_129_129_0', '0_131_131_0', '0_131_137_1', '0_133_133_0', '0_135_135_0', '0_136_136_0', '0_137_137_0', '0_141_141_0', '0_141_148_1', '0_143_143_0', '0_145_145_0', '0_146_146_0', '0_148_148_0', '0_151_151_0', '0_151_159_1', '0_152_152_0', '0_155_155_0', '0_156_156_0', '0_159_159_0', '0_161_161_0', '0_161_166_1', '0_163_163_0', '0_164_164_0', '0_166_166_0', '0_169_169_0', '0_169_176_1', '0_171_171_0', '0_173_173_0', '0_175_175_0', '0_176_176_0', '0_178_178_0', '0_178_186_2', '0_180_180_0', '0_180_186_1', '0_182_182_0', '0_184_184_0', '0_185_185_0', '0_186_186_0', '0_189_189_0', '0_189_197_1', '0_189_201_2', '0_193_193_0', '0_195_195_0', '0_197_197_0', '0_199_199_0', '0_201_201_0', '0_203_203_0', '0_203_211_1', '0_203_213_2', '0_205_205_0', '0_207_207_0', '0_209_209_0', '0_211_211_0', '0_213_213_0', '0_215_215_0', '0_215_222_1', '0_217_217_0', '0_219_219_0', '0_221_221_0', '0_222_222_0', '0_224_224_0', '0_224_232_1', '0_227_227_0', '0_229_229_0', '0_230_230_0', '0_232_232_0', '0_235_235_0', '0_235_242_1', '0_237_237_0', '0_239_239_0', '0_241_241_0', '0_242_242_0', '0_245_245_0', '0_245_253_1', '0_247_247_0', '0_249_249_0', '0_251_251_0', '0_253_253_0', '0_255_255_0', '0_255_261_1', '0_255_263_2', '0_257_257_0', '0_259_259_0', '0_261_261_0', '0_263_263_0', '0_265_265_0', '0_265_271_1', '0_267_267_0', '0_268_268_0', '0_269_269_0', '0_271_271_0', '0_274_274_0', '0_274_277_1', '0_274_289_2', '0_275_275_0', '0_277_277_0', '0_278_278_0', '0_278_289_1', '0_283_283_0', '0_285_285_0', '0_286_286_0', '0_289_289_0', '0_291_291_0', '0_291_299_1', '0_293_293_0', '0_295_295_0', '0_297_297_0', '0_299_299_0', '0_301_301_0', '0_301_310_2', '0_303_303_0', '0_303_310_1', '0_305_305_0', '0_307_307_0', '0_309_309_0', '0_310_310_0', '0_312_312_0', '0_312_324_2', '0_313_313_0', '0_315_315_0', '0_315_324_1', '0_319_319_0', '0_320_320_0', '0_322_322_0', '0_324_324_0', '0_327_327_0', '0_327_335_1', '0_328_328_0', '0_329_329_0', '0_330_330_0', '0_335_335_0', '0_337_337_0', '0_337_345_1', '0_339_339_0', '0_341_341_0', '0_343_343_0', '0_345_345_0', '0_347_347_0', '0_347_355_1', '0_349_349_0', '0_351_351_0', '0_353_353_0', '0_355_355_0', '0_357_357_0', '0_357_367_2', '0_359_359_0', '0_359_367_1', '0_361_361_0', '0_363_363_0', '0_365_365_0', '0_367_367_0', '0_369_369_0', '0_369_377_1', '0_369_381_2', '0_371_371_0', '0_372_372_0', '0_375_375_0', '0_377_377_0', '0_379_379_0', '0_381_381_0', '0_383_383_0', '0_383_387_1', '0_383_391_2', '0_383_395_3', '0_385_385_0', '0_387_387_0', '0_389_389_0', '0_391_391_0', '0_393_393_0', '0_395_395_0', '0_397_397_0', '0_397_399_1', '0_399_399_0', '1_2_2_0', '1_2_10_1', '1_4_4_0', '1_6_6_0', '1_8_8_0', '1_10_10_0', '1_12_12_0', '1_12_20_1', '1_14_14_0', '1_16_16_0', '1_18_18_0', '1_20_20_0', '1_22_22_0', '1_22_27_1', '1_26_26_0', '1_27_27_0', '1_30_30_0', '1_30_37_1', '1_31_31_0', '1_32_32_0', '1_35_35_0', '1_37_37_0', '1_39_39_0', '1_39_46_1', '1_40_40_0', '1_42_42_0', '1_45_45_0', '1_46_46_0', '1_48_48_0', '1_48_57_1', '1_50_50_0', '1_52_52_0', '1_55_55_0', '1_57_57_0', '1_58_58_0', '1_58_63_1', '1_60_60_0', '1_63_63_0', '1_64_64_0', '1_64_72_1', '1_67_67_0', '1_69_69_0', '1_71_71_0', 
'1_72_72_0', '1_75_75_0', '1_75_84_2', '1_76_76_0', '1_76_84_1', '1_78_78_0', '1_80_80_0', '1_82_82_0', '1_84_84_0', '1_86_86_0', '1_86_94_1', '1_88_88_0', '1_90_90_0', '1_92_92_0', '1_94_94_0', '1_96_96_0', '1_96_100_1', '1_96_110_2', '1_98_98_0', '1_100_100_0', '1_103_103_0', '1_103_110_1', '1_105_105_0', '1_107_107_0', '1_108_108_0', '1_110_110_0', '1_112_112_0', '1_112_120_1', '1_114_114_0', '1_117_117_0', '1_118_118_0', '1_120_120_0', '1_122_122_0', '1_122_130_1', '1_124_124_0', '1_126_126_0', '1_128_128_0', '1_130_130_0', '1_132_132_0', '1_132_142_2', '1_134_134_0', '1_134_142_1', '1_138_138_0', '1_139_139_0', '1_140_140_0', '1_142_142_0', '1_144_144_0', '1_144_153_1', '1_147_147_0', '1_149_149_0', '1_150_150_0', '1_153_153_0', '1_154_154_0', '1_154_160_1', '1_154_162_2', '1_157_157_0', '1_158_158_0', '1_160_160_0', '1_162_162_0', '1_165_165_0', '1_165_172_1', '1_167_167_0', '1_168_168_0', '1_170_170_0', '1_172_172_0', '1_174_174_0', '1_174_181_1', '1_174_188_2', '1_177_177_0', '1_179_179_0', '1_181_181_0', '1_183_183_0', '1_187_187_0', '1_188_188_0', '1_190_190_0', '1_190_196_1', '1_191_191_0', '1_192_192_0', '1_194_194_0', '1_196_196_0', '1_198_198_0', '1_198_206_1', '1_200_200_0', '1_202_202_0', '1_204_204_0', '1_206_206_0', '1_208_208_0', '1_208_216_1', '1_210_210_0', '1_212_212_0', '1_214_214_0', '1_216_216_0', '1_218_218_0', '1_218_228_2', '1_220_220_0', '1_220_228_1', '1_223_223_0', '1_225_225_0', '1_226_226_0', '1_228_228_0', '1_231_231_0', '1_231_238_1', '1_233_233_0', '1_234_234_0', '1_236_236_0', '1_238_238_0', '1_240_240_0', '1_240_248_1', '1_243_243_0', '1_244_244_0', '1_246_246_0', '1_248_248_0', '1_250_250_0', '1_250_258_1', '1_252_252_0', '1_254_254_0', '1_256_256_0', '1_258_258_0', '1_260_260_0', '1_260_264_1', '1_260_273_2', '1_262_262_0', '1_264_264_0', '1_266_266_0', '1_266_273_1', '1_270_270_0', '1_272_272_0', '1_273_273_0', '1_276_276_0', '1_276_282_1', '1_279_279_0', '1_280_280_0', '1_281_281_0', '1_282_282_0', '1_284_284_0', '1_284_294_2', '1_287_287_0', '1_287_294_1', '1_288_288_0', '1_290_290_0', '1_292_292_0', '1_294_294_0', '1_296_296_0', '1_296_300_1', '1_296_308_2', '1_298_298_0', '1_300_300_0', '1_302_302_0', '1_302_308_1', '1_304_304_0', '1_306_306_0', '1_308_308_0', '1_311_311_0', '1_311_318_1', '1_314_314_0', '1_316_316_0', '1_317_317_0', '1_318_318_0', '1_321_321_0', '1_321_331_1', '1_323_323_0', '1_325_325_0', '1_326_326_0', '1_331_331_0', '1_332_332_0', '1_332_338_1', '1_333_333_0', '1_334_334_0', '1_336_336_0', '1_338_338_0', '1_340_340_0', '1_340_350_2', '1_342_342_0', '1_342_350_1', '1_344_344_0', '1_346_346_0', '1_348_348_0', '1_350_350_0', '1_352_352_0', '1_352_360_1', '1_354_354_0', '1_356_356_0', '1_358_358_0', '1_360_360_0', '1_362_362_0', '1_362_370_1', '1_364_364_0', '1_366_366_0', '1_368_368_0', '1_370_370_0', '1_373_373_0', '1_373_378_1', '1_373_382_2', '1_374_374_0', '1_376_376_0', '1_378_378_0', '1_380_380_0', '1_382_382_0', '1_384_384_0', '1_384_388_1', '1_384_392_2', '1_384_396_3', '1_386_386_0', '1_388_388_0', '1_390_390_0', '1_392_392_0', '1_394_394_0', '1_396_396_0', '1_398_398_0', '1_398_400_1', '1_400_400_0']); + +select + tag1 as part_name +from ( + select + tag1, sum(is_covered) as is_covered + from ( + select + tag1, tag2, isMergeTreePartCoveredBy(tag1, tag2) as is_covered + from mt + array join names AS tag1 + array join names AS tag2 + where tag1 != tag2 + ) + group by tag1 +) +where is_covered = 0 +order by mergeTreePartInfo(part_name).partition_id, mergeTreePartInfo(part_name).min_block; diff --git 
a/parser/testdata/03519_merge_tree_part_info_unpack/ast.json b/parser/testdata/03519_merge_tree_part_info_unpack/ast.json new file mode 100644 index 000000000..2495df949 --- /dev/null +++ b/parser/testdata/03519_merge_tree_part_info_unpack/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001704496, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03519_merge_tree_part_info_unpack/metadata.json b/parser/testdata/03519_merge_tree_part_info_unpack/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03519_merge_tree_part_info_unpack/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03519_merge_tree_part_info_unpack/query.sql b/parser/testdata/03519_merge_tree_part_info_unpack/query.sql new file mode 100644 index 000000000..5e3785f65 --- /dev/null +++ b/parser/testdata/03519_merge_tree_part_info_unpack/query.sql @@ -0,0 +1,23 @@ +SET enable_analyzer = 1; + +WITH mergeTreePartInfo('all_12_25_7_4') AS info +SELECT info.partition_id, info.min_block, info.max_block, info.level, info.mutation; + +WITH mergeTreePartInfo('merge-not-byte-identical_all_12_25_7_4_try100') AS info +SELECT info.partition_id, info.prefix, info.suffix, info.min_block, info.max_block, info.level, info.mutation; + +WITH mergeTreePartInfo('all_12_25_7_4_try100') AS info +SELECT info.partition_id, info.prefix, info.suffix, info.min_block, info.max_block, info.level, info.mutation; + +WITH mergeTreePartInfo('broken-on-start_all_12_25_7_4') AS info +SELECT info.partition_id, info.prefix, info.suffix, info.min_block, info.max_block, info.level, info.mutation; + +CREATE TABLE mt(key UInt64, value String) +ENGINE = MergeTree +ORDER BY key; + +SYSTEM STOP MERGES mt; +INSERT INTO mt SELECT rand(), rand() FROM numbers(4) SETTINGS min_insert_block_size_rows=1, max_block_size=1; +SELECT _part FROM mt ORDER BY mergeTreePartInfo(_part).max_block DESC; + +SELECT _part, isMergeTreePartCoveredBy(_part, 'all_1_2_10') FROM mt ORDER BY mergeTreePartInfo(_part).max_block DESC; diff --git a/parser/testdata/03519_multiple_join_using/ast.json b/parser/testdata/03519_multiple_join_using/ast.json new file mode 100644 index 000000000..1c138cae2 --- /dev/null +++ b/parser/testdata/03519_multiple_join_using/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000959944, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03519_multiple_join_using/metadata.json b/parser/testdata/03519_multiple_join_using/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03519_multiple_join_using/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03519_multiple_join_using/query.sql b/parser/testdata/03519_multiple_join_using/query.sql new file mode 100644 index 000000000..10b8e2c03 --- /dev/null +++ b/parser/testdata/03519_multiple_join_using/query.sql @@ -0,0 +1,19 @@ +SET enable_analyzer=1; + +CREATE TABLE table0 (id Int64, val String) Engine=MergeTree ORDER BY id; +CREATE TABLE table1 (id2 Int64, val String) Engine=MergeTree ORDER BY id2; +CREATE TABLE table2 (id Int64, id2 Int64, val String) Engine=MergeTree ORDER BY (id, id2); + +INSERT INTO table0 VALUES (1, 'a'), (2, 'b'), (3, 'c'); +INSERT INTO table1 
VALUES (1, 'a'), (2,'b'), (3, 'c'); +INSERT INTO table2 VALUES (1, 1, 'a'), (1, 2, 'b'), (2, 2, 'b'), (1, 3, 'c'), (3, 2, 'b'); + +-- { echoOn } +SELECT * FROM table0 JOIN table2 USING id JOIN table1 USING id2 ORDER BY ALL; + +SELECT * FROM table0 AS t0 JOIN table2 USING val JOIN table1 USING val ORDER BY ALL; + +WITH t0 AS ( + SELECT * FROM table0 WHERE val LIKE 'b%' +) +SELECT * FROM t0 JOIN table2 AS t2 USING id JOIN table1 AS t1 USING id2 ORDER BY ALL; diff --git a/parser/testdata/03519_ttl_extended_data_types/ast.json b/parser/testdata/03519_ttl_extended_data_types/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03519_ttl_extended_data_types/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03519_ttl_extended_data_types/metadata.json b/parser/testdata/03519_ttl_extended_data_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03519_ttl_extended_data_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03519_ttl_extended_data_types/query.sql b/parser/testdata/03519_ttl_extended_data_types/query.sql new file mode 100644 index 000000000..e7c7dcebe --- /dev/null +++ b/parser/testdata/03519_ttl_extended_data_types/query.sql @@ -0,0 +1,39 @@ +-- Row TTL with extended data types +DROP TABLE IF EXISTS ttl_03519_1 SYNC; +CREATE TABLE ttl_03519_1 (date Date32, date_key Int) ENGINE=MergeTree TTL date + INTERVAL 1 MONTH ORDER BY date; +INSERT INTO ttl_03519_1 VALUES ('2010-01-01', 2010); +INSERT INTO ttl_03519_1 VALUES ('1901-01-01', 1901); +INSERT INTO ttl_03519_1 VALUES ('2170-01-01', 2170); +OPTIMIZE TABLE ttl_03519_1 FINAL; +SELECT * FROM ttl_03519_1 ORDER BY date FORMAT CSV; +DROP TABLE ttl_03519_1 SYNC; + +DROP TABLE IF EXISTS ttl_03519_2 SYNC; +CREATE TABLE ttl_03519_2 (date DateTime64(5, 'UTC'), date_key Int) ENGINE=MergeTree TTL date + INTERVAL 1 MONTH ORDER BY date; +INSERT INTO ttl_03519_2 VALUES ('2010-01-01 12:12:12.12345', 2010); +INSERT INTO ttl_03519_2 VALUES ('1901-01-01 12:12:12.12345', 1901); +INSERT INTO ttl_03519_2 VALUES ('2170-01-01 12:12:12.12345', 2170); +OPTIMIZE TABLE ttl_03519_2 FINAL; +SELECT * FROM ttl_03519_2 ORDER BY date FORMAT CSV; +DROP TABLE ttl_03519_2 SYNC; + +-- Column TTL with extended data types + +DROP TABLE IF EXISTS ttl_03519_3 SYNC; +CREATE TABLE ttl_03519_3 (date Date32, str String TTL date + INTERVAL 1 MONTH) ENGINE=MergeTree ORDER BY date; +INSERT INTO ttl_03519_3 VALUES ('2010-01-01', 'qwe'); +INSERT INTO ttl_03519_3 VALUES ('1901-01-01', 'rty'); +INSERT INTO ttl_03519_3 VALUES ('2170-01-01', 'uio'); +OPTIMIZE TABLE ttl_03519_3 FINAL; +SELECT * FROM ttl_03519_3 ORDER BY date FORMAT CSV; +DROP TABLE ttl_03519_3 SYNC; + +DROP TABLE IF EXISTS ttl_03519_4 SYNC; +CREATE TABLE ttl_03519_4 (date DateTime64(5, 'UTC'), str String TTL date + INTERVAL 1 MONTH) ENGINE=MergeTree ORDER BY date; +INSERT INTO ttl_03519_4 VALUES ('2010-01-01 12:12:12', 'qwe'); +INSERT INTO ttl_03519_4 VALUES ('1901-01-01 12:12:12.12345', 'rty'); +INSERT INTO ttl_03519_4 VALUES ('2170-01-01 12:12:12.12345', 'uio'); +OPTIMIZE TABLE ttl_03519_4 FINAL; +SELECT * FROM ttl_03519_4 ORDER BY date FORMAT CSV; +DROP TABLE ttl_03519_4 SYNC; + diff --git a/parser/testdata/03519_zero_filesystem_prefetch_max_memory_usage/ast.json b/parser/testdata/03519_zero_filesystem_prefetch_max_memory_usage/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03519_zero_filesystem_prefetch_max_memory_usage/ast.json @@ -0,0 +1 @@ 
+{"error": true} diff --git a/parser/testdata/03519_zero_filesystem_prefetch_max_memory_usage/metadata.json b/parser/testdata/03519_zero_filesystem_prefetch_max_memory_usage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03519_zero_filesystem_prefetch_max_memory_usage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03519_zero_filesystem_prefetch_max_memory_usage/query.sql b/parser/testdata/03519_zero_filesystem_prefetch_max_memory_usage/query.sql new file mode 100644 index 000000000..23fdefbc4 --- /dev/null +++ b/parser/testdata/03519_zero_filesystem_prefetch_max_memory_usage/query.sql @@ -0,0 +1,3 @@ +-- Checks that session setting 'filesystem_prefetch_max_memory_usage' must not be 0 + +SET filesystem_prefetch_max_memory_usage = 0; -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03520_analyzer_distributed_in_cte_bug/ast.json b/parser/testdata/03520_analyzer_distributed_in_cte_bug/ast.json new file mode 100644 index 000000000..8303c9d9f --- /dev/null +++ b/parser/testdata/03520_analyzer_distributed_in_cte_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001590698, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03520_analyzer_distributed_in_cte_bug/metadata.json b/parser/testdata/03520_analyzer_distributed_in_cte_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03520_analyzer_distributed_in_cte_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03520_analyzer_distributed_in_cte_bug/query.sql b/parser/testdata/03520_analyzer_distributed_in_cte_bug/query.sql new file mode 100644 index 000000000..1836aaa6a --- /dev/null +++ b/parser/testdata/03520_analyzer_distributed_in_cte_bug/query.sql @@ -0,0 +1,5 @@ +set enable_analyzer=1; +WITH a AS (SELECT dummy FROM remote('127.0.0.{3,2}', system.one)) SELECT sum(dummy IN (a)) FROM remote('127.0.0.{3,2}', system.one); +WITH a AS (SELECT dummy FROM remote('127.0.0.{1,2}', system.one)) SELECT sum(dummy IN (a)) FROM remote('127.0.0.{1,2}', system.one); +WITH a AS (SELECT dummy FROM remote('127.0.0.{1,2}', system.one)) SELECT sum(dummy IN (a)) FROM remote('127.0.0.{3,2}', system.one); +WITH a AS (SELECT dummy FROM remote('127.0.0.{3,2}', system.one)) SELECT sum(dummy IN (a)) FROM remote('127.0.0.{1,2}', system.one); diff --git a/parser/testdata/03520_left_to_cross_incorrect/ast.json b/parser/testdata/03520_left_to_cross_incorrect/ast.json new file mode 100644 index 000000000..6ad56fc31 --- /dev/null +++ b/parser/testdata/03520_left_to_cross_incorrect/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001492916, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03520_left_to_cross_incorrect/metadata.json b/parser/testdata/03520_left_to_cross_incorrect/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03520_left_to_cross_incorrect/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03520_left_to_cross_incorrect/query.sql b/parser/testdata/03520_left_to_cross_incorrect/query.sql new file mode 100644 index 000000000..4b6f41e49 --- /dev/null +++ 
b/parser/testdata/03520_left_to_cross_incorrect/query.sql @@ -0,0 +1,5 @@ +SET enable_analyzer = 1; + +select * +from (select 1 a) t +left join (select 1 b where false) u on true; diff --git a/parser/testdata/03520_pr_distinct_in_order/ast.json b/parser/testdata/03520_pr_distinct_in_order/ast.json new file mode 100644 index 000000000..9b3cd6db5 --- /dev/null +++ b/parser/testdata/03520_pr_distinct_in_order/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_dio (children 1)" + }, + { + "explain": " Identifier t_dio" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001428189, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/03520_pr_distinct_in_order/metadata.json b/parser/testdata/03520_pr_distinct_in_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03520_pr_distinct_in_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03520_pr_distinct_in_order/query.sql b/parser/testdata/03520_pr_distinct_in_order/query.sql new file mode 100644 index 000000000..6281b6a93 --- /dev/null +++ b/parser/testdata/03520_pr_distinct_in_order/query.sql @@ -0,0 +1,11 @@ +drop table if exists t_dio; + +set optimize_read_in_order=0, optimize_distinct_in_order=1; +set enable_parallel_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost', parallel_replicas_for_non_replicated_merge_tree=1, parallel_replicas_local_plan=1; + +create table t_dio (a int, b int, c int) engine=MergeTree() order by (a, b); +insert into t_dio select number % number, number % 5, number % 10 from numbers(1,1000000); + +select distinct a, b, x, y from (select a, b, 1 as x, 2 as y from t_dio order by a) order by a, b; + +drop table if exists t_dio; diff --git a/parser/testdata/03520_pr_read_in_order/ast.json b/parser/testdata/03520_pr_read_in_order/ast.json new file mode 100644 index 000000000..384f4e7e7 --- /dev/null +++ b/parser/testdata/03520_pr_read_in_order/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_rio (children 1)" + }, + { + "explain": " Identifier t_rio" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001303551, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/03520_pr_read_in_order/metadata.json b/parser/testdata/03520_pr_read_in_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03520_pr_read_in_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03520_pr_read_in_order/query.sql b/parser/testdata/03520_pr_read_in_order/query.sql new file mode 100644 index 000000000..6a7dca58f --- /dev/null +++ b/parser/testdata/03520_pr_read_in_order/query.sql @@ -0,0 +1,11 @@ +drop table if exists t_rio; + +set optimize_read_in_order=1; +set enable_parallel_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost', parallel_replicas_for_non_replicated_merge_tree=1, parallel_replicas_local_plan=1; + +create table t_rio (a int, b int, c int) engine=MergeTree() order by (a, b); +insert into t_rio select number % number, number % 5, number % 10 from numbers(1,1000000); + +select a, b, x, y from (select a, b, 1 as x, 2 as y from t_rio order by a) order by a, b format Null; + +drop table if exists t_rio; diff --git 
a/parser/testdata/03521_bitNot_String_NUL_terminated/ast.json b/parser/testdata/03521_bitNot_String_NUL_terminated/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03521_bitNot_String_NUL_terminated/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03521_bitNot_String_NUL_terminated/metadata.json b/parser/testdata/03521_bitNot_String_NUL_terminated/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03521_bitNot_String_NUL_terminated/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03521_bitNot_String_NUL_terminated/query.sql b/parser/testdata/03521_bitNot_String_NUL_terminated/query.sql new file mode 100644 index 000000000..682be367b --- /dev/null +++ b/parser/testdata/03521_bitNot_String_NUL_terminated/query.sql @@ -0,0 +1,4 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/80774 +-- Regression for "Logical error: 'res.data[res.size] == '\0'" (in StringValueCompatibility) +SELECT uniqCombined64(bitNot(x)) IGNORE NULLS, anyHeavy(bitNot(x)) IGNORE NULLS FROM (SELECT DISTINCT concat(2, number) AS x FROM numbers(10)) FORMAT Null; +SELECT hex(any(bitNot('foo'))); diff --git a/parser/testdata/03521_long_partition_column_name/ast.json b/parser/testdata/03521_long_partition_column_name/ast.json new file mode 100644 index 000000000..043735c77 --- /dev/null +++ b/parser/testdata/03521_long_partition_column_name/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_long_partition_column_name (children 1)" + }, + { + "explain": " Identifier t_long_partition_column_name" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001565911, + "rows_read": 2, + "bytes_read": 108 + } +} diff --git a/parser/testdata/03521_long_partition_column_name/metadata.json b/parser/testdata/03521_long_partition_column_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03521_long_partition_column_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03521_long_partition_column_name/query.sql b/parser/testdata/03521_long_partition_column_name/query.sql new file mode 100644 index 000000000..a5af59134 --- /dev/null +++ b/parser/testdata/03521_long_partition_column_name/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS t_long_partition_column_name; + +CREATE TABLE t_long_partition_column_name ( +`一个非常非常非常非常非常非常非常非常非常非常非常长的中文字符串` Int, +) +ENGINE = MergeTree() +PARTITION BY `一个非常非常非常非常非常非常非常非常非常非常非常长的中文字符串` +ORDER BY tuple() +SETTINGS replace_long_file_name_to_hash = 1, max_file_name_length = 127; + +insert into t_long_partition_column_name values(1); + +SELECT * FROM t_long_partition_column_name; + +DROP TABLE IF EXISTS t_long_partition_column_name; diff --git a/parser/testdata/03521_long_statistics_name/ast.json b/parser/testdata/03521_long_statistics_name/ast.json new file mode 100644 index 000000000..54ffd3b8b --- /dev/null +++ b/parser/testdata/03521_long_statistics_name/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_long_statistics_name (children 1)" + }, + { + "explain": " Identifier t_long_statistics_name" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001817653, + "rows_read": 2, + "bytes_read": 96 + } +} diff --git a/parser/testdata/03521_long_statistics_name/metadata.json
b/parser/testdata/03521_long_statistics_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03521_long_statistics_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03521_long_statistics_name/query.sql b/parser/testdata/03521_long_statistics_name/query.sql new file mode 100644 index 000000000..d971891fd --- /dev/null +++ b/parser/testdata/03521_long_statistics_name/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS t_long_statistics_name; + +CREATE TABLE t_long_statistics_name ( +`一个非常非常非常非常非常非常非常非常非常非常非常长的中文字符串` Int, +) +ENGINE = MergeTree() +ORDER BY tuple() +SETTINGS replace_long_file_name_to_hash = 1, max_file_name_length = 127, auto_statistics_types = 'minmax,uniq'; + +INSERT INTO t_long_statistics_name VALUES (10) (100); + +SELECT + rows, + statistics, + estimates.cardinality, + estimates.min, + estimates.max, +FROM system.parts_columns +WHERE table = 't_long_statistics_name' + AND database = currentDatabase() + AND column = '一个非常非常非常非常非常非常非常非常非常非常非常长的中文字符串'; + +DROP TABLE IF EXISTS t_long_statistics_name; diff --git a/parser/testdata/03521_system_unicode/ast.json b/parser/testdata/03521_system_unicode/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03521_system_unicode/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03521_system_unicode/metadata.json b/parser/testdata/03521_system_unicode/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03521_system_unicode/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03521_system_unicode/query.sql b/parser/testdata/03521_system_unicode/query.sql new file mode 100644 index 000000000..f1582f58d --- /dev/null +++ b/parser/testdata/03521_system_unicode/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-fasttest +SELECT code_point, code_point_value FROM system.unicode WHERE code_point = '😂'; + +SELECT code_point, code_point_value FROM system.unicode WHERE emoji_presentation = 1 ORDER BY code_point_value LIMIT 5; + +SELECT code_point, lowercase_mapping FROM system.unicode WHERE code_point = 'A' or code_point = 'Ä' or code_point = 'Ω' order by code_point; +-- special mapping +SELECT code_point, uppercase_mapping, simple_uppercase_mapping FROM system.unicode WHERE code_point = 'ß'; +-- no language-specific mappings +SELECT code_point, uppercase_mapping, simple_uppercase_mapping FROM system.unicode WHERE code_point = 'i'; + +SELECT code_point, script_extensions, identifier_type FROM system.unicode WHERE code_point = 'A'; diff --git a/parser/testdata/03521_tuple_of_dynamic_with_string_comparison/ast.json b/parser/testdata/03521_tuple_of_dynamic_with_string_comparison/ast.json new file mode 100644 index 000000000..4f5ee5738 --- /dev/null +++ b/parser/testdata/03521_tuple_of_dynamic_with_string_comparison/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList
(children 2)" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal 'Dynamic'" + }, + { + "explain": " Literal '(\\'a\\')'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001131496, + "rows_read": 13, + "bytes_read": 493 + } +} diff --git a/parser/testdata/03521_tuple_of_dynamic_with_string_comparison/metadata.json b/parser/testdata/03521_tuple_of_dynamic_with_string_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03521_tuple_of_dynamic_with_string_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03521_tuple_of_dynamic_with_string_comparison/query.sql b/parser/testdata/03521_tuple_of_dynamic_with_string_comparison/query.sql new file mode 100644 index 000000000..6b3ffb5d7 --- /dev/null +++ b/parser/testdata/03521_tuple_of_dynamic_with_string_comparison/query.sql @@ -0,0 +1,3 @@ +select tuple('a'::Dynamic) = '(\'a\')'; +select tuple(materialize('a')::Dynamic) = '(\'a\')'; + diff --git a/parser/testdata/03522_alter_modify_column_and_materialize_projection/ast.json b/parser/testdata/03522_alter_modify_column_and_materialize_projection/ast.json new file mode 100644 index 000000000..94fd5a272 --- /dev/null +++ b/parser/testdata/03522_alter_modify_column_and_materialize_projection/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001910967, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03522_alter_modify_column_and_materialize_projection/metadata.json b/parser/testdata/03522_alter_modify_column_and_materialize_projection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03522_alter_modify_column_and_materialize_projection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03522_alter_modify_column_and_materialize_projection/query.sql b/parser/testdata/03522_alter_modify_column_and_materialize_projection/query.sql new file mode 100644 index 000000000..ca93719de --- /dev/null +++ b/parser/testdata/03522_alter_modify_column_and_materialize_projection/query.sql @@ -0,0 +1,9 @@ +drop table if exists test; +create table test (s String) engine=MergeTree order by tuple() settings min_bytes_for_wide_part=0; +insert into test select 'str' from numbers(1); +alter table test modify column s Nullable(String); +alter table test add projection p1 (select s order by s); +alter table test materialize projection p1 settings mutations_sync=1; +select * from test; +drop table test; + diff --git a/parser/testdata/03522_analyzer_check_correlated_columns/ast.json b/parser/testdata/03522_analyzer_check_correlated_columns/ast.json new file mode 100644 index 000000000..fbd0e98d2 --- /dev/null +++ b/parser/testdata/03522_analyzer_check_correlated_columns/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001341588, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03522_analyzer_check_correlated_columns/metadata.json b/parser/testdata/03522_analyzer_check_correlated_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03522_analyzer_check_correlated_columns/metadata.json @@ -0,0 
+1 @@ +{"todo": true} diff --git a/parser/testdata/03522_analyzer_check_correlated_columns/query.sql b/parser/testdata/03522_analyzer_check_correlated_columns/query.sql new file mode 100644 index 000000000..f3ca233d9 --- /dev/null +++ b/parser/testdata/03522_analyzer_check_correlated_columns/query.sql @@ -0,0 +1,7 @@ +set enable_analyzer = 1; + +SELECT min(*) y FROM (SELECT 1 IN (SELECT y)); + +WITH toDateTime(*) AS t SELECT t IN (SELECT t WHERE t IN (SELECT t)); + +SELECT (SELECT min(*) FROM (SELECT t0.c0)) AS a0, (SELECT a0) FROM (SELECT 1 c0) AS t0 SETTINGS allow_experimental_correlated_subqueries = 1; -- { serverError NOT_IMPLEMENTED } diff --git a/parser/testdata/03522_function_first_non_default/ast.json b/parser/testdata/03522_function_first_non_default/ast.json new file mode 100644 index 000000000..eae59a40a --- /dev/null +++ b/parser/testdata/03522_function_first_non_default/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function firstNonDefault (alias result) (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal NULL" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal UInt64_43" + }, + { + "explain": " Literal UInt64_256" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001109893, + "rows_read": 10, + "bytes_read": 372 + } +} diff --git a/parser/testdata/03522_function_first_non_default/metadata.json b/parser/testdata/03522_function_first_non_default/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03522_function_first_non_default/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03522_function_first_non_default/query.sql b/parser/testdata/03522_function_first_non_default/query.sql new file mode 100644 index 000000000..525801df5 --- /dev/null +++ b/parser/testdata/03522_function_first_non_default/query.sql @@ -0,0 +1,105 @@ +SELECT firstNonDefault(NULL, 0, 43, 256) AS result; +SELECT firstNonDefault(NULL :: Nullable(UInt8), 0 :: Nullable(UInt8), 42 :: UInt8) AS result; +SELECT firstNonDefault('', '0', 'hello') AS result; +SELECT firstNonDefault(NULL::Nullable(UInt8), 0::UInt8) AS result; +SELECT firstNonDefault(false, true) AS result; + +SELECT firstNonDefault([] :: Array(UInt8), [1, 2, 3] :: Array(UInt8)) AS result; +SELECT firstNonDefault(NULL::Nullable(String), ''::String, 'foo') as result, toTypeName(result); + +SELECT firstNonDefault(0::UInt8, 0::UInt16, 42::UInt32) AS result, toTypeName(result); +SELECT firstNonDefault(0::Int8, 0::Int16, 42::Int32) AS result, toTypeName(result); +SELECT firstNonDefault(0::UInt32, 0::UInt64, 42::UInt128) AS result, toTypeName(result); +SELECT firstNonDefault(0::Int128, 0::Int128, 42::Int128) AS result, toTypeName(result); +SELECT firstNonDefault(0::UInt8, 0::Int8, 42::Int16) AS result, toTypeName(result); +SELECT firstNonDefault(0::Int64, 0::Int64, 42::Int64) AS result, toTypeName(result); +SELECT firstNonDefault(0.0::Float32, 0.0::Float64, 42.5::Float64) AS result, toTypeName(result); +SELECT firstNonDefault(0::Float64, 0.0::Float64, 42.0::Float64) AS result, toTypeName(result); +SELECT firstNonDefault(NULL::Nullable(Int32), 0::Nullable(Int32), 42::Nullable(Int32)) AS result, toTypeName(result); 
+SELECT firstNonDefault(NULL, 0::Int32, 42::Nullable(Int32)) AS result, toTypeName(result); +SELECT firstNonDefault(''::String, '0'::String, 'hello'::String) AS result, toTypeName(result); +SELECT firstNonDefault(''::FixedString(5), '0'::String, 'hello'::String) AS result, toTypeName(result); +SELECT firstNonDefault([]::Array(Int32), [0]::Array(Int32), [1, 2, 3]::Array(Int32)) AS result, toTypeName(result); +SELECT firstNonDefault([]::Array(String), ['']::Array(String), ['hello']::Array(String)) AS result, toTypeName(result); +SELECT firstNonDefault(NULL::Nullable(UInt8), 0::UInt8, 42::UInt8, 100::UInt8) AS result, toTypeName(result); +SELECT firstNonDefault(NULL::Nullable(String), ''::String, '0'::String, 'hello'::String) AS result, toTypeName(result); + +SELECT firstNonDefault(NULL) AS result, toTypeName(result); +SELECT firstNonDefault(0) AS result, toTypeName(result); +SELECT firstNonDefault(''::String) AS result, toTypeName(result); +SELECT firstNonDefault([]::Array(UInt8)) AS result, toTypeName(result); + +SELECT firstNonDefault(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT firstNonDefault(0, 'hello'); -- { serverError NO_COMMON_TYPE } +SELECT firstNonDefault([]::Array(UInt8), 42); -- { serverError NO_COMMON_TYPE } +SELECT firstNonDefault([]::Array(UInt8), 'hello'); -- { serverError NO_COMMON_TYPE } +SELECT firstNonDefault(0::UInt64, 1::Int64); -- { serverError NO_COMMON_TYPE } +SELECT firstNonDefault(NULL::Nullable(Array(UInt8)), []::Array(UInt8)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT firstNonDefault( + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + number +) FROM numbers(3); + +DROP TABLE IF EXISTS test_first_truthy; + +CREATE TABLE test_first_truthy +( + a Nullable(Int32), + b Nullable(Int32), + c Nullable(String), + d Array(Int32) +) ENGINE = Memory; + +INSERT INTO test_first_truthy VALUES +(NULL, 0, NULL, []), +(0, NULL, '', []), +(NULL, NULL, NULL, []), +(0, 0, '', []), +(1, 0, '', []), +(0, 2, '', []), +(0, 0, 'hello', []), +(0, 0, '', [1, 2, 3]); + +SELECT + a, b, + firstNonDefault(a, b) AS result, + toTypeName(firstNonDefault(a, b)) AS type +FROM test_first_truthy +ORDER BY ALL; + +SELECT + c, + firstNonDefault(c, 'default'::String) AS result, + toTypeName(firstNonDefault(c, 'default'::String)) AS type +FROM test_first_truthy +ORDER BY ALL; + +SELECT + d, + firstNonDefault(d, [99, 100]::Array(Int32)) AS result, + toTypeName(firstNonDefault(d, [99, 100]::Array(Int32))) AS type +FROM test_first_truthy +ORDER BY length(result); + +SELECT + a, b, + firstNonDefault(a + b, a * b, a - b) AS result, + toTypeName(firstNonDefault(a + b, a * b, a - b)) AS type +FROM test_first_truthy +ORDER BY ALL; + +SELECT + a, b, + firstNonDefault(42, a, b) AS result1, + firstNonDefault(0, a, b) AS result2, + firstNonDefault(NULL, a, b) AS result3 +FROM test_first_truthy +ORDER BY ALL; + +DROP TABLE test_first_truthy; diff --git a/parser/testdata/03522_join_resolve_matcher_recursive_bug/ast.json b/parser/testdata/03522_join_resolve_matcher_recursive_bug/ast.json new file mode 100644 index 000000000..f9d3ac007 --- /dev/null +++ b/parser/testdata/03522_join_resolve_matcher_recursive_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + 
"name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001120387, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03522_join_resolve_matcher_recursive_bug/metadata.json b/parser/testdata/03522_join_resolve_matcher_recursive_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03522_join_resolve_matcher_recursive_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03522_join_resolve_matcher_recursive_bug/query.sql b/parser/testdata/03522_join_resolve_matcher_recursive_bug/query.sql new file mode 100644 index 000000000..b3dcfc1d6 --- /dev/null +++ b/parser/testdata/03522_join_resolve_matcher_recursive_bug/query.sql @@ -0,0 +1,3 @@ +SET enable_analyzer = 1; +SELECT 1 FROM (SELECT 1) tx JOIN VALUES ((*)) ty USING (c0); -- { serverError UNKNOWN_IDENTIFIER } +SELECT * FROM numbers(1) AS t1 FULL JOIN numbers(1, 46 AND (1 IS NULL) AND (* AND 3) ) AS t2 USING (number); diff --git a/parser/testdata/03522_join_using_bug_78907/ast.json b/parser/testdata/03522_join_using_bug_78907/ast.json new file mode 100644 index 000000000..279625f44 --- /dev/null +++ b/parser/testdata/03522_join_using_bug_78907/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery tabc__fuzz_21 (children 3)" + }, + { + "explain": " Identifier tabc__fuzz_21" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " ColumnDeclaration a (children 1)" + }, + { + "explain": " DataType Int64" + }, + { + "explain": " ColumnDeclaration b (children 2)" + }, + { + "explain": " DataType Int8" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ColumnDeclaration c (children 2)" + }, + { + "explain": " DataType DateTime" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier b" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ColumnDeclaration s (children 1)" + }, + { + "explain": " DataType Nullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType DateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier a" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.001249483, + "rows_read": 27, + "bytes_read": 968 + } +} diff --git a/parser/testdata/03522_join_using_bug_78907/metadata.json b/parser/testdata/03522_join_using_bug_78907/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03522_join_using_bug_78907/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03522_join_using_bug_78907/query.sql b/parser/testdata/03522_join_using_bug_78907/query.sql new file mode 100644 index 000000000..5e32642ad --- /dev/null +++ b/parser/testdata/03522_join_using_bug_78907/query.sql @@ -0,0 +1,8 @@ +CREATE TABLE tabc__fuzz_21 (`a` Int64, `b` Int8 ALIAS a + 1, `c` DateTime ALIAS b + 1, `s` Nullable(DateTime64(3))) ENGINE = MergeTree ORDER 
BY a; +INSERT INTO tabc__fuzz_21 (a, s) SELECT number, concat('abc', toString(number)) FROM numbers(4); + +CREATE TABLE tb__fuzz_0 (`b` Nullable(Int32)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO tb__fuzz_0 SELECT number FROM numbers(4); + + +SELECT (*, 8, 8, 8, toFixedString('FirstKey', 8), 8, 8, (toNullable(8) IS NOT NULL) IS NULL, 8, toNullable(toNullable(toNullable(8))), 8, 8, 'FirstKey', (8 IS NOT NULL) IS NOT NULL), *, b + 1 AS a FROM tb__fuzz_0 GLOBAL ALL INNER JOIN tabc__fuzz_21 USING (a) ORDER BY `ALL` DESC SETTINGS enable_analyzer = 1, max_block_size = 900, max_threads = 20, receive_timeout = 10., receive_data_timeout_ms = 10000, allow_suspicious_low_cardinality_types = true, merge_tree_min_rows_for_concurrent_read = 1000, log_queries = true, table_function_remote_max_addresses = 200, join_use_nulls = true, max_execution_time = 10., read_in_order_use_virtual_row = true, allow_introspection_functions = true, mutations_sync = 2, optimize_trivial_insert_select = true, aggregate_functions_null_for_empty = true, enable_filesystem_cache = false, allow_prefetched_read_pool_for_remote_filesystem = false, parallel_replicas_for_cluster_engines = false, allow_experimental_analyzer = true, analyzer_compatibility_join_using_top_level_identifier = true; diff --git a/parser/testdata/03522_nullable_partition_key/ast.json b/parser/testdata/03522_nullable_partition_key/ast.json new file mode 100644 index 000000000..706300c68 --- /dev/null +++ b/parser/testdata/03522_nullable_partition_key/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 3)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration c0 (children 1)" + }, + { + "explain": " DataType Nullable (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType Int" + }, + { + "explain": " Storage definition (children 4)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier c0" + }, + { + "explain": " Identifier c0" + }, + { + "explain": " Set" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001305181, + "rows_read": 14, + "bytes_read": 449 + } +} diff --git a/parser/testdata/03522_nullable_partition_key/metadata.json b/parser/testdata/03522_nullable_partition_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03522_nullable_partition_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03522_nullable_partition_key/query.sql b/parser/testdata/03522_nullable_partition_key/query.sql new file mode 100644 index 000000000..589a0642c --- /dev/null +++ b/parser/testdata/03522_nullable_partition_key/query.sql @@ -0,0 +1,10 @@ +CREATE TABLE t (c0 Nullable(Int)) ENGINE = MergeTree() ORDER BY (c0) PARTITION BY (c0) SETTINGS allow_nullable_key = 1; + +INSERT INTO TABLE t (c0) VALUES (NULL),(1); +OPTIMIZE TABLE t FINAL; +SELECT c0, _part FROM t ORDER BY ALL; + +CREATE TABLE taggr (c0 Nullable(Int)) ENGINE = AggregatingMergeTree() ORDER BY (c0) PARTITION BY (c0) SETTINGS allow_nullable_key = 1; +INSERT INTO TABLE taggr (c0) VALUES (1), (2); +INSERT INTO TABLE taggr (c0) VALUES (NULL), (1), (2); +SELECT c0, _part FROM taggr FINAL ORDER BY ALL; diff --git a/parser/testdata/03522_window_table_arg/ast.json 
b/parser/testdata/03522_window_table_arg/ast.json new file mode 100644 index 000000000..21a3b5b35 --- /dev/null +++ b/parser/testdata/03522_window_table_arg/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001293565, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03522_window_table_arg/metadata.json b/parser/testdata/03522_window_table_arg/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03522_window_table_arg/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03522_window_table_arg/query.sql b/parser/testdata/03522_window_table_arg/query.sql new file mode 100644 index 000000000..369e4cf73 --- /dev/null +++ b/parser/testdata/03522_window_table_arg/query.sql @@ -0,0 +1,16 @@ +SET enable_analyzer=1; + +SELECT * FROM view( + SELECT row_number() OVER w + FROM numbers(3) + WINDOW w AS () +); + +SELECT * FROM viewExplain('EXPLAIN', '', (SELECT 1 WINDOW w0 AS ())); + +-- Fuzzed, fails, but shouldn't crash server +SELECT + number +FROM numbers(assumeNotNull(viewExplain('EXPLAIN', '', ( + SELECT 1 WINDOW w0 AS () QUALIFY number +)))) -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/03524_nullable_extremes/ast.json b/parser/testdata/03524_nullable_extremes/ast.json new file mode 100644 index 000000000..51c493935 --- /dev/null +++ b/parser/testdata/03524_nullable_extremes/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '--- int, single part'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001418801, + "rows_read": 5, + "bytes_read": 191 + } +} diff --git a/parser/testdata/03524_nullable_extremes/metadata.json b/parser/testdata/03524_nullable_extremes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03524_nullable_extremes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03524_nullable_extremes/query.sql b/parser/testdata/03524_nullable_extremes/query.sql new file mode 100644 index 000000000..669a432dc --- /dev/null +++ b/parser/testdata/03524_nullable_extremes/query.sql @@ -0,0 +1,19 @@ +SELECT '--- int, single part'; +CREATE TABLE single_int (k Int64, c Nullable(Int64)) Engine=MergeTree ORDER BY k; +INSERT INTO single_int VALUES (1, 1), (2, 2), (3, NULL); +SELECT c FROM single_int ORDER BY ALL SETTINGS extremes=1; + +SELECT '--- int, multi part'; +CREATE TABLE multi_int (k Int64, c Nullable(Int64)) Engine=MergeTree ORDER BY k PARTITION BY k; +INSERT INTO multi_int VALUES (1, 1), (2, 2), (3, NULL); +SELECT c FROM multi_int ORDER BY ALL SETTINGS extremes=1; + +SELECT '--- float, single part'; +CREATE TABLE single_float (k Int64, c Float64) Engine=MergeTree ORDER BY c; +INSERT INTO single_float VALUES (1, 1), (2, 2), (3, 0/0); +SELECT c FROM single_float ORDER BY ALL SETTINGS extremes=1; + +SELECT '--- float, multi part'; +CREATE TABLE multi_float (k Int64, c Float64) Engine=MergeTree ORDER BY k PARTITION BY k; +INSERT INTO multi_float VALUES (1, 1), (2, 2), (3, 0/0); +SELECT c FROM multi_float ORDER BY ALL SETTINGS extremes=1; diff --git 
a/parser/testdata/03524_sign_argument/ast.json b/parser/testdata/03524_sign_argument/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03524_sign_argument/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03524_sign_argument/metadata.json b/parser/testdata/03524_sign_argument/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03524_sign_argument/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03524_sign_argument/query.sql b/parser/testdata/03524_sign_argument/query.sql new file mode 100644 index 000000000..ca93f231a --- /dev/null +++ b/parser/testdata/03524_sign_argument/query.sql @@ -0,0 +1,18 @@ +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; +CREATE DATABASE IF NOT EXISTS {CLICKHOUSE_DATABASE:Identifier}; +USE {CLICKHOUSE_DATABASE:Identifier}; +CREATE TABLE IF NOT EXISTS {CLICKHOUSE_DATABASE:Identifier}.t0 (c0 String) ENGINE = Memory() ; +CREATE TABLE IF NOT EXISTS {CLICKHOUSE_DATABASE:Identifier}.t1 (c0 String, c1 Int32, c2 Int32 CODEC (ZSTD)) ENGINE = Memory() ; +CREATE TABLE IF NOT EXISTS {CLICKHOUSE_DATABASE:Identifier}.t2 (c0 Int32, c1 Int32, c2 String) ENGINE = MergeTree() ORDER BY ((c1)/(c0)) PARTITION BY (- (c1)) SETTINGS allow_suspicious_indices=1; +CREATE TABLE IF NOT EXISTS {CLICKHOUSE_DATABASE:Identifier}.t3 (c0 Int32, c1 Int32, c2 Int32) ENGINE = Log() ; +INSERT INTO t0(c0) VALUES ('f'), (''); +INSERT INTO t2(c0) VALUES (1310905838), (79356129), (669114174); +INSERT INTO t2(c2, c0, c1) VALUES ('\raOA', 666486049, 1540502719); +INSERT INTO t0(c0) VALUES ('-1577479114'), (''); +INSERT INTO t1(c1) VALUES (1329054001); +INSERT INTO t3(c0, c2) VALUES (-1195000556, -635596661); + +SELECT right_0.c0, ((pow(pow(right_1.c1,t1.c2),(sign (t1.c2))))*((- (((t1.c2)/(t1.c2)))))), t1.c1 FROM t1 RIGHT OUTER JOIN t0 AS right_0 ON (('.D')=('m\'X')) RIGHT OUTER JOIN t3 AS right_1 ON ((t1.c1)=(right_1.c1)) ORDER BY ALL; + +USE default; +DROP DATABASE {CLICKHOUSE_DATABASE:Identifier}; diff --git a/parser/testdata/03525_distributed_product_mode_local_IN_cross_replication_analyzer_bug/ast.json b/parser/testdata/03525_distributed_product_mode_local_IN_cross_replication_analyzer_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03525_distributed_product_mode_local_IN_cross_replication_analyzer_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03525_distributed_product_mode_local_IN_cross_replication_analyzer_bug/metadata.json b/parser/testdata/03525_distributed_product_mode_local_IN_cross_replication_analyzer_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03525_distributed_product_mode_local_IN_cross_replication_analyzer_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03525_distributed_product_mode_local_IN_cross_replication_analyzer_bug/query.sql b/parser/testdata/03525_distributed_product_mode_local_IN_cross_replication_analyzer_bug/query.sql new file mode 100644 index 000000000..dc7658a83 --- /dev/null +++ b/parser/testdata/03525_distributed_product_mode_local_IN_cross_replication_analyzer_bug/query.sql @@ -0,0 +1,52 @@ +-- Tags: no-parallel + +create database if not exists shard_0; +create database if not exists shard_1; +drop table if exists shard_0.test; +drop table if exists shard_1.test; +drop table if exists test_dist; + +CREATE TABLE shard_0.test +( + `id` UInt32, + `name` String, + `dtm` UInt32 
+) +ENGINE = MergeTree +PARTITION BY dtm +ORDER BY id +SETTINGS index_granularity = 8192; + +CREATE TABLE shard_1.test +( + `id` UInt32, + `name` String, + `dtm` UInt32 +) +ENGINE = MergeTree +PARTITION BY dtm +ORDER BY id +SETTINGS index_granularity = 8192; + +CREATE TABLE test_dist +( + `id` UInt32, + `name` String, + `dtm` UInt32 +) +ENGINE = Distributed('test_cluster_two_shards_different_databases', '', 'test'); + +insert into shard_0.test select number, number, number % 3 from numbers(6); +insert into shard_1.test select number + 3, number, (number + 1) % 3 from numbers(6); + +-- { echoOn } + +select _shard_num, * from test_dist order by id, _shard_num; + +select count() from test_dist a where id in (select id from test_dist where dtm != 1 settings distributed_product_mode='allow') settings enable_analyzer=1; +select count() from test_dist a where id in (select id from test_dist where dtm != 1 settings distributed_product_mode='local') settings enable_analyzer=1; + +select count() from test_dist a where id in (select id from test_dist where dtm != 1 settings distributed_product_mode='local') and id in (select id from test_dist where dtm != 2 settings distributed_product_mode='local') settings enable_analyzer=1; +select count() from test_dist a where id in (select id from test_dist where dtm != 1 settings distributed_product_mode='local') and id in (select id from test_dist where dtm != 2 settings distributed_product_mode='allow') settings enable_analyzer=1; +select count() from test_dist a where id in (select id from test_dist where dtm != 1 settings distributed_product_mode='allow') and id in (select id from test_dist where dtm != 2 settings distributed_product_mode='local') settings enable_analyzer=1; +select count() from test_dist a where id in (select id from test_dist where dtm != 1 settings distributed_product_mode='allow') and id in (select id from test_dist where dtm != 2 settings distributed_product_mode='allow') settings enable_analyzer=1; diff --git a/parser/testdata/03525_json_extract_datetime64_from_numbers/ast.json b/parser/testdata/03525_json_extract_datetime64_from_numbers/ast.json new file mode 100644 index 000000000..d491630fc --- /dev/null +++ b/parser/testdata/03525_json_extract_datetime64_from_numbers/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001544037, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03525_json_extract_datetime64_from_numbers/metadata.json b/parser/testdata/03525_json_extract_datetime64_from_numbers/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03525_json_extract_datetime64_from_numbers/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03525_json_extract_datetime64_from_numbers/query.sql b/parser/testdata/03525_json_extract_datetime64_from_numbers/query.sql new file mode 100644 index 000000000..5ad8731eb --- /dev/null +++ b/parser/testdata/03525_json_extract_datetime64_from_numbers/query.sql @@ -0,0 +1,8 @@ +set session_timezone='UTC'; + +select JSONExtract('{"utc" : 1747771112221}', 'utc', 'DateTime64(3)'); +select JSONExtract('{"utc" : -1747771112221}', 'utc', 'DateTime64(3)'); +select '{"utc" : 1747771112221}'::JSON(utc DateTime64); +select '{"utc" : -1747771112221}'::JSON(utc DateTime64); + + diff --git a/parser/testdata/03525_json_infer_array_of_dynamic_from_array_of_different_types/ast.json 
b/parser/testdata/03525_json_infer_array_of_dynamic_from_array_of_different_types/ast.json new file mode 100644 index 000000000..9230b8b5f --- /dev/null +++ b/parser/testdata/03525_json_infer_array_of_dynamic_from_array_of_different_types/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001438955, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03525_json_infer_array_of_dynamic_from_array_of_different_types/metadata.json b/parser/testdata/03525_json_infer_array_of_dynamic_from_array_of_different_types/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03525_json_infer_array_of_dynamic_from_array_of_different_types/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03525_json_infer_array_of_dynamic_from_array_of_different_types/query.sql b/parser/testdata/03525_json_infer_array_of_dynamic_from_array_of_different_types/query.sql new file mode 100644 index 000000000..96b24049e --- /dev/null +++ b/parser/testdata/03525_json_infer_array_of_dynamic_from_array_of_different_types/query.sql @@ -0,0 +1,12 @@ +set enable_analyzer=1; + +desc format(JSONEachRow, '{"a" : [42, "hello", [1, 2, 3]]}'); +select a, toTypeName(a), arrayMap(x -> dynamicType(x), a) from format(JSONEachRow, '{"a" : [42, "hello", [1, 2, 3]]}'); +desc format(JSONEachRow, '{"a" : [42, "hello"]}'); +select a, toTypeName(a), arrayMap(x -> dynamicType(x), a) from format(JSONEachRow, '{"a" : [42, "hello"]}'); +desc format(JSONEachRow, '{"a" : [42, "hello", {"b" : 42}]}'); +select a, toTypeName(a), arrayMap(x -> dynamicType(x), a) from format(JSONEachRow, '{"a" : [42, "hello", {"b" : 42}]}'); + +select '{"a" : [42, "hello", [1, 2, 3]]}'::JSON as json, JSONAllPathsWithTypes(json), dynamicType(json.a), json.a.:`Array(Dynamic)`.Int64, json.a.:`Array(Dynamic)`.String, json.a.:`Array(Dynamic)`.`Array(Nullable(Int64))`; +select '{"a" : [42, "hello", [1, 2, 3], {"b" : 42}]}'::JSON as json, JSONAllPathsWithTypes(json), dynamicType(json.a), json.a.:`Array(Dynamic)`.Int64, json.a.:`Array(Dynamic)`.String, json.a.:`Array(Dynamic)`.`Array(Nullable(Int64))`, json.a.:`Array(Dynamic)`.JSON.b.:Int64; +select '{"a" : [42, "hello", [1, 2, 3]]}'::JSON::JSON(a Array(String)) as json, JSONAllPathsWithTypes(json); diff --git a/parser/testdata/03525_timezoneof_illegal_type/ast.json b/parser/testdata/03525_timezoneof_illegal_type/ast.json new file mode 100644 index 000000000..412c1f5b6 --- /dev/null +++ b/parser/testdata/03525_timezoneof_illegal_type/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function timeZoneOf (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.000996971, + "rows_read": 7, + "bytes_read": 263 + } +} diff --git a/parser/testdata/03525_timezoneof_illegal_type/metadata.json b/parser/testdata/03525_timezoneof_illegal_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03525_timezoneof_illegal_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff 
--git a/parser/testdata/03525_timezoneof_illegal_type/query.sql b/parser/testdata/03525_timezoneof_illegal_type/query.sql new file mode 100644 index 000000000..3cfc432e9 --- /dev/null +++ b/parser/testdata/03525_timezoneof_illegal_type/query.sql @@ -0,0 +1 @@ +SELECT timeZoneOf(1) -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/03525_transform_null_in_subqeury_with_not_nullable_type/ast.json b/parser/testdata/03525_transform_null_in_subqeury_with_not_nullable_type/ast.json new file mode 100644 index 000000000..20c7dcbc7 --- /dev/null +++ b/parser/testdata/03525_transform_null_in_subqeury_with_not_nullable_type/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001350261, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03525_transform_null_in_subqeury_with_not_nullable_type/metadata.json b/parser/testdata/03525_transform_null_in_subqeury_with_not_nullable_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03525_transform_null_in_subqeury_with_not_nullable_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03525_transform_null_in_subqeury_with_not_nullable_type/query.sql b/parser/testdata/03525_transform_null_in_subqeury_with_not_nullable_type/query.sql new file mode 100644 index 000000000..f0d861184 --- /dev/null +++ b/parser/testdata/03525_transform_null_in_subqeury_with_not_nullable_type/query.sql @@ -0,0 +1,10 @@ +set transform_null_in=1; + +select null::Nullable(String) in (select 'abc'); +select (null::Nullable(String), 42) in (select 'abc', 42); +select (null::Nullable(String), null::Nullable(UInt32)) in (select 'abc', 42); + +select (number % 2 ? null : 'abc') in (select 'abc') from numbers(2); +select (number % 2 ? null : 'abc', materialize(42)) in (select 'abc', 42) from numbers(2); +select (number % 2 == 0 ? null : 'abc', number < 2 ? 
null : 42) in (select 'abc', 42) from numbers(4); + diff --git a/parser/testdata/03526_columns_substreams_in_wide_parts/ast.json b/parser/testdata/03526_columns_substreams_in_wide_parts/ast.json new file mode 100644 index 000000000..9bf7ca78f --- /dev/null +++ b/parser/testdata/03526_columns_substreams_in_wide_parts/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001057361, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03526_columns_substreams_in_wide_parts/metadata.json b/parser/testdata/03526_columns_substreams_in_wide_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03526_columns_substreams_in_wide_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03526_columns_substreams_in_wide_parts/query.sql b/parser/testdata/03526_columns_substreams_in_wide_parts/query.sql new file mode 100644 index 000000000..a56677218 --- /dev/null +++ b/parser/testdata/03526_columns_substreams_in_wide_parts/query.sql @@ -0,0 +1,78 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + int UInt32, + str String, + t Tuple( + a UInt32, + b Array(UInt32)), + json JSON(a UInt32, b Array(String)), + nested Nested(a UInt32, b UInt32) +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1, vertical_merge_algorithm_min_rows_to_activate=1000000, vertical_merge_algorithm_min_columns_to_activate=100000, object_serialization_version='v2', enable_block_number_column=0, enable_block_offset_column=0, replace_long_file_name_to_hash=0, escape_variant_subcolumn_filenames=0, serialization_info_version='basic'; + +INSERT INTO test SELECT 42, 'str', tuple(42, [1, 2, 3]), '{"a" : 42, "b" : ["a", "b", "c"], "d" : "Hello", "e" : 42, "f" : [{"g" : 42, "k" : [1, 2, 3]}]}', [1, 2, 3], [1, 2, 3]; +SELECT column, type, substreams, filenames FROM system.parts_columns where database=currentDatabase() and table = 'test' and active; +SELECT '-------------------------------------------------------------------------'; + +INSERT INTO test SELECT 42, 'str', tuple(42, [1, 2, 3]), '{"a" : 42, "b" : ["a", "b", "c"], "d" : "Hello", "e" : 42, "f" : [{"g" : 42, "k" : [1, 2, 3]}]}', [1, 2, 3], [1, 2, 3]; +OPTIMIZE TABLE test FINAL; +SELECT 'Horizontal merge'; +SELECT column, type, substreams, filenames FROM system.parts_columns where database=currentDatabase() and table = 'test' and active; +SELECT '-------------------------------------------------------------------------'; + +ALTER TABLE test MODIFY SETTING vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; +INSERT INTO test SELECT 42, 'str', tuple(42, [1, 2, 3]), '{"a" : 42, "b" : ["a", "b", "c"], "d" : "Hello", "e" : 42, "f" : [{"g" : 42, "k" : [1, 2, 3]}]}', [1, 2, 3], [1, 2, 3]; +OPTIMIZE TABLE test FINAL; +SELECT 'Vertical merge'; +SELECT column, type, substreams, filenames FROM system.parts_columns where database=currentDatabase() and table = 'test' and active; +SELECT '-------------------------------------------------------------------------'; + +SELECT 'Alter add column'; +ALTER TABLE test ADD COLUMN x Array(UInt32); +SELECT column, type, substreams, filenames FROM system.parts_columns where database=currentDatabase() and table = 'test' and active; +SELECT 
'-------------------------------------------------------------------------'; +SELECT 'After merge'; +OPTIMIZE TABLE test FINAL; +SELECT column, type, substreams, filenames FROM system.parts_columns where database=currentDatabase() and table = 'test' and active; +SELECT '-------------------------------------------------------------------------'; + +SELECT 'Alter drop column'; +ALTER TABLE test DROP COLUMN int; +SELECT column, type, substreams, filenames FROM system.parts_columns where database=currentDatabase() and table = 'test' and active; +SELECT '-------------------------------------------------------------------------'; + +SELECT 'Alter rename column'; +ALTER TABLE test RENAME COLUMN t TO tt; +SELECT column, type, substreams, filenames FROM system.parts_columns where database=currentDatabase() and table = 'test' and active; +SELECT '-------------------------------------------------------------------------'; + +SELECT 'Alter drop and rename column'; +ALTER TABLE test DROP COLUMN str, RENAME COLUMN x TO str; +SELECT column, type, substreams, filenames FROM system.parts_columns where database=currentDatabase() and table = 'test' and active; +SELECT '-------------------------------------------------------------------------'; + +SELECT 'Alter modify column'; +ALTER TABLE test MODIFY COLUMN tt Tuple(a UInt32, b Array(String), c UInt32); +SELECT column, type, substreams, filenames FROM system.parts_columns where database=currentDatabase() and table = 'test' and active; +SELECT '-------------------------------------------------------------------------'; + +SELECT 'Alter update column'; +ALTER TABLE test UPDATE tt = tuple(42, ['a'], 42) WHERE 1; +SELECT column, type, substreams, filenames FROM system.parts_columns where database=currentDatabase() and table = 'test' and active; +SELECT '-------------------------------------------------------------------------'; + +SELECT 'Alter rename nested column'; +ALTER TABLE test RENAME COLUMN nested.a to nested.aa; +SELECT column, type, substreams, filenames FROM system.parts_columns where database=currentDatabase() and table = 'test' and active; +SELECT '-------------------------------------------------------------------------'; + +SELECT 'Alter rename all nested column'; +ALTER TABLE test RENAME COLUMN nested.aa to nested.aaa, RENAME COLUMN nested.b to nested.bbb; +SELECT column, type, substreams, filenames FROM system.parts_columns where database=currentDatabase() and table = 'test' and active; +SELECT '-------------------------------------------------------------------------'; + +DROP TABLE test; + diff --git a/parser/testdata/03528_s3_insert_partition_by_whitespaces/ast.json b/parser/testdata/03528_s3_insert_partition_by_whitespaces/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03528_s3_insert_partition_by_whitespaces/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03528_s3_insert_partition_by_whitespaces/metadata.json b/parser/testdata/03528_s3_insert_partition_by_whitespaces/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03528_s3_insert_partition_by_whitespaces/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03528_s3_insert_partition_by_whitespaces/query.sql b/parser/testdata/03528_s3_insert_partition_by_whitespaces/query.sql new file mode 100644 index 000000000..ffb0ad860 --- /dev/null +++ b/parser/testdata/03528_s3_insert_partition_by_whitespaces/query.sql @@ -0,0 +1,28 @@ +-- Tags: no-fasttest +-- ^ for 
S3 + +INSERT INTO FUNCTION + s3( + s3_conn, + filename = currentDatabase() || '/{_partition_id}/test.parquet', + format = Parquet + ) + PARTITION BY 1 +SELECT + * +FROM system.numbers +LIMIT 10; + +SELECT * FROM s3(s3_conn, filename = currentDatabase() || '/1/test.parquet'); + +INSERT INTO FUNCTION + s3( + s3_conn, + filename = currentDatabase() || '/{_partition_id}/test.parquet', + format = Parquet + ) PARTITION BY 2 SELECT + * +FROM system.numbers +LIMIT 10; + +SELECT * FROM s3(s3_conn, filename = currentDatabase() || '/2/test.parquet'); diff --git a/parser/testdata/03529_quantile_deterministic_ubsan/ast.json b/parser/testdata/03529_quantile_deterministic_ubsan/ast.json new file mode 100644 index 000000000..ec298d689 --- /dev/null +++ b/parser/testdata/03529_quantile_deterministic_ubsan/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantileDeterministicState (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_8193" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001382904, + "rows_read": 14, + "bytes_read": 565 + } +} diff --git a/parser/testdata/03529_quantile_deterministic_ubsan/metadata.json b/parser/testdata/03529_quantile_deterministic_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03529_quantile_deterministic_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03529_quantile_deterministic_ubsan/query.sql b/parser/testdata/03529_quantile_deterministic_ubsan/query.sql new file mode 100644 index 000000000..31a273748 --- /dev/null +++ b/parser/testdata/03529_quantile_deterministic_ubsan/query.sql @@ -0,0 +1 @@ +SELECT quantileDeterministicState(number, 0) FROM numbers(8193); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03530_insert_into_distributed_different_types_sparseness/ast.json b/parser/testdata/03530_insert_into_distributed_different_types_sparseness/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03530_insert_into_distributed_different_types_sparseness/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03530_insert_into_distributed_different_types_sparseness/metadata.json b/parser/testdata/03530_insert_into_distributed_different_types_sparseness/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03530_insert_into_distributed_different_types_sparseness/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03530_insert_into_distributed_different_types_sparseness/query.sql b/parser/testdata/03530_insert_into_distributed_different_types_sparseness/query.sql new file mode 100644 index 000000000..f4be1307c --- /dev/null +++ b/parser/testdata/03530_insert_into_distributed_different_types_sparseness/query.sql @@ -0,0 +1,30 @@ 
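+-- Background (added note): a column is serialized as "sparse" when the fraction of default values in it is high enough; +-- only the non-default values plus their offsets are stored, and ratio_of_defaults_for_sparse_serialization sets that threshold.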
+-- Ensure that sparse columns do not lead to any errors/warnings while pushing via Distributed + +drop table if exists sparse; +drop table if exists intermediate; +drop table if exists non_sparse; +drop table if exists non_sparse_remote; +drop table if exists mv_non_sparse; +drop table if exists log; +drop table if exists log_remote; +drop table if exists mv_log; + +create table sparse (key String) engine=MergeTree order by () settings ratio_of_defaults_for_sparse_serialization=0.01; +insert into sparse select ''::String from numbers(100); +select dumpColumnStructure(*) from sparse limit 1; + +-- we need a table that supports sparse columns as intermediate, hence MergeTree +create table intermediate (key String) engine=MergeTree order by (); + +create table non_sparse (key String) engine=MergeTree order by () settings ratio_of_defaults_for_sparse_serialization=1; +create table non_sparse_remote as remote('127.1', currentDatabase(), non_sparse); +create materialized view mv_non_sparse to non_sparse_remote as select * from intermediate; + +-- now ensure that insert into Log will not break anything +create table log (key String) engine=Log; +create table log_remote as remote('127.1', currentDatabase(), log); +create materialized view mv_log to log as select * from intermediate; + +insert into intermediate select * from sparse; +select count() from non_sparse; +select count() from mv_log; diff --git a/parser/testdata/03531_check_count_for_parquet/ast.json b/parser/testdata/03531_check_count_for_parquet/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03531_check_count_for_parquet/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03531_check_count_for_parquet/metadata.json b/parser/testdata/03531_check_count_for_parquet/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03531_check_count_for_parquet/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03531_check_count_for_parquet/query.sql b/parser/testdata/03531_check_count_for_parquet/query.sql new file mode 100644 index 000000000..cf2242d9b --- /dev/null +++ b/parser/testdata/03531_check_count_for_parquet/query.sql @@ -0,0 +1,13 @@ +-- Tags: no-fasttest, no-parallel +-- no-fasttest because of Parquet +-- no-parallel because we're writing a file with a fixed name + +select count() from url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet'); +select count() from url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet') where column1 = 'meow'; +select count() from url('http://localhost:11111/test/hive_partitioning/column0=Elizabeth/sample.parquet'); + +insert into function file('03531.parquet') select * from numbers(42) settings engine_file_truncate_on_insert=1, output_format_parquet_row_group_size=10; +select sleep(1); -- quirk in schema cache: cache is not used for up to 1s after the file is written +select count() from file('03531.parquet'); +select count() from file('03531.parquet') where number = 13; +select count() from file('03531.parquet'); diff --git a/parser/testdata/03531_insert_removing_sparse_transform/ast.json b/parser/testdata/03531_insert_removing_sparse_transform/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03531_insert_removing_sparse_transform/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03531_insert_removing_sparse_transform/metadata.json
b/parser/testdata/03531_insert_removing_sparse_transform/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03531_insert_removing_sparse_transform/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03531_insert_removing_sparse_transform/query.sql b/parser/testdata/03531_insert_removing_sparse_transform/query.sql new file mode 100644 index 000000000..53d6c7efd --- /dev/null +++ b/parser/testdata/03531_insert_removing_sparse_transform/query.sql @@ -0,0 +1,30 @@ +-- Tags: no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage, no-parallel-replicas, no-flaky-check +-- - debug build adds CheckTokenTransform +-- - no-parallel-replicas - has --replace-log-memory-with-mergetree switch + +drop table if exists t_log; +drop table if exists t_mt; +drop table if exists mv; + +set max_threads=1; +set max_insert_threads=1; +set deduplicate_blocks_in_dependent_materialized_views=0; + +-- { echo } + +-- Log does not support sparse columns - RemovingSparseTransform added +create table t_log (key Int) engine=Log; +explain pipeline insert into t_log select * from system.one; + +-- MergeTree supports sparse columns - no RemovingSparseTransform +create table t_mt (key Int) engine=MergeTree order by (); +explain pipeline insert into t_mt select * from system.one; + +-- MergeTree pushes to Log, which does not support sparse columns - RemovingSparseTransform added +create materialized view mv to t_log as select * from t_mt; +explain pipeline insert into t_mt select * from system.one; +drop table mv; + +-- Log does not support sparse columns - RemovingSparseTransform added +create materialized view mv to t_mt as select * from t_log; +explain pipeline insert into t_log select * from system.one; diff --git a/parser/testdata/03532_create_user_query_on_wrong_parametric_grantees/ast.json b/parser/testdata/03532_create_user_query_on_wrong_parametric_grantees/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03532_create_user_query_on_wrong_parametric_grantees/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03532_create_user_query_on_wrong_parametric_grantees/metadata.json b/parser/testdata/03532_create_user_query_on_wrong_parametric_grantees/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03532_create_user_query_on_wrong_parametric_grantees/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03532_create_user_query_on_wrong_parametric_grantees/query.sql b/parser/testdata/03532_create_user_query_on_wrong_parametric_grantees/query.sql new file mode 100644 index 000000000..b3dfc4a18 --- /dev/null +++ b/parser/testdata/03532_create_user_query_on_wrong_parametric_grantees/query.sql @@ -0,0 +1,2 @@ +CREATE USER test_user_03532 GRANTEES {grantees:Identifier}; -- { clientError SYNTAX_ERROR } +SHOW CREATE USER test_user_03532; -- { serverError UNKNOWN_USER } diff --git a/parser/testdata/03532_divideOrNull_jit_crash/ast.json b/parser/testdata/03532_divideOrNull_jit_crash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03532_divideOrNull_jit_crash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03532_divideOrNull_jit_crash/metadata.json b/parser/testdata/03532_divideOrNull_jit_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03532_divideOrNull_jit_crash/metadata.json @@ -0,0 +1 @@ +{"todo":
true} diff --git a/parser/testdata/03532_divideOrNull_jit_crash/query.sql b/parser/testdata/03532_divideOrNull_jit_crash/query.sql new file mode 100644 index 000000000..88e635cb1 --- /dev/null +++ b/parser/testdata/03532_divideOrNull_jit_crash/query.sql @@ -0,0 +1,2 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/81346 +SELECT (NOT divideOrNull(0, *)) AND (NOT intDivOrNull(*, 1)) SETTINGS compile_expressions = 1, min_count_to_compile_expression = 0; \ No newline at end of file diff --git a/parser/testdata/03532_dynamic_column_inside_map_rollback/ast.json b/parser/testdata/03532_dynamic_column_inside_map_rollback/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03532_dynamic_column_inside_map_rollback/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03532_dynamic_column_inside_map_rollback/metadata.json b/parser/testdata/03532_dynamic_column_inside_map_rollback/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03532_dynamic_column_inside_map_rollback/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03532_dynamic_column_inside_map_rollback/query.sql b/parser/testdata/03532_dynamic_column_inside_map_rollback/query.sql new file mode 100644 index 000000000..49c36310e --- /dev/null +++ b/parser/testdata/03532_dynamic_column_inside_map_rollback/query.sql @@ -0,0 +1,5 @@ +select c0 from format(CSV, 'c0 Map(Dynamic, String)', $$ +"{}" +"{['a', 'b'] : 'a', ''a' : 1}" +$$) settings input_format_allow_errors_num=1; + diff --git a/parser/testdata/03532_dynamic_flattened_serialization_bug/ast.json b/parser/testdata/03532_dynamic_flattened_serialization_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03532_dynamic_flattened_serialization_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03532_dynamic_flattened_serialization_bug/metadata.json b/parser/testdata/03532_dynamic_flattened_serialization_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03532_dynamic_flattened_serialization_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03532_dynamic_flattened_serialization_bug/query.sql b/parser/testdata/03532_dynamic_flattened_serialization_bug/query.sql new file mode 100644 index 000000000..55acc2191 --- /dev/null +++ b/parser/testdata/03532_dynamic_flattened_serialization_bug/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-fasttest + +SET type_json_skip_duplicated_paths=1; +SET allow_suspicious_primary_key=1; + +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Variant(Int,JSON(max_dynamic_paths=3, max_dynamic_types=13))) ENGINE = SummingMergeTree() ORDER BY tuple(); +INSERT INTO TABLE t0 (c0) VALUES ('{"c0":"992:20:55.723081186","😆":{"c1":0.2173147,"c0.c1":[true,"-48:28:48"]}}'), ('{"😉😉":null,"😉😉":{},"😆":"133.255.110.124"}'), ('{"c0":[[null],"叫"],"😆":{"😉😉":{}}}'), (1683795563), ('{}'), ('{"c0.c1":"went","c1":["got"],"😆":{}}'), (982353035), (936165152), ('{"😉😉":[]}'), ('{"c1":"99.32.225.166"}'), (-1639743405), ('{"😆":[["1982-11-02 23:19:29.196875786"]]}'), (-194589659), (-1589702347), (1729726898), (1837618099), (1095229141), (-704854738), ('{"😉😉":"c:0:d:3:2:6:9:e"}'), (1245962182), (-1948473775), (771485528), (-1271733140), (-109091627), ('{"c1":"2057-03-29"}'); +INSERT INTO TABLE t0 (c0) VALUES ('{"c0.c1":[]}'), ('{"😆":"2062-12-18 16:51:55.946416515","c0.c1":[[],{},[]],"😆":"😉"}'), ('{"c0.c1":"7:9:a:c:b:7:a:c"}'), 
('{"😉😉":"230.232.166.39"}'), (-673463142), (-1607537269), ('{"c0.c1":[[],{}],"😉😉":[[294]]}'), (2158150), (-539108270), (-591335476), ('{}'), (-1687367455), ('{}'), ('{}'), ('{}'), ('{"c0":{"😉😉":["2039-11-02","728ef23e-fcbe-b1fb-6074-7dd13779ae4c"],"😉😉":["2071-11-04","兄弟姐妹","2061-03-28 19:00:48.403210868"]}}'), ('{"😉😉":{}}'), (-346391673), ('{"😆":[],"c1":"165:21:14"}'), ('{}'), (656028231), (-314759878), ('{"c0":["美国"],"c0":null,"c1":{}}'), ('{"c0":6872.02,"c1":"9:e:f:6:a:e:5:d"}'), ('{"c0":0.7,"😉😉":{"c0":{"c1":["286:45:04",-7076004352056519724]},"😉😉":"1970-08-18","😉😉":{"c0.c1":["41.142.87.9",false],"c0.c1":[true,"美国",362],"😆":true}}}'); +SELECT c0 FROM t0 FINAL SETTINGS output_format_native_use_flattened_dynamic_and_json_serialization = 1; +DROP TABLE t0; + diff --git a/parser/testdata/03532_json_dynamic_updates/ast.json b/parser/testdata/03532_json_dynamic_updates/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03532_json_dynamic_updates/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03532_json_dynamic_updates/metadata.json b/parser/testdata/03532_json_dynamic_updates/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03532_json_dynamic_updates/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03532_json_dynamic_updates/query.sql b/parser/testdata/03532_json_dynamic_updates/query.sql new file mode 100644 index 000000000..01361b170 --- /dev/null +++ b/parser/testdata/03532_json_dynamic_updates/query.sql @@ -0,0 +1,70 @@ +-- Tags: long + +set mutations_sync=1; + +drop table if exists test_updates; +create table test_updates (id UInt64, json JSON, dynamic Dynamic) engine=MergeTree order by tuple() settings min_rows_for_wide_part=10000000, min_bytes_for_wide_part=1000000000, index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into test_updates select number, '{"a" : 42, "b" : 42, "c" : 42}', 42::Int64 from numbers(1000000); +alter table test_updates update json = '{"a" : [1, 2, 3], "d" : 42}' where id >= 500000; +alter table test_updates update dynamic = [1, 2, 3]::Array(Int64) where id >= 500000; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) from test_updates order by all; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) from test_updates order by all; +select 'JSON path dynamic types'; +select dynamicType(json.a), isDynamicElementInSharedData(json.a), count() from test_updates group by all order by all; +select 'Dynamic types'; +select dynamicType(dynamic), isDynamicElementInSharedData(dynamic), count() from test_updates group by all order by all; + +select json, json.a, dynamic from test_updates where id in (499999, 500000) order by id; +select json, dynamic from test_updates format Null; + +alter table test_updates update json = '{}' where 1; +alter table test_updates update dynamic = NULL where 1; + +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) from test_updates order by all; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) from test_updates order by all; +select 'JSON path dynamic types'; +select dynamicType(json.a), isDynamicElementInSharedData(json.a), count() from test_updates group by all order by all; +select 'Dynamic types'; +select dynamicType(dynamic), isDynamicElementInSharedData(dynamic), count() from test_updates group by all order by all; + +select json, json.a, dynamic from test_updates where id in 
(499999, 500000) order by id; +select json, dynamic from test_updates format Null; + +drop table test_updates; + +create table test_updates (id UInt64, json JSON, dynamic Dynamic) engine=MergeTree order by tuple() settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1, index_granularity = 8192, index_granularity_bytes = '10Mi'; +insert into test_updates select number, '{"a" : 42, "b" : 42, "c" : 42}', 42::Int64 from numbers(1000000); +alter table test_updates update json = '{"a" : [1, 2, 3], "d" : 42}' where id >= 500000; +alter table test_updates update dynamic = [1, 2, 3]::Array(Int64) where id >= 500000; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) from test_updates order by all; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) from test_updates order by all; +select 'JSON path dynamic types'; +select dynamicType(json.a), isDynamicElementInSharedData(json.a), count() from test_updates group by all order by all; +select 'Dynamic types'; +select dynamicType(dynamic), isDynamicElementInSharedData(dynamic), count() from test_updates group by all order by all; + +select json, json.a, dynamic from test_updates where id in (499999, 500000) order by id; +select json, dynamic from test_updates format Null; + +alter table test_updates update json = '{}' where 1; +alter table test_updates update dynamic = NULL where 1; + +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) from test_updates order by all; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) from test_updates order by all; +select 'JSON path dynamic types'; +select dynamicType(json.a), isDynamicElementInSharedData(json.a), count() from test_updates group by all order by all; +select 'Dynamic types'; +select dynamicType(dynamic), isDynamicElementInSharedData(dynamic), count() from test_updates group by all order by all; + +select json, json.a, dynamic from test_updates where id in (499999, 500000) order by id; +select json, dynamic from test_updates format Null; + +drop table test_updates; diff --git a/parser/testdata/03532_pr_unused_query_cancelling_with_limit/ast.json b/parser/testdata/03532_pr_unused_query_cancelling_with_limit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03532_pr_unused_query_cancelling_with_limit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03532_pr_unused_query_cancelling_with_limit/metadata.json b/parser/testdata/03532_pr_unused_query_cancelling_with_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03532_pr_unused_query_cancelling_with_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03532_pr_unused_query_cancelling_with_limit/query.sql b/parser/testdata/03532_pr_unused_query_cancelling_with_limit/query.sql new file mode 100644 index 000000000..307582fc7 --- /dev/null +++ b/parser/testdata/03532_pr_unused_query_cancelling_with_limit/query.sql @@ -0,0 +1,11 @@ +-- the test just executes a simple select with limit with PR +-- to test concurrent query cancellation: (1) because of the limit and (2) because the reading assignment is completed by the PR coordinator +DROP TABLE IF EXISTS pr_tt; +CREATE TABLE pr_tt (k UInt64, v String, blob String) ENGINE=MergeTree() ORDER BY k; +INSERT INTO pr_tt SELECT number, toString(number), repeat('blob_', number % 10) FROM numbers(1_000); + +SET enable_parallel_replicas = 1, cluster_for_parallel_replicas =
'parallel_replicas', parallel_replicas_for_non_replicated_merge_tree = 1; + +SELECT * FROM pr_tt LIMIT 10 format Null; + +DROP TABLE pr_tt; diff --git a/parser/testdata/03532_redis_empty_variant_key/ast.json b/parser/testdata/03532_redis_empty_variant_key/ast.json new file mode 100644 index 000000000..7d28f6248 --- /dev/null +++ b/parser/testdata/03532_redis_empty_variant_key/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t0 (children 3)" + }, + { + "explain": " Identifier t0" + }, + { + "explain": " Columns definition (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration c0 (children 1)" + }, + { + "explain": " DataType Variant (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier c0" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Redis (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '<host>:<port>'" + }, + { + "explain": " Literal UInt64_0" + }, + { + "explain": " Literal '<password>'" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001004046, + "rows_read": 16, + "bytes_read": 556 + } +} diff --git a/parser/testdata/03532_redis_empty_variant_key/metadata.json b/parser/testdata/03532_redis_empty_variant_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03532_redis_empty_variant_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03532_redis_empty_variant_key/query.sql b/parser/testdata/03532_redis_empty_variant_key/query.sql new file mode 100644 index 000000000..38e38c5df --- /dev/null +++ b/parser/testdata/03532_redis_empty_variant_key/query.sql @@ -0,0 +1 @@ +CREATE TABLE t0 (c0 Variant() PRIMARY KEY) ENGINE = Redis('<host>:<port>', 0, '<password>'); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03532_use_database_syntax/ast.json b/parser/testdata/03532_use_database_syntax/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03532_use_database_syntax/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03532_use_database_syntax/metadata.json b/parser/testdata/03532_use_database_syntax/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03532_use_database_syntax/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03532_use_database_syntax/query.sql b/parser/testdata/03532_use_database_syntax/query.sql new file mode 100644 index 000000000..a79b5ccbd --- /dev/null +++ b/parser/testdata/03532_use_database_syntax/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-parallel +-- ^ creates a database + +CREATE DATABASE IF NOT EXISTS d1; + +CREATE TABLE IF NOT EXISTS d1.t1 (val Int) engine=Memory; +INSERT INTO d1.t1 SELECT 1; + +SELECT * FROM t1; -- { serverError UNKNOWN_TABLE } + +USE DATABASE d1; + +SELECT * FROM t1; + +DROP TABLE t1; +DROP DATABASE d1; + +CREATE DATABASE IF NOT EXISTS database; + +USE DATABASE database; +USE database; + +DROP DATABASE database; diff --git a/parser/testdata/03532_window_function_and_null_source_max_threads/ast.json b/parser/testdata/03532_window_function_and_null_source_max_threads/ast.json new file mode 100644 index 000000000..d6084def7 --- /dev/null +++ 
b/parser/testdata/03532_window_function_and_null_source_max_threads/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery empty (children 3)" + }, + { + "explain": " Identifier empty" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration n (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier n" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.00150288, + "rows_read": 10, + "bytes_read": 338 + } +} diff --git a/parser/testdata/03532_window_function_and_null_source_max_threads/metadata.json b/parser/testdata/03532_window_function_and_null_source_max_threads/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03532_window_function_and_null_source_max_threads/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03532_window_function_and_null_source_max_threads/query.sql b/parser/testdata/03532_window_function_and_null_source_max_threads/query.sql new file mode 100644 index 000000000..42abe552f --- /dev/null +++ b/parser/testdata/03532_window_function_and_null_source_max_threads/query.sql @@ -0,0 +1,143 @@ +CREATE TABLE empty (n UInt64) ENGINE = MergeTree() ORDER BY n; + +-- A query that reproduces the problem: it has a JOIN of two empty tables followed by some window functions. +-- Before the fix, the max_threads limit was lost and the resulting pipeline was resized multiple times, multiplying the number of streams by 20 +-- So the result of the EXPLAIN below looked like this: +-- +--(Expression) +--ExpressionTransform × 160000 +-- (Window) +-- Resize 400 → 160000 +-- WindowTransform × 400 +-- (Sorting) +-- MergeSortingTransform × 400 +-- LimitsCheckingTransform × 400 +-- PartialSortingTransform × 400 +-- Resize × 400 400 → 1 +-- ScatterByPartitionTransform × 400 1 → 400 +-- (Expression) +-- ExpressionTransform × 400 +-- (Window) +-- Resize 20 → 400 +-- WindowTransform × 20 +-- (Sorting) +-- MergeSortingTransform × 20 +-- LimitsCheckingTransform × 20 +-- PartialSortingTransform × 20 +-- Resize × 20 20 → 1 +-- ScatterByPartitionTransform × 20 1 → 20 +-- (Expression) +-- ExpressionTransform × 20 +-- (Expression) +-- ExpressionTransform × 20 +-- (Join) +-- SimpleSquashingTransform × 20 +-- ColumnPermuteTransform × 20 +-- JoiningTransform × 20 2 → 1 +-- Resize 1 → 20 +-- (Expression) +-- ExpressionTransform +-- (Expression) +-- ExpressionTransform +-- (ReadFromPreparedSource) +-- NullSource 0 → 1 +-- (Expression) +-- Resize × 2 20 → 1 +-- .....
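+-- Reading the diagram (added note): '× N' is the number of parallel copies of a processor and 'Resize A → B' +-- changes the number of streams, so with a working max_threads = 20 limit no stream count should exceed 20.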
+SELECT trimLeft(explain) FROM ( + EXPLAIN PIPELINE + WITH + nums AS + ( + SELECT + n1.n AS a, + n2.n AS b + FROM empty AS n1, empty AS n2 + WHERE (n1.n % 7) = (n2.n % 5) + ), + window1 AS + ( + SELECT + a, + lagInFrame(a, 1, a) OVER (PARTITION BY a ORDER BY a ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS lag + FROM nums + ), + window2 AS + ( + SELECT + lag, + leadInFrame(lag, 1, a) OVER (PARTITION BY a ORDER BY a ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS lead + FROM window1 + ) + SELECT lead + FROM window2 + SETTINGS max_threads = 20, enable_parallel_replicas=0 +) WHERE explain LIKE '%Resize%' LIMIT 3; + + +-- Same query but with crazy max_threads +SELECT trimLeft(explain) FROM ( + EXPLAIN PIPELINE + WITH + nums AS + ( + SELECT + n1.n AS a, + n2.n AS b + FROM empty AS n1, empty AS n2 + WHERE (n1.n % 7) = (n2.n % 5) + ), + window1 AS + ( + SELECT + a, + lagInFrame(a, 1, a) OVER (PARTITION BY a ORDER BY a ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS lag + FROM nums + ), + window2 AS + ( + SELECT + lag, + leadInFrame(lag, 1, a) OVER (PARTITION BY a ORDER BY a ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS lead + FROM window1 + ) + SELECT lead + FROM window2 + SETTINGS max_threads = 2000, enable_parallel_replicas=0 +) WHERE explain LIKE '%Resize%' LIMIT 3; -- {serverError LIMIT_EXCEEDED} + + +SELECT 'Same query with max_threads = 300'; +SELECT trimLeft(explain) FROM ( + EXPLAIN PIPELINE + WITH + nums AS + ( + SELECT + n1.n AS a, + n2.n AS b + FROM empty AS n1, empty AS n2 + WHERE (n1.n % 7) = (n2.n % 5) + ), + window1 AS + ( + SELECT + a, + lagInFrame(a, 1, a) OVER (PARTITION BY a ORDER BY a ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS lag + FROM nums + ), + window2 AS + ( + SELECT + lag, + leadInFrame(lag, 1, a) OVER (PARTITION BY a ORDER BY a ASC ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS lead + FROM window1 + ) + SELECT lead + FROM window2 + SETTINGS max_threads = 300, enable_parallel_replicas=0 +) WHERE explain LIKE '%Resize%' LIMIT 1; + +DROP TABLE empty; + + diff --git a/parser/testdata/03533_analyzer_correlated_column_check/ast.json b/parser/testdata/03533_analyzer_correlated_column_check/ast.json new file mode 100644 index 000000000..d6abfb9ed --- /dev/null +++ b/parser/testdata/03533_analyzer_correlated_column_check/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001211015, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03533_analyzer_correlated_column_check/metadata.json b/parser/testdata/03533_analyzer_correlated_column_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03533_analyzer_correlated_column_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03533_analyzer_correlated_column_check/query.sql b/parser/testdata/03533_analyzer_correlated_column_check/query.sql new file mode 100644 index 000000000..ff27c2d1a --- /dev/null +++ b/parser/testdata/03533_analyzer_correlated_column_check/query.sql @@ -0,0 +1,12 @@ +SET enable_analyzer = 1; +SET allow_experimental_correlated_subqueries = 1; + +SELECT deltaSumMerge(rows) AS delta_sum FROM (SELECT * FROM (SELECT 1 AS x, deltaSumState(arrayJoin([3, 5])) AS rows UNION ALL SELECT 3, deltaSumState(arrayJoin([4, 6])) IGNORE NULLS AS rows WITH TOTALS UNION ALL SELECT DISTINCT 2, deltaSumState(*) IGNORE NULLS AS rows QUALIFY
delta_sum = ignore(ignore(materialize(1023), *, *, toUInt256(10), *, *, 10, *, toUInt256(toUInt128(10)), 10 IS NOT NULL, 10, 10, 10, 10, materialize(toNullable(toUInt128(10))), *, isNullable(NULL), 10))) ORDER BY 1 DESC, ignore(*, *, 10, *, 10, 10, 10, 10, 10, *, toUInt128(10), 10, *, 10, NULL IS NULL, 10, 10) ASC NULLS FIRST, x ASC) ORDER BY ALL DESC NULLS FIRST; -- { serverError ILLEGAL_AGGREGATION } + +CREATE TABLE t (id Int64, path String) ENGINE = MergeTree ORDER BY path; + +SELECT explain FROM ( + SELECT * FROM viewExplain('EXPLAIN', ( + SELECT id FROM t WHERE a + ))) +WHERE equals(id AS a); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03533_named_tuple_supertype/ast.json b/parser/testdata/03533_named_tuple_supertype/ast.json new file mode 100644 index 000000000..ca38d8319 --- /dev/null +++ b/parser/testdata/03533_named_tuple_supertype/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00138296, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03533_named_tuple_supertype/metadata.json b/parser/testdata/03533_named_tuple_supertype/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03533_named_tuple_supertype/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03533_named_tuple_supertype/query.sql b/parser/testdata/03533_named_tuple_supertype/query.sql new file mode 100644 index 000000000..a4942d985 --- /dev/null +++ b/parser/testdata/03533_named_tuple_supertype/query.sql @@ -0,0 +1,13 @@ +SET enable_analyzer=1; + +CREATE TABLE named_tuples_03533_1 (`a` Tuple(s String, i Int64), `b` Tuple(s String, i Int32)) ENGINE=Memory; + +INSERT INTO named_tuples_03533_1 VALUES (('y', 20),('a', 100)), (('x',-10),('b', 10)); + +SELECT x, toTypeName(x) FROM ( SELECT arrayFilter(x -> ((x.i) > 10), [if(a.i > 0, a, b)]) AS x FROM named_tuples_03533_1 ); + +CREATE TABLE named_tuples_03533_2 (`a` Tuple(s String, i Int64), `b` Tuple(x String, y Int32)) ENGINE=Memory; + +INSERT INTO named_tuples_03533_2 VALUES (('y', 10),('a', 100)), (('x',-10),('b', 10)); + +SELECT x, toTypeName(x) FROM ( SELECT if(a.i > 0, a, b) AS x FROM named_tuples_03533_2 ); diff --git a/parser/testdata/03533_skip_index_on_data_reading/ast.json b/parser/testdata/03533_skip_index_on_data_reading/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03533_skip_index_on_data_reading/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03533_skip_index_on_data_reading/metadata.json b/parser/testdata/03533_skip_index_on_data_reading/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03533_skip_index_on_data_reading/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03533_skip_index_on_data_reading/query.sql b/parser/testdata/03533_skip_index_on_data_reading/query.sql new file mode 100644 index 000000000..dfcdf51a3 --- /dev/null +++ b/parser/testdata/03533_skip_index_on_data_reading/query.sql @@ -0,0 +1,125 @@ +-- Tags: no-parallel-replicas +-- no-parallel-replicas: use_skip_indexes_on_data_read is not supported with parallel replicas. 
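+-- The tables below are created with index_granularity = 1, so every row is its own granule; the RowsReadByPrewhereReaders / RowsReadByMainReader counters read back from system.query_log therefore count exactly the granules that survived skip-index filtering.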
+ +-- { echo ON } + +SET use_skip_indexes_on_data_read = 1; +SET max_rows_to_read = 0; + +set use_query_condition_cache=0; +set merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0; + +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + id UInt64, + event_date Date, + user_id UInt32, + url String, + region String, + INDEX region_idx region TYPE minmax GRANULARITY 1, + INDEX user_id_idx user_id TYPE minmax GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY (event_date, id) +SETTINGS + index_granularity = 1, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0, + max_bytes_to_merge_at_max_space_in_pool = 1; + +-- create 3 parts to test concurrent processing. +INSERT INTO test VALUES (1, '2023-01-01', 101, 'https://example.com/page1', 'europe'), (2, '2023-01-01', 102, 'https://example.com/page2', 'us_west'), (3, '2023-01-02', 106, 'https://example.com/page3', 'us_west'), (4, '2023-01-02', 107, 'https://example.com/page4', 'us_west'), (5, '2023-01-03', 104, 'https://example.com/page5', 'asia'); + +INSERT INTO test VALUES (1, '2023-01-01', 101, 'https://example.com/page1', 'europe'), (2, '2023-01-01', 102, 'https://example.com/page2', 'us_west'), (3, '2023-01-02', 106, 'https://example.com/page3', 'us_west'), (4, '2023-01-02', 107, 'https://example.com/page4', 'us_west'), (5, '2023-01-03', 104, 'https://example.com/page5', 'asia'); + +INSERT INTO test VALUES (1, '2023-01-01', 101, 'https://example.com/page1', 'europe'), (2, '2023-01-01', 102, 'https://example.com/page2', 'us_west'), (3, '2023-01-02', 106, 'https://example.com/page3', 'us_west'), (4, '2023-01-02', 107, 'https://example.com/page4', 'us_west'), (5, '2023-01-03', 104, 'https://example.com/page5', 'asia'); + +-- disable move to PREWHERE to ensure RowsReadByPrewhereReaders and RowsReadByMainReader reflect actual filtering on read behavior for testing +SET optimize_move_to_prewhere = 0; + +-- agree on one granule +SELECT * FROM test WHERE region = 'europe' AND user_id = 101 ORDER BY ALL SETTINGS log_comment = 'test_1'; + +-- all filtered +SELECT * FROM test WHERE region = 'unknown' AND user_id = 101 ORDER BY ALL SETTINGS log_comment = 'test_2'; + +-- narrowing filter via user_id_idx +SELECT * FROM test WHERE region = 'us_west' AND user_id = 106 ORDER BY ALL SETTINGS log_comment = 'test_3'; + +-- test with an OR filter - 3 rows/granules for user_id=101 union 3 rows/granules for 'asia' +SELECT * FROM test WHERE region = 'asia' OR user_id = 101 ORDER BY ALL SETTINGS log_comment = 'test_4'; + +SYSTEM FLUSH LOGS query_log; + +SELECT ProfileEvents['RowsReadByPrewhereReaders'], ProfileEvents['RowsReadByMainReader'] FROM system.query_log WHERE event_date >= yesterday() AND current_database = currentDatabase() AND type = 'QueryFinish' AND log_comment='test_1'; + +SELECT ProfileEvents['RowsReadByPrewhereReaders'], ProfileEvents['RowsReadByMainReader'] FROM system.query_log WHERE event_date >= yesterday() AND current_database = currentDatabase() AND type = 'QueryFinish' AND log_comment='test_2'; + +SELECT ProfileEvents['RowsReadByPrewhereReaders'], ProfileEvents['RowsReadByMainReader'] FROM system.query_log WHERE event_date >= yesterday() AND current_database = currentDatabase() AND type = 'QueryFinish' AND log_comment='test_3'; + +SELECT ProfileEvents['RowsReadByPrewhereReaders'], ProfileEvents['RowsReadByMainReader'] FROM system.query_log WHERE event_date >= yesterday() AND current_database = currentDatabase() AND type = 'QueryFinish' AND log_comment='test_4'; + +DROP TABLE test; + +-- check 
partially materialized index; it should only affect related parts + +DROP TABLE IF EXISTS test_partial_index; + +CREATE TABLE test_partial_index +( + id UInt64, + event_date Date, + user_id UInt32, + url String, + region String +) +ENGINE = MergeTree +ORDER BY (event_date, id) +SETTINGS + index_granularity = 1, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0, + max_bytes_to_merge_at_max_space_in_pool = 1; + +-- insert a part with no index +INSERT INTO test_partial_index VALUES (1, '2023-01-01', 101, 'https://example.com/page1', 'europe'), (2, '2023-01-01', 102, 'https://example.com/page2', 'us_west'), (3, '2023-01-02', 106, 'https://example.com/page3', 'us_west'), (4, '2023-01-02', 107, 'https://example.com/page4', 'us_west'), (5, '2023-01-03', 104, 'https://example.com/page5', 'asia'); + +ALTER TABLE test_partial_index ADD INDEX region_idx region TYPE minmax GRANULARITY 1; + +-- insert a part with region index +INSERT INTO test_partial_index VALUES (1, '2023-01-01', 101, 'https://example.com/page1', 'europe'), (2, '2023-01-01', 102, 'https://example.com/page2', 'us_west'), (3, '2023-01-02', 106, 'https://example.com/page3', 'us_west'), (4, '2023-01-02', 107, 'https://example.com/page4', 'us_west'), (5, '2023-01-03', 104, 'https://example.com/page5', 'asia'); + +ALTER TABLE test_partial_index ADD INDEX user_id_idx user_id TYPE minmax GRANULARITY 1; + +-- insert a part with user_id index +INSERT INTO test_partial_index VALUES (1, '2023-01-01', 101, 'https://example.com/page1', 'europe'), (2, '2023-01-01', 102, 'https://example.com/page2', 'us_west'), (3, '2023-01-02', 106, 'https://example.com/page3', 'us_west'), (4, '2023-01-02', 107, 'https://example.com/page4', 'us_west'), (5, '2023-01-03', 104, 'https://example.com/page5', 'asia'); + +-- agree on one granule +SELECT * FROM test_partial_index WHERE region = 'europe' AND user_id = 101 ORDER BY ALL SETTINGS log_comment = 'test_partial_1'; + +-- all filtered +SELECT * FROM test_partial_index WHERE region = 'unknown' AND user_id = 101 ORDER BY ALL SETTINGS log_comment = 'test_partial_2'; + +-- narrowing filter via user_id_idx +SELECT * FROM test_partial_index WHERE region = 'us_west' AND user_id = 106 ORDER BY ALL SETTINGS log_comment = 'test_partial_3'; + +-- Skip indexes on OR are supported. +-- All 5 rows from part1 (no skip indexes) + +-- All 5 rows from part2 (because no index on user_id) + +-- 2 rows from part3 -> 1 row each for region='asia' and user_id=101.
+-- Total 12 +SELECT * FROM test_partial_index WHERE region = 'asia' OR user_id = 101 ORDER BY ALL SETTINGS log_comment = 'test_partial_4'; + +SYSTEM FLUSH LOGS query_log; + +SELECT ProfileEvents['RowsReadByPrewhereReaders'], ProfileEvents['RowsReadByMainReader'] FROM system.query_log WHERE event_date >= yesterday() AND current_database = currentDatabase() AND type = 'QueryFinish' AND log_comment='test_partial_1'; + +SELECT ProfileEvents['RowsReadByPrewhereReaders'], ProfileEvents['RowsReadByMainReader'] FROM system.query_log WHERE event_date >= yesterday() AND current_database = currentDatabase() AND type = 'QueryFinish' AND log_comment='test_partial_2'; + +SELECT ProfileEvents['RowsReadByPrewhereReaders'], ProfileEvents['RowsReadByMainReader'] FROM system.query_log WHERE event_date >= yesterday() AND current_database = currentDatabase() AND type = 'QueryFinish' AND log_comment='test_partial_3'; + +SELECT ProfileEvents['RowsReadByPrewhereReaders'], ProfileEvents['RowsReadByMainReader'] FROM system.query_log WHERE event_date >= yesterday() AND current_database = currentDatabase() AND type = 'QueryFinish' AND log_comment='test_partial_4'; + +DROP TABLE test_partial_index; diff --git a/parser/testdata/03533_xirr/ast.json b/parser/testdata/03533_xirr/ast.json new file mode 100644 index 000000000..b37fd05ed --- /dev/null +++ b/parser/testdata/03533_xirr/ast.json @@ -0,0 +1,94 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function round (alias xirr_rate) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function financialInternalRateOfReturnExtended (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[Int64_-10000, UInt64_5750, UInt64_4250, UInt64_3250]" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2020-01-01'" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2020-03-01'" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2020-10-30'" + }, + { + "explain": " Function toDate (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '2021-02-15'" + }, + { + "explain": " Literal UInt64_6" + } + ], + + "rows": 24, + + "statistics": + { + "elapsed": 0.001221192, + "rows_read": 24, + "bytes_read": 1089 + } +} diff --git a/parser/testdata/03533_xirr/metadata.json b/parser/testdata/03533_xirr/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03533_xirr/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03533_xirr/query.sql b/parser/testdata/03533_xirr/query.sql new file mode 100644 index 000000000..6ce675f2b --- /dev/null +++ b/parser/testdata/03533_xirr/query.sql @@ -0,0 +1,145 @@ +SELECT round(financialInternalRateOfReturnExtended([-10000, 5750, 4250, 3250], [toDate('2020-01-01'), toDate('2020-03-01'), toDate('2020-10-30'), toDate('2021-02-15')]), 6) AS 
xirr_rate; +SELECT round(financialInternalRateOfReturnExtended([-10000, 5750, 4250, 3250], [toDate('2020-01-01'), toDate('2020-03-01'), toDate('2020-10-30'), toDate('2021-02-15')], 0.5), 6) AS xirr_rate; +SELECT round(financialInternalRateOfReturnExtended([-10000, 5750, 4250, 3250], [toDate32('2020-01-01'), toDate32('2020-03-01'), toDate32('2020-10-30'), toDate32('2021-02-15')]), 6) AS xirr_rate; + +SELECT 'Different day count modes:'; +SELECT round(financialInternalRateOfReturnExtended([100000, -110000], [toDate('2020-01-01'), toDate('2021-01-01')], 0.1, 'ACT_365F'), 6) AS xirr_365, + round(financialInternalRateOfReturnExtended([100000, -110000], [toDate('2020-01-01'), toDate('2021-01-01')], 0.1, 'ACT_365_25'), 6) AS xirr_365_25; + +SELECT financialInternalRateOfReturnExtended(123, toDate('2020-01-01')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT financialInternalRateOfReturnExtended([123], toDate('2020-01-01')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT financialInternalRateOfReturnExtended(123, [toDate('2020-01-01')]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT round(financialInternalRateOfReturnExtended([-10000], [toDate32('2020-01-01'), toDate32('2020-03-01'), toDate32('2020-10-30'), toDate32('2021-02-15')]), 6) AS xirr_rate; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT round(financialInternalRateOfReturnExtended([-10000, NULL, 4250, 3250], [toDate32('2020-01-01'), toDate32('2020-03-01'), toDate32('2020-10-30'), toDate32('2021-02-15')]), 6) AS xirr_rate; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT financialInternalRateOfReturnExtended([-100, 110], [toDate('2020-01-01'), toDate('2020-02-01')], 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT financialInternalRateOfReturnExtended([-100, 110], [toDate('2020-01-01'), toDate('2020-02-01')], 1.0, 'QWERTY'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 'Zero cashflow entries -> NaN:'; +SELECT financialInternalRateOfReturnExtended([]::Array(Float32), []::Array(Date)); +SELECT 'Just one cashflow entry -> NaN:'; +SELECT financialInternalRateOfReturnExtended([-10000], [toDate('2020-01-01')]); +SELECT 'Zero cashflow -> NaN:'; +SELECT financialInternalRateOfReturnExtended([-0., 0.], [toDate('2020-01-01'), toDate('2020-01-02')]); +SELECT 'Unsorted dates -> NaN:'; +SELECT round(financialInternalRateOfReturnExtended([-10000, 5750, 4250, 3250], [toDate('2025-01-01'), toDate('2020-03-01'), toDate('2020-10-30'), toDate('2021-02-15')]), 6) AS xirr_rate; +SELECT 'Non-unique dates -> NaN:'; +SELECT financialInternalRateOfReturnExtended([-100, 10], [toDate('2020-01-01'), toDate('2020-01-01')]); + +CREATE TABLE IF NOT EXISTS 3533_xirr_test ( + tag String, + date Date, + date32 Date32, + value Float64, + r Float64 +) ENGINE = Memory; + +INSERT INTO 3533_xirr_test VALUES +('a', '2020-01-01', '2020-01-01', -10000, 0.08), +('a', '2020-06-01', '2020-06-01', 3000, 0.08), +('a', '2020-12-31', '2020-12-31', 8000, 0.08), +('b', '2020-03-15', '2020-03-15', -5000, 0.09), +('b', '2020-09-15', '2020-09-15', 2500, 0.09), +('b', '2021-03-15', '2021-03-15', 3000, 0.09), +('c', '2019-12-31', '2019-12-31', -15000, 0.10), +('c', '2020-04-30', '2020-04-30', 5000, 0.10), +('c', '2020-08-31', '2020-08-31', 6000, 0.10), +('c', '2020-12-31', '2020-12-31', 5000, 0.10), +('c', '2021-02-28', '2021-02-28', 2000, 0.10), +('d', '2020-01-01', '2020-01-01', -10000, 0.11), +('d', '2020-03-01', '2020-03-01', 5750, 0.11), +('d', '2020-10-30', '2020-10-30', 4250, 0.11), +('d', '2021-02-15', '2021-02-15', 3250, 0.11) +; + +SELECT + tag, 
+ round( financialInternalRateOfReturnExtended(groupArray(value), groupArray(date)), 6) AS result_f64_date, + round( financialInternalRateOfReturnExtended(groupArray(value), groupArray(date32)), 6) AS result_f64_date32, + round( financialInternalRateOfReturnExtended(groupArray(toFloat32(value)), groupArray(date)), 6) AS result_f32_date, + round( financialInternalRateOfReturnExtended(groupArray(toFloat32(value)), groupArray(date32)), 6) AS result_f32_date32, + round( financialInternalRateOfReturnExtended(groupArray(toInt64(value)), groupArray(date)), 6) AS result_i64_date, + round( financialInternalRateOfReturnExtended(groupArray(toInt64(value)), groupArray(date32)), 6) AS result_i64_date32 +FROM ( + SELECT + tag, + date, + date32, + value + FROM 3533_xirr_test + ORDER BY tag, date +) +GROUP BY tag +ORDER BY tag; + +SELECT 'IRR'; +SELECT financialInternalRateOfReturn(123); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT financialInternalRateOfReturn([1,2,NULL]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT financialInternalRateOfReturn([]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT [-100, 39, 59, 55, 20] as cf, round(financialInternalRateOfReturn(cf), 6) as irr_rate, round(financialNetPresentValue(irr_rate, cf), 6) as financialNetPresentValue_from_irr; +SELECT financialInternalRateOfReturn([0., 39., 59., 55., 20.]); + +SELECT 'XNPV:'; +SELECT financialNetPresentValueExtended(0.1, 123., [toDate('2020-01-01')]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT financialNetPresentValueExtended(0.1, [123.], toDate('2020-01-01')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT financialNetPresentValueExtended(0.1, [-100, 110], [toDate('2020-01-01'), toDate('2020-02-01')], 'QWERTY'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT financialNetPresentValueExtended(0.1, [], []); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT financialNetPresentValueExtended(0.1, [-10], [toDate('2020-01-01')]); +SELECT financialNetPresentValueExtended(0.1, [-0., 0.], [toDate('2020-01-01'), toDate('2020-01-02')]); +SELECT round(financialNetPresentValueExtended(0.1, [-10_000., 5750., 4250., 3250.], [toDate('2020-01-01'), toDate('2020-03-01'), toDate('2020-10-30'), toDate('2021-02-15')]), 6); +SELECT round(financialNetPresentValueExtended(0.1, [-10_000., 5750., 4250., 3250.], [toDate('2020-01-01'), toDate('2020-03-01'), toDate('2020-10-30'), toDate('2021-02-15')], 'ACT_365_25'), 6); + +SELECT tag, + round(financialNetPresentValueExtended(any(r), groupArray(value), groupArray(date)), 6) AS financialNetPresentValueExtended_f64_date, + round(financialNetPresentValueExtended(any(r), groupArray(value), groupArray(date32)), 6) AS financialNetPresentValueExtended_f64_date32, + round(financialNetPresentValueExtended(any(toFloat32(r)), groupArray(toFloat32(value)), groupArray(date)), 6) AS financialNetPresentValueExtended_f32_date +FROM ( + SELECT + tag, + date, + date32, + value, + r + FROM 3533_xirr_test + ORDER BY tag, date +) +GROUP BY tag +ORDER BY tag; + + +SELECT 'NPV:'; +SELECT financialNetPresentValue(0.1, 123., True); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT financialNetPresentValue(0.1, [1.,2.], 2.); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT financialNetPresentValue(0.1, [1.,NULL]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT financialNetPresentValue(0.1, []); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT round(financialNetPresentValue(0.08, [-40_000., 5_000., 8_000., 12_000., 30_000.]), 6); +SELECT round(financialNetPresentValue(0.08, 
[-40_000., 5_000., 8_000., 12_000., 30_000.], True), 6); +SELECT round(financialNetPresentValue(0.08, [-40_000., 5_000., 8_000., 12_000., 30_000.], False), 6); + +SELECT tag, + round(financialNetPresentValue(any(r), groupArray(value)), 6) AS financialNetPresentValueExtended_f64_date, + round(financialNetPresentValue(any(r), groupArray(value)), 6) AS financialNetPresentValueExtended_f64_date32, + round(financialNetPresentValue(any(toFloat32(r)), groupArray(toFloat32(value))), 6) AS financialNetPresentValueExtended_f32_date +FROM ( + SELECT + tag, + date, + date32, + value, + r + FROM 3533_xirr_test + ORDER BY tag, date +) +GROUP BY tag +ORDER BY tag; + + +DROP TABLE IF EXISTS 3533_xirr_test; + +SELECT 'Excel docs example:'; +SELECT round(financialNetPresentValue(0.1, [-10000, 3000, 4200, 6800], False), 6); +SELECT round(financialNetPresentValue(0.08, [8000., 9200., 10000., 12000., 14500.], False), 6) - 40000; +SELECT round(financialNetPresentValueExtended(0.09, [-10_000, 2750, 4250, 3250, 2750], [toDate('2008-01-01'), toDate('2008-03-01'), toDate('2008-10-30'), toDate('2009-02-15'), toDate('2009-04-01')], 'ACT_365F'), 6); +SELECT round(financialInternalRateOfReturn([-70000, 12000, 15000, 18000, 21000, 26000]), 6); +SELECT round(financialInternalRateOfReturnExtended([-10000, 2750, 4250, 3250, 2750], [toDate32('2008-01-01'), toDate32('2008-03-01'), toDate32('2008-10-30'), toDate32('2009-02-15'), toDate32('2009-04-01')]), 6); + diff --git a/parser/testdata/03534_npy_output_to_url/ast.json b/parser/testdata/03534_npy_output_to_url/ast.json new file mode 100644 index 000000000..b2e018d6d --- /dev/null +++ b/parser/testdata/03534_npy_output_to_url/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001102167, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03534_npy_output_to_url/metadata.json b/parser/testdata/03534_npy_output_to_url/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03534_npy_output_to_url/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03534_npy_output_to_url/query.sql b/parser/testdata/03534_npy_output_to_url/query.sql new file mode 100644 index 000000000..88beb5b15 --- /dev/null +++ b/parser/testdata/03534_npy_output_to_url/query.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Int) ENGINE = URL('http://localhost:80/', Npy); +INSERT INTO TABLE t0 (c0) VALUES (1); -- { serverError POCO_EXCEPTION } +DROP TABLE t0; diff --git a/parser/testdata/03534_skip_index_bug89691/ast.json b/parser/testdata/03534_skip_index_bug89691/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03534_skip_index_bug89691/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03534_skip_index_bug89691/metadata.json b/parser/testdata/03534_skip_index_bug89691/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03534_skip_index_bug89691/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03534_skip_index_bug89691/query.sql b/parser/testdata/03534_skip_index_bug89691/query.sql new file mode 100644 index 000000000..5ad369514 --- /dev/null +++ b/parser/testdata/03534_skip_index_bug89691/query.sql @@ -0,0 +1,21 @@ +-- { echo ON } + +SET use_skip_indexes_on_data_read = 1; 
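+-- Both settings matter here: use_skip_indexes keeps skip indexes enabled in general, while use_skip_indexes_on_data_read moves their evaluation to the data-reading stage.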
+SET use_skip_indexes = 1; +SET use_query_condition_cache = 0; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + `i` Int64, + `s` String, + INDEX bf_s s TYPE bloom_filter(0.001) GRANULARITY 1, +) +ENGINE = MergeTree +ORDER BY i +SETTINGS index_granularity = 4,index_granularity_bytes = 0, min_bytes_for_wide_part = 0; + +INSERT INTO tab SELECT 100, 'aaa'; -- Single-granule part, which contained rows less than index_granularity. + +SELECT i FROM tab WHERE s = 'aaa'; diff --git a/parser/testdata/03535_system_formats/ast.json b/parser/testdata/03535_system_formats/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03535_system_formats/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03535_system_formats/metadata.json b/parser/testdata/03535_system_formats/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03535_system_formats/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03535_system_formats/query.sql b/parser/testdata/03535_system_formats/query.sql new file mode 100644 index 000000000..5035e1b83 --- /dev/null +++ b/parser/testdata/03535_system_formats/query.sql @@ -0,0 +1,7 @@ +-- Tags: no-fasttest +-- ^ some formats are not included in the fast test build + +SELECT * FROM system.formats +WHERE name IN ('Arrow','ArrowStream','Avro','AvroConfluent','BSONEachRow', 'Buffers', 'CSV','CSVWithNames','CSVWithNamesAndTypes','CapnProto','CustomSeparated','CustomSeparatedIgnoreSpaces','CustomSeparatedIgnoreSpacesWithNames','CustomSeparatedIgnoreSpacesWithNamesAndTypes','CustomSeparatedWithNames','CustomSeparatedWithNamesAndTypes','Form','HiveText','JSON','JSONAsObject','JSONAsString','JSONColumns','JSONColumnsWithMetadata','JSONCompact','JSONCompactColumns','JSONCompactEachRow','JSONCompactEachRowWithNames','JSONCompactEachRowWithNamesAndTypes','JSONCompactEachRowWithProgress','JSONCompactStrings','JSONCompactStringsEachRow','JSONCompactStringsEachRowWithNames','JSONCompactStringsEachRowWithNamesAndTypes','JSONCompactStringsEachRowWithProgress','JSONEachRow','JSONEachRowWithProgress','JSONLines','JSONObjectEachRow','JSONStrings','JSONStringsEachRow','JSONStringsEachRowWithProgress','LineAsString','LineAsStringWithNames','LineAsStringWithNamesAndTypes','Markdown','MsgPack','MySQLDump','MySQLWire','NDJSON','Native','Npy','Null','ODBCDriver2','ORC','One','Parquet','ParquetMetadata','PostgreSQLWire','Pretty','PrettyCompact','PrettyCompactMonoBlock','PrettyCompactNoEscapes','PrettyCompactNoEscapesMonoBlock','PrettyJSONEachRow','PrettyJSONLines','PrettyMonoBlock','PrettyNDJSON','PrettyNoEscapes','PrettyNoEscapesMonoBlock','PrettySpace','PrettySpaceMonoBlock','PrettySpaceNoEscapes','PrettySpaceNoEscapesMonoBlock','Prometheus','Protobuf','ProtobufList','ProtobufSingle','Raw','RawBLOB','RawWithNames','RawWithNamesAndTypes','Regexp','RowBinary','RowBinaryWithDefaults','RowBinaryWithNames','RowBinaryWithNamesAndTypes','SQLInsert','TSKV','TSV','TSVRaw','TSVRawWithNames','TSVRawWithNamesAndTypes','TSVWithNames','TSVWithNamesAndTypes','TabSeparated','TabSeparatedRaw','TabSeparatedRawWithNames','TabSeparatedRawWithNamesAndTypes','TabSeparatedWithNames','TabSeparatedWithNamesAndTypes','Template','TemplateIgnoreSpaces','Values','Vertical','XML') +ORDER BY name +FORMAT PrettyCompact; diff --git a/parser/testdata/03537_kusto_ubsan/ast.json b/parser/testdata/03537_kusto_ubsan/ast.json new file mode 100644 index 000000000..8b5a0991d --- /dev/null +++ 
b/parser/testdata/03537_kusto_ubsan/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001078535, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03537_kusto_ubsan/metadata.json b/parser/testdata/03537_kusto_ubsan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03537_kusto_ubsan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03537_kusto_ubsan/query.sql b/parser/testdata/03537_kusto_ubsan/query.sql new file mode 100644 index 000000000..f1573eb9f --- /dev/null +++ b/parser/testdata/03537_kusto_ubsan/query.sql @@ -0,0 +1,2 @@ +SET interval_output_format = 'kusto'; +SELECT INTERVAL 3508 MONTH; -- { clientError BAD_ARGUMENTS } diff --git a/parser/testdata/03538_analyzer_correlated_query_collect_columns_fix/ast.json b/parser/testdata/03538_analyzer_correlated_query_collect_columns_fix/ast.json new file mode 100644 index 000000000..c2bf85eff --- /dev/null +++ b/parser/testdata/03538_analyzer_correlated_query_collect_columns_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000955455, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03538_analyzer_correlated_query_collect_columns_fix/metadata.json b/parser/testdata/03538_analyzer_correlated_query_collect_columns_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03538_analyzer_correlated_query_collect_columns_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03538_analyzer_correlated_query_collect_columns_fix/query.sql b/parser/testdata/03538_analyzer_correlated_query_collect_columns_fix/query.sql new file mode 100644 index 000000000..7a770f6c8 --- /dev/null +++ b/parser/testdata/03538_analyzer_correlated_query_collect_columns_fix/query.sql @@ -0,0 +1,13 @@ +SET enable_analyzer = 1; +SET allow_experimental_correlated_subqueries = 1; + +CREATE TABLE users (uid Int16, name String, age Int16) ORDER BY uid; + +INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users VALUES (6666, 'Ksenia', 48); +INSERT INTO users VALUES (8888, 'Alice', 50); + +SELECT name, (SELECT count() FROM numbers(50) WHERE number = age) +FROM users +ORDER BY name +SETTINGS query_plan_merge_filter_into_join_condition = 0; diff --git a/parser/testdata/03538_analyzer_filter_analysis_alias_columns/ast.json b/parser/testdata/03538_analyzer_filter_analysis_alias_columns/ast.json new file mode 100644 index 000000000..c6d2016c0 --- /dev/null +++ b/parser/testdata/03538_analyzer_filter_analysis_alias_columns/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000986985, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03538_analyzer_filter_analysis_alias_columns/metadata.json b/parser/testdata/03538_analyzer_filter_analysis_alias_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03538_analyzer_filter_analysis_alias_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03538_analyzer_filter_analysis_alias_columns/query.sql 
b/parser/testdata/03538_analyzer_filter_analysis_alias_columns/query.sql new file mode 100644 index 000000000..db4cf3add --- /dev/null +++ b/parser/testdata/03538_analyzer_filter_analysis_alias_columns/query.sql @@ -0,0 +1,19 @@ +SET allow_experimental_analyzer = 1; +SET enable_parallel_replicas = 0; + +CREATE TABLE t0 (c0 Int, c1 Int ALIAS 1) ENGINE = Memory; +CREATE TABLE t0__fuzz_42 (`c0` Array(Nullable(UInt32)), `c1` IPv4 ALIAS 1) ENGINE = Memory; +SELECT c0 FROM remote('localhost', currentDatabase(), 't0') AS tx INNER JOIN t0__fuzz_42 USING (c1); -- { serverError NOT_IMPLEMENTED } + +DROP TABLE t0; + +CREATE TABLE t0 (c0 Int ALIAS 1, c1 Int) ENGINE = Memory; +SELECT 1 FROM (SELECT 1 AS c0 FROM t0, remote('localhost:9000', currentDatabase(), 't0') ty) tx JOIN t0 ON tx.c0 = t0.c0; + +( + SELECT 1 x, x y FROM remote('localhost', currentDatabase(), t0) tx +) +UNION ALL +( + SELECT 1, c0 FROM t0 +); diff --git a/parser/testdata/03538_analyzer_lag_lead_functions/ast.json b/parser/testdata/03538_analyzer_lag_lead_functions/ast.json new file mode 100644 index 000000000..809e92b15 --- /dev/null +++ b/parser/testdata/03538_analyzer_lag_lead_functions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001158586, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03538_analyzer_lag_lead_functions/metadata.json b/parser/testdata/03538_analyzer_lag_lead_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03538_analyzer_lag_lead_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03538_analyzer_lag_lead_functions/query.sql b/parser/testdata/03538_analyzer_lag_lead_functions/query.sql new file mode 100644 index 000000000..4c5d6cf6c --- /dev/null +++ b/parser/testdata/03538_analyzer_lag_lead_functions/query.sql @@ -0,0 +1,40 @@ +set enable_analyzer = 1; + +SELECT number + ,lag(number, 1, 8472) OVER () lag + ,LAG(number, 1, 8472) OVER () lagInsensitive + ,lag(number, 1, 8472) OVER (ORDER BY number ASC) lagASC + ,lag(number, 1, 8472) OVER (ORDER BY number DESC) lagDESC + ,lead(number, 1, 8472) OVER () lead + ,LEAD(number, 1, 8472) OVER () leadInsensitive + ,lead(number, 1, 8472) OVER (ORDER BY number DESC) leadDESC + ,lead(number, 1, 8472) OVER (ORDER BY number ASC) leadASC +FROM numbers(5) +ORDER BY number +FORMAT Pretty; + +SELECT number + ,lead(number, 1, 8472) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) lead +FROM numbers(5) +ORDER BY number +FORMAT Pretty; -- { serverError BAD_ARGUMENTS } + +SELECT number + ,lag(number, 1, 8472) OVER (ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) lag +FROM numbers(5) +ORDER BY number +FORMAT Pretty; -- { serverError BAD_ARGUMENTS } + +set enable_analyzer = 0; + +SELECT number + ,lead(number, 1, 8472) OVER () lead +FROM numbers(5) +ORDER BY number +FORMAT Pretty; -- { serverError NOT_IMPLEMENTED } + +SELECT number + ,lag(number, 1, 8472) OVER () lag +FROM numbers(5) +ORDER BY number +FORMAT Pretty; -- { serverError NOT_IMPLEMENTED } diff --git a/parser/testdata/03538_analyzer_scalar_correlated_subquery_fix/ast.json b/parser/testdata/03538_analyzer_scalar_correlated_subquery_fix/ast.json new file mode 100644 index 000000000..810b8b6f5 --- /dev/null +++ b/parser/testdata/03538_analyzer_scalar_correlated_subquery_fix/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + 
"type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00106133, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03538_analyzer_scalar_correlated_subquery_fix/metadata.json b/parser/testdata/03538_analyzer_scalar_correlated_subquery_fix/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03538_analyzer_scalar_correlated_subquery_fix/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03538_analyzer_scalar_correlated_subquery_fix/query.sql b/parser/testdata/03538_analyzer_scalar_correlated_subquery_fix/query.sql new file mode 100644 index 000000000..57c007038 --- /dev/null +++ b/parser/testdata/03538_analyzer_scalar_correlated_subquery_fix/query.sql @@ -0,0 +1,42 @@ +set enable_analyzer = 1; +set allow_experimental_correlated_subqueries = 1; + +CREATE TABLE partsupp ( + ps_partkey Int32, + ps_suppkey Int32, + ps_availqty Int32 +) +ORDER BY (ps_partkey, ps_suppkey); + +INSERT INTO partsupp (ps_partkey, ps_suppkey, ps_availqty) VALUES (114, 115, 1), (369, 7870, 1); + +CREATE TABLE lineitem ( + l_partkey Int32, + l_suppkey Int32, + l_quantity Decimal(15,2) +) +ORDER BY (); + +INSERT INTO lineitem (l_partkey, l_suppkey, l_quantity) VALUES (1, 1, 1.0); + +SELECT + ps_partkey, + ps_suppkey, + ( + SELECT 0.5 * sum(l_quantity) + FROM lineitem + WHERE (l_partkey = ps_partkey) AND (l_suppkey = ps_suppkey) + ) AS half_sum_quantity +FROM partsupp +WHERE (ps_partkey, ps_suppkey) IN ((114, 115), (369, 7870)) +ORDER BY ps_partkey, ps_suppkey; + +SELECT + ps_partkey, + ps_suppkey +FROM partsupp +WHERE ps_availqty > ( + SELECT 0.5 * sum(l_quantity) + FROM lineitem + WHERE (l_partkey = ps_partkey) AND (l_suppkey = ps_suppkey) + ); diff --git a/parser/testdata/03538_array_except/ast.json b/parser/testdata/03538_array_except/ast.json new file mode 100644 index 000000000..6d8e56b73 --- /dev/null +++ b/parser/testdata/03538_array_except/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 3538_array_except1 (children 1)" + }, + { + "explain": " Identifier 3538_array_except1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001184409, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/03538_array_except/metadata.json b/parser/testdata/03538_array_except/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03538_array_except/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03538_array_except/query.sql b/parser/testdata/03538_array_except/query.sql new file mode 100644 index 000000000..8bf0d5b1a --- /dev/null +++ b/parser/testdata/03538_array_except/query.sql @@ -0,0 +1,264 @@ +DROP TABLE IF EXISTS 3538_array_except1; +DROP TABLE IF EXISTS 3538_array_except2; +DROP TABLE IF EXISTS 3538_array_except3; +DROP TABLE IF EXISTS 3538_array_except4; +DROP TABLE IF EXISTS 3538_array_except5; +DROP TABLE IF EXISTS 3538_array_except6; + +SELECT arrayExcept([1, 2, 3, 4], [3, 5]) AS result; +SELECT arrayExcept([1, 2, 2, 3], [2]) AS result; +SELECT arrayExcept(['apple', 'banana', 'cherry'], ['banana', 'date']) AS result; + +SELECT arrayExcept([]::Array(UInt8), [1, 2]) AS result; +SELECT arrayExcept([1, 2, 3], []::Array(UInt8)) AS result; +SELECT arrayExcept([1, 2, 3], [1, 2, 3]) AS result; + +SELECT arrayExcept(['laptop', 'phone', 'tablet', 'watch'], ['watch', 'headphones']) AS 
result; +SELECT arrayExcept([200, 404, 404, 500, 503], [404, 500]) AS result; + +SELECT arrayExcept([1, NULL, 2], [2]) AS result; +SELECT arrayExcept([1, 2, 3], [2, NULL]) AS result; + +SELECT arrayExcept(materialize(['11','2','3','4','0']), materialize([1.5])); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayExcept(materialize('11'), materialize('1')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayExcept(materialize(['11','2','3','4','0']), materialize('1')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayExcept(materialize(['11','2','3','4','0']), materialize('1')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT arrayExcept(materialize([['11','2','3','4','0']]), materialize([['1']])); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT arrayExcept([1, 2, 3, 4], [3, 5]) AS result FROM numbers(3); +SELECT arrayExcept(materialize([1, 2, 3, 4]), [3, 5]) AS result FROM numbers(3); + +WITH excludes AS ( + SELECT 1 as id, ['b','d'] AS exclude + UNION ALL + SELECT 2 as id, ['a','c'] + UNION ALL + SELECT 3 as id, ['x','y'] +) +SELECT + id, arrayExcept(['a','b','c'], exclude) AS result +FROM excludes ORDER BY id; + +SELECT + arrayExcept( + multiIf( + number = 0, [1, 2, 3], + number = 1, [4, 5, 6], + [7, 8, 9] + ), + [2, 5, 8] + ) AS result +FROM numbers(3); + +SELECT arrayExcept(['premium', 'active', 'new']::Array(LowCardinality(String)), ['active']::Array(LowCardinality(String))) AS result; +SELECT arrayExcept(materialize(['premium', 'active', 'new']::Array(LowCardinality(String))), ['active']::Array(LowCardinality(String))) AS result; +SELECT arrayExcept(['premium', 'active', 'new']::Array(LowCardinality(String)), materialize(['active']::Array(LowCardinality(String)))) AS result; +SELECT arrayExcept(materialize(['premium', 'active', 'new']::Array(LowCardinality(String))), materialize(['active']::Array(LowCardinality(String)))) AS result; +SELECT arrayExcept(['a','b','c']::Array(LowCardinality(String)), ['b','d']::Array(String)) AS result; + +CREATE TABLE 3538_array_except1 +( + id UInt32, + source Array(UInt32), + except Array(UInt32), + except_null Array(Nullable(UInt32)), + expected Array(UInt32) +) +ENGINE = Memory; + +INSERT INTO 3538_array_except1 VALUES +( 1, [1, 2, 3], [2], [NULL, 2], [1, 3]), +( 2, [1, 2, 3, 2], [2], [NULL, 2], [1, 3]), +( 3, [1, 2, 3], [4], [NULL, 4], [1, 2, 3]), +( 4, [], [1], [NULL, 1], []), +( 5, [1], [], [NULL], [1]), +( 6, [], [], [NULL], []), +( 7, [1, 2], [], [NULL], [1, 2]), +( 8, [], [1, 2], [NULL, 1, 2], []), +( 9, [1, 1, 2, 3], [1], [NULL, 1], [2, 3]), +(10, [1, 2, 2, 3], [2], [NULL, 2, NULL], [1, 3]), +(11, [1, 2, 3, 3], [3], [NULL, 3], [1, 2]), +(12, [1, 2, 3, 4, 5], [2, 4], [NULL, 2, 4], [1, 3, 5]), +(13, [10, 20, 30, 40], [10, 30], [NULL, 10, 30, NULL], [20, 40]), +(14, [100, 200], [300, 400], [NULL, 300, NULL, 400], [100, 200]), +(15, [5, 6, 7], [8, 9], [NULL, 8, 9], [5, 6, 7]), +(16, [1, 2, 3], [1, 2, 3], [NULL, 1, NULL, 2, 3], []), +(17, [42], [42], [NULL, 42], []); + +SELECT 'Array(UInt32) exceptArray(UInt32)'; +SELECT id, source, except, arrayExcept(source, except) AS result, expected, if(result = expected, 'OK', 'NOK') AS status FROM 3538_array_except1 ORDER BY id; +SELECT 'Array(UInt32) exceptArray(Nullable(UInt32))'; +SELECT id, source, except_null, arrayExcept(source, except_null) AS result, expected, if(result = expected, 'OK', 'NOK') AS status FROM 3538_array_except1 ORDER BY id; + +CREATE TABLE 3538_array_except2 +( + id UInt32, + source_null Array(Nullable(UInt32)), + except Array(UInt32), + expected 
Array(Nullable(UInt32)), + except_null Array(Nullable(UInt32)), + expected_null Array(Nullable(UInt32)) +) +ENGINE = Memory; + +INSERT INTO 3538_array_except2 VALUES +(1, [1, 2, 3], [2], [1, 3], [NULL, 2], [1, 3]), +(2, [1, 2, 3, 2], [2], [1, 3], [NULL, 2], [1, 3]), +(3, [1, NULL, 3], [2], [1, NULL, 3], [NULL, 2], [1, 3]), +(4, [1, NULL, 3], [1], [NULL, 3], [NULL, 1], [3]), +(5, [1, NULL, 3], [999], [1, NULL, 3], [NULL], [1, 3]), +(6, [NULL, NULL, NULL], [999], [NULL,NULL,NULL],[NULL, 999], []), +(7, [NULL, NULL, NULL], [999], [NULL,NULL,NULL],[NULL], []), +(8, [], [1], [], [NULL, 1], []), +(9, [NULL], [999], [NULL], [NULL], []), +(10, [], [999], [], [NULL], []), +(11, [1, 2, 3], [1, 2, 3], [], [NULL, 1, 2, 3], []), +(12, [NULL, NULL], [999], [NULL, NULL], [NULL], []), +(13, [1, NULL, 2, NULL, 3], [2], [1, NULL, NULL, 3], [NULL, 2], [1, 3]), +(14, [1, NULL, 2, 3], [1, 3], [NULL, 2], [NULL, 1, 3], [2]), +(15, [1, NULL, 3], [4, 5], [1, NULL, 3], [NULL, 4, 5], [1, 3]), +(16, [NULL, NULL], [1, 2], [NULL, NULL], [NULL, 1, 2], []), +(17, [1, 1, NULL, NULL, 2], [1], [NULL, NULL, 2], [NULL, 1], [2]), +(18, [1, NULL, 1, NULL], [1], [NULL, NULL], [NULL, 1], []), +(19, [1, NULL, 2, 3, NULL], [2,3], [1, NULL, NULL], [NULL, 2, 3], [1]), +(20, [10, NULL, 20, NULL, 30], [10,30], [NULL, 20, NULL], [NULL, 10, 30], [20]); + +SELECT 'Array(Nullable(UInt32)) exceptArray(UInt32)'; +SELECT id, source_null, except, arrayExcept(source_null, except) AS result, expected, if(result = expected, 'OK', 'NOK') AS status FROM 3538_array_except2 ORDER BY id; +SELECT 'Array(Nullable(UInt32)) exceptArray(Nullable(UInt32))'; +SELECT id, source_null, except_null, arrayExcept(source_null, except_null) AS result, expected_null, if(result = expected_null, 'OK', 'NOK') AS status FROM 3538_array_except2 ORDER BY id; + +CREATE TABLE 3538_array_except3 +( + id UInt32, + source Array(String), + except Array(String), + except_null Array(Nullable(String)), + expected Array(String) +) +ENGINE = Memory; + +INSERT INTO 3538_array_except3 VALUES +(1, ['apple', 'banana', 'cherry'], ['banana'], [NULL, 'banana'], ['apple', 'cherry']), +(2, ['apple', 'banana', 'cherry', 'banana'], ['banana'], [NULL, 'banana'], ['apple', 'cherry']), +(3, ['', 'banana', ''], ['banana'], [NULL, 'banana'], ['', '']), +(4, ['', '', ''], [''], [NULL, ''], []), +(5, ['Apple', 'apple', 'APPLE'], ['apple'], [NULL, 'apple'], ['Apple', 'APPLE']), +(6, ['app', 'apple', 'application'], ['apple'], [NULL, 'apple'], ['app', 'application']), +(7, ['café', 'naïve', 'hôtel'], ['naïve'], [NULL, 'naïve'], ['café', 'hôtel']), +(8, ['日本', '中国', '韓国'], ['中国'], [NULL, '中国'], ['日本', '韓国']), +(9, [ + 'this_is_a_very_long_string_1234567890', + 'another_long_string_abcdefghijklmnop', + 'short' +], ['another_long_string_abcdefghijklmnop'], [ + NULL, 'another_long_string_abcdefghijklmnop' +], [ + 'this_is_a_very_long_string_1234567890', + 'short' +]), +(10, ['a$b', 'c*d', 'e\\f'], ['c*d'], [NULL, 'c*d'], ['a$b', 'e\\f']), +(11, ['A', 'B', 'C'], ['A', 'B', 'C'], [NULL, 'A', 'B', 'C'], []), +(12, ['X', 'X', 'X'], ['X'], [NULL, 'X'], []), +(13, ['cat', 'dog', 'fish'], ['bird'], [NULL, 'bird'], ['cat', 'dog', 'fish']), +(14, ['alpha', 'beta'], ['gamma'], [NULL, 'gamma'], ['alpha', 'beta']), +(15, ['x', 'x', 'y', 'y'], ['x'], [NULL, 'x'], ['y', 'y']), +(16, ['a', 'b', 'a', 'b'], ['a', 'b'], [NULL, 'a', 'b'], []), +(17, ['a', 'bb', 'ccc', 'dddd'], ['bb', 'dddd'], [NULL, 'bb', 'dddd'], ['a', 'ccc']), +(18, ['short', 'medium', 'longer'], ['medium'], [NULL, 'medium'], ['short', 'longer']), +(19, 
['pre-1', 'pre-2', 'post-1'], ['pre-2'], [NULL, 'pre-2'], ['pre-1', 'post-1']), +(20, ['start', 'middle', 'end'], ['middle'], [NULL, 'middle'], ['start', 'end']); + + +SELECT 'Array(String) exceptArray(String)'; +SELECT id, source, except, arrayExcept(source, except) AS result, expected, if(result = expected, 'OK', 'NOK') AS status FROM 3538_array_except3 ORDER BY id; +SELECT 'Array(String) exceptArray(Nullable(String))'; +SELECT id, source, except_null, arrayExcept(source, except_null) AS result, expected, if(result = expected, 'OK', 'NOK') AS status FROM 3538_array_except3 ORDER BY id; + +CREATE TABLE 3538_array_except4 +( + id UInt32, + source_null Array(Nullable(String)), + except Array(String), + expected Array(Nullable(String)), + except_null Array(Nullable(String)), + expected_null Array(Nullable(String)) +) +ENGINE = Memory; + +INSERT INTO 3538_array_except4 VALUES +(1, ['apple', NULL, 'cherry'], ['apple'], [NULL, 'cherry'], [NULL, 'apple'], ['cherry']), +(2, [NULL, 'banana', NULL, 'banana'], ['banana'], [NULL, NULL], [NULL, 'banana'], []), +(3, [NULL, NULL, NULL], ['missing'], [NULL, NULL, NULL], [NULL, 'missing'], []), +(4, [NULL, NULL, NULL], [NULL], [NULL, NULL, NULL], [NULL], []), +(5, ['', NULL, ''], [''], [NULL], [NULL, ''], []), +(6, [NULL, '', NULL], ['x'], [NULL, '', NULL], [NULL, 'x'], ['']), +(7, ['Apple', NULL, 'APPLE'], ['apple'], ['Apple', NULL, 'APPLE'], [NULL, 'apple'], ['Apple', 'APPLE']), +(8, [NULL, 'a', NULL, 'A'], ['a'], [NULL, NULL, 'A'], [NULL, 'a'], ['A']), +(9, ['café', NULL, 'hôtel'], ['naïve'], ['café', NULL, 'hôtel'], [NULL, 'naïve'], ['café', 'hôtel']), +(10, [NULL, '中国', NULL], ['日本'], [NULL, '中国', NULL], [NULL, '日本'], ['中国']), +(11, ['A', NULL, 'C'], ['A', 'C'], [NULL], [NULL, 'A', 'C'], []), +(12, [NULL, 'X', NULL], ['X'], [NULL, NULL], [NULL, 'X'], []), +(13, ['cat', NULL, 'fish'], ['bird'], ['cat', NULL, 'fish'], [NULL, 'bird'], ['cat', 'fish']), +(14, [NULL, 'beta'], ['gamma'], [NULL, 'beta'], [NULL, 'gamma'], ['beta']), +(15, ['x', NULL, 'x', 'y'], ['x'], [NULL, 'y'], [NULL, 'x'], ['y']), +(16, [NULL, 'b', NULL, 'b'], ['b'], [NULL, NULL], [NULL, 'b'], []), +(17, ['pre', NULL, 'post'], ['pre'], [NULL, 'post'], [NULL, 'pre'], ['post']), +(18, [NULL, 'middle', 'end'], ['middle'], [NULL, 'end'], [NULL, 'middle'], ['end']), +(19, ['keep', NULL, 'keep'], ['missing'], ['keep', NULL, 'keep'], [NULL], ['keep', 'keep']), +(20, [NULL, 'remove', NULL], ['remove'], [NULL, NULL], [NULL, 'remove'], []); + +SELECT 'Array(Nullable(String)) exceptArray(String)'; +SELECT id, source_null, except, arrayExcept(source_null, except) AS result, expected, if(result = expected, 'OK', 'NOK') AS status FROM 3538_array_except4 ORDER BY id; +SELECT 'Array(Nullable(String)) exceptArray(Nullable(String))'; +SELECT id, source_null, except_null, arrayExcept(source_null, except_null) AS result, expected_null, if(result = expected_null, 'OK', 'NOK') AS status FROM 3538_array_except4 ORDER BY id; + +CREATE TABLE 3538_array_except5 ( + id UInt32, + source Array(FixedString(5)), + except Array(FixedString(5)), + except_null Array(Nullable(FixedString(5))), + expected Array(FixedString(5)) +) ENGINE = Memory; + +INSERT INTO 3538_array_except5 VALUES +(1, ['abc\0\0', 'def\0\0'], ['def\0\0'], ['def\0\0', NULL], ['abc\0\0']), +(2, ['hello', 'wor\0\0'], ['wor\0\0'], [NULL, 'wor\0\0'], ['hello']), +(3, ['a\0\0\0\0', 'b\0\0\0\0'], ['a\0\0\0\0', 'b\0\0\0\0'], ['a\0\0\0\0', NULL, 'b\0\0\0\0'], []), +(4, ['x\0y\0\0', 'x\0\0\0\0'], ['x\0\0\0\0'], [NULL, 'x\0\0\0\0', NULL], ['x\0y\0\0']); + 
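+-- FixedString(5) values are zero-padded to exactly 5 bytes, which is why the literals in this test spell out the trailing '\0' bytes explicitly.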
+SELECT 'Array(FixedString(5)) exceptArray(FixedString(5))'; +SELECT id, source, except, arrayExcept(source, except) AS result, expected, if(result = expected, 'OK', 'NOK') AS status FROM 3538_array_except5 ORDER BY id; +SELECT 'Array(FixedString(5)) exceptArray(Nullable(FixedString(5)))'; +SELECT id, source, except_null, arrayExcept(source, except_null) AS result, expected, if(result = expected, 'OK', 'NOK') AS status FROM 3538_array_except5 ORDER BY id; + + +CREATE TABLE 3538_array_except6 +( + id UInt32, + source_null Array(Nullable(FixedString(5))), + except Array(FixedString(5)), + expected Array(Nullable(FixedString(5))), + except_null Array(Nullable(FixedString(5))), + expected_null Array(Nullable(FixedString(5))) +) +ENGINE = Memory; + +INSERT INTO 3538_array_except6 VALUES +(1, ['abc\0\0', NULL, 'def\0\0'], ['def\0\0'], ['abc\0\0', NULL], [NULL, 'def\0\0'], ['abc\0\0']), +(2, [NULL, NULL, NULL], ['xyz\0\0'], [NULL, NULL, NULL], [NULL, 'xyz\0\0'], []), +(3, ['\0\0\0\0\0', 'hello', NULL], ['hello'], ['\0\0\0\0\0', NULL], [NULL, 'hello'], ['\0\0\0\0\0']), +(4, ['a\0\0\0\0', 'b\0\0\0\0'], ['a\0\0\0\0', 'b\0\0\0\0'], [], [NULL, 'a\0\0\0\0', 'b\0\0\0\0'], []), +(5, ['cat\0\0', 'dog\0\0', 'fox\0\0'], ['dog\0\0'], ['cat\0\0', 'fox\0\0'], [NULL, 'dog\0\0'], ['cat\0\0', 'fox\0\0']); + +SELECT 'Array(Nullable(FixedString(5))) exceptArray(FixedString(5))'; +SELECT id, source_null, except, arrayExcept(source_null, except) AS result, expected, if(result = expected, 'OK', 'NOK') AS status FROM 3538_array_except6 ORDER BY id; +SELECT 'Array(Nullable(FixedString(5))) exceptArray(Nullable(FixedString(5)))'; +SELECT id, source_null, except_null, arrayExcept(source_null, except_null) AS result, expected_null, if(result = expected_null, 'OK', 'NOK') AS status FROM 3538_array_except6 ORDER BY id; + +DROP TABLE IF EXISTS 3538_array_except6; +DROP TABLE IF EXISTS 3538_array_except5; +DROP TABLE IF EXISTS 3538_array_except4; +DROP TABLE IF EXISTS 3538_array_except3; +DROP TABLE IF EXISTS 3538_array_except2; +DROP TABLE IF EXISTS 3538_array_except1; diff --git a/parser/testdata/03538_crash_in_parallel_hash_with_empty_using/ast.json b/parser/testdata/03538_crash_in_parallel_hash_with_empty_using/ast.json new file mode 100644 index 000000000..45bf21f25 --- /dev/null +++ b/parser/testdata/03538_crash_in_parallel_hash_with_empty_using/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001476669, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03538_crash_in_parallel_hash_with_empty_using/metadata.json b/parser/testdata/03538_crash_in_parallel_hash_with_empty_using/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03538_crash_in_parallel_hash_with_empty_using/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03538_crash_in_parallel_hash_with_empty_using/query.sql b/parser/testdata/03538_crash_in_parallel_hash_with_empty_using/query.sql new file mode 100644 index 000000000..6e9b831d4 --- /dev/null +++ b/parser/testdata/03538_crash_in_parallel_hash_with_empty_using/query.sql @@ -0,0 +1,33 @@ +set enable_parallel_replicas = 0; + +SELECT + n, + myfield +FROM +( + SELECT toString(number) AS n + FROM system.numbers + LIMIT 1000 +) AS a +ANY LEFT JOIN +( + SELECT 1 AS myfield +) AS b USING () +FORMAT Null +SETTINGS allow_experimental_analyzer = false; + +SELECT + n, + myfield +FROM 
+( + SELECT toString(number) AS n + FROM system.numbers + LIMIT 1000 +) AS a +ANY LEFT JOIN +( + SELECT 1 AS myfield +) AS b USING () +FORMAT Null +SETTINGS allow_experimental_analyzer = true; -- { serverError INVALID_JOIN_ON_EXPRESSION } diff --git a/parser/testdata/03538_higher_order_functions_null_filter/ast.json b/parser/testdata/03538_higher_order_functions_null_filter/ast.json new file mode 100644 index 000000000..a00649f11 --- /dev/null +++ b/parser/testdata/03538_higher_order_functions_null_filter/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayFirst (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function lambda (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Function notEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal Array_[NULL, 'arrayFirst']" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001122778, + "rows_read": 16, + "bytes_read": 631 + } +} diff --git a/parser/testdata/03538_higher_order_functions_null_filter/metadata.json b/parser/testdata/03538_higher_order_functions_null_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03538_higher_order_functions_null_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03538_higher_order_functions_null_filter/query.sql b/parser/testdata/03538_higher_order_functions_null_filter/query.sql new file mode 100644 index 000000000..e2d8efaa9 --- /dev/null +++ b/parser/testdata/03538_higher_order_functions_null_filter/query.sql @@ -0,0 +1,5 @@ +SELECT arrayFirst(x -> x != '', [NULL, 'arrayFirst']); +SELECT arrayFirstIndex(x -> x != '', [NULL, 'arrayFirstIndex']); +SELECT arrayFilter(x -> x != '', [NULL, 'arrayFilter']); +SELECT arrayExists(x -> x != '', [NULL, 'arrayExists']); +SELECT arrayAll(x -> x != '', [NULL, 'arrayAll']); diff --git a/parser/testdata/03538_optimize_rewrite_regexp_functions/ast.json b/parser/testdata/03538_optimize_rewrite_regexp_functions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03538_optimize_rewrite_regexp_functions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03538_optimize_rewrite_regexp_functions/metadata.json b/parser/testdata/03538_optimize_rewrite_regexp_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03538_optimize_rewrite_regexp_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03538_optimize_rewrite_regexp_functions/query.sql b/parser/testdata/03538_optimize_rewrite_regexp_functions/query.sql new file mode 100644 index 000000000..e5f37eb54 --- /dev/null +++ b/parser/testdata/03538_optimize_rewrite_regexp_functions/query.sql @@ -0,0 +1,117 @@ +-- { echo ON } + +SET enable_analyzer = 1; +SET optimize_rewrite_regexp_functions = 1; + +-- Rule 1: replaceRegexpAll / regexp_replace -> 
replaceRegexpOne if a pattern without alternatives starts with ^ or ends with an unescaped $ + +-- Starts with ^ (should rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT regexp_replace(identity('abc123'), '^abc', ''); +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpAll(identity('abc123'), '^abc', ''); + +-- Ends with unescaped $ (should rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT regexp_replace(identity('abc123'), '123$', ''); +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpAll(identity('abc123'), '123$', ''); + +-- Ends with escaped $ (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpAll(identity('abc123$'), '123\$', ''); + +-- Starts with escaped ^ (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpAll(identity('abc123'), '\^abc', ''); + +-- Pattern with ^ not at start (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpAll(identity('abc123'), 'a^bc', ''); + +-- Pattern with $ not at end (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpAll(identity('abc123'), '123$abc', ''); + +-- Pattern with alternatives (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpAll(identity('abc123'), '^123|456$', ''); + +-- Rule 2: If a replaceRegexpOne function's replacement is nothing other than \1 (with at least one subpattern in the regexp) or \0 (with no subpatterns in the regexp), rewrite it with extract. + +-- NOTE: \0 is specially treated as NUL instead of a capture group reference, so \\0 has to be used instead. + +-- Only \0, no capture group (should rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^abc123$', '\\0'); + +-- Only \1, with one capture group (should rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^(abc)$', '\1'); + +-- Only \1, no capture group (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^abc$', '\1'); + +-- Pattern not full (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^abc', '\\0'); + +-- Pattern not full (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), 'abc$', '\\0'); + +-- Pattern not full (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), 'abc', '\\0'); + +-- Pattern not full (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^abc\\$', '\\0'); + +-- Pattern not full (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^ab|c$', '\\0'); + +-- \0 with extra characters (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^abc123$', 'pre\\0post'); + +-- \1 with two capture groups (should rewrite — only \1 used) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^(a)(b)$', '\1'); + +-- \2 used (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^(a)(b)$', '\2'); + +-- Mixed content in replacement (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^(abc)$', 'X\1Y'); + +-- Escaped backslash in replacement (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc123'), '^(abc)$', '\\\\1');
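+ +-- Illustrative sketch of Rule 2 (a comment only, not one of the test queries): the rewrite is expected to turn replaceRegexpOne(s, '^(abc)$', '\1') into extract(s, '^(abc)$') and replaceRegexpOne(s, '^abc123$', '\\0') into extract(s, '^abc123$'), so the dump_ast = 1 output for the positive cases above should show Function extract instead of Function replaceRegexpOne.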
+ +-- Rule 3: If an extract function has a regexp with some subpatterns and the regexp starts with ^.* or ends with an unescaped .*$, remove this prefix and/or suffix. + +-- Starts with ^.* (should strip prefix) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT extract(identity('abc123'), '^.*(123)'); + +-- Ends with unescaped .*$ (should strip suffix) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT extract(identity('abc123'), '(abc).*$'); + +-- Starts and ends (should strip both) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT extract(identity('abc123'), '^.*(abc).*$'); + +-- Starts and ends (should NOT rewrite without capture groups) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT extract(identity('abc123'), '^.*$'); + +-- Escaped dot before * (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT extract(identity('abc123'), '(abc)\.*$'); + +-- No prefix or suffix (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT extract(identity('abc123'), '(abc)'); + +-- Starts with .* but not ^.* (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT extract(identity('abc123'), '.*(abc)'); + +-- Starts with ^.*? (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT extract(identity('abc123abc456'), '^.*?(abc.*)'); + +-- Ends with .* but not .*$ (should NOT rewrite) +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT extract(identity('abc123'), '(abc).*'); + + +-- Cascade tests + +-- Rule 1 + Rule 2: replaceRegexpAll -> replaceRegexpOne -> extract +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpAll(identity('abc'), '^(abc)', '\1'); + +-- Rule 2 + 3: replaceRegexpOne -> extract -> simplified extract +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpOne(identity('abc'), '^.*(abc).*$','\1'); + +-- Rule 1 + 2 + 3: replaceRegexpAll -> replaceRegexpOne -> extract -> simplified extract +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT replaceRegexpAll(identity('abc'), '^.*(abc).*$','\1'); + +-- ClickBench Q28 +EXPLAIN QUERY TREE dump_tree = 0, dump_ast = 1 SELECT REGEXP_REPLACE(identity('some referer'), '^https?://(?:www\.)?([^/]+)/.*$', '\1'); diff --git a/parser/testdata/03538_validate_setting_merge_max_block_size/ast.json b/parser/testdata/03538_validate_setting_merge_max_block_size/ast.json new file mode 100644 index 000000000..1586f334e --- /dev/null +++ b/parser/testdata/03538_validate_setting_merge_max_block_size/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery validate_setting_merge_max_block_size (children 1)" + }, + { + "explain": " Identifier validate_setting_merge_max_block_size" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001461775, + "rows_read": 2, + "bytes_read": 126 + } +} diff --git a/parser/testdata/03538_validate_setting_merge_max_block_size/metadata.json b/parser/testdata/03538_validate_setting_merge_max_block_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03538_validate_setting_merge_max_block_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03538_validate_setting_merge_max_block_size/query.sql
b/parser/testdata/03538_validate_setting_merge_max_block_size/query.sql new file mode 100644 index 000000000..0891527fd --- /dev/null +++ b/parser/testdata/03538_validate_setting_merge_max_block_size/query.sql @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS validate_setting_merge_max_block_size; + +CREATE TABLE validate_setting_merge_max_block_size (x Int64) ENGINE = MergeTree() ORDER BY tuple() SETTINGS merge_max_block_size = 0; -- {serverError BAD_ARGUMENTS} +CREATE TABLE validate_setting_merge_max_block_size (x Int64) ENGINE = MergeTree() ORDER BY tuple() SETTINGS merge_max_block_size = 1; +ALTER TABLE validate_setting_merge_max_block_size MODIFY SETTING merge_max_block_size = 0; -- {serverError BAD_ARGUMENTS} + +DROP TABLE validate_setting_merge_max_block_size; diff --git a/parser/testdata/03539_kusto_output_format_trash/ast.json b/parser/testdata/03539_kusto_output_format_trash/ast.json new file mode 100644 index 000000000..31cf6c501 --- /dev/null +++ b/parser/testdata/03539_kusto_output_format_trash/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "InsertQuery (children 3)" + }, + { + "explain": " Function file (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function currentDatabase (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal '\/query.data'" + }, + { + "explain": " Literal 'RowBinary'" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toIntervalSecond (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Set" + }, + { + "explain": " Set" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001447678, + "rows_read": 18, + "bytes_read": 629 + } +} diff --git a/parser/testdata/03539_kusto_output_format_trash/metadata.json b/parser/testdata/03539_kusto_output_format_trash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03539_kusto_output_format_trash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03539_kusto_output_format_trash/query.sql b/parser/testdata/03539_kusto_output_format_trash/query.sql new file mode 100644 index 000000000..4dc51c519 --- /dev/null +++ b/parser/testdata/03539_kusto_output_format_trash/query.sql @@ -0,0 +1 @@ +INSERT INTO TABLE FUNCTION file(currentDatabase() || '/query.data', 'RowBinary') SELECT INTERVAL 1 SECOND SETTINGS interval_output_format = 'kusto'; diff --git a/parser/testdata/03540_date_trunc_old_behaviour/ast.json b/parser/testdata/03540_date_trunc_old_behaviour/ast.json new file mode 100644 index 000000000..7c39c553e --- /dev/null +++ b/parser/testdata/03540_date_trunc_old_behaviour/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001489374, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03540_date_trunc_old_behaviour/metadata.json b/parser/testdata/03540_date_trunc_old_behaviour/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03540_date_trunc_old_behaviour/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03540_date_trunc_old_behaviour/query.sql b/parser/testdata/03540_date_trunc_old_behaviour/query.sql new file mode 100644 index 000000000..95510687e --- /dev/null +++ b/parser/testdata/03540_date_trunc_old_behaviour/query.sql @@ -0,0 +1,4 @@ +set function_date_trunc_return_type_behavior=1; +set session_timezone='UTC'; +select dateTrunc('second', '2020-10-10 10:10:10.10'::DateTime64(2)) as result, toTypeName(result); +select dateTrunc('month', '2020-10-10'::Date32) as result, toTypeName(result); diff --git a/parser/testdata/03541_rename_column_start/ast.json b/parser/testdata/03541_rename_column_start/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03541_rename_column_start/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03541_rename_column_start/metadata.json b/parser/testdata/03541_rename_column_start/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03541_rename_column_start/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03541_rename_column_start/query.sql b/parser/testdata/03541_rename_column_start/query.sql new file mode 100644 index 000000000..b5d4fa03f --- /dev/null +++ b/parser/testdata/03541_rename_column_start/query.sql @@ -0,0 +1,14 @@ + -- Tags: zookeeper + +CREATE TABLE rmt (a UInt64, b UInt64) +ENGINE=ReplicatedMergeTree('/clickhouse/tables/{database}/rmt', '1') +ORDER BY a; + +INSERT INTO rmt VALUES (1, 4); + +SYSTEM STOP MERGES rmt; +ALTER TABLE rmt UPDATE b = 10 WHERE a != 0; +ALTER TABLE rmt RENAME COLUMN b to c; -- {serverError BAD_ARGUMENTS}; + +SYSTEM START MERGES rmt; +DROP TABLE rmt SYNC; diff --git a/parser/testdata/03541_table_without_insertable_columns/ast.json b/parser/testdata/03541_table_without_insertable_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03541_table_without_insertable_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03541_table_without_insertable_columns/metadata.json b/parser/testdata/03541_table_without_insertable_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03541_table_without_insertable_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03541_table_without_insertable_columns/query.sql b/parser/testdata/03541_table_without_insertable_columns/query.sql new file mode 100644 index 000000000..e76e253c8 --- /dev/null +++ b/parser/testdata/03541_table_without_insertable_columns/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-parallel, no-ordinary-database +-- Tag no-parallel: static UUID +-- Tag no-ordinary-database: requires UUID + +CREATE TABLE no_physical (a Int EPHEMERAL) Engine=Memory; -- { serverError EMPTY_LIST_OF_COLUMNS_PASSED } +CREATE TABLE no_physical (a Int ALIAS 1) Engine=Memory; -- { serverError EMPTY_LIST_OF_COLUMNS_PASSED } + +CREATE TABLE no_insertable (a Int MATERIALIZED 1) Engine=Memory; -- { serverError EMPTY_LIST_OF_COLUMNS_PASSED } +ATTACH TABLE no_insertable UUID '00000000-0000-0000-0000-000000003541' (a Int MATERIALIZED 1) Engine=Memory; + +CREATE TABLE insertable (a Int EPHEMERAL, b Int MATERIALIZED 1) Engine=Memory; +ALTER TABLE insertable DROP COLUMN a; -- { serverError EMPTY_LIST_OF_COLUMNS_PASSED } diff --git a/parser/testdata/03542_TTL_dict/ast.json b/parser/testdata/03542_TTL_dict/ast.json new file 
mode 100644 index 000000000..252383a84 --- /dev/null +++ b/parser/testdata/03542_TTL_dict/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ttl_dict (children 1)" + }, + { + "explain": " Identifier ttl_dict" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001264763, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/03542_TTL_dict/metadata.json b/parser/testdata/03542_TTL_dict/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03542_TTL_dict/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03542_TTL_dict/query.sql b/parser/testdata/03542_TTL_dict/query.sql new file mode 100644 index 000000000..0b9ebdba5 --- /dev/null +++ b/parser/testdata/03542_TTL_dict/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS ttl_dict; +DROP DICTIONARY IF EXISTS always_alive_ids_dict; +DROP TABLE IF EXISTS always_alive_ids; + +CREATE TABLE always_alive_ids (id UInt64) engine=Memory(); +INSERT INTO always_alive_ids VALUES (-1); + +CREATE DICTIONARY always_alive_ids_dict (id UInt64) PRIMARY KEY id SOURCE(CLICKHOUSE(TABLE 'always_alive_ids')) LAYOUT(HASHED()) LIFETIME(0); +CREATE TABLE ttl_dict (id UInt64, event_date Date) ENGINE = MergeTree ORDER BY (id) TTL event_date + INTERVAL 1 MONTH WHERE NOT dictHas({CLICKHOUSE_DATABASE:String} || '.always_alive_ids_dict', id); + +INSERT INTO ttl_dict VALUES (1, today()-60)(2, today()-60)(3, today()); + +OPTIMIZE TABLE ttl_dict FINAL; + +SELECT id FROM ttl_dict ORDER BY id; diff --git a/parser/testdata/03545_analyzer_correlated_subqueries_use_equivalence_classes/ast.json b/parser/testdata/03545_analyzer_correlated_subqueries_use_equivalence_classes/ast.json new file mode 100644 index 000000000..ba4a7c70d --- /dev/null +++ b/parser/testdata/03545_analyzer_correlated_subqueries_use_equivalence_classes/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001368377, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03545_analyzer_correlated_subqueries_use_equivalence_classes/metadata.json b/parser/testdata/03545_analyzer_correlated_subqueries_use_equivalence_classes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03545_analyzer_correlated_subqueries_use_equivalence_classes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03545_analyzer_correlated_subqueries_use_equivalence_classes/query.sql b/parser/testdata/03545_analyzer_correlated_subqueries_use_equivalence_classes/query.sql new file mode 100644 index 000000000..5eb30dcc3 --- /dev/null +++ b/parser/testdata/03545_analyzer_correlated_subqueries_use_equivalence_classes/query.sql @@ -0,0 +1,15 @@ +SET enable_analyzer = 1; +SET allow_experimental_correlated_subqueries = 1; +SET query_plan_join_swap_table = 0; -- Changes query plan +SET correlated_subqueries_default_join_kind = 'left'; + +EXPLAIN actions = 1 +SELECT + (SELECT + count() + FROM + numbers(10) + WHERE + number = n.number) +FROM + numbers(10) n; diff --git a/parser/testdata/03545_analyzer_correlated_subqueries_use_equivalence_classes_2/ast.json b/parser/testdata/03545_analyzer_correlated_subqueries_use_equivalence_classes_2/ast.json new file mode 100644 index 000000000..cbce505ef --- /dev/null +++ 
b/parser/testdata/03545_analyzer_correlated_subqueries_use_equivalence_classes_2/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001010442, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03545_analyzer_correlated_subqueries_use_equivalence_classes_2/metadata.json b/parser/testdata/03545_analyzer_correlated_subqueries_use_equivalence_classes_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03545_analyzer_correlated_subqueries_use_equivalence_classes_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03545_analyzer_correlated_subqueries_use_equivalence_classes_2/query.sql b/parser/testdata/03545_analyzer_correlated_subqueries_use_equivalence_classes_2/query.sql new file mode 100644 index 000000000..babf94551 --- /dev/null +++ b/parser/testdata/03545_analyzer_correlated_subqueries_use_equivalence_classes_2/query.sql @@ -0,0 +1,35 @@ +SET enable_analyzer = 1; +SET allow_experimental_correlated_subqueries = 1; +SET enable_parallel_replicas = 0; +SET correlated_subqueries_substitute_equivalent_expressions=1; +SET query_plan_join_swap_table = false; +SET correlated_subqueries_default_join_kind = 'left'; + +CREATE TABLE a(c1 Int64, c2 Int64, c3 Int64, c4 Int64) ENGINE = MergeTree() ORDER BY (); +CREATE TABLE b(c1 Int64, c2 Int64, c3 Int64, c4 Int64) ENGINE = MergeTree() ORDER BY (); + +INSERT INTO a VALUES (1, 1, 1, 1), (2, 1, 1, 2); +INSERT INTO b VALUES (1, 1, 1, 1), (1, 2, 2, 1); + +-- {echoOn} +-- All columns in subquery condition belong to the same equivalence class +EXPLAIN +SELECT + c1, + ( + SELECT max(c3) + FROM a + WHERE a.c1 = b.c2 AND b.c1 = b.c3 AND b.c1 = b.c2 AND b.c2 = b.c4 + ) +FROM b; + +-- Same query but with slightly different order of conditions in subquery +EXPLAIN +SELECT + c1, + ( + SELECT max(c3) + FROM a + WHERE a.c1 = b.c2 AND b.c1 = b.c2 AND b.c1 = b.c3 AND b.c2 = b.c4 + ) +FROM b; diff --git a/parser/testdata/03545_analyzer_correlated_subquery_exists_asterisk/ast.json b/parser/testdata/03545_analyzer_correlated_subquery_exists_asterisk/ast.json new file mode 100644 index 000000000..e5db8c881 --- /dev/null +++ b/parser/testdata/03545_analyzer_correlated_subquery_exists_asterisk/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001255507, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03545_analyzer_correlated_subquery_exists_asterisk/metadata.json b/parser/testdata/03545_analyzer_correlated_subquery_exists_asterisk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03545_analyzer_correlated_subquery_exists_asterisk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03545_analyzer_correlated_subquery_exists_asterisk/query.sql b/parser/testdata/03545_analyzer_correlated_subquery_exists_asterisk/query.sql new file mode 100644 index 000000000..b55101712 --- /dev/null +++ b/parser/testdata/03545_analyzer_correlated_subquery_exists_asterisk/query.sql @@ -0,0 +1,40 @@ +SET enable_analyzer = 1; +SET allow_experimental_correlated_subqueries = 1; +SET enable_parallel_replicas = 0; +SET correlated_subqueries_default_join_kind = 'left'; + +-- Disable table swaps during query planning +SET 
query_plan_join_swap_table = false; + +DROP TABLE IF EXISTS test; +CREATE TABLE test( + i1 Int64, + i2 Int64, + i3 Int64, + i4 Int64, + i5 Int64, + i6 Int64, + i7 Int64, + i8 Int64, + i9 Int64, + i10 Int64 +) +ENGINE = MergeTree() +ORDER BY (); + +INSERT INTO test VALUES (1, 2, 3, 4, 5, 6, 7, 8, 9, 10); + +SET correlated_subqueries_substitute_equivalent_expressions = 0; + +EXPLAIN actions = 1 +SELECT 1 FROM test AS t1 +WHERE EXISTS ( + SELECT * FROM test AS t2 + WHERE t1.i1 = t2.i2 +); + +SELECT 1 FROM test AS t1 +WHERE EXISTS ( + SELECT * FROM test AS t2 + WHERE t1.i1 = t2.i2 +); diff --git a/parser/testdata/03545_array_join_index_set_bug/ast.json b/parser/testdata/03545_array_join_index_set_bug/ast.json new file mode 100644 index 000000000..e26f4c978 --- /dev/null +++ b/parser/testdata/03545_array_join_index_set_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery right (children 1)" + }, + { + "explain": " Identifier right" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001307879, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/03545_array_join_index_set_bug/metadata.json b/parser/testdata/03545_array_join_index_set_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03545_array_join_index_set_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03545_array_join_index_set_bug/query.sql b/parser/testdata/03545_array_join_index_set_bug/query.sql new file mode 100644 index 000000000..03701c235 --- /dev/null +++ b/parser/testdata/03545_array_join_index_set_bug/query.sql @@ -0,0 +1,22 @@ +drop table if exists right; +CREATE TABLE right +( + `array_in_index` Array(String), + `array_not_in_index` Array(String), + `Id` String, + INDEX index_document_udm_type_names array_in_index TYPE set(100) GRANULARITY 1, +) +ENGINE = MergeTree +ORDER BY tuple(); + +insert into right select [''], [''], toString(number) from numbers(1000); + +SELECT COUNT() AS x +FROM +( + SELECT + array_in_index, + arrayJoin(array_not_in_index) AS array_not_in_index_joined + FROM right +) AS T00J0 +WHERE T00J0.array_not_in_index_joined = '' AND has(T00J0.array_in_index, 'abc'); diff --git a/parser/testdata/03545_map_contains_bloom_index_bug/ast.json b/parser/testdata/03545_map_contains_bloom_index_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03545_map_contains_bloom_index_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03545_map_contains_bloom_index_bug/metadata.json b/parser/testdata/03545_map_contains_bloom_index_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03545_map_contains_bloom_index_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03545_map_contains_bloom_index_bug/query.sql b/parser/testdata/03545_map_contains_bloom_index_bug/query.sql new file mode 100644 index 000000000..e941135c1 --- /dev/null +++ b/parser/testdata/03545_map_contains_bloom_index_bug/query.sql @@ -0,0 +1,30 @@ + +DROP TABLE IF EXISTS test_map_contains_values; + +CREATE TABLE test_map_contains_values +( + `ResourceAttributes` Map(LowCardinality(String), String), + INDEX idx_res_attr_value mapValues(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1, +) +ORDER BY tuple(); + +INSERT INTO test_map_contains_values VALUES ( { 'rum.sessionId': 'session123' } ); + +DROP TABLE IF EXISTS 
test_map_contains_keys; + +CREATE TABLE test_map_contains_keys +( + `ResourceAttributes` Map(LowCardinality(String), String), + INDEX idx_res_attr_value mapKeys(ResourceAttributes) TYPE bloom_filter(0.01) GRANULARITY 1, +) +ORDER BY tuple(); + +INSERT INTO test_map_contains_keys VALUES ( { 'rum.sessionId': 'session123' } ); + + +SELECT * FROM test_map_contains_values WHERE mapContains(ResourceAttributes, 'rum.sessionId'); +SELECT * FROM test_map_contains_values WHERE mapContainsKey(ResourceAttributes, 'rum.sessionId'); +SELECT * FROM test_map_contains_values WHERE mapContainsValue(ResourceAttributes, 'session123'); +SELECT * FROM test_map_contains_keys WHERE mapContains(ResourceAttributes, 'rum.sessionId'); +SELECT * FROM test_map_contains_keys WHERE mapContainsKey(ResourceAttributes, 'rum.sessionId'); +SELECT * FROM test_map_contains_keys WHERE mapContainsValue(ResourceAttributes, 'session123'); diff --git a/parser/testdata/03545_number_of_rows_in_ttltransform/ast.json b/parser/testdata/03545_number_of_rows_in_ttltransform/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03545_number_of_rows_in_ttltransform/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03545_number_of_rows_in_ttltransform/metadata.json b/parser/testdata/03545_number_of_rows_in_ttltransform/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03545_number_of_rows_in_ttltransform/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03545_number_of_rows_in_ttltransform/query.sql b/parser/testdata/03545_number_of_rows_in_ttltransform/query.sql new file mode 100644 index 000000000..15db46cfa --- /dev/null +++ b/parser/testdata/03545_number_of_rows_in_ttltransform/query.sql @@ -0,0 +1,51 @@ +-- Tags: no-parallel + +CREATE TABLE t +( + `timestamp` DateTime, + `id` String, + `value` UInt16, +) +ENGINE = MergeTree +ORDER BY (id, toStartOfDay(timestamp)) +TTL timestamp + toIntervalDay(1) + GROUP BY id, toStartOfDay(timestamp) + SET timestamp = max(timestamp) + interval 100 years, id = argMax(id, timestamp), value = max(value); + +SYSTEM STOP MERGES t; +INSERT INTO t VALUES (parseDateTimeBestEffort('2000-06-9 10:00'), 'pepe', 1000); +INSERT INTO t VALUES (parseDateTimeBestEffort('2000-06-10 10:00'), 'pepe', 1000); + +-- Inserts the maximum value, but with an older timestamp. The value should be taken in the aggregation. +INSERT INTO t VALUES (parseDateTimeBestEffort('2000-06-10 11:00'), 'pepe', 11000); +INSERT INTO t VALUES (parseDateTimeBestEffort('2000-06-10 12:00'), 'pepe', 1200); + +-- Inserts the latest timestamp, which should be the one taken in the aggregation. +INSERT INTO t VALUES (parseDateTimeBestEffort('2000-06-10 13:00'), 'pepe', 1300);
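+ +-- Expected outcome (explanatory comment only): the TTL GROUP BY merge should leave one row per (id, toStartOfDay(timestamp)) group, with timestamp = max(timestamp) + 100 years, id = argMax(id, timestamp) and value = max(value), i.e. 11000 for 2000-06-10 even though the 13:00 row was inserted last.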
+ +SYSTEM START MERGES t; +OPTIMIZE TABLE t FINAL; +SELECT '-- Intersecting columns in GROUP BY and SET'; +SELECT * FROM t ORDER BY ALL; + +REPLACE TABLE t +( + `timestamp` DateTime, + `id` String, + `value` String, +) +ENGINE = MergeTree +ORDER BY (id, toStartOfDay(timestamp)) +TTL timestamp + toIntervalDay(1) + GROUP BY id, toStartOfDay(timestamp) + SET timestamp = max(timestamp) + interval 100 years, id = max(value); + +SYSTEM STOP MERGES t; +INSERT INTO t VALUES (parseDateTimeBestEffort('2000-06-9 10:00'), 'pepe', 'a'); +INSERT INTO t VALUES (parseDateTimeBestEffort('2000-06-10 10:00'), 'pepe', 'b'); +INSERT INTO t VALUES (parseDateTimeBestEffort('2000-06-10 11:00'), 'pepe', 'c'); + +SYSTEM START MERGES t; +OPTIMIZE TABLE t FINAL; +SELECT '-- Intersecting columns in GROUP BY and SET where SET is prioritized'; +SELECT * FROM t ORDER BY ALL; diff --git a/parser/testdata/03545_union_allow_column_with_no_common_type/ast.json b/parser/testdata/03545_union_allow_column_with_no_common_type/ast.json new file mode 100644 index 000000000..1e8edb0ed --- /dev/null +++ b/parser/testdata/03545_union_allow_column_with_no_common_type/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001145843, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03545_union_allow_column_with_no_common_type/metadata.json b/parser/testdata/03545_union_allow_column_with_no_common_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03545_union_allow_column_with_no_common_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03545_union_allow_column_with_no_common_type/query.sql b/parser/testdata/03545_union_allow_column_with_no_common_type/query.sql new file mode 100644 index 000000000..aed0920a1 --- /dev/null +++ b/parser/testdata/03545_union_allow_column_with_no_common_type/query.sql @@ -0,0 +1,9 @@ +set enable_analyzer = 1; +set use_variant_as_common_type = 1; +SELECT x FROM (SELECT 1 AS x, 1 AS ord UNION ALL SELECT 'Hello', 2 AS ord) ORDER BY ord; +SELECT x FROM (SELECT 1 AS x, 1 AS ord UNION ALL SELECT 'Hello', 2 AS ord UNION ALL SELECT 3.14 AS x, 3 AS ord) ORDER BY ord; +SELECT x FROM (SELECT 1 AS x, 1 AS ord UNION ALL SELECT toDate('2024-01-01') AS x, 2 AS ord) ORDER BY ord; +SELECT x FROM (SELECT 1 AS x, 1 AS ord UNION ALL SELECT [1,2,3] AS x, 2 AS ord) ORDER BY ord; +SELECT x FROM (SELECT 'Hello' AS x, 1 AS ord UNION ALL SELECT map('a',1) AS x, 2 AS ord) ORDER BY ord; +SELECT x FROM (SELECT 1 AS x, 1 AS ord UNION ALL SELECT tuple(1,'a') AS x, 2 AS ord) ORDER BY ord; +SELECT x FROM (SELECT 1 AS x, 1 AS ord UNION ALL SELECT toUUID('00000000-0000-0000-0000-000000000000') AS x, 2 AS ord) ORDER BY ord; diff --git a/parser/testdata/03546_03545_analyzer_correlated_subquery_exists_asterisk_crash/ast.json b/parser/testdata/03546_03545_analyzer_correlated_subquery_exists_asterisk_crash/ast.json new file mode 100644 index 000000000..374d6f17d --- /dev/null +++ b/parser/testdata/03546_03545_analyzer_correlated_subquery_exists_asterisk_crash/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001271678, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git
a/parser/testdata/03546_03545_analyzer_correlated_subquery_exists_asterisk_crash/metadata.json b/parser/testdata/03546_03545_analyzer_correlated_subquery_exists_asterisk_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03546_03545_analyzer_correlated_subquery_exists_asterisk_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03546_03545_analyzer_correlated_subquery_exists_asterisk_crash/query.sql b/parser/testdata/03546_03545_analyzer_correlated_subquery_exists_asterisk_crash/query.sql new file mode 100644 index 000000000..7de8995f2 --- /dev/null +++ b/parser/testdata/03546_03545_analyzer_correlated_subquery_exists_asterisk_crash/query.sql @@ -0,0 +1,9 @@ +set enable_analyzer = 1; +set allow_experimental_correlated_subqueries = 1; + +CREATE TABLE test (`i1` Int64, `i2` Int64, `i3` Int64, `i4` Int64, `i5` Int64, `i6` Int64, `i7` Int64, `i8` Int64, `i9` Int64, `i10` Int64) ENGINE = MergeTree ORDER BY tuple(); + +SELECT * * number +FROM numbers( + exists(SELECT 1 FROM (SELECT 53, *, materialize(toNullable(53)) FROM test AS t2 PREWHERE * OR (materialize(15) IS NOT NULL) WHERE * OR 1) LIMIT 1) = t1.i1, 5 + ) -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/03546_add_distinct_to_in_clause/ast.json b/parser/testdata/03546_add_distinct_to_in_clause/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03546_add_distinct_to_in_clause/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03546_add_distinct_to_in_clause/metadata.json b/parser/testdata/03546_add_distinct_to_in_clause/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03546_add_distinct_to_in_clause/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03546_add_distinct_to_in_clause/query.sql b/parser/testdata/03546_add_distinct_to_in_clause/query.sql new file mode 100644 index 000000000..64c05e71b --- /dev/null +++ b/parser/testdata/03546_add_distinct_to_in_clause/query.sql @@ -0,0 +1,62 @@ +-- Tags: shard, no-parallel + +drop table if exists local_table_1; +drop table if exists local_table_2; +drop table if exists distributed_table_1; +drop table if exists distributed_table_2; + +SET prefer_localhost_replica = 0; +SET allow_experimental_analyzer = 1; +SET distributed_product_mode = 'allow'; +SET prefer_global_in_and_join = 1; +SET max_rows_to_read = 100000000; +SET read_overflow_mode = 'break'; + +create table local_table_1 (id int) engine = MergeTree order by id; +create table local_table_2 (id int) engine = MergeTree order by id; +create table distributed_table_1 (id int) engine = Distributed(test_cluster_two_shard_three_replicas_localhost, currentDatabase(), local_table_1); +create table distributed_table_2 (id int) engine = Distributed(test_cluster_two_shard_three_replicas_localhost, currentDatabase(), local_table_2); + +insert into local_table_1 select number from numbers(100); + +insert into local_table_2 select 1 from numbers(1000000); +insert into local_table_2 select 2 from numbers(1000000); +insert into local_table_2 select 3 from numbers(1000000); + +select id from distributed_table_1 where id in (select id from distributed_table_2) settings enable_add_distinct_to_in_subqueries = 1; +-- Query with DISTINCT optimization disabled +select id from distributed_table_1 where id in (select id from distributed_table_2) settings enable_add_distinct_to_in_subqueries = 0; + +SYSTEM FLUSH LOGS query_log; + + +-- 
Compare read_rows and network traffic between the with_distinct and without_distinct runs +WITH + -- Get the values for with_distinct + (SELECT read_rows, ProfileEvents + FROM system.query_log + WHERE current_database = currentDatabase() + AND query LIKE '%select id from distributed_table_1 where id in (select id from distributed_table_2) settings enable_add_distinct_to_in_subqueries = 1%' + AND type = 'QueryFinish' + AND is_initial_query + ORDER BY event_time DESC LIMIT 1) AS q1, + + -- Get the values for without_distinct + (SELECT read_rows, ProfileEvents + FROM system.query_log + WHERE current_database = currentDatabase() + AND query LIKE '%select id from distributed_table_1 where id in (select id from distributed_table_2) settings enable_add_distinct_to_in_subqueries = 0%' + AND type = 'QueryFinish' + AND is_initial_query + ORDER BY event_time DESC LIMIT 1) AS q2 + +SELECT + q1.read_rows < q2.read_rows AS read_rows_optimization_effective, + q1.ProfileEvents['NetworkSendBytes'] < q2.ProfileEvents['NetworkSendBytes'] AS send_optimization_effective, + q1.ProfileEvents['NetworkReceiveBytes'] < q2.ProfileEvents['NetworkReceiveBytes'] AS recv_optimization_effective; + + +drop table if exists local_table_1; +drop table if exists local_table_2; +drop table if exists distributed_table_1; +drop table if exists distributed_table_2; diff --git a/parser/testdata/03546_join_key_value_storage_with_casted_key/ast.json b/parser/testdata/03546_join_key_value_storage_with_casted_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03546_join_key_value_storage_with_casted_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03546_join_key_value_storage_with_casted_key/metadata.json b/parser/testdata/03546_join_key_value_storage_with_casted_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03546_join_key_value_storage_with_casted_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03546_join_key_value_storage_with_casted_key/query.sql b/parser/testdata/03546_join_key_value_storage_with_casted_key/query.sql new file mode 100644 index 000000000..d66353100 --- /dev/null +++ b/parser/testdata/03546_join_key_value_storage_with_casted_key/query.sql @@ -0,0 +1,9 @@ +-- Tags: use-rocksdb +-- Old analyzer doesn't support this case +SET enable_analyzer=1; + +CREATE TABLE t0 (c0 Bool) ENGINE = EmbeddedRocksDB PRIMARY KEY (c0); +INSERT INTO TABLE t0 (c0) VALUES (TRUE); + +SELECT t0.c0 FROM (SELECT NULL c0) v0 RIGHT JOIN t0 USING (c0); +SELECT t0.c0 FROM (SELECT NULL c0) v0 LEFT JOIN t0 USING (c0); diff --git a/parser/testdata/03546_leftover_dependencies/ast.json b/parser/testdata/03546_leftover_dependencies/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03546_leftover_dependencies/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03546_leftover_dependencies/metadata.json b/parser/testdata/03546_leftover_dependencies/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03546_leftover_dependencies/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03546_leftover_dependencies/query.sql b/parser/testdata/03546_leftover_dependencies/query.sql new file mode 100644 index 000000000..38fd960b8 --- /dev/null +++ b/parser/testdata/03546_leftover_dependencies/query.sql @@ -0,0 +1,29 @@ +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier} SYNC;
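+ +-- Presumably the three blocks below check that modifying a materialized view's comment, renaming it, or renaming the whole database leaves no stale dependency records that would break the final DROP DATABASE. + +CREATE DATABASE 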
{CLICKHOUSE_DATABASE:Identifier} ENGINE = Atomic; +USE {CLICKHOUSE_DATABASE:Identifier}; +CREATE TABLE data (id UInt64) ENGINE=MergeTree() ORDER BY id; +CREATE MATERIALIZED VIEW mv1 (id UInt64) ENGINE=MergeTree() ORDER BY id AS SELECT id + 1 AS id FROM data; +INSERT INTO data VALUES (0); +SELECT * FROM mv1; +ALTER TABLE mv1 MODIFY COMMENT 'TEST COMMENT'; +RENAME TABLE mv1 TO mv2; +DROP DATABASE {CLICKHOUSE_DATABASE:Identifier} SYNC; + +CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier} ENGINE = Atomic; +USE {CLICKHOUSE_DATABASE:Identifier}; +CREATE TABLE data (id UInt64) ENGINE=MergeTree() ORDER BY id; +CREATE MATERIALIZED VIEW mv1 (id UInt64) ENGINE=MergeTree() ORDER BY id AS SELECT id + 2 AS id FROM data; +INSERT INTO data VALUES (0); +SELECT * FROM mv1; +ALTER TABLE mv1 MODIFY COMMENT 'TEST COMMENT'; +RENAME DATABASE {CLICKHOUSE_DATABASE:Identifier} TO {CLICKHOUSE_DATABASE_1:Identifier}; +DROP DATABASE {CLICKHOUSE_DATABASE_1:Identifier} SYNC; + +CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier} ENGINE = Atomic; +USE {CLICKHOUSE_DATABASE:Identifier}; +CREATE TABLE data (id UInt64) ENGINE=MergeTree() ORDER BY id; +CREATE MATERIALIZED VIEW mv1 (id UInt64) ENGINE=MergeTree() ORDER BY id AS SELECT id + 3 AS id FROM data; +INSERT INTO data VALUES (0); +SELECT * FROM mv1; +DROP DATABASE {CLICKHOUSE_DATABASE:Identifier} SYNC; \ No newline at end of file diff --git a/parser/testdata/03546_merge_tree_projection_shared_snapshot/ast.json b/parser/testdata/03546_merge_tree_projection_shared_snapshot/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03546_merge_tree_projection_shared_snapshot/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03546_merge_tree_projection_shared_snapshot/metadata.json b/parser/testdata/03546_merge_tree_projection_shared_snapshot/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03546_merge_tree_projection_shared_snapshot/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03546_merge_tree_projection_shared_snapshot/query.sql b/parser/testdata/03546_merge_tree_projection_shared_snapshot/query.sql new file mode 100644 index 000000000..eb1e46a31 --- /dev/null +++ b/parser/testdata/03546_merge_tree_projection_shared_snapshot/query.sql @@ -0,0 +1,26 @@ +-- { echo ON } + +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + `a` Int32, + `b` Int32, + PROJECTION p + ( + SELECT + a, + b, + _part_offset + ORDER BY b + ) +) +ENGINE = MergeTree +ORDER BY () +SETTINGS index_granularity_bytes = 10485760, index_granularity = 8192; + +INSERT INTO test VALUES (1, 1); + +SELECT sum(l._part_offset = r._parent_part_offset) FROM test l JOIN mergeTreeProjection(currentDatabase(), test, p) r USING (a) SETTINGS enable_analyzer = 1, enable_shared_storage_snapshot_in_query = 1; + +DROP TABLE test; diff --git a/parser/testdata/03546_multiple_join_use_nulls_matcher/ast.json b/parser/testdata/03546_multiple_join_use_nulls_matcher/ast.json new file mode 100644 index 000000000..2b49ab18b --- /dev/null +++ b/parser/testdata/03546_multiple_join_use_nulls_matcher/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tableA (children 1)" + }, + { + "explain": " Identifier tableA" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001214934, + "rows_read": 2, + "bytes_read": 64 + } +} diff --git a/parser/testdata/03546_multiple_join_use_nulls_matcher/metadata.json 
b/parser/testdata/03546_multiple_join_use_nulls_matcher/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03546_multiple_join_use_nulls_matcher/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03546_multiple_join_use_nulls_matcher/query.sql b/parser/testdata/03546_multiple_join_use_nulls_matcher/query.sql new file mode 100644 index 000000000..6f57fd24f --- /dev/null +++ b/parser/testdata/03546_multiple_join_use_nulls_matcher/query.sql @@ -0,0 +1,52 @@ +DROP TABLE IF EXISTS tableA; +DROP TABLE IF EXISTS tableB; +DROP TABLE IF EXISTS tableC; + +CREATE TABLE tableA ( key String ) ENGINE = MergeTree() ORDER BY tuple(); +CREATE TABLE tableB ( key String, value2 Int32 ) ENGINE = MergeTree() ORDER BY tuple(); +CREATE TABLE tableC ( key String ) ENGINE = MergeTree() ORDER BY tuple(); + +INSERT INTO tableA VALUES ('a'), ('b'), ('c'), ('d'), ('e'), ('f'); +INSERT INTO tableB VALUES ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7), ('h', 8); +INSERT INTO tableC VALUES ('d'), ('e'), ('f'), ('g'), ('h'), ('i'), ('j'); + +SET enable_analyzer = 1; + +SET join_use_nulls = 1; + +SELECT value2 = 1 as x, toTypeName(x) +FROM ( + SELECT * + FROM tableA + LEFT JOIN tableB ON tableB.key = tableA.key + LEFT JOIN tableC AS t ON tableB.key = t.key +) ORDER BY 1; + +SELECT value2 = 1 as x, toTypeName(x) +FROM ( + SELECT tableB.* + FROM tableA + LEFT JOIN tableB ON tableB.key = tableA.key + LEFT JOIN tableC AS t ON tableB.key = t.key +) ORDER BY 1; + +SELECT value2 = 1 as x, toTypeName(x) +FROM ( + SELECT tableB.* + FROM tableA + INNER JOIN tableB ON tableB.key = tableA.key + LEFT JOIN tableC AS t ON tableB.key = t.key +) ORDER BY 1; + + +SELECT value2 = 1 as x, toTypeName(x) +FROM ( + SELECT tableB.* + FROM tableA + INNER JOIN tableB ON tableB.key = tableA.key + RIGHT JOIN tableC AS t ON tableB.key = t.key +) ORDER BY 1; + +DROP TABLE IF EXISTS tableA; +DROP TABLE IF EXISTS tableB; +DROP TABLE IF EXISTS tableC; diff --git a/parser/testdata/03546_paimon_all_supported_type/ast.json b/parser/testdata/03546_paimon_all_supported_type/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03546_paimon_all_supported_type/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03546_paimon_all_supported_type/metadata.json b/parser/testdata/03546_paimon_all_supported_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03546_paimon_all_supported_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03546_paimon_all_supported_type/query.sql b/parser/testdata/03546_paimon_all_supported_type/query.sql new file mode 100644 index 000000000..92137ea79 --- /dev/null +++ b/parser/testdata/03546_paimon_all_supported_type/query.sql @@ -0,0 +1,56 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS +set enable_time_time64_type=1, session_timezone='UTC'; + +desc paimonS3(s3_conn, filename='paimon_all_types'); + +select '==='; + +SELECT f_boolean, +f_char, +f_varchar, +f_string, +f_binary, +f_varbinary, +f_bytes, +f_decimal, +f_decimal2, +f_decimal3, +f_tinyint, +f_smallint, +f_int, +f_bigint, +f_float, +f_double, +f_date, +f_time, +f_timestamp, +f_timestamp2, +toTimeZone(f_timestamp3, 'Asia/Shanghai'), +f_boolean_nn, +f_char_nn, +f_varchar_nn, +f_string_nn, +f_binary_nn, +f_varbinary_nn, +f_bytes_nn, +f_decimal_nn, +f_decimal2_nn, +f_decimal3_nn, +f_tinyint_nn, +f_smallint_nn, +f_int_nn, +f_bigint_nn, +f_float_nn, +f_double_nn, 
+f_date_nn, +f_time_nn, +f_timestamp_nn, +f_timestamp2_nn, +toTimeZone(f_timestamp3_nn, 'Asia/Shanghai'), +f_array, +f_map +FROM paimonS3(s3_conn, filename='paimon_all_types') ORDER BY f_int_nn; + +select '==='; +SELECT count(1) FROM paimonS3(s3_conn, filename='paimon_all_types'); diff --git a/parser/testdata/03546_part_granule_offset/ast.json b/parser/testdata/03546_part_granule_offset/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03546_part_granule_offset/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03546_part_granule_offset/metadata.json b/parser/testdata/03546_part_granule_offset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03546_part_granule_offset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03546_part_granule_offset/query.sql b/parser/testdata/03546_part_granule_offset/query.sql new file mode 100644 index 000000000..4ec838207 --- /dev/null +++ b/parser/testdata/03546_part_granule_offset/query.sql @@ -0,0 +1,17 @@ +-- { echo ON } + +DROP TABLE IF EXISTS test_part_granule_offset; + +CREATE TABLE test_part_granule_offset (n UInt64) ENGINE = MergeTree ORDER BY () SETTINGS index_granularity = 2; + +INSERT INTO test_part_granule_offset SELECT number FROM numbers(101); + +OPTIMIZE TABLE test_part_granule_offset FINAL; + +SELECT _part_granule_offset FROM test_part_granule_offset WHERE n < 10 ORDER BY all; + +SELECT _part_granule_offset, groupArraySorted(200)(n) FROM test_part_granule_offset GROUP BY _part_granule_offset ORDER BY ALL; + +SELECT * FROM test_part_granule_offset WHERE _part_granule_offset % 10 = 1 ORDER BY ALL; + +DROP TABLE test_part_granule_offset; diff --git a/parser/testdata/03547_analyzer_correlated_columns_check_bug/ast.json b/parser/testdata/03547_analyzer_correlated_columns_check_bug/ast.json new file mode 100644 index 000000000..6f1c38e1f --- /dev/null +++ b/parser/testdata/03547_analyzer_correlated_columns_check_bug/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery users (children 3)" + }, + { + "explain": " Identifier users" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration uid (children 1)" + }, + { + "explain": " DataType Int16" + }, + { + "explain": " ColumnDeclaration name (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration age (children 1)" + }, + { + "explain": " DataType Int16" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.00114362, + "rows_read": 12, + "bytes_read": 420 + } +} diff --git a/parser/testdata/03547_analyzer_correlated_columns_check_bug/metadata.json b/parser/testdata/03547_analyzer_correlated_columns_check_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03547_analyzer_correlated_columns_check_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03547_analyzer_correlated_columns_check_bug/query.sql b/parser/testdata/03547_analyzer_correlated_columns_check_bug/query.sql new file mode 100644 index 000000000..8360930a0 --- /dev/null +++ b/parser/testdata/03547_analyzer_correlated_columns_check_bug/query.sql @@ -0,0 +1,9 @@ +CREATE TABLE users (uid Int16, name String, age Int16) 
ENGINE=Memory; + +INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users VALUES (6666, 'Ksenia', 48); +INSERT INTO users VALUES (8888, 'Alice', 50); + +CREATE ROW POLICY a ON users FOR SELECT USING arrayExists(a -> a = age, [uid]) TO ALL; + +SELECT * FROM users; diff --git a/parser/testdata/03547_analyzer_correlated_subqueries/ast.json b/parser/testdata/03547_analyzer_correlated_subqueries/ast.json new file mode 100644 index 000000000..cac17a721 --- /dev/null +++ b/parser/testdata/03547_analyzer_correlated_subqueries/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001409385, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03547_analyzer_correlated_subqueries/metadata.json b/parser/testdata/03547_analyzer_correlated_subqueries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03547_analyzer_correlated_subqueries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03547_analyzer_correlated_subqueries/query.sql b/parser/testdata/03547_analyzer_correlated_subqueries/query.sql new file mode 100644 index 000000000..cb290d851 --- /dev/null +++ b/parser/testdata/03547_analyzer_correlated_subqueries/query.sql @@ -0,0 +1,21 @@ +SET enable_analyzer = 1; +SET allow_experimental_correlated_subqueries = 1; + +SELECT number +FROM numbers(10) AS t +WHERE exists(( + SELECT * + FROM + ( + SELECT number * 2 AS number + FROM + ( + SELECT number + FROM numbers(6) + WHERE (number + 2) < t.number + ) + ) + WHERE number = t.number +)) +ORDER BY number +SETTINGS query_plan_merge_filter_into_join_condition = 0; diff --git a/parser/testdata/03547_equals_optimizer_lowcardinality/ast.json b/parser/testdata/03547_equals_optimizer_lowcardinality/ast.json new file mode 100644 index 000000000..372eeb341 --- /dev/null +++ b/parser/testdata/03547_equals_optimizer_lowcardinality/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001061763, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03547_equals_optimizer_lowcardinality/metadata.json b/parser/testdata/03547_equals_optimizer_lowcardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03547_equals_optimizer_lowcardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03547_equals_optimizer_lowcardinality/query.sql b/parser/testdata/03547_equals_optimizer_lowcardinality/query.sql new file mode 100644 index 000000000..784284fc1 --- /dev/null +++ b/parser/testdata/03547_equals_optimizer_lowcardinality/query.sql @@ -0,0 +1,41 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test (d1 Dynamic(max_types=2), d2 Dynamic(max_types=2)) ENGINE = Memory; + +INSERT INTO test VALUES (42, 42), (42, 43), (43, 42), ('abc', 'abc'), ('abc', 'abd'), ('abd', 'abc'), +([1,2,3], [1,2,3]), ([1,2,3], [1,2,4]), ([1,2,4], [1,2,3]), +('2020-01-01', '2020-01-01'), ('2020-01-01', '2020-01-02'), ('2020-01-02', '2020-01-01'), +(NULL, NULL), (42, 'abc'), ('abc', 42), (42, [1,2,3]), ([1,2,3], 42), (42, NULL), (NULL, 42), +('abc', [1,2,3]), ([1,2,3], 'abc'), ('abc', NULL), (NULL, 'abc'), ([1,2,3], NULL), (NULL, [1,2,3]), +(42, '2020-01-01'), 
('2020-01-01', 42), ('2020-01-01', 'abc'), ('abc', '2020-01-01'), +('2020-01-01', [1,2,3]), ([1,2,3], '2020-01-01'), ('2020-01-01', NULL), (NULL, '2020-01-01'); + +SELECT + dynamicType(d2) +FROM test +GROUP BY + dynamicType(d2) +HAVING + 0 + OR ( + ( + materialize(toLowCardinality(0)) = 0 + ) = anyLast(1) + ) +ORDER BY 1; + +SELECT + dynamicType(d2) +FROM test +GROUP BY + dynamicType(d2) +HAVING + 0 + OR ( + ( + materialize(toLowCardinality(0)) = 0 + ) = anyLast(0) + ) +ORDER BY 1; + +DROP TABLE IF EXISTS test; diff --git a/parser/testdata/03547_reinterpret_to_array/ast.json b/parser/testdata/03547_reinterpret_to_array/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03547_reinterpret_to_array/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03547_reinterpret_to_array/metadata.json b/parser/testdata/03547_reinterpret_to_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03547_reinterpret_to_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03547_reinterpret_to_array/query.sql b/parser/testdata/03547_reinterpret_to_array/query.sql new file mode 100644 index 000000000..347e91a2b --- /dev/null +++ b/parser/testdata/03547_reinterpret_to_array/query.sql @@ -0,0 +1,49 @@ +-- Tests for reinterpret(<source string>, <array_of_fixed_type>) + +SELECT 'Verify correct destination type is instantiated'; +SELECT toTypeName(reinterpret(x'01010101', 'Array(Int32)')); +SELECT toTypeName(reinterpret(x'01010101', 'Array(UInt32)')); +SELECT toTypeName(reinterpret(x'01010101', 'Array(Float32)')); +SELECT toTypeName(reinterpret(x'0101010101010101', 'Array(Int64)')); +SELECT toTypeName(reinterpret(x'0101010101010101', 'Array(UInt64)')); +SELECT toTypeName(reinterpret(x'0101010101010101', 'Array(Float64)')); +SELECT toTypeName(reinterpret(x'0101010101010101', 'Array(FixedString(4))')); +SELECT toTypeName(reinterpret(x'0101010101010101', 'Array(Decimal)')); +SELECT toTypeName(reinterpret(x'0101010101010101', 'Array(Decimal64(4))')); + +SELECT 'Unsupported destination type'; +SELECT toTypeName(reinterpret(x'0101010101010101', 'Array(String)')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT toTypeName(reinterpret(x'0101010101010101', 'Array(Array(Int32))')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 'Verify few output values are correct'; + +SELECT reinterpret(x'01010101', 'Int32'); +SELECT reinterpret(x'01010101', 'Array(Int32)'); +SELECT reinterpret(x'0101010102020202', 'Array(Int32)'); + +SELECT reinterpret(x'3108b440', 'Float32'); +SELECT reinterpret(x'3108d440', 'Float32'); +SELECT reinterpret(x'3108b4403108d4403108b4403108d440', 'Array(Float32)'); +SELECT reinterpret(repeat(x'3108b4403108d4403108b4403108d440', 10), 'Array(Float32)'); +SELECT length(reinterpret(repeat(x'3108b4403108d4403108b4403108d440', 10), 'Array(Float32)')); + +SELECT reinterpret('abab', 'Array(FixedString(4))'); +SELECT length(reinterpret(repeat('abab', 100), 'Array(FixedString(4))')) = 100; + +SELECT 'Input data of wrong length should throw error'; +SELECT reinterpret('ababc', 'Array(FixedString(4))'); -- { serverError BAD_ARGUMENTS } +SELECT reinterpret(x'3108d4', 'Array(Float32)'); -- { serverError BAD_ARGUMENTS } +SELECT reinterpret(concat(repeat(x'3108b4403108d4403108b4403108d440', 10), x'aa'), 'Array(Float32)'); -- { serverError BAD_ARGUMENTS } + +SELECT 'Wrong source type should throw error'; +SELECT reinterpret(95, 'Array(FixedString(4))'); -- { serverError 
ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 'Couple of tests with FixedString input'; +DROP TABLE IF EXISTS tab1; +CREATE TABLE tab1 (id Int32, s FixedString(8)) Engine = Memory; +INSERT INTO tab1 VALUES (1, x'3108b4403108d440'); +INSERT INTO tab1 VALUES (2, x'3108d4403108b440'); + +SELECT reinterpret(s, 'Array(Float32)') FROM tab1 ORDER BY id; + +DROP TABLE tab1; diff --git a/parser/testdata/03548_analyzer_indentifier_resolution_invariant/ast.json b/parser/testdata/03548_analyzer_indentifier_resolution_invariant/ast.json new file mode 100644 index 000000000..09408a26f --- /dev/null +++ b/parser/testdata/03548_analyzer_indentifier_resolution_invariant/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001145156, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03548_analyzer_indentifier_resolution_invariant/metadata.json b/parser/testdata/03548_analyzer_indentifier_resolution_invariant/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03548_analyzer_indentifier_resolution_invariant/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03548_analyzer_indentifier_resolution_invariant/query.sql b/parser/testdata/03548_analyzer_indentifier_resolution_invariant/query.sql new file mode 100644 index 000000000..f63ec1f5b --- /dev/null +++ b/parser/testdata/03548_analyzer_indentifier_resolution_invariant/query.sql @@ -0,0 +1,4 @@ +SET allow_experimental_analyzer = 1; + +CREATE TABLE t0 (c0 Int) ENGINE = Memory; +CREATE VIEW v0 AS (SELECT 1 AS a0, (1) IN a0 FROM t0 tx JOIN t0 ty ON 1 CROSS JOIN t0 tz); -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/03548_array_group_last_serialization/ast.json b/parser/testdata/03548_array_group_last_serialization/ast.json new file mode 100644 index 000000000..b833ad598 --- /dev/null +++ b/parser/testdata/03548_array_group_last_serialization/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mv0 (children 1)" + }, + { + "explain": " Identifier mv0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001391209, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/03548_array_group_last_serialization/metadata.json b/parser/testdata/03548_array_group_last_serialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03548_array_group_last_serialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03548_array_group_last_serialization/query.sql b/parser/testdata/03548_array_group_last_serialization/query.sql new file mode 100644 index 000000000..d10c61957 --- /dev/null +++ b/parser/testdata/03548_array_group_last_serialization/query.sql @@ -0,0 +1,45 @@ +DROP VIEW IF EXISTS mv0; +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; + +CREATE TABLE t0 +( + k0 String, + v0 Nullable(String) +) +ENGINE = MergeTree() +ORDER BY (k0); + +CREATE TABLE t1 +( + k0 String, + v00 AggregateFunction(groupArrayLast(1), Nullable(String)) +) +ENGINE = MergeTree +ORDER BY (k0); + +CREATE MATERIALIZED VIEW mv0 TO t1 +( + k0 String, + v0 AggregateFunction(groupArrayLast(1), Nullable(String)) +) +AS SELECT + k0, + groupArrayLastState(1)(v0) AS v00 +FROM t0 +GROUP BY k0; + +INSERT INTO t0 SELECT * FROM generateRandom(2, 9) LIMIT 9; + +SELECT v00 FROM t1 
ORDER BY k0 FORMAT JSONEachRow; + +DROP VIEW mv0; +DROP TABLE t0; +DROP TABLE t1; + +DROP TABLE IF EXISTS t_memory; +CREATE TABLE t_memory (k Int, v AggregateFunction(groupArrayLast(1), Nullable(String))) ENGINE = Memory; +INSERT INTO t_memory SELECT 1, groupArrayLastState(1)(*) FROM values('v Nullable(String)', null); +INSERT INTO t_memory SELECT 2, groupArrayLastState(1)(*) FROM values('v Nullable(String)', 'foo'); +SELECT v FROM t_memory ORDER BY k FORMAT JSONEachRow; +DROP TABLE t_memory; diff --git a/parser/testdata/03548_optimize_syntax_fuse_functions_clash/ast.json b/parser/testdata/03548_optimize_syntax_fuse_functions_clash/ast.json new file mode 100644 index 000000000..137ceaa8b --- /dev/null +++ b/parser/testdata/03548_optimize_syntax_fuse_functions_clash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00111808, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03548_optimize_syntax_fuse_functions_clash/metadata.json b/parser/testdata/03548_optimize_syntax_fuse_functions_clash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03548_optimize_syntax_fuse_functions_clash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03548_optimize_syntax_fuse_functions_clash/query.sql b/parser/testdata/03548_optimize_syntax_fuse_functions_clash/query.sql new file mode 100644 index 000000000..85d4497b0 --- /dev/null +++ b/parser/testdata/03548_optimize_syntax_fuse_functions_clash/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (number UInt64) ENGINE = MergeTree() ORDER BY number AS SELECT * FROM numbers(2); + +SELECT sum(number) +FROM t1 +PREWHERE number IN ( SELECT sum(number) FROM t1 GROUP BY number ) + WHERE number IN ( SELECT sum(number) FROM t1 GROUP BY number ) +GROUP BY number +ORDER BY number +SETTINGS optimize_syntax_fuse_functions = 1 +; diff --git a/parser/testdata/03549_aggregate_arithmetic_logical_error/ast.json b/parser/testdata/03549_aggregate_arithmetic_logical_error/ast.json new file mode 100644 index 000000000..2da121831 --- /dev/null +++ b/parser/testdata/03549_aggregate_arithmetic_logical_error/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sumMerge (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function initializeAggregation (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'sumState'" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '1.1.1.1'" + }, + { + "explain": " Literal 'IPv4'" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.00113794, + "rows_read": 16, + "bytes_read": 643 + } +} diff --git a/parser/testdata/03549_aggregate_arithmetic_logical_error/metadata.json 
b/parser/testdata/03549_aggregate_arithmetic_logical_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03549_aggregate_arithmetic_logical_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03549_aggregate_arithmetic_logical_error/query.sql b/parser/testdata/03549_aggregate_arithmetic_logical_error/query.sql new file mode 100644 index 000000000..3ece5aec5 --- /dev/null +++ b/parser/testdata/03549_aggregate_arithmetic_logical_error/query.sql @@ -0,0 +1,6 @@ +SELECT sumMerge(initializeAggregation('sumState', 1) * CAST('1.1.1.1', 'IPv4')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +DROP TABLE IF EXISTS t; +CREATE TABLE t (a IPv4, b BFloat16) ENGINE = Memory; +SELECT sumMerge(y * a) FROM (SELECT a, sumState(b) AS y FROM t GROUP BY a); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +DROP TABLE t; diff --git a/parser/testdata/03549_analyzer_fix_filter_removal/ast.json b/parser/testdata/03549_analyzer_fix_filter_removal/ast.json new file mode 100644 index 000000000..09464e259 --- /dev/null +++ b/parser/testdata/03549_analyzer_fix_filter_removal/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001033064, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03549_analyzer_fix_filter_removal/metadata.json b/parser/testdata/03549_analyzer_fix_filter_removal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03549_analyzer_fix_filter_removal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03549_analyzer_fix_filter_removal/query.sql b/parser/testdata/03549_analyzer_fix_filter_removal/query.sql new file mode 100644 index 000000000..c114c7eed --- /dev/null +++ b/parser/testdata/03549_analyzer_fix_filter_removal/query.sql @@ -0,0 +1,8 @@ +SET allow_experimental_analyzer = 1; + +CREATE TABLE m (`key` UInt32) ENGINE = Merge(currentDatabase(), 'a'); +CREATE TABLE b (`key` UInt32, `ID` UInt32) ENGINE = MergeTree ORDER BY key; + +CREATE TABLE a1 (`day` Date, `id` UInt32) ENGINE = Distributed('test_cluster_two_shards', currentDatabase(), a1_replicated, id); + +SELECT * FROM m INNER JOIN b USING (key) WHERE ID = 1; -- { serverError UNKNOWN_TABLE, ALL_CONNECTION_TRIES_FAILED } diff --git a/parser/testdata/03549_conv_function/ast.json b/parser/testdata/03549_conv_function/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03549_conv_function/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03549_conv_function/metadata.json b/parser/testdata/03549_conv_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03549_conv_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03549_conv_function/query.sql b/parser/testdata/03549_conv_function/query.sql new file mode 100644 index 000000000..844633e12 --- /dev/null +++ b/parser/testdata/03549_conv_function/query.sql @@ -0,0 +1,59 @@ +-- Basic functionality tests +SELECT conv('10', 10, 2); -- 1010 +SELECT conv('255', 10, 16); -- FF +SELECT conv('FF', 16, 10); -- 255 +SELECT conv('1010', 2, 8); -- 12 +SELECT conv('12', 8, 10); -- 10 + +-- Case insensitive hex +SELECT conv('ff', 16, 10); -- 255 +SELECT conv('AbC', 16, 10); -- 2748 + +-- Negative numbers +SELECT conv('-10', 10, 2); -- 
1111111111111111111111111111111111111111111111111111111111110110 +SELECT conv('-255', 10, 16); -- FFFFFFFFFFFFFF01 +SELECT conv('-1', 10, 16); -- FFFFFFFFFFFFFFFF + +-- Edge cases with whitespace +SELECT conv(' 123 ', 10, 16); -- 7B +SELECT conv(' -456 ', 10, 8); -- 1777777777777777777070 + +-- Zero and empty cases +SELECT conv('0', 10, 2); -- 0 +SELECT conv('0', 16, 10); -- 0 + +-- Invalid characters (should stop at first invalid) +SELECT conv('123XYZ', 10, 16); -- 7B +SELECT conv('FF99GG', 16, 10); -- 65433 +SELECT conv('1012', 2, 10); -- 5 + +-- Boundary bases +SELECT conv('10', 2, 36); -- 2 +SELECT conv('ZZ', 36, 10); -- 1295 +SELECT conv('10', 36, 2); -- 100100 + +-- Large numbers (test overflow handling) +SELECT conv('18446744073709551615', 10, 16); -- Max UInt64 +SELECT conv('FFFFFFFFFFFFFFFF', 16, 10); -- Max UInt64 in hex -- 18446744073709551615 +SELECT conv('999999999999999999999', 10, 16); -- Overflow case -- FFFFFFFFFFFFFFFF + +-- Only whitespace test +SELECT conv(' ', 16, 10); -- 0 + +-- Numeric input types +SELECT conv(255, 10, 16); -- FF +SELECT conv(1010, 2, 10); -- 10 +SELECT conv(-123, 10, 16); -- FFFFFFFFFFFFFF85 + +-- Test with different column types +SELECT conv(toString(number), 10, 16) FROM system.numbers LIMIT 5; -- 0 1 2 3 4 +SELECT conv(number, 10, 2) FROM system.numbers WHERE number < 8; + +-- Const column optimization test +SELECT conv('FF', 16, 10) FROM system.numbers LIMIT 3; -- 255 255 255 + + + +-- Mixed scenarios +SELECT conv(toString(number), 10, 36), conv(toString(number), 10, 2) +FROM system.numbers WHERE number BETWEEN 10 AND 15; diff --git a/parser/testdata/03549_keeper_map_column_comments/ast.json b/parser/testdata/03549_keeper_map_column_comments/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03549_keeper_map_column_comments/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03549_keeper_map_column_comments/metadata.json b/parser/testdata/03549_keeper_map_column_comments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03549_keeper_map_column_comments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03549_keeper_map_column_comments/query.sql b/parser/testdata/03549_keeper_map_column_comments/query.sql new file mode 100644 index 000000000..2d17d1977 --- /dev/null +++ b/parser/testdata/03549_keeper_map_column_comments/query.sql @@ -0,0 +1,9 @@ +-- Tags: no-ordinary-database, no-fasttest + +DROP TABLE IF EXISTS 03549_test SYNC; + +CREATE TABLE 03549_test (key UInt64, value UInt64 COMMENT 'value') Engine=KeeperMap('/' || currentDatabase() || '/test3549') PRIMARY KEY(key); +CREATE TABLE 03549_test_another (key UInt64 COMMENT 'key', value UInt64) Engine=KeeperMap('/' || currentDatabase() || '/test3549') PRIMARY KEY(key); + +INSERT INTO 03549_test VALUES (1, 11); +SELECT * FROM 03549_test_another ORDER BY key; diff --git a/parser/testdata/03549_system_dimensional_metrics/ast.json b/parser/testdata/03549_system_dimensional_metrics/ast.json new file mode 100644 index 000000000..5e54cf070 --- /dev/null +++ b/parser/testdata/03549_system_dimensional_metrics/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function greater (children 1)" 
+ }, + { + "explain": "     ExpressionList (children 2)" + }, + { + "explain": "      Function count (children 1)" + }, + { + "explain": "       ExpressionList" + }, + { + "explain": "      Literal UInt64_0" + }, + { + "explain": "   TablesInSelectQuery (children 1)" + }, + { + "explain": "    TablesInSelectQueryElement (children 1)" + }, + { + "explain": "     TableExpression (children 1)" + }, + { + "explain": "      TableIdentifier system.dimensional_metrics" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.000984603, + "rows_read": 13, + "bytes_read": 521 + } +} diff --git a/parser/testdata/03549_system_dimensional_metrics/metadata.json b/parser/testdata/03549_system_dimensional_metrics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03549_system_dimensional_metrics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03549_system_dimensional_metrics/query.sql b/parser/testdata/03549_system_dimensional_metrics/query.sql new file mode 100644 index 000000000..4509c3452 --- /dev/null +++ b/parser/testdata/03549_system_dimensional_metrics/query.sql @@ -0,0 +1 @@ +SELECT count() > 0 FROM system.dimensional_metrics diff --git a/parser/testdata/03549_window_collation/ast.json b/parser/testdata/03549_window_collation/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03549_window_collation/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03549_window_collation/metadata.json b/parser/testdata/03549_window_collation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03549_window_collation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03549_window_collation/query.sql b/parser/testdata/03549_window_collation/query.sql new file mode 100644 index 000000000..6567d4ca9 --- /dev/null +++ b/parser/testdata/03549_window_collation/query.sql @@ -0,0 +1,9 @@ +-- Tags: no-fasttest +-- no-fasttest - collation support is disabled in fasttest builds + +set enable_analyzer = 1; + +SELECT rank() OVER (ORDER BY c0) FROM (SELECT '1' c0) v0 QUALIFY rank() OVER (ORDER BY c0 COLLATE 'vi') > 0; +SELECT rank() OVER (ORDER BY c0 COLLATE 'vi') FROM (SELECT '1' c0) v0 QUALIFY rank() OVER (ORDER BY c0 COLLATE 'vi') > 0; +SELECT rank() OVER (ORDER BY c0) FROM (SELECT '1' c0) v0 QUALIFY rank() OVER (ORDER BY c0) > 0; +SELECT rank() OVER (ORDER BY c0 DESC COLLATE 'vi') FROM (SELECT '1' c0) v0 QUALIFY rank() OVER (ORDER BY c0 ASC COLLATE 'vi') > 0; diff --git a/parser/testdata/03549_wkb_function/ast.json b/parser/testdata/03549_wkb_function/ast.json new file mode 100644 index 000000000..9dfc5d407 --- /dev/null +++ b/parser/testdata/03549_wkb_function/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery geom1 (children 1)" + }, + { + "explain": " Identifier geom1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001219437, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/03549_wkb_function/metadata.json b/parser/testdata/03549_wkb_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03549_wkb_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03549_wkb_function/query.sql b/parser/testdata/03549_wkb_function/query.sql new file mode 100644 index 000000000..771310b41 --- /dev/null +++ b/parser/testdata/03549_wkb_function/query.sql @@ -0,0 +1,31 
@@ +DROP TABLE IF EXISTS geom1; +CREATE TABLE IF NOT EXISTS geom1 (a Point) ENGINE = Memory(); +INSERT INTO geom1 VALUES((0, 0)); +INSERT INTO geom1 VALUES((0, 20)); +INSERT INTO geom1 VALUES((10, 20)); +SELECT hex(wkb(a)) FROM geom1 ORDER BY ALL; + +DROP TABLE IF EXISTS geom2; +CREATE TABLE IF NOT EXISTS geom2 (a LineString) ENGINE = Memory(); +INSERT INTO geom2 VALUES([]); +INSERT INTO geom2 VALUES([(0, 0), (10, 0), (10, 10), (0, 10)]); +SELECT hex(wkb(a)) FROM geom2 ORDER BY ALL; + +DROP TABLE IF EXISTS geom3; +CREATE TABLE IF NOT EXISTS geom3 (a Polygon) ENGINE = Memory(); +INSERT INTO geom3 VALUES([]); +INSERT INTO geom3 VALUES([[(20, 20), (50, 20), (50, 50), (20, 50)], [(30, 30), (50, 50), (50, 30)]]); +SELECT hex(wkb(a)) FROM geom3 ORDER BY ALL; + +DROP TABLE IF EXISTS geom4; +CREATE TABLE IF NOT EXISTS geom4 (a MultiLineString) ENGINE = Memory(); +INSERT INTO geom4 VALUES([]); +INSERT INTO geom4 VALUES([[(0, 0), (10, 0), (10, 10), (0, 10)], [(1, 1), (2, 2), (3, 3)]]); +SELECT hex(wkb(a)) FROM geom4 ORDER BY ALL; + +DROP TABLE IF EXISTS geom5; +CREATE TABLE IF NOT EXISTS geom5 (a MultiPolygon) ENGINE = Memory(); +INSERT INTO geom5 VALUES([]); +INSERT INTO geom5 VALUES([[[(0, 0), (10, 0), (10, 10), (0, 10)]], [[(20, 20), (50, 20), (50, 50), (20, 50)],[(30, 30), (50, 50), (50, 30)]]]); +SELECT hex(wkb(a)) FROM geom5 ORDER BY ALL; + diff --git a/parser/testdata/03550_analyzer_remote_view_columns/ast.json b/parser/testdata/03550_analyzer_remote_view_columns/ast.json new file mode 100644 index 000000000..79db2d212 --- /dev/null +++ b/parser/testdata/03550_analyzer_remote_view_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001296507, + "rows_read": 2, + "bytes_read": 61 + } +} diff --git a/parser/testdata/03550_analyzer_remote_view_columns/metadata.json b/parser/testdata/03550_analyzer_remote_view_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03550_analyzer_remote_view_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03550_analyzer_remote_view_columns/query.sql b/parser/testdata/03550_analyzer_remote_view_columns/query.sql new file mode 100644 index 000000000..8489551a5 --- /dev/null +++ b/parser/testdata/03550_analyzer_remote_view_columns/query.sql @@ -0,0 +1,42 @@ +CREATE TABLE test +( + `i1` Int64, + `i2` Int64, + `i3` Int64, + `i4` Int64, + `i5` Int64, + `i6` Int64, + `i7` Int64, + `i8` Int64, + `i9` Int64, + `i10` Int64 +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 8192; + +CREATE VIEW test_view +AS SELECT * +FROM test; + +SET prefer_localhost_replica = 0; +SET serialize_query_plan = 0; + +SELECT max(i1) +FROM remote('localhost', currentDatabase(), test_view) +SETTINGS log_comment = 'THIS IS A COMMENT TO MARK THE INITIAL QUERY'; + +SYSTEM FLUSH LOGS query_log; + +SELECT columns +FROM system.query_log +WHERE + initial_query_id = ( + SELECT query_id + FROM system.query_log + WHERE + current_database = currentDatabase() + AND log_comment = 'THIS IS A COMMENT TO MARK THE INITIAL QUERY' + LIMIT 1) + AND type = 'QueryFinish' + AND NOT is_initial_query; diff --git a/parser/testdata/03550_projection_with_part_offset_ttl/ast.json b/parser/testdata/03550_projection_with_part_offset_ttl/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/03550_projection_with_part_offset_ttl/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03550_projection_with_part_offset_ttl/metadata.json b/parser/testdata/03550_projection_with_part_offset_ttl/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03550_projection_with_part_offset_ttl/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03550_projection_with_part_offset_ttl/query.sql b/parser/testdata/03550_projection_with_part_offset_ttl/query.sql new file mode 100644 index 000000000..89872a28e --- /dev/null +++ b/parser/testdata/03550_projection_with_part_offset_ttl/query.sql @@ -0,0 +1,23 @@ +-- { echo ON } + +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + `order` int, + `indexed` int, + PROJECTION proj + ( + SELECT _part_offset + ORDER BY indexed + ) +) +ENGINE = MergeTree +ORDER BY order +TTL if(order = 1, '1970-01-02T00:00:00'::DateTime, '2030-01-01T00:00:00'::DateTime); + +INSERT INTO test SELECT 1, 10; + +OPTIMIZE TABLE test final; + +DROP TABLE test; diff --git a/parser/testdata/03550_s3queue_no_settings/ast.json b/parser/testdata/03550_s3queue_no_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03550_s3queue_no_settings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03550_s3queue_no_settings/metadata.json b/parser/testdata/03550_s3queue_no_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03550_s3queue_no_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03550_s3queue_no_settings/query.sql b/parser/testdata/03550_s3queue_no_settings/query.sql new file mode 100644 index 000000000..a4163d1dd --- /dev/null +++ b/parser/testdata/03550_s3queue_no_settings/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest + +CREATE TABLE s3_queue (name String, value UInt32) ENGINE = S3Queue('http://localhost:11111/test/{a,b,c}.tsv'); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03550_union_intersect_except_default_mode_rewrite_exception_safety/ast.json b/parser/testdata/03550_union_intersect_except_default_mode_rewrite_exception_safety/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03550_union_intersect_except_default_mode_rewrite_exception_safety/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03550_union_intersect_except_default_mode_rewrite_exception_safety/metadata.json b/parser/testdata/03550_union_intersect_except_default_mode_rewrite_exception_safety/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03550_union_intersect_except_default_mode_rewrite_exception_safety/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03550_union_intersect_except_default_mode_rewrite_exception_safety/query.sql b/parser/testdata/03550_union_intersect_except_default_mode_rewrite_exception_safety/query.sql new file mode 100644 index 000000000..e638e0314 --- /dev/null +++ b/parser/testdata/03550_union_intersect_except_default_mode_rewrite_exception_safety/query.sql @@ -0,0 +1 @@ +(SELECT 1 EXCEPT SELECT 1) SETTINGS except_default_mode = ''; -- { serverError EXPECTED_ALL_OR_DISTINCT } diff --git a/parser/testdata/03550_variant_extend_union/ast.json b/parser/testdata/03550_variant_extend_union/ast.json new file mode 100644 index 000000000..fcc456bd2 --- /dev/null +++ 
b/parser/testdata/03550_variant_extend_union/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001079469, + "rows_read": 5, + "bytes_read": 169 + } +} diff --git a/parser/testdata/03550_variant_extend_union/metadata.json b/parser/testdata/03550_variant_extend_union/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03550_variant_extend_union/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03550_variant_extend_union/query.sql b/parser/testdata/03550_variant_extend_union/query.sql new file mode 100644 index 000000000..c625e661c --- /dev/null +++ b/parser/testdata/03550_variant_extend_union/query.sql @@ -0,0 +1,20 @@ +SELECT * +FROM +( + ( + ( + SELECT '{"a":42}'::JSON::Variant(JSON, String)::Variant(JSON, String, Array(String)) + ) + EXCEPT ALL + ( + SELECT 'b'::Variant(JSON, String)::Variant(JSON, String, Array(String)) + ) + ) + UNION ALL + ( + ( + SELECT ['c']::Array(String)::Variant(JSON, String, Array(String)) + ) + ) +) +ORDER BY ALL SETTINGS allow_suspicious_types_in_order_by = 1; diff --git a/parser/testdata/03551_cast_decimal_to_float/ast.json b/parser/testdata/03551_cast_decimal_to_float/ast.json new file mode 100644 index 000000000..4d3ec6b53 --- /dev/null +++ b/parser/testdata/03551_cast_decimal_to_float/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '9'" + }, + { + "explain": " Literal 'Decimal(76, 38)'" + }, + { + "explain": " Literal 'Float64'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001197785, + "rows_read": 11, + "bytes_read": 408 + } +} diff --git a/parser/testdata/03551_cast_decimal_to_float/metadata.json b/parser/testdata/03551_cast_decimal_to_float/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03551_cast_decimal_to_float/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03551_cast_decimal_to_float/query.sql b/parser/testdata/03551_cast_decimal_to_float/query.sql new file mode 100644 index 000000000..05b61e207 --- /dev/null +++ b/parser/testdata/03551_cast_decimal_to_float/query.sql @@ -0,0 +1,3 @@ +SELECT 9::Decimal(76, 38)::Float64; +SELECT 9::Decimal(76, 38)::Float32; +SELECT 9::Decimal(76, 38)::BFloat16; diff --git a/parser/testdata/03551_no_alter_for_columns_to_sum/ast.json b/parser/testdata/03551_no_alter_for_columns_to_sum/ast.json new file mode 100644 index 000000000..91a1b40f2 --- /dev/null +++ b/parser/testdata/03551_no_alter_for_columns_to_sum/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " 
Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001451101, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03551_no_alter_for_columns_to_sum/metadata.json b/parser/testdata/03551_no_alter_for_columns_to_sum/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03551_no_alter_for_columns_to_sum/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03551_no_alter_for_columns_to_sum/query.sql b/parser/testdata/03551_no_alter_for_columns_to_sum/query.sql new file mode 100644 index 000000000..14fbaab1e --- /dev/null +++ b/parser/testdata/03551_no_alter_for_columns_to_sum/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS t0; +SET allow_suspicious_primary_key = 1; +CREATE TABLE t0 (c0 Int) ENGINE = SummingMergeTree((c0)) ORDER BY tuple(); +ALTER TABLE t0 RENAME COLUMN c0 TO c1; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +DROP TABLE t0; diff --git a/parser/testdata/03552_inconsistent_formatting_operator_as_table_function/ast.json b/parser/testdata/03552_inconsistent_formatting_operator_as_table_function/ast.json new file mode 100644 index 000000000..88126912f --- /dev/null +++ b/parser/testdata/03552_inconsistent_formatting_operator_as_table_function/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function globalIn (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.000970761, + "rows_read": 12, + "bytes_read": 449 + } +} diff --git a/parser/testdata/03552_inconsistent_formatting_operator_as_table_function/metadata.json b/parser/testdata/03552_inconsistent_formatting_operator_as_table_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03552_inconsistent_formatting_operator_as_table_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03552_inconsistent_formatting_operator_as_table_function/query.sql b/parser/testdata/03552_inconsistent_formatting_operator_as_table_function/query.sql new file mode 100644 index 000000000..be82f1de4 --- /dev/null +++ b/parser/testdata/03552_inconsistent_formatting_operator_as_table_function/query.sql @@ -0,0 +1,14 @@ +SELECT * FROM globalIn('a', 1); -- { serverError UNKNOWN_FUNCTION } +SELECT * FROM plus(1, 2); -- { serverError UNKNOWN_FUNCTION } +SELECT * FROM negate(x); -- { serverError UNKNOWN_FUNCTION } + +SELECT not((SELECT * AND(16)) AND 1); + +SELECT -[(1)]; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT NOT ((1, 1, 1)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT ProfileEvents['LoadedMarksCount'], 1 OR toLowCardinality(1) FROM system.nonexistent PREWHERE tupleElement(*, 1) AND match(query, 'SELECT * FROM t_prewarm_add_column%') AND (currentDatabase() = current_database) WHERE ('SELECT * FROM t_prewarm_add_column%' NOT LIKE query) AND (type = 'QueryFinish') AND (current_database = 
currentDatabase()) ORDER BY ALL DESC NULLS FIRST; -- { serverError UNKNOWN_TABLE } + +select (((1), (2))); + +SELECT (1 AS c0).1, (1 AS c0).1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT (1 AS c0,).1, (1 AS c0,).1; diff --git a/parser/testdata/03553_inconsistent_formatting_of_dictionary/ast.json b/parser/testdata/03553_inconsistent_formatting_of_dictionary/ast.json new file mode 100644 index 000000000..5cd5a6426 --- /dev/null +++ b/parser/testdata/03553_inconsistent_formatting_of_dictionary/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery d0 (children 3)" + }, + { + "explain": " Identifier d0" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DictionaryAttributeDeclaration c1 (children 1)" + }, + { + "explain": " DataType Nested (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " NameTypePair c2 (children 1)" + }, + { + "explain": " DataType Int" + }, + { + "explain": " Dictionary definition (children 1)" + }, + { + "explain": " Dictionary range" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001155606, + "rows_read": 10, + "bytes_read": 363 + } +} diff --git a/parser/testdata/03553_inconsistent_formatting_of_dictionary/metadata.json b/parser/testdata/03553_inconsistent_formatting_of_dictionary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03553_inconsistent_formatting_of_dictionary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03553_inconsistent_formatting_of_dictionary/query.sql b/parser/testdata/03553_inconsistent_formatting_of_dictionary/query.sql new file mode 100644 index 000000000..0d4faeb36 --- /dev/null +++ b/parser/testdata/03553_inconsistent_formatting_of_dictionary/query.sql @@ -0,0 +1 @@ +CREATE DICTIONARY d0 (c1 Nested(c2 Int)) RANGE(MIN c1 MAX `c1.c2`); -- { serverError INCORRECT_DICTIONARY_DEFINITION } diff --git a/parser/testdata/03553_json_shared_data_advanced_serialization/ast.json b/parser/testdata/03553_json_shared_data_advanced_serialization/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03553_json_shared_data_advanced_serialization/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03553_json_shared_data_advanced_serialization/metadata.json b/parser/testdata/03553_json_shared_data_advanced_serialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03553_json_shared_data_advanced_serialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03553_json_shared_data_advanced_serialization/query.sql b/parser/testdata/03553_json_shared_data_advanced_serialization/query.sql new file mode 100644 index 000000000..996695511 --- /dev/null +++ b/parser/testdata/03553_json_shared_data_advanced_serialization/query.sql @@ -0,0 +1,414 @@ +-- Tags: long + +set output_format_json_quote_64bit_integers=0; + +drop table if exists source; +create table source (json JSON(max_dynamic_paths=8)) engine=Memory; +insert into source format JSONAsObject +{"?1" : 1, "?2" : 1, "?3" : 1, "?4" : 1, "?5" : 1, "?6" : 1, "?7" : 1, "?8" : 1, "a" : {"a1" : 1, "a2" : 2, "arr" : [{"arr1" : 3, "arr2" : 4, "arr3" : 5, "arr4" : 6}]}, "b" : 7, "c" : 8, "arr" : [{"arr1" : 9, "arr2" : 10, "arr3" : 11, "arr4" : 12}]} +{"a" : {"a1" : 2, "a2" : 3, "arr" : [{"arr1" : 4, "arr2" : 5, "arr3" : 6, "arr4" : 7}]}, "b" : 8, "c" 
: 9, "arr" : [{"arr1" : 10, "arr2" : 11, "arr3" : 12, "arr4" : 13}]} +{} +{} +{"a" : {"a1" : 3, "a2" : 4, "arr" : [{"arr1" : 5, "arr2" : 6, "arr3" : 7, "arr4" : 8}]}} +{"a" : {"a1" : 4, "a2" : 5, "arr" : [{"arr1" : 6, "arr2" : 7, "arr3" : 8, "arr4" : 9}]}} +{"b" : 9, "c" : 10} +{"b" : 10, "c" : 11} +{"arr" : [{"arr1" : 11, "arr2" : 12, "arr3" : 13, "arr4" : 14}]} +{"arr" : [{"arr1" : 12, "arr2" : 13, "arr3" : 14, "arr4" :15 }]} +{"a" : {"a1" : 5, "a2" : 6}} +{"a" : {"a1" : 6, "a2" : 7}}; + +drop table if exists test_compact_without_substreams_advanced; +create table test_compact_without_substreams_advanced (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings index_granularity=2, min_bytes_for_wide_part='200G', min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=0, object_serialization_version='v3', object_shared_data_serialization_version='advanced', object_shared_data_serialization_version_for_zero_level_parts='advanced', object_shared_data_buckets_for_compact_part=2; +insert into test_compact_without_substreams_advanced select * from source; + +select 'select json'; +select json from test_compact_without_substreams_advanced; + +drop table test_compact_without_substreams_advanced; + +drop table if exists test_compact_advanced; +create table test_compact_advanced (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings index_granularity=2, min_bytes_for_wide_part='200G', min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='advanced', object_shared_data_serialization_version_for_zero_level_parts='advanced', object_shared_data_buckets_for_compact_part=2; +insert into test_compact_advanced select * from source; + +select 'select json'; +select json from test_compact_advanced; +select 'select json.b'; +select json.b from test_compact_advanced; +select 'select json.b, json.c'; +select json.b, json.c from test_compact_advanced; +select 'select json.arr[].arr1'; +select json.arr[].arr1 from test_compact_advanced; +select 'select json.arr[].arr1, json.arr[].arr2'; +select json.arr[].arr1, json.arr[].arr2 from test_compact_advanced; +select 'select json.arr[].arr2, json.arr[].arr1'; +select json.arr[].arr2, json.arr[].arr1 from test_compact_advanced; +select 'select json.arr[].arr1, json.arr[].arr4'; +select json.arr[].arr1, json.arr[].arr4 from test_compact_advanced; +select 'select json.arr[].arr4, json.arr[].arr1'; +select json.arr[].arr4, json.arr[].arr1 from test_compact_advanced; +select 'select json.arr[].arr1, json.arr[].arr99'; +select json.arr[].arr1, json.arr[].arr99 from test_compact_advanced; +select 'select json.arr[].arr99, json.arr[].arr1'; +select json.arr[].arr4, json.arr[].arr1 from test_compact_advanced; +select 'select json.arr, json.arr[].arr1'; +select json.arr, json.arr[].arr1 from test_compact_advanced; +select 'select json.arr[].arr1, json.arr'; +select json.arr[].arr1, json.arr from test_compact_advanced; +select 'select json, json.b, json.c'; +select json, json.b, json.c from test_compact_advanced; +select 'select json.b, json.c, json'; +select json.b, json.c, json from test_compact_advanced; +select 'select json, json.b, json.c, json.arr[].arr1'; +select json, json.b, json.c, json.arr[].arr1 from test_compact_advanced; +select 'select json.b, json.c, json.arr[].arr1, json'; +select json.b, json.c, json.arr[].arr1, json from test_compact_advanced; +select 'select json, json.b, json.c, json.arr, json.arr[].arr1'; 
+select json, json.b, json.c, json.arr, json.arr[].arr1 from test_compact_advanced; +select 'select json.b, json.c, json.arr[].arr1, json.arr, json'; +select json.b, json.c, json.arr[].arr1, json.arr, json from test_compact_advanced; +select 'select json.^a'; +select json.^a from test_compact_advanced; +select 'select json, json.^a'; +select json, json.^a from test_compact_advanced; +select 'select json.^a, json.a.a1'; +select json.^a, json.a.a1 from test_compact_advanced; +select 'select json.a.a1, json.^a'; +select json.a.a1, json.^a from test_compact_advanced; +select 'select json.^a, json.a.a1, json.a.arr[].arr1'; +select json.^a, json.a.a1, json.a.arr[].arr1 from test_compact_advanced; +select 'select json.a.a1, json.a.arr[].arr1, json.^a'; +select json.a.a1, json.a.arr[].arr1, json.^a from test_compact_advanced; +select 'select json, json.a.a1, json.a.arr[].arr1, json.^a'; +select json, json.a.a1, json.a.arr[].arr1, json.^a from test_compact_advanced; +select 'select json.a.a1, json.a.arr[].arr1, json.^a, json'; +select json.a.a1, json.a.arr[].arr1, json.^a, json from test_compact_advanced; +select 'select json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr'; +select json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_compact_advanced; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a from test_compact_advanced; +select 'select json, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr'; +select json, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_compact_advanced; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json from test_compact_advanced; +select 'select json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr'; +select json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_compact_advanced; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1 from test_compact_advanced; +select 'select json, json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr'; +select json, json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_compact_advanced; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1, json'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1, json from test_compact_advanced; + +drop table test_compact_advanced; + +drop table if exists test_compact_advanced_tuple; +create table test_compact_advanced_tuple (json Tuple(data JSON(max_dynamic_paths=8))) engine=MergeTree order by tuple() settings index_granularity=2, min_bytes_for_wide_part='200G', min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='advanced', object_shared_data_serialization_version_for_zero_level_parts='advanced', object_shared_data_buckets_for_compact_part=2; +insert into test_compact_advanced_tuple select tuple(json) from source; + +select 'select json.data'; +select json.data from test_compact_advanced_tuple; +select 'select json.data, json.data.b'; +select json.data, json.data.b from test_compact_advanced_tuple; +select 'select json.data.b, json.data'; +select json.data.b, json.data from test_compact_advanced_tuple; 
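+-- Note: the label/query pairs below follow the same convention as above: each quoted 'select ...' string simply echoes the query executed right after it, so the reference output records which combination of whole-object, path-subcolumn, and ^-prefix reads is exercised against the Tuple-wrapped JSON column.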
+select 'select json.data, json.data.b, json.data.c'; +select json.data, json.data.b, json.data.c from test_compact_advanced_tuple; +select 'select json.data.b, json.data, json.data.c'; +select json.data.b, json.data, json.data.c from test_compact_advanced_tuple; +select 'select json.data.b, json.data.c, json.data'; +select json.data.b, json.data.c, json.data from test_compact_advanced_tuple; +select 'select json.data, json.data.^a'; +select json.data, json.data.^a from test_compact_advanced_tuple; +select 'select json.data.^a, json.data'; +select json.data.^a, json.data from test_compact_advanced_tuple; +select 'select json.data, json.data.^a, json.data.b'; +select json.data, json.data.^a, json.data.b from test_compact_advanced_tuple; +select 'select json.data.b, json.data.^a, json.data'; +select json.data.b, json.data.^a, json.data from test_compact_advanced_tuple; + +drop table test_compact_advanced_tuple; + +drop table if exists test_wide_advanced; +create table test_wide_advanced (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings index_granularity=2, min_bytes_for_wide_part=1, min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='advanced', object_shared_data_serialization_version_for_zero_level_parts='advanced', object_shared_data_buckets_for_wide_part=2; +insert into test_wide_advanced select * from source; + +select 'select json'; +select json from test_wide_advanced; +select 'select json.b'; +select json.b from test_wide_advanced; +select 'select json.b, json.c'; +select json.b, json.c from test_wide_advanced; +select 'select json.arr[].arr1'; +select json.arr[].arr1 from test_wide_advanced; +select 'select json.arr[].arr1, json.arr[].arr2'; +select json.arr[].arr1, json.arr[].arr2 from test_wide_advanced; +select 'select json.arr[].arr2, json.arr[].arr1'; +select json.arr[].arr2, json.arr[].arr1 from test_wide_advanced; +select 'select json.arr[].arr1, json.arr[].arr4'; +select json.arr[].arr1, json.arr[].arr4 from test_wide_advanced; +select 'select json.arr[].arr4, json.arr[].arr1'; +select json.arr[].arr4, json.arr[].arr1 from test_wide_advanced; +select 'select json.arr[].arr1, json.arr[].arr99'; +select json.arr[].arr1, json.arr[].arr99 from test_wide_advanced; +select 'select json.arr[].arr99, json.arr[].arr1'; +select json.arr[].arr99, json.arr[].arr1 from test_wide_advanced; +select 'select json.arr, json.arr[].arr1'; +select json.arr, json.arr[].arr1 from test_wide_advanced; +select 'select json.arr[].arr1, json.arr'; +select json.arr[].arr1, json.arr from test_wide_advanced; +select 'select json, json.b, json.c'; +select json, json.b, json.c from test_wide_advanced; +select 'select json.b, json.c, json'; +select json.b, json.c, json from test_wide_advanced; +select 'select json, json.b, json.c, json.arr[].arr1'; +select json, json.b, json.c, json.arr[].arr1 from test_wide_advanced; +select 'select json.b, json.c, json.arr[].arr1, json'; +select json.b, json.c, json.arr[].arr1, json from test_wide_advanced; +select 'select json, json.b, json.c, json.arr, json.arr[].arr1'; +select json, json.b, json.c, json.arr, json.arr[].arr1 from test_wide_advanced; +select 'select json.b, json.c, json.arr[].arr1, json.arr, json'; +select json.b, json.c, json.arr[].arr1, json.arr, json from test_wide_advanced; +select 'select json.^a'; +select json.^a from test_wide_advanced; +select 'select json, json.^a'; +select json, json.^a from test_wide_advanced; +select 
'select json.^a, json.a.a1'; +select json.^a, json.a.a1 from test_wide_advanced; +select 'select json.a.a1, json.^a'; +select json.a.a1, json.^a from test_wide_advanced; +select 'select json.^a, json.a.a1, json.a.arr[].arr1'; +select json.^a, json.a.a1, json.a.arr[].arr1 from test_wide_advanced; +select 'select json.a.a1, json.a.arr[].arr1, json.^a'; +select json.a.a1, json.a.arr[].arr1, json.^a from test_wide_advanced; +select 'select json, json.a.a1, json.a.arr[].arr1, json.^a'; +select json, json.a.a1, json.a.arr[].arr1, json.^a from test_wide_advanced; +select 'select json.a.a1, json.a.arr[].arr1, json.^a, json'; +select json.a.a1, json.a.arr[].arr1, json.^a, json from test_wide_advanced; +select 'select json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr'; +select json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_wide_advanced; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a from test_wide_advanced; +select 'select json, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr'; +select json, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_wide_advanced; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json from test_wide_advanced; +select 'select json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr'; +select json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_wide_advanced; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1 from test_wide_advanced; +select 'select json, json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr'; +select json, json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_wide_advanced; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1, json'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1, json from test_wide_advanced; + +select 'select json limit 3'; +select json from test_wide_advanced limit 3; +select 'select json.b limit 3'; +select json.b from test_wide_advanced limit 3; +select 'select json.b, json.c limit 3'; +select json.b, json.c from test_wide_advanced limit 3; +select 'select json.arr[].arr1 limit 3'; +select json.arr[].arr1 from test_wide_advanced limit 3; +select 'select json.arr[].arr1, json.arr[].arr2 limit 3'; +select json.arr[].arr1, json.arr[].arr2 from test_wide_advanced limit 3; +select 'select json.arr[].arr2, json.arr[].arr1 limit 3'; +select json.arr[].arr2, json.arr[].arr1 from test_wide_advanced limit 3; +select 'select json.arr[].arr1, json.arr[].arr4 limit 3'; +select json.arr[].arr1, json.arr[].arr4 from test_wide_advanced limit 3; +select 'select json.arr[].arr4, json.arr[].arr1 limit 3'; +select json.arr[].arr4, json.arr[].arr1 from test_wide_advanced limit 3; +select 'select json.arr[].arr1, json.arr[].arr99 limit 3'; +select json.arr[].arr1, json.arr[].arr99 from test_wide_advanced limit 3; +select 'select json.arr[].arr99, json.arr[].arr1 limit 3'; +select json.arr[].arr99, json.arr[].arr1 from test_wide_advanced limit 3; +select 'select json.arr, json.arr[].arr1 limit 3'; +select json.arr, json.arr[].arr1 from test_wide_advanced limit 3; +select 'select json.arr[].arr1, json.arr limit 3'; +select json.arr[].arr1, json.arr 
from test_wide_advanced limit 3; +select 'select json, json.b, json.c limit 3'; +select json, json.b, json.c from test_wide_advanced limit 3; +select 'select json.b, json.c, json limit 3'; +select json.b, json.c, json from test_wide_advanced limit 3; +select 'select json, json.b, json.c, json.arr[].arr1 limit 3'; +select json, json.b, json.c, json.arr[].arr1 from test_wide_advanced limit 3; +select 'select json.b, json.c, json.arr[].arr1, json limit 3'; +select json.b, json.c, json.arr[].arr1, json from test_wide_advanced limit 3; +select 'select json, json.b, json.c, json.arr, json.arr[].arr1 limit 3'; +select json, json.b, json.c, json.arr, json.arr[].arr1 from test_wide_advanced limit 3; +select 'select json.b, json.c, json.arr[].arr1, json.arr, json limit 3'; +select json.b, json.c, json.arr[].arr1, json.arr, json from test_wide_advanced limit 3; +select 'select json.^a limit 3'; +select json.^a from test_wide_advanced limit 3; +select 'select json, json.^a limit 3'; +select json, json.^a from test_wide_advanced limit 3; +select 'select json.^a, json.a.a1 limit 3'; +select json.^a, json.a.a1 from test_wide_advanced limit 3; +select 'select json.a.a1, json.^a limit 3'; +select json.a.a1, json.^a from test_wide_advanced limit 3; +select 'select json.^a, json.a.a1, json.a.arr[].arr1 limit 3'; +select json.^a, json.a.a1, json.a.arr[].arr1 from test_wide_advanced limit 3; +select 'select json.a.a1, json.a.arr[].arr1, json.^a limit 3'; +select json.a.a1, json.a.arr[].arr1, json.^a from test_wide_advanced limit 3; +select 'select json, json.a.a1, json.a.arr[].arr1, json.^a limit 3'; +select json, json.a.a1, json.a.arr[].arr1, json.^a from test_wide_advanced limit 3; +select 'select json.a.a1, json.a.arr[].arr1, json.^a, json limit 3'; +select json.a.a1, json.a.arr[].arr1, json.^a, json from test_wide_advanced limit 3; +select 'select json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr limit 3'; +select json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_wide_advanced limit 3; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a limit 3'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a from test_wide_advanced limit 3; +select 'select json, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr limit 3'; +select json, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_wide_advanced limit 3; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json limit 3'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json from test_wide_advanced limit 3; +select 'select json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr limit 3'; +select json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_wide_advanced limit 3; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1 limit 3'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1 from test_wide_advanced limit 3; +select 'select json, json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr limit 3'; +select json, json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_wide_advanced limit 3; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1, json limit 3'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1, json from test_wide_advanced limit 3; + +select 'select json settings max_block_size=3'; +select json from test_wide_advanced 
settings max_block_size=3; +select 'select json.b settings max_block_size=3'; +select json.b from test_wide_advanced settings max_block_size=3; +select 'select json.b, json.c settings max_block_size=3'; +select json.b, json.c from test_wide_advanced settings max_block_size=3; +select 'select json.arr[].arr1 settings max_block_size=3'; +select json.arr[].arr1 from test_wide_advanced settings max_block_size=3; +select 'select json.arr[].arr1, json.arr[].arr2 settings max_block_size=3'; +select json.arr[].arr1, json.arr[].arr2 from test_wide_advanced settings max_block_size=3; +select 'select json.arr[].arr2, json.arr[].arr1 settings max_block_size=3'; +select json.arr[].arr2, json.arr[].arr1 from test_wide_advanced settings max_block_size=3; +select 'select json.arr[].arr1, json.arr[].arr4 settings max_block_size=3'; +select json.arr[].arr1, json.arr[].arr4 from test_wide_advanced settings max_block_size=3; +select 'select json.arr[].arr4, json.arr[].arr1 settings max_block_size=3'; +select json.arr[].arr4, json.arr[].arr1 from test_wide_advanced settings max_block_size=3; +select 'select json.arr[].arr1, json.arr[].arr99 settings max_block_size=3'; +select json.arr[].arr1, json.arr[].arr99 from test_wide_advanced settings max_block_size=3; +select 'select json.arr[].arr99, json.arr[].arr1 settings max_block_size=3'; +select json.arr[].arr99, json.arr[].arr1 from test_wide_advanced settings max_block_size=3; +select 'select json.arr, json.arr[].arr1 settings max_block_size=3'; +select json.arr, json.arr[].arr1 from test_wide_advanced settings max_block_size=3; +select 'select json.arr[].arr1, json.arr settings max_block_size=3'; +select json.arr[].arr1, json.arr from test_wide_advanced settings max_block_size=3; +select 'select json, json.b, json.c settings max_block_size=3'; +select json, json.b, json.c from test_wide_advanced settings max_block_size=3; +select 'select json.b, json.c, json settings max_block_size=3'; +select json.b, json.c, json from test_wide_advanced settings max_block_size=3; +select 'select json, json.b, json.c, json.arr[].arr1 settings max_block_size=3'; +select json, json.b, json.c, json.arr[].arr1 from test_wide_advanced settings max_block_size=3; +select 'select json.b, json.c, json.arr[].arr1, json settings max_block_size=3'; +select json.b, json.c, json.arr[].arr1, json from test_wide_advanced settings max_block_size=3; +select 'select json, json.b, json.c, json.arr, json.arr[].arr1 settings max_block_size=3'; +select json, json.b, json.c, json.arr, json.arr[].arr1 from test_wide_advanced settings max_block_size=3; +select 'select json.b, json.c, json.arr[].arr1, json.arr, json settings max_block_size=3'; +select json.b, json.c, json.arr[].arr1, json.arr, json from test_wide_advanced settings max_block_size=3; +select 'select json.^a settings max_block_size=3'; +select json.^a from test_wide_advanced settings max_block_size=3; +select 'select json, json.^a settings max_block_size=3'; +select json, json.^a from test_wide_advanced settings max_block_size=3; +select 'select json.^a, json.a.a1 settings max_block_size=3'; +select json.^a, json.a.a1 from test_wide_advanced settings max_block_size=3; +select 'select json.a.a1, json.^a settings max_block_size=3'; +select json.a.a1, json.^a from test_wide_advanced settings max_block_size=3; +select 'select json.^a, json.a.a1, json.a.arr[].arr1 settings max_block_size=3'; +select json.^a, json.a.a1, json.a.arr[].arr1 from test_wide_advanced settings max_block_size=3; +select 'select json.a.a1, json.a.arr[].arr1, json.^a 
settings max_block_size=3'; +select json.a.a1, json.a.arr[].arr1, json.^a from test_wide_advanced settings max_block_size=3; +select 'select json, json.a.a1, json.a.arr[].arr1, json.^a settings max_block_size=3'; +select json, json.a.a1, json.a.arr[].arr1, json.^a from test_wide_advanced settings max_block_size=3; +select 'select json.a.a1, json.a.arr[].arr1, json.^a, json settings max_block_size=3'; +select json.a.a1, json.a.arr[].arr1, json.^a, json from test_wide_advanced settings max_block_size=3; +select 'select json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr settings max_block_size=3'; +select json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_wide_advanced settings max_block_size=3; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a settings max_block_size=3'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a from test_wide_advanced settings max_block_size=3; +select 'select json, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr settings max_block_size=3'; +select json, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_wide_advanced settings max_block_size=3; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json settings max_block_size=3'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json from test_wide_advanced settings max_block_size=3; +select 'select json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr settings max_block_size=3'; +select json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_wide_advanced settings max_block_size=3; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1 settings max_block_size=3'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1 from test_wide_advanced settings max_block_size=3; +select 'select json, json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr settings max_block_size=3'; +select json, json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_wide_advanced settings max_block_size=3; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1, json settings max_block_size=3'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1, json from test_wide_advanced settings max_block_size=3; + + +drop table test_wide_advanced; + +drop table if exists test_wide_advanced_tuple; +create table test_wide_advanced_tuple (json Tuple(data JSON(max_dynamic_paths=8))) engine=MergeTree order by tuple() settings index_granularity=2, min_bytes_for_wide_part=1, min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='advanced', object_shared_data_serialization_version_for_zero_level_parts='advanced', object_shared_data_buckets_for_wide_part=2; +insert into test_wide_advanced_tuple select tuple(json) from source; + +select 'select json.data'; +select json.data from test_wide_advanced_tuple; +select 'select json.data, json.data.b'; +select json.data, json.data.b from test_wide_advanced_tuple; +select 'select json.data.b, json.data'; +select json.data.b, json.data from test_wide_advanced_tuple; +select 'select json.data, json.data.b, json.data.c'; +select json.data, json.data.b, json.data.c from test_wide_advanced_tuple; +select 'select json.data.b, json.data, json.data.c'; +select json.data.b, json.data, json.data.c from 
test_wide_advanced_tuple; +select 'select json.data.b, json.data.c, json.data'; +select json.data.b, json.data.c, json.data from test_wide_advanced_tuple; +select 'select json.data, json.data.^a'; +select json.data, json.data.^a from test_wide_advanced_tuple; +select 'select json.data.^a, json.data'; +select json.data.^a, json.data from test_wide_advanced_tuple; +select 'select json.data, json.data.^a, json.data.b'; +select json.data, json.data.^a, json.data.b from test_wide_advanced_tuple; +select 'select json.data.b, json.data.^a, json.data'; +select json.data.b, json.data.^a, json.data from test_wide_advanced_tuple; + +select 'select json.data limit 3'; +select json.data from test_wide_advanced_tuple limit 3; +select 'select json.data, json.data.b limit 3'; +select json.data, json.data.b from test_wide_advanced_tuple limit 3; +select 'select json.data.b, json.data limit 3'; +select json.data.b, json.data from test_wide_advanced_tuple limit 3; +select 'select json.data, json.data.b, json.data.c limit 3'; +select json.data, json.data.b, json.data.c from test_wide_advanced_tuple limit 3; +select 'select json.data.b, json.data, json.data.c limit 3'; +select json.data.b, json.data, json.data.c from test_wide_advanced_tuple limit 3; +select 'select json.data.b, json.data.c, json.data limit 3'; +select json.data.b, json.data.c, json.data from test_wide_advanced_tuple limit 3; +select 'select json.data, json.data.^a limit 3'; +select json.data, json.data.^a from test_wide_advanced_tuple limit 3; +select 'select json.data.^a, json.data limit 3'; +select json.data.^a, json.data from test_wide_advanced_tuple limit 3; +select 'select json.data, json.data.^a, json.data.b limit 3'; +select json.data, json.data.^a, json.data.b from test_wide_advanced_tuple limit 3; +select 'select json.data.b, json.data.^a, json.data limit 3'; +select json.data.b, json.data.^a, json.data from test_wide_advanced_tuple limit 3; + +select 'select json.data settings max_block_size=3'; +select json.data from test_wide_advanced_tuple settings max_block_size=3; +select 'select json.data, json.data.b settings max_block_size=3'; +select json.data, json.data.b from test_wide_advanced_tuple settings max_block_size=3; +select 'select json.data.b, json.data settings max_block_size=3'; +select json.data.b, json.data from test_wide_advanced_tuple settings max_block_size=3; +select 'select json.data, json.data.b, json.data.c settings max_block_size=3'; +select json.data, json.data.b, json.data.c from test_wide_advanced_tuple settings max_block_size=3; +select 'select json.data.b, json.data, json.data.c settings max_block_size=3'; +select json.data.b, json.data, json.data.c from test_wide_advanced_tuple settings max_block_size=3; +select 'select json.data.b, json.data.c, json.data settings max_block_size=3'; +select json.data.b, json.data.c, json.data from test_wide_advanced_tuple settings max_block_size=3; +select 'select json.data, json.data.^a settings max_block_size=3'; +select json.data, json.data.^a from test_wide_advanced_tuple settings max_block_size=3; +select 'select json.data.^a, json.data settings max_block_size=3'; +select json.data.^a, json.data from test_wide_advanced_tuple settings max_block_size=3; +select 'select json.data, json.data.^a, json.data.b settings max_block_size=3'; +select json.data, json.data.^a, json.data.b from test_wide_advanced_tuple settings max_block_size=3; +select 'select json.data.b, json.data.^a, json.data settings max_block_size=3'; +select json.data.b, json.data.^a, json.data from 
test_wide_advanced_tuple settings max_block_size=3; + +drop table test_wide_advanced_tuple; +drop table source; diff --git a/parser/testdata/03553_json_shared_data_map_serialization/ast.json b/parser/testdata/03553_json_shared_data_map_serialization/ast.json new file mode 100644 index 000000000..40c2f551a --- /dev/null +++ b/parser/testdata/03553_json_shared_data_map_serialization/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001321305, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03553_json_shared_data_map_serialization/metadata.json b/parser/testdata/03553_json_shared_data_map_serialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03553_json_shared_data_map_serialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03553_json_shared_data_map_serialization/query.sql b/parser/testdata/03553_json_shared_data_map_serialization/query.sql new file mode 100644 index 000000000..895d08a31 --- /dev/null +++ b/parser/testdata/03553_json_shared_data_map_serialization/query.sql @@ -0,0 +1,202 @@ +set output_format_json_quote_64bit_integers=0; + +drop table if exists source; +create table source (json JSON(max_dynamic_paths=8)) engine=Memory; +insert into source format JSONAsObject +{"?1" : 1, "?2" : 1, "?3" : 1, "?4" : 1, "?5" : 1, "?6" : 1, "?7" : 1, "?8" : 1, "a" : {"a1" : 1, "a2" : 2, "arr" : [{"arr1" : 3, "arr2" : 4, "arr3" : 5, "arr4" : 6}]}, "b" : 7, "c" : 8, "arr" : [{"arr1" : 9, "arr2" : 10, "arr3" : 11, "arr4" : 12}]} +{"a" : {"a1" : 2, "a2" : 3, "arr" : [{"arr1" : 4, "arr2" : 5, "arr3" : 6, "arr4" : 7}]}, "b" : 8, "c" : 9, "arr" : [{"arr1" : 10, "arr2" : 11, "arr3" : 12, "arr4" : 13}]} +{} +{} +{"a" : {"a1" : 3, "a2" : 4, "arr" : [{"arr1" : 5, "arr2" : 6, "arr3" : 7, "arr4" : 8}]}} +{"a" : {"a1" : 4, "a2" : 5, "arr" : [{"arr1" : 6, "arr2" : 7, "arr3" : 8, "arr4" : 9}]}} +{"b" : 9, "c" : 10} +{"b" : 10, "c" : 11} +{"arr" : [{"arr1" : 11, "arr2" : 12, "arr3" : 13, "arr4" : 14}]} +{"arr" : [{"arr1" : 12, "arr2" : 13, "arr3" : 14, "arr4" :15 }]} +{"a" : {"a1" : 5, "a2" : 6}} +{"a" : {"a1" : 6, "a2" : 7}}; + +drop table if exists test_compact_map; +create table test_compact_map (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings index_granularity=2, min_bytes_for_wide_part='200G', min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='map', object_shared_data_serialization_version_for_zero_level_parts='map'; +insert into test_compact_map select * from source; + +select 'select json'; +select json from test_compact_map; +select 'select json.b'; +select json.b from test_compact_map; +select 'select json.b, json.c'; +select json.b, json.c from test_compact_map; +select 'select json, json.b, json.c'; +select json, json.b, json.c from test_compact_map; +select 'select json.b, json.c, json'; +select json.b, json.c, json from test_compact_map; +select 'select json.^a'; +select json.^a from test_compact_map; +select 'select json, json.^a'; +select json, json.^a from test_compact_map; +select 'select json.^a, json.a.a1'; +select json.^a, json.a.a1 from test_compact_map; +select 'select json.a.a1, json.^a'; +select json.a.a1, json.^a from test_compact_map; +drop table test_compact_map; + +drop table if exists 
test_compact_map_tuple; +create table test_compact_map_tuple (json Tuple(data JSON(max_dynamic_paths=8))) engine=MergeTree order by tuple() settings index_granularity=2, min_bytes_for_wide_part='200G', min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='map', object_shared_data_serialization_version_for_zero_level_parts='map'; +insert into test_compact_map_tuple select tuple(json) from source; + +select 'select json.data'; +select json.data from test_compact_map_tuple; +select 'select json.data, json.data.b'; +select json.data, json.data.b from test_compact_map_tuple; +select 'select json.data.b, json.data'; +select json.data.b, json.data from test_compact_map_tuple; +select 'select json.data, json.data.b, json.data.c'; +select json.data, json.data.b, json.data.c from test_compact_map_tuple; +select 'select json.data.b, json.data, json.data.c'; +select json.data.b, json.data, json.data.c from test_compact_map_tuple; +select 'select json.data.b, json.data.c, json.data'; +select json.data.b, json.data.c, json.data from test_compact_map_tuple; +select 'select json.data, json.data.^a'; +select json.data, json.data.^a from test_compact_map_tuple; +select 'select json.data.^a, json.data'; +select json.data.^a, json.data from test_compact_map_tuple; +select 'select json.data, json.data.^a, json.data.b'; +select json.data, json.data.^a, json.data.b from test_compact_map_tuple; +select 'select json.data.b, json.data.^a, json.data'; +select json.data.b, json.data.^a, json.data from test_compact_map_tuple; + +drop table test_compact_map_tuple; + +drop table if exists test_wide_map; +create table test_wide_map (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings index_granularity=2, min_bytes_for_wide_part=1, min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='map', object_shared_data_serialization_version_for_zero_level_parts='map'; +insert into test_wide_map select * from source; + +select 'select json'; +select json from test_wide_map; +select 'select json.b'; +select json.b from test_wide_map; +select 'select json.b, json.c'; +select json.b, json.c from test_wide_map; +select 'select json, json.b, json.c'; +select json, json.b, json.c from test_wide_map; +select 'select json.b, json.c, json'; +select json.b, json.c, json from test_wide_map; +select 'select json.^a'; +select json.^a from test_wide_map; +select 'select json, json.^a'; +select json, json.^a from test_wide_map; +select 'select json.^a, json.a.a1'; +select json.^a, json.a.a1 from test_wide_map; +select 'select json.a.a1, json.^a'; +select json.a.a1, json.^a from test_wide_map; + +select 'select json limit 3'; +select json from test_wide_map limit 3; +select 'select json.b limit 3'; +select json.b from test_wide_map limit 3; +select 'select json.b, json.c limit 3'; +select json.b, json.c from test_wide_map limit 3; +select 'select json, json.b, json.c limit 3'; +select json, json.b, json.c from test_wide_map limit 3; +select 'select json.b, json.c, json limit 3'; +select json.b, json.c, json from test_wide_map limit 3; +select 'select json.^a limit 3'; +select json.^a from test_wide_map limit 3; +select 'select json, json.^a limit 3'; +select json, json.^a from test_wide_map limit 3; +select 'select json.^a, json.a.a1 limit 3'; +select json.^a, json.a.a1 from test_wide_map limit 3; +select 'select json.a.a1, json.^a limit 
3'; +select json.a.a1, json.^a from test_wide_map limit 3; + +select 'select json settings max_block_size=3'; +select json from test_wide_map settings max_block_size=3; +select 'select json.b settings max_block_size=3'; +select json.b from test_wide_map settings max_block_size=3; +select 'select json.b, json.c settings max_block_size=3'; +select json.b, json.c from test_wide_map settings max_block_size=3; +select 'select json, json.b, json.c settings max_block_size=3'; +select json, json.b, json.c from test_wide_map settings max_block_size=3; +select 'select json.b, json.c, json settings max_block_size=3'; +select json.b, json.c, json from test_wide_map settings max_block_size=3; +select 'select json.^a settings max_block_size=3'; +select json.^a from test_wide_map settings max_block_size=3; +select 'select json, json.^a settings max_block_size=3'; +select json, json.^a from test_wide_map settings max_block_size=3; +select 'select json.^a, json.a.a1 settings max_block_size=3'; +select json.^a, json.a.a1 from test_wide_map settings max_block_size=3; +select 'select json.a.a1, json.^a settings max_block_size=3'; +select json.a.a1, json.^a from test_wide_map settings max_block_size=3; + + +drop table test_wide_map; + +drop table if exists test_wide_map_tuple; +create table test_wide_map_tuple (json Tuple(data JSON(max_dynamic_paths=8))) engine=MergeTree order by tuple() settings index_granularity=2, min_bytes_for_wide_part=1, min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='map', object_shared_data_serialization_version_for_zero_level_parts='map'; +insert into test_wide_map_tuple select tuple(json) from source; + +select 'select json.data'; +select json.data from test_wide_map_tuple; +select 'select json.data, json.data.b'; +select json.data, json.data.b from test_wide_map_tuple; +select 'select json.data.b, json.data'; +select json.data.b, json.data from test_wide_map_tuple; +select 'select json.data, json.data.b, json.data.c'; +select json.data, json.data.b, json.data.c from test_wide_map_tuple; +select 'select json.data.b, json.data, json.data.c'; +select json.data.b, json.data, json.data.c from test_wide_map_tuple; +select 'select json.data.b, json.data.c, json.data'; +select json.data.b, json.data.c, json.data from test_wide_map_tuple; +select 'select json.data, json.data.^a'; +select json.data, json.data.^a from test_wide_map_tuple; +select 'select json.data.^a, json.data'; +select json.data.^a, json.data from test_wide_map_tuple; +select 'select json.data, json.data.^a, json.data.b'; +select json.data, json.data.^a, json.data.b from test_wide_map_tuple; +select 'select json.data.b, json.data.^a, json.data'; +select json.data.b, json.data.^a, json.data from test_wide_map_tuple; + +select 'select json.data limit 3'; +select json.data from test_wide_map_tuple limit 3; +select 'select json.data, json.data.b limit 3'; +select json.data, json.data.b from test_wide_map_tuple limit 3; +select 'select json.data.b, json.data limit 3'; +select json.data.b, json.data from test_wide_map_tuple limit 3; +select 'select json.data, json.data.b, json.data.c limit 3'; +select json.data, json.data.b, json.data.c from test_wide_map_tuple limit 3; +select 'select json.data.b, json.data, json.data.c limit 3'; +select json.data.b, json.data, json.data.c from test_wide_map_tuple limit 3; +select 'select json.data.b, json.data.c, json.data limit 3'; +select json.data.b, json.data.c, json.data from 
test_wide_map_tuple limit 3; +select 'select json.data, json.data.^a limit 3'; +select json.data, json.data.^a from test_wide_map_tuple limit 3; +select 'select json.data.^a, json.data limit 3'; +select json.data.^a, json.data from test_wide_map_tuple limit 3; +select 'select json.data, json.data.^a, json.data.b limit 3'; +select json.data, json.data.^a, json.data.b from test_wide_map_tuple limit 3; +select 'select json.data.b, json.data.^a, json.data limit 3'; +select json.data.b, json.data.^a, json.data from test_wide_map_tuple limit 3; + +select 'select json.data settings max_block_size=3'; +select json.data from test_wide_map_tuple settings max_block_size=3; +select 'select json.data, json.data.b settings max_block_size=3'; +select json.data, json.data.b from test_wide_map_tuple settings max_block_size=3; +select 'select json.data.b, json.data settings max_block_size=3'; +select json.data.b, json.data from test_wide_map_tuple settings max_block_size=3; +select 'select json.data, json.data.b, json.data.c settings max_block_size=3'; +select json.data, json.data.b, json.data.c from test_wide_map_tuple settings max_block_size=3; +select 'select json.data.b, json.data, json.data.c settings max_block_size=3'; +select json.data.b, json.data, json.data.c from test_wide_map_tuple settings max_block_size=3; +select 'select json.data.b, json.data.c, json.data settings max_block_size=3'; +select json.data.b, json.data.c, json.data from test_wide_map_tuple settings max_block_size=3; +select 'select json.data, json.data.^a settings max_block_size=3'; +select json.data, json.data.^a from test_wide_map_tuple settings max_block_size=3; +select 'select json.data.^a, json.data settings max_block_size=3'; +select json.data.^a, json.data from test_wide_map_tuple settings max_block_size=3; +select 'select json.data, json.data.^a, json.data.b settings max_block_size=3'; +select json.data, json.data.^a, json.data.b from test_wide_map_tuple settings max_block_size=3; +select 'select json.data.b, json.data.^a, json.data settings max_block_size=3'; +select json.data.b, json.data.^a, json.data from test_wide_map_tuple settings max_block_size=3; + +drop table test_wide_map_tuple; +drop table source; diff --git a/parser/testdata/03553_json_shared_data_map_with_buckets_serialization/ast.json b/parser/testdata/03553_json_shared_data_map_with_buckets_serialization/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03553_json_shared_data_map_with_buckets_serialization/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03553_json_shared_data_map_with_buckets_serialization/metadata.json b/parser/testdata/03553_json_shared_data_map_with_buckets_serialization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03553_json_shared_data_map_with_buckets_serialization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03553_json_shared_data_map_with_buckets_serialization/query.sql b/parser/testdata/03553_json_shared_data_map_with_buckets_serialization/query.sql new file mode 100644 index 000000000..f9f0d58f7 --- /dev/null +++ b/parser/testdata/03553_json_shared_data_map_with_buckets_serialization/query.sql @@ -0,0 +1,252 @@ +-- Tags: long + +set output_format_json_quote_64bit_integers=0; + +drop table if exists source; +create table source (json JSON(max_dynamic_paths=8)) engine=Memory; +insert into source format JSONAsObject +{"?1" : 1, "?2" : 1, "?3" : 1, "?4" : 1, "?5" : 1, "?6" : 1, "?7" : 1, "?8" 
: 1, "a" : {"a1" : 1, "a2" : 2, "arr" : [{"arr1" : 3, "arr2" : 4, "arr3" : 5, "arr4" : 6}]}, "b" : 7, "c" : 8, "arr" : [{"arr1" : 9, "arr2" : 10, "arr3" : 11, "arr4" : 12}]} +{"a" : {"a1" : 2, "a2" : 3, "arr" : [{"arr1" : 4, "arr2" : 5, "arr3" : 6, "arr4" : 7}]}, "b" : 8, "c" : 9, "arr" : [{"arr1" : 10, "arr2" : 11, "arr3" : 12, "arr4" : 13}]} +{} +{} +{"a" : {"a1" : 3, "a2" : 4, "arr" : [{"arr1" : 5, "arr2" : 6, "arr3" : 7, "arr4" : 8}]}} +{"a" : {"a1" : 4, "a2" : 5, "arr" : [{"arr1" : 6, "arr2" : 7, "arr3" : 8, "arr4" : 9}]}} +{"b" : 9, "c" : 10} +{"b" : 10, "c" : 11} +{"arr" : [{"arr1" : 11, "arr2" : 12, "arr3" : 13, "arr4" : 14}]} +{"arr" : [{"arr1" : 12, "arr2" : 13, "arr3" : 14, "arr4" :15 }]} +{"a" : {"a1" : 5, "a2" : 6}} +{"a" : {"a1" : 6, "a2" : 7}}; + +drop table if exists test_compact_map_with_buckets; +create table test_compact_map_with_buckets (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings index_granularity=2, min_bytes_for_wide_part='200G', min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='map_with_buckets', object_shared_data_serialization_version_for_zero_level_parts='map_with_buckets', object_shared_data_buckets_for_compact_part=2; +insert into test_compact_map_with_buckets select * from source; + +select 'select json'; +select json from test_compact_map_with_buckets; +select 'select json.b'; +select json.b from test_compact_map_with_buckets; +select 'select json.arr'; +select json.arr from test_compact_map_with_buckets; +select 'select json.b, json.c'; +select json.b, json.c from test_compact_map_with_buckets; +select 'select json.b, json.c, json.arr'; +select json.b, json.c, json.arr from test_compact_map_with_buckets; +select 'select json, json.b, json.c'; +select json, json.b, json.c from test_compact_map_with_buckets; +select 'select json, json.b, json.c, json.arr'; +select json, json.b, json.c, json.arr from test_compact_map_with_buckets; +select 'select json.b, json.c, json'; +select json.b, json.c, json from test_compact_map_with_buckets; +select 'select json.b, json.c, json.arr, json'; +select json.b, json.c, json.arr, json from test_compact_map_with_buckets; +select 'select json.^a'; +select json.^a from test_compact_map_with_buckets; +select 'select json, json.^a'; +select json, json.^a from test_compact_map_with_buckets; +select 'select json.^a, json.a.a1'; +select json.^a, json.a.a1 from test_compact_map_with_buckets; +select json.a.a1, json.^a from test_compact_map_with_buckets; +select 'select json.^a, json.a.a1, json.arr'; +select json.^a, json.a.a1, json.arr from test_compact_map_with_buckets; +select 'select json.a.a1, json.arr, json.^a'; +select json.a.a1, json.arr, json.^a from test_compact_map_with_buckets; + +drop table test_compact_map_with_buckets; + +drop table if exists test_compact_map_with_buckets_tuple; +create table test_compact_map_with_buckets_tuple (json Tuple(data JSON(max_dynamic_paths=8))) engine=MergeTree order by tuple() settings index_granularity=2, min_bytes_for_wide_part='200G', min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='map_with_buckets', object_shared_data_serialization_version_for_zero_level_parts='map_with_buckets', object_shared_data_buckets_for_compact_part=2; +insert into test_compact_map_with_buckets_tuple select tuple(json) from source; + +select 'select json.data'; +select json.data from 
test_compact_map_with_buckets_tuple; +select 'select json.data, json.data.b'; +select json.data, json.data.b from test_compact_map_with_buckets_tuple; +select 'select json.data.b, json.data'; +select json.data.b, json.data from test_compact_map_with_buckets_tuple; +select 'select json.data, json.data.b, json.data.c'; +select json.data, json.data.b, json.data.c from test_compact_map_with_buckets_tuple; +select 'select json.data.b, json.data, json.data.c'; +select json.data.b, json.data, json.data.c from test_compact_map_with_buckets_tuple; +select 'select json.data.b, json.data.c, json.data'; +select json.data.b, json.data.c, json.data from test_compact_map_with_buckets_tuple; +select 'select json.data, json.data.^a'; +select json.data, json.data.^a from test_compact_map_with_buckets_tuple; +select 'select json.data.^a, json.data'; +select json.data.^a, json.data from test_compact_map_with_buckets_tuple; +select 'select json.data, json.data.^a, json.data.b'; +select json.data, json.data.^a, json.data.b from test_compact_map_with_buckets_tuple; +select 'select json.data.b, json.data.^a, json.data'; +select json.data.b, json.data.^a, json.data from test_compact_map_with_buckets_tuple; + +drop table test_compact_map_with_buckets_tuple; + +drop table if exists test_wide_map_with_buckets; +create table test_wide_map_with_buckets (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings index_granularity=2, min_bytes_for_wide_part=1, min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='map_with_buckets', object_shared_data_serialization_version_for_zero_level_parts='map_with_buckets', object_shared_data_buckets_for_wide_part=2; +insert into test_wide_map_with_buckets select * from source; + +select 'select json'; +select json from test_wide_map_with_buckets; +select 'select json.b'; +select json.b from test_wide_map_with_buckets; +select 'select json.arr'; +select json.arr from test_wide_map_with_buckets; +select 'select json.b, json.c'; +select json.b, json.c from test_wide_map_with_buckets; +select 'select json.b, json.c, json.arr'; +select json.b, json.c, json.arr from test_wide_map_with_buckets; +select 'select json, json.b, json.c'; +select json, json.b, json.c from test_wide_map_with_buckets; +select 'select json, json.b, json.c, json.arr'; +select json, json.b, json.c, json.arr from test_wide_map_with_buckets; +select 'select json.b, json.c, json'; +select json.b, json.c, json from test_wide_map_with_buckets; +select 'select json.b, json.c, json.arr, json'; +select json.b, json.c, json.arr, json from test_wide_map_with_buckets; +select 'select json.^a'; +select json.^a from test_wide_map_with_buckets; +select 'select json, json.^a'; +select json, json.^a from test_wide_map_with_buckets; +select 'select json.^a, json.a.a1'; +select json.^a, json.a.a1 from test_wide_map_with_buckets; +select 'select json.a.a1, json.^a'; +select json.a.a1, json.^a from test_wide_map_with_buckets; +select 'select json.^a, json.a.a1, json.arr'; +select json.^a, json.a.a1, json.arr from test_wide_map_with_buckets; +select 'select json.a.a1, json.arr, json.^a'; +select json.a.a1, json.arr, json.^a from test_wide_map_with_buckets; + +select 'select json limit 3'; +select json from test_wide_map_with_buckets limit 3; +select 'select json.b limit 3'; +select json.b from test_wide_map_with_buckets limit 3; +select 'select json.arr limit 3'; +select json.arr from test_wide_map_with_buckets limit 3; 
+select 'select json.b, json.c limit 3'; +select json.b, json.c from test_wide_map_with_buckets limit 3; +select 'select json.b, json.c, json.arr limit 3'; +select json.b, json.c, json.arr from test_wide_map_with_buckets limit 3; +select 'select json, json.b, json.c limit 3'; +select json, json.b, json.c from test_wide_map_with_buckets limit 3; +select 'select json, json.b, json.c, json.arr limit 3'; +select json, json.b, json.c, json.arr from test_wide_map_with_buckets limit 3; +select 'select json.b, json.c, json limit 3'; +select json.b, json.c, json from test_wide_map_with_buckets limit 3; +select 'select json.b, json.c, json.arr, json limit 3'; +select json.b, json.c, json.arr, json from test_wide_map_with_buckets limit 3; +select 'select json.^a limit 3'; +select json.^a from test_wide_map_with_buckets limit 3; +select 'select json, json.^a limit 3'; +select json, json.^a from test_wide_map_with_buckets limit 3; +select 'select json.^a, json.a.a1 limit 3'; +select json.^a, json.a.a1 from test_wide_map_with_buckets limit 3; +select 'select json.a.a1, json.^a limit 3'; +select json.a.a1, json.^a from test_wide_map_with_buckets limit 3; +select 'select json.^a, json.a.a1, json.arr limit 3'; +select json.^a, json.a.a1, json.arr from test_wide_map_with_buckets limit 3; +select 'select json.a.a1, json.arr, json.^a limit 3'; +select json.a.a1, json.arr, json.^a from test_wide_map_with_buckets limit 3; + +select 'select json settings max_block_size=3'; +select json from test_wide_map_with_buckets settings max_block_size=3; +select 'select json.b settings max_block_size=3'; +select json.b from test_wide_map_with_buckets settings max_block_size=3; +select 'select json.arr settings max_block_size=3'; +select json.arr from test_wide_map_with_buckets settings max_block_size=3; +select 'select json.b, json.c settings max_block_size=3'; +select json.b, json.c from test_wide_map_with_buckets settings max_block_size=3; +select 'select json.b, json.c, json.arr settings max_block_size=3'; +select json.b, json.c, json.arr from test_wide_map_with_buckets settings max_block_size=3; +select 'select json, json.b, json.c settings max_block_size=3'; +select json, json.b, json.c from test_wide_map_with_buckets settings max_block_size=3; +select 'select json, json.b, json.c, json.arr settings max_block_size=3'; +select json, json.b, json.c, json.arr from test_wide_map_with_buckets settings max_block_size=3; +select 'select json.b, json.c, json settings max_block_size=3'; +select json.b, json.c, json from test_wide_map_with_buckets settings max_block_size=3; +select 'select json.b, json.c, json.arr, json settings max_block_size=3'; +select json.b, json.c, json.arr, json from test_wide_map_with_buckets settings max_block_size=3; +select 'select json.^a settings max_block_size=3'; +select json.^a from test_wide_map_with_buckets settings max_block_size=3; +select 'select json, json.^a settings max_block_size=3'; +select json, json.^a from test_wide_map_with_buckets settings max_block_size=3; +select 'select json.^a, json.a.a1 settings max_block_size=3'; +select json.^a, json.a.a1 from test_wide_map_with_buckets settings max_block_size=3; +select 'select json.a.a1, json.^a settings max_block_size=3'; +select json.a.a1, json.^a from test_wide_map_with_buckets settings max_block_size=3; +select 'select json.^a, json.a.a1, json.arr settings max_block_size=3'; +select json.^a, json.a.a1, json.arr from test_wide_map_with_buckets settings max_block_size=3; +select 'select json.a.a1, json.arr, json.^a settings 
max_block_size=3'; +select json.a.a1, json.arr, json.^a from test_wide_map_with_buckets settings max_block_size=3; + + +drop table test_wide_map_with_buckets; + +drop table if exists test_wide_map_with_buckets_tuple; +create table test_wide_map_with_buckets_tuple (json Tuple(data JSON(max_dynamic_paths=8))) engine=MergeTree order by tuple() settings index_granularity=2, min_bytes_for_wide_part=1, min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='map_with_buckets', object_shared_data_serialization_version_for_zero_level_parts='map_with_buckets', object_shared_data_buckets_for_wide_part=2; +insert into test_wide_map_with_buckets_tuple select tuple(json) from source; + +select 'select json.data'; +select json.data from test_wide_map_with_buckets_tuple; +select 'select json.data, json.data.b'; +select json.data, json.data.b from test_wide_map_with_buckets_tuple; +select 'select json.data.b, json.data'; +select json.data.b, json.data from test_wide_map_with_buckets_tuple; +select 'select json.data, json.data.b, json.data.c'; +select json.data, json.data.b, json.data.c from test_wide_map_with_buckets_tuple; +select 'select json.data.b, json.data, json.data.c'; +select json.data.b, json.data, json.data.c from test_wide_map_with_buckets_tuple; +select 'select json.data.b, json.data.c, json.data'; +select json.data.b, json.data.c, json.data from test_wide_map_with_buckets_tuple; +select 'select json.data, json.data.^a'; +select json.data, json.data.^a from test_wide_map_with_buckets_tuple; +select 'select json.data.^a, json.data'; +select json.data.^a, json.data from test_wide_map_with_buckets_tuple; +select 'select json.data, json.data.^a, json.data.b'; +select json.data, json.data.^a, json.data.b from test_wide_map_with_buckets_tuple; +select 'select json.data.b, json.data.^a, json.data'; +select json.data.b, json.data.^a, json.data from test_wide_map_with_buckets_tuple; + +select 'select json.data limit 3'; +select json.data from test_wide_map_with_buckets_tuple limit 3; +select 'select json.data, json.data.b limit 3'; +select json.data, json.data.b from test_wide_map_with_buckets_tuple limit 3; +select 'select json.data.b, json.data limit 3'; +select json.data.b, json.data from test_wide_map_with_buckets_tuple limit 3; +select 'select json.data, json.data.b, json.data.c limit 3'; +select json.data, json.data.b, json.data.c from test_wide_map_with_buckets_tuple limit 3; +select 'select json.data.b, json.data, json.data.c limit 3'; +select json.data.b, json.data, json.data.c from test_wide_map_with_buckets_tuple limit 3; +select 'select json.data.b, json.data.c, json.data limit 3'; +select json.data.b, json.data.c, json.data from test_wide_map_with_buckets_tuple limit 3; +select 'select json.data, json.data.^a limit 3'; +select json.data, json.data.^a from test_wide_map_with_buckets_tuple limit 3; +select 'select json.data.^a, json.data limit 3'; +select json.data.^a, json.data from test_wide_map_with_buckets_tuple limit 3; +select 'select json.data, json.data.^a, json.data.b limit 3'; +select json.data, json.data.^a, json.data.b from test_wide_map_with_buckets_tuple limit 3; +select 'select json.data.b, json.data.^a, json.data limit 3'; +select json.data.b, json.data.^a, json.data from test_wide_map_with_buckets_tuple limit 3; + +select 'select json.data settings max_block_size=3'; +select json.data from test_wide_map_with_buckets_tuple settings max_block_size=3; +select 'select 
json.data, json.data.b settings max_block_size=3'; +select json.data, json.data.b from test_wide_map_with_buckets_tuple settings max_block_size=3; +select 'select json.data.b, json.data settings max_block_size=3'; +select json.data.b, json.data from test_wide_map_with_buckets_tuple settings max_block_size=3; +select 'select json.data, json.data.b, json.data.c settings max_block_size=3'; +select json.data, json.data.b, json.data.c from test_wide_map_with_buckets_tuple settings max_block_size=3; +select 'select json.data.b, json.data, json.data.c settings max_block_size=3'; +select json.data.b, json.data, json.data.c from test_wide_map_with_buckets_tuple settings max_block_size=3; +select 'select json.data.b, json.data.c, json.data settings max_block_size=3'; +select json.data.b, json.data.c, json.data from test_wide_map_with_buckets_tuple settings max_block_size=3; +select 'select json.data, json.data.^a settings max_block_size=3'; +select json.data, json.data.^a from test_wide_map_with_buckets_tuple settings max_block_size=3; +select 'select json.data.^a, json.data settings max_block_size=3'; +select json.data.^a, json.data from test_wide_map_with_buckets_tuple settings max_block_size=3; +select 'select json.data, json.data.^a, json.data.b settings max_block_size=3'; +select json.data, json.data.^a, json.data.b from test_wide_map_with_buckets_tuple settings max_block_size=3; +select 'select json.data.b, json.data.^a, json.data settings max_block_size=3'; +select json.data.b, json.data.^a, json.data from test_wide_map_with_buckets_tuple settings max_block_size=3; + +drop table test_wide_map_with_buckets_tuple; +drop table source; diff --git a/parser/testdata/03554_json_shared_data_advanced_serialization_compact_part_big/ast.json b/parser/testdata/03554_json_shared_data_advanced_serialization_compact_part_big/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03554_json_shared_data_advanced_serialization_compact_part_big/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03554_json_shared_data_advanced_serialization_compact_part_big/metadata.json b/parser/testdata/03554_json_shared_data_advanced_serialization_compact_part_big/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03554_json_shared_data_advanced_serialization_compact_part_big/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03554_json_shared_data_advanced_serialization_compact_part_big/query.sql b/parser/testdata/03554_json_shared_data_advanced_serialization_compact_part_big/query.sql new file mode 100644 index 000000000..4285ae15d --- /dev/null +++ b/parser/testdata/03554_json_shared_data_advanced_serialization_compact_part_big/query.sql @@ -0,0 +1,114 @@ +-- Tags: long +-- Random settings limits: index_granularity=(100, None); index_granularity_bytes=(100000, None) + +drop table if exists test_compact_without_substreams_advanced; +create table test_compact_without_substreams_advanced (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings min_bytes_for_wide_part='200G', min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=0, object_serialization_version='v3', object_shared_data_serialization_version='advanced', object_shared_data_serialization_version_for_zero_level_parts='advanced', object_shared_data_buckets_for_compact_part=2; +insert into test_compact_without_substreams_advanced 
select multiIf( +number < 15000, +'{"?1" : 1, "?2" : 1, "?3" : 1, "?4" : 1, "?5" : 1, "?6" : 1, "?7" : 1, "?8" : 1}', +number < 20000, +'{"a" : {"a1" : 1, "a2" : 2, "arr" : [{"arr1" : 3, "arr2" : 4, "arr3" : 5, "arr4" : 6}]}, "b" : 7, "c" : 8, "arr" : [{"arr1" : 9, "arr2" : 10, "arr3" : 11, "arr4" : 12}]}', +number < 25000, +'{}', +number < 30000, +'{"a" : {"a1" : 3, "a2" : 4, "arr" : [{"arr1" : 5, "arr2" : 6, "arr3" : 7, "arr4" : 8}]}}', +number < 35000, +'{"b" : 9, "c" : 10}', +number < 40000, +'{"arr" : [{"arr1" : 11, "arr2" : 12, "arr3" : 13, "arr4" : 14}]}', +'{"a" : {"a1" : 5, "a2" : 6}}' +) from numbers(45000); + +select 'select json'; +select json from test_compact_without_substreams_advanced format Null; + +drop table test_compact_without_substreams_advanced; + +drop table if exists test_compact_advanced; +create table test_compact_advanced (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings min_bytes_for_wide_part='200G', min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='advanced', object_shared_data_serialization_version_for_zero_level_parts='advanced', object_shared_data_buckets_for_compact_part=2; +insert into test_compact_advanced select multiIf( +number < 15000, +'{"?1" : 1, "?2" : 1, "?3" : 1, "?4" : 1, "?5" : 1, "?6" : 1, "?7" : 1, "?8" : 1}', +number < 20000, +'{"a" : {"a1" : 1, "a2" : 2, "arr" : [{"arr1" : 3, "arr2" : 4, "arr3" : 5, "arr4" : 6}]}, "b" : 7, "c" : 8, "arr" : [{"arr1" : 9, "arr2" : 10, "arr3" : 11, "arr4" : 12}]}', +number < 25000, +'{}', +number < 30000, +'{"a" : {"a1" : 3, "a2" : 4, "arr" : [{"arr1" : 5, "arr2" : 6, "arr3" : 7, "arr4" : 8}]}}', +number < 35000, +'{"b" : 9, "c" : 10}', +number < 40000, +'{"arr" : [{"arr1" : 11, "arr2" : 12, "arr3" : 13, "arr4" : 14}]}', +'{"a" : {"a1" : 5, "a2" : 6}}' +) from numbers(45000); + +select 'select json'; +select json from test_compact_advanced format Null; +select 'select json.b'; +select json.b from test_compact_advanced format Null; +select 'select json.b, json.c'; +select json.b, json.c from test_compact_advanced format Null; +select 'select json.arr[].arr1'; +select json.arr[].arr1 from test_compact_advanced format Null; +select 'select json.arr[].arr1, json.arr[].arr2'; +select json.arr[].arr1, json.arr[].arr2 from test_compact_advanced format Null; +select 'select json.arr[].arr2, json.arr[].arr1'; +select json.arr[].arr2, json.arr[].arr1 from test_compact_advanced format Null; +select 'select json.arr[].arr1, json.arr[].arr4'; +select json.arr[].arr1, json.arr[].arr4 from test_compact_advanced format Null; +select 'select json.arr[].arr4, json.arr[].arr1'; +select json.arr[].arr4, json.arr[].arr1 from test_compact_advanced format Null; +select 'select json.arr[].arr1, json.arr[].arr99'; +select json.arr[].arr1, json.arr[].arr99 from test_compact_advanced format Null; +select 'select json.arr[].arr99, json.arr[].arr1'; +select json.arr[].arr99, json.arr[].arr1 from test_compact_advanced format Null; +select 'select json.arr, json.arr[].arr1'; +select json.arr, json.arr[].arr1 from test_compact_advanced format Null; +select 'select json.arr[].arr1, json.arr'; +select json.arr[].arr1, json.arr from test_compact_advanced format Null; +select 'select json, json.b, json.c'; +select json, json.b, json.c from test_compact_advanced format Null; +select 'select json.b, json.c, json'; +select json.b, json.c, json from test_compact_advanced format Null; +select 'select json, json.b, json.c, 
json.arr[].arr1'; +select json, json.b, json.c, json.arr[].arr1 from test_compact_advanced format Null; +select 'select json.b, json.c, json.arr[].arr1, json'; +select json.b, json.c, json.arr[].arr1, json from test_compact_advanced format Null; +select 'select json, json.b, json.c, json.arr, json.arr[].arr1'; +select json, json.b, json.c, json.arr, json.arr[].arr1 from test_compact_advanced format Null; +select 'select json.b, json.c, json.arr[].arr1, json.arr, json'; +select json.b, json.c, json.arr[].arr1, json.arr, json from test_compact_advanced format Null; +select 'select json.^a'; +select json.^a from test_compact_advanced format Null; +select 'select json, json.^a'; +select json, json.^a from test_compact_advanced format Null; +select 'select json.^a, json.a.a1'; +select json.^a, json.a.a1 from test_compact_advanced format Null; +select 'select json.a.a1, json.^a'; +select json.a.a1, json.^a from test_compact_advanced format Null; +select 'select json.^a, json.a.a1, json.a.arr[].arr1'; +select json.^a, json.a.a1, json.a.arr[].arr1 from test_compact_advanced format Null; +select 'select json.a.a1, json.a.arr[].arr1, json.^a'; +select json.a.a1, json.a.arr[].arr1, json.^a from test_compact_advanced format Null; +select 'select json, json.a.a1, json.a.arr[].arr1, json.^a'; +select json, json.a.a1, json.a.arr[].arr1, json.^a from test_compact_advanced format Null; +select 'select json.a.a1, json.a.arr[].arr1, json.^a, json'; +select json.a.a1, json.a.arr[].arr1, json.^a, json from test_compact_advanced format Null; +select 'select json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr'; +select json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_compact_advanced format Null; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a from test_compact_advanced format Null; +select 'select json, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr'; +select json, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_compact_advanced format Null; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json from test_compact_advanced format Null; +select 'select json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr'; +select json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_compact_advanced format Null; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1 from test_compact_advanced format Null; +select 'select json, json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr'; +select json, json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_compact_advanced format Null; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1, json'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1, json from test_compact_advanced format Null; + +drop table test_compact_advanced; diff --git a/parser/testdata/03554_json_shared_data_advanced_serialization_wide_part_big/ast.json b/parser/testdata/03554_json_shared_data_advanced_serialization_wide_part_big/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03554_json_shared_data_advanced_serialization_wide_part_big/ast.json @@ -0,0 +1 @@ +{"error": 
true} diff --git a/parser/testdata/03554_json_shared_data_advanced_serialization_wide_part_big/metadata.json b/parser/testdata/03554_json_shared_data_advanced_serialization_wide_part_big/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03554_json_shared_data_advanced_serialization_wide_part_big/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03554_json_shared_data_advanced_serialization_wide_part_big/query.sql b/parser/testdata/03554_json_shared_data_advanced_serialization_wide_part_big/query.sql new file mode 100644 index 000000000..948c6c58e --- /dev/null +++ b/parser/testdata/03554_json_shared_data_advanced_serialization_wide_part_big/query.sql @@ -0,0 +1,91 @@ +-- Tags: long +-- Random settings limits: index_granularity=(100, None); index_granularity_bytes=(100000, None) + +drop table if exists test_wide_advanced; +create table test_wide_advanced (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings min_bytes_for_wide_part=1, min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='advanced', object_shared_data_serialization_version_for_zero_level_parts='advanced', object_shared_data_buckets_for_wide_part=2; +insert into test_wide_advanced select multiIf( +number < 15000, +'{"?1" : 1, "?2" : 1, "?3" : 1, "?4" : 1, "?5" : 1, "?6" : 1, "?7" : 1, "?8" : 1}', +number < 20000, +'{"a" : {"a1" : 1, "a2" : 2, "arr" : [{"arr1" : 3, "arr2" : 4, "arr3" : 5, "arr4" : 6}]}, "b" : 7, "c" : 8, "arr" : [{"arr1" : 9, "arr2" : 10, "arr3" : 11, "arr4" : 12}]}', +number < 25000, +'{}', +number < 30000, +'{"a" : {"a1" : 3, "a2" : 4, "arr" : [{"arr1" : 5, "arr2" : 6, "arr3" : 7, "arr4" : 8}]}}', +number < 35000, +'{"b" : 9, "c" : 10}', +number < 40000, +'{"arr" : [{"arr1" : 11, "arr2" : 12, "arr3" : 13, "arr4" : 14}]}', +'{"a" : {"a1" : 5, "a2" : 6}}' +) from numbers(45000); + +select 'select json'; +select json from test_wide_advanced format Null; +select 'select json.b'; +select json.b from test_wide_advanced format Null; +select 'select json.b, json.c'; +select json.b, json.c from test_wide_advanced format Null; +select 'select json.arr[].arr1'; +select json.arr[].arr1 from test_wide_advanced format Null; +select 'select json.arr[].arr1, json.arr[].arr2'; +select json.arr[].arr1, json.arr[].arr2 from test_wide_advanced format Null; +select 'select json.arr[].arr2, json.arr[].arr1'; +select json.arr[].arr2, json.arr[].arr1 from test_wide_advanced format Null; +select 'select json.arr[].arr1, json.arr[].arr4'; +select json.arr[].arr1, json.arr[].arr4 from test_wide_advanced format Null; +select 'select json.arr[].arr4, json.arr[].arr1'; +select json.arr[].arr4, json.arr[].arr1 from test_wide_advanced format Null; +select 'select json.arr[].arr1, json.arr[].arr99'; +select json.arr[].arr1, json.arr[].arr99 from test_wide_advanced format Null; +select 'select json.arr[].arr99, json.arr[].arr1'; +select json.arr[].arr99, json.arr[].arr1 from test_wide_advanced format Null; +select 'select json.arr, json.arr[].arr1'; +select json.arr, json.arr[].arr1 from test_wide_advanced format Null; +select 'select json.arr[].arr1, json.arr'; +select json.arr[].arr1, json.arr from test_wide_advanced format Null; +select 'select json, json.b, json.c'; +select json, json.b, json.c from test_wide_advanced format Null; +select 'select json.b, json.c, json'; +select json.b, json.c, json from test_wide_advanced format Null; +select 'select 
json, json.b, json.c, json.arr[].arr1'; +select json, json.b, json.c, json.arr[].arr1 from test_wide_advanced format Null; +select 'select json.b, json.c, json.arr[].arr1, json'; +select json.b, json.c, json.arr[].arr1, json from test_wide_advanced format Null; +select 'select json, json.b, json.c, json.arr, json.arr[].arr1'; +select json, json.b, json.c, json.arr, json.arr[].arr1 from test_wide_advanced format Null; +select 'select json.b, json.c, json.arr[].arr1, json.arr, json'; +select json.b, json.c, json.arr[].arr1, json.arr, json from test_wide_advanced format Null; +select 'select json.^a'; +select json.^a from test_wide_advanced format Null; +select 'select json, json.^a'; +select json, json.^a from test_wide_advanced format Null; +select 'select json.^a, json.a.a1'; +select json.^a, json.a.a1 from test_wide_advanced format Null; +select 'select json.a.a1, json.^a'; +select json.a.a1, json.^a from test_wide_advanced format Null; +select 'select json.^a, json.a.a1, json.a.arr[].arr1'; +select json.^a, json.a.a1, json.a.arr[].arr1 from test_wide_advanced format Null; +select 'select json.a.a1, json.a.arr[].arr1, json.^a'; +select json.a.a1, json.a.arr[].arr1, json.^a from test_wide_advanced format Null; +select 'select json, json.a.a1, json.a.arr[].arr1, json.^a'; +select json, json.a.a1, json.a.arr[].arr1, json.^a from test_wide_advanced format Null; +select 'select json.a.a1, json.a.arr[].arr1, json.^a, json'; +select json.a.a1, json.a.arr[].arr1, json.^a, json from test_wide_advanced format Null; +select 'select json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr'; +select json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_wide_advanced format Null; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a from test_wide_advanced format Null; +select 'select json, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr'; +select json, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_wide_advanced format Null; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json from test_wide_advanced format Null; +select 'select json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr'; +select json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_wide_advanced format Null; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1 from test_wide_advanced format Null; +select 'select json, json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr'; +select json, json.arr[].arr1, json.^a, json.a.a1, json.a.arr[].arr1, json.b, json.arr from test_wide_advanced format Null; +select 'select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1, json'; +select json.a.a1, json.a.arr[].arr1, json.b, json.arr, json.^a, json.arr[].arr1, json from test_wide_advanced format Null; + +drop table test_wide_advanced; diff --git a/parser/testdata/03554_json_shared_data_map_serialization_compact_part_big/ast.json b/parser/testdata/03554_json_shared_data_map_serialization_compact_part_big/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03554_json_shared_data_map_serialization_compact_part_big/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/03554_json_shared_data_map_serialization_compact_part_big/metadata.json b/parser/testdata/03554_json_shared_data_map_serialization_compact_part_big/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03554_json_shared_data_map_serialization_compact_part_big/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03554_json_shared_data_map_serialization_compact_part_big/query.sql b/parser/testdata/03554_json_shared_data_map_serialization_compact_part_big/query.sql new file mode 100644 index 000000000..31ad95673 --- /dev/null +++ b/parser/testdata/03554_json_shared_data_map_serialization_compact_part_big/query.sql @@ -0,0 +1,81 @@ +-- Tags: long +-- Random settings limits: index_granularity=(100, None); index_granularity_bytes=(100000, None) + +drop table if exists test_compact_map; +create table test_compact_map (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings min_bytes_for_wide_part='200G', min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='map', object_shared_data_serialization_version_for_zero_level_parts='map'; +insert into test_compact_map select multiIf( +number < 15000, +'{"?1" : 1, "?2" : 1, "?3" : 1, "?4" : 1, "?5" : 1, "?6" : 1, "?7" : 1, "?8" : 1}', +number < 20000, +'{"a" : {"a1" : 1, "a2" : 2, "arr" : [{"arr1" : 3, "arr2" : 4, "arr3" : 5, "arr4" : 6}]}, "b" : 7, "c" : 8, "arr" : [{"arr1" : 9, "arr2" : 10, "arr3" : 11, "arr4" : 12}]}', +number < 25000, +'{}', +number < 30000, +'{"a" : {"a1" : 3, "a2" : 4, "arr" : [{"arr1" : 5, "arr2" : 6, "arr3" : 7, "arr4" : 8}]}}', +number < 35000, +'{"b" : 9, "c" : 10}', +number < 40000, +'{"arr" : [{"arr1" : 11, "arr2" : 12, "arr3" : 13, "arr4" : 14}]}', +'{"a" : {"a1" : 5, "a2" : 6}}' +) from numbers(45000); + +select 'select json'; +select json from test_compact_map format Null; +select 'select json.b'; +select json.b from test_compact_map format Null; +select 'select json.b, json.c'; +select json.b, json.c from test_compact_map format Null; +select 'select json, json.b, json.c'; +select json, json.b, json.c from test_compact_map format Null; +select 'select json.b, json.c, json'; +select json.b, json.c, json from test_compact_map format Null; +select 'select json.^a'; +select json.^a from test_compact_map format Null; +select 'select json, json.^a'; +select json, json.^a from test_compact_map format Null; +select 'select json.^a, json.a.a1'; +select json.^a, json.a.a1 from test_compact_map format Null; +select 'select json.a.a1, json.^a'; +select json.a.a1, json.^a from test_compact_map format Null; +drop table test_compact_map; + +drop table if exists test_compact_map_tuple; +create table test_compact_map_tuple (json Tuple(data JSON(max_dynamic_paths=8))) engine=MergeTree order by tuple() settings min_bytes_for_wide_part='200G', min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='map', object_shared_data_serialization_version_for_zero_level_parts='map'; +insert into test_compact_map_tuple select tuple(multiIf( +number < 15000, +'{"?1" : 1, "?2" : 1, "?3" : 1, "?4" : 1, "?5" : 1, "?6" : 1, "?7" : 1, "?8" : 1}', +number < 20000, +'{"a" : {"a1" : 1, "a2" : 2, "arr" : [{"arr1" : 3, "arr2" : 4, "arr3" : 5, "arr4" : 6}]}, "b" : 7, "c" : 8, "arr" : [{"arr1" : 9, "arr2" : 10, "arr3" : 11, "arr4" : 12}]}', +number < 25000, +'{}', 
+number < 30000, +'{"a" : {"a1" : 3, "a2" : 4, "arr" : [{"arr1" : 5, "arr2" : 6, "arr3" : 7, "arr4" : 8}]}}', +number < 35000, +'{"b" : 9, "c" : 10}', +number < 40000, +'{"arr" : [{"arr1" : 11, "arr2" : 12, "arr3" : 13, "arr4" : 14}]}', +'{"a" : {"a1" : 5, "a2" : 6}}' +)) from numbers(45000); + +select 'select json.data'; +select json.data from test_compact_map_tuple format Null; +select 'select json.data, json.data.b'; +select json.data, json.data.b from test_compact_map_tuple format Null; +select 'select json.data.b, json.data'; +select json.data.b, json.data from test_compact_map_tuple format Null; +select 'select json.data, json.data.b, json.data.c'; +select json.data, json.data.b, json.data.c from test_compact_map_tuple format Null; +select 'select json.data.b, json.data, json.data.c'; +select json.data.b, json.data, json.data.c from test_compact_map_tuple format Null; +select 'select json.data.b, json.data.c, json.data'; +select json.data.b, json.data.c, json.data from test_compact_map_tuple format Null; +select 'select json.data, json.data.^a'; +select json.data, json.data.^a from test_compact_map_tuple format Null; +select 'select json.data.^a, json.data'; +select json.data.^a, json.data from test_compact_map_tuple format Null; +select 'select json.data, json.data.^a, json.data.b'; +select json.data, json.data.^a, json.data.b from test_compact_map_tuple format Null; +select 'select json.data.b, json.data.^a, json.data'; +select json.data.b, json.data.^a, json.data from test_compact_map_tuple format Null; + +drop table test_compact_map_tuple; diff --git a/parser/testdata/03554_json_shared_data_map_serialization_wide_part_big/ast.json b/parser/testdata/03554_json_shared_data_map_serialization_wide_part_big/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03554_json_shared_data_map_serialization_wide_part_big/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03554_json_shared_data_map_serialization_wide_part_big/metadata.json b/parser/testdata/03554_json_shared_data_map_serialization_wide_part_big/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03554_json_shared_data_map_serialization_wide_part_big/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03554_json_shared_data_map_serialization_wide_part_big/query.sql b/parser/testdata/03554_json_shared_data_map_serialization_wide_part_big/query.sql new file mode 100644 index 000000000..3e76e85e1 --- /dev/null +++ b/parser/testdata/03554_json_shared_data_map_serialization_wide_part_big/query.sql @@ -0,0 +1,82 @@ +-- Tags: long +-- Random settings limits: index_granularity=(100, None); index_granularity_bytes=(100000, None) + +drop table if exists test_wide_map; +create table test_wide_map (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings min_bytes_for_wide_part=1, min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='map', object_shared_data_serialization_version_for_zero_level_parts='map'; +insert into test_wide_map select multiIf( +number < 15000, +'{"?1" : 1, "?2" : 1, "?3" : 1, "?4" : 1, "?5" : 1, "?6" : 1, "?7" : 1, "?8" : 1}', +number < 20000, +'{"a" : {"a1" : 1, "a2" : 2, "arr" : [{"arr1" : 3, "arr2" : 4, "arr3" : 5, "arr4" : 6}]}, "b" : 7, "c" : 8, "arr" : [{"arr1" : 9, "arr2" : 10, "arr3" : 11, "arr4" : 12}]}', +number < 25000, +'{}', +number < 30000, +'{"a" : {"a1" : 3, "a2" : 
4, "arr" : [{"arr1" : 5, "arr2" : 6, "arr3" : 7, "arr4" : 8}]}}', +number < 35000, +'{"b" : 9, "c" : 10}', +number < 40000, +'{"arr" : [{"arr1" : 11, "arr2" : 12, "arr3" : 13, "arr4" : 14}]}', +'{"a" : {"a1" : 5, "a2" : 6}}' +) from numbers(45000); + +select 'select json'; +select json from test_wide_map format Null; +select 'select json.b'; +select json.b from test_wide_map format Null; +select 'select json.b, json.c'; +select json.b, json.c from test_wide_map format Null; +select 'select json, json.b, json.c'; +select json, json.b, json.c from test_wide_map format Null; +select 'select json.b, json.c, json'; +select json.b, json.c, json from test_wide_map format Null; +select 'select json.^a'; +select json.^a from test_wide_map format Null; +select 'select json, json.^a'; +select json, json.^a from test_wide_map format Null; +select 'select json.^a, json.a.a1'; +select json.^a, json.a.a1 from test_wide_map format Null; +select 'select json.a.a1, json.^a'; +select json.a.a1, json.^a from test_wide_map format Null; + +drop table test_wide_map; + +drop table if exists test_wide_map_tuple; +create table test_wide_map_tuple (json Tuple(data JSON(max_dynamic_paths=8))) engine=MergeTree order by tuple() settings min_bytes_for_wide_part=1, min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='map', object_shared_data_serialization_version_for_zero_level_parts='map'; +insert into test_wide_map_tuple select tuple(multiIf( +number < 15000, +'{"?1" : 1, "?2" : 1, "?3" : 1, "?4" : 1, "?5" : 1, "?6" : 1, "?7" : 1, "?8" : 1}', +number < 20000, +'{"a" : {"a1" : 1, "a2" : 2, "arr" : [{"arr1" : 3, "arr2" : 4, "arr3" : 5, "arr4" : 6}]}, "b" : 7, "c" : 8, "arr" : [{"arr1" : 9, "arr2" : 10, "arr3" : 11, "arr4" : 12}]}', +number < 25000, +'{}', +number < 30000, +'{"a" : {"a1" : 3, "a2" : 4, "arr" : [{"arr1" : 5, "arr2" : 6, "arr3" : 7, "arr4" : 8}]}}', +number < 35000, +'{"b" : 9, "c" : 10}', +number < 40000, +'{"arr" : [{"arr1" : 11, "arr2" : 12, "arr3" : 13, "arr4" : 14}]}', +'{"a" : {"a1" : 5, "a2" : 6}}' +)) from numbers(45000); + +select 'select json.data'; +select json.data from test_wide_map_tuple format Null; +select 'select json.data, json.data.b'; +select json.data, json.data.b from test_wide_map_tuple format Null; +select 'select json.data.b, json.data'; +select json.data.b, json.data from test_wide_map_tuple format Null; +select 'select json.data, json.data.b, json.data.c'; +select json.data, json.data.b, json.data.c from test_wide_map_tuple format Null; +select 'select json.data.b, json.data, json.data.c'; +select json.data.b, json.data, json.data.c from test_wide_map_tuple format Null; +select 'select json.data.b, json.data.c, json.data'; +select json.data.b, json.data.c, json.data from test_wide_map_tuple format Null; +select 'select json.data, json.data.^a'; +select json.data, json.data.^a from test_wide_map_tuple format Null; +select 'select json.data.^a, json.data'; +select json.data.^a, json.data from test_wide_map_tuple format Null; +select 'select json.data, json.data.^a, json.data.b'; +select json.data, json.data.^a, json.data.b from test_wide_map_tuple format Null; +select 'select json.data.b, json.data.^a, json.data'; +select json.data.b, json.data.^a, json.data from test_wide_map_tuple format Null; + +drop table test_wide_map_tuple format Null; diff --git a/parser/testdata/03554_json_shared_data_map_with_buckets_serialization_compact_part_big/ast.json 
b/parser/testdata/03554_json_shared_data_map_with_buckets_serialization_compact_part_big/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03554_json_shared_data_map_with_buckets_serialization_compact_part_big/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03554_json_shared_data_map_with_buckets_serialization_compact_part_big/metadata.json b/parser/testdata/03554_json_shared_data_map_with_buckets_serialization_compact_part_big/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03554_json_shared_data_map_with_buckets_serialization_compact_part_big/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03554_json_shared_data_map_with_buckets_serialization_compact_part_big/query.sql b/parser/testdata/03554_json_shared_data_map_with_buckets_serialization_compact_part_big/query.sql new file mode 100644 index 000000000..90da10fd1 --- /dev/null +++ b/parser/testdata/03554_json_shared_data_map_with_buckets_serialization_compact_part_big/query.sql @@ -0,0 +1,53 @@ +-- Tags: long +-- Random settings limits: index_granularity=(100, None); index_granularity_bytes=(100000, None) + +drop table if exists test_compact_map_with_buckets; +create table test_compact_map_with_buckets (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings min_bytes_for_wide_part='200G', min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='map_with_buckets', object_shared_data_serialization_version_for_zero_level_parts='map_with_buckets', object_shared_data_buckets_for_compact_part=2; +insert into test_compact_map_with_buckets select multiIf( +number < 15000, +'{"?1" : 1, "?2" : 1, "?3" : 1, "?4" : 1, "?5" : 1, "?6" : 1, "?7" : 1, "?8" : 1}', +number < 20000, +'{"a" : {"a1" : 1, "a2" : 2, "arr" : [{"arr1" : 3, "arr2" : 4, "arr3" : 5, "arr4" : 6}]}, "b" : 7, "c" : 8, "arr" : [{"arr1" : 9, "arr2" : 10, "arr3" : 11, "arr4" : 12}]}', +number < 25000, +'{}', +number < 30000, +'{"a" : {"a1" : 3, "a2" : 4, "arr" : [{"arr1" : 5, "arr2" : 6, "arr3" : 7, "arr4" : 8}]}}', +number < 35000, +'{"b" : 9, "c" : 10}', +number < 40000, +'{"arr" : [{"arr1" : 11, "arr2" : 12, "arr3" : 13, "arr4" : 14}]}', +'{"a" : {"a1" : 5, "a2" : 6}}' +) from numbers(45000); + +select 'select json'; +select json from test_compact_map_with_buckets format Null; +select 'select json.b'; +select json.b from test_compact_map_with_buckets format Null; +select 'select json.arr'; +select json.arr from test_compact_map_with_buckets format Null; +select 'select json.b, json.c'; +select json.b, json.c from test_compact_map_with_buckets format Null; +select 'select json.b, json.c, json.arr'; +select json.b, json.c, json.arr from test_compact_map_with_buckets format Null; +select 'select json, json.b, json.c'; +select json, json.b, json.c from test_compact_map_with_buckets format Null; +select 'select json, json.b, json.c, json.arr'; +select json, json.b, json.c, json.arr from test_compact_map_with_buckets format Null; +select 'select json.b, json.c, json'; +select json.b, json.c, json from test_compact_map_with_buckets format Null; +select 'select json.b, json.c, json.arr, json'; +select json.b, json.c, json.arr, json from test_compact_map_with_buckets format Null; +select 'select json.^a'; +select json.^a from test_compact_map_with_buckets format Null; +select 'select json, json.^a'; +select json, json.^a from 
test_compact_map_with_buckets format Null; +select 'select json.^a, json.a.a1'; +select json.^a, json.a.a1 from test_compact_map_with_buckets format Null; +select 'select json.a.a1, json.^a'; +select json.a.a1, json.^a from test_compact_map_with_buckets format Null; +select 'select json.^a, json.a.a1, json.arr'; +select json.^a, json.a.a1, json.arr from test_compact_map_with_buckets format Null; +select 'select json.a.a1, json.arr, json.^a'; +select json.a.a1, json.arr, json.^a from test_compact_map_with_buckets format Null; + +drop table test_compact_map_with_buckets format Null; diff --git a/parser/testdata/03554_json_shared_data_map_with_buckets_serialization_wide_part_big/ast.json b/parser/testdata/03554_json_shared_data_map_with_buckets_serialization_wide_part_big/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03554_json_shared_data_map_with_buckets_serialization_wide_part_big/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03554_json_shared_data_map_with_buckets_serialization_wide_part_big/metadata.json b/parser/testdata/03554_json_shared_data_map_with_buckets_serialization_wide_part_big/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03554_json_shared_data_map_with_buckets_serialization_wide_part_big/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03554_json_shared_data_map_with_buckets_serialization_wide_part_big/query.sql b/parser/testdata/03554_json_shared_data_map_with_buckets_serialization_wide_part_big/query.sql new file mode 100644 index 000000000..455a882e4 --- /dev/null +++ b/parser/testdata/03554_json_shared_data_map_with_buckets_serialization_wide_part_big/query.sql @@ -0,0 +1,53 @@ +-- Tags: long +-- Random settings limits: index_granularity=(100, None); index_granularity_bytes=(100000, None) + +drop table if exists test_wide_map_with_buckets; +create table test_wide_map_with_buckets (json JSON(max_dynamic_paths=8)) engine=MergeTree order by tuple() settings min_bytes_for_wide_part=1, min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='map_with_buckets', object_shared_data_serialization_version_for_zero_level_parts='map_with_buckets', object_shared_data_buckets_for_wide_part=2; +insert into test_wide_map_with_buckets select multiIf( +number < 15000, +'{"?1" : 1, "?2" : 1, "?3" : 1, "?4" : 1, "?5" : 1, "?6" : 1, "?7" : 1, "?8" : 1}', +number < 20000, +'{"a" : {"a1" : 1, "a2" : 2, "arr" : [{"arr1" : 3, "arr2" : 4, "arr3" : 5, "arr4" : 6}]}, "b" : 7, "c" : 8, "arr" : [{"arr1" : 9, "arr2" : 10, "arr3" : 11, "arr4" : 12}]}', +number < 25000, +'{}', +number < 30000, +'{"a" : {"a1" : 3, "a2" : 4, "arr" : [{"arr1" : 5, "arr2" : 6, "arr3" : 7, "arr4" : 8}]}}', +number < 35000, +'{"b" : 9, "c" : 10}', +number < 40000, +'{"arr" : [{"arr1" : 11, "arr2" : 12, "arr3" : 13, "arr4" : 14}]}', +'{"a" : {"a1" : 5, "a2" : 6}}' +) from numbers(45000); + +select 'select json'; +select json from test_wide_map_with_buckets format Null; +select 'select json.b'; +select json.b from test_wide_map_with_buckets format Null; +select 'select json.arr'; +select json.arr from test_wide_map_with_buckets format Null; +select 'select json.b, json.c'; +select json.b, json.c from test_wide_map_with_buckets format Null; +select 'select json.b, json.c, json.arr'; +select json.b, json.c, json.arr from test_wide_map_with_buckets format Null; +select 'select json, json.b, json.c'; +select json, json.b, json.c
from test_wide_map_with_buckets format Null; +select 'select json, json.b, json.c, json.arr'; +select json, json.b, json.c, json.arr from test_wide_map_with_buckets format Null; +select 'select json.b, json.c, json'; +select json.b, json.c, json from test_wide_map_with_buckets format Null; +select 'select json.b, json.c, json.arr, json'; +select json.b, json.c, json.arr, json from test_wide_map_with_buckets format Null; +select 'select json.^a'; +select json.^a from test_wide_map_with_buckets format Null; +select 'select json, json.^a'; +select json, json.^a from test_wide_map_with_buckets format Null; +select 'select json.^a, json.a.a1'; +select json.^a, json.a.a1 from test_wide_map_with_buckets format Null; +select 'select json.a.a1, json.^a'; +select json.a.a1, json.^a from test_wide_map_with_buckets format Null; +select 'select json.^a, json.a.a1, json.arr'; +select json.^a, json.a.a1, json.arr from test_wide_map_with_buckets format Null; +select 'select json.a.a1, json.arr, json.^a'; +select json.a.a1, json.arr, json.^a from test_wide_map_with_buckets format Null; + +drop table test_wide_map_with_buckets; diff --git a/parser/testdata/03554_json_shared_data_tuple_advanced_serialization_compact_part_big/ast.json b/parser/testdata/03554_json_shared_data_tuple_advanced_serialization_compact_part_big/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03554_json_shared_data_tuple_advanced_serialization_compact_part_big/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03554_json_shared_data_tuple_advanced_serialization_compact_part_big/metadata.json b/parser/testdata/03554_json_shared_data_tuple_advanced_serialization_compact_part_big/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03554_json_shared_data_tuple_advanced_serialization_compact_part_big/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03554_json_shared_data_tuple_advanced_serialization_compact_part_big/query.sql b/parser/testdata/03554_json_shared_data_tuple_advanced_serialization_compact_part_big/query.sql new file mode 100644 index 000000000..f1b3b8a4c --- /dev/null +++ b/parser/testdata/03554_json_shared_data_tuple_advanced_serialization_compact_part_big/query.sql @@ -0,0 +1,43 @@ +-- Tags: long +-- Random settings limits: index_granularity=(100, None); index_granularity_bytes=(100000, None) + +drop table if exists test_compact_advanced_tuple; +create table test_compact_advanced_tuple (json Tuple(data JSON(max_dynamic_paths=8))) engine=MergeTree order by tuple() settings min_bytes_for_wide_part='200G', min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='advanced', object_shared_data_serialization_version_for_zero_level_parts='advanced', object_shared_data_buckets_for_compact_part=2; +insert into test_compact_advanced_tuple select tuple(multiIf( +number < 15000, +'{"?1" : 1, "?2" : 1, "?3" : 1, "?4" : 1, "?5" : 1, "?6" : 1, "?7" : 1, "?8" : 1}', +number < 20000, +'{"a" : {"a1" : 1, "a2" : 2, "arr" : [{"arr1" : 3, "arr2" : 4, "arr3" : 5, "arr4" : 6}]}, "b" : 7, "c" : 8, "arr" : [{"arr1" : 9, "arr2" : 10, "arr3" : 11, "arr4" : 12}]}', +number < 25000, +'{}', +number < 30000, +'{"a" : {"a1" : 3, "a2" : 4, "arr" : [{"arr1" : 5, "arr2" : 6, "arr3" : 7, "arr4" : 8}]}}', +number < 35000, +'{"b" : 9, "c" : 10}', +number < 40000, +'{"arr" : [{"arr1" : 11, "arr2" : 12, "arr3" : 13, "arr4" : 14}]}', +'{"a" : 
{"a1" : 5, "a2" : 6}}' +)) from numbers(45000); + +select 'select json.data'; +select json.data from test_compact_advanced_tuple format Null; +select 'select json.data, json.data.b'; +select json.data, json.data.b from test_compact_advanced_tuple format Null; +select 'select json.data.b, json.data'; +select json.data.b, json.data from test_compact_advanced_tuple format Null; +select 'select json.data, json.data.b, json.data.c'; +select json.data, json.data.b, json.data.c from test_compact_advanced_tuple format Null; +select 'select json.data.b, json.data, json.data.c'; +select json.data.b, json.data, json.data.c from test_compact_advanced_tuple format Null; +select 'select json.data.b, json.data.c, json.data'; +select json.data.b, json.data.c, json.data from test_compact_advanced_tuple format Null; +select 'select json.data, json.data.^a'; +select json.data, json.data.^a from test_compact_advanced_tuple format Null; +select 'select json.data.^a, json.data'; +select json.data.^a, json.data from test_compact_advanced_tuple format Null; +select 'select json.data, json.data.^a, json.data.b'; +select json.data, json.data.^a, json.data.b from test_compact_advanced_tuple format Null; +select 'select json.data.b, json.data.^a, json.data'; +select json.data.b, json.data.^a, json.data from test_compact_advanced_tuple format Null; + +drop table test_compact_advanced_tuple; diff --git a/parser/testdata/03554_json_shared_data_tuple_advanced_serialization_wide_part_big/ast.json b/parser/testdata/03554_json_shared_data_tuple_advanced_serialization_wide_part_big/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03554_json_shared_data_tuple_advanced_serialization_wide_part_big/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03554_json_shared_data_tuple_advanced_serialization_wide_part_big/metadata.json b/parser/testdata/03554_json_shared_data_tuple_advanced_serialization_wide_part_big/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03554_json_shared_data_tuple_advanced_serialization_wide_part_big/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03554_json_shared_data_tuple_advanced_serialization_wide_part_big/query.sql b/parser/testdata/03554_json_shared_data_tuple_advanced_serialization_wide_part_big/query.sql new file mode 100644 index 000000000..aa4791415 --- /dev/null +++ b/parser/testdata/03554_json_shared_data_tuple_advanced_serialization_wide_part_big/query.sql @@ -0,0 +1,43 @@ +-- Tags: long +-- Random settings limits: index_granularity=(100, None); index_granularity_bytes=(100000, None) + +drop table if exists test_wide_advanced_tuple; +create table test_wide_advanced_tuple (json Tuple(data JSON(max_dynamic_paths=8))) engine=MergeTree order by tuple() settings min_bytes_for_wide_part=1, min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='advanced', object_shared_data_serialization_version_for_zero_level_parts='advanced', object_shared_data_buckets_for_wide_part=2; +insert into test_wide_advanced_tuple select tuple(multiIf( +number < 15000, +'{"?1" : 1, "?2" : 1, "?3" : 1, "?4" : 1, "?5" : 1, "?6" : 1, "?7" : 1, "?8" : 1}', +number < 20000, +'{"a" : {"a1" : 1, "a2" : 2, "arr" : [{"arr1" : 3, "arr2" : 4, "arr3" : 5, "arr4" : 6}]}, "b" : 7, "c" : 8, "arr" : [{"arr1" : 9, "arr2" : 10, "arr3" : 11, "arr4" : 12}]}', +number < 25000, +'{}', +number < 30000, +'{"a" : {"a1" 
: 3, "a2" : 4, "arr" : [{"arr1" : 5, "arr2" : 6, "arr3" : 7, "arr4" : 8}]}}', +number < 35000, +'{"b" : 9, "c" : 10}', +number < 40000, +'{"arr" : [{"arr1" : 11, "arr2" : 12, "arr3" : 13, "arr4" : 14}]}', +'{"a" : {"a1" : 5, "a2" : 6}}' +)) from numbers(45000); + +select 'select json.data'; +select json.data from test_wide_advanced_tuple format Null; +select 'select json.data, json.data.b'; +select json.data, json.data.b from test_wide_advanced_tuple format Null; +select 'select json.data.b, json.data'; +select json.data.b, json.data from test_wide_advanced_tuple format Null; +select 'select json.data, json.data.b, json.data.c'; +select json.data, json.data.b, json.data.c from test_wide_advanced_tuple format Null; +select 'select json.data.b, json.data, json.data.c'; +select json.data.b, json.data, json.data.c from test_wide_advanced_tuple format Null; +select 'select json.data.b, json.data.c, json.data'; +select json.data.b, json.data.c, json.data from test_wide_advanced_tuple format Null; +select 'select json.data, json.data.^a'; +select json.data, json.data.^a from test_wide_advanced_tuple format Null; +select 'select json.data.^a, json.data'; +select json.data.^a, json.data from test_wide_advanced_tuple format Null; +select 'select json.data, json.data.^a, json.data.b'; +select json.data, json.data.^a, json.data.b from test_wide_advanced_tuple format Null; +select 'select json.data.b, json.data.^a, json.data'; +select json.data.b, json.data.^a, json.data from test_wide_advanced_tuple format Null; + +drop table test_wide_advanced_tuple; diff --git a/parser/testdata/03554_json_shared_data_tuple_map_with_buckets_serialization_compact_part_big/ast.json b/parser/testdata/03554_json_shared_data_tuple_map_with_buckets_serialization_compact_part_big/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03554_json_shared_data_tuple_map_with_buckets_serialization_compact_part_big/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03554_json_shared_data_tuple_map_with_buckets_serialization_compact_part_big/metadata.json b/parser/testdata/03554_json_shared_data_tuple_map_with_buckets_serialization_compact_part_big/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03554_json_shared_data_tuple_map_with_buckets_serialization_compact_part_big/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03554_json_shared_data_tuple_map_with_buckets_serialization_compact_part_big/query.sql b/parser/testdata/03554_json_shared_data_tuple_map_with_buckets_serialization_compact_part_big/query.sql new file mode 100644 index 000000000..e1399df85 --- /dev/null +++ b/parser/testdata/03554_json_shared_data_tuple_map_with_buckets_serialization_compact_part_big/query.sql @@ -0,0 +1,43 @@ +-- Tags: long +-- Random settings limits: index_granularity=(100, None); index_granularity_bytes=(100000, None) + +drop table if exists test_compact_map_with_buckets_tuple; +create table test_compact_map_with_buckets_tuple (json Tuple(data JSON(max_dynamic_paths=8))) engine=MergeTree order by tuple() settings min_bytes_for_wide_part='200G', min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='map_with_buckets', object_shared_data_serialization_version_for_zero_level_parts='map_with_buckets', object_shared_data_buckets_for_compact_part=2; +insert into test_compact_map_with_buckets_tuple select tuple(multiIf( +number < 
15000, +'{"?1" : 1, "?2" : 1, "?3" : 1, "?4" : 1, "?5" : 1, "?6" : 1, "?7" : 1, "?8" : 1}', +number < 20000, +'{"a" : {"a1" : 1, "a2" : 2, "arr" : [{"arr1" : 3, "arr2" : 4, "arr3" : 5, "arr4" : 6}]}, "b" : 7, "c" : 8, "arr" : [{"arr1" : 9, "arr2" : 10, "arr3" : 11, "arr4" : 12}]}', +number < 25000, +'{}', +number < 30000, +'{"a" : {"a1" : 3, "a2" : 4, "arr" : [{"arr1" : 5, "arr2" : 6, "arr3" : 7, "arr4" : 8}]}}', +number < 35000, +'{"b" : 9, "c" : 10}', +number < 40000, +'{"arr" : [{"arr1" : 11, "arr2" : 12, "arr3" : 13, "arr4" : 14}]}', +'{"a" : {"a1" : 5, "a2" : 6}}' +)) from numbers(45000); + +select 'select json.data'; +select json.data from test_compact_map_with_buckets_tuple format Null; +select 'select json.data, json.data.b'; +select json.data, json.data.b from test_compact_map_with_buckets_tuple format Null; +select 'select json.data.b, json.data'; +select json.data.b, json.data from test_compact_map_with_buckets_tuple format Null; +select 'select json.data, json.data.b, json.data.c'; +select json.data, json.data.b, json.data.c from test_compact_map_with_buckets_tuple format Null; +select 'select json.data.b, json.data, json.data.c'; +select json.data.b, json.data, json.data.c from test_compact_map_with_buckets_tuple format Null; +select 'select json.data.b, json.data.c, json.data'; +select json.data.b, json.data.c, json.data from test_compact_map_with_buckets_tuple format Null; +select 'select json.data, json.data.^a'; +select json.data, json.data.^a from test_compact_map_with_buckets_tuple format Null; +select 'select json.data.^a, json.data'; +select json.data.^a, json.data from test_compact_map_with_buckets_tuple format Null; +select 'select json.data, json.data.^a, json.data.b'; +select json.data, json.data.^a, json.data.b from test_compact_map_with_buckets_tuple format Null; +select 'select json.data.b, json.data.^a, json.data'; +select json.data.b, json.data.^a, json.data from test_compact_map_with_buckets_tuple format Null; + +drop table test_compact_map_with_buckets_tuple; diff --git a/parser/testdata/03554_json_shared_data_tuple_map_with_buckets_serialization_wide_part_big/ast.json b/parser/testdata/03554_json_shared_data_tuple_map_with_buckets_serialization_wide_part_big/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03554_json_shared_data_tuple_map_with_buckets_serialization_wide_part_big/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03554_json_shared_data_tuple_map_with_buckets_serialization_wide_part_big/metadata.json b/parser/testdata/03554_json_shared_data_tuple_map_with_buckets_serialization_wide_part_big/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03554_json_shared_data_tuple_map_with_buckets_serialization_wide_part_big/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03554_json_shared_data_tuple_map_with_buckets_serialization_wide_part_big/query.sql b/parser/testdata/03554_json_shared_data_tuple_map_with_buckets_serialization_wide_part_big/query.sql new file mode 100644 index 000000000..ee168fea1 --- /dev/null +++ b/parser/testdata/03554_json_shared_data_tuple_map_with_buckets_serialization_wide_part_big/query.sql @@ -0,0 +1,43 @@ +-- Tags: long +-- Random settings limits: index_granularity=(100, None); index_granularity_bytes=(100000, None) + +drop table if exists test_wide_map_with_buckets_tuple; +create table test_wide_map_with_buckets_tuple (json Tuple(data JSON(max_dynamic_paths=8))) engine=MergeTree order by
tuple() settings min_bytes_for_wide_part=1, min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='map_with_buckets', object_shared_data_serialization_version_for_zero_level_parts='map_with_buckets', object_shared_data_buckets_for_wide_part=2; +insert into test_wide_map_with_buckets_tuple select tuple(multiIf( +number < 15000, +'{"?1" : 1, "?2" : 1, "?3" : 1, "?4" : 1, "?5" : 1, "?6" : 1, "?7" : 1, "?8" : 1}', +number < 20000, +'{"a" : {"a1" : 1, "a2" : 2, "arr" : [{"arr1" : 3, "arr2" : 4, "arr3" : 5, "arr4" : 6}]}, "b" : 7, "c" : 8, "arr" : [{"arr1" : 9, "arr2" : 10, "arr3" : 11, "arr4" : 12}]}', +number < 25000, +'{}', +number < 30000, +'{"a" : {"a1" : 3, "a2" : 4, "arr" : [{"arr1" : 5, "arr2" : 6, "arr3" : 7, "arr4" : 8}]}}', +number < 35000, +'{"b" : 9, "c" : 10}', +number < 40000, +'{"arr" : [{"arr1" : 11, "arr2" : 12, "arr3" : 13, "arr4" : 14}]}', +'{"a" : {"a1" : 5, "a2" : 6}}' +)) from numbers(45000); + +select 'select json.data'; +select json.data from test_wide_map_with_buckets_tuple format Null; +select 'select json.data, json.data.b'; +select json.data, json.data.b from test_wide_map_with_buckets_tuple format Null; +select 'select json.data.b, json.data'; +select json.data.b, json.data from test_wide_map_with_buckets_tuple format Null; +select 'select json.data, json.data.b, json.data.c'; +select json.data, json.data.b, json.data.c from test_wide_map_with_buckets_tuple format Null; +select 'select json.data.b, json.data, json.data.c'; +select json.data.b, json.data, json.data.c from test_wide_map_with_buckets_tuple format Null; +select 'select json.data.b, json.data.c, json.data'; +select json.data.b, json.data.c, json.data from test_wide_map_with_buckets_tuple format Null; +select 'select json.data, json.data.^a'; +select json.data, json.data.^a from test_wide_map_with_buckets_tuple format Null; +select 'select json.data.^a, json.data'; +select json.data.^a, json.data from test_wide_map_with_buckets_tuple format Null; +select 'select json.data, json.data.^a, json.data.b'; +select json.data, json.data.^a, json.data.b from test_wide_map_with_buckets_tuple format Null; +select 'select json.data.b, json.data.^a, json.data'; +select json.data.b, json.data.^a, json.data from test_wide_map_with_buckets_tuple format Null; + +drop table test_wide_map_with_buckets_tuple; diff --git a/parser/testdata/03555_inconsistent_formatting_ttl/ast.json b/parser/testdata/03555_inconsistent_formatting_ttl/ast.json new file mode 100644 index 000000000..528e80061 --- /dev/null +++ b/parser/testdata/03555_inconsistent_formatting_ttl/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00152995, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03555_inconsistent_formatting_ttl/metadata.json b/parser/testdata/03555_inconsistent_formatting_ttl/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03555_inconsistent_formatting_ttl/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03555_inconsistent_formatting_ttl/query.sql b/parser/testdata/03555_inconsistent_formatting_ttl/query.sql new file mode 100644 index 000000000..45a549321 --- /dev/null +++ b/parser/testdata/03555_inconsistent_formatting_ttl/query.sql @@ -0,0 +1,3 
@@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Date) ENGINE = MergeTree() ORDER BY () TTL (materialize(c0)); +DROP TABLE t0; diff --git a/parser/testdata/03555_json_shared_data_advanced_paths_indexes_1/ast.json b/parser/testdata/03555_json_shared_data_advanced_paths_indexes_1/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03555_json_shared_data_advanced_paths_indexes_1/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03555_json_shared_data_advanced_paths_indexes_1/metadata.json b/parser/testdata/03555_json_shared_data_advanced_paths_indexes_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03555_json_shared_data_advanced_paths_indexes_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03555_json_shared_data_advanced_paths_indexes_1/query.sql b/parser/testdata/03555_json_shared_data_advanced_paths_indexes_1/query.sql new file mode 100644 index 000000000..81dd5a9cf --- /dev/null +++ b/parser/testdata/03555_json_shared_data_advanced_paths_indexes_1/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-s3-storage, long, no-msan, no-tsan, no-asan, no-ubsan + +set output_format_json_quote_64bit_integers=0; + +drop table if exists test; + +create table test (json JSON(max_dynamic_paths=0)) engine=MergeTree order by tuple() settings index_granularity=1, min_bytes_for_wide_part='200G', min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=0, object_serialization_version='v3', object_shared_data_serialization_version='advanced', object_shared_data_serialization_version_for_zero_level_parts='advanced', object_shared_data_buckets_for_compact_part=1; + +insert into test select toJSONString(arrayMap(x -> tuple('key' || x, x), range(255))::Map(String, UInt32)); +insert into test select toJSONString(arrayMap(x -> tuple('key' || x, x), range(256))::Map(String, UInt32)); +insert into test select toJSONString(arrayMap(x -> tuple('key' || x, x), range(65535))::Map(String, UInt32)); +insert into test select toJSONString(arrayMap(x -> tuple('key' || x, x), range(65536))::Map(String, UInt32)); + +optimize table test final; + +select sipHash64(json::String) from test order by all; + +drop table test; diff --git a/parser/testdata/03555_json_shared_data_advanced_paths_indexes_2/ast.json b/parser/testdata/03555_json_shared_data_advanced_paths_indexes_2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03555_json_shared_data_advanced_paths_indexes_2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03555_json_shared_data_advanced_paths_indexes_2/metadata.json b/parser/testdata/03555_json_shared_data_advanced_paths_indexes_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03555_json_shared_data_advanced_paths_indexes_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03555_json_shared_data_advanced_paths_indexes_2/query.sql b/parser/testdata/03555_json_shared_data_advanced_paths_indexes_2/query.sql new file mode 100644 index 000000000..25edb6f53 --- /dev/null +++ b/parser/testdata/03555_json_shared_data_advanced_paths_indexes_2/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-s3-storage, long, no-msan, no-tsan, no-asan, no-ubsan + +set output_format_json_quote_64bit_integers=0; + +drop table if exists test; + +create table test (json JSON(max_dynamic_paths=0)) engine=MergeTree order by tuple() settings index_granularity=1, 
min_bytes_for_wide_part='200G', min_rows_for_wide_part=1, write_marks_for_substreams_in_compact_parts=1, object_serialization_version='v3', object_shared_data_serialization_version='advanced', object_shared_data_serialization_version_for_zero_level_parts='advanced', object_shared_data_buckets_for_compact_part=1; + +insert into test select toJSONString(arrayMap(x -> tuple('key' || x, x), range(255))::Map(String, UInt32)); +insert into test select toJSONString(arrayMap(x -> tuple('key' || x, x), range(256))::Map(String, UInt32)); +insert into test select toJSONString(arrayMap(x -> tuple('key' || x, x), range(65535))::Map(String, UInt32)); +insert into test select toJSONString(arrayMap(x -> tuple('key' || x, x), range(65536))::Map(String, UInt32)); + +optimize table test final; + +select sipHash64(json::String) from test order by all; + +drop table test; diff --git a/parser/testdata/03555_json_shared_data_advanced_paths_indexes_3/ast.json b/parser/testdata/03555_json_shared_data_advanced_paths_indexes_3/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03555_json_shared_data_advanced_paths_indexes_3/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03555_json_shared_data_advanced_paths_indexes_3/metadata.json b/parser/testdata/03555_json_shared_data_advanced_paths_indexes_3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03555_json_shared_data_advanced_paths_indexes_3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03555_json_shared_data_advanced_paths_indexes_3/query.sql b/parser/testdata/03555_json_shared_data_advanced_paths_indexes_3/query.sql new file mode 100644 index 000000000..08b925a12 --- /dev/null +++ b/parser/testdata/03555_json_shared_data_advanced_paths_indexes_3/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-s3-storage, long, no-msan, no-tsan, no-asan, no-ubsan + +set output_format_json_quote_64bit_integers=0; + +drop table if exists test; + +create table test (json JSON(max_dynamic_paths=0)) engine=MergeTree order by tuple() settings index_granularity=1, min_bytes_for_wide_part=1, min_rows_for_wide_part=1, object_serialization_version='v3', object_shared_data_serialization_version='advanced', object_shared_data_serialization_version_for_zero_level_parts='advanced', object_shared_data_buckets_for_wide_part=1; + +insert into test select toJSONString(arrayMap(x -> tuple('key' || x, x), range(255))::Map(String, UInt32)); +insert into test select toJSONString(arrayMap(x -> tuple('key' || x, x), range(256))::Map(String, UInt32)); +insert into test select toJSONString(arrayMap(x -> tuple('key' || x, x), range(65535))::Map(String, UInt32)); +insert into test select toJSONString(arrayMap(x -> tuple('key' || x, x), range(65536))::Map(String, UInt32)); + +optimize table test final; + +select sipHash64(json::String) from test order by all; + +drop table test; diff --git a/parser/testdata/03558_no_alias_in_single_expressions/ast.json b/parser/testdata/03558_no_alias_in_single_expressions/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03558_no_alias_in_single_expressions/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03558_no_alias_in_single_expressions/metadata.json b/parser/testdata/03558_no_alias_in_single_expressions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03558_no_alias_in_single_expressions/metadata.json @@ -0,0 +1 @@ +{"todo": true}
diff --git a/parser/testdata/03558_no_alias_in_single_expressions/query.sql b/parser/testdata/03558_no_alias_in_single_expressions/query.sql new file mode 100644 index 000000000..c506a8d01 --- /dev/null +++ b/parser/testdata/03558_no_alias_in_single_expressions/query.sql @@ -0,0 +1,7 @@ +CREATE TABLE t0 (c0 Int) ENGINE = MergeTree() ORDER BY ((c0 AS x)); -- { clientError SYNTAX_ERROR } +CREATE TABLE t0 (c0 Int) ENGINE = MergeTree() ORDER BY (c0 AS x); -- { clientError SYNTAX_ERROR } +CREATE TABLE t0 (c0 Int) ENGINE = MergeTree() ORDER BY (c0 AS x) DESC; -- { clientError SYNTAX_ERROR } +CREATE TABLE t0 (c0 Int) ENGINE = MergeTree() ORDER BY c0 AS x; -- { clientError SYNTAX_ERROR } +CREATE TABLE t0 (c0 Int) ENGINE = MergeTree() ORDER BY c0 AS x DESC; -- { clientError SYNTAX_ERROR } +DELETE FROM t0 WHERE (true AS a0); -- { clientError SYNTAX_ERROR } +DELETE FROM t0 WHERE true AS a0; -- { clientError SYNTAX_ERROR } diff --git a/parser/testdata/03559_explain_ast_in_subquery/ast.json b/parser/testdata/03559_explain_ast_in_subquery/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03559_explain_ast_in_subquery/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03559_explain_ast_in_subquery/metadata.json b/parser/testdata/03559_explain_ast_in_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03559_explain_ast_in_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03559_explain_ast_in_subquery/query.sql b/parser/testdata/03559_explain_ast_in_subquery/query.sql new file mode 100644 index 000000000..638bac1ba --- /dev/null +++ b/parser/testdata/03559_explain_ast_in_subquery/query.sql @@ -0,0 +1 @@ +SELECT (EXPLAIN AST SELECT 1 INTO OUTFILE 'a') FROM numbers(0); -- { clientError BAD_ARGUMENTS } diff --git a/parser/testdata/03560_low_cardinality_keys_filter/ast.json b/parser/testdata/03560_low_cardinality_keys_filter/ast.json new file mode 100644 index 000000000..1ba0af0e3 --- /dev/null +++ b/parser/testdata/03560_low_cardinality_keys_filter/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000991412, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03560_low_cardinality_keys_filter/metadata.json b/parser/testdata/03560_low_cardinality_keys_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03560_low_cardinality_keys_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03560_low_cardinality_keys_filter/query.sql b/parser/testdata/03560_low_cardinality_keys_filter/query.sql new file mode 100644 index 000000000..28cb9de0b --- /dev/null +++ b/parser/testdata/03560_low_cardinality_keys_filter/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + s LowCardinality(String), + client_name String, +) ENGINE = MergeTree ORDER BY (); + +INSERT INTO test +SELECT number < 8000 ? 'ok' : 'fail' AS s, + number < 8000 ? 
'client1' : 'client2' FROM numbers(20000); + +SELECT DISTINCT lowCardinalityKeys(s) FROM test PREWHERE client_name = 'client1' ORDER BY ALL; + +DROP TABLE test; diff --git a/parser/testdata/03560_new_analyzer_default_expression/ast.json b/parser/testdata/03560_new_analyzer_default_expression/ast.json new file mode 100644 index 000000000..e8baa717f --- /dev/null +++ b/parser/testdata/03560_new_analyzer_default_expression/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001269512, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03560_new_analyzer_default_expression/metadata.json b/parser/testdata/03560_new_analyzer_default_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03560_new_analyzer_default_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03560_new_analyzer_default_expression/query.sql b/parser/testdata/03560_new_analyzer_default_expression/query.sql new file mode 100644 index 000000000..281680330 --- /dev/null +++ b/parser/testdata/03560_new_analyzer_default_expression/query.sql @@ -0,0 +1,26 @@ +SET enable_analyzer=1; + +CREATE TABLE default (a Int64 DEFAULT 1) Engine=Memory(); +-- This case is failing with old analyzer, because "_subquery" name prefix has a special treatment. +CREATE TABLE clashing_name (_subquery_test Int64 DEFAULT 7) Engine=Memory(); +CREATE TABLE dependent_defaults (a Int64 DEFAULT 7, b Int64 DEFAULT a + 7, c Int64 DEFAULT a * b) Engine=Memory(); +CREATE TABLE regular_column (a Int64, b Int64 DEFAULT 7, c Int64 DEFAULT a * b) Engine=Memory(); +CREATE TABLE complex_name_backward_compat (a Int64, `b.b` Int64 DEFAULT 7, c Int64 DEFAULT a * `b.b`) Engine=Memory(); +CREATE TABLE compound_id (a Tuple(x Int64, y Int64), b Int64 DEFAULT a.x + 7) Engine=Memory(); + +SET insert_null_as_default=1; + +INSERT INTO default SELECT NULL FROM system.one; +INSERT INTO clashing_name SELECT NULL FROM system.one; +INSERT INTO dependent_defaults SELECT NULL, NULL, NULL FROM system.one; +INSERT INTO regular_column SELECT 2, NULL, NULL FROM system.one; +INSERT INTO complex_name_backward_compat SELECT 3, NULL, NULL FROM system.one; +INSERT INTO compound_id (a) VALUES ((1, 2)); + +-- { echoOn } +SELECT * FROM default ORDER BY ALL; +SELECT * FROM clashing_name ORDER BY ALL; +SELECT * FROM dependent_defaults ORDER BY ALL; +SELECT * FROM regular_column ORDER BY ALL; +SELECT * FROM complex_name_backward_compat ORDER BY ALL; +SELECT * FROM compound_id ORDER BY ALL; diff --git a/parser/testdata/03560_parallel_replicas_external_aggregation/ast.json b/parser/testdata/03560_parallel_replicas_external_aggregation/ast.json new file mode 100644 index 000000000..356af1ef6 --- /dev/null +++ b/parser/testdata/03560_parallel_replicas_external_aggregation/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_proj_external_agg (children 1)" + }, + { + "explain": " Identifier t_proj_external_agg" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001125868, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/03560_parallel_replicas_external_aggregation/metadata.json b/parser/testdata/03560_parallel_replicas_external_aggregation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03560_parallel_replicas_external_aggregation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03560_parallel_replicas_external_aggregation/query.sql b/parser/testdata/03560_parallel_replicas_external_aggregation/query.sql new file mode 100644 index 000000000..50d0ac668 --- /dev/null +++ b/parser/testdata/03560_parallel_replicas_external_aggregation/query.sql @@ -0,0 +1,35 @@ +DROP TABLE IF EXISTS t_proj_external_agg; + +CREATE TABLE t_proj_external_agg +( + k1 UInt32, + k2 UInt32, + k3 UInt32, + value UInt32 +) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO t_proj_external_agg SELECT 1, number%2, number%4, number FROM numbers(50000); + +SYSTEM STOP MERGES t_proj_external_agg; + +ALTER TABLE t_proj_external_agg ADD PROJECTION aaaa ( + SELECT + k1, + k2, + k3, + sum(value) + GROUP BY k1, k2, k3 +); + +INSERT INTO t_proj_external_agg SELECT 1, number%2, number%4, number FROM numbers(100000) LIMIT 50000, 100000; + +SYSTEM ENABLE FAILPOINT slowdown_parallel_replicas_local_plan_read; + +SELECT '*** enable slowdown_parallel_replicas_local_plan_read ***'; +SELECT k1, k2, k3, sum(value) v FROM t_proj_external_agg GROUP BY k1, k2, k3 ORDER BY k1, k2, k3 SETTINGS optimize_aggregation_in_order = 0, max_bytes_before_external_group_by = 1, max_bytes_ratio_before_external_group_by = 0, group_by_two_level_threshold = 1; + +SYSTEM DISABLE FAILPOINT slowdown_parallel_replicas_local_plan_read; + +DROP TABLE IF EXISTS t_proj_external_agg; diff --git a/parser/testdata/03560_parallel_replicas_memory_bound_merging_projection/ast.json b/parser/testdata/03560_parallel_replicas_memory_bound_merging_projection/ast.json new file mode 100644 index 000000000..071f31c40 --- /dev/null +++ b/parser/testdata/03560_parallel_replicas_memory_bound_merging_projection/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery pr_t (children 1)" + }, + { + "explain": " Identifier pr_t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00121214, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03560_parallel_replicas_memory_bound_merging_projection/metadata.json b/parser/testdata/03560_parallel_replicas_memory_bound_merging_projection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03560_parallel_replicas_memory_bound_merging_projection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03560_parallel_replicas_memory_bound_merging_projection/query.sql b/parser/testdata/03560_parallel_replicas_memory_bound_merging_projection/query.sql new file mode 100644 index 000000000..1fae05014 --- /dev/null +++ b/parser/testdata/03560_parallel_replicas_memory_bound_merging_projection/query.sql @@ -0,0 +1,23 @@ +drop table if exists pr_t; + +create table pr_t(a UInt64, b UInt64) engine=MergeTree order by a; +ALTER TABLE pr_t ADD PROJECTION p_agg (SELECT a, sum(b) GROUP BY a); +insert into pr_t select number % 1000, number % 1000 from numbers_mt(1e6); + +set parallel_replicas_only_with_analyzer = 0; -- necessary for CI run with disabled analyzer + +set max_threads = 4; +set enable_parallel_replicas = 1, parallel_replicas_for_non_replicated_merge_tree = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; +set distributed_aggregation_memory_efficient = 1, enable_memory_bound_merging_of_aggregation_results = 1; +set enable_analyzer = 1, 
parallel_replicas_local_plan = 1, optimize_use_projections = 1, parallel_replicas_support_projection = 1; +set read_in_order_two_level_merge_threshold = 1000; + +-- { echoOn } -- +set optimize_aggregation_in_order = 0; +SELECT trimLeft(*) FROM (explain select sum(b) from pr_t group by a order by a limit 5 offset 500) WHERE explain LIKE '%ReadFromMergeTree%'; +set optimize_aggregation_in_order = 1; +explain pipeline select sum(b) from pr_t group by a order by a limit 5 offset 500; +select sum(b) from pr_t group by a order by a limit 5 offset 500; +-- { echoOff } -- + +drop table if exists pr_t; diff --git a/parser/testdata/03560_parallel_replicas_projection/ast.json b/parser/testdata/03560_parallel_replicas_projection/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03560_parallel_replicas_projection/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03560_parallel_replicas_projection/metadata.json b/parser/testdata/03560_parallel_replicas_projection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03560_parallel_replicas_projection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03560_parallel_replicas_projection/query.sql b/parser/testdata/03560_parallel_replicas_projection/query.sql new file mode 100644 index 000000000..977f4c9a0 --- /dev/null +++ b/parser/testdata/03560_parallel_replicas_projection/query.sql @@ -0,0 +1,74 @@ +-- Tags: long + +DROP TABLE IF EXISTS normal; +CREATE TABLE IF NOT EXISTS normal +( + `key` UInt32, + `value` UInt32, +) +ENGINE = MergeTree +ORDER BY tuple() settings index_granularity=1; + +SYSTEM STOP MERGES normal; + +INSERT INTO normal select number as key, number as value from numbers(10000); +ALTER TABLE normal ADD PROJECTION p_normal (SELECT key, value ORDER BY key); +INSERT INTO normal select number as key, number as value from numbers(10000, 100); + +SET parallel_replicas_only_with_analyzer = 0; +SET optimize_use_projections = 1, optimize_aggregation_in_order = 0; +SET enable_parallel_replicas = 2, parallel_replicas_local_plan = 1, parallel_replicas_support_projection = 1, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree=1, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; + +SELECT '---normal : contains both projections and parts ---'; +SELECT trimLeft(replaceRegexpAll(explain, 'ReadFromRemoteParallelReplicas.*', 'ReadFromRemoteParallelReplicas')) FROM (explain SELECT sum(key) FROM normal WHERE key > 9999 AND key < 10010) WHERE explain LIKE '%ReadFromMergeTree%' OR explain LIKE '%ReadFromRemoteParallelReplicas%' SETTINGS enable_analyzer = 1; +SELECT sum(key) FROM normal WHERE key > 9999 AND key < 10010; + +SELECT '---normal : contains only projections ---'; +TRUNCATE TABLE normal; +INSERT INTO normal select number as key, number as value from numbers(10100); + +SELECT trimLeft(replaceRegexpAll(explain, 'ReadFromRemoteParallelReplicas.*', 'ReadFromRemoteParallelReplicas')) FROM (explain SELECT sum(key) FROM normal WHERE key > 9999 AND key < 10010) WHERE explain LIKE '%ReadFromMergeTree%' OR explain LIKE '%ReadFromRemoteParallelReplicas%' SETTINGS enable_analyzer = 1; +SELECT sum(key) FROM normal WHERE key > 9999 AND key < 10010; + +DROP TABLE normal; + +DROP TABLE IF EXISTS agg; +CREATE TABLE agg +( + `key` UInt32, + `value` UInt32, +) +ENGINE = MergeTree +ORDER BY tuple() settings index_granularity=1; + +SYSTEM STOP MERGES agg; + +INSERT INTO agg SELECT number AS 
key, number AS value FROM numbers(100); +ALTER TABLE agg ADD PROJECTION p_agg (SELECT key, sum(value) GROUP BY key); +INSERT INTO agg SELECT number AS key, number AS value FROM numbers(100, 100); + +SELECT '---agg : contains both projections and parts ---'; +SELECT trimLeft(replaceRegexpAll(explain, 'ReadFromRemoteParallelReplicas.*', 'ReadFromRemoteParallelReplicas')) FROM (explain SELECT sum(value) AS v FROM agg where key > 90 AND key < 110) WHERE explain LIKE '%ReadFromMergeTree%' OR explain LIKE '%ReadFromRemoteParallelReplicas%' SETTINGS enable_analyzer = 1; +SELECT sum(value) AS v FROM agg where key > 90 AND key < 110; + +SELECT '---agg : contains only projections ---'; +TRUNCATE TABLE agg; +INSERT INTO agg SELECT number AS key, number AS value FROM numbers(200); + +SELECT trimLeft(replaceRegexpAll(explain, 'ReadFromRemoteParallelReplicas.*', 'ReadFromRemoteParallelReplicas')) FROM (explain SELECT sum(value) AS v FROM agg where key > 90 AND key < 110) WHERE explain LIKE '%ReadFromMergeTree%' OR explain LIKE '%ReadFromRemoteParallelReplicas%' SETTINGS enable_analyzer = 1; +SELECT sum(value) AS v FROM agg where key > 90 AND key < 110; +DROP TABLE agg; + +DROP TABLE IF EXISTS x; +CREATE TABLE x (i int) engine MergeTree ORDER BY i SETTINGS index_granularity = 3; + +INSERT INTO x SELECT * FROM numbers(10); + +SELECT '--- min-max projection ---'; + +SELECT trimLeft(replaceRegexpAll(explain, 'ReadFromRemoteParallelReplicas.*', 'ReadFromRemoteParallelReplicas')) FROM (explain SELECT max(i) FROM x) WHERE explain LIKE '%ReadFromPreparedSource%' OR explain LIKE '%ReadFromRemoteParallelReplicas%' SETTINGS enable_analyzer = 1; +SELECT max(i) FROM x SETTINGS enable_analyzer = 1, max_rows_to_read = 2, optimize_use_implicit_projections = 1, optimize_use_projection_filtering = 1; + +SELECT '--- exact-count projection ---'; +SELECT trimLeft(replaceRegexpAll(explain, 'ReadFromRemoteParallelReplicas.*', 'ReadFromRemoteParallelReplicas')) FROM (explain SELECT count() FROM x WHERE (i >= 3 AND i <= 6) OR i = 7) WHERE explain LIKE '%ReadFromPreparedSource%' OR explain LIKE '%ReadFromMergeTree%' OR explain LIKE '%ReadFromRemoteParallelReplicas%' SETTINGS enable_analyzer = 1; +SELECT count() FROM x WHERE (i >= 3 AND i <= 6) OR i = 7; diff --git a/parser/testdata/03560_validate_max_insert_block_size/ast.json b/parser/testdata/03560_validate_max_insert_block_size/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03560_validate_max_insert_block_size/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03560_validate_max_insert_block_size/metadata.json b/parser/testdata/03560_validate_max_insert_block_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03560_validate_max_insert_block_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03560_validate_max_insert_block_size/query.sql b/parser/testdata/03560_validate_max_insert_block_size/query.sql new file mode 100644 index 000000000..11d1a247e --- /dev/null +++ b/parser/testdata/03560_validate_max_insert_block_size/query.sql @@ -0,0 +1,10 @@ +-- Test for https://github.com/ClickHouse/ClickHouse/issues/83620 + +DROP TABLE IF EXISTS t; + +CREATE TABLE t (n Int) ENGINE = MergeTree ORDER BY n SETTINGS merge_max_block_size = 0; -- { serverError BAD_ARGUMENTS } +CREATE TABLE t (n Int) ENGINE = MergeTree ORDER BY n SETTINGS merge_max_block_size = 1; +ALTER TABLE t MODIFY SETTING merge_max_block_size = 0; -- { serverError BAD_ARGUMENTS } 
+INSERT INTO TABLE t (n) SETTINGS max_insert_block_size = 0 VALUES (1); -- { clientError BAD_ARGUMENTS } + +DROP TABLE t; diff --git a/parser/testdata/03561_analyzer_cte_cycle_resolve_bug/ast.json b/parser/testdata/03561_analyzer_cte_cycle_resolve_bug/ast.json new file mode 100644 index 000000000..b84ee11fb --- /dev/null +++ b/parser/testdata/03561_analyzer_cte_cycle_resolve_bug/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery users (children 3)" + }, + { + "explain": " Identifier users" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration uid (children 1)" + }, + { + "explain": " DataType Int16" + }, + { + "explain": " ColumnDeclaration name (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration age (children 1)" + }, + { + "explain": " DataType Int16" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001215975, + "rows_read": 12, + "bytes_read": 420 + } +} diff --git a/parser/testdata/03561_analyzer_cte_cycle_resolve_bug/metadata.json b/parser/testdata/03561_analyzer_cte_cycle_resolve_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03561_analyzer_cte_cycle_resolve_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03561_analyzer_cte_cycle_resolve_bug/query.sql b/parser/testdata/03561_analyzer_cte_cycle_resolve_bug/query.sql new file mode 100644 index 000000000..da698005b --- /dev/null +++ b/parser/testdata/03561_analyzer_cte_cycle_resolve_bug/query.sql @@ -0,0 +1,18 @@ +CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=Memory; + +INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users VALUES (6666, 'Ksenia', 48); +INSERT INTO users VALUES (8888, 'Alice', 50); + +set enable_analyzer = 1; + +WITH + users AS ( + WITH t as ( + SELECT * FROM users + ) + SELECT * FROM t + ) +SELECT * +FROM users +FORMAT Null; diff --git a/parser/testdata/03561_colorSRGBToOKLCH/ast.json b/parser/testdata/03561_colorSRGBToOKLCH/ast.json new file mode 100644 index 000000000..ce43ca312 --- /dev/null +++ b/parser/testdata/03561_colorSRGBToOKLCH/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '--- Wrong arguments'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.000856094, + "rows_read": 5, + "bytes_read": 190 + } +} diff --git a/parser/testdata/03561_colorSRGBToOKLCH/metadata.json b/parser/testdata/03561_colorSRGBToOKLCH/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03561_colorSRGBToOKLCH/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03561_colorSRGBToOKLCH/query.sql b/parser/testdata/03561_colorSRGBToOKLCH/query.sql new file mode 100644 index 000000000..61f0cd44a --- /dev/null +++ b/parser/testdata/03561_colorSRGBToOKLCH/query.sql @@ -0,0 +1,54 @@ +SELECT '--- Wrong arguments'; +SELECT colorSRGBToOKLCH(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT colorSRGBToOKLCH(1, 2); -- 
{ serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT colorSRGBToOKLCH((1, 2)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT colorSRGBToOKLCH((1, 'a', 3)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT colorSRGBToOKLCH((1, 2, 3), 'a'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT '--- Regular calls'; +WITH colorSRGBToOKLCH((0, 0, 0)) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorSRGBToOKLCH((255, 0, 0)) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorSRGBToOKLCH((0, 255, 0)) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorSRGBToOKLCH((0, 0, 255)) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorSRGBToOKLCH((1, 2, 3)) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorSRGBToOKLCH((15, 241, 63)) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorSRGBToOKLCH((129, 87, 220)) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); + +SELECT '--- Varying gamma'; +WITH colorSRGBToOKLCH((128, 64, 32), 1.0) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorSRGBToOKLCH((128, 64, 32), 1.8) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorSRGBToOKLCH((128, 64, 32), 2.2) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorSRGBToOKLCH((128, 64, 32), 2.4) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorSRGBToOKLCH((128, 64, 32), 3.0) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); + +SELECT '--- Edge case colors'; +WITH colorSRGBToOKLCH((128, 128, 128)) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorSRGBToOKLCH((0.628, 0.2577, 29.2)) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorSRGBToOKLCH((-53, -134, -180)) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorSRGBToOKLCH((53, 134, 180), 0) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorSRGBToOKLCH((53, 134, 180), -1000) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorSRGBToOKLCH((53, 134, 180), 1000) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorSRGBToOKLCH((1e-3, 1e-6, 180)) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); + +SELECT '--- Fuzzing'; +WITH colorSRGBToOKLCH((128, 128, 128), materialize(1.)) AS t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); diff --git a/parser/testdata/03561_materialized_subcolumns_materialized_view/ast.json b/parser/testdata/03561_materialized_subcolumns_materialized_view/ast.json new file mode 100644 index 000000000..b5549e8bc --- /dev/null +++ b/parser/testdata/03561_materialized_subcolumns_materialized_view/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " TableIdentifier source" + }, + { + "explain": " TableIdentifier destination" + }, + { + "explain": " TableIdentifier source_to_destination_mv" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001085657, + "rows_read": 5, + "bytes_read": 187 + } +} diff --git a/parser/testdata/03561_materialized_subcolumns_materialized_view/metadata.json b/parser/testdata/03561_materialized_subcolumns_materialized_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null 
+++ b/parser/testdata/03561_materialized_subcolumns_materialized_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03561_materialized_subcolumns_materialized_view/query.sql b/parser/testdata/03561_materialized_subcolumns_materialized_view/query.sql new file mode 100644 index 000000000..036581e15 --- /dev/null +++ b/parser/testdata/03561_materialized_subcolumns_materialized_view/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS source, destination, source_to_destination_mv; + +CREATE OR REPLACE TABLE destination ( + some_data Nullable(JSON), + a_val String MATERIALIZED some_data.a[1] +) +ENGINE = MergeTree +ORDER BY tuple(); + +CREATE OR REPLACE TABLE source ( + some_data Nullable(JSON) +) +ENGINE = Null; + +CREATE MATERIALIZED VIEW source_to_destination_mv TO destination AS +SELECT some_data +FROM source; + +INSERT INTO source VALUES('{"a": ["baz"]}'); + +SELECT some_data, a_val FROM destination; diff --git a/parser/testdata/03561_two_mvs_bad_select/ast.json b/parser/testdata/03561_two_mvs_bad_select/ast.json new file mode 100644 index 000000000..d619d08d3 --- /dev/null +++ b/parser/testdata/03561_two_mvs_bad_select/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " TableIdentifier 03561_t0" + }, + { + "explain": " TableIdentifier 03561_t1" + }, + { + "explain": " TableIdentifier 03561_v0" + }, + { + "explain": " TableIdentifier 03561_v1" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001386566, + "rows_read": 6, + "bytes_read": 204 + } +} diff --git a/parser/testdata/03561_two_mvs_bad_select/metadata.json b/parser/testdata/03561_two_mvs_bad_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03561_two_mvs_bad_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03561_two_mvs_bad_select/query.sql b/parser/testdata/03561_two_mvs_bad_select/query.sql new file mode 100644 index 000000000..40af55f12 --- /dev/null +++ b/parser/testdata/03561_two_mvs_bad_select/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS 03561_t0, 03561_t1, 03561_v0, 03561_v1; + +SET allow_materialized_view_with_bad_select = 1; + +CREATE TABLE 03561_t0 (c0 Int) ENGINE = Memory; +CREATE TABLE 03561_t1 (c0 Int) ENGINE = Memory; +CREATE MATERIALIZED VIEW 03561_v0 TO 03561_t0 (c0 Int) AS (SELECT 1 c0 FROM 03561_t0); +CREATE MATERIALIZED VIEW 03561_v1 TO 03561_t1 (c0 Int) AS (SELECT grouping(c0) c0 FROM 03561_t0); +INSERT INTO TABLE 03561_t0 (c0) VALUES (1); -- { serverError TOO_DEEP_RECURSION } + +DROP TABLE IF EXISTS 03561_t0, 03561_t1, 03561_v0, 03561_v1; diff --git a/parser/testdata/03562_colorOKLCHToSRGB/ast.json b/parser/testdata/03562_colorOKLCHToSRGB/ast.json new file mode 100644 index 000000000..0252bb17f --- /dev/null +++ b/parser/testdata/03562_colorOKLCHToSRGB/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '--- Wrong arguments'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001271255, + "rows_read": 5, + "bytes_read": 190 + } +} diff --git a/parser/testdata/03562_colorOKLCHToSRGB/metadata.json 
b/parser/testdata/03562_colorOKLCHToSRGB/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03562_colorOKLCHToSRGB/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03562_colorOKLCHToSRGB/query.sql b/parser/testdata/03562_colorOKLCHToSRGB/query.sql new file mode 100644 index 000000000..034bf4a86 --- /dev/null +++ b/parser/testdata/03562_colorOKLCHToSRGB/query.sql @@ -0,0 +1,53 @@ +SELECT '--- Wrong arguments'; +SELECT colorOKLCHToSRGB(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT colorOKLCHToSRGB(1, 2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT colorOKLCHToSRGB((1, 2)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT colorOKLCHToSRGB((1, 'a', 3)); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT colorOKLCHToSRGB((1, 2, 3), 'a'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + + +SELECT '--- Regular calls'; +WITH colorOKLCHToSRGB((0, 0, 0)) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorOKLCHToSRGB((0.628, 0.2577, 29.23)) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorOKLCHToSRGB((0.8664, 0.294827, 142.4953)) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorOKLCHToSRGB((0.452, 0.313214, 264.052)) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorOKLCHToSRGB((0.0823, 0.008, 240.75)) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorOKLCHToSRGB((0.833, 0.264, 144.44)) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorOKLCHToSRGB((0.5701, 0.194, 293.9)) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); + +SELECT '--- Varying gamma'; +WITH colorOKLCHToSRGB((0.4466, 0.0991, 45.44), 1.0) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorOKLCHToSRGB((0.4466, 0.0991, 45.44), 1.8) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorOKLCHToSRGB((0.4466, 0.0991, 45.44), 2.2) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorOKLCHToSRGB((0.4466, 0.0991, 45.44), 2.4) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorOKLCHToSRGB((0.4466, 0.0991, 45.44), 3.0) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); + +SELECT '--- Edge case colors'; +WITH colorOKLCHToSRGB((0.6, 0, 0)) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorOKLCHToSRGB((-0.591, 0.1047, 57.35)) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorOKLCHToSRGB((0.591, 0.1047, 237.35), 0) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorOKLCHToSRGB((0.591, 0.1047, 237.35), -1000) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorOKLCHToSRGB((0.591, 0.1047, 237.35), 1000) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); +WITH colorOKLCHToSRGB((1e3, 1e6, 180)) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); + +SELECT '--- Fuzzing'; +WITH colorOKLCHToSRGB((1e3, 1e6, 180), materialize(0.)) as t +SELECT tuple(round(t.1, 6), round(t.2, 6), round(t.3, 6)); diff --git a/parser/testdata/03562_geometry_type/ast.json b/parser/testdata/03562_geometry_type/ast.json new file mode 100644 index 000000000..c472bc319 --- /dev/null +++ b/parser/testdata/03562_geometry_type/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + 
"statistics": + { + "elapsed": 0.001157024, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03562_geometry_type/metadata.json b/parser/testdata/03562_geometry_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03562_geometry_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03562_geometry_type/query.sql b/parser/testdata/03562_geometry_type/query.sql new file mode 100644 index 000000000..b1d65ca05 --- /dev/null +++ b/parser/testdata/03562_geometry_type/query.sql @@ -0,0 +1,34 @@ +SET allow_suspicious_variant_types = 1; + +DROP TABLE IF EXISTS geom1; +CREATE TABLE IF NOT EXISTS geom1 (geom Geometry) ENGINE = Memory(); +INSERT INTO geom1 VALUES((10, 20)); +INSERT INTO geom1 VALUES((30, 40)); +INSERT INTO geom1 VALUES([(0, 0), (10, 0), (10, 10), (0, 10)]); +INSERT INTO geom1 VALUES([[(20, 20), (50, 20), (50, 50), (20, 50)], [(30, 30), (50, 50), (50, 30)]]); +SELECT count(*) FROM geom1; + +DROP TABLE IF EXISTS geo; +CREATE TABLE IF NOT EXISTS geo (geom String, id Int) ENGINE = Memory(); +INSERT INTO geo VALUES ('POLYGON((1 0,10 0,10 10,0 10,1 0),(4 4,5 4,5 5,4 5,4 4))', 1); +INSERT INTO geo VALUES ('POINT(0 0)', 2); +INSERT INTO geo VALUES ('MULTIPOLYGON(((1 0,10 0,10 10,0 10,1 0),(4 4,5 4,5 5,4 5,4 4)),((-10 -10,-10 -9,-9 10,-10 -10)))', 3); +INSERT INTO geo VALUES ('LINESTRING(1 0,10 0,10 10,0 10,1 0)', 4); +INSERT INTO geo VALUES ('MULTILINESTRING((1 0,10 0,10 10,0 10,1 0),(4 4,5 4,5 5,4 5,4 4))', 5); +SELECT readWkt(geom) FROM geo ORDER BY id; + +DROP TABLE IF EXISTS geo; +CREATE TABLE geo (s String, id Int) engine=Memory(); +INSERT INTO geo VALUES (unhex('010100000000000000000000000000000000000000'), 1); +INSERT INTO geo VALUES (unhex('0101000000000000000000f03f0000000000000000'), 2); +INSERT INTO geo VALUES (unhex('010100000000000000000000400000000000000000'), 3); +INSERT INTO geo VALUES (unhex('01030000000100000005000000000000000000f03f0000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440000000000000f03f0000000000000000'), 4); +INSERT INTO geo VALUES (unhex('010300000001000000050000000000000000000000000000000000000000000000000024400000000000000000000000000000244000000000000024400000000000000000000000000000244000000000000000000000000000000000'), 5); +INSERT INTO geo VALUES (unhex('010300000001000000050000000000000000000040000000000000000000000000000024400000000000000000000000000000244000000000000024400000000000000000000000000000244000000000000000400000000000000000'), 6); +INSERT INTO geo VALUES (unhex('010300000002000000050000000000000000000000000000000000000000000000000024400000000000000000000000000000244000000000000024400000000000000000000000000000244000000000000000000000000000000000050000000000000000001040000000000000104000000000000014400000000000001040000000000000144000000000000014400000000000001040000000000000144000000000000010400000000000001040'), 7); +INSERT INTO geo VALUES (unhex('010300000002000000050000000000000000000040000000000000000000000000000024400000000000000000000000000000244000000000000024400000000000000000000000000000244000000000000000400000000000000000050000000000000000001040000000000000104000000000000014400000000000001040000000000000144000000000000014400000000000001040000000000000144000000000000010400000000000001040'), 8); +INSERT INTO geo VALUES 
(unhex('01030000000200000005000000000000000000f03f0000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440000000000000f03f0000000000000000050000000000000000001040000000000000104000000000000014400000000000001040000000000000144000000000000014400000000000001040000000000000144000000000000010400000000000001040'), 9); +INSERT INTO geo VALUES (unhex('01060000000200000001030000000200000005000000000000000000f03f0000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440000000000000f03f00000000000000000500000000000000000010400000000000001040000000000000144000000000000010400000000000001440000000000000144000000000000010400000000000001440000000000000104000000000000010400103000000010000000400000000000000000024c000000000000024c000000000000024c000000000000022c000000000000022c0000000000000244000000000000024c000000000000024c0'), 10); +INSERT INTO geo VALUES (unhex('0106000000020000000103000000020000000500000000000000000000000000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440000000000000000000000000000000000500000000000000000010400000000000001040000000000000144000000000000010400000000000001440000000000000144000000000000010400000000000001440000000000000104000000000000010400103000000010000000400000000000000000024c000000000000024c000000000000024c000000000000022c000000000000022c0000000000000244000000000000024c000000000000024c0'), 11); +INSERT INTO geo VALUES (unhex('0106000000020000000103000000020000000500000000000000000000400000000000000000000000000000244000000000000000000000000000002440000000000000244000000000000000000000000000002440000000000000004000000000000000000500000000000000000010400000000000001040000000000000144000000000000010400000000000001440000000000000144000000000000010400000000000001440000000000000104000000000000010400103000000010000000400000000000000000024c000000000000024c000000000000024c000000000000022c000000000000022c0000000000000244000000000000024c000000000000024c0'), 12); +SELECT readWkb(s) FROM geo ORDER BY id; diff --git a/parser/testdata/03562_json_date_as_integer/ast.json b/parser/testdata/03562_json_date_as_integer/ast.json new file mode 100644 index 000000000..49902e923 --- /dev/null +++ b/parser/testdata/03562_json_date_as_integer/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Identifier JSONEachRow" + }, + { + "explain": " Identifier a Date" + }, + { + "explain": " Literal '{\"a\" : 52}'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001236258, + "rows_read": 13, + "bytes_read": 495 + } +} diff --git a/parser/testdata/03562_json_date_as_integer/metadata.json b/parser/testdata/03562_json_date_as_integer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03562_json_date_as_integer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03562_json_date_as_integer/query.sql b/parser/testdata/03562_json_date_as_integer/query.sql new file mode 100644 index 000000000..3f8657227 --- /dev/null +++ b/parser/testdata/03562_json_date_as_integer/query.sql @@ -0,0 +1,2 @@ +SELECT * FROM format(JSONEachRow, "a Date", '{"a" : 52}'); +SELECT * FROM format(JSONEachRow, "a Date32", '{"a" : 52}'); diff --git a/parser/testdata/03562_parallel_replicas_remote_with_cluster/ast.json b/parser/testdata/03562_parallel_replicas_remote_with_cluster/ast.json new file mode 100644 index 000000000..13fc554b0 --- /dev/null +++ b/parser/testdata/03562_parallel_replicas_remote_with_cluster/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tt (children 1)" + }, + { + "explain": " Identifier tt" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001402523, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03562_parallel_replicas_remote_with_cluster/metadata.json b/parser/testdata/03562_parallel_replicas_remote_with_cluster/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03562_parallel_replicas_remote_with_cluster/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03562_parallel_replicas_remote_with_cluster/query.sql b/parser/testdata/03562_parallel_replicas_remote_with_cluster/query.sql new file mode 100644 index 000000000..97306a1b2 --- /dev/null +++ b/parser/testdata/03562_parallel_replicas_remote_with_cluster/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS tt; +CREATE TABLE tt (n UInt64) ENGINE=MergeTree() ORDER BY tuple(); +INSERT INTO tt SELECT * FROM numbers(10); + +SET parallel_replicas_only_with_analyzer = 0; -- necessary for CI run with disabled analyzer + +-- when the query plan is serialized for a distributed query, parallel replicas are not enabled +-- (with prefer_localhost_replica) because all reading steps are ReadFromTable instead of ReadFromMergeTree +SET serialize_query_plan = 0; + +SET enable_parallel_replicas=1, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1; +SELECT sum(n) FROM remote(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), tt) settings log_comment='03562_8a6a4b56-b9fa-4f60-b201-b637056a89c5'; +SELECT sum(n) FROM remote(test_cluster_two_shard_three_replicas_localhost, currentDatabase(), tt) settings log_comment='03562_152a0cc0-0811-46c9-839e-0f17426a1fc6'; + +SYSTEM FLUSH LOGS query_log; + +SELECT countIf(ProfileEvents['ParallelReplicasQueryCount']>0) FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() +AND initial_query_id IN (select query_id from system.query_log where current_database = currentDatabase() AND type = 'QueryFinish' AND event_date >= yesterday() AND log_comment = '03562_8a6a4b56-b9fa-4f60-b201-b637056a89c5') +SETTINGS parallel_replicas_for_non_replicated_merge_tree=0; + +SELECT countIf(ProfileEvents['ParallelReplicasQueryCount']>0) FROM system.query_log +WHERE type = 'QueryFinish' AND event_date >= yesterday() +AND initial_query_id IN (select query_id from system.query_log where current_database = currentDatabase() AND type = 'QueryFinish' AND event_date >= yesterday() AND log_comment = '03562_152a0cc0-0811-46c9-839e-0f17426a1fc6') +SETTINGS parallel_replicas_for_non_replicated_merge_tree=0; + +DROP TABLE tt; diff --git a/parser/testdata/03562_parallel_replicas_subquery_has_final/ast.json
b/parser/testdata/03562_parallel_replicas_subquery_has_final/ast.json new file mode 100644 index 000000000..44cfc212b --- /dev/null +++ b/parser/testdata/03562_parallel_replicas_subquery_has_final/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00086065, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03562_parallel_replicas_subquery_has_final/metadata.json b/parser/testdata/03562_parallel_replicas_subquery_has_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03562_parallel_replicas_subquery_has_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03562_parallel_replicas_subquery_has_final/query.sql b/parser/testdata/03562_parallel_replicas_subquery_has_final/query.sql new file mode 100644 index 000000000..0af8a4fa3 --- /dev/null +++ b/parser/testdata/03562_parallel_replicas_subquery_has_final/query.sql @@ -0,0 +1,11 @@ +SET allow_suspicious_primary_key = 1; + +CREATE OR REPLACE TABLE t0 (c0 Int) ENGINE = SummingMergeTree() ORDER BY tuple(); +INSERT INTO t0 VALUES (1); + +SELECT 1 FROM t0 RIGHT JOIN (SELECT c0 FROM t0 FINAL) tx ON TRUE GROUP BY c0; + +SET enable_parallel_replicas = 1, parallel_replicas_only_with_analyzer = 0, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; +SELECT 1 FROM t0 RIGHT JOIN (SELECT c0 FROM t0 FINAL) tx ON TRUE GROUP BY c0; + +DROP TABLE t0; diff --git a/parser/testdata/03562_system_database_replicas/ast.json b/parser/testdata/03562_system_database_replicas/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03562_system_database_replicas/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03562_system_database_replicas/metadata.json b/parser/testdata/03562_system_database_replicas/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03562_system_database_replicas/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03562_system_database_replicas/query.sql b/parser/testdata/03562_system_database_replicas/query.sql new file mode 100644 index 000000000..fb5b7e191 --- /dev/null +++ b/parser/testdata/03562_system_database_replicas/query.sql @@ -0,0 +1,58 @@ +-- Tags: no-parallel + +DROP DATABASE IF EXISTS db_1 SYNC; +DROP DATABASE IF EXISTS db_2 SYNC; +DROP DATABASE IF EXISTS db_3 SYNC; +DROP DATABASE IF EXISTS db_4 SYNC; +DROP DATABASE IF EXISTS db_5 SYNC; +DROP DATABASE IF EXISTS db_6 SYNC; + +SELECT '-----------------------'; +SELECT 'simple SELECT'; +CREATE DATABASE db_1 ENGINE = Replicated('/test/db_1', '{shard}', '{replica}'); +CREATE DATABASE db_2 ENGINE = Replicated('/test/db_2', '{shard}', '{replica}'); +CREATE DATABASE db_3 ENGINE = Replicated('/test/db_3', '{shard}', '{replica}'); +CREATE DATABASE db_4 ENGINE = Replicated('/test/db_4', '{shard}', '{replica}'); +CREATE DATABASE db_5 ENGINE = Replicated('/test/db_5', '{shard}', '{replica}'); +CREATE DATABASE db_6 ENGINE = Replicated('/test/db_6', '{shard}', '{replica}'); +SELECT sleep(1) FORMAT Null; +SELECT * FROM system.database_replicas WHERE database LIKE 'db_%' ORDER BY database; + +SELECT database FROM system.database_replicas WHERE database LIKE 'db_%' ORDER BY database; +SELECT DISTINCT is_readonly FROM system.database_replicas WHERE database 
LIKE 'db_%'; + +SELECT '-----------------------'; +SELECT 'count'; +SELECT count(*) FROM system.database_replicas WHERE database LIKE 'db_%'; + +SELECT '-----------------------'; +SELECT 'SELECT with LIMIT'; +SELECT * FROM system.database_replicas WHERE database LIKE 'db_%' ORDER BY database LIMIT 1; +SELECT * FROM system.database_replicas WHERE database LIKE 'db_%' ORDER BY database LIMIT 6; +SELECT * FROM system.database_replicas WHERE database LIKE 'db_%' ORDER BY database LIMIT 7; + +SELECT '-----------------------'; +SELECT 'SELECT with max_block'; +SET max_block_size=2; +SELECT * FROM system.database_replicas WHERE database LIKE 'db_%' ORDER BY database; + +SET max_block_size=6; +SELECT * FROM system.database_replicas WHERE database LIKE 'db_%' ORDER BY database; + +SELECT '-----------------------'; +SELECT 'SELECT with WHERE'; +SELECT * FROM system.database_replicas WHERE database LIKE 'db_%' AND is_readonly=0 ORDER BY database; +SELECT * FROM system.database_replicas WHERE database LIKE 'db_%' AND is_readonly=1 ORDER BY database; +SELECT is_readonly FROM system.database_replicas WHERE database='db_2' ORDER BY database; +SELECT * FROM system.database_replicas WHERE database='db_11' ORDER BY database; + +SELECT '-----------------------'; +SELECT 'DROP DATABASE'; +DROP DATABASE db_1; +SELECT * FROM system.database_replicas WHERE database LIKE 'db_%' ORDER BY database; + +SELECT '-----------------------'; +SELECT 'SELECT max_log_ptr'; +SET distributed_ddl_output_mode='throw'; +CREATE TABLE db_2.test_table (n Int64) ENGINE=MergeTree ORDER BY n; +SELECT database, max_log_ptr FROM system.database_replicas WHERE database LIKE 'db_%' AND max_log_ptr > 1; diff --git a/parser/testdata/03563_coarser_minmax_indexes_first/ast.json b/parser/testdata/03563_coarser_minmax_indexes_first/ast.json new file mode 100644 index 000000000..aabb8574b --- /dev/null +++ b/parser/testdata/03563_coarser_minmax_indexes_first/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery skip_table (children 1)" + }, + { + "explain": " Identifier skip_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001551336, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03563_coarser_minmax_indexes_first/metadata.json b/parser/testdata/03563_coarser_minmax_indexes_first/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03563_coarser_minmax_indexes_first/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03563_coarser_minmax_indexes_first/query.sql b/parser/testdata/03563_coarser_minmax_indexes_first/query.sql new file mode 100644 index 000000000..890c9c1dc --- /dev/null +++ b/parser/testdata/03563_coarser_minmax_indexes_first/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS skip_table; + +CREATE TABLE skip_table +( + k UInt64, + v UInt64, + INDEX mm_fine v TYPE minmax GRANULARITY 1, -- fine-grained mm-index first + INDEX mm_coarse v TYPE minmax GRANULARITY 1024 +) +ENGINE = MergeTree +PRIMARY KEY k +SETTINGS index_granularity = 8192; + +INSERT INTO skip_table SELECT number, intDiv(number, 4096) FROM numbers(100000); + +SELECT trim(explain) FROM ( EXPLAIN indexes = 1 SELECT * FROM skip_table WHERE v = 125 SETTINGS per_part_index_stats=1) WHERE explain like '%Name%'; + +DROP TABLE skip_table; diff --git a/parser/testdata/03565_clickhouse_smaller_indexes_first/ast.json b/parser/testdata/03565_clickhouse_smaller_indexes_first/ast.json new 
file mode 100644 index 000000000..8dbbaf4ee --- /dev/null +++ b/parser/testdata/03565_clickhouse_smaller_indexes_first/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery skip_table (children 1)" + }, + { + "explain": " Identifier skip_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001004583, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03565_clickhouse_smaller_indexes_first/metadata.json b/parser/testdata/03565_clickhouse_smaller_indexes_first/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03565_clickhouse_smaller_indexes_first/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03565_clickhouse_smaller_indexes_first/query.sql b/parser/testdata/03565_clickhouse_smaller_indexes_first/query.sql new file mode 100644 index 000000000..921a69699 --- /dev/null +++ b/parser/testdata/03565_clickhouse_smaller_indexes_first/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS skip_table; + +CREATE TABLE skip_table +( + k UInt64, + v UInt64, + INDEX bf_big v TYPE bloom_filter(0.0000000001), + INDEX bf_small v TYPE bloom_filter(0.1) +) +ENGINE = MergeTree +PRIMARY KEY k +SETTINGS index_granularity = 8192; + +INSERT INTO skip_table SELECT number, intDiv(number, 4096) FROM numbers(100000); +SELECT trim(explain) FROM ( EXPLAIN indexes = 1 SELECT * FROM skip_table WHERE v = 125 SETTINGS per_part_index_stats=1) WHERE explain like '%Name%'; + +DROP TABLE skip_table; diff --git a/parser/testdata/03565_iceberg_field_ids_table/ast.json b/parser/testdata/03565_iceberg_field_ids_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03565_iceberg_field_ids_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03565_iceberg_field_ids_table/metadata.json b/parser/testdata/03565_iceberg_field_ids_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03565_iceberg_field_ids_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03565_iceberg_field_ids_table/query.sql b/parser/testdata/03565_iceberg_field_ids_table/query.sql new file mode 100644 index 000000000..fc7042225 --- /dev/null +++ b/parser/testdata/03565_iceberg_field_ids_table/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +SELECT * FROM icebergS3(s3_conn, filename='field_ids_table_test', SETTINGS iceberg_metadata_table_uuid = '8f1f9ae2-18bb-421e-b640-ec2f85e67bce') ORDER BY ALL; diff --git a/parser/testdata/03565_union_all_nullptr/ast.json b/parser/testdata/03565_union_all_nullptr/ast.json new file mode 100644 index 000000000..277906029 --- /dev/null +++ b/parser/testdata/03565_union_all_nullptr/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 2)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType UInt8" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001221005, + "rows_read": 6, + "bytes_read": 201 + } +} diff --git a/parser/testdata/03565_union_all_nullptr/metadata.json b/parser/testdata/03565_union_all_nullptr/metadata.json new file mode 100644 index 000000000..ef120d978 --- 
/dev/null +++ b/parser/testdata/03565_union_all_nullptr/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03565_union_all_nullptr/query.sql b/parser/testdata/03565_union_all_nullptr/query.sql new file mode 100644 index 000000000..f630c9727 --- /dev/null +++ b/parser/testdata/03565_union_all_nullptr/query.sql @@ -0,0 +1,3 @@ +CREATE TEMPORARY TABLE t (x UInt8); +WITH 2 AS x INSERT INTO TABLE t SELECT 1 UNION ALL (SELECT x); +SELECT * FROM t ORDER BY ALL; diff --git a/parser/testdata/03566_analyzer_single_with_scope/ast.json b/parser/testdata/03566_analyzer_single_with_scope/ast.json new file mode 100644 index 000000000..9d59259a0 --- /dev/null +++ b/parser/testdata/03566_analyzer_single_with_scope/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery tab (children 3)" + }, + { + "explain": " Identifier tab" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration y (children 1)" + }, + { + "explain": " DataType UInt8" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001164712, + "rows_read": 12, + "bytes_read": 405 + } +} diff --git a/parser/testdata/03566_analyzer_single_with_scope/metadata.json b/parser/testdata/03566_analyzer_single_with_scope/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03566_analyzer_single_with_scope/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03566_analyzer_single_with_scope/query.sql b/parser/testdata/03566_analyzer_single_with_scope/query.sql new file mode 100644 index 000000000..d8935f232 --- /dev/null +++ b/parser/testdata/03566_analyzer_single_with_scope/query.sql @@ -0,0 +1,49 @@ +create table tab (x String, y UInt8) engine = MergeTree order by tuple(); +insert into tab select 'rue', 1; + +with ('t' || x) as y + select 1 from tab where y = 'true' settings enable_analyzer=0; +with ('t' || x) as y + select 1 from tab where y = 'true' settings enable_analyzer=1; + +with ('t' || x) as y select * from + (select 1 from tab where y = 'true') settings enable_analyzer=0; +with ('t' || x) as y select * from + (select 1 from tab where y = 'true') settings enable_analyzer=1; -- { serverError TYPE_MISMATCH } + +with + ('t' || x) as y, + 'rue' as x +select 1 from tab where y = 'true' settings enable_analyzer=1; + +with ('t' || x) as y, + 'rue' as x +select * from + (select 1 from tab where y = 'true') settings enable_analyzer=1; -- { serverError TYPE_MISMATCH } + +SET enable_scopes_for_with_statement = 0; + +SELECT 'ENABLED COMPATIBILITY MODE'; + +with ('t' || x) as y select * from + (select 1 from tab where y = 'true') settings enable_analyzer=1; + +with + ('t' || x) as y, + 'rue' as x +select 1 from tab where y = 'true' settings enable_analyzer=1; + +with ('t' || x) as y, + 'rue' as x +select * from + (select 1 from tab where y = 'true') settings enable_analyzer=1; + +with + ('t' || x) as y, + 'rue' as x +select 1 from tab where y = 'true' settings enable_analyzer=0; -- { serverError TYPE_MISMATCH } + +with ('t' || x) as y, + 'rue' as x +select * from + (select 1 from tab where y = 'true') 
settings enable_analyzer=0; -- { serverError TYPE_MISMATCH } diff --git a/parser/testdata/03566_inconsistent_formatting_functions_codecs/ast.json b/parser/testdata/03566_inconsistent_formatting_functions_codecs/ast.json new file mode 100644 index 000000000..5eb164f5e --- /dev/null +++ b/parser/testdata/03566_inconsistent_formatting_functions_codecs/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery a (children 3)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration b (children 1)" + }, + { + "explain": " DataType UInt8" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier b" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001170814, + "rows_read": 9, + "bytes_read": 291 + } +} diff --git a/parser/testdata/03566_inconsistent_formatting_functions_codecs/metadata.json b/parser/testdata/03566_inconsistent_formatting_functions_codecs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03566_inconsistent_formatting_functions_codecs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03566_inconsistent_formatting_functions_codecs/query.sql b/parser/testdata/03566_inconsistent_formatting_functions_codecs/query.sql new file mode 100644 index 000000000..73d5cfc03 --- /dev/null +++ b/parser/testdata/03566_inconsistent_formatting_functions_codecs/query.sql @@ -0,0 +1,6 @@ +CREATE TEMPORARY TABLE a (b UInt8) ENGINE = MergeTree ORDER BY b; +ALTER TABLE a MODIFY COLUMN b CODEC(`@`); -- { serverError UNKNOWN_CODEC } +SELECT f(`@`); -- { serverError UNKNOWN_IDENTIFIER } +SELECT `@`(1); -- { serverError UNKNOWN_FUNCTION } +SELECT ` `(1); -- { serverError UNKNOWN_FUNCTION } +SELECT `упячка`(1); -- { serverError UNKNOWN_FUNCTION } diff --git a/parser/testdata/03566_low_cardinality_nan_unique/ast.json b/parser/testdata/03566_low_cardinality_nan_unique/ast.json new file mode 100644 index 000000000..6dccf984c --- /dev/null +++ b/parser/testdata/03566_low_cardinality_nan_unique/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001205039, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03566_low_cardinality_nan_unique/metadata.json b/parser/testdata/03566_low_cardinality_nan_unique/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03566_low_cardinality_nan_unique/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03566_low_cardinality_nan_unique/query.sql b/parser/testdata/03566_low_cardinality_nan_unique/query.sql new file mode 100644 index 000000000..a7529b9ec --- /dev/null +++ b/parser/testdata/03566_low_cardinality_nan_unique/query.sql @@ -0,0 +1,16 @@ +SET allow_suspicious_low_cardinality_types=1; + +SELECT 'float 32'; +CREATE TABLE table_f32 (c0 LowCardinality(Float32), c1 Int) ENGINE = MergeTree() ORDER BY (c0, c1); +INSERT INTO table_f32 VALUES (nan, 3), (-nan, 2), (nan, 1); +SELECT * FROM table_f32 ORDER BY all; + +SELECT 'float 64'; +CREATE TABLE table_f64 (c0 LowCardinality(Float64), c1 Int) ENGINE = MergeTree() ORDER BY (c0, c1); +INSERT INTO table_f64 VALUES (nan, 3), (-nan, 2), 
(nan, 1); +SELECT * FROM table_f64 ORDER BY all; + +SELECT 'bfloat 16'; +CREATE TABLE table_f16 (c0 LowCardinality(BFloat16), c1 Int) ENGINE = MergeTree() ORDER BY (c0, c1); +INSERT INTO table_f16 VALUES (nan, 3), (-nan, 2), (nan, 1); +SELECT * FROM table_f16 ORDER BY all; diff --git a/parser/testdata/03566_one_row_summing_merge_tree/ast.json b/parser/testdata/03566_one_row_summing_merge_tree/ast.json new file mode 100644 index 000000000..03669f6fe --- /dev/null +++ b/parser/testdata/03566_one_row_summing_merge_tree/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001471154, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03566_one_row_summing_merge_tree/metadata.json b/parser/testdata/03566_one_row_summing_merge_tree/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03566_one_row_summing_merge_tree/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03566_one_row_summing_merge_tree/query.sql b/parser/testdata/03566_one_row_summing_merge_tree/query.sql new file mode 100644 index 000000000..e7141561e --- /dev/null +++ b/parser/testdata/03566_one_row_summing_merge_tree/query.sql @@ -0,0 +1,190 @@ +DROP TABLE IF EXISTS test_table; + +CREATE TABLE test_table +( + key UInt32, + A UInt32, + B String +) +ENGINE = SummingMergeTree() +ORDER BY key; + +INSERT INTO test_table Values(1,0,''); + +SELECT count() FROM test_table; + +DROP TABLE IF EXISTS test_table; + +CREATE TABLE test_table +( + key UInt32, + A UInt32, + B String +) +ENGINE = CoalescingMergeTree() +ORDER BY key; + +INSERT INTO test_table Values(1,0,''); + +SELECT count() FROM test_table; + +DROP TABLE IF EXISTS test_table; + +CREATE TABLE test_table +( + key UInt32, + A Nullable(UInt32), + B Nullable(String) +) +ENGINE = CoalescingMergeTree() +ORDER BY key; + +INSERT INTO test_table Values(1, 1, 'x'); +SELECT * FROM test_table FINAL ORDER BY ALL; + +INSERT INTO test_table Values(1, 2, 'y'); +SELECT * FROM test_table FINAL ORDER BY ALL; + +INSERT INTO test_table Values(1, 3, 'z'); +SELECT * FROM test_table FINAL ORDER BY ALL; + +DROP TABLE IF EXISTS test_table; + +CREATE TABLE test_table +( + key UInt32, + value Nullable(UInt32) +) +ENGINE = CoalescingMergeTree() +ORDER BY key; + +INSERT INTO test_table VALUES(1,6); +INSERT INTO test_table VALUES(1,NULL); +select * from test_table final; + +truncate table test_table; + +INSERT INTO test_table VALUES(1,6), (1,NULL); +select * from test_table final; + +select ' -- AggregatingMergeTree --\n' format TSVRaw; +DROP TABLE IF EXISTS test_table; + +CREATE TABLE test_table +( + key UInt32, + A SimpleAggregateFunction(anyLast,Nullable(Int64)), + B SimpleAggregateFunction(anyLast,Nullable(DateTime)), + C SimpleAggregateFunction(anyLast,Nullable(String)) +) +ENGINE = AggregatingMergeTree() +ORDER BY key; + +INSERT INTO test_table(key, A) values(1, 1); +--SELECT * FROM test_table final format PrettyCompact; +INSERT INTO test_table(key, B) values(1, '2020-01-01'); +--SELECT * FROM test_table final format PrettyCompact; +INSERT INTO test_table(key, C) values(1, 'a'); +--SELECT * FROM test_table final format PrettyCompact; +INSERT INTO test_table(key, B) values(1, '2022-01-01'); +--SELECT * FROM test_table final format PrettyCompact; +INSERT INTO test_table(key, A) values(1, 5); +--SELECT * 
FROM test_table final format PrettyCompact; +INSERT INTO test_table(key, B) values(1, Null); +--SELECT * FROM test_table final format PrettyCompact; +INSERT INTO test_table(key, A, C) values(1, Null, Null); +SELECT * FROM test_table final; + +select '\n\n -- CoalescingMergeTree --\n' format TSVRaw; + +drop table test_table; +CREATE TABLE test_table +( + key UInt32, + A Nullable(Int64), + B Nullable(DateTime), + C Nullable(String), +) +ENGINE = CoalescingMergeTree() +ORDER BY key; + +INSERT INTO test_table(key, A) values(1, 1); +--SELECT * FROM test_table final format PrettyCompact; +INSERT INTO test_table(key, B) values(1, '2020-01-01'); +--SELECT * FROM test_table final format PrettyCompact; +INSERT INTO test_table(key, C) values(1, 'a'); +--SELECT * FROM test_table final format PrettyCompact; +INSERT INTO test_table(key, B) values(1, '2022-01-01'); +--SELECT * FROM test_table final format PrettyCompact; +INSERT INTO test_table(key, A) values(1, 5); +--SELECT * FROM test_table final format PrettyCompact; +INSERT INTO test_table(key, B) values(1, Null); +--SELECT * FROM test_table final format PrettyCompact; +INSERT INTO test_table(key, A, C) values(1, Null, Null); +SELECT * FROM test_table final; + + +CREATE TABLE electric_vehicle_state +( + vin String, -- vehicle identification number + last_update DateTime64 Materialized now64(), -- optional (used with argMax) + battery_level Nullable(UInt8), -- in % + lat Nullable(Float64), -- latitude (°) + lon Nullable(Float64), -- longitude (°) + firmware_version Nullable(String), + cabin_temperature Nullable(Float32), -- in °C + speed_kmh Nullable(Float32) -- from sensor +) +ENGINE = CoalescingMergeTree +ORDER BY vin; + +-- ① Initial battery and firmware readings +INSERT INTO electric_vehicle_state VALUES +('5YJ3E1EA7KF000001', 82, NULL, NULL, '2024.14.5', NULL, NULL); + +-- ② GPS reports in later +INSERT INTO electric_vehicle_state VALUES +('5YJ3E1EA7KF000001', NULL, 37.7749, -122.4194, NULL, NULL, NULL); + +-- ③ Sensor update: temperature + speed +INSERT INTO electric_vehicle_state VALUES +('5YJ3E1EA7KF000001', NULL, NULL, NULL, NULL, 22.5, 67.3); + +-- ④ Battery drops to 78% +INSERT INTO electric_vehicle_state VALUES +('5YJ3E1EA7KF000001', 78, NULL, NULL, NULL, NULL, NULL); + +-- ⑤ Another car, initial firmware and temp readings +INSERT INTO electric_vehicle_state VALUES +('5YJ3E1EA7KF000099', NULL, NULL, NULL, '2024.14.5', 19.2, NULL); + +INSERT INTO electric_vehicle_state VALUES +('5YJ3E1EA7KF000099', NULL, NULL, NULL, '2025.38.46', 19.3, NULL); + +SELECT + vin, + battery_level AS batt, + lat AS lat, + lon AS lon, + firmware_version AS fw, + cabin_temperature AS temp, + speed_kmh AS speed +FROM electric_vehicle_state FINAL +ORDER BY vin; + +DROP TABLE IF EXISTS test_table; + +CREATE TABLE test_table +( + key UInt32, + value Nullable(UInt32), + value_arr Variant(Array(UInt32), Nothing) +) +ENGINE = CoalescingMergeTree() +ORDER BY key; + + +INSERT INTO test_table VALUES(1,6, NULL); +INSERT INTO test_table VALUES(1,NULL, [1]); + +select * from test_table final; diff --git a/parser/testdata/03566_system_completions_table/ast.json b/parser/testdata/03566_system_completions_table/ast.json new file mode 100644 index 000000000..7c4317c30 --- /dev/null +++ b/parser/testdata/03566_system_completions_table/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery 0003566aaadatabase (children 1)" + }, + { + "explain": " Identifier 0003566aaadatabase" + } + ], + + "rows": 
2, + + "statistics": + { + "elapsed": 0.001145178, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/03566_system_completions_table/metadata.json b/parser/testdata/03566_system_completions_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03566_system_completions_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03566_system_completions_table/query.sql b/parser/testdata/03566_system_completions_table/query.sql new file mode 100644 index 000000000..91b990cf0 --- /dev/null +++ b/parser/testdata/03566_system_completions_table/query.sql @@ -0,0 +1,15 @@ +CREATE DATABASE IF NOT EXISTS `0003566aaadatabase`; +USE `0003566aaadatabase`; + +CREATE TABLE IF NOT EXISTS `0003566aaatable` ( + `0003566aaafoo` String, + `0003566aaabar` UInt16, + `0003566aaabaz` UInt128 +) ENGINE = Memory; + +SELECT * +FROM system.completions +WHERE startsWith(word, '0003566') +ORDER BY word +LIMIT 5 +FORMAT PrettyCompact; diff --git a/parser/testdata/03567_analyzer_single_with_scope_cycle/ast.json b/parser/testdata/03567_analyzer_single_with_scope_cycle/ast.json new file mode 100644 index 000000000..c96078b1c --- /dev/null +++ b/parser/testdata/03567_analyzer_single_with_scope_cycle/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery tab (children 3)" + }, + { + "explain": " Identifier tab" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration y (children 1)" + }, + { + "explain": " DataType UInt8" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001222875, + "rows_read": 12, + "bytes_read": 405 + } +} diff --git a/parser/testdata/03567_analyzer_single_with_scope_cycle/metadata.json b/parser/testdata/03567_analyzer_single_with_scope_cycle/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03567_analyzer_single_with_scope_cycle/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03567_analyzer_single_with_scope_cycle/query.sql b/parser/testdata/03567_analyzer_single_with_scope_cycle/query.sql new file mode 100644 index 000000000..7ffa1cc8d --- /dev/null +++ b/parser/testdata/03567_analyzer_single_with_scope_cycle/query.sql @@ -0,0 +1,30 @@ +create table tab (x String, y UInt8) engine = MergeTree order by tuple(); +insert into tab select 'rue', 1; + +WITH + a as b +SELECT 1 FROM ( + WITH b as c + SELECT 1 FROM ( + WITH c as d + SELECT 1 FROM ( + SELECT 1 FROM tab WHERE e = 'true' + ) + ) +) +SETTINGS allow_experimental_analyzer = 0; -- { serverError UNKNOWN_IDENTIFIER } + +SET enable_scopes_for_with_statement = 0; + +WITH + a as b +SELECT 1 FROM ( + WITH b as c + SELECT 1 FROM ( + WITH c as d + SELECT 1 FROM ( + SELECT 1 FROM tab WHERE e = 'true' + ) + ) +) +SETTINGS allow_experimental_analyzer = 1; -- { serverError UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/03567_finalize_write_buffer_valid_utf8/ast.json b/parser/testdata/03567_finalize_write_buffer_valid_utf8/ast.json new file mode 100644 index 000000000..41eb3857c --- /dev/null +++ 
b/parser/testdata/03567_finalize_write_buffer_valid_utf8/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "InsertQuery (children 3)" + }, + { + "explain": " Function file (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function currentDatabase (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal 'JSONColumns'" + }, + { + "explain": " Literal 'c0 Enum(x\\'e2\\' = 1)'" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Int64_-1" + }, + { + "explain": " Set" + }, + { + "explain": " Set" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001242528, + "rows_read": 14, + "bytes_read": 462 + } +} diff --git a/parser/testdata/03567_finalize_write_buffer_valid_utf8/metadata.json b/parser/testdata/03567_finalize_write_buffer_valid_utf8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03567_finalize_write_buffer_valid_utf8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03567_finalize_write_buffer_valid_utf8/query.sql b/parser/testdata/03567_finalize_write_buffer_valid_utf8/query.sql new file mode 100644 index 000000000..5f8c4cdda --- /dev/null +++ b/parser/testdata/03567_finalize_write_buffer_valid_utf8/query.sql @@ -0,0 +1 @@ +INSERT INTO TABLE FUNCTION file(currentDatabase(), 'JSONColumns', 'c0 Enum(x\'e2\' = 1)') SELECT -1 SETTINGS output_format_json_validate_utf8 = 1; -- { serverError UNKNOWN_ELEMENT_OF_ENUM } diff --git a/parser/testdata/03567_join_using_projection_distributed/ast.json b/parser/testdata/03567_join_using_projection_distributed/ast.json new file mode 100644 index 000000000..9ba7c8b6c --- /dev/null +++ b/parser/testdata/03567_join_using_projection_distributed/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001051769, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03567_join_using_projection_distributed/metadata.json b/parser/testdata/03567_join_using_projection_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03567_join_using_projection_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03567_join_using_projection_distributed/query.sql b/parser/testdata/03567_join_using_projection_distributed/query.sql new file mode 100644 index 000000000..3d4890d69 --- /dev/null +++ b/parser/testdata/03567_join_using_projection_distributed/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 ( `key` String, `attr` UInt32 ) ENGINE = MergeTree ORDER BY key; +CREATE TABLE t2 ( `key` String, `attr` UInt32 ) ENGINE = MergeTree ORDER BY key; + +INSERT INTO t1 VALUES ('a', 42), ('b', 43), ('c', 44); +INSERT INTO t2 VALUES ('AA', 111), ('AA', 222), ('other', 333); + +SELECT + CASE + WHEN key = 'a' THEN 'AA' + WHEN key = 'b' THEN 'BB' + ELSE 'other' + END AS key1, + * +FROM remote('127.0.0.{2,3}', currentDatabase(), t1) t1 +INNER JOIN +( + SELECT + key AS key1, + attr + FROM t2 +) AS a USING (key1) +ORDER BY 
a.attr +SETTINGS enable_analyzer = 1, analyzer_compatibility_join_using_top_level_identifier=1; + diff --git a/parser/testdata/03567_json_extract_case_insensitive_edge_cases/ast.json b/parser/testdata/03567_json_extract_case_insensitive_edge_cases/ast.json new file mode 100644 index 000000000..25b1bf498 --- /dev/null +++ b/parser/testdata/03567_json_extract_case_insensitive_edge_cases/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '--Edge cases for JSONExtractCaseInsensitive--'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001370807, + "rows_read": 5, + "bytes_read": 216 + } +} diff --git a/parser/testdata/03567_json_extract_case_insensitive_edge_cases/metadata.json b/parser/testdata/03567_json_extract_case_insensitive_edge_cases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03567_json_extract_case_insensitive_edge_cases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03567_json_extract_case_insensitive_edge_cases/query.sql b/parser/testdata/03567_json_extract_case_insensitive_edge_cases/query.sql new file mode 100644 index 000000000..f7d014f79 --- /dev/null +++ b/parser/testdata/03567_json_extract_case_insensitive_edge_cases/query.sql @@ -0,0 +1,52 @@ +SELECT '--Edge cases for JSONExtractCaseInsensitive--'; + +-- Keys with special characters +SELECT JSONExtractStringCaseInsensitive('{"key-with-dash": "value1"}', 'KEY-WITH-DASH'); +SELECT JSONExtractStringCaseInsensitive('{"key_with_underscore": "value2"}', 'KEY_WITH_UNDERSCORE'); +SELECT JSONExtractStringCaseInsensitive('{"key.with.dots": "value3"}', 'KEY.WITH.DOTS'); +SELECT JSONExtractStringCaseInsensitive('{"key with spaces": "value4"}', 'KEY WITH SPACES'); + +-- Unicode keys (ASCII case-insensitive only) +SELECT JSONExtractStringCaseInsensitive('{"café": "coffee"}', 'CAFÉ'); -- Should not match (non-ASCII) +SELECT JSONExtractStringCaseInsensitive('{"café": "coffee"}', 'café'); -- Exact match works + +-- Numeric string keys +SELECT JSONExtractStringCaseInsensitive('{"123": "numeric key"}', '123'); + +-- Empty string key +SELECT JSONExtractStringCaseInsensitive('{"": "empty key"}', ''); + +-- Very long keys +SELECT JSONExtractStringCaseInsensitive( + concat('{"', repeat('VeryLongKey', 100), '": "value"}'), + repeat('verylongkey', 100) +); + +-- Mixed types +SELECT JSONExtractStringCaseInsensitive('{"Key": 123}', 'key'); -- Number as string +SELECT JSONExtractStringCaseInsensitive('{"Key": true}', 'KEY'); -- Bool as string +SELECT JSONExtractIntCaseInsensitive('{"Key": "123"}', 'key'); -- String as number +SELECT JSONExtractBoolCaseInsensitive('{"Key": 1}', 'KEY'); -- Number as bool + +-- Null values +SELECT JSONExtractStringCaseInsensitive('{"Key": null}', 'key'); +SELECT JSONExtractIntCaseInsensitive('{"Key": null}', 'KEY'); + +-- Invalid JSON +SELECT JSONExtractStringCaseInsensitive('not a json', 'key'); +SELECT JSONExtractIntCaseInsensitive('{invalid json}', 'key'); + +-- Case sensitivity comparison +SELECT JSONExtractString('{"ABC": "def", "abc": "ghi"}', 'abc'); -- Case sensitive - exact match +SELECT JSONExtractStringCaseInsensitive('{"ABC": "def", "abc": "ghi"}', 'abc'); -- Case insensitive - first match + +-- Multiple levels of nesting 
+SELECT JSONExtractStringCaseInsensitive( + '{"LEVEL1": {"level2": {"LEVEL3": {"level4": "deep"}}}}', + 'level1', 'LEVEL2', 'level3', 'LEVEL4' +); + +-- Test additional functions with case-insensitive keys +SELECT JSONExtractArrayRawCaseInsensitive('{"ARRAY": ["test", 123, true]}', 'array'); +SELECT length(JSONExtractKeysAndValuesRawCaseInsensitive('{"KEY1": "value1", "key2": 100}')); +SELECT JSONExtractKeysCaseInsensitive('{"ABC": 1, "def": 2, "GHI": 3}')[1]; \ No newline at end of file diff --git a/parser/testdata/03567_nowInBlock64/ast.json b/parser/testdata/03567_nowInBlock64/ast.json new file mode 100644 index 000000000..4d05df44a --- /dev/null +++ b/parser/testdata/03567_nowInBlock64/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.0011005, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03567_nowInBlock64/metadata.json b/parser/testdata/03567_nowInBlock64/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03567_nowInBlock64/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03567_nowInBlock64/query.sql b/parser/testdata/03567_nowInBlock64/query.sql new file mode 100644 index 000000000..4c0129cd0 --- /dev/null +++ b/parser/testdata/03567_nowInBlock64/query.sql @@ -0,0 +1,14 @@ +SET max_rows_to_read = 0, max_bytes_to_read = 0; + +SELECT nowInBlock64(3, 'America/Sao_Paulo', 3); --{ serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT nowInBlock64(10); --{ serverError ARGUMENT_OUT_OF_BOUND} +SELECT nowInBlock64('string'); --{ serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT nowInBlock64(3, true); --{ serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT nowInBlock64(3, 3); --{ serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT nowInBlock64(3, 'string'); --{ serverError BAD_ARGUMENTS } + +SELECT count() FROM (SELECT DISTINCT nowInBlock64(), nowInBlock64(3), nowInBlock64(3, 'Pacific/Pitcairn') FROM system.numbers LIMIT 3); +SELECT nowInBlock64(NULL) IS NULL; + +-- Bug 85534 +SELECT nowInBlock64(materialize(toUInt128(3)), 'America/Sao_Paulo') FORMAT Null; -- { serverError ILLEGAL_COLUMN } diff --git a/parser/testdata/03568_ddsketch_merge/ast.json b/parser/testdata/03568_ddsketch_merge/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03568_ddsketch_merge/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03568_ddsketch_merge/metadata.json b/parser/testdata/03568_ddsketch_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03568_ddsketch_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03568_ddsketch_merge/query.sql b/parser/testdata/03568_ddsketch_merge/query.sql new file mode 100644 index 000000000..ccb818828 --- /dev/null +++ b/parser/testdata/03568_ddsketch_merge/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on base64Decode + +SELECT quantileDDMerge(0.01)(state) != 0 FROM format( + RowBinary, + 'state AggregateFunction(quantileDD(0.01), Float64)', + base64Decode('AgAAAAAAAABAAAAAAAAAAAABDAEPAgAAAAAAAPA/AwwA/v///w8CBAAAAAAAAAAAAs07f2aeoPY/AAAAAAAAAAABDAEjAgAAAAAAAPA/AwwA/v///w8CBAAAAAAAAAAA') +) diff --git a/parser/testdata/03568_json_extract_case_insensitive/ast.json b/parser/testdata/03568_json_extract_case_insensitive/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null 
+++ b/parser/testdata/03568_json_extract_case_insensitive/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03568_json_extract_case_insensitive/metadata.json b/parser/testdata/03568_json_extract_case_insensitive/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03568_json_extract_case_insensitive/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03568_json_extract_case_insensitive/query.sql b/parser/testdata/03568_json_extract_case_insensitive/query.sql new file mode 100644 index 000000000..cee7fd4fe --- /dev/null +++ b/parser/testdata/03568_json_extract_case_insensitive/query.sql @@ -0,0 +1,71 @@ +-- Tags: no-fasttest +-- Tag: no-fasttest because only SIMD JSON is available in fasttest + +SELECT '--JSONExtractCaseInsensitive--'; + +-- Basic case-insensitive key matching +SELECT JSONExtractStringCaseInsensitive('{"ABC": "def"}', 'abc'); +SELECT JSONExtractStringCaseInsensitive('{"abc": "def"}', 'ABC'); +SELECT JSONExtractStringCaseInsensitive('{"AbC": "def"}', 'aBc'); +SELECT JSONExtractStringCaseInsensitive('{"abc": "def", "ABC": "ghi"}', 'abc'); -- Should return first match + +-- Different data types +SELECT JSONExtractIntCaseInsensitive('{"Value": 123}', 'value'); +SELECT JSONExtractIntCaseInsensitive('{"VALUE": -456}', 'Value'); +SELECT JSONExtractUIntCaseInsensitive('{"COUNT": 789}', 'count'); +SELECT JSONExtractFloatCaseInsensitive('{"Price": 12.34}', 'PRICE'); +SELECT JSONExtractBoolCaseInsensitive('{"IsActive": true}', 'isactive'); + +-- Nested objects +SELECT JSONExtractStringCaseInsensitive('{"User": {"Name": "John"}}', 'user', 'name'); +SELECT JSONExtractIntCaseInsensitive('{"DATA": {"COUNT": 42}}', 'data', 'Count'); + +-- Arrays +SELECT JSONExtractIntCaseInsensitive('{"Items": [1, 2, 3]}', 'items', 1); +SELECT JSONExtractStringCaseInsensitive('{"TAGS": ["a", "b", "c"]}', 'tags', 0); + +-- Raw extraction +SELECT JSONExtractRawCaseInsensitive('{"Object": {"key": "value"}}', 'OBJECT'); +SELECT JSONExtractRawCaseInsensitive('{"Array": [1, 2, 3]}', 'array'); + +-- Generic extraction with type +SELECT JSONExtractCaseInsensitive('{"Number": 123}', 'number', 'Int32'); +SELECT JSONExtractCaseInsensitive('{"Text": "hello"}', 'TEXT', 'String'); +SELECT JSONExtractCaseInsensitive('{"List": [1, 2, 3]}', 'list', 'Array(Int32)'); + +-- Keys and values extraction +SELECT JSONExtractKeysAndValuesCaseInsensitive('{"Name": "Alice", "AGE": 30}', 'String'); +SELECT JSONExtractKeysAndValuesCaseInsensitive('{"ID": 1, "Value": 2}', 'Int32'); + +-- Non-existent keys +SELECT JSONExtractStringCaseInsensitive('{"abc": "def"}', 'xyz'); +SELECT JSONExtractIntCaseInsensitive('{"abc": 123}', 'XYZ'); + +-- Empty JSON +SELECT JSONExtractStringCaseInsensitive('{}', 'key'); + +-- Multiple keys with different cases (should return the first match) +SELECT JSONExtractStringCaseInsensitive('{"key": "first", "KEY": "second", "Key": "third"}', 'KEY'); + +-- Complex nested example +SELECT JSONExtractIntCaseInsensitive('{"LEVEL1": {"Level2": {"level3": 999}}}', 'level1', 'LEVEL2', 'LEVEL3'); + +-- Additional functions: ArrayRaw, KeysAndValuesRaw, Keys +SELECT JSONExtractArrayRawCaseInsensitive('{"Items": [1, 2, 3]}', 'ITEMS'); +SELECT JSONExtractKeysAndValuesRawCaseInsensitive('{"Name": "Alice", "AGE": 30}'); +SELECT JSONExtractKeysCaseInsensitive('{"Name": "Alice", "AGE": 30}'); + +-- Testing with both allow_simdjson settings +SELECT '--allow_simdjson=0--'; +SET allow_simdjson=0; + +SELECT JSONExtractStringCaseInsensitive('{"ABC": 
"def"}', 'abc'); +SELECT JSONExtractIntCaseInsensitive('{"Value": 123}', 'value'); +SELECT JSONExtractFloatCaseInsensitive('{"Price": 12.34}', 'PRICE'); + +SELECT '--allow_simdjson=1--'; +SET allow_simdjson=1; + +SELECT JSONExtractStringCaseInsensitive('{"ABC": "def"}', 'abc'); +SELECT JSONExtractIntCaseInsensitive('{"Value": 123}', 'value'); +SELECT JSONExtractFloatCaseInsensitive('{"Price": 12.34}', 'PRICE'); \ No newline at end of file diff --git a/parser/testdata/03568_mutation_affected_rows_counter/ast.json b/parser/testdata/03568_mutation_affected_rows_counter/ast.json new file mode 100644 index 000000000..bcfafe43e --- /dev/null +++ b/parser/testdata/03568_mutation_affected_rows_counter/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_mutation_rows_counter (children 1)" + }, + { + "explain": " Identifier t_mutation_rows_counter" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001171155, + "rows_read": 2, + "bytes_read": 98 + } +} diff --git a/parser/testdata/03568_mutation_affected_rows_counter/metadata.json b/parser/testdata/03568_mutation_affected_rows_counter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03568_mutation_affected_rows_counter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03568_mutation_affected_rows_counter/query.sql b/parser/testdata/03568_mutation_affected_rows_counter/query.sql new file mode 100644 index 000000000..4db176288 --- /dev/null +++ b/parser/testdata/03568_mutation_affected_rows_counter/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS t_mutation_rows_counter; + +CREATE TABLE t_mutation_rows_counter (x UInt64) ENGINE = MergeTree ORDER BY tuple(); + +SET mutations_sync = 2; + +INSERT INTO t_mutation_rows_counter SELECT number FROM numbers(1000); +ALTER TABLE t_mutation_rows_counter UPDATE x = x + 1 WHERE x = 150; + +SELECT x, count() FROM t_mutation_rows_counter GROUP BY x HAVING count() > 1; +SYSTEM FLUSH LOGS part_log; + +SELECT + ProfileEvents['MutatedRows'], + ProfileEvents['MutationAffectedRowsUpperBound'] +FROM system.part_log +WHERE database = currentDatabase() AND table = 't_mutation_rows_counter' AND event_type = 'MutatePart'; + +DROP TABLE IF EXISTS t_mutation_rows_counter; diff --git a/parser/testdata/03568_udf_memory_tracking/ast.json b/parser/testdata/03568_udf_memory_tracking/ast.json new file mode 100644 index 000000000..0ad5b5dea --- /dev/null +++ b/parser/testdata/03568_udf_memory_tracking/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001196946, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03568_udf_memory_tracking/metadata.json b/parser/testdata/03568_udf_memory_tracking/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03568_udf_memory_tracking/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03568_udf_memory_tracking/query.sql b/parser/testdata/03568_udf_memory_tracking/query.sql new file mode 100644 index 000000000..9ba9fbea3 --- /dev/null +++ b/parser/testdata/03568_udf_memory_tracking/query.sql @@ -0,0 +1,18 @@ +SET log_queries = 1; + +SELECT test_function(number, 0) FROM numbers(100) FORMAT Null SETTINGS max_threads = 1, max_block_size = 1; +SELECT test_function(number, 0) FROM numbers(200) FORMAT Null 
SETTINGS max_threads = 1, max_block_size = 1; + +SYSTEM FLUSH LOGS query_log; + +SELECT + count() AS queries, + min(memory_usage) < 20000000 AS min_less_then_20mb, + max(memory_usage) < 20000000 AS max_less_then_20mb +FROM system.query_log +WHERE current_database = currentDatabase() + AND type = 'QueryFinish' + AND query_kind = 'Select' + AND query LIKE '%test_function(number, 0)%' +FORMAT Vertical; + diff --git a/parser/testdata/03569_max_joined_block_size_rows_bug/ast.json b/parser/testdata/03569_max_joined_block_size_rows_bug/ast.json new file mode 100644 index 000000000..e3ff583aa --- /dev/null +++ b/parser/testdata/03569_max_joined_block_size_rows_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001191247, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03569_max_joined_block_size_rows_bug/metadata.json b/parser/testdata/03569_max_joined_block_size_rows_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03569_max_joined_block_size_rows_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03569_max_joined_block_size_rows_bug/query.sql b/parser/testdata/03569_max_joined_block_size_rows_bug/query.sql new file mode 100644 index 000000000..40e2abd84 --- /dev/null +++ b/parser/testdata/03569_max_joined_block_size_rows_bug/query.sql @@ -0,0 +1,2 @@ +set enable_analyzer=1; +select * from system.one, system.one settings max_joined_block_size_rows=0 format Null; diff --git a/parser/testdata/03570_dateTimeToUUIDv7/ast.json b/parser/testdata/03570_dateTimeToUUIDv7/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03570_dateTimeToUUIDv7/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03570_dateTimeToUUIDv7/metadata.json b/parser/testdata/03570_dateTimeToUUIDv7/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03570_dateTimeToUUIDv7/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03570_dateTimeToUUIDv7/query.sql b/parser/testdata/03570_dateTimeToUUIDv7/query.sql new file mode 100644 index 000000000..e66769c1c --- /dev/null +++ b/parser/testdata/03570_dateTimeToUUIDv7/query.sql @@ -0,0 +1,31 @@ +-- Tests function dateTimeToUUIDv7 + +SELECT 'Negative tests'; +SELECT dateTimeToUUIDv7(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT dateTimeToUUIDv7('2021-08-15 18:57:56'); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT dateTimeToUUIDv7(toDateTime('2021-08-15 18:57:56'), 'extra'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT dateTimeToUUIDv7(123.123456); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 'Basic tests'; +SELECT toTypeName(dateTimeToUUIDv7(toDateTime('2021-08-15 18:57:56'))); +SELECT substring(hex(dateTimeToUUIDv7(toDateTime('2021-08-15 18:57:56'))), 13, 1); -- check version bits (should be '7') +SELECT bitAnd(bitShiftRight(toUInt128(dateTimeToUUIDv7(toDateTime('2021-08-15 18:57:56'))), 62), 3); -- check variant bits (should be '2') + +SELECT 'Counter functionality tests'; +-- multiple calls with same timestamp, should increment +SELECT + max(uuid) > min(uuid) +FROM ( + SELECT dateTimeToUUIDv7(toDateTime('2021-08-15 18:57:56')) AS uuid FROM numbers(2) +); + +SELECT 'Different timestamps should produce different UUIDs'; +SELECT dateTimeToUUIDv7(toDateTime('2021-08-15 
18:57:56')) != dateTimeToUUIDv7(toDateTime('2021-08-15 18:57:57')); + +SELECT 'Timezone handling'; +SELECT dateTimeToUUIDv7(toDateTime('2021-08-15 18:57:56', 'UTC')) != dateTimeToUUIDv7(toDateTime('2021-08-15 18:57:56', 'Asia/Shanghai')); + +SELECT 'Verify timestamp extraction works correctly'; +SELECT toDateTime('2021-08-15 18:57:56', 'UTC') AS d, + UUIDv7ToDateTime(dateTimeToUUIDv7(d), 'UTC') == d, + UUIDv7ToDateTime(dateTimeToUUIDv7(materialize(d)), 'UTC') == d; diff --git a/parser/testdata/03570_date_to_datetime64_overflow/ast.json b/parser/testdata/03570_date_to_datetime64_overflow/ast.json new file mode 100644 index 000000000..af55d524f --- /dev/null +++ b/parser/testdata/03570_date_to_datetime64_overflow/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001000386, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03570_date_to_datetime64_overflow/metadata.json b/parser/testdata/03570_date_to_datetime64_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03570_date_to_datetime64_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03570_date_to_datetime64_overflow/query.sql b/parser/testdata/03570_date_to_datetime64_overflow/query.sql new file mode 100644 index 000000000..1fcc4e54d --- /dev/null +++ b/parser/testdata/03570_date_to_datetime64_overflow/query.sql @@ -0,0 +1,13 @@ +SET session_timezone='UTC'; + +WITH arrayJoin([toDate('1970-01-01'), + toDate('1970-01-02'), + toDate('2149-06-05'), + toDate('2149-06-06')]) AS date +SELECT + date, + toUInt16(date) AS d16, + toDateTime64(date, 9) AS dir_cast, + CAST(date, 'DateTime64(9)') AS cast_direct, + CAST(toDate32(date), 'DateTime64(9)') AS cast_via_date32, + CAST(toDateTime(date), 'DateTime64(9)') AS cast_via_datetime; -- max value for DateTime is 2106-02-07 06:28:15, so the value overflows diff --git a/parser/testdata/03570_insert_into_simple_alias/ast.json b/parser/testdata/03570_insert_into_simple_alias/ast.json new file mode 100644 index 000000000..856a671df --- /dev/null +++ b/parser/testdata/03570_insert_into_simple_alias/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery alias_insert_test (children 1)" + }, + { + "explain": " Identifier alias_insert_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00104769, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/03570_insert_into_simple_alias/metadata.json b/parser/testdata/03570_insert_into_simple_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03570_insert_into_simple_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03570_insert_into_simple_alias/query.sql b/parser/testdata/03570_insert_into_simple_alias/query.sql new file mode 100644 index 000000000..44ad12ec6 --- /dev/null +++ b/parser/testdata/03570_insert_into_simple_alias/query.sql @@ -0,0 +1,41 @@ +DROP TABLE IF EXISTS alias_insert_test; + +CREATE TABLE alias_insert_test +( + -- Base physical columns + `id` UInt64, + `name` String, + `value` Float64, + + -- Simple ALIAS columns (should be insertable with the new feature) + `UserID` ALIAS `id`, + `UserName` ALIAS `name`, + + -- Complex ALIAS columns (should remain non-insertable) + `UpperName` ALIAS upper(name), +
`ValuePlusOne` ALIAS value + 1.0 +) +ENGINE = MergeTree() +ORDER BY id; + + +INSERT INTO alias_insert_test (id, name, value) VALUES (0, 'zeno', 100.5); +INSERT INTO alias_insert_test (UserID, UserName, value) VALUES (1, 'alice', 10.5); + +INSERT INTO alias_insert_test (id, UserName, value) VALUES (2, 'bob', 20.5); +INSERT INTO alias_insert_test (UserID, name, value) VALUES (3, 'charlie', 30.5); + +SET async_insert = 1; +SET wait_for_async_insert = 1; + +INSERT INTO alias_insert_test (UserID, UserName, value) VALUES (5, 'david_async', 500.0); +INSERT INTO alias_insert_test (id, UserName, value) VALUES (6, 'eve_async', 600.5); + +SET async_insert = 0; + +INSERT INTO alias_insert_test (id, UpperName, value) VALUES (99, 'FAIL', 999.0); -- { serverError NO_SUCH_COLUMN_IN_TABLE } +INSERT INTO alias_insert_test (id, name, ValuePlusOne) VALUES (99, 'FAIL', 999.0); -- { serverError NO_SUCH_COLUMN_IN_TABLE } + +SELECT id, name, value FROM alias_insert_test ORDER BY id; + +DROP TABLE IF EXISTS alias_insert_test; \ No newline at end of file diff --git a/parser/testdata/03570_limit_by_all/ast.json b/parser/testdata/03570_limit_by_all/ast.json new file mode 100644 index 000000000..388bd1004 --- /dev/null +++ b/parser/testdata/03570_limit_by_all/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001352085, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03570_limit_by_all/metadata.json b/parser/testdata/03570_limit_by_all/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03570_limit_by_all/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03570_limit_by_all/query.sql b/parser/testdata/03570_limit_by_all/query.sql new file mode 100644 index 000000000..b683d8c5f --- /dev/null +++ b/parser/testdata/03570_limit_by_all/query.sql @@ -0,0 +1,220 @@ +SET max_threads = 1; +SET max_insert_threads = 1; +SET max_block_size = 65536; +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_limit_by_all; + +CREATE TABLE test_limit_by_all ( + id Int32, + category String, + value Int32, + name String +) ENGINE = Memory; + +INSERT INTO test_limit_by_all VALUES +(1, 'A', 100, 'item1'), +(1, 'A', 200, 'item2'), +(1, 'B', 300, 'item3'), +(2, 'A', 400, 'item4'), +(2, 'A', 500, 'item5'), +(2, 'B', 600, 'item6'), +(3, 'C', 700, 'item7'); + +-- Test 1: Basic LIMIT BY ALL usage +SELECT id, category, value +FROM test_limit_by_all +ORDER BY id, category, value +LIMIT 1 BY ALL; + +-- Test 2: LIMIT BY ALL with aggregate functions (should ignore aggregates) +SELECT id, category, count(*) as cnt +FROM test_limit_by_all +GROUP BY id, category +ORDER BY id, category +LIMIT 1 BY ALL; + +-- Test 3: LIMIT BY ALL with computed column - make deterministic by ordering by value +SELECT id, category, concat(category, '_', name) as combined +FROM test_limit_by_all +ORDER BY id, category, value -- Order by value to make deterministic when id, category are the same +LIMIT 2 BY ALL; + +-- Test 4: Basic equivalence test +SELECT id, category, value +FROM test_limit_by_all +ORDER BY id, category, value +LIMIT 1 BY ALL; + +-- Test 5: Explicit column list (should be equivalent to test 4) +SELECT id, category, value +FROM test_limit_by_all +ORDER BY id, category, value +LIMIT 1 BY id, category, value; + +-- Test 6: EXPLAIN SYNTAX test +EXPLAIN SYNTAX +SELECT id, category, value, rand() AS r +FROM test_limit_by_all
+ORDER BY id, category, value +LIMIT 1 BY ALL; + +-- Test 7: EXPLAIN QUERY TREE test +EXPLAIN QUERY TREE +SELECT id, category, value +FROM test_limit_by_all +ORDER BY id, category, value +LIMIT 1 BY ALL; + +-- Test 8: LIMIT BY ALL with window function - make deterministic +SELECT id, category, value, row_number() OVER (PARTITION BY category ORDER BY value) AS rn +FROM test_limit_by_all +ORDER BY id, category, value, rn +LIMIT 1 BY ALL LIMIT 3; + +-- Test 9: LIMIT BY ALL with WHERE clause - make deterministic +SELECT id, category, value +FROM test_limit_by_all +WHERE value > 200 +ORDER BY id, category, value -- Add value to ordering for deterministic results +LIMIT 1 BY ALL; + +SELECT id AS k, category +FROM test_limit_by_all +ORDER BY k, category, value +LIMIT 1 BY ALL; + +SELECT * +FROM test_limit_by_all +ORDER BY id, category, value, name +LIMIT 1 BY ALL; + +SELECT id +FROM (SELECT DISTINCT id, category FROM test_limit_by_all) +ORDER BY id, category +LIMIT 1 BY ALL; + +SELECT DISTINCT id, category +FROM test_limit_by_all +ORDER BY id, category +LIMIT 1 BY ALL +LIMIT 2; + +SELECT d, category, count() AS c +FROM +( + WITH toStartOfDay(toDateTime('2025-01-01 12:00:00')) AS d + SELECT d, category + FROM test_limit_by_all + ORDER BY d, category, value, name + LIMIT 2 BY ALL + SETTINGS enable_positional_arguments = 0 +) +GROUP BY d, category +ORDER BY d, category; + +SELECT id, category +FROM test_limit_by_all +ORDER BY id, category, value +LIMIT 1,2 BY ALL; + +SELECT id, category +FROM test_limit_by_all +ORDER BY id, category, value +LIMIT 2 OFFSET 1 BY ALL; + +SELECT id, category +FROM test_limit_by_all +ORDER BY value DESC +LIMIT 1 BY ALL +LIMIT 2; + +-- Only-aggregate SELECT -> expect syntax error 62 +SELECT count() +FROM test_limit_by_all +LIMIT 1 BY ALL; -- { serverError 62 } + +-- JOIN + ARRAY JOIN +DROP TABLE IF EXISTS test_limit_by_all_tags; +CREATE TABLE test_limit_by_all_tags (id Int32, tags Array(String)) ENGINE = Memory; +INSERT INTO test_limit_by_all_tags VALUES (1, ['x','y']), (2, ['y']), (3, ['z']); + +EXPLAIN SYNTAX +SELECT t.id, tag, sum(value) AS s +FROM test_limit_by_all AS t +LEFT JOIN test_limit_by_all_tags AS g USING (id) +ARRAY JOIN g.tags AS tag +GROUP BY t.id, tag +ORDER BY t.id, tag +LIMIT 1 BY ALL; + +SELECT t.id, tag +FROM test_limit_by_all AS t +LEFT JOIN test_limit_by_all_tags AS g USING (id) +ARRAY JOIN g.tags AS tag +ORDER BY t.id, tag, value +LIMIT 1 BY ALL; + +DROP TABLE test_limit_by_all_tags; + +WITH toStartOfHour(toDateTime('2025-01-01 12:00:00')) AS h +SELECT h, category +FROM test_limit_by_all +ORDER BY h, category, value +LIMIT 1 BY ALL +SETTINGS enable_positional_arguments = 0; + +SELECT id, category, value +FROM test_limit_by_all +ORDER BY id, category, value +LIMIT 1,2 BY id; + +SELECT id, category, value +FROM test_limit_by_all +ORDER BY id, category, value +LIMIT 2 OFFSET 1 BY id; + +EXPLAIN SYNTAX +SELECT id, category, value +FROM test_limit_by_all +ORDER BY 1, 2, 3 +LIMIT 2 BY 1, 2 +SETTINGS enable_positional_arguments=1; + +SELECT id, category, value +FROM test_limit_by_all +ORDER BY id, category, value +LIMIT -2; + +SELECT id, category, value +FROM test_limit_by_all +ORDER BY id, category, value +LIMIT -2 OFFSET -1; + +SELECT id, category, value +FROM test_limit_by_all +ORDER BY id, category, value +LIMIT -1 BY id; -- { serverError NOT_IMPLEMENTED } + +-- Should give no result +SELECT id, category, value +FROM test_limit_by_all +ORDER BY id, category, value +LIMIT 0 BY ALL; + +SELECT id, count() AS c +FROM test_limit_by_all +GROUP BY id, 
category +HAVING c >= 1 +ORDER BY id, c DESC, category +LIMIT 1 BY ALL; + +-- NULL key handling +INSERT INTO test_limit_by_all VALUES (4, NULL, 10, 'n1'), (4, NULL, 20, 'n2'); + +SELECT id, category +FROM test_limit_by_all +ORDER BY id, category NULLS FIRST, value +LIMIT 1 BY ALL; + +DROP TABLE test_limit_by_all; \ No newline at end of file diff --git a/parser/testdata/03571_join_inequality_constants/ast.json b/parser/testdata/03571_join_inequality_constants/ast.json new file mode 100644 index 000000000..683a3f7a2 --- /dev/null +++ b/parser/testdata/03571_join_inequality_constants/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00126631, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03571_join_inequality_constants/metadata.json b/parser/testdata/03571_join_inequality_constants/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03571_join_inequality_constants/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03571_join_inequality_constants/query.sql b/parser/testdata/03571_join_inequality_constants/query.sql new file mode 100644 index 000000000..2a2cb98eb --- /dev/null +++ b/parser/testdata/03571_join_inequality_constants/query.sql @@ -0,0 +1,4 @@ +SET enable_analyzer = 1; + +SELECT id FROM (SELECT toLowCardinality(1) AS id) AS a INNER JOIN (SELECT toLowCardinality(toUInt128(materialize(0))) AS id) AS b USING (id) INNER JOIN a AS t ON b.id < t.id; +SELECT 1 FROM (SELECT toLowCardinality(0) a) a FULL JOIN a b USING (a) JOIN a c ON b.a > c.a SETTINGS join_use_nulls = true; diff --git a/parser/testdata/03571_limit_by_all_old_planner/ast.json b/parser/testdata/03571_limit_by_all_old_planner/ast.json new file mode 100644 index 000000000..5c6ab4173 --- /dev/null +++ b/parser/testdata/03571_limit_by_all_old_planner/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001094169, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03571_limit_by_all_old_planner/metadata.json b/parser/testdata/03571_limit_by_all_old_planner/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03571_limit_by_all_old_planner/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03571_limit_by_all_old_planner/query.sql b/parser/testdata/03571_limit_by_all_old_planner/query.sql new file mode 100644 index 000000000..2e207f07a --- /dev/null +++ b/parser/testdata/03571_limit_by_all_old_planner/query.sql @@ -0,0 +1,122 @@ +SET max_threads = 1; +SET max_insert_threads = 1; +SET max_block_size = 65536; +SET enable_analyzer = 0; + +DROP TABLE IF EXISTS test_limit_by_all_old_planner; + +CREATE TABLE test_limit_by_all_old_planner ( + id Int32, + category String, + value Int32, + name String +) ENGINE = Memory; + +INSERT INTO test_limit_by_all_old_planner VALUES +(1, 'A', 100, 'item1'), +(1, 'A', 200, 'item2'), +(1, 'B', 300, 'item3'), +(2, 'A', 400, 'item4'), +(2, 'A', 500, 'item5'), +(2, 'B', 600, 'item6'), +(3, 'C', 700, 'item7'); + +-- Test 1: Test that LIMIT BY ALL throws an exception when using the old planner +-- This tests the changes in TreeRewriter.cpp +SELECT id, category, value, name +FROM test_limit_by_all_old_planner +LIMIT 1 BY ALL +SETTINGS
allow_experimental_analyzer = 0; -- {serverError NOT_IMPLEMENTED} + +-- Test 2: Basic LIMIT BY usage. +SELECT id, category, value +FROM test_limit_by_all_old_planner +ORDER BY id, category, value +LIMIT 1 BY id, category, value; + +-- Test 3: LIMIT BY with computed column - make deterministic by ordering by value +SELECT id, category, concat(category, '_', name) as combined +FROM test_limit_by_all_old_planner +ORDER BY id, category, value -- Order by value to make deterministic when id, category are the same +LIMIT 2 BY id, category, combined; + +-- Test 4: LIMIT BY with window function - make deterministic +SELECT id, category, value, row_number() OVER (PARTITION BY category ORDER BY value) AS rn +FROM test_limit_by_all_old_planner +ORDER BY id, category, value, rn +LIMIT 1 BY id, category, value, rn LIMIT 3; + +-- Test 5: LIMIT BY with WHERE clause - make deterministic +SELECT id, category, value +FROM test_limit_by_all_old_planner +WHERE value > 200 +ORDER BY id, category, value -- Add value to ordering for deterministic results +LIMIT 1 BY id, category, value; + +-- Test 6: LIMIT BY with unique values +SELECT id +FROM (SELECT DISTINCT id, category FROM test_limit_by_all_old_planner) +ORDER BY id, category +LIMIT 1 BY id; + +-- Test 7: LIMIT BY with DISTINCT clause +SELECT DISTINCT id, category +FROM test_limit_by_all_old_planner +ORDER BY id, category +LIMIT 1 BY id, category +LIMIT 2; + +-- Test 8: Negative LIMIT BY should throw an exception +SELECT id, category, value +FROM test_limit_by_all_old_planner +ORDER BY id, category, value +LIMIT -1 BY id; -- { serverError NOT_IMPLEMENTED } + +-- Test 9: LIMIT BY with OFFSET +SELECT id, category +FROM test_limit_by_all_old_planner +ORDER BY id, category, value +LIMIT 2 OFFSET 1 BY id, category; + +-- Test 10: LIMIT BY with DESC ORDER BY +SELECT id, category +FROM test_limit_by_all_old_planner +ORDER BY value DESC +LIMIT 1 BY id, category +LIMIT 2; + +-- Test 11: 0 LIMIT BY - Should give no result +SELECT id, category, value +FROM test_limit_by_all_old_planner +ORDER BY id, category, value +LIMIT 0 BY id, category, value; + +-- Test 12: Misc + +SELECT id, category, value +FROM test_limit_by_all_old_planner +ORDER BY id, category, value +LIMIT 2 OFFSET 1 BY id; + +SELECT d, category, count() AS c +FROM +( + WITH toStartOfDay(toDateTime('2025-01-01 12:00:00')) AS d + SELECT d, category + FROM test_limit_by_all_old_planner + ORDER BY d, category, value, name + LIMIT 2 BY d, category + SETTINGS enable_positional_arguments = 0 +) +GROUP BY d, category +ORDER BY d, category; + +-- Test 13: NULL key handling +INSERT INTO test_limit_by_all_old_planner VALUES (4, NULL, 10, 'n1'), (4, NULL, 20, 'n2'); + +SELECT id, category +FROM test_limit_by_all_old_planner +ORDER BY id, category NULLS FIRST, value +LIMIT 1 BY id, category; + +DROP TABLE test_limit_by_all_old_planner; \ No newline at end of file diff --git a/parser/testdata/03571_lwd_and_projections/ast.json b/parser/testdata/03571_lwd_and_projections/ast.json new file mode 100644 index 000000000..89e0e0152 --- /dev/null +++ b/parser/testdata/03571_lwd_and_projections/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery weird_projections (children 1)" + }, + { + "explain": " Identifier weird_projections" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001307377, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/03571_lwd_and_projections/metadata.json
b/parser/testdata/03571_lwd_and_projections/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03571_lwd_and_projections/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03571_lwd_and_projections/query.sql b/parser/testdata/03571_lwd_and_projections/query.sql new file mode 100644 index 000000000..95c2a481e --- /dev/null +++ b/parser/testdata/03571_lwd_and_projections/query.sql @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS weird_projections; + +CREATE TABLE weird_projections( + `account_id` UInt64, + `user_id` String, + PROJECTION events_by_day_proj + ( + SELECT + account_id, + countDistinct(user_id) AS total_users + GROUP BY + account_id + ) +) +ENGINE = ReplicatedMergeTree('/clickhouse/{database}/tables/test', '1') +ORDER BY (account_id) +SETTINGS index_granularity = 8192, lightweight_mutation_projection_mode = 'rebuild'; + +INSERT INTO weird_projections SELECT 134 as account_id, toString(account_id) as user_id FROM numbers(10000); +INSERT INTO weird_projections SELECT 132 as account_id, toString(account_id) as user_id FROM numbers(10000); + +OPTIMIZE TABLE weird_projections FINAL; + +DELETE FROM weird_projections WHERE account_id = 134; + +DROP TABLE IF EXISTS weird_projections; diff --git a/parser/testdata/03571_nullable_format_digit_groups/ast.json b/parser/testdata/03571_nullable_format_digit_groups/ast.json new file mode 100644 index 000000000..27c01f277 --- /dev/null +++ b/parser/testdata/03571_nullable_format_digit_groups/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001055123, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03571_nullable_format_digit_groups/metadata.json b/parser/testdata/03571_nullable_format_digit_groups/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03571_nullable_format_digit_groups/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03571_nullable_format_digit_groups/query.sql b/parser/testdata/03571_nullable_format_digit_groups/query.sql new file mode 100644 index 000000000..e94155122 --- /dev/null +++ b/parser/testdata/03571_nullable_format_digit_groups/query.sql @@ -0,0 +1,3 @@ +SET output_format_pretty_color = 1; +SET output_format_pretty_highlight_digit_groups = 1; +SELECT toNullable(3646616306303) FORMAT PrettyCompact; diff --git a/parser/testdata/03571_trying_to_get_name_of_not_a_column_asterisk/ast.json b/parser/testdata/03571_trying_to_get_name_of_not_a_column_asterisk/ast.json new file mode 100644 index 000000000..be99d1600 --- /dev/null +++ b/parser/testdata/03571_trying_to_get_name_of_not_a_column_asterisk/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00096038, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03571_trying_to_get_name_of_not_a_column_asterisk/metadata.json b/parser/testdata/03571_trying_to_get_name_of_not_a_column_asterisk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03571_trying_to_get_name_of_not_a_column_asterisk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03571_trying_to_get_name_of_not_a_column_asterisk/query.sql b/parser/testdata/03571_trying_to_get_name_of_not_a_column_asterisk/query.sql new file mode 100644 index 000000000..04d4c987b --- /dev/null +++ b/parser/testdata/03571_trying_to_get_name_of_not_a_column_asterisk/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Int) ENGINE = MergeTree ORDER BY tuple(); +ALTER TABLE t0 MODIFY COLUMN c0 Int TTL indexHint(*); -- { serverError UNKNOWN_IDENTIFIER } +DROP TABLE t0; + +DROP TABLE IF EXISTS 02577_keepermap_delete_update; +CREATE TABLE 02577_keepermap_delete_update (key UInt64, value String, value2 UInt64) PRIMARY KEY(key); +DELETE FROM `02577_keepermap_delete_update` WHERE like(indexHint(*, indexHint(indexHint(toNullable(1.), 0, (20 IS NULL) IS NOT NULL, isNull(indexHint(indexHint(indexHint(indexHint(*), *), isZeroOrNull(materialize(indexHint(*, indexHint(toNullable(toInt128(100) IS NULL, 1.), toLowCardinality(0), isNullable(toNullable(20)) IS NULL, indexHint(isZeroOrNull(15), *, indexHint(*)), *), 100), 15))), indexHint(indexHint(isNullable(materialize(15 IS NOT NULL))), 100, *), 1, indexHint(indexHint(1, indexHint(100, *)), *)), toLowCardinality(15)), indexHint(*)), *)), value, 'Some%string'); -- { serverError UNKNOWN_IDENTIFIER } +DROP TABLE 02577_keepermap_delete_update; diff --git a/parser/testdata/03572_empty_tuple_in_nested_type/ast.json b/parser/testdata/03572_empty_tuple_in_nested_type/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03572_empty_tuple_in_nested_type/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03572_empty_tuple_in_nested_type/metadata.json b/parser/testdata/03572_empty_tuple_in_nested_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03572_empty_tuple_in_nested_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03572_empty_tuple_in_nested_type/query.sql b/parser/testdata/03572_empty_tuple_in_nested_type/query.sql new file mode 100644 index 000000000..9fbb20a4e --- /dev/null +++ b/parser/testdata/03572_empty_tuple_in_nested_type/query.sql @@ -0,0 +1,13 @@ +-- { echo ON } + +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Array(Tuple())) ENGINE = Memory; + +SET max_insert_block_size = 4; + +INSERT INTO TABLE t0 (c0) VALUES ([()]), ([()]), ([()]), ([()]), ([()]), ([()]), ([()]::Array(Tuple())), ([()]), ([(), ()]), ([()]); + +DROP TABLE t0; + +SELECT [(), ()]; diff --git a/parser/testdata/03572_planner_merge_filter_into_join_bug/ast.json b/parser/testdata/03572_planner_merge_filter_into_join_bug/ast.json new file mode 100644 index 000000000..9ce86c82f --- /dev/null +++ b/parser/testdata/03572_planner_merge_filter_into_join_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001309698, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03572_planner_merge_filter_into_join_bug/metadata.json b/parser/testdata/03572_planner_merge_filter_into_join_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03572_planner_merge_filter_into_join_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03572_planner_merge_filter_into_join_bug/query.sql b/parser/testdata/03572_planner_merge_filter_into_join_bug/query.sql new file mode 100644 index 
000000000..15d7cff9d --- /dev/null +++ b/parser/testdata/03572_planner_merge_filter_into_join_bug/query.sql @@ -0,0 +1,11 @@ +SET enable_analyzer = 1; +SET enable_parallel_replicas = 0; + +SELECT table1.id1 AS id1, + table1.date1 AS date1 +FROM + (SELECT 1 AS id1, toDateTime('2025-01-01 01:00:00') AS date1) AS table1 + JOIN + (SELECT 1 AS id2, toDate('2025-01-01') AS date2) AS p + ON table1.id1 = p.id2 +WHERE toDate(table1.date1) = p.date2; diff --git a/parser/testdata/03572_pr_remote_in_subquery/ast.json b/parser/testdata/03572_pr_remote_in_subquery/ast.json new file mode 100644 index 000000000..297626477 --- /dev/null +++ b/parser/testdata/03572_pr_remote_in_subquery/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001131299, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03572_pr_remote_in_subquery/metadata.json b/parser/testdata/03572_pr_remote_in_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03572_pr_remote_in_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03572_pr_remote_in_subquery/query.sql b/parser/testdata/03572_pr_remote_in_subquery/query.sql new file mode 100644 index 000000000..e670d0cb7 --- /dev/null +++ b/parser/testdata/03572_pr_remote_in_subquery/query.sql @@ -0,0 +1,20 @@ +set enable_analyzer = 1; +set enable_parallel_replicas=1, max_parallel_replicas=2, parallel_replicas_local_plan=1; + +select '-- SELECT'; +SELECT * FROM (SELECT dummy AS k FROM remote('127.0.0.{1,2}', system.one)); + +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Int, c1 Int) ENGINE MergeTree() ORDER BY tuple(); +INSERT INTO t0 VALUES(0, 1); + +select '-- UNION'; +( + SELECT 1 x, x y FROM remote('localhost', currentDatabase(), t0) +) +UNION ALL +( + SELECT 1, c1 FROM t0 SETTINGS cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost', parallel_replicas_for_non_replicated_merge_tree = 1 +); + +DROP TABLE t0; diff --git a/parser/testdata/03573_concurrent_hash_scatter_bug/ast.json b/parser/testdata/03573_concurrent_hash_scatter_bug/ast.json new file mode 100644 index 000000000..148cd8c1d --- /dev/null +++ b/parser/testdata/03573_concurrent_hash_scatter_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001152414, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03573_concurrent_hash_scatter_bug/metadata.json b/parser/testdata/03573_concurrent_hash_scatter_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03573_concurrent_hash_scatter_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03573_concurrent_hash_scatter_bug/query.sql b/parser/testdata/03573_concurrent_hash_scatter_bug/query.sql new file mode 100644 index 000000000..9a09a1d14 --- /dev/null +++ b/parser/testdata/03573_concurrent_hash_scatter_bug/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS test_table_join_1; +DROP TABLE IF EXISTS test_table_join_2; + +CREATE TABLE test_table_join_1 ( id UInt8, value String ) ENGINE = TinyLog; +CREATE TABLE test_table_join_2 ( id UInt16, value String ) ENGINE = TinyLog; +INSERT INTO 
test_table_join_1 VALUES (0,'Join_1_Value_0'),(1,'Join_1_Value_1'),(2,'Join_1_Value_2'),(0,'Join_1_Value_0'),(1,'Join_1_Value_1'),(2,'Join_1_Value_2'); +INSERT INTO test_table_join_2 VALUES (0,'Join_2_Value_0'),(1,'Join_2_Value_1'),(3,'Join_2_Value_3'),(0,'Join_2_Value_0'),(1,'Join_2_Value_1'),(3,'Join_2_Value_3'); + +CREATE TABLE t0 (i Int64, j Int16) ENGINE = MergeTree ORDER BY i; +INSERT INTO t0 VALUES (1, 1), (2, 2); + +SET join_algorithm = 'parallel_hash'; + +SELECT i FROM t0 ANY JOIN (SELECT 3 AS k) AS x ON x.k = j; + +INSERT INTO t0 SELECT number, number FROM numbers_mt(100_000) WHERE number != 3; +SELECT i FROM t0 ANY JOIN (SELECT 3 AS k) AS x ON x.k = j; + +SELECT * +FROM test_table_join_1 AS t1 +ANY INNER JOIN test_table_join_2 AS t2 USING (id) +ORDER BY id ASC, t1.value ASC; diff --git a/parser/testdata/03573_create_query_parameters_in_to_in_materialized_view/ast.json b/parser/testdata/03573_create_query_parameters_in_to_in_materialized_view/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03573_create_query_parameters_in_to_in_materialized_view/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03573_create_query_parameters_in_to_in_materialized_view/metadata.json b/parser/testdata/03573_create_query_parameters_in_to_in_materialized_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03573_create_query_parameters_in_to_in_materialized_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03573_create_query_parameters_in_to_in_materialized_view/query.sql b/parser/testdata/03573_create_query_parameters_in_to_in_materialized_view/query.sql new file mode 100644 index 000000000..3467ec547 --- /dev/null +++ b/parser/testdata/03573_create_query_parameters_in_to_in_materialized_view/query.sql @@ -0,0 +1,41 @@ +-- formatting +SELECT formatQuerySingleLine('create materialized view mv_kek to {db:Identifier}.{target_table:Identifier} as select * from null_kek'); +SELECT formatQuerySingleLine('create materialized view mv_kek to {target_table:Identifier} as select * from null_kek'); + +-- table name substitution +CREATE TABLE dst_table ENGINE = MergeTree ORDER BY number AS SELECT number FROM numbers(3); +CREATE TABLE src_table AS dst_table ENGINE = Null; + +SET param_dst_table = 'dst_table'; + +CREATE MATERIALIZED VIEW mv_table TO {dst_table:Identifier} AS SELECT * FROM src_table; + +INSERT INTO src_table SELECT 42; + +SELECT * FROM dst_table ORDER BY number; + +-- strange use case + +DROP TABLE mv_table, dst_table, src_table; + +CREATE TABLE dst_table (`number` UInt32) ENGINE = MergeTree ORDER BY number; +CREATE TABLE src_table AS dst_table ENGINE = Null; + +SET param_dst_table = 'dst_table'; +SET param_src_table = 'src_table'; + +CREATE MATERIALIZED VIEW mv_table TO {dst_table:Identifier} +AS SELECT * +FROM {src_table:Identifier} +WHERE number NOT IN ( + SELECT number + FROM {dst_table:Identifier} +); + +INSERT INTO src_table SELECT 42; +INSERT INTO src_table SELECT 2; + +INSERT INTO src_table SELECT 42; +INSERT INTO src_table SELECT 2; + +SELECT * FROM dst_table ORDER BY number ASC; diff --git a/parser/testdata/03573_json_keys_with_dots/ast.json b/parser/testdata/03573_json_keys_with_dots/ast.json new file mode 100644 index 000000000..be130f0db --- /dev/null +++ b/parser/testdata/03573_json_keys_with_dots/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + +
"rows": 1, + + "statistics": + { + "elapsed": 0.001244638, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03573_json_keys_with_dots/metadata.json b/parser/testdata/03573_json_keys_with_dots/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03573_json_keys_with_dots/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03573_json_keys_with_dots/query.sql b/parser/testdata/03573_json_keys_with_dots/query.sql new file mode 100644 index 000000000..7e93807aa --- /dev/null +++ b/parser/testdata/03573_json_keys_with_dots/query.sql @@ -0,0 +1,9 @@ +SET json_type_escape_dots_in_keys=1; +SET enable_analyzer=1; + +SELECT '{"a" : {"b" : 42}}'::JSON AS json1, '{"a.b" : 42}'::JSON AS json2, JSONAllPaths(json1), JSONAllPaths(json2); +SELECT '{"a.b" : 42, "a" : {"b" : "Hello World!"}}'::JSON AS json, JSONAllPaths(json); +SELECT '{"a.b" : 42, "a" : {"b" : "Hello World!"}}'::JSON AS json, json.`a%2Eb`, json.a.b; +SELECT '{"a.b" : 42, "a" : {"b" : "Hello World!"}}'::JSON AS json, json.`a%2Eb`, json.`a.b`, json.a.b; +SELECT '{"a.b" : 42, "a" : {"b" : "Hello World!"}}'::JSON(`a%2Eb` UInt8) as json, json.`a%2Eb`, toTypeName(json.`a%2Eb`); + diff --git a/parser/testdata/03573_linear_regression_timeseries_functions/ast.json b/parser/testdata/03573_linear_regression_timeseries_functions/ast.json new file mode 100644 index 000000000..5b7e0a2ee --- /dev/null +++ b/parser/testdata/03573_linear_regression_timeseries_functions/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery ts_raw_data (children 3)" + }, + { + "explain": " Identifier ts_raw_data" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration timestamp (children 1)" + }, + { + "explain": " DataType DateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " ColumnDeclaration value (children 1)" + }, + { + "explain": " DataType Float64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier timestamp" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001286892, + "rows_read": 15, + "bytes_read": 555 + } +} diff --git a/parser/testdata/03573_linear_regression_timeseries_functions/metadata.json b/parser/testdata/03573_linear_regression_timeseries_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03573_linear_regression_timeseries_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03573_linear_regression_timeseries_functions/query.sql b/parser/testdata/03573_linear_regression_timeseries_functions/query.sql new file mode 100644 index 000000000..fd9041921 --- /dev/null +++ b/parser/testdata/03573_linear_regression_timeseries_functions/query.sql @@ -0,0 +1,38 @@ +CREATE TABLE ts_raw_data(timestamp DateTime64(3,'UTC'), value Float64) ENGINE = MergeTree() ORDER BY timestamp; + +INSERT INTO ts_raw_data SELECT arrayJoin(*).1::DateTime64(3, 'UTC') AS timestamp, arrayJoin(*).2 AS value +FROM ( +select [ +(1734955421.374, 0), +(1734955436.374, 0), +(1734955451.374, 0), +(1734955466.374, 0), +(1734955481.374, 0), +(1734955496.374, 0), 
+(1734955511.374, 1), +(1734955526.374, 3), +(1734955541.374, 5), +(1734955556.374, 5), +(1734955571.374, 5), +(1734955586.374, 5), +(1734955601.374, 8), +(1734955616.374, 8), +(1734955631.374, 8), +(1734955646.374, 8), +(1734955661.374, 8), +(1734955676.374, 8) +]); + +SELECT groupArraySorted(20)((timestamp::Decimal(20,3), value)) FROM ts_raw_data; + +SET allow_experimental_ts_to_grid_aggregate_function = 1; + +WITH + 1734955380 AS start, 1734955680 AS end, 15 AS step, 300 AS window, 60 as predict_offset, + range(start, end + 1, step) as grid +SELECT + arrayZip(grid, timeSeriesDerivToGrid(start, end, step, window)(toUnixTimestamp(timestamp), value)) as deriv_5m, + arrayZip(grid, timeSeriesPredictLinearToGrid(start, end, step, window, predict_offset)(timestamp, value)) as predict_linear_5m_offset_1m +FROM ts_raw_data FORMAT Vertical; + +DROP TABLE ts_raw_data; diff --git a/parser/testdata/03573_linear_regression_timeseries_functions_various_arguments/ast.json b/parser/testdata/03573_linear_regression_timeseries_functions_various_arguments/ast.json new file mode 100644 index 000000000..a8297d203 --- /dev/null +++ b/parser/testdata/03573_linear_regression_timeseries_functions_various_arguments/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery ts_data (children 3)" + }, + { + "explain": " Identifier ts_data" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration id (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " ColumnDeclaration timestamps (children 1)" + }, + { + "explain": " DataType Array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType DateTime" + }, + { + "explain": " ColumnDeclaration values (children 1)" + }, + { + "explain": " DataType Array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType Float64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier id" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001384504, + "rows_read": 18, + "bytes_read": 663 + } +} diff --git a/parser/testdata/03573_linear_regression_timeseries_functions_various_arguments/metadata.json b/parser/testdata/03573_linear_regression_timeseries_functions_various_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03573_linear_regression_timeseries_functions_various_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03573_linear_regression_timeseries_functions_various_arguments/query.sql b/parser/testdata/03573_linear_regression_timeseries_functions_various_arguments/query.sql new file mode 100644 index 000000000..46930b94b --- /dev/null +++ b/parser/testdata/03573_linear_regression_timeseries_functions_various_arguments/query.sql @@ -0,0 +1,55 @@ +CREATE TABLE ts_data(id UInt64, timestamps Array(DateTime), values Array(Float64)) ENGINE = MergeTree() ORDER BY id; +CREATE TABLE ts_data_nullable(id UInt64, timestamp UInt32, value Nullable(Float64)) ENGINE = MergeTree() ORDER BY id; + +INSERT INTO ts_data VALUES (1, [10,20], [1,2]), (2, [30,40,50], [3,4]), (3, [60], [6]), (4, [], []), (5, [80], [8,9]), (6, [100], [10]); +INSERT INTO ts_data_nullable SELECT id, 
timestamp, value FROM ts_data ARRAY JOIN timestamps as timestamp, arrayResize(values, length(timestamps), NULL) AS value; + +SET allow_experimental_time_series_aggregate_functions = 1; + +-- Fail because of rows with non-matching lengths of timestamps and values +SELECT timeSeriesDerivToGrid(10, 120, 10, 10)(timestamps, values) FROM ts_data; -- {serverError BAD_ARGUMENTS} +SELECT timeSeriesPredictLinearToGrid(10, 120, 10, 10, 60)(timestamps, values) FROM ts_data; -- {serverError BAD_ARGUMENTS} + +-- Filter out invalid rows where the lengths of the timestamps and values arrays do not match +SELECT 'staleness = 60:'; +SELECT timeSeriesDerivToGrid(10, 120, 10, 60)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); +SELECT timeSeriesDerivToGridIf(10, 120, 10, 60)(timestamps, values, length(timestamps) = length(values)) FROM ts_data; +SELECT timeSeriesDerivToGridIf(10, 120, 10, 60)(timestamps, values, toNullable(length(timestamps) = length(values))) FROM ts_data; + +SELECT timeSeriesPredictLinearToGrid(10, 120, 10, 60, 60)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); +SELECT timeSeriesPredictLinearToGridIf(10, 120, 10, 60, 60)(timestamps, values, length(timestamps) = length(values)) FROM ts_data; +SELECT timeSeriesPredictLinearToGridIf(10, 120, 10, 60, 60)(timestamps, values, toNullable(length(timestamps) = length(values))) FROM ts_data; + +SELECT timeSeriesPredictLinearToGrid(10, 120, 10, 60, 60.5)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); +SELECT timeSeriesPredictLinearToGridIf(10, 120, 10, 60, 60.5)(timestamps, values, length(timestamps) = length(values)) FROM ts_data; +SELECT timeSeriesPredictLinearToGridIf(10, 120, 10, 60, 60.5)(timestamps, values, toNullable(length(timestamps) = length(values))) FROM ts_data; + +SELECT timeSeriesPredictLinearToGrid(10, 120, 10, 60, -60)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); +SELECT timeSeriesPredictLinearToGridIf(10, 120, 10, 60, -60)(timestamps, values, length(timestamps) = length(values)) FROM ts_data; +SELECT timeSeriesPredictLinearToGridIf(10, 120, 10, 60, -60)(timestamps, values, toNullable(length(timestamps) = length(values))) FROM ts_data; + +SELECT 'staleness = 61:'; +SELECT timeSeriesDerivToGrid(10, 120, 10, 61)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); +SELECT timeSeriesPredictLinearToGrid(10, 120, 10, 61, 60)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); +SELECT timeSeriesPredictLinearToGrid(10, 120, 10, 61, 60.5)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); +SELECT timeSeriesPredictLinearToGrid(10, 120, 10, 61, -60)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); + +SELECT * FROM ts_data_nullable WHERE value IS NULL AND id < 5; + +-- Test with Nullable arguments +SELECT timeSeriesResampleToGridWithStaleness(15, 125, 10, 10)(arrayResize(timestamps, arrayMin([length(timestamps), length(values)]) as min_len), arrayResize(values, min_len)) FROM ts_data; +SELECT timeSeriesResampleToGridWithStaleness(15, 125, 10, 10)(timestamp, value) FROM ts_data_nullable; +SELECT timeSeriesResampleToGridWithStalenessIf(15, 125, 10, 10)(timestamp, value, id < 5) FROM ts_data_nullable; + +SELECT timeSeriesResampleToGridWithStaleness(15, 125, 10, 10)([10, 20, 30]::Array(UInt32), [1.0, 2.0, NULL]); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT timeSeriesResampleToGridWithStaleness(15, 125, 10, 10)([10, NULL,
30]::Array(Nullable(UInt32)), [1.0, 2.0, 3.0]); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +-- End timestamp not aligned by step +SELECT timeSeriesDerivToGrid(100, 120, 15, 20)([89, 101, 109]::Array(UInt32), [89, 101, 109]::Array(Float32)); +SELECT timeSeriesPredictLinearToGrid(100, 120, 15, 20, 60)([89, 101, 109]::Array(UInt32), [89, 101, 109]::Array(Float32)); + +SELECT timeSeriesDerivToGrid(100, 150, 10, 30)([1, 2, 3]::Array(UInt32), 1.); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT timeSeriesPredictLinearToGrid(100, 150, 10, 30, 60)([1, 2, 3]::Array(UInt32), 1.); --{serverError ILLEGAL_TYPE_OF_ARGUMENT} + +DROP TABLE ts_data; +DROP TABLE ts_data_nullable; diff --git a/parser/testdata/03573_planner_merge_filter_into_join_bug_2/ast.json b/parser/testdata/03573_planner_merge_filter_into_join_bug_2/ast.json new file mode 100644 index 000000000..27c6687b7 --- /dev/null +++ b/parser/testdata/03573_planner_merge_filter_into_join_bug_2/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001338103, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03573_planner_merge_filter_into_join_bug_2/metadata.json b/parser/testdata/03573_planner_merge_filter_into_join_bug_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03573_planner_merge_filter_into_join_bug_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03573_planner_merge_filter_into_join_bug_2/query.sql b/parser/testdata/03573_planner_merge_filter_into_join_bug_2/query.sql new file mode 100644 index 000000000..c56cbaeac --- /dev/null +++ b/parser/testdata/03573_planner_merge_filter_into_join_bug_2/query.sql @@ -0,0 +1,10 @@ +SET enable_analyzer = 1; +SET enable_parallel_replicas = 0; + +SELECT 1 +FROM + (SELECT 1 c0) AS tx + JOIN + (SELECT 1 c0) AS ty + ON true +WHERE toInt32(tx.c0) = ty.c0; diff --git a/parser/testdata/03574_analyzer_merge_filter_into_join_bug/ast.json b/parser/testdata/03574_analyzer_merge_filter_into_join_bug/ast.json new file mode 100644 index 000000000..04fb1dc3c --- /dev/null +++ b/parser/testdata/03574_analyzer_merge_filter_into_join_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001551806, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03574_analyzer_merge_filter_into_join_bug/metadata.json b/parser/testdata/03574_analyzer_merge_filter_into_join_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03574_analyzer_merge_filter_into_join_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03574_analyzer_merge_filter_into_join_bug/query.sql b/parser/testdata/03574_analyzer_merge_filter_into_join_bug/query.sql new file mode 100644 index 000000000..8002e62c9 --- /dev/null +++ b/parser/testdata/03574_analyzer_merge_filter_into_join_bug/query.sql @@ -0,0 +1,13 @@ +SET enable_analyzer = 1; +SET allow_experimental_correlated_subqueries = 1; +SET enable_parallel_replicas = 0; + +CREATE TABLE users (uid Int16, name String, age Int16) ORDER BY uid; + +INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users VALUES (6666, 'Ksenia', 48); +INSERT INTO users VALUES (8888, 'Alice', 50); + +SELECT name, (SELECT count() FROM numbers(50) WHERE 
number = age) +FROM users +ORDER BY name; diff --git a/parser/testdata/03574_parallel_replicas_last_right_join/ast.json b/parser/testdata/03574_parallel_replicas_last_right_join/ast.json new file mode 100644 index 000000000..da0ac3afe --- /dev/null +++ b/parser/testdata/03574_parallel_replicas_last_right_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001483613, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03574_parallel_replicas_last_right_join/metadata.json b/parser/testdata/03574_parallel_replicas_last_right_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03574_parallel_replicas_last_right_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03574_parallel_replicas_last_right_join/query.sql b/parser/testdata/03574_parallel_replicas_last_right_join/query.sql new file mode 100644 index 000000000..6e32315ac --- /dev/null +++ b/parser/testdata/03574_parallel_replicas_last_right_join/query.sql @@ -0,0 +1,28 @@ +SET allow_experimental_parallel_reading_from_replicas=1; +SET max_parallel_replicas=3; +SET enable_analyzer=1; +SET parallel_replicas_for_non_replicated_merge_tree=1; +SET cluster_for_parallel_replicas='parallel_replicas'; + +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +CREATE TABLE t0 (c0 Int) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO t0 VALUES (1), (2); +CREATE TABLE t1 (c0 Int) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO t1 VALUES (2), (3); +CREATE TABLE t2 (c0 Int) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO t2 VALUES (3), (4); + +SELECT * FROM ( + SELECT 1 FROM remote('localhost:9000', currentDatabase(), 't0') AS t0 + JOIN t1 ON t0.c0 = t1.c0 + RIGHT JOIN t2 ON t2.c0 = t1.c0 +) FORMAT Null; + + +SELECT * FROM ( + SELECT 1 FROM remote('localhost:9000', currentDatabase(), 't0') AS t0 + JOIN t1 ON TRUE + RIGHT JOIN t2 ON TRUE +) FORMAT Null; diff --git a/parser/testdata/03575_analyzer_merge_filter_into_join_bug_2/ast.json b/parser/testdata/03575_analyzer_merge_filter_into_join_bug_2/ast.json new file mode 100644 index 000000000..f91584f2b --- /dev/null +++ b/parser/testdata/03575_analyzer_merge_filter_into_join_bug_2/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001403747, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03575_analyzer_merge_filter_into_join_bug_2/metadata.json b/parser/testdata/03575_analyzer_merge_filter_into_join_bug_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03575_analyzer_merge_filter_into_join_bug_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03575_analyzer_merge_filter_into_join_bug_2/query.sql b/parser/testdata/03575_analyzer_merge_filter_into_join_bug_2/query.sql new file mode 100644 index 000000000..b5a1c2f2c --- /dev/null +++ b/parser/testdata/03575_analyzer_merge_filter_into_join_bug_2/query.sql @@ -0,0 +1,21 @@ +SET enable_analyzer = 1; +SET allow_experimental_correlated_subqueries = 1; +SET enable_parallel_replicas = 0; + +SELECT number +FROM numbers(10) AS t +WHERE exists(( + SELECT * + FROM + ( + SELECT number * 2 AS number + FROM + ( + SELECT number + FROM numbers(6) + WHERE 
(number + 2) < t.number + ) + ) + WHERE number = t.number +)) +ORDER BY number; diff --git a/parser/testdata/03576_analyzer_distributed_correlated_subquery/ast.json b/parser/testdata/03576_analyzer_distributed_correlated_subquery/ast.json new file mode 100644 index 000000000..ea19d7073 --- /dev/null +++ b/parser/testdata/03576_analyzer_distributed_correlated_subquery/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001471413, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03576_analyzer_distributed_correlated_subquery/metadata.json b/parser/testdata/03576_analyzer_distributed_correlated_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03576_analyzer_distributed_correlated_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03576_analyzer_distributed_correlated_subquery/query.sql b/parser/testdata/03576_analyzer_distributed_correlated_subquery/query.sql new file mode 100644 index 000000000..1788e1d73 --- /dev/null +++ b/parser/testdata/03576_analyzer_distributed_correlated_subquery/query.sql @@ -0,0 +1,5 @@ +SET enable_analyzer = 1; + +CREATE TABLE t0 (c0 Int) ENGINE = MergeTree() ORDER BY tuple(); +CREATE TABLE t1 (c0 Int) ENGINE = Distributed('test_cluster_two_shards', default, t0); +SELECT (SELECT _shard_num) FROM t1 GROUP BY _shard_num SETTINGS allow_experimental_correlated_subqueries = 1; -- { serverError NOT_IMPLEMENTED } diff --git a/parser/testdata/03576_analyzer_recursive_window/ast.json b/parser/testdata/03576_analyzer_recursive_window/ast.json new file mode 100644 index 000000000..ff44e8a99 --- /dev/null +++ b/parser/testdata/03576_analyzer_recursive_window/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001398549, + "rows_read": 5, + "bytes_read": 177 + } +} diff --git a/parser/testdata/03576_analyzer_recursive_window/metadata.json b/parser/testdata/03576_analyzer_recursive_window/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03576_analyzer_recursive_window/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03576_analyzer_recursive_window/query.sql b/parser/testdata/03576_analyzer_recursive_window/query.sql new file mode 100644 index 000000000..80ba78f5d --- /dev/null +++ b/parser/testdata/03576_analyzer_recursive_window/query.sql @@ -0,0 +1,7 @@ +SELECT 1 +WINDOW w0 AS (ORDER BY min(1) OVER (w0)) +SETTINGS allow_experimental_analyzer = 1; -- { serverError UNSUPPORTED_METHOD } + +SELECT 1 +WINDOW w0 AS (ORDER BY min(1) OVER (w0)) +SETTINGS allow_experimental_analyzer = 0; -- { serverError ILLEGAL_AGGREGATION } diff --git a/parser/testdata/03577_assert_on_estimated_block_size_bytes/ast.json b/parser/testdata/03577_assert_on_estimated_block_size_bytes/ast.json new file mode 100644 index 000000000..cc4bff5ef --- /dev/null +++ b/parser/testdata/03577_assert_on_estimated_block_size_bytes/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + 
"data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier c0" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function generateRandom (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal 'c0 Tuple()'" + }, + { + "explain": " Literal UInt64_17599311795067409937" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001538236, + "rows_read": 15, + "bytes_read": 579 + } +} diff --git a/parser/testdata/03577_assert_on_estimated_block_size_bytes/metadata.json b/parser/testdata/03577_assert_on_estimated_block_size_bytes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03577_assert_on_estimated_block_size_bytes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03577_assert_on_estimated_block_size_bytes/query.sql b/parser/testdata/03577_assert_on_estimated_block_size_bytes/query.sql new file mode 100644 index 000000000..3d7cff92e --- /dev/null +++ b/parser/testdata/03577_assert_on_estimated_block_size_bytes/query.sql @@ -0,0 +1 @@ +SELECT c0 FROM generateRandom('c0 Tuple()', 17599311795067409937, 1, 1) LIMIT 1; diff --git a/parser/testdata/03577_dynamic_json_update_issue/ast.json b/parser/testdata/03577_dynamic_json_update_issue/ast.json new file mode 100644 index 000000000..8c779e0a7 --- /dev/null +++ b/parser/testdata/03577_dynamic_json_update_issue/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001376367, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03577_dynamic_json_update_issue/metadata.json b/parser/testdata/03577_dynamic_json_update_issue/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03577_dynamic_json_update_issue/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03577_dynamic_json_update_issue/query.sql b/parser/testdata/03577_dynamic_json_update_issue/query.sql new file mode 100644 index 000000000..de46fb055 --- /dev/null +++ b/parser/testdata/03577_dynamic_json_update_issue/query.sql @@ -0,0 +1,10 @@ +SET mutations_sync = 1; + +DROP TABLE IF EXISTS test_updates; +CREATE TABLE test_updates (`id` UInt64, `json` JSON) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1, index_granularity = 5, index_granularity_bytes = '10Mi'; +INSERT INTO test_updates SELECT number, '{"a" : 42}' FROM numbers(10); +ALTER TABLE test_updates (UPDATE json = '{"a" : [1, 2, 3]}' WHERE id >= 5); +SELECT * FROM test_updates; +ALTER TABLE test_updates (UPDATE json = '{"a" : [1, 2, 3]}' WHERE 5 >= id); +SELECT * FROM test_updates; +DROP TABLE test_updates; diff --git a/parser/testdata/03577_hash_output_format/ast.json b/parser/testdata/03577_hash_output_format/ast.json new file mode 100644 index 000000000..e2ceaf2bd --- /dev/null +++ b/parser/testdata/03577_hash_output_format/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + 
"name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Identifier Hash" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001665945, + "rows_read": 11, + "bytes_read": 408 + } +} diff --git a/parser/testdata/03577_hash_output_format/metadata.json b/parser/testdata/03577_hash_output_format/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03577_hash_output_format/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03577_hash_output_format/query.sql b/parser/testdata/03577_hash_output_format/query.sql new file mode 100644 index 000000000..1b21ac2ad --- /dev/null +++ b/parser/testdata/03577_hash_output_format/query.sql @@ -0,0 +1,3 @@ +SELECT number FROM system.numbers LIMIT 1 FORMAT Hash; +SELECT number FROM system.numbers LIMIT 20 FORMAT Hash; +SELECT number AS hello, toString(number) AS world, (hello, world) AS tuple, nullIf(hello % 3, 0) AS sometimes_nulls FROM system.numbers LIMIT 20 FORMAT Hash; diff --git a/parser/testdata/03577_server_constant_folding/ast.json b/parser/testdata/03577_server_constant_folding/ast.json new file mode 100644 index 000000000..f1373b1aa --- /dev/null +++ b/parser/testdata/03577_server_constant_folding/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001416584, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03577_server_constant_folding/metadata.json b/parser/testdata/03577_server_constant_folding/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03577_server_constant_folding/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03577_server_constant_folding/query.sql b/parser/testdata/03577_server_constant_folding/query.sql new file mode 100644 index 000000000..2db89e065 --- /dev/null +++ b/parser/testdata/03577_server_constant_folding/query.sql @@ -0,0 +1,177 @@ +SET prefer_localhost_replica = 0; + +SELECT '-- IN subquery'; + +SELECT shardNum(), number +FROM remote('127.0.0.{1..3}', numbers(100)) +WHERE number IN ( + SELECT number FROM numbers(10) WHERE number = shardNum() +) +ORDER BY 1, 2; + +SELECT '-- GLOBAL IN subquery'; + +SELECT shardNum(), number +FROM remote('127.0.0.{1..3}', numbers(100)) +WHERE number GLOBAL IN ( + SELECT number FROM numbers(10) WHERE number = shardNum() +) +ORDER BY 1, 2; + +SELECT '-- IN union'; + +SELECT shardNum(), number +FROM remote('127.0.0.{1..3}', numbers(100)) +WHERE number IN ( + SELECT number FROM numbers(10) WHERE number = shardNum() + UNION ALL + SELECT number FROM numbers(10) WHERE number = shardNum() * 2 +) +ORDER BY 1, 2; + +SELECT '-- GLOBAL IN union'; + +SELECT shardNum(), number +FROM remote('127.0.0.{1..3}', numbers(100)) +WHERE number GLOBAL IN ( + SELECT number FROM numbers(10) WHERE number = shardNum() + UNION ALL + SELECT number FROM 
numbers(10) WHERE number = shardNum() * 2 +) +ORDER BY 1, 2; + +SELECT '-- IN CTE subquery'; + +WITH flt AS ( + SELECT number FROM numbers(10) WHERE number = shardNum() +) +SELECT shardNum(), number +FROM remote('127.0.0.{1..3}', numbers(100)) +WHERE number IN (flt) +ORDER BY 1, 2; + +SELECT '-- GLOBAL IN CTE subquery'; + +WITH flt AS ( + SELECT number FROM numbers(10) WHERE number = shardNum() +) +SELECT shardNum(), number +FROM remote('127.0.0.{1..3}', numbers(100)) +WHERE number GLOBAL IN (flt) +ORDER BY 1, 2; + +SELECT '-- IN CTE union'; + +WITH flt AS ( + SELECT number FROM numbers(10) WHERE number = shardNum() + UNION ALL + SELECT number FROM numbers(10) WHERE number = shardNum() * 2 +) +SELECT shardNum(), number +FROM remote('127.0.0.{1..3}', numbers(100)) +WHERE number IN (flt) +ORDER BY 1, 2; + +SELECT '-- GLOBAL IN CTE union'; + +WITH flt AS ( + SELECT number FROM numbers(10) WHERE number = shardNum() + UNION ALL + SELECT number FROM numbers(10) WHERE number = shardNum() * 2 +) +SELECT shardNum(), number +FROM remote('127.0.0.{1..3}', numbers(100)) +WHERE number GLOBAL IN (flt) +ORDER BY 1, 2; + +-- The old analyzer has some issues with aliases for `remote()`, so the queries +-- executed without the analyzer are slightly different; they are needed mostly +-- to confirm the same behavior for resolving the shardNum() result value. + +SELECT '-- JOIN subquery, analyzer'; + +SELECT shardNum(), tab.number +FROM remote('127.0.0.{1..3}', numbers(100)) tab + ALL JOIN ( + SELECT number FROM numbers(10) WHERE number = shardNum() + ) flt ON tab.number = flt.number +ORDER BY 1, 2 +SETTINGS enable_analyzer = 1; + +SELECT '-- JOIN subquery, w/o analyzer'; + +SELECT shardNum(), number +FROM remote('127.0.0.{1..3}', numbers(100)) + ALL JOIN ( + SELECT number AS flt_number FROM numbers(10) WHERE number = shardNum() + ) flt ON number = flt_number +ORDER BY 1, 2 +SETTINGS enable_analyzer = 0, joined_subquery_requires_alias = 0; + +SELECT '-- GLOBAL JOIN subquery, analyzer'; + +SELECT shardNum(), tab.number +FROM remote('127.0.0.{1..3}', numbers(100)) tab + GLOBAL ALL JOIN ( + SELECT number FROM numbers(10) WHERE number = shardNum() + ) flt ON tab.number = flt.number +ORDER BY 1, 2 +SETTINGS enable_analyzer = 1; + +SELECT '-- GLOBAL JOIN subquery, w/o analyzer'; + +SELECT shardNum(), number +FROM remote('127.0.0.{1..3}', numbers(100)) + GLOBAL ALL JOIN ( + SELECT number AS flt_number FROM numbers(10) WHERE number = shardNum() + ) flt ON number = flt_number +ORDER BY 1, 2 +SETTINGS enable_analyzer = 0, joined_subquery_requires_alias = 0; + +SELECT '-- JOIN union, analyzer'; + +SELECT shardNum(), tab.number +FROM remote('127.0.0.{1..3}', numbers(100)) tab + ALL JOIN ( + SELECT number FROM numbers(10) WHERE number = shardNum() + UNION ALL + SELECT number FROM numbers(10) WHERE number = shardNum() * 2 + ) flt ON tab.number = flt.number +ORDER BY 1, 2 +SETTINGS enable_analyzer = 1; + +SELECT '-- JOIN union, w/o analyzer'; + +SELECT shardNum(), number +FROM remote('127.0.0.{1..3}', numbers(100)) + ALL JOIN ( + SELECT number AS flt_number FROM numbers(10) WHERE number = shardNum() + UNION ALL + SELECT number AS flt_number FROM numbers(10) WHERE number = shardNum() * 2 + ) flt ON number = flt_number +ORDER BY 1, 2 +SETTINGS enable_analyzer = 0, joined_subquery_requires_alias = 0; + +SELECT '-- GLOBAL JOIN union, analyzer'; + +SELECT shardNum(), tab.number +FROM remote('127.0.0.{1..3}', numbers(100)) tab + GLOBAL ALL JOIN ( + SELECT number FROM numbers(10) WHERE number = shardNum() + UNION ALL + SELECT number FROM numbers(10) WHERE
number = shardNum() * 2 + ) flt ON tab.number = flt.number +ORDER BY 1, 2 +SETTINGS enable_analyzer = 1; + +SELECT '-- GLOBAL JOIN union, w/o analyzer'; + +SELECT shardNum(), number +FROM remote('127.0.0.{1..3}', numbers(100)) + GLOBAL ALL JOIN ( + SELECT number AS flt_number FROM numbers(10) WHERE number = shardNum() + UNION ALL + SELECT number AS flt_number FROM numbers(10) WHERE number = shardNum() * 2 + ) flt ON number = flt_number +ORDER BY 1, 2 +SETTINGS enable_analyzer = 0, joined_subquery_requires_alias = 0; diff --git a/parser/testdata/03577_temporary_table_as/ast.json b/parser/testdata/03577_temporary_table_as/ast.json new file mode 100644 index 000000000..b1b4edb10 --- /dev/null +++ b/parser/testdata/03577_temporary_table_as/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001506654, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03577_temporary_table_as/metadata.json b/parser/testdata/03577_temporary_table_as/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03577_temporary_table_as/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03577_temporary_table_as/query.sql b/parser/testdata/03577_temporary_table_as/query.sql new file mode 100644 index 000000000..8613d8edb --- /dev/null +++ b/parser/testdata/03577_temporary_table_as/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS test; +create table test (a UInt8, b String, c Nullable(Float), date Date) Engine=MergeTree ORDER BY (a,b) PARTITION BY date; +SET default_temporary_table_engine = 'MergeTree'; +CREATE TEMPORARY TABLE test3 AS test; +DROP TABLE test; +SHOW CREATE TEMPORARY TABLE test3; diff --git a/parser/testdata/03577_ub_max_column_in_block_size_bytes/ast.json b/parser/testdata/03577_ub_max_column_in_block_size_bytes/ast.json new file mode 100644 index 000000000..083d11f01 --- /dev/null +++ b/parser/testdata/03577_ub_max_column_in_block_size_bytes/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t0 (children 3)" + }, + { + "explain": " Identifier t0" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration c0 (children 1)" + }, + { + "explain": " DataType Bool" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001121575, + "rows_read": 11, + "bytes_read": 371 + } +} diff --git a/parser/testdata/03577_ub_max_column_in_block_size_bytes/metadata.json b/parser/testdata/03577_ub_max_column_in_block_size_bytes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03577_ub_max_column_in_block_size_bytes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03577_ub_max_column_in_block_size_bytes/query.sql b/parser/testdata/03577_ub_max_column_in_block_size_bytes/query.sql new file mode 100644 index 000000000..d7cf78b4d --- /dev/null +++ b/parser/testdata/03577_ub_max_column_in_block_size_bytes/query.sql @@ -0,0 +1,5 @@ +CREATE 
TABLE t0 (c0 Bool) ENGINE = MergeTree() ORDER BY tuple(); + +INSERT INTO TABLE t0 (c0) VALUES (TRUE); + +SELECT c0 FROM t0 SETTINGS preferred_max_column_in_block_size_bytes = 18446744073709551615; diff --git a/parser/testdata/03577_vairant_lazy_materialization_bug/ast.json b/parser/testdata/03577_vairant_lazy_materialization_bug/ast.json new file mode 100644 index 000000000..e7e1c2106 --- /dev/null +++ b/parser/testdata/03577_vairant_lazy_materialization_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00119975, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03577_vairant_lazy_materialization_bug/metadata.json b/parser/testdata/03577_vairant_lazy_materialization_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03577_vairant_lazy_materialization_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03577_vairant_lazy_materialization_bug/query.sql b/parser/testdata/03577_vairant_lazy_materialization_bug/query.sql new file mode 100644 index 000000000..0db5af125 --- /dev/null +++ b/parser/testdata/03577_vairant_lazy_materialization_bug/query.sql @@ -0,0 +1,6 @@ +SET query_plan_optimize_lazy_materialization=1; +SET query_plan_max_limit_for_lazy_materialization=10; +CREATE TABLE test (x Int, d Dynamic) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0; +INSERT INTO test SELECT number, number FROM numbers(3); +SELECT d.Date, d, d.String FROM test ORDER BY materialize(1), x DESC SETTINGS limit = 1; + diff --git a/parser/testdata/03578_distributed_kv_global_in/ast.json b/parser/testdata/03578_distributed_kv_global_in/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03578_distributed_kv_global_in/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03578_distributed_kv_global_in/metadata.json b/parser/testdata/03578_distributed_kv_global_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03578_distributed_kv_global_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03578_distributed_kv_global_in/query.sql b/parser/testdata/03578_distributed_kv_global_in/query.sql new file mode 100644 index 000000000..79bfd3a28 --- /dev/null +++ b/parser/testdata/03578_distributed_kv_global_in/query.sql @@ -0,0 +1,101 @@ +-- Tags: no-fasttest, use-rocksdb + +SET prefer_localhost_replica = 0; + +DROP TABLE IF EXISTS 03578_rocksdb_local, 03578_rocksdb_dist; + +CREATE TABLE IF NOT EXISTS 03578_rocksdb_local +( + key UInt64, + val String +) +ENGINE = EmbeddedRocksDB() +PRIMARY KEY key; + +CREATE TABLE IF NOT EXISTS 03578_rocksdb_dist +( + key UInt64, + val String +) +ENGINE = Distributed(test_cluster_two_shards_localhost, currentDatabase(), 03578_rocksdb_local); + +INSERT INTO 03578_rocksdb_local SELECT number, 'val-' || number FROM numbers(1000); + +SELECT '-- RocksDB: set'; + +SELECT * +FROM 03578_rocksdb_dist +WHERE key GLOBAL IN (0, 1, 2) +ORDER BY 1, 2; + +SELECT '-- RocksDB: subquery'; + +SELECT * +FROM 03578_rocksdb_dist +WHERE key GLOBAL IN ( + SELECT number FROM numbers(3) +) +ORDER BY 1, 2; + +SYSTEM FLUSH LOGS query_log; + +SELECT '-- Rows read:'; + +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() + AND type = 'QueryFinish' + AND query LIKE '%FROM 03578_rocksdb_dist%' + 
AND is_initial_query +ORDER BY event_time_microseconds; + +DROP TABLE 03578_rocksdb_local, 03578_rocksdb_dist; + +DROP TABLE IF EXISTS 03578_keepermap_local, 03578_keepermap_dist; + +CREATE TABLE IF NOT EXISTS 03578_keepermap_local +( + key UInt64, + val String +) +ENGINE = KeeperMap('/' || currentDatabase() || '/test_03578_global_in') +PRIMARY KEY (key); + +CREATE TABLE IF NOT EXISTS 03578_keepermap_dist +( + key UInt64, + val String +) +ENGINE = Distributed(test_cluster_two_shards_localhost, currentDatabase(), 03578_keepermap_local); + +INSERT INTO 03578_keepermap_local SELECT number, 'val-' || number FROM numbers(1000); + +SELECT '-- KeeperMap: set'; + +SELECT * +FROM 03578_keepermap_dist +WHERE key GLOBAL IN (0, 1, 2) +ORDER BY 1, 2; + +SELECT '-- KeeperMap: subquery'; + +SELECT * +FROM 03578_keepermap_dist +WHERE key GLOBAL IN ( + SELECT number FROM numbers(3) +) +ORDER BY 1, 2; + +SYSTEM FLUSH LOGS query_log; + +SELECT '-- Rows read:'; + +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() + AND type = 'QueryFinish' + AND query LIKE '%FROM 03578_keepermap_dist%' + AND is_initial_query +ORDER BY event_time_microseconds; + +DROP TABLE IF EXISTS 03578_keepermap_local, 03578_keepermap_dist; diff --git a/parser/testdata/03578_kv_in_type_casts/ast.json b/parser/testdata/03578_kv_in_type_casts/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03578_kv_in_type_casts/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03578_kv_in_type_casts/metadata.json b/parser/testdata/03578_kv_in_type_casts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03578_kv_in_type_casts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03578_kv_in_type_casts/query.sql b/parser/testdata/03578_kv_in_type_casts/query.sql new file mode 100644 index 000000000..f8166d465 --- /dev/null +++ b/parser/testdata/03578_kv_in_type_casts/query.sql @@ -0,0 +1,185 @@ +-- Tags: no-fasttest, use-rocksdb + +DROP TABLE IF EXISTS 03578_rocksdb; + +CREATE TABLE IF NOT EXISTS 03578_rocksdb +( + key UInt16, + val String +) +ENGINE = EmbeddedRocksDB() +PRIMARY KEY key; + +INSERT INTO 03578_rocksdb SELECT number, 'val-' || number FROM numbers(100); + +SELECT '-- RocksDB: set safe to cast'; +SELECT * FROM 03578_rocksdb WHERE key IN (0::UInt8, 1::UInt8) ORDER BY 1, 2; + +SELECT '-- RocksDB: set not safe to cast'; +SELECT * FROM 03578_rocksdb WHERE key IN (0::UInt32, 1::UInt32) ORDER BY 1, 2; + +SELECT '-- RocksDB: set with overflow'; +SELECT * FROM 03578_rocksdb WHERE key IN (0::UInt32, 1000000::UInt32) ORDER BY 1, 2; + +SELECT '-- RocksDB: set with cast failure'; +SELECT * FROM 03578_rocksdb WHERE key IN ('0', 'non-number') ORDER BY 1, 2; -- { serverError TYPE_MISMATCH } + +SELECT '-- RocksDB: subquery safe to cast'; +SELECT * FROM 03578_rocksdb WHERE key IN (SELECT 0::UInt8 UNION ALL SELECT 1::UInt8) ORDER BY 1, 2; + +SELECT '-- RocksDB: subquery not safe to cast'; +SELECT * FROM 03578_rocksdb WHERE key IN (SELECT 0::UInt32 UNION ALL SELECT 1::UInt32) ORDER BY 1, 2; + +SELECT '-- RocksDB: subquery with overflow'; +SELECT * FROM 03578_rocksdb WHERE key IN (SELECT 0::UInt32 UNION ALL SELECT 1000000::UInt32) ORDER BY 1, 2; + +SELECT '-- RocksDB: subquery with cast failure'; +SELECT * FROM 03578_rocksdb WHERE key IN (SELECT '0' UNION ALL SELECT 'non-number') ORDER BY 1, 2; + +SYSTEM FLUSH LOGS query_log; + +SELECT '-- Rows read:'; + +SELECT read_rows +FROM system.query_log +WHERE 
current_database = currentDatabase() + AND type = 'QueryFinish' + AND query LIKE '%FROM 03578_rocksdb%' + AND is_initial_query +ORDER BY event_time_microseconds; + +DROP TABLE 03578_rocksdb; + +SELECT ''; + +DROP TABLE IF EXISTS 03578_rocksdb_nullable; + +CREATE TABLE IF NOT EXISTS 03578_rocksdb_nullable +( + key Nullable(UInt16), + val String +) +ENGINE = EmbeddedRocksDB() +PRIMARY KEY key; + +INSERT INTO 03578_rocksdb_nullable SELECT null, 'val-null'; +INSERT INTO 03578_rocksdb_nullable SELECT number, 'val-' || number FROM numbers(99); + +SELECT '-- RocksDB null: set without null'; +SELECT * FROM 03578_rocksdb_nullable WHERE key IN (0, 1) ORDER BY 1, 2; + +SELECT '-- RocksDB null: set with null'; +SELECT * FROM 03578_rocksdb_nullable WHERE key IN (null, 1) ORDER BY 1, 2 SETTINGS transform_null_in = 1; + +SELECT '-- RocksDB null: subquery without null'; +SELECT * FROM 03578_rocksdb_nullable WHERE key IN (SELECT 0 UNION ALL SELECT 1) ORDER BY 1, 2; + +SELECT '-- RocksDB null: subquery with null'; +SELECT * FROM 03578_rocksdb_nullable WHERE key IN (SELECT null UNION ALL SELECT 1) ORDER BY 1, 2 SETTINGS transform_null_in = 1; + +SYSTEM FLUSH LOGS query_log; + +SELECT '-- Rows read:'; + +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() + AND type = 'QueryFinish' + AND query LIKE '%FROM 03578_rocksdb_nullable%' + AND is_initial_query +ORDER BY event_time_microseconds; + +DROP TABLE 03578_rocksdb_nullable; + +SELECT ''; + +DROP TABLE IF EXISTS 03578_keepermap; + +CREATE TABLE IF NOT EXISTS 03578_keepermap +( + key UInt16, + val String +) +ENGINE = KeeperMap('/' || currentDatabase() || '/test_03578_type_cast') +PRIMARY KEY key; + +INSERT INTO 03578_keepermap SELECT number, 'val-' || number FROM numbers(100); + +SELECT '-- KeeperMap: set safe to cast'; +SELECT * FROM 03578_keepermap WHERE key IN (0::UInt8, 1::UInt8) ORDER BY 1, 2; + +SELECT '-- KeeperMap: set not safe to cast'; +SELECT * FROM 03578_keepermap WHERE key IN (0::UInt32, 1::UInt32) ORDER BY 1, 2; + +SELECT '-- KeeperMap: set with overflow'; +SELECT * FROM 03578_keepermap WHERE key IN (0::UInt32, 1000000::UInt32) ORDER BY 1, 2; + +SELECT '-- KeeperMap: set with cast failure'; +SELECT * FROM 03578_keepermap WHERE key IN ('0', 'non-number') ORDER BY 1, 2; -- { serverError TYPE_MISMATCH } + +SELECT '-- KeeperMap: subquery safe to cast'; +SELECT * FROM 03578_keepermap WHERE key IN (SELECT 0::UInt8 UNION ALL SELECT 1::UInt8) ORDER BY 1, 2; + +SELECT '-- KeeperMap: subquery not safe to cast'; +SELECT * FROM 03578_keepermap WHERE key IN (SELECT 0::UInt32 UNION ALL SELECT 1::UInt32) ORDER BY 1, 2; + +SELECT '-- KeeperMap: subquery with overflow'; +SELECT * FROM 03578_keepermap WHERE key IN (SELECT 0::UInt32 UNION ALL SELECT 1000000::UInt32) ORDER BY 1, 2; + +SELECT '-- KeeperMap: subquery with cast failure'; +SELECT * FROM 03578_keepermap WHERE key IN (SELECT '0' UNION ALL SELECT 'non-number') ORDER BY 1, 2; + +SYSTEM FLUSH LOGS query_log; + +SELECT '-- Rows read:'; + +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() + AND type = 'QueryFinish' + AND query LIKE '%FROM 03578_keepermap%' + AND is_initial_query +ORDER BY event_time_microseconds; + +DROP TABLE 03578_keepermap; + +SELECT ''; + +DROP TABLE IF EXISTS 03578_keepermap_nullable; + +CREATE TABLE IF NOT EXISTS 03578_keepermap_nullable +( + key Nullable(UInt16), + val String +) +ENGINE = KeeperMap('/' || currentDatabase() || '/test_03578_type_cast_null') +PRIMARY KEY key; + +INSERT INTO 03578_keepermap_nullable SELECT 
null, 'val-null'; +INSERT INTO 03578_keepermap_nullable SELECT number, 'val-' || number FROM numbers(99); + +SELECT '-- KeeperMap null: set without null'; +SELECT * FROM 03578_keepermap_nullable WHERE key IN (0, 1) ORDER BY 1, 2; + +SELECT '-- KeeperMap null: set with null'; +SELECT * FROM 03578_keepermap_nullable WHERE key IN (null, 1) ORDER BY 1, 2 SETTINGS transform_null_in = 1; + +SELECT '-- KeeperMap null: subquery without null'; +SELECT * FROM 03578_keepermap_nullable WHERE key IN (SELECT 0 UNION ALL SELECT 1) ORDER BY 1, 2; + +SELECT '-- KeeperMap null: subquery with null'; +SELECT * FROM 03578_keepermap_nullable WHERE key IN (SELECT null UNION ALL SELECT 1) ORDER BY 1, 2 SETTINGS transform_null_in = 1; + +SYSTEM FLUSH LOGS query_log; + +SELECT '-- Rows read:'; + +SELECT read_rows +FROM system.query_log +WHERE current_database = currentDatabase() + AND type = 'QueryFinish' + AND query LIKE '%FROM 03578_keepermap_nullable%' + AND is_initial_query +ORDER BY event_time_microseconds; + +DROP TABLE 03578_keepermap_nullable; diff --git a/parser/testdata/03578_ttl_column_in_order_by_validation/ast.json b/parser/testdata/03578_ttl_column_in_order_by_validation/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03578_ttl_column_in_order_by_validation/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03578_ttl_column_in_order_by_validation/metadata.json b/parser/testdata/03578_ttl_column_in_order_by_validation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03578_ttl_column_in_order_by_validation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03578_ttl_column_in_order_by_validation/query.sql b/parser/testdata/03578_ttl_column_in_order_by_validation/query.sql new file mode 100644 index 000000000..360078b81 --- /dev/null +++ b/parser/testdata/03578_ttl_column_in_order_by_validation/query.sql @@ -0,0 +1,20 @@ +-- Test for issue #84442: ALTER MODIFY ORDER BY does not check if the new column has TTL +-- This test verifies that ALTER TABLE properly validates TTL columns in ORDER BY clauses + +CREATE TABLE IF NOT EXISTS test_break_ddl +( + id String, + event_date Date, + event_time DateTime, + message String +) +ENGINE = ReplacingMergeTree() +PARTITION BY event_date +ORDER BY (id, event_date, event_time); + +ALTER TABLE test_break_ddl + ADD COLUMN `source_address` String TTL event_time + toIntervalDay(30) AFTER event_time, + ADD COLUMN `destination_address` String TTL event_time + toIntervalDay(30) AFTER source_address, + MODIFY ORDER BY (id, event_date, event_time, source_address, destination_address); -- { serverError ILLEGAL_COLUMN } + +DROP TABLE IF EXISTS test_break_ddl; diff --git a/parser/testdata/03579_mergeTreeIndex_params/ast.json b/parser/testdata/03579_mergeTreeIndex_params/ast.json new file mode 100644 index 000000000..e9862567d --- /dev/null +++ b/parser/testdata/03579_mergeTreeIndex_params/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_mt_params (children 1)" + }, + { + "explain": " Identifier t_mt_params" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001197768, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/03579_mergeTreeIndex_params/metadata.json b/parser/testdata/03579_mergeTreeIndex_params/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03579_mergeTreeIndex_params/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03579_mergeTreeIndex_params/query.sql b/parser/testdata/03579_mergeTreeIndex_params/query.sql new file mode 100644 index 000000000..ec6c1bfa5 --- /dev/null +++ b/parser/testdata/03579_mergeTreeIndex_params/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS t_mt_params; + +CREATE TABLE t_mt_params (s String, n UInt64) +ENGINE = MergeTree ORDER BY s PARTITION BY n % 2 +SETTINGS index_granularity = 3, min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0, serialization_info_version = 'basic'; + +INSERT INTO t_mt_params VALUES ('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5); + +SELECT * FROM mergeTreeIndex(); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT * FROM mergeTreeIndex(currentDatabase()); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT * FROM mergeTreeIndex(currentDatabase(), 't_mt_params', non_existing_param = 1); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mergeTreeIndex(currentDatabase(), 't_mt_params', with_marks = 1, non_existing_param = 1); -- { serverError BAD_ARGUMENTS } +SELECT * FROM mergeTreeIndex(currentDatabase(), 't_mt_params', with_marks = 1, with_minmax = 1, non_existing_param = 1); -- { serverError BAD_ARGUMENTS } + +SELECT * FROM mergeTreeIndex(currentDatabase(), 't_mt_params') ORDER BY ALL FORMAT TSVWithNames; +SELECT * FROM mergeTreeIndex(currentDatabase(), 't_mt_params', with_marks = 1) ORDER BY ALL FORMAT TSVWithNames; +SELECT * FROM mergeTreeIndex(currentDatabase(), 't_mt_params', with_minmax = 1) ORDER BY ALL FORMAT TSVWithNames; +SELECT * FROM mergeTreeIndex(currentDatabase(), 't_mt_params', with_marks = 1, with_minmax = 1) ORDER BY ALL FORMAT TSVWithNames; + +DROP TABLE t_mt_params; diff --git a/parser/testdata/03579_system_columns_column_alias/ast.json b/parser/testdata/03579_system_columns_column_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03579_system_columns_column_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03579_system_columns_column_alias/metadata.json b/parser/testdata/03579_system_columns_column_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03579_system_columns_column_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03579_system_columns_column_alias/query.sql b/parser/testdata/03579_system_columns_column_alias/query.sql new file mode 100644 index 000000000..6d717e35c --- /dev/null +++ b/parser/testdata/03579_system_columns_column_alias/query.sql @@ -0,0 +1,11 @@ +-- Test that system.columns has 'column' as an alias for 'name' + +-- Query using the original 'name' column +SELECT name FROM system.columns WHERE database = 'system' AND table = 'columns' LIMIT 1; + +-- Query using the new 'column' alias - should return identical results +SELECT column FROM system.columns WHERE database = 'system' AND table = 'columns' LIMIT 1; + +-- Test that both column names work in the same query +SELECT name, column, name = column as alias_works FROM system.columns +WHERE database = 'system' AND table = 'columns' LIMIT 1; \ No newline at end of file diff --git a/parser/testdata/03579_zero_copy_aggregating_final_anyLast/ast.json b/parser/testdata/03579_zero_copy_aggregating_final_anyLast/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03579_zero_copy_aggregating_final_anyLast/ast.json @@ -0,0 +1 @@ +{"error": 
true} diff --git a/parser/testdata/03579_zero_copy_aggregating_final_anyLast/metadata.json b/parser/testdata/03579_zero_copy_aggregating_final_anyLast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03579_zero_copy_aggregating_final_anyLast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03579_zero_copy_aggregating_final_anyLast/query.sql b/parser/testdata/03579_zero_copy_aggregating_final_anyLast/query.sql new file mode 100644 index 000000000..92f440357 --- /dev/null +++ b/parser/testdata/03579_zero_copy_aggregating_final_anyLast/query.sql @@ -0,0 +1,38 @@ + +CREATE TABLE t_coalesce ( + k UInt64, + v1 SimpleAggregateFunction(anyLast, Nullable(String)), + v2 SimpleAggregateFunction(anyLast, Nullable(FixedString(20))), + v3 SimpleAggregateFunction(anyLast, Array(String)), + v4 SimpleAggregateFunction(anyLast, Map(String, String)), + v5 SimpleAggregateFunction(anyLast, JSON), + v6 SimpleAggregateFunction(anyLast, Variant(String, UInt64)), + v7 SimpleAggregateFunction(anyLast, Dynamic) +) ENGINE = AggregatingMergeTree() ORDER BY k; + +INSERT INTO t_coalesce (k) VALUES (1); +INSERT INTO t_coalesce (k, v1) VALUES (1, 'Hello'); +INSERT INTO t_coalesce (k, v2) VALUES (1, 'Annnnnnnnnnnnnnnnnnn'); +INSERT INTO t_coalesce (k, v3, v4, v5, v6, v7) VALUES (1, ['a', 'b'], {'a': 'b'}, '{"a": "b"}', '{"a": 1}', '{"a": "b"}'); + +SELECT k, v1, v2, v3, v4, v5, v6, v7 FROM t_coalesce FINAL; + +CREATE TABLE t_coalesce2 ( + k UInt64, + v1 SimpleAggregateFunction(any, Nullable(String)), + v2 SimpleAggregateFunction(any, Nullable(FixedString(20))), + v3 SimpleAggregateFunction(any, Array(String)), + v4 SimpleAggregateFunction(any, Map(String, String)), + v5 SimpleAggregateFunction(any, JSON), + v6 SimpleAggregateFunction(any, Variant(String, UInt64)), + v7 SimpleAggregateFunction(any, Dynamic) +) ENGINE = AggregatingMergeTree() ORDER BY k; + +INSERT INTO t_coalesce2 (k, v3, v4, v5, v6, v7) VALUES (1, ['a', 'b'], {'a': 'b'}, '{"a": "b"}', '{"a": 1}', '{"a": "b"}'); +INSERT INTO t_coalesce2 (k, v1) VALUES (1, 'Hello'); +INSERT INTO t_coalesce2 (k, v2) VALUES (1, 'Annnnnnnnnnnnnnnnnnn'); + +SELECT k, v1, v2, v3, v4, v5, v6, v7 FROM t_coalesce2 FINAL; + +DROP TABLE t_coalesce; +DROP TABLE t_coalesce2; diff --git a/parser/testdata/03580_external_merge_sort_with_lazy_columns/ast.json b/parser/testdata/03580_external_merge_sort_with_lazy_columns/ast.json new file mode 100644 index 000000000..b4fbf2793 --- /dev/null +++ b/parser/testdata/03580_external_merge_sort_with_lazy_columns/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001230364, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03580_external_merge_sort_with_lazy_columns/metadata.json b/parser/testdata/03580_external_merge_sort_with_lazy_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03580_external_merge_sort_with_lazy_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03580_external_merge_sort_with_lazy_columns/query.sql b/parser/testdata/03580_external_merge_sort_with_lazy_columns/query.sql new file mode 100644 index 000000000..18010f892 --- /dev/null +++ b/parser/testdata/03580_external_merge_sort_with_lazy_columns/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS 
test SYNC; +CREATE TABLE test +( + c1 LowCardinality(String), + c2 LowCardinality(String), + c3 UInt32 +) +ENGINE = MergeTree +ORDER BY (c1, c2, c3) +SETTINGS index_granularity = 8192; + +INSERT INTO test SELECT toString(number) AS c1, c1 AS c2, number AS c3 FROM system.numbers LIMIT 2; + +-- weird settings to force external sort +SELECT * FROM test ORDER BY c3 LIMIT 1 SETTINGS max_bytes_before_external_sort = 1, max_bytes_ratio_before_external_sort = 0.0; diff --git a/parser/testdata/03580_heredoc_ambiguity/ast.json b/parser/testdata/03580_heredoc_ambiguity/ast.json new file mode 100644 index 000000000..38ca24749 --- /dev/null +++ b/parser/testdata/03580_heredoc_ambiguity/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001268249, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03580_heredoc_ambiguity/metadata.json b/parser/testdata/03580_heredoc_ambiguity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03580_heredoc_ambiguity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03580_heredoc_ambiguity/query.sql b/parser/testdata/03580_heredoc_ambiguity/query.sql new file mode 100644 index 000000000..b18a6770f --- /dev/null +++ b/parser/testdata/03580_heredoc_ambiguity/query.sql @@ -0,0 +1,4 @@ +SET param_$1 = 'Hello', param_$2 = 'World'; + +SELECT {$1:String} AS x, {$2:String} AS y; -- {$1:String} AS x, {$2:String} AS y +SELECT {$1:String} AS x, {$2:String} AS y UNION ALL SELECT {$1:String} AS x, {$2:String} AS y; diff --git a/parser/testdata/03580_improve_prewhere/ast.json b/parser/testdata/03580_improve_prewhere/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03580_improve_prewhere/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03580_improve_prewhere/metadata.json b/parser/testdata/03580_improve_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03580_improve_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03580_improve_prewhere/query.sql b/parser/testdata/03580_improve_prewhere/query.sql new file mode 100644 index 000000000..2e163a795 --- /dev/null +++ b/parser/testdata/03580_improve_prewhere/query.sql @@ -0,0 +1,54 @@ +-- Tags: no-fasttest +set optimize_move_to_prewhere = 1; +set move_all_conditions_to_prewhere = 1; +set enable_multiple_prewhere_read_steps = 1; +set move_primary_key_columns_to_end_of_prewhere = 1; +set allow_reorder_prewhere_conditions = 1; +set enable_analyzer = 1; +set enable_parallel_replicas = 0; +set allow_experimental_statistics = 1; +set allow_statistics_optimize = 1; + +DROP TABLE IF EXISTS test_improve_prewhere; + +CREATE TABLE test_improve_prewhere ( + primary_key String STATISTICS(CountMin), + normal_column String STATISTICS(CountMin), + value UInt32 STATISTICS(TDigest), + date Date STATISTICS(CountMin), +) ENGINE = MergeTree() +ORDER BY primary_key; + +INSERT INTO test_improve_prewhere +SELECT + hex(number % 100) AS primary_key, + arrayElement(['hello', 'world', 'test', 'example', 'sample'], number % 5 + 1) AS normal_column, + number % 1000 + 1 AS value, + toDate('2025-08-01') + number AS date +FROM numbers(100000); + +-- { echoOn } +-- Condition: lower(primary_key) = '00' can't make use of primary key index. 
It shouldn't be moved to the end of prewhere conditions. +select trimLeft(explain) from ( +EXPLAIN actions=1 +SELECT * FROM test_improve_prewhere +WHERE date = '2025-08-05' and lower(primary_key) = '00' and normal_column != 'hello' and value < 100 +) where explain ilike '%Prewhere filter column%'; + +-- Condition: primary_key = '00' can use primary key index. It should be moved to the end of prewhere conditions. +select trimLeft(explain) from ( +EXPLAIN actions=1 +SELECT * FROM test_improve_prewhere +WHERE date = '2025-08-05' and primary_key = '00' and normal_column != 'hello' and value < 100 +) where explain ilike '%Prewhere filter column%'; + +-- Condition: lower(primary_key) IN ('00', '01') should be placed before Condition: normal_column != 'hello' and value < 100 +-- because it has a lower estimated selectivity. +select trimLeft(replaceRegexpAll(explain, '__set_String_\\d+_\\d+', '__set_String')) from ( +EXPLAIN actions=1 +SELECT * FROM test_improve_prewhere +WHERE date = '2025-08-05' and lower(primary_key) IN ('00', '01') and normal_column != 'hello' and value < 100 +) where explain ilike '%Prewhere filter column%'; +-- { echoOff } + +DROP TABLE test_improve_prewhere; diff --git a/parser/testdata/03580_join_runtime_filter/ast.json b/parser/testdata/03580_join_runtime_filter/ast.json new file mode 100644 index 000000000..7dae824fc --- /dev/null +++ b/parser/testdata/03580_join_runtime_filter/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery nation (children 3)" + }, + { + "explain": " Identifier nation" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration n_nationkey (children 1)" + }, + { + "explain": " DataType Int32" + }, + { + "explain": " ColumnDeclaration n_name (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier n_nationkey" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001768202, + "rows_read": 11, + "bytes_read": 396 + } +} diff --git a/parser/testdata/03580_join_runtime_filter/metadata.json b/parser/testdata/03580_join_runtime_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03580_join_runtime_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03580_join_runtime_filter/query.sql b/parser/testdata/03580_join_runtime_filter/query.sql new file mode 100644 index 000000000..2af126fb3 --- /dev/null +++ b/parser/testdata/03580_join_runtime_filter/query.sql @@ -0,0 +1,151 @@ +CREATE TABLE nation(n_nationkey Int32, n_name String) ENGINE MergeTree ORDER BY n_nationkey; +CREATE TABLE customer(c_custkey Int32, c_nationkey Int32) ENGINE MergeTree ORDER BY c_custkey; +CREATE TABLE orders(o_orderkey Int32, o_custkey Int32, o_totalprice Decimal(15, 2)) ENGINE MergeTree ORDER BY o_orderkey; + +INSERT INTO nation VALUES (5,'ETHIOPIA'),(6,'FRANCE'),(7,'GERMANY'),(100,'UNKNOWN'); + +INSERT INTO customer SELECT number, 5 FROM numbers(500); +INSERT INTO customer SELECT number, 6 FROM numbers(6000); +INSERT INTO customer SELECT number, 7 FROM numbers(70000); +INSERT INTO customer SELECT number, 201 FROM numbers(1); +INSERT INTO customer SELECT number, 202 FROM numbers(2); + +INSERT INTO orders SELECT number, number/10000, number%1000 FROM numbers(1000000); + +SET 
enable_analyzer=1; +SET enable_join_runtime_filters=1; +SET enable_parallel_replicas=0; +SET join_algorithm = 'hash,parallel_hash'; + +SELECT avg(o_totalprice) +FROM orders, customer, nation +WHERE c_custkey = o_custkey AND c_nationkey = n_nationkey AND n_name = 'FRANCE'; + +-- 1 element in filter +SELECT count() +FROM customer, nation +WHERE c_nationkey = n_nationkey AND n_name = 'FRANCE'; + +-- 0 elements in filter ('WAKANDA' is not present in the `nation` table) +SELECT count() +FROM customer, nation +WHERE c_nationkey = n_nationkey AND n_name = 'WAKANDA'; + +-- again 1 element in filter +SELECT count() +FROM customer, nation +WHERE c_nationkey = n_nationkey AND n_name IN ('WAKANDA', 'FRANCE'); + +-- 2 elements in filter +SELECT count() +FROM customer, nation +WHERE c_nationkey = n_nationkey AND n_name IN ('GERMANY', 'FRANCE'); + +-- 2 elements in filter, stored in a bloom filter +SELECT count() +FROM customer, nation +WHERE c_nationkey = n_nationkey AND n_name IN ('GERMANY', 'FRANCE') +SETTINGS join_runtime_filter_exact_values_limit=1; + +SELECT count() +FROM customer, nation +WHERE c_nationkey = n_nationkey+1 AND n_name = 'FRANCE'; + +SELECT count() +FROM customer, nation +WHERE c_nationkey+1 = n_nationkey AND n_name = 'FRANCE'; + +SELECT count() +FROM customer, nation +WHERE c_nationkey+1 = n_nationkey+1 AND n_name = 'FRANCE'; + +SELECT count() +FROM customer, nation +WHERE c_nationkey+1 = n_nationkey+1 AND n_name = 'FRANCE' +SETTINGS enable_join_runtime_filters = 1, join_runtime_bloom_filter_bytes = 100500000; -- {serverError PARAMETER_OUT_OF_BOUND} + +SELECT count() +FROM customer, nation +WHERE c_nationkey+1 = n_nationkey+1 AND n_name = 'FRANCE' +SETTINGS enable_join_runtime_filters = 1, join_runtime_bloom_filter_hash_functions = 20; -- {serverError PARAMETER_OUT_OF_BOUND} + +SELECT count() +FROM customer, nation +WHERE c_nationkey = n_nationkey AND n_name = 'FRANCE' +SETTINGS enable_join_runtime_filters = 1, join_runtime_bloom_filter_bytes = 4096, join_runtime_bloom_filter_hash_functions = 2; + +-- Join algorithm that doesn't support runtime filters +SELECT REGEXP_REPLACE(trimLeft(explain), '_runtime_filter_\\d+', '_runtime_filter_UNIQ_ID') +FROM ( + EXPLAIN actions=1 + SELECT count() + FROM customer, nation + WHERE c_nationkey = n_nationkey AND n_nationkey%10 = c_custkey%100 + SETTINGS query_plan_join_swap_table=0, join_algorithm='full_sorting_merge' +) +WHERE (explain ILIKE '%Filter column%') OR (explain LIKE '%BuildRuntimeFilter%') OR (explain LIKE '%FullSorting%'); + +-- Filters on multiple join predicates +SELECT REGEXP_REPLACE(trimLeft(explain), '_runtime_filter_\\d+', '_runtime_filter_UNIQ_ID') +FROM ( + EXPLAIN actions=1 + SELECT count() + FROM customer, nation + WHERE c_nationkey = n_nationkey AND n_nationkey%10 = c_custkey%100 + SETTINGS query_plan_join_swap_table=0 +) +WHERE (explain ILIKE '%Filter column%') OR (explain LIKE '%BuildRuntimeFilter%'); + +SELECT count() +FROM customer, nation +WHERE c_nationkey = n_nationkey AND n_nationkey%10 = c_custkey%100; + + +-- ANY JOIN +SELECT REGEXP_REPLACE(trimLeft(explain), '_runtime_filter_\\d+', '_runtime_filter_UNIQ_ID') +FROM ( + EXPLAIN actions=1 + SELECT count() + FROM customer ANY JOIN nation + ON c_nationkey = n_nationkey + SETTINGS query_plan_join_swap_table=0 +) +WHERE (explain ILIKE '%Filter column%') OR (explain LIKE '%BuildRuntimeFilter%') OR (explain LIKE '% Type:%') OR (explain LIKE '% Strictness:%'); + +SELECT count() +FROM customer ANY JOIN nation +ON c_nationkey = n_nationkey; + + +-- LEFT SEMI JOIN +SELECT
REGEXP_REPLACE(trimLeft(explain), '_runtime_filter_\\d+', '_runtime_filter_UNIQ_ID') +FROM ( + EXPLAIN actions=1 + SELECT count() + FROM customer LEFT SEMI JOIN nation + ON c_nationkey = n_nationkey + SETTINGS query_plan_join_swap_table=0 +) +WHERE (explain ILIKE '%Filter column%') OR (explain LIKE '%BuildRuntimeFilter%') OR (explain LIKE '% Type:%') OR (explain LIKE '% Strictness:%'); + +SELECT count() +FROM customer LEFT SEMI JOIN nation +ON c_nationkey = n_nationkey; + + +-- RIGHT SEMI JOIN +SELECT REGEXP_REPLACE(trimLeft(explain), '_runtime_filter_\\d+', '_runtime_filter_UNIQ_ID') +FROM ( + EXPLAIN actions=1 + SELECT n_name + FROM customer RIGHT SEMI JOIN nation + ON c_nationkey = n_nationkey + ORDER BY ALL + SETTINGS query_plan_join_swap_table=0 +) +WHERE (explain ILIKE '%Filter column%') OR (explain LIKE '%BuildRuntimeFilter%') OR (explain LIKE '% Type:%') OR (explain LIKE '% Strictness:%'); + +SELECT n_name +FROM customer RIGHT SEMI JOIN nation +ON c_nationkey = n_nationkey +ORDER BY ALL; diff --git a/parser/testdata/03580_join_runtime_filter_column_type/ast.json b/parser/testdata/03580_join_runtime_filter_column_type/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03580_join_runtime_filter_column_type/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03580_join_runtime_filter_column_type/metadata.json b/parser/testdata/03580_join_runtime_filter_column_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03580_join_runtime_filter_column_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03580_join_runtime_filter_column_type/query.sql b/parser/testdata/03580_join_runtime_filter_column_type/query.sql new file mode 100644 index 000000000..fc085af75 --- /dev/null +++ b/parser/testdata/03580_join_runtime_filter_column_type/query.sql @@ -0,0 +1,5 @@ +-- Repro for https://github.com/ClickHouse/ClickHouse/issues/89062 +SELECT 1 +FROM numbers(1) AS t0 +WHERE EXISTS (SELECT t0._table) +SETTINGS enable_join_runtime_filters = 1, allow_experimental_correlated_subqueries = 1, enable_analyzer = 1; diff --git a/parser/testdata/03580_join_runtime_filter_prewhere/ast.json b/parser/testdata/03580_join_runtime_filter_prewhere/ast.json new file mode 100644 index 000000000..b5cf3d5ce --- /dev/null +++ b/parser/testdata/03580_join_runtime_filter_prewhere/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery nation (children 3)" + }, + { + "explain": " Identifier nation" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration n_nationkey (children 1)" + }, + { + "explain": " DataType Int32" + }, + { + "explain": " ColumnDeclaration n_name (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier n_nationkey" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001420786, + "rows_read": 11, + "bytes_read": 396 + } +} diff --git a/parser/testdata/03580_join_runtime_filter_prewhere/metadata.json b/parser/testdata/03580_join_runtime_filter_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03580_join_runtime_filter_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03580_join_runtime_filter_prewhere/query.sql b/parser/testdata/03580_join_runtime_filter_prewhere/query.sql new file mode 100644 index 000000000..384286b1f --- /dev/null +++ b/parser/testdata/03580_join_runtime_filter_prewhere/query.sql @@ -0,0 +1,43 @@ +CREATE TABLE nation(n_nationkey Int32, n_name String) ENGINE MergeTree ORDER BY n_nationkey; +CREATE TABLE customer(c_custkey Int32, c_nationkey Int32) ENGINE MergeTree ORDER BY c_custkey; + +INSERT INTO nation VALUES (5,'ETHIOPIA'),(6,'FRANCE'),(7,'GERMANY'); + +INSERT INTO customer SELECT number, 5 FROM numbers(500); + +SET enable_analyzer=1; +SET enable_parallel_replicas=0; +SET join_algorithm = 'hash,parallel_hash'; + +SELECT '-- Check that filter on c_nationkey is moved to PREWHERE'; +SELECT REGEXP_REPLACE(trimLeft(explain), '_runtime_filter_\\d+', '_runtime_filter_UNIQ_ID') +FROM +( + SELECT * + FROM viewExplain('EXPLAIN', 'actions = 1', ( + SELECT + count(), + max(c_custkey) + FROM customer, nation + WHERE (c_nationkey = n_nationkey) AND (n_name = 'FRANCE') + SETTINGS enable_join_runtime_filters = 1, optimize_move_to_prewhere = 1, query_plan_join_swap_table = 0 + )) +) +WHERE (explain LIKE '%ReadFromMergeTree%') OR (explain LIKE '%Prewhere filter column:%') OR (explain LIKE '%Build%'); + + +SELECT '-- Check the same query but with swapped tables'; +SELECT REGEXP_REPLACE(trimLeft(explain), '_runtime_filter_\\d+', '_runtime_filter_UNIQ_ID') +FROM +( + SELECT * + FROM viewExplain('EXPLAIN', 'actions = 1', ( + SELECT + count(), + max(c_custkey) + FROM nation, customer + WHERE (c_nationkey = n_nationkey) AND (n_name = 'FRANCE') + SETTINGS enable_join_runtime_filters = 1, optimize_move_to_prewhere = 1, query_plan_join_swap_table = 1 + )) +) +WHERE (explain LIKE '%ReadFromMergeTree%') OR (explain LIKE '%Prewhere filter column:%') OR (explain LIKE '%Build%'); diff --git a/parser/testdata/03580_join_runtime_filter_pushdown/ast.json b/parser/testdata/03580_join_runtime_filter_pushdown/ast.json new file mode 100644 index 000000000..8721a6593 --- /dev/null +++ b/parser/testdata/03580_join_runtime_filter_pushdown/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001135026, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03580_join_runtime_filter_pushdown/metadata.json b/parser/testdata/03580_join_runtime_filter_pushdown/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03580_join_runtime_filter_pushdown/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03580_join_runtime_filter_pushdown/query.sql b/parser/testdata/03580_join_runtime_filter_pushdown/query.sql new file mode 100644 index 000000000..bbb67ef22 --- /dev/null +++ b/parser/testdata/03580_join_runtime_filter_pushdown/query.sql @@ -0,0 +1,18 @@ +SET enable_analyzer=1; + +SELECT REGEXP_REPLACE(explain, '_runtime_filter_\\d+', '_runtime_filter_UNIQ_ID') +FROM ( + +EXPLAIN +SELECT * +FROM ( + SELECT a.number AS a_number, b.number AS b_number + FROM numbers(10) AS a + JOIN numbers(10) AS b + ON a.number%2 = b.number%3 + ) AS ab + JOIN numbers(10) AS c + ON b_number = c.number+2 +SETTINGS enable_join_runtime_filters=1 + +); diff --git a/parser/testdata/03580_s3queue_settings_store/ast.json b/parser/testdata/03580_s3queue_settings_store/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/03580_s3queue_settings_store/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03580_s3queue_settings_store/metadata.json b/parser/testdata/03580_s3queue_settings_store/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03580_s3queue_settings_store/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03580_s3queue_settings_store/query.sql b/parser/testdata/03580_s3queue_settings_store/query.sql new file mode 100644 index 000000000..c43cc8c54 --- /dev/null +++ b/parser/testdata/03580_s3queue_settings_store/query.sql @@ -0,0 +1,12 @@ +-- Tags: no-fasttest + +CREATE TABLE s3queue_test +( + `column1` UInt32, + `column2` UInt32, + `column3` UInt32 +) +ENGINE = S3Queue('http://whatever-we-dont-care:9001/root/s3queue_test_data/', 'username', 'password', CSV) +SETTINGS s3queue_loading_retries = 0, after_processing = 'delete', keeper_path = '/s3queue', mode = 'ordered', enable_hash_ring_filtering = 1, s3queue_enable_logging_to_s3queue_log = 1; + +SHOW CREATE TABLE s3queue_test; diff --git a/parser/testdata/03581_bool_literal_column_name/ast.json b/parser/testdata/03581_bool_literal_column_name/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03581_bool_literal_column_name/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03581_bool_literal_column_name/metadata.json b/parser/testdata/03581_bool_literal_column_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03581_bool_literal_column_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03581_bool_literal_column_name/query.sql b/parser/testdata/03581_bool_literal_column_name/query.sql new file mode 100644 index 000000000..44d0ff11f --- /dev/null +++ b/parser/testdata/03581_bool_literal_column_name/query.sql @@ -0,0 +1,11 @@ +-- Test for issue #84896: Incorrect autogenerated column name for Bool literals +-- This test ensures that Bool literals generate distinct column names from integer literals + +SELECT toTypeName(0), toTypeName(false); +SELECT toTypeName(1), toTypeName(true); + +SELECT + toTypeName(true) as bool_true_type, + toTypeName(false) as bool_false_type, + toTypeName(0) as int_zero_type, + toTypeName(1) as int_one_type; diff --git a/parser/testdata/03581_iceberg_struct_fields_ids/ast.json b/parser/testdata/03581_iceberg_struct_fields_ids/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03581_iceberg_struct_fields_ids/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03581_iceberg_struct_fields_ids/metadata.json b/parser/testdata/03581_iceberg_struct_fields_ids/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03581_iceberg_struct_fields_ids/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03581_iceberg_struct_fields_ids/query.sql b/parser/testdata/03581_iceberg_struct_fields_ids/query.sql new file mode 100644 index 000000000..c41db770b --- /dev/null +++ b/parser/testdata/03581_iceberg_struct_fields_ids/query.sql @@ -0,0 +1,7 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on AWS + +-- This table was created by Spark, but with changed column names in Parquet. +-- The purpose of this test is to verify that a column with a structured type whose names differ between the Parquet and Iceberg metadata will be read correctly.
+SELECT * FROM icebergS3(s3_conn, filename='field_ids_struct_test', SETTINGS iceberg_metadata_table_uuid = '149ecc15-7afc-4311-86b3-3a4c8d4ec08e') ORDER BY ALL; +SELECT * FROM icebergS3(s3_conn, filename='field_ids_complex_test', SETTINGS iceberg_metadata_table_uuid = 'd4b695ca-ceeb-4537-8a2a-eee90dc6e313') ORDER BY ALL; diff --git a/parser/testdata/03581_nested_storage_merge_distributed_order_by/ast.json b/parser/testdata/03581_nested_storage_merge_distributed_order_by/ast.json new file mode 100644 index 000000000..e2e9e776a --- /dev/null +++ b/parser/testdata/03581_nested_storage_merge_distributed_order_by/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001108452, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03581_nested_storage_merge_distributed_order_by/metadata.json b/parser/testdata/03581_nested_storage_merge_distributed_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03581_nested_storage_merge_distributed_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03581_nested_storage_merge_distributed_order_by/query.sql b/parser/testdata/03581_nested_storage_merge_distributed_order_by/query.sql new file mode 100644 index 000000000..45806d88d --- /dev/null +++ b/parser/testdata/03581_nested_storage_merge_distributed_order_by/query.sql @@ -0,0 +1,23 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS d1; +DROP TABLE IF EXISTS m1; +DROP TABLE IF EXISTS m2; + +CREATE TABLE t1 (key Int) ENGINE=MergeTree() ORDER BY key; +CREATE TABLE t2 (key Int) ENGINE=MergeTree() ORDER BY key; +CREATE TABLE d1 ENGINE=Distributed('test_shard_localhost', currentDatabase(), t2, rand()); +CREATE TABLE m1 ENGINE=Merge(currentDatabase(), '^(t1|d1)$'); +CREATE TABLE m2 ENGINE=Merge(currentDatabase(), '^(t1|m1)$'); +INSERT INTO t1 VALUES (1); +INSERT INTO t1 VALUES (2); + +SELECT * FROM m2 ORDER BY key ASC SETTINGS max_threads = 1; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS d1; +DROP TABLE IF EXISTS m1; +DROP TABLE IF EXISTS m2; \ No newline at end of file diff --git a/parser/testdata/03581_parallel_replicas_read_empty_ranges/ast.json b/parser/testdata/03581_parallel_replicas_read_empty_ranges/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03581_parallel_replicas_read_empty_ranges/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03581_parallel_replicas_read_empty_ranges/metadata.json b/parser/testdata/03581_parallel_replicas_read_empty_ranges/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03581_parallel_replicas_read_empty_ranges/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03581_parallel_replicas_read_empty_ranges/query.sql b/parser/testdata/03581_parallel_replicas_read_empty_ranges/query.sql new file mode 100644 index 000000000..1e9af5e2b --- /dev/null +++ b/parser/testdata/03581_parallel_replicas_read_empty_ranges/query.sql @@ -0,0 +1,38 @@ +set allow_experimental_parallel_reading_from_replicas = 1, + parallel_replicas_for_non_replicated_merge_tree = 1, + cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost'; + +drop table if exists 03581_data; + +create table 03581_data ( + key UInt32, + + val_minmax 
UInt32, + val_set UInt32, + + index skip_minmax val_minmax type set(0) granularity 1, + index skip_set val_set type set(0) granularity 1, +) +engine = MergeTree +order by key +settings index_granularity = 10; + +insert into 03581_data select number, number, number from numbers(1000); + +select 'Primary key:', count() from 03581_data where key = 2000; +select 'Skip index MinMax:', count() from 03581_data where val_minmax = 2000; +select 'Skip index Set:', count() from 03581_data where val_set = 2000; + +select ''; +select 'Rows read:'; + +system flush logs query_log; + +select read_rows +from system.query_log +where current_database = currentDatabase() + and type = 'QueryFinish' + and query ilike '% from 03581_data where %' +order by event_time_microseconds desc; + +drop table 03581_data; diff --git a/parser/testdata/03581_parallel_replicas_task_size/ast.json b/parser/testdata/03581_parallel_replicas_task_size/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03581_parallel_replicas_task_size/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03581_parallel_replicas_task_size/metadata.json b/parser/testdata/03581_parallel_replicas_task_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03581_parallel_replicas_task_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03581_parallel_replicas_task_size/query.sql b/parser/testdata/03581_parallel_replicas_task_size/query.sql new file mode 100644 index 000000000..09bbf819e --- /dev/null +++ b/parser/testdata/03581_parallel_replicas_task_size/query.sql @@ -0,0 +1,39 @@ +-- Tags: no-fasttest, no-random-settings +-- no-fasttest: requires s3 storage +-- no-random-settings: a lot of settings influence task sizes, so it is simpler to disable randomization completely + +CREATE TABLE t(a UInt64, s String) ENGINE = MergeTree ORDER BY a SETTINGS storage_policy = 's3_cache', min_rows_for_wide_part = 10000, min_bytes_for_wide_part = 0; + +SYSTEM STOP MERGES t; + +INSERT INTO t SELECT *, randomString(100) FROM numbers_mt(3_000_000); +INSERT INTO t SELECT *, randomString(100) FROM numbers(1_000); +INSERT INTO t SELECT *, randomString(100) FROM numbers(1_000); +INSERT INTO t SELECT *, randomString(100) FROM numbers(1_000); +INSERT INTO t SELECT *, randomString(100) FROM numbers(1_000); + +-- The problem with too-small task sizes happens specifically when we have compact parts, +-- because for them we don't know individual column sizes; see the `calculateMinMarksPerTask()` function. +SELECT + throwIf(countIf(part_type = 'Compact') = 0), + throwIf(countIf(part_type = 'Wide') = 0) +FROM system.parts +WHERE (database = currentDatabase()) AND (table = 't') +FORMAT Null; + +-- If ClickHouse chooses too small a task size, we don't want to artificially correct its decision. +SET max_threads = 3, merge_tree_min_read_task_size = 1; + +SET enable_parallel_replicas = 2, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'parallel_replicas'; + +SELECT * FROM t FORMAT Null SETTINGS log_comment = 'parallel_replicas_task_size_82982938'; + +SYSTEM FLUSH LOGS query_log; + +-- The objective is to check that we request enough marks with each request. Obviously, the more we request, the fewer requests we will have. +-- Before the fix, in this particular case we made ~ 70 requests, now it should be <= 15 (25 is used to ensure no flakiness).
+SELECT throwIf(ProfileEvents['ParallelReplicasNumRequests'] > 25) +FROM system.query_log +WHERE current_database = currentDatabase() AND log_comment = 'parallel_replicas_task_size_82982938' AND type = 'QueryFinish' +SETTINGS enable_parallel_replicas = 0 +FORMAT Null; diff --git a/parser/testdata/03581_read_in_order_use_virtual_row_WHERE/ast.json b/parser/testdata/03581_read_in_order_use_virtual_row_WHERE/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03581_read_in_order_use_virtual_row_WHERE/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03581_read_in_order_use_virtual_row_WHERE/metadata.json b/parser/testdata/03581_read_in_order_use_virtual_row_WHERE/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03581_read_in_order_use_virtual_row_WHERE/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03581_read_in_order_use_virtual_row_WHERE/query.sql b/parser/testdata/03581_read_in_order_use_virtual_row_WHERE/query.sql new file mode 100644 index 000000000..a859b3f3a --- /dev/null +++ b/parser/testdata/03581_read_in_order_use_virtual_row_WHERE/query.sql @@ -0,0 +1,26 @@ +-- Tags: no-random-merge-tree-settings, no-random-settings + +create table tab (x UInt64, y UInt64) engine = MergeTree order by x; + +insert into tab select number, number from numbers(1e6); +insert into tab select number, number from numbers(1e6, 1e6); + +select _part, min(x), max(x) from tab group by _part order by _part ; + +select x from tab where bitAnd(y, 1023) == 0 order by x limit 10 settings read_in_order_use_virtual_row=1, log_processors_profiles=1, optimize_move_to_prewhere=0, max_threads=2; + +system flush logs query_log, processors_profile_log; + +WITH + ( + SELECT query_id + FROM system.query_log + WHERE current_database = currentDatabase() AND query like 'select x from tab%' AND event_date >= (today() - 1) + ORDER BY event_time DESC + LIMIT 1 + ) AS id +SELECT + replace(name, 'ReadPoolParallelReplicasInOrder', 'ReadPoolInOrder') AS name, output_rows +from system.processors_profile_log where event_date >= (today() - 1) and query_id = id + and (name like '%MergeTreeSelect%' or name like '%VirtualRowTransform%') +ORDER BY name, output_rows; diff --git a/parser/testdata/03582_initcap_fixedstring/ast.json b/parser/testdata/03582_initcap_fixedstring/ast.json new file mode 100644 index 000000000..382009c14 --- /dev/null +++ b/parser/testdata/03582_initcap_fixedstring/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function initCap (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function arrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['hello', 'world']" + }, + { + "explain": " Literal 'FixedString(5)'" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001369946, + "rows_read": 12, + "bytes_read": 493 + } +} diff --git a/parser/testdata/03582_initcap_fixedstring/metadata.json b/parser/testdata/03582_initcap_fixedstring/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03582_initcap_fixedstring/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03582_initcap_fixedstring/query.sql b/parser/testdata/03582_initcap_fixedstring/query.sql new file mode 100644 index 000000000..65f19ec1b --- /dev/null +++ b/parser/testdata/03582_initcap_fixedstring/query.sql @@ -0,0 +1,2 @@ +SELECT initCap(arrayJoin(['hello', 'world'])::FixedString(5)); +SELECT initCap(arrayJoin(['hello world', 'world hello'])::FixedString(11)); diff --git a/parser/testdata/03582_normalize_utf8_empty/ast.json b/parser/testdata/03582_normalize_utf8_empty/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03582_normalize_utf8_empty/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03582_normalize_utf8_empty/metadata.json b/parser/testdata/03582_normalize_utf8_empty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03582_normalize_utf8_empty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03582_normalize_utf8_empty/query.sql b/parser/testdata/03582_normalize_utf8_empty/query.sql new file mode 100644 index 000000000..d0ee9964d --- /dev/null +++ b/parser/testdata/03582_normalize_utf8_empty/query.sql @@ -0,0 +1,7 @@ +-- Tags: no-fasttest + +SELECT '' AS value, + normalizeUTF8NFC(value) AS nfc, length(nfc) AS nfc_len, + normalizeUTF8NFD(value) AS nfd, length(nfd) AS nfd_len, + normalizeUTF8NFKC(value) AS nfkc, length(nfkc) AS nfkc_len, + normalizeUTF8NFKD(value) AS nfkd, length(nfkd) AS nfkd_len; diff --git a/parser/testdata/03582_pr_read_in_order_hits/ast.json b/parser/testdata/03582_pr_read_in_order_hits/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03582_pr_read_in_order_hits/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03582_pr_read_in_order_hits/metadata.json b/parser/testdata/03582_pr_read_in_order_hits/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03582_pr_read_in_order_hits/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03582_pr_read_in_order_hits/query.sql b/parser/testdata/03582_pr_read_in_order_hits/query.sql new file mode 100644 index 000000000..a39bacbda --- /dev/null +++ b/parser/testdata/03582_pr_read_in_order_hits/query.sql @@ -0,0 +1,43 @@ +-- Tags: stateful +SET max_threads = 0; -- let's reset to automatic detection of the number of threads, otherwise the test can be slow.
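-- (illustrative aside, not part of the original test: the queries below fingerprint a whole
-- ordered result set with a single value; a standalone sketch of the same pattern:
--   SELECT cityHash64(groupArray(x)) FROM (SELECT number AS x FROM numbers(10) ORDER BY x);
-- groupArray preserves the incoming order, so the plain run and the parallel-replicas run
-- produce equal hashes only if they return identical rows in identical order.)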
+ +SELECT '--- In order ---'; +SELECT 'Result hash : ', cityHash64(groupArray(CounterID)) +FROM +( + SELECT CounterID + FROM test.hits + WHERE domain(URL) IN ('yandex.ru', 'auto.ru', 'avito.ru') + ORDER BY CounterID + SETTINGS enable_parallel_replicas = 0 +); + +SELECT 'PR result hash: ', cityHash64(groupArray(CounterID)) +FROM +( + SELECT CounterID + FROM test.hits + WHERE domain(URL) IN ('yandex.ru', 'auto.ru', 'avito.ru') + ORDER BY CounterID + SETTINGS enable_parallel_replicas = 1, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost' +); +SELECT '--- In reverse order ---'; +SELECT 'Result hash : ', cityHash64(groupArray(CounterID)) +FROM +( + SELECT CounterID + FROM test.hits + WHERE domain(URL) IN ('yandex.ru', 'auto.ru', 'avito.ru') + ORDER BY CounterID DESC + SETTINGS enable_parallel_replicas = 0 +); + +SELECT 'PR result hash: ', cityHash64(groupArray(CounterID)) +FROM +( + SELECT CounterID + FROM test.hits + WHERE domain(URL) IN ('yandex.ru', 'auto.ru', 'avito.ru') + ORDER BY CounterID DESC + SETTINGS enable_parallel_replicas = 1, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost' +); diff --git a/parser/testdata/03583_rewrite_in_to_join/ast.json b/parser/testdata/03583_rewrite_in_to_join/ast.json new file mode 100644 index 000000000..73b23908e --- /dev/null +++ b/parser/testdata/03583_rewrite_in_to_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00143537, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03583_rewrite_in_to_join/metadata.json b/parser/testdata/03583_rewrite_in_to_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03583_rewrite_in_to_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03583_rewrite_in_to_join/query.sql b/parser/testdata/03583_rewrite_in_to_join/query.sql new file mode 100644 index 000000000..e13dd8bad --- /dev/null +++ b/parser/testdata/03583_rewrite_in_to_join/query.sql @@ -0,0 +1,95 @@ +SET enable_analyzer=1; +SET rewrite_in_to_join=1; +SET allow_experimental_correlated_subqueries=1; +SET correlated_subqueries_default_join_kind = 'left'; + +-- {echoOn} +-- Check that with these settings the plan contains a join +SELECT explain FROM ( + EXPLAIN keep_logical_steps=1, description=0 SELECT number IN (SELECT * FROM numbers(2)) FROM numbers(3) +) WHERE explain ILIKE '%join%'; + +SELECT number IN (SELECT * FROM numbers(2)) FROM numbers(3); + +SELECT number IN (SELECT number FROM numbers(2)) FROM numbers(3); + +SELECT * FROM numbers(3) WHERE number IN (SELECT number FROM numbers(2)); + +SELECT number IN (SELECT number, number FROM numbers(2)) FROM numbers(3); -- {serverError NUMBER_OF_COLUMNS_DOESNT_MATCH,BAD_ARGUMENTS, ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT number IN (SELECT number IN (SELECT * FROM numbers(1)) FROM numbers(2)) FROM numbers(3); + +SELECT number IN (SELECT number FROM numbers(2) WHERE number IN (SELECT * FROM numbers(1))) FROM numbers(3); + +-- NOT IN +SELECT number NOT IN (SELECT * FROM numbers(2)) FROM numbers(3); + +SELECT * FROM numbers(3) WHERE number NOT IN (SELECT number FROM numbers(2)); + +SELECT number NOT IN (SELECT number, number FROM numbers(2)) FROM numbers(3); --
{serverError NUMBER_OF_COLUMNS_DOESNT_MATCH,BAD_ARGUMENTS, ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT number NOT IN (SELECT number IN (SELECT * FROM numbers(1)) FROM numbers(2)) FROM numbers(3); + +SELECT number IN (SELECT number NOT IN (SELECT * FROM numbers(1)) FROM numbers(2)) FROM numbers(3); + +SELECT number NOT IN (SELECT number NOT IN (SELECT * FROM numbers(1)) FROM numbers(2)) FROM numbers(3); + +SELECT number IN (SELECT number FROM numbers(2) WHERE number NOT IN (SELECT * FROM numbers(1))) FROM numbers(3); + + +EXPLAIN keep_logical_steps=1, description=0 +SELECT * +FROM numbers(8) +WHERE number IN (select number from numbers(5)); + +-- Same subquery as CTE +EXPLAIN keep_logical_steps=1, description=0 +WITH + t as (select number from numbers(5)) +SELECT * +FROM numbers(8) +WHERE number IN t; + +WITH + t as (select number from numbers(5)) +SELECT * +FROM numbers(8) +WHERE number IN t; + +-- Tuple +SELECT * +FROM numbers(8) +WHERE (number+1, number+2) IN (select number, number+1 from numbers(5)); + +-- Tuple and CTE +WITH + t as (select number, number+1 from numbers(5)) +SELECT * +FROM numbers(8) +WHERE (number+1, number+2) in (t); + +-- Mismatching number of elements +SELECT * +FROM numbers(8) +WHERE (number+1, number+2, number+3) IN (select number, number+1 from numbers(5)); -- {serverError NUMBER_OF_COLUMNS_DOESNT_MATCH,BAD_ARGUMENTS, ILLEGAL_TYPE_OF_ARGUMENT} + +WITH + t as (select number, number+1 from numbers(5)) +SELECT * +FROM numbers(8) +WHERE (number+1, number+2, number+3) IN (t); -- {serverError NUMBER_OF_COLUMNS_DOESNT_MATCH,BAD_ARGUMENTS, ILLEGAL_TYPE_OF_ARGUMENT} + +-- Inside IF function condition and arguments +SELECT c0 = ANY(SELECT 1) ? 1 : 2 FROM (SELECT 1 c0) tx; + +SELECT if(dummy IN (SELECT 1) AS in_expression, 11, 22) FROM system.one; + +SELECT if(dummy IN (SELECT 1) AS in_expression, 11, 22) FROM system.one; + +SELECT if(dummy IN (SELECT 1) AS in_expression, in_expression, 22) FROM system.one; + +SELECT if(dummy IN (SELECT 1) AS in_expression, 11, in_expression) FROM system.one; + +SELECT if(dummy IN (SELECT 1) AS in_expression, in_expression, in_expression) FROM system.one; + +--{echoOff} diff --git a/parser/testdata/03591_optimize_prewhere_row_policy/ast.json b/parser/testdata/03591_optimize_prewhere_row_policy/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03591_optimize_prewhere_row_policy/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03591_optimize_prewhere_row_policy/metadata.json b/parser/testdata/03591_optimize_prewhere_row_policy/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03591_optimize_prewhere_row_policy/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03591_optimize_prewhere_row_policy/query.sql b/parser/testdata/03591_optimize_prewhere_row_policy/query.sql new file mode 100644 index 000000000..119273f1c --- /dev/null +++ b/parser/testdata/03591_optimize_prewhere_row_policy/query.sql @@ -0,0 +1,35 @@ +-- {echoOn} + +SET use_query_condition_cache = 0; +SET enable_parallel_replicas = 0; +SET allow_statistics_optimize = 0; + +DROP TABLE IF EXISTS 03591_test; + +DROP ROW POLICY IF EXISTS 03591_rp ON 03591_test; + +CREATE TABLE 03591_test (a Int32, b Int32) ENGINE=MergeTree ORDER BY tuple(); + +INSERT INTO 03591_test VALUES (3, 1), (2, 2), (3, 2); + +SELECT * FROM 03591_test; + +SELECT * FROM 03591_test WHERE throwIf(b=1, 'Should throw') SETTINGS optimize_move_to_prewhere = 1; -- {serverError 
FUNCTION_THROW_IF_VALUE_IS_NON_ZERO} + +CREATE ROW POLICY 03591_rp ON 03591_test USING b=2 TO CURRENT_USER; + +SELECT * FROM 03591_test; + +-- Print plan with actions to make sure both a > 0 and b=2 are present in the prewhere section +EXPLAIN PLAN actions=1 SELECT * FROM 03591_test WHERE a > 0 SETTINGS optimize_move_to_prewhere = 1, allow_experimental_analyzer = 1; +EXPLAIN PLAN actions=1 SELECT * FROM 03591_test WHERE a > 0 SETTINGS optimize_move_to_prewhere = 1, allow_experimental_analyzer = 0; + +SELECT * FROM 03591_test WHERE throwIf(b=1, 'Should not throw because b=1 is not visible to this user due to the b=2 row policy') SETTINGS optimize_move_to_prewhere = 1; + +-- Print plan with actions to make sure a > 0, b = 2 and a = 3 are present in the prewhere section +EXPLAIN PLAN actions=1 SELECT * FROM 03591_test WHERE a > 0 SETTINGS optimize_move_to_prewhere = 1, additional_table_filters={'03591_test': 'a=3'}, allow_experimental_analyzer = 1; +EXPLAIN PLAN actions=1 SELECT * FROM 03591_test WHERE a > 0 SETTINGS optimize_move_to_prewhere = 1, additional_table_filters={'03591_test': 'a=3'}, allow_experimental_analyzer = 0; + +DROP ROW POLICY 03591_rp ON 03591_test; + +SELECT * FROM 03591_test WHERE throwIf(b=2, 'Should throw') SETTINGS optimize_move_to_prewhere = 1; -- {serverError FUNCTION_THROW_IF_VALUE_IS_NON_ZERO} diff --git a/parser/testdata/03592_dictionary_columns_trailing_comma/ast.json b/parser/testdata/03592_dictionary_columns_trailing_comma/ast.json new file mode 100644 index 000000000..646d6eba3 --- /dev/null +++ b/parser/testdata/03592_dictionary_columns_trailing_comma/ast.json @@ -0,0 +1,85 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t_03592 (children 3)" + }, + { + "explain": " Identifier t_03592" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " DictionaryAttributeDeclaration k (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " DictionaryAttributeDeclaration v (children 1)" + }, + { + "explain": " DataType Decimal (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_8" + }, + { + "explain": " Dictionary definition (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier k" + }, + { + "explain": " FunctionWithKeyValueArguments http (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " pair (children 1)" + }, + { + "explain": " Literal 'http:\/\/example.test\/'" + }, + { + "explain": " pair (children 1)" + }, + { + "explain": " Literal 'TSV'" + }, + { + "explain": " Dictionary lifetime" + }, + { + "explain": " Dictionary layout (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 21, + + "statistics": + { + "elapsed": 0.001782616, + "rows_read": 21, + "bytes_read": 767 + } +} diff --git a/parser/testdata/03592_dictionary_columns_trailing_comma/metadata.json b/parser/testdata/03592_dictionary_columns_trailing_comma/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03592_dictionary_columns_trailing_comma/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03592_dictionary_columns_trailing_comma/query.sql b/parser/testdata/03592_dictionary_columns_trailing_comma/query.sql new file mode 100644 index 000000000..b63a52a4a --- /dev/null +++ b/parser/testdata/03592_dictionary_columns_trailing_comma/query.sql @@ -0,0 +1 @@ +CREATE 
DICTIONARY t_03592 (k UInt64, v Decimal(8),) PRIMARY KEY k LAYOUT(FLAT()) SOURCE(HTTP(URL 'http://example.test/' FORMAT 'TSV')) LIFETIME(1000); diff --git a/parser/testdata/03592_distributed_alter_check_sharding_key/ast.json b/parser/testdata/03592_distributed_alter_check_sharding_key/ast.json new file mode 100644 index 000000000..7f08a0083 --- /dev/null +++ b/parser/testdata/03592_distributed_alter_check_sharding_key/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t1 (children 3)" + }, + { + "explain": " Identifier t1" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration c0 (children 1)" + }, + { + "explain": " DataType Int" + }, + { + "explain": " ColumnDeclaration c1 (children 1)" + }, + { + "explain": " DataType Int" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Distributed (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Literal 'test_shard_localhost'" + }, + { + "explain": " Identifier default" + }, + { + "explain": " Identifier t0" + }, + { + "explain": " Identifier c1" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001223506, + "rows_read": 15, + "bytes_read": 513 + } +} diff --git a/parser/testdata/03592_distributed_alter_check_sharding_key/metadata.json b/parser/testdata/03592_distributed_alter_check_sharding_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03592_distributed_alter_check_sharding_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03592_distributed_alter_check_sharding_key/query.sql b/parser/testdata/03592_distributed_alter_check_sharding_key/query.sql new file mode 100644 index 000000000..ba3341eac --- /dev/null +++ b/parser/testdata/03592_distributed_alter_check_sharding_key/query.sql @@ -0,0 +1,2 @@ +CREATE TABLE t1 (c0 Int, c1 Int) ENGINE = Distributed('test_shard_localhost', default, t0, `c1`); +ALTER TABLE t1 MODIFY COLUMN c1 String; -- { serverError TYPE_MISMATCH } diff --git a/parser/testdata/03592_s3queue_large_settings/ast.json b/parser/testdata/03592_s3queue_large_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03592_s3queue_large_settings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03592_s3queue_large_settings/metadata.json b/parser/testdata/03592_s3queue_large_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03592_s3queue_large_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03592_s3queue_large_settings/query.sql b/parser/testdata/03592_s3queue_large_settings/query.sql new file mode 100644 index 000000000..4e3742224 --- /dev/null +++ b/parser/testdata/03592_s3queue_large_settings/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest + +CREATE TABLE s3_queue (name String, value UInt32) ENGINE = S3Queue('http://localhost:11111/test/{a,b,c}.tsv', 'user', 'password', CSV) SETTINGS s3queue_tracked_files_limit = 18446744073709551615, mode = 'ordered'; + +DETACH TABLE s3_queue; +ATTACH TABLE s3_queue; diff --git a/parser/testdata/03593_allow_projection_with_parent_part_offset/ast.json b/parser/testdata/03593_allow_projection_with_parent_part_offset/ast.json new file mode 100644 index 000000000..bf388726e --- /dev/null +++ 
b/parser/testdata/03593_allow_projection_with_parent_part_offset/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 03593_t (children 1)" + }, + { + "explain": " Identifier 03593_t" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00144498, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/03593_allow_projection_with_parent_part_offset/metadata.json b/parser/testdata/03593_allow_projection_with_parent_part_offset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03593_allow_projection_with_parent_part_offset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03593_allow_projection_with_parent_part_offset/query.sql b/parser/testdata/03593_allow_projection_with_parent_part_offset/query.sql new file mode 100644 index 000000000..991061e42 --- /dev/null +++ b/parser/testdata/03593_allow_projection_with_parent_part_offset/query.sql @@ -0,0 +1,22 @@ +DROP TABLE IF EXISTS 03593_t; + +CREATE TABLE 03593_t ( + s String, + n UInt64, + PROJECTION prj_s_pos (SELECT _part_offset ORDER BY s)) +ENGINE = MergeTree +ORDER BY n +SETTINGS allow_part_offset_column_in_projections=0; -- {serverError BAD_ARGUMENTS} + +CREATE OR REPLACE TABLE 03593_t ( + s String, + n UInt64) +ENGINE = MergeTree +ORDER BY n +SETTINGS allow_part_offset_column_in_projections=0; + +ALTER TABLE 03593_t ADD projection prj_s_pos (SELECT _part_offset ORDER BY s); -- {serverError BAD_ARGUMENTS} + +SHOW CREATE TABLE 03593_t; + +DROP TABLE IF EXISTS 03593_t; diff --git a/parser/testdata/03593_any_join_swap_tables/ast.json b/parser/testdata/03593_any_join_swap_tables/ast.json new file mode 100644 index 000000000..12c64afd6 --- /dev/null +++ b/parser/testdata/03593_any_join_swap_tables/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery lhs (children 2)" + }, + { + "explain": " Identifier lhs" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration a (children 1)" + }, + { + "explain": " DataType UInt32" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001090109, + "rows_read": 6, + "bytes_read": 206 + } +} diff --git a/parser/testdata/03593_any_join_swap_tables/metadata.json b/parser/testdata/03593_any_join_swap_tables/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03593_any_join_swap_tables/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03593_any_join_swap_tables/query.sql b/parser/testdata/03593_any_join_swap_tables/query.sql new file mode 100644 index 000000000..ac90ee072 --- /dev/null +++ b/parser/testdata/03593_any_join_swap_tables/query.sql @@ -0,0 +1,27 @@ +CREATE TABLE lhs(a UInt32) +ENGINE = MergeTree +ORDER BY tuple(); + +CREATE TABLE rhs(a UInt32) +ENGINE = MergeTree +ORDER BY tuple(); + +INSERT INTO lhs VALUES (1), (2), (3); +INSERT INTO rhs SELECT * FROM numbers_mt(1e6); + +SET enable_parallel_replicas = 0; -- join swap/reordering disabled with parallel replicas +SET enable_analyzer = 1, query_plan_join_swap_table = 'auto'; + +SELECT * +FROM lhs +ANY JOIN rhs +ON lhs.a = rhs.a +FORMAT Null +SETTINGS log_comment = '03593_any_join_swap_tables'; + +SYSTEM FLUSH LOGS query_log; + +SELECT ProfileEvents['JoinBuildTableRowCount'] AS build_table_size +FROM system.query_log 
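-- (illustrative aside, not part of the original test: JoinBuildTableRowCount records how many
-- rows went into the build side of the hash join; with query_plan_join_swap_table = 'auto' the
-- planner is expected to choose the 3-row lhs rather than the 1e6-row rhs as the build table,
-- so the value read below should stay small.)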
+WHERE log_comment = '03593_any_join_swap_tables' AND current_database = currentDatabase() AND type = 'QueryFinish' AND event_date >= yesterday() AND event_time >= NOW() - INTERVAL '10 MINUTE'; + diff --git a/parser/testdata/03593_backup_with_broken_projection/ast.json b/parser/testdata/03593_backup_with_broken_projection/ast.json new file mode 100644 index 000000000..a5f7230b6 --- /dev/null +++ b/parser/testdata/03593_backup_with_broken_projection/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery 03593_backup_with_broken_projection (children 1)" + }, + { + "explain": " Identifier 03593_backup_with_broken_projection" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001089236, + "rows_read": 2, + "bytes_read": 123 + } +} diff --git a/parser/testdata/03593_backup_with_broken_projection/metadata.json b/parser/testdata/03593_backup_with_broken_projection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03593_backup_with_broken_projection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03593_backup_with_broken_projection/query.sql b/parser/testdata/03593_backup_with_broken_projection/query.sql new file mode 100644 index 000000000..6b79ec493 --- /dev/null +++ b/parser/testdata/03593_backup_with_broken_projection/query.sql @@ -0,0 +1,35 @@ +CREATE TABLE 03593_backup_with_broken_projection +( + `id` UInt64, + `string` String, + `time1` DateTime64(6), + `time2` DateTime64(6), + PROJECTION max_time + ( + SELECT + string, + max(time1), + max(time2) + GROUP BY string + ) +) +ENGINE = MergeTree +ORDER BY time1; + +INSERT INTO 03593_backup_with_broken_projection +SETTINGS max_block_size = 10000000, min_insert_block_size_rows = 10000000 +SELECT + number = 4000000, + 'test', + '2025-08-11', + '2025-08-11' +FROM system.numbers +LIMIT 5000000; + +ALTER TABLE 03593_backup_with_broken_projection + (UPDATE _row_exists = 0 WHERE id = 0) SETTINGS mutations_sync=1; + +ALTER TABLE 03593_backup_with_broken_projection + (UPDATE _row_exists = 0 WHERE id = 0) SETTINGS mutations_sync=1; + +BACKUP TABLE 03593_backup_with_broken_projection TO Null SETTINGS allow_backup_broken_projections = true, check_projection_parts = false FORMAT Null; \ No newline at end of file diff --git a/parser/testdata/03593_funcs_on_empty_string/ast.json b/parser/testdata/03593_funcs_on_empty_string/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03593_funcs_on_empty_string/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03593_funcs_on_empty_string/metadata.json b/parser/testdata/03593_funcs_on_empty_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03593_funcs_on_empty_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03593_funcs_on_empty_string/query.sql b/parser/testdata/03593_funcs_on_empty_string/query.sql new file mode 100644 index 000000000..c188d24d7 --- /dev/null +++ b/parser/testdata/03593_funcs_on_empty_string/query.sql @@ -0,0 +1,406 @@ +-- Tags: no-fasttest, no-openssl-fips +-- ^ certain functions are disabled in the fast test build. 
+-- ^ MD5 function is not available in FIPS build + +SET session_timezone = 'UTC'; + +SELECT BLAKE3(''); +SELECT CHARACTER_LENGTH(''); +SELECT CHAR_LENGTH(''); +SELECT CRC32(''); +SELECT CRC32IEEE(''); +SELECT CRC64(''); +SELECT FROM_BASE64(''); +SELECT IPv4StringToNumOrDefault(''); +SELECT IPv4StringToNumOrNull(''); +SELECT IPv6StringToNumOrDefault(''); +SELECT IPv6StringToNumOrNull(''); +SELECT JSONArrayLength(''); +SELECT JSONExtractArrayRaw(''); +SELECT JSONExtractArrayRawCaseInsensitive(''); +SELECT JSONExtractBool(''); +SELECT JSONExtractBoolCaseInsensitive(''); +SELECT JSONExtractFloat(''); +SELECT JSONExtractFloatCaseInsensitive(''); +SELECT JSONExtractInt(''); +SELECT JSONExtractIntCaseInsensitive(''); +SELECT JSONExtractKeys(''); +SELECT JSONExtractKeysAndValuesRaw(''); +SELECT JSONExtractKeysAndValuesRawCaseInsensitive(''); +SELECT JSONExtractKeysCaseInsensitive(''); +SELECT JSONExtractRaw(''); +SELECT JSONExtractRawCaseInsensitive(''); +SELECT JSONExtractString(''); +SELECT JSONExtractStringCaseInsensitive(''); +SELECT JSONExtractUInt(''); +SELECT JSONExtractUIntCaseInsensitive(''); +SELECT JSONHas(''); +SELECT JSONKey(''); +SELECT JSONLength(''); +SELECT JSONType(''); +SELECT JSON_ARRAY_LENGTH(''); +SELECT MACStringToNum(''); +SELECT MACStringToOUI(''); +SELECT MD4(''); +SELECT MD5(''); +SELECT OCTET_LENGTH(''); +SELECT RIPEMD160(''); +SELECT SHA1(''); +SELECT SHA224(''); +SELECT SHA256(''); +SELECT SHA384(''); +SELECT SHA512(''); +SELECT SHA512_256(''); +SELECT TO_BASE64(''); +SELECT URLHash(''); +SELECT URLHierarchy(''); +SELECT URLPathHierarchy(''); +SELECT UUIDStringToNum(''); +SELECT __scalarSubqueryResult(''); +SELECT alphaTokens(''); +SELECT any(''); +SELECT anyHeavy(''); +SELECT anyLast(''); +SELECT anyLastRespectNulls(''); +SELECT anyLast_respect_nulls(''); +SELECT anyRespectNulls(''); +SELECT anyValueRespectNulls(''); +SELECT any_respect_nulls(''); +SELECT any_value(''); +SELECT any_value_respect_nulls(''); +SELECT approx_top_count(''); +SELECT approx_top_k(''); +SELECT array(''); +SELECT array_agg(''); +SELECT ascii(''); +SELECT assumeNotNull(''); +SELECT base32Decode(''); +SELECT base32Encode(''); +SELECT base58Decode(''); +SELECT base58Encode(''); +SELECT base64Decode(''); +SELECT base64Encode(''); +SELECT base64URLDecode(''); +SELECT base64URLEncode(''); +SELECT basename(''); +SELECT bech32Decode(''); +SELECT bin(''); +SELECT bitCount(''); +SELECT bitNot(''); +SELECT blockSerializedSize(''); +SELECT cityHash64(''); +SELECT coalesce(''); +SELECT concat(''); +SELECT concatWithSeparator(''); +SELECT concatWithSeparatorAssumeInjective(''); +SELECT concat_ws(''); +SELECT count(''); +SELECT cutFragment(''); +SELECT cutQueryString(''); +SELECT cutQueryStringAndFragment(''); +SELECT cutToFirstSignificantSubdomain(''); +SELECT cutToFirstSignificantSubdomainRFC(''); +SELECT cutToFirstSignificantSubdomainWithWWW(''); +SELECT cutToFirstSignificantSubdomainWithWWWRFC(''); +SELECT cutWWW(''); +SELECT decodeHTMLComponent(''); +SELECT decodeURLComponent(''); +SELECT decodeURLFormComponent(''); +SELECT decodeXMLComponent(''); +SELECT defaultValueOfArgumentType(''); +SELECT demangle(''); +SELECT domain(''); +SELECT domainRFC(''); +SELECT domainWithoutWWW(''); +SELECT domainWithoutWWWRFC(''); +SELECT dumpColumnStructure(''); +SELECT empty(''); +SELECT encodeURLComponent(''); +SELECT encodeURLFormComponent(''); +SELECT encodeXMLComponent(''); +SELECT entropy(''); +SELECT estimateCompressionRatio(''); +SELECT extractKeyValuePairs(''); +SELECT extractKeyValuePairsWithEscaping(''); 
+SELECT extractTextFromHTML(''); +SELECT extractURLParameterNames(''); +SELECT extractURLParameters(''); +SELECT farmFingerprint64(''); +SELECT firstLine(''); +SELECT firstSignificantSubdomain(''); +SELECT firstSignificantSubdomainRFC(''); +SELECT firstValueRespectNulls(''); +SELECT first_value(''); +SELECT first_value_respect_nulls(''); +SELECT formatQueryOrNull(''); +SELECT formatQuerySingleLineOrNull(''); +SELECT fragment(''); +SELECT gccMurmurHash(''); +SELECT geohashDecode(''); +SELECT globalVariable(''); +SELECT greatest(''); +SELECT groupArray(''); +SELECT groupConcat(''); +SELECT groupUniqArray(''); +SELECT group_concat(''); +SELECT halfMD5(''); +SELECT hex(''); +SELECT hiveHash(''); +SELECT icebergHash(''); +SELECT identity(''); +SELECT idnaDecode(''); +SELECT idnaEncode(''); +SELECT ignore(''); +SELECT indexHint(''); +SELECT initcap(''); +SELECT initcapUTF8(''); +SELECT isConstant(''); +SELECT isIPv4String(''); +SELECT isIPv6String(''); +SELECT isNotNull(''); +SELECT isNull(''); +SELECT isNullable(''); +SELECT isValidJSON(''); +SELECT isValidUTF8(''); +SELECT javaHash(''); +SELECT javaHashUTF16LE(''); +SELECT kafkaMurmurHash(''); +SELECT keccak256(''); +SELECT lastValueRespectNulls(''); +SELECT last_value(''); +SELECT last_value_respect_nulls(''); +SELECT lcase(''); +SELECT least(''); +SELECT length(''); +SELECT lengthUTF8(''); +SELECT logTrace(''); +SELECT lower(''); +SELECT lowerUTF8(''); +SELECT ltrim(''); +SELECT mapFromString(''); +SELECT materialize(''); +SELECT max(''); +SELECT metroHash64(''); +SELECT min(''); +SELECT murmurHash2_32(''); +SELECT murmurHash2_64(''); +SELECT murmurHash3_128(''); +SELECT murmurHash3_32(''); +SELECT murmurHash3_64(''); +SELECT netloc(''); +SELECT ngramMinHash(''); +SELECT ngramMinHashArg(''); +SELECT ngramMinHashArgCaseInsensitive(''); +SELECT ngramMinHashArgCaseInsensitiveUTF8(''); +SELECT ngramMinHashArgUTF8(''); +SELECT ngramMinHashCaseInsensitive(''); +SELECT ngramMinHashCaseInsensitiveUTF8(''); +SELECT ngramMinHashUTF8(''); +SELECT ngramSimHash(''); +SELECT ngramSimHashCaseInsensitive(''); +SELECT ngramSimHashCaseInsensitiveUTF8(''); +SELECT ngramSimHashUTF8(''); +SELECT normalizeQuery(''); +SELECT normalizeQueryKeepNames(''); +SELECT normalizeUTF8NFC(''); +SELECT normalizeUTF8NFD(''); +SELECT normalizeUTF8NFKC(''); +SELECT normalizeUTF8NFKD(''); +SELECT normalizedQueryHash(''); +SELECT normalizedQueryHashKeepNames(''); +SELECT notEmpty(''); +SELECT nothing(''); +SELECT nothingNull(''); +SELECT nothingUInt64(''); +SELECT parseDateTime32BestEffortOrNull(''); +SELECT parseDateTime32BestEffortOrZero(''); +SELECT parseDateTime64BestEffortOrNull(''); +SELECT parseDateTime64BestEffortOrZero(''); +SELECT parseDateTime64BestEffortUSOrNull(''); +SELECT parseDateTime64BestEffortUSOrZero(''); +SELECT parseDateTime64InJodaSyntaxOrNull(''); +SELECT parseDateTime64InJodaSyntaxOrZero(''); +SELECT parseDateTime64OrNull(''); +SELECT parseDateTime64OrZero(''); +SELECT parseDateTimeBestEffortOrNull(''); +SELECT parseDateTimeBestEffortOrZero(''); +SELECT parseDateTimeBestEffortUSOrNull(''); +SELECT parseDateTimeBestEffortUSOrZero(''); +SELECT parseDateTimeInJodaSyntaxOrNull(''); +SELECT parseDateTimeInJodaSyntaxOrZero(''); +SELECT parseDateTimeOrNull(''); +SELECT parseDateTimeOrZero(''); +SELECT parseReadableSizeOrNull(''); +SELECT parseReadableSizeOrZero(''); +SELECT partitionID(''); +SELECT partitionId(''); +SELECT path(''); +SELECT pathFull(''); +SELECT port(''); +SELECT portRFC(''); +SELECT protocol(''); +SELECT punycodeDecode(''); +SELECT 
punycodeEncode(''); +SELECT queryString(''); +SELECT queryStringAndFragment(''); +SELECT regexpQuoteMeta(''); +SELECT reinterpretAsDate(''); +SELECT reinterpretAsDateTime(''); +SELECT reinterpretAsFloat32(''); +SELECT reinterpretAsFloat64(''); +SELECT reinterpretAsInt128(''); +SELECT reinterpretAsInt16(''); +SELECT reinterpretAsInt256(''); +SELECT reinterpretAsInt32(''); +SELECT reinterpretAsInt64(''); +SELECT reinterpretAsInt8(''); +SELECT reinterpretAsString(''); +SELECT reinterpretAsUInt128(''); +SELECT reinterpretAsUInt16(''); +SELECT reinterpretAsUInt256(''); +SELECT reinterpretAsUInt32(''); +SELECT reinterpretAsUInt64(''); +SELECT reinterpretAsUInt8(''); +SELECT reinterpretAsUUID(''); +SELECT reverse(''); +SELECT reverseUTF8(''); +SELECT rtrim(''); +SELECT singleValueOrNull(''); +SELECT sipHash128(''); +SELECT sipHash128Reference(''); +SELECT sipHash64(''); +SELECT soundex(''); +SELECT sparseGrams(''); +SELECT sparseGramsHashes(''); +SELECT sparseGramsHashesUTF8(''); +SELECT sparseGramsUTF8(''); +SELECT splitByAlpha(''); +SELECT splitByNonAlpha(''); +SELECT splitByWhitespace(''); +SELECT sqidDecode(''); +SELECT str_to_date(''); +SELECT str_to_map(''); +SELECT stringBytesEntropy(''); +SELECT stringBytesUniq(''); +SELECT timestamp(''); +SELECT toBFloat16OrNull(''); +SELECT toBFloat16OrZero(''); +SELECT toColumnTypeName(''); +SELECT toDate32OrDefault(''); +SELECT toDate32OrNull(''); +SELECT toDate32OrZero(''); +SELECT toDateOrDefault(''); +SELECT toDateOrNull(''); +SELECT toDateOrZero(''); +SELECT toDateTime64OrNull(''); +SELECT toDateTime64OrZero(''); +SELECT toDateTimeOrDefault(''); +SELECT toDateTimeOrNull(''); +SELECT toDateTimeOrZero(''); +SELECT toFloat32OrDefault(''); +SELECT toFloat32OrNull(''); +SELECT toFloat32OrZero(''); +SELECT toFloat64OrDefault(''); +SELECT toFloat64OrNull(''); +SELECT toFloat64OrZero(''); +SELECT toIPv4OrDefault(''); +SELECT toIPv4OrNull(''); +SELECT toIPv4OrZero(''); +SELECT toIPv6OrDefault(''); +SELECT toIPv6OrNull(''); +SELECT toIPv6OrZero(''); +SELECT toInt128OrDefault(''); +SELECT toInt128OrNull(''); +SELECT toInt128OrZero(''); +SELECT toInt16OrDefault(''); +SELECT toInt16OrNull(''); +SELECT toInt16OrZero(''); +SELECT toInt256OrDefault(''); +SELECT toInt256OrNull(''); +SELECT toInt256OrZero(''); +SELECT toInt32OrDefault(''); +SELECT toInt32OrNull(''); +SELECT toInt32OrZero(''); +SELECT toInt64OrDefault(''); +SELECT toInt64OrNull(''); +SELECT toInt64OrZero(''); +SELECT toInt8OrDefault(''); +SELECT toInt8OrNull(''); +SELECT toInt8OrZero(''); +SELECT toJSONString(''); +SELECT toLowCardinality(''); +SELECT toModifiedJulianDayOrNull(''); +SELECT toNullable(''); +SELECT toString(''); +SELECT toStringCutToZero(''); +SELECT toTime64OrNull(''); +SELECT toTime64OrZero(''); +SELECT toTimeOrNull(''); +SELECT toTimeOrZero(''); +SELECT toTypeName(''); +SELECT toUInt128OrDefault(''); +SELECT toUInt128OrNull(''); +SELECT toUInt128OrZero(''); +SELECT toUInt16OrDefault(''); +SELECT toUInt16OrNull(''); +SELECT toUInt16OrZero(''); +SELECT toUInt256OrDefault(''); +SELECT toUInt256OrNull(''); +SELECT toUInt256OrZero(''); +SELECT toUInt32OrDefault(''); +SELECT toUInt32OrNull(''); +SELECT toUInt32OrZero(''); +SELECT toUInt64OrDefault(''); +SELECT toUInt64OrNull(''); +SELECT toUInt64OrZero(''); +SELECT toUInt8OrDefault(''); +SELECT toUInt8OrNull(''); +SELECT toUInt8OrZero(''); +SELECT toUUIDOrDefault(''); +SELECT toUUIDOrNull(''); +SELECT toUUIDOrZero(''); +SELECT toValidUTF8(''); +SELECT tokens(''); +SELECT topK(''); +SELECT topLevelDomain(''); +SELECT 
topLevelDomainRFC(''); +SELECT trim(''); +SELECT trimBoth(''); +SELECT trimLeft(''); +SELECT trimRight(''); +SELECT tryBase32Decode(''); +SELECT tryBase58Decode(''); +SELECT tryBase64Decode(''); +SELECT tryBase64URLDecode(''); +SELECT tryIdnaEncode(''); +SELECT tryPunycodeDecode(''); +SELECT tuple(''); +SELECT ucase(''); +SELECT unbin(''); +SELECT unhex(''); +SELECT uniq(''); +SELECT uniqCombined(''); +SELECT uniqCombined64(''); +SELECT uniqExact(''); +SELECT uniqHLL12(''); +SELECT uniqTheta(''); +SELECT uniqUpTo(''); +SELECT upper(''); +SELECT upperUTF8(''); +SELECT visibleWidth(''); +SELECT wordShingleMinHash(''); +SELECT wordShingleMinHashArg(''); +SELECT wordShingleMinHashArgCaseInsensitive(''); +SELECT wordShingleMinHashArgCaseInsensitiveUTF8(''); +SELECT wordShingleMinHashArgUTF8(''); +SELECT wordShingleMinHashCaseInsensitive(''); +SELECT wordShingleMinHashCaseInsensitiveUTF8(''); +SELECT wordShingleMinHashUTF8(''); +SELECT wordShingleSimHash(''); +SELECT wordShingleSimHashCaseInsensitive(''); +SELECT wordShingleSimHashCaseInsensitiveUTF8(''); +SELECT wordShingleSimHashUTF8(''); +SELECT wyHash64(''); +SELECT xxHash32(''); +SELECT xxHash64(''); +SELECT xxh3(''); diff --git a/parser/testdata/03593_remote_map_in/ast.json b/parser/testdata/03593_remote_map_in/ast.json new file mode 100644 index 000000000..5c5ddf9b4 --- /dev/null +++ b/parser/testdata/03593_remote_map_in/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001214022, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03593_remote_map_in/metadata.json b/parser/testdata/03593_remote_map_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03593_remote_map_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03593_remote_map_in/query.sql b/parser/testdata/03593_remote_map_in/query.sql new file mode 100644 index 000000000..e52bfd936 --- /dev/null +++ b/parser/testdata/03593_remote_map_in/query.sql @@ -0,0 +1,6 @@ +SET enable_analyzer = 1; + +SELECT + arrayJoin(arrayMap(tag -> (map('a', 'value1', 'b', 'value2')[tag]), ['a', 'b'])) AS path, + path IN ['Uncategorized'] AS in_category +FROM remote('127.0.0.{1,2}', system.one); diff --git a/parser/testdata/03594_coalescing_merge_tree_segfault/ast.json b/parser/testdata/03594_coalescing_merge_tree_segfault/ast.json new file mode 100644 index 000000000..0184dec71 --- /dev/null +++ b/parser/testdata/03594_coalescing_merge_tree_segfault/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000927123, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03594_coalescing_merge_tree_segfault/metadata.json b/parser/testdata/03594_coalescing_merge_tree_segfault/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03594_coalescing_merge_tree_segfault/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03594_coalescing_merge_tree_segfault/query.sql b/parser/testdata/03594_coalescing_merge_tree_segfault/query.sql new file mode 100644 index 000000000..39ba9f07e --- /dev/null +++ b/parser/testdata/03594_coalescing_merge_tree_segfault/query.sql @@ -0,0 +1,4 @@ +SET allow_suspicious_primary_key = 1; +CREATE TABLE t0 (c0 String) ENGINE = 
CoalescingMergeTree() ORDER BY tuple(); +INSERT INTO TABLE t0 (c0) VALUES ('playl哪国人[]美国认识你很高兴'); +SELECT * FROM t0; diff --git a/parser/testdata/03594_constraint_subqery_logical_error/ast.json b/parser/testdata/03594_constraint_subqery_logical_error/ast.json new file mode 100644 index 000000000..749bdb92d --- /dev/null +++ b/parser/testdata/03594_constraint_subqery_logical_error/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery check_constraint (children 3)" + }, + { + "explain": " Identifier check_constraint" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration c0 (children 1)" + }, + { + "explain": " DataType Int" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001556648, + "rows_read": 11, + "bytes_read": 398 + } +} diff --git a/parser/testdata/03594_constraint_subqery_logical_error/metadata.json b/parser/testdata/03594_constraint_subqery_logical_error/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03594_constraint_subqery_logical_error/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03594_constraint_subqery_logical_error/query.sql b/parser/testdata/03594_constraint_subqery_logical_error/query.sql new file mode 100644 index 000000000..4e771f2b4 --- /dev/null +++ b/parser/testdata/03594_constraint_subqery_logical_error/query.sql @@ -0,0 +1,10 @@ +CREATE TABLE check_constraint (c0 Int) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO TABLE check_constraint (c0) VALUES (1); +ALTER TABLE check_constraint ADD CONSTRAINT c0 CHECK (SELECT 1); +INSERT INTO TABLE check_constraint (c0) VALUES (1); -- { serverError UNKNOWN_IDENTIFIER } +SELECT 1 FROM check_constraint WHERE 1 = 1 SETTINGS optimize_substitute_columns = 1, convert_query_to_cnf = 1; + +CREATE TABLE assume_constraint (c0 Int) ENGINE = MergeTree() ORDER BY tuple(); +ALTER TABLE assume_constraint ADD CONSTRAINT c0 ASSUME (SELECT 1); +INSERT INTO TABLE assume_constraint (c0) VALUES (1); +SELECT 1 FROM assume_constraint WHERE 1 = 1 SETTINGS optimize_substitute_columns = 1, convert_query_to_cnf = 1; diff --git a/parser/testdata/03594_funcs_on_empty_arguments/ast.json b/parser/testdata/03594_funcs_on_empty_arguments/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03594_funcs_on_empty_arguments/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03594_funcs_on_empty_arguments/metadata.json b/parser/testdata/03594_funcs_on_empty_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03594_funcs_on_empty_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03594_funcs_on_empty_arguments/query.sql b/parser/testdata/03594_funcs_on_empty_arguments/query.sql new file mode 100644 index 000000000..a88b53e49 --- /dev/null +++ b/parser/testdata/03594_funcs_on_empty_arguments/query.sql @@ -0,0 +1,71 @@ +-- Tags: no-fasttest +-- ^ certain functions are disabled in the fast test build. 
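-- (illustrative aside on the constraint test above, not part of this file: a scalar CHECK
-- constraint is evaluated against inserted rows, e.g., with hypothetical names:
--   CREATE TABLE t_check (c0 Int, CONSTRAINT c0_positive CHECK c0 > 0) ENGINE = MergeTree ORDER BY tuple();
--   INSERT INTO t_check VALUES (-1); -- rejected with VIOLATED_CONSTRAINT
-- a subquery such as CHECK (SELECT 1) cannot be evaluated per row, which is why the INSERT
-- above fails with UNKNOWN_IDENTIFIER once that constraint is added.)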
+ +SET session_timezone = 'UTC'; + +SELECT array() FROM system.one; +SELECT arrayZip() FROM system.one; +SELECT arrayZipUnaligned() FROM system.one; +SELECT blockSerializedSize() FROM system.one; +SELECT blockSize() FROM system.one; +SELECT byteSize() FROM system.one; +SELECT cityHash64() FROM system.one; +SELECT coalesce() FROM system.one; +SELECT concat() FROM system.one; +SELECT concatAssumeInjective() FROM system.one; +SELECT corrMatrix() FROM system.one; +SELECT count() FROM system.one; +SELECT covarPopMatrix() FROM system.one; +SELECT covarSampMatrix() FROM system.one; +SELECT e() FROM system.one; +SELECT emptyArrayDate() FROM system.one; +SELECT emptyArrayDateTime() FROM system.one; +SELECT emptyArrayFloat32() FROM system.one; +SELECT emptyArrayFloat64() FROM system.one; +SELECT emptyArrayInt16() FROM system.one; +SELECT emptyArrayInt32() FROM system.one; +SELECT emptyArrayInt64() FROM system.one; +SELECT emptyArrayInt8() FROM system.one; +SELECT emptyArrayString() FROM system.one; +SELECT emptyArrayUInt16() FROM system.one; +SELECT emptyArrayUInt32() FROM system.one; +SELECT emptyArrayUInt64() FROM system.one; +SELECT emptyArrayUInt8() FROM system.one; +SELECT farmFingerprint64() FROM system.one; +SELECT farmHash64() FROM system.one; +SELECT gccMurmurHash() FROM system.one; +SELECT h3GetRes0Indexes() FROM system.one; +SELECT halfMD5() FROM system.one; +SELECT hiveHash() FROM system.one; +SELECT ignore() FROM system.one; +SELECT indexHint() FROM system.one; +SELECT javaHash() FROM system.one; +SELECT javaHashUTF16LE() FROM system.one; +SELECT kafkaMurmurHash() FROM system.one; +SELECT map() FROM system.one; +SELECT metroHash64() FROM system.one; +SELECT murmurHash2_32() FROM system.one; +SELECT murmurHash2_64() FROM system.one; +SELECT murmurHash3_128() FROM system.one; +SELECT murmurHash3_32() FROM system.one; +SELECT murmurHash3_64() FROM system.one; +SELECT nothing() FROM system.one; +SELECT nothingNull() FROM system.one; +SELECT nothingUInt64() FROM system.one; +SELECT pi() FROM system.one; +SELECT rowNumberInAllBlocks() FROM system.one; +SELECT rowNumberInBlock() FROM system.one; +SELECT sipHash128() FROM system.one; +SELECT sipHash128Keyed() FROM system.one; +SELECT sipHash128Reference() FROM system.one; +SELECT sipHash128ReferenceKeyed() FROM system.one; +SELECT sipHash64() FROM system.one; +SELECT sipHash64Keyed() FROM system.one; +SELECT tcpPort() FROM system.one; +SELECT timeZone() FROM system.one; +SELECT timezone() FROM system.one; +SELECT tuple() FROM system.one; +SELECT wyHash64() FROM system.one; +SELECT xxHash32() FROM system.one; +SELECT xxHash64() FROM system.one; +SELECT xxh3() FROM system.one; diff --git a/parser/testdata/03594_is_valid_ascii/ast.json b/parser/testdata/03594_is_valid_ascii/ast.json new file mode 100644 index 000000000..43057a6e3 --- /dev/null +++ b/parser/testdata/03594_is_valid_ascii/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function isValidASCII (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " TablesInSelectQuery (children 
1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.numbers" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001346207, + "rows_read": 15, + "bytes_read": 581 + } +} diff --git a/parser/testdata/03594_is_valid_ascii/metadata.json b/parser/testdata/03594_is_valid_ascii/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03594_is_valid_ascii/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03594_is_valid_ascii/query.sql b/parser/testdata/03594_is_valid_ascii/query.sql new file mode 100644 index 000000000..4fe5e767b --- /dev/null +++ b/parser/testdata/03594_is_valid_ascii/query.sql @@ -0,0 +1,35 @@ +SELECT 1 = isValidASCII('') FROM system.numbers LIMIT 1; +SELECT 1 = isValidASCII('some text') FROM system.numbers LIMIT 1; +SELECT 1 = isValidASCII('\x00') FROM system.numbers LIMIT 1; +SELECT 1 = isValidASCII('\x66') FROM system.numbers LIMIT 1; +SELECT 1 = isValidASCII('\x7F') FROM system.numbers LIMIT 1; +SELECT 1 = isValidASCII('\x00\x7F') FROM system.numbers LIMIT 1; +SELECT 1 = isValidASCII('\x7F\x00') FROM system.numbers LIMIT 1; +SELECT 0 = isValidASCII('какой-то текст') FROM system.numbers LIMIT 1; +SELECT 0 = isValidASCII('\xC2\x80') FROM system.numbers LIMIT 1; +SELECT 1 = isValidASCII('hello world!') FROM system.numbers LIMIT 1; + +SELECT 1 = isASCII('') FROM system.numbers LIMIT 1; +SELECT 1 = isASCII('some text') FROM system.numbers LIMIT 1; +SELECT 1 = isASCII('\x00') FROM system.numbers LIMIT 1; +SELECT 1 = isASCII('\x66') FROM system.numbers LIMIT 1; +SELECT 1 = isASCII('\x7F') FROM system.numbers LIMIT 1; +SELECT 1 = isASCII('\x00\x7F') FROM system.numbers LIMIT 1; +SELECT 1 = isASCII('\x7F\x00') FROM system.numbers LIMIT 1; +SELECT 0 = isASCII('какой-то текст') FROM system.numbers LIMIT 1; +SELECT 0 = isASCII('\xC2\x80') FROM system.numbers LIMIT 1; +SELECT 1 = isASCII('hello world!') FROM system.numbers LIMIT 1; + +SELECT isValidASCII(toString(number)) FROM system.numbers WHERE number < 10; + +SELECT 1 = isValidASCII('\x00') FROM system.numbers LIMIT 1; +SELECT 1 = isValidASCII('\x7F') FROM system.numbers LIMIT 1; +SELECT 0 = isValidASCII('\x80') FROM system.numbers LIMIT 1; +SELECT 0 = isValidASCII('\xFF') FROM system.numbers LIMIT 1; + +SELECT 0 = isValidASCII('Hello\x80World') FROM system.numbers LIMIT 1; +SELECT 0 = isValidASCII('ASCII\xC2\x80Text') FROM system.numbers LIMIT 1; +SELECT 1 = isValidASCII('Pure ASCII 123 !@#') FROM system.numbers LIMIT 1; + +SELECT 1 = isValidASCII(toFixedString('ASCII', 5)) FROM system.numbers LIMIT 1; +SELECT 0 = isValidASCII(toFixedString('ASCII\x80', 6)) FROM system.numbers LIMIT 1; \ No newline at end of file diff --git a/parser/testdata/03594_is_valid_ascii_errors/ast.json b/parser/testdata/03594_is_valid_ascii_errors/ast.json new file mode 100644 index 000000000..f16f3f3c4 --- /dev/null +++ b/parser/testdata/03594_is_valid_ascii_errors/ast.json @@ -0,0 +1,43 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function isValidASCII (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 
Array_[UInt64_1, UInt64_2, UInt64_3]" + } + ], + + "rows": 7, + + "statistics": + { + "elapsed": 0.001144325, + "rows_read": 7, + "bytes_read": 293 + } +} diff --git a/parser/testdata/03594_is_valid_ascii_errors/metadata.json b/parser/testdata/03594_is_valid_ascii_errors/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03594_is_valid_ascii_errors/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03594_is_valid_ascii_errors/query.sql b/parser/testdata/03594_is_valid_ascii_errors/query.sql new file mode 100644 index 000000000..3ed48843f --- /dev/null +++ b/parser/testdata/03594_is_valid_ascii_errors/query.sql @@ -0,0 +1,4 @@ +SELECT isValidASCII([1, 2, 3]); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT isValidASCII(toUUID('00000000-0000-0000-0000-000000000000')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT isValidASCII(toIPv6('::1')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT isValidASCII(toIPv4('127.0.0.1')); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } \ No newline at end of file diff --git a/parser/testdata/03594_json_extract_decimal_precision/ast.json b/parser/testdata/03594_json_extract_decimal_precision/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03594_json_extract_decimal_precision/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03594_json_extract_decimal_precision/metadata.json b/parser/testdata/03594_json_extract_decimal_precision/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03594_json_extract_decimal_precision/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03594_json_extract_decimal_precision/query.sql b/parser/testdata/03594_json_extract_decimal_precision/query.sql new file mode 100644 index 000000000..84cd371fb --- /dev/null +++ b/parser/testdata/03594_json_extract_decimal_precision/query.sql @@ -0,0 +1,117 @@ +-- Test for JSONExtract Decimal precision preservation +-- This test verifies that JSONExtract correctly handles decimal values +-- without losing precision when extracting from JSON numbers +-- Fixes issue #69082 + +DROP TABLE IF EXISTS test_json_decimal_precision; + +CREATE TABLE test_json_decimal_precision +( + id UInt32, + json_data String +) ENGINE = Memory; + +-- Insert test data with decimal values that could lose precision +INSERT INTO test_json_decimal_precision VALUES +(1, '{"dec": 111244542.003}'), +(2, '{"dec": "111244542.003"}'), +(3, '{"dec": 123.4567890123456789}'), +(4, '{"dec": "123.4567890123456789"}'), +(5, '{"dec": 0.0000000000000001}'), +(6, '{"dec": "0.0000000000000001"}'), +(7, '{"dec": 999999999999999.9999999999999999}'), +(8, '{"dec": "999999999999999.9999999999999999"}'), +(9, '{"dec": 1.23456789012345678901234567890123456789}'), +(10, '{"dec": "1.23456789012345678901234567890123456789"}'), +(11, '{"dec": 0.00000000000000000000000000000000000001}'), +(12, '{"dec": "0.00000000000000000000000000000000000001"}'), +(13, '{"dec": 0.1}'), +(14, '{"dec": "0.1"}'), +(15, '{"dec": 0.123456789012345678901234567890123456789}'), +(16, '{"dec": "0.123456789012345678901234567890123456789"}'); + +-- Test 1: Verify that JSONExtract preserves precision for numeric JSON values +SELECT + 'Test 1: JSONExtract with numeric JSON values' as test_name, + id, + json_data, + JSONExtract(json_data, 'dec', 'Decimal(32,16)') as extracted_decimal, + JSONExtractString(json_data, 'dec')::Decimal(32,16) as string_cast_decimal, + 
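-- (illustrative aside, not part of the original test: the equality below is the core check;
-- precision could be lost when a JSON number was first parsed as a binary float before the
-- conversion to Decimal, whereas JSONExtractString(...)::Decimal goes through the exact
-- decimal string, so the two sides agreeing indicates the digits survived intact.)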
JSONExtract(json_data, 'dec', 'Decimal(32,16)') = JSONExtractString(json_data, 'dec')::Decimal(32,16) as precision_preserved +FROM test_json_decimal_precision +WHERE id IN (1, 3, 5, 7, 9, 11, 13, 15) +ORDER BY id; + +-- Test 2: Verify that JSONExtract preserves precision for string JSON values +SELECT + 'Test 2: JSONExtract with string JSON values' as test_name, + id, + json_data, + JSONExtract(json_data, 'dec', 'Decimal(32,16)') as extracted_decimal, + JSONExtractString(json_data, 'dec')::Decimal(32,16) as string_cast_decimal, + JSONExtract(json_data, 'dec', 'Decimal(32,16)') = JSONExtractString(json_data, 'dec')::Decimal(32,16) as precision_preserved +FROM test_json_decimal_precision +WHERE id IN (2, 4, 6, 8, 10, 12, 14, 16) +ORDER BY id; + +-- Test 3: Verify specific precision values match expected results +SELECT + 'Test 3: Specific precision verification' as test_name, + id, + json_data, + JSONExtract(json_data, 'dec', 'Decimal(32,16)') as extracted_decimal, + JSONExtractString(json_data, 'dec')::Decimal(32,16) as string_cast_decimal, + JSONExtract(json_data, 'dec', 'Decimal(32,16)') = JSONExtractString(json_data, 'dec')::Decimal(32,16) as precision_preserved +FROM test_json_decimal_precision +ORDER BY id; + +-- Test 4: Verify that the fix resolves the original issue from #69082 +SELECT + 'Test 4: Original issue #69082 verification' as test_name, + JSONExtract('{"dec": 111244542.003}', 'dec', 'Decimal(31,16)') as direct_extraction, + JSONExtractString('{"dec": 111244542.003}', 'dec')::Decimal(32,16) as string_cast, + toString(JSONExtract('{"dec": 111244542.003}', 'dec', 'Decimal(31,16)')) as direct_extraction_str, + toString(JSONExtractString('{"dec": 111244542.003}', 'dec')::Decimal(32,16)) as string_cast_str, + JSONExtract('{"dec": 111244542.003}', 'dec', 'Decimal(31,16)') = JSONExtractString('{"dec": 111244542.003}', 'dec')::Decimal(32,16) as issue_resolved; + +-- Test 5: Edge cases with very large and very small numbers +SELECT + 'Test 5: Edge cases with extreme precision' as test_name, + id, + json_data, + JSONExtract(json_data, 'dec', 'Decimal(38,37)') as extracted_decimal, + JSONExtractString(json_data, 'dec')::Decimal(38,37) as string_cast_decimal, + JSONExtract(json_data, 'dec', 'Decimal(38,37)') = JSONExtractString(json_data, 'dec')::Decimal(38,37) as precision_preserved +FROM test_json_decimal_precision +WHERE id IN (9, 10, 11, 12) +ORDER BY id; + +-- Test 6: Performance test with multiple values +SELECT + 'Test 6: Performance test with multiple values' as test_name, + count() as total_rows, + countIf(JSONExtract(json_data, 'dec', 'Decimal(32,16)') = JSONExtractString(json_data, 'dec')::Decimal(32,16)) as precision_preserved_count, + countIf(JSONExtract(json_data, 'dec', 'Decimal(32,16)') != JSONExtractString(json_data, 'dec')::Decimal(32,16)) as precision_lost_count +FROM test_json_decimal_precision; + +-- Test 7: Verify that the fix works with different decimal scales +SELECT + 'Test 7: Different decimal scales' as test_name, + JSONExtract('{"dec": 123.456}', 'dec', 'Decimal(10,2)') as scale_2, + JSONExtract('{"dec": 123.456}', 'dec', 'Decimal(10,3)') as scale_3, + JSONExtract('{"dec": 123.456}', 'dec', 'Decimal(10,4)') as scale_4, + JSONExtract('{"dec": 123.456}', 'dec', 'Decimal(10,5)') as scale_5; + +-- Test 8: Verify that the fix works with negative numbers +SELECT + 'Test 8: Negative numbers' as test_name, + JSONExtract('{"dec": -111244542.003}', 'dec', 'Decimal(32,16)') as negative_extraction, + JSONExtractString('{"dec": -111244542.003}', 'dec')::Decimal(32,16) as 
negative_string_cast, + JSONExtract('{"dec": -111244542.003}', 'dec', 'Decimal(32,16)') = JSONExtractString('{"dec": -111244542.003}', 'dec')::Decimal(32,16) as negative_precision_preserved; + +-- Test 9: Simple case that demonstrates the fix +SELECT + 'Test 9: Simple precision demonstration' as test_name, + JSONExtract('{"amount": 16.4}', 'amount', 'Decimal64(6)') as json_extract_result, + CAST(JSONExtractString('{"amount": 16.4}', 'amount'), 'Decimal64(6)') as string_cast_result, + JSONExtract('{"amount": 16.4}', 'amount', 'Decimal64(6)') = CAST(JSONExtractString('{"amount": 16.4}', 'amount'), 'Decimal64(6)') as precision_preserved; \ No newline at end of file diff --git a/parser/testdata/03594_like_perfect_affix_rewrite_pass/ast.json b/parser/testdata/03594_like_perfect_affix_rewrite_pass/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03594_like_perfect_affix_rewrite_pass/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03594_like_perfect_affix_rewrite_pass/metadata.json b/parser/testdata/03594_like_perfect_affix_rewrite_pass/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03594_like_perfect_affix_rewrite_pass/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03594_like_perfect_affix_rewrite_pass/query.sql b/parser/testdata/03594_like_perfect_affix_rewrite_pass/query.sql new file mode 100644 index 000000000..90c98c1bc --- /dev/null +++ b/parser/testdata/03594_like_perfect_affix_rewrite_pass/query.sql @@ -0,0 +1,168 @@ +-- Test for LikePerfectAffixRewritePass optimization in analyzer + +SET enable_analyzer = 1; +SET optimize_rewrite_like_perfect_affix = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab ( + id UInt32, + col_string String, + col_fixedstring FixedString(32), + col_lowcardinality_string LowCardinality(String), + col_lowcardinality_fixedstring LowCardinality(FixedString(32)), + col_nullable_string Nullable(String), + col_nullable_fixedstring Nullable(FixedString(32)), + col_lowcardinality_nullable_string LowCardinality(Nullable(String)), + col_lowcardinality_nullable_fixedstring LowCardinality(Nullable(FixedString(32))) +) ENGINE = MergeTree() +ORDER BY col_string; + +INSERT INTO tab VALUES + (1, 'apple', 'fruit', 'aaa', 'aaa', 'aaa', 'aaa', 'aaa', 'aaa'), + (2, 'application', 'software', 'bbb', 'bbb', 'bbb', 'bbb', 'bbb', 'bbb'), + (3, 'apply', 'verb', 'ccc', 'ccc', 'ccc', 'ccc', 'ccc', 'ccc'), + (4, 'banana', 'fruit', 'aaa', 'aaa', 'aaa', 'aaa', 'aaa', 'aaa'), + (5, 'band', 'music', 'bbb', 'bbb', 'bbb', 'bbb', 'bbb', 'bbb'), + (6, 'Test', 'other', 'ccc', 'ccc', 'ccc', 'ccc', 'ccc', 'ccc'), + (7, 'A-Test', 'another', 'aaa', 'aaa', 'aaa', 'aaa', 'aaa', 'aaa'); + +SELECT '-- Test LIKE perfect prefix on String column - should be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_string LIKE 'app%'; +SELECT count() FROM tab WHERE col_string LIKE 'app%'; +SELECT count() FROM tab WHERE col_string LIKE 'app%' SETTINGS optimize_rewrite_like_perfect_affix = 0; +SELECT '-- Test LIKE perfect suffix on String column - should be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_string LIKE '%Test'; +SELECT count() FROM tab WHERE col_string LIKE '%Test'; +SELECT count() FROM tab WHERE col_string LIKE '%Test' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +SELECT '-- Test LIKE perfect prefix on FixedString column - should be rewritten'; +EXPLAIN SYNTAX 
run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_fixedstring LIKE 'fruit%'; +SELECT count() FROM tab WHERE col_fixedstring LIKE 'fruit%'; +SELECT count() FROM tab WHERE col_fixedstring LIKE 'fruit%' SETTINGS optimize_rewrite_like_perfect_affix = 0; +SELECT '-- Test LIKE perfect suffix on FixedString column - should NOT be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_fixedstring LIKE '%ther\0'; +SELECT count() FROM tab WHERE col_fixedstring LIKE '%ther\0'; +SELECT count() FROM tab WHERE col_fixedstring LIKE '%ther\0' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +SELECT '-- Test NOT LIKE with perfect prefix on String column - should be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_string NOT LIKE 'app%'; +SELECT count() FROM tab WHERE col_string NOT LIKE 'app%'; +SELECT count() FROM tab WHERE col_string NOT LIKE 'app%' SETTINGS optimize_rewrite_like_perfect_affix = 0; +SELECT '-- Test NOT LIKE with perfect suffix on String column - should be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_string NOT LIKE '%Test'; +SELECT count() FROM tab WHERE col_string NOT LIKE '%Test'; +SELECT count() FROM tab WHERE col_string NOT LIKE '%Test' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +SELECT '-- Test ILIKE with perfect prefix on String column - should NOT be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_string ILIKE 'APP%'; +SELECT count() FROM tab WHERE col_string ILIKE 'APP%'; +SELECT count() FROM tab WHERE col_string ILIKE 'APP%' SETTINGS optimize_rewrite_like_perfect_affix = 0; +SELECT '-- Test ILIKE with perfect suffix on String column - should NOT be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_string ILIKE '%TeST'; +SELECT count() FROM tab WHERE col_string ILIKE '%TeST'; +SELECT count() FROM tab WHERE col_string ILIKE '%TeST' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +SELECT '-- Test NOT ILIKE with perfect prefix on String column - should NOT be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_string NOT ILIKE 'APP%'; +SELECT count() FROM tab WHERE col_string NOT ILIKE 'APP%'; +SELECT count() FROM tab WHERE col_string NOT ILIKE 'APP%' SETTINGS optimize_rewrite_like_perfect_affix = 0; +SELECT '-- Test NOT ILIKE with perfect suffix on String column - should NOT be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_string NOT ILIKE '%TeST'; +SELECT count() FROM tab WHERE col_string NOT ILIKE '%TeST'; +SELECT count() FROM tab WHERE col_string NOT ILIKE '%TeST' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +SELECT '-- Test multiple LIKE conditions - all eligible ones should be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_string LIKE 'app%' AND col_fixedstring LIKE 'fruit%'; +SELECT count() FROM tab WHERE col_string LIKE 'app%' AND col_fixedstring LIKE 'fruit%'; +SELECT count() FROM tab WHERE col_string LIKE 'app%' AND col_fixedstring LIKE 'fruit%' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +SELECT '-- Test without perfect affix - should NOT be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_string LIKE 'app_ication%'; +SELECT count() FROM tab WHERE col_string LIKE 'app_ication%'; +SELECT count() FROM tab WHERE col_string LIKE 'app_ication%' SETTINGS optimize_rewrite_like_perfect_affix = 
0; + +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_string LIKE '%app_ication'; +SELECT count() FROM tab WHERE col_string LIKE '%app_ication'; +SELECT count() FROM tab WHERE col_string LIKE '%app_ication' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_string LIKE '%app%'; +SELECT count() FROM tab WHERE col_string LIKE '%app%'; +SELECT count() FROM tab WHERE col_string LIKE '%app%' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_string LIKE '_est%'; +SELECT count() FROM tab WHERE col_string LIKE '_est%'; +SELECT count() FROM tab WHERE col_string LIKE '_est%' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_string LIKE 'Test'; +SELECT count() FROM tab WHERE col_string LIKE 'Test'; +SELECT count() FROM tab WHERE col_string LIKE 'Test' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_string LIKE '%'; +SELECT count() FROM tab WHERE col_string LIKE '%'; +SELECT count() FROM tab WHERE col_string LIKE '%' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_string NOT LIKE 'app_ication%'; +SELECT count() FROM tab WHERE col_string NOT LIKE 'app_ication%'; +SELECT count() FROM tab WHERE col_string NOT LIKE 'app_ication%' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_string ILIKE 'app_ication%'; +SELECT count() FROM tab WHERE col_string ILIKE 'app_ication%'; +SELECT count() FROM tab WHERE col_string ILIKE 'app_ication%' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_string NOT ILIKE '%app_ication'; +SELECT count() FROM tab WHERE col_string NOT ILIKE '%app_ication'; +SELECT count() FROM tab WHERE col_string NOT ILIKE '%app_ication' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +SELECT '-- Test low cardinality string column - should be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_lowcardinality_string LIKE 'a%'; +SELECT count() from tab WHERE col_lowcardinality_string LIKE 'a%'; +SELECT count() from tab WHERE col_lowcardinality_string LIKE 'a%' SETTINGS optimize_rewrite_like_perfect_affix = 0; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_lowcardinality_string LIKE '%a'; +SELECT count() from tab WHERE col_lowcardinality_string LIKE '%a'; +SELECT count() from tab WHERE col_lowcardinality_string LIKE '%a' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +SELECT '-- Test nullable string column - should be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_nullable_string LIKE 'a%'; +SELECT count() from tab WHERE col_nullable_string LIKE 'a%'; +SELECT count() from tab WHERE col_nullable_string LIKE 'a%' SETTINGS optimize_rewrite_like_perfect_affix = 0; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_nullable_string LIKE '%a'; +SELECT count() from tab WHERE col_nullable_string LIKE '%a'; +SELECT count() from tab WHERE col_nullable_string LIKE '%a' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +SELECT '-- Test low cardinality nullable string column - should be rewritten'; +EXPLAIN 
SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_lowcardinality_nullable_string LIKE 'a%'; +SELECT count() from tab WHERE col_lowcardinality_nullable_string LIKE 'a%'; +SELECT count() from tab WHERE col_lowcardinality_nullable_string LIKE 'a%' SETTINGS optimize_rewrite_like_perfect_affix = 0; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_lowcardinality_nullable_string LIKE '%a'; +SELECT count() from tab WHERE col_lowcardinality_nullable_string LIKE '%a'; +SELECT count() from tab WHERE col_lowcardinality_nullable_string LIKE '%a' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +SELECT '-- Test low cardinality fixedstring with prefix - should be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_lowcardinality_fixedstring LIKE 'a%'; +SELECT count() from tab WHERE col_lowcardinality_fixedstring LIKE 'a%'; +SELECT count() from tab WHERE col_lowcardinality_fixedstring LIKE 'a%' SETTINGS optimize_rewrite_like_perfect_affix = 0; +SELECT '-- Test low cardinality fixedstring with suffix - should NOT be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_lowcardinality_fixedstring LIKE '%a\0'; +SELECT count() from tab WHERE col_lowcardinality_fixedstring LIKE '%a\0'; +SELECT count() from tab WHERE col_lowcardinality_fixedstring LIKE '%a\0' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +SELECT '-- Test nullable fixedstring with prefix - should be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_nullable_fixedstring LIKE 'a%'; +SELECT count() from tab WHERE col_nullable_fixedstring LIKE 'a%'; +SELECT count() from tab WHERE col_nullable_fixedstring LIKE 'a%' SETTINGS optimize_rewrite_like_perfect_affix = 0; +SELECT '-- Test nullable fixedstring with suffix - should NOT be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_nullable_fixedstring LIKE '%a\0'; +SELECT count() from tab WHERE col_nullable_fixedstring LIKE '%a\0'; +SELECT count() from tab WHERE col_nullable_fixedstring LIKE '%a\0' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +SELECT '-- Test low cardinality nullable fixedstring with prefix - should be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_lowcardinality_nullable_fixedstring LIKE 'a%'; +SELECT count() from tab WHERE col_lowcardinality_nullable_fixedstring LIKE 'a%'; +SELECT count() from tab WHERE col_lowcardinality_nullable_fixedstring LIKE 'a%' SETTINGS optimize_rewrite_like_perfect_affix = 0; +SELECT '-- Test low cardinality nullable fixedstring with suffix - should NOT be rewritten'; +EXPLAIN SYNTAX run_query_tree_passes = 1 SELECT count() FROM tab WHERE col_lowcardinality_nullable_fixedstring LIKE '%a\0'; +SELECT count() from tab WHERE col_lowcardinality_nullable_fixedstring LIKE '%a\0'; +SELECT count() from tab WHERE col_lowcardinality_nullable_fixedstring LIKE '%a\0' SETTINGS optimize_rewrite_like_perfect_affix = 0; + +DROP TABLE tab; diff --git a/parser/testdata/03594_system_grants_parameters/ast.json b/parser/testdata/03594_system_grants_parameters/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03594_system_grants_parameters/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03594_system_grants_parameters/metadata.json b/parser/testdata/03594_system_grants_parameters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03594_system_grants_parameters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03594_system_grants_parameters/query.sql b/parser/testdata/03594_system_grants_parameters/query.sql new file mode 100644 index 000000000..cc7857d20 --- /dev/null +++ b/parser/testdata/03594_system_grants_parameters/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-fasttest, no-parallel + +DROP USER IF EXISTS test_user_03593; +DROP USER IF EXISTS test_user_03593_1; + + +CREATE USER test_user_03593; +CREATE USER test_user_03593_1; + +GRANT READ,WRITE ON S3 TO test_user_03593; +GRANT READ,WRITE ON URL TO test_user_03593; + +GRANT READ ON POSTGRES TO test_user_03593; +GRANT WRITE ON ODBC TO test_user_03593; + +GRANT SET DEFINER ON test_user_03593_1 TO test_user_03593; + +GRANT TABLE ENGINE ON TinyLog TO test_user_03593; + +SELECT access_type, access_object FROM system.grants WHERE user_name='test_user_03593'; + +DROP USER test_user_03593; +DROP USER test_user_03593_1; diff --git a/parser/testdata/03595_alter_drop_column_comment_if_exists/ast.json b/parser/testdata/03595_alter_drop_column_comment_if_exists/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03595_alter_drop_column_comment_if_exists/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03595_alter_drop_column_comment_if_exists/metadata.json b/parser/testdata/03595_alter_drop_column_comment_if_exists/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03595_alter_drop_column_comment_if_exists/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03595_alter_drop_column_comment_if_exists/query.sql b/parser/testdata/03595_alter_drop_column_comment_if_exists/query.sql new file mode 100644 index 000000000..635c43dfd --- /dev/null +++ b/parser/testdata/03595_alter_drop_column_comment_if_exists/query.sql @@ -0,0 +1,75 @@ +-- Tags: no-replicated-database + +-- Test for issue #85608: Logical Error when using DROP COLUMN and COMMENT COLUMN IF EXISTS in same ALTER +-- This test verifies that COMMENT COLUMN IF EXISTS works correctly when the column is being dropped in the same ALTER statement + +-- Test with Memory engine +DROP TABLE IF EXISTS test_alter_drop_comment; + +CREATE TABLE test_alter_drop_comment ( + c0 Int, + c1 Int, + c2 String +) ENGINE = Memory; + +-- Test case 1: DROP COLUMN + COMMENT COLUMN IF EXISTS (should succeed) +-- This was previously failing with "Cannot find column c0 in ColumnsDescription" +ALTER TABLE test_alter_drop_comment + DROP COLUMN c0, + COMMENT COLUMN IF EXISTS c0 'this comment should be silently ignored'; + +-- Verify that c0 is dropped and c1, c2 remain +DESCRIBE test_alter_drop_comment; + +-- Test case 2: COMMENT COLUMN IF EXISTS on non-existent column (should succeed) +ALTER TABLE test_alter_drop_comment + COMMENT COLUMN IF EXISTS non_existent_column 'this should be ignored'; + +-- Verify table structure is unchanged +DESCRIBE test_alter_drop_comment; + +-- Test case 3: COMMENT COLUMN without IF EXISTS on non-existent column (should fail) +ALTER TABLE test_alter_drop_comment + COMMENT COLUMN non_existent_column 'this should fail'; -- { serverError NOT_FOUND_COLUMN_IN_BLOCK } + +-- Test case 4: Multiple operations with IF EXISTS +ALTER TABLE test_alter_drop_comment + DROP COLUMN c1, + COMMENT COLUMN IF EXISTS c1 'dropped column comment', + COMMENT COLUMN IF EXISTS c2 'existing column comment'; + +-- Verify final state - only c2 should remain with comment +DESCRIBE 
test_alter_drop_comment; + +SELECT table, name, comment +FROM system.columns +WHERE table = 'test_alter_drop_comment' AND database = currentDatabase() +ORDER BY name; + +DROP TABLE test_alter_drop_comment; + +-- Test with different table engines +-- Test with MergeTree +CREATE TABLE test_alter_drop_comment_mt ( + id Int32, + value String, + status Int8 +) ENGINE = MergeTree() ORDER BY id; + +ALTER TABLE test_alter_drop_comment_mt + DROP COLUMN status, + COMMENT COLUMN IF EXISTS status 'dropped status column', + COMMENT COLUMN IF EXISTS value 'existing value column'; + +DESCRIBE test_alter_drop_comment_mt; + +DROP TABLE test_alter_drop_comment_mt; + +-- Test edge case: try to drop and comment the same column without IF EXISTS (should fail) +CREATE TABLE test_alter_fail (c0 Int, c1 Int) ENGINE = Memory; + +ALTER TABLE test_alter_fail + DROP COLUMN c0, + COMMENT COLUMN c0 'this should fail'; -- { serverError NOT_FOUND_COLUMN_IN_BLOCK } + +DROP TABLE test_alter_fail; diff --git a/parser/testdata/03595_alter_if_exists_mixed_commands/ast.json b/parser/testdata/03595_alter_if_exists_mixed_commands/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03595_alter_if_exists_mixed_commands/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03595_alter_if_exists_mixed_commands/metadata.json b/parser/testdata/03595_alter_if_exists_mixed_commands/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03595_alter_if_exists_mixed_commands/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03595_alter_if_exists_mixed_commands/query.sql b/parser/testdata/03595_alter_if_exists_mixed_commands/query.sql new file mode 100644 index 000000000..1f360dc5a --- /dev/null +++ b/parser/testdata/03595_alter_if_exists_mixed_commands/query.sql @@ -0,0 +1,30 @@ +-- Tags: no-replicated-database +-- Test runtime IF EXISTS checks for mixed replicated/non-replicated ALTER commands +-- This test verifies that IF EXISTS clauses work correctly when mixing different types +-- of ALTER operations (replicated and non-replicated) in the same statement + +DROP TABLE IF EXISTS test_alter_mixed; + +-- Test 1: COMMENT COLUMN IF EXISTS with column deleted by previous command +CREATE TABLE test_alter_mixed (x Int32, y String) ENGINE = Memory; +-- This should succeed - DROP removes x, COMMENT with IF EXISTS should be silently ignored +ALTER TABLE test_alter_mixed DROP COLUMN x, COMMENT COLUMN IF EXISTS x 'test comment'; +DESC test_alter_mixed; + +DROP TABLE test_alter_mixed; + +-- Test 2: Multiple operations with mixed types in sequence +CREATE TABLE test_alter_mixed (a Int32, b String, c Float64) ENGINE = Memory; +-- Complex case: multiple drops and modifications including comments +ALTER TABLE test_alter_mixed + DROP COLUMN a, + DROP COLUMN IF EXISTS a, + MODIFY COLUMN IF EXISTS a Int64, + COMMENT COLUMN IF EXISTS a 'should be ignored', + RENAME COLUMN IF EXISTS a TO a_renamed, + MODIFY COLUMN IF EXISTS b String DEFAULT 'test', + DROP COLUMN c, + MODIFY COLUMN IF EXISTS c Float32; +DESC test_alter_mixed; + +DROP TABLE test_alter_mixed; \ No newline at end of file diff --git a/parser/testdata/03595_alter_if_exists_runtime_check/ast.json b/parser/testdata/03595_alter_if_exists_runtime_check/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03595_alter_if_exists_runtime_check/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/03595_alter_if_exists_runtime_check/metadata.json b/parser/testdata/03595_alter_if_exists_runtime_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03595_alter_if_exists_runtime_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03595_alter_if_exists_runtime_check/query.sql b/parser/testdata/03595_alter_if_exists_runtime_check/query.sql new file mode 100644 index 000000000..9192511fc --- /dev/null +++ b/parser/testdata/03595_alter_if_exists_runtime_check/query.sql @@ -0,0 +1,32 @@ +-- Test runtime IF EXISTS checks for ALTER COLUMN commands +-- This test verifies that IF EXISTS clauses work correctly when column state changes +-- between validate() and apply() phases within the same ALTER statement + +DROP TABLE IF EXISTS test_alter_if_exists; + +-- Test 1: DROP COLUMN IF EXISTS with column deleted by previous command +CREATE TABLE test_alter_if_exists (c0 Int32, c1 String) ENGINE = Memory; +-- This should succeed - first DROP removes c0, second DROP with IF EXISTS should be silently ignored +ALTER TABLE test_alter_if_exists DROP COLUMN c0, DROP COLUMN IF EXISTS c0; +DESC test_alter_if_exists; + +DROP TABLE test_alter_if_exists; + +-- Test 2: MODIFY COLUMN IF EXISTS with column deleted by previous command +CREATE TABLE test_alter_if_exists (c0 Int32, c1 String) ENGINE = Memory; +-- This should succeed - DROP removes c0, MODIFY with IF EXISTS should be silently ignored +ALTER TABLE test_alter_if_exists DROP COLUMN c0, MODIFY COLUMN IF EXISTS c0 Int64; +DESC test_alter_if_exists; + +DROP TABLE test_alter_if_exists; + +-- Test 3: RENAME COLUMN IF EXISTS with column deleted by previous command +CREATE TABLE test_alter_if_exists (x Int32, y String) ENGINE = Memory; +-- This should succeed - DROP removes x, RENAME with IF EXISTS should be silently ignored +ALTER TABLE test_alter_if_exists DROP COLUMN x, RENAME COLUMN IF EXISTS x TO z; +DESC test_alter_if_exists; + +DROP TABLE test_alter_if_exists; + +-- Test 4: Verify that without IF EXISTS, operations fail as expected +CREATE TABLE test_alter_if_exists (x Int32, y String) ENGINE = Memory; diff --git a/parser/testdata/03595_analyzer_lateral_join/ast.json b/parser/testdata/03595_analyzer_lateral_join/ast.json new file mode 100644 index 000000000..cf3689e8f --- /dev/null +++ b/parser/testdata/03595_analyzer_lateral_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000978504, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03595_analyzer_lateral_join/metadata.json b/parser/testdata/03595_analyzer_lateral_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03595_analyzer_lateral_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03595_analyzer_lateral_join/query.sql b/parser/testdata/03595_analyzer_lateral_join/query.sql new file mode 100644 index 000000000..1bf42956c --- /dev/null +++ b/parser/testdata/03595_analyzer_lateral_join/query.sql @@ -0,0 +1,82 @@ +SET enable_analyzer = 1; +SET allow_experimental_correlated_subqueries = 1; + +SELECT + t.a, + u.a +FROM + ( + SELECT + 1 AS a + ) AS t, + ( + SELECT 1 AS a + QUALIFY 0 = (t.a AS alias668) + ) AS u; -- { serverError NOT_IMPLEMENTED } + +SELECT + t.a, + u.a +FROM + ( + SELECT + 1 AS a + ) AS t, + ( + SELECT + DISTINCT *, + *, + 27, + '======Before 
Truncate======', + materialize(27), + 27, + *, + isZeroOrNull(27), + 27, + materialize(27), + *, + * IS NOT NULL, + *, + 27, + *, + toFixedString('======Before Truncate======', 27), + 27, + 27, + 27, + 27, + toLowCardinality(27), + toNullable(materialize(27)), + * IS NOT NULL, + 1 AS a QUALIFY ( + ( + *, + 27, + materialize(27), + 27, + '======Before Truncate======', + 27, + 27, + (27 IS NOT NULL), + * IS NOT NULL + ) IS NULL + ) = (t.a AS alias668) + ) AS u; -- { serverError NOT_IMPLEMENTED } + +SELECT + c, + a c +FROM + ( + SELECT + 1 a + ) X + CROSS JOIN ( + SELECT + 1 + WHERE + c = 1 + ) Y + CROSS JOIN ( + SELECT + 1 c + ) Z; -- { serverError NOT_IMPLEMENTED } diff --git a/parser/testdata/03595_changes_timeseries_functions/ast.json b/parser/testdata/03595_changes_timeseries_functions/ast.json new file mode 100644 index 000000000..ff03d9164 --- /dev/null +++ b/parser/testdata/03595_changes_timeseries_functions/ast.json @@ -0,0 +1,67 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery ts_raw_data (children 3)" + }, + { + "explain": " Identifier ts_raw_data" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration timestamp (children 1)" + }, + { + "explain": " DataType DateTime64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_3" + }, + { + "explain": " Literal 'UTC'" + }, + { + "explain": " ColumnDeclaration value (children 1)" + }, + { + "explain": " DataType Float64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier timestamp" + } + ], + + "rows": 15, + + "statistics": + { + "elapsed": 0.001144768, + "rows_read": 15, + "bytes_read": 555 + } +} diff --git a/parser/testdata/03595_changes_timeseries_functions/metadata.json b/parser/testdata/03595_changes_timeseries_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03595_changes_timeseries_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03595_changes_timeseries_functions/query.sql b/parser/testdata/03595_changes_timeseries_functions/query.sql new file mode 100644 index 000000000..541f7c269 --- /dev/null +++ b/parser/testdata/03595_changes_timeseries_functions/query.sql @@ -0,0 +1,38 @@ +CREATE TABLE ts_raw_data(timestamp DateTime64(3,'UTC'), value Float64) ENGINE = MergeTree() ORDER BY timestamp; + +INSERT INTO ts_raw_data SELECT arrayJoin(*).1::DateTime64(3, 'UTC') AS timestamp, arrayJoin(*).2 AS value +FROM ( +select [ +(1734955421.374, 0), +(1734955436.374, 0), +(1734955451.374, 1), +(1734955466.374, 1), +(1734955481.374, 1), +(1734955496.374, 3), +(1734955511.374, 3), +(1734955526.374, 3), +(1734955541.374, 5), +(1734955556.374, 3), +(1734955571.374, 3), +(1734955586.374, 3), +(1734955601.374, 2), +(1734955616.374, 4), +(1734955631.374, 6), +(1734955646.374, 8), +(1734955661.374, 8), +(1734955676.374, 8) +]); + +SELECT groupArraySorted(20)((timestamp::Decimal(20,3), value)) FROM ts_raw_data; + +SET allow_experimental_ts_to_grid_aggregate_function = 1; + +WITH + 1734955380 AS start, 1734955680 AS end, 15 AS step, 300 AS window, 60 as predict_offset, + range(start, end + 1, step) as grid +SELECT + arrayZip(grid, timeSeriesChangesToGrid(start, end, step, window)(toUnixTimestamp(timestamp), 
value)) as changes_5m, + arrayZip(grid, timeSeriesResetsToGrid(start, end, step, window)(toUnixTimestamp(timestamp), value)) as resets_5m +FROM ts_raw_data FORMAT Vertical; + +DROP TABLE ts_raw_data; diff --git a/parser/testdata/03595_changes_timeseries_functions_various_arguments/ast.json b/parser/testdata/03595_changes_timeseries_functions_various_arguments/ast.json new file mode 100644 index 000000000..ae27f277a --- /dev/null +++ b/parser/testdata/03595_changes_timeseries_functions_various_arguments/ast.json @@ -0,0 +1,76 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery ts_data (children 3)" + }, + { + "explain": " Identifier ts_data" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration id (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " ColumnDeclaration timestamps (children 1)" + }, + { + "explain": " DataType Array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType DateTime" + }, + { + "explain": " ColumnDeclaration values (children 1)" + }, + { + "explain": " DataType Array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType Float64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier id" + } + ], + + "rows": 18, + + "statistics": + { + "elapsed": 0.001088588, + "rows_read": 18, + "bytes_read": 663 + } +} diff --git a/parser/testdata/03595_changes_timeseries_functions_various_arguments/metadata.json b/parser/testdata/03595_changes_timeseries_functions_various_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03595_changes_timeseries_functions_various_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03595_changes_timeseries_functions_various_arguments/query.sql b/parser/testdata/03595_changes_timeseries_functions_various_arguments/query.sql new file mode 100644 index 000000000..91926c4e8 --- /dev/null +++ b/parser/testdata/03595_changes_timeseries_functions_various_arguments/query.sql @@ -0,0 +1,57 @@ +CREATE TABLE ts_data(id UInt64, timestamps Array(DateTime), values Array(Float64)) ENGINE = MergeTree() ORDER BY id; +CREATE TABLE ts_data_nullable(id UInt64, timestamp UInt32, value Nullable(Float64)) ENGINE = MergeTree() ORDER BY id; + +INSERT INTO ts_data VALUES (1, [10,20, 30], [1,5,5]), (2, [40,50,60], [1,3]), (3, [70], [2]), (4, [], []), (5, [80], [8,9]), (6, [100], [10]); +INSERT INTO ts_data_nullable SELECT id, timestamp, value FROM ts_data ARRAY JOIN timestamps as timestamp, arrayResize(values, length(timestamps), NULL) AS value; + +SET allow_experimental_time_series_aggregate_functions = 1; + +-- Fail because of rows with non-matching lengths of timestamps and values +SELECT timeSeriesChangesToGrid(10, 120, 10, 10)(timestamps, values) FROM ts_data; -- {serverError BAD_ARGUMENTS} +SELECT timeSeriesResetsToGrid(10, 120, 10, 10)(timestamps, values) FROM ts_data; -- {serverError BAD_ARGUMENTS} + +-- Filter out invalid rows where timestamp and values arrays lengths do not match +SELECT timeSeriesChangesToGrid(10, 120, 10, 70)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); +SELECT timeSeriesChangesToGridIf(10, 120, 10, 70)(timestamps, 
values, length(timestamps) = length(values)) FROM ts_data; +SELECT timeSeriesChangesToGridIf(10, 120, 10, 70)(timestamps, values, toNullable(length(timestamps) = length(values))) FROM ts_data; + +SELECT timeSeriesResetsToGrid(10, 120, 10, 70)(timestamps, values) FROM ts_data WHERE length(timestamps) = length(values); +SELECT timeSeriesResetsToGridIf(10, 120, 10, 70)(timestamps, values, length(timestamps) = length(values)) FROM ts_data; +SELECT timeSeriesResetsToGridIf(10, 120, 10, 70)(timestamps, values, toNullable(length(timestamps) = length(values))) FROM ts_data; + + +SELECT * FROM ts_data_nullable WHERE value IS NULL AND id < 5; + +SELECT timeSeriesResampleToGridWithStalenessIf(15, 125, 10, 10)(timestamps, values, length(timestamps) = length(values)) FROM ts_data; + +-- Test with Nullable arguments +SELECT timeSeriesChangesToGrid(15, 125, 10, 20)(arrayResize(timestamps, arrayMin([length(timestamps), length(values)]) as min_len), arrayResize(values, min_len)) FROM ts_data; +SELECT timeSeriesChangesToGrid(15, 125, 10, 20)(timestamp, value) FROM ts_data_nullable; +SELECT timeSeriesChangesToGridIf(15, 125, 10, 20)(timestamp, value, id < 5) FROM ts_data_nullable; + +SELECT timeSeriesResetsToGrid(15, 125, 10, 20)(arrayResize(timestamps, arrayMin([length(timestamps), length(values)]) as min_len), arrayResize(values, min_len)) FROM ts_data; +SELECT timeSeriesResetsToGrid(15, 125, 10, 20)(timestamp, value) FROM ts_data_nullable; +SELECT timeSeriesResetsToGridIf(15, 125, 10, 20)(timestamp, value, id < 5) FROM ts_data_nullable; + +SELECT timeSeriesChangesToGrid(15, 125, 10, 20)([10, 20, 30]::Array(UInt32), [1.0, 2.0, NULL]); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT timeSeriesChangesToGrid(15, 125, 10, 20)([10, NULL, 30]::Array(Nullable(UInt32)), [1.0, 2.0, 3.0]); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT timeSeriesResetsToGrid(15, 125, 10, 20)([10, 20, 30]::Array(UInt32), [1.0, 2.0, NULL]); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT timeSeriesResetsToGrid(15, 125, 10, 20)([10, NULL, 30]::Array(Nullable(UInt32)), [1.0, 2.0, 3.0]); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +-- End timestamp not aligned by step +SELECT timeSeriesChangesToGrid(100, 140, 15, 40)([89, 101, 109]::Array(UInt32), [89, 101, 99]::Array(Float32)); +SELECT timeSeriesResetsToGrid(100, 140, 15, 40)([89, 101, 109]::Array(UInt32), [89, 101, 99]::Array(Float32)); + +-- Start timestamp equals the end timestamp +SELECT timeSeriesChangesToGrid(120, 120, 0, 40)([89, 101, 109]::Array(UInt32), [89, 101, 99]::Array(Float32)); +SELECT timeSeriesResetsToGrid(120, 120, 0, 40)([89, 101, 109]::Array(UInt32), [89, 101, 99]::Array(Float32)); + +SELECT timeSeriesChangesToGrid(100, 150, 10, 30)(toDateTime(105), [1., 2., 3.]); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT timeSeriesChangesToGrid(100, 150, 10, 30)([1, 2, 3]::Array(UInt32), 1.); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT timeSeriesResetsToGrid(100, 150, 10, 30)(toDateTime(105), [1., 2., 3.]); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT timeSeriesResetsToGrid(100, 150, 10, 30)([1, 2, 3]::Array(UInt32), 1.); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +DROP TABLE ts_data; +DROP TABLE ts_data_nullable; diff --git a/parser/testdata/03595_convert_any_join_to_semi_or_anti/ast.json b/parser/testdata/03595_convert_any_join_to_semi_or_anti/ast.json new file mode 100644 index 000000000..5d47f0bb1 --- /dev/null +++ b/parser/testdata/03595_convert_any_join_to_semi_or_anti/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": 
"String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001147168, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03595_convert_any_join_to_semi_or_anti/metadata.json b/parser/testdata/03595_convert_any_join_to_semi_or_anti/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03595_convert_any_join_to_semi_or_anti/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03595_convert_any_join_to_semi_or_anti/query.sql b/parser/testdata/03595_convert_any_join_to_semi_or_anti/query.sql new file mode 100644 index 000000000..c5f3ddd13 --- /dev/null +++ b/parser/testdata/03595_convert_any_join_to_semi_or_anti/query.sql @@ -0,0 +1,20 @@ +SET enable_analyzer = 1; +SET query_plan_join_swap_table = false; +SET enable_parallel_replicas = 0; +SET correlated_subqueries_default_join_kind = 'left'; + +CREATE TABLE users1 (uid Int16, name String, age Int16) ENGINE=Memory; +INSERT INTO users1 SELECT number as uid, 'Alice' as name, 30 as age FROM numbers(100000); + +CREATE TABLE users2 (uid Int16, name String, age Int16) ENGINE=Memory; +INSERT INTO users2 SELECT number as uid, 'Alice2' as name, 30 as age FROM numbers(1000); + +EXPLAIN actions = 1 +SELECT count() +FROM users1 u1 +WHERE EXISTS (SELECT * FROM users2 u2 WHERE u1.uid != u2.uid); + +EXPLAIN actions = 1 +SELECT count() +FROM users1 u1 +WHERE NOT EXISTS (SELECT * FROM users2 u2 WHERE u1.uid != u2.uid); diff --git a/parser/testdata/03595_equality_deletes_simple/ast.json b/parser/testdata/03595_equality_deletes_simple/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03595_equality_deletes_simple/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03595_equality_deletes_simple/metadata.json b/parser/testdata/03595_equality_deletes_simple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03595_equality_deletes_simple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03595_equality_deletes_simple/query.sql b/parser/testdata/03595_equality_deletes_simple/query.sql new file mode 100644 index 000000000..b70348337 --- /dev/null +++ b/parser/testdata/03595_equality_deletes_simple/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest, no-parallel-replicas + +SELECT sum(id) FROM icebergS3(s3_conn, filename = 'deletes_db/eq_deletes_table'); +SELECT sum(id) FROM icebergS3Cluster('test_cluster_two_shards_localhost', s3_conn, filename = 'deletes_db/eq_deletes_table'); +SELECT sum(id), count(name) FROM icebergS3(s3_conn, filename = 'deletes_db/eq_deletes_table'); diff --git a/parser/testdata/03595_exists_as_scalar_subquery/ast.json b/parser/testdata/03595_exists_as_scalar_subquery/ast.json new file mode 100644 index 000000000..dec0cf0fd --- /dev/null +++ b/parser/testdata/03595_exists_as_scalar_subquery/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001237626, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03595_exists_as_scalar_subquery/metadata.json b/parser/testdata/03595_exists_as_scalar_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03595_exists_as_scalar_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03595_exists_as_scalar_subquery/query.sql b/parser/testdata/03595_exists_as_scalar_subquery/query.sql new file mode 100644 index 000000000..2d59db4c7 --- /dev/null +++ b/parser/testdata/03595_exists_as_scalar_subquery/query.sql @@ -0,0 +1,17 @@ +set enable_analyzer=1; +drop table if exists tab; + +create table tab (id Int32) engine = MergeTree order by id; +insert into tab values (1), (2), (3); + +set force_primary_key = 1; + +select * from tab where id > 0 or exists (select number from numbers(10) where number > 10) or exists (select number from numbers(10) where number > 10) settings execute_exists_as_scalar_subquery = 0; -- { serverError INDEX_NOT_USED } +select * from tab where id > 2 or exists (select number from numbers(10) where number > 10) or exists (select number from numbers(10) where number > 10) settings execute_exists_as_scalar_subquery = 1; + +set force_primary_key = 0; +system flush logs query_log; +SELECT 'ScalarSubqueriesGlobalCacheHit ' || ProfileEvents['ScalarSubqueriesGlobalCacheHit'] FROM system.query_log +WHERE type != 'QueryStart' AND current_database = currentDatabase() AND query like 'select%' AND Settings['execute_exists_as_scalar_subquery']='1'; + +SELECT EXISTS(SELECT 1 from numbers(2) where number != 0) settings execute_exists_as_scalar_subquery = 1; diff --git a/parser/testdata/03595_extract_url_parameters/ast.json b/parser/testdata/03595_extract_url_parameters/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03595_extract_url_parameters/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03595_extract_url_parameters/metadata.json b/parser/testdata/03595_extract_url_parameters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03595_extract_url_parameters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03595_extract_url_parameters/query.sql b/parser/testdata/03595_extract_url_parameters/query.sql new file mode 100644 index 000000000..fe8f207f1 --- /dev/null +++ b/parser/testdata/03595_extract_url_parameters/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest, stateful +SELECT sum(cityHash64(extractURLParameters(URL))) FROM test.hits; +SELECT sum(cityHash64(extractURLParameterNames(URL))) FROM test.hits; diff --git a/parser/testdata/03595_funcs_on_zero/ast.json b/parser/testdata/03595_funcs_on_zero/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03595_funcs_on_zero/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03595_funcs_on_zero/metadata.json b/parser/testdata/03595_funcs_on_zero/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03595_funcs_on_zero/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03595_funcs_on_zero/query.sql b/parser/testdata/03595_funcs_on_zero/query.sql new file mode 100644 index 000000000..0bd905595 --- /dev/null +++ b/parser/testdata/03595_funcs_on_zero/query.sql @@ -0,0 +1,326 @@ +-- Tags: no-fasttest, no-openssl-fips +-- ^ certain functions are disabled in the fast test build. 
+-- ^ MD5 function is not available in FIPS build + +SET session_timezone = 'UTC'; + +SELECT BIT_AND(0) FROM system.one; +SELECT BIT_OR(0) FROM system.one; +SELECT BIT_XOR(0) FROM system.one; +SELECT DATE(0) FROM system.one; +SELECT FORMAT_BYTES(0) FROM system.one; +SELECT FROM_DAYS(0) FROM system.one; +SELECT FROM_UNIXTIME(0) FROM system.one; +SELECT INET_NTOA(0) FROM system.one; +SELECT IPv4NumToString(0) FROM system.one; +SELECT IPv4NumToStringClassC(0) FROM system.one; +SELECT STD(0) FROM system.one; +SELECT STDDEV_POP(0) FROM system.one; +SELECT STDDEV_SAMP(0) FROM system.one; +SELECT VAR_POP(0) FROM system.one; +SELECT VAR_SAMP(0) FROM system.one; +SELECT YYYYMMDDToDate(0) FROM system.one; +SELECT YYYYMMDDToDate32(0) FROM system.one; +SELECT YYYYMMDDhhmmssToDateTime(0) FROM system.one; +SELECT YYYYMMDDhhmmssToDateTime64(0) FROM system.one; +SELECT __bitSwapLastTwo(0) FROM system.one; +SELECT __bitWrapperFunc(0) FROM system.one; +SELECT __scalarSubqueryResult(0) FROM system.one; +SELECT abs(0) FROM system.one; +SELECT acos(0) FROM system.one; +SELECT acosh(0) FROM system.one; +SELECT any(0) FROM system.one; +SELECT anyHeavy(0) FROM system.one; +SELECT anyLast(0) FROM system.one; +SELECT anyLastRespectNulls(0) FROM system.one; +SELECT anyLast_respect_nulls(0) FROM system.one; +SELECT anyRespectNulls(0) FROM system.one; +SELECT anyValueRespectNulls(0) FROM system.one; +SELECT any_respect_nulls(0) FROM system.one; +SELECT any_value(0) FROM system.one; +SELECT any_value_respect_nulls(0) FROM system.one; +SELECT approx_top_count(0) FROM system.one; +SELECT approx_top_k(0) FROM system.one; +SELECT array(0) FROM system.one; +SELECT array_agg(0) FROM system.one; +SELECT asin(0) FROM system.one; +SELECT asinh(0) FROM system.one; +SELECT assumeNotNull(0) FROM system.one; +SELECT atan(0) FROM system.one; +SELECT atanh(0) FROM system.one; +SELECT avg(0) FROM system.one; +SELECT bin(0) FROM system.one; +SELECT bitCount(0) FROM system.one; +SELECT bitNot(0) FROM system.one; +SELECT bitPositionsToArray(0) FROM system.one; +SELECT bitmaskToArray(0) FROM system.one; +SELECT bitmaskToList(0) FROM system.one; +SELECT blockSerializedSize(0) FROM system.one; +SELECT byteSize(0) FROM system.one; +SELECT byteSwap(0) FROM system.one; +SELECT cbrt(0) FROM system.one; +SELECT ceil(0) FROM system.one; +SELECT ceiling(0) FROM system.one; +SELECT char(0) FROM system.one; +SELECT cityHash64(0) FROM system.one; +SELECT coalesce(0) FROM system.one; +SELECT concat(0) FROM system.one; +SELECT corrMatrix(0) FROM system.one; +SELECT cos(0) FROM system.one; +SELECT cosh(0) FROM system.one; +SELECT count(0) FROM system.one; +SELECT countDigits(0) FROM system.one; +SELECT covarPopMatrix(0) FROM system.one; +SELECT covarSampMatrix(0) FROM system.one; +SELECT defaultValueOfArgumentType(0) FROM system.one; +SELECT degrees(0) FROM system.one; +SELECT deltaSum(0) FROM system.one; +SELECT dumpColumnStructure(0) FROM system.one; +SELECT entropy(0) FROM system.one; +SELECT erf(0) FROM system.one; +SELECT erfc(0) FROM system.one; +SELECT errorCodeToName(0) FROM system.one; +SELECT estimateCompressionRatio(0) FROM system.one; +SELECT exp(0) FROM system.one; +SELECT exp10(0) FROM system.one; +SELECT exp2(0) FROM system.one; +SELECT factorial(0) FROM system.one; +SELECT farmFingerprint64(0) FROM system.one; +SELECT farmHash64(0) FROM system.one; +SELECT firstValueRespectNulls(0) FROM system.one; +SELECT first_value(0) FROM system.one; +SELECT first_value_respect_nulls(0) FROM system.one; +SELECT floor(0) FROM system.one; +SELECT 
formatReadableDecimalSize(0) FROM system.one; +SELECT formatReadableQuantity(0) FROM system.one; +SELECT formatReadableSize(0) FROM system.one; +SELECT formatReadableTimeDelta(0) FROM system.one; +SELECT fromDaysSinceYearZero(0) FROM system.one; +SELECT fromDaysSinceYearZero32(0) FROM system.one; +SELECT fromModifiedJulianDay(0) FROM system.one; +SELECT fromModifiedJulianDayOrNull(0) FROM system.one; +SELECT fromUnixTimestamp(0) FROM system.one; +SELECT fromUnixTimestamp64Micro(0) FROM system.one; +SELECT fromUnixTimestamp64Milli(0) FROM system.one; +SELECT fromUnixTimestamp64Nano(0) FROM system.one; +SELECT fromUnixTimestamp64Second(0) FROM system.one; +SELECT fromUnixTimestampInJodaSyntax(0) FROM system.one; +SELECT gccMurmurHash(0) FROM system.one; +SELECT getTypeSerializationStreams(0) FROM system.one; +SELECT greatest(0) FROM system.one; +SELECT groupArray(0) FROM system.one; +SELECT groupArrayMovingAvg(0) FROM system.one; +SELECT groupArrayMovingSum(0) FROM system.one; +SELECT groupBitAnd(0) FROM system.one; +SELECT groupBitOr(0) FROM system.one; +SELECT groupBitXor(0) FROM system.one; +SELECT groupBitmap(0) FROM system.one; +SELECT groupConcat(0) FROM system.one; +SELECT groupUniqArray(0) FROM system.one; +SELECT group_concat(0) FROM system.one; +SELECT h3EdgeAngle(0) FROM system.one; +SELECT h3EdgeLengthKm(0) FROM system.one; +SELECT h3EdgeLengthM(0) FROM system.one; +SELECT h3GetPentagonIndexes(0) FROM system.one; +SELECT h3HexAreaKm2(0) FROM system.one; +SELECT h3HexAreaM2(0) FROM system.one; +SELECT h3NumHexagons(0) FROM system.one; +SELECT halfMD5(0) FROM system.one; +SELECT hex(0) FROM system.one; +SELECT hilbertEncode(0) FROM system.one; +SELECT hiveHash(0) FROM system.one; +SELECT icebergHash(0) FROM system.one; +SELECT identity(0) FROM system.one; +SELECT ignore(0) FROM system.one; +SELECT indexHint(0) FROM system.one; +SELECT intExp10(0) FROM system.one; +SELECT intExp2(0) FROM system.one; +SELECT intHash32(0) FROM system.one; +SELECT intHash64(0) FROM system.one; +SELECT isConstant(0) FROM system.one; +SELECT isFinite(0) FROM system.one; +SELECT isInfinite(0) FROM system.one; +SELECT isNaN(0) FROM system.one; +SELECT isNotNull(0) FROM system.one; +SELECT isNull(0) FROM system.one; +SELECT isNullable(0) FROM system.one; +SELECT isZeroOrNull(0) FROM system.one; +SELECT kafkaMurmurHash(0) FROM system.one; +SELECT kurtPop(0) FROM system.one; +SELECT kurtSamp(0) FROM system.one; +SELECT lastValueRespectNulls(0) FROM system.one; +SELECT last_value(0) FROM system.one; +SELECT last_value_respect_nulls(0) FROM system.one; +SELECT least(0) FROM system.one; +SELECT lgamma(0) FROM system.one; +SELECT ln(0) FROM system.one; +SELECT log(0) FROM system.one; +SELECT log10(0) FROM system.one; +SELECT log1p(0) FROM system.one; +SELECT log2(0) FROM system.one; +SELECT materialize(0) FROM system.one; +SELECT max(0) FROM system.one; +SELECT median(0) FROM system.one; +SELECT medianBFloat16(0) FROM system.one; +SELECT medianExact(0) FROM system.one; +SELECT medianExactHigh(0) FROM system.one; +SELECT medianExactLow(0) FROM system.one; +SELECT medianTDigest(0) FROM system.one; +SELECT medianTiming(0) FROM system.one; +SELECT metroHash64(0) FROM system.one; +SELECT min(0) FROM system.one; +SELECT mortonEncode(0) FROM system.one; +SELECT murmurHash2_32(0) FROM system.one; +SELECT murmurHash2_64(0) FROM system.one; +SELECT murmurHash3_128(0) FROM system.one; +SELECT murmurHash3_32(0) FROM system.one; +SELECT murmurHash3_64(0) FROM system.one; +SELECT negate(0) FROM system.one; +SELECT not(0) FROM 
system.one; +SELECT nothing(0) FROM system.one; +SELECT nothingNull(0) FROM system.one; +SELECT nothingUInt64(0) FROM system.one; +SELECT partitionID(0) FROM system.one; +SELECT partitionId(0) FROM system.one; +SELECT quantile(0) FROM system.one; +SELECT quantileBFloat16(0) FROM system.one; +SELECT quantileExact(0) FROM system.one; +SELECT quantileExactExclusive(0) FROM system.one; +SELECT quantileExactHigh(0) FROM system.one; +SELECT quantileExactInclusive(0) FROM system.one; +SELECT quantileExactLow(0) FROM system.one; +SELECT quantileTDigest(0) FROM system.one; +SELECT quantileTiming(0) FROM system.one; +SELECT radians(0) FROM system.one; +SELECT range(0) FROM system.one; +SELECT reinterpretAsDate(0) FROM system.one; +SELECT reinterpretAsDateTime(0) FROM system.one; +SELECT reinterpretAsFixedString(0) FROM system.one; +SELECT reinterpretAsFloat32(0) FROM system.one; +SELECT reinterpretAsFloat64(0) FROM system.one; +SELECT reinterpretAsInt128(0) FROM system.one; +SELECT reinterpretAsInt16(0) FROM system.one; +SELECT reinterpretAsInt256(0) FROM system.one; +SELECT reinterpretAsInt32(0) FROM system.one; +SELECT reinterpretAsInt64(0) FROM system.one; +SELECT reinterpretAsInt8(0) FROM system.one; +SELECT reinterpretAsString(0) FROM system.one; +SELECT reinterpretAsUInt128(0) FROM system.one; +SELECT reinterpretAsUInt16(0) FROM system.one; +SELECT reinterpretAsUInt256(0) FROM system.one; +SELECT reinterpretAsUInt32(0) FROM system.one; +SELECT reinterpretAsUInt64(0) FROM system.one; +SELECT reinterpretAsUInt8(0) FROM system.one; +SELECT reinterpretAsUUID(0) FROM system.one; +SELECT round(0) FROM system.one; +SELECT roundAge(0) FROM system.one; +SELECT roundBankers(0) FROM system.one; +SELECT roundDuration(0) FROM system.one; +SELECT roundToExp2(0) FROM system.one; +SELECT sigmoid(0) FROM system.one; +SELECT sign(0) FROM system.one; +SELECT sin(0) FROM system.one; +SELECT singleValueOrNull(0) FROM system.one; +SELECT sinh(0) FROM system.one; +SELECT sipHash128(0) FROM system.one; +SELECT sipHash128Reference(0) FROM system.one; +SELECT sipHash64(0) FROM system.one; +SELECT skewPop(0) FROM system.one; +SELECT skewSamp(0) FROM system.one; +SELECT sleep(0) FROM system.one; +SELECT sleepEachRow(0) FROM system.one; +SELECT space(0) FROM system.one; +SELECT sqid(0) FROM system.one; +SELECT sqidEncode(0) FROM system.one; +SELECT sqrt(0) FROM system.one; +SELECT stddevPop(0) FROM system.one; +SELECT stddevPopStable(0) FROM system.one; +SELECT stddevSamp(0) FROM system.one; +SELECT stddevSampStable(0) FROM system.one; +SELECT sum(0) FROM system.one; +SELECT sumCount(0) FROM system.one; +SELECT sumKahan(0) FROM system.one; +SELECT sumWithOverflow(0) FROM system.one; +SELECT tan(0) FROM system.one; +SELECT tanh(0) FROM system.one; +SELECT tgamma(0) FROM system.one; +SELECT throwIf(0) FROM system.one; +SELECT toBFloat16(0) FROM system.one; +SELECT toBool(0) FROM system.one; +SELECT toColumnTypeName(0) FROM system.one; +SELECT toDate(0) FROM system.one; +SELECT toDate32(0) FROM system.one; +SELECT toDate32OrDefault(0) FROM system.one; +SELECT toDateOrDefault(0) FROM system.one; +SELECT toDateTime(0) FROM system.one; +SELECT toDateTime32(0) FROM system.one; +SELECT toDateTimeOrDefault(0) FROM system.one; +SELECT toFloat32(0) FROM system.one; +SELECT toFloat32OrDefault(0) FROM system.one; +SELECT toFloat64(0) FROM system.one; +SELECT toFloat64OrDefault(0) FROM system.one; +SELECT toIPv4(0) FROM system.one; +SELECT toIPv4OrDefault(0) FROM system.one; +SELECT toIPv6OrDefault(0) FROM system.one; +SELECT 
toInt128(0) FROM system.one; +SELECT toInt128OrDefault(0) FROM system.one; +SELECT toInt16(0) FROM system.one; +SELECT toInt16OrDefault(0) FROM system.one; +SELECT toInt256(0) FROM system.one; +SELECT toInt256OrDefault(0) FROM system.one; +SELECT toInt32(0) FROM system.one; +SELECT toInt32OrDefault(0) FROM system.one; +SELECT toInt64(0) FROM system.one; +SELECT toInt64OrDefault(0) FROM system.one; +SELECT toInt8(0) FROM system.one; +SELECT toInt8OrDefault(0) FROM system.one; +SELECT toIntervalDay(0) FROM system.one; +SELECT toIntervalHour(0) FROM system.one; +SELECT toIntervalMicrosecond(0) FROM system.one; +SELECT toIntervalMillisecond(0) FROM system.one; +SELECT toIntervalMinute(0) FROM system.one; +SELECT toIntervalMonth(0) FROM system.one; +SELECT toIntervalNanosecond(0) FROM system.one; +SELECT toIntervalQuarter(0) FROM system.one; +SELECT toIntervalSecond(0) FROM system.one; +SELECT toIntervalWeek(0) FROM system.one; +SELECT toIntervalYear(0) FROM system.one; +SELECT toJSONString(0) FROM system.one; +SELECT toLowCardinality(0) FROM system.one; +SELECT toNullable(0) FROM system.one; +SELECT toString(0) FROM system.one; +SELECT toTypeName(0) FROM system.one; +SELECT toUInt128(0) FROM system.one; +SELECT toUInt128OrDefault(0) FROM system.one; +SELECT toUInt16(0) FROM system.one; +SELECT toUInt16OrDefault(0) FROM system.one; +SELECT toUInt256(0) FROM system.one; +SELECT toUInt256OrDefault(0) FROM system.one; +SELECT toUInt32(0) FROM system.one; +SELECT toUInt32OrDefault(0) FROM system.one; +SELECT toUInt64(0) FROM system.one; +SELECT toUInt64OrDefault(0) FROM system.one; +SELECT toUInt8(0) FROM system.one; +SELECT toUInt8OrDefault(0) FROM system.one; +SELECT toUUIDOrDefault(0) FROM system.one; +SELECT toUnixTimestamp(0) FROM system.one; +SELECT topK(0) FROM system.one; +SELECT trunc(0) FROM system.one; +SELECT truncate(0) FROM system.one; +SELECT tuple(0) FROM system.one; +SELECT uniq(0) FROM system.one; +SELECT uniqCombined(0) FROM system.one; +SELECT uniqCombined64(0) FROM system.one; +SELECT uniqExact(0) FROM system.one; +SELECT uniqHLL12(0) FROM system.one; +SELECT uniqTheta(0) FROM system.one; +SELECT uniqUpTo(0) FROM system.one; +SELECT varPop(0) FROM system.one; +SELECT varPopStable(0) FROM system.one; +SELECT varSamp(0) FROM system.one; +SELECT varSampStable(0) FROM system.one; +SELECT visibleWidth(0) FROM system.one; +SELECT wyHash64(0) FROM system.one; +SELECT xxHash32(0) FROM system.one; +SELECT xxHash64(0) FROM system.one; +SELECT xxh3(0) FROM system.one; diff --git a/parser/testdata/03595_parallel_replicas_join_remote/ast.json b/parser/testdata/03595_parallel_replicas_join_remote/ast.json new file mode 100644 index 000000000..84e68db65 --- /dev/null +++ b/parser/testdata/03595_parallel_replicas_join_remote/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_join_remote_l (children 1)" + }, + { + "explain": " Identifier test_join_remote_l" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001240206, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/03595_parallel_replicas_join_remote/metadata.json b/parser/testdata/03595_parallel_replicas_join_remote/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03595_parallel_replicas_join_remote/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03595_parallel_replicas_join_remote/query.sql 
b/parser/testdata/03595_parallel_replicas_join_remote/query.sql new file mode 100644 index 000000000..8423d6854 --- /dev/null +++ b/parser/testdata/03595_parallel_replicas_join_remote/query.sql @@ -0,0 +1,39 @@ +DROP TABLE IF EXISTS test_join_remote_l; +DROP TABLE IF EXISTS test_join_remote_r; + +CREATE TABLE test_join_remote_l (c Int) ENGINE=MergeTree() ORDER BY tuple(); +CREATE TABLE test_join_remote_r (c Int) ENGINE=MergeTree() ORDER BY tuple(); +INSERT INTO test_join_remote_l VALUES (1); +INSERT INTO test_join_remote_r VALUES (1); + + +SET parallel_replicas_only_with_analyzer = 0; -- necessary for CI run with disabled analyzer +SET serialize_query_plan = 0; +SET enable_parallel_replicas=1, parallel_replicas_local_plan=1, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; + +SELECT '---'; +SELECT count() FROM test_join_remote_l x JOIN cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test_join_remote_r) y ON TRUE; +SELECT count() FROM test_join_remote_l x JOIN clusterAllReplicas(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test_join_remote_r) y ON TRUE; +SELECT count() FROM test_join_remote_l x JOIN remote(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test_join_remote_r) y ON TRUE; +SELECT count() FROM test_join_remote_l x JOIN remoteSecure(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test_join_remote_r) y ON TRUE; + +SELECT '---'; +SELECT count() FROM test_join_remote_l x RIGHT JOIN cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test_join_remote_r) y ON TRUE; +SELECT count() FROM test_join_remote_l x RIGHT JOIN clusterAllReplicas(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test_join_remote_r) y ON TRUE; +SELECT count() FROM test_join_remote_l x RIGHT JOIN remote(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test_join_remote_r) y ON TRUE; +SELECT count() FROM test_join_remote_l x RIGHT JOIN remoteSecure(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test_join_remote_r) y ON TRUE; + +SELECT '---'; +SELECT count() FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test_join_remote_l) x JOIN test_join_remote_r y ON TRUE; +SELECT count() FROM clusterAllReplicas(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test_join_remote_l) x JOIN test_join_remote_r y ON TRUE; +SELECT count() FROM remote(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test_join_remote_l) x JOIN test_join_remote_r y ON TRUE; +SELECT count() FROM remoteSecure(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test_join_remote_l) x JOIN test_join_remote_r y ON TRUE; + +SELECT '---'; +SELECT count() FROM cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test_join_remote_l) x RIGHT JOIN test_join_remote_r y ON TRUE; +SELECT count() FROM clusterAllReplicas(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test_join_remote_l) x RIGHT JOIN test_join_remote_r y ON TRUE; +SELECT count() FROM remote(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test_join_remote_l) x RIGHT JOIN test_join_remote_r y ON TRUE; +SELECT count() FROM remoteSecure(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test_join_remote_l) x RIGHT JOIN test_join_remote_r y ON TRUE; + +DROP TABLE 
test_join_remote_l; +DROP TABLE test_join_remote_r; diff --git a/parser/testdata/03595_pread_threadpool_direct_io/ast.json b/parser/testdata/03595_pread_threadpool_direct_io/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03595_pread_threadpool_direct_io/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03595_pread_threadpool_direct_io/metadata.json b/parser/testdata/03595_pread_threadpool_direct_io/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03595_pread_threadpool_direct_io/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03595_pread_threadpool_direct_io/query.sql b/parser/testdata/03595_pread_threadpool_direct_io/query.sql new file mode 100644 index 000000000..ddad2ded8 --- /dev/null +++ b/parser/testdata/03595_pread_threadpool_direct_io/query.sql @@ -0,0 +1,50 @@ +-- Tags: no-parallel-replicas, no-object-storage + +set min_bytes_to_use_direct_io = 0; + +drop table if exists 03595_data; + +create table 03595_data (key UInt32, val String) engine = MergeTree order by key +as +select number, 'val-' || number from numbers(100000); + +select * from 03595_data +format Null +settings + local_filesystem_read_method = 'pread_threadpool', + min_bytes_to_use_direct_io = 1, + log_query_threads = 1, + use_uncompressed_cache = 0; + +-- If previous query was running w/o O_DIRECT (due to some bug) it may fill pagecache, +-- and then subsequent query will read from cache (if it will ignore O_DIRECT flag as well) with RWF_NOWAIT, +-- so let's make sure that this is not the case +select * from 03595_data +format Null +settings + local_filesystem_read_method = 'pread_threadpool', + min_bytes_to_use_direct_io = 1, + log_query_threads = 1, + use_uncompressed_cache = 0; + +system flush logs query_log, query_thread_log; + +with queries as ( + select query_id, row_number() over(order by event_time_microseconds) as ordinal + from system.query_log + where type = 'QueryFinish' + and current_database = currentDatabase() + and query_kind = 'Select' + and Settings['min_bytes_to_use_direct_io'] = '1' +) +select queries.ordinal, thread_name, sum(ProfileEvents['OSReadBytes']) > 0 as disk_reading +from system.query_thread_log qtl + join queries + on queries.query_id = qtl.query_id +where current_database = currentDatabase() + and query_id in (select query_id from queries) + and ProfileEvents['OSReadBytes'] > 0 +group by 1, 2 +order by 1, 2; + +drop table 03595_data; diff --git a/parser/testdata/03595_set_query_no_eq_set_to_one/ast.json b/parser/testdata/03595_set_query_no_eq_set_to_one/ast.json new file mode 100644 index 000000000..d8ffed1cb --- /dev/null +++ b/parser/testdata/03595_set_query_no_eq_set_to_one/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Identifier name" + }, + { + "explain": " Identifier value" + }, + { + "explain": " Identifier changed" + }, + { + "explain": " Identifier default" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.settings" + }, + { + "explain": " Function equals (children 1)" + }, + { 
+ "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier name" + }, + { + "explain": " Literal 'force_index_by_date'" + }, + { + "explain": " Set" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.00126385, + "rows_read": 17, + "bytes_read": 606 + } +} diff --git a/parser/testdata/03595_set_query_no_eq_set_to_one/metadata.json b/parser/testdata/03595_set_query_no_eq_set_to_one/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03595_set_query_no_eq_set_to_one/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03595_set_query_no_eq_set_to_one/query.sql b/parser/testdata/03595_set_query_no_eq_set_to_one/query.sql new file mode 100644 index 000000000..5c6e604ed --- /dev/null +++ b/parser/testdata/03595_set_query_no_eq_set_to_one/query.sql @@ -0,0 +1,45 @@ +SELECT name, value, changed, default FROM system.settings WHERE name = 'force_index_by_date' SETTINGS force_index_by_date; +SELECT name, value, changed, default FROM system.settings WHERE name = 'force_index_by_date' SETTINGS force_index_by_date = 0; +SELECT name, value, changed, default FROM system.settings WHERE name = 'force_index_by_date' SETTINGS force_index_by_date = 1; +SELECT name, value, changed, default FROM system.settings WHERE name = 'force_index_by_date' SETTINGS force_index_by_date = DEFAULT; +SELECT name, value, changed, default FROM system.settings WHERE name = 'force_index_by_date' SETTINGS force_index_by_date FORMAT CSV; + +SELECT name, value, changed, default FROM system.settings WHERE name IN ('force_index_by_date', 'log_queries') ORDER BY name SETTINGS force_index_by_date, log_queries; +SELECT name, value, changed, default FROM system.settings WHERE name IN ('force_index_by_date', 'log_queries') ORDER BY name SETTINGS force_index_by_date = 0, log_queries; +SELECT name, value, changed, default FROM system.settings WHERE name IN ('force_index_by_date', 'log_queries') ORDER BY name SETTINGS force_index_by_date, log_queries = 0; +SELECT name, value, changed, default FROM system.settings WHERE name IN ('force_index_by_date', 'log_queries') SETTINGS force_index_by_date, log_queries FORMAT CSV; + +SET force_index_by_date; +SELECT name, value, changed, default FROM system.settings WHERE name = 'force_index_by_date'; +SET force_index_by_date = DEFAULT; +SELECT name, value, changed, default FROM system.settings WHERE name = 'force_index_by_date'; +SET force_index_by_date = 1; +SELECT name, value, changed, default FROM system.settings WHERE name = 'force_index_by_date'; +SET force_index_by_date = 0; +SELECT name, value, changed, default FROM system.settings WHERE name = 'force_index_by_date'; + +SET force_index_by_date = DEFAULT, log_queries = DEFAULT; +SELECT name, value, changed, default FROM system.settings WHERE name IN ('force_index_by_date', 'log_queries') ORDER BY name; +SET force_index_by_date, log_queries = 0; +SELECT name, value, changed, default FROM system.settings WHERE name IN ('force_index_by_date', 'log_queries') ORDER BY name; +SET force_index_by_date = 0, log_queries; +SELECT name, value, changed, default FROM system.settings WHERE name IN ('force_index_by_date', 'log_queries') ORDER BY name; +SET force_index_by_date = DEFAULT, log_queries = DEFAULT; +SELECT name, value, changed, default FROM system.settings WHERE name IN ('force_index_by_date', 'log_queries') ORDER BY name; +SET force_index_by_date, log_queries; +SELECT name, value, changed, default FROM system.settings WHERE name IN ('force_index_by_date', 
'log_queries') ORDER BY name; + +CREATE TABLE t +( + id UInt32 +) +ENGINE = MergeTree +ORDER BY id +SETTINGS async_insert, optimize_on_insert; + +INSERT INTO t SETTINGS async_insert, optimize_on_insert VALUES (1), (2), (3); +INSERT INTO t SETTINGS async_insert=0, optimize_on_insert VALUES (4), (5), (6); +INSERT INTO t SETTINGS async_insert, optimize_on_insert=0 VALUES (7), (8), (9); +INSERT INTO t SETTINGS async_insert=1, optimize_on_insert=1 VALUES (10), (11), (12); + +SELECT id from t ORDER BY id ASC; \ No newline at end of file diff --git a/parser/testdata/03596_alter_update_column_with_subcolumn_used_in_materialized_expression/ast.json b/parser/testdata/03596_alter_update_column_with_subcolumn_used_in_materialized_expression/ast.json new file mode 100644 index 000000000..3657ed04f --- /dev/null +++ b/parser/testdata/03596_alter_update_column_with_subcolumn_used_in_materialized_expression/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001092779, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03596_alter_update_column_with_subcolumn_used_in_materialized_expression/metadata.json b/parser/testdata/03596_alter_update_column_with_subcolumn_used_in_materialized_expression/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03596_alter_update_column_with_subcolumn_used_in_materialized_expression/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03596_alter_update_column_with_subcolumn_used_in_materialized_expression/query.sql b/parser/testdata/03596_alter_update_column_with_subcolumn_used_in_materialized_expression/query.sql new file mode 100644 index 000000000..36434e468 --- /dev/null +++ b/parser/testdata/03596_alter_update_column_with_subcolumn_used_in_materialized_expression/query.sql @@ -0,0 +1,55 @@ +drop table if exists test; +create table test (t Tuple(a UInt32), a UInt32 materialized t.a) engine=MergeTree() order by tuple(); +insert into test select tuple(1); +select t, a from test; +alter table test update t = tuple(2) where 1 settings mutations_sync=1; +select t, a from test; +alter table test update t = tuple(3) where 1 settings mutations_sync=1; +select t, a from test; +drop table test; + +drop table if exists test; +create table test (t Tuple(a UInt32), a UInt32 materialized t.a + 42) engine=MergeTree() order by tuple(); +insert into test select tuple(1); +select t, a from test; +alter table test update t = tuple(2) where 1 settings mutations_sync=1; +select t, a from test; +alter table test update t = tuple(3) where 1 settings mutations_sync=1; +select t, a from test; +drop table test; + + +create table test (t Tuple(a UInt32), a UInt32 materialized t.a) engine=MergeTree() order by a; +insert into test select tuple(1); +select t, a from test; +alter table test update t = tuple(2) where 1 settings mutations_sync=1; -- {serverError CANNOT_UPDATE_COLUMN} +drop table test; + +drop table if exists test; +create table test (json JSON, a UInt32 materialized json.a::UInt32) engine=MergeTree() order by tuple(); +insert into test select '{"a" : 1}'; +select json, a from test; +alter table test update json = '{"a" : 2}' where 1 settings mutations_sync=1; +select json, a from test; +alter table test update json = '{"a" : 3}' where 1 settings mutations_sync=1; +select json, a from test; 
+drop table test; + +drop table if exists test; +create table test (json JSON, a UInt32 materialized json.a::UInt32 + 42) engine=MergeTree() order by tuple(); +insert into test select '{"a" : 1}'; +select json, a from test; +alter table test update json = '{"a" : 2}' where 1 settings mutations_sync=1; +select json, a from test; +alter table test update json = '{"a" : 3}' where 1 settings mutations_sync=1; +select json, a from test; +drop table test; + + + +create table test (json JSON, a UInt32 materialized json.a::UInt32) engine=MergeTree() order by a; +insert into test select '{"a" : 1}'; +select json, a from test; +alter table test update json = '{"a" : 2}' where 1 settings mutations_sync=1; -- {serverError CANNOT_UPDATE_COLUMN} +drop table test; + diff --git a/parser/testdata/03596_parquet_prewhere_page_skip_bug/ast.json b/parser/testdata/03596_parquet_prewhere_page_skip_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03596_parquet_prewhere_page_skip_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03596_parquet_prewhere_page_skip_bug/metadata.json b/parser/testdata/03596_parquet_prewhere_page_skip_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03596_parquet_prewhere_page_skip_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03596_parquet_prewhere_page_skip_bug/query.sql b/parser/testdata/03596_parquet_prewhere_page_skip_bug/query.sql new file mode 100644 index 000000000..79cac73b1 --- /dev/null +++ b/parser/testdata/03596_parquet_prewhere_page_skip_bug/query.sql @@ -0,0 +1,9 @@ +-- Tags: no-fasttest, no-parallel + +set output_format_parquet_use_custom_encoder = 1; +set input_format_parquet_use_native_reader_v3 = 1; +set engine_file_truncate_on_insert = 1; + +insert into function file('03596_parquet_prewhere_page_skip_bug.parquet') select number as n, number*10 as n10 from numbers(200) settings output_format_parquet_data_page_size=100, output_format_parquet_batch_size=10, output_format_parquet_row_group_size=100, output_format_parquet_write_page_index=0; + +select n10 from file('03596_parquet_prewhere_page_skip_bug.parquet') prewhere n in (131, 174, 175, 176) order by all settings input_format_parquet_page_filter_push_down=0, input_format_parquet_filter_push_down=0, input_format_parquet_bloom_filter_push_down=0, input_format_parquet_max_block_size=10; diff --git a/parser/testdata/03597_alter_column_with_subcolumn_in_key/ast.json b/parser/testdata/03597_alter_column_with_subcolumn_in_key/ast.json new file mode 100644 index 000000000..795bf5085 --- /dev/null +++ b/parser/testdata/03597_alter_column_with_subcolumn_in_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001126753, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03597_alter_column_with_subcolumn_in_key/metadata.json b/parser/testdata/03597_alter_column_with_subcolumn_in_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03597_alter_column_with_subcolumn_in_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03597_alter_column_with_subcolumn_in_key/query.sql b/parser/testdata/03597_alter_column_with_subcolumn_in_key/query.sql new file mode 100644 index 
000000000..3eae3b63b --- /dev/null +++ b/parser/testdata/03597_alter_column_with_subcolumn_in_key/query.sql @@ -0,0 +1,14 @@ +drop table if exists test; +create table test (id UInt32, t Tuple(a UInt32)) engine=MergeTree order by t.a; +insert into test select 1, tuple(1); +alter table test update t = tuple(2) where 1; -- {serverError CANNOT_UPDATE_COLUMN} +alter table test modify column t Tuple(a String); -- {serverError ALTER_OF_COLUMN_IS_FORBIDDEN} +drop table test; + +drop table if exists test; +create table test (id UInt32, json JSON) engine=MergeTree order by json.a::Int64; +insert into test select 1, '{"a" : 42}'; +alter table test update json = '{}' where 1; -- {serverError CANNOT_UPDATE_COLUMN} +alter table test modify column json JSON(a String); -- {serverError ALTER_OF_COLUMN_IS_FORBIDDEN} +drop table test; + diff --git a/parser/testdata/03598_json_enum_default_value_in_typed_path/ast.json b/parser/testdata/03598_json_enum_default_value_in_typed_path/ast.json new file mode 100644 index 000000000..74bdad15d --- /dev/null +++ b/parser/testdata/03598_json_enum_default_value_in_typed_path/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001731288, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03598_json_enum_default_value_in_typed_path/metadata.json b/parser/testdata/03598_json_enum_default_value_in_typed_path/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03598_json_enum_default_value_in_typed_path/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03598_json_enum_default_value_in_typed_path/query.sql b/parser/testdata/03598_json_enum_default_value_in_typed_path/query.sql new file mode 100644 index 000000000..b5e5fb745 --- /dev/null +++ b/parser/testdata/03598_json_enum_default_value_in_typed_path/query.sql @@ -0,0 +1,7 @@ +drop table if exists test; +create table test (json JSON(e Enum('a' = 1, 'b' = 2))) engine=MergeTree order by tuple(); +insert into test values ('{"e" : "a"}'), ('{"e" : "b"}'), ('{"e" : null}'), ('{}'); +select json from test; +select json.e from test; +drop table test; + diff --git a/parser/testdata/03599_bad_date_and_datetimes_inference/ast.json b/parser/testdata/03599_bad_date_and_datetimes_inference/ast.json new file mode 100644 index 000000000..d7f9a6813 --- /dev/null +++ b/parser/testdata/03599_bad_date_and_datetimes_inference/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001094747, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03599_bad_date_and_datetimes_inference/metadata.json b/parser/testdata/03599_bad_date_and_datetimes_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03599_bad_date_and_datetimes_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03599_bad_date_and_datetimes_inference/query.sql b/parser/testdata/03599_bad_date_and_datetimes_inference/query.sql new file mode 100644 index 000000000..94da1e2a1 --- /dev/null +++ b/parser/testdata/03599_bad_date_and_datetimes_inference/query.sql @@ -0,0 +1,58 @@ +set date_time_input_format='basic'; +set session_timezone='UTC'; + +select 
d, toTypeName(d) from format(JSONEachRow, '{"d" : "1800-01-01"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1800-01-01", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1969-12-31"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1969-12-31", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1800-01-01 00:00:00"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1800-01-01 00:00:00", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1969-12-31 23:59:59"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1969-12-31 23:59:59", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "3000-01-01"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "3000-01-01", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "2149-06-07"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "2149-06-07", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "3000-01-01 00:00:00"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "3000-01-01 00:00:00", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "2106-02-07 06:28:16"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "2106-02-07 06:28:16", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1900-01-01"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1900-01-01", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1900-01-01 00:00:00"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1900-01-01 00:00:00", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1899-12-31 23:59:59"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1899-12-31 23:59:59", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "2300-01-01 00:00:00.000000000"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "2300-01-01 00:00:00.000000000", "s" : "some string"}'); + +select '----------------------------------------------'; + +set date_time_input_format='best_effort'; +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1800-01-01"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1800-01-01", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1969-12-31"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1969-12-31", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1800-01-01 00:00:00"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1800-01-01 00:00:00", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1969-12-31 23:59:59"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1969-12-31 23:59:59", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "3000-01-01"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "3000-01-01", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "2149-06-07"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "2149-06-07", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "3000-01-01 00:00:00"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "3000-01-01 00:00:00", "s" : "some string"}'); +select 
d, toTypeName(d) from format(JSONEachRow, '{"d" : "2106-02-07 06:28:16"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "2106-02-07 06:28:16", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1900-01-01"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1900-01-01", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1900-01-01 00:00:00"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1900-01-01 00:00:00", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1899-12-31 23:59:59"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "1899-12-31 23:59:59", "s" : "some string"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "2300-01-01 00:00:00.000000000"}'); +select d, toTypeName(d) from format(JSONEachRow, '{"d" : "2300-01-01 00:00:00.000000000", "s" : "some string"}'); + + + diff --git a/parser/testdata/03599_lightweight_delete_vertical_merge/ast.json b/parser/testdata/03599_lightweight_delete_vertical_merge/ast.json new file mode 100644 index 000000000..b78f23d75 --- /dev/null +++ b/parser/testdata/03599_lightweight_delete_vertical_merge/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_lwd_vertical (children 1)" + }, + { + "explain": " Identifier t_lwd_vertical" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001198871, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/03599_lightweight_delete_vertical_merge/metadata.json b/parser/testdata/03599_lightweight_delete_vertical_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03599_lightweight_delete_vertical_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03599_lightweight_delete_vertical_merge/query.sql b/parser/testdata/03599_lightweight_delete_vertical_merge/query.sql new file mode 100644 index 000000000..19e883864 --- /dev/null +++ b/parser/testdata/03599_lightweight_delete_vertical_merge/query.sql @@ -0,0 +1,50 @@ +DROP TABLE IF EXISTS t_lwd_vertical; + +CREATE TABLE t_lwd_vertical +( + id UInt8, + c1 UInt8, + c2 UInt8, + c3 UInt8, + c4 UInt8, +) +ENGINE = MergeTree +ORDER BY id +SETTINGS + min_bytes_for_wide_part = 0, + enable_block_number_column = 0, + enable_block_offset_column = 0, + vertical_merge_algorithm_min_rows_to_activate = 1, + vertical_merge_algorithm_min_columns_to_activate = 1, + vertical_merge_optimize_lightweight_delete = 1, + ratio_of_defaults_for_sparse_serialization = 1.0; + +INSERT INTO t_lwd_vertical SELECT number, rand(), rand(), rand(), rand() FROM numbers(100000); + +SET lightweight_delete_mode = 'alter_update'; + +DELETE FROM t_lwd_vertical WHERE id % 4 = 0; +SELECT count() FROM t_lwd_vertical; + +OPTIMIZE TABLE t_lwd_vertical FINAL; +SELECT count() FROM t_lwd_vertical; +SELECT count() FROM system.parts_columns WHERE database = currentDatabase() AND table = 't_lwd_vertical' AND active AND partition_id = 'all' AND column = '_row_exists'; + +DELETE FROM t_lwd_vertical WHERE 1; +SELECT count() FROM t_lwd_vertical; + +OPTIMIZE TABLE t_lwd_vertical FINAL; +SELECT count() FROM t_lwd_vertical; +SELECT count() FROM system.parts_columns WHERE database = currentDatabase() AND table = 't_lwd_vertical' AND active AND partition_id = 'all' AND column = '_row_exists'; + +SYSTEM FLUSH LOGS part_log; + +SELECT + merge_algorithm, + read_rows, + read_bytes, + 
rows, +FROM system.part_log WHERE database = currentDatabase() AND table = 't_lwd_vertical' AND event_type = 'MergeParts' +ORDER BY event_time_microseconds; + +DROP TABLE IF EXISTS t_lwd_vertical; diff --git a/parser/testdata/03600_analyzer_setting_bool/ast.json b/parser/testdata/03600_analyzer_setting_bool/ast.json new file mode 100644 index 000000000..f8938d6bf --- /dev/null +++ b/parser/testdata/03600_analyzer_setting_bool/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Set" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001312204, + "rows_read": 6, + "bytes_read": 191 + } +} diff --git a/parser/testdata/03600_analyzer_setting_bool/metadata.json b/parser/testdata/03600_analyzer_setting_bool/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03600_analyzer_setting_bool/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03600_analyzer_setting_bool/query.sql b/parser/testdata/03600_analyzer_setting_bool/query.sql new file mode 100644 index 000000000..c54fa0f97 --- /dev/null +++ b/parser/testdata/03600_analyzer_setting_bool/query.sql @@ -0,0 +1,2 @@ +select 1 settings allow_experimental_analyzer='1'; +select 1 settings enable_analyzer='1'; diff --git a/parser/testdata/03600_replace_fixed_string_bug/ast.json b/parser/testdata/03600_replace_fixed_string_bug/ast.json new file mode 100644 index 000000000..b4e0d1443 --- /dev/null +++ b/parser/testdata/03600_replace_fixed_string_bug/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function replace (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function toFixedString (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal ''" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001393792, + "rows_read": 14, + "bytes_read": 525 + } +} diff --git a/parser/testdata/03600_replace_fixed_string_bug/metadata.json b/parser/testdata/03600_replace_fixed_string_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03600_replace_fixed_string_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03600_replace_fixed_string_bug/query.sql b/parser/testdata/03600_replace_fixed_string_bug/query.sql new file mode 100644 index 000000000..e7d24e5ee --- /dev/null +++ b/parser/testdata/03600_replace_fixed_string_bug/query.sql @@ -0,0 +1,2 @@ +SELECT replace(materialize(toFixedString('a', 1)), '', ''); + diff --git a/parser/testdata/03601_histogram_quantile/ast.json b/parser/testdata/03601_histogram_quantile/ast.json new file mode 100644 index 000000000..a5910a6e4 --- /dev/null 
+++ b/parser/testdata/03601_histogram_quantile/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function quantilePrometheusHistogram (children 2)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier args" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tupleElement (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier args" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Identifier number" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Float64_0.9" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.001102166, + "rows_read": 19, + "bytes_read": 752 + } +} diff --git a/parser/testdata/03601_histogram_quantile/metadata.json b/parser/testdata/03601_histogram_quantile/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03601_histogram_quantile/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03601_histogram_quantile/query.sql b/parser/testdata/03601_histogram_quantile/query.sql new file mode 100644 index 000000000..605c54e1c --- /dev/null +++ b/parser/testdata/03601_histogram_quantile/query.sql @@ -0,0 +1,69 @@ +SELECT quantilePrometheusHistogram(0.9)(args.1, args.2 + number) +FROM ( + SELECT arrayJoin(arrayZip( + [0.0, 0.5, 1.0, +Inf], + [0.0, 10.0, 11.0, 12.0] + )) AS args, number + FROM numbers(10) +) +GROUP BY number +ORDER BY number; + +SELECT quantilePrometheusHistogram(0.9)(toFloat32(args.1), args.2 + number) -- Float32 upper bound values +FROM ( + SELECT arrayJoin(arrayZip( + [0.0, 0.5, 1.0, +Inf], + [0.0, 10.0, 11.0, 12.0] + )) AS args, number + FROM numbers(10) +) +GROUP BY number +ORDER BY number; + +SELECT quantilePrometheusHistogram(0.9)(args.1, args.2 + number) -- UInt cumulative histogram values +FROM ( + SELECT arrayJoin(arrayZip( + [0.0, 0.5, 1.0, +Inf], + [0, 10, 11, 12] + )) AS args, number + FROM numbers(10) +) +GROUP BY number +ORDER BY number; + +SELECT quantilePrometheusHistogram(0.9)(args.1, args.2) -- return NaN if no inf bucket +FROM ( + SELECT arrayJoin(arrayZip( + [0.0, 0.5, 1.0], + [0.0, 10.0, 11.0] + )) AS args +); + +SELECT quantilePrometheusHistogram(0.5)(+Inf, 10.0); -- return NaN if less than 2 buckets + +SELECT quantilePrometheusHistogram(0.2)(args.1, args.2) -- interpolate between minimum bucket upper bound and 0 +FROM ( + SELECT arrayJoin(arrayZip( + [0.5, 1.0, 2.0, +Inf], + [5.0, 10.0, 13.0, 15.0] + )) AS args +); + +SELECT quantilePrometheusHistogram(0.2)(args.1, args.2) -- do not interpolate if quantile position is in minimum bucket and minimum bucket upper bound is negative +FROM ( + SELECT arrayJoin(arrayZip( + [-0.5, 0.0, 1.0, +Inf], + [5.0, 10.0, 13.0, 15.0] + )) AS args +); + +SELECT quantilesPrometheusHistogram(0, 0.1, 0.3, 0.5, 0.7, 0.9, 1)(args.1, args.2 + number) +FROM ( + SELECT arrayJoin(arrayZip( + [0.0, 0.5, 1.0, +Inf], + [0.0, 10.0, 11.0, 12.0] + )) AS args, number + FROM numbers(10) +) +GROUP 
BY number +ORDER BY number; diff --git a/parser/testdata/03601_inconsistent_table_names/ast.json b/parser/testdata/03601_inconsistent_table_names/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03601_inconsistent_table_names/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03601_inconsistent_table_names/metadata.json b/parser/testdata/03601_inconsistent_table_names/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03601_inconsistent_table_names/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03601_inconsistent_table_names/query.sql b/parser/testdata/03601_inconsistent_table_names/query.sql new file mode 100644 index 000000000..774d3201b --- /dev/null +++ b/parser/testdata/03601_inconsistent_table_names/query.sql @@ -0,0 +1,6 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/70826 +SET enable_analyzer=1; +CREATE VIEW v0 AS SELECT 1 AS c0 FROM (SELECT 1) w CROSS JOIN (SELECT 1) z; +EXPLAIN SYNTAX SELECT 1 FROM v0 CROSS JOIN (SELECT 1) y CROSS JOIN (SELECT 1 x) x WHERE x.x = 1; + +EXPLAIN SYNTAX WITH rhs AS (SELECT 1) SELECT lhs.d2 FROM view(SELECT dummy AS d2 FROM system.one INNER JOIN (SELECT * FROM view(SELECT dummy AS d2 FROM system.one)) AS a ON a.d2 = d2) AS lhs RIGHT JOIN rhs USING (d1); diff --git a/parser/testdata/03601_insert_squashing_remove_const/ast.json b/parser/testdata/03601_insert_squashing_remove_const/ast.json new file mode 100644 index 000000000..d19275409 --- /dev/null +++ b/parser/testdata/03601_insert_squashing_remove_const/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tbl_x (children 1)" + }, + { + "explain": " Identifier tbl_x" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001215442, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/03601_insert_squashing_remove_const/metadata.json b/parser/testdata/03601_insert_squashing_remove_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03601_insert_squashing_remove_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03601_insert_squashing_remove_const/query.sql b/parser/testdata/03601_insert_squashing_remove_const/query.sql new file mode 100644 index 000000000..e2a02499f --- /dev/null +++ b/parser/testdata/03601_insert_squashing_remove_const/query.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS tbl_x; + +CREATE TABLE tbl_x (col2 String) ENGINE = Memory; + +-- Produce Const and non-Const block in various SELECTs that may lead to UB w/o removing constness while squashing +INSERT INTO tbl_x +WITH + c4 AS + ( + SELECT 'aaa' AS col2 + UNION ALL + SELECT 'bbb' + ), + c6 AS + ( + SELECT r.col2 AS col2 + FROM (SELECT 'ccc' AS col2) AS r + LEFT JOIN (SELECT 'foo' AS col2) AS rt + USING col2 + ) +SELECT + * +FROM +( + SELECT * FROM c4 + UNION ALL + SELECT * FROM c6 +); diff --git a/parser/testdata/03601_json_from_string_accurate_cast_or_null/ast.json b/parser/testdata/03601_json_from_string_accurate_cast_or_null/ast.json new file mode 100644 index 000000000..702937dd1 --- /dev/null +++ b/parser/testdata/03601_json_from_string_accurate_cast_or_null/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " 
SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function accurateCastOrNull (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '{\"a\" : 42, \"a\" : 43}'" + }, + { + "explain": " Literal 'JSON'" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001103875, + "rows_read": 8, + "bytes_read": 313 + } +} diff --git a/parser/testdata/03601_json_from_string_accurate_cast_or_null/metadata.json b/parser/testdata/03601_json_from_string_accurate_cast_or_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03601_json_from_string_accurate_cast_or_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03601_json_from_string_accurate_cast_or_null/query.sql b/parser/testdata/03601_json_from_string_accurate_cast_or_null/query.sql new file mode 100644 index 000000000..9583b8289 --- /dev/null +++ b/parser/testdata/03601_json_from_string_accurate_cast_or_null/query.sql @@ -0,0 +1,5 @@ +select accurateCastOrNull('{"a" : 42, "a" : 43}', 'JSON'); +select accurateCastOrNull(materialize('{"a" : 42, "a" : 43}'), 'JSON'); +select accurateCastOrDefault('{"a" : 42, "a" : 43}', 'JSON'); +select accurateCastOrDefault(materialize('{"a" : 42, "a" : 43}'), 'JSON'); + diff --git a/parser/testdata/03601_replace_regex_fixedstring_empty_needle/ast.json b/parser/testdata/03601_replace_regex_fixedstring_empty_needle/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03601_replace_regex_fixedstring_empty_needle/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03601_replace_regex_fixedstring_empty_needle/metadata.json b/parser/testdata/03601_replace_regex_fixedstring_empty_needle/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03601_replace_regex_fixedstring_empty_needle/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03601_replace_regex_fixedstring_empty_needle/query.sql b/parser/testdata/03601_replace_regex_fixedstring_empty_needle/query.sql new file mode 100644 index 000000000..95babe2f4 --- /dev/null +++ b/parser/testdata/03601_replace_regex_fixedstring_empty_needle/query.sql @@ -0,0 +1,3 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/86261 +SELECT replaceRegexpAll(materialize(toFixedString(toLowCardinality(concat('z', number)), 2)), '', 'aazzqa') +FROM numbers(10); \ No newline at end of file diff --git a/parser/testdata/03601_temporary_views/ast.json b/parser/testdata/03601_temporary_views/ast.json new file mode 100644 index 000000000..2910c4569 --- /dev/null +++ b/parser/testdata/03601_temporary_views/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t_src (children 3)" + }, + { + "explain": " Identifier t_src" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration id (children 1)" + }, + { + "explain": " DataType UInt32" + }, + { + "explain": " ColumnDeclaration val (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001196867, + "rows_read": 10, + "bytes_read": 348 + } +} diff --git a/parser/testdata/03601_temporary_views/metadata.json 
b/parser/testdata/03601_temporary_views/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03601_temporary_views/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03601_temporary_views/query.sql b/parser/testdata/03601_temporary_views/query.sql new file mode 100644 index 000000000..3bdc9da15 --- /dev/null +++ b/parser/testdata/03601_temporary_views/query.sql @@ -0,0 +1,37 @@ +CREATE TEMPORARY TABLE t_src (id UInt32, val String) ENGINE = Memory; +INSERT INTO t_src VALUES (1,'a'), (2,'b'), (3,'c'); + +CREATE TEMPORARY VIEW tview_basic AS +SELECT id, upper(val) AS u +FROM t_src +WHERE id <= 2; + +SELECT * FROM tview_basic ORDER BY id; + +EXISTS TEMPORARY VIEW tview_basic; +SHOW TEMPORARY VIEW tview_basic; + +CREATE TEMPORARY VIEW IF NOT EXISTS tview_basic AS SELECT 0; + +CREATE OR REPLACE TEMPORARY VIEW tview_basic AS SELECT 0; -- { clientError SYNTAX_ERROR } + +CREATE TEMPORARY VIEW default.tview_db AS SELECT 1; -- { serverError BAD_DATABASE_FOR_TEMPORARY_TABLE } + +CREATE TEMPORARY VIEW tview_cluster ON CLUSTER 'test' AS SELECT 1; -- { serverError INCORRECT_QUERY } + +CREATE TEMPORARY TABLE tsrc_cluster ON CLUSTER 'test' AS SELECT 1; -- { serverError INCORRECT_QUERY } + +CREATE TEMPORARY VIEW tv_populate POPULATE AS SELECT 1; -- { clientError SYNTAX_ERROR } + +CREATE TEMPORARY VIEW tv_refresh REFRESH AS SELECT 1; -- { clientError SYNTAX_ERROR } + +CREATE TEMPORARY VIEW tv_to TO test_1602.tbl AS SELECT 1; -- { clientError SYNTAX_ERROR } + +CREATE TEMPORARY VIEW tv_engine ENGINE = Memory AS SELECT 1; -- { clientError SYNTAX_ERROR } + +CREATE TEMPORARY VIEW tv_empty EMPTY AS SELECT 1; -- { clientError SYNTAX_ERROR } + +CREATE TEMPORARY VIEW tv_inner INNER ENGINE = Memory AS SELECT 1; -- { clientError SYNTAX_ERROR } + +DROP TEMPORARY VIEW IF EXISTS tview_basic; +DROP TEMPORARY TABLE IF EXISTS t_src; \ No newline at end of file diff --git a/parser/testdata/03602_alter_update_nullable_json/ast.json b/parser/testdata/03602_alter_update_nullable_json/ast.json new file mode 100644 index 000000000..0f7b1d7f4 --- /dev/null +++ b/parser/testdata/03602_alter_update_nullable_json/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001475384, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03602_alter_update_nullable_json/metadata.json b/parser/testdata/03602_alter_update_nullable_json/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03602_alter_update_nullable_json/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03602_alter_update_nullable_json/query.sql b/parser/testdata/03602_alter_update_nullable_json/query.sql new file mode 100644 index 000000000..28c6343ed --- /dev/null +++ b/parser/testdata/03602_alter_update_nullable_json/query.sql @@ -0,0 +1,25 @@ +set mutations_sync=1; +set max_block_size=1000; + +drop table if exists test; +create table test (id UInt32, json Nullable(JSON(max_dynamic_paths=1))) engine=MergeTree order by tuple() settings min_bytes_for_wide_part='200G'; +insert into test select number, '{"a" : 1}' from numbers(100000); +alter table test update json='{"b" : 1}' where id > 90000; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(assumeNotNull(json))) from test; +select 'Shared data paths'; +select distinct 
arrayJoin(JSONSharedDataPaths(assumeNotNull(json))) from test; + +drop table test; + +create table test (id UInt32, json Nullable(JSON(max_dynamic_paths=1))) engine=MergeTree order by tuple() settings min_bytes_for_wide_part=1, min_rows_for_wide_part=1; +insert into test select number, '{"a" : 1}' from numbers(100000); +alter table test update json='{"b" : 1}' where id > 90000; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(assumeNotNull(json))) from test; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(assumeNotNull(json))) from test; + +drop table test; + + diff --git a/parser/testdata/03602_embeddedrock_path/ast.json b/parser/testdata/03602_embeddedrock_path/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03602_embeddedrock_path/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03602_embeddedrock_path/metadata.json b/parser/testdata/03602_embeddedrock_path/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03602_embeddedrock_path/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03602_embeddedrock_path/query.sql b/parser/testdata/03602_embeddedrock_path/query.sql new file mode 100644 index 000000000..43236a77a --- /dev/null +++ b/parser/testdata/03602_embeddedrock_path/query.sql @@ -0,0 +1,11 @@ +-- Tags: no-fasttest, use-rocksdb +-- no-fasttest: EmbeddedRocksDB requires libraries +CREATE TABLE embeddedrock_exploit +( + `key` String, + `v1` UInt32, + `v2` String, + `v3` Float32 +) +ENGINE = EmbeddedRocksDB(150,'/tmp/exploit') +PRIMARY KEY key; -- { serverError BAD_ARGUMENTS } \ No newline at end of file diff --git a/parser/testdata/03602_query_system_tables_definer/ast.json b/parser/testdata/03602_query_system_tables_definer/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03602_query_system_tables_definer/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03602_query_system_tables_definer/metadata.json b/parser/testdata/03602_query_system_tables_definer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03602_query_system_tables_definer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03602_query_system_tables_definer/query.sql b/parser/testdata/03602_query_system_tables_definer/query.sql new file mode 100644 index 000000000..96687363c --- /dev/null +++ b/parser/testdata/03602_query_system_tables_definer/query.sql @@ -0,0 +1,2 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/85973#issuecomment-3228974538 +SELECT count() != 0 FROM (Select definer FROM `system`.`tables`); \ No newline at end of file diff --git a/parser/testdata/03603_getSubcolumnType_msan/ast.json b/parser/testdata/03603_getSubcolumnType_msan/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03603_getSubcolumnType_msan/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03603_getSubcolumnType_msan/metadata.json b/parser/testdata/03603_getSubcolumnType_msan/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03603_getSubcolumnType_msan/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03603_getSubcolumnType_msan/query.sql b/parser/testdata/03603_getSubcolumnType_msan/query.sql new file mode 100644 index 000000000..a9d8e3282 --- /dev/null +++ 
b/parser/testdata/03603_getSubcolumnType_msan/query.sql @@ -0,0 +1,2 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/86279 +SELECT '{}'::JSON x QUALIFY x.^c0 = 1; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT, UNKNOWN_IDENTIFIER } diff --git a/parser/testdata/03603_ip_binary_operators/ast.json b/parser/testdata/03603_ip_binary_operators/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03603_ip_binary_operators/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03603_ip_binary_operators/metadata.json b/parser/testdata/03603_ip_binary_operators/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03603_ip_binary_operators/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03603_ip_binary_operators/query.sql b/parser/testdata/03603_ip_binary_operators/query.sql new file mode 100644 index 000000000..8ef723a28 --- /dev/null +++ b/parser/testdata/03603_ip_binary_operators/query.sql @@ -0,0 +1,30 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/71415 +SELECT now() + CAST(toFixedString(materialize(toNullable('1')), 1), 'IPv6'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT CAST('2000-01-01', 'Date32') - CAST(0, 'IPv4'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 IPv4) ENGINE = Memory; +SELECT (t0.c0, t0.c0) * t0.c0 FROM t0; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +DROP TABLE IF EXISTS t0; + +SELECT materialize('1')::IPv6 + '2000-01-01 00:00:00'::DateTime; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +-- https://github.com/ClickHouse/ClickHouse/issues/83963 +CREATE TABLE `02763_alias__fuzz_25` (`x` DateTime64(3), `y` IPv4, `z` Float32 ALIAS x + y) ENGINE = MergeTree ORDER BY x; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +-- Other nonsensical operations +SELECT toIPv4('127.0.0.1') + 0.1; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT 0.1 + toIPv4('127.0.0.1'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toIPv6('127.0.0.1') + 0.1; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT 0.1 + toIPv6('127.0.0.1'); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT toIPv4('127.0.0.1') + 0.1::Float32; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toIPv6('127.0.0.1') + 0.1::Float32; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toIPv4('127.0.0.1') + 0.1::BFloat16; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toIPv6('127.0.0.1') + 0.1::BFloat16; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT toIPv4('127.0.0.1') + INTERVAL 1 SECOND; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toIPv6('127.0.0.1') + INTERVAL 1 SECOND; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT toIPv4('127.0.0.1') + toDecimal32(0.1, 2); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT toIPv6('127.0.0.1') + toDecimal32(0.1, 2); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} diff --git a/parser/testdata/03603_reading_s3_cluster_all_nodes_unavailable/ast.json b/parser/testdata/03603_reading_s3_cluster_all_nodes_unavailable/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03603_reading_s3_cluster_all_nodes_unavailable/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03603_reading_s3_cluster_all_nodes_unavailable/metadata.json b/parser/testdata/03603_reading_s3_cluster_all_nodes_unavailable/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03603_reading_s3_cluster_all_nodes_unavailable/metadata.json @@ -0,0 +1 @@
+{"todo": true} diff --git a/parser/testdata/03603_reading_s3_cluster_all_nodes_unavailable/query.sql b/parser/testdata/03603_reading_s3_cluster_all_nodes_unavailable/query.sql new file mode 100644 index 000000000..80da21ffa --- /dev/null +++ b/parser/testdata/03603_reading_s3_cluster_all_nodes_unavailable/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest +-- s3Cluster is not used in fast tests + +SELECT * FROM s3Cluster('test_cluster_multiple_nodes_all_unavailable', 'http://localhost:11111/test/a.tsv'); -- { serverError ALL_CONNECTION_TRIES_FAILED } diff --git a/parser/testdata/03604_and_join_use_nulls_bug_83977/ast.json b/parser/testdata/03604_and_join_use_nulls_bug_83977/ast.json new file mode 100644 index 000000000..037286333 --- /dev/null +++ b/parser/testdata/03604_and_join_use_nulls_bug_83977/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery AA (children 3)" + }, + { + "explain": " Identifier AA" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration key (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration value (children 1)" + }, + { + "explain": " DataType Int64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier key" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001415953, + "rows_read": 11, + "bytes_read": 371 + } +} diff --git a/parser/testdata/03604_and_join_use_nulls_bug_83977/metadata.json b/parser/testdata/03604_and_join_use_nulls_bug_83977/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03604_and_join_use_nulls_bug_83977/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03604_and_join_use_nulls_bug_83977/query.sql b/parser/testdata/03604_and_join_use_nulls_bug_83977/query.sql new file mode 100644 index 000000000..73c930d4e --- /dev/null +++ b/parser/testdata/03604_and_join_use_nulls_bug_83977/query.sql @@ -0,0 +1,19 @@ +CREATE TABLE AA ( `key` String, `value` Int64 ) ENGINE = MergeTree ORDER BY (key); +INSERT INTO AA VALUES ('a', 5), ('b', 15); + +CREATE TABLE B ( `key` String, `flag` Bool ) ENGINE = MergeTree ORDER BY (key); +INSERT INTO B VALUES ('a', 1), ('c', 0); + +CREATE TABLE C ( `key` String ) ENGINE = MergeTree ORDER BY (key); +INSERT INTO C VALUES ('a'), ('d'); + +SELECT + `flag` AND value <= 10 +FROM ( + SELECT * FROM AA + LEFT JOIN B AS `t0` ON AA.`key` = `t0`.`key` + LEFT JOIN C AS `t1` ON AA.`key` = `t1`.`key` +) +ORDER BY ALL +SETTINGS join_use_nulls = 1, enable_analyzer = 1 +; diff --git a/parser/testdata/03604_dynamic_key_in_join/ast.json b/parser/testdata/03604_dynamic_key_in_join/ast.json new file mode 100644 index 000000000..efb6a264b --- /dev/null +++ b/parser/testdata/03604_dynamic_key_in_join/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001018629, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03604_dynamic_key_in_join/metadata.json b/parser/testdata/03604_dynamic_key_in_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03604_dynamic_key_in_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03604_dynamic_key_in_join/query.sql b/parser/testdata/03604_dynamic_key_in_join/query.sql new file mode 100644 index 000000000..18b0e3851 --- /dev/null +++ b/parser/testdata/03604_dynamic_key_in_join/query.sql @@ -0,0 +1,32 @@ +SET allow_dynamic_type_in_join_keys=0; + +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; +CREATE TABLE t0 (c0 Dynamic, c1 Array(Dynamic), c2 Tuple(d Dynamic)) ENGINE = MergeTree() ORDER BY tuple(); +CREATE TABLE t1 (c0 Int32, c1 Array(Int32), c2 Tuple(d Int32)) ENGINE = MergeTree() ORDER BY tuple(); + +INSERT INTO TABLE t0 VALUES (1, [1, 2, 3], tuple(1)); +INSERT INTO TABLE t1 VALUES (1, [1, 2, 3], tuple(1)); + +SET enable_analyzer = 0; + +SELECT * FROM t0 JOIN t1 ON t0.c0 = t1.c0; -- {serverError ILLEGAL_COLUMN} +SELECT * FROM t0 JOIN t1 ON t0.c1 = t1.c1; -- {serverError ILLEGAL_COLUMN} +SELECT * FROM t0 JOIN t1 ON t0.c2 = t1.c2; -- {serverError ILLEGAL_COLUMN} + +SET enable_analyzer = 1; +SET query_plan_use_new_logical_join_step = 0; + +SELECT * FROM t0 JOIN t1 ON t0.c0 = t1.c0; -- {serverError ILLEGAL_COLUMN} +SELECT * FROM t0 JOIN t1 ON t0.c1 = t1.c1; -- {serverError ILLEGAL_COLUMN} +SELECT * FROM t0 JOIN t1 ON t0.c2 = t1.c2; -- {serverError ILLEGAL_COLUMN} + +SET query_plan_use_new_logical_join_step = 1; + +SELECT * FROM t0 JOIN t1 ON t0.c0 = t1.c0; -- {serverError ILLEGAL_COLUMN} +SELECT * FROM t0 JOIN t1 ON t0.c1 = t1.c1; -- {serverError ILLEGAL_COLUMN} +SELECT * FROM t0 JOIN t1 ON t0.c2 = t1.c2; -- {serverError ILLEGAL_COLUMN} + +DROP TABLE t0; +DROP TABLE t1; + diff --git a/parser/testdata/03604_functions_to_subcolumns_outer_join/ast.json b/parser/testdata/03604_functions_to_subcolumns_outer_join/ast.json new file mode 100644 index 000000000..76cab6fa2 --- /dev/null +++ b/parser/testdata/03604_functions_to_subcolumns_outer_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery users (children 1)" + }, + { + "explain": " Identifier users" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001194954, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/03604_functions_to_subcolumns_outer_join/metadata.json b/parser/testdata/03604_functions_to_subcolumns_outer_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03604_functions_to_subcolumns_outer_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03604_functions_to_subcolumns_outer_join/query.sql b/parser/testdata/03604_functions_to_subcolumns_outer_join/query.sql new file mode 100644 index 000000000..0c753e8a6 --- /dev/null +++ b/parser/testdata/03604_functions_to_subcolumns_outer_join/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS users; +DROP TABLE IF EXISTS users_ext; +CREATE TABLE users (uid Int16, name String, age Int16) ENGINE = Memory; +INSERT INTO users VALUES (1231, 'John', 33); + +CREATE table users_ext(uid Int16, nullableStringCol Nullable(String)) ENGINE = Memory; +INSERT INTO users_ext VALUES (123, NULL); + +SELECT nullableStringCol IS NOT NULL FROM users LEFT JOIN users_ext ON users_ext.uid = users.uid; +SELECT count(nullableStringCol) FROM users LEFT JOIN users_ext ON users_ext.uid = users.uid; +SELECT nullableStringCol IS NULL FROM users LEFT JOIN users_ext ON users_ext.uid = users.uid; + +SELECT nullableStringCol IS NULL FROM users JOIN users users2 ON users.uid = users2.uid LEFT JOIN users_ext ON users_ext.uid = users.uid; +SELECT nullableStringCol IS NULL FROM users_ext 
RIGHT JOIN users users2 ON users_ext.uid = users2.uid LEFT JOIN users ON users2.uid = users.uid; + +SELECT nullableStringCol IS NULL FROM users_ext JOIN users users2 ON users_ext.uid = users2.uid FULL JOIN users ON users2.uid = users.uid; + +SELECT t1.nullableStringCol IS NULL, t2.nullableStringCol IS NULL, t3.nullableStringCol IS NULL +FROM users_ext t1 +FULL JOIN users_ext t2 ON t1.uid = t2.uid +FULL JOIN users_ext t3 ON t1.uid = t3.uid; diff --git a/parser/testdata/03604_join_reorder_pinned_bug/ast.json b/parser/testdata/03604_join_reorder_pinned_bug/ast.json new file mode 100644 index 000000000..92168176d --- /dev/null +++ b/parser/testdata/03604_join_reorder_pinned_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001003389, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03604_join_reorder_pinned_bug/metadata.json b/parser/testdata/03604_join_reorder_pinned_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03604_join_reorder_pinned_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03604_join_reorder_pinned_bug/query.sql b/parser/testdata/03604_join_reorder_pinned_bug/query.sql new file mode 100644 index 000000000..fd238e33b --- /dev/null +++ b/parser/testdata/03604_join_reorder_pinned_bug/query.sql @@ -0,0 +1,9 @@ +SET query_plan_optimize_join_order_limit = 4; + +SELECT 1 + FROM (SELECT 1 c0 LIMIT 1) AS t1 + LEFT JOIN (SELECT 1 c0 LIMIT 1) t3 ON t1.c0 = t3.c0 +INNER JOIN (SELECT 1 c0 LIMIT 1) t5 ON t3.c0 = t5.c0 +INNER JOIN (SELECT 1 c0 LIMIT 1) t7 ON t5.c0 = t7.c0 +; + diff --git a/parser/testdata/03604_key_condition_set_tuple_bug/ast.json b/parser/testdata/03604_key_condition_set_tuple_bug/ast.json new file mode 100644 index 000000000..f47f19f4d --- /dev/null +++ b/parser/testdata/03604_key_condition_set_tuple_bug/ast.json @@ -0,0 +1,82 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 3)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " ColumnDeclaration a (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration b (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration c (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration d (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Identifier b" + }, + { + "explain": " Identifier c" + }, + { + "explain": " Identifier d" + }, + { + "explain": " Set" + } + ], + + "rows": 20, + + "statistics": + { + "elapsed": 0.000924457, + "rows_read": 20, + "bytes_read": 636 + } +} diff --git a/parser/testdata/03604_key_condition_set_tuple_bug/metadata.json b/parser/testdata/03604_key_condition_set_tuple_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03604_key_condition_set_tuple_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03604_key_condition_set_tuple_bug/query.sql 
b/parser/testdata/03604_key_condition_set_tuple_bug/query.sql new file mode 100644 index 000000000..3d0f986fb --- /dev/null +++ b/parser/testdata/03604_key_condition_set_tuple_bug/query.sql @@ -0,0 +1,9 @@ +create table t (a String, b String, c String, d String) order by (a, b, c, d) settings index_granularity=10; +insert into t select intDiv(number, 50), intDiv(number, 50), 0, number % 10 from numbers(50 + 10); +select count() from t where a = '0' and b = '0' and (c, d) in ('0', '5'); +select count() from t where a = '0' and b = '0' and (c, d) in ('0', '5') settings optimize_use_implicit_projections=0; + +-- Have some granules where all rows pass the filter. +insert into t select intDiv(number, 100), intDiv(number, 100), 0, intDiv(number, 33) from numbers(100 + 10); +select count() from t where a = '0' and b = '0' and (c, d) in ('0', '2'); +select count() from t where a = '0' and b = '0' and (c, d) in ('0', '2') settings optimize_use_implicit_projections=0; diff --git a/parser/testdata/03604_parallel_with_query_lock/ast.json b/parser/testdata/03604_parallel_with_query_lock/ast.json new file mode 100644 index 000000000..ada326eec --- /dev/null +++ b/parser/testdata/03604_parallel_with_query_lock/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001026225, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03604_parallel_with_query_lock/metadata.json b/parser/testdata/03604_parallel_with_query_lock/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03604_parallel_with_query_lock/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03604_parallel_with_query_lock/query.sql b/parser/testdata/03604_parallel_with_query_lock/query.sql new file mode 100644 index 000000000..2eeddc3d1 --- /dev/null +++ b/parser/testdata/03604_parallel_with_query_lock/query.sql @@ -0,0 +1,6 @@ +SET max_threads = 1; +SET lock_acquire_timeout = 1; + +CREATE TABLE t0 (c0 Int) ENGINE = Memory(); + +INSERT INTO TABLE t0 (c0) SELECT 1 PARALLEL WITH TRUNCATE t0; -- { serverError DEADLOCK_AVOIDED } diff --git a/parser/testdata/03604_plan_step_description_limit/ast.json b/parser/testdata/03604_plan_step_description_limit/ast.json new file mode 100644 index 000000000..e5fcfb42d --- /dev/null +++ b/parser/testdata/03604_plan_step_description_limit/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001269553, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03604_plan_step_description_limit/metadata.json b/parser/testdata/03604_plan_step_description_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03604_plan_step_description_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03604_plan_step_description_limit/query.sql b/parser/testdata/03604_plan_step_description_limit/query.sql new file mode 100644 index 000000000..1425b65fb --- /dev/null +++ b/parser/testdata/03604_plan_step_description_limit/query.sql @@ -0,0 +1,4 @@ +set enable_analyzer=1; + +explain description=1, optimize=0 select sum(number + 1) from numbers(10) settings query_plan_max_step_description_length=3; +explain description=1, optimize=1 select sum(number + 1) from numbers(10) settings 
query_plan_max_step_description_length=3; diff --git a/parser/testdata/03604_string_with_size_stream/ast.json b/parser/testdata/03604_string_with_size_stream/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03604_string_with_size_stream/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03604_string_with_size_stream/metadata.json b/parser/testdata/03604_string_with_size_stream/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03604_string_with_size_stream/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03604_string_with_size_stream/query.sql b/parser/testdata/03604_string_with_size_stream/query.sql new file mode 100644 index 000000000..0abf036de --- /dev/null +++ b/parser/testdata/03604_string_with_size_stream/query.sql @@ -0,0 +1,111 @@ +-- { echo ON } + +drop table if exists test; + +create table test (s String) engine MergeTree order by () settings serialization_info_version = 'basic', string_serialization_version = 'single_stream'; + +insert into test values ('hello world'); + +-- Old string type also supports .size subcolumn +select s.size from test; + +-- system.parts_columns table only lists physical subcolumns/substreams +select column, substreams, subcolumns.names, subcolumns.types from system.parts_columns where database = currentDatabase() and table = 'test' and active order by column; + +drop table test; + +create table test (s String) engine MergeTree order by () settings serialization_info_version = 'with_types', string_serialization_version = 'with_size_stream'; + +insert into test values ('hello world'); + +-- Verify that string type is serialized with a physical .size stream +select column, substreams, subcolumns.names, subcolumns.types from system.parts_columns where database = currentDatabase() and table = 'test' and active order by column; + +drop table test; + +-- When `serialization_info_version` is set to `single_stream`, any per-type string serialization version (`string_serialization_version`) will be ignored and reset to `DEFAULT`. 
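A minimal sketch of what the physical size stream buys, assuming the same settings exercised in this test (`demo` is a hypothetical table name; the expectation follows from the `with_size_stream` cases above):

create table demo (s String) engine MergeTree order by ()
    settings serialization_info_version = 'with_types', string_serialization_version = 'with_size_stream';
insert into demo select repeat('x', 1000) from numbers(100);
-- With a physical .size substream, this read presumably does not need to decode the string payload itself.
select s.size from demo limit 1;
drop table demo;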
+create table test (s String) engine MergeTree order by () settings serialization_info_version = 'basic', string_serialization_version = 'with_size_stream'; + +insert into test values ('hello world'); + +-- Verify that string type is not serialized with a physical .size stream +select column, substreams, subcolumns.names, subcolumns.types from system.parts_columns where database = currentDatabase() and table = 'test' and active order by column; + +drop table test; + +-- Lazy materialization test + +set query_plan_optimize_lazy_materialization=1; +set query_plan_max_limit_for_lazy_materialization=10; + +drop table if exists test_old; +drop table if exists test_new; + +create table test_old (x UInt64, y UInt64, s String) engine MergeTree order by x settings serialization_info_version = 'basic'; +create table test_new (x UInt64, y UInt64, s String) engine MergeTree order by x settings serialization_info_version = 'with_types', string_serialization_version = 'with_size_stream'; + +insert into test_old select number, number, number from numbers(10); +insert into test_new select number, number, number from numbers(10); + +select s.size, s from test_old where y > 5 order by y limit 2; +select s, s.size from test_old where y > 5 order by y limit 2; +select s.size, s from test_new where y > 5 order by y limit 2; +select s, s.size from test_new where y > 5 order by y limit 2; + +drop table test_old; +drop table test_new; + +-- Substreams cache test for Compact/Wide parts and inside Tuple + +drop table if exists test_old_compact; +drop table if exists test_old_wide; +drop table if exists test_new_compact; +drop table if exists test_new_wide; + +create table test_old_compact (s String, t Tuple(a String, b String)) engine MergeTree order by () settings serialization_info_version = 'basic', min_rows_for_wide_part = 10000000, min_bytes_for_wide_part = 10000000; +create table test_old_wide (s String, t Tuple(a String, b String)) engine MergeTree order by () settings serialization_info_version = 'basic', min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1; +create table test_new_compact (s String, t Tuple(a String, b String)) engine MergeTree order by () settings serialization_info_version = 'with_types', string_serialization_version = 'with_size_stream', min_rows_for_wide_part = 10000000, min_bytes_for_wide_part = 10000000; +create table test_new_wide (s String, t Tuple(a String, b String)) engine MergeTree order by () settings serialization_info_version = 'with_types', string_serialization_version = 'with_size_stream', min_rows_for_wide_part = 1, min_bytes_for_wide_part = 1; + +insert into test_old_compact select number, (number, number) from numbers(10); +insert into test_old_wide select number, (number, number) from numbers(10); +insert into test_new_compact select number, (number, number) from numbers(10); +insert into test_new_wide select number, (number, number) from numbers(10); + +select s, s.size, t.a, t.a.size, t.b, t.b.size from test_old_compact order by all limit 2 offset 3; +select s.size, s, t.a, t.a.size, t.b, t.b.size from test_old_compact order by all limit 2 offset 3; +select s, s.size, t.a, t.a.size, t.b, t.b.size from test_old_wide order by all limit 2 offset 3; +select s.size, s, t.a, t.a.size, t.b, t.b.size from test_old_wide order by all limit 2 offset 3; + +select s, s.size, t.a, t.a.size, t.b, t.b.size from test_new_compact order by all limit 2 offset 3; +select s.size, s, t.a, t.a.size, t.b, t.b.size from test_new_compact order by all limit 2 offset 3; +select s, s.size, t.a, 
t.a.size, t.b, t.b.size from test_new_wide order by all limit 2 offset 3; +select s.size, s, t.a, t.a.size, t.b, t.b.size from test_new_wide order by all limit 2 offset 3; + +drop table test_old_compact; +drop table test_old_wide; +drop table test_new_compact; +drop table test_new_wide; + +-- Test empty string comparison and .size subcolumn optimization +set enable_analyzer = 1; +set optimize_empty_string_comparisons = 1; +set optimize_functions_to_subcolumns = 0; + +drop table if exists t_column_names; + +create table t_column_names (s String) engine Memory; + +insert into t_column_names values ('foo'); + +explain query tree dump_tree = 0, dump_ast = 1 select s != '' from t_column_names; + +select s != '' from t_column_names; + +set optimize_functions_to_subcolumns = 1; + +explain query tree dump_tree = 0, dump_ast = 1 select s != '' from t_column_names; + +select s != '' from t_column_names; + +drop table t_column_names; diff --git a/parser/testdata/03604_test_merge_tree_min_read_task_size_is_zero/ast.json b/parser/testdata/03604_test_merge_tree_min_read_task_size_is_zero/ast.json new file mode 100644 index 000000000..e9a1db651 --- /dev/null +++ b/parser/testdata/03604_test_merge_tree_min_read_task_size_is_zero/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 03604_test (children 1)" + }, + { + "explain": " Identifier 03604_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001283942, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03604_test_merge_tree_min_read_task_size_is_zero/metadata.json b/parser/testdata/03604_test_merge_tree_min_read_task_size_is_zero/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03604_test_merge_tree_min_read_task_size_is_zero/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03604_test_merge_tree_min_read_task_size_is_zero/query.sql b/parser/testdata/03604_test_merge_tree_min_read_task_size_is_zero/query.sql new file mode 100644 index 000000000..f8ed6d6f9 --- /dev/null +++ b/parser/testdata/03604_test_merge_tree_min_read_task_size_is_zero/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS `03604_test`; + +SET allow_experimental_lightweight_update = 1; + +-- catch error BAD_ARGUMENTS +SET merge_tree_min_read_task_size = 0; -- { serverError BAD_ARGUMENTS } + +CREATE TABLE `03604_test` (c0 Int) +ENGINE = MergeTree() +ORDER BY tuple() +SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; + +INSERT INTO TABLE `03604_test` (c0) VALUES (1); + +DELETE FROM `03604_test` WHERE c0 = 2; +UPDATE `03604_test` SET c0 = 3 WHERE TRUE; + +-- catch error BAD_ARGUMENTS +SELECT count() +FROM `03604_test` +SETTINGS apply_mutations_on_fly = 1, merge_tree_min_read_task_size = 0; -- {clientError BAD_ARGUMENTS} + +DROP TABLE `03604_test`; \ No newline at end of file diff --git a/parser/testdata/03604_to_date_casts/ast.json b/parser/testdata/03604_to_date_casts/ast.json new file mode 100644 index 000000000..066a177f3 --- /dev/null +++ b/parser/testdata/03604_to_date_casts/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001363661, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03604_to_date_casts/metadata.json b/parser/testdata/03604_to_date_casts/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03604_to_date_casts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03604_to_date_casts/query.sql b/parser/testdata/03604_to_date_casts/query.sql new file mode 100644 index 000000000..682b4aae8 --- /dev/null +++ b/parser/testdata/03604_to_date_casts/query.sql @@ -0,0 +1,111 @@ +SET session_timezone='UTC'; + +-- toDate32 function +CREATE TABLE test_date32_casts (from String, val Date32) Engine=Memory; + +SELECT 'check date32 interpreted as seconds'; +INSERT INTO test_date32_casts VALUES + ('Int32', 120530::Int32), + ('UInt32', 120530::UInt32), + ('Int64', 120530::Int64), + ('UInt64', 120530::UInt64), + ('Int128', 120530::Int128), + ('UInt128', 120530::UInt128), + ('Int256', 120530::Int256), + ('UInt256', 120530::UInt256), + ('Float32', 120530::Float32), + ('Float64', 120530::Float64), + ('BFloat16', 121344::BFloat16); -- BFloat16 can't represent 120530 exactly, but it's still the same date +SELECT from, val, val::Int32 FROM test_date32_casts ORDER BY ALL; + +TRUNCATE TABLE test_date32_casts; + +SELECT 'check date32 interpreted as days'; +INSERT INTO test_date32_casts VALUES + ('Int32', 7::Int32), + ('UInt32', 7::UInt32), + ('Int64', 7::Int64), + ('UInt64', 7::UInt64), + ('Int128', 7::Int128), + ('UInt128', 7::UInt128), + ('Int256', 7::Int256), + ('UInt256', 7::UInt256), + ('Float32', 7::Float32), + ('Float64', 7::Float64), + ('BFloat16', 7::BFloat16); +SELECT from, val, val::Int32 FROM test_date32_casts ORDER BY ALL; + +TRUNCATE TABLE test_date32_casts; + +SELECT 'check date32 negative limit'; +INSERT INTO test_date32_casts VALUES + ('Int16', -30000::Int16), + ('Int32', -30000::Int32), + ('Int64', -30000::Int64), + ('Int128', -30000::Int128), + ('Int256', -30000::Int256), + ('Float32', -30000::Float32), + ('Float64', -30000::Float64), + ('BFloat16', -30000::BFloat16); +SELECT from, val, val::Int32 FROM test_date32_casts ORDER BY ALL; + +-- toDate function +CREATE TABLE test_date_casts (from String, val Date) Engine=Memory; + +SELECT 'check date interpreted as seconds'; +INSERT INTO test_date_casts VALUES + ('Int32', 86400::Int32), + ('UInt32', 86400::UInt32), + ('Int64', 86400::Int64), + ('UInt64', 86400::UInt64), + ('Int128', 86400::Int128), + ('UInt128', 86400::UInt128), + ('Int256', 86400::Int256), + ('UInt256', 86400::UInt256), + ('Float32', 86400::Float32), + ('Float64', 86400::Float64), + ('BFloat16', 86800::BFloat16); -- BFloat16 can't represent 86400 exactly, but it's still the same date +SELECT from, val, val::UInt16 FROM test_date_casts ORDER BY ALL; + +TRUNCATE TABLE test_date_casts; + +SELECT 'check date interpreted as days'; +INSERT INTO test_date_casts VALUES + ('Int32', 7::Int32), + ('UInt32', 7::UInt32), + ('Int64', 7::Int64), + ('UInt64', 7::UInt64), + ('Int128', 7::Int128), + ('UInt128', 7::UInt128), + ('Int256', 7::Int256), + ('UInt256', 7::UInt256), + ('Float32', 7::Float32), + ('Float64', 7::Float64), + ('BFloat16', 7::BFloat16); +SELECT from, val, val::UInt16 FROM test_date_casts ORDER BY ALL; + +TRUNCATE TABLE test_date_casts; + +SELECT 'check date negative use zero'; +INSERT INTO test_date_casts VALUES + ('Int8', -10::Int8), + ('Int16', -10::Int16), + ('Int32', -10::Int32), + ('Int64', -10::Int64), + ('Int128', -10::Int128), + ('Int256', -10::Int256), + ('Float32', -10::Float32), + ('Float64', -10::Float64), + ('BFloat16', -10::BFloat16); +SELECT from, val, val::UInt16 FROM test_date_casts ORDER BY ALL; + +-- Fuzzed +SELECT 'fuzz_71531'; +CREATE TABLE fuzz_71531 (c0 Date32 DEFAULT
-30000) ENGINE = Memory(); +INSERT INTO TABLE fuzz_71531 (c0) VALUES (DEFAULT), ('2000-01-01'); +SELECT c0 FROM fuzz_71531 ORDER BY c0 ASC; + +SELECT 'fuzz_86799'; +CREATE TABLE fuzz_86799 (c0 Date32) ENGINE = Memory; +INSERT INTO TABLE fuzz_86799 (c0) VALUES ('2000-01-01'), (799542731168215080 - 10::UInt128); +SELECT c0 FROM fuzz_86799 ORDER BY c0; diff --git a/parser/testdata/03605_dynamic_to_nullable_low_cardinality_bug/ast.json b/parser/testdata/03605_dynamic_to_nullable_low_cardinality_bug/ast.json new file mode 100644 index 000000000..afe09e505 --- /dev/null +++ b/parser/testdata/03605_dynamic_to_nullable_low_cardinality_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001242766, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03605_dynamic_to_nullable_low_cardinality_bug/metadata.json b/parser/testdata/03605_dynamic_to_nullable_low_cardinality_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03605_dynamic_to_nullable_low_cardinality_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03605_dynamic_to_nullable_low_cardinality_bug/query.sql b/parser/testdata/03605_dynamic_to_nullable_low_cardinality_bug/query.sql new file mode 100644 index 000000000..b63981f24 --- /dev/null +++ b/parser/testdata/03605_dynamic_to_nullable_low_cardinality_bug/query.sql @@ -0,0 +1,15 @@ +SET allow_suspicious_low_cardinality_types = 1, allow_experimental_dynamic_type = 1, allow_dynamic_type_in_join_keys=1; + +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; +CREATE TABLE t0 (c0 Dynamic) ENGINE = MergeTree() ORDER BY tuple(); +CREATE TABLE t1 (c0 LowCardinality(Nullable(Int))) ENGINE = MergeTree() ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO TABLE t0 (c0) VALUES (1::LowCardinality(Int)); +INSERT INTO TABLE t1 (c0) VALUES (1); + +SELECT 1 FROM t0 JOIN t1 ON t0.c0 = t1.c0; + +DROP TABLE t0; +DROP TABLE t1; + diff --git a/parser/testdata/03606_nullable_json_group_by/ast.json b/parser/testdata/03606_nullable_json_group_by/ast.json new file mode 100644 index 000000000..ee8d22a20 --- /dev/null +++ b/parser/testdata/03606_nullable_json_group_by/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '{\"a\" : 42}'" + }, + { + "explain": " Literal 'Nullable(JSON)'" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001195068, + "rows_read": 10, + "bytes_read": 365 + } +} diff --git a/parser/testdata/03606_nullable_json_group_by/metadata.json b/parser/testdata/03606_nullable_json_group_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03606_nullable_json_group_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03606_nullable_json_group_by/query.sql b/parser/testdata/03606_nullable_json_group_by/query.sql new file mode 100644 index 000000000..addd59473 --- /dev/null 
+++ b/parser/testdata/03606_nullable_json_group_by/query.sql @@ -0,0 +1,6 @@ +select '{"a" : 42}'::Nullable(JSON) group by 1; +select materialize('{"a" : 42}')::Nullable(JSON) group by 1; +select null::Nullable(JSON) group by 1; +select materialize(null)::Nullable(JSON) group by 1; +select (number % 2 ? null : '{"a" : 42}')::Nullable(JSON) as a from numbers(4) group by 1 order by a; + diff --git a/parser/testdata/03610_disjunctions_pushdown_optimization/ast.json b/parser/testdata/03610_disjunctions_pushdown_optimization/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03610_disjunctions_pushdown_optimization/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03610_disjunctions_pushdown_optimization/metadata.json b/parser/testdata/03610_disjunctions_pushdown_optimization/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03610_disjunctions_pushdown_optimization/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03610_disjunctions_pushdown_optimization/query.sql b/parser/testdata/03610_disjunctions_pushdown_optimization/query.sql new file mode 100644 index 000000000..8887fd814 --- /dev/null +++ b/parser/testdata/03610_disjunctions_pushdown_optimization/query.sql @@ -0,0 +1,290 @@ +-- Test: Disjunctions pushdown into JOIN branches +-- This test exercises the optimizer controlled by the setting `use_join_disjunctions_push_down`. +-- It checks that disjunctions (OR) over conjunctions can be split and pushed as per-side +-- pre-join filters without changing query results, and that when the optimization is disabled +-- such pre-join filters are not produced. It also validates join-order-dependent pushdown. +SET enable_analyzer=1; + +DROP TABLE IF EXISTS tp1; +DROP TABLE IF EXISTS tp2; + +CREATE TABLE tp1 (k Int32, a Int32) ENGINE = MergeTree() ORDER BY k; +CREATE TABLE tp2 (k Int32, x Int32) ENGINE = MergeTree() ORDER BY k; + +INSERT INTO tp1 VALUES (1,10),(2,20),(3,30),(4,40),(5,50),(6,60); +INSERT INTO tp2 VALUES (1,100),(2,100),(3,200),(4,200),(5,300),(6,300); + +-- We need to make sure that the query plan creates the JOIN filter only with the optimization enabled, and the WHERE filter in both cases. + +---------- CASE A ---------- + +SELECT '--- CASE A: plan (enabled) ---'; +SET use_join_disjunctions_push_down = 1; +SELECT REGEXP_REPLACE(trimLeft(explain), '__set_Int32_\\d+_\\d+', '__set_Int32_UNIQ_ID') +FROM ( + EXPLAIN actions=1 + SELECT t1.k, t1.a, t2.x + FROM tp1 AS t1 + JOIN tp2 AS t2 ON t1.k = t2.k + WHERE (t1.k IN (1,2) AND t2.x = 100) OR (t1.k IN (3,4) AND t2.x = 200) + ORDER BY t1.k + ) + +WHERE explain ILIKE '%Filter column: %' SETTINGS enable_parallel_replicas = 0 +FORMAT TSV; + +SELECT '--- CASE A: plan (disabled) ---'; +SET use_join_disjunctions_push_down = 0; +SELECT REGEXP_REPLACE(trimLeft(explain), '__set_Int32_\\d+_\\d+', '__set_Int32_UNIQ_ID') +FROM ( + EXPLAIN actions=1 + SELECT t1.k, t1.a, t2.x + FROM tp1 AS t1 + JOIN tp2 AS t2 ON t1.k = t2.k + WHERE (t1.k IN (1,2) AND t2.x = 100) OR (t1.k IN (3,4) AND t2.x = 200) + ORDER BY t1.k + ) + +WHERE explain ILIKE '%Filter column: %' SETTINGS enable_parallel_replicas = 0 +FORMAT TSV; + +-- Results identical in both modes (k in {1,2,3,4}) +SELECT '--- CASE A: result (enabled) ---'; +SET use_join_disjunctions_push_down = 1; +SELECT t1.k, t1.a, t2.x +FROM tp1 AS t1 +JOIN tp2 AS t2 ON t1.k = t2.k +WHERE (t1.k IN (1,2) AND t2.x = 100) OR (t1.k IN (3,4) AND t2.x = 200) +ORDER BY t1.k; + +SELECT '--- CASE A: result
(disabled) ---'; +SET use_join_disjunctions_push_down = 0; +SELECT t1.k, t1.a, t2.x +FROM tp1 AS t1 +JOIN tp2 AS t2 ON t1.k = t2.k +WHERE (t1.k IN (1,2) AND t2.x = 100) OR (t1.k IN (3,4) AND t2.x = 200) +ORDER BY t1.k; + +---------- CASE B ---------- + +SELECT '--- CASE B: plan (enabled) ---'; +SET use_join_disjunctions_push_down = 1; +SELECT REGEXP_REPLACE(trimLeft(explain), '__set_Int32_\\d+_\\d+', '__set_Int32_UNIQ_ID') +FROM ( + EXPLAIN actions=1 + SELECT t1.k, t1.a, t2.x + FROM tp1 AS t1 + JOIN tp2 AS t2 ON t1.k = t2.k + WHERE (t2.x = 100) OR (t2.x = 200) + ORDER BY t1.k + ) + +WHERE explain ILIKE '%Filter column: %' SETTINGS enable_parallel_replicas = 0 +FORMAT TSV; + +SELECT '--- CASE B: plan (disabled) ---'; +SET use_join_disjunctions_push_down = 0; +SELECT REGEXP_REPLACE(trimLeft(explain), '__set_Int32_\\d+_\\d+', '__set_Int32_UNIQ_ID') +FROM ( + EXPLAIN actions=1 + SELECT t1.k, t1.a, t2.x + FROM tp1 AS t1 + JOIN tp2 AS t2 ON t1.k = t2.k + WHERE (t2.x = 100) OR (t2.x = 200) + ORDER BY t1.k + ) + +WHERE explain ILIKE '%Filter column: %' SETTINGS enable_parallel_replicas = 0 +FORMAT TSV; + +-- Results identical in both modes (k in {1,2,3,4}) +SELECT '--- CASE B: result (enabled) ---'; +SET use_join_disjunctions_push_down = 1; +SELECT t1.k, t1.a, t2.x +FROM tp1 AS t1 +JOIN tp2 AS t2 ON t1.k = t2.k +WHERE (t2.x = 100) OR (t2.x = 200) +ORDER BY t1.k; + +SELECT '--- CASE B: result (disabled) ---'; +SET use_join_disjunctions_push_down = 0; +SELECT t1.k, t1.a, t2.x +FROM tp1 AS t1 +JOIN tp2 AS t2 ON t1.k = t2.k +WHERE (t2.x = 100) OR (t2.x = 200) +ORDER BY t1.k; + +---------- CASE C ---------- + +SELECT '--- CASE C: plan (enabled) ---'; +SET use_join_disjunctions_push_down = 1; +SELECT REGEXP_REPLACE(trimLeft(explain), '__set_Int32_\\d+_\\d+', '__set_Int32_UNIQ_ID') +FROM ( + EXPLAIN actions=1 + SELECT t1.k, t1.a, t2.x + FROM tp1 AS t1 + JOIN tp2 AS t2 ON t1.k = t2.k + WHERE (t1.k IN (1,2)) OR (t1.k IN (3,4)) + ORDER BY t1.k + ) + +WHERE explain ILIKE '%Filter column: %' SETTINGS enable_parallel_replicas = 0 +FORMAT TSV; + +SELECT '--- CASE C: plan (disabled) ---'; +SET use_join_disjunctions_push_down = 0; +SELECT REGEXP_REPLACE(trimLeft(explain), '__set_Int32_\\d+_\\d+', '__set_Int32_UNIQ_ID') +FROM ( + EXPLAIN actions=1 + SELECT t1.k, t1.a, t2.x + FROM tp1 AS t1 + JOIN tp2 AS t2 ON t1.k = t2.k + WHERE (t1.k IN (1,2)) OR (t1.k IN (3,4)) + ORDER BY t1.k + ) + +WHERE explain ILIKE '%Filter column: %' SETTINGS enable_parallel_replicas = 0 +FORMAT TSV; + +-- Results identical in both modes (k in {1,2,3,4}) +SELECT '--- CASE C: result (enabled) ---'; +SET use_join_disjunctions_push_down = 1; +SELECT t1.k, t1.a, t2.x +FROM tp1 AS t1 +JOIN tp2 AS t2 ON t1.k = t2.k +WHERE (t1.k IN (1,2)) OR (t1.k IN (3,4)) +ORDER BY t1.k; + +SELECT '--- CASE C: result (disabled) ---'; +SET use_join_disjunctions_push_down = 0; +SELECT t1.k, t1.a, t2.x +FROM tp1 AS t1 +JOIN tp2 AS t2 ON t1.k = t2.k +WHERE (t1.k IN (1,2)) OR (t1.k IN (3,4)) +ORDER BY t1.k; + +DROP TABLE tp1; +DROP TABLE tp2; + +---------- CASE D ---------- + +DROP TABLE IF EXISTS table1; +DROP TABLE IF EXISTS table2; + +CREATE TABLE table1 (a UInt32, b String) ENGINE = Memory; +CREATE TABLE table2 (c UInt32, d String) ENGINE = Memory; + +INSERT INTO table1 VALUES (5, 'a5'), (6, 'a6'), (7, 'a7'), (10, 'a10'); +INSERT INTO table2 VALUES (5, 'b5'), (6, 'b6'), (7, 'b7'), (10, 'b10'); + +SELECT '--- Case D: plan (enabled) ---'; +SET use_join_disjunctions_push_down = 1; +SELECT REGEXP_REPLACE(trimLeft(explain), '__set_Int32_\\d+_\\d+', 
'__set_Int32_UNIQ_ID') +FROM ( + EXPLAIN actions = 1 + SELECT * + FROM table1 + INNER JOIN table2 ON b = d + WHERE (a > 0) AND (c > 0) + AND ( ((a > 5) AND (c < 10)) OR ((a > 6) AND (c < 11)) ) + ORDER BY a ASC, c ASC + ) + +WHERE explain ILIKE '%Filter column: %' SETTINGS enable_parallel_replicas = 0 +FORMAT TSV; + +SELECT '--- Case D: plan (disabled) ---'; +SET use_join_disjunctions_push_down = 0; +SELECT REGEXP_REPLACE(trimLeft(explain), '__set_Int32_\\d+_\\d+', '__set_Int32_UNIQ_ID') +FROM ( + EXPLAIN actions = 1 + SELECT * + FROM table1 + INNER JOIN table2 ON b = d + WHERE (a > 0) AND (c > 0) + AND ( ((a > 5) AND (c < 10)) OR ((a > 6) AND (c < 11)) ) + ORDER BY a ASC, c ASC + ) + +WHERE explain ILIKE '%Filter column: %' SETTINGS enable_parallel_replicas = 0 +FORMAT TSV; + +SELECT '--- Case D: result (enabled) ---'; +SET use_join_disjunctions_push_down = 1; +SELECT * +FROM table1 +INNER JOIN table2 + ON (a = c) +WHERE (a > 0) AND (c > 0) + AND ( ((a > 5) AND (c < 10)) OR ((a > 6) AND (c < 11)) ) +ORDER BY a, c +FORMAT TSV; + +SELECT '--- Case D: result (disabled) ---'; +SET use_join_disjunctions_push_down = 0; +SELECT * +FROM table1 +INNER JOIN table2 + ON (a = c) +WHERE (a > 0) AND (c > 0) + AND ( ((a > 5) AND (c < 10)) OR ((a > 6) AND (c < 11)) ) +ORDER BY a, c +FORMAT TSV; + +DROP TABLE table1; +DROP TABLE table2; + +-- CASE E: every OR-branch contributes at least one pushable atom for that side +SELECT '--- Case E: result (enabled) ---'; +SET use_join_disjunctions_push_down = 1; +SELECT n1.number, n2.number +FROM numbers(6) AS n1, numbers(6) AS n2 +WHERE ((n1.number = 1 AND n2.number = 2) OR (n1.number = 3 AND n2.number = 4) OR (n1.number = 5)) +ORDER BY n1.number, n2.number +FORMAT TSV; + +SELECT '--- Case E: result (disabled) ---'; +SET use_join_disjunctions_push_down = 0; +SELECT n1.number, n2.number +FROM numbers(6) AS n1, numbers(6) AS n2 +WHERE ((n1.number = 1 AND n2.number = 2) OR (n1.number = 3 AND n2.number = 4) OR (n1.number = 5)) +ORDER BY n1.number, n2.number +FORMAT TSV; + +SELECT '--- Case F: plan (enabled) ---'; +SET use_join_disjunctions_push_down = 1; +SELECT REGEXP_REPLACE(trimLeft(explain), '__set_Int32_\\d+_\\d+', '__set_Int32_UNIQ_ID') +FROM ( + SELECT explain + FROM ( + EXPLAIN actions = 1 + SELECT + n1.number, + n2.number, + n3.number + FROM numbers(3) AS n2, numbers(3) AS n3, numbers(3) AS n1 + WHERE ((n1.number = 1) AND ((n2.number + n3.number) = 3)) + OR ((n1.number = 2) AND ((n2.number + n3.number) = 2)) + ) +) +WHERE explain ILIKE '%Filter column: %' SETTINGS enable_parallel_replicas = 0 +FORMAT TSV; + +SELECT '--- Case F: plan (disabled) ---'; +SET use_join_disjunctions_push_down = 0; +SELECT REGEXP_REPLACE(trimLeft(explain), '__set_Int32_\\d+_\\d+', '__set_Int32_UNIQ_ID') +FROM ( + SELECT explain + FROM ( + EXPLAIN actions = 1 + SELECT + n1.number, + n2.number, + n3.number + FROM numbers(3) AS n2, numbers(3) AS n3, numbers(3) AS n1 + WHERE ((n1.number = 1) AND ((n2.number + n3.number) = 3)) + OR ((n1.number = 2) AND ((n2.number + n3.number) = 2)) + ) +) +WHERE explain ILIKE '%Filter column: %' SETTINGS enable_parallel_replicas = 0 +FORMAT TSV; diff --git a/parser/testdata/03610_remote_queries_with_describe_compact_output/ast.json b/parser/testdata/03610_remote_queries_with_describe_compact_output/ast.json new file mode 100644 index 000000000..160fa07a7 --- /dev/null +++ b/parser/testdata/03610_remote_queries_with_describe_compact_output/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + 
"explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function remote (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '127.2'" + }, + { + "explain": " Identifier system.one" + }, + { + "explain": " Set" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001032439, + "rows_read": 13, + "bytes_read": 470 + } +} diff --git a/parser/testdata/03610_remote_queries_with_describe_compact_output/metadata.json b/parser/testdata/03610_remote_queries_with_describe_compact_output/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03610_remote_queries_with_describe_compact_output/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03610_remote_queries_with_describe_compact_output/query.sql b/parser/testdata/03610_remote_queries_with_describe_compact_output/query.sql new file mode 100644 index 000000000..675e40e91 --- /dev/null +++ b/parser/testdata/03610_remote_queries_with_describe_compact_output/query.sql @@ -0,0 +1 @@ +select * from remote('127.2', system.one) settings describe_compact_output=1; diff --git a/parser/testdata/03611_cte_deterministic/ast.json b/parser/testdata/03611_cte_deterministic/ast.json new file mode 100644 index 000000000..6c2429931 --- /dev/null +++ b/parser/testdata/03611_cte_deterministic/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001040424, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03611_cte_deterministic/metadata.json b/parser/testdata/03611_cte_deterministic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03611_cte_deterministic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03611_cte_deterministic/query.sql b/parser/testdata/03611_cte_deterministic/query.sql new file mode 100644 index 000000000..ce8ed7342 --- /dev/null +++ b/parser/testdata/03611_cte_deterministic/query.sql @@ -0,0 +1,5 @@ +set enable_analyzer = 1; + +with (select randConstant()) as a select a = a; +with (select now() + sleep(1)) as a select a = a; +with (select randConstant()) as b select b = b, a = b, `a=a` from (with (select randConstant()) as a select a, a = a as `a=a`); diff --git a/parser/testdata/03611_null_safe_comparsion/ast.json b/parser/testdata/03611_null_safe_comparsion/ast.json new file mode 100644 index 000000000..c86c7b2dc --- /dev/null +++ b/parser/testdata/03611_null_safe_comparsion/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001185884, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03611_null_safe_comparsion/metadata.json b/parser/testdata/03611_null_safe_comparsion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03611_null_safe_comparsion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03611_null_safe_comparsion/query.sql b/parser/testdata/03611_null_safe_comparsion/query.sql new file mode 100644 index 000000000..c566ca02f --- /dev/null +++ b/parser/testdata/03611_null_safe_comparsion/query.sql @@ -0,0 +1,698 @@ +SET enable_analyzer = 1; +SELECT '====================================================================='; +SELECT 'Test : ClickHouse NULL-safe comparison'; +SELECT '(1) <=> (IS NOT DISTINCT FROM)'; +SELECT '(2) IS DISTINCT FROM'; +SELECT '====================================================================='; +SELECT 'Purpose:'; +SELECT '1. Validate behavior of <=> and IS DISTINCT FROM across a wide range of ClickHouse data types and SQL clauses.'; +SELECT '2. Cover numeric, floating, string, enum, date/time, complex types, and NULL / NaN edge cases.'; +SELECT '3. test null-safe comparison in SELECT clause, WHERE, ORDER BY, GROUP BY, HAVING, JOIN, CASE/IF, WINDOW, and subqueries.'; +SELECT '====================================================================='; + +SELECT '0. Main table with many types'; +DROP TABLE IF EXISTS 03611_nscmp_tbl; +CREATE TABLE 03611_nscmp_tbl +( + `key` Int64, + `c_int8` Nullable(Int8), + `c_int16` Nullable(Int16), + `c_int32` Nullable(Int32), + `c_int64` Nullable(Int64), + `c_uint8` Nullable(UInt8), + `c_uint16` Nullable(UInt16), + `c_uint32` Nullable(UInt32), + `c_uint64` Nullable(UInt64), + `c_float32` Nullable(Float32), + `c_float64` Nullable(Float64), + `c_decimal` Nullable(Decimal(18, 4)), + `c_date` Nullable(Date), + `c_datetime` Nullable(DateTime), + `c_dt64` Nullable(DateTime64(3)), + `c_string` Nullable(String), + `c_fstring` Nullable(FixedString(4)), + `c_enum8` Nullable(Enum8('a' = 1, 'b' = 2, '' = 0)), + `c_enum16` Nullable(Enum16('x' = 100, 'y' = 200, '' = 0)), + `c_array` Array(Nullable(Int32)), + `c_tuple` Tuple(Nullable(Int32),Nullable(String)), + `c_map` Map(String, Nullable(Int32)), + `c_nullable` Nullable(Int32), + `c_uuid` Nullable(UUID), + `c_ipv4` Nullable(IPv4), + `c_ipv6` Nullable(IPv6), + `c_json` Nullable(JSON), + `c_nested` Nested( + id Nullable(Int32), + value Nullable(String) + ), + `c_variant` Variant(UInt64, String, Array(UInt64)), + `c_dynamic` Dynamic +) +ENGINE = MergeTree +ORDER BY key; + +SELECT 'Insert rows containing:'; +SELECT '• normal values'; +SELECT '• NULLs'; +SELECT '• NaN'; +SELECT '• edge numeric boundaries'; + +INSERT INTO 03611_nscmp_tbl VALUES +( + 1, + 1,1,1,1, 1,1,1,1, 1.0,1.0, 123.4567, + '2025-09-22', '2025-09-22 12:34:56', '2025-09-22 12:34:56.789', + 'abc','abcd','a','x', + [1,2,3], + (1,'t'), + map('k1',1), + 100, + generateUUIDv4(), + '127.0.0.1', + '::1', + '{"k":"v"}', + [1], -- c_nested.id + ['test nested'], -- c_nested.value + 'test variant', -- c_variant + 'test dynamic' -- c_dynamic +), +( + 2, + NULL,NULL,NULL,NULL, NULL,NULL,NULL,NULL, NULL,NULL, NULL, + NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, -- c_nested.id NULL + NULL, -- c_nested.value NULL + NULL, -- c_variant + NULL -- c_dynamic +), +( + 3, + 2,2,2,2, 2,2,2,2, nan,nan, 999.9999, + '2025-09-23', '2025-09-23 01:02:03', '2025-09-23 01:02:03.321', + 'xyz','zzzz','b','y', + [4,5], + (2,'u'), + map('k2',2), + 200, + generateUUIDv4(), + '10.0.0.1', + '2001:db8::1', + '{"k2":"v2"}', + [201], -- c_nested.id + ['nested_val'], -- c_nested.value + 24, -- c_variant + 12 -- c_dynamic +), +( + 4, + -5,-5,-5,-5, 255,65535,4294967295,18446744073709551615, -1,0.0, -123.0001, + '1970-01-01', '1970-01-01 00:00:00', '1970-01-01 00:00:00.000', 
+ '', 'aaaa','a','x', + [],(0,''), + map('edge',-1), + NULL, + generateUUIDv4(), + '0.0.0.0', + '::', + '{}', + [], -- c_nested.id empty + [], -- c_nested.value empty + [1, 2, 3], -- c_variant + [2, 3, 4] -- c_dynamic +); + + +SELECT '1.1 Basic NULL-safe equality and distinctness'; +SELECT 'Compare same column to itself'; +SELECT + -- Integers + c_int8 <=> c_int8 AS c_int8_self_eq, + c_int8 IS DISTINCT FROM c_int8 AS c_int8_self_distinct, + + c_int16 <=> c_int16 AS c_int16_self_eq, + c_int16 IS DISTINCT FROM c_int16 AS c_int16_self_distinct, + + c_int32 <=> c_int32 AS c_int32_self_eq, + c_int32 IS DISTINCT FROM c_int32 AS c_int32_self_distinct, + + c_int64 <=> c_int64 AS c_int64_self_eq, + c_int64 IS DISTINCT FROM c_int64 AS c_int64_self_distinct, + + c_uint8 <=> c_uint8 AS c_uint8_self_eq, + c_uint8 IS DISTINCT FROM c_uint8 AS c_uint8_self_distinct, + + c_uint16 <=> c_uint16 AS c_uint16_self_eq, + c_uint16 IS DISTINCT FROM c_uint16 AS c_uint16_self_distinct, + + c_uint32 <=> c_uint32 AS c_uint32_self_eq, + c_uint32 IS DISTINCT FROM c_uint32 AS c_uint32_self_distinct, + + c_uint64 <=> c_uint64 AS c_uint64_self_eq, + c_uint64 IS DISTINCT FROM c_uint64 AS c_uint64_self_distinct, + + -- Floating-point + c_float32 <=> c_float32 AS c_float32_self_eq, + c_float32 IS DISTINCT FROM c_float32 AS c_float32_self_distinct, + + c_float64 <=> c_float64 AS c_float64_self_eq, + c_float64 IS DISTINCT FROM c_float64 AS c_float64_self_distinct, + + -- Decimal + c_decimal <=> c_decimal AS c_decimal_self_eq, + c_decimal IS DISTINCT FROM c_decimal AS c_decimal_self_distinct, + + -- Date / DateTime + c_date <=> c_date AS c_date_self_eq, + c_date IS DISTINCT FROM c_date AS c_date_self_distinct, + + c_datetime <=> c_datetime AS c_datetime_self_eq, + c_datetime IS DISTINCT FROM c_datetime AS c_datetime_self_distinct, + + c_dt64 <=> c_dt64 AS c_dt64_self_eq, + c_dt64 IS DISTINCT FROM c_dt64 AS c_dt64_self_distinct, + + -- Strings + c_string <=> c_string AS c_string_self_eq, + c_string IS DISTINCT FROM c_string AS c_string_self_distinct, + + c_fstring <=> c_fstring AS c_fstring_self_eq, + c_fstring IS DISTINCT FROM c_fstring AS c_fstring_self_distinct, + + -- Enums + c_enum8 <=> c_enum8 AS c_enum8_self_eq, + c_enum8 IS DISTINCT FROM c_enum8 AS c_enum8_self_distinct, + + c_enum16 <=> c_enum16 AS c_enum16_self_eq, + c_enum16 IS DISTINCT FROM c_enum16 AS c_enum16_self_distinct, + + -- Arrays + c_array <=> c_array AS c_array_self_eq, + c_array IS DISTINCT FROM c_array AS c_array_self_distinct, + + -- Tuple + c_tuple <=> c_tuple AS c_tuple_self_eq, + c_tuple IS DISTINCT FROM c_tuple AS c_tuple_self_distinct, + + -- Map + c_map <=> c_map AS c_map_self_eq, + c_map IS DISTINCT FROM c_map AS c_map_self_distinct, + + -- Nullable basic + c_nullable <=> c_nullable AS c_nullable_self_eq, + c_nullable IS DISTINCT FROM c_nullable AS c_nullable_self_distinct, + + -- UUID / IP + c_uuid <=> c_uuid AS c_uuid_self_eq, + c_uuid IS DISTINCT FROM c_uuid AS c_uuid_self_distinct, + + c_ipv4 <=> c_ipv4 AS c_ipv4_self_eq, + c_ipv4 IS DISTINCT FROM c_ipv4 AS c_ipv4_self_distinct, + + c_ipv6 <=> c_ipv6 AS c_ipv6_self_eq, + c_ipv6 IS DISTINCT FROM c_ipv6 AS c_ipv6_self_distinct, + + -- JSON + c_json <=> c_json AS c_json_self_eq, + c_json IS DISTINCT FROM c_json AS c_json_self_distinct, + + -- Nested + c_nested.id <=> c_nested.id AS c_nested_id_self_eq, + c_nested.id IS DISTINCT FROM c_nested.id AS c_nested_id_self_distinct, + c_nested.value <=> c_nested.value AS c_nested_value_self_eq, + c_nested.value IS DISTINCT FROM c_nested.value AS 
c_nested_value_self_distinct, + + -- Variant + c_variant <=> c_variant AS c_variant_self_eq, + c_variant IS DISTINCT FROM c_variant AS c_variant_self_distinct, + + -- Dynamic + c_dynamic <=> c_dynamic AS c_dynamic_self_eq, + c_dynamic IS DISTINCT FROM c_dynamic AS c_dynamic_self_distinct + +FROM 03611_nscmp_tbl +ORDER BY key; + + +SELECT '1.2 NaN behavior'; +SELECT 'NaN <=> NaN = 0, NaN <=> number = 0'; +SELECT 'NaN is distinct from NaN = 1, NaN is distinct from number = 1'; +SELECT + c_float32, + c_float32 <=> nan AS nan_cmp, + c_float32 IS DISTINCT FROM nan AS nan_distinct, + c_float32 <=> 1.0 AS cmp_with_one, + c_float32 IS DISTINCT FROM 1.0 AS cmp_with_one_distinct +FROM 03611_nscmp_tbl +ORDER BY key; + +-- 1.3 String, FixedString, Enum +SELECT '1.3 String, FixedString, Enum'; +SELECT + c_string <=> 'abc' AS string_vs_literal, + c_string IS DISTINCT FROM 'abc' AS string_vs_literal_distinct, + c_string <=> NULL AS string_vs_null, + c_string IS DISTINCT FROM NULL AS string_vs_null_distinct, + + c_fstring <=> 'abcd' AS fstring_vs_literal, + c_fstring IS DISTINCT FROM 'abcd' AS fstring_vs_literal_distinct, + c_fstring <=> NULL AS fstring_vs_null, + c_fstring IS DISTINCT FROM NULL AS fstring_vs_null_distinct, + + c_enum8 <=> CAST('a' AS Enum8('a'=1,'b'=2)) AS enum8_check, + c_enum8 IS DISTINCT FROM CAST('a' AS Enum8('a'=1,'b'=2)) AS enum8_check_distinct, + c_enum8 <=> NULL AS enum8_vs_null, + c_enum8 IS DISTINCT FROM NULL AS enum8_vs_null_distinct, + + c_enum16 <=> CAST('x' AS Enum16('x'=100,'y'=200)) AS enum16_check, + c_enum16 IS DISTINCT FROM CAST('x' AS Enum16('x'=100,'y'=200)) AS enum16_check_distinct, + c_enum16 <=> NULL AS enum16_vs_null, + c_enum16 IS DISTINCT FROM NULL AS enum16_vs_null_distinct +FROM 03611_nscmp_tbl +ORDER BY key; + +-- 1.4 Date and time +SELECT '1.4 Date and time'; +SELECT + c_date <=> toDate('2025-09-22') AS date_check, + c_date IS DISTINCT FROM toDate('2025-09-22') AS date_check_distinct, + c_date <=> NULL AS date_vs_null, + NULL <=> c_date AS null_vs_date, + c_date IS DISTINCT FROM NULL AS date_vs_null_distinct, + NULL IS DISTINCT FROM c_date AS null_vs_date_distinct, + + c_datetime <=> toDateTime('2025-09-22 12:34:56') AS datetime_check, + c_datetime IS DISTINCT FROM toDateTime('2025-09-22 12:34:56') AS datetime_check_distinct, + c_datetime <=> NULL AS datetime_vs_null, + NULL <=> c_datetime AS null_vs_datetime, + c_datetime IS DISTINCT FROM NULL AS datetime_vs_null_distinct, + NULL IS DISTINCT FROM c_datetime AS null_vs_datetime_distinct, + + c_dt64 <=> toDateTime64('2025-09-22 12:34:56.789',3) AS dt64_check, + c_dt64 IS DISTINCT FROM toDateTime64('2025-09-22 12:34:56.789',3) AS dt64_check_distinct, + c_dt64 <=> NULL AS dt64_vs_null, + NULL <=> c_dt64 AS null_vs_c_dt64, + c_dt64 IS DISTINCT FROM NULL AS dt64_vs_null_distinct, + NULL IS DISTINCT FROM c_dt64 AS null_vs_dt64_distinct +FROM 03611_nscmp_tbl +ORDER BY key; + +-- 1.5 Complex types: Array / Tuple / Map / Nullable / Variant / Dynamic / JSON +SELECT '1.5 Complex types: Array / Tuple / Map / Variant / Dynamic / JSON'; + +SELECT + -- ===================== + -- Array cmp + -- ===================== + c_array <=> [1, 2, 3] AS array_check, + c_array IS DISTINCT FROM [1, 2, 3] AS array_check_distinct, + c_array <=> [1, 2, NULL] AS array_check_with_null, + c_array IS DISTINCT FROM [1, 2, NULL] AS array_check_with_null_distinct, + c_array <=> [] AS array_empty_check, + c_array IS DISTINCT FROM [] AS array_empty_check_distinct, + c_array <=> NULL AS array_vs_null, + NULL <=> c_array AS null_vs_array, + c_array 
IS DISTINCT FROM NULL AS array_vs_null_distinct, + NULL IS DISTINCT FROM c_array AS null_vs_array_distinct, + + -- ===================== + -- Tuple cmp + -- ===================== + c_tuple <=> (1, 't') AS tuple_check, + c_tuple IS DISTINCT FROM (1, 't') AS tuple_check_distinct, + c_tuple <=> (NULL, NULL) AS tuple_null_check, + c_tuple IS DISTINCT FROM (NULL, NULL) AS tuple_null_check_distinct, + + -- ===================== + -- Map cmp + -- ===================== + c_map <=> map('k1', 1) AS map_check, + c_map IS DISTINCT FROM map('k1', 1) AS map_check_distinct, + c_map <=> map('k1', NULL, 'k2', 2) AS map_check_with_null, + c_map IS DISTINCT FROM map('k1', NULL, 'k2', 2) AS map_check_with_null_distinct, + c_map <=> map() AS map_empty_check, + c_map IS DISTINCT FROM map() AS map_empty_check_distinct, + c_map <=> NULL AS map_vs_null, + NULL <=> c_map AS null_vs_map, + c_map IS DISTINCT FROM NULL AS map_vs_null_distinct, + NULL IS DISTINCT FROM c_map AS null_vs_map_distinct, + + -- ===================== + -- Nullable cmp + -- ===================== + c_nullable <=> 100 AS nullable_vs_literal, + c_nullable IS DISTINCT FROM 100 AS nullable_vs_literal_distinct, + c_nullable <=> NULL AS nullable_vs_null, + NULL <=> c_nullable AS null_vs_nullable, + c_nullable IS DISTINCT FROM NULL AS nullable_vs_null_distinct, + NULL IS DISTINCT FROM c_nullable AS null_vs_nullable_distinct, + + -- ===================== + -- JSON cmp + -- ===================== + c_json <=> CAST('{"k": "v"}' AS JSON) AS json_check, + c_json IS DISTINCT FROM CAST('{"k": "v"}' AS JSON) AS json_check_distinct, + c_json <=> CAST('{"k2": "v2"}' AS JSON) AS json_check2, + c_json IS DISTINCT FROM CAST('{"k2": "v2"}' AS JSON) AS json_check2_distinct, + c_json <=> CAST('{}' AS JSON) AS json_empty_check, + c_json IS DISTINCT FROM CAST('{}' AS JSON) AS json_empty_check_distinct, + c_json <=> NULL AS json_vs_null, + NULL <=> c_json AS null_vs_json, + c_json IS DISTINCT FROM NULL AS json_vs_null_distinct, + NULL IS DISTINCT FROM c_json AS null_vs_json_distinct, + + -- ===================== + -- Variant cmp + -- ===================== + c_variant <=> 1::UInt64::Variant(UInt64, String, Array(UInt64)) AS variant_check_int, + c_variant IS DISTINCT FROM 1::UInt64::Variant(UInt64, String, Array(UInt64)) AS variant_check_int_distinct, + c_variant <=> 'test variant'::String::Variant(UInt64, String, Array(UInt64)) AS variant_check_str, + c_variant IS DISTINCT FROM 'test variant'::String::Variant(UInt64, String, Array(UInt64)) AS variant_check_str_distinct, + c_variant <=> [1,2,3]::Array(UInt64)::Variant(UInt64, String, Array(UInt64)) AS variant_check_array, + c_variant IS DISTINCT FROM [1,2,3]::Array(UInt64)::Variant(UInt64, String, Array(UInt64)) AS variant_check_array_distinct, + c_variant <=> NULL AS variant_vs_null, + NULL <=> c_variant AS null_vs_variant, + c_variant IS DISTINCT FROM NULL AS variant_vs_null_distinct, + NULL IS DISTINCT FROM c_variant AS null_vs_variant_distinct, + + -- ===================== + -- Dynamic cmp + -- ===================== + c_dynamic <=> 1::Dynamic AS dynamic_check_int, + c_dynamic IS DISTINCT FROM 1::Dynamic AS dynamic_check_int_distinct, + c_dynamic <=> 'test dynamic'::Dynamic AS dynamic_check_str, + c_dynamic IS DISTINCT FROM 'test dynamic'::Dynamic AS dynamic_check_str_distinct, + c_dynamic <=> [1, 2, 3]::Dynamic AS dynamic_check_array, + c_dynamic IS DISTINCT FROM [1, 2, 3]::Dynamic AS dynamic_check_array_distinct, + c_dynamic <=> NULL AS dynamic_vs_null, + NULL <=> c_dynamic AS null_vs_dynamic, + c_dynamic IS DISTINCT 
FROM NULL AS dynamic_vs_null_distinct, + NULL IS DISTINCT FROM c_dynamic AS null_vs_dynamic_distinct + +FROM 03611_nscmp_tbl +ORDER BY key +SETTINGS parallel_replicas_for_non_replicated_merge_tree = 0; + +-- 1.6 UUID / IPv4 / IPv6 +SELECT '1.6 UUID / IPv4 / IPv6 comparisons'; +SELECT 'NULL-safe comparisons with type-correct literals'; +SELECT + c_uuid <=> c_uuid AS uuid_self, + c_uuid IS DISTINCT FROM c_uuid AS uuid_self_distinct, + c_uuid <=> NULL AS uuid_vs_null, + NULL <=> c_uuid AS null_vs_uuid, + c_uuid IS DISTINCT FROM NULL AS uuid_vs_null_distinct, + NULL IS DISTINCT FROM c_uuid AS null_vs_uuid_distinct, + + c_ipv4 <=> toIPv4('127.0.0.1') AS ipv4_check, + c_ipv4 IS DISTINCT FROM toIPv4('127.0.0.1') AS ipv4_check_distinct, + c_ipv4 <=> NULL AS ipv4_vs_null, + NULL <=> c_ipv4 AS null_vs_ipv4, + c_ipv4 IS DISTINCT FROM NULL AS ipv4_vs_null_distinct, + NULL IS DISTINCT FROM c_ipv4 AS null_vs_ipv4_distinct, + + c_ipv6 <=> toIPv6('::1') AS ipv6_check, + c_ipv6 IS DISTINCT FROM toIPv6('::1') AS ipv6_check_distinct, + c_ipv6 <=> NULL AS ipv6_vs_null, + NULL <=> c_ipv6 AS null_vs_ipv6, + c_ipv6 IS DISTINCT FROM NULL AS ipv6_vs_null_distinct, + NULL IS DISTINCT FROM c_ipv6 AS null_vs_ipv6_distinct +FROM 03611_nscmp_tbl +ORDER BY key; + +SELECT '1.7 Cross-type test cases'; +-- Variant vs Dynamic +SELECT + c_variant <=> c_dynamic, + c_variant IS DISTINCT FROM c_dynamic +FROM 03611_nscmp_tbl +ORDER BY key; + +-- Dynamic vs IPv4/IPv6 +SELECT c_dynamic <=> c_ipv4 FROM 03611_nscmp_tbl ORDER BY key; + +SELECT c_dynamic IS DISTINCT FROM c_ipv4 FROM 03611_nscmp_tbl ORDER BY key; + +SELECT c_dynamic <=> c_ipv6 FROM 03611_nscmp_tbl ORDER BY key; + +SELECT c_dynamic IS DISTINCT FROM c_ipv6 FROM 03611_nscmp_tbl ORDER BY key; + +-- Dynamic vs String +SELECT + c_dynamic <=> c_string, + c_dynamic <=> c_fstring +FROM 03611_nscmp_tbl +ORDER BY key; + +SELECT + c_dynamic IS DISTINCT FROM c_string, + c_dynamic IS DISTINCT FROM c_fstring +FROM 03611_nscmp_tbl +ORDER BY key; + +-- Array vs Dynamic +SELECT + c_array <=> c_dynamic, + c_array IS DISTINCT FROM c_dynamic +FROM 03611_nscmp_tbl +ORDER BY key; + +-- Map vs Dynamic +SELECT + c_map <=> c_dynamic, + c_map IS DISTINCT FROM c_dynamic +FROM 03611_nscmp_tbl +ORDER BY key; + +-- Nested vs Dynamic +SELECT + c_nested.id <=> c_dynamic, + c_nested.value <=> c_dynamic +FROM 03611_nscmp_tbl +ORDER BY key; + +-- Tuple vs Dynamic + +SELECT + c_tuple <=> c_dynamic, + c_tuple IS DISTINCT FROM c_dynamic +FROM 03611_nscmp_tbl +ORDER BY key; + + +SELECT '1.8 Some ILLEGAL_TYPE_OF_ARGUMENT case'; +SELECT + c_tuple <=> NULL AS tuple_vs_null +FROM 03611_nscmp_tbl; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT + NULL <=> c_tuple AS tuple_vs_null +FROM 03611_nscmp_tbl; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT + c_tuple IS DISTINCT FROM NULL AS tuple_vs_null_distinct +FROM 03611_nscmp_tbl; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT + NULL IS DISTINCT FROM c_tuple AS tuple_vs_null_distinct +FROM 03611_nscmp_tbl; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT c_variant = 1 FROM 03611_nscmp_tbl; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT c_variant <=> c_ipv4 FROM 03611_nscmp_tbl; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT c_variant IS DISTINCT FROM c_ipv4 FROM 03611_nscmp_tbl; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT c_variant <=> c_ipv6 FROM 03611_nscmp_tbl; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT c_variant IS DISTINCT FROM c_ipv6 FROM 03611_nscmp_tbl; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + 
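For reference, the NULL-safe semantics that the cases above and below build on can be checked standalone; a minimal sketch whose expected values (1, 0, 0, 1 and 0, 1, 1, 0) mirror the isNotDistinctFrom/isDistinctFrom checks in section 1.19 of this file:

-- Expected 1, 0, 0, 1: <=> treats two NULLs as equal.
SELECT NULL <=> NULL, NULL <=> 1, 1 <=> NULL, 1 <=> 1;
-- Expected 0, 1, 1, 0: IS DISTINCT FROM is the negation of <=>.
SELECT NULL IS DISTINCT FROM NULL, NULL IS DISTINCT FROM 1, 1 IS DISTINCT FROM NULL, 1 IS DISTINCT FROM 1;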
+SELECT + c_variant <=> c_string, + c_variant <=> c_fstring +FROM 03611_nscmp_tbl; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT + c_variant IS DISTINCT FROM c_string, + c_variant IS DISTINCT FROM c_fstring +FROM 03611_nscmp_tbl; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 'Array vs Map cross-type test cases'; + +-- Array vs Map +SELECT + c_array <=> c_map, + c_array IS DISTINCT FROM c_map +FROM 03611_nscmp_tbl; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 'Map vs Variant cross-type test cases'; + +-- Map vs Variant +SELECT + c_map <=> c_variant, + c_map IS DISTINCT FROM c_variant +FROM 03611_nscmp_tbl; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- Array vs Variant +SELECT + c_array <=> c_variant, + c_array IS DISTINCT FROM c_variant +FROM 03611_nscmp_tbl; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + + +-- Array vs Tuple +SELECT + c_array <=> c_tuple, + c_array IS DISTINCT FROM c_tuple +FROM 03611_nscmp_tbl; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- Map vs Tuple +SELECT + c_map <=> c_tuple, + c_map IS DISTINCT FROM c_tuple +FROM 03611_nscmp_tbl; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +DROP TABLE IF EXISTS 03611_t_nullsafe; +CREATE TABLE IF NOT EXISTS 03611_t_nullsafe +( + id Int32, + a Nullable(Int32), + b Nullable(Int32), + txt Nullable(String) +) ENGINE = Memory; + +INSERT INTO 03611_t_nullsafe VALUES +(1, 1, 1, 'x'), +(2, 1, NULL, 'x'), +(3, NULL, NULL, 'y'), +(4, 2, 2, 'z'); + +SELECT '1.9 WHERE Clause'; +SELECT id, a, b, + (a <=> b) AS null_safe_equal, + (a IS DISTINCT FROM b) AS null_safe_distinct +FROM 03611_t_nullsafe +ORDER BY id; + + +SELECT '1.10 ORDER BY Clause'; +SELECT id, a, b, + (a IS DISTINCT FROM b) AS distinct_flag +FROM 03611_t_nullsafe +ORDER BY (a <=> b) DESC, distinct_flag ASC, id; + +SELECT '1.11 GROUP BY Clause'; +SELECT + a <=> b AS eq, + a IS DISTINCT FROM b AS distinct_flag, + count() AS cnt, + groupArray(id) AS ids +FROM 03611_t_nullsafe +GROUP BY eq, distinct_flag +ORDER BY eq; + +SELECT '1.12 Aggregate Func'; +SELECT + countIf(a <=> 1) AS cnt_equal_1, + countIf(a IS DISTINCT FROM 1) AS cnt_distinct_1, + count() AS total_count +FROM 03611_t_nullsafe +ORDER BY cnt_equal_1; + +SELECT '1.12 HAVING Clause'; +SELECT + max(a) AS max_a, + max(b) AS max_b +FROM `03611_t_nullsafe` +HAVING (max_a <=> max_b) OR (max_a IS DISTINCT FROM max_b) +ORDER BY max_a; + +SELECT '1.13 JOIN Clause'; +SELECT + l.id AS lid, + r.id AS rid, + l.a, + r.b +FROM 03611_t_nullsafe AS l +INNER JOIN 03611_t_nullsafe AS r ON l.a <=> r.b +ORDER BY lid, rid; + +SELECT + l.id AS lid, + r.id AS rid, + l.a, + r.b +FROM 03611_t_nullsafe AS l +INNER JOIN 03611_t_nullsafe AS r ON l.a IS DISTINCT FROM r.b +ORDER BY lid, rid; + +SELECT '1.14 CASE WHEN Clause'; +SELECT id, a, b, + CASE + WHEN a <=> b THEN 'null_safe_equal' + WHEN a IS DISTINCT FROM b THEN 'null_safe_distinct' + ELSE 'unexpected' + END AS comparison_result, + CASE + WHEN a <=> b THEN 'same' + WHEN a IS DISTINCT FROM b THEN 'different' + ELSE 'unknown' + END AS status +FROM 03611_t_nullsafe +ORDER BY id; + +SELECT '1.15 IF function'; +SELECT id, a, b, + IF(a <=> b, 'safe-equal', 'not_equal') AS equal_flag, + IF(a IS DISTINCT FROM b, 'distinct', 'same') AS distinct_flag +FROM 03611_t_nullsafe +ORDER BY id; + +SELECT '1.16 Window Func'; +SELECT (a IS DISTINCT FROM b) AS distinct_flag, + count() OVER (PARTITION BY a <=> b) AS partition_by_eq, + count() OVER (PARTITION BY a IS DISTINCT FROM b) AS partition_by_distinct, + count() OVER (PARTITION BY a <=> b, a IS DISTINCT FROM b) AS partition_by_both 
+FROM 03611_t_nullsafe +ORDER BY distinct_flag; + +SELECT '1.17 OR / AND'; +select 1 <=> 1 AND 1 is DISTINCT FROM 1, + 1 <=> 1 OR 1 is DISTINCT FROM 1; + +SELECT '1.18 Tuple has null'; +select (null,null,null) is DISTINCT FROM (null,null,null); +select (null,null,null) is DISTINCT FROM (null,null,1); +select (null,null,null) <=> (null,null,1); +select (null,null,null) <=> (null,null,null); + +select (null,null,(null,null,null)) is DISTINCT FROM (null,null,(null,null,1)); +select (null,null,(null,null,null)) is DISTINCT FROM (null,null,(null,null,null)); +select (null,null,(null,null,null)) <=> (null,null,(null,null,1)); +select (null,null,(null,null,null)) <=> (null,null,(null,null,null)); + +SELECT DISTINCT * WHERE 2 <=> materialize(2) +GROUP BY 1 +WITH TOTALS QUALIFY ((2 <=> 2) / ((2 IS NOT NULL) IS NULL), *, *, materialize(toNullable(2)), materialize(2), materialize(materialize(2)), 2) +<=> ((2 = *) * 2, *, *, 2, toNullable(2), 2, 2); + +SELECT '1.19 Alias function testing'; +SELECT isNotDistinctFrom(1, 1); +SELECT isNotDistinctFrom(1, 2); +SELECT isNotDistinctFrom(1, null); +SELECT isNotDistinctFrom(null, null); + +SELECT isDistinctFrom(1, 1); +SELECT isDistinctFrom(1, 2); +SELECT isDistinctFrom(1, null); +SELECT isDistinctFrom(null, null); + +DROP TABLE IF EXISTS 03611_nscmp_tbl; +DROP TABLE IF EXISTS 03611_t_nullsafe; diff --git a/parser/testdata/03611_point_in_polygon_key_condition_bug/ast.json b/parser/testdata/03611_point_in_polygon_key_condition_bug/ast.json new file mode 100644 index 000000000..4cb8c130f --- /dev/null +++ b/parser/testdata/03611_point_in_polygon_key_condition_bug/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery p3 (children 3)" + }, + { + "explain": " Identifier p3" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration x (children 1)" + }, + { + "explain": " DataType Int64" + }, + { + "explain": " ColumnDeclaration y (children 1)" + }, + { + "explain": " DataType Int64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " Identifier y" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.000995967, + "rows_read": 14, + "bytes_read": 463 + } +} diff --git a/parser/testdata/03611_point_in_polygon_key_condition_bug/metadata.json b/parser/testdata/03611_point_in_polygon_key_condition_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03611_point_in_polygon_key_condition_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03611_point_in_polygon_key_condition_bug/query.sql b/parser/testdata/03611_point_in_polygon_key_condition_bug/query.sql new file mode 100644 index 000000000..d97355a44 --- /dev/null +++ b/parser/testdata/03611_point_in_polygon_key_condition_bug/query.sql @@ -0,0 +1,3 @@ +create table p3 (x Int64, y Int64) engine MergeTree order by (x, y); +insert into p3 values (100, 100); +explain indexes=1,projections=1 select * from p3 where pointInPolygon((x, y), [(0,0),(0,150),(150,150),(150,0)]) format Null; diff --git a/parser/testdata/03611_pr_global_join/ast.json b/parser/testdata/03611_pr_global_join/ast.json new file mode 100644 index 000000000..abce44aea --- /dev/null +++ 
b/parser/testdata/03611_pr_global_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001394716, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03611_pr_global_join/metadata.json b/parser/testdata/03611_pr_global_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03611_pr_global_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03611_pr_global_join/query.sql b/parser/testdata/03611_pr_global_join/query.sql new file mode 100644 index 000000000..1e14ebab0 --- /dev/null +++ b/parser/testdata/03611_pr_global_join/query.sql @@ -0,0 +1,30 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +CREATE TABLE t1 (c0 Int NULL, c1 Int) ENGINE = MergeTree() ORDER BY tuple(); +CREATE TABLE t2 (c0 Int NULL, c1 Int) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO TABLE t1 (c1, c0) VALUES (1, 1); +INSERT INTO TABLE t2 (c0, c1) VALUES (1, 1); + +SET enable_parallel_replicas = 1; +SET max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_for_non_replicated_merge_tree = 1; + +SELECT * +FROM t2 +INNER JOIN t2 AS tx ON true +RIGHT JOIN t1 ON true; + +SELECT '---'; + +-- this query was problematic; now GLOBAL JOINs have been disabled in n-way JOINs +SELECT * +FROM t2 +INNER JOIN t2 AS tx ON true +GLOBAL RIGHT JOIN t1 ON true; + +SELECT '---'; +-- just check that a simple GLOBAL JOIN works with parallel replicas +SELECT * +FROM t2 GLOBAL RIGHT JOIN t1 ON true; + +DROP TABLE t1; +DROP TABLE t2; diff --git a/parser/testdata/03611_uniqExact_bug/ast.json b/parser/testdata/03611_uniqExact_bug/ast.json new file mode 100644 index 000000000..587bde25a --- /dev/null +++ b/parser/testdata/03611_uniqExact_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000908802, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03611_uniqExact_bug/metadata.json b/parser/testdata/03611_uniqExact_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03611_uniqExact_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03611_uniqExact_bug/query.sql b/parser/testdata/03611_uniqExact_bug/query.sql new file mode 100644 index 000000000..811369313 --- /dev/null +++ b/parser/testdata/03611_uniqExact_bug/query.sql @@ -0,0 +1,59 @@ +set max_threads=4; + +create table test(c1 Int64, c2 Int64) +Engine=MergeTree ORDER BY c2 AS +WITH gen as + (SELECT xxHash32(number) % 1000000 AS u1, + xxHash32(number+12345) % 1000000 AS u2 + FROM numbers(1e6)) +SELECT + 1071106, + 5+(1 + 151703 * ((2 * u1) * (2 * u2))) +FROM gen +union all +SELECT + 1071102, + 8+(1 + 151693 * ((2 * u1) * (2 * u2))) +FROM gen; + +select '--- ROLLUP ---' format TSVRaw; +SELECT + c1, + uniqExact(c2) +FROM test +GROUP BY c1 + WITH ROLLUP +ORDER BY c1 DESC; + +select '--- CUBE ---' format TSVRaw; + +SELECT + c1, + uniqExact(c2) +FROM test +GROUP BY c1 + WITH CUBE +ORDER BY c1 DESC; + +select '--- totals ---' format TSVRaw; + +SELECT + c1, + uniqExact(c2) +FROM test +GROUP BY c1 + WITH TOTALS +ORDER BY c1 DESC; + +select '--- grouping sets ---' format TSVRaw; +
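For the GROUPING SETS query below: with a single grouping key, GROUPING SETS ((c1), ()) requests the same groups as WITH ROLLUP, so its output should match the '--- ROLLUP ---' block above. A minimal self-contained sketch of that equivalence (illustrative only):

select number % 2 as k, count() from numbers(4) group by k with rollup order by k;
select number % 2 as k, count() from numbers(4) group by grouping sets ((k), ()) order by k;
-- Both should return the same three rows: one per k value plus a grand-total row, where k takes its default value 0.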
+SELECT
+    c1,
+    uniqExact(c2)
+FROM test
+GROUP BY
+    GROUPING SETS(
+    (c1),
+    ()
+    )
+ORDER BY c1 DESC;
diff --git a/parser/testdata/03611_verify_exception_in_iceberg_iterator/ast.json b/parser/testdata/03611_verify_exception_in_iceberg_iterator/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03611_verify_exception_in_iceberg_iterator/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03611_verify_exception_in_iceberg_iterator/metadata.json b/parser/testdata/03611_verify_exception_in_iceberg_iterator/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03611_verify_exception_in_iceberg_iterator/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03611_verify_exception_in_iceberg_iterator/query.sql b/parser/testdata/03611_verify_exception_in_iceberg_iterator/query.sql
new file mode 100644
index 000000000..8b14337d4
--- /dev/null
+++ b/parser/testdata/03611_verify_exception_in_iceberg_iterator/query.sql
@@ -0,0 +1,9 @@
+
+-- Tags: no-fasttest
+-- Tag no-fasttest: Depends on AWS
+
+-- Verify that an exception is thrown when an Iceberg table contains corrupted Avro manifest files.
+-- This test verifies not so much the error thrown during Avro file parsing, but rather
+-- that we correctly process exceptions thrown from the Iceberg iterator in background execution.
+
+SELECT * FROM icebergS3('http://localhost:11111/test/corrupted_avro_files_test/', 'clickhouse', 'clickhouse') SETTINGS use_iceberg_metadata_files_cache = False; -- { serverError INCORRECT_DATA}
\ No newline at end of file
diff --git a/parser/testdata/03611_window_definition_parsing/ast.json b/parser/testdata/03611_window_definition_parsing/ast.json
new file mode 100644
index 000000000..75efcd73a
--- /dev/null
+++ b/parser/testdata/03611_window_definition_parsing/ast.json
@@ -0,0 +1,25 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "Set"
+        }
+    ],
+
+    "rows": 1,
+
+    "statistics":
+    {
+        "elapsed": 0.001148185,
+        "rows_read": 1,
+        "bytes_read": 11
+    }
+}
diff --git a/parser/testdata/03611_window_definition_parsing/metadata.json b/parser/testdata/03611_window_definition_parsing/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03611_window_definition_parsing/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03611_window_definition_parsing/query.sql b/parser/testdata/03611_window_definition_parsing/query.sql
new file mode 100644
index 000000000..a45fd8bd8
--- /dev/null
+++ b/parser/testdata/03611_window_definition_parsing/query.sql
@@ -0,0 +1,3 @@
+SET param_preceding_rows=1;
+EXPLAIN AST SELECT sum(1) OVER (ORDER BY 1 ASC ROWS BETWEEN {preceding_rows:UInt64} PRECEDING AND CURRENT ROW);
+SELECT sum(1) OVER (ORDER BY 1 ASC ROWS BETWEEN {preceding_rows:UInt64} PRECEDING AND CURRENT ROW);
diff --git a/parser/testdata/03612_explain_indexes_bugs/ast.json b/parser/testdata/03612_explain_indexes_bugs/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03612_explain_indexes_bugs/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03612_explain_indexes_bugs/metadata.json b/parser/testdata/03612_explain_indexes_bugs/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03612_explain_indexes_bugs/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03612_explain_indexes_bugs/query.sql b/parser/testdata/03612_explain_indexes_bugs/query.sql
new file mode 100644
index 000000000..5f7312525
--- /dev/null
+++ b/parser/testdata/03612_explain_indexes_bugs/query.sql
@@ -0,0 +1,16 @@
+-- Tags: no-parallel-replicas
+-- no-parallel-replicas because the output of explain is different.
+set enable_analyzer = 1;
+
+create table points (x Int64, y Int64) engine MergeTree order by (x, y);
+insert into points values (100, 100);
+explain indexes=1 select * from points where pointInPolygon((x, y), [(0,0),(0,150),(150,150),(150,0)]);
+explain indexes=1 select * from points where plus(minus(x, 1), 10) < 10;
+explain indexes=1 select * from points where (plus(minus(x, 1), 10), minus(plus(y, 2), 20)) in (10, 20);
+explain indexes=1 select * from points where (plus(minus(x, 1), 10), minus(plus(x, 2), 20)) in (10, 20);
+
+create table morton (x UInt64, y UInt64) engine MergeTree order by mortonEncode(x, y);
+insert into morton values (100, 200);
+explain indexes=1 select * from morton where x > 100;
+explain indexes=1 select x+y from morton where x+1 = 101;
+select x+y from morton where x+1 = 101;
diff --git a/parser/testdata/03612_storage_cluster_dynamic_subcolumns/ast.json b/parser/testdata/03612_storage_cluster_dynamic_subcolumns/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03612_storage_cluster_dynamic_subcolumns/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03612_storage_cluster_dynamic_subcolumns/metadata.json b/parser/testdata/03612_storage_cluster_dynamic_subcolumns/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03612_storage_cluster_dynamic_subcolumns/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03612_storage_cluster_dynamic_subcolumns/query.sql b/parser/testdata/03612_storage_cluster_dynamic_subcolumns/query.sql
new file mode 100644
index 000000000..4717ac995
--- /dev/null
+++ b/parser/testdata/03612_storage_cluster_dynamic_subcolumns/query.sql
@@ -0,0 +1,12 @@
+-- Tags: no-fasttest
+
+drop table if exists test;
+drop table if exists test_cluster;
+create table test (json JSON, d Dynamic) engine=MergeTree order by tuple();
+insert into test select '{"a" : 42}', 42::Int64;
+create table test_cluster as cluster(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), 'test');
+select * from test_cluster;
+select json.a, d.Int64 from test_cluster;
+drop table test_cluster;
+drop table test;
+
diff --git a/parser/testdata/03613_empty_tuple_permute_with_limit/ast.json b/parser/testdata/03613_empty_tuple_permute_with_limit/ast.json
new file mode 100644
index 000000000..3c1faddc5
--- /dev/null
+++ b/parser/testdata/03613_empty_tuple_permute_with_limit/ast.json
@@ -0,0 +1,82 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 5)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Literal UInt64_1"
+        },
+        {
+            "explain": "   TablesInSelectQuery (children 1)"
+        },
+        {
+            "explain": "    TablesInSelectQueryElement (children 1)"
+        },
+        {
+            "explain": "     TableExpression (children 1)"
+        },
+        {
+            "explain": "      Function numbers (children 1)"
+        },
+        {
+            "explain": "       ExpressionList (children 1)"
+        },
+        {
+            "explain": "        Literal UInt64_3"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    OrderByElement (children 1)"
+        },
+        {
+            "explain": "     Function array (children 1)"
+        },
+        {
+            "explain": "      ExpressionList (children 2)"
+        },
+        {
+            "explain": "       Literal UInt64_1"
+        },
+        {
+            "explain": "       Function tuple (children 1)"
+        },
+        {
+            "explain": "        ExpressionList"
+        },
+        {
+            "explain": "   Literal UInt64_1"
+        },
+        {
+            "explain": "   Set"
+        }
+    ],
+
+    "rows": 20,
+
+    "statistics":
+    {
+        "elapsed": 0.001302015,
+        "rows_read": 20,
+        "bytes_read": 731
+    }
+}
diff --git a/parser/testdata/03613_empty_tuple_permute_with_limit/metadata.json b/parser/testdata/03613_empty_tuple_permute_with_limit/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03613_empty_tuple_permute_with_limit/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03613_empty_tuple_permute_with_limit/query.sql b/parser/testdata/03613_empty_tuple_permute_with_limit/query.sql
new file mode 100644
index 000000000..01a5e3ab6
--- /dev/null
+++ b/parser/testdata/03613_empty_tuple_permute_with_limit/query.sql
@@ -0,0 +1,2 @@
+SELECT 1 FROM numbers(3) ORDER BY [1, ()] FETCH FIRST 1 ROW ONLY SETTINGS allow_suspicious_types_in_order_by = 1, use_variant_as_common_type = 1;
+
diff --git a/parser/testdata/03620_json_advanced_shared_data_seek_bug/ast.json b/parser/testdata/03620_json_advanced_shared_data_seek_bug/ast.json
new file mode 100644
index 000000000..fb4156ec0
--- /dev/null
+++ b/parser/testdata/03620_json_advanced_shared_data_seek_bug/ast.json
@@ -0,0 +1,25 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "Set"
+        }
+    ],
+
+    "rows": 1,
+
+    "statistics":
+    {
+        "elapsed": 0.00089968,
+        "rows_read": 1,
+        "bytes_read": 11
+    }
+}
diff --git a/parser/testdata/03620_json_advanced_shared_data_seek_bug/metadata.json b/parser/testdata/03620_json_advanced_shared_data_seek_bug/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03620_json_advanced_shared_data_seek_bug/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03620_json_advanced_shared_data_seek_bug/query.sql b/parser/testdata/03620_json_advanced_shared_data_seek_bug/query.sql
new file mode 100644
index 000000000..cf98500cc
--- /dev/null
+++ b/parser/testdata/03620_json_advanced_shared_data_seek_bug/query.sql
@@ -0,0 +1,9 @@
+set use_variant_as_common_type = 1;
+
+drop table if exists test;
+create table test (id UInt64, json JSON(max_dynamic_paths=2, a.b.c UInt32)) engine=MergeTree order by id settings min_rows_for_wide_part=1000000000, min_bytes_for_wide_part=10000000000, index_granularity = 39547, object_serialization_version = 'v3', object_shared_data_serialization_version = 'advanced', object_shared_data_serialization_version_for_zero_level_parts = 'advanced', object_shared_data_buckets_for_compact_part = 3, object_shared_data_buckets_for_wide_part = 1, dynamic_serialization_version = 'v3';
+
+insert into test select number, number < 500000 ? toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number))) : toJSONString(map('a.b.c', number, 'a.b.d', number::UInt32, 'a.b.e', 'str_' || toString(number), 'b.b._' || toString(number % 5), number::UInt32)) from numbers(400000, 200000);
+
+select json.b.b.`_1`.:String from test format Null settings max_threads=1;
+drop table test;
diff --git a/parser/testdata/03622_generic_aggregate_functions__state_compatibility/ast.json b/parser/testdata/03622_generic_aggregate_functions__state_compatibility/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03622_generic_aggregate_functions__state_compatibility/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03622_generic_aggregate_functions__state_compatibility/metadata.json b/parser/testdata/03622_generic_aggregate_functions__state_compatibility/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03622_generic_aggregate_functions__state_compatibility/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03622_generic_aggregate_functions__state_compatibility/query.sql b/parser/testdata/03622_generic_aggregate_functions__state_compatibility/query.sql
new file mode 100644
index 000000000..844b93126
--- /dev/null
+++ b/parser/testdata/03622_generic_aggregate_functions__state_compatibility/query.sql
@@ -0,0 +1,62 @@
+--- Some aggregate functions use the IColumn::serializeAggregationStateValueIntoArena
+--- method to serialize values into aggregation states.
+--- Aggregation states should not be changed, for compatibility reasons,
+--- and this test should fail if something changes in the serializeAggregationStateValueIntoArena method.
+
+set session_timezone='UTC';
+
+select hex(approx_top_sumState(3)(['str'], 1));
+select hex(approx_top_kState(3)(['str']));
+select hex(topKState(3)(['str']));
+select hex(topKWeightedState(3)(['str'], 1));
+select hex(maxDistinctState(['str']));
+select hex(argMaxDistinctState(['str'], ['str']));
+select hex(groupUniqArrayState(['str']));
+select hex(groupArrayIntersectState(['str']));
+
+--- Check different data types
+select hex(maxDistinctState(tuple('str', true)));
+select hex(maxDistinctState(tuple('str', 42::Int8)));
+select hex(maxDistinctState(tuple('str', 42::UInt8)));
+select hex(maxDistinctState(tuple('str', 42::Int16)));
+select hex(maxDistinctState(tuple('str', 42::UInt16)));
+select hex(maxDistinctState(tuple('str', 42::Int32)));
+select hex(maxDistinctState(tuple('str', 42::UInt32)));
+select hex(maxDistinctState(tuple('str', 42::Int64)));
+select hex(maxDistinctState(tuple('str', 42::UInt64)));
+select hex(maxDistinctState(tuple('str', 42::Int128)));
+select hex(maxDistinctState(tuple('str', 42::UInt128)));
+select hex(maxDistinctState(tuple('str', 42::Int256)));
+select hex(maxDistinctState(tuple('str', 42::UInt256)));
+select hex(maxDistinctState(tuple('str', 42.42::BFloat16)));
+select hex(maxDistinctState(tuple('str', 42.42::Float32)));
+select hex(maxDistinctState(tuple('str', 42.42::Float64)));
+select hex(maxDistinctState(tuple('str', 42.42::Decimal32(2))));
+select hex(maxDistinctState(tuple('str', 42.42::Decimal64(2))));
+select hex(maxDistinctState(tuple('str', 42.42::Decimal128(2))));
+select hex(maxDistinctState(tuple('str', 42.42::Decimal256(2))));
+select hex(maxDistinctState(tuple('str', 'str'::Nullable(String))));
+select hex(maxDistinctState(tuple('str', 'str'::FixedString(3))));
+select hex(maxDistinctState(tuple('str', 'str'::Nullable(FixedString(3)))));
+select hex(maxDistinctState(tuple('str', 'str'::LowCardinality(String))));
+select hex(maxDistinctState(tuple('str', 'str'::LowCardinality(Nullable(String)))));
+select hex(maxDistinctState(tuple('str', 'str'::LowCardinality(FixedString(3)))));
+select hex(maxDistinctState(tuple('str', 'str'::LowCardinality(Nullable(FixedString(3))))));
+select hex(maxDistinctState(tuple('str', ['str'])));
+select hex(maxDistinctState(tuple('str', map('str', 'str'))));
+select hex(maxDistinctState(tuple('str', tuple('str'))));
+select hex(maxDistinctState(tuple('str', '{"str" : "str"}'::JSON)));
+select hex(maxDistinctState(tuple('str', '{"str" : "str"}'::JSON(max_dynamic_paths=0))));
+select hex(maxDistinctState(tuple('str', '{"str" : "str"}'::JSON(str String))));
+select hex(maxDistinctState(tuple('str', 'str'::Variant(String))));
+select hex(maxDistinctState(tuple('str', 'str'::Dynamic)));
+select hex(maxDistinctState(tuple('str', 'str'::Dynamic(max_types=0))));
+select hex(maxDistinctState(tuple('str', '59cd9014-8730-444c-95d0-40ed67c54268'::UUID)));
+select hex(maxDistinctState(tuple('str', '127.0.0.1'::IPv4)));
+select hex(maxDistinctState(tuple('str', '127.0.0.1'::IPv6)));
+select hex(maxDistinctState(tuple('str', '2020-01-01'::Date)));
+select hex(maxDistinctState(tuple('str', '2020-01-01'::Date32)));
+select hex(maxDistinctState(tuple('str', '2020-01-01'::DateTime)));
+select hex(maxDistinctState(tuple('str', '2020-01-01'::DateTime64)));
+select hex(maxDistinctState(tuple('str', 'a'::Enum8('a' = 1))));
+select hex(maxDistinctState(tuple('str', 'a'::Enum16('a' = 1))));
diff --git a/parser/testdata/03622_ttl_infos_where/ast.json b/parser/testdata/03622_ttl_infos_where/ast.json
new file mode 100644
index 000000000..c847a35b0
--- /dev/null
+++ b/parser/testdata/03622_ttl_infos_where/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery users (children 1)"
+        },
+        {
+            "explain": " Identifier users"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001046545,
+        "rows_read": 2,
+        "bytes_read": 62
+    }
+}
diff --git a/parser/testdata/03622_ttl_infos_where/metadata.json b/parser/testdata/03622_ttl_infos_where/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03622_ttl_infos_where/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03622_ttl_infos_where/query.sql b/parser/testdata/03622_ttl_infos_where/query.sql
new file mode 100644
index 000000000..d7de1f597
--- /dev/null
+++ b/parser/testdata/03622_ttl_infos_where/query.sql
@@ -0,0 +1,49 @@
+DROP TABLE IF EXISTS users;
+SET session_timezone = 'UTC';
+
+CREATE TABLE users (uid Int16, d DateTime('UTC'))
+ENGINE = MergeTree ORDER BY uid TTL d + INTERVAL 1 MONTH WHERE uid = 1
+SETTINGS merge_with_ttl_timeout = 0, min_bytes_for_wide_part = 0, vertical_merge_algorithm_min_rows_to_activate = 0, vertical_merge_algorithm_min_columns_to_activate = 0;
+
+SYSTEM STOP TTL MERGES users;
+
+INSERT INTO users SELECT arrayJoin([1,2]), toDateTime('2020-01-01 00:00:00', 'UTC');
+INSERT INTO users SELECT arrayJoin([2,3]), toDateTime('2020-01-01 00:00:00', 'UTC');
+
+SELECT * FROM users ORDER BY ALL;
+
+SELECT
+    delete_ttl_info_min,
+    delete_ttl_info_max,
+    rows_where_ttl_info.min,
+    rows_where_ttl_info.max
+FROM system.parts WHERE database = currentDatabase() AND table = 'users' AND active
+ORDER BY name;
+
+SYSTEM START TTL MERGES users;
+OPTIMIZE TABLE users FINAL;
+
+SELECT * FROM users ORDER BY ALL;
+
+SELECT
+    delete_ttl_info_min,
+    delete_ttl_info_max,
+    rows_where_ttl_info.min,
+    rows_where_ttl_info.max
+FROM system.parts WHERE database = currentDatabase() AND table = 'users' AND active
+ORDER BY name;
+
+-- Cannot assign merge because there is one part and all
+-- TTLs that should be applied are already applied.
+-- Previously it would succeed for TTL with WHERE.
+OPTIMIZE TABLE users SETTINGS optimize_throw_if_noop = 1; -- { serverError CANNOT_ASSIGN_OPTIMIZE }
+
+DETACH TABLE users;
+ATTACH TABLE users;
+
+-- Check that expired TTL doesn't affect the vertical merge algorithm
+OPTIMIZE TABLE users FINAL;
+SYSTEM FLUSH LOGS part_log;
+SELECT merge_algorithm FROM system.part_log WHERE database = currentDatabase() AND table = 'users' AND event_type = 'MergeParts' ORDER BY event_time_microseconds DESC LIMIT 1;
+
+DROP TABLE IF EXISTS users;
diff --git a/parser/testdata/03623_convert_any_join_to_semi_or_anti_2/ast.json b/parser/testdata/03623_convert_any_join_to_semi_or_anti_2/ast.json
new file mode 100644
index 000000000..ae0d1802b
--- /dev/null
+++ b/parser/testdata/03623_convert_any_join_to_semi_or_anti_2/ast.json
@@ -0,0 +1,25 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "Set"
+        }
+    ],
+
+    "rows": 1,
+
+    "statistics":
+    {
+        "elapsed": 0.001325638,
+        "rows_read": 1,
+        "bytes_read": 11
+    }
+}
diff --git a/parser/testdata/03623_convert_any_join_to_semi_or_anti_2/metadata.json b/parser/testdata/03623_convert_any_join_to_semi_or_anti_2/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03623_convert_any_join_to_semi_or_anti_2/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03623_convert_any_join_to_semi_or_anti_2/query.sql b/parser/testdata/03623_convert_any_join_to_semi_or_anti_2/query.sql
new file mode 100644
index 000000000..1f50f6753
--- /dev/null
+++ b/parser/testdata/03623_convert_any_join_to_semi_or_anti_2/query.sql
@@ -0,0 +1,19 @@
+SET enable_analyzer = 1;
+SET query_plan_join_swap_table = 0;
+SET enable_parallel_replicas = 0;
+
+CREATE TABLE users1 (uid Int16, name String, age Int16) ENGINE=Memory;
+INSERT INTO users1 SELECT number as uid, 'Alice' as name, 30 as age FROM numbers(100000);
+
+CREATE TABLE users2 (uid Int16, name String, age Int16) ENGINE=Memory;
+INSERT INTO users2 SELECT number as uid, 'Alice2' as name, 30 as age FROM numbers(1000);
+
+EXPLAIN actions = 1, keep_logical_steps = 1
+SELECT count()
+FROM (SELECT 1 as x, * FROM users1) u1 LEFT ANY JOIN users2 u2 ON u1.uid = u2.uid
+WHERE 1 / u2.age > 1;
+
+EXPLAIN actions = 1, keep_logical_steps = 1
+SELECT count()
+FROM (SELECT 1 as x, * FROM users1) u1 LEFT ANY JOIN users2 u2 ON u1.uid = u2.uid
+WHERE u2.age > 1;
diff --git a/parser/testdata/03623_datetime64_preepoch_fractional_precision/ast.json b/parser/testdata/03623_datetime64_preepoch_fractional_precision/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03623_datetime64_preepoch_fractional_precision/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03623_datetime64_preepoch_fractional_precision/metadata.json b/parser/testdata/03623_datetime64_preepoch_fractional_precision/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03623_datetime64_preepoch_fractional_precision/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03623_datetime64_preepoch_fractional_precision/query.sql b/parser/testdata/03623_datetime64_preepoch_fractional_precision/query.sql
new file mode 100644
index 000000000..4b8a32b91
--- /dev/null
+++ b/parser/testdata/03623_datetime64_preepoch_fractional_precision/query.sql
@@ -0,0 +1,43 @@
+-- Tags: no-fasttest
+-- Test for DateTime64 pre-epoch fractional seconds fix (GitHub issue #85396)
+
+-- 1. Test parseDateTime64BestEffort (src/IO/parseDateTimeBestEffort.cpp)
+-- Original problem case
+SELECT parseDateTime64BestEffort('1969-01-01 00:00:00.468', 3, 'UTC');
+-- Test with different scales
+SELECT parseDateTime64BestEffort('1969-07-20 20:17:40.123456', 6, 'UTC');
+-- Test negative timestamps with fractional seconds
+SELECT parseDateTime64BestEffort('1950-01-01 00:00:00.500', 3, 'UTC');
+-- Test epoch boundary
+SELECT parseDateTime64BestEffort('1969-12-31 23:59:59.999', 3, 'UTC');
+SELECT parseDateTime64BestEffort('1970-01-01 00:00:00.000', 3, 'UTC');
+
+-- 2. Test makeDateTime64 functions (src/Functions/makeDate.cpp)
+SELECT makeDateTime64(1969, 1, 1, 0, 0, 0, 468, 3);
+SELECT makeDateTime64(1969, 12, 31, 23, 59, 59, 999, 3);
+SELECT makeDateTime64(1969, 6, 15, 12, 0, 0, 500000, 6);
+
+-- 3. Test changeYear/changeMonth/changeDay functions (src/Functions/changeDate.cpp)
+SELECT changeYear('2024-01-01 00:00:00.462'::DateTime64, 1969);
+SELECT changeMonth('1969-06-15 12:30:45.123'::DateTime64, 1);
+SELECT changeDay('1969-12-01 08:15:22.789'::DateTime64, 31);
+
+-- 4. Test nowSubsecond (src/Functions/nowSubsecond.cpp) - indirect test via now64()
+-- Note: nowSubsecond is used internally by now64() function
+SELECT length(toString(now64(3))) > 0;
+
+-- 5. Test ULIDStringToDateTime conversion (src/Functions/ULIDStringToDateTime.cpp)
+-- Note: Using fixed ULID to ensure test reproducibility across environments
+SELECT ULIDStringToDateTime('01ARZ3NDEKTSV4RRFFQ69G5FAV', 'UTC');
+
+-- 6. Test UUID conversion functions (src/Functions/FunctionsCodingUUID.cpp)
+-- generateUUIDv7 uses DateTime64 internally
+SELECT length(toString(generateUUIDv7())) = 36;
+
+-- 7. Verify Decimal types are unaffected by changes
+SELECT CAST(-123.456 AS Decimal64(3));
+SELECT CAST(-1969.123 AS Decimal64(3));
+SELECT CAST(1234.567 AS Decimal32(3));
+
+-- 8. Test arithmetic with DateTime64 pre-epoch
+SELECT parseDateTime64BestEffort('1969-01-01 00:00:00.500', 3, 'UTC') + INTERVAL 1 SECOND;
\ No newline at end of file
diff --git a/parser/testdata/03623_header_filtering/ast.json b/parser/testdata/03623_header_filtering/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03623_header_filtering/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03623_header_filtering/metadata.json b/parser/testdata/03623_header_filtering/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03623_header_filtering/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03623_header_filtering/query.sql b/parser/testdata/03623_header_filtering/query.sql
new file mode 100644
index 000000000..3f7f6d276
--- /dev/null
+++ b/parser/testdata/03623_header_filtering/query.sql
@@ -0,0 +1,13 @@
+SELECT * FROM url(
+    'http://google.com',
+    'RawBLOB',
+    'data String',
+    headers('exact_header'='true')
+); -- {serverError BAD_ARGUMENTS}
+
+SELECT * FROM url(
+    'http://google.com',
+    'RawBLOB',
+    'data String',
+    headers('exact_header ' = 'true', 'exact_header ' = 'true')
+); -- {serverError BAD_ARGUMENTS}
\ No newline at end of file
diff --git a/parser/testdata/03623_lazy_materialization_array_sizes_bug/ast.json b/parser/testdata/03623_lazy_materialization_array_sizes_bug/ast.json
new file mode 100644
index 000000000..1a53a350a
--- /dev/null
+++ b/parser/testdata/03623_lazy_materialization_array_sizes_bug/ast.json
@@ -0,0 +1,25 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "Set"
+        }
+    ],
+
+    "rows": 1,
+
+    "statistics":
+    {
+        "elapsed": 0.00102212,
+        "rows_read": 1,
+        "bytes_read": 11
+    }
+}
diff --git a/parser/testdata/03623_lazy_materialization_array_sizes_bug/metadata.json b/parser/testdata/03623_lazy_materialization_array_sizes_bug/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03623_lazy_materialization_array_sizes_bug/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03623_lazy_materialization_array_sizes_bug/query.sql b/parser/testdata/03623_lazy_materialization_array_sizes_bug/query.sql
new file mode 100644
index 000000000..f2fa7ee37
--- /dev/null
+++ b/parser/testdata/03623_lazy_materialization_array_sizes_bug/query.sql
@@ -0,0 +1,9 @@
+set query_plan_optimize_lazy_materialization=1;
+set query_plan_max_limit_for_lazy_materialization=10;
+
+drop table if exists test;
+create table test (x UInt64, y UInt64, a Array(UInt64)) engine=MergeTree order by x settings min_rows_for_wide_part=1, min_bytes_for_wide_part=1;
+insert into test select number, number, range(number) from numbers(10);
+select a.size0, a from test where y > 5 order by y limit 2;
+select a, a.size0 from test where y > 5 order by y limit 2;
+
diff --git a/parser/testdata/03623_parquet_bool/ast.json b/parser/testdata/03623_parquet_bool/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03623_parquet_bool/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03623_parquet_bool/metadata.json b/parser/testdata/03623_parquet_bool/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03623_parquet_bool/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03623_parquet_bool/query.sql b/parser/testdata/03623_parquet_bool/query.sql
new file mode 100644
index 000000000..654caa5ff
--- /dev/null
+++ b/parser/testdata/03623_parquet_bool/query.sql
@@ -0,0 +1,4 @@
+-- Tags: no-parallel, no-fasttest
+
+insert into function file('03626_parquet_bool.parquet') select true as x settings engine_file_truncate_on_insert=1;
+select * from file('03626_parquet_bool.parquet') where x=1 settings input_format_parquet_use_native_reader_v3=1;
diff --git a/parser/testdata/03623_pr_join_with_group_by_subquery/ast.json b/parser/testdata/03623_pr_join_with_group_by_subquery/ast.json
new file mode 100644
index 000000000..41b31cf7d
--- /dev/null
+++ b/parser/testdata/03623_pr_join_with_group_by_subquery/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery users (children 1)"
+        },
+        {
+            "explain": " Identifier users"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001098702,
+        "rows_read": 2,
+        "bytes_read": 62
+    }
+}
diff --git a/parser/testdata/03623_pr_join_with_group_by_subquery/metadata.json b/parser/testdata/03623_pr_join_with_group_by_subquery/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03623_pr_join_with_group_by_subquery/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03623_pr_join_with_group_by_subquery/query.sql b/parser/testdata/03623_pr_join_with_group_by_subquery/query.sql
new file mode 100644
index 000000000..ae6339b14
--- /dev/null
+++ b/parser/testdata/03623_pr_join_with_group_by_subquery/query.sql
@@ -0,0 +1,122 @@
+drop table if exists users sync;
+drop table if exists messages sync;
+
+create table users (id Int64, name String) engine=ReplicatedMergeTree('/clickhouse/{database}/tables/03623_users', 'r1') order by tuple();
+create table messages (id Int64, user_id Int64, text String) engine=ReplicatedMergeTree('/clickhouse/{database}/tables/03623_messages', 'r1') order by tuple();
+
+insert into users select number, concat('user_', toString(number)) from numbers(10);
+insert into users select 11, concat('user_', toString(11));
+insert into messages select 100+number, number, concat('message_', toString(number)) from numbers(11);
+
+SET enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost';
+
+SELECT '-- subquery INNER JOIN table';
+SELECT
+    name,
+    c
+FROM
+(
+    SELECT
+        user_id,
+        count() AS c
+    FROM messages
+    GROUP BY user_id
+) AS messages
+INNER JOIN users ON messages.user_id = users.id
+ORDER BY
+    user_id ASC,
+    c ASC;
+
+SELECT '-- table INNER JOIN subquery';
+SELECT
+    name,
+    c
+FROM
+users
+INNER JOIN
+(
+    SELECT
+        user_id,
+        count() AS c
+    FROM messages
+    GROUP BY user_id
+) AS messages
+ON messages.user_id = users.id
+ORDER BY
+    user_id ASC,
+    c ASC;
+
+SELECT '-- subquery LEFT JOIN table';
+SELECT
+    name,
+    c
+FROM
+(
+    SELECT
+        user_id,
+        count() AS c
+    FROM messages
+    GROUP BY user_id
+) AS messages
+LEFT JOIN users ON messages.user_id = users.id
+ORDER BY
+    user_id ASC,
+    c ASC;
+
+SELECT '-- table LEFT JOIN subquery';
+SELECT
+    name,
+    c
+FROM
+users
+LEFT JOIN
+(
+    SELECT
+        user_id,
+        count() AS c
+    FROM messages
+    GROUP BY user_id
+) AS messages
+ON messages.user_id = users.id
+ORDER BY
+    user_id ASC,
+    c ASC;
+
+SELECT '-- subquery RIGHT JOIN table';
+SELECT
+    name,
+    c
+FROM
+(
+    SELECT
+        user_id,
+        count() AS c
+    FROM messages
+    GROUP BY user_id
+) AS messages
+RIGHT JOIN users ON messages.user_id = users.id
+ORDER BY
+    user_id ASC,
+    c ASC;
+
+SELECT '-- table RIGHT JOIN subquery';
+SELECT
+    name,
+    c
+FROM
+users
+RIGHT JOIN
+(
+    SELECT
+        user_id,
+        count() AS c
+    FROM messages
+    GROUP BY user_id
+) AS messages
+ON messages.user_id = users.id
+ORDER BY
+    user_id ASC,
+    c ASC;
+
+drop table users sync;
+drop table messages sync;
diff --git a/parser/testdata/03623_setting_boolean_shorthand_err_test/ast.json b/parser/testdata/03623_setting_boolean_shorthand_err_test/ast.json
new file mode 100644
index 000000000..694931331
--- /dev/null
+++ b/parser/testdata/03623_setting_boolean_shorthand_err_test/ast.json
@@ -0,0 +1,37 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 1)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Literal '-- Test against Boolean Setting'"
+        }
+    ],
+
+    "rows": 5,
+
+    "statistics":
+    {
+        "elapsed": 0.001354023,
+        "rows_read": 5,
+        "bytes_read": 202
+    }
+}
diff --git a/parser/testdata/03623_setting_boolean_shorthand_err_test/metadata.json b/parser/testdata/03623_setting_boolean_shorthand_err_test/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03623_setting_boolean_shorthand_err_test/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03623_setting_boolean_shorthand_err_test/query.sql b/parser/testdata/03623_setting_boolean_shorthand_err_test/query.sql
new file mode 100644
index 000000000..ba68a1961
--- /dev/null
+++ b/parser/testdata/03623_setting_boolean_shorthand_err_test/query.sql
@@ -0,0 +1,21 @@
+SELECT '-- Test against Boolean Setting';
+SET optimize_on_insert;
+SELECT getSetting('optimize_on_insert');
+SELECT 'ok';
+
+SELECT '-- Test against String Setting';
+SET default_database_engine; -- { clientError SYNTAX_ERROR }
+SELECT 'ok';
+
+SELECT '-- Test against UInt64 Setting';
+SET max_threads; -- { clientError SYNTAX_ERROR }
+SELECT 'ok';
+
+SELECT '-- Test against Seconds Setting';
+SET max_execution_time; -- { clientError SYNTAX_ERROR }
+SELECT 'ok';
+
+SELECT '-- Test with normal syntax works';
+SET max_threads = 4;
+SELECT getSetting('max_threads');
+SELECT 'ok';
diff --git a/parser/testdata/03624_csv_empty_array_from_empty_string/ast.json b/parser/testdata/03624_csv_empty_array_from_empty_string/ast.json
new file mode 100644
index 000000000..3dd58cb3f
--- /dev/null
+++ b/parser/testdata/03624_csv_empty_array_from_empty_string/ast.json
@@ -0,0 +1,61 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 2)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Asterisk"
+        },
+        {
+            "explain": "   TablesInSelectQuery (children 1)"
+        },
+        {
+            "explain": "    TablesInSelectQueryElement (children 1)"
+        },
+        {
+            "explain": "     TableExpression (children 1)"
+        },
+        {
+            "explain": "      Function format (children 1)"
+        },
+        {
+            "explain": "       ExpressionList (children 3)"
+        },
+        {
+            "explain": "        Identifier CSV"
+        },
+        {
+            "explain": "        Literal 'c0 Array(Int)'"
+        },
+        {
+            "explain": "        Literal '\"\"'"
+        }
+    ],
+
+    "rows": 13,
+
+    "statistics":
+    {
+        "elapsed": 0.001285951,
+        "rows_read": 13,
+        "bytes_read": 485
+    }
+}
diff --git a/parser/testdata/03624_csv_empty_array_from_empty_string/metadata.json b/parser/testdata/03624_csv_empty_array_from_empty_string/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03624_csv_empty_array_from_empty_string/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03624_csv_empty_array_from_empty_string/query.sql b/parser/testdata/03624_csv_empty_array_from_empty_string/query.sql
new file mode 100644
index 000000000..bcc946b39
--- /dev/null
+++ b/parser/testdata/03624_csv_empty_array_from_empty_string/query.sql
@@ -0,0 +1,3 @@
+ SELECT * FROM format(CSV, 'c0 Array(Int)', '""'); -- {serverError CANNOT_READ_ARRAY_FROM_TEXT}
+ SELECT * FROM format(CSV, 'c0 Variant(String, Array(Int))', '""');
+
diff --git a/parser/testdata/03624_parquet_row_number/ast.json b/parser/testdata/03624_parquet_row_number/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03624_parquet_row_number/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03624_parquet_row_number/metadata.json b/parser/testdata/03624_parquet_row_number/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03624_parquet_row_number/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03624_parquet_row_number/query.sql b/parser/testdata/03624_parquet_row_number/query.sql
new file mode 100644
index 000000000..b2dcab187
--- /dev/null
+++ b/parser/testdata/03624_parquet_row_number/query.sql
@@ -0,0 +1,7 @@
+-- Tags: no-fasttest, no-parallel
+
+set engine_file_truncate_on_insert = 1;
+
+insert into function file('03624_parquet_row_number.parquet') select number*10 as x from numbers(20) settings max_threads=1, output_format_parquet_row_group_size=5;
+
+select _row_number, x from file('03624_parquet_row_number.parquet') where x % 3 != 0 and x > 60 order by _row_number;
diff --git a/parser/testdata/03624_pr_lefl_right_joins_chain/ast.json b/parser/testdata/03624_pr_lefl_right_joins_chain/ast.json
new file mode 100644
index 000000000..8bc2bd924
--- /dev/null
+++ b/parser/testdata/03624_pr_lefl_right_joins_chain/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery tab (children 1)"
+        },
+        {
+            "explain": " Identifier tab"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001240567,
+        "rows_read": 2,
+        "bytes_read": 58
+    }
+}
diff --git a/parser/testdata/03624_pr_lefl_right_joins_chain/metadata.json b/parser/testdata/03624_pr_lefl_right_joins_chain/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03624_pr_lefl_right_joins_chain/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03624_pr_lefl_right_joins_chain/query.sql b/parser/testdata/03624_pr_lefl_right_joins_chain/query.sql
new file mode 100644
index 000000000..1c74065f5
--- /dev/null
+++ b/parser/testdata/03624_pr_lefl_right_joins_chain/query.sql
@@ -0,0 +1,48 @@
+DROP TABLE IF EXISTS tab;
+CREATE TABLE tab ( `k` Nullable(UInt32), `k1` Nullable(UInt32), `k2` Nullable(UInt32), `v` String ) ENGINE = MergeTree ORDER BY tuple();
+INSERT INTO tab VALUES (1, 1, 1, 'a'), (2, 2, 2, 'b');
+
+DROP TABLE IF EXISTS mem;
+CREATE TABLE mem ( `k` UInt64, `v` String ) ENGINE = Join(ANY, LEFT, k);
+INSERT INTO mem VALUES (1, 'A'), (2, 'B'), (3, 'B');
+
+DROP TABLE IF EXISTS mem2;
+CREATE TABLE mem2 ( `k` UInt64, `v` String ) ENGINE = Join(ANY, RIGHT, k);
+INSERT INTO mem2 VALUES (1, 'A'), (2, 'B'), (3, 'B');
+
+SET enable_analyzer = 1;
+
+SELECT '-- no parallel replicas --';
+SELECT *
+FROM tab
+ANY LEFT JOIN mem ON k1 = mem.k
+ANY RIGHT JOIN mem2 ON k2 = mem2.k
+ORDER BY tab.v
+SETTINGS enable_parallel_replicas=0;
+
+SELECT '-- parallel replicas --';
+SELECT *
+FROM tab
+ANY LEFT JOIN mem ON k1 = mem.k
+ANY RIGHT JOIN mem2 ON k2 = mem2.k
+ORDER BY tab.v
+SETTINGS enable_parallel_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost', parallel_replicas_for_non_replicated_merge_tree=1;
+
+
+SELECT '-- explain: check parallel replicas is disabled, looking at ReadFromRemoteParallelReplicas steps count --';
+SELECT count()
+FROM
+(
+    EXPLAIN
+    SELECT *
+    FROM tab
+    ANY LEFT JOIN mem ON k1 = mem.k
+    ANY RIGHT JOIN mem2 ON k2 = mem2.k
+    ORDER BY tab.v ASC
+    SETTINGS enable_parallel_replicas = 1, max_parallel_replicas = 3, cluster_for_parallel_replicas = 'test_cluster_one_shard_three_replicas_localhost', parallel_replicas_for_non_replicated_merge_tree = 1
+)
+WHERE explain ILIKE '%ReadFromRemoteParallelReplicas%';
+
+DROP TABLE mem2;
+DROP TABLE mem;
+DROP TABLE tab;
diff --git a/parser/testdata/03624_resource_exhaustion_window_function/ast.json b/parser/testdata/03624_resource_exhaustion_window_function/ast.json
new file mode 100644
index 000000000..0d8e101a9
--- /dev/null
+++ b/parser/testdata/03624_resource_exhaustion_window_function/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "CreateQuery table_test (children 1)"
+        },
+        {
"explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001591269, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03625_auto_statistics/metadata.json b/parser/testdata/03625_auto_statistics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03625_auto_statistics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03625_auto_statistics/query.sql b/parser/testdata/03625_auto_statistics/query.sql new file mode 100644 index 000000000..2b500a0b2 --- /dev/null +++ b/parser/testdata/03625_auto_statistics/query.sql @@ -0,0 +1,53 @@ +DROP TABLE IF EXISTS test_table; +SET allow_experimental_statistics = 1; + +CREATE TABLE test_table +( + id UInt64, + v1 String STATISTICS(uniq), + v2 UInt64 STATISTICS(tdigest), + v3 String +) +ENGINE = MergeTree +ORDER BY id +SETTINGS + enable_block_number_column = 0, + enable_block_offset_column = 0, + auto_statistics_types = 'uniq,minmax'; + +SYSTEM STOP MERGES test_table; + +INSERT INTO test_table SELECT number, if (rand() % 100 = 0, 'foo', ''), rand() % 2, rand() % 2 FROM numbers(100000); +INSERT INTO test_table SELECT number, if (rand() % 100 = 0, 'bar', ''), rand() % 2 + 5, rand() % 2 + 5 FROM numbers(100000); + +SELECT name, column, type, statistics, estimates.cardinality, estimates.min, estimates.max +FROM system.parts_columns +WHERE database = currentDatabase() AND table = 'test_table' AND active +ORDER BY name, column; + +SELECT count() FROM test_table WHERE NOT ignore(*); +SELECT uniqExact(v1), uniqExact(v2), uniqExact(v3) FROM test_table WHERE NOT ignore(*); + +SYSTEM START MERGES test_table; +OPTIMIZE TABLE test_table FINAL; + +SELECT name, column, type, statistics, estimates.cardinality, estimates.min, estimates.max +FROM system.parts_columns +WHERE database = currentDatabase() AND table = 'test_table' AND active +ORDER BY name, column; + +SELECT count() FROM test_table WHERE NOT ignore(*); +SELECT uniqExact(v1), uniqExact(v2), uniqExact(v3) FROM test_table WHERE NOT ignore(*); + +DETACH TABLE test_table; +ATTACH TABLE test_table; + +SELECT name, column, type, statistics, estimates.cardinality, estimates.min, estimates.max +FROM system.parts_columns +WHERE database = currentDatabase() AND table = 'test_table' AND active +ORDER BY name, column; + +SELECT count() FROM test_table WHERE NOT ignore(*); +SELECT uniqExact(v1), uniqExact(v2), uniqExact(v3) FROM test_table WHERE NOT ignore(*); + +DROP TABLE IF EXISTS test_table; diff --git a/parser/testdata/03625_auto_statistics_alter/ast.json b/parser/testdata/03625_auto_statistics_alter/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03625_auto_statistics_alter/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03625_auto_statistics_alter/metadata.json b/parser/testdata/03625_auto_statistics_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03625_auto_statistics_alter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03625_auto_statistics_alter/query.sql b/parser/testdata/03625_auto_statistics_alter/query.sql new file mode 100644 index 000000000..3c5b864df --- /dev/null +++ b/parser/testdata/03625_auto_statistics_alter/query.sql @@ -0,0 +1,63 @@ +-- Tags: no-fasttest +-- no-fasttest: 'countmin' sketches need a 3rd party library + +SET mutations_sync = 2; +SET allow_experimental_statistics = 1; +DROP TABLE IF EXISTS t_alter_auto_statistics; 
+
+CREATE TABLE t_alter_auto_statistics
+(
+    a UInt64,
+    b UInt64 STATISTICS (minmax),
+    c String
+)
+ENGINE = MergeTree ORDER BY a SETTINGS auto_statistics_types = '';
+
+INSERT INTO t_alter_auto_statistics VALUES (1, 1, 'xxx');
+
+SELECT 'no auto statistics';
+
+SELECT column, type, statistics, estimates.cardinality, estimates.min, estimates.max
+FROM system.parts_columns
+WHERE table = 't_alter_auto_statistics' AND database = currentDatabase() AND active = 1
+ORDER BY name, column;
+
+ALTER TABLE t_alter_auto_statistics MODIFY SETTING auto_statistics_types = 'minmax, uniq, tdigest';
+ALTER TABLE t_alter_auto_statistics MATERIALIZE STATISTICS ALL;
+
+SELECT 'materialized minmax, uniq, tdigest';
+
+SELECT column, type, statistics, estimates.cardinality, estimates.min, estimates.max
+FROM system.parts_columns
+WHERE table = 't_alter_auto_statistics' AND database = currentDatabase() AND active = 1
+ORDER BY name, column;
+
+ALTER TABLE t_alter_auto_statistics MODIFY SETTING auto_statistics_types = 'minmax, uniq, countmin';
+INSERT INTO t_alter_auto_statistics VALUES (2, 2, 'yyy');
+
+SELECT 'added minmax, uniq, countmin';
+
+SELECT column, type, statistics, estimates.cardinality, estimates.min, estimates.max
+FROM system.parts_columns
+WHERE table = 't_alter_auto_statistics' AND database = currentDatabase() AND active = 1
+ORDER BY name, column;
+
+ALTER TABLE t_alter_auto_statistics MATERIALIZE STATISTICS ALL;
+
+SELECT 'materialized minmax, uniq, countmin';
+
+SELECT column, type, statistics, estimates.cardinality, estimates.min, estimates.max
+FROM system.parts_columns
+WHERE table = 't_alter_auto_statistics' AND database = currentDatabase() AND active = 1
+ORDER BY name, column;
+
+ALTER TABLE t_alter_auto_statistics CLEAR STATISTICS ALL;
+
+SELECT 'cleared statistics';
+
+SELECT column, type, statistics, estimates.cardinality, estimates.min, estimates.max
+FROM system.parts_columns
+WHERE table = 't_alter_auto_statistics' AND database = currentDatabase() AND active = 1
+ORDER BY name, column;
+
+DROP TABLE IF EXISTS t_alter_auto_statistics;
diff --git a/parser/testdata/03625_auto_statistics_alter_rmt/ast.json b/parser/testdata/03625_auto_statistics_alter_rmt/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03625_auto_statistics_alter_rmt/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03625_auto_statistics_alter_rmt/metadata.json b/parser/testdata/03625_auto_statistics_alter_rmt/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03625_auto_statistics_alter_rmt/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03625_auto_statistics_alter_rmt/query.sql b/parser/testdata/03625_auto_statistics_alter_rmt/query.sql
new file mode 100644
index 000000000..685abaa78
--- /dev/null
+++ b/parser/testdata/03625_auto_statistics_alter_rmt/query.sql
@@ -0,0 +1,63 @@
+-- Tags: no-fasttest
+-- no-fasttest: 'countmin' sketches need a 3rd party library
+
+SET mutations_sync = 2;
+SET allow_experimental_statistics = 1;
+DROP TABLE IF EXISTS t_alter_auto_statistics SYNC;
+
+CREATE TABLE t_alter_auto_statistics
+(
+    a UInt64,
+    b UInt64 STATISTICS (minmax),
+    c String
+)
+ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/t_alter_auto_statistics', '1') ORDER BY a SETTINGS auto_statistics_types = '';
+
+INSERT INTO t_alter_auto_statistics VALUES (1, 1, 'xxx');
+
+SELECT 'no auto statistics';
+
+SELECT column, type, statistics, estimates.cardinality, estimates.min, estimates.max
+FROM system.parts_columns
+WHERE table = 't_alter_auto_statistics' AND database = currentDatabase() AND active = 1
+ORDER BY name, column;
+
+ALTER TABLE t_alter_auto_statistics MODIFY SETTING auto_statistics_types = 'minmax, uniq, tdigest';
+ALTER TABLE t_alter_auto_statistics MATERIALIZE STATISTICS ALL;
+
+SELECT 'materialized minmax, uniq, tdigest';
+
+SELECT column, type, statistics, estimates.cardinality, estimates.min, estimates.max
+FROM system.parts_columns
+WHERE table = 't_alter_auto_statistics' AND database = currentDatabase() AND active = 1
+ORDER BY name, column;
+
+ALTER TABLE t_alter_auto_statistics MODIFY SETTING auto_statistics_types = 'minmax, uniq, countmin';
+INSERT INTO t_alter_auto_statistics VALUES (2, 2, 'yyy');
+
+SELECT 'added minmax, uniq, countmin';
+
+SELECT column, type, statistics, estimates.cardinality, estimates.min, estimates.max
+FROM system.parts_columns
+WHERE table = 't_alter_auto_statistics' AND database = currentDatabase() AND active = 1
+ORDER BY name, column;
+
+ALTER TABLE t_alter_auto_statistics MATERIALIZE STATISTICS ALL;
+
+SELECT 'materialized minmax, uniq, countmin';
+
+SELECT column, type, statistics, estimates.cardinality, estimates.min, estimates.max
+FROM system.parts_columns
+WHERE table = 't_alter_auto_statistics' AND database = currentDatabase() AND active = 1
+ORDER BY name, column;
+
+ALTER TABLE t_alter_auto_statistics CLEAR STATISTICS ALL;
+
+SELECT 'cleared statistics';
+
+SELECT column, type, statistics, estimates.cardinality, estimates.min, estimates.max
+FROM system.parts_columns
+WHERE table = 't_alter_auto_statistics' AND database = currentDatabase() AND active = 1
+ORDER BY name, column;
+
+DROP TABLE IF EXISTS t_alter_auto_statistics SYNC;
diff --git a/parser/testdata/03625_auto_statistics_rmt/ast.json b/parser/testdata/03625_auto_statistics_rmt/ast.json
new file mode 100644
index 000000000..c79a74740
--- /dev/null
+++ b/parser/testdata/03625_auto_statistics_rmt/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery test_table (children 1)"
+        },
+        {
+            "explain": " Identifier test_table"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001275626,
+        "rows_read": 2,
+        "bytes_read": 72
+    }
+}
diff --git a/parser/testdata/03625_auto_statistics_rmt/metadata.json b/parser/testdata/03625_auto_statistics_rmt/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03625_auto_statistics_rmt/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03625_auto_statistics_rmt/query.sql b/parser/testdata/03625_auto_statistics_rmt/query.sql
new file mode 100644
index 000000000..5d59f7153
--- /dev/null
+++ b/parser/testdata/03625_auto_statistics_rmt/query.sql
@@ -0,0 +1,54 @@
+DROP TABLE IF EXISTS test_table;
+SET allow_experimental_statistics = 1;
+SET insert_keeper_fault_injection_probability = 0.0;
+
+CREATE TABLE test_table
+(
+    id UInt64,
+    v1 String STATISTICS(uniq),
+    v2 UInt64 STATISTICS(tdigest),
+    v3 String
+)
+ENGINE = ReplicatedMergeTree('/clickhouse/tables/{database}/test_table', '1')
+ORDER BY id
+SETTINGS
+    enable_block_number_column = 0,
+    enable_block_offset_column = 0,
+    auto_statistics_types = 'uniq,minmax';
+
+SYSTEM STOP MERGES test_table;
+
+INSERT INTO test_table SELECT number, if (rand() % 100 = 0, 'foo', ''), rand() % 2, rand() % 2 FROM numbers(100000);
+INSERT INTO test_table SELECT number, if (rand() % 100 = 0, 'bar', ''), rand() % 2 + 5, rand() % 2 + 5 FROM numbers(100000);
+
+SELECT name, column, type, statistics, estimates.cardinality, estimates.min, estimates.max
+FROM system.parts_columns
+WHERE database = currentDatabase() AND table = 'test_table' AND active
+ORDER BY name, column;
+
+SELECT count() FROM test_table WHERE NOT ignore(*);
+SELECT uniqExact(v1), uniqExact(v2), uniqExact(v3) FROM test_table WHERE NOT ignore(*);
+
+SYSTEM START MERGES test_table;
+OPTIMIZE TABLE test_table FINAL;
+
+SELECT name, column, type, statistics, estimates.cardinality, estimates.min, estimates.max
+FROM system.parts_columns
+WHERE database = currentDatabase() AND table = 'test_table' AND active
+ORDER BY name, column;
+
+SELECT count() FROM test_table WHERE NOT ignore(*);
+SELECT uniqExact(v1), uniqExact(v2), uniqExact(v3) FROM test_table WHERE NOT ignore(*);
+
+DETACH TABLE test_table;
+ATTACH TABLE test_table;
+
+SELECT name, column, type, statistics, estimates.cardinality, estimates.min, estimates.max
+FROM system.parts_columns
+WHERE database = currentDatabase() AND table = 'test_table' AND active
+ORDER BY name, column;
+
+SELECT count() FROM test_table WHERE NOT ignore(*);
+SELECT uniqExact(v1), uniqExact(v2), uniqExact(v3) FROM test_table WHERE NOT ignore(*);
+
+DROP TABLE IF EXISTS test_table;
diff --git a/parser/testdata/03625_auto_statistics_validation/ast.json b/parser/testdata/03625_auto_statistics_validation/ast.json
new file mode 100644
index 000000000..490a2e17e
--- /dev/null
+++ b/parser/testdata/03625_auto_statistics_validation/ast.json
@@ -0,0 +1 @@
+{"error": true}
diff --git a/parser/testdata/03625_auto_statistics_validation/metadata.json b/parser/testdata/03625_auto_statistics_validation/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03625_auto_statistics_validation/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03625_auto_statistics_validation/query.sql b/parser/testdata/03625_auto_statistics_validation/query.sql
new file mode 100644
index 000000000..7c394c8fb
--- /dev/null
+++ b/parser/testdata/03625_auto_statistics_validation/query.sql
@@ -0,0 +1,21 @@
+-- Tags: no-fasttest
+
+DROP TABLE IF EXISTS t_auto_statistics_validation;
+
+CREATE TABLE t_auto_statistics_validation (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS auto_statistics_types = 'nonexisting'; -- { serverError INCORRECT_QUERY }
+CREATE TABLE t_auto_statistics_validation (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS auto_statistics_types = 'minmax; countmin'; -- { serverError SYNTAX_ERROR }
+CREATE TABLE t_auto_statistics_validation (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS auto_statistics_types = 'minmax, nonexisting, countmin'; -- { serverError INCORRECT_QUERY }
+
+CREATE TABLE t_auto_statistics_validation (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS auto_statistics_types = ''; DROP TABLE t_auto_statistics_validation;
+CREATE TABLE t_auto_statistics_validation (x UInt64) ENGINE = MergeTree ORDER BY x SETTINGS auto_statistics_types = 'minmax, countmin, uniq'; DROP TABLE t_auto_statistics_validation;
+
+CREATE TABLE t_auto_statistics_validation (x UInt64) ENGINE = MergeTree ORDER BY x;
+
+ALTER TABLE t_auto_statistics_validation MODIFY SETTING auto_statistics_types = 'nonexisting'; -- { serverError INCORRECT_QUERY }
+ALTER TABLE t_auto_statistics_validation MODIFY SETTING auto_statistics_types = 'minmax; countmin'; -- { serverError SYNTAX_ERROR }
+ALTER TABLE t_auto_statistics_validation MODIFY SETTING auto_statistics_types = 'minmax, nonexisting, countmin'; -- { serverError INCORRECT_QUERY }
+
+ALTER TABLE t_auto_statistics_validation MODIFY SETTING auto_statistics_types = '';
+ALTER TABLE t_auto_statistics_validation MODIFY SETTING auto_statistics_types = 'minmax, countmin, uniq';
+
+DROP TABLE t_auto_statistics_validation;
diff --git a/parser/testdata/03625_case_without_condition_non_constant_branches/ast.json b/parser/testdata/03625_case_without_condition_non_constant_branches/ast.json
new file mode 100644
index 000000000..643abf41e
--- /dev/null
+++ b/parser/testdata/03625_case_without_condition_non_constant_branches/ast.json
@@ -0,0 +1,37 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "SelectWithUnionQuery (children 1)"
+        },
+        {
+            "explain": " ExpressionList (children 1)"
+        },
+        {
+            "explain": "  SelectQuery (children 1)"
+        },
+        {
+            "explain": "   ExpressionList (children 1)"
+        },
+        {
+            "explain": "    Identifier CASE (alias number)"
+        }
+    ],
+
+    "rows": 5,
+
+    "statistics":
+    {
+        "elapsed": 0.001275166,
+        "rows_read": 5,
+        "bytes_read": 191
+    }
+}
diff --git a/parser/testdata/03625_case_without_condition_non_constant_branches/metadata.json b/parser/testdata/03625_case_without_condition_non_constant_branches/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03625_case_without_condition_non_constant_branches/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03625_case_without_condition_non_constant_branches/query.sql b/parser/testdata/03625_case_without_condition_non_constant_branches/query.sql
new file mode 100644
index 000000000..d4cfdc9fd
--- /dev/null
+++ b/parser/testdata/03625_case_without_condition_non_constant_branches/query.sql
@@ -0,0 +1,5 @@
+SELECT CASE number
+    WHEN number * 2 - 4 THEN 'Hello'
+    WHEN number * 3 - 6 THEN 'world'
+    ELSE '' END
+FROM numbers(10);
diff --git a/parser/testdata/03625_prewhere-and-default-bug/ast.json b/parser/testdata/03625_prewhere-and-default-bug/ast.json
new file mode 100644
index 000000000..9708f8130
--- /dev/null
+++ b/parser/testdata/03625_prewhere-and-default-bug/ast.json
@@ -0,0 +1,28 @@
+{
+    "meta":
+    [
+        {
+            "name": "explain",
+            "type": "String"
+        }
+    ],
+
+    "data":
+    [
+        {
+            "explain": "DropQuery tab (children 1)"
+        },
+        {
+            "explain": " Identifier tab"
+        }
+    ],
+
+    "rows": 2,
+
+    "statistics":
+    {
+        "elapsed": 0.001332708,
+        "rows_read": 2,
+        "bytes_read": 58
+    }
+}
diff --git a/parser/testdata/03625_prewhere-and-default-bug/metadata.json b/parser/testdata/03625_prewhere-and-default-bug/metadata.json
new file mode 100644
index 000000000..ef120d978
--- /dev/null
+++ b/parser/testdata/03625_prewhere-and-default-bug/metadata.json
@@ -0,0 +1 @@
+{"todo": true}
diff --git a/parser/testdata/03625_prewhere-and-default-bug/query.sql b/parser/testdata/03625_prewhere-and-default-bug/query.sql
new file mode 100644
index 000000000..92d1abb7d
--- /dev/null
+++ b/parser/testdata/03625_prewhere-and-default-bug/query.sql
@@ -0,0 +1,7 @@
+drop table if exists tab;
+create table tab (d DateTime64(3), p LowCardinality(String)) engine = MergeTree order by toDate(d);
+insert into tab select toDateTime(toDate('2000-01-01')) + number, if(bitAnd(number, 1) = 0, 'a', 'b') from numbers(100);
+
+alter table tab add column t String default '' settings alter_sync = 2;
+
+select 1 from tab where d > toDateTime(toDate('2000-01-01')) and p in ('a') and 1 = 1 group by d, t, p;
diff --git a/parser/testdata/03625_upper_lower_utf8_different_number_of_code_points/ast.json b/parser/testdata/03625_upper_lower_utf8_different_number_of_code_points/ast.json
b/parser/testdata/03625_upper_lower_utf8_different_number_of_code_points/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03625_upper_lower_utf8_different_number_of_code_points/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03625_upper_lower_utf8_different_number_of_code_points/metadata.json b/parser/testdata/03625_upper_lower_utf8_different_number_of_code_points/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03625_upper_lower_utf8_different_number_of_code_points/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03625_upper_lower_utf8_different_number_of_code_points/query.sql b/parser/testdata/03625_upper_lower_utf8_different_number_of_code_points/query.sql new file mode 100644 index 000000000..67349af10 --- /dev/null +++ b/parser/testdata/03625_upper_lower_utf8_different_number_of_code_points/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest +-- as it requires linking with ICU + +SELECT upperUTF8('ff'); diff --git a/parser/testdata/03626_case_function_with_dynamic_argument/ast.json b/parser/testdata/03626_case_function_with_dynamic_argument/ast.json new file mode 100644 index 000000000..d7105d945 --- /dev/null +++ b/parser/testdata/03626_case_function_with_dynamic_argument/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001387591, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03626_case_function_with_dynamic_argument/metadata.json b/parser/testdata/03626_case_function_with_dynamic_argument/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03626_case_function_with_dynamic_argument/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03626_case_function_with_dynamic_argument/query.sql b/parser/testdata/03626_case_function_with_dynamic_argument/query.sql new file mode 100644 index 000000000..78f5e551c --- /dev/null +++ b/parser/testdata/03626_case_function_with_dynamic_argument/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Dynamic) ENGINE = Memory; +INSERT INTO TABLE t0 (c0) VALUES (1), (1.2); +SELECT CASE 1 WHEN c0 THEN 1 END FROM t0; +DROP TABLE t0; diff --git a/parser/testdata/03627_non_constant_replacement_in_replace_regexp/ast.json b/parser/testdata/03627_non_constant_replacement_in_replace_regexp/ast.json new file mode 100644 index 000000000..4c5b1a288 --- /dev/null +++ b/parser/testdata/03627_non_constant_replacement_in_replace_regexp/ast.json @@ -0,0 +1,103 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function replaceRegexpAll (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal '1 2 3 123 5 100'" + }, + { + "explain": " Literal '\\\\d+'" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function modulo (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + 
"explain": " Literal UInt64_2" + }, + { + "explain": " Function toString (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function repeat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '\\\\0'" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + } + ], + + "rows": 27, + + "statistics": + { + "elapsed": 0.001302538, + "rows_read": 27, + "bytes_read": 1075 + } +} diff --git a/parser/testdata/03627_non_constant_replacement_in_replace_regexp/metadata.json b/parser/testdata/03627_non_constant_replacement_in_replace_regexp/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03627_non_constant_replacement_in_replace_regexp/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03627_non_constant_replacement_in_replace_regexp/query.sql b/parser/testdata/03627_non_constant_replacement_in_replace_regexp/query.sql new file mode 100644 index 000000000..d7b5084d5 --- /dev/null +++ b/parser/testdata/03627_non_constant_replacement_in_replace_regexp/query.sql @@ -0,0 +1 @@ +SELECT replaceRegexpAll('1 2 3 123 5 100', '\\d+', number % 2 ? toString(number) : repeat('\\0', number)) FROM numbers(10); diff --git a/parser/testdata/03628_named_tuple_element_in_order_by_key/ast.json b/parser/testdata/03628_named_tuple_element_in_order_by_key/ast.json new file mode 100644 index 000000000..69d5408c2 --- /dev/null +++ b/parser/testdata/03628_named_tuple_element_in_order_by_key/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001500452, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03628_named_tuple_element_in_order_by_key/metadata.json b/parser/testdata/03628_named_tuple_element_in_order_by_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03628_named_tuple_element_in_order_by_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03628_named_tuple_element_in_order_by_key/query.sql b/parser/testdata/03628_named_tuple_element_in_order_by_key/query.sql new file mode 100644 index 000000000..5e8a12f3d --- /dev/null +++ b/parser/testdata/03628_named_tuple_element_in_order_by_key/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + x Tuple(a UInt64, b String) +) +ENGINE = MergeTree +ORDER BY x.b +SETTINGS index_granularity = 1; + +INSERT INTO test VALUES ((1, 'hello')), ((2, 'World')); + +SELECT * FROM test ORDER BY x; +SELECT * FROM test ORDER BY x.a; +SELECT * FROM test ORDER BY x.b; +SELECT * FROM test WHERE x.a = 2; +-- Set `parallel_replicas_index_analysis_only_on_coordinator = 0` to prevent remote replicas from skipping index analysis in Parallel Replicas. +-- Otherwise, they may return full ranges and trigger max_rows_to_read validation failures. 
+SELECT * FROM test WHERE x.b = 'World' SETTINGS max_rows_to_read = 1, parallel_replicas_index_analysis_only_on_coordinator = 0; + +SELECT x.a FROM test ORDER BY x; +SELECT x.a FROM test ORDER BY x.a; +SELECT x.a FROM test ORDER BY x.b; +SELECT x.a FROM test WHERE x.a = 2; +SELECT x.a FROM test WHERE x.b = 'World' SETTINGS max_rows_to_read = 1, parallel_replicas_index_analysis_only_on_coordinator = 0; + +SELECT x.b FROM test ORDER BY x; +SELECT x.b FROM test ORDER BY x.a; +SELECT x.b FROM test ORDER BY x.b; +SELECT x.b FROM test WHERE x.a = 2; +SELECT x.b FROM test WHERE x.b = 'World' SETTINGS max_rows_to_read = 1, parallel_replicas_index_analysis_only_on_coordinator = 0; + +DROP TABLE test; diff --git a/parser/testdata/03628_parse_date_time_short_circuit/ast.json b/parser/testdata/03628_parse_date_time_short_circuit/ast.json new file mode 100644 index 000000000..38c1c6c6e --- /dev/null +++ b/parser/testdata/03628_parse_date_time_short_circuit/ast.json @@ -0,0 +1,100 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function if (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Function greater (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Function parseDateTime (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal ''" + }, + { + "explain": " Literal '%d-%m-%Y'" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '2020-01-01'" + }, + { + "explain": " Literal 'DateTime'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 26, + + "statistics": + { + "elapsed": 0.001717112, + "rows_read": 26, + "bytes_read": 1026 + } +} diff --git a/parser/testdata/03628_parse_date_time_short_circuit/metadata.json b/parser/testdata/03628_parse_date_time_short_circuit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03628_parse_date_time_short_circuit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03628_parse_date_time_short_circuit/query.sql b/parser/testdata/03628_parse_date_time_short_circuit/query.sql new file mode 100644 index 000000000..63532a937 --- /dev/null +++ b/parser/testdata/03628_parse_date_time_short_circuit/query.sql @@ -0,0 +1,2 @@ +select if(number > 10, parseDateTime(materialize(''), '%d-%m-%Y'), '2020-01-01'::DateTime) from numbers(2); + diff --git a/parser/testdata/03628_subcolumns_of_columns_with_dot_in_name/ast.json b/parser/testdata/03628_subcolumns_of_columns_with_dot_in_name/ast.json new file mode 100644 index 000000000..ca69111ed --- /dev/null +++ b/parser/testdata/03628_subcolumns_of_columns_with_dot_in_name/ast.json @@ -0,0 +1,28 @@ +{ + "meta": 
+ [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001221649, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03628_subcolumns_of_columns_with_dot_in_name/metadata.json b/parser/testdata/03628_subcolumns_of_columns_with_dot_in_name/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03628_subcolumns_of_columns_with_dot_in_name/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03628_subcolumns_of_columns_with_dot_in_name/query.sql b/parser/testdata/03628_subcolumns_of_columns_with_dot_in_name/query.sql new file mode 100644 index 000000000..d4564159b --- /dev/null +++ b/parser/testdata/03628_subcolumns_of_columns_with_dot_in_name/query.sql @@ -0,0 +1,57 @@ +drop table if exists test; +create table test (`my.json` JSON) engine=Memory; +insert into test select '{"a" : 42}'; +select my.json.a from test settings enable_analyzer=1; +select `my.json`.a from test settings enable_analyzer=1; +select my.json.a from test settings enable_analyzer=0; +select `my.json`.a from test settings enable_analyzer=0; +drop table test; + +select `t.t`.a from format(JSONEachRow, '`t.t` Tuple(a UInt32)', '{"t.t" : {"a" : 42}}'); + +create table test +( + `my.json` JSON(a UInt32), + a1 UInt32 materialized my.json.a, + a2 UInt32 default my.json.a, + b1 UInt32 materialized my.json.b, + b2 UInt32 default my.json.b, + index idx1 my.json.a type minmax, + index idx2 my.json.b::Int64 type minmax, + projection prj1 (select my.json, my.json.a, my.json.b order by my.json.a, my.json.b::Int32) +) engine=MergeTree order by (my.json.a, my.json.b::Int32, my.json.a + 42, my.json.b::Int32 + 42); +insert into test (my.json) select '{"a" : 42, "b" : 42}'; +select * from test; +select * from test order by my.json.a; +select * from test order by my.json.b::Int32; +insert into test (my.json) select '{"a" : 43, "b" : 43}'; +optimize table test final; +select * from test; +select * from test order by my.json.a; +select * from test order by my.json.b::Int32; + +alter table test modify column my.json JSON(a UInt32, b UInt32); -- {serverError ALTER_OF_COLUMN_IS_FORBIDDEN} +alter table test update `my.json` = '{}' where 1; -- {serverError CANNOT_UPDATE_COLUMN} + +drop table test; + +create table test +( + `my.tuple` Tuple(a UInt32), + a1 UInt32 materialized my.tuple.a, + a2 UInt32 default my.tuple.a, + index idx1 my.tuple.a type minmax, + projection prj1 (select my.tuple, my.tuple.a order by my.tuple.a) +) engine=MergeTree order by (my.tuple.a, my.tuple.a + 42); +insert into test (my.tuple) select tuple(42); +select * from test; +select * from test order by my.tuple.a; +insert into test (my.tuple) select tuple(43); +optimize table test final; +select * from test; +select * from test order by my.tuple.a; + +alter table test modify column my.tuple Tuple(a UInt32, b UInt32); -- {serverError ALTER_OF_COLUMN_IS_FORBIDDEN} +alter table test update `my.tuple` = tuple(0, 0) where 1; -- {serverError CANNOT_UPDATE_COLUMN} + +drop table test; \ No newline at end of file diff --git a/parser/testdata/03629_duplicate_partition_keys_crash/ast.json b/parser/testdata/03629_duplicate_partition_keys_crash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03629_duplicate_partition_keys_crash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/03629_duplicate_partition_keys_crash/metadata.json b/parser/testdata/03629_duplicate_partition_keys_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03629_duplicate_partition_keys_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03629_duplicate_partition_keys_crash/query.sql b/parser/testdata/03629_duplicate_partition_keys_crash/query.sql new file mode 100644 index 000000000..17982763d --- /dev/null +++ b/parser/testdata/03629_duplicate_partition_keys_crash/query.sql @@ -0,0 +1,128 @@ +-- Test for issue #86540: Out of bounds access with duplicate partition keys +-- This test verifies that duplicate partition field names don't cause crashes + +DROP TABLE IF EXISTS test_duplicate_partition_keys; + +-- Test case 1: Duplicate column names in partition expression +CREATE TABLE test_duplicate_partition_keys ( + c0 String, + c1 Int32 +) +ENGINE = MergeTree() +PARTITION BY (c1, c1, sipHash64(c0)) +ORDER BY c0; + +-- Insert some test data +INSERT INTO test_duplicate_partition_keys VALUES ('test1', 1), ('test2', 2), ('test3', 1); + +-- This SELECT should not crash (was causing segfault before fix) +SELECT count() FROM test_duplicate_partition_keys WHERE c1 = 1; + +-- Test the SELECT with different conditions +SELECT c0, c1 FROM test_duplicate_partition_keys WHERE c1 = 2 ORDER BY c0; + +DROP TABLE test_duplicate_partition_keys; + +-- Test case 2: More complex duplicate partition expression +CREATE TABLE test_duplicate_partition_keys2 ( + a String, + b Int32, + c Int32 +) +ENGINE = MergeTree() +PARTITION BY (b, c, b, sipHash64(a)) +ORDER BY a; + +INSERT INTO test_duplicate_partition_keys2 VALUES ('x', 10, 20), ('y', 10, 30); + +-- This should also work without crashing +SELECT count() FROM test_duplicate_partition_keys2 WHERE b = 10; + +DROP TABLE test_duplicate_partition_keys2; + +-- Test case 3: Simple table with duplicate keys in different positions +CREATE TABLE test_triple_duplicate ( + x UInt32, + y String +) +ENGINE = MergeTree() +PARTITION BY (x, x, x) +ORDER BY y; + +INSERT INTO test_triple_duplicate VALUES (1, 'a'), (2, 'b'), (1, 'c'); + +-- Test SELECT with triple duplicate partition keys +SELECT count() FROM test_triple_duplicate WHERE x = 1; + +DROP TABLE test_triple_duplicate; + +-- Test case 4: Mixed expression duplicates with date functions +CREATE TABLE test_mixed_duplicates ( + id Int32, + name String, + create_date Date +) +ENGINE = MergeTree() +PARTITION BY (id, toYYYYMM(create_date), id, sipHash64(name)) +ORDER BY name; + +INSERT INTO test_mixed_duplicates VALUES (1, 'test1', '2024-01-01'), (2, 'test2', '2024-02-01'), (1, 'test3', '2024-01-01'); +SELECT count() FROM test_mixed_duplicates WHERE id = 1; + +DROP TABLE test_mixed_duplicates; + +-- Test case 5: Different data types with duplicates +CREATE TABLE test_type_duplicates ( + uint_col UInt32, + int_col Int64, + str_col String +) +ENGINE = MergeTree() +PARTITION BY (uint_col, int_col, uint_col, str_col, uint_col) +ORDER BY str_col; + +INSERT INTO test_type_duplicates VALUES (100, -200, 'abc'), (200, -400, 'def'), (100, -200, 'xyz'); +SELECT count() FROM test_type_duplicates WHERE uint_col = 100; + +DROP TABLE test_type_duplicates; + +-- Test case 6: Complex hash function duplicates +CREATE TABLE test_hash_duplicates ( + id UInt64, + data String +) +ENGINE = MergeTree() +PARTITION BY (sipHash64(data), cityHash64(data), sipHash64(data)) +ORDER BY id; + +INSERT INTO test_hash_duplicates VALUES (1, 'sample1'), (2, 'sample2'), (3, 
'sample1'); +SELECT count() FROM test_hash_duplicates WHERE data = 'sample1'; + +DROP TABLE test_hash_duplicates; + +-- Test case 7: Extreme case - single field repeated many times +CREATE TABLE test_extreme_repeats ( + x UInt64 +) +ENGINE = MergeTree() +PARTITION BY (x, x, x, x, x, x) +ORDER BY x; + +INSERT INTO test_extreme_repeats VALUES (1), (2), (1), (3); +SELECT count() FROM test_extreme_repeats WHERE x = 1; + +DROP TABLE test_extreme_repeats; + +-- Test case 8: Performance test with moderate data +CREATE TABLE test_performance_duplicates ( + category UInt32, + name String +) +ENGINE = MergeTree() +PARTITION BY (category, category, category) +ORDER BY name; + +INSERT INTO test_performance_duplicates SELECT number % 10, concat('name', toString(number)) FROM numbers(1000); +SELECT count() FROM test_performance_duplicates WHERE category = 5; + +DROP TABLE test_performance_duplicates; \ No newline at end of file diff --git a/parser/testdata/03629_starts_endswith_caseinsensitive/ast.json b/parser/testdata/03629_starts_endswith_caseinsensitive/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03629_starts_endswith_caseinsensitive/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03629_starts_endswith_caseinsensitive/metadata.json b/parser/testdata/03629_starts_endswith_caseinsensitive/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03629_starts_endswith_caseinsensitive/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03629_starts_endswith_caseinsensitive/query.sql b/parser/testdata/03629_starts_endswith_caseinsensitive/query.sql new file mode 100644 index 000000000..8d0c387c0 --- /dev/null +++ b/parser/testdata/03629_starts_endswith_caseinsensitive/query.sql @@ -0,0 +1,84 @@ +-- Test for the case-insensitive prefix and suffix matching functions, (starts|ends)WithCaseInsensitive(UTF8)? + +SELECT '-- Test startsWithCaseInsensitive ASCII'; +SELECT startsWithCaseInsensitive('Marta', 'mA'), startsWithCaseInsensitive('match_me_not', 'Na'); + +SELECT '-- Test startsWithCaseInsensitive UTF8 Latin'; +SELECT startsWithCaseInsensitive('Bär', 'bä'), startsWithCaseInsensitive('Bär', 'BÄ'); + +SELECT '-- Test startsWithCaseInsensitive UTF8 non-Latin'; +SELECT startsWithCaseInsensitive('中国', '中'), startsWithCaseInsensitive('中国', '国'); + +SELECT '-- Test startsWithCaseInsensitiveUTF8 ASCII'; +SELECT startsWithCaseInsensitiveUTF8('Marta', 'mA'), startsWithCaseInsensitiveUTF8('match_me_not', 'Na'); + +SELECT '-- Test startsWithCaseInsensitiveUTF8 UTF8 Latin'; +SELECT startsWithCaseInsensitiveUTF8('Bär', 'bä'), startsWithCaseInsensitiveUTF8('Bär', 'BÄ'); + +SELECT '-- Test startsWithCaseInsensitiveUTF8 UTF8 non-Latin'; +SELECT startsWithCaseInsensitiveUTF8('中国', '中'), startsWithCaseInsensitiveUTF8('Hello中国', '中'); + +SELECT '-- Test endsWithCaseInsensitive ASCII'; +SELECT endsWithCaseInsensitive('Marta', 'tA'), endsWithCaseInsensitive('match_me_not', 'Na'); + +SELECT '-- Test endsWithCaseInsensitive UTF8 Latin'; +SELECT endsWithCaseInsensitive('Bär', 'äR'), endsWithCaseInsensitive('Bär', 'ÄR'); + +SELECT '-- Test endsWithCaseInsensitive UTF8 non-Latin'; +SELECT endsWithCaseInsensitive('中国', '国'), endsWithCaseInsensitive('中国', '中'); + +SELECT '-- Test endsWithCaseInsensitiveUTF8 ASCII'; +SELECT endsWithCaseInsensitiveUTF8('Marta', 'tA'), endsWithCaseInsensitiveUTF8('match_me_not', 'Na'); + +SELECT '-- Test endsWithCaseInsensitiveUTF8 UTF8 Latin'; +SELECT
endsWithCaseInsensitiveUTF8('Bär', 'äR'), endsWithCaseInsensitiveUTF8('Bär', 'ÄR'); + +SELECT '-- Test endsWithCaseInsensitiveUTF8 UTF8 non-Latin'; +SELECT endsWithCaseInsensitiveUTF8('中国', '国'), endsWithCaseInsensitiveUTF8('中国', '中'); + +SELECT '-- Test invalid UTF8'; +SELECT startsWithCaseInsensitive('中国', '\xe4'), startsWithCaseInsensitiveUTF8('中国', '\xe4'); +SELECT endsWithCaseInsensitive('中国', '\xbd'), endsWithCaseInsensitiveUTF8('中国', '\xbd'); + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab(S1 String, S2 String, S3 FixedString(4)) ENGINE=Memory; +INSERT INTO tab values ('1a', 'a', 'AbA'), ('22', 'A', 'ab'), ('中国', '中', '国'); + +SELECT '-- Test constant needle with haystack - prefix'; +SELECT COUNT() FROM tab WHERE startsWithCaseInsensitive(S1, '1'); +SELECT COUNT() FROM tab WHERE startsWithCaseInsensitive(S2, '中'); +SELECT COUNT() FROM tab WHERE startsWithCaseInsensitive(S3, '国'); + +SELECT COUNT() FROM tab WHERE startsWithCaseInsensitiveUTF8(S1, '1'); +SELECT COUNT() FROM tab WHERE startsWithCaseInsensitiveUTF8(S2, '中'); +-- startsWithCaseInsensitiveUTF8 does not support FixedString + +SELECT '-- Test constant needle with haystack - suffix'; +SELECT COUNT() FROM tab WHERE endsWithCaseInsensitive(S1, '2'); +SELECT COUNT() FROM tab WHERE endsWithCaseInsensitive(S2, '中'); +SELECT COUNT() FROM tab WHERE endsWithCaseInsensitive(S3, '国\0'); + +SELECT COUNT() FROM tab WHERE endsWithCaseInsensitiveUTF8(S1, '2'); +SELECT COUNT() FROM tab WHERE endsWithCaseInsensitiveUTF8(S2, '中'); +-- endsWithCaseInsensitiveUTF8 does not support FixedString + +SELECT '-- Test column needle with haystack - prefix'; +SELECT COUNT() FROM tab WHERE startsWithCaseInsensitive(S1, S1); +SELECT COUNT() FROM tab WHERE startsWithCaseInsensitive(S1, S2); +SELECT COUNT() FROM tab WHERE startsWithCaseInsensitive(S2, S3); + +SELECT COUNT() FROM tab WHERE startsWithCaseInsensitiveUTF8(S1, S1); +SELECT COUNT() FROM tab WHERE startsWithCaseInsensitiveUTF8(S1, S2); +-- startsWithCaseInsensitiveUTF8 does not support FixedString + +SELECT '-- Test column needle with haystack - suffix'; +SELECT COUNT() FROM tab WHERE endsWithCaseInsensitive(S1, S1); +SELECT COUNT() FROM tab WHERE endsWithCaseInsensitive(S1, S2); +SELECT COUNT() FROM tab WHERE endsWithCaseInsensitive(S2, S3); + +SELECT COUNT() FROM tab WHERE endsWithCaseInsensitiveUTF8(S1, S1); +SELECT COUNT() FROM tab WHERE endsWithCaseInsensitiveUTF8(S1, S2); +-- endsWithCaseInsensitiveUTF8 does not support FixedString + +DROP TABLE tab; diff --git a/parser/testdata/03629_storage_s3_disallow_index_alter/ast.json b/parser/testdata/03629_storage_s3_disallow_index_alter/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03629_storage_s3_disallow_index_alter/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03629_storage_s3_disallow_index_alter/metadata.json b/parser/testdata/03629_storage_s3_disallow_index_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03629_storage_s3_disallow_index_alter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03629_storage_s3_disallow_index_alter/query.sql b/parser/testdata/03629_storage_s3_disallow_index_alter/query.sql new file mode 100644 index 000000000..fb0384aa8 --- /dev/null +++ b/parser/testdata/03629_storage_s3_disallow_index_alter/query.sql @@ -0,0 +1,10 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on S3 +-- Issue: https://github.com/ClickHouse/ClickHouse/issues/87059 + +DROP
TABLE IF EXISTS test_03629; +CREATE TABLE test_03629 (a UInt64) ENGINE = S3(s3_conn, filename='test_03629_{_partition_id}', format='Native') PARTITION BY a; +ALTER TABLE test_03629 ADD INDEX a_idx a TYPE set(0); -- { serverError NOT_IMPLEMENTED } +ALTER TABLE test_03629 ADD PROJECTION a_proj (SELECT a + 1 ORDER BY a); -- { serverError NOT_IMPLEMENTED } + +DROP TABLE test_03629; diff --git a/parser/testdata/03630_hash_join_max_block_size/ast.json b/parser/testdata/03630_hash_join_max_block_size/ast.json new file mode 100644 index 000000000..7529eb3bd --- /dev/null +++ b/parser/testdata/03630_hash_join_max_block_size/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery b_customers (children 1)" + }, + { + "explain": " Identifier b_customers" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001753303, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/03630_hash_join_max_block_size/metadata.json b/parser/testdata/03630_hash_join_max_block_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03630_hash_join_max_block_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03630_hash_join_max_block_size/query.sql b/parser/testdata/03630_hash_join_max_block_size/query.sql new file mode 100644 index 000000000..a2547d0e4 --- /dev/null +++ b/parser/testdata/03630_hash_join_max_block_size/query.sql @@ -0,0 +1,27 @@ +DROP TABLE IF EXISTS b_customers; +CREATE TABLE b_customers (customer_id Int64, first_order_id Int64) ENGINE = MergeTree ORDER BY customer_id; +INSERT INTO b_customers SELECT number, number * 20000000 FROM system.numbers LIMIT 2,100000; -- will work with LIMIT 10000 +INSERT INTO b_customers SELECT number * -1, -1 FROM system.numbers LIMIT 2; -- will work without this line or LIMIT 1 + +DROP TABLE IF EXISTS b_orders; +CREATE TABLE b_orders (order_id Int64, address_id String) ENGINE = MergeTree ORDER BY order_id; +INSERT INTO b_orders SELECT number, 'fake' FROM system.numbers LIMIT 80000; -- will work with LIMIT 70000 +INSERT INTO b_orders SELECT first_order_id, 'fake' FROM b_customers GROUP BY first_order_id; + +DROP TABLE IF EXISTS b_addresses; +CREATE TABLE b_addresses (address_id String) ENGINE = MergeTree ORDER BY address_id; + +set query_plan_join_swap_table = 0; + +select count() from +( + SELECT customers.customer_id AS dim_customers_id + FROM + b_orders AS orders + ANY RIGHT JOIN + b_customers AS customers + ON orders.order_id = customers.first_order_id + ANY LEFT JOIN + b_addresses AS shipping_addresses + ON shipping_addresses.address_id = orders.address_id +); diff --git a/parser/testdata/03630_join_blocks_with_different_constness/ast.json b/parser/testdata/03630_join_blocks_with_different_constness/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03630_join_blocks_with_different_constness/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03630_join_blocks_with_different_constness/metadata.json b/parser/testdata/03630_join_blocks_with_different_constness/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03630_join_blocks_with_different_constness/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03630_join_blocks_with_different_constness/query.sql b/parser/testdata/03630_join_blocks_with_different_constness/query.sql new file mode 100644 index 
000000000..8a0133273 --- /dev/null +++ b/parser/testdata/03630_join_blocks_with_different_constness/query.sql @@ -0,0 +1,36 @@ +-- Regression for the case when the JOIN contains const and non-const blocks, which leads to UB: +-- +-- Too large size (18446603496615682040) passed to allocator. It indicates an error +WITH + input_1 AS (SELECT number::String AS parent_id, number::String as id, number::String as value FROM numbers_mt(1e6)), + dimensions_1 AS (SELECT number::String AS value_id FROM numbers_mt(1e6)), + dimensions_2 AS (SELECT number::String AS value_id FROM numbers_mt(1e6)), + parents AS + ( + SELECT 'foo' AS type, parent_id + FROM input_1 + GROUP BY parent_id + ), + parents_with_value AS + ( + SELECT type, parent_id, t.value + FROM parents + LEFT JOIN input_1 AS t ON t.id = parents.parent_id + ), + values AS + ( + SELECT 'foo' AS type, '' AS parent_id, value + FROM input_1 + ), + all AS + ( + SELECT * FROM parents_with_value + UNION ALL + SELECT * FROM values + ) +SELECT type, value +FROM all +INNER JOIN dimensions_1 AS dim1 ON all.value = dim1.value_id +INNER JOIN dimensions_2 AS dim2 ON all.value = dim2.value_id +FORMAT `Null` +SETTINGS max_block_size=65535, max_joined_block_size_rows=65535, max_threads=32; diff --git a/parser/testdata/03630_parquet_bool_bug/ast.json b/parser/testdata/03630_parquet_bool_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03630_parquet_bool_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03630_parquet_bool_bug/metadata.json b/parser/testdata/03630_parquet_bool_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03630_parquet_bool_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03630_parquet_bool_bug/query.sql b/parser/testdata/03630_parquet_bool_bug/query.sql new file mode 100644 index 000000000..e80f1dc30 --- /dev/null +++ b/parser/testdata/03630_parquet_bool_bug/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-parallel, no-fasttest + +insert into function file('03630_parquet_bool_bug.parquet', Parquet, 'tags Array(Bool)') settings engine_file_truncate_on_insert=1 values ([false,false,false,false,false,false,false,false]), ([true,true,true,true,true,true,true,true]); +select sum(tags) from file('03630_parquet_bool_bug.parquet') array join tags settings input_format_parquet_use_native_reader_v3=1; + +-- Try all 256 1-byte masks to verify the bit shifting nonsense in PlainBooleanDecoder. 
+insert into function file('03630_parquet_bool_bug.parquet') select number as n, arrayMap(i -> toBool(bitShiftRight(number, i) % 2 = 1), range(8)) as bits from numbers(256) settings engine_file_truncate_on_insert=1; +select sum(n = arraySum(arrayMap(i -> bitShiftLeft(bits[i+1], i), range(8)))) as ok from file('03630_parquet_bool_bug.parquet') settings input_format_parquet_use_native_reader_v3=1, schema_inference_make_columns_nullable=0; diff --git a/parser/testdata/03631_array_of_empty_tuples/ast.json b/parser/testdata/03631_array_of_empty_tuples/ast.json new file mode 100644 index 000000000..29d27f1ad --- /dev/null +++ b/parser/testdata/03631_array_of_empty_tuples/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001685228, + "rows_read": 16, + "bytes_read": 620 + } +} diff --git a/parser/testdata/03631_array_of_empty_tuples/metadata.json b/parser/testdata/03631_array_of_empty_tuples/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03631_array_of_empty_tuples/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03631_array_of_empty_tuples/query.sql b/parser/testdata/03631_array_of_empty_tuples/query.sql new file mode 100644 index 000000000..c1cd37ebc --- /dev/null +++ b/parser/testdata/03631_array_of_empty_tuples/query.sql @@ -0,0 +1,2 @@ +select [(), ()] from numbers(2); + diff --git a/parser/testdata/03631_hive_columns_not_in_format_header/ast.json b/parser/testdata/03631_hive_columns_not_in_format_header/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03631_hive_columns_not_in_format_header/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03631_hive_columns_not_in_format_header/metadata.json b/parser/testdata/03631_hive_columns_not_in_format_header/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03631_hive_columns_not_in_format_header/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03631_hive_columns_not_in_format_header/query.sql b/parser/testdata/03631_hive_columns_not_in_format_header/query.sql new file mode 100644 index 000000000..895f7aa4d --- /dev/null +++ b/parser/testdata/03631_hive_columns_not_in_format_header/query.sql @@ -0,0 +1,13 @@ +-- Tags: no-parallel, no-fasttest, no-random-settings + +INSERT INTO FUNCTION s3( + s3_conn, + filename='03631', + format=Parquet, + partition_strategy='hive', + partition_columns_in_data_file=1) PARTITION BY (year, country) SELECT 'Brazil' as country, 2025 as year, 1 as id; + +-- distinct because minio 
isn't cleaned up +SELECT count(distinct year) FROM s3(s3_conn, filename='03631/**.parquet', format=RawBLOB) SETTINGS use_hive_partitioning=1; + +DESCRIBE s3(s3_conn, filename='03631/**.parquet', format=RawBLOB) SETTINGS use_hive_partitioning=1; diff --git a/parser/testdata/03631_select_replace_comprehensive/ast.json b/parser/testdata/03631_select_replace_comprehensive/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03631_select_replace_comprehensive/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03631_select_replace_comprehensive/metadata.json b/parser/testdata/03631_select_replace_comprehensive/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03631_select_replace_comprehensive/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03631_select_replace_comprehensive/query.sql b/parser/testdata/03631_select_replace_comprehensive/query.sql new file mode 100644 index 000000000..48dde1baa --- /dev/null +++ b/parser/testdata/03631_select_replace_comprehensive/query.sql @@ -0,0 +1,178 @@ +-- Test SELECT * REPLACE with all SQL clauses using new analyzer +-- This test verifies comprehensive fix for SELECT * REPLACE in all applicable clauses +-- See issue: https://github.com/ClickHouse/ClickHouse/issues/85313 +-- See PR: https://github.com/ClickHouse/ClickHouse/pull/87630 + +SET allow_experimental_analyzer = 1; + +-- Setup test tables +DROP TABLE IF EXISTS test_replace_main; +DROP TABLE IF EXISTS test_replace_merge; + +CREATE TABLE test_replace_main (id UInt32, a UInt32, b UInt32, c String) ENGINE = Memory; +INSERT INTO test_replace_main VALUES (1, 100, 200, 'alpha'), (2, 300, 100, 'beta'), (3, 200, 300, 'gamma'); + +CREATE TABLE test_replace_merge (id UInt32, a UInt32, b UInt32) ENGINE = MergeTree ORDER BY id; +INSERT INTO test_replace_merge VALUES (1, 100, 200), (2, 300, 100), (3, 200, 300); + +-- Test 1: WHERE clause (original bug) +SELECT '=== WHERE clause basic test ==='; +SELECT * REPLACE(a AS b) FROM test_replace_main WHERE b > 150 ORDER BY id; + +-- Test 1a: WHERE clause with arithmetic expressions +SELECT '=== WHERE arithmetic expressions test ==='; +DROP TABLE IF EXISTS test_arith; +CREATE TABLE test_arith (a UInt32, b UInt32) ENGINE = Memory; +INSERT INTO test_arith VALUES (1, 2), (2, 4); +SELECT * REPLACE(a + 10 AS b) FROM test_arith WHERE b = 11; +SELECT * REPLACE(a + 10 AS b) FROM test_arith WHERE b = 12; +SELECT * REPLACE(a * 10 + a AS b) FROM test_arith WHERE b = 11; + +-- Test 1b: String replacement in WHERE +DROP TABLE IF EXISTS test_replace_str; +CREATE TABLE test_replace_str (id UInt32, s String) ENGINE = Memory; +INSERT INTO test_replace_str VALUES (1, 'hello'), (2, 'world'); +SELECT * REPLACE(s || '_suffix' AS s) FROM test_replace_str WHERE s = 'hello_suffix'; + +DROP TABLE test_arith; +DROP TABLE test_replace_str; + +-- Test 2: ORDER BY clause - single column ascending +SELECT '=== ORDER BY clause test ==='; +SELECT * REPLACE(a AS b) FROM test_replace_main ORDER BY b; + +-- Test 3: ORDER BY clause - multiple columns with DESC +SELECT '=== Complex ORDER BY test ==='; +SELECT * REPLACE(a AS b) FROM test_replace_main ORDER BY b DESC, id; + +-- Test 4: PREWHERE clause (MergeTree only) +SELECT '=== PREWHERE clause test ==='; +SELECT * REPLACE(a AS b) FROM test_replace_merge PREWHERE b > 150 ORDER BY id; + +-- Test 5: LIMIT BY clause +SELECT '=== LIMIT BY clause test ==='; +DROP TABLE IF EXISTS test_limit_by; +CREATE TABLE test_limit_by (id UInt32, a 
UInt32, b UInt32) ENGINE = Memory; +INSERT INTO test_limit_by VALUES (1, 100, 200), (2, 100, 300), (3, 200, 400), (4, 200, 500); +SELECT * REPLACE(a AS b) FROM test_limit_by ORDER BY id LIMIT 1 BY b < 200; +DROP TABLE test_limit_by; + +-- Test 6: WINDOW clause +SELECT '=== WINDOW clause test ==='; +SELECT * REPLACE(a AS b), ROW_NUMBER() OVER (ORDER BY b) as rn +FROM test_replace_main +ORDER BY id; + +-- Test 7: Complex expressions in WHERE +SELECT '=== Complex expressions test ==='; +SELECT * REPLACE(a * 2 + 10 AS b) FROM test_replace_main WHERE b > 250 ORDER BY id; + +-- Test 8: Multiple REPLACE columns +SELECT '=== Multiple REPLACE test ==='; +SELECT * REPLACE(a AS b, 'replaced_' || c AS c) +FROM test_replace_main +WHERE b > 150 +ORDER BY id; + +-- Test 9: Subquery with REPLACE +SELECT '=== Subquery test ==='; +SELECT * FROM ( + SELECT * REPLACE(a AS b) FROM test_replace_main WHERE b > 150 +) ORDER BY id; + +-- Test 10: String operations in WHERE +SELECT '=== String operations test ==='; +SELECT * REPLACE(c || '_suffix' AS c) +FROM test_replace_main +WHERE c = 'beta_suffix' +ORDER BY id; + +-- Test 11: Multiple clauses together +SELECT '=== Multiple clauses test ==='; +SELECT * REPLACE(a AS b) FROM test_replace_main +WHERE b > 150 +ORDER BY b +LIMIT 2; + +-- Test 12: Nested expressions in clauses +SELECT '=== Nested expressions test ==='; +SELECT * REPLACE(a + 100 AS b) FROM test_replace_main WHERE b + 50 > 200 ORDER BY b; + +-- Test 14: GROUP BY in outer query with replaced values from subquery +SELECT '=== GROUP BY outer query test ==='; +DROP TABLE IF EXISTS test_group_by; +CREATE TABLE test_group_by (id UInt32, category String, value UInt32) ENGINE = Memory; +INSERT INTO test_group_by VALUES (1, 'A', 10), (2, 'A', 20), (3, 'B', 30), (4, 'B', 40); +SELECT category, sum(value) as total +FROM (SELECT * REPLACE(value * 10 AS value) FROM test_group_by) +GROUP BY category +ORDER BY category; +DROP TABLE test_group_by; + +-- Test 15: GROUP BY in subquery with replaced values +SELECT '=== GROUP BY in subquery test ==='; +DROP TABLE IF EXISTS test_group_by_sub; +CREATE TABLE test_group_by_sub (category String, value UInt32) ENGINE = Memory; +INSERT INTO test_group_by_sub VALUES ('A', 10), ('A', 20), ('B', 30), ('B', 40); +SELECT category, total +FROM ( + SELECT category || '_modified' AS category, sum(value) as total + FROM test_group_by_sub + GROUP BY category +) +ORDER BY category; +DROP TABLE test_group_by_sub; + +-- Test 16: GROUP BY clause with replaced column directly +SELECT '=== GROUP BY direct test ==='; +DROP TABLE IF EXISTS test_group_by_direct; +CREATE TABLE test_group_by_direct (category String, value UInt32) ENGINE = Memory; +INSERT INTO test_group_by_direct VALUES ('A', 10), ('A', 20), ('B', 30), ('B', 40); +SELECT category || '_modified' AS category, sum(value) as total +FROM test_group_by_direct +GROUP BY category +ORDER BY category; +DROP TABLE test_group_by_direct; + +-- Test 17: HAVING in outer query with replaced values from subquery +SELECT '=== HAVING outer query test ==='; +DROP TABLE IF EXISTS test_having; +CREATE TABLE test_having (id UInt32, category String, amount UInt32) ENGINE = Memory; +INSERT INTO test_having VALUES (1, 'X', 50), (2, 'X', 75), (3, 'Y', 100), (4, 'Y', 125); +SELECT category, sum(amount) as total +FROM (SELECT * REPLACE(amount + 100 AS amount) FROM test_having) +GROUP BY category +HAVING total > 200 +ORDER BY category; +DROP TABLE test_having; + +-- Test 18: HAVING in subquery with replaced values +SELECT '=== HAVING in subquery test ==='; +DROP 
TABLE IF EXISTS test_having_sub; +CREATE TABLE test_having_sub (category String, amount UInt32) ENGINE = Memory; +INSERT INTO test_having_sub VALUES ('X', 50), ('X', 75), ('Y', 100), ('Y', 125); +SELECT category, total +FROM ( + SELECT category, sum(amount + 100) as total + FROM test_having_sub + GROUP BY category + HAVING total > 200 +) +ORDER BY category; +DROP TABLE test_having_sub; + +-- Test 19: HAVING clause with replaced column directly +SELECT '=== HAVING direct test ==='; +DROP TABLE IF EXISTS test_having_direct; +CREATE TABLE test_having_direct (category String, amount UInt32) ENGINE = Memory; +INSERT INTO test_having_direct VALUES ('X', 50), ('X', 75), ('Y', 100), ('Y', 125); +SELECT category, sum(amount + 100) as total +FROM test_having_direct +GROUP BY category +HAVING total > 200 +ORDER BY category; +DROP TABLE test_having_direct; + +-- Cleanup +DROP TABLE test_replace_main; +DROP TABLE test_replace_merge; \ No newline at end of file diff --git a/parser/testdata/03632_default_minmax_indices_alter/ast.json b/parser/testdata/03632_default_minmax_indices_alter/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03632_default_minmax_indices_alter/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03632_default_minmax_indices_alter/metadata.json b/parser/testdata/03632_default_minmax_indices_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03632_default_minmax_indices_alter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03632_default_minmax_indices_alter/query.sql b/parser/testdata/03632_default_minmax_indices_alter/query.sql new file mode 100644 index 000000000..6357f3dea --- /dev/null +++ b/parser/testdata/03632_default_minmax_indices_alter/query.sql @@ -0,0 +1,13 @@ +-- Test for issue #75677 + +drop table if exists t; + +create table t (a UInt64, s String) engine = MergeTree order by tuple() settings add_minmax_index_for_numeric_columns = 1; + +show create table t; + +alter table t drop column s; + +show create table t; + +drop table t; \ No newline at end of file diff --git a/parser/testdata/03632_insert_select_cte_bug/ast.json b/parser/testdata/03632_insert_select_cte_bug/ast.json new file mode 100644 index 000000000..57d9ed3de --- /dev/null +++ b/parser/testdata/03632_insert_select_cte_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001398329, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03632_insert_select_cte_bug/metadata.json b/parser/testdata/03632_insert_select_cte_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03632_insert_select_cte_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03632_insert_select_cte_bug/query.sql b/parser/testdata/03632_insert_select_cte_bug/query.sql new file mode 100644 index 000000000..0692f5d6b --- /dev/null +++ b/parser/testdata/03632_insert_select_cte_bug/query.sql @@ -0,0 +1,17 @@ +SET enable_analyzer=1; -- parallel distributed insert select for replicated tables works only with analyzer +SET parallel_distributed_insert_select=2; +SET enable_global_with_statement=1; + +DROP TABLE IF EXISTS test_insert SYNC; + +CREATE TABLE test_insert (c1 String, c2 UInt8) +ENGINE = 
ReplicatedMergeTree('/clickhouse/tables/{database}/test_03632/tables/test_insert', '{replica}') +ORDER BY (); + +INSERT INTO test_insert +WITH cte_test AS (SELECT '1234', 1) +SELECT * FROM cte_test; + +SELECT count() FROM test_insert; + +DROP TABLE test_insert; diff --git a/parser/testdata/03632_join_logical_assert_85403/ast.json b/parser/testdata/03632_join_logical_assert_85403/ast.json new file mode 100644 index 000000000..e1da48b95 --- /dev/null +++ b/parser/testdata/03632_join_logical_assert_85403/ast.json @@ -0,0 +1,118 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias a) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1 (alias x)" + }, + { + "explain": " Literal UInt64_1 (alias y)" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias b) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias y)" + }, + { + "explain": " TableJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier y" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function round (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " Identifier b.y" + } + ], + + "rows": 32, + + "statistics": + { + "elapsed": 0.001686581, + "rows_read": 32, + "bytes_read": 1295 + } +} diff --git a/parser/testdata/03632_join_logical_assert_85403/metadata.json b/parser/testdata/03632_join_logical_assert_85403/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03632_join_logical_assert_85403/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03632_join_logical_assert_85403/query.sql b/parser/testdata/03632_join_logical_assert_85403/query.sql new file mode 100644 index 000000000..94043ca8d --- /dev/null +++ b/parser/testdata/03632_join_logical_assert_85403/query.sql @@ -0,0 +1,4 @@ +SELECT 1 FROM (SELECT 1 x, 1 y) a JOIN (SELECT 1 y) b USING (y) WHERE round(*) = b.y; + +SET query_plan_use_new_logical_join_step = 0; +SELECT 1 FROM (SELECT 1 x, 1 y) a JOIN (SELECT 1 y) b USING (y) WHERE round(*) = b.y; diff --git a/parser/testdata/03632_lowcard_join/ast.json b/parser/testdata/03632_lowcard_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03632_lowcard_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03632_lowcard_join/metadata.json 
b/parser/testdata/03632_lowcard_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03632_lowcard_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03632_lowcard_join/query.sql b/parser/testdata/03632_lowcard_join/query.sql new file mode 100644 index 000000000..863a66e2f --- /dev/null +++ b/parser/testdata/03632_lowcard_join/query.sql @@ -0,0 +1,16 @@ +-- Force new analyzer because the old one doesn't support multiple USING clauses in a query +SET allow_suspicious_low_cardinality_types = 1, enable_analyzer = 1; + +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t0 (x Int, y LowCardinality(Nullable(Int)) ALIAS x) ENGINE = MergeTree ORDER BY x; +CREATE TABLE t1 (y LowCardinality(Int)) ENGINE = MergeTree ORDER BY y; +CREATE TABLE t2 (y Nullable(Int)) ENGINE = MergeTree ORDER BY y SETTINGS allow_nullable_key = 1; + +SELECT t1.* FROM t0 FULL JOIN t1 USING (y) JOIN t2 USING (y) PREWHERE toLowCardinality(1); + +DROP TABLE t0; +DROP TABLE t1; +DROP TABLE t2; diff --git a/parser/testdata/03632_temporary_table_not_allowed_columns/ast.json b/parser/testdata/03632_temporary_table_not_allowed_columns/ast.json new file mode 100644 index 000000000..b00cb8aea --- /dev/null +++ b/parser/testdata/03632_temporary_table_not_allowed_columns/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 3)" + }, + { + "explain": " Identifier test" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration _row_exists (children 1)" + }, + { + "explain": " DataType UInt32" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001283945, + "rows_read": 10, + "bytes_read": 348 + } +} diff --git a/parser/testdata/03632_temporary_table_not_allowed_columns/metadata.json b/parser/testdata/03632_temporary_table_not_allowed_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03632_temporary_table_not_allowed_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03632_temporary_table_not_allowed_columns/query.sql b/parser/testdata/03632_temporary_table_not_allowed_columns/query.sql new file mode 100644 index 000000000..509992730 --- /dev/null +++ b/parser/testdata/03632_temporary_table_not_allowed_columns/query.sql @@ -0,0 +1,3 @@ +create temporary table test (_row_exists UInt32) engine=MergeTree order by tuple(); -- {serverError ILLEGAL_COLUMN} +create temporary table test (d Dynamic) engine=Log(); -- {serverError ILLEGAL_COLUMN} + diff --git a/parser/testdata/03633_keeepr_host_server_setting/ast.json b/parser/testdata/03633_keeepr_host_server_setting/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03633_keeepr_host_server_setting/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03633_keeepr_host_server_setting/metadata.json b/parser/testdata/03633_keeepr_host_server_setting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03633_keeepr_host_server_setting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03633_keeepr_host_server_setting/query.sql b/parser/testdata/03633_keeepr_host_server_setting/query.sql new file mode 100644 index 000000000..f60a9f6b2 --- /dev/null +++ b/parser/testdata/03633_keeepr_host_server_setting/query.sql @@ -0,0 +1,3 @@ +-- Different values for tests with DatabaseReplicated and without. +SELECT value == '127.0.0.1:9181' OR value == 'localhost:9181,localhost:19181,localhost:29181' +FROM system.server_settings WHERE name = 'keeper_hosts'; diff --git a/parser/testdata/03633_mv_squash_parallel_inserts/ast.json b/parser/testdata/03633_mv_squash_parallel_inserts/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03633_mv_squash_parallel_inserts/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03633_mv_squash_parallel_inserts/metadata.json b/parser/testdata/03633_mv_squash_parallel_inserts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03633_mv_squash_parallel_inserts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03633_mv_squash_parallel_inserts/query.sql b/parser/testdata/03633_mv_squash_parallel_inserts/query.sql new file mode 100644 index 000000000..8b96f34e6 --- /dev/null +++ b/parser/testdata/03633_mv_squash_parallel_inserts/query.sql @@ -0,0 +1,31 @@ +-- Tags: no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage, no-parallel-replicas, no-flaky-check +-- - debug build adds CheckTokenTransform + +SET max_threads=2; +SET max_insert_threads=2; +SET parallel_view_processing=1; + +-- { echo } + +DROP TABLE IF EXISTS 03633_mv_src; +DROP TABLE IF EXISTS 03633_mv_dst; +DROP VIEW IF EXISTS 03633_mv; +CREATE TABLE 03633_mv_src (key Int) Engine=MergeTree ORDER BY (); +CREATE TABLE 03633_mv_dst (key Int) Engine=MergeTree ORDER BY (); +CREATE MATERIALIZED VIEW 03633_mv TO 03633_mv_dst AS SELECT * FROM 03633_mv_src; + +SET deduplicate_blocks_in_dependent_materialized_views=0; +SET materialized_views_squash_parallel_inserts=1; +EXPLAIN PIPELINE INSERT INTO 03633_mv_src SELECT * FROM system.one; + +SET deduplicate_blocks_in_dependent_materialized_views=0; +SET materialized_views_squash_parallel_inserts=0; +EXPLAIN PIPELINE INSERT INTO 03633_mv_src SELECT * FROM system.one; + +SET deduplicate_blocks_in_dependent_materialized_views=1; +SET materialized_views_squash_parallel_inserts=1; +EXPLAIN PIPELINE INSERT INTO 03633_mv_src SELECT * FROM system.one; + +DROP VIEW 03633_mv; +DROP TABLE 03633_mv_src; +DROP TABLE 03633_mv_dst; diff --git a/parser/testdata/03633_negative_limit_offset/ast.json b/parser/testdata/03633_negative_limit_offset/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03633_negative_limit_offset/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03633_negative_limit_offset/metadata.json b/parser/testdata/03633_negative_limit_offset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03633_negative_limit_offset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03633_negative_limit_offset/query.sql b/parser/testdata/03633_negative_limit_offset/query.sql new file mode 100644 index 000000000..adbfc5521 --- /dev/null +++ b/parser/testdata/03633_negative_limit_offset/query.sql @@ -0,0 +1,206 @@ +SET enable_analyzer=0; +SELECT 'Old Analyzer:'; + +SELECT 'Negative Limit Only'; +SELECT number FROM numbers(10) ORDER BY number LIMIT -1; +SELECT number FROM numbers(10)
ORDER BY number LIMIT -3; +SELECT number FROM numbers(10) ORDER BY number LIMIT -100; +SELECT number FROM numbers(10) ORDER BY number LIMIT -0; +SELECT number FROM numbers(10) ORDER BY number LIMIT -9223372036854775808; +SELECT number FROM numbers(1000000) ORDER BY number LIMIT -1; + +SELECT 'Negative Offset Only'; +SELECT number FROM numbers(10) ORDER BY number OFFSET -1; +SELECT number FROM numbers(10) ORDER BY number OFFSET -3; +SELECT number FROM numbers(10) ORDER BY number OFFSET -100; +SELECT number FROM numbers(10) ORDER BY number OFFSET -0; +SELECT number FROM numbers(10) ORDER BY number OFFSET -9223372036854775808; +SELECT number FROM numbers(1000000) ORDER BY number OFFSET -999999; +SELECT number FROM numbers(1000000) OFFSET -1000000; + +SELECT 'Negative Limit and Negative Offset'; +SELECT number FROM numbers(10) ORDER BY number LIMIT -1 OFFSET -5; +SELECT number FROM numbers(10) ORDER BY number LIMIT -3 OFFSET -9; +SELECT number FROM numbers(10) ORDER BY number LIMIT -2 OFFSET -15; +SELECT number FROM numbers(1000) ORDER BY number LIMIT -5 OFFSET -4; +SELECT number FROM numbers(100000) ORDER BY number LIMIT -5 OFFSET -1000; + +SELECT 'Negative Limit and Positive Offset'; +SELECT number FROM numbers(10) ORDER BY number LIMIT -1 OFFSET 5; +SELECT number FROM numbers(10) ORDER BY number LIMIT -3 OFFSET 8; +SELECT number FROM numbers(10) ORDER BY number LIMIT -8 OFFSET 3; +SELECT number FROM numbers(100000) LIMIT -5 OFFSET 100000; + +SELECT 'Positive Limit and Negative Offset'; +SELECT number FROM numbers(10) ORDER BY number LIMIT 1 OFFSET -5; +SELECT number FROM numbers(10) ORDER BY number LIMIT 3 OFFSET -8; +SELECT number FROM numbers(10) ORDER BY number LIMIT 8 OFFSET -3; + +SELECT 'Misc'; +SELECT DISTINCT number % 8 AS x FROM numbers(120) ORDER BY x LIMIT -3 OFFSET -2; +SELECT DISTINCT number % 80 AS x FROM numbers(120) ORDER BY x LIMIT -3 OFFSET 50; +SELECT * FROM system.numbers_mt WHERE number = 1000000 OFFSET -1; +SELECT * FROM system.numbers_mt WHERE number = 1000000 LIMIT -1; +SELECT * FROM system.numbers_mt WHERE number = 1000000 LIMIT -1 OFFSET -1; +SELECT number FROM numbers(1000000) ORDER BY number LIMIT -1; +SELECT DISTINCT number FROM numbers(1000000) ORDER BY number LIMIT -1 OFFSET -999999; +SELECT number FROM numbers(1000000) ORDER BY number LIMIT -1 OFFSET -999999; +SELECT DISTINCT number FROM numbers(20) LIMIT 18446744073709551615 OFFSET -446744073709551615; + +SELECT 'Double Column'; +DROP TABLE IF EXISTS num_tab; +CREATE TABLE num_tab +( + `id` UInt8, + `val` UInt32 +) +ENGINE = MergeTree +ORDER BY (id, val) +AS SELECT + number % 2 AS id, + number AS val +FROM numbers(20); + +SELECT if((count() = 5) AND (min(val) = 15) AND (max(val) = 19) AND (sum(val) = 85) AND (uniqExact(id) = 2), 'OK', 'FAIL') +FROM +( + SELECT + id, + val + FROM num_tab ORDER BY val ASC LIMIT -5 +); + +SELECT 'Big Tables'; +DROP TABLE IF EXISTS num_tab; +CREATE TABLE num_tab +ENGINE = MergeTree +ORDER BY number +AS SELECT number FROM numbers(1000000); + +DROP TABLE IF EXISTS modified_tab; +CREATE TABLE modified_tab ENGINE=MergeTree() +ORDER BY number +AS SELECT number FROM +(SELECT number FROM num_tab ORDER BY number OFFSET -10); + +SELECT count(number), sum(number) FROM modified_tab; + + +DROP TABLE IF EXISTS num_tab; +CREATE TABLE num_tab +ENGINE = MergeTree +ORDER BY number +AS SELECT number FROM numbers(1000000); + +DROP TABLE IF EXISTS modified_tab; +CREATE TABLE modified_tab ENGINE=MergeTree() +ORDER BY number +AS SELECT number FROM +(SELECT number FROM num_tab ORDER BY number LIMIT 
-10 OFFSET -100000); + +SELECT number FROM modified_tab; + +SET enable_analyzer=1; +SELECT 'New Analyzer:'; +SELECT 'Negative Limit Only'; +SELECT number FROM numbers(10) ORDER BY number LIMIT -1; +SELECT number FROM numbers(10) ORDER BY number LIMIT -3; +SELECT number FROM numbers(10) ORDER BY number LIMIT -100; +SELECT number FROM numbers(10) ORDER BY number LIMIT -0; +SELECT number FROM numbers(10) ORDER BY number LIMIT -9223372036854775808; +SELECT number FROM numbers(1000000) ORDER BY number LIMIT -1; + +SELECT 'Negative Offset Only'; +SELECT number FROM numbers(10) ORDER BY number OFFSET -1; +SELECT number FROM numbers(10) ORDER BY number OFFSET -3; +SELECT number FROM numbers(10) ORDER BY number OFFSET -100; +SELECT number FROM numbers(10) ORDER BY number OFFSET -0; +SELECT number FROM numbers(10) ORDER BY number OFFSET -9223372036854775808; +SELECT number FROM numbers(1000000) ORDER BY number OFFSET -999999; +SELECT number FROM numbers(1000000) OFFSET -1000000; + +SELECT 'Negative Limit and Negative Offset'; +SELECT number FROM numbers(10) ORDER BY number LIMIT -1 OFFSET -5; +SELECT number FROM numbers(10) ORDER BY number LIMIT -3 OFFSET -9; +SELECT number FROM numbers(10) ORDER BY number LIMIT -2 OFFSET -15; +SELECT number FROM numbers(1000) ORDER BY number LIMIT -5 OFFSET -4; +SELECT number FROM numbers(100000) ORDER BY number LIMIT -5 OFFSET -1000; + +SELECT 'Negative Limit and Positive Offset'; +SELECT number FROM numbers(10) ORDER BY number LIMIT -1 OFFSET 5; +SELECT number FROM numbers(10) ORDER BY number LIMIT -3 OFFSET 8; +SELECT number FROM numbers(10) ORDER BY number LIMIT -8 OFFSET 3; +SELECT number FROM numbers(100000) LIMIT -5 OFFSET 100000; + +SELECT 'Positive Limit and Negative Offset'; +SELECT number FROM numbers(10) ORDER BY number LIMIT 1 OFFSET -5; +SELECT number FROM numbers(10) ORDER BY number LIMIT 3 OFFSET -8; +SELECT number FROM numbers(10) ORDER BY number LIMIT 8 OFFSET -3; + +SELECT 'Misc'; +SELECT DISTINCT number % 8 AS x FROM numbers(120) ORDER BY x LIMIT -3 OFFSET -2; +SELECT DISTINCT number % 80 AS x FROM numbers(120) ORDER BY x LIMIT -3 OFFSET 50; +SELECT * FROM system.numbers_mt WHERE number = 1000000 OFFSET -1; +SELECT * FROM system.numbers_mt WHERE number = 1000000 LIMIT -1; +SELECT * FROM system.numbers_mt WHERE number = 1000000 LIMIT -1 OFFSET -1; +SELECT number FROM numbers(1000000) ORDER BY number LIMIT -1; +SELECT DISTINCT number FROM numbers(1000000) ORDER BY number LIMIT -1 OFFSET -999999; +SELECT number FROM numbers(1000000) ORDER BY number LIMIT -1 OFFSET -999999; +SELECT DISTINCT number FROM numbers(20) LIMIT 18446744073709551615 OFFSET -446744073709551615; + +SELECT 'Double Column'; +DROP TABLE IF EXISTS num_tab; +CREATE TABLE num_tab +( + `id` UInt8, + `val` UInt32 +) +ENGINE = MergeTree +ORDER BY (id, val) +AS SELECT + number % 2 AS id, + number AS val +FROM numbers(20); + +SELECT if((count() = 5) AND (min(val) = 15) AND (max(val) = 19) AND (sum(val) = 85) AND (uniqExact(id) = 2), 'OK', 'FAIL') +FROM +( + SELECT + id, + val + FROM num_tab ORDER BY val ASC LIMIT -5 +); + +SELECT 'Big Tables'; +DROP TABLE IF EXISTS num_tab; +CREATE TABLE num_tab +ENGINE = MergeTree +ORDER BY number +AS SELECT number FROM numbers(1000000); + +DROP TABLE IF EXISTS modified_tab; +CREATE TABLE modified_tab ENGINE=MergeTree() +ORDER BY number +AS SELECT number FROM +(SELECT number FROM num_tab ORDER BY number OFFSET -10); + +SELECT count(number), sum(number) FROM modified_tab; + + +DROP TABLE IF EXISTS num_tab; +CREATE TABLE num_tab +ENGINE = MergeTree +ORDER 
BY number +AS SELECT number FROM numbers(1000000); + +DROP TABLE IF EXISTS modified_tab; +CREATE TABLE modified_tab ENGINE=MergeTree() +ORDER BY number +AS SELECT number FROM +(SELECT number FROM num_tab ORDER BY number LIMIT -10 OFFSET -100000); + +SELECT number FROM modified_tab; + +SELECT DISTINCT number +FROM (SELECT number FROM numbers_mt(1000000) LIMIT -214748) +WHERE 0; diff --git a/parser/testdata/03633_set_index_bulk_filtering/ast.json b/parser/testdata/03633_set_index_bulk_filtering/ast.json new file mode 100644 index 000000000..0e8949c44 --- /dev/null +++ b/parser/testdata/03633_set_index_bulk_filtering/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery tbulk (children 1)" + }, + { + "explain": " Identifier tbulk" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00149726, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/03633_set_index_bulk_filtering/metadata.json b/parser/testdata/03633_set_index_bulk_filtering/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03633_set_index_bulk_filtering/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03633_set_index_bulk_filtering/query.sql b/parser/testdata/03633_set_index_bulk_filtering/query.sql new file mode 100644 index 000000000..d3502a152 --- /dev/null +++ b/parser/testdata/03633_set_index_bulk_filtering/query.sql @@ -0,0 +1,11 @@ +DROP TABLE IF EXISTS tbulk; +CREATE TABLE tbulk ( + g UInt64, s Int32, k Int64, x UInt64, + INDEX gset g TYPE set (0) GRANULARITY 100 +) +engine=MergeTree +order by (x, k, s) +as select number%3, 1, 4, number%10 from numbers(1e6); + +select count(x) from tbulk where g = 1 and k = 1 settings secondary_indices_enable_bulk_filtering=0; +select count(x) from tbulk where g = 1 and k = 1 settings secondary_indices_enable_bulk_filtering=1; \ No newline at end of file diff --git a/parser/testdata/03634_subcolumns_in_temporary_table_parallel_replicas/ast.json b/parser/testdata/03634_subcolumns_in_temporary_table_parallel_replicas/ast.json new file mode 100644 index 000000000..c3d00fe3b --- /dev/null +++ b/parser/testdata/03634_subcolumns_in_temporary_table_parallel_replicas/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001182003, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03634_subcolumns_in_temporary_table_parallel_replicas/metadata.json b/parser/testdata/03634_subcolumns_in_temporary_table_parallel_replicas/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03634_subcolumns_in_temporary_table_parallel_replicas/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03634_subcolumns_in_temporary_table_parallel_replicas/query.sql b/parser/testdata/03634_subcolumns_in_temporary_table_parallel_replicas/query.sql new file mode 100644 index 000000000..49a8fe9e3 --- /dev/null +++ b/parser/testdata/03634_subcolumns_in_temporary_table_parallel_replicas/query.sql @@ -0,0 +1,7 @@ +SET enable_analyzer=1; +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Nullable(Int)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/test_03634_{database}/t0', 'r1') ORDER BY tuple(); +INSERT INTO TABLE t0 (c0) VALUES (1); +SELECT tx.c0.null FROM t0 tx GLOBAL RIGHT JOIN t0 AS ty ON tx.c0 = ty.c0 SETTINGS 
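+-- (editorial note) The SETTINGS clause below enables the parallel-replicas code path; with a
+-- GLOBAL join, one side is shipped to the replicas as a temporary table, and the test reads the
+-- tx.c0.null subcolumn through that path, hence the test name.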
allow_experimental_parallel_reading_from_replicas = 1, cluster_for_parallel_replicas = 'test_cluster_one_shard_two_replicas', max_parallel_replicas=10; +DROP TABLE t0; + diff --git a/parser/testdata/03635_in_function_different_types_many_columns/ast.json b/parser/testdata/03635_in_function_different_types_many_columns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03635_in_function_different_types_many_columns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03635_in_function_different_types_many_columns/metadata.json b/parser/testdata/03635_in_function_different_types_many_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03635_in_function_different_types_many_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03635_in_function_different_types_many_columns/query.sql b/parser/testdata/03635_in_function_different_types_many_columns/query.sql new file mode 100644 index 000000000..1be37f405 --- /dev/null +++ b/parser/testdata/03635_in_function_different_types_many_columns/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-parallel-replicas, no-random-merge-tree-settings +-- followup to 02882_primary_key_index_in_function_different_types + +DROP TABLE IF EXISTS test_table; +CREATE TABLE test_table +( + id UInt64, + value UInt64 +) ENGINE=MergeTree ORDER BY (id, value) SETTINGS index_granularity = 8192, index_granularity_bytes = '1Mi'; + +INSERT INTO test_table SELECT number, number FROM numbers(10); + +EXPLAIN indexes = 1, description=0 SELECT id FROM test_table WHERE (id, value) IN (SELECT '5', number FROM numbers(5)); +EXPLAIN indexes = 1, description=0 SELECT id FROM test_table WHERE (id, value) IN (SELECT 'not a number', number FROM numbers(5)); +EXPLAIN indexes = 1, description=0 SELECT id FROM test_table WHERE (id, value) IN (SELECT 42, 'not a number' UNION ALL SELECT 5, toString(number) FROM numbers(5)); +EXPLAIN indexes = 1, description=0 SELECT id FROM test_table WHERE (id, value) IN (SELECT '42', 'not a number' UNION ALL SELECT 'not a number', '42' FROM numbers(5)); + +DROP TABLE test_table; diff --git a/parser/testdata/03636_empty_projection_block/ast.json b/parser/testdata/03636_empty_projection_block/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03636_empty_projection_block/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03636_empty_projection_block/metadata.json b/parser/testdata/03636_empty_projection_block/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03636_empty_projection_block/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03636_empty_projection_block/query.sql b/parser/testdata/03636_empty_projection_block/query.sql new file mode 100644 index 000000000..a734c0c7d --- /dev/null +++ b/parser/testdata/03636_empty_projection_block/query.sql @@ -0,0 +1,26 @@ +-- This test would hit a LOGICAL_ERROR during merge +CREATE TABLE post_state +( + `ts` DateTime, + `id` Int64, + `state` Nullable(UInt8) TTL ts + INTERVAL 1 MONTH, + PROJECTION p_digest_posts_state + ( + SELECT + id, + argMax(state, ts) AS state + GROUP BY id + ) +) +ENGINE = MergeTree() +ORDER BY id +TTL ts + toIntervalSecond(0) WHERE state IS NULL +SETTINGS index_granularity = 8192, deduplicate_merge_projection_mode='rebuild'; + +SYSTEM STOP MERGES post_state; +INSERT INTO post_state VALUES ('2024-01-01 00:00:00', 1, NULL); +INSERT INTO 
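+-- (editorial note) Merges are stopped so the four single-row inserts land in separate parts.
+-- On OPTIMIZE, the row TTL (ts + 0 seconds WHERE state IS NULL) expires the NULL-state rows;
+-- rebuilding the projection from the resulting empty block is presumably what raised the
+-- LOGICAL_ERROR mentioned in the header comment.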
post_state VALUES ('2024-01-01 00:00:00', 1, NULL); +INSERT INTO post_state VALUES ('2024-01-01 00:00:00', 1, 1); +INSERT INTO post_state VALUES ('2024-01-01 00:00:00', 1, NULL); +SYSTEM START MERGES post_state; +OPTIMIZE TABLE post_state; \ No newline at end of file diff --git a/parser/testdata/03636_index_analysis_with_session_tz/ast.json b/parser/testdata/03636_index_analysis_with_session_tz/ast.json new file mode 100644 index 000000000..4769d1015 --- /dev/null +++ b/parser/testdata/03636_index_analysis_with_session_tz/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001207224, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03636_index_analysis_with_session_tz/metadata.json b/parser/testdata/03636_index_analysis_with_session_tz/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03636_index_analysis_with_session_tz/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03636_index_analysis_with_session_tz/query.sql b/parser/testdata/03636_index_analysis_with_session_tz/query.sql new file mode 100644 index 000000000..c897ab523 --- /dev/null +++ b/parser/testdata/03636_index_analysis_with_session_tz/query.sql @@ -0,0 +1,103 @@ +SET session_timezone = 'UTC'; +-- For explain with indexes and key condition values verification +SET parallel_replicas_local_plan = 1; + +DROP TABLE IF EXISTS 03636_data_pk, 03636_data_partitions, 03636_data_parsed; + +CREATE TABLE 03636_data_pk (ts DateTime) ENGINE = MergeTree ORDER BY toStartOfDay(ts) +AS +SELECT 1756882680; + +SELECT '-- PK UTC timezone'; + +SELECT count() FROM 03636_data_pk WHERE ts = 1756882680; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT count() FROM 03636_data_pk WHERE ts = 1756882680 +) +WHERE trim(explain) ilike 'condition: %' + OR trim(explain) ilike 'parts: %' + OR trim(explain) ilike 'granules: %'; + +SELECT ''; +SELECT '-- PK EST timezone'; + +SELECT count() FROM 03636_data_pk WHERE ts = 1756882680 SETTINGS session_timezone = 'EST'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT count() FROM 03636_data_pk WHERE ts = 1756882680 +) +WHERE trim(explain) ilike 'condition: %' + OR trim(explain) ilike 'parts: %' + OR trim(explain) ilike 'granules: %' +SETTINGS session_timezone = 'EST'; + +DROP TABLE 03636_data_pk; + +CREATE TABLE 03636_data_partitions (ts DateTime) ENGINE = MergeTree ORDER BY tuple() PARTITION BY toStartOfDay(ts) +AS +SELECT 1756882680; + +SELECT ''; +SELECT '-- Partitions UTC timezone'; + +SELECT count() FROM 03636_data_partitions WHERE ts = 1756882680; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT count() FROM 03636_data_partitions WHERE ts = 1756882680 +) +WHERE trim(explain) ilike 'condition: %' + OR trim(explain) ilike 'parts: %' + OR trim(explain) ilike 'granules: %'; + +SELECT ''; +SELECT '-- Partitions EST timezone'; + +SELECT count() FROM 03636_data_partitions WHERE ts = 1756882680 SETTINGS session_timezone = 'EST'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT count() FROM 03636_data_partitions WHERE ts = 1756882680 +) +WHERE trim(explain) ilike 'condition: %' + OR trim(explain) ilike 'parts: %' + OR trim(explain) ilike 'granules: %' +SETTINGS session_timezone = 'EST'; + +DROP TABLE 03636_data_partitions; + +CREATE TABLE 03636_data_parsed (ts String) ENGINE = MergeTree ORDER BY toStartOfDay(toDateTime(ts)) +AS +SELECT 
'2025-09-02 19:00:00'; + +SELECT ''; +SELECT '-- Partitions UTC timezone'; + +SELECT count() FROM 03636_data_parsed WHERE ts = '2025-09-02 19:00:00'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT count() FROM 03636_data_parsed WHERE ts = '2025-09-02 19:00:00' +) +WHERE trim(explain) ilike 'condition: %' + OR trim(explain) ilike 'parts: %' + OR trim(explain) ilike 'granules: %'; + +SELECT ''; +SELECT '-- Partitions EST timezone'; + +SELECT count() FROM 03636_data_parsed WHERE ts = '2025-09-02 19:00:00' SETTINGS session_timezone = 'EST'; + +SELECT trim(explain) +FROM ( + EXPLAIN indexes = 1 SELECT count() FROM 03636_data_parsed WHERE ts = '2025-09-02 19:00:00' +) +WHERE trim(explain) ilike 'condition: %' + OR trim(explain) ilike 'parts: %' + OR trim(explain) ilike 'granules: %' +SETTINGS session_timezone = 'EST'; + +DROP TABLE 03636_data_parsed; diff --git a/parser/testdata/03636_storage_alias_basic/ast.json b/parser/testdata/03636_storage_alias_basic/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03636_storage_alias_basic/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03636_storage_alias_basic/metadata.json b/parser/testdata/03636_storage_alias_basic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03636_storage_alias_basic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03636_storage_alias_basic/query.sql b/parser/testdata/03636_storage_alias_basic/query.sql new file mode 100644 index 000000000..92d633db0 --- /dev/null +++ b/parser/testdata/03636_storage_alias_basic/query.sql @@ -0,0 +1,266 @@ +-- { echo } +-- Tags: long + +DROP TABLE IF EXISTS source_table; +DROP TABLE IF EXISTS alias_1; +DROP TABLE IF EXISTS alias_2; +DROP TABLE IF EXISTS alias_3; +DROP TABLE IF EXISTS alias_4; + +SET allow_experimental_alias_table_engine = 1; + +-- Create source table +CREATE TABLE source_table (id UInt32, value String) ENGINE = MergeTree ORDER BY id; + +INSERT INTO source_table VALUES (1, 'one'), (2, 'two'), (3, 'three'); + +-- Test: Basic alias creation +SELECT 'Test Basic alias creation'; +CREATE TABLE alias_1 ENGINE = Alias('source_table'); +SELECT * FROM alias_1 ORDER BY id; + +-- Test: Alias with database name +SELECT 'Test Alias with database name'; +CREATE TABLE alias_2 ENGINE = Alias(currentDatabase(), 'source_table'); +SELECT * FROM alias_2 ORDER BY id; + +-- Test: Insert through alias +SELECT 'Test Insert through alias_1'; +INSERT INTO alias_1 VALUES (4, 'four'); +SELECT * FROM source_table ORDER BY id; + +-- Test: Insert through alias_2 +SELECT 'Test Insert through alias_2'; +INSERT INTO alias_2 VALUES (5, 'five'); +SELECT * FROM source_table ORDER BY id; + +-- Test: ALTER ADD COLUMN +SELECT 'Test ALTER ADD COLUMN'; +ALTER TABLE alias_1 ADD COLUMN status String DEFAULT 'active'; +SELECT id, value, status FROM source_table ORDER BY id; + +-- Test: INSERT with new column +SELECT 'Test Insert with new column'; +INSERT INTO alias_1 VALUES (6, 'six', 'inactive'); +SELECT * FROM source_table ORDER BY id; + +-- Test: Truncate +SELECT 'Test TRUNCATE'; +TRUNCATE TABLE alias_1; +SELECT count() FROM source_table; + +-- Re-insert data +INSERT INTO source_table VALUES (1, 'one', 'active'), (2, 'two', 'active'); + +-- Test: Rename alias +SELECT 'Test RENAME alias'; +RENAME TABLE alias_1 TO alias_3; +SELECT * FROM alias_3 ORDER BY id; + +-- Test: Drop alias (should not affect source table) +SELECT 'Test DROP alias'; +DROP TABLE alias_2; +DROP TABLE 
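+-- (editorial aside) A hypothetical way to confirm what an alias resolves to, using columns that
+-- system.tables is known to expose; a sketch only, not part of the reference output:
+-- SELECT engine, engine_full FROM system.tables
+-- WHERE database = currentDatabase() AND name = 'alias_4'; -- alias_4 is created just below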
alias_3; +SELECT count() FROM source_table; + +-- Test: Create alias +SELECT 'Test Create alias'; +CREATE TABLE alias_4 ENGINE = Alias('source_table'); +SELECT * FROM alias_4 ORDER BY id; + +-- Test: OPTIMIZE through alias +SELECT 'Test OPTIMIZE'; +INSERT INTO alias_4 VALUES (10, 'ten', 'active'); +INSERT INTO alias_4 VALUES (11, 'eleven', 'active'); +INSERT INTO alias_4 VALUES (12, 'twelve', 'active'); +OPTIMIZE TABLE alias_4 FINAL; +SELECT count() AS parts_after FROM system.parts +WHERE database = currentDatabase() AND table = 'source_table' AND active; +SELECT count() FROM alias_4; + +-- Test: ALTER MODIFY SETTING +SELECT 'Test ALTER MODIFY SETTING'; +ALTER TABLE alias_4 MODIFY SETTING max_bytes_to_merge_at_max_space_in_pool = 1000000; +SHOW CREATE TABLE source_table FORMAT TSVRaw; + +-- Test: UPDATE through alias +SELECT 'Test UPDATE (mutate)'; +ALTER TABLE alias_4 UPDATE value = 'updated' WHERE id = 1 SETTINGS mutations_sync = 1; +SELECT id, value, status FROM source_table WHERE id = 1; + +-- Test: DELETE through alias +SELECT 'Test DELETE (mutate)'; +ALTER TABLE alias_4 DELETE WHERE id = 2 SETTINGS mutations_sync = 1; +SELECT count() FROM source_table WHERE id = 2; + +-- Test: Partition operations +SELECT 'Test Partition operations'; +DROP TABLE IF EXISTS source_partitioned; +DROP TABLE IF EXISTS alias_part; +CREATE TABLE source_partitioned (date Date, id UInt32, value String) + ENGINE = MergeTree PARTITION BY toYYYYMM(date) ORDER BY id; +CREATE TABLE alias_part ENGINE = Alias('source_partitioned'); +INSERT INTO alias_part VALUES ('2024-01-15', 1, 'january'), ('2024-02-15', 2, 'february'), ('2024-03-15', 3, 'march'); +SELECT count() FROM alias_part; + +-- Test: DETACH PARTITION +SELECT 'Test DETACH PARTITION'; +ALTER TABLE alias_part DETACH PARTITION '202401'; +SELECT count() FROM alias_part; + +-- Test: ATTACH PARTITION +SELECT 'Test ATTACH PARTITION'; +ALTER TABLE alias_part ATTACH PARTITION '202401'; +SELECT count() FROM alias_part; + +-- Test: DROP PARTITION +SELECT 'Test DROP PARTITION'; +ALTER TABLE alias_part DROP PARTITION '202403'; +SELECT count() FROM alias_part; + +-- Test: INSERT SELECT +SELECT 'Test INSERT SELECT'; +INSERT INTO alias_4 SELECT id + 100, value, status FROM source_table WHERE id <= 10; +SELECT count() FROM source_table WHERE id > 100; + +-- Test: EXCHANGE TABLES +DROP TABLE IF EXISTS table_a_exchange; +DROP TABLE IF EXISTS table_b_exchange; +DROP TABLE IF EXISTS alias_a_exchange; +DROP TABLE IF EXISTS alias_b_exchange; + +CREATE TABLE table_a_exchange (value String) ENGINE = MergeTree() ORDER BY value; +CREATE TABLE table_b_exchange (value String) ENGINE = MergeTree() ORDER BY value; + +INSERT INTO table_a_exchange VALUES ('from_a'); +INSERT INTO table_b_exchange VALUES ('from_b'); + +CREATE TABLE alias_a_exchange ENGINE = Alias(table_a_exchange); +CREATE TABLE alias_b_exchange ENGINE = Alias(table_b_exchange); + +SELECT 'Before EXCHANGE'; +SELECT * FROM alias_a_exchange ORDER BY value; +SELECT * FROM alias_b_exchange ORDER BY value; + +-- EXCHANGE the alias +EXCHANGE TABLES alias_a_exchange AND alias_b_exchange; + +SELECT 'After EXCHANGE alias tables'; +SELECT * FROM alias_a_exchange ORDER BY value; -- Should show 'from_b' +SELECT * FROM alias_b_exchange ORDER BY value; -- Should show 'from_a' + +-- EXCHANGE the source tables +EXCHANGE TABLES table_a_exchange AND table_b_exchange; + +SELECT 'After EXCHANGE source tables'; +SELECT * FROM alias_a_exchange ORDER BY value; -- Should show 'from_a' +SELECT * FROM alias_b_exchange ORDER BY value; -- Should show 
'from_b' + +DROP TABLE alias_a_exchange; +DROP TABLE alias_b_exchange; +DROP TABLE table_a_exchange; +DROP TABLE table_b_exchange; + +-- Test: DETACH and ATTACH TABLE +SELECT 'Test DETACH and ATTACH TABLE'; +DROP TABLE IF EXISTS source_attach; +DROP TABLE IF EXISTS alias_attach; + +CREATE TABLE source_attach (id UInt32, data String) ENGINE = MergeTree ORDER BY id; +INSERT INTO source_attach VALUES (1, 'data1'), (2, 'data2'); + +CREATE TABLE alias_attach ENGINE = Alias('source_attach'); +SELECT * FROM alias_attach ORDER BY id; +SELECT * FROM source_attach ORDER BY id; + +-- DETACH the alias table +DETACH TABLE alias_attach; + +-- ATTACH the table back +ATTACH TABLE alias_attach; + +-- Verify it works after ATTACH +SELECT 'After ATTACH'; +SELECT * FROM alias_attach ORDER BY id; +SELECT * FROM source_attach ORDER BY id; + +-- Insert through alias after ATTACH +INSERT INTO alias_attach VALUES (3, 'data3'); +SELECT * FROM alias_attach ORDER BY id; +SELECT * FROM source_attach ORDER BY id; + +DROP TABLE alias_attach; +DROP TABLE source_attach; + +-- Test: Circular reference check +SELECT 'Test circular reference prevention'; +CREATE TABLE self_ref_test ENGINE = Alias('self_ref_test'); -- { serverError BAD_ARGUMENTS } + +SELECT 'Test ALTER target directly'; +CREATE TABLE metadata_target (id UInt32, value String) ENGINE = MergeTree ORDER BY id; +CREATE TABLE metadata_alias ENGINE = Alias('metadata_target'); + +INSERT INTO metadata_target VALUES (1, 'one'), (2, 'two'); +SELECT * FROM metadata_alias ORDER BY id; + +-- ALTER target table DIRECTLY (not through alias) +ALTER TABLE metadata_target ADD COLUMN extra String DEFAULT 'data1'; + +-- Alias should immediately see the new column (dynamic metadata fetch) +SELECT 'After ALTER target'; +SELECT name FROM system.columns WHERE database = currentDatabase() AND table = 'metadata_alias' ORDER BY name; + +-- INSERT through alias with new column should work +INSERT INTO metadata_alias VALUES (3, 'three', 'data3'); +SELECT * FROM metadata_alias ORDER BY id; + +ALTER TABLE metadata_target ADD COLUMN num UInt32 DEFAULT 0; +ALTER TABLE metadata_target MODIFY COLUMN extra String DEFAULT 'data2'; + +SELECT 'After multiple ALTERs'; +SELECT name FROM system.columns WHERE database = currentDatabase() AND table = 'metadata_alias' ORDER BY name; + +-- Insert with all columns +INSERT INTO metadata_alias VALUES (4, 'four', DEFAULT, 100); +SELECT id, value, extra, num FROM metadata_alias WHERE id = 4; + +-- DROP COLUMN on target +ALTER TABLE metadata_target DROP COLUMN num; + +-- Alias should not show dropped column +SELECT 'After DROP'; +SELECT name FROM system.columns WHERE database = currentDatabase() AND table = 'metadata_alias' ORDER BY name; + +-- INSERT after DROP should work (without dropped column) +INSERT INTO metadata_alias VALUES (5, 'five', 'data5'); +SELECT id, value, extra FROM metadata_alias WHERE id = 5; + +DROP TABLE metadata_alias; +DROP TABLE metadata_target; + +SELECT 'Test alias with missing target table'; +DROP TABLE IF EXISTS alias_with_missing_target; +DROP TABLE IF EXISTS temp_target; + +CREATE TABLE temp_target (id UInt32, value String) ENGINE = MergeTree ORDER BY id; +INSERT INTO temp_target VALUES (1, 'data1'), (2, 'data2'); + +CREATE TABLE alias_with_missing_target ENGINE = Alias('temp_target'); +SELECT * FROM alias_with_missing_target ORDER BY id; + +DROP TABLE temp_target; + +SELECT name, engine FROM system.tables WHERE database = currentDatabase() AND name = 'alias_with_missing_target'; + +SELECT arraySort(groupUniqArray(name)) FROM 
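+-- (editorial note) temp_target was dropped above while alias_with_missing_target still points at
+-- it; the checks below verify how a dangling Alias table is reported in system.tables and
+-- system.columns.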
system.tables WHERE database = currentDatabase(); +SELECT arraySort(groupUniqArray(name)) FROM system.columns WHERE database = currentDatabase() AND table = 'alias_with_missing_target'; + +SELECT database, table, name FROM system.columns +WHERE database = currentDatabase() AND table = 'alias_with_missing_target' +ORDER BY name; + +SELECT name, engine, total_rows, total_bytes, data_paths +FROM system.tables +WHERE database = currentDatabase() AND name = 'alias_with_missing_target'; + +DROP TABLE alias_with_missing_target; \ No newline at end of file diff --git a/parser/testdata/03636_storage_alias_syntax/ast.json b/parser/testdata/03636_storage_alias_syntax/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03636_storage_alias_syntax/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03636_storage_alias_syntax/metadata.json b/parser/testdata/03636_storage_alias_syntax/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03636_storage_alias_syntax/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03636_storage_alias_syntax/query.sql b/parser/testdata/03636_storage_alias_syntax/query.sql new file mode 100644 index 000000000..07271ef3a --- /dev/null +++ b/parser/testdata/03636_storage_alias_syntax/query.sql @@ -0,0 +1,47 @@ +-- { echo } + +DROP TABLE IF EXISTS source_table; +DROP TABLE IF EXISTS alias_syntax_1; +DROP TABLE IF EXISTS alias_syntax_2; + +SET allow_experimental_alias_table_engine = 1; + +-- Create source table +CREATE TABLE source_table (id UInt32, name String, value Float64) +ENGINE = MergeTree ORDER BY id; + +INSERT INTO source_table VALUES (1, 'one', 10.1), (2, 'two', 20.2), (3, 'three', 30.3); + +-- Syntax: ENGINE = Alias(table) +SELECT 'Test ENGINE = Alias(table)'; +CREATE TABLE alias_syntax_1 ENGINE = Alias('source_table'); +SELECT * FROM alias_syntax_1 ORDER BY id; + +-- Syntax: ENGINE = Alias(db, table) +SELECT 'Test ENGINE = Alias(db, table)'; +CREATE TABLE alias_syntax_2 ENGINE = Alias(currentDatabase(), 'source_table'); +SELECT * FROM alias_syntax_2 ORDER BY id; + +-- Test: All aliases work identically +SELECT 'Test All aliases work identically'; +INSERT INTO alias_syntax_1 VALUES (4, 'four', 40.4); +SELECT count() FROM source_table; +SELECT count() FROM alias_syntax_1; +SELECT count() FROM alias_syntax_2; + +INSERT INTO alias_syntax_2 VALUES (5, 'five', 50.5); +SELECT count() FROM source_table; + +-- Test: with explicit columns (should fail) +SELECT 'Test Explicit columns should fail'; +CREATE TABLE alias_syntax_3 (id UInt32, name String, value Float64) ENGINE = Alias('source_table'); -- { serverError BAD_ARGUMENTS } + +-- Test: Alias to alias +DROP TABLE IF EXISTS base_table; +DROP TABLE IF EXISTS alias_1; +DROP TABLE IF EXISTS alias_2; + +CREATE TABLE base_table (id UInt32, value String) ENGINE = MergeTree ORDER BY id; +CREATE TABLE alias_1 ENGINE = Alias('base_table'); +CREATE TABLE alias_2 ENGINE = Alias('alias_1'); -- { serverError BAD_ARGUMENTS } + diff --git a/parser/testdata/03638_merge_max_dynamic_subcolumns_in_wide_part/ast.json b/parser/testdata/03638_merge_max_dynamic_subcolumns_in_wide_part/ast.json new file mode 100644 index 000000000..fb3a107a9 --- /dev/null +++ b/parser/testdata/03638_merge_max_dynamic_subcolumns_in_wide_part/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + 
], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001195875, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03638_merge_max_dynamic_subcolumns_in_wide_part/metadata.json b/parser/testdata/03638_merge_max_dynamic_subcolumns_in_wide_part/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03638_merge_max_dynamic_subcolumns_in_wide_part/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03638_merge_max_dynamic_subcolumns_in_wide_part/query.sql b/parser/testdata/03638_merge_max_dynamic_subcolumns_in_wide_part/query.sql new file mode 100644 index 000000000..ad3952650 --- /dev/null +++ b/parser/testdata/03638_merge_max_dynamic_subcolumns_in_wide_part/query.sql @@ -0,0 +1,16 @@ +drop table if exists test; +create table test (json JSON(max_dynamic_paths=4)) engine=MergeTree order by tuple() settings min_bytes_for_wide_part=1, min_rows_for_wide_part=1, merge_max_dynamic_subcolumns_in_wide_part=2; +insert into test select '{"a" : 42, "b" : 42, "c" : 42, "d" : 42, "e" : 42, "f" : 42}'; +select JSONDynamicPaths(json), JSONSharedDataPaths(json) from test; +insert into test select '{"a" : 42, "b" : 42, "c" : 42, "d" : 42, "e" : 42, "f" : 42}'; +optimize table test final; +select JSONDynamicPaths(json), JSONSharedDataPaths(json) from test limit 1; +drop table test; + +create table test (x UInt32, json JSON(max_dynamic_paths=4)) engine=MergeTree order by tuple() settings min_bytes_for_wide_part=1, min_rows_for_wide_part=1, merge_max_dynamic_subcolumns_in_wide_part=2, vertical_merge_algorithm_min_rows_to_activate=1, vertical_merge_algorithm_min_columns_to_activate=1; +insert into test select 42, '{"a" : 42, "b" : 42, "c" : 42, "d" : 42, "e" : 42, "f" : 42}'; +select JSONDynamicPaths(json), JSONSharedDataPaths(json) from test; +insert into test select 42, '{"a" : 42, "b" : 42, "c" : 42, "d" : 42, "e" : 42, "f" : 42}'; +optimize table test final; +select JSONDynamicPaths(json), JSONSharedDataPaths(json) from test limit 1; +drop table test \ No newline at end of file diff --git a/parser/testdata/03639_hash_of_dynamic_column/ast.json b/parser/testdata/03639_hash_of_dynamic_column/ast.json new file mode 100644 index 000000000..a948ab064 --- /dev/null +++ b/parser/testdata/03639_hash_of_dynamic_column/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sipHash64 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '42'" + }, + { + "explain": " Literal 'Dynamic'" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001601874, + "rows_read": 10, + "bytes_read": 375 + } +} diff --git a/parser/testdata/03639_hash_of_dynamic_column/metadata.json b/parser/testdata/03639_hash_of_dynamic_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03639_hash_of_dynamic_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03639_hash_of_dynamic_column/query.sql b/parser/testdata/03639_hash_of_dynamic_column/query.sql new file mode 100644 index 000000000..f50f70ce6 --- /dev/null +++ 
b/parser/testdata/03639_hash_of_dynamic_column/query.sql @@ -0,0 +1,17 @@ +select sipHash64(42::Dynamic); +select sipHash64(42::Dynamic(max_types=0)); + +select sipHash64(43::Dynamic); +select sipHash64(43::Dynamic(max_types=0)); + +select sipHash64('str1'::Dynamic); +select sipHash64('str1'::Dynamic(max_types=0)); + +select sipHash64('str2'::Dynamic); +select sipHash64('str2'::Dynamic(max_types=0)); + +select sipHash64(NULL::Dynamic); +select sipHash64(NULL::Dynamic(max_types=0)); + +select sipHash64(tuple(42)::Dynamic); +select sipHash64(tuple(42)::Dynamic(max_types=0)); diff --git a/parser/testdata/03639_hash_of_json_column/ast.json b/parser/testdata/03639_hash_of_json_column/ast.json new file mode 100644 index 000000000..d4886c1ba --- /dev/null +++ b/parser/testdata/03639_hash_of_json_column/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001230024, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03639_hash_of_json_column/metadata.json b/parser/testdata/03639_hash_of_json_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03639_hash_of_json_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03639_hash_of_json_column/query.sql b/parser/testdata/03639_hash_of_json_column/query.sql new file mode 100644 index 000000000..fa0c29020 --- /dev/null +++ b/parser/testdata/03639_hash_of_json_column/query.sql @@ -0,0 +1,39 @@ +set session_timezone='UTC'; + +select sipHash64('{"a" : 42, "b" : "str", "c" : [{"d" : 1}, {"e" : 2}]}'::JSON); +select sipHash64('{"a" : 42, "b" : "str", "c" : [{"d" : 1}, {"e" : 2}], "f" : null}'::JSON); +select sipHash64('{"a" : 42, "b" : "str", "c" : [{"d" : 1}, {"e" : 2}]}'::JSON(max_dynamic_paths=100)); +select sipHash64('{"a" : 42, "b" : "str", "c" : [{"d" : 1}, {"e" : 2}]}'::JSON(max_dynamic_paths=1)); +select sipHash64('{"a" : 42, "b" : "str", "c" : [{"d" : 1}, {"e" : 2}]}'::JSON(max_dynamic_paths=0)); +select sipHash64('{"a" : 42, "b" : "str", "c" : [{"d" : 1}, {"e" : 2}]}'::JSON(max_dynamic_types=0)); +select sipHash64('{"a" : 42, "b" : "str", "c" : [{"d" : 1}, {"e" : 2}]}'::JSON(a Int64)); +select sipHash64('{"a" : 42, "b" : "str", "c" : [{"d" : 1}, {"e" : 2}]}'::JSON(a Dynamic)); +select sipHash64('{"a" : 42, "b" : "str", "c" : [{"d" : 1}, {"e" : 2}]}'::JSON(SKIP REGEXP '(abc)')); + +select sipHash64('{"a" : "1970-01-01 00:00:00.000000042", "b" : "str", "c" : [{"d" : 1}, {"e" : 2}]}'::JSON); + +select sipHash64(tuple(map('json', [toNullable('{"a" : 42, "b" : "str", "c" : [{"d" : 1}, {"e" : 2}]}'::JSON)]))); +select sipHash64(tuple(map('json', [toNullable('{"a" : 42, "b" : "str", "c" : [{"d" : 1}, {"e" : 2}], "f" : null}'::JSON)]))); +select sipHash64(tuple(map('json', [toNullable('{"a" : 42, "b" : "str", "c" : [{"d" : 1}, {"e" : 2}]}'::JSON(max_dynamic_paths=100))]))); +select sipHash64(tuple(map('json', [toNullable('{"a" : 42, "b" : "str", "c" : [{"d" : 1}, {"e" : 2}]}'::JSON(max_dynamic_paths=1))]))); +select sipHash64(tuple(map('json', [toNullable('{"a" : 42, "b" : "str", "c" : [{"d" : 1}, {"e" : 2}]}'::JSON(max_dynamic_paths=0))]))); +select sipHash64(tuple(map('json', [toNullable('{"a" : 42, "b" : "str", "c" : [{"d" : 1}, {"e" : 2}]}'::JSON(max_dynamic_types=0))]))); +select sipHash64(tuple(map('json', [toNullable('{"a" : 42, "b" : "str", "c" : [{"d" : 1}, {"e" : 2}]}'::JSON(a Int64))]))); +select 
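+-- (editorial note) The tuple(map('json', [toNullable(...)])) wrappers repeat the same literals
+-- with the JSON value nested inside composite types, exercising the hash code path for nested
+-- JSON columns in addition to the bare-column cases above.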
sipHash64(tuple(map('json', [toNullable('{"a" : 42, "b" : "str", "c" : [{"d" : 1}, {"e" : 2}]}'::JSON(a Dynamic))]))); + + +drop table if exists test; +create table test (json JSON) engine=Memory; +insert into test values ('{"a" : 1}'), ('{"a" : 2}'), ('{}'), ('{"a" : null}'), ('{"b" : 1}'); +select json, sipHash64(json) from test; +drop table test; + +create table test (json JSON(max_dynamic_types=0)) engine=Memory; +insert into test values ('{"a" : 1}'), ('{"a" : 2}'), ('{}'), ('{"a" : null}'), ('{"b" : 1}'); +select json, sipHash64(json) from test; +drop table test; + +create table test (json JSON(max_dynamic_paths=0)) engine=Memory; +insert into test values ('{"a" : 1}'), ('{"a" : 2}'), ('{}'), ('{"a" : null}'), ('{"b" : 1}'); +select json, sipHash64(json) from test; +drop table test; diff --git a/parser/testdata/03640_alter_table_rewrite_parts/ast.json b/parser/testdata/03640_alter_table_rewrite_parts/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03640_alter_table_rewrite_parts/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03640_alter_table_rewrite_parts/metadata.json b/parser/testdata/03640_alter_table_rewrite_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03640_alter_table_rewrite_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03640_alter_table_rewrite_parts/query.sql b/parser/testdata/03640_alter_table_rewrite_parts/query.sql new file mode 100644 index 000000000..6ee3f5007 --- /dev/null +++ b/parser/testdata/03640_alter_table_rewrite_parts/query.sql @@ -0,0 +1,17 @@ +-- First write parts with use_const_adaptive_granularity=0, then enable it and check that index_granularity_bytes_in_memory_allocated=25 (the size of the constant-granularity marks) + +drop table if exists test_materialize; +create table test_materialize (part Int, key Int, value String) engine=MergeTree() partition by part order by key settings index_granularity=100, use_const_adaptive_granularity=false, enable_index_granularity_compression=false, min_bytes_for_wide_part=0; +insert into test_materialize select intDiv(number, 5000), number, repeat('a', number) from numbers(10e3) settings max_block_size=10, min_insert_block_size_rows=10000; + +-- { echo } +-- 25 is the size of marks in the case of constant index granularity +select count() from test_materialize; +select partition_id, rows, index_granularity_bytes_in_memory_allocated>25 from system.parts where database = currentDatabase() and table = 'test_materialize' and active order by 1; +alter table test_materialize modify setting use_const_adaptive_granularity; +alter table test_materialize rewrite parts in partition 1 settings mutations_sync=2; +select partition_id, rows, index_granularity_bytes_in_memory_allocated>25 from system.parts where database = currentDatabase() and table = 'test_materialize' and active order by 1; +alter table test_materialize rewrite parts settings mutations_sync=2; +select partition_id, rows, index_granularity_bytes_in_memory_allocated from system.parts where database = currentDatabase() and table = 'test_materialize' and active order by 1; +select count() from test_materialize; +select * from system.mutations where database = currentDatabase() and not is_done format Vertical; diff --git a/parser/testdata/03640_load_marks_synchronously/ast.json b/parser/testdata/03640_load_marks_synchronously/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/03640_load_marks_synchronously/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03640_load_marks_synchronously/metadata.json b/parser/testdata/03640_load_marks_synchronously/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03640_load_marks_synchronously/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03640_load_marks_synchronously/query.sql b/parser/testdata/03640_load_marks_synchronously/query.sql new file mode 100644 index 000000000..3e14af1ef --- /dev/null +++ b/parser/testdata/03640_load_marks_synchronously/query.sql @@ -0,0 +1,18 @@ +-- Tags: no-parallel-replicas + +create table data (key int) engine=MergeTree() order by key settings prewarm_mark_cache=0; +insert into data select * from numbers(1000); + +-- Clear marks cache +detach table data; +attach table data; + +select * from data settings load_marks_asynchronously=1 format Null /* 1 */; +select * from data settings load_marks_asynchronously=1 format Null /* 2 */; + +system flush logs query_log; +select query, ProfileEvents['BackgroundLoadingMarksTasks']>0 async, ProfileEvents['MarksTasksFromCache']>0 sync +from system.query_log +where current_database = currentDatabase() and query_kind = 'Select' and type != 'QueryStart' +order by event_time_microseconds +format Vertical; diff --git a/parser/testdata/03640_multiple_mutations_with_error_with_rewrite_parts/ast.json b/parser/testdata/03640_multiple_mutations_with_error_with_rewrite_parts/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03640_multiple_mutations_with_error_with_rewrite_parts/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03640_multiple_mutations_with_error_with_rewrite_parts/metadata.json b/parser/testdata/03640_multiple_mutations_with_error_with_rewrite_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03640_multiple_mutations_with_error_with_rewrite_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03640_multiple_mutations_with_error_with_rewrite_parts/query.sql b/parser/testdata/03640_multiple_mutations_with_error_with_rewrite_parts/query.sql new file mode 100644 index 000000000..47ab327ab --- /dev/null +++ b/parser/testdata/03640_multiple_mutations_with_error_with_rewrite_parts/query.sql @@ -0,0 +1,10 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/88150 +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Nullable(String)) ENGINE = MergeTree() ORDER BY tuple() SETTINGS allow_nullable_key = 1; +INSERT INTO TABLE t0 (c0) VALUES ('a'); +ALTER TABLE t0 DELETE WHERE c0.size = 'b' SETTINGS mutations_sync=1; -- { serverError UNFINISHED } +DELETE FROM t0 WHERE _block_number = 1; -- { serverError UNFINISHED } +ALTER TABLE t0 REWRITE PARTS SETTINGS mutations_sync=1; -- { serverError UNFINISHED } +KILL MUTATION WHERE database = currentDatabase() AND command = $doc$(DELETE WHERE c0.size = 'b')$doc$ SYNC FORMAT Null; +ALTER TABLE t0 REWRITE PARTS SETTINGS mutations_sync=1; +select * from system.mutations where database = currentDatabase() and not is_done format Vertical; diff --git a/parser/testdata/03640_multiple_mutations_with_rewrite_parts/ast.json b/parser/testdata/03640_multiple_mutations_with_rewrite_parts/ast.json new file mode 100644 index 000000000..720cc6c4d --- /dev/null +++ b/parser/testdata/03640_multiple_mutations_with_rewrite_parts/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + 
"type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_materialize (children 1)" + }, + { + "explain": " Identifier test_materialize" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001571202, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/03640_multiple_mutations_with_rewrite_parts/metadata.json b/parser/testdata/03640_multiple_mutations_with_rewrite_parts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03640_multiple_mutations_with_rewrite_parts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03640_multiple_mutations_with_rewrite_parts/query.sql b/parser/testdata/03640_multiple_mutations_with_rewrite_parts/query.sql new file mode 100644 index 000000000..6bdcfc627 --- /dev/null +++ b/parser/testdata/03640_multiple_mutations_with_rewrite_parts/query.sql @@ -0,0 +1,18 @@ +drop table if exists test_materialize; +create table test_materialize (part Int, key Int, value String) engine=MergeTree() partition by part order by key settings index_granularity=100, use_const_adaptive_granularity=false, enable_index_granularity_compression=false, min_bytes_for_wide_part=0; +insert into test_materialize select intDiv(number, 5000), number, repeat('a', number) from numbers(10e3) settings max_block_size=10, min_insert_block_size_rows=10000; + +-- { echoOn } +select partition_id, rows, index_granularity_bytes_in_memory_allocated>25 from system.parts where database = currentDatabase() and table = 'test_materialize' and active order by 1; +alter table test_materialize modify setting use_const_adaptive_granularity; +alter table test_materialize add column new_value String; +alter table test_materialize delete where new_value != ''; +alter table test_materialize rewrite parts settings mutations_sync=2; +select partition_id, rows, index_granularity_bytes_in_memory_allocated from system.parts where database = currentDatabase() and table = 'test_materialize' and active order by 1; + +alter table test_materialize modify setting use_const_adaptive_granularity=0; +alter table test_materialize rewrite parts; +alter table test_materialize delete where (key % 2) == 0 settings mutations_sync=2; +select partition_id, rows, index_granularity_bytes_in_memory_allocated>25 from system.parts where database = currentDatabase() and table = 'test_materialize' and active order by 1; + +select * from system.mutations where database = currentDatabase() and not is_done format Vertical; diff --git a/parser/testdata/03640_skip_indexes_data_types_with_or/ast.json b/parser/testdata/03640_skip_indexes_data_types_with_or/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03640_skip_indexes_data_types_with_or/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03640_skip_indexes_data_types_with_or/metadata.json b/parser/testdata/03640_skip_indexes_data_types_with_or/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03640_skip_indexes_data_types_with_or/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03640_skip_indexes_data_types_with_or/query.sql b/parser/testdata/03640_skip_indexes_data_types_with_or/query.sql new file mode 100644 index 000000000..54334991e --- /dev/null +++ b/parser/testdata/03640_skip_indexes_data_types_with_or/query.sql @@ -0,0 +1,91 @@ +-- Tags: no-parallel-replicas +-- no-parallel-replicas: funny EXPLAIN PLAN output + +-- Test that the skip indexes are 
utilized for AND and OR connected filter conditions +-- This test uses all the skip index types - minmax, set, bloom filter, text + +-- Settings needed to achieve stable EXPLAIN PLAN output +SET parallel_replicas_local_plan = 1; +SET use_query_condition_cache = 0; +SET use_skip_indexes_on_data_read = 0; +SET use_skip_indexes = 1; +SET allow_experimental_full_text_index = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + id UInt32, + i Int32, + s String, + u UInt32, + t1 String, + t2 String, + INDEX minmax_index i TYPE minmax, + INDEX set_index s TYPE set(10), + INDEX bf_index u TYPE bloom_filter (0.001), + INDEX text_index1 t1 TYPE text(tokenizer = 'splitByNonAlpha') GRANULARITY 1, + INDEX text_index2 t2 TYPE text(tokenizer = 'splitByNonAlpha') GRANULARITY 1 +) +ENGINE = MergeTree +ORDER BY id +SETTINGS + index_granularity = 6, index_granularity_bytes = 0, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0, + max_bytes_to_merge_at_max_space_in_pool = 1, + use_const_adaptive_granularity = 1; + +-- 600 rows, 100 granules +INSERT INTO tab + SELECT number, + number, + IF (number < 6, 'firststring', IF(number < 588, 'middlestring', 'laststring')), + number, + concat('This is text in row number', toString(number)), + concat('Some thing for line', toString(number)) + FROM numbers(600); + +SELECT 'Test without utilizing skip indexes for disjunctions'; +SET use_skip_indexes_for_disjunctions = 0; + +SELECT '-- Simple OR condition'; -- surviving granules: 100, but only 1 granule is real match +SELECT explain AS explain FROM ( + EXPLAIN indexes = 1 SELECT id FROM tab WHERE (i = 1 OR s = 'firststring' OR u = 1 OR hasToken(t1, 'number1')) +) WHERE explain LIKE '%Granules%' OR explain LIKE '%PrimaryKey%' OR explain LIKE '%Name%'; + +SELECT '-- Mixed AND/OR condition'; -- will show 50 granules, but real match is 0 granules +SELECT explain AS explain FROM ( + EXPLAIN indexes = 1 SELECT id FROM tab WHERE (id >= 301 AND (i = 1 OR s = 'firststring' OR u = 1 OR hasToken(t1, 'number1'))) +) WHERE explain LIKE '%Granules%' OR explain LIKE '%PrimaryKey%' OR explain LIKE '%Name%'; + + +-- Now test with feature enabled +SELECT 'Test with utilizing skip indexes for disjunctions'; +SET use_skip_indexes_for_disjunctions = 1; + +SELECT '-- Simple OR condition'; -- Should show 1 granule +SELECT explain AS explain FROM ( + EXPLAIN indexes = 1 SELECT id FROM tab WHERE (i = 1 OR s = 'firststring' OR u = 1 OR hasToken(t1, 'number1')) +) WHERE explain LIKE '%Granules%' OR explain LIKE '%PrimaryKey%' OR explain LIKE '%Name%'; + +SELECT '-- Mixed AND/OR condition'; -- final should be 0 granules +SELECT explain AS explain FROM ( + EXPLAIN indexes = 1 SELECT id FROM tab WHERE (id >= 301 AND (i = 1 OR s = 'firststring' OR u = 1 OR hasToken(t1, 'number1'))) +) WHERE explain LIKE '%Granules%' OR explain LIKE '%PrimaryKey%' OR explain LIKE '%Name%'; + +-- Now test with mixing stuff +SELECT '-- Should show 3 granules, laststring is in the last 2 granules'; +SELECT explain AS explain FROM ( + EXPLAIN indexes = 1 SELECT id FROM tab WHERE (i = 10 OR s = 'laststring') +) WHERE explain LIKE '%Granules%' OR explain LIKE '%PrimaryKey%' OR explain LIKE '%Name%'; + +SELECT '-- Should show 1 granule'; +SELECT explain AS explain FROM ( + EXPLAIN indexes = 1 SELECT id FROM tab WHERE (hasToken(t1, 'number1') OR hasToken(t2, 'line1')) +) WHERE explain LIKE '%Granules%' OR explain LIKE '%PrimaryKey%' OR explain LIKE '%Name%'; + +SELECT '-- Should show 2 granules'; +SELECT explain AS explain FROM ( + EXPLAIN indexes = 1 
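+-- (editorial note) Every check in this file follows the same pattern: wrap EXPLAIN indexes = 1
+-- in an outer SELECT and keep only the Granules/PrimaryKey/Name lines, which keeps the
+-- reference output stable while still exposing how many granules each index variant prunes.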
SELECT id FROM tab WHERE (hasToken(t1, 'number1') OR hasToken(t2, 'line85')) +) WHERE explain LIKE '%Granules%' OR explain LIKE '%PrimaryKey%' OR explain LIKE '%Name%'; diff --git a/parser/testdata/03640_skip_indexes_with_or/ast.json b/parser/testdata/03640_skip_indexes_with_or/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03640_skip_indexes_with_or/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03640_skip_indexes_with_or/metadata.json b/parser/testdata/03640_skip_indexes_with_or/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03640_skip_indexes_with_or/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03640_skip_indexes_with_or/query.sql b/parser/testdata/03640_skip_indexes_with_or/query.sql new file mode 100644 index 000000000..fdeacdc84 --- /dev/null +++ b/parser/testdata/03640_skip_indexes_with_or/query.sql @@ -0,0 +1,117 @@ +-- Tags: no-parallel-replicas +-- no-parallel-replicas: funny EXPLAIN PLAN output + +-- Test that the skip indexes are utilized for AND and OR connected filter conditions + +-- Settings needed to achieve stable EXPLAIN PLAN output +SET parallel_replicas_local_plan = 1; +SET use_query_condition_cache = 0; +SET use_skip_indexes_on_data_read = 0; +SET use_skip_indexes = 1; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + id UInt32, + v1 UInt32, + v2 UInt32, + v3 UInt32, + INDEX v1_index v1 TYPE minmax, + INDEX v2_index v2 TYPE minmax, +) +ENGINE = MergeTree +ORDER BY id +SETTINGS + index_granularity = 64, index_granularity_bytes = 0, + min_bytes_for_wide_part = 0, + min_bytes_for_full_part_storage = 0, + max_bytes_to_merge_at_max_space_in_pool = 1, + use_const_adaptive_granularity = 1; + +-- 157 ranges in total +INSERT INTO tab SELECT number + 1, number + 1, (10000 - number), (number * 5) FROM numbers(10000); + +SELECT 'Test without utilizing skip indexes for disjunctions'; +SET use_skip_indexes_for_disjunctions = 0; + +SELECT '-- Simple OR condition'; -- surviving granules: 159 +SELECT explain AS explain FROM ( + EXPLAIN indexes = 1 SELECT id FROM tab WHERE (v1 = 111 OR v2 = 111) +) WHERE explain LIKE '%Granules%' OR explain LIKE '%PrimaryKey%' OR explain LIKE '%Name%'; + +SELECT '-- Mixed AND/OR condition'; -- 79 +SELECT explain AS explain FROM ( + EXPLAIN indexes = 1 SELECT id FROM tab WHERE (id >= 5000 AND (v1 = 111 OR v2 = 111)) +) WHERE explain LIKE '%Granules%' OR explain LIKE '%PrimaryKey%' OR explain LIKE '%Name%'; + +SELECT '-- Mixed _part_offset / OR condition'; -- 79 +SELECT explain AS explain FROM ( + EXPLAIN indexes = 1 SELECT id FROM tab WHERE (_part_offset >= 5000 AND (v1 = 111 OR v2 = 111)) +) WHERE explain LIKE '%Granules%' OR explain LIKE '%PrimaryKey%' OR explain LIKE '%Name%'; + +SELECT '-- No skip index on v3'; -- 159 +SELECT explain AS explain FROM ( + EXPLAIN indexes = 1 SELECT id FROM tab WHERE v1 = 111 OR v2 = 111 OR v3 = 90000 +) WHERE explain LIKE '%Granules%' OR explain LIKE '%PrimaryKey%' OR explain LIKE '%Name%'; + +SELECT 'Test with utilizing skip indexes for disjunctions'; +SET use_skip_indexes_for_disjunctions = 1; + +SELECT '-- Simple OR condition'; -- 2 +SELECT explain AS explain FROM ( + EXPLAIN indexes = 1 SELECT id FROM tab WHERE (v1 = 111 OR v2 = 111) +) WHERE explain LIKE '%Granules%' OR explain LIKE '%PrimaryKey%' OR explain LIKE '%Name%'; + +SELECT '-- Mixed AND/OR condition'; -- 1 +SELECT explain AS explain FROM ( + EXPLAIN indexes = 1 SELECT id FROM tab WHERE (id >= 5000 AND (v1 
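+-- (editorial note) The granule counts in the trailing comments (159/79 with
+-- use_skip_indexes_for_disjunctions = 0 versus 2/1 with it enabled, and 157 when v3 has no skip
+-- index) are the quantities these EXPLAIN filters are asserting.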
= 111 OR v2 = 111)) +) WHERE explain LIKE '%Granules%' OR explain LIKE '%PrimaryKey%' OR explain LIKE '%Name%'; + +SELECT '-- Mixed _part_offset / OR condition'; -- 1 +SELECT explain AS explain FROM ( + EXPLAIN indexes = 1 SELECT id FROM tab WHERE (_part_offset >= 5000 AND (v1 = 111 OR v2 = 111)) +) WHERE explain LIKE '%Granules%' OR explain LIKE '%PrimaryKey%' OR explain LIKE '%Name%'; + +SELECT '-- No skip index on v3'; -- 157 +SELECT explain AS explain FROM ( + EXPLAIN indexes = 1 SELECT id FROM tab WHERE v1 = 111 OR v2 = 111 OR v3 = 90000 +) WHERE explain LIKE '%Granules%' OR explain LIKE '%PrimaryKey%' OR explain LIKE '%Name%'; + +SELECT '-- Complex condition'; -- 0 +SELECT explain AS explain FROM ( + EXPLAIN indexes = 1 SELECT id FROM tab WHERE (v1 BETWEEN 10 AND 20 AND v2 BETWEEN 10 AND 20) OR (v1 BETWEEN 100 AND 2000 AND v2 BETWEEN 100 AND 2000) OR (v1 > 9000 AND v2 > 9000) +) WHERE explain LIKE '%Granules%' OR explain LIKE '%PrimaryKey%' OR explain LIKE '%Name%'; + +-- Test with RPN size of 23 - only 6 granules and 6x64=384 rows should be read +SELECT count(*) FROM tab WHERE (v1 = 1 AND v2 = 10000) OR (v1 = 129 AND v2 = 9872) OR (v1 = 999 OR v2 = 9002) OR (v1 = 1300 AND v2 = 8701) OR (v1 = 5000 AND v2 = 5001) OR (v1 = 9000 AND v2 = 1001) SETTINGS max_rows_to_read=384; + +DROP TABLE tab; + +SELECT 'Test with composite primary key condition'; +CREATE TABLE tab +( + x UInt32, + y UInt32, + v1 UInt32, + v2 UInt32, + v3 UInt32, + INDEX v1_index v1 TYPE minmax, + INDEX v2_index v2 TYPE minmax, +) +ENGINE = MergeTree +ORDER BY (x, y) +SETTINGS + index_granularity = 100, index_granularity_bytes = 0, + min_bytes_for_wide_part = 0, min_bytes_for_full_part_storage = 0, + max_bytes_to_merge_at_max_space_in_pool = 1, + use_const_adaptive_granularity = 1; + +INSERT INTO tab SELECT (number + 1) / 10, (number + 1) % 100, number + 1, (10000 - number), (number * 5) FROM numbers(1000); + +-- 1 +SELECT explain AS explain FROM ( + EXPLAIN indexes = 1 SELECT x, y, v1, v2 FROM tab WHERE (x < 100 AND y < 20) AND (v1 = 111 OR v2 = 111) +) WHERE explain LIKE '%Granules%' OR explain LIKE '%PrimaryKey%' OR explain LIKE '%Name%'; + +DROP TABLE tab; diff --git a/parser/testdata/03640_variant_array_null_map_subcolumn/ast.json b/parser/testdata/03640_variant_array_null_map_subcolumn/ast.json new file mode 100644 index 000000000..53a53b456 --- /dev/null +++ b/parser/testdata/03640_variant_array_null_map_subcolumn/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001088696, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03640_variant_array_null_map_subcolumn/metadata.json b/parser/testdata/03640_variant_array_null_map_subcolumn/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03640_variant_array_null_map_subcolumn/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03640_variant_array_null_map_subcolumn/query.sql b/parser/testdata/03640_variant_array_null_map_subcolumn/query.sql new file mode 100644 index 000000000..6afb40336 --- /dev/null +++ b/parser/testdata/03640_variant_array_null_map_subcolumn/query.sql @@ -0,0 +1,6 @@ +drop table if exists test; +create table test (v Variant(Array(Nullable(String)))) engine=MergeTree order by tuple(); +insert into test select ['hello', null, 'world']; +select 
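+-- (editorial note) .null below is the null-map subcolumn of the Nullable elements; for the row
+-- ['hello', null, 'world'] inserted above it is expected to flag the middle element
+-- (i.e. something like [0,1,0]), which is the point of the test.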
v.`Array(Nullable(String))`.null from test; +drop table test; + diff --git a/parser/testdata/03641_analyzer_issue_85834/ast.json b/parser/testdata/03641_analyzer_issue_85834/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03641_analyzer_issue_85834/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03641_analyzer_issue_85834/metadata.json b/parser/testdata/03641_analyzer_issue_85834/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03641_analyzer_issue_85834/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03641_analyzer_issue_85834/query.sql b/parser/testdata/03641_analyzer_issue_85834/query.sql new file mode 100644 index 000000000..a8903f825 --- /dev/null +++ b/parser/testdata/03641_analyzer_issue_85834/query.sql @@ -0,0 +1,14 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/85834 + +DROP TABLE IF EXISTS test_generic_events_all; + +CREATE TABLE test_generic_events_all (APIKey UInt8, SessionType UInt8) ENGINE = MergeTree() PARTITION BY APIKey ORDER BY tuple(); +INSERT INTO test_generic_events_all VALUES( 42, 42 ); +ALTER TABLE test_generic_events_all ADD COLUMN OperatingSystem UInt64 DEFAULT 42; + +CREATE ROW POLICY rp ON test_generic_events_all USING APIKey>35 TO CURRENT_USER; + +SELECT OperatingSystem +FROM test_generic_events_all +PREWHERE APIKey = 42 +SETTINGS additional_table_filters = {'test_generic_events_all':'APIKey > 40'}; diff --git a/parser/testdata/03641_group_by_injective_functoon_bad_arguments/ast.json b/parser/testdata/03641_group_by_injective_functoon_bad_arguments/ast.json new file mode 100644 index 000000000..6f2ff4304 --- /dev/null +++ b/parser/testdata/03641_group_by_injective_functoon_bad_arguments/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 3)" + }, + { + "explain": " Identifier test" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration json (children 1)" + }, + { + "explain": " DataType JSON" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001454621, + "rows_read": 10, + "bytes_read": 339 + } +} diff --git a/parser/testdata/03641_group_by_injective_functoon_bad_arguments/metadata.json b/parser/testdata/03641_group_by_injective_functoon_bad_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03641_group_by_injective_functoon_bad_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03641_group_by_injective_functoon_bad_arguments/query.sql b/parser/testdata/03641_group_by_injective_functoon_bad_arguments/query.sql new file mode 100644 index 000000000..79e52c4a5 --- /dev/null +++ b/parser/testdata/03641_group_by_injective_functoon_bad_arguments/query.sql @@ -0,0 +1,11 @@ +create table test (json JSON) engine=MergeTree order by tuple(); +insert into test select '{"a" : "str"}'; + +-- This won't work, see https://github.com/ClickHouse/ClickHouse/issues/89854 +-- select count(), toString(json.a) from test group by toString(json.a) settings enable_analyzer=0, optimize_injective_functions_in_group_by=0; + +select count(), toString(json.a) 
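+-- (editorial note) The pair of queries below toggles optimize_injective_functions_in_group_by;
+-- when enabled, the optimizer can replace GROUP BY toString(json.a) with grouping on the
+-- argument itself, and the EXPLAIN QUERY TREE run afterwards makes that rewrite visible.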
from test group by toString(json.a) settings enable_analyzer=1, optimize_injective_functions_in_group_by=0; +select count(), toString(json.a) from test group by toString(json.a) settings enable_analyzer=1, optimize_injective_functions_in_group_by=1; +explain query tree select count(), toString(json.a) from test group by toString(json.a) settings enable_analyzer=1, optimize_injective_functions_in_group_by=1; +drop table test; + diff --git a/parser/testdata/03641_json_array_of_float_and_bool/ast.json b/parser/testdata/03641_json_array_of_float_and_bool/ast.json new file mode 100644 index 000000000..ac21c3486 --- /dev/null +++ b/parser/testdata/03641_json_array_of_float_and_bool/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001264591, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03641_json_array_of_float_and_bool/metadata.json b/parser/testdata/03641_json_array_of_float_and_bool/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03641_json_array_of_float_and_bool/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03641_json_array_of_float_and_bool/query.sql b/parser/testdata/03641_json_array_of_float_and_bool/query.sql new file mode 100644 index 000000000..65966d619 --- /dev/null +++ b/parser/testdata/03641_json_array_of_float_and_bool/query.sql @@ -0,0 +1,4 @@ +set enable_analyzer=1; + +select '{"a" : [42.42, false]}'::JSON as json, dynamicType(json.a) settings input_format_json_read_bools_as_numbers=1; +select '{"a" : [42.42, false]}'::JSON as json, dynamicType(json.a) settings input_format_json_read_bools_as_numbers=0; diff --git a/parser/testdata/03642_column_ttl_sparse/ast.json b/parser/testdata/03642_column_ttl_sparse/ast.json new file mode 100644 index 000000000..ff64c82b2 --- /dev/null +++ b/parser/testdata/03642_column_ttl_sparse/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ttl_sparse_repro (children 1)" + }, + { + "explain": " Identifier ttl_sparse_repro" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001394266, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/03642_column_ttl_sparse/metadata.json b/parser/testdata/03642_column_ttl_sparse/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03642_column_ttl_sparse/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03642_column_ttl_sparse/query.sql b/parser/testdata/03642_column_ttl_sparse/query.sql new file mode 100644 index 000000000..ef2f3164e --- /dev/null +++ b/parser/testdata/03642_column_ttl_sparse/query.sql @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS ttl_sparse_repro; + +CREATE TABLE ttl_sparse_repro +( + a UInt64, + dt DateTime, + b UInt64 TTL dt + INTERVAL 2 SECOND, c UInt64 +) +ENGINE = MergeTree ORDER BY a SETTINGS ratio_of_defaults_for_sparse_serialization = 0.9; + +INSERT INTO ttl_sparse_repro SELECT number, now(), 1, if (number % 23 = 0, 1, 0) FROM numbers(10000); + +SELECT sum(c) FROM ttl_sparse_repro; + +SELECT sleep(3) FORMAT Null; + +OPTIMIZE TABLE ttl_sparse_repro FINAL; +SELECT sum(c) FROM ttl_sparse_repro; + +OPTIMIZE TABLE ttl_sparse_repro FINAL; +SELECT sum(c) FROM ttl_sparse_repro; + +OPTIMIZE TABLE ttl_sparse_repro FINAL; +SELECT sum(c) FROM ttl_sparse_repro; + +DROP TABLE 
IF EXISTS ttl_sparse_repro; diff --git a/parser/testdata/03642_system_instrument_symbols/ast.json b/parser/testdata/03642_system_instrument_symbols/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03642_system_instrument_symbols/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03642_system_instrument_symbols/metadata.json b/parser/testdata/03642_system_instrument_symbols/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03642_system_instrument_symbols/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03642_system_instrument_symbols/query.sql b/parser/testdata/03642_system_instrument_symbols/query.sql new file mode 100644 index 000000000..f80cd295c --- /dev/null +++ b/parser/testdata/03642_system_instrument_symbols/query.sql @@ -0,0 +1,5 @@ +-- Tags: use-xray + +SET allow_introspection_functions=1; + +SELECT count() > 0 FROM system.symbols WHERE symbol_demangled LIKE '%QueryMetricLog::startQuery%' AND function_id > 0 AND length(symbol_demangled) > 10; diff --git a/parser/testdata/03643_paste_join_disable_filter_pushdown/ast.json b/parser/testdata/03643_paste_join_disable_filter_pushdown/ast.json new file mode 100644 index 000000000..d7c272f23 --- /dev/null +++ b/parser/testdata/03643_paste_join_disable_filter_pushdown/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001291532, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03643_paste_join_disable_filter_pushdown/metadata.json b/parser/testdata/03643_paste_join_disable_filter_pushdown/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03643_paste_join_disable_filter_pushdown/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03643_paste_join_disable_filter_pushdown/query.sql b/parser/testdata/03643_paste_join_disable_filter_pushdown/query.sql new file mode 100644 index 000000000..9b8aab7f7 --- /dev/null +++ b/parser/testdata/03643_paste_join_disable_filter_pushdown/query.sql @@ -0,0 +1,39 @@ +SET enable_analyzer = 1; + +With + A AS ( + SELECT * FROM numbers(10) ORDER BY number ASC + ), + B AS ( + SELECT * FROM numbers(10) ORDER BY number ASC + ), + C AS ( + SELECT * FROM numbers(10) ORDER BY number DESC + ) +SELECT + * +FROM A +PASTE JOIN B AS B +PASTE JOIN C AS C +WHERE A.number % 2 == 0 +SETTINGS query_plan_filter_push_down = 1; + +SELECT '---------------------'; + +With + A AS ( + SELECT * FROM numbers(10) ORDER BY number ASC + ), + B AS ( + SELECT * FROM numbers(10) ORDER BY number ASC + ), + C AS ( + SELECT * FROM numbers(10) ORDER BY number DESC + ) +SELECT + * +FROM A +PASTE JOIN B AS B +PASTE JOIN C AS C +WHERE A.number % 2 == 0 +SETTINGS query_plan_filter_push_down = 0; diff --git a/parser/testdata/03644_join_order_mixed_comma_and_left/ast.json b/parser/testdata/03644_join_order_mixed_comma_and_left/ast.json new file mode 100644 index 000000000..eb8690b55 --- /dev/null +++ b/parser/testdata/03644_join_order_mixed_comma_and_left/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery n1 (children 1)" + }, + { + "explain": " Identifier n1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001539759, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git 
a/parser/testdata/03644_join_order_mixed_comma_and_left/metadata.json b/parser/testdata/03644_join_order_mixed_comma_and_left/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03644_join_order_mixed_comma_and_left/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03644_join_order_mixed_comma_and_left/query.sql b/parser/testdata/03644_join_order_mixed_comma_and_left/query.sql new file mode 100644 index 000000000..6777dbf0a --- /dev/null +++ b/parser/testdata/03644_join_order_mixed_comma_and_left/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS n1; +DROP TABLE IF EXISTS n2; +DROP TABLE IF EXISTS n3; + +SET query_plan_optimize_join_order_limit=16; + +CREATE TABLE n1 (number UInt64) ENGINE = MergeTree ORDER BY number; +INSERT INTO n1 SELECT number FROM numbers(3); + +CREATE TABLE n2 (number UInt64) ENGINE = MergeTree ORDER BY number; +INSERT INTO n2 SELECT number FROM numbers(2); + +CREATE TABLE n3 (number UInt64) ENGINE = MergeTree ORDER BY number; +INSERT INTO n3 SELECT number FROM numbers(2); + +SELECT * FROM n1, n2 LEFT JOIN n3 ON n1.number = n3.number ORDER BY n1.number, n2.number, n3.number; + +INSERT INTO n2 SELECT number FROM numbers(4); +SELECT * FROM n1, n2 LEFT JOIN n3 ON n1.number = n3.number ORDER BY n1.number, n2.number, n3.number; + +INSERT INTO n3 SELECT number FROM numbers(4); +SELECT * FROM n1, n2 LEFT JOIN n3 ON n1.number = n3.number ORDER BY n1.number, n2.number, n3.number; + diff --git a/parser/testdata/03644_min_level_for_wide_part/ast.json b/parser/testdata/03644_min_level_for_wide_part/ast.json new file mode 100644 index 000000000..3f7528ffc --- /dev/null +++ b/parser/testdata/03644_min_level_for_wide_part/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001094998, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03644_min_level_for_wide_part/metadata.json b/parser/testdata/03644_min_level_for_wide_part/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03644_min_level_for_wide_part/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03644_min_level_for_wide_part/query.sql b/parser/testdata/03644_min_level_for_wide_part/query.sql new file mode 100644 index 000000000..4841f0791 --- /dev/null +++ b/parser/testdata/03644_min_level_for_wide_part/query.sql @@ -0,0 +1,38 @@ +SET alter_sync = 2; + +DROP TABLE IF EXISTS t_03644_min_level_for_wide_part; +DROP TABLE IF EXISTS t_03644_min_level_for_wide_part_rmt; + +-- Can produce initial parts with level 1 +SET optimize_on_insert = 0; + +CREATE TABLE t_03644_min_level_for_wide_part (x int) ENGINE = MergeTree ORDER BY x +SETTINGS min_bytes_for_wide_part = 0, min_rows_for_wide_part = 0, min_level_for_wide_part = 1; + +INSERT INTO t_03644_min_level_for_wide_part VALUES (1); + +SELECT level, part_type FROM system.parts WHERE database = currentDatabase() AND table = 't_03644_min_level_for_wide_part' AND active; + +INSERT INTO t_03644_min_level_for_wide_part VALUES (2); + +OPTIMIZE TABLE t_03644_min_level_for_wide_part FINAL; + +SELECT level, part_type FROM system.parts WHERE database = currentDatabase() AND table = 't_03644_min_level_for_wide_part' AND active; + +SET optimize_on_insert = 1; + +CREATE TABLE t_03644_min_level_for_wide_part_rmt (x int, y int) ENGINE = ReplacingMergeTree ORDER BY x +SETTINGS min_bytes_for_wide_part = 
0, min_rows_for_wide_part = 0, min_level_for_wide_part = 2; + +INSERT INTO t_03644_min_level_for_wide_part_rmt SELECT number, number * 2 FROM numbers(10); + +SELECT level, part_type FROM system.parts WHERE database = currentDatabase() AND table = 't_03644_min_level_for_wide_part_rmt' AND active; + +INSERT INTO t_03644_min_level_for_wide_part_rmt VALUES (1, 2); + +OPTIMIZE TABLE t_03644_min_level_for_wide_part_rmt FINAL; + +SELECT level, part_type FROM system.parts WHERE database = currentDatabase() AND table = 't_03644_min_level_for_wide_part_rmt' AND active; + +DROP TABLE t_03644_min_level_for_wide_part; +DROP TABLE t_03644_min_level_for_wide_part_rmt; diff --git a/parser/testdata/03644_object_storage_correlated_subqueries/ast.json b/parser/testdata/03644_object_storage_correlated_subqueries/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03644_object_storage_correlated_subqueries/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03644_object_storage_correlated_subqueries/metadata.json b/parser/testdata/03644_object_storage_correlated_subqueries/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03644_object_storage_correlated_subqueries/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03644_object_storage_correlated_subqueries/query.sql b/parser/testdata/03644_object_storage_correlated_subqueries/query.sql new file mode 100644 index 000000000..4492d1e21 --- /dev/null +++ b/parser/testdata/03644_object_storage_correlated_subqueries/query.sql @@ -0,0 +1,16 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: needs s3 + +-- Use correlated subqueries which are supported only by the new analyzer. +set enable_analyzer = 1; + +INSERT INTO TABLE FUNCTION s3('http://localhost:11111/test/test-data-03644_object_storage.csv', 'test', 'testtest', 'CSV', 'number UInt64') SELECT number FROM numbers(10) SETTINGS s3_truncate_on_insert = 1; + +SELECT n1.c1 +FROM s3('http://localhost:11111/test/test-data-03644_object_storage.csv', 'test', 'testtest') AS n1 +WHERE n1.c1 > ( + SELECT AVG(n2.c1) + FROM s3('http://localhost:11111/test/test-data-03644_object_storage.csv', 'test', 'testtest') AS n2 + WHERE n2.c1 < n1.c1 +) ORDER BY n1.c1 +SETTINGS allow_experimental_correlated_subqueries = 1; diff --git a/parser/testdata/03644_rows_before_aggregation_in_order/ast.json b/parser/testdata/03644_rows_before_aggregation_in_order/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03644_rows_before_aggregation_in_order/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03644_rows_before_aggregation_in_order/metadata.json b/parser/testdata/03644_rows_before_aggregation_in_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03644_rows_before_aggregation_in_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03644_rows_before_aggregation_in_order/query.sql b/parser/testdata/03644_rows_before_aggregation_in_order/query.sql new file mode 100644 index 000000000..abfd7f830 --- /dev/null +++ b/parser/testdata/03644_rows_before_aggregation_in_order/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-parallel-replicas, no-random-merge-tree-settings +-- no-parallel-replicas: always returns rows_before_limit_counter in response + +drop table if exists 03644_data; + +create table 03644_data (i UInt32) engine = MergeTree order by i +as +select number from numbers(10000); + +select i 
+from 03644_data +group by i +having count() > 1 +settings + rows_before_aggregation = 1, + exact_rows_before_limit = 1, + output_format_write_statistics = 0, + max_block_size = 100, + aggregation_in_order_max_block_bytes = 8, + optimize_aggregation_in_order=1 +format JSONCompact; + +drop table 03644_data; diff --git a/parser/testdata/03646_array_join_empty/ast.json b/parser/testdata/03646_array_join_empty/ast.json new file mode 100644 index 000000000..a3e744f86 --- /dev/null +++ b/parser/testdata/03646_array_join_empty/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001018122, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03646_array_join_empty/metadata.json b/parser/testdata/03646_array_join_empty/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03646_array_join_empty/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03646_array_join_empty/query.sql b/parser/testdata/03646_array_join_empty/query.sql new file mode 100644 index 000000000..d2321b5c8 --- /dev/null +++ b/parser/testdata/03646_array_join_empty/query.sql @@ -0,0 +1,18 @@ +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 ( + x UInt32, + arr1 Array(Int32), + arr2 Array(Int32) +) ENGINE = Memory; + +INSERT INTO t1 VALUES (1, [10, 20], [30, 40]); + +-- Test normal COLUMNS() ARRAY JOIN (should work) +SELECT x, arr1, arr2 FROM t1 ARRAY JOIN COLUMNS('arr.*') ORDER BY arr1, arr2; + +-- Test COLUMNS() matching no columns (should fail) +SELECT * FROM t1 ARRAY JOIN COLUMNS('nonexistent'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +DROP TABLE t1; diff --git a/parser/testdata/03647_morton_encode_empty_tuple/ast.json b/parser/testdata/03647_morton_encode_empty_tuple/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03647_morton_encode_empty_tuple/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03647_morton_encode_empty_tuple/metadata.json b/parser/testdata/03647_morton_encode_empty_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03647_morton_encode_empty_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03647_morton_encode_empty_tuple/query.sql b/parser/testdata/03647_morton_encode_empty_tuple/query.sql new file mode 100644 index 000000000..a066e532a --- /dev/null +++ b/parser/testdata/03647_morton_encode_empty_tuple/query.sql @@ -0,0 +1,5 @@ +-- Test for issue #87840: mortonEncode with empty tuple should fail gracefully +SELECT mortonEncode(()); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +-- hilbertEncode should also reject empty tuple (uses same base class) +SELECT hilbertEncode(()); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/03651_merge_tree_compact_read_string_size_subcolumn/ast.json b/parser/testdata/03651_merge_tree_compact_read_string_size_subcolumn/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03651_merge_tree_compact_read_string_size_subcolumn/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03651_merge_tree_compact_read_string_size_subcolumn/metadata.json b/parser/testdata/03651_merge_tree_compact_read_string_size_subcolumn/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03651_merge_tree_compact_read_string_size_subcolumn/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03651_merge_tree_compact_read_string_size_subcolumn/query.sql b/parser/testdata/03651_merge_tree_compact_read_string_size_subcolumn/query.sql new file mode 100644 index 000000000..e4fd6b9f4 --- /dev/null +++ b/parser/testdata/03651_merge_tree_compact_read_string_size_subcolumn/query.sql @@ -0,0 +1,11 @@ +-- { echo ON } + +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 String) ENGINE = MergeTree() ORDER BY (c0) SETTINGS write_marks_for_substreams_in_compact_parts = 0; + +INSERT INTO TABLE t0 (c0) VALUES(''); + +SELECT c0.size FROM t0; + +DROP TABLE t0; diff --git a/parser/testdata/03651_positional_argument_agg_projection/ast.json b/parser/testdata/03651_positional_argument_agg_projection/ast.json new file mode 100644 index 000000000..fb69f9a0a --- /dev/null +++ b/parser/testdata/03651_positional_argument_agg_projection/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001803767, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03651_positional_argument_agg_projection/metadata.json b/parser/testdata/03651_positional_argument_agg_projection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03651_positional_argument_agg_projection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03651_positional_argument_agg_projection/query.sql b/parser/testdata/03651_positional_argument_agg_projection/query.sql new file mode 100644 index 000000000..c2729532e --- /dev/null +++ b/parser/testdata/03651_positional_argument_agg_projection/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + `a` UInt64, + `b` String +) +ENGINE = MergeTree +ORDER BY a; + +ALTER TABLE test + ADD PROJECTION test_projection + ( + SELECT + 0 AS bug, + max(a) + GROUP BY bug + ); + +DROP TABLE test; diff --git a/parser/testdata/03652_coalescing_merge_tree_fix_empty_tuple/ast.json b/parser/testdata/03652_coalescing_merge_tree_fix_empty_tuple/ast.json new file mode 100644 index 000000000..9b47fa357 --- /dev/null +++ b/parser/testdata/03652_coalescing_merge_tree_fix_empty_tuple/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001343803, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03652_coalescing_merge_tree_fix_empty_tuple/metadata.json b/parser/testdata/03652_coalescing_merge_tree_fix_empty_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03652_coalescing_merge_tree_fix_empty_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03652_coalescing_merge_tree_fix_empty_tuple/query.sql b/parser/testdata/03652_coalescing_merge_tree_fix_empty_tuple/query.sql new file mode 100644 index 000000000..4035a1a99 --- /dev/null +++ b/parser/testdata/03652_coalescing_merge_tree_fix_empty_tuple/query.sql @@ -0,0 +1,42 @@ +SET allow_suspicious_primary_key = 1; + +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Tuple(a Int32, b Nullable(Int32)), c1 Int32) ENGINE = SummingMergeTree() ORDER BY c1; +INSERT INTO t0 VALUES ((1,2), 0); +INSERT 
INTO t0 VALUES ((3,4), 0); +SELECT c0 FROM t0 FINAL; + +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Tuple(a Int32, b Nullable(Int32)), c1 Int32) ENGINE = CoalescingMergeTree() ORDER BY c1; +INSERT INTO t0 VALUES ((1,2), 0); -- returns this one because the tuple type is not Nullable, so we cannot aggregate the tuple columns +INSERT INTO t0 VALUES ((3,4), 0); +SELECT c0 FROM t0 FINAL; + +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Array(Nullable(Int32)), c1 Int32) ENGINE = SummingMergeTree() ORDER BY c1; +INSERT INTO t0 VALUES ([1,2], 0); +INSERT INTO t0 VALUES ([3,4], 0); +SELECT c0 FROM t0 FINAL; + + +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Array(Nullable(Int32)), c1 Int32) ENGINE = CoalescingMergeTree() ORDER BY c1; +INSERT INTO t0 VALUES ([1,2], 0); -- returns this one because the array type is not Nullable, so we cannot aggregate the array columns +INSERT INTO t0 VALUES ([3,4], 0); +SELECT c0 FROM t0 FINAL; + +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Tuple) ENGINE = CoalescingMergeTree() ORDER BY tuple(); +INSERT INTO t0 (c0) VALUES (()); +SELECT c0 FROM t0 FINAL; + +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Tuple) ENGINE = SummingMergeTree() ORDER BY tuple(); +INSERT INTO t0 (c0) VALUES (()); +SELECT c0 FROM t0 FINAL; diff --git a/parser/testdata/03652_explain_input_header/ast.json b/parser/testdata/03652_explain_input_header/ast.json new file mode 100644 index 000000000..b342bcd6e --- /dev/null +++ b/parser/testdata/03652_explain_input_header/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001277709, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03652_explain_input_header/metadata.json b/parser/testdata/03652_explain_input_header/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03652_explain_input_header/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03652_explain_input_header/query.sql b/parser/testdata/03652_explain_input_header/query.sql new file mode 100644 index 000000000..4c2087240 --- /dev/null +++ b/parser/testdata/03652_explain_input_header/query.sql @@ -0,0 +1,39 @@ +SET enable_analyzer = 1; +SET enable_parallel_replicas = 0; +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (x Int32, y String) ENGINE = Memory; +CREATE TABLE t2 (x Int32, y String) ENGINE = Memory; + +INSERT INTO t1 VALUES (1, 'a'), (2, 'b'), (3, 'c'); +INSERT INTO t2 VALUES (1, 'a'), (2, 'b'), (3, 'c'); + +-- All possible combinations of header and input_header options +EXPLAIN PLAN header = 0, input_headers = 0 SELECT * FROM t1 WHERE x > 2 OR y = 'a' LIMIT 10; +EXPLAIN PLAN header = 0, input_headers = 1 SELECT * FROM t1 WHERE x > 2 OR y = 'a' LIMIT 10; +EXPLAIN PLAN header = 1, input_headers = 0 SELECT * FROM t1 WHERE x > 2 OR y = 'a' LIMIT 10; +EXPLAIN PLAN header = 1, input_headers = 1 SELECT * FROM t1 WHERE x > 2 OR y = 'a' LIMIT 10; + +-- One complex query with multiple input headers +EXPLAIN PLAN header = 1, input_headers = 1 +( + SELECT * FROM t1 INNER JOIN t2 USING x WHERE t1.x > 2 OR t2.y = 'a' LIMIT 10 +) UNION ALL ( + SELECT * FROM t2 LEFT JOIN t1 USING x WHERE t2.x < 2 OR t1.y = 'c' LIMIT 10 +); + +-- Same as above, but in JSON format +-- All possible combinations of header and input_header options +EXPLAIN PLAN json = 1, header = 0, input_headers = 0 SELECT * FROM t1 WHERE x > 2 OR y = 'a' LIMIT 10;
+EXPLAIN PLAN json = 1, header = 0, input_headers = 1 SELECT * FROM t1 WHERE x > 2 OR y = 'a' LIMIT 10; +EXPLAIN PLAN json = 1, header = 1, input_headers = 0 SELECT * FROM t1 WHERE x > 2 OR y = 'a' LIMIT 10; +EXPLAIN PLAN json = 1, header = 1, input_headers = 1 SELECT * FROM t1 WHERE x > 2 OR y = 'a' LIMIT 10; + +-- One complex query with multiple input headers +EXPLAIN PLAN json = 1, header = 1, input_headers = 1 +( + SELECT * FROM t1 INNER JOIN t2 USING x WHERE t1.x > 2 OR t2.y = 'a' LIMIT 10 +) UNION ALL ( + SELECT * FROM t2 LEFT JOIN t1 USING x WHERE t2.x < 2 OR t1.y = 'c' LIMIT 10 +); diff --git a/parser/testdata/03652_generate_serial_id_non_constant_zookeeper/ast.json b/parser/testdata/03652_generate_serial_id_non_constant_zookeeper/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03652_generate_serial_id_non_constant_zookeeper/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03652_generate_serial_id_non_constant_zookeeper/metadata.json b/parser/testdata/03652_generate_serial_id_non_constant_zookeeper/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03652_generate_serial_id_non_constant_zookeeper/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03652_generate_serial_id_non_constant_zookeeper/query.sql b/parser/testdata/03652_generate_serial_id_non_constant_zookeeper/query.sql new file mode 100644 index 000000000..06f7f55db --- /dev/null +++ b/parser/testdata/03652_generate_serial_id_non_constant_zookeeper/query.sql @@ -0,0 +1,3 @@ +-- Tags: zookeeper + +WITH currentDatabase() || '_test1_' || (number MOD 3) AS key1, currentDatabase() || '_test2_' || (number DIV 3) AS key2 SELECT number, generateSerialID(key1), generateSerialID(key2) FROM numbers(10); diff --git a/parser/testdata/03652_join_using_legacy_step/ast.json b/parser/testdata/03652_join_using_legacy_step/ast.json new file mode 100644 index 000000000..fa8ba1769 --- /dev/null +++ b/parser/testdata/03652_join_using_legacy_step/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001285287, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03652_join_using_legacy_step/metadata.json b/parser/testdata/03652_join_using_legacy_step/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03652_join_using_legacy_step/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03652_join_using_legacy_step/query.sql b/parser/testdata/03652_join_using_legacy_step/query.sql new file mode 100644 index 000000000..4bcac4bb8 --- /dev/null +++ b/parser/testdata/03652_join_using_legacy_step/query.sql @@ -0,0 +1,52 @@ +SET query_plan_use_new_logical_join_step = 0; +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; +DROP TABLE IF EXISTS t4; + +CREATE TABLE t1 (x Int8) ENGINE = Memory; +INSERT INTO t1 VALUES (1); +CREATE TABLE t2 (x UInt8) ENGINE = Memory; +INSERT INTO t2 VALUES (1); +CREATE TABLE t3 (x UInt16) ENGINE = Memory; +INSERT INTO t3 VALUES (1); +CREATE TABLE t4 (x UInt32) ENGINE = Memory; +INSERT INTO t4 VALUES (1); + +SELECT coalesce(t1.x, t2.x, t3.x, t4.x) AS x +FROM t1 +FULL OUTER JOIN t2 USING (x) +FULL OUTER JOIN t3 USING (x) +FULL OUTER JOIN t4 USING (x) +; + +SELECT 1 as x +FROM t1 +FULL OUTER JOIN t2 USING (x) +FULL OUTER 
JOIN t3 USING (x) +FULL OUTER JOIN t4 USING (x) +; + +SELECT 1 as x +FROM t1 +RIGHT OUTER JOIN t2 USING (x) +RIGHT OUTER JOIN t3 USING (x) +RIGHT OUTER JOIN t4 USING (x) +; + +SELECT 1 as x +FROM t1 +LEFT OUTER JOIN t2 USING (x) +LEFT OUTER JOIN t3 USING (x) +LEFT OUTER JOIN t4 USING (x) +; + + +SELECT 1 as x +FROM t1 +RIGHT OUTER JOIN t2 USING (x) +FULL OUTER JOIN t3 USING (x) +INNER JOIN t4 USING (x) +; diff --git a/parser/testdata/03653_fractional_limit_offset/ast.json b/parser/testdata/03653_fractional_limit_offset/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03653_fractional_limit_offset/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03653_fractional_limit_offset/metadata.json b/parser/testdata/03653_fractional_limit_offset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03653_fractional_limit_offset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03653_fractional_limit_offset/query.sql b/parser/testdata/03653_fractional_limit_offset/query.sql new file mode 100644 index 000000000..6c2e59fd6 --- /dev/null +++ b/parser/testdata/03653_fractional_limit_offset/query.sql @@ -0,0 +1,180 @@ +SET enable_analyzer=0; +SELECT 'Old Analyzer'; + +SELECT 'Fractional Limit Only:'; +SELECT number FROM numbers(10) LIMIT 0.1; +SELECT number FROM numbers(100) LIMIT 0.01; +SELECT number FROM numbers(1000) LIMIT 0.001; +SELECT number FROM numbers(10) LIMIT 0.5; +SELECT number FROM numbers(10) LIMIT 0.9; +SELECT number FROM numbers(10) LIMIT 0.99; + +SELECT 'Fractional Offset Only:'; +SELECT number FROM numbers(10) OFFSET 0.1; +SELECT number FROM numbers(100) OFFSET 0.01; +SELECT number FROM numbers(10) OFFSET 0.5; +SELECT number FROM numbers(10) OFFSET 0.9; +SELECT number FROM numbers(10) OFFSET 0.99; + +SELECT 'Fractional Limit and Fractional Offset:'; +SELECT number FROM numbers(10) LIMIT 0.1 OFFSET 0.1; +SELECT number FROM numbers(10) LIMIT 0.1 OFFSET 0.2; +SELECT number FROM numbers(10) LIMIT 0.1 OFFSET 0.5; +SELECT number FROM numbers(100) LIMIT 0.01 OFFSET 0.9; + +SELECT 'Fractional Limit and Normal Offset:'; +SELECT number FROM numbers(10) LIMIT 0.1 OFFSET 1; +SELECT number FROM numbers(10) LIMIT 0.1 OFFSET 2; +SELECT number FROM numbers(10) LIMIT 0.1 OFFSET 5; +SELECT number FROM numbers(100) LIMIT 0.01 OFFSET 90; + +SELECT 'Normal Limit and Fractional Offset:'; +SELECT number FROM numbers(10) LIMIT 1 OFFSET 0.1; +SELECT number FROM numbers(10) LIMIT 1 OFFSET 0.2; +SELECT number FROM numbers(10) LIMIT 1 OFFSET 0.5; +SELECT number FROM numbers(100) LIMIT 1 OFFSET 0.9; + +SELECT 'Misc:'; + +SELECT number FROM numbers(1000) LIMIT 1 OFFSET 0.5; + +SELECT number FROM numbers(1000) ORDER BY number DESC LIMIT 1 OFFSET 0.5; + +SELECT number FROM numbers(12) LIMIT 0.25 OFFSET 0.5; + +SELECT number FROM numbers(1000000) LIMIT 1 OFFSET 0.0999999; + +SELECT 'Double Column:'; + +DROP TABLE IF EXISTS num_tab; +CREATE TABLE num_tab +( + `id` UInt8, + `val` UInt32 +) +ENGINE = MergeTree +ORDER BY (id, val) +AS SELECT + number % 2 AS id, + number AS val +FROM numbers(20); + +SELECT + IF((count() = 5) AND (min(val) = 15) AND (max(val) = 19) AND (sum(val) = 85) AND (uniqExact(id) = 2), 'OK', 'FAIL') +FROM +( + SELECT + id, + val + FROM num_tab + ORDER BY val ASC + LIMIT 0.25 + OFFSET 0.75 +); + +SELECT 'Big Tables:'; + +DROP TABLE IF EXISTS num_tab; +CREATE TABLE num_tab +ENGINE = MergeTree +ORDER BY number +AS SELECT number FROM numbers(1000000); + +SELECT + number +FROM + 
num_tab +ORDER BY number +LIMIT 10 +OFFSET 0.99999; + +SET enable_analyzer=1; +SELECT 'New Analyzer'; + +SELECT 'Fractional Limit Only:'; +SELECT number FROM numbers(10) LIMIT 0.1; +SELECT number FROM numbers(100) LIMIT 0.01; +SELECT number FROM numbers(1000) LIMIT 0.001; +SELECT number FROM numbers(10) LIMIT 0.5; +SELECT number FROM numbers(10) LIMIT 0.9; +SELECT number FROM numbers(10) LIMIT 0.99; + +SELECT 'Fractional Offset Only:'; +SELECT number FROM numbers(10) OFFSET 0.1; +SELECT number FROM numbers(100) OFFSET 0.01; +SELECT number FROM numbers(10) OFFSET 0.5; +SELECT number FROM numbers(10) OFFSET 0.9; +SELECT number FROM numbers(10) OFFSET 0.99; + +SELECT 'Fractional Limit and Fractional Offset:'; +SELECT number FROM numbers(10) LIMIT 0.1 OFFSET 0.1; +SELECT number FROM numbers(10) LIMIT 0.1 OFFSET 0.2; +SELECT number FROM numbers(10) LIMIT 0.1 OFFSET 0.5; +SELECT number FROM numbers(100) LIMIT 0.01 OFFSET 0.9; + +SELECT 'Fractional Limit and Normal Offset:'; +SELECT number FROM numbers(10) LIMIT 0.1 OFFSET 1; +SELECT number FROM numbers(10) LIMIT 0.1 OFFSET 2; +SELECT number FROM numbers(10) LIMIT 0.1 OFFSET 5; +SELECT number FROM numbers(100) LIMIT 0.01 OFFSET 90; + +SELECT 'Normal Limit and Fractional Offset:'; +SELECT number FROM numbers(10) LIMIT 1 OFFSET 0.1; +SELECT number FROM numbers(10) LIMIT 1 OFFSET 0.2; +SELECT number FROM numbers(10) LIMIT 1 OFFSET 0.5; +SELECT number FROM numbers(100) LIMIT 1 OFFSET 0.9; + +SELECT 'Misc:'; + +SELECT number FROM numbers(1000) LIMIT 1 OFFSET 0.5; + +SELECT number FROM numbers(1000) ORDER BY number DESC LIMIT 1 OFFSET 0.5; + +SELECT number FROM numbers(12) LIMIT 0.25 OFFSET 0.5; + +SELECT number FROM numbers(1000000) LIMIT 1 OFFSET 0.0999999; + +SELECT 'Double Column:'; + +DROP TABLE IF EXISTS num_tab; + +CREATE TABLE num_tab +( + `id` UInt8, + `val` UInt32 +) +ENGINE = MergeTree +ORDER BY (id, val) +AS SELECT + number % 2 AS id, + number AS val +FROM numbers(20); + +SELECT + IF((count() = 5) AND (min(val) = 15) AND (max(val) = 19) AND (sum(val) = 85) AND (uniqExact(id) = 2), 'OK', 'FAIL') +FROM +( + SELECT + id, + val + FROM num_tab + ORDER BY val ASC + LIMIT 0.25 + OFFSET 0.75 +); + +SELECT 'Big Tables:'; + +DROP TABLE IF EXISTS num_tab; +CREATE TABLE num_tab +ENGINE = MergeTree +ORDER BY number +AS SELECT number FROM numbers(1000000); + +SELECT + number +FROM + num_tab +ORDER BY number +LIMIT 10 +OFFSET 0.99999; diff --git a/parser/testdata/03653_keeper_histogram_metrics/ast.json b/parser/testdata/03653_keeper_histogram_metrics/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03653_keeper_histogram_metrics/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03653_keeper_histogram_metrics/metadata.json b/parser/testdata/03653_keeper_histogram_metrics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03653_keeper_histogram_metrics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03653_keeper_histogram_metrics/query.sql b/parser/testdata/03653_keeper_histogram_metrics/query.sql new file mode 100644 index 000000000..66e2a671b --- /dev/null +++ b/parser/testdata/03653_keeper_histogram_metrics/query.sql @@ -0,0 +1,22 @@ +-- Tags: zookeeper + +-- Smoke test for the keeper client histogram metrics + +-- histograms with the operation_type label +SELECT value > 0 +FROM system.histogram_metrics +WHERE name = 'keeper_response_time_ms' + AND labels['operation_type'] = 'readonly' + AND labels['le'] = '+Inf'; + 
+SELECT value > 0 +FROM system.histogram_metrics +WHERE name = 'keeper_client_roundtrip_duration_milliseconds' + AND labels['operation_type'] = 'readonly' + AND labels['le'] = '+Inf'; + +-- histogram without the operation_type label +SELECT value > 0 +FROM system.histogram_metrics +WHERE name = 'keeper_client_queue_duration_milliseconds' + AND labels['le'] = '+Inf'; diff --git a/parser/testdata/03653_updating_minmax_idx_after_mutation/ast.json b/parser/testdata/03653_updating_minmax_idx_after_mutation/ast.json new file mode 100644 index 000000000..269176fac --- /dev/null +++ b/parser/testdata/03653_updating_minmax_idx_after_mutation/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.0015534, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03653_updating_minmax_idx_after_mutation/metadata.json b/parser/testdata/03653_updating_minmax_idx_after_mutation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03653_updating_minmax_idx_after_mutation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03653_updating_minmax_idx_after_mutation/query.sql b/parser/testdata/03653_updating_minmax_idx_after_mutation/query.sql new file mode 100644 index 000000000..e27d8b47e --- /dev/null +++ b/parser/testdata/03653_updating_minmax_idx_after_mutation/query.sql @@ -0,0 +1,15 @@ +DROP TABLE IF EXISTS test; +CREATE TABLE test +( + time DateTime +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(time) ORDER BY (); + +INSERT INTO test VALUES ('2000-01-01 01:02:03'), ('2000-01-01 04:05:06'); +SELECT max_time FROM system.parts WHERE database = currentDatabase() AND table = 'test' AND active; +SET mutations_sync = 1; +ALTER TABLE test DELETE WHERE time >= '2000-01-01 02:00:00'; +SELECT max_time FROM system.parts WHERE database = currentDatabase() AND table = 'test' AND active; + +DROP TABLE test; diff --git a/parser/testdata/03654_case_non_constant_null/ast.json b/parser/testdata/03654_case_non_constant_null/ast.json new file mode 100644 index 000000000..5d52ba155 --- /dev/null +++ b/parser/testdata/03654_case_non_constant_null/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test1 (children 1)" + }, + { + "explain": " Identifier test1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001434738, + "rows_read": 2, + "bytes_read": 62 + } +} diff --git a/parser/testdata/03654_case_non_constant_null/metadata.json b/parser/testdata/03654_case_non_constant_null/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03654_case_non_constant_null/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03654_case_non_constant_null/query.sql b/parser/testdata/03654_case_non_constant_null/query.sql new file mode 100644 index 000000000..001d1700a --- /dev/null +++ b/parser/testdata/03654_case_non_constant_null/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS test1; +CREATE TABLE test1 (a Int, b Int, c int,d int) ENGINE = MergeTree() PRIMARY KEY a; +insert into test1 values(1,1,2,2); +select case a+b when c then 'c' when d then 'd' end from test1; +DROP TABLE test1; diff --git a/parser/testdata/03654_grouping_sets_any_min_max/ast.json 
b/parser/testdata/03654_grouping_sets_any_min_max/ast.json new file mode 100644 index 000000000..64667fad5 --- /dev/null +++ b/parser/testdata/03654_grouping_sets_any_min_max/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001342841, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03654_grouping_sets_any_min_max/metadata.json b/parser/testdata/03654_grouping_sets_any_min_max/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03654_grouping_sets_any_min_max/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03654_grouping_sets_any_min_max/query.sql b/parser/testdata/03654_grouping_sets_any_min_max/query.sql new file mode 100644 index 000000000..ece12b9e2 --- /dev/null +++ b/parser/testdata/03654_grouping_sets_any_min_max/query.sql @@ -0,0 +1,58 @@ +SET enable_analyzer = 1; + +SELECT grouping(num1), num1, + any(num1), min(num1), max(num1), sum(num1), avg(num1), count(num1), + any(num2), min(num2), max(num2), sum(num2), avg(num2), count(num2) +FROM (SELECT 10 AS num1, 20 AS num2) +GROUP BY GROUPING SETS ((num1), ()) +ORDER BY grouping(num1) DESC; + +DROP TABLE IF EXISTS users; +CREATE TABLE users (uid Int16, name String, age Int16, ts DateTime) ENGINE=MergeTree order by tuple(); + +INSERT INTO users VALUES (1231, 'John', 1, toDateTime('2025-10-11 12:13:14')); +INSERT INTO users VALUES (1231, 'John', 2, toDateTime('2025-10-11 12:13:14') + 1); +INSERT INTO users VALUES (1231, 'John', 3 , toDateTime('2025-10-11 12:13:14') + 2); + +INSERT INTO users VALUES (6666, 'Ksenia', 1, toDateTime('2025-10-11 12:13:14') + 3); +INSERT INTO users VALUES (6666, 'Ksenia', 2, toDateTime('2025-10-11 12:13:14') + 4); + +INSERT INTO users VALUES (8888, 'Alice', 1, toDateTime('2025-10-11 12:13:14') + 5); + +select arrayStringConcat(groupArray('-')) from numbers(67); + +-- Query A +select + uid, name + ,sum(age) + ,count() + ,arrayUniq(groupArray(ts)) + ,max(age) + ,max(ts) +from users +group by grouping sets +( + (*), + () +) +ORDER BY ALL; + +select arrayStringConcat(groupArray('-')) from numbers(67); + +-- Query B +select + uid, name + ,sum(age) + ,count() + ,arrayUniq(groupArray(ts)) + ,max(age) + ,max(ts) +from users +group by grouping sets +( + (uid, name), + () +) +ORDER BY ALL; + +DROP TABLE users; diff --git a/parser/testdata/03655_keeper_map_alter_comment/ast.json b/parser/testdata/03655_keeper_map_alter_comment/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03655_keeper_map_alter_comment/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03655_keeper_map_alter_comment/metadata.json b/parser/testdata/03655_keeper_map_alter_comment/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03655_keeper_map_alter_comment/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03655_keeper_map_alter_comment/query.sql b/parser/testdata/03655_keeper_map_alter_comment/query.sql new file mode 100644 index 000000000..206e4ad66 --- /dev/null +++ b/parser/testdata/03655_keeper_map_alter_comment/query.sql @@ -0,0 +1,20 @@ +-- Tags: zookeeper +SET distributed_ddl_output_mode = 'none'; + +DROP DATABASE IF EXISTS {CLICKHOUSE_DATABASE:Identifier}; +CREATE DATABASE {CLICKHOUSE_DATABASE:Identifier} ENGINE=Replicated('/clickhouse/databases/{database}', 'shard1', 
'replica1'); +USE {CLICKHOUSE_DATABASE:Identifier}; + +CREATE TABLE 03655_keepermap (k UInt64) ENGINE = KeeperMap('/' || currentDatabase() || '/03655_keepermap') PRIMARY KEY (k); + +SELECT '-- Before ALTER:'; +SELECT 'local:', regexpExtract(create_table_query, '(`k`.+?)(\n|\))', 1) FROM system.tables WHERE database = currentDatabase() AND table = '03655_keepermap'; +SELECT 'keeper:', regexpExtract(value, '(`k`.+?)(\n|\))', 1) FROM system.zookeeper WHERE path = '/clickhouse/databases/' || currentDatabase() || '/metadata'; + +ALTER TABLE 03655_keepermap COMMENT COLUMN k 'some comment'; + +SELECT '-- After ALTER:'; +SELECT 'local:', regexpExtract(create_table_query, '(`k`.+?)(\n|\))', 1) FROM system.tables WHERE database = currentDatabase() AND table = '03655_keepermap'; +SELECT 'keeper:', regexpExtract(value, '(`k`.+?)(\n|\))', 1) FROM system.zookeeper WHERE path = '/clickhouse/databases/' || currentDatabase() || '/metadata'; + +DROP DATABASE {CLICKHOUSE_DATABASE:Identifier} SYNC; diff --git a/parser/testdata/03655_system_databases_is_external/ast.json b/parser/testdata/03655_system_databases_is_external/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03655_system_databases_is_external/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03655_system_databases_is_external/metadata.json b/parser/testdata/03655_system_databases_is_external/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03655_system_databases_is_external/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03655_system_databases_is_external/query.sql b/parser/testdata/03655_system_databases_is_external/query.sql new file mode 100644 index 000000000..9fcae7db7 --- /dev/null +++ b/parser/testdata/03655_system_databases_is_external/query.sql @@ -0,0 +1,26 @@ +-- Tags: no-fasttest +-- - no-fasttest -- no SQLite + +set allow_deprecated_database_ordinary=1; +-- Suppress "Server has databases (for example `X`) with Ordinary engine, which was deprecated." 
+set send_logs_level='error'; + +create database {CLICKHOUSE_DATABASE_1:Identifier} engine=Ordinary; +select engine, is_external from system.databases where name = {CLICKHOUSE_DATABASE_1:String}; +drop database {CLICKHOUSE_DATABASE_1:Identifier} sync; + +create database {CLICKHOUSE_DATABASE_1:Identifier} engine=Atomic; +select engine, is_external from system.databases where name = {CLICKHOUSE_DATABASE_1:String}; +drop database {CLICKHOUSE_DATABASE_1:Identifier} sync; + +create database {CLICKHOUSE_DATABASE_1:Identifier} engine=Memory; +select engine, is_external from system.databases where name = {CLICKHOUSE_DATABASE_1:String}; +drop database {CLICKHOUSE_DATABASE_1:Identifier} sync; + +create database {CLICKHOUSE_DATABASE_1:Identifier} engine=Replicated('/test/{database}/rdb', 's1', 'r1'); +select engine, is_external from system.databases where name = {CLICKHOUSE_DATABASE_1:String}; +drop database {CLICKHOUSE_DATABASE_1:Identifier} sync; + +create database {CLICKHOUSE_DATABASE_1:Identifier} engine=SQLite({CLICKHOUSE_DATABASE_1:String}); +select engine, is_external from system.databases where name = {CLICKHOUSE_DATABASE_1:String}; +drop database {CLICKHOUSE_DATABASE_1:Identifier} sync; diff --git a/parser/testdata/03656_nan_comparison/ast.json b/parser/testdata/03656_nan_comparison/ast.json new file mode 100644 index 000000000..5e480fd29 --- /dev/null +++ b/parser/testdata/03656_nan_comparison/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t7 (children 1)" + }, + { + "explain": " Identifier t7" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00173651, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03656_nan_comparison/metadata.json b/parser/testdata/03656_nan_comparison/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03656_nan_comparison/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03656_nan_comparison/query.sql b/parser/testdata/03656_nan_comparison/query.sql new file mode 100644 index 000000000..1b7565bb5 --- /dev/null +++ b/parser/testdata/03656_nan_comparison/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS t7; +create table t7 (c57 UInt32) engine = MergeTree order by c57; +insert into t7 values (1); + +SELECT +( +select count(*) +from t7 as ref_0 +where ref_0.c57 <> (case when 1 = 1 then nan else ref_0.c57 end) +) += +( +select count(*) +from t7 as ref_0 +where ref_0.c57 <> nan +); + +DROP TABLE t7; diff --git a/parser/testdata/03657_gby_overflow_any_sparse/ast.json b/parser/testdata/03657_gby_overflow_any_sparse/ast.json new file mode 100644 index 000000000..5fe9d0930 --- /dev/null +++ b/parser/testdata/03657_gby_overflow_any_sparse/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery 03657_gby_overflow (children 1)" + }, + { + "explain": " Identifier 03657_gby_overflow" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.0012656, + "rows_read": 2, + "bytes_read": 88 + } +} diff --git a/parser/testdata/03657_gby_overflow_any_sparse/metadata.json b/parser/testdata/03657_gby_overflow_any_sparse/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03657_gby_overflow_any_sparse/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03657_gby_overflow_any_sparse/query.sql 
b/parser/testdata/03657_gby_overflow_any_sparse/query.sql new file mode 100644 index 000000000..9d0891b6b --- /dev/null +++ b/parser/testdata/03657_gby_overflow_any_sparse/query.sql @@ -0,0 +1,14 @@ +DROP TABLE IF EXISTS 03657_gby_overflow; + +CREATE TABLE 03657_gby_overflow(key UInt64, val UInt16) ENGINE = MergeTree ORDER BY tuple() +AS SELECT number, 0 from numbers(100000); + +SELECT key, any(val) FROM 03657_gby_overflow GROUP BY key ORDER BY key LIMIT 10 +SETTINGS group_by_overflow_mode = 'any', + max_rows_to_group_by = 100, + max_threads = 1, + max_block_size = 100, + group_by_two_level_threshold = 1000000000, + group_by_two_level_threshold_bytes = 1000000000; + +DROP TABLE 03657_gby_overflow; diff --git a/parser/testdata/03657_hash_vs_full_sorting_merge_join/ast.json b/parser/testdata/03657_hash_vs_full_sorting_merge_join/ast.json new file mode 100644 index 000000000..012a02382 --- /dev/null +++ b/parser/testdata/03657_hash_vs_full_sorting_merge_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001491377, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03657_hash_vs_full_sorting_merge_join/metadata.json b/parser/testdata/03657_hash_vs_full_sorting_merge_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03657_hash_vs_full_sorting_merge_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03657_hash_vs_full_sorting_merge_join/query.sql b/parser/testdata/03657_hash_vs_full_sorting_merge_join/query.sql new file mode 100644 index 000000000..c0696e6bb --- /dev/null +++ b/parser/testdata/03657_hash_vs_full_sorting_merge_join/query.sql @@ -0,0 +1,43 @@ +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS tn1; +DROP TABLE IF EXISTS tn2; + +CREATE TABLE t1 (key UInt32, s String) engine = TinyLog; +CREATE TABLE tn1 (key Nullable(UInt32), s String) engine = TinyLog; +CREATE TABLE t2 (key UInt32, s String) engine = TinyLog; +CREATE TABLE tn2 (key Nullable(UInt32), s String) engine = TinyLog; + +INSERT INTO t1 VALUES (1, 'val1'), (2, 'val21'), (2, 'val22'), (2, 'val23'), (2, 'val24'), (2, 'val25'), (2, 'val26'), (2, 'val27'), (3, 'val3'); +INSERT INTO tn1 VALUES (1, 'val1'), (NULL, 'val21'), (NULL, 'val22'), (NULL, 'val23'), (NULL, 'val24'), (NULL, 'val25'), (NULL, 'val26'), (NULL, 'val27'), (3, 'val3'); +INSERT INTO t2 VALUES (1, 'val11'), (1, 'val12'), (2, 'val22'), (2, 'val23'), (2, 'val24'), (2, 'val25'), (2, 'val26'), (2, 'val27'), (2, 'val28'), (3, 'val3'); +INSERT INTO tn2 VALUES (1, 'val11'), (1, 'val12'), (NULL, 'val22'), (NULL, 'val23'), (NULL, 'val24'), (NULL, 'val25'), (NULL, 'val26'), (NULL, 'val27'), (NULL, 'val28'), (3, 'val3'); + +SET enable_analyzer = 1; +SET join_algorithm = 'hash'; + +SELECT '---'; +SELECT key, length(t1.s), length(t2.s) FROM t1 AS t1 ALL FULL JOIN tn2 AS t2 USING (key) ORDER BY key, length(t1.s), length(t2.s); + +SET join_algorithm = 'full_sorting_merge'; + +SELECT '---'; +SELECT key, length(t1.s), length(t2.s) FROM t1 AS t1 ALL FULL JOIN tn2 AS t2 USING (key) ORDER BY key, length(t1.s), length(t2.s); + +SET join_use_nulls = 1; + +SET join_algorithm = 'hash'; + +SELECT '---'; +SELECT key, length(t1.s), length(t2.s) FROM t1 AS t1 ALL FULL JOIN tn2 AS t2 USING (key) ORDER BY key, length(t1.s), length(t2.s); + +SET join_algorithm =
'full_sorting_merge'; + +SELECT '---'; +SELECT key, length(t1.s), length(t2.s) FROM t1 AS t1 ALL FULL JOIN tn2 AS t2 USING (key) ORDER BY key, length(t1.s), length(t2.s); + + +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE tn1; +DROP TABLE tn2; diff --git a/parser/testdata/03657_merge_tree_disk_support_transaction/ast.json b/parser/testdata/03657_merge_tree_disk_support_transaction/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03657_merge_tree_disk_support_transaction/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03657_merge_tree_disk_support_transaction/metadata.json b/parser/testdata/03657_merge_tree_disk_support_transaction/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03657_merge_tree_disk_support_transaction/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03657_merge_tree_disk_support_transaction/query.sql b/parser/testdata/03657_merge_tree_disk_support_transaction/query.sql new file mode 100644 index 000000000..65d2330b1 --- /dev/null +++ b/parser/testdata/03657_merge_tree_disk_support_transaction/query.sql @@ -0,0 +1,65 @@ +-- Tags: no-ordinary-database, no-fasttest, no-encrypted-storage, no-async-insert + +CREATE OR REPLACE TABLE t (x INT) ENGINE=MergeTree ORDER BY x; +SET implicit_transaction=True; +INSERT INTO TABLE t VALUES (1); +SET implicit_transaction=False; + +CREATE OR REPLACE TABLE t (x INT) ENGINE=MergeTree ORDER BY x SETTINGS disk='local_disk'; +SET implicit_transaction=True; +INSERT INTO TABLE t VALUES (1); +SET implicit_transaction=False; + +CREATE OR REPLACE TABLE t (x INT) ENGINE=MergeTree ORDER BY x SETTINGS disk='local_disk_2'; +SET implicit_transaction=True; +INSERT INTO TABLE t VALUES (1); +SET implicit_transaction=False; + +CREATE OR REPLACE TABLE t (x INT) ENGINE=MergeTree ORDER BY x SETTINGS disk='local_disk_3'; +SET implicit_transaction=True; +INSERT INTO TABLE t VALUES (1); +SET implicit_transaction=False; + +CREATE OR REPLACE TABLE t (x INT) ENGINE=MergeTree ORDER BY x SETTINGS disk='s3_disk'; +SET implicit_transaction=True; +INSERT INTO TABLE t VALUES (1); +SET implicit_transaction=False; + +CREATE OR REPLACE TABLE t (x INT) ENGINE=MergeTree ORDER BY x SETTINGS disk='s3_plain_rewritable'; +SET implicit_transaction=True; +INSERT INTO TABLE t VALUES (1); -- { serverError NOT_IMPLEMENTED } +SET implicit_transaction=False; + +CREATE OR REPLACE TABLE t (x INT) ENGINE=MergeTree ORDER BY x SETTINGS disk='s3_cache'; +SET implicit_transaction=True; +INSERT INTO TABLE t VALUES (1); +SET implicit_transaction=False; + +CREATE OR REPLACE TABLE t (x INT) ENGINE=MergeTree ORDER BY x SETTINGS disk='local_plain_rewritable'; +SET implicit_transaction=True; +INSERT INTO TABLE t VALUES (1); -- { serverError NOT_IMPLEMENTED } +SET implicit_transaction=False; + +CREATE OR REPLACE TABLE t (x INT) ENGINE=MergeTree ORDER BY x SETTINGS disk='s3_plain_rewritable_cache'; +SET implicit_transaction=True; +INSERT INTO TABLE t VALUES (1); -- { serverError NOT_IMPLEMENTED } +SET implicit_transaction=False; + +CREATE OR REPLACE TABLE t (x INT) ENGINE=MergeTree ORDER BY x SETTINGS disk='s3_plain_rewritable_cache_multi'; +SET implicit_transaction=True; +INSERT INTO TABLE t VALUES (1); -- { serverError NOT_IMPLEMENTED } +SET implicit_transaction=False; + +CREATE OR REPLACE TABLE t (x INT) ENGINE=MergeTree ORDER BY x SETTINGS disk='local_cache'; +SET implicit_transaction=True; +INSERT INTO TABLE t VALUES (1); +SET implicit_transaction=False; + +CREATE 
OR REPLACE TABLE t (x INT) ENGINE=MergeTree ORDER BY x SETTINGS disk='local_cache_multi'; +SET implicit_transaction=True; +INSERT INTO TABLE t VALUES (1); +SET implicit_transaction=False; + +CREATE OR REPLACE TABLE t (x INT) ENGINE=MergeTree ORDER BY x SETTINGS disk='encrypted_s3_plain_rewritable_cache'; +SET implicit_transaction=True; +INSERT INTO TABLE t VALUES (1); -- { serverError NOT_IMPLEMENTED } \ No newline at end of file diff --git a/parser/testdata/03657_rollup_constant/ast.json b/parser/testdata/03657_rollup_constant/ast.json new file mode 100644 index 000000000..4e1e6dccc --- /dev/null +++ b/parser/testdata/03657_rollup_constant/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery my_first_table (children 1)" + }, + { + "explain": " Identifier my_first_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001299842, + "rows_read": 2, + "bytes_read": 80 + } +} diff --git a/parser/testdata/03657_rollup_constant/metadata.json b/parser/testdata/03657_rollup_constant/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03657_rollup_constant/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03657_rollup_constant/query.sql b/parser/testdata/03657_rollup_constant/query.sql new file mode 100644 index 000000000..72c84b981 --- /dev/null +++ b/parser/testdata/03657_rollup_constant/query.sql @@ -0,0 +1,31 @@ +DROP TABLE IF EXISTS my_first_table; + +CREATE TABLE my_first_table +( + user_id UInt32, + job_id UInt32, + message String, + timestamp DateTime, + metric Float32 +) +ENGINE = MergeTree() +PRIMARY KEY (user_id, timestamp); + +INSERT INTO my_first_table (user_id, job_id, message, timestamp, metric) VALUES + (101, 101,'Hello, ClickHouse!', now(), 1 ), + (101, 102,'Granules are the smallest chunks of data read', now() + 5, 3 ), + (102, 101,'Insert a lot of rows per batch', yesterday(), 2 ), + (102, 101,'Test1', today(), 1 ), + (102, 101,'Test2', today(), 2 ), + (102, 101,'Test3', today(), 2 ), + (102, 102,'Test4', today(), 4 ), + (102, 103,'Test5', today(), 4 ), + (102, 103,'Test6', today(), 1 ); + +SET enable_analyzer = 1; +SELECT 1 AS constant, user_id, job_id, sum(metric) +FROM my_first_table +GROUP BY constant, user_id, job_id WITH ROLLUP +ORDER BY constant = 0, user_id = 0, job_id = 0, constant, user_id, job_id; + +DROP TABLE my_first_table; diff --git a/parser/testdata/03658_joined_block_split_single_row_bytes/ast.json b/parser/testdata/03658_joined_block_split_single_row_bytes/ast.json new file mode 100644 index 000000000..91b607f3b --- /dev/null +++ b/parser/testdata/03658_joined_block_split_single_row_bytes/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001400825, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03658_joined_block_split_single_row_bytes/metadata.json b/parser/testdata/03658_joined_block_split_single_row_bytes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03658_joined_block_split_single_row_bytes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03658_joined_block_split_single_row_bytes/query.sql b/parser/testdata/03658_joined_block_split_single_row_bytes/query.sql new file mode 100644 index 000000000..15202a5f8 --- /dev/null +++
+SELECT 1 AS constant, user_id, job_id, sum(metric) +FROM my_first_table +GROUP BY constant, user_id, job_id WITH ROLLUP +ORDER BY constant = 0, user_id = 0, job_id = 0, constant, user_id, job_id; + +DROP TABLE my_first_table; diff --git a/parser/testdata/03658_joined_block_split_single_row_bytes/ast.json b/parser/testdata/03658_joined_block_split_single_row_bytes/ast.json new file mode 100644 index 000000000..91b607f3b --- /dev/null +++ b/parser/testdata/03658_joined_block_split_single_row_bytes/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001400825, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03658_joined_block_split_single_row_bytes/metadata.json b/parser/testdata/03658_joined_block_split_single_row_bytes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03658_joined_block_split_single_row_bytes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03658_joined_block_split_single_row_bytes/query.sql b/parser/testdata/03658_joined_block_split_single_row_bytes/query.sql new file mode 100644 index 000000000..15202a5f8 --- /dev/null +++ b/parser/testdata/03658_joined_block_split_single_row_bytes/query.sql @@ -0,0 +1,35 @@ +SET enable_lazy_columns_replication=0; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; + +CREATE TABLE t1 (id UInt32, key Int32, payload String) ENGINE = MergeTree ORDER BY id; +INSERT INTO t1 SELECT rand(), 1, 'a' || if(number = 99, repeat(toString(number), 1000), toString(number)) FROM numbers(100); + +CREATE TABLE t2 (id UInt32, key Int32, payload String) ENGINE = MergeTree ORDER BY id; +INSERT INTO t2 SELECT rand(), 1, 'b' || toString(number) FROM numbers(100_000); + +SET enable_analyzer = 1; +SET query_plan_join_swap_table = 0; +SET query_plan_optimize_join_order_limit = 1; + +SELECT + -- blocks with a lot of data are small in rows: + if(max(size) < 5_000_000 AND argMax(rows, size) < 10_000, 'Ok', format('Error: max_size={} rows={}', max(size), argMax(rows, size))), + -- but there are still large blocks with small strings + if(max(rows) >= 50_000, 'Ok', format('Error: {}', toString(max(rows)))) +FROM ( SELECT blockNumber() as bn, sum(byteSize(*)) as size, count() as rows FROM t1 INNER JOIN t2 ON t1.key = t2.key GROUP BY bn ) +SETTINGS joined_block_split_single_row = 1 + , max_joined_block_size_bytes = '4M' + , max_joined_block_size_rows = 65_000; + + +-- add a 3 MB match +INSERT INTO t1 SELECT rand(), 2, repeat('aaa', 1_000_000); +INSERT INTO t2 SELECT rand(), 2, repeat('bbb', 1_000_000) from numbers(10); + +SELECT + -- the limit is 4M but the minimum block size is 6 MB, so each block will hold a single row + if(argMax(rows, size) = 1 AND max(size) < 10_000_000, 'Ok', format('Error: max_size={} rows={}', max(size), argMax(rows, size))) +FROM ( SELECT blockNumber() as bn, sum(byteSize(*)) as size, count() as rows FROM t1 INNER JOIN t2 ON t1.key = t2.key GROUP BY bn ) +SETTINGS joined_block_split_single_row = 1, max_joined_block_size_bytes = '4M'; diff --git a/parser/testdata/03658_negative_limit_offset_distributed/ast.json b/parser/testdata/03658_negative_limit_offset_distributed/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03658_negative_limit_offset_distributed/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03658_negative_limit_offset_distributed/metadata.json b/parser/testdata/03658_negative_limit_offset_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03658_negative_limit_offset_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03658_negative_limit_offset_distributed/query.sql b/parser/testdata/03658_negative_limit_offset_distributed/query.sql new file mode 100644 index 000000000..c2c2feaa3 --- /dev/null +++ b/parser/testdata/03658_negative_limit_offset_distributed/query.sql @@ -0,0 +1,27 @@ +-- Tags: distributed + +SET enable_analyzer=0; + +SELECT 'Old Analyzer:'; + +SELECT number FROM remote('127.0.0.{1,2,3}', numbers_mt(20)) ORDER BY number DESC LIMIT 5 OFFSET 20; + +SELECT number FROM remote('127.0.0.{1,2,3}', numbers_mt(20)) ORDER BY number LIMIT -5 OFFSET -20; + +SELECT number FROM remote('127.0.0.{1,2,3}', numbers_mt(20)) ORDER BY number LIMIT 5 OFFSET -20; + +SELECT number FROM remote('127.0.0.{1,2,3}', numbers_mt(20)) ORDER BY number LIMIT -5 OFFSET 20; + + +SET enable_analyzer=1; + +SELECT 'New Analyzer:'; +
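+-- The same four queries as above: the new analyzer must match the old analyzer's results.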
+SELECT number FROM remote('127.0.0.{1,2,3}', numbers_mt(20)) ORDER BY number DESC LIMIT 5 OFFSET 20; + +SELECT number FROM remote('127.0.0.{1,2,3}', numbers_mt(20)) ORDER BY number LIMIT -5 OFFSET -20; + +SELECT number FROM remote('127.0.0.{1,2,3}', numbers_mt(20)) ORDER BY number LIMIT 5 OFFSET -20; + +SELECT number FROM remote('127.0.0.{1,2,3}', numbers_mt(20)) ORDER BY number LIMIT -5 OFFSET 20; diff --git a/parser/testdata/03660_udf_subquery/ast.json b/parser/testdata/03660_udf_subquery/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03660_udf_subquery/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03660_udf_subquery/metadata.json b/parser/testdata/03660_udf_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03660_udf_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03660_udf_subquery/query.sql b/parser/testdata/03660_udf_subquery/query.sql new file mode 100644 index 000000000..3164765bf --- /dev/null +++ b/parser/testdata/03660_udf_subquery/query.sql @@ -0,0 +1,33 @@ +-- Tags: no-parallel + +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS dict; +create table dict engine=MergeTree() order by id as +select 1 as id, 'one' as name union all +select 2 as id, 'two' as name; + +CREATE OR REPLACE FUNCTION udf_type_of_int AS +int_ -> (select if(name = 'one', 'The One', 'other') from dict where id = int_); + +-- this part worked successfully +SELECT * FROM ( +select udf_type_of_int(1) union all +select udf_type_of_int(2) +) ORDER BY ALL; + +SELECT ''; + +-- ... and this did not! +select udf_type_of_int(number) from numbers(5) order by number; + +SELECT ''; + +select number as id, udf_type_of_int(id) from numbers(5) order by number; + +SELECT ''; + +select number as id, udf_type_of_int(id or id = 1) from numbers(5) order by number; + +DROP FUNCTION udf_type_of_int; +DROP TABLE dict; diff --git a/parser/testdata/03663_parameterized_views_formatting_of_substitutions_excessive_backticks/ast.json b/parser/testdata/03663_parameterized_views_formatting_of_substitutions_excessive_backticks/ast.json new file mode 100644 index 000000000..b04691bd3 --- /dev/null +++ b/parser/testdata/03663_parameterized_views_formatting_of_substitutions_excessive_backticks/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery audit_size_column (children 1)" + }, + { + "explain": " Identifier audit_size_column" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001426105, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/03663_parameterized_views_formatting_of_substitutions_excessive_backticks/metadata.json b/parser/testdata/03663_parameterized_views_formatting_of_substitutions_excessive_backticks/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03663_parameterized_views_formatting_of_substitutions_excessive_backticks/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03663_parameterized_views_formatting_of_substitutions_excessive_backticks/query.sql b/parser/testdata/03663_parameterized_views_formatting_of_substitutions_excessive_backticks/query.sql new file mode 100644 index 000000000..01d1d8251 --- /dev/null +++ b/parser/testdata/03663_parameterized_views_formatting_of_substitutions_excessive_backticks/query.sql @@ -0,0 +1,26 @@ +DROP VIEW IF EXISTS audit_size_column; +CREATE VIEW audit_size_column +AS +SELECT + formatReadableSize(size) AS formatted, + sum(column_bytes_on_disk) AS size, + column, + table, + database +FROM + system.parts_columns +WHERE + active = 1 + AND (database = {db:String} OR
database = currentDatabase()) + AND (match(table, {table:String})) + AND (match(column, {column:String})) +GROUP BY + database, + table, + column +; + +DETACH TABLE audit_size_column; +ATTACH TABLE audit_size_column; +SHOW TABLE audit_size_column FORMAT Raw; +DROP TABLE audit_size_column; diff --git a/parser/testdata/03664_parameterized_view_restart/ast.json b/parser/testdata/03664_parameterized_view_restart/ast.json new file mode 100644 index 000000000..fbb9374ab --- /dev/null +++ b/parser/testdata/03664_parameterized_view_restart/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery view_order_attribution (children 1)" + }, + { + "explain": " Identifier view_order_attribution" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001164908, + "rows_read": 2, + "bytes_read": 96 + } +} diff --git a/parser/testdata/03664_parameterized_view_restart/metadata.json b/parser/testdata/03664_parameterized_view_restart/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03664_parameterized_view_restart/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03664_parameterized_view_restart/query.sql b/parser/testdata/03664_parameterized_view_restart/query.sql new file mode 100644 index 000000000..8349440f5 --- /dev/null +++ b/parser/testdata/03664_parameterized_view_restart/query.sql @@ -0,0 +1,114 @@ +DROP TABLE IF EXISTS view_order_attribution; +DROP TABLE IF EXISTS order_attribution; + +CREATE TABLE order_attribution +( + `order_product_event_id` String, + `order_id` String, + `brand_id` String, + `user_id` String, + `gmv` Float64, + `gsv` Float64, + `po_created_at` DateTime64(9, 'UTC'), + `event_brand_id` Nullable(String), + `campaign_id` UInt32, + `event_bid_type` String, + `event_bid_value` Nullable(Float64), + `event_inventory_id` UInt32, + `event_user_id` String, + `ad_event_id` String, + `latest_ad_created_at` DateTime64(9, 'UTC'), + `event_type` String, + `attribution_window` UInt8, + `_version` DateTime DEFAULT now() +) +ENGINE = ReplacingMergeTree(_version) +ORDER BY (brand_id, campaign_id, event_type, latest_ad_created_at, user_id, order_product_event_id, ad_event_id) +SETTINGS index_granularity = 8192; + +CREATE VIEW view_order_attribution +( + `order_product_event_id` String, + `order_id` String, + `brand_id` String, + `user_id` String, + `sp_id` Nullable(String), + `gmv` Float64, + `gsv` Float64, + `po_created_at` DateTime64(9, 'UTC'), + `event_brand_id` Nullable(String), + `campaign_id` UInt32, + `event_bid_type` String, + `event_bid_value` Nullable(Float64), + `event_inventory_id` UInt32, + `event_user_id` String, + `ad_event_id` String, + `event_type` String, + `latest_ad_created_at` DateTime64(9, 'UTC') +) AS +SELECT + order_product_event_id, + order_id, + brand_id, + user_id, + sp_id, + gmv, + gsv, + po_created_at, + argMax(event_brand_id, event_rank) AS event_brand_id, + argMax(campaign_id, event_rank) AS campaign_id, + argMax(event_bid_type, event_rank) AS event_bid_type, + argMax(event_bid_value, event_rank) AS event_bid_value, + argMax(event_inventory_id, event_rank) AS event_inventory_id, + argMax(event_user_id, event_rank) AS event_user_id, + argMax(ad_event_id, event_rank) AS ad_event_id, + argMax(event_type, event_rank) AS event_type, + argMax(latest_ad_created_at, event_rank) AS latest_ad_created_at +FROM +( + SELECT + order_product_event_id, + order_id, + brand_id, + user_id, + sp_id, + gmv, + gsv, + po_created_at, 
+ event_type, + if(event_type = 'click', 1, 0) AS event_rank, + argMax(event_brand_id, _version) AS event_brand_id, + argMax(campaign_id, _version) AS campaign_id, + argMax(event_bid_type, _version) AS event_bid_type, + argMax(event_bid_value, _version) AS event_bid_value, + argMax(event_inventory_id, _version) AS event_inventory_id, + argMax(event_user_id, _version) AS event_user_id, + argMax(ad_event_id, _version) AS ad_event_id, + argMax(latest_ad_created_at, _version) AS latest_ad_created_at + FROM analytics.order_attribution + WHERE attribution_window <= {attr_window:UInt32} + GROUP BY + order_product_event_id, + order_id, + brand_id, + user_id, + sp_id, + gmv, + gsv, + po_created_at, + event_type +) +GROUP BY + order_product_event_id, + order_id, + brand_id, + user_id, + sp_id, + gmv, + gsv, + po_created_at; + +DETACH TABLE view_order_attribution; +ATTACH TABLE view_order_attribution; +DROP TABLE view_order_attribution; +DROP TABLE order_attribution; diff --git a/parser/testdata/03666_count_matches_complexity/ast.json b/parser/testdata/03666_count_matches_complexity/ast.json new file mode 100644 index 000000000..af97b6f96 --- /dev/null +++ b/parser/testdata/03666_count_matches_complexity/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function countMatches (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function repeat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0'" + }, + { + "explain": " Literal UInt64_1000000" + }, + { + "explain": " Literal 'a'" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001386841, + "rows_read": 11, + "bytes_read": 428 + } +} diff --git a/parser/testdata/03666_count_matches_complexity/metadata.json b/parser/testdata/03666_count_matches_complexity/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03666_count_matches_complexity/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03666_count_matches_complexity/query.sql b/parser/testdata/03666_count_matches_complexity/query.sql new file mode 100644 index 000000000..d866543b5 --- /dev/null +++ b/parser/testdata/03666_count_matches_complexity/query.sql @@ -0,0 +1,2 @@ +SELECT countMatches(repeat('\0\0\0\0\0\0\0\0\0\0', 1000000), 'a'); +SELECT countMatches(repeat('\0\0\0\0\0\0\0\0\0\0a', 1000000), 'a'); diff --git a/parser/testdata/03667_accurate_cast_datetime_overflow/ast.json b/parser/testdata/03667_accurate_cast_datetime_overflow/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03667_accurate_cast_datetime_overflow/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03667_accurate_cast_datetime_overflow/metadata.json b/parser/testdata/03667_accurate_cast_datetime_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03667_accurate_cast_datetime_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03667_accurate_cast_datetime_overflow/query.sql b/parser/testdata/03667_accurate_cast_datetime_overflow/query.sql new file mode 100644 index 000000000..fae125791 --- /dev/null 
+++ b/parser/testdata/03667_accurate_cast_datetime_overflow/query.sql @@ -0,0 +1,22 @@ +-- Test for issue #88166: DateTimeTransforms UBSAN overflow +-- This test ensures that accurateCast with out-of-range values doesn't cause UBSAN errors + +SET session_timezone = 'UTC'; + +-- Test with large positive value (should throw exception with proper error message) +SELECT accurateCast(100000000000000000000, 'DateTime'); -- {serverError CANNOT_CONVERT_TYPE} + +-- Test with large negative value +SELECT accurateCast(-100000000000000000000, 'DateTime'); -- {serverError CANNOT_CONVERT_TYPE} + +-- Test with maximum valid DateTime value (should work) +SELECT accurateCast(4294967295, 'DateTime'); + +-- Test with minimum valid DateTime value (should work) +SELECT accurateCast(0, 'DateTime'); + +-- Test with value just above maximum (should throw exception) +SELECT accurateCast(4294967296, 'DateTime'); -- {serverError CANNOT_CONVERT_TYPE} + +-- Test with value just below minimum (should throw exception) +SELECT accurateCast(-1, 'DateTime'); -- {serverError CANNOT_CONVERT_TYPE} diff --git a/parser/testdata/03667_drop_inner_table_size_limits/ast.json b/parser/testdata/03667_drop_inner_table_size_limits/ast.json new file mode 100644 index 000000000..6832f8dc9 --- /dev/null +++ b/parser/testdata/03667_drop_inner_table_size_limits/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t (children 3)" + }, + { + "explain": " Identifier t" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration id (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier id" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001197794, + "rows_read": 9, + "bytes_read": 294 + } +} diff --git a/parser/testdata/03667_drop_inner_table_size_limits/metadata.json b/parser/testdata/03667_drop_inner_table_size_limits/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03667_drop_inner_table_size_limits/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03667_drop_inner_table_size_limits/query.sql b/parser/testdata/03667_drop_inner_table_size_limits/query.sql new file mode 100644 index 000000000..0c9f08e69 --- /dev/null +++ b/parser/testdata/03667_drop_inner_table_size_limits/query.sql @@ -0,0 +1,7 @@ +CREATE TABLE t (id UInt64) ENGINE = MergeTree ORDER BY id; +CREATE MATERIALIZED VIEW mv (id UInt64) ENGINE = MergeTree ORDER BY id AS SELECT id FROM t; +INSERT INTO t SELECT number FROM numbers(1000); +SET max_table_size_to_drop = 1, max_partition_size_to_drop = 1; +DROP TABLE mv; -- { serverError TABLE_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT } +SET max_table_size_to_drop = 0, max_partition_size_to_drop = 0; +DROP TABLE mv; diff --git a/parser/testdata/03667_insert_columns_description/ast.json b/parser/testdata/03667_insert_columns_description/ast.json new file mode 100644 index 000000000..683d048b9 --- /dev/null +++ b/parser/testdata/03667_insert_columns_description/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " 
ExpressionList (children 1)" + }, + { + "explain": " Literal '-- remote table function columns description'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001216989, + "rows_read": 5, + "bytes_read": 215 + } +} diff --git a/parser/testdata/03667_insert_columns_description/metadata.json b/parser/testdata/03667_insert_columns_description/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03667_insert_columns_description/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03667_insert_columns_description/query.sql b/parser/testdata/03667_insert_columns_description/query.sql new file mode 100644 index 000000000..713e6b614 --- /dev/null +++ b/parser/testdata/03667_insert_columns_description/query.sql @@ -0,0 +1,16 @@ +SELECT '-- remote table function columns description'; +CREATE TABLE t0 (c Int DEFAULT 7) ENGINE = MergeTree() ORDER BY tuple(); + +INSERT INTO TABLE FUNCTION remote('localhost:9000', database(), 't0', 'default', '') VALUES (NULL); +INSERT INTO TABLE t0 VALUES (NULL); + +SELECT * FROM t0 ORDER BY ALL; + +CREATE TABLE fuzz_87972 (c0 Int MATERIALIZED 1, c1 Int EPHEMERAL) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO TABLE FUNCTION remote('localhost:9000', database(), 'fuzz_87972', 'default', '') VALUES (); -- { error EMPTY_LIST_OF_COLUMNS_PASSED } + +SELECT '-- file table function columns description'; +INSERT INTO TABLE FUNCTION file(database() || '_test.csv', CSV, 'a Int, b Int DEFAULT 77') SELECT number, if(number%2=1, NULL, number) FROM numbers(3); +INSERT INTO TABLE FUNCTION file(database() || '_test.csv', CSV, 'a Int, b Int DEFAULT 77') VALUES (3, 3), (4, NULL); + +SELECT * FROM file(database() || '_test.csv') ORDER BY ALL; diff --git a/parser/testdata/03667_join_with_subquery_and_final/ast.json b/parser/testdata/03667_join_with_subquery_and_final/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03667_join_with_subquery_and_final/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03667_join_with_subquery_and_final/metadata.json b/parser/testdata/03667_join_with_subquery_and_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03667_join_with_subquery_and_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03667_join_with_subquery_and_final/query.sql b/parser/testdata/03667_join_with_subquery_and_final/query.sql new file mode 100644 index 000000000..a8fe33db3 --- /dev/null +++ b/parser/testdata/03667_join_with_subquery_and_final/query.sql @@ -0,0 +1,30 @@ +set enable_analyzer=0; + +DROP TABLE IF EXISTS 03667_t1; +DROP TABLE IF EXISTS 03667_t2; +DROP TABLE IF EXISTS 03667_t3; + +CREATE TABLE 03667_t1 ( `key` Int64, `value` Int64 ) ENGINE = ReplacingMergeTree PARTITION BY tuple() ORDER BY key SETTINGS index_granularity = 8192; +CREATE TABLE 03667_t2 ( `key` Int64, `value` Int64 ) ENGINE = ReplacingMergeTree PARTITION BY tuple() ORDER BY key SETTINGS index_granularity = 8192; +CREATE TABLE 03667_t3 ( `key` Int64, `value` Int64 ) ENGINE = ReplacingMergeTree PARTITION BY tuple() ORDER BY key SETTINGS index_granularity = 8192; + +explain select + * +from + 03667_t1 s final + join ( + select + * + from + 03667_t2 final + ) r final on s.key = r.key + join ( + select + * + from + 03667_t3 final + ) c final on s.key = c.key format Null; + +DROP TABLE IF EXISTS 03667_t1; +DROP TABLE IF EXISTS 03667_t2; +DROP TABLE IF EXISTS 03667_t3; diff --git 
a/parser/testdata/03667_pr_join_with_cross_join_on_left/ast.json b/parser/testdata/03667_pr_join_with_cross_join_on_left/ast.json new file mode 100644 index 000000000..71989a024 --- /dev/null +++ b/parser/testdata/03667_pr_join_with_cross_join_on_left/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery n1 (children 1)" + }, + { + "explain": " Identifier n1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000929045, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03667_pr_join_with_cross_join_on_left/metadata.json b/parser/testdata/03667_pr_join_with_cross_join_on_left/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03667_pr_join_with_cross_join_on_left/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03667_pr_join_with_cross_join_on_left/query.sql b/parser/testdata/03667_pr_join_with_cross_join_on_left/query.sql new file mode 100644 index 000000000..5398186d9 --- /dev/null +++ b/parser/testdata/03667_pr_join_with_cross_join_on_left/query.sql @@ -0,0 +1,19 @@ +DROP TABLE IF EXISTS n1; +DROP TABLE IF EXISTS n2; +DROP TABLE IF EXISTS n3; + +CREATE TABLE n1 (number UInt64) ENGINE = MergeTree ORDER BY number SETTINGS index_granularity=1; +INSERT INTO n1 SELECT number FROM numbers(3); + +CREATE TABLE n2 (number UInt64) ENGINE = MergeTree ORDER BY number SETTINGS index_granularity=1; +INSERT INTO n2 SELECT number FROM numbers(2); + +CREATE TABLE n3 (number UInt64) ENGINE = MergeTree ORDER BY number SETTINGS index_granularity=1; +INSERT INTO n3 SELECT number FROM numbers(2); + +SET enable_parallel_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost', parallel_replicas_for_non_replicated_merge_tree=1; +SELECT * FROM n1, n2 JOIN n3 ON n1.number = n3.number ORDER BY n1.number, n2.number, n3.number; + +DROP TABLE n3; +DROP TABLE n2; +DROP TABLE n1; diff --git a/parser/testdata/03667_text_index_validation/ast.json b/parser/testdata/03667_text_index_validation/ast.json new file mode 100644 index 000000000..36926129a --- /dev/null +++ b/parser/testdata/03667_text_index_validation/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery text_index_validation (children 1)" + }, + { + "explain": " Identifier text_index_validation" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001050628, + "rows_read": 2, + "bytes_read": 94 + } +} diff --git a/parser/testdata/03667_text_index_validation/metadata.json b/parser/testdata/03667_text_index_validation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03667_text_index_validation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03667_text_index_validation/query.sql b/parser/testdata/03667_text_index_validation/query.sql new file mode 100644 index 000000000..9b05683ba --- /dev/null +++ b/parser/testdata/03667_text_index_validation/query.sql @@ -0,0 +1,20 @@ +DROP TABLE IF EXISTS text_index_validation; + +SET allow_experimental_full_text_index = 1; + +CREATE TABLE text_index_validation (s String, INDEX idx_s (s) TYPE text(tokenizer = ngrams(0))) ENGINE = MergeTree ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } +CREATE TABLE text_index_validation (s String, INDEX idx_s (s) TYPE text(tokenizer = ngrams(10))) ENGINE = MergeTree ORDER BY 
tuple(); -- { serverError BAD_ARGUMENTS } + +CREATE TABLE text_index_validation (s String, INDEX idx_s (s) TYPE text(tokenizer = array('a'))) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY } +CREATE TABLE text_index_validation (s String, INDEX idx_s (s) TYPE text(tokenizer = splitByNonAlpha('a'))) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY } +CREATE TABLE text_index_validation (s String, INDEX idx_s (s) TYPE text(tokenizer = splitByString([]))) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + + +CREATE TABLE text_index_validation (s String, INDEX idx_s (s) TYPE text(tokenizer = sparseGrams(2, 2, 8))) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY } +CREATE TABLE text_index_validation (s String, INDEX idx_s (s) TYPE text(tokenizer = sparseGrams(3, 2, 2))) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY } +CREATE TABLE text_index_validation (s String, INDEX idx_s (s) TYPE text(tokenizer = sparseGrams(3, 1000))) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY } +CREATE TABLE text_index_validation (s String, INDEX idx_s (s) TYPE text(tokenizer = sparseGrams(5, 3))) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY } +CREATE TABLE text_index_validation (s String, INDEX idx_s (s) TYPE text(tokenizer = sparseGrams(3, 10, 11))) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY } +CREATE TABLE text_index_validation (s String, INDEX idx_s (s) TYPE text(tokenizer = sparseGrams(3, 10, 2))) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY } +CREATE TABLE text_index_validation (s String, INDEX idx_s (s) TYPE text(tokenizer = sparseGrams(3, 10, 5, 6, 7))) ENGINE = MergeTree ORDER BY tuple(); -- { serverError INCORRECT_QUERY } + diff --git a/parser/testdata/03668_shard_join_in_reverse_order/ast.json b/parser/testdata/03668_shard_join_in_reverse_order/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03668_shard_join_in_reverse_order/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03668_shard_join_in_reverse_order/metadata.json b/parser/testdata/03668_shard_join_in_reverse_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03668_shard_join_in_reverse_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03668_shard_join_in_reverse_order/query.sql b/parser/testdata/03668_shard_join_in_reverse_order/query.sql new file mode 100644 index 000000000..d60ee402a --- /dev/null +++ b/parser/testdata/03668_shard_join_in_reverse_order/query.sql @@ -0,0 +1,11 @@ +-- { echo ON } + +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Int) ENGINE = MergeTree() ORDER BY (c0 DESC) SETTINGS index_granularity = 1, allow_experimental_reverse_key = 1; + +INSERT INTO TABLE t0 (c0) SELECT number FROM numbers(10); + +SELECT c0 FROM t0 JOIN t0 tx USING (c0) ORDER BY c0 SETTINGS query_plan_join_shard_by_pk_ranges = 1, max_threads = 2; + +DROP TABLE t0; diff --git a/parser/testdata/03669_min_max_projection_with_reverse_order_key/ast.json b/parser/testdata/03669_min_max_projection_with_reverse_order_key/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03669_min_max_projection_with_reverse_order_key/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03669_min_max_projection_with_reverse_order_key/metadata.json 
b/parser/testdata/03669_min_max_projection_with_reverse_order_key/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03669_min_max_projection_with_reverse_order_key/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03669_min_max_projection_with_reverse_order_key/query.sql b/parser/testdata/03669_min_max_projection_with_reverse_order_key/query.sql new file mode 100644 index 000000000..eea9ace4d --- /dev/null +++ b/parser/testdata/03669_min_max_projection_with_reverse_order_key/query.sql @@ -0,0 +1,11 @@ +-- { echo ON } + +DROP TABLE IF EXISTS desc_pk; + +CREATE TABLE desc_pk (`a` UInt32) ENGINE = MergeTree ORDER BY (a DESC) SETTINGS allow_experimental_reverse_key = 1; + +INSERT INTO desc_pk SELECT * FROM numbers(10); + +SELECT min(a), max(a) FROM desc_pk; + +DROP TABLE desc_pk; diff --git a/parser/testdata/03671_dict_in_subquery_in_index_analysis_context_expired/ast.json b/parser/testdata/03671_dict_in_subquery_in_index_analysis_context_expired/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03671_dict_in_subquery_in_index_analysis_context_expired/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03671_dict_in_subquery_in_index_analysis_context_expired/metadata.json b/parser/testdata/03671_dict_in_subquery_in_index_analysis_context_expired/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03671_dict_in_subquery_in_index_analysis_context_expired/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03671_dict_in_subquery_in_index_analysis_context_expired/query.sql b/parser/testdata/03671_dict_in_subquery_in_index_analysis_context_expired/query.sql new file mode 100644 index 000000000..df9af7c20 --- /dev/null +++ b/parser/testdata/03671_dict_in_subquery_in_index_analysis_context_expired/query.sql @@ -0,0 +1,28 @@ +-- Tags: no-parallel-replicas + +DROP DICTIONARY IF EXISTS dict; +DROP TABLE IF EXISTS info; +DROP TABLE IF EXISTS ids; + +CREATE TABLE info (iid UInt32) ENGINE = MergeTree() ORDER BY iid; +INSERT INTO info (iid) VALUES (1); + +CREATE TABLE ids (id Int64) ENGINE = MergeTree() ORDER BY (); +INSERT INTO ids (id) VALUES (1); + +CREATE DICTIONARY dict +( + id Int64, + children Array(Int64), +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(QUERY 'SELECT 1 id, [1] children')) +LAYOUT(DIRECT()); + +SELECT iid IN (SELECT DISTINCT arrayJoin(dictGet(dict, 'children', id)) FROM ids) +FROM +( + SELECT * + FROM info + WHERE (iid IN (SELECT DISTINCT arrayJoin(dictGet(dict, 'children', id)) FROM ids)) +); diff --git a/parser/testdata/03671_pk_in_subquery_context_expired/ast.json b/parser/testdata/03671_pk_in_subquery_context_expired/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03671_pk_in_subquery_context_expired/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03671_pk_in_subquery_context_expired/metadata.json b/parser/testdata/03671_pk_in_subquery_context_expired/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03671_pk_in_subquery_context_expired/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03671_pk_in_subquery_context_expired/query.sql b/parser/testdata/03671_pk_in_subquery_context_expired/query.sql new file mode 100644 index 000000000..d71680ae9 --- /dev/null +++ b/parser/testdata/03671_pk_in_subquery_context_expired/query.sql @@ -0,0 +1,82 @@ +-- Issue: 
https://github.com/ClickHouse/ClickHouse/issues/89433 + +DROP TABLE IF EXISTS tbl; +DROP TABLE IF EXISTS join_engine; + +CREATE TABLE tbl +( + `id1` LowCardinality(String), + `id2` LowCardinality(String), + `v` Int64 +) +ENGINE = MergeTree +ORDER BY (id1, id2, v); +INSERT INTO tbl VALUES ('a', 'b', 1); +CREATE TABLE join_engine +( + `id1` LowCardinality(String), + `id2` LowCardinality(String), + `v` Int64 +) +ENGINE = Join(ANY, LEFT, id1, id2); +INSERT INTO join_engine VALUES ('a', 'b', 1); + +WITH cte AS + ( + SELECT id2 + FROM tbl + WHERE joinGet(currentDatabase() || '.join_engine', 'v', id1, id2) = tbl.v + ) +SELECT uniq(id2) AS count +FROM +( + -- NOTE: the bug is reproduced only because + -- enable_global_with_statement adds "cte" here, but likely it will be + -- fixed one day... so I've added another test below that does not rely + -- on this fact + SELECT * + FROM tbl AS e + WHERE joinGet(currentDatabase() || '.join_engine', 'v', id1, id2) = e.v +) +WHERE id2 IN ( + SELECT id2 + FROM cte +) +UNION ALL +SELECT uniq(id2) AS count +FROM cte; + +SELECT 'Testing w/o relying on enable_global_with_statement...'; +-- +-- The same as before, but without relying on enable_global_with_statement +-- +SELECT uniq(id2) AS count +FROM +( + WITH cte AS + ( + SELECT id2 + FROM tbl + WHERE joinGet(currentDatabase() || '.join_engine', 'v', id1, id2) = tbl.v + ) + SELECT * + FROM tbl AS e + WHERE joinGet(currentDatabase() || '.join_engine', 'v', id1, id2) = e.v +) +WHERE id2 IN ( + SELECT id2 + FROM + ( + SELECT id2 + FROM tbl + WHERE joinGet(currentDatabase() || '.join_engine', 'v', id1, id2) = tbl.v + ) +) +UNION ALL +SELECT uniq(id2) AS count +FROM +( + SELECT id2 + FROM tbl + WHERE joinGet(currentDatabase() || '.join_engine', 'v', id1, id2) = tbl.v +); diff --git a/parser/testdata/03672_columns_same_as_subcolumns/ast.json b/parser/testdata/03672_columns_same_as_subcolumns/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03672_columns_same_as_subcolumns/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03672_columns_same_as_subcolumns/metadata.json b/parser/testdata/03672_columns_same_as_subcolumns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03672_columns_same_as_subcolumns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03672_columns_same_as_subcolumns/query.sql b/parser/testdata/03672_columns_same_as_subcolumns/query.sql new file mode 100644 index 000000000..b0936cbc3 --- /dev/null +++ b/parser/testdata/03672_columns_same_as_subcolumns/query.sql @@ -0,0 +1,41 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/89599 +DROP TABLE IF EXISTS opentelemetry_span_log_9997438610282160742; + +CREATE TABLE opentelemetry_span_log_9997438610282160742 +( + `pull_request_number` UInt32, + `commit_sha` String, + `check_start_time` DateTime, + `check_name` LowCardinality(String), + `instance_type` LowCardinality(String), + `trace_id` UUID, + `span_id` UInt64, + `parent_span_id` UInt64, + `operation_name` LowCardinality(String), + `kind` Enum8('INTERNAL' = 0, 'SERVER' = 1, 'CLIENT' = 2, 'PRODUCER' = 3, 'CONSUMER' = 4), + `start_time_us` UInt64, + `finish_time_us` UInt64, + `finish_date` Date, + `attribute` Map(LowCardinality(String), String), + `attribute.names` Array(LowCardinality(String)) ALIAS mapKeys(attribute), + `attribute.values` Array(String) ALIAS mapValues(attribute) +) +ENGINE = MergeTree() +PARTITION BY toYYYYMM(finish_date) +ORDER BY (check_name,
finish_date, finish_time_us, trace_id) +SETTINGS index_granularity = 8192, old_parts_lifetime = 60; + +-- attribute.values column may conflict with subcolumn "values" of attribute map +ALTER TABLE opentelemetry_span_log_9997438610282160742 modify setting use_const_adaptive_granularity=1; +ALTER TABLE opentelemetry_span_log_9997438610282160742 rename column span_id to span_id2; + +-- attribute.values column (because it comes first in the definition) will conflict with subcolumn "values" of attribute map +DROP TABLE IF EXISTS opentelemetry_span_log_compact; +CREATE TABLE opentelemetry_span_log_compact +( + `attribute.names` Array(LowCardinality(String)) ALIAS mapKeys(attribute), + `attribute.values` Array(String) ALIAS mapValues(attribute), + `attribute` Map(LowCardinality(String), String) +) +ENGINE = MergeTree +ORDER BY tuple(); diff --git a/parser/testdata/03672_nested_array_nested_tuple/ast.json b/parser/testdata/03672_nested_array_nested_tuple/ast.json new file mode 100644 index 000000000..fedf90b7a --- /dev/null +++ b/parser/testdata/03672_nested_array_nested_tuple/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery nest (children 1)" + }, + { + "explain": " Identifier nest" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001133566, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03672_nested_array_nested_tuple/metadata.json b/parser/testdata/03672_nested_array_nested_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03672_nested_array_nested_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03672_nested_array_nested_tuple/query.sql b/parser/testdata/03672_nested_array_nested_tuple/query.sql new file mode 100644 index 000000000..19060b088 --- /dev/null +++ b/parser/testdata/03672_nested_array_nested_tuple/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS nest; +CREATE TABLE nest (nested_field Nested(e1 Int32)) ENGINE = MergeTree() ORDER BY nested_field.e1; +INSERT INTO nest (nested_field.e1) VALUES ([1, 2, 3]); +ALTER TABLE nest ADD COLUMN nested_field.e2 Array(Tuple(some_value Int32)); +OPTIMIZE TABLE nest FINAL; +SELECT * FROM nest; + +DROP TABLE IF EXISTS nest_2; +CREATE TABLE nest_2 (nested_field Nested(e1 Int32)) ENGINE = MergeTree() ORDER BY nested_field.e1; +INSERT INTO nest_2 (nested_field.e1) VALUES ([1, 2, 3]); +ALTER TABLE nest_2 ADD COLUMN nested_field.e2 Array(Tuple(some_value Tuple(another_value Int32))); +OPTIMIZE TABLE nest_2 FINAL; +SELECT * FROM nest_2; diff --git a/parser/testdata/03673_columns_description_cache/ast.json b/parser/testdata/03673_columns_description_cache/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03673_columns_description_cache/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03673_columns_description_cache/metadata.json b/parser/testdata/03673_columns_description_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03673_columns_description_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03673_columns_description_cache/query.sql b/parser/testdata/03673_columns_description_cache/query.sql new file mode 100644 index 000000000..925d90bb5 --- /dev/null +++ b/parser/testdata/03673_columns_description_cache/query.sql @@ -0,0 +1,32 @@ +-- Cache is only for MergeTree +drop table if exists t_mem;
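+-- The Memory table is therefore expected to report an empty cache size below.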
+create table t_mem (key Int) engine=Memory(); +insert into t_mem values (1); +select columns_descriptions_cache_size from system.tables where database = currentDatabase() and table = 't_mem'; + +-- MergeTree +drop table if exists t_mt; +-- { echoOn } +create table t_mt (key Int) engine=MergeTree() order by (); +select columns_descriptions_cache_size from system.tables where database = currentDatabase() and table = 't_mt'; +insert into t_mt values (1); +select columns_descriptions_cache_size from system.tables where database = currentDatabase() and table = 't_mt'; +insert into t_mt values (2); +select columns_descriptions_cache_size from system.tables where database = currentDatabase() and table = 't_mt'; +alter table t_mt add column value String settings mutations_sync=2; +insert into t_mt values (10, '10'); +select columns_descriptions_cache_size from system.tables where database = currentDatabase() and table = 't_mt'; +insert into t_mt values (20, '20'); +select columns_descriptions_cache_size from system.tables where database = currentDatabase() and table = 't_mt'; +-- now let's try to remove ColumnsDescription with old structure +alter table t_mt detach part 'all_1_1_0'; +alter table t_mt detach part 'all_2_2_0'; +select columns_descriptions_cache_size from system.tables where database = currentDatabase() and table = 't_mt'; +-- reattach +detach table t_mt; +select columns_descriptions_cache_size from system.tables where database = currentDatabase() and table = 't_mt'; +attach table t_mt; +select columns_descriptions_cache_size from system.tables where database = currentDatabase() and table = 't_mt'; +-- system.metrics +select value > 0 from system.metrics where metric = 'ColumnsDescriptionsCacheSize'; diff --git a/parser/testdata/03680_mergetree_shrink_const_from_prewhere/ast.json b/parser/testdata/03680_mergetree_shrink_const_from_prewhere/ast.json new file mode 100644 index 000000000..09062a656 --- /dev/null +++ b/parser/testdata/03680_mergetree_shrink_const_from_prewhere/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery const_node (children 1)" + }, + { + "explain": " Identifier const_node" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001342717, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03680_mergetree_shrink_const_from_prewhere/metadata.json b/parser/testdata/03680_mergetree_shrink_const_from_prewhere/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03680_mergetree_shrink_const_from_prewhere/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03680_mergetree_shrink_const_from_prewhere/query.sql b/parser/testdata/03680_mergetree_shrink_const_from_prewhere/query.sql new file mode 100644 index 000000000..eaf1ee512 --- /dev/null +++ b/parser/testdata/03680_mergetree_shrink_const_from_prewhere/query.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS const_node; +CREATE TABLE const_node (`v` Nullable(UInt8)) ENGINE = MergeTree ORDER BY tuple(); +SYSTEM STOP MERGES const_node; +INSERT INTO const_node VALUES (1); +INSERT INTO const_node VALUES (2); +INSERT INTO const_node VALUES (3); +-- Here we have a condition with the constant "materialize(255)", for which convertToFullColumnIfConst() will return the underlying column w/o copying, +-- and later shrinkToFit() will be called from multiple threads on this column, which leads to UB +SELECT v FROM const_node PREWHERE and(materialize(255), *) ORDER BY v; diff
--git a/parser/testdata/03681_distributed_fractional_limit_offset/ast.json b/parser/testdata/03681_distributed_fractional_limit_offset/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03681_distributed_fractional_limit_offset/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03681_distributed_fractional_limit_offset/metadata.json b/parser/testdata/03681_distributed_fractional_limit_offset/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03681_distributed_fractional_limit_offset/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03681_distributed_fractional_limit_offset/query.sql b/parser/testdata/03681_distributed_fractional_limit_offset/query.sql new file mode 100644 index 000000000..75fa98062 --- /dev/null +++ b/parser/testdata/03681_distributed_fractional_limit_offset/query.sql @@ -0,0 +1,23 @@ +SET enable_analyzer=0; + +SELECT 'Old Analyzer'; + +SELECT number from remote('127.0.0.{1,2,3}', numbers_mt(100)) ORDER BY number LIMIT 0.01; + +SELECT number from remote('127.0.0.{1,2,3}', numbers_mt(100)) ORDER BY number LIMIT 0.01 OFFSET 0.9; + +SELECT number from remote('127.0.0.{1,2,3}', numbers_mt(100)) ORDER BY number LIMIT 3 OFFSET 0.9; + +SELECT number from remote('127.0.0.{1,2,3}', numbers_mt(100)) ORDER BY number LIMIT 0.01 OFFSET 297; + +SET enable_analyzer=1; + +SELECT 'New Analyzer'; + +SELECT number from remote('127.0.0.{1,2,3}', numbers_mt(100)) ORDER BY number LIMIT 0.01; + +SELECT number from remote('127.0.0.{1,2,3}', numbers_mt(100)) ORDER BY number LIMIT 0.01 OFFSET 0.9; + +SELECT number from remote('127.0.0.{1,2,3}', numbers_mt(100)) ORDER BY number LIMIT 3 OFFSET 0.9; + +SELECT number from remote('127.0.0.{1,2,3}', numbers_mt(100)) ORDER BY number LIMIT 0.01 OFFSET 297; \ No newline at end of file diff --git a/parser/testdata/03681_lazy_materialization_with_read_in_order/ast.json b/parser/testdata/03681_lazy_materialization_with_read_in_order/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03681_lazy_materialization_with_read_in_order/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03681_lazy_materialization_with_read_in_order/metadata.json b/parser/testdata/03681_lazy_materialization_with_read_in_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03681_lazy_materialization_with_read_in_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03681_lazy_materialization_with_read_in_order/query.sql b/parser/testdata/03681_lazy_materialization_with_read_in_order/query.sql new file mode 100644 index 000000000..608d50257 --- /dev/null +++ b/parser/testdata/03681_lazy_materialization_with_read_in_order/query.sql @@ -0,0 +1,226 @@ +-- Test that lazy materialization works together with read-in-order optimization +-- Tags: no-random-settings + +SET query_plan_optimize_lazy_materialization = 1; +SET query_plan_max_limit_for_lazy_materialization = 10; +SET optimize_read_in_order = 1; +SET enable_analyzer = 1; +SET parallel_replicas_local_plan = 1; + +DROP TABLE IF EXISTS test_lazy_read_in_order; + +-- Create a table with sorting key on column 'a' +CREATE TABLE test_lazy_read_in_order +( + a UInt64, + b String, + c String, + d String, + e UInt64 +) ENGINE = MergeTree() + ORDER BY a; + +-- Insert test data +INSERT INTO test_lazy_read_in_order +SELECT number, + repeat('b', 100), + repeat('c', 100), + repeat('d', 
100), + number * 2 +FROM numbers(1000); + +-- Test 1: ORDER BY on sorting key column 'a' with LIMIT +-- Should use both read-in-order AND lazy materialization +-- Columns b, c, d should be lazily materialized since they're not used in ORDER BY or WHERE +SELECT '=== Test 1: ORDER BY a (sorting key) ==='; +SELECT trimLeft(explain) +FROM ( + EXPLAIN PLAN actions=1 + SELECT a, b, c, d, e + FROM test_lazy_read_in_order + ORDER BY a + LIMIT 5 + SETTINGS max_threads=1 +) +WHERE explain LIKE '%LazilyRead%' + OR explain LIKE '%Lazily read columns:%' + OR explain LIKE '%ReadType:%' + OR explain LIKE '%Prefix sort description:%' + OR explain LIKE '%Result sort description:%'; + +SELECT a, e +FROM test_lazy_read_in_order +ORDER BY a +LIMIT 5; + +-- Test 2: ORDER BY on sorting key with WHERE clause +-- Columns not used in WHERE or ORDER BY should be lazily materialized +SELECT '=== Test 2: ORDER BY a with WHERE ==='; +SELECT trimLeft(explain) +FROM ( + EXPLAIN PLAN actions=1 + SELECT a, b, c, d, e + FROM test_lazy_read_in_order + WHERE e > 100 + ORDER BY a + LIMIT 5 + SETTINGS max_threads=1 +) +WHERE explain LIKE '%LazilyRead%' + OR explain LIKE '%Lazily read columns:%' + OR explain LIKE '%ReadType:%' + OR explain LIKE '%Prefix sort description:%' + OR explain LIKE '%Result sort description:%'; + +SELECT a, e +FROM test_lazy_read_in_order +WHERE e > 100 +ORDER BY a +LIMIT 5; + +-- Test 3: ORDER BY on sorting key with PREWHERE +-- Similar to Test 2 but with PREWHERE +SELECT '=== Test 3: ORDER BY a with PREWHERE ==='; +SELECT trimLeft(explain) +FROM ( + EXPLAIN PLAN actions=1 + SELECT a, b, c, d, e + FROM test_lazy_read_in_order + PREWHERE e > 100 + ORDER BY a + LIMIT 5 + SETTINGS max_threads=1 +) +WHERE explain LIKE '%LazilyRead%' + OR explain LIKE '%Lazily read columns:%' + OR explain LIKE '%ReadType:%' + OR explain LIKE '%Prefix sort description:%' + OR explain LIKE '%Result sort description:%'; + +SELECT a, e +FROM test_lazy_read_in_order PREWHERE e > 100 +ORDER BY a +LIMIT 5; + +-- Test 4: Verify that columns used in ORDER BY are NOT lazily materialized +-- Column 'e' is used in ORDER BY, so it should not be in LazilyRead +SELECT '=== Test 4: ORDER BY a, e (e should not be lazy) ==='; +SELECT trimLeft(explain) +FROM ( + EXPLAIN PLAN actions=1 + SELECT a, b, c, d, e + FROM test_lazy_read_in_order + ORDER BY a, e + LIMIT 5 + SETTINGS max_threads=1 +) +WHERE explain LIKE '%LazilyRead%' + OR explain LIKE '%Lazily read columns:%' + OR explain LIKE '%ReadType:%' + OR explain LIKE '%Prefix sort description:%' + OR explain LIKE '%Result sort description:%'; + +SELECT a, e +FROM test_lazy_read_in_order +ORDER BY a, e +LIMIT 5; + +-- Test 5: ORDER BY with expression on sorting key +-- Should still use read-in-order for the prefix +SELECT '=== Test 5: ORDER BY a, a+1 ==='; +SELECT trimLeft(explain) +FROM ( + EXPLAIN PLAN actions=1 + SELECT a, b, c, d, e + FROM test_lazy_read_in_order + ORDER BY a, a + 1 + LIMIT 5 + SETTINGS max_threads=1 +) +WHERE explain LIKE '%LazilyRead%' + OR explain LIKE '%Lazily read columns:%' + OR explain LIKE '%ReadType:%' + OR explain LIKE '%Prefix sort description:%' + OR explain LIKE '%Result sort description:%'; + +SELECT a, e +FROM test_lazy_read_in_order +ORDER BY a, a + 1 +LIMIT 5; + +DROP TABLE IF EXISTS test_lazy_read_in_order; + + +-- Additional correctness tests for lazy materialization with read-in-order +DROP TABLE IF EXISTS test_correctness; + +CREATE TABLE test_correctness +( + id UInt64, + value String, + score UInt64, + data String +) ENGINE = MergeTree() + ORDER BY 
id; + +-- Insert data in non-sequential order to test sorting +INSERT INTO test_correctness +VALUES (5, 'five', 50, 'data5'), + (2, 'two', 20, 'data2'), + (8, 'eight', 80, 'data8'), + (1, 'one', 10, 'data1'), + (9, 'nine', 90, 'data9'), + (3, 'three', 30, 'data3'), + (7, 'seven', 70, 'data7'), + (4, 'four', 40, 'data4'), + (6, 'six', 60, 'data6'), + (10, 'ten', 100, 'data10'); + +SELECT '=== Test 6: Verify ORDER BY ASC correctness ==='; +-- With both optimizations enabled +SELECT id, value, score +FROM test_correctness +ORDER BY id ASC +LIMIT 5; + +SELECT '=== Test 7: Verify ORDER BY DESC correctness ==='; +-- DESC should also work +SELECT id, value, score +FROM test_correctness +ORDER BY id DESC +LIMIT 5; + +SELECT '=== Test 8: Verify filtering with ORDER BY ==='; +-- Filter and order +SELECT id, value, score +FROM test_correctness +WHERE score >= 50 +ORDER BY id ASC; + +SELECT '=== Test 9: Compare with optimization disabled ==='; +-- Same query with optimizations disabled should give same results +SELECT id, value, score +FROM test_correctness +ORDER BY id ASC +LIMIT 5 +SETTINGS +optimize_read_in_order = 0, +query_plan_optimize_lazy_materialization = 0; + +SELECT '=== Test 10: Verify EXPLAIN shows both optimizations ==='; +SELECT trimLeft(explain) +FROM ( + EXPLAIN PLAN actions=1 + SELECT id, value, score, data + FROM test_correctness + ORDER BY id ASC + LIMIT 5 + SETTINGS max_threads=1 +) +WHERE explain LIKE '%LazilyRead%' + OR explain LIKE '%Lazily read columns:%' + OR explain LIKE '%ReadType:%' + OR explain LIKE '%Prefix sort description:%' + OR explain LIKE '%Result sort description:%'; + +DROP TABLE IF EXISTS test_correctness; diff --git a/parser/testdata/03699_reverse_utf8/ast.json b/parser/testdata/03699_reverse_utf8/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03699_reverse_utf8/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03699_reverse_utf8/metadata.json b/parser/testdata/03699_reverse_utf8/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03699_reverse_utf8/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03699_reverse_utf8/query.sql b/parser/testdata/03699_reverse_utf8/query.sql new file mode 100644 index 000000000..9c31b82ef --- /dev/null +++ b/parser/testdata/03699_reverse_utf8/query.sql @@ -0,0 +1,6 @@ +-- The function reverses the sequence of UTF-8 code points (that is different from bytes or full characters): + +SELECT reverseUTF8('привіт'); +SELECT reverseUTF8('🇬🇧🌈'); +SELECT reverseUTF8('🌈'); +SELECT reverseUTF8('नमस्ते'); diff --git a/parser/testdata/03700_vertical_format_pretty_print_json/ast.json b/parser/testdata/03700_vertical_format_pretty_print_json/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03700_vertical_format_pretty_print_json/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03700_vertical_format_pretty_print_json/metadata.json b/parser/testdata/03700_vertical_format_pretty_print_json/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03700_vertical_format_pretty_print_json/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03700_vertical_format_pretty_print_json/query.sql b/parser/testdata/03700_vertical_format_pretty_print_json/query.sql new file mode 100644 index 000000000..0d4576d21 --- /dev/null +++ b/parser/testdata/03700_vertical_format_pretty_print_json/query.sql @@ -0,0 
+1,19 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS test_vertical_json; + +CREATE TABLE test_vertical_json +( + id UInt32, + data JSON, + nullableData Nullable(JSON) +) +ENGINE = Memory; + +INSERT INTO test_vertical_json VALUES (1, '{"name": "Alice", "age": 30, "address": {"city": "New York", "zip": "10001"}, "hobbies": ["reading", "cycling"]}', '{"foo": "bar"}'); +INSERT INTO test_vertical_json VALUES (2, NULL, NULL); + +SELECT * FROM test_vertical_json ORDER BY id FORMAT Vertical; + +DROP TABLE test_vertical_json; + diff --git a/parser/testdata/03701_analyzer_correlated_subquery_plan_reference/ast.json b/parser/testdata/03701_analyzer_correlated_subquery_plan_reference/ast.json new file mode 100644 index 000000000..c11829e20 --- /dev/null +++ b/parser/testdata/03701_analyzer_correlated_subquery_plan_reference/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001139016, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03701_analyzer_correlated_subquery_plan_reference/metadata.json b/parser/testdata/03701_analyzer_correlated_subquery_plan_reference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03701_analyzer_correlated_subquery_plan_reference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03701_analyzer_correlated_subquery_plan_reference/query.sql b/parser/testdata/03701_analyzer_correlated_subquery_plan_reference/query.sql new file mode 100644 index 000000000..2a2cf96b1 --- /dev/null +++ b/parser/testdata/03701_analyzer_correlated_subquery_plan_reference/query.sql @@ -0,0 +1,23 @@ +SET enable_analyzer = 1; +SET query_plan_join_swap_table = false; +SET enable_parallel_replicas = 0; +SET correlated_subqueries_default_join_kind = 'left'; + +CREATE TABLE t(x Int, y Int) ORDER BY () +AS SELECT number as x, number % 2 as y FROM numbers(100); + +EXPLAIN actions = 1 +SELECT + count() +FROM + t n +WHERE + EXISTS ( + SELECT + * + FROM + numbers(10) + WHERE + number != n.x + ) + AND n.y = 1; diff --git a/parser/testdata/03701_column_ttl_fully_expired/ast.json b/parser/testdata/03701_column_ttl_fully_expired/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03701_column_ttl_fully_expired/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03701_column_ttl_fully_expired/metadata.json b/parser/testdata/03701_column_ttl_fully_expired/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03701_column_ttl_fully_expired/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03701_column_ttl_fully_expired/query.sql b/parser/testdata/03701_column_ttl_fully_expired/query.sql new file mode 100644 index 000000000..979533ec2 --- /dev/null +++ b/parser/testdata/03701_column_ttl_fully_expired/query.sql @@ -0,0 +1,55 @@ +-- { echo ON } + +drop table if exists x; + +create table x (dt DateTime, i Int32 default 42 ttl dt + toIntervalDay(1), index idx(i) type set(100)) engine MergeTree partition by indexHint(dt) order by dt settings index_granularity = 8192, min_bytes_for_wide_part = 0; + +system stop merges x; + +insert into x values (now() - toIntervalDay(30), 1); + +select i from x where i = 1; + +system start merges x; + +optimize table x final; + +-- Run OPTIMIZE twice to ensure the second merge is triggered, as the issue occurs during the second 
merge phase. +optimize table x final; + +select i from x where i = 42; + +drop table x; + +create table x (dt DateTime, i Int32 default 42 ttl dt + toIntervalDay(1)) engine ReplacingMergeTree(i) order by dt settings index_granularity = 8192, min_bytes_for_wide_part = 0; + +system stop merges x; + +insert into x values (now() - toIntervalDay(30), 1); + +select i from x where i = 1; + +system start merges x; + +optimize table x final; + +-- Run OPTIMIZE twice to ensure the second merge is triggered, as the "Not found column i in block" issue occurs during the second merge phase. +optimize table x final; + +drop table x; + +create table x (dt DateTime, i Int32 default 42 ttl dt + toIntervalDay(1)) engine MergeTree order by dt settings index_granularity = 8192, min_bytes_for_wide_part = 0; + +system stop merges x; + +insert into x values (now() - toIntervalDay(30), 1), (now() - toIntervalDay(30), 2); + +system start merges x; + +optimize table x final; -- Ensure that column TTL is applied + +optimize table x final deduplicate by dt, i; + +select i from x; + +drop table x; diff --git a/parser/testdata/03701_create_or_replace_temporary_table/ast.json b/parser/testdata/03701_create_or_replace_temporary_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03701_create_or_replace_temporary_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03701_create_or_replace_temporary_table/metadata.json b/parser/testdata/03701_create_or_replace_temporary_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03701_create_or_replace_temporary_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03701_create_or_replace_temporary_table/query.sql b/parser/testdata/03701_create_or_replace_temporary_table/query.sql new file mode 100644 index 000000000..99ee614d8 --- /dev/null +++ b/parser/testdata/03701_create_or_replace_temporary_table/query.sql @@ -0,0 +1,17 @@ +-- Tags: no-parallel + +CREATE OR REPLACE TEMPORARY TABLE tmp (n UInt32) AS SELECT * FROM numbers(10); + +SELECT * FROM tmp; + +REPLACE TEMPORARY TABLE tmp (s String) AS SELECT 'a' FROM numbers(10); + +SELECT * FROM tmp; + +CREATE OR REPLACE TEMPORARY TABLE tmp (n UInt32, s String) AS SELECT number, 'a' FROM numbers(10); + +SELECT * FROM tmp; + +DROP TEMPORARY TABLE tmp; + +ATTACH TEMPORARY TABLE tmp; -- { clientError SYNTAX_ERROR } diff --git a/parser/testdata/03701_distinct_but_no_group_by_projection_table_use_check/ast.json b/parser/testdata/03701_distinct_but_no_group_by_projection_table_use_check/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03701_distinct_but_no_group_by_projection_table_use_check/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03701_distinct_but_no_group_by_projection_table_use_check/metadata.json b/parser/testdata/03701_distinct_but_no_group_by_projection_table_use_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03701_distinct_but_no_group_by_projection_table_use_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03701_distinct_but_no_group_by_projection_table_use_check/query.sql b/parser/testdata/03701_distinct_but_no_group_by_projection_table_use_check/query.sql new file mode 100644 index 000000000..31c01a129 --- /dev/null +++ b/parser/testdata/03701_distinct_but_no_group_by_projection_table_use_check/query.sql @@ -0,0 +1,72 @@ +-- Tags: 
no-replicated-database, no-parallel-replicas +-- no-replicated-database: EXPLAIN output differs for replicated database. +-- no-parallel-replicas: EXPLAIN output differs for parallel replicas. + +SELECT 'Simple:'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab ( + n UInt32, x UInt32, y UInt32, z UInt32, + projection p ( + SELECT count() + GROUP BY x, z + ) +) ENGINE = MergeTree +order by tuple(); + +INSERT INTO tab +SELECT number, number % 3, number % 5, number % 7 +FROM numbers_mt(30); + +SELECT 'Projection cols used:'; +SELECT DISTINCT x, z FROM tab; + +EXPLAIN SELECT DISTINCT x, z FROM tab; + +SELECT 'Some of projection cols used:'; +SELECT DISTINCT z FROM tab; + +EXPLAIN SELECT DISTINCT z FROM tab; + +SELECT 'Not all cols in projection:'; +SELECT DISTINCT x, y FROM tab; + +EXPLAIN SELECT DISTINCT x, y FROM tab; + +SELECT 'Filter present:'; +SELECT DISTINCT x, z FROM tab WHERE x IN (1, 2) AND z < 5; + +EXPLAIN SELECT DISTINCT x, z FROM tab WHERE x IN (1, 2) AND z < 5; + +SELECT 'Expression in select:'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab ( + n UInt32, x UInt32, y UInt32, + projection p ( + SELECT count() + GROUP BY x / 2, y % 10 + ) +) ENGINE = MergeTree +order by tuple(); + +INSERT INTO tab +SELECT number, number % 3, number % 5 +FROM numbers_mt(30); + +SELECT 'Projection cols used:'; +SELECT DISTINCT x / 2, y % 10 FROM tab; + +EXPLAIN SELECT DISTINCT x / 2, y % 10 FROM tab; + +SELECT 'Some of projection cols used:'; +SELECT DISTINCT x / 2 FROM tab; + +EXPLAIN SELECT DISTINCT x / 2 FROM tab; + +SELECT 'Not all cols in projection:'; +SELECT DISTINCT x / 2, y FROM tab; + +EXPLAIN SELECT DISTINCT x / 2, y FROM tab; diff --git a/parser/testdata/03701_json_duplicate_path_insert_select/ast.json b/parser/testdata/03701_json_duplicate_path_insert_select/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03701_json_duplicate_path_insert_select/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03701_json_duplicate_path_insert_select/metadata.json b/parser/testdata/03701_json_duplicate_path_insert_select/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03701_json_duplicate_path_insert_select/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03701_json_duplicate_path_insert_select/query.sql b/parser/testdata/03701_json_duplicate_path_insert_select/query.sql new file mode 100644 index 000000000..0b8243068 --- /dev/null +++ b/parser/testdata/03701_json_duplicate_path_insert_select/query.sql @@ -0,0 +1,12 @@ +-- Test INSERT...SELECT with duplicate paths +DROP TABLE IF EXISTS test_json_duplicates; +CREATE TABLE test_json_duplicates (json JSON) ENGINE = Memory; + +-- Should fail without setting +INSERT INTO test_json_duplicates SELECT '{"a": 1, "a": 2}'; -- {serverError INCORRECT_DATA} + +-- Should succeed with setting +INSERT INTO test_json_duplicates SELECT '{"a": 1, "a": 2}' SETTINGS type_json_skip_duplicated_paths=1; +SELECT * FROM test_json_duplicates; + +DROP TABLE test_json_duplicates; diff --git a/parser/testdata/03701_limit_by_in_order/ast.json b/parser/testdata/03701_limit_by_in_order/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03701_limit_by_in_order/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03701_limit_by_in_order/metadata.json b/parser/testdata/03701_limit_by_in_order/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03701_limit_by_in_order/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03701_limit_by_in_order/query.sql b/parser/testdata/03701_limit_by_in_order/query.sql new file mode 100644 index 000000000..f4192826f --- /dev/null +++ b/parser/testdata/03701_limit_by_in_order/query.sql @@ -0,0 +1,103 @@ +-- The in-order version of LIMIT BY works only if the analyzer is enabled +SET enable_analyzer = 1; + +DROP TABLE IF EXISTS 03701_unsorted, 03701_sorted; + +CREATE TABLE 03701_unsorted (key UInt32, val UInt32, dt Date) engine=MergeTree ORDER BY tuple(); + +INSERT INTO 03701_unsorted SELECT intDiv(number, 2), number, '2025-05-05' FROM numbers(10); +INSERT INTO 03701_unsorted SELECT intDiv(number, 3) + 10, number, '2025-05-06' FROM numbers(9); +INSERT INTO 03701_unsorted SELECT number + 50, number, '2025-05-07' FROM numbers(500); + +-- DISTINCT over the explain output is required because of parallel replicas tests, where multiple +-- LimitByTransforms are created (those pushed down to the replicas plus the global one) + +SELECT DISTINCT 'Unsorted ORDER BY key LIMIT BY key: ' || trim(BOTH ' ' FROM explain) +FROM (EXPLAIN PIPELINE SELECT key FROM 03701_unsorted ORDER BY key LIMIT 1 BY key LIMIT 10) +WHERE explain LIKE '%LimitByTransform%'; + +SELECT DISTINCT 'Unsorted ORDER BY key DESC LIMIT BY key: ' || trim(BOTH ' ' FROM explain) +FROM (EXPLAIN PIPELINE SELECT key FROM 03701_unsorted ORDER BY key DESC LIMIT 1 BY key LIMIT 10) +WHERE explain LIKE '%LimitByTransform%'; + +SELECT DISTINCT 'Unsorted ORDER BY key, val LIMIT BY key: ' || trim(BOTH ' ' FROM explain) +FROM (EXPLAIN PIPELINE SELECT key FROM 03701_unsorted ORDER BY key, val LIMIT 1 BY key LIMIT 10) +WHERE explain LIKE '%LimitByTransform%'; + +SELECT DISTINCT 'Unsorted ORDER BY val LIMIT BY key: ' || trim(BOTH ' ' FROM explain) +FROM (EXPLAIN PIPELINE SELECT key FROM 03701_unsorted ORDER BY val LIMIT 1 BY key LIMIT 10) +WHERE explain LIKE '%LimitByTransform%'; + +SELECT DISTINCT 'Unsorted ORDER BY val, key LIMIT BY key: ' || trim(BOTH ' ' FROM explain) +FROM (EXPLAIN PIPELINE SELECT key FROM 03701_unsorted ORDER BY val, key LIMIT 1 BY key LIMIT 10) +WHERE explain LIKE '%LimitByTransform%'; + +SELECT DISTINCT 'Unsorted ORDER BY key LIMIT BY key, val: ' || trim(BOTH ' ' FROM explain) +FROM (EXPLAIN PIPELINE SELECT key FROM 03701_unsorted ORDER BY key LIMIT 1 BY key, val LIMIT 10) +WHERE explain LIKE '%LimitByTransform%'; + +SELECT DISTINCT 'Unsorted ORDER BY key, dt LIMIT BY key, val: ' || trim(BOTH ' ' FROM explain) +FROM (EXPLAIN PIPELINE SELECT key FROM 03701_unsorted ORDER BY key, dt LIMIT 1 BY key, val LIMIT 10) +WHERE explain LIKE '%LimitByTransform%'; + +SELECT DISTINCT 'Unsorted w/o ORDER BY: ' || trim(BOTH ' ' FROM explain) +FROM (EXPLAIN PIPELINE SELECT key FROM 03701_unsorted LIMIT 1 BY key LIMIT 10) +WHERE explain LIKE '%LimitByTransform%'; + +SELECT ''; + +SELECT '-- Unsorted with LIMIT 1 BY'; +SELECT key FROM 03701_unsorted ORDER BY key LIMIT 1 BY key LIMIT 10; +SELECT '-- Unsorted with LIMIT 2 BY'; +SELECT key FROM 03701_unsorted ORDER BY key LIMIT 2 BY key LIMIT 16; + +DROP TABLE 03701_unsorted; + +CREATE TABLE 03701_sorted (key UInt32, val UInt32, dt Date) engine=MergeTree ORDER BY key; + +INSERT INTO 03701_sorted SELECT intDiv(number, 2), number, '2025-05-05' FROM numbers(10); +INSERT INTO 03701_sorted SELECT intDiv(number, 3) + 10, number, '2025-05-06' FROM numbers(9); +INSERT INTO 03701_sorted SELECT number + 50, number, '2025-05-07' FROM numbers(500); + +SELECT ''; + +SELECT DISTINCT 'Sorted ORDER BY key LIMIT
BY key: ' || trim(BOTH ' ' FROM explain) +FROM (EXPLAIN PIPELINE SELECT key FROM 03701_sorted ORDER BY key LIMIT 1 BY key LIMIT 10) +WHERE explain LIKE '%LimitByTransform%'; + +SELECT DISTINCT 'Sorted ORDER BY key DESC LIMIT BY key: ' || trim(BOTH ' ' FROM explain) +FROM (EXPLAIN PIPELINE SELECT key FROM 03701_sorted ORDER BY key DESC LIMIT 1 BY key LIMIT 10) +WHERE explain LIKE '%LimitByTransform%'; + +SELECT DISTINCT 'Sorted ORDER BY key, val LIMIT BY key: ' || trim(BOTH ' ' FROM explain) +FROM (EXPLAIN PIPELINE SELECT key FROM 03701_sorted ORDER BY key, val LIMIT 1 BY key LIMIT 10) +WHERE explain LIKE '%LimitByTransform%'; + +SELECT DISTINCT 'Sorted ORDER BY val LIMIT BY key: ' || trim(BOTH ' ' FROM explain) +FROM (EXPLAIN PIPELINE SELECT key FROM 03701_sorted ORDER BY val LIMIT 1 BY key LIMIT 10) +WHERE explain LIKE '%LimitByTransform%'; + +SELECT DISTINCT 'Sorted ORDER BY val, key LIMIT BY key: ' || trim(BOTH ' ' FROM explain) +FROM (EXPLAIN PIPELINE SELECT key FROM 03701_sorted ORDER BY val, key LIMIT 1 BY key LIMIT 10) +WHERE explain LIKE '%LimitByTransform%'; + +SELECT DISTINCT 'Sorted ORDER BY key LIMIT BY key, val: ' || trim(BOTH ' ' FROM explain) +FROM (EXPLAIN PIPELINE SELECT key FROM 03701_sorted ORDER BY key LIMIT 1 BY key, val LIMIT 10) +WHERE explain LIKE '%LimitByTransform%'; + +SELECT DISTINCT 'Sorted ORDER BY key, dt LIMIT BY key, val: ' || trim(BOTH ' ' FROM explain) +FROM (EXPLAIN PIPELINE SELECT key FROM 03701_sorted ORDER BY key, dt LIMIT 1 BY key, val LIMIT 10) +WHERE explain LIKE '%LimitByTransform%'; + +SELECT DISTINCT 'Sorted w/o ORDER BY: ' || trim(BOTH ' ' FROM explain) +FROM (EXPLAIN PIPELINE SELECT key FROM 03701_sorted LIMIT 1 BY key LIMIT 10) +WHERE explain LIKE '%LimitByTransform%'; + +SELECT ''; + +SELECT '-- Sorted with LIMIT 1 BY'; +SELECT key FROM 03701_sorted ORDER BY key LIMIT 1 BY key LIMIT 10; +SELECT '-- Sorted with LIMIT 2 BY'; +SELECT key FROM 03701_sorted ORDER BY key LIMIT 2 BY key LIMIT 16; + +DROP TABLE 03701_sorted; diff --git a/parser/testdata/03701_optimize_inverse_dictionary_lookup_basic/ast.json b/parser/testdata/03701_optimize_inverse_dictionary_lookup_basic/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03701_optimize_inverse_dictionary_lookup_basic/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03701_optimize_inverse_dictionary_lookup_basic/metadata.json b/parser/testdata/03701_optimize_inverse_dictionary_lookup_basic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03701_optimize_inverse_dictionary_lookup_basic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03701_optimize_inverse_dictionary_lookup_basic/query.sql b/parser/testdata/03701_optimize_inverse_dictionary_lookup_basic/query.sql new file mode 100644 index 000000000..8760e3bdd --- /dev/null +++ b/parser/testdata/03701_optimize_inverse_dictionary_lookup_basic/query.sql @@ -0,0 +1,438 @@ +-- Tags: no-replicated-database, no-parallel-replicas +-- no-parallel, no-parallel-replicas: Dictionary is not created in parallel replicas. 
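+-- A rough sketch of the rewrite under test (illustrative only, not the literal optimized plan): a constant comparison against a dictionary attribute, such as +-- WHERE dictGetString('colors', 'name', color_id) = 'red' +-- is conceptually replaced by a filter over the precomputed matching keys, roughly +-- WHERE color_id IN (SELECT id FROM ref_colors WHERE name = 'red')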
+ +SET enable_analyzer = 1; +SET optimize_inverse_dictionary_lookup = 1; +SET optimize_or_like_chain = 0; + +DROP DICTIONARY IF EXISTS colors; +DROP TABLE IF EXISTS ref_colors; +CREATE TABLE ref_colors +( + id UInt64, + name String, + n UInt64 +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO ref_colors VALUES + (1, 'red', 5), + (2, 'blue', 7), + (3, 'red', 12), + (4, 'green', 0), + (5, 'Rose', 9); + +DROP DICTIONARY IF EXISTS colors; +CREATE DICTIONARY colors +( + id UInt64, + name String, + n UInt64 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'ref_colors')) +LAYOUT(HASHED()) +LIFETIME(0); + +DROP TABLE IF EXISTS t; +CREATE TABLE t +( + color_id UInt64, + payload String +) +ENGINE = MergeTree +ORDER BY color_id; + +INSERT INTO t VALUES + (1, 'a'), + (2, 'b'), + (3, 'c'), + (4, 'd'), + (5, 'R'); + +SELECT 'Equality, LHS - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, payload +FROM t +WHERE dictGetString('colors', 'name', color_id) = 'red' +ORDER BY color_id, payload; + +SELECT 'Equality, LHS'; +SELECT color_id, payload +FROM t +WHERE dictGetString('colors', 'name', color_id) = 'red' +ORDER BY color_id, payload; + +SELECT 'Equality, RHS - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, payload +FROM t +WHERE 'red' = dictGetString('colors', 'name', color_id) +ORDER BY color_id, payload; + +SELECT 'Equality, RHS'; +SELECT color_id, payload +FROM t +WHERE 'red' = dictGetString('colors', 'name', color_id) +ORDER BY color_id, payload; + +SELECT 'Inequality <, LHS - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, payload +FROM t +WHERE dictGetUInt64('colors', 'n', color_id) < 10 +ORDER BY color_id, payload; + +SELECT 'Inequality <, LHS'; +SELECT color_id, payload +FROM t +WHERE dictGetUInt64('colors', 'n', color_id) < 10 +ORDER BY color_id, payload; + +SELECT 'Inequality <, RHS - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, payload +FROM t +WHERE 10 > dictGetUInt64('colors', 'n', color_id) +ORDER BY color_id, payload; + +SELECT 'Inequality <, RHS'; +SELECT color_id, payload +FROM t +WHERE 10 > dictGetUInt64('colors', 'n', color_id) +ORDER BY color_id, payload; + +SELECT 'Type variant cast, >= Int32 - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, payload +FROM t +WHERE dictGetInt32('colors', 'n', color_id) >= 2 +ORDER BY color_id, payload; + +SELECT 'Type variant cast, >= Int32'; +SELECT color_id, payload +FROM t +WHERE dictGetInt32('colors', 'n', color_id) >= 2 +ORDER BY color_id, payload; + +SELECT 'LIKE - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, payload +FROM t +WHERE dictGetString('colors', 'name', color_id) LIKE 'r%' +ORDER BY color_id, payload; + +SELECT 'LIKE'; +SELECT color_id, payload +FROM t +WHERE dictGetString('colors', 'name', color_id) LIKE 'r%' +ORDER BY color_id, payload; + +SELECT 'ILIKE - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, payload +FROM t +WHERE dictGetString('colors', 'name', color_id) ILIKE 'r%' +ORDER BY color_id, payload; + +SELECT 'ILIKE'; +SELECT color_id, payload +FROM t +WHERE dictGetString('colors', 'name', color_id) ILIKE 'r%' +ORDER BY color_id, payload; + +SELECT 'equals() - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id +FROM t +WHERE equals(dictGetString('colors','name', color_id), 'red') +ORDER BY color_id; + +SELECT 'equals()'; +SELECT color_id +FROM t +WHERE equals(dictGetString('colors','name', color_id), 'red') +ORDER BY color_id; + +SELECT 'notEquals - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 
+SELECT color_id, payload +FROM t +WHERE dictGetString('colors','name', color_id) != 'red' +ORDER BY color_id, payload; + +SELECT 'notEquals'; +SELECT color_id, payload +FROM t +WHERE dictGetString('colors','name', color_id) != 'red' +ORDER BY color_id, payload; + +SELECT 'NOT LIKE r% - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, payload +FROM t +WHERE dictGetString('colors','name', color_id) NOT LIKE 'r%' +ORDER BY color_id, payload; + +SELECT 'NOT LIKE r%'; +SELECT color_id, payload +FROM t +WHERE dictGetString('colors','name', color_id) NOT LIKE 'r%' +ORDER BY color_id, payload; + +SELECT 'NOT ILIKE r% - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, payload +FROM t +WHERE dictGetString('colors','name', color_id) NOT ILIKE 'r%' +ORDER BY color_id, payload; + +SELECT 'NOT ILIKE r%'; +SELECT color_id, payload +FROM t +WHERE dictGetString('colors','name', color_id) NOT ILIKE 'r%' +ORDER BY color_id, payload; + +SELECT 'match ^r - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, payload +FROM t +WHERE match(dictGetString('colors','name', color_id), '^r') +ORDER BY color_id, payload; + +SELECT 'match ^r'; +SELECT color_id, payload +FROM t +WHERE match(dictGetString('colors','name', color_id), '^r') +ORDER BY color_id, payload; + +SELECT 'NOT recursion - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, payload +FROM t +WHERE NOT (dictGetString('colors', 'name', color_id) = 'red') +ORDER BY color_id, payload; + +SELECT 'NOT recursion'; +SELECT color_id, payload +FROM t +WHERE NOT (dictGetString('colors', 'name', color_id) = 'red') +ORDER BY color_id, payload; + +SELECT 'AND/OR recursion - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, payload +FROM t +WHERE (dictGetString('colors', 'name', color_id) = 'red' AND dictGetUInt64('colors', 'n', color_id) < 10) + OR dictGetString('colors', 'name', color_id) = 'green' +ORDER BY color_id, payload; + +SELECT 'AND/OR recursion'; +SELECT color_id, payload +FROM t +WHERE (dictGetString('colors', 'name', color_id) = 'red' AND dictGetUInt64('colors', 'n', color_id) < 10) + OR dictGetString('colors', 'name', color_id) = 'green' +ORDER BY color_id, payload; + +SELECT 'NULL constant - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, payload +FROM t +WHERE dictGetString('colors', 'name', color_id) = NULL +ORDER BY color_id, payload; + +SELECT 'NULL constant'; +SELECT color_id, payload +FROM t +WHERE dictGetString('colors', 'name', color_id) = NULL +ORDER BY color_id, payload; + +SELECT 'PREWHERE - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id +FROM t +PREWHERE dictGetString('colors', 'name', color_id) = 'red' +ORDER BY color_id; + +SELECT 'PREWHERE'; +SELECT color_id +FROM t +PREWHERE dictGetString('colors', 'name', color_id) = 'red' +ORDER BY color_id; + +SELECT 'QUALIFY - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, row_number() OVER (PARTITION BY 1 ORDER BY color_id) AS rn +FROM t +QUALIFY dictGetString('colors', 'name', color_id) = 'red' +ORDER BY color_id, rn; + +SELECT 'QUALIFY'; +SELECT color_id, row_number() OVER (PARTITION BY 1 ORDER BY color_id) AS rn +FROM t +QUALIFY dictGetString('colors', 'name', color_id) = 'red' +ORDER BY color_id, rn; + +SELECT 'Empty result set - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id +FROM t +WHERE dictGetString('colors', 'name', color_id) = 'nonexistent_color' +ORDER BY color_id; + +SELECT 'Empty result set'; +SELECT color_id +FROM t +WHERE 
dictGetString('colors', 'name', color_id) = 'nonexistent_color' +ORDER BY color_id; + +SELECT 'HAVING - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, count() AS c +FROM t +GROUP BY color_id +HAVING dictGetString('colors','name', color_id) = 'red' +ORDER BY color_id, c; + +SELECT 'HAVING'; +SELECT color_id, count() AS c +FROM t +GROUP BY color_id +HAVING dictGetString('colors','name', color_id) = 'red' +ORDER BY color_id, c; + +SELECT 'JOIN ON (INNER) - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT t1.color_id, t1.payload, t2.payload AS payload2 +FROM t AS t1 +INNER JOIN t AS t2 + ON t1.color_id = t2.color_id + AND dictGetString('colors','name', t1.color_id) = 'red' +ORDER BY t1.color_id, t1.payload, payload2; + +SELECT 'JOIN ON (INNER)'; +SELECT t1.color_id, t1.payload, t2.payload AS payload2 +FROM t AS t1 +INNER JOIN t AS t2 + ON t1.color_id = t2.color_id + AND dictGetString('colors','name', t1.color_id) = 'red' +ORDER BY t1.color_id, t1.payload, payload2; + +SELECT 'JOIN ON (LEFT) - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT t1.color_id, t1.payload, t2.payload AS payload2 +FROM t AS t1 +LEFT JOIN t AS t2 + ON t1.color_id = t2.color_id + AND dictGetString('colors','name', t1.color_id) = 'red' +ORDER BY t1.color_id, t1.payload, payload2; + +SELECT 'JOIN ON (LEFT)'; +SELECT t1.color_id, t1.payload, t2.payload AS payload2 +FROM t AS t1 +LEFT JOIN t AS t2 + ON t1.color_id = t2.color_id + AND dictGetString('colors','name', t1.color_id) = 'red' +ORDER BY t1.color_id, t1.payload, payload2; + +SELECT 'SELECT multiIf - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, payload, + multiIf(dictGetString('colors','name', color_id) = 'red', 'match', 'no_match') AS tag +FROM t +ORDER BY color_id, payload, tag; + +SELECT 'SELECT multiIf'; +SELECT color_id, payload, + multiIf(dictGetString('colors','name', color_id) = 'red', 'match', 'no_match') AS tag +FROM t +ORDER BY color_id, payload, tag; + +SELECT 'countIf - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT countIf(dictGetString('colors','name', color_id) = 'red') AS cnt +FROM t; + +SELECT 'countIf'; +SELECT countIf(dictGetString('colors','name', color_id) = 'red') AS cnt +FROM t; + +SELECT 'sumIf - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT sumIf(color_id, dictGetString('colors','name', color_id) = 'red') AS sum_id_match +FROM t; + +SELECT 'sumIf'; +SELECT sumIf(color_id, dictGetString('colors','name', color_id) = 'red') AS sum_id_match +FROM t; + +SELECT 'ORDER BY - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, payload +FROM t +ORDER BY (dictGetString('colors','name', color_id) = 'red') DESC, color_id, payload; + +SELECT 'ORDER BY'; +SELECT color_id, payload +FROM t +ORDER BY (dictGetString('colors','name', color_id) = 'red') DESC, color_id, payload; + +SELECT 'GROUP BY - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT (dictGetString('colors','name', color_id) = 'red') AS is_red, count() AS c +FROM t +GROUP BY (dictGetString('colors','name', color_id) = 'red') +ORDER BY is_red, c; + +SELECT 'GROUP BY'; +SELECT (dictGetString('colors','name', color_id) = 'red') AS is_red, count() AS c +FROM t +GROUP BY (dictGetString('colors','name', color_id) = 'red') +ORDER BY is_red, c; + +SELECT 'LIMIT BY - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, payload +FROM t +ORDER BY color_id, payload +LIMIT 1 BY (dictGetString('colors','name', color_id) = 'red'); + +SELECT 'LIMIT BY'; +SELECT color_id, payload +FROM t +ORDER BY color_id, payload 
+LIMIT 1 BY (dictGetString('colors','name', color_id) = 'red'); + +SELECT 'WINDOW PARTITION BY - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, + row_number() OVER ( + PARTITION BY (dictGetString('colors','name', color_id) = 'red') + ORDER BY color_id + ) AS rn +FROM t +ORDER BY color_id, rn; + +SELECT 'WINDOW PARTITION BY'; +SELECT color_id, + row_number() OVER ( + PARTITION BY (dictGetString('colors','name', color_id) = 'red') + ORDER BY color_id + ) AS rn +FROM t +ORDER BY color_id, rn; + +-- Negative: non-constant RHS, expect no rewrite +SELECT 'Negative: non-constant RHS - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id +FROM t +WHERE dictGetString('colors', 'name', color_id) = payload +ORDER BY color_id; + +SELECT 'Negative: non-constant RHS'; +SELECT color_id +FROM t +WHERE dictGetString('colors', 'name', color_id) = payload +ORDER BY color_id; diff --git a/parser/testdata/03701_parallel_replicas_in_shard_scope/ast.json b/parser/testdata/03701_parallel_replicas_in_shard_scope/ast.json new file mode 100644 index 000000000..fa5bac78c --- /dev/null +++ b/parser/testdata/03701_parallel_replicas_in_shard_scope/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_shard_scope (children 1)" + }, + { + "explain": " Identifier test_shard_scope" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001550268, + "rows_read": 2, + "bytes_read": 84 + } +} diff --git a/parser/testdata/03701_parallel_replicas_in_shard_scope/metadata.json b/parser/testdata/03701_parallel_replicas_in_shard_scope/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03701_parallel_replicas_in_shard_scope/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03701_parallel_replicas_in_shard_scope/query.sql b/parser/testdata/03701_parallel_replicas_in_shard_scope/query.sql new file mode 100644 index 000000000..d4c768330 --- /dev/null +++ b/parser/testdata/03701_parallel_replicas_in_shard_scope/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS test_shard_scope; +DROP TABLE IF EXISTS dis_test_shard_scope; + +SET parallel_replicas_only_with_analyzer = 0; -- necessary for CI run with disabled analyzer +SET serialize_query_plan = 0; +SET enable_parallel_replicas=1, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; + +CREATE TABLE test_shard_scope (`time_col` DateTime) ENGINE=MergeTree() ORDER BY time_col; +CREATE TABLE dis_test_shard_scope ENGINE=Distributed(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), test_shard_scope); + +INSERT INTO test_shard_scope (`time_col`) VALUES ('2025-10-23 10:26:46'), ('2025-10-23 10:26:47'); + +SELECT count(), max(time_col) from dis_test_shard_scope; + +DROP TABLE test_shard_scope; +DROP TABLE dis_test_shard_scope; diff --git a/parser/testdata/03701_replicated_column_short_circuit_filter/ast.json b/parser/testdata/03701_replicated_column_short_circuit_filter/ast.json new file mode 100644 index 000000000..146d1d918 --- /dev/null +++ b/parser/testdata/03701_replicated_column_short_circuit_filter/ast.json @@ -0,0 +1,109 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + 
"explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function intDiv (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function minus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_4" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " ArrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function range (alias x) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function notEquals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2" + }, + { + "explain": " Set" + } + ], + + "rows": 29, + + "statistics": + { + "elapsed": 0.001692588, + "rows_read": 29, + "bytes_read": 1098 + } +} diff --git a/parser/testdata/03701_replicated_column_short_circuit_filter/metadata.json b/parser/testdata/03701_replicated_column_short_circuit_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03701_replicated_column_short_circuit_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03701_replicated_column_short_circuit_filter/query.sql b/parser/testdata/03701_replicated_column_short_circuit_filter/query.sql new file mode 100644 index 000000000..7bd565ae2 --- /dev/null +++ b/parser/testdata/03701_replicated_column_short_circuit_filter/query.sql @@ -0,0 +1 @@ +select number, intDiv(1, number - 2) from numbers(4) array join range(number) as x where number != 2 settings enable_lazy_columns_replication=1, query_plan_filter_push_down=0; diff --git a/parser/testdata/03701_temporary_files_buffer_size/ast.json b/parser/testdata/03701_temporary_files_buffer_size/ast.json new file mode 100644 index 000000000..34b0277d3 --- /dev/null +++ b/parser/testdata/03701_temporary_files_buffer_size/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t0 (children 3)" + }, + { + "explain": " Identifier t0" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration c0 (children 1)" + }, + { + "explain": " DataType Int" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001471768, + "rows_read": 9, + "bytes_read": 305 + } +} diff --git a/parser/testdata/03701_temporary_files_buffer_size/metadata.json b/parser/testdata/03701_temporary_files_buffer_size/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03701_temporary_files_buffer_size/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03701_temporary_files_buffer_size/query.sql 
b/parser/testdata/03701_temporary_files_buffer_size/query.sql new file mode 100644 index 000000000..938dcdc7f --- /dev/null +++ b/parser/testdata/03701_temporary_files_buffer_size/query.sql @@ -0,0 +1,6 @@ +CREATE TABLE t0 (c0 Int) ENGINE = Memory(); +INSERT INTO TABLE t0 (c0) VALUES (1); +SELECT c0 FROM t0 GROUP BY c0 +SETTINGS max_bytes_before_external_group_by = 1 + , temporary_files_buffer_size = 0 + , group_by_two_level_threshold = 1; -- { clientError BAD_ARGUMENTS } diff --git a/parser/testdata/03702_alter_codec_index/ast.json b/parser/testdata/03702_alter_codec_index/ast.json new file mode 100644 index 000000000..70baf8505 --- /dev/null +++ b/parser/testdata/03702_alter_codec_index/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_alter_codec_index (children 1)" + }, + { + "explain": " Identifier test_alter_codec_index" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001120989, + "rows_read": 2, + "bytes_read": 96 + } +} diff --git a/parser/testdata/03702_alter_codec_index/metadata.json b/parser/testdata/03702_alter_codec_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03702_alter_codec_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03702_alter_codec_index/query.sql b/parser/testdata/03702_alter_codec_index/query.sql new file mode 100644 index 000000000..6370ac31e --- /dev/null +++ b/parser/testdata/03702_alter_codec_index/query.sql @@ -0,0 +1,32 @@ +DROP TABLE IF EXISTS test_alter_codec_index; +CREATE TABLE test_alter_codec_index (`id` UInt64, value UInt64, INDEX id_index id TYPE minmax GRANULARITY 1) Engine=MergeTree() ORDER BY tuple(); +INSERT INTO test_alter_codec_index SELECT number, number * number from numbers(100); +ALTER TABLE test_alter_codec_index MODIFY COLUMN id UInt64 CODEC(NONE); +ALTER TABLE test_alter_codec_index MODIFY COLUMN id UInt64 CODEC(Delta, LZ4); + +ALTER TABLE test_alter_codec_index MODIFY SETTING alter_column_secondary_index_mode = 'throw'; +ALTER TABLE test_alter_codec_index MODIFY COLUMN id UInt32 CODEC(Delta, LZ4); -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +-- With rebuild the index will be updated, so ALTERs are allowed +ALTER TABLE test_alter_codec_index MODIFY SETTING alter_column_secondary_index_mode = 'rebuild'; +ALTER TABLE test_alter_codec_index MODIFY COLUMN id UInt32 CODEC(Delta, LZ4); +SELECT sum(id) FROM test_alter_codec_index; + +ALTER TABLE test_alter_codec_index MODIFY COLUMN id UInt64 DEFAULT 3 CODEC(Delta, LZ4); +SELECT sum(id) FROM test_alter_codec_index; + +INSERT INTO test_alter_codec_index (value) VALUES (1); +SELECT sum(id) FROM test_alter_codec_index; + +ALTER TABLE test_alter_codec_index MODIFY COLUMN id UInt64 ALIAS 3 CODEC(Delta, LZ4); -- { serverError INCORRECT_QUERY } + +ALTER TABLE test_alter_codec_index MODIFY COLUMN id UInt64 MATERIALIZED 3 CODEC(Delta, LZ4); +SELECT sum(id) FROM test_alter_codec_index; + +INSERT INTO test_alter_codec_index (value) VALUES (1); +SELECT sum(id) FROM test_alter_codec_index; + +ALTER TABLE test_alter_codec_index MODIFY COLUMN id Int64; +SELECT sum(id) FROM test_alter_codec_index; + +DROP TABLE IF EXISTS test_alter_codec_index; \ No newline at end of file diff --git a/parser/testdata/03702_alter_codec_pk/ast.json b/parser/testdata/03702_alter_codec_pk/ast.json new file mode 100644 index 000000000..8ba8afb7b --- /dev/null +++ b/parser/testdata/03702_alter_codec_pk/ast.json @@ -0,0 +1,28 @@ +{ + 
"meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_alter_codec_pk (children 1)" + }, + { + "explain": " Identifier test_alter_codec_pk" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001147468, + "rows_read": 2, + "bytes_read": 90 + } +} diff --git a/parser/testdata/03702_alter_codec_pk/metadata.json b/parser/testdata/03702_alter_codec_pk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03702_alter_codec_pk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03702_alter_codec_pk/query.sql b/parser/testdata/03702_alter_codec_pk/query.sql new file mode 100644 index 000000000..9036f638a --- /dev/null +++ b/parser/testdata/03702_alter_codec_pk/query.sql @@ -0,0 +1,18 @@ +DROP TABLE IF EXISTS test_alter_codec_pk; +CREATE TABLE test_alter_codec_pk (id UInt64, value UInt64) Engine=MergeTree() ORDER BY id; +INSERT INTO test_alter_codec_pk SELECT number, number * number from numbers(100); +-- { echoOn } +ALTER TABLE test_alter_codec_pk MODIFY COLUMN id UInt64 CODEC(NONE); +ALTER TABLE test_alter_codec_pk MODIFY COLUMN id UInt64 CODEC(Delta, LZ4); +SELECT sum(id) FROM test_alter_codec_pk; +ALTER TABLE test_alter_codec_pk MODIFY COLUMN id UInt32 CODEC(Delta, LZ4); -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE test_alter_codec_pk MODIFY COLUMN id UInt64 DEFAULT 3 CODEC(Delta, LZ4); +INSERT INTO test_alter_codec_pk (value) VALUES (1); +SELECT sum(id) FROM test_alter_codec_pk; +ALTER TABLE test_alter_codec_pk MODIFY COLUMN id UInt64 ALIAS 3 CODEC(Delta, LZ4); -- { serverError UNKNOWN_IDENTIFIER } +ALTER TABLE test_alter_codec_pk MODIFY COLUMN id UInt64 MATERIALIZED 3 CODEC(Delta, LZ4); +INSERT INTO test_alter_codec_pk (value) VALUES (1); +SELECT sum(id) FROM test_alter_codec_pk; +ALTER TABLE test_alter_codec_pk MODIFY COLUMN id UInt64; +ALTER TABLE test_alter_codec_pk MODIFY COLUMN id Int64; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +DROP TABLE IF EXISTS test_alter_codec_pk; \ No newline at end of file diff --git a/parser/testdata/03702_alter_column_modify_secondary_index_general/ast.json b/parser/testdata/03702_alter_column_modify_secondary_index_general/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03702_alter_column_modify_secondary_index_general/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03702_alter_column_modify_secondary_index_general/metadata.json b/parser/testdata/03702_alter_column_modify_secondary_index_general/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03702_alter_column_modify_secondary_index_general/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03702_alter_column_modify_secondary_index_general/query.sql b/parser/testdata/03702_alter_column_modify_secondary_index_general/query.sql new file mode 100644 index 000000000..04ee3e15e --- /dev/null +++ b/parser/testdata/03702_alter_column_modify_secondary_index_general/query.sql @@ -0,0 +1,102 @@ +-- Tests the behavior of MergeTree setting 'alter_column_secondary_index_mode' with tables in compact and wide format +-- for UPDATE MODIFY COLUMN operations. 
+ +SET apply_mutations_on_fly = 0; +SET mutations_sync = 1; +SET alter_sync = 1; + +DROP TABLE IF EXISTS test_compact; +DROP TABLE IF EXISTS test_wide; + +CREATE TABLE test_compact ( + a Int32, + b Int32, + c Int32, + INDEX idx_minmax b TYPE minmax +) +ENGINE = MergeTree ORDER BY a +SETTINGS min_bytes_for_wide_part = 999999999; + +CREATE TABLE test_wide ( + a Int32, + b Int32, + c Int32, + INDEX idx_minmax b TYPE minmax +) +ENGINE = MergeTree ORDER BY a +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO test_compact VALUES (1, 1, 4); +INSERT INTO test_wide VALUES (1, 1, 4); + +SELECT 'Check behavior with THROW'; + +ALTER TABLE test_compact MODIFY SETTING alter_column_secondary_index_mode = 'throw'; +ALTER TABLE test_wide MODIFY SETTING alter_column_secondary_index_mode = 'throw'; + +-- ALTER TABLE MODIFY COLUMN is expected to throw +ALTER TABLE test_compact MODIFY COLUMN b String; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE test_wide MODIFY COLUMN b String; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +-- However, it must be possible to change the column default value without an exception +ALTER TABLE test_compact MODIFY COLUMN b DEFAULT 123; +ALTER TABLE test_wide MODIFY COLUMN b DEFAULT 123; + +-- It's also possible to alter other columns that don't have secondary indexes +ALTER TABLE test_compact MODIFY COLUMN c String; + +SELECT 'Check behavior with COMPATIBILITY'; + +ALTER TABLE test_compact MODIFY SETTING alter_column_secondary_index_mode = 'compatibility'; +ALTER TABLE test_wide MODIFY SETTING alter_column_secondary_index_mode = 'compatibility'; + +-- ALTER TABLE MODIFY COLUMN is expected to throw +ALTER TABLE test_compact MODIFY COLUMN b String; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } +ALTER TABLE test_wide MODIFY COLUMN b String; -- { serverError ALTER_OF_COLUMN_IS_FORBIDDEN } + +-- However, it must be possible to change the column default value without an exception +ALTER TABLE test_compact MODIFY COLUMN b DEFAULT 123; +ALTER TABLE test_wide MODIFY COLUMN b DEFAULT 123; + +SELECT 'Check behavior with REBUILD'; -- that's the default + +ALTER TABLE test_compact MODIFY SETTING alter_column_secondary_index_mode = 'rebuild'; +ALTER TABLE test_wide MODIFY SETTING alter_column_secondary_index_mode = 'rebuild'; + +-- Expect that ALTER TABLE MODIFY COLUMN works and the indexes must be rebuilt +ALTER TABLE test_compact MODIFY COLUMN b Int32; +ALTER TABLE test_wide MODIFY COLUMN b Int32; + +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_compact' AND database = currentDatabase() AND name = 'idx_minmax'; +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_wide' AND database = currentDatabase() AND name = 'idx_minmax'; + +SELECT 'Check behavior with DROP'; + +ALTER TABLE test_compact MODIFY SETTING alter_column_secondary_index_mode = 'drop'; +ALTER TABLE test_wide MODIFY SETTING alter_column_secondary_index_mode = 'drop'; + +-- ALTER TABLE MODIFY COLUMN must work now and the indexes must be dropped +ALTER TABLE test_compact MODIFY COLUMN b String; +ALTER TABLE test_wide MODIFY COLUMN b String; + +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_compact' AND database = currentDatabase() AND name = 'idx_minmax'; +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table =
'test_wide' AND database = currentDatabase() AND name = 'idx_minmax'; + +-- Check that changing the column default value still works +ALTER TABLE test_compact MODIFY COLUMN b DEFAULT '321'; +ALTER TABLE test_wide MODIFY COLUMN b DEFAULT '321'; + +SELECT 'Check REBUILD after DROP (parts without index)'; + +ALTER TABLE test_compact MODIFY SETTING alter_column_secondary_index_mode = 'rebuild'; +ALTER TABLE test_wide MODIFY SETTING alter_column_secondary_index_mode = 'rebuild'; + +-- Expect that ALTER TABLE MODIFY COLUMN works and the indexes must be rebuilt +ALTER TABLE test_compact MODIFY COLUMN b Int32; +ALTER TABLE test_wide MODIFY COLUMN b Int32; + +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_compact' AND database = currentDatabase() AND name = 'idx_minmax'; +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_wide' AND database = currentDatabase() AND name = 'idx_minmax'; + +DROP TABLE test_compact; +DROP TABLE test_wide; \ No newline at end of file diff --git a/parser/testdata/03702_alter_column_update_and_delete_secondary_index_general/ast.json b/parser/testdata/03702_alter_column_update_and_delete_secondary_index_general/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03702_alter_column_update_and_delete_secondary_index_general/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03702_alter_column_update_and_delete_secondary_index_general/metadata.json b/parser/testdata/03702_alter_column_update_and_delete_secondary_index_general/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03702_alter_column_update_and_delete_secondary_index_general/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03702_alter_column_update_and_delete_secondary_index_general/query.sql b/parser/testdata/03702_alter_column_update_and_delete_secondary_index_general/query.sql new file mode 100644 index 000000000..a046d2023 --- /dev/null +++ b/parser/testdata/03702_alter_column_update_and_delete_secondary_index_general/query.sql @@ -0,0 +1,158 @@ +-- Tests the behavior of MergeTree setting 'alter_column_secondary_index_mode' with tables in compact and wide format +-- for ALTER TABLE {} DELETE and ALTER TABLE {} UPDATE + +SET apply_mutations_on_fly = 0; +SET mutations_sync = 1; +SET alter_sync = 1; + +DROP TABLE IF EXISTS test_compact; +DROP TABLE IF EXISTS test_wide; + +CREATE TABLE test_compact ( + a Int32, + b Int32, + c Int32, + INDEX idx_minmax b TYPE minmax +) +ENGINE = MergeTree ORDER BY a +SETTINGS min_bytes_for_wide_part = 999999999; + +CREATE TABLE test_wide ( + a Int32, + b Int32, + c Int32, + INDEX idx_minmax b TYPE minmax +) +ENGINE = MergeTree ORDER BY a +SETTINGS min_bytes_for_wide_part = 0; + +INSERT INTO test_compact VALUES (1, 1, 4); +INSERT INTO test_wide VALUES (1, 1, 4); + +SELECT '======== Check behavior with THROW ========'; + +ALTER TABLE test_compact MODIFY SETTING alter_column_secondary_index_mode = 'throw'; +ALTER TABLE test_wide MODIFY SETTING alter_column_secondary_index_mode = 'throw'; + +-- ALTER TABLE UPDATE and DELETE are expected to throw +ALTER TABLE test_compact UPDATE b = 3 WHERE b = 1; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE test_compact DELETE WHERE b = 1; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE test_wide UPDATE b = 3 WHERE b = 1; -- { serverError SUPPORT_IS_DISABLED } +ALTER TABLE
test_wide DELETE WHERE b = 1; -- { serverError SUPPORT_IS_DISABLED } + +SELECT '======== Check behavior with COMPATIBILITY ========'; -- Same as REBUILD + +ALTER TABLE test_compact MODIFY SETTING alter_column_secondary_index_mode = 'compatibility'; +ALTER TABLE test_wide MODIFY SETTING alter_column_secondary_index_mode = 'compatibility'; + +INSERT INTO test_compact VALUES (1, 100, 6); +OPTIMIZE TABLE test_compact FINAL; +INSERT INTO test_wide VALUES (1, 100, 6); +OPTIMIZE TABLE test_wide FINAL; +SELECT 'COMPACT BEFORE', * from test_compact; +SELECT 'WIDE BEFORE', * from test_wide; + +ALTER TABLE test_compact UPDATE b = 3 WHERE b = 1; +SELECT 'COMPACT AFTER UPDATE', * from test_compact; +ALTER TABLE test_wide UPDATE b = 3 WHERE b = 1; +SELECT 'WIDE AFTER UPDATE', * from test_wide; + +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_compact' AND database = currentDatabase() AND name = 'idx_minmax'; +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_wide' AND database = currentDatabase() AND name = 'idx_minmax'; + +ALTER TABLE test_compact DELETE WHERE b = 100; +SELECT 'COMPACT AFTER DELETE', * from test_compact; +ALTER TABLE test_wide DELETE WHERE b = 100; +SELECT 'WIDE AFTER DELETE', * from test_wide; + +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_compact' AND database = currentDatabase() AND name = 'idx_minmax'; +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_wide' AND database = currentDatabase() AND name = 'idx_minmax'; + + +SELECT '======== Check behavior with REBUILD ========'; + +ALTER TABLE test_compact MODIFY SETTING alter_column_secondary_index_mode = 'rebuild'; +ALTER TABLE test_wide MODIFY SETTING alter_column_secondary_index_mode = 'rebuild'; + +INSERT INTO test_compact VALUES (1, 100, 6); +OPTIMIZE TABLE test_compact FINAL; +INSERT INTO test_wide VALUES (1, 100, 6); +OPTIMIZE TABLE test_wide FINAL; +SELECT 'COMPACT BEFORE', * from test_compact; +SELECT 'WIDE BEFORE', * from test_wide; + +ALTER TABLE test_compact UPDATE b = 5 WHERE b = 3; +SELECT 'COMPACT AFTER UPDATE', * from test_compact; +ALTER TABLE test_wide UPDATE b = 5 WHERE b = 3; +SELECT 'WIDE AFTER UPDATE', * from test_wide; + +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_compact' AND database = currentDatabase() AND name = 'idx_minmax'; +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_wide' AND database = currentDatabase() AND name = 'idx_minmax'; + +ALTER TABLE test_compact DELETE WHERE b = 100; +SELECT 'COMPACT AFTER DELETE', * from test_compact; +ALTER TABLE test_wide DELETE WHERE b = 100; +SELECT 'WIDE AFTER DELETE', * from test_wide; + +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_compact' AND database = currentDatabase() AND name = 'idx_minmax'; +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_wide' AND database = currentDatabase() AND name = 'idx_minmax'; + + +SELECT '======== Check behavior with DROP ========'; + +ALTER TABLE test_compact MODIFY SETTING
alter_column_secondary_index_mode = 'drop'; +ALTER TABLE test_wide MODIFY SETTING alter_column_secondary_index_mode = 'drop'; + +INSERT INTO test_compact VALUES (1, 100, 6); +OPTIMIZE TABLE test_compact FINAL; +INSERT INTO test_wide VALUES (1, 100, 6); +OPTIMIZE TABLE test_wide FINAL; + +SELECT 'COMPACT BEFORE', * from test_compact; +SELECT 'WIDE BEFORE', * from test_wide; + +ALTER TABLE test_compact UPDATE b = 7 WHERE b = 5; +SELECT 'COMPACT AFTER UPDATE', * from test_compact; +ALTER TABLE test_wide UPDATE b = 7 WHERE b = 5; +SELECT 'WIDE AFTER UPDATE', * from test_wide; + +-- Indices should have been dropped +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_compact' AND database = currentDatabase() AND name = 'idx_minmax'; +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_wide' AND database = currentDatabase() AND name = 'idx_minmax'; + +-- Regenerate them +OPTIMIZE TABLE test_compact FINAL; +OPTIMIZE TABLE test_wide FINAL; + +-- Check that indices are back +SELECT 'We are back #1'; +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_compact' AND database = currentDatabase() AND name = 'idx_minmax'; +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_wide' AND database = currentDatabase() AND name = 'idx_minmax'; + +-- Alter of unrelated columns should only drop indices if needed (compact format is dropped, but wide is kept) +ALTER TABLE test_compact UPDATE c = 7 WHERE c = 6; +SELECT 'COMPACT AFTER UPDATE', * from test_compact; +ALTER TABLE test_wide UPDATE c = 7 WHERE c = 6; +SELECT 'WIDE AFTER UPDATE', * from test_wide; + +-- Only the compact index should have been dropped (the wide one is kept) +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_compact' AND database = currentDatabase() AND name = 'idx_minmax'; +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_wide' AND database = currentDatabase() AND name = 'idx_minmax'; + +-- Regenerate them +OPTIMIZE TABLE test_compact FINAL; +OPTIMIZE TABLE test_wide FINAL; +-- Check that indices are back +SELECT 'We are back #2'; +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_compact' AND database = currentDatabase() AND name = 'idx_minmax'; +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_wide' AND database = currentDatabase() AND name = 'idx_minmax'; + +-- Delete over any column should drop indices since it requires changes +ALTER TABLE test_compact DELETE WHERE c = 7; +SELECT 'COMPACT AFTER DELETE', * from test_compact; +ALTER TABLE test_wide DELETE WHERE c = 7; +SELECT 'WIDE AFTER DELETE', * from test_wide; + +-- Indices should have been dropped +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_compact' AND database = currentDatabase() AND name = 'idx_minmax'; +SELECT table, name, 'Empty : ' || if(marks_bytes == 0, 'true', 'false') FROM system.data_skipping_indices WHERE table = 'test_wide' AND database = currentDatabase() AND name = 'idx_minmax'; diff --git
a/parser/testdata/03702_encode_decode_memory_usage/ast.json b/parser/testdata/03702_encode_decode_memory_usage/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03702_encode_decode_memory_usage/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03702_encode_decode_memory_usage/metadata.json b/parser/testdata/03702_encode_decode_memory_usage/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03702_encode_decode_memory_usage/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03702_encode_decode_memory_usage/query.sql b/parser/testdata/03702_encode_decode_memory_usage/query.sql new file mode 100644 index 000000000..1c7eaa5d6 --- /dev/null +++ b/parser/testdata/03702_encode_decode_memory_usage/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest + +SELECT base32Decode(s) FROM (SELECT base32Encode(randomString(100)) AS s FROM numbers(100000)) FORMAT Null; +SELECT base58Decode(s) FROM (SELECT base58Encode(randomString(100)) AS s FROM numbers(100000)) FORMAT Null; +SELECT base64Decode(s) FROM (SELECT base64Encode(randomString(100)) AS s FROM numbers(100000)) FORMAT Null; \ No newline at end of file diff --git a/parser/testdata/03702_function_dict_get_keys_basic/ast.json b/parser/testdata/03702_function_dict_get_keys_basic/ast.json new file mode 100644 index 000000000..73ef9f8b9 --- /dev/null +++ b/parser/testdata/03702_function_dict_get_keys_basic/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'Negative'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001267205, + "rows_read": 5, + "bytes_read": 179 + } +} diff --git a/parser/testdata/03702_function_dict_get_keys_basic/metadata.json b/parser/testdata/03702_function_dict_get_keys_basic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03702_function_dict_get_keys_basic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03702_function_dict_get_keys_basic/query.sql b/parser/testdata/03702_function_dict_get_keys_basic/query.sql new file mode 100644 index 000000000..665043b52 --- /dev/null +++ b/parser/testdata/03702_function_dict_get_keys_basic/query.sql @@ -0,0 +1,407 @@ +SELECT 'Negative'; + +DROP DICTIONARY IF EXISTS dict_neg; +DROP TABLE IF EXISTS dict_src_neg; + +CREATE TABLE dict_src_neg +( + id UInt64, + u64 UInt64, + i32n Nullable(Int32) +) ENGINE = Memory; + +INSERT INTO dict_src_neg VALUES + (1, 7, NULL), + (2, 42, -10); + +CREATE DICTIONARY dict_neg +( + id UInt64, + u64 UInt64, + i32n Nullable(Int32) +) + +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'dict_src_neg')) +LIFETIME(0) +LAYOUT(HASHED()); + +SELECT dictGetKeys(toString(number), 'u64', toUInt64(7)) FROM numbers(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT dictGetKeys('dict_neg', toString(number), toUInt64(7)) FROM numbers(1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT dictGetKeys('dict_neg', 'no_such_attr', toUInt64(7)); -- { serverError ILLEGAL_COLUMN } + +SELECT dictGetKeys('dict_neg', 'u64'); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +SELECT dictGetKeys('dict_neg', 'u64', toUInt64(7), 1); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + 
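+-- For context, a hedged reading of the intended semantics: dictGetKeys appears to perform a reverse lookup, +-- returning the array of keys whose attribute equals the given value. With dict_neg above one would expect, +-- for example: +-- SELECT dictGetKeys('dict_neg', 'u64', toUInt64(42)); -- presumably [2], the only id with u64 = 42 +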
+SELECT dictGetKeys('dict_neg', 'i32n', tuple(number)) FROM numbers(3); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT dictGetKeys('non_a_dict_name', 'i32n', tuple(number)) FROM numbers(3); -- { serverError BAD_ARGUMENTS } + +SELECT dictGetKeys('dict_neg', 'not_a_attr_col', tuple(number)) FROM numbers(3); -- { serverError ILLEGAL_COLUMN } + +SELECT 'Simple Key'; + +DROP DICTIONARY IF EXISTS dict_simple_kv; +DROP TABLE IF EXISTS dict_src_simple_kv; + +CREATE TABLE dict_src_simple_kv +( + id UInt64, + attr Int32 +) ENGINE = Memory; + +INSERT INTO dict_src_simple_kv VALUES + (1, 10), + (2, 10), + (3, 20); + +CREATE DICTIONARY dict_simple_kv +( + id UInt64, + attr Int32 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'dict_src_simple_kv')) +LIFETIME(0) +LAYOUT(HASHED()); + +SELECT dictGetKeys('dict_simple_kv', 'attr', toUInt32(10)); + +SELECT toTypeName(dictGetKeys('dict_simple_kv', 'attr', toUInt32(10))); + +SELECT 'Complex Key with 2 elements'; + +DROP DICTIONARY IF EXISTS dict_complex2_kv; +DROP TABLE IF EXISTS dict_src_complex2_kv; + +CREATE TABLE dict_src_complex2_kv +( + k1 UInt64, + k2 String, + attr Int32 +) ENGINE = Memory; + +INSERT INTO dict_src_complex2_kv VALUES + (1, 'a', 10), + (2, 'b', 10), + (3, 'c', 20); + +CREATE DICTIONARY dict_complex2_kv +( + k1 UInt64, + k2 String, + attr Int32 +) +PRIMARY KEY k1, k2 +SOURCE(CLICKHOUSE(TABLE 'dict_src_complex2_kv')) +LIFETIME(0) +LAYOUT(COMPLEX_KEY_HASHED()); + +SELECT dictGetKeys('dict_complex2_kv', 'attr', 10); +SELECT toTypeName(dictGetKeys('dict_complex2_kv', 'attr', 10)); + + +SELECT 'Complex Key with 1 element'; + +DROP DICTIONARY IF EXISTS dict_complex1_kv; +DROP TABLE IF EXISTS dict_src_complex1_kv; + +CREATE TABLE dict_src_complex1_kv +( + k1 UInt64, + attr Int32 +) ENGINE = Memory; + +INSERT INTO dict_src_complex1_kv VALUES + (10, 1), + (20, 1); + +CREATE DICTIONARY dict_complex1_kv +( + k1 UInt64, + attr Int32 +) +PRIMARY KEY k1 +SOURCE(CLICKHOUSE(TABLE 'dict_src_complex1_kv')) +LIFETIME(0) +LAYOUT(COMPLEX_KEY_HASHED()); + +SELECT dictGetKeys('dict_complex1_kv', 'attr', 1); +SELECT toTypeName(dictGetKeys('dict_complex1_kv', 'attr', 1)); + +SELECT 'Complex Key with many elements'; + +DROP DICTIONARY IF EXISTS dict_complex_wide_kv; +DROP TABLE IF EXISTS dict_src_complex_wide_kv; + +CREATE TABLE dict_src_complex_wide_kv +( + a1 UInt64, + a2 Int64, + a3 String, + a4 Date, + a5 UUID, + a6 IPv4, + a7 IPv6, + a8 DateTime64(3), + attr Int32 +) ENGINE = Memory; + +INSERT INTO dict_src_complex_wide_kv VALUES + (1, -1, 'x', toDate('2000-01-02'), toUUID('01234567-89ab-cdef-0123-456789abcdef'), + toIPv4('1.2.3.4'), toIPv6('2001:db8::1'), toDateTime64('2025-10-10 03:04:05', 3), 10), + (2, -2, 'y', toDate('2000-01-03'), toUUID('89abcdef-0123-4567-89ab-cdef01234567'), + toIPv4('5.6.7.8'), toIPv6('2001:db8::2'), toDateTime64('2025-10-10 03:04:05', 3), 20); + +CREATE DICTIONARY dict_complex_wide_kv +( + a1 UInt64, + a2 Int64, + a3 String, + a4 Date, + a5 UUID, + a6 IPv4, + a7 IPv6, + a8 DateTime64(3), + attr Int32 +) + +PRIMARY KEY a1, a2, a3, a4, a5, a6, a7, a8 +SOURCE(CLICKHOUSE(TABLE 'dict_src_complex_wide_kv')) +LIFETIME(0) +LAYOUT(COMPLEX_KEY_HASHED()); + +SELECT dictGetKeys('dict_complex_wide_kv', 'attr', 10); +SELECT toTypeName(dictGetKeys('dict_complex_wide_kv', 'attr', 10)); + + +SELECT 'Attribute Types'; + +DROP DICTIONARY IF EXISTS dict_types; +DROP TABLE IF EXISTS dict_src_types; + +CREATE TABLE dict_src_types +( + id UInt64, + i8 Int8, + u8 UInt8, + i64 Int64, + u64 UInt64, + f32 Float32, + f64 Float64, + dec32 Decimal32(3), + 
dec64 Decimal64(3), + d Date, + dt DateTime, + dt64 DateTime64(3), + uuid UUID, + ip4 IPv4, + ip6 IPv6, + s String, + arr_u64 Array(UInt64), + arr_nested Array(Array(UInt8)), + n_i32 Nullable(Int32), + n_str Nullable(String) +) ENGINE = Memory; + +INSERT INTO dict_src_types VALUES +(1, + toInt8(-128), toUInt8(0), toInt64(-9223372036854775808), toUInt64(0), + toFloat32(-1.5), toFloat64(-1.5), + toDecimal32(-123.456, 3), toDecimal64(-123.456, 3), + toDate('2025-01-01'), toDateTime('2025-01-01 00:00:00'), toDateTime64('2025-01-01 00:00:00', 3), + toUUID('00000000-0000-0000-0000-000000000000'), + toIPv4('0.0.0.0'), toIPv6('::'), + '', [], [], NULL, NULL), +(2, + toInt8(0), toUInt8(127), toInt64(0), toUInt64(42), + toFloat32(1.5), toFloat64(42.25), + toDecimal32(1.234, 3), toDecimal64(42.500, 3), + toDate('2000-01-02'), toDateTime('2000-01-02 03:04:05'), toDateTime64('2000-01-02 03:04:05', 3), + toUUID('01234567-89ab-cdef-0123-456789abcdef'), + toIPv4('1.2.3.4'), toIPv6('2001:db8::1'), + 'alpha', [1,2], [[1,2],[3]], 0, 'x'), +(3, + toInt8(127), toUInt8(255), toInt64(9223372036854775807), toUInt64(18446744073709551615), + CAST('inf' AS Float32), CAST('nan' AS Float64), + toDecimal32(123.999, 3), toDecimal64(9999999.999, 3), + toDate('2106-02-07'), toDateTime('2106-02-07 06:28:15'), toDateTime64('2106-02-07 06:28:15', 3), + toUUID('89abcdef-0123-4567-89ab-cdef01234567'), + toIPv4('255.255.255.255'), toIPv6('ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff'), + 'beta', [9,8,7], [[4],[5,6]], NULL, 'y'); + +CREATE DICTIONARY dict_types +( + id UInt64, + i8 Int8, + u8 UInt8, + i64 Int64, + u64 UInt64, + f32 Float32, + f64 Float64, + dec32 Decimal32(3), + dec64 Decimal64(3), + d Date, + dt DateTime, + dt64 DateTime64(3), + uuid UUID, + ip4 IPv4, + ip6 IPv6, + s String, + arr_u64 Array(UInt64), + arr_nested Array(Array(UInt8)), + n_i32 Nullable(Int32), + n_str Nullable(String) +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'dict_src_types')) +LIFETIME(0) +LAYOUT(HASHED()); + +SELECT 'Small Integer'; +SELECT dictGetKeys('dict_types', 'i8', '-128'); +SELECT dictGetKeys('dict_types', 'i8', '0'); +SELECT dictGetKeys('dict_types', 'i8', '127'); + +SELECT 'Unsigned Integer'; +SELECT dictGetKeys('dict_types', 'u64', '0'); +SELECT dictGetKeys('dict_types', 'u64', '18446744073709551615'); + +SELECT 'Floating Point'; +SELECT dictGetKeys('dict_types', 'f32', '1.5'); +SELECT dictGetKeys('dict_types', 'f32', 'inf'); +SELECT dictGetKeys('dict_types', 'f32', inf); +SELECT dictGetKeys('dict_types', 'f32', '-inf'); +SELECT dictGetKeys('dict_types', 'f64', '42.25'); +SELECT dictGetKeys('dict_types', 'f64', 'nan'); +SELECT dictGetKeys('dict_types', 'f64', nan); + +SELECT 'Array'; +SELECT dictGetKeys('dict_types', 'arr_u64', []); +SELECT dictGetKeys('dict_types', 'arr_u64', [1,2]); +SELECT dictGetKeys('dict_types', 'arr_nested', [[1,2],[3]]); + +SELECT dictGetKeys('dict_types', 'arr_u64', '[]'); +SELECT dictGetKeys('dict_types', 'arr_u64', '[1,2]'); +SELECT dictGetKeys('dict_types', 'arr_nested', '[[1,2],[3]]'); + +SELECT 'Nullable'; +SELECT dictGetKeys('dict_types', 'n_i32', NULL); +SELECT dictGetKeys('dict_types', 'n_i32', 0); +SELECT dictGetKeys('dict_types', 'n_str', NULL); +SELECT dictGetKeys('dict_types', 'n_str', 'x'); + +SELECT 'Dates, UUID, IPs, String, Decimal'; +SELECT dictGetKeys('dict_types', 'd', '2000-01-02'); +SELECT dictGetKeys('dict_types', 'dt', '2000-01-02 03:04:05'); +SELECT dictGetKeys('dict_types', 'dt64', toDateTime64('1970-01-01 00:00:00', 3)); +SELECT dictGetKeys('dict_types', 'uuid', 
'01234567-89ab-cdef-0123-456789abcdef'); +SELECT dictGetKeys('dict_types', 'ip4', '1.2.3.4'); +SELECT dictGetKeys('dict_types', 'ip6', '2001:db8::1'); +SELECT dictGetKeys('dict_types', 's', ''); +SELECT dictGetKeys('dict_types', 'dec32', '1.234'); +SELECT dictGetKeys('dict_types', 'dec64', '42.500'); + + +SELECT 'Value Types'; + +DROP DICTIONARY IF EXISTS dict_valexpr; +DROP TABLE IF EXISTS dict_src_valexpr; + +CREATE TABLE dict_src_valexpr +( + id UInt64, + s String, + i32n Nullable(Int32), + u64 UInt64 +) ENGINE = Memory; + +INSERT INTO dict_src_valexpr VALUES + (1, 'alpha', 10, 42), + (2, 'beta', 10, 100), + (3, 'gamma', -5, 42), + (4, 'alpha', NULL, 7); + +CREATE DICTIONARY dict_valexpr +( + id UInt64, + s String, + i32n Nullable(Int32), + u64 UInt64 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'dict_src_valexpr')) +LIFETIME(0) +LAYOUT(HASHED()); + +SELECT 'Constant value'; +SELECT dictGetKeys('dict_valexpr', 's', 'alpha'); + +SELECT 'Non-const vector'; +SELECT dictGetKeys('dict_valexpr', 's', v) +FROM (SELECT arrayJoin(['alpha','beta','zzz']) AS v); + +SELECT 'LowCardinality(String) constant'; +SELECT dictGetKeys('dict_valexpr', 's', CAST('alpha' AS LowCardinality(String))); + +SELECT 'LowCardinality(String) vector'; +SELECT dictGetKeys('dict_valexpr', 's', CAST(v AS LowCardinality(String))) +FROM (SELECT arrayJoin(['alpha','beta','zzz']) AS v); + +SELECT 'Nullable constant NULL and non-NULL'; +SELECT dictGetKeys('dict_valexpr', 'i32n', CAST(NULL AS Nullable(Int32))); +SELECT dictGetKeys('dict_valexpr', 'i32n', CAST(10 AS Nullable(Int32))); +SELECT dictGetKeys('dict_valexpr', 'i32n', CAST('10' AS Nullable(String))); + +SELECT dictGetKeys('dict_valexpr', 'u64', CAST(NULL AS Nullable(Int32))); -- { serverError CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN } +SELECT dictGetKeys('dict_valexpr', 'u64', CAST(-42 AS Nullable(Int32))); -- { serverError CANNOT_CONVERT_TYPE } +SELECT dictGetKeys('dict_valexpr', 'u64', CAST('42' AS Nullable(String))); + +SELECT 'Nullable vector with NULLs interleaved'; +SELECT dictGetKeys('dict_valexpr', 'i32n', x) +FROM (SELECT arrayJoin([CAST(NULL AS Nullable(Int32)), CAST(10 AS Nullable(Int32)), CAST(NULL AS Nullable(Int32)), CAST(-5 AS Nullable(Int32))]) AS x); + +SELECT 'Type-mismatch constant convertible (String -> UInt64)'; +SELECT dictGetKeys('dict_valexpr', 'u64', '42'); + + +SELECT 'Match Patterns'; + +DROP DICTIONARY IF EXISTS dict_match; +DROP TABLE IF EXISTS dict_src_match; + +CREATE TABLE dict_src_match +( + id UInt64, + grp String +) ENGINE = Memory; + +INSERT INTO dict_src_match VALUES + (1, 'A'), (2, 'A'), (3, 'B'), (4, 'C'); + +CREATE DICTIONARY dict_match +( + id UInt64, + grp String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'dict_src_match')) +LIFETIME(0) +LAYOUT(HASHED()); + +SELECT 'No matches at all (const and vector)'; +SELECT dictGetKeys('dict_match', 'grp', 'Z'); +SELECT dictGetKeys('dict_match', 'grp', v) +FROM (SELECT arrayJoin(['Z','Y']) AS v); + +SELECT 'One match per input row'; +SELECT dictGetKeys('dict_match', 'grp', v) +FROM (SELECT arrayJoin(['B','C']) AS v); + +SELECT 'Many matches per row (shared attribute)'; +SELECT dictGetKeys('dict_match', 'grp', 'A'); +SELECT dictGetKeys('dict_match', 'grp', v) +FROM (SELECT arrayJoin(['A','B','A']) AS v); diff --git a/parser/testdata/03702_geometry_functions/ast.json b/parser/testdata/03702_geometry_functions/ast.json new file mode 100644 index 000000000..f6bef4d48 --- /dev/null +++ b/parser/testdata/03702_geometry_functions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + 
"type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001243662, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03702_geometry_functions/metadata.json b/parser/testdata/03702_geometry_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03702_geometry_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03702_geometry_functions/query.sql b/parser/testdata/03702_geometry_functions/query.sql new file mode 100644 index 000000000..39e506465 --- /dev/null +++ b/parser/testdata/03702_geometry_functions/query.sql @@ -0,0 +1,35 @@ +SET allow_suspicious_variant_types = 1; + +DROP TABLE IF EXISTS geo_dst; +DROP TABLE IF EXISTS geo; +DROP TABLE IF EXISTS variant_table; + +CREATE TABLE IF NOT EXISTS geo_dst (id Int32, geom Geometry) ENGINE = Memory(); + +CREATE TABLE IF NOT EXISTS geo (geom String, id Int) ENGINE = Memory(); +INSERT INTO geo VALUES ('POLYGON((30.0107 -15.6462,30.0502 -15.6401,30.09 -15.6294,30.1301 -15.6237,30.1699 -15.6322,30.1956 -15.6491,30.2072 -15.6532,30.2231 -15.6497,30.231 -15.6447,30.2461 -15.6321,30.2549 -15.6289,30.2801 -15.6323,30.2962 -15.639,30.3281 -15.6524,30.3567 -15.6515,30.3963 -15.636,30.3977 -15.7168,30.3993 -15.812,30.4013 -15.9317,30.4026 -16.0012,30.5148 -16.0004,30.5866 -16,30.7497 -15.9989,30.8574 -15.9981,30.9019 -16.0071,30.9422 -16.0345,30.9583 -16.0511,30.9731 -16.062,30.9898 -16.0643,31.012 -16.0549,31.0237 -16.0452,31.0422 -16.0249,31.0569 -16.0176,31.0654 -16.0196,31.0733 -16.0255,31.0809 -16.0259,31.089 -16.0119,31.1141 -15.9969,31.1585 -16.0002,31.26 -16.0235,31.2789 -16.0303,31.2953 -16.0417,31.3096 -16.059,31.3284 -16.0928,31.3409 -16.1067,31.3603 -16.1169,31.3703 -16.1237,31.3746 -16.1329,31.3778 -16.1422,31.384 -16.1488,31.3877 -16.1496,31.3956 -16.1477,31.3996 -16.1473,31.4043 -16.1499,31.4041 -16.1545,31.4027 -16.1594,31.4046 -16.1623,31.4241 -16.1647,31.4457 -16.165,31.4657 -16.1677,31.4806 -16.178,31.5192 -16.1965,31.6861 -16.2072,31.7107 -16.2179,31.7382 -16.2398,31.7988 -16.3037,31.8181 -16.3196,31.8601 -16.3408,31.8719 -16.3504,31.8807 -16.368,31.8856 -16.4063,31.8944 -16.4215,31.9103 -16.4289,32.0141 -16.4449,32.2118 -16.4402,32.2905 -16.4518,32.3937 -16.4918,32.5521 -16.5534,32.6718 -16.5998,32.6831 -16.6099,32.6879 -16.6243,32.6886 -16.6473,32.6987 -16.6868,32.7252 -16.7064,32.7309 -16.7087,32.7313 -16.7088,32.7399 -16.7032,32.7538 -16.6979,32.7693 -16.6955,32.8007 -16.6973,32.862 -16.7105,32.8934 -16.7124,32.9096 -16.7081,32.9396 -16.6898,32.9562 -16.6831,32.9685 -16.6816,32.9616 -16.7103,32.9334 -16.8158,32.9162 -16.8479,32.9005 -16.8678,32.8288 -16.9351,32.8301 -16.9415,32.8868 -17.0382,32.9285 -17.1095,32.9541 -17.1672,32.9678 -17.2289,32.9691 -17.2661,32.9694 -17.2761,32.9732 -17.2979,32.9836 -17.3178,32.9924 -17.3247,33.0147 -17.3367,33.0216 -17.3456,33.0225 -17.3615,33.0163 -17.3772,33.0117 -17.384,32.9974 -17.405,32.9582 -17.4785,32.9517 -17.4862,32.943 -17.4916,32.9366 -17.4983,32.9367 -17.5094,32.9472 -17.5432,32.9517 -17.5514,32.9691 -17.5646,33.0066 -17.581,33.0204 -17.5986,33.0245 -17.6192,33.0206 -17.6385,33.0041 -17.6756,33.0002 -17.7139,33.0032 -17.7577,32.9991 -17.7943,32.9736 -17.8106,32.957 -17.818,32.9461 -17.8347,32.9397 -17.8555,32.9369 -17.875,32.9384 -17.8946,32.9503 -17.9226,32.9521 -17.9402,32.9481 -17.9533,32.9404 -17.96,32.9324 -17.9649,32.9274 -17.9729,32.929 -17.9823,32.9412 -17.9963,32.9403 -18.0048,32.9349 -18.0246,32.9371 
-18.0471,32.9723 -18.1503,32.9755 -18.1833,32.9749 -18.1908,32.9659 -18.2122,32.9582 -18.2254,32.9523 -18.233,32.9505 -18.2413,32.955 -18.2563,32.9702 -18.2775,33.0169 -18.3137,33.035 -18.3329,33.0428 -18.352,33.0381 -18.3631,33.0092 -18.3839,32.9882 -18.4132,32.9854 -18.4125,32.9868 -18.4223,32.9995 -18.4367,33.003 -18.4469,32.9964 -18.4671,32.9786 -18.4801,32.9566 -18.4899,32.9371 -18.501,32.9193 -18.51,32.9003 -18.5153,32.8831 -18.5221,32.8707 -18.5358,32.8683 -18.5526,32.8717 -18.5732,32.8845 -18.609,32.9146 -18.6659,32.9223 -18.6932,32.9202 -18.7262,32.9133 -18.753,32.9025 -18.7745,32.8852 -18.7878,32.8589 -18.79,32.8179 -18.787,32.7876 -18.7913,32.6914 -18.8343,32.6899 -18.8432,32.6968 -18.8972,32.7032 -18.9119,32.7158 -18.9198,32.7051 -18.9275,32.6922 -18.9343,32.6825 -18.9427,32.6811 -18.955,32.6886 -18.9773,32.6903 -18.9882,32.6886 -19.001,32.6911 -19.0143,32.699 -19.0222,32.7103 -19.026,32.7239 -19.0266,32.786 -19.0177,32.8034 -19.0196,32.8142 -19.0238,32.82 -19.0283,32.823 -19.0352,32.8253 -19.0468,32.8302 -19.0591,32.8381 -19.0669,32.8475 -19.0739,32.8559 -19.0837,32.8623 -19.1181,32.8332 -19.242,32.8322 -19.2667,32.8287 -19.2846,32.8207 -19.3013,32.8061 -19.3234,32.7688 -19.3636,32.7665 -19.3734,32.7685 -19.4028,32.7622 -19.4434,32.7634 -19.464,32.7739 -19.4759,32.7931 -19.4767,32.8113 -19.4745,32.8254 -19.4792,32.8322 -19.5009,32.8325 -19.5193,32.8254 -19.5916,32.8257 -19.6008,32.8282 -19.6106,32.8296 -19.6237,32.8254 -19.6333,32.8195 -19.642,32.8163 -19.6521,32.8196 -19.6743,32.831 -19.6852,32.8491 -19.6891,32.8722 -19.6902,32.8947 -19.6843,32.9246 -19.6553,32.9432 -19.6493,32.961 -19.6588,32.9624 -19.6791,32.9541 -19.7178,32.9624 -19.7354,32.9791 -19.7514,33.0006 -19.7643,33.0228 -19.7731,33.0328 -19.7842,33.0296 -19.8034,33.0229 -19.8269,33.0213 -19.8681,33.002 -19.927,32.9984 -20.0009,33.0044 -20.0243,33.0073 -20.032,32.9537 -20.0302,32.9401 -20.0415,32.9343 -20.0721,32.9265 -20.0865,32.9107 -20.0911,32.8944 -20.094,32.8853 -20.103,32.8779 -20.1517,32.8729 -20.1672,32.8593 -20.1909,32.8571 -20.2006,32.8583 -20.2075,32.8651 -20.2209,32.8656 -20.2289,32.8584 -20.2595,32.853 -20.2739,32.8452 -20.2867,32.8008 -20.3386,32.7359 -20.4142,32.7044 -20.4718,32.6718 -20.5318,32.6465 -20.558,32.6037 -20.5648,32.5565 -20.5593,32.5131 -20.5646,32.4816 -20.603,32.4711 -20.6455,32.4691 -20.6868,32.4835 -20.7942,32.4972 -20.8981,32.491 -20.9363,32.4677 -20.9802,32.4171 -21.0409,32.3398 -21.1341,32.3453 -21.1428,32.3599 -21.1514,32.3689 -21.163,32.3734 -21.1636,32.3777 -21.1634,32.3806 -21.1655,32.3805 -21.1722,32.3769 -21.1785,32.373 -21.184,32.3717 -21.1879,32.4446 -21.3047,32.4458 -21.309,32.4472 -21.3137,32.4085 -21.2903,32.373 -21.3279,32.3245 -21.3782,32.2722 -21.4325,32.2197 -21.4869,32.1673 -21.5413,32.1148 -21.5956,32.0624 -21.65,32.01 -21.7045,31.9576 -21.7588,31.9052 -21.8132,31.8527 -21.8676,31.8003 -21.922,31.7478 -21.9764,31.6955 -22.0307,31.6431 -22.0852,31.5907 -22.1396,31.5382 -22.1939,31.4858 -22.2483,31.4338 -22.302,31.3687 -22.345,31.2889 -22.3973,31.2656 -22.3655,31.2556 -22.358,31.2457 -22.3575,31.2296 -22.364,31.2215 -22.3649,31.2135 -22.3619,31.1979 -22.3526,31.1907 -22.3506,31.1837 -22.3456,31.1633 -22.3226,31.1526 -22.3164,31.1377 -22.3185,31.1045 -22.3334,31.097 -22.3349,31.0876 -22.3369,31.0703 -22.3337,31.0361 -22.3196,30.9272 -22.2957,30.8671 -22.2896,30.8379 -22.2823,30.8053 -22.2945,30.6939 -22.3028,30.6743 -22.3086,30.6474 -22.3264,30.6324 -22.3307,30.6256 -22.3286,30.6103 -22.3187,30.6011 -22.3164,30.5722 -22.3166,30.5074 -22.3096,30.4885 -22.3102,30.4692 
-22.3151,30.4317 -22.3312,30.4127 -22.3369,30.3721 -22.3435,30.335 -22.3447,30.3008 -22.337,30.2693 -22.3164,30.2553 -22.3047,30.2404 -22.2962,30.2217 -22.2909,30.197 -22.2891,30.1527 -22.2948,30.1351 -22.2936,30.1111 -22.2823,30.0826 -22.2629,30.0679 -22.2571,30.0381 -22.2538,30.0359 -22.2506,30.0345 -22.2461,30.0155 -22.227,30.0053 -22.2223,29.9838 -22.2177,29.974 -22.214,29.9467 -22.1983,29.9321 -22.1944,29.896 -22.1914,29.8715 -22.1793,29.8373 -22.1724,29.7792 -22.1364,29.7589 -22.1309,29.6914 -22.1341,29.6796 -22.1383,29.6614 -22.1265,29.6411 -22.1292,29.604 -22.1451,29.5702 -22.142,29.551 -22.146,29.5425 -22.1625,29.5318 -22.1724,29.5069 -22.1701,29.4569 -22.1588,29.4361 -22.1631,29.3995 -22.1822,29.378 -22.1929,29.3633 -22.1923,29.3569 -22.1909,29.3501 -22.1867,29.2736 -22.1251,29.2673 -22.1158,29.2596 -22.0961,29.2541 -22.0871,29.2444 -22.0757,29.2393 -22.0726,29.1449 -22.0753,29.108 -22.0692,29.0708 -22.051,29.0405 -22.0209,29.0216 -21.9828,29.0138 -21.9404,29.0179 -21.8981,29.0289 -21.8766,29.0454 -21.8526,29.0576 -21.8292,29.0553 -21.81,29.0387 -21.7979,28.9987 -21.786,28.9808 -21.7748,28.9519 -21.7683,28.891 -21.7649,28.8609 -21.7574,28.7142 -21.6935,28.6684 -21.68,28.6297 -21.6513,28.6157 -21.6471,28.5859 -21.6444,28.554 -21.6366,28.5429 -21.6383,28.5325 -21.6431,28.4973 -21.6515,28.4814 -21.6574,28.4646 -21.6603,28.4431 -21.6558,28.3618 -21.6163,28.3219 -21.6035,28.2849 -21.5969,28.1657 -21.5952,28.0908 -21.5813,28.0329 -21.5779,28.0166 -21.5729,28.0026 -21.5642,27.9904 -21.5519,27.9847 -21.5429,27.9757 -21.5226,27.9706 -21.5144,27.9637 -21.5105,27.9581 -21.5115,27.9532 -21.5105,27.9493 -21.5008,27.9544 -21.4878,27.9504 -21.482,27.9433 -21.4799,27.9399 -21.478,27.9419 -21.4685,27.9496 -21.4565,27.953 -21.4487,27.9502 -21.4383,27.9205 -21.3812,27.9042 -21.3647,27.8978 -21.3554,27.8962 -21.3479,27.8967 -21.3324,27.8944 -21.3243,27.885 -21.3102,27.8491 -21.2697,27.8236 -21.2317,27.7938 -21.1974,27.7244 -21.1497,27.7092 -21.1345,27.6748 -21.0901,27.6666 -21.0712,27.6668 -21.0538,27.679 -21.0007,27.6804 -20.9796,27.6727 -20.9235,27.6726 -20.9137,27.6751 -20.8913,27.6748 -20.8799,27.676 -20.8667,27.6818 -20.8576,27.689 -20.849,27.6944 -20.8377,27.7096 -20.7567,27.7073 -20.7167,27.6825 -20.6373,27.6904 -20.6015,27.7026 -20.5661,27.7056 -20.5267,27.6981 -20.5091,27.6838 -20.4961,27.666 -20.4891,27.6258 -20.4886,27.5909 -20.4733,27.5341 -20.483,27.4539 -20.4733,27.3407 -20.473,27.306 -20.4774,27.2684 -20.4958,27.284 -20.3515,27.266 -20.2342,27.2149 -20.1105,27.2018 -20.093,27.1837 -20.0823,27.1629 -20.0766,27.1419 -20.0733,27.1297 -20.0729,27.1198 -20.0739,27.1096 -20.0732,27.0973 -20.0689,27.0865 -20.0605,27.0692 -20.0374,27.0601 -20.0276,27.0267 -20.0101,26.9943 -20.0068,26.9611 -20.0072,26.9251 -20.0009,26.8119 -19.9464,26.7745 -19.9398,26.7508 -19.9396,26.731 -19.9359,26.7139 -19.9274,26.6986 -19.9125,26.6848 -19.8945,26.6772 -19.8868,26.6738 -19.8834,26.6594 -19.8757,26.6141 -19.8634,26.5956 -19.8556,26.5819 -19.8421,26.5748 -19.8195,26.5663 -19.8008,26.5493 -19.7841,26.5089 -19.7593,26.4897 -19.7519,26.4503 -19.7433,26.4319 -19.7365,26.4128 -19.7196,26.3852 -19.6791,26.3627 -19.6676,26.3323 -19.6624,26.3244 -19.6591,26.3122 -19.6514,26.3125 -19.6496,26.3191 -19.6463,26.3263 -19.6339,26.3335 -19.613,26.331 -19.605,26.3211 -19.592,26.3132 -19.5842,26.3035 -19.5773,26.2926 -19.5725,26.2391 -19.5715,26.1945 -19.5602,26.1555 -19.5372,26.1303 -19.5011,26.0344 -19.2437,26.0114 -19.1998,25.9811 -19.1618,25.9565 -19.1221,25.9486 -19.1033,25.9449 -19.0792,25.9481 -19.0587,25.9644 
-19.0216,25.9678 -19.001,25.9674 -18.9999,25.9407 -18.9213,25.8153 -18.814,25.7795 -18.7388,25.7734 -18.6656,25.7619 -18.6303,25.7369 -18.6087,25.6983 -18.5902,25.6695 -18.566,25.6221 -18.5011,25.6084 -18.4877,25.5744 -18.4657,25.5085 -18.3991,25.4956 -18.3789,25.4905 -18.3655,25.4812 -18.3234,25.4732 -18.3034,25.4409 -18.2532,25.4088 -18.176,25.3875 -18.139,25.3574 -18.1158,25.3234 -18.0966,25.2964 -18.0686,25.255 -18.0011,25.2261 -17.9319,25.2194 -17.908,25.2194 -17.8798,25.2598 -17.7941,25.2667 -17.8009,25.2854 -17.8093,25.3159 -17.8321,25.3355 -17.8412,25.3453 -17.8426,25.3765 -17.8412,25.4095 -17.853,25.4203 -17.8549,25.4956 -17.8549,25.5007 -17.856,25.5102 -17.8612,25.5165 -17.8623,25.5221 -17.8601,25.5309 -17.851,25.5368 -17.8487,25.604 -17.8362,25.657 -17.8139,25.6814 -17.8115,25.6942 -17.8194,25.7064 -17.8299,25.7438 -17.8394,25.766 -17.8498,25.786 -17.8622,25.7947 -17.8727,25.8044 -17.8882,25.8497 -17.9067,25.8636 -17.9238,25.8475 -17.9294,25.8462 -17.9437,25.8535 -17.96,25.8636 -17.9716,25.9245 -17.999,25.967 -18.0005,25.9785 -17.999,26.0337 -17.9716,26.0406 -17.9785,26.0466 -17.9663,26.0625 -17.9629,26.0812 -17.9624,26.0952 -17.9585,26.0962 -17.9546,26.0942 -17.9419,26.0952 -17.9381,26.1012 -17.9358,26.1186 -17.9316,26.1354 -17.9226,26.1586 -17.9183,26.1675 -17.9136,26.203 -17.8872,26.2119 -17.8828,26.2211 -17.8863,26.2282 -17.8947,26.2339 -17.904,26.2392 -17.9102,26.2483 -17.9134,26.2943 -17.9185,26.3038 -17.9228,26.312 -17.9284,26.3183 -17.9344,26.3255 -17.936,26.3627 -17.9306,26.4086 -17.939,26.4855 -17.9793,26.5271 -17.992,26.5536 -17.9965,26.5702 -18.0029,26.5834 -18.0132,26.5989 -18.03,26.6127 -18.0412,26.6288 -18.0492,26.6857 -18.0668,26.7 -18.0692,26.7119 -18.0658,26.7406 -18.0405,26.7536 -18.033,26.7697 -18.029,26.794 -18.0262,26.8883 -17.9846,26.912 -17.992,26.9487 -17.9689,26.9592 -17.9647,27.0063 -17.9627,27.0213 -17.9585,27.0485 -17.9443,27.0782 -17.917,27.1154 -17.8822,27.149 -17.8425,27.1465 -17.8189,27.1453 -17.7941,27.147 -17.7839,27.1571 -17.7693,27.4221 -17.5048,27.5243 -17.4151,27.5773 -17.3631,27.6045 -17.3128,27.6249 -17.2333,27.6412 -17.1985,27.7773 -17.0012,27.8169 -16.9596,27.8686 -16.9297,28.023 -16.8654,28.1139 -16.8276,28.2125 -16.7486,28.2801 -16.7065,28.6433 -16.5688,28.6907 -16.5603,28.7188 -16.5603,28.7328 -16.5581,28.7414 -16.5507,28.7611 -16.5323,28.7693 -16.5152,28.8089 -16.4863,28.8225 -16.4708,28.8291 -16.4346,28.8331 -16.4264,28.8572 -16.3882,28.857 -16.3655,28.8405 -16.3236,28.8368 -16.3063,28.8403 -16.2847,28.8642 -16.2312,28.8471 -16.2027,28.8525 -16.1628,28.8654 -16.1212,28.871 -16.0872,28.8685 -16.0822,28.8638 -16.0766,28.8593 -16.0696,28.8572 -16.0605,28.8603 -16.0494,28.8741 -16.0289,28.8772 -16.022,28.8989 -15.9955,28.9324 -15.9637,28.9469 -15.9572,28.9513 -15.9553,28.9728 -15.9514,29.0181 -15.9506,29.0423 -15.9463,29.0551 -15.9344,29.0763 -15.8954,29.0862 -15.8846,29.1022 -15.8709,29.1217 -15.8593,29.1419 -15.8545,29.151 -15.8488,29.1863 -15.8128,29.407 -15.7142,29.4221 -15.711,29.5085 -15.7036,29.5262 -15.6928,29.5634 -15.6621,29.5872 -15.6557,29.6086 -15.6584,29.628 -15.6636,29.6485 -15.6666,29.6728 -15.6633,29.73 -15.6447,29.7733 -15.6381,29.8143 -15.6197,29.8373 -15.6148,29.8818 -15.6188,29.9675 -15.6415,30.0107 -15.6462))', 1); +INSERT INTO geo VALUES ('POINT(0 0)', 2); +INSERT INTO geo VALUES ('MULTIPOLYGON(((1 0,10 0,10 10,0 10,1 0),(4 4,5 4,5 5,4 5,4 4)),((-10 -10,-10 -9,-9 10,-10 -10)))', 3); +INSERT INTO geo VALUES ('LINESTRING(1 0,10 0,10 10,0 10,1 0)', 4); +INSERT INTO geo VALUES ('MULTILINESTRING((1 0,10 0,10 10,0 10,1 
0),(4 4,5 4,5 5,4 5,4 4))', 5); +INSERT INTO geo_dst SELECT id, readWkt(geom) FROM geo ORDER BY id; +INSERT INTO geo_dst VALUES (6, NULL); + +SELECT perimeterCartesian(geom) FROM geo_dst ORDER BY id; +SELECT areaCartesian(geom) FROM geo_dst ORDER BY id; + +CREATE TABLE variant_table +( + id Int32, + data Variant(UInt64, String, Array(String), Tuple(String, UInt32)) +) +ENGINE = Memory(); + +INSERT INTO variant_table VALUES (1, 123); + +SELECT perimeterCartesian(data) FROM variant_table; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT areaCartesian(data) FROM variant_table; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT perimeterCartesian(id, data) FROM variant_table; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT areaCartesian(id, data) FROM variant_table; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +SELECT perimeterCartesian(id) FROM variant_table; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } +SELECT areaCartesian(id) FROM variant_table; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/03702_inject_random_orderby_doesnt_change_limit/ast.json b/parser/testdata/03702_inject_random_orderby_doesnt_change_limit/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03702_inject_random_orderby_doesnt_change_limit/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03702_inject_random_orderby_doesnt_change_limit/metadata.json b/parser/testdata/03702_inject_random_orderby_doesnt_change_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03702_inject_random_orderby_doesnt_change_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03702_inject_random_orderby_doesnt_change_limit/query.sql b/parser/testdata/03702_inject_random_orderby_doesnt_change_limit/query.sql new file mode 100644 index 000000000..f3b7df7fb --- /dev/null +++ b/parser/testdata/03702_inject_random_orderby_doesnt_change_limit/query.sql @@ -0,0 +1,17 @@ +-- A test for Bug 88496 + +-- The setting is disabled by default, enable it for the test. 
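+-- Per its name, the setting injects a random ORDER BY into SELECTs that have none; the LIMIT below must still cut the scan of system.numbers short.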
+SET inject_random_order_for_select_without_order_by = 1; + +-- Works only when the analyzer is enabled +SET enable_analyzer = 1; + +-- Expect that these queries don't time out + +SELECT number +FROM system.numbers +LIMIT 1; + +SELECT number FROM system.numbers LIMIT 1 +UNION ALL +SELECT number FROM system.numbers LIMIT 1; diff --git a/parser/testdata/03702_json_datetime_format_settings/ast.json b/parser/testdata/03702_json_datetime_format_settings/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03702_json_datetime_format_settings/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03702_json_datetime_format_settings/metadata.json b/parser/testdata/03702_json_datetime_format_settings/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03702_json_datetime_format_settings/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03702_json_datetime_format_settings/query.sql b/parser/testdata/03702_json_datetime_format_settings/query.sql new file mode 100644 index 000000000..27adc0c32 --- /dev/null +++ b/parser/testdata/03702_json_datetime_format_settings/query.sql @@ -0,0 +1,14 @@ +-- Test that format settings are respected during internal CAST for JSON type +SET enable_json_type = 1; + +-- Direct CAST should respect date_time_input_format setting +SELECT '{"d" : "2024 April 4"}'::JSON AS json, JSONAllPathsWithTypes(json) SETTINGS date_time_input_format = 'best_effort'; + +-- INSERT SELECT should also respect date_time_input_format setting +DROP TABLE IF EXISTS test_json_datetime; +CREATE TABLE test_json_datetime (json JSON) ENGINE = Memory; + +INSERT INTO test_json_datetime SELECT '{"a" : "2024 April 4"}' SETTINGS date_time_input_format = 'best_effort'; +SELECT JSONAllPathsWithTypes(json) FROM test_json_datetime; + +DROP TABLE test_json_datetime; diff --git a/parser/testdata/03702_optimize_inverse_dictionary_lookup_composite_and_layouts/ast.json b/parser/testdata/03702_optimize_inverse_dictionary_lookup_composite_and_layouts/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03702_optimize_inverse_dictionary_lookup_composite_and_layouts/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03702_optimize_inverse_dictionary_lookup_composite_and_layouts/metadata.json b/parser/testdata/03702_optimize_inverse_dictionary_lookup_composite_and_layouts/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03702_optimize_inverse_dictionary_lookup_composite_and_layouts/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03702_optimize_inverse_dictionary_lookup_composite_and_layouts/query.sql b/parser/testdata/03702_optimize_inverse_dictionary_lookup_composite_and_layouts/query.sql new file mode 100644 index 000000000..3f1122339 --- /dev/null +++ b/parser/testdata/03702_optimize_inverse_dictionary_lookup_composite_and_layouts/query.sql @@ -0,0 +1,235 @@ +-- Tags: no-replicated-database, no-parallel-replicas +-- no-parallel-replicas: the dictionary is not created on parallel replicas.
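+-- Exercises the inverse-lookup rewrite across layouts: COMPLEX_KEY_HASHED, COMPLEX_KEY_HASHED_ARRAY and COMPLEX_KEY_SPARSE_HASHED for composite keys, and FLAT, HASHED, HASHED_ARRAY and SPARSE_HASHED for simple keys; each case shows the plan via EXPLAIN SYNTAX and then runs the query.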
+ +SET enable_analyzer = 1; +SET optimize_inverse_dictionary_lookup = 1; +SET optimize_or_like_chain = 0; + +DROP DICTIONARY IF EXISTS dict_prices_ckh; +DROP DICTIONARY IF EXISTS dict_prices_ch_array; +DROP DICTIONARY IF EXISTS dict_prices_ck_sparse_hashed; +DROP DICTIONARY IF EXISTS dict_items_flat; +DROP DICTIONARY IF EXISTS dict_items_hashed; +DROP DICTIONARY IF EXISTS dict_items_hashed_array; +DROP DICTIONARY IF EXISTS dict_items_sparse_hashed; + +DROP TABLE IF EXISTS ref_prices_ckh; +DROP TABLE IF EXISTS ref_items_flat; +DROP TABLE IF EXISTS f; + +CREATE TABLE ref_prices_ckh +( + k1 UInt64, + k2 String, + price UInt64, + tag String +) +ENGINE = MergeTree +ORDER BY (k1, k2); + +INSERT INTO ref_prices_ckh VALUES + (1, 'a', 100, 'pro'), + (1, 'b', 50, 'basic'), + (2, 'a', 75, 'plus'), + (3, 'c', 10, 'cheap'); + +CREATE TABLE ref_items_flat +( + id UInt64, + name String, + score UInt64 +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO ref_items_flat VALUES + (1, 'alpha', 10), + (2, 'beta', 5), + (3, 'alpha', 15); + +CREATE DICTIONARY dict_prices_ckh +( + k1 UInt64, + k2 String, + price UInt64, + tag String +) +PRIMARY KEY k1, k2 +SOURCE(CLICKHOUSE(TABLE 'ref_prices_ckh')) +LAYOUT(COMPLEX_KEY_HASHED()) +LIFETIME(0); + +CREATE DICTIONARY dict_prices_ch_array +( + k1 UInt64, + k2 String, + price UInt64, + tag String +) +PRIMARY KEY k1, k2 +SOURCE(CLICKHOUSE(TABLE 'ref_prices_ckh')) +LAYOUT(COMPLEX_KEY_HASHED_ARRAY()) +LIFETIME(0); + +CREATE DICTIONARY dict_prices_ck_sparse_hashed +( + k1 UInt64, + k2 String, + price UInt64, + tag String +) +PRIMARY KEY k1, k2 +SOURCE(CLICKHOUSE(TABLE 'ref_prices_ckh')) +LAYOUT(COMPLEX_KEY_SPARSE_HASHED()) +LIFETIME(0); + +CREATE DICTIONARY dict_items_flat +( + id UInt64, + name String, + score UInt64 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'ref_items_flat')) +LAYOUT(FLAT()) +LIFETIME(0); + +CREATE DICTIONARY dict_items_hashed +( + id UInt64, + name String, + score UInt64 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'ref_items_flat')) +LAYOUT(HASHED()) +LIFETIME(0); + +CREATE DICTIONARY dict_items_hashed_array +( + id UInt64, + name String, + score UInt64 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'ref_items_flat')) +LAYOUT(HASHED_ARRAY()) +LIFETIME(0); + +CREATE DICTIONARY dict_items_sparse_hashed +( + id UInt64, + name String, + score UInt64 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'ref_items_flat')) +LAYOUT(SPARSE_HASHED()) +LIFETIME(0); + +CREATE TABLE f +( + k1 UInt64, + k2 String, + id UInt64, + payload String +) +ENGINE = MergeTree +ORDER BY (k1, k2, id); + +INSERT INTO f VALUES + (1, 'a', 1, 'x'), + (1, 'b', 2, 'y'), + (2, 'a', 3, 'z'), + (2, 'b', 2, 'w'), + (3, 'c', 1, 'u'); + +SELECT 'ComplexKeyHashed - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT k1, k2, payload +FROM f +WHERE dictGet('dict_prices_ckh', 'tag', (k1, k2)) = 'pro' +ORDER BY k1, k2, payload; + +SELECT 'ComplexKeyHashed'; +SELECT k1, k2, payload +FROM f +WHERE dictGet('dict_prices_ckh', 'tag', (k1, k2)) = 'pro' +ORDER BY k1, k2, payload; + +SELECT 'ComplexHashedArray - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT k1, k2, payload +FROM f +WHERE dictGet('dict_prices_ch_array', 'tag', (k1, k2)) = 'pro' +ORDER BY k1, k2, payload; + +SELECT 'ComplexHashedArray'; +SELECT k1, k2, payload +FROM f +WHERE dictGet('dict_prices_ch_array', 'tag', (k1, k2)) = 'pro' +ORDER BY k1, k2, payload; + +SELECT 'ComplexKeySparseHashed - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT k1, k2, payload +FROM f +WHERE dictGet('dict_prices_ck_sparse_hashed', 'tag', (k1, k2)) = 
'pro' +ORDER BY k1, k2, payload; +SELECT 'ComplexKeySparseHashed'; +SELECT k1, k2, payload +FROM f +WHERE dictGet('dict_prices_ck_sparse_hashed', 'tag', (k1, k2)) = 'pro' +ORDER BY k1, k2, payload; + +SELECT 'Flat - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT id, payload +FROM f +WHERE dictGet('dict_items_flat', 'name', id) = 'alpha' +ORDER BY id, payload; + +SELECT 'Flat'; +SELECT id, payload +FROM f +WHERE dictGet('dict_items_flat', 'name', id) = 'alpha' +ORDER BY id, payload; + +SELECT 'Hashed - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT id, payload +FROM f +WHERE dictGet('dict_items_hashed', 'name', id) = 'alpha' +ORDER BY id, payload; + +SELECT 'Hashed'; +SELECT id, payload +FROM f +WHERE dictGet('dict_items_hashed', 'name', id) = 'alpha' +ORDER BY id, payload; + +SELECT 'HashedArray - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT id, payload +FROM f +WHERE dictGet('dict_items_hashed_array', 'name', id) = 'alpha' +ORDER BY id, payload; + +SELECT 'HashedArray'; +SELECT id, payload +FROM f +WHERE dictGet('dict_items_hashed_array', 'name', id) = 'alpha' +ORDER BY id, payload; + +SELECT 'SparseHashed - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT id, payload +FROM f +WHERE dictGet('dict_items_sparse_hashed', 'name', id) = 'alpha' +ORDER BY id, payload; + +SELECT 'SparseHashed'; +SELECT id, payload +FROM f +WHERE dictGet('dict_items_sparse_hashed', 'name', id) = 'alpha' +ORDER BY id, payload; diff --git a/parser/testdata/03702_text_index_hint_basics/ast.json b/parser/testdata/03702_text_index_hint_basics/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03702_text_index_hint_basics/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03702_text_index_hint_basics/metadata.json b/parser/testdata/03702_text_index_hint_basics/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03702_text_index_hint_basics/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03702_text_index_hint_basics/query.sql b/parser/testdata/03702_text_index_hint_basics/query.sql new file mode 100644 index 000000000..479e236ae --- /dev/null +++ b/parser/testdata/03702_text_index_hint_basics/query.sql @@ -0,0 +1,87 @@ +-- Tags: no-parallel-replicas + +SET enable_analyzer = 1; +SET allow_experimental_full_text_index = 1; +SET use_skip_indexes_on_data_read = 1; +SET query_plan_text_index_add_hint = 1; +SET allow_statistics_optimize = 0; + +DROP TABLE IF EXISTS t_text_index_hint; + +SELECT 'splitByNonAlpha'; + +CREATE TABLE t_text_index_hint +( + s String, + INDEX idx_s (s) TYPE text(tokenizer = splitByNonAlpha) GRANULARITY 4 +) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_text_index_hint SELECT number FROM numbers(100000); + +SELECT count() FROM t_text_index_hint WHERE s = '5555'; + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1 SELECT count() FROM t_text_index_hint WHERE s = '5555' SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain ILIKE '%filter column%'; + +SELECT count() FROM t_text_index_hint WHERE s LIKE '%5555%'; + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1 SELECT count() FROM t_text_index_hint WHERE s LIKE '%5555%' SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain ILIKE '%filter column%'; + +DROP TABLE IF EXISTS t_text_index_hint; + +SELECT 'array'; + +CREATE TABLE t_text_index_hint +( + s String, + INDEX idx_s (s) TYPE text(tokenizer = array) GRANULARITY 4 +) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO 
t_text_index_hint SELECT number FROM numbers(100000); + +SELECT count() FROM t_text_index_hint WHERE s = '5555'; + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1 SELECT count() FROM t_text_index_hint WHERE s = '5555' SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain ILIKE '%filter column%'; + +SELECT count() FROM t_text_index_hint WHERE s LIKE '%5555%'; + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1 SELECT count() FROM t_text_index_hint WHERE s LIKE '%5555%' SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain ILIKE '%filter column%'; + +DROP TABLE IF EXISTS t_text_index_hint; + +SELECT 'ngrams(3)'; + +CREATE TABLE t_text_index_hint +( + s String, + INDEX idx_s (s) TYPE text(tokenizer = ngrams(3)) GRANULARITY 4 +) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_text_index_hint SELECT number FROM numbers(100000); + +SELECT count() FROM t_text_index_hint WHERE s = '5555'; + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1 SELECT count() FROM t_text_index_hint WHERE s = '5555' SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain ILIKE '%filter column%'; + +SELECT count() FROM t_text_index_hint WHERE s LIKE '%5555%'; + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1 SELECT count() FROM t_text_index_hint WHERE s LIKE '%5555%' SETTINGS use_skip_indexes_on_data_read = 1 +) WHERE explain ILIKE '%filter column%'; + +DROP TABLE IF EXISTS t_text_index_hint; diff --git a/parser/testdata/03702_text_index_hint_events/ast.json b/parser/testdata/03702_text_index_hint_events/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03702_text_index_hint_events/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03702_text_index_hint_events/metadata.json b/parser/testdata/03702_text_index_hint_events/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03702_text_index_hint_events/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03702_text_index_hint_events/query.sql b/parser/testdata/03702_text_index_hint_events/query.sql new file mode 100644 index 000000000..7a70aa933 --- /dev/null +++ b/parser/testdata/03702_text_index_hint_events/query.sql @@ -0,0 +1,32 @@ +-- Tags: no-parallel-replicas +-- Random settings limits: index_granularity=(128, None) + +SET enable_analyzer = 1; +SET allow_experimental_full_text_index = 1; +SET use_skip_indexes_on_data_read = 1; +SET query_plan_text_index_add_hint = 1; + +DROP TABLE IF EXISTS t_text_index_hint_events; + +CREATE TABLE t_text_index_hint_events +( + s String, + INDEX idx_s (s) TYPE text(tokenizer = ngrams(3)) GRANULARITY 4 +) +ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_text_index_hint_events SELECT format('foo {} bar', number) FROM numbers(100000); + +SELECT count() FROM t_text_index_hint_events WHERE s LIKE '%foo%'; +SELECT count() FROM t_text_index_hint_events WHERE s LIKE '%7777%'; + +SYSTEM FLUSH LOGS query_log; + +SELECT + ProfileEvents['TextIndexUseHint'] > 0, + ProfileEvents['TextIndexDiscardHint'] > 0 +FROM system.query_log +WHERE current_database = currentDatabase() AND type = 'QueryFinish' AND query LIKE 'SELECT count() FROM t_text_index_hint_events%' +ORDER BY event_time_microseconds; + +DROP TABLE IF EXISTS t_text_index_hint_events; diff --git a/parser/testdata/03702_text_index_hint_low_cardinality/ast.json b/parser/testdata/03702_text_index_hint_low_cardinality/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/03702_text_index_hint_low_cardinality/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03702_text_index_hint_low_cardinality/metadata.json b/parser/testdata/03702_text_index_hint_low_cardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03702_text_index_hint_low_cardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03702_text_index_hint_low_cardinality/query.sql b/parser/testdata/03702_text_index_hint_low_cardinality/query.sql new file mode 100644 index 000000000..42fcb0a6d --- /dev/null +++ b/parser/testdata/03702_text_index_hint_low_cardinality/query.sql @@ -0,0 +1,33 @@ +-- Tags: no-parallel-replicas + +DROP TABLE IF EXISTS t_direct_read_lc; +SET allow_experimental_full_text_index = 1; + +CREATE OR REPLACE TABLE t_direct_read_lc +( + c LowCardinality(String), + INDEX i c type text(tokenizer='array') +) +ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_direct_read_lc VALUES ('config'); + +SELECT count() FROM t_direct_read_lc WHERE c = 'config'; + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1 SELECT count() FROM t_direct_read_lc WHERE c = 'config' + SETTINGS use_skip_indexes_on_data_read = 1, query_plan_text_index_add_hint = 1 +) +WHERE explain LIKE '%Filter column:%'; + +SELECT count() FROM t_direct_read_lc WHERE hasToken(c, 'config'); + +SELECT trim(explain) FROM +( + EXPLAIN actions = 1 SELECT count() FROM t_direct_read_lc WHERE hasToken(c, 'config') + SETTINGS use_skip_indexes_on_data_read = 1, query_plan_text_index_add_hint = 1 +) +WHERE explain LIKE '%Filter column:%'; + +DROP TABLE t_direct_read_lc; diff --git a/parser/testdata/03703_function_dict_get_keys_large/ast.json b/parser/testdata/03703_function_dict_get_keys_large/ast.json new file mode 100644 index 000000000..03504dd3b --- /dev/null +++ b/parser/testdata/03703_function_dict_get_keys_large/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery dict_big (children 1)" + }, + { + "explain": " Identifier dict_big" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001164683, + "rows_read": 2, + "bytes_read": 68 + } +} diff --git a/parser/testdata/03703_function_dict_get_keys_large/metadata.json b/parser/testdata/03703_function_dict_get_keys_large/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03703_function_dict_get_keys_large/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03703_function_dict_get_keys_large/query.sql b/parser/testdata/03703_function_dict_get_keys_large/query.sql new file mode 100644 index 000000000..4f6ab969a --- /dev/null +++ b/parser/testdata/03703_function_dict_get_keys_large/query.sql @@ -0,0 +1,57 @@ +DROP DICTIONARY IF EXISTS dict_big; +DROP TABLE IF EXISTS dict_src_big; + +CREATE TABLE dict_src_big +( + id UInt64, + grp String, + grp_round String +) ENGINE = Memory; + +INSERT INTO dict_src_big +SELECT + number AS id, + toString(number) AS grp, + toString(number % 2000) AS grp_round +FROM numbers(100000); + +CREATE DICTIONARY dict_big +( + id UInt64, + grp String, + grp_round String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'dict_src_big')) +LIFETIME(0) +LAYOUT(HASHED()); + +SELECT 'Single exact match'; +SELECT length(dictGetKeys('dict_big', 'grp', '123')) ; +SELECT arraySum(dictGetKeys('dict_big', 'grp', '123')); + +SELECT 'Missing value'; +SELECT length(dictGetKeys('dict_big', 'grp', '100000')); 
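+-- grp holds toString(0..99999), so '100000' matches no keys: length is 0 and the array sum below is 0 as well.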
+SELECT arraySum(dictGetKeys('dict_big', 'grp', '100000')); + +SELECT 'Multiple matches for same attribute value'; +SELECT length(dictGetKeys('dict_big', 'grp_round', '7')); +SELECT arraySum(dictGetKeys('dict_big', 'grp_round', '7')); + +SELECT 'Vector of inputs with mixed hits and misses'; +SELECT sum(length(dictGetKeys('dict_big', 'grp', toString(number)))) +FROM numbers(5); + +SELECT sum(arraySum(dictGetKeys('dict_big', 'grp', toString(number)))) +FROM numbers(3); + +SELECT 'All sum'; +SELECT sum(arraySum(dictGetKeys('dict_big', 'grp', toString(number)))) +FROM numbers(100000); + +SELECT 'Misc'; +SELECT sum(length(dictGetKeys('dict_big', 'grp_round', '7'))) +FROM numbers(2); + +SELECT length(dictGetKeys('dict_big', 'grp_round', '1999')); +SELECT arraySum(dictGetKeys('dict_big', 'grp_round', '1999')); diff --git a/parser/testdata/03703_optimize_inverse_dictionary_lookup_dictget_family/ast.json b/parser/testdata/03703_optimize_inverse_dictionary_lookup_dictget_family/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03703_optimize_inverse_dictionary_lookup_dictget_family/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03703_optimize_inverse_dictionary_lookup_dictget_family/metadata.json b/parser/testdata/03703_optimize_inverse_dictionary_lookup_dictget_family/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03703_optimize_inverse_dictionary_lookup_dictget_family/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03703_optimize_inverse_dictionary_lookup_dictget_family/query.sql b/parser/testdata/03703_optimize_inverse_dictionary_lookup_dictget_family/query.sql new file mode 100644 index 000000000..fbdbc37dd --- /dev/null +++ b/parser/testdata/03703_optimize_inverse_dictionary_lookup_dictget_family/query.sql @@ -0,0 +1,210 @@ +-- Tags: no-replicated-database, no-parallel-replicas +-- no-parallel-replicas: the dictionary is not created on parallel replicas.
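+-- Each typed getter (dictGetString, dictGetInt32, dictGetUInt64, dictGetFloat64, dictGetDate, dictGetDateTime, dictGetUUID, dictGetIPv4, dictGetIPv6, dictGetOrNull) is checked twice: EXPLAIN SYNTAX for the rewritten plan, then the executed query.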
+ +SET enable_analyzer = 1; +SET optimize_inverse_dictionary_lookup = 1; +SET optimize_or_like_chain = 0; + +DROP DICTIONARY IF EXISTS dictionary_all; +DROP TABLE IF EXISTS ref_table_all; +DROP TABLE IF EXISTS tab; + +CREATE TABLE ref_table_all +( + id UInt64, + name String, + i8 String, + i16 String, + i32 String, + i64 String, + u8 String, + u16 String, + u32 String, + u64 String, + f32 String, + f64 String, + d String, + dt String, + uid String, + ip4 String, + ip6 String +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO ref_table_all VALUES + (1, 'alpha', -8, -16, -32, -64, 8, 16, 32, 64, 10.0, 20.0, + '2025-01-01', '2025-01-01 10:00:00', + '00000000-0000-0000-0000-000000000001', + '192.168.0.1', '2001:db8::1'), + (2, 'beta', -7, -15, -31, -63, 9, 17, 33, 65, 11.0, 21.0, + '2026-01-01', '2026-01-01 15:00:00', + '00000000-0000-0000-0000-000000000002', + '10.0.0.3', '2001:db8::2'); + +CREATE DICTIONARY dictionary_all +( + id UInt64, + name String, + i8 String, + i16 String, + i32 String, + i64 String, + u8 String, + u16 String, + u32 String, + u64 String, + f32 String, + f64 String, + d String, + dt String, + uid String, + ip4 String, + ip6 String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'ref_table_all')) +LAYOUT(HASHED()) +LIFETIME(0); + +CREATE TABLE tab +( + id UInt64, + payload String +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO tab VALUES (1,'x'),(2,'y'),(99,'z'); + +SELECT 'dictGet (generic) - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT id, payload FROM tab +WHERE dictGet('dictionary_all', 'name', id) = 'alpha' +ORDER BY id, payload; + +SELECT 'dictGet (generic)'; +SELECT id, payload FROM tab +WHERE dictGet('dictionary_all', 'name', id) = 'alpha' +ORDER BY id, payload; + +SELECT 'dictGetString - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT id, payload FROM tab +WHERE dictGetString('dictionary_all', 'name', id) = 'alpha' +ORDER BY id, payload; + +SELECT 'dictGetString'; +SELECT id, payload FROM tab +WHERE dictGetString('dictionary_all', 'name', id) = 'alpha' +ORDER BY id, payload; + +SELECT 'dictGetInt32 - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT id, payload FROM tab +WHERE dictGetInt32('dictionary_all', 'i32', id) = -32 +ORDER BY id, payload; + +SELECT 'dictGetInt32'; +SELECT id, payload FROM tab +WHERE dictGetInt32('dictionary_all', 'i32', id) = -32 +ORDER BY id, payload; + +SELECT 'dictGetUInt64 - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT id, payload FROM tab +WHERE dictGetUInt64('dictionary_all', 'u64', id) = 64 +ORDER BY id, payload; + +SELECT 'dictGetUInt64'; +SELECT id, payload FROM tab +WHERE dictGetUInt64('dictionary_all', 'u64', id) = 64 +ORDER BY id, payload; + +SELECT 'dictGetFloat64 - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT id, payload FROM tab +WHERE dictGetFloat64('dictionary_all', 'f64', id) = 20.0 +ORDER BY id, payload; + +SELECT 'dictGetFloat64'; +SELECT id, payload FROM tab +WHERE dictGetFloat64('dictionary_all', 'f64', id) = 20.0 +ORDER BY id, payload; + +SELECT 'dictGetDate - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT id, payload FROM tab +WHERE dictGetDate('dictionary_all', 'd', id) = toDate('2025-01-01') +ORDER BY id, payload; + +SELECT 'dictGetDate'; +SELECT id, payload FROM tab +WHERE dictGetDate('dictionary_all', 'd', id) = toDate('2025-01-01') +ORDER BY id, payload; + +SELECT 'dictGetDateTime - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT id, payload FROM tab +WHERE dictGetDateTime('dictionary_all', 'dt', id) = toDateTime('2025-01-01 10:00:00') +ORDER BY id, 
payload; + +SELECT 'dictGetDateTime'; +SELECT id, payload FROM tab +WHERE dictGetDateTime('dictionary_all', 'dt', id) = toDateTime('2025-01-01 10:00:00') +ORDER BY id, payload; + +SELECT 'dictGetUUID - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT id, payload FROM tab +WHERE dictGetUUID('dictionary_all', 'uid', id) = toUUID('00000000-0000-0000-0000-000000000001') +ORDER BY id, payload; + +SELECT 'dictGetUUID'; +SELECT id, payload FROM tab +WHERE dictGetUUID('dictionary_all', 'uid', id) = toUUID('00000000-0000-0000-0000-000000000001') +ORDER BY id, payload; + +SELECT 'dictGetIPv4 - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT id, payload FROM tab +WHERE dictGetIPv4('dictionary_all', 'ip4', id) = toIPv4('192.168.0.1') +ORDER BY id, payload; + +SELECT 'dictGetIPv4'; +SELECT id, payload FROM tab +WHERE dictGetIPv4('dictionary_all', 'ip4', id) = toIPv4('192.168.0.1') +ORDER BY id, payload; + +SELECT 'dictGetIPv6 - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT id, payload FROM tab +WHERE dictGetIPv6('dictionary_all', 'ip6', id) = toIPv6('2001:db8::1') +ORDER BY id, payload; + +SELECT 'dictGetIPv6'; +SELECT id, payload FROM tab +WHERE dictGetIPv6('dictionary_all', 'ip6', id) = toIPv6('2001:db8::1') +ORDER BY id, payload; + +SELECT 'dictGetOrNull(String) - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT id, payload FROM tab +WHERE dictGetOrNull('dictionary_all', 'name', id) = 'alpha' +ORDER BY id, payload; + +SELECT 'dictGetOrNull(String)'; +SELECT id, payload FROM tab +WHERE dictGetOrNull('dictionary_all', 'name', id) = 'alpha' +ORDER BY id, payload; + +SELECT 'dictGetOrNull(String) IS NULL - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT id, payload FROM tab +WHERE isNull(dictGetOrNull('dictionary_all','name', id)) +ORDER BY id, payload; + +SELECT 'dictGetOrNull(String) IS NULL'; +SELECT id, payload FROM tab +WHERE isNull(dictGetOrNull('dictionary_all','name', id)) +ORDER BY id, payload; diff --git a/parser/testdata/03703_prelimit_explain_message/ast.json b/parser/testdata/03703_prelimit_explain_message/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03703_prelimit_explain_message/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03703_prelimit_explain_message/metadata.json b/parser/testdata/03703_prelimit_explain_message/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03703_prelimit_explain_message/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03703_prelimit_explain_message/query.sql b/parser/testdata/03703_prelimit_explain_message/query.sql new file mode 100644 index 000000000..60040010a --- /dev/null +++ b/parser/testdata/03703_prelimit_explain_message/query.sql @@ -0,0 +1,11 @@ +SET enable_analyzer=0; + +EXPLAIN SELECT * FROM numbers(100) ORDER BY number LIMIT 10 OFFSET 90; + +EXPLAIN SELECT * FROM remote('127.0.0.{2,3}', numbers(100)) ORDER BY number LIMIT 1; + +SET enable_analyzer=1; + +EXPLAIN SELECT * FROM numbers(100) ORDER BY number LIMIT 10 OFFSET 90; + +EXPLAIN SELECT * FROM remote('127.0.0.{2,3}', numbers(100)) ORDER BY number LIMIT 1; \ No newline at end of file diff --git a/parser/testdata/03703_statistics_low_cardinality/ast.json b/parser/testdata/03703_statistics_low_cardinality/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03703_statistics_low_cardinality/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git 
a/parser/testdata/03703_statistics_low_cardinality/metadata.json b/parser/testdata/03703_statistics_low_cardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03703_statistics_low_cardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03703_statistics_low_cardinality/query.sql b/parser/testdata/03703_statistics_low_cardinality/query.sql new file mode 100644 index 000000000..ea7cf8a08 --- /dev/null +++ b/parser/testdata/03703_statistics_low_cardinality/query.sql @@ -0,0 +1,6 @@ +-- Tags: no-fasttest +set allow_statistics_optimize = 1; + +create table t (a Nullable(Int), b LowCardinality(Nullable(String))) Engine = MergeTree() ORDER BY () settings auto_statistics_types = 'minmax,uniq,tdigest,countmin'; +insert into t values (1 , '1'), (2, '2'), (3, '3'); +select * from t where a > 1 and b = '1'; diff --git a/parser/testdata/03704_default_empty_order_by/ast.json b/parser/testdata/03704_default_empty_order_by/ast.json new file mode 100644 index 000000000..9412abd71 --- /dev/null +++ b/parser/testdata/03704_default_empty_order_by/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00146071, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03704_default_empty_order_by/metadata.json b/parser/testdata/03704_default_empty_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03704_default_empty_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03704_default_empty_order_by/query.sql b/parser/testdata/03704_default_empty_order_by/query.sql new file mode 100644 index 000000000..75da95974 --- /dev/null +++ b/parser/testdata/03704_default_empty_order_by/query.sql @@ -0,0 +1,3 @@ +DROP TABLE IF EXISTS test; +SET create_table_empty_primary_key_by_default = 1; +CREATE TABLE test (x UInt8); diff --git a/parser/testdata/03704_fractional_limit_with_ties/ast.json b/parser/testdata/03704_fractional_limit_with_ties/ast.json new file mode 100644 index 000000000..a53636d88 --- /dev/null +++ b/parser/testdata/03704_fractional_limit_with_ties/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery ties (children 1)" + }, + { + "explain": " Identifier ties" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001104213, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03704_fractional_limit_with_ties/metadata.json b/parser/testdata/03704_fractional_limit_with_ties/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03704_fractional_limit_with_ties/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03704_fractional_limit_with_ties/query.sql b/parser/testdata/03704_fractional_limit_with_ties/query.sql new file mode 100644 index 000000000..8c65c6906 --- /dev/null +++ b/parser/testdata/03704_fractional_limit_with_ties/query.sql @@ -0,0 +1,60 @@ +DROP TABLE IF EXISTS ties; + +CREATE TABLE ties (id UInt8) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO ties VALUES (1), (1), (1), (1), (2), (2), (2), (2), (3), (3); + +SELECT id FROM ties ORDER BY id LIMIT 0.1 WITH TIES; +SELECT '*'; +SELECT id FROM ties ORDER BY id LIMIT 0.2 WITH TIES; +SELECT 
'*'; +SELECT id FROM ties ORDER BY id LIMIT 0.3 WITH TIES; +SELECT ''; + +SELECT id FROM ties ORDER BY id LIMIT 0.5 WITH TIES; +SELECT '*'; +SELECT id FROM ties ORDER BY id LIMIT 0.6 WITH TIES; +SELECT '*'; +SELECT id FROM ties ORDER BY id LIMIT 0.7 WITH TIES; +SELECT ''; + +SELECT id FROM ties ORDER BY id LIMIT 0.9 WITH TIES; +SELECT '*'; +SELECT id FROM ties ORDER BY id; +SELECT ''; + +SET max_block_size = 2; + +SELECT id FROM ties ORDER BY id LIMIT 0.1 WITH TIES; +SELECT '*'; +SELECT id FROM ties ORDER BY id LIMIT 1, 0.1 WITH TIES; +SELECT '*'; +SELECT id FROM ties ORDER BY id LIMIT 2, 0.1 WITH TIES; +SELECT '*'; +SELECT id FROM ties ORDER BY id LIMIT 3, 0.1 WITH TIES; +SELECT '*'; + +SELECT id FROM ties ORDER BY id LIMIT 0.4, 0.1 WITH TIES; +SELECT '*'; + +SELECT id FROM ties ORDER BY id LIMIT 0.8, 0.1 WITH TIES; +SELECT ''; + +SET max_block_size = 3; + +SELECT id FROM ties ORDER BY id LIMIT 0.1 WITH TIES; +SELECT '*'; +SELECT id FROM ties ORDER BY id LIMIT 0.4, 0.1 WITH TIES; +SELECT '*'; +SELECT id FROM ties ORDER BY id LIMIT 0.8, 0.1 WITH TIES; +SELECT ''; + +SELECT count() FROM (SELECT number > 100 AS n FROM numbers(2000) ORDER BY n LIMIT 1, 0.01 WITH TIES); +SELECT '*'; +SET max_block_size = 5; +SELECT count() FROM (SELECT number < 100 AS n FROM numbers(2000) ORDER BY n DESC LIMIT 0.03 WITH TIES); +SELECT '*'; +SELECT count() FROM (SELECT number div 10 AS n FROM numbers(20) ORDER BY n LIMIT 0.25 WITH TIES); +SELECT ''; + +DROP TABLE ties; \ No newline at end of file diff --git a/parser/testdata/03704_function_dict_get_keys_cache_type/ast.json b/parser/testdata/03704_function_dict_get_keys_cache_type/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03704_function_dict_get_keys_cache_type/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03704_function_dict_get_keys_cache_type/metadata.json b/parser/testdata/03704_function_dict_get_keys_cache_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03704_function_dict_get_keys_cache_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03704_function_dict_get_keys_cache_type/query.sql b/parser/testdata/03704_function_dict_get_keys_cache_type/query.sql new file mode 100644 index 000000000..0836470ba --- /dev/null +++ b/parser/testdata/03704_function_dict_get_keys_cache_type/query.sql @@ -0,0 +1,105 @@ +-- These tests check that the cache is only valid within a single query. +-- They should fail if the cache were shared across queries.
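+-- Scenario: resolve a value, mutate the source table, SYSTEM RELOAD DICTIONARY, then resolve the same value again; the second lookup must reflect the reloaded keys.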
+ +SELECT 'Cache Persistence Only Within A Single Query'; + +DROP DICTIONARY IF EXISTS colors; +DROP TABLE IF EXISTS dict_src; + +CREATE TABLE dict_src +( + id UInt64, + grp String +) ENGINE = Memory; + +INSERT INTO dict_src VALUES (1, 'blue'); + +CREATE DICTIONARY colors +( + id UInt64, + grp String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'dict_src')) +LAYOUT(HASHED()) +LIFETIME(0); + +SELECT dictGetKeys('colors', 'grp', 'blue') AS keys +FROM numbers(1); + +TRUNCATE TABLE dict_src; +INSERT INTO dict_src VALUES (2, 'blue'); + +SYSTEM RELOAD DICTIONARY colors; + +SELECT 'After INSERT and RELOAD'; + +SELECT dictGetKeys('colors', 'grp', 'blue') AS keys +FROM numbers(1); + +DROP DICTIONARY IF EXISTS colors; +DROP TABLE IF EXISTS dict_src; + +SELECT 'Cache invalidation after dictionary reload with DELETE'; + +DROP DICTIONARY IF EXISTS dict_products; +DROP TABLE IF EXISTS src_products; +DROP TABLE IF EXISTS inputs; + +CREATE TABLE src_products +( + id UInt64, + category String, + brand String +) +ENGINE = Memory; + +INSERT INTO src_products VALUES + (1, 'catA', 'brandX'), + (2, 'catA', 'brandY'), + (3, 'catB', 'brandX'), + (4, 'catC', 'brandZ'), + (5, 'catB', 'brandZ'); + +CREATE DICTIONARY dict_products +( + id UInt64, + category String, + brand String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'src_products')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(HASHED()); + +CREATE TABLE inputs +( + target_category String, + target_brand String, + target_timezone String +) +ENGINE = Memory; + +INSERT INTO inputs VALUES + ('catA', 'brandX', 'UTC+1'), + ('catB', 'brandZ', 'UTC+9'), + ('catX', 'brandX', 'UTC-6'); + +SELECT + target_category, + dictGetKeys('dict_products', 'category', target_category) AS product_ids_by_category_before +FROM inputs +ORDER BY target_category, target_brand, target_timezone; + +ALTER TABLE src_products DELETE WHERE category = 'catA' + SETTINGS mutations_sync = 1; + +SYSTEM RELOAD DICTIONARY dict_products; + +SELECT 'After DELETE mutation and RELOAD'; + +SELECT + target_category, + dictGetKeys('dict_products', 'category', target_category) AS product_ids_by_category_after +FROM inputs +ORDER BY target_category, target_brand, target_timezone; diff --git a/parser/testdata/03705_count_if_asterisk/ast.json b/parser/testdata/03705_count_if_asterisk/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03705_count_if_asterisk/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03705_count_if_asterisk/metadata.json b/parser/testdata/03705_count_if_asterisk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03705_count_if_asterisk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03705_count_if_asterisk/query.sql b/parser/testdata/03705_count_if_asterisk/query.sql new file mode 100644 index 000000000..f070b26f7 --- /dev/null +++ b/parser/testdata/03705_count_if_asterisk/query.sql @@ -0,0 +1,8 @@ +-- Basic countIf with asterisk +SELECT countIf(*, number < 5) FROM numbers(10); + +-- countIf with asterisk and multiple columns in subquery +SELECT countIf(*, number < 20) FROM (SELECT number, 1, 2 FROM numbers(100)); + +-- count with filter syntax +SELECT count(*) FILTER (WHERE number < 20) FROM (SELECT number, 1, 2 FROM numbers(100)); diff --git a/parser/testdata/03705_fix_compression_T64_unaligned/ast.json b/parser/testdata/03705_fix_compression_T64_unaligned/ast.json new file mode 100644 index 000000000..3c272a398 --- /dev/null +++ 
b/parser/testdata/03705_fix_compression_T64_unaligned/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery compression_estimate_example (children 1)" + }, + { + "explain": " Identifier compression_estimate_example" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001040045, + "rows_read": 2, + "bytes_read": 108 + } +} diff --git a/parser/testdata/03705_fix_compression_T64_unaligned/metadata.json b/parser/testdata/03705_fix_compression_T64_unaligned/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03705_fix_compression_T64_unaligned/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03705_fix_compression_T64_unaligned/query.sql b/parser/testdata/03705_fix_compression_T64_unaligned/query.sql new file mode 100644 index 000000000..202209b08 --- /dev/null +++ b/parser/testdata/03705_fix_compression_T64_unaligned/query.sql @@ -0,0 +1,12 @@ +DROP TABLE IF EXISTS compression_estimate_example; +CREATE TABLE IF NOT EXISTS compression_estimate_example ( + number UInt64 +) +ENGINE = MergeTree() +ORDER BY number; + +INSERT INTO compression_estimate_example +SELECT number FROM system.numbers LIMIT 100_000; + +SELECT estimateCompressionRatio('DoubleDelta, T64, ZSTD')(number) AS estimate FROM compression_estimate_example FORMAT Null; +DROP TABLE IF EXISTS compression_estimate_example; \ No newline at end of file diff --git a/parser/testdata/03705_function_dict_get_keys_multiple_dict_and_no_caching/ast.json b/parser/testdata/03705_function_dict_get_keys_multiple_dict_and_no_caching/ast.json new file mode 100644 index 000000000..e2f55cd45 --- /dev/null +++ b/parser/testdata/03705_function_dict_get_keys_multiple_dict_and_no_caching/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery dict_products (children 1)" + }, + { + "explain": " Identifier dict_products" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001316126, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/03705_function_dict_get_keys_multiple_dict_and_no_caching/metadata.json b/parser/testdata/03705_function_dict_get_keys_multiple_dict_and_no_caching/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03705_function_dict_get_keys_multiple_dict_and_no_caching/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03705_function_dict_get_keys_multiple_dict_and_no_caching/query.sql b/parser/testdata/03705_function_dict_get_keys_multiple_dict_and_no_caching/query.sql new file mode 100644 index 000000000..115bde3cc --- /dev/null +++ b/parser/testdata/03705_function_dict_get_keys_multiple_dict_and_no_caching/query.sql @@ -0,0 +1,108 @@ +DROP DICTIONARY IF EXISTS dict_products; +DROP DICTIONARY IF EXISTS dict_geo; +DROP TABLE IF EXISTS src_products; +DROP TABLE IF EXISTS src_geo; +DROP TABLE IF EXISTS inputs; + +CREATE TABLE src_products +( + id UInt64, + category String, + brand String +) +ENGINE = Memory; + +INSERT INTO src_products VALUES + (1, 'catA', 'brandX'), + (2, 'catA', 'brandY'), + (3, 'catB', 'brandX'), + (4, 'catC', 'brandZ'), + (5, 'catB', 'brandZ'); + +CREATE TABLE src_geo +( + country String, + city String, + timezone String, + code UInt32 +) +ENGINE = Memory; + +INSERT INTO src_geo VALUES + ('US', 'NYC', 'UTC-5', 10001), + ('US', 'Chicago', 'UTC-6', 60601), + ('FR', 'Paris', 'UTC+1', 
75000), + ('DE', 'Berlin', 'UTC+1', 10115), + ('JP', 'Tokyo', 'UTC+9', 100000); + +CREATE DICTIONARY dict_products +( + id UInt64, + category String, + brand String +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'src_products')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(HASHED()); + +CREATE DICTIONARY dict_geo +( + country String, + city String, + timezone String, + code UInt32 +) +PRIMARY KEY country, city +SOURCE(CLICKHOUSE(TABLE 'src_geo')) +LIFETIME(MIN 0 MAX 0) +LAYOUT(COMPLEX_KEY_HASHED()); + +CREATE TABLE inputs +( + target_category String, + target_brand String, + target_timezone String +) +ENGINE = Memory; + +INSERT INTO inputs VALUES + ('catA', 'brandX', 'UTC+1'), + ('catB', 'brandZ', 'UTC+9'), + ('catX', 'brandX', 'UTC-6'); + +SELECT + target_category, + target_brand, + target_timezone, + dictGetKeys('dict_products', 'category', target_category) AS product_ids_by_category, + dictGetKeys('dict_products', 'brand', target_brand) AS product_ids_by_brand, + dictGetKeys('dict_geo', 'timezone', target_timezone) AS country_city_by_tz +FROM inputs +ORDER BY target_category, target_brand, target_timezone; + +SELECT dictGetKeys('dict_products', 'category', 'catA'); + +SELECT 'Composite value expressions'; + +SELECT dictGetKeys('dict_products', 'category', concat('cat', 'A')); + +SELECT + target_category, + dictGetKeys('dict_geo', 'timezone', concat('UTC', substring(target_timezone, 4))) AS country_city_by_tz_expr +FROM inputs +ORDER BY target_category, target_brand, target_timezone; + +SELECT 'Caching disabled'; + +SET max_reverse_dictionary_lookup_cache_size_bytes = 0; + +SELECT + target_category, + target_brand, + target_timezone, + dictGetKeys('dict_products', 'category', target_category) AS product_ids_by_category, + dictGetKeys('dict_products', 'brand', target_brand) AS product_ids_by_brand, + dictGetKeys('dict_geo', 'timezone', target_timezone) AS country_city_by_tz +FROM inputs +ORDER BY target_category, target_brand, target_timezone; diff --git a/parser/testdata/03707_analyzer_convert_outer_any_to_inner/ast.json b/parser/testdata/03707_analyzer_convert_outer_any_to_inner/ast.json new file mode 100644 index 000000000..11e385914 --- /dev/null +++ b/parser/testdata/03707_analyzer_convert_outer_any_to_inner/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001100447, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03707_analyzer_convert_outer_any_to_inner/metadata.json b/parser/testdata/03707_analyzer_convert_outer_any_to_inner/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03707_analyzer_convert_outer_any_to_inner/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03707_analyzer_convert_outer_any_to_inner/query.sql b/parser/testdata/03707_analyzer_convert_outer_any_to_inner/query.sql new file mode 100644 index 000000000..3bedc86c2 --- /dev/null +++ b/parser/testdata/03707_analyzer_convert_outer_any_to_inner/query.sql @@ -0,0 +1,26 @@ +SET enable_analyzer = 1; +SET enable_parallel_replicas = 0; +SET query_plan_join_swap_table = 0, query_plan_optimize_join_order_limit = 1; -- Changes query plan + +CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=Memory; + +INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users VALUES (6666, 'Ksenia', 48); +INSERT INTO users VALUES (8888, 'Alice', 50); + +EXPLAIN actions = 1, keep_logical_steps = 1 +SELECT 
* +FROM users u1 +WHERE uid = ( + SELECT sum(age) + FROM users u2 + WHERE u1.name = u2.name +); + +SELECT * +FROM users u1 +WHERE uid = ( + SELECT sum(age) + FROM users u2 + WHERE u1.name = u2.name +); diff --git a/parser/testdata/03707_empty_parts_with_non_empty_projections_merge/ast.json b/parser/testdata/03707_empty_parts_with_non_empty_projections_merge/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03707_empty_parts_with_non_empty_projections_merge/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03707_empty_parts_with_non_empty_projections_merge/metadata.json b/parser/testdata/03707_empty_parts_with_non_empty_projections_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03707_empty_parts_with_non_empty_projections_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03707_empty_parts_with_non_empty_projections_merge/query.sql b/parser/testdata/03707_empty_parts_with_non_empty_projections_merge/query.sql new file mode 100644 index 000000000..75aaaa1ad --- /dev/null +++ b/parser/testdata/03707_empty_parts_with_non_empty_projections_merge/query.sql @@ -0,0 +1,15 @@ +-- { echoOn } + +drop table if exists mt1; + +create table mt1 (time DateTime, projection proj (select min(time))) engine MergeTree order by () TTL time + interval 1 second settings remove_empty_parts=0, merge_with_ttl_timeout=0, deduplicate_merge_projection_mode='ignore'; + +system stop merges mt1; + +insert into mt1 select number from numbers(4) settings max_block_size=1, min_insert_block_size_bytes=1; + +system start merges mt1; + +optimize table mt1 final; + +optimize table mt1 final; diff --git a/parser/testdata/03707_function_array_remove/ast.json b/parser/testdata/03707_function_array_remove/ast.json new file mode 100644 index 000000000..e0caa0bce --- /dev/null +++ b/parser/testdata/03707_function_array_remove/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayRemove (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001469759, + "rows_read": 9, + "bytes_read": 334 + } +} diff --git a/parser/testdata/03707_function_array_remove/metadata.json b/parser/testdata/03707_function_array_remove/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03707_function_array_remove/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03707_function_array_remove/query.sql b/parser/testdata/03707_function_array_remove/query.sql new file mode 100644 index 000000000..499015eec --- /dev/null +++ b/parser/testdata/03707_function_array_remove/query.sql @@ -0,0 +1,101 @@ +SELECT arrayRemove([], 1); + +SELECT arrayRemove([0], 0); +SELECT arrayRemove([1], 1); +SELECT arrayRemove([2], 2); + +SELECT arrayRemove([1,1], 1); +SELECT arrayRemove([1,2], 1); +SELECT arrayRemove([1,1,2], 1); +SELECT arrayRemove([1,2,1], 1); +SELECT arrayRemove([2,1,1], 1); + +SELECT arrayRemove([1,2,2,3,3,3,4,4,4,4,5,5,5,5,5], 2); +SELECT 
arrayRemove([1,2,2,3,3,3,4,4,4,4,5,5,5,5,5], 3); +SELECT arrayRemove([1,2,2,3,3,3,4,4,4,4,5,5,5,5,5], 6); + +SELECT arrayRemove([1,2,3,2], 2*1); + +SELECT arrayRemove([NULL], NULL); +SELECT arrayRemove([1, NULL, 2], NULL); +SELECT arrayRemove([NULL, NULL, 1], NULL); + +SELECT arrayRemove([1, NULL, 2], 1); +SELECT arrayRemove([1, NULL, 2], 2); +SELECT arrayRemove([1, NULL, 2], 3); + +SELECT arrayRemove([1, 1, NULL, NULL, nan, nan, 2, 2, 2], NULL); +SELECT arrayRemove([1, 1, NULL, NULL, nan, nan, 2, 2, 2], nan); +SELECT arrayRemove([1, 1, NULL, NULL, nan, nan, 2, 2, 2], 2); + +SELECT arrayRemove(arrayMap(x -> 0, [NULL]), 0); +SELECT toString(arrayRemove(arrayMap(x -> 0, [NULL]), 0)); + +SELECT arrayRemove(['a','b','a'], 'a'); + +SELECT arrayRemove(['hello', 'world'], concat('wor', 'ld')); +SELECT arrayRemove(['foo', 'bar', 'foo'], repeat('f',1) || 'oo'); + +SELECT arrayRemove([[[]], [[], []], [[], []], [[]]], [[]]); +SELECT arrayRemove([[1], [1,2], [2,3], [1,2]], [1,2]); +SELECT arrayRemove([[1], [1,2], [2,3], [1,2]], [3]); + +CREATE TABLE test (array Array(UInt32), element UInt32) engine=Memory; +INSERT INTO test VALUES ([1, 2, 3, 2], 2), ([3, 4, 3, 5], 3), ([6, 7, 7, 8], 7); +SELECT arrayRemove(array, element) from test; + +SELECT arrayRemove([(1,2), (3,4)], (1,2)); + +SELECT arrayRemove([1,2,3]); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} +SELECT arrayRemove([1,2,3], 2, 3); -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + +SELECT arrayRemove(1, 1); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +SELECT arrayRemove([1,2,3], [1]); -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} + +SELECT arrayRemove([1,2,3], '1'); -- {serverError NO_COMMON_TYPE} +SELECT arrayRemove(['a', 'b', 'c'], 1); -- {serverError NO_COMMON_TYPE} + +SELECT arrayRemove(CAST(['a', NULL, 'b'] AS Array(LowCardinality(Nullable(String)))), 'a'); +SELECT arrayRemove(CAST(['a', NULL, 'b'] AS Array(LowCardinality(Nullable(String)))), NULL); + +SELECT arrayRemove( + [CAST(1 AS Dynamic), CAST(NULL AS Dynamic), CAST(2 AS Dynamic)], + 1 +); +SELECT arrayRemove( + [CAST(1 AS Dynamic), CAST(NULL AS Dynamic), CAST(2 AS Dynamic)], + NULL +); + +SELECT arrayRemove( + [ + 1::Variant(UInt8, String), + 'x'::Variant(UInt8, String), + NULL::Variant(UInt8, String) + ], + 'x'::Variant(UInt8, String) +); +SELECT arrayRemove( + [ + 1::Variant(UInt8, String), + 'x'::Variant(UInt8, String), + NULL::Variant(UInt8, String) + ], + NULL +); + +CREATE TABLE arr_test (arr Array(Int32)) ENGINE = Memory; +INSERT INTO arr_test VALUES ([1, 2, 3]), ([3, 4, 5]); +SELECT arrayRemove(arr, 3) FROM arr_test; + +CREATE TABLE elem_test (arr Array(Int32), elem Nullable(Int32)) ENGINE = Memory; +INSERT INTO elem_test VALUES ([1,2,3], 2), ([1,2,3], NULL), ([1,2,3], 1); +SELECT arrayRemove(arr, elem) FROM elem_test; + +CREATE TABLE nullable_arr (arr Array(Nullable(Int32))) ENGINE = Memory; +INSERT INTO nullable_arr VALUES ([1,2,3]), ([NULL,2,3]); +SELECT arrayRemove(arr, 2) FROM nullable_arr; +SELECT arrayRemove(arr, NULL) FROM nullable_arr; + +SELECT arrayRemove(arr, elem) +FROM (SELECT [1,2,3] AS arr, number AS elem FROM numbers(3)); diff --git a/parser/testdata/03707_statistics_cache/ast.json b/parser/testdata/03707_statistics_cache/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03707_statistics_cache/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03707_statistics_cache/metadata.json b/parser/testdata/03707_statistics_cache/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null 
+++ b/parser/testdata/03707_statistics_cache/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03707_statistics_cache/query.sql b/parser/testdata/03707_statistics_cache/query.sql new file mode 100644 index 000000000..5a275665b --- /dev/null +++ b/parser/testdata/03707_statistics_cache/query.sql @@ -0,0 +1,134 @@ +-- Tags: no-fasttest + +SET allow_experimental_statistics = 1; +SET allow_statistics_optimize = 1; +SET log_queries = 1; +SET log_query_settings = 1; +SET mutations_sync = 2; +SET max_execution_time = 60; + +-- the test relies on local execution - force parallel replicas to generate a local plan +SET parallel_replicas_local_plan=1; + +DROP TABLE IF EXISTS sc_core SYNC; + +CREATE TABLE sc_core +( + k UInt32, + v Nullable(Float64) +) +ENGINE = MergeTree +ORDER BY k +SETTINGS refresh_statistics_interval = 0; + +INSERT INTO sc_core +SELECT number, if(number % 20 = 0, NULL, toFloat64(rand()) / 4294967296.0) +FROM numbers(60000); + +ALTER TABLE sc_core ADD STATISTICS v TYPE TDigest; +ALTER TABLE sc_core MATERIALIZE STATISTICS ALL; + +------------------------------------------------------------ +-- SUM() must not trigger statistics +------------------------------------------------------------ +DROP TABLE IF EXISTS sc_unused SYNC; + +CREATE TABLE sc_unused +( + k UInt64, + val UInt64 +) +ENGINE = MergeTree +ORDER BY k +SETTINGS refresh_statistics_interval = 0; + +INSERT INTO sc_unused +SELECT number, number % 100 +FROM numbers(50000); + +ALTER TABLE sc_unused ADD STATISTICS val TYPE MinMax; +ALTER TABLE sc_unused MATERIALIZE STATISTICS ALL; + +SELECT sum(val) FROM sc_unused +SETTINGS use_statistics_cache = 0, log_comment = 'nouse-agg' FORMAT Null; + +SYSTEM FLUSH LOGS query_log; + +SELECT toUInt8(ProfileEvents['LoadedStatisticsMicroseconds'] = 0) +FROM system.query_log +WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = 'nouse-agg' +ORDER BY event_time_microseconds DESC +LIMIT 1; + +------------------------------------------------------------ +-- LowCardinality: CountMin https://github.com/ClickHouse/ClickHouse/issues/87886 +------------------------------------------------------------ +DROP TABLE IF EXISTS st_cm_lc SYNC; + +CREATE TABLE st_cm_lc +( + k UInt32, + cat LowCardinality(String) +) +ENGINE = MergeTree +ORDER BY k +SETTINGS refresh_statistics_interval = 0; + +INSERT INTO st_cm_lc +SELECT number, + if(number % 4 = 0, 'PROMO', concat('X', toString(number % 1000))) +FROM numbers(60000); + +ALTER TABLE st_cm_lc ADD STATISTICS cat TYPE CountMin; +ALTER TABLE st_cm_lc MATERIALIZE STATISTICS ALL; + +SELECT count() FROM st_cm_lc WHERE cat = 'PROMO' +SETTINGS use_statistics_cache = 0, log_comment = 'cm-lc-load' FORMAT Null; + +SYSTEM FLUSH LOGS query_log; + +SELECT toUInt8(ProfileEvents['LoadedStatisticsMicroseconds'] > 0) +FROM system.query_log +WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = 'cm-lc-load' +ORDER BY event_time_microseconds DESC +LIMIT 1; + +------------------------------------------------------------ +-- JOIN with Uniq +------------------------------------------------------------ +DROP TABLE IF EXISTS sj_a SYNC; +DROP TABLE IF EXISTS sj_b SYNC; + +CREATE TABLE sj_a (id UInt32, p UInt8) +ENGINE = MergeTree +ORDER BY id +SETTINGS refresh_statistics_interval = 0; + +CREATE TABLE sj_b (id UInt32, t LowCardinality(String)) +ENGINE = MergeTree +ORDER BY id +SETTINGS refresh_statistics_interval = 0; + +INSERT INTO sj_a SELECT number, number % 2 FROM numbers(60000); +INSERT INTO sj_b SELECT 
number, if(number % 5 = 0, 'PROMO', 'OTHER') FROM numbers(60000); + +ALTER TABLE sj_a ADD STATISTICS id TYPE Uniq; +ALTER TABLE sj_b ADD STATISTICS id TYPE Uniq; + +ALTER TABLE sj_a MATERIALIZE STATISTICS ALL; +ALTER TABLE sj_b MATERIALIZE STATISTICS ALL; + +SELECT count() +FROM sj_a a +JOIN sj_b b ON a.id = b.id +WHERE b.t = 'PROMO' +SETTINGS use_statistics_cache = 0, query_plan_optimize_join_order_limit = 10, log_comment = 'join-load' +FORMAT Null; + +SYSTEM FLUSH LOGS query_log; + +SELECT toUInt8(ProfileEvents['LoadedStatisticsMicroseconds'] > 0) +FROM system.query_log +WHERE type = 'QueryFinish' AND current_database = currentDatabase() AND log_comment = 'join-load' +ORDER BY event_time_microseconds DESC +LIMIT 1; diff --git a/parser/testdata/03708_analyzer_convert_any_outer_to_inner_2/ast.json b/parser/testdata/03708_analyzer_convert_any_outer_to_inner_2/ast.json new file mode 100644 index 000000000..ae80be503 --- /dev/null +++ b/parser/testdata/03708_analyzer_convert_any_outer_to_inner_2/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001173919, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03708_analyzer_convert_any_outer_to_inner_2/metadata.json b/parser/testdata/03708_analyzer_convert_any_outer_to_inner_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03708_analyzer_convert_any_outer_to_inner_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03708_analyzer_convert_any_outer_to_inner_2/query.sql b/parser/testdata/03708_analyzer_convert_any_outer_to_inner_2/query.sql new file mode 100644 index 000000000..b1ca46d11 --- /dev/null +++ b/parser/testdata/03708_analyzer_convert_any_outer_to_inner_2/query.sql @@ -0,0 +1,66 @@ +SET enable_analyzer = 1; +SET enable_parallel_replicas = 0; +SET query_plan_join_swap_table = 0, query_plan_optimize_join_order_limit = 1; -- Changes query plan + +CREATE TABLE users (uid Int16, name String, age Int16) ENGINE=Memory; + +INSERT INTO users VALUES (1231, 'John', 33); +INSERT INTO users VALUES (6666, 'Ksenia', 48); +INSERT INTO users VALUES (8888, 'Alice', 50); + +EXPLAIN actions = 1, keep_logical_steps = 1 +SELECT * +FROM users u1 LEFT ANY JOIN +( + SELECT sum(age)::Nullable(Int64) AS age_sum, name + FROM users + GROUP BY name +) u2 +ON u1.name = u2.name +WHERE uid < age_sum; + +SELECT * +FROM users u1 LEFT ANY JOIN +( + SELECT sum(age)::Nullable(Int64) AS age_sum, name + FROM users + GROUP BY name +) u2 +ON u1.name = u2.name +WHERE uid < age_sum; + +-- Do not convert to INNER JOIN +EXPLAIN actions = 1, keep_logical_steps = 1 +SELECT * +FROM users u1 LEFT ANY JOIN +( + SELECT sum(age)::Nullable(Int64) AS age_sum, name + FROM users + GROUP BY name WITH ROLLUP +) u2 +ON u1.name = u2.name +WHERE uid < age_sum; + +-- Do not convert to INNER JOIN +EXPLAIN actions = 1, keep_logical_steps = 1 +SELECT * +FROM users u1 LEFT ANY JOIN +( + SELECT sum(age)::Nullable(Int64) AS age_sum, name + FROM users + GROUP BY name WITH CUBE +) u2 +ON u1.name = u2.name +WHERE uid < age_sum; + +-- Do not convert to INNER JOIN +EXPLAIN actions = 1, keep_logical_steps = 1 +SELECT * +FROM users u1 LEFT ANY JOIN +( + SELECT sum(age)::Nullable(Int64) AS age_sum, name + FROM users + GROUP BY GROUPING SETS ((name), ()) +) u2 +ON u1.name = u2.name +WHERE uid < age_sum; diff --git a/parser/testdata/03708_exact_rows_before_limit_in/ast.json 
b/parser/testdata/03708_exact_rows_before_limit_in/ast.json new file mode 100644 index 000000000..140ccf85b --- /dev/null +++ b/parser/testdata/03708_exact_rows_before_limit_in/ast.json @@ -0,0 +1,112 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 5)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias v0) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1 (alias c0)" + }, + { + "explain": " Function exists (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier v0.c0" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier v0.c0" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Set" + } + ], + + "rows": 30, + + "statistics": + { + "elapsed": 0.001717805, + "rows_read": 30, + "bytes_read": 1206 + } +} diff --git a/parser/testdata/03708_exact_rows_before_limit_in/metadata.json b/parser/testdata/03708_exact_rows_before_limit_in/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03708_exact_rows_before_limit_in/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03708_exact_rows_before_limit_in/query.sql b/parser/testdata/03708_exact_rows_before_limit_in/query.sql new file mode 100644 index 000000000..e75c1cd6f --- /dev/null +++ b/parser/testdata/03708_exact_rows_before_limit_in/query.sql @@ -0,0 +1,13 @@ +SELECT 1 FROM (SELECT 1 AS c0 WHERE EXISTS (SELECT 1) LIMIT 1) v0 GROUP BY v0.c0 HAVING v0.c0 = 1 SETTINGS exact_rows_before_limit = 1; +SELECT '---------'; +SELECT 1 FROM (SELECT 1 AS c0 WHERE EXISTS (SELECT 1) LIMIT 1) v0 GROUP BY v0.c0 HAVING v0.c0 = 2 SETTINGS exact_rows_before_limit = 1; +SELECT '---------'; +SELECT 1 FROM (SELECT 1 AS c0 WHERE EXISTS (SELECT 1) LIMIT 1) v0 GROUP BY v0.c0 HAVING v0.c0 = 1 AND v0.c0 = 1 SETTINGS exact_rows_before_limit = 1; +SELECT '---------'; +SELECT 1 FROM (SELECT 1 AS c0 WHERE EXISTS (SELECT 1) LIMIT 1) v0 GROUP BY v0.c0 HAVING v0.c0 = 1 AND v0.c0 = 1 SETTINGS exact_rows_before_limit = 1; +SELECT '---------'; +SELECT 1 FROM (SELECT 1 AS c0 WHERE EXISTS (SELECT 1) LIMIT 1) v0 GROUP BY v0.c0 HAVING v0.c0 = 1 AND v0.c0 = 2 SETTINGS exact_rows_before_limit = 1; +SELECT '---------'; +SELECT 1 FROM (SELECT 1 AS c0 WHERE EXISTS (SELECT 1) LIMIT 1) v0 GROUP BY v0.c0 HAVING v0.c0 = 1 OR v0.c0 = 1 SETTINGS exact_rows_before_limit = 1; +SELECT '---------'; +SELECT 1 FROM (SELECT 1 AS c0 WHERE 
EXISTS (SELECT 1) LIMIT 1) v0 GROUP BY v0.c0 HAVING v0.c0 = 1 OR v0.c0 = 2 SETTINGS exact_rows_before_limit = 1; diff --git a/parser/testdata/03708_flush_async_insert_queue_for_table/ast.json b/parser/testdata/03708_flush_async_insert_queue_for_table/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03708_flush_async_insert_queue_for_table/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03708_flush_async_insert_queue_for_table/metadata.json b/parser/testdata/03708_flush_async_insert_queue_for_table/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03708_flush_async_insert_queue_for_table/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03708_flush_async_insert_queue_for_table/query.sql b/parser/testdata/03708_flush_async_insert_queue_for_table/query.sql new file mode 100644 index 000000000..3b1d3ab36 --- /dev/null +++ b/parser/testdata/03708_flush_async_insert_queue_for_table/query.sql @@ -0,0 +1,47 @@ +-- Tags: no-parallel + +drop table if exists `test_table with spaces`; + +create table if not exists `test_table with spaces` +( + `id` UInt64, + `value` String +) +ORDER by id; + +set async_insert = 1; +set wait_for_async_insert = 0; + +insert into `test_table with spaces` values (1, 'a'), (2, 'b'), (3, 'c'); +insert into `test_table with spaces` values (2, 'b'), (3, 'c'), (4, 'd'); + +system flush async insert queue `test_table with spaces`; +select '`test_table with spaces`', count() from `test_table with spaces`; + +insert into `test_table with spaces` values (3, 'b'), (4, 'c'), (5, 'd'); + +system flush async insert queue `test_table with spaces`; +select '`test_table with spaces`', count() from `test_table with spaces`; + +drop table `test_table with spaces`; + + +drop database if exists `this.is.a.valid.databasename`; +create database `this.is.a.valid.databasename`; + +drop table if exists `this.is.a.valid.databasename`.`test_table with spaces`; +create table `this.is.a.valid.databasename`.`test_table with spaces` +( + `id` UInt64, + `value` String +) +ORDER by id; + +insert into `this.is.a.valid.databasename`.`test_table with spaces` values (1, 'a'), (2, 'b'), (3, 'c'); +insert into `this.is.a.valid.databasename`.`test_table with spaces` values (2, 'b'), (3, 'c'), (4, 'd'); + +system flush async insert queue `this.is.a.valid.databasename`.`test_table with spaces`; +select '`this.is.a.valid.databasename`.`test_table with spaces`', count() from `this.is.a.valid.databasename`.`test_table with spaces`; + +drop table `this.is.a.valid.databasename`.`test_table with spaces`; +drop database `this.is.a.valid.databasename`; diff --git a/parser/testdata/03708_join_or_to_right_any_bug/ast.json b/parser/testdata/03708_join_or_to_right_any_bug/ast.json new file mode 100644 index 000000000..fae5567f4 --- /dev/null +++ b/parser/testdata/03708_join_or_to_right_any_bug/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001198837, + "rows_read": 5, + "bytes_read": 169 + } +} diff --git a/parser/testdata/03708_join_or_to_right_any_bug/metadata.json b/parser/testdata/03708_join_or_to_right_any_bug/metadata.json 
new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03708_join_or_to_right_any_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03708_join_or_to_right_any_bug/query.sql b/parser/testdata/03708_join_or_to_right_any_bug/query.sql new file mode 100644 index 000000000..6c0a07ea5 --- /dev/null +++ b/parser/testdata/03708_join_or_to_right_any_bug/query.sql @@ -0,0 +1,4 @@ +SELECT * +FROM ( SELECT number AS a, number + 1 AS b FROM numbers(1) ) AS l +INNER JOIN ( SELECT number AS a FROM numbers(2) ) AS r ON (l.a = r.a) OR (l.b = r.a) +ORDER BY ALL; diff --git a/parser/testdata/03708_low_cardinality_aggregate_state_compatibility/ast.json b/parser/testdata/03708_low_cardinality_aggregate_state_compatibility/ast.json new file mode 100644 index 000000000..5c472033d --- /dev/null +++ b/parser/testdata/03708_low_cardinality_aggregate_state_compatibility/ast.json @@ -0,0 +1,64 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function hex (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function maxDistinctState (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'str'" + }, + { + "explain": " Literal 'Variant(LowCardinality(String))'" + } + ], + + "rows": 14, + + "statistics": + { + "elapsed": 0.001498475, + "rows_read": 14, + "bytes_read": 591 + } +} diff --git a/parser/testdata/03708_low_cardinality_aggregate_state_compatibility/metadata.json b/parser/testdata/03708_low_cardinality_aggregate_state_compatibility/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03708_low_cardinality_aggregate_state_compatibility/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03708_low_cardinality_aggregate_state_compatibility/query.sql b/parser/testdata/03708_low_cardinality_aggregate_state_compatibility/query.sql new file mode 100644 index 000000000..329482e73 --- /dev/null +++ b/parser/testdata/03708_low_cardinality_aggregate_state_compatibility/query.sql @@ -0,0 +1,5 @@ +select hex(maxDistinctState(tuple('str'::Variant(LowCardinality(String))))); +select hex(maxDistinctState(tuple(''::Variant(LowCardinality(String))))); +select maxDistinctMerge(state) from (select maxDistinctState(tuple('str'::Variant(LowCardinality(String)))) as state); +select maxDistinctMerge(state) from (select maxDistinctState(tuple(''::Variant(LowCardinality(String)))) as state); + diff --git a/parser/testdata/03708_statistics_estimator_cast_type/ast.json b/parser/testdata/03708_statistics_estimator_cast_type/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03708_statistics_estimator_cast_type/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03708_statistics_estimator_cast_type/metadata.json b/parser/testdata/03708_statistics_estimator_cast_type/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03708_statistics_estimator_cast_type/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03708_statistics_estimator_cast_type/query.sql b/parser/testdata/03708_statistics_estimator_cast_type/query.sql new file mode 100644 index 000000000..713521e8f --- /dev/null +++ b/parser/testdata/03708_statistics_estimator_cast_type/query.sql @@ -0,0 +1,23 @@ +-- Tags: no-fasttest + +CREATE TABLE dt64test +( + `dt64_column` DateTime64(3), + `dt_column` DateTime DEFAULT toDateTime(dt64_column) +) +ENGINE = MergeTree +PARTITION BY toYYYYMM(dt64_column) +ORDER BY dt64_column SETTINGS auto_statistics_types='tdigest'; + +SET allow_statistics_optimize = 1; + +INSERT INTO dt64test (`dt64_column`) VALUES ('2020-01-13 13:37:00'); + +SELECT 'dt < const dt64' FROM dt64test WHERE dt_column < toDateTime64('2020-01-13 13:37:00', 3); + +CREATE TABLE t1 (c0 Decimal(18,0)) ENGINE = MergeTree() ORDER BY (c0) SETTINGS auto_statistics_types='countmin'; +INSERT INTO TABLE t1(c0) VALUES (1); + +SELECT c0 = 6812671276462221925::Int64 FROM t1; +SELECT 1 FROM t1 WHERE c0 = 6812671276462221925::Int64; + diff --git a/parser/testdata/03709_anti_join_runtime_filters/ast.json b/parser/testdata/03709_anti_join_runtime_filters/ast.json new file mode 100644 index 000000000..d3ace2fa5 --- /dev/null +++ b/parser/testdata/03709_anti_join_runtime_filters/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001160172, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03709_anti_join_runtime_filters/metadata.json b/parser/testdata/03709_anti_join_runtime_filters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03709_anti_join_runtime_filters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03709_anti_join_runtime_filters/query.sql b/parser/testdata/03709_anti_join_runtime_filters/query.sql new file mode 100644 index 000000000..66b2ce3d4 --- /dev/null +++ b/parser/testdata/03709_anti_join_runtime_filters/query.sql @@ -0,0 +1,41 @@ +SET enable_analyzer = 1; +SET enable_join_runtime_filters = 1; + +CREATE TABLE nation(n_nationkey Int32, n_name String) ENGINE MergeTree ORDER BY n_nationkey; +CREATE TABLE customer(c_custkey Int32, c_nationkey Int32) ENGINE MergeTree ORDER BY c_custkey; + +INSERT INTO nation VALUES (5,'ETHIOPIA'),(6,'FRANCE'),(7,'GERMANY'); +INSERT INTO customer SELECT number, 5 - (number % 2) FROM numbers(500); + +SET enable_parallel_replicas=0; +SET query_plan_join_swap_table=0; + +-- LEFT ANTI JOIN +SELECT REGEXP_REPLACE(trimLeft(explain), '_runtime_filter_\\d+', '_runtime_filter_UNIQ_ID') +FROM ( + EXPLAIN actions=1 + SELECT count() + FROM customer LEFT ANTI JOIN nation + ON c_nationkey = n_nationkey +) +WHERE (explain ILIKE '%Filter column%') OR (explain LIKE '%BuildRuntimeFilter%') OR (explain LIKE '% Type:%') OR (explain LIKE '% Strictness:%'); + +SELECT count() +FROM customer LEFT ANTI JOIN nation +ON c_nationkey = n_nationkey +SETTINGS enable_join_runtime_filters = 0; + +SELECT count() +FROM customer LEFT ANTI JOIN nation +ON c_nationkey = n_nationkey +SETTINGS enable_join_runtime_filters = 1; + +-- RIGHT ANTI JOIN +SELECT REGEXP_REPLACE(trimLeft(explain), '_runtime_filter_\\d+', '_runtime_filter_UNIQ_ID') +FROM ( + EXPLAIN actions=1 + SELECT count() + FROM customer RIGHT ANTI JOIN nation + ON c_nationkey = n_nationkey +) +WHERE (explain ILIKE 
'%Filter column%') OR (explain LIKE '%BuildRuntimeFilter%') OR (explain LIKE '% Type:%') OR (explain LIKE '% Strictness:%'); diff --git a/parser/testdata/03709_coalescing_final/ast.json b/parser/testdata/03709_coalescing_final/ast.json new file mode 100644 index 000000000..97a5a83d6 --- /dev/null +++ b/parser/testdata/03709_coalescing_final/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_table (children 1)" + }, + { + "explain": " Identifier test_table" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001294374, + "rows_read": 2, + "bytes_read": 72 + } +} diff --git a/parser/testdata/03709_coalescing_final/metadata.json b/parser/testdata/03709_coalescing_final/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03709_coalescing_final/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03709_coalescing_final/query.sql b/parser/testdata/03709_coalescing_final/query.sql new file mode 100644 index 000000000..1cd2f1cf5 --- /dev/null +++ b/parser/testdata/03709_coalescing_final/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS test_table; + +CREATE TABLE test_table +( + key UInt32, + A Nullable(UInt32), + B Nullable(String), + C Tuple(Nullable(String)) +) +ENGINE = CoalescingMergeTree() +ORDER BY key; + +SET optimize_on_insert = 0; + +INSERT INTO test_table SELECT 1, Null, '', tuple(toNullable('xxx')) x; + +OPTIMIZE TABLE test_table FINAL; + +SELECT * FROM test_table FINAL; + +DROP TABLE test_table; diff --git a/parser/testdata/03709_parallel_replicas_right_join_with_distributed/ast.json b/parser/testdata/03709_parallel_replicas_right_join_with_distributed/ast.json new file mode 100644 index 000000000..62a5cec82 --- /dev/null +++ b/parser/testdata/03709_parallel_replicas_right_join_with_distributed/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery local_table_l (children 1)" + }, + { + "explain": " Identifier local_table_l" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001431057, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/03709_parallel_replicas_right_join_with_distributed/metadata.json b/parser/testdata/03709_parallel_replicas_right_join_with_distributed/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03709_parallel_replicas_right_join_with_distributed/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03709_parallel_replicas_right_join_with_distributed/query.sql b/parser/testdata/03709_parallel_replicas_right_join_with_distributed/query.sql new file mode 100644 index 000000000..a334aa3c0 --- /dev/null +++ b/parser/testdata/03709_parallel_replicas_right_join_with_distributed/query.sql @@ -0,0 +1,38 @@ +DROP TABLE IF EXISTS local_table_l; +DROP TABLE IF EXISTS local_table_r; +DROP TABLE IF EXISTS dis_table_r; + +CREATE TABLE local_table_l +( + `c` Int32 +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 1; + +CREATE TABLE local_table_r +( + `c` Int32 +) +ENGINE = MergeTree +ORDER BY tuple() +SETTINGS index_granularity = 1; + +CREATE TABLE dis_table_r +( + `c` Int32 +) +ENGINE = Distributed(test_cluster_one_shard_three_replicas_localhost, currentDatabase(), 'local_table_r'); + +SET parallel_replicas_only_with_analyzer=0; +SET serialize_query_plan=0, parallel_replicas_mark_segment_size=1, max_threads=1; 
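+-- Parallel replicas setup (clarifying comment): build the query plan locally on the initiator, use up to 3 replicas of the single-shard localhost test cluster, and apply parallel replicas to non-replicated MergeTree tables as well.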
+SET enable_parallel_replicas=1, parallel_replicas_local_plan=1, max_parallel_replicas=3, parallel_replicas_for_non_replicated_merge_tree=1, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; + +INSERT INTO local_table_l SELECT number AS c FROM numbers(10000); +INSERT INTO local_table_r SELECT number AS c FROM numbers(10000); + +SELECT count() FROM local_table_l AS l RIGHT JOIN dis_table_r AS r ON l.c = r.c; + +DROP TABLE local_table_l; +DROP TABLE local_table_r; +DROP TABLE dis_table_r; diff --git a/parser/testdata/03709_replicated_columns_right_join/ast.json b/parser/testdata/03709_replicated_columns_right_join/ast.json new file mode 100644 index 000000000..8409f946b --- /dev/null +++ b/parser/testdata/03709_replicated_columns_right_join/ast.json @@ -0,0 +1,163 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 4)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (alias left) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " TablesInSelectQueryElement (children 2)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (alias right) (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 4)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Identifier number (alias x)" + }, + { + "explain": " Function concat (alias str) (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal 'str'" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function arrayJoin (alias i) (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function range (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_10" + }, + { + "explain": " TableJoin (children 1)" + }, + { + "explain": " Function equals (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier left.number" + }, + { + "explain": " Identifier right.number" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier right.number" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Identifier i" + }, + { + "explain": " Set" + } + ], + + "rows": 47, + + "statistics": + { + "elapsed": 0.001795483, + "rows_read": 47, + "bytes_read": 1972 + } +} diff --git a/parser/testdata/03709_replicated_columns_right_join/metadata.json 
b/parser/testdata/03709_replicated_columns_right_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03709_replicated_columns_right_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03709_replicated_columns_right_join/query.sql b/parser/testdata/03709_replicated_columns_right_join/query.sql new file mode 100644 index 000000000..6888bff05 --- /dev/null +++ b/parser/testdata/03709_replicated_columns_right_join/query.sql @@ -0,0 +1 @@ +select * from numbers(10, 10) as left right join (select number, number as x, 'str' || number as str, arrayJoin(range(number)) as i from numbers(10)) as right on left.number = right.number order by right.number, i settings enable_lazy_columns_replication=1; diff --git a/parser/testdata/03710_analyzer_limit_by_aggregate_validation/ast.json b/parser/testdata/03710_analyzer_limit_by_aggregate_validation/ast.json new file mode 100644 index 000000000..4d4395478 --- /dev/null +++ b/parser/testdata/03710_analyzer_limit_by_aggregate_validation/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001383851, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03710_analyzer_limit_by_aggregate_validation/metadata.json b/parser/testdata/03710_analyzer_limit_by_aggregate_validation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03710_analyzer_limit_by_aggregate_validation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03710_analyzer_limit_by_aggregate_validation/query.sql b/parser/testdata/03710_analyzer_limit_by_aggregate_validation/query.sql new file mode 100644 index 000000000..09166bf1f --- /dev/null +++ b/parser/testdata/03710_analyzer_limit_by_aggregate_validation/query.sql @@ -0,0 +1,17 @@ +SET allow_experimental_analyzer = 1; + +DROP TABLE IF EXISTS test_limit_by_validation; +CREATE TABLE test_limit_by_validation (c0 Int32, c1 Int32, c2 Int32) ENGINE = Memory; +INSERT INTO test_limit_by_validation VALUES (1, 10, 100), (1, 20, 200), (2, 30, 300); + +SELECT c0 FROM test_limit_by_validation GROUP BY c0 LIMIT 1 BY c1; -- { serverError NOT_AN_AGGREGATE } + +SELECT c0, sum(c2) as s FROM test_limit_by_validation GROUP BY c0 LIMIT 1 BY c1; -- { serverError NOT_AN_AGGREGATE } + +SELECT c0, c1 FROM test_limit_by_validation GROUP BY c0, c1 ORDER BY c0, c1 LIMIT 1 BY c1; + +SELECT c0, sum(c1) as s FROM test_limit_by_validation GROUP BY c0 ORDER BY c0 LIMIT 1 BY c0; + +SELECT c0 + 1 as expr FROM test_limit_by_validation GROUP BY c0 + 1 ORDER BY expr LIMIT 1 BY expr; + +DROP TABLE test_limit_by_validation; diff --git a/parser/testdata/03710_argAndMinMax/ast.json b/parser/testdata/03710_argAndMinMax/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03710_argAndMinMax/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03710_argAndMinMax/metadata.json b/parser/testdata/03710_argAndMinMax/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03710_argAndMinMax/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03710_argAndMinMax/query.sql b/parser/testdata/03710_argAndMinMax/query.sql new file mode 100644 index 000000000..7e4e830e7 --- /dev/null +++ b/parser/testdata/03710_argAndMinMax/query.sql @@ -0,0 +1,16 @@ +-- types +select argAndMin(x.1, 
x.2), argAndMax(x.1, x.2) from (select (number, number + 1) as x from numbers(10)); +select argAndMin(x.1, x.2), argAndMax(x.1, x.2) from (select (toString(number), toInt32(number) + 1) as x from numbers(10)); +select argAndMin(x.1, x.2), argAndMax(x.1, x.2) from (select (toDate(number, 'UTC'), toDateTime(number, 'UTC') + 1) as x from numbers(10)); +select argAndMin(x.1, x.2), argAndMax(x.1, x.2) from (select (toDecimal32(number, 2), toDecimal64(number, 2) + 1) as x from numbers(10)); + +-- array +SELECT + argAndMinArray(id, num), + argAndMaxArray(id, num) +FROM +( + SELECT + arrayJoin([[10, 4, 3], [7, 5, 6], [8, 8, 2]]) AS num, + arrayJoin([[1, 2, 4]]) AS id +); diff --git a/parser/testdata/03710_array_join_in_map_bug/ast.json b/parser/testdata/03710_array_join_in_map_bug/ast.json new file mode 100644 index 000000000..6059f1558 --- /dev/null +++ b/parser/testdata/03710_array_join_in_map_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000998824, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03710_array_join_in_map_bug/metadata.json b/parser/testdata/03710_array_join_in_map_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03710_array_join_in_map_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03710_array_join_in_map_bug/query.sql b/parser/testdata/03710_array_join_in_map_bug/query.sql new file mode 100644 index 000000000..3d9ebd81f --- /dev/null +++ b/parser/testdata/03710_array_join_in_map_bug/query.sql @@ -0,0 +1,33 @@ +SET enable_analyzer=1; + +SELECT DISTINCT + firstNonDefault(__table3.dummy, __table4.dummy) AS dummy, + __table4.`isNotNull(map(assumeNotNull(isNull(3)), NULL))` AS `isNotNull(map(assumeNotNull(isNull(3)), NULL))`, + __table4.`isNotDistinctFrom(3, isZeroOrNull(isNotNull(3)))` AS `isNotDistinctFrom(3, isZeroOrNull(isNotNull(3)))` +FROM system.one AS __table3 +ALL FULL OUTER JOIN +( + SELECT DISTINCT + __table5.dummy AS dummy, + __table5.`isNotNull(map(assumeNotNull(isNull(3)), NULL))` AS `isNotNull(map(assumeNotNull(isNull(3)), NULL))`, + _CAST(0, 'UInt8') AS `isNotDistinctFrom(3, isZeroOrNull(isNotNull(3)))` + FROM + ( + SELECT + __table6.dummy AS dummy, + _CAST(1, 'UInt8') AS `isNotNull(map(assumeNotNull(isNull(3)), NULL))` + FROM remote('127.0.0.3') AS __table6 + ) AS __table5 +) AS __table4 USING (dummy) +LEFT ARRAY JOIN [equals(materialize(3), _CAST(1, 'UInt8'))] AS __array_join_exp_2 +LEFT ARRAY JOIN map(_CAST(0, 'UInt8'), materialize(2), _CAST(0, 'UInt8'), _CAST('1', 'UInt128')) AS __array_join_exp_1 +GROUP BY + materialize(1), + __table4.`isNotNull(map(assumeNotNull(isNull(3)), NULL))`, + firstNonDefault(__table3.dummy, __table4.dummy), + __table4.`isNotDistinctFrom(3, isZeroOrNull(isNotNull(3)))`, + __table4.`isNotNull(map(assumeNotNull(isNull(3)), NULL))` + WITH CUBE +SETTINGS enable_lazy_columns_replication = 1; + + diff --git a/parser/testdata/03710_empty_tuple_lhs_in_function/ast.json b/parser/testdata/03710_empty_tuple_lhs_in_function/ast.json new file mode 100644 index 000000000..976d1ecd5 --- /dev/null +++ b/parser/testdata/03710_empty_tuple_lhs_in_function/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " 
SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function in (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal 'Tuple()'" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001509318, + "rows_read": 13, + "bytes_read": 484 + } +} diff --git a/parser/testdata/03710_empty_tuple_lhs_in_function/metadata.json b/parser/testdata/03710_empty_tuple_lhs_in_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03710_empty_tuple_lhs_in_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03710_empty_tuple_lhs_in_function/query.sql b/parser/testdata/03710_empty_tuple_lhs_in_function/query.sql new file mode 100644 index 000000000..ae33bc977 --- /dev/null +++ b/parser/testdata/03710_empty_tuple_lhs_in_function/query.sql @@ -0,0 +1,90 @@ +SELECT CAST(tuple(), 'Tuple()') IN (tuple()); + +SELECT CAST(tuple(), 'Tuple()') IN [tuple()]; + +SELECT CAST(tuple(), 'Tuple()') IN [tuple(), tuple()]; + +SELECT CAST(tuple(), 'Tuple()') NOT IN (tuple()); + +SELECT CAST(tuple(), 'Tuple()') IN (tuple(1));-- { serverError TYPE_MISMATCH } + +SELECT CAST(tuple(), 'Tuple()') IN [()]; + +SELECT CAST(tuple(), 'Tuple()') IN (()); + +SELECT tuple() IN (tuple()); + +SELECT [tuple()] IN [[tuple()], [tuple()]]; + +SELECT [tuple()] IN [()]; + +SELECT tuple() IN (((tuple()))); + +SELECT tuple() IN [(((tuple())))]; + +DROP TABLE IF EXISTS test_empty_tuple; +CREATE TABLE test_empty_tuple (t Tuple()) ENGINE = Memory; +INSERT INTO test_empty_tuple VALUES (tuple()), (tuple()), (tuple()); + +SELECT t FROM test_empty_tuple WHERE t IN (tuple()); + +SELECT t FROM test_empty_tuple WHERE t IN [tuple()]; + +SELECT t FROM test_empty_tuple WHERE t IN [()]; + +SELECT t FROM test_empty_tuple WHERE t IN [tuple(), tuple()]; + +SELECT t FROM test_empty_tuple WHERE t IN tuple(); + +SELECT t FROM test_empty_tuple WHERE [t] IN [tuple()]; + +SELECT count() FROM test_empty_tuple WHERE t IN [tuple()]; + +SELECT arrayJoin([tuple(), tuple()]) IN (tuple()); + + +SET enable_analyzer = 0; + +SELECT CAST(tuple(), 'Tuple()') IN (tuple()); + +SELECT CAST(tuple(), 'Tuple()') IN [tuple()]; + +SELECT CAST(tuple(), 'Tuple()') IN [tuple(), tuple()]; + +SELECT CAST(tuple(), 'Tuple()') NOT IN (tuple()); + +SELECT CAST(tuple(), 'Tuple()') IN (tuple(1));-- { serverError TYPE_MISMATCH } + +SELECT CAST(tuple(), 'Tuple()') IN [()]; + +SELECT CAST(tuple(), 'Tuple()') IN (()); + +SELECT tuple() IN (tuple()); + +SELECT [tuple()] IN [[tuple()], [tuple()]]; + +SELECT [tuple()] IN [()]; + +SELECT tuple() IN (((tuple()))); + +SELECT tuple() IN [(((tuple())))]; + +DROP TABLE IF EXISTS test_empty_tuple; +CREATE TABLE test_empty_tuple (t Tuple()) ENGINE = Memory; +INSERT INTO test_empty_tuple VALUES (tuple()), (tuple()), (tuple()); + +SELECT t FROM test_empty_tuple WHERE t IN (tuple()); + +SELECT t FROM test_empty_tuple WHERE t IN [tuple()]; + +SELECT t FROM test_empty_tuple WHERE t IN [()]; + +SELECT t FROM test_empty_tuple WHERE t IN [tuple(), tuple()]; + +SELECT t FROM test_empty_tuple WHERE t IN tuple(); + +SELECT t FROM test_empty_tuple WHERE [t] IN [tuple()]; + +SELECT count() FROM test_empty_tuple 
WHERE t IN [tuple()]; + +SELECT arrayJoin([tuple(), tuple()]) IN (tuple()); diff --git a/parser/testdata/03710_midpoint/ast.json b/parser/testdata/03710_midpoint/ast.json new file mode 100644 index 000000000..de6ca91a9 --- /dev/null +++ b/parser/testdata/03710_midpoint/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery midpoint_test (children 1)" + }, + { + "explain": " Identifier midpoint_test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001105742, + "rows_read": 2, + "bytes_read": 78 + } +} diff --git a/parser/testdata/03710_midpoint/metadata.json b/parser/testdata/03710_midpoint/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03710_midpoint/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03710_midpoint/query.sql b/parser/testdata/03710_midpoint/query.sql new file mode 100644 index 000000000..906f9d419 --- /dev/null +++ b/parser/testdata/03710_midpoint/query.sql @@ -0,0 +1,75 @@ +DROP TABLE IF EXISTS midpoint_test; +CREATE TABLE midpoint_test +( + ui8 UInt8, + ui16 UInt16, + ui32 UInt32, + i8 Int8, + i16 Int16, + i32 Int32, + f32 Float32, + f64 Float64, + d32 Decimal32(3), + d64 Decimal64(3), +) +ENGINE = Memory; + +INSERT INTO midpoint_test VALUES + (1, 10, 100, -1, -10, -100, 1.5, 10.5, 1.234, 10.987), + (100, 200, 300, 50, 150, 250, 10.0, 20.0, 100.123, 200.456); + +-- =============================================================== +-- Integer types (signed, unsigned, mixed) +-- =============================================================== + +SELECT midpoint(ui8, ui16) AS result, toTypeName(result) AS type FROM midpoint_test; +SELECT midpoint(i8, i16) AS result, toTypeName(result) AS type FROM midpoint_test; +SELECT midpoint(ui32, i32) AS result, toTypeName(result) AS type FROM midpoint_test; + +-- With 3 args +SELECT midpoint(i8, i16, i32) AS result, toTypeName(result) AS type FROM midpoint_test; + +-- =============================================================== +-- Floating-point types +-- =============================================================== + +SELECT midpoint(f32, f64) AS result, toTypeName(result) AS type FROM midpoint_test; + +-- 3 args (float) +SELECT midpoint(f32, f64, 42.0) AS result, toTypeName(result) AS type FROM midpoint_test; + +-- =============================================================== +-- Mixed integer + float +-- =============================================================== + +SELECT midpoint(i32, f32) AS result, toTypeName(result) AS type FROM midpoint_test; +SELECT midpoint(f64, ui16, 122) AS result, toTypeName(result) AS type FROM midpoint_test; + +-- =============================================================== +-- Decimal types +-- =============================================================== + +SELECT midpoint(d32, d64) AS result, toTypeName(result) AS type FROM midpoint_test; + +-- Mixed decimal + integer +SELECT midpoint(d32, i32) AS result, toTypeName(result) AS type FROM midpoint_test; + +-- 3 args mixed decimal + int +SELECT midpoint(d64, 20, i32) AS result, toTypeName(result) AS type FROM midpoint_test; + +-- =============================================================== +-- Temporal types +-- =============================================================== + +SELECT midpoint(toDate('2025-01-01'), toDate('2025-01-05')) AS result, toTypeName(result) AS type; +SELECT midpoint(toDateTime('2025-01-01 00:00:00'), toDateTime('2025-01-03 12:00:00')) 
AS result, toTypeName(result) AS type; +SELECT midpoint(toTime64('12:00:00', 0), toTime64('14:00:00', 0)) AS result, toTypeName(result) AS type; + +-- =============================================================== +-- Nulls +-- =============================================================== + +SELECT midpoint(123, null) AS result, toTypeName(result) AS type; +SELECT midpoint(3, 1.5, null) AS result, toTypeName(result) AS type; +SELECT midpoint(null, null) AS result, toTypeName(result) AS type; +SELECT midpoint(null, null, null) AS result, toTypeName(result) AS type; diff --git a/parser/testdata/03710_pr_insert_into_mv_with_join/ast.json b/parser/testdata/03710_pr_insert_into_mv_with_join/ast.json new file mode 100644 index 000000000..3133108f3 --- /dev/null +++ b/parser/testdata/03710_pr_insert_into_mv_with_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mv (children 1)" + }, + { + "explain": " Identifier mv" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001078248, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03710_pr_insert_into_mv_with_join/metadata.json b/parser/testdata/03710_pr_insert_into_mv_with_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03710_pr_insert_into_mv_with_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03710_pr_insert_into_mv_with_join/query.sql b/parser/testdata/03710_pr_insert_into_mv_with_join/query.sql new file mode 100644 index 000000000..596dac67b --- /dev/null +++ b/parser/testdata/03710_pr_insert_into_mv_with_join/query.sql @@ -0,0 +1,28 @@ +DROP TABLE IF EXISTS mv; +DROP TABLE IF EXISTS n1_n2_join; +DROP TABLE IF EXISTS n1; +DROP TABLE IF EXISTS n2; + +CREATE TABLE n1 (key UInt64, value String) ENGINE = MergeTree ORDER BY key SETTINGS index_granularity=1; + +CREATE TABLE n2 (key UInt64, value Int64) ENGINE = MergeTree ORDER BY key SETTINGS index_granularity=1; + +CREATE TABLE n1_n2_join (k UInt64, v1 String, v2 Int64) ENGINE = MergeTree ORDER BY k; + +CREATE MATERIALIZED VIEW mv TO n1_n2_join +AS SELECT n1.key as k, n1.value as v1, n2.value as v2 from n1 JOIN n2 ON n1.key = n2.key ORDER BY n1.key; + +INSERT INTO n2 SELECT number, -number FROM numbers(10); + +SET enable_parallel_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost', parallel_replicas_for_non_replicated_merge_tree=1; + +-- inserting into n1 (left table) triggers JOIN in the materialized view +INSERT INTO n1 values(0, '11'); +INSERT INTO n1 SELECT number, toString(number) FROM numbers(10); + +SELECT * FROM n1_n2_join ORDER BY ALL; + +DROP TABLE IF EXISTS mv; +DROP TABLE IF EXISTS n1_n2_join; +DROP TABLE IF EXISTS n1; +DROP TABLE IF EXISTS n2; diff --git a/parser/testdata/03710_pr_join_with_mv/ast.json b/parser/testdata/03710_pr_join_with_mv/ast.json new file mode 100644 index 000000000..45277db0d --- /dev/null +++ b/parser/testdata/03710_pr_join_with_mv/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mv2 (children 1)" + }, + { + "explain": " Identifier mv2" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001208387, + "rows_read": 2, + "bytes_read": 58 + } +} diff --git a/parser/testdata/03710_pr_join_with_mv/metadata.json b/parser/testdata/03710_pr_join_with_mv/metadata.json new file mode 100644 index 
000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03710_pr_join_with_mv/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03710_pr_join_with_mv/query.sql b/parser/testdata/03710_pr_join_with_mv/query.sql new file mode 100644 index 000000000..41a8d08f1 --- /dev/null +++ b/parser/testdata/03710_pr_join_with_mv/query.sql @@ -0,0 +1,45 @@ +DROP TABLE IF EXISTS mv2; +DROP TABLE IF EXISTS mv; +DROP TABLE IF EXISTS n1_n2_join; +DROP TABLE IF EXISTS n1; +DROP TABLE IF EXISTS n2; +DROP TABLE IF EXISTS n3; + +CREATE TABLE n1 (key UInt64, value String) ENGINE = MergeTree ORDER BY key SETTINGS index_granularity=1; + +CREATE TABLE n2 (key UInt64, value Int64) ENGINE = MergeTree ORDER BY key SETTINGS index_granularity=1; + +CREATE TABLE n1_n2_join (k UInt64, v1 String, v2 Int64) ENGINE = MergeTree ORDER BY k; + +-- mv with explicit target table +CREATE MATERIALIZED VIEW mv TO n1_n2_join +AS SELECT n1.key as k, n1.value as v1, n2.value as v2 from n1 JOIN n2 ON n1.key = n2.key ORDER BY n1.key; + +INSERT INTO n2 SELECT number, -number FROM numbers(10); +INSERT INTO n1 SELECT number as key, toString(key) FROM numbers(10); + +CREATE TABLE n3 (key UInt64, value String) ENGINE = MergeTree ORDER BY key SETTINGS index_granularity=1; +INSERT INTO n3 SELECT number, toString(number + 100) FROM numbers(10); + +SET enable_parallel_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost', parallel_replicas_for_non_replicated_merge_tree=1; +(SELECT * FROM mv JOIN n3 ON mv.k = n3.key ORDER BY mv.k, n3.key) +EXCEPT +(SELECT * FROM mv JOIN n3 ON mv.k = n3.key ORDER BY mv.k, n3.key settings enable_parallel_replicas=0); + +-- materialized view with inner table +CREATE MATERIALIZED VIEW mv2 +AS SELECT n1.key as k, n1.value as v1, n2.value as v2 from n1 JOIN n2 ON n1.key = n2.key ORDER BY n1.key; + +INSERT INTO n2 SELECT number, -number FROM numbers(10); +INSERT INTO n1 SELECT number as key, toString(key) FROM numbers(10); + +(SELECT * FROM mv2 JOIN n3 ON mv2.k = n3.key ORDER BY mv2.k, n3.key) +EXCEPT +(SELECT * FROM mv2 JOIN n3 ON mv2.k = n3.key ORDER BY mv2.k, n3.key settings enable_parallel_replicas=0); + +DROP TABLE mv2; +DROP TABLE mv; +DROP TABLE n1_n2_join; +DROP TABLE n1; +DROP TABLE n2; +DROP TABLE n3; diff --git a/parser/testdata/03711_deduplication_blocks_part_log/ast.json b/parser/testdata/03711_deduplication_blocks_part_log/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03711_deduplication_blocks_part_log/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03711_deduplication_blocks_part_log/metadata.json b/parser/testdata/03711_deduplication_blocks_part_log/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03711_deduplication_blocks_part_log/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03711_deduplication_blocks_part_log/query.sql b/parser/testdata/03711_deduplication_blocks_part_log/query.sql new file mode 100644 index 000000000..3621396fa --- /dev/null +++ b/parser/testdata/03711_deduplication_blocks_part_log/query.sql @@ -0,0 +1,87 @@ +-- Tags: no-parallel, no-parallel-replicas + +-- no-parallel-replicas -- https://github.com/ClickHouse/ClickHouse/issues/90063 + +DROP DATABASE IF EXISTS 03710_database; +CREATE DATABASE 03710_database; + +DROP TABLE IF EXISTS 03710_database.03711_join_with; +CREATE TABLE 03710_database.03711_join_with +( + id UInt32, + value String +) +ENGINE = 
MergeTree() +ORDER BY id +SETTINGS min_bytes_for_wide_part = 10000, min_rows_for_wide_part = 10000, serialization_info_version = 'basic', string_serialization_version = 'with_size_stream'; + +SYSTEM STOP MERGES 03710_database.03711_join_with; + +INSERT INTO 03710_database.03711_join_with VALUES (1, 'a1'), (1, 'b1'), (1, 'c1'); +INSERT INTO 03710_database.03711_join_with VALUES (2, 'a2'), (2, 'b2'), (2, 'c2'); + +DROP TABLE IF EXISTS 03710_database.03711_table; +CREATE TABLE 03710_database.03711_table +( + id UInt32 +) +ENGINE = MergeTree() +ORDER BY id +SETTINGS min_bytes_for_wide_part = 10000, min_rows_for_wide_part = 10000, serialization_info_version = 'basic', string_serialization_version = 'with_size_stream'; + +SYSTEM STOP MERGES 03710_database.03711_table; + +DROP TABLE IF EXISTS 03710_database.03711_mv_table_1; +CREATE TABLE 03710_database.03711_mv_table_1 +( + id UInt32, + value String +) +ENGINE = MergeTree() +ORDER BY id +SETTINGS min_bytes_for_wide_part = 10000, min_rows_for_wide_part = 10000, serialization_info_version = 'basic', string_serialization_version = 'with_size_stream'; + +SYSTEM STOP MERGES 03710_database.03711_mv_table_1; + +DROP TABLE IF EXISTS 03710_database.03711_mv_table_2; +CREATE TABLE 03710_database.03711_mv_table_2 +( + id UInt32, + value String +) +ENGINE = MergeTree() +ORDER BY id +SETTINGS min_bytes_for_wide_part = 10000, min_rows_for_wide_part = 10000, serialization_info_version = 'basic', string_serialization_version = 'with_size_stream'; + +SYSTEM STOP MERGES 03710_database.03711_mv_table_2; + +DROP TABLE IF EXISTS 03710_database.03711_mv_1; +CREATE MATERIALIZED VIEW 03710_database.03711_mv_1 +TO 03710_database.03711_mv_table_1 AS +SELECT r.id as id, r.value as value FROM 03710_database.03711_table as l JOIN 03710_database.03711_join_with as r ON l.id == r.id and l.id = 1; + +DROP TABLE IF EXISTS 03710_database.03711_mv_2; +CREATE MATERIALIZED VIEW 03710_database.03711_mv_2 +TO 03710_database.03711_mv_table_2 AS +SELECT r.id as id, r.value as value FROM 03710_database.03711_table as l JOIN 03710_database.03711_join_with as r ON l.id == r.id and l.id = 2; + +SET deduplicate_blocks_in_dependent_materialized_views=1; + +SET async_insert=0; +SET max_block_size=1; +SET max_insert_block_size=1; +SET min_insert_block_size_rows=0; +SET min_insert_block_size_bytes=0; + +INSERT INTO 03710_database.03711_table VALUES (1), (2); + +SYSTEM FLUSH LOGS part_log; + +SELECT table, name, argMax(part_type, event_time_microseconds), argMax(deduplication_block_ids, event_time_microseconds) FROM system.part_log +WHERE + table IN ['03711_join_with', '03711_table', '03711_mv_table_1', '03711_mv_table_2'] + AND database = '03710_database' +group BY database, table, name +ORDER BY ALL; + +DROP DATABASE 03710_database; diff --git a/parser/testdata/03711_json_skip_invalid_fields/ast.json b/parser/testdata/03711_json_skip_invalid_fields/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03711_json_skip_invalid_fields/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03711_json_skip_invalid_fields/metadata.json b/parser/testdata/03711_json_skip_invalid_fields/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03711_json_skip_invalid_fields/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03711_json_skip_invalid_fields/query.sql b/parser/testdata/03711_json_skip_invalid_fields/query.sql new file mode 100644 index 000000000..811be2fc1 --- /dev/null +++ 
b/parser/testdata/03711_json_skip_invalid_fields/query.sql @@ -0,0 +1,12 @@ +-- Test type_json_skip_invalid_typed_paths setting + +-- Test 1: JSON type column with typed paths - skip invalid field where type conversion fails +SELECT 'Test 1: Skip invalid typed path - string cannot be coerced to Int64'; +SELECT '{"a": "not_an_int", "b": "valid", "c": 123}'::JSON(a Int64, b String, c Int32) +SETTINGS type_json_skip_invalid_typed_paths = 1; + +-- Test 2: JSON type column - verify error is thrown when setting is disabled + +SELECT 'Test 2: Verify error thrown when setting disabled'; +SELECT '{"a": "not_an_int", "b": "valid", "c": 123}'::JSON(a Int64, b String, c Int32) +SETTINGS type_json_skip_invalid_typed_paths = 0; -- { serverError INCORRECT_DATA } diff --git a/parser/testdata/03711_merge_tree_deduplication_with_disk_not_support_writing_with_append/ast.json b/parser/testdata/03711_merge_tree_deduplication_with_disk_not_support_writing_with_append/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03711_merge_tree_deduplication_with_disk_not_support_writing_with_append/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03711_merge_tree_deduplication_with_disk_not_support_writing_with_append/metadata.json b/parser/testdata/03711_merge_tree_deduplication_with_disk_not_support_writing_with_append/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03711_merge_tree_deduplication_with_disk_not_support_writing_with_append/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03711_merge_tree_deduplication_with_disk_not_support_writing_with_append/query.sql b/parser/testdata/03711_merge_tree_deduplication_with_disk_not_support_writing_with_append/query.sql new file mode 100644 index 000000000..aeb1da3f1 --- /dev/null +++ b/parser/testdata/03711_merge_tree_deduplication_with_disk_not_support_writing_with_append/query.sql @@ -0,0 +1,197 @@ +-- Tags: long, no-replicated-database, no-fasttest +-- Tag no-replicated-database: Unsupported type of ALTER query + +-- Suppress error logs: `temporary_directories_lifetime` is 1 second here (the default is 1 day). +-- When an inserted part turns out to be a duplicate, it must be removed. However, because the lifetime is so short, +-- the part might already have been removed by the `MergeTreeData::clearOldTemporaryDirectories` thread, +-- so `IMergeTreeDataPart::removeIfNeeded` fails to remove it and logs errors.
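+-- Assumption (not verified by this test's assertions): with non_replicated_deduplication_window = 3, the table keeps block hashes of the three most recent inserts; re-inserting one of those blocks is a silent no-op, while a fourth distinct insert evicts the oldest hash.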
+SET send_logs_level = 'fatal'; + +DROP TABLE IF EXISTS merge_tree_deduplication; + +CREATE TABLE merge_tree_deduplication +( + key UInt64, + value String, + part UInt8 DEFAULT 77 +) +ENGINE=MergeTree() +ORDER BY key +PARTITION BY part +SETTINGS non_replicated_deduplication_window=3, disk='s3_plain_rewritable'; + +SYSTEM STOP MERGES merge_tree_deduplication; + +INSERT INTO merge_tree_deduplication (key, value) VALUES (1, '1'); + +SELECT key, value FROM merge_tree_deduplication; + +INSERT INTO merge_tree_deduplication (key, value) VALUES (1, '1'); + +SELECT key, value FROM merge_tree_deduplication; + +SELECT '==============='; + +INSERT INTO merge_tree_deduplication (key, value) VALUES (2, '2'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (3, '3'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (4, '4'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (1, '1'); + +SELECT key, value FROM merge_tree_deduplication ORDER BY key; + +SELECT '==============='; + +INSERT INTO merge_tree_deduplication (key, value) VALUES (5, '5'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (6, '6'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (7, '7'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (5, '5'); + +SELECT key, value FROM merge_tree_deduplication ORDER BY key; + +SELECT '==============='; + +INSERT INTO merge_tree_deduplication (key, value) VALUES (8, '8'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (9, '9'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (10, '10'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (11, '11'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (12, '12'); + +INSERT INTO merge_tree_deduplication (key, value) VALUES (10, '10'); +INSERT INTO merge_tree_deduplication (key, value) VALUES (11, '11'); +INSERT INTO merge_tree_deduplication (key, value) VALUES (12, '12'); + +SELECT key, value FROM merge_tree_deduplication ORDER BY key; + +SELECT '==============='; + +ALTER TABLE merge_tree_deduplication DROP PART '77_9_9_0'; -- some old part + +INSERT INTO merge_tree_deduplication (key, value) VALUES (10, '10'); + +SELECT key, value FROM merge_tree_deduplication WHERE key = 10; + +ALTER TABLE merge_tree_deduplication DROP PART '77_13_13_0'; -- fresh part + +INSERT INTO merge_tree_deduplication (key, value) VALUES (12, '12'); + +SELECT key, value FROM merge_tree_deduplication WHERE key = 12; + +DETACH TABLE merge_tree_deduplication; +ATTACH TABLE merge_tree_deduplication; + +OPTIMIZE TABLE merge_tree_deduplication FINAL; + +INSERT INTO merge_tree_deduplication (key, value) VALUES (11, '11'); -- deduplicated +INSERT INTO merge_tree_deduplication (key, value) VALUES (12, '12'); -- deduplicated + +SELECT '==============='; + +SELECT key, value FROM merge_tree_deduplication ORDER BY key; + +SELECT '==============='; + +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (11, '11', 88); + +ALTER TABLE merge_tree_deduplication DROP PARTITION 77; + +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (11, '11', 88); --deduplicated + +INSERT INTO merge_tree_deduplication (key, value) VALUES (11, '11'); -- not deduplicated +INSERT INTO merge_tree_deduplication (key, value) VALUES (12, '12'); -- not deduplicated + +SELECT part, key, value FROM merge_tree_deduplication ORDER BY key, part; + +-- Alters.... 
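+-- Assumption for the ALTERs below: shrinking the window to 2 evicts the oldest tracked block hash, so an earlier duplicate can be inserted again, and setting the window to 0 disables deduplication entirely.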
+ +ALTER TABLE merge_tree_deduplication MODIFY SETTING non_replicated_deduplication_window = 2; + +SELECT '==============='; + +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (1, '1', 33); +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (2, '2', 33); +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (3, '3', 33); +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (1, '1', 33); + +SELECT * FROM merge_tree_deduplication WHERE part = 33 ORDER BY key; + +SELECT '==============='; + +ALTER TABLE merge_tree_deduplication MODIFY SETTING non_replicated_deduplication_window = 0; + +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (1, '1', 33); +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (1, '1', 33); + +DETACH TABLE merge_tree_deduplication; +ATTACH TABLE merge_tree_deduplication; + +SELECT * FROM merge_tree_deduplication WHERE part = 33 ORDER BY key; + +SELECT '==============='; + +ALTER TABLE merge_tree_deduplication MODIFY SETTING non_replicated_deduplication_window = 3; + +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (1, '1', 33); + +SELECT * FROM merge_tree_deduplication WHERE part = 33 ORDER BY key; + +SELECT '==============='; + +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (1, '1', 44); +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (2, '2', 44); +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (3, '3', 44); +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (1, '1', 44); + +INSERT INTO merge_tree_deduplication (key, value, part) VALUES (4, '4', 44); + +DETACH TABLE merge_tree_deduplication; +ATTACH TABLE merge_tree_deduplication; + +SELECT * FROM merge_tree_deduplication WHERE part = 44 ORDER BY key; + +DROP TABLE IF EXISTS merge_tree_deduplication; + +SELECT '==============='; + +DROP TABLE IF EXISTS merge_tree_no_deduplication; + +CREATE TABLE merge_tree_no_deduplication +( + key UInt64, + value String +) +ENGINE=MergeTree() +ORDER BY key +SETTINGS disk='s3_plain_rewritable'; + +INSERT INTO merge_tree_no_deduplication (key, value) VALUES (1, '1'); +INSERT INTO merge_tree_no_deduplication (key, value) VALUES (1, '1'); + +SELECT * FROM merge_tree_no_deduplication ORDER BY key; + +SELECT '==============='; + +ALTER TABLE merge_tree_no_deduplication MODIFY SETTING non_replicated_deduplication_window = 3; + +INSERT INTO merge_tree_no_deduplication (key, value) VALUES (1, '1'); +INSERT INTO merge_tree_no_deduplication (key, value) VALUES (2, '2'); +INSERT INTO merge_tree_no_deduplication (key, value) VALUES (3, '3'); + +DETACH TABLE merge_tree_no_deduplication; +ATTACH TABLE merge_tree_no_deduplication; + +INSERT INTO merge_tree_no_deduplication (key, value) VALUES (1, '1'); +INSERT INTO merge_tree_no_deduplication (key, value) VALUES (4, '4'); + +SELECT * FROM merge_tree_no_deduplication ORDER BY key; + +DROP TABLE IF EXISTS merge_tree_no_deduplication; diff --git a/parser/testdata/03711_top_k_by_dynamic_filter/ast.json b/parser/testdata/03711_top_k_by_dynamic_filter/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03711_top_k_by_dynamic_filter/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03711_top_k_by_dynamic_filter/metadata.json b/parser/testdata/03711_top_k_by_dynamic_filter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03711_top_k_by_dynamic_filter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff 
--git a/parser/testdata/03711_top_k_by_dynamic_filter/query.sql b/parser/testdata/03711_top_k_by_dynamic_filter/query.sql new file mode 100644 index 000000000..3d23b2015 --- /dev/null +++ b/parser/testdata/03711_top_k_by_dynamic_filter/query.sql @@ -0,0 +1,22 @@ +-- Test for ORDER BY ... LIMIT n optimization (top-K) - dynamic PREWHERE filtering will be used to skip rows +-- Tags: long, no-tsan, no-asan, no-msan, no-s3-storage + +DROP TABLE IF EXISTS tab1; + +CREATE TABLE tab1 +( + id UInt32, + v1 UInt32, + v2 UInt32 +) Engine = MergeTree ORDER BY id SETTINGS index_granularity = 64, min_bytes_for_wide_part = 0, min_bytes_for_full_part_storage = 0, max_bytes_to_merge_at_max_space_in_pool = 1, use_const_adaptive_granularity = 1, index_granularity_bytes = 0; + +-- INSERT in random id order, so that v1 is scattered, but v1 is essentially 1..1000000 +INSERT INTO tab1 SELECT rand(), number + 1, number + 1 from numbers(1000000); + +SELECT v1, v2 FROM tab1 ORDER BY v1 DESC LIMIT 5 SETTINGS use_top_k_dynamic_filtering=1; +SELECT v1, v2 FROM tab1 ORDER BY v1 DESC LIMIT 20 SETTINGS use_top_k_dynamic_filtering=1; +SELECT v1, v2 FROM tab1 WHERE v2 > 100000 ORDER BY v1 DESC LIMIT 10 SETTINGS use_top_k_dynamic_filtering=1; + +SELECT v1, v2 FROM tab1 ORDER BY v1 ASC LIMIT 5 SETTINGS use_top_k_dynamic_filtering=1; +SELECT v1, v2 FROM tab1 ORDER BY v1 ASC LIMIT 20 SETTINGS use_top_k_dynamic_filtering=1; +SELECT v1, v2 FROM tab1 WHERE v2 > 100000 ORDER BY v1 ASC LIMIT 10 SETTINGS use_top_k_dynamic_filtering=1; diff --git a/parser/testdata/03711_top_k_by_skip_index/ast.json b/parser/testdata/03711_top_k_by_skip_index/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03711_top_k_by_skip_index/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03711_top_k_by_skip_index/metadata.json b/parser/testdata/03711_top_k_by_skip_index/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03711_top_k_by_skip_index/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03711_top_k_by_skip_index/query.sql b/parser/testdata/03711_top_k_by_skip_index/query.sql new file mode 100644 index 000000000..a894dfbb5 --- /dev/null +++ b/parser/testdata/03711_top_k_by_skip_index/query.sql @@ -0,0 +1,61 @@ +-- Test for verifying TopN optimizations +-- Tags: no-parallel-replicas + +SET merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability = 0; -- for stable max_rows_to_read +SET read_overflow_mode = 'break'; + +DROP TABLE IF EXISTS tab; + +CREATE TABLE tab +( + id Int32, + v1 Int32, + v2 Int32, + INDEX v1idx v1 TYPE minmax +) Engine = MergeTree ORDER BY id SETTINGS index_granularity = 64, min_bytes_for_wide_part = 0, min_bytes_for_full_part_storage = 0, max_bytes_to_merge_at_max_space_in_pool = 1, use_const_adaptive_granularity = 1, index_granularity_bytes = 0; + +INSERT INTO tab SELECT number, number, number FROM numbers(10000); + +-- Only 10 granules should be read +SELECT id, v1 FROM tab ORDER BY v1 ASC LIMIT 10 SETTINGS max_rows_to_read = 640, use_skip_indexes_for_top_k = 1, use_skip_indexes_on_data_read = 0; +SELECT id, v1 FROM tab ORDER BY v1 DESC LIMIT 10 SETTINGS max_rows_to_read = 640, use_skip_indexes_for_top_k = 1, use_skip_indexes_on_data_read = 0; + +-- Verify EXPLAIN indexes=1 output to confirm skip index usage for top-n +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + SELECT id, v1 + FROM tab + ORDER BY v1 ASC + LIMIT 10 + SETTINGS 
use_skip_indexes_for_top_k = 1, use_skip_indexes_on_data_read = 0) +WHERE explain LIKE '%TopK%'; + +-- If a WHERE clause is present, the TopN-via-skip-index-only optimization is not possible - the row should not be seen +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN indexes = 1 + SELECT id, v1 + FROM tab + WHERE v2 > 0 + ORDER BY v1 ASC + LIMIT 10 + SETTINGS use_skip_indexes_for_top_k = 1, use_skip_indexes_on_data_read = 0) +WHERE explain LIKE '%TopK%'; + +-- Verify that the dynamic filter is injected as a PREWHERE condition +SELECT trimLeft(explain) AS explain FROM ( + EXPLAIN actions = 1 + SELECT id, v1 + FROM tab + WHERE v2 > 0 + ORDER BY v1 ASC + LIMIT 10 + SETTINGS use_skip_indexes_for_top_k = 0, use_top_k_dynamic_filtering = 1) +WHERE explain LIKE '%topK%'; + +-- Verify execution of dynamic filter +SELECT id, v1 +FROM tab +WHERE v2 > 0 +ORDER BY v1 ASC +LIMIT 5 +SETTINGS use_skip_indexes_for_top_k = 0, use_top_k_dynamic_filtering = 1; diff --git a/parser/testdata/03711_top_k_by_skip_index_dynamic/ast.json b/parser/testdata/03711_top_k_by_skip_index_dynamic/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03711_top_k_by_skip_index_dynamic/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03711_top_k_by_skip_index_dynamic/metadata.json b/parser/testdata/03711_top_k_by_skip_index_dynamic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03711_top_k_by_skip_index_dynamic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03711_top_k_by_skip_index_dynamic/query.sql b/parser/testdata/03711_top_k_by_skip_index_dynamic/query.sql new file mode 100644 index 000000000..896ea5fd9 --- /dev/null +++ b/parser/testdata/03711_top_k_by_skip_index_dynamic/query.sql @@ -0,0 +1,23 @@ +-- Test for ORDER BY ...
LIMIT n optimization (top-K) - minmax index will be 'dynamically' used to skip granules +-- Tags: long, no-tsan, no-asan, no-msan, no-s3-storage + +DROP TABLE IF EXISTS tab1; + +CREATE TABLE tab1 +( + id UInt32, + v1 DateTime, + v2 UInt32, + INDEX v1idx v1 TYPE minmax GRANULARITY 1 +) Engine = MergeTree ORDER BY id SETTINGS index_granularity = 64, min_bytes_for_wide_part = 0, min_bytes_for_full_part_storage = 0, max_bytes_to_merge_at_max_space_in_pool = 1, use_const_adaptive_granularity = 1, index_granularity_bytes = 0; + +-- INSERT in random id order, so that v1 is scattered, but v1 is essentially 1..1000000 +INSERT INTO tab1 SELECT rand(), toUnixTimestamp(toDateTime(number + 1)), number + 1 from numbers(1000000); + +SELECT toUnixTimestamp(v1), v2 FROM tab1 ORDER BY v1 DESC LIMIT 5 SETTINGS use_top_k_dynamic_filtering=1; +SELECT toUnixTimestamp(v1), v2 FROM tab1 ORDER BY v1 DESC LIMIT 20 SETTINGS use_top_k_dynamic_filtering=1; +SELECT toUnixTimestamp(v1), v2 FROM tab1 WHERE v2 > 100000 ORDER BY v1 DESC LIMIT 10 SETTINGS use_top_k_dynamic_filtering=1; + +SELECT toUnixTimestamp(v1), v2 FROM tab1 ORDER BY v1 ASC LIMIT 5 SETTINGS use_top_k_dynamic_filtering=1; +SELECT toUnixTimestamp(v1), v2 FROM tab1 ORDER BY v1 ASC LIMIT 20 SETTINGS use_top_k_dynamic_filtering=1; +SELECT toUnixTimestamp(v1), v2 FROM tab1 WHERE v2 > 100000 ORDER BY v1 ASC LIMIT 10 SETTINGS use_top_k_dynamic_filtering=1; diff --git a/parser/testdata/03712_json_advanced_shared_data_bug/ast.json b/parser/testdata/03712_json_advanced_shared_data_bug/ast.json new file mode 100644 index 000000000..9e11365f2 --- /dev/null +++ b/parser/testdata/03712_json_advanced_shared_data_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001112069, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03712_json_advanced_shared_data_bug/metadata.json b/parser/testdata/03712_json_advanced_shared_data_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03712_json_advanced_shared_data_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03712_json_advanced_shared_data_bug/query.sql b/parser/testdata/03712_json_advanced_shared_data_bug/query.sql new file mode 100644 index 000000000..13a6e394b --- /dev/null +++ b/parser/testdata/03712_json_advanced_shared_data_bug/query.sql @@ -0,0 +1,14 @@ +SET optimize_if_transform_strings_to_enum=0; + +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 JSON(max_dynamic_paths = 0)) ENGINE = MergeTree ORDER BY tuple() SETTINGS min_bytes_for_wide_part = 1, object_serialization_version = 'v3', object_shared_data_serialization_version_for_zero_level_parts = 'advanced', object_shared_data_buckets_for_wide_part = 1, index_granularity=2; + +INSERT INTO t0 SELECT multiIf( + number < 2, + '{"arr" : [{"arr1" : 9}]}', + '{"a" : {"b" : [{"c" : 42}]}}' +) FROM numbers(3); + +SELECT c0.arr.:`Array(JSON)`, c0.^a FROM t0; +DROP TABLE t0; + diff --git a/parser/testdata/03713_data_types_binary_deserialization_stack_overflow/ast.json b/parser/testdata/03713_data_types_binary_deserialization_stack_overflow/ast.json new file mode 100644 index 000000000..2f9cdeaef --- /dev/null +++ b/parser/testdata/03713_data_types_binary_deserialization_stack_overflow/ast.json @@ -0,0 +1,79 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery 
(children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function format (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier RowBinaryWithNamesAndTypes" + }, + { + "explain": " Function concat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '\u0001\u0001x'" + }, + { + "explain": " Function repeat (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '\u001E'" + }, + { + "explain": " Literal UInt64_1000000" + }, + { + "explain": " Set" + } + ], + + "rows": 19, + + "statistics": + { + "elapsed": 0.00114644, + "rows_read": 19, + "bytes_read": 739 + } +} diff --git a/parser/testdata/03713_data_types_binary_deserialization_stack_overflow/metadata.json b/parser/testdata/03713_data_types_binary_deserialization_stack_overflow/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03713_data_types_binary_deserialization_stack_overflow/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03713_data_types_binary_deserialization_stack_overflow/query.sql b/parser/testdata/03713_data_types_binary_deserialization_stack_overflow/query.sql new file mode 100644 index 000000000..a013a8d0d --- /dev/null +++ b/parser/testdata/03713_data_types_binary_deserialization_stack_overflow/query.sql @@ -0,0 +1 @@ +select * from format(RowBinaryWithNamesAndTypes, x'010178' || repeat(x'1e', 1000000)) settings input_format_binary_decode_types_in_binary_format=1; -- {serverError CANNOT_EXTRACT_TABLE_STRUCTURE} diff --git a/parser/testdata/03713_group_by_injective_function_old_analyzer/ast.json b/parser/testdata/03713_group_by_injective_function_old_analyzer/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03713_group_by_injective_function_old_analyzer/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03713_group_by_injective_function_old_analyzer/metadata.json b/parser/testdata/03713_group_by_injective_function_old_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03713_group_by_injective_function_old_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03713_group_by_injective_function_old_analyzer/query.sql b/parser/testdata/03713_group_by_injective_function_old_analyzer/query.sql new file mode 100644 index 000000000..17c2d664b --- /dev/null +++ b/parser/testdata/03713_group_by_injective_function_old_analyzer/query.sql @@ -0,0 +1,8 @@ +SELECT + concat('a_', toString(number % 3)) AS a, + number % 5 AS b +FROM numbers(50) +GROUP BY (a, b) +ORDER BY (a, b) +LIMIT 1 +SETTINGS optimize_injective_functions_in_group_by = 0, enable_analyzer = 0; diff --git a/parser/testdata/03713_optimize_inverse_dictionary_lookup_setting_rewrite_in_to_join/ast.json b/parser/testdata/03713_optimize_inverse_dictionary_lookup_setting_rewrite_in_to_join/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03713_optimize_inverse_dictionary_lookup_setting_rewrite_in_to_join/ast.json @@ -0,0 +1 @@ +{"error": true} diff 
--git a/parser/testdata/03713_optimize_inverse_dictionary_lookup_setting_rewrite_in_to_join/metadata.json b/parser/testdata/03713_optimize_inverse_dictionary_lookup_setting_rewrite_in_to_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03713_optimize_inverse_dictionary_lookup_setting_rewrite_in_to_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03713_optimize_inverse_dictionary_lookup_setting_rewrite_in_to_join/query.sql b/parser/testdata/03713_optimize_inverse_dictionary_lookup_setting_rewrite_in_to_join/query.sql new file mode 100644 index 000000000..cf313561f --- /dev/null +++ b/parser/testdata/03713_optimize_inverse_dictionary_lookup_setting_rewrite_in_to_join/query.sql @@ -0,0 +1,68 @@ +-- Tags: no-replicated-database, no-parallel-replicas +-- no-parallel, no-parallel-replicas: Dictionary is not created in parallel replicas. + +SET enable_analyzer = 1; +SET optimize_inverse_dictionary_lookup = 1; +SET optimize_or_like_chain = 0; + +-- Expect no rewrite of dictGet(...) = 'constant' +SET rewrite_in_to_join = 1; + +DROP DICTIONARY IF EXISTS colors; +DROP TABLE IF EXISTS ref_colors; +CREATE TABLE ref_colors +( + id UInt64, + name String, + n UInt64 +) +ENGINE = MergeTree +ORDER BY id; + +INSERT INTO ref_colors VALUES + (1, 'red', 5), + (2, 'blue', 7), + (3, 'red', 12), + (4, 'green', 0), + (5, 'Rose', 9); + +DROP DICTIONARY IF EXISTS colors; +CREATE DICTIONARY colors +( + id UInt64, + name String, + n UInt64 +) +PRIMARY KEY id +SOURCE(CLICKHOUSE(TABLE 'ref_colors')) +LAYOUT(HASHED()) +LIFETIME(0); + +DROP TABLE IF EXISTS t; +CREATE TABLE t +( + color_id UInt64, + payload String +) +ENGINE = MergeTree +ORDER BY color_id; + +INSERT INTO t VALUES + (1, 'a'), + (2, 'b'), + (3, 'c'), + (4, 'd'), + (5, 'R'); + +SELECT 'Equality, LHS - plan'; +EXPLAIN SYNTAX run_query_tree_passes=1 +SELECT color_id, payload +FROM t +WHERE dictGetString('colors', 'name', color_id) = 'red' +ORDER BY color_id, payload; + +SELECT 'Equality, LHS'; +SELECT color_id, payload +FROM t +WHERE dictGetString('colors', 'name', color_id) = 'red' +ORDER BY color_id, payload; diff --git a/parser/testdata/03713_replicated_columns_in_external_data_bug/ast.json b/parser/testdata/03713_replicated_columns_in_external_data_bug/ast.json new file mode 100644 index 000000000..c65935d40 --- /dev/null +++ b/parser/testdata/03713_replicated_columns_in_external_data_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001424829, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03713_replicated_columns_in_external_data_bug/metadata.json b/parser/testdata/03713_replicated_columns_in_external_data_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03713_replicated_columns_in_external_data_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03713_replicated_columns_in_external_data_bug/query.sql b/parser/testdata/03713_replicated_columns_in_external_data_bug/query.sql new file mode 100644 index 000000000..a33c285f6 --- /dev/null +++ b/parser/testdata/03713_replicated_columns_in_external_data_bug/query.sql @@ -0,0 +1,3 @@ +set enable_analyzer=1; +select * from remote('127.0.0.{1,2,3}', numbers(100)) where number global in (select number::Dynamic from numbers(100) array Join range(number % 10)) limit 100 format Null 
settings enable_lazy_columns_replication=1; + diff --git a/parser/testdata/03714_base32_base58_short_string/ast.json b/parser/testdata/03714_base32_base58_short_string/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03714_base32_base58_short_string/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03714_base32_base58_short_string/metadata.json b/parser/testdata/03714_base32_base58_short_string/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03714_base32_base58_short_string/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03714_base32_base58_short_string/query.sql b/parser/testdata/03714_base32_base58_short_string/query.sql new file mode 100644 index 000000000..3690b6e7b --- /dev/null +++ b/parser/testdata/03714_base32_base58_short_string/query.sql @@ -0,0 +1,4 @@ +-- Tags: no-fasttest + +SELECT base32Encode(randomString(1, 100)) FROM numbers(1000) FORMAT Null; +SELECT base58Encode(randomString(1, 100)) FROM numbers(1000) FORMAT Null; \ No newline at end of file diff --git a/parser/testdata/03714_empty_tuple_reverse_function/ast.json b/parser/testdata/03714_empty_tuple_reverse_function/ast.json new file mode 100644 index 000000000..143ee24c1 --- /dev/null +++ b/parser/testdata/03714_empty_tuple_reverse_function/ast.json @@ -0,0 +1,46 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function reverse (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 8, + + "statistics": + { + "elapsed": 0.001169414, + "rows_read": 8, + "bytes_read": 300 + } +} diff --git a/parser/testdata/03714_empty_tuple_reverse_function/metadata.json b/parser/testdata/03714_empty_tuple_reverse_function/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03714_empty_tuple_reverse_function/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03714_empty_tuple_reverse_function/query.sql b/parser/testdata/03714_empty_tuple_reverse_function/query.sql new file mode 100644 index 000000000..1ef82834a --- /dev/null +++ b/parser/testdata/03714_empty_tuple_reverse_function/query.sql @@ -0,0 +1,27 @@ +SELECT reverse(()); + +SELECT reverse(tuple()); + +SELECT reverse(()) FROM numbers(3); + +WITH () AS x SELECT reverse(x); + +DROP TABLE IF EXISTS table_rev_empty_tuple; +CREATE TABLE table_rev_empty_tuple +( + x Tuple() +) ENGINE = Memory; + +INSERT INTO table_rev_empty_tuple SELECT tuple() FROM numbers(5); + +SELECT reverse(x) FROM table_rev_empty_tuple LIMIT 3; + +SELECT toTypeName(reverse(x)) FROM table_rev_empty_tuple LIMIT 1; + +DROP TABLE table_rev_empty_tuple; + +SELECT reverse((1, 'a', 3)); + +SELECT reverse([()]); + +SELECT reverse((())); diff --git a/parser/testdata/03714_queries_escaping_1/ast.json b/parser/testdata/03714_queries_escaping_1/ast.json new file mode 100644 index 000000000..7a85a6bff --- /dev/null +++ b/parser/testdata/03714_queries_escaping_1/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "ShowColumns" + } + ], + + "rows": 1, + + 
"statistics": + { + "elapsed": 0.001122457, + "rows_read": 1, + "bytes_read": 19 + } +} diff --git a/parser/testdata/03714_queries_escaping_1/metadata.json b/parser/testdata/03714_queries_escaping_1/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03714_queries_escaping_1/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03714_queries_escaping_1/query.sql b/parser/testdata/03714_queries_escaping_1/query.sql new file mode 100644 index 000000000..3675d867e --- /dev/null +++ b/parser/testdata/03714_queries_escaping_1/query.sql @@ -0,0 +1 @@ +show columns from a.b like 'a\' or 1=1;--' diff --git a/parser/testdata/03714_queries_escaping_2/ast.json b/parser/testdata/03714_queries_escaping_2/ast.json new file mode 100644 index 000000000..452a031e8 --- /dev/null +++ b/parser/testdata/03714_queries_escaping_2/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "ShowFunctions" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001245369, + "rows_read": 1, + "bytes_read": 21 + } +} diff --git a/parser/testdata/03714_queries_escaping_2/metadata.json b/parser/testdata/03714_queries_escaping_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03714_queries_escaping_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03714_queries_escaping_2/query.sql b/parser/testdata/03714_queries_escaping_2/query.sql new file mode 100644 index 000000000..33a489077 --- /dev/null +++ b/parser/testdata/03714_queries_escaping_2/query.sql @@ -0,0 +1 @@ +show functions like 'a\' or 1=1;--' diff --git a/parser/testdata/03715_empty_tuple_functions_conversion/ast.json b/parser/testdata/03715_empty_tuple_functions_conversion/ast.json new file mode 100644 index 000000000..630c2ff09 --- /dev/null +++ b/parser/testdata/03715_empty_tuple_functions_conversion/ast.json @@ -0,0 +1,49 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Literal 'SimpleAggregateFunction(min, Tuple())'" + } + ], + + "rows": 9, + + "statistics": + { + "elapsed": 0.001326292, + "rows_read": 9, + "bytes_read": 358 + } +} diff --git a/parser/testdata/03715_empty_tuple_functions_conversion/metadata.json b/parser/testdata/03715_empty_tuple_functions_conversion/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03715_empty_tuple_functions_conversion/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03715_empty_tuple_functions_conversion/query.sql b/parser/testdata/03715_empty_tuple_functions_conversion/query.sql new file mode 100644 index 000000000..0e2ed4643 --- /dev/null +++ b/parser/testdata/03715_empty_tuple_functions_conversion/query.sql @@ -0,0 +1,19 @@ +select CAST((), 'SimpleAggregateFunction(min, Tuple())'); + +DROP TABLE IF EXISTS tab; +CREATE TABLE tab (c0 Tuple()) ENGINE = Memory; +INSERT INTO tab VALUES (()), (()), (()); + +SELECT CAST(c0, 'SimpleAggregateFunction(min, Tuple())') FROM tab; + 
+DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 SimpleAggregateFunction(min, Tuple())) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO t0 (c0) VALUES (tuple()); +SELECT * FROM t0; + +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (c0 SimpleAggregateFunction(min, Tuple())) ENGINE = MergeTree() ORDER BY tuple() SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; +UPDATE t1 SET c0 = () WHERE TRUE; +INSERT INTO t1 (c0) VALUES (tuple()), (tuple()), (tuple()); +UPDATE t1 SET c0 = () WHERE TRUE; +SELECT * FROM t1; diff --git a/parser/testdata/03716_anti_join_runtime_filters_2/ast.json b/parser/testdata/03716_anti_join_runtime_filters_2/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03716_anti_join_runtime_filters_2/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03716_anti_join_runtime_filters_2/metadata.json b/parser/testdata/03716_anti_join_runtime_filters_2/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03716_anti_join_runtime_filters_2/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03716_anti_join_runtime_filters_2/query.sql b/parser/testdata/03716_anti_join_runtime_filters_2/query.sql new file mode 100644 index 000000000..9e2216bb2 --- /dev/null +++ b/parser/testdata/03716_anti_join_runtime_filters_2/query.sql @@ -0,0 +1,61 @@ +-- Tags: no-random-merge-tree-settings + +SET enable_analyzer = 1; +SET enable_join_runtime_filters = 1; + +CREATE TABLE nation(n_nationkey Int32, n_name String) ENGINE MergeTree ORDER BY n_nationkey; +CREATE TABLE customer(c_custkey Int32, c_nationkey Int32) ENGINE MergeTree ORDER BY c_custkey; + +INSERT INTO nation VALUES (5,'ETHIOPIA'),(6,'FRANCE'),(7,'GERMANY'); +INSERT INTO customer SELECT number, 5 - (number % 2) FROM numbers(500); + +SET enable_parallel_replicas=0; +SET query_plan_join_swap_table=0; +SET join_algorithm='hash'; -- to make plan stable + +-- RIGHT ANTI JOIN +SELECT REGEXP_REPLACE(explain, '_runtime_filter_\\d+', '_runtime_filter_UNIQ_ID') +FROM ( + EXPLAIN actions=1 + SELECT count() + FROM customer + WHERE NOT EXISTS ( + SELECT * + FROM nation + WHERE c_nationkey = n_nationkey + ) +) +SETTINGS correlated_subqueries_default_join_kind = 'right'; + +SELECT count() +FROM customer +WHERE NOT EXISTS ( + SELECT * + FROM nation + WHERE c_nationkey = n_nationkey +) +SETTINGS correlated_subqueries_default_join_kind = 'right'; + +-- LEFT ANTI JOIN +SELECT REGEXP_REPLACE(explain, '_runtime_filter_\\d+', '_runtime_filter_UNIQ_ID') +FROM ( + EXPLAIN actions=1 + SELECT count() + FROM customer + WHERE NOT EXISTS ( + SELECT * + FROM nation + WHERE c_nationkey = n_nationkey + ) +) +SETTINGS correlated_subqueries_default_join_kind = 'left'; + +SELECT count() +FROM customer +WHERE NOT EXISTS ( + SELECT * + FROM nation + WHERE c_nationkey = n_nationkey +) +SETTINGS correlated_subqueries_default_join_kind = 'left'; + diff --git a/parser/testdata/03716_bson_each_row_empty_tuple_column/ast.json b/parser/testdata/03716_bson_each_row_empty_tuple_column/ast.json new file mode 100644 index 000000000..32c1ae90f --- /dev/null +++ b/parser/testdata/03716_bson_each_row_empty_tuple_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001306715, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git 
a/parser/testdata/03716_bson_each_row_empty_tuple_column/metadata.json b/parser/testdata/03716_bson_each_row_empty_tuple_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03716_bson_each_row_empty_tuple_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03716_bson_each_row_empty_tuple_column/query.sql b/parser/testdata/03716_bson_each_row_empty_tuple_column/query.sql new file mode 100644 index 000000000..e93b96b1f --- /dev/null +++ b/parser/testdata/03716_bson_each_row_empty_tuple_column/query.sql @@ -0,0 +1,17 @@ +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS random_filename; + +CREATE TABLE t0 (c0 Int32, c1 Tuple()) ENGINE = Memory; +CREATE TABLE random_filename (name String) ENGINE = Memory; + +INSERT INTO random_filename SELECT concat('03716_test_bson_empty_tuple_', toString(generateUUIDv4()), '.bson'); + +INSERT INTO FUNCTION file((SELECT name FROM random_filename LIMIT 1), 'BSONEachRow', 'c0 Int32, c1 Tuple()') +SELECT 1, tuple() FROM numbers(5) SETTINGS engine_file_truncate_on_insert = 1; + +INSERT INTO t0 SELECT * FROM file((SELECT name FROM random_filename LIMIT 1), 'BSONEachRow', 'c0 Int32, c1 Tuple()'); + +SELECT * FROM t0 ORDER BY c0; + +DROP TABLE t0; +DROP TABLE random_filename; diff --git a/parser/testdata/03716_join_duplicate_columns_89411/ast.json b/parser/testdata/03716_join_duplicate_columns_89411/ast.json new file mode 100644 index 000000000..22b4187a6 --- /dev/null +++ b/parser/testdata/03716_join_duplicate_columns_89411/ast.json @@ -0,0 +1,40 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier t1.k" + }, + { + "explain": " Identifier t2.k" + } + ], + + "rows": 6, + + "statistics": + { + "elapsed": 0.001335207, + "rows_read": 6, + "bytes_read": 203 + } +} diff --git a/parser/testdata/03716_join_duplicate_columns_89411/metadata.json b/parser/testdata/03716_join_duplicate_columns_89411/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03716_join_duplicate_columns_89411/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03716_join_duplicate_columns_89411/query.sql b/parser/testdata/03716_join_duplicate_columns_89411/query.sql new file mode 100644 index 000000000..cd88b8984 --- /dev/null +++ b/parser/testdata/03716_join_duplicate_columns_89411/query.sql @@ -0,0 +1,14 @@ +SELECT t1.k, t2.k +FROM +( + SELECT number AS k + FROM numbers(10) +) AS t1 +INNER JOIN +( + SELECT + CAST(0, 'UInt64') AS k, k + FROM numbers(3) +) AS t2 on t1.k = t2.k +ORDER BY t1.k, t2.k +; diff --git a/parser/testdata/03716_join_right_side_sorting/ast.json b/parser/testdata/03716_join_right_side_sorting/ast.json new file mode 100644 index 000000000..d62c73390 --- /dev/null +++ b/parser/testdata/03716_join_right_side_sorting/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001287488, + "rows_read": 5, + 
"bytes_read": 177 + } +} diff --git a/parser/testdata/03716_join_right_side_sorting/metadata.json b/parser/testdata/03716_join_right_side_sorting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03716_join_right_side_sorting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03716_join_right_side_sorting/query.sql b/parser/testdata/03716_join_right_side_sorting/query.sql new file mode 100644 index 000000000..0fc789f09 --- /dev/null +++ b/parser/testdata/03716_join_right_side_sorting/query.sql @@ -0,0 +1,15 @@ +SELECT 1 +FROM +( + SELECT 1 AS x +) AS x +LEFT JOIN +( + SELECT 2 AS y +) AS y ON x.x = y.y +INNER JOIN +( + SELECT number + FROM numbers(40) +) AS z ON 1 +SETTINGS join_algorithm = 'hash', join_output_by_rowlist_perkey_rows_threshold = 80, allow_experimental_join_right_table_sorting = 1, allow_experimental_parallel_reading_from_replicas = 1; diff --git a/parser/testdata/03716_multiple_joins_using_top_level_identifier/ast.json b/parser/testdata/03716_multiple_joins_using_top_level_identifier/ast.json new file mode 100644 index 000000000..7ad4d33d8 --- /dev/null +++ b/parser/testdata/03716_multiple_joins_using_top_level_identifier/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001248299, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03716_multiple_joins_using_top_level_identifier/metadata.json b/parser/testdata/03716_multiple_joins_using_top_level_identifier/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03716_multiple_joins_using_top_level_identifier/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03716_multiple_joins_using_top_level_identifier/query.sql b/parser/testdata/03716_multiple_joins_using_top_level_identifier/query.sql new file mode 100644 index 000000000..f9587ff6c --- /dev/null +++ b/parser/testdata/03716_multiple_joins_using_top_level_identifier/query.sql @@ -0,0 +1,43 @@ +SET analyzer_compatibility_join_using_top_level_identifier = 1; + +DROP TABLE IF EXISTS t1; +DROP TABLE IF EXISTS t2; +DROP TABLE IF EXISTS t3; + +CREATE TABLE t1 (id String, val String) ENGINE = MergeTree() ORDER BY id; +CREATE TABLE t2 (id String, code String) ENGINE = MergeTree() ORDER BY id; +CREATE TABLE t3 (id String, code String) ENGINE = MergeTree() ORDER BY id; + +INSERT INTO t1 VALUES ('a', 'v'), ('b', 'w'); +INSERT INTO t2 VALUES ('b', 'c'); +INSERT INTO t3 VALUES ('a_1', 'c'), ('b_1', 'd'); + +SET enable_analyzer = 1; + +-- TODO: join_use_nulls reveals another issue in stress tests +-- Mute for now and track bug in +-- https://github.com/ClickHouse/ClickHouse/issues/87016 + +SELECT t1.id || '_1' AS id, t1.val +FROM t1 +LEFT JOIN t2 ON t1.id = t2.id +LEFT JOIN t3 USING (id) +ORDER BY t1.val +SETTINGS join_use_nulls = 0 +; + +SELECT t2.id || '_1' AS id, t1.val +FROM t1 +LEFT JOIN t2 ON t1.id = t2.id +LEFT JOIN t3 USING (id) +ORDER BY t1.val +SETTINGS join_use_nulls = 0 +; + +SELECT t1.id || t2.id || '_1' AS id, t1.val +FROM t1 +INNER JOIN t2 ON t1.id = t2.id +LEFT JOIN t3 USING (id) +ORDER BY t1.val +SETTINGS join_use_nulls = 0 +; -- { serverError AMBIGUOUS_IDENTIFIER } diff --git a/parser/testdata/03716_text_index_drop_caches/ast.json b/parser/testdata/03716_text_index_drop_caches/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ 
b/parser/testdata/03716_text_index_drop_caches/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03716_text_index_drop_caches/metadata.json b/parser/testdata/03716_text_index_drop_caches/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03716_text_index_drop_caches/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03716_text_index_drop_caches/query.sql b/parser/testdata/03716_text_index_drop_caches/query.sql new file mode 100644 index 000000000..0ff197475 --- /dev/null +++ b/parser/testdata/03716_text_index_drop_caches/query.sql @@ -0,0 +1,39 @@ +-- Tags: no-parallel-replicas, no-parallel + +DROP TABLE IF EXISTS t_text_index_drop_caches; + +SET allow_experimental_full_text_index = 1; +SET use_text_index_header_cache = 1; +SET use_text_index_dictionary_cache = 1; +SET use_text_index_postings_cache = 1; +SET use_skip_indexes_on_data_read = 1; + +CREATE TABLE t_text_index_drop_caches +( + s String, + INDEX idx(s) TYPE text(tokenizer = sparseGrams(3, 10)) +) +ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO t_text_index_drop_caches SELECT 'tkn' || toString(number) || 'nkt' FROM numbers(200000); + +SELECT count() FROM t_text_index_drop_caches WHERE s LIKE '%888%' SETTINGS use_skip_indexes = 0; +SELECT count() FROM t_text_index_drop_caches WHERE hasAnyTokens(s, '888'); +SELECT count() FROM t_text_index_drop_caches WHERE hasAnyTokens(s, '888'); + +SYSTEM DROP TEXT INDEX CACHES; + +SELECT count() FROM t_text_index_drop_caches WHERE hasAnyTokens(s, '888'); +SELECT count() FROM t_text_index_drop_caches WHERE hasAnyTokens(s, '888'); + +SYSTEM FLUSH LOGS query_log; + +SELECT + ProfileEvents['TextIndexHeaderCacheMisses'] > 0, + ProfileEvents['TextIndexDictionaryBlockCacheMisses'] > 0, + ProfileEvents['TextIndexPostingsCacheMisses'] > 0 +FROM system.query_log +WHERE current_database = currentDatabase() AND query LIKE '%SELECT count() FROM t_text_index_drop_caches%' AND type = 'QueryFinish' +ORDER BY event_time_microseconds; + +DROP TABLE IF EXISTS t_text_index_drop_caches; diff --git a/parser/testdata/03716_topk_bad_data/ast.json b/parser/testdata/03716_topk_bad_data/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03716_topk_bad_data/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03716_topk_bad_data/metadata.json b/parser/testdata/03716_topk_bad_data/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03716_topk_bad_data/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03716_topk_bad_data/query.sql b/parser/testdata/03716_topk_bad_data/query.sql new file mode 100644 index 000000000..a9acffaf6 --- /dev/null +++ b/parser/testdata/03716_topk_bad_data/query.sql @@ -0,0 +1,16 @@ +-- {echo On} +SELECT finalizeAggregation(CAST(unhex('012A0300000000000000030000000000000043434303000000000000004141410400000000000000414141410100800200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(approx_top_k(3), 
Array(Array(String)))')); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } +SELECT finalizeAggregation(CAST(unhex('012A0300000000000000030000000000000043434303000000000000004141410400000000000000414141410100800200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(topK(3), Array(Array(String)))')); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } + + +SELECT finalizeAggregation(CAST(unhex('012A0300000000000000030000000000000043434303000000000000004141410400000000000000414141410100800200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(approx_top_k(3), Array(Array(String)))')); +SELECT finalizeAggregation(CAST(unhex('012A0300000000000000030000000000000043434303000000000000004141410400000000000000414141410100800200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(topK(3), Array(Array(String)))')); + +-- State: Select hex(topKState(3)((number, number)) as t) AS v, toTypeName(t) from numbers(1) +-- 01100000000000000000000000000000000001004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 +SELECT finalizeAggregation(CAST(unhex('01100000000000000000000000000000000001004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'), 'AggregateFunction(topK(3), Tuple(UInt64, UInt64))')); + +-- Add some extra bytes +SELECT finalizeAggregation(CAST(unhex('01122000000000000000000000000000000000010040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002454'), 'AggregateFunction(topK(3), Tuple(UInt64, UInt64))')); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } + +SELECT 
finalizeAggregation(CAST(unhex('012A03000000000000000300000000000000434343030000000000000041414104000000000000004141414101008002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001'), 'AggregateFunction(topK(3), Array(Array(String)))')); -- { serverError SIZES_OF_ARRAYS_DONT_MATCH } diff --git a/parser/testdata/03717_msgpack_empty_tuple_column/ast.json b/parser/testdata/03717_msgpack_empty_tuple_column/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03717_msgpack_empty_tuple_column/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03717_msgpack_empty_tuple_column/metadata.json b/parser/testdata/03717_msgpack_empty_tuple_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03717_msgpack_empty_tuple_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03717_msgpack_empty_tuple_column/query.sql b/parser/testdata/03717_msgpack_empty_tuple_column/query.sql new file mode 100644 index 000000000..45f81812b --- /dev/null +++ b/parser/testdata/03717_msgpack_empty_tuple_column/query.sql @@ -0,0 +1,20 @@ +-- Tags: no-fasttest +-- no-fasttest: 'MsgPack` format is not supported + +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS random_filename; + +CREATE TABLE t0 (c0 Int32, c1 Tuple()) ENGINE = Memory; +CREATE TABLE random_filename (name String) ENGINE = Memory; + +INSERT INTO random_filename SELECT concat('03716_test_msgpack_empty_tuple_', toString(generateUUIDv4()), '.msgpack'); + +INSERT INTO FUNCTION file((SELECT name FROM random_filename LIMIT 1), 'MsgPack', 'c0 Int32, c1 Tuple()') +SELECT 1, tuple() FROM numbers(5) SETTINGS engine_file_truncate_on_insert = 1; + +INSERT INTO t0 SELECT * FROM file((SELECT name FROM random_filename LIMIT 1), 'MsgPack', 'c0 Int32, c1 Tuple()'); + +SELECT * FROM t0 ORDER BY c0; + +DROP TABLE t0; +DROP TABLE random_filename; diff --git a/parser/testdata/03717_system_unicode_enums/ast.json b/parser/testdata/03717_system_unicode_enums/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03717_system_unicode_enums/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03717_system_unicode_enums/metadata.json b/parser/testdata/03717_system_unicode_enums/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03717_system_unicode_enums/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03717_system_unicode_enums/query.sql b/parser/testdata/03717_system_unicode_enums/query.sql new file mode 100644 index 000000000..579f59b06 --- /dev/null +++ b/parser/testdata/03717_system_unicode_enums/query.sql @@ -0,0 +1,5 @@ +-- Tags: no-fasttest +-- ^ depends on ICU library + +SELECT code_point, code_point_value, name, block FROM system.unicode WHERE numeric_type = 'Digit' AND block = 'Ethiopic' ORDER BY code_point; +SELECT code_point, code_point_value, name, block FROM system.unicode WHERE block = 'Emoticons' AND name LIKE '%CRY%' ORDER BY code_point; diff --git 
a/parser/testdata/03719_generic_hash_over_constant_and_non_constant/ast.json b/parser/testdata/03719_generic_hash_over_constant_and_non_constant/ast.json new file mode 100644 index 000000000..def69f363 --- /dev/null +++ b/parser/testdata/03719_generic_hash_over_constant_and_non_constant/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function sipHash64 (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal '42'" + }, + { + "explain": " Literal 'Variant(UInt64, String)'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001378749, + "rows_read": 17, + "bytes_read": 674 + } +} diff --git a/parser/testdata/03719_generic_hash_over_constant_and_non_constant/metadata.json b/parser/testdata/03719_generic_hash_over_constant_and_non_constant/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03719_generic_hash_over_constant_and_non_constant/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03719_generic_hash_over_constant_and_non_constant/query.sql b/parser/testdata/03719_generic_hash_over_constant_and_non_constant/query.sql new file mode 100644 index 000000000..4bf1a9980 --- /dev/null +++ b/parser/testdata/03719_generic_hash_over_constant_and_non_constant/query.sql @@ -0,0 +1,2 @@ +select sipHash64(number, 42::Variant(UInt64, String)) from numbers(2); + diff --git a/parser/testdata/03719_ntile_no_partition_by_check/ast.json b/parser/testdata/03719_ntile_no_partition_by_check/ast.json new file mode 100644 index 000000000..5e84fba58 --- /dev/null +++ b/parser/testdata/03719_ntile_no_partition_by_check/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal 'With Partition By'" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001331433, + "rows_read": 5, + "bytes_read": 188 + } +} diff --git a/parser/testdata/03719_ntile_no_partition_by_check/metadata.json b/parser/testdata/03719_ntile_no_partition_by_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03719_ntile_no_partition_by_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03719_ntile_no_partition_by_check/query.sql b/parser/testdata/03719_ntile_no_partition_by_check/query.sql new file mode 100644 index 000000000..62fd6b398 --- /dev/null +++ b/parser/testdata/03719_ntile_no_partition_by_check/query.sql @@ -0,0 +1,15 @@ +SELECT 'With Partition By'; + +SELECT + round(exp(number), 3) 
AS x, + percent_rank(x) OVER (ORDER BY number ASC) AS rank, + ntile(10) OVER (PARTITION BY 1 ORDER BY number ASC) AS bucket +FROM numbers(11); + +SELECT 'No Partition By'; + +SELECT + round(exp(number), 3) AS x, + percent_rank(x) OVER (ORDER BY number ASC) AS rank, + ntile(10) OVER (ORDER BY number ASC) AS bucket +FROM numbers(11); diff --git a/parser/testdata/03720_const_limit_to_scalar/ast.json b/parser/testdata/03720_const_limit_to_scalar/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03720_const_limit_to_scalar/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03720_const_limit_to_scalar/metadata.json b/parser/testdata/03720_const_limit_to_scalar/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03720_const_limit_to_scalar/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03720_const_limit_to_scalar/query.sql b/parser/testdata/03720_const_limit_to_scalar/query.sql new file mode 100644 index 000000000..c449a69f8 --- /dev/null +++ b/parser/testdata/03720_const_limit_to_scalar/query.sql @@ -0,0 +1,17 @@ +-- Tags: distributed + +-- https://github.com/ClickHouse/ClickHouse/issues/89607 + +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; + +CREATE TABLE t0 (c0 Int) ENGINE = MergeTree() ORDER BY tuple(); +CREATE TABLE t1 ENGINE = Distributed(test_shard_localhost, currentDatabase(), 't0'); + +SELECT c0 FROM t1 OFFSET -10 ROWS SETTINGS optimize_const_name_size = 1; +SELECT c0 FROM t1 OFFSET 10 ROWS SETTINGS optimize_const_name_size = 1; +SELECT c0 FROM t1 LIMIT -10 SETTINGS optimize_const_name_size = 1; +SELECT c0 FROM t1 LIMIT 10 SETTINGS optimize_const_name_size = 1; + +DROP TABLE IF EXISTS t0; +DROP TABLE IF EXISTS t1; diff --git a/parser/testdata/03720_datetime64_bad_inference/ast.json b/parser/testdata/03720_datetime64_bad_inference/ast.json new file mode 100644 index 000000000..598f90e93 --- /dev/null +++ b/parser/testdata/03720_datetime64_bad_inference/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001328679, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03720_datetime64_bad_inference/metadata.json b/parser/testdata/03720_datetime64_bad_inference/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03720_datetime64_bad_inference/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03720_datetime64_bad_inference/query.sql b/parser/testdata/03720_datetime64_bad_inference/query.sql new file mode 100644 index 000000000..d9c76a1bc --- /dev/null +++ b/parser/testdata/03720_datetime64_bad_inference/query.sql @@ -0,0 +1,4 @@ +set date_time_input_format='basic'; +desc format(JSONEachRow, '{"d" : "5981 10:01.000"}'); +select * from format(JSONEachRow, '{"d" : "5981 10:01.000"}'); + diff --git a/parser/testdata/03720_file_engine_second_crash/ast.json b/parser/testdata/03720_file_engine_second_crash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03720_file_engine_second_crash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03720_file_engine_second_crash/metadata.json b/parser/testdata/03720_file_engine_second_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03720_file_engine_second_crash/metadata.json @@ 
-0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03720_file_engine_second_crash/query.sql b/parser/testdata/03720_file_engine_second_crash/query.sql new file mode 100644 index 000000000..25aec3add --- /dev/null +++ b/parser/testdata/03720_file_engine_second_crash/query.sql @@ -0,0 +1,2 @@ +-- Fuzzer. Second argument is some complex expression. +CREATE TABLE `t141` (`c0` Date) ENGINE = File(`c0`, (+`c0`.2) >= ANY(SELECT '307:21:40.753024937'::Time64(3)::Time64(3) OFFSET 0 ROWS)); -- {serverError BAD_ARGUMENTS} \ No newline at end of file diff --git a/parser/testdata/03720_ntile_double_order_by_check/ast.json b/parser/testdata/03720_ntile_double_order_by_check/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03720_ntile_double_order_by_check/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03720_ntile_double_order_by_check/metadata.json b/parser/testdata/03720_ntile_double_order_by_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03720_ntile_double_order_by_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03720_ntile_double_order_by_check/query.sql b/parser/testdata/03720_ntile_double_order_by_check/query.sql new file mode 100644 index 000000000..d91244fb0 --- /dev/null +++ b/parser/testdata/03720_ntile_double_order_by_check/query.sql @@ -0,0 +1,43 @@ +-- { echoOn } + +SELECT + ntile(1) OVER (ORDER BY id ASC) AS a, + ntile(2) OVER (ORDER BY id ASC) AS b +FROM +( + SELECT 1 AS id +) AS t; + +SELECT + ntile(1) OVER (ORDER BY id ASC) AS a, + ntile(2) OVER (ORDER BY id + 0 ASC) AS b +FROM +( + SELECT 1 AS id +) AS t; + +SELECT + ntile(1) OVER (ORDER BY id ASC ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS a, + ntile(2) OVER (ORDER BY id ASC ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS b +FROM +( + SELECT 1 AS id +) AS t; + + +DROP TABLE IF EXISTS test_ntile; + +CREATE TABLE test_ntile +( + id Int32 +) +ENGINE = Memory; + +INSERT INTO test_ntile VALUES (1), (2), (3), (4), (5), (6); + +SELECT + id, + ntile(1) OVER (ORDER BY id ASC) AS a, + ntile(2) OVER (ORDER BY id ASC) AS b +FROM test_ntile +ORDER BY id ASC; diff --git a/parser/testdata/03720_numbers_table_function_with_step_edge_cases/ast.json b/parser/testdata/03720_numbers_table_function_with_step_edge_cases/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03720_numbers_table_function_with_step_edge_cases/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03720_numbers_table_function_with_step_edge_cases/metadata.json b/parser/testdata/03720_numbers_table_function_with_step_edge_cases/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03720_numbers_table_function_with_step_edge_cases/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03720_numbers_table_function_with_step_edge_cases/query.sql b/parser/testdata/03720_numbers_table_function_with_step_edge_cases/query.sql new file mode 100644 index 000000000..7d328dbc7 --- /dev/null +++ b/parser/testdata/03720_numbers_table_function_with_step_edge_cases/query.sql @@ -0,0 +1,130 @@ +-- { echoOn } + +-- Logical error query +SELECT DISTINCT number * 1 +FROM numbers(10, sipHash64(sipHash64(sipHash64(2), 1), 1, 2, *), sipHash64(sipHash64(29103473, sipHash64(1), '3', sipHash64(1), 1))) +GROUP BY + 1, + isNullable(1) + WITH TOTALS +ORDER BY 1 ASC SETTINGS enable_analyzer = 1; + +-- 
Simplified version of the above query +SELECT number FROM numbers(10, 14630045721179951620, 6670599363308407409); + +SELECT number FROM numbers(10, 14630045721179951620, 6670599363308407409) LIMIT 10; + +SELECT count(), min(number), max(number), sum(number) FROM numbers(0, 1000, 1); + +SELECT count(), min(number), max(number), sum(number) FROM numbers(5, 1000, 7); + +SELECT number FROM numbers(18446744073709551614, 5, 1); + +SELECT number FROM numbers(18446744073709551610, 10, 1); + +SELECT number FROM numbers(18446744073709551610, 10, 3); + +SELECT number FROM numbers(18446744073709551615, 18446744073709551615, 18446744073709551615); + +SELECT number FROM numbers(18446744073709551615, 18446744073709551615, 1844674407370955161); + +SELECT number FROM numbers(5, 18446744073709551615, 1) LIMIT 10; + +SELECT number FROM numbers(0, 1000, 2) WHERE number BETWEEN 10 AND 40; + +SELECT number FROM numbers(18446744073709551610, 10, 3) WHERE number >= 18446744073709551612; + +SELECT number FROM numbers(18446744073709551615, 10, 3) WHERE number >= 5; + +SELECT number FROM numbers(18446744073709551610, 10, 3) WHERE number <= 5; + +SELECT number FROM numbers(18446744073709551610, 10, 3) WHERE number >= 18446744073709551612 OR number <= 5; + +SELECT count(), min(number), max(number), sum(number) FROM numbers(0, 1000, 1) WHERE number % 3 = 0; + +SELECT number FROM numbers(100, 10, 1) WHERE number < 50; + +SELECT number FROM system.numbers WHERE number < 10; + +SELECT * FROM numbers(10) LIMIT 0; + +SELECT * FROM system.numbers LIMIT 0; + +SET max_threads = 10; + +SELECT number FROM numbers_mt(10, 14630045721179951620, 6670599363308407409); + +SELECT number FROM numbers_mt(10, 14630045721179951620, 6670599363308407409) LIMIT 10; + +SELECT count(), min(number), max(number), sum(number) FROM numbers_mt(0, 1000, 1); + +SELECT count(), min(number), max(number), sum(number) FROM numbers_mt(5, 1000, 7); + +SELECT number FROM numbers_mt(18446744073709551614, 5, 1); + +SELECT number FROM numbers_mt(18446744073709551610, 10, 1); + +SELECT number FROM numbers_mt(18446744073709551610, 10, 3); + +SELECT number FROM numbers_mt(18446744073709551615, 18446744073709551615, 18446744073709551615); + +SELECT number FROM numbers_mt(18446744073709551615, 18446744073709551615, 1844674407370955161); + +SELECT number FROM numbers_mt(5, 18446744073709551615, 1) LIMIT 10; + +SELECT number FROM numbers_mt(0, 1000, 2) WHERE number BETWEEN 10 AND 40; + +SELECT number FROM numbers_mt(18446744073709551610, 10, 3) WHERE number >= 18446744073709551612; + +SELECT number FROM numbers_mt(18446744073709551615, 10, 3) WHERE number >= 5; + +SELECT number FROM numbers_mt(18446744073709551610, 10, 3) WHERE number <= 5; + +SELECT number FROM numbers_mt(18446744073709551610, 10, 3) WHERE number >= 18446744073709551612 OR number <= 5; + +SELECT count(), min(number), max(number), sum(number) FROM numbers_mt(0, 1000, 1) WHERE number % 3 = 0; + +SELECT number FROM numbers_mt(100, 10, 1) WHERE number < 50; + +SELECT number FROM system.numbers_mt WHERE number < 10; + +SELECT * FROM system.numbers_mt LIMIT 0; + +SELECT number FROM system.numbers_mt WHERE number < 100 LIMIT 5; + +SELECT number FROM system.numbers_mt WHERE number < 1000 LIMIT 5; + +SELECT count(), min(number), max(number), sum(number) FROM system.numbers_mt WHERE number < 1000000; + +SELECT count(), min(number), max(number), sum(number) FROM system.numbers_mt +WHERE number >= 5 AND number < 7000; + +SELECT number FROM system.numbers_mt +WHERE (number BETWEEN 10 AND 15) OR (number BETWEEN 
100 AND 105); + +SELECT number FROM system.numbers_mt +WHERE (number BETWEEN 10 AND 20) OR (number BETWEEN 100 AND 110) LIMIT 7; + +SELECT count(), min(number), max(number), sum(number) FROM system.numbers_mt +WHERE number >= 100000 AND number < 100000 + 1000000; + +SELECT count(), min(number), max(number), sum(number) FROM system.numbers_mt +WHERE number >= 500 AND number < 2000; + +SELECT count(), min(number), max(number), sum(number) FROM system.numbers_mt +WHERE (number BETWEEN 0 AND 50) + OR (number BETWEEN 1000 AND 1100) + OR (number BETWEEN 100000 AND 100100); + +SELECT count(), min(number), max(number), sum(number) FROM system.numbers_mt +WHERE (number BETWEEN 2 AND 50) + OR (number BETWEEN 30 AND 60) + OR (number BETWEEN 50 AND 70); + +SELECT count(), min(number), max(number), sum(number) +FROM system.numbers_mt +WHERE number < 1000; + +SELECT number FROM system.numbers_mt WHERE number BETWEEN 123456 AND 1000000 LIMIT 10; + +SELECT * FROM system.numbers_mt LIMIT 0; diff --git a/parser/testdata/03720_ubsan_dictionary_parameters/ast.json b/parser/testdata/03720_ubsan_dictionary_parameters/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03720_ubsan_dictionary_parameters/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03720_ubsan_dictionary_parameters/metadata.json b/parser/testdata/03720_ubsan_dictionary_parameters/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03720_ubsan_dictionary_parameters/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03720_ubsan_dictionary_parameters/query.sql b/parser/testdata/03720_ubsan_dictionary_parameters/query.sql new file mode 100644 index 000000000..611ea95af --- /dev/null +++ b/parser/testdata/03720_ubsan_dictionary_parameters/query.sql @@ -0,0 +1,11 @@ +-- https://github.com/ClickHouse/ClickHouse/issues/78506 +-- Checks that the dictionary parameters are read correctly + +CREATE DICTIONARY `d55` (`c0` SimpleAggregateFunction(anyLast, Date) +DEFAULT '75942d37-37c4-8ea0-4175-1a4e0cb18c3b' INJECTIVE) +PRIMARY KEY (`c0`) +SOURCE(CLICKHOUSE(DB currentDatabase() TABLE 't13')) +LAYOUT(HASHED(SHARD_LOAD_QUEUE_BACKLOG 2147483648)) +LIFETIME(2); + +SELECT * FROM d55; -- { serverError UNKNOWN_TABLE } diff --git a/parser/testdata/03721_aggregate_projection_actions_dag/ast.json b/parser/testdata/03721_aggregate_projection_actions_dag/ast.json new file mode 100644 index 000000000..a904671ee --- /dev/null +++ b/parser/testdata/03721_aggregate_projection_actions_dag/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00106565, + "rows_read": 2, + "bytes_read": 61 + } +} diff --git a/parser/testdata/03721_aggregate_projection_actions_dag/metadata.json b/parser/testdata/03721_aggregate_projection_actions_dag/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03721_aggregate_projection_actions_dag/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03721_aggregate_projection_actions_dag/query.sql b/parser/testdata/03721_aggregate_projection_actions_dag/query.sql new file mode 100644 index 000000000..7a913ad94 --- /dev/null +++ b/parser/testdata/03721_aggregate_projection_actions_dag/query.sql @@ -0,0 +1,42 @@ +CREATE TABLE test +( + `key` UInt64, + 
`value` Int64 +) +ENGINE = MergeTree +ORDER BY key; + +INSERT INTO test SELECT cityHash64(number) AS key, number AS value FROM numbers(100); + +SET enable_parallel_replicas = 0; +SET enable_analyzer = 1; +EXPLAIN PLAN +WITH + view_1 AS + ( + SELECT + key, + ROW_NUMBER() OVER (PARTITION BY key) AS rn + FROM test + ), + view_2 AS + ( + SELECT + key, + count() > 0 AS has_any + FROM test + GROUP BY + key + ), + events AS + ( + SELECT + * + FROM view_1 AS v1 + INNER JOIN view_2 AS v2_1 USING (key) + LEFT JOIN view_2 AS v2_2 USING (key) + WHERE v1.rn = 1 + ) +SELECT count() +FROM events +WHERE v2_1.has_any; diff --git a/parser/testdata/03721_concurrent_right_join_flags_per_row_bug/ast.json b/parser/testdata/03721_concurrent_right_join_flags_per_row_bug/ast.json new file mode 100644 index 000000000..d7ff68e01 --- /dev/null +++ b/parser/testdata/03721_concurrent_right_join_flags_per_row_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00098797, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03721_concurrent_right_join_flags_per_row_bug/metadata.json b/parser/testdata/03721_concurrent_right_join_flags_per_row_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03721_concurrent_right_join_flags_per_row_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03721_concurrent_right_join_flags_per_row_bug/query.sql b/parser/testdata/03721_concurrent_right_join_flags_per_row_bug/query.sql new file mode 100644 index 000000000..7faeb2e10 --- /dev/null +++ b/parser/testdata/03721_concurrent_right_join_flags_per_row_bug/query.sql @@ -0,0 +1,12 @@ +SET join_algorithm = 'parallel_hash'; +SET enable_analyzer = 1; + +SET join_use_nulls = 1; + +SELECT ty.number, sipHash64(ty.number + 1) % 100 as a, tw.number, sipHash64(tw.number) % 100 as b +FROM numbers(1, 4) ty +RIGHT JOIN numbers(1, 4) tw +ON tw.number = ty.number + AND a <= b +ORDER BY ALL +; diff --git a/parser/testdata/03721_insert_replicated_array_nested_sizes_check/ast.json b/parser/testdata/03721_insert_replicated_array_nested_sizes_check/ast.json new file mode 100644 index 000000000..1f75aff95 --- /dev/null +++ b/parser/testdata/03721_insert_replicated_array_nested_sizes_check/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00111036, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03721_insert_replicated_array_nested_sizes_check/metadata.json b/parser/testdata/03721_insert_replicated_array_nested_sizes_check/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03721_insert_replicated_array_nested_sizes_check/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03721_insert_replicated_array_nested_sizes_check/query.sql b/parser/testdata/03721_insert_replicated_array_nested_sizes_check/query.sql new file mode 100644 index 000000000..512a89731 --- /dev/null +++ b/parser/testdata/03721_insert_replicated_array_nested_sizes_check/query.sql @@ -0,0 +1,7 @@ +set enable_analyzer=1; + +drop table if exists test; +create table test (a Array(UInt64)) engine=MergeTree order by tuple(); +insert into test select range(number) from numbers(3) array join range(number + 1); +select * from 
test; +drop table test; diff --git a/parser/testdata/03721_join_residual_condition_bug_88635/ast.json b/parser/testdata/03721_join_residual_condition_bug_88635/ast.json new file mode 100644 index 000000000..0a023227d --- /dev/null +++ b/parser/testdata/03721_join_residual_condition_bug_88635/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.00125368, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03721_join_residual_condition_bug_88635/metadata.json b/parser/testdata/03721_join_residual_condition_bug_88635/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03721_join_residual_condition_bug_88635/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03721_join_residual_condition_bug_88635/query.sql b/parser/testdata/03721_join_residual_condition_bug_88635/query.sql new file mode 100644 index 000000000..b91746847 --- /dev/null +++ b/parser/testdata/03721_join_residual_condition_bug_88635/query.sql @@ -0,0 +1,8 @@ +SET enable_analyzer = 1; + +SELECT 1 FROM numbers(3) tx +JOIN numbers(3) ty + ON tx.number = ty.number +JOIN numbers(3) tz + ON tz.number = ty.number AND if(tx.number % 2 == 0, 1, 2) != if(tz.number % 2 == 0, 1, 3) +SETTINGS query_plan_use_logical_join_step = 0; diff --git a/parser/testdata/03721_right_join_logical_step/ast.json b/parser/testdata/03721_right_join_logical_step/ast.json new file mode 100644 index 000000000..15de85f56 --- /dev/null +++ b/parser/testdata/03721_right_join_logical_step/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001001576, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03721_right_join_logical_step/metadata.json b/parser/testdata/03721_right_join_logical_step/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03721_right_join_logical_step/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03721_right_join_logical_step/query.sql b/parser/testdata/03721_right_join_logical_step/query.sql new file mode 100644 index 000000000..2a3b3c5fe --- /dev/null +++ b/parser/testdata/03721_right_join_logical_step/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 UInt32, c1 UInt64) ENGINE = Memory; +INSERT INTO TABLE t0 (c0, c1) VALUES (1, 1); +SELECT ty.c0 FROM t0 RIGHT JOIN numbers(1) AS tx ON number = t0.c1 AND tx.number = t0.c0 CROSS JOIN t0 ty SETTINGS query_plan_join_swap_table = true, query_plan_use_new_logical_join_step = false; +DROP TABLE t0; diff --git a/parser/testdata/03721_statistics_alter_type_bug/ast.json b/parser/testdata/03721_statistics_alter_type_bug/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03721_statistics_alter_type_bug/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03721_statistics_alter_type_bug/metadata.json b/parser/testdata/03721_statistics_alter_type_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03721_statistics_alter_type_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03721_statistics_alter_type_bug/query.sql 
b/parser/testdata/03721_statistics_alter_type_bug/query.sql new file mode 100644 index 000000000..4bf35f21e --- /dev/null +++ b/parser/testdata/03721_statistics_alter_type_bug/query.sql @@ -0,0 +1,21 @@ +-- Tags: no-fasttest + +DROP TABLE IF EXISTS column_modify_test; +set allow_statistics_optimize=1; +CREATE TABLE column_modify_test (id UInt64, val String, other_col UInt64) engine=MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part=0, auto_statistics_types='uniq,countmin'; +INSERT INTO column_modify_test VALUES (1,'one',0); + +ALTER TABLE column_modify_test MODIFY COLUMN val Nullable(String); + +alter table column_modify_test update other_col=1 where id = 1 SETTINGS mutations_sync=1; + +SELECT *, throwIf(val <> 'one') as issue FROM column_modify_test WHERE id = 1 FORMAT NULL; +DROP TABLE column_modify_test; +CREATE TABLE column_modify_test (id UInt64, val Nullable(String), other_col UInt64) engine=MergeTree ORDER BY id SETTINGS min_bytes_for_wide_part=0, auto_statistics_types='uniq,countmin'; +INSERT INTO column_modify_test VALUES (1,'one',0); + +ALTER TABLE column_modify_test MODIFY COLUMN val String; + +alter table column_modify_test update other_col=1 where id = 1 SETTINGS mutations_sync=1; + +SELECT *, throwIf(val <> 'one') as issue FROM column_modify_test WHERE id = 1 FORMAT NULL; diff --git a/parser/testdata/03722_function_trim_ltrim_rtrim_alias/ast.json b/parser/testdata/03722_function_trim_ltrim_rtrim_alias/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03722_function_trim_ltrim_rtrim_alias/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03722_function_trim_ltrim_rtrim_alias/metadata.json b/parser/testdata/03722_function_trim_ltrim_rtrim_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03722_function_trim_ltrim_rtrim_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03722_function_trim_ltrim_rtrim_alias/query.sql b/parser/testdata/03722_function_trim_ltrim_rtrim_alias/query.sql new file mode 100644 index 000000000..1ea32a63c --- /dev/null +++ b/parser/testdata/03722_function_trim_ltrim_rtrim_alias/query.sql @@ -0,0 +1,30 @@ +-- { echoOn } + +SELECT ltrim(' leading '), trimLeft(' leading '); +SELECT ltrim('xxleadingxx', 'x'), trimLeft('xxleadingxx', 'x'); + +SELECT rtrim(' trailing '), trimRight(' trailing '); +SELECT rtrim('xxtrailingxx', 'x'), trimRight('xxtrailingxx', 'x'); + +SELECT trim(' both '), trimBoth(' both '); +SELECT trim('$$both$$', '$'), trimBoth('$$both$$', '$'); +SELECT TRIM(' both '), trimBoth(' both '); + +SELECT TRIM(BOTH '$' FROM '$$both$$'), trimBoth('$$both$$', '$'); +SELECT TRIM(LEADING '$' FROM '$$both$$'), trimLeft('$$both$$', '$'); +SELECT TRIM(TRAILING '$' FROM '$$both$$'), trimRight('$$both$$', '$'); +SELECT TRIM(BOTH '' FROM 'xx'), trimBoth('xx', ''); +SELECT TRIM(LEADING '' FROM 'xx'), trimLeft('xx', ''); +SELECT TRIM(TRAILING '' FROM 'xx'), trimRight('xx', ''); +SELECT TRIM(BOTH concat('$', '$') FROM '$$both$$'), trimBoth('$$both$$', '$$'); + +SELECT ltrim('\t abc', '\t '), trimLeft('\t abc', '\t '); +SELECT rtrim('abc\t ', '\t '), trimRight('abc\t ', '\t '); + +SELECT TrIm(' x '), trimBoth(' x '); + +SELECT LTRIM(' x '), trimLeft(' x '); +SELECT LtRiM(' x '), trimLeft(' x '); + +SELECT RTRIM(' x '), trimRight(' x '); +SELECT RtRiM(' x '), trimRight(' x '); diff --git a/parser/testdata/03722_json_compact_part_substreams_cache_bug/ast.json 
b/parser/testdata/03722_json_compact_part_substreams_cache_bug/ast.json new file mode 100644 index 000000000..e3af7a84e --- /dev/null +++ b/parser/testdata/03722_json_compact_part_substreams_cache_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001407259, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03722_json_compact_part_substreams_cache_bug/metadata.json b/parser/testdata/03722_json_compact_part_substreams_cache_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03722_json_compact_part_substreams_cache_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03722_json_compact_part_substreams_cache_bug/query.sql b/parser/testdata/03722_json_compact_part_substreams_cache_bug/query.sql new file mode 100644 index 000000000..af67b1ed8 --- /dev/null +++ b/parser/testdata/03722_json_compact_part_substreams_cache_bug/query.sql @@ -0,0 +1,7 @@ +SET use_variant_as_common_type = 1; +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (json Nullable(JSON)) ENGINE = MergeTree ORDER BY tuple() SETTINGS index_granularity=1, write_marks_for_substreams_in_compact_parts=1, min_bytes_for_wide_part='100G'; +INSERT INTO t0 SELECT toJSONString(map('a.b.d', CAST(number, 'UInt32'), 'a.b.e', concat('str_', toString(number)))) FROM numbers(3); +SELECT json.a.b.e, json.a.b.e.:Int64, json.^a FROM t0; +DROP TABLE t0; + diff --git a/parser/testdata/03722_random_utf8_bug/ast.json b/parser/testdata/03722_random_utf8_bug/ast.json new file mode 100644 index 000000000..39911ee40 --- /dev/null +++ b/parser/testdata/03722_random_utf8_bug/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function randomStringUTF8 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function plus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Function minus (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal UInt64_18446744073709551615" + }, + { + "explain": " Literal UInt64_1000" + }, + { + "explain": " Function multiply (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier number" + }, + { + "explain": " Literal UInt64_2003" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_2" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001181232, + "rows_read": 22, + "bytes_read": 912 + } +} diff --git a/parser/testdata/03722_random_utf8_bug/metadata.json b/parser/testdata/03722_random_utf8_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03722_random_utf8_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03722_random_utf8_bug/query.sql b/parser/testdata/03722_random_utf8_bug/query.sql new 
file mode 100644 index 000000000..7b08f5623 --- /dev/null +++ b/parser/testdata/03722_random_utf8_bug/query.sql @@ -0,0 +1 @@ +select randomStringUTF8(18446744073709551615-1000+number*2003) from numbers(2); -- { serverError TOO_LARGE_STRING_SIZE } diff --git a/parser/testdata/03723_incorrect_implicit_projection/ast.json b/parser/testdata/03723_incorrect_implicit_projection/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03723_incorrect_implicit_projection/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03723_incorrect_implicit_projection/metadata.json b/parser/testdata/03723_incorrect_implicit_projection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03723_incorrect_implicit_projection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03723_incorrect_implicit_projection/query.sql b/parser/testdata/03723_incorrect_implicit_projection/query.sql new file mode 100644 index 000000000..693054eb3 --- /dev/null +++ b/parser/testdata/03723_incorrect_implicit_projection/query.sql @@ -0,0 +1,45 @@ +-- See also 03604_key_condition_set_tuple_bug.sql + +DROP TABLE IF EXISTS prd_bid_events_simple_no_partition; +CREATE TABLE prd_bid_events_simple_no_partition +( + `type` LowCardinality(String), + `timestamp` DateTime64(9) +) +ENGINE = MergeTree() +PRIMARY KEY (timestamp, type) +ORDER BY (timestamp, type); + +INSERT INTO prd_bid_events_simple_no_partition +SELECT + arrayElement([ + 'impression', + 'start', + 'firstQuartile', + 'midpoint', + 'thirdQuartile', + 'complete', + 'ad_request', + 'random_value' + ], 1 + (number % 8)), + toDateTime64('2025-11-19 14:26:52' - toIntervalDay(number % 30) - toIntervalSecond(number % 86400) - toIntervalMillisecond(number % 1000), 9) +FROM numbers(500000); + +SELECT + type, + count() +FROM prd_bid_events_simple_no_partition +WHERE date(timestamp) = '2025-11-01' +GROUP BY type +HAVING type = 'ad_request'; + +SELECT count() +FROM prd_bid_events_simple_no_partition +WHERE (date(timestamp) = '2025-11-01') AND (type = 'ad_request') +SETTINGS optimize_use_implicit_projections = 0; + +SELECT + count() +FROM prd_bid_events_simple_no_partition +WHERE (date(timestamp) = '2025-11-01') AND (type = 'ad_request') +SETTINGS optimize_use_implicit_projections = 1; diff --git a/parser/testdata/03724_filter_assume_not_null_materialize/ast.json b/parser/testdata/03724_filter_assume_not_null_materialize/ast.json new file mode 100644 index 000000000..b9bdf07bd --- /dev/null +++ b/parser/testdata/03724_filter_assume_not_null_materialize/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.000983891, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03724_filter_assume_not_null_materialize/metadata.json b/parser/testdata/03724_filter_assume_not_null_materialize/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03724_filter_assume_not_null_materialize/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03724_filter_assume_not_null_materialize/query.sql b/parser/testdata/03724_filter_assume_not_null_materialize/query.sql new file mode 100644 index 000000000..0411708f5 --- /dev/null +++ b/parser/testdata/03724_filter_assume_not_null_materialize/query.sql @@ -0,0 +1,19 @@ +SET allow_not_comparable_types_in_comparison_functions = 
0; + +SELECT (assumeNotNull((NULL)), 1); -- { serverError ILLEGAL_COLUMN } + +SELECT (assumeNotNull(materialize(NULL)), 1); -- { serverError ILLEGAL_COLUMN } + +SELECT 1 WHERE (assumeNotNull(NULL), 1) = (1, 1); -- { serverError ILLEGAL_COLUMN } + +SELECT 1 WHERE (assumeNotNull(materialize(NULL)), 1) = (1, 1); -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER } + +SET allow_not_comparable_types_in_comparison_functions = 1; + +SELECT (assumeNotNull((NULL)), 1); -- { serverError ILLEGAL_COLUMN } + +SELECT (assumeNotNull(materialize(NULL)), 1); -- { serverError ILLEGAL_COLUMN } + +SELECT 1 WHERE (assumeNotNull(NULL), 1) = (1, 1); -- { serverError ILLEGAL_COLUMN } + +SELECT 1 WHERE (assumeNotNull(materialize(NULL)), 1) = (1, 1); -- { serverError ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER } diff --git a/parser/testdata/03724_parallel_replicas_duplicate_requests/ast.json b/parser/testdata/03724_parallel_replicas_duplicate_requests/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03724_parallel_replicas_duplicate_requests/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03724_parallel_replicas_duplicate_requests/metadata.json b/parser/testdata/03724_parallel_replicas_duplicate_requests/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03724_parallel_replicas_duplicate_requests/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03724_parallel_replicas_duplicate_requests/query.sql b/parser/testdata/03724_parallel_replicas_duplicate_requests/query.sql new file mode 100644 index 000000000..d00e91073 --- /dev/null +++ b/parser/testdata/03724_parallel_replicas_duplicate_requests/query.sql @@ -0,0 +1,30 @@ +-- Tags: no-fasttest, no-random-merge-tree-settings, no-random-settings, no-parallel +-- no-parallel - due to usage of fail points +-- A lot of stars need to align for the issue to reproduce. Thus all the overridden settings are explicitly specified within the test and on top of that settings randomization is disabled. 
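+-- Test shape (for orientation; all names appear in the statements below): enable the parallel_replicas_reading_response_timeout failpoint, run a query that is expected to fail with SOCKET_TIMEOUT, disable the failpoint, then repeat the sequence once more with a different merge_tree_min_read_task_size.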
+ +SET max_insert_threads=2, group_by_two_level_threshold=94218, group_by_two_level_threshold_bytes=12678590, distributed_aggregation_memory_efficient=1, fsync_metadata=0, output_format_parallel_formatting=1, input_format_parallel_parsing=1, min_chunk_bytes_for_parallel_parsing=19408425, max_read_buffer_size=740278, prefer_localhost_replica=0, max_block_size=9777, max_joined_block_size_rows=61084, joined_block_split_single_row=1, join_output_by_rowlist_perkey_rows_threshold=982, max_threads=3, optimize_append_index=0, use_hedged_requests=0, optimize_if_chain_to_multiif=0, optimize_if_transform_strings_to_enum=0, optimize_read_in_order=1, optimize_or_like_chain=1, optimize_substitute_columns=1, enable_multiple_prewhere_read_steps=0, read_in_order_two_level_merge_threshold=72, optimize_aggregation_in_order=1, aggregation_in_order_max_block_bytes=14627641, use_uncompressed_cache=0, min_bytes_to_use_direct_io=7726182779, min_bytes_to_use_mmap_io=10737418240, local_filesystem_read_method='read', remote_filesystem_read_method='threadpool', local_filesystem_read_prefetch=0, filesystem_cache_segments_batch_size=1, read_from_filesystem_cache_if_exists_otherwise_bypass_cache=0, throw_on_error_from_cache_on_write_operations=1, remote_filesystem_read_prefetch=0, allow_prefetched_read_pool_for_remote_filesystem=1, filesystem_prefetch_max_memory_usage='128Mi', filesystem_prefetches_limit=10, filesystem_prefetch_min_bytes_for_single_read_task='8Mi', filesystem_prefetch_step_marks=50, filesystem_prefetch_step_bytes='100Mi', compile_expressions=1, compile_aggregate_expressions=0, compile_sort_description=1, merge_tree_coarse_index_granularity=16, optimize_distinct_in_order=1, max_bytes_before_remerge_sort=615312931, min_compress_block_size=396667, max_compress_block_size=1465735, merge_tree_compact_parts_min_granules_to_multibuffer_read=109, optimize_sorting_by_input_stream_properties=1, http_response_buffer_size=7378705, http_wait_end_of_query='True', enable_memory_bound_merging_of_aggregation_results=1, min_count_to_compile_expression=3, min_count_to_compile_aggregate_expression=0, min_count_to_compile_sort_description=0, session_timezone='Africa/Khartoum', use_page_cache_for_disks_without_file_cache='False', page_cache_inject_eviction='False', merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability=0.69, prefer_external_sort_block_bytes=0, cross_join_min_rows_to_compress=100000000, cross_join_min_bytes_to_compress=100000000, min_external_table_block_size_bytes=0, max_parsing_threads=10, optimize_functions_to_subcolumns=0, parallel_replicas_local_plan=1, query_plan_join_swap_table='auto', enable_vertical_final=0, optimize_extract_common_expressions=1, use_async_executor_for_materialized_views=1, use_query_condition_cache=0, secondary_indices_enable_bulk_filtering=1, use_skip_indexes_if_final=1, use_skip_indexes_on_data_read=0, optimize_rewrite_like_perfect_affix=0, input_format_parquet_use_native_reader_v3=0, enable_lazy_columns_replication=0, allow_special_serialization_kinds_in_output_formats=0, max_bytes_before_external_sort=0, max_bytes_before_external_group_by=0, max_bytes_ratio_before_external_sort=0, max_bytes_ratio_before_external_group_by=0, use_skip_indexes_if_final_exact_mode=1; + +CREATE TABLE t(a UInt64, s String) ENGINE = MergeTree ORDER BY a SETTINGS ratio_of_defaults_for_sparse_serialization=1.0, prefer_fetch_merged_part_size_threshold=4900758417, vertical_merge_algorithm_min_rows_to_activate=380389, vertical_merge_algorithm_min_columns_to_activate=76, 
allow_vertical_merges_from_compact_to_wide_parts=1, min_merge_bytes_to_use_direct_io=9937308181, index_granularity_bytes=5843975, merge_max_block_size=15847, index_granularity=60662, marks_compress_block_size=98783, primary_key_compress_block_size=79758, replace_long_file_name_to_hash=1, max_file_name_length=0, min_bytes_for_full_part_storage=536870912, compact_parts_max_bytes_to_buffer=349746021, compact_parts_max_granules_to_buffer=256, compact_parts_merge_max_bytes_to_prefetch_part=16703512, cache_populated_by_fetch=0, concurrent_part_removal_threshold=93, old_parts_lifetime=10, prewarm_mark_cache=0, use_const_adaptive_granularity=0, enable_index_granularity_compression=0, enable_block_number_column=1, enable_block_offset_column=0, use_primary_key_cache=1, prewarm_primary_key_cache=0, object_serialization_version='v3', object_shared_data_serialization_version='advanced', object_shared_data_serialization_version_for_zero_level_parts='advanced', object_shared_data_buckets_for_compact_part=25, object_shared_data_buckets_for_wide_part=13, dynamic_serialization_version='v2', auto_statistics_types='tdigest,uniq', serialization_info_version='basic', string_serialization_version='with_size_stream', enable_shared_storage_snapshot_in_query=0, storage_policy = 's3_cache', min_rows_for_wide_part = 10000, min_bytes_for_wide_part = 0; + +INSERT INTO t SELECT *, randomString(100) FROM numbers_mt(3_000_000); + +SET enable_analyzer = 1; + +SET max_threads = 3, merge_tree_min_read_task_size = 1; + +SET enable_parallel_replicas = 2, max_parallel_replicas = 3, parallel_replicas_for_non_replicated_merge_tree = 1, cluster_for_parallel_replicas = 'parallel_replicas'; + +SYSTEM ENABLE FAILPOINT parallel_replicas_reading_response_timeout; + +SELECT * FROM t FORMAT Null; -- { serverError SOCKET_TIMEOUT } + +SYSTEM DISABLE FAILPOINT parallel_replicas_reading_response_timeout; + +SYSTEM ENABLE FAILPOINT parallel_replicas_reading_response_timeout; + +SET max_threads = 3, merge_tree_min_read_task_size = 1000; + +SELECT * FROM t ORDER BY a FORMAT Null; -- { serverError SOCKET_TIMEOUT } + +SYSTEM DISABLE FAILPOINT parallel_replicas_reading_response_timeout; + diff --git a/parser/testdata/03724_to_date_time_or_null_negative_arg_bug/ast.json b/parser/testdata/03724_to_date_time_or_null_negative_arg_bug/ast.json new file mode 100644 index 000000000..e893b1246 --- /dev/null +++ b/parser/testdata/03724_to_date_time_or_null_negative_arg_bug/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001312943, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03724_to_date_time_or_null_negative_arg_bug/metadata.json b/parser/testdata/03724_to_date_time_or_null_negative_arg_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03724_to_date_time_or_null_negative_arg_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03724_to_date_time_or_null_negative_arg_bug/query.sql b/parser/testdata/03724_to_date_time_or_null_negative_arg_bug/query.sql new file mode 100644 index 000000000..8653b99df --- /dev/null +++ b/parser/testdata/03724_to_date_time_or_null_negative_arg_bug/query.sql @@ -0,0 +1,58 @@ +set session_timezone='UTC'; +-- toDateOrNull: pre-epoch and far future dates +select 'toDateOrNull:'; +select '1960-01-01' as input, toDateOrNull('1960-01-01') as result; +select '1800-01-01' as input, 
toDateOrNull('1800-01-01') as result; +select '3000-01-01' as input, toDateOrNull('3000-01-01') as result; + +-- toDateTimeOrNull: pre-epoch and far future datetimes +select 'toDateTimeOrNull:'; +select '1960-01-01 00:00:00' as input, toDateTimeOrNull('1960-01-01 00:00:00') as result; +select '1800-01-01 00:00:00' as input, toDateTimeOrNull('1800-01-01 00:00:00') as result; +select '3000-01-01 00:00:00' as input, toDateTimeOrNull('3000-01-01 00:00:00') as result; + +-- toDateTime64OrNull: pre-epoch and far future datetimes +select 'toDateTime64OrNull:'; +select '1800-01-01 00:00:00' as input, toDateTime64OrNull('1800-01-01 00:00:00') as result; +select '3000-01-01 00:00:00' as input, toDateTime64OrNull('3000-01-01 00:00:00') as result; + +-- accurateCastOrNull to Date +select 'accurateCastOrNull to Date:'; +select '1960-01-01' as input, accurateCastOrNull('1960-01-01', 'Date') as result; +select '1800-01-01' as input, accurateCastOrNull('1800-01-01', 'Date') as result; +select '3000-01-01' as input, accurateCastOrNull('3000-01-01', 'Date') as result; + +-- accurateCastOrNull to DateTime +select 'accurateCastOrNull to DateTime:'; +select '1960-01-01' as input, accurateCastOrNull('1960-01-01', 'DateTime') as result; +select '1800-01-01' as input, accurateCastOrNull('1800-01-01', 'DateTime') as result; +select '3000-01-01' as input, accurateCastOrNull('3000-01-01', 'DateTime') as result; + +-- accurateCastOrNull to DateTime with best_effort mode +select 'accurateCastOrNull to DateTime (best_effort):'; +select '1960-01-01' as input, accurateCastOrNull('1960-01-01', 'DateTime') as result settings cast_string_to_date_time_mode='best_effort'; +select '1800-01-01' as input, accurateCastOrNull('1800-01-01', 'DateTime') as result settings cast_string_to_date_time_mode='best_effort'; +select '3000-01-01' as input, accurateCastOrNull('3000-01-01', 'DateTime') as result settings cast_string_to_date_time_mode='best_effort'; + +-- accurateCastOrNull to DateTime with best_effort_us mode +select 'accurateCastOrNull to DateTime (best_effort_us):'; +select '1960-01-01' as input, accurateCastOrNull('1960-01-01', 'DateTime') as result settings cast_string_to_date_time_mode='best_effort_us'; +select '1800-01-01' as input, accurateCastOrNull('1800-01-01', 'DateTime') as result settings cast_string_to_date_time_mode='best_effort_us'; +select '3000-01-01' as input, accurateCastOrNull('3000-01-01', 'DateTime') as result settings cast_string_to_date_time_mode='best_effort_us'; + +-- accurateCastOrNull to DateTime64 +select 'accurateCastOrNull to DateTime64:'; +select '1800-01-01' as input, accurateCastOrNull('1800-01-01', 'DateTime64') as result; +select '3000-01-01' as input, accurateCastOrNull('3000-01-01', 'DateTime64') as result; + +-- accurateCastOrNull to DateTime64 with best_effort mode +select 'accurateCastOrNull to DateTime64 (best_effort):'; +select '1960-01-01' as input, accurateCastOrNull('1960-01-01', 'DateTime64') as result settings cast_string_to_date_time_mode='best_effort'; +select '1800-01-01' as input, accurateCastOrNull('1800-01-01', 'DateTime64') as result settings cast_string_to_date_time_mode='best_effort'; +select '3000-01-01' as input, accurateCastOrNull('3000-01-01', 'DateTime64') as result settings cast_string_to_date_time_mode='best_effort'; + +-- accurateCastOrNull to DateTime64 with best_effort_us mode +select 'accurateCastOrNull to DateTime64 (best_effort_us):'; +select '1960-01-01' as input, accurateCastOrNull('1960-01-01', 'DateTime64') as result settings 
cast_string_to_date_time_mode='best_effort_us'; +select '1800-01-01' as input, accurateCastOrNull('1800-01-01', 'DateTime64') as result settings cast_string_to_date_time_mode='best_effort_us'; +select '3000-01-01' as input, accurateCastOrNull('3000-01-01', 'DateTime64') as result settings cast_string_to_date_time_mode='best_effort_us'; \ No newline at end of file diff --git a/parser/testdata/03725_empty_tuple_some_limit_with_ties_distinct/ast.json b/parser/testdata/03725_empty_tuple_some_limit_with_ties_distinct/ast.json new file mode 100644 index 000000000..e30eca05b --- /dev/null +++ b/parser/testdata/03725_empty_tuple_some_limit_with_ties_distinct/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " OrderByElement (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Literal UInt64_1" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001363746, + "rows_read": 10, + "bytes_read": 348 + } +} diff --git a/parser/testdata/03725_empty_tuple_some_limit_with_ties_distinct/metadata.json b/parser/testdata/03725_empty_tuple_some_limit_with_ties_distinct/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03725_empty_tuple_some_limit_with_ties_distinct/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03725_empty_tuple_some_limit_with_ties_distinct/query.sql b/parser/testdata/03725_empty_tuple_some_limit_with_ties_distinct/query.sql new file mode 100644 index 000000000..995a8129a --- /dev/null +++ b/parser/testdata/03725_empty_tuple_some_limit_with_ties_distinct/query.sql @@ -0,0 +1,9 @@ +SELECT () ORDER BY 1 LIMIT 1 WITH TIES; + +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (v1 Nullable(Int8), v2 Decimal(18,4)) ENGINE = MergeTree() ORDER BY tuple(); +SELECT DISTINCT TOP 1 WITH TIES * FROM t0 ORDER BY tuple(); + +INSERT INTO t0 VALUES (NULL, 1.1), (NULL, 2.2), (1, 3.3), (2, 4.4); +SELECT DISTINCT TOP 1 WITH TIES * FROM t0 ORDER BY tuple(); diff --git a/parser/testdata/03725_json_dynamic_subcolumn_prewhere_zero_index_granularity_bytes/ast.json b/parser/testdata/03725_json_dynamic_subcolumn_prewhere_zero_index_granularity_bytes/ast.json new file mode 100644 index 000000000..a8c2b1277 --- /dev/null +++ b/parser/testdata/03725_json_dynamic_subcolumn_prewhere_zero_index_granularity_bytes/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001014437, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03725_json_dynamic_subcolumn_prewhere_zero_index_granularity_bytes/metadata.json b/parser/testdata/03725_json_dynamic_subcolumn_prewhere_zero_index_granularity_bytes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03725_json_dynamic_subcolumn_prewhere_zero_index_granularity_bytes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03725_json_dynamic_subcolumn_prewhere_zero_index_granularity_bytes/query.sql b/parser/testdata/03725_json_dynamic_subcolumn_prewhere_zero_index_granularity_bytes/query.sql new file mode 100644 index 000000000..05a1c0a0d --- /dev/null +++ b/parser/testdata/03725_json_dynamic_subcolumn_prewhere_zero_index_granularity_bytes/query.sql @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test ( + s String, + json JSON +) +ENGINE = MergeTree +ORDER BY (s) +SETTINGS index_granularity = 2, index_granularity_bytes = 0, min_rows_for_wide_part=0, min_bytes_for_wide_part=0; + +INSERT INTO test SELECT 'a', '{}' FROM numbers(1); + +SELECT count() FROM test WHERE s = 'a' AND json.a IS NULL; + +DROP TABLE test; + diff --git a/parser/testdata/03725_variant_element_null_map_subcolumn_prewhere_zero_index_granularity_bytes/ast.json b/parser/testdata/03725_variant_element_null_map_subcolumn_prewhere_zero_index_granularity_bytes/ast.json new file mode 100644 index 000000000..2401dc801 --- /dev/null +++ b/parser/testdata/03725_variant_element_null_map_subcolumn_prewhere_zero_index_granularity_bytes/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001357588, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03725_variant_element_null_map_subcolumn_prewhere_zero_index_granularity_bytes/metadata.json b/parser/testdata/03725_variant_element_null_map_subcolumn_prewhere_zero_index_granularity_bytes/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03725_variant_element_null_map_subcolumn_prewhere_zero_index_granularity_bytes/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03725_variant_element_null_map_subcolumn_prewhere_zero_index_granularity_bytes/query.sql b/parser/testdata/03725_variant_element_null_map_subcolumn_prewhere_zero_index_granularity_bytes/query.sql new file mode 100644 index 000000000..9f240e708 --- /dev/null +++ b/parser/testdata/03725_variant_element_null_map_subcolumn_prewhere_zero_index_granularity_bytes/query.sql @@ -0,0 +1,6 @@ +drop table if exists test; +create table test (s Int128, v Variant(UUID, Int128)) engine=MergeTree order by s settings index_granularity = 2, index_granularity_bytes = 0, min_rows_for_wide_part=0, min_bytes_for_wide_part=0; +insert into test select 42::Int128, 42::Int128; +select v from test prewhere 1; +drop table test; + diff --git a/parser/testdata/03726_array_union_with_dynamic_argument/ast.json b/parser/testdata/03726_array_union_with_dynamic_argument/ast.json new file mode 100644 index 000000000..499347538 --- /dev/null +++ b/parser/testdata/03726_array_union_with_dynamic_argument/ast.json @@ -0,0 +1,61 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function arrayUnion (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Literal Array_[UInt64_1]" + }, + { + "explain": " Function array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function CAST (children 1)" + }, + { + "explain": " 
ExpressionList (children 2)" + }, + { + "explain": " Literal 'a'" + }, + { + "explain": " Literal 'Dynamic'" + } + ], + + "rows": 13, + + "statistics": + { + "elapsed": 0.001245418, + "rows_read": 13, + "bytes_read": 504 + } +} diff --git a/parser/testdata/03726_array_union_with_dynamic_argument/metadata.json b/parser/testdata/03726_array_union_with_dynamic_argument/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03726_array_union_with_dynamic_argument/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03726_array_union_with_dynamic_argument/query.sql b/parser/testdata/03726_array_union_with_dynamic_argument/query.sql new file mode 100644 index 000000000..fbf7897bf --- /dev/null +++ b/parser/testdata/03726_array_union_with_dynamic_argument/query.sql @@ -0,0 +1,2 @@ +SELECT arrayUnion([1], ['a'::Dynamic]); + diff --git a/parser/testdata/03727_alter_with_localhost_remote/ast.json b/parser/testdata/03727_alter_with_localhost_remote/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03727_alter_with_localhost_remote/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03727_alter_with_localhost_remote/metadata.json b/parser/testdata/03727_alter_with_localhost_remote/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03727_alter_with_localhost_remote/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03727_alter_with_localhost_remote/query.sql b/parser/testdata/03727_alter_with_localhost_remote/query.sql new file mode 100644 index 000000000..18e649cf7 --- /dev/null +++ b/parser/testdata/03727_alter_with_localhost_remote/query.sql @@ -0,0 +1,30 @@ +-- Tags: no-replicated-database, no-parallel + +DROP USER IF EXISTS test_03727; +CREATE USER test_03727; + +CREATE TABLE normal +( + n Int32, + s String +) +ENGINE = MergeTree() +ORDER BY n; + +CREATE TABLE secret +( + s String +) +ENGINE = MergeTree() +ORDER BY s; + +INSERT INTO normal VALUES (1, ''); +INSERT INTO secret VALUES ('secret'); + +GRANT ALTER UPDATE ON normal TO test_03727; +GRANT READ ON REMOTE to test_03727; +GRANT CREATE TEMPORARY TABLE ON *.* TO test_03727; + +EXECUTE AS test_03727 ALTER TABLE normal UPDATE s = (SELECT * FROM remote('localhost', currentDatabase(), 'secret') LIMIT 1) WHERE n=1; -- { serverError ACCESS_DENIED } + +DROP USER IF EXISTS test_03727; diff --git a/parser/testdata/03727_block_structure_mismatch_after_filter_push_down/ast.json b/parser/testdata/03727_block_structure_mismatch_after_filter_push_down/ast.json new file mode 100644 index 000000000..6833085c0 --- /dev/null +++ b/parser/testdata/03727_block_structure_mismatch_after_filter_push_down/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001102825, + "rows_read": 5, + "bytes_read": 169 + } +} diff --git a/parser/testdata/03727_block_structure_mismatch_after_filter_push_down/metadata.json b/parser/testdata/03727_block_structure_mismatch_after_filter_push_down/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03727_block_structure_mismatch_after_filter_push_down/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03727_block_structure_mismatch_after_filter_push_down/query.sql b/parser/testdata/03727_block_structure_mismatch_after_filter_push_down/query.sql new file mode 100644 index 000000000..e086ff6c0 --- /dev/null +++ b/parser/testdata/03727_block_structure_mismatch_after_filter_push_down/query.sql @@ -0,0 +1,11 @@ +SELECT * +FROM +( + SELECT materialize(toUInt256(2)) + UNION ALL + SELECT DISTINCT 1 +) +GROUP BY + ignore(lessOrEquals(18, isNotNull(toLowCardinality(10)))), + 1 +HAVING ignore(isZeroOrNull(5)); diff --git a/parser/testdata/03727_concat_with_separator_subquery/ast.json b/parser/testdata/03727_concat_with_separator_subquery/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03727_concat_with_separator_subquery/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03727_concat_with_separator_subquery/metadata.json b/parser/testdata/03727_concat_with_separator_subquery/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03727_concat_with_separator_subquery/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03727_concat_with_separator_subquery/query.sql b/parser/testdata/03727_concat_with_separator_subquery/query.sql new file mode 100644 index 000000000..d07dcd58d --- /dev/null +++ b/parser/testdata/03727_concat_with_separator_subquery/query.sql @@ -0,0 +1,56 @@ +-- { echoOn} + +SELECT concatWithSeparator(c0, 'b', 1) FROM (SELECT 'a' c0) tx; + +SELECT concatWithSeparator(c0, 1) FROM (SELECT 'a' AS c0) tx; + +SELECT concatWithSeparator(c0, 'b', 1) FROM (SELECT CAST(NULL AS Nullable(String)) AS c0) tx; + +SELECT concatWithSeparator('a', c1, 'b') FROM (SELECT 1 AS c1) tx; + +SELECT concatWithSeparator('a', c1, 'b') FROM (SELECT CAST(1 AS Nullable(UInt8)) AS c1) tx; + +SELECT concatWithSeparator('+-*/', c0, c1, c2) FROM (SELECT 'a' AS c0) t0 CROSS JOIN (SELECT 'b' AS c1) t1 CROSS JOIN (SELECT 1 AS c2) t2; + +SELECT concatWithSeparator(c0, c1, c2) +FROM +( + SELECT + 'a' AS c0, + toString(number) AS c1, + number + 9 AS c2 + FROM numbers(5) +); + +SELECT concatWithSeparator('-', [1, 2, 3], [4, 5, 6]) FROM numbers(3); + +SELECT concatWithSeparator('-', (1, 'a'), (2, 'b')) FROM numbers(3); + +SELECT concatWithSeparator(c0, c1, c2) +FROM +( + SELECT + '+' AS c0, + (toString(number), number + 1) AS c1, + [number + 9, number + 3, number + 2] AS c2 + FROM numbers(5) +); + +SELECT concatWithSeparator('+', c1, c2) +FROM +( + SELECT + (toString(number), number + 1) AS c1, + [number + 9, number + 3, number + 2] AS c2 + FROM numbers(5) +); + + +SELECT concatWithSeparator('+', c1, c2, 'zz') +FROM +( + SELECT + (toString(number), number + 1) AS c1, + [number + 9, number + 3, number + 2] AS c2 + FROM numbers(5) +); diff --git a/parser/testdata/03727_ipv4_parsing_bug/ast.json b/parser/testdata/03727_ipv4_parsing_bug/ast.json new file mode 100644 index 000000000..40424ff7c --- /dev/null +++ b/parser/testdata/03727_ipv4_parsing_bug/ast.json @@ -0,0 +1,73 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function IPv6NumToString (children 1)" + }, + { + "explain": " 
ExpressionList (children 1)" + }, + { + "explain": " Function IPv6StringToNumOrDefault (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Identifier x" + }, + { + "explain": " TablesInSelectQuery (children 2)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " TableIdentifier system.one" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " ArrayJoin (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal Array_['24', '5.123.234'] (alias x)" + } + ], + + "rows": 17, + + "statistics": + { + "elapsed": 0.001128155, + "rows_read": 17, + "bytes_read": 728 + } +} diff --git a/parser/testdata/03727_ipv4_parsing_bug/metadata.json b/parser/testdata/03727_ipv4_parsing_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03727_ipv4_parsing_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03727_ipv4_parsing_bug/query.sql b/parser/testdata/03727_ipv4_parsing_bug/query.sql new file mode 100644 index 000000000..e304b778a --- /dev/null +++ b/parser/testdata/03727_ipv4_parsing_bug/query.sql @@ -0,0 +1,3 @@ +select IPv6NumToString(IPv6StringToNumOrDefault(x)) from system.one array join ['24', '5.123.234'] as x; +select IPv4NumToString(IPv4StringToNumOrDefault(x)) from system.one array join ['24', '5.123.234'] as x; +select IPv4NumToString(IPv4StringToNumOrDefault('111.111111.')); diff --git a/parser/testdata/03727_rename_nested_and_modify_in_one_later/ast.json b/parser/testdata/03727_rename_nested_and_modify_in_one_later/ast.json new file mode 100644 index 000000000..70da9a160 --- /dev/null +++ b/parser/testdata/03727_rename_nested_and_modify_in_one_later/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001180232, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03727_rename_nested_and_modify_in_one_later/metadata.json b/parser/testdata/03727_rename_nested_and_modify_in_one_later/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03727_rename_nested_and_modify_in_one_later/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03727_rename_nested_and_modify_in_one_later/query.sql b/parser/testdata/03727_rename_nested_and_modify_in_one_later/query.sql new file mode 100644 index 000000000..926fce19f --- /dev/null +++ b/parser/testdata/03727_rename_nested_and_modify_in_one_later/query.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (`c0.c1` Array(Int)) ENGINE = MergeTree ORDER BY tuple(); +INSERT INTO t0 VALUES ([1]); +ALTER TABLE t0 (RENAME COLUMN `c0.c1` TO `c0.c2`), (MODIFY COLUMN `c0.c1` MODIFY SETTING max_compress_block_size = 1); -- {serverError NOT_FOUND_COLUMN_IN_BLOCK} +DROP TABLE t0; diff --git a/parser/testdata/03727_tolowcardinality_nullable_cast/ast.json b/parser/testdata/03727_tolowcardinality_nullable_cast/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03727_tolowcardinality_nullable_cast/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03727_tolowcardinality_nullable_cast/metadata.json 
b/parser/testdata/03727_tolowcardinality_nullable_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03727_tolowcardinality_nullable_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03727_tolowcardinality_nullable_cast/query.sql b/parser/testdata/03727_tolowcardinality_nullable_cast/query.sql new file mode 100644 index 000000000..95419ee10 --- /dev/null +++ b/parser/testdata/03727_tolowcardinality_nullable_cast/query.sql @@ -0,0 +1,54 @@ +-- Test for issue #89412: Bad cast from ColumnNullable to ColumnLowCardinality + +DROP TABLE IF EXISTS test_tolowcardinality_nullable; + +-- Test 1: Original fiddle query from issue #89412 +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Nullable(Int)) ENGINE = MergeTree() ORDER BY tuple() PARTITION BY (toLowCardinality(c0)) SETTINGS allow_nullable_key = 1; +INSERT INTO TABLE t0 (c0) VALUES (0); +DELETE FROM t0 WHERE c0 = 1; +DROP TABLE t0; + +-- Test 2: Using toLowCardinality with Nullable in PARTITION BY +CREATE TABLE test_tolowcardinality_nullable +( + id UInt32, + str Nullable(String) +) +ENGINE = MergeTree() +PARTITION BY toLowCardinality(str) +ORDER BY id +SETTINGS allow_nullable_key = 1; + +INSERT INTO test_tolowcardinality_nullable VALUES (1, 'a'), (2, 'b'), (3, NULL), (4, 'a'); + +-- Query that triggers KeyCondition optimization with toLowCardinality +SELECT * FROM test_tolowcardinality_nullable WHERE toLowCardinality(str) = 'a' ORDER BY id; + +-- Mutation that also uses the partition key +ALTER TABLE test_tolowcardinality_nullable DELETE WHERE id = 1 SETTINGS mutations_sync = 2; + +SELECT * FROM test_tolowcardinality_nullable ORDER BY id; + +DROP TABLE test_tolowcardinality_nullable; + +-- Test 3: Direct toLowCardinality on Nullable column +SELECT toLowCardinality(materialize(toNullable('test'))) AS result; +SELECT toLowCardinality(materialize(CAST(NULL AS Nullable(String)))) AS result; + +-- Test 4: toLowCardinality in WHERE clause with Nullable +DROP TABLE IF EXISTS test_tolowcardinality_where; + +CREATE TABLE test_tolowcardinality_where +( + id UInt32, + val Nullable(String) +) +ENGINE = MergeTree() +ORDER BY id; + +INSERT INTO test_tolowcardinality_where VALUES (1, 'x'), (2, 'y'), (3, NULL); + +SELECT id FROM test_tolowcardinality_where WHERE toLowCardinality(val) = 'x' ORDER BY id; + +DROP TABLE test_tolowcardinality_where; diff --git a/parser/testdata/03728_analyzer_identifier_resolution_join/ast.json b/parser/testdata/03728_analyzer_identifier_resolution_join/ast.json new file mode 100644 index 000000000..b63cd34da --- /dev/null +++ b/parser/testdata/03728_analyzer_identifier_resolution_join/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery address (children 1)" + }, + { + "explain": " Identifier address" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001157547, + "rows_read": 2, + "bytes_read": 66 + } +} diff --git a/parser/testdata/03728_analyzer_identifier_resolution_join/metadata.json b/parser/testdata/03728_analyzer_identifier_resolution_join/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03728_analyzer_identifier_resolution_join/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03728_analyzer_identifier_resolution_join/query.sql b/parser/testdata/03728_analyzer_identifier_resolution_join/query.sql new file mode 100644 index 000000000..2aa308829 --- /dev/null +++ 
b/parser/testdata/03728_analyzer_identifier_resolution_join/query.sql @@ -0,0 +1,41 @@ +DROP TABLE IF EXISTS address; +DROP TABLE IF EXISTS fact_click; + +CREATE TABLE address +( + `email_address` String, + `domain` LowCardinality(String), + `first_name` String, + `last_name` String, + `country` LowCardinality(String), + `esp` UInt8, + `list_import_sid` LowCardinality(String) +) +ENGINE = MergeTree +PRIMARY KEY (list_import_sid, country, esp, domain, email_address) +ORDER BY (list_import_sid, country, esp, domain, email_address) +SETTINGS index_granularity = 8192; + +CREATE TABLE fact_click +( + `sid` String, + `campaign_sid` LowCardinality(String), + `campaign_batch_sid` LowCardinality(String), + `email_address` String +) +ENGINE = ReplacingMergeTree +ORDER BY (campaign_sid, campaign_batch_sid, sid) +SETTINGS index_granularity = 8192; + +SET enable_analyzer=1; + +WITH + records AS (SELECT address.email_address FROM address GROUP BY address.email_address), + stats AS ( + SELECT (SELECT COUNT(*) FROM fact_click INNER JOIN records ON records.email_address = fact_click.email_address) AS num_clicks, + (SELECT COUNT(*) FROM records) AS num_records + ) +SELECT * FROM stats; + +DROP TABLE address; +DROP TABLE fact_click; diff --git a/parser/testdata/03728_explain_column_structure/ast.json b/parser/testdata/03728_explain_column_structure/ast.json new file mode 100644 index 000000000..d50ee3f0f --- /dev/null +++ b/parser/testdata/03728_explain_column_structure/ast.json @@ -0,0 +1,115 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 3)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function viewExplain (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " Literal 'EXPLAIN'" + }, + { + "explain": " Literal 'header = 1, input_headers = 1'" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_1" + }, + { + "explain": " Function notLike (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " Identifier explain" + }, + { + "explain": " Literal 'Expression%'" + } + ], + + "rows": 31, + + "statistics": + { + "elapsed": 0.001959086, + "rows_read": 31, + "bytes_read": 1349 + } +} diff --git a/parser/testdata/03728_explain_column_structure/metadata.json b/parser/testdata/03728_explain_column_structure/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03728_explain_column_structure/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03728_explain_column_structure/query.sql b/parser/testdata/03728_explain_column_structure/query.sql new file mode 100644 index 000000000..4b67ef33a --- /dev/null +++ b/parser/testdata/03728_explain_column_structure/query.sql @@ -0,0 +1,2 @@ +SELECT * FROM (EXPLAIN PLAN header = 1, input_headers = 1 SELECT 1) WHERE explain NOT LIKE 'Expression%'; +SELECT * FROM (EXPLAIN PLAN header = 1, input_headers = 1, column_structure = 1 SELECT 1) WHERE explain NOT LIKE 'Expression%'; \ No newline at end of file diff --git a/parser/testdata/03729_function_hmac/ast.json b/parser/testdata/03729_function_hmac/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03729_function_hmac/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03729_function_hmac/metadata.json b/parser/testdata/03729_function_hmac/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03729_function_hmac/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03729_function_hmac/query.sql b/parser/testdata/03729_function_hmac/query.sql new file mode 100644 index 000000000..6c8a56584 --- /dev/null +++ b/parser/testdata/03729_function_hmac/query.sql @@ -0,0 +1,56 @@ +-- Tags: no-fasttest, no-openssl-fips +-- The HMAC function requires the OpenSSL library, which is disabled in fasttest builds. + +-- Basic HMAC function: test its case insensitivity and correctness with known values +SELECT + hex(hmac('md5', 'The quick brown fox jumps over the lazy dog', 'secret_key')) AS hmac_md5, + hex(Hmac('sha1', 'The quick brown fox jumps over the lazy dog', 'secret_key')) AS hmac_sha1, + hex(HMac('sha224', 'The quick brown fox jumps over the lazy dog', 'secret_key')) AS hmac_sha224, + hex(HMAc('sha256', 'The quick brown fox jumps over the lazy dog', 'secret_key')) AS hmac_sha256, + hex(hmAc('sha384', 'The quick brown fox jumps over the lazy dog', 'secret_key')) AS hmac_sha384, + hex(HMAC('sha512', 'The quick brown fox jumps over the lazy dog', 'secret_key')) AS hmac_sha512 +FORMAT Vertical; + +SELECT ''; + +-- Test output lengths for all supported hash algorithms +SELECT length(HMAC('md4', 'test', 'key')); -- MD4 produces 16 bytes +SELECT length(HMAC('md5', 'test', 'key')); -- MD5 produces 16 bytes +SELECT length(HMAC('mdc2', 'test', 'key')); -- MDC2 produces 16 bytes +SELECT length(HMAC('ripemd', 'test', 'key')); -- ripemd produces 20 bytes +SELECT length(HMAC('sha1', 'test', 'key')); -- SHA1 produces 20 bytes +SELECT length(HMAC('sha224', 'test', 'key')); -- SHA224 produces 28 bytes +SELECT length(HMAC('sha256', 'test', 'key')); -- SHA256 produces 32 bytes +SELECT length(HMAC('sha384', 'test', 'key')); -- SHA384 produces 48 bytes +SELECT length(HMAC('sha512', 'test', 'key')); -- SHA512 produces 64 bytes +SELECT length(HMAC('sha512-224', 'test', 'key')); -- SHA512/224 produces 28 bytes +SELECT length(HMAC('sha512-256', 'test', 'key')); -- SHA512/256 produces 32 bytes +SELECT length(HMAC('sha3-224', 'test', 'key')); -- SHA3-224 produces 28 bytes +SELECT length(HMAC('sha3-256', 'test', 'key')); -- SHA3-256 produces 32 bytes +SELECT length(HMAC('sha3-384', 'test', 'key')); -- SHA3-384 produces 48 bytes +SELECT length(HMAC('sha3-512', 'test', 'key')); -- SHA3-512 produces 64 bytes +SELECT length(HMAC('blake2b512', 'test', 'key')); -- BLAKE2b-512 produces 64 bytes +SELECT length(HMAC('blake2s256', 'test', 'key')); -- BLAKE2s-256 
produces 32 bytes +SELECT length(HMAC('sm3', 'test', 'key')); -- SM3 produces 32 bytes +SELECT length(HMAC('whirlpool', 'test', 'key')); -- Whirlpool produces 64 bytes + +SELECT ''; + +-- Test with empty strings +SELECT length(HMAC('sha256', '', 'key')) = 32; +SELECT length(HMAC('sha256', 'message', '')) = 32; +SELECT length(HMAC('sha256', '', '')) = 32; + +SELECT ''; + +-- Test with table data +CREATE TEMPORARY TABLE hmac_test (message String, key String); +INSERT INTO hmac_test VALUES ('hello', 'world'), ('foo', 'bar'), ('test', 'key'); +SELECT message, key, hex(HMAC('sha256', message, key)) AS hmac_hex FROM hmac_test ORDER BY message; +DROP TABLE hmac_test; + +-- Test invalid algorithm (should throw error) +SELECT HMAC('invalid_algo', 'message', 'key'); -- { serverError BAD_ARGUMENTS } + +-- Test big column +SELECT hmac('sha256', toString(number), 'key') FROM system.numbers LIMIT 100000 FORMAT Null; diff --git a/parser/testdata/03730_qbit_in_order_by_and_comparison_functions/ast.json b/parser/testdata/03730_qbit_in_order_by_and_comparison_functions/ast.json new file mode 100644 index 000000000..be527386a --- /dev/null +++ b/parser/testdata/03730_qbit_in_order_by_and_comparison_functions/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001517493, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03730_qbit_in_order_by_and_comparison_functions/metadata.json b/parser/testdata/03730_qbit_in_order_by_and_comparison_functions/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03730_qbit_in_order_by_and_comparison_functions/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03730_qbit_in_order_by_and_comparison_functions/query.sql b/parser/testdata/03730_qbit_in_order_by_and_comparison_functions/query.sql new file mode 100644 index 000000000..fca199e48 --- /dev/null +++ b/parser/testdata/03730_qbit_in_order_by_and_comparison_functions/query.sql @@ -0,0 +1,9 @@ +set allow_experimental_qbit_type=1; + +drop table if exists test; +create table test (qbit QBit(Float64, 3)) engine=MergeTree order by tuple(); +insert into test select [1., 2., 3.] 
from numbers(10); +select * from test order by qbit; -- {serverError ILLEGAL_COLUMN} +select qbit < qbit from test; -- {serverError ILLEGAL_TYPE_OF_ARGUMENT} +drop table test; + diff --git a/parser/testdata/03731_null_parts_in_storage_snapshot_with_only_analyze/ast.json b/parser/testdata/03731_null_parts_in_storage_snapshot_with_only_analyze/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03731_null_parts_in_storage_snapshot_with_only_analyze/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03731_null_parts_in_storage_snapshot_with_only_analyze/metadata.json b/parser/testdata/03731_null_parts_in_storage_snapshot_with_only_analyze/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03731_null_parts_in_storage_snapshot_with_only_analyze/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03731_null_parts_in_storage_snapshot_with_only_analyze/query.sql b/parser/testdata/03731_null_parts_in_storage_snapshot_with_only_analyze/query.sql new file mode 100644 index 000000000..d9b42b864 --- /dev/null +++ b/parser/testdata/03731_null_parts_in_storage_snapshot_with_only_analyze/query.sql @@ -0,0 +1,14 @@ +-- Possible crash when a mutation contains a subquery, which will use +-- InterpreterSelectQuery() with only_analyze=true, which uses +-- getStorageSnapshotWithoutData(), and may crash in +-- getConditionSelectivityEstimator() since parts was nullptr + +drop table if exists t0; +drop table if exists t1; + +create table t0 (key Int) engine=MergeTree order by () settings auto_statistics_types=''; +create table t1 (key Int) engine=MergeTree order by () settings auto_statistics_types=''; +insert into t0 values (1); +insert into t1 values (1); + +alter table t1 update key = 0 where 1 or not(not exists (select key from t0 where key > 0)) settings mutations_sync=2, allow_experimental_analyzer=0, query_plan_optimize_prewhere=0, query_plan_enable_optimizations=0, allow_statistics_optimize=1; diff --git a/parser/testdata/03731_query_condition_cache_folded_constants/ast.json b/parser/testdata/03731_query_condition_cache_folded_constants/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03731_query_condition_cache_folded_constants/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03731_query_condition_cache_folded_constants/metadata.json b/parser/testdata/03731_query_condition_cache_folded_constants/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03731_query_condition_cache_folded_constants/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03731_query_condition_cache_folded_constants/query.sql b/parser/testdata/03731_query_condition_cache_folded_constants/query.sql new file mode 100644 index 000000000..d3aa9db44 --- /dev/null +++ b/parser/testdata/03731_query_condition_cache_folded_constants/query.sql @@ -0,0 +1,34 @@ +SET prefer_localhost_replica = 0, + use_query_condition_cache = 1; + +DROP TABLE IF EXISTS 03731_data; + +CREATE TABLE 03731_data( `key` UInt64 ) ENGINE = MergeTree ORDER BY key SETTINGS index_granularity = 8192 +AS +SELECT number FROM numbers(30000); + +SELECT '-- First query run to populate query condition cache'; +SELECT + shardNum(), + min(key), + max(key), + count() +FROM remote('127.0.0.{1,2}', currentDatabase(), 03731_data) +WHERE (key >= (shardNum() * 10000)) + AND (key < ((shardNum() * 10000) + 10000)) +GROUP BY 1 +ORDER BY 
1 ASC; + +SELECT '-- Second query run to assert that query condition cache doesnt affect results'; +SELECT + shardNum(), + min(key), + max(key), + count() +FROM remote('127.0.0.{1,2}', currentDatabase(), 03731_data) +WHERE (key >= (shardNum() * 10000)) + AND (key < ((shardNum() * 10000) + 10000)) +GROUP BY 1 +ORDER BY 1 ASC; + +DROP TABLE 03731_data; diff --git a/parser/testdata/03732_join_on_exists_bug/ast.json b/parser/testdata/03732_join_on_exists_bug/ast.json new file mode 100644 index 000000000..0ebca54a0 --- /dev/null +++ b/parser/testdata/03732_join_on_exists_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001278289, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03732_join_on_exists_bug/metadata.json b/parser/testdata/03732_join_on_exists_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03732_join_on_exists_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03732_join_on_exists_bug/query.sql b/parser/testdata/03732_join_on_exists_bug/query.sql new file mode 100644 index 000000000..bf8057a64 --- /dev/null +++ b/parser/testdata/03732_join_on_exists_bug/query.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS t0; + +CREATE TABLE t0 (c0 Int) ENGINE = Memory; +INSERT INTO t0 VALUES (1), (2), (3); + +SELECT 1 FROM t0 ASOF JOIN t0 tx ON EXISTS (SELECT 1) JOIN t0 ty ON t0.c0 = ty.c0 +; -- { serverError INVALID_JOIN_ON_EXPRESSION } + +SELECT 1 FROM t0 ASOF JOIN t0 tx ON EXISTS (SELECT 1) JOIN t0 ty ON t0.c0 = ty.c0 +SETTINGS allow_general_join_planning = 0; -- { serverError INVALID_JOIN_ON_EXPRESSION } + +SELECT 1 FROM t0 ASOF JOIN t0 tx ON EXISTS (SELECT 1) JOIN t0 ty ON t0.c0 = ty.c0 +SETTINGS query_plan_use_new_logical_join_step = 0; -- { serverError INVALID_JOIN_ON_EXPRESSION } diff --git a/parser/testdata/03732_json_duplicated_path_in_dynamic_paths_and_shared_data_bug/ast.json b/parser/testdata/03732_json_duplicated_path_in_dynamic_paths_and_shared_data_bug/ast.json new file mode 100644 index 000000000..c94ff5d47 --- /dev/null +++ b/parser/testdata/03732_json_duplicated_path_in_dynamic_paths_and_shared_data_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001359397, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03732_json_duplicated_path_in_dynamic_paths_and_shared_data_bug/metadata.json b/parser/testdata/03732_json_duplicated_path_in_dynamic_paths_and_shared_data_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03732_json_duplicated_path_in_dynamic_paths_and_shared_data_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03732_json_duplicated_path_in_dynamic_paths_and_shared_data_bug/query.sql b/parser/testdata/03732_json_duplicated_path_in_dynamic_paths_and_shared_data_bug/query.sql new file mode 100644 index 000000000..5385c5923 --- /dev/null +++ b/parser/testdata/03732_json_duplicated_path_in_dynamic_paths_and_shared_data_bug/query.sql @@ -0,0 +1,9 @@ +drop table if exists test; +drop table if exists test2; +create table test (id UInt64, json JSON) 
engine=MergeTree order by id; +insert into test select number, '{}' from numbers(100000); +alter table test update json = '{"a" : 42}' where id > 50000 settings mutations_sync=1; +create table test2 (json JSON) engine=MergeTree order by tuple(); +insert into test2 select if(id < 75000, json, '{"a" : 42}'::JSON) from test; +select * from test2 format Null; + diff --git a/parser/testdata/03732_json_duplicated_path_in_dynamic_paths_and_shared_data_compact_part_bug/ast.json b/parser/testdata/03732_json_duplicated_path_in_dynamic_paths_and_shared_data_compact_part_bug/ast.json new file mode 100644 index 000000000..b13cd0bd0 --- /dev/null +++ b/parser/testdata/03732_json_duplicated_path_in_dynamic_paths_and_shared_data_compact_part_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00115002, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03732_json_duplicated_path_in_dynamic_paths_and_shared_data_compact_part_bug/metadata.json b/parser/testdata/03732_json_duplicated_path_in_dynamic_paths_and_shared_data_compact_part_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03732_json_duplicated_path_in_dynamic_paths_and_shared_data_compact_part_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03732_json_duplicated_path_in_dynamic_paths_and_shared_data_compact_part_bug/query.sql b/parser/testdata/03732_json_duplicated_path_in_dynamic_paths_and_shared_data_compact_part_bug/query.sql new file mode 100644 index 000000000..aa33cc217 --- /dev/null +++ b/parser/testdata/03732_json_duplicated_path_in_dynamic_paths_and_shared_data_compact_part_bug/query.sql @@ -0,0 +1,5 @@ +drop table if exists test; +create table test (id UInt64, json JSON(max_dynamic_paths=1)) engine=MergeTree order by tuple() settings min_bytes_for_wide_part='100G', write_marks_for_substreams_in_compact_parts=0; +insert into test select number, '{"a" : 42, "b" : {"c" : 42}}' from numbers(100000); +select json.^b from test order by id format Null; +drop table test; diff --git a/parser/testdata/03733_anti_join_runtime_filter_3/ast.json b/parser/testdata/03733_anti_join_runtime_filter_3/ast.json new file mode 100644 index 000000000..b1e924166 --- /dev/null +++ b/parser/testdata/03733_anti_join_runtime_filter_3/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001096248, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03733_anti_join_runtime_filter_3/metadata.json b/parser/testdata/03733_anti_join_runtime_filter_3/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03733_anti_join_runtime_filter_3/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03733_anti_join_runtime_filter_3/query.sql b/parser/testdata/03733_anti_join_runtime_filter_3/query.sql new file mode 100644 index 000000000..d57e1fc43 --- /dev/null +++ b/parser/testdata/03733_anti_join_runtime_filter_3/query.sql @@ -0,0 +1,99 @@ +SET enable_analyzer = 1; +SET enable_join_runtime_filters = 1; + +CREATE TABLE nation(n_nationkey Int32, n_name String) ENGINE MergeTree ORDER BY n_nationkey; +CREATE TABLE customer(c_custkey Int32, c_nationkey 
Int32) ENGINE MergeTree ORDER BY c_custkey; + +INSERT INTO nation VALUES (5,'ETHIOPIA'),(6,'FRANCE'),(7,'GERMANY'); + +INSERT INTO customer SELECT number, 5 FROM numbers(500); +INSERT INTO customer SELECT number, 6 FROM numbers(6000); +INSERT INTO customer SELECT number, 7 FROM numbers(70000); +INSERT INTO customer SELECT number, 201 FROM numbers(1); +INSERT INTO customer SELECT number, 202 FROM numbers(2); + +SET enable_parallel_replicas=0; +SET query_plan_join_swap_table=0; + +SELECT '1 element in filter'; +-- 1 element in filter +SELECT count() +FROM + customer + LEFT ANTI JOIN + (SELECT n_nationkey FROM nation WHERE n_name = 'FRANCE') as n + ON c_nationkey = n.n_nationkey; + +SELECT count() +FROM + (SELECT n_nationkey FROM nation WHERE n_name = 'FRANCE') as n + RIGHT ANTI JOIN + customer + ON c_nationkey = n.n_nationkey; + +SELECT '0 elements in filter'; +-- 0 elements in filter ('WAKANDA' is not present in the `nation` table) +SELECT count() +FROM + customer + LEFT ANTI JOIN + (SELECT n_nationkey FROM nation WHERE n_name = 'WAKANDA') as n + ON c_nationkey = n.n_nationkey; + +SELECT count() +FROM + (SELECT n_nationkey FROM nation WHERE n_name = 'WAKANDA') as n + RIGHT ANTI JOIN + customer + ON c_nationkey = n.n_nationkey; + +SELECT 'Again 1 element in filter'; +-- again 1 element in filter + +SELECT count() +FROM + customer + LEFT ANTI JOIN + (SELECT n_nationkey FROM nation WHERE n_name IN ('WAKANDA', 'GERMANY')) as n + ON c_nationkey = n.n_nationkey; + +SELECT count() +FROM + (SELECT n_nationkey FROM nation WHERE n_name IN ('WAKANDA', 'GERMANY')) as n + RIGHT ANTI JOIN + customer + ON c_nationkey = n.n_nationkey; + +SELECT '2 elements in filter'; +-- 2 elements in filter +SELECT count() +FROM + customer + LEFT ANTI JOIN + (SELECT n_nationkey FROM nation WHERE n_name IN ('FRANCE', 'GERMANY')) as n + ON c_nationkey = n.n_nationkey; + +SELECT count() +FROM + (SELECT n_nationkey FROM nation WHERE n_name IN ('FRANCE', 'GERMANY')) as n + RIGHT ANTI JOIN + customer + ON c_nationkey = n.n_nationkey; + +SELECT '2 elements in filter in bloom filter'; +-- 2 elements in filter stored in a bloom filter +SET join_runtime_filter_exact_values_limit = 1; + +SELECT count() +FROM + customer + LEFT ANTI JOIN + (SELECT n_nationkey FROM nation WHERE n_name IN ('FRANCE', 'GERMANY')) as n + ON c_nationkey = n.n_nationkey; + +SELECT count() +FROM + (SELECT n_nationkey FROM nation WHERE n_name IN ('FRANCE', 'GERMANY')) as n + RIGHT ANTI JOIN + customer + ON c_nationkey = n.n_nationkey; diff --git a/parser/testdata/03733_base58_decode_bug/ast.json b/parser/testdata/03733_base58_decode_bug/ast.json new file mode 100644 index 000000000..6a255effc --- /dev/null +++ b/parser/testdata/03733_base58_decode_bug/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function base58Decode (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function materialize (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal '11111111111111'" + }, + { + "explain": " TablesInSelectQuery (children 1)" + }, + { + "explain": " TablesInSelectQueryElement (children 1)" + }, + { + "explain": " TableExpression (children 1)" + }, + { + "explain": " Function numbers (children 1)" 
+ }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_100000" + }, + { + "explain": " Identifier Null" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001387329, + "rows_read": 16, + "bytes_read": 645 + } +} diff --git a/parser/testdata/03733_base58_decode_bug/metadata.json b/parser/testdata/03733_base58_decode_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03733_base58_decode_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03733_base58_decode_bug/query.sql b/parser/testdata/03733_base58_decode_bug/query.sql new file mode 100644 index 000000000..b484dc74c --- /dev/null +++ b/parser/testdata/03733_base58_decode_bug/query.sql @@ -0,0 +1 @@ +SELECT base58Decode(materialize('11111111111111')) FROM numbers(100000) FORMAT Null; diff --git a/parser/testdata/03733_join_order_dp/ast.json b/parser/testdata/03733_join_order_dp/ast.json new file mode 100644 index 000000000..b41d0e462 --- /dev/null +++ b/parser/testdata/03733_join_order_dp/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001165055, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03733_join_order_dp/metadata.json b/parser/testdata/03733_join_order_dp/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03733_join_order_dp/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03733_join_order_dp/query.sql b/parser/testdata/03733_join_order_dp/query.sql new file mode 100644 index 000000000..c4d1e9064 --- /dev/null +++ b/parser/testdata/03733_join_order_dp/query.sql @@ -0,0 +1,144 @@ +SET allow_experimental_analyzer = 1; +SET query_plan_optimize_join_order_limit = 10; +SET allow_statistic_optimize = 1; +SET query_plan_join_swap_table='auto'; + +-- R1: Small dimension table (Demo size: 10) +CREATE TABLE R1 ( + A_ID UInt32, + A_Description String +) ENGINE = MergeTree() +PRIMARY KEY (A_ID) +SETTINGS auto_statistics_types = 'uniq'; + +-- R2: Large fact table (Demo size: 1,000) +-- Joins only with R1. +CREATE TABLE R2 ( + B_ID UInt32, + R1_A_ID UInt32, + B_Data Float64 +) ENGINE = MergeTree() +PRIMARY KEY (B_ID) +SETTINGS auto_statistics_types = 'uniq'; + +-- R3: Another large fact table (Demo size: 1,000) +-- Joins with R1 and R4. +CREATE TABLE R3 ( + C_ID UInt32, + R1_A_ID UInt32, + R4_D_ID UInt32, + C_Value Int32 +) ENGINE = MergeTree() +PRIMARY KEY (C_ID) +SETTINGS auto_statistics_types = 'uniq'; + +-- R4: Small lookup table (Demo size: 10) +-- Joins only with R3. 
+CREATE TABLE R4 ( + D_ID UInt32, + D_LookupCode String +) ENGINE = MergeTree() +PRIMARY KEY (D_ID) +SETTINGS auto_statistics_types = 'uniq'; + + +-- Populate R1 (Small: 10 rows) +INSERT INTO R1 (A_ID, A_Description) VALUES +(1, 'Type A'), (2, 'Type B'), (3, 'Type C'), (4, 'Type D'), (5, 'Type E'), +(6, 'Type F'), (7, 'Type G'), (8, 'Type H'), (9, 'Type I'), (10, 'Type J'); + +-- Populate R4 (Small: 10 rows) +INSERT INTO R4 (D_ID, D_LookupCode) VALUES +(101, 'Lookup X'), (102, 'Lookup Y'), (103, 'Lookup Z'), (104, 'Lookup W'), (105, 'Lookup V'), +(106, 'Lookup U'), (107, 'Lookup T'), (108, 'Lookup S'), (109, 'Lookup R'), (110, 'Lookup Q'); + +INSERT INTO R2 (B_ID, R1_A_ID, B_Data) +SELECT + number AS B_ID, + (number % 10) + 1 AS R1_A_ID, -- Links to R1.A_ID 1-10 + number / 100 +FROM numbers(1000); + +INSERT INTO R3 (C_ID, R1_A_ID, R4_D_ID, C_Value) +SELECT + number AS C_ID, + (number % 10) + 1 AS R1_A_ID, -- Links to R1.A_ID 1-10 + (number % 10) + 101 AS R4_D_ID, -- Links to R4.D_ID 101-110 + (number * 10) AS C_Value +FROM numbers(1000); + + +SELECT '========================================='; +SELECT 'Plan with greedy algorithm'; +EXPLAIN +SELECT + T1.A_Description, + T2.B_Data, + T3.C_Value, + T4.D_LookupCode +FROM R1 AS T1, R2 AS T2, R3 AS T3, R4 AS T4 +WHERE + T1.A_ID = T2.R1_A_ID + AND T1.A_ID = T3.R1_A_ID + AND T3.R4_D_ID = T4.D_ID + AND T1.A_Description = 'Type H' + AND T4.D_LookupCode = 'Lookup S' +SETTINGS query_plan_optimize_join_order_algorithm = 'greedy', enable_parallel_replicas = 0; + +SELECT sum(sipHash64( + T1.A_Description, + T2.B_Data, + T3.C_Value, + T4.D_LookupCode)) +FROM R1 AS T1, R2 AS T2, R3 AS T3, R4 AS T4 +WHERE + T1.A_ID = T2.R1_A_ID + AND T1.A_ID = T3.R1_A_ID + AND T3.R4_D_ID = T4.D_ID + AND T1.A_Description = 'Type H' + AND T4.D_LookupCode = 'Lookup S' +SETTINGS query_plan_optimize_join_order_algorithm = 'greedy'; + + +SELECT '========================================='; +SELECT 'Plan with DPsize algorithm'; +EXPLAIN +SELECT + T1.A_Description, + T2.B_Data, + T3.C_Value, + T4.D_LookupCode +FROM R1 AS T1, R2 AS T2, R3 AS T3, R4 AS T4 +WHERE + T1.A_ID = T2.R1_A_ID + AND T1.A_ID = T3.R1_A_ID + AND T3.R4_D_ID = T4.D_ID + AND T1.A_Description = 'Type H' + AND T4.D_LookupCode = 'Lookup S' +SETTINGS query_plan_optimize_join_order_algorithm = 'dpsize', enable_parallel_replicas = 0; + +SELECT sum(sipHash64( + T1.A_Description, + T2.B_Data, + T3.C_Value, + T4.D_LookupCode)) +FROM R1 AS T1, R2 AS T2, R3 AS T3, R4 AS T4 +WHERE + T1.A_ID = T2.R1_A_ID + AND T1.A_ID = T3.R1_A_ID + AND T3.R4_D_ID = T4.D_ID + AND T1.A_Description = 'Type H' + AND T4.D_LookupCode = 'Lookup S' +SETTINGS query_plan_optimize_join_order_algorithm = 'dpsize'; + + +SELECT '==========================================='; +SELECT 'Fallback to greedy'; + +SELECT 1 FROM (SELECT 1 c0) t0 LEFT JOIN (SELECT 1 c0) t1 ON t0.c0 = t1.c0 +SETTINGS query_plan_optimize_join_order_algorithm = 'dpsize', enable_parallel_replicas=0; --{serverError EXPERIMENTAL_FEATURE_ERROR} + +SELECT 1 FROM (SELECT 1 c0) t0 LEFT JOIN (SELECT 1 c0) t1 ON t0.c0 = t1.c0 +SETTINGS query_plan_optimize_join_order_algorithm = 'dpsize,greedy', enable_parallel_replicas=0; + + diff --git a/parser/testdata/03733_summing_merge_tree_nested_low_cardinality/ast.json b/parser/testdata/03733_summing_merge_tree_nested_low_cardinality/ast.json new file mode 100644 index 000000000..548b4f093 --- /dev/null +++ b/parser/testdata/03733_summing_merge_tree_nested_low_cardinality/ast.json @@ -0,0 +1,88 @@ +{ + "meta": + [ + { + "name": "explain", + "type": 
"String" + } + ], + + "data": + [ + { + "explain": "CreateQuery sums (children 3)" + }, + { + "explain": " Identifier sums" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 3)" + }, + { + "explain": " ColumnDeclaration key (children 1)" + }, + { + "explain": " DataType LowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " ColumnDeclaration sumOfSums (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " ColumnDeclaration sumsMap (children 1)" + }, + { + "explain": " DataType Nested (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " NameTypePair key (children 1)" + }, + { + "explain": " DataType LowCardinality (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " DataType String" + }, + { + "explain": " NameTypePair sum (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function SummingMergeTree" + }, + { + "explain": " Identifier key" + } + ], + + "rows": 22, + + "statistics": + { + "elapsed": 0.001302763, + "rows_read": 22, + "bytes_read": 848 + } +} diff --git a/parser/testdata/03733_summing_merge_tree_nested_low_cardinality/metadata.json b/parser/testdata/03733_summing_merge_tree_nested_low_cardinality/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03733_summing_merge_tree_nested_low_cardinality/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03733_summing_merge_tree_nested_low_cardinality/query.sql b/parser/testdata/03733_summing_merge_tree_nested_low_cardinality/query.sql new file mode 100644 index 000000000..6b4395264 --- /dev/null +++ b/parser/testdata/03733_summing_merge_tree_nested_low_cardinality/query.sql @@ -0,0 +1,8 @@ +CREATE TABLE sums (key LowCardinality(String), sumOfSums UInt64, sumsMap Nested (key LowCardinality(String), sum UInt64)) ENGINE = SummingMergeTree PRIMARY KEY (key); + +INSERT INTO sums (key, sumOfSums, sumsMap.key, sumsMap.sum) VALUES ('lol', 3, ['a', 'b'], [1, 2]); +INSERT INTO sums (key, sumOfSums, sumsMap.key, sumsMap.sum) VALUES ('lol', 7, ['a', 'b'], [3, 4]); + +OPTIMIZE TABLE sums; + +SELECT * FROM sums; diff --git a/parser/testdata/03735_excessive_buffer_flush/ast.json b/parser/testdata/03735_excessive_buffer_flush/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03735_excessive_buffer_flush/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03735_excessive_buffer_flush/metadata.json b/parser/testdata/03735_excessive_buffer_flush/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03735_excessive_buffer_flush/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03735_excessive_buffer_flush/query.sql b/parser/testdata/03735_excessive_buffer_flush/query.sql new file mode 100644 index 000000000..4b32e912b --- /dev/null +++ b/parser/testdata/03735_excessive_buffer_flush/query.sql @@ -0,0 +1,40 @@ +-- Note, this test uses sleep, but, it should not affect it's flakiness + +set function_sleep_max_microseconds_per_block=5e9; + +drop table if exists data; +create table data (key Int) engine=Null; + +drop table if exists empty_buffer; +create table empty_buffer (key Int) engine=Buffer(currentDatabase(), data, 2, 2, 4, 100_000, 
1_000_000, 10e9, 10e9, 3); +select sleep(5) format Null; +optimize table empty_buffer; +drop table empty_buffer; + +drop table if exists empty_buffer_zero_time; +create table empty_buffer_zero_time (key Int) engine=Buffer(currentDatabase(), data, 2, 0, 0, 100_000, 1_000_000, 10e9, 10e9, 0); +select sleep(1) format Null; +optimize table empty_buffer_zero_time; +drop table empty_buffer_zero_time; + +drop table if exists buffer_flush_by_min; +create table buffer_flush_by_min (key Int) engine=Buffer(currentDatabase(), data, 2, 2, 4, 100_000, 1_000_000, 0, 10e9, 3); +insert into buffer_flush_by_min select * from numbers(100_000 + 1); +select sleep(5) format Null; +drop table buffer_flush_by_min; + +drop table if exists buffer_flush_by_max; +create table buffer_flush_by_max (key Int) engine=Buffer(currentDatabase(), data, 2, 2, 4, 100_000, 1_000_000, 0, 10e9); +insert into buffer_flush_by_max select * from numbers(1); +select sleep(5) format Null; +drop table buffer_flush_by_max; + +drop table if exists buffer_flush_by_flush_time; +create table buffer_flush_by_flush_time (key Int) engine=Buffer(currentDatabase(), data, 2, 2, 4, 100_000, 1_000_000, 10e9, 10e9, 3); +insert into buffer_flush_by_flush_time values (1); +select sleep(5) format Null; +drop table buffer_flush_by_flush_time; + +system flush logs text_log; +-- to avoid flakiness we only check that the number of logs is < 10, instead of strict values +select extractAll(logger_name, 'StorageBuffer \\([^.]+\\.([^)]+)\\)')[1] as table_name, max2(count(), 10) from system.text_log where logger_name LIKE format('%StorageBuffer ({}.%', currentDatabase()) group by 1 order by 1; diff --git a/parser/testdata/03740_alter_modify_query_dict_name_in_cse/ast.json b/parser/testdata/03740_alter_modify_query_dict_name_in_cse/ast.json new file mode 100644 index 000000000..ab2f199d1 --- /dev/null +++ b/parser/testdata/03740_alter_modify_query_dict_name_in_cse/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery mv (children 1)" + }, + { + "explain": " Identifier mv" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000993701, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03740_alter_modify_query_dict_name_in_cse/metadata.json b/parser/testdata/03740_alter_modify_query_dict_name_in_cse/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03740_alter_modify_query_dict_name_in_cse/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03740_alter_modify_query_dict_name_in_cse/query.sql b/parser/testdata/03740_alter_modify_query_dict_name_in_cse/query.sql new file mode 100644 index 000000000..568d8850b --- /dev/null +++ b/parser/testdata/03740_alter_modify_query_dict_name_in_cse/query.sql @@ -0,0 +1,10 @@ +drop table if exists mv; +drop table if exists dst; +drop table if exists src; +drop dictionary if exists dict; + +create table src (key Int) engine=MergeTree order by (); +create table dst (key Int) engine=MergeTree order by (); +create dictionary dict (key Int, value Int) primary key key layout(direct) source(clickhouse(query 'select 0 key, 0 value')); +create materialized view mv to dst as select * from src; +alter table mv modify query with 'dict' as dict_name select dictGetInt32(dict_name, 'value', key) from src; diff --git a/parser/testdata/03741_adaptive_write_buffer_initial_size_zero/ast.json b/parser/testdata/03741_adaptive_write_buffer_initial_size_zero/ast.json new 
file mode 100644 index 000000000..3caf7f9ae --- /dev/null +++ b/parser/testdata/03741_adaptive_write_buffer_initial_size_zero/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001028751, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03741_adaptive_write_buffer_initial_size_zero/metadata.json b/parser/testdata/03741_adaptive_write_buffer_initial_size_zero/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03741_adaptive_write_buffer_initial_size_zero/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03741_adaptive_write_buffer_initial_size_zero/query.sql b/parser/testdata/03741_adaptive_write_buffer_initial_size_zero/query.sql new file mode 100644 index 000000000..b05561a53 --- /dev/null +++ b/parser/testdata/03741_adaptive_write_buffer_initial_size_zero/query.sql @@ -0,0 +1,6 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Dynamic) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity_bytes = 0, adaptive_write_buffer_initial_size = 0; -- { serverError BAD_ARGUMENTS } +CREATE TABLE t0 (c0 Dynamic) ENGINE = MergeTree() ORDER BY tuple() SETTINGS index_granularity_bytes = 0, adaptive_write_buffer_initial_size = 1; +INSERT INTO TABLE t0 (c0) VALUES (1); +SELECT c0 FROM t0; +DROP TABLE t0; diff --git a/parser/testdata/03741_dict_get_in_cte_with_no_arguments_old_analyzer/ast.json b/parser/testdata/03741_dict_get_in_cte_with_no_arguments_old_analyzer/ast.json new file mode 100644 index 000000000..d959c9f3a --- /dev/null +++ b/parser/testdata/03741_dict_get_in_cte_with_no_arguments_old_analyzer/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 2)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Subquery (children 1)" + }, + { + "explain": " SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Function dictGet (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Set" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.001060745, + "rows_read": 12, + "bytes_read": 443 + } +} diff --git a/parser/testdata/03741_dict_get_in_cte_with_no_arguments_old_analyzer/metadata.json b/parser/testdata/03741_dict_get_in_cte_with_no_arguments_old_analyzer/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03741_dict_get_in_cte_with_no_arguments_old_analyzer/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03741_dict_get_in_cte_with_no_arguments_old_analyzer/query.sql b/parser/testdata/03741_dict_get_in_cte_with_no_arguments_old_analyzer/query.sql new file mode 100644 index 000000000..3e2fb2443 --- /dev/null +++ b/parser/testdata/03741_dict_get_in_cte_with_no_arguments_old_analyzer/query.sql @@ -0,0 +1,2 @@ +SELECT ( SELECT dictGet() ) settings enable_analyzer=0; -- {serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH} + diff --git a/parser/testdata/03741_insert_select_subquery_from_file/ast.json 
b/parser/testdata/03741_insert_select_subquery_from_file/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03741_insert_select_subquery_from_file/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03741_insert_select_subquery_from_file/metadata.json b/parser/testdata/03741_insert_select_subquery_from_file/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03741_insert_select_subquery_from_file/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03741_insert_select_subquery_from_file/query.sql b/parser/testdata/03741_insert_select_subquery_from_file/query.sql new file mode 100644 index 000000000..a0ef87079 --- /dev/null +++ b/parser/testdata/03741_insert_select_subquery_from_file/query.sql @@ -0,0 +1,25 @@ +-- Fixed only for analyzer +SET enable_analyzer=1; + +INSERT INTO TABLE FUNCTION file(database() || '.test-data.json', JSON) + SELECT number numeric FROM numbers(10); + +CREATE VIEW test_view AS SELECT * FROM file(database() || '.test-data.json', JSON); + +CREATE TABLE test_table_view (a String) Engine=Memory AS + SELECT toString(numeric) FROM test_view; +SELECT COUNT(*) FROM test_table_view; + +INSERT INTO test_table_view + SELECT toString(numeric) FROM test_view; +SELECT COUNT(*) FROM test_table_view; + +-- Same without View + +CREATE TABLE test_table (a String) Engine=Memory AS + SELECT toString(numeric) FROM (SELECT * FROM file(database() || '.test-data.json', JSON)); +SELECT COUNT(*) FROM test_table; + +INSERT INTO test_table + SELECT toString(numeric) FROM (SELECT * FROM file(database() || '.test-data.json', JSON)); +SELECT COUNT(*) FROM test_table; diff --git a/parser/testdata/03741_s3_glob_table_path_pushdown/ast.json b/parser/testdata/03741_s3_glob_table_path_pushdown/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03741_s3_glob_table_path_pushdown/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03741_s3_glob_table_path_pushdown/metadata.json b/parser/testdata/03741_s3_glob_table_path_pushdown/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03741_s3_glob_table_path_pushdown/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03741_s3_glob_table_path_pushdown/query.sql b/parser/testdata/03741_s3_glob_table_path_pushdown/query.sql new file mode 100644 index 000000000..afd713463 --- /dev/null +++ b/parser/testdata/03741_s3_glob_table_path_pushdown/query.sql @@ -0,0 +1,74 @@ +-- Tags: no-fasttest +-- Tag no-fasttest: Depends on S3 + +SET s3_truncate_on_insert = 1, + s3_list_object_keys_size = 1; + +DROP TABLE IF EXISTS 03741_data, 03741_filter; + +INSERT INTO FUNCTION s3(s3_conn, url = 'http://localhost:11111/test/03741_data/file1.parquet', format = Parquet) SELECT number FROM numbers(10); +INSERT INTO FUNCTION s3(s3_conn, url = 'http://localhost:11111/test/03741_data/file2.parquet', format = Parquet) SELECT number FROM numbers(10); +INSERT INTO FUNCTION s3(s3_conn, url = 'http://localhost:11111/test/03741_data/nested/file3.parquet', format = Parquet) SELECT number FROM numbers(10); +INSERT INTO FUNCTION s3(s3_conn, url = 'http://localhost:11111/test/03741_data/nested/file4.parquet', format = Parquet) SELECT number FROM numbers(10); + +CREATE TABLE 03741_data ( number UInt64 ) +ENGINE = S3(s3_conn, url = 'http://localhost:11111/test/03741_data/**', format = Parquet); + +CREATE TABLE 03741_filter ( path String ) ENGINE = 
MergeTree ORDER BY tuple() +AS +SELECT 'test/03741_data/file1.parquet' UNION ALL SELECT 'test/03741_data/nested/file3.parquet'; + +SELECT _path, count() FROM 03741_data GROUP BY 1 ORDER BY 1; + +SELECT count() FROM 03741_data WHERE _path = 'test/03741_data/file1.parquet'; +SELECT count() FROM 03741_data WHERE _path != 'test/03741_data/file1.parquet'; +SELECT count() FROM 03741_data WHERE _path = 'clickhouse/fake_directory/file1.parquet'; + +SELECT count() FROM 03741_data WHERE _path IN ('test/03741_data/file1.parquet', 'test/03741_data/file2.parquet'); +SELECT count() FROM 03741_data WHERE _path NOT IN ('test/03741_data/file1.parquet', 'test/03741_data/file2.parquet'); +SELECT count() FROM 03741_data WHERE _path IN ('clickhouse/fake_directory/fake.parquet'); +SELECT count() FROM 03741_data WHERE _path IN ('clickhouse/fake_directory/fake.parquet', 'test/03741_data/nested/file3.parquet'); +SELECT count() FROM 03741_data WHERE _path NOT IN ('clickhouse/fake_directory/fake.parquet', 'test/03741_data/nested/file3.parquet'); + +SELECT count() FROM 03741_data WHERE _path IN (03741_filter); +SELECT count() FROM 03741_data WHERE _path NOT IN (03741_filter); + +SELECT count() FROM 03741_data WHERE _path IN (SELECT * FROM 03741_filter WHERE path LIKE '%nested%'); +SELECT count() FROM 03741_data WHERE _path NOT IN (SELECT * FROM 03741_filter WHERE path LIKE '%nested%'); +SELECT count() FROM 03741_data WHERE _path IN (SELECT * FROM 03741_filter UNION ALL SELECT 'clickhouse/fake_directory/fake.parquet'); +SELECT count() FROM 03741_data WHERE _path NOT IN (SELECT * FROM 03741_filter UNION ALL SELECT 'clickhouse/fake_directory/fake.parquet'); + +SELECT count() FROM 03741_data WHERE _path = 'test/03741_data/file1.parquet' AND number > 5; +SELECT count() FROM 03741_data WHERE _path = 'test/03741_data/file1.parquet' OR number > 5; +SELECT count() FROM 03741_data WHERE (_path = 'test/03741_data/file1.parquet' OR _path IN (SELECT 'test/03741_data/nested/file4.parquet')) AND number < 3; +SELECT count() FROM 03741_data WHERE (_path = 'test/03741_data/file1.parquet' AND number = 2) OR (_path = 'test/03741_data/nested/file4.parquet' AND number = 4); +SELECT count() FROM 03741_data WHERE (_path = 'test/03741_data/file1.parquet' OR number = 2) AND (_path = 'test/03741_data/file2.parquet' OR number <= 1); + +SELECT count() FROM 03741_data WHERE substr(_path, 1, 23) = 'test/03741_data/nested/'; + +SELECT count() FROM 03741_data WHERE _path = 'test/03741_data/file2.parquet' +SETTINGS s3_path_filter_limit = 0; + +SELECT count() FROM 03741_data WHERE _path IN ('test/03741_data/file1.parquet', 'test/03741_data/file2.parquet') +SETTINGS s3_path_filter_limit = 1; + +SELECT count() FROM 03741_data WHERE _path IN ('test/03741_data/file1.parquet', 'test/03741_data/file2.parquet') +SETTINGS s3_path_filter_limit = 2; + +SYSTEM FLUSH LOGS query_log; + +SELECT ''; +SELECT ProfileEvents['S3ListObjects'], ProfileEvents['EngineFileLikeReadFiles'] +FROM system.query_log +WHERE current_database = currentDatabase() + AND log_comment like '%03741_s3_glob_table_path_pushdown%' + AND query_kind = 'Select' + AND type = 'QueryFinish' +ORDER BY event_time_microseconds; + +-- A mutually exclusive filter uses the glob iterator and does some list ops, so +-- we check only the result, as this behavior can change in the future +SELECT ''; +SELECT count() FROM 03741_data WHERE _path = 'test/03741_data/file1.parquet' AND _path = 'test/03741_data/file2.parquet'; + +DROP TABLE 03741_data, 03741_filter; diff --git 
a/parser/testdata/03741_subcolumns_of_materialized_columns_in_mutation/ast.json b/parser/testdata/03741_subcolumns_of_materialized_columns_in_mutation/ast.json new file mode 100644 index 000000000..88a061582 --- /dev/null +++ b/parser/testdata/03741_subcolumns_of_materialized_columns_in_mutation/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001557989, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03741_subcolumns_of_materialized_columns_in_mutation/metadata.json b/parser/testdata/03741_subcolumns_of_materialized_columns_in_mutation/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03741_subcolumns_of_materialized_columns_in_mutation/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03741_subcolumns_of_materialized_columns_in_mutation/query.sql b/parser/testdata/03741_subcolumns_of_materialized_columns_in_mutation/query.sql new file mode 100644 index 000000000..187011654 --- /dev/null +++ b/parser/testdata/03741_subcolumns_of_materialized_columns_in_mutation/query.sql @@ -0,0 +1,7 @@ +drop table if exists test; +create table test (s String, json JSON materialized s) engine=MergeTree order by tuple(); +insert into test select '{"a" : 42}'; +alter table test update s = '{}' where json.a = 42 settings mutations_sync=1; +select * from test; +drop table test; + diff --git a/parser/testdata/03742_array_filter_is_null_empty_array/ast.json b/parser/testdata/03742_array_filter_is_null_empty_array/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03742_array_filter_is_null_empty_array/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03742_array_filter_is_null_empty_array/metadata.json b/parser/testdata/03742_array_filter_is_null_empty_array/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03742_array_filter_is_null_empty_array/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03742_array_filter_is_null_empty_array/query.sql b/parser/testdata/03742_array_filter_is_null_empty_array/query.sql new file mode 100644 index 000000000..cbdc23bbc --- /dev/null +++ b/parser/testdata/03742_array_filter_is_null_empty_array/query.sql @@ -0,0 +1,13 @@ +-- { echoOn } + +SELECT arrayFilter(x -> (x IS NOT NULL), []); + +SELECT arrayFilter(x -> (x IS NOT NULL), [NULL]); + +SELECT arrayFilter(x -> (x IS NOT NULL), [1]); + +SELECT arrayFilter(x -> (x IS NULL), []); + +SELECT arrayFilter(x -> (x IS NULL), [NULL]); + +SELECT arrayFilter(x -> (x IS NULL), [1]); diff --git a/parser/testdata/03742_array_join_empty_tuple/ast.json b/parser/testdata/03742_array_join_empty_tuple/ast.json new file mode 100644 index 000000000..92d32a558 --- /dev/null +++ b/parser/testdata/03742_array_join_empty_tuple/ast.json @@ -0,0 +1,70 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery t0 (children 3)" + }, + { + "explain": " Identifier t0" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration c0 (children 1)" + }, + { + "explain": " DataType Array (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " 
DataType Int" + }, + { + "explain": " ColumnDeclaration c1 (children 1)" + }, + { + "explain": " DataType Tuple (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function MergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Function tuple (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 16, + + "statistics": + { + "elapsed": 0.001301536, + "rows_read": 16, + "bytes_read": 561 + } +} diff --git a/parser/testdata/03742_array_join_empty_tuple/metadata.json b/parser/testdata/03742_array_join_empty_tuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03742_array_join_empty_tuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03742_array_join_empty_tuple/query.sql b/parser/testdata/03742_array_join_empty_tuple/query.sql new file mode 100644 index 000000000..0e7bad72a --- /dev/null +++ b/parser/testdata/03742_array_join_empty_tuple/query.sql @@ -0,0 +1,5 @@ +CREATE TABLE t0 (c0 Array(Int), c1 Tuple()) ENGINE = MergeTree() ORDER BY tuple(); + +INSERT INTO t0 (c0, c1) VALUES ([1], ()), ([], ()); + +SELECT * FROM t0 ARRAY JOIN c0 ORDER BY c1; diff --git a/parser/testdata/03742_lazy_materialization_of_array_after_alter_add_column/ast.json b/parser/testdata/03742_lazy_materialization_of_array_after_alter_add_column/ast.json new file mode 100644 index 000000000..5d4024774 --- /dev/null +++ b/parser/testdata/03742_lazy_materialization_of_array_after_alter_add_column/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_lazy (children 1)" + }, + { + "explain": " Identifier test_lazy" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001658806, + "rows_read": 2, + "bytes_read": 70 + } +} diff --git a/parser/testdata/03742_lazy_materialization_of_array_after_alter_add_column/metadata.json b/parser/testdata/03742_lazy_materialization_of_array_after_alter_add_column/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03742_lazy_materialization_of_array_after_alter_add_column/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03742_lazy_materialization_of_array_after_alter_add_column/query.sql b/parser/testdata/03742_lazy_materialization_of_array_after_alter_add_column/query.sql new file mode 100644 index 000000000..27958a300 --- /dev/null +++ b/parser/testdata/03742_lazy_materialization_of_array_after_alter_add_column/query.sql @@ -0,0 +1,7 @@ +drop table if exists test_lazy; +create table test_lazy (id UInt64) engine=MergeTree order by tuple() settings min_bytes_for_wide_part=1, min_rows_for_wide_part=1; +insert into test_lazy select * from numbers(100); +alter table test_lazy add column array Array(UInt64) settings mutations_sync=1; +select id, array from test_lazy where id = 42 order by id limit 10 settings query_plan_optimize_lazy_materialization = 1; +drop table test_lazy; + diff --git a/parser/testdata/03742_test_flattened_crash/ast.json b/parser/testdata/03742_test_flattened_crash/ast.json new file mode 100644 index 000000000..dd5b8e2ab --- /dev/null +++ b/parser/testdata/03742_test_flattened_crash/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_flatten_nested_crash (children 1)" + }, + { + "explain": " Identifier 
test_flatten_nested_crash" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001167501, + "rows_read": 2, + "bytes_read": 102 + } +} diff --git a/parser/testdata/03742_test_flattened_crash/metadata.json b/parser/testdata/03742_test_flattened_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03742_test_flattened_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03742_test_flattened_crash/query.sql b/parser/testdata/03742_test_flattened_crash/query.sql new file mode 100644 index 000000000..7cd6d0054 --- /dev/null +++ b/parser/testdata/03742_test_flattened_crash/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS test_flatten_nested_crash; +CREATE TABLE test_flatten_nested_crash +( + `id` UInt64, + `tenant` String, + `arr.id` Array(Nullable(UInt64)), + `arr.name` Array(Nullable(String)), + `arr.nested` Array(Tuple(a String, b Float64)) +) +ENGINE = MergeTree +ORDER BY (id) +SETTINGS index_granularity = 8192; +INSERT INTO test_flatten_nested_crash +SELECT * FROM generateRandom( + '`id` UInt64, + `tenant` String, + `arr.id` Array(Nullable(UInt64)), + `arr.name` Array(Nullable(String)), + `arr.nested` Array(Tuple(a String, b Float64))', 1, 10 +) LIMIT 1; +ALTER TABLE test_flatten_nested_crash DROP COLUMN `arr.nested`; +ALTER TABLE test_flatten_nested_crash ADD COLUMN `arr.nested` Array(Tuple(a String, b Float64)); +SELECT arr.nested FROM test_flatten_nested_crash ORDER BY arr.nested LIMIT 1; +DROP TABLE test_flatten_nested_crash; diff --git a/parser/testdata/03743_fix_estimator_crash/ast.json b/parser/testdata/03743_fix_estimator_crash/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03743_fix_estimator_crash/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03743_fix_estimator_crash/metadata.json b/parser/testdata/03743_fix_estimator_crash/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03743_fix_estimator_crash/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03743_fix_estimator_crash/query.sql b/parser/testdata/03743_fix_estimator_crash/query.sql new file mode 100644 index 000000000..8ec8ad13c --- /dev/null +++ b/parser/testdata/03743_fix_estimator_crash/query.sql @@ -0,0 +1,8 @@ +-- Tags: no-fasttest + +CREATE TABLE t0 (c0 Int) ENGINE = MergeTree() ORDER BY tuple(); +SET query_plan_enable_optimizations = 0; +ALTER TABLE t0 MODIFY STATISTICS c0 TYPE Uniq, CountMin; +INSERT INTO TABLE t0 (c0) VALUES (1); +ALTER TABLE t0 MODIFY STATISTICS c0 TYPE CountMin; +ALTER TABLE t0 APPLY DELETED MASK IN PARTITION ID '1'; diff --git a/parser/testdata/03743_summing_and_aggregating_merge_tree_with_json_merge/ast.json b/parser/testdata/03743_summing_and_aggregating_merge_tree_with_json_merge/ast.json new file mode 100644 index 000000000..288bf20dc --- /dev/null +++ b/parser/testdata/03743_summing_and_aggregating_merge_tree_with_json_merge/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001107557, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03743_summing_and_aggregating_merge_tree_with_json_merge/metadata.json b/parser/testdata/03743_summing_and_aggregating_merge_tree_with_json_merge/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03743_summing_and_aggregating_merge_tree_with_json_merge/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03743_summing_and_aggregating_merge_tree_with_json_merge/query.sql b/parser/testdata/03743_summing_and_aggregating_merge_tree_with_json_merge/query.sql new file mode 100644 index 000000000..94a2d3aa9 --- /dev/null +++ b/parser/testdata/03743_summing_and_aggregating_merge_tree_with_json_merge/query.sql @@ -0,0 +1,34 @@ +set mutations_sync=1; + +drop table if exists test; +create table test (id UInt64, json JSON) engine=SummingMergeTree order by id settings min_bytes_for_wide_part=1, min_rows_for_wide_part=1, index_granularity=32, merge_max_block_size=32; +insert into test select number, '{}' from numbers(100); +alter table test update json = '{"a" : 42}' where id > 90; +optimize table test final; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) from test; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) from test; +drop table test; + +create table test (id UInt64, json JSON) engine=AggregatingMergeTree order by id settings min_bytes_for_wide_part=1, min_rows_for_wide_part=1, index_granularity=32, merge_max_block_size=32; +insert into test select number, '{}' from numbers(100); +alter table test update json = '{"a" : 42}' where id > 90; +optimize table test final; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) from test; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) from test; +drop table test; + +create table test (id UInt64, json JSON) engine=CoalescingMergeTree order by id settings min_bytes_for_wide_part=1, min_rows_for_wide_part=1, index_granularity=32, merge_max_block_size=32; +insert into test select number, '{}' from numbers(100); +alter table test update json = '{"a" : 42}' where id > 90; +optimize table test final; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(json)) from test; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(json)) from test; +drop table test; + + diff --git a/parser/testdata/03745_fix_dynamic_structure_in_compact_part/ast.json b/parser/testdata/03745_fix_dynamic_structure_in_compact_part/ast.json new file mode 100644 index 000000000..17aa627e6 --- /dev/null +++ b/parser/testdata/03745_fix_dynamic_structure_in_compact_part/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001012763, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03745_fix_dynamic_structure_in_compact_part/metadata.json b/parser/testdata/03745_fix_dynamic_structure_in_compact_part/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03745_fix_dynamic_structure_in_compact_part/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03745_fix_dynamic_structure_in_compact_part/query.sql b/parser/testdata/03745_fix_dynamic_structure_in_compact_part/query.sql new file mode 100644 index 000000000..83421e486 --- /dev/null +++ b/parser/testdata/03745_fix_dynamic_structure_in_compact_part/query.sql @@ -0,0 +1,6 @@ +drop table if exists test; +create table test (id UInt64, json JSON) engine=CoalescingMergeTree order by id settings min_bytes_for_wide_part='100G', merge_max_block_size=33, 
index_granularity=800; +insert into test select number, '{}' from numbers(10000); +alter table test update json = '{"a" : 42}' where id > 9000 settings mutations_sync=1; +optimize table test final; +drop table test; diff --git a/parser/testdata/03747_float_parsing_subnormal/ast.json b/parser/testdata/03747_float_parsing_subnormal/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03747_float_parsing_subnormal/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03747_float_parsing_subnormal/metadata.json b/parser/testdata/03747_float_parsing_subnormal/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03747_float_parsing_subnormal/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03747_float_parsing_subnormal/query.sql b/parser/testdata/03747_float_parsing_subnormal/query.sql new file mode 100644 index 000000000..241f3a8ef --- /dev/null +++ b/parser/testdata/03747_float_parsing_subnormal/query.sql @@ -0,0 +1,31 @@ +-- { echoOn } + +SELECT 'FLOAT NORMAL DBL MIN', 2.2250738585072014e-308, toTypeName(2.2250738585072014e-308); + +SELECT 'FLOAT SUB LARGEST BELOW MIN', 2.225073858507201e-308, toTypeName(2.225073858507201e-308); + +SELECT 'FLOAT SUB TRUE MIN', 5e-324, toTypeName(5e-324); + +SELECT 'FLOAT UNDERFLOW BELOW TRUE MIN', 1e-400, toTypeName(1e-400); + +SELECT 'FLOAT NORMAL DBL MAX', 1.7976931348623157e+308, toTypeName(1.7976931348623157e+308); + +SELECT 'FLOAT NEG NORMAL DBL MIN', -2.2250738585072014e-308, toTypeName(-2.2250738585072014e-308); + +SELECT 'FLOAT NEG SUB TRUE MIN', -5e-324, toTypeName(-5e-324); + +SELECT 'FLOAT NEG UNDERFLOW BELOW TRUE MIN', -1e-400, toTypeName(-1e-400); + +SELECT 'HEX NORMAL DBL MIN', 0x1p-1022, toTypeName(0x1p-1022); + +SELECT 'HEX SUB TRUE MIN', 0x1p-1074, toTypeName(0x1p-1074); + +SELECT 'HEX UNDERFLOW BELOW TRUE MIN', 0x1p-1075, toTypeName(0x1p-1075); + +SELECT 'HEX NORMAL DBL MAX', 0x1.fffffffffffffp1023, toTypeName(0x1.fffffffffffffp1023); + +SELECT 'BAREWORD INF', inf, toTypeName(inf); + +SELECT 'BAREWORD NEG INF', -inf, toTypeName(-inf); + +SELECT 'BAREWORD NAN', nan, toTypeName(nan); diff --git a/parser/testdata/03747_optimize_functions_to_subcolumns_columns_as_substreams/ast.json b/parser/testdata/03747_optimize_functions_to_subcolumns_columns_as_substreams/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03747_optimize_functions_to_subcolumns_columns_as_substreams/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03747_optimize_functions_to_subcolumns_columns_as_substreams/metadata.json b/parser/testdata/03747_optimize_functions_to_subcolumns_columns_as_substreams/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03747_optimize_functions_to_subcolumns_columns_as_substreams/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03747_optimize_functions_to_subcolumns_columns_as_substreams/query.sql b/parser/testdata/03747_optimize_functions_to_subcolumns_columns_as_substreams/query.sql new file mode 100644 index 000000000..3dc84840f --- /dev/null +++ b/parser/testdata/03747_optimize_functions_to_subcolumns_columns_as_substreams/query.sql @@ -0,0 +1,124 @@ +-- Test Array empty (size0 substream) +drop table if exists test_empty_array; +create table test_empty_array (id UInt64, `a.size0` UInt64, a Array(UInt64)) engine=MergeTree order by tuple(); +insert into test_empty_array select 42, 42, []; +-- 
{ echo } +select id from test_empty_array where empty(a) settings optimize_functions_to_subcolumns=1; +select id from test_empty_array where empty(a) settings optimize_functions_to_subcolumns=0; +-- { echoOff } + +-- Test Array notEmpty (size0 substream) +drop table if exists test_notempty_array; +create table test_notempty_array (id UInt64, `a.size0` UInt64, a Array(UInt64)) engine=MergeTree order by tuple(); +insert into test_notempty_array select 42, 42, [1]; +-- { echo } +select id from test_notempty_array where notEmpty(a) settings optimize_functions_to_subcolumns=1; +select id from test_notempty_array where notEmpty(a) settings optimize_functions_to_subcolumns=0; +-- { echoOff } + +-- Test Array length (size0 substream) +drop table if exists test_length_array; +create table test_length_array (id UInt64, `a.size0` UInt64, a Array(UInt64)) engine=MergeTree order by tuple(); +insert into test_length_array select 42, 100, [1, 2, 3]; +-- { echo } +select id, length(a) from test_length_array settings optimize_functions_to_subcolumns=1; +select id, length(a) from test_length_array settings optimize_functions_to_subcolumns=0; +-- { echoOff } + +-- Test String empty (size substream) +drop table if exists test_empty_string; +create table test_empty_string (id UInt64, `s.size` UInt64, s String) engine=MergeTree order by tuple(); +insert into test_empty_string select 42, 42, ''; +-- { echo } +select id from test_empty_string where empty(s) settings optimize_functions_to_subcolumns=1; +select id from test_empty_string where empty(s) settings optimize_functions_to_subcolumns=0; +-- { echoOff } + +-- Test String notEmpty (size substream) +drop table if exists test_notempty_string; +create table test_notempty_string (id UInt64, `s.size` UInt64, s String) engine=MergeTree order by tuple(); +insert into test_notempty_string select 42, 42, 'hello'; +-- { echo } +select id from test_notempty_string where notEmpty(s) settings optimize_functions_to_subcolumns=1; +select id from test_notempty_string where notEmpty(s) settings optimize_functions_to_subcolumns=0; +-- { echoOff } + +-- Test String length (size substream) +drop table if exists test_length_string; +create table test_length_string (id UInt64, `s.size` UInt64, s String) engine=MergeTree order by tuple(); +insert into test_length_string select 42, 100, 'hello'; +-- { echo } +select id, length(s) from test_length_string settings optimize_functions_to_subcolumns=1; +select id, length(s) from test_length_string settings optimize_functions_to_subcolumns=0; +-- { echoOff } + +-- Test Map empty (size0 substream) +drop table if exists test_empty_map; +create table test_empty_map (id UInt64, `m.size0` UInt64, m Map(String, UInt64)) engine=MergeTree order by tuple(); +insert into test_empty_map select 42, 42, map(); +-- { echo } +select id from test_empty_map where empty(m) settings optimize_functions_to_subcolumns=1; +select id from test_empty_map where empty(m) settings optimize_functions_to_subcolumns=0; +-- { echoOff } + +-- Test Map notEmpty (size0 substream) +drop table if exists test_notempty_map; +create table test_notempty_map (id UInt64, `m.size0` UInt64, m Map(String, UInt64)) engine=MergeTree order by tuple(); +insert into test_notempty_map select 42, 42, map('a', 1); +-- { echo } +select id from test_notempty_map where notEmpty(m) settings optimize_functions_to_subcolumns=1; +select id from test_notempty_map where notEmpty(m) settings optimize_functions_to_subcolumns=0; +-- { echoOff } + +-- Test Map length (size0 substream) +drop table if 
exists test_length_map; +create table test_length_map (id UInt64, `m.size0` UInt64, m Map(String, UInt64)) engine=MergeTree order by tuple(); +insert into test_length_map select 42, 100, map('a', 1, 'b', 2); +-- { echo } +select id, length(m) from test_length_map settings optimize_functions_to_subcolumns=1; +select id, length(m) from test_length_map settings optimize_functions_to_subcolumns=0; +-- { echoOff } + +-- Test Map mapKeys (keys substream) +drop table if exists test_mapkeys; +create table test_mapkeys (id UInt64, `m.keys` Array(String), m Map(String, UInt64)) engine=MergeTree order by tuple(); -- { serverError BAD_ARGUMENTS } + +-- Test Map mapValues (values substream) +drop table if exists test_mapvalues; +create table test_mapvalues (id UInt64, `m.values` Array(UInt64), m Map(String, UInt64)) engine=MergeTree order by tuple(); -- { serverError BAD_ARGUMENTS } + +-- Test Map mapContainsKey (keys substream) +drop table if exists test_mapcontainskey; +create table test_mapcontainskey (id UInt64, `m.keys` Array(String), m Map(String, UInt64)) engine=MergeTree order by tuple(); -- { serverError BAD_ARGUMENTS } + +-- Test Nullable isNull (null substream) +drop table if exists test_isnull; +create table test_isnull (id UInt64, `n.null` UInt8, n Nullable(UInt64)) engine=MergeTree order by tuple(); +insert into test_isnull select 42, 1, NULL; +-- { echo } +select id from test_isnull where isNull(n) settings optimize_functions_to_subcolumns=1; +select id from test_isnull where isNull(n) settings optimize_functions_to_subcolumns=0; +-- { echoOff } + +-- Test Nullable isNotNull (null substream) +drop table if exists test_isnotnull; +create table test_isnotnull (id UInt64, `n.null` UInt8, n Nullable(UInt64)) engine=MergeTree order by tuple(); +insert into test_isnotnull select 42, 1, 100; +-- { echo } +select id from test_isnotnull where isNotNull(n) settings optimize_functions_to_subcolumns=1; +select id from test_isnotnull where isNotNull(n) settings optimize_functions_to_subcolumns=0; +-- { echoOff } + +-- Test Nullable count (null substream) +drop table if exists test_count_nullable; +create table test_count_nullable (id UInt64, `n.null` UInt8, n Nullable(UInt64)) engine=MergeTree order by tuple(); +insert into test_count_nullable select 42, 0, 100; +insert into test_count_nullable select 43, 1, NULL; +-- { echo } +select count(n) from test_count_nullable settings optimize_functions_to_subcolumns=1; +select count(n) from test_count_nullable settings optimize_functions_to_subcolumns=0; +-- { echoOff } + +-- Test Tuple tupleElement (named subcolumn) +drop table if exists test_tupleelement; +create table test_tupleelement (id UInt64, `t.a` UInt64, t Tuple(a UInt64, b String)) engine=MergeTree order by tuple(); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03748_coalescing_merge_tree_tuple_low_cardinality_and_dynamic/ast.json b/parser/testdata/03748_coalescing_merge_tree_tuple_low_cardinality_and_dynamic/ast.json new file mode 100644 index 000000000..a842dc1ce --- /dev/null +++ b/parser/testdata/03748_coalescing_merge_tree_tuple_low_cardinality_and_dynamic/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001085795, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03748_coalescing_merge_tree_tuple_low_cardinality_and_dynamic/metadata.json 
b/parser/testdata/03748_coalescing_merge_tree_tuple_low_cardinality_and_dynamic/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03748_coalescing_merge_tree_tuple_low_cardinality_and_dynamic/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03748_coalescing_merge_tree_tuple_low_cardinality_and_dynamic/query.sql b/parser/testdata/03748_coalescing_merge_tree_tuple_low_cardinality_and_dynamic/query.sql new file mode 100644 index 000000000..cd8ddcf89 --- /dev/null +++ b/parser/testdata/03748_coalescing_merge_tree_tuple_low_cardinality_and_dynamic/query.sql @@ -0,0 +1,12 @@ +set mutations_sync=1; +drop table if exists test; +create table test (id UInt64, t Tuple(a LowCardinality(String), json JSON)) engine=CoalescingMergeTree order by id settings min_bytes_for_wide_part=1, min_rows_for_wide_part=1, index_granularity=32, merge_max_block_size=32; +insert into test select number, tuple('str', '{}') from numbers(100); +alter table test update t = tuple('str', '{"a" : 42}') where id > 90; +optimize table test final; +select 'Dynamic paths'; +select distinct arrayJoin(JSONDynamicPaths(t.json)) from test; +select 'Shared data paths'; +select distinct arrayJoin(JSONSharedDataPaths(t.json)) from test; +drop table test; + diff --git a/parser/testdata/03748_default_minmax_indices_alter/ast.json b/parser/testdata/03748_default_minmax_indices_alter/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03748_default_minmax_indices_alter/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03748_default_minmax_indices_alter/metadata.json b/parser/testdata/03748_default_minmax_indices_alter/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03748_default_minmax_indices_alter/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03748_default_minmax_indices_alter/query.sql b/parser/testdata/03748_default_minmax_indices_alter/query.sql new file mode 100644 index 000000000..2245bee64 --- /dev/null +++ b/parser/testdata/03748_default_minmax_indices_alter/query.sql @@ -0,0 +1,38 @@ +-- { echoOn } +DROP TABLE IF EXISTS t_implicit; + +CREATE TABLE t_implicit (a UInt64, s String) ENGINE = MergeTree ORDER BY tuple() SETTINGS add_minmax_index_for_numeric_columns = 1; +SHOW CREATE TABLE t_implicit; +SELECT * FROM system.data_skipping_indices WHERE database = current_database() AND table = 't_implicit'; + +ALTER TABLE t_implicit DROP COLUMN s; +SHOW CREATE TABLE t_implicit; +SELECT * FROM system.data_skipping_indices WHERE database = current_database() AND table = 't_implicit'; + +ALTER TABLE t_implicit ADD COLUMN s2 String; +SHOW CREATE TABLE t_implicit; +SELECT * FROM system.data_skipping_indices WHERE database = current_database() AND table = 't_implicit'; + +ALTER TABLE t_implicit ADD COLUMN a2 UInt64; +SHOW CREATE TABLE t_implicit; +SELECT * FROM system.data_skipping_indices WHERE database = current_database() AND table = 't_implicit'; + +ALTER TABLE t_implicit RENAME COLUMN a2 TO a_renamed; +SHOW CREATE TABLE t_implicit; +SELECT * FROM system.data_skipping_indices WHERE database = current_database() AND table = 't_implicit'; + +DETACH TABLE t_implicit; +ATTACH TABLE t_implicit; + +SHOW CREATE TABLE t_implicit; +SELECT * FROM system.data_skipping_indices WHERE database = current_database() AND table = 't_implicit'; + +ALTER TABLE t_implicit MODIFY COLUMN s2 UInt32; +SHOW CREATE TABLE t_implicit; +SELECT * FROM 
system.data_skipping_indices WHERE database = current_database() AND table = 't_implicit'; + +ALTER TABLE t_implicit MODIFY COLUMN a_renamed String; +SHOW CREATE TABLE t_implicit; +SELECT * FROM system.data_skipping_indices WHERE database = current_database() AND table = 't_implicit'; + +DROP TABLE t_implicit; \ No newline at end of file diff --git a/parser/testdata/03748_tuple_of_sparse_elements_bug/ast.json b/parser/testdata/03748_tuple_of_sparse_elements_bug/ast.json new file mode 100644 index 000000000..30abc0cd5 --- /dev/null +++ b/parser/testdata/03748_tuple_of_sparse_elements_bug/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001463909, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03748_tuple_of_sparse_elements_bug/metadata.json b/parser/testdata/03748_tuple_of_sparse_elements_bug/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03748_tuple_of_sparse_elements_bug/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03748_tuple_of_sparse_elements_bug/query.sql b/parser/testdata/03748_tuple_of_sparse_elements_bug/query.sql new file mode 100644 index 000000000..714b99114 --- /dev/null +++ b/parser/testdata/03748_tuple_of_sparse_elements_bug/query.sql @@ -0,0 +1,10 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 String, c1 Tuple(Nullable(Int))) ENGINE = MergeTree() ORDER BY tuple(); +INSERT INTO TABLE t0 (c0) SELECT 'abc' FROM numbers(142); +INSERT INTO TABLE t0 (c0) VALUES ('abc'); +ALTER TABLE t0 CLEAR COLUMN c0; +INSERT INTO TABLE t0 (c0) VALUES ('abc'); +INSERT INTO TABLE t0 (c0) VALUES ('abc'); +INSERT INTO TABLE t0 (c0) VALUES ('abc'); +INSERT INTO TABLE t0 (c0) SELECT 'abc' FROM numbers(196); +DROP TABLE t0; diff --git a/parser/testdata/03749_implicit_index_ephemeral_alias/ast.json b/parser/testdata/03749_implicit_index_ephemeral_alias/ast.json new file mode 100644 index 000000000..be2a09bda --- /dev/null +++ b/parser/testdata/03749_implicit_index_ephemeral_alias/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test_string (children 1)" + }, + { + "explain": " Identifier test_string" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001033553, + "rows_read": 2, + "bytes_read": 74 + } +} diff --git a/parser/testdata/03749_implicit_index_ephemeral_alias/metadata.json b/parser/testdata/03749_implicit_index_ephemeral_alias/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03749_implicit_index_ephemeral_alias/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03749_implicit_index_ephemeral_alias/query.sql b/parser/testdata/03749_implicit_index_ephemeral_alias/query.sql new file mode 100644 index 000000000..4ab8494c9 --- /dev/null +++ b/parser/testdata/03749_implicit_index_ephemeral_alias/query.sql @@ -0,0 +1,24 @@ +DROP TABLE IF EXISTS test_string; +DROP TABLE IF EXISTS test_string_alias; +CREATE OR REPLACE TABLE test_string +( + id UInt64, + unhexed String EPHEMERAL, + hexed FixedString(4) DEFAULT unhex(unhexed) +) +ENGINE = MergeTree +ORDER BY id settings add_minmax_index_for_numeric_columns=1, add_minmax_index_for_string_columns=1; + +CREATE OR REPLACE TABLE test_string_alias +( + id UInt64, + unhexed String 
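+-- like the EPHEMERAL column in test_string above, an ALIAS column is not stored
+-- on disk, so presumably it should not get an implicit minmax index either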
ALIAS 'abc', + hexed FixedString(4) DEFAULT unhex(unhexed) +) +ENGINE = MergeTree +ORDER BY id settings add_minmax_index_for_numeric_columns=1, add_minmax_index_for_string_columns=1; + +SELECT table, name FROM system.data_skipping_indices WHERE database = currentDatabase() ORDER BY table, name; + +DROP TABLE test_string; +DROP TABLE test_string_alias; \ No newline at end of file diff --git a/parser/testdata/03749_in_function_rewrite_lambda_lhs_non_const_rhs/ast.json b/parser/testdata/03749_in_function_rewrite_lambda_lhs_non_const_rhs/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03749_in_function_rewrite_lambda_lhs_non_const_rhs/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03749_in_function_rewrite_lambda_lhs_non_const_rhs/metadata.json b/parser/testdata/03749_in_function_rewrite_lambda_lhs_non_const_rhs/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03749_in_function_rewrite_lambda_lhs_non_const_rhs/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03749_in_function_rewrite_lambda_lhs_non_const_rhs/query.sql b/parser/testdata/03749_in_function_rewrite_lambda_lhs_non_const_rhs/query.sql new file mode 100644 index 000000000..1dfd88f01 --- /dev/null +++ b/parser/testdata/03749_in_function_rewrite_lambda_lhs_non_const_rhs/query.sql @@ -0,0 +1,11 @@ +-- { echoOn } + +SET enable_analyzer = 1; + +SELECT (y -> 1) IN (1, 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT (y -> 1) IN (materialize(1), 2); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT (y -> 1) IN [materialize(1), 2]; -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } + +SELECT 1 WHERE (y -> 1) IN (materialize(1), 1); -- { serverError ILLEGAL_TYPE_OF_ARGUMENT } diff --git a/parser/testdata/03749_materialized_view_not_supports_parallel_write/ast.json b/parser/testdata/03749_materialized_view_not_supports_parallel_write/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03749_materialized_view_not_supports_parallel_write/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03749_materialized_view_not_supports_parallel_write/metadata.json b/parser/testdata/03749_materialized_view_not_supports_parallel_write/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03749_materialized_view_not_supports_parallel_write/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03749_materialized_view_not_supports_parallel_write/query.sql b/parser/testdata/03749_materialized_view_not_supports_parallel_write/query.sql new file mode 100644 index 000000000..468abb0ce --- /dev/null +++ b/parser/testdata/03749_materialized_view_not_supports_parallel_write/query.sql @@ -0,0 +1,16 @@ +-- Tags: no-debug, no-debug, no-asan, no-tsan, no-msan, no-ubsan, no-sanitize-coverage, no-parallel-replicas, no-flaky-check +-- - debug build adds CheckTokenTransform + +SET parallel_view_processing = 1, max_insert_threads = 2; + +CREATE TABLE test_set (c0 Int) ENGINE = Set; +CREATE TABLE test_table (c0 Int) ENGINE = MergeTree ORDER BY c0 PARTITION BY c0; +CREATE MATERIALIZED VIEW merge_tree_to_set TO test_set (c0 Int) AS (SELECT * FROM test_table); +-- Expect the single insert chain +EXPLAIN PIPELINE INSERT INTO TABLE test_table SELECT 1 FROM numbers(10); + +-- Fuzzed +CREATE TABLE t0 (c0 Int) ENGINE = Log; +CREATE TABLE t1 (c0 Int) ENGINE = Memory; +CREATE MATERIALIZED VIEW v0 TO t0 (c0 Int) AS (SELECT 
t1.* IS NULL c0 FROM t1); +INSERT INTO TABLE t1 (c0) SELECT c0 FROM generateRandom('c0 Int', 1, 1, 0) LIMIT 1; diff --git a/parser/testdata/03749_table_function_argument_asterisk/ast.json b/parser/testdata/03749_table_function_argument_asterisk/ast.json new file mode 100644 index 000000000..8b7305eb2 --- /dev/null +++ b/parser/testdata/03749_table_function_argument_asterisk/ast.json @@ -0,0 +1,37 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SelectWithUnionQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " SelectQuery (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Asterisk" + } + ], + + "rows": 5, + + "statistics": + { + "elapsed": 0.001306603, + "rows_read": 5, + "bytes_read": 169 + } +} diff --git a/parser/testdata/03749_table_function_argument_asterisk/metadata.json b/parser/testdata/03749_table_function_argument_asterisk/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03749_table_function_argument_asterisk/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03749_table_function_argument_asterisk/query.sql b/parser/testdata/03749_table_function_argument_asterisk/query.sql new file mode 100644 index 000000000..d438ee1e1 --- /dev/null +++ b/parser/testdata/03749_table_function_argument_asterisk/query.sql @@ -0,0 +1,36 @@ +SELECT * +FROM remote(*, '127.{1,2}', view( + SELECT 2 +)); -- { serverError BAD_ARGUMENTS } + +SELECT * +FROM remote(*, view( + SELECT 2 +)); -- { serverError BAD_ARGUMENTS } + +SELECT * +FROM remote(*, '127.{1,2}', view( + SELECT toLowCardinality(2) +)); -- { serverError BAD_ARGUMENTS } + +SELECT * +FROM remote(*, '127.{1,2}', view( + SELECT 1 + FROM numbers(1) + GROUP BY toLowCardinality(2) +)); -- { serverError BAD_ARGUMENTS } + +SELECT DISTINCT '/01650_drop_part_and_deduplication_partitioned_table/blocks/', 60, k1 +FROM remote(*, '127.{1,2}', view(SELECT 1 AS k1, 65535, 2 AS k2, 3 AS v +FROM numbers(2, cityHash64(k1)) +WHERE toLowCardinality(60) GROUP BY GROUPING SETS ((toLowCardinality(2))) +HAVING equals(k1, toNullable(60)))) FINAL; -- { serverError BAD_ARGUMENTS } + +SELECT * FROM numbers(*, 2); -- { serverError BAD_ARGUMENTS } + +SELECT * FROM numbers(2, *); -- { serverError BAD_ARGUMENTS } + +SELECT * FROM numbers_mt(2, *); -- { serverError BAD_ARGUMENTS } + +SELECT * +FROM generateSeries(*, 1, 3); -- { serverError BAD_ARGUMENTS } diff --git a/parser/testdata/03751_summing_coalescing_merge_tree_sparse_columns_in_header/ast.json b/parser/testdata/03751_summing_coalescing_merge_tree_sparse_columns_in_header/ast.json new file mode 100644 index 000000000..879a729c2 --- /dev/null +++ b/parser/testdata/03751_summing_coalescing_merge_tree_sparse_columns_in_header/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "Set" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.0012632, + "rows_read": 1, + "bytes_read": 11 + } +} diff --git a/parser/testdata/03751_summing_coalescing_merge_tree_sparse_columns_in_header/metadata.json b/parser/testdata/03751_summing_coalescing_merge_tree_sparse_columns_in_header/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03751_summing_coalescing_merge_tree_sparse_columns_in_header/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git 
a/parser/testdata/03751_summing_coalescing_merge_tree_sparse_columns_in_header/query.sql b/parser/testdata/03751_summing_coalescing_merge_tree_sparse_columns_in_header/query.sql new file mode 100644 index 000000000..bb72d8498 --- /dev/null +++ b/parser/testdata/03751_summing_coalescing_merge_tree_sparse_columns_in_header/query.sql @@ -0,0 +1,13 @@ +set allow_suspicious_primary_key = 1; +drop table if exists src; +create table src (x UInt64) engine=MergeTree order by tuple(); +insert into src select 0 from numbers(1000000); +drop table if exists dst; +create table dst (x UInt64) engine=CoalescingMergeTree order by tuple(); +insert into dst select * from src; +drop table dst; +create table dst (x UInt64) engine=SummingMergeTree order by tuple(); +insert into dst select * from src; +drop table dst; +drop table src; + diff --git a/parser/testdata/03752_constant_expression_with_untuple/ast.json b/parser/testdata/03752_constant_expression_with_untuple/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03752_constant_expression_with_untuple/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03752_constant_expression_with_untuple/metadata.json b/parser/testdata/03752_constant_expression_with_untuple/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03752_constant_expression_with_untuple/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03752_constant_expression_with_untuple/query.sql b/parser/testdata/03752_constant_expression_with_untuple/query.sql new file mode 100644 index 000000000..151f9861e --- /dev/null +++ b/parser/testdata/03752_constant_expression_with_untuple/query.sql @@ -0,0 +1,10 @@ +-- untuple is not available without analyzer +SET enable_analyzer=1; + +CREATE TABLE test Engine=Merge(default, untuple((1,1))); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } + +-- Fuzzed +DESCRIBE TABLE format(untuple((toIntervalQuarter(1), assumeNotNull(8) IS NOT NULL, isNullable(7) % 1, 4, materialize('hola'), 4)), JSONEachRow) + FINAL + SETTINGS schema_inference_hints = 'ð(Œ¼ð(Œ¼ð(Œ¼ð(Œ¼ð(Œ¼ð(Œ¼ð(Œ¼ð(Œ¼ð(Œ¼ð(Œ¼ð(Œ¼ð(Œ¼ð(Œ¼ð(Œ¼ð(Œ¼ð(Œ¼'; -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } +DESCRIBE TABLE format(untuple(((SELECT DISTINCT 1 GROUP BY or(isNullable(1048576), 1), or(1048576, 1 AS x, 10, isNull(isNullable(1))) WITH ROLLUP), 1)), JSONEachRow); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } diff --git a/parser/testdata/03752_join_part/ast.json b/parser/testdata/03752_join_part/ast.json new file mode 100644 index 000000000..50be0bc56 --- /dev/null +++ b/parser/testdata/03752_join_part/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001157799, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03752_join_part/metadata.json b/parser/testdata/03752_join_part/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03752_join_part/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03752_join_part/query.sql b/parser/testdata/03752_join_part/query.sql new file mode 100644 index 000000000..9e5735363 --- /dev/null +++ b/parser/testdata/03752_join_part/query.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (c0 Int) ENGINE = MergeTree() ORDER BY tuple(); +SELECT 
t0.c0 FROM t0 JOIN t0 tx ON t0.c0 = tx.c0 WHERE tx._part_offset = 1 AND randomFixedString(5) = tx._part SETTINGS query_plan_use_logical_join_step = 0, use_join_disjunctions_push_down = 1, enable_analyzer = 1; +DROP TABLE t0; diff --git a/parser/testdata/03753_merge_selector_amm/ast.json b/parser/testdata/03753_merge_selector_amm/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03753_merge_selector_amm/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03753_merge_selector_amm/metadata.json b/parser/testdata/03753_merge_selector_amm/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03753_merge_selector_amm/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03753_merge_selector_amm/query.sql b/parser/testdata/03753_merge_selector_amm/query.sql new file mode 100644 index 000000000..ae3c93376 --- /dev/null +++ b/parser/testdata/03753_merge_selector_amm/query.sql @@ -0,0 +1,15 @@ +-- Tags: long + +drop table if exists mt sync; + +create table mt (a UInt64, b UInt64) engine=MergeTree order by a +settings + merge_selector_enable_heuristic_to_lower_max_parts_to_merge_at_once=1, + max_parts_to_merge_at_once=10, + parts_to_throw_insert=50; + +insert into mt select number, number from numbers(100) settings max_block_size=1, min_insert_block_size_bytes=1; + +select count() from mt; + +drop table if exists mt sync; diff --git a/parser/testdata/03753_replacing_empty_order_by/ast.json b/parser/testdata/03753_replacing_empty_order_by/ast.json new file mode 100644 index 000000000..af8002737 --- /dev/null +++ b/parser/testdata/03753_replacing_empty_order_by/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t_empty_order_key (children 1)" + }, + { + "explain": " Identifier t_empty_order_key" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001387069, + "rows_read": 2, + "bytes_read": 86 + } +} diff --git a/parser/testdata/03753_replacing_empty_order_by/metadata.json b/parser/testdata/03753_replacing_empty_order_by/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03753_replacing_empty_order_by/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03753_replacing_empty_order_by/query.sql b/parser/testdata/03753_replacing_empty_order_by/query.sql new file mode 100644 index 000000000..6caa6c6a1 --- /dev/null +++ b/parser/testdata/03753_replacing_empty_order_by/query.sql @@ -0,0 +1,23 @@ +DROP TABLE IF EXISTS t_empty_order_key; + +SET allow_suspicious_primary_key = 0; + +-- CREATE TABLE t_empty_order_key(c0 String, c1 String) ENGINE = ReplacingMergeTree() ORDER BY tuple(); -- { serverError BAD_ARGUMENTS } + +SET allow_suspicious_primary_key = 1; + +CREATE TABLE t_empty_order_key(c0 String, c1 String) ENGINE = ReplacingMergeTree() ORDER BY tuple(); + +INSERT INTO TABLE t_empty_order_key (c0, c1) VALUES ('foo', 'bar'); +OPTIMIZE TABLE t_empty_order_key FINAL; +SELECT * FROM t_empty_order_key ORDER BY c0; +DROP TABLE t_empty_order_key; + +-- Check with forced vertical merge +CREATE TABLE t_empty_order_key(c0 String, c1 String) ENGINE = ReplacingMergeTree() ORDER BY tuple() +SETTINGS min_bytes_for_wide_part = 0, vertical_merge_algorithm_min_bytes_to_activate = 1, vertical_merge_algorithm_min_rows_to_activate = 0, index_granularity = 1, vertical_merge_algorithm_min_columns_to_activate = 1; + +INSERT INTO TABLE 
t_empty_order_key (c0, c1) VALUES ('foo', 'bar'); +OPTIMIZE TABLE t_empty_order_key FINAL; +SELECT * FROM t_empty_order_key ORDER BY c0; +DROP TABLE t_empty_order_key; diff --git a/parser/testdata/03753_segfault_with_empty_callback/ast.json b/parser/testdata/03753_segfault_with_empty_callback/ast.json new file mode 100644 index 000000000..b29a90733 --- /dev/null +++ b/parser/testdata/03753_segfault_with_empty_callback/ast.json @@ -0,0 +1,25 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "SYSTEM query" + } + ], + + "rows": 1, + + "statistics": + { + "elapsed": 0.001078891, + "rows_read": 1, + "bytes_read": 20 + } +} diff --git a/parser/testdata/03753_segfault_with_empty_callback/metadata.json b/parser/testdata/03753_segfault_with_empty_callback/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03753_segfault_with_empty_callback/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03753_segfault_with_empty_callback/query.sql b/parser/testdata/03753_segfault_with_empty_callback/query.sql new file mode 100644 index 000000000..187b27428 --- /dev/null +++ b/parser/testdata/03753_segfault_with_empty_callback/query.sql @@ -0,0 +1,2 @@ +SYSTEM ENABLE FAILPOINT execute_query_calling_empty_set_result_func_on_exception; +SELECT 1 FROM url('http://localhost:8123/?query=SELECT+1+FROM+t0+FORMAT+JSON', 'JSON', 'c0 Int') tx; -- { serverError RECEIVED_ERROR_FROM_REMOTE_IO_SERVER } diff --git a/parser/testdata/03754_h3_polygon_to_cells_const/ast.json b/parser/testdata/03754_h3_polygon_to_cells_const/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03754_h3_polygon_to_cells_const/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03754_h3_polygon_to_cells_const/metadata.json b/parser/testdata/03754_h3_polygon_to_cells_const/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03754_h3_polygon_to_cells_const/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03754_h3_polygon_to_cells_const/query.sql b/parser/testdata/03754_h3_polygon_to_cells_const/query.sql new file mode 100644 index 000000000..e4ccc9ad0 --- /dev/null +++ b/parser/testdata/03754_h3_polygon_to_cells_const/query.sql @@ -0,0 +1,42 @@ +-- Tags: no-fasttest + +WITH 7 AS resolution, + [(-122.40898669999721, 37.81331899998324), (-122.35447369999936, 37.71980619999785), (-122.4798767000009, 37.815157199999845)] AS ring, + ['872830820ffffff', '872830828ffffff', '87283082affffff', '87283082bffffff', '87283082effffff', '872830870ffffff', '872830876ffffff'] AS reference +SELECT h3PolygonToCells(ring, resolution), arraySort(arrayMap(x -> h3ToString(x), h3PolygonToCells(ring, resolution))) = reference; + +WITH 7 AS resolution, + [(-122.40898669999721, 37.81331899998324), (-122.35447369999936, 37.71980619999785), (-122.4798767000009, 37.815157199999845)] AS ring, + ['872830820ffffff', '872830828ffffff', '87283082affffff', '87283082bffffff', '87283082effffff', '872830870ffffff', '872830876ffffff'] AS reference +SELECT h3PolygonToCells(ring, materialize(resolution)), arraySort(arrayMap(x -> h3ToString(x), h3PolygonToCells(ring, materialize(resolution)))) = reference; + +WITH 7 AS resolution, + [(-122.40898669999721, 37.81331899998324), (-122.35447369999936, 37.71980619999785), (-122.4798767000009, 37.815157199999845)] AS ring, + ['872830820ffffff', '872830828ffffff', '87283082affffff', '87283082bffffff', 
'87283082effffff', '872830870ffffff', '872830876ffffff'] AS reference +SELECT h3PolygonToCells(materialize(ring), resolution), arraySort(arrayMap(x -> h3ToString(x), h3PolygonToCells(materialize(ring), resolution))) = reference; + +WITH 7 AS resolution, + [(-122.40898669999721, 37.81331899998324), (-122.35447369999936, 37.71980619999785), (-122.4798767000009, 37.815157199999845)] AS ring, + ['872830820ffffff', '872830828ffffff', '87283082affffff', '87283082bffffff', '87283082effffff', '872830870ffffff', '872830876ffffff'] AS reference +SELECT h3PolygonToCells(materialize(ring), materialize(resolution)), arraySort(arrayMap(x -> h3ToString(x), h3PolygonToCells(materialize(ring), materialize(resolution)))) = reference; + +WITH range(0, 8) AS resolutions, + [(-122.40898669999721, 37.81331899998324), (-122.35447369999936, 37.71980619999785), (-122.4798767000009, 37.815157199999845)] AS ring +SELECT resolution, h3PolygonToCells(ring, arrayJoin(resolutions) AS resolution) +ORDER BY resolution; + +DROP TABLE IF EXISTS rings; +CREATE TABLE rings (ring Ring, reference Array(String)) ENGINE = MergeTree ORDER BY tuple(); + +INSERT INTO rings SELECT + [(-122.40898669999721, 37.81331899998324), (-122.35447369999936, 37.71980619999785), (-122.4798767000009, 37.815157199999845)], + ['872830820ffffff', '872830828ffffff', '87283082affffff', '87283082bffffff', '87283082effffff', '872830870ffffff', '872830876ffffff'] +FROM numbers(10000); + +SELECT DISTINCT h3PolygonToCells(ring, 7), arraySort(arrayMap(x -> h3ToString(x), h3PolygonToCells(ring, 7))) = reference FROM rings; + +WITH range(0, 8) AS resolutions +SELECT DISTINCT resolution, h3PolygonToCells(ring, arrayJoin(resolutions) AS resolution) FROM rings +ORDER BY resolution; + +DROP TABLE rings; diff --git a/parser/testdata/03755_circular_dictionary/ast.json b/parser/testdata/03755_circular_dictionary/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03755_circular_dictionary/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03755_circular_dictionary/metadata.json b/parser/testdata/03755_circular_dictionary/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03755_circular_dictionary/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03755_circular_dictionary/query.sql b/parser/testdata/03755_circular_dictionary/query.sql new file mode 100644 index 000000000..45f3f2af2 --- /dev/null +++ b/parser/testdata/03755_circular_dictionary/query.sql @@ -0,0 +1,315 @@ +-- This test shouldn't deadlock or crash the server + +DROP DICTIONARY IF EXISTS filesystem_dict; +DROP DICTIONARY IF EXISTS kafka_dict; +DROP DICTIONARY IF EXISTS mergetree_dict; +DROP DICTIONARY IF EXISTS ddlworker_dict; +DROP DICTIONARY IF EXISTS storages3_dict; +DROP DICTIONARY IF EXISTS background_dict; +DROP DICTIONARY IF EXISTS temporaryfiles_dict; +DROP DICTIONARY IF EXISTS parts_dict; +DROP DICTIONARY IF EXISTS distrcache_dict; +DROP DICTIONARY IF EXISTS drop_dict; + +CREATE DICTIONARY filesystem_dict +( + `metric` String, + `value` Int64 +) +PRIMARY KEY metric +SOURCE(CLICKHOUSE(QUERY 'SELECT metric, value FROM system.metrics WHERE metric LIKE \'Filesystem%\'')) +LIFETIME(MIN 0 MAX 1000) +LAYOUT(COMPLEX_KEY_HASHED()); + +-- Kafka metrics dictionary +CREATE DICTIONARY kafka_dict +( + `metric` String, + `value` Int64 +) +PRIMARY KEY metric +SOURCE(CLICKHOUSE(QUERY 'SELECT metric, value FROM system.metrics WHERE metric LIKE \'Kafka%\'')) +LIFETIME(MIN 0 MAX 1000) 
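+-- Like filesystem_dict above, each dictionary below is sourced from a query over
+-- system.metrics; the *_metrics tables created later call dictGet on these
+-- dictionaries from projections, which is the circular dependency this test exercises.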
+LAYOUT(COMPLEX_KEY_HASHED()); + +-- MergeTree metrics dictionary +CREATE DICTIONARY mergetree_dict +( + `metric` String, + `value` Int64 +) +PRIMARY KEY metric +SOURCE(CLICKHOUSE(QUERY 'SELECT metric, value FROM system.metrics WHERE metric LIKE \'MergeTree%\'')) +LIFETIME(MIN 0 MAX 1000) +LAYOUT(COMPLEX_KEY_HASHED()); + +-- DDLWorker metrics dictionary +CREATE DICTIONARY ddlworker_dict +( + `metric` String, + `value` Int64 +) +PRIMARY KEY metric +SOURCE(CLICKHOUSE(QUERY 'SELECT metric, value FROM system.metrics WHERE metric LIKE \'DDLWorker%\'')) +LIFETIME(MIN 0 MAX 1000) +LAYOUT(COMPLEX_KEY_HASHED()); + +-- StorageS3 metrics dictionary +CREATE DICTIONARY storages3_dict +( + `metric` String, + `value` Int64 +) +PRIMARY KEY metric +SOURCE(CLICKHOUSE(QUERY 'SELECT metric, value FROM system.metrics WHERE metric LIKE \'StorageS3%\'')) +LIFETIME(MIN 0 MAX 1000) +LAYOUT(COMPLEX_KEY_HASHED()); + +-- Background metrics dictionary +CREATE DICTIONARY background_dict +( + `metric` String, + `value` Int64 +) +PRIMARY KEY metric +SOURCE(CLICKHOUSE(QUERY 'SELECT metric, value FROM system.metrics WHERE metric LIKE \'Background%\'')) +LIFETIME(MIN 0 MAX 1000) +LAYOUT(COMPLEX_KEY_HASHED()); + +-- TemporaryFiles metrics dictionary +CREATE DICTIONARY temporaryfiles_dict +( + `metric` String, + `value` Int64 +) +PRIMARY KEY metric +SOURCE(CLICKHOUSE(QUERY 'SELECT metric, value FROM system.metrics WHERE metric LIKE \'TemporaryFiles%\'')) +LIFETIME(MIN 0 MAX 1000) +LAYOUT(COMPLEX_KEY_HASHED()); + +-- Parts metrics dictionary +CREATE DICTIONARY parts_dict +( + `metric` String, + `value` Int64 +) +PRIMARY KEY metric +SOURCE(CLICKHOUSE(QUERY 'SELECT metric, value FROM system.metrics WHERE metric LIKE \'Parts%\'')) +LIFETIME(MIN 0 MAX 1000) +LAYOUT(COMPLEX_KEY_HASHED()); + +-- DistrCache metrics dictionary +CREATE DICTIONARY distrcache_dict +( + `metric` String, + `value` Int64 +) +PRIMARY KEY metric +SOURCE(CLICKHOUSE(QUERY 'SELECT metric, value FROM system.metrics WHERE metric LIKE \'DistrCache%\'')) +LIFETIME(MIN 0 MAX 1000) +LAYOUT(COMPLEX_KEY_HASHED()); + +-- Drop metrics dictionary +CREATE DICTIONARY drop_dict +( + `metric` String, + `value` Int64 +) +PRIMARY KEY metric +SOURCE(CLICKHOUSE(QUERY 'SELECT metric, value FROM system.metrics WHERE metric LIKE \'Drop%\'')) +LIFETIME(MIN 0 MAX 1000) +LAYOUT(COMPLEX_KEY_HASHED()); + +DROP TABLE IF EXISTS filesystem_metrics; +DROP TABLE IF EXISTS kafka_metrics; +DROP TABLE IF EXISTS mergetree_metrics; +DROP TABLE IF EXISTS ddlworker_metrics; +DROP TABLE IF EXISTS storages3_metrics; +DROP TABLE IF EXISTS background_metrics; +DROP TABLE IF EXISTS temporaryfiles_metrics; +DROP TABLE IF EXISTS parts_metrics; +DROP TABLE IF EXISTS distrcache_metrics; +DROP TABLE IF EXISTS drop_metrics; + +CREATE TABLE background_metrics +( + `metric` String, + `value` Int64, + PROJECTION values + ( + SELECT + metric, + dictGet('background_dict', 'value', metric) AS value + ORDER BY value + ) +) +ENGINE = MergeTree +ORDER BY metric; + +CREATE TABLE ddlworker_metrics +( + `metric` String, + `value` Int64, + PROJECTION values + ( + SELECT + metric, + dictGet('ddlworker_dict', 'value', metric) AS value + ORDER BY value + ) +) +ENGINE = MergeTree +ORDER BY metric; + +CREATE TABLE distrcache_metrics +( + `metric` String, + `value` Int64, + PROJECTION values + ( + SELECT + metric, + dictGet('distrcache_dict', 'value', metric) AS value + ORDER BY value + ) +) +ENGINE = MergeTree +ORDER BY metric; + +CREATE TABLE drop_metrics +( + `metric` String, + `value` Int64, + PROJECTION values + ( + SELECT + 
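+-- the dictGet below runs whenever this projection is materialized (on insert and merge)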
metric, + dictGet('drop_dict', 'value', metric) AS value + ORDER BY value + ) +) +ENGINE = MergeTree +ORDER BY metric; + +CREATE TABLE filesystem_metrics +( + `metric` String, + `value` Int64, + PROJECTION values + ( + SELECT + metric, + dictGet('filesystem_dict', 'value', metric) AS value + ORDER BY value + ) +) +ENGINE = MergeTree +ORDER BY metric; + +CREATE TABLE kafka_metrics +( + `metric` String, + `value` Int64, + PROJECTION values + ( + SELECT + metric, + dictGet('kafka_dict', 'value', metric) AS value + ORDER BY value + ) +) +ENGINE = MergeTree +ORDER BY metric; + +CREATE TABLE mergetree_metrics +( + `metric` String, + `value` Int64, + PROJECTION values + ( + SELECT + metric, + dictGet('mergetree_dict', 'value', metric) AS value + ORDER BY value + ) +) +ENGINE = MergeTree +ORDER BY metric; + +CREATE TABLE parts_metrics +( + `metric` String, + `value` Int64, + PROJECTION values + ( + SELECT + metric, + dictGet('parts_dict', 'value', metric) AS value + ORDER BY value + ) +) +ENGINE = MergeTree +ORDER BY metric; + +CREATE TABLE storages3_metrics +( + `metric` String, + `value` Int64, + PROJECTION values + ( + SELECT + metric, + dictGet('storages3_dict', 'value', metric) AS value + ORDER BY value + ) +) +ENGINE = MergeTree +ORDER BY metric; + +CREATE TABLE temporaryfiles_metrics +( + `metric` String, + `value` Int64, + PROJECTION values + ( + SELECT + metric, + dictGet('temporaryfiles_dict', 'value', metric) AS value + ORDER BY value + ) +) +ENGINE = MergeTree +ORDER BY metric; + +INSERT INTO background_metrics SELECT metric, value FROM system.metrics WHERE metric IN (SELECT metric FROM background_dict) ; +INSERT INTO ddlworker_metrics SELECT metric, value FROM system.metrics WHERE metric IN (SELECT metric FROM ddlworker_dict) ; +INSERT INTO distrcache_metrics SELECT metric, value FROM system.metrics WHERE metric IN (SELECT metric FROM distrcache_dict) ; +INSERT INTO drop_metrics SELECT metric, value FROM system.metrics WHERE metric IN (SELECT metric FROM drop_dict) ; +INSERT INTO filesystem_metrics SELECT metric, value FROM system.metrics WHERE metric IN (SELECT metric FROM filesystem_dict) ; +INSERT INTO kafka_metrics SELECT metric, value FROM system.metrics WHERE metric IN (SELECT metric FROM kafka_dict) ; +INSERT INTO mergetree_metrics SELECT metric, value FROM system.metrics WHERE metric IN (SELECT metric FROM mergetree_dict) ; +INSERT INTO parts_metrics SELECT metric, value FROM system.metrics WHERE metric IN (SELECT metric FROM parts_dict) ; +INSERT INTO storages3_metrics SELECT metric, value FROM system.metrics WHERE metric IN (SELECT metric FROM storages3_dict) ; +INSERT INTO temporaryfiles_metrics SELECT metric, value FROM system.metrics WHERE metric IN (SELECT metric FROM temporaryfiles_dict) ; + +DROP TABLE filesystem_metrics; +DROP TABLE kafka_metrics; +DROP TABLE mergetree_metrics; +DROP TABLE ddlworker_metrics; +DROP TABLE storages3_metrics; +DROP TABLE background_metrics; +DROP TABLE temporaryfiles_metrics; +DROP TABLE parts_metrics; +DROP TABLE distrcache_metrics; +DROP TABLE drop_metrics; + +DROP DICTIONARY filesystem_dict; +DROP DICTIONARY kafka_dict; +DROP DICTIONARY mergetree_dict; +DROP DICTIONARY ddlworker_dict; +DROP DICTIONARY storages3_dict; +DROP DICTIONARY background_dict; +DROP DICTIONARY temporaryfiles_dict; +DROP DICTIONARY parts_dict; +DROP DICTIONARY distrcache_dict; +DROP DICTIONARY drop_dict; diff --git a/parser/testdata/03755_enable_sparse_nullable_consistently/ast.json b/parser/testdata/03755_enable_sparse_nullable_consistently/ast.json new file 
mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03755_enable_sparse_nullable_consistently/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03755_enable_sparse_nullable_consistently/metadata.json b/parser/testdata/03755_enable_sparse_nullable_consistently/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03755_enable_sparse_nullable_consistently/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03755_enable_sparse_nullable_consistently/query.sql b/parser/testdata/03755_enable_sparse_nullable_consistently/query.sql new file mode 100644 index 000000000..713a513f5 --- /dev/null +++ b/parser/testdata/03755_enable_sparse_nullable_consistently/query.sql @@ -0,0 +1,15 @@ +-- { echo ON } + +DROP TABLE IF EXISTS t; + +CREATE TABLE t (id UInt64, n Nullable(UInt64), s Nullable(String), t Tuple(a Nullable(String), b Nullable(UInt64))) ENGINE = MergeTree ORDER BY () SETTINGS index_granularity = 10, min_bytes_for_wide_part = 0, ratio_of_defaults_for_sparse_serialization = 0.1, serialization_info_version = 'with_types', nullable_serialization_version = 'basic'; + +INSERT INTO t(id) VALUES (1), (2), (3); + +SELECT + column, + substreams +FROM system.parts_columns +WHERE (database = currentDatabase()) AND (`table` = 't') AND active; + +DROP TABLE t; diff --git a/parser/testdata/03755_final_prewhere_duplicate_columns/ast.json b/parser/testdata/03755_final_prewhere_duplicate_columns/ast.json new file mode 100644 index 000000000..ed56af808 --- /dev/null +++ b/parser/testdata/03755_final_prewhere_duplicate_columns/ast.json @@ -0,0 +1,52 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery 03755_final_prewhere_duplicate_columns (children 3)" + }, + { + "explain": " Identifier 03755_final_prewhere_duplicate_columns" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration c0 (children 1)" + }, + { + "explain": " DataType UInt8" + }, + { + "explain": " Storage definition (children 2)" + }, + { + "explain": " Function AggregatingMergeTree (children 1)" + }, + { + "explain": " ExpressionList" + }, + { + "explain": " Identifier c0" + } + ], + + "rows": 10, + + "statistics": + { + "elapsed": 0.001518885, + "rows_read": 10, + "bytes_read": 416 + } +} diff --git a/parser/testdata/03755_final_prewhere_duplicate_columns/metadata.json b/parser/testdata/03755_final_prewhere_duplicate_columns/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03755_final_prewhere_duplicate_columns/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03755_final_prewhere_duplicate_columns/query.sql b/parser/testdata/03755_final_prewhere_duplicate_columns/query.sql new file mode 100644 index 000000000..ad0d01066 --- /dev/null +++ b/parser/testdata/03755_final_prewhere_duplicate_columns/query.sql @@ -0,0 +1,4 @@ +CREATE TABLE 03755_final_prewhere_duplicate_columns (c0 UInt8) ENGINE = AggregatingMergeTree() ORDER BY (c0); +INSERT INTO TABLE 03755_final_prewhere_duplicate_columns (c0) SELECT 2 FROM numbers(3); +INSERT INTO TABLE 03755_final_prewhere_duplicate_columns (c0) SELECT number FROM numbers(10); +SELECT 03755_final_prewhere_duplicate_columns.c0 FROM 03755_final_prewhere_duplicate_columns FINAL PREWHERE 03755_final_prewhere_duplicate_columns.c0 ORDER BY c0; diff --git 
a/parser/testdata/03755_nested_recursive_cte/ast.json b/parser/testdata/03755_nested_recursive_cte/ast.json new file mode 100644 index 000000000..e72b987d3 --- /dev/null +++ b/parser/testdata/03755_nested_recursive_cte/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t0 (children 1)" + }, + { + "explain": " Identifier t0" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.00125559, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03755_nested_recursive_cte/metadata.json b/parser/testdata/03755_nested_recursive_cte/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03755_nested_recursive_cte/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03755_nested_recursive_cte/query.sql b/parser/testdata/03755_nested_recursive_cte/query.sql new file mode 100644 index 000000000..d7c09e50c --- /dev/null +++ b/parser/testdata/03755_nested_recursive_cte/query.sql @@ -0,0 +1,21 @@ +DROP TABLE IF EXISTS t0; +CREATE TABLE t0 (x Int32) ENGINE = Memory; +INSERT INTO t0 VALUES (1); + +-- The original problematic query pattern: the inner CTE references the outer CTE +-- Using count() to get deterministic output regardless of how many rows are produced before hitting the recursion-depth limit +SET max_recursive_cte_evaluation_depth = 5; +SET enable_analyzer = 1; + +SELECT count() > 0 FROM ( + WITH RECURSIVE q AS ( + SELECT 1 FROM t0 UNION ALL + (WITH RECURSIVE x AS + (SELECT 1 FROM t0 UNION ALL + (SELECT 1 FROM q WHERE FALSE UNION ALL + SELECT 1 FROM x WHERE FALSE)) + SELECT 1 FROM x)) + SELECT 1 FROM q +); -- { serverError TOO_DEEP_RECURSION } + +DROP TABLE t0; diff --git a/parser/testdata/03755_pr_join_with_view/ast.json b/parser/testdata/03755_pr_join_with_view/ast.json new file mode 100644 index 000000000..85529700b --- /dev/null +++ b/parser/testdata/03755_pr_join_with_view/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery v (children 1)" + }, + { + "explain": " Identifier v" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.000958523, + "rows_read": 2, + "bytes_read": 54 + } +} diff --git a/parser/testdata/03755_pr_join_with_view/metadata.json b/parser/testdata/03755_pr_join_with_view/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03755_pr_join_with_view/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03755_pr_join_with_view/query.sql b/parser/testdata/03755_pr_join_with_view/query.sql new file mode 100644 index 000000000..832819bcb --- /dev/null +++ b/parser/testdata/03755_pr_join_with_view/query.sql @@ -0,0 +1,40 @@ +drop table if exists v; +drop table if exists t0 sync; +drop table if exists t1 sync; + +create table t0 (k UInt64, v String) engine ReplicatedMergeTree('/clickhouse/{database}/t0', '0') order by tuple(); +create table t1 (k UInt64, v String) engine ReplicatedMergeTree('/clickhouse/{database}/t1', '0') order by tuple(); + +CREATE VIEW v AS SELECT * FROM t0; + +insert into t0 select number, toString(number) from numbers(10); +insert into t1 select number, toString(number + 100) from numbers(10); + +SET enable_parallel_replicas=1, max_parallel_replicas=3, cluster_for_parallel_replicas='test_cluster_one_shard_three_replicas_localhost'; + +-- inner join +(select * from v join t1 using k order by all) +except +(select * from v join t1 using k order by
all settings enable_parallel_replicas=0); + +(select v.k, v.v, t1.k, t1.v from v join t1 using k order by all) +except +(select v.k, v.v, t1.k, t1.v from t1 join v using k order by all); + +-- right join +(select * from v right join t1 using k order by all) +except +(select * from v right join t1 using k order by all settings enable_parallel_replicas=0); + +(select v.k, v.v, t1.k, t1.v from v right join t1 using k order by all) +except +(select v.k, v.v, t1.k, t1.v from t1 right join v using k order by all); + +-- left join +(select * from v left join t1 using k order by all) +except +(select * from v left join t1 using k order by all settings enable_parallel_replicas=0); + +(select v.k, v.v, t1.k, t1.v from v left join t1 using k order by all) +except +(select v.k, v.v, t1.k, t1.v from t1 left join v using k order by all); diff --git a/parser/testdata/03756_capn_proto_message_size_limit/ast.json b/parser/testdata/03756_capn_proto_message_size_limit/ast.json new file mode 100644 index 000000000..866cb996d --- /dev/null +++ b/parser/testdata/03756_capn_proto_message_size_limit/ast.json @@ -0,0 +1,55 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery 03756_capn_proto_message_size_limit (children 3)" + }, + { + "explain": " Identifier 03756_capn_proto_message_size_limit" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " ColumnDeclaration c0 (children 1)" + }, + { + "explain": " DataType Decimal32 (children 1)" + }, + { + "explain": " ExpressionList (children 1)" + }, + { + "explain": " Literal UInt64_8" + }, + { + "explain": " Storage definition (children 1)" + }, + { + "explain": " Function Memory (children 1)" + }, + { + "explain": " ExpressionList" + } + ], + + "rows": 11, + + "statistics": + { + "elapsed": 0.001548359, + "rows_read": 11, + "bytes_read": 460 + } +} diff --git a/parser/testdata/03756_capn_proto_message_size_limit/metadata.json b/parser/testdata/03756_capn_proto_message_size_limit/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03756_capn_proto_message_size_limit/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03756_capn_proto_message_size_limit/query.sql b/parser/testdata/03756_capn_proto_message_size_limit/query.sql new file mode 100644 index 000000000..994008b61 --- /dev/null +++ b/parser/testdata/03756_capn_proto_message_size_limit/query.sql @@ -0,0 +1,2 @@ +CREATE TABLE 03756_capn_proto_message_size_limit (c0 Decimal32(8)) ENGINE = Memory(); +INSERT INTO TABLE FUNCTION url('http://localhost:8123/?query=INSERT+INTO+' || currentDatabase() || '.03756_capn_proto_message_size_limit+(c0)+FORMAT+CapnProto', 'BSONEachRow', 'c0 Decimal32(8)') VALUES (toDecimal32('-8.0662922', 8)), (toDecimal32('-1.32114', 8)), (toDecimal32('1.6043432', 8)), (toDecimal32('9.3646', 8)), (toDecimal32('1.20', 8)); -- { serverError 86 } \ No newline at end of file diff --git a/parser/testdata/03756_mongodb_secret_arguments/ast.json b/parser/testdata/03756_mongodb_secret_arguments/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03756_mongodb_secret_arguments/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03756_mongodb_secret_arguments/metadata.json b/parser/testdata/03756_mongodb_secret_arguments/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03756_mongodb_secret_arguments/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03756_mongodb_secret_arguments/query.sql b/parser/testdata/03756_mongodb_secret_arguments/query.sql new file mode 100644 index 000000000..ce7077ab9 --- /dev/null +++ b/parser/testdata/03756_mongodb_secret_arguments/query.sql @@ -0,0 +1,3 @@ +-- Tags: no-fasttest + +SELECT * FROM mongodb(some_named_collection, now()); -- { serverError NUMBER_OF_ARGUMENTS_DOESNT_MATCH } \ No newline at end of file diff --git a/parser/testdata/03756_update_query_formatting/ast.json b/parser/testdata/03756_update_query_formatting/ast.json new file mode 100644 index 000000000..571fd03b1 --- /dev/null +++ b/parser/testdata/03756_update_query_formatting/ast.json @@ -0,0 +1,58 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "CreateQuery 03756_update_query_formatting (children 3)" + }, + { + "explain": " Identifier 03756_update_query_formatting" + }, + { + "explain": " Columns definition (children 1)" + }, + { + "explain": " ExpressionList (children 2)" + }, + { + "explain": " ColumnDeclaration a (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " ColumnDeclaration b (children 1)" + }, + { + "explain": " DataType UInt64" + }, + { + "explain": " Storage definition (children 3)" + }, + { + "explain": " Function MergeTree" + }, + { + "explain": " Identifier a" + }, + { + "explain": " Set" + } + ], + + "rows": 12, + + "statistics": + { + "elapsed": 0.00130149, + "rows_read": 12, + "bytes_read": 431 + } +} diff --git a/parser/testdata/03756_update_query_formatting/metadata.json b/parser/testdata/03756_update_query_formatting/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03756_update_query_formatting/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03756_update_query_formatting/query.sql b/parser/testdata/03756_update_query_formatting/query.sql new file mode 100644 index 000000000..b16cf7247 --- /dev/null +++ b/parser/testdata/03756_update_query_formatting/query.sql @@ -0,0 +1,4 @@ +CREATE TABLE 03756_update_query_formatting (a UInt64, b UInt64) Engine = MergeTree ORDER BY a SETTINGS enable_block_number_column = 1, enable_block_offset_column = 1; +UPDATE 03756_update_query_formatting SET b = 2 WHERE (NOT a)[0] AS a0; -- { clientError SYNTAX_ERROR } +ALTER TABLE 03756_update_query_formatting UPDATE b = 2 WHERE (NOT a)[0] AS a0; -- { clientError SYNTAX_ERROR } +ALTER TABLE 03756_update_query_formatting DELETE WHERE (NOT a)[0] AS a0; -- { clientError SYNTAX_ERROR } \ No newline at end of file diff --git a/parser/testdata/03757_optimize_skip_unused_shards_with_type_cast/ast.json b/parser/testdata/03757_optimize_skip_unused_shards_with_type_cast/ast.json new file mode 100644 index 000000000..97a98b704 --- /dev/null +++ b/parser/testdata/03757_optimize_skip_unused_shards_with_type_cast/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery t1 (children 1)" + }, + { + "explain": " Identifier t1" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001329637, + "rows_read": 2, + "bytes_read": 56 + } +} diff --git a/parser/testdata/03757_optimize_skip_unused_shards_with_type_cast/metadata.json b/parser/testdata/03757_optimize_skip_unused_shards_with_type_cast/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ 
b/parser/testdata/03757_optimize_skip_unused_shards_with_type_cast/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03757_optimize_skip_unused_shards_with_type_cast/query.sql b/parser/testdata/03757_optimize_skip_unused_shards_with_type_cast/query.sql new file mode 100644 index 000000000..eb2b67580 --- /dev/null +++ b/parser/testdata/03757_optimize_skip_unused_shards_with_type_cast/query.sql @@ -0,0 +1,6 @@ +drop table if exists t1; + +create table t1 (Col LowCardinality(String)) engine = MergeTree; +insert into t1 values ('a'), ('b'), ('c'); + +select * from remote('127.{1,2}', currentDatabase(), t1, multiIf(Col = 'a', 0, Col = 'b', 1, -1)) where Col in ('a', 'b') order by all settings optimize_skip_unused_shards=1; diff --git a/parser/testdata/03758_positional_argument_agg_projection/ast.json b/parser/testdata/03758_positional_argument_agg_projection/ast.json new file mode 100644 index 000000000..ab3b759ba --- /dev/null +++ b/parser/testdata/03758_positional_argument_agg_projection/ast.json @@ -0,0 +1,28 @@ +{ + "meta": + [ + { + "name": "explain", + "type": "String" + } + ], + + "data": + [ + { + "explain": "DropQuery test (children 1)" + }, + { + "explain": " Identifier test" + } + ], + + "rows": 2, + + "statistics": + { + "elapsed": 0.001197315, + "rows_read": 2, + "bytes_read": 60 + } +} diff --git a/parser/testdata/03758_positional_argument_agg_projection/metadata.json b/parser/testdata/03758_positional_argument_agg_projection/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03758_positional_argument_agg_projection/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03758_positional_argument_agg_projection/query.sql b/parser/testdata/03758_positional_argument_agg_projection/query.sql new file mode 100644 index 000000000..a3f36b6ad --- /dev/null +++ b/parser/testdata/03758_positional_argument_agg_projection/query.sql @@ -0,0 +1,33 @@ +DROP TABLE IF EXISTS test; + +CREATE TABLE test +( + `a` UInt64, + `b` String +) +ENGINE = MergeTree +ORDER BY a; + +SET enable_positional_arguments_for_projections = 0; + +ALTER TABLE test + ADD PROJECTION test_projection + ( + SELECT + b, + a + GROUP BY 1 + ); -- { serverError NOT_AN_AGGREGATE } + +SET enable_positional_arguments_for_projections = 1; + +ALTER TABLE test + ADD PROJECTION test_projection + ( + SELECT + b, + a + GROUP BY 1, 2 + ); + +DROP TABLE test; diff --git a/parser/testdata/03759_marks_cache_events/ast.json b/parser/testdata/03759_marks_cache_events/ast.json new file mode 100644 index 000000000..490a2e17e --- /dev/null +++ b/parser/testdata/03759_marks_cache_events/ast.json @@ -0,0 +1 @@ +{"error": true} diff --git a/parser/testdata/03759_marks_cache_events/metadata.json b/parser/testdata/03759_marks_cache_events/metadata.json new file mode 100644 index 000000000..ef120d978 --- /dev/null +++ b/parser/testdata/03759_marks_cache_events/metadata.json @@ -0,0 +1 @@ +{"todo": true} diff --git a/parser/testdata/03759_marks_cache_events/query.sql b/parser/testdata/03759_marks_cache_events/query.sql new file mode 100644 index 000000000..01b8f33e2 --- /dev/null +++ b/parser/testdata/03759_marks_cache_events/query.sql @@ -0,0 +1,42 @@ +-- Tags: no-parallel-replicas + +drop table if exists data; +create table data (key Int) engine=MergeTree() order by () settings prewarm_mark_cache=0; + +set load_marks_asynchronously=0; + +insert into data values (1); +-- +-- SELECTs +-- +select * from data format Null settings load_marks_asynchronously=0; +select * 
from data format Null settings load_marks_asynchronously=0; +-- drop marks cache +detach table data; +attach table data; +select * from data format Null settings load_marks_asynchronously=1; +select * from data format Null settings load_marks_asynchronously=1; + +system flush logs query_log; +select query_kind, Settings['load_marks_asynchronously'] load_marks_asynchronously, ProfileEvents['MarkCacheHits'] hits, ProfileEvents['MarkCacheMisses'] misses + from system.query_log + where current_database = currentDatabase() and query_kind in ('Select', 'Insert') and type != 'QueryStart' + order by event_time_microseconds + format CSVWithNames; + +-- +-- metrics for merges +-- +-- marks are still cached by the preceding SELECTs, so expect only hits +optimize table data final; +-- drop marks cache to trigger misses +detach table data; +attach table data; +optimize table data final; + +system flush logs part_log; +select part_name, ProfileEvents['MarkCacheHits'] hits, ProfileEvents['MarkCacheMisses'] misses + from system.part_log + where database = currentDatabase() and event_type = 'MergeParts' + order by event_time_microseconds + format CSVWithNames; diff --git a/parser/testdata/accuratecast/explain.txt b/parser/testdata/accuratecast/explain.txt deleted file mode 100644 index d472709a4..000000000 --- a/parser/testdata/accuratecast/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function accurateCast (children 1) - ExpressionList (children 2) - Literal Float64_123.456 - Literal \'Int32\' diff --git a/parser/testdata/accuratecast/query.sql b/parser/testdata/accuratecast/query.sql deleted file mode 100644 index 326212324..000000000 --- a/parser/testdata/accuratecast/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT accurateCast(123.456, 'Int32') diff --git a/parser/testdata/add_column/explain.txt b/parser/testdata/add_column/explain.txt deleted file mode 100644 index eea39e22c..000000000 --- a/parser/testdata/add_column/explain.txt +++ /dev/null @@ -1,6 +0,0 @@ -AlterQuery test_table (children 2) - ExpressionList (children 1) - AlterCommand ADD_COLUMN (children 1) - ColumnDeclaration new_col (children 1) - DataType UInt64 - Identifier test_table diff --git a/parser/testdata/add_column/query.sql b/parser/testdata/add_column/query.sql deleted file mode 100644 index a3db8fa73..000000000 --- a/parser/testdata/add_column/query.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE test_table ADD COLUMN new_col UInt64 diff --git a/parser/testdata/add_column_after/explain.txt b/parser/testdata/add_column_after/explain.txt deleted file mode 100644 index 78b48f765..000000000 --- a/parser/testdata/add_column_after/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -AlterQuery test_table (children 2) - ExpressionList (children 1) - AlterCommand ADD_COLUMN (children 2) - ColumnDeclaration new_col (children 1) - DataType UInt64 - Identifier id - Identifier test_table diff --git a/parser/testdata/add_column_after/query.sql b/parser/testdata/add_column_after/query.sql deleted file mode 100644 index 005533b7e..000000000 --- a/parser/testdata/add_column_after/query.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE test_table ADD COLUMN new_col UInt64 AFTER id diff --git a/parser/testdata/add_column_if_not_exists/explain.txt b/parser/testdata/add_column_if_not_exists/explain.txt deleted file mode 100644 index eea39e22c..000000000 --- a/parser/testdata/add_column_if_not_exists/explain.txt +++ /dev/null @@ -1,6 +0,0 @@ -AlterQuery test_table (children 2) - ExpressionList (children 1) -
AlterCommand ADD_COLUMN (children 1) - ColumnDeclaration new_col (children 1) - DataType UInt64 - Identifier test_table diff --git a/parser/testdata/add_column_if_not_exists/query.sql b/parser/testdata/add_column_if_not_exists/query.sql deleted file mode 100644 index eb65b909f..000000000 --- a/parser/testdata/add_column_if_not_exists/query.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE test_table ADD COLUMN IF NOT EXISTS new_col UInt64 diff --git a/parser/testdata/add_constraint/explain.txt b/parser/testdata/add_constraint/explain.txt deleted file mode 100644 index 7cc39c0ab..000000000 --- a/parser/testdata/add_constraint/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -AlterQuery test_table (children 2) - ExpressionList (children 1) - AlterCommand ADD_CONSTRAINT (children 1) - Constraint (children 1) - Function greater (children 1) - ExpressionList (children 2) - Identifier col - Literal UInt64_0 - Identifier test_table diff --git a/parser/testdata/add_constraint/query.sql b/parser/testdata/add_constraint/query.sql deleted file mode 100644 index e4e8da8ef..000000000 --- a/parser/testdata/add_constraint/query.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE test_table ADD CONSTRAINT c CHECK col > 0 diff --git a/parser/testdata/add_index/explain.txt b/parser/testdata/add_index/explain.txt deleted file mode 100644 index c018dd696..000000000 --- a/parser/testdata/add_index/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -AlterQuery test_table (children 2) - ExpressionList (children 1) - AlterCommand ADD_INDEX (children 1) - Index (children 2) - Identifier col - Function minmax (children 1) - ExpressionList - Identifier test_table diff --git a/parser/testdata/add_index/query.sql b/parser/testdata/add_index/query.sql deleted file mode 100644 index 1cde3c91e..000000000 --- a/parser/testdata/add_index/query.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE test_table ADD INDEX idx (col) TYPE minmax GRANULARITY 4 diff --git a/parser/testdata/adddays/explain.txt b/parser/testdata/adddays/explain.txt deleted file mode 100644 index b0f87793c..000000000 --- a/parser/testdata/adddays/explain.txt +++ /dev/null @@ -1,10 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function addDays (children 1) - ExpressionList (children 2) - Function toDate (children 1) - ExpressionList (children 1) - Literal \'2023-01-01\' - Literal UInt64_5 diff --git a/parser/testdata/adddays/query.sql b/parser/testdata/adddays/query.sql deleted file mode 100644 index b1867323b..000000000 --- a/parser/testdata/adddays/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT addDays(toDate('2023-01-01'), 5) diff --git a/parser/testdata/addition/explain.txt b/parser/testdata/addition/explain.txt deleted file mode 100644 index 0c644b4cc..000000000 --- a/parser/testdata/addition/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function plus (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Literal UInt64_2 diff --git a/parser/testdata/addition/query.sql b/parser/testdata/addition/query.sql deleted file mode 100644 index 39517a1a8..000000000 --- a/parser/testdata/addition/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 + 2 diff --git a/parser/testdata/aggregate_array_combinator/explain.txt b/parser/testdata/aggregate_array_combinator/explain.txt deleted file mode 100644 index 6c4073edd..000000000 --- a/parser/testdata/aggregate_array_combinator/explain.txt 
+++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function sumArray (children 1) - ExpressionList (children 1) - Literal Array_[UInt64_1, UInt64_2, UInt64_3] diff --git a/parser/testdata/aggregate_array_combinator/query.sql b/parser/testdata/aggregate_array_combinator/query.sql deleted file mode 100644 index 3ac6cd5fe..000000000 --- a/parser/testdata/aggregate_array_combinator/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT sumArray([1, 2, 3]) diff --git a/parser/testdata/aggregate_merge_combinator/explain.txt b/parser/testdata/aggregate_merge_combinator/explain.txt deleted file mode 100644 index 3f6e571a3..000000000 --- a/parser/testdata/aggregate_merge_combinator/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Function sumMerge (children 1) - ExpressionList (children 1) - Identifier sum_state - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier states_table diff --git a/parser/testdata/aggregate_merge_combinator/query.sql b/parser/testdata/aggregate_merge_combinator/query.sql deleted file mode 100644 index d8de99bd0..000000000 --- a/parser/testdata/aggregate_merge_combinator/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT sumMerge(sum_state) FROM states_table diff --git a/parser/testdata/aggregate_state_combinator/explain.txt b/parser/testdata/aggregate_state_combinator/explain.txt deleted file mode 100644 index 6b3495f07..000000000 --- a/parser/testdata/aggregate_state_combinator/explain.txt +++ /dev/null @@ -1,13 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Function sumState (children 1) - ExpressionList (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/aggregate_state_combinator/query.sql b/parser/testdata/aggregate_state_combinator/query.sql deleted file mode 100644 index 4af1692cd..000000000 --- a/parser/testdata/aggregate_state_combinator/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT sumState(number) FROM numbers(10) diff --git a/parser/testdata/all_join/explain.txt b/parser/testdata/all_join/explain.txt deleted file mode 100644 index ae57e89c2..000000000 --- a/parser/testdata/all_join/explain.txt +++ /dev/null @@ -1,21 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (alias a) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TablesInSelectQueryElement (children 2) - TableExpression (children 1) - Function numbers (alias b) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TableJoin (children 1) - Function equals (children 1) - ExpressionList (children 2) - Identifier a.number - Identifier b.number diff --git a/parser/testdata/all_join/query.sql b/parser/testdata/all_join/query.sql deleted file mode 100644 index b07472b01..000000000 --- a/parser/testdata/all_join/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM numbers(5) AS a ALL JOIN numbers(5) AS b ON a.number = 
b.number diff --git a/parser/testdata/alter_add_column/explain.txt b/parser/testdata/alter_add_column/explain.txt deleted file mode 100644 index 16e8d3fdc..000000000 --- a/parser/testdata/alter_add_column/explain.txt +++ /dev/null @@ -1,6 +0,0 @@ -AlterQuery test (children 2) - ExpressionList (children 1) - AlterCommand ADD_COLUMN (children 1) - ColumnDeclaration age (children 1) - DataType UInt32 - Identifier test diff --git a/parser/testdata/alter_add_column/query.sql b/parser/testdata/alter_add_column/query.sql deleted file mode 100644 index e9d71cb74..000000000 --- a/parser/testdata/alter_add_column/query.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE test ADD COLUMN age UInt32 diff --git a/parser/testdata/alter_drop_column/explain.txt b/parser/testdata/alter_drop_column/explain.txt deleted file mode 100644 index 895b4ecd5..000000000 --- a/parser/testdata/alter_drop_column/explain.txt +++ /dev/null @@ -1,5 +0,0 @@ -AlterQuery test (children 2) - ExpressionList (children 1) - AlterCommand DROP_COLUMN (children 1) - Identifier age - Identifier test diff --git a/parser/testdata/alter_drop_column/query.sql b/parser/testdata/alter_drop_column/query.sql deleted file mode 100644 index 2ffc7afcc..000000000 --- a/parser/testdata/alter_drop_column/query.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE test DROP COLUMN age diff --git a/parser/testdata/anti_join/explain.txt b/parser/testdata/anti_join/explain.txt deleted file mode 100644 index ae57e89c2..000000000 --- a/parser/testdata/anti_join/explain.txt +++ /dev/null @@ -1,21 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (alias a) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TablesInSelectQueryElement (children 2) - TableExpression (children 1) - Function numbers (alias b) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TableJoin (children 1) - Function equals (children 1) - ExpressionList (children 2) - Identifier a.number - Identifier b.number diff --git a/parser/testdata/anti_join/query.sql b/parser/testdata/anti_join/query.sql deleted file mode 100644 index e24d18116..000000000 --- a/parser/testdata/anti_join/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM numbers(5) AS a ANTI JOIN numbers(5) AS b ON a.number = b.number diff --git a/parser/testdata/any_join/explain.txt b/parser/testdata/any_join/explain.txt deleted file mode 100644 index ae57e89c2..000000000 --- a/parser/testdata/any_join/explain.txt +++ /dev/null @@ -1,21 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (alias a) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TablesInSelectQueryElement (children 2) - TableExpression (children 1) - Function numbers (alias b) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TableJoin (children 1) - Function equals (children 1) - ExpressionList (children 2) - Identifier a.number - Identifier b.number diff --git a/parser/testdata/any_join/query.sql b/parser/testdata/any_join/query.sql deleted file mode 100644 index 951577542..000000000 --- a/parser/testdata/any_join/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM numbers(5) AS a ANY JOIN 
numbers(5) AS b ON a.number = b.number diff --git a/parser/testdata/arithmetic/explain.txt b/parser/testdata/arithmetic/explain.txt deleted file mode 100644 index ec26ab304..000000000 --- a/parser/testdata/arithmetic/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function plus (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Function multiply (children 1) - ExpressionList (children 2) - Literal UInt64_2 - Literal UInt64_3 diff --git a/parser/testdata/arithmetic/query.sql b/parser/testdata/arithmetic/query.sql deleted file mode 100644 index 33cbb5157..000000000 --- a/parser/testdata/arithmetic/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 + 2 * 3 diff --git a/parser/testdata/array_constructor/explain.txt b/parser/testdata/array_constructor/explain.txt deleted file mode 100644 index eb99630e0..000000000 --- a/parser/testdata/array_constructor/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function array (children 1) - ExpressionList (children 3) - Literal UInt64_1 - Literal UInt64_2 - Literal UInt64_3 diff --git a/parser/testdata/array_constructor/query.sql b/parser/testdata/array_constructor/query.sql deleted file mode 100644 index ed0688434..000000000 --- a/parser/testdata/array_constructor/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT array(1, 2, 3) diff --git a/parser/testdata/array_join_basic/explain.txt b/parser/testdata/array_join_basic/explain.txt deleted file mode 100644 index 56eb163a2..000000000 --- a/parser/testdata/array_join_basic/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayJoin (children 1) - ExpressionList (children 1) - Literal Array_[\'Hello\', \'Goodbye\'] diff --git a/parser/testdata/array_join_basic/metadata.json b/parser/testdata/array_join_basic/metadata.json deleted file mode 100644 index 04010ce9a..000000000 --- a/parser/testdata/array_join_basic/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"source": "https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00008_array_join.sql"} diff --git a/parser/testdata/array_join_nested/explain.txt b/parser/testdata/array_join_nested/explain.txt deleted file mode 100644 index d1c5b9967..000000000 --- a/parser/testdata/array_join_nested/explain.txt +++ /dev/null @@ -1,10 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Function arrayJoin (alias x) (children 1) - ExpressionList (children 1) - Literal Array_[Array_[UInt64_3, UInt64_4, UInt64_5], Array_[UInt64_6, UInt64_7], Array_[UInt64_2], Array_[UInt64_1, UInt64_1]] - ExpressionList (children 1) - OrderByElement (children 1) - Identifier x diff --git a/parser/testdata/array_join_nested/metadata.json b/parser/testdata/array_join_nested/metadata.json deleted file mode 100644 index ef8a62da3..000000000 --- a/parser/testdata/array_join_nested/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"source": "https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00020_sorting_arrays.sql"} diff --git a/parser/testdata/array_join_stmt/explain.txt b/parser/testdata/array_join_stmt/explain.txt deleted file mode 100644 index ab679028b..000000000 --- 
a/parser/testdata/array_join_stmt/explain.txt +++ /dev/null @@ -1,14 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 2) - Identifier s - Identifier arr - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier arrays_test - TablesInSelectQueryElement (children 1) - ArrayJoin (children 1) - ExpressionList (children 1) - Identifier arr diff --git a/parser/testdata/array_join_stmt/metadata.json b/parser/testdata/array_join_stmt/metadata.json deleted file mode 100644 index cdfa105d9..000000000 --- a/parser/testdata/array_join_stmt/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"todo": true, "source": "https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00013_create_table_with_arrays.sql"} diff --git a/parser/testdata/array_join_stmt/query.sql b/parser/testdata/array_join_stmt/query.sql deleted file mode 100644 index 238f88820..000000000 --- a/parser/testdata/array_join_stmt/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT s, arr FROM arrays_test ARRAY JOIN arr diff --git a/parser/testdata/array_join_with_alias/explain.txt b/parser/testdata/array_join_with_alias/explain.txt deleted file mode 100644 index 8607ddb1b..000000000 --- a/parser/testdata/array_join_with_alias/explain.txt +++ /dev/null @@ -1,15 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 3) - Identifier s - Identifier arr - Identifier a - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier arrays_test - TablesInSelectQueryElement (children 1) - ArrayJoin (children 1) - ExpressionList (children 1) - Identifier arr (alias a) diff --git a/parser/testdata/array_join_with_alias/metadata.json b/parser/testdata/array_join_with_alias/metadata.json deleted file mode 100644 index cdfa105d9..000000000 --- a/parser/testdata/array_join_with_alias/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"todo": true, "source": "https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00013_create_table_with_arrays.sql"} diff --git a/parser/testdata/array_join_with_alias/query.sql b/parser/testdata/array_join_with_alias/query.sql deleted file mode 100644 index a523fa0f2..000000000 --- a/parser/testdata/array_join_with_alias/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT s, arr, a FROM arrays_test ARRAY JOIN arr AS a diff --git a/parser/testdata/array_join_with_enumerate/explain.txt b/parser/testdata/array_join_with_enumerate/explain.txt deleted file mode 100644 index fec6aa19d..000000000 --- a/parser/testdata/array_join_with_enumerate/explain.txt +++ /dev/null @@ -1,19 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 4) - Identifier s - Identifier arr - Identifier a - Identifier num - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier arrays_test - TablesInSelectQueryElement (children 1) - ArrayJoin (children 1) - ExpressionList (children 2) - Identifier arr (alias a) - Function arrayEnumerate (alias num) (children 1) - ExpressionList (children 1) - Identifier arr diff --git a/parser/testdata/array_join_with_enumerate/metadata.json b/parser/testdata/array_join_with_enumerate/metadata.json deleted file mode 100644 index cdfa105d9..000000000 --- 
a/parser/testdata/array_join_with_enumerate/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"todo": true, "source": "https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00013_create_table_with_arrays.sql"} diff --git a/parser/testdata/array_join_with_enumerate/query.sql b/parser/testdata/array_join_with_enumerate/query.sql deleted file mode 100644 index 878f0952e..000000000 --- a/parser/testdata/array_join_with_enumerate/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT s, arr, a, num FROM arrays_test ARRAY JOIN arr AS a, arrayEnumerate(arr) AS num diff --git a/parser/testdata/array_join_with_map/explain.txt b/parser/testdata/array_join_with_map/explain.txt deleted file mode 100644 index 12d310e73..000000000 --- a/parser/testdata/array_join_with_map/explain.txt +++ /dev/null @@ -1,28 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 4) - Identifier s - Identifier arr - Identifier a - Identifier mapped - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier arrays_test - TablesInSelectQueryElement (children 1) - ArrayJoin (children 1) - ExpressionList (children 2) - Identifier arr (alias a) - Function arrayMap (alias mapped) (children 1) - ExpressionList (children 2) - Function lambda (children 1) - ExpressionList (children 2) - Function tuple (children 1) - ExpressionList (children 1) - Identifier x - Function plus (children 1) - ExpressionList (children 2) - Identifier x - Literal UInt64_1 - Identifier arr diff --git a/parser/testdata/array_join_with_map/metadata.json b/parser/testdata/array_join_with_map/metadata.json deleted file mode 100644 index cdfa105d9..000000000 --- a/parser/testdata/array_join_with_map/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"todo": true, "source": "https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00013_create_table_with_arrays.sql"} diff --git a/parser/testdata/array_join_with_map/query.sql b/parser/testdata/array_join_with_map/query.sql deleted file mode 100644 index 010faea3d..000000000 --- a/parser/testdata/array_join_with_map/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT s, arr, a, mapped FROM arrays_test ARRAY JOIN arr AS a, arrayMap(x -> x + 1, arr) AS mapped diff --git a/parser/testdata/array_literal/explain.txt b/parser/testdata/array_literal/explain.txt deleted file mode 100644 index b5249a10e..000000000 --- a/parser/testdata/array_literal/explain.txt +++ /dev/null @@ -1,5 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Literal Array_[UInt64_1, UInt64_2, UInt64_3] diff --git a/parser/testdata/array_literal/query.sql b/parser/testdata/array_literal/query.sql deleted file mode 100644 index a6cbb72f7..000000000 --- a/parser/testdata/array_literal/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT [1, 2, 3] diff --git a/parser/testdata/array_literal_hello_goodbye/explain.txt b/parser/testdata/array_literal_hello_goodbye/explain.txt deleted file mode 100644 index c2b7fb37e..000000000 --- a/parser/testdata/array_literal_hello_goodbye/explain.txt +++ /dev/null @@ -1,5 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Literal Array_[\'Hello\', \'Goodbye\'] diff --git a/parser/testdata/array_literal_hello_goodbye/metadata.json b/parser/testdata/array_literal_hello_goodbye/metadata.json deleted file mode 
100644 index b155a6a59..000000000 --- a/parser/testdata/array_literal_hello_goodbye/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"source": "https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00007_array.sql"} diff --git a/parser/testdata/array_literal_hello_goodbye/query.sql b/parser/testdata/array_literal_hello_goodbye/query.sql deleted file mode 100644 index 7c1f27f19..000000000 --- a/parser/testdata/array_literal_hello_goodbye/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT ['Hello', 'Goodbye'] diff --git a/parser/testdata/array_subscript/explain.txt b/parser/testdata/array_subscript/explain.txt deleted file mode 100644 index f882498fa..000000000 --- a/parser/testdata/array_subscript/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayElement (children 1) - ExpressionList (children 2) - Literal Array_[UInt64_1, UInt64_2, UInt64_3] - Literal UInt64_1 diff --git a/parser/testdata/array_subscript/query.sql b/parser/testdata/array_subscript/query.sql deleted file mode 100644 index a4c7c1483..000000000 --- a/parser/testdata/array_subscript/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT [1, 2, 3][1] diff --git a/parser/testdata/arrayall_lambda/explain.txt b/parser/testdata/arrayall_lambda/explain.txt deleted file mode 100644 index c2ce6b905..000000000 --- a/parser/testdata/arrayall_lambda/explain.txt +++ /dev/null @@ -1,16 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayAll (children 1) - ExpressionList (children 2) - Function lambda (children 1) - ExpressionList (children 2) - Function tuple (children 1) - ExpressionList (children 1) - Identifier x - Function greater (children 1) - ExpressionList (children 2) - Identifier x - Literal UInt64_0 - Literal Array_[UInt64_1, UInt64_2, UInt64_3] diff --git a/parser/testdata/arrayall_lambda/query.sql b/parser/testdata/arrayall_lambda/query.sql deleted file mode 100644 index b4f89a1b9..000000000 --- a/parser/testdata/arrayall_lambda/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayAll(x -> x > 0, [1, 2, 3]) diff --git a/parser/testdata/arraycompact/explain.txt b/parser/testdata/arraycompact/explain.txt deleted file mode 100644 index fb3a2af01..000000000 --- a/parser/testdata/arraycompact/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayCompact (children 1) - ExpressionList (children 1) - Literal Array_[UInt64_1, UInt64_1, UInt64_2, UInt64_2, UInt64_3, UInt64_3] diff --git a/parser/testdata/arraycompact/query.sql b/parser/testdata/arraycompact/query.sql deleted file mode 100644 index af9b18502..000000000 --- a/parser/testdata/arraycompact/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayCompact([1, 1, 2, 2, 3, 3]) diff --git a/parser/testdata/arrayconcat/explain.txt b/parser/testdata/arrayconcat/explain.txt deleted file mode 100644 index 70fef2855..000000000 --- a/parser/testdata/arrayconcat/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayConcat (children 1) - ExpressionList (children 2) - Literal Array_[UInt64_1, UInt64_2] - Literal Array_[UInt64_3, UInt64_4] diff --git a/parser/testdata/arrayconcat/query.sql 
b/parser/testdata/arrayconcat/query.sql deleted file mode 100644 index 5d7703d95..000000000 --- a/parser/testdata/arrayconcat/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayConcat([1, 2], [3, 4]) diff --git a/parser/testdata/arraydistinct/explain.txt b/parser/testdata/arraydistinct/explain.txt deleted file mode 100644 index 273c7d662..000000000 --- a/parser/testdata/arraydistinct/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayDistinct (children 1) - ExpressionList (children 1) - Literal Array_[UInt64_1, UInt64_1, UInt64_2, UInt64_2, UInt64_3, UInt64_3] diff --git a/parser/testdata/arraydistinct/query.sql b/parser/testdata/arraydistinct/query.sql deleted file mode 100644 index 623c55cfa..000000000 --- a/parser/testdata/arraydistinct/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayDistinct([1, 1, 2, 2, 3, 3]) diff --git a/parser/testdata/arrayelement/explain.txt b/parser/testdata/arrayelement/explain.txt deleted file mode 100644 index f882498fa..000000000 --- a/parser/testdata/arrayelement/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayElement (children 1) - ExpressionList (children 2) - Literal Array_[UInt64_1, UInt64_2, UInt64_3] - Literal UInt64_1 diff --git a/parser/testdata/arrayelement/query.sql b/parser/testdata/arrayelement/query.sql deleted file mode 100644 index 05056393e..000000000 --- a/parser/testdata/arrayelement/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayElement([1, 2, 3], 1) diff --git a/parser/testdata/arrayenumerate/explain.txt b/parser/testdata/arrayenumerate/explain.txt deleted file mode 100644 index d903db1f6..000000000 --- a/parser/testdata/arrayenumerate/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayEnumerate (children 1) - ExpressionList (children 1) - Literal Array_[UInt64_10, UInt64_20, UInt64_30] diff --git a/parser/testdata/arrayenumerate/query.sql b/parser/testdata/arrayenumerate/query.sql deleted file mode 100644 index b79e2b2f8..000000000 --- a/parser/testdata/arrayenumerate/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayEnumerate([10, 20, 30]) diff --git a/parser/testdata/arrayenumerateuniq/explain.txt b/parser/testdata/arrayenumerateuniq/explain.txt deleted file mode 100644 index 53582af89..000000000 --- a/parser/testdata/arrayenumerateuniq/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayEnumerateUniq (children 1) - ExpressionList (children 1) - Literal Array_[UInt64_10, UInt64_10, UInt64_20, UInt64_20] diff --git a/parser/testdata/arrayenumerateuniq/query.sql b/parser/testdata/arrayenumerateuniq/query.sql deleted file mode 100644 index 129796eb8..000000000 --- a/parser/testdata/arrayenumerateuniq/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayEnumerateUniq([10, 10, 20, 20]) diff --git a/parser/testdata/arrayexists_lambda/explain.txt b/parser/testdata/arrayexists_lambda/explain.txt deleted file mode 100644 index 2a2a5c98a..000000000 --- a/parser/testdata/arrayexists_lambda/explain.txt +++ /dev/null @@ -1,16 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - 
ExpressionList (children 1) - Function arrayExists (children 1) - ExpressionList (children 2) - Function lambda (children 1) - ExpressionList (children 2) - Function tuple (children 1) - ExpressionList (children 1) - Identifier x - Function greater (children 1) - ExpressionList (children 2) - Identifier x - Literal UInt64_2 - Literal Array_[UInt64_1, UInt64_2, UInt64_3] diff --git a/parser/testdata/arrayexists_lambda/query.sql b/parser/testdata/arrayexists_lambda/query.sql deleted file mode 100644 index 9d9883b37..000000000 --- a/parser/testdata/arrayexists_lambda/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayExists(x -> x > 2, [1, 2, 3]) diff --git a/parser/testdata/arrayexists_lambda_position/explain.txt b/parser/testdata/arrayexists_lambda_position/explain.txt deleted file mode 100644 index d7a44211d..000000000 --- a/parser/testdata/arrayexists_lambda_position/explain.txt +++ /dev/null @@ -1,19 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayExists (children 1) - ExpressionList (children 2) - Function lambda (children 1) - ExpressionList (children 2) - Function tuple (children 1) - ExpressionList (children 1) - Identifier x - Function greater (children 1) - ExpressionList (children 2) - Function position (children 1) - ExpressionList (children 2) - Identifier x - Literal \'a\' - Literal UInt64_0 - Literal Array_[\'a\'] diff --git a/parser/testdata/arrayexists_lambda_position/metadata.json b/parser/testdata/arrayexists_lambda_position/metadata.json deleted file mode 100644 index e7bca5caa..000000000 --- a/parser/testdata/arrayexists_lambda_position/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"todo": true, "source": "https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00022_func_higher_order_and_constants.sql"} diff --git a/parser/testdata/arrayexists_lambda_position/query.sql b/parser/testdata/arrayexists_lambda_position/query.sql deleted file mode 100644 index 797d6f3d4..000000000 --- a/parser/testdata/arrayexists_lambda_position/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayExists(x -> position(x, 'a') > 0, ['a']) diff --git a/parser/testdata/arrayfilter_lambda/explain.txt b/parser/testdata/arrayfilter_lambda/explain.txt deleted file mode 100644 index d338faa2b..000000000 --- a/parser/testdata/arrayfilter_lambda/explain.txt +++ /dev/null @@ -1,16 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayFilter (children 1) - ExpressionList (children 2) - Function lambda (children 1) - ExpressionList (children 2) - Function tuple (children 1) - ExpressionList (children 1) - Identifier x - Function greater (children 1) - ExpressionList (children 2) - Identifier x - Literal UInt64_1 - Literal Array_[UInt64_1, UInt64_2, UInt64_3] diff --git a/parser/testdata/arrayfilter_lambda/query.sql b/parser/testdata/arrayfilter_lambda/query.sql deleted file mode 100644 index 84c7e7787..000000000 --- a/parser/testdata/arrayfilter_lambda/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayFilter(x -> x > 1, [1, 2, 3]) diff --git a/parser/testdata/arrayfirst_lambda/explain.txt b/parser/testdata/arrayfirst_lambda/explain.txt deleted file mode 100644 index 6188a826a..000000000 --- a/parser/testdata/arrayfirst_lambda/explain.txt +++ /dev/null @@ -1,16 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function 
arrayFirst (children 1) - ExpressionList (children 2) - Function lambda (children 1) - ExpressionList (children 2) - Function tuple (children 1) - ExpressionList (children 1) - Identifier x - Function greater (children 1) - ExpressionList (children 2) - Identifier x - Literal UInt64_1 - Literal Array_[UInt64_1, UInt64_2, UInt64_3] diff --git a/parser/testdata/arrayfirst_lambda/query.sql b/parser/testdata/arrayfirst_lambda/query.sql deleted file mode 100644 index 8bc07fb14..000000000 --- a/parser/testdata/arrayfirst_lambda/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayFirst(x -> x > 1, [1, 2, 3]) diff --git a/parser/testdata/arrayflatten/explain.txt b/parser/testdata/arrayflatten/explain.txt deleted file mode 100644 index 1aa397cf5..000000000 --- a/parser/testdata/arrayflatten/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayFlatten (children 1) - ExpressionList (children 1) - Literal Array_[Array_[UInt64_1, UInt64_2], Array_[UInt64_3, UInt64_4]] diff --git a/parser/testdata/arrayflatten/query.sql b/parser/testdata/arrayflatten/query.sql deleted file mode 100644 index 3cbae03fc..000000000 --- a/parser/testdata/arrayflatten/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayFlatten([[1, 2], [3, 4]]) diff --git a/parser/testdata/arrayjoin/explain.txt b/parser/testdata/arrayjoin/explain.txt deleted file mode 100644 index c22517781..000000000 --- a/parser/testdata/arrayjoin/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayJoin (children 1) - ExpressionList (children 1) - Literal Array_[UInt64_1, UInt64_2, UInt64_3] diff --git a/parser/testdata/arrayjoin/query.sql b/parser/testdata/arrayjoin/query.sql deleted file mode 100644 index f6e4ad7df..000000000 --- a/parser/testdata/arrayjoin/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayJoin([1, 2, 3]) diff --git a/parser/testdata/arraymap_lambda/explain.txt b/parser/testdata/arraymap_lambda/explain.txt deleted file mode 100644 index 751128a17..000000000 --- a/parser/testdata/arraymap_lambda/explain.txt +++ /dev/null @@ -1,16 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayMap (children 1) - ExpressionList (children 2) - Function lambda (children 1) - ExpressionList (children 2) - Function tuple (children 1) - ExpressionList (children 1) - Identifier x - Function plus (children 1) - ExpressionList (children 2) - Identifier x - Literal UInt64_1 - Literal Array_[UInt64_1, UInt64_2, UInt64_3] diff --git a/parser/testdata/arraymap_lambda/query.sql b/parser/testdata/arraymap_lambda/query.sql deleted file mode 100644 index 7ca997ed7..000000000 --- a/parser/testdata/arraymap_lambda/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayMap(x -> x + 1, [1, 2, 3]) diff --git a/parser/testdata/arraypopback/explain.txt b/parser/testdata/arraypopback/explain.txt deleted file mode 100644 index afcb57b5f..000000000 --- a/parser/testdata/arraypopback/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayPopBack (children 1) - ExpressionList (children 1) - Literal Array_[UInt64_1, UInt64_2, UInt64_3] diff --git a/parser/testdata/arraypopback/query.sql 
b/parser/testdata/arraypopback/query.sql deleted file mode 100644 index d7c731a8a..000000000 --- a/parser/testdata/arraypopback/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayPopBack([1, 2, 3]) diff --git a/parser/testdata/arraypopfront/explain.txt b/parser/testdata/arraypopfront/explain.txt deleted file mode 100644 index 642136fb4..000000000 --- a/parser/testdata/arraypopfront/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayPopFront (children 1) - ExpressionList (children 1) - Literal Array_[UInt64_1, UInt64_2, UInt64_3] diff --git a/parser/testdata/arraypopfront/query.sql b/parser/testdata/arraypopfront/query.sql deleted file mode 100644 index 0ba385d7a..000000000 --- a/parser/testdata/arraypopfront/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayPopFront([1, 2, 3]) diff --git a/parser/testdata/arraypushback/explain.txt b/parser/testdata/arraypushback/explain.txt deleted file mode 100644 index f1621f683..000000000 --- a/parser/testdata/arraypushback/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayPushBack (children 1) - ExpressionList (children 2) - Literal Array_[UInt64_1, UInt64_2] - Literal UInt64_3 diff --git a/parser/testdata/arraypushback/query.sql b/parser/testdata/arraypushback/query.sql deleted file mode 100644 index 72bd3b418..000000000 --- a/parser/testdata/arraypushback/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayPushBack([1, 2], 3) diff --git a/parser/testdata/arraypushfront/explain.txt b/parser/testdata/arraypushfront/explain.txt deleted file mode 100644 index 6a97d46ad..000000000 --- a/parser/testdata/arraypushfront/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayPushFront (children 1) - ExpressionList (children 2) - Literal Array_[UInt64_2, UInt64_3] - Literal UInt64_1 diff --git a/parser/testdata/arraypushfront/query.sql b/parser/testdata/arraypushfront/query.sql deleted file mode 100644 index aee4318e7..000000000 --- a/parser/testdata/arraypushfront/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayPushFront([2, 3], 1) diff --git a/parser/testdata/arrayreverse/explain.txt b/parser/testdata/arrayreverse/explain.txt deleted file mode 100644 index 2444d787b..000000000 --- a/parser/testdata/arrayreverse/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayReverse (children 1) - ExpressionList (children 1) - Literal Array_[UInt64_1, UInt64_2, UInt64_3] diff --git a/parser/testdata/arrayreverse/query.sql b/parser/testdata/arrayreverse/query.sql deleted file mode 100644 index 8f249ea96..000000000 --- a/parser/testdata/arrayreverse/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayReverse([1, 2, 3]) diff --git a/parser/testdata/arrayslice/explain.txt b/parser/testdata/arrayslice/explain.txt deleted file mode 100644 index 9e14d18f9..000000000 --- a/parser/testdata/arrayslice/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arraySlice (children 1) - ExpressionList (children 3) - Literal Array_[UInt64_1, UInt64_2, UInt64_3, 
UInt64_4, UInt64_5] - Literal UInt64_2 - Literal UInt64_3 diff --git a/parser/testdata/arrayslice/query.sql b/parser/testdata/arrayslice/query.sql deleted file mode 100644 index b2f6655dd..000000000 --- a/parser/testdata/arrayslice/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arraySlice([1, 2, 3, 4, 5], 2, 3) diff --git a/parser/testdata/arraysort/explain.txt b/parser/testdata/arraysort/explain.txt deleted file mode 100644 index 15274c1ed..000000000 --- a/parser/testdata/arraysort/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arraySort (children 1) - ExpressionList (children 1) - Literal Array_[UInt64_3, UInt64_1, UInt64_2] diff --git a/parser/testdata/arraysort/query.sql b/parser/testdata/arraysort/query.sql deleted file mode 100644 index 8b1fe19a3..000000000 --- a/parser/testdata/arraysort/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arraySort([3, 1, 2]) diff --git a/parser/testdata/arraysplit_lambda/explain.txt b/parser/testdata/arraysplit_lambda/explain.txt deleted file mode 100644 index db09f799e..000000000 --- a/parser/testdata/arraysplit_lambda/explain.txt +++ /dev/null @@ -1,16 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arraySplit (children 1) - ExpressionList (children 2) - Function lambda (children 1) - ExpressionList (children 2) - Function tuple (children 1) - ExpressionList (children 1) - Identifier x - Function equals (children 1) - ExpressionList (children 2) - Identifier x - Literal UInt64_2 - Literal Array_[UInt64_1, UInt64_2, UInt64_3, UInt64_4] diff --git a/parser/testdata/arraysplit_lambda/query.sql b/parser/testdata/arraysplit_lambda/query.sql deleted file mode 100644 index 0fe6b025e..000000000 --- a/parser/testdata/arraysplit_lambda/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arraySplit(x -> x = 2, [1, 2, 3, 4]) diff --git a/parser/testdata/arraystringconcat/explain.txt b/parser/testdata/arraystringconcat/explain.txt deleted file mode 100644 index a6bfbd117..000000000 --- a/parser/testdata/arraystringconcat/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayStringConcat (children 1) - ExpressionList (children 2) - Literal Array_[\'a\', \'b\', \'c\'] - Literal \',\' diff --git a/parser/testdata/arraystringconcat/query.sql b/parser/testdata/arraystringconcat/query.sql deleted file mode 100644 index f0aa82f27..000000000 --- a/parser/testdata/arraystringconcat/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayStringConcat(['a', 'b', 'c'], ',') diff --git a/parser/testdata/arrayuniq/explain.txt b/parser/testdata/arrayuniq/explain.txt deleted file mode 100644 index 0c79b452c..000000000 --- a/parser/testdata/arrayuniq/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayUniq (children 1) - ExpressionList (children 1) - Literal Array_[UInt64_1, UInt64_1, UInt64_2, UInt64_2, UInt64_3] diff --git a/parser/testdata/arrayuniq/query.sql b/parser/testdata/arrayuniq/query.sql deleted file mode 100644 index 7a0d6b8a9..000000000 --- a/parser/testdata/arrayuniq/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT arrayUniq([1, 1, 2, 2, 3]) diff --git a/parser/testdata/assumenotnull/explain.txt 
b/parser/testdata/assumenotnull/explain.txt deleted file mode 100644 index 958790123..000000000 --- a/parser/testdata/assumenotnull/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function assumeNotNull (children 1) - ExpressionList (children 1) - Function toNullable (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/assumenotnull/query.sql b/parser/testdata/assumenotnull/query.sql deleted file mode 100644 index ccef8a8eb..000000000 --- a/parser/testdata/assumenotnull/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT assumeNotNull(toNullable(1)) diff --git a/parser/testdata/asterisk_with_except/explain.txt b/parser/testdata/asterisk_with_except/explain.txt deleted file mode 100644 index 4bdf04c81..000000000 --- a/parser/testdata/asterisk_with_except/explain.txt +++ /dev/null @@ -1,12 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk (children 1) - ColumnsTransformerList (children 1) - ColumnsExceptTransformer (children 1) - Identifier id - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier test_table diff --git a/parser/testdata/asterisk_with_except/query.sql b/parser/testdata/asterisk_with_except/query.sql deleted file mode 100644 index b43845bac..000000000 --- a/parser/testdata/asterisk_with_except/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * EXCEPT (id) FROM test_table diff --git a/parser/testdata/asterisk_with_replace/explain.txt b/parser/testdata/asterisk_with_replace/explain.txt deleted file mode 100644 index 2dccd98ed..000000000 --- a/parser/testdata/asterisk_with_replace/explain.txt +++ /dev/null @@ -1,16 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk (children 1) - ColumnsTransformerList (children 1) - ColumnsReplaceTransformer (children 1) - ColumnsReplaceTransformer::Replacement (children 1) - Function plus (children 1) - ExpressionList (children 2) - Identifier id - Literal UInt64_1 - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier test_table diff --git a/parser/testdata/asterisk_with_replace/query.sql b/parser/testdata/asterisk_with_replace/query.sql deleted file mode 100644 index 31070a1bc..000000000 --- a/parser/testdata/asterisk_with_replace/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * REPLACE (id + 1 AS id) FROM test_table diff --git a/parser/testdata/attach_partition/explain.txt b/parser/testdata/attach_partition/explain.txt deleted file mode 100644 index 4ac10a28b..000000000 --- a/parser/testdata/attach_partition/explain.txt +++ /dev/null @@ -1,6 +0,0 @@ -AlterQuery test_table (children 2) - ExpressionList (children 1) - AlterCommand ATTACH_PARTITION (children 1) - Partition (children 1) - Literal UInt64_202301 - Identifier test_table diff --git a/parser/testdata/attach_partition/query.sql b/parser/testdata/attach_partition/query.sql deleted file mode 100644 index 9fde18d95..000000000 --- a/parser/testdata/attach_partition/query.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE test_table ATTACH PARTITION 202301 diff --git a/parser/testdata/avg/explain.txt b/parser/testdata/avg/explain.txt deleted file mode 100644 index e9e212bb9..000000000 --- a/parser/testdata/avg/explain.txt +++ /dev/null 
@@ -1,11 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Function avg (children 1) - ExpressionList (children 1) - Identifier price - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier products diff --git a/parser/testdata/avg/query.sql b/parser/testdata/avg/query.sql deleted file mode 100644 index 6cfec37e5..000000000 --- a/parser/testdata/avg/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT avg(price) FROM products diff --git a/parser/testdata/avg_function/explain.txt b/parser/testdata/avg_function/explain.txt deleted file mode 100644 index e8128be3b..000000000 --- a/parser/testdata/avg_function/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function avg (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/avg_function/query.sql b/parser/testdata/avg_function/query.sql deleted file mode 100644 index 14b249704..000000000 --- a/parser/testdata/avg_function/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT avg(1) diff --git a/parser/testdata/avgif/explain.txt b/parser/testdata/avgif/explain.txt deleted file mode 100644 index 4f29bd412..000000000 --- a/parser/testdata/avgif/explain.txt +++ /dev/null @@ -1,17 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Function avgIf (children 1) - ExpressionList (children 2) - Identifier number - Function greater (children 1) - ExpressionList (children 2) - Identifier number - Literal UInt64_5 - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/avgif/query.sql b/parser/testdata/avgif/query.sql deleted file mode 100644 index dfe6ee104..000000000 --- a/parser/testdata/avgif/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT avgIf(number, number > 5) FROM numbers(10) diff --git a/parser/testdata/base64decode/explain.txt b/parser/testdata/base64decode/explain.txt deleted file mode 100644 index 177c266ad..000000000 --- a/parser/testdata/base64decode/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function base64Decode (children 1) - ExpressionList (children 1) - Literal \'aGVsbG8=\' diff --git a/parser/testdata/base64decode/query.sql b/parser/testdata/base64decode/query.sql deleted file mode 100644 index 342720d7b..000000000 --- a/parser/testdata/base64decode/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT base64Decode('aGVsbG8=') diff --git a/parser/testdata/base64encode/explain.txt b/parser/testdata/base64encode/explain.txt deleted file mode 100644 index 0b426f028..000000000 --- a/parser/testdata/base64encode/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function base64Encode (children 1) - ExpressionList (children 1) - Literal \'hello\' diff --git a/parser/testdata/base64encode/query.sql b/parser/testdata/base64encode/query.sql deleted file mode 100644 index 3704dd3d6..000000000 --- a/parser/testdata/base64encode/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT base64Encode('hello') 
diff --git a/parser/testdata/between/explain.txt b/parser/testdata/between/explain.txt deleted file mode 100644 index 7392ea295..000000000 --- a/parser/testdata/between/explain.txt +++ /dev/null @@ -1,19 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users - Function and (children 1) - ExpressionList (children 2) - Function greaterOrEquals (children 1) - ExpressionList (children 2) - Identifier id - Literal UInt64_1 - Function lessOrEquals (children 1) - ExpressionList (children 2) - Identifier id - Literal UInt64_10 diff --git a/parser/testdata/between/query.sql b/parser/testdata/between/query.sql deleted file mode 100644 index d6236749e..000000000 --- a/parser/testdata/between/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM users WHERE id BETWEEN 1 AND 10 diff --git a/parser/testdata/bitand/explain.txt b/parser/testdata/bitand/explain.txt deleted file mode 100644 index 440dcc4aa..000000000 --- a/parser/testdata/bitand/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function bitAnd (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Literal UInt64_3 diff --git a/parser/testdata/bitand/query.sql b/parser/testdata/bitand/query.sql deleted file mode 100644 index 8636a370d..000000000 --- a/parser/testdata/bitand/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT bitAnd(1, 3) diff --git a/parser/testdata/bitnot/explain.txt b/parser/testdata/bitnot/explain.txt deleted file mode 100644 index a0367754e..000000000 --- a/parser/testdata/bitnot/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function bitNot (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/bitnot/query.sql b/parser/testdata/bitnot/query.sql deleted file mode 100644 index 017da52ab..000000000 --- a/parser/testdata/bitnot/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT bitNot(1) diff --git a/parser/testdata/bitor/explain.txt b/parser/testdata/bitor/explain.txt deleted file mode 100644 index 7245488d9..000000000 --- a/parser/testdata/bitor/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function bitOr (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Literal UInt64_2 diff --git a/parser/testdata/bitor/query.sql b/parser/testdata/bitor/query.sql deleted file mode 100644 index 5b7933512..000000000 --- a/parser/testdata/bitor/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT bitOr(1, 2) diff --git a/parser/testdata/bitrotateleft/explain.txt b/parser/testdata/bitrotateleft/explain.txt deleted file mode 100644 index bc5bf2141..000000000 --- a/parser/testdata/bitrotateleft/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function bitRotateLeft (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Literal UInt64_2 diff --git a/parser/testdata/bitrotateleft/query.sql b/parser/testdata/bitrotateleft/query.sql deleted file mode 100644 index b66b81c3a..000000000 --- 
a/parser/testdata/bitrotateleft/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT bitRotateLeft(1, 2) diff --git a/parser/testdata/bitrotateright/explain.txt b/parser/testdata/bitrotateright/explain.txt deleted file mode 100644 index 3c7989e99..000000000 --- a/parser/testdata/bitrotateright/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function bitRotateRight (children 1) - ExpressionList (children 2) - Literal UInt64_4 - Literal UInt64_1 diff --git a/parser/testdata/bitrotateright/query.sql b/parser/testdata/bitrotateright/query.sql deleted file mode 100644 index e35e3a8ff..000000000 --- a/parser/testdata/bitrotateright/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT bitRotateRight(4, 1) diff --git a/parser/testdata/bitshiftleft/explain.txt b/parser/testdata/bitshiftleft/explain.txt deleted file mode 100644 index d7b732d9f..000000000 --- a/parser/testdata/bitshiftleft/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function bitShiftLeft (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Literal UInt64_2 diff --git a/parser/testdata/bitshiftleft/query.sql b/parser/testdata/bitshiftleft/query.sql deleted file mode 100644 index 21d1da13f..000000000 --- a/parser/testdata/bitshiftleft/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT bitShiftLeft(1, 2) diff --git a/parser/testdata/bitshiftright/explain.txt b/parser/testdata/bitshiftright/explain.txt deleted file mode 100644 index 9e9159396..000000000 --- a/parser/testdata/bitshiftright/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function bitShiftRight (children 1) - ExpressionList (children 2) - Literal UInt64_4 - Literal UInt64_1 diff --git a/parser/testdata/bitshiftright/query.sql b/parser/testdata/bitshiftright/query.sql deleted file mode 100644 index 198331443..000000000 --- a/parser/testdata/bitshiftright/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT bitShiftRight(4, 1) diff --git a/parser/testdata/bittest/explain.txt b/parser/testdata/bittest/explain.txt deleted file mode 100644 index 092b97e83..000000000 --- a/parser/testdata/bittest/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function bitTest (children 1) - ExpressionList (children 2) - Literal UInt64_15 - Literal UInt64_0 diff --git a/parser/testdata/bittest/query.sql b/parser/testdata/bittest/query.sql deleted file mode 100644 index d1f54f8e4..000000000 --- a/parser/testdata/bittest/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT bitTest(15, 0) diff --git a/parser/testdata/bittestall/explain.txt b/parser/testdata/bittestall/explain.txt deleted file mode 100644 index 4bb6f22df..000000000 --- a/parser/testdata/bittestall/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function bitTestAll (children 1) - ExpressionList (children 5) - Literal UInt64_15 - Literal UInt64_0 - Literal UInt64_1 - Literal UInt64_2 - Literal UInt64_3 diff --git a/parser/testdata/bittestall/query.sql b/parser/testdata/bittestall/query.sql deleted file mode 100644 index a43dfbd58..000000000 --- 
a/parser/testdata/bittestall/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT bitTestAll(15, 0, 1, 2, 3) diff --git a/parser/testdata/bittestany/explain.txt b/parser/testdata/bittestany/explain.txt deleted file mode 100644 index 6a87b3471..000000000 --- a/parser/testdata/bittestany/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function bitTestAny (children 1) - ExpressionList (children 3) - Literal UInt64_15 - Literal UInt64_0 - Literal UInt64_1 diff --git a/parser/testdata/bittestany/query.sql b/parser/testdata/bittestany/query.sql deleted file mode 100644 index c057eaba7..000000000 --- a/parser/testdata/bittestany/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT bitTestAny(15, 0, 1) diff --git a/parser/testdata/bitxor/explain.txt b/parser/testdata/bitxor/explain.txt deleted file mode 100644 index dedcf2c56..000000000 --- a/parser/testdata/bitxor/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function bitXor (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Literal UInt64_3 diff --git a/parser/testdata/bitxor/query.sql b/parser/testdata/bitxor/query.sql deleted file mode 100644 index aafdd2bdf..000000000 --- a/parser/testdata/bitxor/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT bitXor(1, 3) diff --git a/parser/testdata/blocknumber/explain.txt b/parser/testdata/blocknumber/explain.txt deleted file mode 100644 index 53b5a011f..000000000 --- a/parser/testdata/blocknumber/explain.txt +++ /dev/null @@ -1,6 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function blockNumber (children 1) - ExpressionList diff --git a/parser/testdata/blocknumber/query.sql b/parser/testdata/blocknumber/query.sql deleted file mode 100644 index 0edac5fbd..000000000 --- a/parser/testdata/blocknumber/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT blockNumber() diff --git a/parser/testdata/boolean_false/explain.txt b/parser/testdata/boolean_false/explain.txt deleted file mode 100644 index d8b992a36..000000000 --- a/parser/testdata/boolean_false/explain.txt +++ /dev/null @@ -1,5 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Literal Bool_0 diff --git a/parser/testdata/boolean_false/query.sql b/parser/testdata/boolean_false/query.sql deleted file mode 100644 index 29744b66b..000000000 --- a/parser/testdata/boolean_false/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT false diff --git a/parser/testdata/boolean_true/explain.txt b/parser/testdata/boolean_true/explain.txt deleted file mode 100644 index 2ad097df6..000000000 --- a/parser/testdata/boolean_true/explain.txt +++ /dev/null @@ -1,5 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Literal Bool_1 diff --git a/parser/testdata/boolean_true/query.sql b/parser/testdata/boolean_true/query.sql deleted file mode 100644 index 91dbcbdde..000000000 --- a/parser/testdata/boolean_true/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT true diff --git a/parser/testdata/case_expression/explain.txt b/parser/testdata/case_expression/explain.txt deleted file mode 100644 index 871a4aa1b..000000000 --- a/parser/testdata/case_expression/explain.txt +++ /dev/null @@ -1,16 +0,0 @@ -SelectWithUnionQuery 
(children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Function multiIf (children 1) - ExpressionList (children 3) - Function greater (children 1) - ExpressionList (children 2) - Identifier id - Literal UInt64_1 - Literal \'big\' - Literal \'small\' - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users diff --git a/parser/testdata/case_expression/query.sql b/parser/testdata/case_expression/query.sql deleted file mode 100644 index a1b691ebb..000000000 --- a/parser/testdata/case_expression/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT CASE WHEN id > 1 THEN 'big' ELSE 'small' END FROM users diff --git a/parser/testdata/case_when_else/explain.txt b/parser/testdata/case_when_else/explain.txt deleted file mode 100644 index 974b34614..000000000 --- a/parser/testdata/case_when_else/explain.txt +++ /dev/null @@ -1,12 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function multiIf (children 1) - ExpressionList (children 3) - Function greater (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Literal UInt64_0 - Literal UInt64_1 - Literal UInt64_0 diff --git a/parser/testdata/case_when_else/query.sql b/parser/testdata/case_when_else/query.sql deleted file mode 100644 index 0961795b8..000000000 --- a/parser/testdata/case_when_else/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT CASE WHEN 1 > 0 THEN 1 ELSE 0 END diff --git a/parser/testdata/cast_function/explain.txt b/parser/testdata/cast_function/explain.txt deleted file mode 100644 index a9e31acd1..000000000 --- a/parser/testdata/cast_function/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function CAST (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Literal \'String\' diff --git a/parser/testdata/cast_function/query.sql b/parser/testdata/cast_function/query.sql deleted file mode 100644 index 2d56d5e09..000000000 --- a/parser/testdata/cast_function/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT CAST(1 AS String) diff --git a/parser/testdata/cast_operator/explain.txt b/parser/testdata/cast_operator/explain.txt deleted file mode 100644 index cbf73a434..000000000 --- a/parser/testdata/cast_operator/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function CAST (children 1) - ExpressionList (children 2) - Literal \'1\' - Literal \'Int32\' diff --git a/parser/testdata/cast_operator/query.sql b/parser/testdata/cast_operator/query.sql deleted file mode 100644 index 22aaaf090..000000000 --- a/parser/testdata/cast_operator/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1::Int32 diff --git a/parser/testdata/cityhash64/explain.txt b/parser/testdata/cityhash64/explain.txt deleted file mode 100644 index 44ed3bec7..000000000 --- a/parser/testdata/cityhash64/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function cityHash64 (children 1) - ExpressionList (children 1) - Literal \'hello\' diff --git a/parser/testdata/cityhash64/query.sql b/parser/testdata/cityhash64/query.sql deleted file mode 100644 index e431814f5..000000000 --- a/parser/testdata/cityhash64/query.sql +++ /dev/null @@ -1 
+0,0 @@ -SELECT cityHash64('hello') diff --git a/parser/testdata/clear_index/explain.txt b/parser/testdata/clear_index/explain.txt deleted file mode 100644 index bb19729b4..000000000 --- a/parser/testdata/clear_index/explain.txt +++ /dev/null @@ -1,5 +0,0 @@ -AlterQuery test_table (children 2) - ExpressionList (children 1) - AlterCommand DROP_INDEX (children 1) - Identifier idx - Identifier test_table diff --git a/parser/testdata/clear_index/query.sql b/parser/testdata/clear_index/query.sql deleted file mode 100644 index bfa972e75..000000000 --- a/parser/testdata/clear_index/query.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE test_table CLEAR INDEX idx diff --git a/parser/testdata/coalesce/explain.txt b/parser/testdata/coalesce/explain.txt deleted file mode 100644 index c1f8d9633..000000000 --- a/parser/testdata/coalesce/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function coalesce (children 1) - ExpressionList (children 3) - Literal NULL - Literal NULL - Literal UInt64_1 diff --git a/parser/testdata/coalesce/query.sql b/parser/testdata/coalesce/query.sql deleted file mode 100644 index 29643d256..000000000 --- a/parser/testdata/coalesce/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT coalesce(NULL, NULL, 1) diff --git a/parser/testdata/column_alias/explain.txt b/parser/testdata/column_alias/explain.txt deleted file mode 100644 index e64282599..000000000 --- a/parser/testdata/column_alias/explain.txt +++ /dev/null @@ -1,5 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Literal UInt64_1 (alias x) diff --git a/parser/testdata/column_alias/query.sql b/parser/testdata/column_alias/query.sql deleted file mode 100644 index c80c40255..000000000 --- a/parser/testdata/column_alias/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 AS x diff --git a/parser/testdata/columns_matcher/explain.txt b/parser/testdata/columns_matcher/explain.txt deleted file mode 100644 index 0939f342f..000000000 --- a/parser/testdata/columns_matcher/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - ColumnsRegexpMatcher - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier test_table diff --git a/parser/testdata/columns_matcher/query.sql b/parser/testdata/columns_matcher/query.sql deleted file mode 100644 index ec8775f98..000000000 --- a/parser/testdata/columns_matcher/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT COLUMNS('name.*') FROM test_table diff --git a/parser/testdata/comparison/explain.txt b/parser/testdata/comparison/explain.txt deleted file mode 100644 index 1070656e9..000000000 --- a/parser/testdata/comparison/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function less (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Literal UInt64_2 diff --git a/parser/testdata/comparison/query.sql b/parser/testdata/comparison/query.sql deleted file mode 100644 index dd0c7c347..000000000 --- a/parser/testdata/comparison/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 < 2 diff --git a/parser/testdata/complex_join_with_settings/explain.txt b/parser/testdata/complex_join_with_settings/explain.txt deleted file mode 
100644 index 446b4c96a..000000000 --- a/parser/testdata/complex_join_with_settings/explain.txt +++ /dev/null @@ -1,60 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 4) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Subquery (children 1) - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 4) - Identifier number - Identifier n - Identifier j1 - Identifier j2 - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Subquery (alias js1) (children 1) - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 2) - Identifier number - Function divide (alias n) (children 1) - ExpressionList (children 2) - Identifier number - Literal UInt64_2 - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier system.numbers - TablesInSelectQueryElement (children 2) - TableExpression (children 1) - Subquery (alias js2) (children 1) - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 3) - Function divide (alias n) (children 1) - ExpressionList (children 2) - Identifier number - Literal UInt64_3 - Identifier number (alias j1) - Literal \'Hello\' (alias j2) - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier system.numbers - Literal UInt64_10 - TableJoin (children 1) - ExpressionList (children 1) - Identifier n - Literal UInt64_10 - ExpressionList (children 1) - OrderByElement (children 1) - Identifier n - Set diff --git a/parser/testdata/complex_join_with_settings/metadata.json b/parser/testdata/complex_join_with_settings/metadata.json deleted file mode 100644 index 82d7218f2..000000000 --- a/parser/testdata/complex_join_with_settings/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"todo": true, "source": "https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00057_join_aliases.sql"} diff --git a/parser/testdata/complex_join_with_settings/query.sql b/parser/testdata/complex_join_with_settings/query.sql deleted file mode 100644 index 256a298b9..000000000 --- a/parser/testdata/complex_join_with_settings/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM (SELECT number, n, j1, j2 FROM (SELECT number, number / 2 AS n FROM system.numbers) js1 ANY LEFT JOIN (SELECT number / 3 AS n, number AS j1, 'Hello' AS j2 FROM system.numbers LIMIT 10) js2 USING n LIMIT 10) ORDER BY n SETTINGS join_algorithm = 'hash' diff --git a/parser/testdata/concat/explain.txt b/parser/testdata/concat/explain.txt deleted file mode 100644 index 76f1d1cf8..000000000 --- a/parser/testdata/concat/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function concat (children 1) - ExpressionList (children 3) - Literal \'hello\' - Literal \' \' - Literal \'world\' diff --git a/parser/testdata/concat/query.sql b/parser/testdata/concat/query.sql deleted file mode 100644 index 020eca651..000000000 --- a/parser/testdata/concat/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT concat('hello', ' ', 'world') diff --git a/parser/testdata/conditional_ternary/explain.txt 
b/parser/testdata/conditional_ternary/explain.txt deleted file mode 100644 index db8eaadbd..000000000 --- a/parser/testdata/conditional_ternary/explain.txt +++ /dev/null @@ -1,12 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function if (children 1) - ExpressionList (children 3) - Function greater (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Literal UInt64_0 - Literal \'yes\' - Literal \'no\' diff --git a/parser/testdata/conditional_ternary/query.sql b/parser/testdata/conditional_ternary/query.sql deleted file mode 100644 index 8f43876e9..000000000 --- a/parser/testdata/conditional_ternary/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 > 0 ? 'yes' : 'no' diff --git a/parser/testdata/count/explain.txt b/parser/testdata/count/explain.txt deleted file mode 100644 index 667c7f0ac..000000000 --- a/parser/testdata/count/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Function count (children 1) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users diff --git a/parser/testdata/count/query.sql b/parser/testdata/count/query.sql deleted file mode 100644 index 0aa4f466e..000000000 --- a/parser/testdata/count/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT count(*) FROM users diff --git a/parser/testdata/count_star/explain.txt b/parser/testdata/count_star/explain.txt deleted file mode 100644 index 1133f3d74..000000000 --- a/parser/testdata/count_star/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function count (children 1) - ExpressionList (children 1) - Asterisk diff --git a/parser/testdata/count_star/query.sql b/parser/testdata/count_star/query.sql deleted file mode 100644 index 9150f7390..000000000 --- a/parser/testdata/count_star/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT count(*) diff --git a/parser/testdata/countif/explain.txt b/parser/testdata/countif/explain.txt deleted file mode 100644 index 4bcb678e2..000000000 --- a/parser/testdata/countif/explain.txt +++ /dev/null @@ -1,16 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Function countIf (children 1) - ExpressionList (children 1) - Function greater (children 1) - ExpressionList (children 2) - Identifier number - Literal UInt64_5 - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/countif/query.sql b/parser/testdata/countif/query.sql deleted file mode 100644 index 7c7acd964..000000000 --- a/parser/testdata/countif/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT countIf(number > 5) FROM numbers(10) diff --git a/parser/testdata/create_database/explain.txt b/parser/testdata/create_database/explain.txt deleted file mode 100644 index 8280bde71..000000000 --- a/parser/testdata/create_database/explain.txt +++ /dev/null @@ -1,2 +0,0 @@ -CreateQuery test_db (children 1) - Identifier test_db diff --git a/parser/testdata/create_database/query.sql b/parser/testdata/create_database/query.sql deleted file mode 100644 index c6dd1ecac..000000000 --- 
a/parser/testdata/create_database/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE DATABASE test_db diff --git a/parser/testdata/create_database_if_not_exists/explain.txt b/parser/testdata/create_database_if_not_exists/explain.txt deleted file mode 100644 index 8280bde71..000000000 --- a/parser/testdata/create_database_if_not_exists/explain.txt +++ /dev/null @@ -1,2 +0,0 @@ -CreateQuery test_db (children 1) - Identifier test_db diff --git a/parser/testdata/create_database_if_not_exists/query.sql b/parser/testdata/create_database_if_not_exists/query.sql deleted file mode 100644 index dc5f47fb7..000000000 --- a/parser/testdata/create_database_if_not_exists/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE DATABASE IF NOT EXISTS test_db diff --git a/parser/testdata/create_database_with_engine/explain.txt b/parser/testdata/create_database_with_engine/explain.txt deleted file mode 100644 index 4941f12f7..000000000 --- a/parser/testdata/create_database_with_engine/explain.txt +++ /dev/null @@ -1,4 +0,0 @@ -CreateQuery test_db (children 2) - Identifier test_db - Storage definition (children 1) - Function Atomic diff --git a/parser/testdata/create_database_with_engine/query.sql b/parser/testdata/create_database_with_engine/query.sql deleted file mode 100644 index eb8bd44ac..000000000 --- a/parser/testdata/create_database_with_engine/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE DATABASE test_db ENGINE = Atomic diff --git a/parser/testdata/create_materialized_view/explain.txt b/parser/testdata/create_materialized_view/explain.txt deleted file mode 100644 index 4fbc0d07c..000000000 --- a/parser/testdata/create_materialized_view/explain.txt +++ /dev/null @@ -1,10 +0,0 @@ -CreateQuery test_mv (children 3) - Identifier test_mv - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Literal UInt64_1 - ViewTargets (children 1) - Storage definition (children 1) - Function Memory diff --git a/parser/testdata/create_materialized_view/query.sql b/parser/testdata/create_materialized_view/query.sql deleted file mode 100644 index 738931b3e..000000000 --- a/parser/testdata/create_materialized_view/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE MATERIALIZED VIEW test_mv ENGINE = Memory AS SELECT 1 diff --git a/parser/testdata/create_materialized_view_aggregate/explain.txt b/parser/testdata/create_materialized_view_aggregate/explain.txt deleted file mode 100644 index 74a6d6588..000000000 --- a/parser/testdata/create_materialized_view_aggregate/explain.txt +++ /dev/null @@ -1,31 +0,0 @@ -CreateQuery basic_mv (children 3) - Identifier basic_mv - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 4) - Identifier CounterID - Identifier StartDate - Function sumState (alias Visits) (children 1) - ExpressionList (children 1) - Identifier Sign - Function uniqState (alias Users) (children 1) - ExpressionList (children 1) - Identifier UserID - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier test.visits - ExpressionList (children 2) - Identifier CounterID - Identifier StartDate - ViewTargets (children 1) - Storage definition (children 1) - Function AggregatingMergeTree (children 1) - ExpressionList (children 3) - Identifier StartDate - Function tuple (children 1) - ExpressionList (children 2) - Identifier CounterID - Identifier StartDate - Literal UInt64_8192 diff --git 
a/parser/testdata/create_materialized_view_aggregate/metadata.json b/parser/testdata/create_materialized_view_aggregate/metadata.json deleted file mode 100644 index e95042307..000000000 --- a/parser/testdata/create_materialized_view_aggregate/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"todo": true, "source": "https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00040_aggregating_materialized_view.sql"} diff --git a/parser/testdata/create_materialized_view_aggregate/query.sql b/parser/testdata/create_materialized_view_aggregate/query.sql deleted file mode 100644 index 582763117..000000000 --- a/parser/testdata/create_materialized_view_aggregate/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE MATERIALIZED VIEW basic_mv ENGINE = AggregatingMergeTree(StartDate, (CounterID, StartDate), 8192) AS SELECT CounterID, StartDate, sumState(Sign) AS Visits, uniqState(UserID) AS Users FROM test.visits GROUP BY CounterID, StartDate diff --git a/parser/testdata/create_table/explain.txt b/parser/testdata/create_table/explain.txt deleted file mode 100644 index 00865a235..000000000 --- a/parser/testdata/create_table/explain.txt +++ /dev/null @@ -1,12 +0,0 @@ -CreateQuery test (children 3) - Identifier test - Columns definition (children 1) - ExpressionList (children 2) - ColumnDeclaration id (children 1) - DataType UInt64 - ColumnDeclaration name (children 1) - DataType String - Storage definition (children 2) - Function MergeTree (children 1) - ExpressionList - Identifier id diff --git a/parser/testdata/create_table/query.sql b/parser/testdata/create_table/query.sql deleted file mode 100644 index 391bda886..000000000 --- a/parser/testdata/create_table/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE TABLE test (id UInt64, name String) ENGINE = MergeTree() ORDER BY id diff --git a/parser/testdata/create_table_as_select/explain.txt b/parser/testdata/create_table_as_select/explain.txt deleted file mode 100644 index 54396af3d..000000000 --- a/parser/testdata/create_table_as_select/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -CreateQuery test_table (children 2) - Identifier test_table - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Literal UInt64_1 (alias id) diff --git a/parser/testdata/create_table_as_select/query.sql b/parser/testdata/create_table_as_select/query.sql deleted file mode 100644 index 34509618a..000000000 --- a/parser/testdata/create_table_as_select/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE TABLE test_table AS SELECT 1 AS id diff --git a/parser/testdata/create_table_if_not_exists/explain.txt b/parser/testdata/create_table_if_not_exists/explain.txt deleted file mode 100644 index 6fb719fb4..000000000 --- a/parser/testdata/create_table_if_not_exists/explain.txt +++ /dev/null @@ -1,10 +0,0 @@ -CreateQuery test (children 3) - Identifier test - Columns definition (children 1) - ExpressionList (children 1) - ColumnDeclaration id (children 1) - DataType UInt64 - Storage definition (children 2) - Function MergeTree (children 1) - ExpressionList - Identifier id diff --git a/parser/testdata/create_table_if_not_exists/query.sql b/parser/testdata/create_table_if_not_exists/query.sql deleted file mode 100644 index b53b83bff..000000000 --- a/parser/testdata/create_table_if_not_exists/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE TABLE IF NOT EXISTS test (id UInt64) ENGINE = MergeTree() ORDER BY id diff --git a/parser/testdata/create_table_if_not_exists_ddl/explain.txt 
b/parser/testdata/create_table_if_not_exists_ddl/explain.txt deleted file mode 100644 index 51550472b..000000000 --- a/parser/testdata/create_table_if_not_exists_ddl/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -CreateQuery test_table (children 3) - Identifier test_table - Columns definition (children 1) - ExpressionList (children 1) - ColumnDeclaration id (children 1) - DataType UInt64 - Storage definition (children 1) - Function Memory diff --git a/parser/testdata/create_table_if_not_exists_ddl/query.sql b/parser/testdata/create_table_if_not_exists_ddl/query.sql deleted file mode 100644 index ab6173baa..000000000 --- a/parser/testdata/create_table_if_not_exists_ddl/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE TABLE IF NOT EXISTS test_table (id UInt64) ENGINE = Memory diff --git a/parser/testdata/create_table_memory/explain.txt b/parser/testdata/create_table_memory/explain.txt deleted file mode 100644 index 9317b5544..000000000 --- a/parser/testdata/create_table_memory/explain.txt +++ /dev/null @@ -1,10 +0,0 @@ -CreateQuery test_table (children 3) - Identifier test_table - Columns definition (children 1) - ExpressionList (children 2) - ColumnDeclaration id (children 1) - DataType UInt64 - ColumnDeclaration name (children 1) - DataType String - Storage definition (children 1) - Function Memory diff --git a/parser/testdata/create_table_memory/query.sql b/parser/testdata/create_table_memory/query.sql deleted file mode 100644 index de7b4f676..000000000 --- a/parser/testdata/create_table_memory/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE TABLE test_table (id UInt64, name String) ENGINE = Memory diff --git a/parser/testdata/create_table_mergetree/explain.txt b/parser/testdata/create_table_mergetree/explain.txt deleted file mode 100644 index ea75c9968..000000000 --- a/parser/testdata/create_table_mergetree/explain.txt +++ /dev/null @@ -1,10 +0,0 @@ -CreateQuery test_table (children 3) - Identifier test_table - Columns definition (children 1) - ExpressionList (children 1) - ColumnDeclaration id (children 1) - DataType UInt64 - Storage definition (children 2) - Function MergeTree (children 1) - ExpressionList - Identifier id diff --git a/parser/testdata/create_table_mergetree/query.sql b/parser/testdata/create_table_mergetree/query.sql deleted file mode 100644 index 64c9cb3fc..000000000 --- a/parser/testdata/create_table_mergetree/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE TABLE test_table (id UInt64) ENGINE = MergeTree() ORDER BY id diff --git a/parser/testdata/create_table_with_codec/explain.txt b/parser/testdata/create_table_with_codec/explain.txt deleted file mode 100644 index 38a88fb5f..000000000 --- a/parser/testdata/create_table_with_codec/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -CreateQuery test_table (children 3) - Identifier test_table - Columns definition (children 1) - ExpressionList (children 1) - ColumnDeclaration id (children 2) - DataType UInt64 - Function CODEC (children 1) - ExpressionList (children 1) - Function LZ4 - Storage definition (children 1) - Function Memory diff --git a/parser/testdata/create_table_with_codec/query.sql b/parser/testdata/create_table_with_codec/query.sql deleted file mode 100644 index 795608a32..000000000 --- a/parser/testdata/create_table_with_codec/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE TABLE test_table (id UInt64 CODEC(LZ4)) ENGINE = Memory diff --git a/parser/testdata/create_table_with_comment/explain.txt b/parser/testdata/create_table_with_comment/explain.txt deleted file mode 100644 index a58a21cd9..000000000 --- 
a/parser/testdata/create_table_with_comment/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -CreateQuery test_table (children 3) - Identifier test_table - Columns definition (children 1) - ExpressionList (children 1) - ColumnDeclaration id (children 2) - DataType UInt64 - Literal \'The ID\' - Storage definition (children 1) - Function Memory diff --git a/parser/testdata/create_table_with_comment/query.sql b/parser/testdata/create_table_with_comment/query.sql deleted file mode 100644 index 807946d3a..000000000 --- a/parser/testdata/create_table_with_comment/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE TABLE test_table (id UInt64 COMMENT 'The ID') ENGINE = Memory diff --git a/parser/testdata/create_table_with_default/explain.txt b/parser/testdata/create_table_with_default/explain.txt deleted file mode 100644 index 79ad506bc..000000000 --- a/parser/testdata/create_table_with_default/explain.txt +++ /dev/null @@ -1,12 +0,0 @@ -CreateQuery test_table (children 3) - Identifier test_table - Columns definition (children 1) - ExpressionList (children 2) - ColumnDeclaration id (children 2) - DataType UInt64 - Literal UInt64_0 - ColumnDeclaration name (children 2) - DataType String - Literal \'\' - Storage definition (children 1) - Function Memory diff --git a/parser/testdata/create_table_with_default/query.sql b/parser/testdata/create_table_with_default/query.sql deleted file mode 100644 index ab3e5d2c5..000000000 --- a/parser/testdata/create_table_with_default/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE TABLE test_table (id UInt64 DEFAULT 0, name String DEFAULT '') ENGINE = Memory diff --git a/parser/testdata/create_table_with_materialized/explain.txt b/parser/testdata/create_table_with_materialized/explain.txt deleted file mode 100644 index 847cb5285..000000000 --- a/parser/testdata/create_table_with_materialized/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -CreateQuery test_table (children 3) - Identifier test_table - Columns definition (children 1) - ExpressionList (children 1) - ColumnDeclaration id (children 2) - DataType UInt64 - Literal UInt64_0 - Storage definition (children 1) - Function Memory diff --git a/parser/testdata/create_table_with_materialized/query.sql b/parser/testdata/create_table_with_materialized/query.sql deleted file mode 100644 index 03c193410..000000000 --- a/parser/testdata/create_table_with_materialized/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE TABLE test_table (id UInt64 MATERIALIZED 0) ENGINE = Memory diff --git a/parser/testdata/create_table_with_partition/explain.txt b/parser/testdata/create_table_with_partition/explain.txt deleted file mode 100644 index ab750e574..000000000 --- a/parser/testdata/create_table_with_partition/explain.txt +++ /dev/null @@ -1,15 +0,0 @@ -CreateQuery test_table (children 3) - Identifier test_table - Columns definition (children 1) - ExpressionList (children 2) - ColumnDeclaration id (children 1) - DataType UInt64 - ColumnDeclaration dt (children 1) - DataType Date - Storage definition (children 3) - Function MergeTree (children 1) - ExpressionList - Function toYYYYMM (children 1) - ExpressionList (children 1) - Identifier dt - Identifier id diff --git a/parser/testdata/create_table_with_partition/query.sql b/parser/testdata/create_table_with_partition/query.sql deleted file mode 100644 index 479ed7d57..000000000 --- a/parser/testdata/create_table_with_partition/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE TABLE test_table (id UInt64, dt Date) ENGINE = MergeTree() PARTITION BY toYYYYMM(dt) ORDER BY id diff --git 
a/parser/testdata/create_table_with_primary_key/explain.txt b/parser/testdata/create_table_with_primary_key/explain.txt deleted file mode 100644 index 9f2a39259..000000000 --- a/parser/testdata/create_table_with_primary_key/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -CreateQuery test_table (children 3) - Identifier test_table - Columns definition (children 1) - ExpressionList (children 1) - ColumnDeclaration id (children 1) - DataType UInt64 - Storage definition (children 3) - Function MergeTree (children 1) - ExpressionList - Identifier id - Identifier id diff --git a/parser/testdata/create_table_with_primary_key/query.sql b/parser/testdata/create_table_with_primary_key/query.sql deleted file mode 100644 index 175c79d9c..000000000 --- a/parser/testdata/create_table_with_primary_key/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE TABLE test_table (id UInt64) ENGINE = MergeTree() ORDER BY id PRIMARY KEY id diff --git a/parser/testdata/create_table_with_settings/explain.txt b/parser/testdata/create_table_with_settings/explain.txt deleted file mode 100644 index cb200ff08..000000000 --- a/parser/testdata/create_table_with_settings/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -CreateQuery test_table (children 3) - Identifier test_table - Columns definition (children 1) - ExpressionList (children 1) - ColumnDeclaration id (children 1) - DataType UInt64 - Storage definition (children 3) - Function MergeTree (children 1) - ExpressionList - Identifier id - Set diff --git a/parser/testdata/create_table_with_settings/query.sql b/parser/testdata/create_table_with_settings/query.sql deleted file mode 100644 index aefde6f6f..000000000 --- a/parser/testdata/create_table_with_settings/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE TABLE test_table (id UInt64) ENGINE = MergeTree() ORDER BY id SETTINGS index_granularity = 8192 diff --git a/parser/testdata/create_view/explain.txt b/parser/testdata/create_view/explain.txt deleted file mode 100644 index d18115c29..000000000 --- a/parser/testdata/create_view/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -CreateQuery test_view (children 2) - Identifier test_view - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/create_view/query.sql b/parser/testdata/create_view/query.sql deleted file mode 100644 index 87eae1e52..000000000 --- a/parser/testdata/create_view/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE VIEW test_view AS SELECT 1 diff --git a/parser/testdata/create_view_if_not_exists/explain.txt b/parser/testdata/create_view_if_not_exists/explain.txt deleted file mode 100644 index d18115c29..000000000 --- a/parser/testdata/create_view_if_not_exists/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -CreateQuery test_view (children 2) - Identifier test_view - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/create_view_if_not_exists/query.sql b/parser/testdata/create_view_if_not_exists/query.sql deleted file mode 100644 index 4664c8647..000000000 --- a/parser/testdata/create_view_if_not_exists/query.sql +++ /dev/null @@ -1 +0,0 @@ -CREATE VIEW IF NOT EXISTS test_view AS SELECT 1 diff --git a/parser/testdata/currentdatabase/explain.txt b/parser/testdata/currentdatabase/explain.txt deleted file mode 100644 index 937386530..000000000 --- a/parser/testdata/currentdatabase/explain.txt +++ /dev/null @@ -1,6 +0,0 @@ -SelectWithUnionQuery (children 1) - 
ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function currentDatabase (children 1) - ExpressionList diff --git a/parser/testdata/currentdatabase/query.sql b/parser/testdata/currentdatabase/query.sql deleted file mode 100644 index bdb890c14..000000000 --- a/parser/testdata/currentdatabase/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT currentDatabase() diff --git a/parser/testdata/currentuser/explain.txt b/parser/testdata/currentuser/explain.txt deleted file mode 100644 index 0e4f46e30..000000000 --- a/parser/testdata/currentuser/explain.txt +++ /dev/null @@ -1,6 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function currentUser (children 1) - ExpressionList diff --git a/parser/testdata/currentuser/query.sql b/parser/testdata/currentuser/query.sql deleted file mode 100644 index a44e55714..000000000 --- a/parser/testdata/currentuser/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT currentUser() diff --git a/parser/testdata/cuturlparameter/explain.txt b/parser/testdata/cuturlparameter/explain.txt deleted file mode 100644 index 578812fdd..000000000 --- a/parser/testdata/cuturlparameter/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function cutURLParameter (children 1) - ExpressionList (children 2) - Literal \'https://example.com?a=1&b=2\' - Literal \'a\' diff --git a/parser/testdata/cuturlparameter/query.sql b/parser/testdata/cuturlparameter/query.sql deleted file mode 100644 index 58cf92c57..000000000 --- a/parser/testdata/cuturlparameter/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT cutURLParameter('https://example.com?a=1&b=2', 'a') diff --git a/parser/testdata/dateadd/explain.txt b/parser/testdata/dateadd/explain.txt deleted file mode 100644 index 11975ef3a..000000000 --- a/parser/testdata/dateadd/explain.txt +++ /dev/null @@ -1,12 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function plus (children 1) - ExpressionList (children 2) - Function toDate (children 1) - ExpressionList (children 1) - Literal \'2023-01-01\' - Function toIntervalDay (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/dateadd/query.sql b/parser/testdata/dateadd/query.sql deleted file mode 100644 index 4bf527966..000000000 --- a/parser/testdata/dateadd/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT dateAdd(day, 1, toDate('2023-01-01')) diff --git a/parser/testdata/datediff/explain.txt b/parser/testdata/datediff/explain.txt deleted file mode 100644 index 61d6211d3..000000000 --- a/parser/testdata/datediff/explain.txt +++ /dev/null @@ -1,13 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function dateDiff (children 1) - ExpressionList (children 3) - Literal \'day\' - Function toDate (children 1) - ExpressionList (children 1) - Literal \'2023-01-01\' - Function toDate (children 1) - ExpressionList (children 1) - Literal \'2023-01-31\' diff --git a/parser/testdata/datediff/query.sql b/parser/testdata/datediff/query.sql deleted file mode 100644 index 92e6ec6ca..000000000 --- a/parser/testdata/datediff/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT dateDiff('day', toDate('2023-01-01'), toDate('2023-01-31')) diff --git a/parser/testdata/datesub/explain.txt 
b/parser/testdata/datesub/explain.txt deleted file mode 100644 index 861a18076..000000000 --- a/parser/testdata/datesub/explain.txt +++ /dev/null @@ -1,12 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function minus (children 1) - ExpressionList (children 2) - Function toDate (children 1) - ExpressionList (children 1) - Literal \'2023-01-02\' - Function toIntervalDay (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/datesub/query.sql b/parser/testdata/datesub/query.sql deleted file mode 100644 index c27a8d823..000000000 --- a/parser/testdata/datesub/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT dateSub(day, 1, toDate('2023-01-02')) diff --git a/parser/testdata/dense_rank_function/explain.txt b/parser/testdata/dense_rank_function/explain.txt deleted file mode 100644 index 9ecaac006..000000000 --- a/parser/testdata/dense_rank_function/explain.txt +++ /dev/null @@ -1,17 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 2) - Identifier number - Function dense_rank (children 2) - ExpressionList - WindowDefinition (children 1) - ExpressionList (children 1) - OrderByElement (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/dense_rank_function/query.sql b/parser/testdata/dense_rank_function/query.sql deleted file mode 100644 index 991cf54f3..000000000 --- a/parser/testdata/dense_rank_function/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT number, dense_rank() OVER (ORDER BY number) FROM numbers(10) diff --git a/parser/testdata/desc_table/explain.txt b/parser/testdata/desc_table/explain.txt deleted file mode 100644 index f016a4354..000000000 --- a/parser/testdata/desc_table/explain.txt +++ /dev/null @@ -1,3 +0,0 @@ -DescribeQuery (children 1) - TableExpression (children 1) - TableIdentifier system.one diff --git a/parser/testdata/desc_table/query.sql b/parser/testdata/desc_table/query.sql deleted file mode 100644 index bf850db93..000000000 --- a/parser/testdata/desc_table/query.sql +++ /dev/null @@ -1 +0,0 @@ -DESC TABLE system.one diff --git a/parser/testdata/describe_short/explain.txt b/parser/testdata/describe_short/explain.txt deleted file mode 100644 index f016a4354..000000000 --- a/parser/testdata/describe_short/explain.txt +++ /dev/null @@ -1,3 +0,0 @@ -DescribeQuery (children 1) - TableExpression (children 1) - TableIdentifier system.one diff --git a/parser/testdata/describe_short/query.sql b/parser/testdata/describe_short/query.sql deleted file mode 100644 index 1cd3aaef5..000000000 --- a/parser/testdata/describe_short/query.sql +++ /dev/null @@ -1 +0,0 @@ -DESCRIBE system.one diff --git a/parser/testdata/describe_table/explain.txt b/parser/testdata/describe_table/explain.txt deleted file mode 100644 index 8bb606792..000000000 --- a/parser/testdata/describe_table/explain.txt +++ /dev/null @@ -1,3 +0,0 @@ -DescribeQuery (children 1) - TableExpression (children 1) - TableIdentifier users diff --git a/parser/testdata/describe_table/query.sql b/parser/testdata/describe_table/query.sql deleted file mode 100644 index b536aa080..000000000 --- a/parser/testdata/describe_table/query.sql +++ /dev/null @@ -1 +0,0 @@ -DESCRIBE TABLE users diff --git a/parser/testdata/describe_table_full/explain.txt 
deleted file mode 100644
index f016a4354..000000000
--- a/parser/testdata/describe_table_full/explain.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-DescribeQuery (children 1)
- TableExpression (children 1)
-  TableIdentifier system.one
diff --git a/parser/testdata/describe_table_full/query.sql b/parser/testdata/describe_table_full/query.sql
deleted file mode 100644
index 9772f920b..000000000
--- a/parser/testdata/describe_table_full/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-DESCRIBE TABLE system.one
diff --git a/parser/testdata/detach_partition/explain.txt b/parser/testdata/detach_partition/explain.txt
deleted file mode 100644
index 05a3f7dc5..000000000
--- a/parser/testdata/detach_partition/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-AlterQuery test_table (children 2)
- ExpressionList (children 1)
-  AlterCommand DROP_PARTITION (children 1)
-   Partition (children 1)
-    Literal UInt64_202301
- Identifier test_table
diff --git a/parser/testdata/detach_partition/query.sql b/parser/testdata/detach_partition/query.sql
deleted file mode 100644
index 48a97aabf..000000000
--- a/parser/testdata/detach_partition/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-ALTER TABLE test_table DETACH PARTITION 202301
diff --git a/parser/testdata/dictget/explain.txt b/parser/testdata/dictget/explain.txt
deleted file mode 100644
index dbe211f23..000000000
--- a/parser/testdata/dictget/explain.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function dictGet (children 1)
-     ExpressionList (children 3)
-      Literal \'dict_name\'
-      Literal \'attr\'
-      Function toUInt64 (children 1)
-       ExpressionList (children 1)
-        Literal UInt64_1
diff --git a/parser/testdata/dictget/query.sql b/parser/testdata/dictget/query.sql
deleted file mode 100644
index 24a5a6306..000000000
--- a/parser/testdata/dictget/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT dictGet('dict_name', 'attr', toUInt64(1))
diff --git a/parser/testdata/dictgetordefault/explain.txt b/parser/testdata/dictgetordefault/explain.txt
deleted file mode 100644
index 7ba6d62aa..000000000
--- a/parser/testdata/dictgetordefault/explain.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function dictGetOrDefault (children 1)
-     ExpressionList (children 4)
-      Literal \'dict_name\'
-      Literal \'attr\'
-      Function toUInt64 (children 1)
-       ExpressionList (children 1)
-        Literal UInt64_1
-      Literal \'default\'
diff --git a/parser/testdata/dictgetordefault/query.sql b/parser/testdata/dictgetordefault/query.sql
deleted file mode 100644
index f3f179642..000000000
--- a/parser/testdata/dictgetordefault/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT dictGetOrDefault('dict_name', 'attr', toUInt64(1), 'default')
diff --git a/parser/testdata/dicthas/explain.txt b/parser/testdata/dicthas/explain.txt
deleted file mode 100644
index 7c6bdf1e8..000000000
--- a/parser/testdata/dicthas/explain.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function dictHas (children 1)
-     ExpressionList (children 2)
-      Literal \'dict_name\'
-      Function toUInt64 (children 1)
-       ExpressionList (children 1)
-        Literal UInt64_1
diff --git a/parser/testdata/dicthas/query.sql b/parser/testdata/dicthas/query.sql
deleted file mode 100644
index d313d1d31..000000000
--- a/parser/testdata/dicthas/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT dictHas('dict_name', toUInt64(1))
diff --git a/parser/testdata/distinct_in_function/explain.txt b/parser/testdata/distinct_in_function/explain.txt
deleted file mode 100644
index 480f4e617..000000000
--- a/parser/testdata/distinct_in_function/explain.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 2)
-   ExpressionList (children 1)
-    Function countDistinct (children 1)
-     ExpressionList (children 1)
-      Identifier id
-   TablesInSelectQuery (children 1)
-    TablesInSelectQueryElement (children 1)
-     TableExpression (children 1)
-      TableIdentifier users
diff --git a/parser/testdata/distinct_in_function/query.sql b/parser/testdata/distinct_in_function/query.sql
deleted file mode 100644
index c56dd3516..000000000
--- a/parser/testdata/distinct_in_function/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT count(DISTINCT id) FROM users
diff --git a/parser/testdata/distinct_subquery/explain.txt b/parser/testdata/distinct_subquery/explain.txt
deleted file mode 100644
index 89ab10980..000000000
--- a/parser/testdata/distinct_subquery/explain.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 2)
-   ExpressionList (children 1)
-    Identifier x
-   TablesInSelectQuery (children 1)
-    TablesInSelectQueryElement (children 1)
-     TableExpression (children 1)
-      Subquery (children 1)
-       SelectWithUnionQuery (children 1)
-        ExpressionList (children 1)
-         SelectQuery (children 1)
-          ExpressionList (children 2)
-           Literal UInt64_1 (alias x)
-           Function arrayJoin (alias y) (children 1)
-            ExpressionList (children 1)
-             Literal Array_[UInt64_1, UInt64_2]
diff --git a/parser/testdata/distinct_subquery/metadata.json b/parser/testdata/distinct_subquery/metadata.json
deleted file mode 100644
index ec307208c..000000000
--- a/parser/testdata/distinct_subquery/metadata.json
+++ /dev/null
@@ -1 +0,0 @@
-{"source": "https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00018_distinct_in_subquery.sql"}
diff --git a/parser/testdata/domain/explain.txt b/parser/testdata/domain/explain.txt
deleted file mode 100644
index a1fa96bb1..000000000
--- a/parser/testdata/domain/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function domain (children 1)
-     ExpressionList (children 1)
-      Literal \'https://example.com/path\'
diff --git a/parser/testdata/domain/query.sql b/parser/testdata/domain/query.sql
deleted file mode 100644
index 8e294fcc2..000000000
--- a/parser/testdata/domain/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT domain('https://example.com/path')
diff --git a/parser/testdata/domainwithoutwww/explain.txt b/parser/testdata/domainwithoutwww/explain.txt
deleted file mode 100644
index 8094ebe93..000000000
--- a/parser/testdata/domainwithoutwww/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function domainWithoutWWW (children 1)
-     ExpressionList (children 1)
-      Literal \'https://www.example.com\'
diff --git a/parser/testdata/domainwithoutwww/query.sql b/parser/testdata/domainwithoutwww/query.sql
deleted file mode 100644
index 6b38c0e33..000000000
--- a/parser/testdata/domainwithoutwww/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT domainWithoutWWW('https://www.example.com')
diff --git a/parser/testdata/drop_column/explain.txt b/parser/testdata/drop_column/explain.txt
deleted file mode 100644
index ad7b58ef6..000000000
--- a/parser/testdata/drop_column/explain.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-AlterQuery test_table (children 2)
- ExpressionList (children 1)
-  AlterCommand DROP_COLUMN (children 1)
-   Identifier old_col
- Identifier test_table
diff --git a/parser/testdata/drop_column/query.sql b/parser/testdata/drop_column/query.sql
deleted file mode 100644
index 86fca9505..000000000
--- a/parser/testdata/drop_column/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-ALTER TABLE test_table DROP COLUMN old_col
diff --git a/parser/testdata/drop_column_if_exists/explain.txt b/parser/testdata/drop_column_if_exists/explain.txt
deleted file mode 100644
index ad7b58ef6..000000000
--- a/parser/testdata/drop_column_if_exists/explain.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-AlterQuery test_table (children 2)
- ExpressionList (children 1)
-  AlterCommand DROP_COLUMN (children 1)
-   Identifier old_col
- Identifier test_table
diff --git a/parser/testdata/drop_column_if_exists/query.sql b/parser/testdata/drop_column_if_exists/query.sql
deleted file mode 100644
index 3561db788..000000000
--- a/parser/testdata/drop_column_if_exists/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-ALTER TABLE test_table DROP COLUMN IF EXISTS old_col
diff --git a/parser/testdata/drop_compiled_expression_cache/explain.txt b/parser/testdata/drop_compiled_expression_cache/explain.txt
deleted file mode 100644
index 1684cb9d8..000000000
--- a/parser/testdata/drop_compiled_expression_cache/explain.txt
+++ /dev/null
@@ -1 +0,0 @@
-SYSTEM query
diff --git a/parser/testdata/drop_compiled_expression_cache/query.sql b/parser/testdata/drop_compiled_expression_cache/query.sql
deleted file mode 100644
index 35ad10ea0..000000000
--- a/parser/testdata/drop_compiled_expression_cache/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SYSTEM DROP COMPILED EXPRESSION CACHE
diff --git a/parser/testdata/drop_constraint/explain.txt b/parser/testdata/drop_constraint/explain.txt
deleted file mode 100644
index 66ceadaf3..000000000
--- a/parser/testdata/drop_constraint/explain.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-AlterQuery test_table (children 2)
- ExpressionList (children 1)
-  AlterCommand DROP_CONSTRAINT (children 1)
-   Identifier c
- Identifier test_table
diff --git a/parser/testdata/drop_constraint/query.sql b/parser/testdata/drop_constraint/query.sql
deleted file mode 100644
index b7b4acaa0..000000000
--- a/parser/testdata/drop_constraint/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-ALTER TABLE test_table DROP CONSTRAINT c
diff --git a/parser/testdata/drop_database/explain.txt b/parser/testdata/drop_database/explain.txt
deleted file mode 100644
index 40c2b35af..000000000
--- a/parser/testdata/drop_database/explain.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-DropQuery test_db (children 1)
- Identifier test_db
diff --git a/parser/testdata/drop_database/query.sql b/parser/testdata/drop_database/query.sql
deleted file mode 100644
index a5fa05ec4..000000000
--- a/parser/testdata/drop_database/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-DROP DATABASE test_db
diff --git a/parser/testdata/drop_database_if_exists/explain.txt b/parser/testdata/drop_database_if_exists/explain.txt
deleted file mode 100644
index 40c2b35af..000000000
--- a/parser/testdata/drop_database_if_exists/explain.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-DropQuery test_db (children 1)
- Identifier test_db
diff --git a/parser/testdata/drop_database_if_exists/query.sql b/parser/testdata/drop_database_if_exists/query.sql
deleted file mode 100644
index 02c6ea30a..000000000
--- a/parser/testdata/drop_database_if_exists/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-DROP DATABASE IF EXISTS test_db
diff --git a/parser/testdata/drop_dns_cache/explain.txt b/parser/testdata/drop_dns_cache/explain.txt
deleted file mode 100644
index 1684cb9d8..000000000
--- a/parser/testdata/drop_dns_cache/explain.txt
+++ /dev/null
@@ -1 +0,0 @@
-SYSTEM query
diff --git a/parser/testdata/drop_dns_cache/query.sql b/parser/testdata/drop_dns_cache/query.sql
deleted file mode 100644
index 8ea225a65..000000000
--- a/parser/testdata/drop_dns_cache/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SYSTEM DROP DNS CACHE
diff --git a/parser/testdata/drop_index/explain.txt b/parser/testdata/drop_index/explain.txt
deleted file mode 100644
index bb19729b4..000000000
--- a/parser/testdata/drop_index/explain.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-AlterQuery test_table (children 2)
- ExpressionList (children 1)
-  AlterCommand DROP_INDEX (children 1)
-   Identifier idx
- Identifier test_table
diff --git a/parser/testdata/drop_index/query.sql b/parser/testdata/drop_index/query.sql
deleted file mode 100644
index 212fbef93..000000000
--- a/parser/testdata/drop_index/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-ALTER TABLE test_table DROP INDEX idx
diff --git a/parser/testdata/drop_mark_cache/explain.txt b/parser/testdata/drop_mark_cache/explain.txt
deleted file mode 100644
index 1684cb9d8..000000000
--- a/parser/testdata/drop_mark_cache/explain.txt
+++ /dev/null
@@ -1 +0,0 @@
-SYSTEM query
diff --git a/parser/testdata/drop_mark_cache/query.sql b/parser/testdata/drop_mark_cache/query.sql
deleted file mode 100644
index 26a97c481..000000000
--- a/parser/testdata/drop_mark_cache/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SYSTEM DROP MARK CACHE
diff --git a/parser/testdata/drop_partition/explain.txt b/parser/testdata/drop_partition/explain.txt
deleted file mode 100644
index 05a3f7dc5..000000000
--- a/parser/testdata/drop_partition/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-AlterQuery test_table (children 2)
- ExpressionList (children 1)
-  AlterCommand DROP_PARTITION (children 1)
-   Partition (children 1)
-    Literal UInt64_202301
- Identifier test_table
diff --git a/parser/testdata/drop_partition/query.sql b/parser/testdata/drop_partition/query.sql
deleted file mode 100644
index fe1c81386..000000000
--- a/parser/testdata/drop_partition/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-ALTER TABLE test_table DROP PARTITION 202301
diff --git a/parser/testdata/drop_table/explain.txt b/parser/testdata/drop_table/explain.txt
deleted file mode 100644
index 3737a8cf5..000000000
--- a/parser/testdata/drop_table/explain.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-DropQuery test (children 1)
- Identifier test
diff --git a/parser/testdata/drop_table/query.sql b/parser/testdata/drop_table/query.sql
deleted file mode 100644
index 7186aac36..000000000
--- a/parser/testdata/drop_table/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-DROP TABLE test
diff --git a/parser/testdata/drop_table_ddl/explain.txt b/parser/testdata/drop_table_ddl/explain.txt
deleted file mode 100644
index d4bf1aa3f..000000000
--- a/parser/testdata/drop_table_ddl/explain.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-DropQuery test_table (children 1)
- Identifier test_table
diff --git a/parser/testdata/drop_table_ddl/query.sql b/parser/testdata/drop_table_ddl/query.sql
deleted file mode 100644
index 3051d3c2d..000000000
--- a/parser/testdata/drop_table_ddl/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-DROP TABLE test_table
diff --git a/parser/testdata/drop_table_if_exists/explain.txt b/parser/testdata/drop_table_if_exists/explain.txt
deleted file mode 100644
index 3737a8cf5..000000000
--- a/parser/testdata/drop_table_if_exists/explain.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-DropQuery test (children 1)
- Identifier test
diff --git a/parser/testdata/drop_table_if_exists/query.sql b/parser/testdata/drop_table_if_exists/query.sql
deleted file mode 100644
index ebf2d590b..000000000
--- a/parser/testdata/drop_table_if_exists/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-DROP TABLE IF EXISTS test
diff --git a/parser/testdata/drop_table_if_exists_ddl/explain.txt b/parser/testdata/drop_table_if_exists_ddl/explain.txt
deleted file mode 100644
index d4bf1aa3f..000000000
--- a/parser/testdata/drop_table_if_exists_ddl/explain.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-DropQuery test_table (children 1)
- Identifier test_table
diff --git a/parser/testdata/drop_table_if_exists_ddl/query.sql b/parser/testdata/drop_table_if_exists_ddl/query.sql
deleted file mode 100644
index 98c55733d..000000000
--- a/parser/testdata/drop_table_if_exists_ddl/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-DROP TABLE IF EXISTS test_table
diff --git a/parser/testdata/drop_table_sync/explain.txt b/parser/testdata/drop_table_sync/explain.txt
deleted file mode 100644
index d4bf1aa3f..000000000
--- a/parser/testdata/drop_table_sync/explain.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-DropQuery test_table (children 1)
- Identifier test_table
diff --git a/parser/testdata/drop_table_sync/query.sql b/parser/testdata/drop_table_sync/query.sql
deleted file mode 100644
index c39e9d47f..000000000
--- a/parser/testdata/drop_table_sync/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-DROP TABLE test_table SYNC
diff --git a/parser/testdata/drop_uncompressed_cache/explain.txt b/parser/testdata/drop_uncompressed_cache/explain.txt
deleted file mode 100644
index 1684cb9d8..000000000
--- a/parser/testdata/drop_uncompressed_cache/explain.txt
+++ /dev/null
@@ -1 +0,0 @@
-SYSTEM query
diff --git a/parser/testdata/drop_uncompressed_cache/query.sql b/parser/testdata/drop_uncompressed_cache/query.sql
deleted file mode 100644
index 178a59152..000000000
--- a/parser/testdata/drop_uncompressed_cache/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SYSTEM DROP UNCOMPRESSED CACHE
diff --git a/parser/testdata/drop_view/explain.txt b/parser/testdata/drop_view/explain.txt
deleted file mode 100644
index 8bbb0d82b..000000000
--- a/parser/testdata/drop_view/explain.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-DropQuery test_view (children 1)
- Identifier test_view
diff --git a/parser/testdata/drop_view/query.sql b/parser/testdata/drop_view/query.sql
deleted file mode 100644
index 7eebc2d76..000000000
--- a/parser/testdata/drop_view/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-DROP VIEW test_view
diff --git a/parser/testdata/drop_view_if_exists/explain.txt b/parser/testdata/drop_view_if_exists/explain.txt
deleted file mode 100644
index 8bbb0d82b..000000000
--- a/parser/testdata/drop_view_if_exists/explain.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-DropQuery test_view (children 1)
- Identifier test_view
diff --git a/parser/testdata/drop_view_if_exists/query.sql b/parser/testdata/drop_view_if_exists/query.sql
deleted file mode 100644
index ddcc6ae3b..000000000
--- a/parser/testdata/drop_view_if_exists/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-DROP VIEW IF EXISTS test_view
diff --git a/parser/testdata/empty_array/explain.txt b/parser/testdata/empty_array/explain.txt
deleted file mode 100644
index 676a37068..000000000
--- a/parser/testdata/empty_array/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function emptyArrayUInt8 (children 1)
-     ExpressionList
diff --git a/parser/testdata/empty_array/query.sql b/parser/testdata/empty_array/query.sql
deleted file mode 100644
index da8305fb3..000000000
--- a/parser/testdata/empty_array/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT emptyArrayUInt8()
diff --git a/parser/testdata/empty_array_literal/explain.txt b/parser/testdata/empty_array_literal/explain.txt
deleted file mode 100644
index ee947e0c7..000000000
--- a/parser/testdata/empty_array_literal/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function array (children 1)
-     ExpressionList
diff --git a/parser/testdata/empty_array_literal/metadata.json b/parser/testdata/empty_array_literal/metadata.json
deleted file mode 100644
index b155a6a59..000000000
--- a/parser/testdata/empty_array_literal/metadata.json
+++ /dev/null
@@ -1 +0,0 @@
-{"source": "https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00007_array.sql"}
diff --git a/parser/testdata/empty_array_literal/query.sql b/parser/testdata/empty_array_literal/query.sql
deleted file mode 100644
index d5cc202f8..000000000
--- a/parser/testdata/empty_array_literal/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT []
diff --git a/parser/testdata/empty_on_array/explain.txt b/parser/testdata/empty_on_array/explain.txt
deleted file mode 100644
index f948dfe2f..000000000
--- a/parser/testdata/empty_on_array/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function empty (children 1)
-     ExpressionList (children 1)
-      Literal Array_[UInt64_1, UInt64_2, UInt64_3]
diff --git a/parser/testdata/empty_on_array/query.sql b/parser/testdata/empty_on_array/query.sql
deleted file mode 100644
index a4bfd3935..000000000
--- a/parser/testdata/empty_on_array/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT empty([1, 2, 3])
diff --git a/parser/testdata/empty_string/explain.txt b/parser/testdata/empty_string/explain.txt
deleted file mode 100644
index 39adecca3..000000000
--- a/parser/testdata/empty_string/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function empty (children 1)
-     ExpressionList (children 1)
-      Literal \'\'
diff --git a/parser/testdata/empty_string/query.sql b/parser/testdata/empty_string/query.sql
deleted file mode 100644
index 0c32875f7..000000000
--- a/parser/testdata/empty_string/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT empty('')
diff --git a/parser/testdata/exchange_tables/explain.txt b/parser/testdata/exchange_tables/explain.txt
deleted file mode 100644
index 424334436..000000000
--- a/parser/testdata/exchange_tables/explain.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-Rename (children 2)
- Identifier table1
- Identifier table2
diff --git a/parser/testdata/exchange_tables/query.sql b/parser/testdata/exchange_tables/query.sql
deleted file mode 100644
index 4abc67122..000000000
--- a/parser/testdata/exchange_tables/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-EXCHANGE TABLES table1 AND table2
diff --git a/parser/testdata/exists_subquery/explain.txt b/parser/testdata/exists_subquery/explain.txt
deleted file mode 100644
index 94fccfdbd..000000000
--- a/parser/testdata/exists_subquery/explain.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 3)
-   ExpressionList (children 1)
-    Identifier number
-   TablesInSelectQuery (children 1)
-    TablesInSelectQueryElement (children 1)
-     TableExpression (children 1)
-      Function numbers (children 1)
-       ExpressionList (children 1)
-        Literal UInt64_10
-   Function exists (children 1)
-    ExpressionList (children 1)
-     Subquery (children 1)
-      SelectWithUnionQuery (children 1)
-       ExpressionList (children 1)
-        SelectQuery (children 3)
-         ExpressionList (children 1)
-          Literal UInt64_1
-         TablesInSelectQuery (children 1)
-          TablesInSelectQueryElement (children 1)
-           TableExpression (children 1)
-            Function numbers (children 1)
-             ExpressionList (children 1)
-              Literal UInt64_5
-         Function equals (children 1)
-          ExpressionList (children 2)
-           Identifier number
-           Literal UInt64_1
diff --git a/parser/testdata/exists_subquery/query.sql b/parser/testdata/exists_subquery/query.sql
deleted file mode 100644
index f620466c8..000000000
--- a/parser/testdata/exists_subquery/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT number FROM numbers(10) WHERE EXISTS (SELECT 1 FROM numbers(5) WHERE number = 1)
diff --git a/parser/testdata/explain/explain.txt b/parser/testdata/explain/explain.txt
deleted file mode 100644
index b3facf84e..000000000
--- a/parser/testdata/explain/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-Explain EXPLAIN (children 1)
- SelectWithUnionQuery (children 1)
-  ExpressionList (children 1)
-   SelectQuery (children 1)
-    ExpressionList (children 1)
-     Literal UInt64_1
diff --git a/parser/testdata/explain/query.sql b/parser/testdata/explain/query.sql
deleted file mode 100644
index 946628096..000000000
--- a/parser/testdata/explain/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-EXPLAIN SELECT 1
diff --git a/parser/testdata/explain_ast/explain.txt b/parser/testdata/explain_ast/explain.txt
deleted file mode 100644
index d99fd0255..000000000
--- a/parser/testdata/explain_ast/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-Explain EXPLAIN AST (children 1)
- SelectWithUnionQuery (children 1)
-  ExpressionList (children 1)
-   SelectQuery (children 1)
-    ExpressionList (children 1)
-     Literal UInt64_1
diff --git a/parser/testdata/explain_ast/query.sql b/parser/testdata/explain_ast/query.sql
deleted file mode 100644
index c7b0938dd..000000000
--- a/parser/testdata/explain_ast/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-EXPLAIN AST SELECT 1
diff --git a/parser/testdata/explain_ast_stmt/explain.txt b/parser/testdata/explain_ast_stmt/explain.txt
deleted file mode 100644
index d99fd0255..000000000
--- a/parser/testdata/explain_ast_stmt/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-Explain EXPLAIN AST (children 1)
- SelectWithUnionQuery (children 1)
-  ExpressionList (children 1)
-   SelectQuery (children 1)
-    ExpressionList (children 1)
-     Literal UInt64_1
diff --git a/parser/testdata/explain_ast_stmt/query.sql b/parser/testdata/explain_ast_stmt/query.sql
deleted file mode 100644
index c7b0938dd..000000000
--- a/parser/testdata/explain_ast_stmt/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-EXPLAIN AST SELECT 1
diff --git a/parser/testdata/explain_estimate/explain.txt b/parser/testdata/explain_estimate/explain.txt
deleted file mode 100644
index 9351cb644..000000000
--- a/parser/testdata/explain_estimate/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-Explain EXPLAIN ESTIMATE (children 1)
- SelectWithUnionQuery (children 1)
-  ExpressionList (children 1)
-   SelectQuery (children 1)
-    ExpressionList (children 1)
-     Literal UInt64_1
diff --git a/parser/testdata/explain_estimate/query.sql b/parser/testdata/explain_estimate/query.sql
deleted file mode 100644
index 7e45596fb..000000000
--- a/parser/testdata/explain_estimate/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-EXPLAIN ESTIMATE SELECT 1
diff --git a/parser/testdata/explain_pipeline/explain.txt b/parser/testdata/explain_pipeline/explain.txt
deleted file mode 100644
index 401f6841e..000000000
--- a/parser/testdata/explain_pipeline/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-Explain EXPLAIN PIPELINE (children 1)
- SelectWithUnionQuery (children 1)
-  ExpressionList (children 1)
-   SelectQuery (children 1)
-    ExpressionList (children 1)
-     Literal UInt64_1
diff --git a/parser/testdata/explain_pipeline/query.sql b/parser/testdata/explain_pipeline/query.sql
deleted file mode 100644
index 705102940..000000000
--- a/parser/testdata/explain_pipeline/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-EXPLAIN PIPELINE SELECT 1
diff --git a/parser/testdata/explain_plan/explain.txt b/parser/testdata/explain_plan/explain.txt
deleted file mode 100644
index b3facf84e..000000000
--- a/parser/testdata/explain_plan/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-Explain EXPLAIN (children 1)
- SelectWithUnionQuery (children 1)
-  ExpressionList (children 1)
-   SelectQuery (children 1)
-    ExpressionList (children 1)
-     Literal UInt64_1
diff --git a/parser/testdata/explain_plan/query.sql b/parser/testdata/explain_plan/query.sql
deleted file mode 100644
index 2289ca3d2..000000000
--- a/parser/testdata/explain_plan/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-EXPLAIN PLAN SELECT 1
diff --git a/parser/testdata/explain_stmt/explain.txt b/parser/testdata/explain_stmt/explain.txt
deleted file mode 100644
index b3facf84e..000000000
--- a/parser/testdata/explain_stmt/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-Explain EXPLAIN (children 1)
- SelectWithUnionQuery (children 1)
-  ExpressionList (children 1)
-   SelectQuery (children 1)
-    ExpressionList (children 1)
-     Literal UInt64_1
diff --git a/parser/testdata/explain_stmt/query.sql b/parser/testdata/explain_stmt/query.sql
deleted file mode 100644
index 946628096..000000000
--- a/parser/testdata/explain_stmt/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-EXPLAIN SELECT 1
diff --git a/parser/testdata/explain_syntax/explain.txt b/parser/testdata/explain_syntax/explain.txt
deleted file mode 100644
index badb9820f..000000000
--- a/parser/testdata/explain_syntax/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-Explain EXPLAIN SYNTAX (children 1)
- SelectWithUnionQuery (children 1)
-  ExpressionList (children 1)
-   SelectQuery (children 1)
-    ExpressionList (children 1)
-     Literal UInt64_1
diff --git a/parser/testdata/explain_syntax/query.sql b/parser/testdata/explain_syntax/query.sql
deleted file mode 100644
index b828dda3f..000000000
--- a/parser/testdata/explain_syntax/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-EXPLAIN SYNTAX SELECT 1
diff --git a/parser/testdata/explicit_cross_join/explain.txt b/parser/testdata/explicit_cross_join/explain.txt
deleted file mode 100644
index c8767344d..000000000
--- a/parser/testdata/explicit_cross_join/explain.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 2)
-   ExpressionList (children 1)
-    Asterisk
-   TablesInSelectQuery (children 2)
-    TablesInSelectQueryElement (children 1)
-     TableExpression (children 1)
-      Function numbers (alias a) (children 1)
-       ExpressionList (children 1)
-        Literal UInt64_5
-    TablesInSelectQueryElement (children 2)
-     TableExpression (children 1)
-      Function numbers (alias b) (children 1)
-       ExpressionList (children 1)
-        Literal UInt64_5
-     TableJoin
diff --git a/parser/testdata/explicit_cross_join/query.sql b/parser/testdata/explicit_cross_join/query.sql
deleted file mode 100644
index 188867d82..000000000
--- a/parser/testdata/explicit_cross_join/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT * FROM numbers(5) AS a CROSS JOIN numbers(5) AS b
diff --git a/parser/testdata/extract_regex/explain.txt b/parser/testdata/extract_regex/explain.txt
deleted file mode 100644
index 2e2b07afc..000000000
--- a/parser/testdata/extract_regex/explain.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function extract (children 1)
-     ExpressionList (children 2)
-      Literal \'hello world\'
-      Literal \'w\\\\w+\'
diff --git a/parser/testdata/extract_regex/query.sql b/parser/testdata/extract_regex/query.sql
deleted file mode 100644
index 2d7e727ac..000000000
--- a/parser/testdata/extract_regex/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT extract('hello world', 'w\\w+')
diff --git a/parser/testdata/extractall/explain.txt b/parser/testdata/extractall/explain.txt
deleted file mode 100644
index f68038a38..000000000
--- a/parser/testdata/extractall/explain.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function extractAll (children 1)
-     ExpressionList (children 2)
-      Literal \'hello world\'
-      Literal \'\\\\w+\'
diff --git a/parser/testdata/extractall/query.sql b/parser/testdata/extractall/query.sql
deleted file mode 100644
index e6dce2226..000000000
--- a/parser/testdata/extractall/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT extractAll('hello world', '\\w+')
diff --git a/parser/testdata/extracturlparameter/explain.txt b/parser/testdata/extracturlparameter/explain.txt
deleted file mode 100644
index 32c7e08d4..000000000
--- a/parser/testdata/extracturlparameter/explain.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function extractURLParameter (children 1)
-     ExpressionList (children 2)
-      Literal \'https://example.com?a=1&b=2\'
-      Literal \'a\'
diff --git a/parser/testdata/extracturlparameter/query.sql b/parser/testdata/extracturlparameter/query.sql
deleted file mode 100644
index 48296432a..000000000
--- a/parser/testdata/extracturlparameter/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT extractURLParameter('https://example.com?a=1&b=2', 'a')
diff --git a/parser/testdata/extracturlparameternames/explain.txt b/parser/testdata/extracturlparameternames/explain.txt
deleted file mode 100644
index 855735195..000000000
--- a/parser/testdata/extracturlparameternames/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function extractURLParameterNames (children 1)
-     ExpressionList (children 1)
-      Literal \'https://example.com?a=1&b=2\'
diff --git a/parser/testdata/extracturlparameternames/query.sql b/parser/testdata/extracturlparameternames/query.sql
deleted file mode 100644
index fd5696010..000000000
--- a/parser/testdata/extracturlparameternames/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT extractURLParameterNames('https://example.com?a=1&b=2')
diff --git a/parser/testdata/extracturlparameters/explain.txt b/parser/testdata/extracturlparameters/explain.txt
deleted file mode 100644
index 8a9181c55..000000000
--- a/parser/testdata/extracturlparameters/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function extractURLParameters (children 1)
-     ExpressionList (children 1)
-      Literal \'https://example.com?a=1&b=2\'
diff --git a/parser/testdata/extracturlparameters/query.sql b/parser/testdata/extracturlparameters/query.sql
deleted file mode 100644
index 72e541be5..000000000
--- a/parser/testdata/extracturlparameters/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT extractURLParameters('https://example.com?a=1&b=2')
diff --git a/parser/testdata/first_value_function/explain.txt b/parser/testdata/first_value_function/explain.txt
deleted file mode 100644
index b5da13085..000000000
--- a/parser/testdata/first_value_function/explain.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 2)
-   ExpressionList (children 2)
-    Identifier number
-    Function first_value (children 2)
-     ExpressionList (children 1)
-      Identifier number
-     WindowDefinition (children 1)
-      ExpressionList (children 1)
-       OrderByElement (children 1)
-        Identifier number
-   TablesInSelectQuery (children 1)
-    TablesInSelectQueryElement (children 1)
-     TableExpression (children 1)
-      Function numbers (children 1)
-       ExpressionList (children 1)
-        Literal UInt64_10
diff --git a/parser/testdata/first_value_function/query.sql b/parser/testdata/first_value_function/query.sql
deleted file mode 100644
index 01a64bbf5..000000000
--- a/parser/testdata/first_value_function/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT number, first_value(number) OVER (ORDER BY number) FROM numbers(10)
diff --git a/parser/testdata/float_literal/explain.txt b/parser/testdata/float_literal/explain.txt
deleted file mode 100644
index 48ab7cab7..000000000
--- a/parser/testdata/float_literal/explain.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Literal Float64_3.14
diff --git a/parser/testdata/float_literal/query.sql b/parser/testdata/float_literal/query.sql
deleted file mode 100644
index a1a67b611..000000000
--- a/parser/testdata/float_literal/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 3.14
diff --git a/parser/testdata/flush_logs/explain.txt b/parser/testdata/flush_logs/explain.txt
deleted file mode 100644
index 1684cb9d8..000000000
--- a/parser/testdata/flush_logs/explain.txt
+++ /dev/null
@@ -1 +0,0 @@
-SYSTEM query
diff --git a/parser/testdata/flush_logs/query.sql b/parser/testdata/flush_logs/query.sql
deleted file mode 100644
index e11ee5fdf..000000000
--- a/parser/testdata/flush_logs/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SYSTEM FLUSH LOGS
diff --git a/parser/testdata/format_csv/explain.txt b/parser/testdata/format_csv/explain.txt
deleted file mode 100644
index 370918e66..000000000
--- a/parser/testdata/format_csv/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-SelectWithUnionQuery (children 2)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Literal UInt64_1
- Identifier CSV
diff --git a/parser/testdata/format_csv/query.sql b/parser/testdata/format_csv/query.sql
deleted file mode 100644
index 57436f940..000000000
--- a/parser/testdata/format_csv/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 1 FORMAT CSV
diff --git a/parser/testdata/format_function/explain.txt b/parser/testdata/format_function/explain.txt
deleted file mode 100644
index 9e3258e3a..000000000
--- a/parser/testdata/format_function/explain.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function format (children 1)
-     ExpressionList (children 3)
-      Literal \'{0} {1}\'
-      Literal \'hello\'
-      Literal \'world\'
diff --git a/parser/testdata/format_function/query.sql b/parser/testdata/format_function/query.sql
deleted file mode 100644
index 0cb9b4e08..000000000
--- a/parser/testdata/format_function/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT format('{0} {1}', 'hello', 'world')
diff --git a/parser/testdata/format_json/explain.txt b/parser/testdata/format_json/explain.txt
deleted file mode 100644
index c601338bb..000000000
--- a/parser/testdata/format_json/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-SelectWithUnionQuery (children 2)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Literal UInt64_1
- Identifier JSON
diff --git a/parser/testdata/format_json/query.sql b/parser/testdata/format_json/query.sql
deleted file mode 100644
index f22ca8241..000000000
--- a/parser/testdata/format_json/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 1 FORMAT JSON
diff --git a/parser/testdata/format_jsoncompact/explain.txt b/parser/testdata/format_jsoncompact/explain.txt
deleted file mode 100644
index 8911dca3a..000000000
--- a/parser/testdata/format_jsoncompact/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-SelectWithUnionQuery (children 2)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Literal UInt64_1
- Identifier JSONCompact
diff --git a/parser/testdata/format_jsoncompact/query.sql b/parser/testdata/format_jsoncompact/query.sql
deleted file mode 100644
index 7e11bb7b8..000000000
--- a/parser/testdata/format_jsoncompact/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 1 FORMAT JSONCompact
diff --git a/parser/testdata/format_jsoneachrow/explain.txt b/parser/testdata/format_jsoneachrow/explain.txt
deleted file mode 100644
index 353cc69fa..000000000
--- a/parser/testdata/format_jsoneachrow/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-SelectWithUnionQuery (children 2)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Literal UInt64_1
- Identifier JSONEachRow
diff --git a/parser/testdata/format_jsoneachrow/query.sql b/parser/testdata/format_jsoneachrow/query.sql
deleted file mode 100644
index f55b47ad1..000000000
--- a/parser/testdata/format_jsoneachrow/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 1 FORMAT JSONEachRow
diff --git a/parser/testdata/format_pretty/explain.txt b/parser/testdata/format_pretty/explain.txt
deleted file mode 100644
index 199573598..000000000
--- a/parser/testdata/format_pretty/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-SelectWithUnionQuery (children 2)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Literal UInt64_1
- Identifier Pretty
diff --git a/parser/testdata/format_pretty/query.sql b/parser/testdata/format_pretty/query.sql
deleted file mode 100644
index 761b49840..000000000
--- a/parser/testdata/format_pretty/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 1 FORMAT Pretty
diff --git a/parser/testdata/format_tabseparated/explain.txt b/parser/testdata/format_tabseparated/explain.txt
deleted file mode 100644
index 523fec89b..000000000
--- a/parser/testdata/format_tabseparated/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-SelectWithUnionQuery (children 2)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Literal UInt64_1
- Identifier TabSeparated
diff --git a/parser/testdata/format_tabseparated/query.sql b/parser/testdata/format_tabseparated/query.sql
deleted file mode 100644
index f8217f18f..000000000
--- a/parser/testdata/format_tabseparated/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 1 FORMAT TabSeparated
diff --git a/parser/testdata/format_tsv/explain.txt b/parser/testdata/format_tsv/explain.txt
deleted file mode 100644
index b65b532e0..000000000
--- a/parser/testdata/format_tsv/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-SelectWithUnionQuery (children 2)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Literal UInt64_1
- Identifier TSV
diff --git a/parser/testdata/format_tsv/query.sql b/parser/testdata/format_tsv/query.sql
deleted file mode 100644
index ecb2d1da6..000000000
--- a/parser/testdata/format_tsv/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 1 FORMAT TSV
diff --git a/parser/testdata/format_vertical/explain.txt b/parser/testdata/format_vertical/explain.txt
deleted file mode 100644
index f232dffce..000000000
--- a/parser/testdata/format_vertical/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-SelectWithUnionQuery (children 2)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Literal UInt64_1
- Identifier Vertical
diff --git a/parser/testdata/format_vertical/query.sql b/parser/testdata/format_vertical/query.sql
deleted file mode 100644
index b82725173..000000000
--- a/parser/testdata/format_vertical/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 1 FORMAT Vertical
diff --git a/parser/testdata/formatdatetime/explain.txt b/parser/testdata/formatdatetime/explain.txt
deleted file mode 100644
index d50ca3590..000000000
--- a/parser/testdata/formatdatetime/explain.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function formatDateTime (children 1)
-     ExpressionList (children 2)
-      Function now (children 1)
-       ExpressionList
-      Literal \'%Y-%m-%d\'
diff --git a/parser/testdata/formatdatetime/query.sql b/parser/testdata/formatdatetime/query.sql
deleted file mode 100644
index 756a53059..000000000
--- a/parser/testdata/formatdatetime/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT formatDateTime(now(), '%Y-%m-%d')
diff --git a/parser/testdata/fragment/explain.txt b/parser/testdata/fragment/explain.txt
deleted file mode 100644
index 071cee5fd..000000000
--- a/parser/testdata/fragment/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function fragment (children 1)
-     ExpressionList (children 1)
-      Literal \'https://example.com/path#section\'
diff --git a/parser/testdata/fragment/query.sql b/parser/testdata/fragment/query.sql
deleted file mode 100644
index dcdda955e..000000000
--- a/parser/testdata/fragment/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT fragment('https://example.com/path#section')
diff --git a/parser/testdata/freeze_partition/explain.txt b/parser/testdata/freeze_partition/explain.txt
deleted file mode 100644
index 8fc39b757..000000000
--- a/parser/testdata/freeze_partition/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-AlterQuery test_table (children 2)
- ExpressionList (children 1)
-  AlterCommand FREEZE_PARTITION (children 1)
-   Partition (children 1)
-    Literal UInt64_202301
- Identifier test_table
diff --git a/parser/testdata/freeze_partition/query.sql b/parser/testdata/freeze_partition/query.sql
deleted file mode 100644
index 0d12ea50c..000000000
--- a/parser/testdata/freeze_partition/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-ALTER TABLE test_table FREEZE PARTITION 202301
diff --git a/parser/testdata/freeze_table/explain.txt b/parser/testdata/freeze_table/explain.txt
deleted file mode 100644
index 731441a4f..000000000
--- a/parser/testdata/freeze_table/explain.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-AlterQuery test_table (children 2)
- ExpressionList (children 1)
-  AlterCommand FREEZE_ALL
- Identifier test_table
diff --git a/parser/testdata/freeze_table/query.sql b/parser/testdata/freeze_table/query.sql
deleted file mode 100644
index 15c04b054..000000000
--- a/parser/testdata/freeze_table/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-ALTER TABLE test_table FREEZE
diff --git a/parser/testdata/fromunixtimestamp/explain.txt b/parser/testdata/fromunixtimestamp/explain.txt
deleted file mode 100644
index ab382afb7..000000000
--- a/parser/testdata/fromunixtimestamp/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function fromUnixTimestamp (children 1)
-     ExpressionList (children 1)
-      Literal UInt64_1234567890
diff --git a/parser/testdata/fromunixtimestamp/query.sql b/parser/testdata/fromunixtimestamp/query.sql
deleted file mode 100644
index 796625053..000000000
--- a/parser/testdata/fromunixtimestamp/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT fromUnixTimestamp(1234567890)
diff --git a/parser/testdata/full_join/explain.txt b/parser/testdata/full_join/explain.txt
deleted file mode 100644
index ae57e89c2..000000000
--- a/parser/testdata/full_join/explain.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 2)
-   ExpressionList (children 1)
-    Asterisk
-   TablesInSelectQuery (children 2)
-    TablesInSelectQueryElement (children 1)
-     TableExpression (children 1)
-      Function numbers (alias a) (children 1)
-       ExpressionList (children 1)
-        Literal UInt64_5
-    TablesInSelectQueryElement (children 2)
-     TableExpression (children 1)
-      Function numbers (alias b) (children 1)
-       ExpressionList (children 1)
-        Literal UInt64_5
-     TableJoin (children 1)
-      Function equals (children 1)
-       ExpressionList (children 2)
-        Identifier a.number
-        Identifier b.number
diff --git a/parser/testdata/full_join/query.sql b/parser/testdata/full_join/query.sql
deleted file mode 100644
index c3c820592..000000000
--- a/parser/testdata/full_join/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT * FROM numbers(5) AS a FULL JOIN numbers(5) AS b ON a.number = b.number
diff --git a/parser/testdata/function_with_multiple_args/explain.txt b/parser/testdata/function_with_multiple_args/explain.txt
deleted file mode 100644
index 44e84b401..000000000
--- a/parser/testdata/function_with_multiple_args/explain.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 2)
-   ExpressionList (children 1)
-    Function substring (children 1)
-     ExpressionList (children 3)
-      Identifier name
-      Literal UInt64_1
-      Literal UInt64_5
-   TablesInSelectQuery (children 1)
-    TablesInSelectQueryElement (children 1)
-     TableExpression (children 1)
-      TableIdentifier users
diff --git a/parser/testdata/function_with_multiple_args/query.sql b/parser/testdata/function_with_multiple_args/query.sql
deleted file mode 100644
index 99a6e0ab9..000000000
--- a/parser/testdata/function_with_multiple_args/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT substring(name, 1, 5) FROM users
diff --git a/parser/testdata/generateuuidv4/explain.txt b/parser/testdata/generateuuidv4/explain.txt
deleted file mode 100644
index c27f7c5cf..000000000
--- a/parser/testdata/generateuuidv4/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function generateUUIDv4 (children 1)
-     ExpressionList
diff --git a/parser/testdata/generateuuidv4/query.sql b/parser/testdata/generateuuidv4/query.sql
deleted file mode 100644
index 2ac4f691e..000000000
--- a/parser/testdata/generateuuidv4/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT generateUUIDv4()
diff --git a/parser/testdata/global_in/explain.txt b/parser/testdata/global_in/explain.txt
deleted file mode 100644
index 8ccd32431..000000000
--- a/parser/testdata/global_in/explain.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 3)
-   ExpressionList (children 1)
-    Asterisk
-   TablesInSelectQuery (children 1)
-    TablesInSelectQueryElement (children 1)
-     TableExpression (children 1)
-      TableIdentifier test_table
-   Function globalIn (children 1)
-    ExpressionList (children 2)
-     Identifier id
-     Subquery (children 1)
-      SelectWithUnionQuery (children 1)
-       ExpressionList (children 1)
-        SelectQuery (children 2)
-         ExpressionList (children 1)
-          Identifier id
-         TablesInSelectQuery (children 1)
-          TablesInSelectQueryElement (children 1)
-           TableExpression (children 1)
-            TableIdentifier other_table
diff --git a/parser/testdata/global_in/query.sql b/parser/testdata/global_in/query.sql
deleted file mode 100644
index b8f97caa3..000000000
--- a/parser/testdata/global_in/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT * FROM test_table WHERE id GLOBAL IN (SELECT id FROM other_table)
diff --git a/parser/testdata/global_join/explain.txt b/parser/testdata/global_join/explain.txt
deleted file mode 100644
index ae57e89c2..000000000
--- a/parser/testdata/global_join/explain.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 2)
-   ExpressionList (children 1)
-    Asterisk
-   TablesInSelectQuery (children 2)
-    TablesInSelectQueryElement (children 1)
-     TableExpression (children 1)
-      Function numbers (alias a) (children 1)
-       ExpressionList (children 1)
-        Literal UInt64_5
-    TablesInSelectQueryElement (children 2)
-     TableExpression (children 1)
-      Function numbers (alias b) (children 1)
-       ExpressionList (children 1)
-        Literal UInt64_5
-     TableJoin (children 1)
-      Function equals (children 1)
-       ExpressionList (children 2)
-        Identifier a.number
-        Identifier b.number
diff --git a/parser/testdata/global_join/query.sql b/parser/testdata/global_join/query.sql
deleted file mode 100644
index d502f6bb9..000000000
--- a/parser/testdata/global_join/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT * FROM numbers(5) AS a GLOBAL JOIN numbers(5) AS b ON a.number = b.number
diff --git a/parser/testdata/global_not_in/explain.txt b/parser/testdata/global_not_in/explain.txt
deleted file mode 100644
index 59d3c57f1..000000000
--- a/parser/testdata/global_not_in/explain.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 3)
-   ExpressionList (children 1)
-    Asterisk
-   TablesInSelectQuery (children 1)
-    TablesInSelectQueryElement (children 1)
-     TableExpression (children 1)
-      TableIdentifier test_table
-   Function globalNotIn (children 1)
-    ExpressionList (children 2)
-     Identifier id
-     Subquery (children 1)
-      SelectWithUnionQuery (children 1)
-       ExpressionList (children 1)
-        SelectQuery (children 2)
-         ExpressionList (children 1)
-          Identifier id
-         TablesInSelectQuery (children 1)
-          TablesInSelectQueryElement (children 1)
-           TableExpression (children 1)
-            TableIdentifier other_table
diff --git a/parser/testdata/global_not_in/query.sql b/parser/testdata/global_not_in/query.sql
deleted file mode 100644
index 212c0bf1f..000000000
--- a/parser/testdata/global_not_in/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT * FROM test_table WHERE id GLOBAL NOT IN (SELECT id FROM other_table)
diff --git a/parser/testdata/group_by/explain.txt b/parser/testdata/group_by/explain.txt
deleted file mode 100644
index c8e722bbb..000000000
--- a/parser/testdata/group_by/explain.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 3)
-   ExpressionList (children 1)
-    Function count (children 1)
-     ExpressionList (children 1)
-      Asterisk
-   TablesInSelectQuery (children 1)
-    TablesInSelectQueryElement (children 1)
-     TableExpression (children 1)
-      TableIdentifier users
-   ExpressionList (children 1)
-    Identifier status
diff --git a/parser/testdata/group_by/query.sql b/parser/testdata/group_by/query.sql
deleted file mode 100644
index 477e73c12..000000000
--- a/parser/testdata/group_by/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT count(*) FROM users GROUP BY status
diff --git a/parser/testdata/group_by_having/explain.txt b/parser/testdata/group_by_having/explain.txt
deleted file mode 100644
index 7a2a372f6..000000000
--- a/parser/testdata/group_by_having/explain.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 4)
-   ExpressionList (children 1)
-    Function count (children 1)
-     ExpressionList (children 1)
-      Asterisk
-   TablesInSelectQuery (children 1)
-    TablesInSelectQueryElement (children 1)
-     TableExpression (children 1)
-      TableIdentifier users
-   ExpressionList (children 1)
-    Identifier status
-   Function greater (children 1)
-    ExpressionList (children 2)
-     Function count (children 1)
-      ExpressionList (children 1)
-       Asterisk
-     Literal UInt64_1
diff --git a/parser/testdata/group_by_having/query.sql b/parser/testdata/group_by_having/query.sql
deleted file mode 100644
index 3868da675..000000000
--- a/parser/testdata/group_by_having/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT count(*) FROM users GROUP BY status HAVING count(*) > 1
diff --git a/parser/testdata/group_by_with_modulo/explain.txt b/parser/testdata/group_by_with_modulo/explain.txt
deleted file mode 100644
index 084b5f6e2..000000000
--- a/parser/testdata/group_by_with_modulo/explain.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 3)
-   ExpressionList (children 1)
-    Function count (children 1)
-     ExpressionList (children 1)
-      Asterisk
-   TablesInSelectQuery (children 1)
-    TablesInSelectQueryElement (children 1)
-     TableExpression (children 1)
-      Function numbers (children 1)
-       ExpressionList (children 1)
-        Literal UInt64_100
-   ExpressionList (children 1)
-    Function modulo (children 1)
-     ExpressionList (children 2)
-      Identifier number
-      Literal UInt64_10
diff --git a/parser/testdata/group_by_with_modulo/query.sql b/parser/testdata/group_by_with_modulo/query.sql
deleted file mode 100644
index 9ab5d5ad2..000000000
--- a/parser/testdata/group_by_with_modulo/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT count(*) FROM numbers(100) GROUP BY number % 10
diff --git a/parser/testdata/group_by_with_totals/explain.txt b/parser/testdata/group_by_with_totals/explain.txt
deleted file mode 100644
index d9a6bdd32..000000000
--- a/parser/testdata/group_by_with_totals/explain.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 3)
-   ExpressionList (children 2)
-    Function modulo (children 1)
-     ExpressionList (children 2)
-      Identifier number
-      Literal UInt64_10
-    Function count (children 1)
-     ExpressionList (children 1)
-      Asterisk
-   TablesInSelectQuery (children 1)
-    TablesInSelectQueryElement (children 1)
-     TableExpression (children 1)
-      Function numbers (children 1)
-       ExpressionList (children 1)
-        Literal UInt64_100
-   ExpressionList (children 1)
-    Function modulo (children 1)
-     ExpressionList (children 2)
-      Identifier number
-      Literal UInt64_10
diff --git a/parser/testdata/group_by_with_totals/query.sql b/parser/testdata/group_by_with_totals/query.sql
deleted file mode 100644
index ae6702a42..000000000
--- a/parser/testdata/group_by_with_totals/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT number % 10, count(*) FROM numbers(100) GROUP BY number % 10 WITH TOTALS
diff --git a/parser/testdata/grouparray/explain.txt b/parser/testdata/grouparray/explain.txt
deleted file mode 100644
index da985cedc..000000000
--- a/parser/testdata/grouparray/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function groupArray (children 1)
-     ExpressionList (children 1)
-      Literal UInt64_1
diff --git a/parser/testdata/grouparray/query.sql b/parser/testdata/grouparray/query.sql
deleted file mode 100644
index a934d1762..000000000
--- a/parser/testdata/grouparray/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT groupArray(1)
diff --git a/parser/testdata/groupuniqarray/explain.txt b/parser/testdata/groupuniqarray/explain.txt
deleted file mode 100644
index f2ea5f21c..000000000
--- a/parser/testdata/groupuniqarray/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function groupUniqArray (children 1)
-     ExpressionList (children 1)
-      Literal UInt64_1
diff --git a/parser/testdata/groupuniqarray/query.sql b/parser/testdata/groupuniqarray/query.sql
deleted file mode 100644
index b02bb67bc..000000000
--- a/parser/testdata/groupuniqarray/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT groupUniqArray(1)
diff --git a/parser/testdata/has_function/explain.txt b/parser/testdata/has_function/explain.txt
deleted file mode 100644
index 8dcc81693..000000000
--- a/parser/testdata/has_function/explain.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function has (children 1)
-     ExpressionList (children 2)
-      Literal Array_[UInt64_1, UInt64_2, UInt64_3]
-      Literal UInt64_2
diff --git a/parser/testdata/has_function/query.sql b/parser/testdata/has_function/query.sql
deleted file mode 100644
index 22e27aa33..000000000
--- a/parser/testdata/has_function/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT has([1, 2, 3], 2)
diff --git a/parser/testdata/hex/explain.txt b/parser/testdata/hex/explain.txt
deleted file mode 100644
index 4b3ff4f0a..000000000
--- a/parser/testdata/hex/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function hex (children 1)
-     ExpressionList (children 1)
-      Literal \'hello\'
diff --git a/parser/testdata/hex/query.sql b/parser/testdata/hex/query.sql
deleted file mode 100644
index 6d1191bab..000000000
--- a/parser/testdata/hex/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT hex('hello')
diff --git a/parser/testdata/hostname/explain.txt b/parser/testdata/hostname/explain.txt
deleted file mode 100644
index ceac70c2f..000000000
--- a/parser/testdata/hostname/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function hostName (children 1)
-     ExpressionList
diff --git a/parser/testdata/hostname/query.sql b/parser/testdata/hostname/query.sql
deleted file mode 100644
index e74a593e1..000000000
--- a/parser/testdata/hostname/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT hostName()
diff --git a/parser/testdata/if_function/explain.txt b/parser/testdata/if_function/explain.txt
deleted file mode 100644
index 57be2bad0..000000000
--- a/parser/testdata/if_function/explain.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function if (children 1)
-     ExpressionList (children 3)
-      Function greater (children 1)
-       ExpressionList (children 2)
-        Literal UInt64_1
-        Literal UInt64_0
-      Literal UInt64_1
-      Literal UInt64_0
diff --git a/parser/testdata/if_function/query.sql b/parser/testdata/if_function/query.sql
deleted file mode 100644
index 465e50b23..000000000
--- a/parser/testdata/if_function/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT if(1 > 0, 1, 0)
diff --git a/parser/testdata/ifnull/explain.txt b/parser/testdata/ifnull/explain.txt
deleted file mode 100644
index f6c413cd7..000000000
--- a/parser/testdata/ifnull/explain.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function ifNull (children 1)
-     ExpressionList (children 2)
-      Literal NULL
-      Literal UInt64_0
diff --git a/parser/testdata/ifnull/query.sql b/parser/testdata/ifnull/query.sql
deleted file mode 100644
index 4b0266102..000000000
--- a/parser/testdata/ifnull/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT ifNull(NULL, 0)
diff --git a/parser/testdata/ignore/explain.txt b/parser/testdata/ignore/explain.txt
deleted file mode 100644
index 9e7c93c35..000000000
--- a/parser/testdata/ignore/explain.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function ignore (children 1)
-     ExpressionList (children 3)
-      Literal UInt64_1
-      Literal UInt64_2
-      Literal UInt64_3
diff --git a/parser/testdata/ignore/query.sql b/parser/testdata/ignore/query.sql
deleted file mode 100644
index a90b1d344..000000000
--- a/parser/testdata/ignore/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT ignore(1, 2, 3)
diff --git a/parser/testdata/ilike/explain.txt b/parser/testdata/ilike/explain.txt
deleted file mode 100644
index 653f15dec..000000000
--- a/parser/testdata/ilike/explain.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function ilike (children 1)
ExpressionList (children 2) - Literal \'HELLO\' - Literal \'%ell%\' diff --git a/parser/testdata/ilike/query.sql b/parser/testdata/ilike/query.sql deleted file mode 100644 index 69db50c0b..000000000 --- a/parser/testdata/ilike/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 'HELLO' ILIKE '%ell%' diff --git a/parser/testdata/implicit_cross_join/explain.txt b/parser/testdata/implicit_cross_join/explain.txt deleted file mode 100644 index c8767344d..000000000 --- a/parser/testdata/implicit_cross_join/explain.txt +++ /dev/null @@ -1,17 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (alias a) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TablesInSelectQueryElement (children 2) - TableExpression (children 1) - Function numbers (alias b) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TableJoin diff --git a/parser/testdata/implicit_cross_join/query.sql b/parser/testdata/implicit_cross_join/query.sql deleted file mode 100644 index 7c62073dd..000000000 --- a/parser/testdata/implicit_cross_join/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM numbers(5) AS a, numbers(5) AS b diff --git a/parser/testdata/in_list/explain.txt b/parser/testdata/in_list/explain.txt deleted file mode 100644 index 5e0f1e1be..000000000 --- a/parser/testdata/in_list/explain.txt +++ /dev/null @@ -1,13 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users - Function in (children 1) - ExpressionList (children 2) - Identifier id - Literal Tuple_(UInt64_1, UInt64_2, UInt64_3) diff --git a/parser/testdata/in_list/query.sql b/parser/testdata/in_list/query.sql deleted file mode 100644 index 69d456c96..000000000 --- a/parser/testdata/in_list/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM users WHERE id IN (1, 2, 3) diff --git a/parser/testdata/indexof/explain.txt b/parser/testdata/indexof/explain.txt deleted file mode 100644 index 6ff342a25..000000000 --- a/parser/testdata/indexof/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function indexOf (children 1) - ExpressionList (children 2) - Literal Array_[UInt64_1, UInt64_2, UInt64_3] - Literal UInt64_2 diff --git a/parser/testdata/indexof/query.sql b/parser/testdata/indexof/query.sql deleted file mode 100644 index 19c26b690..000000000 --- a/parser/testdata/indexof/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT indexOf([1, 2, 3], 2) diff --git a/parser/testdata/inner_join/explain.txt b/parser/testdata/inner_join/explain.txt deleted file mode 100644 index 97343415a..000000000 --- a/parser/testdata/inner_join/explain.txt +++ /dev/null @@ -1,17 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users - TablesInSelectQueryElement (children 2) - TableExpression (children 1) - TableIdentifier orders - TableJoin (children 1) - Function equals (children 1) - ExpressionList (children 2) - 
Identifier users.id - Identifier orders.user_id diff --git a/parser/testdata/inner_join/query.sql b/parser/testdata/inner_join/query.sql deleted file mode 100644 index 3f85645ee..000000000 --- a/parser/testdata/inner_join/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM users INNER JOIN orders ON users.id = orders.user_id diff --git a/parser/testdata/inner_join_on/explain.txt b/parser/testdata/inner_join_on/explain.txt deleted file mode 100644 index ae57e89c2..000000000 --- a/parser/testdata/inner_join_on/explain.txt +++ /dev/null @@ -1,21 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (alias a) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TablesInSelectQueryElement (children 2) - TableExpression (children 1) - Function numbers (alias b) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TableJoin (children 1) - Function equals (children 1) - ExpressionList (children 2) - Identifier a.number - Identifier b.number diff --git a/parser/testdata/inner_join_on/query.sql b/parser/testdata/inner_join_on/query.sql deleted file mode 100644 index fcd91e362..000000000 --- a/parser/testdata/inner_join_on/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM numbers(5) AS a INNER JOIN numbers(5) AS b ON a.number = b.number diff --git a/parser/testdata/insert_format_csv/explain.txt b/parser/testdata/insert_format_csv/explain.txt deleted file mode 100644 index 9b03b6105..000000000 --- a/parser/testdata/insert_format_csv/explain.txt +++ /dev/null @@ -1,2 +0,0 @@ -InsertQuery (children 1) - Identifier test_table diff --git a/parser/testdata/insert_format_csv/query.sql b/parser/testdata/insert_format_csv/query.sql deleted file mode 100644 index 89942ef59..000000000 --- a/parser/testdata/insert_format_csv/query.sql +++ /dev/null @@ -1 +0,0 @@ -INSERT INTO test_table FORMAT CSV diff --git a/parser/testdata/insert_format_json/explain.txt b/parser/testdata/insert_format_json/explain.txt deleted file mode 100644 index 9b03b6105..000000000 --- a/parser/testdata/insert_format_json/explain.txt +++ /dev/null @@ -1,2 +0,0 @@ -InsertQuery (children 1) - Identifier test_table diff --git a/parser/testdata/insert_format_json/query.sql b/parser/testdata/insert_format_json/query.sql deleted file mode 100644 index 157b5b748..000000000 --- a/parser/testdata/insert_format_json/query.sql +++ /dev/null @@ -1 +0,0 @@ -INSERT INTO test_table FORMAT JSONEachRow diff --git a/parser/testdata/insert_into/explain.txt b/parser/testdata/insert_into/explain.txt deleted file mode 100644 index 3e8bb849b..000000000 --- a/parser/testdata/insert_into/explain.txt +++ /dev/null @@ -1,5 +0,0 @@ -InsertQuery (children 2) - Identifier users - ExpressionList (children 2) - Identifier id - Identifier name diff --git a/parser/testdata/insert_into/query.sql b/parser/testdata/insert_into/query.sql deleted file mode 100644 index 365e4d6cf..000000000 --- a/parser/testdata/insert_into/query.sql +++ /dev/null @@ -1 +0,0 @@ -INSERT INTO users (id, name) VALUES diff --git a/parser/testdata/insert_select/explain.txt b/parser/testdata/insert_select/explain.txt deleted file mode 100644 index 21d4a8c7f..000000000 --- a/parser/testdata/insert_select/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -InsertQuery (children 2) - Identifier users - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - 
SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier old_users diff --git a/parser/testdata/insert_select/query.sql b/parser/testdata/insert_select/query.sql deleted file mode 100644 index e4259fbe0..000000000 --- a/parser/testdata/insert_select/query.sql +++ /dev/null @@ -1 +0,0 @@ -INSERT INTO users SELECT * FROM old_users diff --git a/parser/testdata/insert_select_ddl/explain.txt b/parser/testdata/insert_select_ddl/explain.txt deleted file mode 100644 index 672838dd5..000000000 --- a/parser/testdata/insert_select_ddl/explain.txt +++ /dev/null @@ -1,15 +0,0 @@ -InsertQuery (children 3) - Identifier test_table - ExpressionList (children 1) - Identifier id - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/insert_select_ddl/query.sql b/parser/testdata/insert_select_ddl/query.sql deleted file mode 100644 index c9e41f7fe..000000000 --- a/parser/testdata/insert_select_ddl/query.sql +++ /dev/null @@ -1 +0,0 @@ -INSERT INTO test_table (id) SELECT number FROM numbers(10) diff --git a/parser/testdata/insert_values/explain.txt b/parser/testdata/insert_values/explain.txt deleted file mode 100644 index 9b03b6105..000000000 --- a/parser/testdata/insert_values/explain.txt +++ /dev/null @@ -1,2 +0,0 @@ -InsertQuery (children 1) - Identifier test_table diff --git a/parser/testdata/insert_values/query.sql b/parser/testdata/insert_values/query.sql deleted file mode 100644 index 96ff31887..000000000 --- a/parser/testdata/insert_values/query.sql +++ /dev/null @@ -1 +0,0 @@ -INSERT INTO test_table VALUES diff --git a/parser/testdata/insert_with_columns/explain.txt b/parser/testdata/insert_with_columns/explain.txt deleted file mode 100644 index 707d31a96..000000000 --- a/parser/testdata/insert_with_columns/explain.txt +++ /dev/null @@ -1,5 +0,0 @@ -InsertQuery (children 2) - Identifier test_table - ExpressionList (children 2) - Identifier id - Identifier name diff --git a/parser/testdata/insert_with_columns/query.sql b/parser/testdata/insert_with_columns/query.sql deleted file mode 100644 index c164ca761..000000000 --- a/parser/testdata/insert_with_columns/query.sql +++ /dev/null @@ -1 +0,0 @@ -INSERT INTO test_table (id, name) VALUES diff --git a/parser/testdata/integer_literal/explain.txt b/parser/testdata/integer_literal/explain.txt deleted file mode 100644 index 42828662e..000000000 --- a/parser/testdata/integer_literal/explain.txt +++ /dev/null @@ -1,5 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Literal UInt64_42 diff --git a/parser/testdata/integer_literal/query.sql b/parser/testdata/integer_literal/query.sql deleted file mode 100644 index 3c24d87ab..000000000 --- a/parser/testdata/integer_literal/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 42 diff --git a/parser/testdata/interval_add/explain.txt b/parser/testdata/interval_add/explain.txt deleted file mode 100644 index abbd37d7b..000000000 --- a/parser/testdata/interval_add/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList 
(children 1) - Function plus (children 1) - ExpressionList (children 2) - Function now (children 1) - ExpressionList - Function toIntervalDay (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/interval_add/query.sql b/parser/testdata/interval_add/query.sql deleted file mode 100644 index 7841161b9..000000000 --- a/parser/testdata/interval_add/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT now() + INTERVAL 1 DAY diff --git a/parser/testdata/interval_subtract/explain.txt b/parser/testdata/interval_subtract/explain.txt deleted file mode 100644 index 5151c8757..000000000 --- a/parser/testdata/interval_subtract/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function minus (children 1) - ExpressionList (children 2) - Function now (children 1) - ExpressionList - Function toIntervalHour (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/interval_subtract/query.sql b/parser/testdata/interval_subtract/query.sql deleted file mode 100644 index 92103affb..000000000 --- a/parser/testdata/interval_subtract/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT now() - INTERVAL 1 HOUR diff --git a/parser/testdata/into_outfile/explain.txt b/parser/testdata/into_outfile/explain.txt deleted file mode 100644 index 78ca0c1cc..000000000 --- a/parser/testdata/into_outfile/explain.txt +++ /dev/null @@ -1,6 +0,0 @@ -SelectWithUnionQuery (children 2) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Literal UInt64_1 - Literal \'output.csv\' diff --git a/parser/testdata/into_outfile/query.sql b/parser/testdata/into_outfile/query.sql deleted file mode 100644 index 9f41a4e91..000000000 --- a/parser/testdata/into_outfile/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 INTO OUTFILE 'output.csv' diff --git a/parser/testdata/into_outfile_format/explain.txt b/parser/testdata/into_outfile_format/explain.txt deleted file mode 100644 index d5b255825..000000000 --- a/parser/testdata/into_outfile_format/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 3) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Literal UInt64_1 - Literal \'output.csv\' - Identifier CSV diff --git a/parser/testdata/into_outfile_format/query.sql b/parser/testdata/into_outfile_format/query.sql deleted file mode 100644 index a8b3444e3..000000000 --- a/parser/testdata/into_outfile_format/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 INTO OUTFILE 'output.csv' FORMAT CSV diff --git a/parser/testdata/ipv4numtostring/explain.txt b/parser/testdata/ipv4numtostring/explain.txt deleted file mode 100644 index 890ff19a7..000000000 --- a/parser/testdata/ipv4numtostring/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function IPv4NumToString (children 1) - ExpressionList (children 1) - Literal UInt64_3232235777 diff --git a/parser/testdata/ipv4numtostring/query.sql b/parser/testdata/ipv4numtostring/query.sql deleted file mode 100644 index e2d841b7c..000000000 --- a/parser/testdata/ipv4numtostring/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT IPv4NumToString(3232235777) diff --git a/parser/testdata/ipv4stringtonum/explain.txt b/parser/testdata/ipv4stringtonum/explain.txt deleted file mode 100644 index 09c3915d8..000000000 --- 
a/parser/testdata/ipv4stringtonum/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function IPv4StringToNum (children 1) - ExpressionList (children 1) - Literal \'192.168.1.1\' diff --git a/parser/testdata/ipv4stringtonum/query.sql b/parser/testdata/ipv4stringtonum/query.sql deleted file mode 100644 index 08749e1c3..000000000 --- a/parser/testdata/ipv4stringtonum/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT IPv4StringToNum('192.168.1.1') diff --git a/parser/testdata/ipv6numtostring/explain.txt b/parser/testdata/ipv6numtostring/explain.txt deleted file mode 100644 index 50f553e2c..000000000 --- a/parser/testdata/ipv6numtostring/explain.txt +++ /dev/null @@ -1,10 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function IPv6NumToString (children 1) - ExpressionList (children 1) - Function toFixedString (children 1) - ExpressionList (children 2) - Literal \'0000000000000001\' - Literal UInt64_16 diff --git a/parser/testdata/ipv6numtostring/query.sql b/parser/testdata/ipv6numtostring/query.sql deleted file mode 100644 index 04446ef01..000000000 --- a/parser/testdata/ipv6numtostring/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT IPv6NumToString(toFixedString('0000000000000001', 16)) diff --git a/parser/testdata/is_not_null/explain.txt b/parser/testdata/is_not_null/explain.txt deleted file mode 100644 index cc27f7940..000000000 --- a/parser/testdata/is_not_null/explain.txt +++ /dev/null @@ -1,12 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users - Function isNotNull (children 1) - ExpressionList (children 1) - Identifier name diff --git a/parser/testdata/is_not_null/query.sql b/parser/testdata/is_not_null/query.sql deleted file mode 100644 index db29931ee..000000000 --- a/parser/testdata/is_not_null/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM users WHERE name IS NOT NULL diff --git a/parser/testdata/is_null/explain.txt b/parser/testdata/is_null/explain.txt deleted file mode 100644 index f7fee2ec0..000000000 --- a/parser/testdata/is_null/explain.txt +++ /dev/null @@ -1,12 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users - Function isNull (children 1) - ExpressionList (children 1) - Identifier name diff --git a/parser/testdata/is_null/query.sql b/parser/testdata/is_null/query.sql deleted file mode 100644 index da2db040d..000000000 --- a/parser/testdata/is_null/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM users WHERE name IS NULL diff --git a/parser/testdata/join_using/explain.txt b/parser/testdata/join_using/explain.txt deleted file mode 100644 index 3cff2891e..000000000 --- a/parser/testdata/join_using/explain.txt +++ /dev/null @@ -1,19 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (alias a) (children 1) - ExpressionList 
(children 1) - Literal UInt64_5 - TablesInSelectQueryElement (children 2) - TableExpression (children 1) - Function numbers (alias b) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TableJoin (children 1) - ExpressionList (children 1) - Identifier number diff --git a/parser/testdata/join_using/query.sql b/parser/testdata/join_using/query.sql deleted file mode 100644 index 539917757..000000000 --- a/parser/testdata/join_using/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM numbers(5) AS a JOIN numbers(5) AS b USING number diff --git a/parser/testdata/jsonextract/explain.txt b/parser/testdata/jsonextract/explain.txt deleted file mode 100644 index 3f6be4505..000000000 --- a/parser/testdata/jsonextract/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function JSONExtract (children 1) - ExpressionList (children 3) - Literal \'{"a": 1}\' - Literal \'a\' - Literal \'Int32\' diff --git a/parser/testdata/jsonextract/query.sql b/parser/testdata/jsonextract/query.sql deleted file mode 100644 index a1cb96a97..000000000 --- a/parser/testdata/jsonextract/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT JSONExtract('{"a": 1}', 'a', 'Int32') diff --git a/parser/testdata/jsonextractarrayraw/explain.txt b/parser/testdata/jsonextractarrayraw/explain.txt deleted file mode 100644 index bf797745d..000000000 --- a/parser/testdata/jsonextractarrayraw/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function JSONExtractArrayRaw (children 1) - ExpressionList (children 2) - Literal \'{"a": [1,2,3]}\' - Literal \'a\' diff --git a/parser/testdata/jsonextractarrayraw/query.sql b/parser/testdata/jsonextractarrayraw/query.sql deleted file mode 100644 index 54a5b810b..000000000 --- a/parser/testdata/jsonextractarrayraw/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT JSONExtractArrayRaw('{"a": [1,2,3]}', 'a') diff --git a/parser/testdata/jsonextractbool/explain.txt b/parser/testdata/jsonextractbool/explain.txt deleted file mode 100644 index 5084670d3..000000000 --- a/parser/testdata/jsonextractbool/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function JSONExtractBool (children 1) - ExpressionList (children 2) - Literal \'{"a": true}\' - Literal \'a\' diff --git a/parser/testdata/jsonextractbool/query.sql b/parser/testdata/jsonextractbool/query.sql deleted file mode 100644 index b94c29e04..000000000 --- a/parser/testdata/jsonextractbool/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT JSONExtractBool('{"a": true}', 'a') diff --git a/parser/testdata/jsonextractfloat/explain.txt b/parser/testdata/jsonextractfloat/explain.txt deleted file mode 100644 index 3e7d65a73..000000000 --- a/parser/testdata/jsonextractfloat/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function JSONExtractFloat (children 1) - ExpressionList (children 2) - Literal \'{"a": 1.5}\' - Literal \'a\' diff --git a/parser/testdata/jsonextractfloat/query.sql b/parser/testdata/jsonextractfloat/query.sql deleted file mode 100644 index abe246572..000000000 --- a/parser/testdata/jsonextractfloat/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT JSONExtractFloat('{"a": 1.5}', 'a') diff --git 
a/parser/testdata/jsonextractint/explain.txt b/parser/testdata/jsonextractint/explain.txt deleted file mode 100644 index fbaf37402..000000000 --- a/parser/testdata/jsonextractint/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function JSONExtractInt (children 1) - ExpressionList (children 2) - Literal \'{"a": 1}\' - Literal \'a\' diff --git a/parser/testdata/jsonextractint/query.sql b/parser/testdata/jsonextractint/query.sql deleted file mode 100644 index 0b7bdf3ed..000000000 --- a/parser/testdata/jsonextractint/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT JSONExtractInt('{"a": 1}', 'a') diff --git a/parser/testdata/jsonextractkeysandvalues/explain.txt b/parser/testdata/jsonextractkeysandvalues/explain.txt deleted file mode 100644 index 06c76fb92..000000000 --- a/parser/testdata/jsonextractkeysandvalues/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function JSONExtractKeysAndValues (children 1) - ExpressionList (children 2) - Literal \'{"a": 1, "b": 2}\' - Literal \'Int32\' diff --git a/parser/testdata/jsonextractkeysandvalues/query.sql b/parser/testdata/jsonextractkeysandvalues/query.sql deleted file mode 100644 index e0b1b4f86..000000000 --- a/parser/testdata/jsonextractkeysandvalues/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT JSONExtractKeysAndValues('{"a": 1, "b": 2}', 'Int32') diff --git a/parser/testdata/jsonextractraw/explain.txt b/parser/testdata/jsonextractraw/explain.txt deleted file mode 100644 index 08e2a2227..000000000 --- a/parser/testdata/jsonextractraw/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function JSONExtractRaw (children 1) - ExpressionList (children 2) - Literal \'{"a": [1,2,3]}\' - Literal \'a\' diff --git a/parser/testdata/jsonextractraw/query.sql b/parser/testdata/jsonextractraw/query.sql deleted file mode 100644 index fdd0f1f7f..000000000 --- a/parser/testdata/jsonextractraw/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT JSONExtractRaw('{"a": [1,2,3]}', 'a') diff --git a/parser/testdata/jsonextractstring/explain.txt b/parser/testdata/jsonextractstring/explain.txt deleted file mode 100644 index e42a0b420..000000000 --- a/parser/testdata/jsonextractstring/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function JSONExtractString (children 1) - ExpressionList (children 2) - Literal \'{"a": "b"}\' - Literal \'a\' diff --git a/parser/testdata/jsonextractstring/query.sql b/parser/testdata/jsonextractstring/query.sql deleted file mode 100644 index 6a994d582..000000000 --- a/parser/testdata/jsonextractstring/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT JSONExtractString('{"a": "b"}', 'a') diff --git a/parser/testdata/lag_function/explain.txt b/parser/testdata/lag_function/explain.txt deleted file mode 100644 index e7245f45b..000000000 --- a/parser/testdata/lag_function/explain.txt +++ /dev/null @@ -1,18 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 2) - Identifier number - Function lag (children 2) - ExpressionList (children 1) - Identifier number - WindowDefinition (children 1) - ExpressionList (children 1) - 
OrderByElement (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/lag_function/query.sql b/parser/testdata/lag_function/query.sql deleted file mode 100644 index 771fe1350..000000000 --- a/parser/testdata/lag_function/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT number, lag(number) OVER (ORDER BY number) FROM numbers(10) diff --git a/parser/testdata/last_value_function/explain.txt b/parser/testdata/last_value_function/explain.txt deleted file mode 100644 index 2372a025c..000000000 --- a/parser/testdata/last_value_function/explain.txt +++ /dev/null @@ -1,18 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 2) - Identifier number - Function last_value (children 2) - ExpressionList (children 1) - Identifier number - WindowDefinition (children 1) - ExpressionList (children 1) - OrderByElement (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/last_value_function/query.sql b/parser/testdata/last_value_function/query.sql deleted file mode 100644 index 543d048cb..000000000 --- a/parser/testdata/last_value_function/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT number, last_value(number) OVER (ORDER BY number) FROM numbers(10) diff --git a/parser/testdata/lead_function/explain.txt b/parser/testdata/lead_function/explain.txt deleted file mode 100644 index ffca4674d..000000000 --- a/parser/testdata/lead_function/explain.txt +++ /dev/null @@ -1,18 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 2) - Identifier number - Function lead (children 2) - ExpressionList (children 1) - Identifier number - WindowDefinition (children 1) - ExpressionList (children 1) - OrderByElement (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/lead_function/query.sql b/parser/testdata/lead_function/query.sql deleted file mode 100644 index d9e9edeed..000000000 --- a/parser/testdata/lead_function/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT number, lead(number) OVER (ORDER BY number) FROM numbers(10) diff --git a/parser/testdata/left_array_join/explain.txt b/parser/testdata/left_array_join/explain.txt deleted file mode 100644 index 8607ddb1b..000000000 --- a/parser/testdata/left_array_join/explain.txt +++ /dev/null @@ -1,15 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 3) - Identifier s - Identifier arr - Identifier a - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier arrays_test - TablesInSelectQueryElement (children 1) - ArrayJoin (children 1) - ExpressionList (children 1) - Identifier arr (alias a) diff --git a/parser/testdata/left_array_join/query.sql b/parser/testdata/left_array_join/query.sql deleted file mode 100644 index ff89a5a32..000000000 --- a/parser/testdata/left_array_join/query.sql +++ /dev/null @@ -1 +0,0 @@ 
-SELECT s, arr, a FROM arrays_test LEFT ARRAY JOIN arr AS a diff --git a/parser/testdata/left_join/explain.txt b/parser/testdata/left_join/explain.txt deleted file mode 100644 index 97343415a..000000000 --- a/parser/testdata/left_join/explain.txt +++ /dev/null @@ -1,17 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users - TablesInSelectQueryElement (children 2) - TableExpression (children 1) - TableIdentifier orders - TableJoin (children 1) - Function equals (children 1) - ExpressionList (children 2) - Identifier users.id - Identifier orders.user_id diff --git a/parser/testdata/left_join/query.sql b/parser/testdata/left_join/query.sql deleted file mode 100644 index 5e90cbbc2..000000000 --- a/parser/testdata/left_join/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM users LEFT JOIN orders ON users.id = orders.user_id diff --git a/parser/testdata/left_join_on/explain.txt b/parser/testdata/left_join_on/explain.txt deleted file mode 100644 index ae57e89c2..000000000 --- a/parser/testdata/left_join_on/explain.txt +++ /dev/null @@ -1,21 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (alias a) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TablesInSelectQueryElement (children 2) - TableExpression (children 1) - Function numbers (alias b) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TableJoin (children 1) - Function equals (children 1) - ExpressionList (children 2) - Identifier a.number - Identifier b.number diff --git a/parser/testdata/left_join_on/query.sql b/parser/testdata/left_join_on/query.sql deleted file mode 100644 index 4f45095b6..000000000 --- a/parser/testdata/left_join_on/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM numbers(5) AS a LEFT JOIN numbers(5) AS b ON a.number = b.number diff --git a/parser/testdata/left_outer_join/explain.txt b/parser/testdata/left_outer_join/explain.txt deleted file mode 100644 index ae57e89c2..000000000 --- a/parser/testdata/left_outer_join/explain.txt +++ /dev/null @@ -1,21 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (alias a) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TablesInSelectQueryElement (children 2) - TableExpression (children 1) - Function numbers (alias b) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TableJoin (children 1) - Function equals (children 1) - ExpressionList (children 2) - Identifier a.number - Identifier b.number diff --git a/parser/testdata/left_outer_join/query.sql b/parser/testdata/left_outer_join/query.sql deleted file mode 100644 index 6f4019026..000000000 --- a/parser/testdata/left_outer_join/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM numbers(5) AS a LEFT OUTER JOIN numbers(5) AS b ON a.number = b.number diff --git a/parser/testdata/leftpad/explain.txt b/parser/testdata/leftpad/explain.txt deleted file mode 100644 index aeb33b7bc..000000000 --- 
a/parser/testdata/leftpad/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function leftPad (children 1) - ExpressionList (children 3) - Literal \'123\' - Literal UInt64_5 - Literal \'0\' diff --git a/parser/testdata/leftpad/query.sql b/parser/testdata/leftpad/query.sql deleted file mode 100644 index ba3f2db09..000000000 --- a/parser/testdata/leftpad/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT leftPad('123', 5, '0') diff --git a/parser/testdata/length_on_array/explain.txt b/parser/testdata/length_on_array/explain.txt deleted file mode 100644 index 5f442ad48..000000000 --- a/parser/testdata/length_on_array/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function length (children 1) - ExpressionList (children 1) - Literal Array_[UInt64_1, UInt64_2, UInt64_3] diff --git a/parser/testdata/length_on_array/query.sql b/parser/testdata/length_on_array/query.sql deleted file mode 100644 index 174810c22..000000000 --- a/parser/testdata/length_on_array/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT length([1, 2, 3]) diff --git a/parser/testdata/length_string/explain.txt b/parser/testdata/length_string/explain.txt deleted file mode 100644 index 8918b9986..000000000 --- a/parser/testdata/length_string/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function length (children 1) - ExpressionList (children 1) - Literal \'hello\' diff --git a/parser/testdata/length_string/query.sql b/parser/testdata/length_string/query.sql deleted file mode 100644 index b19f3a3ad..000000000 --- a/parser/testdata/length_string/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT length('hello') diff --git a/parser/testdata/like/explain.txt b/parser/testdata/like/explain.txt deleted file mode 100644 index 64a8cc29e..000000000 --- a/parser/testdata/like/explain.txt +++ /dev/null @@ -1,13 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users - Function like (children 1) - ExpressionList (children 2) - Identifier name - Literal \'%test%\' diff --git a/parser/testdata/like/query.sql b/parser/testdata/like/query.sql deleted file mode 100644 index 9bd5d5bfd..000000000 --- a/parser/testdata/like/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM users WHERE name LIKE '%test%' diff --git a/parser/testdata/like_regex/explain.txt b/parser/testdata/like_regex/explain.txt deleted file mode 100644 index 54b42328e..000000000 --- a/parser/testdata/like_regex/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function like (children 1) - ExpressionList (children 2) - Literal \'hello\' - Literal \'%ell%\' diff --git a/parser/testdata/like_regex/query.sql b/parser/testdata/like_regex/query.sql deleted file mode 100644 index 873415bb6..000000000 --- a/parser/testdata/like_regex/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 'hello' LIKE '%ell%' diff --git a/parser/testdata/limit/explain.txt b/parser/testdata/limit/explain.txt deleted file mode 100644 index 
cecede825..000000000 --- a/parser/testdata/limit/explain.txt +++ /dev/null @@ -1,6 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Literal UInt64_1 - Literal UInt64_10 diff --git a/parser/testdata/limit/query.sql b/parser/testdata/limit/query.sql deleted file mode 100644 index d99c5d610..000000000 --- a/parser/testdata/limit/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 LIMIT 10 diff --git a/parser/testdata/limit_offset/explain.txt b/parser/testdata/limit_offset/explain.txt deleted file mode 100644 index 675aa6b31..000000000 --- a/parser/testdata/limit_offset/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Literal UInt64_1 - Literal UInt64_5 - Literal UInt64_10 diff --git a/parser/testdata/limit_offset/query.sql b/parser/testdata/limit_offset/query.sql deleted file mode 100644 index bdabc414b..000000000 --- a/parser/testdata/limit_offset/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 LIMIT 10 OFFSET 5 diff --git a/parser/testdata/logical_and/explain.txt b/parser/testdata/logical_and/explain.txt deleted file mode 100644 index 7b5212a41..000000000 --- a/parser/testdata/logical_and/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function and (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Literal UInt64_2 diff --git a/parser/testdata/logical_and/query.sql b/parser/testdata/logical_and/query.sql deleted file mode 100644 index 73d87f929..000000000 --- a/parser/testdata/logical_and/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 AND 2 diff --git a/parser/testdata/logical_not/explain.txt b/parser/testdata/logical_not/explain.txt deleted file mode 100644 index c6ac2a7a0..000000000 --- a/parser/testdata/logical_not/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function not (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/logical_not/query.sql b/parser/testdata/logical_not/query.sql deleted file mode 100644 index c49c54864..000000000 --- a/parser/testdata/logical_not/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT NOT 1 diff --git a/parser/testdata/logical_not_true/explain.txt b/parser/testdata/logical_not_true/explain.txt deleted file mode 100644 index ca7ee27d3..000000000 --- a/parser/testdata/logical_not_true/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function not (children 1) - ExpressionList (children 1) - Literal Bool_1 diff --git a/parser/testdata/logical_not_true/query.sql b/parser/testdata/logical_not_true/query.sql deleted file mode 100644 index c31697205..000000000 --- a/parser/testdata/logical_not_true/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT NOT true diff --git a/parser/testdata/logical_or/explain.txt b/parser/testdata/logical_or/explain.txt deleted file mode 100644 index ac456115b..000000000 --- a/parser/testdata/logical_or/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function or (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Literal 
UInt64_2 diff --git a/parser/testdata/logical_or/query.sql b/parser/testdata/logical_or/query.sql deleted file mode 100644 index 71bd4e2ff..000000000 --- a/parser/testdata/logical_or/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 OR 2 diff --git a/parser/testdata/lower/explain.txt b/parser/testdata/lower/explain.txt deleted file mode 100644 index 8ef2dbc4b..000000000 --- a/parser/testdata/lower/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function lower (children 1) - ExpressionList (children 1) - Literal \'HELLO\' diff --git a/parser/testdata/lower/query.sql b/parser/testdata/lower/query.sql deleted file mode 100644 index 8eefeff85..000000000 --- a/parser/testdata/lower/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT lower('HELLO') diff --git a/parser/testdata/ltrim/explain.txt b/parser/testdata/ltrim/explain.txt deleted file mode 100644 index dcf2b619f..000000000 --- a/parser/testdata/ltrim/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function trimLeft (children 1) - ExpressionList (children 1) - Literal \' hello\' diff --git a/parser/testdata/ltrim/query.sql b/parser/testdata/ltrim/query.sql deleted file mode 100644 index 34c35d0af..000000000 --- a/parser/testdata/ltrim/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT ltrim(' hello') diff --git a/parser/testdata/map_element_access/explain.txt b/parser/testdata/map_element_access/explain.txt deleted file mode 100644 index c46f5f0f0..000000000 --- a/parser/testdata/map_element_access/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function arrayElement (children 1) - ExpressionList (children 2) - Function map (children 1) - ExpressionList (children 2) - Literal \'key\' - Literal \'value\' - Literal \'key\' diff --git a/parser/testdata/map_element_access/query.sql b/parser/testdata/map_element_access/query.sql deleted file mode 100644 index ef7900d39..000000000 --- a/parser/testdata/map_element_access/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT map('key', 'value')['key'] diff --git a/parser/testdata/map_function/explain.txt b/parser/testdata/map_function/explain.txt deleted file mode 100644 index 5630699a4..000000000 --- a/parser/testdata/map_function/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function map (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Literal UInt64_2 diff --git a/parser/testdata/map_function/query.sql b/parser/testdata/map_function/query.sql deleted file mode 100644 index fb7ba5db3..000000000 --- a/parser/testdata/map_function/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT map(1, 2) diff --git a/parser/testdata/mapcontains/explain.txt b/parser/testdata/mapcontains/explain.txt deleted file mode 100644 index 21ba72cd1..000000000 --- a/parser/testdata/mapcontains/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function mapContains (children 1) - ExpressionList (children 2) - Function map (children 1) - ExpressionList (children 2) - Literal \'a\' - Literal UInt64_1 - Literal \'a\' diff --git 
a/parser/testdata/mapcontains/query.sql b/parser/testdata/mapcontains/query.sql deleted file mode 100644 index 42da1266e..000000000 --- a/parser/testdata/mapcontains/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT mapContains(map('a', 1), 'a') diff --git a/parser/testdata/mapkeys/explain.txt b/parser/testdata/mapkeys/explain.txt deleted file mode 100644 index 43ef5dba2..000000000 --- a/parser/testdata/mapkeys/explain.txt +++ /dev/null @@ -1,12 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function mapKeys (children 1) - ExpressionList (children 1) - Function map (children 1) - ExpressionList (children 4) - Literal \'a\' - Literal UInt64_1 - Literal \'b\' - Literal UInt64_2 diff --git a/parser/testdata/mapkeys/query.sql b/parser/testdata/mapkeys/query.sql deleted file mode 100644 index 19e9adf99..000000000 --- a/parser/testdata/mapkeys/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT mapKeys(map('a', 1, 'b', 2)) diff --git a/parser/testdata/mapvalues/explain.txt b/parser/testdata/mapvalues/explain.txt deleted file mode 100644 index 773f857af..000000000 --- a/parser/testdata/mapvalues/explain.txt +++ /dev/null @@ -1,12 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function mapValues (children 1) - ExpressionList (children 1) - Function map (children 1) - ExpressionList (children 4) - Literal \'a\' - Literal UInt64_1 - Literal \'b\' - Literal UInt64_2 diff --git a/parser/testdata/mapvalues/query.sql b/parser/testdata/mapvalues/query.sql deleted file mode 100644 index d4a239aee..000000000 --- a/parser/testdata/mapvalues/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT mapValues(map('a', 1, 'b', 2)) diff --git a/parser/testdata/match/explain.txt b/parser/testdata/match/explain.txt deleted file mode 100644 index 870d15ba0..000000000 --- a/parser/testdata/match/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function match (children 1) - ExpressionList (children 2) - Literal \'hello\' - Literal \'h.*o\' diff --git a/parser/testdata/match/query.sql b/parser/testdata/match/query.sql deleted file mode 100644 index d64ea3b67..000000000 --- a/parser/testdata/match/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT match('hello', 'h.*o') diff --git a/parser/testdata/materialize/explain.txt b/parser/testdata/materialize/explain.txt deleted file mode 100644 index 1abae3e95..000000000 --- a/parser/testdata/materialize/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function materialize (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/materialize/query.sql b/parser/testdata/materialize/query.sql deleted file mode 100644 index 9881bc275..000000000 --- a/parser/testdata/materialize/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT materialize(1) diff --git a/parser/testdata/materialize_index/explain.txt b/parser/testdata/materialize_index/explain.txt deleted file mode 100644 index 72f7be4d1..000000000 --- a/parser/testdata/materialize_index/explain.txt +++ /dev/null @@ -1,5 +0,0 @@ -AlterQuery test_table (children 2) - ExpressionList (children 1) - AlterCommand MATERIALIZE_INDEX (children 1) - Identifier idx - Identifier test_table diff --git 
a/parser/testdata/materialize_index/query.sql b/parser/testdata/materialize_index/query.sql deleted file mode 100644 index 0381520e0..000000000 --- a/parser/testdata/materialize_index/query.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE test_table MATERIALIZE INDEX idx diff --git a/parser/testdata/md5/explain.txt b/parser/testdata/md5/explain.txt deleted file mode 100644 index 2aff8a8ba..000000000 --- a/parser/testdata/md5/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function MD5 (children 1) - ExpressionList (children 1) - Literal \'hello\' diff --git a/parser/testdata/md5/query.sql b/parser/testdata/md5/query.sql deleted file mode 100644 index b09c380ea..000000000 --- a/parser/testdata/md5/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT MD5('hello') diff --git a/parser/testdata/min_max/explain.txt b/parser/testdata/min_max/explain.txt deleted file mode 100644 index 1ef702630..000000000 --- a/parser/testdata/min_max/explain.txt +++ /dev/null @@ -1,14 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 2) - Function min (children 1) - ExpressionList (children 1) - Identifier id - Function max (children 1) - ExpressionList (children 1) - Identifier id - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users diff --git a/parser/testdata/min_max/query.sql b/parser/testdata/min_max/query.sql deleted file mode 100644 index d2dabe93f..000000000 --- a/parser/testdata/min_max/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT min(id), max(id) FROM users diff --git a/parser/testdata/min_max_functions/explain.txt b/parser/testdata/min_max_functions/explain.txt deleted file mode 100644 index 83f6a63f6..000000000 --- a/parser/testdata/min_max_functions/explain.txt +++ /dev/null @@ -1,10 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 2) - Function min (children 1) - ExpressionList (children 1) - Literal UInt64_1 - Function max (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/min_max_functions/query.sql b/parser/testdata/min_max_functions/query.sql deleted file mode 100644 index d15fe5e06..000000000 --- a/parser/testdata/min_max_functions/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT min(1), max(1) diff --git a/parser/testdata/modify_column/explain.txt b/parser/testdata/modify_column/explain.txt deleted file mode 100644 index 437f1afc3..000000000 --- a/parser/testdata/modify_column/explain.txt +++ /dev/null @@ -1,6 +0,0 @@ -AlterQuery test_table (children 2) - ExpressionList (children 1) - AlterCommand MODIFY_COLUMN (children 1) - ColumnDeclaration col (children 1) - DataType UInt64 - Identifier test_table diff --git a/parser/testdata/modify_column/query.sql b/parser/testdata/modify_column/query.sql deleted file mode 100644 index ca4efb1ec..000000000 --- a/parser/testdata/modify_column/query.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE test_table MODIFY COLUMN col UInt64 diff --git a/parser/testdata/modify_ttl/explain.txt b/parser/testdata/modify_ttl/explain.txt deleted file mode 100644 index 1f5bf5c09..000000000 --- a/parser/testdata/modify_ttl/explain.txt +++ /dev/null @@ -1,12 +0,0 @@ -AlterQuery test_table (children 2) - ExpressionList (children 1) - AlterCommand MODIFY_TTL (children 1) - ExpressionList (children 1) - 
TTLElement (children 1) - Function plus (children 1) - ExpressionList (children 2) - Identifier dt - Function toIntervalMonth (children 1) - ExpressionList (children 1) - Literal UInt64_1 - Identifier test_table diff --git a/parser/testdata/modify_ttl/query.sql b/parser/testdata/modify_ttl/query.sql deleted file mode 100644 index eca80ca56..000000000 --- a/parser/testdata/modify_ttl/query.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE test_table MODIFY TTL dt + INTERVAL 1 MONTH diff --git a/parser/testdata/multiif_function/explain.txt b/parser/testdata/multiif_function/explain.txt deleted file mode 100644 index d65c7d785..000000000 --- a/parser/testdata/multiif_function/explain.txt +++ /dev/null @@ -1,17 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function multiIf (children 1) - ExpressionList (children 5) - Function greater (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Literal UInt64_0 - Literal UInt64_1 - Function greater (children 1) - ExpressionList (children 2) - Literal UInt64_2 - Literal UInt64_0 - Literal UInt64_2 - Literal UInt64_0 diff --git a/parser/testdata/multiif_function/query.sql b/parser/testdata/multiif_function/query.sql deleted file mode 100644 index 1f749f45d..000000000 --- a/parser/testdata/multiif_function/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT multiIf(1 > 0, 1, 2 > 0, 2, 0) diff --git a/parser/testdata/multiple_aliases/explain.txt b/parser/testdata/multiple_aliases/explain.txt deleted file mode 100644 index 2cda4d6ff..000000000 --- a/parser/testdata/multiple_aliases/explain.txt +++ /dev/null @@ -1,6 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 2) - Literal UInt64_1 (alias x) - Literal UInt64_2 (alias y) diff --git a/parser/testdata/multiple_aliases/query.sql b/parser/testdata/multiple_aliases/query.sql deleted file mode 100644 index 99c26b295..000000000 --- a/parser/testdata/multiple_aliases/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 AS x, 2 AS y diff --git a/parser/testdata/multiple_tables/explain.txt b/parser/testdata/multiple_tables/explain.txt deleted file mode 100644 index 866876992..000000000 --- a/parser/testdata/multiple_tables/explain.txt +++ /dev/null @@ -1,13 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users - TablesInSelectQueryElement (children 2) - TableExpression (children 1) - TableIdentifier orders - TableJoin diff --git a/parser/testdata/multiple_tables/query.sql b/parser/testdata/multiple_tables/query.sql deleted file mode 100644 index 9659203d9..000000000 --- a/parser/testdata/multiple_tables/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM users, orders diff --git a/parser/testdata/multiplication_precedence/explain.txt b/parser/testdata/multiplication_precedence/explain.txt deleted file mode 100644 index ec26ab304..000000000 --- a/parser/testdata/multiplication_precedence/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function plus (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Function multiply (children 1) - ExpressionList (children 2) - Literal UInt64_2 - Literal UInt64_3 diff --git 
a/parser/testdata/multiplication_precedence/query.sql b/parser/testdata/multiplication_precedence/query.sql deleted file mode 100644 index 33cbb5157..000000000 --- a/parser/testdata/multiplication_precedence/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 + 2 * 3 diff --git a/parser/testdata/murmurhash2_32/explain.txt b/parser/testdata/murmurhash2_32/explain.txt deleted file mode 100644 index 5a1d053ef..000000000 --- a/parser/testdata/murmurhash2_32/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function murmurHash2_32 (children 1) - ExpressionList (children 1) - Literal \'hello\' diff --git a/parser/testdata/murmurhash2_32/query.sql b/parser/testdata/murmurhash2_32/query.sql deleted file mode 100644 index c1ff1f1c2..000000000 --- a/parser/testdata/murmurhash2_32/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT murmurHash2_32('hello') diff --git a/parser/testdata/murmurhash2_64/explain.txt b/parser/testdata/murmurhash2_64/explain.txt deleted file mode 100644 index cf8243d39..000000000 --- a/parser/testdata/murmurhash2_64/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function murmurHash2_64 (children 1) - ExpressionList (children 1) - Literal \'hello\' diff --git a/parser/testdata/murmurhash2_64/query.sql b/parser/testdata/murmurhash2_64/query.sql deleted file mode 100644 index e867f4f9d..000000000 --- a/parser/testdata/murmurhash2_64/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT murmurHash2_64('hello') diff --git a/parser/testdata/murmurhash3_128/explain.txt b/parser/testdata/murmurhash3_128/explain.txt deleted file mode 100644 index e92e9a84f..000000000 --- a/parser/testdata/murmurhash3_128/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function murmurHash3_128 (children 1) - ExpressionList (children 1) - Literal \'hello\' diff --git a/parser/testdata/murmurhash3_128/query.sql b/parser/testdata/murmurhash3_128/query.sql deleted file mode 100644 index 1986701e0..000000000 --- a/parser/testdata/murmurhash3_128/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT murmurHash3_128('hello') diff --git a/parser/testdata/murmurhash3_32/explain.txt b/parser/testdata/murmurhash3_32/explain.txt deleted file mode 100644 index 4cd28ad28..000000000 --- a/parser/testdata/murmurhash3_32/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function murmurHash3_32 (children 1) - ExpressionList (children 1) - Literal \'hello\' diff --git a/parser/testdata/murmurhash3_32/query.sql b/parser/testdata/murmurhash3_32/query.sql deleted file mode 100644 index e9c47431a..000000000 --- a/parser/testdata/murmurhash3_32/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT murmurHash3_32('hello') diff --git a/parser/testdata/murmurhash3_64/explain.txt b/parser/testdata/murmurhash3_64/explain.txt deleted file mode 100644 index da1dd92cd..000000000 --- a/parser/testdata/murmurhash3_64/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function murmurHash3_64 (children 1) - ExpressionList (children 1) - Literal \'hello\' diff --git 
a/parser/testdata/murmurhash3_64/query.sql b/parser/testdata/murmurhash3_64/query.sql deleted file mode 100644 index a3207dd63..000000000 --- a/parser/testdata/murmurhash3_64/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT murmurHash3_64('hello') diff --git a/parser/testdata/named_tuple_access/explain.txt b/parser/testdata/named_tuple_access/explain.txt deleted file mode 100644 index 678d308f2..000000000 --- a/parser/testdata/named_tuple_access/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 2) - Literal Tuple_(UInt64_1, UInt64_2, UInt64_3) (alias t) - Function tupleElement (children 1) - ExpressionList (children 2) - Identifier t - Literal UInt64_1 diff --git a/parser/testdata/named_tuple_access/query.sql b/parser/testdata/named_tuple_access/query.sql deleted file mode 100644 index 912269cb1..000000000 --- a/parser/testdata/named_tuple_access/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT (1, 2, 3) AS t, t.1 diff --git a/parser/testdata/named_window/explain.txt b/parser/testdata/named_window/explain.txt deleted file mode 100644 index f8f838146..000000000 --- a/parser/testdata/named_window/explain.txt +++ /dev/null @@ -1,16 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 2) - Identifier number - Function sum (children 1) - ExpressionList (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 - ExpressionList (children 1) - WindowListElement diff --git a/parser/testdata/named_window/query.sql b/parser/testdata/named_window/query.sql deleted file mode 100644 index ad5fb189a..000000000 --- a/parser/testdata/named_window/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT number, sum(number) OVER w FROM numbers(10) WINDOW w AS (ORDER BY number) diff --git a/parser/testdata/neighbor/explain.txt b/parser/testdata/neighbor/explain.txt deleted file mode 100644 index fd4cd357a..000000000 --- a/parser/testdata/neighbor/explain.txt +++ /dev/null @@ -1,14 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Function neighbor (children 1) - ExpressionList (children 2) - Identifier number - Literal UInt64_1 - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/neighbor/query.sql b/parser/testdata/neighbor/query.sql deleted file mode 100644 index f9cd2fbb5..000000000 --- a/parser/testdata/neighbor/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT neighbor(number, 1) FROM numbers(10) diff --git a/parser/testdata/nested_function_call/explain.txt b/parser/testdata/nested_function_call/explain.txt deleted file mode 100644 index 626f9712b..000000000 --- a/parser/testdata/nested_function_call/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toDate (children 1) - ExpressionList (children 1) - Function now (children 1) - ExpressionList diff --git a/parser/testdata/nested_function_call/query.sql b/parser/testdata/nested_function_call/query.sql deleted file mode 100644 index a075ab98a..000000000 --- 
a/parser/testdata/nested_function_call/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toDate(now()) diff --git a/parser/testdata/nested_functions/explain.txt b/parser/testdata/nested_functions/explain.txt deleted file mode 100644 index 1a16764c5..000000000 --- a/parser/testdata/nested_functions/explain.txt +++ /dev/null @@ -1,12 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Function toDate (children 1) - ExpressionList (children 1) - Function now (children 1) - ExpressionList - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users diff --git a/parser/testdata/nested_functions/query.sql b/parser/testdata/nested_functions/query.sql deleted file mode 100644 index 01ecdb076..000000000 --- a/parser/testdata/nested_functions/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toDate(now()) FROM users diff --git a/parser/testdata/not_exists_subquery/explain.txt b/parser/testdata/not_exists_subquery/explain.txt deleted file mode 100644 index c73ac87d0..000000000 --- a/parser/testdata/not_exists_subquery/explain.txt +++ /dev/null @@ -1,22 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 - Function not (children 1) - ExpressionList (children 1) - Function exists (children 1) - ExpressionList (children 1) - Subquery (children 1) - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Literal UInt64_1 - Literal UInt64_0 diff --git a/parser/testdata/not_exists_subquery/query.sql b/parser/testdata/not_exists_subquery/query.sql deleted file mode 100644 index a87d9a92b..000000000 --- a/parser/testdata/not_exists_subquery/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT number FROM numbers(10) WHERE NOT EXISTS (SELECT 1 WHERE 0) diff --git a/parser/testdata/not_ilike/explain.txt b/parser/testdata/not_ilike/explain.txt deleted file mode 100644 index 824c13a5d..000000000 --- a/parser/testdata/not_ilike/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function notILike (children 1) - ExpressionList (children 2) - Literal \'HELLO\' - Literal \'%xyz%\' diff --git a/parser/testdata/not_ilike/query.sql b/parser/testdata/not_ilike/query.sql deleted file mode 100644 index cb9815ddc..000000000 --- a/parser/testdata/not_ilike/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 'HELLO' NOT ILIKE '%xyz%' diff --git a/parser/testdata/not_in/explain.txt b/parser/testdata/not_in/explain.txt deleted file mode 100644 index f834df5b6..000000000 --- a/parser/testdata/not_in/explain.txt +++ /dev/null @@ -1,13 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users - Function notIn (children 1) - ExpressionList (children 2) - Identifier id - Literal Tuple_(UInt64_1, UInt64_2, UInt64_3) diff --git a/parser/testdata/not_in/query.sql b/parser/testdata/not_in/query.sql deleted file mode 100644 index 
ab903c1e8..000000000 --- a/parser/testdata/not_in/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM users WHERE id NOT IN (1, 2, 3) diff --git a/parser/testdata/not_like/explain.txt b/parser/testdata/not_like/explain.txt deleted file mode 100644 index cca874984..000000000 --- a/parser/testdata/not_like/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function notLike (children 1) - ExpressionList (children 2) - Literal \'hello\' - Literal \'%xyz%\' diff --git a/parser/testdata/not_like/query.sql b/parser/testdata/not_like/query.sql deleted file mode 100644 index 84920b6e5..000000000 --- a/parser/testdata/not_like/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 'hello' NOT LIKE '%xyz%' diff --git a/parser/testdata/notempty_on_array/explain.txt b/parser/testdata/notempty_on_array/explain.txt deleted file mode 100644 index 3ed04150e..000000000 --- a/parser/testdata/notempty_on_array/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function notEmpty (children 1) - ExpressionList (children 1) - Literal Array_[UInt64_1, UInt64_2, UInt64_3] diff --git a/parser/testdata/notempty_on_array/query.sql b/parser/testdata/notempty_on_array/query.sql deleted file mode 100644 index 19f36b10a..000000000 --- a/parser/testdata/notempty_on_array/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT notEmpty([1, 2, 3]) diff --git a/parser/testdata/notempty_string/explain.txt b/parser/testdata/notempty_string/explain.txt deleted file mode 100644 index 77e22c34f..000000000 --- a/parser/testdata/notempty_string/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function notEmpty (children 1) - ExpressionList (children 1) - Literal \'\' diff --git a/parser/testdata/notempty_string/query.sql b/parser/testdata/notempty_string/query.sql deleted file mode 100644 index 56bf5099f..000000000 --- a/parser/testdata/notempty_string/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT notEmpty('') diff --git a/parser/testdata/now_function/explain.txt b/parser/testdata/now_function/explain.txt deleted file mode 100644 index c8c2953c8..000000000 --- a/parser/testdata/now_function/explain.txt +++ /dev/null @@ -1,6 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function now (children 1) - ExpressionList diff --git a/parser/testdata/now_function/query.sql b/parser/testdata/now_function/query.sql deleted file mode 100644 index 31700225c..000000000 --- a/parser/testdata/now_function/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT now() diff --git a/parser/testdata/nth_value_function/explain.txt b/parser/testdata/nth_value_function/explain.txt deleted file mode 100644 index c7c26a330..000000000 --- a/parser/testdata/nth_value_function/explain.txt +++ /dev/null @@ -1,19 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 2) - Identifier number - Function nth_value (children 2) - ExpressionList (children 2) - Identifier number - Literal UInt64_2 - WindowDefinition (children 1) - ExpressionList (children 1) - OrderByElement (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression 
(children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/nth_value_function/query.sql b/parser/testdata/nth_value_function/query.sql deleted file mode 100644 index 9b90c0635..000000000 --- a/parser/testdata/nth_value_function/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT number, nth_value(number, 2) OVER (ORDER BY number) FROM numbers(10) diff --git a/parser/testdata/null_literal/explain.txt b/parser/testdata/null_literal/explain.txt deleted file mode 100644 index c67ca85d2..000000000 --- a/parser/testdata/null_literal/explain.txt +++ /dev/null @@ -1,5 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Literal NULL diff --git a/parser/testdata/null_literal/query.sql b/parser/testdata/null_literal/query.sql deleted file mode 100644 index 7824d97b0..000000000 --- a/parser/testdata/null_literal/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT NULL diff --git a/parser/testdata/null_safe_equal/explain.txt b/parser/testdata/null_safe_equal/explain.txt deleted file mode 100644 index 3891134bb..000000000 --- a/parser/testdata/null_safe_equal/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function isNotDistinctFrom (children 1) - ExpressionList (children 2) - Literal NULL - Literal NULL diff --git a/parser/testdata/null_safe_equal/query.sql b/parser/testdata/null_safe_equal/query.sql deleted file mode 100644 index 3c0bd6aeb..000000000 --- a/parser/testdata/null_safe_equal/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT NULL <=> NULL diff --git a/parser/testdata/nullif/explain.txt b/parser/testdata/nullif/explain.txt deleted file mode 100644 index c79173b57..000000000 --- a/parser/testdata/nullif/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function nullIf (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Literal UInt64_1 diff --git a/parser/testdata/nullif/query.sql b/parser/testdata/nullif/query.sql deleted file mode 100644 index 96475c944..000000000 --- a/parser/testdata/nullif/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT nullIf(1, 1) diff --git a/parser/testdata/numbers_function/explain.txt b/parser/testdata/numbers_function/explain.txt deleted file mode 100644 index f8090559f..000000000 --- a/parser/testdata/numbers_function/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/numbers_function/query.sql b/parser/testdata/numbers_function/query.sql deleted file mode 100644 index 04cff0c02..000000000 --- a/parser/testdata/numbers_function/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT number FROM numbers(10) diff --git a/parser/testdata/optimize_deduplicate/explain.txt b/parser/testdata/optimize_deduplicate/explain.txt deleted file mode 100644 index 07e75e1a0..000000000 --- a/parser/testdata/optimize_deduplicate/explain.txt +++ /dev/null @@ -1,2 +0,0 @@ -OptimizeQuery test_table_deduplicate (children 1) - Identifier test_table diff --git 
a/parser/testdata/optimize_deduplicate/query.sql b/parser/testdata/optimize_deduplicate/query.sql deleted file mode 100644 index 6cbf8cd87..000000000 --- a/parser/testdata/optimize_deduplicate/query.sql +++ /dev/null @@ -1 +0,0 @@ -OPTIMIZE TABLE test_table DEDUPLICATE diff --git a/parser/testdata/optimize_final/explain.txt b/parser/testdata/optimize_final/explain.txt deleted file mode 100644 index cb786f43a..000000000 --- a/parser/testdata/optimize_final/explain.txt +++ /dev/null @@ -1,2 +0,0 @@ -OptimizeQuery test_table_final (children 1) - Identifier test_table diff --git a/parser/testdata/optimize_final/query.sql b/parser/testdata/optimize_final/query.sql deleted file mode 100644 index 82d4e122b..000000000 --- a/parser/testdata/optimize_final/query.sql +++ /dev/null @@ -1 +0,0 @@ -OPTIMIZE TABLE test_table FINAL diff --git a/parser/testdata/optimize_partition/explain.txt b/parser/testdata/optimize_partition/explain.txt deleted file mode 100644 index cad0673ff..000000000 --- a/parser/testdata/optimize_partition/explain.txt +++ /dev/null @@ -1,4 +0,0 @@ -OptimizeQuery test_table (children 2) - Partition (children 1) - Literal UInt64_202301 - Identifier test_table diff --git a/parser/testdata/optimize_partition/query.sql b/parser/testdata/optimize_partition/query.sql deleted file mode 100644 index fc314a846..000000000 --- a/parser/testdata/optimize_partition/query.sql +++ /dev/null @@ -1 +0,0 @@ -OPTIMIZE TABLE test_table PARTITION 202301 diff --git a/parser/testdata/optimize_table/explain.txt b/parser/testdata/optimize_table/explain.txt deleted file mode 100644 index 5f8cd4b32..000000000 --- a/parser/testdata/optimize_table/explain.txt +++ /dev/null @@ -1,2 +0,0 @@ -OptimizeQuery test_table (children 1) - Identifier test_table diff --git a/parser/testdata/optimize_table/query.sql b/parser/testdata/optimize_table/query.sql deleted file mode 100644 index f70be21e0..000000000 --- a/parser/testdata/optimize_table/query.sql +++ /dev/null @@ -1 +0,0 @@ -OPTIMIZE TABLE test_table diff --git a/parser/testdata/order_by/explain.txt b/parser/testdata/order_by/explain.txt deleted file mode 100644 index c6ac63946..000000000 --- a/parser/testdata/order_by/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Literal UInt64_1 - ExpressionList (children 1) - OrderByElement (children 1) - Literal UInt64_1 diff --git a/parser/testdata/order_by/query.sql b/parser/testdata/order_by/query.sql deleted file mode 100644 index 4a98483b3..000000000 --- a/parser/testdata/order_by/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 ORDER BY 1 diff --git a/parser/testdata/order_by_asc/explain.txt b/parser/testdata/order_by_asc/explain.txt deleted file mode 100644 index c6ac63946..000000000 --- a/parser/testdata/order_by_asc/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Literal UInt64_1 - ExpressionList (children 1) - OrderByElement (children 1) - Literal UInt64_1 diff --git a/parser/testdata/order_by_asc/query.sql b/parser/testdata/order_by_asc/query.sql deleted file mode 100644 index 977241a11..000000000 --- a/parser/testdata/order_by_asc/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 ORDER BY 1 ASC diff --git a/parser/testdata/order_by_desc/explain.txt b/parser/testdata/order_by_desc/explain.txt deleted file mode 100644 index c6ac63946..000000000 --- 
a/parser/testdata/order_by_desc/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Literal UInt64_1 - ExpressionList (children 1) - OrderByElement (children 1) - Literal UInt64_1 diff --git a/parser/testdata/order_by_desc/query.sql b/parser/testdata/order_by_desc/query.sql deleted file mode 100644 index ceaca0bc1..000000000 --- a/parser/testdata/order_by_desc/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 ORDER BY 1 DESC diff --git a/parser/testdata/order_by_desc_nulls_first/explain.txt b/parser/testdata/order_by_desc_nulls_first/explain.txt deleted file mode 100644 index c6ac63946..000000000 --- a/parser/testdata/order_by_desc_nulls_first/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Literal UInt64_1 - ExpressionList (children 1) - OrderByElement (children 1) - Literal UInt64_1 diff --git a/parser/testdata/order_by_desc_nulls_first/query.sql b/parser/testdata/order_by_desc_nulls_first/query.sql deleted file mode 100644 index 7d880d871..000000000 --- a/parser/testdata/order_by_desc_nulls_first/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 ORDER BY 1 DESC NULLS FIRST diff --git a/parser/testdata/order_by_desc_nulls_last/explain.txt b/parser/testdata/order_by_desc_nulls_last/explain.txt deleted file mode 100644 index c6ac63946..000000000 --- a/parser/testdata/order_by_desc_nulls_last/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Literal UInt64_1 - ExpressionList (children 1) - OrderByElement (children 1) - Literal UInt64_1 diff --git a/parser/testdata/order_by_desc_nulls_last/query.sql b/parser/testdata/order_by_desc_nulls_last/query.sql deleted file mode 100644 index 64e76e26b..000000000 --- a/parser/testdata/order_by_desc_nulls_last/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 ORDER BY 1 DESC NULLS LAST diff --git a/parser/testdata/order_by_with_fill/explain.txt b/parser/testdata/order_by_with_fill/explain.txt deleted file mode 100644 index 0c317efee..000000000 --- a/parser/testdata/order_by_with_fill/explain.txt +++ /dev/null @@ -1,14 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 - ExpressionList (children 1) - OrderByElement (children 1) - Identifier number diff --git a/parser/testdata/order_by_with_fill/query.sql b/parser/testdata/order_by_with_fill/query.sql deleted file mode 100644 index cb3ebd589..000000000 --- a/parser/testdata/order_by_with_fill/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT number FROM numbers(10) ORDER BY number WITH FILL diff --git a/parser/testdata/order_by_with_fill_from_to/explain.txt b/parser/testdata/order_by_with_fill_from_to/explain.txt deleted file mode 100644 index 5ae233292..000000000 --- a/parser/testdata/order_by_with_fill_from_to/explain.txt +++ /dev/null @@ -1,16 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression 
(children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 - ExpressionList (children 1) - OrderByElement (children 3) - Identifier number - Literal UInt64_0 - Literal UInt64_20 diff --git a/parser/testdata/order_by_with_fill_from_to/query.sql b/parser/testdata/order_by_with_fill_from_to/query.sql deleted file mode 100644 index 9855216a3..000000000 --- a/parser/testdata/order_by_with_fill_from_to/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT number FROM numbers(10) ORDER BY number WITH FILL FROM 0 TO 20 diff --git a/parser/testdata/order_by_with_fill_step/explain.txt b/parser/testdata/order_by_with_fill_step/explain.txt deleted file mode 100644 index 717288c21..000000000 --- a/parser/testdata/order_by_with_fill_step/explain.txt +++ /dev/null @@ -1,17 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 - ExpressionList (children 1) - OrderByElement (children 4) - Identifier number - Literal UInt64_0 - Literal UInt64_20 - Literal UInt64_2 diff --git a/parser/testdata/order_by_with_fill_step/query.sql b/parser/testdata/order_by_with_fill_step/query.sql deleted file mode 100644 index 02605bbb4..000000000 --- a/parser/testdata/order_by_with_fill_step/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT number FROM numbers(10) ORDER BY number WITH FILL FROM 0 TO 20 STEP 2 diff --git a/parser/testdata/parentheses/explain.txt b/parser/testdata/parentheses/explain.txt deleted file mode 100644 index 655ee38e0..000000000 --- a/parser/testdata/parentheses/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function multiply (children 1) - ExpressionList (children 2) - Function plus (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Literal UInt64_2 - Literal UInt64_3 diff --git a/parser/testdata/parentheses/query.sql b/parser/testdata/parentheses/query.sql deleted file mode 100644 index b8f738cf2..000000000 --- a/parser/testdata/parentheses/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT (1 + 2) * 3 diff --git a/parser/testdata/parentheses_precedence/explain.txt b/parser/testdata/parentheses_precedence/explain.txt deleted file mode 100644 index 655ee38e0..000000000 --- a/parser/testdata/parentheses_precedence/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function multiply (children 1) - ExpressionList (children 2) - Function plus (children 1) - ExpressionList (children 2) - Literal UInt64_1 - Literal UInt64_2 - Literal UInt64_3 diff --git a/parser/testdata/parentheses_precedence/query.sql b/parser/testdata/parentheses_precedence/query.sql deleted file mode 100644 index b8f738cf2..000000000 --- a/parser/testdata/parentheses_precedence/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT (1 + 2) * 3 diff --git a/parser/testdata/path/explain.txt b/parser/testdata/path/explain.txt deleted file mode 100644 index e2167accf..000000000 --- a/parser/testdata/path/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function path (children 1) - 
ExpressionList (children 1) - Literal \'https://example.com/path/to/page\' diff --git a/parser/testdata/path/query.sql b/parser/testdata/path/query.sql deleted file mode 100644 index ebf0f558a..000000000 --- a/parser/testdata/path/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT path('https://example.com/path/to/page') diff --git a/parser/testdata/pathfull/explain.txt b/parser/testdata/pathfull/explain.txt deleted file mode 100644 index 07a0efaca..000000000 --- a/parser/testdata/pathfull/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function pathFull (children 1) - ExpressionList (children 1) - Literal \'https://example.com/path?query=1\' diff --git a/parser/testdata/pathfull/query.sql b/parser/testdata/pathfull/query.sql deleted file mode 100644 index c26d76ad5..000000000 --- a/parser/testdata/pathfull/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT pathFull('https://example.com/path?query=1') diff --git a/parser/testdata/position/explain.txt b/parser/testdata/position/explain.txt deleted file mode 100644 index a1701d92c..000000000 --- a/parser/testdata/position/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function position (children 1) - ExpressionList (children 2) - Literal \'hello\' - Literal \'l\' diff --git a/parser/testdata/position/query.sql b/parser/testdata/position/query.sql deleted file mode 100644 index 85fa72018..000000000 --- a/parser/testdata/position/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT position('hello', 'l') diff --git a/parser/testdata/positioncaseinsensitive/explain.txt b/parser/testdata/positioncaseinsensitive/explain.txt deleted file mode 100644 index b9e8281da..000000000 --- a/parser/testdata/positioncaseinsensitive/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function positionCaseInsensitive (children 1) - ExpressionList (children 2) - Literal \'HELLO\' - Literal \'l\' diff --git a/parser/testdata/positioncaseinsensitive/query.sql b/parser/testdata/positioncaseinsensitive/query.sql deleted file mode 100644 index 2b40cba3c..000000000 --- a/parser/testdata/positioncaseinsensitive/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT positionCaseInsensitive('HELLO', 'l') diff --git a/parser/testdata/prewhere/explain.txt b/parser/testdata/prewhere/explain.txt deleted file mode 100644 index faedf2a17..000000000 --- a/parser/testdata/prewhere/explain.txt +++ /dev/null @@ -1,13 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier test_table - Function greater (children 1) - ExpressionList (children 2) - Identifier id - Literal UInt64_0 diff --git a/parser/testdata/prewhere/query.sql b/parser/testdata/prewhere/query.sql deleted file mode 100644 index d42597187..000000000 --- a/parser/testdata/prewhere/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM test_table PREWHERE id > 0 diff --git a/parser/testdata/prewhere_and_where/explain.txt b/parser/testdata/prewhere_and_where/explain.txt deleted file mode 100644 index 197669cff..000000000 --- a/parser/testdata/prewhere_and_where/explain.txt +++ /dev/null @@ -1,17 
+0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 4) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier test_table - Function greater (children 1) - ExpressionList (children 2) - Identifier id - Literal UInt64_0 - Function notEquals (children 1) - ExpressionList (children 2) - Identifier name - Literal \'\' diff --git a/parser/testdata/prewhere_and_where/query.sql b/parser/testdata/prewhere_and_where/query.sql deleted file mode 100644 index 7b8ae4a50..000000000 --- a/parser/testdata/prewhere_and_where/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM test_table PREWHERE id > 0 WHERE name != '' diff --git a/parser/testdata/protocol/explain.txt b/parser/testdata/protocol/explain.txt deleted file mode 100644 index 7f3f97614..000000000 --- a/parser/testdata/protocol/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function protocol (children 1) - ExpressionList (children 1) - Literal \'https://example.com/path\' diff --git a/parser/testdata/protocol/query.sql b/parser/testdata/protocol/query.sql deleted file mode 100644 index d07c995e6..000000000 --- a/parser/testdata/protocol/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT protocol('https://example.com/path') diff --git a/parser/testdata/quantile_parametric/explain.txt b/parser/testdata/quantile_parametric/explain.txt deleted file mode 100644 index 0747ec282..000000000 --- a/parser/testdata/quantile_parametric/explain.txt +++ /dev/null @@ -1,15 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Function quantile (children 2) - ExpressionList (children 1) - Identifier number - ExpressionList (children 1) - Literal Float64_0.9 - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_100 diff --git a/parser/testdata/quantile_parametric/query.sql b/parser/testdata/quantile_parametric/query.sql deleted file mode 100644 index 24e0aff3c..000000000 --- a/parser/testdata/quantile_parametric/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT quantile(0.9)(number) FROM numbers(100) diff --git a/parser/testdata/quantiles_parametric/explain.txt b/parser/testdata/quantiles_parametric/explain.txt deleted file mode 100644 index 0eafc5ab7..000000000 --- a/parser/testdata/quantiles_parametric/explain.txt +++ /dev/null @@ -1,17 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Function quantiles (children 2) - ExpressionList (children 1) - Identifier number - ExpressionList (children 3) - Literal Float64_0.5 - Literal Float64_0.9 - Literal Float64_0.99 - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_100 diff --git a/parser/testdata/quantiles_parametric/query.sql b/parser/testdata/quantiles_parametric/query.sql deleted file mode 100644 index ac4cdea6f..000000000 --- a/parser/testdata/quantiles_parametric/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT quantiles(0.5, 0.9, 0.99)(number) FROM numbers(100) diff --git a/parser/testdata/querystring/explain.txt 
b/parser/testdata/querystring/explain.txt deleted file mode 100644 index 386aed9f4..000000000 --- a/parser/testdata/querystring/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function queryString (children 1) - ExpressionList (children 1) - Literal \'https://example.com/path?query=1\' diff --git a/parser/testdata/querystring/query.sql b/parser/testdata/querystring/query.sql deleted file mode 100644 index f8a816d08..000000000 --- a/parser/testdata/querystring/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT queryString('https://example.com/path?query=1') diff --git a/parser/testdata/range_function/explain.txt b/parser/testdata/range_function/explain.txt deleted file mode 100644 index 33161fb9b..000000000 --- a/parser/testdata/range_function/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function range (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/range_function/query.sql b/parser/testdata/range_function/query.sql deleted file mode 100644 index 0648c7207..000000000 --- a/parser/testdata/range_function/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT range(10) diff --git a/parser/testdata/rank_function/explain.txt b/parser/testdata/rank_function/explain.txt deleted file mode 100644 index e6a2a2297..000000000 --- a/parser/testdata/rank_function/explain.txt +++ /dev/null @@ -1,17 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 2) - Identifier number - Function rank (children 2) - ExpressionList - WindowDefinition (children 1) - ExpressionList (children 1) - OrderByElement (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/rank_function/query.sql b/parser/testdata/rank_function/query.sql deleted file mode 100644 index bd0276a60..000000000 --- a/parser/testdata/rank_function/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT number, rank() OVER (ORDER BY number) FROM numbers(10) diff --git a/parser/testdata/reinterpretasuint64/explain.txt b/parser/testdata/reinterpretasuint64/explain.txt deleted file mode 100644 index 5610980f7..000000000 --- a/parser/testdata/reinterpretasuint64/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function reinterpretAsUInt64 (children 1) - ExpressionList (children 1) - Literal \'hello\' diff --git a/parser/testdata/reinterpretasuint64/query.sql b/parser/testdata/reinterpretasuint64/query.sql deleted file mode 100644 index 7badea13c..000000000 --- a/parser/testdata/reinterpretasuint64/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT reinterpretAsUInt64('hello') diff --git a/parser/testdata/reload_config/explain.txt b/parser/testdata/reload_config/explain.txt deleted file mode 100644 index 1684cb9d8..000000000 --- a/parser/testdata/reload_config/explain.txt +++ /dev/null @@ -1 +0,0 @@ -SYSTEM query diff --git a/parser/testdata/reload_config/query.sql b/parser/testdata/reload_config/query.sql deleted file mode 100644 index f6fbf161a..000000000 --- a/parser/testdata/reload_config/query.sql +++ /dev/null @@ -1 +0,0 
@@ -SYSTEM RELOAD CONFIG diff --git a/parser/testdata/reload_dictionaries/explain.txt b/parser/testdata/reload_dictionaries/explain.txt deleted file mode 100644 index 1684cb9d8..000000000 --- a/parser/testdata/reload_dictionaries/explain.txt +++ /dev/null @@ -1 +0,0 @@ -SYSTEM query diff --git a/parser/testdata/reload_dictionaries/query.sql b/parser/testdata/reload_dictionaries/query.sql deleted file mode 100644 index cb4d074be..000000000 --- a/parser/testdata/reload_dictionaries/query.sql +++ /dev/null @@ -1 +0,0 @@ -SYSTEM RELOAD DICTIONARIES diff --git a/parser/testdata/rename_column/explain.txt b/parser/testdata/rename_column/explain.txt deleted file mode 100644 index fd0f2fa30..000000000 --- a/parser/testdata/rename_column/explain.txt +++ /dev/null @@ -1,6 +0,0 @@ -AlterQuery test_table (children 2) - ExpressionList (children 1) - AlterCommand RENAME_COLUMN (children 2) - Identifier old_name - Identifier new_name - Identifier test_table diff --git a/parser/testdata/rename_column/query.sql b/parser/testdata/rename_column/query.sql deleted file mode 100644 index 01aba0e66..000000000 --- a/parser/testdata/rename_column/query.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE test_table RENAME COLUMN old_name TO new_name diff --git a/parser/testdata/rename_table/explain.txt b/parser/testdata/rename_table/explain.txt deleted file mode 100644 index 5b5554265..000000000 --- a/parser/testdata/rename_table/explain.txt +++ /dev/null @@ -1,3 +0,0 @@ -Rename (children 2) - Identifier old_table - Identifier new_table diff --git a/parser/testdata/rename_table/query.sql b/parser/testdata/rename_table/query.sql deleted file mode 100644 index f85995ed7..000000000 --- a/parser/testdata/rename_table/query.sql +++ /dev/null @@ -1 +0,0 @@ -RENAME TABLE old_table TO new_table diff --git a/parser/testdata/repeat/explain.txt b/parser/testdata/repeat/explain.txt deleted file mode 100644 index 7d538bcae..000000000 --- a/parser/testdata/repeat/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function repeat (children 1) - ExpressionList (children 2) - Literal \'abc\' - Literal UInt64_3 diff --git a/parser/testdata/repeat/query.sql b/parser/testdata/repeat/query.sql deleted file mode 100644 index a234c9787..000000000 --- a/parser/testdata/repeat/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT repeat('abc', 3) diff --git a/parser/testdata/replace_partition/explain.txt b/parser/testdata/replace_partition/explain.txt deleted file mode 100644 index 4e3124699..000000000 --- a/parser/testdata/replace_partition/explain.txt +++ /dev/null @@ -1,6 +0,0 @@ -AlterQuery test_table (children 2) - ExpressionList (children 1) - AlterCommand REPLACE_PARTITION (children 1) - Partition (children 1) - Literal UInt64_202301 - Identifier test_table diff --git a/parser/testdata/replace_partition/query.sql b/parser/testdata/replace_partition/query.sql deleted file mode 100644 index 27ddc020d..000000000 --- a/parser/testdata/replace_partition/query.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE test_table REPLACE PARTITION 202301 FROM other_table diff --git a/parser/testdata/replaceall/explain.txt b/parser/testdata/replaceall/explain.txt deleted file mode 100644 index fae79eab1..000000000 --- a/parser/testdata/replaceall/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function replaceAll (children 1) - 
ExpressionList (children 3) - Literal \'hello\' - Literal \'l\' - Literal \'x\' diff --git a/parser/testdata/replaceall/query.sql b/parser/testdata/replaceall/query.sql deleted file mode 100644 index 877c80fe5..000000000 --- a/parser/testdata/replaceall/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT replaceAll('hello', 'l', 'x') diff --git a/parser/testdata/replaceone/explain.txt b/parser/testdata/replaceone/explain.txt deleted file mode 100644 index 8a6490ff0..000000000 --- a/parser/testdata/replaceone/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function replaceOne (children 1) - ExpressionList (children 3) - Literal \'hello\' - Literal \'l\' - Literal \'x\' diff --git a/parser/testdata/replaceone/query.sql b/parser/testdata/replaceone/query.sql deleted file mode 100644 index 03f1bf5e4..000000000 --- a/parser/testdata/replaceone/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT replaceOne('hello', 'l', 'x') diff --git a/parser/testdata/restart_replica/explain.txt b/parser/testdata/restart_replica/explain.txt deleted file mode 100644 index 4375432bb..000000000 --- a/parser/testdata/restart_replica/explain.txt +++ /dev/null @@ -1,3 +0,0 @@ -SYSTEM query (children 2) - Identifier system - Identifier one diff --git a/parser/testdata/restart_replica/query.sql b/parser/testdata/restart_replica/query.sql deleted file mode 100644 index feae962e3..000000000 --- a/parser/testdata/restart_replica/query.sql +++ /dev/null @@ -1 +0,0 @@ -SYSTEM RESTART REPLICA system.one diff --git a/parser/testdata/reverse_string/explain.txt b/parser/testdata/reverse_string/explain.txt deleted file mode 100644 index 562fd0987..000000000 --- a/parser/testdata/reverse_string/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function reverse (children 1) - ExpressionList (children 1) - Literal \'hello\' diff --git a/parser/testdata/reverse_string/query.sql b/parser/testdata/reverse_string/query.sql deleted file mode 100644 index d372eb334..000000000 --- a/parser/testdata/reverse_string/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT reverse('hello') diff --git a/parser/testdata/right_join_on/explain.txt b/parser/testdata/right_join_on/explain.txt deleted file mode 100644 index ae57e89c2..000000000 --- a/parser/testdata/right_join_on/explain.txt +++ /dev/null @@ -1,21 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (alias a) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TablesInSelectQueryElement (children 2) - TableExpression (children 1) - Function numbers (alias b) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TableJoin (children 1) - Function equals (children 1) - ExpressionList (children 2) - Identifier a.number - Identifier b.number diff --git a/parser/testdata/right_join_on/query.sql b/parser/testdata/right_join_on/query.sql deleted file mode 100644 index 3b32e0eb0..000000000 --- a/parser/testdata/right_join_on/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM numbers(5) AS a RIGHT JOIN numbers(5) AS b ON a.number = b.number diff --git a/parser/testdata/rightpad/explain.txt b/parser/testdata/rightpad/explain.txt deleted file mode 
100644 index bf059fcfc..000000000 --- a/parser/testdata/rightpad/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function rightPad (children 1) - ExpressionList (children 3) - Literal \'123\' - Literal UInt64_5 - Literal \'0\' diff --git a/parser/testdata/rightpad/query.sql b/parser/testdata/rightpad/query.sql deleted file mode 100644 index 60b0a7083..000000000 --- a/parser/testdata/rightpad/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT rightPad('123', 5, '0') diff --git a/parser/testdata/row_number_over_empty/explain.txt b/parser/testdata/row_number_over_empty/explain.txt deleted file mode 100644 index aff2a1537..000000000 --- a/parser/testdata/row_number_over_empty/explain.txt +++ /dev/null @@ -1,14 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 2) - Identifier number - Function row_number (children 2) - ExpressionList - WindowDefinition - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/row_number_over_empty/query.sql b/parser/testdata/row_number_over_empty/query.sql deleted file mode 100644 index a7dd498b3..000000000 --- a/parser/testdata/row_number_over_empty/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT number, row_number() OVER () FROM numbers(10) diff --git a/parser/testdata/row_number_over_order_by/explain.txt b/parser/testdata/row_number_over_order_by/explain.txt deleted file mode 100644 index b032a2503..000000000 --- a/parser/testdata/row_number_over_order_by/explain.txt +++ /dev/null @@ -1,17 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 2) - Identifier number - Function row_number (children 2) - ExpressionList - WindowDefinition (children 1) - ExpressionList (children 1) - OrderByElement (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/row_number_over_order_by/query.sql b/parser/testdata/row_number_over_order_by/query.sql deleted file mode 100644 index 9890a6d11..000000000 --- a/parser/testdata/row_number_over_order_by/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT number, row_number() OVER (ORDER BY number) FROM numbers(10) diff --git a/parser/testdata/rownumberinallblocks/explain.txt b/parser/testdata/rownumberinallblocks/explain.txt deleted file mode 100644 index 99079a4d0..000000000 --- a/parser/testdata/rownumberinallblocks/explain.txt +++ /dev/null @@ -1,6 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function rowNumberInAllBlocks (children 1) - ExpressionList diff --git a/parser/testdata/rownumberinallblocks/query.sql b/parser/testdata/rownumberinallblocks/query.sql deleted file mode 100644 index 475084105..000000000 --- a/parser/testdata/rownumberinallblocks/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT rowNumberInAllBlocks() diff --git a/parser/testdata/rownumberinblock/explain.txt b/parser/testdata/rownumberinblock/explain.txt deleted file mode 100644 index e4779d87d..000000000 --- a/parser/testdata/rownumberinblock/explain.txt +++ /dev/null @@ 
-1,6 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function rowNumberInBlock (children 1) - ExpressionList diff --git a/parser/testdata/rownumberinblock/query.sql b/parser/testdata/rownumberinblock/query.sql deleted file mode 100644 index 98e5a25c3..000000000 --- a/parser/testdata/rownumberinblock/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT rowNumberInBlock() diff --git a/parser/testdata/rtrim/explain.txt b/parser/testdata/rtrim/explain.txt deleted file mode 100644 index a7e34ab26..000000000 --- a/parser/testdata/rtrim/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function trimRight (children 1) - ExpressionList (children 1) - Literal \'hello \' diff --git a/parser/testdata/rtrim/query.sql b/parser/testdata/rtrim/query.sql deleted file mode 100644 index 99182a73b..000000000 --- a/parser/testdata/rtrim/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT rtrim('hello ') diff --git a/parser/testdata/runningaccumulate/explain.txt b/parser/testdata/runningaccumulate/explain.txt deleted file mode 100644 index 072fbd995..000000000 --- a/parser/testdata/runningaccumulate/explain.txt +++ /dev/null @@ -1,15 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Function runningAccumulate (children 1) - ExpressionList (children 1) - Function sumState (children 1) - ExpressionList (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/runningaccumulate/query.sql b/parser/testdata/runningaccumulate/query.sql deleted file mode 100644 index 60d880dce..000000000 --- a/parser/testdata/runningaccumulate/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT runningAccumulate(sumState(number)) FROM numbers(10) diff --git a/parser/testdata/runningdifference/explain.txt b/parser/testdata/runningdifference/explain.txt deleted file mode 100644 index fe15af0be..000000000 --- a/parser/testdata/runningdifference/explain.txt +++ /dev/null @@ -1,13 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Function runningDifference (children 1) - ExpressionList (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/runningdifference/query.sql b/parser/testdata/runningdifference/query.sql deleted file mode 100644 index 9eee5ee8b..000000000 --- a/parser/testdata/runningdifference/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT runningDifference(number) FROM numbers(10) diff --git a/parser/testdata/sample/explain.txt b/parser/testdata/sample/explain.txt deleted file mode 100644 index 0bb521926..000000000 --- a/parser/testdata/sample/explain.txt +++ /dev/null @@ -1,10 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 2) - TableIdentifier test_table - SampleRatio 1 / 10 diff --git 
a/parser/testdata/sample/query.sql b/parser/testdata/sample/query.sql deleted file mode 100644 index a0f2e08f1..000000000 --- a/parser/testdata/sample/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM test_table SAMPLE 0.1 diff --git a/parser/testdata/sample_n/explain.txt b/parser/testdata/sample_n/explain.txt deleted file mode 100644 index 8c4fd77b4..000000000 --- a/parser/testdata/sample_n/explain.txt +++ /dev/null @@ -1,10 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 2) - TableIdentifier test_table - SampleRatio 1000 diff --git a/parser/testdata/sample_n/query.sql b/parser/testdata/sample_n/query.sql deleted file mode 100644 index 214748814..000000000 --- a/parser/testdata/sample_n/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM test_table SAMPLE 1000 diff --git a/parser/testdata/sample_offset/explain.txt b/parser/testdata/sample_offset/explain.txt deleted file mode 100644 index 8e546f5f6..000000000 --- a/parser/testdata/sample_offset/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 3) - TableIdentifier test_table - SampleRatio 1 / 10 - SampleRatio 5 / 10 diff --git a/parser/testdata/sample_offset/query.sql b/parser/testdata/sample_offset/query.sql deleted file mode 100644 index bb306b26e..000000000 --- a/parser/testdata/sample_offset/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM test_table SAMPLE 0.1 OFFSET 0.5 diff --git a/parser/testdata/scalar_subquery/explain.txt b/parser/testdata/scalar_subquery/explain.txt deleted file mode 100644 index be752a4a7..000000000 --- a/parser/testdata/scalar_subquery/explain.txt +++ /dev/null @@ -1,10 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Subquery (children 1) - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/scalar_subquery/query.sql b/parser/testdata/scalar_subquery/query.sql deleted file mode 100644 index aadcd3589..000000000 --- a/parser/testdata/scalar_subquery/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT (SELECT 1) diff --git a/parser/testdata/scalar_subquery_aggregate/explain.txt b/parser/testdata/scalar_subquery_aggregate/explain.txt deleted file mode 100644 index 9c7aceffc..000000000 --- a/parser/testdata/scalar_subquery_aggregate/explain.txt +++ /dev/null @@ -1,18 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Subquery (children 1) - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Function max (children 1) - ExpressionList (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/scalar_subquery_aggregate/query.sql b/parser/testdata/scalar_subquery_aggregate/query.sql deleted file mode 100644 index 01b76b2e1..000000000 --- 
a/parser/testdata/scalar_subquery_aggregate/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT (SELECT max(number) FROM numbers(10)) diff --git a/parser/testdata/select_columns/explain.txt b/parser/testdata/select_columns/explain.txt deleted file mode 100644 index 700b2648d..000000000 --- a/parser/testdata/select_columns/explain.txt +++ /dev/null @@ -1,10 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 2) - Identifier id - Identifier name - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users diff --git a/parser/testdata/select_columns/query.sql b/parser/testdata/select_columns/query.sql deleted file mode 100644 index b229b261e..000000000 --- a/parser/testdata/select_columns/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT id, name FROM users diff --git a/parser/testdata/select_distinct/explain.txt b/parser/testdata/select_distinct/explain.txt deleted file mode 100644 index 15b5e01f2..000000000 --- a/parser/testdata/select_distinct/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Identifier name - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users diff --git a/parser/testdata/select_distinct/query.sql b/parser/testdata/select_distinct/query.sql deleted file mode 100644 index 342df6e6f..000000000 --- a/parser/testdata/select_distinct/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT DISTINCT name FROM users diff --git a/parser/testdata/select_final/explain.txt b/parser/testdata/select_final/explain.txt deleted file mode 100644 index 3d3fc5477..000000000 --- a/parser/testdata/select_final/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier test_table diff --git a/parser/testdata/select_final/query.sql b/parser/testdata/select_final/query.sql deleted file mode 100644 index 8e4e1586e..000000000 --- a/parser/testdata/select_final/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM test_table FINAL diff --git a/parser/testdata/select_from_system_table/explain.txt b/parser/testdata/select_from_system_table/explain.txt deleted file mode 100644 index c1b565496..000000000 --- a/parser/testdata/select_from_system_table/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier system.one diff --git a/parser/testdata/select_from_system_table/query.sql b/parser/testdata/select_from_system_table/query.sql deleted file mode 100644 index 5c6acf700..000000000 --- a/parser/testdata/select_from_system_table/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM system.one diff --git a/parser/testdata/select_with_alias/explain.txt b/parser/testdata/select_with_alias/explain.txt deleted file mode 100644 index e90ca7dfc..000000000 --- a/parser/testdata/select_with_alias/explain.txt +++ /dev/null @@ -1,9 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery 
(children 2) - ExpressionList (children 1) - Identifier id (alias user_id) - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users diff --git a/parser/testdata/select_with_alias/query.sql b/parser/testdata/select_with_alias/query.sql deleted file mode 100644 index a26a110d2..000000000 --- a/parser/testdata/select_with_alias/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT id AS user_id FROM users diff --git a/parser/testdata/select_with_limit/explain.txt b/parser/testdata/select_with_limit/explain.txt deleted file mode 100644 index 409eec5c6..000000000 --- a/parser/testdata/select_with_limit/explain.txt +++ /dev/null @@ -1,10 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users - Literal UInt64_10 diff --git a/parser/testdata/select_with_limit/query.sql b/parser/testdata/select_with_limit/query.sql deleted file mode 100644 index 639675ffa..000000000 --- a/parser/testdata/select_with_limit/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM users LIMIT 10 diff --git a/parser/testdata/select_with_offset/explain.txt b/parser/testdata/select_with_offset/explain.txt deleted file mode 100644 index feb062fbc..000000000 --- a/parser/testdata/select_with_offset/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 4) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users - Literal UInt64_5 - Literal UInt64_10 diff --git a/parser/testdata/select_with_offset/query.sql b/parser/testdata/select_with_offset/query.sql deleted file mode 100644 index 0ecacbaa9..000000000 --- a/parser/testdata/select_with_offset/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM users LIMIT 10 OFFSET 5 diff --git a/parser/testdata/select_with_order/explain.txt b/parser/testdata/select_with_order/explain.txt deleted file mode 100644 index 625e44c49..000000000 --- a/parser/testdata/select_with_order/explain.txt +++ /dev/null @@ -1,12 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users - ExpressionList (children 1) - OrderByElement (children 1) - Identifier name diff --git a/parser/testdata/select_with_order/query.sql b/parser/testdata/select_with_order/query.sql deleted file mode 100644 index 36d9289ae..000000000 --- a/parser/testdata/select_with_order/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM users ORDER BY name ASC diff --git a/parser/testdata/select_with_order_desc/explain.txt b/parser/testdata/select_with_order_desc/explain.txt deleted file mode 100644 index ba1071d32..000000000 --- a/parser/testdata/select_with_order_desc/explain.txt +++ /dev/null @@ -1,12 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users - ExpressionList (children 1) - OrderByElement (children 1) - Identifier id diff --git 
a/parser/testdata/select_with_order_desc/query.sql b/parser/testdata/select_with_order_desc/query.sql deleted file mode 100644 index 7ad9360b7..000000000 --- a/parser/testdata/select_with_order_desc/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM users ORDER BY id DESC diff --git a/parser/testdata/select_with_settings/explain.txt b/parser/testdata/select_with_settings/explain.txt deleted file mode 100644 index f58e3fbda..000000000 --- a/parser/testdata/select_with_settings/explain.txt +++ /dev/null @@ -1,6 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Literal UInt64_1 - Set diff --git a/parser/testdata/select_with_settings/query.sql b/parser/testdata/select_with_settings/query.sql deleted file mode 100644 index 4863a2ad9..000000000 --- a/parser/testdata/select_with_settings/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 1 SETTINGS max_threads = 1 diff --git a/parser/testdata/select_with_where/explain.txt b/parser/testdata/select_with_where/explain.txt deleted file mode 100644 index 581bf40e7..000000000 --- a/parser/testdata/select_with_where/explain.txt +++ /dev/null @@ -1,13 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users - Function equals (children 1) - ExpressionList (children 2) - Identifier id - Literal UInt64_1 diff --git a/parser/testdata/select_with_where/query.sql b/parser/testdata/select_with_where/query.sql deleted file mode 100644 index 90b4f7345..000000000 --- a/parser/testdata/select_with_where/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM users WHERE id = 1 diff --git a/parser/testdata/semi_join/explain.txt b/parser/testdata/semi_join/explain.txt deleted file mode 100644 index ae57e89c2..000000000 --- a/parser/testdata/semi_join/explain.txt +++ /dev/null @@ -1,21 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 2) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (alias a) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TablesInSelectQueryElement (children 2) - TableExpression (children 1) - Function numbers (alias b) (children 1) - ExpressionList (children 1) - Literal UInt64_5 - TableJoin (children 1) - Function equals (children 1) - ExpressionList (children 2) - Identifier a.number - Identifier b.number diff --git a/parser/testdata/semi_join/query.sql b/parser/testdata/semi_join/query.sql deleted file mode 100644 index e48043f8d..000000000 --- a/parser/testdata/semi_join/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM numbers(5) AS a SEMI JOIN numbers(5) AS b ON a.number = b.number diff --git a/parser/testdata/set_boolean_setting/explain.txt b/parser/testdata/set_boolean_setting/explain.txt deleted file mode 100644 index cb1142253..000000000 --- a/parser/testdata/set_boolean_setting/explain.txt +++ /dev/null @@ -1 +0,0 @@ -Set diff --git a/parser/testdata/set_boolean_setting/query.sql b/parser/testdata/set_boolean_setting/query.sql deleted file mode 100644 index 3c6f13fdc..000000000 --- a/parser/testdata/set_boolean_setting/query.sql +++ /dev/null @@ -1 +0,0 @@ -SET enable_optimize_predicate_expression = 1 diff --git a/parser/testdata/set_max_memory_usage/explain.txt 
b/parser/testdata/set_max_memory_usage/explain.txt deleted file mode 100644 index cb1142253..000000000 --- a/parser/testdata/set_max_memory_usage/explain.txt +++ /dev/null @@ -1 +0,0 @@ -Set diff --git a/parser/testdata/set_max_memory_usage/query.sql b/parser/testdata/set_max_memory_usage/query.sql deleted file mode 100644 index 7bcde05ed..000000000 --- a/parser/testdata/set_max_memory_usage/query.sql +++ /dev/null @@ -1 +0,0 @@ -SET max_memory_usage = 10000000 diff --git a/parser/testdata/set_max_threads/explain.txt b/parser/testdata/set_max_threads/explain.txt deleted file mode 100644 index cb1142253..000000000 --- a/parser/testdata/set_max_threads/explain.txt +++ /dev/null @@ -1 +0,0 @@ -Set diff --git a/parser/testdata/set_max_threads/query.sql b/parser/testdata/set_max_threads/query.sql deleted file mode 100644 index ed6ccd1ee..000000000 --- a/parser/testdata/set_max_threads/query.sql +++ /dev/null @@ -1 +0,0 @@ -SET max_threads = 4 diff --git a/parser/testdata/set_setting/explain.txt b/parser/testdata/set_setting/explain.txt deleted file mode 100644 index cb1142253..000000000 --- a/parser/testdata/set_setting/explain.txt +++ /dev/null @@ -1 +0,0 @@ -Set diff --git a/parser/testdata/set_setting/query.sql b/parser/testdata/set_setting/query.sql deleted file mode 100644 index ed6ccd1ee..000000000 --- a/parser/testdata/set_setting/query.sql +++ /dev/null @@ -1 +0,0 @@ -SET max_threads = 4 diff --git a/parser/testdata/sha1/explain.txt b/parser/testdata/sha1/explain.txt deleted file mode 100644 index 0e114f5e9..000000000 --- a/parser/testdata/sha1/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function SHA1 (children 1) - ExpressionList (children 1) - Literal \'hello\' diff --git a/parser/testdata/sha1/query.sql b/parser/testdata/sha1/query.sql deleted file mode 100644 index fcb0855f5..000000000 --- a/parser/testdata/sha1/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT SHA1('hello') diff --git a/parser/testdata/sha256/explain.txt b/parser/testdata/sha256/explain.txt deleted file mode 100644 index 8ad147d91..000000000 --- a/parser/testdata/sha256/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function SHA256 (children 1) - ExpressionList (children 1) - Literal \'hello\' diff --git a/parser/testdata/sha256/query.sql b/parser/testdata/sha256/query.sql deleted file mode 100644 index e7e066aa0..000000000 --- a/parser/testdata/sha256/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT SHA256('hello') diff --git a/parser/testdata/show_columns/explain.txt b/parser/testdata/show_columns/explain.txt deleted file mode 100644 index 96200a0c7..000000000 --- a/parser/testdata/show_columns/explain.txt +++ /dev/null @@ -1 +0,0 @@ -ShowColumns diff --git a/parser/testdata/show_columns/query.sql b/parser/testdata/show_columns/query.sql deleted file mode 100644 index 25041a14c..000000000 --- a/parser/testdata/show_columns/query.sql +++ /dev/null @@ -1 +0,0 @@ -SHOW COLUMNS FROM system.one diff --git a/parser/testdata/show_create_database/explain.txt b/parser/testdata/show_create_database/explain.txt deleted file mode 100644 index fb23bef7f..000000000 --- a/parser/testdata/show_create_database/explain.txt +++ /dev/null @@ -1,2 +0,0 @@ -ShowCreateDatabaseQuery system (children 1) - Identifier system diff --git a/parser/testdata/show_create_database/query.sql 
b/parser/testdata/show_create_database/query.sql deleted file mode 100644 index 402ff22f2..000000000 --- a/parser/testdata/show_create_database/query.sql +++ /dev/null @@ -1 +0,0 @@ -SHOW CREATE DATABASE system diff --git a/parser/testdata/show_create_table/explain.txt b/parser/testdata/show_create_table/explain.txt deleted file mode 100644 index 4e3a6730e..000000000 --- a/parser/testdata/show_create_table/explain.txt +++ /dev/null @@ -1,3 +0,0 @@ -ShowCreateTableQuery system one (children 2) - Identifier system - Identifier one diff --git a/parser/testdata/show_create_table/query.sql b/parser/testdata/show_create_table/query.sql deleted file mode 100644 index 86d23394c..000000000 --- a/parser/testdata/show_create_table/query.sql +++ /dev/null @@ -1 +0,0 @@ -SHOW CREATE TABLE system.one diff --git a/parser/testdata/show_databases/explain.txt b/parser/testdata/show_databases/explain.txt deleted file mode 100644 index 9cf4575c9..000000000 --- a/parser/testdata/show_databases/explain.txt +++ /dev/null @@ -1 +0,0 @@ -ShowTables diff --git a/parser/testdata/show_databases/query.sql b/parser/testdata/show_databases/query.sql deleted file mode 100644 index da16ca699..000000000 --- a/parser/testdata/show_databases/query.sql +++ /dev/null @@ -1 +0,0 @@ -SHOW DATABASES diff --git a/parser/testdata/show_databases_stmt/explain.txt b/parser/testdata/show_databases_stmt/explain.txt deleted file mode 100644 index 9cf4575c9..000000000 --- a/parser/testdata/show_databases_stmt/explain.txt +++ /dev/null @@ -1 +0,0 @@ -ShowTables diff --git a/parser/testdata/show_databases_stmt/query.sql b/parser/testdata/show_databases_stmt/query.sql deleted file mode 100644 index da16ca699..000000000 --- a/parser/testdata/show_databases_stmt/query.sql +++ /dev/null @@ -1 +0,0 @@ -SHOW DATABASES diff --git a/parser/testdata/show_dictionaries/explain.txt b/parser/testdata/show_dictionaries/explain.txt deleted file mode 100644 index 9cf4575c9..000000000 --- a/parser/testdata/show_dictionaries/explain.txt +++ /dev/null @@ -1 +0,0 @@ -ShowTables diff --git a/parser/testdata/show_dictionaries/query.sql b/parser/testdata/show_dictionaries/query.sql deleted file mode 100644 index 43efbf2ad..000000000 --- a/parser/testdata/show_dictionaries/query.sql +++ /dev/null @@ -1 +0,0 @@ -SHOW DICTIONARIES diff --git a/parser/testdata/show_processlist/explain.txt b/parser/testdata/show_processlist/explain.txt deleted file mode 100644 index 7a5e7380b..000000000 --- a/parser/testdata/show_processlist/explain.txt +++ /dev/null @@ -1 +0,0 @@ -ShowProcesslistQuery diff --git a/parser/testdata/show_processlist/query.sql b/parser/testdata/show_processlist/query.sql deleted file mode 100644 index c8cf70db4..000000000 --- a/parser/testdata/show_processlist/query.sql +++ /dev/null @@ -1 +0,0 @@ -SHOW PROCESSLIST diff --git a/parser/testdata/show_tables/explain.txt b/parser/testdata/show_tables/explain.txt deleted file mode 100644 index 9cf4575c9..000000000 --- a/parser/testdata/show_tables/explain.txt +++ /dev/null @@ -1 +0,0 @@ -ShowTables diff --git a/parser/testdata/show_tables/query.sql b/parser/testdata/show_tables/query.sql deleted file mode 100644 index 61b3cfc26..000000000 --- a/parser/testdata/show_tables/query.sql +++ /dev/null @@ -1 +0,0 @@ -SHOW TABLES diff --git a/parser/testdata/show_tables_from/explain.txt b/parser/testdata/show_tables_from/explain.txt deleted file mode 100644 index a3c174ff2..000000000 --- a/parser/testdata/show_tables_from/explain.txt +++ /dev/null @@ -1,2 +0,0 @@ -ShowTables (children 1) - Identifier system diff 
--git a/parser/testdata/show_tables_from/query.sql b/parser/testdata/show_tables_from/query.sql deleted file mode 100644 index 17aa88e46..000000000 --- a/parser/testdata/show_tables_from/query.sql +++ /dev/null @@ -1 +0,0 @@ -SHOW TABLES FROM system diff --git a/parser/testdata/show_tables_stmt/explain.txt b/parser/testdata/show_tables_stmt/explain.txt deleted file mode 100644 index 9cf4575c9..000000000 --- a/parser/testdata/show_tables_stmt/explain.txt +++ /dev/null @@ -1 +0,0 @@ -ShowTables diff --git a/parser/testdata/show_tables_stmt/query.sql b/parser/testdata/show_tables_stmt/query.sql deleted file mode 100644 index 61b3cfc26..000000000 --- a/parser/testdata/show_tables_stmt/query.sql +++ /dev/null @@ -1 +0,0 @@ -SHOW TABLES diff --git a/parser/testdata/simple_select/explain.txt b/parser/testdata/simple_select/explain.txt deleted file mode 100644 index 8827c47de..000000000 --- a/parser/testdata/simple_select/explain.txt +++ /dev/null @@ -1,5 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/siphash64/explain.txt b/parser/testdata/siphash64/explain.txt deleted file mode 100644 index 36b6fca9e..000000000 --- a/parser/testdata/siphash64/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function sipHash64 (children 1) - ExpressionList (children 1) - Literal \'hello\' diff --git a/parser/testdata/siphash64/query.sql b/parser/testdata/siphash64/query.sql deleted file mode 100644 index edb8b572f..000000000 --- a/parser/testdata/siphash64/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT sipHash64('hello') diff --git a/parser/testdata/sleep/explain.txt b/parser/testdata/sleep/explain.txt deleted file mode 100644 index 04ea5a250..000000000 --- a/parser/testdata/sleep/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function sleep (children 1) - ExpressionList (children 1) - Literal Float64_0.001 diff --git a/parser/testdata/sleep/query.sql b/parser/testdata/sleep/query.sql deleted file mode 100644 index 7a0e877f0..000000000 --- a/parser/testdata/sleep/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT sleep(0.001) diff --git a/parser/testdata/splitbychar/explain.txt b/parser/testdata/splitbychar/explain.txt deleted file mode 100644 index f5fd26d20..000000000 --- a/parser/testdata/splitbychar/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function splitByChar (children 1) - ExpressionList (children 2) - Literal \',\' - Literal \'a,b,c\' diff --git a/parser/testdata/splitbychar/query.sql b/parser/testdata/splitbychar/query.sql deleted file mode 100644 index 4b894cbb7..000000000 --- a/parser/testdata/splitbychar/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT splitByChar(',', 'a,b,c') diff --git a/parser/testdata/splitbystring/explain.txt b/parser/testdata/splitbystring/explain.txt deleted file mode 100644 index 4d4aeca2e..000000000 --- a/parser/testdata/splitbystring/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function splitByString (children 1) - ExpressionList (children 2) - Literal \',,\' - Literal \'a,,b,,c\' 
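Every testdata directory removed above and below follows the same golden-file layout: a one-line query.sql holding the input statement and an explain.txt holding the expected EXPLAIN AST rendering (some cases, such as summerge_uniqmerge further down, also carry a metadata.json recording the upstream ClickHouse test the query came from). Below is a minimal sketch of the kind of table-driven harness such pairs imply; the parser.Parse entry point, the import paths, and the TestExplainGolden name are illustrative assumptions, not this repository's actual API — only the ast.Explain helper, which this diff also deletes, comes from the repository itself.

```go
// Hypothetical golden-file harness for query.sql / explain.txt pairs.
// parser.Parse and the module path are assumptions for illustration.
package parser_test

import (
	"os"
	"path/filepath"
	"strings"
	"testing"

	"example.com/clickhouse-parser/ast"    // assumed import path
	"example.com/clickhouse-parser/parser" // assumed import path
)

func TestExplainGolden(t *testing.T) {
	dirs, err := os.ReadDir("testdata")
	if err != nil {
		t.Fatal(err)
	}
	for _, d := range dirs {
		if !d.IsDir() {
			continue
		}
		// One subtest per fixture directory, named after it.
		t.Run(d.Name(), func(t *testing.T) {
			query, err := os.ReadFile(filepath.Join("testdata", d.Name(), "query.sql"))
			if err != nil {
				t.Fatal(err)
			}
			want, err := os.ReadFile(filepath.Join("testdata", d.Name(), "explain.txt"))
			if err != nil {
				t.Fatal(err)
			}
			stmt, err := parser.Parse(string(query)) // assumed signature
			if err != nil {
				t.Fatalf("parse %q: %v", query, err)
			}
			// Compare the rendered AST against the golden file,
			// ignoring only a trailing newline.
			got := ast.Explain(stmt)
			if strings.TrimRight(got, "\n") != strings.TrimRight(string(want), "\n") {
				t.Errorf("EXPLAIN AST mismatch for %q:\ngot:\n%s\nwant:\n%s", query, got, want)
			}
		})
	}
}
```

Because each subtest is named after its fixture directory, a harness like this reports a failure in, say, splitbystring in isolation, which is what makes bulk fixture deletions like the ones in this diff straightforward to review case by case.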
diff --git a/parser/testdata/splitbystring/query.sql b/parser/testdata/splitbystring/query.sql deleted file mode 100644 index 2c8a5c898..000000000 --- a/parser/testdata/splitbystring/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT splitByString(',,', 'a,,b,,c') diff --git a/parser/testdata/start_fetches/explain.txt b/parser/testdata/start_fetches/explain.txt deleted file mode 100644 index 1684cb9d8..000000000 --- a/parser/testdata/start_fetches/explain.txt +++ /dev/null @@ -1 +0,0 @@ -SYSTEM query diff --git a/parser/testdata/start_fetches/query.sql b/parser/testdata/start_fetches/query.sql deleted file mode 100644 index 38b44c629..000000000 --- a/parser/testdata/start_fetches/query.sql +++ /dev/null @@ -1 +0,0 @@ -SYSTEM START FETCHES diff --git a/parser/testdata/start_merges/explain.txt b/parser/testdata/start_merges/explain.txt deleted file mode 100644 index 1684cb9d8..000000000 --- a/parser/testdata/start_merges/explain.txt +++ /dev/null @@ -1 +0,0 @@ -SYSTEM query diff --git a/parser/testdata/start_merges/query.sql b/parser/testdata/start_merges/query.sql deleted file mode 100644 index f3756d614..000000000 --- a/parser/testdata/start_merges/query.sql +++ /dev/null @@ -1 +0,0 @@ -SYSTEM START MERGES diff --git a/parser/testdata/start_moves/explain.txt b/parser/testdata/start_moves/explain.txt deleted file mode 100644 index 1684cb9d8..000000000 --- a/parser/testdata/start_moves/explain.txt +++ /dev/null @@ -1 +0,0 @@ -SYSTEM query diff --git a/parser/testdata/start_moves/query.sql b/parser/testdata/start_moves/query.sql deleted file mode 100644 index 762d7c0b3..000000000 --- a/parser/testdata/start_moves/query.sql +++ /dev/null @@ -1 +0,0 @@ -SYSTEM START MOVES diff --git a/parser/testdata/start_replication_queues/explain.txt b/parser/testdata/start_replication_queues/explain.txt deleted file mode 100644 index 1684cb9d8..000000000 --- a/parser/testdata/start_replication_queues/explain.txt +++ /dev/null @@ -1 +0,0 @@ -SYSTEM query diff --git a/parser/testdata/start_replication_queues/query.sql b/parser/testdata/start_replication_queues/query.sql deleted file mode 100644 index 5bbbb1b1b..000000000 --- a/parser/testdata/start_replication_queues/query.sql +++ /dev/null @@ -1 +0,0 @@ -SYSTEM START REPLICATION QUEUES diff --git a/parser/testdata/start_ttl_merges/explain.txt b/parser/testdata/start_ttl_merges/explain.txt deleted file mode 100644 index 1684cb9d8..000000000 --- a/parser/testdata/start_ttl_merges/explain.txt +++ /dev/null @@ -1 +0,0 @@ -SYSTEM query diff --git a/parser/testdata/start_ttl_merges/query.sql b/parser/testdata/start_ttl_merges/query.sql deleted file mode 100644 index 465d46f6f..000000000 --- a/parser/testdata/start_ttl_merges/query.sql +++ /dev/null @@ -1 +0,0 @@ -SYSTEM START TTL MERGES diff --git a/parser/testdata/stop_fetches/explain.txt b/parser/testdata/stop_fetches/explain.txt deleted file mode 100644 index 1684cb9d8..000000000 --- a/parser/testdata/stop_fetches/explain.txt +++ /dev/null @@ -1 +0,0 @@ -SYSTEM query diff --git a/parser/testdata/stop_fetches/query.sql b/parser/testdata/stop_fetches/query.sql deleted file mode 100644 index 04ca98a75..000000000 --- a/parser/testdata/stop_fetches/query.sql +++ /dev/null @@ -1 +0,0 @@ -SYSTEM STOP FETCHES diff --git a/parser/testdata/stop_merges/explain.txt b/parser/testdata/stop_merges/explain.txt deleted file mode 100644 index 1684cb9d8..000000000 --- a/parser/testdata/stop_merges/explain.txt +++ /dev/null @@ -1 +0,0 @@ -SYSTEM query diff --git a/parser/testdata/stop_merges/query.sql 
b/parser/testdata/stop_merges/query.sql deleted file mode 100644 index dc59ce71b..000000000 --- a/parser/testdata/stop_merges/query.sql +++ /dev/null @@ -1 +0,0 @@ -SYSTEM STOP MERGES diff --git a/parser/testdata/stop_moves/explain.txt b/parser/testdata/stop_moves/explain.txt deleted file mode 100644 index 1684cb9d8..000000000 --- a/parser/testdata/stop_moves/explain.txt +++ /dev/null @@ -1 +0,0 @@ -SYSTEM query diff --git a/parser/testdata/stop_moves/query.sql b/parser/testdata/stop_moves/query.sql deleted file mode 100644 index 308f9eb97..000000000 --- a/parser/testdata/stop_moves/query.sql +++ /dev/null @@ -1 +0,0 @@ -SYSTEM STOP MOVES diff --git a/parser/testdata/stop_replication_queues/explain.txt b/parser/testdata/stop_replication_queues/explain.txt deleted file mode 100644 index 1684cb9d8..000000000 --- a/parser/testdata/stop_replication_queues/explain.txt +++ /dev/null @@ -1 +0,0 @@ -SYSTEM query diff --git a/parser/testdata/stop_replication_queues/query.sql b/parser/testdata/stop_replication_queues/query.sql deleted file mode 100644 index 96137a664..000000000 --- a/parser/testdata/stop_replication_queues/query.sql +++ /dev/null @@ -1 +0,0 @@ -SYSTEM STOP REPLICATION QUEUES diff --git a/parser/testdata/stop_ttl_merges/explain.txt b/parser/testdata/stop_ttl_merges/explain.txt deleted file mode 100644 index 1684cb9d8..000000000 --- a/parser/testdata/stop_ttl_merges/explain.txt +++ /dev/null @@ -1 +0,0 @@ -SYSTEM query diff --git a/parser/testdata/stop_ttl_merges/query.sql b/parser/testdata/stop_ttl_merges/query.sql deleted file mode 100644 index 23ce4bc5e..000000000 --- a/parser/testdata/stop_ttl_merges/query.sql +++ /dev/null @@ -1 +0,0 @@ -SYSTEM STOP TTL MERGES diff --git a/parser/testdata/string_literal/explain.txt b/parser/testdata/string_literal/explain.txt deleted file mode 100644 index 681d1bcb8..000000000 --- a/parser/testdata/string_literal/explain.txt +++ /dev/null @@ -1,5 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Literal \'hello\' diff --git a/parser/testdata/string_literal/query.sql b/parser/testdata/string_literal/query.sql deleted file mode 100644 index 2370c207f..000000000 --- a/parser/testdata/string_literal/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT 'hello' diff --git a/parser/testdata/subquery_in_from/explain.txt b/parser/testdata/subquery_in_from/explain.txt deleted file mode 100644 index a4421e20c..000000000 --- a/parser/testdata/subquery_in_from/explain.txt +++ /dev/null @@ -1,18 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Subquery (alias t) (children 1) - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Identifier id - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users diff --git a/parser/testdata/subquery_in_from/query.sql b/parser/testdata/subquery_in_from/query.sql deleted file mode 100644 index 6e9e15c71..000000000 --- a/parser/testdata/subquery_in_from/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM (SELECT id FROM users) AS t diff --git a/parser/testdata/subquery_in_from_basic/explain.txt b/parser/testdata/subquery_in_from_basic/explain.txt deleted file mode 100644 index 855e73a03..000000000 --- 
a/parser/testdata/subquery_in_from_basic/explain.txt +++ /dev/null @@ -1,14 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Subquery (children 1) - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/subquery_in_from_basic/query.sql b/parser/testdata/subquery_in_from_basic/query.sql deleted file mode 100644 index e1b16542e..000000000 --- a/parser/testdata/subquery_in_from_basic/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM (SELECT 1) diff --git a/parser/testdata/subquery_in_from_with_alias/explain.txt b/parser/testdata/subquery_in_from_with_alias/explain.txt deleted file mode 100644 index abe5234be..000000000 --- a/parser/testdata/subquery_in_from_with_alias/explain.txt +++ /dev/null @@ -1,14 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Subquery (alias t) (children 1) - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/subquery_in_from_with_alias/query.sql b/parser/testdata/subquery_in_from_with_alias/query.sql deleted file mode 100644 index b3f9b62a8..000000000 --- a/parser/testdata/subquery_in_from_with_alias/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM (SELECT 1) AS t diff --git a/parser/testdata/subquery_in_in_clause/explain.txt b/parser/testdata/subquery_in_in_clause/explain.txt deleted file mode 100644 index 3c72b1dfb..000000000 --- a/parser/testdata/subquery_in_in_clause/explain.txt +++ /dev/null @@ -1,26 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 - Function in (children 1) - ExpressionList (children 2) - Identifier number - Subquery (children 1) - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_5 diff --git a/parser/testdata/subquery_in_in_clause/query.sql b/parser/testdata/subquery_in_in_clause/query.sql deleted file mode 100644 index 92cf920bf..000000000 --- a/parser/testdata/subquery_in_in_clause/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT number FROM numbers(10) WHERE number IN (SELECT number FROM numbers(5)) diff --git a/parser/testdata/subquery_in_not_in_clause/explain.txt b/parser/testdata/subquery_in_not_in_clause/explain.txt deleted file mode 100644 index 61c74eede..000000000 --- a/parser/testdata/subquery_in_not_in_clause/explain.txt +++ /dev/null @@ -1,26 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Identifier number - TablesInSelectQuery (children 1) - 
TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 - Function notIn (children 1) - ExpressionList (children 2) - Identifier number - Subquery (children 1) - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_5 diff --git a/parser/testdata/subquery_in_not_in_clause/query.sql b/parser/testdata/subquery_in_not_in_clause/query.sql deleted file mode 100644 index fa83f6481..000000000 --- a/parser/testdata/subquery_in_not_in_clause/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT number FROM numbers(10) WHERE number NOT IN (SELECT number FROM numbers(5)) diff --git a/parser/testdata/subquery_in_where/explain.txt b/parser/testdata/subquery_in_where/explain.txt deleted file mode 100644 index c8b79fe62..000000000 --- a/parser/testdata/subquery_in_where/explain.txt +++ /dev/null @@ -1,22 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier users - Function in (children 1) - ExpressionList (children 2) - Identifier id - Subquery (children 1) - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Identifier user_id - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier orders diff --git a/parser/testdata/subquery_in_where/query.sql b/parser/testdata/subquery_in_where/query.sql deleted file mode 100644 index 8390e3d41..000000000 --- a/parser/testdata/subquery_in_where/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM users WHERE id IN (SELECT user_id FROM orders) diff --git a/parser/testdata/subquery_with_where/explain.txt b/parser/testdata/subquery_with_where/explain.txt deleted file mode 100644 index a52e2eeaa..000000000 --- a/parser/testdata/subquery_with_where/explain.txt +++ /dev/null @@ -1,24 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 3) - ExpressionList (children 1) - Asterisk - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Subquery (children 1) - SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Identifier number (alias x) - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 - Function greater (children 1) - ExpressionList (children 2) - Identifier x - Literal UInt64_5 diff --git a/parser/testdata/subquery_with_where/query.sql b/parser/testdata/subquery_with_where/query.sql deleted file mode 100644 index 98851012b..000000000 --- a/parser/testdata/subquery_with_where/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT * FROM (SELECT number AS x FROM numbers(10)) WHERE x > 5 diff --git a/parser/testdata/substring/explain.txt b/parser/testdata/substring/explain.txt deleted file mode 100644 index 043d090af..000000000 --- a/parser/testdata/substring/explain.txt +++ 
/dev/null @@ -1,9 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function substring (children 1) - ExpressionList (children 3) - Literal \'hello\' - Literal UInt64_1 - Literal UInt64_3 diff --git a/parser/testdata/substring/query.sql b/parser/testdata/substring/query.sql deleted file mode 100644 index 74294292f..000000000 --- a/parser/testdata/substring/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT substring('hello', 1, 3) diff --git a/parser/testdata/subtractdays/explain.txt b/parser/testdata/subtractdays/explain.txt deleted file mode 100644 index 6a969555a..000000000 --- a/parser/testdata/subtractdays/explain.txt +++ /dev/null @@ -1,10 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function subtractDays (children 1) - ExpressionList (children 2) - Function toDate (children 1) - ExpressionList (children 1) - Literal \'2023-01-06\' - Literal UInt64_5 diff --git a/parser/testdata/subtractdays/query.sql b/parser/testdata/subtractdays/query.sql deleted file mode 100644 index a039b5b11..000000000 --- a/parser/testdata/subtractdays/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT subtractDays(toDate('2023-01-06'), 5) diff --git a/parser/testdata/sum/explain.txt b/parser/testdata/sum/explain.txt deleted file mode 100644 index 759aee997..000000000 --- a/parser/testdata/sum/explain.txt +++ /dev/null @@ -1,11 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Function sum (children 1) - ExpressionList (children 1) - Identifier amount - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier orders diff --git a/parser/testdata/sum/query.sql b/parser/testdata/sum/query.sql deleted file mode 100644 index e2902c598..000000000 --- a/parser/testdata/sum/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT sum(amount) FROM orders diff --git a/parser/testdata/sum_function/explain.txt b/parser/testdata/sum_function/explain.txt deleted file mode 100644 index 2b7b69b9d..000000000 --- a/parser/testdata/sum_function/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function sum (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/sum_function/query.sql b/parser/testdata/sum_function/query.sql deleted file mode 100644 index 5712f43db..000000000 --- a/parser/testdata/sum_function/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT sum(1) diff --git a/parser/testdata/sum_over_empty/explain.txt b/parser/testdata/sum_over_empty/explain.txt deleted file mode 100644 index acf414b73..000000000 --- a/parser/testdata/sum_over_empty/explain.txt +++ /dev/null @@ -1,15 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 2) - Identifier number - Function sum (children 2) - ExpressionList (children 1) - Identifier number - WindowDefinition - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/sum_over_empty/query.sql b/parser/testdata/sum_over_empty/query.sql deleted file mode 100644 index 0f06758e3..000000000 --- 
a/parser/testdata/sum_over_empty/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT number, sum(number) OVER () FROM numbers(10) diff --git a/parser/testdata/sum_over_order_by/explain.txt b/parser/testdata/sum_over_order_by/explain.txt deleted file mode 100644 index 70e67be03..000000000 --- a/parser/testdata/sum_over_order_by/explain.txt +++ /dev/null @@ -1,18 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 2) - Identifier number - Function sum (children 2) - ExpressionList (children 1) - Identifier number - WindowDefinition (children 1) - ExpressionList (children 1) - OrderByElement (children 1) - Identifier number - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/sum_over_order_by/query.sql b/parser/testdata/sum_over_order_by/query.sql deleted file mode 100644 index abe8948aa..000000000 --- a/parser/testdata/sum_over_order_by/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT number, sum(number) OVER (ORDER BY number) FROM numbers(10) diff --git a/parser/testdata/sumif_with_lambda/explain.txt b/parser/testdata/sumif_with_lambda/explain.txt deleted file mode 100644 index 87001252a..000000000 --- a/parser/testdata/sumif_with_lambda/explain.txt +++ /dev/null @@ -1,17 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Function sumIf (children 1) - ExpressionList (children 2) - Identifier number - Function greater (children 1) - ExpressionList (children 2) - Identifier number - Literal UInt64_5 - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_10 diff --git a/parser/testdata/sumif_with_lambda/query.sql b/parser/testdata/sumif_with_lambda/query.sql deleted file mode 100644 index c81854d6b..000000000 --- a/parser/testdata/sumif_with_lambda/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT sumIf(number, number > 5) FROM numbers(10) diff --git a/parser/testdata/summerge_uniqmerge/explain.txt b/parser/testdata/summerge_uniqmerge/explain.txt deleted file mode 100644 index a2f91b131..000000000 --- a/parser/testdata/summerge_uniqmerge/explain.txt +++ /dev/null @@ -1,20 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 4) - ExpressionList (children 3) - Identifier StartDate - Function sumMerge (alias Visits) (children 1) - ExpressionList (children 1) - Identifier Visits - Function uniqMerge (alias Users) (children 1) - ExpressionList (children 1) - Identifier Users - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - TableIdentifier basic_mv - ExpressionList (children 1) - Identifier StartDate - ExpressionList (children 1) - OrderByElement (children 1) - Identifier StartDate diff --git a/parser/testdata/summerge_uniqmerge/metadata.json b/parser/testdata/summerge_uniqmerge/metadata.json deleted file mode 100644 index a708465fe..000000000 --- a/parser/testdata/summerge_uniqmerge/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"source": "https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00040_aggregating_materialized_view.sql"} diff --git a/parser/testdata/summerge_uniqmerge/query.sql b/parser/testdata/summerge_uniqmerge/query.sql 
deleted file mode 100644 index 801fe41ed..000000000 --- a/parser/testdata/summerge_uniqmerge/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT StartDate, sumMerge(Visits) AS Visits, uniqMerge(Users) AS Users FROM basic_mv GROUP BY StartDate ORDER BY StartDate diff --git a/parser/testdata/sync_replica/explain.txt b/parser/testdata/sync_replica/explain.txt deleted file mode 100644 index 4375432bb..000000000 --- a/parser/testdata/sync_replica/explain.txt +++ /dev/null @@ -1,3 +0,0 @@ -SYSTEM query (children 2) - Identifier system - Identifier one diff --git a/parser/testdata/sync_replica/query.sql b/parser/testdata/sync_replica/query.sql deleted file mode 100644 index 30d67e32a..000000000 --- a/parser/testdata/sync_replica/query.sql +++ /dev/null @@ -1 +0,0 @@ -SYSTEM SYNC REPLICA system.one diff --git a/parser/testdata/todate/explain.txt b/parser/testdata/todate/explain.txt deleted file mode 100644 index e31a3a30d..000000000 --- a/parser/testdata/todate/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toDate (children 1) - ExpressionList (children 1) - Literal \'2023-01-01\' diff --git a/parser/testdata/todate/query.sql b/parser/testdata/todate/query.sql deleted file mode 100644 index e1fb9c2bb..000000000 --- a/parser/testdata/todate/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toDate('2023-01-01') diff --git a/parser/testdata/todatetime/explain.txt b/parser/testdata/todatetime/explain.txt deleted file mode 100644 index 7f054242f..000000000 --- a/parser/testdata/todatetime/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toDateTime (children 1) - ExpressionList (children 1) - Literal \'2023-01-01 12:00:00\' diff --git a/parser/testdata/todatetime/query.sql b/parser/testdata/todatetime/query.sql deleted file mode 100644 index eb9a7d7d9..000000000 --- a/parser/testdata/todatetime/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toDateTime('2023-01-01 12:00:00') diff --git a/parser/testdata/todatetime64/explain.txt b/parser/testdata/todatetime64/explain.txt deleted file mode 100644 index f96e59e22..000000000 --- a/parser/testdata/todatetime64/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toDateTime64 (children 1) - ExpressionList (children 2) - Literal \'2023-01-01 12:00:00.123\' - Literal UInt64_3 diff --git a/parser/testdata/todatetime64/query.sql b/parser/testdata/todatetime64/query.sql deleted file mode 100644 index 66fb5c0da..000000000 --- a/parser/testdata/todatetime64/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toDateTime64('2023-01-01 12:00:00.123', 3) diff --git a/parser/testdata/today/explain.txt b/parser/testdata/today/explain.txt deleted file mode 100644 index be0836632..000000000 --- a/parser/testdata/today/explain.txt +++ /dev/null @@ -1,6 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function today (children 1) - ExpressionList diff --git a/parser/testdata/today/query.sql b/parser/testdata/today/query.sql deleted file mode 100644 index a6b087c71..000000000 --- a/parser/testdata/today/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT today() diff --git a/parser/testdata/todayofmonth/explain.txt 
b/parser/testdata/todayofmonth/explain.txt deleted file mode 100644 index 0be56a29b..000000000 --- a/parser/testdata/todayofmonth/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toDayOfMonth (children 1) - ExpressionList (children 1) - Function now (children 1) - ExpressionList diff --git a/parser/testdata/todayofmonth/query.sql b/parser/testdata/todayofmonth/query.sql deleted file mode 100644 index 1edbfe374..000000000 --- a/parser/testdata/todayofmonth/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toDayOfMonth(now()) diff --git a/parser/testdata/todayofweek/explain.txt b/parser/testdata/todayofweek/explain.txt deleted file mode 100644 index 925a2aa08..000000000 --- a/parser/testdata/todayofweek/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toDayOfWeek (children 1) - ExpressionList (children 1) - Function now (children 1) - ExpressionList diff --git a/parser/testdata/todayofweek/query.sql b/parser/testdata/todayofweek/query.sql deleted file mode 100644 index 7dc578a04..000000000 --- a/parser/testdata/todayofweek/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toDayOfWeek(now()) diff --git a/parser/testdata/todecimal32/explain.txt b/parser/testdata/todecimal32/explain.txt deleted file mode 100644 index 6e11f7423..000000000 --- a/parser/testdata/todecimal32/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toDecimal32 (children 1) - ExpressionList (children 2) - Literal Float64_123.456 - Literal UInt64_2 diff --git a/parser/testdata/todecimal32/query.sql b/parser/testdata/todecimal32/query.sql deleted file mode 100644 index 214d3ef9f..000000000 --- a/parser/testdata/todecimal32/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toDecimal32(123.456, 2) diff --git a/parser/testdata/todecimal64/explain.txt b/parser/testdata/todecimal64/explain.txt deleted file mode 100644 index b5a03e4af..000000000 --- a/parser/testdata/todecimal64/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toDecimal64 (children 1) - ExpressionList (children 2) - Literal Float64_123.456 - Literal UInt64_2 diff --git a/parser/testdata/todecimal64/query.sql b/parser/testdata/todecimal64/query.sql deleted file mode 100644 index 96218d828..000000000 --- a/parser/testdata/todecimal64/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toDecimal64(123.456, 2) diff --git a/parser/testdata/tofixedstring/explain.txt b/parser/testdata/tofixedstring/explain.txt deleted file mode 100644 index 3a721770b..000000000 --- a/parser/testdata/tofixedstring/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toFixedString (children 1) - ExpressionList (children 2) - Literal \'hello\' - Literal UInt64_10 diff --git a/parser/testdata/tofixedstring/query.sql b/parser/testdata/tofixedstring/query.sql deleted file mode 100644 index 33103ce34..000000000 --- a/parser/testdata/tofixedstring/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toFixedString('hello', 10) diff --git a/parser/testdata/tofloat32/explain.txt 
b/parser/testdata/tofloat32/explain.txt deleted file mode 100644 index ecd3545e4..000000000 --- a/parser/testdata/tofloat32/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toFloat32 (children 1) - ExpressionList (children 1) - Literal Float64_123.456 diff --git a/parser/testdata/tofloat32/query.sql b/parser/testdata/tofloat32/query.sql deleted file mode 100644 index 4736e0f02..000000000 --- a/parser/testdata/tofloat32/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toFloat32(123.456) diff --git a/parser/testdata/tofloat64/explain.txt b/parser/testdata/tofloat64/explain.txt deleted file mode 100644 index bb0e22488..000000000 --- a/parser/testdata/tofloat64/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toFloat64 (children 1) - ExpressionList (children 1) - Literal Float64_123.456 diff --git a/parser/testdata/tofloat64/query.sql b/parser/testdata/tofloat64/query.sql deleted file mode 100644 index fdad261b8..000000000 --- a/parser/testdata/tofloat64/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toFloat64(123.456) diff --git a/parser/testdata/tohour/explain.txt b/parser/testdata/tohour/explain.txt deleted file mode 100644 index ef72105e5..000000000 --- a/parser/testdata/tohour/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toHour (children 1) - ExpressionList (children 1) - Function now (children 1) - ExpressionList diff --git a/parser/testdata/tohour/query.sql b/parser/testdata/tohour/query.sql deleted file mode 100644 index 447e9af55..000000000 --- a/parser/testdata/tohour/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toHour(now()) diff --git a/parser/testdata/toint16/explain.txt b/parser/testdata/toint16/explain.txt deleted file mode 100644 index 76e96f8f6..000000000 --- a/parser/testdata/toint16/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toInt16 (children 1) - ExpressionList (children 1) - Literal UInt64_123 diff --git a/parser/testdata/toint16/query.sql b/parser/testdata/toint16/query.sql deleted file mode 100644 index a871427b7..000000000 --- a/parser/testdata/toint16/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toInt16(123) diff --git a/parser/testdata/toint32/explain.txt b/parser/testdata/toint32/explain.txt deleted file mode 100644 index 96fa45a64..000000000 --- a/parser/testdata/toint32/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toInt32 (children 1) - ExpressionList (children 1) - Literal UInt64_123 diff --git a/parser/testdata/toint32/query.sql b/parser/testdata/toint32/query.sql deleted file mode 100644 index 389c7f39e..000000000 --- a/parser/testdata/toint32/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toInt32(123) diff --git a/parser/testdata/toint64/explain.txt b/parser/testdata/toint64/explain.txt deleted file mode 100644 index fdd4e8f3e..000000000 --- a/parser/testdata/toint64/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - 
Function toInt64 (children 1) - ExpressionList (children 1) - Literal UInt64_123 diff --git a/parser/testdata/toint64/query.sql b/parser/testdata/toint64/query.sql deleted file mode 100644 index 1eb002042..000000000 --- a/parser/testdata/toint64/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toInt64(123) diff --git a/parser/testdata/toint8/explain.txt b/parser/testdata/toint8/explain.txt deleted file mode 100644 index 52facd445..000000000 --- a/parser/testdata/toint8/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toInt8 (children 1) - ExpressionList (children 1) - Literal UInt64_123 diff --git a/parser/testdata/toint8/query.sql b/parser/testdata/toint8/query.sql deleted file mode 100644 index 6c3d072cd..000000000 --- a/parser/testdata/toint8/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toInt8(123) diff --git a/parser/testdata/tointervalday/explain.txt b/parser/testdata/tointervalday/explain.txt deleted file mode 100644 index 87a0c4754..000000000 --- a/parser/testdata/tointervalday/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toIntervalDay (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/tointervalday/query.sql b/parser/testdata/tointervalday/query.sql deleted file mode 100644 index 976626691..000000000 --- a/parser/testdata/tointervalday/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toIntervalDay(1) diff --git a/parser/testdata/tointervalmonth/explain.txt b/parser/testdata/tointervalmonth/explain.txt deleted file mode 100644 index eb04b312c..000000000 --- a/parser/testdata/tointervalmonth/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toIntervalMonth (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/tointervalmonth/query.sql b/parser/testdata/tointervalmonth/query.sql deleted file mode 100644 index 85da35606..000000000 --- a/parser/testdata/tointervalmonth/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toIntervalMonth(1) diff --git a/parser/testdata/toipv4/explain.txt b/parser/testdata/toipv4/explain.txt deleted file mode 100644 index 76e4807f1..000000000 --- a/parser/testdata/toipv4/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toIPv4 (children 1) - ExpressionList (children 1) - Literal \'192.168.1.1\' diff --git a/parser/testdata/toipv4/query.sql b/parser/testdata/toipv4/query.sql deleted file mode 100644 index bfb7f9304..000000000 --- a/parser/testdata/toipv4/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toIPv4('192.168.1.1') diff --git a/parser/testdata/toipv6/explain.txt b/parser/testdata/toipv6/explain.txt deleted file mode 100644 index 5f44ee1c6..000000000 --- a/parser/testdata/toipv6/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toIPv6 (children 1) - ExpressionList (children 1) - Literal \'::1\' diff --git a/parser/testdata/toipv6/query.sql b/parser/testdata/toipv6/query.sql deleted file mode 100644 index fb2caee25..000000000 --- a/parser/testdata/toipv6/query.sql +++ 
/dev/null @@ -1 +0,0 @@ -SELECT toIPv6('::1') diff --git a/parser/testdata/tominute/explain.txt b/parser/testdata/tominute/explain.txt deleted file mode 100644 index 3fb90eda0..000000000 --- a/parser/testdata/tominute/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toMinute (children 1) - ExpressionList (children 1) - Function now (children 1) - ExpressionList diff --git a/parser/testdata/tominute/query.sql b/parser/testdata/tominute/query.sql deleted file mode 100644 index 3e9775a8f..000000000 --- a/parser/testdata/tominute/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toMinute(now()) diff --git a/parser/testdata/tomonday/explain.txt b/parser/testdata/tomonday/explain.txt deleted file mode 100644 index d950178fd..000000000 --- a/parser/testdata/tomonday/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toMonday (children 1) - ExpressionList (children 1) - Function now (children 1) - ExpressionList diff --git a/parser/testdata/tomonday/query.sql b/parser/testdata/tomonday/query.sql deleted file mode 100644 index 90bb7c2df..000000000 --- a/parser/testdata/tomonday/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toMonday(now()) diff --git a/parser/testdata/tomonth/explain.txt b/parser/testdata/tomonth/explain.txt deleted file mode 100644 index deec1427d..000000000 --- a/parser/testdata/tomonth/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toMonth (children 1) - ExpressionList (children 1) - Function now (children 1) - ExpressionList diff --git a/parser/testdata/tomonth/query.sql b/parser/testdata/tomonth/query.sql deleted file mode 100644 index f7b82df13..000000000 --- a/parser/testdata/tomonth/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toMonth(now()) diff --git a/parser/testdata/tonullable/explain.txt b/parser/testdata/tonullable/explain.txt deleted file mode 100644 index db84cad67..000000000 --- a/parser/testdata/tonullable/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toNullable (children 1) - ExpressionList (children 1) - Literal UInt64_1 diff --git a/parser/testdata/tonullable/query.sql b/parser/testdata/tonullable/query.sql deleted file mode 100644 index 6716c738c..000000000 --- a/parser/testdata/tonullable/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toNullable(1) diff --git a/parser/testdata/topk_parametric/explain.txt b/parser/testdata/topk_parametric/explain.txt deleted file mode 100644 index a20becc92..000000000 --- a/parser/testdata/topk_parametric/explain.txt +++ /dev/null @@ -1,15 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 2) - ExpressionList (children 1) - Function topK (children 2) - ExpressionList (children 1) - Identifier number - ExpressionList (children 1) - Literal UInt64_5 - TablesInSelectQuery (children 1) - TablesInSelectQueryElement (children 1) - TableExpression (children 1) - Function numbers (children 1) - ExpressionList (children 1) - Literal UInt64_100 diff --git a/parser/testdata/topk_parametric/query.sql b/parser/testdata/topk_parametric/query.sql deleted file mode 100644 index 6ab67013e..000000000 --- 
a/parser/testdata/topk_parametric/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT topK(5)(number) FROM numbers(100) diff --git a/parser/testdata/topleveldomain/explain.txt b/parser/testdata/topleveldomain/explain.txt deleted file mode 100644 index 783a8e196..000000000 --- a/parser/testdata/topleveldomain/explain.txt +++ /dev/null @@ -1,7 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function topLevelDomain (children 1) - ExpressionList (children 1) - Literal \'https://example.com\' diff --git a/parser/testdata/topleveldomain/query.sql b/parser/testdata/topleveldomain/query.sql deleted file mode 100644 index 85b884b2f..000000000 --- a/parser/testdata/topleveldomain/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT topLevelDomain('https://example.com') diff --git a/parser/testdata/tosecond/explain.txt b/parser/testdata/tosecond/explain.txt deleted file mode 100644 index febd082d4..000000000 --- a/parser/testdata/tosecond/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toSecond (children 1) - ExpressionList (children 1) - Function now (children 1) - ExpressionList diff --git a/parser/testdata/tosecond/query.sql b/parser/testdata/tosecond/query.sql deleted file mode 100644 index 9a6de3553..000000000 --- a/parser/testdata/tosecond/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toSecond(now()) diff --git a/parser/testdata/tostartofday/explain.txt b/parser/testdata/tostartofday/explain.txt deleted file mode 100644 index ad041780e..000000000 --- a/parser/testdata/tostartofday/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toStartOfDay (children 1) - ExpressionList (children 1) - Function now (children 1) - ExpressionList diff --git a/parser/testdata/tostartofday/query.sql b/parser/testdata/tostartofday/query.sql deleted file mode 100644 index c18b6b9bf..000000000 --- a/parser/testdata/tostartofday/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toStartOfDay(now()) diff --git a/parser/testdata/tostartofhour/explain.txt b/parser/testdata/tostartofhour/explain.txt deleted file mode 100644 index 7e99976f9..000000000 --- a/parser/testdata/tostartofhour/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toStartOfHour (children 1) - ExpressionList (children 1) - Function now (children 1) - ExpressionList diff --git a/parser/testdata/tostartofhour/query.sql b/parser/testdata/tostartofhour/query.sql deleted file mode 100644 index 45f8c800a..000000000 --- a/parser/testdata/tostartofhour/query.sql +++ /dev/null @@ -1 +0,0 @@ -SELECT toStartOfHour(now()) diff --git a/parser/testdata/tostartofminute/explain.txt b/parser/testdata/tostartofminute/explain.txt deleted file mode 100644 index 15e99859a..000000000 --- a/parser/testdata/tostartofminute/explain.txt +++ /dev/null @@ -1,8 +0,0 @@ -SelectWithUnionQuery (children 1) - ExpressionList (children 1) - SelectQuery (children 1) - ExpressionList (children 1) - Function toStartOfMinute (children 1) - ExpressionList (children 1) - Function now (children 1) - ExpressionList diff --git a/parser/testdata/tostartofminute/query.sql b/parser/testdata/tostartofminute/query.sql deleted file mode 100644 index 
4d3c8d226..000000000
--- a/parser/testdata/tostartofminute/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT toStartOfMinute(now())
diff --git a/parser/testdata/tostartofmonth/explain.txt b/parser/testdata/tostartofmonth/explain.txt
deleted file mode 100644
index cf33ef257..000000000
--- a/parser/testdata/tostartofmonth/explain.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function toStartOfMonth (children 1)
-     ExpressionList (children 1)
-      Function now (children 1)
-       ExpressionList
diff --git a/parser/testdata/tostartofmonth/query.sql b/parser/testdata/tostartofmonth/query.sql
deleted file mode 100644
index 75715140a..000000000
--- a/parser/testdata/tostartofmonth/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT toStartOfMonth(now())
diff --git a/parser/testdata/tostartofweek/explain.txt b/parser/testdata/tostartofweek/explain.txt
deleted file mode 100644
index 78779b8b9..000000000
--- a/parser/testdata/tostartofweek/explain.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function toStartOfWeek (children 1)
-     ExpressionList (children 1)
-      Function now (children 1)
-       ExpressionList
diff --git a/parser/testdata/tostartofweek/query.sql b/parser/testdata/tostartofweek/query.sql
deleted file mode 100644
index 1a68551a9..000000000
--- a/parser/testdata/tostartofweek/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT toStartOfWeek(now())
diff --git a/parser/testdata/tostartofyear/explain.txt b/parser/testdata/tostartofyear/explain.txt
deleted file mode 100644
index acf864fad..000000000
--- a/parser/testdata/tostartofyear/explain.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function toStartOfYear (children 1)
-     ExpressionList (children 1)
-      Function now (children 1)
-       ExpressionList
diff --git a/parser/testdata/tostartofyear/query.sql b/parser/testdata/tostartofyear/query.sql
deleted file mode 100644
index 331c6df00..000000000
--- a/parser/testdata/tostartofyear/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT toStartOfYear(now())
diff --git a/parser/testdata/tostring/explain.txt b/parser/testdata/tostring/explain.txt
deleted file mode 100644
index 29bf4516f..000000000
--- a/parser/testdata/tostring/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function toString (children 1)
-     ExpressionList (children 1)
-      Literal UInt64_123
diff --git a/parser/testdata/tostring/query.sql b/parser/testdata/tostring/query.sql
deleted file mode 100644
index ea51cb4e6..000000000
--- a/parser/testdata/tostring/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT toString(123)
diff --git a/parser/testdata/tostring_conversion/explain.txt b/parser/testdata/tostring_conversion/explain.txt
deleted file mode 100644
index 29bf4516f..000000000
--- a/parser/testdata/tostring_conversion/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function toString (children 1)
-     ExpressionList (children 1)
-      Literal UInt64_123
diff --git a/parser/testdata/tostring_conversion/query.sql b/parser/testdata/tostring_conversion/query.sql
deleted file mode 100644
index ea51cb4e6..000000000
--- a/parser/testdata/tostring_conversion/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT toString(123)
diff --git a/parser/testdata/totypename_array/explain.txt b/parser/testdata/totypename_array/explain.txt
deleted file mode 100644
index 30e4f561c..000000000
--- a/parser/testdata/totypename_array/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function toTypeName (children 1)
-     ExpressionList (children 1)
-      Literal Array_[UInt64_1, UInt64_2, UInt64_3]
diff --git a/parser/testdata/totypename_array/query.sql b/parser/testdata/totypename_array/query.sql
deleted file mode 100644
index fae415932..000000000
--- a/parser/testdata/totypename_array/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT toTypeName([1, 2, 3])
diff --git a/parser/testdata/totypename_int/explain.txt b/parser/testdata/totypename_int/explain.txt
deleted file mode 100644
index c769ae5bf..000000000
--- a/parser/testdata/totypename_int/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function toTypeName (children 1)
-     ExpressionList (children 1)
-      Literal UInt64_1
diff --git a/parser/testdata/totypename_int/query.sql b/parser/testdata/totypename_int/query.sql
deleted file mode 100644
index 56bd9b0f5..000000000
--- a/parser/testdata/totypename_int/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT toTypeName(1)
diff --git a/parser/testdata/totypename_string/explain.txt b/parser/testdata/totypename_string/explain.txt
deleted file mode 100644
index ec09787ca..000000000
--- a/parser/testdata/totypename_string/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function toTypeName (children 1)
-     ExpressionList (children 1)
-      Literal \'hello\'
diff --git a/parser/testdata/totypename_string/query.sql b/parser/testdata/totypename_string/query.sql
deleted file mode 100644
index 548162f63..000000000
--- a/parser/testdata/totypename_string/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT toTypeName('hello')
diff --git a/parser/testdata/touint16/explain.txt b/parser/testdata/touint16/explain.txt
deleted file mode 100644
index 5d1b34651..000000000
--- a/parser/testdata/touint16/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function toUInt16 (children 1)
-     ExpressionList (children 1)
-      Literal UInt64_123
diff --git a/parser/testdata/touint16/query.sql b/parser/testdata/touint16/query.sql
deleted file mode 100644
index cadca134d..000000000
--- a/parser/testdata/touint16/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT toUInt16(123)
diff --git a/parser/testdata/touint32/explain.txt b/parser/testdata/touint32/explain.txt
deleted file mode 100644
index b270d3ffa..000000000
--- a/parser/testdata/touint32/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function toUInt32 (children 1)
-     ExpressionList (children 1)
-      Literal UInt64_123
diff --git a/parser/testdata/touint32/query.sql b/parser/testdata/touint32/query.sql
deleted file mode 100644
index 0f63b73f1..000000000
--- a/parser/testdata/touint32/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT toUInt32(123)
diff --git a/parser/testdata/touint64/explain.txt b/parser/testdata/touint64/explain.txt
deleted file mode 100644
index 706a96b2f..000000000
--- a/parser/testdata/touint64/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function toUInt64 (children 1)
-     ExpressionList (children 1)
-      Literal UInt64_123
diff --git a/parser/testdata/touint64/query.sql b/parser/testdata/touint64/query.sql
deleted file mode 100644
index 3d16bcfa0..000000000
--- a/parser/testdata/touint64/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT toUInt64(123)
diff --git a/parser/testdata/touint8/explain.txt b/parser/testdata/touint8/explain.txt
deleted file mode 100644
index 210571977..000000000
--- a/parser/testdata/touint8/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function toUInt8 (children 1)
-     ExpressionList (children 1)
-      Literal UInt64_123
diff --git a/parser/testdata/touint8/query.sql b/parser/testdata/touint8/query.sql
deleted file mode 100644
index 1f184b456..000000000
--- a/parser/testdata/touint8/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT toUInt8(123)
diff --git a/parser/testdata/tounixtimestamp/explain.txt b/parser/testdata/tounixtimestamp/explain.txt
deleted file mode 100644
index 5fc588e27..000000000
--- a/parser/testdata/tounixtimestamp/explain.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function toUnixTimestamp (children 1)
-     ExpressionList (children 1)
-      Function now (children 1)
-       ExpressionList
diff --git a/parser/testdata/tounixtimestamp/query.sql b/parser/testdata/tounixtimestamp/query.sql
deleted file mode 100644
index 1c3d785d2..000000000
--- a/parser/testdata/tounixtimestamp/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT toUnixTimestamp(now())
diff --git a/parser/testdata/touuid/explain.txt b/parser/testdata/touuid/explain.txt
deleted file mode 100644
index a3a148318..000000000
--- a/parser/testdata/touuid/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function toUUID (children 1)
-     ExpressionList (children 1)
-      Literal \'00000000-0000-0000-0000-000000000000\'
diff --git a/parser/testdata/touuid/query.sql b/parser/testdata/touuid/query.sql
deleted file mode 100644
index 6a7865c68..000000000
--- a/parser/testdata/touuid/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT toUUID('00000000-0000-0000-0000-000000000000')
diff --git a/parser/testdata/toyear/explain.txt b/parser/testdata/toyear/explain.txt
deleted file mode 100644
index 344e289b7..000000000
--- a/parser/testdata/toyear/explain.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function toYear (children 1)
-     ExpressionList (children 1)
-      Function now (children 1)
-       ExpressionList
diff --git a/parser/testdata/toyear/query.sql b/parser/testdata/toyear/query.sql
deleted file mode 100644
index b8de800bf..000000000
--- a/parser/testdata/toyear/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT toYear(now())
diff --git a/parser/testdata/transform_function/explain.txt b/parser/testdata/transform_function/explain.txt
deleted file mode 100644
index 9095d170a..000000000
--- a/parser/testdata/transform_function/explain.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function transform (children 1)
-     ExpressionList (children 4)
-      Literal UInt64_1
-      Literal Array_[UInt64_1, UInt64_2]
-      Literal Array_[UInt64_10, UInt64_20]
-      Literal UInt64_0
diff --git a/parser/testdata/transform_function/query.sql b/parser/testdata/transform_function/query.sql
deleted file mode 100644
index 86db34be5..000000000
--- a/parser/testdata/transform_function/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT transform(1, [1, 2], [10, 20], 0)
diff --git a/parser/testdata/trim/explain.txt b/parser/testdata/trim/explain.txt
deleted file mode 100644
index a41621d41..000000000
--- a/parser/testdata/trim/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function trimBoth (children 1)
-     ExpressionList (children 1)
-      Literal \' hello \'
diff --git a/parser/testdata/trim/query.sql b/parser/testdata/trim/query.sql
deleted file mode 100644
index 2258205b7..000000000
--- a/parser/testdata/trim/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT trim(' hello ')
diff --git a/parser/testdata/truncate_table/explain.txt b/parser/testdata/truncate_table/explain.txt
deleted file mode 100644
index b901a5e5b..000000000
--- a/parser/testdata/truncate_table/explain.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-TruncateQuery test (children 1)
- Identifier test
diff --git a/parser/testdata/truncate_table/query.sql b/parser/testdata/truncate_table/query.sql
deleted file mode 100644
index 5a7a8e815..000000000
--- a/parser/testdata/truncate_table/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-TRUNCATE TABLE test
diff --git a/parser/testdata/truncate_table_ddl/explain.txt b/parser/testdata/truncate_table_ddl/explain.txt
deleted file mode 100644
index ed163a101..000000000
--- a/parser/testdata/truncate_table_ddl/explain.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-TruncateQuery test_table (children 1)
- Identifier test_table
diff --git a/parser/testdata/truncate_table_ddl/query.sql b/parser/testdata/truncate_table_ddl/query.sql
deleted file mode 100644
index b34cabed6..000000000
--- a/parser/testdata/truncate_table_ddl/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-TRUNCATE TABLE test_table
diff --git a/parser/testdata/tuple_element_dot/explain.txt b/parser/testdata/tuple_element_dot/explain.txt
deleted file mode 100644
index d4799404c..000000000
--- a/parser/testdata/tuple_element_dot/explain.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function tupleElement (children 1)
-     ExpressionList (children 2)
-      Function tuple (children 1)
-       ExpressionList (children 3)
-        Literal UInt64_1
-        Literal UInt64_2
-        Literal UInt64_3
-      Literal UInt64_1
diff --git a/parser/testdata/tuple_element_dot/query.sql b/parser/testdata/tuple_element_dot/query.sql
deleted file mode 100644
index 151a93d4e..000000000
--- a/parser/testdata/tuple_element_dot/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT tuple(1, 2, 3).1
diff --git a/parser/testdata/tuple_element_subscript/explain.txt b/parser/testdata/tuple_element_subscript/explain.txt
deleted file mode 100644
index a07b08ed3..000000000
--- a/parser/testdata/tuple_element_subscript/explain.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function arrayElement (children 1)
-     ExpressionList (children 2)
-      Function tuple (children 1)
-       ExpressionList (children 3)
-        Literal UInt64_1
-        Literal UInt64_2
-        Literal UInt64_3
-      Literal UInt64_1
diff --git a/parser/testdata/tuple_element_subscript/query.sql b/parser/testdata/tuple_element_subscript/query.sql
deleted file mode 100644
index 07e4269c7..000000000
--- a/parser/testdata/tuple_element_subscript/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT tuple(1, 2, 3)[1]
diff --git a/parser/testdata/tuple_function/explain.txt b/parser/testdata/tuple_function/explain.txt
deleted file mode 100644
index 6a0590a02..000000000
--- a/parser/testdata/tuple_function/explain.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function tuple (children 1)
-     ExpressionList (children 3)
-      Literal UInt64_1
-      Literal UInt64_2
-      Literal UInt64_3
diff --git a/parser/testdata/tuple_function/query.sql b/parser/testdata/tuple_function/query.sql
deleted file mode 100644
index aa9a7eed1..000000000
--- a/parser/testdata/tuple_function/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT tuple(1, 2, 3)
diff --git a/parser/testdata/tuple_literal/explain.txt b/parser/testdata/tuple_literal/explain.txt
deleted file mode 100644
index d763dbc37..000000000
--- a/parser/testdata/tuple_literal/explain.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Literal Tuple_(UInt64_1, \'a\')
diff --git a/parser/testdata/tuple_literal/query.sql b/parser/testdata/tuple_literal/query.sql
deleted file mode 100644
index eef3dd192..000000000
--- a/parser/testdata/tuple_literal/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT (1, 'a')
diff --git a/parser/testdata/unary_minus/explain.txt b/parser/testdata/unary_minus/explain.txt
deleted file mode 100644
index 671943f97..000000000
--- a/parser/testdata/unary_minus/explain.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Literal Int64_-5
diff --git a/parser/testdata/unary_minus/query.sql b/parser/testdata/unary_minus/query.sql
deleted file mode 100644
index d281bcacc..000000000
--- a/parser/testdata/unary_minus/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT -5
diff --git a/parser/testdata/unhex/explain.txt b/parser/testdata/unhex/explain.txt
deleted file mode 100644
index 95ed20dc3..000000000
--- a/parser/testdata/unhex/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function unhex (children 1)
-     ExpressionList (children 1)
-      Literal \'68656C6C6F\'
diff --git a/parser/testdata/unhex/query.sql b/parser/testdata/unhex/query.sql
deleted file mode 100644
index 2b2b8fc01..000000000
--- a/parser/testdata/unhex/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT unhex('68656C6C6F')
diff --git a/parser/testdata/union_all/explain.txt b/parser/testdata/union_all/explain.txt
deleted file mode 100644
index 8aadabac2..000000000
--- a/parser/testdata/union_all/explain.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 2)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Literal UInt64_1
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Literal UInt64_2
diff --git a/parser/testdata/union_all/query.sql b/parser/testdata/union_all/query.sql
deleted file mode 100644
index 7679fc93b..000000000
--- a/parser/testdata/union_all/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 1 UNION ALL SELECT 2
diff --git a/parser/testdata/union_distinct/explain.txt b/parser/testdata/union_distinct/explain.txt
deleted file mode 100644
index a2d3a7633..000000000
--- a/parser/testdata/union_distinct/explain.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 2)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Literal UInt64_1
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Literal UInt64_1
diff --git a/parser/testdata/union_distinct/query.sql b/parser/testdata/union_distinct/query.sql
deleted file mode 100644
index d17eac58f..000000000
--- a/parser/testdata/union_distinct/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 1 UNION DISTINCT SELECT 1
diff --git a/parser/testdata/uniq_sum_aggregate/explain.txt b/parser/testdata/uniq_sum_aggregate/explain.txt
deleted file mode 100644
index d241ba529..000000000
--- a/parser/testdata/uniq_sum_aggregate/explain.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 3)
-   ExpressionList (children 2)
-    Function uniq (children 1)
-     ExpressionList (children 1)
-      Identifier UserID
-    Function sum (children 1)
-     ExpressionList (children 1)
-      Identifier Sign
-   TablesInSelectQuery (children 1)
-    TablesInSelectQueryElement (children 1)
-     TableExpression (children 1)
-      TableIdentifier test.visits
-   Function equals (children 1)
-    ExpressionList (children 2)
-     Identifier CounterID
-     Literal UInt64_942285
diff --git a/parser/testdata/uniq_sum_aggregate/metadata.json b/parser/testdata/uniq_sum_aggregate/metadata.json
deleted file mode 100644
index 0d340de78..000000000
--- a/parser/testdata/uniq_sum_aggregate/metadata.json
+++ /dev/null
@@ -1 +0,0 @@
-{"todo": true, "source": "https://github.com/ClickHouse/ClickHouse/blob/master/tests/queries/0_stateless/00021_1_select_with_in.sql"}
diff --git a/parser/testdata/uniq_sum_aggregate/query.sql b/parser/testdata/uniq_sum_aggregate/query.sql
deleted file mode 100644
index 45acc1cca..000000000
--- a/parser/testdata/uniq_sum_aggregate/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT uniq(UserID), sum(Sign) FROM test.visits WHERE CounterID = 942285
diff --git a/parser/testdata/upper/explain.txt b/parser/testdata/upper/explain.txt
deleted file mode 100644
index 5d0605a3a..000000000
--- a/parser/testdata/upper/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function upper (children 1)
-     ExpressionList (children 1)
-      Literal \'hello\'
diff --git a/parser/testdata/upper/query.sql b/parser/testdata/upper/query.sql
deleted file mode 100644
index d266e0c36..000000000
--- a/parser/testdata/upper/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT upper('hello')
diff --git a/parser/testdata/uptime/explain.txt b/parser/testdata/uptime/explain.txt
deleted file mode 100644
index 7cdb9fb88..000000000
--- a/parser/testdata/uptime/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function uptime (children 1)
-     ExpressionList
diff --git a/parser/testdata/uptime/query.sql b/parser/testdata/uptime/query.sql
deleted file mode 100644
index ddc5d47ec..000000000
--- a/parser/testdata/uptime/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT uptime()
diff --git a/parser/testdata/use_database/explain.txt b/parser/testdata/use_database/explain.txt
deleted file mode 100644
index ba3949f06..000000000
--- a/parser/testdata/use_database/explain.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-UseQuery mydb (children 1)
- Identifier mydb
diff --git a/parser/testdata/use_database/query.sql b/parser/testdata/use_database/query.sql
deleted file mode 100644
index 9de251f87..000000000
--- a/parser/testdata/use_database/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-USE mydb
diff --git a/parser/testdata/use_default/explain.txt b/parser/testdata/use_default/explain.txt
deleted file mode 100644
index 54c11811c..000000000
--- a/parser/testdata/use_default/explain.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-UseQuery default (children 1)
- Identifier default
diff --git a/parser/testdata/use_default/query.sql b/parser/testdata/use_default/query.sql
deleted file mode 100644
index 4d9d91aa2..000000000
--- a/parser/testdata/use_default/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-USE default
diff --git a/parser/testdata/use_system/explain.txt b/parser/testdata/use_system/explain.txt
deleted file mode 100644
index c70fa3b42..000000000
--- a/parser/testdata/use_system/explain.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-UseQuery system (children 1)
- Identifier system
diff --git a/parser/testdata/use_system/query.sql b/parser/testdata/use_system/query.sql
deleted file mode 100644
index 85fd91602..000000000
--- a/parser/testdata/use_system/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-USE system
diff --git a/parser/testdata/uuidnumtostring/explain.txt b/parser/testdata/uuidnumtostring/explain.txt
deleted file mode 100644
index 87fabaf71..000000000
--- a/parser/testdata/uuidnumtostring/explain.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function UUIDNumToString (children 1)
-     ExpressionList (children 1)
-      Function toFixedString (children 1)
-       ExpressionList (children 2)
-        Literal \'0000000000000000\'
-        Literal UInt64_16
diff --git a/parser/testdata/uuidnumtostring/query.sql b/parser/testdata/uuidnumtostring/query.sql
deleted file mode 100644
index 3c8686abe..000000000
--- a/parser/testdata/uuidnumtostring/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT UUIDNumToString(toFixedString('0000000000000000', 16))
diff --git a/parser/testdata/uuidstringtonum/explain.txt b/parser/testdata/uuidstringtonum/explain.txt
deleted file mode 100644
index b36cf4738..000000000
--- a/parser/testdata/uuidstringtonum/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function UUIDStringToNum (children 1)
-     ExpressionList (children 1)
-      Literal \'00000000-0000-0000-0000-000000000000\'
diff --git a/parser/testdata/uuidstringtonum/query.sql b/parser/testdata/uuidstringtonum/query.sql
deleted file mode 100644
index 0e9ac1ae9..000000000
--- a/parser/testdata/uuidstringtonum/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT UUIDStringToNum('00000000-0000-0000-0000-000000000000')
diff --git a/parser/testdata/version/explain.txt b/parser/testdata/version/explain.txt
deleted file mode 100644
index c4d7e8958..000000000
--- a/parser/testdata/version/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function version (children 1)
-     ExpressionList
diff --git a/parser/testdata/version/query.sql b/parser/testdata/version/query.sql
deleted file mode 100644
index f9962e2e9..000000000
--- a/parser/testdata/version/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT version()
diff --git a/parser/testdata/where_and_condition/explain.txt b/parser/testdata/where_and_condition/explain.txt
deleted file mode 100644
index 54a547106..000000000
--- a/parser/testdata/where_and_condition/explain.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 2)
-   ExpressionList (children 1)
-    Literal UInt64_1
-   Function and (children 1)
-    ExpressionList (children 2)
-     Function greater (children 1)
-      ExpressionList (children 2)
-       Literal UInt64_1
-       Literal UInt64_0
-     Function less (children 1)
-      ExpressionList (children 2)
-       Literal UInt64_2
-       Literal UInt64_3
diff --git a/parser/testdata/where_and_condition/query.sql b/parser/testdata/where_and_condition/query.sql
deleted file mode 100644
index cdc17fe96..000000000
--- a/parser/testdata/where_and_condition/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 1 WHERE 1 > 0 AND 2 < 3
diff --git a/parser/testdata/where_between/explain.txt b/parser/testdata/where_between/explain.txt
deleted file mode 100644
index 09e8ec996..000000000
--- a/parser/testdata/where_between/explain.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 2)
-   ExpressionList (children 1)
-    Literal UInt64_1
-   Function and (children 1)
-    ExpressionList (children 2)
-     Function greaterOrEquals (children 1)
-      ExpressionList (children 2)
-       Literal UInt64_1
-       Literal UInt64_0
-     Function lessOrEquals (children 1)
-      ExpressionList (children 2)
-       Literal UInt64_1
-       Literal UInt64_10
diff --git a/parser/testdata/where_between/query.sql b/parser/testdata/where_between/query.sql
deleted file mode 100644
index 906143c2f..000000000
--- a/parser/testdata/where_between/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 1 WHERE 1 BETWEEN 0 AND 10
diff --git a/parser/testdata/where_equality/explain.txt b/parser/testdata/where_equality/explain.txt
deleted file mode 100644
index d79ac2ace..000000000
--- a/parser/testdata/where_equality/explain.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 2)
-   ExpressionList (children 1)
-    Literal UInt64_1
-   Function equals (children 1)
-    ExpressionList (children 2)
-     Literal UInt64_1
-     Literal UInt64_1
diff --git a/parser/testdata/where_equality/query.sql b/parser/testdata/where_equality/query.sql
deleted file mode 100644
index 9599194b7..000000000
--- a/parser/testdata/where_equality/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 1 WHERE 1 = 1
diff --git a/parser/testdata/where_in_list/explain.txt b/parser/testdata/where_in_list/explain.txt
deleted file mode 100644
index e422f8782..000000000
--- a/parser/testdata/where_in_list/explain.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 2)
-   ExpressionList (children 1)
-    Literal UInt64_1
-   Function in (children 1)
-    ExpressionList (children 2)
-     Literal UInt64_1
-     Literal Tuple_(UInt64_1, UInt64_2, UInt64_3)
diff --git a/parser/testdata/where_in_list/query.sql b/parser/testdata/where_in_list/query.sql
deleted file mode 100644
index b1360d92f..000000000
--- a/parser/testdata/where_in_list/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 1 WHERE 1 IN (1, 2, 3)
diff --git a/parser/testdata/where_is_not_null/explain.txt b/parser/testdata/where_is_not_null/explain.txt
deleted file mode 100644
index 215c53ea9..000000000
--- a/parser/testdata/where_is_not_null/explain.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 2)
-   ExpressionList (children 1)
-    Literal UInt64_1
-   Function isNotNull (children 1)
-    ExpressionList (children 1)
-     Literal UInt64_1
diff --git a/parser/testdata/where_is_not_null/query.sql b/parser/testdata/where_is_not_null/query.sql
deleted file mode 100644
index 90ff7c4fb..000000000
--- a/parser/testdata/where_is_not_null/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 1 WHERE 1 IS NOT NULL
diff --git a/parser/testdata/where_is_null/explain.txt b/parser/testdata/where_is_null/explain.txt
deleted file mode 100644
index 49d98d24b..000000000
--- a/parser/testdata/where_is_null/explain.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 2)
-   ExpressionList (children 1)
-    Literal UInt64_1
-   Function isNull (children 1)
-    ExpressionList (children 1)
-     Literal NULL
diff --git a/parser/testdata/where_is_null/query.sql b/parser/testdata/where_is_null/query.sql
deleted file mode 100644
index 27f6cda66..000000000
--- a/parser/testdata/where_is_null/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT 1 WHERE NULL IS NULL
diff --git a/parser/testdata/window_frame_rows/explain.txt b/parser/testdata/window_frame_rows/explain.txt
deleted file mode 100644
index e0f0e4791..000000000
--- a/parser/testdata/window_frame_rows/explain.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 2)
-   ExpressionList (children 2)
-    Identifier number
-    Function avg (children 2)
-     ExpressionList (children 1)
-      Identifier number
-     WindowDefinition (children 2)
-      Literal UInt64_1
-      Literal UInt64_1
-   TablesInSelectQuery (children 1)
-    TablesInSelectQueryElement (children 1)
-     TableExpression (children 1)
-      Function numbers (children 1)
-       ExpressionList (children 1)
-        Literal UInt64_10
diff --git a/parser/testdata/window_frame_rows/query.sql b/parser/testdata/window_frame_rows/query.sql
deleted file mode 100644
index 6db5be2ba..000000000
--- a/parser/testdata/window_frame_rows/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT number, avg(number) OVER (ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING) FROM numbers(10)
diff --git a/parser/testdata/with_scalar/explain.txt b/parser/testdata/with_scalar/explain.txt
deleted file mode 100644
index 29d4f0a17..000000000
--- a/parser/testdata/with_scalar/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 2)
-   ExpressionList (children 1)
-    Literal UInt64_1 (alias x)
-   ExpressionList (children 1)
-    Identifier x
diff --git a/parser/testdata/with_scalar/query.sql b/parser/testdata/with_scalar/query.sql
deleted file mode 100644
index 5415e0bc5..000000000
--- a/parser/testdata/with_scalar/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-WITH 1 AS x SELECT x
diff --git a/parser/testdata/with_subquery_cte/explain.txt b/parser/testdata/with_subquery_cte/explain.txt
deleted file mode 100644
index e8ac7890d..000000000
--- a/parser/testdata/with_subquery_cte/explain.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 3)
-   ExpressionList (children 1)
-    WithElement (children 1)
-     Subquery (children 1)
-      SelectWithUnionQuery (children 1)
-       ExpressionList (children 1)
-        SelectQuery (children 1)
-         ExpressionList (children 1)
-          Literal UInt64_1
-   ExpressionList (children 1)
-    Asterisk
-   TablesInSelectQuery (children 1)
-    TablesInSelectQueryElement (children 1)
-     TableExpression (children 1)
-      TableIdentifier x
diff --git a/parser/testdata/with_subquery_cte/query.sql b/parser/testdata/with_subquery_cte/query.sql
deleted file mode 100644
index a803de93b..000000000
--- a/parser/testdata/with_subquery_cte/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-WITH x AS (SELECT 1) SELECT * FROM x
diff --git a/parser/testdata/xxhash32/explain.txt b/parser/testdata/xxhash32/explain.txt
deleted file mode 100644
index a35267a08..000000000
--- a/parser/testdata/xxhash32/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function xxHash32 (children 1)
-     ExpressionList (children 1)
-      Literal \'hello\'
diff --git a/parser/testdata/xxhash32/query.sql b/parser/testdata/xxhash32/query.sql
deleted file mode 100644
index b1cd81904..000000000
--- a/parser/testdata/xxhash32/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT xxHash32('hello')
diff --git a/parser/testdata/xxhash64/explain.txt b/parser/testdata/xxhash64/explain.txt
deleted file mode 100644
index 334a4254c..000000000
--- a/parser/testdata/xxhash64/explain.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function xxHash64 (children 1)
-     ExpressionList (children 1)
-      Literal \'hello\'
diff --git a/parser/testdata/xxhash64/query.sql b/parser/testdata/xxhash64/query.sql
deleted file mode 100644
index b6fb81d0f..000000000
--- a/parser/testdata/xxhash64/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT xxHash64('hello')
diff --git a/parser/testdata/yesterday/explain.txt b/parser/testdata/yesterday/explain.txt
deleted file mode 100644
index 582dee513..000000000
--- a/parser/testdata/yesterday/explain.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-SelectWithUnionQuery (children 1)
- ExpressionList (children 1)
-  SelectQuery (children 1)
-   ExpressionList (children 1)
-    Function yesterday (children 1)
-     ExpressionList
diff --git a/parser/testdata/yesterday/query.sql b/parser/testdata/yesterday/query.sql
deleted file mode 100644
index 4aa7c55a5..000000000
--- a/parser/testdata/yesterday/query.sql
+++ /dev/null
@@ -1 +0,0 @@
-SELECT yesterday()